Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/fs/Kconfig b/fs/Kconfig
new file mode 100644
index 0000000..6a4ad4b
--- /dev/null
+++ b/fs/Kconfig
@@ -0,0 +1,1729 @@
+#
+# File system configuration
+#
+
+menu "File systems"
+
+config EXT2_FS
+	tristate "Second extended fs support"
+	help
+	  Ext2 is a standard Linux file system for hard disks.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called ext2.  Be aware however that the file system
+	  of your root partition (the one containing the directory /) cannot
+	  be compiled as a module, and so this could be dangerous.
+
+	  If unsure, say Y.
+
+config EXT2_FS_XATTR
+	bool "Ext2 extended attributes"
+	depends on EXT2_FS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config EXT2_FS_POSIX_ACL
+	bool "Ext2 POSIX Access Control Lists"
+	depends on EXT2_FS_XATTR
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EXT2_FS_SECURITY
+	bool "Ext2 Security Labels"
+	depends on EXT2_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ext2 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config EXT3_FS
+	tristate "Ext3 journalling file system support"
+	help
+	  This is the journaling version of the Second extended file system
+	  (often called ext3), the de facto standard Linux file system
+	  (method to organize files on a storage device) for hard disks.
+
+	  The journaling code included in this driver means you do not have
+	  to run e2fsck (file system checker) on your file systems after a
+	  crash.  The journal keeps track of any changes that were being made
+	  at the time the system crashed, and can ensure that your file system
+	  is consistent without the need for a lengthy check.
+
+	  Other than adding the journal to the file system, the on-disk format
+	  of ext3 is identical to ext2.  It is possible to freely switch
+	  between using the ext3 driver and the ext2 driver, as long as the
+	  file system has been cleanly unmounted, or e2fsck is run on the file
+	  system.
+
+	  To add a journal on an existing ext2 file system or change the
+	  behavior of ext3 file systems, you can use the tune2fs utility ("man
+	  tune2fs").  To modify attributes of files and directories on ext3
+	  file systems, use chattr ("man chattr").  You need to be using
+	  e2fsprogs version 1.20 or later in order to create ext3 journals
+	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called ext3.  Be aware however that the file system
+	  of your root partition (the one containing the directory /) cannot
+	  be compiled as a module, and so this may be dangerous.
+
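+# A hedged example of the tune2fs step described above, assuming a
+# hypothetical ext2 partition /dev/hda1:
+#   tune2fs -j /dev/hda1           # add a journal to the existing ext2 fs
+#   mount -t ext3 /dev/hda1 /mnt   # the volume can then be mounted as ext3
+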
+config EXT3_FS_XATTR
+	bool "Ext3 extended attributes"
+	depends on EXT3_FS
+	default y
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  You need this for POSIX ACL support on ext3.
+
+	  If unsure, say N.
+
+config EXT3_FS_POSIX_ACL
+	bool "Ext3 POSIX Access Control Lists"
+	depends on EXT3_FS_XATTR
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EXT3_FS_SECURITY
+	bool "Ext3 Security Labels"
+	depends on EXT3_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ext3 filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config JBD
+# CONFIG_JBD could be its own option (even modular), but until there are
+# other users than ext3, we will simply make it be the same as CONFIG_EXT3_FS
+# dep_tristate '  Journal Block Device support (JBD for ext3)' CONFIG_JBD $CONFIG_EXT3_FS
+	tristate
+	default EXT3_FS
+	help
+	  This is a generic journaling layer for block devices.  It is
+	  currently used by the ext3 file system, but it could also be used to
+	  add journal support to other file systems or block devices such as
+	  RAID or LVM.
+
+	  If you are using the ext3 file system, you need to say Y here. If
+	  you are not using ext3 then you will probably want to say N.
+
+	  To compile this device as a module, choose M here: the module will be
+	  called jbd.  If you are compiling ext3 into the kernel, you cannot
+	  compile this code as a module.
+
+config JBD_DEBUG
+	bool "JBD (ext3) debugging support"
+	depends on JBD
+	help
+	  If you are using the ext3 journaled file system (or potentially any
+	  other file system/device using JBD), this option allows you to
+	  enable debugging output while the system is running, in order to
+	  help track down any problems you are having.  By default the
+	  debugging output will be turned off.
+
+	  If you select Y here, then you will be able to turn on debugging
+	  with "echo N > /proc/sys/fs/jbd-debug", where N is a number between
+	  1 and 5; the higher the number, the more debugging output is
+	  generated.  To turn debugging off again, do
+	  "echo 0 > /proc/sys/fs/jbd-debug".
+
+config FS_MBCACHE
+# Meta block cache for Extended Attributes (ext2/ext3)
+	tristate
+	depends on EXT2_FS_XATTR || EXT3_FS_XATTR
+	default y if EXT2_FS=y || EXT3_FS=y
+	default m if EXT2_FS=m || EXT3_FS=m
+
+config REISERFS_FS
+	tristate "Reiserfs support"
+	help
+	  Stores not just filenames but the files themselves in a balanced
+	  tree.  Uses journaling.
+
+	  Balanced trees are more efficient than traditional file system
+	  architectural foundations.
+
+	  In general, ReiserFS is as fast as ext2, but is very efficient with
+	  large directories and small files.  Additional patches are needed
+	  for NFS and quotas, please see <http://www.namesys.com/> for links.
+
+	  It is more easily extended to have features currently found in
+	  database and keyword search systems than block allocation based file
+	  systems are.  The next version will be so extended, and will support
+	  plugins consistent with our motto ``It takes more than a license to
+	  make source code open.''
+
+	  Read <http://www.namesys.com/> to learn more about reiserfs.
+
+	  Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
+
+	  If you like it, you can pay us to add new features to it that you
+	  need, buy a support contract, or pay us to port it to another OS.
+
+config REISERFS_CHECK
+	bool "Enable reiserfs debug mode"
+	depends on REISERFS_FS
+	help
+	  If you set this to Y, then ReiserFS will perform every check it can
+	  possibly imagine of its internal consistency throughout its
+	  operation.  It will also go substantially slower.  More than once we
+	  have forgotten that this was on, and then gone despondent over the
+	  latest benchmarks. :-) Use of this option allows our team to go all
+	  out in checking for consistency when debugging without fear of its
+	  effect on end users.  If you are on the verge of sending in a bug
+	  report, say Y and you might get a useful error message.  Almost
+	  everyone should say N.
+
+config REISERFS_PROC_INFO
+	bool "Stats in /proc/fs/reiserfs"
+	depends on REISERFS_FS
+	help
+	  Create under /proc/fs/reiserfs a hierarchy of files, displaying
+	  various ReiserFS statistics and internal data at the expense of
+	  making your kernel or module slightly larger (+8 KB). This also
+	  increases the amount of kernel memory required for each mount.
+	  Almost everyone but ReiserFS developers and people fine-tuning
+	  reiserfs or tracing problems should say N.
+
+config REISERFS_FS_XATTR
+	bool "ReiserFS extended attributes"
+	depends on REISERFS_FS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config REISERFS_FS_POSIX_ACL
+	bool "ReiserFS POSIX Access Control Lists"
+	depends on REISERFS_FS_XATTR
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config REISERFS_FS_SECURITY
+	bool "ReiserFS Security Labels"
+	depends on REISERFS_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the ReiserFS filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config JFS_FS
+	tristate "JFS filesystem support"
+	select NLS
+	help
+	  This is a port of IBM's Journaled Filesystem.  More information is
+	  available in the file <file:Documentation/filesystems/jfs.txt>.
+
+	  If you do not intend to use the JFS filesystem, say N.
+
+config JFS_POSIX_ACL
+	bool "JFS POSIX Access Control Lists"
+	depends on JFS_FS
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the Posix ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config JFS_SECURITY
+	bool "JFS Security Labels"
+	depends on JFS_FS
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the jfs filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config JFS_DEBUG
+	bool "JFS debugging"
+	depends on JFS_FS
+	help
+	  If you are experiencing any problems with the JFS filesystem, say
+	  Y here.  This will result in additional debugging messages being
+	  written to the system log.  Under normal circumstances, this
+	  results in very little overhead.
+
+config JFS_STATISTICS
+	bool "JFS statistics"
+	depends on JFS_FS
+	help
+	  Enabling this option will cause statistics from the JFS file system
+	  to be made available to the user in the /proc/fs/jfs/ directory.
+
+config FS_POSIX_ACL
+# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs)
+#
+# NOTE: you can implement Posix ACLs without these helpers (XFS does).
+# 	Never use this symbol for ifdefs.
+#
+	bool
+	depends on EXT2_FS_POSIX_ACL || EXT3_FS_POSIX_ACL || JFS_POSIX_ACL || REISERFS_FS_POSIX_ACL || NFSD_V4
+	default y
+
+source "fs/xfs/Kconfig"
+
+config MINIX_FS
+	tristate "Minix fs support"
+	help
+	  Minix is a simple operating system used in many classes about OS's.
+	  The minix file system (method to organize files on a hard disk
+	  partition or a floppy disk) was the original file system for Linux,
+	  but has been superseded by the second extended file system ext2fs.
+	  You don't want to use the minix file system on your hard disk
+	  because of certain built-in restrictions, but it is sometimes found
+	  on older Linux floppy disks.  This option will enlarge your kernel
+	  by about 28 KB. If unsure, say N.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called minix.  Note that the file system of your root
+	  partition (the one containing the directory /) cannot be compiled as
+	  a module.
+
+config ROMFS_FS
+	tristate "ROM file system support"
+	---help---
+	  This is a very small read-only file system mainly intended for
+	  initial ram disks of installation disks, but it could be used for
+	  other read-only media as well.  Read
+	  <file:Documentation/filesystems/romfs.txt> for details.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called romfs.  Note that the file system of your
+	  root partition (the one containing the directory /) cannot be a
+	  module.
+
+	  If you don't know whether you need it, then you don't need it:
+	  answer N.
+
+config QUOTA
+	bool "Quota support"
+	help
+	  If you say Y here, you will be able to set per user limits for disk
+	  usage (also called disk quotas). Currently, it works for the
+	  ext2, ext3, and reiserfs file systems. ext3 also supports journalled
+	  quotas for which you don't need to run quotacheck(8) after an unclean
+	  shutdown. You need additional software in order to use quota support
+	  (you can download sources from
+	  <http://www.sf.net/projects/linuxquota/>). For further details, read
+	  the Quota mini-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, or the documentation provided
+	  with the quota tools. Quota support is probably only useful for
+	  multi-user systems. If unsure, say N.
+
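+# A hedged sketch of a typical quota setup (paths and options illustrative,
+# not taken from the help text above): add "usrquota" to the mount options
+# of the file system in /etc/fstab, remount it, then run
+#   quotacheck -avug    # build the quota files
+#   quotaon -av         # turn quotas on
+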
+config QFMT_V1
+	tristate "Old quota format support"
+	depends on QUOTA
+	help
+	  This quota format was (and is) used by kernels earlier than 2.4.22.
+	  If you have quotas working and don't want to convert to the new
+	  quota format, say Y here.
+
+config QFMT_V2
+	tristate "Quota format v2 support"
+	depends on QUOTA
+	help
+	  This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+	  need this functionality, say Y here. Note that you will need recent
+	  quota utilities (>= 3.01) to use the new quota format with this
+	  kernel.
+
+config QUOTACTL
+	bool
+	depends on XFS_QUOTA || QUOTA
+	default y
+
+config DNOTIFY
+	bool "Dnotify support" if EMBEDDED
+	default y
+	help
+	  Dnotify is a directory-based per-fd file change notification system
+	  that uses signals to communicate events to user-space.  There exist
+	  superior alternatives, but some applications may still rely on
+	  dnotify.
+
+	  Because of this, if unsure, say Y.
+
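+# A minimal sketch of how an application uses dnotify (standard fcntl(2)
+# API, not taken from the help text above): watch a directory fd for new
+# files with
+#   fcntl(dir_fd, F_NOTIFY, DN_CREATE | DN_MULTISHOT);
+# after which the kernel delivers SIGIO to the process on each event.
+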
+config AUTOFS_FS
+	tristate "Kernel automounter support"
+	help
+	  The automounter is a tool to automatically mount remote file systems
+	  on demand. This implementation is partially kernel-based to reduce
+	  overhead in the already-mounted case; this is unlike the BSD
+	  automounter (amd), which is a pure user space daemon.
+
+	  To use the automounter you need the user-space tools from the autofs
+	  package; you can find the location in <file:Documentation/Changes>.
+	  You also want to answer Y to "NFS file system support", below.
+
+	  If you want to use the newer version of the automounter with more
+	  features, say N here and say Y to "Kernel automounter v4 support",
+	  below.
+
+	  To compile this support as a module, choose M here: the module will be
+	  called autofs.
+
+	  If you are not a part of a fairly large, distributed network, you
+	  probably do not need an automounter, and can say N here.
+
+config AUTOFS4_FS
+	tristate "Kernel automounter version 4 support (also supports v3)"
+	help
+	  The automounter is a tool to automatically mount remote file systems
+	  on demand. This implementation is partially kernel-based to reduce
+	  overhead in the already-mounted case; this is unlike the BSD
+	  automounter (amd), which is a pure user space daemon.
+
+	  To use the automounter you need the user-space tools from
+	  <ftp://ftp.kernel.org/pub/linux/daemons/autofs/v4/>; you also
+	  want to answer Y to "NFS file system support", below.
+
+	  To compile this support as a module, choose M here: the module will be
+	  called autofs4.  You will need to add "alias autofs autofs4" to your
+	  modules configuration file.
+
+	  If you are not a part of a fairly large, distributed network or
+	  don't have a laptop which needs to dynamically reconfigure to the
+	  local network, you probably do not need an automounter, and can say
+	  N here.
+
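+# A hedged example of the "alias autofs autofs4" step above: the line is
+# added to the module configuration file, e.g. /etc/modprobe.conf on 2.6
+# kernels (the exact file name depends on the distribution):
+#   alias autofs autofs4
+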
+menu "CD-ROM/DVD Filesystems"
+
+config ISO9660_FS
+	tristate "ISO 9660 CDROM file system support"
+	help
+	  This is the standard file system used on CD-ROMs.  It was previously
+	  known as "High Sierra File System" and is called "hsfs" on other
+	  Unix systems.  The so-called Rock-Ridge extensions which allow for
+	  long Unix filenames and symbolic links are also supported by this
+	  driver.  If you have a CD-ROM drive and want to do more with it than
+	  just listen to audio CDs and watch its LEDs, say Y (and read
+	  <file:Documentation/filesystems/isofs.txt> and the CD-ROM-HOWTO,
+	  available from <http://www.tldp.org/docs.html#howto>), thereby
+	  enlarging your kernel by about 27 KB; otherwise say N.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called isofs.
+
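+# A hedged example (device and mount point are the usual ones, not mandated
+# by the help text): a data CD is typically mounted read-only with
+#   mount -t iso9660 -o ro /dev/cdrom /mnt/cdrom
+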
+config JOLIET
+	bool "Microsoft Joliet CDROM extensions"
+	depends on ISO9660_FS
+	select NLS
+	help
+	  Joliet is a Microsoft extension for the ISO 9660 CD-ROM file system
+	  which allows for long filenames in unicode format (unicode is the
+	  new 16 bit character code, successor to ASCII, which encodes the
+	  characters of almost all languages of the world; see
+	  <http://www.unicode.org/> for more information).  Say Y here if you
+	  want to be able to read Joliet CD-ROMs under Linux.
+
+config ZISOFS
+	bool "Transparent decompression extension"
+	depends on ISO9660_FS
+	select ZLIB_INFLATE
+	help
+	  This is a Linux-specific extension to RockRidge which lets you store
+	  data in compressed form on a CD-ROM and have it transparently
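+# A hedged example of an NFS-root kernel command line (server address and
+# export path are hypothetical; see <file:Documentation/nfsroot.txt> for
+# the full syntax):
+#   root=/dev/nfs nfsroot=192.168.1.1:/srv/nfsroot ip=dhcp
+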
+	  decompressed when the CD-ROM is accessed.  See
+	  <http://www.kernel.org/pub/linux/utils/fs/zisofs/> for the tools
+	  necessary to create such a filesystem.  Say Y here if you want to be
+	  able to read such compressed CD-ROMs.
+
+config ZISOFS_FS
+# for fs/nls/Config.in
+	tristate
+	depends on ZISOFS
+	default ISO9660_FS
+
+config UDF_FS
+	tristate "UDF file system support"
+	help
+	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
+	  you intend to mount DVD discs or CD-RWs written in packet mode, or
+	  discs written by other UDF utilities, such as DirectCD.
+	  Please read <file:Documentation/filesystems/udf.txt>.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called udf.
+
+	  If unsure, say N.
+
+config UDF_NLS
+	bool
+	default y
+	depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
+
+endmenu
+
+menu "DOS/FAT/NT Filesystems"
+
+config FAT_FS
+	tristate
+	select NLS
+	help
+	  If you want to use one of the FAT-based file systems (the MS-DOS and
+	  VFAT (Windows 95) file systems), then you must say Y or M here
+	  to include FAT support. You will then be able to mount partitions or
+	  diskettes with FAT-based file systems and transparently access the
+	  files on them, i.e. MSDOS files will look and behave just like all
+	  other Unix files.
+
+	  This FAT support is not a file system in itself; it only provides
+	  the foundation for the other file systems. You will have to say Y or
+	  M to at least one of "MSDOS fs support" or "VFAT fs support" in
+	  order to make use of it.
+
+	  Another way to read and write MSDOS floppies and hard drive
+	  partitions from within Linux (but not transparently) is with the
+	  mtools ("man mtools") program suite. You don't need to say Y here in
+	  order to do that.
+
+	  If you need to move large files on floppies between a DOS and a
+	  Linux box, say Y here, mount the floppy under Linux with an MSDOS
+	  file system and use GNU tar's M option. GNU tar is a program
+	  available for Unix and DOS ("man tar" or "info tar").
+
+	  It is now also becoming possible to read and write compressed FAT
+	  file systems; read <file:Documentation/filesystems/fat_cvf.txt> for
+	  details.
+
+	  The FAT support will enlarge your kernel by about 37 KB. If unsure,
+	  say Y.
+
+	  To compile this as a module, choose M here: the module will be called
+	  fat.  Note that if you compile the FAT support as a module, you
+	  cannot compile any of the FAT-based file systems into the kernel
+	  -- they will have to be modules as well.
+
+config MSDOS_FS
+	tristate "MSDOS fs support"
+	select FAT_FS
+	help
+	  This allows you to mount MSDOS partitions of your hard drive (unless
+	  they are compressed; to access compressed MSDOS partitions under
+	  Linux, you can either use the DOS emulator DOSEMU, described in the
+	  DOSEMU-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, or try dmsdosfs in
+	  <ftp://ibiblio.org/pub/Linux/system/filesystems/dosfs/>. If you
+	  intend to use dosemu with a non-compressed MSDOS partition, say Y
+	  here) and MSDOS floppies. This means that file access becomes
+	  transparent, i.e. the MSDOS files look and behave just like all
+	  other Unix files.
+
+	  If you have Windows 95 or Windows NT installed on your MSDOS
+	  partitions, you should use the VFAT file system (say Y to "VFAT fs
+	  support" below), or you will not be able to see the long filenames
+	  generated by Windows 95 / Windows NT.
+
+	  This option will enlarge your kernel by about 7 KB. If unsure,
+	  answer Y. This will only work if you said Y to "DOS FAT fs support"
+	  as well. To compile this as a module, choose M here: the module will
+	  be called msdos.
+
+config VFAT_FS
+	tristate "VFAT (Windows-95) fs support"
+	select FAT_FS
+	help
+	  This option provides support for normal Windows file systems with
+	  long filenames.  That includes non-compressed FAT-based file systems
+	  used by Windows 95, Windows 98, Windows NT 4.0, and the Unix
+	  programs from the mtools package.
+
+	  The VFAT support enlarges your kernel by about 10 KB and it only
+	  works if you said Y to the "DOS FAT fs support" above.  Please read
+	  the file <file:Documentation/filesystems/vfat.txt> for details.  If
+	  unsure, say Y.
+
+	  To compile this as a module, choose M here: the module will be called
+	  vfat.
+
+config FAT_DEFAULT_CODEPAGE
+	int "Default codepage for FAT"
+	depends on MSDOS_FS || VFAT_FS
+	default 437
+	help
+	  This option should be set to the codepage of your FAT filesystems.
+	  It can be overridden with the "codepage" mount option.
+	  See <file:Documentation/filesystems/vfat.txt> for more information.
+
+config FAT_DEFAULT_IOCHARSET
+	string "Default iocharset for FAT"
+	depends on VFAT_FS
+	default "iso8859-1"
+	help
+	  Set this to the default input/output character set you'd
+	  like FAT to use. It should probably match the character set
+	  that most of your FAT filesystems use, and can be overridden
+	  with the "iocharset" mount option for FAT filesystems.
+	  Note that "utf8" is not recommended for FAT filesystems.
+	  If unsure, you shouldn't set "utf8" here.
+	  See <file:Documentation/filesystems/vfat.txt> for more information.
+
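+# A hedged example of overriding the defaults above at mount time (device
+# and mount point hypothetical):
+#   mount -t vfat -o codepage=437,iocharset=iso8859-1 /dev/hda5 /mnt/win
+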
+config NTFS_FS
+	tristate "NTFS file system support"
+	select NLS
+	help
+	  NTFS is the file system of Microsoft Windows NT, 2000, XP and 2003.
+
+	  Saying Y or M here enables read support.  There is partial, but
+	  safe, write support available.  For write support you must also
+	  say Y to "NTFS write support" below.
+
+	  There are also a number of user-space tools available, called
+	  ntfsprogs.  These include ntfsundelete and ntfsresize, that work
+	  without NTFS support enabled in the kernel.
+
+	  This is a rewrite from scratch of Linux NTFS support and replaced
+	  the old NTFS code starting with Linux 2.5.11.  A backport to
+	  the Linux 2.4 kernel series is separately available as a patch
+	  from the project web site.
+
+	  For more information see <file:Documentation/filesystems/ntfs.txt>
+	  and <http://linux-ntfs.sourceforge.net/>.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called ntfs.
+
+	  If you are not using Windows NT, 2000, XP or 2003 in addition to
+	  Linux on your computer it is safe to say N.
+
+config NTFS_DEBUG
+	bool "NTFS debugging support"
+	depends on NTFS_FS
+	help
+	  If you are experiencing any problems with the NTFS file system, say
+	  Y here.  This will result in additional consistency checks being
+	  performed by the driver, as well as additional debugging messages
+	  being written to the system log.  Note that debugging messages are
+	  disabled by default.  To enable them, supply the option debug_msgs=1
+	  at the kernel command line when booting the kernel or as an option
+	  to insmod when loading the ntfs module.  Once the driver is active,
+	  you can enable debugging messages by doing (as root):
+	  echo 1 > /proc/sys/fs/ntfs-debug
+	  Replacing the "1" with "0" would disable debug messages.
+
+	  If you leave debugging messages disabled, this results in little
+	  overhead, but enabling debug messages results in very significant
+	  slowdown of the system.
+
+	  When reporting bugs, please try to have available a full dump of
+	  debugging messages while the misbehaviour was occurring.
+
+config NTFS_RW
+	bool "NTFS write support"
+	depends on NTFS_FS
+	help
+	  This enables the partial, but safe, write support in the NTFS driver.
+
+	  The only supported operation is overwriting existing files, without
+	  changing the file length.  No file or directory creation, deletion or
+	  renaming is possible.  Note that only non-resident files can be
+	  written to, so you may find that some very small files (<500 bytes
+	  or so) cannot be written to.
+
+	  While we cannot guarantee that it will not damage any data, we have
+	  so far not received a single report where the driver would have
+	  damaged someone's data, so we assume it is perfectly safe to use.
+
+	  Note:  While write support is safe in this version (a rewrite from
+	  scratch of the NTFS support), it should be noted that the old NTFS
+	  write support, included in Linux 2.5.10 and before (since 1997),
+	  is not safe.
+
+	  This is currently useful with TopologiLinux.  TopologiLinux is run
+	  on top of any DOS/Microsoft Windows system without partitioning your
+	  hard disk.  Unlike other Linux distributions TopologiLinux does not
+	  need its own partition.  For more information see
+	  <http://topologi-linux.sourceforge.net/>
+
+	  It is perfectly safe to say N here.
+
+endmenu
+
+menu "Pseudo filesystems"
+
+config PROC_FS
+	bool "/proc file system support"
+	help
+	  This is a virtual file system providing information about the status
+	  of the system. "Virtual" means that it doesn't take up any space on
+	  your hard disk: the files are created on the fly by the kernel when
+	  you try to access them. Also, you cannot read the files with older
+	  versions of the program less: you need to use more or cat.
+
+	  It's totally cool; for example, "cat /proc/interrupts" gives
+	  information about what the different IRQs are used for at the moment
+	  (there is a small number of Interrupt ReQuest lines in your computer
+	  that are used by the attached devices to gain the CPU's attention --
+	  often a source of trouble if two devices are mistakenly configured
+	  to use the same IRQ). The program procinfo can be used to display
+	  some information about your system gathered from the /proc file
+	  system.
+
+	  Before you can use the /proc file system, it has to be mounted,
+	  meaning it has to be given a location in the directory hierarchy.
+	  That location should be /proc. A command such as "mount -t proc proc
+	  /proc" or the equivalent line in /etc/fstab does the job.
+
+	  The /proc file system is explained in the file
+	  <file:Documentation/filesystems/proc.txt> and on the proc(5) manpage
+	  ("man 5 proc").
+
+	  This option will enlarge your kernel by about 67 KB. Several
+	  programs depend on this, so everyone should say Y here.
+
+config PROC_KCORE
+	bool "/proc/kcore support" if !ARM
+	depends on PROC_FS && MMU
+
+config SYSFS
+	bool "sysfs file system support" if EMBEDDED
+	default y
+	help
+	The sysfs filesystem is a virtual filesystem that the kernel uses to
+	export internal kernel objects, their attributes, and their
+	relationships to one another.
+
+	Users can use sysfs to ascertain useful information about the running
+	kernel, such as the devices the kernel has discovered on each bus and
+	which driver each is bound to. sysfs can also be used to tune devices
+	and other kernel subsystems.
+
+	Some system agents rely on the information in sysfs to operate.
+	/sbin/hotplug uses device and object attributes in sysfs to assist in
+	delegating policy decisions, like persistently naming devices.
+
+	sysfs is currently used by the block subsystem to mount the root
+	partition.  If sysfs is disabled you must specify the boot device on
+	the kernel boot command line via its major and minor numbers.  For
+	example, "root=03:01" for /dev/hda1.
+
+	Designers of embedded systems may wish to say N here to conserve space.
+
+config DEVFS_FS
+	bool "/dev file system support (OBSOLETE)"
+	depends on EXPERIMENTAL
+	help
+	  This is support for devfs, a virtual file system (like /proc) which
+	  provides the file system interface to device drivers, normally found
+	  in /dev. Devfs does not depend on major and minor number
+	  allocations. Device drivers register entries in /dev which then
+	  appear automatically, which means that the system administrator does
+	  not have to create character and block special device files in the
+	  /dev directory using the mknod command (or MAKEDEV script) anymore.
+
+	  This is work in progress. If you want to use this, you *must* read
+	  the material in <file:Documentation/filesystems/devfs/>, especially
+	  the file README there.
+
+	  Note that devfs no longer manages /dev/pts!  If you are using UNIX98
+	  ptys, you will also need to mount the /dev/pts filesystem (devpts).
+
+	  Note that devfs has been obsoleted by udev,
+	  <http://www.kernel.org/pub/linux/utils/kernel/hotplug/>.
+	  It has been stripped down to a bare minimum and is only provided for
+	  legacy installations that use its naming scheme which is
+	  unfortunately different from the names normal Linux installations
+	  use.
+
+	  If unsure, say N.
+
+config DEVFS_MOUNT
+	bool "Automatically mount at boot"
+	depends on DEVFS_FS
+	help
+	  This option appears if you have CONFIG_DEVFS_FS enabled. Setting
+	  this to 'Y' will make the kernel automatically mount devfs onto /dev
+	  when the system is booted, before the init thread is started.
+	  You can override this with the "devfs=nomount" boot option.
+
+	  If unsure, say N.
+
+config DEVFS_DEBUG
+	bool "Debug devfs"
+	depends on DEVFS_FS
+	help
+	  If you say Y here, then the /dev file system code will generate
+	  debugging messages. See the file
+	  <file:Documentation/filesystems/devfs/boot-options> for more
+	  details.
+
+	  If unsure, say N.
+
+config DEVPTS_FS_XATTR
+	bool "/dev/pts Extended Attributes"
+	depends on UNIX98_PTYS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config DEVPTS_FS_SECURITY
+	bool "/dev/pts Security Labels"
+	depends on DEVPTS_FS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the /dev/pts filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config TMPFS
+	bool "Virtual memory file system support (former shm fs)"
+	help
+	  Tmpfs is a file system which keeps all files in virtual memory.
+
+	  Everything in tmpfs is temporary in the sense that no files will be
+	  created on your hard drive. The files live in memory and swap
+	  space. If you unmount a tmpfs instance, everything stored therein is
+	  lost.
+
+	  See <file:Documentation/filesystems/tmpfs.txt> for details.
+
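+# A hedged example (mount point and size are illustrative): a size-limited
+# tmpfs instance can be mounted with
+#   mount -t tmpfs -o size=64m tmpfs /mnt/tmp
+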
+config TMPFS_XATTR
+	bool "tmpfs Extended Attributes"
+	depends on TMPFS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config TMPFS_SECURITY
+	bool "tmpfs Security Labels"
+	depends on TMPFS_XATTR
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute handler for file security
+	  labels in the tmpfs filesystem.
+	  If you are not using a security module that requires using
+	  extended attributes for file security labels, say N.
+
+config HUGETLBFS
+	bool "HugeTLB file system support"
+	depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || X86_64 || BROKEN
+
+config HUGETLB_PAGE
+	def_bool HUGETLBFS
+
+config RAMFS
+	bool
+	default y
+	---help---
+	  Ramfs is a file system which keeps all files in RAM. It allows
+	  read and write access.
+
+	  It is more of a programming example than a usable file system.  If
+	  you need a file system which lives in RAM with limit checking, use
+	  tmpfs.
+
+endmenu
+
+menu "Miscellaneous filesystems"
+
+config ADFS_FS
+	tristate "ADFS file system support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  The Acorn Disc Filing System is the standard file system of the
+	  RiscOS operating system which runs on Acorn's ARM-based Risc PC
+	  systems and the Acorn Archimedes range of machines. If you say Y
+	  here, Linux will be able to read from ADFS partitions on hard drives
+	  and from ADFS-formatted floppy discs. If you also want to be able to
+	  write to those devices, say Y to "ADFS write support" below.
+
+	  The ADFS partition should be the first partition (i.e.,
+	  /dev/[hs]d?1) on each of your drives. Please read the file
+	  <file:Documentation/filesystems/adfs.txt> for further details.
+
+	  To compile this code as a module, choose M here: the module will be
+	  called adfs.
+
+	  If unsure, say N.
+
+config ADFS_FS_RW
+	bool "ADFS write support (DANGEROUS)"
+	depends on ADFS_FS
+	help
+	  If you say Y here, you will be able to write to ADFS partitions on
+	  hard drives and ADFS-formatted floppy disks. This is experimental
+	  code, so if you're unsure, say N.
+
+config AFFS_FS
+	tristate "Amiga FFS file system support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  The Fast File System (FFS) is the common file system used on hard
+	  disks by Amiga(tm) systems since AmigaOS Version 1.3 (34.20).  Say Y
+	  if you want to be able to read and write files from and to an Amiga
+	  FFS partition on your hard drive.  Amiga floppies, however, cannot
+	  be read with this driver due to an incompatibility between the
+	  floppy controller used in an Amiga and the standard floppy
+	  controller in PCs and workstations. Read
+	  <file:Documentation/filesystems/affs.txt> and <file:fs/affs/Changes>.
+
+	  With this driver you can also mount disk files used by Bernd
+	  Schmidt's Un*X Amiga Emulator
+	  (<http://www.freiburg.linux.de/~uae/>).
+	  If you want to do this, you will also need to say Y or M to "Loop
+	  device support", above.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called affs.  If unsure, say N.
+
+config HFS_FS
+	tristate "Apple Macintosh file system support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  If you say Y here, you will be able to mount Macintosh-formatted
+	  floppy disks and hard drive partitions with full read-write access.
+	  Please read <file:fs/hfs/HFS.txt> to learn about the available mount
+	  options.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called hfs.
+
+config HFSPLUS_FS
+	tristate "Apple Extended HFS file system support"
+	select NLS
+	select NLS_UTF8
+	help
+	  If you say Y here, you will be able to mount extended format
+	  Macintosh-formatted hard drive partitions with full read-write access.
+
+	  This file system is often called HFS+ and was introduced with
+	  MacOS 8. It includes all Mac specific filesystem data such as
+	  data forks and creator codes, but it also has several UNIX
+	  style features such as file ownership and permissions.
+
+config BEFS_FS
+	tristate "BeOS file system (BeFS) support (read only) (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select NLS
+	help
+	  The BeOS File System (BeFS) is the native file system of Be, Inc's
+	  BeOS. Notable features include support for arbitrary attributes
+	  on files and directories, and database-like indices on selected
+	  attributes. (Also note that this driver doesn't make those features
+	  available at this time). It is a 64 bit filesystem, so it supports
+	  extremely large volumes and files.
+
+	  If you use this filesystem, you should also say Y to at least one
+	  of the NLS (native language support) options below.
+
+	  If you don't know what this is about, say N.
+
+	  To compile this as a module, choose M here: the module will be
+	  called befs.
+
+config BEFS_DEBUG
+	bool "Debug BeFS"
+	depends on BEFS_FS
+	help
+	  If you say Y here, you can use the 'debug' mount option to enable
+	  debugging output from the driver. 
+
+config BFS_FS
+	tristate "BFS file system support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Boot File System (BFS) is a file system used under SCO UnixWare to
+	  allow the bootloader access to the kernel image and other important
+	  files during the boot process.  It is usually mounted under /stand
+	  and corresponds to the slice marked as "STAND" in the UnixWare
+	  partition.  You should say Y if you want to read or write the files
+	  on your /stand slice from within Linux.  You then also need to say Y
+	  to "UnixWare slices support", below.  More information about the BFS
+	  file system is contained in the file
+	  <file:Documentation/filesystems/bfs.txt>.
+
+	  If you don't know what this is about, say N.
+
+	  To compile this as a module, choose M here: the module will be called
+	  bfs.  Note that the file system of your root partition (the one
+	  containing the directory /) cannot be compiled as a module.
+
+
+
+config EFS_FS
+	tristate "EFS file system support (read only) (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  EFS is an older file system used for non-ISO9660 CD-ROMs and hard
+	  disk partitions by SGI's IRIX operating system (IRIX 6.0 and newer
+	  use the XFS file system for hard disk partitions, however).
+
+	  This implementation only offers read-only access. If you don't know
+	  what all this is about, it's safe to say N. For more information
+	  about EFS see its home page at <http://aeschi.ch.eu.org/efs/>.
+
+	  To compile the EFS file system support as a module, choose M here: the
+	  module will be called efs.
+
+config JFFS_FS
+	tristate "Journalling Flash File System (JFFS) support"
+	depends on MTD
+	help
+	  JFFS is the Journaling Flash File System developed by Axis
+	  Communications in Sweden, aimed at providing a crash/powerdown-safe
+	  file system for disk-less embedded devices. Further information is
+	  available at <http://developer.axis.com/software/jffs/>.
+
+config JFFS_FS_VERBOSE
+	int "JFFS debugging verbosity (0 = quiet, 3 = noisy)"
+	depends on JFFS_FS
+	default "0"
+	help
+	  Determines the verbosity level of the JFFS debugging messages.
+
+config JFFS_PROC_FS
+	bool "JFFS stats available in /proc filesystem"
+	depends on JFFS_FS && PROC_FS
+	help
+	  Enabling this option will cause statistics from mounted JFFS file systems
+	  to be made available to the user in the /proc/fs/jffs/ directory.
+
+config JFFS2_FS
+	tristate "Journalling Flash File System v2 (JFFS2) support"
+	select CRC32
+	depends on MTD
+	help
+	  JFFS2 is the second generation of the Journalling Flash File System
+	  for use on diskless embedded devices. It provides improved wear
+	  levelling, compression and support for hard links. You cannot use
+	  this on normal block devices, only on 'MTD' devices.
+
+	  Further information on the design and implementation of JFFS2 is
+	  available at <http://sources.redhat.com/jffs2/>.
+
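+# A hedged example (MTD device number hypothetical): a JFFS2 image on the
+# first MTD partition is mounted through the mtdblock driver with
+#   mount -t jffs2 /dev/mtdblock0 /mnt/flash
+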
+config JFFS2_FS_DEBUG
+	int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)"
+	depends on JFFS2_FS
+	default "0"
+	help
+	  This controls the amount of debugging messages produced by the JFFS2
+	  code. Set it to zero for use in production systems. For evaluation,
+	  testing and debugging, it's advisable to set it to one. This will
+	  enable a few assertions and will print debugging messages at the
+	  KERN_DEBUG loglevel, where they won't normally be visible. Level 2
+	  is unlikely to be useful - it enables extra debugging in certain
+	  areas which at one point needed debugging, but when the bugs were
+	  located and fixed, the detailed messages were relegated to level 2.
+
+	  If reporting bugs, please try to have available a full dump of the
+	  messages at debug level 1 while the misbehaviour was occurring.
+
+config JFFS2_FS_NAND
+	bool "JFFS2 support for NAND flash"
+	depends on JFFS2_FS
+	default n
+	help
+	  This enables the support for NAND flash in JFFS2. NAND is a newer
+	  type of flash chip design than the traditional NOR flash, with
+	  higher density but a handful of characteristics which make it more
+	  interesting for the file system to use.
+
+	  Say 'N' unless you have NAND flash.
+
+config JFFS2_FS_NOR_ECC
+        bool "JFFS2 support for ECC'd NOR flash (EXPERIMENTAL)"
+        depends on JFFS2_FS && EXPERIMENTAL
+        default n
+        help
+          This enables the experimental support for NOR flash with transparent
+          ECC for JFFS2. This type of flash chip is not common, however it is
+          available from ST Microelectronics.
+
+config JFFS2_COMPRESSION_OPTIONS
+	bool "Advanced compression options for JFFS2"
+	depends on JFFS2_FS
+	default n
+	help
+	  Enabling this option allows you to explicitly choose which
+	  compression modules, if any, are enabled in JFFS2. Removing
+	  compressors can mean you cannot read existing file systems,
+	  and enabling experimental compressors can mean that you
+	  write a file system which cannot be read by a standard kernel.
+
+	  If unsure, you should _definitely_ say 'N'.
+
+config JFFS2_ZLIB
+	bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS
+	select ZLIB_INFLATE
+	select ZLIB_DEFLATE
+	depends on JFFS2_FS
+	default y
+        help
+          Zlib is designed to be a free, general-purpose, legally unencumbered,
+          lossless data-compression library for use on virtually any computer 
+          hardware and operating system. See <http://www.gzip.org/zlib/> for
+          further information.
+          
+          Say 'Y' if unsure.
+
+config JFFS2_RTIME
+	bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+	depends on JFFS2_FS
+	default y
+        help
+          Rtime does manage to recompress already-compressed data. Say 'Y' if unsure.
+
+config JFFS2_RUBIN
+	bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS
+	depends on JFFS2_FS
+	default n
+        help
+          RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
+
+choice
+        prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
+        default JFFS2_CMODE_PRIORITY
+        depends on JFFS2_FS
+        help
+          You can set here the default compression mode of JFFS2 from 
+          the available compression modes. Don't touch if unsure.
+
+config JFFS2_CMODE_NONE
+        bool "no compression"
+        help
+          Uses no compression.
+
+config JFFS2_CMODE_PRIORITY
+        bool "priority"
+        help
+          Tries the compressors in a predefined order and chooses the first
+          successful one.
+
+config JFFS2_CMODE_SIZE
+        bool "size (EXPERIMENTAL)"
+        help
+          Tries all compressors and chooses the one which has the smallest 
+          result.
+
+endchoice
+
+config CRAMFS
+	tristate "Compressed ROM file system support (cramfs)"
+	select ZLIB_INFLATE
+	help
+	  Saying Y here includes support for CramFs (Compressed ROM File
+	  System).  CramFs is designed to be a simple, small, and compressed
+	  file system for ROM based embedded systems.  CramFs is read-only,
+	  limited to 256MB file systems (with 16MB files), and doesn't support
+	  16/32-bit uids/gids, hard links or timestamps.
+
+	  See <file:Documentation/filesystems/cramfs.txt> and
+	  <file:fs/cramfs/README> for further information.
+
+	  To compile this as a module, choose M here: the module will be called
+	  cramfs.  Note that the root file system (the one containing the
+	  directory /) cannot be compiled as a module.
+
+	  If unsure, say N.
+
+config VXFS_FS
+	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
+	help
+	  FreeVxFS is a file system driver that supports the VERITAS VxFS(TM)
+	  file system format.  VERITAS VxFS(TM) is the standard file system
+	  of SCO UnixWare (and possibly others) and optionally available
+	  for Sunsoft Solaris, HP-UX and many other operating systems.
+	  Currently only read-only access is supported.
+
+	  NOTE: the file system type as used by mount(1), mount(2) and
+	  fstab(5) is 'vxfs' as it describes the file system format, not
+	  the actual driver.
+
+	  To compile this as a module, choose M here: the module will be
+	  called freevxfs.  If unsure, say N.
+
+
+config HPFS_FS
+	tristate "OS/2 HPFS file system support"
+	help
+	  OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
+	  is the file system used for organizing files on OS/2 hard disk
+	  partitions. Say Y if you want to be able to read files from and
+	  write files to an OS/2 HPFS partition on your hard drive. OS/2
+	  floppies however are in regular MSDOS format, so you don't need this
+	  option in order to be able to read them. Read
+	  <file:Documentation/filesystems/hpfs.txt>.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called hpfs.  If unsure, say N.
+
+
+
+config QNX4FS_FS
+	tristate "QNX4 file system support (read only)"
+	help
+	  This is the file system used by the real-time operating systems
+	  QNX 4 and QNX 6 (the latter is also called QNX RTP).
+	  Further information is available at <http://www.qnx.com/>.
+	  Say Y if you intend to mount QNX hard disks or floppies.
+	  Unless you say Y to "QNX4FS read-write support" below, you will
+	  only be able to read these file systems.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called qnx4.
+
+	  If you don't know whether you need it, then you don't need it:
+	  answer N.
+
+config QNX4FS_RW
+	bool "QNX4FS write support (DANGEROUS)"
+	depends on QNX4FS_FS && EXPERIMENTAL && BROKEN
+	help
+	  Say Y if you want to test write support for QNX4 file systems.
+
+	  It's currently broken, so for now:
+	  answer N.
+
+
+
+config SYSV_FS
+	tristate "System V/Xenix/V7/Coherent file system support"
+	help
+	  SCO, Xenix and Coherent are commercial Unix systems for Intel
+	  machines, and Version 7 was used on the DEC PDP-11. Saying Y
+	  here would allow you to read from their floppies and hard disk
+	  partitions.
+
+	  If you have floppies or hard disk partitions like that, it is likely
+	  that they contain binaries from those other Unix systems; in order
+	  to run these binaries, you will want to install linux-abi, which is
+	  a set of kernel modules that lets you run SCO, Xenix, Wyse,
+	  UnixWare, Dell Unix and System V programs under Linux.  It is
+	  available via FTP (user: ftp) from
+	  <ftp://ftp.openlinux.org/pub/people/hch/linux-abi/>.
+	  NOTE: that will work only for binaries from Intel-based systems;
+	  PDP ones will have to wait until somebody ports Linux to -11 ;-)
+
+	  If you only intend to mount files from some other Unix over the
+	  network using NFS, you don't need the System V file system support
+	  (but you need NFS file system support obviously).
+
+	  Note that this option is generally not needed for floppies, since a
+	  good portable way to transport files and directories between unixes
+	  (and even other operating systems) is given by the tar program ("man
+	  tar" or preferably "info tar").  Note also that this option has
+	  nothing whatsoever to do with the option "System V IPC". Read about
+	  the System V file system in
+	  <file:Documentation/filesystems/sysv-fs.txt>.
+	  Saying Y here will enlarge your kernel by about 27 KB.
+
+	  To compile this as a module, choose M here: the module will be called
+	  sysv.
+
+	  If you haven't heard about all of this before, it's safe to say N.
+
+
+
+config UFS_FS
+	tristate "UFS file system support (read only)"
+	help
+	  BSD and derivative versions of Unix (such as SunOS, FreeBSD, NetBSD,
+	  OpenBSD and NeXTstep) use a file system called UFS. Some System V
+	  Unixes can create and mount hard disk partitions and diskettes using
+	  this file system as well. Saying Y here will allow you to read from
+	  these partitions; if you also want to write to them, say Y to the
+	  experimental "UFS file system write support", below. Please read the
+	  file <file:Documentation/filesystems/ufs.txt> for more information.
+
+	  The recently released UFS2 variant (used in FreeBSD 5.x) is
+	  supported read-only.
+
+	  If you only intend to mount files from some other Unix over the
+	  network using NFS, you don't need the UFS file system support (but
+	  you need NFS file system support obviously).
+
+	  Note that this option is generally not needed for floppies, since a
+	  good portable way to transport files and directories between unixes
+	  (and even other operating systems) is given by the tar program ("man
+	  tar" or preferably "info tar").
+
+	  When accessing NeXTstep files, you may need to convert them from the
+	  NeXT character set to the Latin1 character set; use the program
+	  recode ("info recode") for this purpose.
+
+	  To compile the UFS file system support as a module, choose M here: the
+	  module will be called ufs.
+
+	  If you haven't heard about all of this before, it's safe to say N.
+
+config UFS_FS_WRITE
+	bool "UFS file system write support (DANGEROUS)"
+	depends on UFS_FS && EXPERIMENTAL
+	help
+	  Say Y here if you want to try writing to UFS partitions. This is
+	  experimental, so you should back up your UFS partitions beforehand.
+
+endmenu
+
+menu "Network File Systems"
+	depends on NET
+
+config NFS_FS
+	tristate "NFS file system support"
+	depends on INET
+	select LOCKD
+	select SUNRPC
+	help
+	  If you are connected to some other (usually local) Unix computer
+	  (using SLIP, PLIP, PPP or Ethernet) and want to mount files residing
+	  on that computer (the NFS server) using the Network File System
+	  protocol, say Y. "Mounting files" means that the client can access
+	  the files with usual UNIX commands as if they were sitting on the
+	  client's hard disk. For this to work, the server must run the
+	  programs nfsd and mountd (but does not need to have NFS file system
+	  support enabled in its kernel). NFS is explained in the Network
+	  Administrator's Guide, available from
+	  <http://www.tldp.org/docs.html#guide>, on its man page: "man
+	  nfs", and in the NFS-HOWTO.
+
+	  A superior but less widely used alternative to NFS is provided by
+	  the Coda file system; see "Coda file system support" below.
+
+	  If you say Y here, you should have said Y to TCP/IP networking also.
+	  This option would enlarge your kernel by about 27 KB.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called nfs.
+
+	  If you are configuring a diskless machine which will mount its root
+	  file system over NFS at boot time, say Y here and to "Kernel
+	  level IP autoconfiguration" above and to "Root file system on NFS"
+	  below. You cannot compile this driver as a module in this case.
+	  There are two packages designed for booting diskless machines over
+	  the net: netboot, available from
+	  <http://ftp1.sourceforge.net/netboot/>, and Etherboot,
+	  available from <http://ftp1.sourceforge.net/etherboot/>.
+
+	  If you don't know what all this is about, say N.
+
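+# A hedged example (server name and export path hypothetical): a remote
+# export is mounted on the client with
+#   mount -t nfs server:/export /mnt/nfs
+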
+config NFS_V3
+	bool "Provide NFSv3 client support"
+	depends on NFS_FS
+	help
+	  Say Y here if you want your NFS client to be able to speak version
+	  3 of the NFS protocol.
+
+	  If unsure, say Y.
+
+config NFS_V4
+	bool "Provide NFSv4 client support (EXPERIMENTAL)"
+	depends on NFS_FS && EXPERIMENTAL
+	select RPCSEC_GSS_KRB5
+	help
+	  Say Y here if you want your NFS client to be able to speak the newer
+	  version 4 of the NFS protocol.
+
+	  Note: Requires auxiliary userspace daemons which may be found on
+		http://www.citi.umich.edu/projects/nfsv4/
+
+	  If unsure, say N.
+
+config NFS_DIRECTIO
+	bool "Allow direct I/O on NFS files (EXPERIMENTAL)"
+	depends on NFS_FS && EXPERIMENTAL
+	help
+	  This option enables applications to perform uncached I/O on files
+	  in NFS file systems using the O_DIRECT open() flag.  When O_DIRECT
+	  is set for a file, its data is not cached in the system's page
+	  cache.  Data is moved to and from user-level application buffers
+	  directly.  Unlike local disk-based file systems, NFS O_DIRECT has
+	  no alignment restrictions.
+
+	  Unless your program is designed to use O_DIRECT properly, you are
+	  much better off allowing the NFS client to manage data caching for
+	  you.  Misusing O_DIRECT can cause poor server performance or network
+	  storms.  This kernel build option defaults OFF to avoid exposing
+	  system administrators unwittingly to a potentially hazardous
+	  feature.
+
+	  For more details on NFS O_DIRECT, see fs/nfs/direct.c.
+
+	  If unsure, say N.  This reduces the size of the NFS client, and
+	  causes open() to return EINVAL if a file residing in NFS is
+	  opened with the O_DIRECT flag.
+
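+# A minimal sketch of the O_DIRECT usage described above (file name
+# hypothetical; standard open(2) flag, needs _GNU_SOURCE and <fcntl.h>):
+#   int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
+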
+config NFSD
+	tristate "NFS server support"
+	depends on INET
+	select LOCKD
+	select SUNRPC
+	select EXPORTFS
+	help
+	  If you want your Linux box to act as an NFS *server*, so that other
+	  computers on your local network which support NFS can access certain
+	  directories on your box transparently, you have two options: you can
+	  use the self-contained user space program nfsd, in which case you
+	  should say N here, or you can say Y and use the kernel based NFS
+	  server. The advantage of the kernel based solution is that it is
+	  faster.
+
+	  In either case, you will need support software; the respective
+	  locations are given in the file <file:Documentation/Changes> in the
+	  NFS section.
+
+	  If you say Y here, you will get support for version 2 of the NFS
+	  protocol (NFSv2). If you also want NFSv3, say Y to the next question
+	  as well.
+
+	  Please read the NFS-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile the NFS server support as a module, choose M here: the
+	  module will be called nfsd.  If unsure, say N.
+
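+# A hedged sketch of exporting a directory with the in-kernel server (path
+# and client spec hypothetical): add a line such as
+#   /srv/export  192.168.1.0/24(rw,sync)
+# to /etc/exports, run "exportfs -ra", and start the rpc.nfsd and
+# rpc.mountd daemons from the nfs-utils package.
+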
+config NFSD_V3
+	bool "Provide NFSv3 server support"
+	depends on NFSD
+	help
+	  If you would like to include the NFSv3 server as well as the NFSv2
+	  server, say Y here.  If unsure, say Y.
+
+config NFSD_V4
+	bool "Provide NFSv4 server support (EXPERIMENTAL)"
+	depends on NFSD_V3 && EXPERIMENTAL
+	select NFSD_TCP
+	help
+	  If you would like to include the NFSv4 server as well as the NFSv2
+	  and NFSv3 servers, say Y here.  This feature is experimental, and
+	  should only be used if you are interested in helping to test NFSv4.
+	  If unsure, say N.
+
+config NFSD_TCP
+	bool "Provide NFS server over TCP support"
+	depends on NFSD
+	default y
+	help
+	  If you want your NFS server to support TCP connections, say Y here.
+	  TCP connections usually perform better than the default UDP when
+	  the network is lossy or congested.  If unsure, say Y.
+
+config ROOT_NFS
+	bool "Root file system on NFS"
+	depends on NFS_FS=y && IP_PNP
+	help
+	  If you want your Linux box to mount its whole root file system (the
+	  one containing the directory /) from some other computer over the
+	  net via NFS (presumably because your box doesn't have a hard disk),
+	  say Y. Read <file:Documentation/nfsroot.txt> for details. It is
+	  likely that in this case, you also want to say Y to "Kernel level IP
+	  autoconfiguration" so that your box can discover its network address
+	  at boot time.
+
+	  Most people say N here.
+
+config LOCKD
+	tristate
+
+config LOCKD_V4
+	bool
+	depends on NFSD_V3 || NFS_V3
+	default y
+
+config EXPORTFS
+	tristate
+
+config SUNRPC
+	tristate
+
+config SUNRPC_GSS
+	tristate
+
+config RPCSEC_GSS_KRB5
+	tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
+	depends on SUNRPC && EXPERIMENTAL
+	select SUNRPC_GSS
+	select CRYPTO
+	select CRYPTO_MD5
+	select CRYPTO_DES
+	help
+	  Provides for secure RPC calls by means of a gss-api
+	  mechanism based on Kerberos V5. This is required for
+	  NFSv4.
+
+	  Note: Requires an auxiliary userspace daemon which may be found on
+		http://www.citi.umich.edu/projects/nfsv4/
+
+	  If unsure, say N.
+
+config RPCSEC_GSS_SPKM3
+	tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
+	depends on SUNRPC && EXPERIMENTAL
+	select SUNRPC_GSS
+	select CRYPTO
+	select CRYPTO_MD5
+	select CRYPTO_DES
+	help
+	  Provides for secure RPC calls by means of a gss-api
+	  mechanism based on the SPKM3 public-key mechanism.
+
+	  Note: Requires an auxiliary userspace daemon which may be found on
+	  	http://www.citi.umich.edu/projects/nfsv4/
+
+	  If unsure, say N.
+
+config SMB_FS
+	tristate "SMB file system support (to mount Windows shares etc.)"
+	depends on INET
+	select NLS
+	help
+	  SMB (Server Message Block) is the protocol Windows for Workgroups
+	  (WfW), Windows 95/98, Windows NT and OS/2 Lan Manager use to share
+	  files and printers over local networks.  Saying Y here allows you to
+	  mount their file systems (often called "shares" in this context) and
+	  access them just like any other Unix directory.  Currently, this
+	  works only if the Windows machines use TCP/IP as the underlying
+	  transport protocol, and not NetBEUI.  For details, read
+	  <file:Documentation/filesystems/smbfs.txt> and the SMB-HOWTO,
+	  available from <http://www.tldp.org/docs.html#howto>.
+
+	  Note: if you just want your box to act as an SMB *server* and make
+	  files and printing services available to Windows clients (which need
+	  to have a TCP/IP stack), you don't need to say Y here; you can use
+	  the program SAMBA (available from <ftp://ftp.samba.org/pub/samba/>)
+	  for that.
+
+	  General information about how to connect Linux, Windows machines and
+	  Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
+
+	  To compile the SMB support as a module, choose M here: the module will
+	  be called smbfs.  Most people say N, however.
+
+config SMB_NLS_DEFAULT
+	bool "Use a default NLS"
+	depends on SMB_FS
+	help
+	  Enabling this will make smbfs use nls translations by default. You
+	  need to specify the local charset (CONFIG_NLS_DEFAULT) in the nls
+	  settings and you need to give the default nls for the SMB server as
+	  CONFIG_SMB_NLS_REMOTE.
+
+	  The nls settings can be changed at mount time, if your smbmount
+	  supports that, using the codepage and iocharset parameters.
+
+	  smbmount from samba 2.2.0 or later supports this.
+
+config SMB_NLS_REMOTE
+	string "Default Remote NLS Option"
+	depends on SMB_NLS_DEFAULT
+	default "cp437"
+	help
+	  This setting allows you to specify a default codepage for the
+	  server. If this field is left blank, no translations will be done
+	  by default. The local codepage/charset defaults to
+	  CONFIG_NLS_DEFAULT.
+
+	  The nls settings can be changed at mount time, if your smbmount
+	  supports that, using the codepage and iocharset parameters.
+
+	  smbmount from samba 2.2.0 or later supports this.
+
+config CIFS
+	tristate "CIFS support (advanced network filesystem for Samba, Windows and other CIFS compliant servers)"
+	depends on INET
+	select NLS
+	help
+	  This is the client VFS module for the Common Internet File System
+	  (CIFS) protocol, which is the successor to the Server Message Block
+	  (SMB) protocol, the native file sharing mechanism for most early
+	  PC operating systems.  The CIFS protocol is fully supported by
+	  file servers such as Windows 2000, Windows 2003, Windows NT 4 and
+	  Windows XP, as well as by Samba (which provides excellent CIFS
+	  server support for Linux and many other operating systems). Currently
+	  you must use the smbfs client filesystem to access older SMB servers
+	  such as Windows 9x and OS/2.
+
+	  The intent of the cifs module is to provide an advanced
+	  network file system client for mounting to CIFS compliant servers, 
+	  including support for dfs (hierarchical name space), secure per-user
+	  session establishment, safe distributed caching (oplock), optional
+	  packet signing, Unicode and other internationalization improvements, 
+	  and optional Winbind (nsswitch) integration. You do not need to enable
+	  cifs if running only a (Samba) server. It is possible to enable both
+	  smbfs and cifs (e.g. if you are using CIFS for accessing Windows 2003
+	  and Samba 3 servers, and smbfs for accessing old servers). If you need 
+	  to mount to Samba or Windows 2003 servers from this machine, say Y.
+
+config CIFS_STATS
+	bool "CIFS statistics"
+	depends on CIFS
+	help
+	  Enabling this option will cause statistics for each server share
+	  mounted by the cifs client to be displayed in /proc/fs/cifs/Stats.
+
+config CIFS_XATTR
+	bool "CIFS extended attributes (EXPERIMENTAL)"
+	depends on CIFS
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).  CIFS maps the name of
+	  extended attributes beginning with the user namespace prefix
+	  to SMB/CIFS EAs. EAs are stored on Windows servers without the
+	  user namespace prefix, but their names are seen by Linux cifs clients
+	  prefaced by the user namespace prefix. The system namespace
+	  (used by some filesystems to store ACLs) is not supported at
+	  this time.
+
+	  If unsure, say N.
+
+config CIFS_POSIX
+	bool "CIFS POSIX Extensions (EXPERIMENTAL)"
+	depends on CIFS_XATTR
+	help
+	  Enabling this option will cause the cifs client to attempt to
+	  negotiate a newer dialect with servers, such as Samba 3.0.5
+	  or later, that optionally can handle more POSIX-like (rather
+	  than Windows-like) file behavior.  It also enables
+	  support for POSIX ACLs (getfacl and setfacl) to servers
+	  (such as Samba 3.10 and later) which can negotiate
+	  CIFS POSIX ACL support.  If unsure, say N.
+
+config CIFS_EXPERIMENTAL
+	bool "CIFS Experimental Features (EXPERIMENTAL)"
+	depends on CIFS
+	help
+	  Enables cifs features under testing. These features
+	  are highly experimental.  If unsure, say N.
+
+config NCP_FS
+	tristate "NCP file system support (to mount NetWare volumes)"
+	depends on IPX!=n || INET
+	help
+	  NCP (NetWare Core Protocol) is a protocol that runs over IPX and is
+	  used by Novell NetWare clients to talk to file servers.  It is to
+	  IPX what NFS is to TCP/IP, if that helps.  Saying Y here allows you
+	  to mount NetWare file server volumes and to access them just like
+	  any other Unix directory.  For details, please read the file
+	  <file:Documentation/filesystems/ncpfs.txt> in the kernel source and
+	  the IPX-HOWTO from <http://www.tldp.org/docs.html#howto>.
+
+	  You do not have to say Y here if you want your Linux box to act as a
+	  file *server* for Novell NetWare clients.
+
+	  General information about how to connect Linux, Windows machines and
+	  Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
+
+	  To compile this as a module, choose M here: the module will be called
+	  ncpfs.  Say N unless you are connected to a Novell network.
+
+source "fs/ncpfs/Kconfig"
+
+config CODA_FS
+	tristate "Coda file system support (advanced network fs)"
+	depends on INET
+	help
+	  Coda is an advanced network file system, similar to NFS in that it
+	  enables you to mount file systems of a remote server and access them
+	  with regular Unix commands as if they were sitting on your hard
+	  disk.  Coda has several advantages over NFS: support for
+	  disconnected operation (e.g. for laptops), read/write server
+	  replication, security model for authentication and encryption,
+	  persistent client caches and write back caching.
+
+	  If you say Y here, your Linux box will be able to act as a Coda
+	  *client*.  You will need user level code as well, both for the
+	  client and server.  Servers are currently user level, i.e. they need
+	  no kernel support.  Please read
+	  <file:Documentation/filesystems/coda.txt> and check out the Coda
+	  home page <http://www.coda.cs.cmu.edu/>.
+
+	  To compile the coda client support as a module, choose M here: the
+	  module will be called coda.
+
+config CODA_FS_OLD_API
+	bool "Use 96-bit Coda file identifiers"
+	depends on CODA_FS
+	help
+	  A new kernel-userspace API had to be introduced for Coda v6.0
+	  to support larger 128-bit file identifiers as needed by the
+	  new realms implementation.
+
+	  However this new API is not backward compatible with older
+	  clients. If you really need to run the old Coda userspace
+	  cache manager then say Y.
+	  
+	  For most cases you probably want to say N.
+
+config AFS_FS
+# for fs/nls/Config.in
+	tristate "Andrew File System support (AFS) (Experimental)"
+	depends on INET && EXPERIMENTAL
+	select RXRPC
+	help
+	  If you say Y here, you will get an experimental Andrew File System
+	  driver. It currently only supports unsecured read-only AFS access.
+
+	  See <file:Documentation/filesystems/afs.txt> for more information.
+
+	  If unsure, say N.
+
+config RXRPC
+	tristate
+
+endmenu
+
+menu "Partition Types"
+
+source "fs/partitions/Kconfig"
+
+endmenu
+
+source "fs/nls/Kconfig"
+
+endmenu
+
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
new file mode 100644
index 0000000..434c19d
--- /dev/null
+++ b/fs/Kconfig.binfmt
@@ -0,0 +1,134 @@
+config BINFMT_ELF
+	bool "Kernel support for ELF binaries"
+	depends on MMU
+	default y
+	---help---
+	  ELF (Executable and Linkable Format) is a format for libraries and
+	  executables used across different architectures and operating
+	  systems. Saying Y here will enable your kernel to run ELF binaries
+	  and enlarge it by about 13 KB. ELF support under Linux has now all
+	  but replaced the traditional Linux a.out formats (QMAGIC and ZMAGIC)
+	  because it is portable (this does *not* mean that you will be able
+	  to run executables from different architectures or operating systems
+	  however) and makes building run-time libraries very easy. Many new
+	  executables are distributed solely in ELF format. You definitely
+	  want to say Y here.
+
+	  Information about ELF is contained in the ELF HOWTO available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  If you find that after upgrading from Linux kernel 1.2 and saying Y
+	  here, you still can't run any ELF binaries (they just crash), then
+	  you'll have to install the newest ELF runtime libraries, including
+	  ld.so (check the file <file:Documentation/Changes> for location and
+	  latest version).
+
+config BINFMT_ELF_FDPIC
+	bool "Kernel support for FDPIC ELF binaries"
+	default y
+	depends on FRV
+	help
+	  ELF FDPIC binaries are based on ELF, but allow the individual load
+	  segments of a binary to be located in memory independently of each
+	  other. This makes this format ideal for use in environments where no
+	  MMU is available as it still permits text segments to be shared,
+	  even if data segments are not.
+
+	  FDPIC ELF binaries can also be run on Linux systems with an MMU.
+
+config BINFMT_FLAT
+	tristate "Kernel support for flat binaries"
+	depends on !MMU || SUPERH
+	help
+	  Support uClinux FLAT format binaries.
+
+config BINFMT_ZFLAT
+	bool "Enable ZFLAT support"
+	depends on BINFMT_FLAT
+	select ZLIB_INFLATE
+	help
+	  Support FLAT format compressed binaries
+
+config BINFMT_SHARED_FLAT
+	bool "Enable shared FLAT support"
+	depends on BINFMT_FLAT
+	help
+	  Support FLAT shared libraries
+
+config BINFMT_AOUT
+	tristate "Kernel support for a.out and ECOFF binaries"
+	depends on (X86 && !X86_64) || ALPHA || ARM || M68K || SPARC32
+	---help---
+	  A.out (Assembler.OUTput) is a set of formats for libraries and
+	  executables used in the earliest versions of UNIX.  Linux used
+	  the a.out formats QMAGIC and ZMAGIC until they were replaced
+	  with the ELF format.
+
+	  The conversion to ELF started in 1995.  This option is primarily
+	  provided for historical interest and for the benefit of those
+	  who need to run binaries from that era.
+
+	  Most people should answer N here.  If you think you may have
+	  occasional use for this format, enable module support above
+	  and answer M here to compile this support as a module called
+	  binfmt_aout.
+
+	  If any crucial components of your system (such as /sbin/init
+	  or /lib/ld.so) are still in a.out format, you will have to
+	  say Y here.
+
+config OSF4_COMPAT
+	bool "OSF/1 v4 readv/writev compatibility"
+	depends on ALPHA && BINFMT_AOUT
+	help
+	  Say Y if you are using OSF/1 binaries (like Netscape and Acrobat)
+	  with v4 shared libraries freely available from Compaq. If you're
+	  going to use shared libraries from Tru64 version 5.0 or later, say N.
+
+config BINFMT_EM86
+	tristate "Kernel support for Linux/Intel ELF binaries"
+	depends on ALPHA
+	---help---
+	  Say Y here if you want to be able to execute Linux/Intel ELF
+	  binaries just like native Alpha binaries on your Alpha machine. For
+	  this to work, you need to have the emulator /usr/bin/em86 in place.
+
+	  You can get the same functionality by saying N here and saying Y to
+	  "Kernel support for MISC binaries".
+
+	  You may answer M to compile the emulation support as a module and
+	  later load the module when you want to use a Linux/Intel binary. The
+	  module will be called binfmt_em86. If unsure, say Y.
+
+config BINFMT_SOM
+	tristate "Kernel support for SOM binaries"
+	depends on PARISC && HPUX
+	help
+	  SOM is a binary executable format inherited from HP/UX.  Say
+	  Y here to be able to load and execute SOM binaries directly.
+
+config BINFMT_MISC
+	tristate "Kernel support for MISC binaries"
+	---help---
+	  If you say Y here, it will be possible to plug wrapper-driven binary
+	  formats into the kernel. You will like this especially when you use
+	  programs that need an interpreter to run like Java, Python, .NET or
+	  Emacs-Lisp. It's also useful if you often run DOS executables under
+	  the Linux DOS emulator DOSEMU (read the DOSEMU-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>). Once you have
+	  registered such a binary class with the kernel, you can start one of
+	  those programs simply by typing in its name at a shell prompt; Linux
+	  will automatically feed it to the correct interpreter.
+
+	  You can do other nice things, too. Read the file
+	  <file:Documentation/binfmt_misc.txt> to learn how to use this
+	  feature, <file:Documentation/java.txt> for information about how
+	  to include Java support, and <file:Documentation/mono.txt> for
+	  information about how to include Mono-based .NET support.
+
+	  To use binfmt_misc, you will need to mount it:
+		mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
+
+	  You may say M here for module support and later load the module when
+	  you have use for it; the module is called binfmt_misc. If you
+	  don't know what to answer at this point, say Y.
diff --git a/fs/Makefile b/fs/Makefile
new file mode 100644
index 0000000..443f2bc
--- /dev/null
+++ b/fs/Makefile
@@ -0,0 +1,97 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+# 
+
+obj-y :=	open.o read_write.o file_table.o buffer.o  bio.o super.o \
+		block_dev.o char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
+		ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
+		attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
+		seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o
+
+obj-$(CONFIG_EPOLL)		+= eventpoll.o
+obj-$(CONFIG_COMPAT)		+= compat.o
+
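+# nfsctl.o is built into the kernel whenever NFSD is configured, whether
+# NFSD itself is built-in (y) or modular (m).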
+nfsd-$(CONFIG_NFSD)		:= nfsctl.o
+obj-y				+= $(nfsd-y) $(nfsd-m)
+
+obj-$(CONFIG_BINFMT_AOUT)	+= binfmt_aout.o
+obj-$(CONFIG_BINFMT_EM86)	+= binfmt_em86.o
+obj-$(CONFIG_BINFMT_MISC)	+= binfmt_misc.o
+
+# binfmt_script is always there
+obj-y				+= binfmt_script.o
+
+obj-$(CONFIG_BINFMT_ELF)	+= binfmt_elf.o
+obj-$(CONFIG_BINFMT_ELF_FDPIC)	+= binfmt_elf_fdpic.o
+obj-$(CONFIG_BINFMT_SOM)	+= binfmt_som.o
+obj-$(CONFIG_BINFMT_FLAT)	+= binfmt_flat.o
+
+obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
+obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o xattr_acl.o
+
+obj-$(CONFIG_QUOTA)		+= dquot.o
+obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
+obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
+obj-$(CONFIG_QUOTACTL)		+= quota.o
+
+obj-$(CONFIG_DNOTIFY)		+= dnotify.o
+
+obj-$(CONFIG_PROC_FS)		+= proc/
+obj-y				+= partitions/
+obj-$(CONFIG_SYSFS)		+= sysfs/
+obj-y				+= devpts/
+
+obj-$(CONFIG_PROFILING)		+= dcookies.o
+ 
+# Do not add any filesystems before this line
+obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
+obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
+obj-$(CONFIG_JBD)		+= jbd/
+obj-$(CONFIG_EXT2_FS)		+= ext2/
+obj-$(CONFIG_CRAMFS)		+= cramfs/
+obj-$(CONFIG_RAMFS)		+= ramfs/
+obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
+obj-$(CONFIG_CODA_FS)		+= coda/
+obj-$(CONFIG_MINIX_FS)		+= minix/
+obj-$(CONFIG_FAT_FS)		+= fat/
+obj-$(CONFIG_MSDOS_FS)		+= msdos/
+obj-$(CONFIG_VFAT_FS)		+= vfat/
+obj-$(CONFIG_BFS_FS)		+= bfs/
+obj-$(CONFIG_ISO9660_FS)	+= isofs/
+obj-$(CONFIG_DEVFS_FS)		+= devfs/
+obj-$(CONFIG_HFSPLUS_FS)	+= hfsplus/ # Before hfs to find wrapped HFS+
+obj-$(CONFIG_HFS_FS)		+= hfs/
+obj-$(CONFIG_VXFS_FS)		+= freevxfs/
+obj-$(CONFIG_NFS_FS)		+= nfs/
+obj-$(CONFIG_EXPORTFS)		+= exportfs/
+obj-$(CONFIG_NFSD)		+= nfsd/
+obj-$(CONFIG_LOCKD)		+= lockd/
+obj-$(CONFIG_NLS)		+= nls/
+obj-$(CONFIG_SYSV_FS)		+= sysv/
+obj-$(CONFIG_SMB_FS)		+= smbfs/
+obj-$(CONFIG_CIFS)		+= cifs/
+obj-$(CONFIG_NCP_FS)		+= ncpfs/
+obj-$(CONFIG_HPFS_FS)		+= hpfs/
+obj-$(CONFIG_NTFS_FS)		+= ntfs/
+obj-$(CONFIG_UFS_FS)		+= ufs/
+obj-$(CONFIG_EFS_FS)		+= efs/
+obj-$(CONFIG_JFFS_FS)		+= jffs/
+obj-$(CONFIG_JFFS2_FS)		+= jffs2/
+obj-$(CONFIG_AFFS_FS)		+= affs/
+obj-$(CONFIG_ROMFS_FS)		+= romfs/
+obj-$(CONFIG_QNX4FS_FS)		+= qnx4/
+obj-$(CONFIG_AUTOFS_FS)		+= autofs/
+obj-$(CONFIG_AUTOFS4_FS)	+= autofs4/
+obj-$(CONFIG_ADFS_FS)		+= adfs/
+obj-$(CONFIG_UDF_FS)		+= udf/
+obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
+obj-$(CONFIG_JFS_FS)		+= jfs/
+obj-$(CONFIG_XFS_FS)		+= xfs/
+obj-$(CONFIG_AFS_FS)		+= afs/
+obj-$(CONFIG_BEFS_FS)		+= befs/
+obj-$(CONFIG_HOSTFS)		+= hostfs/
+obj-$(CONFIG_HPPFS)		+= hppfs/
+obj-$(CONFIG_DEBUG_FS)		+= debugfs/
diff --git a/fs/adfs/Makefile b/fs/adfs/Makefile
new file mode 100644
index 0000000..9b2d71a
--- /dev/null
+++ b/fs/adfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux adfs filesystem routines.
+#
+
+obj-$(CONFIG_ADFS_FS) += adfs.o
+
+adfs-objs := dir.o dir_f.o dir_fplus.o file.o inode.o map.o super.o
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
new file mode 100644
index 0000000..63f5df9
--- /dev/null
+++ b/fs/adfs/adfs.h
@@ -0,0 +1,127 @@
+/* Internal data structures for ADFS */
+
+#define ADFS_FREE_FRAG		 0
+#define ADFS_BAD_FRAG		 1
+#define ADFS_ROOT_FRAG		 2
+
+#define ADFS_NDA_OWNER_READ	(1 << 0)
+#define ADFS_NDA_OWNER_WRITE	(1 << 1)
+#define ADFS_NDA_LOCKED		(1 << 2)
+#define ADFS_NDA_DIRECTORY	(1 << 3)
+#define ADFS_NDA_EXECUTE	(1 << 4)
+#define ADFS_NDA_PUBLIC_READ	(1 << 5)
+#define ADFS_NDA_PUBLIC_WRITE	(1 << 6)
+
+#include <linux/version.h>
+#include "dir_f.h"
+
+struct buffer_head;
+
+/*
+ * Directory handling
+ */
+struct adfs_dir {
+	struct super_block	*sb;
+
+	int			nr_buffers;
+	struct buffer_head	*bh[4];
+	unsigned int		pos;
+	unsigned int		parent_id;
+
+	struct adfs_dirheader	dirhead;
+	union  adfs_dirtail	dirtail;
+};
+
+/*
+ * This is the overall maximum name length
+ */
+#define ADFS_MAX_NAME_LEN	256
+struct object_info {
+	__u32		parent_id;		/* parent object id	*/
+	__u32		file_id;		/* object id		*/
+	__u32		loadaddr;		/* load address		*/
+	__u32		execaddr;		/* execution address	*/
+	__u32		size;			/* size			*/
+	__u8		attr;			/* RISC OS attributes	*/
+	unsigned char	name_len;		/* name length		*/
+	char		name[ADFS_MAX_NAME_LEN];/* file name		*/
+};
+
+struct adfs_dir_ops {
+	int	(*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
+	int	(*setpos)(struct adfs_dir *dir, unsigned int fpos);
+	int	(*getnext)(struct adfs_dir *dir, struct object_info *obj);
+	int	(*update)(struct adfs_dir *dir, struct object_info *obj);
+	int	(*create)(struct adfs_dir *dir, struct object_info *obj);
+	int	(*remove)(struct adfs_dir *dir, struct object_info *obj);
+	void	(*free)(struct adfs_dir *dir);
+};
+
+struct adfs_discmap {
+	struct buffer_head	*dm_bh;
+	__u32			dm_startblk;
+	unsigned int		dm_startbit;
+	unsigned int		dm_endbit;
+};
+
+/* Inode stuff */
+struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
+int adfs_write_inode(struct inode *inode,int unused);
+int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
+
+/* map.c */
+extern int adfs_map_lookup(struct super_block *sb, unsigned int frag_id, unsigned int offset);
+extern unsigned int adfs_map_free(struct super_block *sb);
+
+/* Misc */
+void __adfs_error(struct super_block *sb, const char *function,
+		  const char *fmt, ...);
+#define adfs_error(sb, fmt...) __adfs_error(sb, __FUNCTION__, fmt)
+
+/* super.c */
+
+/*
+ * Inodes and file operations
+ */
+
+/* dir_*.c */
+extern struct inode_operations adfs_dir_inode_operations;
+extern struct file_operations adfs_dir_operations;
+extern struct dentry_operations adfs_dentry_operations;
+extern struct adfs_dir_ops adfs_f_dir_ops;
+extern struct adfs_dir_ops adfs_fplus_dir_ops;
+
+extern int adfs_dir_update(struct super_block *sb, struct object_info *obj);
+
+/* file.c */
+extern struct inode_operations adfs_file_inode_operations;
+extern struct file_operations adfs_file_operations;
+
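+/*
+ * Arithmetic shift by a signed amount: a non-negative shift moves the
+ * value left, a negative shift moves it right.
+ */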
+extern inline __u32 signed_asl(__u32 val, signed int shift)
+{
+	if (shift >= 0)
+		val <<= shift;
+	else
+		val >>= -shift;
+	return val;
+}
+
+/*
+ * Calculate the address of a block in an object given the block offset
+ * and the object identity.
+ *
+ * The root directory ID should always be looked up in the map [3.4]
+ */
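+/*
+ * The low 8 bits of the object ID select a share within the fragment
+ * (zero means the fragment itself); the remaining bits form the fragment
+ * ID passed to the map lookup.
+ */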
+extern inline int
+__adfs_block_map(struct super_block *sb, unsigned int object_id,
+		 unsigned int block)
+{
+	if (object_id & 255) {
+		unsigned int off;
+
+		off = (object_id & 255) - 1;
+		block += off << ADFS_SB(sb)->s_log2sharesize;
+	}
+
+	return adfs_map_lookup(sb, object_id >> 8, block);
+}
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
new file mode 100644
index 0000000..0b4c3a0
--- /dev/null
+++ b/fs/adfs/dir.c
@@ -0,0 +1,302 @@
+/*
+ *  linux/fs/adfs/dir.c
+ *
+ *  Copyright (C) 1999-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Common directory handling for ADFS
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>		/* for file_fsync() */
+
+#include "adfs.h"
+
+/*
+ * For future.  This should probably be per-directory.
+ */
+static DEFINE_RWLOCK(adfs_dir_lock);
+
+static int
+adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
+	struct object_info obj;
+	struct adfs_dir dir;
+	int ret = 0;
+
+	lock_kernel();	
+
+	if (filp->f_pos >> 32)
+		goto out;
+
+	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+	if (ret)
+		goto out;
+
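+	/*
+	 * Cases 0 and 1 deliberately fall through so that both "." and
+	 * ".." are emitted when reading from the start of the directory.
+	 */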
+	switch ((unsigned long)filp->f_pos) {
+	case 0:
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+			goto free_out;
+		filp->f_pos += 1;
+
+	case 1:
+		if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0)
+			goto free_out;
+		filp->f_pos += 1;
+
+	default:
+		break;
+	}
+
+	read_lock(&adfs_dir_lock);
+
+	ret = ops->setpos(&dir, filp->f_pos - 2);
+	if (ret)
+		goto unlock_out;
+	while (ops->getnext(&dir, &obj) == 0) {
+		if (filldir(dirent, obj.name, obj.name_len,
+			    filp->f_pos, obj.file_id, DT_UNKNOWN) < 0)
+			goto unlock_out;
+		filp->f_pos += 1;
+	}
+
+unlock_out:
+	read_unlock(&adfs_dir_lock);
+
+free_out:
+	ops->free(&dir);
+
+out:
+	unlock_kernel();
+	return ret;
+}
+
+int
+adfs_dir_update(struct super_block *sb, struct object_info *obj)
+{
+	int ret = -EINVAL;
+#ifdef CONFIG_ADFS_FS_RW
+	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
+	struct adfs_dir dir;
+
+	printk(KERN_INFO "adfs_dir_update: object %06X in dir %06X\n",
+		 obj->file_id, obj->parent_id);
+
+	if (!ops->update) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ops->read(sb, obj->parent_id, 0, &dir);
+	if (ret)
+		goto out;
+
+	write_lock(&adfs_dir_lock);
+	ret = ops->update(&dir, obj);
+	write_unlock(&adfs_dir_lock);
+
+	ops->free(&dir);
+out:
+#endif
+	return ret;
+}
+
+static int
+adfs_match(struct qstr *name, struct object_info *obj)
+{
+	int i;
+
+	if (name->len != obj->name_len)
+		return 0;
+
+	for (i = 0; i < name->len; i++) {
+		char c1, c2;
+
+		c1 = name->name[i];
+		c2 = obj->name[i];
+
+		if (c1 >= 'A' && c1 <= 'Z')
+			c1 += 'a' - 'A';
+		if (c2 >= 'A' && c2 <= 'Z')
+			c2 += 'a' - 'A';
+
+		if (c1 != c2)
+			return 0;
+	}
+	return 1;
+}
+
+static int
+adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_info *obj)
+{
+	struct super_block *sb = inode->i_sb;
+	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
+	struct adfs_dir dir;
+	int ret;
+
+	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+	if (ret)
+		goto out;
+
+	if (ADFS_I(inode)->parent_id != dir.parent_id) {
+		adfs_error(sb, "parent directory changed under me! (%lx but got %lx)\n",
+			   ADFS_I(inode)->parent_id, dir.parent_id);
+		ret = -EIO;
+		goto free_out;
+	}
+
+	obj->parent_id = inode->i_ino;
+
+	/*
+	 * '.' is handled by reserved_lookup() in fs/namei.c
+	 */
+	if (name->len == 2 && name->name[0] == '.' && name->name[1] == '.') {
+		/*
+		 * Currently unable to fill in the rest of 'obj',
+		 * but this is better than nothing.  We need to
+		 * ascend one level to find its parent.
+		 */
+		obj->name_len = 0;
+		obj->file_id  = obj->parent_id;
+		goto free_out;
+	}
+
+	read_lock(&adfs_dir_lock);
+
+	ret = ops->setpos(&dir, 0);
+	if (ret)
+		goto unlock_out;
+
+	ret = -ENOENT;
+	while (ops->getnext(&dir, obj) == 0) {
+		if (adfs_match(name, obj)) {
+			ret = 0;
+			break;
+		}
+	}
+
+unlock_out:
+	read_unlock(&adfs_dir_lock);
+
+free_out:
+	ops->free(&dir);
+out:
+	return ret;
+}
+
+struct file_operations adfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= adfs_readdir,
+	.fsync		= file_fsync,
+};
+
+static int
+adfs_hash(struct dentry *parent, struct qstr *qstr)
+{
+	const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
+	const unsigned char *name;
+	unsigned long hash;
+	int i;
+
+	if (qstr->len < name_len)
+		return 0;
+
+	/*
+	 * Truncate the name in place; this avoids
+	 * having to define a compare function.
+	 */
+	qstr->len = i = name_len;
+	name = qstr->name;
+	hash = init_name_hash();
+	while (i--) {
+		char c;
+
+		c = *name++;
+		if (c >= 'A' && c <= 'Z')
+			c += 'a' - 'A';
+
+		hash = partial_name_hash(c, hash);
+	}
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+/*
+ * Compare two names, taking note of the name length
+ * requirements of the underlying filesystem.
+ */
+static int
+adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
+{
+	int i;
+
+	if (entry->len != name->len)
+		return 1;
+
+	for (i = 0; i < name->len; i++) {
+		char a, b;
+
+		a = entry->name[i];
+		b = name->name[i];
+
+		if (a >= 'A' && a <= 'Z')
+			a += 'a' - 'A';
+		if (b >= 'A' && b <= 'Z')
+			b += 'a' - 'A';
+
+		if (a != b)
+			return 1;
+	}
+	return 0;
+}
+
+struct dentry_operations adfs_dentry_operations = {
+	.d_hash		= adfs_hash,
+	.d_compare	= adfs_compare,
+};
+
+static struct dentry *
+adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	struct object_info obj;
+	int error;
+
+	dentry->d_op = &adfs_dentry_operations;	
+	lock_kernel();
+	error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
+	if (error == 0) {
+		error = -EACCES;
+		/*
+		 * This only returns NULL if new_inode()
+		 * fails.
+		 */
+		inode = adfs_iget(dir->i_sb, &obj);
+		if (inode)
+			error = 0;
+	}
+	unlock_kernel();
+	d_add(dentry, inode);
+	return ERR_PTR(error);
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations adfs_dir_inode_operations = {
+	.lookup		= adfs_lookup,
+	.setattr	= adfs_notify_change,
+};
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
new file mode 100644
index 0000000..bbfc862
--- /dev/null
+++ b/fs/adfs/dir_f.c
@@ -0,0 +1,460 @@
+/*
+ *  linux/fs/adfs/dir_f.c
+ *
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  E and F format directory handling
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/spinlock.h>
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+
+#include "adfs.h"
+#include "dir_f.h"
+
+static void adfs_f_free(struct adfs_dir *dir);
+
+/*
+ * Read an (unaligned) value of length 1..4 bytes
+ */
+static inline unsigned int adfs_readval(unsigned char *p, int len)
+{
+	unsigned int val = 0;
+
+	switch (len) {
+	case 4:		val |= p[3] << 24;
+	case 3:		val |= p[2] << 16;
+	case 2:		val |= p[1] << 8;
+	default:	val |= p[0];
+	}
+	return val;
+}
+
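+/*
+ * Write an (unaligned) little-endian value of length 1..4 bytes.  Both
+ * this helper and adfs_readval() rely on deliberate switch fall-through.
+ */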
+static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
+{
+	switch (len) {
+	case 4:		p[3] = val >> 24;
+	case 3:		p[2] = val >> 16;
+	case 2:		p[1] = val >> 8;
+	default:	p[0] = val;
+	}
+}
+
+static inline int adfs_readname(char *buf, char *ptr, int maxlen)
+{
+	char *old_buf = buf;
+
+	while (*ptr >= ' ' && maxlen--) {
+		if (*ptr == '/')
+			*buf++ = '.';
+		else
+			*buf++ = *ptr;
+		ptr++;
+	}
+	*buf = '\0';
+
+	return buf - old_buf;
+}
+
+#define ror13(v) ((v >> 13) | (v << 19))
+
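+/*
+ * Index a byte or word at byte offset 'idx' in the directory; the offset
+ * may fall in any of the buffer heads that make up the directory.
+ */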
+#define dir_u8(idx)				\
+	({ int _buf = idx >> blocksize_bits;	\
+	   int _off = idx - (_buf << blocksize_bits);\
+	  *(u8 *)(bh[_buf]->b_data + _off);	\
+	})
+
+#define dir_u32(idx)				\
+	({ int _buf = idx >> blocksize_bits;	\
+	   int _off = idx - (_buf << blocksize_bits);\
+	  *(__le32 *)(bh[_buf]->b_data + _off);	\
+	})
+
+#define bufoff(_bh,_idx)			\
+	({ int _buf = _idx >> blocksize_bits;	\
+	   int _off = _idx - (_buf << blocksize_bits);\
+	  (u8 *)(_bh[_buf]->b_data + _off);	\
+	})
+
+/*
+ * There are some algorithms that are nice in
+ * assembler, but a bitch in C...  This is one
+ * of them.
+ */
+static u8
+adfs_dir_checkbyte(const struct adfs_dir *dir)
+{
+	struct buffer_head * const *bh = dir->bh;
+	const int blocksize_bits = dir->sb->s_blocksize_bits;
+	union { __le32 *ptr32; u8 *ptr8; } ptr, end;
+	u32 dircheck = 0;
+	int last = 5 - 26;
+	int i = 0;
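+	/*
+	 * 'last' is primed so that the first 'last += 26' below yields 5,
+	 * the offset of the first directory entry.
+	 */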
+
+	/*
+	 * Accumulate each word up to the last whole
+	 * word of the last directory entry.  This
+	 * can spread across several buffer heads.
+	 */
+	do {
+		last += 26;
+		do {
+			dircheck = le32_to_cpu(dir_u32(i)) ^ ror13(dircheck);
+
+			i += sizeof(u32);
+		} while (i < (last & ~3));
+	} while (dir_u8(last) != 0);
+
+	/*
+	 * Accumulate the last few bytes.  These
+	 * bytes will be within the same bh.
+	 */
+	if (i != last) {
+		ptr.ptr8 = bufoff(bh, i);
+		end.ptr8 = ptr.ptr8 + last - i;
+
+		do
+			dircheck = *ptr.ptr8++ ^ ror13(dircheck);
+		while (ptr.ptr8 < end.ptr8);
+	}
+
+	/*
+	 * The directory tail is in the final bh
+	 * Note that contrary to the RISC OS PRMs,
+	 * the first few bytes are NOT included
+	 * in the check.  All bytes are in the
+	 * same bh.
+	 */
+	ptr.ptr8 = bufoff(bh, 2008);
+	end.ptr8 = ptr.ptr8 + 36;
+
+	do {
+		__le32 v = *ptr.ptr32++;
+		dircheck = le32_to_cpu(v) ^ ror13(dircheck);
+	} while (ptr.ptr32 < end.ptr32);
+
+	return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff;
+}
+
+/*
+ * Read and check that a directory is valid
+ */
+static int
+adfs_dir_read(struct super_block *sb, unsigned long object_id,
+	      unsigned int size, struct adfs_dir *dir)
+{
+	const unsigned int blocksize_bits = sb->s_blocksize_bits;
+	int blk = 0;
+
+	/*
+	 * Directories which are not a multiple of 2048 bytes
+	 * are considered bad v2 [3.6]
+	 */
+	if (size & 2047)
+		goto bad_dir;
+
+	size >>= blocksize_bits;
+
+	dir->nr_buffers = 0;
+	dir->sb = sb;
+
+	for (blk = 0; blk < size; blk++) {
+		int phys;
+
+		phys = __adfs_block_map(sb, object_id, blk);
+		if (!phys) {
+			adfs_error(sb, "dir object %lX has a hole at offset %d",
+				   object_id, blk);
+			goto release_buffers;
+		}
+
+		dir->bh[blk] = sb_bread(sb, phys);
+		if (!dir->bh[blk])
+			goto release_buffers;
+	}
+
+	memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
+	memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
+
+	if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
+	    memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
+		goto bad_dir;
+
+	if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
+	    memcmp(&dir->dirhead.startname, "Hugo", 4))
+		goto bad_dir;
+
+	if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
+		goto bad_dir;
+
+	dir->nr_buffers = blk;
+
+	return 0;
+
+bad_dir:
+	adfs_error(sb, "corrupted directory fragment %lX",
+		   object_id);
+release_buffers:
+	for (blk -= 1; blk >= 0; blk -= 1)
+		brelse(dir->bh[blk]);
+
+	dir->sb = NULL;
+
+	return -EIO;
+}
+
+/*
+ * convert a disk-based directory entry to a Linux ADFS directory entry
+ */
+static inline void
+adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
+{
+	obj->name_len =	adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
+	obj->file_id  = adfs_readval(de->dirinddiscadd, 3);
+	obj->loadaddr = adfs_readval(de->dirload, 4);
+	obj->execaddr = adfs_readval(de->direxec, 4);
+	obj->size     = adfs_readval(de->dirlen,  4);
+	obj->attr     = de->newdiratts;
+}
+
+/*
+ * convert a Linux ADFS directory entry to a disk-based directory entry
+ */
+static inline void
+adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj)
+{
+	adfs_writeval(de->dirinddiscadd, 3, obj->file_id);
+	adfs_writeval(de->dirload, 4, obj->loadaddr);
+	adfs_writeval(de->direxec, 4, obj->execaddr);
+	adfs_writeval(de->dirlen,  4, obj->size);
+	de->newdiratts = obj->attr;
+}
+
+/*
+ * get a directory entry.  Note that the caller is responsible
+ * for holding the relevant locks.
+ */
+static int
+__adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
+{
+	struct super_block *sb = dir->sb;
+	struct adfs_direntry de;
+	int thissize, buffer, offset;
+
+	buffer = pos >> sb->s_blocksize_bits;
+
+	if (buffer > dir->nr_buffers)
+		return -EINVAL;
+
+	offset = pos & (sb->s_blocksize - 1);
+	thissize = sb->s_blocksize - offset;
+	if (thissize > 26)
+		thissize = 26;
+
+	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
+	if (thissize != 26)
+		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
+		       26 - thissize);
+
+	if (!de.dirobname[0])
+		return -ENOENT;
+
+	adfs_dir2obj(obj, &de);
+
+	return 0;
+}
+
+static int
+__adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj)
+{
+	struct super_block *sb = dir->sb;
+	struct adfs_direntry de;
+	int thissize, buffer, offset;
+
+	buffer = pos >> sb->s_blocksize_bits;
+
+	if (buffer > dir->nr_buffers)
+		return -EINVAL;
+
+	offset = pos & (sb->s_blocksize - 1);
+	thissize = sb->s_blocksize - offset;
+	if (thissize > 26)
+		thissize = 26;
+
+	/*
+	 * Get the entry in total
+	 */
+	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
+	if (thissize != 26)
+		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
+		       26 - thissize);
+
+	/*
+	 * update it
+	 */
+	adfs_obj2dir(&de, obj);
+
+	/*
+	 * Put the new entry back
+	 */
+	memcpy(dir->bh[buffer]->b_data + offset, &de, thissize);
+	if (thissize != 26)
+		memcpy(dir->bh[buffer + 1]->b_data, ((char *)&de) + thissize,
+		       26 - thissize);
+
+	return 0;
+}
+
+/*
+ * the caller is responsible for holding the necessary
+ * locks.
+ */
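+/*
+ * Directory entries are 26 bytes each and start 5 bytes in, immediately
+ * after the directory header.
+ */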
+static int
+adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id)
+{
+	int pos, ret;
+
+	ret = -ENOENT;
+
+	for (pos = 5; pos < ADFS_NUM_DIR_ENTRIES * 26 + 5; pos += 26) {
+		struct object_info obj;
+
+		if (!__adfs_dir_get(dir, pos, &obj))
+			break;
+
+		if (obj.file_id == object_id) {
+			ret = pos;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+adfs_f_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
+{
+	int ret;
+
+	if (sz != ADFS_NEWDIR_SIZE)
+		return -EIO;
+
+	ret = adfs_dir_read(sb, id, sz, dir);
+	if (ret)
+		adfs_error(sb, "unable to read directory");
+	else
+		dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3);
+
+	return ret;
+}
+
+static int
+adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos)
+{
+	if (fpos >= ADFS_NUM_DIR_ENTRIES)
+		return -ENOENT;
+
+	dir->pos = 5 + fpos * 26;
+	return 0;
+}
+
+static int
+adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj)
+{
+	unsigned int ret;
+
+	ret = __adfs_dir_get(dir, dir->pos, obj);
+	if (ret == 0)
+		dir->pos += 26;
+
+	return ret;
+}
+
+static int
+adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
+{
+	struct super_block *sb = dir->sb;
+	int ret, i;
+
+	ret = adfs_dir_find_entry(dir, obj->file_id);
+	if (ret < 0) {
+		adfs_error(dir->sb, "unable to locate entry to update");
+		goto out;
+	}
+
+	__adfs_dir_put(dir, ret, obj);
+ 
+	/*
+	 * Increment directory sequence number
+	 */
+	dir->bh[0]->b_data[0] += 1;
+	dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 6] += 1;
+
+	ret = adfs_dir_checkbyte(dir);
+	/*
+	 * Update directory check byte
+	 */
+	dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 1] = ret;
+
+#if 1
+	{
+	const unsigned int blocksize_bits = sb->s_blocksize_bits;
+
+	memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
+	memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
+
+	if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
+	    memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
+		goto bad_dir;
+
+	if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
+	    memcmp(&dir->dirhead.startname, "Hugo", 4))
+		goto bad_dir;
+
+	if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
+		goto bad_dir;
+	}
+#endif
+	for (i = dir->nr_buffers - 1; i >= 0; i--)
+		mark_buffer_dirty(dir->bh[i]);
+
+	ret = 0;
+out:
+	return ret;
+#if 1
+bad_dir:
+	adfs_error(dir->sb, "whoops!  I broke a directory!");
+	return -EIO;
+#endif
+}
+
+static void
+adfs_f_free(struct adfs_dir *dir)
+{
+	int i;
+
+	for (i = dir->nr_buffers - 1; i >= 0; i--) {
+		brelse(dir->bh[i]);
+		dir->bh[i] = NULL;
+	}
+
+	dir->nr_buffers = 0;
+	dir->sb = NULL;
+}
+
+struct adfs_dir_ops adfs_f_dir_ops = {
+	.read		= adfs_f_read,
+	.setpos		= adfs_f_setpos,
+	.getnext	= adfs_f_getnext,
+	.update		= adfs_f_update,
+	.free		= adfs_f_free
+};
diff --git a/fs/adfs/dir_f.h b/fs/adfs/dir_f.h
new file mode 100644
index 0000000..e471340
--- /dev/null
+++ b/fs/adfs/dir_f.h
@@ -0,0 +1,65 @@
+/*
+ *  linux/fs/adfs/dir_f.h
+ *
+ *  Copyright (C) 1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Structures of directories on the F format disk
+ */
+#ifndef ADFS_DIR_F_H
+#define ADFS_DIR_F_H
+
+/*
+ * Directory header
+ */
+struct adfs_dirheader {
+	unsigned char startmasseq;
+	unsigned char startname[4];
+};
+
+#define ADFS_NEWDIR_SIZE	2048
+#define ADFS_NUM_DIR_ENTRIES	77
+
+/*
+ * Directory entries
+ */
+struct adfs_direntry {
+#define ADFS_F_NAME_LEN 10
+	char dirobname[ADFS_F_NAME_LEN];
+	__u8 dirload[4];
+	__u8 direxec[4];
+	__u8 dirlen[4];
+	__u8 dirinddiscadd[3];
+	__u8 newdiratts;
+};
+
+/*
+ * Directory tail
+ */
+union adfs_dirtail {
+	struct {
+		unsigned char dirlastmask;
+		char dirname[10];
+		unsigned char dirparent[3];
+		char dirtitle[19];
+		unsigned char reserved[14];
+		unsigned char endmasseq;
+		unsigned char endname[4];
+		unsigned char dircheckbyte;
+	} old;
+	struct {
+		unsigned char dirlastmask;
+		unsigned char reserved[2];
+		unsigned char dirparent[3];
+		char dirtitle[19];
+		char dirname[10];
+		unsigned char endmasseq;
+		unsigned char endname[4];
+		unsigned char dircheckbyte;
+	} new;
+};
+
+#endif
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
new file mode 100644
index 0000000..1ec644e
--- /dev/null
+++ b/fs/adfs/dir_fplus.c
@@ -0,0 +1,179 @@
+/*
+ *  linux/fs/adfs/dir_fplus.c
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/spinlock.h>
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+
+#include "adfs.h"
+#include "dir_fplus.h"
+
+static int
+adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
+{
+	struct adfs_bigdirheader *h;
+	struct adfs_bigdirtail *t;
+	unsigned long block;
+	unsigned int blk, size;
+	int i, ret = -EIO;
+
+	dir->nr_buffers = 0;
+
+	block = __adfs_block_map(sb, id, 0);
+	if (!block) {
+		adfs_error(sb, "dir object %X has a hole at offset 0", id);
+		goto out;
+	}
+
+	dir->bh[0] = sb_bread(sb, block);
+	if (!dir->bh[0])
+		goto out;
+	dir->nr_buffers += 1;
+
+	h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+	size = le32_to_cpu(h->bigdirsize);
+	if (size != sz) {
+		printk(KERN_WARNING "adfs: adfs_fplus_read: directory header size\n"
+				" does not match directory size\n");
+	}
+
+	if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
+	    h->bigdirversion[2] != 0 || size & 2047 ||
+	    h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME))
+		goto out;
+
+	size >>= sb->s_blocksize_bits;
+	for (blk = 1; blk < size; blk++) {
+		block = __adfs_block_map(sb, id, blk);
+		if (!block) {
+			adfs_error(sb, "dir object %X has a hole at offset %d", id, blk);
+			goto out;
+		}
+
+		dir->bh[blk] = sb_bread(sb, block);
+		if (!dir->bh[blk])
+			goto out;
+		dir->nr_buffers = blk;
+	}
+
+	t = (struct adfs_bigdirtail *)(dir->bh[size - 1]->b_data + (sb->s_blocksize - 8));
+
+	if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
+	    t->bigdirendmasseq != h->startmasseq ||
+	    t->reserved[0] != 0 || t->reserved[1] != 0)
+		goto out;
+
+	dir->parent_id = le32_to_cpu(h->bigdirparent);
+	dir->sb = sb;
+	return 0;
+out:
+	for (i = 0; i < dir->nr_buffers; i++)
+		brelse(dir->bh[i]);
+	dir->sb = NULL;
+	return ret;
+}
+
+static int
+adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
+{
+	struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+	int ret = -ENOENT;
+
+	if (fpos <= le32_to_cpu(h->bigdirentries)) {
+		dir->pos = fpos;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void
+dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
+{
+	struct super_block *sb = dir->sb;
+	unsigned int buffer, partial, remainder;
+
+	buffer = offset >> sb->s_blocksize_bits;
+	offset &= sb->s_blocksize - 1;
+
+	partial = sb->s_blocksize - offset;
+
+	if (partial >= len)
+		memcpy(to, dir->bh[buffer]->b_data + offset, len);
+	else {
+		char *c = (char *)to;
+
+		remainder = len - partial;
+
+		memcpy(c, dir->bh[buffer]->b_data + offset, partial);
+		memcpy(c + partial, dir->bh[buffer + 1]->b_data, remainder);
+	}
+}
+
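+/*
+ * A big directory is laid out as: header, directory name (padded to a
+ * word boundary), an array of bigdirentry records, then the name heap.
+ * Entry name pointers are byte offsets into the name heap.
+ */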
+static int
+adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
+{
+	struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+	struct adfs_bigdirentry bde;
+	unsigned int offset;
+	int i, ret = -ENOENT;
+
+	if (dir->pos >= le32_to_cpu(h->bigdirentries))
+		goto out;
+
+	offset = offsetof(struct adfs_bigdirheader, bigdirname);
+	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
+	offset += dir->pos * sizeof(struct adfs_bigdirentry);
+
+	dir_memcpy(dir, offset, &bde, sizeof(struct adfs_bigdirentry));
+
+	obj->loadaddr = le32_to_cpu(bde.bigdirload);
+	obj->execaddr = le32_to_cpu(bde.bigdirexec);
+	obj->size     = le32_to_cpu(bde.bigdirlen);
+	obj->file_id  = le32_to_cpu(bde.bigdirindaddr);
+	obj->attr     = le32_to_cpu(bde.bigdirattr);
+	obj->name_len = le32_to_cpu(bde.bigdirobnamelen);
+
+	offset = offsetof(struct adfs_bigdirheader, bigdirname);
+	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
+	offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry);
+	offset += le32_to_cpu(bde.bigdirobnameptr);
+
+	dir_memcpy(dir, offset, obj->name, obj->name_len);
+	for (i = 0; i < obj->name_len; i++)
+		if (obj->name[i] == '/')
+			obj->name[i] = '.';
+
+	dir->pos += 1;
+	ret = 0;
+out:
+	return ret;
+}
+
+static void
+adfs_fplus_free(struct adfs_dir *dir)
+{
+	int i;
+
+	for (i = 0; i < dir->nr_buffers; i++)
+		brelse(dir->bh[i]);
+	dir->sb = NULL;
+}
+
+struct adfs_dir_ops adfs_fplus_dir_ops = {
+	.read		= adfs_fplus_read,
+	.setpos		= adfs_fplus_setpos,
+	.getnext	= adfs_fplus_getnext,
+	.free		= adfs_fplus_free
+};
diff --git a/fs/adfs/dir_fplus.h b/fs/adfs/dir_fplus.h
new file mode 100644
index 0000000..b55aa41
--- /dev/null
+++ b/fs/adfs/dir_fplus.h
@@ -0,0 +1,45 @@
+/*
+ *  linux/fs/adfs/dir_fplus.h
+ *
+ *  Copyright (C) 1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Structures of directories on the F+ format disk
+ */
+
+#define ADFS_FPLUS_NAME_LEN	255
+
+#define BIGDIRSTARTNAME ('S' | 'B' << 8 | 'P' << 16 | 'r' << 24)
+#define BIGDIRENDNAME	('o' | 'v' << 8 | 'e' << 16 | 'n' << 24)
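+/* On disk these markers appear as the byte sequences "SBPr" and "oven". */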
+
+struct adfs_bigdirheader {
+	__u8	startmasseq;
+	__u8	bigdirversion[3];
+	__le32	bigdirstartname;
+	__le32	bigdirnamelen;
+	__le32	bigdirsize;
+	__le32	bigdirentries;
+	__le32	bigdirnamesize;
+	__le32	bigdirparent;
+	char	bigdirname[1];
+};
+
+struct adfs_bigdirentry {
+	__le32	bigdirload;
+	__le32	bigdirexec;
+	__le32	bigdirlen;
+	__le32	bigdirindaddr;
+	__le32	bigdirattr;
+	__le32	bigdirobnamelen;
+	__le32	bigdirobnameptr;
+};
+
+struct adfs_bigdirtail {
+	__le32	bigdirendname;
+	__u8	bigdirendmasseq;
+	__u8	reserved[2];
+	__u8	bigdircheckbyte;
+};
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
new file mode 100644
index 0000000..afebbfd
--- /dev/null
+++ b/fs/adfs/file.c
@@ -0,0 +1,43 @@
+/*
+ *  linux/fs/adfs/file.c
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * from:
+ *
+ *  linux/fs/ext2/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  adfs regular file handling primitives           
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/buffer_head.h>			/* for file_fsync() */
+#include <linux/adfs_fs.h>
+
+#include "adfs.h"
+
+struct file_operations adfs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.mmap		= generic_file_mmap,
+	.fsync		= file_fsync,
+	.write		= generic_file_write,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations adfs_file_inode_operations = {
+	.setattr	= adfs_notify_change,
+};
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
new file mode 100644
index 0000000..a02802a
--- /dev/null
+++ b/fs/adfs/inode.c
@@ -0,0 +1,395 @@
+/*
+ *  linux/fs/adfs/inode.c
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+
+#include "adfs.h"
+
+/*
+ * Lookup/Create a block at offset 'block' into 'inode'.  We currently do
+ * not support creation of new blocks, so we return -EIO for this case.
+ */
+static int
+adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
+	       int create)
+{
+	if (block < 0)
+		goto abort_negative;
+
+	if (!create) {
+		if (block >= inode->i_blocks)
+			goto abort_toobig;
+
+		block = __adfs_block_map(inode->i_sb, inode->i_ino, block);
+		if (block)
+			map_bh(bh, inode->i_sb, block);
+		return 0;
+	}
+	/* don't support allocation of blocks yet */
+	return -EIO;
+
+abort_negative:
+	adfs_error(inode->i_sb, "block %d < 0", block);
+	return -EIO;
+
+abort_toobig:
+	return 0;
+}
+
+static int adfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, adfs_get_block, wbc);
+}
+
+static int adfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, adfs_get_block);
+}
+
+static int adfs_prepare_write(struct file *file, struct page *page, unsigned int from, unsigned int to)
+{
+	return cont_prepare_write(page, from, to, adfs_get_block,
+		&ADFS_I(page->mapping->host)->mmu_private);
+}
+
+static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, adfs_get_block);
+}
+
+static struct address_space_operations adfs_aops = {
+	.readpage	= adfs_readpage,
+	.writepage	= adfs_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= adfs_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= _adfs_bmap
+};
+
+static inline unsigned int
+adfs_filetype(struct inode *inode)
+{
+	unsigned int type;
+
+	if (ADFS_I(inode)->stamped)
+		type = (ADFS_I(inode)->loadaddr >> 8) & 0xfff;
+	else
+		type = (unsigned int) -1;
+
+	return type;
+}
+
+/*
+ * Convert ADFS attributes and filetype to Linux permission.
+ */
+static umode_t
+adfs_atts2mode(struct super_block *sb, struct inode *inode)
+{
+	unsigned int filetype, attr = ADFS_I(inode)->attr;
+	umode_t mode, rmask;
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	if (attr & ADFS_NDA_DIRECTORY) {
+		mode = S_IRUGO & asb->s_owner_mask;
+		return S_IFDIR | S_IXUGO | mode;
+	}
+
+	filetype = adfs_filetype(inode);
+
+	switch (filetype) {
+	case 0xfc0:	/* LinkFS */
+		return S_IFLNK|S_IRWXUGO;
+
+	case 0xfe6:	/* UnixExec */
+		rmask = S_IRUGO | S_IXUGO;
+		break;
+
+	default:
+		rmask = S_IRUGO;
+	}
+
+	mode = S_IFREG;
+
+	if (attr & ADFS_NDA_OWNER_READ)
+		mode |= rmask & asb->s_owner_mask;
+
+	if (attr & ADFS_NDA_OWNER_WRITE)
+		mode |= S_IWUGO & asb->s_owner_mask;
+
+	if (attr & ADFS_NDA_PUBLIC_READ)
+		mode |= rmask & asb->s_other_mask;
+
+	if (attr & ADFS_NDA_PUBLIC_WRITE)
+		mode |= S_IWUGO & asb->s_other_mask;
+	return mode;
+}
+
+/*
+ * Convert Linux permission to ADFS attribute.  We try to do the reverse
+ * of atts2mode, but there is not a 1:1 translation.
+ */
+static int
+adfs_mode2atts(struct super_block *sb, struct inode *inode)
+{
+	umode_t mode;
+	int attr;
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	/* FIXME: should we be able to alter a link? */
+	if (S_ISLNK(inode->i_mode))
+		return ADFS_I(inode)->attr;
+
+	if (S_ISDIR(inode->i_mode))
+		attr = ADFS_NDA_DIRECTORY;
+	else
+		attr = 0;
+
+	mode = inode->i_mode & asb->s_owner_mask;
+	if (mode & S_IRUGO)
+		attr |= ADFS_NDA_OWNER_READ;
+	if (mode & S_IWUGO)
+		attr |= ADFS_NDA_OWNER_WRITE;
+
+	mode = inode->i_mode & asb->s_other_mask;
+	mode &= ~asb->s_owner_mask;
+	if (mode & S_IRUGO)
+		attr |= ADFS_NDA_PUBLIC_READ;
+	if (mode & S_IWUGO)
+		attr |= ADFS_NDA_PUBLIC_WRITE;
+
+	return attr;
+}
+
+/*
+ * Convert an ADFS time to Unix time.  ADFS has a 40-bit centi-second time
+ * referenced to 1 Jan 1900 (until 2248)
+ */
+static void
+adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
+{
+	unsigned int high, low;
+
+	if (ADFS_I(inode)->stamped == 0)
+		goto cur_time;
+
+	high = ADFS_I(inode)->loadaddr << 24;
+	low  = ADFS_I(inode)->execaddr;
+
+	high |= low >> 8;
+	low  &= 255;
+
+	/* Files dated pre  01 Jan 1970 00:00:00. */
+	if (high < 0x336e996a)
+		goto too_early;
+
+	/* Files dated post 18 Jan 2038 03:14:05. */
+	if (high >= 0x656e9969)
+		goto too_late;
+
+	/* discard 2208988800 (0x336e996a00) seconds of time */
+	high -= 0x336e996a;
+
+	/* convert 40-bit centi-seconds to 32-bit seconds */
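+	/*
+	 * The 40-bit value is (high << 8) + low centi-seconds.  The split
+	 * below gives the same result as dividing that by 100, without
+	 * needing 64-bit arithmetic:
+	 *   ((high << 8) + low) / 100
+	 *     == (((high % 100) << 8) + low) / 100 + (high / 100) * 256
+	 */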
+	tv->tv_sec = (((high % 100) << 8) + low) / 100 + (high / 100 << 8);
+	tv->tv_nsec = 0;
+	return;
+
+ cur_time:
+	*tv = CURRENT_TIME_SEC;
+	return;
+
+ too_early:
+	tv->tv_sec = tv->tv_nsec = 0;
+	return;
+
+ too_late:
+	tv->tv_sec = 0x7ffffffd;
+	tv->tv_nsec = 0;
+	return;
+}
+
+/*
+ * Convert a Unix time to ADFS time.  We only do this if the entry has a
+ * time/date stamp already.
+ */
+static void
+adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
+{
+	unsigned int high, low;
+
+	if (ADFS_I(inode)->stamped) {
+		/* convert 32-bit seconds to 40-bit centi-seconds */
+		low  = (secs & 255) * 100;
+		high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;
+
+		ADFS_I(inode)->loadaddr = (high >> 24) |
+				(ADFS_I(inode)->loadaddr & ~0xff);
+		ADFS_I(inode)->execaddr = (low & 255) | (high << 8);
+	}
+}
+
+/*
+ * Fill in the inode information from the object information.
+ *
+ * Note that this is an inode-less filesystem, so we can't use the inode
+ * number to reference the metadata on the media.  Instead, we use the
+ * inode number to hold the object ID, which in turn will tell us where
+ * the data is held.  We also save the parent object ID, and with these
+ * two, we can locate the metadata.
+ *
+ * This does mean that we rely on an object's parent remaining the same at
+ * all times - we cannot cope with a cross-directory rename (yet).
+ */
+struct inode *
+adfs_iget(struct super_block *sb, struct object_info *obj)
+{
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		goto out;
+
+	inode->i_uid	 = ADFS_SB(sb)->s_uid;
+	inode->i_gid	 = ADFS_SB(sb)->s_gid;
+	inode->i_ino	 = obj->file_id;
+	inode->i_size	 = obj->size;
+	inode->i_nlink	 = 2;
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks	 = (inode->i_size + sb->s_blocksize - 1) >>
+			    sb->s_blocksize_bits;
+
+	/*
+	 * we need to save the parent directory ID so that
+	 * write_inode can update the directory information
+	 * for this file.  This will need special handling
+	 * for cross-directory renames.
+	 */
+	ADFS_I(inode)->parent_id = obj->parent_id;
+	ADFS_I(inode)->loadaddr  = obj->loadaddr;
+	ADFS_I(inode)->execaddr  = obj->execaddr;
+	ADFS_I(inode)->attr      = obj->attr;
+	ADFS_I(inode)->stamped	  = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
+
+	inode->i_mode	 = adfs_atts2mode(sb, inode);
+	adfs_adfs2unix_time(&inode->i_mtime, inode);
+	inode->i_atime = inode->i_mtime;
+	inode->i_ctime = inode->i_mtime;
+
+	if (S_ISDIR(inode->i_mode)) {
+		inode->i_op	= &adfs_dir_inode_operations;
+		inode->i_fop	= &adfs_dir_operations;
+	} else if (S_ISREG(inode->i_mode)) {
+		inode->i_op	= &adfs_file_inode_operations;
+		inode->i_fop	= &adfs_file_operations;
+		inode->i_mapping->a_ops = &adfs_aops;
+		ADFS_I(inode)->mmu_private = inode->i_size;
+	}
+
+	insert_inode_hash(inode);
+
+out:
+	return inode;
+}
+
+/*
+ * Validate and convert a changed access mode/time to their ADFS equivalents.
+ * adfs_write_inode will actually write the information back to the directory
+ * later.
+ */
+int
+adfs_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	unsigned int ia_valid = attr->ia_valid;
+	int error;
+	
+	lock_kernel();
+
+	error = inode_change_ok(inode, attr);
+
+	/*
+	 * we can't change the UID or GID of any file -
+	 * we have a global UID/GID in the superblock
+	 */
+	if ((ia_valid & ATTR_UID && attr->ia_uid != ADFS_SB(sb)->s_uid) ||
+	    (ia_valid & ATTR_GID && attr->ia_gid != ADFS_SB(sb)->s_gid))
+		error = -EPERM;
+
+	if (error)
+		goto out;
+
+	if (ia_valid & ATTR_SIZE)
+		error = vmtruncate(inode, attr->ia_size);
+
+	if (error)
+		goto out;
+
+	if (ia_valid & ATTR_MTIME) {
+		inode->i_mtime = attr->ia_mtime;
+		adfs_unix2adfs_time(inode, attr->ia_mtime.tv_sec);
+	}
+	/*
+	 * FIXME: should we make these == to i_mtime since we don't
+	 * have the ability to represent them in our filesystem?
+	 */
+	if (ia_valid & ATTR_ATIME)
+		inode->i_atime = attr->ia_atime;
+	if (ia_valid & ATTR_CTIME)
+		inode->i_ctime = attr->ia_ctime;
+	if (ia_valid & ATTR_MODE) {
+		ADFS_I(inode)->attr = adfs_mode2atts(sb, inode);
+		inode->i_mode = adfs_atts2mode(sb, inode);
+	}
+
+	/*
+	 * FIXME: should we be marking this inode dirty even if
+	 * we don't have any metadata to write back?
+	 */
+	if (ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MODE))
+		mark_inode_dirty(inode);
+out:
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * write an existing inode back to the directory, and therefore the disk.
+ * The adfs-specific inode data has already been updated by
+ * adfs_notify_change()
+ */
+int adfs_write_inode(struct inode *inode, int unused)
+{
+	struct super_block *sb = inode->i_sb;
+	struct object_info obj;
+	int ret;
+
+	lock_kernel();
+	obj.file_id	= inode->i_ino;
+	obj.name_len	= 0;
+	obj.parent_id	= ADFS_I(inode)->parent_id;
+	obj.loadaddr	= ADFS_I(inode)->loadaddr;
+	obj.execaddr	= ADFS_I(inode)->execaddr;
+	obj.attr	= ADFS_I(inode)->attr;
+	obj.size	= inode->i_size;
+
+	ret = adfs_dir_update(sb, &obj);
+	unlock_kernel();
+	return ret;
+}
+MODULE_LICENSE("GPL");
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
new file mode 100644
index 0000000..92ab4fb
--- /dev/null
+++ b/fs/adfs/map.c
@@ -0,0 +1,296 @@
+/*
+ *  linux/fs/adfs/map.c
+ *
+ *  Copyright (C) 1997-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/spinlock.h>
+#include <linux/buffer_head.h>
+
+#include <asm/unaligned.h>
+
+#include "adfs.h"
+
+/*
+ * The ADFS map is basically a set of sectors.  Each sector is called a
+ * zone and contains a bitstream made up of variable-sized fragments.
+ * Each bit refers to a set of bytes in the filesystem, defined by
+ * log2bpmb.  This may be larger or smaller than the sector size, but
+ * the overall size it describes will always be a round number of
+ * sectors.  A fragment id is always idlen bits long.
+ *
+ *  < idlen > <       n        > <1>
+ * +---------+-------//---------+---+
+ * | frag id |  0000....000000  | 1 |
+ * +---------+-------//---------+---+
+ *
+ * The physical disk space used by a fragment is taken from the start of
+ * the fragment id up to and including the '1' bit - ie, idlen + n + 1
+ * bits.
+ *
+ * A fragment id can be repeated multiple times in the whole map for
+ * large or fragmented files.  The first map zone a fragment starts in
+ * is given by fragment id / ids_per_zone - this allows objects to start
+ * from any zone on the disk.
+ *
+ * Free space is described by a linked list of fragments.  Each free
+ * fragment describes free space in the same way as the other fragments;
+ * however, the frag id specifies an offset (in map bits) from the end
+ * of this fragment to the start of the next free fragment.
+ *
+ * Objects stored on the disk are allocated object ids (we use these as
+ * our inode numbers.)  Object ids contain a fragment id and an optional
+ * offset.  This allows a directory fragment to contain small files
+ * associated with that directory.
+ */
+
+/*
+ * For the future...
+ */
+static DEFINE_RWLOCK(adfs_map_lock);
+
+/*
+ * This is fun.  We need to load up to 19 bits from the map at an
+ * arbitrary bit alignment.  (We're limited to 19 bits by F+ version 2).
+ */
+#define GET_FRAG_ID(_map,_start,_idmask)				\
+	({								\
+		unsigned char *_m = _map + (_start >> 3);		\
+		u32 _frag = get_unaligned((u32 *)_m);			\
+		_frag >>= (_start & 7);					\
+		_frag & _idmask;					\
+	})
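+
+/*
+ * Usage sketch (illustration only): with idlen == 15 the mask is
+ * (1 << 15) - 1, so the fragment id starting at map bit 21 would be
+ * fetched as
+ *
+ *	frag = GET_FRAG_ID(map, 21, (1 << 15) - 1);
+ *
+ * i.e. an unaligned 32-bit load from byte 21 >> 3 == 2 of the map,
+ * shifted right by 21 & 7 == 5 bits and masked down to idlen bits.
+ */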
+
+/*
+ * return the map bit offset of the fragment frag_id in the zone dm.
+ * Note that the loop is optimised for best asm code - look at the
+ * output of:
+ *  gcc -D__KERNEL__ -O2 -I../../include -o - -S map.c
+ */
+static int
+lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
+	    const unsigned int frag_id, unsigned int *offset)
+{
+	const unsigned int mapsize = dm->dm_endbit;
+	const u32 idmask = (1 << idlen) - 1;
+	unsigned char *map = dm->dm_bh->b_data + 4;
+	unsigned int start = dm->dm_startbit;
+	unsigned int mapptr;
+	u32 frag;
+
+	do {
+		frag = GET_FRAG_ID(map, start, idmask);
+		mapptr = start + idlen;
+
+		/*
+		 * find end of fragment
+		 */
+		{
+			__le32 *_map = (__le32 *)map;
+			u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
+			while (v == 0) {
+				mapptr = (mapptr & ~31) + 32;
+				if (mapptr >= mapsize)
+					goto error;
+				v = le32_to_cpu(_map[mapptr >> 5]);
+			}
+
+			mapptr += 1 + ffz(~v);
+		}
+
+		if (frag == frag_id)
+			goto found;
+again:
+		start = mapptr;
+	} while (mapptr < mapsize);
+	return -1;
+
+error:
+	printk(KERN_ERR "adfs: oversized fragment 0x%x at 0x%x-0x%x\n",
+		frag, start, mapptr);
+	return -1;
+
+found:
+	{
+		int length = mapptr - start;
+		if (*offset >= length) {
+			*offset -= length;
+			goto again;
+		}
+	}
+	return start + *offset;
+}
+
+/*
+ * Scan the free space map, for this zone, calculating the total
+ * number of map bits in each free space fragment.
+ *
+ * Note: idmask is limited to 15 bits [3.2]
+ */
+static unsigned int
+scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
+{
+	const unsigned int mapsize = dm->dm_endbit + 32;
+	const unsigned int idlen  = asb->s_idlen;
+	const unsigned int frag_idlen = idlen <= 15 ? idlen : 15;
+	const u32 idmask = (1 << frag_idlen) - 1;
+	unsigned char *map = dm->dm_bh->b_data;
+	unsigned int start = 8, mapptr;
+	u32 frag;
+	unsigned long total = 0;
+
+	/*
+	 * get fragment id
+	 */
+	frag = GET_FRAG_ID(map, start, idmask);
+
+	/*
+	 * If the freelink is null, then no free fragments
+	 * exist in this zone.
+	 */
+	if (frag == 0)
+		return 0;
+
+	do {
+		start += frag;
+
+		/*
+		 * get fragment id
+		 */
+		frag = GET_FRAG_ID(map, start, idmask);
+		mapptr = start + idlen;
+
+		/*
+		 * find end of fragment
+		 */
+		{
+			__le32 *_map = (__le32 *)map;
+			u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
+			while (v == 0) {
+				mapptr = (mapptr & ~31) + 32;
+				if (mapptr >= mapsize)
+					goto error;
+				v = le32_to_cpu(_map[mapptr >> 5]);
+			}
+
+			mapptr += 1 + ffz(~v);
+		}
+
+		total += mapptr - start;
+	} while (frag >= idlen + 1);
+
+	if (frag != 0)
+		printk(KERN_ERR "adfs: undersized free fragment\n");
+
+	return total;
+error:
+	printk(KERN_ERR "adfs: oversized free fragment\n");
+	return 0;
+}
+
+static int
+scan_map(struct adfs_sb_info *asb, unsigned int zone,
+	 const unsigned int frag_id, unsigned int mapoff)
+{
+	const unsigned int idlen = asb->s_idlen;
+	struct adfs_discmap *dm, *dm_end;
+	int result;
+
+	dm	= asb->s_map + zone;
+	zone	= asb->s_map_size;
+	dm_end	= asb->s_map + zone;
+
+	do {
+		result = lookup_zone(dm, idlen, frag_id, &mapoff);
+
+		if (result != -1)
+			goto found;
+
+		dm ++;
+		if (dm == dm_end)
+			dm = asb->s_map;
+	} while (--zone > 0);
+
+	return -1;
+found:
+	result -= dm->dm_startbit;
+	result += dm->dm_startblk;
+
+	return result;
+}
+
+/*
+ * calculate the number of free blocks in the map.
+ *
+ *               nzones
+ *  total_free =   E  (free_in_zone_n)
+ *                n=1
+ */
+unsigned int
+adfs_map_free(struct super_block *sb)
+{
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+	struct adfs_discmap *dm;
+	unsigned int total = 0;
+	unsigned int zone;
+
+	dm   = asb->s_map;
+	zone = asb->s_map_size;
+
+	do {
+		total += scan_free_map(asb, dm++);
+	} while (--zone > 0);
+
+	return signed_asl(total, asb->s_map2blk);
+}
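+
+/*
+ * Unit-conversion note (derived from the code, kept here as a sketch):
+ * each map bit covers 2^log2bpmb bytes, i.e. 2^s_map2blk filesystem
+ * blocks, so signed_asl(total, s_map2blk) above scales the free-bit
+ * count up to blocks, while adfs_map_lookup() below shifts by
+ * -s_map2blk to convert a sector offset down to map bits.
+ */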
+
+int
+adfs_map_lookup(struct super_block *sb, unsigned int frag_id,
+		unsigned int offset)
+{
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+	unsigned int zone, mapoff;
+	int result;
+
+	/*
+	 * map & root fragment is special - it starts in the center of the
+	 * disk.  The other fragments start at zone (frag / ids_per_zone)
+	 */
+	if (frag_id == ADFS_ROOT_FRAG)
+		zone = asb->s_map_size >> 1;
+	else
+		zone = frag_id / asb->s_ids_per_zone;
+
+	if (zone >= asb->s_map_size)
+		goto bad_fragment;
+
+	/* Convert sector offset to map offset */
+	mapoff = signed_asl(offset, -asb->s_map2blk);
+
+	read_lock(&adfs_map_lock);
+	result = scan_map(asb, zone, frag_id, mapoff);
+	read_unlock(&adfs_map_lock);
+
+	if (result > 0) {
+		unsigned int secoff;
+
+		/* Calculate sector offset into map block */
+		secoff = offset - signed_asl(mapoff, asb->s_map2blk);
+		return secoff + signed_asl(result, asb->s_map2blk);
+	}
+
+	adfs_error(sb, "fragment 0x%04x at offset %d not found in map",
+		   frag_id, offset);
+	return 0;
+
+bad_fragment:
+	adfs_error(sb, "invalid fragment 0x%04x (zone = %d, max = %d)",
+		   frag_id, zone, asb->s_map_size);
+	return 0;
+}
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
new file mode 100644
index 0000000..2439632
--- /dev/null
+++ b/fs/adfs/super.c
@@ -0,0 +1,508 @@
+/*
+ *  linux/fs/adfs/super.c
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/adfs_fs.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <stdarg.h>
+
+#include "adfs.h"
+#include "dir_f.h"
+#include "dir_fplus.h"
+
+void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
+{
+	char error_buf[128];
+	va_list args;
+
+	va_start(args, fmt);
+	vsprintf(error_buf, fmt, args);
+	va_end(args);
+
+	printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
+		sb->s_id, function ? ": " : "",
+		function ? function : "", error_buf);
+}
+
+static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
+{
+	int i;
+
+	/* sector size must be 256, 512 or 1024 bytes */
+	if (dr->log2secsize != 8 &&
+	    dr->log2secsize != 9 &&
+	    dr->log2secsize != 10)
+		return 1;
+
+	/* idlen must be at least log2secsize + 3 */
+	if (dr->idlen < dr->log2secsize + 3)
+		return 1;
+
+	/* we cannot have such a large disc that we
+	 * are unable to represent sector offsets in
+	 * 32 bits.  This works out at 2.0 TB.
+	 */
+	if (le32_to_cpu(dr->disc_size_high) >> dr->log2secsize)
+		return 1;
+
+	/* idlen must be no greater than 19 v2 [1.0] */
+	if (dr->idlen > 19)
+		return 1;
+
+	/* reserved bytes should be zero */
+	for (i = 0; i < sizeof(dr->unused52); i++)
+		if (dr->unused52[i] != 0)
+			return 1;
+
+	return 0;
+}
+
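+/*
+ * Compute the zone check byte for one map zone: four interleaved
+ * byte-wide sums with each lane's carry folded into the next, XORed
+ * together at the end.  adfs_checkmap() below compares the result
+ * against byte 0 of the zone.
+ */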
+static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
+{
+	unsigned int v0, v1, v2, v3;
+	int i;
+
+	v0 = v1 = v2 = v3 = 0;
+	for (i = sb->s_blocksize - 4; i; i -= 4) {
+		v0 += map[i]     + (v3 >> 8);
+		v3 &= 0xff;
+		v1 += map[i + 1] + (v0 >> 8);
+		v0 &= 0xff;
+		v2 += map[i + 2] + (v1 >> 8);
+		v1 &= 0xff;
+		v3 += map[i + 3] + (v2 >> 8);
+		v2 &= 0xff;
+	}
+	v0 +=           v3 >> 8;
+	v1 += map[1] + (v0 >> 8);
+	v2 += map[2] + (v1 >> 8);
+	v3 += map[3] + (v2 >> 8);
+
+	return v0 ^ v1 ^ v2 ^ v3;
+}
+
+static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm)
+{
+	unsigned char crosscheck = 0, zonecheck = 1;
+	int i;
+
+	for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) {
+		unsigned char *map;
+
+		map = dm[i].dm_bh->b_data;
+
+		if (adfs_calczonecheck(sb, map) != map[0]) {
+			adfs_error(sb, "zone %d fails zonecheck", i);
+			zonecheck = 0;
+		}
+		crosscheck ^= map[3];
+	}
+	if (crosscheck != 0xff)
+		adfs_error(sb, "crosscheck != 0xff");
+	return crosscheck == 0xff && zonecheck;
+}
+
+static void adfs_put_super(struct super_block *sb)
+{
+	int i;
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	for (i = 0; i < asb->s_map_size; i++)
+		brelse(asb->s_map[i].dm_bh);
+	kfree(asb->s_map);
+	kfree(asb);
+	sb->s_fs_info = NULL;
+}
+
+enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err};
+
+static match_table_t tokens = {
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_ownmask, "ownmask=%o"},
+	{Opt_othmask, "othmask=%o"},
+	{Opt_err, NULL}
+};
+
+static int parse_options(struct super_block *sb, char *options)
+{
+	char *p;
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+	int option;
+
+	if (!options)
+		return 0;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_uid:
+			if (match_int(args, &option))
+				return -EINVAL;
+			asb->s_uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(args, &option))
+				return -EINVAL;
+			asb->s_gid = option;
+			break;
+		case Opt_ownmask:
+			if (match_octal(args, &option))
+				return -EINVAL;
+			asb->s_owner_mask = option;
+			break;
+		case Opt_othmask:
+			if (match_octal(args, &option))
+				return -EINVAL;
+			asb->s_other_mask = option;
+			break;
+		default:
+			printk("ADFS-fs: unrecognised mount option \"%s\" "
+					"or missing value\n", p);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
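+
+/*
+ * Example (illustration only; device and mount point are made up):
+ *
+ *	mount -t adfs -o uid=1000,gid=1000,ownmask=0700,othmask=0077 \
+ *		/dev/sdb1 /mnt/riscos
+ *
+ * uid/gid are parsed as decimal, ownmask/othmask as octal, matching
+ * the token table above.
+ */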
+
+static int adfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return parse_options(sb, data);
+}
+
+static int adfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	buf->f_type    = ADFS_SUPER_MAGIC;
+	buf->f_namelen = asb->s_namelen;
+	buf->f_bsize   = sb->s_blocksize;
+	buf->f_blocks  = asb->s_size;
+	buf->f_files   = asb->s_ids_per_zone * asb->s_map_size;
+	buf->f_bavail  =
+	buf->f_bfree   = adfs_map_free(sb);
+	buf->f_ffree   = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
+
+	return 0;
+}
+
+static kmem_cache_t *adfs_inode_cachep;
+
+static struct inode *adfs_alloc_inode(struct super_block *sb)
+{
+	struct adfs_inode_info *ei;
+	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void adfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
+					     sizeof(struct adfs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (adfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(adfs_inode_cachep))
+		printk(KERN_INFO "adfs_inode_cache: not all structures were freed\n");
+}
+
+static struct super_operations adfs_sops = {
+	.alloc_inode	= adfs_alloc_inode,
+	.destroy_inode	= adfs_destroy_inode,
+	.write_inode	= adfs_write_inode,
+	.put_super	= adfs_put_super,
+	.statfs		= adfs_statfs,
+	.remount_fs	= adfs_remount,
+};
+
+static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
+{
+	struct adfs_discmap *dm;
+	unsigned int map_addr, zone_size, nzones;
+	int i, zone;
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	nzones    = asb->s_map_size;
+	zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
+	map_addr  = (nzones >> 1) * zone_size -
+		     ((nzones > 1) ? ADFS_DR_SIZE_BITS : 0);
+	map_addr  = signed_asl(map_addr, asb->s_map2blk);
+
+	asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
+
+	dm = kmalloc(nzones * sizeof(*dm), GFP_KERNEL);
+	if (dm == NULL) {
+		adfs_error(sb, "not enough memory");
+		return NULL;
+	}
+
+	for (zone = 0; zone < nzones; zone++, map_addr++) {
+		dm[zone].dm_startbit = 0;
+		dm[zone].dm_endbit   = zone_size;
+		dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
+		dm[zone].dm_bh       = sb_bread(sb, map_addr);
+
+		if (!dm[zone].dm_bh) {
+			adfs_error(sb, "unable to read map");
+			goto error_free;
+		}
+	}
+
+	/* adjust the limits for the first and last map zones */
+	i = zone - 1;
+	dm[0].dm_startblk = 0;
+	dm[0].dm_startbit = ADFS_DR_SIZE_BITS;
+	dm[i].dm_endbit   = (le32_to_cpu(dr->disc_size_high) << (32 - dr->log2bpmb)) +
+			    (le32_to_cpu(dr->disc_size) >> dr->log2bpmb) +
+			    (ADFS_DR_SIZE_BITS - i * zone_size);
+
+	if (adfs_checkmap(sb, dm))
+		return dm;
+
+	adfs_error(sb, "map corrupted");
+
+error_free:
+	while (--zone >= 0)
+		brelse(dm[zone].dm_bh);
+
+	kfree(dm);
+	return NULL;
+}
+
+static inline unsigned long adfs_discsize(struct adfs_discrecord *dr, int block_bits)
+{
+	unsigned long discsize;
+
+	discsize  = le32_to_cpu(dr->disc_size_high) << (32 - block_bits);
+	discsize |= le32_to_cpu(dr->disc_size) >> block_bits;
+
+	return discsize;
+}
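+
+/*
+ * Worked example (illustration only): with 1024-byte blocks
+ * (block_bits == 10), a 512MB disc has disc_size_high == 0 and
+ * disc_size == 0x20000000, so adfs_discsize() returns
+ * 0x20000000 >> 10 == 0x80000 blocks.
+ */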
+
+static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct adfs_discrecord *dr;
+	struct buffer_head *bh;
+	struct object_info root_obj;
+	unsigned char *b_data;
+	struct adfs_sb_info *asb;
+	struct inode *root;
+
+	sb->s_flags |= MS_NODIRATIME;
+
+	asb = kmalloc(sizeof(*asb), GFP_KERNEL);
+	if (!asb)
+		return -ENOMEM;
+	sb->s_fs_info = asb;
+	memset(asb, 0, sizeof(*asb));
+
+	/* set default options */
+	asb->s_uid = 0;
+	asb->s_gid = 0;
+	asb->s_owner_mask = S_IRWXU;
+	asb->s_other_mask = S_IRWXG | S_IRWXO;
+
+	if (parse_options(sb, data))
+		goto error;
+
+	sb_set_blocksize(sb, BLOCK_SIZE);
+	if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
+		adfs_error(sb, "unable to read superblock");
+		goto error;
+	}
+
+	b_data = bh->b_data + (ADFS_DISCRECORD % BLOCK_SIZE);
+
+	if (adfs_checkbblk(b_data)) {
+		if (!silent)
+			printk("VFS: Can't find an adfs filesystem on dev "
+				"%s.\n", sb->s_id);
+		goto error_free_bh;
+	}
+
+	dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
+
+	/*
+	 * Do some sanity checks on the ADFS disc record
+	 */
+	if (adfs_checkdiscrecord(dr)) {
+		if (!silent)
+			printk("VFS: Can't find an adfs filesystem on dev "
+				"%s.\n", sb->s_id);
+		goto error_free_bh;
+	}
+
+	brelse(bh);
+	if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+		bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
+		if (!bh) {
+			adfs_error(sb, "couldn't read superblock on "
+				"2nd try.");
+			goto error;
+		}
+		b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
+		if (adfs_checkbblk(b_data)) {
+			adfs_error(sb, "disc record mismatch, very weird!");
+			goto error_free_bh;
+		}
+		dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
+	} else {
+		if (!silent)
+			printk(KERN_ERR "VFS: Unsupported blocksize on dev "
+				"%s.\n", sb->s_id);
+		goto error;
+	}
+
+	/*
+	 * blocksize on this device should now be set to the ADFS log2secsize
+	 */
+
+	sb->s_magic		= ADFS_SUPER_MAGIC;
+	asb->s_idlen		= dr->idlen;
+	asb->s_map_size		= dr->nzones | (dr->nzones_high << 8);
+	asb->s_map2blk		= dr->log2bpmb - dr->log2secsize;
+	asb->s_size    		= adfs_discsize(dr, sb->s_blocksize_bits);
+	asb->s_version 		= dr->format_version;
+	asb->s_log2sharesize	= dr->log2sharesize;
+	
+	asb->s_map = adfs_read_map(sb, dr);
+	if (!asb->s_map)
+		goto error_free_bh;
+
+	brelse(bh);
+
+	/*
+	 * set up enough so that we can read an inode
+	 */
+	sb->s_op = &adfs_sops;
+
+	dr = (struct adfs_discrecord *)(asb->s_map[0].dm_bh->b_data + 4);
+
+	root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root);
+	root_obj.name_len  = 0;
+	root_obj.loadaddr  = 0;
+	root_obj.execaddr  = 0;
+	root_obj.size	   = ADFS_NEWDIR_SIZE;
+	root_obj.attr	   = ADFS_NDA_DIRECTORY   | ADFS_NDA_OWNER_READ |
+			     ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;
+
+	/*
+	 * If this is a F+ disk with variable length directories,
+	 * get the root_size from the disc record.
+	 */
+	if (asb->s_version) {
+		root_obj.size = le32_to_cpu(dr->root_size);
+		asb->s_dir     = &adfs_fplus_dir_ops;
+		asb->s_namelen = ADFS_FPLUS_NAME_LEN;
+	} else {
+		asb->s_dir     = &adfs_f_dir_ops;
+		asb->s_namelen = ADFS_F_NAME_LEN;
+	}
+
+	root = adfs_iget(sb, &root_obj);
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		int i;
+		iput(root);
+		for (i = 0; i < asb->s_map_size; i++)
+			brelse(asb->s_map[i].dm_bh);
+		kfree(asb->s_map);
+		adfs_error(sb, "get root inode failed");
+		goto error;
+	} else
+		sb->s_root->d_op = &adfs_dentry_operations;
+	return 0;
+
+error_free_bh:
+	brelse(bh);
+error:
+	sb->s_fs_info = NULL;
+	kfree(asb);
+	return -EINVAL;
+}
+
+static struct super_block *adfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, adfs_fill_super);
+}
+
+static struct file_system_type adfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "adfs",
+	.get_sb		= adfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_adfs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&adfs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_adfs_fs(void)
+{
+	unregister_filesystem(&adfs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_adfs_fs)
+module_exit(exit_adfs_fs)
diff --git a/fs/affs/Changes b/fs/affs/Changes
new file mode 100644
index 0000000..a29409c
--- /dev/null
+++ b/fs/affs/Changes
@@ -0,0 +1,343 @@
+(Note: I consider version numbers cheap. That means
+that I do not like numbers like 0.1 and the like for
+things that have been usable for quite some time. But
+then, 3.1 doesn't mean 'perfectly stable' either.)
+
+Known bugs:
+-----------
+
+- Doesn't work on the alpha. The only 64/32-bit
+  problem that I'm aware of (pointer/int conversion
+  in readdir()) gives compiler warnings but is
+  apparently not causing the failure, as directory
+  reads basically work (but all files are of size 0).
+  Alas, I've got no alpha to debug. :-(
+
+- The partition checker (drivers/block/genhd.c)
+  doesn't work with devices which have 256 byte
+  blocks (some very old SCSI drives). 
+
+- The feature to automatically make the fs clean
+  might leave a trashed file system with the
+  bitmap flag set valid.
+
+- When a file is truncated to a size that is not
+  a multiple of the blocksize, the rest of the
+  last allocated block is not cleared. Well,
+  this fs never claimed to be Posix conformant.
+
+Please direct bug reports to: zippel@linux-m68k.org
+
+Version 3.20
+------------
+- kill kernel lock
+- fix for a possible bitmap corruption
+
+Version 3.19
+------------
+
+- sizeof changes from Kernel Janitor Project
+- several bug fixes found with fsx
+
+Version 3.18
+------------
+
+- change to global min macro + warning fixes
+- add module tags
+
+Version 3.17
+------------
+
+- locking fixes
+- wrong sign in __affs_hash_dentry
+- remove unnecessary check in affs_new_inode
+- enable international mode for dircache fs
+
+Version 3.16
+------------
+
+- use mark_buffer_dirty_inode instead of mark_buffer_dirty.
+- introduce affs_lock_{link|dir|ext}.
+
+Version 3.15
+------------
+
+- disable link to directories until we can properly support them.
+- locking fixes for link creation/removal.
+
+Version 3.14
+------------
+
+- correctly cut off long file names for compares
+- correctly initialize s_last_bmap
+
+Version 3.13
+------------
+
+Major cleanup for 2.4 [Roman Zippel]
+- new extended block handling
+- new bitmap allocation functions
+- locking should be safe for the future
+- cleanup of some interfaces
+
+Version 3.12
+------------
+
+more 2.4 fixes: [Roman Zippel]
+- s_lock changes
+- increased getblock mess
+- clear meta blocks 
+
+Version 3.11
+------------
+
+- Converted to use 2.3.x page cache [Dave Jones <dave@powertweak.com>]
+- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>]
+
+Version 3.10
+------------
+
+- Changed partition checker to allow devices
+  with physical blocks != 512 bytes.
+
+- The partition checker now also ignores the
+  word at 0xd0 that Windows likes to write to.
+
+Version 3.9
+-----------
+
+- Moved cleanup from release_file() to put_inode().
+  This makes the first one obsolete.
+
+- truncate() zeroes the unused remainder of a
+  partially used last block when a file is truncated.
+  It also marks the inode dirty now (which is not
+  really necessary as notify_change() will do
+  it anyway).
+
+- Added a few comments, fixed some typos (and
+  introduced some new ones), made the debug messages
+  more consistent. Changed a bad example in the
+  doc file (affs.txt).
+
+- Sets the NOEXEC flag in read_super() for old file
+  systems, since you can't run programs on them.
+
+Version 3.8
+-----------
+Bill Hawes kindly reviewed the affs and sent me the
+patches he did. They're marked (BH). Thanks, Bill!
+
+- Cleanup of error handling in read_super().
+  Didn't release all resources in case of an
+  error. (BH)
+
+- put_inode() releases the ext cache only if it's
+  no longer needed. (BH)
+
+- One set of dentry callbacks is enough. (BH)
+
+- Cleanup of error handling in namei.c. (BH)
+
+- Cleanup of error handling in file.c. (BH)
+
+- The original blocksize of the device is
+  restored when the fs is unmounted. (BH)
+
+- getblock() did not invalidate the key cache
+  when it allocated a new block.
+
+- Removed some unnecessary locks as Bill
+  suggested.
+
+- Simplified match_name(), changed all hashing
+  and case insensitive name comparisons to use
+  uppercase. This makes the tolower() routines
+  obsolete.
+
+- Added mount option 'mufs' to force muFS
+  uid/gid interpretation.
+
+- File mode changes were not updated on disk.
+  This was fixed before, but somehow got lost.
+
+Version 3.7
+-----------
+
+- Added dentry callbacks to allow the dcache to
+  operate case insensitive and length ignorant
+  like the affs itself.
+
+- getblock() didn't update the lastblock field in the
+  inode if the fs was not an OFS. This bug only shows
+  up if a file was enlarged via truncate() and there
+  was not enough space.
+
+- Remove some more superfluous code left over from
+  the old link days ...
+
+- Fixed some oversights which were in patch 2.1.78.
+
+- Fixed a few typos.
+
+Version 3.6
+-----------
+
+- dentry changes. (Thanks to Jes Sorensen for his help.)
+
+- Fixed bug in balloc(): Superblock was not set dirty after
+  the bitmap was changed, so the bitmap wasn't sync'd.
+
+- Fixed nasty bug in find_new_zone(): If the current
+  zone number was zero, the loop didn't terminate,
+  causing a solid lock-up.
+
+- Removed support for old-style directory reads.
+
+- Fixed bug in add_entry(): When doing a sorted insert,
+  the pointer to the next entry in the hash chain wasn't
+  correctly byte-swapped. Since most of the users of the
+  affs use it on a 68k, they didn't notice. But why did
+  I not find this during my tests?
+
+- Fixed some oversights (version wasn't updated on some
+  directory changes).
+
+- Handling of hard links rewritten. To the VFS
+  they appear now as normal Unix links. They are
+  now resolved only once in lookup(). The backside
+  is that unlink(), rename() and rmdir() have to
+  be smart about them, but the result is worth the
+  effort. This also led to some code cleanup.
+
+- Changed name type to unsigned char; the test for
+  invalid filenames didn't work correctly.
+  (Thanks to Michael Krause for pointing at this.)
+
+- Changed mapping of executable flag.
+
+- Changed all network byte-order macros to the
+  recommended ones.
+
+- Added a remount function, so attempts to remount
+  a dircache filesystem or one with errors read/write
+  can be trapped. Previously, ro remounts didn't
+  flush the super block, and rw remounts didn't
+  create allocation zones ...
+
+- Call shrink_dcache_parent() in rmdir().
+  (Thanks to Bill Hawes.)
+
+- Permission checks in unlink().
+
+- Allow mounting of volumes with superfluous
+  bitmap pointers read only, also allows them
+  to be remounted read/write.
+
+- Owner/Group defaults now to the fs user (i.e.
+  the one that mounted it) instead of root. This
+  obsoletes the mount options uid and gid.
+
+- Argument to volume option could overflow the
+  name buffer. It is now silently truncated to
+  30 characters. (Damn it! This kind of bug
+  is too embarrassing.)
+
+- Split inode.c into 2 files, the superblock
+  routines desperately wanted their own file.
+
+- truncate() didn't allocate an extension block
+  cache. If a file was extended by means of
+  truncate(), this led to an Oops.
+
+- fsuser is now checked last.
+
+- rename() will not ignore changes in filename
+  casing any more (though mv(1) still won't allow
+  you to do "mv oldname OldName").
+
+Version 3.5
+-----------
+
+- Extension block caches are now allocated on
+  demand instead of when a file is opened, as
+  files can be read and written without opening
+  them (e. g. the loopback device does this).
+
+- Removed an unused function.
+
+Version 3.4
+-----------
+
+- Hash chains are now sorted by block numbers.
+  (Thanks to Kars de Jong for finding this.)
+- Removed all unnecessary external symbols.
+
+Version 3.3
+-----------
+
+- Tried to make all types 'correct' and consistent.
+- Errors and warnings are now reported via a
+  function. They are all prefixed by a severity
+  and have the same appearance:
+    "AFFS: <function>: <error message>"
+  (There's one exception to this, as in that function
+  is no pointer to the super block available.)
+- The filesystem is remounted read-only after an
+  error.
+- The names of newly created filesystem objects are
+  now checked for validity.
+- Minor cleanups in comments.
+- Added this Changes file. At last!
+
+Version 3.2
+-----------
+
+- Extension block cache: Reading/writing of huge files
+  (several MB) is much faster (of course the added
+  overhead slows down opening, but this is hardly
+  noticeable).
+- The same get_block()-routine can now be used for
+  both OFS and FFS.
+- The super block is now searched in the block that
+  was calculated and in the one following. This
+  should remedy the round-off error introduced by
+  the 1-k blocks that Linux uses.
+- Minor changes to adhere to the new VFS interface.
+- The number of used blocks is now also calculated
+  if the filesystem is mounted read-only.
+- Prefixed some constants with AFFS_ to avoid name
+  clashes.
+- Removed 'EXPERIMENTAL' status.
+
+Version 3.1
+-----------
+
+- Fixed a nasty bug which didn't allow read-only
+  mounts.
+- Allow dir-cache filesystems to be mounted
+  read only.
+- OFS support.
+- Several other changes I just cannot remember
+  any more.
+
+Version 3.0
+-----------
+
+- Almost complete rewrite for the new VFS
+  interface in Linux 1.3.
+- Write support.
+- Support for hard and symbolic links.
+- Lots of things I remember even less ...
+
+Version 2.0
+-----------
+
+- Fixed a few things to get it compiled.
+- Automatic root block calculation.
+- Partition checker for genhd.c
+
+========================================
+
+Let's just call Ray Burr's original affs
+'Version 1.0'.
diff --git a/fs/affs/Makefile b/fs/affs/Makefile
new file mode 100644
index 0000000..b2c4f54
--- /dev/null
+++ b/fs/affs/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux affs filesystem routines.
+#
+
+#EXTRA_CFLAGS=-DDEBUG=1
+
+obj-$(CONFIG_AFFS_FS) += affs.o
+
+affs-objs := super.o namei.o inode.o file.o dir.o amigaffs.o bitmap.o symlink.o
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
new file mode 100644
index 0000000..0c6799f
--- /dev/null
+++ b/fs/affs/affs.h
@@ -0,0 +1,304 @@
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/affs_fs.h>
+#include <linux/amigaffs.h>
+
+/* AmigaOS allows file names of up to 30 characters in length.
+ * Names longer than that will be silently truncated. If you
+ * want to disallow this, comment out the following #define.
+ * Creating filesystem objects with longer names will then
+ * result in an error (ENAMETOOLONG).
+ */
+/*#define AFFS_NO_TRUNCATE */
+
+/* Ugly macros make the code more pretty. */
+
+#define GET_END_PTR(st,p,sz)		 ((st *)((char *)(p)+((sz)-sizeof(st))))
+#define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
+#define AFFS_BLOCK(sb, bh, blk)		(AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
+
+#ifdef __LITTLE_ENDIAN
+#define BO_EXBITS	0x18UL
+#elif defined(__BIG_ENDIAN)
+#define BO_EXBITS	0x00UL
+#else
+#error Endianness must be known for affs to work.
+#endif
+
+#define AFFS_HEAD(bh)		((struct affs_head *)(bh)->b_data)
+#define AFFS_TAIL(sb, bh)	((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail)))
+#define AFFS_ROOT_HEAD(bh)	((struct affs_root_head *)(bh)->b_data)
+#define AFFS_ROOT_TAIL(sb, bh)	((struct affs_root_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_root_tail)))
+#define AFFS_DATA_HEAD(bh)	((struct affs_data_head *)(bh)->b_data)
+#define AFFS_DATA(bh)		(((struct affs_data_head *)(bh)->b_data)->data)
+
+#define AFFS_CACHE_SIZE		PAGE_SIZE
+
+#define AFFS_MAX_PREALLOC	32
+#define AFFS_LC_SIZE		(AFFS_CACHE_SIZE/sizeof(u32)/2)
+#define AFFS_AC_SIZE		(AFFS_CACHE_SIZE/sizeof(struct affs_ext_key)/2)
+#define AFFS_AC_MASK		(AFFS_AC_SIZE-1)
+
+struct affs_ext_key {
+	u32	ext;				/* idx of the extended block */
+	u32	key;				/* block number */
+};
+
+/*
+ * affs fs inode data in memory
+ */
+struct affs_inode_info {
+	u32	 i_opencnt;
+	struct semaphore i_link_lock;		/* Protects internal inode access. */
+	struct semaphore i_ext_lock;		/* Protects internal inode access. */
+#define i_hash_lock i_ext_lock
+	u32	 i_blkcnt;			/* block count */
+	u32	 i_extcnt;			/* extended block count */
+	u32	*i_lc;				/* linear cache of extended blocks */
+	u32	 i_lc_size;
+	u32	 i_lc_shift;
+	u32	 i_lc_mask;
+	struct affs_ext_key *i_ac;		/* associative cache of extended blocks */
+	u32	 i_ext_last;			/* last accessed extended block */
+	struct buffer_head *i_ext_bh;		/* bh of last extended block */
+	loff_t	 mmu_private;
+	u32	 i_protect;			/* unused attribute bits */
+	u32	 i_lastalloc;			/* last allocated block */
+	int	 i_pa_cnt;			/* number of preallocated blocks */
+	struct inode vfs_inode;
+};
+
+/* short cut to get to the affs specific inode data */
+static inline struct affs_inode_info *AFFS_I(struct inode *inode)
+{
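+	/* list_entry() is just container_of() here: recover the
+	 * affs_inode_info that embeds this VFS inode. */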
+	return list_entry(inode, struct affs_inode_info, vfs_inode);
+}
+
+/*
+ * super-block data in memory
+ *
+ * Block numbers are adjusted for their actual size
+ *
+ */
+
+struct affs_bm_info {
+	u32 bm_key;			/* Disk block number */
+	u32 bm_free;			/* Free blocks in here */
+};
+
+struct affs_sb_info {
+	int s_partition_size;		/* Partition size in blocks. */
+	int s_reserved;			/* Number of reserved blocks. */
+	//u32 s_blksize;			/* Initial device blksize */
+	u32 s_data_blksize;		/* size of the data block w/o header */
+	u32 s_root_block;		/* FFS root block number. */
+	int s_hashsize;			/* Size of hash table. */
+	unsigned long s_flags;		/* See below. */
+	uid_t s_uid;			/* uid to override */
+	gid_t s_gid;			/* gid to override */
+	umode_t s_mode;			/* mode to override */
+	struct buffer_head *s_root_bh;	/* Cached root block. */
+	struct semaphore s_bmlock;	/* Protects bitmap access. */
+	struct affs_bm_info *s_bitmap;	/* Bitmap infos. */
+	u32 s_bmap_count;		/* # of bitmap blocks. */
+	u32 s_bmap_bits;		/* # of bits in one bitmap block */
+	u32 s_last_bmap;
+	struct buffer_head *s_bmap_bh;
+	char *s_prefix;			/* Prefix for volumes and assigns. */
+	int s_prefix_len;		/* Length of prefix. */
+	char s_volume[32];		/* Volume prefix for absolute symlinks. */
+};
+
+#define SF_INTL		0x0001		/* International filesystem. */
+#define SF_BM_VALID	0x0002		/* Bitmap is valid. */
+#define SF_IMMUTABLE	0x0004		/* Protection bits cannot be changed */
+#define SF_QUIET	0x0008		/* chmod errors will be not reported */
+#define SF_SETUID	0x0010		/* Ignore Amiga uid */
+#define SF_SETGID	0x0020		/* Ignore Amiga gid */
+#define SF_SETMODE	0x0040		/* Ignore Amiga protection bits */
+#define SF_MUFS		0x0100		/* Use MUFS uid/gid mapping */
+#define SF_OFS		0x0200		/* Old filesystem */
+#define SF_PREFIX	0x0400		/* Buffer for prefix is allocated */
+#define SF_VERBOSE	0x0800		/* Talk about fs when mounting */
+
+/* short cut to get to the affs specific sb data */
+static inline struct affs_sb_info *AFFS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* amigaffs.c */
+
+extern int	affs_insert_hash(struct inode *inode, struct buffer_head *bh);
+extern int	affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh);
+extern int	affs_remove_header(struct dentry *dentry);
+extern u32	affs_checksum_block(struct super_block *sb, struct buffer_head *bh);
+extern void	affs_fix_checksum(struct super_block *sb, struct buffer_head *bh);
+extern void	secs_to_datestamp(time_t secs, struct affs_date *ds);
+extern mode_t	prot_to_mode(u32 prot);
+extern void	mode_to_prot(struct inode *inode);
+extern void	affs_error(struct super_block *sb, const char *function, const char *fmt, ...);
+extern void	affs_warning(struct super_block *sb, const char *function, const char *fmt, ...);
+extern int	affs_check_name(const unsigned char *name, int len);
+extern int	affs_copy_name(unsigned char *bstr, struct dentry *dentry);
+
+/* bitmap. c */
+
+extern u32	affs_count_free_blocks(struct super_block *s);
+extern void	affs_free_block(struct super_block *sb, u32 block);
+extern u32	affs_alloc_block(struct inode *inode, u32 goal);
+extern int	affs_init_bitmap(struct super_block *sb, int *flags);
+extern void	affs_free_bitmap(struct super_block *sb);
+
+/* namei.c */
+
+extern int	affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
+extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *);
+extern int	affs_unlink(struct inode *dir, struct dentry *dentry);
+extern int	affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *);
+extern int	affs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+extern int	affs_rmdir(struct inode *dir, struct dentry *dentry);
+extern int	affs_link(struct dentry *olddentry, struct inode *dir,
+			  struct dentry *dentry);
+extern int	affs_symlink(struct inode *dir, struct dentry *dentry,
+			     const char *symname);
+extern int	affs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			    struct inode *new_dir, struct dentry *new_dentry);
+
+/* inode.c */
+
+extern unsigned long		 affs_parent_ino(struct inode *dir);
+extern struct inode		*affs_new_inode(struct inode *dir);
+extern int			 affs_notify_change(struct dentry *dentry, struct iattr *attr);
+extern void			 affs_put_inode(struct inode *inode);
+extern void			 affs_delete_inode(struct inode *inode);
+extern void			 affs_clear_inode(struct inode *inode);
+extern void			 affs_read_inode(struct inode *inode);
+extern int			 affs_write_inode(struct inode *inode, int);
+extern int			 affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
+
+/* file.c */
+
+void		affs_free_prealloc(struct inode *inode);
+extern void	affs_truncate(struct inode *);
+
+/* dir.c */
+
+extern void   affs_dir_truncate(struct inode *);
+
+/* jump tables */
+
+extern struct inode_operations	 affs_file_inode_operations;
+extern struct inode_operations	 affs_dir_inode_operations;
+extern struct inode_operations   affs_symlink_inode_operations;
+extern struct file_operations	 affs_file_operations;
+extern struct file_operations	 affs_file_operations_ofs;
+extern struct file_operations	 affs_dir_operations;
+extern struct address_space_operations	 affs_symlink_aops;
+extern struct address_space_operations	 affs_aops;
+extern struct address_space_operations	 affs_aops_ofs;
+
+extern struct dentry_operations	 affs_dentry_operations;
+extern struct dentry_operations	 affs_dentry_operations_intl;
+
+static inline void
+affs_set_blocksize(struct super_block *sb, int size)
+{
+	sb_set_blocksize(sb, size);
+}
+static inline struct buffer_head *
+affs_bread(struct super_block *sb, int block)
+{
+	pr_debug("affs_bread: %d\n", block);
+	if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size)
+		return sb_bread(sb, block);
+	return NULL;
+}
+static inline struct buffer_head *
+affs_getblk(struct super_block *sb, int block)
+{
+	pr_debug("affs_getblk: %d\n", block);
+	if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size)
+		return sb_getblk(sb, block);
+	return NULL;
+}
+static inline struct buffer_head *
+affs_getzeroblk(struct super_block *sb, int block)
+{
+	struct buffer_head *bh;
+	pr_debug("affs_getzeroblk: %d\n", block);
+	if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) {
+		bh = sb_getblk(sb, block);
+		lock_buffer(bh);
+		memset(bh->b_data, 0, sb->s_blocksize);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		return bh;
+	}
+	return NULL;
+}
+static inline struct buffer_head *
+affs_getemptyblk(struct super_block *sb, int block)
+{
+	struct buffer_head *bh;
+	pr_debug("affs_getemptyblk: %d\n", block);
+	if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) {
+		bh = sb_getblk(sb, block);
+		wait_on_buffer(bh);
+		set_buffer_uptodate(bh);
+		return bh;
+	}
+	return NULL;
+}
+static inline void
+affs_brelse(struct buffer_head *bh)
+{
+	if (bh)
+		pr_debug("affs_brelse: %lld\n", (long long) bh->b_blocknr);
+	brelse(bh);
+}
+
+static inline void
+affs_adjust_checksum(struct buffer_head *bh, u32 val)
+{
+	u32 tmp = be32_to_cpu(((__be32 *)bh->b_data)[5]);
+	((__be32 *)bh->b_data)[5] = cpu_to_be32(tmp - val);
+}
+static inline void
+affs_adjust_bitmapchecksum(struct buffer_head *bh, u32 val)
+{
+	u32 tmp = be32_to_cpu(((__be32 *)bh->b_data)[0]);
+	((__be32 *)bh->b_data)[0] = cpu_to_be32(tmp - val);
+}
+
+static inline void
+affs_lock_link(struct inode *inode)
+{
+	down(&AFFS_I(inode)->i_link_lock);
+}
+static inline void
+affs_unlock_link(struct inode *inode)
+{
+	up(&AFFS_I(inode)->i_link_lock);
+}
+static inline void
+affs_lock_dir(struct inode *inode)
+{
+	down(&AFFS_I(inode)->i_hash_lock);
+}
+static inline void
+affs_unlock_dir(struct inode *inode)
+{
+	up(&AFFS_I(inode)->i_hash_lock);
+}
+static inline void
+affs_lock_ext(struct inode *inode)
+{
+	down(&AFFS_I(inode)->i_ext_lock);
+}
+static inline void
+affs_unlock_ext(struct inode *inode)
+{
+	up(&AFFS_I(inode)->i_ext_lock);
+}
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
new file mode 100644
index 0000000..ccd624e
--- /dev/null
+++ b/fs/affs/amigaffs.c
@@ -0,0 +1,509 @@
+/*
+ *  linux/fs/affs/amigaffs.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Amiga FFS filesystem.
+ *
+ *  Please send bug reports to: hjw@zvw.de
+ */
+
+#include "affs.h"
+
+extern struct timezone sys_tz;
+
+static char ErrorBuffer[256];
+
+/*
+ * Functions for accessing Amiga-FFS structures.
+ */
+
+
+/* Insert a header block bh into the directory dir
+ * caller must hold AFFS_DIR->i_hash_lock!
+ */
+
+int
+affs_insert_hash(struct inode *dir, struct buffer_head *bh)
+{
+	struct super_block *sb = dir->i_sb;
+	struct buffer_head *dir_bh;
+	u32 ino, hash_ino;
+	int offset;
+
+	ino = bh->b_blocknr;
+	offset = affs_hash_name(sb, AFFS_TAIL(sb, bh)->name + 1, AFFS_TAIL(sb, bh)->name[0]);
+
+	pr_debug("AFFS: insert_hash(dir=%u, ino=%d)\n", (u32)dir->i_ino, ino);
+
+	dir_bh = affs_bread(sb, dir->i_ino);
+	if (!dir_bh)
+		return -EIO;
+
+	hash_ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[offset]);
+	while (hash_ino) {
+		affs_brelse(dir_bh);
+		dir_bh = affs_bread(sb, hash_ino);
+		if (!dir_bh)
+			return -EIO;
+		hash_ino = be32_to_cpu(AFFS_TAIL(sb, dir_bh)->hash_chain);
+	}
+	AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino);
+	AFFS_TAIL(sb, bh)->hash_chain = 0;
+	affs_fix_checksum(sb, bh);
+
+	if (dir->i_ino == dir_bh->b_blocknr)
+		AFFS_HEAD(dir_bh)->table[offset] = cpu_to_be32(ino);
+	else
+		AFFS_TAIL(sb, dir_bh)->hash_chain = cpu_to_be32(ino);
+
+	affs_adjust_checksum(dir_bh, ino);
+	mark_buffer_dirty_inode(dir_bh, dir);
+	affs_brelse(dir_bh);
+
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_version++;
+	mark_inode_dirty(dir);
+
+	return 0;
+}
+
+/* Remove a header block from its directory.
+ * caller must hold AFFS_DIR->i_hash_lock!
+ */
+
+int
+affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
+{
+	struct super_block *sb;
+	struct buffer_head *bh;
+	u32 rem_ino, hash_ino;
+	__be32 ino;
+	int offset, retval;
+
+	sb = dir->i_sb;
+	rem_ino = rem_bh->b_blocknr;
+	offset = affs_hash_name(sb, AFFS_TAIL(sb, rem_bh)->name+1, AFFS_TAIL(sb, rem_bh)->name[0]);
+	pr_debug("AFFS: remove_hash(dir=%d, ino=%d, hashval=%d)\n", (u32)dir->i_ino, rem_ino, offset);
+
+	bh = affs_bread(sb, dir->i_ino);
+	if (!bh)
+		return -EIO;
+
+	retval = -ENOENT;
+	hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]);
+	while (hash_ino) {
+		if (hash_ino == rem_ino) {
+			ino = AFFS_TAIL(sb, rem_bh)->hash_chain;
+			if (dir->i_ino == bh->b_blocknr)
+				AFFS_HEAD(bh)->table[offset] = ino;
+			else
+				AFFS_TAIL(sb, bh)->hash_chain = ino;
+			affs_adjust_checksum(bh, be32_to_cpu(ino) - hash_ino);
+			mark_buffer_dirty_inode(bh, dir);
+			AFFS_TAIL(sb, rem_bh)->parent = 0;
+			retval = 0;
+			break;
+		}
+		affs_brelse(bh);
+		bh = affs_bread(sb, hash_ino);
+		if (!bh)
+			return -EIO;
+		hash_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
+	}
+
+	affs_brelse(bh);
+
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_version++;
+	mark_inode_dirty(dir);
+
+	return retval;
+}
+
+static void
+affs_fix_dcache(struct dentry *dentry, u32 entry_ino)
+{
+	struct inode *inode = dentry->d_inode;
+	void *data = dentry->d_fsdata;
+	struct list_head *head, *next;
+
+	spin_lock(&dcache_lock);
+	head = &inode->i_dentry;
+	next = head->next;
+	while (next != head) {
+		dentry = list_entry(next, struct dentry, d_alias);
+		if (entry_ino == (u32)(long)dentry->d_fsdata) {
+			dentry->d_fsdata = data;
+			break;
+		}
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+
+/* Remove header from link chain */
+
+static int
+affs_remove_link(struct dentry *dentry)
+{
+	struct inode *dir, *inode = dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh = NULL, *link_bh = NULL;
+	u32 link_ino, ino;
+	int retval;
+
+	pr_debug("AFFS: remove_link(key=%ld)\n", inode->i_ino);
+	retval = -EIO;
+	bh = affs_bread(sb, inode->i_ino);
+	if (!bh)
+		goto done;
+
+	link_ino = (u32)(long)dentry->d_fsdata;
+	if (inode->i_ino == link_ino) {
+		/* we can't remove the head of the link, as its blocknr is still used as ino,
+		 * so we remove the block of the first link instead.
+		 */ 
+		link_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain);
+		link_bh = affs_bread(sb, link_ino);
+		if (!link_bh)
+			goto done;
+
+		dir = iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
+		if (!dir)
+			goto done;
+
+		affs_lock_dir(dir);
+		affs_fix_dcache(dentry, link_ino);
+		retval = affs_remove_hash(dir, link_bh);
+		if (retval)
+			goto done;
+		mark_buffer_dirty_inode(link_bh, inode);
+
+		memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
+		retval = affs_insert_hash(dir, bh);
+		if (retval)
+			goto done;
+		mark_buffer_dirty_inode(bh, inode);
+
+		affs_unlock_dir(dir);
+		iput(dir);
+	} else {
+		link_bh = affs_bread(sb, link_ino);
+		if (!link_bh)
+			goto done;
+	}
+
+	while ((ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain)) != 0) {
+		if (ino == link_ino) {
+			__be32 ino2 = AFFS_TAIL(sb, link_bh)->link_chain;
+			AFFS_TAIL(sb, bh)->link_chain = ino2;
+			affs_adjust_checksum(bh, be32_to_cpu(ino2) - link_ino);
+			mark_buffer_dirty_inode(bh, inode);
+			retval = 0;
+			/* Fix the link count, if bh is a normal header block without links */
+			switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
+			case ST_LINKDIR:
+			case ST_LINKFILE:
+				break;
+			default:
+				if (!AFFS_TAIL(sb, bh)->link_chain)
+					inode->i_nlink = 1;
+			}
+			affs_free_block(sb, link_ino);
+			goto done;
+		}
+		affs_brelse(bh);
+		bh = affs_bread(sb, ino);
+		if (!bh)
+			goto done;
+	}
+	retval = -ENOENT;
+done:
+	affs_brelse(link_bh);
+	affs_brelse(bh);
+	return retval;
+}
+
+
+static int
+affs_empty_dir(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh;
+	int retval, size;
+
+	retval = -EIO;
+	bh = affs_bread(sb, inode->i_ino);
+	if (!bh)
+		goto done;
+
+	retval = -ENOTEMPTY;
+	for (size = AFFS_SB(sb)->s_hashsize - 1; size >= 0; size--)
+		if (AFFS_HEAD(bh)->table[size])
+			goto not_empty;
+	retval = 0;
+not_empty:
+	affs_brelse(bh);
+done:
+	return retval;
+}
+
+
+/* Remove a filesystem object. If the object to be removed has
+ * links to it, one of the links must be changed to inherit
+ * the file or directory. As above, any inode will do.
+ * The buffer will not be freed. If the header is a link, the
+ * block will be marked as free.
+ * This function returns a negative error number in case of
+ * an error, else 0 if the inode is to be deleted or 1 if not.
+ */
+
+int
+affs_remove_header(struct dentry *dentry)
+{
+	struct super_block *sb;
+	struct inode *inode, *dir;
+	struct buffer_head *bh = NULL;
+	int retval;
+
+	dir = dentry->d_parent->d_inode;
+	sb = dir->i_sb;
+
+	retval = -ENOENT;
+	inode = dentry->d_inode;
+	if (!inode)
+		goto done;
+
+	pr_debug("AFFS: remove_header(key=%ld)\n", inode->i_ino);
+	retval = -EIO;
+	bh = affs_bread(sb, (u32)(long)dentry->d_fsdata);
+	if (!bh)
+		goto done;
+
+	affs_lock_link(inode);
+	affs_lock_dir(dir);
+	switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
+	case ST_USERDIR:
+		/* if we ever want to support links to dirs
+		 * i_hash_lock of the inode must only be
+		 * taken after some checks
+		 */
+		affs_lock_dir(inode);
+		retval = affs_empty_dir(inode);
+		affs_unlock_dir(inode);
+		if (retval)
+			goto done_unlock;
+		break;
+	default:
+		break;
+	}
+
+	retval = affs_remove_hash(dir, bh);
+	if (retval)
+		goto done_unlock;
+	mark_buffer_dirty_inode(bh, inode);
+
+	affs_unlock_dir(dir);
+
+	if (inode->i_nlink > 1)
+		retval = affs_remove_link(dentry);
+	else
+		inode->i_nlink = 0;
+	affs_unlock_link(inode);
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+
+done:
+	affs_brelse(bh);
+	return retval;
+
+done_unlock:
+	affs_unlock_dir(dir);
+	affs_unlock_link(inode);
+	goto done;
+}
+
+/* Checksum a block.  DATA points to the block.  Returns the 32-bit sum
+   of all big-endian longwords in the block; callers decide what a
+   consistent value looks like for the block type in question. */
+
+u32
+affs_checksum_block(struct super_block *sb, struct buffer_head *bh)
+{
+	__be32 *ptr = (__be32 *)bh->b_data;
+	u32 sum;
+	int bsize;
+
+	sum = 0;
+	for (bsize = sb->s_blocksize / sizeof(__be32); bsize > 0; bsize--)
+		sum += be32_to_cpu(*ptr++);
+	return sum;
+}
+
+/*
+ * Calculate the checksum of a disk block and store it
+ * at the indicated position.
+ */
+
+void
+affs_fix_checksum(struct super_block *sb, struct buffer_head *bh)
+{
+	int cnt = sb->s_blocksize / sizeof(__be32);
+	__be32 *ptr = (__be32 *)bh->b_data;
+	u32 checksum;
+	__be32 *checksumptr;
+
+	checksumptr = ptr + 5;
+	*checksumptr = 0;
+	for (checksum = 0; cnt > 0; ptr++, cnt--)
+		checksum += be32_to_cpu(*ptr);
+	*checksumptr = cpu_to_be32(-checksum);
+}
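+
+/*
+ * Sketch (illustration only): with the scheme above, a header block
+ * whose checksum lives in word 5 is consistent when the sum of all of
+ * its big-endian words is zero, so a verifier could simply do
+ *
+ *	if (affs_checksum_block(sb, bh) != 0)
+ *		affs_error(sb, "check", "block %u fails checksum",
+ *			   (u32)bh->b_blocknr);
+ *
+ * Bitmap blocks keep their checksum in word 0 instead (see
+ * affs_adjust_bitmapchecksum() in affs.h).
+ */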
+
+void
+secs_to_datestamp(time_t secs, struct affs_date *ds)
+{
+	u32	 days;
+	u32	 minute;
+
+	secs -= sys_tz.tz_minuteswest * 60 + ((8 * 365 + 2) * 24 * 60 * 60);
+	if (secs < 0)
+		secs = 0;
+	days    = secs / 86400;
+	secs   -= days * 86400;
+	minute  = secs / 60;
+	secs   -= minute * 60;
+
+	ds->days = cpu_to_be32(days);
+	ds->mins = cpu_to_be32(minute);
+	ds->ticks = cpu_to_be32(secs * 50);
+}
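+
+/*
+ * Worked example (illustration only, ignoring the timezone term):
+ * (8 * 365 + 2) days == 2922 days puts the Amiga epoch at 1 Jan 1978,
+ * so 1978-01-02 00:01:30 becomes days == 1, mins == 1 and
+ * ticks == 30 * 50 == 1500.
+ */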
+
+mode_t
+prot_to_mode(u32 prot)
+{
+	int mode = 0;
+
+	if (!(prot & FIBF_NOWRITE))
+		mode |= S_IWUSR;
+	if (!(prot & FIBF_NOREAD))
+		mode |= S_IRUSR;
+	if (!(prot & FIBF_NOEXECUTE))
+		mode |= S_IXUSR;
+	if (prot & FIBF_GRP_WRITE)
+		mode |= S_IWGRP;
+	if (prot & FIBF_GRP_READ)
+		mode |= S_IRGRP;
+	if (prot & FIBF_GRP_EXECUTE)
+		mode |= S_IXGRP;
+	if (prot & FIBF_OTR_WRITE)
+		mode |= S_IWOTH;
+	if (prot & FIBF_OTR_READ)
+		mode |= S_IROTH;
+	if (prot & FIBF_OTR_EXECUTE)
+		mode |= S_IXOTH;
+
+	return mode;
+}
+
+void
+mode_to_prot(struct inode *inode)
+{
+	u32 prot = AFFS_I(inode)->i_protect;
+	mode_t mode = inode->i_mode;
+
+	if (!(mode & S_IXUSR))
+		prot |= FIBF_NOEXECUTE;
+	if (!(mode & S_IRUSR))
+		prot |= FIBF_NOREAD;
+	if (!(mode & S_IWUSR))
+		prot |= FIBF_NOWRITE;
+	if (mode & S_IXGRP)
+		prot |= FIBF_GRP_EXECUTE;
+	if (mode & S_IRGRP)
+		prot |= FIBF_GRP_READ;
+	if (mode & S_IWGRP)
+		prot |= FIBF_GRP_WRITE;
+	if (mode & S_IXOTH)
+		prot |= FIBF_OTR_EXECUTE;
+	if (mode & S_IROTH)
+		prot |= FIBF_OTR_READ;
+	if (mode & S_IWOTH)
+		prot |= FIBF_OTR_WRITE;
+
+	AFFS_I(inode)->i_protect = prot;
+}
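+
+/*
+ * Worked example (illustration only): a Unix mode of 0644 adds
+ * FIBF_NOEXECUTE | FIBF_GRP_READ | FIBF_OTR_READ to the unused bits
+ * already cached in i_protect: readable by everyone, writable only by
+ * the owner, not executable.
+ */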
+
+void
+affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
+{
+	va_list	 args;
+
+	va_start(args,fmt);
+	vsprintf(ErrorBuffer,fmt,args);
+	va_end(args);
+
+	printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id,
+		function,ErrorBuffer);
+	if (!(sb->s_flags & MS_RDONLY))
+		printk(KERN_WARNING "AFFS: Remounting filesystem read-only\n");
+	sb->s_flags |= MS_RDONLY;
+}
+
+void
+affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
+{
+	va_list	 args;
+
+	va_start(args,fmt);
+	vsprintf(ErrorBuffer,fmt,args);
+	va_end(args);
+
+	printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id,
+		function,ErrorBuffer);
+}
+
+/* Check if the name is valid for a affs object. */
+
+int
+affs_check_name(const unsigned char *name, int len)
+{
+	int	 i;
+
+	if (len > 30)
+#ifdef AFFS_NO_TRUNCATE
+		return -ENAMETOOLONG;
+#else
+		len = 30;
+#endif
+
+	for (i = 0; i < len; i++) {
+		if (name[i] < ' ' || name[i] == ':'
+		    || (name[i] > 0x7e && name[i] < 0xa0))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* This function copies the name to bstr, truncated to
+ * at most 30 characters, and prefixes it with a
+ * length byte.
+ * NOTE: The name must already have been checked by
+ *       affs_check_name()!
+ */
+
+int
+affs_copy_name(unsigned char *bstr, struct dentry *dentry)
+{
+	int len = min(dentry->d_name.len, 30u);
+
+	*bstr++ = len;
+	memcpy(bstr, dentry->d_name.name, len);
+	return len;
+}
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
new file mode 100644
index 0000000..b0b9536
--- /dev/null
+++ b/fs/affs/bitmap.c
@@ -0,0 +1,390 @@
+/*
+ *  linux/fs/affs/bitmap.c
+ *
+ *  (c) 1996 Hans-Joachim Widmaier
+ *
+ *  bitmap.c contains the code that handles all bitmap related stuff -
+ *  block allocation, deallocation, calculation of free space.
+ */
+
+#include "affs.h"
+
+/* This is, of course, shamelessly stolen from fs/minix */
+
+static int nibblemap[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4 };
+
+static u32
+affs_count_free_bits(u32 blocksize, const void *data)
+{
+	const u32 *map;
+	u32 free;
+	u32 tmp;
+
+	map = data;
+	free = 0;
+	for (blocksize /= 4; blocksize > 0; blocksize--) {
+		tmp = *map++;
+		while (tmp) {
+			free += nibblemap[tmp & 0xf];
+			tmp >>= 4;
+		}
+	}
+
+	return free;
+}
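+
+/*
+ * Sketch (not used here): the loop above is a 4-bit population count;
+ * an equivalent using the kernel's hweight32() would be
+ *
+ *	for (blocksize /= 4; blocksize > 0; blocksize--)
+ *		free += hweight32(*map++);
+ *
+ * shown only for comparison; the driver keeps its own nibble table.
+ */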
+
+u32
+affs_count_free_blocks(struct super_block *sb)
+{
+	struct affs_bm_info *bm;
+	u32 free;
+	int i;
+
+	pr_debug("AFFS: count_free_blocks()\n");
+
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
+	down(&AFFS_SB(sb)->s_bmlock);
+
+	bm = AFFS_SB(sb)->s_bitmap;
+	free = 0;
+	for (i = AFFS_SB(sb)->s_bmap_count; i > 0; bm++, i--)
+		free += bm->bm_free;
+
+	up(&AFFS_SB(sb)->s_bmlock);
+
+	return free;
+}
+
+void
+affs_free_block(struct super_block *sb, u32 block)
+{
+	struct affs_sb_info *sbi = AFFS_SB(sb);
+	struct affs_bm_info *bm;
+	struct buffer_head *bh;
+	u32 blk, bmap, bit, mask, tmp;
+	__be32 *data;
+
+	pr_debug("AFFS: free_block(%u)\n", block);
+
+	if (block > sbi->s_partition_size)
+		goto err_range;
+
+	blk     = block - sbi->s_reserved;
+	bmap    = blk / sbi->s_bmap_bits;
+	bit     = blk % sbi->s_bmap_bits;
+	bm      = &sbi->s_bitmap[bmap];
+
+	down(&sbi->s_bmlock);
+
+	bh = sbi->s_bmap_bh;
+	if (sbi->s_last_bmap != bmap) {
+		affs_brelse(bh);
+		bh = affs_bread(sb, bm->bm_key);
+		if (!bh)
+			goto err_bh_read;
+		sbi->s_bmap_bh = bh;
+		sbi->s_last_bmap = bmap;
+	}
+
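+	/* The first longword of a bitmap block is its checksum, so bit n of
+	 * the map lives in longword n / 32 + 1; a set bit marks a free block.
+	 */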
+	mask = 1 << (bit & 31);
+	data = (__be32 *)bh->b_data + bit / 32 + 1;
+
+	/* mark block free */
+	tmp = be32_to_cpu(*data);
+	if (tmp & mask)
+		goto err_free;
+	*data = cpu_to_be32(tmp | mask);
+
+	/* fix checksum */
+	tmp = be32_to_cpu(*(__be32 *)bh->b_data);
+	*(__be32 *)bh->b_data = cpu_to_be32(tmp - mask);
+
+	mark_buffer_dirty(bh);
+	sb->s_dirt = 1;
+	bm->bm_free++;
+
+	up(&sbi->s_bmlock);
+	return;
+
+err_free:
+	affs_warning(sb,"affs_free_block","Trying to free block %u which is already free", block);
+	up(&sbi->s_bmlock);
+	return;
+
+err_bh_read:
+	affs_error(sb,"affs_free_block","Cannot read bitmap block %u", bm->bm_key);
+	sbi->s_bmap_bh = NULL;
+	sbi->s_last_bmap = ~0;
+	up(&sbi->s_bmlock);
+	return;
+
+err_range:
+	affs_error(sb, "affs_free_block","Block %u outside partition", block);
+	return;
+}
+
+/*
+ * Allocate a block in the given allocation zone.
+ * Since we have to byte-swap the bitmap on little-endian
+ * machines, this is rather expensive. Therefore we
+ * preallocate up to 16 blocks from the same word, if
+ * possible. We do not preallocate in the
+ * header zone, though.
+ */
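+/*
+ * Example: if the free bits found in a word are bits 5..9, the block for
+ * bit 5 is returned and the following four blocks are remembered via
+ * i_lastalloc/i_pa_cnt, so the next four allocations for this inode are
+ * served without touching the bitmap again.
+ */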
+
+u32
+affs_alloc_block(struct inode *inode, u32 goal)
+{
+	struct super_block *sb;
+	struct affs_sb_info *sbi;
+	struct affs_bm_info *bm;
+	struct buffer_head *bh;
+	__be32 *data, *enddata;
+	u32 blk, bmap, bit, mask, mask2, tmp;
+	int i;
+
+	sb = inode->i_sb;
+	sbi = AFFS_SB(sb);
+
+	pr_debug("AFFS: balloc(inode=%lu,goal=%u): ", inode->i_ino, goal);
+
+	if (AFFS_I(inode)->i_pa_cnt) {
+		pr_debug("%d\n", AFFS_I(inode)->i_lastalloc+1);
+		AFFS_I(inode)->i_pa_cnt--;
+		return ++AFFS_I(inode)->i_lastalloc;
+	}
+
+	if (!goal || goal > sbi->s_partition_size) {
+		if (goal)
+			affs_warning(sb, "affs_balloc", "invalid goal %d", goal);
+		//if (!AFFS_I(inode)->i_last_block)
+		//	affs_warning(sb, "affs_balloc", "no last alloc block");
+		goal = sbi->s_reserved;
+	}
+
+	blk = goal - sbi->s_reserved;
+	bmap = blk / sbi->s_bmap_bits;
+	bm = &sbi->s_bitmap[bmap];
+
+	down(&sbi->s_bmlock);
+
+	if (bm->bm_free)
+		goto find_bmap_bit;
+
+find_bmap:
+	/* search for the next bmap buffer with free bits */
+	i = sbi->s_bmap_count;
+	do {
+		if (--i < 0)
+			goto err_full;
+		bmap++;
+		bm++;
+		if (bmap < sbi->s_bmap_count)
+			continue;
+		/* restart search at zero */
+		bmap = 0;
+		bm = sbi->s_bitmap;
+	} while (!bm->bm_free);
+	blk = bmap * sbi->s_bmap_bits;
+
+find_bmap_bit:
+
+	bh = sbi->s_bmap_bh;
+	if (sbi->s_last_bmap != bmap) {
+		affs_brelse(bh);
+		bh = affs_bread(sb, bm->bm_key);
+		if (!bh)
+			goto err_bh_read;
+		sbi->s_bmap_bh = bh;
+		sbi->s_last_bmap = bmap;
+	}
+
+	/* find an unused block in this bitmap block */
+	bit = blk % sbi->s_bmap_bits;
+	data = (__be32 *)bh->b_data + bit / 32 + 1;
+	enddata = (__be32 *)((u8 *)bh->b_data + sb->s_blocksize);
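+	/* Ignore bits below the starting bit within the first word, and
+	 * round blk down to the first block covered by that word.
+	 */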
+	mask = ~0UL << (bit & 31);
+	blk &= ~31UL;
+
+	tmp = be32_to_cpu(*data);
+	if (tmp & mask)
+		goto find_bit;
+
+	/* scan the rest of the buffer */
+	do {
+		blk += 32;
+		if (++data >= enddata)
+			/* didn't find anything; this can only happen
+			 * if the scan didn't start at 0, so try the next bmap
+			 */
+			goto find_bmap;
+	} while (!*data);
+	tmp = be32_to_cpu(*data);
+	mask = ~0;
+
+find_bit:
+	/* finally look for a free bit in the word */
+	bit = ffs(tmp & mask) - 1;
+	blk += bit + sbi->s_reserved;
+	mask2 = mask = 1 << (bit & 31);
+	AFFS_I(inode)->i_lastalloc = blk;
+
+	/* prealloc as much as possible within this word */
+	while ((mask2 <<= 1)) {
+		if (!(tmp & mask2))
+			break;
+		AFFS_I(inode)->i_pa_cnt++;
+		mask |= mask2;
+	}
+	bm->bm_free -= AFFS_I(inode)->i_pa_cnt + 1;
+
+	*data = cpu_to_be32(tmp & ~mask);
+
+	/* fix checksum */
+	tmp = be32_to_cpu(*(__be32 *)bh->b_data);
+	*(__be32 *)bh->b_data = cpu_to_be32(tmp + mask);
+
+	mark_buffer_dirty(bh);
+	sb->s_dirt = 1;
+
+	up(&sbi->s_bmlock);
+
+	pr_debug("%d\n", blk);
+	return blk;
+
+err_bh_read:
+	affs_error(sb,"affs_read_block","Cannot read bitmap block %u", bm->bm_key);
+	sbi->s_bmap_bh = NULL;
+	sbi->s_last_bmap = ~0;
+err_full:
+	up(&sbi->s_bmlock);
+	pr_debug("failed\n");
+	return 0;
+}
+
+int affs_init_bitmap(struct super_block *sb, int *flags)
+{
+	struct affs_bm_info *bm;
+	struct buffer_head *bmap_bh = NULL, *bh = NULL;
+	__be32 *bmap_blk;
+	u32 size, blk, end, offset, mask;
+	int i, res = 0;
+	struct affs_sb_info *sbi = AFFS_SB(sb);
+
+	if (*flags & MS_RDONLY)
+		return 0;
+
+	if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
+		printk(KERN_NOTICE "AFFS: Bitmap invalid - mounting %s read only\n",
+			sb->s_id);
+		*flags |= MS_RDONLY;
+		return 0;
+	}
+
+	sbi->s_last_bmap = ~0;
+	sbi->s_bmap_bh = NULL;
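+	/* Each bitmap block maps blocksize * 8 - 32 blocks; its first
+	 * longword is reserved for the block checksum.
+	 */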
+	sbi->s_bmap_bits = sb->s_blocksize * 8 - 32;
+	sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
+				 sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
+	size = sbi->s_bmap_count * sizeof(*bm);
+	bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
+	if (!sbi->s_bitmap) {
+		printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
+		return -ENOMEM;
+	}
+	memset(sbi->s_bitmap, 0, size);
+
+	bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
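+	/* The root block holds 25 bitmap block pointers starting 49 longwords
+	 * from its end; the longword after them points to the first bitmap
+	 * extension block.
+	 */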
+	blk = sb->s_blocksize / 4 - 49;
+	end = blk + 25;
+
+	for (i = sbi->s_bmap_count; i > 0; bm++, i--) {
+		affs_brelse(bh);
+
+		bm->bm_key = be32_to_cpu(bmap_blk[blk]);
+		bh = affs_bread(sb, bm->bm_key);
+		if (!bh) {
+			printk(KERN_ERR "AFFS: Cannot read bitmap\n");
+			res = -EIO;
+			goto out;
+		}
+		if (affs_checksum_block(sb, bh)) {
+			printk(KERN_WARNING "AFFS: Bitmap %u invalid - mounting %s read only.\n",
+			       bm->bm_key, sb->s_id);
+			*flags |= MS_RDONLY;
+			goto out;
+		}
+		pr_debug("AFFS: read bitmap block %d: %d\n", blk, bm->bm_key);
+		bm->bm_free = affs_count_free_bits(sb->s_blocksize - 4, bh->b_data + 4);
+
+		/* Don't try to read the extension if this is the last block,
+		 * but we also need the right bm pointer below
+		 */
+		if (++blk < end || i == 1)
+			continue;
+		if (bmap_bh)
+			affs_brelse(bmap_bh);
+		bmap_bh = affs_bread(sb, be32_to_cpu(bmap_blk[blk]));
+		if (!bmap_bh) {
+			printk(KERN_ERR "AFFS: Cannot read bitmap extension\n");
+			res = -EIO;
+			goto out;
+		}
+		bmap_blk = (__be32 *)bmap_bh->b_data;
+		blk = 0;
+		end = sb->s_blocksize / 4 - 1;
+	}
+
+	offset = (sbi->s_partition_size - sbi->s_reserved) % sbi->s_bmap_bits;
+	mask = ~(0xFFFFFFFFU << (offset & 31));
+	pr_debug("last word: %d %d %d\n", offset, offset / 32 + 1, mask);
+	offset = offset / 32 + 1;
+
+	if (mask) {
+		u32 old, new;
+
+		/* Mark unused bits in the last word as allocated */
+		old = be32_to_cpu(((__be32 *)bh->b_data)[offset]);
+		new = old & mask;
+		//if (old != new) {
+			((__be32 *)bh->b_data)[offset] = cpu_to_be32(new);
+			/* fix checksum */
+			//new -= old;
+			//old = be32_to_cpu(*(__be32 *)bh->b_data);
+			//*(__be32 *)bh->b_data = cpu_to_be32(old - new);
+			//mark_buffer_dirty(bh);
+		//}
+		/* correct offset for the bitmap count below */
+		//offset++;
+	}
+	while (++offset < sb->s_blocksize / 4)
+		((__be32 *)bh->b_data)[offset] = 0;
+	((__be32 *)bh->b_data)[0] = 0;
+	((__be32 *)bh->b_data)[0] = cpu_to_be32(-affs_checksum_block(sb, bh));
+	mark_buffer_dirty(bh);
+
+	/* recalculate bitmap count for last block */
+	bm--;
+	bm->bm_free = affs_count_free_bits(sb->s_blocksize - 4, bh->b_data + 4);
+
+out:
+	affs_brelse(bh);
+	affs_brelse(bmap_bh);
+	return res;
+}
+
+void affs_free_bitmap(struct super_block *sb)
+{
+	struct affs_sb_info *sbi = AFFS_SB(sb);
+
+	if (!sbi->s_bitmap)
+		return;
+
+	affs_brelse(sbi->s_bmap_bh);
+	sbi->s_bmap_bh = NULL;
+	sbi->s_last_bmap = ~0;
+	kfree(sbi->s_bitmap);
+	sbi->s_bitmap = NULL;
+}
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
new file mode 100644
index 0000000..548efd0
--- /dev/null
+++ b/fs/affs/dir.c
@@ -0,0 +1,155 @@
+/*
+ *  linux/fs/affs/dir.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
+ *
+ *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ *
+ *  affs directory handling functions
+ *
+ */
+
+#include "affs.h"
+
+static int affs_readdir(struct file *, void *, filldir_t);
+
+struct file_operations affs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= affs_readdir,
+	.fsync		= file_fsync,
+};
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations affs_dir_inode_operations = {
+	.create		= affs_create,
+	.lookup		= affs_lookup,
+	.link		= affs_link,
+	.unlink		= affs_unlink,
+	.symlink	= affs_symlink,
+	.mkdir		= affs_mkdir,
+	.rmdir		= affs_rmdir,
+	.rename		= affs_rename,
+	.setattr	= affs_notify_change,
+};
+
+static int
+affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode		*inode = filp->f_dentry->d_inode;
+	struct super_block	*sb = inode->i_sb;
+	struct buffer_head	*dir_bh;
+	struct buffer_head	*fh_bh;
+	unsigned char		*name;
+	int			 namelen;
+	u32			 i;
+	int			 hash_pos;
+	int			 chain_pos;
+	u32			 f_pos;
+	u32			 ino;
+	int			 stored;
+	int			 res;
+
+	pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)filp->f_pos);
+
+	stored = 0;
+	res    = -EIO;
+	dir_bh = NULL;
+	fh_bh  = NULL;
+	f_pos  = filp->f_pos;
+
+	if (f_pos == 0) {
+		filp->private_data = (void *)0;
+		if (filldir(dirent, ".", 1, f_pos, inode->i_ino, DT_DIR) < 0)
+			return 0;
+		filp->f_pos = f_pos = 1;
+		stored++;
+	}
+	if (f_pos == 1) {
+		if (filldir(dirent, "..", 2, f_pos, parent_ino(filp->f_dentry), DT_DIR) < 0)
+			return stored;
+		filp->f_pos = f_pos = 2;
+		stored++;
+	}
+
+	affs_lock_dir(inode);
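+	/* f_pos encodes the resume position: the low 16 bits are the offset
+	 * within the current hash chain, the upper bits the hash table slot,
+	 * both offset by 2 to make room for "." and "..".
+	 */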
+	chain_pos = (f_pos - 2) & 0xffff;
+	hash_pos  = (f_pos - 2) >> 16;
+	if (chain_pos == 0xffff) {
+		affs_warning(sb, "readdir", "More than 65535 entries in chain");
+		chain_pos = 0;
+		hash_pos++;
+		filp->f_pos = ((hash_pos << 16) | chain_pos) + 2;
+	}
+	dir_bh = affs_bread(sb, inode->i_ino);
+	if (!dir_bh)
+		goto readdir_out;
+
+	/* If the directory hasn't changed since the last call to readdir(),
+	 * we can jump directly to where we left off.
+	 */
+	ino = (u32)(long)filp->private_data;
+	if (ino && filp->f_version == inode->i_version) {
+		pr_debug("AFFS: readdir() left off=%d\n", ino);
+		goto inside;
+	}
+
+	ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
+	for (i = 0; ino && i < chain_pos; i++) {
+		fh_bh = affs_bread(sb, ino);
+		if (!fh_bh) {
+			affs_error(sb, "readdir","Cannot read block %d", i);
+			goto readdir_out;
+		}
+		ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
+		affs_brelse(fh_bh);
+		fh_bh = NULL;
+	}
+	if (ino)
+		goto inside;
+	hash_pos++;
+
+	for (; hash_pos < AFFS_SB(sb)->s_hashsize; hash_pos++) {
+		ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
+		if (!ino)
+			continue;
+		f_pos = (hash_pos << 16) + 2;
+inside:
+		do {
+			fh_bh = affs_bread(sb, ino);
+			if (!fh_bh) {
+				affs_error(sb, "readdir","Cannot read block %d", ino);
+				goto readdir_done;
+			}
+
+			namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30);
+			name = AFFS_TAIL(sb, fh_bh)->name + 1;
+			pr_debug("AFFS: readdir(): filldir(\"%.*s\", ino=%u), hash=%d, f_pos=%x\n",
+				 namelen, name, ino, hash_pos, f_pos);
+			if (filldir(dirent, name, namelen, f_pos, ino, DT_UNKNOWN) < 0)
+				goto readdir_done;
+			stored++;
+			f_pos++;
+			ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
+			affs_brelse(fh_bh);
+			fh_bh = NULL;
+		} while (ino);
+	}
+readdir_done:
+	filp->f_pos = f_pos;
+	filp->f_version = inode->i_version;
+	filp->private_data = (void *)(long)ino;
+	res = stored;
+
+readdir_out:
+	affs_brelse(dir_bh);
+	affs_brelse(fh_bh);
+	affs_unlock_dir(inode);
+	pr_debug("AFFS: readdir()=%d\n", stored);
+	return res;
+}
diff --git a/fs/affs/file.c b/fs/affs/file.c
new file mode 100644
index 0000000..6744924
--- /dev/null
+++ b/fs/affs/file.c
@@ -0,0 +1,920 @@
+/*
+ *  linux/fs/affs/file.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
+ *
+ *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ *
+ *  affs regular file handling primitives
+ */
+
+#include "affs.h"
+
+#if PAGE_SIZE < 4096
+#error PAGE_SIZE must be at least 4096
+#endif
+
+static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
+static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
+static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
+static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
+static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos);
+static int affs_file_open(struct inode *inode, struct file *filp);
+static int affs_file_release(struct inode *inode, struct file *filp);
+
+struct file_operations affs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= affs_file_write,
+	.mmap		= generic_file_mmap,
+	.open		= affs_file_open,
+	.release	= affs_file_release,
+	.fsync		= file_fsync,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations affs_file_inode_operations = {
+	.truncate	= affs_truncate,
+	.setattr	= affs_notify_change,
+};
+
+static int
+affs_file_open(struct inode *inode, struct file *filp)
+{
+	if (atomic_read(&filp->f_count) != 1)
+		return 0;
+	pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
+	AFFS_I(inode)->i_opencnt++;
+	return 0;
+}
+
+static int
+affs_file_release(struct inode *inode, struct file *filp)
+{
+	if (atomic_read(&filp->f_count) != 0)
+		return 0;
+	pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
+	AFFS_I(inode)->i_opencnt--;
+	if (!AFFS_I(inode)->i_opencnt)
+		affs_free_prealloc(inode);
+
+	return 0;
+}
+
+static int
+affs_grow_extcache(struct inode *inode, u32 lc_idx)
+{
+	struct super_block	*sb = inode->i_sb;
+	struct buffer_head	*bh;
+	u32 lc_max;
+	int i, j, key;
+
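+	/* The cache page is split in two: the first half holds the linear
+	 * cache (every n'th extension key), the second half the associative
+	 * cache of recently used extension blocks.
+	 */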
+	if (!AFFS_I(inode)->i_lc) {
+		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
+		if (!ptr)
+			return -ENOMEM;
+		AFFS_I(inode)->i_lc = (u32 *)ptr;
+		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
+	}
+
+	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;
+
+	if (AFFS_I(inode)->i_extcnt > lc_max) {
+		u32 lc_shift, lc_mask, tmp, off;
+
+		/* need to recalculate linear cache, start from old size */
+		lc_shift = AFFS_I(inode)->i_lc_shift;
+		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
+		for (; tmp; tmp >>= 1)
+			lc_shift++;
+		lc_mask = (1 << lc_shift) - 1;
+
+		/* fix idx and old size to new shift */
+		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
+		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
+
+		/* first shrink old cache to make more space */
+		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
+		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
+			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];
+
+		AFFS_I(inode)->i_lc_shift = lc_shift;
+		AFFS_I(inode)->i_lc_mask = lc_mask;
+	}
+
+	/* fill cache to the needed index */
+	i = AFFS_I(inode)->i_lc_size;
+	AFFS_I(inode)->i_lc_size = lc_idx + 1;
+	for (; i <= lc_idx; i++) {
+		if (!i) {
+			AFFS_I(inode)->i_lc[0] = inode->i_ino;
+			continue;
+		}
+		key = AFFS_I(inode)->i_lc[i - 1];
+		j = AFFS_I(inode)->i_lc_mask + 1;
+		// unlock cache
+		for (; j > 0; j--) {
+			bh = affs_bread(sb, key);
+			if (!bh)
+				goto err;
+			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
+			affs_brelse(bh);
+		}
+		// lock cache
+		AFFS_I(inode)->i_lc[i] = key;
+	}
+
+	return 0;
+
+err:
+	// lock cache
+	return -EIO;
+}
+
+static struct buffer_head *
+affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *new_bh;
+	u32 blocknr, tmp;
+
+	blocknr = affs_alloc_block(inode, bh->b_blocknr);
+	if (!blocknr)
+		return ERR_PTR(-ENOSPC);
+
+	new_bh = affs_getzeroblk(sb, blocknr);
+	if (!new_bh) {
+		affs_free_block(sb, blocknr);
+		return ERR_PTR(-EIO);
+	}
+
+	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
+	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
+	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
+	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
+	affs_fix_checksum(sb, new_bh);
+
+	mark_buffer_dirty_inode(new_bh, inode);
+
+	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
+	if (tmp)
+		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
+	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
+	affs_adjust_checksum(bh, blocknr - tmp);
+	mark_buffer_dirty_inode(bh, inode);
+
+	AFFS_I(inode)->i_extcnt++;
+	mark_inode_dirty(inode);
+
+	return new_bh;
+}
+
+static inline struct buffer_head *
+affs_get_extblock(struct inode *inode, u32 ext)
+{
+	/* inline the simplest case: same extended block as last time */
+	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
+	if (ext == AFFS_I(inode)->i_ext_last)
+		atomic_inc(&bh->b_count);
+	else
+		/* we have to do more (not inlined) */
+		bh = affs_get_extblock_slow(inode, ext);
+
+	return bh;
+}
+
+static struct buffer_head *
+affs_get_extblock_slow(struct inode *inode, u32 ext)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh;
+	u32 ext_key;
+	u32 lc_idx, lc_off, ac_idx;
+	u32 tmp, idx;
+
+	if (ext == AFFS_I(inode)->i_ext_last + 1) {
+		/* read the next extended block from the current one */
+		bh = AFFS_I(inode)->i_ext_bh;
+		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
+		if (ext < AFFS_I(inode)->i_extcnt)
+			goto read_ext;
+		if (ext > AFFS_I(inode)->i_extcnt)
+			BUG();
+		bh = affs_alloc_extblock(inode, bh, ext);
+		if (IS_ERR(bh))
+			return bh;
+		goto store_ext;
+	}
+
+	if (ext == 0) {
+		/* we seek back to the file header block */
+		ext_key = inode->i_ino;
+		goto read_ext;
+	}
+
+	if (ext >= AFFS_I(inode)->i_extcnt) {
+		struct buffer_head *prev_bh;
+
+		/* allocate a new extended block */
+		if (ext > AFFS_I(inode)->i_extcnt)
+			BUG();
+
+		/* get previous extended block */
+		prev_bh = affs_get_extblock(inode, ext - 1);
+		if (IS_ERR(prev_bh))
+			return prev_bh;
+		bh = affs_alloc_extblock(inode, prev_bh, ext);
+		affs_brelse(prev_bh);
+		if (IS_ERR(bh))
+			return bh;
+		goto store_ext;
+	}
+
+again:
+	/* check if there is an extended cache and whether it's large enough */
+	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
+	lc_off = ext & AFFS_I(inode)->i_lc_mask;
+
+	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
+		int err;
+
+		err = affs_grow_extcache(inode, lc_idx);
+		if (err)
+			return ERR_PTR(err);
+		goto again;
+	}
+
+	/* every n'th extension key is found directly in the linear cache */
+	if (!lc_off) {
+		ext_key = AFFS_I(inode)->i_lc[lc_idx];
+		goto read_ext;
+	}
+
+	/* maybe it's still in the associative cache */
+	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
+	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
+		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
+		goto read_ext;
+	}
+
+	/* try to find one of the previous extended blocks */
+	tmp = ext;
+	idx = ac_idx;
+	while (--tmp, --lc_off > 0) {
+		idx = (idx - 1) & AFFS_AC_MASK;
+		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
+			ext_key = AFFS_I(inode)->i_ac[idx].key;
+			goto find_ext;
+		}
+	}
+
+	/* fall back to the linear cache */
+	ext_key = AFFS_I(inode)->i_lc[lc_idx];
+find_ext:
+	/* read all extended blocks until we find the one we need */
+	//unlock cache
+	do {
+		bh = affs_bread(sb, ext_key);
+		if (!bh)
+			goto err_bread;
+		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
+		affs_brelse(bh);
+		tmp++;
+	} while (tmp < ext);
+	//lock cache
+
+	/* store it in the associative cache */
+	// recalculate ac_idx?
+	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
+	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;
+
+read_ext:
+	/* finally read the right extended block */
+	//unlock cache
+	bh = affs_bread(sb, ext_key);
+	if (!bh)
+		goto err_bread;
+	//lock cache
+
+store_ext:
+	/* release old cached extended block and store the new one */
+	affs_brelse(AFFS_I(inode)->i_ext_bh);
+	AFFS_I(inode)->i_ext_last = ext;
+	AFFS_I(inode)->i_ext_bh = bh;
+	atomic_inc(&bh->b_count);
+
+	return bh;
+
+err_bread:
+	affs_brelse(bh);
+	return ERR_PTR(-EIO);
+}
+
+static int
+affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
+{
+	struct super_block	*sb = inode->i_sb;
+	struct buffer_head	*ext_bh;
+	u32			 ext;
+
+	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
+
+	if (block > (sector_t)0x7fffffffUL)
+		BUG();
+
+	if (block >= AFFS_I(inode)->i_blkcnt) {
+		if (block > AFFS_I(inode)->i_blkcnt || !create)
+			goto err_big;
+	} else
+		create = 0;
+
+	//lock cache
+	affs_lock_ext(inode);
+
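+	/* Each extended block holds s_hashsize data block pointers, so the
+	 * file block number splits into an extension index and an offset
+	 * within that extension block.
+	 */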
+	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
+	block -= ext * AFFS_SB(sb)->s_hashsize;
+	ext_bh = affs_get_extblock(inode, ext);
+	if (IS_ERR(ext_bh))
+		goto err_ext;
+	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
+
+	if (create) {
+		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
+		if (!blocknr)
+			goto err_alloc;
+		set_buffer_new(bh_result);
+		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
+		AFFS_I(inode)->i_blkcnt++;
+
+		/* store new block */
+		if (bh_result->b_blocknr)
+			affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
+		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
+		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
+		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
+		bh_result->b_blocknr = blocknr;
+
+		if (!block) {
+			/* insert first block into header block */
+			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
+			if (tmp)
+				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
+			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
+			affs_adjust_checksum(ext_bh, blocknr - tmp);
+		}
+	}
+
+	affs_brelse(ext_bh);
+	//unlock cache
+	affs_unlock_ext(inode);
+	return 0;
+
+err_big:
+	affs_error(inode->i_sb,"get_block","strange block request %d", block);
+	return -EIO;
+err_ext:
+	// unlock cache
+	affs_unlock_ext(inode);
+	return PTR_ERR(ext_bh);
+err_alloc:
+	brelse(ext_bh);
+	clear_buffer_mapped(bh_result);
+	bh_result->b_bdev = NULL;
+	// unlock cache
+	affs_unlock_ext(inode);
+	return -ENOSPC;
+}
+
+static int affs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, affs_get_block, wbc);
+}
+static int affs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, affs_get_block);
+}
+static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return cont_prepare_write(page, from, to, affs_get_block,
+		&AFFS_I(page->mapping->host)->mmu_private);
+}
+static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,affs_get_block);
+}
+struct address_space_operations affs_aops = {
+	.readpage = affs_readpage,
+	.writepage = affs_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = affs_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = _affs_bmap
+};
+
+static inline struct buffer_head *
+affs_bread_ino(struct inode *inode, int block, int create)
+{
+	struct buffer_head *bh, tmp_bh;
+	int err;
+
+	tmp_bh.b_state = 0;
+	err = affs_get_block(inode, block, &tmp_bh, create);
+	if (!err) {
+		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
+		if (bh) {
+			bh->b_state |= tmp_bh.b_state;
+			return bh;
+		}
+		err = -EIO;
+	}
+	return ERR_PTR(err);
+}
+
+static inline struct buffer_head *
+affs_getzeroblk_ino(struct inode *inode, int block)
+{
+	struct buffer_head *bh, tmp_bh;
+	int err;
+
+	tmp_bh.b_state = 0;
+	err = affs_get_block(inode, block, &tmp_bh, 1);
+	if (!err) {
+		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
+		if (bh) {
+			bh->b_state |= tmp_bh.b_state;
+			return bh;
+		}
+		err = -EIO;
+	}
+	return ERR_PTR(err);
+}
+
+static inline struct buffer_head *
+affs_getemptyblk_ino(struct inode *inode, int block)
+{
+	struct buffer_head *bh, tmp_bh;
+	int err;
+
+	tmp_bh.b_state = 0;
+	err = affs_get_block(inode, block, &tmp_bh, 1);
+	if (!err) {
+		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
+		if (bh) {
+			bh->b_state |= tmp_bh.b_state;
+			return bh;
+		}
+		err = -EIO;
+	}
+	return ERR_PTR(err);
+}
+
+static ssize_t
+affs_file_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	ssize_t retval;
+
+	retval = generic_file_write(file, buf, count, ppos);
+	if (retval > 0) {
+		struct inode *inode = file->f_dentry->d_inode;
+		inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+	}
+	return retval;
+}
+
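+/* In OFS mode every data block starts with a small header (AFFS_DATA_HEAD),
+ * so file data cannot be mapped straight into the page cache and is instead
+ * copied block by block by the helpers below.
+ */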
+static int
+affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh;
+	char *data;
+	u32 bidx, boff, bsize;
+	u32 tmp;
+
+	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
+	if (from > to || to > PAGE_CACHE_SIZE)
+		BUG();
+	kmap(page);
+	data = page_address(page);
+	bsize = AFFS_SB(sb)->s_data_blksize;
+	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	bidx = tmp / bsize;
+	boff = tmp % bsize;
+
+	while (from < to) {
+		bh = affs_bread_ino(inode, bidx, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+		tmp = min(bsize - boff, to - from);
+		if (from + tmp > to || tmp > bsize)
+			BUG();
+		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
+		affs_brelse(bh);
+		bidx++;
+		from += tmp;
+		boff = 0;
+	}
+	flush_dcache_page(page);
+	kunmap(page);
+	return 0;
+}
+
+static int
+affs_extent_file_ofs(struct inode *inode, u32 newsize)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh, *prev_bh;
+	u32 bidx, boff;
+	u32 size, bsize;
+	u32 tmp;
+
+	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
+	bsize = AFFS_SB(sb)->s_data_blksize;
+	bh = NULL;
+	size = AFFS_I(inode)->mmu_private;
+	bidx = size / bsize;
+	boff = size % bsize;
+	if (boff) {
+		bh = affs_bread_ino(inode, bidx, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+		tmp = min(bsize - boff, newsize - size);
+		if (boff + tmp > bsize || tmp > bsize)
+			BUG();
+		memset(AFFS_DATA(bh) + boff, 0, tmp);
+		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
+		affs_fix_checksum(sb, bh);
+		mark_buffer_dirty_inode(bh, inode);
+		size += tmp;
+		bidx++;
+	} else if (bidx) {
+		bh = affs_bread_ino(inode, bidx - 1, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+	}
+
+	while (size < newsize) {
+		prev_bh = bh;
+		bh = affs_getzeroblk_ino(inode, bidx);
+		if (IS_ERR(bh))
+			goto out;
+		tmp = min(bsize, newsize - size);
+		if (tmp > bsize)
+			BUG();
+		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
+		affs_fix_checksum(sb, bh);
+		bh->b_state &= ~(1UL << BH_New);
+		mark_buffer_dirty_inode(bh, inode);
+		if (prev_bh) {
+			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
+			if (tmp)
+				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
+			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
+			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
+			mark_buffer_dirty_inode(prev_bh, inode);
+			affs_brelse(prev_bh);
+		}
+		size += bsize;
+		bidx++;
+	}
+	affs_brelse(bh);
+	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
+	return 0;
+
+out:
+	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
+	return PTR_ERR(bh);
+}
+
+static int
+affs_readpage_ofs(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	u32 to;
+	int err;
+
+	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
+	to = PAGE_CACHE_SIZE;
+	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
+		to = inode->i_size & ~PAGE_CACHE_MASK;
+		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+	}
+
+	err = affs_do_readpage_ofs(file, page, 0, to);
+	if (!err)
+		SetPageUptodate(page);
+	unlock_page(page);
+	return err;
+}
+
+static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	u32 size, offset;
+	u32 tmp;
+	int err = 0;
+
+	pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
+	offset = page->index << PAGE_CACHE_SHIFT;
+	if (offset + from > AFFS_I(inode)->mmu_private) {
+		err = affs_extent_file_ofs(inode, offset + from);
+		if (err)
+			return err;
+	}
+	size = inode->i_size;
+
+	if (PageUptodate(page))
+		return 0;
+
+	if (from) {
+		err = affs_do_readpage_ofs(file, page, 0, from);
+		if (err)
+			return err;
+	}
+	if (to < PAGE_CACHE_SIZE) {
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		if (size > offset + to) {
+			if (size < offset + PAGE_CACHE_SIZE)
+				tmp = size & ~PAGE_CACHE_MASK;
+			else
+				tmp = PAGE_CACHE_SIZE;
+			err = affs_do_readpage_ofs(file, page, to, tmp);
+		}
+	}
+	return err;
+}
+
+static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh, *prev_bh;
+	char *data;
+	u32 bidx, boff, bsize;
+	u32 tmp;
+	int written;
+
+	pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
+	bsize = AFFS_SB(sb)->s_data_blksize;
+	data = page_address(page);
+
+	bh = NULL;
+	written = 0;
+	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	bidx = tmp / bsize;
+	boff = tmp % bsize;
+	if (boff) {
+		bh = affs_bread_ino(inode, bidx, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+		tmp = min(bsize - boff, to - from);
+		if (boff + tmp > bsize || tmp > bsize)
+			BUG();
+		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
+		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
+		affs_fix_checksum(sb, bh);
+		mark_buffer_dirty_inode(bh, inode);
+		written += tmp;
+		from += tmp;
+		bidx++;
+	} else if (bidx) {
+		bh = affs_bread_ino(inode, bidx - 1, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+	}
+	while (from + bsize <= to) {
+		prev_bh = bh;
+		bh = affs_getemptyblk_ino(inode, bidx);
+		if (IS_ERR(bh))
+			goto out;
+		memcpy(AFFS_DATA(bh), data + from, bsize);
+		if (buffer_new(bh)) {
+			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
+			AFFS_DATA_HEAD(bh)->next = 0;
+			bh->b_state &= ~(1UL << BH_New);
+			if (prev_bh) {
+				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
+				if (tmp)
+					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
+				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
+				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
+				mark_buffer_dirty_inode(prev_bh, inode);
+			}
+		}
+		affs_brelse(prev_bh);
+		affs_fix_checksum(sb, bh);
+		mark_buffer_dirty_inode(bh, inode);
+		written += bsize;
+		from += bsize;
+		bidx++;
+	}
+	if (from < to) {
+		prev_bh = bh;
+		bh = affs_bread_ino(inode, bidx, 1);
+		if (IS_ERR(bh))
+			goto out;
+		tmp = min(bsize, to - from);
+		if (tmp > bsize)
+			BUG();
+		memcpy(AFFS_DATA(bh), data + from, tmp);
+		if (buffer_new(bh)) {
+			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
+			AFFS_DATA_HEAD(bh)->next = 0;
+			bh->b_state &= ~(1UL << BH_New);
+			if (prev_bh) {
+				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
+				if (tmp)
+					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
+				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
+				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
+				mark_buffer_dirty_inode(prev_bh, inode);
+			}
+		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
+			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
+		affs_brelse(prev_bh);
+		affs_fix_checksum(sb, bh);
+		mark_buffer_dirty_inode(bh, inode);
+		written += tmp;
+		from += tmp;
+		bidx++;
+	}
+	SetPageUptodate(page);
+
+done:
+	affs_brelse(bh);
+	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	if (tmp > inode->i_size)
+		inode->i_size = AFFS_I(inode)->mmu_private = tmp;
+
+	return written;
+
+out:
+	bh = prev_bh;
+	if (!written)
+		written = PTR_ERR(bh);
+	goto done;
+}
+
+struct address_space_operations affs_aops_ofs = {
+	.readpage = affs_readpage_ofs,
+	//.writepage = affs_writepage_ofs,
+	//.sync_page = affs_sync_page_ofs,
+	.prepare_write = affs_prepare_write_ofs,
+	.commit_write = affs_commit_write_ofs
+};
+
+/* Free any preallocated blocks. */
+
+void
+affs_free_prealloc(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
+
+	while (AFFS_I(inode)->i_pa_cnt) {
+		AFFS_I(inode)->i_pa_cnt--;
+		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
+	}
+}
+
+/* Truncate (or enlarge) a file to the requested size. */
+
+void
+affs_truncate(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	u32 ext, ext_key;
+	u32 last_blk, blkcnt, blk;
+	u32 size;
+	struct buffer_head *ext_bh;
+	int i;
+
+	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
+		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
+
+	last_blk = 0;
+	ext = 0;
+	if (inode->i_size) {
+		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
+		ext = last_blk / AFFS_SB(sb)->s_hashsize;
+	}
+
+	if (inode->i_size > AFFS_I(inode)->mmu_private) {
+		struct address_space *mapping = inode->i_mapping;
+		struct page *page;
+		u32 size = inode->i_size - 1;
+		int res;
+
+		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
+		if (!page)
+			return;
+		size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
+		res = mapping->a_ops->prepare_write(NULL, page, size, size);
+		if (!res)
+			res = mapping->a_ops->commit_write(NULL, page, size, size);
+		unlock_page(page);
+		page_cache_release(page);
+		mark_inode_dirty(inode);
+		return;
+	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
+		return;
+
+	// lock cache
+	ext_bh = affs_get_extblock(inode, ext);
+	if (IS_ERR(ext_bh)) {
+		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
+			     ext, PTR_ERR(ext_bh));
+		return;
+	}
+	if (AFFS_I(inode)->i_lc) {
+		/* clear linear cache */
+		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
+		if (AFFS_I(inode)->i_lc_size > i) {
+			AFFS_I(inode)->i_lc_size = i;
+			for (; i < AFFS_LC_SIZE; i++)
+				AFFS_I(inode)->i_lc[i] = 0;
+		}
+		/* clear associative cache */
+		for (i = 0; i < AFFS_AC_SIZE; i++)
+			if (AFFS_I(inode)->i_ac[i].ext >= ext)
+				AFFS_I(inode)->i_ac[i].ext = 0;
+	}
+	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
+
+	blkcnt = AFFS_I(inode)->i_blkcnt;
+	i = 0;
+	blk = last_blk;
+	if (inode->i_size) {
+		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
+		blk++;
+	} else
+		AFFS_HEAD(ext_bh)->first_data = 0;
+	size = AFFS_SB(sb)->s_hashsize;
+	if (size > blkcnt - blk + i)
+		size = blkcnt - blk + i;
+	for (; i < size; i++, blk++) {
+		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
+		AFFS_BLOCK(sb, ext_bh, i) = 0;
+	}
+	AFFS_TAIL(sb, ext_bh)->extension = 0;
+	affs_fix_checksum(sb, ext_bh);
+	mark_buffer_dirty_inode(ext_bh, inode);
+	affs_brelse(ext_bh);
+
+	if (inode->i_size) {
+		AFFS_I(inode)->i_blkcnt = last_blk + 1;
+		AFFS_I(inode)->i_extcnt = ext + 1;
+		if (AFFS_SB(sb)->s_flags & SF_OFS) {
+			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
+			u32 tmp;
+			if (IS_ERR(bh)) {
+				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
+					     last_blk, PTR_ERR(bh));
+				return;
+			}
+			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
+			AFFS_DATA_HEAD(bh)->next = 0;
+			affs_adjust_checksum(bh, -tmp);
+			affs_brelse(bh);
+		}
+	} else {
+		AFFS_I(inode)->i_blkcnt = 0;
+		AFFS_I(inode)->i_extcnt = 1;
+	}
+	AFFS_I(inode)->mmu_private = inode->i_size;
+	// unlock cache
+
+	while (ext_key) {
+		ext_bh = affs_bread(sb, ext_key);
+		size = AFFS_SB(sb)->s_hashsize;
+		if (size > blkcnt - blk)
+			size = blkcnt - blk;
+		for (i = 0; i < size; i++, blk++)
+			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
+		affs_free_block(sb, ext_key);
+		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
+		affs_brelse(ext_bh);
+	}
+	affs_free_prealloc(inode);
+}
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
new file mode 100644
index 0000000..7aa6f20
--- /dev/null
+++ b/fs/affs/inode.c
@@ -0,0 +1,411 @@
+/*
+ *  linux/fs/affs/inode.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
+ *
+ *  (C) 1992  Eric Youngdale Modified for ISO9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ */
+
+#include "affs.h"
+
+extern struct inode_operations affs_symlink_inode_operations;
+extern struct timezone sys_tz;
+
+void
+affs_read_inode(struct inode *inode)
+{
+	struct super_block	*sb = inode->i_sb;
+	struct affs_sb_info	*sbi = AFFS_SB(sb);
+	struct buffer_head	*bh;
+	struct affs_head	*head;
+	struct affs_tail	*tail;
+	u32			 block;
+	u32			 size;
+	u32			 prot;
+	u16			 id;
+
+	pr_debug("AFFS: read_inode(%lu)\n",inode->i_ino);
+
+	block = inode->i_ino;
+	bh = affs_bread(sb, block);
+	if (!bh) {
+		affs_warning(sb, "read_inode", "Cannot read block %d", block);
+		goto bad_inode;
+	}
+	if (affs_checksum_block(sb, bh) || be32_to_cpu(AFFS_HEAD(bh)->ptype) != T_SHORT) {
+		affs_warning(sb,"read_inode",
+			   "Checksum or type (ptype=%d) error on inode %d",
+			   AFFS_HEAD(bh)->ptype, block);
+		goto bad_inode;
+	}
+
+	head = AFFS_HEAD(bh);
+	tail = AFFS_TAIL(sb, bh);
+	prot = be32_to_cpu(tail->protect);
+
+	inode->i_size = 0;
+	inode->i_nlink = 1;
+	inode->i_mode = 0;
+	AFFS_I(inode)->i_extcnt = 1;
+	AFFS_I(inode)->i_ext_last = ~1;
+	AFFS_I(inode)->i_protect = prot;
+	AFFS_I(inode)->i_opencnt = 0;
+	AFFS_I(inode)->i_blkcnt = 0;
+	AFFS_I(inode)->i_lc = NULL;
+	AFFS_I(inode)->i_lc_size = 0;
+	AFFS_I(inode)->i_lc_shift = 0;
+	AFFS_I(inode)->i_lc_mask = 0;
+	AFFS_I(inode)->i_ac = NULL;
+	AFFS_I(inode)->i_ext_bh = NULL;
+	AFFS_I(inode)->mmu_private = 0;
+	AFFS_I(inode)->i_lastalloc = 0;
+	AFFS_I(inode)->i_pa_cnt = 0;
+
+	if (sbi->s_flags & SF_SETMODE)
+		inode->i_mode = sbi->s_mode;
+	else
+		inode->i_mode = prot_to_mode(prot);
+
+	id = be16_to_cpu(tail->uid);
+	if (id == 0 || sbi->s_flags & SF_SETUID)
+		inode->i_uid = sbi->s_uid;
+	else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
+		inode->i_uid = 0;
+	else
+		inode->i_uid = id;
+
+	id = be16_to_cpu(tail->gid);
+	if (id == 0 || sbi->s_flags & SF_SETGID)
+		inode->i_gid = sbi->s_gid;
+	else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
+		inode->i_gid = 0;
+	else
+		inode->i_gid = id;
+
+	switch (be32_to_cpu(tail->stype)) {
+	case ST_ROOT:
+		inode->i_uid = sbi->s_uid;
+		inode->i_gid = sbi->s_gid;
+		/* fall through */
+	case ST_USERDIR:
+		if (be32_to_cpu(tail->stype) == ST_USERDIR ||
+		    sbi->s_flags & SF_SETMODE) {
+			if (inode->i_mode & S_IRUSR)
+				inode->i_mode |= S_IXUSR;
+			if (inode->i_mode & S_IRGRP)
+				inode->i_mode |= S_IXGRP;
+			if (inode->i_mode & S_IROTH)
+				inode->i_mode |= S_IXOTH;
+			inode->i_mode |= S_IFDIR;
+		} else
+			inode->i_mode = S_IRUGO | S_IXUGO | S_IWUSR | S_IFDIR;
+		if (tail->link_chain)
+			inode->i_nlink = 2;
+		/* Maybe this should be controlled by a mount parameter? */
+		//inode->i_mode |= S_ISVTX;
+		inode->i_op = &affs_dir_inode_operations;
+		inode->i_fop = &affs_dir_operations;
+		break;
+	case ST_LINKDIR:
+#if 0
+		affs_warning(sb, "read_inode", "inode is LINKDIR");
+		goto bad_inode;
+#else
+		inode->i_mode |= S_IFDIR;
+		inode->i_op = NULL;
+		inode->i_fop = NULL;
+		break;
+#endif
+	case ST_LINKFILE:
+		affs_warning(sb, "read_inode", "inode is LINKFILE");
+		goto bad_inode;
+	case ST_FILE:
+		size = be32_to_cpu(tail->size);
+		inode->i_mode |= S_IFREG;
+		AFFS_I(inode)->mmu_private = inode->i_size = size;
+		if (inode->i_size) {
+			AFFS_I(inode)->i_blkcnt = (size - 1) /
+					       sbi->s_data_blksize + 1;
+			AFFS_I(inode)->i_extcnt = (AFFS_I(inode)->i_blkcnt - 1) /
+					       sbi->s_hashsize + 1;
+		}
+		if (tail->link_chain)
+			inode->i_nlink = 2;
+		inode->i_mapping->a_ops = (sbi->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
+		inode->i_op = &affs_file_inode_operations;
+		inode->i_fop = &affs_file_operations;
+		break;
+	case ST_SOFTLINK:
+		inode->i_mode |= S_IFLNK;
+		inode->i_op = &affs_symlink_inode_operations;
+		inode->i_data.a_ops = &affs_symlink_aops;
+		break;
+	}
+
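+	/* Amiga timestamps are days/minutes/ticks (1/50 s) since 1 Jan 1978;
+	 * (8 * 365 + 2) days shift that to the Unix epoch, and adding the
+	 * timezone offset converts the local Amiga time to UTC.
+	 */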
+	inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec
+		       = (be32_to_cpu(tail->change.days) * (24 * 60 * 60) +
+		         be32_to_cpu(tail->change.mins) * 60 +
+			 be32_to_cpu(tail->change.ticks) / 50 +
+			 ((8 * 365 + 2) * 24 * 60 * 60)) +
+			 sys_tz.tz_minuteswest * 60;
+	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
+	affs_brelse(bh);
+	return;
+
+bad_inode:
+	make_bad_inode(inode);
+	affs_brelse(bh);
+	return;
+}
+
+int
+affs_write_inode(struct inode *inode, int unused)
+{
+	struct super_block	*sb = inode->i_sb;
+	struct buffer_head	*bh;
+	struct affs_tail	*tail;
+	uid_t			 uid;
+	gid_t			 gid;
+
+	pr_debug("AFFS: write_inode(%lu)\n",inode->i_ino);
+
+	if (!inode->i_nlink)
+		// possibly free block
+		return 0;
+	bh = affs_bread(sb, inode->i_ino);
+	if (!bh) {
+		affs_error(sb,"write_inode","Cannot read block %lu",inode->i_ino);
+		return -EIO;
+	}
+	tail = AFFS_TAIL(sb, bh);
+	if (tail->stype == cpu_to_be32(ST_ROOT)) {
+		secs_to_datestamp(inode->i_mtime.tv_sec,&AFFS_ROOT_TAIL(sb, bh)->root_change);
+	} else {
+		tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect);
+		tail->size = cpu_to_be32(inode->i_size);
+		secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change);
+		if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
+			uid = inode->i_uid;
+			gid = inode->i_gid;
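+			/* MUFS swaps the ids 0 and 0xFFFF on disk; the XOR
+			 * below flips them once truncated to the 16-bit
+			 * on-disk field.
+			 */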
+			if (AFFS_SB(sb)->s_flags & SF_MUFS) {
+				if (inode->i_uid == 0 || inode->i_uid == 0xFFFF)
+					uid = inode->i_uid ^ ~0;
+				if (inode->i_gid == 0 || inode->i_gid == 0xFFFF)
+					gid = inode->i_gid ^ ~0;
+			}
+			if (!(AFFS_SB(sb)->s_flags & SF_SETUID))
+				tail->uid = cpu_to_be16(uid);
+			if (!(AFFS_SB(sb)->s_flags & SF_SETGID))
+				tail->gid = cpu_to_be16(gid);
+		}
+	}
+	affs_fix_checksum(sb, bh);
+	mark_buffer_dirty_inode(bh, inode);
+	affs_brelse(bh);
+	affs_free_prealloc(inode);
+	return 0;
+}
+
+int
+affs_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	pr_debug("AFFS: notify_change(%lu,0x%x)\n",inode->i_ino,attr->ia_valid);
+
+	error = inode_change_ok(inode,attr);
+	if (error)
+		goto out;
+
+	if (((attr->ia_valid & ATTR_UID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETUID)) ||
+	    ((attr->ia_valid & ATTR_GID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETGID)) ||
+	    ((attr->ia_valid & ATTR_MODE) &&
+	     (AFFS_SB(inode->i_sb)->s_flags & (SF_SETMODE | SF_IMMUTABLE)))) {
+		if (!(AFFS_SB(inode->i_sb)->s_flags & SF_QUIET))
+			error = -EPERM;
+		goto out;
+	}
+
+	error = inode_setattr(inode, attr);
+	if (!error && (attr->ia_valid & ATTR_MODE))
+		mode_to_prot(inode);
+out:
+	return error;
+}
+
+void
+affs_put_inode(struct inode *inode)
+{
+	pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
+	affs_free_prealloc(inode);
+	if (atomic_read(&inode->i_count) == 1) {
+		down(&inode->i_sem);
+		if (inode->i_size != AFFS_I(inode)->mmu_private)
+			affs_truncate(inode);
+		up(&inode->i_sem);
+	}
+}
+
+void
+affs_delete_inode(struct inode *inode)
+{
+	pr_debug("AFFS: delete_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
+	inode->i_size = 0;
+	if (S_ISREG(inode->i_mode))
+		affs_truncate(inode);
+	clear_inode(inode);
+	affs_free_block(inode->i_sb, inode->i_ino);
+}
+
+void
+affs_clear_inode(struct inode *inode)
+{
+	unsigned long cache_page = (unsigned long) AFFS_I(inode)->i_lc;
+
+	pr_debug("AFFS: clear_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
+	if (cache_page) {
+		pr_debug("AFFS: freeing ext cache\n");
+		AFFS_I(inode)->i_lc = NULL;
+		AFFS_I(inode)->i_ac = NULL;
+		free_page(cache_page);
+	}
+	affs_brelse(AFFS_I(inode)->i_ext_bh);
+	AFFS_I(inode)->i_ext_last = ~1;
+	AFFS_I(inode)->i_ext_bh = NULL;
+}
+
+struct inode *
+affs_new_inode(struct inode *dir)
+{
+	struct super_block	*sb = dir->i_sb;
+	struct inode		*inode;
+	u32			 block;
+	struct buffer_head	*bh;
+
+	if (!(inode = new_inode(sb)))
+		goto err_inode;
+
+	if (!(block = affs_alloc_block(dir, dir->i_ino)))
+		goto err_block;
+
+	bh = affs_getzeroblk(sb, block);
+	if (!bh)
+		goto err_bh;
+	mark_buffer_dirty_inode(bh, inode);
+	affs_brelse(bh);
+
+	inode->i_uid     = current->fsuid;
+	inode->i_gid     = current->fsgid;
+	inode->i_ino     = block;
+	inode->i_nlink   = 1;
+	inode->i_mtime   = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	AFFS_I(inode)->i_opencnt = 0;
+	AFFS_I(inode)->i_blkcnt = 0;
+	AFFS_I(inode)->i_lc = NULL;
+	AFFS_I(inode)->i_lc_size = 0;
+	AFFS_I(inode)->i_lc_shift = 0;
+	AFFS_I(inode)->i_lc_mask = 0;
+	AFFS_I(inode)->i_ac = NULL;
+	AFFS_I(inode)->i_ext_bh = NULL;
+	AFFS_I(inode)->mmu_private = 0;
+	AFFS_I(inode)->i_protect = 0;
+	AFFS_I(inode)->i_lastalloc = 0;
+	AFFS_I(inode)->i_pa_cnt = 0;
+	AFFS_I(inode)->i_extcnt = 1;
+	AFFS_I(inode)->i_ext_last = ~1;
+
+	insert_inode_hash(inode);
+
+	return inode;
+
+err_bh:
+	affs_free_block(sb, block);
+err_block:
+	iput(inode);
+err_inode:
+	return NULL;
+}
+
+/*
+ * Add an entry to a directory. Create the header block
+ * and insert it into the hash table.
+ */
+
+int
+affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type)
+{
+	struct super_block *sb = dir->i_sb;
+	struct buffer_head *inode_bh = NULL;
+	struct buffer_head *bh = NULL;
+	u32 block = 0;
+	int retval;
+
+	pr_debug("AFFS: add_entry(dir=%u, inode=%u, \"%*s\", type=%d)\n", (u32)dir->i_ino,
+	         (u32)inode->i_ino, (int)dentry->d_name.len, dentry->d_name.name, type);
+
+	retval = -EIO;
+	bh = affs_bread(sb, inode->i_ino);
+	if (!bh)
+		goto done;
+
+	affs_lock_link(inode);
+	switch (type) {
+	case ST_LINKFILE:
+	case ST_LINKDIR:
+		inode_bh = bh;
+		retval = -ENOSPC;
+		block = affs_alloc_block(dir, dir->i_ino);
+		if (!block)
+			goto err;
+		retval = -EIO;
+		bh = affs_getzeroblk(sb, block);
+		if (!bh)
+			goto err;
+		break;
+	default:
+		break;
+	}
+
+	AFFS_HEAD(bh)->ptype = cpu_to_be32(T_SHORT);
+	AFFS_HEAD(bh)->key = cpu_to_be32(bh->b_blocknr);
+	affs_copy_name(AFFS_TAIL(sb, bh)->name, dentry);
+	AFFS_TAIL(sb, bh)->stype = cpu_to_be32(type);
+	AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino);
+
+	if (inode_bh) {
+		__be32 chain;
+	       	chain = AFFS_TAIL(sb, inode_bh)->link_chain;
+		AFFS_TAIL(sb, bh)->original = cpu_to_be32(inode->i_ino);
+		AFFS_TAIL(sb, bh)->link_chain = chain;
+		AFFS_TAIL(sb, inode_bh)->link_chain = cpu_to_be32(block);
+		affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain));
+		mark_buffer_dirty_inode(inode_bh, inode);
+		inode->i_nlink = 2;
+		atomic_inc(&inode->i_count);
+	}
+	affs_fix_checksum(sb, bh);
+	mark_buffer_dirty_inode(bh, inode);
+	dentry->d_fsdata = (void *)(long)bh->b_blocknr;
+
+	affs_lock_dir(dir);
+	retval = affs_insert_hash(dir, bh);
+	mark_buffer_dirty_inode(bh, inode);
+	affs_unlock_dir(dir);
+	affs_unlock_link(inode);
+
+	d_instantiate(dentry, inode);
+done:
+	affs_brelse(inode_bh);
+	affs_brelse(bh);
+	return retval;
+err:
+	if (block)
+		affs_free_block(sb, block);
+	affs_unlock_link(inode);
+	goto done;
+}
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
new file mode 100644
index 0000000..d4c2d63
--- /dev/null
+++ b/fs/affs/namei.c
@@ -0,0 +1,443 @@
+/*
+ *  linux/fs/affs/namei.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ */
+
+#include "affs.h"
+
+typedef int (*toupper_t)(int);
+
+static int	 affs_toupper(int ch);
+static int	 affs_hash_dentry(struct dentry *, struct qstr *);
+static int       affs_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+static int	 affs_intl_toupper(int ch);
+static int	 affs_intl_hash_dentry(struct dentry *, struct qstr *);
+static int       affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+
+struct dentry_operations affs_dentry_operations = {
+	.d_hash		= affs_hash_dentry,
+	.d_compare	= affs_compare_dentry,
+};
+
+static struct dentry_operations affs_intl_dentry_operations = {
+	.d_hash		= affs_intl_hash_dentry,
+	.d_compare	= affs_intl_compare_dentry,
+};
+
+
+/* Simple toupper() for DOS\1 */
+
+static int
+affs_toupper(int ch)
+{
+	return ch >= 'a' && ch <= 'z' ? ch -= ('a' - 'A') : ch;
+}
+
+/* International toupper() for DOS\3 ("international") */
+
+static int
+affs_intl_toupper(int ch)
+{
+	return (ch >= 'a' && ch <= 'z') || (ch >= 0xE0
+		&& ch <= 0xFE && ch != 0xF7) ?
+		ch - ('a' - 'A') : ch;
+}
+
+static inline toupper_t
+affs_get_toupper(struct super_block *sb)
+{
+	return AFFS_SB(sb)->s_flags & SF_INTL ? affs_intl_toupper : affs_toupper;
+}
+
+/*
+ * Note: the dentry argument is the parent dentry.
+ */
+static inline int
+__affs_hash_dentry(struct dentry *dentry, struct qstr *qstr, toupper_t toupper)
+{
+	const u8 *name = qstr->name;
+	unsigned long hash;
+	int i;
+
+	i = affs_check_name(qstr->name,qstr->len);
+	if (i)
+		return i;
+
+	hash = init_name_hash();
+	i = min(qstr->len, 30u);
+	for (; i > 0; name++, i--)
+		hash = partial_name_hash(toupper(*name), hash);
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+static int
+affs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+{
+	return __affs_hash_dentry(dentry, qstr, affs_toupper);
+}
+static int
+affs_intl_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+{
+	return __affs_hash_dentry(dentry, qstr, affs_intl_toupper);
+}
+
+static inline int
+__affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b, toupper_t toupper)
+{
+	const u8 *aname = a->name;
+	const u8 *bname = b->name;
+	int len;
+
+	/* 'a' is the qstr of an already existing dentry, so the name
+	 * must be valid. 'b' must be validated first.
+	 */
+
+	if (affs_check_name(b->name,b->len))
+		return 1;
+
+	/* If the names are longer than the allowed 30 chars,
+	 * the excess is ignored, so their lengths may differ.
+	 */
+	len = a->len;
+	if (len >= 30) {
+		if (b->len < 30)
+			return 1;
+		len = 30;
+	} else if (len != b->len)
+		return 1;
+
+	for (; len > 0; len--)
+		if (toupper(*aname++) != toupper(*bname++))
+			return 1;
+
+	return 0;
+}
+
+static int
+affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	return __affs_compare_dentry(dentry, a, b, affs_toupper);
+}
+static int
+affs_intl_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	return __affs_compare_dentry(dentry, a, b, affs_intl_toupper);
+}
+
+/*
+ * NOTE! unlike strncmp, affs_match returns 1 for success, 0 for failure.
+ */
+
+static inline int
+affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
+{
+	const u8 *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+
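+	/* name2 is an on-disk BCPL string: its first byte is the length,
+	 * the characters follow.
+	 */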
+	if (len >= 30) {
+		if (*name2 < 30)
+			return 0;
+		len = 30;
+	} else if (len != *name2)
+		return 0;
+
+	for (name2++; len > 0; len--)
+		if (toupper(*name++) != toupper(*name2++))
+			return 0;
+	return 1;
+}
+
+int
+affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
+{
+	toupper_t toupper = affs_get_toupper(sb);
+	int hash;
+
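+	/* Classic AFFS name hash: start with the (truncated) length, fold in
+	 * each upper-cased character as hash * 13 + ch masked to 11 bits,
+	 * then reduce modulo the directory hash table size.
+	 */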
+	hash = len = min(len, 30u);
+	for (; len > 0; len--)
+		hash = (hash * 13 + toupper(*name++)) & 0x7ff;
+
+	return hash % AFFS_SB(sb)->s_hashsize;
+}
+
+static struct buffer_head *
+affs_find_entry(struct inode *dir, struct dentry *dentry)
+{
+	struct super_block *sb = dir->i_sb;
+	struct buffer_head *bh;
+	toupper_t toupper = affs_get_toupper(sb);
+	u32 key;
+
+	pr_debug("AFFS: find_entry(\"%.*s\")\n", (int)dentry->d_name.len, dentry->d_name.name);
+
+	bh = affs_bread(sb, dir->i_ino);
+	if (!bh)
+		return ERR_PTR(-EIO);
+
+	key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]);
+
+	for (;;) {
+		affs_brelse(bh);
+		if (key == 0)
+			return NULL;
+		bh = affs_bread(sb, key);
+		if (!bh)
+			return ERR_PTR(-EIO);
+		if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
+			return bh;
+		key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
+	}
+}
+
+struct dentry *
+affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct buffer_head *bh;
+	struct inode *inode = NULL;
+
+	pr_debug("AFFS: lookup(\"%.*s\")\n",(int)dentry->d_name.len,dentry->d_name.name);
+
+	affs_lock_dir(dir);
+	bh = affs_find_entry(dir, dentry);
+	affs_unlock_dir(dir);
+	if (IS_ERR(bh)) {
+		return ERR_PTR(PTR_ERR(bh));
+	}
+	if (bh) {
+		u32 ino = bh->b_blocknr;
+
+		/* store the real header ino in d_fsdata for faster lookups */
+		dentry->d_fsdata = (void *)(long)ino;
+		switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
+		//link to dirs disabled
+		//case ST_LINKDIR:
+		case ST_LINKFILE:
+			ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
+		}
+		affs_brelse(bh);
+		inode = iget(sb, ino);
+		if (!inode) {
+			return ERR_PTR(-EACCES);
+		}
+	}
+	dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
+	d_add(dentry, inode);
+	return NULL;
+}
+
+int
+affs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	pr_debug("AFFS: unlink(dir=%d, \"%.*s\")\n", (u32)dir->i_ino,
+		 (int)dentry->d_name.len, dentry->d_name.name);
+
+	return affs_remove_header(dentry);
+}
+
+int
+affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode	*inode;
+	int		 error;
+
+	pr_debug("AFFS: create(%lu,\"%.*s\",0%o)\n",dir->i_ino,(int)dentry->d_name.len,
+		 dentry->d_name.name,mode);
+
+	inode = affs_new_inode(dir);
+	if (!inode)
+		return -ENOSPC;
+
+	inode->i_mode = mode;
+	mode_to_prot(inode);
+	mark_inode_dirty(inode);
+
+	inode->i_op = &affs_file_inode_operations;
+	inode->i_fop = &affs_file_operations;
+	inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
+	error = affs_add_entry(dir, inode, dentry, ST_FILE);
+	if (error) {
+		inode->i_nlink = 0;
+		iput(inode);
+		return error;
+	}
+	return 0;
+}
+
+int
+affs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct inode		*inode;
+	int			 error;
+
+	pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%o)\n",dir->i_ino,
+		 (int)dentry->d_name.len,dentry->d_name.name,mode);
+
+	inode = affs_new_inode(dir);
+	if (!inode)
+		return -ENOSPC;
+
+	inode->i_mode = S_IFDIR | mode;
+	mode_to_prot(inode);
+
+	inode->i_op = &affs_dir_inode_operations;
+	inode->i_fop = &affs_dir_operations;
+
+	error = affs_add_entry(dir, inode, dentry, ST_USERDIR);
+	if (error) {
+		inode->i_nlink = 0;
+		mark_inode_dirty(inode);
+		iput(inode);
+		return error;
+	}
+	return 0;
+}
+
+int
+affs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	pr_debug("AFFS: rmdir(dir=%u, \"%.*s\")\n", (u32)dir->i_ino,
+		 (int)dentry->d_name.len, dentry->d_name.name);
+
+	return affs_remove_header(dentry);
+}
+
+int
+affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct super_block	*sb = dir->i_sb;
+	struct buffer_head	*bh;
+	struct inode		*inode;
+	char			*p;
+	int			 i, maxlen, error;
+	char			 c, lc;
+
+	pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino,
+		 (int)dentry->d_name.len,dentry->d_name.name,symname);
+
+	maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1;
+	inode  = affs_new_inode(dir);
+	if (!inode)
+		return -ENOSPC;
+
+	inode->i_op = &affs_symlink_inode_operations;
+	inode->i_data.a_ops = &affs_symlink_aops;
+	inode->i_mode = S_IFLNK | 0777;
+	mode_to_prot(inode);
+
+	error = -EIO;
+	bh = affs_bread(sb, inode->i_ino);
+	if (!bh)
+		goto err;
+	i  = 0;
+	p  = (char *)AFFS_HEAD(bh)->table;
+	lc = '/';
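+	/* Translate the POSIX target into Amiga form: for an absolute
+	 * path the volume prefix (from the volume= option) is prepended,
+	 * "./" components are dropped and each "../" becomes an empty
+	 * component, since "//" means "up one directory" on Amiga.
+	 * E.g. (illustrative) "sub/../other/file" is stored as
+	 * "sub//other/file".
+	 */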
+	if (*symname == '/') {
+		while (*symname == '/')
+			symname++;
+		while (AFFS_SB(sb)->s_volume[i])	/* Cannot overflow */
+			*p++ = AFFS_SB(sb)->s_volume[i++];
+	}
+	while (i < maxlen && (c = *symname++)) {
+		if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
+			*p++ = '/';
+			i++;
+			symname += 2;
+			lc = '/';
+		} else if (c == '.' && lc == '/' && *symname == '/') {
+			symname++;
+			lc = '/';
+		} else {
+			*p++ = c;
+			lc   = c;
+			i++;
+		}
+		if (lc == '/')
+			while (*symname == '/')
+				symname++;
+	}
+	*p = 0;
+	mark_buffer_dirty_inode(bh, inode);
+	affs_brelse(bh);
+	mark_inode_dirty(inode);
+
+	error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK);
+	if (error)
+		goto err;
+
+	return 0;
+
+err:
+	inode->i_nlink = 0;
+	mark_inode_dirty(inode);
+	iput(inode);
+	return error;
+}
+
+int
+affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+
+	pr_debug("AFFS: link(%u, %u, \"%.*s\")\n", (u32)inode->i_ino, (u32)dir->i_ino,
+		 (int)dentry->d_name.len,dentry->d_name.name);
+
+	return affs_add_entry(dir, inode, dentry, ST_LINKFILE);
+}
+
+int
+affs_rename(struct inode *old_dir, struct dentry *old_dentry,
+	    struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct super_block *sb = old_dir->i_sb;
+	struct buffer_head *bh = NULL;
+	int retval;
+
+	pr_debug("AFFS: rename(old=%u,\"%.*s\" to new=%u,\"%.*s\")\n",
+		 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
+		 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
+
+	retval = affs_check_name(new_dentry->d_name.name,new_dentry->d_name.len);
+	if (retval)
+		return retval;
+
+	/* Unlink destination if it already exists */
+	if (new_dentry->d_inode) {
+		retval = affs_remove_header(new_dentry);
+		if (retval)
+			return retval;
+	}
+
+	retval = -EIO;
+	bh = affs_bread(sb, old_dentry->d_inode->i_ino);
+	if (!bh)
+		goto done;
+
+	/* Remove header from its parent directory. */
+	affs_lock_dir(old_dir);
+	retval = affs_remove_hash(old_dir, bh);
+	affs_unlock_dir(old_dir);
+	if (retval)
+		goto done;
+
+	/* And insert it into the new directory with the new name. */
+	affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry);
+	affs_fix_checksum(sb, bh);
+	affs_lock_dir(new_dir);
+	retval = affs_insert_hash(new_dir, bh);
+	affs_unlock_dir(new_dir);
+	/* TODO: move it back to old_dir, if error? */
+
+done:
+	mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir);
+	affs_brelse(bh);
+	return retval;
+}
diff --git a/fs/affs/super.c b/fs/affs/super.c
new file mode 100644
index 0000000..9c30807
--- /dev/null
+++ b/fs/affs/super.c
@@ -0,0 +1,569 @@
+/*
+ *  linux/fs/affs/super.c
+ *
+ *  (c) 1996  Hans-Joachim Widmaier - Rewritten
+ *
+ *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
+ *
+ *  (C) 1992  Eric Youngdale - Modified for ISO 9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include "affs.h"
+
+extern struct timezone sys_tz;
+
+static int affs_statfs(struct super_block *sb, struct kstatfs *buf);
+static int affs_remount (struct super_block *sb, int *flags, char *data);
+
+static void
+affs_put_super(struct super_block *sb)
+{
+	struct affs_sb_info *sbi = AFFS_SB(sb);
+	pr_debug("AFFS: put_super()\n");
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag = cpu_to_be32(1);
+		secs_to_datestamp(get_seconds(),
+				  &AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->disk_change);
+		affs_fix_checksum(sb, sbi->s_root_bh);
+		mark_buffer_dirty(sbi->s_root_bh);
+	}
+
+	if (sbi->s_prefix)
+		kfree(sbi->s_prefix);
+	affs_free_bitmap(sb);
+	affs_brelse(sbi->s_root_bh);
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return;
+}
+
+static void
+affs_write_super(struct super_block *sb)
+{
+	int clean = 2;
+	struct affs_sb_info *sbi = AFFS_SB(sb);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		/* bitmap dirtiness check currently disabled:
+		 *	if (sbi->s_bitmap[i].bm_bh)
+		 *		if (buffer_dirty(sbi->s_bitmap[i].bm_bh))
+		 *			clean = 0;
+		 */
+		AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag = cpu_to_be32(clean);
+		secs_to_datestamp(get_seconds(),
+				  &AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->disk_change);
+		affs_fix_checksum(sb, sbi->s_root_bh);
+		mark_buffer_dirty(sbi->s_root_bh);
+		sb->s_dirt = !clean;	/* redo until bitmap synced */
+	} else
+		sb->s_dirt = 0;
+
+	pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
+}
+
+static kmem_cache_t * affs_inode_cachep;
+
+static struct inode *affs_alloc_inode(struct super_block *sb)
+{
+	struct affs_inode_info *ei;
+	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void affs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		init_MUTEX(&ei->i_link_lock);
+		init_MUTEX(&ei->i_ext_lock);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static int init_inodecache(void)
+{
+	affs_inode_cachep = kmem_cache_create("affs_inode_cache",
+					     sizeof(struct affs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (affs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(affs_inode_cachep))
+		printk(KERN_INFO "affs_inode_cache: not all structures were freed\n");
+}
+
+static struct super_operations affs_sops = {
+	.alloc_inode	= affs_alloc_inode,
+	.destroy_inode	= affs_destroy_inode,
+	.read_inode	= affs_read_inode,
+	.write_inode	= affs_write_inode,
+	.put_inode	= affs_put_inode,
+	.delete_inode	= affs_delete_inode,
+	.clear_inode	= affs_clear_inode,
+	.put_super	= affs_put_super,
+	.write_super	= affs_write_super,
+	.statfs		= affs_statfs,
+	.remount_fs	= affs_remount,
+};
+
+enum {
+	Opt_bs, Opt_mode, Opt_mufs, Opt_prefix, Opt_protect,
+	Opt_reserved, Opt_root, Opt_setgid, Opt_setuid,
+	Opt_verbose, Opt_volume, Opt_ignore, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_bs, "bs=%u"},
+	{Opt_mode, "mode=%o"},
+	{Opt_mufs, "mufs"},
+	{Opt_prefix, "prefix=%s"},
+	{Opt_protect, "protect"},
+	{Opt_reserved, "reserved=%u"},
+	{Opt_root, "root=%u"},
+	{Opt_setgid, "setgid=%u"},
+	{Opt_setuid, "setuid=%u"},
+	{Opt_verbose, "verbose"},
+	{Opt_volume, "volume=%s"},
+	{Opt_ignore, "grpquota"},
+	{Opt_ignore, "noquota"},
+	{Opt_ignore, "quota"},
+	{Opt_ignore, "usrquota"},
+	{Opt_err, NULL},
+};
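+
+/* The table above allows mounts such as (purely illustrative):
+ *
+ *	mount -t affs -o verbose,protect,mode=0755,volume=Work /dev/sda3 /mnt
+ *
+ * Unrecognised options fall through to Opt_err and the mount is rejected.
+ */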
+
+static int
+parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s32 *root,
+		int *blocksize, char **prefix, char *volume, unsigned long *mount_opts)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+
+	/* Fill in defaults */
+
+	*uid        = current->uid;
+	*gid        = current->gid;
+	*reserved   = 2;
+	*root       = -1;
+	*blocksize  = -1;
+	volume[0]   = ':';
+	volume[1]   = 0;
+	*mount_opts = 0;
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token, n, option;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_bs:
+			if (match_int(&args[0], &n))
+				return 0;
+			if (n != 512 && n != 1024 && n != 2048
+			    && n != 4096) {
+				printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
+				return 0;
+			}
+			*blocksize = n;
+			break;
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				return 0;
+			*mode = option & 0777;
+			*mount_opts |= SF_SETMODE;
+			break;
+		case Opt_mufs:
+			*mount_opts |= SF_MUFS;
+			break;
+		case Opt_prefix:
+			if (*prefix) {		/* Free any previous prefix */
+				kfree(*prefix);
+				*prefix = NULL;
+			}
+			*prefix = match_strdup(&args[0]);
+			if (!*prefix)
+				return 0;
+			*mount_opts |= SF_PREFIX;
+			break;
+		case Opt_protect:
+			*mount_opts |= SF_IMMUTABLE;
+			break;
+		case Opt_reserved:
+			if (match_int(&args[0], reserved))
+				return 0;
+			break;
+		case Opt_root:
+			if (match_int(&args[0], root))
+				return 0;
+			break;
+		case Opt_setgid:
+			if (match_int(&args[0], &option))
+				return 0;
+			*gid = option;
+			*mount_opts |= SF_SETGID;
+			break;
+		case Opt_setuid:
+			if (match_int(&args[0], &option))
+				return 0;
+			*uid = option;
+			*mount_opts |= SF_SETUID;
+			break;
+		case Opt_verbose:
+			*mount_opts |= SF_VERBOSE;
+			break;
+		case Opt_volume: {
+			char *vol = match_strdup(&args[0]);
+			if (!vol)
+				return 0;
+			strlcpy(volume, vol, 32);
+			kfree(vol);
+			break;
+		}
+		case Opt_ignore:
+		 	/* Silently ignore the quota options */
+			break;
+		default:
+			printk("AFFS: Unrecognized mount option \"%s\" "
+					"or missing value\n", p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/* This function definitely needs to be split up. Some fine day I'll
+ * hopefully have the guts to do so. Until then: sorry for the mess.
+ */
+
+static int affs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct affs_sb_info	*sbi;
+	struct buffer_head	*root_bh = NULL;
+	struct buffer_head	*boot_bh;
+	struct inode		*root_inode = NULL;
+	s32			 root_block;
+	int			 size, blocksize;
+	u32			 chksum;
+	int			 num_bm;
+	int			 i, j;
+	s32			 key;
+	uid_t			 uid;
+	gid_t			 gid;
+	int			 reserved;
+	unsigned long		 mount_flags;
+	int			 tmp_flags;	/* fix remount prototype... */
+
+	pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
+
+	sb->s_magic             = AFFS_SUPER_MAGIC;
+	sb->s_op                = &affs_sops;
+	sb->s_flags |= MS_NODIRATIME;
+
+	sbi = kmalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(*sbi));
+	init_MUTEX(&sbi->s_bmlock);
+
+	if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
+				&blocksize,&sbi->s_prefix,
+				sbi->s_volume, &mount_flags)) {
+		printk(KERN_ERR "AFFS: Error parsing options\n");
+		if (sbi->s_prefix)
+			kfree(sbi->s_prefix);
+		kfree(sbi);
+		sb->s_fs_info = NULL;
+		return -EINVAL;
+	}
+	/* N.B. after this point s_prefix must be released */
+
+	sbi->s_flags   = mount_flags;
+	sbi->s_mode    = i;
+	sbi->s_uid     = uid;
+	sbi->s_gid     = gid;
+	sbi->s_reserved= reserved;
+
+	/* Get the size of the device in 512-byte blocks.
+	 * If we later see that the partition uses bigger
+	 * blocks, we will have to change it.
+	 */
+
+	size = sb->s_bdev->bd_inode->i_size >> 9;
+	pr_debug("AFFS: initial blocksize=%d, #blocks=%d\n", 512, size);
+
+	affs_set_blocksize(sb, PAGE_SIZE);
+	/* Try to find root block. Its location depends on the block size. */
+
+	i = 512;
+	j = 4096;
+	if (blocksize > 0) {
+		i = j = blocksize;
+		size = size / (blocksize / 512);
+	}
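+	/* Worked example of the guess below when no root= option is given
+	 * (illustrative numbers): for a partition of 2097152 512-byte
+	 * sectors with 2 reserved blocks, the first try at bs=512 is block
+	 * (2 + 2097152 - 1) / 2 = 1048576; at bs=1024 size halves to
+	 * 1048576 and the guess becomes (2 + 1048576 - 1) / 2 = 524288,
+	 * and so on up to bs=4096.
+	 */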
+	for (blocksize = i, key = 0; blocksize <= j; blocksize <<= 1, size >>= 1) {
+		sbi->s_root_block = root_block;
+		if (root_block < 0)
+			sbi->s_root_block = (reserved + size - 1) / 2;
+		pr_debug("AFFS: setting blocksize to %d\n", blocksize);
+		affs_set_blocksize(sb, blocksize);
+		sbi->s_partition_size = size;
+
+		/* The root block location calculated above is not correct
+		 * if the partition size is an odd number of 512-byte
+		 * blocks (which gets rounded down when converted to
+		 * 1024-byte blocks) and there is an even number of
+		 * reserved blocks. Ideally, all partition checkers would
+		 * report the real number of blocks in the real blocksize,
+		 * but since that just cannot be done, we have to try to
+		 * find the root block anyway. In that case it is one block
+		 * after the calculated one, so we check that block too.
+		 */
+		for (num_bm = 0; num_bm < 2; num_bm++) {
+			pr_debug("AFFS: Dev %s, trying root=%u, bs=%d, "
+				"size=%d, reserved=%d\n",
+				sb->s_id,
+				sbi->s_root_block + num_bm,
+				blocksize, size, reserved);
+			root_bh = affs_bread(sb, sbi->s_root_block + num_bm);
+			if (!root_bh)
+				continue;
+			if (!affs_checksum_block(sb, root_bh) &&
+			    be32_to_cpu(AFFS_ROOT_HEAD(root_bh)->ptype) == T_SHORT &&
+			    be32_to_cpu(AFFS_ROOT_TAIL(sb, root_bh)->stype) == ST_ROOT) {
+				sbi->s_hashsize    = blocksize / 4 - 56;
+				sbi->s_root_block += num_bm;
+				key                        = 1;
+				goto got_root;
+			}
+			affs_brelse(root_bh);
+			root_bh = NULL;
+		}
+	}
+	if (!silent)
+		printk(KERN_ERR "AFFS: No valid root block on device %s\n",
+			sb->s_id);
+	goto out_error;
+
+	/* N.B. after this point bh must be released */
+got_root:
+	root_block = sbi->s_root_block;
+
+	/* Find out which kind of FS we have */
+	boot_bh = sb_bread(sb, 0);
+	if (!boot_bh) {
+		printk(KERN_ERR "AFFS: Cannot read boot block\n");
+		goto out_error;
+	}
+	chksum = be32_to_cpu(*(__be32 *)boot_bh->b_data);
+	brelse(boot_bh);
+
+	/* Dircache filesystems are compatible with non-dircache ones
+	 * for reading, but as long as the dircache itself isn't
+	 * supported here, writing to them is unsafe, so force read-only.
+	 */
+	if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
+	     || chksum == MUFS_DCOFS) && !(sb->s_flags & MS_RDONLY)) {
+		printk(KERN_NOTICE "AFFS: Dircache FS - mounting %s read only\n",
+			sb->s_id);
+		sb->s_flags |= MS_RDONLY;
+	}
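+	/* The boot block's first longword is the filesystem's DOS type tag
+	 * (e.g. "DOS\3"; it is what the verbose message below prints as
+	 * Type=...) and selects OFS vs. FFS, international mode, dircache
+	 * and the muFS variants.
+	 */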
+	switch (chksum) {
+		case MUFS_FS:
+		case MUFS_INTLFFS:
+		case MUFS_DCFFS:
+			sbi->s_flags |= SF_MUFS;
+			/* fall thru */
+		case FS_INTLFFS:
+		case FS_DCFFS:
+			sbi->s_flags |= SF_INTL;
+			break;
+		case MUFS_FFS:
+			sbi->s_flags |= SF_MUFS;
+			break;
+		case FS_FFS:
+			break;
+		case MUFS_OFS:
+			sbi->s_flags |= SF_MUFS;
+			/* fall thru */
+		case FS_OFS:
+			sbi->s_flags |= SF_OFS;
+			sb->s_flags |= MS_NOEXEC;
+			break;
+		case MUFS_DCOFS:
+		case MUFS_INTLOFS:
+			sbi->s_flags |= SF_MUFS;
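+			/* fall thru */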
+		case FS_DCOFS:
+		case FS_INTLOFS:
+			sbi->s_flags |= SF_INTL | SF_OFS;
+			sb->s_flags |= MS_NOEXEC;
+			break;
+		default:
+			printk(KERN_ERR "AFFS: Unknown filesystem on device %s: %08X\n",
+				sb->s_id, chksum);
+			goto out_error;
+	}
+
+	if (mount_flags & SF_VERBOSE) {
+		chksum = cpu_to_be32(chksum);
+		printk(KERN_NOTICE "AFFS: Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
+			AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0],
+			AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1,
+			(char *)&chksum,((char *)&chksum)[3] + '0',blocksize);
+	}
+
+	sb->s_flags |= MS_NODEV | MS_NOSUID;
+
+	sbi->s_data_blksize = sb->s_blocksize;
+	if (sbi->s_flags & SF_OFS)
+		sbi->s_data_blksize -= 24;
+
+	/* Keep super block in cache */
+	sbi->s_root_bh = root_bh;
+	/* N.B. after this point s_root_bh must be released */
+
+	tmp_flags = sb->s_flags;
+	if (affs_init_bitmap(sb, &tmp_flags))
+		goto out_error;
+	sb->s_flags = tmp_flags;
+
+	/* set up enough so that it can read an inode */
+
+	root_inode = iget(sb, root_block);
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root) {
+		printk(KERN_ERR "AFFS: Get root inode failed\n");
+		goto out_error;
+	}
+	sb->s_root->d_op = &affs_dentry_operations;
+
+	pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
+	return 0;
+
+	/*
+	 * Begin the cascaded cleanup ...
+	 */
+out_error:
+	if (root_inode)
+		iput(root_inode);
+	if (sbi->s_bitmap)
+		kfree(sbi->s_bitmap);
+	affs_brelse(root_bh);
+	if (sbi->s_prefix)
+		kfree(sbi->s_prefix);
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static int
+affs_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct affs_sb_info	*sbi = AFFS_SB(sb);
+	int			 blocksize;
+	uid_t			 uid;
+	gid_t			 gid;
+	int			 mode;
+	int			 reserved;
+	int			 root_block;
+	unsigned long		 mount_flags;
+	int			 res = 0;
+
+	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
+
+	*flags |= MS_NODIRATIME;
+
+	if (!parse_options(data,&uid,&gid,&mode,&reserved,&root_block,
+	    &blocksize,&sbi->s_prefix,sbi->s_volume,&mount_flags))
+		return -EINVAL;
+	sbi->s_flags = mount_flags;
+	sbi->s_mode  = mode;
+	sbi->s_uid   = uid;
+	sbi->s_gid   = gid;
+
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (*flags & MS_RDONLY) {
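+		/* going read-only: flush the root block until write_super
+		 * clears s_dirt, then release the in-core bitmap
+		 */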
+		sb->s_dirt = 1;
+		while (sb->s_dirt)
+			affs_write_super(sb);
+		affs_free_bitmap(sb);
+	} else
+		res = affs_init_bitmap(sb, flags);
+
+	return res;
+}
+
+static int
+affs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	int		 free;
+
+	pr_debug("AFFS: statfs() partsize=%d, reserved=%d\n",AFFS_SB(sb)->s_partition_size,
+	     AFFS_SB(sb)->s_reserved);
+
+	free          = affs_count_free_blocks(sb);
+	buf->f_type    = AFFS_SUPER_MAGIC;
+	buf->f_bsize   = sb->s_blocksize;
+	buf->f_blocks  = AFFS_SB(sb)->s_partition_size - AFFS_SB(sb)->s_reserved;
+	buf->f_bfree   = free;
+	buf->f_bavail  = free;
+	return 0;
+}
+
+static struct super_block *affs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, affs_fill_super);
+}
+
+static struct file_system_type affs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "affs",
+	.get_sb		= affs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_affs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&affs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_affs_fs(void)
+{
+	unregister_filesystem(&affs_fs_type);
+	destroy_inodecache();
+}
+
+MODULE_DESCRIPTION("Amiga filesystem support for Linux");
+MODULE_LICENSE("GPL");
+
+module_init(init_affs_fs)
+module_exit(exit_affs_fs)
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
new file mode 100644
index 0000000..426f0f0
--- /dev/null
+++ b/fs/affs/symlink.c
@@ -0,0 +1,78 @@
+/*
+ *  linux/fs/affs/symlink.c
+ *
+ *  1995  Hans-Joachim Widmaier - Modified for affs.
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  affs symlink handling code
+ */
+
+#include "affs.h"
+
+static int affs_symlink_readpage(struct file *file, struct page *page)
+{
+	struct buffer_head *bh;
+	struct inode *inode = page->mapping->host;
+	char *link = kmap(page);
+	struct slink_front *lf;
+	int err;
+	int			 i, j;
+	char			 c;
+	char			 lc;
+	char			*pf;
+
+	pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
+
+	err = -EIO;
+	bh = affs_bread(inode->i_sb, inode->i_ino);
+	if (!bh)
+		goto fail;
+	i  = 0;
+	j  = 0;
+	lf = (struct slink_front *)bh->b_data;
+	lc = 0;
+	pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
+
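+	/* translate the Amiga path back into a POSIX one: a leading
+	 * "Volume:" becomes "<prefix>Volume/" (prefix= mount option,
+	 * default "/"), and an empty component ("//", Amiga for "parent
+	 * directory") becomes "/../".  E.g. (illustrative) with
+	 * prefix=/amiga/, "Sys:Libs//foo" comes back as
+	 * "/amiga/Sys/Libs/../foo".
+	 */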
+	if (strchr(lf->symname,':')) {	/* Handle assign or volume name */
+		while (i < 1023 && (c = pf[i]))
+			link[i++] = c;
+		while (i < 1023 && lf->symname[j] != ':')
+			link[i++] = lf->symname[j++];
+		if (i < 1023)
+			link[i++] = '/';
+		j++;
+		lc = '/';
+	}
+	while (i < 1023 && (c = lf->symname[j])) {
+		if (c == '/' && lc == '/' && i < 1020) {	/* parent dir */
+			link[i++] = '.';
+			link[i++] = '.';
+		}
+		link[i++] = c;
+		lc = c;
+		j++;
+	}
+	link[i] = '\0';
+	affs_brelse(bh);
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+fail:
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return err;
+}
+
+struct address_space_operations affs_symlink_aops = {
+	.readpage	= affs_symlink_readpage,
+};
+
+struct inode_operations affs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.setattr	= affs_notify_change,
+};
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
new file mode 100644
index 0000000..4029c9d
--- /dev/null
+++ b/fs/afs/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for Red Hat Linux AFS client.
+#
+
+#CFLAGS += -finstrument-functions
+
+kafs-objs := \
+	callback.o \
+	cell.o \
+	cmservice.o \
+	dir.o \
+	file.o \
+	fsclient.o \
+	inode.o \
+	kafsasyncd.o \
+	kafstimod.o \
+	main.o \
+	misc.o \
+	mntpt.o \
+	proc.o \
+	server.o \
+	super.o \
+	vlclient.o \
+	vlocation.o \
+	vnode.o \
+	volume.o
+
+obj-$(CONFIG_AFS_FS)  := kafs.o
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
new file mode 100644
index 0000000..9eb7722
--- /dev/null
+++ b/fs/afs/cache.h
@@ -0,0 +1,27 @@
+/* cache.h: AFS local cache management interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_CACHE_H
+#define _LINUX_AFS_CACHE_H
+
+#undef AFS_CACHING_SUPPORT
+
+#include <linux/mm.h>
+#ifdef AFS_CACHING_SUPPORT
+#include <linux/cachefs.h>
+#endif
+#include "types.h"
+
+#ifdef __KERNEL__
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_CACHE_H */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
new file mode 100644
index 0000000..2fd62f8
--- /dev/null
+++ b/fs/afs/callback.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ *          David Howells <dhowells@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include "server.h"
+#include "vnode.h"
+#include "internal.h"
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to request callback state (re-)initialisation
+ */
+int SRXAFSCM_InitCallBackState(struct afs_server *server)
+{
+	struct list_head callbacks;
+
+	_enter("%p", server);
+
+	INIT_LIST_HEAD(&callbacks);
+
+	/* transfer the callback list from the server to a temp holding area */
+	spin_lock(&server->cb_lock);
+
+	list_add(&callbacks, &server->cb_promises);
+	list_del_init(&server->cb_promises);
+
+	/* munch our way through the list, grabbing the inode, dropping all the
+	 * locks and regetting them in the right order
+	 */
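+	/* (vnode->lock nests outside cb_lock in SRXAFSCM_CallBack, so
+	 *  cb_lock must be dropped below before each vnode->lock is taken)
+	 */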
+	while (!list_empty(&callbacks)) {
+		struct afs_vnode *vnode;
+		struct inode *inode;
+
+		vnode = list_entry(callbacks.next, struct afs_vnode, cb_link);
+		list_del_init(&vnode->cb_link);
+
+		/* try and grab the inode - may fail */
+		inode = igrab(AFS_VNODE_TO_I(vnode));
+		if (inode) {
+			int release = 0;
+
+			spin_unlock(&server->cb_lock);
+			spin_lock(&vnode->lock);
+
+			if (vnode->cb_server == server) {
+				vnode->cb_server = NULL;
+				afs_kafstimod_del_timer(&vnode->cb_timeout);
+				spin_lock(&afs_cb_hash_lock);
+				list_del_init(&vnode->cb_hash_link);
+				spin_unlock(&afs_cb_hash_lock);
+				release = 1;
+			}
+
+			spin_unlock(&vnode->lock);
+
+			iput(inode);
+			afs_put_server(server);
+
+			spin_lock(&server->cb_lock);
+		}
+	}
+
+	spin_unlock(&server->cb_lock);
+
+	_leave(" = 0");
+	return 0;
+} /* end SRXAFSCM_InitCallBackState() */
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to break callback promises
+ */
+int SRXAFSCM_CallBack(struct afs_server *server, size_t count,
+		      struct afs_callback callbacks[])
+{
+	_enter("%p,%u,", server, count);
+
+	for (; count > 0; callbacks++, count--) {
+		struct afs_vnode *vnode = NULL;
+		struct inode *inode = NULL;
+		int valid = 0;
+
+		_debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
+		       callbacks->fid.vid,
+		       callbacks->fid.vnode,
+		       callbacks->fid.unique,
+		       callbacks->version,
+		       callbacks->expiry,
+		       callbacks->type
+		       );
+
+		/* find the inode for this fid */
+		spin_lock(&afs_cb_hash_lock);
+
+		list_for_each_entry(vnode,
+				    &afs_cb_hash(server, &callbacks->fid),
+				    cb_hash_link) {
+			if (memcmp(&vnode->fid, &callbacks->fid,
+				   sizeof(struct afs_fid)) != 0)
+				continue;
+
+			/* right vnode, but is it same server? */
+			if (vnode->cb_server != server)
+				break; /* no */
+
+			/* try and nail the inode down */
+			inode = igrab(AFS_VNODE_TO_I(vnode));
+			break;
+		}
+
+		spin_unlock(&afs_cb_hash_lock);
+
+		if (inode) {
+			/* we've found the record for this vnode */
+			spin_lock(&vnode->lock);
+			if (vnode->cb_server == server) {
+				/* the callback _is_ on the calling server */
+				vnode->cb_server = NULL;
+				valid = 1;
+
+				afs_kafstimod_del_timer(&vnode->cb_timeout);
+				vnode->flags |= AFS_VNODE_CHANGED;
+
+				spin_lock(&server->cb_lock);
+				list_del_init(&vnode->cb_link);
+				spin_unlock(&server->cb_lock);
+
+				spin_lock(&afs_cb_hash_lock);
+				list_del_init(&vnode->cb_hash_link);
+				spin_unlock(&afs_cb_hash_lock);
+			}
+			spin_unlock(&vnode->lock);
+
+			if (valid) {
+				invalidate_remote_inode(inode);
+				afs_put_server(server);
+			}
+			iput(inode);
+		}
+	}
+
+	_leave(" = 0");
+	return 0;
+} /* end SRXAFSCM_CallBack() */
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to see if the cache manager is still alive
+ */
+int SRXAFSCM_Probe(struct afs_server *server)
+{
+	_debug("SRXAFSCM_Probe(%p)\n", server);
+	return 0;
+} /* end SRXAFSCM_Probe() */
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
new file mode 100644
index 0000000..009a9ae
--- /dev/null
+++ b/fs/afs/cell.c
@@ -0,0 +1,569 @@
+/* cell.c: AFS cell and server record management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include "volume.h"
+#include "cell.h"
+#include "server.h"
+#include "transport.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include "super.h"
+#include "internal.h"
+
+DECLARE_RWSEM(afs_proc_cells_sem);
+LIST_HEAD(afs_proc_cells);
+
+static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
+static DEFINE_RWLOCK(afs_cells_lock);
+static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
+static struct afs_cell *afs_cell_root;
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+						const void *entry);
+static void afs_cell_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_cache_cell_index_def = {
+	.name			= "cell_ix",
+	.data_size		= sizeof(struct afs_cache_cell),
+	.keys[0]		= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+	.match			= afs_cell_cache_match,
+	.update			= afs_cell_cache_update,
+};
+#endif
+
+/*****************************************************************************/
+/*
+ * create a cell record
+ * - "name" is the name of the cell
+ * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ */
+int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
+{
+	struct afs_cell *cell;
+	char *next;
+	int ret;
+
+	_enter("%s", name);
+
+	BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
+
+	/* allocate and initialise a cell record */
+	cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL);
+	if (!cell) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	down_write(&afs_cells_sem);
+
+	memset(cell, 0, sizeof(struct afs_cell));
+	atomic_set(&cell->usage, 0);
+
+	INIT_LIST_HEAD(&cell->link);
+
+	rwlock_init(&cell->sv_lock);
+	INIT_LIST_HEAD(&cell->sv_list);
+	INIT_LIST_HEAD(&cell->sv_graveyard);
+	spin_lock_init(&cell->sv_gylock);
+
+	init_rwsem(&cell->vl_sem);
+	INIT_LIST_HEAD(&cell->vl_list);
+	INIT_LIST_HEAD(&cell->vl_graveyard);
+	spin_lock_init(&cell->vl_gylock);
+
+	strcpy(cell->name,name);
+
+	/* fill in the VL server list from the rest of the string */
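+	/* (e.g., illustratively, vllist "10.0.0.1:10.0.0.2") */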
+	ret = -EINVAL;
+	do {
+		unsigned a, b, c, d;
+
+		next = strchr(vllist, ':');
+		if (next)
+			*next++ = 0;
+
+		if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
+			goto badaddr;
+
+		if (a > 255 || b > 255 || c > 255 || d > 255)
+			goto badaddr;
+
+		cell->vl_addrs[cell->vl_naddrs++].s_addr =
+			htonl((a << 24) | (b << 16) | (c << 8) | d);
+
+		if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS)
+			break;
+
+	} while(vllist = next, vllist);
+
+	/* add a proc dir for this cell */
+	ret = afs_proc_cell_setup(cell);
+	if (ret < 0)
+		goto error;
+
+#ifdef AFS_CACHING_SUPPORT
+	/* put it up for caching */
+	cachefs_acquire_cookie(afs_cache_netfs.primary_index,
+			       &afs_vlocation_cache_index_def,
+			       cell,
+			       &cell->cache);
+#endif
+
+	/* add to the cell lists */
+	write_lock(&afs_cells_lock);
+	list_add_tail(&cell->link, &afs_cells);
+	write_unlock(&afs_cells_lock);
+
+	down_write(&afs_proc_cells_sem);
+	list_add_tail(&cell->proc_link, &afs_proc_cells);
+	up_write(&afs_proc_cells_sem);
+
+	*_cell = cell;
+	up_write(&afs_cells_sem);
+
+	_leave(" = 0 (%p)", cell);
+	return 0;
+
+ badaddr:
+	printk(KERN_ERR "kAFS: bad VL server IP address: '%s'\n", vllist);
+ error:
+	up_write(&afs_cells_sem);
+	kfree(cell);
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_cell_create() */
+
+/*****************************************************************************/
+/*
+ * initialise the cell database from module parameters
+ */
+int afs_cell_init(char *rootcell)
+{
+	struct afs_cell *old_root, *new_root;
+	char *cp;
+	int ret;
+
+	_enter("");
+
+	if (!rootcell) {
+		/* the module was loaded with no parameters, or built statically
+		 * - in the future we might initialise the cell DB here.
+		 */
+		_leave(" = 0 (but no root)");
+		return 0;
+	}
+
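+	/* rootcell is expected as "<cellname>:<ip1>[:<ip2>...]", e.g.
+	 * (illustrative) "mycell.example.com:10.0.0.1:10.0.0.2"
+	 */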
+	cp = strchr(rootcell, ':');
+	if (!cp) {
+		printk(KERN_ERR "kAFS: no VL server IP addresses specified\n");
+		_leave(" = %d (no colon)", -EINVAL);
+		return -EINVAL;
+	}
+
+	/* allocate a cell record for the root cell */
+	*cp++ = 0;
+	ret = afs_cell_create(rootcell, cp, &new_root);
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	/* as afs_put_cell() takes locks by itself, we have to do
+	 * a little gymnastics to be race-free.
+	 */
+	afs_get_cell(new_root);
+
+	write_lock(&afs_cells_lock);
+	while (afs_cell_root) {
+		old_root = afs_cell_root;
+		afs_cell_root = NULL;
+		write_unlock(&afs_cells_lock);
+		afs_put_cell(old_root);
+		write_lock(&afs_cells_lock);
+	}
+	afs_cell_root = new_root;
+	write_unlock(&afs_cells_lock);
+
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_cell_init() */
+
+/*****************************************************************************/
+/*
+ * lookup a cell record
+ */
+int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell)
+{
+	struct afs_cell *cell;
+	int ret;
+
+	_enter("\"%*.*s\",", namesz, namesz, name ? name : "");
+
+	*_cell = NULL;
+
+	if (name) {
+		/* if the cell was named, look for it in the cell record list */
+		ret = -ENOENT;
+		cell = NULL;
+		read_lock(&afs_cells_lock);
+
+		list_for_each_entry(cell, &afs_cells, link) {
+			if (strncmp(cell->name, name, namesz) == 0) {
+				afs_get_cell(cell);
+				goto found;
+			}
+		}
+		cell = NULL;
+	found:
+
+		read_unlock(&afs_cells_lock);
+
+		if (cell)
+			ret = 0;
+	}
+	else {
+		read_lock(&afs_cells_lock);
+
+		cell = afs_cell_root;
+		if (!cell) {
+			/* this should not happen unless user tries to mount
+			 * when root cell is not set. Return an impossibly
+			 * bizzare errno to alert the user. Things like
+			 * ENOENT might be "more appropriate" but they happen
+			 * for other reasons.
+			 */
+			ret = -EDESTADDRREQ;
+		}
+		else {
+			afs_get_cell(cell);
+			ret = 0;
+		}
+
+		read_unlock(&afs_cells_lock);
+	}
+
+	*_cell = cell;
+	_leave(" = %d (%p)", ret, cell);
+	return ret;
+
+} /* end afs_cell_lookup() */
+
+/*****************************************************************************/
+/*
+ * try and get a cell record
+ */
+struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell)
+{
+	struct afs_cell *cell;
+
+	write_lock(&afs_cells_lock);
+
+	cell = *_cell;
+	if (cell && !list_empty(&cell->link))
+		afs_get_cell(cell);
+	else
+		cell = NULL;
+
+	write_unlock(&afs_cells_lock);
+
+	return cell;
+} /* end afs_get_cell_maybe() */
+
+/*****************************************************************************/
+/*
+ * destroy a cell record
+ */
+void afs_put_cell(struct afs_cell *cell)
+{
+	if (!cell)
+		return;
+
+	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
+
+	/* sanity check */
+	BUG_ON(atomic_read(&cell->usage) <= 0);
+
+	/* to prevent a race, the decrement and the dequeue must be effectively
+	 * atomic */
+	write_lock(&afs_cells_lock);
+
+	if (likely(!atomic_dec_and_test(&cell->usage))) {
+		write_unlock(&afs_cells_lock);
+		_leave("");
+		return;
+	}
+
+	write_unlock(&afs_cells_lock);
+
+	BUG_ON(!list_empty(&cell->sv_list));
+	BUG_ON(!list_empty(&cell->sv_graveyard));
+	BUG_ON(!list_empty(&cell->vl_list));
+	BUG_ON(!list_empty(&cell->vl_graveyard));
+
+	_leave(" [unused]");
+} /* end afs_put_cell() */
+
+/*****************************************************************************/
+/*
+ * destroy a cell record
+ */
+static void afs_cell_destroy(struct afs_cell *cell)
+{
+	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
+
+	/* to prevent a race, the decrement and the dequeue must be effectively
+	 * atomic */
+	write_lock(&afs_cells_lock);
+
+	/* sanity check */
+	BUG_ON(atomic_read(&cell->usage) != 0);
+
+	list_del_init(&cell->link);
+
+	write_unlock(&afs_cells_lock);
+
+	down_write(&afs_cells_sem);
+
+	afs_proc_cell_remove(cell);
+
+	down_write(&afs_proc_cells_sem);
+	list_del_init(&cell->proc_link);
+	up_write(&afs_proc_cells_sem);
+
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_relinquish_cookie(cell->cache, 0);
+#endif
+
+	up_write(&afs_cells_sem);
+
+	BUG_ON(!list_empty(&cell->sv_list));
+	BUG_ON(!list_empty(&cell->sv_graveyard));
+	BUG_ON(!list_empty(&cell->vl_list));
+	BUG_ON(!list_empty(&cell->vl_graveyard));
+
+	/* finish cleaning up the cell */
+	kfree(cell);
+
+	_leave(" [destroyed]");
+} /* end afs_cell_destroy() */
+
+/*****************************************************************************/
+/*
+ * lookup the server record corresponding to an Rx RPC peer
+ */
+int afs_server_find_by_peer(const struct rxrpc_peer *peer,
+			    struct afs_server **_server)
+{
+	struct afs_server *server;
+	struct afs_cell *cell;
+
+	_enter("%p{a=%08x},", peer, ntohl(peer->addr.s_addr));
+
+	/* search the cell list */
+	read_lock(&afs_cells_lock);
+
+	list_for_each_entry(cell, &afs_cells, link) {
+
+		_debug("? cell %s",cell->name);
+
+		write_lock(&cell->sv_lock);
+
+		/* check the active list */
+		list_for_each_entry(server, &cell->sv_list, link) {
+			_debug("?? server %08x", ntohl(server->addr.s_addr));
+
+			if (memcmp(&server->addr, &peer->addr,
+				   sizeof(struct in_addr)) == 0)
+				goto found_server;
+		}
+
+		/* check the inactive list */
+		spin_lock(&cell->sv_gylock);
+		list_for_each_entry(server, &cell->sv_graveyard, link) {
+			_debug("?? dead server %08x",
+			       ntohl(server->addr.s_addr));
+
+			if (memcmp(&server->addr, &peer->addr,
+				   sizeof(struct in_addr)) == 0)
+				goto found_dead_server;
+		}
+		spin_unlock(&cell->sv_gylock);
+
+		write_unlock(&cell->sv_lock);
+	}
+	read_unlock(&afs_cells_lock);
+
+	_leave(" = -ENOENT");
+	return -ENOENT;
+
+	/* we found it in the graveyard - resurrect it */
+ found_dead_server:
+	list_del(&server->link);
+	list_add_tail(&server->link, &cell->sv_list);
+	afs_get_server(server);
+	afs_kafstimod_del_timer(&server->timeout);
+	spin_unlock(&cell->sv_gylock);
+	goto success;
+
+	/* we found it - increment its ref count and return it */
+ found_server:
+	afs_get_server(server);
+
+ success:
+	write_unlock(&cell->sv_lock);
+	read_unlock(&afs_cells_lock);
+
+	*_server = server;
+	_leave(" = 0 (s=%p c=%p)", server, cell);
+	return 0;
+
+} /* end afs_server_find_by_peer() */
+
+/*****************************************************************************/
+/*
+ * purge in-memory cell database on module unload or afs_init() failure
+ * - the timeout daemon is stopped before calling this
+ */
+void afs_cell_purge(void)
+{
+	struct afs_vlocation *vlocation;
+	struct afs_cell *cell;
+
+	_enter("");
+
+	afs_put_cell(afs_cell_root);
+
+	while (!list_empty(&afs_cells)) {
+		cell = NULL;
+
+		/* remove the next cell from the front of the list */
+		write_lock(&afs_cells_lock);
+
+		if (!list_empty(&afs_cells)) {
+			cell = list_entry(afs_cells.next,
+					  struct afs_cell, link);
+			list_del_init(&cell->link);
+		}
+
+		write_unlock(&afs_cells_lock);
+
+		if (cell) {
+			_debug("PURGING CELL %s (%d)",
+			       cell->name, atomic_read(&cell->usage));
+
+			BUG_ON(!list_empty(&cell->sv_list));
+			BUG_ON(!list_empty(&cell->vl_list));
+
+			/* purge the cell's VL graveyard list */
+			_debug(" - clearing VL graveyard");
+
+			spin_lock(&cell->vl_gylock);
+
+			while (!list_empty(&cell->vl_graveyard)) {
+				vlocation = list_entry(cell->vl_graveyard.next,
+						       struct afs_vlocation,
+						       link);
+				list_del_init(&vlocation->link);
+
+				afs_kafstimod_del_timer(&vlocation->timeout);
+
+				spin_unlock(&cell->vl_gylock);
+
+				afs_vlocation_do_timeout(vlocation);
+				/* TODO: race if move to use krxtimod instead
+				 * of kafstimod */
+
+				spin_lock(&cell->vl_gylock);
+			}
+
+			spin_unlock(&cell->vl_gylock);
+
+			/* purge the cell's server graveyard list */
+			_debug(" - clearing server graveyard");
+
+			spin_lock(&cell->sv_gylock);
+
+			while (!list_empty(&cell->sv_graveyard)) {
+				struct afs_server *server;
+
+				server = list_entry(cell->sv_graveyard.next,
+						    struct afs_server, link);
+				list_del_init(&server->link);
+
+				afs_kafstimod_del_timer(&server->timeout);
+
+				spin_unlock(&cell->sv_gylock);
+
+				afs_server_do_timeout(server);
+
+				spin_lock(&cell->sv_gylock);
+			}
+
+			spin_unlock(&cell->sv_gylock);
+
+			/* now the cell should be left with no references */
+			afs_cell_destroy(cell);
+		}
+	}
+
+	_leave("");
+} /* end afs_cell_purge() */
+
+/*****************************************************************************/
+/*
+ * match a cell record obtained from the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+						const void *entry)
+{
+	const struct afs_cache_cell *ccell = entry;
+	struct afs_cell *cell = target;
+
+	_enter("{%s},{%s}", ccell->name, cell->name);
+
+	if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
+		_leave(" = SUCCESS");
+		return CACHEFS_MATCH_SUCCESS;
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+} /* end afs_cell_cache_match() */
+#endif
+
+/*****************************************************************************/
+/*
+ * update a cell record in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_cell_cache_update(void *source, void *entry)
+{
+	struct afs_cache_cell *ccell = entry;
+	struct afs_cell *cell = source;
+
+	_enter("%p,%p", source, entry);
+
+	strncpy(ccell->name, cell->name, sizeof(ccell->name));
+
+	memcpy(ccell->vl_servers,
+	       cell->vl_addrs,
+	       min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
+
+} /* end afs_cell_cache_update() */
+#endif
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
new file mode 100644
index 0000000..4834910
--- /dev/null
+++ b/fs/afs/cell.h
@@ -0,0 +1,78 @@
+/* cell.h: AFS cell record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_CELL_H
+#define _LINUX_AFS_CELL_H
+
+#include "types.h"
+#include "cache.h"
+
+#define AFS_CELL_MAX_ADDRS 15
+
+extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
+
+/*****************************************************************************/
+/*
+ * entry in the cached cell catalogue
+ */
+struct afs_cache_cell
+{
+	char			name[64];	/* cell name (padded with NULs) */
+	struct in_addr		vl_servers[15];	/* cached cell VL servers */
+};
+
+/*****************************************************************************/
+/*
+ * AFS cell record
+ */
+struct afs_cell
+{
+	atomic_t		usage;
+	struct list_head	link;		/* main cell list link */
+	struct list_head	proc_link;	/* /proc cell list link */
+	struct proc_dir_entry	*proc_dir;	/* /proc dir for this cell */
+#ifdef AFS_CACHING_SUPPORT
+	struct cachefs_cookie	*cache;		/* caching cookie */
+#endif
+
+	/* server record management */
+	rwlock_t		sv_lock;	/* active server list lock */
+	struct list_head	sv_list;	/* active server list */
+	struct list_head	sv_graveyard;	/* inactive server list */
+	spinlock_t		sv_gylock;	/* inactive server list lock */
+
+	/* volume location record management */
+	struct rw_semaphore	vl_sem;		/* volume management serialisation semaphore */
+	struct list_head	vl_list;	/* cell's active VL record list */
+	struct list_head	vl_graveyard;	/* cell's inactive VL record list */
+	spinlock_t		vl_gylock;	/* graveyard lock */
+	unsigned short		vl_naddrs;	/* number of VL servers in addr list */
+	unsigned short		vl_curr_svix;	/* current server index */
+	struct in_addr		vl_addrs[AFS_CELL_MAX_ADDRS];	/* cell VL server addresses */
+
+	char			name[0];	/* cell name - must go last */
+};
+
+extern int afs_cell_init(char *rootcell);
+
+extern int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell);
+
+extern int afs_cell_lookup(const char *name, unsigned nmsize, struct afs_cell **_cell);
+
+#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
+
+extern struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell);
+
+extern void afs_put_cell(struct afs_cell *cell);
+
+extern void afs_cell_purge(void);
+
+#endif /* _LINUX_AFS_CELL_H */
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
new file mode 100644
index 0000000..0a57fd7
--- /dev/null
+++ b/fs/afs/cmservice.c
@@ -0,0 +1,652 @@
+/* cmservice.c: AFS Cache Manager Service
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "server.h"
+#include "cell.h"
+#include "transport.h"
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "cmservice.h"
+#include "internal.h"
+
+static unsigned afscm_usage;		/* AFS cache manager usage count */
+static struct rw_semaphore afscm_sem;	/* AFS cache manager start/stop semaphore */
+
+static int afscm_new_call(struct rxrpc_call *call);
+static void afscm_attention(struct rxrpc_call *call);
+static void afscm_error(struct rxrpc_call *call);
+static void afscm_aemap(struct rxrpc_call *call);
+
+static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
+static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
+static void _SRXAFSCM_Probe(struct rxrpc_call *call);
+
+typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
+
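+/* The cache manager RPC operations we implement; the IDs below follow
+ * the AFS cache manager protocol numbering (204 CallBack, 205
+ * InitCallBackState, 206 Probe; 207-210 are not yet implemented).
+ */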
+static const struct rxrpc_operation AFSCM_ops[] = {
+	{
+		.id	= 204,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "CallBack",
+		.user	= _SRXAFSCM_CallBack,
+	},
+	{
+		.id	= 205,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "InitCallBackState",
+		.user	= _SRXAFSCM_InitCallBackState,
+	},
+	{
+		.id	= 206,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "Probe",
+		.user	= _SRXAFSCM_Probe,
+	},
+#if 0
+	{
+		.id	= 207,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "GetLock",
+		.user	= _SRXAFSCM_GetLock,
+	},
+	{
+		.id	= 208,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "GetCE",
+		.user	= _SRXAFSCM_GetCE,
+	},
+	{
+		.id	= 209,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "GetXStatsVersion",
+		.user	= _SRXAFSCM_GetXStatsVersion,
+	},
+	{
+		.id	= 210,
+		.asize	= RXRPC_APP_MARK_EOF,
+		.name	= "GetXStats",
+		.user	= _SRXAFSCM_GetXStats,
+	}
+#endif
+};
+
+static struct rxrpc_service AFSCM_service = {
+	.name		= "AFS/CM",
+	.owner		= THIS_MODULE,
+	.link		= LIST_HEAD_INIT(AFSCM_service.link),
+	.new_call	= afscm_new_call,
+	.service_id	= 1,
+	.attn_func	= afscm_attention,
+	.error_func	= afscm_error,
+	.aemap_func	= afscm_aemap,
+	.ops_begin	= &AFSCM_ops[0],
+	.ops_end	= &AFSCM_ops[sizeof(AFSCM_ops) / sizeof(AFSCM_ops[0])],
+};
+
+static DECLARE_COMPLETION(kafscmd_alive);
+static DECLARE_COMPLETION(kafscmd_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
+static LIST_HEAD(kafscmd_attention_list);
+static LIST_HEAD(afscm_calls);
+static DEFINE_SPINLOCK(afscm_calls_lock);
+static DEFINE_SPINLOCK(kafscmd_attention_lock);
+static int kafscmd_die;
+
+/*****************************************************************************/
+/*
+ * AFS Cache Manager kernel thread
+ */
+static int kafscmd(void *arg)
+{
+	DECLARE_WAITQUEUE(myself, current);
+
+	struct rxrpc_call *call;
+	_SRXAFSCM_xxxx_t func;
+	int die;
+
+	printk("kAFS: Started kafscmd %d\n", current->pid);
+
+	daemonize("kafscmd");
+
+	complete(&kafscmd_alive);
+
+	/* loop around looking for things to attend to */
+	do {
+		if (list_empty(&kafscmd_attention_list)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			add_wait_queue(&kafscmd_sleepq, &myself);
+
+			for (;;) {
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (!list_empty(&kafscmd_attention_list) ||
+				    signal_pending(current) ||
+				    kafscmd_die)
+					break;
+
+				schedule();
+			}
+
+			remove_wait_queue(&kafscmd_sleepq, &myself);
+			set_current_state(TASK_RUNNING);
+		}
+
+		die = kafscmd_die;
+
+		/* dequeue the next call requiring attention */
+		call = NULL;
+		spin_lock(&kafscmd_attention_lock);
+
+		if (!list_empty(&kafscmd_attention_list)) {
+			call = list_entry(kafscmd_attention_list.next,
+					  struct rxrpc_call,
+					  app_attn_link);
+			list_del_init(&call->app_attn_link);
+			die = 0;
+		}
+
+		spin_unlock(&kafscmd_attention_lock);
+
+		if (call) {
+			/* act upon it */
+			_debug("@@@ Begin Attend Call %p", call);
+
+			func = call->app_user;
+			if (func)
+				func(call);
+
+			rxrpc_put_call(call);
+
+			_debug("@@@ End Attend Call %p", call);
+		}
+
+	} while(!die);
+
+	/* and that's all */
+	complete_and_exit(&kafscmd_dead, 0);
+
+} /* end kafscmd() */
+
+/*****************************************************************************/
+/*
+ * handle a call coming in to the cache manager
+ * - if I want to keep the call, I must increment its usage count
+ * - the return value will be negated and passed back in an abort packet if
+ *   non-zero
+ * - serialised by virtue of there only being one krxiod
+ */
+static int afscm_new_call(struct rxrpc_call *call)
+{
+	_enter("%p{cid=%u u=%d}",
+	       call, ntohl(call->call_id), atomic_read(&call->usage));
+
+	rxrpc_get_call(call);
+
+	/* add to my current call list */
+	spin_lock(&afscm_calls_lock);
+	list_add(&call->app_link,&afscm_calls);
+	spin_unlock(&afscm_calls_lock);
+
+	_leave(" = 0");
+	return 0;
+
+} /* end afscm_new_call() */
+
+/*****************************************************************************/
+/*
+ * queue on the kafscmd queue for attention
+ */
+static void afscm_attention(struct rxrpc_call *call)
+{
+	_enter("%p{cid=%u u=%d}",
+	       call, ntohl(call->call_id), atomic_read(&call->usage));
+
+	spin_lock(&kafscmd_attention_lock);
+
+	if (list_empty(&call->app_attn_link)) {
+		list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
+		rxrpc_get_call(call);
+	}
+
+	spin_unlock(&kafscmd_attention_lock);
+
+	wake_up(&kafscmd_sleepq);
+
+	_leave(" {u=%d}", atomic_read(&call->usage));
+} /* end afscm_attention() */
+
+/*****************************************************************************/
+/*
+ * handle my call being aborted
+ * - clean up, dequeue and put my ref to the call
+ */
+static void afscm_error(struct rxrpc_call *call)
+{
+	int removed;
+
+	_enter("%p{est=%s ac=%u er=%d}",
+	       call,
+	       rxrpc_call_error_states[call->app_err_state],
+	       call->app_abort_code,
+	       call->app_errno);
+
+	spin_lock(&kafscmd_attention_lock);
+
+	if (list_empty(&call->app_attn_link)) {
+		list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
+		rxrpc_get_call(call);
+	}
+
+	spin_unlock(&kafscmd_attention_lock);
+
+	removed = 0;
+	spin_lock(&afscm_calls_lock);
+	if (!list_empty(&call->app_link)) {
+		list_del_init(&call->app_link);
+		removed = 1;
+	}
+	spin_unlock(&afscm_calls_lock);
+
+	if (removed)
+		rxrpc_put_call(call);
+
+	wake_up(&kafscmd_sleepq);
+
+	_leave("");
+} /* end afscm_error() */
+
+/*****************************************************************************/
+/*
+ * map afs abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afscm_aemap(struct rxrpc_call *call)
+{
+	switch (call->app_err_state) {
+	case RXRPC_ESTATE_LOCAL_ABORT:
+		call->app_abort_code = -call->app_errno;
+		break;
+	case RXRPC_ESTATE_PEER_ABORT:
+		call->app_errno = -ECONNABORTED;
+		break;
+	default:
+		break;
+	}
+} /* end afscm_aemap() */
+
+/*****************************************************************************/
+/*
+ * start the cache manager service if not already started
+ */
+int afscm_start(void)
+{
+	int ret;
+
+	down_write(&afscm_sem);
+	if (!afscm_usage) {
+		ret = kernel_thread(kafscmd, NULL, 0);
+		if (ret < 0)
+			goto out;
+
+		wait_for_completion(&kafscmd_alive);
+
+		ret = rxrpc_add_service(afs_transport, &AFSCM_service);
+		if (ret < 0)
+			goto kill;
+
+		afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
+					afs_mntpt_expiry_timeout * HZ);
+	}
+
+	afscm_usage++;
+	up_write(&afscm_sem);
+
+	return 0;
+
+ kill:
+	kafscmd_die = 1;
+	wake_up(&kafscmd_sleepq);
+	wait_for_completion(&kafscmd_dead);
+
+ out:
+	up_write(&afscm_sem);
+	return ret;
+
+} /* end afscm_start() */
+
+/*****************************************************************************/
+/*
+ * stop the cache manager service
+ */
+void afscm_stop(void)
+{
+	struct rxrpc_call *call;
+
+	down_write(&afscm_sem);
+
+	BUG_ON(afscm_usage == 0);
+	afscm_usage--;
+
+	if (afscm_usage == 0) {
+		/* don't want more incoming calls */
+		rxrpc_del_service(afs_transport, &AFSCM_service);
+
+		/* abort any calls I've still got open (the afscm_error() will
+		 * dequeue them) */
+		spin_lock(&afscm_calls_lock);
+		while (!list_empty(&afscm_calls)) {
+			call = list_entry(afscm_calls.next,
+					  struct rxrpc_call,
+					  app_link);
+
+			list_del_init(&call->app_link);
+			rxrpc_get_call(call);
+			spin_unlock(&afscm_calls_lock);
+
+			rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and
+							 * put */
+
+			_debug("nuking active call %08x.%d",
+			       ntohl(call->conn->conn_id),
+			       ntohl(call->call_id));
+			rxrpc_put_call(call);
+			rxrpc_put_call(call);
+
+			spin_lock(&afscm_calls_lock);
+		}
+		spin_unlock(&afscm_calls_lock);
+
+		/* get rid of my daemon */
+		kafscmd_die = 1;
+		wake_up(&kafscmd_sleepq);
+		wait_for_completion(&kafscmd_dead);
+
+		/* dispose of any calls waiting for attention */
+		spin_lock(&kafscmd_attention_lock);
+		while (!list_empty(&kafscmd_attention_list)) {
+			call = list_entry(kafscmd_attention_list.next,
+					  struct rxrpc_call,
+					  app_attn_link);
+
+			list_del_init(&call->app_attn_link);
+			spin_unlock(&kafscmd_attention_lock);
+
+			rxrpc_put_call(call);
+
+			spin_lock(&kafscmd_attention_lock);
+		}
+		spin_unlock(&kafscmd_attention_lock);
+
+		afs_kafstimod_del_timer(&afs_mntpt_expiry_timer);
+	}
+
+	up_write(&afscm_sem);
+
+} /* end afscm_stop() */
+
+/*****************************************************************************/
+/*
+ * handle the fileserver breaking a set of callbacks
+ */
+static void _SRXAFSCM_CallBack(struct rxrpc_call *call)
+{
+	struct afs_server *server;
+	size_t count, qty, tmp;
+	int ret = 0, removed;
+
+	_enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
+
+	server = afs_server_get_from_peer(call->conn->peer);
+
+	switch (call->app_call_state) {
+		/* we've received the last packet
+		 * - drain all the data from the call and send the reply
+		 */
+	case RXRPC_CSTATE_SRVR_GOT_ARGS:
+		ret = -EBADMSG;
+		qty = call->app_ready_qty;
+		if (qty < 8 || qty > 50 * (6 * 4) + 8)
+			break;
+
+		{
+			struct afs_callback *cb, *pcb;
+			int loop;
+			__be32 *fp, *bp;
+
+			fp = rxrpc_call_alloc_scratch(call, qty);
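+			/* the argument block is: a 32-bit FID count, that
+			 * many AFSFid triples (vid, vnode, unique), then a
+			 * second count (0 or equal to the first) followed by
+			 * that many callback triples (version, expiry, type)
+			 */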
+
+			/* drag the entire argument block out to the scratch
+			 * space */
+			ret = rxrpc_call_read_data(call, fp, qty, 0);
+			if (ret < 0)
+				break;
+
+			/* and unmarshall the parameter block */
+			ret = -EBADMSG;
+			count = ntohl(*fp++);
+			if (count>AFSCBMAX ||
+			    (count * (3 * 4) + 8 != qty &&
+			     count * (6 * 4) + 8 != qty))
+				break;
+
+			bp = fp + count*3;
+			tmp = ntohl(*bp++);
+			if (tmp > 0 && tmp != count)
+				break;
+			if (tmp == 0)
+				bp = NULL;
+
+			pcb = cb = rxrpc_call_alloc_scratch_s(
+				call, struct afs_callback);
+
+			for (loop = count - 1; loop >= 0; loop--) {
+				pcb->fid.vid	= ntohl(*fp++);
+				pcb->fid.vnode	= ntohl(*fp++);
+				pcb->fid.unique	= ntohl(*fp++);
+				if (bp) {
+					pcb->version	= ntohl(*bp++);
+					pcb->expiry	= ntohl(*bp++);
+					pcb->type	= ntohl(*bp++);
+				}
+				else {
+					pcb->version	= 0;
+					pcb->expiry	= 0;
+					pcb->type	= AFSCM_CB_UNTYPED;
+				}
+				pcb++;
+			}
+
+			/* invoke the actual service routine */
+			ret = SRXAFSCM_CallBack(server, count, cb);
+			if (ret < 0)
+				break;
+		}
+
+		/* send the reply */
+		ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
+					    GFP_KERNEL, 0, &count);
+		if (ret < 0)
+			break;
+		break;
+
+		/* operation complete */
+	case RXRPC_CSTATE_COMPLETE:
+		call->app_user = NULL;
+		removed = 0;
+		spin_lock(&afscm_calls_lock);
+		if (!list_empty(&call->app_link)) {
+			list_del_init(&call->app_link);
+			removed = 1;
+		}
+		spin_unlock(&afscm_calls_lock);
+
+		if (removed)
+			rxrpc_put_call(call);
+		break;
+
+		/* operation terminated on error */
+	case RXRPC_CSTATE_ERROR:
+		call->app_user = NULL;
+		break;
+
+	default:
+		break;
+	}
+
+	if (ret < 0)
+		rxrpc_call_abort(call, ret);
+
+	afs_put_server(server);
+
+	_leave(" = %d", ret);
+
+} /* end _SRXAFSCM_CallBack() */
+
+/*****************************************************************************/
+/*
+ * handle the fileserver asking us to initialise our callback state
+ */
+static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call)
+{
+	struct afs_server *server;
+	size_t count;
+	int ret = 0, removed;
+
+	_enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
+
+	server = afs_server_get_from_peer(call->conn->peer);
+
+	switch (call->app_call_state) {
+		/* we've received the last packet - drain all the data from the
+		 * call */
+	case RXRPC_CSTATE_SRVR_GOT_ARGS:
+		/* shouldn't be any args */
+		ret = -EBADMSG;
+		break;
+
+		/* send the reply when asked for it */
+	case RXRPC_CSTATE_SRVR_SND_REPLY:
+		/* invoke the actual service routine */
+		ret = SRXAFSCM_InitCallBackState(server);
+		if (ret < 0)
+			break;
+
+		ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
+					    GFP_KERNEL, 0, &count);
+		if (ret < 0)
+			break;
+		break;
+
+		/* operation complete */
+	case RXRPC_CSTATE_COMPLETE:
+		call->app_user = NULL;
+		removed = 0;
+		spin_lock(&afscm_calls_lock);
+		if (!list_empty(&call->app_link)) {
+			list_del_init(&call->app_link);
+			removed = 1;
+		}
+		spin_unlock(&afscm_calls_lock);
+
+		if (removed)
+			rxrpc_put_call(call);
+		break;
+
+		/* operation terminated on error */
+	case RXRPC_CSTATE_ERROR:
+		call->app_user = NULL;
+		break;
+
+	default:
+		break;
+	}
+
+	if (ret < 0)
+		rxrpc_call_abort(call, ret);
+
+	afs_put_server(server);
+
+	_leave(" = %d", ret);
+
+} /* end _SRXAFSCM_InitCallBackState() */
+
+/*****************************************************************************/
+/*
+ * handle a probe from a fileserver
+ */
+static void _SRXAFSCM_Probe(struct rxrpc_call *call)
+{
+	struct afs_server *server;
+	size_t count;
+	int ret = 0, removed;
+
+	_enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
+
+	server = afs_server_get_from_peer(call->conn->peer);
+
+	switch (call->app_call_state) {
+		/* we've received the last packet - drain all the data from the
+		 * call */
+	case RXRPC_CSTATE_SRVR_GOT_ARGS:
+		/* shouldn't be any args */
+		ret = -EBADMSG;
+		break;
+
+		/* send the reply when asked for it */
+	case RXRPC_CSTATE_SRVR_SND_REPLY:
+		/* invoke the actual service routine */
+		ret = SRXAFSCM_Probe(server);
+		if (ret < 0)
+			break;
+
+		ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
+					    GFP_KERNEL, 0, &count);
+		if (ret < 0)
+			break;
+		break;
+
+		/* operation complete */
+	case RXRPC_CSTATE_COMPLETE:
+		call->app_user = NULL;
+		removed = 0;
+		spin_lock(&afscm_calls_lock);
+		if (!list_empty(&call->app_link)) {
+			list_del_init(&call->app_link);
+			removed = 1;
+		}
+		spin_unlock(&afscm_calls_lock);
+
+		if (removed)
+			rxrpc_put_call(call);
+		break;
+
+		/* operation terminated on error */
+	case RXRPC_CSTATE_ERROR:
+		call->app_user = NULL;
+		break;
+
+	default:
+		break;
+	}
+
+	if (ret < 0)
+		rxrpc_call_abort(call, ret);
+
+	afs_put_server(server);
+
+	_leave(" = %d", ret);
+
+} /* end _SRXAFSCM_Probe() */
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
new file mode 100644
index 0000000..af8d4d6
--- /dev/null
+++ b/fs/afs/cmservice.h
@@ -0,0 +1,29 @@
+/* cmservice.h: AFS Cache Manager Service declarations
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_CMSERVICE_H
+#define _LINUX_AFS_CMSERVICE_H
+
+#include <rxrpc/transport.h>
+#include "types.h"
+
+/* cache manager start/stop */
+extern int afscm_start(void);
+extern void afscm_stop(void);
+
+/* cache manager server functions */
+extern int SRXAFSCM_InitCallBackState(struct afs_server *server);
+extern int SRXAFSCM_CallBack(struct afs_server *server,
+			     size_t count,
+			     struct afs_callback callbacks[]);
+extern int SRXAFSCM_Probe(struct afs_server *server);
+
+#endif /* _LINUX_AFS_CMSERVICE_H */
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
new file mode 100644
index 0000000..6682d6d
--- /dev/null
+++ b/fs/afs/dir.c
@@ -0,0 +1,666 @@
+/* dir.c: AFS filesystem directory handling
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include "vnode.h"
+#include "volume.h"
+#include <rxrpc/call.h>
+#include "super.h"
+#include "internal.h"
+
+static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry,
+				     struct nameidata *nd);
+static int afs_dir_open(struct inode *inode, struct file *file);
+static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
+static int afs_d_delete(struct dentry *dentry);
+static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
+				  loff_t fpos, ino_t ino, unsigned dtype);
+
+struct file_operations afs_dir_file_operations = {
+	.open		= afs_dir_open,
+	.readdir	= afs_dir_readdir,
+};
+
+struct inode_operations afs_dir_inode_operations = {
+	.lookup		= afs_dir_lookup,
+	.getattr	= afs_inode_getattr,
+#if 0 /* TODO */
+	.create		= afs_dir_create,
+	.link		= afs_dir_link,
+	.unlink		= afs_dir_unlink,
+	.symlink	= afs_dir_symlink,
+	.mkdir		= afs_dir_mkdir,
+	.rmdir		= afs_dir_rmdir,
+	.mknod		= afs_dir_mknod,
+	.rename		= afs_dir_rename,
+#endif
+};
+
+static struct dentry_operations afs_fs_dentry_operations = {
+	.d_revalidate	= afs_d_revalidate,
+	.d_delete	= afs_d_delete,
+};
+
+#define AFS_DIR_HASHTBL_SIZE	128
+#define AFS_DIR_DIRENT_SIZE	32
+#define AFS_DIRENT_PER_BLOCK	64
+
+union afs_dirent {
+	struct {
+		uint8_t		valid;
+		uint8_t		unused[1];
+		__be16		hash_next;
+		__be32		vnode;
+		__be32		unique;
+		uint8_t		name[16];
+		uint8_t		overflow[4];	/* if any char of the name (inc
+						 * NUL) reaches here, consume
+						 * the next dirent too */
+	} u;
+	uint8_t	extended_name[32];
+};
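+
+/* (a name of up to 15 chars plus its NUL fits in a single slot; longer names
+ * spill through the overflow area into the extended_name space of one or more
+ * following slots - a 20-char name, for instance, consumes its own slot plus
+ * one extension slot, which is what the bitmap walk in
+ * afs_dir_iterate_block() accounts for) */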
+
+/* AFS directory page header (one at the beginning of every 2048-byte chunk) */
+struct afs_dir_pagehdr {
+	__be16		npages;
+	__be16		magic;
+#define AFS_DIR_MAGIC htons(1234)
+	uint8_t		nentries;
+	uint8_t		bitmap[8];
+	uint8_t		pad[19];
+};
+
+/* directory block layout */
+union afs_dir_block {
+
+	struct afs_dir_pagehdr pagehdr;
+
+	struct {
+		struct afs_dir_pagehdr	pagehdr;
+		uint8_t			alloc_ctrs[128];
+		/* dir hash table */
+		uint16_t		hashtable[AFS_DIR_HASHTBL_SIZE];
+	} hdr;
+
+	union afs_dirent dirents[AFS_DIRENT_PER_BLOCK];
+};
+
+/* layout on a linux VM page */
+struct afs_dir_page {
+	union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)];
+};
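+
+/* (with 4KB VM pages that means two 2048-byte directory blocks per page,
+ * which is how afs_dir_iterate() and afs_dir_get_page() convert between file
+ * positions, block offsets and page indices) */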
+
+struct afs_dir_lookup_cookie {
+	struct afs_fid	fid;
+	const char	*name;
+	size_t		nlen;
+	int		found;
+};
+
+/*****************************************************************************/
+/*
+ * check that a directory page is valid
+ */
+static inline void afs_dir_check_page(struct inode *dir, struct page *page)
+{
+	struct afs_dir_page *dbuf;
+	loff_t latter;
+	int tmp, qty;
+
+#if 0
+	/* check the page count */
+	qty = desc.size / sizeof(dbuf->blocks[0]);
+	if (qty == 0)
+		goto error;
+
+	if (page->index == 0 &&
+	    qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
+		printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
+		       __FUNCTION__, dir->i_ino, qty,
+		       ntohs(dbuf->blocks[0].pagehdr.npages));
+		goto error;
+	}
+#endif
+
+	/* determine how many magic numbers there should be in this page */
+	latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT);
+	if (latter >= PAGE_SIZE)
+		qty = PAGE_SIZE;
+	else
+		qty = latter;
+	qty /= sizeof(union afs_dir_block);
+
+	/* check them */
+	dbuf = page_address(page);
+	for (tmp = 0; tmp < qty; tmp++) {
+		if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
+			printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
+			       __FUNCTION__, dir->i_ino, tmp, qty,
+			       ntohs(dbuf->blocks[tmp].pagehdr.magic));
+			goto error;
+		}
+	}
+
+	SetPageChecked(page);
+	return;
+
+ error:
+	SetPageChecked(page);
+	SetPageError(page);
+
+} /* end afs_dir_check_page() */
+
+/*****************************************************************************/
+/*
+ * discard a page cached in the pagecache
+ */
+static inline void afs_dir_put_page(struct page *page)
+{
+	kunmap(page);
+	page_cache_release(page);
+
+} /* end afs_dir_put_page() */
+
+/*****************************************************************************/
+/*
+ * get a page into the pagecache
+ */
+static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
+{
+	struct page *page;
+
+	_enter("{%lu},%lu", dir->i_ino, index);
+
+	page = read_cache_page(dir->i_mapping, index,
+			       (filler_t *) dir->i_mapping->a_ops->readpage,
+			       NULL);
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		kmap(page);
+		if (!PageUptodate(page))
+			goto fail;
+		if (!PageChecked(page))
+			afs_dir_check_page(dir, page);
+		if (PageError(page))
+			goto fail;
+	}
+	return page;
+
+ fail:
+	afs_dir_put_page(page);
+	return ERR_PTR(-EIO);
+} /* end afs_dir_get_page() */
+
+/*****************************************************************************/
+/*
+ * open an AFS directory file
+ */
+static int afs_dir_open(struct inode *inode, struct file *file)
+{
+	_enter("{%lu}", inode->i_ino);
+
+	BUG_ON(sizeof(union afs_dir_block) != 2048);
+	BUG_ON(sizeof(union afs_dirent) != 32);
+
+	if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
+		return -ENOENT;
+
+	_leave(" = 0");
+	return 0;
+
+} /* end afs_dir_open() */
+
+/*****************************************************************************/
+/*
+ * deal with one block in an AFS directory
+ */
+static int afs_dir_iterate_block(unsigned *fpos,
+				 union afs_dir_block *block,
+				 unsigned blkoff,
+				 void *cookie,
+				 filldir_t filldir)
+{
+	union afs_dirent *dire;
+	unsigned offset, next, curr;
+	size_t nlen;
+	int tmp, ret;
+
+	_enter("%u,%x,%p,,",*fpos,blkoff,block);
+
+	curr = (*fpos - blkoff) / sizeof(union afs_dirent);
+
+	/* walk through the block, an entry at a time */
+	for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
+	     offset < AFS_DIRENT_PER_BLOCK;
+	     offset = next
+	     ) {
+		next = offset + 1;
+
+		/* skip entries marked unused in the bitmap */
+		if (!(block->pagehdr.bitmap[offset / 8] &
+		      (1 << (offset % 8)))) {
+			_debug("ENT[%Zu.%u]: unused\n",
+			       blkoff / sizeof(union afs_dir_block), offset);
+			if (offset >= curr)
+				*fpos = blkoff +
+					next * sizeof(union afs_dirent);
+			continue;
+		}
+
+		/* got a valid entry */
+		dire = &block->dirents[offset];
+		nlen = strnlen(dire->u.name,
+			       sizeof(*block) -
+			       offset * sizeof(union afs_dirent));
+
+		_debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n",
+		       blkoff / sizeof(union afs_dir_block), offset,
+		       (offset < curr ? "skip" : "fill"),
+		       nlen, dire->u.name);
+
+		/* work out where the next possible entry is */
+		for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_dirent)) {
+			if (next >= AFS_DIRENT_PER_BLOCK) {
+				_debug("ENT[%Zu.%u]:"
+				       " %u travelled beyond end dir block"
+				       " (len %u/%Zu)\n",
+				       blkoff / sizeof(union afs_dir_block),
+				       offset, next, tmp, nlen);
+				return -EIO;
+			}
+			if (!(block->pagehdr.bitmap[next / 8] &
+			      (1 << (next % 8)))) {
+				_debug("ENT[%Zu.%u]:"
+				       " %u unmarked extension (len %u/%Zu)\n",
+				       blkoff / sizeof(union afs_dir_block),
+				       offset, next, tmp, nlen);
+				return -EIO;
+			}
+
+			_debug("ENT[%Zu.%u]: ext %u/%Zu\n",
+			       blkoff / sizeof(union afs_dir_block),
+			       next, tmp, nlen);
+			next++;
+		}
+
+		/* skip if starts before the current position */
+		if (offset < curr)
+			continue;
+
+		/* found the next entry */
+		ret = filldir(cookie,
+			      dire->u.name,
+			      nlen,
+			      blkoff + offset * sizeof(union afs_dirent),
+			      ntohl(dire->u.vnode),
+			      filldir == afs_dir_lookup_filldir ?
+			      ntohl(dire->u.unique) : DT_UNKNOWN);
+		if (ret < 0) {
+			_leave(" = 0 [full]");
+			return 0;
+		}
+
+		*fpos = blkoff + next * sizeof(union afs_dirent);
+	}
+
+	_leave(" = 1 [more]");
+	return 1;
+} /* end afs_dir_iterate_block() */
+
+/*****************************************************************************/
+/*
+ * read an AFS directory
+ */
+static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
+			   filldir_t filldir)
+{
+	union afs_dir_block	*dblock;
+	struct afs_dir_page *dbuf;
+	struct page *page;
+	unsigned blkoff, limit;
+	int ret;
+
+	_enter("{%lu},%u,,", dir->i_ino, *fpos);
+
+	if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+		_leave(" = -ESTALE");
+		return -ESTALE;
+	}
+
+	/* round the file position up to the next entry boundary */
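+	/* (entries are 32 bytes wide, so an fpos of 33, say, rounds up to 64,
+	 * while an fpos that is already a multiple of 32 is left alone) */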
+	*fpos += sizeof(union afs_dirent) - 1;
+	*fpos &= ~(sizeof(union afs_dirent) - 1);
+
+	/* walk through the blocks in sequence */
+	ret = 0;
+	while (*fpos < dir->i_size) {
+		blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1);
+
+		/* fetch the appropriate page from the directory */
+		page = afs_dir_get_page(dir, blkoff / PAGE_SIZE);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			break;
+		}
+
+		limit = blkoff & ~(PAGE_SIZE - 1);
+
+		dbuf = page_address(page);
+
+		/* deal with the individual blocks stashed on this page */
+		do {
+			dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
+					       sizeof(union afs_dir_block)];
+			ret = afs_dir_iterate_block(fpos, dblock, blkoff,
+						    cookie, filldir);
+			if (ret != 1) {
+				afs_dir_put_page(page);
+				goto out;
+			}
+
+			blkoff += sizeof(union afs_dir_block);
+
+		} while (*fpos < dir->i_size && blkoff < limit);
+
+		afs_dir_put_page(page);
+		ret = 0;
+	}
+
+ out:
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_dir_iterate() */
+
+/*****************************************************************************/
+/*
+ * read an AFS directory
+ */
+static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir)
+{
+	unsigned fpos;
+	int ret;
+
+	_enter("{%Ld,{%lu}}", file->f_pos, file->f_dentry->d_inode->i_ino);
+
+	fpos = file->f_pos;
+	ret = afs_dir_iterate(file->f_dentry->d_inode, &fpos, cookie, filldir);
+	file->f_pos = fpos;
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_dir_readdir() */
+
+/*****************************************************************************/
+/*
+ * search the directory for a name
+ * - if afs_dir_iterate_block() spots this function, it'll pass the FID
+ *   uniquifier through dtype
+ */
+static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
+				  loff_t fpos, ino_t ino, unsigned dtype)
+{
+	struct afs_dir_lookup_cookie *cookie = _cookie;
+
+	_enter("{%s,%Zu},%s,%u,,%lu,%u",
+	       cookie->name, cookie->nlen, name, nlen, ino, dtype);
+
+	if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) {
+		_leave(" = 0 [no]");
+		return 0;
+	}
+
+	cookie->fid.vnode = ino;
+	cookie->fid.unique = dtype;
+	cookie->found = 1;
+
+	_leave(" = -1 [found]");
+	return -1;
+} /* end afs_dir_lookup_filldir() */
+
+/*****************************************************************************/
+/*
+ * look up an entry in a directory
+ */
+static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry,
+				     struct nameidata *nd)
+{
+	struct afs_dir_lookup_cookie cookie;
+	struct afs_super_info *as;
+	struct afs_vnode *vnode;
+	struct inode *inode;
+	unsigned fpos;
+	int ret;
+
+	_enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name);
+
+	/* insanity checks first */
+	BUG_ON(sizeof(union afs_dir_block) != 2048);
+	BUG_ON(sizeof(union afs_dirent) != 32);
+
+	if (dentry->d_name.len > 255) {
+		_leave(" = -ENAMETOOLONG");
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+
+	vnode = AFS_FS_I(dir);
+	if (vnode->flags & AFS_VNODE_DELETED) {
+		_leave(" = -ESTALE");
+		return ERR_PTR(-ESTALE);
+	}
+
+	as = dir->i_sb->s_fs_info;
+
+	/* search the directory */
+	cookie.name	= dentry->d_name.name;
+	cookie.nlen	= dentry->d_name.len;
+	cookie.fid.vid	= as->volume->vid;
+	cookie.found	= 0;
+
+	fpos = 0;
+	ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir);
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ERR_PTR(ret);
+	}
+
+	ret = -ENOENT;
+	if (!cookie.found) {
+		_leave(" = %d", ret);
+		return ERR_PTR(ret);
+	}
+
+	/* instantiate the dentry */
+	ret = afs_iget(dir->i_sb, &cookie.fid, &inode);
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ERR_PTR(ret);
+	}
+
+	dentry->d_op = &afs_fs_dentry_operations;
+	dentry->d_fsdata = (void *) (unsigned long) vnode->status.version;
+
+	d_add(dentry, inode);
+	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
+	       cookie.fid.vnode,
+	       cookie.fid.unique,
+	       dentry->d_inode->i_ino,
+	       dentry->d_inode->i_version);
+
+	return NULL;
+} /* end afs_dir_lookup() */
+
+/*****************************************************************************/
+/*
+ * check that a dentry lookup hit has found a valid entry
+ * - NOTE! the hit can be a negative hit too, so we can't assume we have an
+ *   inode
+ * (derived from nfs_lookup_revalidate)
+ */
+static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct afs_dir_lookup_cookie cookie;
+	struct dentry *parent;
+	struct inode *inode, *dir;
+	unsigned fpos;
+	int ret;
+
+	_enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name);
+
+	/* lock down the parent dentry so we can peer at it */
+	parent = dget_parent(dentry);
+
+	dir = parent->d_inode;
+	inode = dentry->d_inode;
+
+	/* handle a negative dentry */
+	if (!inode)
+		goto out_bad;
+
+	/* handle a bad inode */
+	if (is_bad_inode(inode)) {
+		printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
+		       dentry->d_parent->d_name.name, dentry->d_name.name);
+		goto out_bad;
+	}
+
+	/* force a full lookup if the parent directory changed since the
+	 * server was last consulted
+	 * - otherwise this inode must still exist, even if the inode details
+	 *   themselves have changed
+	 */
+	if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
+		afs_vnode_fetch_status(AFS_FS_I(dir));
+
+	if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+		_debug("%s: parent dir deleted", dentry->d_name.name);
+		goto out_bad;
+	}
+
+	if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
+		_debug("%s: file already deleted", dentry->d_name.name);
+		goto out_bad;
+	}
+
+	if ((unsigned long) dentry->d_fsdata !=
+	    (unsigned long) AFS_FS_I(dir)->status.version) {
+		_debug("%s: parent changed %lu -> %u",
+		       dentry->d_name.name,
+		       (unsigned long) dentry->d_fsdata,
+		       (unsigned) AFS_FS_I(dir)->status.version);
+
+		/* search the directory for this vnode */
+		cookie.name	= dentry->d_name.name;
+		cookie.nlen	= dentry->d_name.len;
+		cookie.fid.vid	= AFS_FS_I(inode)->volume->vid;
+		cookie.found	= 0;
+
+		fpos = 0;
+		ret = afs_dir_iterate(dir, &fpos, &cookie,
+				      afs_dir_lookup_filldir);
+		if (ret < 0) {
+			_debug("failed to iterate dir %s: %d",
+			       parent->d_name.name, ret);
+			goto out_bad;
+		}
+
+		if (!cookie.found) {
+			_debug("%s: dirent not found", dentry->d_name.name);
+			goto not_found;
+		}
+
+		/* if the vnode ID has changed, then the dirent points to a
+		 * different file */
+		if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) {
+			_debug("%s: dirent changed", dentry->d_name.name);
+			goto not_found;
+		}
+
+		/* if the vnode ID uniquifier has changed, then the file has
+		 * been deleted */
+		if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) {
+			_debug("%s: file deleted (uq %u -> %u I:%lu)",
+			       dentry->d_name.name,
+			       cookie.fid.unique,
+			       AFS_FS_I(inode)->fid.unique,
+			       inode->i_version);
+			spin_lock(&AFS_FS_I(inode)->lock);
+			AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
+			spin_unlock(&AFS_FS_I(inode)->lock);
+			invalidate_remote_inode(inode);
+			goto out_bad;
+		}
+
+		dentry->d_fsdata =
+			(void *) (unsigned long) AFS_FS_I(dir)->status.version;
+	}
+
+ out_valid:
+	dput(parent);
+	_leave(" = 1 [valid]");
+	return 1;
+
+	/* the dirent, if it exists, now points to a different vnode */
+ not_found:
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+	spin_unlock(&dentry->d_lock);
+
+ out_bad:
+	if (inode) {
+		/* don't unhash if we have submounts */
+		if (have_submounts(dentry))
+			goto out_valid;
+	}
+
+	shrink_dcache_parent(dentry);
+
+	_debug("dropping dentry %s/%s",
+	       dentry->d_parent->d_name.name, dentry->d_name.name);
+	d_drop(dentry);
+
+	dput(parent);
+
+	_leave(" = 0 [bad]");
+	return 0;
+} /* end afs_d_revalidate() */
+
+/*****************************************************************************/
+/*
+ * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
+ * sleep)
+ * - called from dput() when d_count is going to 0.
+ * - return 1 to request dentry be unhashed, 0 otherwise
+ */
+static int afs_d_delete(struct dentry *dentry)
+{
+	_enter("%s", dentry->d_name.name);
+
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+		goto zap;
+
+	if (dentry->d_inode) {
+		if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED)
+			goto zap;
+	}
+
+	_leave(" = 0 [keep]");
+	return 0;
+
+ zap:
+	_leave(" = 1 [zap]");
+	return 1;
+} /* end afs_d_delete() */
diff --git a/fs/afs/errors.h b/fs/afs/errors.h
new file mode 100644
index 0000000..574d94a
--- /dev/null
+++ b/fs/afs/errors.h
@@ -0,0 +1,34 @@
+/* errors.h: AFS abort/error codes
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_ERRORS_H
+#define _LINUX_AFS_ERRORS_H
+
+#include "types.h"
+
+/* file server abort codes */
+typedef enum {
+	VSALVAGE	= 101,	/* volume needs salvaging */
+	VNOVNODE	= 102,	/* no such file/dir (vnode) */
+	VNOVOL		= 103,	/* no such volume or volume unavailable */
+	VVOLEXISTS	= 104,	/* volume name already exists */
+	VNOSERVICE	= 105,	/* volume not currently in service */
+	VOFFLINE	= 106,	/* volume is currently offline (more info available [VVL-spec]) */
+	VONLINE		= 107,	/* volume is already online */
+	VDISKFULL	= 108,	/* disk partition is full */
+	VOVERQUOTA	= 109,	/* volume's maximum quota exceeded */
+	VBUSY		= 110,	/* volume is temporarily unavailable */
+	VMOVED		= 111,	/* volume moved to new server - ask this FS where */
+} afs_rxfs_abort_t;
+
+extern int afs_abort_to_error(int abortcode);
+
+#endif /* _LINUX_AFS_ERRORS_H */
diff --git a/fs/afs/file.c b/fs/afs/file.c
new file mode 100644
index 0000000..6b6bb7c
--- /dev/null
+++ b/fs/afs/file.c
@@ -0,0 +1,305 @@
+/* file.c: AFS filesystem file handling
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include "volume.h"
+#include "vnode.h"
+#include <rxrpc/call.h>
+#include "internal.h"
+
+#if 0
+static int afs_file_open(struct inode *inode, struct file *file);
+static int afs_file_release(struct inode *inode, struct file *file);
+#endif
+
+static int afs_file_readpage(struct file *file, struct page *page);
+static int afs_file_invalidatepage(struct page *page, unsigned long offset);
+static int afs_file_releasepage(struct page *page, int gfp_flags);
+
+static ssize_t afs_file_write(struct file *file, const char __user *buf,
+			      size_t size, loff_t *off);
+
+struct inode_operations afs_file_inode_operations = {
+	.getattr	= afs_inode_getattr,
+};
+
+struct file_operations afs_file_file_operations = {
+	.read		= generic_file_read,
+	.write		= afs_file_write,
+	.mmap		= generic_file_mmap,
+#if 0
+	.open		= afs_file_open,
+	.release	= afs_file_release,
+	.fsync		= afs_file_fsync,
+#endif
+};
+
+struct address_space_operations afs_fs_aops = {
+	.readpage	= afs_file_readpage,
+	.sync_page	= block_sync_page,
+	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.releasepage	= afs_file_releasepage,
+	.invalidatepage	= afs_file_invalidatepage,
+};
+
+/*****************************************************************************/
+/*
+ * AFS file write
+ */
+static ssize_t afs_file_write(struct file *file, const char __user *buf,
+			      size_t size, loff_t *off)
+{
+	struct afs_vnode *vnode;
+
+	vnode = AFS_FS_I(file->f_dentry->d_inode);
+	if (vnode->flags & AFS_VNODE_DELETED)
+		return -ESTALE;
+
+	return -EIO;
+} /* end afs_file_write() */
+
+/*****************************************************************************/
+/*
+ * deal with notification that a page was read from the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_file_readpage_read_complete(void *cookie_data,
+					    struct page *page,
+					    void *data,
+					    int error)
+{
+	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
+
+	if (error)
+		SetPageError(page);
+	else
+		SetPageUptodate(page);
+	unlock_page(page);
+
+} /* end afs_file_readpage_read_complete() */
+#endif
+
+/*****************************************************************************/
+/*
+ * deal with notification that a page was written to the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_file_readpage_write_complete(void *cookie_data,
+					     struct page *page,
+					     void *data,
+					     int error)
+{
+	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
+
+	unlock_page(page);
+
+} /* end afs_file_readpage_write_complete() */
+#endif
+
+/*****************************************************************************/
+/*
+ * AFS read page from file (or symlink)
+ */
+static int afs_file_readpage(struct file *file, struct page *page)
+{
+	struct afs_rxfs_fetch_descriptor desc;
+#ifdef AFS_CACHING_SUPPORT
+	struct cachefs_page *pageio;
+#endif
+	struct afs_vnode *vnode;
+	struct inode *inode;
+	int ret;
+
+	inode = page->mapping->host;
+
+	_enter("{%lu},{%lu}", inode->i_ino, page->index);
+
+	vnode = AFS_FS_I(inode);
+
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+
+	ret = -ESTALE;
+	if (vnode->flags & AFS_VNODE_DELETED)
+		goto error;
+
+#ifdef AFS_CACHING_SUPPORT
+	ret = cachefs_page_get_private(page, &pageio, GFP_NOIO);
+	if (ret < 0)
+		goto error;
+
+	/* is it cached? */
+	ret = cachefs_read_or_alloc_page(vnode->cache,
+					 page,
+					 afs_file_readpage_read_complete,
+					 NULL,
+					 GFP_KERNEL);
+#else
+	ret = -ENOBUFS;
+#endif
+
+	switch (ret) {
+		/* read BIO submitted and wb-journal entry found */
+	case 1:
+		BUG(); // TODO - handle wb-journal match
+
+		/* read BIO submitted (page in cache) */
+	case 0:
+		break;
+
+		/* no page available in cache */
+	case -ENOBUFS:
+	case -ENODATA:
+	default:
+		desc.fid	= vnode->fid;
+		desc.offset	= page->index << PAGE_CACHE_SHIFT;
+		desc.size	= min((size_t) (inode->i_size - desc.offset),
+				      (size_t) PAGE_SIZE);
+		desc.buffer	= kmap(page);
+
+		clear_page(desc.buffer);
+
+		/* read the contents of the file from the server into the
+		 * page */
+		ret = afs_vnode_fetch_data(vnode, &desc);
+		kunmap(page);
+		if (ret < 0) {
+			if (ret == -ENOENT) {
+				_debug("got NOENT from server"
+				       " - marking file deleted and stale");
+				vnode->flags |= AFS_VNODE_DELETED;
+				ret = -ESTALE;
+			}
+
+#ifdef AFS_CACHING_SUPPORT
+			cachefs_uncache_page(vnode->cache, page);
+#endif
+			goto error;
+		}
+
+		SetPageUptodate(page);
+
+#ifdef AFS_CACHING_SUPPORT
+		if (cachefs_write_page(vnode->cache,
+				       page,
+				       afs_file_readpage_write_complete,
+				       NULL,
+				       GFP_KERNEL) != 0
+		    ) {
+			cachefs_uncache_page(vnode->cache, page);
+			unlock_page(page);
+		}
+#else
+		unlock_page(page);
+#endif
+	}
+
+	_leave(" = 0");
+	return 0;
+
+ error:
+	SetPageError(page);
+	unlock_page(page);
+
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_file_readpage() */
+
+/*****************************************************************************/
+/*
+ * get a page cookie for the specified page
+ */
+#ifdef AFS_CACHING_SUPPORT
+int afs_cache_get_page_cookie(struct page *page,
+			      struct cachefs_page **_page_cookie)
+{
+	int ret;
+
+	_enter("");
+	ret = cachefs_page_get_private(page, _page_cookie, GFP_NOIO);
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_cache_get_page_cookie() */
+#endif
+
+/*****************************************************************************/
+/*
+ * invalidate part or all of a page
+ */
+static int afs_file_invalidatepage(struct page *page, unsigned long offset)
+{
+	int ret = 1;
+
+	_enter("{%lu},%lu", page->index, offset);
+
+	BUG_ON(!PageLocked(page));
+
+	if (PagePrivate(page)) {
+#ifdef AFS_CACHING_SUPPORT
+		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+		cachefs_uncache_page(vnode->cache, page);
+#endif
+
+		/* We release buffers only if the entire page is being
+		 * invalidated.
+		 * The get_block cached value has been unconditionally
+		 * invalidated, so real IO is not possible anymore.
+		 */
+		if (offset == 0) {
+			BUG_ON(!PageLocked(page));
+
+			ret = 0;
+			if (!PageWriteback(page))
+				ret = page->mapping->a_ops->releasepage(page,
+									0);
+		}
+	}
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_file_invalidatepage() */
+
+/*****************************************************************************/
+/*
+ * release a page and cleanup its private data
+ */
+static int afs_file_releasepage(struct page *page, int gfp_flags)
+{
+	struct cachefs_page *pageio;
+
+	_enter("{%lu},%x", page->index, gfp_flags);
+
+	if (PagePrivate(page)) {
+#ifdef AFS_CACHING_SUPPORT
+		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+		cachefs_uncache_page(vnode->cache, page);
+#endif
+
+		pageio = (struct cachefs_page *) page->private;
+		page->private = 0;
+		ClearPagePrivate(page);
+
+		if (pageio)
+			kfree(pageio);
+	}
+
+	_leave(" = 0");
+	return 0;
+} /* end afs_file_releasepage() */
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
new file mode 100644
index 0000000..61bc371
--- /dev/null
+++ b/fs/afs/fsclient.c
@@ -0,0 +1,837 @@
+/* fsclient.c: AFS File Server client stubs
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "fsclient.h"
+#include "cmservice.h"
+#include "vnode.h"
+#include "server.h"
+#include "errors.h"
+#include "internal.h"
+
+#define FSFETCHSTATUS		132	/* AFS Fetch file status */
+#define FSFETCHDATA		130	/* AFS Fetch file data */
+#define FSGIVEUPCALLBACKS	147	/* AFS Discard callback promises */
+#define FSGETVOLUMEINFO		148	/* AFS Get root volume information */
+#define FSGETROOTVOLUME		151	/* AFS Get root volume name */
+#define FSLOOKUP		161	/* AFS lookup file in directory */
+
+/*****************************************************************************/
+/*
+ * map afs abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afs_rxfs_aemap(struct rxrpc_call *call)
+{
+	switch (call->app_err_state) {
+	case RXRPC_ESTATE_LOCAL_ABORT:
+		call->app_abort_code = -call->app_errno;
+		break;
+	case RXRPC_ESTATE_PEER_ABORT:
+		call->app_errno = afs_abort_to_error(call->app_abort_code);
+		break;
+	default:
+		break;
+	}
+} /* end afs_rxfs_aemap() */
+
+/*****************************************************************************/
+/*
+ * get the root volume name from a fileserver
+ * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ */
+#if 0
+int afs_rxfs_get_root_volume(struct afs_server *server,
+			     char *buf, size_t *buflen)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[2];
+	size_t sent;
+	int ret;
+	u32 param[1];
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	kenter("%p,%p,%u",server, buf, *buflen);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGETROOTVOLUME;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	param[0] = htonl(FSGETROOTVOLUME);
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
+		    signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	ret = -EINTR;
+	if (signal_pending(current))
+		goto abort;
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_ERROR:
+		ret = call->app_errno;
+		kdebug("Got Error: %d", ret);
+		goto out_unwait;
+
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		/* read the reply */
+		kdebug("Got Reply: qty=%d", call->app_ready_qty);
+
+		ret = -EBADMSG;
+		if (call->app_ready_qty <= 4)
+			goto abort;
+
+		ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0);
+		if (ret < 0)
+			goto abort;
+
+#if 0
+		/* unmarshall the reply */
+		bp = buffer;
+		for (loop = 0; loop < 65; loop++)
+			entry->name[loop] = ntohl(*bp++);
+		entry->name[64] = 0;
+
+		entry->type = ntohl(*bp++);
+		entry->num_servers = ntohl(*bp++);
+
+		for (loop = 0; loop < 8; loop++)
+			entry->servers[loop].addr.s_addr = *bp++;
+
+		for (loop = 0; loop < 8; loop++)
+			entry->servers[loop].partition = ntohl(*bp++);
+
+		for (loop = 0; loop < 8; loop++)
+			entry->servers[loop].flags = ntohl(*bp++);
+
+		for (loop = 0; loop < 3; loop++)
+			entry->volume_ids[loop] = ntohl(*bp++);
+
+		entry->clone_id = ntohl(*bp++);
+		entry->flags = ntohl(*bp);
+#endif
+
+		/* success */
+		ret = 0;
+		goto out_unwait;
+
+	default:
+		BUG();
+	}
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server, conn);
+ out:
+	kleave("");
+	return ret;
+} /* end afs_rxfs_get_root_volume() */
+#endif
+
+/*****************************************************************************/
+/*
+ * get information about a volume
+ */
+#if 0
+int afs_rxfs_get_volume_info(struct afs_server *server,
+			     const char *name,
+			     struct afs_volume_info *vinfo)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[3];
+	size_t sent;
+	int ret;
+	u32 param[2], *bp, zero;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%p,%s,%p", server, name, vinfo);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGETVOLUMEINFO;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	piov[1].iov_len = strlen(name);
+	piov[1].iov_base = (char *) name;
+
+	zero = 0;
+	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
+	piov[2].iov_base = &zero;
+
+	param[0] = htonl(FSGETVOLUMEINFO);
+	param[1] = htonl(piov[1].iov_len);
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call, 64);
+
+	ret = rxrpc_call_read_data(call, bp, 64,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0) {
+		if (ret == -ECONNABORTED) {
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	vinfo->vid = ntohl(*bp++);
+	vinfo->type = ntohl(*bp++);
+
+	vinfo->type_vids[0] = ntohl(*bp++);
+	vinfo->type_vids[1] = ntohl(*bp++);
+	vinfo->type_vids[2] = ntohl(*bp++);
+	vinfo->type_vids[3] = ntohl(*bp++);
+	vinfo->type_vids[4] = ntohl(*bp++);
+
+	vinfo->nservers = ntohl(*bp++);
+	vinfo->servers[0].addr.s_addr = *bp++;
+	vinfo->servers[1].addr.s_addr = *bp++;
+	vinfo->servers[2].addr.s_addr = *bp++;
+	vinfo->servers[3].addr.s_addr = *bp++;
+	vinfo->servers[4].addr.s_addr = *bp++;
+	vinfo->servers[5].addr.s_addr = *bp++;
+	vinfo->servers[6].addr.s_addr = *bp++;
+	vinfo->servers[7].addr.s_addr = *bp++;
+
+	ret = -EBADMSG;
+	if (vinfo->nservers > 8)
+		goto abort;
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server, conn);
+ out:
+	_leave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+
+} /* end afs_rxfs_get_volume_info() */
+#endif
+
+/*****************************************************************************/
+/*
+ * fetch the status information for a file
+ */
+int afs_rxfs_fetch_file_status(struct afs_server *server,
+			       struct afs_vnode *vnode,
+			       struct afs_volsync *volsync)
+{
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	size_t sent;
+	int ret;
+	__be32 *bp;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%p,{%u,%u,%u}",
+	       server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server, &callslot);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
+				&call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSFETCHSTATUS;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call, 16);
+	bp[0] = htonl(FSFETCHSTATUS);
+	bp[1] = htonl(vnode->fid.vid);
+	bp[2] = htonl(vnode->fid.vnode);
+	bp[3] = htonl(vnode->fid.unique);
+
+	piov[0].iov_len = 16;
+	piov[0].iov_base = bp;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
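+	/* (the reply is 120 bytes: a 21-word status record, a 3-word callback
+	 * triple and a 6-word volume sync block) */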
+	bp = rxrpc_call_alloc_scratch(call, 120);
+
+	ret = rxrpc_call_read_data(call, bp, 120,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0) {
+		if (ret == -ECONNABORTED) {
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	vnode->status.if_version	= ntohl(*bp++);
+	vnode->status.type		= ntohl(*bp++);
+	vnode->status.nlink		= ntohl(*bp++);
+	vnode->status.size		= ntohl(*bp++);
+	vnode->status.version		= ntohl(*bp++);
+	vnode->status.author		= ntohl(*bp++);
+	vnode->status.owner		= ntohl(*bp++);
+	vnode->status.caller_access	= ntohl(*bp++);
+	vnode->status.anon_access	= ntohl(*bp++);
+	vnode->status.mode		= ntohl(*bp++);
+	vnode->status.parent.vid	= vnode->fid.vid;
+	vnode->status.parent.vnode	= ntohl(*bp++);
+	vnode->status.parent.unique	= ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client	= ntohl(*bp++);
+	vnode->status.mtime_server	= ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	vnode->cb_version		= ntohl(*bp++);
+	vnode->cb_expiry		= ntohl(*bp++);
+	vnode->cb_type			= ntohl(*bp++);
+
+	if (volsync) {
+		volsync->creation	= ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server, &callslot);
+ out:
+	_leave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_fetch_file_status() */
+
+/*****************************************************************************/
+/*
+ * fetch the contents of a file or directory
+ */
+int afs_rxfs_fetch_file_data(struct afs_server *server,
+			     struct afs_vnode *vnode,
+			     struct afs_rxfs_fetch_descriptor *desc,
+			     struct afs_volsync *volsync)
+{
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	size_t sent;
+	int ret;
+	__be32 *bp;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}",
+	       server,
+	       desc->fid.vid,
+	       desc->fid.vnode,
+	       desc->fid.unique,
+	       desc->size,
+	       desc->offset);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server, &callslot);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
+				&call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSFETCHDATA;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
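+	/* (6 words: the opcode, the three FID words, and the byte offset and
+	 * length of the region wanted - 24 bytes in all) */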
+	bp = rxrpc_call_alloc_scratch(call, 24);
+	bp[0] = htonl(FSFETCHDATA);
+	bp[1] = htonl(desc->fid.vid);
+	bp[2] = htonl(desc->fid.vnode);
+	bp[3] = htonl(desc->fid.unique);
+	bp[4] = htonl(desc->offset);
+	bp[5] = htonl(desc->size);
+
+	piov[0].iov_len = 24;
+	piov[0].iov_base = bp;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the data count to arrive */
+	ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK);
+	if (ret < 0)
+		goto read_failed;
+
+	desc->actual = ntohl(bp[0]);
+	if (desc->actual != desc->size) {
+		ret = -EBADMSG;
+		goto abort;
+	}
+
+	/* call the app to read the actual data */
+	rxrpc_call_reset_scratch(call);
+
+	ret = rxrpc_call_read_data(call, desc->buffer, desc->actual,
+				   RXRPC_CALL_READ_BLOCK);
+	if (ret < 0)
+		goto read_failed;
+
+	/* wait for the rest of the reply to completely arrive */
+	rxrpc_call_reset_scratch(call);
+	bp = rxrpc_call_alloc_scratch(call, 120);
+
+	ret = rxrpc_call_read_data(call, bp, 120,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0)
+		goto read_failed;
+
+	/* unmarshall the reply */
+	vnode->status.if_version	= ntohl(*bp++);
+	vnode->status.type		= ntohl(*bp++);
+	vnode->status.nlink		= ntohl(*bp++);
+	vnode->status.size		= ntohl(*bp++);
+	vnode->status.version		= ntohl(*bp++);
+	vnode->status.author		= ntohl(*bp++);
+	vnode->status.owner		= ntohl(*bp++);
+	vnode->status.caller_access	= ntohl(*bp++);
+	vnode->status.anon_access	= ntohl(*bp++);
+	vnode->status.mode		= ntohl(*bp++);
+	vnode->status.parent.vid	= desc->fid.vid;
+	vnode->status.parent.vnode	= ntohl(*bp++);
+	vnode->status.parent.unique	= ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client	= ntohl(*bp++);
+	vnode->status.mtime_server	= ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	vnode->cb_version		= ntohl(*bp++);
+	vnode->cb_expiry		= ntohl(*bp++);
+	vnode->cb_type			= ntohl(*bp++);
+
+	if (volsync) {
+		volsync->creation	= ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server, &callslot);
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+ read_failed:
+	if (ret == -ECONNABORTED) {
+		ret = call->app_errno;
+		goto out_unwait;
+	}
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+
+} /* end afs_rxfs_fetch_file_data() */
+
+/*****************************************************************************/
+/*
+ * ask the AFS fileserver to discard a callback request on a file
+ */
+int afs_rxfs_give_up_callback(struct afs_server *server,
+			      struct afs_vnode *vnode)
+{
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	size_t sent;
+	int ret;
+	__be32 *bp;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%p,{%u,%u,%u}",
+	       server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server, &callslot);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
+				&call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGIVEUPCALLBACKS;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
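+	/* (one opcode word, a one-entry FID array - count plus three FID
+	 * words - and a one-entry callback array - count plus version, expiry
+	 * and type - making (1 + 4 + 4) * 4 = 36 bytes) */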
+	bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4);
+
+	piov[0].iov_len = (1 + 4 + 4) * 4;
+	piov[0].iov_base = bp;
+
+	*bp++ = htonl(FSGIVEUPCALLBACKS);
+	*bp++ = htonl(1);
+	*bp++ = htonl(vnode->fid.vid);
+	*bp++ = htonl(vnode->fid.vnode);
+	*bp++ = htonl(vnode->fid.unique);
+	*bp++ = htonl(1);
+	*bp++ = htonl(vnode->cb_version);
+	*bp++ = htonl(vnode->cb_expiry);
+	*bp++ = htonl(vnode->cb_type);
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
+		    signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	ret = -EINTR;
+	if (signal_pending(current))
+		goto abort;
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_ERROR:
+		ret = call->app_errno;
+		goto out_unwait;
+
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		ret = 0;
+		goto out_unwait;
+
+	default:
+		BUG();
+	}
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server, &callslot);
+ out:
+	_leave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_give_up_callback() */
+
+/*****************************************************************************/
+/*
+ * look a filename up in a directory
+ * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ */
+#if 0
+int afs_rxfs_lookup(struct afs_server *server,
+		    struct afs_vnode *dir,
+		    const char *filename,
+		    struct afs_vnode *vnode,
+		    struct afs_volsync *volsync)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[3];
+	size_t sent;
+	int ret;
+	u32 *bp, zero;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	kenter("%p,{%u,%u,%u},%s",
+	       server, fid->vid, fid->vnode, fid->unique, filename);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSLOOKUP;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call, 20);
+
+	zero = 0;
+
+	piov[0].iov_len = 20;
+	piov[0].iov_base = bp;
+	piov[1].iov_len = strlen(filename);
+	piov[1].iov_base = (char *) filename;
+	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
+	piov[2].iov_base = &zero;
+
+	*bp++ = htonl(FSLOOKUP);
+	*bp++ = htonl(dirfid->vid);
+	*bp++ = htonl(dirfid->vnode);
+	*bp++ = htonl(dirfid->unique);
+	*bp++ = htonl(piov[1].iov_len);
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call, 220);
+
+	ret = rxrpc_call_read_data(call, bp, 220,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0) {
+		if (ret == -ECONNABORTED) {
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	fid->vid		= ntohl(*bp++);
+	fid->vnode		= ntohl(*bp++);
+	fid->unique		= ntohl(*bp++);
+
+	vnode->status.if_version	= ntohl(*bp++);
+	vnode->status.type		= ntohl(*bp++);
+	vnode->status.nlink		= ntohl(*bp++);
+	vnode->status.size		= ntohl(*bp++);
+	vnode->status.version		= ntohl(*bp++);
+	vnode->status.author		= ntohl(*bp++);
+	vnode->status.owner		= ntohl(*bp++);
+	vnode->status.caller_access	= ntohl(*bp++);
+	vnode->status.anon_access	= ntohl(*bp++);
+	vnode->status.mode		= ntohl(*bp++);
+	vnode->status.parent.vid	= dirfid->vid;
+	vnode->status.parent.vnode	= ntohl(*bp++);
+	vnode->status.parent.unique	= ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client	= ntohl(*bp++);
+	vnode->status.mtime_server	= ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	dir->status.if_version		= ntohl(*bp++);
+	dir->status.type		= ntohl(*bp++);
+	dir->status.nlink		= ntohl(*bp++);
+	dir->status.size		= ntohl(*bp++);
+	dir->status.version		= ntohl(*bp++);
+	dir->status.author		= ntohl(*bp++);
+	dir->status.owner		= ntohl(*bp++);
+	dir->status.caller_access	= ntohl(*bp++);
+	dir->status.anon_access		= ntohl(*bp++);
+	dir->status.mode		= ntohl(*bp++);
+	dir->status.parent.vid		= dirfid->vid;
+	dir->status.parent.vnode	= ntohl(*bp++);
+	dir->status.parent.unique	= ntohl(*bp++);
+	bp++; /* seg size */
+	dir->status.mtime_client	= ntohl(*bp++);
+	dir->status.mtime_server	= ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	callback->fid		= *fid;
+	callback->version	= ntohl(*bp++);
+	callback->expiry	= ntohl(*bp++);
+	callback->type		= ntohl(*bp++);
+
+	if (volsync) {
+		volsync->creation	= ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server, conn);
+ out:
+	kleave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_lookup() */
+#endif
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h
new file mode 100644
index 0000000..8ba3e74
--- /dev/null
+++ b/fs/afs/fsclient.h
@@ -0,0 +1,54 @@
+/* fsclient.h: AFS File Server client stub declarations
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_FSCLIENT_H
+#define _LINUX_AFS_FSCLIENT_H
+
+#include "server.h"
+
+extern int afs_rxfs_get_volume_info(struct afs_server *server,
+				    const char *name,
+				    struct afs_volume_info *vinfo);
+
+extern int afs_rxfs_fetch_file_status(struct afs_server *server,
+				      struct afs_vnode *vnode,
+				      struct afs_volsync *volsync);
+
+struct afs_rxfs_fetch_descriptor {
+	struct afs_fid	fid;		/* file ID to fetch */
+	size_t		size;		/* total number of bytes to fetch */
+	off_t		offset;		/* offset in file to start from */
+	void		*buffer;	/* read buffer */
+	size_t		actual;		/* actual size sent back by server */
+};
+
+extern int afs_rxfs_fetch_file_data(struct afs_server *server,
+				    struct afs_vnode *vnode,
+				    struct afs_rxfs_fetch_descriptor *desc,
+				    struct afs_volsync *volsync);
+
+extern int afs_rxfs_give_up_callback(struct afs_server *server,
+				     struct afs_vnode *vnode);
+
+/* this doesn't appear to work in OpenAFS server */
+extern int afs_rxfs_lookup(struct afs_server *server,
+			   struct afs_vnode *dir,
+			   const char *filename,
+			   struct afs_vnode *vnode,
+			   struct afs_volsync *volsync);
+
+/* this is apparently mis-implemented in OpenAFS server */
+extern int afs_rxfs_get_root_volume(struct afs_server *server,
+				    char *buf,
+				    size_t *buflen);
+
+
+#endif /* _LINUX_AFS_FSCLIENT_H */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
new file mode 100644
index 0000000..c476fde
--- /dev/null
+++ b/fs/afs/inode.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ *          David Howells <dhowells@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "vnode.h"
+#include "super.h"
+#include "internal.h"
+
+struct afs_iget_data {
+	struct afs_fid		fid;
+	struct afs_volume	*volume;	/* volume on which this vnode resides */
+};
+
+/*****************************************************************************/
+/*
+ * map the AFS file status to the inode member variables
+ */
+static int afs_inode_map_status(struct afs_vnode *vnode)
+{
+	struct inode *inode = AFS_VNODE_TO_I(vnode);
+
+	_debug("FS: ft=%d lk=%d sz=%Zu ver=%Lu mod=%hu",
+	       vnode->status.type,
+	       vnode->status.nlink,
+	       vnode->status.size,
+	       vnode->status.version,
+	       vnode->status.mode);
+
+	switch (vnode->status.type) {
+	case AFS_FTYPE_FILE:
+		inode->i_mode	= S_IFREG | vnode->status.mode;
+		inode->i_op	= &afs_file_inode_operations;
+		inode->i_fop	= &afs_file_file_operations;
+		break;
+	case AFS_FTYPE_DIR:
+		inode->i_mode	= S_IFDIR | vnode->status.mode;
+		inode->i_op	= &afs_dir_inode_operations;
+		inode->i_fop	= &afs_dir_file_operations;
+		break;
+	case AFS_FTYPE_SYMLINK:
+		inode->i_mode	= S_IFLNK | vnode->status.mode;
+		inode->i_op	= &page_symlink_inode_operations;
+		break;
+	default:
+		printk("kAFS: AFS vnode with undefined type\n");
+		return -EBADMSG;
+	}
+
+	inode->i_nlink		= vnode->status.nlink;
+	inode->i_uid		= vnode->status.owner;
+	inode->i_gid		= 0;
+	inode->i_size		= vnode->status.size;
+	inode->i_ctime.tv_sec	= vnode->status.mtime_server;
+	inode->i_ctime.tv_nsec	= 0;
+	inode->i_atime		= inode->i_mtime = inode->i_ctime;
+	inode->i_blksize	= PAGE_CACHE_SIZE;
+	inode->i_blocks		= 0;
+	inode->i_version	= vnode->fid.unique;
+	inode->i_mapping->a_ops	= &afs_fs_aops;
+
+	/* check to see whether a symbolic link is really a mountpoint */
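+	/* (AFS marks a volume mount point with a symlink whose body starts
+	 * with '#' or '%' and ends with '.', e.g. "#cell:volume.";
+	 * afs_mntpt_check_symlink() looks for that pattern and sets
+	 * AFS_VNODE_MOUNTPOINT accordingly) */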
+	if (vnode->status.type == AFS_FTYPE_SYMLINK) {
+		afs_mntpt_check_symlink(vnode);
+
+		if (vnode->flags & AFS_VNODE_MOUNTPOINT) {
+			inode->i_mode	= S_IFDIR | vnode->status.mode;
+			inode->i_op	= &afs_mntpt_inode_operations;
+			inode->i_fop	= &afs_mntpt_file_operations;
+		}
+	}
+
+	return 0;
+} /* end afs_inode_map_status() */
+
+/*****************************************************************************/
+/*
+ * attempt to fetch the status of an inode, coalescing multiple simultaneous
+ * fetches
+ */
+static int afs_inode_fetch_status(struct inode *inode)
+{
+	struct afs_vnode *vnode;
+	int ret;
+
+	vnode = AFS_FS_I(inode);
+
+	ret = afs_vnode_fetch_status(vnode);
+
+	if (ret == 0)
+		ret = afs_inode_map_status(vnode);
+
+	return ret;
+
+} /* end afs_inode_fetch_status() */
+
+/*****************************************************************************/
+/*
+ * iget5() comparator
+ */
+static int afs_iget5_test(struct inode *inode, void *opaque)
+{
+	struct afs_iget_data *data = opaque;
+
+	return inode->i_ino == data->fid.vnode &&
+		inode->i_version == data->fid.unique;
+} /* end afs_iget5_test() */
+
+/*****************************************************************************/
+/*
+ * iget5() inode initialiser
+ */
+static int afs_iget5_set(struct inode *inode, void *opaque)
+{
+	struct afs_iget_data *data = opaque;
+	struct afs_vnode *vnode = AFS_FS_I(inode);
+
+	inode->i_ino = data->fid.vnode;
+	inode->i_version = data->fid.unique;
+	vnode->fid = data->fid;
+	vnode->volume = data->volume;
+
+	return 0;
+} /* end afs_iget5_set() */
+
+/*****************************************************************************/
+/*
+ * inode retrieval
+ */
+inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
+		    struct inode **_inode)
+{
+	struct afs_iget_data data = { .fid = *fid };
+	struct afs_super_info *as;
+	struct afs_vnode *vnode;
+	struct inode *inode;
+	int ret;
+
+	_enter(",{%u,%u,%u},,", fid->vid, fid->vnode, fid->unique);
+
+	as = sb->s_fs_info;
+	data.volume = as->volume;
+
+	inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
+			     &data);
+	if (!inode) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	vnode = AFS_FS_I(inode);
+
+	/* deal with an existing inode */
+	if (!(inode->i_state & I_NEW)) {
+		ret = afs_vnode_fetch_status(vnode);
+		if (ret == 0)
+			*_inode = inode;
+		else
+			iput(inode);
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+#ifdef AFS_CACHING_SUPPORT
+	/* set up caching before reading the status, as fetch-status reads the
+	 * first page of symlinks to see if they're really mntpts */
+	cachefs_acquire_cookie(vnode->volume->cache,
+			       NULL,
+			       vnode,
+			       &vnode->cache);
+#endif
+
+	/* okay... it's a new inode */
+	inode->i_flags |= S_NOATIME;
+	vnode->flags |= AFS_VNODE_CHANGED;
+	ret = afs_inode_fetch_status(inode);
+	if (ret < 0)
+		goto bad_inode;
+
+	/* success */
+	unlock_new_inode(inode);
+
+	*_inode = inode;
+	_leave(" = 0 [CB { v=%u x=%lu t=%u }]",
+	       vnode->cb_version,
+	       vnode->cb_timeout.timo_jif,
+	       vnode->cb_type);
+	return 0;
+
+	/* failure */
+ bad_inode:
+	make_bad_inode(inode);
+	unlock_new_inode(inode);
+	iput(inode);
+
+	_leave(" = %d [bad]", ret);
+	return ret;
+} /* end afs_iget() */
+
+/*****************************************************************************/
+/*
+ * read the attributes of an inode
+ */
+int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
+		      struct kstat *stat)
+{
+	struct afs_vnode *vnode;
+	struct inode *inode;
+	int ret;
+
+	inode = dentry->d_inode;
+
+	_enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version);
+
+	vnode = AFS_FS_I(inode);
+
+	ret = afs_inode_fetch_status(inode);
+	if (ret == -ENOENT) {
+		_leave(" = %d [%d %p]",
+		       ret, atomic_read(&dentry->d_count), dentry->d_inode);
+		return ret;
+	}
+	else if (ret < 0) {
+		make_bad_inode(inode);
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	/* transfer attributes from the inode structure to the stat
+	 * structure */
+	generic_fillattr(inode, stat);
+
+	_leave(" = 0 CB { v=%u x=%u t=%u }",
+	       vnode->cb_version,
+	       vnode->cb_expiry,
+	       vnode->cb_type);
+
+	return 0;
+} /* end afs_inode_getattr() */
+
+/*****************************************************************************/
+/*
+ * clear an AFS inode
+ */
+void afs_clear_inode(struct inode *inode)
+{
+	struct afs_vnode *vnode;
+
+	vnode = AFS_FS_I(inode);
+
+	_enter("ino=%lu { vn=%08x v=%u x=%u t=%u }",
+	       inode->i_ino,
+	       vnode->fid.vnode,
+	       vnode->cb_version,
+	       vnode->cb_expiry,
+	       vnode->cb_type
+	       );
+
+	BUG_ON(inode->i_ino != vnode->fid.vnode);
+
+	afs_vnode_give_up_callback(vnode);
+
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_relinquish_cookie(vnode->cache, 0);
+	vnode->cache = NULL;
+#endif
+
+	_leave("");
+} /* end afs_clear_inode() */
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
new file mode 100644
index 0000000..f09860b
--- /dev/null
+++ b/fs/afs/internal.h
@@ -0,0 +1,140 @@
+/* internal.h: internal AFS stuff
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_INTERNAL_H
+#define AFS_INTERNAL_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * debug tracing
+ */
+#define kenter(FMT, a...)	printk("==> %s("FMT")\n",__FUNCTION__ , ## a)
+#define kleave(FMT, a...)	printk("<== %s()"FMT"\n",__FUNCTION__ , ## a)
+#define kdebug(FMT, a...)	printk(FMT"\n" , ## a)
+#define kproto(FMT, a...)	printk("### "FMT"\n" , ## a)
+#define knet(FMT, a...)		printk(FMT"\n" , ## a)
+
+#ifdef __KDEBUG
+#define _enter(FMT, a...)	kenter(FMT , ## a)
+#define _leave(FMT, a...)	kleave(FMT , ## a)
+#define _debug(FMT, a...)	kdebug(FMT , ## a)
+#define _proto(FMT, a...)	kproto(FMT , ## a)
+#define _net(FMT, a...)		knet(FMT , ## a)
+#else
+#define _enter(FMT, a...)	do { } while(0)
+#define _leave(FMT, a...)	do { } while(0)
+#define _debug(FMT, a...)	do { } while(0)
+#define _proto(FMT, a...)	do { } while(0)
+#define _net(FMT, a...)		do { } while(0)
+#endif
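+
+/* Note: the _enter()/_leave()/_debug() tracing above is only compiled in when
+ * __KDEBUG is defined - e.g. (illustratively) by building fs/afs with
+ * EXTRA_CFLAGS=-D__KDEBUG; otherwise these calls compile away to nothing. */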
+
+static inline void afs_discard_my_signals(void)
+{
+	while (signal_pending(current)) {
+		siginfo_t sinfo;
+
+		spin_lock_irq(&current->sighand->siglock);
+		dequeue_signal(current, &current->blocked, &sinfo);
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/*
+ * cell.c
+ */
+extern struct rw_semaphore afs_proc_cells_sem;
+extern struct list_head afs_proc_cells;
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_cache_cell_index_def;
+#endif
+
+/*
+ * dir.c
+ */
+extern struct inode_operations afs_dir_inode_operations;
+extern struct file_operations afs_dir_file_operations;
+
+/*
+ * file.c
+ */
+extern struct address_space_operations afs_fs_aops;
+extern struct inode_operations afs_file_inode_operations;
+extern struct file_operations afs_file_file_operations;
+
+#ifdef AFS_CACHING_SUPPORT
+extern int afs_cache_get_page_cookie(struct page *page,
+				     struct cachefs_page **_page_cookie);
+#endif
+
+/*
+ * inode.c
+ */
+extern int afs_iget(struct super_block *sb, struct afs_fid *fid,
+		    struct inode **_inode);
+extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
+			     struct kstat *stat);
+extern void afs_clear_inode(struct inode *inode);
+
+/*
+ * key_afs.c
+ */
+#ifdef CONFIG_KEYS
+extern int afs_key_register(void);
+extern void afs_key_unregister(void);
+#endif
+
+/*
+ * main.c
+ */
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_netfs afs_cache_netfs;
+#endif
+
+/*
+ * mntpt.c
+ */
+extern struct inode_operations afs_mntpt_inode_operations;
+extern struct file_operations afs_mntpt_file_operations;
+extern struct afs_timer afs_mntpt_expiry_timer;
+extern struct afs_timer_ops afs_mntpt_expiry_timer_ops;
+extern unsigned long afs_mntpt_expiry_timeout;
+
+extern int afs_mntpt_check_symlink(struct afs_vnode *vnode);
+
+/*
+ * super.c
+ */
+extern int afs_fs_init(void);
+extern void afs_fs_exit(void);
+
+#define AFS_CB_HASH_COUNT (PAGE_SIZE / sizeof(struct list_head))
+
+extern struct list_head afs_cb_hash_tbl[];
+extern spinlock_t afs_cb_hash_lock;
+
+#define afs_cb_hash(SRV,FID) \
+	afs_cb_hash_tbl[((unsigned long)(SRV) + \
+			(FID)->vid + (FID)->vnode + (FID)->unique) % \
+			AFS_CB_HASH_COUNT]
+
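+/* Illustrative example (the callers live elsewhere in fs/afs): a callback
+ * promise for fid F granted by server S would presumably be chained on
+ * afs_cb_hash(S, &F) under afs_cb_hash_lock, so only a single bucket needs
+ * scanning when that server later breaks the callback. */
+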
+/*
+ * proc.c
+ */
+extern int afs_proc_init(void);
+extern void afs_proc_cleanup(void);
+extern int afs_proc_cell_setup(struct afs_cell *cell);
+extern void afs_proc_cell_remove(struct afs_cell *cell);
+
+#endif /* AFS_INTERNAL_H */
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
new file mode 100644
index 0000000..6fc88ae
--- /dev/null
+++ b/fs/afs/kafsasyncd.c
@@ -0,0 +1,257 @@
+/* kafsasyncd.c: AFS asynchronous operation daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * The AFS async daemon is used to do the following:
+ * - probe "dead" servers to see whether they've come back to life yet.
+ * - probe "live" servers that we haven't talked to for a while to see if
+ *   they are better candidates for serving than what we're currently using
+ * - poll volume location servers to keep up to date volume location lists
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "cell.h"
+#include "server.h"
+#include "volume.h"
+#include "kafsasyncd.h"
+#include "kafstimod.h"
+#include <rxrpc/call.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static DECLARE_COMPLETION(kafsasyncd_alive);
+static DECLARE_COMPLETION(kafsasyncd_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
+static struct task_struct *kafsasyncd_task;
+static int kafsasyncd_die;
+
+static int kafsasyncd(void *arg);
+
+static LIST_HEAD(kafsasyncd_async_attnq);
+static LIST_HEAD(kafsasyncd_async_busyq);
+static DEFINE_SPINLOCK(kafsasyncd_async_lock);
+
+static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
+{
+}
+
+static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
+{
+}
+
+/*****************************************************************************/
+/*
+ * start the async daemon
+ */
+int afs_kafsasyncd_start(void)
+{
+	int ret;
+
+	ret = kernel_thread(kafsasyncd, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	wait_for_completion(&kafsasyncd_alive);
+
+	return ret;
+} /* end afs_kafsasyncd_start() */
+
+/*****************************************************************************/
+/*
+ * stop the async daemon
+ */
+void afs_kafsasyncd_stop(void)
+{
+	/* get rid of my daemon */
+	kafsasyncd_die = 1;
+	wake_up(&kafsasyncd_sleepq);
+	wait_for_completion(&kafsasyncd_dead);
+
+} /* end afs_kafsasyncd_stop() */
+
+/*****************************************************************************/
+/*
+ * probing daemon
+ */
+static int kafsasyncd(void *arg)
+{
+	struct afs_async_op *op;
+	int die;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	kafsasyncd_task = current;
+
+	printk("kAFS: Started kafsasyncd %d\n", current->pid);
+
+	daemonize("kafsasyncd");
+
+	complete(&kafsasyncd_alive);
+
+	/* loop around looking for things to attend to */
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&kafsasyncd_sleepq, &myself);
+
+		for (;;) {
+			if (!list_empty(&kafsasyncd_async_attnq) ||
+			    signal_pending(current) ||
+			    kafsasyncd_die)
+				break;
+
+			schedule();
+			set_current_state(TASK_INTERRUPTIBLE);
+		}
+
+		remove_wait_queue(&kafsasyncd_sleepq, &myself);
+		set_current_state(TASK_RUNNING);
+
+		try_to_freeze(PF_FREEZE);
+
+		/* discard pending signals */
+		afs_discard_my_signals();
+
+		die = kafsasyncd_die;
+
+		/* deal with the next asynchronous operation requiring
+		 * attention */
+		if (!list_empty(&kafsasyncd_async_attnq)) {
+			struct afs_async_op *op;
+
+			_debug("@@@ Begin Asynchronous Operation");
+
+			op = NULL;
+			spin_lock(&kafsasyncd_async_lock);
+
+			if (!list_empty(&kafsasyncd_async_attnq)) {
+				op = list_entry(kafsasyncd_async_attnq.next,
+						struct afs_async_op, link);
+				list_del(&op->link);
+				list_add_tail(&op->link,
+					      &kafsasyncd_async_busyq);
+			}
+
+			spin_unlock(&kafsasyncd_async_lock);
+
+			_debug("@@@ Operation %p {%p}\n",
+			       op, op ? op->ops : NULL);
+
+			if (op)
+				op->ops->attend(op);
+
+			_debug("@@@ End Asynchronous Operation");
+		}
+
+	} while (!die);
+
+	/* need to kill all outstanding asynchronous operations before
+	 * exiting */
+	kafsasyncd_task = NULL;
+	spin_lock(&kafsasyncd_async_lock);
+
+	/* fold the busy and attention queues together */
+	list_splice_init(&kafsasyncd_async_busyq,
+			 &kafsasyncd_async_attnq);
+
+	/* dequeue kafsasyncd from the wait queues of all outstanding operations */
+	list_for_each_entry(op, &kafsasyncd_async_attnq, link) {
+		op->call->app_attn_func = kafsasyncd_null_call_attn_func;
+		op->call->app_error_func = kafsasyncd_null_call_error_func;
+		remove_wait_queue(&op->call->waitq, &op->waiter);
+	}
+
+	spin_unlock(&kafsasyncd_async_lock);
+
+	/* abort all the operations */
+	while (!list_empty(&kafsasyncd_async_attnq)) {
+		op = list_entry(kafsasyncd_async_attnq.next, struct afs_async_op, link);
+		list_del_init(&op->link);
+
+		rxrpc_call_abort(op->call, -EIO);
+		rxrpc_put_call(op->call);
+		op->call = NULL;
+
+		op->ops->discard(op);
+	}
+
+	/* and that's all */
+	_leave("");
+	complete_and_exit(&kafsasyncd_dead, 0);
+
+} /* end kafsasyncd() */
+
+/*****************************************************************************/
+/*
+ * begin an operation
+ * - place operation on busy queue
+ */
+void afs_kafsasyncd_begin_op(struct afs_async_op *op)
+{
+	_enter("");
+
+	spin_lock(&kafsasyncd_async_lock);
+
+	init_waitqueue_entry(&op->waiter, kafsasyncd_task);
+	add_wait_queue(&op->call->waitq, &op->waiter);
+
+	list_del(&op->link);
+	list_add_tail(&op->link, &kafsasyncd_async_busyq);
+
+	spin_unlock(&kafsasyncd_async_lock);
+
+	_leave("");
+} /* end afs_kafsasyncd_begin_op() */
+
+/*****************************************************************************/
+/*
+ * request attention for an operation
+ * - move to attention queue
+ */
+void afs_kafsasyncd_attend_op(struct afs_async_op *op)
+{
+	_enter("");
+
+	spin_lock(&kafsasyncd_async_lock);
+
+	list_del(&op->link);
+	list_add_tail(&op->link, &kafsasyncd_async_attnq);
+
+	spin_unlock(&kafsasyncd_async_lock);
+
+	wake_up(&kafsasyncd_sleepq);
+
+	_leave("");
+} /* end afs_kafsasyncd_attend_op() */
+
+/*****************************************************************************/
+/*
+ * terminate an operation
+ * - remove from either queue
+ */
+void afs_kafsasyncd_terminate_op(struct afs_async_op *op)
+{
+	_enter("");
+
+	spin_lock(&kafsasyncd_async_lock);
+
+	if (!list_empty(&op->link)) {
+		list_del_init(&op->link);
+		remove_wait_queue(&op->call->waitq, &op->waiter);
+	}
+
+	spin_unlock(&kafsasyncd_async_lock);
+
+	wake_up(&kafsasyncd_sleepq);
+
+	_leave("");
+} /* end afs_kafsasyncd_terminate_op() */
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h
new file mode 100644
index 0000000..791803f
--- /dev/null
+++ b/fs/afs/kafsasyncd.h
@@ -0,0 +1,52 @@
+/* kafsasyncd.h: AFS asynchronous operation daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_KAFSASYNCD_H
+#define _LINUX_AFS_KAFSASYNCD_H
+
+#include "types.h"
+
+struct afs_async_op;
+
+struct afs_async_op_ops {
+	void (*attend)(struct afs_async_op *op);
+	void (*discard)(struct afs_async_op *op);
+};
+
+/*****************************************************************************/
+/*
+ * asynchronous operation record
+ */
+struct afs_async_op
+{
+	struct list_head		link;
+	struct afs_server		*server;	/* server being contacted */
+	struct rxrpc_call		*call;		/* RxRPC call performing op */
+	wait_queue_t			waiter;		/* wait queue for kafsasyncd */
+	const struct afs_async_op_ops	*ops;		/* operations */
+};
+
+static inline void afs_async_op_init(struct afs_async_op *op,
+				     const struct afs_async_op_ops *ops)
+{
+	INIT_LIST_HEAD(&op->link);
+	op->call = NULL;
+	op->ops = ops;
+}
+
+extern int afs_kafsasyncd_start(void);
+extern void afs_kafsasyncd_stop(void);
+
+extern void afs_kafsasyncd_begin_op(struct afs_async_op *op);
+extern void afs_kafsasyncd_attend_op(struct afs_async_op *op);
+extern void afs_kafsasyncd_terminate_op(struct afs_async_op *op);
+
+#endif /* _LINUX_AFS_KAFSASYNCD_H */
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
new file mode 100644
index 0000000..86e710d
--- /dev/null
+++ b/fs/afs/kafstimod.c
@@ -0,0 +1,204 @@
+/* kafstimod.c: AFS timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "cell.h"
+#include "volume.h"
+#include "kafstimod.h"
+#include <asm/errno.h>
+#include "internal.h"
+
+static DECLARE_COMPLETION(kafstimod_alive);
+static DECLARE_COMPLETION(kafstimod_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
+static int kafstimod_die;
+
+static LIST_HEAD(kafstimod_list);
+static DEFINE_SPINLOCK(kafstimod_lock);
+
+static int kafstimod(void *arg);
+
+/*****************************************************************************/
+/*
+ * start the timeout daemon
+ */
+int afs_kafstimod_start(void)
+{
+	int ret;
+
+	ret = kernel_thread(kafstimod, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	wait_for_completion(&kafstimod_alive);
+
+	return ret;
+} /* end afs_kafstimod_start() */
+
+/*****************************************************************************/
+/*
+ * stop the timeout daemon
+ */
+void afs_kafstimod_stop(void)
+{
+	/* get rid of my daemon */
+	kafstimod_die = 1;
+	wake_up(&kafstimod_sleepq);
+	wait_for_completion(&kafstimod_dead);
+
+} /* end afs_kafstimod_stop() */
+
+/*****************************************************************************/
+/*
+ * timeout processing daemon
+ */
+static int kafstimod(void *arg)
+{
+	struct afs_timer *timer;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	printk("kAFS: Started kafstimod %d\n", current->pid);
+
+	daemonize("kafstimod");
+
+	complete(&kafstimod_alive);
+
+	/* loop around looking for things to attend to */
+ loop:
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&kafstimod_sleepq, &myself);
+
+	for (;;) {
+		unsigned long jif;
+		signed long timeout;
+
+		/* deal with the server being asked to die */
+		if (kafstimod_die) {
+			remove_wait_queue(&kafstimod_sleepq, &myself);
+			_leave("");
+			complete_and_exit(&kafstimod_dead, 0);
+		}
+
+		try_to_freeze(PF_FREEZE);
+
+		/* discard pending signals */
+		afs_discard_my_signals();
+
+		/* work out the time to elapse before the next event */
+		spin_lock(&kafstimod_lock);
+		if (list_empty(&kafstimod_list)) {
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
+		else {
+			timer = list_entry(kafstimod_list.next,
+					   struct afs_timer, link);
+			timeout = timer->timo_jif;
+			jif = jiffies;
+
+			if (time_before_eq((unsigned long) timeout, jif))
+				goto immediate;
+
+			timeout = (long) timeout - (long) jiffies;
+		}
+		spin_unlock(&kafstimod_lock);
+
+		schedule_timeout(timeout);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	/* the thing on the front of the queue needs processing
+	 * - we come here with the lock held and timer pointing to the expired
+	 *   entry
+	 */
+ immediate:
+	remove_wait_queue(&kafstimod_sleepq, &myself);
+	set_current_state(TASK_RUNNING);
+
+	_debug("@@@ Begin Timeout of %p", timer);
+
+	/* dequeue the timer */
+	list_del_init(&timer->link);
+	spin_unlock(&kafstimod_lock);
+
+	/* call the timeout function */
+	timer->ops->timed_out(timer);
+
+	_debug("@@@ End Timeout");
+	goto loop;
+
+} /* end kafstimod() */
+
+/*****************************************************************************/
+/*
+ * (re-)queue a timer
+ */
+void afs_kafstimod_add_timer(struct afs_timer *timer, unsigned long timeout)
+{
+	struct afs_timer *ptimer;
+	struct list_head *_p;
+
+	_enter("%p,%lu", timer, timeout);
+
+	spin_lock(&kafstimod_lock);
+
+	list_del(&timer->link);
+
+	/* the timer was deferred or reset - put it back in the queue at the
+	 * right place */
+	timer->timo_jif = jiffies + timeout;
+
+	list_for_each(_p, &kafstimod_list) {
+		ptimer = list_entry(_p, struct afs_timer, link);
+		if (time_before(timer->timo_jif, ptimer->timo_jif))
+			break;
+	}
+
+	list_add_tail(&timer->link, _p); /* insert before stopping point */
+
+	spin_unlock(&kafstimod_lock);
+
+	wake_up(&kafstimod_sleepq);
+
+	_leave("");
+} /* end afs_kafstimod_add_timer() */
+
+/*****************************************************************************/
+/*
+ * dequeue a timer
+ * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
+ */
+int afs_kafstimod_del_timer(struct afs_timer *timer)
+{
+	int ret = 0;
+
+	_enter("%p", timer);
+
+	spin_lock(&kafstimod_lock);
+
+	if (list_empty(&timer->link))
+		ret = -ENOENT;
+	else
+		list_del_init(&timer->link);
+
+	spin_unlock(&kafstimod_lock);
+
+	wake_up(&kafstimod_sleepq);
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_kafstimod_del_timer() */
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h
new file mode 100644
index 0000000..e312f1a
--- /dev/null
+++ b/fs/afs/kafstimod.h
@@ -0,0 +1,49 @@
+/* kafstimod.h: AFS timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_KAFSTIMOD_H
+#define _LINUX_AFS_KAFSTIMOD_H
+
+#include "types.h"
+
+struct afs_timer;
+
+struct afs_timer_ops {
+	/* called when the front of the timer queue has timed out */
+	void (*timed_out)(struct afs_timer *timer);
+};
+
+/*****************************************************************************/
+/*
+ * AFS timer/timeout record
+ */
+struct afs_timer
+{
+	struct list_head		link;		/* link in timer queue */
+	unsigned long			timo_jif;	/* timeout time */
+	const struct afs_timer_ops	*ops;		/* timeout expiry function */
+};
+
+static inline void afs_timer_init(struct afs_timer *timer,
+				  const struct afs_timer_ops *ops)
+{
+	INIT_LIST_HEAD(&timer->link);
+	timer->ops = ops;
+}
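+
+/* Typical usage (an illustrative sketch - the my_* names are made up):
+ *
+ *	static void my_timed_out(struct afs_timer *t) { ... }
+ *	static const struct afs_timer_ops my_ops = { .timed_out = my_timed_out };
+ *	static struct afs_timer my_timer;
+ *
+ *	afs_timer_init(&my_timer, &my_ops);
+ *	afs_kafstimod_add_timer(&my_timer, 5 * HZ);
+ *
+ * kafstimod then calls my_timed_out() once the timeout expires; see
+ * afs_mntpt_expiry_timer in fs/afs/mntpt.c for a real user of this API.
+ */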
+
+extern int afs_kafstimod_start(void);
+extern void afs_kafstimod_stop(void);
+
+extern void afs_kafstimod_add_timer(struct afs_timer *timer,
+				    unsigned long timeout);
+extern int afs_kafstimod_del_timer(struct afs_timer *timer);
+
+#endif /* _LINUX_AFS_KAFSTIMOD_H */
diff --git a/fs/afs/main.c b/fs/afs/main.c
new file mode 100644
index 0000000..913c689
--- /dev/null
+++ b/fs/afs/main.c
@@ -0,0 +1,286 @@
+/* main.c: AFS client file system
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/call.h>
+#include <rxrpc/peer.h>
+#include "cache.h"
+#include "cell.h"
+#include "server.h"
+#include "fsclient.h"
+#include "cmservice.h"
+#include "kafstimod.h"
+#include "kafsasyncd.h"
+#include "internal.h"
+
+struct rxrpc_transport *afs_transport;
+
+static int afs_adding_peer(struct rxrpc_peer *peer);
+static void afs_discarding_peer(struct rxrpc_peer *peer);
+
+
+MODULE_DESCRIPTION("AFS Client File System");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+static char *rootcell;
+
+module_param(rootcell, charp, 0);
+MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
+
+
+static struct rxrpc_peer_ops afs_peer_ops = {
+	.adding		= afs_adding_peer,
+	.discarding	= afs_discarding_peer,
+};
+
+struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
+DEFINE_SPINLOCK(afs_cb_hash_lock);
+
+#ifdef AFS_CACHING_SUPPORT
+static struct cachefs_netfs_operations afs_cache_ops = {
+	.get_page_cookie	= afs_cache_get_page_cookie,
+};
+
+struct cachefs_netfs afs_cache_netfs = {
+	.name			= "afs",
+	.version		= 0,
+	.ops			= &afs_cache_ops,
+};
+#endif
+
+/*****************************************************************************/
+/*
+ * initialise the AFS client FS module
+ */
+static int __init afs_init(void)
+{
+	int loop, ret;
+
+	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
+
+	/* initialise the callback hash table */
+	spin_lock_init(&afs_cb_hash_lock);
+	for (loop = AFS_CB_HASH_COUNT - 1; loop >= 0; loop--)
+		INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
+
+	/* register the /proc stuff */
+	ret = afs_proc_init();
+	if (ret < 0)
+		return ret;
+
+#ifdef AFS_CACHING_SUPPORT
+	/* we want to be able to cache */
+	ret = cachefs_register_netfs(&afs_cache_netfs,
+				     &afs_cache_cell_index_def);
+	if (ret < 0)
+		goto error;
+#endif
+
+#ifdef CONFIG_KEYS_TURNED_OFF
+	ret = afs_key_register();
+	if (ret < 0)
+		goto error_cache;
+#endif
+
+	/* initialise the cell DB */
+	ret = afs_cell_init(rootcell);
+	if (ret < 0)
+		goto error_keys;
+
+	/* start the timeout daemon */
+	ret = afs_kafstimod_start();
+	if (ret < 0)
+		goto error_keys;
+
+	/* start the async operation daemon */
+	ret = afs_kafsasyncd_start();
+	if (ret < 0)
+		goto error_kafstimod;
+
+	/* create the RxRPC transport */
+	ret = rxrpc_create_transport(7001, &afs_transport);
+	if (ret < 0)
+		goto error_kafsasyncd;
+
+	afs_transport->peer_ops = &afs_peer_ops;
+
+	/* register the filesystems */
+	ret = afs_fs_init();
+	if (ret < 0)
+		goto error_transport;
+
+	return ret;
+
+ error_transport:
+	rxrpc_put_transport(afs_transport);
+ error_kafsasyncd:
+	afs_kafsasyncd_stop();
+ error_kafstimod:
+	afs_kafstimod_stop();
+ error_keys:
+#ifdef CONFIG_KEYS_TURNED_OFF
+	afs_key_unregister();
+ error_cache:
+#endif
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_unregister_netfs(&afs_cache_netfs);
+ error:
+#endif
+	afs_cell_purge();
+	afs_proc_cleanup();
+	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
+	return ret;
+} /* end afs_init() */
+
+/* XXX late_initcall is kludgy, but the only alternative seems to be to create
+ * a transport upon the first mount, which is worse. Or is it?
+ */
+late_initcall(afs_init);	/* must be called after net/ to create socket */
+/*****************************************************************************/
+/*
+ * clean up on module removal
+ */
+static void __exit afs_exit(void)
+{
+	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
+
+	afs_fs_exit();
+	rxrpc_put_transport(afs_transport);
+	afs_kafstimod_stop();
+	afs_kafsasyncd_stop();
+	afs_cell_purge();
+#ifdef CONFIG_KEYS_TURNED_OFF
+	afs_key_unregister();
+#endif
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_unregister_netfs(&afs_cache_netfs);
+#endif
+	afs_proc_cleanup();
+
+} /* end afs_exit() */
+
+module_exit(afs_exit);
+
+/*****************************************************************************/
+/*
+ * notification that new peer record is being added
+ * - called from krxsecd
+ * - return an error to induce an abort
+ * - mustn't sleep (caller holds an rwlock)
+ */
+static int afs_adding_peer(struct rxrpc_peer *peer)
+{
+	struct afs_server *server;
+	int ret;
+
+	_debug("kAFS: Adding new peer %08x\n", ntohl(peer->addr.s_addr));
+
+	/* determine which server the peer resides in (if any) */
+	ret = afs_server_find_by_peer(peer, &server);
+	if (ret < 0)
+		return ret; /* none that we recognise, so abort */
+
+	_debug("Server %p{u=%d}\n", server, atomic_read(&server->usage));
+
+	_debug("Cell %p{u=%d}\n",
+	       server->cell, atomic_read(&server->cell->usage));
+
+	/* cross-point the structs under a global lock */
+	spin_lock(&afs_server_peer_lock);
+	peer->user = server;
+	server->peer = peer;
+	spin_unlock(&afs_server_peer_lock);
+
+	afs_put_server(server);
+
+	return 0;
+} /* end afs_adding_peer() */
+
+/*****************************************************************************/
+/*
+ * notification that a peer record is being discarded
+ * - called from krxiod or krxsecd
+ */
+static void afs_discarding_peer(struct rxrpc_peer *peer)
+{
+	struct afs_server *server;
+
+	_enter("%p", peer);
+
+	_debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
+	       ntohl(peer->addr.s_addr),
+	       (long) (peer->rtt / 1000),
+	       (long) (peer->rtt % 1000));
+
+	/* uncross-point the structs under a global lock */
+	spin_lock(&afs_server_peer_lock);
+	server = peer->user;
+	if (server) {
+		peer->user = NULL;
+		server->peer = NULL;
+	}
+	spin_unlock(&afs_server_peer_lock);
+
+	_leave("");
+
+} /* end afs_discarding_peer() */
+
+/*****************************************************************************/
+/*
+ * clear the dead space between task_struct and kernel stack
+ * - called by supplying -finstrument-functions to gcc
+ */
+#if 0
+void __cyg_profile_func_enter (void *this_fn, void *call_site)
+__attribute__((no_instrument_function));
+
+void __cyg_profile_func_enter (void *this_fn, void *call_site)
+{
+       asm volatile("  movl    %%esp,%%edi     \n"
+                    "  andl    %0,%%edi        \n"
+                    "  addl    %1,%%edi        \n"
+                    "  movl    %%esp,%%ecx     \n"
+                    "  subl    %%edi,%%ecx     \n"
+                    "  shrl    $2,%%ecx        \n"
+                    "  movl    $0xedededed,%%eax     \n"
+                    "  rep stosl               \n"
+                    :
+                    : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
+                    : "eax", "ecx", "edi", "memory", "cc"
+                    );
+}
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+__attribute__((no_instrument_function));
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+{
+       asm volatile("  movl    %%esp,%%edi     \n"
+                    "  andl    %0,%%edi        \n"
+                    "  addl    %1,%%edi        \n"
+                    "  movl    %%esp,%%ecx     \n"
+                    "  subl    %%edi,%%ecx     \n"
+                    "  shrl    $2,%%ecx        \n"
+                    "  movl    $0xdadadada,%%eax     \n"
+                    "  rep stosl               \n"
+                    :
+                    : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
+                    : "eax", "ecx", "edi", "memory", "cc"
+                    );
+}
+#endif
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
new file mode 100644
index 0000000..e4fce66
--- /dev/null
+++ b/fs/afs/misc.c
@@ -0,0 +1,39 @@
+/* misc.c: miscellaneous bits
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include "errors.h"
+#include "internal.h"
+
+/*****************************************************************************/
+/*
+ * convert an AFS abort code to a Linux error number
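+ * - example (illustrative): a fileserver call that aborts with VOVERQUOTA is
+ *   presumably reported back to the original caller as -EDQUOT via this table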
+ */
+int afs_abort_to_error(int abortcode)
+{
+	switch (abortcode) {
+	case VSALVAGE:		return -EIO;
+	case VNOVNODE:		return -ENOENT;
+	case VNOVOL:		return -ENXIO;
+	case VVOLEXISTS:	return -EEXIST;
+	case VNOSERVICE:	return -EIO;
+	case VOFFLINE:		return -ENOENT;
+	case VONLINE:		return -EEXIST;
+	case VDISKFULL:		return -ENOSPC;
+	case VOVERQUOTA:	return -EDQUOT;
+	case VBUSY:		return -EBUSY;
+	case VMOVED:		return -ENXIO;
+	default:		return -EIO;
+	}
+
+} /* end afs_abort_to_error() */
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
new file mode 100644
index 0000000..bfc28ab
--- /dev/null
+++ b/fs/afs/mntpt.c
@@ -0,0 +1,287 @@
+/* mntpt.c: mountpoint management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/namespace.h>
+#include "super.h"
+#include "cell.h"
+#include "volume.h"
+#include "vnode.h"
+#include "internal.h"
+
+
+static struct dentry *afs_mntpt_lookup(struct inode *dir,
+				       struct dentry *dentry,
+				       struct nameidata *nd);
+static int afs_mntpt_open(struct inode *inode, struct file *file);
+static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
+
+struct file_operations afs_mntpt_file_operations = {
+	.open		= afs_mntpt_open,
+};
+
+struct inode_operations afs_mntpt_inode_operations = {
+	.lookup		= afs_mntpt_lookup,
+	.follow_link	= afs_mntpt_follow_link,
+	.readlink	= page_readlink,
+	.getattr	= afs_inode_getattr,
+};
+
+static LIST_HEAD(afs_vfsmounts);
+
+static void afs_mntpt_expiry_timed_out(struct afs_timer *timer);
+
+struct afs_timer_ops afs_mntpt_expiry_timer_ops = {
+	.timed_out	= afs_mntpt_expiry_timed_out,
+};
+
+struct afs_timer afs_mntpt_expiry_timer;
+
+unsigned long afs_mntpt_expiry_timeout = 20;
+
+/*****************************************************************************/
+/*
+ * check a symbolic link to see whether it actually encodes a mountpoint
+ * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
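+ * - for reference, an AFS mount point is a symlink whose body has the form
+ *   "#cell:volume." or "%cell:volume." (trailing dot included), e.g. the
+ *   illustrative "#example.com:root.cell."; the first/last byte checks below
+ *   test for exactly that shape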
+ */
+int afs_mntpt_check_symlink(struct afs_vnode *vnode)
+{
+	struct page *page;
+	filler_t *filler;
+	size_t size;
+	char *buf;
+	int ret;
+
+	_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
+
+	/* read the contents of the symlink into the pagecache */
+	filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
+
+	page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
+			       filler, NULL);
+	if (IS_ERR(page)) {
+		ret = PTR_ERR(page);
+		goto out;
+	}
+
+	ret = -EIO;
+	wait_on_page_locked(page);
+	buf = kmap(page);
+	if (!PageUptodate(page))
+		goto out_free;
+	if (PageError(page))
+		goto out_free;
+
+	/* examine the symlink's contents */
+	size = vnode->status.size;
+	_debug("symlink to %*.*s", (int) size, (int) size, buf);
+
+	if (size > 2 &&
+	    (buf[0] == '%' || buf[0] == '#') &&
+	    buf[size - 1] == '.'
+	    ) {
+		_debug("symlink is a mountpoint");
+		spin_lock(&vnode->lock);
+		vnode->flags |= AFS_VNODE_MOUNTPOINT;
+		spin_unlock(&vnode->lock);
+	}
+
+	ret = 0;
+
+ out_free:
+	kunmap(page);
+	page_cache_release(page);
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_mntpt_check_symlink() */
+
+/*****************************************************************************/
+/*
+ * no valid lookup procedure on this sort of dir
+ */
+static struct dentry *afs_mntpt_lookup(struct inode *dir,
+				       struct dentry *dentry,
+				       struct nameidata *nd)
+{
+	kenter("%p,%p{%p{%s},%s}",
+	       dir,
+	       dentry,
+	       dentry->d_parent,
+	       dentry->d_parent ?
+	       dentry->d_parent->d_name.name : (const unsigned char *) "",
+	       dentry->d_name.name);
+
+	return ERR_PTR(-EREMOTE);
+} /* end afs_mntpt_lookup() */
+
+/*****************************************************************************/
+/*
+ * no valid open procedure on this sort of dir
+ */
+static int afs_mntpt_open(struct inode *inode, struct file *file)
+{
+	kenter("%p,%p{%p{%s},%s}",
+	       inode, file,
+	       file->f_dentry->d_parent,
+	       file->f_dentry->d_parent ?
+	       file->f_dentry->d_parent->d_name.name :
+	       (const unsigned char *) "",
+	       file->f_dentry->d_name.name);
+
+	return -EREMOTE;
+} /* end afs_mntpt_open() */
+
+/*****************************************************************************/
+/*
+ * create a vfsmount to be automounted
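+ * - illustrative outcome: for a mount point symlink reading
+ *   "#example.com:root.cell." (a made-up cell), this amounts to mounting
+ *   device "#example.com:root.cell." with options "cell=example.com", plus
+ *   ",rwpath" if the parent volume is a read/write one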
+ */
+static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
+{
+	struct afs_super_info *super;
+	struct vfsmount *mnt;
+	struct page *page = NULL;
+	size_t size;
+	char *buf, *devname = NULL, *options = NULL;
+	filler_t *filler;
+	int ret;
+
+	kenter("{%s}", mntpt->d_name.name);
+
+	BUG_ON(!mntpt->d_inode);
+
+	ret = -EINVAL;
+	size = mntpt->d_inode->i_size;
+	if (size > PAGE_SIZE - 1)
+		goto error;
+
+	ret = -ENOMEM;
+	devname = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!devname)
+		goto error;
+
+	options = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!options)
+		goto error;
+
+	/* read the contents of the AFS special symlink */
+	filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
+
+	page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+	if (IS_ERR(page)) {
+		ret = PTR_ERR(page);
+		goto error;
+	}
+
+	ret = -EIO;
+	wait_on_page_locked(page);
+	if (!PageUptodate(page) || PageError(page))
+		goto error;
+
+	buf = kmap(page);
+	memcpy(devname, buf, size);
+	kunmap(page);
+	page_cache_release(page);
+	page = NULL;
+
+	/* work out what options we want */
+	super = AFS_FS_S(mntpt->d_sb);
+	memcpy(options, "cell=", 5);
+	strcpy(options + 5, super->volume->cell->name);
+	if (super->volume->type == AFSVL_RWVOL)
+		strcat(options, ",rwpath");
+
+	/* try and do the mount */
+	kdebug("--- attempting mount %s -o %s ---", devname, options);
+	mnt = do_kern_mount("afs", 0, devname, options);
+	kdebug("--- mount result %p ---", mnt);
+
+	free_page((unsigned long) devname);
+	free_page((unsigned long) options);
+	kleave(" = %p", mnt);
+	return mnt;
+
+ error:
+	if (page)
+		page_cache_release(page);
+	if (devname)
+		free_page((unsigned long) devname);
+	if (options)
+		free_page((unsigned long) options);
+	kleave(" = %d", ret);
+	return ERR_PTR(ret);
+} /* end afs_mntpt_do_automount() */
+
+/*****************************************************************************/
+/*
+ * follow a link from a mountpoint directory, thus causing it to be mounted
+ */
+static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct vfsmount *newmnt;
+	struct dentry *old_dentry;
+	int err;
+
+	kenter("%p{%s},{%s:%p{%s}}",
+	       dentry,
+	       dentry->d_name.name,
+	       nd->mnt->mnt_devname,
+	       nd->dentry,
+	       nd->dentry->d_name.name);
+
+	newmnt = afs_mntpt_do_automount(dentry);
+	if (IS_ERR(newmnt)) {
+		path_release(nd);
+		return PTR_ERR(newmnt);
+	}
+
+	old_dentry = nd->dentry;
+	nd->dentry = dentry;
+	err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts);
+	nd->dentry = old_dentry;
+
+	path_release(nd);
+
+	if (!err) {
+		mntget(newmnt);
+		nd->mnt = newmnt;
+		dget(newmnt->mnt_root);
+		nd->dentry = newmnt->mnt_root;
+	}
+
+	kleave(" = %d", err);
+	return err;
+} /* end afs_mntpt_follow_link() */
+
+/*****************************************************************************/
+/*
+ * handle mountpoint expiry timer going off
+ */
+static void afs_mntpt_expiry_timed_out(struct afs_timer *timer)
+{
+	kenter("");
+
+	mark_mounts_for_expiry(&afs_vfsmounts);
+
+	afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
+				afs_mntpt_expiry_timeout * HZ);
+
+	kleave("");
+} /* end afs_mntpt_expiry_timed_out() */
diff --git a/fs/afs/mount.h b/fs/afs/mount.h
new file mode 100644
index 0000000..9d2f46e
--- /dev/null
+++ b/fs/afs/mount.h
@@ -0,0 +1,23 @@
+/* mount.h: mount parameters
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_MOUNT_H
+#define _LINUX_AFS_MOUNT_H
+
+struct afs_mountdata {
+	const char		*volume;	/* name of volume */
+	const char		*cell;		/* name of cell containing volume */
+	const char		*cache;		/* name of cache block device */
+	size_t			nservers;	/* number of server addresses listed */
+	uint32_t		servers[10];	/* IP addresses of servers in this cell */
+};
+
+#endif /* _LINUX_AFS_MOUNT_H */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
new file mode 100644
index 0000000..9c81b8f
--- /dev/null
+++ b/fs/afs/proc.c
@@ -0,0 +1,857 @@
+/* proc.c: /proc interface for AFS
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "cell.h"
+#include "volume.h"
+#include <asm/uaccess.h>
+#include "internal.h"
+
+static struct proc_dir_entry *proc_afs;
+
+
+static int afs_proc_cells_open(struct inode *inode, struct file *file);
+static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos);
+static void afs_proc_cells_stop(struct seq_file *p, void *v);
+static int afs_proc_cells_show(struct seq_file *m, void *v);
+static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
+				    size_t size, loff_t *_pos);
+
+static struct seq_operations afs_proc_cells_ops = {
+	.start	= afs_proc_cells_start,
+	.next	= afs_proc_cells_next,
+	.stop	= afs_proc_cells_stop,
+	.show	= afs_proc_cells_show,
+};
+
+static struct file_operations afs_proc_cells_fops = {
+	.open		= afs_proc_cells_open,
+	.read		= seq_read,
+	.write		= afs_proc_cells_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int afs_proc_rootcell_open(struct inode *inode, struct file *file);
+static int afs_proc_rootcell_release(struct inode *inode, struct file *file);
+static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf,
+				      size_t size, loff_t *_pos);
+static ssize_t afs_proc_rootcell_write(struct file *file,
+				       const char __user *buf,
+				       size_t size, loff_t *_pos);
+
+static struct file_operations afs_proc_rootcell_fops = {
+	.open		= afs_proc_rootcell_open,
+	.read		= afs_proc_rootcell_read,
+	.write		= afs_proc_rootcell_write,
+	.llseek		= no_llseek,
+	.release	= afs_proc_rootcell_release
+};
+
+static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
+static int afs_proc_cell_volumes_release(struct inode *inode,
+					 struct file *file);
+static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
+					loff_t *pos);
+static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
+
+static struct seq_operations afs_proc_cell_volumes_ops = {
+	.start	= afs_proc_cell_volumes_start,
+	.next	= afs_proc_cell_volumes_next,
+	.stop	= afs_proc_cell_volumes_stop,
+	.show	= afs_proc_cell_volumes_show,
+};
+
+static struct file_operations afs_proc_cell_volumes_fops = {
+	.open		= afs_proc_cell_volumes_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= afs_proc_cell_volumes_release,
+};
+
+static int afs_proc_cell_vlservers_open(struct inode *inode,
+					struct file *file);
+static int afs_proc_cell_vlservers_release(struct inode *inode,
+					   struct file *file);
+static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
+					  loff_t *pos);
+static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
+
+static struct seq_operations afs_proc_cell_vlservers_ops = {
+	.start	= afs_proc_cell_vlservers_start,
+	.next	= afs_proc_cell_vlservers_next,
+	.stop	= afs_proc_cell_vlservers_stop,
+	.show	= afs_proc_cell_vlservers_show,
+};
+
+static struct file_operations afs_proc_cell_vlservers_fops = {
+	.open		= afs_proc_cell_vlservers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= afs_proc_cell_vlservers_release,
+};
+
+static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
+static int afs_proc_cell_servers_release(struct inode *inode,
+					 struct file *file);
+static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_servers_next(struct seq_file *p, void *v,
+					loff_t *pos);
+static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
+
+static struct seq_operations afs_proc_cell_servers_ops = {
+	.start	= afs_proc_cell_servers_start,
+	.next	= afs_proc_cell_servers_next,
+	.stop	= afs_proc_cell_servers_stop,
+	.show	= afs_proc_cell_servers_show,
+};
+
+static struct file_operations afs_proc_cell_servers_fops = {
+	.open		= afs_proc_cell_servers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= afs_proc_cell_servers_release,
+};
+
+/*****************************************************************************/
+/*
+ * initialise the /proc/fs/afs/ directory
+ */
+int afs_proc_init(void)
+{
+	struct proc_dir_entry *p;
+
+	_enter("");
+
+	proc_afs = proc_mkdir("fs/afs", NULL);
+	if (!proc_afs)
+		goto error;
+	proc_afs->owner = THIS_MODULE;
+
+	p = create_proc_entry("cells", 0, proc_afs);
+	if (!p)
+		goto error_proc;
+	p->proc_fops = &afs_proc_cells_fops;
+	p->owner = THIS_MODULE;
+
+	p = create_proc_entry("rootcell", 0, proc_afs);
+	if (!p)
+		goto error_cells;
+	p->proc_fops = &afs_proc_rootcell_fops;
+	p->owner = THIS_MODULE;
+
+	_leave(" = 0");
+	return 0;
+
+ error_cells:
+ 	remove_proc_entry("cells", proc_afs);
+ error_proc:
+	remove_proc_entry("fs/afs", NULL);
+ error:
+	_leave(" = -ENOMEM");
+	return -ENOMEM;
+
+} /* end afs_proc_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the /proc/fs/afs/ directory
+ */
+void afs_proc_cleanup(void)
+{
+	remove_proc_entry("cells", proc_afs);
+
+	remove_proc_entry("fs/afs", NULL);
+
+} /* end afs_proc_cleanup() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/cells" which provides a summary of extant cells
+ */
+static int afs_proc_cells_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	int ret;
+
+	ret = seq_open(file, &afs_proc_cells_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = PDE(inode)->data;
+
+	return 0;
+} /* end afs_proc_cells_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the cells list and return the
+ * first item
+ */
+static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	loff_t pos = *_pos;
+
+	/* lock the list against modification */
+	down_read(&afs_proc_cells_sem);
+
+	/* allow for the header line */
+	if (!pos)
+		return (void *) 1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p, &afs_proc_cells)
+		if (!pos--)
+			break;
+
+	return _p != &afs_proc_cells ? _p : NULL;
+} /* end afs_proc_cells_start() */
+
+/*****************************************************************************/
+/*
+ * move to next cell in cells list
+ */
+static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
+{
+	struct list_head *_p;
+
+	(*pos)++;
+
+	_p = v;
+	_p = v == (void *) 1 ? afs_proc_cells.next : _p->next;
+
+	return _p != &afs_proc_cells ? _p : NULL;
+} /* end afs_proc_cells_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the cells list
+ */
+static void afs_proc_cells_stop(struct seq_file *p, void *v)
+{
+	up_read(&afs_proc_cells_sem);
+
+} /* end afs_proc_cells_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of cell lines
+ */
+static int afs_proc_cells_show(struct seq_file *m, void *v)
+{
+	struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
+
+	/* display header on line 1 */
+	if (v == (void *) 1) {
+		seq_puts(m, "USE NAME\n");
+		return 0;
+	}
+
+	/* display one cell per line on subsequent lines */
+	seq_printf(m, "%3d %s\n", atomic_read(&cell->usage), cell->name);
+
+	return 0;
+} /* end afs_proc_cells_show() */
+
+/*****************************************************************************/
+/*
+ * handle writes to /proc/fs/afs/cells
+ * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]"
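+ *   e.g. (illustrative values): echo "add example.com 10.0.0.10:10.0.0.11"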
+ */
+static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
+				    size_t size, loff_t *_pos)
+{
+	char *kbuf, *name, *args;
+	int ret;
+
+	/* start by dragging the command into memory */
+	if (size <= 1 || size >= PAGE_SIZE)
+		return -EINVAL;
+
+	kbuf = kmalloc(size + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf, buf, size) != 0)
+		goto done;
+	kbuf[size] = 0;
+
+	/* trim to first NL */
+	name = memchr(kbuf, '\n', size);
+	if (name)
+		*name = 0;
+
+	/* split into command, name and argslist */
+	name = strchr(kbuf, ' ');
+	if (!name)
+		goto inval;
+	do {
+		*name++ = 0;
+	} while(*name == ' ');
+	if (!*name)
+		goto inval;
+
+	args = strchr(name, ' ');
+	if (!args)
+		goto inval;
+	do {
+		*args++ = 0;
+	} while(*args == ' ');
+	if (!*args)
+		goto inval;
+
+	/* determine command to perform */
+	_debug("cmd=%s name=%s args=%s", kbuf, name, args);
+
+	if (strcmp(kbuf, "add") == 0) {
+		struct afs_cell *cell;
+		ret = afs_cell_create(name, args, &cell);
+		if (ret < 0)
+			goto done;
+
+		printk("kAFS: Added new cell '%s'\n", name);
+	}
+	else {
+		goto inval;
+	}
+
+	ret = size;
+
+ done:
+	kfree(kbuf);
+	_leave(" = %d", ret);
+	return ret;
+
+ inval:
+	ret = -EINVAL;
+	printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
+	goto done;
+} /* end afs_proc_cells_write() */
+
+/*****************************************************************************/
+/*
+ * Stubs for /proc/fs/afs/rootcell
+ */
+static int afs_proc_rootcell_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int afs_proc_rootcell_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf,
+				      size_t size, loff_t *_pos)
+{
+	return 0;
+}
+
+/*****************************************************************************/
+/*
+ * handle writes to /proc/fs/afs/rootcell
+ * - to initialize rootcell: echo "cell.name:192.168.231.14"
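+ *   e.g. (illustrative values): echo "example.com:10.0.0.10"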
+ */
+static ssize_t afs_proc_rootcell_write(struct file *file,
+				       const char __user *buf,
+				       size_t size, loff_t *_pos)
+{
+	char *kbuf, *s;
+	int ret;
+
+	/* start by dragging the command into memory */
+	if (size <= 1 || size >= PAGE_SIZE)
+		return -EINVAL;
+
+	ret = -ENOMEM;
+	kbuf = kmalloc(size + 1, GFP_KERNEL);
+	if (!kbuf)
+		goto nomem;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf, buf, size) != 0)
+		goto infault;
+	kbuf[size] = 0;
+
+	/* trim to first NL */
+	s = memchr(kbuf, '\n', size);
+	if (s)
+		*s = 0;
+
+	/* determine command to perform */
+	_debug("rootcell=%s", kbuf);
+
+	ret = afs_cell_init(kbuf);
+	if (ret >= 0)
+		ret = size;	/* consume everything, always */
+
+ infault:
+	kfree(kbuf);
+ nomem:
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_proc_rootcell_write() */
+
+/*****************************************************************************/
+/*
+ * initialise /proc/fs/afs/<cell>/
+ */
+int afs_proc_cell_setup(struct afs_cell *cell)
+{
+	struct proc_dir_entry *p;
+
+	_enter("%p{%s}", cell, cell->name);
+
+	cell->proc_dir = proc_mkdir(cell->name, proc_afs);
+	if (!cell->proc_dir)
+		return -ENOMEM;
+
+	p = create_proc_entry("servers", 0, cell->proc_dir);
+	if (!p)
+		goto error_proc;
+	p->proc_fops = &afs_proc_cell_servers_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;
+
+	p = create_proc_entry("vlservers", 0, cell->proc_dir);
+	if (!p)
+		goto error_servers;
+	p->proc_fops = &afs_proc_cell_vlservers_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;
+
+	p = create_proc_entry("volumes", 0, cell->proc_dir);
+	if (!p)
+		goto error_vlservers;
+	p->proc_fops = &afs_proc_cell_volumes_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;
+
+	_leave(" = 0");
+	return 0;
+
+ error_vlservers:
+	remove_proc_entry("vlservers", cell->proc_dir);
+ error_servers:
+	remove_proc_entry("servers", cell->proc_dir);
+ error_proc:
+	remove_proc_entry(cell->name, proc_afs);
+	_leave(" = -ENOMEM");
+	return -ENOMEM;
+} /* end afs_proc_cell_setup() */
+
+/*****************************************************************************/
+/*
+ * remove /proc/fs/afs/<cell>/
+ */
+void afs_proc_cell_remove(struct afs_cell *cell)
+{
+	_enter("");
+
+	remove_proc_entry("volumes", cell->proc_dir);
+	remove_proc_entry("vlservers", cell->proc_dir);
+	remove_proc_entry("servers", cell->proc_dir);
+	remove_proc_entry(cell->name, proc_afs);
+
+	_leave("");
+} /* end afs_proc_cell_remove() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant volumes
+ */
+static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
+{
+	struct afs_cell *cell;
+	struct seq_file *m;
+	int ret;
+
+	cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file, &afs_proc_cell_volumes_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_volumes_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
+{
+	struct afs_cell *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode, file);
+
+	afs_put_cell(cell);
+
+	return ret;
+} /* end afs_proc_cell_volumes_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the cell's volume list and
+ * return the first item
+ */
+static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	struct afs_cell *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld", cell, *_pos);
+
+	/* lock the list against modification */
+	down_read(&cell->vl_sem);
+
+	/* allow for the header line */
+	if (!pos)
+		return (void *) 1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p, &cell->vl_list)
+		if (!pos--)
+			break;
+
+	return _p != &cell->vl_list ? _p : NULL;
+} /* end afs_proc_cell_volumes_start() */
+
+/*****************************************************************************/
+/*
+ * move to next volume in the cell's volume list
+ */
+static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
+					loff_t *_pos)
+{
+	struct list_head *_p;
+	struct afs_cell *cell = p->private;
+
+	_enter("cell=%p pos=%Ld", cell, *_pos);
+
+	(*_pos)++;
+
+	_p = v;
+	_p = v == (void *) 1 ? cell->vl_list.next : _p->next;
+
+	return _p != &cell->vl_list ? _p : NULL;
+} /* end afs_proc_cell_volumes_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the cell's volume list
+ */
+static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
+{
+	struct afs_cell *cell = p->private;
+
+	up_read(&cell->vl_sem);
+
+} /* end afs_proc_cell_volumes_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of volume lines
+ */
+static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
+{
+	struct afs_vlocation *vlocation =
+		list_entry(v, struct afs_vlocation, link);
+
+	/* display header on line 1 */
+	if (v == (void *) 1) {
+		seq_puts(m, "USE VLID[0]  VLID[1]  VLID[2]  NAME\n");
+		return 0;
+	}
+
+	/* display one volume location per line on subsequent lines */
+	seq_printf(m, "%3d %08x %08x %08x %s\n",
+		   atomic_read(&vlocation->usage),
+		   vlocation->vldb.vid[0],
+		   vlocation->vldb.vid[1],
+		   vlocation->vldb.vid[2],
+		   vlocation->vldb.name
+		   );
+
+	return 0;
+} /* end afs_proc_cell_volumes_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume
+ * location servers
+ */
+static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
+{
+	struct afs_cell *cell;
+	struct seq_file *m;
+	int ret;
+
+	cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file, &afs_proc_cell_vlservers_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_vlservers_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_vlservers_release(struct inode *inode,
+					   struct file *file)
+{
+	struct afs_cell *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode, file);
+
+	afs_put_cell(cell);
+
+	return ret;
+} /* end afs_proc_cell_vlservers_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the cell's list of volume
+ * location server addresses and return the first item
+ */
+static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
+{
+	struct afs_cell *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld", cell, *_pos);
+
+	/* lock the list against modification */
+	down_read(&cell->vl_sem);
+
+	/* allow for the header line */
+	if (!pos)
+		return (void *) 1;
+	pos--;
+
+	if (pos >= cell->vl_naddrs)
+		return NULL;
+
+	return &cell->vl_addrs[pos];
+} /* end afs_proc_cell_vlservers_start() */
+
+/*****************************************************************************/
+/*
+ * move to the next volume location server address in the cell's list
+ */
+static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
+					  loff_t *_pos)
+{
+	struct afs_cell *cell = p->private;
+	loff_t pos;
+
+	_enter("cell=%p{nad=%u} pos=%Ld", cell, cell->vl_naddrs, *_pos);
+
+	pos = *_pos;
+	(*_pos)++;
+	if (pos >= cell->vl_naddrs)
+		return NULL;
+
+	return &cell->vl_addrs[pos];
+} /* end afs_proc_cell_vlservers_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the volume location server address list
+ */
+static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
+{
+	struct afs_cell *cell = p->private;
+
+	up_read(&cell->vl_sem);
+
+} /* end afs_proc_cell_vlservers_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by one line per volume location server
+ */
+static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
+{
+	struct in_addr *addr = v;
+
+	/* display header on line 1 */
+	if (v == (struct in_addr *) 1) {
+		seq_puts(m, "ADDRESS\n");
+		return 0;
+	}
+
+	/* display one address per line on subsequent lines */
+	seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr));
+
+	return 0;
+} /* end afs_proc_cell_vlservers_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/servers" which provides a summary of active
+ * servers
+ */
+static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
+{
+	struct afs_cell *cell;
+	struct seq_file *m;
+	int ret;
+
+	cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file, &afs_proc_cell_servers_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_servers_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_servers_release(struct inode *inode,
+					 struct file *file)
+{
+	struct afs_cell *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode, file);
+
+	afs_put_cell(cell);
+
+	return ret;
+} /* end afs_proc_cell_servers_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the cell's server list and
+ * return the first item
+ */
+static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	struct afs_cell *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld", cell, *_pos);
+
+	/* lock the list against modification */
+	read_lock(&cell->sv_lock);
+
+	/* allow for the header line */
+	if (!pos)
+		return (void *) 1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p, &cell->sv_list)
+		if (!pos--)
+			break;
+
+	return _p != &cell->sv_list ? _p : NULL;
+} /* end afs_proc_cell_servers_start() */
+
+/*****************************************************************************/
+/*
+ * move to the next server in the cell's list
+ */
+static void *afs_proc_cell_servers_next(struct seq_file *p, void *v,
+					loff_t *_pos)
+{
+	struct list_head *_p;
+	struct afs_cell *cell = p->private;
+
+	_enter("cell=%p pos=%Ld", cell, *_pos);
+
+	(*_pos)++;
+
+	_p = v;
+	_p = v == (void *) 1 ? cell->sv_list.next : _p->next;
+
+	return _p != &cell->sv_list ? _p : NULL;
+} /* end afs_proc_cell_servers_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the cell's server list
+ */
+static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
+{
+	struct afs_cell *cell = p->private;
+
+	read_unlock(&cell->sv_lock);
+
+} /* end afs_proc_cell_servers_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by one line per server
+ */
+static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
+{
+	struct afs_server *server = list_entry(v, struct afs_server, link);
+	char ipaddr[20];
+
+	/* display header on line 1 */
+	if (v == (void *) 1) {
+		seq_puts(m, "USE ADDR            STATE\n");
+		return 0;
+	}
+
+	/* display one server per line on subsequent lines */
+	sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr));
+	seq_printf(m, "%3d %-15.15s %5d\n",
+		   atomic_read(&server->usage),
+		   ipaddr,
+		   server->fs_state
+		   );
+
+	return 0;
+} /* end afs_proc_cell_servers_show() */
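
The three seq_file tables above each print a fixed header line followed by one
record per line, so they are easy to consume from userspace with ordinary
line-oriented parsing. The sketch below is a hypothetical reader for
/proc/fs/afs/<cell>/volumes; the cell name "example.com" is an assumption and
the field layout simply mirrors what afs_proc_cell_volumes_show() emits.

	/* hypothetical userspace sketch: parse /proc/fs/afs/example.com/volumes;
	 * the path is an assumption, the columns mirror the
	 * "USE VLID[0] VLID[1] VLID[2] NAME" format printed above */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/fs/afs/example.com/volumes", "r");
		char header[128], name[65];
		unsigned vid0, vid1, vid2;
		int use;

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (!fgets(header, sizeof(header), f)) {	/* skip the header line */
			fclose(f);
			return 1;
		}
		while (fscanf(f, "%d %x %x %x %64s", &use, &vid0, &vid1, &vid2, name) == 5)
			printf("volume %s: use=%d vids=%08x,%08x,%08x\n",
			       name, use, vid0, vid1, vid2);
		fclose(f);
		return 0;
	}
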
diff --git a/fs/afs/server.c b/fs/afs/server.c
new file mode 100644
index 0000000..62b093a
--- /dev/null
+++ b/fs/afs/server.c
@@ -0,0 +1,502 @@
+/* server.c: AFS server record management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include "volume.h"
+#include "cell.h"
+#include "server.h"
+#include "transport.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include "internal.h"
+
+DEFINE_SPINLOCK(afs_server_peer_lock);
+
+#define FS_SERVICE_ID		1	/* AFS File Service ID */
+#define VL_SERVICE_ID		52	/* AFS Volume Location Service ID */
+
+static void __afs_server_timeout(struct afs_timer *timer)
+{
+	struct afs_server *server =
+		list_entry(timer, struct afs_server, timeout);
+
+	_debug("SERVER TIMEOUT [%p{u=%d}]",
+	       server, atomic_read(&server->usage));
+
+	afs_server_do_timeout(server);
+}
+
+static const struct afs_timer_ops afs_server_timer_ops = {
+	.timed_out	= __afs_server_timeout,
+};
+
+/*****************************************************************************/
+/*
+ * lookup a server record in a cell
+ * - TODO: search the cell's server list
+ */
+int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
+		      struct afs_server **_server)
+{
+	struct afs_server *server, *active, *zombie;
+	int loop;
+
+	_enter("%p,%08x,", cell, ntohl(addr->s_addr));
+
+	/* allocate and initialise a server record */
+	server = kmalloc(sizeof(struct afs_server), GFP_KERNEL);
+	if (!server) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	memset(server, 0, sizeof(struct afs_server));
+	atomic_set(&server->usage, 1);
+
+	INIT_LIST_HEAD(&server->link);
+	init_rwsem(&server->sem);
+	INIT_LIST_HEAD(&server->fs_callq);
+	spin_lock_init(&server->fs_lock);
+	INIT_LIST_HEAD(&server->cb_promises);
+	spin_lock_init(&server->cb_lock);
+
+	for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++)
+		server->fs_conn_cnt[loop] = 4;
+
+	memcpy(&server->addr, addr, sizeof(struct in_addr));
+	server->addr.s_addr = addr->s_addr;
+
+	afs_timer_init(&server->timeout, &afs_server_timer_ops);
+
+	/* add to the cell */
+	write_lock(&cell->sv_lock);
+
+	/* check the active list */
+	list_for_each_entry(active, &cell->sv_list, link) {
+		if (active->addr.s_addr == addr->s_addr)
+			goto use_active_server;
+	}
+
+	/* check the inactive list */
+	spin_lock(&cell->sv_gylock);
+	list_for_each_entry(zombie, &cell->sv_graveyard, link) {
+		if (zombie->addr.s_addr == addr->s_addr)
+			goto resurrect_server;
+	}
+	spin_unlock(&cell->sv_gylock);
+
+	afs_get_cell(cell);
+	server->cell = cell;
+	list_add_tail(&server->link, &cell->sv_list);
+
+	write_unlock(&cell->sv_lock);
+
+	*_server = server;
+	_leave(" = 0 (%p)", server);
+	return 0;
+
+	/* found a matching active server */
+ use_active_server:
+	_debug("active server");
+	afs_get_server(active);
+	write_unlock(&cell->sv_lock);
+
+	kfree(server);
+
+	*_server = active;
+	_leave(" = 0 (%p)", active);
+	return 0;
+
+	/* found a matching server in the graveyard, so resurrect it and
+	 * dispose of the new record */
+ resurrect_server:
+	_debug("resurrecting server");
+
+	list_del(&zombie->link);
+	list_add_tail(&zombie->link, &cell->sv_list);
+	afs_get_server(zombie);
+	afs_kafstimod_del_timer(&zombie->timeout);
+	spin_unlock(&cell->sv_gylock);
+	write_unlock(&cell->sv_lock);
+
+	kfree(server);
+
+	*_server = zombie;
+	_leave(" = 0 (%p)", zombie);
+	return 0;
+
+} /* end afs_server_lookup() */
+
+/*****************************************************************************/
+/*
+ * destroy a server record
+ * - removes from the cell list
+ */
+void afs_put_server(struct afs_server *server)
+{
+	struct afs_cell *cell;
+
+	if (!server)
+		return;
+
+	_enter("%p", server);
+
+	cell = server->cell;
+
+	/* sanity check */
+	BUG_ON(atomic_read(&server->usage) <= 0);
+
+	/* to prevent a race, the decrement and the dequeue must be effectively
+	 * atomic */
+	write_lock(&cell->sv_lock);
+
+	if (likely(!atomic_dec_and_test(&server->usage))) {
+		write_unlock(&cell->sv_lock);
+		_leave("");
+		return;
+	}
+
+	spin_lock(&cell->sv_gylock);
+	list_del(&server->link);
+	list_add_tail(&server->link, &cell->sv_graveyard);
+
+	/* time out in 10 secs */
+	afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
+
+	spin_unlock(&cell->sv_gylock);
+	write_unlock(&cell->sv_lock);
+
+	_leave(" [killed]");
+} /* end afs_put_server() */
+
+/*****************************************************************************/
+/*
+ * timeout server record
+ * - removes from the cell's graveyard if the usage count is zero
+ */
+void afs_server_do_timeout(struct afs_server *server)
+{
+	struct rxrpc_peer *peer;
+	struct afs_cell *cell;
+	int loop;
+
+	_enter("%p", server);
+
+	cell = server->cell;
+
+	BUG_ON(atomic_read(&server->usage) < 0);
+
+	/* remove from graveyard if still dead */
+	spin_lock(&cell->sv_gylock);
+	if (atomic_read(&server->usage) == 0)
+		list_del_init(&server->link);
+	else
+		server = NULL;
+	spin_unlock(&cell->sv_gylock);
+
+	if (!server) {
+		_leave("");
+		return; /* resurrected */
+	}
+
+	/* we can now destroy it properly */
+	afs_put_cell(cell);
+
+	/* uncross-point the structs under a global lock */
+	spin_lock(&afs_server_peer_lock);
+	peer = server->peer;
+	if (peer) {
+		server->peer = NULL;
+		peer->user = NULL;
+	}
+	spin_unlock(&afs_server_peer_lock);
+
+	/* finish cleaning up the server */
+	for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--)
+		if (server->fs_conn[loop])
+			rxrpc_put_connection(server->fs_conn[loop]);
+
+	if (server->vlserver)
+		rxrpc_put_connection(server->vlserver);
+
+	kfree(server);
+
+	_leave(" [destroyed]");
+} /* end afs_server_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * get a callslot on a connection to the fileserver on the specified server
+ */
+int afs_server_request_callslot(struct afs_server *server,
+				struct afs_server_callslot *callslot)
+{
+	struct afs_server_callslot *pcallslot;
+	struct rxrpc_connection *conn;
+	int nconn, ret;
+
+	_enter("%p,",server);
+
+	INIT_LIST_HEAD(&callslot->link);
+	callslot->task = current;
+	callslot->conn = NULL;
+	callslot->nconn = -1;
+	callslot->ready = 0;
+
+	ret = 0;
+	conn = NULL;
+
+	/* get hold of a callslot first */
+	spin_lock(&server->fs_lock);
+
+	/* resurrect the server if its death timeout has expired */
+	if (server->fs_state) {
+		if (time_before(jiffies, server->fs_dead_jif)) {
+			ret = server->fs_state;
+			spin_unlock(&server->fs_lock);
+			_leave(" = %d [still dead]", ret);
+			return ret;
+		}
+
+		server->fs_state = 0;
+	}
+
+	/* try and find a connection that has spare callslots */
+	for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) {
+		if (server->fs_conn_cnt[nconn] > 0) {
+			server->fs_conn_cnt[nconn]--;
+			spin_unlock(&server->fs_lock);
+			callslot->nconn = nconn;
+			goto obtained_slot;
+		}
+	}
+
+	/* none were available - wait interruptibly for one to become
+	 * available */
+	set_current_state(TASK_INTERRUPTIBLE);
+	list_add_tail(&callslot->link, &server->fs_callq);
+	spin_unlock(&server->fs_lock);
+
+	while (!callslot->ready && !signal_pending(current)) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	set_current_state(TASK_RUNNING);
+
+	/* even if we were interrupted we may still be queued */
+	if (!callslot->ready) {
+		spin_lock(&server->fs_lock);
+		list_del_init(&callslot->link);
+		spin_unlock(&server->fs_lock);
+	}
+
+	nconn = callslot->nconn;
+
+	/* if interrupted, we must release any slot we were granted before
+	 * returning an error */
+	if (signal_pending(current)) {
+		ret = -EINTR;
+		goto error_release;
+	}
+
+	/* if we were woken up with an error, then pass that error back to the
+	 * caller */
+	if (nconn < 0) {
+		_leave(" = %d", callslot->errno);
+		return callslot->errno;
+	}
+
+	/* were we given a connection directly? */
+	if (callslot->conn) {
+		/* yes - use it */
+		_leave(" = 0 (nc=%d)", nconn);
+		return 0;
+	}
+
+	/* got a callslot, but no connection */
+ obtained_slot:
+
+	/* need to get hold of the RxRPC connection */
+	down_write(&server->sem);
+
+	/* quick check to see if there's an outstanding error */
+	ret = server->fs_state;
+	if (ret)
+		goto error_release_upw;
+
+	if (server->fs_conn[nconn]) {
+		/* reuse an existing connection */
+		rxrpc_get_connection(server->fs_conn[nconn]);
+		callslot->conn = server->fs_conn[nconn];
+	}
+	else {
+		/* create a new connection */
+		ret = rxrpc_create_connection(afs_transport,
+					      htons(7000),
+					      server->addr.s_addr,
+					      FS_SERVICE_ID,
+					      NULL,
+					      &server->fs_conn[nconn]);
+
+		if (ret < 0)
+			goto error_release_upw;
+
+		callslot->conn = server->fs_conn[nconn];
+		rxrpc_get_connection(callslot->conn);
+	}
+
+	up_write(&server->sem);
+
+ 	_leave(" = 0");
+	return 0;
+
+	/* handle an error occurring */
+ error_release_upw:
+	up_write(&server->sem);
+
+ error_release:
+	/* either release the callslot or pass it along to another deserving
+	 * task */
+	spin_lock(&server->fs_lock);
+
+	if (nconn < 0) {
+		/* no callslot allocated */
+	}
+	else if (list_empty(&server->fs_callq)) {
+		/* no one waiting */
+		server->fs_conn_cnt[nconn]++;
+		spin_unlock(&server->fs_lock);
+	}
+	else {
+		/* someone's waiting - dequeue them and wake them up */
+		pcallslot = list_entry(server->fs_callq.next,
+				       struct afs_server_callslot, link);
+		list_del_init(&pcallslot->link);
+
+		pcallslot->errno = server->fs_state;
+		if (!pcallslot->errno) {
+			/* pass our callslot details out to them */
+			callslot->conn = xchg(&pcallslot->conn,
+					      callslot->conn);
+			pcallslot->nconn = nconn;
+			callslot->nconn = nconn = -1;
+		}
+		pcallslot->ready = 1;
+		wake_up_process(pcallslot->task);
+		spin_unlock(&server->fs_lock);
+	}
+
+	rxrpc_put_connection(callslot->conn);
+	callslot->conn = NULL;
+
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_server_request_callslot() */
+
+/*****************************************************************************/
+/*
+ * release a callslot back to the server
+ * - transfers the RxRPC connection to the next pending callslot if possible
+ */
+void afs_server_release_callslot(struct afs_server *server,
+				 struct afs_server_callslot *callslot)
+{
+	struct afs_server_callslot *pcallslot;
+
+	_enter("{ad=%08x,cnt=%u},{%d}",
+	       ntohl(server->addr.s_addr),
+	       server->fs_conn_cnt[callslot->nconn],
+	       callslot->nconn);
+
+	BUG_ON(callslot->nconn < 0);
+
+	spin_lock(&server->fs_lock);
+
+	if (list_empty(&server->fs_callq)) {
+		/* no one waiting */
+		server->fs_conn_cnt[callslot->nconn]++;
+		spin_unlock(&server->fs_lock);
+	}
+	else {
+		/* someone's waiting - dequeue them and wake them up */
+		pcallslot = list_entry(server->fs_callq.next,
+				       struct afs_server_callslot, link);
+		list_del_init(&pcallslot->link);
+
+		pcallslot->errno = server->fs_state;
+		if (!pcallslot->errno) {
+			/* pass our callslot details out to them */
+			callslot->conn = xchg(&pcallslot->conn, callslot->conn);
+			pcallslot->nconn = callslot->nconn;
+			callslot->nconn = -1;
+		}
+
+		pcallslot->ready = 1;
+		wake_up_process(pcallslot->task);
+		spin_unlock(&server->fs_lock);
+	}
+
+	rxrpc_put_connection(callslot->conn);
+
+	_leave("");
+} /* end afs_server_release_callslot() */
+
+/*****************************************************************************/
+/*
+ * get a handle to a connection to the vlserver (volume location) on the
+ * specified server
+ */
+int afs_server_get_vlconn(struct afs_server *server,
+			  struct rxrpc_connection **_conn)
+{
+	struct rxrpc_connection *conn;
+	int ret;
+
+	_enter("%p,", server);
+
+	ret = 0;
+	conn = NULL;
+	down_read(&server->sem);
+
+	if (server->vlserver) {
+		/* reuse an existing connection */
+		rxrpc_get_connection(server->vlserver);
+		conn = server->vlserver;
+		up_read(&server->sem);
+	}
+	else {
+		/* create a new connection */
+		up_read(&server->sem);
+		down_write(&server->sem);
+		if (!server->vlserver) {
+			ret = rxrpc_create_connection(afs_transport,
+						      htons(7003),
+						      server->addr.s_addr,
+						      VL_SERVICE_ID,
+						      NULL,
+						      &server->vlserver);
+		}
+		if (ret == 0) {
+			rxrpc_get_connection(server->vlserver);
+			conn = server->vlserver;
+		}
+		up_write(&server->sem);
+	}
+
+	*_conn = conn;
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_server_get_vlconn() */
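
The callslot API above is intended to be used in a request/use/release pattern
by the fileserver RPC paths. The following is only a minimal sketch under that
assumption: the function name and the elided RPC body are hypothetical, and the
real callers live in the fileserver client code.

	/* minimal sketch (not a real caller): obtain a callslot, use the RxRPC
	 * connection it carries, then hand the slot back so that any waiter can
	 * reuse it; afs_example_fs_call() is a hypothetical name */
	static int afs_example_fs_call(struct afs_server *server)
	{
		struct afs_server_callslot callslot;
		int ret;

		ret = afs_server_request_callslot(server, &callslot);
		if (ret < 0)
			return ret;	/* server marked dead or we were interrupted */

		/* ... perform the RPC over callslot.conn ... */

		afs_server_release_callslot(server, &callslot);
		return 0;
	}
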
diff --git a/fs/afs/server.h b/fs/afs/server.h
new file mode 100644
index 0000000..c3d2411
--- /dev/null
+++ b/fs/afs/server.h
@@ -0,0 +1,102 @@
+/* server.h: AFS server record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_SERVER_H
+#define _LINUX_AFS_SERVER_H
+
+#include "types.h"
+#include "kafstimod.h"
+#include <rxrpc/peer.h>
+#include <linux/rwsem.h>
+
+extern spinlock_t afs_server_peer_lock;
+
+/*****************************************************************************/
+/*
+ * AFS server record
+ */
+struct afs_server
+{
+	atomic_t		usage;
+	struct afs_cell		*cell;		/* cell in which server resides */
+	struct list_head	link;		/* link in cell's server list */
+	struct rw_semaphore	sem;		/* access lock */
+	struct afs_timer	timeout;	/* graveyard timeout */
+	struct in_addr		addr;		/* server address */
+	struct rxrpc_peer	*peer;		/* peer record for this server */
+	struct rxrpc_connection	*vlserver;	/* connection to the volume location service */
+
+	/* file service access */
+#define AFS_SERVER_CONN_LIST_SIZE 2
+	struct rxrpc_connection	*fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
+	unsigned		fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE];	/* per conn call count */
+	struct list_head	fs_callq;	/* queue of processes waiting to make a call */
+	spinlock_t		fs_lock;	/* access lock */
+	int			fs_state;      	/* 0 or reason FS currently marked dead (-errno) */
+	unsigned		fs_rtt;		/* FS round trip time */
+	unsigned long		fs_act_jif;	/* time at which last activity occurred */
+	unsigned long		fs_dead_jif;	/* time at which no longer to be considered dead */
+
+	/* callback promise management */
+	struct list_head	cb_promises;	/* as yet unbroken promises from this server */
+	spinlock_t		cb_lock;	/* access lock */
+};
+
+extern int afs_server_lookup(struct afs_cell *cell,
+			     const struct in_addr *addr,
+			     struct afs_server **_server);
+
+#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
+
+extern void afs_put_server(struct afs_server *server);
+extern void afs_server_do_timeout(struct afs_server *server);
+
+extern int afs_server_find_by_peer(const struct rxrpc_peer *peer,
+				   struct afs_server **_server);
+
+extern int afs_server_get_vlconn(struct afs_server *server,
+				 struct rxrpc_connection **_conn);
+
+static inline
+struct afs_server *afs_server_get_from_peer(struct rxrpc_peer *peer)
+{
+	struct afs_server *server;
+
+	spin_lock(&afs_server_peer_lock);
+	server = peer->user;
+	if (server)
+		afs_get_server(server);
+	spin_unlock(&afs_server_peer_lock);
+
+	return server;
+}
+
+/*****************************************************************************/
+/*
+ * AFS server callslot grant record
+ */
+struct afs_server_callslot
+{
+	struct list_head	link;		/* link in server's list */
+	struct task_struct	*task;		/* process waiting to make call */
+	struct rxrpc_connection	*conn;		/* connection to use (or NULL on error) */
+	short			nconn;		/* connection slot number (-1 on error) */
+	char			ready;		/* T when ready */
+	int			errno;		/* error number if nconn==-1 */
+};
+
+extern int afs_server_request_callslot(struct afs_server *server,
+				       struct afs_server_callslot *callslot);
+
+extern void afs_server_release_callslot(struct afs_server *server,
+					struct afs_server_callslot *callslot);
+
+#endif /* _LINUX_AFS_SERVER_H */
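
afs_server_lookup() hands back a record with its usage count already held, and
afs_put_server() does not free the record immediately but parks it in the
cell's graveyard for ten seconds so that a prompt re-lookup can resurrect it.
A hedged usage sketch follows; the function name and the address literal are
purely illustrative.

	/* illustrative only: look up (or create) a server record for an address,
	 * then drop the reference when done */
	static int afs_example_touch_server(struct afs_cell *cell)
	{
		struct in_addr addr = { .s_addr = htonl(0xc0a80001) };	/* 192.168.0.1 */
		struct afs_server *server;
		int ret;

		ret = afs_server_lookup(cell, &addr, &server);
		if (ret < 0)
			return ret;

		/* ... use the server record ... */

		afs_put_server(server);	/* may park it in the cell's graveyard */
		return 0;
	}
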
diff --git a/fs/afs/super.c b/fs/afs/super.c
new file mode 100644
index 0000000..d6fa8e5
--- /dev/null
+++ b/fs/afs/super.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Howells <dhowells@redhat.com>
+ *          David Woodhouse <dwmw2@cambridge.redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "vnode.h"
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "super.h"
+#include "internal.h"
+
+#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
+
+struct afs_mount_params {
+	int			rwpath;
+	struct afs_cell		*default_cell;
+	struct afs_volume	*volume;
+};
+
+static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
+			    unsigned long flags);
+
+static struct super_block *afs_get_sb(struct file_system_type *fs_type,
+				      int flags, const char *dev_name,
+				      void *data);
+
+static struct inode *afs_alloc_inode(struct super_block *sb);
+
+static void afs_put_super(struct super_block *sb);
+
+static void afs_destroy_inode(struct inode *inode);
+
+static struct file_system_type afs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "afs",
+	.get_sb		= afs_get_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags	= FS_BINARY_MOUNTDATA,
+};
+
+static struct super_operations afs_super_ops = {
+	.statfs		= simple_statfs,
+	.alloc_inode	= afs_alloc_inode,
+	.drop_inode	= generic_delete_inode,
+	.destroy_inode	= afs_destroy_inode,
+	.clear_inode	= afs_clear_inode,
+	.put_super	= afs_put_super,
+};
+
+static kmem_cache_t *afs_inode_cachep;
+static atomic_t afs_count_active_inodes;
+
+/*****************************************************************************/
+/*
+ * initialise the filesystem
+ */
+int __init afs_fs_init(void)
+{
+	int ret;
+
+	_enter("");
+
+	afs_timer_init(&afs_mntpt_expiry_timer, &afs_mntpt_expiry_timer_ops);
+
+	/* create ourselves an inode cache */
+	atomic_set(&afs_count_active_inodes, 0);
+
+	ret = -ENOMEM;
+	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
+					     sizeof(struct afs_vnode),
+					     0,
+					     SLAB_HWCACHE_ALIGN,
+					     afs_i_init_once,
+					     NULL);
+	if (!afs_inode_cachep) {
+		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
+		return ret;
+	}
+
+	/* now export our filesystem to lesser mortals */
+	ret = register_filesystem(&afs_fs_type);
+	if (ret < 0) {
+		kmem_cache_destroy(afs_inode_cachep);
+		kleave(" = %d", ret);
+		return ret;
+	}
+
+	kleave(" = 0");
+	return 0;
+} /* end afs_fs_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the filesystem
+ */
+void __exit afs_fs_exit(void)
+{
+	unregister_filesystem(&afs_fs_type);
+
+	if (atomic_read(&afs_count_active_inodes) != 0) {
+		printk("kAFS: %d active inode objects still present\n",
+		       atomic_read(&afs_count_active_inodes));
+		BUG();
+	}
+
+	kmem_cache_destroy(afs_inode_cachep);
+
+} /* end afs_fs_exit() */
+
+/*****************************************************************************/
+/*
+ * check that an argument has a value
+ */
+static int want_arg(char **_value, const char *option)
+{
+	if (!_value || !*_value || !**_value) {
+		printk(KERN_NOTICE "kAFS: %s: argument missing\n", option);
+		return 0;
+	}
+	return 1;
+} /* end want_arg() */
+
+/*****************************************************************************/
+/*
+ * check that there's no subsequent value
+ */
+static int want_no_value(char *const *_value, const char *option)
+{
+	if (*_value && **_value) {
+		printk(KERN_NOTICE "kAFS: %s: Invalid argument: %s\n",
+		       option, *_value);
+		return 0;
+	}
+	return 1;
+} /* end want_no_value() */
+
+/*****************************************************************************/
+/*
+ * parse the mount options
+ * - this function has been shamelessly adapted from the ext3 fs which
+ *   shamelessly adapted it from the msdos fs
+ */
+static int afs_super_parse_options(struct afs_mount_params *params,
+				   char *options,
+				   const char **devname)
+{
+	char *key, *value;
+	int ret;
+
+	_enter("%s", options);
+
+	options[PAGE_SIZE - 1] = 0;
+
+	ret = 0;
+	while ((key = strsep(&options, ",")) != 0)
+	{
+		value = strchr(key, '=');
+		if (value)
+			*value++ = 0;
+
+		printk("kAFS: KEY: %s, VAL:%s\n", key, value ?: "-");
+
+		if (strcmp(key, "rwpath") == 0) {
+			if (!want_no_value(&value, "rwpath"))
+				return -EINVAL;
+			params->rwpath = 1;
+			continue;
+		}
+		else if (strcmp(key, "vol") == 0) {
+			if (!want_arg(&value, "vol"))
+				return -EINVAL;
+			*devname = value;
+			continue;
+		}
+		else if (strcmp(key, "cell") == 0) {
+			if (!want_arg(&value, "cell"))
+				return -EINVAL;
+			afs_put_cell(params->default_cell);
+			ret = afs_cell_lookup(value,
+					      strlen(value),
+					      &params->default_cell);
+			if (ret < 0)
+				return -EINVAL;
+			continue;
+		}
+
+		printk("kAFS: Unknown mount option: '%s'\n",  key);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = 0;
+
+ error:
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_super_parse_options() */
+
+/*****************************************************************************/
+/*
+ * check a superblock to see if it's the one we're looking for
+ */
+static int afs_test_super(struct super_block *sb, void *data)
+{
+	struct afs_mount_params *params = data;
+	struct afs_super_info *as = sb->s_fs_info;
+
+	return as->volume == params->volume;
+} /* end afs_test_super() */
+
+/*****************************************************************************/
+/*
+ * fill in the superblock
+ */
+static int afs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct afs_mount_params *params = data;
+	struct afs_super_info *as = NULL;
+	struct afs_fid fid;
+	struct dentry *root = NULL;
+	struct inode *inode = NULL;
+	int ret;
+
+	kenter("");
+
+	/* allocate a superblock info record */
+	as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+	if (!as) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	memset(as, 0, sizeof(struct afs_super_info));
+
+	afs_get_volume(params->volume);
+	as->volume = params->volume;
+
+	/* fill in the superblock */
+	sb->s_blocksize		= PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
+	sb->s_magic		= AFS_FS_MAGIC;
+	sb->s_op		= &afs_super_ops;
+	sb->s_fs_info		= as;
+
+	/* allocate the root inode and dentry */
+	fid.vid		= as->volume->vid;
+	fid.vnode	= 1;
+	fid.unique	= 1;
+	ret = afs_iget(sb, &fid, &inode);
+	if (ret < 0)
+		goto error;
+
+	ret = -ENOMEM;
+	root = d_alloc_root(inode);
+	if (!root)
+		goto error;
+
+	sb->s_root = root;
+
+	kleave(" = 0");
+	return 0;
+
+ error:
+	iput(inode);
+	afs_put_volume(as->volume);
+	kfree(as);
+
+	sb->s_fs_info = NULL;
+
+	kleave(" = %d", ret);
+	return ret;
+} /* end afs_fill_super() */
+
+/*****************************************************************************/
+/*
+ * get an AFS superblock
+ * - calls sget() directly rather than going through get_sb_nodev()
+ */
+static struct super_block *afs_get_sb(struct file_system_type *fs_type,
+				      int flags,
+				      const char *dev_name,
+				      void *options)
+{
+	struct afs_mount_params params;
+	struct super_block *sb;
+	int ret;
+
+	_enter(",,%s,%p", dev_name, options);
+
+	memset(&params, 0, sizeof(params));
+
+	/* start the cache manager */
+	ret = afscm_start();
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ERR_PTR(ret);
+	}
+
+	/* parse the options */
+	if (options) {
+		ret = afs_super_parse_options(&params, options, &dev_name);
+		if (ret < 0)
+			goto error;
+		if (!dev_name) {
+			printk("kAFS: no volume name specified\n");
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
+	/* parse the device name */
+	ret = afs_volume_lookup(dev_name,
+				params.default_cell,
+				params.rwpath,
+				&params.volume);
+	if (ret < 0)
+		goto error;
+
+	/* allocate a deviceless superblock */
+	sb = sget(fs_type, afs_test_super, set_anon_super, &params);
+	if (IS_ERR(sb))
+		goto error;
+
+	sb->s_flags = flags;
+
+	ret = afs_fill_super(sb, &params, flags & MS_VERBOSE ? 1 : 0);
+	if (ret < 0) {
+		up_write(&sb->s_umount);
+		deactivate_super(sb);
+		goto error;
+	}
+	sb->s_flags |= MS_ACTIVE;
+
+	afs_put_volume(params.volume);
+	afs_put_cell(params.default_cell);
+	_leave(" = %p", sb);
+	return sb;
+
+ error:
+	afs_put_volume(params.volume);
+	afs_put_cell(params.default_cell);
+	afscm_stop();
+	_leave(" = %d", ret);
+	return ERR_PTR(ret);
+} /* end afs_get_sb() */
+
+/*****************************************************************************/
+/*
+ * finish the unmounting process on the superblock
+ */
+static void afs_put_super(struct super_block *sb)
+{
+	struct afs_super_info *as = sb->s_fs_info;
+
+	_enter("");
+
+	afs_put_volume(as->volume);
+	afscm_stop();
+
+	_leave("");
+} /* end afs_put_super() */
+
+/*****************************************************************************/
+/*
+ * initialise an inode cache slab element prior to any use
+ */
+static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
+			    unsigned long flags)
+{
+	struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		memset(vnode, 0, sizeof(*vnode));
+		inode_init_once(&vnode->vfs_inode);
+		init_waitqueue_head(&vnode->update_waitq);
+		spin_lock_init(&vnode->lock);
+		INIT_LIST_HEAD(&vnode->cb_link);
+		INIT_LIST_HEAD(&vnode->cb_hash_link);
+		afs_timer_init(&vnode->cb_timeout,
+			       &afs_vnode_cb_timed_out_ops);
+	}
+
+} /* end afs_i_init_once() */
+
+/*****************************************************************************/
+/*
+ * allocate an AFS inode struct from our slab cache
+ */
+static struct inode *afs_alloc_inode(struct super_block *sb)
+{
+	struct afs_vnode *vnode;
+
+	vnode = (struct afs_vnode *)
+		kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+	if (!vnode)
+		return NULL;
+
+	atomic_inc(&afs_count_active_inodes);
+
+	memset(&vnode->fid, 0, sizeof(vnode->fid));
+	memset(&vnode->status, 0, sizeof(vnode->status));
+
+	vnode->volume		= NULL;
+	vnode->update_cnt	= 0;
+	vnode->flags		= 0;
+
+	return &vnode->vfs_inode;
+} /* end afs_alloc_inode() */
+
+/*****************************************************************************/
+/*
+ * destroy an AFS inode struct
+ */
+static void afs_destroy_inode(struct inode *inode)
+{
+	_enter("{%lu}", inode->i_ino);
+
+	kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
+
+	atomic_dec(&afs_count_active_inodes);
+
+} /* end afs_destroy_inode() */
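
afs_super_parse_options() above accepts a comma-separated option string with
"rwpath", "vol=<name>" and "cell=<name>" keys, and the volume name is otherwise
taken from the device name passed to mount. A hypothetical userspace
illustration follows; the cell and volume names are invented and the exact
volume-name syntax is whatever afs_volume_lookup() accepts.

	/* hypothetical sketch of mounting kAFS with the options parsed by
	 * afs_super_parse_options(); cell and volume names are invented */
	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* the device name carries the volume; "cell=" and "rwpath" are options */
		if (mount("root.cell", "/afs", "afs", 0, "cell=example.com,rwpath") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}
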
diff --git a/fs/afs/super.h b/fs/afs/super.h
new file mode 100644
index 0000000..ac11362
--- /dev/null
+++ b/fs/afs/super.h
@@ -0,0 +1,43 @@
+/* super.h: AFS filesystem internal private data
+ *
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ *          David Howells <dhowells@redhat.com>
+ *
+ */
+
+#ifndef _LINUX_AFS_SUPER_H
+#define _LINUX_AFS_SUPER_H
+
+#include <linux/fs.h>
+#include "server.h"
+
+#ifdef __KERNEL__
+
+/*****************************************************************************/
+/*
+ * AFS superblock private data
+ * - there's one superblock per volume
+ */
+struct afs_super_info
+{
+	struct afs_volume	*volume;	/* volume record */
+	char			rwparent;	/* T if parent is R/W AFS volume */
+};
+
+static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_SUPER_H */
diff --git a/fs/afs/transport.h b/fs/afs/transport.h
new file mode 100644
index 0000000..7013ae6
--- /dev/null
+++ b/fs/afs/transport.h
@@ -0,0 +1,21 @@
+/* transport.h: AFS transport management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_TRANSPORT_H
+#define _LINUX_AFS_TRANSPORT_H
+
+#include "types.h"
+#include <rxrpc/transport.h>
+
+/* the cache manager transport endpoint */
+extern struct rxrpc_transport *afs_transport;
+
+#endif /* _LINUX_AFS_TRANSPORT_H */
diff --git a/fs/afs/types.h b/fs/afs/types.h
new file mode 100644
index 0000000..b1a2367
--- /dev/null
+++ b/fs/afs/types.h
@@ -0,0 +1,125 @@
+/* types.h: AFS types
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_TYPES_H
+#define _LINUX_AFS_TYPES_H
+
+#ifdef __KERNEL__
+#include <rxrpc/types.h>
+#endif /* __KERNEL__ */
+
+typedef unsigned			afs_volid_t;
+typedef unsigned			afs_vnodeid_t;
+typedef unsigned long long		afs_dataversion_t;
+
+typedef enum {
+	AFSVL_RWVOL,			/* read/write volume */
+	AFSVL_ROVOL,			/* read-only volume */
+	AFSVL_BACKVOL,			/* backup volume */
+} __attribute__((packed)) afs_voltype_t;
+
+typedef enum {
+	AFS_FTYPE_INVALID	= 0,
+	AFS_FTYPE_FILE		= 1,
+	AFS_FTYPE_DIR		= 2,
+	AFS_FTYPE_SYMLINK	= 3,
+} afs_file_type_t;
+
+#ifdef __KERNEL__
+
+struct afs_cell;
+struct afs_vnode;
+
+/*****************************************************************************/
+/*
+ * AFS file identifier
+ */
+struct afs_fid
+{
+	afs_volid_t	vid;		/* volume ID */
+	afs_vnodeid_t	vnode;		/* file index within volume */
+	unsigned	unique;		/* unique ID number (file index version) */
+};
+
+/*****************************************************************************/
+/*
+ * AFS callback notification
+ */
+typedef enum {
+	AFSCM_CB_UNTYPED	= 0,	/* no type set on CB break */
+	AFSCM_CB_EXCLUSIVE	= 1,	/* CB exclusive to CM [not implemented] */
+	AFSCM_CB_SHARED		= 2,	/* CB shared by other CMs */
+	AFSCM_CB_DROPPED	= 3,	/* CB promise cancelled by file server */
+} afs_callback_type_t;
+
+struct afs_callback
+{
+	struct afs_server	*server;	/* server that made the promise */
+	struct afs_fid		fid;		/* file identifier */
+	unsigned		version;	/* callback version */
+	unsigned		expiry;		/* time at which expires */
+	afs_callback_type_t	type;		/* type of callback */
+};
+
+#define AFSCBMAX 50
+
+/*****************************************************************************/
+/*
+ * AFS volume information
+ */
+struct afs_volume_info
+{
+	afs_volid_t		vid;		/* volume ID */
+	afs_voltype_t		type;		/* type of this volume */
+	afs_volid_t		type_vids[5];	/* volume IDs for the possible types of this vol */
+
+	/* list of fileservers serving this volume */
+	size_t			nservers;	/* number of entries used in servers[] */
+	struct {
+		struct in_addr	addr;		/* fileserver address */
+	} servers[8];
+};
+
+/*****************************************************************************/
+/*
+ * AFS file status information
+ */
+struct afs_file_status
+{
+	unsigned		if_version;	/* interface version */
+#define AFS_FSTATUS_VERSION	1
+
+	afs_file_type_t		type;		/* file type */
+	unsigned		nlink;		/* link count */
+	size_t			size;		/* file size */
+	afs_dataversion_t	version;	/* current data version */
+	unsigned		author;		/* author ID */
+	unsigned		owner;		/* owner ID */
+	unsigned		caller_access;	/* access rights for authenticated caller */
+	unsigned		anon_access;	/* access rights for unauthenticated caller */
+	umode_t			mode;		/* UNIX mode */
+	struct afs_fid		parent;		/* parent file ID */
+	time_t			mtime_client;	/* last time client changed data */
+	time_t			mtime_server;	/* last time server changed data */
+};
+
+/*****************************************************************************/
+/*
+ * AFS volume synchronisation information
+ */
+struct afs_volsync
+{
+	time_t			creation;	/* volume creation time */
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_TYPES_H */
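
struct afs_fid above identifies a file by volume ID, vnode index and
uniquifier; the superblock code addresses the root directory of a volume as
vnode 1 with uniquifier 1. A small sketch of building such a fid follows; the
helper name is illustrative.

	/* sketch: build the fid for a volume's root directory (vnode 1,
	 * uniquifier 1, as the superblock code does); the helper is illustrative */
	static struct afs_fid afs_example_root_fid(afs_volid_t vid)
	{
		struct afs_fid fid = {
			.vid	= vid,
			.vnode	= 1,
			.unique	= 1,
		};

		return fid;
	}
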
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
new file mode 100644
index 0000000..7b0e319
--- /dev/null
+++ b/fs/afs/vlclient.c
@@ -0,0 +1,695 @@
+/* vlclient.c: AFS Volume Location Service client
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "server.h"
+#include "volume.h"
+#include "vlclient.h"
+#include "kafsasyncd.h"
+#include "kafstimod.h"
+#include "errors.h"
+#include "internal.h"
+
+#define VLGETENTRYBYID		503	/* AFS Get VLDB Entry By ID operation ID */
+#define VLGETENTRYBYNAME	504	/* AFS Get VLDB Entry By Name operation ID */
+#define VLPROBE			514	/* AFS Probe Volume Location Service operation ID */
+
+static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
+static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
+
+/*****************************************************************************/
+/*
+ * map afs VL abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afs_rxvl_aemap(struct rxrpc_call *call)
+{
+	int err;
+
+	_enter("{%u,%u,%d}",
+	       call->app_err_state, call->app_abort_code, call->app_errno);
+
+	switch (call->app_err_state) {
+	case RXRPC_ESTATE_LOCAL_ABORT:
+		call->app_abort_code = -call->app_errno;
+		return;
+
+	case RXRPC_ESTATE_PEER_ABORT:
+		switch (call->app_abort_code) {
+		case AFSVL_IDEXIST:		err = -EEXIST;		break;
+		case AFSVL_IO:			err = -EREMOTEIO;	break;
+		case AFSVL_NAMEEXIST:		err = -EEXIST;		break;
+		case AFSVL_CREATEFAIL:		err = -EREMOTEIO;	break;
+		case AFSVL_NOENT:		err = -ENOMEDIUM;	break;
+		case AFSVL_EMPTY:		err = -ENOMEDIUM;	break;
+		case AFSVL_ENTDELETED:		err = -ENOMEDIUM;	break;
+		case AFSVL_BADNAME:		err = -EINVAL;		break;
+		case AFSVL_BADINDEX:		err = -EINVAL;		break;
+		case AFSVL_BADVOLTYPE:		err = -EINVAL;		break;
+		case AFSVL_BADSERVER:		err = -EINVAL;		break;
+		case AFSVL_BADPARTITION:	err = -EINVAL;		break;
+		case AFSVL_REPSFULL:		err = -EFBIG;		break;
+		case AFSVL_NOREPSERVER:		err = -ENOENT;		break;
+		case AFSVL_DUPREPSERVER:	err = -EEXIST;		break;
+		case AFSVL_RWNOTFOUND:		err = -ENOENT;		break;
+		case AFSVL_BADREFCOUNT:		err = -EINVAL;		break;
+		case AFSVL_SIZEEXCEEDED:	err = -EINVAL;		break;
+		case AFSVL_BADENTRY:		err = -EINVAL;		break;
+		case AFSVL_BADVOLIDBUMP:	err = -EINVAL;		break;
+		case AFSVL_IDALREADYHASHED:	err = -EINVAL;		break;
+		case AFSVL_ENTRYLOCKED:		err = -EBUSY;		break;
+		case AFSVL_BADVOLOPER:		err = -EBADRQC;		break;
+		case AFSVL_BADRELLOCKTYPE:	err = -EINVAL;		break;
+		case AFSVL_RERELEASE:		err = -EREMOTEIO;	break;
+		case AFSVL_BADSERVERFLAG:	err = -EINVAL;		break;
+		case AFSVL_PERM:		err = -EACCES;		break;
+		case AFSVL_NOMEM:		err = -EREMOTEIO;	break;
+		default:
+			err = afs_abort_to_error(call->app_abort_code);
+			break;
+		}
+		call->app_errno = err;
+		return;
+
+	default:
+		return;
+	}
+} /* end afs_rxvl_aemap() */
+
+#if 0
+/*****************************************************************************/
+/*
+ * probe a volume location server to see if it is still alive -- unused
+ */
+static int afs_rxvl_probe(struct afs_server *server, int alloc_flags)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	size_t sent;
+	int ret;
+	__be32 param[1];
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	/* get hold of the vlserver connection */
+	ret = afs_server_get_vlconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = VLPROBE;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	param[0] = htonl(VLPROBE);
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET,
+				    alloc_flags, 0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
+		    signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	ret = -EINTR;
+	if (signal_pending(current))
+		goto abort;
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_ERROR:
+		ret = call->app_errno;
+		goto out_unwait;
+
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		ret = 0;
+		goto out_unwait;
+
+	default:
+		BUG();
+	}
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	rxrpc_put_connection(conn);
+ out:
+	return ret;
+
+} /* end afs_rxvl_probe() */
+#endif
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by name
+ */
+int afs_rxvl_get_entry_by_name(struct afs_server *server,
+			       const char *volname,
+			       unsigned volnamesz,
+			       struct afs_cache_vlocation *entry)
+{
+	DECLARE_WAITQUEUE(myself, current);
+
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[3];
+	unsigned tmp;
+	size_t sent;
+	int ret, loop;
+	__be32 *bp, param[2], zero;
+
+	_enter(",%*.*s,%u,", volnamesz, volnamesz, volname, volnamesz);
+
+	memset(entry, 0, sizeof(*entry));
+
+	/* get hold of the vlserver connection */
+	ret = afs_server_get_vlconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = VLGETENTRYBYNAME;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	piov[1].iov_len = volnamesz;
+	piov[1].iov_base = (char *) volname;
+
+	zero = 0;
+	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
+	piov[2].iov_base = &zero;
+
+	param[0] = htonl(VLGETENTRYBYNAME);
+	param[1] = htonl(piov[1].iov_len);
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call, 384);
+
+	ret = rxrpc_call_read_data(call, bp, 384,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0) {
+		if (ret == -ECONNABORTED) {
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	for (loop = 0; loop < 64; loop++)
+		entry->name[loop] = ntohl(*bp++);
+	bp++; /* final NUL */
+
+	bp++; /* type */
+	entry->nservers = ntohl(*bp++);
+
+	for (loop = 0; loop < 8; loop++)
+		entry->servers[loop].s_addr = *bp++;
+
+	bp += 8; /* partition IDs */
+
+	for (loop = 0; loop < 8; loop++) {
+		tmp = ntohl(*bp++);
+		if (tmp & AFS_VLSF_RWVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
+		if (tmp & AFS_VLSF_ROVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
+		if (tmp & AFS_VLSF_BACKVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
+	}
+
+	entry->vid[0] = ntohl(*bp++);
+	entry->vid[1] = ntohl(*bp++);
+	entry->vid[2] = ntohl(*bp++);
+
+	bp++; /* clone ID */
+
+	tmp = ntohl(*bp++); /* flags */
+	if (tmp & AFS_VLF_RWEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_RW;
+	if (tmp & AFS_VLF_ROEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_RO;
+	if (tmp & AFS_VLF_BACKEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_BAK;
+
+	ret = -ENOMEDIUM;
+	if (!entry->vidmask)
+		goto abort;
+
+	/* success */
+	entry->rtime = get_seconds();
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	rxrpc_put_connection(conn);
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxvl_get_entry_by_name() */
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by ID
+ */
+int afs_rxvl_get_entry_by_id(struct afs_server *server,
+			     afs_volid_t volid,
+			     afs_voltype_t voltype,
+			     struct afs_cache_vlocation *entry)
+{
+	DECLARE_WAITQUEUE(myself, current);
+
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	unsigned tmp;
+	size_t sent;
+	int ret, loop;
+	__be32 *bp, param[3];
+
+	_enter(",%x,%d,", volid, voltype);
+
+	memset(entry, 0, sizeof(*entry));
+
+	/* get hold of the vlserver connection */
+	ret = afs_server_get_vlconn(server, &conn);
+	if (ret < 0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = VLGETENTRYBYID;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq, &myself);
+
+	/* marshall the parameters */
+	param[0] = htonl(VLGETENTRYBYID);
+	param[1] = htonl(volid);
+	param[2] = htonl(voltype);
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call, 384);
+
+	ret = rxrpc_call_read_data(call, bp, 384,
+				   RXRPC_CALL_READ_BLOCK |
+				   RXRPC_CALL_READ_ALL);
+	if (ret < 0) {
+		if (ret == -ECONNABORTED) {
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	for (loop = 0; loop < 64; loop++)
+		entry->name[loop] = ntohl(*bp++);
+	bp++; /* final NUL */
+
+	bp++; /* type */
+	entry->nservers = ntohl(*bp++);
+
+	for (loop = 0; loop < 8; loop++)
+		entry->servers[loop].s_addr = *bp++;
+
+	bp += 8; /* partition IDs */
+
+	for (loop = 0; loop < 8; loop++) {
+		tmp = ntohl(*bp++);
+		if (tmp & AFS_VLSF_RWVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
+		if (tmp & AFS_VLSF_ROVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
+		if (tmp & AFS_VLSF_BACKVOL)
+			entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
+	}
+
+	entry->vid[0] = ntohl(*bp++);
+	entry->vid[1] = ntohl(*bp++);
+	entry->vid[2] = ntohl(*bp++);
+
+	bp++; /* clone ID */
+
+	tmp = ntohl(*bp++); /* flags */
+	if (tmp & AFS_VLF_RWEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_RW;
+	if (tmp & AFS_VLF_ROEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_RO;
+	if (tmp & AFS_VLF_BACKEXISTS)
+		entry->vidmask |= AFS_VOL_VTM_BAK;
+
+	ret = -ENOMEDIUM;
+	if (!entry->vidmask)
+		goto abort;
+
+#if 0 /* TODO: remove */
+	entry->nservers = 3;
+	entry->servers[0].s_addr = htonl(0xac101249);
+	entry->servers[1].s_addr = htonl(0xac101243);
+	entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
+
+	entry->srvtmask[0] = AFS_VOL_VTM_RO;
+	entry->srvtmask[1] = AFS_VOL_VTM_RO;
+	entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
+#endif
+
+	/* success */
+	entry->rtime = get_seconds();
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq, &myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	rxrpc_put_connection(conn);
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call, ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxvl_get_entry_by_id() */
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by ID asynchronously
+ */
+int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op,
+				   afs_volid_t volid,
+				   afs_voltype_t voltype)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct kvec piov[1];
+	size_t sent;
+	int ret;
+	__be32 param[3];
+
+	_enter(",%x,%d,", volid, voltype);
+
+	/* get hold of the vlserver connection */
+	ret = afs_server_get_vlconn(op->server, &conn);
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn,
+				afs_rxvl_get_entry_by_id_attn,
+				afs_rxvl_get_entry_by_id_error,
+				afs_rxvl_aemap,
+				&op->call);
+	rxrpc_put_connection(conn);
+
+	if (ret < 0) {
+		printk("kAFS: Unable to create call: %d\n", ret);
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	op->call->app_opcode = VLGETENTRYBYID;
+	op->call->app_user = op;
+
+	call = op->call;
+	rxrpc_get_call(call);
+
+	/* send event notifications from the call to kafsasyncd */
+	afs_kafsasyncd_begin_op(op);
+
+	/* marshall the parameters */
+	param[0] = htonl(VLGETENTRYBYID);
+	param[1] = htonl(volid);
+	param[2] = htonl(voltype);
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* allocate result read buffer in scratch space */
+	call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call, 384);
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
+				    0, &sent);
+	if (ret < 0) {
+		rxrpc_call_abort(call, ret); /* handled by kafsasyncd */
+		ret = 0;
+		goto out;
+	}
+
+	/* wait for the reply to completely arrive */
+	ret = rxrpc_call_read_data(call, call->app_scr_ptr, 384, 0);
+	switch (ret) {
+	case 0:
+	case -EAGAIN:
+	case -ECONNABORTED:
+		ret = 0;
+		break;	/* all handled by kafsasyncd */
+
+	default:
+		rxrpc_call_abort(call, ret); /* make kafsasyncd handle it */
+		ret = 0;
+		break;
+	}
+
+ out:
+	rxrpc_put_call(call);
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_rxvl_get_entry_by_id_async() */
+
+/*****************************************************************************/
+/*
+ * attend to the asynchronous get VLDB entry by ID
+ */
+int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op,
+				    struct afs_cache_vlocation *entry)
+{
+	__be32 *bp;
+	__u32 tmp;
+	int loop, ret;
+
+	_enter("{op=%p cst=%u}", op, op->call->app_call_state);
+
+	memset(entry, 0, sizeof(*entry));
+
+	if (op->call->app_call_state == RXRPC_CSTATE_COMPLETE) {
+		/* operation finished */
+		afs_kafsasyncd_terminate_op(op);
+
+		bp = op->call->app_scr_ptr;
+
+		/* unmarshall the reply */
+		for (loop = 0; loop < 64; loop++)
+			entry->name[loop] = ntohl(*bp++);
+		bp++; /* final NUL */
+
+		bp++; /* type */
+		entry->nservers = ntohl(*bp++);
+
+		for (loop = 0; loop < 8; loop++)
+			entry->servers[loop].s_addr = *bp++;
+
+		bp += 8; /* partition IDs */
+
+		for (loop = 0; loop < 8; loop++) {
+			tmp = ntohl(*bp++);
+			if (tmp & AFS_VLSF_RWVOL)
+				entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
+			if (tmp & AFS_VLSF_ROVOL)
+				entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
+			if (tmp & AFS_VLSF_BACKVOL)
+				entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
+		}
+
+		entry->vid[0] = ntohl(*bp++);
+		entry->vid[1] = ntohl(*bp++);
+		entry->vid[2] = ntohl(*bp++);
+
+		bp++; /* clone ID */
+
+		tmp = ntohl(*bp++); /* flags */
+		if (tmp & AFS_VLF_RWEXISTS)
+			entry->vidmask |= AFS_VOL_VTM_RW;
+		if (tmp & AFS_VLF_ROEXISTS)
+			entry->vidmask |= AFS_VOL_VTM_RO;
+		if (tmp & AFS_VLF_BACKEXISTS)
+			entry->vidmask |= AFS_VOL_VTM_BAK;
+
+		ret = -ENOMEDIUM;
+		if (!entry->vidmask) {
+			rxrpc_call_abort(op->call, ret);
+			goto done;
+		}
+
+#if 0 /* TODO: remove */
+		entry->nservers = 3;
+		entry->servers[0].s_addr = htonl(0xac101249);
+		entry->servers[1].s_addr = htonl(0xac101243);
+		entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
+
+		entry->srvtmask[0] = AFS_VOL_VTM_RO;
+		entry->srvtmask[1] = AFS_VOL_VTM_RO;
+		entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
+#endif
+
+		/* success */
+		entry->rtime = get_seconds();
+		ret = 0;
+		goto done;
+	}
+
+	if (op->call->app_call_state == RXRPC_CSTATE_ERROR) {
+		/* operation error */
+		ret = op->call->app_errno;
+		goto done;
+	}
+
+	_leave(" = -EAGAIN");
+	return -EAGAIN;
+
+ done:
+	rxrpc_put_call(op->call);
+	op->call = NULL;
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_rxvl_get_entry_by_id_async2() */
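+
+/*
+ * Note on the async pair above (a usage sketch, not extra API):
+ * afs_rxvl_get_entry_by_id_async() starts the call and hands its event
+ * notifications to kafsasyncd; the op's attend routine then calls
+ * afs_rxvl_get_entry_by_id_async2() to collect the result, treating
+ * -EAGAIN as "reply not yet complete" - see afs_vlocation_update_attend()
+ * in vlocation.c for the in-tree caller.
+ */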
+
+/*****************************************************************************/
+/*
+ * handle attention events on an async get-entry-by-ID op
+ * - called from krxiod
+ */
+static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
+{
+	struct afs_async_op *op = call->app_user;
+
+	_enter("{op=%p cst=%u}", op, call->app_call_state);
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_COMPLETE:
+		afs_kafsasyncd_attend_op(op);
+		break;
+	case RXRPC_CSTATE_CLNT_RCV_REPLY:
+		if (call->app_async_read)
+			break;
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		if (call->app_read_count == 0)
+			break;
+		printk("kAFS: Reply bigger than expected"
+		       " {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}",
+		       call->app_call_state,
+		       call->app_async_read,
+		       call->app_mark,
+		       call->app_ready_qty,
+		       call->pkt_rcv_count,
+		       call->app_last_rcv ? " last" : "");
+
+		rxrpc_call_abort(call, -EBADMSG);
+		break;
+	default:
+		BUG();
+	}
+
+	_leave("");
+
+} /* end afs_rxvl_get_entry_by_id_attn() */
+
+/*****************************************************************************/
+/*
+ * handle error events on an async get-entry-by-ID op
+ * - called from krxiod
+ */
+static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call)
+{
+	struct afs_async_op *op = call->app_user;
+
+	_enter("{op=%p cst=%u}", op, call->app_call_state);
+
+	afs_kafsasyncd_attend_op(op);
+
+	_leave("");
+
+} /* end afs_rxvl_get_entry_by_id_error() */
diff --git a/fs/afs/vlclient.h b/fs/afs/vlclient.h
new file mode 100644
index 0000000..e3d6011
--- /dev/null
+++ b/fs/afs/vlclient.h
@@ -0,0 +1,93 @@
+/* vlclient.h: Volume Location Service client interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VLCLIENT_H
+#define _LINUX_AFS_VLCLIENT_H
+
+#include "types.h"
+
+enum AFSVL_Errors {
+	AFSVL_IDEXIST 		= 363520,	/* Volume Id entry exists in vl database */
+	AFSVL_IO 		= 363521,	/* I/O related error */
+	AFSVL_NAMEEXIST 	= 363522,	/* Volume name entry exists in vl database */
+	AFSVL_CREATEFAIL 	= 363523,	/* Internal creation failure */
+	AFSVL_NOENT 		= 363524,	/* No such entry */
+	AFSVL_EMPTY 		= 363525,	/* Vl database is empty */
+	AFSVL_ENTDELETED 	= 363526,	/* Entry is deleted (soft delete) */
+	AFSVL_BADNAME 		= 363527,	/* Volume name is illegal */
+	AFSVL_BADINDEX 		= 363528,	/* Index is out of range */
+	AFSVL_BADVOLTYPE 	= 363529,	/* Bad volume type */
+	AFSVL_BADSERVER 	= 363530,	/* Illegal server number (out of range) */
+	AFSVL_BADPARTITION 	= 363531,	/* Bad partition number */
+	AFSVL_REPSFULL 		= 363532,	/* Run out of space for Replication sites */
+	AFSVL_NOREPSERVER 	= 363533,	/* No such Replication server site exists */
+	AFSVL_DUPREPSERVER 	= 363534,	/* Replication site already exists */
+	AFSVL_RWNOTFOUND 	= 363535,	/* Parent R/W entry not found */
+	AFSVL_BADREFCOUNT 	= 363536,	/* Illegal Reference Count number */
+	AFSVL_SIZEEXCEEDED 	= 363537,	/* Vl size for attributes exceeded */
+	AFSVL_BADENTRY 		= 363538,	/* Bad incoming vl entry */
+	AFSVL_BADVOLIDBUMP 	= 363539,	/* Illegal max volid increment */
+	AFSVL_IDALREADYHASHED 	= 363540,	/* RO/BACK id already hashed */
+	AFSVL_ENTRYLOCKED 	= 363541,	/* Vl entry is already locked */
+	AFSVL_BADVOLOPER 	= 363542,	/* Bad volume operation code */
+	AFSVL_BADRELLOCKTYPE 	= 363543,	/* Bad release lock type */
+	AFSVL_RERELEASE 	= 363544,	/* Status report: last release was aborted */
+	AFSVL_BADSERVERFLAG 	= 363545,	/* Invalid replication site server flag */
+	AFSVL_PERM 		= 363546,	/* No permission access */
+	AFSVL_NOMEM 		= 363547,	/* malloc/realloc failed to alloc enough memory */
+};
+
+/* maps to "struct vldbentry" in vvl-spec.pdf */
+struct afs_vldbentry {
+	char		name[65];		/* name of volume (including NUL char) */
+	afs_voltype_t	type;			/* volume type */
+	unsigned	num_servers;		/* num servers that hold instances of this vol */
+	unsigned	clone_id;		/* cloning ID */
+
+	unsigned	flags;
+#define AFS_VLF_RWEXISTS	0x1000		/* R/W volume exists */
+#define AFS_VLF_ROEXISTS	0x2000		/* R/O volume exists */
+#define AFS_VLF_BACKEXISTS	0x4000		/* backup volume exists */
+
+	afs_volid_t	volume_ids[3];		/* volume IDs */
+
+	struct {
+		struct in_addr	addr;		/* server address */
+		unsigned	partition;	/* partition ID on this server */
+		unsigned	flags;		/* server specific flags */
+#define AFS_VLSF_NEWREPSITE	0x0001	/* unused */
+#define AFS_VLSF_ROVOL		0x0002	/* this server holds a R/O instance of the volume */
+#define AFS_VLSF_RWVOL		0x0004	/* this server holds a R/W instance of the volume */
+#define AFS_VLSF_BACKVOL	0x0008	/* this server holds a backup instance of the volume */
+	} servers[8];
+
+};
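+
+/* The wire structure above is condensed into struct afs_cache_vlocation
+ * (see volume.h) by the unmarshalling code in vlclient.c: each server's
+ * AFS_VLSF_* flags become an AFS_VOL_VTM_* mask in srvtmask[], and the
+ * AFS_VLF_*EXISTS flags become the vidmask covering volume_ids[]/vid[].
+ */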
+
+/* look up a volume location database entry by name */
+extern int afs_rxvl_get_entry_by_name(struct afs_server *server,
+				      const char *volname,
+				      unsigned volnamesz,
+				      struct afs_cache_vlocation *entry);
+
+/* look up a volume location database entry by ID */
+extern int afs_rxvl_get_entry_by_id(struct afs_server *server,
+				    afs_volid_t	volid,
+				    afs_voltype_t voltype,
+				    struct afs_cache_vlocation *entry);
+
+extern int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op,
+					  afs_volid_t volid,
+					  afs_voltype_t voltype);
+
+extern int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op,
+					   struct afs_cache_vlocation *entry);
+
+#endif /* _LINUX_AFS_VLCLIENT_H */
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
new file mode 100644
index 0000000..eced206
--- /dev/null
+++ b/fs/afs/vlocation.c
@@ -0,0 +1,954 @@
+/* vlocation.c: volume location management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include <rxrpc/connection.h>
+#include "internal.h"
+
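+/* interval between periodic VLDB record refreshes (1000 seconds' worth of jiffies) */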
+#define AFS_VLDB_TIMEOUT (HZ * 1000)
+
+static void afs_vlocation_update_timer(struct afs_timer *timer);
+static void afs_vlocation_update_attend(struct afs_async_op *op);
+static void afs_vlocation_update_discard(struct afs_async_op *op);
+static void __afs_put_vlocation(struct afs_vlocation *vlocation);
+
+static void __afs_vlocation_timeout(struct afs_timer *timer)
+{
+	struct afs_vlocation *vlocation =
+		list_entry(timer, struct afs_vlocation, timeout);
+
+	_debug("VL TIMEOUT [%s{u=%d}]",
+	       vlocation->vldb.name, atomic_read(&vlocation->usage));
+
+	afs_vlocation_do_timeout(vlocation);
+}
+
+static const struct afs_timer_ops afs_vlocation_timer_ops = {
+	.timed_out	= __afs_vlocation_timeout,
+};
+
+static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
+	.timed_out	= afs_vlocation_update_timer,
+};
+
+static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
+	.attend		= afs_vlocation_update_attend,
+	.discard	= afs_vlocation_update_discard,
+};
+
+static LIST_HEAD(afs_vlocation_update_pendq);	/* queue of VLs awaiting update */
+static struct afs_vlocation *afs_vlocation_update;	/* VL currently being updated */
+static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */
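+
+/* Only one VLDB record is refreshed at a time: afs_vlocation_update points
+ * to the record currently in flight and any others queue their upd_op on
+ * afs_vlocation_update_pendq (see afs_vlocation_update_timer() below).
+ */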
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+						     const void *entry);
+static void afs_vlocation_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vlocation_cache_index_def = {
+	.name		= "vldb",
+	.data_size	= sizeof(struct afs_cache_vlocation),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+	.match		= afs_vlocation_cache_match,
+	.update		= afs_vlocation_cache_update,
+};
+#endif
+
+/*****************************************************************************/
+/*
+ * iterate through the VL servers in a cell until one of them admits knowing
+ * about the volume in question
+ * - caller must have cell->vl_sem write-locked
+ */
+static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
+					   const char *name,
+					   unsigned namesz,
+					   struct afs_cache_vlocation *vldb)
+{
+	struct afs_server *server = NULL;
+	struct afs_cell *cell = vlocation->cell;
+	int count, ret;
+
+	_enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz);
+
+	ret = -ENOMEDIUM;
+	for (count = cell->vl_naddrs; count > 0; count--) {
+		_debug("CellServ[%hu]: %08x",
+		       cell->vl_curr_svix,
+		       cell->vl_addrs[cell->vl_curr_svix].s_addr);
+
+		/* try and create a server */
+		ret = afs_server_lookup(cell,
+					&cell->vl_addrs[cell->vl_curr_svix],
+					&server);
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+		case -ENONET:
+			goto out;
+		default:
+			goto rotate;
+		}
+
+		/* attempt to access the VL server */
+		ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb);
+		switch (ret) {
+		case 0:
+			afs_put_server(server);
+			goto out;
+		case -ENOMEM:
+		case -ENONET:
+		case -ENETUNREACH:
+		case -EHOSTUNREACH:
+		case -ECONNREFUSED:
+			down_write(&server->sem);
+			if (server->vlserver) {
+				rxrpc_put_connection(server->vlserver);
+				server->vlserver = NULL;
+			}
+			up_write(&server->sem);
+			afs_put_server(server);
+			if (ret == -ENOMEM || ret == -ENONET)
+				goto out;
+			goto rotate;
+		case -ENOMEDIUM:
+			afs_put_server(server);
+			goto out;
+		default:
+			afs_put_server(server);
+			ret = -ENOMEDIUM;
+			goto rotate;
+		}
+
+		/* rotate the server records upon lookup failure */
+	rotate:
+		cell->vl_curr_svix++;
+		cell->vl_curr_svix %= cell->vl_naddrs;
+	}
+
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_vlocation_access_vl_by_name() */
+
+/*****************************************************************************/
+/*
+ * iterate through the VL servers in a cell until one of them admits knowing
+ * about the volume in question
+ * - caller must have cell->vl_sem write-locked
+ */
+static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
+					 afs_volid_t volid,
+					 afs_voltype_t voltype,
+					 struct afs_cache_vlocation *vldb)
+{
+	struct afs_server *server = NULL;
+	struct afs_cell *cell = vlocation->cell;
+	int count, ret;
+
+	_enter("%s,%x,%d,", cell->name, volid, voltype);
+
+	ret = -ENOMEDIUM;
+	for (count = cell->vl_naddrs; count > 0; count--) {
+		_debug("CellServ[%hu]: %08x",
+		       cell->vl_curr_svix,
+		       cell->vl_addrs[cell->vl_curr_svix].s_addr);
+
+		/* try and create a server */
+		ret = afs_server_lookup(cell,
+					&cell->vl_addrs[cell->vl_curr_svix],
+					&server);
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+		case -ENONET:
+			goto out;
+		default:
+			goto rotate;
+		}
+
+		/* attempt to access the VL server */
+		ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb);
+		switch (ret) {
+		case 0:
+			afs_put_server(server);
+			goto out;
+		case -ENOMEM:
+		case -ENONET:
+		case -ENETUNREACH:
+		case -EHOSTUNREACH:
+		case -ECONNREFUSED:
+			down_write(&server->sem);
+			if (server->vlserver) {
+				rxrpc_put_connection(server->vlserver);
+				server->vlserver = NULL;
+			}
+			up_write(&server->sem);
+			afs_put_server(server);
+			if (ret == -ENOMEM || ret == -ENONET)
+				goto out;
+			goto rotate;
+		case -ENOMEDIUM:
+			afs_put_server(server);
+			goto out;
+		default:
+			afs_put_server(server);
+			ret = -ENOMEDIUM;
+			goto rotate;
+		}
+
+		/* rotate the server records upon lookup failure */
+	rotate:
+		cell->vl_curr_svix++;
+		cell->vl_curr_svix %= cell->vl_naddrs;
+	}
+
+ out:
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_vlocation_access_vl_by_id() */
+
+/*****************************************************************************/
+/*
+ * lookup volume location
+ * - caller must have cell->vol_sem write-locked
+ * - iterate through the VL servers in a cell until one of them admits knowing
+ *   about the volume in question
+ * - lookup in the local cache if not able to find on the VL server
+ * - insert/update in the local cache if did get a VL response
+ */
+int afs_vlocation_lookup(struct afs_cell *cell,
+			 const char *name,
+			 unsigned namesz,
+			 struct afs_vlocation **_vlocation)
+{
+	struct afs_cache_vlocation vldb;
+	struct afs_vlocation *vlocation;
+	afs_voltype_t voltype;
+	afs_volid_t vid;
+	int active = 0, ret;
+
+	_enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz);
+
+	if (namesz > sizeof(vlocation->vldb.name)) {
+		_leave(" = -ENAMETOOLONG");
+		return -ENAMETOOLONG;
+	}
+
+	/* search the cell's active list first */
+	list_for_each_entry(vlocation, &cell->vl_list, link) {
+		if (namesz < sizeof(vlocation->vldb.name) &&
+		    vlocation->vldb.name[namesz] != '\0')
+			continue;
+
+		if (memcmp(vlocation->vldb.name, name, namesz) == 0)
+			goto found_in_memory;
+	}
+
+	/* search the cell's graveyard list second */
+	spin_lock(&cell->vl_gylock);
+	list_for_each_entry(vlocation, &cell->vl_graveyard, link) {
+		if (namesz < sizeof(vlocation->vldb.name) &&
+		    vlocation->vldb.name[namesz] != '\0')
+			continue;
+
+		if (memcmp(vlocation->vldb.name, name, namesz) == 0)
+			goto found_in_graveyard;
+	}
+	spin_unlock(&cell->vl_gylock);
+
+	/* not in the cell's in-memory lists - create a new record */
+	vlocation = kmalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
+	if (!vlocation)
+		return -ENOMEM;
+
+	memset(vlocation, 0, sizeof(struct afs_vlocation));
+	atomic_set(&vlocation->usage, 1);
+	INIT_LIST_HEAD(&vlocation->link);
+	rwlock_init(&vlocation->lock);
+	memcpy(vlocation->vldb.name, name, namesz);
+
+	afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops);
+	afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops);
+	afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops);
+
+	afs_get_cell(cell);
+	vlocation->cell = cell;
+
+	list_add_tail(&vlocation->link, &cell->vl_list);
+
+#ifdef AFS_CACHING_SUPPORT
+	/* we want to store it in the cache, plus it might already be
+	 * encached */
+	cachefs_acquire_cookie(cell->cache,
+			       &afs_volume_cache_index_def,
+			       vlocation,
+			       &vlocation->cache);
+
+	if (vlocation->valid)
+		goto found_in_cache;
+#endif
+
+	/* try to look up an unknown volume in the cell VL databases by name */
+	ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb);
+	if (ret < 0) {
+		printk("kAFS: failed to locate '%*.*s' in cell '%s'\n",
+		       namesz, namesz, name, cell->name);
+		goto error;
+	}
+
+	goto found_on_vlserver;
+
+ found_in_graveyard:
+	/* found in the graveyard - resurrect */
+	_debug("found in graveyard");
+	atomic_inc(&vlocation->usage);
+	list_del(&vlocation->link);
+	list_add_tail(&vlocation->link, &cell->vl_list);
+	spin_unlock(&cell->vl_gylock);
+
+	afs_kafstimod_del_timer(&vlocation->timeout);
+	goto active;
+
+ found_in_memory:
+	/* found in memory - check to see if it's active */
+	_debug("found in memory");
+	atomic_inc(&vlocation->usage);
+
+ active:
+	active = 1;
+
+#ifdef AFS_CACHING_SUPPORT
+ found_in_cache:
+#endif
+	/* try to look up a cached volume in the cell VL databases by ID */
+	_debug("found in cache");
+
+	_debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+	       vlocation->vldb.name,
+	       vlocation->vldb.vidmask,
+	       ntohl(vlocation->vldb.servers[0].s_addr),
+	       vlocation->vldb.srvtmask[0],
+	       ntohl(vlocation->vldb.servers[1].s_addr),
+	       vlocation->vldb.srvtmask[1],
+	       ntohl(vlocation->vldb.servers[2].s_addr),
+	       vlocation->vldb.srvtmask[2]
+	       );
+
+	_debug("Vids: %08x %08x %08x",
+	       vlocation->vldb.vid[0],
+	       vlocation->vldb.vid[1],
+	       vlocation->vldb.vid[2]);
+
+	if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
+		vid = vlocation->vldb.vid[0];
+		voltype = AFSVL_RWVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
+		vid = vlocation->vldb.vid[1];
+		voltype = AFSVL_ROVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
+		vid = vlocation->vldb.vid[2];
+		voltype = AFSVL_BACKVOL;
+	}
+	else {
+		BUG();
+		vid = 0;
+		voltype = 0;
+	}
+
+	ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb);
+	switch (ret) {
+		/* net error */
+	default:
+		printk("kAFS: failed to look up volume '%*.*s' (%x) in cell '%s': %d\n",
+		       namesz, namesz, name, vid, cell->name, ret);
+		goto error;
+
+		/* pulled from local cache into memory */
+	case 0:
+		goto found_on_vlserver;
+
+		/* uh oh... looks like the volume got deleted */
+	case -ENOMEDIUM:
+		printk("kAFS: volume '%*.*s' (%x) does not exist in cell '%s'\n",
+		       namesz, namesz, name, vid, cell->name);
+
+		/* TODO: make existing record unavailable */
+		goto error;
+	}
+
+ found_on_vlserver:
+	_debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+	       namesz, namesz, name,
+	       vldb.vidmask,
+	       ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
+	       ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
+	       ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
+	       );
+
+	_debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]);
+
+	if ((namesz < sizeof(vlocation->vldb.name) &&
+	     vlocation->vldb.name[namesz] != '\0') ||
+	    memcmp(vldb.name, name, namesz) != 0)
+		printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n",
+		       namesz, namesz, name, vldb.name);
+
+	memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
+
+	afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ);
+
+#ifdef AFS_CACHING_SUPPORT
+	/* update volume entry in local cache */
+	cachefs_update_cookie(vlocation->cache);
+#endif
+
+	*_vlocation = vlocation;
+	_leave(" = 0 (%p)", vlocation);
+	return 0;
+
+ error:
+	if (vlocation) {
+		if (active) {
+			__afs_put_vlocation(vlocation);
+		}
+		else {
+			list_del(&vlocation->link);
+#ifdef AFS_CACHING_SUPPORT
+			cachefs_relinquish_cookie(vlocation->cache, 0);
+#endif
+			afs_put_cell(vlocation->cell);
+			kfree(vlocation);
+		}
+	}
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_vlocation_lookup() */
+
+/*****************************************************************************/
+/*
+ * finish using a volume location record
+ * - caller must have cell->vl_sem write-locked
+ */
+static void __afs_put_vlocation(struct afs_vlocation *vlocation)
+{
+	struct afs_cell *cell;
+
+	if (!vlocation)
+		return;
+
+	_enter("%s", vlocation->vldb.name);
+
+	cell = vlocation->cell;
+
+	/* sanity check */
+	BUG_ON(atomic_read(&vlocation->usage) <= 0);
+
+	spin_lock(&cell->vl_gylock);
+	if (likely(!atomic_dec_and_test(&vlocation->usage))) {
+		spin_unlock(&cell->vl_gylock);
+		_leave("");
+		return;
+	}
+
+	/* move to graveyard queue */
+	list_del(&vlocation->link);
+	list_add_tail(&vlocation->link, &cell->vl_graveyard);
+
+	/* remove from pending timeout queue (refcounted if actually being
+	 * updated) */
+	list_del_init(&vlocation->upd_op.link);
+
+	/* time out in 10 secs */
+	afs_kafstimod_del_timer(&vlocation->upd_timer);
+	afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ);
+
+	spin_unlock(&cell->vl_gylock);
+
+	_leave(" [killed]");
+} /* end __afs_put_vlocation() */
+
+/*****************************************************************************/
+/*
+ * finish using a volume location record
+ */
+void afs_put_vlocation(struct afs_vlocation *vlocation)
+{
+	if (vlocation) {
+		struct afs_cell *cell = vlocation->cell;
+
+		down_write(&cell->vl_sem);
+		__afs_put_vlocation(vlocation);
+		up_write(&cell->vl_sem);
+	}
+} /* end afs_put_vlocation() */
+
+/*****************************************************************************/
+/*
+ * timeout vlocation record
+ * - removes from the cell's graveyard if the usage count is zero
+ */
+void afs_vlocation_do_timeout(struct afs_vlocation *vlocation)
+{
+	struct afs_cell *cell;
+
+	_enter("%s", vlocation->vldb.name);
+
+	cell = vlocation->cell;
+
+	BUG_ON(atomic_read(&vlocation->usage) < 0);
+
+	/* remove from graveyard if still dead */
+	spin_lock(&cell->vl_gylock);
+	if (atomic_read(&vlocation->usage) == 0)
+		list_del_init(&vlocation->link);
+	else
+		vlocation = NULL;
+	spin_unlock(&cell->vl_gylock);
+
+	if (!vlocation) {
+		_leave("");
+		return; /* resurrected */
+	}
+
+	/* we can now destroy it properly */
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_relinquish_cookie(vlocation->cache, 0);
+#endif
+	afs_put_cell(cell);
+
+	kfree(vlocation);
+
+	_leave(" [destroyed]");
+} /* end afs_vlocation_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * send an update operation to the currently selected server
+ */
+static int afs_vlocation_update_begin(struct afs_vlocation *vlocation)
+{
+	afs_voltype_t voltype;
+	afs_volid_t vid;
+	int ret;
+
+	_enter("%s{ufs=%u ucs=%u}",
+	       vlocation->vldb.name,
+	       vlocation->upd_first_svix,
+	       vlocation->upd_curr_svix);
+
+	/* try to look up a cached volume in the cell VL databases by ID */
+	if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
+		vid = vlocation->vldb.vid[0];
+		voltype = AFSVL_RWVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
+		vid = vlocation->vldb.vid[1];
+		voltype = AFSVL_ROVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
+		vid = vlocation->vldb.vid[2];
+		voltype = AFSVL_BACKVOL;
+	}
+	else {
+		BUG();
+		vid = 0;
+		voltype = 0;
+	}
+
+	/* contact the chosen server */
+	ret = afs_server_lookup(
+		vlocation->cell,
+		&vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
+		&vlocation->upd_op.server);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENOMEM:
+	case -ENONET:
+	default:
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	/* initiate the update operation */
+	ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype);
+	if (ret < 0) {
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_vlocation_update_begin() */
+
+/*****************************************************************************/
+/*
+ * abandon updating a VL record
+ * - does not restart the update timer
+ */
+static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation,
+					 afs_vlocation_upd_t state,
+					 int ret)
+{
+	_enter("%s,%u", vlocation->vldb.name, state);
+
+	if (ret < 0)
+		printk("kAFS: Abandoning VL update '%s': %d\n",
+		       vlocation->vldb.name, ret);
+
+	/* discard the server record */
+	afs_put_server(vlocation->upd_op.server);
+	vlocation->upd_op.server = NULL;
+
+	spin_lock(&afs_vlocation_update_lock);
+	afs_vlocation_update = NULL;
+	vlocation->upd_state = state;
+
+	/* TODO: start updating next VL record on pending list */
+
+	spin_unlock(&afs_vlocation_update_lock);
+
+	_leave("");
+} /* end afs_vlocation_update_abandon() */
+
+/*****************************************************************************/
+/*
+ * handle periodic update timeouts and busy retry timeouts
+ * - called from kafstimod
+ */
+static void afs_vlocation_update_timer(struct afs_timer *timer)
+{
+	struct afs_vlocation *vlocation =
+		list_entry(timer, struct afs_vlocation, upd_timer);
+	int ret;
+
+	_enter("%s", vlocation->vldb.name);
+
+	/* only update if not in the graveyard (also defends against a racing put) */
+	spin_lock(&vlocation->cell->vl_gylock);
+
+	if (!atomic_read(&vlocation->usage))
+		goto out_unlock1;
+
+	spin_lock(&afs_vlocation_update_lock);
+
+	/* if we were woken up due to EBUSY sleep then restart immediately if
+	 * possible or else jump to front of pending queue */
+	if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) {
+		if (afs_vlocation_update) {
+			list_add(&vlocation->upd_op.link,
+				 &afs_vlocation_update_pendq);
+		}
+		else {
+			afs_get_vlocation(vlocation);
+			afs_vlocation_update = vlocation;
+			vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+		}
+		goto out_unlock2;
+	}
+
+	/* put on pending queue if there's already another update in progress */
+	if (afs_vlocation_update) {
+		vlocation->upd_state = AFS_VLUPD_PENDING;
+		list_add_tail(&vlocation->upd_op.link,
+			      &afs_vlocation_update_pendq);
+		goto out_unlock2;
+	}
+
+	/* hold a ref on it while actually updating */
+	afs_get_vlocation(vlocation);
+	afs_vlocation_update = vlocation;
+	vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+
+	spin_unlock(&afs_vlocation_update_lock);
+	spin_unlock(&vlocation->cell->vl_gylock);
+
+	/* okay... we can start the update */
+	_debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name);
+	vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
+	vlocation->upd_curr_svix = vlocation->upd_first_svix;
+	vlocation->upd_rej_cnt = 0;
+	vlocation->upd_busy_cnt = 0;
+
+	ret = afs_vlocation_update_begin(vlocation);
+	if (ret < 0) {
+		afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
+		afs_kafstimod_add_timer(&vlocation->upd_timer,
+					AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+	}
+
+	_leave("");
+	return;
+
+ out_unlock2:
+	spin_unlock(&afs_vlocation_update_lock);
+ out_unlock1:
+	spin_unlock(&vlocation->cell->vl_gylock);
+	_leave("");
+	return;
+
+} /* end afs_vlocation_update_timer() */
+
+/*****************************************************************************/
+/*
+ * attend to an update operation upon which an event happened
+ * - called in kafsasyncd context
+ */
+static void afs_vlocation_update_attend(struct afs_async_op *op)
+{
+	struct afs_cache_vlocation vldb;
+	struct afs_vlocation *vlocation =
+		list_entry(op, struct afs_vlocation, upd_op);
+	unsigned tmp;
+	int ret;
+
+	_enter("%s", vlocation->vldb.name);
+
+	ret = afs_rxvl_get_entry_by_id_async2(op, &vldb);
+	switch (ret) {
+	case -EAGAIN:
+		_leave(" [unfinished]");
+		return;
+
+	case 0:
+		_debug("END VL UPDATE: %d\n", ret);
+		vlocation->valid = 1;
+
+		_debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
+		       vldb.vidmask,
+		       ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
+		       ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
+		       ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
+		       );
+
+		_debug("Vids: %08x %08x %08x",
+		       vldb.vid[0], vldb.vid[1], vldb.vid[2]);
+
+		afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
+
+		down_write(&vlocation->cell->vl_sem);
+
+		/* actually update the cache */
+		if (strncmp(vldb.name, vlocation->vldb.name,
+			    sizeof(vlocation->vldb.name)) != 0)
+			printk("kAFS: name of volume '%s'"
+			       " changed to '%s' on server\n",
+			       vlocation->vldb.name, vldb.name);
+
+		memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
+
+#if 0
+		/* TODO update volume entry in local cache */
+#endif
+
+		up_write(&vlocation->cell->vl_sem);
+
+		if (ret < 0)
+			printk("kAFS: failed to update local cache: %d\n", ret);
+
+		afs_kafstimod_add_timer(&vlocation->upd_timer,
+					AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+		_leave(" [found]");
+		return;
+
+	case -ENOMEDIUM:
+		vlocation->upd_rej_cnt++;
+		goto try_next;
+
+		/* the server is locked - retry in a very short while */
+	case -EBUSY:
+		vlocation->upd_busy_cnt++;
+		if (vlocation->upd_busy_cnt > 3)
+			goto try_next; /* too many retries */
+
+		afs_vlocation_update_abandon(vlocation,
+					     AFS_VLUPD_BUSYSLEEP, 0);
+		afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2);
+		afs_put_vlocation(vlocation);
+		_leave(" [busy]");
+		return;
+
+	case -ENETUNREACH:
+	case -EHOSTUNREACH:
+	case -ECONNREFUSED:
+	case -EREMOTEIO:
+		/* record bad vlserver info in the cell too
+		 * - TODO: use down_write_trylock() if available
+		 */
+		if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
+			vlocation->cell->vl_curr_svix =
+				vlocation->cell->vl_curr_svix %
+				vlocation->cell->vl_naddrs;
+
+	case -EBADRQC:
+	case -EINVAL:
+	case -EACCES:
+	case -EBADMSG:
+		goto try_next;
+
+	default:
+		goto abandon;
+	}
+
+	/* try contacting the next server */
+ try_next:
+	vlocation->upd_busy_cnt = 0;
+
+	/* discard the server record */
+	afs_put_server(vlocation->upd_op.server);
+	vlocation->upd_op.server = NULL;
+
+	tmp = vlocation->cell->vl_naddrs;
+	if (tmp == 0)
+		goto abandon;
+
+	vlocation->upd_curr_svix++;
+	if (vlocation->upd_curr_svix >= tmp)
+		vlocation->upd_curr_svix = 0;
+	if (vlocation->upd_first_svix >= tmp)
+		vlocation->upd_first_svix = tmp - 1;
+
+	/* move to the next server */
+	if (vlocation->upd_curr_svix != vlocation->upd_first_svix) {
+		afs_vlocation_update_begin(vlocation);
+		_leave(" [next]");
+		return;
+	}
+
+	/* run out of servers to try - was the volume rejected? */
+	if (vlocation->upd_rej_cnt > 0) {
+		printk("kAFS: Active volume no longer valid '%s'\n",
+		       vlocation->vldb.name);
+		vlocation->valid = 0;
+		afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
+		afs_kafstimod_add_timer(&vlocation->upd_timer,
+					AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+		_leave(" [invalidated]");
+		return;
+	}
+
+	/* abandon the update */
+ abandon:
+	afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
+	afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10);
+	afs_put_vlocation(vlocation);
+	_leave(" [abandoned]");
+
+} /* end afs_vlocation_update_attend() */
+
+/*****************************************************************************/
+/*
+ * deal with an update operation being discarded
+ * - called in kafsasyncd context when it's dying due to rmmod
+ * - the call has already been aborted and put()'d
+ */
+static void afs_vlocation_update_discard(struct afs_async_op *op)
+{
+	struct afs_vlocation *vlocation =
+		list_entry(op, struct afs_vlocation, upd_op);
+
+	_enter("%s", vlocation->vldb.name);
+
+	afs_put_server(op->server);
+	op->server = NULL;
+
+	afs_put_vlocation(vlocation);
+
+	_leave("");
+} /* end afs_vlocation_update_discard() */
+
+/*****************************************************************************/
+/*
+ * match a VLDB record stored in the cache
+ * - may also load target from entry
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+						     const void *entry)
+{
+	const struct afs_cache_vlocation *vldb = entry;
+	struct afs_vlocation *vlocation = target;
+
+	_enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
+
+	if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
+	    ) {
+		if (!vlocation->valid ||
+		    vlocation->vldb.rtime == vldb->rtime
+		    ) {
+			vlocation->vldb = *vldb;
+			vlocation->valid = 1;
+			_leave(" = SUCCESS [c->m]");
+			return CACHEFS_MATCH_SUCCESS;
+		}
+		/* need to update cache if cached info differs */
+		else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
+			/* delete if VIDs for this name differ */
+			if (memcmp(&vlocation->vldb.vid,
+				   &vldb->vid,
+				   sizeof(vldb->vid)) != 0) {
+				_leave(" = DELETE");
+				return CACHEFS_MATCH_SUCCESS_DELETE;
+			}
+
+			_leave(" = UPDATE");
+			return CACHEFS_MATCH_SUCCESS_UPDATE;
+		}
+		else {
+			_leave(" = SUCCESS");
+			return CACHEFS_MATCH_SUCCESS;
+		}
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+} /* end afs_vlocation_cache_match() */
+#endif
+
+/*****************************************************************************/
+/*
+ * update a VLDB record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vlocation_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vlocation *vldb = entry;
+	struct afs_vlocation *vlocation = source;
+
+	_enter("");
+
+	*vldb = vlocation->vldb;
+
+} /* end afs_vlocation_cache_update() */
+#endif
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
new file mode 100644
index 0000000..9867fef
--- /dev/null
+++ b/fs/afs/vnode.c
@@ -0,0 +1,395 @@
+/* vnode.c: AFS vnode management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "vnode.h"
+#include "internal.h"
+
+static void afs_vnode_cb_timed_out(struct afs_timer *timer);
+
+struct afs_timer_ops afs_vnode_cb_timed_out_ops = {
+	.timed_out	= afs_vnode_cb_timed_out,
+};
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+						 const void *entry);
+static void afs_vnode_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vnode_cache_index_def = {
+	.name		= "vnode",
+	.data_size	= sizeof(struct afs_cache_vnode),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 4 },
+	.match		= afs_vnode_cache_match,
+	.update		= afs_vnode_cache_update,
+};
+#endif
+
+/*****************************************************************************/
+/*
+ * handle a callback timing out
+ * TODO: retain a ref to vnode struct for an outstanding callback timeout
+ */
+static void afs_vnode_cb_timed_out(struct afs_timer *timer)
+{
+	struct afs_server *oldserver;
+	struct afs_vnode *vnode;
+
+	vnode = list_entry(timer, struct afs_vnode, cb_timeout);
+
+	_enter("%p", vnode);
+
+	/* set the changed flag in the vnode and release the server */
+	spin_lock(&vnode->lock);
+
+	oldserver = xchg(&vnode->cb_server, NULL);
+	if (oldserver) {
+		vnode->flags |= AFS_VNODE_CHANGED;
+
+		spin_lock(&afs_cb_hash_lock);
+		list_del_init(&vnode->cb_hash_link);
+		spin_unlock(&afs_cb_hash_lock);
+
+		spin_lock(&oldserver->cb_lock);
+		list_del_init(&vnode->cb_link);
+		spin_unlock(&oldserver->cb_lock);
+	}
+
+	spin_unlock(&vnode->lock);
+
+	afs_put_server(oldserver);
+
+	_leave("");
+} /* end afs_vnode_cb_timed_out() */
+
+/*****************************************************************************/
+/*
+ * finish off updating the recorded status of a file
+ * - starts callback expiry timer
+ * - adds to server's callback list
+ */
+static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
+					     struct afs_server *server,
+					     int ret)
+{
+	struct afs_server *oldserver = NULL;
+
+	_enter("%p,%p,%d", vnode, server, ret);
+
+	spin_lock(&vnode->lock);
+
+	vnode->flags &= ~AFS_VNODE_CHANGED;
+
+	if (ret == 0) {
+		/* adjust the callback timeout appropriately */
+		afs_kafstimod_add_timer(&vnode->cb_timeout,
+					vnode->cb_expiry * HZ);
+
+		spin_lock(&afs_cb_hash_lock);
+		list_del(&vnode->cb_hash_link);
+		list_add_tail(&vnode->cb_hash_link,
+			      &afs_cb_hash(server, &vnode->fid));
+		spin_unlock(&afs_cb_hash_lock);
+
+		/* swap ref to old callback server with that for new callback
+		 * server */
+		oldserver = xchg(&vnode->cb_server, server);
+		if (oldserver != server) {
+			if (oldserver) {
+				spin_lock(&oldserver->cb_lock);
+				list_del_init(&vnode->cb_link);
+				spin_unlock(&oldserver->cb_lock);
+			}
+
+			afs_get_server(server);
+			spin_lock(&server->cb_lock);
+			list_add_tail(&vnode->cb_link, &server->cb_promises);
+			spin_unlock(&server->cb_lock);
+		}
+		else {
+			/* same server */
+			oldserver = NULL;
+		}
+	}
+	else if (ret == -ENOENT) {
+		/* the file was deleted - clear the callback timeout */
+		oldserver = xchg(&vnode->cb_server, NULL);
+		afs_kafstimod_del_timer(&vnode->cb_timeout);
+
+		_debug("got NOENT from server - marking file deleted");
+		vnode->flags |= AFS_VNODE_DELETED;
+	}
+
+	vnode->update_cnt--;
+
+	spin_unlock(&vnode->lock);
+
+	wake_up_all(&vnode->update_waitq);
+
+	afs_put_server(oldserver);
+
+	_leave("");
+
+} /* end afs_vnode_finalise_status_update() */
+
+/*****************************************************************************/
+/*
+ * fetch file status from the volume
+ * - don't issue a fetch if:
+ *   - the changed bit is not set and there's a valid callback
+ *   - there are any outstanding ops that will fetch the status
+ * - TODO implement local caching
+ */
+int afs_vnode_fetch_status(struct afs_vnode *vnode)
+{
+	struct afs_server *server;
+	int ret;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%s,{%u,%u,%u}",
+	       vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+
+	if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) {
+		_leave(" [unchanged]");
+		return 0;
+	}
+
+	if (vnode->flags & AFS_VNODE_DELETED) {
+		_leave(" [deleted]");
+		return -ENOENT;
+	}
+
+	spin_lock(&vnode->lock);
+
+	if (!(vnode->flags & AFS_VNODE_CHANGED)) {
+		spin_unlock(&vnode->lock);
+		_leave(" [unchanged]");
+		return 0;
+	}
+
+	if (vnode->update_cnt > 0) {
+		/* someone else started a fetch */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&vnode->update_waitq, &myself);
+
+		/* wait for the status to be updated */
+		for (;;) {
+			if (!(vnode->flags & AFS_VNODE_CHANGED))
+				break;
+			if (vnode->flags & AFS_VNODE_DELETED)
+				break;
+
+			/* it got updated and invalidated all before we saw
+			 * it */
+			if (vnode->update_cnt == 0) {
+				remove_wait_queue(&vnode->update_waitq,
+						  &myself);
+				set_current_state(TASK_RUNNING);
+				goto get_anyway;
+			}
+
+			spin_unlock(&vnode->lock);
+
+			schedule();
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+			spin_lock(&vnode->lock);
+		}
+
+		remove_wait_queue(&vnode->update_waitq, &myself);
+		spin_unlock(&vnode->lock);
+		set_current_state(TASK_RUNNING);
+
+		return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
+	}
+
+ get_anyway:
+	/* okay... we're going to have to initiate the op */
+	vnode->update_cnt++;
+
+	spin_unlock(&vnode->lock);
+
+	/* merge AFS status fetches and clear outstanding callback on this
+	 * vnode */
+	do {
+		/* pick a server to query */
+		ret = afs_volume_pick_fileserver(vnode->volume, &server);
+		if (ret < 0)
+			return ret;
+
+		_debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+		ret = afs_rxfs_fetch_file_status(server, vnode, NULL);
+
+	} while (!afs_volume_release_fileserver(vnode->volume, server, ret));
+
+	/* adjust the flags */
+	afs_vnode_finalise_status_update(vnode, server, ret);
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_vnode_fetch_status() */
+
+/*****************************************************************************/
+/*
+ * fetch file data from the volume
+ * - TODO implement caching and server failover
+ */
+int afs_vnode_fetch_data(struct afs_vnode *vnode,
+			 struct afs_rxfs_fetch_descriptor *desc)
+{
+	struct afs_server *server;
+	int ret;
+
+	_enter("%s,{%u,%u,%u}",
+	       vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid,
+	       vnode->fid.vnode,
+	       vnode->fid.unique);
+
+	/* this op will fetch the status */
+	spin_lock(&vnode->lock);
+	vnode->update_cnt++;
+	spin_unlock(&vnode->lock);
+
+	/* merge in AFS status fetches and clear outstanding callback on this
+	 * vnode */
+	do {
+		/* pick a server to query */
+		ret = afs_volume_pick_fileserver(vnode->volume, &server);
+		if (ret < 0)
+			return ret;
+
+		_debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+		ret = afs_rxfs_fetch_file_data(server, vnode, desc, NULL);
+
+	} while (!afs_volume_release_fileserver(vnode->volume, server, ret));
+
+	/* adjust the flags */
+	afs_vnode_finalise_status_update(vnode, server, ret);
+
+	_leave(" = %d", ret);
+	return ret;
+
+} /* end afs_vnode_fetch_data() */
+
+/*****************************************************************************/
+/*
+ * break any outstanding callback on a vnode
+ * - only relevant to the server that issued it
+ */
+int afs_vnode_give_up_callback(struct afs_vnode *vnode)
+{
+	struct afs_server *server;
+	int ret;
+
+	_enter("%s,{%u,%u,%u}",
+	       vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid,
+	       vnode->fid.vnode,
+	       vnode->fid.unique);
+
+	spin_lock(&afs_cb_hash_lock);
+	list_del_init(&vnode->cb_hash_link);
+	spin_unlock(&afs_cb_hash_lock);
+
+	/* set the changed flag in the vnode and release the server */
+	spin_lock(&vnode->lock);
+
+	afs_kafstimod_del_timer(&vnode->cb_timeout);
+
+	server = xchg(&vnode->cb_server, NULL);
+	if (server) {
+		vnode->flags |= AFS_VNODE_CHANGED;
+
+		spin_lock(&server->cb_lock);
+		list_del_init(&vnode->cb_link);
+		spin_unlock(&server->cb_lock);
+	}
+
+	spin_unlock(&vnode->lock);
+
+	ret = 0;
+	if (server) {
+		ret = afs_rxfs_give_up_callback(server, vnode);
+		afs_put_server(server);
+	}
+
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_vnode_give_up_callback() */
+
+/*****************************************************************************/
+/*
+ * match a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+						 const void *entry)
+{
+	const struct afs_cache_vnode *cvnode = entry;
+	struct afs_vnode *vnode = target;
+
+	_enter("{%x,%x,%Lx},{%x,%x,%Lx}",
+	       vnode->fid.vnode,
+	       vnode->fid.unique,
+	       vnode->status.version,
+	       cvnode->vnode_id,
+	       cvnode->vnode_unique,
+	       cvnode->data_version);
+
+	if (vnode->fid.vnode != cvnode->vnode_id) {
+		_leave(" = FAILED");
+		return CACHEFS_MATCH_FAILED;
+	}
+
+	if (vnode->fid.unique != cvnode->vnode_unique ||
+	    vnode->status.version != cvnode->data_version) {
+		_leave(" = DELETE");
+		return CACHEFS_MATCH_SUCCESS_DELETE;
+	}
+
+	_leave(" = SUCCESS");
+	return CACHEFS_MATCH_SUCCESS;
+} /* end afs_vnode_cache_match() */
+#endif
+
+/*****************************************************************************/
+/*
+ * update a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vnode_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vnode *cvnode = entry;
+	struct afs_vnode *vnode = source;
+
+	_enter("");
+
+	cvnode->vnode_id	= vnode->fid.vnode;
+	cvnode->vnode_unique	= vnode->fid.unique;
+	cvnode->data_version	= vnode->status.version;
+
+} /* end afs_vnode_cache_update() */
+#endif
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h
new file mode 100644
index 0000000..b86a971
--- /dev/null
+++ b/fs/afs/vnode.h
@@ -0,0 +1,94 @@
+/* vnode.h: AFS vnode record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VNODE_H
+#define _LINUX_AFS_VNODE_H
+
+#include <linux/fs.h>
+#include "server.h"
+#include "kafstimod.h"
+#include "cache.h"
+
+#ifdef __KERNEL__
+
+struct afs_rxfs_fetch_descriptor;
+
+/*****************************************************************************/
+/*
+ * vnode catalogue entry
+ */
+struct afs_cache_vnode
+{
+	afs_vnodeid_t		vnode_id;	/* vnode ID */
+	unsigned		vnode_unique;	/* vnode ID uniquifier */
+	afs_dataversion_t	data_version;	/* data version */
+};
+
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_vnode_cache_index_def;
+#endif
+
+/*****************************************************************************/
+/*
+ * AFS inode private data
+ */
+struct afs_vnode
+{
+	struct inode		vfs_inode;	/* the VFS's inode record */
+
+	struct afs_volume	*volume;	/* volume on which vnode resides */
+	struct afs_fid		fid;		/* the file identifier for this inode */
+	struct afs_file_status	status;		/* AFS status info for this file */
+#ifdef AFS_CACHING_SUPPORT
+	struct cachefs_cookie	*cache;		/* caching cookie */
+#endif
+
+	wait_queue_head_t	update_waitq;	/* status fetch waitqueue */
+	unsigned		update_cnt;	/* number of outstanding ops that will update the
+						 * status */
+	spinlock_t		lock;		/* waitqueue/flags lock */
+	unsigned		flags;
+#define AFS_VNODE_CHANGED	0x00000001	/* set if vnode reported changed by callback */
+#define AFS_VNODE_DELETED	0x00000002	/* set if vnode deleted on server */
+#define AFS_VNODE_MOUNTPOINT	0x00000004	/* set if vnode is a mountpoint symlink */
+
+	/* outstanding callback notification on this file */
+	struct afs_server	*cb_server;	/* server that made the current promise */
+	struct list_head	cb_link;	/* link in server's promises list */
+	struct list_head	cb_hash_link;	/* link in master callback hash */
+	struct afs_timer	cb_timeout;	/* timeout on promise */
+	unsigned		cb_version;	/* callback version */
+	unsigned		cb_expiry;	/* callback expiry time */
+	afs_callback_type_t	cb_type;	/* type of callback */
+};
+
+static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
+{
+	return container_of(inode, struct afs_vnode, vfs_inode);
+}
+
+static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
+{
+	return &vnode->vfs_inode;
+}
+
+extern int afs_vnode_fetch_status(struct afs_vnode *vnode);
+
+extern int afs_vnode_fetch_data(struct afs_vnode *vnode,
+				struct afs_rxfs_fetch_descriptor *desc);
+
+extern int afs_vnode_give_up_callback(struct afs_vnode *vnode);
+
+extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_VNODE_H */
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
new file mode 100644
index 0000000..0ff4b86
--- /dev/null
+++ b/fs/afs/volume.c
@@ -0,0 +1,520 @@
+/* volume.c: AFS volume management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "vnode.h"
+#include "cell.h"
+#include "cache.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "internal.h"
+
+#ifdef __KDEBUG
+static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+						  const void *entry);
+static void afs_volume_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_volume_cache_index_def = {
+	.name		= "volume",
+	.data_size	= sizeof(struct afs_cache_vhash),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
+	.keys[1]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
+	.match		= afs_volume_cache_match,
+	.update		= afs_volume_cache_update,
+};
+#endif
+
+/*****************************************************************************/
+/*
+ * lookup a volume by name
+ * - this can be one of the following:
+ *	"%[cell:]volume[.]"		R/W volume
+ *	"#[cell:]volume[.]"		R/O or R/W volume (rwparent=0),
+ *					 or R/W (rwparent=1) volume
+ *	"%[cell:]volume.readonly"	R/O volume
+ *	"#[cell:]volume.readonly"	R/O volume
+ *	"%[cell:]volume.backup"		Backup volume
+ *	"#[cell:]volume.backup"		Backup volume
+ *
+ * The cell name is optional, and defaults to the current cell.
+ *
+ * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin
+ * Guide
+ * - Rule 1: Explicit type suffix forces access of that type or nothing
+ *           (no suffix, then use Rule 2 & 3)
+ * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W
+ *           if not available
+ * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
+ *           explicitly told otherwise
+ */
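+/* For illustration (hypothetical cell/volume names), under the parsing
+ * below:
+ *	"%example.org:root.cell"	-> R/W "root.cell" in cell "example.org"
+ *	"#root.cell.readonly"		-> R/O "root.cell" in the current cell
+ *	"#root.cell"			-> "root.cell" in the current cell,
+ *					   R/O preferred (R/W if rwpath is set)
+ * Only ".readonly", ".backup" and a bare trailing "." are treated as
+ * suffixes; other dots are part of the volume name.
+ */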
+int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
+		      struct afs_volume **_volume)
+{
+	struct afs_vlocation *vlocation = NULL;
+	struct afs_volume *volume = NULL;
+	afs_voltype_t type;
+	const char *cellname, *volname, *suffix;
+	char srvtmask;
+	int force, ret, loop, cellnamesz, volnamesz;
+
+	_enter("%s,,%d,", name, rwpath);
+
+	if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) {
+		printk("kAFS: unparsable volume name\n");
+		return -EINVAL;
+	}
+
+	/* determine the type of volume we're looking for */
+	force = 0;
+	type = AFSVL_ROVOL;
+
+	if (rwpath || name[0] == '%') {
+		type = AFSVL_RWVOL;
+		force = 1;
+	}
+
+	suffix = strrchr(name, '.');
+	if (suffix) {
+		if (strcmp(suffix, ".readonly") == 0) {
+			type = AFSVL_ROVOL;
+			force = 1;
+		}
+		else if (strcmp(suffix, ".backup") == 0) {
+			type = AFSVL_BACKVOL;
+			force = 1;
+		}
+		else if (suffix[1] == 0) {
+		}
+		else {
+			suffix = NULL;
+		}
+	}
+
+	/* split the cell and volume names */
+	name++;
+	volname = strchr(name, ':');
+	if (volname) {
+		cellname = name;
+		cellnamesz = volname - name;
+		volname++;
+	}
+	else {
+		volname = name;
+		cellname = NULL;
+		cellnamesz = 0;
+	}
+
+	volnamesz = suffix ? suffix - volname : strlen(volname);
+
+	_debug("CELL:%*.*s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
+	       cellnamesz, cellnamesz, cellname ?: "", cell,
+	       volnamesz, volnamesz, volname, suffix ?: "-",
+	       type,
+	       force ? " FORCE" : "");
+
+	/* lookup the cell record */
+	if (cellname || !cell) {
+		ret = afs_cell_lookup(cellname, cellnamesz, &cell);
+		if (ret < 0) {
+			printk("kAFS: unable to lookup cell '%s'\n",
+			       cellname ?: "");
+			goto error;
+		}
+	}
+	else {
+		afs_get_cell(cell);
+	}
+
+	/* lookup the volume location record */
+	ret = afs_vlocation_lookup(cell, volname, volnamesz, &vlocation);
+	if (ret < 0)
+		goto error;
+
+	/* make the final decision on the type we want */
+	ret = -ENOMEDIUM;
+	if (force && !(vlocation->vldb.vidmask & (1 << type)))
+		goto error;
+
+	srvtmask = 0;
+	for (loop = 0; loop < vlocation->vldb.nservers; loop++)
+		srvtmask |= vlocation->vldb.srvtmask[loop];
+
+	if (force) {
+		if (!(srvtmask & (1 << type)))
+			goto error;
+	}
+	else if (srvtmask & AFS_VOL_VTM_RO) {
+		type = AFSVL_ROVOL;
+	}
+	else if (srvtmask & AFS_VOL_VTM_RW) {
+		type = AFSVL_RWVOL;
+	}
+	else {
+		goto error;
+	}
+
+	down_write(&cell->vl_sem);
+
+	/* is the volume already active? */
+	if (vlocation->vols[type]) {
+		/* yes - re-use it */
+		volume = vlocation->vols[type];
+		afs_get_volume(volume);
+		goto success;
+	}
+
+	/* create a new volume record */
+	_debug("creating new volume record");
+
+	ret = -ENOMEM;
+	volume = kmalloc(sizeof(struct afs_volume), GFP_KERNEL);
+	if (!volume)
+		goto error_up;
+
+	memset(volume, 0, sizeof(struct afs_volume));
+	atomic_set(&volume->usage, 1);
+	volume->type		= type;
+	volume->type_force	= force;
+	volume->cell		= cell;
+	volume->vid		= vlocation->vldb.vid[type];
+
+	init_rwsem(&volume->server_sem);
+
+	/* look up all the applicable server records */
+	for (loop = 0; loop < 8; loop++) {
+		if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
+			ret = afs_server_lookup(
+				volume->cell,
+				&vlocation->vldb.servers[loop],
+				&volume->servers[volume->nservers]);
+			if (ret < 0)
+				goto error_discard;
+
+			volume->nservers++;
+		}
+	}
+
+	/* attach the cache and volume location */
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_acquire_cookie(vlocation->cache,
+			       &afs_vnode_cache_index_def,
+			       volume,
+			       &volume->cache);
+#endif
+
+	afs_get_vlocation(vlocation);
+	volume->vlocation = vlocation;
+
+	vlocation->vols[type] = volume;
+
+ success:
+	_debug("kAFS selected %s volume %08x",
+	       afs_voltypes[volume->type], volume->vid);
+	*_volume = volume;
+	ret = 0;
+
+	/* clean up */
+ error_up:
+	up_write(&cell->vl_sem);
+ error:
+	afs_put_vlocation(vlocation);
+	afs_put_cell(cell);
+
+	_leave(" = %d (%p)", ret, volume);
+	return ret;
+
+ error_discard:
+	up_write(&cell->vl_sem);
+
+	for (loop = volume->nservers - 1; loop >= 0; loop--)
+		afs_put_server(volume->servers[loop]);
+
+	kfree(volume);
+	goto error;
+} /* end afs_volume_lookup() */
+
+/*****************************************************************************/
+/*
+ * destroy a volume record
+ */
+void afs_put_volume(struct afs_volume *volume)
+{
+	struct afs_vlocation *vlocation;
+	int loop;
+
+	if (!volume)
+		return;
+
+	_enter("%p", volume);
+
+	vlocation = volume->vlocation;
+
+	/* sanity check */
+	BUG_ON(atomic_read(&volume->usage) <= 0);
+
+	/* to prevent a race, the decrement and the dequeue must be effectively
+	 * atomic */
+	down_write(&vlocation->cell->vl_sem);
+
+	if (likely(!atomic_dec_and_test(&volume->usage))) {
+		up_write(&vlocation->cell->vl_sem);
+		_leave("");
+		return;
+	}
+
+	vlocation->vols[volume->type] = NULL;
+
+	up_write(&vlocation->cell->vl_sem);
+
+	/* finish cleaning up the volume */
+#ifdef AFS_CACHING_SUPPORT
+	cachefs_relinquish_cookie(volume->cache, 0);
+#endif
+	afs_put_vlocation(vlocation);
+
+	for (loop = volume->nservers - 1; loop >= 0; loop--)
+		afs_put_server(volume->servers[loop]);
+
+	kfree(volume);
+
+	_leave(" [destroyed]");
+} /* end afs_put_volume() */
+
+/*****************************************************************************/
+/*
+ * pick a server to use to try accessing this volume
+ * - returns with an elevated usage count on the server chosen
+ */
+int afs_volume_pick_fileserver(struct afs_volume *volume,
+			       struct afs_server **_server)
+{
+	struct afs_server *server;
+	int ret, state, loop;
+
+	_enter("%s", volume->vlocation->vldb.name);
+
+	down_read(&volume->server_sem);
+
+	/* handle the no-server case */
+	if (volume->nservers == 0) {
+		ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
+		up_read(&volume->server_sem);
+		_leave(" = %d [no servers]", ret);
+		return ret;
+	}
+
+	/* basically, just search the list for the first live server and use
+	 * that */
+	ret = 0;
+	for (loop = 0; loop < volume->nservers; loop++) {
+		server = volume->servers[loop];
+		state = server->fs_state;
+
+		switch (state) {
+			/* found an apparently healthy server */
+		case 0:
+			afs_get_server(server);
+			up_read(&volume->server_sem);
+			*_server = server;
+			_leave(" = 0 (picked %08x)",
+			       ntohl(server->addr.s_addr));
+			return 0;
+
+		case -ENETUNREACH:
+			if (ret == 0)
+				ret = state;
+			break;
+
+		case -EHOSTUNREACH:
+			if (ret == 0 ||
+			    ret == -ENETUNREACH)
+				ret = state;
+			break;
+
+		case -ECONNREFUSED:
+			if (ret == 0 ||
+			    ret == -ENETUNREACH ||
+			    ret == -EHOSTUNREACH)
+				ret = state;
+			break;
+
+		default:
+		case -EREMOTEIO:
+			if (ret == 0 ||
+			    ret == -ENETUNREACH ||
+			    ret == -EHOSTUNREACH ||
+			    ret == -ECONNREFUSED)
+				ret = state;
+			break;
+		}
+	}
+
+	/* no available servers
+	 * - TODO: handle the no active servers case better
+	 */
+	up_read(&volume->server_sem);
+	_leave(" = %d", ret);
+	return ret;
+} /* end afs_volume_pick_fileserver() */
+
+/*****************************************************************************/
+/*
+ * release a server after use
+ * - releases the ref on the server struct that was acquired by picking
+ * - records result of using a particular server to access a volume
+ * - return 0 to try again, 1 if okay or to issue error
+ */
+int afs_volume_release_fileserver(struct afs_volume *volume,
+				  struct afs_server *server,
+				  int result)
+{
+	unsigned loop;
+
+	_enter("%s,%08x,%d",
+	       volume->vlocation->vldb.name, ntohl(server->addr.s_addr),
+	       result);
+
+	switch (result) {
+		/* success */
+	case 0:
+		server->fs_act_jif = jiffies;
+		break;
+
+		/* the fileserver denied all knowledge of the volume */
+	case -ENOMEDIUM:
+		server->fs_act_jif = jiffies;
+		down_write(&volume->server_sem);
+
+		/* first, find where the server is in the active list (if it
+		 * is) */
+		for (loop = 0; loop < volume->nservers; loop++)
+			if (volume->servers[loop] == server)
+				goto present;
+
+		/* no longer there - may have been discarded by another op */
+		goto try_next_server_upw;
+
+	present:
+		volume->nservers--;
+		memmove(&volume->servers[loop],
+			&volume->servers[loop + 1],
+			sizeof(volume->servers[loop]) *
+			(volume->nservers - loop));
+		volume->servers[volume->nservers] = NULL;
+		afs_put_server(server);
+		volume->rjservers++;
+
+		if (volume->nservers > 0)
+			/* another server might acknowledge its existence */
+			goto try_next_server_upw;
+
+		/* handle the case where all the fileservers have rejected the
+		 * volume
+		 * - TODO: try asking the fileservers for volume information
+		 * - TODO: contact the VL server again to see if the volume is
+		 *         no longer registered
+		 */
+		up_write(&volume->server_sem);
+		afs_put_server(server);
+		_leave(" [completely rejected]");
+		return 1;
+
+		/* problem reaching the server */
+	case -ENETUNREACH:
+	case -EHOSTUNREACH:
+	case -ECONNREFUSED:
+	case -ETIMEDOUT:
+	case -EREMOTEIO:
+		/* mark the server as dead
+		 * TODO: vary dead timeout depending on error
+		 */
+		spin_lock(&server->fs_lock);
+		if (!server->fs_state) {
+			server->fs_dead_jif = jiffies + HZ * 10;
+			server->fs_state = result;
+			printk("kAFS: SERVER DEAD state=%d\n", result);
+		}
+		spin_unlock(&server->fs_lock);
+		goto try_next_server;
+
+		/* miscellaneous error */
+	default:
+		server->fs_act_jif = jiffies;
+	case -ENOMEM:
+	case -ENONET:
+		break;
+	}
+
+	/* tell the caller to accept the result */
+	afs_put_server(server);
+	_leave("");
+	return 1;
+
+	/* tell the caller to loop around and try the next server */
+ try_next_server_upw:
+	up_write(&volume->server_sem);
+ try_next_server:
+	afs_put_server(server);
+	_leave(" [try next server]");
+	return 0;
+
+} /* end afs_volume_release_fileserver() */
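+
+/*
+ * A minimal sketch of the intended calling pattern for the two routines
+ * above (illustrative only; "do_fileserver_op" is a hypothetical operation
+ * and error handling is elided): a 0 return from
+ * afs_volume_release_fileserver() means "try the next server", 1 means
+ * "accept the result".
+ *
+ *	struct afs_server *server;
+ *	int ret;
+ *
+ *	do {
+ *		ret = afs_volume_pick_fileserver(volume, &server);
+ *		if (ret < 0)
+ *			break;		(no usable server left)
+ *		ret = do_fileserver_op(server);
+ *	} while (!afs_volume_release_fileserver(volume, server, ret));
+ */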
+
+/*****************************************************************************/
+/*
+ * match a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+						  const void *entry)
+{
+	const struct afs_cache_vhash *vhash = entry;
+	struct afs_volume *volume = target;
+
+	_enter("{%u},{%u}", volume->type, vhash->vtype);
+
+	if (volume->type == vhash->vtype) {
+		_leave(" = SUCCESS");
+		return CACHEFS_MATCH_SUCCESS;
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+} /* end afs_volume_cache_match() */
+#endif
+
+/*****************************************************************************/
+/*
+ * update a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_volume_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vhash *vhash = entry;
+	struct afs_volume *volume = source;
+
+	_enter("");
+
+	vhash->vtype = volume->type;
+
+} /* end afs_volume_cache_update() */
+#endif
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
new file mode 100644
index 0000000..1e69188
--- /dev/null
+++ b/fs/afs/volume.h
@@ -0,0 +1,142 @@
+/* volume.h: AFS volume management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VOLUME_H
+#define _LINUX_AFS_VOLUME_H
+
+#include "types.h"
+#include "fsclient.h"
+#include "kafstimod.h"
+#include "kafsasyncd.h"
+#include "cache.h"
+
+#define __packed __attribute__((packed))
+
+typedef enum {
+	AFS_VLUPD_SLEEP,		/* sleeping waiting for update timer to fire */
+	AFS_VLUPD_PENDING,		/* on pending queue */
+	AFS_VLUPD_INPROGRESS,		/* op in progress */
+	AFS_VLUPD_BUSYSLEEP,		/* sleeping because server returned EBUSY */
+	
+} __attribute__((packed)) afs_vlocation_upd_t;
+
+/*****************************************************************************/
+/*
+ * entry in the cached volume location catalogue
+ */
+struct afs_cache_vlocation
+{
+	uint8_t			name[64];	/* volume name (lowercase, padded with NULs) */
+	uint8_t			nservers;	/* number of entries used in servers[] */
+	uint8_t			vidmask;	/* voltype mask for vid[] */
+	uint8_t			srvtmask[8];	/* voltype masks for servers[] */
+#define AFS_VOL_VTM_RW	0x01 /* R/W version of the volume is available (on this server) */
+#define AFS_VOL_VTM_RO	0x02 /* R/O version of the volume is available (on this server) */
+#define AFS_VOL_VTM_BAK	0x04 /* backup version of the volume is available (on this server) */
+
+	afs_volid_t		vid[3];		/* volume IDs for R/W, R/O and Bak volumes */
+	struct in_addr		servers[8];	/* fileserver addresses */
+	time_t			rtime;		/* last retrieval time */
+};
+
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_vlocation_cache_index_def;
+#endif
+
+/*****************************************************************************/
+/*
+ * volume -> vnode hash table entry
+ */
+struct afs_cache_vhash
+{
+	afs_voltype_t		vtype;		/* which volume variation */
+	uint8_t			hash_bucket;	/* which hash bucket this represents */
+} __attribute__((packed));
+
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_volume_cache_index_def;
+#endif
+
+/*****************************************************************************/
+/*
+ * AFS volume location record
+ */
+struct afs_vlocation
+{
+	atomic_t		usage;
+	struct list_head	link;		/* link in cell volume location list */
+	struct afs_timer	timeout;	/* decaching timer */
+	struct afs_cell		*cell;		/* cell to which volume belongs */
+#ifdef AFS_CACHING_SUPPORT
+	struct cachefs_cookie	*cache;		/* caching cookie */
+#endif
+	struct afs_cache_vlocation vldb;	/* volume information DB record */
+	struct afs_volume	*vols[3];	/* volume access record pointer (indexed by type) */
+	rwlock_t		lock;		/* access lock */
+	unsigned long		read_jif;	/* time at which last read from vlserver */
+	struct afs_timer	upd_timer;	/* update timer */
+	struct afs_async_op	upd_op;		/* update operation */
+	afs_vlocation_upd_t	upd_state;	/* update state */
+	unsigned short		upd_first_svix;	/* first server index during update */
+	unsigned short		upd_curr_svix;	/* current server index during update */
+	unsigned short		upd_rej_cnt;	/* ENOMEDIUM count during update */
+	unsigned short		upd_busy_cnt;	/* EBUSY count during update */
+	unsigned short		valid;		/* T if valid */
+};
+
+extern int afs_vlocation_lookup(struct afs_cell *cell,
+				const char *name,
+				unsigned namesz,
+				struct afs_vlocation **_vlocation);
+
+#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern void afs_put_vlocation(struct afs_vlocation *vlocation);
+extern void afs_vlocation_do_timeout(struct afs_vlocation *vlocation);
+
+/*****************************************************************************/
+/*
+ * AFS volume access record
+ */
+struct afs_volume
+{
+	atomic_t		usage;
+	struct afs_cell		*cell;		/* cell to which volume belongs (unrefd ptr) */
+	struct afs_vlocation	*vlocation;	/* volume location */
+#ifdef AFS_CACHING_SUPPORT
+	struct cachefs_cookie	*cache;		/* caching cookie */
+#endif
+	afs_volid_t		vid;		/* volume ID */
+	afs_voltype_t __packed	type;		/* type of volume */
+	char			type_force;	/* force volume type (suppress R/O -> R/W) */
+	unsigned short		nservers;	/* number of server slots filled */
+	unsigned short		rjservers;	/* number of servers discarded due to -ENOMEDIUM */
+	struct afs_server	*servers[8];	/* servers on which volume resides (ordered) */
+	struct rw_semaphore	server_sem;	/* lock for accessing current server */
+};
+
+extern int afs_volume_lookup(const char *name,
+			     struct afs_cell *cell,
+			     int rwpath,
+			     struct afs_volume **_volume);
+
+#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern void afs_put_volume(struct afs_volume *volume);
+
+extern int afs_volume_pick_fileserver(struct afs_volume *volume,
+				      struct afs_server **_server);
+
+extern int afs_volume_release_fileserver(struct afs_volume *volume,
+					 struct afs_server *server,
+					 int result);
+
+#endif /* _LINUX_AFS_VOLUME_H */
diff --git a/fs/aio.c b/fs/aio.c
new file mode 100644
index 0000000..d06a2667
--- /dev/null
+++ b/fs/aio.c
@@ -0,0 +1,1729 @@
+/*
+ *	An async IO implementation for Linux
+ *	Written by Benjamin LaHaise <bcrl@kvack.org>
+ *
+ *	Implements an efficient asynchronous io interface.
+ *
+ *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
+ *
+ *	See ../COPYING for licensing terms.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/aio_abi.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#define DEBUG 0
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/aio.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/security.h>
+
+#include <asm/kmap_types.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+
+#if DEBUG > 1
+#define dprintk		printk
+#else
+#define dprintk(x...)	do { ; } while (0)
+#endif
+
+long aio_run = 0; /* for testing only */
+long aio_wakeups = 0; /* for testing only */
+
+/*------ sysctl variables----*/
+atomic_t aio_nr = ATOMIC_INIT(0);	/* current system wide number of aio requests */
+unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
+/*----end sysctl variables---*/
+
+static kmem_cache_t	*kiocb_cachep;
+static kmem_cache_t	*kioctx_cachep;
+
+static struct workqueue_struct *aio_wq;
+
+/* Used for rare fput completion. */
+static void aio_fput_routine(void *);
+static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+
+static DEFINE_SPINLOCK(fput_lock);
+LIST_HEAD(fput_head);
+
+static void aio_kick_handler(void *);
+
+/* aio_setup
+ *	Creates the slab caches used by the aio routines, panic on
+ *	failure as this is done early during the boot sequence.
+ */
+static int __init aio_setup(void)
+{
+	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
+				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
+				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+
+	aio_wq = create_workqueue("aio");
+
+	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
+
+	return 0;
+}
+
+static void aio_free_ring(struct kioctx *ctx)
+{
+	struct aio_ring_info *info = &ctx->ring_info;
+	long i;
+
+	for (i=0; i<info->nr_pages; i++)
+		put_page(info->ring_pages[i]);
+
+	if (info->mmap_size) {
+		down_write(&ctx->mm->mmap_sem);
+		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
+		up_write(&ctx->mm->mmap_sem);
+	}
+
+	if (info->ring_pages && info->ring_pages != info->internal_pages)
+		kfree(info->ring_pages);
+	info->ring_pages = NULL;
+	info->nr = 0;
+}
+
+static int aio_setup_ring(struct kioctx *ctx)
+{
+	struct aio_ring *ring;
+	struct aio_ring_info *info = &ctx->ring_info;
+	unsigned nr_events = ctx->max_reqs;
+	unsigned long size;
+	int nr_pages;
+
+	/* Compensate for the ring buffer's head/tail overlap entry */
+	nr_events += 2;	/* 1 is required, 2 for good luck */
+
+	size = sizeof(struct aio_ring);
+	size += sizeof(struct io_event) * nr_events;
+	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+	if (nr_pages < 0)
+		return -EINVAL;
+
+	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+
+	info->nr = 0;
+	info->ring_pages = info->internal_pages;
+	if (nr_pages > AIO_RING_PAGES) {
+		info->ring_pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
+		if (!info->ring_pages)
+			return -ENOMEM;
+		memset(info->ring_pages, 0, sizeof(struct page *) * nr_pages);
+	}
+
+	info->mmap_size = nr_pages * PAGE_SIZE;
+	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
+	down_write(&ctx->mm->mmap_sem);
+	info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
+				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
+				  0);
+	if (IS_ERR((void *)info->mmap_base)) {
+		up_write(&ctx->mm->mmap_sem);
+		printk("mmap err: %ld\n", -info->mmap_base);
+		info->mmap_size = 0;
+		aio_free_ring(ctx);
+		return -EAGAIN;
+	}
+
+	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
+	info->nr_pages = get_user_pages(current, ctx->mm,
+					info->mmap_base, nr_pages, 
+					1, 0, info->ring_pages, NULL);
+	up_write(&ctx->mm->mmap_sem);
+
+	if (unlikely(info->nr_pages != nr_pages)) {
+		aio_free_ring(ctx);
+		return -EAGAIN;
+	}
+
+	ctx->user_id = info->mmap_base;
+
+	info->nr = nr_events;		/* trusted copy */
+
+	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	ring->nr = nr_events;	/* user copy */
+	ring->id = ctx->user_id;
+	ring->head = ring->tail = 0;
+	ring->magic = AIO_RING_MAGIC;
+	ring->compat_features = AIO_RING_COMPAT_FEATURES;
+	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
+	ring->header_length = sizeof(struct aio_ring);
+	kunmap_atomic(ring, KM_USER0);
+
+	return 0;
+}
+
+
+/* aio_ring_event: returns a pointer to the event at the given index, mapped
+ * via kmap_atomic() with the given km type.  Release it with put_aio_ring_event();
+ */
+#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
+#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
+#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
+
+#define aio_ring_event(info, nr, km) ({					\
+	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
+	struct io_event *__event;					\
+	__event = kmap_atomic(						\
+			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+	__event += pos % AIO_EVENTS_PER_PAGE;				\
+	__event;							\
+})
+
+#define put_aio_ring_event(event, km) do {	\
+	struct io_event *__event = (event);	\
+	(void)__event;				\
+	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+} while(0)
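+
+/* Usage sketch for the two macros above (illustrative only, mirroring what
+ * aio_complete() and aio_read_evt() below do; "idx" is a hypothetical ring
+ * index):
+ *
+ *	struct io_event *ev = aio_ring_event(info, idx, KM_USER1);
+ *	... read or fill in *ev ...
+ *	put_aio_ring_event(ev, KM_USER1);
+ */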
+
+/* ioctx_alloc
+ *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
+ */
+static struct kioctx *ioctx_alloc(unsigned nr_events)
+{
+	struct mm_struct *mm;
+	struct kioctx *ctx;
+
+	/* Prevent overflows */
+	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
+	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
+		pr_debug("ENOMEM: nr_events too high\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (nr_events > aio_max_nr)
+		return ERR_PTR(-EAGAIN);
+
+	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->max_reqs = nr_events;
+	mm = ctx->mm = current->mm;
+	atomic_inc(&mm->mm_count);
+
+	atomic_set(&ctx->users, 1);
+	spin_lock_init(&ctx->ctx_lock);
+	spin_lock_init(&ctx->ring_info.ring_lock);
+	init_waitqueue_head(&ctx->wait);
+
+	INIT_LIST_HEAD(&ctx->active_reqs);
+	INIT_LIST_HEAD(&ctx->run_list);
+	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+
+	if (aio_setup_ring(ctx) < 0)
+		goto out_freectx;
+
+	/* limit the number of system wide aios */
+	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
+	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
+		goto out_cleanup;
+
+	/* now link into global list.  kludge.  FIXME */
+	write_lock(&mm->ioctx_list_lock);
+	ctx->next = mm->ioctx_list;
+	mm->ioctx_list = ctx;
+	write_unlock(&mm->ioctx_list_lock);
+
+	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
+	return ctx;
+
+out_cleanup:
+	atomic_sub(ctx->max_reqs, &aio_nr);
+	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
+	__put_ioctx(ctx);
+	return ERR_PTR(-EAGAIN);
+
+out_freectx:
+	mmdrop(mm);
+	kmem_cache_free(kioctx_cachep, ctx);
+	ctx = ERR_PTR(-ENOMEM);
+
+	dprintk("aio: error allocating ioctx %p\n", ctx);
+	return ctx;
+}
+
+/* aio_cancel_all
+ *	Cancels all outstanding aio requests on an aio context.  Used 
+ *	when the processes owning a context have all exited to encourage 
+ *	the rapid destruction of the kioctx.
+ */
+static void aio_cancel_all(struct kioctx *ctx)
+{
+	int (*cancel)(struct kiocb *, struct io_event *);
+	struct io_event res;
+	spin_lock_irq(&ctx->ctx_lock);
+	ctx->dead = 1;
+	while (!list_empty(&ctx->active_reqs)) {
+		struct list_head *pos = ctx->active_reqs.next;
+		struct kiocb *iocb = list_kiocb(pos);
+		list_del_init(&iocb->ki_list);
+		cancel = iocb->ki_cancel;
+		kiocbSetCancelled(iocb);
+		if (cancel) {
+			iocb->ki_users++;
+			spin_unlock_irq(&ctx->ctx_lock);
+			cancel(iocb, &res);
+			spin_lock_irq(&ctx->ctx_lock);
+		}
+	}
+	spin_unlock_irq(&ctx->ctx_lock);
+}
+
+void wait_for_all_aios(struct kioctx *ctx)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	if (!ctx->reqs_active)
+		return;
+
+	add_wait_queue(&ctx->wait, &wait);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	while (ctx->reqs_active) {
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	__set_task_state(tsk, TASK_RUNNING);
+	remove_wait_queue(&ctx->wait, &wait);
+}
+
+/* wait_on_sync_kiocb:
+ *	Waits on the given sync kiocb to complete.
+ */
+ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+{
+	while (iocb->ki_users) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (!iocb->ki_users)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	return iocb->ki_user_data;
+}
+
+/* exit_aio: called when the last user of mm goes away.  At this point, 
+ * there is no way for any new requests to be submitted or any of the
+ * io_* syscalls to be called on the context.  However, there may be 
+ * outstanding requests which hold references to the context; as they 
+ * go away, they will call put_ioctx and release any pinned memory
+ * associated with the request (held via struct page * references).
+ */
+void fastcall exit_aio(struct mm_struct *mm)
+{
+	struct kioctx *ctx = mm->ioctx_list;
+	mm->ioctx_list = NULL;
+	while (ctx) {
+		struct kioctx *next = ctx->next;
+		ctx->next = NULL;
+		aio_cancel_all(ctx);
+
+		wait_for_all_aios(ctx);
+		/*
+		 * this is an overkill, but ensures we don't leave
+		 * the ctx on the aio_wq
+		 */
+		flush_workqueue(aio_wq);
+
+		if (1 != atomic_read(&ctx->users))
+			printk(KERN_DEBUG
+				"exit_aio:ioctx still alive: %d %d %d\n",
+				atomic_read(&ctx->users), ctx->dead,
+				ctx->reqs_active);
+		put_ioctx(ctx);
+		ctx = next;
+	}
+}
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+void fastcall __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	if (unlikely(ctx->reqs_active))
+		BUG();
+
+	cancel_delayed_work(&ctx->wq);
+	flush_workqueue(aio_wq);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	atomic_sub(nr_events, &aio_nr);
+}
+
+/* aio_get_req
+ *	Allocate a slot for an aio request.  Increments the users count
+ * of the kioctx so that the kioctx stays around until all requests are
+ * complete.  Returns NULL if no requests are free.
+ *
+ * Returns with kiocb->users set to 2.  The io submit code path holds
+ * an extra reference while submitting the i/o.
+ * This prevents races between the aio code path referencing the
+ * req (after submitting it) and aio_complete() freeing the req.
+ */
+static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
+static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+{
+	struct kiocb *req = NULL;
+	struct aio_ring *ring;
+	int okay = 0;
+
+	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
+	if (unlikely(!req))
+		return NULL;
+
+	req->ki_flags = 1 << KIF_LOCKED;
+	req->ki_users = 2;
+	req->ki_key = 0;
+	req->ki_ctx = ctx;
+	req->ki_cancel = NULL;
+	req->ki_retry = NULL;
+	req->ki_obj.user = NULL;
+	req->ki_dtor = NULL;
+	req->private = NULL;
+	INIT_LIST_HEAD(&req->ki_run_list);
+
+	/* Check if the completion queue has enough free space to
+	 * accept an event from this io.
+	 */
+	spin_lock_irq(&ctx->ctx_lock);
+	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
+	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
+		list_add(&req->ki_list, &ctx->active_reqs);
+		get_ioctx(ctx);
+		ctx->reqs_active++;
+		okay = 1;
+	}
+	kunmap_atomic(ring, KM_USER0);
+	spin_unlock_irq(&ctx->ctx_lock);
+
+	if (!okay) {
+		kmem_cache_free(kiocb_cachep, req);
+		req = NULL;
+	}
+
+	return req;
+}
+
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+{
+	struct kiocb *req;
+	/* Handle a potential starvation case -- should be exceedingly rare as 
+	 * requests will be stuck on fput_head only if the aio_fput_routine is 
+	 * delayed and the requests were the last user of the struct file.
+	 */
+	req = __aio_get_req(ctx);
+	if (unlikely(NULL == req)) {
+		aio_fput_routine(NULL);
+		req = __aio_get_req(ctx);
+	}
+	return req;
+}
+
+static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
+{
+	if (req->ki_dtor)
+		req->ki_dtor(req);
+	req->ki_ctx = NULL;
+	req->ki_filp = NULL;
+	req->ki_obj.user = NULL;
+	req->ki_dtor = NULL;
+	req->private = NULL;
+	kmem_cache_free(kiocb_cachep, req);
+	ctx->reqs_active--;
+
+	if (unlikely(!ctx->reqs_active && ctx->dead))
+		wake_up(&ctx->wait);
+}
+
+static void aio_fput_routine(void *data)
+{
+	spin_lock_irq(&fput_lock);
+	while (likely(!list_empty(&fput_head))) {
+		struct kiocb *req = list_kiocb(fput_head.next);
+		struct kioctx *ctx = req->ki_ctx;
+
+		list_del(&req->ki_list);
+		spin_unlock_irq(&fput_lock);
+
+		/* Complete the fput */
+		__fput(req->ki_filp);
+
+		/* Link the iocb into the context's free list */
+		spin_lock_irq(&ctx->ctx_lock);
+		really_put_req(ctx, req);
+		spin_unlock_irq(&ctx->ctx_lock);
+
+		put_ioctx(ctx);
+		spin_lock_irq(&fput_lock);
+	}
+	spin_unlock_irq(&fput_lock);
+}
+
+/* __aio_put_req
+ *	Returns true if this put was the last user of the request.
+ */
+static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+{
+	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
+		req, atomic_read(&req->ki_filp->f_count));
+
+	req->ki_users --;
+	if (unlikely(req->ki_users < 0))
+		BUG();
+	if (likely(req->ki_users))
+		return 0;
+	list_del(&req->ki_list);		/* remove from active_reqs */
+	req->ki_cancel = NULL;
+	req->ki_retry = NULL;
+
+	/* Must be done under the lock to serialise against cancellation.
+	 * Call this aio_fput as it duplicates fput via the fput_work.
+	 */
+	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
+		get_ioctx(ctx);
+		spin_lock(&fput_lock);
+		list_add(&req->ki_list, &fput_head);
+		spin_unlock(&fput_lock);
+		queue_work(aio_wq, &fput_work);
+	} else
+		really_put_req(ctx, req);
+	return 1;
+}
+
+/* aio_put_req
+ *	Returns true if this put was the last user of the kiocb,
+ *	false if the request is still in use.
+ */
+int fastcall aio_put_req(struct kiocb *req)
+{
+	struct kioctx *ctx = req->ki_ctx;
+	int ret;
+	spin_lock_irq(&ctx->ctx_lock);
+	ret = __aio_put_req(ctx, req);
+	spin_unlock_irq(&ctx->ctx_lock);
+	if (ret)
+		put_ioctx(ctx);
+	return ret;
+}
+
+/*	Lookup an ioctx id.  ioctx_list is lockless for reads.
+ *	FIXME: this is O(n) and is only suitable for development.
+ */
+struct kioctx *lookup_ioctx(unsigned long ctx_id)
+{
+	struct kioctx *ioctx;
+	struct mm_struct *mm;
+
+	mm = current->mm;
+	read_lock(&mm->ioctx_list_lock);
+	for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
+		if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
+			get_ioctx(ioctx);
+			break;
+		}
+	read_unlock(&mm->ioctx_list_lock);
+
+	return ioctx;
+}
+
+/*
+ * use_mm
+ *	Makes the calling kernel thread take on the specified
+ *	mm context.
+ *	Called by the retry thread to execute retries within the
+ *	iocb issuer's mm context, so that copy_from/to_user
+ *	operations work seamlessly for aio.
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+static void use_mm(struct mm_struct *mm)
+{
+	struct mm_struct *active_mm;
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->flags |= PF_BORROWED_MM;
+	active_mm = tsk->active_mm;
+	atomic_inc(&mm->mm_count);
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	activate_mm(active_mm, mm);
+	task_unlock(tsk);
+
+	mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ *	Reverses the effect of use_mm, i.e. releases the
+ *	specified mm context which was earlier taken on
+ *	by the calling kernel thread
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ *
+ * Comments: Called with ctx->ctx_lock held. This nests
+ * the task_lock inside the ctx_lock.
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->flags &= ~PF_BORROWED_MM;
+	tsk->mm = NULL;
+	/* active_mm is still 'mm' */
+	enter_lazy_tlb(mm, tsk);
+	task_unlock(tsk);
+}
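+
+/* The two helpers above are meant to bracket retry work done on behalf of
+ * the submitting task, roughly as aio_kick_handler() below does (sketch
+ * only, locking elided):
+ *
+ *	mm_segment_t oldfs = get_fs();
+ *
+ *	set_fs(USER_DS);
+ *	use_mm(ctx->mm);
+ *	... run iocbs; copy_to_user()/copy_from_user() now act on ctx->mm ...
+ *	unuse_mm(ctx->mm);
+ *	set_fs(oldfs);
+ */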
+
+/*
+ * Queue up a kiocb to be retried. Assumes that the kiocb
+ * has already been marked as kicked, and places it on
+ * the retry run list for the corresponding ioctx, if it
+ * isn't already queued. Returns 1 if it actually queued
+ * the kiocb (to tell the caller to activate the work
+ * queue to process it), or 0, if it found that it was
+ * already queued.
+ *
+ * Should be called with the spin lock iocb->ki_ctx->ctx_lock
+ * held
+ */
+static inline int __queue_kicked_iocb(struct kiocb *iocb)
+{
+	struct kioctx *ctx = iocb->ki_ctx;
+
+	if (list_empty(&iocb->ki_run_list)) {
+		list_add_tail(&iocb->ki_run_list,
+			&ctx->run_list);
+		iocb->ki_queued++;
+		return 1;
+	}
+	return 0;
+}
+
+/* aio_run_iocb
+ *	This is the core aio execution routine. It is
+ *	invoked both for initial i/o submission and
+ *	subsequent retries via the aio_kick_handler.
+ *	Expects to be invoked with iocb->ki_ctx->ctx_lock
+ *	already held. The lock is released and reacquired
+ *	as needed during processing.
+ *
+ * Calls the iocb retry method (already setup for the
+ * iocb on initial submission) for operation specific
+ * handling, but takes care of most of the common retry
+ * execution details for a given iocb. The retry method
+ * needs to be non-blocking as far as possible, to avoid
+ * holding up other iocbs waiting to be serviced by the
+ * retry kernel thread.
+ *
+ * The trickier parts in this code have to do with
+ * ensuring that only one retry instance is in progress
+ * for a given iocb at any time. Providing that guarantee
+ * simplifies the coding of individual aio operations as
+ * it avoids various potential races.
+ */
+static ssize_t aio_run_iocb(struct kiocb *iocb)
+{
+	struct kioctx	*ctx = iocb->ki_ctx;
+	ssize_t (*retry)(struct kiocb *);
+	ssize_t ret;
+
+	if (iocb->ki_retried++ > 1024*1024) {
+		printk("Maximal retry count.  Bytes done %Zd\n",
+			iocb->ki_nbytes - iocb->ki_left);
+		return -EAGAIN;
+	}
+
+	if (!(iocb->ki_retried & 0xff)) {
+		pr_debug("%ld retry: %d of %d (kick %ld, Q %ld run %ld, wake %ld)\n",
+			iocb->ki_retried,
+			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes,
+			iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups);
+	}
+
+	if (!(retry = iocb->ki_retry)) {
+		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
+		return 0;
+	}
+
+	/*
+	 * We don't want the next retry iteration for this
+	 * operation to start until this one has returned and
+	 * updated the iocb state. However, wait_queue functions
+	 * can trigger a kick_iocb from interrupt context in the
+	 * meantime, indicating that data is available for the next
+	 * iteration. We want to remember that and enable the
+	 * next retry iteration _after_ we are through with
+	 * this one.
+	 *
+	 * So, in order to be able to register a "kick", but
+	 * prevent it from being queued now, we clear the kick
+	 * flag, but make the kick code *think* that the iocb is
+	 * still on the run list until we are actually done.
+	 * When we are done with this iteration, we check if
+	 * the iocb was kicked in the meantime and if so, queue
+	 * it up afresh.
+	 */
+
+	kiocbClearKicked(iocb);
+
+	/*
+	 * This is so that aio_complete knows it doesn't need to
+	 * pull the iocb off the run list (We can't just call
+	 * INIT_LIST_HEAD because we don't want a kick_iocb to
+	 * queue this on the run list yet)
+	 */
+	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
+	spin_unlock_irq(&ctx->ctx_lock);
+
+	/* Quit retrying if the i/o has been cancelled */
+	if (kiocbIsCancelled(iocb)) {
+		ret = -EINTR;
+		aio_complete(iocb, ret, 0);
+		/* must not access the iocb after this */
+		goto out;
+	}
+
+	/*
+	 * Now we are all set to call the retry method in async
+	 * context. By setting this thread's io_wait context
+	 * to point to the wait queue entry inside the currently
+	 * running iocb for the duration of the retry, we ensure
+	 * that async notification wakeups are queued by the
+	 * operation instead of blocking waits, and when notified,
+	 * cause the iocb to be kicked for continuation (through
+	 * the aio_wake_function callback).
+	 */
+	BUG_ON(current->io_wait != NULL);
+	current->io_wait = &iocb->ki_wait;
+	ret = retry(iocb);
+	current->io_wait = NULL;
+
+	if (-EIOCBRETRY != ret) {
+ 		if (-EIOCBQUEUED != ret) {
+			BUG_ON(!list_empty(&iocb->ki_wait.task_list));
+			aio_complete(iocb, ret, 0);
+			/* must not access the iocb after this */
+		}
+	} else {
+		/*
+		 * Issue an additional retry to avoid waiting forever if
+		 * no waits were queued (e.g. in case of a short read).
+		 */
+		if (list_empty(&iocb->ki_wait.task_list))
+			kiocbSetKicked(iocb);
+	}
+out:
+	spin_lock_irq(&ctx->ctx_lock);
+
+	if (-EIOCBRETRY == ret) {
+		/*
+		 * OK, now that we are done with this iteration
+		 * and know that there is more left to go,
+		 * this is where we let go so that a subsequent
+		 * "kick" can start the next iteration
+		 */
+
+		/* will make __queue_kicked_iocb succeed from here on */
+		INIT_LIST_HEAD(&iocb->ki_run_list);
+		/* we must queue the next iteration ourselves, if it
+		 * has already been kicked */
+		if (kiocbIsKicked(iocb)) {
+			__queue_kicked_iocb(iocb);
+		}
+	}
+	return ret;
+}
+
+/*
+ * __aio_run_iocbs:
+ * 	Process all pending retries queued on the ioctx
+ * 	run list.
+ * Assumes it is operating within the aio issuer's mm
+ * context. Expects to be called with ctx->ctx_lock held
+ */
+static int __aio_run_iocbs(struct kioctx *ctx)
+{
+	struct kiocb *iocb;
+	int count = 0;
+	LIST_HEAD(run_list);
+
+	list_splice_init(&ctx->run_list, &run_list);
+	while (!list_empty(&run_list)) {
+		iocb = list_entry(run_list.next, struct kiocb,
+			ki_run_list);
+		list_del(&iocb->ki_run_list);
+		/*
+		 * Hold an extra reference while retrying i/o.
+		 */
+		iocb->ki_users++;       /* grab extra reference */
+		aio_run_iocb(iocb);
+		if (__aio_put_req(ctx, iocb))  /* drop extra ref */
+			put_ioctx(ctx);
+		count++;
+ 	}
+	aio_run++;
+	if (!list_empty(&ctx->run_list))
+		return 1;
+	return 0;
+}
+
+static void aio_queue_work(struct kioctx * ctx)
+{
+	unsigned long timeout;
+	/*
+	 * if someone is waiting, get the work started right
+	 * away, otherwise, use a longer delay
+	 */
+	smp_mb();
+	if (waitqueue_active(&ctx->wait))
+		timeout = 1;
+	else
+		timeout = HZ/10;
+	queue_delayed_work(aio_wq, &ctx->wq, timeout);
+}
+
+
+/*
+ * aio_run_iocbs:
+ * 	Process all pending retries queued on the ioctx
+ * 	run list.
+ * Assumes it is operating within the aio issuer's mm
+ * context.
+ */
+static inline void aio_run_iocbs(struct kioctx *ctx)
+{
+	int requeue;
+
+	spin_lock_irq(&ctx->ctx_lock);
+
+	requeue = __aio_run_iocbs(ctx);
+	spin_unlock_irq(&ctx->ctx_lock);
+	if (requeue)
+		aio_queue_work(ctx);
+}
+
+/*
+ * just like aio_run_iocbs, but keeps running them until
+ * the list stays empty
+ */
+static inline void aio_run_all_iocbs(struct kioctx *ctx)
+{
+	spin_lock_irq(&ctx->ctx_lock);
+	while (__aio_run_iocbs(ctx))
+		;
+	spin_unlock_irq(&ctx->ctx_lock);
+}
+
+/*
+ * aio_kick_handler:
+ * 	Work queue handler triggered to process pending
+ * 	retries on an ioctx. Takes on the aio issuer's
+ *	mm context before running the iocbs, so that
+ *	copy_xxx_user operates on the issuer's address
+ *      space.
+ * Run on aiod's context.
+ */
+static void aio_kick_handler(void *data)
+{
+	struct kioctx *ctx = data;
+	mm_segment_t oldfs = get_fs();
+	int requeue;
+
+	set_fs(USER_DS);
+	use_mm(ctx->mm);
+	spin_lock_irq(&ctx->ctx_lock);
+	requeue = __aio_run_iocbs(ctx);
+ 	unuse_mm(ctx->mm);
+	spin_unlock_irq(&ctx->ctx_lock);
+	set_fs(oldfs);
+	/*
+	 * we're in a worker thread already, don't use queue_delayed_work,
+	 */
+	if (requeue)
+		queue_work(aio_wq, &ctx->wq);
+}
+
+
+/*
+ * Called by kick_iocb to queue the kiocb for retry
+ * and if required activate the aio work queue to process
+ * it
+ */
+void queue_kicked_iocb(struct kiocb *iocb)
+{
+ 	struct kioctx	*ctx = iocb->ki_ctx;
+	unsigned long flags;
+	int run = 0;
+
+	WARN_ON((!list_empty(&iocb->ki_wait.task_list)));
+
+	spin_lock_irqsave(&ctx->ctx_lock, flags);
+	run = __queue_kicked_iocb(iocb);
+	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	if (run) {
+		aio_queue_work(ctx);
+		aio_wakeups++;
+	}
+}
+
+/*
+ * kick_iocb:
+ *      Called typically from a wait queue callback context
+ *      (aio_wake_function) to trigger a retry of the iocb.
+ *      The retry is usually executed by aio workqueue
+ *      threads (See aio_kick_handler).
+ */
+void fastcall kick_iocb(struct kiocb *iocb)
+{
+	/* sync iocbs are easy: they can only ever be executing from a 
+	 * single context. */
+	if (is_sync_kiocb(iocb)) {
+		kiocbSetKicked(iocb);
+	        wake_up_process(iocb->ki_obj.tsk);
+		return;
+	}
+
+	iocb->ki_kicked++;
+	/* If it's already kicked we shouldn't queue it again */
+	if (!kiocbTryKick(iocb)) {
+		queue_kicked_iocb(iocb);
+	}
+}
+EXPORT_SYMBOL(kick_iocb);
+
+/* aio_complete
+ *	Called when the io request on the given iocb is complete.
+ *	Returns true if this is the last user of the request.  The 
+ *	only other user of the request can be the cancellation code.
+ */
+int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct kioctx	*ctx = iocb->ki_ctx;
+	struct aio_ring_info	*info;
+	struct aio_ring	*ring;
+	struct io_event	*event;
+	unsigned long	flags;
+	unsigned long	tail;
+	int		ret;
+
+	/* Special case handling for sync iocbs: events go directly
+	 * into the iocb for fast handling.  Note that this will not 
+	 * work if we allow sync kiocbs to be cancelled, in which
+	 * case the usage count checks will have to move under ctx_lock
+	 * for all cases.
+	 */
+	if (is_sync_kiocb(iocb)) {
+		int ret;
+
+		iocb->ki_user_data = res;
+		if (iocb->ki_users == 1) {
+			iocb->ki_users = 0;
+			ret = 1;
+		} else {
+			spin_lock_irq(&ctx->ctx_lock);
+			iocb->ki_users--;
+			ret = (0 == iocb->ki_users);
+			spin_unlock_irq(&ctx->ctx_lock);
+		}
+		/* sync iocbs put the task here for us */
+		wake_up_process(iocb->ki_obj.tsk);
+		return ret;
+	}
+
+	info = &ctx->ring_info;
+
+	/* add a completion event to the ring buffer.
+	 * must be done holding ctx->ctx_lock to prevent
+	 * other code from messing with the tail
+	 * pointer since we might be called from irq
+	 * context.
+	 */
+	spin_lock_irqsave(&ctx->ctx_lock, flags);
+
+	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
+		list_del_init(&iocb->ki_run_list);
+
+	/*
+	 * cancelled requests don't get events, userland was given one
+	 * when the event got cancelled.
+	 */
+	if (kiocbIsCancelled(iocb))
+		goto put_rq;
+
+	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+
+	tail = info->tail;
+	event = aio_ring_event(info, tail, KM_IRQ0);
+	tail = (tail + 1) % info->nr;
+
+	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
+	event->data = iocb->ki_user_data;
+	event->res = res;
+	event->res2 = res2;
+
+	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
+		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+		res, res2);
+
+	/* after flagging the request as done, we
+	 * must never even look at it again
+	 */
+	smp_wmb();	/* make event visible before updating tail */
+
+	info->tail = tail;
+	ring->tail = tail;
+
+	put_aio_ring_event(event, KM_IRQ0);
+	kunmap_atomic(ring, KM_IRQ1);
+
+	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+
+	pr_debug("%ld retries: %d of %d (kicked %ld, Q %ld run %ld wake %ld)\n",
+		iocb->ki_retried,
+		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes,
+		iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups);
+put_rq:
+	/* everything turned out well, dispose of the aiocb. */
+	ret = __aio_put_req(ctx, iocb);
+
+	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+
+	if (waitqueue_active(&ctx->wait))
+		wake_up(&ctx->wait);
+
+	if (ret)
+		put_ioctx(ctx);
+
+	return ret;
+}
+
+/* aio_read_evt
+ *	Pull an event off of the ioctx's event ring.  Returns the number of 
+ *	events fetched (0 or 1 ;-)
+ *	FIXME: make this use cmpxchg.
+ *	TODO: make the ringbuffer user mmap()able (requires FIXME).
+ */
+static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+{
+	struct aio_ring_info *info = &ioctx->ring_info;
+	struct aio_ring *ring;
+	unsigned long head;
+	int ret = 0;
+
+	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
+		 (unsigned long)ring->head, (unsigned long)ring->tail,
+		 (unsigned long)ring->nr);
+
+	if (ring->head == ring->tail)
+		goto out;
+
+	spin_lock(&info->ring_lock);
+
+	head = ring->head % info->nr;
+	if (head != ring->tail) {
+		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+		*ent = *evp;
+		head = (head + 1) % info->nr;
+		smp_mb(); /* finish reading the event before updating the head */
+		ring->head = head;
+		ret = 1;
+		put_aio_ring_event(evp, KM_USER1);
+	}
+	spin_unlock(&info->ring_lock);
+
+out:
+	kunmap_atomic(ring, KM_USER0);
+	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
+		 (unsigned long)ring->head, (unsigned long)ring->tail);
+	return ret;
+}
+
+struct aio_timeout {
+	struct timer_list	timer;
+	int			timed_out;
+	struct task_struct	*p;
+};
+
+static void timeout_func(unsigned long data)
+{
+	struct aio_timeout *to = (struct aio_timeout *)data;
+
+	to->timed_out = 1;
+	wake_up_process(to->p);
+}
+
+static inline void init_timeout(struct aio_timeout *to)
+{
+	init_timer(&to->timer);
+	to->timer.data = (unsigned long)to;
+	to->timer.function = timeout_func;
+	to->timed_out = 0;
+	to->p = current;
+}
+
+static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
+			       const struct timespec *ts)
+{
+	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
+	if (time_after(to->timer.expires, jiffies))
+		add_timer(&to->timer);
+	else
+		to->timed_out = 1;
+}
+
+static inline void clear_timeout(struct aio_timeout *to)
+{
+	del_singleshot_timer_sync(&to->timer);
+}
+
+static int read_events(struct kioctx *ctx,
+			long min_nr, long nr,
+			struct io_event __user *event,
+			struct timespec __user *timeout)
+{
+	long			start_jiffies = jiffies;
+	struct task_struct	*tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	int			ret;
+	int			i = 0;
+	struct io_event		ent;
+	struct aio_timeout	to;
+	int 			event_loop = 0; /* testing only */
+	int			retry = 0;
+
+	/* needed to zero any padding within an entry (there shouldn't be 
+	 * any, but C is fun!)
+	 */
+	memset(&ent, 0, sizeof(ent));
+retry:
+	ret = 0;
+	while (likely(i < nr)) {
+		ret = aio_read_evt(ctx, &ent);
+		if (unlikely(ret <= 0))
+			break;
+
+		dprintk("read event: %Lx %Lx %Lx %Lx\n",
+			ent.data, ent.obj, ent.res, ent.res2);
+
+		/* Could we split the check in two? */
+		ret = -EFAULT;
+		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
+			dprintk("aio: lost an event due to EFAULT.\n");
+			break;
+		}
+		ret = 0;
+
+		/* Good, event copied to userland, update counts. */
+		event ++;
+		i ++;
+	}
+
+	if (min_nr <= i)
+		return i;
+	if (ret)
+		return ret;
+
+	/* End fast path */
+
+	/* racey check, but it gets redone */
+	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
+		retry = 1;
+		aio_run_all_iocbs(ctx);
+		goto retry;
+	}
+
+	init_timeout(&to);
+	if (timeout) {
+		struct timespec	ts;
+		ret = -EFAULT;
+		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
+			goto out;
+
+		set_timeout(start_jiffies, &to, &ts);
+	}
+
+	while (likely(i < nr)) {
+		add_wait_queue_exclusive(&ctx->wait, &wait);
+		do {
+			set_task_state(tsk, TASK_INTERRUPTIBLE);
+			ret = aio_read_evt(ctx, &ent);
+			if (ret)
+				break;
+			if (min_nr <= i)
+				break;
+			ret = 0;
+			if (to.timed_out)	/* Only check after read evt */
+				break;
+			schedule();
+			event_loop++;
+			if (signal_pending(tsk)) {
+				ret = -EINTR;
+				break;
+			}
+			/*ret = aio_read_evt(ctx, &ent);*/
+		} while (1) ;
+
+		set_task_state(tsk, TASK_RUNNING);
+		remove_wait_queue(&ctx->wait, &wait);
+
+		if (unlikely(ret <= 0))
+			break;
+
+		ret = -EFAULT;
+		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
+			dprintk("aio: lost an event due to EFAULT.\n");
+			break;
+		}
+
+		/* Good, event copied to userland, update counts. */
+		event ++;
+		i ++;
+	}
+
+	if (timeout)
+		clear_timeout(&to);
+out:
+	pr_debug("event loop executed %d times\n", event_loop);
+	pr_debug("aio_run %ld\n", aio_run);
+	pr_debug("aio_wakeups %ld\n", aio_wakeups);
+	return i ? i : ret;
+}
+
+/* Take an ioctx and remove it from the list of ioctx's.  Protects 
+ * against races with itself via ->dead.
+ */
+static void io_destroy(struct kioctx *ioctx)
+{
+	struct mm_struct *mm = current->mm;
+	struct kioctx **tmp;
+	int was_dead;
+
+	/* delete the entry from the list if someone else hasn't already */
+	write_lock(&mm->ioctx_list_lock);
+	was_dead = ioctx->dead;
+	ioctx->dead = 1;
+	for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
+	     tmp = &(*tmp)->next)
+		;
+	if (*tmp)
+		*tmp = ioctx->next;
+	write_unlock(&mm->ioctx_list_lock);
+
+	dprintk("aio_release(%p)\n", ioctx);
+	if (likely(!was_dead))
+		put_ioctx(ioctx);	/* twice for the list */
+
+	aio_cancel_all(ioctx);
+	wait_for_all_aios(ioctx);
+	put_ioctx(ioctx);	/* once for the lookup */
+}
+
+/* sys_io_setup:
+ *	Create an aio_context capable of receiving at least nr_events.
+ *	ctxp must not point to an aio_context that already exists, and
+ *	must be initialized to 0 prior to the call.  On successful
+ *	creation of the aio_context, *ctxp is filled in with the resulting 
+ *	handle.  May fail with -EINVAL if *ctxp is not initialized,
+ *	if the specified nr_events exceeds internal limits.  May fail 
+ *	with -EAGAIN if the specified nr_events exceeds the user's limit 
+ *	of available events.  May fail with -ENOMEM if insufficient kernel
+ *	resources are available.  May fail with -EFAULT if an invalid
+ *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
+ *	implemented.
+ */
+asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
+{
+	struct kioctx *ioctx = NULL;
+	unsigned long ctx;
+	long ret;
+
+	ret = get_user(ctx, ctxp);
+	if (unlikely(ret))
+		goto out;
+
+	ret = -EINVAL;
+	if (unlikely(ctx || (int)nr_events <= 0)) {
+		pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
+		goto out;
+	}
+
+	ioctx = ioctx_alloc(nr_events);
+	ret = PTR_ERR(ioctx);
+	if (!IS_ERR(ioctx)) {
+		ret = put_user(ioctx->user_id, ctxp);
+		if (!ret)
+			return 0;
+
+		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+		io_destroy(ioctx);
+	}
+
+out:
+	return ret;
+}
+
+/* sys_io_destroy:
+ *	Destroy the aio_context specified.  May cancel any outstanding 
+ *	AIOs and block on completion.  Will fail with -ENOSYS if not
+ *	implemented.  May fail with -EFAULT if the context pointed to
+ *	is invalid.
+ */
+asmlinkage long sys_io_destroy(aio_context_t ctx)
+{
+	struct kioctx *ioctx = lookup_ioctx(ctx);
+	if (likely(NULL != ioctx)) {
+		io_destroy(ioctx);
+		return 0;
+	}
+	pr_debug("EINVAL: io_destroy: invalid context id\n");
+	return -EINVAL;
+}
+
+/*
+ * Default retry method for aio_read (also used for first time submit)
+ * Responsible for updating iocb state as retries progress
+ */
+static ssize_t aio_pread(struct kiocb *iocb)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t ret = 0;
+
+	ret = file->f_op->aio_read(iocb, iocb->ki_buf,
+		iocb->ki_left, iocb->ki_pos);
+
+	/*
+	 * Can't just depend on iocb->ki_left to determine
+	 * whether we are done. This may have been a short read.
+	 */
+	if (ret > 0) {
+		iocb->ki_buf += ret;
+		iocb->ki_left -= ret;
+		/*
+		 * For pipes and sockets we return once we have
+		 * some data; for regular files we retry till we
+		 * complete the entire read or find that we can't
+		 * read any more data (e.g. short reads).
+		 */
+		if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))
+			ret = -EIOCBRETRY;
+	}
+
+	/* This means we must have transferred all that we could */
+	/* No need to retry anymore */
+	if ((ret == 0) || (iocb->ki_left == 0))
+		ret = iocb->ki_nbytes - iocb->ki_left;
+
+	return ret;
+}
+
+/*
+ * Default retry method for aio_write (also used for first time submit)
+ * Responsible for updating iocb state as retries progress
+ */
+static ssize_t aio_pwrite(struct kiocb *iocb)
+{
+	struct file *file = iocb->ki_filp;
+	ssize_t ret = 0;
+
+	ret = file->f_op->aio_write(iocb, iocb->ki_buf,
+		iocb->ki_left, iocb->ki_pos);
+
+	if (ret > 0) {
+		iocb->ki_buf += ret;
+		iocb->ki_left -= ret;
+
+		ret = -EIOCBRETRY;
+	}
+
+	/* This means we must have transferred all that we could */
+	/* No need to retry anymore */
+	if ((ret == 0) || (iocb->ki_left == 0))
+		ret = iocb->ki_nbytes - iocb->ki_left;
+
+	return ret;
+}
+
+static ssize_t aio_fdsync(struct kiocb *iocb)
+{
+	struct file *file = iocb->ki_filp;
+	ssize_t ret = -EINVAL;
+
+	if (file->f_op->aio_fsync)
+		ret = file->f_op->aio_fsync(iocb, 1);
+	return ret;
+}
+
+static ssize_t aio_fsync(struct kiocb *iocb)
+{
+	struct file *file = iocb->ki_filp;
+	ssize_t ret = -EINVAL;
+
+	if (file->f_op->aio_fsync)
+		ret = file->f_op->aio_fsync(iocb, 0);
+	return ret;
+}
+
+/*
+ * aio_setup_iocb:
+ *	Performs the initial checks and aio retry method
+ *	setup for the kiocb at the time of io submission.
+ */
+ssize_t aio_setup_iocb(struct kiocb *kiocb)
+{
+	struct file *file = kiocb->ki_filp;
+	ssize_t ret = 0;
+
+	switch (kiocb->ki_opcode) {
+	case IOCB_CMD_PREAD:
+		ret = -EBADF;
+		if (unlikely(!(file->f_mode & FMODE_READ)))
+			break;
+		ret = -EFAULT;
+		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
+			kiocb->ki_left)))
+			break;
+		ret = -EINVAL;
+		if (file->f_op->aio_read)
+			kiocb->ki_retry = aio_pread;
+		break;
+	case IOCB_CMD_PWRITE:
+		ret = -EBADF;
+		if (unlikely(!(file->f_mode & FMODE_WRITE)))
+			break;
+		ret = -EFAULT;
+		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
+			kiocb->ki_left)))
+			break;
+		ret = -EINVAL;
+		if (file->f_op->aio_write)
+			kiocb->ki_retry = aio_pwrite;
+		break;
+	case IOCB_CMD_FDSYNC:
+		ret = -EINVAL;
+		if (file->f_op->aio_fsync)
+			kiocb->ki_retry = aio_fdsync;
+		break;
+	case IOCB_CMD_FSYNC:
+		ret = -EINVAL;
+		if (file->f_op->aio_fsync)
+			kiocb->ki_retry = aio_fsync;
+		break;
+	default:
+		dprintk("EINVAL: io_submit: no operation provided\n");
+		ret = -EINVAL;
+	}
+
+	if (!kiocb->ki_retry)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * aio_wake_function:
+ * 	wait queue callback function for aio notification,
+ * 	Simply triggers a retry of the operation via kick_iocb.
+ *
+ * 	This callback is specified in the wait queue entry in
+ *	a kiocb	(current->io_wait points to this wait queue
+ *	entry when an aio operation executes; it is used
+ * 	instead of a synchronous wait when an i/o blocking
+ *	condition is encountered during aio).
+ *
+ * Note:
+ * This routine is executed with the wait queue lock held.
+ * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
+ * the ioctx lock inside the wait queue lock. This is safe
+ * because this callback isn't used for wait queues which
+ * are nested inside ioctx lock (i.e. ctx->wait)
+ */
+int aio_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
+
+	list_del_init(&wait->task_list);
+	kick_iocb(iocb);
+	return 1;
+}
+
+int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+			 struct iocb *iocb)
+{
+	struct kiocb *req;
+	struct file *file;
+	ssize_t ret;
+
+	/* enforce forwards compatibility on users */
+	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 ||
+		     iocb->aio_reserved3)) {
+		pr_debug("EINVAL: io_submit: reserve field set\n");
+		return -EINVAL;
+	}
+
+	/* prevent overflows */
+	if (unlikely(
+	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
+	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
+	    ((ssize_t)iocb->aio_nbytes < 0)
+	   )) {
+		pr_debug("EINVAL: io_submit: overflow check\n");
+		return -EINVAL;
+	}
+
+	file = fget(iocb->aio_fildes);
+	if (unlikely(!file))
+		return -EBADF;
+
+	req = aio_get_req(ctx);		/* returns with 2 references to req */
+	if (unlikely(!req)) {
+		fput(file);
+		return -EAGAIN;
+	}
+
+	req->ki_filp = file;
+	iocb->aio_key = req->ki_key;
+	ret = put_user(iocb->aio_key, &user_iocb->aio_key);
+	if (unlikely(ret)) {
+		dprintk("EFAULT: aio_key\n");
+		goto out_put_req;
+	}
+
+	req->ki_obj.user = user_iocb;
+	req->ki_user_data = iocb->aio_data;
+	req->ki_pos = iocb->aio_offset;
+
+	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
+	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
+	req->ki_opcode = iocb->aio_lio_opcode;
+	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
+	INIT_LIST_HEAD(&req->ki_wait.task_list);
+	req->ki_run_list.next = req->ki_run_list.prev = NULL;
+	req->ki_retry = NULL;
+	req->ki_retried = 0;
+	req->ki_kicked = 0;
+	req->ki_queued = 0;
+	aio_run = 0;
+	aio_wakeups = 0;
+
+	ret = aio_setup_iocb(req);
+
+	if (ret)
+		goto out_put_req;
+
+	spin_lock_irq(&ctx->ctx_lock);
+	list_add_tail(&req->ki_run_list, &ctx->run_list);
+	/* drain the run list */
+	while (__aio_run_iocbs(ctx))
+		;
+	spin_unlock_irq(&ctx->ctx_lock);
+	aio_put_req(req);	/* drop extra ref to req */
+	return 0;
+
+out_put_req:
+	aio_put_req(req);	/* drop extra ref to req */
+	aio_put_req(req);	/* drop i/o ref to req */
+	return ret;
+}
+
+/* sys_io_submit:
+ *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
+ *	the number of iocbs queued.  May return -EINVAL if the aio_context
+ *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
+ *	*iocbpp[0] is not properly initialized, if the operation specified
+ *	is invalid for the file descriptor in the iocb.  May fail with
+ *	-EFAULT if any of the data structures point to invalid data.  May
+ *	fail with -EBADF if the file descriptor specified in the first
+ *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
+ *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
+ *	fail with -ENOSYS if not implemented.
+ */
+asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
+			      struct iocb __user * __user *iocbpp)
+{
+	struct kioctx *ctx;
+	long ret = 0;
+	int i;
+
+	if (unlikely(nr < 0))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
+		return -EFAULT;
+
+	ctx = lookup_ioctx(ctx_id);
+	if (unlikely(!ctx)) {
+		pr_debug("EINVAL: io_submit: invalid context id\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * AKPM: should this return a partial result if some of the IOs were
+	 * successfully submitted?
+	 */
+	for (i=0; i<nr; i++) {
+		struct iocb __user *user_iocb;
+		struct iocb tmp;
+
+		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = io_submit_one(ctx, user_iocb, &tmp);
+		if (ret)
+			break;
+	}
+
+	put_ioctx(ctx);
+	return i ? i : ret;
+}
+
+/* lookup_kiocb
+ *	Finds a given iocb for cancellation.
+ *	MUST be called with ctx->ctx_lock held.
+ */
+struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
+{
+	struct list_head *pos;
+	/* TODO: use a hash or array, this sucks. */
+	list_for_each(pos, &ctx->active_reqs) {
+		struct kiocb *kiocb = list_kiocb(pos);
+		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
+			return kiocb;
+	}
+	return NULL;
+}
+
+/* sys_io_cancel:
+ *	Attempts to cancel an iocb previously passed to io_submit.  If
+ *	the operation is successfully cancelled, the resulting event is
+ *	copied into the memory pointed to by result without being placed
+ *	into the completion queue and 0 is returned.  May fail with
+ *	-EFAULT if any of the data structures pointed to are invalid.
+ *	May fail with -EINVAL if aio_context specified by ctx_id is
+ *	invalid.  May fail with -EAGAIN if the iocb specified was not
+ *	cancelled.  Will fail with -ENOSYS if not implemented.
+ */
+asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
+			      struct io_event __user *result)
+{
+	int (*cancel)(struct kiocb *iocb, struct io_event *res);
+	struct kioctx *ctx;
+	struct kiocb *kiocb;
+	u32 key;
+	int ret;
+
+	ret = get_user(key, &iocb->aio_key);
+	if (unlikely(ret))
+		return -EFAULT;
+
+	ctx = lookup_ioctx(ctx_id);
+	if (unlikely(!ctx))
+		return -EINVAL;
+
+	spin_lock_irq(&ctx->ctx_lock);
+	ret = -EAGAIN;
+	kiocb = lookup_kiocb(ctx, iocb, key);
+	if (kiocb && kiocb->ki_cancel) {
+		cancel = kiocb->ki_cancel;
+		kiocb->ki_users ++;
+		kiocbSetCancelled(kiocb);
+	} else
+		cancel = NULL;
+	spin_unlock_irq(&ctx->ctx_lock);
+
+	if (NULL != cancel) {
+		struct io_event tmp;
+		pr_debug("calling cancel\n");
+		memset(&tmp, 0, sizeof(tmp));
+		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
+		tmp.data = kiocb->ki_user_data;
+		ret = cancel(kiocb, &tmp);
+		if (!ret) {
+			/* Cancellation succeeded -- copy the result
+			 * into the user's buffer.
+			 */
+			if (copy_to_user(result, &tmp, sizeof(tmp)))
+				ret = -EFAULT;
+		}
+	} else
+		printk(KERN_DEBUG "iocb has no cancel operation\n");
+
+	put_ioctx(ctx);
+
+	return ret;
+}
+
+/* io_getevents:
+ *	Attempts to read at least min_nr events and up to nr events from
+ *	the completion queue for the aio_context specified by ctx_id.  May
+ *	fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
+ *	if nr is out of range, if when is out of range.  May fail with
+ *	-EFAULT if any of the memory specified to is invalid.  May return
+ *	0 or < min_nr if no events are available and the timeout specified
+ *	by when	has elapsed, where when == NULL specifies an infinite
+ *	timeout.  Note that the timeout pointed to by when is relative and
+ *	will be updated if not NULL and the operation blocks.  Will fail
+ *	with -ENOSYS if not implemented.
+ */
+asmlinkage long sys_io_getevents(aio_context_t ctx_id,
+				 long min_nr,
+				 long nr,
+				 struct io_event __user *events,
+				 struct timespec __user *timeout)
+{
+	struct kioctx *ioctx = lookup_ioctx(ctx_id);
+	long ret = -EINVAL;
+
+	if (likely(ioctx)) {
+		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
+			ret = read_events(ioctx, min_nr, nr, events, timeout);
+		put_ioctx(ioctx);
+	}
+
+	return ret;
+}
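+
+/* Putting the syscalls above together, a user-space caller follows roughly
+ * the sequence sketched below (illustrative only; real programs usually go
+ * through the libaio wrappers, error checking is elided, and fd, buf and
+ * len stand for an open descriptor and a user buffer; userland needs
+ * <linux/aio_abi.h> and <sys/syscall.h>):
+ *
+ *	aio_context_t ctx = 0;
+ *	struct iocb cb, *cbs[1] = { &cb };
+ *	struct io_event ev;
+ *
+ *	syscall(__NR_io_setup, 8, &ctx);
+ *	memset(&cb, 0, sizeof(cb));
+ *	cb.aio_fildes = fd;
+ *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
+ *	cb.aio_buf = (unsigned long)buf;
+ *	cb.aio_nbytes = len;
+ *	syscall(__NR_io_submit, ctx, 1, cbs);
+ *	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
+ *	syscall(__NR_io_destroy, ctx);
+ */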
+
+__initcall(aio_setup);
+
+EXPORT_SYMBOL(aio_complete);
+EXPORT_SYMBOL(aio_put_req);
+EXPORT_SYMBOL(wait_on_sync_kiocb);
diff --git a/fs/attr.c b/fs/attr.c
new file mode 100644
index 0000000..c3c76fe
--- /dev/null
+++ b/fs/attr.c
@@ -0,0 +1,208 @@
+/*
+ *  linux/fs/attr.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  changes by Thomas Schoebel-Theuer
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/dnotify.h>
+#include <linux/fcntl.h>
+#include <linux/quotaops.h>
+#include <linux/security.h>
+
+/* Taken over from the old code... */
+
+/* POSIX UID/GID verification for setting inode attributes. */
+int inode_change_ok(struct inode *inode, struct iattr *attr)
+{
+	int retval = -EPERM;
+	unsigned int ia_valid = attr->ia_valid;
+
+	/* If force is set do it anyway. */
+	if (ia_valid & ATTR_FORCE)
+		goto fine;
+
+	/* Make sure a caller can chown. */
+	if ((ia_valid & ATTR_UID) &&
+	    (current->fsuid != inode->i_uid ||
+	     attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
+		goto error;
+
+	/* Make sure caller can chgrp. */
+	if ((ia_valid & ATTR_GID) &&
+	    (current->fsuid != inode->i_uid ||
+	    (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) &&
+	    !capable(CAP_CHOWN))
+		goto error;
+
+	/* Make sure a caller can chmod. */
+	if (ia_valid & ATTR_MODE) {
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			goto error;
+		/* Also check the setgid bit! */
+		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
+				inode->i_gid) && !capable(CAP_FSETID))
+			attr->ia_mode &= ~S_ISGID;
+	}
+
+	/* Check for setting the inode time. */
+	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
+		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+			goto error;
+	}
+fine:
+	retval = 0;
+error:
+	return retval;
+}
+
+EXPORT_SYMBOL(inode_change_ok);
+
+int inode_setattr(struct inode * inode, struct iattr * attr)
+{
+	unsigned int ia_valid = attr->ia_valid;
+	int error = 0;
+
+	if (ia_valid & ATTR_SIZE) {
+		if (attr->ia_size != i_size_read(inode)) {
+			error = vmtruncate(inode, attr->ia_size);
+			if (error || (ia_valid == ATTR_SIZE))
+				goto out;
+		} else {
+			/*
+			 * We skipped the truncate but must still update
+			 * timestamps
+			 */
+			ia_valid |= ATTR_MTIME|ATTR_CTIME;
+		}
+	}
+
+	if (ia_valid & ATTR_UID)
+		inode->i_uid = attr->ia_uid;
+	if (ia_valid & ATTR_GID)
+		inode->i_gid = attr->ia_gid;
+	if (ia_valid & ATTR_ATIME)
+		inode->i_atime = timespec_trunc(attr->ia_atime,
+						inode->i_sb->s_time_gran);
+	if (ia_valid & ATTR_MTIME)
+		inode->i_mtime = timespec_trunc(attr->ia_mtime,
+						inode->i_sb->s_time_gran);
+	if (ia_valid & ATTR_CTIME)
+		inode->i_ctime = timespec_trunc(attr->ia_ctime,
+						inode->i_sb->s_time_gran);
+	if (ia_valid & ATTR_MODE) {
+		umode_t mode = attr->ia_mode;
+
+		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+			mode &= ~S_ISGID;
+		inode->i_mode = mode;
+	}
+	mark_inode_dirty(inode);
+out:
+	return error;
+}
+
+EXPORT_SYMBOL(inode_setattr);
+
+int setattr_mask(unsigned int ia_valid)
+{
+	unsigned long dn_mask = 0;
+
+	if (ia_valid & ATTR_UID)
+		dn_mask |= DN_ATTRIB;
+	if (ia_valid & ATTR_GID)
+		dn_mask |= DN_ATTRIB;
+	if (ia_valid & ATTR_SIZE)
+		dn_mask |= DN_MODIFY;
+	/* both times implies a utime(s) call */
+	if ((ia_valid & (ATTR_ATIME|ATTR_MTIME)) == (ATTR_ATIME|ATTR_MTIME))
+		dn_mask |= DN_ATTRIB;
+	else if (ia_valid & ATTR_ATIME)
+		dn_mask |= DN_ACCESS;
+	else if (ia_valid & ATTR_MTIME)
+		dn_mask |= DN_MODIFY;
+	if (ia_valid & ATTR_MODE)
+		dn_mask |= DN_ATTRIB;
+	return dn_mask;
+}
+
+int notify_change(struct dentry * dentry, struct iattr * attr)
+{
+	struct inode *inode = dentry->d_inode;
+	mode_t mode;
+	int error;
+	struct timespec now;
+	unsigned int ia_valid = attr->ia_valid;
+
+	if (!inode)
+		BUG();
+
+	mode = inode->i_mode;
+	now = current_fs_time(inode->i_sb);
+
+	attr->ia_ctime = now;
+	if (!(ia_valid & ATTR_ATIME_SET))
+		attr->ia_atime = now;
+	if (!(ia_valid & ATTR_MTIME_SET))
+		attr->ia_mtime = now;
+	if (ia_valid & ATTR_KILL_SUID) {
+		attr->ia_valid &= ~ATTR_KILL_SUID;
+		if (mode & S_ISUID) {
+			if (!(ia_valid & ATTR_MODE)) {
+				ia_valid = attr->ia_valid |= ATTR_MODE;
+				attr->ia_mode = inode->i_mode;
+			}
+			attr->ia_mode &= ~S_ISUID;
+		}
+	}
+	if (ia_valid & ATTR_KILL_SGID) {
+		attr->ia_valid &= ~ ATTR_KILL_SGID;
+		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+			if (!(ia_valid & ATTR_MODE)) {
+				ia_valid = attr->ia_valid |= ATTR_MODE;
+				attr->ia_mode = inode->i_mode;
+			}
+			attr->ia_mode &= ~S_ISGID;
+		}
+	}
+	if (!attr->ia_valid)
+		return 0;
+
+	if (ia_valid & ATTR_SIZE)
+		down_write(&dentry->d_inode->i_alloc_sem);
+
+	if (inode->i_op && inode->i_op->setattr) {
+		error = security_inode_setattr(dentry, attr);
+		if (!error)
+			error = inode->i_op->setattr(dentry, attr);
+	} else {
+		error = inode_change_ok(inode, attr);
+		if (!error)
+			error = security_inode_setattr(dentry, attr);
+		if (!error) {
+			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
+				error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+			if (!error)
+				error = inode_setattr(inode, attr);
+		}
+	}
+
+	if (ia_valid & ATTR_SIZE)
+		up_write(&dentry->d_inode->i_alloc_sem);
+
+	if (!error) {
+		unsigned long dn_mask = setattr_mask(ia_valid);
+		if (dn_mask)
+			dnotify_parent(dentry, dn_mask);
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(notify_change);
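
The helpers above are meant to be called in sequence from a filesystem's ->setattr method when it has no special requirements of its own; a minimal sketch of that calling convention for a hypothetical filesystem (the name examplefs is made up for illustration):

#include <linux/fs.h>

static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* Check ownership/capability rules for the requested changes. */
	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	/* Apply them: truncate, ownership, mode and timestamp updates. */
	return inode_setattr(inode, attr);
}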
diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile
new file mode 100644
index 0000000..453a60f
--- /dev/null
+++ b/fs/autofs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux autofs-filesystem routines.
+#
+
+obj-$(CONFIG_AUTOFS_FS) += autofs.o
+
+autofs-objs := dirhash.o init.o inode.o root.o symlink.o waitq.o
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
new file mode 100644
index 0000000..6171431
--- /dev/null
+++ b/fs/autofs/autofs_i.h
@@ -0,0 +1,164 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *   
+ * linux/fs/autofs/autofs_i.h
+ *
+ *   Copyright 1997-1998 Transmeta Corporation - All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/* Internal header file for autofs */
+
+#include <linux/auto_fs.h>
+
+/* This is the range of ioctl() numbers we claim as ours */
+#define AUTOFS_IOC_FIRST     AUTOFS_IOC_READY
+#define AUTOFS_IOC_COUNT     32
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+
+#include <asm/current.h>
+#include <asm/uaccess.h>
+
+#ifdef DEBUG
+#define DPRINTK(D) (printk D)
+#else
+#define DPRINTK(D) ((void)0)
+#endif
+
+#define AUTOFS_SUPER_MAGIC 0x0187
+
+/*
+ * If the daemon returns a negative response (AUTOFS_IOC_FAIL) then the
+ * kernel will keep the negative response cached for up to the time given
+ * here, although the time can be shorter if the kernel throws the dcache
+ * entry away.  This probably should be settable from user space.
+ */
+#define AUTOFS_NEGATIVE_TIMEOUT (60*HZ)	/* 1 minute */
+
+/* Structures associated with the root directory hash table */
+
+#define AUTOFS_HASH_SIZE 67
+
+struct autofs_dir_ent {
+	int hash;
+	char *name;
+	int len;
+	ino_t ino;
+	struct dentry *dentry;
+	/* Linked list of entries */
+	struct autofs_dir_ent *next;
+	struct autofs_dir_ent **back;
+	/* The following entries are for the expiry system */
+	unsigned long last_usage;
+	struct list_head exp;
+};
+
+struct autofs_dirhash {
+	struct autofs_dir_ent *h[AUTOFS_HASH_SIZE];
+	struct list_head expiry_head;
+};
+
+struct autofs_wait_queue {
+	wait_queue_head_t queue;
+	struct autofs_wait_queue *next;
+	autofs_wqt_t wait_queue_token;
+	/* We use the following to see what we are waiting for */
+	int hash;
+	int len;
+	char *name;
+	/* This is for status reporting upon return */
+	int status;
+	int wait_ctr;
+};
+
+struct autofs_symlink {
+	char *data;
+	int len;
+	time_t mtime;
+};
+
+#define AUTOFS_MAX_SYMLINKS 256
+
+#define AUTOFS_ROOT_INO      1
+#define AUTOFS_FIRST_SYMLINK 2
+#define AUTOFS_FIRST_DIR_INO (AUTOFS_FIRST_SYMLINK+AUTOFS_MAX_SYMLINKS)
+
+#define AUTOFS_SYMLINK_BITMAP_LEN \
+	((AUTOFS_MAX_SYMLINKS+((sizeof(long)*1)-1))/(sizeof(long)*8))
+
+#define AUTOFS_SBI_MAGIC 0x6d4a556d
+
+struct autofs_sb_info {
+	u32 magic;
+	struct file *pipe;
+	pid_t oz_pgrp;
+	int catatonic;
+	unsigned long exp_timeout;
+	ino_t next_dir_ino;
+	struct autofs_wait_queue *queues; /* Wait queue pointer */
+	struct autofs_dirhash dirhash; /* Root directory hash */
+	struct autofs_symlink symlink[AUTOFS_MAX_SYMLINKS];
+	unsigned long symlink_bitmap[AUTOFS_SYMLINK_BITMAP_LEN];
+};
+
+static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
+{
+	return (struct autofs_sb_info *)(sb->s_fs_info);
+}
+
+/* autofs_oz_mode(): do we see the man behind the curtain?  (The
+   processes which do manipulations for us in user space see the raw
+   filesystem without "magic".) */
+
+static inline int autofs_oz_mode(struct autofs_sb_info *sbi) {
+	return sbi->catatonic || process_group(current) == sbi->oz_pgrp;
+}
+
+/* Hash operations */
+
+void autofs_initialize_hash(struct autofs_dirhash *);
+struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *,struct qstr *);
+void autofs_hash_insert(struct autofs_dirhash *,struct autofs_dir_ent *);
+void autofs_hash_delete(struct autofs_dir_ent *);
+struct autofs_dir_ent *autofs_hash_enum(const struct autofs_dirhash *,off_t *,struct autofs_dir_ent *);
+void autofs_hash_dputall(struct autofs_dirhash *);
+void autofs_hash_nuke(struct autofs_dirhash *);
+
+/* Expiration-handling functions */
+
+void autofs_update_usage(struct autofs_dirhash *,struct autofs_dir_ent *);
+struct autofs_dir_ent *autofs_expire(struct super_block *,struct autofs_sb_info *, struct vfsmount *mnt);
+
+/* Operations structures */
+
+extern struct inode_operations autofs_root_inode_operations;
+extern struct inode_operations autofs_symlink_inode_operations;
+extern struct file_operations autofs_root_operations;
+
+/* Initializing function */
+
+int autofs_fill_super(struct super_block *, void *, int);
+
+/* Queue management functions */
+
+int autofs_wait(struct autofs_sb_info *,struct qstr *);
+int autofs_wait_release(struct autofs_sb_info *,autofs_wqt_t,int);
+void autofs_catatonic_mode(struct autofs_sb_info *);
+
+#ifdef DEBUG
+void autofs_say(const char *name, int len);
+#else
+#define autofs_say(n,l) ((void)0)
+#endif
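
The inode-number macros above carve the number space into three fixed ranges; a small standalone illustration (values computed for AUTOFS_MAX_SYMLINKS == 256, constants copied here only so the program is self-contained):

#include <stdio.h>

#define AUTOFS_MAX_SYMLINKS  256
#define AUTOFS_ROOT_INO      1
#define AUTOFS_FIRST_SYMLINK 2
#define AUTOFS_FIRST_DIR_INO (AUTOFS_FIRST_SYMLINK + AUTOFS_MAX_SYMLINKS)

int main(void)
{
	printf("root inode:       %d\n", AUTOFS_ROOT_INO);             /* 1 */
	printf("symlink inodes:   %d..%d\n", AUTOFS_FIRST_SYMLINK,
	       AUTOFS_FIRST_DIR_INO - 1);                              /* 2..257 */
	printf("directory inodes: %d and up\n", AUTOFS_FIRST_DIR_INO); /* 258+ */
	return 0;
}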
diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c
new file mode 100644
index 0000000..448143f
--- /dev/null
+++ b/fs/autofs/dirhash.c
@@ -0,0 +1,249 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/dirhash.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include "autofs_i.h"
+
+/* Functions for maintenance of expiry queue */
+
+static void autofs_init_usage(struct autofs_dirhash *dh,
+			      struct autofs_dir_ent *ent)
+{
+	list_add_tail(&ent->exp, &dh->expiry_head);
+	ent->last_usage = jiffies;
+}
+
+static void autofs_delete_usage(struct autofs_dir_ent *ent)
+{
+	list_del(&ent->exp);
+}
+
+void autofs_update_usage(struct autofs_dirhash *dh,
+			 struct autofs_dir_ent *ent)
+{
+	autofs_delete_usage(ent);   /* Unlink from current position */
+	autofs_init_usage(dh,ent);  /* Relink at queue tail */
+}
+
+struct autofs_dir_ent *autofs_expire(struct super_block *sb,
+				     struct autofs_sb_info *sbi,
+				     struct vfsmount *mnt)
+{
+	struct autofs_dirhash *dh = &sbi->dirhash;
+	struct autofs_dir_ent *ent;
+	struct dentry *dentry;
+	unsigned long timeout = sbi->exp_timeout;
+
+	while (1) {
+		if ( list_empty(&dh->expiry_head) || sbi->catatonic )
+			return NULL;	/* No entries */
+		/* We keep the list sorted by last_usage and want old stuff */
+		ent = list_entry(dh->expiry_head.next, struct autofs_dir_ent, exp);
+		if (jiffies - ent->last_usage < timeout)
+			break;
+		/* Move to end of list in case expiry isn't desirable */
+		autofs_update_usage(dh, ent);
+
+		/* Check to see that entry is expirable */
+		if ( ent->ino < AUTOFS_FIRST_DIR_INO )
+			return ent; /* Symlinks are always expirable */
+
+		/* Get the dentry for the autofs subdirectory */
+		dentry = ent->dentry;
+
+		if ( !dentry ) {
+			/* Should only happen in catatonic mode */
+			printk("autofs: dentry == NULL but inode range is directory, entry %s\n", ent->name);
+			autofs_delete_usage(ent);
+			continue;
+		}
+
+		if ( !dentry->d_inode ) {
+			dput(dentry);
+			printk("autofs: negative dentry on expiry queue: %s\n",
+			       ent->name);
+			autofs_delete_usage(ent);
+			continue;
+		}
+
+		/* Make sure entry is mounted and unused; note that dentry will
+		   point to the mounted-on-top root. */
+		if (!S_ISDIR(dentry->d_inode->i_mode)||!d_mountpoint(dentry)) {
+			DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+			continue;
+		}
+		mntget(mnt);
+		dget(dentry);
+		if (!follow_down(&mnt, &dentry)) {
+			dput(dentry);
+			mntput(mnt);
+			DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+			continue;
+		}
+		while (d_mountpoint(dentry) && follow_down(&mnt, &dentry))
+			;
+		dput(dentry);
+
+		if ( may_umount(mnt) == 0 ) {
+			mntput(mnt);
+			DPRINTK(("autofs: signaling expire on %s\n", ent->name));
+			return ent; /* Expirable! */
+		}
+		DPRINTK(("autofs: didn't expire due to may_umount: %s\n", ent->name));
+		mntput(mnt);
+	}
+	return NULL;		/* No expirable entries */
+}
+
+void autofs_initialize_hash(struct autofs_dirhash *dh) {
+	memset(&dh->h, 0, AUTOFS_HASH_SIZE*sizeof(struct autofs_dir_ent *));
+	INIT_LIST_HEAD(&dh->expiry_head);
+}
+
+struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *dh, struct qstr *name)
+{
+	struct autofs_dir_ent *dhn;
+
+	DPRINTK(("autofs_hash_lookup: hash = 0x%08x, name = ", name->hash));
+	autofs_say(name->name,name->len);
+
+	for ( dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE] ; dhn ; dhn = dhn->next ) {
+		if ( name->hash == dhn->hash &&
+		     name->len == dhn->len &&
+		     !memcmp(name->name, dhn->name, name->len) )
+			break;
+	}
+
+	return dhn;
+}
+
+void autofs_hash_insert(struct autofs_dirhash *dh, struct autofs_dir_ent *ent)
+{
+	struct autofs_dir_ent **dhnp;
+
+	DPRINTK(("autofs_hash_insert: hash = 0x%08x, name = ", ent->hash));
+	autofs_say(ent->name,ent->len);
+
+	autofs_init_usage(dh,ent);
+	if (ent->dentry)
+		dget(ent->dentry);
+
+	dhnp = &dh->h[(unsigned) ent->hash % AUTOFS_HASH_SIZE];
+	ent->next = *dhnp;
+	ent->back = dhnp;
+	*dhnp = ent;
+	if ( ent->next )
+		ent->next->back = &(ent->next);
+}
+
+void autofs_hash_delete(struct autofs_dir_ent *ent)
+{
+	*(ent->back) = ent->next;
+	if ( ent->next )
+		ent->next->back = ent->back;
+
+	autofs_delete_usage(ent);
+
+	if ( ent->dentry )
+		dput(ent->dentry);
+	kfree(ent->name);
+	kfree(ent);
+}
+
+/*
+ * Used by readdir().  We must validate "ptr", so we can't simply make it
+ * a pointer.  Values below 0xffff are reserved; calling with any value
+ * <= 0x10000 will return the first entry found.
+ *
+ * "last" can be NULL or the value returned by the last search *if* we
+ * want the next sequential entry.
+ */
+struct autofs_dir_ent *autofs_hash_enum(const struct autofs_dirhash *dh,
+					off_t *ptr, struct autofs_dir_ent *last)
+{
+	int bucket, ecount, i;
+	struct autofs_dir_ent *ent;
+
+	bucket = (*ptr >> 16) - 1;
+	ecount = *ptr & 0xffff;
+
+	if ( bucket < 0 ) {
+		bucket = ecount = 0;
+	} 
+
+	DPRINTK(("autofs_hash_enum: bucket %d, entry %d\n", bucket, ecount));
+
+	ent = last ? last->next : NULL;
+
+	if ( ent ) {
+		ecount++;
+	} else {
+		while  ( bucket < AUTOFS_HASH_SIZE ) {
+			ent = dh->h[bucket];
+			for ( i = ecount ; ent && i ; i-- )
+				ent = ent->next;
+			
+			if (ent) {
+				ecount++; /* Point to *next* entry */
+				break;
+			}
+			
+			bucket++; ecount = 0;
+		}
+	}
+
+#ifdef DEBUG
+	if ( !ent )
+		printk("autofs_hash_enum: nothing found\n");
+	else {
+		printk("autofs_hash_enum: found hash %08x, name", ent->hash);
+		autofs_say(ent->name,ent->len);
+	}
+#endif
+
+	*ptr = ((bucket+1) << 16) + ecount;
+	return ent;
+}
+
+/* Iterate over all the ents, and remove all dentry pointers.  Used on
+   entering catatonic mode, so that the filesystem can later be unmounted. */
+void autofs_hash_dputall(struct autofs_dirhash *dh)
+{
+	int i;
+	struct autofs_dir_ent *ent;
+
+	for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
+		for ( ent = dh->h[i] ; ent ; ent = ent->next ) {
+			if ( ent->dentry ) {
+				dput(ent->dentry);
+				ent->dentry = NULL;
+			}
+		}
+	}
+}
+
+/* Delete everything.  This is used on filesystem destruction, so we
+   make no attempt to keep the pointers valid */
+void autofs_hash_nuke(struct autofs_dirhash *dh)
+{
+	int i;
+	struct autofs_dir_ent *ent, *nent;
+
+	for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
+		for ( ent = dh->h[i] ; ent ; ent = nent ) {
+			nent = ent->next;
+			if ( ent->dentry )
+				dput(ent->dentry);
+			kfree(ent->name);
+			kfree(ent);
+		}
+	}
+}
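
The cursor handed back and forth by autofs_hash_enum() above packs the hash bucket (offset by one) into the high bits and the per-bucket entry count into the low 16 bits; a tiny standalone illustration of that encoding (the sample values are arbitrary):

#include <stdio.h>

int main(void)
{
	/* Encode: bucket 3, 7 entries already consumed in that bucket. */
	long ptr = ((3 + 1) << 16) + 7;

	/* Decode, exactly as autofs_hash_enum() does on entry. */
	int bucket = (ptr >> 16) - 1;   /* -> 3 */
	int ecount = ptr & 0xffff;      /* -> 7 */

	printf("ptr = 0x%lx, bucket = %d, ecount = %d\n", ptr, bucket, ecount);
	return 0;
}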
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
new file mode 100644
index 0000000..b977ece
--- /dev/null
+++ b/fs/autofs/init.c
@@ -0,0 +1,52 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/init.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include "autofs_i.h"
+
+static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, autofs_fill_super);
+}
+
+static struct file_system_type autofs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "autofs",
+	.get_sb		= autofs_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static int __init init_autofs_fs(void)
+{
+	return register_filesystem(&autofs_fs_type);
+}
+
+static void __exit exit_autofs_fs(void)
+{
+	unregister_filesystem(&autofs_fs_type);
+}
+
+module_init(init_autofs_fs);
+module_exit(exit_autofs_fs);
+
+#ifdef DEBUG
+void autofs_say(const char *name, int len)
+{
+	printk("(%d: ", len);
+	while ( len-- )
+		printk("%c", *name++);
+	printk(")\n");
+}
+#endif
+MODULE_LICENSE("GPL");
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
new file mode 100644
index 0000000..4888c1f
--- /dev/null
+++ b/fs/autofs/inode.c
@@ -0,0 +1,250 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/inode.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/parser.h>
+#include <linux/bitops.h>
+#include "autofs_i.h"
+#include <linux/module.h>
+
+static void autofs_put_super(struct super_block *sb)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(sb);
+	unsigned int n;
+
+	if ( !sbi->catatonic )
+		autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */
+
+	autofs_hash_nuke(&sbi->dirhash);
+	for ( n = 0 ; n < AUTOFS_MAX_SYMLINKS ; n++ ) {
+		if ( test_bit(n, sbi->symlink_bitmap) )
+			kfree(sbi->symlink[n].data);
+	}
+
+	kfree(sb->s_fs_info);
+
+	DPRINTK(("autofs: shutting down\n"));
+}
+
+static void autofs_read_inode(struct inode *inode);
+
+static struct super_operations autofs_sops = {
+	.read_inode	= autofs_read_inode,
+	.put_super	= autofs_put_super,
+	.statfs		= simple_statfs,
+};
+
+enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto};
+
+static match_table_t autofs_tokens = {
+	{Opt_fd, "fd=%u"},
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_pgrp, "pgrp=%u"},
+	{Opt_minproto, "minproto=%u"},
+	{Opt_maxproto, "maxproto=%u"},
+	{Opt_err, NULL}
+};
+
+static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, pid_t *pgrp, int *minproto, int *maxproto)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+
+	*uid = current->uid;
+	*gid = current->gid;
+	*pgrp = process_group(current);
+
+	*minproto = *maxproto = AUTOFS_PROTO_VERSION;
+
+	*pipefd = -1;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, autofs_tokens, args);
+		switch (token) {
+		case Opt_fd:
+			if (match_int(&args[0], &option))
+				return 1;
+			*pipefd = option;
+			break;
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return 1;
+			*uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return 1;
+			*gid = option;
+			break;
+		case Opt_pgrp:
+			if (match_int(&args[0], &option))
+				return 1;
+			*pgrp = option;
+			break;
+		case Opt_minproto:
+			if (match_int(&args[0], &option))
+				return 1;
+			*minproto = option;
+			break;
+		case Opt_maxproto:
+			if (match_int(&args[0], &option))
+				return 1;
+			*maxproto = option;
+			break;
+		default:
+			return 1;
+		}
+	}
+	return (*pipefd < 0);
+}
+
+int autofs_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct inode * root_inode;
+	struct dentry * root;
+	struct file * pipe;
+	int pipefd;
+	struct autofs_sb_info *sbi;
+	int minproto, maxproto;
+
+	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+	if ( !sbi )
+		goto fail_unlock;
+	memset(sbi, 0, sizeof(*sbi));
+	DPRINTK(("autofs: starting up, sbi = %p\n",sbi));
+
+	s->s_fs_info = sbi;
+	sbi->magic = AUTOFS_SBI_MAGIC;
+	sbi->catatonic = 0;
+	sbi->exp_timeout = 0;
+	sbi->oz_pgrp = process_group(current);
+	autofs_initialize_hash(&sbi->dirhash);
+	sbi->queues = NULL;
+	memset(sbi->symlink_bitmap, 0, sizeof(long)*AUTOFS_SYMLINK_BITMAP_LEN);
+	sbi->next_dir_ino = AUTOFS_FIRST_DIR_INO;
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = AUTOFS_SUPER_MAGIC;
+	s->s_op = &autofs_sops;
+	s->s_time_gran = 1;
+
+	root_inode = iget(s, AUTOFS_ROOT_INO);
+	root = d_alloc_root(root_inode);
+	pipe = NULL;
+
+	if (!root)
+		goto fail_iput;
+
+	/* Can this call block?  - WTF cares? s is locked. */
+	if ( parse_options(data,&pipefd,&root_inode->i_uid,&root_inode->i_gid,&sbi->oz_pgrp,&minproto,&maxproto) ) {
+		printk("autofs: called with bogus options\n");
+		goto fail_dput;
+	}
+
+	/* Couldn't this be tested earlier? */
+	if ( minproto > AUTOFS_PROTO_VERSION || 
+	     maxproto < AUTOFS_PROTO_VERSION ) {
+		printk("autofs: kernel does not match daemon version\n");
+		goto fail_dput;
+	}
+
+	DPRINTK(("autofs: pipe fd = %d, pgrp = %u\n", pipefd, sbi->oz_pgrp));
+	pipe = fget(pipefd);
+	
+	if ( !pipe ) {
+		printk("autofs: could not open pipe file descriptor\n");
+		goto fail_dput;
+	}
+	if ( !pipe->f_op || !pipe->f_op->write )
+		goto fail_fput;
+	sbi->pipe = pipe;
+
+	/*
+	 * Success! Install the root dentry now to indicate completion.
+	 */
+	s->s_root = root;
+	return 0;
+
+fail_fput:
+	printk("autofs: pipe file descriptor does not contain proper ops\n");
+	fput(pipe);
+fail_dput:
+	dput(root);
+	goto fail_free;
+fail_iput:
+	printk("autofs: get root dentry failed\n");
+	iput(root_inode);
+fail_free:
+	kfree(sbi);
+fail_unlock:
+	return -EINVAL;
+}
+
+static void autofs_read_inode(struct inode *inode)
+{
+	ino_t ino = inode->i_ino;
+	unsigned int n;
+	struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+
+	/* Initialize to the default case (stub directory) */
+
+	inode->i_op = &simple_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+	inode->i_nlink = 2;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_blocks = 0;
+	inode->i_blksize = 1024;
+
+	if ( ino == AUTOFS_ROOT_INO ) {
+		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
+		inode->i_op = &autofs_root_inode_operations;
+		inode->i_fop = &autofs_root_operations;
+		inode->i_uid = inode->i_gid = 0; /* Changed in read_super */
+		return;
+	} 
+	
+	inode->i_uid = inode->i_sb->s_root->d_inode->i_uid;
+	inode->i_gid = inode->i_sb->s_root->d_inode->i_gid;
+	
+	if ( ino >= AUTOFS_FIRST_SYMLINK && ino < AUTOFS_FIRST_DIR_INO ) {
+		/* Symlink inode - should be in symlink list */
+		struct autofs_symlink *sl;
+
+		n = ino - AUTOFS_FIRST_SYMLINK;
+		if ( n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) {
+			printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino);
+			return;
+		}
+		
+		inode->i_op = &autofs_symlink_inode_operations;
+		sl = &sbi->symlink[n];
+		inode->u.generic_ip = sl;
+		inode->i_mode = S_IFLNK | S_IRWXUGO;
+		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = sl->mtime;
+		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+		inode->i_size = sl->len;
+		inode->i_nlink = 1;
+	}
+}
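
parse_options() above consumes the comma-separated string built by the automount daemon when it mounts the filesystem; a hedged userspace sketch of that mount call (the mount point, the omitted error handling and the daemon's subsequent read loop are illustrative assumptions):

#include <sys/mount.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int fds[2];
	char opts[64];

	/* The kernel writes request packets to fds[1]; the daemon reads
	 * them from fds[0]. */
	if (pipe(fds) < 0)
		return 1;

	snprintf(opts, sizeof(opts), "fd=%d,pgrp=%d,minproto=3,maxproto=3",
		 fds[1], getpgrp());

	/* The device name is not interpreted by autofs. */
	if (mount("automount", "/mnt/auto", "autofs", 0, opts) < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}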
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
new file mode 100644
index 0000000..a1ab1c0
--- /dev/null
+++ b/fs/autofs/root.c
@@ -0,0 +1,564 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/root.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/smp_lock.h>
+#include "autofs_i.h"
+
+static int autofs_root_readdir(struct file *,void *,filldir_t);
+static struct dentry *autofs_root_lookup(struct inode *,struct dentry *, struct nameidata *);
+static int autofs_root_symlink(struct inode *,struct dentry *,const char *);
+static int autofs_root_unlink(struct inode *,struct dentry *);
+static int autofs_root_rmdir(struct inode *,struct dentry *);
+static int autofs_root_mkdir(struct inode *,struct dentry *,int);
+static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
+
+struct file_operations autofs_root_operations = {
+	.read		= generic_read_dir,
+	.readdir	= autofs_root_readdir,
+	.ioctl		= autofs_root_ioctl,
+};
+
+struct inode_operations autofs_root_inode_operations = {
+        .lookup		= autofs_root_lookup,
+        .unlink		= autofs_root_unlink,
+        .symlink	= autofs_root_symlink,
+        .mkdir		= autofs_root_mkdir,
+        .rmdir		= autofs_root_rmdir,
+};
+
+static int autofs_root_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct autofs_dir_ent *ent = NULL;
+	struct autofs_dirhash *dirhash;
+	struct autofs_sb_info *sbi;
+	struct inode * inode = filp->f_dentry->d_inode;
+	off_t onr, nr;
+
+	lock_kernel();
+
+	sbi = autofs_sbi(inode->i_sb);
+	dirhash = &sbi->dirhash;
+	nr = filp->f_pos;
+
+	switch(nr)
+	{
+	case 0:
+		if (filldir(dirent, ".", 1, nr, inode->i_ino, DT_DIR) < 0)
+			goto out;
+		filp->f_pos = ++nr;
+		/* fall through */
+	case 1:
+		if (filldir(dirent, "..", 2, nr, inode->i_ino, DT_DIR) < 0)
+			goto out;
+		filp->f_pos = ++nr;
+		/* fall through */
+	default:
+		while ( onr = nr, ent = autofs_hash_enum(dirhash,&nr,ent) ) {
+			if ( !ent->dentry || d_mountpoint(ent->dentry) ) {
+				if (filldir(dirent,ent->name,ent->len,onr,ent->ino,DT_UNKNOWN) < 0)
+					goto out;
+				filp->f_pos = nr;
+			}
+		}
+		break;
+	}
+
+out:
+	unlock_kernel();
+	return 0;
+}
+
+static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, struct autofs_sb_info *sbi)
+{
+	struct inode * inode;
+	struct autofs_dir_ent *ent;
+	int status = 0;
+
+	if ( !(ent = autofs_hash_lookup(&sbi->dirhash, &dentry->d_name)) ) {
+		do {
+			if ( status && dentry->d_inode ) {
+				if ( status != -ENOENT )
+					printk("autofs warning: lookup failure on positive dentry, status = %d, name = %s\n", status, dentry->d_name.name);
+				return 0; /* Try to get the kernel to invalidate this dentry */
+			}
+
+			/* Turn this into a real negative dentry? */
+			if (status == -ENOENT) {
+				dentry->d_time = jiffies + AUTOFS_NEGATIVE_TIMEOUT;
+				dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
+				return 1;
+			} else if (status) {
+				/* Return a negative dentry, but leave it "pending" */
+				return 1;
+			}
+			status = autofs_wait(sbi, &dentry->d_name);
+		} while (!(ent = autofs_hash_lookup(&sbi->dirhash, &dentry->d_name)) );
+	}
+
+	/* Abuse this field as a pointer to the directory entry, used to
+	   find the expire list pointers */
+	dentry->d_time = (unsigned long) ent;
+	
+	if (!dentry->d_inode) {
+		inode = iget(sb, ent->ino);
+		if (!inode) {
+			/* Failed, but leave pending for next time */
+			return 1;
+		}
+		dentry->d_inode = inode;
+	}
+
+	/* If this is a directory that isn't a mount point, bitch at the
+	   daemon and fix it in user space */
+	if ( S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) ) {
+		return !autofs_wait(sbi, &dentry->d_name);
+	}
+
+	/* We don't update the usages for the autofs daemon itself; this
+	   is necessary for recursive autofs mounts */
+	if ( !autofs_oz_mode(sbi) ) {
+		autofs_update_usage(&sbi->dirhash,ent);
+	}
+
+	dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
+	return 1;
+}
+
+
+/*
+ * Revalidate is called on every cache lookup.  Some of those
+ * cache lookups may actually happen while the dentry is not
+ * yet completely filled in, and revalidate has to delay such
+ * lookups..
+ */
+static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
+{
+	struct inode * dir;
+	struct autofs_sb_info *sbi;
+	struct autofs_dir_ent *ent;
+	int res;
+
+	lock_kernel();
+	dir = dentry->d_parent->d_inode;
+	sbi = autofs_sbi(dir->i_sb);
+
+	/* Pending dentry */
+	if ( dentry->d_flags & DCACHE_AUTOFS_PENDING ) {
+		if (autofs_oz_mode(sbi))
+			res = 1;
+		else
+			res = try_to_fill_dentry(dentry, dir->i_sb, sbi);
+		unlock_kernel();
+		return res;
+	}
+
+	/* Negative dentry.. invalidate if "old" */
+	if (!dentry->d_inode) {
+		unlock_kernel();
+		return (dentry->d_time - jiffies <= AUTOFS_NEGATIVE_TIMEOUT);
+	}
+		
+	/* Check for a non-mountpoint directory */
+	if ( S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) ) {
+		if (autofs_oz_mode(sbi))
+			res = 1;
+		else
+			res = try_to_fill_dentry(dentry, dir->i_sb, sbi);
+		unlock_kernel();
+		return res;
+	}
+
+	/* Update the usage list */
+	if ( !autofs_oz_mode(sbi) ) {
+		ent = (struct autofs_dir_ent *) dentry->d_time;
+		if ( ent )
+			autofs_update_usage(&sbi->dirhash,ent);
+	}
+	unlock_kernel();
+	return 1;
+}
+
+static struct dentry_operations autofs_dentry_operations = {
+	.d_revalidate	= autofs_revalidate,
+};
+
+static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct autofs_sb_info *sbi;
+	int oz_mode;
+
+	DPRINTK(("autofs_root_lookup: name = "));
+	lock_kernel();
+	autofs_say(dentry->d_name.name,dentry->d_name.len);
+
+	if (dentry->d_name.len > NAME_MAX) {
+		unlock_kernel();
+		return ERR_PTR(-ENAMETOOLONG);/* File name too long to exist */
+	}
+
+	sbi = autofs_sbi(dir->i_sb);
+
+	oz_mode = autofs_oz_mode(sbi);
+	DPRINTK(("autofs_lookup: pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n",
+		 current->pid, process_group(current), sbi->catatonic, oz_mode));
+
+	/*
+	 * Mark the dentry incomplete, but add it. This is needed so
+	 * that the VFS layer knows about the dentry, and we can count
+	 * on catching any lookups through the revalidate.
+	 *
+	 * Let all the hard work be done by the revalidate function that
+	 * needs to be able to do this anyway..
+	 *
+	 * We need to do this before we release the directory semaphore.
+	 */
+	dentry->d_op = &autofs_dentry_operations;
+	dentry->d_flags |= DCACHE_AUTOFS_PENDING;
+	d_add(dentry, NULL);
+
+	up(&dir->i_sem);
+	autofs_revalidate(dentry, nd);
+	down(&dir->i_sem);
+
+	/*
+	 * If we are still pending, check if we had to handle
+	 * a signal. If so we can force a restart..
+	 */
+	if (dentry->d_flags & DCACHE_AUTOFS_PENDING) {
+		/* See if we were interrupted */
+		if (signal_pending(current)) {
+			sigset_t *sigset = &current->pending.signal;
+			if (sigismember (sigset, SIGKILL) ||
+			    sigismember (sigset, SIGQUIT) ||
+			    sigismember (sigset, SIGINT)) {
+				unlock_kernel();
+				return ERR_PTR(-ERESTARTNOINTR);
+			}
+		}
+	}
+	unlock_kernel();
+
+	/*
+	 * If this dentry is unhashed, then we shouldn't honour this
+	 * lookup even if the dentry is positive.  Returning ENOENT here
+	 * doesn't do the right thing for all system calls, but it should
+	 * be OK for the operations we permit from an autofs.
+	 */
+	if ( dentry->d_inode && d_unhashed(dentry) )
+		return ERR_PTR(-ENOENT);
+
+	return NULL;
+}
+
+static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+	struct autofs_dirhash *dh = &sbi->dirhash;
+	struct autofs_dir_ent *ent;
+	unsigned int n;
+	int slsize;
+	struct autofs_symlink *sl;
+
+	DPRINTK(("autofs_root_symlink: %s <- ", symname));
+	autofs_say(dentry->d_name.name,dentry->d_name.len);
+
+	lock_kernel();
+	if ( !autofs_oz_mode(sbi) ) {
+		unlock_kernel();
+		return -EACCES;
+	}
+
+	if ( autofs_hash_lookup(dh, &dentry->d_name) ) {
+		unlock_kernel();
+		return -EEXIST;
+	}
+
+	n = find_first_zero_bit(sbi->symlink_bitmap,AUTOFS_MAX_SYMLINKS);
+	if ( n >= AUTOFS_MAX_SYMLINKS ) {
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	set_bit(n,sbi->symlink_bitmap);
+	sl = &sbi->symlink[n];
+	sl->len = strlen(symname);
+	sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
+	if ( !sl->data ) {
+		clear_bit(n,sbi->symlink_bitmap);
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL);
+	if ( !ent ) {
+		kfree(sl->data);
+		clear_bit(n,sbi->symlink_bitmap);
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL);
+	if ( !ent->name ) {
+		kfree(sl->data);
+		kfree(ent);
+		clear_bit(n,sbi->symlink_bitmap);
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	memcpy(sl->data,symname,slsize);
+	sl->mtime = get_seconds();
+
+	ent->ino = AUTOFS_FIRST_SYMLINK + n;
+	ent->hash = dentry->d_name.hash;
+	memcpy(ent->name, dentry->d_name.name, 1+(ent->len = dentry->d_name.len));
+	ent->dentry = NULL;	/* We don't keep the dentry for symlinks */
+
+	autofs_hash_insert(dh,ent);
+	d_instantiate(dentry, iget(dir->i_sb,ent->ino));
+	unlock_kernel();
+	return 0;
+}
+
+/*
+ * NOTE!
+ *
+ * Normal filesystems would do a "d_delete()" to tell the VFS dcache
+ * that the file no longer exists. However, doing that means that the
+ * VFS layer can turn the dentry into a negative dentry, which we
+ * obviously do not want (we're dropping the entry not because it
+ * doesn't exist, but because it has timed out).
+ *
+ * Also see autofs_root_rmdir()..
+ */
+static int autofs_root_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+	struct autofs_dirhash *dh = &sbi->dirhash;
+	struct autofs_dir_ent *ent;
+	unsigned int n;
+
+	/* This allows root to remove symlinks */
+	lock_kernel();
+	if ( !autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) {
+		unlock_kernel();
+		return -EACCES;
+	}
+
+	ent = autofs_hash_lookup(dh, &dentry->d_name);
+	if ( !ent ) {
+		unlock_kernel();
+		return -ENOENT;
+	}
+
+	n = ent->ino - AUTOFS_FIRST_SYMLINK;
+	if ( n >= AUTOFS_MAX_SYMLINKS ) {
+		unlock_kernel();
+		return -EISDIR;	/* It's a directory, dummy */
+	}
+	if ( !test_bit(n,sbi->symlink_bitmap) ) {
+		unlock_kernel();
+		return -EINVAL;	/* Nonexistent symlink?  Shouldn't happen */
+	}
+	
+	dentry->d_time = (unsigned long)(struct autofs_dirhash *)NULL;
+	autofs_hash_delete(ent);
+	clear_bit(n,sbi->symlink_bitmap);
+	kfree(sbi->symlink[n].data);
+	d_drop(dentry);
+	
+	unlock_kernel();
+	return 0;
+}
+
+static int autofs_root_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+	struct autofs_dirhash *dh = &sbi->dirhash;
+	struct autofs_dir_ent *ent;
+
+	lock_kernel();
+	if ( !autofs_oz_mode(sbi) ) {
+		unlock_kernel();
+		return -EACCES;
+	}
+
+	ent = autofs_hash_lookup(dh, &dentry->d_name);
+	if ( !ent ) {
+		unlock_kernel();
+		return -ENOENT;
+	}
+
+	if ( (unsigned int)ent->ino < AUTOFS_FIRST_DIR_INO ) {
+		unlock_kernel();
+		return -ENOTDIR; /* Not a directory */
+	}
+
+	if ( ent->dentry != dentry ) {
+		printk("autofs_rmdir: odentry != dentry for entry %s\n", dentry->d_name.name);
+	}
+
+	dentry->d_time = (unsigned long)(struct autofs_dir_ent *)NULL;
+	autofs_hash_delete(ent);
+	dir->i_nlink--;
+	d_drop(dentry);
+	unlock_kernel();
+
+	return 0;
+}
+
+static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+	struct autofs_dirhash *dh = &sbi->dirhash;
+	struct autofs_dir_ent *ent;
+	ino_t ino;
+
+	lock_kernel();
+	if ( !autofs_oz_mode(sbi) ) {
+		unlock_kernel();
+		return -EACCES;
+	}
+
+	ent = autofs_hash_lookup(dh, &dentry->d_name);
+	if ( ent ) {
+		unlock_kernel();
+		return -EEXIST;
+	}
+
+	if ( sbi->next_dir_ino < AUTOFS_FIRST_DIR_INO ) {
+		printk("autofs: Out of inode numbers -- what the heck did you do??\n");
+		unlock_kernel();
+		return -ENOSPC;
+	}
+	ino = sbi->next_dir_ino++;
+
+	ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL);
+	if ( !ent ) {
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL);
+	if ( !ent->name ) {
+		kfree(ent);
+		unlock_kernel();
+		return -ENOSPC;
+	}
+
+	ent->hash = dentry->d_name.hash;
+	memcpy(ent->name, dentry->d_name.name, 1+(ent->len = dentry->d_name.len));
+	ent->ino = ino;
+	ent->dentry = dentry;
+	autofs_hash_insert(dh,ent);
+
+	dir->i_nlink++;
+	d_instantiate(dentry, iget(dir->i_sb,ino));
+	unlock_kernel();
+
+	return 0;
+}
+
+/* Get/set timeout ioctl() operation */
+static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi,
+					 unsigned long __user *p)
+{
+	unsigned long ntimeout;
+
+	if (get_user(ntimeout, p) ||
+	    put_user(sbi->exp_timeout / HZ, p))
+		return -EFAULT;
+
+	if ( ntimeout > ULONG_MAX/HZ )
+		sbi->exp_timeout = 0;
+	else
+		sbi->exp_timeout = ntimeout * HZ;
+
+	return 0;
+}
+
+/* Return protocol version */
+static inline int autofs_get_protover(int __user *p)
+{
+	return put_user(AUTOFS_PROTO_VERSION, p);
+}
+
+/* Perform an expiry operation */
+static inline int autofs_expire_run(struct super_block *sb,
+				    struct autofs_sb_info *sbi,
+				    struct vfsmount *mnt,
+				    struct autofs_packet_expire __user *pkt_p)
+{
+	struct autofs_dir_ent *ent;
+	struct autofs_packet_expire pkt;
+
+	memset(&pkt,0,sizeof pkt);
+
+	pkt.hdr.proto_version = AUTOFS_PROTO_VERSION;
+	pkt.hdr.type = autofs_ptype_expire;
+
+	if ( !sbi->exp_timeout ||
+	     !(ent = autofs_expire(sb,sbi,mnt)) )
+		return -EAGAIN;
+
+	pkt.len = ent->len;
+	memcpy(pkt.name, ent->name, pkt.len);
+	pkt.name[pkt.len] = '\0';
+
+	if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) )
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * ioctl()s on the root directory are the chief method for the daemon to
+ * generate kernel reactions
+ */
+static int autofs_root_ioctl(struct inode *inode, struct file *filp,
+			     unsigned int cmd, unsigned long arg)
+{
+	struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+	void __user *argp = (void __user *)arg;
+
+	DPRINTK(("autofs_ioctl: cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\n",cmd,arg,sbi,process_group(current)));
+
+	if ( _IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) ||
+	     _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT )
+		return -ENOTTY;
+	
+	if ( !autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) )
+		return -EPERM;
+	
+	switch(cmd) {
+	case AUTOFS_IOC_READY:	/* Wait queue: go ahead and retry */
+		return autofs_wait_release(sbi,(autofs_wqt_t)arg,0);
+	case AUTOFS_IOC_FAIL:	/* Wait queue: fail with ENOENT */
+		return autofs_wait_release(sbi,(autofs_wqt_t)arg,-ENOENT);
+	case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */
+		autofs_catatonic_mode(sbi);
+		return 0;
+	case AUTOFS_IOC_PROTOVER: /* Get protocol version */
+		return autofs_get_protover(argp);
+	case AUTOFS_IOC_SETTIMEOUT:
+		return autofs_get_set_timeout(sbi, argp);
+	case AUTOFS_IOC_EXPIRE:
+		return autofs_expire_run(inode->i_sb, sbi, filp->f_vfsmnt,
+					 argp);
+	default:
+		return -ENOSYS;
+	}
+}
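
The ioctl()s handled above are the daemon's half of the wait-queue handshake: the kernel writes an autofs_packet_missing to the pipe (see waitq.c below) and blocks the lookup until the daemon acknowledges the token. A hedged daemon-side sketch (the descriptors and the actual mount step are assumptions for illustration):

#include <linux/auto_fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* pipe_fd: read end of the pipe passed as "fd=" at mount time;
 * root_fd: an open descriptor on the autofs mount point itself. */
static int handle_one_request(int pipe_fd, int root_fd)
{
	struct autofs_packet_missing pkt;

	if (read(pipe_fd, &pkt, sizeof(pkt)) != sizeof(pkt))
		return -1;

	/* ... attempt to mount something on pkt.name under the mount point ... */

	/* Release the waiters; on failure the daemon would use
	 * AUTOFS_IOC_FAIL so the lookup completes with -ENOENT. */
	return ioctl(root_fd, AUTOFS_IOC_READY, pkt.wait_queue_token);
}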
diff --git a/fs/autofs/symlink.c b/fs/autofs/symlink.c
new file mode 100644
index 0000000..f028396
--- /dev/null
+++ b/fs/autofs/symlink.c
@@ -0,0 +1,25 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/symlink.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include "autofs_i.h"
+
+static int autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+	nd_set_link(nd, s);
+	return 0;
+}
+
+struct inode_operations autofs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= autofs_follow_link
+};
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
new file mode 100644
index 0000000..1fcaa15
--- /dev/null
+++ b/fs/autofs/waitq.c
@@ -0,0 +1,206 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/autofs/waitq.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/signal.h>
+#include <linux/file.h>
+#include "autofs_i.h"
+
+/* We make this a static variable rather than a part of the superblock; it
+   is better if we don't reassign numbers easily even across filesystems */
+static autofs_wqt_t autofs_next_wait_queue = 1;
+
+/* These are the signals we allow to interrupt a pending mount */
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT))
+
+void autofs_catatonic_mode(struct autofs_sb_info *sbi)
+{
+	struct autofs_wait_queue *wq, *nwq;
+
+	DPRINTK(("autofs: entering catatonic mode\n"));
+
+	sbi->catatonic = 1;
+	wq = sbi->queues;
+	sbi->queues = NULL;	/* Erase all wait queues */
+	while ( wq ) {
+		nwq = wq->next;
+		wq->status = -ENOENT; /* Magic is gone - report failure */
+		kfree(wq->name);
+		wq->name = NULL;
+		wake_up(&wq->queue);
+		wq = nwq;
+	}
+	fput(sbi->pipe);	/* Close the pipe */
+	autofs_hash_dputall(&sbi->dirhash); /* Remove all dentry pointers */
+}
+
+static int autofs_write(struct file *file, const void *addr, int bytes)
+{
+	unsigned long sigpipe, flags;
+	mm_segment_t fs;
+	const char *data = (const char *)addr;
+	ssize_t wr = 0;
+
+	/** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
+
+	sigpipe = sigismember(&current->pending.signal, SIGPIPE);
+
+	/* Save the address limit, then widen it so the pipe write accepts our kernel buffer */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	while (bytes &&
+	       (wr = file->f_op->write(file,data,bytes,&file->f_pos)) > 0) {
+		data += wr;
+		bytes -= wr;
+	}
+
+	set_fs(fs);
+
+	/* Keep the currently executing process from receiving a
+	   SIGPIPE unless it was already supposed to get one */
+	if (wr == -EPIPE && !sigpipe) {
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		sigdelset(&current->pending.signal, SIGPIPE);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+	}
+
+	return (bytes > 0);
+}
+	
+static void autofs_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq)
+{
+	struct autofs_packet_missing pkt;
+
+	DPRINTK(("autofs_wait: wait id = 0x%08lx, name = ", wq->wait_queue_token));
+	autofs_say(wq->name,wq->len);
+
+	memset(&pkt,0,sizeof pkt); /* For security reasons */
+
+	pkt.hdr.proto_version = AUTOFS_PROTO_VERSION;
+	pkt.hdr.type = autofs_ptype_missing;
+	pkt.wait_queue_token = wq->wait_queue_token;
+	pkt.len = wq->len;
+        memcpy(pkt.name, wq->name, pkt.len);
+	pkt.name[pkt.len] = '\0';
+
+	if ( autofs_write(sbi->pipe,&pkt,sizeof(struct autofs_packet_missing)) )
+		autofs_catatonic_mode(sbi);
+}
+
+int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
+{
+	struct autofs_wait_queue *wq;
+	int status;
+
+	/* In catatonic mode, we don't wait for nobody */
+	if ( sbi->catatonic )
+		return -ENOENT;
+	
+	/* We shouldn't be able to get here, but just in case */
+	if ( name->len > NAME_MAX )
+		return -ENOENT;
+
+	for ( wq = sbi->queues ; wq ; wq = wq->next ) {
+		if ( wq->hash == name->hash &&
+		     wq->len == name->len &&
+		     wq->name && !memcmp(wq->name,name->name,name->len) )
+			break;
+	}
+	
+	if ( !wq ) {
+		/* Create a new wait queue */
+		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
+		if ( !wq )
+			return -ENOMEM;
+
+		wq->name = kmalloc(name->len,GFP_KERNEL);
+		if ( !wq->name ) {
+			kfree(wq);
+			return -ENOMEM;
+		}
+		wq->wait_queue_token = autofs_next_wait_queue++;
+		init_waitqueue_head(&wq->queue);
+		wq->hash = name->hash;
+		wq->len = name->len;
+		wq->status = -EINTR; /* Status return if interrupted */
+		memcpy(wq->name, name->name, name->len);
+		wq->next = sbi->queues;
+		sbi->queues = wq;
+
+		/* autofs_notify_daemon() may block */
+		wq->wait_ctr = 2;
+		autofs_notify_daemon(sbi,wq);
+	} else
+		wq->wait_ctr++;
+
+	/* wq->name is NULL if and only if the lock is already released */
+
+	if ( sbi->catatonic ) {
+		/* We might have slept, so check again for catatonic mode */
+		wq->status = -ENOENT;
+		if ( wq->name ) {
+			kfree(wq->name);
+			wq->name = NULL;
+		}
+	}
+
+	if ( wq->name ) {
+		/* Block all but "shutdown" signals while waiting */
+		sigset_t sigmask;
+
+		siginitsetinv(&sigmask, SHUTDOWN_SIGS);
+		sigprocmask(SIG_BLOCK, &sigmask, &sigmask);
+
+		interruptible_sleep_on(&wq->queue);
+
+		sigprocmask(SIG_SETMASK, &sigmask, NULL);
+	} else {
+		DPRINTK(("autofs_wait: skipped sleeping\n"));
+	}
+
+	status = wq->status;
+
+	if ( ! --wq->wait_ctr )	/* Are we the last process to need status? */
+		kfree(wq);
+
+	return status;
+}
+
+
+int autofs_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+{
+	struct autofs_wait_queue *wq, **wql;
+
+	for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
+		if ( wq->wait_queue_token == wait_queue_token )
+			break;
+	}
+	if ( !wq )
+		return -EINVAL;
+
+	*wql = wq->next;	/* Unlink from chain */
+	kfree(wq->name);
+	wq->name = NULL;	/* Do not wait on this queue */
+
+	wq->status = status;
+
+	if ( ! --wq->wait_ctr )	/* Is anyone still waiting for this guy? */
+		kfree(wq);
+	else
+		wake_up(&wq->queue);
+
+	return 0;
+}
+
diff --git a/fs/autofs4/Makefile b/fs/autofs4/Makefile
new file mode 100644
index 0000000..f2c3b79
--- /dev/null
+++ b/fs/autofs4/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux autofs-filesystem routines.
+#
+
+obj-$(CONFIG_AUTOFS4_FS) += autofs4.o
+
+autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
new file mode 100644
index 0000000..f5a52c8
--- /dev/null
+++ b/fs/autofs4/autofs_i.h
@@ -0,0 +1,193 @@
+/* -*- c -*- ------------------------------------------------------------- *
+ *   
+ * linux/fs/autofs/autofs_i.h
+ *
+ *   Copyright 1997-1998 Transmeta Corporation - All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/* Internal header file for autofs */
+
+#include <linux/auto_fs4.h>
+#include <linux/list.h>
+
+/* This is the range of ioctl() numbers we claim as ours */
+#define AUTOFS_IOC_FIRST     AUTOFS_IOC_READY
+#define AUTOFS_IOC_COUNT     32
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DPRINTK(fmt,args...) do { printk(KERN_DEBUG "pid %d: %s: " fmt "\n" , current->pid , __FUNCTION__ , ##args); } while(0)
+#else
+#define DPRINTK(fmt,args...) do {} while(0)
+#endif
+
+#define AUTOFS_SUPER_MAGIC 0x0187
+
+/*
+ * If the daemon returns a negative response (AUTOFS_IOC_FAIL) then the
+ * kernel will keep the negative response cached for up to the time given
+ * here, although the time can be shorter if the kernel throws the dcache
+ * entry away.  This probably should be settable from user space.
+ */
+#define AUTOFS_NEGATIVE_TIMEOUT (60*HZ)	/* 1 minute */
+
+/* Unified info structure.  This is pointed to by both the dentry and
+   inode structures.  Each file in the filesystem has an instance of this
+   structure.  It holds a reference to the dentry, so dentries are never
+   flushed while the file exists.  All name lookups are dealt with at the
+   dentry level, although the filesystem can interfere in the validation
+   process.  Readdir is implemented by traversing the dentry lists. */
+struct autofs_info {
+	struct dentry	*dentry;
+	struct inode	*inode;
+
+	int		flags;
+
+	struct autofs_sb_info *sbi;
+	unsigned long last_used;
+
+	mode_t	mode;
+	size_t	size;
+
+	void (*free)(struct autofs_info *);
+	union {
+		const char *symlink;
+	} u;
+};
+
+#define AUTOFS_INF_EXPIRING	(1<<0) /* dentry is in the process of expiring */
+
+struct autofs_wait_queue {
+	wait_queue_head_t queue;
+	struct autofs_wait_queue *next;
+	autofs_wqt_t wait_queue_token;
+	/* We use the following to see what we are waiting for */
+	int hash;
+	int len;
+	char *name;
+	/* This is for status reporting upon return */
+	int status;
+	atomic_t wait_ctr;
+};
+
+#define AUTOFS_SBI_MAGIC 0x6d4a556d
+
+struct autofs_sb_info {
+	u32 magic;
+	struct file *pipe;
+	pid_t oz_pgrp;
+	int catatonic;
+	int version;
+	int sub_version;
+	unsigned long exp_timeout;
+	int reghost_enabled;
+	int needs_reghost;
+	struct super_block *sb;
+	struct semaphore wq_sem;
+	struct autofs_wait_queue *queues; /* Wait queue pointer */
+};
+
+static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
+{
+	return (struct autofs_sb_info *)(sb->s_fs_info);
+}
+
+static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
+{
+	return (struct autofs_info *)(dentry->d_fsdata);
+}
+
+/* autofs4_oz_mode(): do we see the man behind the curtain?  (The
+   processes which do manipulations for us in user space see the raw
+   filesystem without "magic".) */
+
+static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) {
+	return sbi->catatonic || process_group(current) == sbi->oz_pgrp;
+}
+
+/* Does a dentry have some pending activity? */
+static inline int autofs4_ispending(struct dentry *dentry)
+{
+	struct autofs_info *inf = autofs4_dentry_ino(dentry);
+
+	return (dentry->d_flags & DCACHE_AUTOFS_PENDING) ||
+		(inf != NULL && inf->flags & AUTOFS_INF_EXPIRING);
+}
+
+static inline void autofs4_copy_atime(struct file *src, struct file *dst)
+{
+	dst->f_dentry->d_inode->i_atime = src->f_dentry->d_inode->i_atime;
+	return;
+}
+
+struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *);
+void autofs4_free_ino(struct autofs_info *);
+
+/* Expiration */
+int is_autofs4_dentry(struct dentry *);
+int autofs4_expire_run(struct super_block *, struct vfsmount *,
+			struct autofs_sb_info *,
+			struct autofs_packet_expire __user *);
+int autofs4_expire_multi(struct super_block *, struct vfsmount *,
+			struct autofs_sb_info *, int __user *);
+
+/* Operations structures */
+
+extern struct inode_operations autofs4_symlink_inode_operations;
+extern struct inode_operations autofs4_dir_inode_operations;
+extern struct inode_operations autofs4_root_inode_operations;
+extern struct file_operations autofs4_dir_operations;
+extern struct file_operations autofs4_root_operations;
+
+/* Initializing function */
+
+int autofs4_fill_super(struct super_block *, void *, int);
+struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info *sbi, mode_t mode);
+
+/* Queue management functions */
+
+enum autofs_notify
+{
+	NFY_NONE,
+	NFY_MOUNT,
+	NFY_EXPIRE
+};
+
+int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
+int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int);
+void autofs4_catatonic_mode(struct autofs_sb_info *);
+
+static inline int simple_positive(struct dentry *dentry)
+{
+	return dentry->d_inode && !d_unhashed(dentry);
+}
+
+static inline int simple_empty_nolock(struct dentry *dentry)
+{
+	struct dentry *child;
+	int ret = 0;
+
+	list_for_each_entry(child, &dentry->d_subdirs, d_child)
+		if (simple_positive(child))
+			goto out;
+	ret = 1;
+out:
+	return ret;
+}
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
new file mode 100644
index 0000000..31540a6
--- /dev/null
+++ b/fs/autofs4/expire.c
@@ -0,0 +1,358 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/expire.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *  Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ *  Copyright 2001-2003 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include "autofs_i.h"
+
+static unsigned long now;
+
+/* Check if a dentry can be expired; return 1 if it can, 0 otherwise */
+static inline int autofs4_can_expire(struct dentry *dentry,
+					unsigned long timeout, int do_now)
+{
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+
+	/* dentry in the process of being deleted */
+	if (ino == NULL)
+		return 0;
+
+	/* No point expiring a pending mount */
+	if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
+		return 0;
+
+	if (!do_now) {
+		/* Too young to die */
+		if (time_after(ino->last_used + timeout, now))
+			return 0;
+
+		/* update last_used here :-
+		   - obviously makes sense if it is in use now
+		   - less obviously, prevents rapid-fire expire
+		     attempts if expire fails the first time */
+		ino->last_used = now;
+	}
+
+	return 1;
+}
+
+/* Check a mount point for busyness; return 1 if not busy, 0 otherwise */
+static int autofs4_check_mount(struct vfsmount *mnt, struct dentry *dentry)
+{
+	int status = 0;
+
+	DPRINTK("dentry %p %.*s",
+		dentry, (int)dentry->d_name.len, dentry->d_name.name);
+
+	mntget(mnt);
+	dget(dentry);
+
+	if (!follow_down(&mnt, &dentry))
+		goto done;
+
+	while (d_mountpoint(dentry) && follow_down(&mnt, &dentry))
+		;
+
+	/* This is an autofs submount, we can't expire it */
+	if (is_autofs4_dentry(dentry))
+		goto done;
+
+	/* The big question */
+	if (may_umount_tree(mnt) == 0)
+		status = 1;
+done:
+	DPRINTK("returning = %d", status);
+	mntput(mnt);
+	dput(dentry);
+	return status;
+}
+
+/* Check a directory tree of mount points for busyness
+ * The tree is not busy iff no mountpoints are busy
+ * Return 1 if the tree can be expired (no mountpoint in it is busy), 0 otherwise
+ */
+static int autofs4_check_tree(struct vfsmount *mnt,
+	       		      struct dentry *top,
+			      unsigned long timeout,
+			      int do_now)
+{
+	struct dentry *this_parent = top;
+	struct list_head *next;
+
+	DPRINTK("parent %p %.*s",
+		top, (int)top->d_name.len, top->d_name.name);
+
+	/* Negative dentry - give up */
+	if (!simple_positive(top))
+		return 0;
+
+	/* Timeout of a tree mount is determined by its top dentry */
+	if (!autofs4_can_expire(top, timeout, do_now))
+		return 0;
+
+	spin_lock(&dcache_lock);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct dentry *dentry = list_entry(next, struct dentry, d_child);
+
+		/* Negative dentry - give up */
+		if (!simple_positive(dentry)) {
+			next = next->next;
+			continue;
+		}
+
+		DPRINTK("dentry %p %.*s",
+			dentry, (int)dentry->d_name.len, dentry->d_name.name);
+
+		if (!simple_empty_nolock(dentry)) {
+			this_parent = dentry;
+			goto repeat;
+		}
+
+		dentry = dget(dentry);
+		spin_unlock(&dcache_lock);
+
+		if (d_mountpoint(dentry)) {
+			/* First busy => tree busy */
+			if (!autofs4_check_mount(mnt, dentry)) {
+				dput(dentry);
+				return 0;
+			}
+		}
+
+		dput(dentry);
+		spin_lock(&dcache_lock);
+		next = next->next;
+	}
+
+	if (this_parent != top) {
+		next = this_parent->d_child.next;
+		this_parent = this_parent->d_parent;
+		goto resume;
+	}
+	spin_unlock(&dcache_lock);
+
+	return 1;
+}
+
+static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
+					   struct dentry *parent,
+					   unsigned long timeout,
+					   int do_now)
+{
+	struct dentry *this_parent = parent;
+	struct list_head *next;
+
+	DPRINTK("parent %p %.*s",
+		parent, (int)parent->d_name.len, parent->d_name.name);
+
+	spin_lock(&dcache_lock);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct dentry *dentry = list_entry(next, struct dentry, d_child);
+
+		/* Negative dentry - give up */
+		if (!simple_positive(dentry)) {
+			next = next->next;
+			continue;
+		}
+
+		DPRINTK("dentry %p %.*s",
+			dentry, (int)dentry->d_name.len, dentry->d_name.name);
+
+		if (!list_empty(&dentry->d_subdirs)) {
+			this_parent = dentry;
+			goto repeat;
+		}
+
+		dentry = dget(dentry);
+		spin_unlock(&dcache_lock);
+
+		if (d_mountpoint(dentry)) {
+			/* Can we expire this guy */
+			if (!autofs4_can_expire(dentry, timeout, do_now))
+				goto cont;
+
+			/* Can we umount this guy */
+			if (autofs4_check_mount(mnt, dentry))
+				return dentry;
+
+		}
+cont:
+		dput(dentry);
+		spin_lock(&dcache_lock);
+		next = next->next;
+	}
+
+	if (this_parent != parent) {
+		next = this_parent->d_child.next;
+		this_parent = this_parent->d_parent;
+		goto resume;
+	}
+	spin_unlock(&dcache_lock);
+
+	return NULL;
+}
+
+/*
+ * Find an eligible tree to time-out
+ * A tree is eligible if :-
+ *  - it is unused by any user process
+ *  - it has been unused for exp_timeout time
+ */
+static struct dentry *autofs4_expire(struct super_block *sb,
+				     struct vfsmount *mnt,
+				     struct autofs_sb_info *sbi,
+				     int how)
+{
+	unsigned long timeout;
+	struct dentry *root = sb->s_root;
+	struct dentry *expired = NULL;
+	struct list_head *next;
+	int do_now = how & AUTOFS_EXP_IMMEDIATE;
+	int exp_leaves = how & AUTOFS_EXP_LEAVES;
+
+	if ( !sbi->exp_timeout || !root )
+		return NULL;
+
+	now = jiffies;
+	timeout = sbi->exp_timeout;
+
+	spin_lock(&dcache_lock);
+	next = root->d_subdirs.next;
+
+	/* On exit from the loop 'expired' is set to a dget'ed dentry
+	 * to expire, or it is NULL */
+	while ( next != &root->d_subdirs ) {
+		struct dentry *dentry = list_entry(next, struct dentry, d_child);
+
+		/* Negative dentry - give up */
+		if ( !simple_positive(dentry) ) {
+			next = next->next;
+			continue;
+		}
+
+		dentry = dget(dentry);
+		spin_unlock(&dcache_lock);
+
+		/* Case 1: indirect mount or top level direct mount */
+		if (d_mountpoint(dentry)) {
+			DPRINTK("checking mountpoint %p %.*s",
+				dentry, (int)dentry->d_name.len, dentry->d_name.name);
+
+			/* Can we expire this guy */
+			if (!autofs4_can_expire(dentry, timeout, do_now))
+				goto next;
+
+			/* Can we umount this guy */
+			if (autofs4_check_mount(mnt, dentry)) {
+				expired = dentry;
+				break;
+			}
+			goto next;
+		}
+
+		if ( simple_empty(dentry) )
+			goto next;
+
+		/* Case 2: tree mount, expire iff entire tree is not busy */
+		if (!exp_leaves) {
+			if (autofs4_check_tree(mnt, dentry, timeout, do_now)) {
+				expired = dentry;
+				break;
+			}
+		/* Case 3: direct mount, expire individual leaves */
+		} else {
+			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
+			if (expired) {
+				dput(dentry);
+				break;
+			}
+		}
+next:
+		dput(dentry);
+		spin_lock(&dcache_lock);
+		next = next->next;
+	}
+
+	if ( expired ) {
+		DPRINTK("returning %p %.*s",
+			expired, (int)expired->d_name.len, expired->d_name.name);
+		spin_lock(&dcache_lock);
+		list_del(&expired->d_parent->d_subdirs);
+		list_add(&expired->d_parent->d_subdirs, &expired->d_child);
+		spin_unlock(&dcache_lock);
+		return expired;
+	}
+	spin_unlock(&dcache_lock);
+
+	return NULL;
+}
+
+/* Perform an expiry operation */
+int autofs4_expire_run(struct super_block *sb,
+		      struct vfsmount *mnt,
+		      struct autofs_sb_info *sbi,
+		      struct autofs_packet_expire __user *pkt_p)
+{
+	struct autofs_packet_expire pkt;
+	struct dentry *dentry;
+
+	memset(&pkt,0,sizeof pkt);
+
+	pkt.hdr.proto_version = sbi->version;
+	pkt.hdr.type = autofs_ptype_expire;
+
+	if ((dentry = autofs4_expire(sb, mnt, sbi, 0)) == NULL)
+		return -EAGAIN;
+
+	pkt.len = dentry->d_name.len;
+	memcpy(pkt.name, dentry->d_name.name, pkt.len);
+	pkt.name[pkt.len] = '\0';
+	dput(dentry);
+
+	if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) )
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Call repeatedly until it returns -EAGAIN, meaning there's nothing
+   more to be done */
+int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+			struct autofs_sb_info *sbi, int __user *arg)
+{
+	struct dentry *dentry;
+	int ret = -EAGAIN;
+	int do_now = 0;
+
+	if (arg && get_user(do_now, arg))
+		return -EFAULT;
+
+	if ((dentry = autofs4_expire(sb, mnt, sbi, do_now)) != NULL) {
+		struct autofs_info *de_info = autofs4_dentry_ino(dentry);
+
+		/* This is synchronous because it makes things a
+                   little easier for the daemon */
+		de_info->flags |= AUTOFS_INF_EXPIRING;
+		ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);
+		de_info->flags &= ~AUTOFS_INF_EXPIRING;
+		dput(dentry);
+	}
+		
+	return ret;
+}
+
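For context, this expiry path is driven from user space: the automount daemon loops on the AUTOFS_IOC_EXPIRE_MULTI ioctl until the kernel answers -EAGAIN, as the comment above autofs4_expire_multi() says. A minimal, hypothetical daemon-side sketch follows (it assumes the AUTOFS_IOC_EXPIRE_MULTI and AUTOFS_EXP_IMMEDIATE constants exported by <linux/auto_fs4.h>; it is illustrative and not part of this patch):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/auto_fs4.h>

/* Expire whatever is currently eligible on one autofs mount.
 * 'how' is the flag word autofs4_expire_multi() reads with get_user():
 * 0 honours the timeout, AUTOFS_EXP_IMMEDIATE ignores it. */
static int expire_all(int ioctlfd, int how)
{
	/* Each successful call hands one expirable mount to the daemon. */
	while (ioctl(ioctlfd, AUTOFS_IOC_EXPIRE_MULTI, &how) == 0)
		;
	/* EAGAIN means nothing more can be expired right now. */
	return errno == EAGAIN ? 0 : -1;
}

Note that each call blocks until the expire packet it triggers has been acknowledged through the wait-queue ioctls, so the pipe has to be serviced from a different context than the one issuing this ioctl.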
diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c
new file mode 100644
index 0000000..acecec8
--- /dev/null
+++ b/fs/autofs4/init.c
@@ -0,0 +1,42 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/init.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include "autofs_i.h"
+
+static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, autofs4_fill_super);
+}
+
+static struct file_system_type autofs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "autofs",
+	.get_sb		= autofs_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static int __init init_autofs4_fs(void)
+{
+	return register_filesystem(&autofs_fs_type);
+}
+
+static void __exit exit_autofs4_fs(void)
+{
+	unregister_filesystem(&autofs_fs_type);
+}
+
+module_init(init_autofs4_fs) 
+module_exit(exit_autofs4_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
new file mode 100644
index 0000000..a525607
--- /dev/null
+++ b/fs/autofs4/inode.c
@@ -0,0 +1,324 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/inode.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/parser.h>
+#include <linux/bitops.h>
+#include "autofs_i.h"
+#include <linux/module.h>
+
+static void ino_lnkfree(struct autofs_info *ino)
+{
+	if (ino->u.symlink) {
+		kfree(ino->u.symlink);
+		ino->u.symlink = NULL;
+	}
+}
+
+struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
+				     struct autofs_sb_info *sbi, mode_t mode)
+{
+	int reinit = 1;
+
+	if (ino == NULL) {
+		reinit = 0;
+		ino = kmalloc(sizeof(*ino), GFP_KERNEL);
+	}
+
+	if (ino == NULL)
+		return NULL;
+
+	ino->flags = 0;
+	ino->mode = mode;
+	ino->inode = NULL;
+	ino->dentry = NULL;
+	ino->size = 0;
+
+	ino->last_used = jiffies;
+
+	ino->sbi = sbi;
+
+	if (reinit && ino->free)
+		(ino->free)(ino);
+
+	memset(&ino->u, 0, sizeof(ino->u));
+
+	ino->free = NULL;
+
+	if (S_ISLNK(mode))
+		ino->free = ino_lnkfree;
+
+	return ino;
+}
+
+void autofs4_free_ino(struct autofs_info *ino)
+{
+	if (ino->dentry) {
+		ino->dentry->d_fsdata = NULL;
+		if (ino->dentry->d_inode)
+			dput(ino->dentry);
+		ino->dentry = NULL;
+	}
+	if (ino->free)
+		(ino->free)(ino);
+	kfree(ino);
+}
+
+static void autofs4_put_super(struct super_block *sb)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(sb);
+
+	sb->s_fs_info = NULL;
+
+	if ( !sbi->catatonic )
+		autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */
+
+	kfree(sbi);
+
+	DPRINTK("shutting down");
+}
+
+static struct super_operations autofs4_sops = {
+	.put_super	= autofs4_put_super,
+	.statfs		= simple_statfs,
+};
+
+enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto};
+
+static match_table_t tokens = {
+	{Opt_fd, "fd=%u"},
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_pgrp, "pgrp=%u"},
+	{Opt_minproto, "minproto=%u"},
+	{Opt_maxproto, "maxproto=%u"},
+	{Opt_err, NULL}
+};
+
+static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid,
+			 pid_t *pgrp, int *minproto, int *maxproto)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+
+	*uid = current->uid;
+	*gid = current->gid;
+	*pgrp = process_group(current);
+
+	*minproto = AUTOFS_MIN_PROTO_VERSION;
+	*maxproto = AUTOFS_MAX_PROTO_VERSION;
+
+	*pipefd = -1;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_fd:
+			if (match_int(args, pipefd))
+				return 1;
+			break;
+		case Opt_uid:
+			if (match_int(args, &option))
+				return 1;
+			*uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(args, &option))
+				return 1;
+			*gid = option;
+			break;
+		case Opt_pgrp:
+			if (match_int(args, &option))
+				return 1;
+			*pgrp = option;
+			break;
+		case Opt_minproto:
+			if (match_int(args, &option))
+				return 1;
+			*minproto = option;
+			break;
+		case Opt_maxproto:
+			if (match_int(args, &option))
+				return 1;
+			*maxproto = option;
+			break;
+		default:
+			return 1;
+		}
+	}
+	return (*pipefd < 0);
+}
+
+static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
+{
+	struct autofs_info *ino;
+
+	ino = autofs4_init_ino(NULL, sbi, S_IFDIR | 0755);
+	if (!ino)
+		return NULL;
+
+	return ino;
+}
+
+int autofs4_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct inode * root_inode;
+	struct dentry * root;
+	struct file * pipe;
+	int pipefd;
+	struct autofs_sb_info *sbi;
+	struct autofs_info *ino;
+	int minproto, maxproto;
+
+	sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL);
+	if ( !sbi )
+		goto fail_unlock;
+	DPRINTK("starting up, sbi = %p",sbi);
+
+	memset(sbi, 0, sizeof(*sbi));
+
+	s->s_fs_info = sbi;
+	sbi->magic = AUTOFS_SBI_MAGIC;
+	sbi->catatonic = 0;
+	sbi->exp_timeout = 0;
+	sbi->oz_pgrp = process_group(current);
+	sbi->sb = s;
+	sbi->version = 0;
+	sbi->sub_version = 0;
+	init_MUTEX(&sbi->wq_sem);
+	sbi->queues = NULL;
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = AUTOFS_SUPER_MAGIC;
+	s->s_op = &autofs4_sops;
+	s->s_time_gran = 1;
+
+	/*
+	 * Get the root inode and dentry, but defer checking for errors.
+	 */
+	ino = autofs4_mkroot(sbi);
+	if (!ino)
+		goto fail_free;
+	root_inode = autofs4_get_inode(s, ino);
+	kfree(ino);
+	if (!root_inode)
+		goto fail_free;
+
+	root_inode->i_op = &autofs4_root_inode_operations;
+	root_inode->i_fop = &autofs4_root_operations;
+	root = d_alloc_root(root_inode);
+	pipe = NULL;
+
+	if (!root)
+		goto fail_iput;
+
+	/* Can this call block? */
+	if (parse_options(data, &pipefd,
+			  &root_inode->i_uid, &root_inode->i_gid,
+			  &sbi->oz_pgrp,
+			  &minproto, &maxproto)) {
+		printk("autofs: called with bogus options\n");
+		goto fail_dput;
+	}
+
+	/* Couldn't this be tested earlier? */
+	if (maxproto < AUTOFS_MIN_PROTO_VERSION ||
+	    minproto > AUTOFS_MAX_PROTO_VERSION) {
+		printk("autofs: kernel protocol version does not match daemon: "
+		       "daemon (%d, %d) kernel (%d, %d)\n",
+			minproto, maxproto,
+			AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
+		goto fail_dput;
+	}
+
+	sbi->version = maxproto > AUTOFS_MAX_PROTO_VERSION ? AUTOFS_MAX_PROTO_VERSION : maxproto;
+	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+
+	DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp);
+	pipe = fget(pipefd);
+	
+	if ( !pipe ) {
+		printk("autofs: could not open pipe file descriptor\n");
+		goto fail_dput;
+	}
+	if ( !pipe->f_op || !pipe->f_op->write )
+		goto fail_fput;
+	sbi->pipe = pipe;
+
+	/*
+	 * Success! Install the root dentry now to indicate completion.
+	 */
+	s->s_root = root;
+	return 0;
+	
+	/*
+	 * Failure ... clean up.
+	 */
+fail_fput:
+	printk("autofs: pipe file descriptor does not contain proper ops\n");
+	fput(pipe);
+	/* fall through */
+fail_dput:
+	dput(root);
+	goto fail_free;
+fail_iput:
+	printk("autofs: get root dentry failed\n");
+	iput(root_inode);
+fail_free:
+	kfree(sbi);
+fail_unlock:
+	return -EINVAL;
+}
+
+struct inode *autofs4_get_inode(struct super_block *sb,
+				struct autofs_info *inf)
+{
+	struct inode *inode = new_inode(sb);
+
+	if (inode == NULL)
+		return NULL;
+
+	inf->inode = inode;
+	inode->i_mode = inf->mode;
+	if (sb->s_root) {
+		inode->i_uid = sb->s_root->d_inode->i_uid;
+		inode->i_gid = sb->s_root->d_inode->i_gid;
+	} else {
+		inode->i_uid = 0;
+		inode->i_gid = 0;
+	}
+	inode->i_blksize = PAGE_CACHE_SIZE;
+	inode->i_blocks = 0;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+	if (S_ISDIR(inf->mode)) {
+		inode->i_nlink = 2;
+		inode->i_op = &autofs4_dir_inode_operations;
+		inode->i_fop = &autofs4_dir_operations;
+	} else if (S_ISLNK(inf->mode)) {
+		inode->i_size = inf->size;
+		inode->i_op = &autofs4_symlink_inode_operations;
+	}
+
+	return inode;
+}
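For context, the option string parsed by parse_options() above is normally built by the automount daemon when it mounts the filesystem. A hypothetical daemon-side sketch (the source name, mount point and pipe descriptor are invented; only the option names come from the token table above):

#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>

/* Mount an autofs filesystem on 'where', telling the kernel which pipe
 * to send requests down and which process group owns the mount. */
static int mount_autofs(const char *where, int pipefd)
{
	char opts[64];

	snprintf(opts, sizeof(opts), "fd=%d,pgrp=%d", pipefd, (int)getpgrp());
	/* "autofs" is the file_system_type name registered in init.c;
	 * uid, gid, minproto and maxproto fall back to the defaults that
	 * parse_options() fills in when they are omitted. */
	return mount("automount", where, "autofs", 0, opts);
}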
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
new file mode 100644
index 0000000..3765c04
--- /dev/null
+++ b/fs/autofs4/root.c
@@ -0,0 +1,808 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/root.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *  Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ *  Copyright 2001-2003 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/smp_lock.h>
+#include "autofs_i.h"
+
+static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
+static int autofs4_dir_unlink(struct inode *,struct dentry *);
+static int autofs4_dir_rmdir(struct inode *,struct dentry *);
+static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
+static int autofs4_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
+static int autofs4_dir_open(struct inode *inode, struct file *file);
+static int autofs4_dir_close(struct inode *inode, struct file *file);
+static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir);
+static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir);
+static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
+static int autofs4_dcache_readdir(struct file *, void *, filldir_t);
+
+struct file_operations autofs4_root_operations = {
+	.open		= dcache_dir_open,
+	.release	= dcache_dir_close,
+	.read		= generic_read_dir,
+	.readdir	= autofs4_root_readdir,
+	.ioctl		= autofs4_root_ioctl,
+};
+
+struct file_operations autofs4_dir_operations = {
+	.open		= autofs4_dir_open,
+	.release	= autofs4_dir_close,
+	.read		= generic_read_dir,
+	.readdir	= autofs4_dir_readdir,
+};
+
+struct inode_operations autofs4_root_inode_operations = {
+	.lookup		= autofs4_lookup,
+	.unlink		= autofs4_dir_unlink,
+	.symlink	= autofs4_dir_symlink,
+	.mkdir		= autofs4_dir_mkdir,
+	.rmdir		= autofs4_dir_rmdir,
+};
+
+struct inode_operations autofs4_dir_inode_operations = {
+	.lookup		= autofs4_lookup,
+	.unlink		= autofs4_dir_unlink,
+	.symlink	= autofs4_dir_symlink,
+	.mkdir		= autofs4_dir_mkdir,
+	.rmdir		= autofs4_dir_rmdir,
+};
+
+static int autofs4_root_readdir(struct file *file, void *dirent,
+				filldir_t filldir)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(file->f_dentry->d_sb);
+	int oz_mode = autofs4_oz_mode(sbi);
+
+	DPRINTK("called, filp->f_pos = %lld", file->f_pos);
+
+	/*
+	 * Don't set reghost flag if:
+	 * 1) f_pos is larger than zero -- we've already been here.
+	 * 2) we haven't even enabled reghosting in the 1st place.
+	 * 3) this isn't the daemon doing the readdir
+	 */
+	if (oz_mode && file->f_pos == 0 && sbi->reghost_enabled)
+		sbi->needs_reghost = 1;
+
+	DPRINTK("needs_reghost = %d", sbi->needs_reghost);
+
+	return autofs4_dcache_readdir(file, dirent, filldir);
+}
+
+/* Update usage from here to top of tree, so that scan of
+   top-level directories will give a useful result */
+static void autofs4_update_usage(struct dentry *dentry)
+{
+	struct dentry *top = dentry->d_sb->s_root;
+
+	spin_lock(&dcache_lock);
+	for(; dentry != top; dentry = dentry->d_parent) {
+		struct autofs_info *ino = autofs4_dentry_ino(dentry);
+
+		if (ino) {
+			update_atime(dentry->d_inode);
+			ino->last_used = jiffies;
+		}
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
+ * From 2.4 kernel readdir.c
+ */
+static int autofs4_dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	int i;
+	struct dentry *dentry = filp->f_dentry;
+
+	i = filp->f_pos;
+	switch (i) {
+		case 0:
+			if (filldir(dirent, ".", 1, i, dentry->d_inode->i_ino, DT_DIR) < 0)
+				break;
+			i++;
+			filp->f_pos++;
+			/* fallthrough */
+		case 1:
+			if (filldir(dirent, "..", 2, i, dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+				break;
+			i++;
+			filp->f_pos++;
+			/* fallthrough */
+		default: {
+			struct list_head *list;
+			int j = i-2;
+
+			spin_lock(&dcache_lock);
+			list = dentry->d_subdirs.next;
+
+			for (;;) {
+				if (list == &dentry->d_subdirs) {
+					spin_unlock(&dcache_lock);
+					return 0;
+				}
+				if (!j)
+					break;
+				j--;
+				list = list->next;
+			}
+
+			while(1) {
+				struct dentry *de = list_entry(list, struct dentry, d_child);
+
+				if (!d_unhashed(de) && de->d_inode) {
+					spin_unlock(&dcache_lock);
+					if (filldir(dirent, de->d_name.name, de->d_name.len, filp->f_pos, de->d_inode->i_ino, DT_UNKNOWN) < 0)
+						break;
+					spin_lock(&dcache_lock);
+				}
+				filp->f_pos++;
+				list = list->next;
+				if (list != &dentry->d_subdirs)
+					continue;
+				spin_unlock(&dcache_lock);
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static int autofs4_dir_open(struct inode *inode, struct file *file)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct vfsmount *mnt = file->f_vfsmnt;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	int status;
+
+	DPRINTK("file=%p dentry=%p %.*s",
+		file, dentry, dentry->d_name.len, dentry->d_name.name);
+
+	if (autofs4_oz_mode(sbi))
+		goto out;
+
+	if (autofs4_ispending(dentry)) {
+		DPRINTK("dentry busy");
+		return -EBUSY;
+	}
+
+	if (!d_mountpoint(dentry) && dentry->d_op && dentry->d_op->d_revalidate) {
+		struct nameidata nd;
+		int empty;
+
+		/* In case there are stale directory dentries from a failed mount */
+		spin_lock(&dcache_lock);
+		empty = list_empty(&dentry->d_subdirs);
+		spin_unlock(&dcache_lock);
+
+		if (!empty)
+			d_invalidate(dentry);
+
+		nd.flags = LOOKUP_DIRECTORY;
+		status = (dentry->d_op->d_revalidate)(dentry, &nd);
+
+		if (!status)
+			return -ENOENT;
+	}
+
+	if (d_mountpoint(dentry)) {
+		struct file *fp = NULL;
+		struct vfsmount *fp_mnt = mntget(mnt);
+		struct dentry *fp_dentry = dget(dentry);
+
+		while (follow_down(&fp_mnt, &fp_dentry) && d_mountpoint(fp_dentry));
+
+		fp = dentry_open(fp_dentry, fp_mnt, file->f_flags);
+		status = PTR_ERR(fp);
+		if (IS_ERR(fp)) {
+			file->private_data = NULL;
+			return status;
+		}
+		file->private_data = fp;
+	}
+out:
+	return 0;
+}
+
+static int autofs4_dir_close(struct inode *inode, struct file *file)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+
+	DPRINTK("file=%p dentry=%p %.*s",
+		file, dentry, dentry->d_name.len, dentry->d_name.name);
+
+	if (autofs4_oz_mode(sbi))
+		goto out;
+
+	if (autofs4_ispending(dentry)) {
+		DPRINTK("dentry busy");
+		return -EBUSY;
+	}
+
+	if (d_mountpoint(dentry)) {
+		struct file *fp = file->private_data;
+
+		if (!fp)
+			return -ENOENT;
+
+		filp_close(fp, current->files);
+		file->private_data = NULL;
+	}
+out:
+	return 0;
+}
+
+static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	int status;
+
+	DPRINTK("file=%p dentry=%p %.*s",
+		file, dentry, dentry->d_name.len, dentry->d_name.name);
+
+	if (autofs4_oz_mode(sbi))
+		goto out;
+
+	if (autofs4_ispending(dentry)) {
+		DPRINTK("dentry busy");
+		return -EBUSY;
+	}
+
+	if (d_mountpoint(dentry)) {
+		struct file *fp = file->private_data;
+
+		if (!fp)
+			return -ENOENT;
+
+		if (!fp->f_op || !fp->f_op->readdir)
+			goto out;
+
+		status = vfs_readdir(fp, filldir, dirent);
+		file->f_pos = fp->f_pos;
+		if (status)
+			autofs4_copy_atime(file, fp);
+		return status;
+	}
+out:
+	return autofs4_dcache_readdir(file, dirent, filldir);
+}
+
+static int try_to_fill_dentry(struct dentry *dentry, 
+			      struct super_block *sb,
+			      struct autofs_sb_info *sbi, int flags)
+{
+	struct autofs_info *de_info = autofs4_dentry_ino(dentry);
+	int status = 0;
+
+	/* Block on any pending expiry here; invalidate the dentry
+           when expiration is done to trigger mount request with a new
+           dentry */
+	if (de_info && (de_info->flags & AUTOFS_INF_EXPIRING)) {
+		DPRINTK("waiting for expire %p name=%.*s",
+			 dentry, dentry->d_name.len, dentry->d_name.name);
+
+		status = autofs4_wait(sbi, dentry, NFY_NONE);
+		
+		DPRINTK("expire done status=%d", status);
+		
+		return 0;
+	}
+
+	DPRINTK("dentry=%p %.*s ino=%p",
+		 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+
+	/* Wait for a pending mount, triggering one if there isn't one already */
+	if (dentry->d_inode == NULL) {
+		DPRINTK("waiting for mount name=%.*s",
+			 dentry->d_name.len, dentry->d_name.name);
+
+		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+		 
+		DPRINTK("mount done status=%d", status);
+
+		if (status && dentry->d_inode)
+			return 0; /* Try to get the kernel to invalidate this dentry */
+		
+		/* Turn this into a real negative dentry? */
+		if (status == -ENOENT) {
+			dentry->d_time = jiffies + AUTOFS_NEGATIVE_TIMEOUT;
+			spin_lock(&dentry->d_lock);
+			dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
+			spin_unlock(&dentry->d_lock);
+			return 1;
+		} else if (status) {
+			/* Return a negative dentry, but leave it "pending" */
+			return 1;
+		}
+	/* Trigger mount for path component or follow link */
+	} else if (flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) ||
+			current->link_count) {
+		DPRINTK("waiting for mount name=%.*s",
+			dentry->d_name.len, dentry->d_name.name);
+
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags |= DCACHE_AUTOFS_PENDING;
+		spin_unlock(&dentry->d_lock);
+		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+
+		DPRINTK("mount done status=%d", status);
+
+		if (status) {
+			spin_lock(&dentry->d_lock);
+			dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
+			spin_unlock(&dentry->d_lock);
+			return 0;
+		}
+	}
+
+	/* We don't update the usages for the autofs daemon itself; this
+	   is necessary for recursive autofs mounts */
+	if (!autofs4_oz_mode(sbi))
+		autofs4_update_usage(dentry);
+
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
+	spin_unlock(&dentry->d_lock);
+	return 1;
+}
+
+/*
+ * Revalidate is called on every cache lookup.  Some of those
+ * cache lookups may actually happen while the dentry is not
+ * yet completely filled in, and revalidate has to delay such
+ * lookups..
+ */
+static int autofs4_revalidate(struct dentry * dentry, struct nameidata *nd)
+{
+	struct inode * dir = dentry->d_parent->d_inode;
+	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
+	int oz_mode = autofs4_oz_mode(sbi);
+	int flags = nd ? nd->flags : 0;
+	int status = 1;
+
+	/* Pending dentry */
+	if (autofs4_ispending(dentry)) {
+		if (!oz_mode)
+			status = try_to_fill_dentry(dentry, dir->i_sb, sbi, flags);
+		return status;
+	}
+
+	/* Negative dentry.. invalidate if "old" */
+	if (dentry->d_inode == NULL)
+		return (dentry->d_time - jiffies <= AUTOFS_NEGATIVE_TIMEOUT);
+
+	/* Check for a non-mountpoint directory with no contents */
+	spin_lock(&dcache_lock);
+	if (S_ISDIR(dentry->d_inode->i_mode) &&
+	    !d_mountpoint(dentry) && 
+	    list_empty(&dentry->d_subdirs)) {
+		DPRINTK("dentry=%p %.*s, emptydir",
+			 dentry, dentry->d_name.len, dentry->d_name.name);
+		spin_unlock(&dcache_lock);
+		if (!oz_mode)
+			status = try_to_fill_dentry(dentry, dir->i_sb, sbi, flags);
+		return status;
+	}
+	spin_unlock(&dcache_lock);
+
+	/* Update the usage list */
+	if (!oz_mode)
+		autofs4_update_usage(dentry);
+
+	return 1;
+}
+
+static void autofs4_dentry_release(struct dentry *de)
+{
+	struct autofs_info *inf;
+
+	DPRINTK("releasing %p", de);
+
+	inf = autofs4_dentry_ino(de);
+	de->d_fsdata = NULL;
+
+	if (inf) {
+		inf->dentry = NULL;
+		inf->inode = NULL;
+
+		autofs4_free_ino(inf);
+	}
+}
+
+/* For dentries of directories in the root dir */
+static struct dentry_operations autofs4_root_dentry_operations = {
+	.d_revalidate	= autofs4_revalidate,
+	.d_release	= autofs4_dentry_release,
+};
+
+/* For other dentries */
+static struct dentry_operations autofs4_dentry_operations = {
+	.d_revalidate	= autofs4_revalidate,
+	.d_release	= autofs4_dentry_release,
+};
+
+/* Lookups in the root directory */
+static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct autofs_sb_info *sbi;
+	int oz_mode;
+
+	DPRINTK("name = %.*s",
+		dentry->d_name.len, dentry->d_name.name);
+
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);/* File name too long to exist */
+
+	sbi = autofs4_sbi(dir->i_sb);
+
+	oz_mode = autofs4_oz_mode(sbi);
+	DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
+		 current->pid, process_group(current), sbi->catatonic, oz_mode);
+
+	/*
+	 * Mark the dentry incomplete, but add it. This is needed so
+	 * that the VFS layer knows about the dentry, and we can count
+	 * on catching any lookups through the revalidate.
+	 *
+	 * Let all the hard work be done by the revalidate function that
+	 * needs to be able to do this anyway..
+	 *
+	 * We need to do this before we release the directory semaphore.
+	 */
+	dentry->d_op = &autofs4_root_dentry_operations;
+
+	if (!oz_mode) {
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags |= DCACHE_AUTOFS_PENDING;
+		spin_unlock(&dentry->d_lock);
+	}
+	dentry->d_fsdata = NULL;
+	d_add(dentry, NULL);
+
+	if (dentry->d_op && dentry->d_op->d_revalidate) {
+		up(&dir->i_sem);
+		(dentry->d_op->d_revalidate)(dentry, nd);
+		down(&dir->i_sem);
+	}
+
+	/*
+	 * If we are still pending, check if we had to handle
+	 * a signal. If so we can force a restart..
+	 */
+	if (dentry->d_flags & DCACHE_AUTOFS_PENDING) {
+		/* See if we were interrupted */
+		if (signal_pending(current)) {
+			sigset_t *sigset = &current->pending.signal;
+			if (sigismember (sigset, SIGKILL) ||
+			    sigismember (sigset, SIGQUIT) ||
+			    sigismember (sigset, SIGINT)) {
+			    return ERR_PTR(-ERESTARTNOINTR);
+			}
+		}
+	}
+
+	/*
+	 * If this dentry is unhashed, then we shouldn't honour this
+	 * lookup even if the dentry is positive.  Returning ENOENT here
+	 * doesn't do the right thing for all system calls, but it should
+	 * be OK for the operations we permit from an autofs.
+	 */
+	if ( dentry->d_inode && d_unhashed(dentry) )
+		return ERR_PTR(-ENOENT);
+
+	return NULL;
+}
+
+static int autofs4_dir_symlink(struct inode *dir, 
+			       struct dentry *dentry,
+			       const char *symname)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	struct inode *inode;
+	char *cp;
+
+	DPRINTK("%s <- %.*s", symname,
+		dentry->d_name.len, dentry->d_name.name);
+
+	if (!autofs4_oz_mode(sbi))
+		return -EACCES;
+
+	ino = autofs4_init_ino(ino, sbi, S_IFLNK | 0555);
+	if (ino == NULL)
+		return -ENOSPC;
+
+	ino->size = strlen(symname);
+	ino->u.symlink = cp = kmalloc(ino->size + 1, GFP_KERNEL);
+
+	if (cp == NULL) {
+		kfree(ino);
+		return -ENOSPC;
+	}
+
+	strcpy(cp, symname);
+
+	inode = autofs4_get_inode(dir->i_sb, ino);
+	d_instantiate(dentry, inode);
+
+	if (dir == dir->i_sb->s_root->d_inode)
+		dentry->d_op = &autofs4_root_dentry_operations;
+	else
+		dentry->d_op = &autofs4_dentry_operations;
+
+	dentry->d_fsdata = ino;
+	ino->dentry = dget(dentry);
+	ino->inode = inode;
+
+	dir->i_mtime = CURRENT_TIME;
+
+	return 0;
+}
+
+/*
+ * NOTE!
+ *
+ * Normal filesystems would do a "d_delete()" to tell the VFS dcache
+ * that the file no longer exists. However, doing that means that the
+ * VFS layer can turn the dentry into a negative dentry.  We don't want
+ * this, because the unlink is probably the result of an expire.
+ * We simply d_drop it, which allows the dentry lookup to remount it
+ * if necessary.
+ *
+ * If a process is blocked on the dentry waiting for the expire to finish,
+ * it will invalidate the dentry and try to mount with a new one.
+ *
+ * Also see autofs4_dir_rmdir()..
+ */
+static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	
+	/* This allows root to remove symlinks */
+	if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) )
+		return -EACCES;
+
+	dput(ino->dentry);
+
+	dentry->d_inode->i_size = 0;
+	dentry->d_inode->i_nlink = 0;
+
+	dir->i_mtime = CURRENT_TIME;
+
+	d_drop(dentry);
+
+	return 0;
+}
+
+static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	
+	if (!autofs4_oz_mode(sbi))
+		return -EACCES;
+
+	spin_lock(&dcache_lock);
+	if (!list_empty(&dentry->d_subdirs)) {
+		spin_unlock(&dcache_lock);
+		return -ENOTEMPTY;
+	}
+	spin_lock(&dentry->d_lock);
+	__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+
+	dput(ino->dentry);
+
+	dentry->d_inode->i_size = 0;
+	dentry->d_inode->i_nlink = 0;
+
+	if (dir->i_nlink)
+		dir->i_nlink--;
+
+	return 0;
+}
+
+static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	struct inode *inode;
+
+	if ( !autofs4_oz_mode(sbi) )
+		return -EACCES;
+
+	DPRINTK("dentry %p, creating %.*s",
+		dentry, dentry->d_name.len, dentry->d_name.name);
+
+	ino = autofs4_init_ino(ino, sbi, S_IFDIR | 0555);
+	if (ino == NULL)
+		return -ENOSPC;
+
+	inode = autofs4_get_inode(dir->i_sb, ino);
+	d_instantiate(dentry, inode);
+
+	if (dir == dir->i_sb->s_root->d_inode)
+		dentry->d_op = &autofs4_root_dentry_operations;
+	else
+		dentry->d_op = &autofs4_dentry_operations;
+
+	dentry->d_fsdata = ino;
+	ino->dentry = dget(dentry);
+	ino->inode = inode;
+	dir->i_nlink++;
+	dir->i_mtime = CURRENT_TIME;
+
+	return 0;
+}
+
+/* Get/set timeout ioctl() operation */
+static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi,
+					 unsigned long __user *p)
+{
+	int rv;
+	unsigned long ntimeout;
+
+	if ( (rv = get_user(ntimeout, p)) ||
+	     (rv = put_user(sbi->exp_timeout/HZ, p)) )
+		return rv;
+
+	if ( ntimeout > ULONG_MAX/HZ )
+		sbi->exp_timeout = 0;
+	else
+		sbi->exp_timeout = ntimeout * HZ;
+
+	return 0;
+}
+
+/* Return protocol version */
+static inline int autofs4_get_protover(struct autofs_sb_info *sbi, int __user *p)
+{
+	return put_user(sbi->version, p);
+}
+
+/* Return protocol sub version */
+static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi, int __user *p)
+{
+	return put_user(sbi->sub_version, p);
+}
+
+/*
+ * Tells the daemon whether we need to reghost or not. Also, clears
+ * the needs_reghost flag.
+ */
+static inline int autofs4_ask_reghost(struct autofs_sb_info *sbi, int __user *p)
+{
+	int status;
+
+	DPRINTK("returning %d", sbi->needs_reghost);
+
+	status = put_user(sbi->needs_reghost, p);
+	if ( status )
+		return status;
+
+	sbi->needs_reghost = 0;
+	return 0;
+}
+
+/*
+ * Enable / Disable reghosting ioctl() operation
+ */
+static inline int autofs4_toggle_reghost(struct autofs_sb_info *sbi, int __user *p)
+{
+	int status;
+	int val;
+
+	status = get_user(val, p);
+
+	DPRINTK("reghost = %d", val);
+
+	if (status)
+		return status;
+
+	/* turn reghosting on or off according to val */
+	sbi->reghost_enabled = val;
+	return 0;
+}
+
+/*
+ * Tells the daemon whether it can umount the autofs mount.
+ */
+static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
+{
+	int status = 0;
+
+	if (may_umount(mnt) == 0)
+		status = 1;
+
+	DPRINTK("returning %d", status);
+
+	status = put_user(status, p);
+
+	return status;
+}
+
+/* Identify autofs4_dentries - this is so we can tell if there's
+   an extra dentry refcount or not.  We only hold a refcount on the
+   dentry if it is non-negative (i.e., d_inode != NULL)
+*/
+int is_autofs4_dentry(struct dentry *dentry)
+{
+	return dentry && dentry->d_inode &&
+		(dentry->d_op == &autofs4_root_dentry_operations ||
+		 dentry->d_op == &autofs4_dentry_operations) &&
+		dentry->d_fsdata != NULL;
+}
+
+/*
+ * ioctl()'s on the root directory are the chief method for the daemon to
+ * generate kernel reactions
+ */
+static int autofs4_root_ioctl(struct inode *inode, struct file *filp,
+			     unsigned int cmd, unsigned long arg)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(inode->i_sb);
+	void __user *p = (void __user *)arg;
+
+	DPRINTK("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u",
+		cmd,arg,sbi,process_group(current));
+
+	if ( _IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) ||
+	     _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT )
+		return -ENOTTY;
+	
+	if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) )
+		return -EPERM;
+	
+	switch(cmd) {
+	case AUTOFS_IOC_READY:	/* Wait queue: go ahead and retry */
+		return autofs4_wait_release(sbi,(autofs_wqt_t)arg,0);
+	case AUTOFS_IOC_FAIL:	/* Wait queue: fail with ENOENT */
+		return autofs4_wait_release(sbi,(autofs_wqt_t)arg,-ENOENT);
+	case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */
+		autofs4_catatonic_mode(sbi);
+		return 0;
+	case AUTOFS_IOC_PROTOVER: /* Get protocol version */
+		return autofs4_get_protover(sbi, p);
+	case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */
+		return autofs4_get_protosubver(sbi, p);
+	case AUTOFS_IOC_SETTIMEOUT:
+		return autofs4_get_set_timeout(sbi, p);
+
+	case AUTOFS_IOC_TOGGLEREGHOST:
+		return autofs4_toggle_reghost(sbi, p);
+	case AUTOFS_IOC_ASKREGHOST:
+		return autofs4_ask_reghost(sbi, p);
+
+	case AUTOFS_IOC_ASKUMOUNT:
+		return autofs4_ask_umount(filp->f_vfsmnt, p);
+
+	/* return a single thing to expire */
+	case AUTOFS_IOC_EXPIRE:
+		return autofs4_expire_run(inode->i_sb,filp->f_vfsmnt,sbi, p);
+	/* same as above, but can send multiple expires through pipe */
+	case AUTOFS_IOC_EXPIRE_MULTI:
+		return autofs4_expire_multi(inode->i_sb,filp->f_vfsmnt,sbi, p);
+
+	default:
+		return -ENOSYS;
+	}
+}
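The wait-queue ioctls in the table above are the daemon's reply channel: after servicing (or failing) a request identified by a wait-queue token, it wakes the sleeping processes with AUTOFS_IOC_READY or AUTOFS_IOC_FAIL. A hypothetical daemon-side sketch, assuming those ioctl numbers come from <linux/auto_fs4.h>:

#include <sys/ioctl.h>
#include <linux/auto_fs4.h>

/* Acknowledge one request.  'token' is the wait_queue_token from the
 * packet the kernel wrote to the pipe; it travels by value as the ioctl
 * argument, matching the (autofs_wqt_t)arg cast in autofs4_root_ioctl(). */
static int ack_request(int ioctlfd, unsigned long token, int success)
{
	return ioctl(ioctlfd, success ? AUTOFS_IOC_READY : AUTOFS_IOC_FAIL,
		     token);
}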
diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
new file mode 100644
index 0000000..c265a66
--- /dev/null
+++ b/fs/autofs4/symlink.c
@@ -0,0 +1,25 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/symlink.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include "autofs_i.h"
+
+static int autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	nd_set_link(nd, (char *)ino->u.symlink);
+	return 0;
+}
+
+struct inode_operations autofs4_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= autofs4_follow_link
+};
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
new file mode 100644
index 0000000..1ab24a6
--- /dev/null
+++ b/fs/autofs4/waitq.c
@@ -0,0 +1,303 @@
+/* -*- c -*- --------------------------------------------------------------- *
+ *
+ * linux/fs/autofs/waitq.c
+ *
+ *  Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
+ *  Copyright 2001-2003 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/signal.h>
+#include <linux/file.h>
+#include "autofs_i.h"
+
+/* We make this a static variable rather than a part of the superblock; it
+   is better if we don't reassign numbers easily even across filesystems */
+static autofs_wqt_t autofs4_next_wait_queue = 1;
+
+/* These are the signals we allow to interrupt a pending mount */
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT))
+
+void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
+{
+	struct autofs_wait_queue *wq, *nwq;
+
+	DPRINTK("entering catatonic mode");
+
+	sbi->catatonic = 1;
+	wq = sbi->queues;
+	sbi->queues = NULL;	/* Erase all wait queues */
+	while ( wq ) {
+		nwq = wq->next;
+		wq->status = -ENOENT; /* Magic is gone - report failure */
+		kfree(wq->name);
+		wq->name = NULL;
+		wake_up_interruptible(&wq->queue);
+		wq = nwq;
+	}
+	if (sbi->pipe) {
+		fput(sbi->pipe);	/* Close the pipe */
+		sbi->pipe = NULL;
+	}
+
+	shrink_dcache_sb(sbi->sb);
+}
+
+static int autofs4_write(struct file *file, const void *addr, int bytes)
+{
+	unsigned long sigpipe, flags;
+	mm_segment_t fs;
+	const char *data = (const char *)addr;
+	ssize_t wr = 0;
+
+	/** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
+
+	sigpipe = sigismember(&current->pending.signal, SIGPIPE);
+
+	/* Save pointer to user space and point back to kernel space */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	while (bytes &&
+	       (wr = file->f_op->write(file,data,bytes,&file->f_pos)) > 0) {
+		data += wr;
+		bytes -= wr;
+	}
+
+	set_fs(fs);
+
+	/* Keep the currently executing process from receiving a
+	   SIGPIPE unless it was already supposed to get one */
+	if (wr == -EPIPE && !sigpipe) {
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		sigdelset(&current->pending.signal, SIGPIPE);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+	}
+
+	return (bytes > 0);
+}
+	
+static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+				 struct autofs_wait_queue *wq,
+				 int type)
+{
+	union autofs_packet_union pkt;
+	size_t pktsz;
+
+	DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d",
+		wq->wait_queue_token, wq->len, wq->name, type);
+
+	memset(&pkt,0,sizeof pkt); /* For security reasons */
+
+	pkt.hdr.proto_version = sbi->version;
+	pkt.hdr.type = type;
+	if (type == autofs_ptype_missing) {
+		struct autofs_packet_missing *mp = &pkt.missing;
+
+		pktsz = sizeof(*mp);
+
+		mp->wait_queue_token = wq->wait_queue_token;
+		mp->len = wq->len;
+		memcpy(mp->name, wq->name, wq->len);
+		mp->name[wq->len] = '\0';
+	} else if (type == autofs_ptype_expire_multi) {
+		struct autofs_packet_expire_multi *ep = &pkt.expire_multi;
+
+		pktsz = sizeof(*ep);
+
+		ep->wait_queue_token = wq->wait_queue_token;
+		ep->len = wq->len;
+		memcpy(ep->name, wq->name, wq->len);
+		ep->name[wq->len] = '\0';
+	} else {
+		printk("autofs4_notify_daemon: bad type %d!\n", type);
+		return;
+	}
+
+	if (autofs4_write(sbi->pipe, &pkt, pktsz))
+		autofs4_catatonic_mode(sbi);
+}
+
+static int autofs4_getpath(struct autofs_sb_info *sbi,
+			   struct dentry *dentry, char **name)
+{
+	struct dentry *root = sbi->sb->s_root;
+	struct dentry *tmp;
+	char *buf = *name;
+	char *p;
+	int len = 0;
+
+	spin_lock(&dcache_lock);
+	for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
+		len += tmp->d_name.len + 1;
+
+	if (--len > NAME_MAX) {
+		spin_unlock(&dcache_lock);
+		return 0;
+	}
+
+	*(buf + len) = '\0';
+	p = buf + len - dentry->d_name.len;
+	strncpy(p, dentry->d_name.name, dentry->d_name.len);
+
+	for (tmp = dentry->d_parent; tmp != root ; tmp = tmp->d_parent) {
+		*(--p) = '/';
+		p -= tmp->d_name.len;
+		strncpy(p, tmp->d_name.name, tmp->d_name.len);
+	}
+	spin_unlock(&dcache_lock);
+
+	return len;
+}
+
+int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
+		enum autofs_notify notify)
+{
+	struct autofs_wait_queue *wq;
+	char *name;
+	int len, status;
+
+	/* In catatonic mode, we don't wait for anybody */
+	if ( sbi->catatonic )
+		return -ENOENT;
+	
+	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	len = autofs4_getpath(sbi, dentry, &name);
+	if (!len) {
+		kfree(name);
+		return -ENOENT;
+	}
+
+	if (down_interruptible(&sbi->wq_sem)) {
+		kfree(name);
+		return -EINTR;
+	}
+
+	for (wq = sbi->queues ; wq ; wq = wq->next) {
+		if (wq->hash == dentry->d_name.hash &&
+		    wq->len == len &&
+		    wq->name && !memcmp(wq->name, name, len))
+			break;
+	}
+
+	if ( !wq ) {
+		/* Create a new wait queue */
+		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
+		if ( !wq ) {
+			kfree(name);
+			up(&sbi->wq_sem);
+			return -ENOMEM;
+		}
+
+		wq->wait_queue_token = autofs4_next_wait_queue;
+		if (++autofs4_next_wait_queue == 0)
+			autofs4_next_wait_queue = 1;
+		wq->next = sbi->queues;
+		sbi->queues = wq;
+		init_waitqueue_head(&wq->queue);
+		wq->hash = dentry->d_name.hash;
+		wq->name = name;
+		wq->len = len;
+		wq->status = -EINTR; /* Status return if interrupted */
+		atomic_set(&wq->wait_ctr, 2);
+		up(&sbi->wq_sem);
+
+		DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d",
+			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
+		/* autofs4_notify_daemon() may block */
+		if (notify != NFY_NONE) {
+			autofs4_notify_daemon(sbi,wq, 
+					notify == NFY_MOUNT ?
+						  autofs_ptype_missing :
+						  autofs_ptype_expire_multi);
+		}
+	} else {
+		atomic_inc(&wq->wait_ctr);
+		up(&sbi->wq_sem);
+		kfree(name);
+		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
+	}
+
+	/* wq->name is NULL if and only if the lock is already released */
+
+	if ( sbi->catatonic ) {
+		/* We might have slept, so check again for catatonic mode */
+		wq->status = -ENOENT;
+		if ( wq->name ) {
+			kfree(wq->name);
+			wq->name = NULL;
+		}
+	}
+
+	if ( wq->name ) {
+		/* Block all but "shutdown" signals while waiting */
+		sigset_t oldset;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&current->sighand->siglock, irqflags);
+		oldset = current->blocked;
+		siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+
+		wait_event_interruptible(wq->queue, wq->name == NULL);
+
+		spin_lock_irqsave(&current->sighand->siglock, irqflags);
+		current->blocked = oldset;
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+	} else {
+		DPRINTK("skipped sleeping");
+	}
+
+	status = wq->status;
+
+	/* Are we the last process to need status? */
+	if (atomic_dec_and_test(&wq->wait_ctr))
+		kfree(wq);
+
+	return status;
+}
+
+
+int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+{
+	struct autofs_wait_queue *wq, **wql;
+
+	down(&sbi->wq_sem);
+	for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
+		if ( wq->wait_queue_token == wait_queue_token )
+			break;
+	}
+
+	if ( !wq ) {
+		up(&sbi->wq_sem);
+		return -EINVAL;
+	}
+
+	*wql = wq->next;	/* Unlink from chain */
+	up(&sbi->wq_sem);
+	kfree(wq->name);
+	wq->name = NULL;	/* Do not wait on this queue */
+
+	wq->status = status;
+
+	if (atomic_dec_and_test(&wq->wait_ctr))	/* Is anyone still waiting for this guy? */
+		kfree(wq);
+	else
+		wake_up_interruptible(&wq->queue);
+
+	return 0;
+}
+
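For completeness, the receiving end of autofs4_notify_daemon(): the daemon reads one packet per request from the pipe and dispatches on the header type. A hypothetical sketch, assuming union autofs_packet_union and the autofs_ptype_* values exported by <linux/auto_fs4.h>:

#include <unistd.h>
#include <linux/auto_fs4.h>

/* Read and dispatch a single request.  Packets are written whole and
 * are smaller than PIPE_BUF, so one read() returns one packet. */
static int handle_packet(int pipefd)
{
	union autofs_packet_union pkt;

	if (read(pipefd, &pkt, sizeof(pkt)) <= 0)
		return -1;

	switch (pkt.hdr.type) {
	case autofs_ptype_missing:
		/* mount pkt.missing.name under the autofs mount point,
		 * then acknowledge pkt.missing.wait_queue_token */
		break;
	case autofs_ptype_expire_multi:
		/* umount pkt.expire_multi.name, then acknowledge
		 * pkt.expire_multi.wait_queue_token the same way */
		break;
	default:
		return -1;
	}
	return 0;
}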
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
new file mode 100644
index 0000000..672a319
--- /dev/null
+++ b/fs/bad_inode.c
@@ -0,0 +1,123 @@
+/*
+ *  linux/fs/bad_inode.c
+ *
+ *  Copyright (C) 1997, Stephen Tweedie
+ *
+ *  Provide stub functions for unreadable inodes
+ *
+ *  Fabian Frederick : August 2003 - All file operations assigned to EIO
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/smp_lock.h>
+#include <linux/namei.h>
+
+static int return_EIO(void)
+{
+	return -EIO;
+}
+
+#define EIO_ERROR ((void *) (return_EIO))
+
+static struct file_operations bad_file_ops =
+{
+	.llseek		= EIO_ERROR,
+	.aio_read	= EIO_ERROR,
+	.read		= EIO_ERROR,
+	.write		= EIO_ERROR,
+	.aio_write	= EIO_ERROR,
+	.readdir	= EIO_ERROR,
+	.poll		= EIO_ERROR,
+	.ioctl		= EIO_ERROR,
+	.mmap		= EIO_ERROR,
+	.open		= EIO_ERROR,
+	.flush		= EIO_ERROR,
+	.release	= EIO_ERROR,
+	.fsync		= EIO_ERROR,
+	.aio_fsync	= EIO_ERROR,
+	.fasync		= EIO_ERROR,
+	.lock		= EIO_ERROR,
+	.readv		= EIO_ERROR,
+	.writev		= EIO_ERROR,
+	.sendfile	= EIO_ERROR,
+	.sendpage	= EIO_ERROR,
+	.get_unmapped_area = EIO_ERROR,
+};
+
+struct inode_operations bad_inode_ops =
+{
+	.create		= EIO_ERROR,
+	.lookup		= EIO_ERROR,
+	.link		= EIO_ERROR,
+	.unlink		= EIO_ERROR,
+	.symlink	= EIO_ERROR,
+	.mkdir		= EIO_ERROR,
+	.rmdir		= EIO_ERROR,
+	.mknod		= EIO_ERROR,
+	.rename		= EIO_ERROR,
+	.readlink	= EIO_ERROR,
+	/* follow_link must be no-op, otherwise unmounting this inode
+	   won't work */
+	.truncate	= EIO_ERROR,
+	.permission	= EIO_ERROR,
+	.getattr	= EIO_ERROR,
+	.setattr	= EIO_ERROR,
+	.setxattr	= EIO_ERROR,
+	.getxattr	= EIO_ERROR,
+	.listxattr	= EIO_ERROR,
+	.removexattr	= EIO_ERROR,
+};
+
+
+/*
+ * When a filesystem is unable to read an inode due to an I/O error in
+ * its read_inode() function, it can call make_bad_inode() to return a
+ * set of stubs which will return EIO errors as required. 
+ *
+ * We only need to do limited initialisation: all other fields are
+ * preinitialised to zero automatically.
+ */
+ 
+/**
+ *	make_bad_inode - mark an inode bad due to an I/O error
+ *	@inode: Inode to mark bad
+ *
+ *	When an inode cannot be read due to a media or remote network
+ *	failure this function makes the inode "bad" and causes I/O operations
+ *	on it to fail from this point on.
+ */
+ 
+void make_bad_inode(struct inode * inode) 
+{
+	remove_inode_hash(inode);
+
+	inode->i_mode = S_IFREG;
+	inode->i_atime = inode->i_mtime = inode->i_ctime =
+		current_fs_time(inode->i_sb);
+	inode->i_op = &bad_inode_ops;	
+	inode->i_fop = &bad_file_ops;	
+}
+EXPORT_SYMBOL(make_bad_inode);
+
+/*
+ * This tests whether an inode has been flagged as bad. The test uses
+ * &bad_inode_ops to cover the case of invalidated inodes as well as
+ * those created by make_bad_inode() above.
+ */
+ 
+/**
+ *	is_bad_inode - is an inode errored
+ *	@inode: inode to test
+ *
+ *	Returns true if the inode in question has been marked as bad.
+ */
+ 
+int is_bad_inode(struct inode * inode) 
+{
+	return (inode->i_op == &bad_inode_ops);	
+}
+
+EXPORT_SYMBOL(is_bad_inode);
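As the comments above explain, make_bad_inode() is meant to be called from a filesystem's read_inode() when the on-disk inode cannot be read, and callers detect the result with is_bad_inode(). A minimal sketch for a hypothetical filesystem (the examplefs name and the inode-to-block mapping are invented for illustration):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Invented 1:1 mapping from inode number to disk block. */
#define EXAMPLEFS_INODE_BLOCK(ino)	((sector_t)(ino))

/* Hypothetical super_operations->read_inode() implementation. */
static void examplefs_read_inode(struct inode *inode)
{
	struct buffer_head *bh;

	bh = sb_bread(inode->i_sb, EXAMPLEFS_INODE_BLOCK(inode->i_ino));
	if (!bh) {
		/* I/O error: from now on every operation on this inode
		 * returns -EIO, and is_bad_inode() reports it as bad. */
		make_bad_inode(inode);
		return;
	}
	/* ... copy the on-disk fields into 'inode' here ... */
	brelse(bh);
}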
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
new file mode 100644
index 0000000..ce8c787
--- /dev/null
+++ b/fs/befs/ChangeLog
@@ -0,0 +1,417 @@
+Version 0.92 (2002-03-29)
+==========
+* Minor cleanup. Ran Lindent on the sources.
+
+Version 0.92 (2002-03-27)
+==========
+* Fixed module makefile problem. It was not compiling all the correct 
+    source files!
+* Removed duplicated function definition
+* Fixed potential null pointer dereference when reporting an error
+
+Version 0.91 (2002-03-26)
+==========
+* Oy! Fixed stupid bug that would cause an unresolved symbol error.
+	Thanks to Laszlo Boszormenyi for pointing this out to me.
+
+Version 0.9 (2002-03-14)
+==========
+* Added Sergey S. Kostyliov's patch to eliminate memcpy() overhead
+	from b+tree operations. Changes the befs_read_datastream() interface.
+
+* Segregated the functions that interface directly with the linux  vfs 
+	interface into their own file called linuxvfs.c. [WD]
+
+Version 0.64 (2002-02-07)
+==========
+* Did the string comparison really right this time (btree.c) [WD]
+
+* Fixed up some places where I assumed that a long int could hold
+	a pointer value. (btree.c) [WD]
+
+* Andrew Farnham <andrewfarnham@uq.net.au> pointed out that the module
+	wouldn't work on older (<2.4.10) kernels due to an unresolved symbol.
+	This is bad, since 2.4.9 is still the current RedHat kernel. I added
+	a workaround for this problem (compatibility.h) [WD]
+
+* Sergey S. Kostyliov made befs_find_key() use a binary search to find 
+	keys within btree nodes, rather than the linear search we were using 
+	before. (btree.c) [Sergey S. Kostyliov <rathamahata@php4.ru>]
+
+* Made a debian package of the source for use with kernel-package. [WD]
+
+
+Version 0.63 (2002-01-31)
+==========
+* Fixed bug in befs_find_brun_indirect() that would result in the wrong
+	block being read. It was introduced when adding byteswapping in 
+	0.61. (datastream.c) [WD]
+
+* Fixed a longstanding bug in befs_find_key() that would result in it 
+	finding the first key that is a substring of the string it is searching
+	for. For example, this would cause files in the same directory with 
+	names like file1 and file2 to mysteriously be duplicates of each other 
+	(because they have the same inode number). Many thanks to Pavel Roskin 
+	for reporting this serious bug!!!
+	(btree.c) [WD]
+
+* Added support for long symlinks, after Axel Dorfler explained how 
+	they work. I had forgotten all about them. (inode.c, symlink.c) [WD]
+
+* Documentation improvements in source. [WD]
+
+* Makefile fix for independent module when CONFIG_MODVERSION is set in 
+	kernel config [Pavel Roskin <proski@gnu.org>]
+
+* Compile warning fix for namei.c. [Sergey S. Kostyliov <rathamahata@php4.ru>]
+
+
+Version 0.62
+==========
+* Fixed makefile for module install [WD]
+
+
+Version 0.61 (2002-01-20)
+==========
+* Made functions in endian.h to do the correct byteswapping, no matter
+	the arch. [WD]
+
+* Abandoned silly checks for a NULL superblock pointer in debug.c. [WD]
+
+* Misc code cleanups. Also cleanup of this changelog file. [WD]
+
+* Added byteswapping to all metadata reads from disk.
+	Uses the functions from endian.h [WD]
+
+* Remove the typedef of struct super_block to vfs_sb, as it offended
+	certain peoples' aesthetic sense. [WD]
+
+* Ditto with the befs_read_block() interface. [WD]
+ 
+
+Version 0.6 (2001-12-15)
+==========
+* Cleanup of NLS functions (util.c) [WD]
+
+* Make directory lookup/read use the NLS if an iocharset is provided. [WD]
+
+* Fixed stupid bug where specifying the uid or gid mount options as '0' 
+	would result in the filesystem using the on-disk uid and gid. [WD]
+
+* Added mount option to control debug printing. 
+	The option is, simply enough, 'debug'. 
+	(super.c, debug.c) [WD]
+
+* Removed notion of btree handle from btree.c. It was unnecessary, as the
+	linux VFS doesn't allow us to keep any state between calls. Updated 
+	dir.c, namei.c befs_fs.h to account for it. [WD]
+
+* Improved handling of overflow nodes when listing directories. 
+	Now works for overflow nodes hanging off of nodes other than the root 
+	node. This is the cleaner solution to Brent Miszalaski's problem. [WD]
+
+* Added new debug/warning/error print functions in debug.c. 
+	More flexible. Will soon be controllable at mount time 
+	(see TODO). [WD]
+
+* Rewrote datastream position lookups.
+	(datastream.c) [WD]
+
+* Moved the TODO list to its own file.
+
+
+Version 0.50 (2001-11-13)
+==========
+* Added workaround for mis-understanding of the nature of the b+trees used 
+	in directories. A cleaner solution will come after I've thought about it 
+	for a while. Thanks to Brent Miszalaski for finding and reporting this bug. 
+	(btree.c) [WD]
+
+* Minor cleanups
+
+* Added test for "impossible" condition of empty internal nodes in 
+	seekleaf() in btree.c [WD]
+
+* Implemented the abstracted read_block() in io.c [WD]
+
+* Cleaned up the inode validation in inode.c [WD]
+
+* Anton Altaparmakov figured out (by asking Linus :) ) what was causing the 
+ 	hanging disk io problem. It turns out you need to have the sync_pages 
+	callback defined in your address_space_ops, even if it just uses the 
+	default linux-supplied implementation. Fixed. Works now.
+	(file.c) [WD]
+
+* Anton Altaparmakov and Christoph Hellwig alerted me to the fact that 
+	filesystem code should be using GFP_NOFS instead of GFP_KERNEL as the 
+	priority parameter to kmalloc(). Fixed. 
+	(datastream.c, btree.c super.c inode.c) [WD]
+
+* Anton also told me that the blocksize is not allowed to be larger than 
+	the page size in linux, which is 4k i386. Oops. Added a test for 
+	(blocksize > PAGE_SIZE), and refuse to mount in that case. What this 
+	practically means is that 8k blocksize volumes won't work without a major
+	restructuring of the driver (or an alpha or other 64bit hardware). [WD]
+
+* Cleaned up the befs_count_blocks() function. Much smarter now. 
+	And somewhat smaller too. [WD]
+
+* Made inode allocations use a slab cache 
+	(super.c inode.c) [WD]
+
+* Moved the freeing of the private inode section from put_inode() to 
+	clear_inode(). This fixes a potential free twice type bug. Put_inode() 
+	can be called multiple times for each inode struct. [WD]
+
+* Converted all non vfs-callback functions to use befs_sb_info as the 
+	superblock type, rather than struct super_block. This is for 
+	portability. [WD]
+
+* Fixed a couple of compile warnings due to use of malloc.h, when slab.h 
+	is the new way. (inode.c, super.c) [WD]
+
+* Fixed erroneous includes of linux/befs_fs_i.h and linux/befs_fs_sb.h 
+	in inode.c [WD]
+
+Version 0.45 (2001-10-29)
+==========
+* Added functions to get the private superblock and inode structures from 
+	their enclosing public structures. Switched all references to the 
+	private portions to use them. (many files) [WD]
+
+* Made read_super and read_inode allocate the private portions of those 
+	structures into the generic pointer fields of the public structures 
+	with kmalloc(). put_super and put_inode free them. This allows us not 
+	to have to touch the definitions of the public structures in 
+	include/linux/fs.h. Also, befs_inode_info is huge (because of the 
+	symlink string). (super.c, inode.c, befs_fs.h) [WD]
+
+* Fixed a thinko that was corrupting file reads after the first block_run 
+	is done being read. (datastream.c) [WD]
+
+* Removed fsync() hooks, since a read-only filesystem doesn't need them. 
+	[Christoph Hellwig].
+
+* Fixed befs_readlink() (symlink.c) [Christoph Hellwig].
+
+* Removed all the Read-Write stuff. I'll redo it when it is time to add 
+	write support (various files) [WD].
+
+* Removed prototypes for functions whose definitions have been removed 
+	(befs_fs.h) [WD].
+
+
+Version 0.4 (2001-10-28)
+==========
+* Made it an option to use the old non-pagecache befs_file_read() for 
+	testing purposes. (fs/Config.in)
+
+* Fixed unused variable warnings when compiling without debugging.
+
+* Fixed a bug where the inode and super_block didn't get their blockbits 
+	fields set (inode.c and super.c). 
+
+* Release patch version 11. AKA befs-driver version 0.4.
+
+* That's right. New versioning scheme. 
+	I've done some serious testing on it now (on my box anyhow), and it 
+	seems stable and not outrageously slow. Existing features are more-or-less 
+	correct (see TODO list). But it isn't 1.0 yet. I think 0.4 gives me some 
+	headroom before the big 1.0.
+
+
+2001-10-26
+==========
+* Fixed date format in this file. Was I smoking crack?
+
+* Removed old datastream code from file.c, since it is no longer used.
+
+* Generic_read_file() is now used to read regular file data. 
+	It doesn't chew up the buffer cache (it does page io instead), and seems 
+	to be about as fast (even though it has to look up each file block 
+	individually). And it knows about doing readahead, which is a major plus. 
+	So it does i/o in much larger chunks. It is the correct linux way. It 
+	uses befs_get_block() by way of befs_readpage() to find the disk offsets 
+	of blocks, which in turn calls befs_fpos2brun() in datastream.c to do 
+	the hard work of finding the disk block number.
+
+* Changed method of checking for a dirty filesystem in befs_read_super 
+	(super.c). Now we check to see if log_start and log_end differ. If so, 
+	the journal needs to be replayed, and the filesystem cannot be mounted.
+
+* Fixed an extra instance of MOD_DEC_USE_COUNT in super.c
+
+* Fixed a problem with reading the superblock on devices with large sector 
+	sizes (such as cdroms) on linux 2.4.10 and up.
+
+2001-10-24
+==========
+* Fix nasty bug in converting block numbers to struct befs_inode_addr. 
+	Subtle, because the old version was only sometimes wrong. 
+	Probably responsible for lots of problems. (inode.c)
+
+* Fix bug with reading an empty directory. (btree.c and dir.c)
+
+* This one looks good. Release patch version 10
+
+2001-10-23
+==========
+* Added btree searching function.
+
+* Use befs_btree_find in befs_lookup (namei.c)
+
+* Additional comments in btree.c
+
+2001-10-22
+==========
+* Added B+tree reading functions (in btree.c). 
+	Made befs_readdir() use them instead of the cruft in index.c.
+
+2001-09-11
+==========
+* Converted befs_read_file() to use the new datastream code.
+
+* Finally updated the README file.
+
+* Added many comments.
+
+* Posted version 6
+
+* Removed byte-order conversion code. 
+	I have no intention of supporting it, and it was very ugly. 
+	Flow control with #ifdef (ugh). Maybe I'll redo it once 
+	native byteorder works 100%.
+
+2001-09-10
+==========
+* Finished implementing read_datastream()
+
+* made befs_read_brun() more general
+	Supports an offset to start at and a max bytes to read
+	Added a wrapper function to give the old call
+
+2001-09-30
+==========
+* Discovered that the datastream handling code in file.c is quite deficient 
+	in several respects. For one thing, it doesn't deal with indirect blocks
+
+* Rewrote datastream handling.
+
+* Created io.c, for io related functions.
+	Previously, the befs_bread() functions lived in file.c
+	Created the befs_read_brun() function.
+
+
+2001-09-07
+==========
+* Made a function to actually count the number of fs blocks used by a file.
+	And helper functions.
+	(fs/befs/inode.c)
+
+2001-09-05
+==========
+* Fixed a misunderstanding of the inode fields. 
+	This fixed the problem with wrong file sizes from du and others.
+	The i_blocks field of the inode struct is not the number of blocks for the
+	inode, it is the number of blocks for the file. Also, i_blksize is not
+	necessarily the size of the inode, although in practice it works out.
+	Changed it to the blocksize of the filesystem.
+	(fs/befs/inode.c)
+
+* Permanently removed code that had been provisionally ifdefed out of befs_fs.h
+
+* Since we don't support access time, make that field zero, instead of 
+	copying m_time.
+	(fs/befs/inode.c)
+
+* Added sanity check for inode reading
+	Make sure inode we got was the one we asked for. 
+	(fs/befs/inode.c)
+
+* Code cleanup
+	Local pointers to commonly used structures in inode.c.
+	Got rid of abominations befs_iaddr2inode() and befs_inode2ino(). 
+	Replaced with single function iaddr2blockno().
+	(fs/befs/super.c) (fs/befs/inode.c)
+
+2001-09-01
+==========
+* Fixed the problem with statfs where it would always claim the disk was 
+	half full, due to improper understanding of the statfs fields.
+	(fs/befs/super.c)
+
+* Posted version 4 of the patch
+
+2001-09-01
+==========
+* Changed the macros in befs_fs.h to inline functions.
+	More readable. Typesafe. Better.
+	(include/linux/befs_fs.h)
+
+* Moved type definitions from befs_fs.h to a new file, befs_fs_types.h 
+	Because befs_fs_i.h and befs_fs_sb.h were including befs_fs.h for the 
+	typedefs, and they are included in <linux/fs.h>, which has definitions 
+	that I want the inline functions in befs_fs.h to be able to see. Nasty
+	circularity.
+	(include/linux/befs_fs.h)
+
+2001-08-30
+==========
+* Cleaned up some wording.
+
+* Added additional consistency checks on mount
+	Check block_size agrees with block_shift
+	Check flags == BEFS_CLEAN
+	(fs/befs/super.c)
+
+* Tell the kernel to only mount befs read-only. 
+	By setting the MS_RDONLY flag in befs_read_super().
+	Not that it was possible to write before. But now the kernel won't even try.
+	(fs/befs/super.c)
+
+* Got rid of kernel warning on mount.
+	The kernel doesn't like it if you call set_blocksize() on a device when 
+	you have some of its blocks open. Moved the second set_blocksize() to the
+	very end of befs_read_super(), after we are done with the disk superblock.
+	(fs/befs/super.c)
+	
+* Fixed wrong number of args bug in befs_dump_inode
+	(fs/befs/debug.c)
+
+* Solved lots of type mismatches in kprint()s
+	(everywhere)
+
+2001-08-27
+==========
+* Cleaned up the fs/Config.in entries a bit, now slightly more descriptive.
+
+* BeFS depends on NLS, so I made activating BeFS enable the NLS questions
+	(fs/nls/Config.in)
+
+* Added Configure.help entries for CONFIG_BEFS_FS and CONFIG_DEBUG_BEFS
+	(Documentation/Configure.help)
+
+2001-08-??
+==========
+* Removed superblock locking calls in befs_read_super(). In 2.4, the VFS 
+	hands us a super_block struct that is already locked.
+
+2001-08-13
+==========
+* Will Dyson <will_dyson@pobox.com> is now attempting to maintain this module.
+	Makoto Kato <m_kato@ga2.so-net.ne.jp> is the original author. Daniel Berlin 
+	also did some work on it (fixing it up for the later 2.3.x kernels, IIRC).
+
+* Fixed compile errors on 2.4.1 kernel (WD)
+	Resolve rejected patches
+	Accommodate changed NLS interface (util.h)
+	Needed to include <linux/slab.h> in most files
+	Makefile changes
+	fs/Config.in changes
+
+* Tried to niceify the code using the ext2 fs as a guide
+	Declare befs_fs_type using the DECLARE_FSTYPE_DEV() macro
+
+* Made it a configure option to turn on debugging (fs/Config.in)
+
+* Compiles on 2.4.7
diff --git a/fs/befs/Makefile b/fs/befs/Makefile
new file mode 100644
index 0000000..2f370bd
--- /dev/null
+++ b/fs/befs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux BeOS filesystem routines.
+#
+ 
+obj-$(CONFIG_BEFS_FS) += befs.o
+
+befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o
diff --git a/fs/befs/TODO b/fs/befs/TODO
new file mode 100644
index 0000000..3250921
--- /dev/null
+++ b/fs/befs/TODO
@@ -0,0 +1,14 @@
+TODO
+==========
+
+* Convert comments to the Kernel-Doc format.
+
+* Befs_fs.h has gotten big and messy. No reason not to break it up into 
+	smaller pieces.
+
+* See if Alexander Viro's option parser made it into the kernel tree. 
+	Use that if we can. (include/linux/parser.h)
+
+* See if we really need separate types for on-disk and in-memory 
+	representations of the superblock and inode.
+
diff --git a/fs/befs/attribute.c b/fs/befs/attribute.c
new file mode 100644
index 0000000..e329d72
--- /dev/null
+++ b/fs/befs/attribute.c
@@ -0,0 +1,117 @@
+/*
+ * linux/fs/befs/attribute.c
+ *
+ * Copyright (C) 2002 Will Dyson <will_dyson@pobox.com>
+ *
+ * Many thanks to Dominic Giampaolo, author of "Practical File System
+ * Design with the Be File System", for such a helpful book.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "befs.h"
+#include "endian.h"
+
+#define SD_DATA(sd)\
+	(void*)((char*)sd + sizeof(*sd) + (sd->name_size - sizeof(sd->name)))
+
+#define SD_NEXT(sd)\
+	(befs_small_data*)((char*)sd + sizeof(*sd) + (sd->name_size - \
+	sizeof(sd->name) + sd->data_size))
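+
+/* The small_data area is assumed to be a packed sequence of entries, each
+ * laid out as [header][name bytes][data bytes] and terminated by an entry
+ * with type == 0.  SD_DATA() and SD_NEXT() above simply step over those
+ * variable-length pieces (name[1] in the struct already accounts for one
+ * byte of the name).  A minimal sketch of walking the area:
+ *
+ *	befs_small_data *sd = inode->small_data;
+ *
+ *	while (sd->type != 0) {
+ *		... look at sd->name and SD_DATA(sd) ...
+ *		sd = SD_NEXT(sd);
+ *	}
+ *
+ * The on-disk fields are presumably still in filesystem byte order, so a
+ * real caller would run them through the fs16_to_cpu()/fs32_to_cpu()
+ * helpers before using them.
+ */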
+
+int
+list_small_data(struct super_block *sb, befs_inode * inode);
+
+befs_small_data *
+find_small_data(struct super_block *sb, befs_inode * inode,
+				 const char *name);
+int
+read_small_data(struct super_block *sb, befs_inode * inode,
+		const char *name, void *buf, size_t bufsize);
+
+/**
+ * find_small_data - locate a small_data entry by name
+ * @sb: Filesystem superblock
+ * @inode: On-disk befs inode to search
+ * @name: Name of the attribute to look for
+ *
+ * Walks the small_data area of @inode and returns the entry whose name
+ * matches @name, or NULL if no such entry exists.
+ */
+befs_small_data *
+find_small_data(struct super_block *sb, befs_inode * inode, const char *name)
+{
+	befs_small_data *sdata = inode->small_data;
+
+	while (sdata->type != 0) {
+		if (strcmp(name, sdata->name) == 0) {
+			return sdata;
+		}
+		sdata = SD_NEXT(sdata);
+	}
+	return NULL;
+}
+
+/**
+ * read_small_data - copy out the data of a named small_data entry
+ * @sb: Filesystem superblock
+ * @inode: On-disk befs inode to read from
+ * @name: Name of the attribute to read
+ * @buf: Buffer to copy the attribute data into
+ * @bufsize: Size of @buf in bytes
+ *
+ * Returns BEFS_OK on success, or BEFS_ERR if the entry does not exist
+ * or its data does not fit in @buf.
+ */
+int
+read_small_data(struct super_block *sb, befs_inode * inode,
+		const char *name, void *buf, size_t bufsize)
+{
+	befs_small_data *sdata;
+
+	sdata = find_small_data(sb, inode, name);
+	if (sdata == NULL)
+		return BEFS_ERR;
+	else if (sdata->data_size > bufsize)
+		return BEFS_ERR;
+
+	memcpy(buf, SD_DATA(sdata), sdata->data_size);
+
+	return BEFS_OK;
+}
+
+/**
+ * list_small_data - list the small_data entries of an inode
+ *
+ * Stub; not yet implemented.
+ */
+int
+list_small_data(struct super_block *sb, befs_inode * inode)
+{
+	return BEFS_ERR;	/* not yet implemented */
+}
+
+/**
+ * list_attr - list the attributes of an inode
+ *
+ * Stub; not yet implemented.
+ */
+int
+list_attr(struct super_block *sb, befs_inode * inode)
+{
+	return BEFS_ERR;	/* not yet implemented */
+}
+
+/**
+ * read_attr - read an attribute of an inode
+ *
+ * Stub; not yet implemented.
+ */
+int
+read_attr(struct super_block *sb, befs_inode * inode)
+{
+	return BEFS_ERR;	/* not yet implemented */
+}
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
new file mode 100644
index 0000000..057a2c3
--- /dev/null
+++ b/fs/befs/befs.h
@@ -0,0 +1,154 @@
+/*
+ * befs.h
+ *
+ * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com>
+ * Copyright (C) 1999 Makoto Kato (m_kato@ga2.so-net.ne.jp)
+ */
+
+#ifndef _LINUX_BEFS_H
+#define _LINUX_BEFS_H
+
+#include "befs_fs_types.h"
+
+/* used in debug.c */
+#define BEFS_VERSION "0.9.3"
+
+
+typedef u64 befs_blocknr_t;
+/*
+ * BeFS in memory structures
+ */
+
+typedef struct befs_mount_options {
+	gid_t gid;
+	uid_t uid;
+	int use_gid;
+	int use_uid;
+	int debug;
+	char *iocharset;
+} befs_mount_options;
+
+typedef struct befs_sb_info {
+	u32 magic1;
+	u32 block_size;
+	u32 block_shift;
+	int byte_order;
+	befs_off_t num_blocks;
+	befs_off_t used_blocks;
+	u32 inode_size;
+	u32 magic2;
+
+	/* Allocation group information */
+	u32 blocks_per_ag;
+	u32 ag_shift;
+	u32 num_ags;
+
+	/* journal log entry */
+	befs_block_run log_blocks;
+	befs_off_t log_start;
+	befs_off_t log_end;
+
+	befs_inode_addr root_dir;
+	befs_inode_addr indices;
+	u32 magic3;
+
+	befs_mount_options mount_opts;
+	struct nls_table *nls;
+
+} befs_sb_info;
+
+typedef struct befs_inode_info {
+	u32 i_flags;
+	u32 i_type;
+
+	befs_inode_addr i_inode_num;
+	befs_inode_addr i_parent;
+	befs_inode_addr i_attribute;
+
+	union {
+		befs_data_stream ds;
+		char symlink[BEFS_SYMLINK_LEN];
+	} i_data;
+
+	struct inode vfs_inode;
+
+} befs_inode_info;
+
+enum befs_err {
+	BEFS_OK,
+	BEFS_ERR,
+	BEFS_BAD_INODE,
+	BEFS_BT_END,
+	BEFS_BT_EMPTY,
+	BEFS_BT_MATCH,
+	BEFS_BT_PARMATCH,
+	BEFS_BT_NOT_FOUND
+};
+
+
+/****************************/
+/* debug.c */
+void befs_error(const struct super_block *sb, const char *fmt, ...);
+void befs_warning(const struct super_block *sb, const char *fmt, ...);
+void befs_debug(const struct super_block *sb, const char *fmt, ...);
+
+void befs_dump_super_block(const struct super_block *sb, befs_super_block *);
+void befs_dump_inode(const struct super_block *sb, befs_inode *);
+void befs_dump_index_entry(const struct super_block *sb, befs_btree_super *);
+void befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *);
+/****************************/
+
+
+/* Gets a pointer to the private portion of the super_block
+ * structure from the public part
+ */
+static inline befs_sb_info *
+BEFS_SB(const struct super_block *super)
+{
+	return (befs_sb_info *) super->s_fs_info;
+}
+
+static inline befs_inode_info *
+BEFS_I(const struct inode *inode)
+{
+	return list_entry(inode, struct befs_inode_info, vfs_inode);
+}
+
+static inline befs_blocknr_t
+iaddr2blockno(struct super_block *sb, befs_inode_addr * iaddr)
+{
+	return ((iaddr->allocation_group << BEFS_SB(sb)->ag_shift) +
+		iaddr->start);
+}
+
+static inline befs_inode_addr
+blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
+{
+	befs_inode_addr iaddr;
+	iaddr.allocation_group = blockno >> BEFS_SB(sb)->ag_shift;
+	iaddr.start =
+	    blockno - (iaddr.allocation_group << BEFS_SB(sb)->ag_shift);
+	iaddr.len = 1;
+
+	return iaddr;
+}
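+
+/* For example (assuming ag_shift is 13, i.e. 8192 blocks per allocation
+ * group): the inode address {2, 5, 1} maps to block number
+ * (2 << 13) + 5 = 16389, and blockno2iaddr(sb, 16389) recovers {2, 5, 1}.
+ * The two helpers above are inverses of each other.
+ */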
+
+static inline unsigned int
+befs_iaddrs_per_block(struct super_block *sb)
+{
+	return BEFS_SB(sb)->block_size / sizeof (befs_inode_addr);
+}
+
+static inline int
+befs_iaddr_is_empty(befs_inode_addr * iaddr)
+{
+	return (!iaddr->allocation_group) && (!iaddr->start) && (!iaddr->len);
+}
+
+static inline size_t
+befs_brun_size(struct super_block *sb, befs_block_run run)
+{
+	return BEFS_SB(sb)->block_size * run.len;
+}
+
+#endif				/* _LINUX_BEFS_H */
diff --git a/fs/befs/befs_fs_types.h b/fs/befs/befs_fs_types.h
new file mode 100644
index 0000000..9095518
--- /dev/null
+++ b/fs/befs/befs_fs_types.h
@@ -0,0 +1,213 @@
+/*
+ * include/linux/befs_fs_types.h
+ *
+ * Copyright (C) 2001 Will Dyson (will@cs.earlham.edu)
+ *
+ *
+ *
+ * from linux/include/linux/befs_fs.h
+ *
+ * Copyright (C) 1999 Makoto Kato (m_kato@ga2.so-net.ne.jp)
+ *
+ */
+
+#ifndef _LINUX_BEFS_FS_TYPES
+#define _LINUX_BEFS_FS_TYPES
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#endif /*__KERNEL__*/
+
+#define PACKED __attribute__ ((__packed__))
+
+/*
+ * Max name lengths of BFS
+ */
+
+#define BEFS_NAME_LEN 255
+
+#define BEFS_SYMLINK_LEN 144
+#define BEFS_NUM_DIRECT_BLOCKS 12
+#define B_OS_NAME_LENGTH 32
+
+/* The datastream blocks mapped by the double-indirect
+ * block are always 4 fs blocks long.
+ * This eliminates the need for linear searches among
+ * the potentially huge number of indirect blocks
+ *
+ * Err. Should that be 4 fs blocks or 4k???
+ * It matters on large blocksize volumes
+ */
+#define BEFS_DBLINDIR_BRUN_LEN 4
+
+/*
+ * Flags of superblock
+ */
+
+enum super_flags {
+	BEFS_BYTESEX_BE,
+	BEFS_BYTESEX_LE,
+	BEFS_CLEAN = 0x434c454e,
+	BEFS_DIRTY = 0x44495254,
+	BEFS_SUPER_MAGIC1 = 0x42465331,	/* BFS1 */
+	BEFS_SUPER_MAGIC2 = 0xdd121031,
+	BEFS_SUPER_MAGIC3 = 0x15b6830e,
+};
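+
+/* BEFS_CLEAN and BEFS_DIRTY are simply the ASCII strings "CLEN" and "DIRT"
+ * stored as 32-bit values; BEFS_BYTEORDER_NATIVE below is "BIGE".
+ */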
+
+#define BEFS_BYTEORDER_NATIVE 0x42494745
+
+#define BEFS_SUPER_MAGIC BEFS_SUPER_MAGIC1
+
+/*
+ * Flags of inode
+ */
+
+#define BEFS_INODE_MAGIC1 0x3bbe0ad9
+
+enum inode_flags {
+	BEFS_INODE_IN_USE = 0x00000001,
+	BEFS_ATTR_INODE = 0x00000004,
+	BEFS_INODE_LOGGED = 0x00000008,
+	BEFS_INODE_DELETED = 0x00000010,
+	BEFS_LONG_SYMLINK = 0x00000040,
+	BEFS_PERMANENT_FLAG = 0x0000ffff,
+	BEFS_INODE_NO_CREATE = 0x00010000,
+	BEFS_INODE_WAS_WRITTEN = 0x00020000,
+	BEFS_NO_TRANSACTION = 0x00040000,
+};
+/* 
+ * On-Disk datastructures of BeFS
+ */
+
+typedef u64 befs_off_t;
+typedef u64 befs_time_t;
+typedef void befs_binode_etc;
+
+/* Block runs */
+typedef struct {
+	u32 allocation_group;
+	u16 start;
+	u16 len;
+} PACKED befs_block_run;
+
+typedef befs_block_run befs_inode_addr;
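+
+/* A block run names `len' consecutive filesystem blocks, starting at block
+ * `start' within allocation group `allocation_group'.  An inode address is
+ * just a block run, normally of length 1, naming the block that holds the
+ * inode.
+ */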
+
+/*
+ * The Superblock Structure
+ */
+typedef struct {
+	char name[B_OS_NAME_LENGTH];
+	u32 magic1;
+	u32 fs_byte_order;
+
+	u32 block_size;
+	u32 block_shift;
+
+	befs_off_t num_blocks;
+	befs_off_t used_blocks;
+
+	u32 inode_size;
+
+	u32 magic2;
+	u32 blocks_per_ag;
+	u32 ag_shift;
+	u32 num_ags;
+
+	u32 flags;
+
+	befs_block_run log_blocks;
+	befs_off_t log_start;
+	befs_off_t log_end;
+
+	u32 magic3;
+	befs_inode_addr root_dir;
+	befs_inode_addr indices;
+
+} PACKED befs_super_block;
+
+/* 
+ * Note: the indirect and dbl_indir block_runs may
+ * be longer than one block!
+ */
+typedef struct {
+	befs_block_run direct[BEFS_NUM_DIRECT_BLOCKS];
+	befs_off_t max_direct_range;
+	befs_block_run indirect;
+	befs_off_t max_indirect_range;
+	befs_block_run double_indirect;
+	befs_off_t max_double_indirect_range;
+	befs_off_t size;
+} PACKED befs_data_stream;
+
+/* Attribute */
+typedef struct {
+	u32 type;
+	u16 name_size;
+	u16 data_size;
+	char name[1];
+} PACKED befs_small_data;
+
+/* Inode structure */
+typedef struct {
+	u32 magic1;
+	befs_inode_addr inode_num;
+	u32 uid;
+	u32 gid;
+	u32 mode;
+	u32 flags;
+	befs_time_t create_time;
+	befs_time_t last_modified_time;
+	befs_inode_addr parent;
+	befs_inode_addr attributes;
+	u32 type;
+
+	u32 inode_size;
+	u32 etc;		/* not used */
+
+	union {
+		befs_data_stream datastream;
+		char symlink[BEFS_SYMLINK_LEN];
+	} data;
+
+	u32 pad[4];		/* not used */
+	befs_small_data small_data[1];
+} PACKED befs_inode;
+
+/*
+ * B+tree superblock
+ */
+
+#define BEFS_BTREE_MAGIC 0x69f6c2e8
+
+enum btree_types {
+	BTREE_STRING_TYPE = 0,
+	BTREE_INT32_TYPE = 1,
+	BTREE_UINT32_TYPE = 2,
+	BTREE_INT64_TYPE = 3,
+	BTREE_UINT64_TYPE = 4,
+	BTREE_FLOAT_TYPE = 5,
+	BTREE_DOUBLE_TYPE = 6
+};
+
+typedef struct {
+	u32 magic;
+	u32 node_size;
+	u32 max_depth;
+	u32 data_type;
+	befs_off_t root_node_ptr;
+	befs_off_t free_node_ptr;
+	befs_off_t max_size;
+} PACKED befs_btree_super;
+
+/*
+ * Header structure of each btree node
+ */
+typedef struct {
+	befs_off_t left;
+	befs_off_t right;
+	befs_off_t overflow;
+	u16 all_key_count;
+	u16 all_key_length;
+} PACKED befs_btree_nodehead;
+
+#endif				/* _LINUX_BEFS_FS_TYPES */
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
new file mode 100644
index 0000000..76e2197
--- /dev/null
+++ b/fs/befs/btree.c
@@ -0,0 +1,788 @@
+/*
+ * linux/fs/befs/btree.c
+ *
+ * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com>
+ *
+ * Licensed under the GNU GPL. See the file COPYING for details.
+ *
+ * 2002-02-05: Sergey S. Kostyliov added binary search within
+ * 		btree nodes.
+ *
+ * Many thanks to:
+ *
+ * Dominic Giampaolo, author of "Practical File System
+ * Design with the Be File System", for such a helpful book.
+ * 
+ * Marcus J. Ranum, author of the b+tree package in 
+ * comp.sources.misc volume 10. This code is not copied from that
+ * work, but it is partially based on it.
+ *
+ * Makoto Kato, author of the original BeFS for linux filesystem
+ * driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/buffer_head.h>
+
+#include "befs.h"
+#include "btree.h"
+#include "datastream.h"
+#include "endian.h"
+
+/*
+ * The btree functions in this file are built on top of the
+ * datastream.c interface, which is in turn built on top of the
+ * io.c interface.
+ */
+
+/* Befs B+tree structure:
+ * 
+ * The first thing in the tree is the tree superblock. It tells you
+ * all kinds of useful things about the tree, like where the rootnode
+ * is located, and the size of the nodes (always 1024 with current version
+ * of BeOS).
+ *
+ * The rest of the tree consists of a series of nodes. Nodes contain a header
+ * (struct befs_btree_nodehead), the packed key data, an array of shorts 
+ * containing the ending offsets for each of the keys, and an array of
+ * befs_off_t values. In interior nodes, the keys are the ending keys for 
+ * the childnode they point to, and the values are offsets into the 
+ * datastream containing the tree. 
+ */
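+
+/* A rough picture of one node, as assumed by the helper functions further
+ * down in this file (befs_bt_keydata, befs_bt_keylen_index and
+ * befs_bt_valarray):
+ *
+ *	+--------------------------+
+ *	| befs_btree_nodehead      |
+ *	+--------------------------+
+ *	| packed key strings       |  head.all_key_length bytes
+ *	+--------------------------+  (rounded up to 8 bytes)
+ *	| u16 key ending offsets   |  head.all_key_count entries
+ *	+--------------------------+
+ *	| befs_off_t value array   |  head.all_key_count entries
+ *	+--------------------------+
+ */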
+
+/* Note:
+ * 
+ * The book states 2 confusing things about befs b+trees. First, 
+ * it states that the overflow field of node headers is used by internal nodes
+ * to point to another node that "effectively continues this one". Here is what
+ * I believe that means. Each key in internal nodes points to another node that
+ * contains key values less than itself. Inspection reveals that the last key 
+ * in the internal node is not the last key in the index. Keys that are 
+ * greater than the last key in the internal node go into the overflow node. 
+ * I imagine there is a performance reason for this.
+ *
+ * Second, it states that the header of a btree node is sufficient to 
+ * distinguish internal nodes from leaf nodes. Without saying exactly how. 
+ * After figuring out the first, it becomes obvious that internal nodes have
+ * overflow nodes and leafnodes do not.
+ */
+
+/* 
+ * Currently, this code is only good for directory B+trees.
+ * In order to be used for other BFS indexes, it needs to be extended to handle
+ * duplicate keys and non-string keytypes (int32, int64, float, double).
+ */
+
+/*
+ * In memory structure of each btree node
+ */
+typedef struct {
+	befs_btree_nodehead head;	/* head of node converted to cpu byteorder */
+	struct buffer_head *bh;
+	befs_btree_nodehead *od_node;	/* on disk node */
+} befs_btree_node;
+
+/* local constants */
+static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL;
+
+/* local functions */
+static int befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds,
+			       befs_btree_super * bt_super,
+			       befs_btree_node * this_node,
+			       befs_off_t * node_off);
+
+static int befs_bt_read_super(struct super_block *sb, befs_data_stream * ds,
+			      befs_btree_super * sup);
+
+static int befs_bt_read_node(struct super_block *sb, befs_data_stream * ds,
+			     befs_btree_node * node, befs_off_t node_off);
+
+static int befs_leafnode(befs_btree_node * node);
+
+static u16 *befs_bt_keylen_index(befs_btree_node * node);
+
+static befs_off_t *befs_bt_valarray(befs_btree_node * node);
+
+static char *befs_bt_keydata(befs_btree_node * node);
+
+static int befs_find_key(struct super_block *sb, befs_btree_node * node,
+			 const char *findkey, befs_off_t * value);
+
+static char *befs_bt_get_key(struct super_block *sb, befs_btree_node * node,
+			     int index, u16 * keylen);
+
+static int befs_compare_strings(const void *key1, int keylen1,
+				const void *key2, int keylen2);
+
+/**
+ * befs_bt_read_super - read in btree superblock and convert to cpu byteorder
+ * @sb: Filesystem superblock
+ * @ds: Datastream to read from
+ * @sup: Buffer in which to place the btree superblock
+ *
+ * Calls befs_read_datastream to read in the btree superblock and
+ * makes sure it is in cpu byteorder, byteswapping if necessary.
+ *
+ * On success, returns BEFS_OK and *@sup contains the btree superblock,
+ * in cpu byte order.
+ *
+ * On failure, BEFS_ERR is returned.
+ */
+static int
+befs_bt_read_super(struct super_block *sb, befs_data_stream * ds,
+		   befs_btree_super * sup)
+{
+	struct buffer_head *bh = NULL;
+	befs_btree_super *od_sup = NULL;
+
+	befs_debug(sb, "---> befs_btree_read_super()");
+
+	bh = befs_read_datastream(sb, ds, 0, NULL);
+
+	if (!bh) {
+		befs_error(sb, "Couldn't read index header.");
+		goto error;
+	}
+	od_sup = (befs_btree_super *) bh->b_data;
+	befs_dump_index_entry(sb, od_sup);
+
+	sup->magic = fs32_to_cpu(sb, od_sup->magic);
+	sup->node_size = fs32_to_cpu(sb, od_sup->node_size);
+	sup->max_depth = fs32_to_cpu(sb, od_sup->max_depth);
+	sup->data_type = fs32_to_cpu(sb, od_sup->data_type);
+	sup->root_node_ptr = fs64_to_cpu(sb, od_sup->root_node_ptr);
+	sup->free_node_ptr = fs64_to_cpu(sb, od_sup->free_node_ptr);
+	sup->max_size = fs64_to_cpu(sb, od_sup->max_size);
+
+	brelse(bh);
+	if (sup->magic != BEFS_BTREE_MAGIC) {
+		befs_error(sb, "Index header has bad magic.");
+		goto error;
+	}
+
+	befs_debug(sb, "<--- befs_btree_read_super()");
+	return BEFS_OK;
+
+      error:
+	befs_debug(sb, "<--- befs_btree_read_super() ERROR");
+	return BEFS_ERR;
+}
+
+/**
+ * befs_bt_read_node - read in btree node and convert to cpu byteorder
+ * @sb: Filesystem superblock
+ * @ds: Datastream to read from
+ * @node: Buffer in which to place the btree node
+ * @node_off: Starting offset (in bytes) of the node in @ds
+ *
+ * Calls befs_read_datastream to read in the indicated btree node and
+ * makes sure its header fields are in cpu byteorder, byteswapping if
+ * necessary.
+ * Note: node->bh must be NULL the first time this function is called.
+ * Don't forget to brelse(node->bh) after the last call.
+ *
+ * On success, returns BEFS_OK and *@node contains the btree node that
+ * starts at @node_off, with the node->head fields in cpu byte order.
+ *
+ * On failure, BEFS_ERR is returned.
+ */
+
+static int
+befs_bt_read_node(struct super_block *sb, befs_data_stream * ds,
+		  befs_btree_node * node, befs_off_t node_off)
+{
+	uint off = 0;
+
+	befs_debug(sb, "---> befs_bt_read_node()");
+
+	if (node->bh)
+		brelse(node->bh);
+
+	node->bh = befs_read_datastream(sb, ds, node_off, &off);
+	if (!node->bh) {
+		befs_error(sb, "befs_bt_read_node() failed to read "
+			   "node at %Lu", node_off);
+		befs_debug(sb, "<--- befs_bt_read_node() ERROR");
+
+		return BEFS_ERR;
+	}
+	node->od_node =
+	    (befs_btree_nodehead *) ((void *) node->bh->b_data + off);
+
+	befs_dump_index_node(sb, node->od_node);
+
+	node->head.left = fs64_to_cpu(sb, node->od_node->left);
+	node->head.right = fs64_to_cpu(sb, node->od_node->right);
+	node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow);
+	node->head.all_key_count =
+	    fs16_to_cpu(sb, node->od_node->all_key_count);
+	node->head.all_key_length =
+	    fs16_to_cpu(sb, node->od_node->all_key_length);
+
+	befs_debug(sb, "<--- befs_btree_read_node()");
+	return BEFS_OK;
+}
+
+/**
+ * befs_btree_find - Find a key in a befs B+tree
+ * @sb: Filesystem superblock
+ * @ds: Datastream containing btree
+ * @key: Key string to lookup in btree
+ * @value: Value stored with @key
+ *
+ * On success, returns BEFS_OK and sets *@value to the value stored
+ * with @key (usually the disk block number of an inode).
+ *
+ * On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND.
+ * 
+ * Algorithm: 
+ *   Read the superblock and rootnode of the b+tree.
+ *   Drill down through the interior nodes using befs_find_key().
+ *   Once at the correct leaf node, use befs_find_key() again to get the
+ *   actual value stored with the key.
+ */
+int
+befs_btree_find(struct super_block *sb, befs_data_stream * ds,
+		const char *key, befs_off_t * value)
+{
+	befs_btree_node *this_node = NULL;
+	befs_btree_super bt_super;
+	befs_off_t node_off;
+	int res;
+
+	befs_debug(sb, "---> befs_btree_find() Key: %s", key);
+
+	if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) {
+		befs_error(sb,
+			   "befs_btree_find() failed to read index superblock");
+		goto error;
+	}
+
+	this_node = (befs_btree_node *) kmalloc(sizeof (befs_btree_node),
+						GFP_NOFS);
+	if (!this_node) {
+		befs_error(sb, "befs_btree_find() failed to allocate %u "
+			   "bytes of memory", sizeof (befs_btree_node));
+		goto error;
+	}
+
+	this_node->bh = NULL;
+
+	/* read in root node */
+	node_off = bt_super.root_node_ptr;
+	if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) {
+		befs_error(sb, "befs_btree_find() failed to read "
+			   "node at %Lu", node_off);
+		goto error_alloc;
+	}
+
+	while (!befs_leafnode(this_node)) {
+		res = befs_find_key(sb, this_node, key, &node_off);
+		if (res == BEFS_BT_NOT_FOUND)
+			node_off = this_node->head.overflow;
+		/* if no match, go to overflow node */
+		if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) {
+			befs_error(sb, "befs_btree_find() failed to read "
+				   "node at %Lu", node_off);
+			goto error_alloc;
+		}
+	}
+
+	/* at the correct leaf node now */
+
+	res = befs_find_key(sb, this_node, key, value);
+
+	brelse(this_node->bh);
+	kfree(this_node);
+
+	if (res != BEFS_BT_MATCH) {
+		befs_debug(sb, "<--- befs_btree_find() Key %s not found", key);
+		*value = 0;
+		return BEFS_BT_NOT_FOUND;
+	}
+	befs_debug(sb, "<--- befs_btree_find() Found key %s, value %Lu",
+		   key, *value);
+	return BEFS_OK;
+
+      error_alloc:
+	kfree(this_node);
+      error:
+	*value = 0;
+	befs_debug(sb, "<--- befs_btree_find() ERROR");
+	return BEFS_ERR;
+}
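+
+/* Minimal usage sketch (hypothetical caller): a directory lookup would hand
+ * in the name being searched for and get back the value stored with it,
+ * typically the inode number of the named entry:
+ *
+ *	befs_off_t inode_num;
+ *
+ *	if (befs_btree_find(sb, ds, name, &inode_num) != BEFS_OK)
+ *		... the name is not in this directory ...
+ */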
+
+/**
+ * befs_find_key - Search for a key within a node
+ * @sb: Filesystem superblock
+ * @node: Node to find the key within
+ * @key: Keystring to search for
+ * @value: If key is found, the value stored with the key is put here
+ *
+ * finds exact match if one exists, and returns BEFS_BT_MATCH
+ * If no exact match, finds first key in node that is greater
+ * (alphabetically) than the search key and returns BEFS_BT_PARMATCH
+ * (for partial match, I guess). Can you think of something better to
+ * call it?
+ *
+ * If no key was a match or greater than the search key, return
+ * BEFS_BT_NOT_FOUND.
+ *
+ * Use binary search instead of a linear.
+ */
+static int
+befs_find_key(struct super_block *sb, befs_btree_node * node,
+	      const char *findkey, befs_off_t * value)
+{
+	int first, last, mid;
+	int eq;
+	u16 keylen;
+	int findkey_len;
+	char *thiskey;
+	befs_off_t *valarray;
+
+	befs_debug(sb, "---> befs_find_key() %s", findkey);
+
+	*value = 0;
+
+	findkey_len = strlen(findkey);
+
+	/* if the node cannot contain the key, skip this node */
+	last = node->head.all_key_count - 1;
+	thiskey = befs_bt_get_key(sb, node, last, &keylen);
+
+	eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len);
+	if (eq < 0) {
+		befs_debug(sb, "<--- befs_find_key() %s not found", findkey);
+		return BEFS_BT_NOT_FOUND;
+	}
+
+	valarray = befs_bt_valarray(node);
+
+	/* simple binary search */
+	first = 0;
+	mid = 0;
+	while (last >= first) {
+		mid = (last + first) / 2;
+		befs_debug(sb, "first: %d, last: %d, mid: %d", first, last,
+			   mid);
+		thiskey = befs_bt_get_key(sb, node, mid, &keylen);
+		eq = befs_compare_strings(thiskey, keylen, findkey,
+					  findkey_len);
+
+		if (eq == 0) {
+			befs_debug(sb, "<--- befs_find_key() found %s at %d",
+				   thiskey, mid);
+
+			*value = fs64_to_cpu(sb, valarray[mid]);
+			return BEFS_BT_MATCH;
+		}
+		if (eq > 0)
+			last = mid - 1;
+		else
+			first = mid + 1;
+	}
+	if (eq < 0)
+		*value = fs64_to_cpu(sb, valarray[mid + 1]);
+	else
+		*value = fs64_to_cpu(sb, valarray[mid]);
+	befs_debug(sb, "<--- befs_find_key() found %s at %d", thiskey, mid);
+	return BEFS_BT_PARMATCH;
+}
+
+/**
+ * befs_btree_read - Traverse leafnodes of a btree
+ * @sb: Filesystem superblock
+ * @ds: Datastream containing btree
+ * @key_no: Key number (alphabetical order) of key to read
+ * @bufsize: Size of the buffer to return key in
+ * @keybuf: Pointer to a buffer to put the key in
+ * @keysize: Length of the returned key
+ * @value: Value stored with the returned key
+ *
+ * Here's how it works: Key_no is the index of the key/value pair to 
+ * return in keybuf/value.
+ * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is 
+ * the number of characters in the key (just a convenience).
+ *
+ * Algorithm:
+ *   Get the first leafnode of the tree. See if the requested key is in that
+ *   node. If not, follow the node->right link to the next leafnode. Repeat 
+ *   until the (key_no)th key is found or the tree is out of keys.
+ */
+int
+befs_btree_read(struct super_block *sb, befs_data_stream * ds,
+		loff_t key_no, size_t bufsize, char *keybuf, size_t * keysize,
+		befs_off_t * value)
+{
+	befs_btree_node *this_node;
+	befs_btree_super bt_super;
+	befs_off_t node_off = 0;
+	int cur_key;
+	befs_off_t *valarray;
+	char *keystart;
+	u16 keylen;
+	int res;
+
+	uint key_sum = 0;
+
+	befs_debug(sb, "---> befs_btree_read()");
+
+	if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) {
+		befs_error(sb,
+			   "befs_btree_read() failed to read index superblock");
+		goto error;
+	}
+
+	if ((this_node = (befs_btree_node *)
+	     kmalloc(sizeof (befs_btree_node), GFP_NOFS)) == NULL) {
+		befs_error(sb, "befs_btree_read() failed to allocate %u "
+			   "bytes of memory", sizeof (befs_btree_node));
+		goto error;
+	}
+
+	node_off = bt_super.root_node_ptr;
+	this_node->bh = NULL;
+
+	/* seeks down to first leafnode, reads it into this_node */
+	res = befs_btree_seekleaf(sb, ds, &bt_super, this_node, &node_off);
+	if (res == BEFS_BT_EMPTY) {
+		brelse(this_node->bh);
+		kfree(this_node);
+		*value = 0;
+		*keysize = 0;
+		befs_debug(sb, "<--- befs_btree_read() Tree is EMPTY");
+		return BEFS_BT_EMPTY;
+	} else if (res == BEFS_ERR) {
+		goto error_alloc;
+	}
+
+	/* find the leaf node containing the key_no key */
+
+	while (key_sum + this_node->head.all_key_count <= key_no) {
+
+		/* no more nodes to look in: key_no is too large */
+		if (this_node->head.right == befs_bt_inval) {
+			*keysize = 0;
+			*value = 0;
+			befs_debug(sb,
+				   "<--- befs_btree_read() END of keys at %Lu",
+				   key_sum + this_node->head.all_key_count);
+			brelse(this_node->bh);
+			kfree(this_node);
+			return BEFS_BT_END;
+		}
+
+		key_sum += this_node->head.all_key_count;
+		node_off = this_node->head.right;
+
+		if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) {
+			befs_error(sb, "befs_btree_read() failed to read "
+				   "node at %Lu", node_off);
+			goto error_alloc;
+		}
+	}
+
+	/* how many keys into this_node is key_no */
+	cur_key = key_no - key_sum;
+
+	/* get pointers to datastructures within the node body */
+	valarray = befs_bt_valarray(this_node);
+
+	keystart = befs_bt_get_key(sb, this_node, cur_key, &keylen);
+
+	befs_debug(sb, "Read [%Lu,%d]: keysize %d", node_off, cur_key, keylen);
+
+	if (bufsize < keylen + 1) {
+		befs_error(sb, "befs_btree_read() keybuf too small (%u) "
+			   "for key of size %d", bufsize, keylen);
+		brelse(this_node->bh);
+		goto error_alloc;
+	};
+
+	strncpy(keybuf, keystart, keylen);
+	*value = fs64_to_cpu(sb, valarray[cur_key]);
+	*keysize = keylen;
+	keybuf[keylen] = '\0';
+
+	befs_debug(sb, "Read [%Lu,%d]: Key \"%.*s\", Value %Lu", node_off,
+		   cur_key, keylen, keybuf, *value);
+
+	brelse(this_node->bh);
+	kfree(this_node);
+
+	befs_debug(sb, "<--- befs_btree_read()");
+
+	return BEFS_OK;
+
+      error_alloc:
+	kfree(this_node);
+
+      error:
+	*keysize = 0;
+	*value = 0;
+	befs_debug(sb, "<--- befs_btree_read() ERROR");
+	return BEFS_ERR;
+}
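+
+/* Minimal usage sketch (hypothetical caller): a readdir-style loop that
+ * pulls the keys out of the tree one at a time, in order, until the tree
+ * runs out of keys:
+ *
+ *	char keybuf[BEFS_NAME_LEN + 1];
+ *	size_t keysize;
+ *	befs_off_t value;
+ *	loff_t i = 0;
+ *
+ *	while (befs_btree_read(sb, ds, i++, sizeof (keybuf), keybuf,
+ *			       &keysize, &value) == BEFS_OK) {
+ *		... hand keybuf (keysize bytes) and value to the caller ...
+ *	}
+ *
+ * BEFS_BT_END, BEFS_BT_EMPTY or BEFS_ERR all end the loop; a real caller
+ * would want to tell those cases apart.
+ */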
+
+/**
+ * befs_btree_seekleaf - Find the first leafnode in the btree
+ * @sb: Filesystem superblock
+ * @ds: Datastream containing btree
+ * @bt_super: Pointer to the superblock of the btree
+ * @this_node: Buffer to return the leafnode in
+ * @node_off: Pointer to offset of current node within datastream. Modified
+ * 		by the function.
+ *
+ *
+ * Helper function for btree traverse. Moves the current position to the 
+ * start of the first leaf node.
+ *
+ * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY.
+ */
+static int
+befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds,
+		    befs_btree_super * bt_super, befs_btree_node * this_node,
+		    befs_off_t * node_off)
+{
+
+	befs_debug(sb, "---> befs_btree_seekleaf()");
+
+	if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) {
+		befs_error(sb, "befs_btree_seekleaf() failed to read "
+			   "node at %Lu", *node_off);
+		goto error;
+	}
+	befs_debug(sb, "Seekleaf to root node %Lu", *node_off);
+
+	if (this_node->head.all_key_count == 0 && befs_leafnode(this_node)) {
+		befs_debug(sb, "<--- befs_btree_seekleaf() Tree is EMPTY");
+		return BEFS_BT_EMPTY;
+	}
+
+	while (!befs_leafnode(this_node)) {
+
+		if (this_node->head.all_key_count == 0) {
+			befs_debug(sb, "befs_btree_seekleaf() encountered "
+				   "an empty interior node: %Lu. Using Overflow "
+				   "node: %Lu", *node_off,
+				   this_node->head.overflow);
+			*node_off = this_node->head.overflow;
+		} else {
+			befs_off_t *valarray = befs_bt_valarray(this_node);
+			*node_off = fs64_to_cpu(sb, valarray[0]);
+		}
+		if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) {
+			befs_error(sb, "befs_btree_seekleaf() failed to read "
+				   "node at %Lu", *node_off);
+			goto error;
+		}
+
+		befs_debug(sb, "Seekleaf to child node %Lu", *node_off);
+	}
+	befs_debug(sb, "Node %Lu is a leaf node", *node_off);
+
+	return BEFS_OK;
+
+      error:
+	befs_debug(sb, "<--- befs_btree_seekleaf() ERROR");
+	return BEFS_ERR;
+}
+
+/**
+ * befs_leafnode - Determine if the btree node is a leaf node or an 
+ * interior node
+ * @node: Pointer to node structure to test
+ * 
+ * Return 1 if leaf, 0 if interior
+ */
+static int
+befs_leafnode(befs_btree_node * node)
+{
+	/* all interior nodes (and only interior nodes) have an overflow node */
+	if (node->head.overflow == befs_bt_inval)
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * befs_bt_keylen_index - Finds start of keylen index in a node
+ * @node: Pointer to the node structure to find the keylen index within
+ *
+ * Returns a pointer to the start of the key length index array
+ * of the B+tree node *@node
+ *
+ * "The length of all the keys in the node is added to the size of the
+ * header and then rounded up to a multiple of four to get the beginning
+ * of the key length index" (p.88, practical filesystem design).
+ *
+ * Except that rounding up to 8 works, and rounding up to 4 doesn't.
+ */
+static u16 *
+befs_bt_keylen_index(befs_btree_node * node)
+{
+	const int keylen_align = 8;
+	unsigned long int off =
+	    (sizeof (befs_btree_nodehead) + node->head.all_key_length);
+	ulong tmp = off % keylen_align;
+
+	if (tmp)
+		off += keylen_align - tmp;
+
+	return (u16 *) ((void *) node->od_node + off);
+}
+
+/**
+ * befs_bt_valarray - Finds the start of value array in a node
+ * @node: Pointer to the node structure to find the value array within
+ *
+ * Returns a pointer to the start of the value array
+ * of the node pointed to by the node header
+ */
+static befs_off_t *
+befs_bt_valarray(befs_btree_node * node)
+{
+	void *keylen_index_start = (void *) befs_bt_keylen_index(node);
+	size_t keylen_index_size = node->head.all_key_count * sizeof (u16);
+
+	return (befs_off_t *) (keylen_index_start + keylen_index_size);
+}
+
+/**
+ * befs_bt_keydata - Finds start of keydata array in a node
+ * @node: Pointer to the node structure to find the keydata array within
+ *
+ * Returns a pointer to the start of the keydata array
+ * of the node pointed to by the node header 
+ */
+static char *
+befs_bt_keydata(befs_btree_node * node)
+{
+	return (char *) ((void *) node->od_node + sizeof (befs_btree_nodehead));
+}
+
+/**
+ * befs_bt_get_key - returns a pointer to the start of a key
+ * @sb: filesystem superblock
+ * @node: node in which to look for the key
+ * @index: the index of the key to get
+ * @keylen: modified to be the length of the key at @index
+ *
+ * Returns a valid pointer into @node on success.
+ * Returns NULL on failure (bad input) and sets *@keylen = 0
+ */
+static char *
+befs_bt_get_key(struct super_block *sb, befs_btree_node * node,
+		int index, u16 * keylen)
+{
+	int prev_key_end;
+	char *keystart;
+	u16 *keylen_index;
+
+	if (index < 0 || index >= node->head.all_key_count) {
+		*keylen = 0;
+		return NULL;
+	}
+
+	keystart = befs_bt_keydata(node);
+	keylen_index = befs_bt_keylen_index(node);
+
+	if (index == 0)
+		prev_key_end = 0;
+	else
+		prev_key_end = fs16_to_cpu(sb, keylen_index[index - 1]);
+
+	*keylen = fs16_to_cpu(sb, keylen_index[index]) - prev_key_end;
+
+	return keystart + prev_key_end;
+}
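+
+/* For example, a node holding the keys "ape", "bee" and "cat" (in that
+ * order) has the packed key data "apebeecat" and the key ending offsets
+ * {3, 6, 9}; befs_bt_get_key(sb, node, 1, &len) then returns a pointer to
+ * "bee" within the key data and sets len = 6 - 3 = 3.
+ */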
+
+/**
+ * befs_compare_strings - compare two strings
+ * @key1: pointer to the first key to be compared 
+ * @keylen1: length in bytes of key1
+ * @key2: pointer to the second key to be compared
+ * @keylen2: length in bytes of key2
+ *
+ * Returns 0 if @key1 and @key2 are equal.
+ * Returns >0 if @key1 is greater.
+ * Returns <0 if @key2 is greater.
+ */
+static int
+befs_compare_strings(const void *key1, int keylen1,
+		     const void *key2, int keylen2)
+{
+	int len = min_t(int, keylen1, keylen2);
+	int result = strncmp(key1, key2, len);
+	if (result == 0)
+		result = keylen1 - keylen2;
+	return result;
+}
+
+/* These will be used for non-string keyed btrees */
+#if 0
+static int
+btree_compare_int32(const void *key1, int keylen1, const void *key2, int keylen2)
+{
+	return *(int32_t *) key1 - *(int32_t *) key2;
+}
+
+static int
+btree_compare_uint32(const void *key1, int keylen1,
+		     const void *key2, int keylen2)
+{
+	if (*(u_int32_t *) key1 == *(u_int32_t *) key2)
+		return 0;
+	else if (*(u_int32_t *) key1 > *(u_int32_t *) key2)
+		return 1;
+
+	return -1;
+}
+static int
+btree_compare_int64(const void *key1, int keylen1, const void *key2, int keylen2)
+{
+	if (*(int64_t *) key1 == *(int64_t *) key2)
+		return 0;
+	else if (*(int64_t *) key1 > *(int64_t *) key2)
+		return 1;
+
+	return -1;
+}
+
+static int
+btree_compare_uint64(const void *key1, int keylen1,
+		     const void *key2, int keylen2)
+{
+	if (*(u_int64_t *) key1 == *(u_int64_t *) key2)
+		return 0;
+	else if (*(u_int64_t *) key1 > *(u_int64_t *) key2)
+		return 1;
+
+	return -1;
+}
+
+static int
+btree_compare_float(const void *key1, int keylen1, const void *key2, int keylen2)
+{
+	float result = *(float *) key1 - *(float *) key2;
+	if (result == 0.0f)
+		return 0;
+
+	return (result < 0.0f) ? -1 : 1;
+}
+
+static int
+btree_compare_double(const void *key1, int keylen1,
+		     const void *key2, int keylen2)
+{
+	double result = *(double *) key1 - *(double *) key2;
+	if (result == 0.0)
+		return 0;
+
+	return (result < 0.0) ? -1 : 1;
+}
+#endif				//0
diff --git a/fs/befs/btree.h b/fs/befs/btree.h
new file mode 100644
index 0000000..92e781e
--- /dev/null
+++ b/fs/befs/btree.h
@@ -0,0 +1,13 @@
+/*
+ * btree.h
+ * 
+ */
+
+
+int befs_btree_find(struct super_block *sb, befs_data_stream * ds,
+		    const char *key, befs_off_t * value);
+
+int befs_btree_read(struct super_block *sb, befs_data_stream * ds,
+		    loff_t key_no, size_t bufsize, char *keybuf,
+		    size_t * keysize, befs_off_t * value);
+
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
new file mode 100644
index 0000000..785f6b2
--- /dev/null
+++ b/fs/befs/datastream.c
@@ -0,0 +1,528 @@
+/*
+ * linux/fs/befs/datastream.c
+ *
+ * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
+ *
+ * Based on portions of file.c by Makoto Kato <m_kato@ga2.so-net.ne.jp>
+ *
+ * Many thanks to Dominic Giampaolo, author of "Practical File System
+ * Design with the Be File System", for such a helpful book.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+
+#include "befs.h"
+#include "datastream.h"
+#include "io.h"
+#include "endian.h"
+
+const befs_inode_addr BAD_IADDR = { 0, 0, 0 };
+
+static int befs_find_brun_direct(struct super_block *sb,
+				 befs_data_stream * data,
+				 befs_blocknr_t blockno, befs_block_run * run);
+
+static int befs_find_brun_indirect(struct super_block *sb,
+				   befs_data_stream * data,
+				   befs_blocknr_t blockno,
+				   befs_block_run * run);
+
+static int befs_find_brun_dblindirect(struct super_block *sb,
+				      befs_data_stream * data,
+				      befs_blocknr_t blockno,
+				      befs_block_run * run);
+
+/**
+ * befs_read_datastream - get buffer_head containing data, starting from pos.
+ * @sb: Filesystem superblock
+ * @ds: datastream to find the data in
+ * @pos: start of data
+ * @off: offset of data in buffer_head->b_data
+ *
+ * Returns a pointer to a buffer_head containing the data, with *@off set
+ * to the offset of that data within b_data. If you don't need to know the
+ * offset, just pass @off = NULL.
+ */
+struct buffer_head *
+befs_read_datastream(struct super_block *sb, befs_data_stream * ds,
+		     befs_off_t pos, uint * off)
+{
+	struct buffer_head *bh = NULL;
+	befs_block_run run;
+	befs_blocknr_t block;	/* block corresponding to pos */
+
+	befs_debug(sb, "---> befs_read_datastream() %Lu", pos);
+	block = pos >> BEFS_SB(sb)->block_shift;
+	if (off)
+		*off = pos - (block << BEFS_SB(sb)->block_shift);
+
+	if (befs_fblock2brun(sb, ds, block, &run) != BEFS_OK) {
+		befs_error(sb, "BeFS: Error finding disk addr of block %lu",
+			   block);
+		befs_debug(sb, "<--- befs_read_datastream() ERROR");
+		return NULL;
+	}
+	bh = befs_bread_iaddr(sb, run);
+	if (!bh) {
+		befs_error(sb, "BeFS: Error reading block %lu from datastream",
+			   block);
+		return NULL;
+	}
+
+	befs_debug(sb, "<--- befs_read_datastream() read data, starting at %Lu",
+		   pos);
+
+	return bh;
+}
+
+/*
+ * Takes a file block number and gives back a brun whose starting block
+ * is block number fblock of the file.
+ * 
+ * Returns BEFS_OK or BEFS_ERR.
+ * 
+ * Calls specialized functions for each of the three possible
+ * datastream regions.
+ *
+ * 2001-11-15 Will Dyson
+ */
+int
+befs_fblock2brun(struct super_block *sb, befs_data_stream * data,
+		 befs_blocknr_t fblock, befs_block_run * run)
+{
+	int err;
+	befs_off_t pos = fblock << BEFS_SB(sb)->block_shift;
+
+	if (pos < data->max_direct_range) {
+		err = befs_find_brun_direct(sb, data, fblock, run);
+
+	} else if (pos < data->max_indirect_range) {
+		err = befs_find_brun_indirect(sb, data, fblock, run);
+
+	} else if (pos < data->max_double_indirect_range) {
+		err = befs_find_brun_dblindirect(sb, data, fblock, run);
+
+	} else {
+		befs_error(sb,
+			   "befs_fblock2brun() was asked to find block %lu, "
+			   "which is not mapped by the datastream\n", fblock);
+		err = BEFS_ERR;
+	}
+	return err;
+}
+
+/**
+ * befs_read_lsymlink - read a long symlink from the datastream.
+ * @sb: Filesystem superblock 
+ * @ds: Datastream to read from
+ * @buff: Buffer in which to place the long symlink data
+ * @len: Length of the long symlink in bytes
+ *
+ * Returns the number of bytes read
+ */
+size_t
+befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff,
+		   befs_off_t len)
+{
+	befs_off_t bytes_read = 0;	/* bytes read so far */
+	u16 plen;
+	struct buffer_head *bh = NULL;
+	befs_debug(sb, "---> befs_read_lsymlink() length: %Lu", len);
+
+	while (bytes_read < len) {
+		bh = befs_read_datastream(sb, ds, bytes_read, NULL);
+		if (!bh) {
+			befs_error(sb, "BeFS: Error reading datastream block "
+				   "starting from %Lu", bytes_read);
+			befs_debug(sb, "<--- befs_read_lsymlink() ERROR");
+			return bytes_read;
+
+		}
+		plen = ((bytes_read + BEFS_SB(sb)->block_size) < len) ?
+		    BEFS_SB(sb)->block_size : len - bytes_read;
+		memcpy(buff + bytes_read, bh->b_data, plen);
+		brelse(bh);
+		bytes_read += plen;
+	}
+
+	befs_debug(sb, "<--- befs_read_lsymlink() read %u bytes", bytes_read);
+	return bytes_read;
+}
+
+/**
+ * befs_count_blocks - blocks used by a file
+ * @sb: Filesystem superblock
+ * @ds: Datastream of the file
+ *
+ * Counts the number of fs blocks that the file represented by
+ * inode occupies on the filesystem, counting both regular file
+ * data and filesystem metadata (and eventually attribute data
+ * when we support attributes)
+*/
+
+befs_blocknr_t
+befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
+{
+	befs_blocknr_t blocks;
+	befs_blocknr_t datablocks;	/* File data blocks */
+	befs_blocknr_t metablocks;	/* FS metadata blocks */
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+
+	befs_debug(sb, "---> befs_count_blocks()");
+
+	datablocks = ds->size >> befs_sb->block_shift;
+	if (ds->size & (befs_sb->block_size - 1))
+		datablocks += 1;
+
+	metablocks = 1;		/* Start with 1 block for inode */
+
+	/* Size of indirect block */
+	if (ds->size > ds->max_direct_range)
+		metablocks += ds->indirect.len;
+
+	/*
+	   Double indirect block, plus all the indirect blocks it maps.
+	   In the double-indirect range, all block runs of data are
+	   BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know 
+	   how many data block runs are in the double-indirect region,
+	   and from that we know how many indirect blocks it takes to
+	   map them. We assume that the indirect blocks are also
+	   BEFS_DBLINDIR_BRUN_LEN blocks long.
+	 */
+	if (ds->size > ds->max_indirect_range && ds->max_indirect_range != 0) {
+		uint dbl_bytes;
+		uint dbl_bruns;
+		uint indirblocks;
+
+		dbl_bytes =
+		    ds->max_double_indirect_range - ds->max_indirect_range;
+		dbl_bruns =
+		    dbl_bytes / (befs_sb->block_size * BEFS_DBLINDIR_BRUN_LEN);
+		indirblocks = dbl_bruns / befs_iaddrs_per_block(sb);
+
+		metablocks += ds->double_indirect.len;
+		metablocks += indirblocks;
+	}
+
+	blocks = datablocks + metablocks;
+	befs_debug(sb, "<--- befs_count_blocks() %u blocks", blocks);
+
+	return blocks;
+}
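+
+/* For example (assuming a 1024 byte block size, i.e. block_shift of 10):
+ * a 5000 byte file that fits entirely within the direct range occupies
+ * 5000 >> 10 = 4 full blocks of data plus one partial block, plus one
+ * block for the inode itself, so befs_count_blocks() returns 6.
+ */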
+
+/*
+	Finds the block run that starts at file block number blockno
+	in the file represented by the datastream data, if that 
+	blockno is in the direct region of the datastream.
+	
+	sb: the superblock
+	data: the datastream
+	blockno: the blocknumber to find
+	run: The found run is passed back through this pointer
+	
+	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+	otherwise.
+	
+	Algorithm:
+	Linear search. Checks each element of array[] to see if it
+	contains the blockno-th filesystem block. This is necessary
+	because the block runs map variable amounts of data. Simply
+	keeps a count of the number of blocks searched so far (sum),
+	incrementing this by the length of each block run as we come
+	across it. Adds sum to *count before returning (this is so
+	you can search multiple arrays that are logically one array,
+	as in the indirect region code).
+	
+	When/if blockno is found, if blockno is inside of a block 
+	run as stored on disk, we offset the start and length members 
+	of the block run, so that blockno is the start and len is
+	still valid (the run ends in the same place).
+	
+	2001-11-15 Will Dyson
+*/
+static int
+befs_find_brun_direct(struct super_block *sb, befs_data_stream * data,
+		      befs_blocknr_t blockno, befs_block_run * run)
+{
+	int i;
+	befs_block_run *array = data->direct;
+	befs_blocknr_t sum;
+	befs_blocknr_t max_block =
+	    data->max_direct_range >> BEFS_SB(sb)->block_shift;
+
+	befs_debug(sb, "---> befs_find_brun_direct(), find %lu", blockno);
+
+	if (blockno > max_block) {
+		befs_error(sb, "befs_find_brun_direct() passed block outside of"
+			   "direct region");
+		return BEFS_ERR;
+	}
+
+	for (i = 0, sum = 0; i < BEFS_NUM_DIRECT_BLOCKS;
+	     sum += array[i].len, i++) {
+		if (blockno >= sum && blockno < sum + (array[i].len)) {
+			int offset = blockno - sum;
+			run->allocation_group = array[i].allocation_group;
+			run->start = array[i].start + offset;
+			run->len = array[i].len - offset;
+
+			befs_debug(sb, "---> befs_find_brun_direct(), "
+				   "found %lu at direct[%d]", blockno, i);
+			return BEFS_OK;
+		}
+	}
+
+	befs_debug(sb, "---> befs_find_brun_direct() ERROR");
+	return BEFS_ERR;
+}
+
+/*
+	Finds the block run that starts at file block number blockno
+	in the file represented by the datastream data, if that 
+	blockno is in the indirect region of the datastream.
+	
+	sb: the superblock
+	data: the datastream
+	blockno: the blocknumber to find
+	run: The found run is passed back through this pointer
+	
+	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+	otherwise.
+	
+	Algorithm:
+	For each block in the indirect run of the datastream, read
+	it in and search through it for	search_blk.
+	
+	XXX:
+	Really should check to make sure blockno is inside indirect
+	region.
+	
+	2001-11-15 Will Dyson
+*/
+static int
+befs_find_brun_indirect(struct super_block *sb,
+			befs_data_stream * data, befs_blocknr_t blockno,
+			befs_block_run * run)
+{
+	int i, j;
+	befs_blocknr_t sum = 0;
+	befs_blocknr_t indir_start_blk;
+	befs_blocknr_t search_blk;
+	struct buffer_head *indirblock;
+	befs_block_run *array;
+
+	befs_block_run indirect = data->indirect;
+	befs_blocknr_t indirblockno = iaddr2blockno(sb, &indirect);
+	int arraylen = befs_iaddrs_per_block(sb);
+
+	befs_debug(sb, "---> befs_find_brun_indirect(), find %lu", blockno);
+
+	indir_start_blk = data->max_direct_range >> BEFS_SB(sb)->block_shift;
+	search_blk = blockno - indir_start_blk;
+
+	/* Examine blocks of the indirect run one at a time */
+	for (i = 0; i < indirect.len; i++) {
+		indirblock = befs_bread(sb, indirblockno + i);
+		if (indirblock == NULL) {
+			befs_debug(sb,
+				   "<--- befs_find_brun_indirect() failed to "
+				   "read disk block %lu from the indirect brun",
+				   indirblockno + i);
+			return BEFS_ERR;
+		}
+
+		array = (befs_block_run *) indirblock->b_data;
+
+		for (j = 0; j < arraylen; ++j) {
+			int len = fs16_to_cpu(sb, array[j].len);
+
+			if (search_blk >= sum && search_blk < sum + len) {
+				int offset = search_blk - sum;
+				run->allocation_group =
+				    fs32_to_cpu(sb, array[j].allocation_group);
+				run->start =
+				    fs16_to_cpu(sb, array[j].start) + offset;
+				run->len =
+				    fs16_to_cpu(sb, array[j].len) - offset;
+
+				brelse(indirblock);
+				befs_debug(sb,
+					   "<--- befs_find_brun_indirect() found "
+					   "file block %lu at indirect[%d]",
+					   blockno, j + (i * arraylen));
+				return BEFS_OK;
+			}
+			sum += len;
+		}
+
+		brelse(indirblock);
+	}
+
+	/* Only fallthrough is an error */
+	befs_error(sb, "BeFS: befs_find_brun_indirect() failed to find "
+		   "file block %lu", blockno);
+
+	befs_debug(sb, "<--- befs_find_brun_indirect() ERROR");
+	return BEFS_ERR;
+}
+
+/*
+	Finds the block run that starts at file block number blockno
+	in the file represented by the datastream data, if that 
+	blockno is in the double-indirect region of the datastream.
+	
+	sb: the superblock
+	data: the datastream
+	blockno: the blocknumber to find
+	run: The found run is passed back through this pointer
+	
+	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+	otherwise.
+	
+	Algorithm:
+	The block runs in the double-indirect region are different.
+	They are always allocated 4 fs blocks at a time, so each
+	block run maps a constant amount of file data. This means
+	that we can directly calculate how many block runs into the
+	double-indirect region we need to go to get to the one that
+	maps a particular filesystem block.
+	
+	We do this in two stages. First we calculate which of the
+	inode addresses in the double-indirect block will point us
+	to the indirect block that contains the mapping for the data,
+	then we calculate which of the inode addresses in that 
+	indirect block maps the data block we are after.
+	
+	Oh, and once we've done that, we actually read in the blocks 
+	that contain the inode addresses we calculated above. Even 
+	though the double-indirect run may be several blocks long, 
+	we can calculate which of those blocks will contain the index
+	we are after and only read that one. We then follow it to 
+	the indirect block and perform a  similar process to find
+	the actual block run that maps the data block we are interested
+	in.
+	
+	Then we offset the run as in befs_find_brun_array() and we are 
+	done.
+	
+	2001-11-15 Will Dyson
+*/
+static int
+befs_find_brun_dblindirect(struct super_block *sb,
+			   befs_data_stream * data, befs_blocknr_t blockno,
+			   befs_block_run * run)
+{
+	int dblindir_indx;
+	int indir_indx;
+	int offset;
+	int dbl_which_block;
+	int which_block;
+	int dbl_block_indx;
+	int block_indx;
+	off_t dblindir_leftover;
+	befs_blocknr_t blockno_at_run_start;
+	struct buffer_head *dbl_indir_block;
+	struct buffer_head *indir_block;
+	befs_block_run indir_run;
+	befs_inode_addr *iaddr_array = NULL;
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+
+	befs_blocknr_t indir_start_blk =
+	    data->max_indirect_range >> befs_sb->block_shift;
+
+	off_t dbl_indir_off = blockno - indir_start_blk;
+
+	/* number of data blocks mapped by each of the iaddrs in
+	 * the indirect block pointed to by the double indirect block
+	 */
+	size_t iblklen = BEFS_DBLINDIR_BRUN_LEN;
+
+	/* number of data blocks mapped by each of the iaddrs in
+	 * the double indirect block
+	 */
+	size_t diblklen = iblklen * befs_iaddrs_per_block(sb)
+	    * BEFS_DBLINDIR_BRUN_LEN;
+
+	befs_debug(sb, "---> befs_find_brun_dblindirect() find %lu", blockno);
+
+	/* First, discover which of the double_indir->indir blocks
+	 * contains pos. Then figure out how much of pos that
+	 * accounted for. Then discover which of the iaddrs in
+	 * the indirect block contains pos.
+	 */
+
+	dblindir_indx = dbl_indir_off / diblklen;
+	dblindir_leftover = dbl_indir_off % diblklen;
+	indir_indx = dblindir_leftover / diblklen;
+
+	/* Read double indirect block */
+	dbl_which_block = dblindir_indx / befs_iaddrs_per_block(sb);
+	if (dbl_which_block > data->double_indirect.len) {
+		befs_error(sb, "The double-indirect index calculated by "
+			   "befs_read_brun_dblindirect(), %d, is outside the range "
+			   "of the double-indirect block", dblindir_indx);
+		return BEFS_ERR;
+	}
+
+	dbl_indir_block =
+	    befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
+					dbl_which_block);
+	if (dbl_indir_block == NULL) {
+		befs_error(sb, "befs_read_brun_dblindirect() couldn't read the "
+			   "double-indirect block at blockno %lu",
+			   iaddr2blockno(sb,
+					 &data->double_indirect) +
+			   dbl_which_block);
+		brelse(dbl_indir_block);
+		return BEFS_ERR;
+	}
+
+	dbl_block_indx =
+	    dblindir_indx - (dbl_which_block * befs_iaddrs_per_block(sb));
+	iaddr_array = (befs_inode_addr *) dbl_indir_block->b_data;
+	indir_run = fsrun_to_cpu(sb, iaddr_array[dbl_block_indx]);
+	brelse(dbl_indir_block);
+	iaddr_array = NULL;
+
+	/* Read indirect block */
+	which_block = indir_indx / befs_iaddrs_per_block(sb);
+	if (which_block > indir_run.len) {
+		befs_error(sb, "The indirect index calculated by "
+			   "befs_read_brun_dblindirect(), %d, is outside the range "
+			   "of the indirect block", indir_indx);
+		return BEFS_ERR;
+	}
+
+	indir_block =
+	    befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
+	if (indir_block == NULL) {
+		befs_error(sb, "befs_read_brun_dblindirect() couldn't read the "
+			   "indirect block at blockno %lu",
+			   iaddr2blockno(sb, &indir_run) + which_block);
+		brelse(indir_block);
+		return BEFS_ERR;
+	}
+
+	block_indx = indir_indx - (which_block * befs_iaddrs_per_block(sb));
+	iaddr_array = (befs_inode_addr *) indir_block->b_data;
+	*run = fsrun_to_cpu(sb, iaddr_array[block_indx]);
+	brelse(indir_block);
+	iaddr_array = NULL;
+
+	blockno_at_run_start = indir_start_blk;
+	blockno_at_run_start += diblklen * dblindir_indx;
+	blockno_at_run_start += iblklen * indir_indx;
+	offset = blockno - blockno_at_run_start;
+
+	run->start += offset;
+	run->len -= offset;
+
+	befs_debug(sb, "Found file block %lu in double_indirect[%d][%d],"
+		   " double_indirect_leftover = %lu",
+		   blockno, dblindir_indx, indir_indx, dblindir_leftover);
+
+	return BEFS_OK;
+}
diff --git a/fs/befs/datastream.h b/fs/befs/datastream.h
new file mode 100644
index 0000000..45e8a3c
--- /dev/null
+++ b/fs/befs/datastream.h
@@ -0,0 +1,19 @@
+/*
+ * datastream.h
+ *
+ */
+
+struct buffer_head *befs_read_datastream(struct super_block *sb,
+					 befs_data_stream * ds, befs_off_t pos,
+					 uint * off);
+
+int befs_fblock2brun(struct super_block *sb, befs_data_stream * data,
+		     befs_blocknr_t fblock, befs_block_run * run);
+
+size_t befs_read_lsymlink(struct super_block *sb, befs_data_stream * data,
+			  void *buff, befs_off_t len);
+
+befs_blocknr_t befs_count_blocks(struct super_block *sb, befs_data_stream * ds);
+
+extern const befs_inode_addr BAD_IADDR;
+
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
new file mode 100644
index 0000000..875cc0a
--- /dev/null
+++ b/fs/befs/debug.c
@@ -0,0 +1,283 @@
+/*
+ *  linux/fs/befs/debug.c
+ * 
+ * Copyright (C) 2001 Will Dyson (will_dyson at pobox.com)
+ *
+ * With help from the ntfs-tng driver by Anton Altparmakov
+ *
+ * Copyright (C) 1999  Makoto Kato (m_kato@ga2.so-net.ne.jp)
+ *
+ * debug functions
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+#endif				/* __KERNEL__ */
+
+#include "befs.h"
+#include "endian.h"
+
+#define ERRBUFSIZE 1024
+
+void
+befs_error(const struct super_block *sb, const char *fmt, ...)
+{
+	va_list args;
+	char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL);
+	if (err_buf == NULL) {
+		printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
+		return;
+	}
+
+	va_start(args, fmt);
+	vsnprintf(err_buf, ERRBUFSIZE, fmt, args);
+	va_end(args);
+
+	printk(KERN_ERR "BeFS(%s): %s\n", sb->s_id, err_buf);
+	kfree(err_buf);
+}
+
+void
+befs_warning(const struct super_block *sb, const char *fmt, ...)
+{
+	va_list args;
+	char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL);
+	if (err_buf == NULL) {
+		printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
+		return;
+	}
+
+	va_start(args, fmt);
+	vsnprintf(err_buf, ERRBUFSIZE, fmt, args);
+	va_end(args);
+
+	printk(KERN_WARNING "BeFS(%s): %s\n", sb->s_id, err_buf);
+
+	kfree(err_buf);
+}
+
+void
+befs_debug(const struct super_block *sb, const char *fmt, ...)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	va_list args;
+	char *err_buf = NULL;
+
+	if (BEFS_SB(sb)->mount_opts.debug) {
+		err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL);
+		if (err_buf == NULL) {
+			printk(KERN_ERR "could not allocate %d bytes\n",
+				ERRBUFSIZE);
+			return;
+		}
+
+		va_start(args, fmt);
+		vsnprintf(err_buf, ERRBUFSIZE, fmt, args);
+		va_end(args);
+
+		printk(KERN_DEBUG "BeFS(%s): %s\n", sb->s_id, err_buf);
+
+		kfree(err_buf);
+	}
+
+#endif				//CONFIG_BEFS_DEBUG
+}
+
+void
+befs_dump_inode(const struct super_block *sb, befs_inode * inode)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	befs_block_run tmp_run;
+
+	befs_debug(sb, "befs_inode information");
+
+	befs_debug(sb, "  magic1 %08x", fs32_to_cpu(sb, inode->magic1));
+
+	tmp_run = fsrun_to_cpu(sb, inode->inode_num);
+	befs_debug(sb, "  inode_num %u, %hu, %hu",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+	befs_debug(sb, "  uid %u", fs32_to_cpu(sb, inode->uid));
+	befs_debug(sb, "  gid %u", fs32_to_cpu(sb, inode->gid));
+	befs_debug(sb, "  mode %08x", fs32_to_cpu(sb, inode->mode));
+	befs_debug(sb, "  flags %08x", fs32_to_cpu(sb, inode->flags));
+	befs_debug(sb, "  create_time %Lu",
+		   fs64_to_cpu(sb, inode->create_time));
+	befs_debug(sb, "  last_modified_time %Lu",
+		   fs64_to_cpu(sb, inode->last_modified_time));
+
+	tmp_run = fsrun_to_cpu(sb, inode->parent);
+	befs_debug(sb, "  parent [%u, %hu, %hu]",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+	tmp_run = fsrun_to_cpu(sb, inode->attributes);
+	befs_debug(sb, "  attributes [%u, %hu, %hu]",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+	befs_debug(sb, "  type %08x", fs32_to_cpu(sb, inode->type));
+	befs_debug(sb, "  inode_size %u", fs32_to_cpu(sb, inode->inode_size));
+
+	if (S_ISLNK(inode->mode)) {
+		befs_debug(sb, "  Symbolic link [%s]", inode->data.symlink);
+	} else {
+		int i;
+
+		for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; i++) {
+			tmp_run =
+			    fsrun_to_cpu(sb, inode->data.datastream.direct[i]);
+			befs_debug(sb, "  direct %d [%u, %hu, %hu]", i,
+				   tmp_run.allocation_group, tmp_run.start,
+				   tmp_run.len);
+		}
+		befs_debug(sb, "  max_direct_range %Lu",
+			   fs64_to_cpu(sb,
+				       inode->data.datastream.
+				       max_direct_range));
+
+		tmp_run = fsrun_to_cpu(sb, inode->data.datastream.indirect);
+		befs_debug(sb, "  indirect [%u, %hu, %hu]",
+			   tmp_run.allocation_group,
+			   tmp_run.start, tmp_run.len);
+
+		befs_debug(sb, "  max_indirect_range %Lu",
+			   fs64_to_cpu(sb,
+				       inode->data.datastream.
+				       max_indirect_range));
+
+		tmp_run =
+		    fsrun_to_cpu(sb, inode->data.datastream.double_indirect);
+		befs_debug(sb, "  double indirect [%u, %hu, %hu]",
+			   tmp_run.allocation_group, tmp_run.start,
+			   tmp_run.len);
+
+		befs_debug(sb, "  max_double_indirect_range %Lu",
+			   fs64_to_cpu(sb,
+				       inode->data.datastream.
+				       max_double_indirect_range));
+
+		befs_debug(sb, "  size %Lu",
+			   fs64_to_cpu(sb, inode->data.datastream.size));
+	}
+
+#endif				//CONFIG_BEFS_DEBUG
+}
+
+/*
+ * Display super block structure for debug.
+ */
+
+void
+befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	befs_block_run tmp_run;
+
+	befs_debug(sb, "befs_super_block information");
+
+	befs_debug(sb, "  name %s", sup->name);
+	befs_debug(sb, "  magic1 %08x", fs32_to_cpu(sb, sup->magic1));
+	befs_debug(sb, "  fs_byte_order %08x",
+		   fs32_to_cpu(sb, sup->fs_byte_order));
+
+	befs_debug(sb, "  block_size %u", fs32_to_cpu(sb, sup->block_size));
+	befs_debug(sb, "  block_shift %u", fs32_to_cpu(sb, sup->block_shift));
+
+	befs_debug(sb, "  num_blocks %Lu", fs64_to_cpu(sb, sup->num_blocks));
+	befs_debug(sb, "  used_blocks %Lu", fs64_to_cpu(sb, sup->used_blocks));
+
+	befs_debug(sb, "  magic2 %08x", fs32_to_cpu(sb, sup->magic2));
+	befs_debug(sb, "  blocks_per_ag %u",
+		   fs32_to_cpu(sb, sup->blocks_per_ag));
+	befs_debug(sb, "  ag_shift %u", fs32_to_cpu(sb, sup->ag_shift));
+	befs_debug(sb, "  num_ags %u", fs32_to_cpu(sb, sup->num_ags));
+
+	befs_debug(sb, "  flags %08x", fs32_to_cpu(sb, sup->flags));
+
+	tmp_run = fsrun_to_cpu(sb, sup->log_blocks);
+	befs_debug(sb, "  log_blocks %u, %hu, %hu",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+	befs_debug(sb, "  log_start %Ld", fs64_to_cpu(sb, sup->log_start));
+	befs_debug(sb, "  log_end %Ld", fs64_to_cpu(sb, sup->log_end));
+
+	befs_debug(sb, "  magic3 %08x", fs32_to_cpu(sb, sup->magic3));
+
+	tmp_run = fsrun_to_cpu(sb, sup->root_dir);
+	befs_debug(sb, "  root_dir %u, %hu, %hu",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+	tmp_run = fsrun_to_cpu(sb, sup->indices);
+	befs_debug(sb, "  indices %u, %hu, %hu",
+		   tmp_run.allocation_group, tmp_run.start, tmp_run.len);
+
+#endif				//CONFIG_BEFS_DEBUG
+}
+
+#if 0
+/* unused */
+void
+befs_dump_small_data(const struct super_block *sb, befs_small_data * sd)
+{
+}
+
+/* unused */
+void
+befs_dump_run(const struct super_block *sb, befs_block_run run)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	run = fsrun_to_cpu(sb, run);
+
+	befs_debug(sb, "[%u, %hu, %hu]",
+		   run.allocation_group, run.start, run.len);
+
+#endif				//CONFIG_BEFS_DEBUG
+}
+#endif  /*  0  */
+
+void
+befs_dump_index_entry(const struct super_block *sb, befs_btree_super * super)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	befs_debug(sb, "Btree super structure");
+	befs_debug(sb, "  magic %08x", fs32_to_cpu(sb, super->magic));
+	befs_debug(sb, "  node_size %u", fs32_to_cpu(sb, super->node_size));
+	befs_debug(sb, "  max_depth %08x", fs32_to_cpu(sb, super->max_depth));
+
+	befs_debug(sb, "  data_type %08x", fs32_to_cpu(sb, super->data_type));
+	befs_debug(sb, "  root_node_pointer %016LX",
+		   fs64_to_cpu(sb, super->root_node_ptr));
+	befs_debug(sb, "  free_node_pointer %016LX",
+		   fs64_to_cpu(sb, super->free_node_ptr));
+	befs_debug(sb, "  maximum size %016LX",
+		   fs64_to_cpu(sb, super->max_size));
+
+#endif				//CONFIG_BEFS_DEBUG
+}
+
+void
+befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead * node)
+{
+#ifdef CONFIG_BEFS_DEBUG
+
+	befs_debug(sb, "Btree node structure");
+	befs_debug(sb, "  left %016LX", fs64_to_cpu(sb, node->left));
+	befs_debug(sb, "  right %016LX", fs64_to_cpu(sb, node->right));
+	befs_debug(sb, "  overflow %016LX", fs64_to_cpu(sb, node->overflow));
+	befs_debug(sb, "  all_key_count %hu",
+		   fs16_to_cpu(sb, node->all_key_count));
+	befs_debug(sb, "  all_key_length %hu",
+		   fs16_to_cpu(sb, node->all_key_length));
+
+#endif				//CONFIG_BEFS_DEBUG
+}
diff --git a/fs/befs/endian.h b/fs/befs/endian.h
new file mode 100644
index 0000000..9ecaea4
--- /dev/null
+++ b/fs/befs/endian.h
@@ -0,0 +1,126 @@
+/*
+ * linux/fs/befs/endian.h
+ *
+ * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
+ *
+ * Partially based on similar functions in the sysv driver.
+ */
+
+#ifndef LINUX_BEFS_ENDIAN
+#define LINUX_BEFS_ENDIAN
+
+#include <linux/byteorder/generic.h>
+#include "befs.h"
+
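+/*
+ * A BeFS volume records its byte order in the superblock (see
+ * befs_load_sb() in super.c): volumes created on x86 are typically
+ * little-endian, PPC volumes big-endian.  The helpers below byteswap
+ * on-disk values relative to that recorded order.
+ */
+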
+static inline u64
+fs64_to_cpu(const struct super_block *sb, u64 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return le64_to_cpu(n);
+	else
+		return be64_to_cpu(n);
+}
+
+static inline u64
+cpu_to_fs64(const struct super_block *sb, u64 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return cpu_to_le64(n);
+	else
+		return cpu_to_be64(n);
+}
+
+static inline u32
+fs32_to_cpu(const struct super_block *sb, u32 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return le32_to_cpu(n);
+	else
+		return be32_to_cpu(n);
+}
+
+static inline u32
+cpu_to_fs32(const struct super_block *sb, u32 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return cpu_to_le32(n);
+	else
+		return cpu_to_be32(n);
+}
+
+static inline u16
+fs16_to_cpu(const struct super_block *sb, u16 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return le16_to_cpu(n);
+	else
+		return be16_to_cpu(n);
+}
+
+static inline u16
+cpu_to_fs16(const struct super_block *sb, u16 n)
+{
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+		return cpu_to_le16(n);
+	else
+		return cpu_to_be16(n);
+}
+
+/* Composite types below here */
+
+static inline befs_block_run
+fsrun_to_cpu(const struct super_block *sb, befs_block_run n)
+{
+	befs_block_run run;
+
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
+		run.allocation_group = le32_to_cpu(n.allocation_group);
+		run.start = le16_to_cpu(n.start);
+		run.len = le16_to_cpu(n.len);
+	} else {
+		run.allocation_group = be32_to_cpu(n.allocation_group);
+		run.start = be16_to_cpu(n.start);
+		run.len = be16_to_cpu(n.len);
+	}
+	return run;
+}
+
+static inline befs_block_run
+cpu_to_fsrun(const struct super_block *sb, befs_block_run n)
+{
+	befs_block_run run;
+
+	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
+		run.allocation_group = cpu_to_le32(n.allocation_group);
+		run.start = cpu_to_le16(n.start);
+		run.len = cpu_to_le16(n.len);
+	} else {
+		run.allocation_group = cpu_to_be32(n.allocation_group);
+		run.start = cpu_to_be16(n.start);
+		run.len = cpu_to_be16(n.len);
+	}
+	return run;
+}
+
+static inline befs_data_stream
+fsds_to_cpu(const struct super_block *sb, befs_data_stream n)
+{
+	befs_data_stream data;
+	int i;
+
+	for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
+		data.direct[i] = fsrun_to_cpu(sb, n.direct[i]);
+
+	data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range);
+	data.indirect = fsrun_to_cpu(sb, n.indirect);
+	data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range);
+	data.double_indirect = fsrun_to_cpu(sb, n.double_indirect);
+	data.max_double_indirect_range = fs64_to_cpu(sb,
+						     n.
+						     max_double_indirect_range);
+	data.size = fs64_to_cpu(sb, n.size);
+
+	return data;
+}
+
+#endif				//LINUX_BEFS_ENDIAN
diff --git a/fs/befs/inode.c b/fs/befs/inode.c
new file mode 100644
index 0000000..d41c924
--- /dev/null
+++ b/fs/befs/inode.c
@@ -0,0 +1,53 @@
+/*
+ * inode.c
+ * 
+ * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
+ */
+
+#include <linux/fs.h>
+
+#include "befs.h"
+#include "inode.h"
+#include "endian.h"
+
+/*
+	Validates the correctness of the befs inode
+	Returns BEFS_OK if the inode should be used, otherwise
+	returns BEFS_BAD_INODE
+*/
+int
+befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+		 befs_blocknr_t inode)
+{
+	u32 magic1 = fs32_to_cpu(sb, raw_inode->magic1);
+	befs_inode_addr ino_num = fsrun_to_cpu(sb, raw_inode->inode_num);
+	u32 flags = fs32_to_cpu(sb, raw_inode->flags);
+
+	/* check magic header. */
+	if (magic1 != BEFS_INODE_MAGIC1) {
+		befs_error(sb,
+			   "Inode has a bad magic header - inode = %lu", inode);
+		return BEFS_BAD_INODE;
+	}
+
+	/*
+	 * Sanity check 2: inodes store their own block address. Check it.
+	 */
+	if (inode != iaddr2blockno(sb, &ino_num)) {
+		befs_error(sb, "inode blocknr field disagrees with the VFS: "
+			   "VFS %lu, inode %lu",
+			   inode, iaddr2blockno(sb, &ino_num));
+		return BEFS_BAD_INODE;
+	}
+
+	/*
+	 * check flag
+	 */
+
+	if (!(flags & BEFS_INODE_IN_USE)) {
+		befs_error(sb, "inode is not used - inode = %lu", inode);
+		return BEFS_BAD_INODE;
+	}
+
+	return BEFS_OK;
+}
diff --git a/fs/befs/inode.h b/fs/befs/inode.h
new file mode 100644
index 0000000..9dc7fd9
--- /dev/null
+++ b/fs/befs/inode.h
@@ -0,0 +1,8 @@
+/*
+ * inode.h
+ * 
+ */
+
+int befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+		     befs_blocknr_t inode);
+
diff --git a/fs/befs/io.c b/fs/befs/io.c
new file mode 100644
index 0000000..ddef98a
--- /dev/null
+++ b/fs/befs/io.c
@@ -0,0 +1,83 @@
+/*
+ * linux/fs/befs/io.c
+ *
+ * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
+ *
+ * Based on portions of file.c and inode.c 
+ * by Makoto Kato (m_kato@ga2.so-net.ne.jp)
+ *
+ * Many thanks to Dominic Giampaolo, author of Practical File System
+ * Design with the Be File System, for such a helpful book.
+ *
+ */
+
+#include <linux/buffer_head.h>
+
+#include "befs.h"
+#include "io.h"
+
+/*
+ * Converts befs notion of disk addr to a disk offset and uses
+ * linux kernel function sb_bread() to get the buffer containing
+ * the offset. -Will Dyson
+ *
+ */
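+/*
+ * For reference: iaddr2blockno() (defined in befs.h) collapses the
+ * (allocation_group, start) pair of a befs_inode_addr into a flat
+ * block number, roughly (allocation_group << ag_shift) + start.
+ */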
+
+struct buffer_head *
+befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
+{
+	struct buffer_head *bh = NULL;
+	befs_blocknr_t block = 0;
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+
+	befs_debug(sb, "---> Enter befs_bread_iaddr() "
+		   "[%u, %hu, %hu]",
+		   iaddr.allocation_group, iaddr.start, iaddr.len);
+
+	if (iaddr.allocation_group > befs_sb->num_ags) {
+		befs_error(sb, "BEFS: Invalid allocation group %u, max is %u",
+			   iaddr.allocation_group, befs_sb->num_ags);
+		goto error;
+	}
+
+	block = iaddr2blockno(sb, &iaddr);
+
+	befs_debug(sb, "befs_bread_iaddr: offset = %lu", block);
+
+	bh = sb_bread(sb, block);
+
+	if (bh == NULL) {
+		befs_error(sb, "Failed to read block %lu", block);
+		goto error;
+	}
+
+	befs_debug(sb, "<--- befs_bread_iaddr()");
+	return bh;
+
+      error:
+	befs_debug(sb, "<--- befs_bread_iaddr() ERROR");
+	return NULL;
+}
+
+struct buffer_head *
+befs_bread(struct super_block *sb, befs_blocknr_t block)
+{
+	struct buffer_head *bh = NULL;
+
+	befs_debug(sb, "---> Enter befs_bread() %Lu", block);
+
+	bh = sb_bread(sb, block);
+
+	if (bh == NULL) {
+		befs_error(sb, "Failed to read block %lu", block);
+		goto error;
+	}
+
+	befs_debug(sb, "<--- befs_bread()");
+
+	return bh;
+
+      error:
+	befs_debug(sb, "<--- befs_bread() ERROR");
+	return NULL;
+}
diff --git a/fs/befs/io.h b/fs/befs/io.h
new file mode 100644
index 0000000..9b78266
--- /dev/null
+++ b/fs/befs/io.h
@@ -0,0 +1,9 @@
+/*
+ * io.h
+ */
+
+struct buffer_head *befs_bread_iaddr(struct super_block *sb,
+				     befs_inode_addr iaddr);
+
+struct buffer_head *befs_bread(struct super_block *sb, befs_blocknr_t block);
+
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
new file mode 100644
index 0000000..de5bb28
--- /dev/null
+++ b/fs/befs/linuxvfs.c
@@ -0,0 +1,964 @@
+/*
+ * linux/fs/befs/linuxvfs.c
+ *
+ * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/nls.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <linux/namei.h>
+
+#include "befs.h"
+#include "btree.h"
+#include "inode.h"
+#include "datastream.h"
+#include "super.h"
+#include "io.h"
+#include "endian.h"
+
+MODULE_DESCRIPTION("BeOS File System (BeFS) driver");
+MODULE_AUTHOR("Will Dyson");
+MODULE_LICENSE("GPL");
+
+/* The units the vfs expects inode->i_blocks to be in */
+#define VFS_BLOCK_SIZE 512
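+/* e.g. one 2048-byte BeFS block counts as 4 of these 512-byte units */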
+
+static int befs_readdir(struct file *, void *, filldir_t);
+static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+static int befs_readpage(struct file *file, struct page *page);
+static sector_t befs_bmap(struct address_space *mapping, sector_t block);
+static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static void befs_read_inode(struct inode *ino);
+static struct inode *befs_alloc_inode(struct super_block *sb);
+static void befs_destroy_inode(struct inode *inode);
+static int befs_init_inodecache(void);
+static void befs_destroy_inodecache(void);
+static int befs_follow_link(struct dentry *, struct nameidata *);
+static void befs_put_link(struct dentry *, struct nameidata *);
+static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
+			char **out, int *out_len);
+static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
+			char **out, int *out_len);
+static void befs_put_super(struct super_block *);
+static int befs_remount(struct super_block *, int *, char *);
+static int befs_statfs(struct super_block *, struct kstatfs *);
+static int parse_options(char *, befs_mount_options *);
+
+static const struct super_operations befs_sops = {
+	.read_inode	= befs_read_inode,	/* initialize & read inode */
+	.alloc_inode	= befs_alloc_inode,	/* allocate a new inode */
+	.destroy_inode	= befs_destroy_inode, /* deallocate an inode */
+	.put_super	= befs_put_super,	/* uninit super */
+	.statfs		= befs_statfs,	/* statfs */
+	.remount_fs	= befs_remount,
+};
+
+/* slab cache for befs_inode_info objects */
+static kmem_cache_t *befs_inode_cachep;
+
+static struct file_operations befs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= befs_readdir,
+};
+
+static struct inode_operations befs_dir_inode_operations = {
+	.lookup		= befs_lookup,
+};
+
+static struct file_operations befs_file_operations = {
+	.llseek		= default_llseek,
+	.read		= generic_file_read,
+	.mmap		= generic_file_readonly_mmap,
+};
+
+static struct address_space_operations befs_aops = {
+	.readpage	= befs_readpage,
+	.sync_page	= block_sync_page,
+	.bmap		= befs_bmap,
+};
+
+static struct inode_operations befs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= befs_follow_link,
+	.put_link	= befs_put_link,
+};
+
+/* 
+ * Called by generic_file_read() to read a page of data
+ * 
+ * In turn, simply calls a generic block read function and
+ * passes it the address of befs_get_block, for mapping file
+ * positions to disk blocks.
+ */
+static int
+befs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, befs_get_block);
+}
+
+static sector_t
+befs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, befs_get_block);
+}
+
+/* 
+ * Generic function to map a file position (block) to a 
+ * disk offset (passed back in bh_result).
+ *
+ * Used by many higher level functions.
+ *
+ * Calls befs_fblock2brun() in datastream.c to do the real work.
+ *
+ * -WD 10-26-01
+ */
+
+static int
+befs_get_block(struct inode *inode, sector_t block,
+	       struct buffer_head *bh_result, int create)
+{
+	struct super_block *sb = inode->i_sb;
+	befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
+	befs_block_run run = BAD_IADDR;
+	int res = 0;
+	ulong disk_off;
+
+	befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld",
+		   inode->i_ino, block);
+
+	if (block < 0) {
+		befs_error(sb, "befs_get_block() was asked for a block "
+			   "number less than zero: block %ld in inode %lu",
+			   block, inode->i_ino);
+		return -EIO;
+	}
+
+	if (create) {
+		befs_error(sb, "befs_get_block() was asked to write to "
+			   "block %ld in inode %lu", block, inode->i_ino);
+		return -EPERM;
+	}
+
+	res = befs_fblock2brun(sb, ds, block, &run);
+	if (res != BEFS_OK) {
+		befs_error(sb,
+			   "<--- befs_get_block() for inode %lu, block "
+			   "%ld ERROR", inode->i_ino, block);
+		return -EFBIG;
+	}
+
+	disk_off = (ulong) iaddr2blockno(sb, &run);
+
+	map_bh(bh_result, inode->i_sb, disk_off);
+
+	befs_debug(sb, "<--- befs_get_block() for inode %lu, block %ld, "
+		   "disk address %lu", inode->i_ino, block, disk_off);
+
+	return 0;
+}
+
+static struct dentry *
+befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	struct super_block *sb = dir->i_sb;
+	befs_data_stream *ds = &BEFS_I(dir)->i_data.ds;
+	befs_off_t offset;
+	int ret;
+	int utfnamelen;
+	char *utfname;
+	const char *name = dentry->d_name.name;
+
+	befs_debug(sb, "---> befs_lookup() "
+		   "name %s inode %ld", dentry->d_name.name, dir->i_ino);
+
+	/* Convert to UTF-8 */
+	if (BEFS_SB(sb)->nls) {
+		ret =
+		    befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen);
+		if (ret < 0) {
+			befs_debug(sb, "<--- befs_lookup() ERROR");
+			return ERR_PTR(ret);
+		}
+		ret = befs_btree_find(sb, ds, utfname, &offset);
+		kfree(utfname);
+
+	} else {
+		ret = befs_btree_find(sb, ds, dentry->d_name.name, &offset);
+	}
+
+	if (ret == BEFS_BT_NOT_FOUND) {
+		befs_debug(sb, "<--- befs_lookup() %s not found",
+			   dentry->d_name.name);
+		return ERR_PTR(-ENOENT);
+
+	} else if (ret != BEFS_OK || offset == 0) {
+		befs_warning(sb, "<--- befs_lookup() Error");
+		return ERR_PTR(-ENODATA);
+	}
+
+	inode = iget(dir->i_sb, (ino_t) offset);
+	if (!inode)
+		return ERR_PTR(-EACCES);
+
+	d_add(dentry, inode);
+
+	befs_debug(sb, "<--- befs_lookup()");
+
+	return NULL;
+}
+
+static int
+befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
+	befs_off_t value;
+	int result;
+	size_t keysize;
+	unsigned char d_type;
+	char keybuf[BEFS_NAME_LEN + 1];
+	char *nlsname;
+	int nlsnamelen;
+	const char *dirname = filp->f_dentry->d_name.name;
+
+	befs_debug(sb, "---> befs_readdir() "
+		   "name %s, inode %ld, filp->f_pos %Ld",
+		   dirname, inode->i_ino, filp->f_pos);
+
+	result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1,
+				 keybuf, &keysize, &value);
+
+	if (result == BEFS_ERR) {
+		befs_debug(sb, "<--- befs_readdir() ERROR");
+		befs_error(sb, "IO error reading %s (inode %lu)",
+			   dirname, inode->i_ino);
+		return -EIO;
+
+	} else if (result == BEFS_BT_END) {
+		befs_debug(sb, "<--- befs_readdir() END");
+		return 0;
+
+	} else if (result == BEFS_BT_EMPTY) {
+		befs_debug(sb, "<--- befs_readdir() Empty directory");
+		return 0;
+	}
+
+	d_type = DT_UNKNOWN;
+
+	/* Convert to NLS */
+	if (BEFS_SB(sb)->nls) {
+		result =
+		    befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
+		if (result < 0) {
+			befs_debug(sb, "<--- befs_readdir() ERROR");
+			return result;
+		}
+		result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos,
+				 (ino_t) value, d_type);
+		kfree(nlsname);
+
+	} else {
+		result = filldir(dirent, keybuf, keysize, filp->f_pos,
+				 (ino_t) value, d_type);
+	}
+
+	filp->f_pos++;
+
+	befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
+
+	return 0;
+}
+
+static struct inode *
+befs_alloc_inode(struct super_block *sb)
+{
+        struct befs_inode_info *bi;
+        bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
+							SLAB_KERNEL);
+        if (!bi)
+                return NULL;
+        return &bi->vfs_inode;
+}
+
+static void
+befs_destroy_inode(struct inode *inode)
+{
+        kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct befs_inode_info *bi = (struct befs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		inode_init_once(&bi->vfs_inode);
+	}
+}
+
+static void
+befs_read_inode(struct inode *inode)
+{
+	struct buffer_head *bh = NULL;
+	befs_inode *raw_inode = NULL;
+
+	struct super_block *sb = inode->i_sb;
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+	befs_inode_info *befs_ino = NULL;
+
+	befs_debug(sb, "---> befs_read_inode() " "inode = %lu", inode->i_ino);
+
+	befs_ino = BEFS_I(inode);
+
+	/* convert from vfs's inode number to befs's inode number */
+	befs_ino->i_inode_num = blockno2iaddr(sb, inode->i_ino);
+
+	befs_debug(sb, "  real inode number [%u, %hu, %hu]",
+		   befs_ino->i_inode_num.allocation_group,
+		   befs_ino->i_inode_num.start, befs_ino->i_inode_num.len);
+
+	bh = befs_bread(sb, inode->i_ino);
+	if (!bh) {
+		befs_error(sb, "unable to read inode block - "
+			   "inode = %lu", inode->i_ino);
+		goto unaquire_none;
+	}
+
+	raw_inode = (befs_inode *) bh->b_data;
+
+	befs_dump_inode(sb, raw_inode);
+
+	if (befs_check_inode(sb, raw_inode, inode->i_ino) != BEFS_OK) {
+		befs_error(sb, "Bad inode: %lu", inode->i_ino);
+		goto unaquire_bh;
+	}
+
+	inode->i_mode = (umode_t) fs32_to_cpu(sb, raw_inode->mode);
+
+	/*
+	 * Set uid and gid.  Since BeOS is a single-user OS, they can be
+	 * overridden with the "uid" and "gid" mount options.
+	 */
+
+	inode->i_uid = befs_sb->mount_opts.use_uid ?
+	    befs_sb->mount_opts.uid : (uid_t) fs32_to_cpu(sb, raw_inode->uid);
+	inode->i_gid = befs_sb->mount_opts.use_gid ?
+	    befs_sb->mount_opts.gid : (gid_t) fs32_to_cpu(sb, raw_inode->gid);
+
+	inode->i_nlink = 1;
+
+	/*
+	 * BeFS's time is 64 bits, but the current VFS is 32 bits.
+	 * BeFS has no access time or inode change time, and the VFS has
+	 * no creation time.
+	 * Also, the lower 16 bits of last_modified_time and create_time
+	 * are just a counter to help ensure uniqueness for indexing
+	 * purposes. (PFD, page 54)
+	 */
+
+	inode->i_mtime.tv_sec =
+	    fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
+	inode->i_mtime.tv_nsec = 0;   /* lower 16 bits are not a time */	
+	inode->i_ctime = inode->i_mtime;
+	inode->i_atime = inode->i_mtime;
+	inode->i_blksize = befs_sb->block_size;
+
+	befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
+	befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
+	befs_ino->i_attribute = fsrun_to_cpu(sb, raw_inode->attributes);
+	befs_ino->i_flags = fs32_to_cpu(sb, raw_inode->flags);
+
+	if (S_ISLNK(inode->i_mode) && !(befs_ino->i_flags & BEFS_LONG_SYMLINK)){
+		inode->i_size = 0;
+		inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE;
+		strncpy(befs_ino->i_data.symlink, raw_inode->data.symlink,
+			BEFS_SYMLINK_LEN);
+	} else {
+		int num_blks;
+
+		befs_ino->i_data.ds =
+		    fsds_to_cpu(sb, raw_inode->data.datastream);
+
+		num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds);
+		inode->i_blocks =
+		    num_blks * (befs_sb->block_size / VFS_BLOCK_SIZE);
+		inode->i_size = befs_ino->i_data.ds.size;
+	}
+
+	inode->i_mapping->a_ops = &befs_aops;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_fop = &befs_file_operations;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &befs_dir_inode_operations;
+		inode->i_fop = &befs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &befs_symlink_inode_operations;
+	} else {
+		befs_error(sb, "Inode %lu is not a regular file, "
+			   "directory or symlink. THAT IS WRONG! BeFS has no "
+			   "on disk special files", inode->i_ino);
+		goto unaquire_bh;
+	}
+
+	brelse(bh);
+	befs_debug(sb, "<--- befs_read_inode()");
+	return;
+
+      unaquire_bh:
+	brelse(bh);
+
+      unaquire_none:
+	make_bad_inode(inode);
+	befs_debug(sb, "<--- befs_read_inode() - Bad inode");
+	return;
+}
+
+/* Initialize the inode cache. Called at fs setup.
+ * 
+ * Taken from NFS implementation by Al Viro.
+ */
+static int
+befs_init_inodecache(void)
+{
+	befs_inode_cachep = kmem_cache_create("befs_inode_cache",
+					      sizeof (struct befs_inode_info),
+					      0, SLAB_RECLAIM_ACCOUNT,
+					      init_once, NULL);
+	if (befs_inode_cachep == NULL) {
+		printk(KERN_ERR "befs_init_inodecache: "
+		       "Couldn't initialize inode slabcache\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Called at fs teardown.
+ * 
+ * Taken from NFS implementation by Al Viro.
+ */
+static void
+befs_destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(befs_inode_cachep))
+		printk(KERN_ERR "befs_destroy_inodecache: "
+		       "not all structures were freed\n");
+}
+
+/*
+ * A short symlink stores its target directly in the inode, in place of
+ * the data stream.  If the LONG_SYMLINK flag is set, the target lives
+ * in the data stream's blocks and must be read from disk.
+ */
+static int
+befs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+	char *link;
+
+	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+		struct super_block *sb = dentry->d_sb;
+		befs_data_stream *data = &befs_ino->i_data.ds;
+		befs_off_t len = data->size;
+
+		befs_debug(sb, "Follow long symlink");
+
+		link = kmalloc(len, GFP_NOFS);
+		if (!link) {
+			link = ERR_PTR(-ENOMEM);
+		} else if (befs_read_lsymlink(sb, data, link, len) != len) {
+			kfree(link);
+			befs_error(sb, "Failed to read entire long symlink");
+			link = ERR_PTR(-EIO);
+		}
+	} else {
+		link = befs_ino->i_data.symlink;
+	}
+
+	nd_set_link(nd, link);
+	return 0;
+}
+
+static void befs_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+		char *p = nd_get_link(nd);
+		if (!IS_ERR(p))
+			kfree(p);
+	}
+}
+
+/*
+ * UTF-8 to NLS charset conversion routine
+ * 
+ *
+ * Changed 8/10/01 by Will Dyson. Now use uni2char() / char2uni() rather than
+ * the nls tables directly
+ */
+
+static int
+befs_utf2nls(struct super_block *sb, const char *in,
+	     int in_len, char **out, int *out_len)
+{
+	struct nls_table *nls = BEFS_SB(sb)->nls;
+	int i, o;
+	wchar_t uni;
+	int unilen, utflen;
+	char *result;
+	int maxlen = in_len; /* The utf8->nls conversion can't make more chars */
+
+	befs_debug(sb, "---> utf2nls()");
+
+	if (!nls) {
+		befs_error(sb, "befs_utf2nls called with no NLS table loaded");
+		return -EINVAL;
+	}
+
+	*out = result = kmalloc(maxlen, GFP_NOFS);
+	if (!*out) {
+		befs_error(sb, "befs_utf2nls() cannot allocate memory");
+		*out_len = 0;
+		return -ENOMEM;
+	}
+
+	for (i = o = 0; i < in_len; i += utflen, o += unilen) {
+
+		/* convert from UTF-8 to Unicode */
+		utflen = utf8_mbtowc(&uni, &in[i], in_len - i);
+		if (utflen < 0) {
+			goto conv_err;
+		}
+
+		/* convert from Unicode to nls */
+		unilen = nls->uni2char(uni, &result[o], in_len - o);
+		if (unilen < 0) {
+			goto conv_err;
+		}
+	}
+	result[o] = '\0';
+	*out_len = o;
+
+	befs_debug(sb, "<--- utf2nls()");
+
+	return o;
+
+      conv_err:
+	befs_error(sb, "Name using character set %s contains a character that "
+		   "cannot be converted to unicode.", nls->charset);
+	befs_debug(sb, "<--- utf2nls()");
+	kfree(result);
+	return -EILSEQ;
+}
+
+/**
+ * befs_nls2utf - Convert NLS string to utf8 encoding
+ * @sb: Superblock
+ * @in: Input string buffer in NLS format
+ * @in_len: Length of input string in bytes
+ * @out: The output string in UTF-8 format
+ * @out_len: Length of the output buffer
+ *
+ * Converts input string @in, which is in the format of the loaded NLS map,
+ * into a utf8 string.
+ *
+ * The destination string @out is allocated by this function and the caller
+ * is responsible for freeing it with kfree().
+ *
+ * On return, *@out_len is the length of @out in bytes.
+ *
+ * On success, the return value is the number of utf8 characters written to
+ * the output buffer @out.
+ *
+ * On failure, a negative number corresponding to the error code is returned.
+ */
+
+static int
+befs_nls2utf(struct super_block *sb, const char *in,
+	     int in_len, char **out, int *out_len)
+{
+	struct nls_table *nls = BEFS_SB(sb)->nls;
+	int i, o;
+	wchar_t uni;
+	int unilen, utflen;
+	char *result;
+	int maxlen = 3 * in_len;
+
+	befs_debug(sb, "---> nls2utf()\n");
+
+	if (!nls) {
+		befs_error(sb, "befs_nls2utf called with no NLS table loaded.");
+		return -EINVAL;
+	}
+
+	*out = result = kmalloc(maxlen, GFP_NOFS);
+	if (!*out) {
+		befs_error(sb, "befs_nls2utf() cannot allocate memory");
+		*out_len = 0;
+		return -ENOMEM;
+	}
+
+	for (i = o = 0; i < in_len; i += unilen, o += utflen) {
+
+		/* convert from nls to unicode */
+		unilen = nls->char2uni(&in[i], in_len - i, &uni);
+		if (unilen < 0) {
+			goto conv_err;
+		}
+
+		/* convert from unicode to UTF-8 */
+		utflen = utf8_wctomb(&result[o], uni, 3);
+		if (utflen <= 0) {
+			goto conv_err;
+		}
+	}
+
+	result[o] = '\0';
+	*out_len = o;
+
+	befs_debug(sb, "<--- nls2utf()");
+
+	return i;
+
+      conv_err:
+	befs_error(sb, "Name using character set %s contains a character that "
+		   "cannot be converted to unicode.", nls->charset);
+	befs_debug(sb, "<--- nls2utf()");
+	kfree(result);
+	return -EILSEQ;
+}
+
+/*
+ * Mount option tokens recognized by parse_options().
+ */
+enum {
+	Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
+};
+
+static match_table_t befs_tokens = {
+	{Opt_uid, "uid=%d"},
+	{Opt_gid, "gid=%d"},
+	{Opt_charset, "iocharset=%s"},
+	{Opt_debug, "debug"},
+	{Opt_err, NULL}
+};
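+
+/*
+ * Example mount invocation (device and mount point are illustrative):
+ *
+ *   mount -t befs -o uid=1000,gid=100,iocharset=iso8859-1,debug \
+ *         /dev/hdb1 /mnt/beos
+ */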
+
+static int
+parse_options(char *options, befs_mount_options * opts)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+
+	/* Initialize options */
+	opts->uid = 0;
+	opts->gid = 0;
+	opts->use_uid = 0;
+	opts->use_gid = 0;
+	opts->iocharset = NULL;
+	opts->debug = 0;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, befs_tokens, args);
+		switch (token) {
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0) {
+				printk(KERN_ERR "BeFS: Invalid uid %d, "
+						"using default\n", option);
+				break;
+			}
+			opts->uid = option;
+			opts->use_uid = 1;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0) {
+				printk(KERN_ERR "BeFS: Invalid gid %d, "
+						"using default\n", option);
+				break;
+			}
+			opts->gid = option;
+			opts->use_gid = 1;
+			break;
+		case Opt_charset:
+			kfree(opts->iocharset);
+			opts->iocharset = match_strdup(&args[0]);
+			if (!opts->iocharset) {
+				printk(KERN_ERR "BeFS: allocation failure for "
+						"iocharset string\n");
+				return 0;
+			}
+			break;
+		case Opt_debug:
+			opts->debug = 1;
+			break;
+		default:
+			printk(KERN_ERR "BeFS: Unrecognized mount option \"%s\" "
+					"or missing value\n", p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/* This function has the responsibility of getting the
+ * filesystem ready for unmounting.
+ * Basically, we free everything that we allocated in
+ * befs_fill_super().
+ */
+static void
+befs_put_super(struct super_block *sb)
+{
+	if (BEFS_SB(sb)->mount_opts.iocharset) {
+		kfree(BEFS_SB(sb)->mount_opts.iocharset);
+		BEFS_SB(sb)->mount_opts.iocharset = NULL;
+	}
+
+	if (BEFS_SB(sb)->nls) {
+		unload_nls(BEFS_SB(sb)->nls);
+		BEFS_SB(sb)->nls = NULL;
+	}
+
+	if (sb->s_fs_info) {
+		kfree(sb->s_fs_info);
+		sb->s_fs_info = NULL;
+	}
+	return;
+}
+
+/* Allocate private field of the superblock, fill it.
+ *
+ * Finish filling the public superblock fields
+ * Make the root directory
+ * Load a set of NLS translations if needed.
+ */
+static int
+befs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct buffer_head *bh;
+	befs_sb_info *befs_sb;
+	befs_super_block *disk_sb;
+	struct inode *root;
+
+	const unsigned long sb_block = 0;
+	const off_t x86_sb_off = 512;
+
+	sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL);
+	if (sb->s_fs_info == NULL) {
+		printk(KERN_ERR
+		       "BeFS(%s): Unable to allocate memory for private "
+		       "portion of superblock. Bailing.\n", sb->s_id);
+		goto unaquire_none;
+	}
+	befs_sb = BEFS_SB(sb);
+	memset(befs_sb, 0, sizeof(befs_sb_info));
+
+	if (!parse_options((char *) data, &befs_sb->mount_opts)) {
+		befs_error(sb, "cannot parse mount options");
+		goto unaquire_priv_sbp;
+	}
+
+	befs_debug(sb, "---> befs_fill_super()");
+
+#ifndef CONFIG_BEFS_RW
+	if (!(sb->s_flags & MS_RDONLY)) {
+		befs_warning(sb,
+			     "No write support. Marking filesystem read-only");
+		sb->s_flags |= MS_RDONLY;
+	}
+#endif				/* CONFIG_BEFS_RW */
+
+	/*
+	 * Set dummy blocksize to read super block.
+	 * Will be set to real fs blocksize later.
+	 *
+	 * Linux 2.4.10 and later refuse to read blocks smaller than
+	 * the hardsect size for the device. But we also need to read at 
+	 * least 1k to get the second 512 bytes of the volume.
+	 * -WD 10-26-01
+	 */ 
+	sb_min_blocksize(sb, 1024);
+
+	if (!(bh = sb_bread(sb, sb_block))) {
+		befs_error(sb, "unable to read superblock");
+		goto unaquire_priv_sbp;
+	}
+
+	/*
+	 * Account for the superblock offset on x86 volumes: the superblock
+	 * starts 512 bytes into the volume, whereas PPC volumes keep it at
+	 * offset 0.
+	 */
+	disk_sb = (befs_super_block *) bh->b_data;
+	if ((le32_to_cpu(disk_sb->magic1) == BEFS_SUPER_MAGIC1) ||
+	    (be32_to_cpu(disk_sb->magic1) == BEFS_SUPER_MAGIC1)) {
+		befs_debug(sb, "Using PPC superblock location");
+	} else {
+		befs_debug(sb, "Using x86 superblock location");
+		disk_sb =
+		    (befs_super_block *) ((void *) bh->b_data + x86_sb_off);
+	}
+
+	if (befs_load_sb(sb, disk_sb) != BEFS_OK)
+		goto unaquire_bh;
+
+	befs_dump_super_block(sb, disk_sb);
+
+	brelse(bh);
+
+	if (befs_check_sb(sb) != BEFS_OK)
+		goto unaquire_priv_sbp;
+
+	if( befs_sb->num_blocks > ~((sector_t)0) ) {
+		befs_error(sb, "blocks count: %Lu "
+			"is larger than the host can use",
+			befs_sb->num_blocks);
+		goto unaquire_priv_sbp;
+	}
+
+	/*
+	 * set up enough so that it can read an inode
+	 * Fill in kernel superblock fields from private sb
+	 */
+	sb->s_magic = BEFS_SUPER_MAGIC;
+	/* Set real blocksize of fs */
+	sb_set_blocksize(sb, (ulong) befs_sb->block_size);
+	sb->s_op = (struct super_operations *) &befs_sops;
+	root = iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		iput(root);
+		befs_error(sb, "get root inode failed");
+		goto unaquire_priv_sbp;
+	}
+
+	/* load nls library */
+	if (befs_sb->mount_opts.iocharset) {
+		befs_debug(sb, "Loading nls: %s",
+			   befs_sb->mount_opts.iocharset);
+		befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset);
+		if (!befs_sb->nls) {
+			befs_warning(sb, "Cannot load nls %s; "
+					"loading default nls",
+					befs_sb->mount_opts.iocharset);
+			befs_sb->nls = load_nls_default();
+		}
+	/* load default nls if none is specified  in mount options */
+	} else {
+		befs_debug(sb, "Loading default nls");
+		befs_sb->nls = load_nls_default();
+	}
+
+	return 0;
+/*****************/
+      unaquire_bh:
+	brelse(bh);
+
+      unaquire_priv_sbp:
+	kfree(sb->s_fs_info);
+
+      unaquire_none:
+	sb->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static int
+befs_remount(struct super_block *sb, int *flags, char *data)
+{
+	if (!(*flags & MS_RDONLY))
+		return -EINVAL;
+	return 0;
+}
+
+static int
+befs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+
+	befs_debug(sb, "---> befs_statfs()");
+
+	buf->f_type = BEFS_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = BEFS_SB(sb)->num_blocks;
+	buf->f_bfree = BEFS_SB(sb)->num_blocks - BEFS_SB(sb)->used_blocks;
+	buf->f_bavail = buf->f_bfree;
+	buf->f_files = 0;	/* UNKNOWN */
+	buf->f_ffree = 0;	/* UNKNOWN */
+	buf->f_namelen = BEFS_NAME_LEN;
+
+	befs_debug(sb, "<--- befs_statfs()");
+
+	return 0;
+}
+
+static struct super_block *
+befs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name,
+	    void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super);
+}
+
+static struct file_system_type befs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "befs",
+	.get_sb		= befs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,	
+};
+
+static int __init
+init_befs_fs(void)
+{
+	int err;
+
+	printk(KERN_INFO "BeFS version: %s\n", BEFS_VERSION);
+
+	err = befs_init_inodecache();
+	if (err)
+		goto unaquire_none;
+
+	err = register_filesystem(&befs_fs_type);
+	if (err)
+		goto unaquire_inodecache;
+
+	return 0;
+
+unaquire_inodecache:
+	befs_destroy_inodecache();
+
+unaquire_none:
+	return err;
+}
+
+static void __exit
+exit_befs_fs(void)
+{
+	befs_destroy_inodecache();
+
+	unregister_filesystem(&befs_fs_type);
+}
+
+/*
+ * Macros that typecheck the init and exit functions, ensure that they
+ * are called at init and cleanup, and eliminate warnings about unused
+ * functions.
+ */
+module_init(init_befs_fs)
+module_exit(exit_befs_fs)
diff --git a/fs/befs/super.c b/fs/befs/super.c
new file mode 100644
index 0000000..4557acb
--- /dev/null
+++ b/fs/befs/super.c
@@ -0,0 +1,112 @@
+/*
+ * super.c
+ *
+ * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com>
+ *
+ * Licensed under the GNU GPL. See the file COPYING for details.
+ *
+ */
+
+#include <linux/fs.h>
+
+#include "befs.h"
+#include "super.h"
+#include "endian.h"
+
+/**
+ * befs_load_sb -- Convert the on-disk superblock (already read by the
+ * caller) into the in-core befs_sb_info, byteswapping all fields to
+ * CPU byte order.
+ */
+int
+befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
+{
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+
+	/* Check the byte order of the filesystem */
+	if (le32_to_cpu(disk_sb->fs_byte_order) == BEFS_BYTEORDER_NATIVE)
+	    befs_sb->byte_order = BEFS_BYTESEX_LE;
+	else if (be32_to_cpu(disk_sb->fs_byte_order) == BEFS_BYTEORDER_NATIVE)
+	    befs_sb->byte_order = BEFS_BYTESEX_BE;	
+
+	befs_sb->magic1 = fs32_to_cpu(sb, disk_sb->magic1);
+	befs_sb->magic2 = fs32_to_cpu(sb, disk_sb->magic2);
+	befs_sb->magic3 = fs32_to_cpu(sb, disk_sb->magic3);
+	befs_sb->block_size = fs32_to_cpu(sb, disk_sb->block_size);
+	befs_sb->block_shift = fs32_to_cpu(sb, disk_sb->block_shift);
+	befs_sb->num_blocks = fs64_to_cpu(sb, disk_sb->num_blocks);
+	befs_sb->used_blocks = fs64_to_cpu(sb, disk_sb->used_blocks);
+	befs_sb->inode_size = fs32_to_cpu(sb, disk_sb->inode_size);
+
+	befs_sb->blocks_per_ag = fs32_to_cpu(sb, disk_sb->blocks_per_ag);
+	befs_sb->ag_shift = fs32_to_cpu(sb, disk_sb->ag_shift);
+	befs_sb->num_ags = fs32_to_cpu(sb, disk_sb->num_ags);
+
+	befs_sb->log_blocks = fsrun_to_cpu(sb, disk_sb->log_blocks);
+	befs_sb->log_start = fs64_to_cpu(sb, disk_sb->log_start);
+	befs_sb->log_end = fs64_to_cpu(sb, disk_sb->log_end);
+
+	befs_sb->root_dir = fsrun_to_cpu(sb, disk_sb->root_dir);
+	befs_sb->indices = fsrun_to_cpu(sb, disk_sb->indices);
+	befs_sb->nls = NULL;
+
+	return BEFS_OK;
+}
+
+int
+befs_check_sb(struct super_block *sb)
+{
+	befs_sb_info *befs_sb = BEFS_SB(sb);
+
+	/* Check magic headers of super block */
+	if ((befs_sb->magic1 != BEFS_SUPER_MAGIC1)
+	    || (befs_sb->magic2 != BEFS_SUPER_MAGIC2)
+	    || (befs_sb->magic3 != BEFS_SUPER_MAGIC3)) {
+		befs_error(sb, "invalid magic header");
+		return BEFS_ERR;
+	}
+
+	/*
+	 * Check blocksize of BEFS.
+	 *
+	 * Blocksize of BEFS is 1024, 2048, 4096 or 8192.
+	 */
+
+	if ((befs_sb->block_size != 1024)
+	    && (befs_sb->block_size != 2048)
+	    && (befs_sb->block_size != 4096)
+	    && (befs_sb->block_size != 8192)) {
+		befs_error(sb, "invalid blocksize: %u", befs_sb->block_size);
+		return BEFS_ERR;
+	}
+
+	if (befs_sb->block_size > PAGE_SIZE) {
+		befs_error(sb, "blocksize(%u) cannot be larger "
+			   "than system pagesize(%lu)", befs_sb->block_size,
+			   PAGE_SIZE);
+		return BEFS_ERR;
+	}
+
+	/*
+	 * block_shift and block_size encode the same information
+	 * in different ways as a consistency check.
+	 */
+
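+	/* e.g. a block_size of 2048 must be paired with block_shift == 11 */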
+	if ((1 << befs_sb->block_shift) != befs_sb->block_size) {
+		befs_error(sb, "block_shift disagrees with block_size. "
+			   "Corruption likely.");
+		return BEFS_ERR;
+	}
+
+	if (befs_sb->log_start != befs_sb->log_end) {
+		befs_error(sb, "Filesystem not clean! There are blocks in the "
+			   "journal. You must boot into BeOS and mount this volume "
+			   "to make it clean.");
+		return BEFS_ERR;
+	}
+
+	return BEFS_OK;
+}
diff --git a/fs/befs/super.h b/fs/befs/super.h
new file mode 100644
index 0000000..dc45563
--- /dev/null
+++ b/fs/befs/super.h
@@ -0,0 +1,8 @@
+/*
+ * super.h
+ */
+
+int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb);
+
+int befs_check_sb(struct super_block *sb);
+
diff --git a/fs/bfs/Makefile b/fs/bfs/Makefile
new file mode 100644
index 0000000..c787b36
--- /dev/null
+++ b/fs/bfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for BFS filesystem.
+#
+
+obj-$(CONFIG_BFS_FS) += bfs.o
+
+bfs-objs := inode.o file.o dir.o
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
new file mode 100644
index 0000000..1020dbc
--- /dev/null
+++ b/fs/bfs/bfs.h
@@ -0,0 +1,60 @@
+/*
+ *	fs/bfs/bfs.h
+ *	Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com>
+ */
+#ifndef _FS_BFS_BFS_H
+#define _FS_BFS_BFS_H
+
+#include <linux/bfs_fs.h>
+
+/*
+ * BFS file system in-core superblock info
+ */
+struct bfs_sb_info {
+	unsigned long si_blocks;
+	unsigned long si_freeb;
+	unsigned long si_freei;
+	unsigned long si_lf_ioff;
+	unsigned long si_lf_sblk;
+	unsigned long si_lf_eblk;
+	unsigned long si_lasti;
+	unsigned long * si_imap;
+	struct buffer_head * si_sbh;		/* buffer header w/superblock */
+	struct bfs_super_block * si_bfs_sb;	/* superblock in si_sbh->b_data */
+};
+
+/*
+ * BFS file system in-core inode info
+ */
+struct bfs_inode_info {
+	unsigned long i_dsk_ino; /* inode number from the disk, can be 0 */
+	unsigned long i_sblock;
+	unsigned long i_eblock;
+	struct inode vfs_inode;
+};
+
+static inline struct bfs_sb_info *BFS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline struct bfs_inode_info *BFS_I(struct inode *inode)
+{
+	return list_entry(inode, struct bfs_inode_info, vfs_inode);
+}
+
+
+#define printf(format, args...) \
+	printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args)
+
+
+/* file.c */
+extern struct inode_operations bfs_file_inops;
+extern struct file_operations bfs_file_operations;
+extern struct address_space_operations bfs_aops;
+
+/* dir.c */
+extern struct inode_operations bfs_dir_inops;
+extern struct file_operations bfs_dir_operations;
+
+#endif /* _FS_BFS_BFS_H */
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
new file mode 100644
index 0000000..5a1e5ce
--- /dev/null
+++ b/fs/bfs/dir.c
@@ -0,0 +1,362 @@
+/*
+ *	fs/bfs/dir.c
+ *	BFS directory operations.
+ *	Copyright (C) 1999,2000  Tigran Aivazian <tigran@veritas.com>
+ */
+
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include "bfs.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define dprintf(x...)	printf(x)
+#else
+#define dprintf(x...)
+#endif
+
+static int bfs_add_entry(struct inode * dir, const char * name, int namelen, int ino);
+static struct buffer_head * bfs_find_entry(struct inode * dir, 
+	const char * name, int namelen, struct bfs_dirent ** res_dir);
+
+static int bfs_readdir(struct file * f, void * dirent, filldir_t filldir)
+{
+	struct inode * dir = f->f_dentry->d_inode;
+	struct buffer_head * bh;
+	struct bfs_dirent * de;
+	unsigned int offset;
+	int block;
+
+	lock_kernel();
+
+	if (f->f_pos & (BFS_DIRENT_SIZE-1)) {
+		printf("Bad f_pos=%08lx for %s:%08lx\n", (unsigned long)f->f_pos, 
+			dir->i_sb->s_id, dir->i_ino);
+		unlock_kernel();
+		return -EBADF;
+	}
+
+	while (f->f_pos < dir->i_size) {
+		offset = f->f_pos & (BFS_BSIZE-1);
+		block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS);
+		bh = sb_bread(dir->i_sb, block);
+		if (!bh) {
+			f->f_pos += BFS_BSIZE - offset;
+			continue;
+		}
+		do {
+			de = (struct bfs_dirent *)(bh->b_data + offset);
+			if (de->ino) {
+				int size = strnlen(de->name, BFS_NAMELEN);
+				if (filldir(dirent, de->name, size, f->f_pos, de->ino, DT_UNKNOWN) < 0) {
+					brelse(bh);
+					unlock_kernel();
+					return 0;
+				}
+			}
+			offset += BFS_DIRENT_SIZE;
+			f->f_pos += BFS_DIRENT_SIZE;
+		} while (offset < BFS_BSIZE && f->f_pos < dir->i_size);
+		brelse(bh);
+	}
+
+	unlock_kernel();
+	return 0;	
+}
+
+struct file_operations bfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= bfs_readdir,
+	.fsync		= file_fsync,
+};
+
+extern void dump_imap(const char *, struct super_block *);
+
+static int bfs_create(struct inode * dir, struct dentry * dentry, int mode,
+		struct nameidata *nd)
+{
+	int err;
+	struct inode * inode;
+	struct super_block * s = dir->i_sb;
+	struct bfs_sb_info * info = BFS_SB(s);
+	unsigned long ino;
+
+	inode = new_inode(s);
+	if (!inode)
+		return -ENOSPC;
+	lock_kernel();
+	ino = find_first_zero_bit(info->si_imap, info->si_lasti);
+	if (ino > info->si_lasti) {
+		unlock_kernel();
+		iput(inode);
+		return -ENOSPC;
+	}
+	set_bit(ino, info->si_imap);	
+	info->si_freei--;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_blocks = inode->i_blksize = 0;
+	inode->i_op = &bfs_file_inops;
+	inode->i_fop = &bfs_file_operations;
+	inode->i_mapping->a_ops = &bfs_aops;
+	inode->i_mode = mode;
+	inode->i_ino = ino;
+	BFS_I(inode)->i_dsk_ino = ino;
+	BFS_I(inode)->i_sblock = 0;
+	BFS_I(inode)->i_eblock = 0;
+	insert_inode_hash(inode);
+        mark_inode_dirty(inode);
+	dump_imap("create",s);
+
+	err = bfs_add_entry(dir, dentry->d_name.name, dentry->d_name.len, inode->i_ino);
+	if (err) {
+		inode->i_nlink--;
+		mark_inode_dirty(inode);
+		iput(inode);
+		unlock_kernel();
+		return err;
+	}
+	unlock_kernel();
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static struct dentry * bfs_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct inode * inode = NULL;
+	struct buffer_head * bh;
+	struct bfs_dirent * de;
+
+	if (dentry->d_name.len > BFS_NAMELEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	lock_kernel();
+	bh = bfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &de);
+	if (bh) {
+		unsigned long ino = le32_to_cpu(de->ino);
+		brelse(bh);
+		inode = iget(dir->i_sb, ino);
+		if (!inode) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	d_add(dentry, inode);
+	return NULL;
+}
+
+static int bfs_link(struct dentry * old, struct inode * dir, struct dentry * new)
+{
+	struct inode * inode = old->d_inode;
+	int err;
+
+	lock_kernel();
+	err = bfs_add_entry(dir, new->d_name.name, new->d_name.len, inode->i_ino);
+	if (err) {
+		unlock_kernel();
+		return err;
+	}
+	inode->i_nlink++;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	atomic_inc(&inode->i_count);
+	d_instantiate(new, inode);
+	unlock_kernel();
+	return 0;
+}
+
+
+static int bfs_unlink(struct inode * dir, struct dentry * dentry)
+{
+	int error = -ENOENT;
+	struct inode * inode;
+	struct buffer_head * bh;
+	struct bfs_dirent * de;
+
+	inode = dentry->d_inode;
+	lock_kernel();
+	bh = bfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &de);
+	if (!bh || de->ino != inode->i_ino) 
+		goto out_brelse;
+
+	if (!inode->i_nlink) {
+		printf("unlinking non-existent file %s:%lu (nlink=%d)\n", inode->i_sb->s_id, 
+				inode->i_ino, inode->i_nlink);
+		inode->i_nlink = 1;
+	}
+	de->ino = 0;
+	mark_buffer_dirty(bh);
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	inode->i_nlink--;
+	inode->i_ctime = dir->i_ctime;
+	mark_inode_dirty(inode);
+	error = 0;
+
+out_brelse:
+	brelse(bh);
+	unlock_kernel();
+	return error;
+}
+
+static int bfs_rename(struct inode * old_dir, struct dentry * old_dentry, 
+			struct inode * new_dir, struct dentry * new_dentry)
+{
+	struct inode * old_inode, * new_inode;
+	struct buffer_head * old_bh, * new_bh;
+	struct bfs_dirent * old_de, * new_de;		
+	int error = -ENOENT;
+
+	old_bh = new_bh = NULL;
+	old_inode = old_dentry->d_inode;
+	if (S_ISDIR(old_inode->i_mode))
+		return -EINVAL;
+
+	lock_kernel();
+	old_bh = bfs_find_entry(old_dir, 
+				old_dentry->d_name.name, 
+				old_dentry->d_name.len, &old_de);
+
+	if (!old_bh || old_de->ino != old_inode->i_ino)
+		goto end_rename;
+
+	error = -EPERM;
+	new_inode = new_dentry->d_inode;
+	new_bh = bfs_find_entry(new_dir, 
+				new_dentry->d_name.name, 
+				new_dentry->d_name.len, &new_de);
+
+	if (new_bh && !new_inode) {
+		brelse(new_bh);
+		new_bh = NULL;
+	}
+	if (!new_bh) {
+		error = bfs_add_entry(new_dir, 
+					new_dentry->d_name.name,
+			 		new_dentry->d_name.len, old_inode->i_ino);
+		if (error)
+			goto end_rename;
+	}
+	old_de->ino = 0;
+	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(old_dir);
+	if (new_inode) {
+		new_inode->i_nlink--;
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(new_inode);
+	}
+	mark_buffer_dirty(old_bh);
+	error = 0;
+
+end_rename:
+	unlock_kernel();
+	brelse(old_bh);
+	brelse(new_bh);
+	return error;
+}
+
+struct inode_operations bfs_dir_inops = {
+	.create			= bfs_create,
+	.lookup			= bfs_lookup,
+	.link			= bfs_link,
+	.unlink			= bfs_unlink,
+	.rename			= bfs_rename,
+};
+
+static int bfs_add_entry(struct inode * dir, const char * name, int namelen, int ino)
+{
+	struct buffer_head * bh;
+	struct bfs_dirent * de;
+	int block, sblock, eblock, off, eoff;
+	int i;
+
+	dprintf("name=%s, namelen=%d\n", name, namelen);
+
+	if (!namelen)
+		return -ENOENT;
+	if (namelen > BFS_NAMELEN)
+		return -ENAMETOOLONG;
+
+	sblock = BFS_I(dir)->i_sblock;
+	eblock = BFS_I(dir)->i_eblock;
+	eoff = dir->i_size % BFS_BSIZE;
+	for (block=sblock; block<=eblock; block++) {
+		bh = sb_bread(dir->i_sb, block);
+		if(!bh) 
+			return -ENOSPC;
+		for (off=0; off<BFS_BSIZE; off+=BFS_DIRENT_SIZE) {
+			de = (struct bfs_dirent *)(bh->b_data + off);
+			if (block==eblock && off>=eoff) {
+				/* Do not read/interpret the garbage in the end of eblock. */
+				de->ino = 0;
+			}
+			if (!de->ino) {
+				if ((block-sblock)*BFS_BSIZE + off >= dir->i_size) {
+					dir->i_size += BFS_DIRENT_SIZE;
+					dir->i_ctime = CURRENT_TIME_SEC;
+				}
+				dir->i_mtime = CURRENT_TIME_SEC;
+				mark_inode_dirty(dir);
+				de->ino = ino;
+				for (i=0; i<BFS_NAMELEN; i++)
+					de->name[i] = (i < namelen) ? name[i] : 0;
+				mark_buffer_dirty(bh);
+				brelse(bh);
+				return 0;
+			}
+		}
+		brelse(bh);
+	}
+	return -ENOSPC;
+}
+
+static inline int bfs_namecmp(int len, const char * name, const char * buffer)
+{
+	if (len < BFS_NAMELEN && buffer[len])
+		return 0;
+	return !memcmp(name, buffer, len);
+}
+
+static struct buffer_head * bfs_find_entry(struct inode * dir, 
+	const char * name, int namelen, struct bfs_dirent ** res_dir)
+{
+	unsigned long block, offset;
+	struct buffer_head * bh;
+	struct bfs_dirent * de;
+
+	*res_dir = NULL;
+	if (namelen > BFS_NAMELEN)
+		return NULL;
+	bh = NULL;
+	block = offset = 0;
+	while (block * BFS_BSIZE + offset < dir->i_size) {
+		if (!bh) {
+			bh = sb_bread(dir->i_sb, BFS_I(dir)->i_sblock + block);
+			if (!bh) {
+				block++;
+				continue;
+			}
+		}
+		de = (struct bfs_dirent *)(bh->b_data + offset);
+		offset += BFS_DIRENT_SIZE;
+		if (de->ino && bfs_namecmp(namelen, name, de->name)) {
+			*res_dir = de;
+			return bh;
+		}
+		if (offset < bh->b_size)
+			continue;
+		brelse(bh);
+		bh = NULL;
+		offset = 0;
+		block++;
+	}
+	brelse(bh);
+	return NULL;
+}
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
new file mode 100644
index 0000000..747fd1e
--- /dev/null
+++ b/fs/bfs/file.c
@@ -0,0 +1,162 @@
+/*
+ *	fs/bfs/file.c
+ *	BFS file operations.
+ *	Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include "bfs.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define dprintf(x...)	printf(x)
+#else
+#define dprintf(x...)
+#endif
+
+struct file_operations bfs_file_operations = {
+	.llseek 	= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.sendfile	= generic_file_sendfile,
+};
+
+static int bfs_move_block(unsigned long from, unsigned long to, struct super_block *sb)
+{
+	struct buffer_head *bh, *new;
+
+	bh = sb_bread(sb, from);
+	if (!bh)
+		return -EIO;
+	new = sb_getblk(sb, to);
+	memcpy(new->b_data, bh->b_data, bh->b_size);
+	mark_buffer_dirty(new);
+	bforget(bh);
+	brelse(new);
+	return 0;
+}
+
+static int bfs_move_blocks(struct super_block *sb, unsigned long start, unsigned long end, 
+				unsigned long where)
+{
+	unsigned long i;
+
+	dprintf("%08lx-%08lx->%08lx\n", start, end, where);
+	for (i = start; i <= end; i++)
+		if(bfs_move_block(i, where + i, sb)) {
+			dprintf("failed to move block %08lx -> %08lx\n", i, where + i);
+			return -EIO;
+		}
+	return 0;
+}
+
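+/*
+ * BFS keeps each file in one contiguous run of blocks
+ * (i_sblock..i_eblock).  A file can only be extended in place when it
+ * is the last allocated file (i_eblock == si_lf_eblk); otherwise the
+ * whole file is relocated past the current end of data, as done below.
+ */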
+static int bfs_get_block(struct inode * inode, sector_t block, 
+	struct buffer_head * bh_result, int create)
+{
+	long phys;
+	int err;
+	struct super_block *sb = inode->i_sb;
+	struct bfs_sb_info *info = BFS_SB(sb);
+	struct bfs_inode_info *bi = BFS_I(inode);
+	struct buffer_head *sbh = info->si_sbh;
+
+	if (block < 0 || block > info->si_blocks)
+		return -EIO;
+
+	phys = bi->i_sblock + block;
+	if (!create) {
+		if (phys <= bi->i_eblock) {
+			dprintf("c=%d, b=%08lx, phys=%08lx (granted)\n", create, block, phys);
+			map_bh(bh_result, sb, phys);
+		}
+		return 0;
+	}
+
+	/* if the file is not empty and the requested block is within the range
+	   of blocks allocated for this file, we can grant it */
+	if (inode->i_size && phys <= bi->i_eblock) {
+		dprintf("c=%d, b=%08lx, phys=%08lx (interim block granted)\n", 
+				create, block, phys);
+		map_bh(bh_result, sb, phys);
+		return 0;
+	}
+
+	/* the rest has to be protected against itself */
+	lock_kernel();
+
+	/* if the last data block for this file is the last allocated block, we can
+	   extend the file trivially, without moving it anywhere */
+	if (bi->i_eblock == info->si_lf_eblk) {
+		dprintf("c=%d, b=%08lx, phys=%08lx (simple extension)\n", 
+				create, block, phys);
+		map_bh(bh_result, sb, phys);
+		info->si_freeb -= phys - bi->i_eblock;
+		info->si_lf_eblk = bi->i_eblock = phys;
+		mark_inode_dirty(inode);
+		mark_buffer_dirty(sbh);
+		err = 0;
+		goto out;
+	}
+
+	/* Ok, we have to move this entire file to the next free block */
+	phys = info->si_lf_eblk + 1;
+	if (bi->i_sblock) { /* if data starts on block 0 then there is no data */
+		err = bfs_move_blocks(inode->i_sb, bi->i_sblock, 
+				bi->i_eblock, phys);
+		if (err) {
+			dprintf("failed to move ino=%08lx -> fs corruption\n", inode->i_ino);
+			goto out;
+		}
+	} else
+		err = 0;
+
+	dprintf("c=%d, b=%08lx, phys=%08lx (moved)\n", create, block, phys);
+	bi->i_sblock = phys;
+	phys += block;
+	info->si_lf_eblk = bi->i_eblock = phys;
+
+	/* This assumes nothing can write the inode back, and thus update
+	 * inode->i_blocks, while we are here.  (XXX) */
+	info->si_freeb -= bi->i_eblock - bi->i_sblock + 1 - inode->i_blocks;
+	mark_inode_dirty(inode);
+	mark_buffer_dirty(sbh);
+	map_bh(bh_result, sb, phys);
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int bfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, bfs_get_block, wbc);
+}
+
+static int bfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, bfs_get_block);
+}
+
+static int bfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page, from, to, bfs_get_block);
+}
+
+static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, bfs_get_block);
+}
+
+struct address_space_operations bfs_aops = {
+	.readpage	= bfs_readpage,
+	.writepage	= bfs_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= bfs_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= bfs_bmap,
+};
+
+struct inode_operations bfs_file_inops;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
new file mode 100644
index 0000000..64e0fb33
--- /dev/null
+++ b/fs/bfs/inode.c
@@ -0,0 +1,420 @@
+/*
+ *	fs/bfs/inode.c
+ *	BFS superblock and inode operations.
+ *	Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com>
+ *	From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <asm/uaccess.h>
+#include "bfs.h"
+
+MODULE_AUTHOR("Tigran A. Aivazian <tigran@veritas.com>");
+MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux");
+MODULE_LICENSE("GPL");
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define dprintf(x...)	printf(x)
+#else
+#define dprintf(x...)
+#endif
+
+void dump_imap(const char *prefix, struct super_block * s);
+
+static void bfs_read_inode(struct inode * inode)
+{
+	unsigned long ino = inode->i_ino;
+	struct bfs_inode * di;
+	struct buffer_head * bh;
+	int block, off;
+
+	if (ino < BFS_ROOT_INO || ino > BFS_SB(inode->i_sb)->si_lasti) {
+		printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
+		make_bad_inode(inode);
+		return;
+	}
+
+	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
+	bh = sb_bread(inode->i_sb, block);
+	if (!bh) {
+		printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, ino);
+		make_bad_inode(inode);
+		return;
+	}
+
+	off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
+	di = (struct bfs_inode *)bh->b_data + off;
+
+	inode->i_mode = 0x0000FFFF & di->i_mode;
+	if (di->i_vtype == BFS_VDIR) {
+		inode->i_mode |= S_IFDIR;
+		inode->i_op = &bfs_dir_inops;
+		inode->i_fop = &bfs_dir_operations;
+	} else if (di->i_vtype == BFS_VREG) {
+		inode->i_mode |= S_IFREG;
+		inode->i_op = &bfs_file_inops;
+		inode->i_fop = &bfs_file_operations;
+		inode->i_mapping->a_ops = &bfs_aops;
+	}
+
+	inode->i_uid = di->i_uid;
+	inode->i_gid = di->i_gid;
+	inode->i_nlink = di->i_nlink;
+	inode->i_size = BFS_FILESIZE(di);
+	inode->i_blocks = BFS_FILEBLOCKS(di);
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_atime.tv_sec = di->i_atime;
+	inode->i_mtime.tv_sec = di->i_mtime;
+	inode->i_ctime.tv_sec = di->i_ctime;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	BFS_I(inode)->i_dsk_ino = di->i_ino; /* can be 0 so we store a copy */
+	BFS_I(inode)->i_sblock = di->i_sblock;
+	BFS_I(inode)->i_eblock = di->i_eblock;
+
+	brelse(bh);
+}
+
+static int bfs_write_inode(struct inode * inode, int unused)
+{
+	unsigned long ino = inode->i_ino;
+	struct bfs_inode * di;
+	struct buffer_head * bh;
+	int block, off;
+
+	if (ino < BFS_ROOT_INO || ino > BFS_SB(inode->i_sb)->si_lasti) {
+		printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
+		return -EIO;
+	}
+
+	lock_kernel();
+	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
+	bh = sb_bread(inode->i_sb, block);
+	if (!bh) {
+		printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, ino);
+		unlock_kernel();
+		return -EIO;
+	}
+
+	off = (ino - BFS_ROOT_INO)%BFS_INODES_PER_BLOCK;
+	di = (struct bfs_inode *)bh->b_data + off;
+
+	if (inode->i_ino == BFS_ROOT_INO)
+		di->i_vtype = BFS_VDIR;
+	else
+		di->i_vtype = BFS_VREG;
+
+	di->i_ino = inode->i_ino;
+	di->i_mode = inode->i_mode;
+	di->i_uid = inode->i_uid;
+	di->i_gid = inode->i_gid;
+	di->i_nlink = inode->i_nlink;
+	di->i_atime = inode->i_atime.tv_sec;
+	di->i_mtime = inode->i_mtime.tv_sec;
+	di->i_ctime = inode->i_ctime.tv_sec;
+	di->i_sblock = BFS_I(inode)->i_sblock;
+	di->i_eblock = BFS_I(inode)->i_eblock;
+	di->i_eoffset = di->i_sblock * BFS_BSIZE + inode->i_size - 1;
+
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	unlock_kernel();
+	return 0;
+}
+
+static void bfs_delete_inode(struct inode * inode)
+{
+	unsigned long ino = inode->i_ino;
+	struct bfs_inode * di;
+	struct buffer_head * bh;
+	int block, off;
+	struct super_block * s = inode->i_sb;
+	struct bfs_sb_info * info = BFS_SB(s);
+
+	dprintf("ino=%08lx\n", inode->i_ino);
+
+	if (inode->i_ino < BFS_ROOT_INO || inode->i_ino > info->si_lasti) {
+		printf("invalid ino=%08lx\n", inode->i_ino);
+		return;
+	}
+	
+	inode->i_size = 0;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	lock_kernel();
+	mark_inode_dirty(inode);
+	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
+	bh = sb_bread(s, block);
+	if (!bh) {
+		printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, ino);
+		unlock_kernel();
+		return;
+	}
+	off = (ino - BFS_ROOT_INO)%BFS_INODES_PER_BLOCK;
+	di = (struct bfs_inode *)bh->b_data + off;
+	if (di->i_ino) {
+		info->si_freeb += BFS_FILEBLOCKS(di);
+		info->si_freei++;
+		clear_bit(di->i_ino, info->si_imap);
+		dump_imap("delete_inode", s);
+	}
+	di->i_ino = 0;
+	di->i_sblock = 0;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+
+	/* If this file's last block was also the last allocated block, pull
+	   the "last file's last block" marker back to the block before this
+	   file's start, even if no real file ends there; saves us one gap. */
+	if (info->si_lf_eblk == BFS_I(inode)->i_eblock) {
+		info->si_lf_eblk = BFS_I(inode)->i_sblock - 1;
+		mark_buffer_dirty(info->si_sbh);
+	}
+	unlock_kernel();
+	clear_inode(inode);
+}
+
+static void bfs_put_super(struct super_block *s)
+{
+	struct bfs_sb_info *info = BFS_SB(s);
+	brelse(info->si_sbh);
+	kfree(info->si_imap);
+	kfree(info);
+	s->s_fs_info = NULL;
+}
+
+static int bfs_statfs(struct super_block *s, struct kstatfs *buf)
+{
+	struct bfs_sb_info *info = BFS_SB(s);
+	u64 id = huge_encode_dev(s->s_bdev->bd_dev);
+	buf->f_type = BFS_MAGIC;
+	buf->f_bsize = s->s_blocksize;
+	buf->f_blocks = info->si_blocks;
+	buf->f_bfree = buf->f_bavail = info->si_freeb;
+	buf->f_files = info->si_lasti + 1 - BFS_ROOT_INO;
+	buf->f_ffree = info->si_freei;
+	buf->f_fsid.val[0] = (u32)id;
+	buf->f_fsid.val[1] = (u32)(id >> 32);
+	buf->f_namelen = BFS_NAMELEN;
+	return 0;
+}
+
+static void bfs_write_super(struct super_block *s)
+{
+	lock_kernel();
+	if (!(s->s_flags & MS_RDONLY))
+		mark_buffer_dirty(BFS_SB(s)->si_sbh);
+	s->s_dirt = 0;
+	unlock_kernel();
+}
+
+static kmem_cache_t * bfs_inode_cachep;
+
+static struct inode *bfs_alloc_inode(struct super_block *sb)
+{
+	struct bfs_inode_info *bi;
+	bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+	if (!bi)
+		return NULL;
+	return &bi->vfs_inode;
+}
+
+static void bfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct bfs_inode_info *bi = foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&bi->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	bfs_inode_cachep = kmem_cache_create("bfs_inode_cache",
+					     sizeof(struct bfs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (bfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(bfs_inode_cachep))
+		printk(KERN_INFO "bfs_inode_cache: not all structures were freed\n");
+}
+
+static struct super_operations bfs_sops = {
+	.alloc_inode	= bfs_alloc_inode,
+	.destroy_inode	= bfs_destroy_inode,
+	.read_inode	= bfs_read_inode,
+	.write_inode	= bfs_write_inode,
+	.delete_inode	= bfs_delete_inode,
+	.put_super	= bfs_put_super,
+	.write_super	= bfs_write_super,
+	.statfs		= bfs_statfs,
+};
+
+void dump_imap(const char *prefix, struct super_block * s)
+{
+#if 0
+	int i;
+	char *tmpbuf = (char *)get_zeroed_page(GFP_KERNEL);
+
+	if (!tmpbuf)
+		return;
+	for (i=BFS_SB(s)->si_lasti; i>=0; i--) {
+		if (i>PAGE_SIZE-100) break;
+		if (test_bit(i, BFS_SB(s)->si_imap))
+			strcat(tmpbuf, "1");
+		else
+			strcat(tmpbuf, "0");
+	}
+	printk(KERN_ERR "BFS-fs: %s: lasti=%08lx <%s>\n", prefix, BFS_SB(s)->si_lasti, tmpbuf);
+	free_page((unsigned long)tmpbuf);
+#endif
+}
+
+static int bfs_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct buffer_head * bh;
+	struct bfs_super_block * bfs_sb;
+	struct inode * inode;
+	int i, imap_len;
+	struct bfs_sb_info * info;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	s->s_fs_info = info;
+	memset(info, 0, sizeof(*info));
+
+	sb_set_blocksize(s, BFS_BSIZE);
+
+	bh = sb_bread(s, 0);
+	if(!bh)
+		goto out;
+	bfs_sb = (struct bfs_super_block *)bh->b_data;
+	if (bfs_sb->s_magic != BFS_MAGIC) {
+		if (!silent)
+			printf("No BFS filesystem on %s (magic=%08x)\n", 
+				s->s_id, bfs_sb->s_magic);
+		goto out;
+	}
+	if (BFS_UNCLEAN(bfs_sb, s) && !silent)
+		printf("%s is unclean, continuing\n", s->s_id);
+
+	s->s_magic = BFS_MAGIC;
+	info->si_bfs_sb = bfs_sb;
+	info->si_sbh = bh;
+	info->si_lasti = (bfs_sb->s_start - BFS_BSIZE)/sizeof(struct bfs_inode) 
+			+ BFS_ROOT_INO - 1;
+
+	imap_len = info->si_lasti/8 + 1;
+	info->si_imap = kmalloc(imap_len, GFP_KERNEL);
+	if (!info->si_imap)
+		goto out;
+	memset(info->si_imap, 0, imap_len);
+	for (i=0; i<BFS_ROOT_INO; i++) 
+		set_bit(i, info->si_imap);
+
+	s->s_op = &bfs_sops;
+	inode = iget(s, BFS_ROOT_INO);
+	if (!inode) {
+		kfree(info->si_imap);
+		goto out;
+	}
+	s->s_root = d_alloc_root(inode);
+	if (!s->s_root) {
+		iput(inode);
+		kfree(info->si_imap);
+		goto out;
+	}
+
+	info->si_blocks = (bfs_sb->s_end + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */
+	info->si_freeb = (bfs_sb->s_end + 1 - bfs_sb->s_start)>>BFS_BSIZE_BITS;
+	info->si_freei = 0;
+	info->si_lf_eblk = 0;
+	info->si_lf_sblk = 0;
+	info->si_lf_ioff = 0;
+	for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) {
+		inode = iget(s,i);
+		if (BFS_I(inode)->i_dsk_ino == 0)
+			info->si_freei++;
+		else {
+			set_bit(i, info->si_imap);
+			info->si_freeb -= inode->i_blocks;
+			if (BFS_I(inode)->i_eblock > info->si_lf_eblk) {
+				info->si_lf_eblk = BFS_I(inode)->i_eblock;
+				info->si_lf_sblk = BFS_I(inode)->i_sblock;
+				info->si_lf_ioff = BFS_INO2OFF(i);
+			}
+		}
+		iput(inode);
+	}
+	if (!(s->s_flags & MS_RDONLY)) {
+		mark_buffer_dirty(bh);
+		s->s_dirt = 1;
+	} 
+	dump_imap("read_super", s);
+	return 0;
+
+out:
+	brelse(bh);
+	kfree(info);
+	s->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static struct super_block *bfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, bfs_fill_super);
+}
+
+static struct file_system_type bfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "bfs",
+	.get_sb		= bfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_bfs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+        err = register_filesystem(&bfs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_bfs_fs(void)
+{
+	unregister_filesystem(&bfs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_bfs_fs)
+module_exit(exit_bfs_fs)
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
new file mode 100644
index 0000000..009b892
--- /dev/null
+++ b/fs/binfmt_aout.c
@@ -0,0 +1,550 @@
+/*
+ *  linux/fs/binfmt_aout.c
+ *
+ *  Copyright (C) 1991, 1992, 1996  Linus Torvalds
+ */
+
+#include <linux/module.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
+static int load_aout_library(struct file*);
+static int aout_core_dump(long signr, struct pt_regs * regs, struct file *file);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+static struct linux_binfmt aout_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_aout_binary,
+	.load_shlib	= load_aout_library,
+	.core_dump	= aout_core_dump,
+	.min_coredump	= PAGE_SIZE
+};
+
+#define BAD_ADDR(x)	((unsigned long)(x) >= TASK_SIZE)
+
+static int set_brk(unsigned long start, unsigned long end)
+{
+	start = PAGE_ALIGN(start);
+	end = PAGE_ALIGN(end);
+	if (end > start) {
+		unsigned long addr;
+		down_write(&current->mm->mmap_sem);
+		addr = do_brk(start, end - start);
+		up_write(&current->mm->mmap_sem);
+		if (BAD_ADDR(addr))
+			return addr;
+	}
+	return 0;
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * macros to write out all the necessary info.
+ */
+
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+#define DUMP_WRITE(addr, nr)	\
+	if (!dump_write(file, (void *)(addr), (nr))) \
+		goto end_coredump;
+
+#define DUMP_SEEK(offset) \
+if (file->f_op->llseek) { \
+	if (file->f_op->llseek(file,(offset),0) != (offset)) \
+ 		goto end_coredump; \
+} else file->f_pos = (offset)
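+
+/*
+ * Both macros expand to code that jumps to the end_coredump label on a
+ * short write or a failed seek, so they can only be used inside
+ * aout_core_dump() below, which defines that label.
+ */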
+
+/*
+ * Routine writes a core dump image in the current directory.
+ * Currently only a stub-function.
+ *
+ * Note that setuid/setgid files won't produce a core dump if the uid/gid
+ * changed due to the set[u|g]id bits.  This is enforced by the
+ * "current->mm->dumpable" field, which also makes sure the core dump won't
+ * be recursive if dumping the process results in another error.
+ */
+
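+/*
+ * What gets written, in order: the struct user header, then (except on
+ * sparc) a seek to PAGE_SIZE, then the data area, then the stack area,
+ * and finally a copy of the task_struct.  Areas that exceed RLIMIT_CORE
+ * or are not readable are dropped by zeroing their size first.
+ */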
+static int aout_core_dump(long signr, struct pt_regs * regs, struct file *file)
+{
+	mm_segment_t fs;
+	int has_dumped = 0;
+	unsigned long dump_start, dump_size;
+	struct user dump;
+#if defined(__alpha__)
+#       define START_DATA(u)	(u.start_data)
+#elif defined(__arm__)
+#	define START_DATA(u)	((u.u_tsize << PAGE_SHIFT) + u.start_code)
+#elif defined(__sparc__)
+#       define START_DATA(u)    (u.u_tsize)
+#elif defined(__i386__) || defined(__mc68000__) || defined(__arch_um__)
+#       define START_DATA(u)	(u.u_tsize << PAGE_SHIFT)
+#endif
+#ifdef __sparc__
+#       define START_STACK(u)   ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))
+#else
+#       define START_STACK(u)   (u.start_stack)
+#endif
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	has_dumped = 1;
+	current->flags |= PF_DUMPCORE;
+       	strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
+#ifndef __sparc__
+	dump.u_ar0 = (void *)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump)));
+#endif
+	dump.signal = signr;
+	dump_thread(regs, &dump);
+
+/* If the size of the dump file exceeds the rlimit, then see what would happen
+   if we wrote the stack, but not the data area.  */
+#ifdef __sparc__
+	if ((dump.u_dsize+dump.u_ssize) >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_dsize = 0;
+#else
+	if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_dsize = 0;
+#endif
+
+/* Make sure we have enough room to write the stack and data areas. */
+#ifdef __sparc__
+	if ((dump.u_ssize) >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_ssize = 0;
+#else
+	if ((dump.u_ssize+1) * PAGE_SIZE >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_ssize = 0;
+#endif
+
+/* make sure we actually have a data and stack area to dump */
+	set_fs(USER_DS);
+#ifdef __sparc__
+	if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize))
+		dump.u_dsize = 0;
+	if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize))
+		dump.u_ssize = 0;
+#else
+	if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
+		dump.u_dsize = 0;
+	if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
+		dump.u_ssize = 0;
+#endif
+
+	set_fs(KERNEL_DS);
+/* struct user */
+	DUMP_WRITE(&dump,sizeof(dump));
+/* Now dump all of the user data.  Include malloced stuff as well */
+#ifndef __sparc__
+	DUMP_SEEK(PAGE_SIZE);
+#endif
+/* now we start writing out the user space info */
+	set_fs(USER_DS);
+/* Dump the data area */
+	if (dump.u_dsize != 0) {
+		dump_start = START_DATA(dump);
+#ifdef __sparc__
+		dump_size = dump.u_dsize;
+#else
+		dump_size = dump.u_dsize << PAGE_SHIFT;
+#endif
+		DUMP_WRITE(dump_start,dump_size);
+	}
+/* Now prepare to dump the stack area */
+	if (dump.u_ssize != 0) {
+		dump_start = START_STACK(dump);
+#ifdef __sparc__
+		dump_size = dump.u_ssize;
+#else
+		dump_size = dump.u_ssize << PAGE_SHIFT;
+#endif
+		DUMP_WRITE(dump_start,dump_size);
+	}
+/* Finally dump the task struct.  Not used by gdb, but could be useful */
+	set_fs(KERNEL_DS);
+	DUMP_WRITE(current,sizeof(*current));
+end_coredump:
+	set_fs(fs);
+	return has_dumped;
+}
+
+/*
+ * create_aout_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
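+/*
+ * Roughly, the layout built here (from the returned stack pointer upward)
+ * is: argc, then on some architectures the addresses of the argv and envp
+ * arrays themselves, then argv[0..argc-1] followed by a NULL, then
+ * envp[0..envc-1] followed by a NULL, with the argument and environment
+ * strings (pointed to by p) living above all of that.
+ */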
+static unsigned long __user *create_aout_tables(char __user *p, struct linux_binprm * bprm)
+{
+	char __user * __user *argv;
+	char __user * __user *envp;
+	unsigned long __user *sp;
+	int argc = bprm->argc;
+	int envc = bprm->envc;
+
+	sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p);
+#ifdef __sparc__
+	/* This imposes the proper stack alignment for a new process. */
+	sp = (void __user *) (((unsigned long) sp) & ~7);
+	if ((envc+argc+3)&1) --sp;
+#endif
+#ifdef __alpha__
+/* whee.. test-programs are so much fun. */
+	put_user(0, --sp);
+	put_user(0, --sp);
+	if (bprm->loader) {
+		put_user(0, --sp);
+		put_user(0x3eb, --sp);
+		put_user(bprm->loader, --sp);
+		put_user(0x3ea, --sp);
+	}
+	put_user(bprm->exec, --sp);
+	put_user(0x3e9, --sp);
+#endif
+	sp -= envc+1;
+	envp = (char __user * __user *) sp;
+	sp -= argc+1;
+	argv = (char __user * __user *) sp;
+#if defined(__i386__) || defined(__mc68000__) || defined(__arm__) || defined(__arch_um__)
+	put_user((unsigned long) envp,--sp);
+	put_user((unsigned long) argv,--sp);
+#endif
+	put_user(argc,--sp);
+	current->mm->arg_start = (unsigned long) p;
+	while (argc-->0) {
+		char c;
+		put_user(p,argv++);
+		do {
+			get_user(c,p++);
+		} while (c);
+	}
+	put_user(NULL,argv);
+	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+	while (envc-->0) {
+		char c;
+		put_user(p,envp++);
+		do {
+			get_user(c,p++);
+		} while (c);
+	}
+	put_user(NULL,envp);
+	current->mm->env_end = (unsigned long) p;
+	return sp;
+}
+
+/*
+ * These are the functions used to load a.out style executables and shared
+ * libraries.  There is no binary dependent code anywhere else.
+ */
+
+static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	struct exec ex;
+	unsigned long error;
+	unsigned long fd_offset;
+	unsigned long rlim;
+	int retval;
+
+	ex = *((struct exec *) bprm->buf);		/* exec-header */
+	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
+	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
+	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
+	    i_size_read(bprm->file->f_dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+		return -ENOEXEC;
+	}
+
+	fd_offset = N_TXTOFF(ex);
+
+	/* Check initial limits. This avoids letting people circumvent
+	 * size limits imposed on them by creating programs with large
+	 * arrays in the data or bss.
+	 */
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim >= RLIM_INFINITY)
+		rlim = ~0;
+	if (ex.a_data + ex.a_bss > rlim)
+		return -ENOMEM;
+
+	/* Flush all traces of the currently running executable */
+	retval = flush_old_exec(bprm);
+	if (retval)
+		return retval;
+
+	/* OK, This is the point of no return */
+#if defined(__alpha__)
+	SET_AOUT_PERSONALITY(bprm, ex);
+#elif defined(__sparc__)
+	set_personality(PER_SUNOS);
+#if !defined(__sparc_v9__)
+	memcpy(&current->thread.core_exec, &ex, sizeof(struct exec));
+#endif
+#else
+	set_personality(PER_LINUX);
+#endif
+
+	current->mm->end_code = ex.a_text +
+		(current->mm->start_code = N_TXTADDR(ex));
+	current->mm->end_data = ex.a_data +
+		(current->mm->start_data = N_DATADDR(ex));
+	current->mm->brk = ex.a_bss +
+		(current->mm->start_brk = N_BSSADDR(ex));
+	current->mm->free_area_cache = current->mm->mmap_base;
+
+	set_mm_counter(current->mm, rss, 0);
+	current->mm->mmap = NULL;
+	compute_creds(bprm);
+ 	current->flags &= ~PF_FORKNOEXEC;
+#ifdef __sparc__
+	if (N_MAGIC(ex) == NMAGIC) {
+		loff_t pos = fd_offset;
+		/* Fuck me plenty... */
+		/* <AOL></AOL> */
+		down_write(&current->mm->mmap_sem);	
+		error = do_brk(N_TXTADDR(ex), ex.a_text);
+		up_write(&current->mm->mmap_sem);
+		bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
+			  ex.a_text, &pos);
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(N_DATADDR(ex), ex.a_data);
+		up_write(&current->mm->mmap_sem);
+		bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex),
+			  ex.a_data, &pos);
+		goto beyond_if;
+	}
+#endif
+
+	if (N_MAGIC(ex) == OMAGIC) {
+		unsigned long text_addr, map_size;
+		loff_t pos;
+
+		text_addr = N_TXTADDR(ex);
+
+#if defined(__alpha__) || defined(__sparc__)
+		pos = fd_offset;
+		map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1;
+#else
+		pos = 32;
+		map_size = ex.a_text+ex.a_data;
+#endif
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(text_addr & PAGE_MASK, map_size);
+		up_write(&current->mm->mmap_sem);
+		if (error != (text_addr & PAGE_MASK)) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+
+		error = bprm->file->f_op->read(bprm->file,
+			  (char __user *)text_addr,
+			  ex.a_text+ex.a_data, &pos);
+		if ((signed long)error < 0) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+			 
+		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
+	} else {
+		static unsigned long error_time, error_time2;
+		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
+		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
+		{
+			printk(KERN_NOTICE "executable not page aligned\n");
+			error_time2 = jiffies;
+		}
+
+		if ((fd_offset & ~PAGE_MASK) != 0 &&
+		    (jiffies-error_time) > 5*HZ)
+		{
+			printk(KERN_WARNING 
+			       "fd_offset is not page aligned. Please convert program: %s\n",
+			       bprm->file->f_dentry->d_name.name);
+			error_time = jiffies;
+		}
+
+		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
+			loff_t pos = fd_offset;
+			down_write(&current->mm->mmap_sem);
+			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+			up_write(&current->mm->mmap_sem);
+			bprm->file->f_op->read(bprm->file,
+					(char __user *)N_TXTADDR(ex),
+					ex.a_text+ex.a_data, &pos);
+			flush_icache_range((unsigned long) N_TXTADDR(ex),
+					   (unsigned long) N_TXTADDR(ex) +
+					   ex.a_text+ex.a_data);
+			goto beyond_if;
+		}
+
+		down_write(&current->mm->mmap_sem);
+		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+			PROT_READ | PROT_EXEC,
+			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+			fd_offset);
+		up_write(&current->mm->mmap_sem);
+
+		if (error != N_TXTADDR(ex)) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+
+		down_write(&current->mm->mmap_sem);
+ 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+				PROT_READ | PROT_WRITE | PROT_EXEC,
+				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+				fd_offset + ex.a_text);
+		up_write(&current->mm->mmap_sem);
+		if (error != N_DATADDR(ex)) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+	}
+beyond_if:
+	set_binfmt(&aout_format);
+
+	retval = set_brk(current->mm->start_brk, current->mm->brk);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		return retval;
+	}
+
+	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+	if (retval < 0) { 
+		/* Someone check-me: is this error path enough? */ 
+		send_sig(SIGKILL, current, 0); 
+		return retval;
+	}
+
+	current->mm->start_stack =
+		(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
+#ifdef __alpha__
+	regs->gp = ex.a_gpvalue;
+#endif
+	start_thread(regs, ex.a_entry, current->mm->start_stack);
+	if (unlikely(current->ptrace & PT_PTRACED)) {
+		if (current->ptrace & PT_TRACE_EXEC)
+			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
+		else
+			send_sig(SIGTRAP, current, 0);
+	}
+	return 0;
+}
+
+static int load_aout_library(struct file *file)
+{
+	struct inode * inode;
+	unsigned long bss, start_addr, len;
+	unsigned long error;
+	int retval;
+	struct exec ex;
+
+	inode = file->f_dentry->d_inode;
+
+	retval = -ENOEXEC;
+	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
+	if (error != sizeof(ex))
+		goto out;
+
+	/* We come in here for the regular a.out style of shared libraries */
+	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
+	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
+	    i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+		goto out;
+	}
+
+	if (N_FLAGS(ex))
+		goto out;
+
+	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
+	   this off to get the starting address for the page */
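+	/* E.g. an a_entry of 0x00001020 yields a start_addr of 0x00001000. */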
+
+	start_addr =  ex.a_entry & 0xfffff000;
+
+	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
+		static unsigned long error_time;
+		loff_t pos = N_TXTOFF(ex);
+
+		if ((jiffies-error_time) > 5*HZ)
+		{
+			printk(KERN_WARNING 
+			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
+			       file->f_dentry->d_name.name);
+			error_time = jiffies;
+		}
+		down_write(&current->mm->mmap_sem);
+		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
+		up_write(&current->mm->mmap_sem);
+		
+		file->f_op->read(file, (char __user *)start_addr,
+			ex.a_text + ex.a_data, &pos);
+		flush_icache_range((unsigned long) start_addr,
+				   (unsigned long) start_addr + ex.a_text + ex.a_data);
+
+		retval = 0;
+		goto out;
+	}
+	/* Now use mmap to map the library into memory. */
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+			PROT_READ | PROT_WRITE | PROT_EXEC,
+			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+			N_TXTOFF(ex));
+	up_write(&current->mm->mmap_sem);
+	retval = error;
+	if (error != start_addr)
+		goto out;
+
+	len = PAGE_ALIGN(ex.a_text + ex.a_data);
+	bss = ex.a_text + ex.a_data + ex.a_bss;
+	if (bss > len) {
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(start_addr + len, bss - len);
+		up_write(&current->mm->mmap_sem);
+		retval = error;
+		if (error != start_addr + len)
+			goto out;
+	}
+	retval = 0;
+out:
+	return retval;
+}
+
+static int __init init_aout_binfmt(void)
+{
+	return register_binfmt(&aout_format);
+}
+
+static void __exit exit_aout_binfmt(void)
+{
+	unregister_binfmt(&aout_format);
+}
+
+core_initcall(init_aout_binfmt);
+module_exit(exit_aout_binfmt);
+MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
new file mode 100644
index 0000000..76ec9d8
--- /dev/null
+++ b/fs/binfmt_elf.c
@@ -0,0 +1,1677 @@
+/*
+ * linux/fs/binfmt_elf.c
+ *
+ * These are the functions used to load ELF format executables as used
+ * on SVr4 machines.  Information on the format may be found in the book
+ * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
+ * Tools".
+ *
+ * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/binfmts.h>
+#include <linux/string.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/personality.h>
+#include <linux/elfcore.h>
+#include <linux/init.h>
+#include <linux/highuid.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/compiler.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/random.h>
+
+#include <asm/uaccess.h>
+#include <asm/param.h>
+#include <asm/page.h>
+
+#include <linux/elf.h>
+
+static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
+static int load_elf_library(struct file*);
+static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
+extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
+
+#ifndef elf_addr_t
+#define elf_addr_t unsigned long
+#endif
+
+/*
+ * If we don't support core dumping, then supply a NULL so we
+ * don't even try.
+ */
+#ifdef USE_ELF_CORE_DUMP
+static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
+#else
+#define elf_core_dump	NULL
+#endif
+
+#if ELF_EXEC_PAGESIZE > PAGE_SIZE
+# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
+#else
+# define ELF_MIN_ALIGN	PAGE_SIZE
+#endif
+
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS	0
+#endif
+
+#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
+#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
+#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
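+/*
+ * For example, with a 4096-byte ELF_MIN_ALIGN, a virtual address of
+ * 0x08048123 gives ELF_PAGESTART = 0x08048000, ELF_PAGEOFFSET = 0x123 and
+ * ELF_PAGEALIGN = 0x08049000.
+ */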
+
+static struct linux_binfmt elf_format = {
+		.module		= THIS_MODULE,
+		.load_binary	= load_elf_binary,
+		.load_shlib	= load_elf_library,
+		.core_dump	= elf_core_dump,
+		.min_coredump	= ELF_EXEC_PAGESIZE
+};
+
+#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
+
+static int set_brk(unsigned long start, unsigned long end)
+{
+	start = ELF_PAGEALIGN(start);
+	end = ELF_PAGEALIGN(end);
+	if (end > start) {
+		unsigned long addr;
+		down_write(&current->mm->mmap_sem);
+		addr = do_brk(start, end - start);
+		up_write(&current->mm->mmap_sem);
+		if (BAD_ADDR(addr))
+			return addr;
+	}
+	current->mm->start_brk = current->mm->brk = end;
+	return 0;
+}
+
+
+/* We need to explicitly zero any fractional pages
+   after the data section (i.e. bss).  This would
+   contain the junk from the file that should not
+   be in memory */
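+/*
+ * E.g. with a 4096-byte ELF_MIN_ALIGN, an elf_bss of 0x0804a123 makes
+ * padzero() clear the 0xedd bytes from 0x0804a123 up to the boundary at
+ * 0x0804b000.
+ */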
+
+
+static int padzero(unsigned long elf_bss)
+{
+	unsigned long nbyte;
+
+	nbyte = ELF_PAGEOFFSET(elf_bss);
+	if (nbyte) {
+		nbyte = ELF_MIN_ALIGN - nbyte;
+		if (clear_user((void __user *) elf_bss, nbyte))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/* Let's use some macros to make this stack manipulation a little clearer */
+#ifdef CONFIG_STACK_GROWSUP
+#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
+#define STACK_ROUND(sp, items) \
+	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
+#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
+#else
+#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
+#define STACK_ROUND(sp, items) \
+	(((unsigned long) (sp - items)) &~ 15UL)
+#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
+#endif
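+/*
+ * On a downward-growing stack, for instance, STACK_ALLOC(sp, len) moves sp
+ * down by len bytes and yields the new (lower) address, STACK_ADD(sp, items)
+ * makes room for that many elf_addr_t slots below sp, and STACK_ROUND()
+ * subtracts the remaining items and aligns the result down to 16 bytes.
+ */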
+
+static int
+create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
+		int interp_aout, unsigned long load_addr,
+		unsigned long interp_load_addr)
+{
+	unsigned long p = bprm->p;
+	int argc = bprm->argc;
+	int envc = bprm->envc;
+	elf_addr_t __user *argv;
+	elf_addr_t __user *envp;
+	elf_addr_t __user *sp;
+	elf_addr_t __user *u_platform;
+	const char *k_platform = ELF_PLATFORM;
+	int items;
+	elf_addr_t *elf_info;
+	int ei_index = 0;
+	struct task_struct *tsk = current;
+
+	/*
+	 * If this architecture has a platform capability string, copy it
+	 * to userspace.  In some cases (Sparc), this info is impossible
+	 * for userspace to get any other way, in others (i386) it is
+	 * merely difficult.
+	 */
+
+	u_platform = NULL;
+	if (k_platform) {
+		size_t len = strlen(k_platform) + 1;
+
+		/*
+		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
+		 * evictions by the processes running on the same package. One
+		 * thing we can do is to shuffle the initial stack for them.
+		 */
+	 
+		p = arch_align_stack(p);
+
+		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
+		if (__copy_to_user(u_platform, k_platform, len))
+			return -EFAULT;
+	}
+
+	/* Create the ELF interpreter info */
+	elf_info = (elf_addr_t *) current->mm->saved_auxv;
+#define NEW_AUX_ENT(id, val) \
+	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
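+	/*
+	 * Each NEW_AUX_ENT() invocation appends one (id, value) pair to
+	 * saved_auxv; the pairs are later copied onto the user stack just
+	 * above the environment pointers, where the dynamic linker walks
+	 * them until it reaches AT_NULL.
+	 */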
+
+#ifdef ARCH_DLINFO
+	/* 
+	 * ARCH_DLINFO must come first so PPC can do its special alignment of
+	 * AUXV.
+	 */
+	ARCH_DLINFO;
+#endif
+	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
+	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
+	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
+	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
+	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
+	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
+	NEW_AUX_ENT(AT_BASE, interp_load_addr);
+	NEW_AUX_ENT(AT_FLAGS, 0);
+	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
+	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
+	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
+	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
+	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
+ 	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
+	if (k_platform) {
+		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+	}
+	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
+		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
+	}
+#undef NEW_AUX_ENT
+	/* AT_NULL is zero; clear the rest too */
+	memset(&elf_info[ei_index], 0,
+	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
+
+	/* And advance past the AT_NULL entry.  */
+	ei_index += 2;
+
+	sp = STACK_ADD(p, ei_index);
+
+	items = (argc + 1) + (envc + 1);
+	if (interp_aout) {
+		items += 3; /* a.out interpreters require argv & envp too */
+	} else {
+		items += 1; /* ELF interpreters only put argc on the stack */
+	}
+	bprm->p = STACK_ROUND(sp, items);
+
+	/* Point sp at the lowest address on the stack */
+#ifdef CONFIG_STACK_GROWSUP
+	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
+	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
+#else
+	sp = (elf_addr_t __user *)bprm->p;
+#endif
+
+	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
+	if (__put_user(argc, sp++))
+		return -EFAULT;
+	if (interp_aout) {
+		argv = sp + 2;
+		envp = argv + argc + 1;
+		__put_user((elf_addr_t)(unsigned long)argv, sp++);
+		__put_user((elf_addr_t)(unsigned long)envp, sp++);
+	} else {
+		argv = sp;
+		envp = argv + argc + 1;
+	}
+
+	/* Populate argv and envp */
+	p = current->mm->arg_start;
+	while (argc-- > 0) {
+		size_t len;
+		__put_user((elf_addr_t)p, argv++);
+		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
+		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+			return 0;
+		p += len;
+	}
+	if (__put_user(0, argv))
+		return -EFAULT;
+	current->mm->arg_end = current->mm->env_start = p;
+	while (envc-- > 0) {
+		size_t len;
+		__put_user((elf_addr_t)p, envp++);
+		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
+		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+			return 0;
+		p += len;
+	}
+	if (__put_user(0, envp))
+		return -EFAULT;
+	current->mm->env_end = p;
+
+	/* Put the elf_info on the stack in the right place.  */
+	sp = (elf_addr_t __user *)envp + 1;
+	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+		return -EFAULT;
+	return 0;
+}
+
+#ifndef elf_map
+
+static unsigned long elf_map(struct file *filep, unsigned long addr,
+			struct elf_phdr *eppnt, int prot, int type)
+{
+	unsigned long map_addr;
+
+	down_write(&current->mm->mmap_sem);
+	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
+			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
+			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+	up_write(&current->mm->mmap_sem);
+	return(map_addr);
+}
+
+#endif /* !elf_map */
+
+/* This is much more generalized than the library routine read function,
+   so we keep this separate.  Technically the library read function
+   is only provided so that we can read a.out libraries that have
+   an ELF header */
+
+static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
+				     struct file * interpreter,
+				     unsigned long *interp_load_addr)
+{
+	struct elf_phdr *elf_phdata;
+	struct elf_phdr *eppnt;
+	unsigned long load_addr = 0;
+	int load_addr_set = 0;
+	unsigned long last_bss = 0, elf_bss = 0;
+	unsigned long error = ~0UL;
+	int retval, i, size;
+
+	/* First of all, some simple consistency checks */
+	if (interp_elf_ex->e_type != ET_EXEC &&
+	    interp_elf_ex->e_type != ET_DYN)
+		goto out;
+	if (!elf_check_arch(interp_elf_ex))
+		goto out;
+	if (!interpreter->f_op || !interpreter->f_op->mmap)
+		goto out;
+
+	/*
+	 * If the size of this structure has changed, then punt, since
+	 * we will be doing the wrong thing.
+	 */
+	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
+		goto out;
+	if (interp_elf_ex->e_phnum < 1 ||
+		interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
+		goto out;
+
+	/* Now read in all of the header information */
+
+	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
+	if (size > ELF_MIN_ALIGN)
+		goto out;
+	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+	if (!elf_phdata)
+		goto out;
+
+	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
+	error = -EIO;
+	if (retval != size) {
+		if (retval < 0)
+			error = retval;	
+		goto out_close;
+	}
+
+	eppnt = elf_phdata;
+	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
+	  if (eppnt->p_type == PT_LOAD) {
+	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
+	    int elf_prot = 0;
+	    unsigned long vaddr = 0;
+	    unsigned long k, map_addr;
+
+	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
+	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
+	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+	    vaddr = eppnt->p_vaddr;
+	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
+	    	elf_type |= MAP_FIXED;
+
+	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
+	    error = map_addr;
+	    if (BAD_ADDR(map_addr))
+	    	goto out_close;
+
+	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
+		load_addr = map_addr - ELF_PAGESTART(vaddr);
+		load_addr_set = 1;
+	    }
+
+	    /*
+	     * Check to see if the section's size will overflow the
+	     * allowed task size. Note that p_filesz must always be
+	     * <= p_memsz so it is only necessary to check p_memsz.
+	     */
+	    k = load_addr + eppnt->p_vaddr;
+	    if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
+		eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
+	        error = -ENOMEM;
+		goto out_close;
+	    }
+
+	    /*
+	     * Find the end of the file mapping for this phdr, and keep
+	     * track of the largest address we see for this.
+	     */
+	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
+	    if (k > elf_bss)
+		elf_bss = k;
+
+	    /*
+	     * Do the same thing for the memory mapping - between
+	     * elf_bss and last_bss is the bss section.
+	     */
+	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+	    if (k > last_bss)
+		last_bss = k;
+	  }
+	}
+
+	/*
+	 * Now fill out the bss section.  First pad the last page up
+	 * to the page boundary, and then perform a mmap to make sure
+	 * that there are zero-mapped pages up to and including the 
+	 * last bss page.
+	 */
+	if (padzero(elf_bss)) {
+		error = -EFAULT;
+		goto out_close;
+	}
+
+	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */
+
+	/* Map the last of the bss segment */
+	if (last_bss > elf_bss) {
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(elf_bss, last_bss - elf_bss);
+		up_write(&current->mm->mmap_sem);
+		if (BAD_ADDR(error))
+			goto out_close;
+	}
+
+	*interp_load_addr = load_addr;
+	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+
+out_close:
+	kfree(elf_phdata);
+out:
+	return error;
+}
+
+static unsigned long load_aout_interp(struct exec * interp_ex,
+			     struct file * interpreter)
+{
+	unsigned long text_data, elf_entry = ~0UL;
+	char __user * addr;
+	loff_t offset;
+
+	current->mm->end_code = interp_ex->a_text;
+	text_data = interp_ex->a_text + interp_ex->a_data;
+	current->mm->end_data = text_data;
+	current->mm->brk = interp_ex->a_bss + text_data;
+
+	switch (N_MAGIC(*interp_ex)) {
+	case OMAGIC:
+		offset = 32;
+		addr = (char __user *)0;
+		break;
+	case ZMAGIC:
+	case QMAGIC:
+		offset = N_TXTOFF(*interp_ex);
+		addr = (char __user *) N_TXTADDR(*interp_ex);
+		break;
+	default:
+		goto out;
+	}
+
+	down_write(&current->mm->mmap_sem);	
+	do_brk(0, text_data);
+	up_write(&current->mm->mmap_sem);
+	if (!interpreter->f_op || !interpreter->f_op->read)
+		goto out;
+	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
+		goto out;
+	flush_icache_range((unsigned long)addr,
+	                   (unsigned long)addr + text_data);
+
+
+	down_write(&current->mm->mmap_sem);	
+	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
+		interp_ex->a_bss);
+	up_write(&current->mm->mmap_sem);
+	elf_entry = interp_ex->a_entry;
+
+out:
+	return elf_entry;
+}
+
+/*
+ * These are the functions used to load ELF style executables and shared
+ * libraries.  There is no binary dependent code anywhere else.
+ */
+
+#define INTERPRETER_NONE 0
+#define INTERPRETER_AOUT 1
+#define INTERPRETER_ELF 2
+
+
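+/*
+ * With PF_RANDOMIZE set this shifts the stack top by up to 8 MB of
+ * page-aligned randomness, e.g. a 0xc0000000 stack top may be placed
+ * anywhere within roughly 8 MB below it (or above it with
+ * CONFIG_STACK_GROWSUP).
+ */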
+static unsigned long randomize_stack_top(unsigned long stack_top)
+{
+	unsigned int random_variable = 0;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_variable = get_random_int() % (8*1024*1024);
+#ifdef CONFIG_STACK_GROWSUP
+	return PAGE_ALIGN(stack_top + random_variable);
+#else
+	return PAGE_ALIGN(stack_top - random_variable);
+#endif
+}
+
+static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	struct file *interpreter = NULL; /* to shut gcc up */
+ 	unsigned long load_addr = 0, load_bias = 0;
+	int load_addr_set = 0;
+	char * elf_interpreter = NULL;
+	unsigned int interpreter_type = INTERPRETER_NONE;
+	unsigned char ibcs2_interpreter = 0;
+	unsigned long error;
+	struct elf_phdr * elf_ppnt, *elf_phdata;
+	unsigned long elf_bss, elf_brk;
+	int elf_exec_fileno;
+	int retval, i;
+	unsigned int size;
+	unsigned long elf_entry, interp_load_addr = 0;
+	unsigned long start_code, end_code, start_data, end_data;
+	unsigned long reloc_func_desc = 0;
+	char passed_fileno[6];
+	struct files_struct *files;
+	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+	unsigned long def_flags = 0;
+	struct {
+		struct elfhdr elf_ex;
+		struct elfhdr interp_elf_ex;
+  		struct exec interp_ex;
+	} *loc;
+
+	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+	if (!loc) {
+		retval = -ENOMEM;
+		goto out_ret;
+	}
+	
+	/* Get the exec-header */
+	loc->elf_ex = *((struct elfhdr *) bprm->buf);
+
+	retval = -ENOEXEC;
+	/* First of all, some simple consistency checks */
+	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+		goto out;
+
+	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
+		goto out;
+	if (!elf_check_arch(&loc->elf_ex))
+		goto out;
+	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+		goto out;
+
+	/* Now read in all of the header information */
+
+	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
+		goto out;
+	if (loc->elf_ex.e_phnum < 1 ||
+	 	loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
+		goto out;
+	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
+	retval = -ENOMEM;
+	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+	if (!elf_phdata)
+		goto out;
+
+	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
+	if (retval != size) {
+		if (retval >= 0)
+			retval = -EIO;
+		goto out_free_ph;
+	}
+
+	files = current->files;		/* Refcounted so ok */
+	retval = unshare_files();
+	if (retval < 0)
+		goto out_free_ph;
+	if (files == current->files) {
+		put_files_struct(files);
+		files = NULL;
+	}
+
+	/* exec will make our files private anyway, but for the a.out
+	   loader stuff we need to do it earlier */
+
+	retval = get_unused_fd();
+	if (retval < 0)
+		goto out_free_fh;
+	get_file(bprm->file);
+	fd_install(elf_exec_fileno = retval, bprm->file);
+
+	elf_ppnt = elf_phdata;
+	elf_bss = 0;
+	elf_brk = 0;
+
+	start_code = ~0UL;
+	end_code = 0;
+	start_data = 0;
+	end_data = 0;
+
+	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
+		if (elf_ppnt->p_type == PT_INTERP) {
+			/* This is the program interpreter used for
+			 * shared libraries - for now assume that this
+			 * is an a.out format binary
+			 */
+
+			retval = -ENOEXEC;
+			if (elf_ppnt->p_filesz > PATH_MAX || 
+			    elf_ppnt->p_filesz < 2)
+				goto out_free_file;
+
+			retval = -ENOMEM;
+			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
+							   GFP_KERNEL);
+			if (!elf_interpreter)
+				goto out_free_file;
+
+			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
+					   elf_interpreter,
+					   elf_ppnt->p_filesz);
+			if (retval != elf_ppnt->p_filesz) {
+				if (retval >= 0)
+					retval = -EIO;
+				goto out_free_interp;
+			}
+			/* make sure path is NULL terminated */
+			retval = -ENOEXEC;
+			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
+				goto out_free_interp;
+
+			/* If the program interpreter is one of these two,
+			 * then assume an iBCS2 image. Otherwise assume
+			 * a native linux image.
+			 */
+			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
+			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
+				ibcs2_interpreter = 1;
+
+			/*
+			 * The early SET_PERSONALITY here is so that the lookup
+			 * for the interpreter happens in the namespace of the 
+			 * to-be-execed image.  SET_PERSONALITY can select an
+			 * alternate root.
+			 *
+			 * However, SET_PERSONALITY is NOT allowed to switch
+			 * this task into the new image's memory mapping
+			 * policy - that is, TASK_SIZE must still evaluate to
+			 * that which is appropriate to the execing application.
+			 * This is because exit_mmap() needs to have TASK_SIZE
+			 * evaluate to the size of the old image.
+			 *
+			 * So if (say) a 64-bit application is execing a 32-bit
+			 * application it is the architecture's responsibility
+			 * to defer changing the value of TASK_SIZE until the
+			 * switch really is going to happen - do this in
+			 * flush_thread().	- akpm
+			 */
+			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+
+			interpreter = open_exec(elf_interpreter);
+			retval = PTR_ERR(interpreter);
+			if (IS_ERR(interpreter))
+				goto out_free_interp;
+			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
+			if (retval != BINPRM_BUF_SIZE) {
+				if (retval >= 0)
+					retval = -EIO;
+				goto out_free_dentry;
+			}
+
+			/* Get the exec headers */
+			loc->interp_ex = *((struct exec *) bprm->buf);
+			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
+			break;
+		}
+		elf_ppnt++;
+	}
+
+	elf_ppnt = elf_phdata;
+	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
+		if (elf_ppnt->p_type == PT_GNU_STACK) {
+			if (elf_ppnt->p_flags & PF_X)
+				executable_stack = EXSTACK_ENABLE_X;
+			else
+				executable_stack = EXSTACK_DISABLE_X;
+			break;
+		}
+	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
+
+	/* Some simple consistency checks for the interpreter */
+	if (elf_interpreter) {
+		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
+
+		/* Now figure out which format our binary is */
+		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
+		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
+		    (N_MAGIC(loc->interp_ex) != QMAGIC))
+			interpreter_type = INTERPRETER_ELF;
+
+		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+			interpreter_type &= ~INTERPRETER_ELF;
+
+		retval = -ELIBBAD;
+		if (!interpreter_type)
+			goto out_free_dentry;
+
+		/* Make sure only one type was selected */
+		if ((interpreter_type & INTERPRETER_ELF) &&
+		     interpreter_type != INTERPRETER_ELF) {
+	     		// FIXME - ratelimit this before re-enabling
+			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
+			interpreter_type = INTERPRETER_ELF;
+		}
+		/* Verify the interpreter has a valid arch */
+		if ((interpreter_type == INTERPRETER_ELF) &&
+		    !elf_check_arch(&loc->interp_elf_ex))
+			goto out_free_dentry;
+	} else {
+		/* Executables without an interpreter also need a personality  */
+		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+	}
+
+	/* OK, we are done with that, now set up the arg stuff,
+	   and then start this sucker up */
+
+	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
+		char *passed_p = passed_fileno;
+		sprintf(passed_fileno, "%d", elf_exec_fileno);
+
+		if (elf_interpreter) {
+			retval = copy_strings_kernel(1, &passed_p, bprm);
+			if (retval)
+				goto out_free_dentry; 
+			bprm->argc++;
+		}
+	}
+
+	/* Flush all traces of the currently running executable */
+	retval = flush_old_exec(bprm);
+	if (retval)
+		goto out_free_dentry;
+
+	/* Discard our unneeded old files struct */
+	if (files) {
+		steal_locks(files);
+		put_files_struct(files);
+		files = NULL;
+	}
+
+	/* OK, This is the point of no return */
+	current->mm->start_data = 0;
+	current->mm->end_data = 0;
+	current->mm->end_code = 0;
+	current->mm->mmap = NULL;
+	current->flags &= ~PF_FORKNOEXEC;
+	current->mm->def_flags = def_flags;
+
+	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
+	   may depend on the personality.  */
+	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+		current->personality |= READ_IMPLIES_EXEC;
+
+	if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		current->flags |= PF_RANDOMIZE;
+	arch_pick_mmap_layout(current->mm);
+
+	/* Do this so that we can load the interpreter, if need be.  We will
+	   change some of these later */
+	set_mm_counter(current->mm, rss, 0);
+	current->mm->free_area_cache = current->mm->mmap_base;
+	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
+				 executable_stack);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+	
+#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
+	retval = arch_setup_additional_pages(bprm, executable_stack);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
+
+	current->mm->start_stack = bprm->p;
+
+	/* Now we do a little grungy work by mmaping the ELF image into
+	   the correct location in memory.  At this point, we assume that
+	   the image should be loaded at fixed address, not at a variable
+	   address. */
+
+	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+		int elf_prot = 0, elf_flags;
+		unsigned long k, vaddr;
+
+		if (elf_ppnt->p_type != PT_LOAD)
+			continue;
+
+		if (unlikely (elf_brk > elf_bss)) {
+			unsigned long nbyte;
+	            
+			/* There was a PT_LOAD segment with p_memsz > p_filesz
+			   before this one. Map anonymous pages, if needed,
+			   and clear the area.  */
+			retval = set_brk (elf_bss + load_bias,
+					  elf_brk + load_bias);
+			if (retval) {
+				send_sig(SIGKILL, current, 0);
+				goto out_free_dentry;
+			}
+			nbyte = ELF_PAGEOFFSET(elf_bss);
+			if (nbyte) {
+				nbyte = ELF_MIN_ALIGN - nbyte;
+				if (nbyte > elf_brk - elf_bss)
+					nbyte = elf_brk - elf_bss;
+				if (clear_user((void __user *)elf_bss +
+							load_bias, nbyte)) {
+					/*
+					 * This bss-zeroing can fail if the ELF
+					 * file specifies odd protections.  So
+					 * we don't check the return value
+					 */
+				}
+			}
+		}
+
+		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
+		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
+		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+
+		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
+
+		vaddr = elf_ppnt->p_vaddr;
+		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+			elf_flags |= MAP_FIXED;
+		} else if (loc->elf_ex.e_type == ET_DYN) {
+			/* Try and get dynamic programs out of the way of the default mmap
+			   base, as well as whatever program they might try to exec.  This
+			   is because the brk will follow the loader, and is not movable.  */
+			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+		}
+
+		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+		if (BAD_ADDR(error)) {
+			send_sig(SIGKILL, current, 0);
+			goto out_free_dentry;
+		}
+
+		if (!load_addr_set) {
+			load_addr_set = 1;
+			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
+			if (loc->elf_ex.e_type == ET_DYN) {
+				load_bias += error -
+				             ELF_PAGESTART(load_bias + vaddr);
+				load_addr += load_bias;
+				reloc_func_desc = load_bias;
+			}
+		}
+		k = elf_ppnt->p_vaddr;
+		if (k < start_code) start_code = k;
+		if (start_data < k) start_data = k;
+
+		/*
+		 * Check to see if the section's size will overflow the
+		 * allowed task size. Note that p_filesz must always be
+		 * <= p_memsz so it is only necessary to check p_memsz.
+		 */
+		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+		    elf_ppnt->p_memsz > TASK_SIZE ||
+		    TASK_SIZE - elf_ppnt->p_memsz < k) {
+			/* set_brk can never work.  Avoid overflows.  */
+			send_sig(SIGKILL, current, 0);
+			goto out_free_dentry;
+		}
+
+		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
+
+		if (k > elf_bss)
+			elf_bss = k;
+		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
+			end_code = k;
+		if (end_data < k)
+			end_data = k;
+		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
+		if (k > elf_brk)
+			elf_brk = k;
+	}
+
+	loc->elf_ex.e_entry += load_bias;
+	elf_bss += load_bias;
+	elf_brk += load_bias;
+	start_code += load_bias;
+	end_code += load_bias;
+	start_data += load_bias;
+	end_data += load_bias;
+
+	/* Calling set_brk effectively mmaps the pages that we need
+	 * for the bss and break sections.  We must do this before
+	 * mapping in the interpreter, to make sure it doesn't wind
+	 * up getting placed where the bss needs to go.
+	 */
+	retval = set_brk(elf_bss, elf_brk);
+	if (retval) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+	if (padzero(elf_bss)) {
+		send_sig(SIGSEGV, current, 0);
+		retval = -EFAULT; /* Nobody gets to see this, but.. */
+		goto out_free_dentry;
+	}
+
+	if (elf_interpreter) {
+		if (interpreter_type == INTERPRETER_AOUT)
+			elf_entry = load_aout_interp(&loc->interp_ex,
+						     interpreter);
+		else
+			elf_entry = load_elf_interp(&loc->interp_elf_ex,
+						    interpreter,
+						    &interp_load_addr);
+		if (BAD_ADDR(elf_entry)) {
+			printk(KERN_ERR "Unable to load interpreter %.128s\n",
+				elf_interpreter);
+			force_sig(SIGSEGV, current);
+			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
+			goto out_free_dentry;
+		}
+		reloc_func_desc = interp_load_addr;
+
+		allow_write_access(interpreter);
+		fput(interpreter);
+		kfree(elf_interpreter);
+	} else {
+		elf_entry = loc->elf_ex.e_entry;
+	}
+
+	kfree(elf_phdata);
+
+	if (interpreter_type != INTERPRETER_AOUT)
+		sys_close(elf_exec_fileno);
+
+	set_binfmt(&elf_format);
+
+	compute_creds(bprm);
+	current->flags &= ~PF_FORKNOEXEC;
+	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
+			load_addr, interp_load_addr);
+	/* N.B. passed_fileno might not be initialized? */
+	if (interpreter_type == INTERPRETER_AOUT)
+		current->mm->arg_start += strlen(passed_fileno) + 1;
+	current->mm->end_code = end_code;
+	current->mm->start_code = start_code;
+	current->mm->start_data = start_data;
+	current->mm->end_data = end_data;
+	current->mm->start_stack = bprm->p;
+
+	if (current->personality & MMAP_PAGE_ZERO) {
+		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
+		   and some applications "depend" upon this behavior.
+		   Since we do not have the power to recompile these, we
+		   emulate the SVr4 behavior.  Sigh.  */
+		down_write(&current->mm->mmap_sem);
+		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
+				MAP_FIXED | MAP_PRIVATE, 0);
+		up_write(&current->mm->mmap_sem);
+	}
+
+#ifdef ELF_PLAT_INIT
+	/*
+	 * The ABI may specify that certain registers be set up in special
+	 * ways (on i386 %edx is the address of a DT_FINI function, for
+	 * example).  In addition, it may also specify (e.g. PowerPC64 ELF)
+	 * that the e_entry field is the address of the function descriptor
+	 * for the startup routine, rather than the address of the startup
+	 * routine itself.  This macro performs whatever initialization to
+	 * the regs structure is required as well as any relocations to the
+	 * function descriptor entries when executing dynamically linked apps.
+	 */
+	ELF_PLAT_INIT(regs, reloc_func_desc);
+#endif
+
+	start_thread(regs, elf_entry, bprm->p);
+	if (unlikely(current->ptrace & PT_PTRACED)) {
+		if (current->ptrace & PT_TRACE_EXEC)
+			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
+		else
+			send_sig(SIGTRAP, current, 0);
+	}
+	retval = 0;
+out:
+	kfree(loc);
+out_ret:
+	return retval;
+
+	/* error cleanup */
+out_free_dentry:
+	allow_write_access(interpreter);
+	if (interpreter)
+		fput(interpreter);
+out_free_interp:
+	if (elf_interpreter)
+		kfree(elf_interpreter);
+out_free_file:
+	sys_close(elf_exec_fileno);
+out_free_fh:
+	if (files) {
+		put_files_struct(current->files);
+		current->files = files;
+	}
+out_free_ph:
+	kfree(elf_phdata);
+	goto out;
+}
+
+/* This is really simpleminded and specialized - we are loading an
+   a.out library that is given an ELF header. */
+
+static int load_elf_library(struct file *file)
+{
+	struct elf_phdr *elf_phdata;
+	struct elf_phdr *eppnt;
+	unsigned long elf_bss, bss, len;
+	int retval, error, i, j;
+	struct elfhdr elf_ex;
+
+	error = -ENOEXEC;
+	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
+	if (retval != sizeof(elf_ex))
+		goto out;
+
+	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+		goto out;
+
+	/* First of all, some simple consistency checks */
+	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
+	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
+		goto out;
+
+	/* Now read in all of the header information */
+
+	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
+	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
+
+	error = -ENOMEM;
+	elf_phdata = kmalloc(j, GFP_KERNEL);
+	if (!elf_phdata)
+		goto out;
+
+	eppnt = elf_phdata;
+	error = -ENOEXEC;
+	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
+	if (retval != j)
+		goto out_free_ph;
+
+	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
+		if ((eppnt + i)->p_type == PT_LOAD)
+			j++;
+	if (j != 1)
+		goto out_free_ph;
+
+	while (eppnt->p_type != PT_LOAD)
+		eppnt++;
+
+	/* Now use mmap to map the library into memory. */
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap(file,
+			ELF_PAGESTART(eppnt->p_vaddr),
+			(eppnt->p_filesz +
+			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
+			PROT_READ | PROT_WRITE | PROT_EXEC,
+			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+			(eppnt->p_offset -
+			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
+	up_write(&current->mm->mmap_sem);
+	if (error != ELF_PAGESTART(eppnt->p_vaddr))
+		goto out_free_ph;
+
+	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
+	if (padzero(elf_bss)) {
+		error = -EFAULT;
+		goto out_free_ph;
+	}
+
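+	/*
+	 * Round the end of the file-backed data up to the next
+	 * ELF_MIN_ALIGN boundary; any bss extending beyond that point
+	 * is created with do_brk() below.
+	 */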
+	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+	bss = eppnt->p_memsz + eppnt->p_vaddr;
+	if (bss > len) {
+		down_write(&current->mm->mmap_sem);
+		do_brk(len, bss - len);
+		up_write(&current->mm->mmap_sem);
+	}
+	error = 0;
+
+out_free_ph:
+	kfree(elf_phdata);
+out:
+	return error;
+}
+
+/*
+ * Note that some platforms still use traditional core dumps and not
+ * the ELF core dump.  Each platform can select it as appropriate.
+ */
+#ifdef USE_ELF_CORE_DUMP
+
+/*
+ * ELF core dumper
+ *
+ * Modelled on fs/exec.c:aout_core_dump()
+ * Jeremy Fitzhardinge <jeremy@sw.oz.au>
+ */
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+static int dump_seek(struct file *file, off_t off)
+{
+	if (file->f_op->llseek) {
+		if (file->f_op->llseek(file, off, 0) != off)
+			return 0;
+	} else
+		file->f_pos = off;
+	return 1;
+}
+
+/*
+ * Decide whether a segment is worth dumping; default is yes to be
+ * sure (missing info is worse than too much; etc).
+ * Personally I'd include everything, and use the coredump limit...
+ *
+ * I think we should skip something. But I am not sure how. H.J.
+ */
+static int maydump(struct vm_area_struct *vma)
+{
+	/* Do not dump I/O mapped devices or special mappings */
+	if (vma->vm_flags & (VM_IO | VM_RESERVED))
+		return 0;
+
+	/* Dump shared memory only if mapped from an anonymous file.  */
+	if (vma->vm_flags & VM_SHARED)
+		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
+
+	/* If it hasn't been written to, don't write it out */
+	if (!vma->anon_vma)
+		return 0;
+
+	return 1;
+}
+
+#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
+
+/* An ELF note in memory */
+struct memelfnote
+{
+	const char *name;
+	int type;
+	unsigned int datasz;
+	void *data;
+};
+
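+/*
+ * An ELF note on disk is a 12-byte header (namesz, descsz, type) followed
+ * by the name and the descriptor, each padded to a 4-byte boundary.  For
+ * instance, a note named "CORE" with a 32-byte descriptor would occupy
+ * 12 + 8 + 32 = 52 bytes, which is what notesize() computes.
+ */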
+static int notesize(struct memelfnote *en)
+{
+	int sz;
+
+	sz = sizeof(struct elf_note);
+	sz += roundup(strlen(en->name) + 1, 4);
+	sz += roundup(en->datasz, 4);
+
+	return sz;
+}
+
+#define DUMP_WRITE(addr, nr)	\
+	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
+#define DUMP_SEEK(off)	\
+	do { if (!dump_seek(file, (off))) return 0; } while(0)
+
+static int writenote(struct memelfnote *men, struct file *file)
+{
+	struct elf_note en;
+
+	en.n_namesz = strlen(men->name) + 1;
+	en.n_descsz = men->datasz;
+	en.n_type = men->type;
+
+	DUMP_WRITE(&en, sizeof(en));
+	DUMP_WRITE(men->name, en.n_namesz);
+	/* XXX - cast from long long to long to avoid need for libgcc.a */
+	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
+	DUMP_WRITE(men->data, men->datasz);
+	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
+
+	return 1;
+}
+#undef DUMP_WRITE
+#undef DUMP_SEEK
+
+#define DUMP_WRITE(addr, nr)	\
+	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+		goto end_coredump;
+#define DUMP_SEEK(off)	\
+	if (!dump_seek(file, (off))) \
+		goto end_coredump;
+
+static inline void fill_elf_header(struct elfhdr *elf, int segs)
+{
+	memcpy(elf->e_ident, ELFMAG, SELFMAG);
+	elf->e_ident[EI_CLASS] = ELF_CLASS;
+	elf->e_ident[EI_DATA] = ELF_DATA;
+	elf->e_ident[EI_VERSION] = EV_CURRENT;
+	elf->e_ident[EI_OSABI] = ELF_OSABI;
+	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
+
+	elf->e_type = ET_CORE;
+	elf->e_machine = ELF_ARCH;
+	elf->e_version = EV_CURRENT;
+	elf->e_entry = 0;
+	elf->e_phoff = sizeof(struct elfhdr);
+	elf->e_shoff = 0;
+	elf->e_flags = ELF_CORE_EFLAGS;
+	elf->e_ehsize = sizeof(struct elfhdr);
+	elf->e_phentsize = sizeof(struct elf_phdr);
+	elf->e_phnum = segs;
+	elf->e_shentsize = 0;
+	elf->e_shnum = 0;
+	elf->e_shstrndx = 0;
+	return;
+}
+
+static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+{
+	phdr->p_type = PT_NOTE;
+	phdr->p_offset = offset;
+	phdr->p_vaddr = 0;
+	phdr->p_paddr = 0;
+	phdr->p_filesz = sz;
+	phdr->p_memsz = 0;
+	phdr->p_flags = 0;
+	phdr->p_align = 0;
+	return;
+}
+
+static void fill_note(struct memelfnote *note, const char *name, int type, 
+		unsigned int sz, void *data)
+{
+	note->name = name;
+	note->type = type;
+	note->datasz = sz;
+	note->data = data;
+	return;
+}
+
+/*
+ * fill up all the fields in prstatus from the given task struct, except registers
+ * which need to be filled up separately.
+ */
+static void fill_prstatus(struct elf_prstatus *prstatus,
+			struct task_struct *p, long signr) 
+{
+	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
+	prstatus->pr_sigpend = p->pending.signal.sig[0];
+	prstatus->pr_sighold = p->blocked.sig[0];
+	prstatus->pr_pid = p->pid;
+	prstatus->pr_ppid = p->parent->pid;
+	prstatus->pr_pgrp = process_group(p);
+	prstatus->pr_sid = p->signal->session;
+	if (thread_group_leader(p)) {
+		/*
+		 * This is the record for the group leader.  Add in the
+		 * cumulative times of previous dead threads.  This total
+		 * won't include the time of each live thread whose state
+		 * is included in the core dump.  The final total reported
+		 * to our parent process when it calls wait4 will include
+		 * those sums as well as the little bit more time it takes
+		 * this and each other thread to finish dying after the
+		 * core dump synchronization phase.
+		 */
+		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
+				   &prstatus->pr_utime);
+		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
+				   &prstatus->pr_stime);
+	} else {
+		cputime_to_timeval(p->utime, &prstatus->pr_utime);
+		cputime_to_timeval(p->stime, &prstatus->pr_stime);
+	}
+	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
+	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+}
+
+static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
+		       struct mm_struct *mm)
+{
+	int i, len;
+	
+	/* first copy the parameters from user space */
+	memset(psinfo, 0, sizeof(struct elf_prpsinfo));
+
+	len = mm->arg_end - mm->arg_start;
+	if (len >= ELF_PRARGSZ)
+		len = ELF_PRARGSZ-1;
+	if (copy_from_user(&psinfo->pr_psargs,
+		           (const char __user *)mm->arg_start, len))
+		return -EFAULT;
+	for(i = 0; i < len; i++)
+		if (psinfo->pr_psargs[i] == 0)
+			psinfo->pr_psargs[i] = ' ';
+	psinfo->pr_psargs[len] = 0;
+
+	psinfo->pr_pid = p->pid;
+	psinfo->pr_ppid = p->parent->pid;
+	psinfo->pr_pgrp = process_group(p);
+	psinfo->pr_sid = p->signal->session;
+
+	i = p->state ? ffz(~p->state) + 1 : 0;
+	psinfo->pr_state = i;
+	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
+	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
+	psinfo->pr_nice = task_nice(p);
+	psinfo->pr_flag = p->flags;
+	SET_UID(psinfo->pr_uid, p->uid);
+	SET_GID(psinfo->pr_gid, p->gid);
+	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+	
+	return 0;
+}
+
+/* Here is the structure in which status of each thread is captured. */
+struct elf_thread_status
+{
+	struct list_head list;
+	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
+	elf_fpregset_t fpu;		/* NT_PRFPREG */
+	struct task_struct *thread;
+#ifdef ELF_CORE_COPY_XFPREGS
+	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
+#endif
+	struct memelfnote notes[3];
+	int num_notes;
+};
+
+/*
+ * In order to add the specific thread information for the elf file format,
+ * we need to keep a linked list of every thread's pr_status and then
+ * create a single section for them in the final core file.
+ */
+static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
+{
+	int sz = 0;
+	struct task_struct *p = t->thread;
+	t->num_notes = 0;
+
+	fill_prstatus(&t->prstatus, p, signr);
+	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);	
+	
+	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
+	t->num_notes++;
+	sz += notesize(&t->notes[0]);
+
+	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
+		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
+		t->num_notes++;
+		sz += notesize(&t->notes[1]);
+	}
+
+#ifdef ELF_CORE_COPY_XFPREGS
+	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
+		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
+		t->num_notes++;
+		sz += notesize(&t->notes[2]);
+	}
+#endif	
+	return sz;
+}
+
+/*
+ * Actual dumper
+ *
+ * This is a two-pass process; first we find the offsets of the bits,
+ * and then they are actually written out.  If we run out of core limit
+ * we just truncate.
+ */
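+/*
+ * Roughly, the core file written below is laid out as:
+ *
+ *	ELF header
+ *	program headers (one PT_NOTE entry, then one PT_LOAD per vma)
+ *	note data (prstatus, psinfo, task_struct, auxv, fpu and thread notes)
+ *	page-aligned contents of each dumpable vma
+ *
+ * Anything that would exceed RLIMIT_CORE is truncated.
+ */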
+static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
+{
+#define	NUM_NOTES	6
+	int has_dumped = 0;
+	mm_segment_t fs;
+	int segs;
+	size_t size = 0;
+	int i;
+	struct vm_area_struct *vma;
+	struct elfhdr *elf = NULL;
+	off_t offset = 0, dataoff;
+	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
+	int numnote;
+	struct memelfnote *notes = NULL;
+	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
+	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
+ 	struct task_struct *g, *p;
+ 	LIST_HEAD(thread_list);
+ 	struct list_head *t;
+	elf_fpregset_t *fpu = NULL;
+#ifdef ELF_CORE_COPY_XFPREGS
+	elf_fpxregset_t *xfpu = NULL;
+#endif
+	int thread_status_size = 0;
+	elf_addr_t *auxv;
+
+	/*
+	 * We no longer stop all VM operations.
+	 * 
+	 * This is because those processes that could possibly change map_count or
+	 * the mmap / vma pages are now blocked in do_exit on current finishing
+	 * this core dump.
+	 *
+	 * Only ptrace can touch these memory addresses, but it doesn't change
+	 * the map_count or the pages allocated.  So no possibility of crashing
+	 * exists while dumping the mm->vm_next areas to the core file.
+	 */
+  
+	/* alloc memory for large data structures: too large to be on stack */
+	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
+	if (!elf)
+		goto cleanup;
+	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
+	if (!prstatus)
+		goto cleanup;
+	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
+	if (!psinfo)
+		goto cleanup;
+	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
+	if (!notes)
+		goto cleanup;
+	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
+	if (!fpu)
+		goto cleanup;
+#ifdef ELF_CORE_COPY_XFPREGS
+	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
+	if (!xfpu)
+		goto cleanup;
+#endif
+
+	if (signr) {
+		struct elf_thread_status *tmp;
+		read_lock(&tasklist_lock);
+		do_each_thread(g,p)
+			if (current->mm == p->mm && current != p) {
+				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+				if (!tmp) {
+					read_unlock(&tasklist_lock);
+					goto cleanup;
+				}
+				memset(tmp, 0, sizeof(*tmp));
+				INIT_LIST_HEAD(&tmp->list);
+				tmp->thread = p;
+				list_add(&tmp->list, &thread_list);
+			}
+		while_each_thread(g,p);
+		read_unlock(&tasklist_lock);
+		list_for_each(t, &thread_list) {
+			struct elf_thread_status *tmp;
+			int sz;
+
+			tmp = list_entry(t, struct elf_thread_status, list);
+			sz = elf_dump_thread_status(signr, tmp);
+			thread_status_size += sz;
+		}
+	}
+	/* now collect the dump for the current task */
+	memset(prstatus, 0, sizeof(*prstatus));
+	fill_prstatus(prstatus, current, signr);
+	elf_core_copy_regs(&prstatus->pr_reg, regs);
+	
+	segs = current->mm->map_count;
+#ifdef ELF_CORE_EXTRA_PHDRS
+	segs += ELF_CORE_EXTRA_PHDRS;
+#endif
+
+	/* Set up header */
+	fill_elf_header(elf, segs+1);	/* including notes section */
+
+	has_dumped = 1;
+	current->flags |= PF_DUMPCORE;
+
+	/*
+	 * Set up the notes in similar form to SVR4 core dumps made
+	 * with info from their /proc.
+	 */
+
+	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
+	
+	fill_psinfo(psinfo, current->group_leader, current->mm);
+	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+	
+	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
+  
+	numnote = 3;
+
+	auxv = (elf_addr_t *) current->mm->saved_auxv;
+
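+	/*
+	 * Count the saved auxv entries in words, including the
+	 * terminating AT_NULL pair, so the whole vector gets dumped.
+	 */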
+	i = 0;
+	do
+		i += 2;
+	while (auxv[i - 2] != AT_NULL);
+	fill_note(&notes[numnote++], "CORE", NT_AUXV,
+		  i * sizeof (elf_addr_t), auxv);
+
+  	/* Try to dump the FPU. */
+	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
+		fill_note(notes + numnote++,
+			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
+#ifdef ELF_CORE_COPY_XFPREGS
+	if (elf_core_copy_task_xfpregs(current, xfpu))
+		fill_note(notes + numnote++,
+			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
+#endif	
+  
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	DUMP_WRITE(elf, sizeof(*elf));
+	offset += sizeof(*elf);				/* Elf header */
+	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
+
+	/* Write notes phdr entry */
+	{
+		struct elf_phdr phdr;
+		int sz = 0;
+
+		for (i = 0; i < numnote; i++)
+			sz += notesize(notes + i);
+		
+		sz += thread_status_size;
+
+		fill_elf_note_phdr(&phdr, sz, offset);
+		offset += sz;
+		DUMP_WRITE(&phdr, sizeof(phdr));
+	}
+
+	/* Page-align dumped data */
+	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+
+	/* Write program headers for segments dump */
+	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+		struct elf_phdr phdr;
+		size_t sz;
+
+		sz = vma->vm_end - vma->vm_start;
+
+		phdr.p_type = PT_LOAD;
+		phdr.p_offset = offset;
+		phdr.p_vaddr = vma->vm_start;
+		phdr.p_paddr = 0;
+		phdr.p_filesz = maydump(vma) ? sz : 0;
+		phdr.p_memsz = sz;
+		offset += phdr.p_filesz;
+		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
+		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+		phdr.p_align = ELF_EXEC_PAGESIZE;
+
+		DUMP_WRITE(&phdr, sizeof(phdr));
+	}
+
+#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
+	ELF_CORE_WRITE_EXTRA_PHDRS;
+#endif
+
+ 	/* write out the notes section */
+	for (i = 0; i < numnote; i++)
+		if (!writenote(notes + i, file))
+			goto end_coredump;
+
+	/* write out the thread status notes section */
+	list_for_each(t, &thread_list) {
+		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
+		for (i = 0; i < tmp->num_notes; i++)
+			if (!writenote(&tmp->notes[i], file))
+				goto end_coredump;
+	}
+ 
+	DUMP_SEEK(dataoff);
+
+	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+		unsigned long addr;
+
+		if (!maydump(vma))
+			continue;
+
+		for (addr = vma->vm_start;
+		     addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			struct page* page;
+			struct vm_area_struct *vma;
+
+			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
+						&page, &vma) <= 0) {
+				DUMP_SEEK (file->f_pos + PAGE_SIZE);
+			} else {
+				if (page == ZERO_PAGE(addr)) {
+					DUMP_SEEK (file->f_pos + PAGE_SIZE);
+				} else {
+					void *kaddr;
+					flush_cache_page(vma, addr, page_to_pfn(page));
+					kaddr = kmap(page);
+					if ((size += PAGE_SIZE) > limit ||
+					    !dump_write(file, kaddr,
+					    PAGE_SIZE)) {
+						kunmap(page);
+						page_cache_release(page);
+						goto end_coredump;
+					}
+					kunmap(page);
+				}
+				page_cache_release(page);
+			}
+		}
+	}
+
+#ifdef ELF_CORE_WRITE_EXTRA_DATA
+	ELF_CORE_WRITE_EXTRA_DATA;
+#endif
+
+	if ((off_t) file->f_pos != offset) {
+		/* Sanity check */
+		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+		       (off_t) file->f_pos, offset);
+	}
+
+end_coredump:
+	set_fs(fs);
+
+cleanup:
+	while(!list_empty(&thread_list)) {
+		struct list_head *tmp = thread_list.next;
+		list_del(tmp);
+		kfree(list_entry(tmp, struct elf_thread_status, list));
+	}
+
+	kfree(elf);
+	kfree(prstatus);
+	kfree(psinfo);
+	kfree(notes);
+	kfree(fpu);
+#ifdef ELF_CORE_COPY_XFPREGS
+	kfree(xfpu);
+#endif
+	return has_dumped;
+#undef NUM_NOTES
+}
+
+#endif		/* USE_ELF_CORE_DUMP */
+
+static int __init init_elf_binfmt(void)
+{
+	return register_binfmt(&elf_format);
+}
+
+static void __exit exit_elf_binfmt(void)
+{
+	/* Remove the COFF and ELF loaders. */
+	unregister_binfmt(&elf_format);
+}
+
+core_initcall(init_elf_binfmt);
+module_exit(exit_elf_binfmt);
+MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
new file mode 100644
index 0000000..134c9c0
--- /dev/null
+++ b/fs/binfmt_elf_fdpic.c
@@ -0,0 +1,1101 @@
+/* binfmt_elf_fdpic.c: FDPIC ELF binary format
+ *
+ * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * Derived from binfmt_elf.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/binfmts.h>
+#include <linux/string.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/elf.h>
+#include <linux/elf-fdpic.h>
+#include <linux/elfcore.h>
+
+#include <asm/uaccess.h>
+#include <asm/param.h>
+#include <asm/pgalloc.h>
+
+typedef char *elf_caddr_t;
+#ifndef elf_addr_t
+#define elf_addr_t unsigned long
+#endif
+
+#if 0
+#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
+#else
+#define kdebug(fmt, ...) do {} while(0)
+#endif
+
+MODULE_LICENSE("GPL");
+
+static int load_elf_fdpic_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+//static int load_elf_fdpic_library(struct file *);
+static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params, struct file *file);
+static int elf_fdpic_map_file(struct elf_fdpic_params *params,
+			      struct file *file,
+			      struct mm_struct *mm,
+			      const char *what);
+
+static int create_elf_fdpic_tables(struct linux_binprm *bprm,
+				   struct mm_struct *mm,
+				   struct elf_fdpic_params *exec_params,
+				   struct elf_fdpic_params *interp_params);
+
+#ifndef CONFIG_MMU
+static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *_sp);
+static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *params,
+						   struct file *file,
+						   struct mm_struct *mm);
+#endif
+
+static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
+					     struct file *file,
+					     struct mm_struct *mm);
+
+static struct linux_binfmt elf_fdpic_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_elf_fdpic_binary,
+//	.load_shlib	= load_elf_fdpic_library,
+//	.core_dump	= elf_fdpic_core_dump,
+	.min_coredump	= ELF_EXEC_PAGESIZE,
+};
+
+static int __init init_elf_fdpic_binfmt(void)  { return register_binfmt(&elf_fdpic_format); }
+static void __exit exit_elf_fdpic_binfmt(void) { unregister_binfmt(&elf_fdpic_format); }
+
+module_init(init_elf_fdpic_binfmt)
+module_exit(exit_elf_fdpic_binfmt)
+
+static int is_elf_fdpic(struct elfhdr *hdr, struct file *file)
+{
+	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
+		return 0;
+	if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)
+		return 0;
+	if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr))
+		return 0;
+	if (!file->f_op || !file->f_op->mmap)
+		return 0;
+	return 1;
+}
+
+/*****************************************************************************/
+/*
+ * read the program header table into memory
+ */
+static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params, struct file *file)
+{
+	struct elf32_phdr *phdr;
+	unsigned long size;
+	int retval, loop;
+
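+	/*
+	 * Sanity-check the header first: every entry must have the size
+	 * we expect, and the whole table is limited to 64KB.
+	 */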
+	if (params->hdr.e_phentsize != sizeof(struct elf_phdr))
+		return -ENOMEM;
+	if (params->hdr.e_phnum > 65536U / sizeof(struct elf_phdr))
+		return -ENOMEM;
+
+	size = params->hdr.e_phnum * sizeof(struct elf_phdr);
+	params->phdrs = kmalloc(size, GFP_KERNEL);
+	if (!params->phdrs)
+		return -ENOMEM;
+
+	retval = kernel_read(file, params->hdr.e_phoff, (char *) params->phdrs, size);
+	if (retval < 0)
+		return retval;
+
+	/* determine stack size for this binary */
+	phdr = params->phdrs;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		if (phdr->p_type != PT_GNU_STACK)
+			continue;
+
+		if (phdr->p_flags & PF_X)
+			params->flags |= ELF_FDPIC_FLAG_EXEC_STACK;
+		else
+			params->flags |= ELF_FDPIC_FLAG_NOEXEC_STACK;
+
+		params->stack_size = phdr->p_memsz;
+		break;
+	}
+
+	return 0;
+} /* end elf_fdpic_fetch_phdrs() */
+
+/*****************************************************************************/
+/*
+ * load an fdpic binary into various bits of memory
+ */
+static int load_elf_fdpic_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+{
+	struct elf_fdpic_params exec_params, interp_params;
+	struct elf_phdr *phdr;
+	unsigned long stack_size;
+	struct file *interpreter = NULL; /* to shut gcc up */
+	char *interpreter_name = NULL;
+	int executable_stack;
+	int retval, i;
+
+	memset(&exec_params, 0, sizeof(exec_params));
+	memset(&interp_params, 0, sizeof(interp_params));
+
+	exec_params.hdr = *(struct elfhdr *) bprm->buf;
+	exec_params.flags = ELF_FDPIC_FLAG_PRESENT | ELF_FDPIC_FLAG_EXECUTABLE;
+
+	/* check that this is a binary we know how to deal with */
+	retval = -ENOEXEC;
+	if (!is_elf_fdpic(&exec_params.hdr, bprm->file))
+		goto error;
+
+	/* read the program header table */
+	retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file);
+	if (retval < 0)
+		goto error;
+
+	/* scan for a program header that specifies an interpreter */
+	phdr = exec_params.phdrs;
+
+	for (i = 0; i < exec_params.hdr.e_phnum; i++, phdr++) {
+		switch (phdr->p_type) {
+		case PT_INTERP:
+			retval = -ENOMEM;
+			if (phdr->p_filesz > PATH_MAX)
+				goto error;
+			retval = -ENOENT;
+			if (phdr->p_filesz < 2)
+				goto error;
+
+			/* read the name of the interpreter into memory */
+			interpreter_name = (char *) kmalloc(phdr->p_filesz, GFP_KERNEL);
+			if (!interpreter_name)
+				goto error;
+
+			retval = kernel_read(bprm->file,
+					     phdr->p_offset,
+					     interpreter_name,
+					     phdr->p_filesz);
+			if (retval < 0)
+				goto error;
+
+			retval = -ENOENT;
+			if (interpreter_name[phdr->p_filesz - 1] != '\0')
+				goto error;
+
+			kdebug("Using ELF interpreter %s", interpreter_name);
+
+			/* replace the program with the interpreter */
+			interpreter = open_exec(interpreter_name);
+			retval = PTR_ERR(interpreter);
+			if (IS_ERR(interpreter)) {
+				interpreter = NULL;
+				goto error;
+			}
+
+			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
+			if (retval < 0)
+				goto error;
+
+			interp_params.hdr = *((struct elfhdr *) bprm->buf);
+			break;
+
+		case PT_LOAD:
+#ifdef CONFIG_MMU
+			if (exec_params.load_addr == 0)
+				exec_params.load_addr = phdr->p_vaddr;
+#endif
+			break;
+		}
+
+	}
+
+	if (elf_check_const_displacement(&exec_params.hdr))
+		exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
+
+	/* perform insanity checks on the interpreter */
+	if (interpreter_name) {
+		retval = -ELIBBAD;
+		if (!is_elf_fdpic(&interp_params.hdr, interpreter))
+			goto error;
+
+		interp_params.flags = ELF_FDPIC_FLAG_PRESENT;
+
+		/* read the interpreter's program header table */
+		retval = elf_fdpic_fetch_phdrs(&interp_params, interpreter);
+		if (retval < 0)
+			goto error;
+	}
+
+	stack_size = exec_params.stack_size;
+	if (stack_size < interp_params.stack_size)
+		stack_size = interp_params.stack_size;
+
+	if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+		executable_stack = EXSTACK_ENABLE_X;
+	else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
+		executable_stack = EXSTACK_DISABLE_X;
+	else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+		executable_stack = EXSTACK_ENABLE_X;
+	else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
+		executable_stack = EXSTACK_DISABLE_X;
+	else
+		executable_stack = EXSTACK_DEFAULT;
+
+	retval = -ENOEXEC;
+	if (stack_size == 0)
+		goto error;
+
+	if (elf_check_const_displacement(&interp_params.hdr))
+		interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
+
+	/* flush all traces of the currently running executable */
+	retval = flush_old_exec(bprm);
+	if (retval)
+		goto error;
+
+	/* there's now no turning back... the old userspace image is dead,
+	 * defunct, deceased, etc. after this point we have to exit via
+	 * error_kill */
+	set_personality(PER_LINUX_FDPIC);
+	set_binfmt(&elf_fdpic_format);
+
+	current->mm->start_code = 0;
+	current->mm->end_code = 0;
+	current->mm->start_stack = 0;
+	current->mm->start_data = 0;
+	current->mm->end_data = 0;
+	current->mm->context.exec_fdpic_loadmap = 0;
+	current->mm->context.interp_fdpic_loadmap = 0;
+
+	current->flags &= ~PF_FORKNOEXEC;
+
+#ifdef CONFIG_MMU
+	elf_fdpic_arch_lay_out_mm(&exec_params,
+				  &interp_params,
+				  &current->mm->start_stack,
+				  &current->mm->start_brk);
+#endif
+
+	/* do this so that we can load the interpreter, if need be
+	 * - we will change some of these later
+	 */
+	set_mm_counter(current->mm, rss, 0);
+
+#ifdef CONFIG_MMU
+	retval = setup_arg_pages(bprm, current->mm->start_stack, executable_stack);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto error_kill;
+	}
+#endif
+
+	/* load the executable and interpreter into memory */
+	retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm, "executable");
+	if (retval < 0)
+		goto error_kill;
+
+	if (interpreter_name) {
+		retval = elf_fdpic_map_file(&interp_params, interpreter,
+					    current->mm, "interpreter");
+		if (retval < 0) {
+			printk(KERN_ERR "Unable to load interpreter\n");
+			goto error_kill;
+		}
+
+		allow_write_access(interpreter);
+		fput(interpreter);
+		interpreter = NULL;
+	}
+
+#ifdef CONFIG_MMU
+	if (!current->mm->start_brk)
+		current->mm->start_brk = current->mm->end_data;
+
+	current->mm->brk = current->mm->start_brk = PAGE_ALIGN(current->mm->start_brk);
+
+#else
+	/* create a stack and brk area big enough for everyone
+	 * - the brk heap starts at the bottom and works up
+	 * - the stack starts at the top and works down
+	 */
+	stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK;
+	if (stack_size < PAGE_SIZE * 2)
+		stack_size = PAGE_SIZE * 2;
+
+	down_write(&current->mm->mmap_sem);
+	current->mm->start_brk = do_mmap(NULL,
+					 0,
+					 stack_size,
+					 PROT_READ | PROT_WRITE | PROT_EXEC,
+					 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
+					 0);
+
+	if (IS_ERR((void *) current->mm->start_brk)) {
+		up_write(&current->mm->mmap_sem);
+		retval = current->mm->start_brk;
+		current->mm->start_brk = 0;
+		goto error_kill;
+	}
+
+	if (do_mremap(current->mm->start_brk,
+		      stack_size,
+		      ksize((char *) current->mm->start_brk),
+		      0, 0
+		      ) == current->mm->start_brk
+	    )
+		stack_size = ksize((char *) current->mm->start_brk);
+	up_write(&current->mm->mmap_sem);
+
+	current->mm->brk = current->mm->start_brk;
+	current->mm->context.end_brk = current->mm->start_brk;
+	current->mm->context.end_brk += (stack_size > PAGE_SIZE) ? (stack_size - PAGE_SIZE) : 0;
+	current->mm->start_stack = current->mm->start_brk + stack_size;
+#endif
+
+	compute_creds(bprm);
+	current->flags &= ~PF_FORKNOEXEC;
+	if (create_elf_fdpic_tables(bprm, current->mm, &exec_params, &interp_params) < 0)
+		goto error_kill;
+
+	kdebug("- start_code  %lx",	(long) current->mm->start_code);
+	kdebug("- end_code    %lx",	(long) current->mm->end_code);
+	kdebug("- start_data  %lx",	(long) current->mm->start_data);
+	kdebug("- end_data    %lx",	(long) current->mm->end_data);
+	kdebug("- start_brk   %lx",	(long) current->mm->start_brk);
+	kdebug("- brk         %lx",	(long) current->mm->brk);
+	kdebug("- start_stack %lx",	(long) current->mm->start_stack);
+
+#ifdef ELF_FDPIC_PLAT_INIT
+	/*
+	 * The ABI may specify that certain registers be set up in special
+	 * ways (on i386 %edx is the address of a DT_FINI function, for
+	 * example).  This macro performs whatever initialization to
+	 * the regs structure is required.
+	 */
+	ELF_FDPIC_PLAT_INIT(regs,
+			    exec_params.map_addr,
+			    interp_params.map_addr,
+			    interp_params.dynamic_addr ?: exec_params.dynamic_addr
+			    );
+#endif
+
+	/* everything is now ready... get the userspace context ready to roll */
+	start_thread(regs,
+		     interp_params.entry_addr ?: exec_params.entry_addr,
+		     current->mm->start_stack);
+
+	if (unlikely(current->ptrace & PT_PTRACED)) {
+		if (current->ptrace & PT_TRACE_EXEC)
+			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
+		else
+			send_sig(SIGTRAP, current, 0);
+	}
+
+	retval = 0;
+
+error:
+	if (interpreter) {
+		allow_write_access(interpreter);
+		fput(interpreter);
+	}
+	if (interpreter_name)
+		kfree(interpreter_name);
+	if (exec_params.phdrs)
+		kfree(exec_params.phdrs);
+	if (exec_params.loadmap)
+		kfree(exec_params.loadmap);
+	if (interp_params.phdrs)
+		kfree(interp_params.phdrs);
+	if (interp_params.loadmap)
+		kfree(interp_params.loadmap);
+	return retval;
+
+	/* unrecoverable error - kill the process */
+ error_kill:
+	send_sig(SIGSEGV, current, 0);
+	goto error;
+
+} /* end load_elf_fdpic_binary() */
+
+/*****************************************************************************/
+/*
+ * present useful information to the program
+ */
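+/*
+ * Roughly, the stack built here looks like this, low addresses first:
+ *
+ *	argc
+ *	argv[0..argc-1], NULL
+ *	envp[0..envc-1], NULL
+ *	auxv entries (AT_HWCAP .. AT_EGID, optional AT_PLATFORM, AT_NULL)
+ *	interpreter and executable load maps
+ *	platform string, argument and environment strings
+ */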
+static int create_elf_fdpic_tables(struct linux_binprm *bprm,
+				   struct mm_struct *mm,
+				   struct elf_fdpic_params *exec_params,
+				   struct elf_fdpic_params *interp_params)
+{
+	unsigned long sp, csp, nitems;
+	elf_caddr_t *argv, *envp;
+	size_t platform_len = 0, len;
+	char *k_platform, *u_platform, *p;
+	long hwcap;
+	int loop;
+
+	/* we're going to shovel a whole load of stuff onto the stack */
+#ifdef CONFIG_MMU
+	sp = bprm->p;
+#else
+	sp = mm->start_stack;
+
+	/* stack the program arguments and environment */
+	if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0)
+		return -EFAULT;
+#endif
+
+	/* get hold of platform and hardware capabilities masks for the machine
+	 * we are running on.  In some cases (Sparc), this info is impossible
+	 * to get, in others (i386) it is merely difficult.
+	 */
+	hwcap = ELF_HWCAP;
+	k_platform = ELF_PLATFORM;
+
+	if (k_platform) {
+		platform_len = strlen(k_platform) + 1;
+		sp -= platform_len;
+		if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
+			return -EFAULT;
+	}
+
+	u_platform = (char *) sp;
+
+#if defined(__i386__) && defined(CONFIG_SMP)
+	/* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
+	 * by the processes running on the same package. One thing we can do
+	 * is to shuffle the initial stack for them.
+	 *
+	 * the conditionals here are unneeded, but kept in to make the
+	 * code behaviour the same as pre-change unless we have hyperthreaded
+	 * processors. This keeps Mr Marcelo Person happier but should be
+	 * removed for 2.5
+	 */
+	if (smp_num_siblings > 1)
+		sp = sp - ((current->pid % 64) << 7);
+#endif
+
+	sp &= ~7UL;
+
+	/* stack the load map(s) */
+	len = sizeof(struct elf32_fdpic_loadmap);
+	len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
+	sp = (sp - len) & ~7UL;
+	exec_params->map_addr = sp;
+
+	if (copy_to_user((void *) sp, exec_params->loadmap, len) != 0)
+		return -EFAULT;
+
+	current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
+
+	if (interp_params->loadmap) {
+		len = sizeof(struct elf32_fdpic_loadmap);
+		len += sizeof(struct elf32_fdpic_loadseg) * interp_params->loadmap->nsegs;
+		sp = (sp - len) & ~7UL;
+		interp_params->map_addr = sp;
+
+		if (copy_to_user((void *) sp, interp_params->loadmap, len) != 0)
+			return -EFAULT;
+
+		current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
+	}
+
+	/* force 16 byte _final_ alignment here for generality */
+#define DLINFO_ITEMS 13
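+	/*
+	 * DLINFO_ITEMS counts the fixed NEW_AUX_ENT() entries stacked
+	 * below: AT_HWCAP through AT_EGID, thirteen in all.
+	 */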
+
+	nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0);
+#ifdef DLINFO_ARCH_ITEMS
+	nitems += DLINFO_ARCH_ITEMS;
+#endif
+
+	csp = sp;
+	sp -= nitems * 2 * sizeof(unsigned long);
+	sp -= (bprm->envc + 1) * sizeof(char *);	/* envv[] */
+	sp -= (bprm->argc + 1) * sizeof(char *);	/* argv[] */
+	sp -= 1 * sizeof(unsigned long);		/* argc */
+
+	csp -= sp & 15UL;
+	sp -= sp & 15UL;
+
+	/* put the ELF interpreter info on the stack */
+#define NEW_AUX_ENT(nr, id, val)						\
+	do {									\
+		struct { unsigned long _id, _val; } *ent = (void *) csp;	\
+		__put_user((id), &ent[nr]._id);					\
+		__put_user((val), &ent[nr]._val);				\
+	} while (0)
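+	/*
+	 * Callers reserve room by dropping csp first and then fill the
+	 * slots by index, so each entry ends up as an (id, value) pair
+	 * of unsigned longs.
+	 */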
+
+	csp -= 2 * sizeof(unsigned long);
+	NEW_AUX_ENT(0, AT_NULL, 0);
+	if (k_platform) {
+		csp -= 2 * sizeof(unsigned long);
+		NEW_AUX_ENT(0, AT_PLATFORM, (elf_addr_t)(unsigned long) u_platform);
+	}
+
+	csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
+	NEW_AUX_ENT( 0, AT_HWCAP,		hwcap);
+	NEW_AUX_ENT( 1, AT_PAGESZ,		PAGE_SIZE);
+	NEW_AUX_ENT( 2, AT_CLKTCK,		CLOCKS_PER_SEC);
+	NEW_AUX_ENT( 3, AT_PHDR,		exec_params->ph_addr);
+	NEW_AUX_ENT( 4, AT_PHENT,		sizeof(struct elf_phdr));
+	NEW_AUX_ENT( 5, AT_PHNUM,		exec_params->hdr.e_phnum);
+	NEW_AUX_ENT( 6,	AT_BASE,		interp_params->elfhdr_addr);
+	NEW_AUX_ENT( 7, AT_FLAGS,		0);
+	NEW_AUX_ENT( 8, AT_ENTRY,		exec_params->entry_addr);
+	NEW_AUX_ENT( 9, AT_UID,			(elf_addr_t) current->uid);
+	NEW_AUX_ENT(10, AT_EUID,		(elf_addr_t) current->euid);
+	NEW_AUX_ENT(11, AT_GID,			(elf_addr_t) current->gid);
+	NEW_AUX_ENT(12, AT_EGID,		(elf_addr_t) current->egid);
+
+#ifdef ARCH_DLINFO
+	/* ARCH_DLINFO must come last so platform specific code can enforce
+	 * special alignment requirements on the AUXV if necessary (eg. PPC).
+	 */
+	ARCH_DLINFO;
+#endif
+#undef NEW_AUX_ENT
+
+	/* allocate room for argv[] and envv[] */
+	csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
+	envp = (elf_caddr_t *) csp;
+	csp -= (bprm->argc + 1) * sizeof(elf_caddr_t);
+	argv = (elf_caddr_t *) csp;
+
+	/* stack argc */
+	csp -= sizeof(unsigned long);
+	__put_user(bprm->argc, (unsigned long *) csp);
+
+	if (csp != sp)
+		BUG();
+
+	/* fill in the argv[] array */
+#ifdef CONFIG_MMU
+	current->mm->arg_start = bprm->p;
+#else
+	current->mm->arg_start = current->mm->start_stack - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
+#endif
+
+	p = (char *) current->mm->arg_start;
+	for (loop = bprm->argc; loop > 0; loop--) {
+		__put_user((elf_caddr_t) p, argv++);
+		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
+		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+			return -EINVAL;
+		p += len;
+	}
+	__put_user(NULL, argv);
+	current->mm->arg_end = (unsigned long) p;
+
+	/* fill in the envv[] array */
+	current->mm->env_start = (unsigned long) p;
+	for (loop = bprm->envc; loop > 0; loop--) {
+		__put_user((elf_caddr_t)(unsigned long) p, envp++);
+		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
+		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+			return -EINVAL;
+		p += len;
+	}
+	__put_user(NULL, envp);
+	current->mm->env_end = (unsigned long) p;
+
+	mm->start_stack = (unsigned long) sp;
+	return 0;
+} /* end create_elf_fdpic_tables() */
+
+/*****************************************************************************/
+/*
+ * transfer the program arguments and environment from the holding pages onto
+ * the stack
+ */
+#ifndef CONFIG_MMU
+static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *_sp)
+{
+	unsigned long index, stop, sp;
+	char *src;
+	int ret = 0;
+
+	stop = bprm->p >> PAGE_SHIFT;
+	sp = *_sp;
+
+	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
+		src = kmap(bprm->page[index]);
+		sp -= PAGE_SIZE;
+		if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0)
+			ret = -EFAULT;
+		kunmap(bprm->page[index]);
+		if (ret < 0)
+			goto out;
+	}
+
+	*_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15;
+
+ out:
+	return ret;
+} /* end elf_fdpic_transfer_args_to_stack() */
+#endif
+
+/*****************************************************************************/
+/*
+ * load the appropriate binary image (executable or interpreter) into memory
+ * - we assume no MMU is available
+ * - if no other PIC bits are set in params->hdr.e_flags
+ *   - we assume that the LOADable segments in the binary are independently relocatable
+ *   - we assume R/O executable segments are shareable
+ * - else
+ *   - we assume the loadable parts of the image to require fixed displacement
+ *   - the image is not shareable
+ */
+static int elf_fdpic_map_file(struct elf_fdpic_params *params,
+			      struct file *file,
+			      struct mm_struct *mm,
+			      const char *what)
+{
+	struct elf32_fdpic_loadmap *loadmap;
+#ifdef CONFIG_MMU
+	struct elf32_fdpic_loadseg *mseg;
+#endif
+	struct elf32_fdpic_loadseg *seg;
+	struct elf32_phdr *phdr;
+	unsigned long load_addr, stop;
+	unsigned nloads, tmp;
+	size_t size;
+	int loop, ret;
+
+	/* allocate a load map table */
+	nloads = 0;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++)
+		if (params->phdrs[loop].p_type == PT_LOAD)
+			nloads++;
+
+	if (nloads == 0)
+		return -ELIBBAD;
+
+	size = sizeof(*loadmap) + nloads * sizeof(*seg);
+	loadmap = kmalloc(size, GFP_KERNEL);
+	if (!loadmap)
+		return -ENOMEM;
+
+	params->loadmap = loadmap;
+	memset(loadmap, 0, size);
+
+	loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
+	loadmap->nsegs = nloads;
+
+	load_addr = params->load_addr;
+	seg = loadmap->segs;
+
+	/* map the requested LOADs into the memory space */
+	switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
+	case ELF_FDPIC_FLAG_CONSTDISP:
+	case ELF_FDPIC_FLAG_CONTIGUOUS:
+#ifndef CONFIG_MMU
+		ret = elf_fdpic_map_file_constdisp_on_uclinux(params, file, mm);
+		if (ret < 0)
+			return ret;
+		break;
+#endif
+	default:
+		ret = elf_fdpic_map_file_by_direct_mmap(params, file, mm);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	/* map the entry point */
+	if (params->hdr.e_entry) {
+		seg = loadmap->segs;
+		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
+			if (params->hdr.e_entry >= seg->p_vaddr &&
+			    params->hdr.e_entry < seg->p_vaddr + seg->p_memsz
+			    ) {
+				params->entry_addr =
+					(params->hdr.e_entry - seg->p_vaddr) + seg->addr;
+				break;
+			}
+		}
+	}
+
+	/* determine where the program header table has wound up if mapped */
+	stop = params->hdr.e_phoff + params->hdr.e_phnum * sizeof (struct elf_phdr);
+	phdr = params->phdrs;
+
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		if (phdr->p_offset > params->hdr.e_phoff ||
+		    phdr->p_offset + phdr->p_filesz < stop)
+			continue;
+
+		seg = loadmap->segs;
+		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
+			if (phdr->p_vaddr >= seg->p_vaddr &&
+			    phdr->p_vaddr + phdr->p_filesz <= seg->p_vaddr + seg->p_memsz
+			    ) {
+				params->ph_addr = (phdr->p_vaddr - seg->p_vaddr) + seg->addr +
+					params->hdr.e_phoff - phdr->p_offset;
+				break;
+			}
+		}
+		break;
+	}
+
+	/* determine where the dynamic section has wound up if there is one */
+	phdr = params->phdrs;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		if (phdr->p_type != PT_DYNAMIC)
+			continue;
+
+		seg = loadmap->segs;
+		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
+			if (phdr->p_vaddr >= seg->p_vaddr &&
+			    phdr->p_vaddr + phdr->p_memsz <= seg->p_vaddr + seg->p_memsz
+			    ) {
+				params->dynamic_addr = (phdr->p_vaddr - seg->p_vaddr) + seg->addr;
+
+				/* check the dynamic section contains at least one item, and that
+				 * the last item is a NULL entry */
+				if (phdr->p_memsz == 0 ||
+				    phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
+					goto dynamic_error;
+
+				tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
+				if (((Elf32_Dyn *) params->dynamic_addr)[tmp - 1].d_tag != 0)
+					goto dynamic_error;
+				break;
+			}
+		}
+		break;
+	}
+
+	/* now elide adjacent segments in the load map on MMU linux
+ * - on uClinux the holes between segments may actually be filled with system stuff or stuff from
+	 *   other processes
+	 */
+#ifdef CONFIG_MMU
+	nloads = loadmap->nsegs;
+	mseg = loadmap->segs;
+	seg = mseg + 1;
+	for (loop = 1; loop < nloads; loop++) {
+		/* see if we have a candidate for merging */
+		if (seg->p_vaddr - mseg->p_vaddr == seg->addr - mseg->addr) {
+			load_addr = PAGE_ALIGN(mseg->addr + mseg->p_memsz);
+			if (load_addr == (seg->addr & PAGE_MASK)) {
+				mseg->p_memsz += load_addr - (mseg->addr + mseg->p_memsz);
+				mseg->p_memsz += seg->addr & ~PAGE_MASK;
+				mseg->p_memsz += seg->p_memsz;
+				loadmap->nsegs--;
+				continue;
+			}
+		}
+
+		mseg++;
+		if (mseg != seg)
+			*mseg = *seg;
+	}
+#endif
+
+	kdebug("Mapped Object [%s]:", what);
+	kdebug("- elfhdr   : %lx", params->elfhdr_addr);
+	kdebug("- entry    : %lx", params->entry_addr);
+	kdebug("- PHDR[]   : %lx", params->ph_addr);
+	kdebug("- DYNAMIC[]: %lx", params->dynamic_addr);
+	seg = loadmap->segs;
+	for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
+		kdebug("- LOAD[%d] : %08x-%08x [va=%x ms=%x]",
+		       loop,
+		       seg->addr, seg->addr + seg->p_memsz - 1,
+		       seg->p_vaddr, seg->p_memsz);
+
+	return 0;
+
+ dynamic_error:
+	printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
+	       what, file->f_dentry->d_inode->i_ino);
+	return -ELIBBAD;
+} /* end elf_fdpic_map_file() */
+
+/*****************************************************************************/
+/*
+ * map a file with constant displacement under uClinux
+ */
+#ifndef CONFIG_MMU
+static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *params,
+						   struct file *file,
+						   struct mm_struct *mm)
+{
+	struct elf32_fdpic_loadseg *seg;
+	struct elf32_phdr *phdr;
+	unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
+	loff_t fpos;
+	int loop, ret;
+
+	load_addr = params->load_addr;
+	seg = params->loadmap->segs;
+
+	/* determine the bounds of the contiguous overall allocation we must make */
+	phdr = params->phdrs;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		if (params->phdrs[loop].p_type != PT_LOAD)
+			continue;
+
+		if (base > phdr->p_vaddr)
+			base = phdr->p_vaddr;
+		if (top < phdr->p_vaddr + phdr->p_memsz)
+			top = phdr->p_vaddr + phdr->p_memsz;
+	}
+
+	/* allocate one big anon block for everything */
+	mflags = MAP_PRIVATE;
+	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
+		mflags |= MAP_EXECUTABLE;
+
+	down_write(&mm->mmap_sem);
+	maddr = do_mmap(NULL, load_addr, top - base,
+			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
+	up_write(&mm->mmap_sem);
+	if (IS_ERR((void *) maddr))
+		return (int) maddr;
+
+	if (load_addr != 0)
+		load_addr += PAGE_ALIGN(top - base);
+
+	/* and then load the file segments into it */
+	phdr = params->phdrs;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		if (params->phdrs[loop].p_type != PT_LOAD)
+			continue;
+
+		fpos = phdr->p_offset;
+
+		seg->addr = maddr + (phdr->p_vaddr - base);
+		seg->p_vaddr = phdr->p_vaddr;
+		seg->p_memsz = phdr->p_memsz;
+
+		ret = file->f_op->read(file, (void *) seg->addr, phdr->p_filesz, &fpos);
+		if (ret < 0)
+			return ret;
+
+		/* map the ELF header address if in this segment */
+		if (phdr->p_offset == 0)
+			params->elfhdr_addr = seg->addr;
+
+		/* clear any space allocated but not loaded */
+		if (phdr->p_filesz < phdr->p_memsz)
+			clear_user((void *) (seg->addr + phdr->p_filesz),
+				   phdr->p_memsz - phdr->p_filesz);
+
+		if (mm) {
+			if (phdr->p_flags & PF_X) {
+				mm->start_code = seg->addr;
+				mm->end_code = seg->addr + phdr->p_memsz;
+			}
+			else if (!mm->start_data) {
+				mm->start_data = seg->addr;
+#ifndef CONFIG_MMU
+				mm->end_data = seg->addr + phdr->p_memsz;
+#endif
+			}
+
+#ifdef CONFIG_MMU
+			if (seg->addr + phdr->p_memsz > mm->end_data)
+				mm->end_data = seg->addr + phdr->p_memsz;
+#endif
+		}
+
+		seg++;
+	}
+
+	return 0;
+} /* end elf_fdpic_map_file_constdisp_on_uclinux() */
+#endif
+
+/*****************************************************************************/
+/*
+ * map a binary by direct mmap() of the individual PT_LOAD segments
+ */
+static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
+					     struct file *file,
+					     struct mm_struct *mm)
+{
+	struct elf32_fdpic_loadseg *seg;
+	struct elf32_phdr *phdr;
+	unsigned long load_addr, delta_vaddr;
+	int loop, dvset;
+
+	load_addr = params->load_addr;
+	delta_vaddr = 0;
+	dvset = 0;
+
+	seg = params->loadmap->segs;
+
+	/* deal with each load segment separately */
+	phdr = params->phdrs;
+	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
+		unsigned long maddr, disp, excess, excess1;
+		int prot = 0, flags;
+
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx",
+		       (unsigned long) phdr->p_vaddr,
+		       (unsigned long) phdr->p_offset,
+		       (unsigned long) phdr->p_filesz,
+		       (unsigned long) phdr->p_memsz);
+
+		/* determine the mapping parameters */
+		if (phdr->p_flags & PF_R) prot |= PROT_READ;
+		if (phdr->p_flags & PF_W) prot |= PROT_WRITE;
+		if (phdr->p_flags & PF_X) prot |= PROT_EXEC;
+
+		flags = MAP_PRIVATE | MAP_DENYWRITE;
+		if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
+			flags |= MAP_EXECUTABLE;
+
+		maddr = 0;
+
+		switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
+		case ELF_FDPIC_FLAG_INDEPENDENT:
+			/* PT_LOADs are independently locatable */
+			break;
+
+		case ELF_FDPIC_FLAG_HONOURVADDR:
+			/* the specified virtual address must be honoured */
+			maddr = phdr->p_vaddr;
+			flags |= MAP_FIXED;
+			break;
+
+		case ELF_FDPIC_FLAG_CONSTDISP:
+			/* constant displacement
+			 * - can be mapped anywhere, but must be mapped as a unit
+			 */
+			if (!dvset) {
+				maddr = load_addr;
+				delta_vaddr = phdr->p_vaddr;
+				dvset = 1;
+			}
+			else {
+				maddr = load_addr + phdr->p_vaddr - delta_vaddr;
+				flags |= MAP_FIXED;
+			}
+			break;
+
+		case ELF_FDPIC_FLAG_CONTIGUOUS:
+			/* contiguity handled later */
+			break;
+
+		default:
+			BUG();
+		}
+
+		maddr &= PAGE_MASK;
+
+		/* create the mapping */
+		disp = phdr->p_vaddr & ~PAGE_MASK;
+		down_write(&mm->mmap_sem);
+		maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
+				phdr->p_offset - disp);
+		up_write(&mm->mmap_sem);
+
+		kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
+		       loop, phdr->p_memsz + disp, prot, flags, phdr->p_offset - disp,
+		       maddr);
+
+		if (IS_ERR((void *) maddr))
+			return (int) maddr;
+
+		if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_CONTIGUOUS)
+			load_addr += PAGE_ALIGN(phdr->p_memsz + disp);
+
+		seg->addr = maddr + disp;
+		seg->p_vaddr = phdr->p_vaddr;
+		seg->p_memsz = phdr->p_memsz;
+
+		/* map the ELF header address if in this segment */
+		if (phdr->p_offset == 0)
+			params->elfhdr_addr = seg->addr;
+
+		/* clear the bit between beginning of mapping and beginning of PT_LOAD */
+		if (prot & PROT_WRITE && disp > 0) {
+			kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
+			clear_user((void *) maddr, disp);
+			maddr += disp;
+		}
+
+		/* clear any space allocated but not loaded
+		 * - on uClinux we can just clear the lot
+		 * - on MMU linux we'll get a SIGBUS beyond the last page
+		 *   extant in the file
+		 */
+		excess = phdr->p_memsz - phdr->p_filesz;
+		excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
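+		/*
+		 * e.g. with 4KB pages, a page-aligned maddr, p_filesz of
+		 * 0x1800 and p_memsz of 0x3000: excess is 0x1800 and
+		 * excess1 is 0x800, so the anonymous mapping below covers
+		 * the last whole page of bss and clear_user() zeroes the
+		 * 0x800 bytes left in the partially-filled file page.
+		 */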
+
+#ifdef CONFIG_MMU
+
+		if (excess > excess1) {
+			unsigned long xaddr = maddr + phdr->p_filesz + excess1;
+			unsigned long xmaddr;
+
+			flags |= MAP_FIXED | MAP_ANONYMOUS;
+			down_write(&mm->mmap_sem);
+			xmaddr = do_mmap(NULL, xaddr, excess - excess1, prot, flags, 0);
+			up_write(&mm->mmap_sem);
+
+			kdebug("mmap[%d] <anon>"
+			       " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
+			       loop, xaddr, excess - excess1, prot, flags, xmaddr);
+
+			if (xmaddr != xaddr)
+				return -ENOMEM;
+		}
+
+		if (prot & PROT_WRITE && excess1 > 0) {
+			kdebug("clear[%d] ad=%lx sz=%lx",
+			       loop, maddr + phdr->p_filesz, excess1);
+			clear_user((void *) maddr + phdr->p_filesz, excess1);
+		}
+
+#else
+		if (excess > 0) {
+			kdebug("clear[%d] ad=%lx sz=%lx",
+			       loop, maddr + phdr->p_filesz, excess);
+			clear_user((void *) maddr + phdr->p_filesz, excess);
+		}
+#endif
+
+		if (mm) {
+			if (phdr->p_flags & PF_X) {
+				mm->start_code = maddr;
+				mm->end_code = maddr + phdr->p_memsz;
+			}
+			else if (!mm->start_data) {
+				mm->start_data = maddr;
+				mm->end_data = maddr + phdr->p_memsz;
+			}
+		}
+
+		seg++;
+	}
+
+	return 0;
+} /* end elf_fdpic_map_file_by_direct_mmap() */
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
new file mode 100644
index 0000000..1f2d1ad
--- /dev/null
+++ b/fs/binfmt_em86.c
@@ -0,0 +1,115 @@
+/*
+ *  linux/fs/binfmt_em86.c
+ *
+ *  Based on linux/fs/binfmt_script.c
+ *  Copyright (C) 1996  Martin von Löwis
+ *  original #!-checking implemented by tytso.
+ *
+ *  em86 changes Copyright (C) 1997  Jim Paradis
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/errno.h>
+
+
+#define EM86_INTERP	"/usr/bin/em86"
+#define EM86_I_NAME	"em86"
+
+static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
+{
+	char *interp, *i_name, *i_arg;
+	struct file * file;
+	int retval;
+	struct elfhdr	elf_ex;
+
+	/* Make sure this is a Linux/Intel ELF executable... */
+	elf_ex = *((struct elfhdr *)bprm->buf);
+
+	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+		return  -ENOEXEC;
+
+	/* First of all, some simple consistency checks */
+	if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
+		(!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
+		(!bprm->file->f_op || !bprm->file->f_op->mmap)) {
+			return -ENOEXEC;
+	}
+
+	bprm->sh_bang++;	/* Well, the bang-shell is implicit... */
+	allow_write_access(bprm->file);
+	fput(bprm->file);
+	bprm->file = NULL;
+
+	/* Unlike in the script case, we don't have to do any hairy
+	 * parsing to find our interpreter... it's hardcoded!
+	 */
+	interp = EM86_INTERP;
+	i_name = EM86_I_NAME;
+	i_arg = NULL;		/* We reserve the right to add an arg later */
+
+	/*
+	 * Splice in (1) the interpreter's name for argv[0]
+	 *           (2) (optional) argument to interpreter
+	 *           (3) filename of emulated file (replace argv[0])
+	 *
+	 * This is done in reverse order, because of how the
+	 * user environment and arguments are stored.
+	 */
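+	/*
+	 * e.g. an argv of { "./prog", "arg" } becomes
+	 * { "em86", "./prog", "arg" }, plus the optional interpreter
+	 * argument when one is set.
+	 */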
+	remove_arg_zero(bprm);
+	retval = copy_strings_kernel(1, &bprm->filename, bprm);
+	if (retval < 0) return retval; 
+	bprm->argc++;
+	if (i_arg) {
+		retval = copy_strings_kernel(1, &i_arg, bprm);
+		if (retval < 0) return retval; 
+		bprm->argc++;
+	}
+	retval = copy_strings_kernel(1, &i_name, bprm);
+	if (retval < 0)	return retval;
+	bprm->argc++;
+
+	/*
+	 * OK, now restart the process with the interpreter's inode.
+	 * Note that we use open_exec() as the name is now in kernel
+	 * space, and we don't need to copy it.
+	 */
+	file = open_exec(interp);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	bprm->file = file;
+
+	retval = prepare_binprm(bprm);
+	if (retval < 0)
+		return retval;
+
+	return search_binary_handler(bprm, regs);
+}
+
+static struct linux_binfmt em86_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_em86,
+};
+
+static int __init init_em86_binfmt(void)
+{
+	return register_binfmt(&em86_format);
+}
+
+static void __exit exit_em86_binfmt(void)
+{
+	unregister_binfmt(&em86_format);
+}
+
+core_initcall(init_em86_binfmt);
+module_exit(exit_em86_binfmt);
+MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
new file mode 100644
index 0000000..f0cd67d
--- /dev/null
+++ b/fs/binfmt_flat.c
@@ -0,0 +1,901 @@
+/****************************************************************************/
+/*
+ *  linux/fs/binfmt_flat.c
+ *
+ *	Copyright (C) 2000-2003 David McCullough <davidm@snapgear.com>
+ *	Copyright (C) 2002 Greg Ungerer <gerg@snapgear.com>
+ *	Copyright (C) 2002 SnapGear, by Paul Dale <pauli@snapgear.com>
+ *	Copyright (C) 2000, 2001 Lineo, by David McCullough <davidm@lineo.com>
+ *  based heavily on:
+ *
+ *  linux/fs/binfmt_aout.c:
+ *      Copyright (C) 1991, 1992, 1996  Linus Torvalds
+ *  linux/fs/binfmt_flat.c for 2.0 kernel
+ *	    Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>
+ *	JAN/99 -- coded full program relocation (gerg@snapgear.com)
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+#include <linux/flat.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <asm/cacheflush.h>
+
+/****************************************************************************/
+
+#if 0
+#define DEBUG 1
+#endif
+
+#ifdef DEBUG
+#define	DBG_FLT(a...)	printk(a)
+#else
+#define	DBG_FLT(a...)
+#endif
+
+#define RELOC_FAILED 0xff00ff01		/* Relocation incorrect somewhere */
+#define UNLOADED_LIB 0x7ff000ff		/* Placeholder for unused library */
+
+struct lib_info {
+	struct {
+		unsigned long start_code;		/* Start of text segment */
+		unsigned long start_data;		/* Start of data segment */
+		unsigned long start_brk;		/* End of data segment */
+		unsigned long text_len;			/* Length of text segment */
+		unsigned long entry;			/* Start address for this module */
+		unsigned long build_date;		/* When this one was compiled */
+		short loaded;				/* Has this library been loaded? */
+	} lib_list[MAX_SHARED_LIBS];
+};
+
+#ifdef CONFIG_BINFMT_SHARED_FLAT
+static int load_flat_shared_library(int id, struct lib_info *p);
+#endif
+
+static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs);
+static int flat_core_dump(long signr, struct pt_regs * regs, struct file *file);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+static struct linux_binfmt flat_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_flat_binary,
+	.core_dump	= flat_core_dump,
+	.min_coredump	= PAGE_SIZE
+};
+
+/****************************************************************************/
+/*
+ * Routine writes a core dump image in the current directory.
+ * Currently only a stub-function.
+ */
+
+static int flat_core_dump(long signr, struct pt_regs * regs, struct file *file)
+{
+	printk("Process %s:%d received signr %d and should have core dumped\n",
+			current->comm, current->pid, (int) signr);
+	return(1);
+}
+
+/****************************************************************************/
+/*
+ * create_flat_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
+
+static unsigned long create_flat_tables(
+	unsigned long pp,
+	struct linux_binprm * bprm)
+{
+	unsigned long *argv,*envp;
+	unsigned long * sp;
+	char * p = (char*)pp;
+	int argc = bprm->argc;
+	int envc = bprm->envc;
+	char dummy;
+
+	sp = (unsigned long *) ((-(unsigned long)sizeof(char *))&(unsigned long) p);
+
+	sp -= envc+1;
+	envp = sp;
+	sp -= argc+1;
+	argv = sp;
+
+	flat_stack_align(sp);
+	if (flat_argvp_envp_on_stack()) {
+		--sp; put_user((unsigned long) envp, sp);
+		--sp; put_user((unsigned long) argv, sp);
+	}
+
+	put_user(argc,--sp);
+	current->mm->arg_start = (unsigned long) p;
+	while (argc-->0) {
+		put_user((unsigned long) p, argv++);
+		do {
+			get_user(dummy, p); p++;
+		} while (dummy);
+	}
+	put_user((unsigned long) NULL, argv);
+	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+	while (envc-->0) {
+		put_user((unsigned long)p, envp); envp++;
+		do {
+			get_user(dummy, p); p++;
+		} while (dummy);
+	}
+	put_user((unsigned long) NULL, envp);
+	current->mm->env_end = (unsigned long) p;
+	return (unsigned long)sp;
+}
+
+/****************************************************************************/
+
+#ifdef CONFIG_BINFMT_ZFLAT
+
+#include <linux/zlib.h>
+
+#define LBUFSIZE	4000
+
+/* gzip flag byte */
+#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+#define COMMENT      0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+#define RESERVED     0xC0 /* bit 6,7:   reserved */
+
+static int decompress_exec(
+	struct linux_binprm *bprm,
+	unsigned long offset,
+	char *dst,
+	long len,
+	int fd)
+{
+	unsigned char *buf;
+	z_stream strm;
+	loff_t fpos;
+	int ret, retval;
+
+	DBG_FLT("decompress_exec(offset=%x,buf=%x,len=%x)\n",(int)offset, (int)dst, (int)len);
+
+	memset(&strm, 0, sizeof(strm));
+	strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+	if (strm.workspace == NULL) {
+		DBG_FLT("binfmt_flat: no memory for decompress workspace\n");
+		return -ENOMEM;
+	}
+	buf = kmalloc(LBUFSIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		DBG_FLT("binfmt_flat: no memory for read buffer\n");
+		retval = -ENOMEM;
+		goto out_free;
+	}
+
+	/* Read in first chunk of data and parse gzip header. */
+	fpos = offset;
+	ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos);
+
+	strm.next_in = buf;
+	strm.avail_in = ret;
+	strm.total_in = 0;
+
+	retval = -ENOEXEC;
+
+	/* Check minimum size -- gzip header */
+	if (ret < 10) {
+		DBG_FLT("binfmt_flat: file too small?\n");
+		goto out_free_buf;
+	}
+
+	/* Check gzip magic number */
+	if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
+		DBG_FLT("binfmt_flat: unknown compression magic?\n");
+		goto out_free_buf;
+	}
+
+	/* Check gzip method */
+	if (buf[2] != 8) {
+		DBG_FLT("binfmt_flat: unknown compression method?\n");
+		goto out_free_buf;
+	}
+	/* Check gzip flags */
+	if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
+	    (buf[3] & RESERVED)) {
+		DBG_FLT("binfmt_flat: unknown flags?\n");
+		goto out_free_buf;
+	}
+
+	ret = 10;
+	if (buf[3] & EXTRA_FIELD) {
+		ret += 2 + buf[10] + (buf[11] << 8);
+		if (unlikely(LBUFSIZE == ret)) {
+			DBG_FLT("binfmt_flat: buffer overflow (EXTRA)?\n");
+			goto out_free_buf;
+		}
+	}
+	if (buf[3] & ORIG_NAME) {
+		for (; ret < LBUFSIZE && (buf[ret] != 0); ret++)
+			;
+		if (unlikely(LBUFSIZE == ret)) {
+			DBG_FLT("binfmt_flat: buffer overflow (ORIG_NAME)?\n");
+			goto out_free_buf;
+		}
+	}
+	if (buf[3] & COMMENT) {
+		for (;  ret < LBUFSIZE && (buf[ret] != 0); ret++)
+			;
+		if (unlikely(LBUFSIZE == ret)) {
+			DBG_FLT("binfmt_flat: buffer overflow (COMMENT)?\n");
+			goto out_free_buf;
+		}
+	}
+
+	strm.next_in += ret;
+	strm.avail_in -= ret;
+
+	strm.next_out = dst;
+	strm.avail_out = len;
+	strm.total_out = 0;
+
+	if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
+		DBG_FLT("binfmt_flat: zlib init failed?\n");
+		goto out_free_buf;
+	}
+
+	while ((ret = zlib_inflate(&strm, Z_NO_FLUSH)) == Z_OK) {
+		ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos);
+		if (ret <= 0)
+			break;
+		if (ret >= (unsigned long) -4096)
+			break;
+		len -= ret;
+
+		strm.next_in = buf;
+		strm.avail_in = ret;
+		strm.total_in = 0;
+	}
+
+	if (ret < 0) {
+		DBG_FLT("binfmt_flat: decompression failed (%d), %s\n",
+			ret, strm.msg);
+		goto out_zlib;
+	}
+
+	retval = 0;
+out_zlib:
+	zlib_inflateEnd(&strm);
+out_free_buf:
+	kfree(buf);
+out_free:
+	kfree(strm.workspace);
+out:
+	return retval;
+}
+
+#endif /* CONFIG_BINFMT_ZFLAT */
+
+/****************************************************************************/
+
+static unsigned long
+calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
+{
+	unsigned long addr;
+	int id;
+	unsigned long start_brk;
+	unsigned long start_data;
+	unsigned long text_len;
+	unsigned long start_code;
+
+#ifdef CONFIG_BINFMT_SHARED_FLAT
+	if (r == 0)
+		id = curid;	/* Relocs of 0 are always self referring */
+	else {
+		id = (r >> 24) & 0xff;	/* Find ID for this reloc */
+		r &= 0x00ffffff;	/* Trim ID off here */
+	}
+	if (id >= MAX_SHARED_LIBS) {
+		printk("BINFMT_FLAT: reference 0x%x to shared library %d",
+				(unsigned) r, id);
+		goto failed;
+	}
+	if (curid != id) {
+		if (internalp) {
+			printk("BINFMT_FLAT: reloc address 0x%x not in same module "
+					"(%d != %d)", (unsigned) r, curid, id);
+			goto failed;
+		} else if ( ! p->lib_list[id].loaded &&
+				load_flat_shared_library(id, p) > (unsigned long) -4096) {
+			printk("BINFMT_FLAT: failed to load library %d", id);
+			goto failed;
+		}
+		/* Check versioning information (i.e. time stamps) */
+		if (p->lib_list[id].build_date && p->lib_list[curid].build_date &&
+				p->lib_list[curid].build_date < p->lib_list[id].build_date) {
+			printk("BINFMT_FLAT: library %d is younger than %d", id, curid);
+			goto failed;
+		}
+	}
+#else
+	id = 0;
+#endif
+
+	start_brk = p->lib_list[id].start_brk;
+	start_data = p->lib_list[id].start_data;
+	start_code = p->lib_list[id].start_code;
+	text_len = p->lib_list[id].text_len;
+
+	if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
+		printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
+		       (int) r,(int)(start_brk-start_code),(int)text_len);
+		goto failed;
+	}
+
+	if (r < text_len)			/* In text segment */
+		addr = r + start_code;
+	else					/* In data segment */
+		addr = r - text_len + start_data;
+
+	/* Range checked already above so doing the range tests is redundant...*/
+	return(addr);
+
+failed:
+	printk(", killing %s!\n", current->comm);
+	send_sig(SIGSEGV, current, 0);
+
+	return RELOC_FAILED;
+}
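
The address arithmetic in calc_reloc() can be shown in isolation. In the shared-flat case a relocation word carries a library id in its top byte and an offset in the low 24 bits, and the offset is then mapped into either the text or the data segment. A minimal user-space sketch with made-up base addresses:

#include <stdio.h>

#define FLAT_ID(r)      (((r) >> 24) & 0xff)    /* library id, 0 = main program */
#define FLAT_OFFSET(r)  ((r) & 0x00ffffff)      /* offset within that image */

/* Map an image offset to an address, as the tail of calc_reloc() does. */
static unsigned long flat_map(unsigned long off, unsigned long start_code,
                              unsigned long start_data, unsigned long text_len)
{
    return (off < text_len) ? off + start_code              /* text segment */
                            : off - text_len + start_data;  /* data segment */
}

int main(void)
{
    unsigned long r = (3UL << 24) | 0x1234;     /* offset 0x1234 in library 3 */

    printf("id=%lu offset=%#lx\n", FLAT_ID(r), FLAT_OFFSET(r));
    printf("addr=%#lx\n", flat_map(FLAT_OFFSET(r), 0x10000, 0x20000, 0x800));
    return 0;
}
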
+
+/****************************************************************************/
+
+void old_reloc(unsigned long rl)
+{
+#ifdef DEBUG
+	char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
+#endif
+	flat_v2_reloc_t	r;
+	unsigned long *ptr;
+	
+	r.value = rl;
+#if defined(CONFIG_COLDFIRE)
+	ptr = (unsigned long *) (current->mm->start_code + r.reloc.offset);
+#else
+	ptr = (unsigned long *) (current->mm->start_data + r.reloc.offset);
+#endif
+
+#ifdef DEBUG
+	printk("Relocation of variable at DATASEG+%x "
+		"(address %p, currently %x) into segment %s\n",
+		r.reloc.offset, ptr, (int)*ptr, segment[r.reloc.type]);
+#endif
+	
+	switch (r.reloc.type) {
+	case OLD_FLAT_RELOC_TYPE_TEXT:
+		*ptr += current->mm->start_code;
+		break;
+	case OLD_FLAT_RELOC_TYPE_DATA:
+		*ptr += current->mm->start_data;
+		break;
+	case OLD_FLAT_RELOC_TYPE_BSS:
+		*ptr += current->mm->end_data;
+		break;
+	default:
+		printk("BINFMT_FLAT: Unknown relocation type=%x\n", r.reloc.type);
+		break;
+	}
+
+#ifdef DEBUG
+	printk("Relocation became %x\n", (int)*ptr);
+#endif
+}		
+
+/****************************************************************************/
+
+static int load_flat_file(struct linux_binprm * bprm,
+		struct lib_info *libinfo, int id, unsigned long *extra_stack)
+{
+	struct flat_hdr * hdr;
+	unsigned long textpos = 0, datapos = 0, result;
+	unsigned long realdatastart = 0;
+	unsigned long text_len, data_len, bss_len, stack_len, flags;
+	unsigned long memp = 0; /* for finding the brk area */
+	unsigned long extra, rlim;
+	unsigned long *reloc = 0, *rp;
+	struct inode *inode;
+	int i, rev, relocs = 0;
+	loff_t fpos;
+	unsigned long start_code, end_code;
+
+	hdr = ((struct flat_hdr *) bprm->buf);		/* exec-header */
+	inode = bprm->file->f_dentry->d_inode;
+
+	text_len  = ntohl(hdr->data_start);
+	data_len  = ntohl(hdr->data_end) - ntohl(hdr->data_start);
+	bss_len   = ntohl(hdr->bss_end) - ntohl(hdr->data_end);
+	stack_len = ntohl(hdr->stack_size);
+	if (extra_stack) {
+		stack_len += *extra_stack;
+		*extra_stack = stack_len;
+	}
+	relocs    = ntohl(hdr->reloc_count);
+	flags     = ntohl(hdr->flags);
+	rev       = ntohl(hdr->rev);
+
+	if (flags & FLAT_FLAG_KTRACE)
+		printk("BINFMT_FLAT: Loading file: %s\n", bprm->filename);
+
+	if (strncmp(hdr->magic, "bFLT", 4) ||
+			(rev != FLAT_VERSION && rev != OLD_FLAT_VERSION)) {
+		/*
+		 * because a lot of people do not manage to produce good
+		 * flat binaries,  we leave this printk to help them realise
+		 * the problem.  We only print the error if it's not a script file
+		 */
+		if (strncmp(hdr->magic, "#!", 2))
+			printk("BINFMT_FLAT: bad magic/rev (0x%x, need 0x%x)\n",
+					rev, (int) FLAT_VERSION);
+		return -ENOEXEC;
+	}
+	
+	/* Don't allow old format executables to use shared libraries */
+	if (rev == OLD_FLAT_VERSION && id != 0) {
+		printk("BINFMT_FLAT: shared libraries are not available before rev 0x%x\n",
+				(int) FLAT_VERSION);
+		return -ENOEXEC;
+	}
+
+	/*
+	 * fix up the flags for the older format; there were all kinds
+	 * of endian hacks, so this only works for the simple cases
+	 */
+	if (rev == OLD_FLAT_VERSION && flat_old_ram_flag(flags))
+		flags = FLAT_FLAG_RAM;
+
+#ifndef CONFIG_BINFMT_ZFLAT
+	if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
+		printk("Support for ZFLAT executables is not enabled.\n");
+		return -ENOEXEC;
+	}
+#endif
+
+	/*
+	 * Check initial limits. This avoids letting people circumvent
+	 * size limits imposed on them by creating programs with large
+	 * arrays in the data or bss.
+	 */
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim >= RLIM_INFINITY)
+		rlim = ~0;
+	if (data_len + bss_len > rlim)
+		return -ENOMEM;
+
+	/* Flush all traces of the currently running executable */
+	if (id == 0) {
+		result = flush_old_exec(bprm);
+		if (result)
+			return result;
+
+		/* OK, This is the point of no return */
+		set_personality(PER_LINUX);
+	}
+
+	/*
+	 * calculate the extra space we need to map in
+	 */
+	extra = max(bss_len + stack_len, relocs * sizeof(unsigned long));
+
+	/*
+	 * there are a couple of cases here,  the separate code/data
+	 * case,  and then the fully copied to RAM case which lumps
+	 * it all together.
+	 */
+	if ((flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP)) == 0) {
+		/*
+		 * this should give us a ROM ptr,  but if it doesn't we don't
+		 * really care
+		 */
+		DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
+
+		down_write(&current->mm->mmap_sem);
+		textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, 0, 0);
+		up_write(&current->mm->mmap_sem);
+		if (!textpos  || textpos >= (unsigned long) -4096) {
+			if (!textpos)
+				textpos = (unsigned long) -ENOMEM;
+			printk("Unable to mmap process text, errno %d\n", (int)-textpos);
+			return(textpos);
+		}
+
+		down_write(&current->mm->mmap_sem);
+		realdatastart = do_mmap(0, 0, data_len + extra +
+				MAX_SHARED_LIBS * sizeof(unsigned long),
+				PROT_READ|PROT_WRITE|PROT_EXEC, 0, 0);
+		up_write(&current->mm->mmap_sem);
+
+		if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
+			if (!realdatastart)
+				realdatastart = (unsigned long) -ENOMEM;
+			printk("Unable to allocate RAM for process data, errno %d\n",
+					(int)-realdatastart);
+			do_munmap(current->mm, textpos, text_len);
+			return realdatastart;
+		}
+		datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long);
+
+		DBG_FLT("BINFMT_FLAT: Allocated data+bss+stack (%d bytes): %x\n",
+				(int)(data_len + bss_len + stack_len), (int)datapos);
+
+		fpos = ntohl(hdr->data_start);
+#ifdef CONFIG_BINFMT_ZFLAT
+		if (flags & FLAT_FLAG_GZDATA) {
+			result = decompress_exec(bprm, fpos, (char *) datapos, 
+						 data_len + (relocs * sizeof(unsigned long)), 0);
+		} else
+#endif
+		{
+			result = bprm->file->f_op->read(bprm->file, (char *) datapos,
+					data_len + (relocs * sizeof(unsigned long)), &fpos);
+		}
+		if (result >= (unsigned long)-4096) {
+			printk("Unable to read data+bss, errno %d\n", (int)-result);
+			do_munmap(current->mm, textpos, text_len);
+			do_munmap(current->mm, realdatastart, data_len + extra);
+			return result;
+		}
+
+		reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
+		memp = realdatastart;
+
+	} else {
+
+		down_write(&current->mm->mmap_sem);
+		textpos = do_mmap(0, 0, text_len + data_len + extra +
+					MAX_SHARED_LIBS * sizeof(unsigned long),
+				PROT_READ | PROT_EXEC | PROT_WRITE, 0, 0);
+		up_write(&current->mm->mmap_sem);
+		if (!textpos  || textpos >= (unsigned long) -4096) {
+			if (!textpos)
+				textpos = (unsigned long) -ENOMEM;
+			printk("Unable to allocate RAM for process text/data, errno %d\n",
+					(int)-textpos);
+			return(textpos);
+		}
+
+		realdatastart = textpos + ntohl(hdr->data_start);
+		datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long);
+		reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) +
+				MAX_SHARED_LIBS * sizeof(unsigned long));
+		memp = textpos;
+
+#ifdef CONFIG_BINFMT_ZFLAT
+		/*
+		 * load it all in and treat it like a RAM load from now on
+		 */
+		if (flags & FLAT_FLAG_GZIP) {
+			result = decompress_exec(bprm, sizeof (struct flat_hdr),
+					 (((char *) textpos) + sizeof (struct flat_hdr)),
+					 (text_len + data_len + (relocs * sizeof(unsigned long))
+						  - sizeof (struct flat_hdr)),
+					 0);
+			memmove((void *) datapos, (void *) realdatastart,
+					data_len + (relocs * sizeof(unsigned long)));
+		} else if (flags & FLAT_FLAG_GZDATA) {
+			fpos = 0;
+			result = bprm->file->f_op->read(bprm->file,
+					(char *) textpos, text_len, &fpos);
+			if (result < (unsigned long) -4096)
+				result = decompress_exec(bprm, text_len, (char *) datapos,
+						 data_len + (relocs * sizeof(unsigned long)), 0);
+		}
+		else
+#endif
+		{
+			fpos = 0;
+			result = bprm->file->f_op->read(bprm->file,
+					(char *) textpos, text_len, &fpos);
+			if (result < (unsigned long) -4096) {
+				fpos = ntohl(hdr->data_start);
+				result = bprm->file->f_op->read(bprm->file, (char *) datapos,
+					data_len + (relocs * sizeof(unsigned long)), &fpos);
+			}
+		}
+		if (result >= (unsigned long)-4096) {
+			printk("Unable to read code+data+bss, errno %d\n",(int)-result);
+			do_munmap(current->mm, textpos, text_len + data_len + extra +
+				MAX_SHARED_LIBS * sizeof(unsigned long));
+			return result;
+		}
+	}
+
+	if (flags & FLAT_FLAG_KTRACE)
+		printk("Mapping is %x, Entry point is %x, data_start is %x\n",
+			(int)textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
+
+	/* The main program needs a little extra setup in the task structure */
+	start_code = textpos + sizeof (struct flat_hdr);
+	end_code = textpos + text_len;
+	if (id == 0) {
+		current->mm->start_code = start_code;
+		current->mm->end_code = end_code;
+		current->mm->start_data = datapos;
+		current->mm->end_data = datapos + data_len;
+		/*
+		 * set up the brk stuff, uses any slack left in data/bss/stack
+		 * allocation.  We put the brk after the bss (between the bss
+		 * and stack) like other platforms.
+		 */
+		current->mm->start_brk = datapos + data_len + bss_len;
+		current->mm->brk = (current->mm->start_brk + 3) & ~3;
+		current->mm->context.end_brk = memp + ksize((void *) memp) - stack_len;
+		set_mm_counter(current->mm, rss, 0);
+	}
+
+	if (flags & FLAT_FLAG_KTRACE)
+		printk("%s %s: TEXT=%x-%x DATA=%x-%x BSS=%x-%x\n",
+			id ? "Lib" : "Load", bprm->filename,
+			(int) start_code, (int) end_code,
+			(int) datapos,
+			(int) (datapos + data_len),
+			(int) (datapos + data_len),
+			(int) (((datapos + data_len + bss_len) + 3) & ~3));
+
+	text_len -= sizeof(struct flat_hdr); /* the real code len */
+
+	/* Store the current module values into the global library structure */
+	libinfo->lib_list[id].start_code = start_code;
+	libinfo->lib_list[id].start_data = datapos;
+	libinfo->lib_list[id].start_brk = datapos + data_len + bss_len;
+	libinfo->lib_list[id].text_len = text_len;
+	libinfo->lib_list[id].loaded = 1;
+	libinfo->lib_list[id].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
+	libinfo->lib_list[id].build_date = ntohl(hdr->build_date);
+	
+	/*
+	 * We just load the allocations into some temporary memory to
+	 * help simplify all this mumbo jumbo
+	 *
+	 * We've got two different sections of relocation entries.
+	 * The first is the GOT which resides at the beginning of the data segment
+	 * and is terminated with a -1.  This one can be relocated in place.
+	 * The second is the extra relocation entries tacked after the image's
+	 * data segment. These require a little more processing as the entry is
+	 * really an offset into the image which contains an offset into the
+	 * image.
+	 */
+	if (flags & FLAT_FLAG_GOTPIC) {
+		for (rp = (unsigned long *)datapos; *rp != 0xffffffff; rp++) {
+			unsigned long addr;
+			if (*rp) {
+				addr = calc_reloc(*rp, libinfo, id, 0);
+				if (addr == RELOC_FAILED)
+					return -ENOEXEC;
+				*rp = addr;
+			}
+		}
+	}
+
+	/*
+	 * Now run through the relocation entries.
+	 * We've got to be careful here as C++ produces relocatable zero
+	 * entries in the constructor and destructor tables which are then
+	 * tested for being not zero (which will always occur unless we're
+	 * based from address zero).  This causes an endless loop as __start
+	 * is at zero.  The solution used is to not relocate zero addresses.
+	 * This has the negative side effect of not allowing a global data
+	 * reference to be statically initialised to _stext (I've moved
+	 * __start to address 4 so that is okay).
+	 */
+	if (rev > OLD_FLAT_VERSION) {
+		for (i=0; i < relocs; i++) {
+			unsigned long addr, relval;
+
+			/* Get the address of the pointer to be
+			   relocated (of course, the address has to be
+			   relocated first).  */
+			relval = ntohl(reloc[i]);
+			addr = flat_get_relocate_addr(relval);
+			rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
+			if (rp == (unsigned long *)RELOC_FAILED)
+				return -ENOEXEC;
+
+			/* Get the pointer's value.  */
+			addr = flat_get_addr_from_rp(rp, relval, flags);
+			if (addr != 0) {
+				/*
+				 * Do the relocation.  PIC relocs in the data section are
+				 * already in target order
+				 */
+				if ((flags & FLAT_FLAG_GOTPIC) == 0)
+					addr = ntohl(addr);
+				addr = calc_reloc(addr, libinfo, id, 0);
+				if (addr == RELOC_FAILED)
+					return -ENOEXEC;
+
+				/* Write back the relocated pointer.  */
+				flat_put_addr_at_rp(rp, addr, relval);
+			}
+		}
+	} else {
+		for (i=0; i < relocs; i++)
+			old_reloc(ntohl(reloc[i]));
+	}
+	
+	flush_icache_range(start_code, end_code);
+
+	/* zero the BSS,  BRK and stack areas */
+	memset((void*)(datapos + data_len), 0, bss_len + 
+			(memp + ksize((void *) memp) - stack_len -	/* end brk */
+			libinfo->lib_list[id].start_brk) +		/* start brk */
+			stack_len);
+
+	return 0;
+}
+
+
+/****************************************************************************/
+#ifdef CONFIG_BINFMT_SHARED_FLAT
+
+/*
+ * Load a shared library into memory.  The library gets its own data
+ * segment (including bss) but not argv/argc/environ.
+ */
+
+static int load_flat_shared_library(int id, struct lib_info *libs)
+{
+	struct linux_binprm bprm;
+	int res;
+	char buf[16];
+
+	/* Create the file name */
+	sprintf(buf, "/lib/lib%d.so", id);
+
+	/* Open the file up */
+	bprm.filename = buf;
+	bprm.file = open_exec(bprm.filename);
+	res = PTR_ERR(bprm.file);
+	if (IS_ERR(bprm.file))
+		return res;
+
+	res = prepare_binprm(&bprm);
+
+	if (res <= (unsigned long)-4096)
+		res = load_flat_file(&bprm, libs, id, NULL);
+	if (bprm.file) {
+		allow_write_access(bprm.file);
+		fput(bprm.file);
+		bprm.file = NULL;
+	}
+	return(res);
+}
+
+#endif /* CONFIG_BINFMT_SHARED_FLAT */
+/****************************************************************************/
+
+/*
+ * These are the functions used to load flat style executables and shared
+ * libraries.  There is no binary dependent code anywhere else.
+ */
+
+static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	struct lib_info libinfo;
+	unsigned long p = bprm->p;
+	unsigned long stack_len;
+	unsigned long start_addr;
+	unsigned long *sp;
+	int res;
+	int i, j;
+
+	memset(&libinfo, 0, sizeof(libinfo));
+	/*
+	 * We have to add the size of our arguments to our stack size
+	 * otherwise it's too easy for users to create stack overflows
+	 * by passing in a huge argument list.  And yes,  we have to be
+	 * pedantic and include space for the argv/envp array as it may have
+	 * a lot of entries.
+	 */
+#define TOP_OF_ARGS (PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *))
+	stack_len = TOP_OF_ARGS - bprm->p;             /* the strings */
+	stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
+	stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
+
+	
+	res = load_flat_file(bprm, &libinfo, 0, &stack_len);
+	if (res > (unsigned long)-4096)
+		return res;
+	
+	/* Update data segment pointers for all libraries */
+	for (i=0; i<MAX_SHARED_LIBS; i++)
+		if (libinfo.lib_list[i].loaded)
+			for (j=0; j<MAX_SHARED_LIBS; j++)
+				(-(j+1))[(unsigned long *)(libinfo.lib_list[i].start_data)] =
+					(libinfo.lib_list[j].loaded)?
+						libinfo.lib_list[j].start_data:UNLOADED_LIB;
+
+	compute_creds(bprm);
+ 	current->flags &= ~PF_FORKNOEXEC;
+
+	set_binfmt(&flat_format);
+
+	p = ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
+	DBG_FLT("p=%x\n", (int)p);
+
+	/* copy the arg pages onto the stack, this could be more efficient :-) */
+	for (i = TOP_OF_ARGS - 1; i >= bprm->p; i--)
+		* (char *) --p =
+			((char *) page_address(bprm->page[i/PAGE_SIZE]))[i % PAGE_SIZE];
+
+	sp = (unsigned long *) create_flat_tables(p, bprm);
+	
+	/* Fake some return addresses to ensure the call chain will
+	 * initialise the libraries in order for us.  We are required to call
+	 * lib 1 first, then 2, ... and finally the main program (id 0).
+	 */
+	start_addr = libinfo.lib_list[0].entry;
+
+#ifdef CONFIG_BINFMT_SHARED_FLAT
+	for (i = MAX_SHARED_LIBS-1; i>0; i--) {
+		if (libinfo.lib_list[i].loaded) {
+			/* Push the previous entry point as the return address for this one */
+			--sp;	put_user(start_addr, sp);
+			start_addr = libinfo.lib_list[i].entry;
+		}
+	}
+#endif
+	
+	/* Stash our initial stack pointer into the mm structure */
+	current->mm->start_stack = (unsigned long )sp;
+
+	
+	DBG_FLT("start_thread(regs=0x%x, entry=0x%x, start_stack=0x%x)\n",
+		(int)regs, (int)start_addr, (int)current->mm->start_stack);
+	
+	start_thread(regs, start_addr, current->mm->start_stack);
+
+	if (current->ptrace & PT_PTRACED)
+		send_sig(SIGTRAP, current, 0);
+
+	return 0;
+}
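
One construct near the top of load_flat_binary() deserves a note: the data-segment fixup loop writes through (-(j+1))[ptr], which is just an unusual spelling of ptr[-(j+1)]; it fills the MAX_SHARED_LIBS slots reserved immediately below each data segment with every library's data-segment start (or UNLOADED_LIB). A stand-alone demonstration with made-up values:

#include <stdio.h>

int main(void)
{
    unsigned long slots[4] = { 0, 0, 0, 0 };
    unsigned long *start_data = &slots[4];  /* pretend data-segment base */
    int j = 1;

    /* identical to start_data[-(j + 1)] */
    (-(j + 1))[start_data] = 0xdeadbeefUL;
    printf("slots[2] = %#lx\n", slots[2]);  /* prints 0xdeadbeef */
    return 0;
}
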
+
+/****************************************************************************/
+
+static int __init init_flat_binfmt(void)
+{
+	return register_binfmt(&flat_format);
+}
+
+static void __exit exit_flat_binfmt(void)
+{
+	unregister_binfmt(&flat_format);
+}
+
+/****************************************************************************/
+
+core_initcall(init_flat_binfmt);
+module_exit(exit_flat_binfmt);
+
+/****************************************************************************/
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
new file mode 100644
index 0000000..8ae0db6
--- /dev/null
+++ b/fs/binfmt_misc.c
@@ -0,0 +1,780 @@
+/*
+ *  binfmt_misc.c
+ *
+ *  Copyright (C) 1997 Richard Günther
+ *
+ *  binfmt_misc detects binaries via a magic or filename extension and invokes
+ *  a specified wrapper. This should obsolete binfmt_java, binfmt_em86 and
+ *  binfmt_mz.
+ *
+ *  1997-04-25 first version
+ *  [...]
+ *  1997-05-19 cleanup
+ *  1997-06-26 hpa: pass the real filename rather than argv[0]
+ *  1997-06-30 minor cleanup
+ *  1997-08-09 removed extension stripping, locking cleanup
+ *  2001-02-28 AV: rewritten into something that resembles C. Original didn't.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/binfmts.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+
+enum {
+	VERBOSE_STATUS = 1 /* make it zero to save 400 bytes kernel memory */
+};
+
+static LIST_HEAD(entries);
+static int enabled = 1;
+
+enum {Enabled, Magic};
+#define MISC_FMT_PRESERVE_ARGV0 (1<<31)
+#define MISC_FMT_OPEN_BINARY (1<<30)
+#define MISC_FMT_CREDENTIALS (1<<29)
+
+typedef struct {
+	struct list_head list;
+	unsigned long flags;		/* type, status, etc. */
+	int offset;			/* offset of magic */
+	int size;			/* size of magic/mask */
+	char *magic;			/* magic or filename extension */
+	char *mask;			/* mask, NULL for exact match */
+	char *interpreter;		/* filename of interpreter */
+	char *name;
+	struct dentry *dentry;
+} Node;
+
+static DEFINE_RWLOCK(entries_lock);
+static struct vfsmount *bm_mnt;
+static int entry_count;
+
+/* 
+ * Check if we support the binfmt
+ * if we do, return the node, else NULL
+ * locking is done in load_misc_binary
+ */
+static Node *check_file(struct linux_binprm *bprm)
+{
+	char *p = strrchr(bprm->interp, '.');
+	struct list_head *l;
+
+	list_for_each(l, &entries) {
+		Node *e = list_entry(l, Node, list);
+		char *s;
+		int j;
+
+		if (!test_bit(Enabled, &e->flags))
+			continue;
+
+		if (!test_bit(Magic, &e->flags)) {
+			if (p && !strcmp(e->magic, p + 1))
+				return e;
+			continue;
+		}
+
+		s = bprm->buf + e->offset;
+		if (e->mask) {
+			for (j = 0; j < e->size; j++)
+				if ((*s++ ^ e->magic[j]) & e->mask[j])
+					break;
+		} else {
+			for (j = 0; j < e->size; j++)
+				if ((*s++ ^ e->magic[j]))
+					break;
+		}
+		if (j == e->size)
+			return e;
+	}
+	return NULL;
+}
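
The matching rule applied by check_file() in isolation, as a user-space sketch: a buffer matches a registered entry when every byte agrees with the magic under the mask, and a missing mask means an exact comparison.

#include <stddef.h>
#include <stdio.h>

static int magic_matches(const unsigned char *buf, const unsigned char *magic,
                         const unsigned char *mask, size_t size)
{
    size_t j;

    for (j = 0; j < size; j++) {
        unsigned char m = mask ? mask[j] : 0xff;    /* no mask: exact match */

        if ((buf[j] ^ magic[j]) & m)
            return 0;
    }
    return 1;
}

int main(void)
{
    unsigned char elf[] = { 0x7f, 'E', 'L', 'F' };

    printf("%d\n", magic_matches(elf, (const unsigned char *)"\x7f" "ELF",
                                 NULL, 4));         /* prints 1 */
    return 0;
}
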
+
+/*
+ * the loader itself
+ */
+static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+{
+	Node *fmt;
+	struct file * interp_file = NULL;
+	char iname[BINPRM_BUF_SIZE];
+	char *iname_addr = iname;
+	int retval;
+	int fd_binary = -1;
+	struct files_struct *files = NULL;
+
+	retval = -ENOEXEC;
+	if (!enabled)
+		goto _ret;
+
+	/* to keep locking time low, we copy the interpreter string */
+	read_lock(&entries_lock);
+	fmt = check_file(bprm);
+	if (fmt)
+		strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE);
+	read_unlock(&entries_lock);
+	if (!fmt)
+		goto _ret;
+
+	if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
+		remove_arg_zero(bprm);
+	}
+
+	if (fmt->flags & MISC_FMT_OPEN_BINARY) {
+
+		files = current->files;
+		retval = unshare_files();
+		if (retval < 0)
+			goto _ret;
+		if (files == current->files) {
+			put_files_struct(files);
+			files = NULL;
+		}
+		/* if the binary should be opened on behalf of the
+		 * interpreter then keep it open and assign a descriptor
+		 * to it */
+ 		fd_binary = get_unused_fd();
+ 		if (fd_binary < 0) {
+ 			retval = fd_binary;
+ 			goto _unshare;
+ 		}
+ 		fd_install(fd_binary, bprm->file);
+
+		/* if the binary is not readable then enforce mm->dumpable=0
+		   regardless of the interpreter's permissions */
+		if (permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL))
+			bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+
+		allow_write_access(bprm->file);
+		bprm->file = NULL;
+
+		/* mark the bprm that fd should be passed to interp */
+		bprm->interp_flags |= BINPRM_FLAGS_EXECFD;
+		bprm->interp_data = fd_binary;
+
+ 	} else {
+ 		allow_write_access(bprm->file);
+ 		fput(bprm->file);
+ 		bprm->file = NULL;
+ 	}
+	/* make argv[1] be the path to the binary */
+	retval = copy_strings_kernel (1, &bprm->interp, bprm);
+	if (retval < 0)
+		goto _error;
+	bprm->argc++;
+
+	/* add the interp as argv[0] */
+	retval = copy_strings_kernel (1, &iname_addr, bprm);
+	if (retval < 0)
+		goto _error;
+	bprm->argc ++;
+
+	bprm->interp = iname;	/* for binfmt_script */
+
+	interp_file = open_exec (iname);
+	retval = PTR_ERR (interp_file);
+	if (IS_ERR (interp_file))
+		goto _error;
+
+	bprm->file = interp_file;
+	if (fmt->flags & MISC_FMT_CREDENTIALS) {
+		/*
+		 * No need to call prepare_binprm(), it's already been
+		 * done.  bprm->buf is stale, update from interp_file.
+		 */
+		memset(bprm->buf, 0, BINPRM_BUF_SIZE);
+		retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
+	} else
+		retval = prepare_binprm (bprm);
+
+	if (retval < 0)
+		goto _error;
+
+	retval = search_binary_handler (bprm, regs);
+	if (retval < 0)
+		goto _error;
+
+	if (files) {
+		steal_locks(files);
+		put_files_struct(files);
+		files = NULL;
+	}
+_ret:
+	return retval;
+_error:
+	if (fd_binary > 0)
+		sys_close(fd_binary);
+	bprm->interp_flags = 0;
+	bprm->interp_data = 0;
+_unshare:
+	if (files) {
+		put_files_struct(current->files);
+		current->files = files;
+	}
+	goto _ret;
+}
+
+/* Command parsers */
+
+/*
+ * scans one argument terminated by del, recognising the \xHH escape.
+ * Returns a pointer just past the delimiter, or NULL if a \x escape
+ * is not followed by two hex digits.
+ */
+static char *scanarg(char *s, char del)
+{
+	char c;
+
+	while ((c = *s++) != del) {
+		if (c == '\\' && *s == 'x') {
+			s++;
+			if (!isxdigit(*s++))
+				return NULL;
+			if (!isxdigit(*s++))
+				return NULL;
+		}
+	}
+	return s;
+}
+
+static int unquote(char *from)
+{
+	char c = 0, *s = from, *p = from;
+
+	while ((c = *s++) != '\0') {
+		if (c == '\\' && *s == 'x') {
+			s++;
+			c = toupper(*s++);
+			*p = (c - (isdigit(c) ? '0' : 'A' - 10)) << 4;
+			c = toupper(*s++);
+			*p++ |= c - (isdigit(c) ? '0' : 'A' - 10);
+			continue;
+		}
+		*p++ = c;
+	}
+	return p - from;
+}
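
The \xHH decoding performed in place by unquote() can be sketched in user space as follows, decoding into a separate buffer and, like the kernel code, assuming scanarg() already validated that two hex digits follow each escape:

#include <ctype.h>
#include <stdio.h>

static int unquote_copy(const char *src, unsigned char *dst)
{
    int n = 0;
    char c;

    while ((c = *src++) != '\0') {
        if (c == '\\' && *src == 'x') {     /* \xHH escape */
            int hi, lo;

            src++;
            hi = toupper((unsigned char)*src++);
            lo = toupper((unsigned char)*src++);
            dst[n++] = ((hi - (isdigit(hi) ? '0' : 'A' - 10)) << 4) |
                        (lo - (isdigit(lo) ? '0' : 'A' - 10));
            continue;
        }
        dst[n++] = (unsigned char)c;        /* ordinary character */
    }
    return n;                               /* decoded length */
}

int main(void)
{
    unsigned char buf[16];
    int i, n = unquote_copy("\\x7f\\x45\\x4c\\x46", buf);

    for (i = 0; i < n; i++)
        printf("%02x ", buf[i]);
    printf("\n");                           /* prints: 7f 45 4c 46 */
    return 0;
}
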
+
+static inline char * check_special_flags (char * sfs, Node * e)
+{
+	char * p = sfs;
+	int cont = 1;
+
+	/* special flags */
+	while (cont) {
+		switch (*p) {
+			case 'P':
+				p++;
+				e->flags |= MISC_FMT_PRESERVE_ARGV0;
+				break;
+			case 'O':
+				p++;
+				e->flags |= MISC_FMT_OPEN_BINARY;
+				break;
+			case 'C':
+				p++;
+				/* this flag also implies the
+				   open-binary flag */
+				e->flags |= (MISC_FMT_CREDENTIALS |
+						MISC_FMT_OPEN_BINARY);
+				break;
+			default:
+				cont = 0;
+		}
+	}
+
+	return p;
+}
+/*
+ * This registers a new binary format.  It recognises the syntax
+ * ':name:type:offset:magic:mask:interpreter:flags'
+ * where ':' is the field separator, which can be chosen with the first char
+ */
+static Node *create_entry(const char __user *buffer, size_t count)
+{
+	Node *e;
+	int memsize, err;
+	char *buf, *p;
+	char del;
+
+	/* some sanity checks */
+	err = -EINVAL;
+	if ((count < 11) || (count > 256))
+		goto out;
+
+	err = -ENOMEM;
+	memsize = sizeof(Node) + count + 8;
+	e = (Node *) kmalloc(memsize, GFP_USER);
+	if (!e)
+		goto out;
+
+	p = buf = (char *)e + sizeof(Node);
+
+	memset(e, 0, sizeof(Node));
+	if (copy_from_user(buf, buffer, count))
+		goto Efault;
+
+	del = *p++;	/* delimiter */
+
+	memset(buf+count, del, 8);
+
+	e->name = p;
+	p = strchr(p, del);
+	if (!p)
+		goto Einval;
+	*p++ = '\0';
+	if (!e->name[0] ||
+	    !strcmp(e->name, ".") ||
+	    !strcmp(e->name, "..") ||
+	    strchr(e->name, '/'))
+		goto Einval;
+	switch (*p++) {
+		case 'E': e->flags = 1<<Enabled; break;
+		case 'M': e->flags = (1<<Enabled) | (1<<Magic); break;
+		default: goto Einval;
+	}
+	if (*p++ != del)
+		goto Einval;
+	if (test_bit(Magic, &e->flags)) {
+		char *s = strchr(p, del);
+		if (!s)
+			goto Einval;
+		*s++ = '\0';
+		e->offset = simple_strtoul(p, &p, 10);
+		if (*p++)
+			goto Einval;
+		e->magic = p;
+		p = scanarg(p, del);
+		if (!p)
+			goto Einval;
+		p[-1] = '\0';
+		if (!e->magic[0])
+			goto Einval;
+		e->mask = p;
+		p = scanarg(p, del);
+		if (!p)
+			goto Einval;
+		p[-1] = '\0';
+		if (!e->mask[0])
+			e->mask = NULL;
+		e->size = unquote(e->magic);
+		if (e->mask && unquote(e->mask) != e->size)
+			goto Einval;
+		if (e->size + e->offset > BINPRM_BUF_SIZE)
+			goto Einval;
+	} else {
+		p = strchr(p, del);
+		if (!p)
+			goto Einval;
+		*p++ = '\0';
+		e->magic = p;
+		p = strchr(p, del);
+		if (!p)
+			goto Einval;
+		*p++ = '\0';
+		if (!e->magic[0] || strchr(e->magic, '/'))
+			goto Einval;
+		p = strchr(p, del);
+		if (!p)
+			goto Einval;
+		*p++ = '\0';
+	}
+	e->interpreter = p;
+	p = strchr(p, del);
+	if (!p)
+		goto Einval;
+	*p++ = '\0';
+	if (!e->interpreter[0])
+		goto Einval;
+
+
+	p = check_special_flags (p, e);
+
+	if (*p == '\n')
+		p++;
+	if (p != buf + count)
+		goto Einval;
+	return e;
+
+out:
+	return ERR_PTR(err);
+
+Efault:
+	kfree(e);
+	return ERR_PTR(-EFAULT);
+Einval:
+	kfree(e);
+	return ERR_PTR(-EINVAL);
+}
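
For context: a rule in the syntax parsed above is registered from user space by writing it to the 'register' file of a mounted binfmt_misc instance, commonly /proc/sys/fs/binfmt_misc. In the sketch below the handler name, extension and interpreter path are invented.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* extension rule ('E'): run files ending in ".dos" through a
     * hypothetical /usr/local/bin/dosemu interpreter */
    const char *rule = ":dosprog:E::dos::/usr/local/bin/dosemu:\n";
    int fd = open("/proc/sys/fs/binfmt_misc/register", O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (write(fd, rule, strlen(rule)) != (ssize_t)strlen(rule))
        perror("write");
    close(fd);
    return 0;
}
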
+
+/*
+ * Set status of entry/binfmt_misc:
+ * '1' enables, '0' disables and '-1' clears entry/binfmt_misc
+ */
+static int parse_command(const char __user *buffer, size_t count)
+{
+	char s[4];
+
+	if (!count)
+		return 0;
+	if (count > 3)
+		return -EINVAL;
+	if (copy_from_user(s, buffer, count))
+		return -EFAULT;
+	if (s[count-1] == '\n')
+		count--;
+	if (count == 1 && s[0] == '0')
+		return 1;
+	if (count == 1 && s[0] == '1')
+		return 2;
+	if (count == 2 && s[0] == '-' && s[1] == '1')
+		return 3;
+	return -EINVAL;
+}
+
+/* generic stuff */
+
+static void entry_status(Node *e, char *page)
+{
+	char *dp;
+	char *status = "disabled";
+	const char * flags = "flags: ";
+
+	if (test_bit(Enabled, &e->flags))
+		status = "enabled";
+
+	if (!VERBOSE_STATUS) {
+		sprintf(page, "%s\n", status);
+		return;
+	}
+
+	sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter);
+	dp = page + strlen(page);
+
+	/* print the special flags */
+	sprintf (dp, "%s", flags);
+	dp += strlen (flags);
+	if (e->flags & MISC_FMT_PRESERVE_ARGV0) {
+		*dp ++ = 'P';
+	}
+	if (e->flags & MISC_FMT_OPEN_BINARY) {
+		*dp ++ = 'O';
+	}
+	if (e->flags & MISC_FMT_CREDENTIALS) {
+		*dp ++ = 'C';
+	}
+	*dp ++ = '\n';
+
+
+	if (!test_bit(Magic, &e->flags)) {
+		sprintf(dp, "extension .%s\n", e->magic);
+	} else {
+		int i;
+
+		sprintf(dp, "offset %i\nmagic ", e->offset);
+		dp = page + strlen(page);
+		for (i = 0; i < e->size; i++) {
+			sprintf(dp, "%02x", 0xff & (int) (e->magic[i]));
+			dp += 2;
+		}
+		if (e->mask) {
+			sprintf(dp, "\nmask ");
+			dp += 6;
+			for (i = 0; i < e->size; i++) {
+				sprintf(dp, "%02x", 0xff & (int) (e->mask[i]));
+				dp += 2;
+			}
+		}
+		*dp++ = '\n';
+		*dp = '\0';
+	}
+}
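
Reading an entry file back then produces output along these lines (a hypothetical entry with a two-byte magic of "MZ" at offset 0, the 'P' flag set, and an invented interpreter path):

    enabled
    interpreter /usr/local/bin/dosemu
    flags: P
    offset 0
    magic 4d5a
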
+
+static struct inode *bm_get_inode(struct super_block *sb, int mode)
+{
+	struct inode * inode = new_inode(sb);
+
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = 0;
+		inode->i_gid = 0;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime =
+			current_fs_time(inode->i_sb);
+	}
+	return inode;
+}
+
+static void bm_clear_inode(struct inode *inode)
+{
+	kfree(inode->u.generic_ip);
+}
+
+static void kill_node(Node *e)
+{
+	struct dentry *dentry;
+
+	write_lock(&entries_lock);
+	dentry = e->dentry;
+	if (dentry) {
+		list_del_init(&e->list);
+		e->dentry = NULL;
+	}
+	write_unlock(&entries_lock);
+
+	if (dentry) {
+		dentry->d_inode->i_nlink--;
+		d_drop(dentry);
+		dput(dentry);
+		simple_release_fs(&bm_mnt, &entry_count);
+	}
+}
+
+/* /<entry> */
+
+static ssize_t
+bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+{
+	Node *e = file->f_dentry->d_inode->u.generic_ip;
+	loff_t pos = *ppos;
+	ssize_t res;
+	char *page;
+	int len;
+
+	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	entry_status(e, page);
+	len = strlen(page);
+
+	res = -EINVAL;
+	if (pos < 0)
+		goto out;
+	res = 0;
+	if (pos >= len)
+		goto out;
+	if (len < pos + nbytes)
+		nbytes = len - pos;
+	res = -EFAULT;
+	if (copy_to_user(buf, page + pos, nbytes))
+		goto out;
+	*ppos = pos + nbytes;
+	res = nbytes;
+out:
+	free_page((unsigned long) page);
+	return res;
+}
+
+static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	struct dentry *root;
+	Node *e = file->f_dentry->d_inode->u.generic_ip;
+	int res = parse_command(buffer, count);
+
+	switch (res) {
+		case 1: clear_bit(Enabled, &e->flags);
+			break;
+		case 2: set_bit(Enabled, &e->flags);
+			break;
+		case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
+			down(&root->d_inode->i_sem);
+
+			kill_node(e);
+
+			up(&root->d_inode->i_sem);
+			dput(root);
+			break;
+		default: return res;
+	}
+	return count;
+}
+
+static struct file_operations bm_entry_operations = {
+	.read		= bm_entry_read,
+	.write		= bm_entry_write,
+};
+
+/* /register */
+
+static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+			       size_t count, loff_t *ppos)
+{
+	Node *e;
+	struct inode *inode;
+	struct dentry *root, *dentry;
+	struct super_block *sb = file->f_vfsmnt->mnt_sb;
+	int err = 0;
+
+	e = create_entry(buffer, count);
+
+	if (IS_ERR(e))
+		return PTR_ERR(e);
+
+	root = dget(sb->s_root);
+	down(&root->d_inode->i_sem);
+	dentry = lookup_one_len(e->name, root, strlen(e->name));
+	err = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		goto out;
+
+	err = -EEXIST;
+	if (dentry->d_inode)
+		goto out2;
+
+	inode = bm_get_inode(sb, S_IFREG | 0644);
+
+	err = -ENOMEM;
+	if (!inode)
+		goto out2;
+
+	err = simple_pin_fs("binfmt_misc", &bm_mnt, &entry_count);
+	if (err) {
+		iput(inode);
+		inode = NULL;
+		goto out2;
+	}
+
+	e->dentry = dget(dentry);
+	inode->u.generic_ip = e;
+	inode->i_fop = &bm_entry_operations;
+
+	d_instantiate(dentry, inode);
+	write_lock(&entries_lock);
+	list_add(&e->list, &entries);
+	write_unlock(&entries_lock);
+
+	err = 0;
+out2:
+	dput(dentry);
+out:
+	up(&root->d_inode->i_sem);
+	dput(root);
+
+	if (err) {
+		kfree(e);
+		return -EINVAL;
+	}
+	return count;
+}
+
+static struct file_operations bm_register_operations = {
+	.write		= bm_register_write,
+};
+
+/* /status */
+
+static ssize_t
+bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	char *s = enabled ? "enabled" : "disabled";
+	int len = strlen(s);
+	loff_t pos = *ppos;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= len)
+		return 0;
+	if (len < pos + nbytes)
+		nbytes = len - pos;
+	if (copy_to_user(buf, s + pos, nbytes))
+		return -EFAULT;
+	*ppos = pos + nbytes;
+	return nbytes;
+}
+
+static ssize_t bm_status_write(struct file * file, const char __user * buffer,
+		size_t count, loff_t *ppos)
+{
+	int res = parse_command(buffer, count);
+	struct dentry *root;
+
+	switch (res) {
+		case 1: enabled = 0; break;
+		case 2: enabled = 1; break;
+		case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
+			down(&root->d_inode->i_sem);
+
+			while (!list_empty(&entries))
+				kill_node(list_entry(entries.next, Node, list));
+
+			up(&root->d_inode->i_sem);
+			dput(root);
+		default: return res;
+	}
+	return count;
+}
+
+static struct file_operations bm_status_operations = {
+	.read		= bm_status_read,
+	.write		= bm_status_write,
+};
+
+/* Superblock handling */
+
+static struct super_operations s_ops = {
+	.statfs		= simple_statfs,
+	.clear_inode	= bm_clear_inode,
+};
+
+static int bm_fill_super(struct super_block * sb, void * data, int silent)
+{
+	static struct tree_descr bm_files[] = {
+		[1] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
+		[2] = {"register", &bm_register_operations, S_IWUSR},
+		/* last one */ {""}
+	};
+	int err = simple_fill_super(sb, 0x42494e4d, bm_files);
+	if (!err)
+		sb->s_op = &s_ops;
+	return err;
+}
+
+static struct super_block *bm_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, bm_fill_super);
+}
+
+static struct linux_binfmt misc_format = {
+	.module = THIS_MODULE,
+	.load_binary = load_misc_binary,
+};
+
+static struct file_system_type bm_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "binfmt_misc",
+	.get_sb		= bm_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+static int __init init_misc_binfmt(void)
+{
+	int err = register_filesystem(&bm_fs_type);
+	if (!err) {
+		err = register_binfmt(&misc_format);
+		if (err)
+			unregister_filesystem(&bm_fs_type);
+	}
+	return err;
+}
+
+static void __exit exit_misc_binfmt(void)
+{
+	unregister_binfmt(&misc_format);
+	unregister_filesystem(&bm_fs_type);
+}
+
+core_initcall(init_misc_binfmt);
+module_exit(exit_misc_binfmt);
+MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
new file mode 100644
index 0000000..1edbcca
--- /dev/null
+++ b/fs/binfmt_script.c
@@ -0,0 +1,116 @@
+/*
+ *  linux/fs/binfmt_script.c
+ *
+ *  Copyright (C) 1996  Martin von Löwis
+ *  original #!-checking implemented by tytso.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/smp_lock.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+
+static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+{
+	char *cp, *i_name, *i_arg;
+	struct file *file;
+	char interp[BINPRM_BUF_SIZE];
+	int retval;
+
+	if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') || (bprm->sh_bang)) 
+		return -ENOEXEC;
+	/*
+	 * This section does the #! interpretation.
+	 * Sorta complicated, but hopefully it will work.  -TYT
+	 */
+
+	bprm->sh_bang++;
+	allow_write_access(bprm->file);
+	fput(bprm->file);
+	bprm->file = NULL;
+
+	bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
+	if ((cp = strchr(bprm->buf, '\n')) == NULL)
+		cp = bprm->buf+BINPRM_BUF_SIZE-1;
+	*cp = '\0';
+	while (cp > bprm->buf) {
+		cp--;
+		if ((*cp == ' ') || (*cp == '\t'))
+			*cp = '\0';
+		else
+			break;
+	}
+	for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
+	if (*cp == '\0') 
+		return -ENOEXEC; /* No interpreter name found */
+	i_name = cp;
+	i_arg = NULL;
+	for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++)
+		/* nothing */ ;
+	while ((*cp == ' ') || (*cp == '\t'))
+		*cp++ = '\0';
+	if (*cp)
+		i_arg = cp;
+	strcpy (interp, i_name);
+	/*
+	 * OK, we've parsed out the interpreter name and
+	 * (optional) argument.
+	 * Splice in (1) the interpreter's name for argv[0]
+	 *           (2) (optional) argument to interpreter
+	 *           (3) filename of shell script (replace argv[0])
+	 *
+	 * This is done in reverse order, because of how the
+	 * user environment and arguments are stored.
+	 */
+	remove_arg_zero(bprm);
+	retval = copy_strings_kernel(1, &bprm->interp, bprm);
+	if (retval < 0) return retval; 
+	bprm->argc++;
+	if (i_arg) {
+		retval = copy_strings_kernel(1, &i_arg, bprm);
+		if (retval < 0) return retval; 
+		bprm->argc++;
+	}
+	retval = copy_strings_kernel(1, &i_name, bprm);
+	if (retval) return retval; 
+	bprm->argc++;
+	bprm->interp = interp;
+
+	/*
+	 * OK, now restart the process with the interpreter's dentry.
+	 */
+	file = open_exec(interp);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	bprm->file = file;
+	retval = prepare_binprm(bprm);
+	if (retval < 0)
+		return retval;
+	return search_binary_handler(bprm,regs);
+}
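
The '#!' line handling above, reduced to a user-space sketch: the newline and trailing blanks are stripped, the interpreter name is split out, and anything left becomes a single optional argument. The sketch assumes the buffer starts with "#!".

#include <stdio.h>
#include <string.h>

static void parse_shebang(char *line, char **i_name, char **i_arg)
{
    char *cp = strchr(line, '\n');

    if (cp)
        *cp = '\0';
    /* strip trailing blanks */
    for (cp = line + strlen(line);
         cp > line + 2 && (cp[-1] == ' ' || cp[-1] == '\t'); cp--)
        cp[-1] = '\0';
    /* skip blanks after "#!" */
    for (cp = line + 2; *cp == ' ' || *cp == '\t'; cp++)
        ;
    *i_name = cp;                   /* interpreter path */
    *i_arg = NULL;
    for (; *cp && *cp != ' ' && *cp != '\t'; cp++)
        ;
    while (*cp == ' ' || *cp == '\t')
        *cp++ = '\0';
    if (*cp)
        *i_arg = cp;                /* the rest is a single argument */
}

int main(void)
{
    char line[] = "#!/bin/sh -e\n";
    char *name, *arg;

    parse_shebang(line, &name, &arg);
    printf("interpreter=\"%s\" arg=\"%s\"\n", name, arg ? arg : "");
    return 0;
}
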
+
+static struct linux_binfmt script_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_script,
+};
+
+static int __init init_script_binfmt(void)
+{
+	return register_binfmt(&script_format);
+}
+
+static void __exit exit_script_binfmt(void)
+{
+	unregister_binfmt(&script_format);
+}
+
+core_initcall(init_script_binfmt);
+module_exit(exit_script_binfmt);
+MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
new file mode 100644
index 0000000..227a268
--- /dev/null
+++ b/fs/binfmt_som.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/binfmt_som.c
+ *
+ * These are the functions used to load SOM format executables as used
+ * by HP-UX.  
+ *
+ * Copyright 1999 Matthew Wilcox <willy@bofh.ai>
+ * based on binfmt_elf which is
+ * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/binfmts.h>
+#include <linux/som.h>
+#include <linux/string.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include <linux/config.h>
+
+#include <linux/elf.h>
+
+static int load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs);
+static int load_som_library(struct file *);
+
+/*
+ * If we don't support core dumping, then supply a NULL so we
+ * don't even try.
+ */
+#if 0
+static int som_core_dump(long signr, struct pt_regs * regs);
+#else
+#define som_core_dump	NULL
+#endif
+
+#define SOM_PAGESTART(_v) ((_v) & ~(unsigned long)(SOM_PAGESIZE-1))
+#define SOM_PAGEOFFSET(_v) ((_v) & (SOM_PAGESIZE-1))
+#define SOM_PAGEALIGN(_v) (((_v) + SOM_PAGESIZE - 1) & ~(SOM_PAGESIZE - 1))
+
+static struct linux_binfmt som_format = {
+	.module		= THIS_MODULE,
+	.load_binary	= load_som_binary,
+	.load_shlib	= load_som_library,
+	.core_dump	= som_core_dump,
+	.min_coredump	= SOM_PAGESIZE
+};
+
+/*
+ * create_som_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
+static void create_som_tables(struct linux_binprm *bprm)
+{
+	char **argv, **envp;
+	int argc = bprm->argc;
+	int envc = bprm->envc;
+	unsigned long p;
+	unsigned long *sp;
+
+	/* Word-align the stack pointer */
+	sp = (unsigned long *)((bprm->p + 3) & ~3);
+
+	envp = (char **) sp;
+	sp += envc + 1;
+	argv = (char **) sp;
+	sp += argc + 1;
+
+	__put_user((unsigned long) envp,++sp);
+	__put_user((unsigned long) argv,++sp);
+
+	__put_user(argc, ++sp);
+
+	bprm->p = (unsigned long) sp;
+
+	p = current->mm->arg_start;
+	while (argc-- > 0) {
+		__put_user((char *)p,argv++);
+		p += strlen_user((char *)p);
+	}
+	__put_user(NULL, argv);
+	current->mm->arg_end = current->mm->env_start = p;
+	while (envc-- > 0) {
+		__put_user((char *)p,envp++);
+		p += strlen_user((char *)p);
+	}
+	__put_user(NULL, envp);
+	current->mm->env_end = p;
+}
+
+static int check_som_header(struct som_hdr *som_ex)
+{
+	int *buf = (int *)som_ex;
+	int i, ck;
+
+	if (som_ex->system_id != SOM_SID_PARISC_1_0 &&
+	    som_ex->system_id != SOM_SID_PARISC_1_1 &&
+	    som_ex->system_id != SOM_SID_PARISC_2_0)
+		return -ENOEXEC;
+
+	if (som_ex->a_magic != SOM_EXEC_NONSHARE &&
+	    som_ex->a_magic != SOM_EXEC_SHARE &&
+	    som_ex->a_magic != SOM_EXEC_DEMAND)
+		return -ENOEXEC;
+
+	if (som_ex->version_id != SOM_ID_OLD &&
+	    som_ex->version_id != SOM_ID_NEW)
+		return -ENOEXEC;
+
+	ck = 0;
+	for (i=0; i<32; i++)
+		ck ^= buf[i];
+	if (ck != 0)
+		return -ENOEXEC;
+
+	return 0;
+}
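
The self-check above XORs the 32 words of the 128-byte header and requires the result to be zero, which means a producer stores the XOR of the remaining words in the checksum slot. A small demonstration; the field position and values are invented:

#include <stdio.h>

int main(void)
{
    int hdr[32] = { 0x0210, 0x0107 };   /* a couple of made-up fields */
    int i, ck = 0;

    for (i = 0; i < 31; i++)            /* XOR of everything but the checksum */
        ck ^= hdr[i];
    hdr[31] = ck;                       /* hypothetical checksum slot */

    for (ck = 0, i = 0; i < 32; i++)    /* the check done by the kernel */
        ck ^= hdr[i];
    printf("header ok: %s\n", ck == 0 ? "yes" : "no");
    return 0;
}
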
+
+static int map_som_binary(struct file *file,
+		const struct som_exec_auxhdr *hpuxhdr)
+{
+	unsigned long code_start, code_size, data_start, data_size;
+	unsigned long bss_start, som_brk;
+	int retval;
+	int prot = PROT_READ | PROT_EXEC;
+	int flags = MAP_FIXED|MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
+
+	mm_segment_t old_fs = get_fs();
+	set_fs(get_ds());
+
+	code_start = SOM_PAGESTART(hpuxhdr->exec_tmem);
+	code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize);
+	current->mm->start_code = code_start;
+	current->mm->end_code = code_start + code_size;
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(file, code_start, code_size, prot,
+			flags, SOM_PAGESTART(hpuxhdr->exec_tfile));
+	up_write(&current->mm->mmap_sem);
+	if (retval < 0 && retval > -1024)
+		goto out;
+
+	data_start = SOM_PAGESTART(hpuxhdr->exec_dmem);
+	data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize);
+	current->mm->start_data = data_start;
+	current->mm->end_data = bss_start = data_start + data_size;
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(file, data_start, data_size,
+			prot | PROT_WRITE, flags,
+			SOM_PAGESTART(hpuxhdr->exec_dfile));
+	up_write(&current->mm->mmap_sem);
+	if (retval < 0 && retval > -1024)
+		goto out;
+
+	som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize);
+	current->mm->start_brk = current->mm->brk = som_brk;
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(NULL, bss_start, som_brk - bss_start,
+			prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
+	up_write(&current->mm->mmap_sem);
+	if (retval > 0 || retval < -1024)
+		retval = 0;
+out:
+	set_fs(old_fs);
+	return retval;
+}
+
+
+/*
+ * These are the functions used to load SOM executables and shared
+ * libraries.  There is no binary dependent code anywhere else.
+ */
+
+static int
+load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	int som_exec_fileno;
+	int retval;
+	unsigned int size;
+	unsigned long som_entry;
+	struct som_hdr *som_ex;
+	struct som_exec_auxhdr *hpuxhdr;
+
+	/* Get the exec-header */
+	som_ex = (struct som_hdr *) bprm->buf;
+
+	retval = check_som_header(som_ex);
+	if (retval != 0)
+		goto out;
+
+	/* Now read in the auxiliary header information */
+
+	retval = -ENOMEM;
+	size = som_ex->aux_header_size;
+	if (size > SOM_PAGESIZE)
+		goto out;
+	hpuxhdr = (struct som_exec_auxhdr *) kmalloc(size, GFP_KERNEL);
+	if (!hpuxhdr)
+		goto out;
+
+	retval = kernel_read(bprm->file, som_ex->aux_header_location,
+			(char *) hpuxhdr, size);
+	if (retval < 0)
+		goto out_free;
+#error "Fix security hole before enabling me"
+	retval = get_unused_fd();
+	if (retval < 0)
+		goto out_free;
+	get_file(bprm->file);
+	fd_install(som_exec_fileno = retval, bprm->file);
+
+	/* Flush all traces of the currently running executable */
+	retval = flush_old_exec(bprm);
+	if (retval)
+		goto out_free;
+
+	/* OK, This is the point of no return */
+	current->flags &= ~PF_FORKNOEXEC;
+	current->personality = PER_HPUX;
+
+	/* Set the task size for HP-UX processes such that
+	 * the gateway page is outside the address space.
+	 * This can be fixed later, but for now, this is much
+	 * easier.
+	 */
+
+	current->thread.task_size = 0xc0000000;
+
+	/* Set map base to allow enough room for hp-ux heap growth */
+
+	current->thread.map_base = 0x80000000;
+
+	retval = map_som_binary(bprm->file, hpuxhdr);
+	if (retval < 0)
+		goto out_free;
+
+	som_entry = hpuxhdr->exec_entry;
+	kfree(hpuxhdr);
+
+	set_binfmt(&som_format);
+	compute_creds(bprm);
+	setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+
+	create_som_tables(bprm);
+
+	current->mm->start_stack = bprm->p;
+	set_mm_counter(current->mm, rss, 0);
+
+#if 0
+	printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk);
+	printk("(end_code) %08lx\n" , (unsigned long) current->mm->end_code);
+	printk("(start_code) %08lx\n" , (unsigned long) current->mm->start_code);
+	printk("(end_data) %08lx\n" , (unsigned long) current->mm->end_data);
+	printk("(start_stack) %08lx\n" , (unsigned long) current->mm->start_stack);
+	printk("(brk) %08lx\n" , (unsigned long) current->mm->brk);
+#endif
+
+	map_hpux_gateway_page(current,current->mm);
+
+	start_thread_som(regs, som_entry, bprm->p);
+	if (current->ptrace & PT_PTRACED)
+		send_sig(SIGTRAP, current, 0);
+	return 0;
+
+	/* error cleanup */
+out_free:
+	kfree(hpuxhdr);
+out:
+	return retval;
+}
+
+static int load_som_library(struct file *f)
+{
+/* No lib support in SOM yet.  gizza chance.. */
+	return -ENOEXEC;
+}
+/* Install the SOM loader.
+ * N.B. We *rely* on the table being the right size with the
+ * right number of free slots...
+ */
+
+static int __init init_som_binfmt(void)
+{
+	return register_binfmt(&som_format);
+}
+
+static void __exit exit_som_binfmt(void)
+{
+	/* Remove the SOM loader. */
+	unregister_binfmt(&som_format);
+}
+
+core_initcall(init_som_binfmt);
+module_exit(exit_som_binfmt);
diff --git a/fs/bio.c b/fs/bio.c
new file mode 100644
index 0000000..e5349e8
--- /dev/null
+++ b/fs/bio.c
@@ -0,0 +1,1096 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
+ *
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/workqueue.h>
+
+#define BIO_POOL_SIZE 256
+
+static kmem_cache_t *bio_slab;
+
+#define BIOVEC_NR_POOLS 6
+
+/*
+ * a small number of entries is fine, not going to be performance critical.
+ * basically we just need to survive
+ */
+#define BIO_SPLIT_ENTRIES 8	
+mempool_t *bio_split_pool;
+
+struct biovec_slab {
+	int nr_vecs;
+	char *name; 
+	kmem_cache_t *slab;
+};
+
+/*
+ * if you change this list, also change bvec_alloc or things will
+ * break badly! cannot be bigger than what you can fit into an
+ * unsigned short
+ */
+
+#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] = {
+	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
+};
+#undef BV
+
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+struct bio_set {
+	mempool_t *bio_pool;
+	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+};
+
+/*
+ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
+ * IO code that does not need private memory pools.
+ */
+static struct bio_set *fs_bio_set;
+
+static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+{
+	struct bio_vec *bvl;
+	struct biovec_slab *bp;
+
+	/*
+	 * see comment near bvec_array define!
+	 */
+	switch (nr) {
+		case   1        : *idx = 0; break;
+		case   2 ...   4: *idx = 1; break;
+		case   5 ...  16: *idx = 2; break;
+		case  17 ...  64: *idx = 3; break;
+		case  65 ... 128: *idx = 4; break;
+		case 129 ... BIO_MAX_PAGES: *idx = 5; break;
+		default:
+			return NULL;
+	}
+	/*
+	 * idx now points to the pool we want to allocate from
+	 */
+
+	bp = bvec_slabs + *idx;
+	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
+	if (bvl)
+		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+
+	return bvl;
+}
+
+/*
+ * default destructor for a bio allocated with bio_alloc_bioset()
+ */
+static void bio_destructor(struct bio *bio)
+{
+	const int pool_idx = BIO_POOL_IDX(bio);
+	struct bio_set *bs = bio->bi_set;
+
+	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+
+	mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]);
+	mempool_free(bio, bs->bio_pool);
+}
+
+inline void bio_init(struct bio *bio)
+{
+	bio->bi_next = NULL;
+	bio->bi_flags = 1 << BIO_UPTODATE;
+	bio->bi_rw = 0;
+	bio->bi_vcnt = 0;
+	bio->bi_idx = 0;
+	bio->bi_phys_segments = 0;
+	bio->bi_hw_segments = 0;
+	bio->bi_hw_front_size = 0;
+	bio->bi_hw_back_size = 0;
+	bio->bi_size = 0;
+	bio->bi_max_vecs = 0;
+	bio->bi_end_io = NULL;
+	atomic_set(&bio->bi_cnt, 1);
+	bio->bi_private = NULL;
+}
+
+/**
+ * bio_alloc_bioset - allocate a bio for I/O
+ * @gfp_mask:   the GFP_ mask given to the slab allocator
+ * @nr_iovecs:	number of iovecs to pre-allocate
+ * @bs:		the bio_set to allocate the bio and iovecs from
+ *
+ * Description:
+ *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
+ *   If %__GFP_WAIT is set then we will block on the internal pool waiting
+ *   for a &struct bio to become free.
+ *
+ *   allocate bio and iovecs from the memory pools specified by the
+ *   bio_set structure.
+ **/
+struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
+{
+	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
+
+	if (likely(bio)) {
+		struct bio_vec *bvl = NULL;
+
+		bio_init(bio);
+		if (likely(nr_iovecs)) {
+			unsigned long idx;
+
+			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+			if (unlikely(!bvl)) {
+				mempool_free(bio, bs->bio_pool);
+				bio = NULL;
+				goto out;
+			}
+			bio->bi_flags |= idx << BIO_POOL_OFFSET;
+			bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
+		}
+		bio->bi_io_vec = bvl;
+		bio->bi_destructor = bio_destructor;
+		bio->bi_set = bs;
+	}
+out:
+	return bio;
+}
+
+struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
+{
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+}
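+
+/*
+ * Illustrative usage sketch (not part of the original file): a caller that
+ * only needs a bio from the shared fs_bio_set can allocate and release it
+ * as below.  The function name example_bio_alloc_release() is hypothetical.
+ */
+static void example_bio_alloc_release(void)
+{
+	/* room for up to 4 biovecs; GFP_KERNEL may block on the mempool */
+	struct bio *bio = bio_alloc(GFP_KERNEL, 4);
+
+	if (!bio)
+		return;
+
+	/* ... set bi_bdev/bi_sector and add pages before submitting ... */
+
+	/* drop the initial reference taken by bio_alloc() */
+	bio_put(bio);
+}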
+
+void zero_fill_bio(struct bio *bio)
+{
+	unsigned long flags;
+	struct bio_vec *bv;
+	int i;
+
+	bio_for_each_segment(bv, bio, i) {
+		char *data = bvec_kmap_irq(bv, &flags);
+		memset(data, 0, bv->bv_len);
+		flush_dcache_page(bv->bv_page);
+		bvec_kunmap_irq(data, &flags);
+	}
+}
+EXPORT_SYMBOL(zero_fill_bio);
+
+/**
+ * bio_put - release a reference to a bio
+ * @bio:   bio to release reference to
+ *
+ * Description:
+ *   Put a reference to a &struct bio, either one you have gotten with
+ *   bio_alloc or bio_get. The last put of a bio will free it.
+ **/
+void bio_put(struct bio *bio)
+{
+	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
+
+	/*
+	 * last put frees it
+	 */
+	if (atomic_dec_and_test(&bio->bi_cnt)) {
+		bio->bi_next = NULL;
+		bio->bi_destructor(bio);
+	}
+}
+
+inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+{
+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, bio);
+
+	return bio->bi_phys_segments;
+}
+
+inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+{
+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, bio);
+
+	return bio->bi_hw_segments;
+}
+
+/**
+ * 	__bio_clone	-	clone a bio
+ * 	@bio: destination bio
+ * 	@bio_src: bio to clone
+ *
+ *	Clone a &bio. Caller will own the returned bio, but not
+ *	the actual data it points to. Reference count of returned
+ * 	bio will be one.
+ */
+inline void __bio_clone(struct bio *bio, struct bio *bio_src)
+{
+	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+
+	memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec));
+
+	bio->bi_sector = bio_src->bi_sector;
+	bio->bi_bdev = bio_src->bi_bdev;
+	bio->bi_flags |= 1 << BIO_CLONED;
+	bio->bi_rw = bio_src->bi_rw;
+
+	/*
+	 * notes -- maybe just leave bi_idx alone. assume identical mapping
+	 * for the clone
+	 */
+	bio->bi_vcnt = bio_src->bi_vcnt;
+	bio->bi_size = bio_src->bi_size;
+	bio_phys_segments(q, bio);
+	bio_hw_segments(q, bio);
+}
+
+/**
+ *	bio_clone	-	clone a bio
+ *	@bio: bio to clone
+ *	@gfp_mask: allocation priority
+ *
+ * 	Like __bio_clone, only also allocates the returned bio
+ */
+struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
+{
+	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+
+	if (b)
+		__bio_clone(b, bio);
+
+	return b;
+}
+
+/**
+ *	bio_get_nr_vecs		- return approx number of vecs
+ *	@bdev:  I/O target
+ *
+ *	Return the approximate number of pages we can send to this target.
+ *	There's no guarantee that you will be able to fit this number of pages
+ *	into a bio; it does not account for dynamic restrictions that vary
+ *	on offset.
+ */
+int bio_get_nr_vecs(struct block_device *bdev)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	int nr_pages;
+
+	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages > q->max_phys_segments)
+		nr_pages = q->max_phys_segments;
+	if (nr_pages > q->max_hw_segments)
+		nr_pages = q->max_hw_segments;
+
+	return nr_pages;
+}
+
+static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+			  *page, unsigned int len, unsigned int offset)
+{
+	int retried_segments = 0;
+	struct bio_vec *bvec;
+
+	/*
+	 * cloned bio must not modify vec list
+	 */
+	if (unlikely(bio_flagged(bio, BIO_CLONED)))
+		return 0;
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return 0;
+
+	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+		return 0;
+
+	/*
+	 * we might lose a segment or two here, but rather that than
+	 * make this too complex.
+	 */
+
+	while (bio->bi_phys_segments >= q->max_phys_segments
+	       || bio->bi_hw_segments >= q->max_hw_segments
+	       || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
+
+		if (retried_segments)
+			return 0;
+
+		retried_segments = 1;
+		blk_recount_segments(q, bio);
+	}
+
+	/*
+	 * setup the new entry, we might clear it again later if we
+	 * cannot add the page
+	 */
+	bvec = &bio->bi_io_vec[bio->bi_vcnt];
+	bvec->bv_page = page;
+	bvec->bv_len = len;
+	bvec->bv_offset = offset;
+
+	/*
+	 * if queue has other restrictions (eg varying max sector size
+	 * depending on offset), it can specify a merge_bvec_fn in the
+	 * queue to get further control
+	 */
+	if (q->merge_bvec_fn) {
+		/*
+		 * merge_bvec_fn() returns number of bytes it can accept
+		 * at this offset
+		 */
+		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+			bvec->bv_page = NULL;
+			bvec->bv_len = 0;
+			bvec->bv_offset = 0;
+			return 0;
+		}
+	}
+
+	/* If we may be able to merge these biovecs, force a recount */
+	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
+	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
+		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+	bio->bi_vcnt++;
+	bio->bi_phys_segments++;
+	bio->bi_hw_segments++;
+	bio->bi_size += len;
+	return len;
+}
+
+/**
+ *	bio_add_page	-	attempt to add page to bio
+ *	@bio: destination bio
+ *	@page: page to add
+ *	@len: vec entry length
+ *	@offset: vec entry offset
+ *
+ *	Attempt to add a page to the bio_vec maplist. This can fail for a
+ *	number of reasons, such as the bio being full or target block
+ *	device limitations. The target block device must allow bios
+ *	smaller than PAGE_SIZE, so it is always possible to add a single
+ *	page to an empty bio.
+ */
+int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
+		 unsigned int offset)
+{
+	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
+			      len, offset);
+}
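+
+/*
+ * Illustrative sketch (not part of the original file): building up a bio
+ * from an array of pages while honouring the return value of
+ * bio_add_page().  example_fill_bio(), pages and nr_pages are hypothetical.
+ */
+static int example_fill_bio(struct bio *bio, struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		/* returns the number of bytes accepted, 0 if the page won't fit */
+		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
+			break;
+	}
+
+	/* number of pages actually added */
+	return i;
+}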
+
+struct bio_map_data {
+	struct bio_vec *iovecs;
+	void __user *userptr;
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+{
+	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+	bio->bi_private = bmd;
+}
+
+static void bio_free_map_data(struct bio_map_data *bmd)
+{
+	kfree(bmd->iovecs);
+	kfree(bmd);
+}
+
+static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+{
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+
+	if (!bmd)
+		return NULL;
+
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	if (bmd->iovecs)
+		return bmd;
+
+	kfree(bmd);
+	return NULL;
+}
+
+/**
+ *	bio_uncopy_user	-	finish previously mapped bio
+ *	@bio: bio being terminated
+ *
+ *	Free pages allocated from bio_copy_user() and write back data
+ *	to user space in case of a read.
+ */
+int bio_uncopy_user(struct bio *bio)
+{
+	struct bio_map_data *bmd = bio->bi_private;
+	const int read = bio_data_dir(bio) == READ;
+	struct bio_vec *bvec;
+	int i, ret = 0;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *addr = page_address(bvec->bv_page);
+		unsigned int len = bmd->iovecs[i].bv_len;
+
+		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
+			ret = -EFAULT;
+
+		__free_page(bvec->bv_page);
+		bmd->userptr += len;
+	}
+	bio_free_map_data(bmd);
+	bio_put(bio);
+	return ret;
+}
+
+/**
+ *	bio_copy_user	-	copy user data to bio
+ *	@q: destination block queue
+ *	@uaddr: start of user address
+ *	@len: length in bytes
+ *	@write_to_vm: bool indicating writing to pages or not
+ *
+ *	Prepares and returns a bio for indirect user io, bouncing data
+ *	to/from kernel pages as necessary. Must be paired with a call to
+ *	bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+			  unsigned int len, int write_to_vm)
+{
+	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = uaddr >> PAGE_SHIFT;
+	struct bio_map_data *bmd;
+	struct bio_vec *bvec;
+	struct page *page;
+	struct bio *bio;
+	int i, ret;
+
+	bmd = bio_alloc_map_data(end - start);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
+
+	bmd->userptr = (void __user *) uaddr;
+
+	ret = -ENOMEM;
+	bio = bio_alloc(GFP_KERNEL, end - start);
+	if (!bio)
+		goto out_bmd;
+
+	bio->bi_rw |= (!write_to_vm << BIO_RW);
+
+	ret = 0;
+	while (len) {
+		unsigned int bytes = PAGE_SIZE;
+
+		if (bytes > len)
+			bytes = len;
+
+		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+		if (!page) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+			ret = -EINVAL;
+			break;
+		}
+
+		len -= bytes;
+	}
+
+	if (ret)
+		goto cleanup;
+
+	/*
+	 * success
+	 */
+	if (!write_to_vm) {
+		char __user *p = (char __user *) uaddr;
+
+		/*
+		 * for a write, copy in data to kernel pages
+		 */
+		ret = -EFAULT;
+		bio_for_each_segment(bvec, bio, i) {
+			char *addr = page_address(bvec->bv_page);
+
+			if (copy_from_user(addr, p, bvec->bv_len))
+				goto cleanup;
+			p += bvec->bv_len;
+		}
+	}
+
+	bio_set_map_data(bmd, bio);
+	return bio;
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+out_bmd:
+	bio_free_map_data(bmd);
+	return ERR_PTR(ret);
+}
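+
+/*
+ * Illustrative sketch (not part of the original file): the intended pairing
+ * of bio_copy_user() and bio_uncopy_user() for a bounced user transfer.
+ * example_bounce_io() is hypothetical and elides the actual submission.
+ */
+static int example_bounce_io(request_queue_t *q, unsigned long uaddr,
+			     unsigned int len, int write_to_vm)
+{
+	struct bio *bio = bio_copy_user(q, uaddr, len, write_to_vm);
+
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	/* ... set bio->bi_bdev, submit the bio and wait for completion ... */
+
+	/* for a read this copies the data back to user space, then frees the
+	 * bounce pages and the bio */
+	return bio_uncopy_user(bio);
+}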
+
+static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
+				  unsigned long uaddr, unsigned int len,
+				  int write_to_vm)
+{
+	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = uaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int ret, offset, i;
+	struct page **pages;
+	struct bio *bio;
+
+	/*
+	 * transfer and buffer must be aligned to at least hardsector
+	 * size for now, in the future we can relax this restriction
+	 */
+	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		return ERR_PTR(-EINVAL);
+
+	bio = bio_alloc(GFP_KERNEL, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	ret = -ENOMEM;
+	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto out;
+
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
+						write_to_vm, 0, pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret < nr_pages)
+		goto out;
+
+	bio->bi_bdev = bdev;
+
+	offset = uaddr & ~PAGE_MASK;
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		/*
+		 * sorry...
+		 */
+		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
+			break;
+
+		len -= bytes;
+		offset = 0;
+	}
+
+	/*
+	 * release the pages we didn't map into the bio, if any
+	 */
+	while (i < nr_pages)
+		page_cache_release(pages[i++]);
+
+	kfree(pages);
+
+	/*
+	 * set data direction, and check if mapped pages need bouncing
+	 */
+	if (!write_to_vm)
+		bio->bi_rw |= (1 << BIO_RW);
+
+	bio->bi_flags |= (1 << BIO_USER_MAPPED);
+	return bio;
+out:
+	kfree(pages);
+	bio_put(bio);
+	return ERR_PTR(ret);
+}
+
+/**
+ *	bio_map_user	-	map user address into bio
+ *	@q: the request queue the io will be mapped against
+ *	@bdev: destination block device
+ *	@uaddr: start of user address
+ *	@len: length in bytes
+ *	@write_to_vm: bool indicating writing to pages or not
+ *
+ *	Map the user space address into a bio suitable for io to a block
+ *	device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+			 unsigned long uaddr, unsigned int len, int write_to_vm)
+{
+	struct bio *bio;
+
+	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+
+	if (IS_ERR(bio))
+		return bio;
+
+	/*
+	 * subtle -- if __bio_map_user() ended up bouncing a bio,
+	 * it would normally disappear when its bi_end_io is run.
+	 * however, we need it for the unmap, so grab an extra
+	 * reference to it
+	 */
+	bio_get(bio);
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * don't support partial mappings
+	 */
+	bio_endio(bio, bio->bi_size, 0);
+	bio_unmap_user(bio);
+	return ERR_PTR(-EINVAL);
+}
+
+static void __bio_unmap_user(struct bio *bio)
+{
+	struct bio_vec *bvec;
+	int i;
+
+	/*
+	 * make sure we dirty pages we wrote to
+	 */
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		if (bio_data_dir(bio) == READ)
+			set_page_dirty_lock(bvec->bv_page);
+
+		page_cache_release(bvec->bv_page);
+	}
+
+	bio_put(bio);
+}
+
+/**
+ *	bio_unmap_user	-	unmap a bio
+ *	@bio:		the bio being unmapped
+ *
+ *	Unmap a bio previously mapped by bio_map_user(). Must be called from
+ *	process context.
+ *
+ *	bio_unmap_user() may sleep.
+ */
+void bio_unmap_user(struct bio *bio)
+{
+	__bio_unmap_user(bio);
+	bio_put(bio);
+}
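+
+/*
+ * Illustrative sketch (not part of the original file): mapping a user
+ * buffer for zero-copy io and unmapping it once the io has completed.
+ * example_map_io() is hypothetical and elides the submission step.
+ */
+static int example_map_io(request_queue_t *q, struct block_device *bdev,
+			  unsigned long uaddr, unsigned int len, int write_to_vm)
+{
+	struct bio *bio = bio_map_user(q, bdev, uaddr, len, write_to_vm);
+
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	/* ... submit the bio and wait for it to complete ... */
+
+	/* drops the extra reference bio_map_user() took and releases the pages */
+	bio_unmap_user(bio);
+	return 0;
+}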
+
+/*
+ * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
+ * for performing direct-IO in BIOs.
+ *
+ * The problem is that we cannot run set_page_dirty() from interrupt context
+ * because the required locks are not interrupt-safe.  So what we can do is to
+ * mark the pages dirty _before_ performing IO.  And in interrupt context,
+ * check that the pages are still dirty.   If so, fine.  If not, redirty them
+ * in process context.
+ *
+ * We special-case compound pages here: normally this means reads into hugetlb
+ * pages.  The logic in here doesn't really work right for compound pages
+ * because the VM does not uniformly chase down the head page in all cases.
+ * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
+ * handle them at all.  So we skip compound pages here at an early stage.
+ *
+ * Note that this code is very hard to test under normal circumstances because
+ * direct-io pins the pages with get_user_pages().  This makes
+ * is_page_cache_freeable return false, and the VM will not clean the pages.
+ * But other code (eg, pdflush) could clean the pages if they are mapped
+ * pagecache.
+ *
+ * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
+ * deferred bio dirtying paths.
+ */
+
+/*
+ * bio_set_pages_dirty() will mark all the bio's pages as dirty.
+ */
+void bio_set_pages_dirty(struct bio *bio)
+{
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int i;
+
+	for (i = 0; i < bio->bi_vcnt; i++) {
+		struct page *page = bvec[i].bv_page;
+
+		if (page && !PageCompound(page))
+			set_page_dirty_lock(page);
+	}
+}
+
+static void bio_release_pages(struct bio *bio)
+{
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int i;
+
+	for (i = 0; i < bio->bi_vcnt; i++) {
+		struct page *page = bvec[i].bv_page;
+
+		if (page)
+			put_page(page);
+	}
+}
+
+/*
+ * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
+ * If they are, then fine.  If, however, some pages are clean then they must
+ * have been written out during the direct-IO read.  So we take another ref on
+ * the BIO and the offending pages and re-dirty the pages in process context.
+ *
+ * It is expected that bio_check_pages_dirty() will wholly own the BIO from
+ * here on.  It will run one page_cache_release() against each page and will
+ * run one bio_put() against the BIO.
+ */
+
+static void bio_dirty_fn(void *data);
+
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DEFINE_SPINLOCK(bio_dirty_lock);
+static struct bio *bio_dirty_list;
+
+/*
+ * This runs in process context
+ */
+static void bio_dirty_fn(void *data)
+{
+	unsigned long flags;
+	struct bio *bio;
+
+	spin_lock_irqsave(&bio_dirty_lock, flags);
+	bio = bio_dirty_list;
+	bio_dirty_list = NULL;
+	spin_unlock_irqrestore(&bio_dirty_lock, flags);
+
+	while (bio) {
+		struct bio *next = bio->bi_private;
+
+		bio_set_pages_dirty(bio);
+		bio_release_pages(bio);
+		bio_put(bio);
+		bio = next;
+	}
+}
+
+void bio_check_pages_dirty(struct bio *bio)
+{
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int nr_clean_pages = 0;
+	int i;
+
+	for (i = 0; i < bio->bi_vcnt; i++) {
+		struct page *page = bvec[i].bv_page;
+
+		if (PageDirty(page) || PageCompound(page)) {
+			page_cache_release(page);
+			bvec[i].bv_page = NULL;
+		} else {
+			nr_clean_pages++;
+		}
+	}
+
+	if (nr_clean_pages) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&bio_dirty_lock, flags);
+		bio->bi_private = bio_dirty_list;
+		bio_dirty_list = bio;
+		spin_unlock_irqrestore(&bio_dirty_lock, flags);
+		schedule_work(&bio_dirty_work);
+	} else {
+		bio_put(bio);
+	}
+}
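+
+/*
+ * Illustrative sketch (not part of the original file): how a direct-IO READ
+ * path is expected to use the two helpers above.  example_dio_submit() and
+ * example_dio_end_io() are hypothetical; the submission itself is elided.
+ */
+static void example_dio_submit(struct bio *bio)
+{
+	/* mark the pages dirty in process context, before the io starts */
+	bio_set_pages_dirty(bio);
+
+	/* ... submission of the READ would go here ... */
+}
+
+static int example_dio_end_io(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	/* may run in interrupt context: clean pages are re-dirtied via the
+	 * bio_dirty_work workqueue, then pages and bio are released */
+	bio_check_pages_dirty(bio);
+	return 0;
+}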
+
+/**
+ * bio_endio - end I/O on a bio
+ * @bio:	bio
+ * @bytes_done:	number of bytes completed
+ * @error:	error, if any
+ *
+ * Description:
+ *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
+ *   just a part of the bio, or it may be the whole bio. bio_endio() is the
+ *   preferred way to end I/O on a bio; it takes care of decrementing
+ *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success
+ *   and one of the established -Exxxx (-EIO, for instance) error values in
+ *   case something went wrong. No one should call bi_end_io() directly on
+ *   a bio unless they own it and thus know that it has an end_io function.
+ **/
+void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+	if (error)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	if (unlikely(bytes_done > bio->bi_size)) {
+		printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
+						bytes_done, bio->bi_size);
+		bytes_done = bio->bi_size;
+	}
+
+	bio->bi_size -= bytes_done;
+	bio->bi_sector += (bytes_done >> 9);
+
+	if (bio->bi_end_io)
+		bio->bi_end_io(bio, bytes_done, error);
+}
+
+void bio_pair_release(struct bio_pair *bp)
+{
+	if (atomic_dec_and_test(&bp->cnt)) {
+		struct bio *master = bp->bio1.bi_private;
+
+		bio_endio(master, master->bi_size, bp->error);
+		mempool_free(bp, bp->bio2.bi_private);
+	}
+}
+
+static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+{
+	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
+
+	if (err)
+		bp->error = err;
+
+	if (bi->bi_size)
+		return 1;
+
+	bio_pair_release(bp);
+	return 0;
+}
+
+static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+{
+	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
+
+	if (err)
+		bp->error = err;
+
+	if (bi->bi_size)
+		return 1;
+
+	bio_pair_release(bp);
+	return 0;
+}
+
+/*
+ * split a bio - only worry about a bio with a single page
+ * in its iovec
+ */
+struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
+{
+	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
+
+	if (!bp)
+		return bp;
+
+	BUG_ON(bi->bi_vcnt != 1);
+	BUG_ON(bi->bi_idx != 0);
+	atomic_set(&bp->cnt, 3);
+	bp->error = 0;
+	bp->bio1 = *bi;
+	bp->bio2 = *bi;
+	bp->bio2.bi_sector += first_sectors;
+	bp->bio2.bi_size -= first_sectors << 9;
+	bp->bio1.bi_size = first_sectors << 9;
+
+	bp->bv1 = bi->bi_io_vec[0];
+	bp->bv2 = bi->bi_io_vec[0];
+	bp->bv2.bv_offset += first_sectors << 9;
+	bp->bv2.bv_len -= first_sectors << 9;
+	bp->bv1.bv_len = first_sectors << 9;
+
+	bp->bio1.bi_io_vec = &bp->bv1;
+	bp->bio2.bi_io_vec = &bp->bv2;
+
+	bp->bio1.bi_end_io = bio_pair_end_1;
+	bp->bio2.bi_end_io = bio_pair_end_2;
+
+	bp->bio1.bi_private = bi;
+	bp->bio2.bi_private = pool;
+
+	return bp;
+}
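+
+/*
+ * Illustrative sketch (not part of the original file): splitting a
+ * single-page bio at a sector boundary, as a striping driver might when a
+ * request straddles a chunk.  example_split() is hypothetical and
+ * submission of the two halves is elided.
+ */
+static int example_split(struct bio *bio, int first_sectors)
+{
+	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
+
+	if (!bp)
+		return -ENOMEM;
+
+	/* ... submit &bp->bio1 (first_sectors) and &bp->bio2 (the rest) ... */
+
+	/* drop the caller's reference; the two halves drop theirs from their
+	 * end_io handlers, and the last put completes the original bio */
+	bio_pair_release(bp);
+	return 0;
+}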
+
+static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
+{
+	return kmalloc(sizeof(struct bio_pair), gfp_flags);
+}
+
+static void bio_pair_free(void *bp, void *data)
+{
+	kfree(bp);
+}
+
+
+/*
+ * create memory pools for biovec's in a bio_set.
+ * use the global biovec slabs created for general use.
+ */
+static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+{
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		struct biovec_slab *bp = bvec_slabs + i;
+		mempool_t **bvp = bs->bvec_pools + i;
+
+		if (i >= scale)
+			pool_entries >>= 1;
+
+		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
+					mempool_free_slab, bp->slab);
+		if (!*bvp)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void biovec_free_pools(struct bio_set *bs)
+{
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		mempool_t *bvp = bs->bvec_pools[i];
+
+		if (bvp)
+			mempool_destroy(bvp);
+	}
+
+}
+
+void bioset_free(struct bio_set *bs)
+{
+	if (bs->bio_pool)
+		mempool_destroy(bs->bio_pool);
+
+	biovec_free_pools(bs);
+
+	kfree(bs);
+}
+
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+{
+	struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
+
+	if (!bs)
+		return NULL;
+
+	memset(bs, 0, sizeof(*bs));
+	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
+			mempool_free_slab, bio_slab);
+
+	if (!bs->bio_pool)
+		goto bad;
+
+	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+		return bs;
+
+bad:
+	bioset_free(bs);
+	return NULL;
+}
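+
+/*
+ * Illustrative sketch (not part of the original file): a driver creating a
+ * small private bio_set so its allocations cannot be starved by the shared
+ * fs_bio_set.  example_bs and the pool sizes are hypothetical.
+ */
+static struct bio_set *example_bs;
+
+static int example_bioset_setup(void)
+{
+	/* 32 bios; biovec pools start at 8 entries and halve from pool 4 on */
+	example_bs = bioset_create(32, 8, 4);
+	if (!example_bs)
+		return -ENOMEM;
+	return 0;
+}
+
+static struct bio *example_private_alloc(int nr_vecs)
+{
+	/* GFP_NOIO: safe to call from io paths that must not recurse */
+	return bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bs);
+}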
+
+static void __init biovec_init_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		int size;
+		struct biovec_slab *bvs = bvec_slabs + i;
+
+		size = bvs->nr_vecs * sizeof(struct bio_vec);
+		bvs->slab = kmem_cache_create(bvs->name, size, 0,
+                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	}
+}
+
+static int __init init_bio(void)
+{
+	int megabytes, bvec_pool_entries;
+	int scale = BIOVEC_NR_POOLS;
+
+	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
+				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+
+	biovec_init_slabs();
+
+	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
+
+	/*
+	 * find out where to start scaling
+	 */
+	if (megabytes <= 16)
+		scale = 0;
+	else if (megabytes <= 32)
+		scale = 1;
+	else if (megabytes <= 64)
+		scale = 2;
+	else if (megabytes <= 96)
+		scale = 3;
+	else if (megabytes <= 128)
+		scale = 4;
+
+	/*
+	 * scale number of entries
+	 */
+	bvec_pool_entries = megabytes * 2;
+	if (bvec_pool_entries > 256)
+		bvec_pool_entries = 256;
+
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	if (!fs_bio_set)
+		panic("bio: can't allocate bios\n");
+
+	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
+				bio_pair_alloc, bio_pair_free, NULL);
+	if (!bio_split_pool)
+		panic("bio: can't create split pool\n");
+
+	return 0;
+}
+
+subsys_initcall(init_bio);
+
+EXPORT_SYMBOL(bio_alloc);
+EXPORT_SYMBOL(bio_put);
+EXPORT_SYMBOL(bio_endio);
+EXPORT_SYMBOL(bio_init);
+EXPORT_SYMBOL(__bio_clone);
+EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_phys_segments);
+EXPORT_SYMBOL(bio_hw_segments);
+EXPORT_SYMBOL(bio_add_page);
+EXPORT_SYMBOL(bio_get_nr_vecs);
+EXPORT_SYMBOL(bio_map_user);
+EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_pair_release);
+EXPORT_SYMBOL(bio_split);
+EXPORT_SYMBOL(bio_split_pool);
+EXPORT_SYMBOL(bio_copy_user);
+EXPORT_SYMBOL(bio_uncopy_user);
+EXPORT_SYMBOL(bioset_create);
+EXPORT_SYMBOL(bioset_free);
+EXPORT_SYMBOL(bio_alloc_bioset);
diff --git a/fs/block_dev.c b/fs/block_dev.c
new file mode 100644
index 0000000..d19d07c
--- /dev/null
+++ b/fs/block_dev.c
@@ -0,0 +1,923 @@
+/*
+ *  linux/fs/block_dev.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/major.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/blkpg.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/mount.h>
+#include <linux/uio.h>
+#include <linux/namei.h>
+#include <asm/uaccess.h>
+
+struct bdev_inode {
+	struct block_device bdev;
+	struct inode vfs_inode;
+};
+
+static inline struct bdev_inode *BDEV_I(struct inode *inode)
+{
+	return container_of(inode, struct bdev_inode, vfs_inode);
+}
+
+inline struct block_device *I_BDEV(struct inode *inode)
+{
+	return &BDEV_I(inode)->bdev;
+}
+
+EXPORT_SYMBOL(I_BDEV);
+
+static sector_t max_block(struct block_device *bdev)
+{
+	sector_t retval = ~((sector_t)0);
+	loff_t sz = i_size_read(bdev->bd_inode);
+
+	if (sz) {
+		unsigned int size = block_size(bdev);
+		unsigned int sizebits = blksize_bits(size);
+		retval = (sz >> sizebits);
+	}
+	return retval;
+}
+
+/* Kill _all_ buffers, dirty or not.. */
+static void kill_bdev(struct block_device *bdev)
+{
+	invalidate_bdev(bdev, 1);
+	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+}	
+
+int set_blocksize(struct block_device *bdev, int size)
+{
+	/* Size must be a power of two, and between 512 and PAGE_SIZE */
+	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
+		return -EINVAL;
+
+	/* Size cannot be smaller than the size supported by the device */
+	if (size < bdev_hardsect_size(bdev))
+		return -EINVAL;
+
+	/* Don't change the size if it is same as current */
+	if (bdev->bd_block_size != size) {
+		sync_blockdev(bdev);
+		bdev->bd_block_size = size;
+		bdev->bd_inode->i_blkbits = blksize_bits(size);
+		kill_bdev(bdev);
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(set_blocksize);
+
+int sb_set_blocksize(struct super_block *sb, int size)
+{
+	int bits = 9; /* 2^9 = 512 */
+
+	if (set_blocksize(sb->s_bdev, size))
+		return 0;
+	/* If we get here, we know size is a power of two
+	 * and its value is between 512 and PAGE_SIZE */
+	sb->s_blocksize = size;
+	for (size >>= 10; size; size >>= 1)
+		++bits;
+	sb->s_blocksize_bits = bits;
+	return sb->s_blocksize;
+}
+
+EXPORT_SYMBOL(sb_set_blocksize);
+
+int sb_min_blocksize(struct super_block *sb, int size)
+{
+	int minsize = bdev_hardsect_size(sb->s_bdev);
+	if (size < minsize)
+		size = minsize;
+	return sb_set_blocksize(sb, size);
+}
+
+EXPORT_SYMBOL(sb_min_blocksize);
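+
+/*
+ * Illustrative sketch (not part of the original file): a filesystem's
+ * fill_super() would normally pick its block size with the helpers above.
+ * EXAMPLEFS_BLOCK_SIZE and example_fill_super_blocksize() are hypothetical.
+ */
+#define EXAMPLEFS_BLOCK_SIZE 1024
+
+static int example_fill_super_blocksize(struct super_block *sb)
+{
+	/* never go below the device's hard sector size */
+	if (!sb_min_blocksize(sb, EXAMPLEFS_BLOCK_SIZE))
+		return -EINVAL;
+
+	/* sb->s_blocksize and sb->s_blocksize_bits are now set up */
+	return 0;
+}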
+
+static int
+blkdev_get_block(struct inode *inode, sector_t iblock,
+		struct buffer_head *bh, int create)
+{
+	if (iblock >= max_block(I_BDEV(inode))) {
+		if (create)
+			return -EIO;
+
+		/*
+		 * for reads, we're just trying to fill a partial page.
+		 * return a hole; the caller will have to call get_block
+		 * again before it can fill the page, and will get -EIO at
+		 * that time
+		 */
+		return 0;
+	}
+	bh->b_bdev = I_BDEV(inode);
+	bh->b_blocknr = iblock;
+	set_buffer_mapped(bh);
+	return 0;
+}
+
+static int
+blkdev_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long max_blocks, struct buffer_head *bh, int create)
+{
+	sector_t end_block = max_block(I_BDEV(inode));
+
+	if ((iblock + max_blocks) > end_block) {
+		max_blocks = end_block - iblock;
+		if ((long)max_blocks <= 0) {
+			if (create)
+				return -EIO;	/* write fully beyond EOF */
+			/*
+			 * It is a read which is fully beyond EOF.  We return
+			 * a !buffer_mapped buffer
+			 */
+			max_blocks = 0;
+		}
+	}
+
+	bh->b_bdev = I_BDEV(inode);
+	bh->b_blocknr = iblock;
+	bh->b_size = max_blocks << inode->i_blkbits;
+	if (max_blocks)
+		set_buffer_mapped(bh);
+	return 0;
+}
+
+static ssize_t
+blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+			loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+
+	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
+				iov, offset, nr_segs, blkdev_get_blocks, NULL);
+}
+
+static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, blkdev_get_block, wbc);
+}
+
+static int blkdev_readpage(struct file * file, struct page * page)
+{
+	return block_read_full_page(page, blkdev_get_block);
+}
+
+static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page, from, to, blkdev_get_block);
+}
+
+static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_commit_write(page, from, to);
+}
+
+/*
+ * private llseek:
+ * for a block special file, file->f_dentry->d_inode->i_size is zero,
+ * so we compute the size by hand (just as in block_read/write above)
+ */
+static loff_t block_llseek(struct file *file, loff_t offset, int origin)
+{
+	struct inode *bd_inode = file->f_mapping->host;
+	loff_t size;
+	loff_t retval;
+
+	down(&bd_inode->i_sem);
+	size = i_size_read(bd_inode);
+
+	switch (origin) {
+		case 2:
+			offset += size;
+			break;
+		case 1:
+			offset += file->f_pos;
+	}
+	retval = -EINVAL;
+	if (offset >= 0 && offset <= size) {
+		if (offset != file->f_pos) {
+			file->f_pos = offset;
+		}
+		retval = offset;
+	}
+	up(&bd_inode->i_sem);
+	return retval;
+}
+	
+/*
+ *	Filp is never NULL; the only case when ->fsync() is called with
+ *	NULL first argument is nfsd_sync_dir() and that's not a directory.
+ */
+ 
+static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+	return sync_blockdev(I_BDEV(filp->f_mapping->host));
+}
+
+/*
+ * pseudo-fs
+ */
+
+static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
+static kmem_cache_t * bdev_cachep;
+
+static struct inode *bdev_alloc_inode(struct super_block *sb)
+{
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void bdev_destroy_inode(struct inode *inode)
+{
+	struct bdev_inode *bdi = BDEV_I(inode);
+
+	bdi->bdev.bd_inode_backing_dev_info = NULL;
+	kmem_cache_free(bdev_cachep, bdi);
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct bdev_inode *ei = (struct bdev_inode *) foo;
+	struct block_device *bdev = &ei->bdev;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+	{
+		memset(bdev, 0, sizeof(*bdev));
+		sema_init(&bdev->bd_sem, 1);
+		sema_init(&bdev->bd_mount_sem, 1);
+		INIT_LIST_HEAD(&bdev->bd_inodes);
+		INIT_LIST_HEAD(&bdev->bd_list);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static inline void __bd_forget(struct inode *inode)
+{
+	list_del_init(&inode->i_devices);
+	inode->i_bdev = NULL;
+	inode->i_mapping = &inode->i_data;
+}
+
+static void bdev_clear_inode(struct inode *inode)
+{
+	struct block_device *bdev = &BDEV_I(inode)->bdev;
+	struct list_head *p;
+	spin_lock(&bdev_lock);
+	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
+		__bd_forget(list_entry(p, struct inode, i_devices));
+	}
+	list_del_init(&bdev->bd_list);
+	spin_unlock(&bdev_lock);
+}
+
+static struct super_operations bdev_sops = {
+	.statfs = simple_statfs,
+	.alloc_inode = bdev_alloc_inode,
+	.destroy_inode = bdev_destroy_inode,
+	.drop_inode = generic_delete_inode,
+	.clear_inode = bdev_clear_inode,
+};
+
+static struct super_block *bd_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+}
+
+static struct file_system_type bd_type = {
+	.name		= "bdev",
+	.get_sb		= bd_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static struct vfsmount *bd_mnt;
+struct super_block *blockdev_superblock;
+
+void __init bdev_cache_init(void)
+{
+	int err;
+	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
+			0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+			init_once, NULL);
+	err = register_filesystem(&bd_type);
+	if (err)
+		panic("Cannot register bdev pseudo-fs");
+	bd_mnt = kern_mount(&bd_type);
+	err = PTR_ERR(bd_mnt);
+	if (IS_ERR(bd_mnt))
+		panic("Cannot create bdev pseudo-fs");
+	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
+}
+
+/*
+ * Most likely a _very_ bad hash function - but then it's hardly critical
+ * for small /dev and can be fixed when somebody needs a really large one.
+ * Keep in mind that it will be fed through the icache hash function too.
+ */
+static inline unsigned long hash(dev_t dev)
+{
+	return MAJOR(dev)+MINOR(dev);
+}
+
+static int bdev_test(struct inode *inode, void *data)
+{
+	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
+}
+
+static int bdev_set(struct inode *inode, void *data)
+{
+	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
+	return 0;
+}
+
+static LIST_HEAD(all_bdevs);
+
+struct block_device *bdget(dev_t dev)
+{
+	struct block_device *bdev;
+	struct inode *inode;
+
+	inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
+			bdev_test, bdev_set, &dev);
+
+	if (!inode)
+		return NULL;
+
+	bdev = &BDEV_I(inode)->bdev;
+
+	if (inode->i_state & I_NEW) {
+		bdev->bd_contains = NULL;
+		bdev->bd_inode = inode;
+		bdev->bd_block_size = (1 << inode->i_blkbits);
+		bdev->bd_part_count = 0;
+		bdev->bd_invalidated = 0;
+		inode->i_mode = S_IFBLK;
+		inode->i_rdev = dev;
+		inode->i_bdev = bdev;
+		inode->i_data.a_ops = &def_blk_aops;
+		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+		inode->i_data.backing_dev_info = &default_backing_dev_info;
+		spin_lock(&bdev_lock);
+		list_add(&bdev->bd_list, &all_bdevs);
+		spin_unlock(&bdev_lock);
+		unlock_new_inode(inode);
+	}
+	return bdev;
+}
+
+EXPORT_SYMBOL(bdget);
+
+long nr_blockdev_pages(void)
+{
+	struct list_head *p;
+	long ret = 0;
+	spin_lock(&bdev_lock);
+	list_for_each(p, &all_bdevs) {
+		struct block_device *bdev;
+		bdev = list_entry(p, struct block_device, bd_list);
+		ret += bdev->bd_inode->i_mapping->nrpages;
+	}
+	spin_unlock(&bdev_lock);
+	return ret;
+}
+
+void bdput(struct block_device *bdev)
+{
+	iput(bdev->bd_inode);
+}
+
+EXPORT_SYMBOL(bdput);
+ 
+static struct block_device *bd_acquire(struct inode *inode)
+{
+	struct block_device *bdev;
+	spin_lock(&bdev_lock);
+	bdev = inode->i_bdev;
+	if (bdev && igrab(bdev->bd_inode)) {
+		spin_unlock(&bdev_lock);
+		return bdev;
+	}
+	spin_unlock(&bdev_lock);
+	bdev = bdget(inode->i_rdev);
+	if (bdev) {
+		spin_lock(&bdev_lock);
+		if (inode->i_bdev)
+			__bd_forget(inode);
+		inode->i_bdev = bdev;
+		inode->i_mapping = bdev->bd_inode->i_mapping;
+		list_add(&inode->i_devices, &bdev->bd_inodes);
+		spin_unlock(&bdev_lock);
+	}
+	return bdev;
+}
+
+/* Call when you free inode */
+
+void bd_forget(struct inode *inode)
+{
+	spin_lock(&bdev_lock);
+	if (inode->i_bdev)
+		__bd_forget(inode);
+	spin_unlock(&bdev_lock);
+}
+
+int bd_claim(struct block_device *bdev, void *holder)
+{
+	int res;
+	spin_lock(&bdev_lock);
+
+	/* first decide result */
+	if (bdev->bd_holder == holder)
+		res = 0;	 /* already a holder */
+	else if (bdev->bd_holder != NULL)
+		res = -EBUSY; 	 /* held by someone else */
+	else if (bdev->bd_contains == bdev)
+		res = 0;  	 /* is a whole device which isn't held */
+
+	else if (bdev->bd_contains->bd_holder == bd_claim)
+		res = 0; 	 /* is a partition of a device that is being partitioned */
+	else if (bdev->bd_contains->bd_holder != NULL)
+		res = -EBUSY;	 /* is a partition of a held device */
+	else
+		res = 0;	 /* is a partition of an un-held device */
+
+	/* now impose change */
+	if (res==0) {
+		/* note that for a whole device bd_holders
+		 * will be incremented twice, and bd_holder will
+		 * be set to bd_claim before being set to holder
+		 */
+		bdev->bd_contains->bd_holders ++;
+		bdev->bd_contains->bd_holder = bd_claim;
+		bdev->bd_holders++;
+		bdev->bd_holder = holder;
+	}
+	spin_unlock(&bdev_lock);
+	return res;
+}
+
+EXPORT_SYMBOL(bd_claim);
+
+void bd_release(struct block_device *bdev)
+{
+	spin_lock(&bdev_lock);
+	if (!--bdev->bd_contains->bd_holders)
+		bdev->bd_contains->bd_holder = NULL;
+	if (!--bdev->bd_holders)
+		bdev->bd_holder = NULL;
+	spin_unlock(&bdev_lock);
+}
+
+EXPORT_SYMBOL(bd_release);
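+
+/*
+ * Illustrative sketch (not part of the original file): claiming a block
+ * device for exclusive use around some operation.  example_exclusive_op()
+ * is hypothetical; any unique pointer can serve as the holder cookie.
+ */
+static int example_exclusive_op(struct block_device *bdev, void *holder)
+{
+	int err = bd_claim(bdev, holder);
+
+	if (err)	/* -EBUSY: held by someone else */
+		return err;
+
+	/* ... use the device knowing nobody else holds it ... */
+
+	bd_release(bdev);
+	return 0;
+}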
+
+/*
+ * Tries to open block device by device number.  Use it ONLY if you
+ * really do not have anything better - i.e. when you are behind a
+ * truly sucky interface and all you are given is a device number.  _Never_
+ * to be used for internal purposes.  If you ever need it - reconsider
+ * your API.
+ */
+struct block_device *open_by_devnum(dev_t dev, unsigned mode)
+{
+	struct block_device *bdev = bdget(dev);
+	int err = -ENOMEM;
+	int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
+	if (bdev)
+		err = blkdev_get(bdev, mode, flags);
+	return err ? ERR_PTR(err) : bdev;
+}
+
+EXPORT_SYMBOL(open_by_devnum);
+
+/*
+ * This routine checks whether a removable media has been changed,
+ * and invalidates all buffer-cache-entries in that case. This
+ * is a relatively slow routine, so we have to try to minimize using
+ * it. Thus it is called only upon a 'mount' or 'open'. This
+ * is the best way of combining speed and utility, I think.
+ * People changing diskettes in the middle of an operation deserve
+ * to lose :-)
+ */
+int check_disk_change(struct block_device *bdev)
+{
+	struct gendisk *disk = bdev->bd_disk;
+	struct block_device_operations * bdops = disk->fops;
+
+	if (!bdops->media_changed)
+		return 0;
+	if (!bdops->media_changed(bdev->bd_disk))
+		return 0;
+
+	if (__invalidate_device(bdev, 0))
+		printk("VFS: busy inodes on changed media.\n");
+
+	if (bdops->revalidate_disk)
+		bdops->revalidate_disk(bdev->bd_disk);
+	if (bdev->bd_disk->minors > 1)
+		bdev->bd_invalidated = 1;
+	return 1;
+}
+
+EXPORT_SYMBOL(check_disk_change);
+
+void bd_set_size(struct block_device *bdev, loff_t size)
+{
+	unsigned bsize = bdev_hardsect_size(bdev);
+
+	bdev->bd_inode->i_size = size;
+	while (bsize < PAGE_CACHE_SIZE) {
+		if (size & bsize)
+			break;
+		bsize <<= 1;
+	}
+	bdev->bd_block_size = bsize;
+	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+EXPORT_SYMBOL(bd_set_size);
+
+static int do_open(struct block_device *bdev, struct file *file)
+{
+	struct module *owner = NULL;
+	struct gendisk *disk;
+	int ret = -ENXIO;
+	int part;
+
+	file->f_mapping = bdev->bd_inode->i_mapping;
+	lock_kernel();
+	disk = get_gendisk(bdev->bd_dev, &part);
+	if (!disk) {
+		unlock_kernel();
+		bdput(bdev);
+		return ret;
+	}
+	owner = disk->fops->owner;
+
+	down(&bdev->bd_sem);
+	if (!bdev->bd_openers) {
+		bdev->bd_disk = disk;
+		bdev->bd_contains = bdev;
+		if (!part) {
+			struct backing_dev_info *bdi;
+			if (disk->fops->open) {
+				ret = disk->fops->open(bdev->bd_inode, file);
+				if (ret)
+					goto out_first;
+			}
+			if (!bdev->bd_openers) {
+				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+				bdi = blk_get_backing_dev_info(bdev);
+				if (bdi == NULL)
+					bdi = &default_backing_dev_info;
+				bdev->bd_inode->i_data.backing_dev_info = bdi;
+			}
+			if (bdev->bd_invalidated)
+				rescan_partitions(disk, bdev);
+		} else {
+			struct hd_struct *p;
+			struct block_device *whole;
+			whole = bdget_disk(disk, 0);
+			ret = -ENOMEM;
+			if (!whole)
+				goto out_first;
+			ret = blkdev_get(whole, file->f_mode, file->f_flags);
+			if (ret)
+				goto out_first;
+			bdev->bd_contains = whole;
+			down(&whole->bd_sem);
+			whole->bd_part_count++;
+			p = disk->part[part - 1];
+			bdev->bd_inode->i_data.backing_dev_info =
+			   whole->bd_inode->i_data.backing_dev_info;
+			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
+				whole->bd_part_count--;
+				up(&whole->bd_sem);
+				ret = -ENXIO;
+				goto out_first;
+			}
+			kobject_get(&p->kobj);
+			bdev->bd_part = p;
+			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
+			up(&whole->bd_sem);
+		}
+	} else {
+		put_disk(disk);
+		module_put(owner);
+		if (bdev->bd_contains == bdev) {
+			if (bdev->bd_disk->fops->open) {
+				ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
+				if (ret)
+					goto out;
+			}
+			if (bdev->bd_invalidated)
+				rescan_partitions(bdev->bd_disk, bdev);
+		} else {
+			down(&bdev->bd_contains->bd_sem);
+			bdev->bd_contains->bd_part_count++;
+			up(&bdev->bd_contains->bd_sem);
+		}
+	}
+	bdev->bd_openers++;
+	up(&bdev->bd_sem);
+	unlock_kernel();
+	return 0;
+
+out_first:
+	bdev->bd_disk = NULL;
+	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+	if (bdev != bdev->bd_contains)
+		blkdev_put(bdev->bd_contains);
+	bdev->bd_contains = NULL;
+	put_disk(disk);
+	module_put(owner);
+out:
+	up(&bdev->bd_sem);
+	unlock_kernel();
+	if (ret)
+		bdput(bdev);
+	return ret;
+}
+
+int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
+{
+	/*
+	 * This crockload is due to bad choice of ->open() type.
+	 * It will go away.
+	 * For now, block device ->open() routine must _not_
+	 * examine anything in 'inode' argument except ->i_rdev.
+	 */
+	struct file fake_file = {};
+	struct dentry fake_dentry = {};
+	fake_file.f_mode = mode;
+	fake_file.f_flags = flags;
+	fake_file.f_dentry = &fake_dentry;
+	fake_dentry.d_inode = bdev->bd_inode;
+
+	return do_open(bdev, &fake_file);
+}
+
+EXPORT_SYMBOL(blkdev_get);
+
+static int blkdev_open(struct inode * inode, struct file * filp)
+{
+	struct block_device *bdev;
+	int res;
+
+	/*
+	 * Preserve backwards compatibility and allow large file access
+	 * even if userspace doesn't ask for it explicitly. Some mkfs
+	 * binaries need it. We might want to drop this workaround
+	 * during an unstable branch.
+	 */
+	filp->f_flags |= O_LARGEFILE;
+
+	bdev = bd_acquire(inode);
+
+	res = do_open(bdev, filp);
+	if (res)
+		return res;
+
+	if (!(filp->f_flags & O_EXCL) )
+		return 0;
+
+	if (!(res = bd_claim(bdev, filp)))
+		return 0;
+
+	blkdev_put(bdev);
+	return res;
+}
+
+int blkdev_put(struct block_device *bdev)
+{
+	int ret = 0;
+	struct inode *bd_inode = bdev->bd_inode;
+	struct gendisk *disk = bdev->bd_disk;
+
+	down(&bdev->bd_sem);
+	lock_kernel();
+	if (!--bdev->bd_openers) {
+		sync_blockdev(bdev);
+		kill_bdev(bdev);
+	}
+	if (bdev->bd_contains == bdev) {
+		if (disk->fops->release)
+			ret = disk->fops->release(bd_inode, NULL);
+	} else {
+		down(&bdev->bd_contains->bd_sem);
+		bdev->bd_contains->bd_part_count--;
+		up(&bdev->bd_contains->bd_sem);
+	}
+	if (!bdev->bd_openers) {
+		struct module *owner = disk->fops->owner;
+
+		put_disk(disk);
+		module_put(owner);
+
+		if (bdev->bd_contains != bdev) {
+			kobject_put(&bdev->bd_part->kobj);
+			bdev->bd_part = NULL;
+		}
+		bdev->bd_disk = NULL;
+		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+		if (bdev != bdev->bd_contains) {
+			blkdev_put(bdev->bd_contains);
+		}
+		bdev->bd_contains = NULL;
+	}
+	unlock_kernel();
+	up(&bdev->bd_sem);
+	bdput(bdev);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_put);
+
+static int blkdev_close(struct inode * inode, struct file * filp)
+{
+	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+	if (bdev->bd_holder == filp)
+		bd_release(bdev);
+	return blkdev_put(bdev);
+}
+
+static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
+
+	return generic_file_write_nolock(file, &local_iov, 1, ppos);
+}
+
+static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
+				   size_t count, loff_t pos)
+{
+	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
+
+	return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+}
+
+static int block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+			unsigned long arg)
+{
+	return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
+}
+
+struct address_space_operations def_blk_aops = {
+	.readpage	= blkdev_readpage,
+	.writepage	= blkdev_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= blkdev_prepare_write,
+	.commit_write	= blkdev_commit_write,
+	.writepages	= generic_writepages,
+	.direct_IO	= blkdev_direct_IO,
+};
+
+struct file_operations def_blk_fops = {
+	.open		= blkdev_open,
+	.release	= blkdev_close,
+	.llseek		= block_llseek,
+	.read		= generic_file_read,
+	.write		= blkdev_file_write,
+  	.aio_read	= generic_file_aio_read,
+  	.aio_write	= blkdev_file_aio_write, 
+	.mmap		= generic_file_mmap,
+	.fsync		= block_fsync,
+	.ioctl		= block_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_blkdev_ioctl,
+#endif
+	.readv		= generic_file_readv,
+	.writev		= generic_file_write_nolock,
+	.sendfile	= generic_file_sendfile,
+};
+
+int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
+{
+	int res;
+	mm_segment_t old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
+	set_fs(old_fs);
+	return res;
+}
+
+EXPORT_SYMBOL(ioctl_by_bdev);
+
+/**
+ * lookup_bdev  - lookup a struct block_device by name
+ *
+ * @path:	special file representing the block device
+ *
+ * Get a reference to the blockdevice at @path in the current
+ * namespace if possible and return it.  Return ERR_PTR(error)
+ * otherwise.
+ */
+struct block_device *lookup_bdev(const char *path)
+{
+	struct block_device *bdev;
+	struct inode *inode;
+	struct nameidata nd;
+	int error;
+
+	if (!path || !*path)
+		return ERR_PTR(-EINVAL);
+
+	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
+	if (error)
+		return ERR_PTR(error);
+
+	inode = nd.dentry->d_inode;
+	error = -ENOTBLK;
+	if (!S_ISBLK(inode->i_mode))
+		goto fail;
+	error = -EACCES;
+	if (nd.mnt->mnt_flags & MNT_NODEV)
+		goto fail;
+	error = -ENOMEM;
+	bdev = bd_acquire(inode);
+	if (!bdev)
+		goto fail;
+out:
+	path_release(&nd);
+	return bdev;
+fail:
+	bdev = ERR_PTR(error);
+	goto out;
+}
+
+/**
+ * open_bdev_excl  -  open a block device by name and set it up for use
+ *
+ * @path:	special file representing the block device
+ * @flags:	%MS_RDONLY for opening read-only
+ * @holder:	owner for exclusion
+ *
+ * Open the blockdevice described by the special file at @path, claim it
+ * for @holder.
+ */
+struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
+{
+	struct block_device *bdev;
+	mode_t mode = FMODE_READ;
+	int error = 0;
+
+	bdev = lookup_bdev(path);
+	if (IS_ERR(bdev))
+		return bdev;
+
+	if (!(flags & MS_RDONLY))
+		mode |= FMODE_WRITE;
+	error = blkdev_get(bdev, mode, 0);
+	if (error)
+		return ERR_PTR(error);
+	error = -EACCES;
+	if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
+		goto blkdev_put;
+	error = bd_claim(bdev, holder);
+	if (error)
+		goto blkdev_put;
+
+	return bdev;
+	
+blkdev_put:
+	blkdev_put(bdev);
+	return ERR_PTR(error);
+}
+
+EXPORT_SYMBOL(open_bdev_excl);
+
+/**
+ * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
+ *
+ * @bdev:	blockdevice to close
+ *
+ * This is the counterpart to open_bdev_excl().
+ */
+void close_bdev_excl(struct block_device *bdev)
+{
+	bd_release(bdev);
+	blkdev_put(bdev);
+}
+
+EXPORT_SYMBOL(close_bdev_excl);
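+
+/*
+ * Illustrative sketch (not part of the original file): how a mount path
+ * might use open_bdev_excl()/close_bdev_excl().  The device path and
+ * example_holder are hypothetical.
+ */
+static int example_probe_device(const char *dev_path, void *example_holder)
+{
+	struct block_device *bdev;
+
+	bdev = open_bdev_excl(dev_path, MS_RDONLY, example_holder);
+	if (IS_ERR(bdev))
+		return PTR_ERR(bdev);
+
+	/* ... read and verify the on-disk superblock ... */
+
+	close_bdev_excl(bdev);
+	return 0;
+}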
diff --git a/fs/buffer.c b/fs/buffer.c
new file mode 100644
index 0000000..f961605
--- /dev/null
+++ b/fs/buffer.c
@@ -0,0 +1,3152 @@
+/*
+ *  linux/fs/buffer.c
+ *
+ *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
+ */
+
+/*
+ * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
+ *
+ * Removed a lot of unnecessary code and simplified things now that
+ * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
+ *
+ * Speed up hash, lru, and free list operations.  Use gfp() for allocating
+ * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
+ *
+ * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
+ *
+ * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/blkdev.h>
+#include <linux/file.h>
+#include <linux/quotaops.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/writeback.h>
+#include <linux/hash.h>
+#include <linux/suspend.h>
+#include <linux/buffer_head.h>
+#include <linux/bio.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+#include <linux/mpage.h>
+
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
+static void invalidate_bh_lrus(void);
+
+#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
+
+inline void
+init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
+{
+	bh->b_end_io = handler;
+	bh->b_private = private;
+}
+
+static int sync_buffer(void *word)
+{
+	struct block_device *bd;
+	struct buffer_head *bh
+		= container_of(word, struct buffer_head, b_state);
+
+	smp_mb();
+	bd = bh->b_bdev;
+	if (bd)
+		blk_run_address_space(bd->bd_inode->i_mapping);
+	io_schedule();
+	return 0;
+}
+
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+							TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_buffer);
+
+void fastcall unlock_buffer(struct buffer_head *bh)
+{
+	clear_buffer_locked(bh);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&bh->b_state, BH_Lock);
+}
+
+/*
+ * Block until a buffer comes unlocked.  This doesn't stop it
+ * from becoming locked again - you have to lock it yourself
+ * if you want to preserve its state.
+ */
+void __wait_on_buffer(struct buffer_head * bh)
+{
+	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+}
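+
+/*
+ * Illustrative sketch (not part of the original file): the difference the
+ * comment above describes, using the wait_on_buffer()/lock_buffer() wrappers
+ * from buffer_head.h.  example_wait_then_lock() is hypothetical.
+ */
+static void example_wait_then_lock(struct buffer_head *bh)
+{
+	/* only waits; the buffer may be locked again by someone else */
+	wait_on_buffer(bh);
+
+	/* takes the lock and keeps it until unlock_buffer() */
+	lock_buffer(bh);
+	/* ... examine or modify the buffer safely ... */
+	unlock_buffer(bh);
+}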
+
+static void
+__clear_page_buffers(struct page *page)
+{
+	ClearPagePrivate(page);
+	page->private = 0;
+	page_cache_release(page);
+}
+
+static void buffer_io_error(struct buffer_head *bh)
+{
+	char b[BDEVNAME_SIZE];
+
+	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
+			bdevname(bh->b_bdev, b),
+			(unsigned long long)bh->b_blocknr);
+}
+
+/*
+ * Default synchronous end-of-IO handler.  Just mark it up-to-date and
+ * unlock the buffer. This is what ll_rw_block uses too.
+ */
+void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+{
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		/* This happens, due to failed READA attempts. */
+		clear_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
+	put_bh(bh);
+}
+
+void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
+{
+	char b[BDEVNAME_SIZE];
+
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+			buffer_io_error(bh);
+			printk(KERN_WARNING "lost page write due to "
+					"I/O error on %s\n",
+				       bdevname(bh->b_bdev, b));
+		}
+		set_buffer_write_io_error(bh);
+		clear_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
+	put_bh(bh);
+}
+
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping.  Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+	int ret = 0;
+
+	if (bdev) {
+		int err;
+
+		ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
+		err = filemap_fdatawait(bdev->bd_inode->i_mapping);
+		if (!ret)
+			ret = err;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock.  Filesystem data as well as the underlying block
+ * device.  Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+	sync_inodes_sb(sb, 0);
+	DQUOT_SYNC(sb);
+	lock_super(sb);
+	if (sb->s_dirt && sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, 1);
+	sync_blockdev(sb->s_bdev);
+	sync_inodes_sb(sb, 1);
+
+	return sync_blockdev(sb->s_bdev);
+}
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device.   Filesystem data as well as the underlying block
+ * device.  Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+	struct super_block *sb = get_super(bdev);
+	if (sb) {
+		int res = fsync_super(sb);
+		drop_super(sb);
+		return res;
+	}
+	return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev  --  lock a filesystem and force it into a consistent state
+ * @bdev:	blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+	struct super_block *sb;
+
+	down(&bdev->bd_mount_sem);
+	sb = get_super(bdev);
+	if (sb && !(sb->s_flags & MS_RDONLY)) {
+		sb->s_frozen = SB_FREEZE_WRITE;
+		wmb();
+
+		sync_inodes_sb(sb, 0);
+		DQUOT_SYNC(sb);
+
+		lock_super(sb);
+		if (sb->s_dirt && sb->s_op->write_super)
+			sb->s_op->write_super(sb);
+		unlock_super(sb);
+
+		if (sb->s_op->sync_fs)
+			sb->s_op->sync_fs(sb, 1);
+
+		sync_blockdev(sb->s_bdev);
+		sync_inodes_sb(sb, 1);
+
+		sb->s_frozen = SB_FREEZE_TRANS;
+		wmb();
+
+		sync_blockdev(sb->s_bdev);
+
+		if (sb->s_op->write_super_lockfs)
+			sb->s_op->write_super_lockfs(sb);
+	}
+
+	sync_blockdev(bdev);
+	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev  -- unlock filesystem
+ * @bdev:	blockdevice to unlock
+ * @sb:		associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+void thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+	if (sb) {
+		BUG_ON(sb->s_bdev != bdev);
+
+		if (sb->s_op->unlockfs)
+			sb->s_op->unlockfs(sb);
+		sb->s_frozen = SB_UNFROZEN;
+		wmb();
+		wake_up(&sb->s_wait_unfrozen);
+		drop_super(sb);
+	}
+
+	up(&bdev->bd_mount_sem);
+}
+EXPORT_SYMBOL(thaw_bdev);
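+
+/*
+ * Illustrative sketch (not part of the original file): the snapshot pattern
+ * freeze_bdev()/thaw_bdev() are meant for.  example_snapshot() is
+ * hypothetical and the actual snapshot step is elided.
+ */
+static void example_snapshot(struct block_device *bdev)
+{
+	struct super_block *sb;
+
+	/* block new writers and flush everything to disk */
+	sb = freeze_bdev(bdev);
+
+	/* ... take the device-level snapshot here ... */
+
+	/* sb may be NULL if nothing was mounted; thaw_bdev() handles that */
+	thaw_bdev(bdev, sb);
+}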
+
+/*
+ * sync everything.  Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
+static void do_sync(unsigned long wait)
+{
+	wakeup_bdflush(0);
+	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
+	DQUOT_SYNC(NULL);
+	sync_supers();		/* Write the superblocks */
+	sync_filesystems(0);	/* Start syncing the filesystems */
+	sync_filesystems(wait);	/* Waitingly sync the filesystems */
+	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
+	if (!wait)
+		printk("Emergency Sync complete\n");
+	if (unlikely(laptop_mode))
+		laptop_sync_completion();
+}
+
+asmlinkage long sys_sync(void)
+{
+	do_sync(1);
+	return 0;
+}
+
+void emergency_sync(void)
+{
+	pdflush_operation(do_sync, 0);
+}
+
+/*
+ * Generic function to fsync a file.
+ *
+ * filp may be NULL if called via the msync of a vma.
+ */
+ 
+int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+	struct inode * inode = dentry->d_inode;
+	struct super_block * sb;
+	int ret, err;
+
+	/* sync the inode to buffers */
+	ret = write_inode_now(inode, 0);
+
+	/* sync the superblock to buffers */
+	sb = inode->i_sb;
+	lock_super(sb);
+	if (sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+
+	/* .. finally sync the buffers to disk */
+	err = sync_blockdev(sb->s_bdev);
+	if (!ret)
+		ret = err;
+	return ret;
+}
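+
+/*
+ * Illustrative sketch (the "myfs" name is made up): simple disk
+ * filesystems which keep no private journalling state can use this
+ * helper directly as their ->fsync method:
+ *
+ *	struct file_operations myfs_file_operations = {
+ *		.read	= generic_file_read,
+ *		.write	= generic_file_write,
+ *		.mmap	= generic_file_mmap,
+ *		.fsync	= file_fsync,
+ *	};
+ */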
+
+asmlinkage long sys_fsync(unsigned int fd)
+{
+	struct file * file;
+	struct address_space *mapping;
+	int ret, err;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	mapping = file->f_mapping;
+
+	ret = -EINVAL;
+	if (!file->f_op || !file->f_op->fsync) {
+		/* Why?  We can still call filemap_fdatawrite */
+		goto out_putf;
+	}
+
+	current->flags |= PF_SYNCWRITE;
+	ret = filemap_fdatawrite(mapping);
+
+	/*
+	 * We need to protect against concurrent writers,
+	 * which could cause livelocks in fsync_buffers_list
+	 */
+	down(&mapping->host->i_sem);
+	err = file->f_op->fsync(file, file->f_dentry, 0);
+	if (!ret)
+		ret = err;
+	up(&mapping->host->i_sem);
+	err = filemap_fdatawait(mapping);
+	if (!ret)
+		ret = err;
+	current->flags &= ~PF_SYNCWRITE;
+
+out_putf:
+	fput(file);
+out:
+	return ret;
+}
+
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+	struct file * file;
+	struct address_space *mapping;
+	int ret, err;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	ret = -EINVAL;
+	if (!file->f_op || !file->f_op->fsync)
+		goto out_putf;
+
+	mapping = file->f_mapping;
+
+	current->flags |= PF_SYNCWRITE;
+	ret = filemap_fdatawrite(mapping);
+	down(&mapping->host->i_sem);
+	err = file->f_op->fsync(file, file->f_dentry, 1);
+	if (!ret)
+		ret = err;
+	up(&mapping->host->i_sem);
+	err = filemap_fdatawait(mapping);
+	if (!ret)
+		ret = err;
+	current->flags &= ~PF_SYNCWRITE;
+
+out_putf:
+	fput(file);
+out:
+	return ret;
+}
+
+/*
+ * Various filesystems appear to want __find_get_block to be non-blocking.
+ * But it's the page lock which protects the buffers.  To get around this,
+ * we get exclusion from try_to_free_buffers with the blockdev mapping's
+ * private_lock.
+ *
+ * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
+ * may be quite high.  This code could TryLock the page, and if that
+ * succeeds, there is no need to take private_lock. (But if
+ * private_lock is contended then so is mapping->tree_lock).
+ */
+static struct buffer_head *
+__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+{
+	struct inode *bd_inode = bdev->bd_inode;
+	struct address_space *bd_mapping = bd_inode->i_mapping;
+	struct buffer_head *ret = NULL;
+	pgoff_t index;
+	struct buffer_head *bh;
+	struct buffer_head *head;
+	struct page *page;
+	int all_mapped = 1;
+
+	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+	page = find_get_page(bd_mapping, index);
+	if (!page)
+		goto out;
+
+	spin_lock(&bd_mapping->private_lock);
+	if (!page_has_buffers(page))
+		goto out_unlock;
+	head = page_buffers(page);
+	bh = head;
+	do {
+		if (bh->b_blocknr == block) {
+			ret = bh;
+			get_bh(bh);
+			goto out_unlock;
+		}
+		if (!buffer_mapped(bh))
+			all_mapped = 0;
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	/* we might be here because some of the buffers on this page are
+	 * not mapped.  This is due to various races between
+	 * file io on the block device and getblk.  It gets dealt with
+	 * elsewhere, don't buffer_error if we had some unmapped buffers
+	 */
+	if (all_mapped) {
+		printk("__find_get_block_slow() failed. "
+			"block=%llu, b_blocknr=%llu\n",
+			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
+		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
+	}
+out_unlock:
+	spin_unlock(&bd_mapping->private_lock);
+	page_cache_release(page);
+out:
+	return ret;
+}
+
+/* If invalidate_buffers() will trash dirty buffers, it means some kind
+   of fs corruption is going on. Trashing dirty data always implies losing
+   information that was supposed to be just stored on the physical layer
+   by the user.
+
+   Thus invalidate_buffers in general usage is not allowed to trash
+   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
+   be preserved.  These buffers are simply skipped.
+
+   We also skip buffers which are still in use.  For example this can
+   happen if a userspace program is reading the block device.
+
+   NOTE: in the case where the user removed a removable-media disk while
+   there was still dirty data not synced to disk (due to a bug in the
+   device driver or to an error by the user), not destroying the dirty
+   buffers could also corrupt the next medium inserted, so a parameter is
+   necessary to handle this case in the safest way possible (trying not
+   to corrupt the newly inserted disk with data belonging to the old,
+   now-corrupted one).  For the ramdisk, on the other hand, the natural
+   way to release the ramdisk memory is to destroy the dirty buffers.
+
+   These are two special cases. Normal usage implies that the device driver
+   issues a sync on the device (without waiting for I/O completion) and
+   then an invalidate_buffers call that doesn't trash dirty buffers.
+
+   For handling cache coherency with the blkdev pagecache the 'update' case
+   has been introduced. It is needed to re-read from disk any pinned
+   buffer. NOTE: re-reading from disk is destructive so we can do it only
+   when we assume nobody is changing the buffercache under our I/O and when
+   we think the disk contains more recent information than the buffercache.
+   The update == 1 pass marks the buffers we need to update, the update == 2
+   pass does the actual I/O. */
+void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
+{
+	invalidate_bh_lrus();
+	/*
+	 * FIXME: what about destroy_dirty_buffers?
+	 * We really want to use invalidate_inode_pages2() for
+	 * that, but not until that's cleaned up.
+	 */
+	invalidate_inode_pages(bdev->bd_inode->i_mapping);
+}
+
+/*
+ * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ */
+static void free_more_memory(void)
+{
+	struct zone **zones;
+	pg_data_t *pgdat;
+
+	wakeup_bdflush(1024);
+	yield();
+
+	for_each_pgdat(pgdat) {
+		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+		if (*zones)
+			try_to_free_pages(zones, GFP_NOFS, 0);
+	}
+}
+
+/*
+ * I/O completion handler for block_read_full_page() - pages
+ * which come unlocked at the end of I/O.
+ */
+static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+{
+	static DEFINE_SPINLOCK(page_uptodate_lock);
+	unsigned long flags;
+	struct buffer_head *tmp;
+	struct page *page;
+	int page_uptodate = 1;
+
+	BUG_ON(!buffer_async_read(bh));
+
+	page = bh->b_page;
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		clear_buffer_uptodate(bh);
+		if (printk_ratelimit())
+			buffer_io_error(bh);
+		SetPageError(page);
+	}
+
+	/*
+	 * Be _very_ careful from here on. Bad things can happen if
+	 * two buffer heads end IO at almost the same time and both
+	 * decide that the page is now completely done.
+	 */
+	spin_lock_irqsave(&page_uptodate_lock, flags);
+	clear_buffer_async_read(bh);
+	unlock_buffer(bh);
+	tmp = bh;
+	do {
+		if (!buffer_uptodate(tmp))
+			page_uptodate = 0;
+		if (buffer_async_read(tmp)) {
+			BUG_ON(!buffer_locked(tmp));
+			goto still_busy;
+		}
+		tmp = tmp->b_this_page;
+	} while (tmp != bh);
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+
+	/*
+	 * If none of the buffers had errors and they are all
+	 * uptodate then we can set the page uptodate.
+	 */
+	if (page_uptodate && !PageError(page))
+		SetPageUptodate(page);
+	unlock_page(page);
+	return;
+
+still_busy:
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	return;
+}
+
+/*
+ * Completion handler for block_write_full_page() - pages which are unlocked
+ * during I/O, and which have PageWriteback cleared upon I/O completion.
+ */
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+{
+	char b[BDEVNAME_SIZE];
+	static DEFINE_SPINLOCK(page_uptodate_lock);
+	unsigned long flags;
+	struct buffer_head *tmp;
+	struct page *page;
+
+	BUG_ON(!buffer_async_write(bh));
+
+	page = bh->b_page;
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		if (printk_ratelimit()) {
+			buffer_io_error(bh);
+			printk(KERN_WARNING "lost page write due to "
+					"I/O error on %s\n",
+			       bdevname(bh->b_bdev, b));
+		}
+		set_bit(AS_EIO, &page->mapping->flags);
+		clear_buffer_uptodate(bh);
+		SetPageError(page);
+	}
+
+	spin_lock_irqsave(&page_uptodate_lock, flags);
+	clear_buffer_async_write(bh);
+	unlock_buffer(bh);
+	tmp = bh->b_this_page;
+	while (tmp != bh) {
+		if (buffer_async_write(tmp)) {
+			BUG_ON(!buffer_locked(tmp));
+			goto still_busy;
+		}
+		tmp = tmp->b_this_page;
+	}
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	end_page_writeback(page);
+	return;
+
+still_busy:
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	return;
+}
+
+/*
+ * If a page's buffers are under async read-in (end_buffer_async_read
+ * completion) then there is a possibility that another thread of
+ * control could lock one of the buffers after it has completed
+ * but while some of the other buffers have not completed.  This
+ * locked buffer would confuse end_buffer_async_read() into not unlocking
+ * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
+ * that this buffer is not under async I/O.
+ *
+ * The page comes unlocked when it has no locked buffer_async buffers
+ * left.
+ *
+ * PageLocked prevents anyone starting new async I/O reads any of
+ * the buffers.
+ *
+ * PageWriteback is used to prevent simultaneous writeout of the same
+ * page.
+ *
+ * PageLocked prevents anyone from starting writeback of a page which is
+ * under read I/O (PageWriteback is only ever set against a locked page).
+ */
+static void mark_buffer_async_read(struct buffer_head *bh)
+{
+	bh->b_end_io = end_buffer_async_read;
+	set_buffer_async_read(bh);
+}
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+	bh->b_end_io = end_buffer_async_write;
+	set_buffer_async_write(bh);
+}
+EXPORT_SYMBOL(mark_buffer_async_write);
+
+
+/*
+ * fs/buffer.c contains helper functions for buffer-backed address space's
+ * fsync functions.  A common requirement for buffer-based filesystems is
+ * that certain data from the backing blockdev needs to be written out for
+ * a successful fsync().  For example, ext2 indirect blocks need to be
+ * written back and waited upon before fsync() returns.
+ *
+ * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
+ * inode_has_buffers() and invalidate_inode_buffers() are provided for the
+ * management of a list of dependent buffers at ->i_mapping->private_list.
+ *
+ * Locking is a little subtle: try_to_free_buffers() will remove buffers
+ * from their controlling inode's queue when they are being freed.  But
+ * try_to_free_buffers() will be operating against the *blockdev* mapping
+ * at the time, not against the S_ISREG file which depends on those buffers.
+ * So the locking for private_list is via the private_lock in the address_space
+ * which backs the buffers.  Which is different from the address_space 
+ * against which the buffers are listed.  So for a particular address_space,
+ * mapping->private_lock does *not* protect mapping->private_list!  In fact,
+ * mapping->private_list will always be protected by the backing blockdev's
+ * ->private_lock.
+ *
+ * Which introduces a requirement: all buffers on an address_space's
+ * ->private_list must be from the same address_space: the blockdev's.
+ *
+ * address_spaces which do not place buffers at ->private_list via these
+ * utility functions are free to use private_lock and private_list for
+ * whatever they want.  The only requirement is that list_empty(private_list)
+ * be true at clear_inode() time.
+ *
+ * FIXME: clear_inode should not call invalidate_inode_buffers().  The
+ * filesystems should do that.  invalidate_inode_buffers() should just go
+ * BUG_ON(!list_empty).
+ *
+ * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
+ * take an address_space, not an inode.  And it should be called
+ * mark_buffer_dirty_fsync() to clearly define why those buffers are being
+ * queued up.
+ *
+ * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
+ * list if it is already on a list.  Because if the buffer is on a list,
+ * it *must* already be on the right one.  If not, the filesystem is being
+ * silly.  This will save a ton of locking.  But first we have to ensure
+ * that buffers are taken *off* the old inode's list when they are freed
+ * (presumably in truncate).  That requires careful auditing of all
+ * filesystems (do it inside bforget()).  It could also be done by bringing
+ * b_inode back.
+ */
+
+/*
+ * The buffer's backing address_space's private_lock must be held
+ */
+static inline void __remove_assoc_queue(struct buffer_head *bh)
+{
+	list_del_init(&bh->b_assoc_buffers);
+}
+
+int inode_has_buffers(struct inode *inode)
+{
+	return !list_empty(&inode->i_data.private_list);
+}
+
+/*
+ * osync is designed to support O_SYNC io.  It waits synchronously for
+ * all already-submitted IO to complete, but does not queue any new
+ * writes to the disk.
+ *
+ * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
+ * you dirty the buffers, and then use osync_inode_buffers to wait for
+ * completion.  Any other dirty buffers which are not yet queued for
+ * write will not be flushed to disk by the osync.
+ */
+static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
+{
+	struct buffer_head *bh;
+	struct list_head *p;
+	int err = 0;
+
+	spin_lock(lock);
+repeat:
+	list_for_each_prev(p, list) {
+		bh = BH_ENTRY(p);
+		if (buffer_locked(bh)) {
+			get_bh(bh);
+			spin_unlock(lock);
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh))
+				err = -EIO;
+			brelse(bh);
+			spin_lock(lock);
+			goto repeat;
+		}
+	}
+	spin_unlock(lock);
+	return err;
+}
+
+/**
+ * sync_mapping_buffers - write out and wait upon a mapping's "associated"
+ *                        buffers
+ * @mapping - the mapping which wants its associated buffers written
+ *
+ * Starts I/O against the buffers at mapping->private_list, and waits upon
+ * that I/O.
+ *
+ * Basically, this is a convenience function for fsync().  The buffers are
+ * owned by the blockdev at mapping->assoc_mapping, and @mapping is the file
+ * or directory which needs those buffers to be written for a successful
+ * fsync().
+ */
+int sync_mapping_buffers(struct address_space *mapping)
+{
+	struct address_space *buffer_mapping = mapping->assoc_mapping;
+
+	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
+		return 0;
+
+	return fsync_buffers_list(&buffer_mapping->private_lock,
+					&mapping->private_list);
+}
+EXPORT_SYMBOL(sync_mapping_buffers);
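+
+/*
+ * A minimal sketch of a caller, loosely modelled on what a simple
+ * ext2-like ->fsync() does; "myfs_fsync" and its policy are
+ * illustrative, not part of this file:
+ *
+ *	int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+ *	{
+ *		struct inode *inode = dentry->d_inode;
+ *		int err = sync_mapping_buffers(inode->i_mapping);
+ *		int err2 = 0;
+ *
+ *		if (!datasync || (inode->i_state & I_DIRTY_DATASYNC))
+ *			err2 = write_inode_now(inode, 1);
+ *		return err ? err : err2;
+ *	}
+ *
+ * The data pages themselves are written and waited upon by the caller of
+ * ->fsync (see sys_fsync above); this only covers the "associated"
+ * metadata buffers such as ext2's indirect blocks.
+ */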
+
+/*
+ * Called when we've recently written block `bblock', and it is known that
+ * `bblock' was for a buffer_boundary() buffer.  This means that the block at
+ * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
+ * dirty, schedule it for IO.  So that indirects merge nicely with their data.
+ */
+void write_boundary_block(struct block_device *bdev,
+			sector_t bblock, unsigned blocksize)
+{
+	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
+	if (bh) {
+		if (buffer_dirty(bh))
+			ll_rw_block(WRITE, 1, &bh);
+		put_bh(bh);
+	}
+}
+
+void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct address_space *buffer_mapping = bh->b_page->mapping;
+
+	mark_buffer_dirty(bh);
+	if (!mapping->assoc_mapping) {
+		mapping->assoc_mapping = buffer_mapping;
+	} else {
+		if (mapping->assoc_mapping != buffer_mapping)
+			BUG();
+	}
+	if (list_empty(&bh->b_assoc_buffers)) {
+		spin_lock(&buffer_mapping->private_lock);
+		list_move_tail(&bh->b_assoc_buffers,
+				&mapping->private_list);
+		spin_unlock(&buffer_mapping->private_lock);
+	}
+}
+EXPORT_SYMBOL(mark_buffer_dirty_inode);
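+
+/*
+ * Sketch of the intended pairing (hypothetical "myfs" code; the block
+ * number variable is a placeholder): when a filesystem dirties a piece
+ * of metadata that a later fsync() of the inode must flush, it files the
+ * buffer here, and sync_mapping_buffers() picks it up from ->fsync():
+ *
+ *	bh = sb_getblk(inode->i_sb, indirect_block_nr);
+ *	...update the block pointers in bh->b_data...
+ *	mark_buffer_dirty_inode(bh, inode);
+ *	brelse(bh);
+ */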
+
+/*
+ * Add a page to the dirty page list.
+ *
+ * It is a sad fact of life that this function is called from several places
+ * deeply under spinlocking.  It may not sleep.
+ *
+ * If the page has buffers, the uptodate buffers are set dirty, to preserve
+ * dirty-state coherency between the page and the buffers.  If the page does
+ * not have buffers then when they are later attached they will all be set
+ * dirty.
+ *
+ * The buffers are dirtied before the page is dirtied.  There's a small race
+ * window in which a writepage caller may see the page cleanness but not the
+ * buffer dirtiness.  That's fine.  If this code were to set the page dirty
+ * before the buffers, a concurrent writepage caller could clear the page dirty
+ * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
+ * page on the dirty page list.
+ *
+ * We use private_lock to lock against try_to_free_buffers while using the
+ * page's buffer list.  Also use this to protect against clean buffers being
+ * added to the page after it was set dirty.
+ *
+ * FIXME: may need to call ->reservepage here as well.  That's rather up to the
+ * address_space though.
+ */
+int __set_page_dirty_buffers(struct page *page)
+{
+	struct address_space * const mapping = page->mapping;
+
+	spin_lock(&mapping->private_lock);
+	if (page_has_buffers(page)) {
+		struct buffer_head *head = page_buffers(page);
+		struct buffer_head *bh = head;
+
+		do {
+			set_buffer_dirty(bh);
+			bh = bh->b_this_page;
+		} while (bh != head);
+	}
+	spin_unlock(&mapping->private_lock);
+
+	if (!TestSetPageDirty(page)) {
+		write_lock_irq(&mapping->tree_lock);
+		if (page->mapping) {	/* Race with truncate? */
+			if (mapping_cap_account_dirty(mapping))
+				inc_page_state(nr_dirty);
+			radix_tree_tag_set(&mapping->page_tree,
+						page_index(page),
+						PAGECACHE_TAG_DIRTY);
+		}
+		write_unlock_irq(&mapping->tree_lock);
+		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	}
+	
+	return 0;
+}
+EXPORT_SYMBOL(__set_page_dirty_buffers);
+
+/*
+ * Write out and wait upon a list of buffers.
+ *
+ * We have conflicting pressures: we want to make sure that all
+ * initially dirty buffers get waited on, but that any subsequently
+ * dirtied buffers don't.  After all, we don't want fsync to last
+ * forever if somebody is actively writing to the file.
+ *
+ * Do this in two main stages: first we copy dirty buffers to a
+ * temporary inode list, queueing the writes as we go.  Then we clean
+ * up, waiting for those writes to complete.
+ * 
+ * During this second stage, any subsequent updates to the file may end
+ * up refiling the buffer on the original inode's dirty list again, so
+ * there is a chance we will end up with a buffer queued for write but
+ * not yet completed on that list.  So, as a final cleanup we go through
+ * the osync code to catch these locked, dirty buffers without requeuing
+ * any newly dirty buffers for write.
+ */
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
+{
+	struct buffer_head *bh;
+	struct list_head tmp;
+	int err = 0, err2;
+
+	INIT_LIST_HEAD(&tmp);
+
+	spin_lock(lock);
+	while (!list_empty(list)) {
+		bh = BH_ENTRY(list->next);
+		list_del_init(&bh->b_assoc_buffers);
+		if (buffer_dirty(bh) || buffer_locked(bh)) {
+			list_add(&bh->b_assoc_buffers, &tmp);
+			if (buffer_dirty(bh)) {
+				get_bh(bh);
+				spin_unlock(lock);
+				/*
+				 * Ensure any pending I/O completes so that
+				 * ll_rw_block() actually writes the current
+				 * contents - it is a noop if I/O is still in
+				 * flight on potentially older contents.
+				 */
+				wait_on_buffer(bh);
+				ll_rw_block(WRITE, 1, &bh);
+				brelse(bh);
+				spin_lock(lock);
+			}
+		}
+	}
+
+	while (!list_empty(&tmp)) {
+		bh = BH_ENTRY(tmp.prev);
+		__remove_assoc_queue(bh);
+		get_bh(bh);
+		spin_unlock(lock);
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh))
+			err = -EIO;
+		brelse(bh);
+		spin_lock(lock);
+	}
+	
+	spin_unlock(lock);
+	err2 = osync_buffers_list(lock, list);
+	if (err)
+		return err;
+	else
+		return err2;
+}
+
+/*
+ * Invalidate any and all dirty buffers on a given inode.  We are
+ * probably unmounting the fs, but that doesn't mean we have already
+ * done a sync().  Just drop the buffers from the inode list.
+ *
+ * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
+ * assumes that all the buffers are against the blockdev.  Not true
+ * for reiserfs.
+ */
+void invalidate_inode_buffers(struct inode *inode)
+{
+	if (inode_has_buffers(inode)) {
+		struct address_space *mapping = &inode->i_data;
+		struct list_head *list = &mapping->private_list;
+		struct address_space *buffer_mapping = mapping->assoc_mapping;
+
+		spin_lock(&buffer_mapping->private_lock);
+		while (!list_empty(list))
+			__remove_assoc_queue(BH_ENTRY(list->next));
+		spin_unlock(&buffer_mapping->private_lock);
+	}
+}
+
+/*
+ * Remove any clean buffers from the inode's buffer list.  This is called
+ * when we're trying to free the inode itself.  Those buffers can pin it.
+ *
+ * Returns true if all buffers were removed.
+ */
+int remove_inode_buffers(struct inode *inode)
+{
+	int ret = 1;
+
+	if (inode_has_buffers(inode)) {
+		struct address_space *mapping = &inode->i_data;
+		struct list_head *list = &mapping->private_list;
+		struct address_space *buffer_mapping = mapping->assoc_mapping;
+
+		spin_lock(&buffer_mapping->private_lock);
+		while (!list_empty(list)) {
+			struct buffer_head *bh = BH_ENTRY(list->next);
+			if (buffer_dirty(bh)) {
+				ret = 0;
+				break;
+			}
+			__remove_assoc_queue(bh);
+		}
+		spin_unlock(&buffer_mapping->private_lock);
+	}
+	return ret;
+}
+
+/*
+ * Create the appropriate buffers when given a page for the data area and
+ * the size of each buffer.  Use the bh->b_this_page linked list to
+ * follow the buffers created.  Return NULL if unable to create more
+ * buffers.
+ *
+ * The retry flag is used to differentiate async IO (paging, swapping),
+ * which may not fail, from ordinary buffer allocations, which may.
+ */
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
+		int retry)
+{
+	struct buffer_head *bh, *head;
+	long offset;
+
+try_again:
+	head = NULL;
+	offset = PAGE_SIZE;
+	while ((offset -= size) >= 0) {
+		bh = alloc_buffer_head(GFP_NOFS);
+		if (!bh)
+			goto no_grow;
+
+		bh->b_bdev = NULL;
+		bh->b_this_page = head;
+		bh->b_blocknr = -1;
+		head = bh;
+
+		bh->b_state = 0;
+		atomic_set(&bh->b_count, 0);
+		bh->b_size = size;
+
+		/* Link the buffer to its page */
+		set_bh_page(bh, page, offset);
+
+		bh->b_end_io = NULL;
+	}
+	return head;
+/*
+ * In case anything failed, we just free everything we got.
+ */
+no_grow:
+	if (head) {
+		do {
+			bh = head;
+			head = head->b_this_page;
+			free_buffer_head(bh);
+		} while (head);
+	}
+
+	/*
+	 * Return failure for non-async IO requests.  Async IO requests
+	 * are not allowed to fail, so we have to wait until buffer heads
+	 * become available.  But we don't want tasks sleeping with 
+	 * partially complete buffers, so all were released above.
+	 */
+	if (!retry)
+		return NULL;
+
+	/* We're _really_ low on memory. Now we just
+	 * wait for old buffer heads to become free due to
+	 * finishing IO.  Since this is an async request and
+	 * the reserve list is empty, we're sure there are 
+	 * async buffer heads in use.
+	 */
+	free_more_memory();
+	goto try_again;
+}
+EXPORT_SYMBOL_GPL(alloc_page_buffers);
+
+static inline void
+link_dev_buffers(struct page *page, struct buffer_head *head)
+{
+	struct buffer_head *bh, *tail;
+
+	bh = head;
+	do {
+		tail = bh;
+		bh = bh->b_this_page;
+	} while (bh);
+	tail->b_this_page = head;
+	attach_page_buffers(page, head);
+}
+
+/*
+ * Initialise the state of a blockdev page's buffers.
+ */ 
+static void
+init_page_buffers(struct page *page, struct block_device *bdev,
+			sector_t block, int size)
+{
+	struct buffer_head *head = page_buffers(page);
+	struct buffer_head *bh = head;
+	int uptodate = PageUptodate(page);
+
+	do {
+		if (!buffer_mapped(bh)) {
+			init_buffer(bh, NULL, NULL);
+			bh->b_bdev = bdev;
+			bh->b_blocknr = block;
+			if (uptodate)
+				set_buffer_uptodate(bh);
+			set_buffer_mapped(bh);
+		}
+		block++;
+		bh = bh->b_this_page;
+	} while (bh != head);
+}
+
+/*
+ * Create the page-cache page that contains the requested block.
+ *
+ * This is used purely for blockdev mappings.
+ */
+static struct page *
+grow_dev_page(struct block_device *bdev, sector_t block,
+		pgoff_t index, int size)
+{
+	struct inode *inode = bdev->bd_inode;
+	struct page *page;
+	struct buffer_head *bh;
+
+	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+	if (!page)
+		return NULL;
+
+	if (!PageLocked(page))
+		BUG();
+
+	if (page_has_buffers(page)) {
+		bh = page_buffers(page);
+		if (bh->b_size == size) {
+			init_page_buffers(page, bdev, block, size);
+			return page;
+		}
+		if (!try_to_free_buffers(page))
+			goto failed;
+	}
+
+	/*
+	 * Allocate some buffers for this page
+	 */
+	bh = alloc_page_buffers(page, size, 0);
+	if (!bh)
+		goto failed;
+
+	/*
+	 * Link the page to the buffers and initialise them.  Take the
+	 * lock to be atomic wrt __find_get_block(), which does not
+	 * run under the page lock.
+	 */
+	spin_lock(&inode->i_mapping->private_lock);
+	link_dev_buffers(page, bh);
+	init_page_buffers(page, bdev, block, size);
+	spin_unlock(&inode->i_mapping->private_lock);
+	return page;
+
+failed:
+	BUG();
+	unlock_page(page);
+	page_cache_release(page);
+	return NULL;
+}
+
+/*
+ * Create buffers for the specified block device block's page.  If
+ * that page was dirty, the buffers are set dirty also.
+ *
+ * Except that's a bug.  Attaching dirty buffers to a dirty
+ * blockdev's page can result in filesystem corruption, because
+ * some of those buffers may be aliases of filesystem data.
+ * grow_dev_page() will go BUG() if this happens.
+ */
+static inline int
+grow_buffers(struct block_device *bdev, sector_t block, int size)
+{
+	struct page *page;
+	pgoff_t index;
+	int sizebits;
+
+	sizebits = -1;
+	do {
+		sizebits++;
+	} while ((size << sizebits) < PAGE_SIZE);
+
+	index = block >> sizebits;
+	block = index << sizebits;
+
+	/* Create a page with the proper size buffers.. */
+	page = grow_dev_page(bdev, block, index, size);
+	if (!page)
+		return 0;
+	unlock_page(page);
+	page_cache_release(page);
+	return 1;
+}
+
+struct buffer_head *
+__getblk_slow(struct block_device *bdev, sector_t block, int size)
+{
+	/* Size must be multiple of hard sectorsize */
+	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+			(size < 512 || size > PAGE_SIZE))) {
+		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
+					size);
+		printk(KERN_ERR "hardsect size: %d\n",
+					bdev_hardsect_size(bdev));
+
+		dump_stack();
+		return NULL;
+	}
+
+	for (;;) {
+		struct buffer_head * bh;
+
+		bh = __find_get_block(bdev, block, size);
+		if (bh)
+			return bh;
+
+		if (!grow_buffers(bdev, block, size))
+			free_more_memory();
+	}
+}
+
+/*
+ * The relationship between dirty buffers and dirty pages:
+ *
+ * Whenever a page has any dirty buffers, the page's dirty bit is set, and
+ * the page is tagged dirty in its radix tree.
+ *
+ * At all times, the dirtiness of the buffers represents the dirtiness of
+ * subsections of the page.  If the page has buffers, the page dirty bit is
+ * merely a hint about the true dirty state.
+ *
+ * When a page is set dirty in its entirety, all its buffers are marked dirty
+ * (if the page has buffers).
+ *
+ * When a buffer is marked dirty, its page is dirtied, but the page's other
+ * buffers are not.
+ *
+ * Also.  When blockdev buffers are explicitly read with bread(), they
+ * individually become uptodate.  But their backing page remains not
+ * uptodate - even if all of its buffers are uptodate.  A subsequent
+ * block_read_full_page() against that page will discover all the uptodate
+ * buffers, will set the page uptodate and will perform no I/O.
+ */
+
+/**
+ * mark_buffer_dirty - mark a buffer_head as needing writeout
+ *
+ * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
+ * backing page dirty, then tag the page as dirty in its address_space's radix
+ * tree and then attach the address_space's inode to its superblock's dirty
+ * inode list.
+ *
+ * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
+ * mapping->tree_lock and the global inode_lock.
+ */
+void fastcall mark_buffer_dirty(struct buffer_head *bh)
+{
+	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+		__set_page_dirty_nobuffers(bh->b_page);
+}
+
+/*
+ * Decrement a buffer_head's reference count.  If all buffers against a page
+ * have zero reference count, are clean and unlocked, and if the page is clean
+ * and unlocked then try_to_free_buffers() may strip the buffers from the page
+ * in preparation for freeing it (sometimes, rarely, buffers are removed from
+ * a page but it ends up not being freed, and buffers may later be reattached).
+ */
+void __brelse(struct buffer_head * buf)
+{
+	if (atomic_read(&buf->b_count)) {
+		put_bh(buf);
+		return;
+	}
+	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
+	WARN_ON(1);
+}
+
+/*
+ * bforget() is like brelse(), except it discards any
+ * potentially dirty data.
+ */
+void __bforget(struct buffer_head *bh)
+{
+	clear_buffer_dirty(bh);
+	if (!list_empty(&bh->b_assoc_buffers)) {
+		struct address_space *buffer_mapping = bh->b_page->mapping;
+
+		spin_lock(&buffer_mapping->private_lock);
+		list_del_init(&bh->b_assoc_buffers);
+		spin_unlock(&buffer_mapping->private_lock);
+	}
+	__brelse(bh);
+}
+
+static struct buffer_head *__bread_slow(struct buffer_head *bh)
+{
+	lock_buffer(bh);
+	if (buffer_uptodate(bh)) {
+		unlock_buffer(bh);
+		return bh;
+	} else {
+		get_bh(bh);
+		bh->b_end_io = end_buffer_read_sync;
+		submit_bh(READ, bh);
+		wait_on_buffer(bh);
+		if (buffer_uptodate(bh))
+			return bh;
+	}
+	brelse(bh);
+	return NULL;
+}
+
+/*
+ * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
+ * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
+ * refcount elevated by one when they're in an LRU.  A buffer can only appear
+ * once in a particular CPU's LRU.  A single buffer can be present in multiple
+ * CPU's LRUs at the same time.
+ *
+ * This is a transparent caching front-end to sb_bread(), sb_getblk() and
+ * sb_find_get_block().
+ *
+ * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
+ * a local interrupt disable for that.
+ */
+
+#define BH_LRU_SIZE	8
+
+struct bh_lru {
+	struct buffer_head *bhs[BH_LRU_SIZE];
+};
+
+static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
+
+#ifdef CONFIG_SMP
+#define bh_lru_lock()	local_irq_disable()
+#define bh_lru_unlock()	local_irq_enable()
+#else
+#define bh_lru_lock()	preempt_disable()
+#define bh_lru_unlock()	preempt_enable()
+#endif
+
+static inline void check_irqs_on(void)
+{
+#ifdef irqs_disabled
+	BUG_ON(irqs_disabled());
+#endif
+}
+
+/*
+ * The LRU management algorithm is dopey-but-simple.  Sorry.
+ */
+static void bh_lru_install(struct buffer_head *bh)
+{
+	struct buffer_head *evictee = NULL;
+	struct bh_lru *lru;
+
+	check_irqs_on();
+	bh_lru_lock();
+	lru = &__get_cpu_var(bh_lrus);
+	if (lru->bhs[0] != bh) {
+		struct buffer_head *bhs[BH_LRU_SIZE];
+		int in;
+		int out = 0;
+
+		get_bh(bh);
+		bhs[out++] = bh;
+		for (in = 0; in < BH_LRU_SIZE; in++) {
+			struct buffer_head *bh2 = lru->bhs[in];
+
+			if (bh2 == bh) {
+				__brelse(bh2);
+			} else {
+				if (out >= BH_LRU_SIZE) {
+					BUG_ON(evictee != NULL);
+					evictee = bh2;
+				} else {
+					bhs[out++] = bh2;
+				}
+			}
+		}
+		while (out < BH_LRU_SIZE)
+			bhs[out++] = NULL;
+		memcpy(lru->bhs, bhs, sizeof(bhs));
+	}
+	bh_lru_unlock();
+
+	if (evictee)
+		__brelse(evictee);
+}
+
+/*
+ * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
+ */
+static inline struct buffer_head *
+lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+{
+	struct buffer_head *ret = NULL;
+	struct bh_lru *lru;
+	int i;
+
+	check_irqs_on();
+	bh_lru_lock();
+	lru = &__get_cpu_var(bh_lrus);
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		struct buffer_head *bh = lru->bhs[i];
+
+		if (bh && bh->b_bdev == bdev &&
+				bh->b_blocknr == block && bh->b_size == size) {
+			if (i) {
+				while (i) {
+					lru->bhs[i] = lru->bhs[i - 1];
+					i--;
+				}
+				lru->bhs[0] = bh;
+			}
+			get_bh(bh);
+			ret = bh;
+			break;
+		}
+	}
+	bh_lru_unlock();
+	return ret;
+}
+
+/*
+ * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
+ * it in the LRU and mark it as accessed.  If it is not present then return
+ * NULL
+ */
+struct buffer_head *
+__find_get_block(struct block_device *bdev, sector_t block, int size)
+{
+	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
+
+	if (bh == NULL) {
+		bh = __find_get_block_slow(bdev, block, size);
+		if (bh)
+			bh_lru_install(bh);
+	}
+	if (bh)
+		touch_buffer(bh);
+	return bh;
+}
+EXPORT_SYMBOL(__find_get_block);
+
+/*
+ * __getblk will locate (and, if necessary, create) the buffer_head
+ * which corresponds to the passed block_device, block and size. The
+ * returned buffer has its reference count incremented.
+ *
+ * __getblk() cannot fail - it just keeps trying.  If you pass it an
+ * illegal block number, __getblk() will happily return a buffer_head
+ * which represents the non-existent block.  Very weird.
+ *
+ * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
+ * attempt is failing.  FIXME, perhaps?
+ */
+struct buffer_head *
+__getblk(struct block_device *bdev, sector_t block, int size)
+{
+	struct buffer_head *bh = __find_get_block(bdev, block, size);
+
+	might_sleep();
+	if (bh == NULL)
+		bh = __getblk_slow(bdev, block, size);
+	return bh;
+}
+EXPORT_SYMBOL(__getblk);
+
+/*
+ * Do async read-ahead on a buffer..
+ */
+void __breadahead(struct block_device *bdev, sector_t block, int size)
+{
+	struct buffer_head *bh = __getblk(bdev, block, size);
+	ll_rw_block(READA, 1, &bh);
+	brelse(bh);
+}
+EXPORT_SYMBOL(__breadahead);
+
+/**
+ *  __bread() - reads a specified block and returns the bh
+ *  @bdev: the block_device to read from
+ *  @block: number of block
+ *  @size: size (in bytes) to read
+ * 
+ *  Reads a specified block, and returns buffer head that contains it.
+ *  It returns NULL if the block was unreadable.
+ */
+struct buffer_head *
+__bread(struct block_device *bdev, sector_t block, int size)
+{
+	struct buffer_head *bh = __getblk(bdev, block, size);
+
+	if (!buffer_uptodate(bh))
+		bh = __bread_slow(bh);
+	return bh;
+}
+EXPORT_SYMBOL(__bread);
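+
+/*
+ * Typical caller pattern (sketch; "gdesc_block" is just a placeholder
+ * block number): filesystems normally go through the sb_bread() wrapper,
+ * which supplies the superblock's block size:
+ *
+ *	struct buffer_head *bh = sb_bread(sb, gdesc_block);
+ *	if (!bh)
+ *		return -EIO;
+ *	...use bh->b_data...
+ *	brelse(bh);
+ */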
+
+/*
+ * invalidate_bh_lrus() is called rarely - but not only at unmount.
+ * This doesn't race because it runs in each cpu either in irq
+ * or with preempt disabled.
+ */
+static void invalidate_bh_lru(void *arg)
+{
+	struct bh_lru *b = &get_cpu_var(bh_lrus);
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		brelse(b->bhs[i]);
+		b->bhs[i] = NULL;
+	}
+	put_cpu_var(bh_lrus);
+}
+	
+static void invalidate_bh_lrus(void)
+{
+	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+}
+
+void set_bh_page(struct buffer_head *bh,
+		struct page *page, unsigned long offset)
+{
+	bh->b_page = page;
+	if (offset >= PAGE_SIZE)
+		BUG();
+	if (PageHighMem(page))
+		/*
+		 * This catches illegal uses and preserves the offset:
+		 */
+		bh->b_data = (char *)(0 + offset);
+	else
+		bh->b_data = page_address(page) + offset;
+}
+EXPORT_SYMBOL(set_bh_page);
+
+/*
+ * Called when truncating a buffer on a page completely.
+ */
+static inline void discard_buffer(struct buffer_head * bh)
+{
+	lock_buffer(bh);
+	clear_buffer_dirty(bh);
+	bh->b_bdev = NULL;
+	clear_buffer_mapped(bh);
+	clear_buffer_req(bh);
+	clear_buffer_new(bh);
+	clear_buffer_delay(bh);
+	unlock_buffer(bh);
+}
+
+/**
+ * try_to_release_page() - release old fs-specific metadata on a page
+ *
+ * @page: the page which the kernel is trying to free
+ * @gfp_mask: memory allocation flags (and I/O mode)
+ *
+ * The address_space is asked to release any data it holds against the page
+ * (presumably at page->private).  If the release was successful, return `1'.
+ * Otherwise return zero.
+ *
+ * The @gfp_mask argument specifies whether I/O may be performed to release
+ * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
+ *
+ * NOTE: @gfp_mask may go away, and this function may become non-blocking.
+ */
+int try_to_release_page(struct page *page, int gfp_mask)
+{
+	struct address_space * const mapping = page->mapping;
+
+	BUG_ON(!PageLocked(page));
+	if (PageWriteback(page))
+		return 0;
+	
+	if (mapping && mapping->a_ops->releasepage)
+		return mapping->a_ops->releasepage(page, gfp_mask);
+	return try_to_free_buffers(page);
+}
+EXPORT_SYMBOL(try_to_release_page);
+
+/**
+ * block_invalidatepage - invalidate part or all of a buffer-backed page
+ *
+ * @page: the page which is affected
+ * @offset: the index of the truncation point
+ *
+ * block_invalidatepage() is called when all or part of the page has become
+ * invalidated by a truncate operation.
+ *
+ * block_invalidatepage() does not have to release all buffers, but it must
+ * ensure that no dirty buffer is left outside @offset and that no I/O
+ * is underway against any of the blocks which are outside the truncation
+ * point.  Because the caller is about to free (and possibly reuse) those
+ * blocks on-disk.
+ */
+int block_invalidatepage(struct page *page, unsigned long offset)
+{
+	struct buffer_head *head, *bh, *next;
+	unsigned int curr_off = 0;
+	int ret = 1;
+
+	BUG_ON(!PageLocked(page));
+	if (!page_has_buffers(page))
+		goto out;
+
+	head = page_buffers(page);
+	bh = head;
+	do {
+		unsigned int next_off = curr_off + bh->b_size;
+		next = bh->b_this_page;
+
+		/*
+		 * is this block fully invalidated?
+		 */
+		if (offset <= curr_off)
+			discard_buffer(bh);
+		curr_off = next_off;
+		bh = next;
+	} while (bh != head);
+
+	/*
+	 * We release buffers only if the entire page is being invalidated.
+	 * The get_block cached value has been unconditionally invalidated,
+	 * so real IO is not possible anymore.
+	 */
+	if (offset == 0)
+		ret = try_to_release_page(page, 0);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(block_invalidatepage);
+
+/*
+ * We attach and possibly dirty the buffers atomically wrt
+ * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * is already excluded via the page lock.
+ */
+void create_empty_buffers(struct page *page,
+			unsigned long blocksize, unsigned long b_state)
+{
+	struct buffer_head *bh, *head, *tail;
+
+	head = alloc_page_buffers(page, blocksize, 1);
+	bh = head;
+	do {
+		bh->b_state |= b_state;
+		tail = bh;
+		bh = bh->b_this_page;
+	} while (bh);
+	tail->b_this_page = head;
+
+	spin_lock(&page->mapping->private_lock);
+	if (PageUptodate(page) || PageDirty(page)) {
+		bh = head;
+		do {
+			if (PageDirty(page))
+				set_buffer_dirty(bh);
+			if (PageUptodate(page))
+				set_buffer_uptodate(bh);
+			bh = bh->b_this_page;
+		} while (bh != head);
+	}
+	attach_page_buffers(page, head);
+	spin_unlock(&page->mapping->private_lock);
+}
+EXPORT_SYMBOL(create_empty_buffers);
+
+/*
+ * We are taking a block for data and we don't want any output from any
+ * buffer-cache aliases starting from return from that function and
+ * until the moment when something will explicitly mark the buffer
+ * dirty (hopefully that will not happen until we free that block ;-)
+ * We don't even need to mark it not-uptodate - nobody can expect
+ * anything from a newly allocated buffer anyway. We used to use
+ * unmap_buffer() for such invalidation, but that was wrong. We definitely
+ * don't want to mark the alias unmapped, for example - it would confuse
+ * anyone who might pick it with bread() afterwards...
+ *
+ * Also..  Note that bforget() doesn't lock the buffer.  So there can
+ * be writeout I/O going on against recently-freed buffers.  We don't
+ * wait on that I/O in bforget() - it's more efficient to wait on the I/O
+ * only if we really need to.  That happens here.
+ */
+void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
+{
+	struct buffer_head *old_bh;
+
+	might_sleep();
+
+	old_bh = __find_get_block_slow(bdev, block, 0);
+	if (old_bh) {
+		clear_buffer_dirty(old_bh);
+		wait_on_buffer(old_bh);
+		clear_buffer_req(old_bh);
+		__brelse(old_bh);
+	}
+}
+EXPORT_SYMBOL(unmap_underlying_metadata);
+
+/*
+ * NOTE! All mapped/uptodate combinations are valid:
+ *
+ *	Mapped	Uptodate	Meaning
+ *
+ *	No	No		"unknown" - must do get_block()
+ *	No	Yes		"hole" - zero-filled
+ *	Yes	No		"allocated" - allocated on disk, not read in
+ *	Yes	Yes		"valid" - allocated and up-to-date in memory.
+ *
+ * "Dirty" is valid only with the last case (mapped+uptodate).
+ */
+
+/*
+ * While block_write_full_page is writing back the dirty buffers under
+ * the page lock, whoever dirtied the buffers may decide to clean them
+ * again at any time.  We handle that by only looking at the buffer
+ * state inside lock_buffer().
+ *
+ * If block_write_full_page() is called for regular writeback
+ * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
+ * locked buffer.   This only can happen if someone has written the buffer
+ * directly, with submit_bh().  At the address_space level PageWriteback
+ * prevents this contention from occurring.
+ */
+static int __block_write_full_page(struct inode *inode, struct page *page,
+			get_block_t *get_block, struct writeback_control *wbc)
+{
+	int err;
+	sector_t block;
+	sector_t last_block;
+	struct buffer_head *bh, *head;
+	int nr_underway = 0;
+
+	BUG_ON(!PageLocked(page));
+
+	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
+
+	if (!page_has_buffers(page)) {
+		create_empty_buffers(page, 1 << inode->i_blkbits,
+					(1 << BH_Dirty)|(1 << BH_Uptodate));
+	}
+
+	/*
+	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * here, and the (potentially unmapped) buffers may become dirty at
+	 * any time.  If a buffer becomes dirty here after we've inspected it
+	 * then we just miss that fact, and the page stays dirty.
+	 *
+	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * handle that here by just cleaning them.
+	 */
+
+	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	head = page_buffers(page);
+	bh = head;
+
+	/*
+	 * Get all the dirty buffers mapped to disk addresses and
+	 * handle any aliases from the underlying blockdev's mapping.
+	 */
+	do {
+		if (block > last_block) {
+			/*
+			 * mapped buffers outside i_size will occur, because
+			 * this page can be outside i_size when there is a
+			 * truncate in progress.
+			 */
+			/*
+			 * The buffer was zeroed by block_write_full_page()
+			 */
+			clear_buffer_dirty(bh);
+			set_buffer_uptodate(bh);
+		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+			err = get_block(inode, block, bh, 1);
+			if (err)
+				goto recover;
+			if (buffer_new(bh)) {
+				/* blockdev mappings never come here */
+				clear_buffer_new(bh);
+				unmap_underlying_metadata(bh->b_bdev,
+							bh->b_blocknr);
+			}
+		}
+		bh = bh->b_this_page;
+		block++;
+	} while (bh != head);
+
+	do {
+		get_bh(bh);
+		if (!buffer_mapped(bh))
+			continue;
+		/*
+		 * If it's a fully non-blocking write attempt and we cannot
+		 * lock the buffer then redirty the page.  Note that this can
+		 * potentially cause a busy-wait loop from pdflush and kswapd
+		 * activity, but those code paths have their own higher-level
+		 * throttling.
+		 */
+		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+			lock_buffer(bh);
+		} else if (test_set_buffer_locked(bh)) {
+			redirty_page_for_writepage(wbc, page);
+			continue;
+		}
+		if (test_clear_buffer_dirty(bh)) {
+			mark_buffer_async_write(bh);
+		} else {
+			unlock_buffer(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+
+	/*
+	 * The page and its buffers are protected by PageWriteback(), so we can
+	 * drop the bh refcounts early.
+	 */
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+	unlock_page(page);
+
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			submit_bh(WRITE, bh);
+			nr_underway++;
+		}
+		put_bh(bh);
+		bh = next;
+	} while (bh != head);
+
+	err = 0;
+done:
+	if (nr_underway == 0) {
+		/*
+		 * The page was marked dirty, but the buffers were
+		 * clean.  Someone wrote them back by hand with
+		 * ll_rw_block/submit_bh.  A rare case.
+		 */
+		int uptodate = 1;
+		do {
+			if (!buffer_uptodate(bh)) {
+				uptodate = 0;
+				break;
+			}
+			bh = bh->b_this_page;
+		} while (bh != head);
+		if (uptodate)
+			SetPageUptodate(page);
+		end_page_writeback(page);
+		/*
+		 * The page and buffer_heads can be released at any time from
+		 * here on.
+		 */
+		wbc->pages_skipped++;	/* We didn't write this page */
+	}
+	return err;
+
+recover:
+	/*
+	 * ENOSPC, or some other error.  We may already have added some
+	 * blocks to the file, so we need to write these out to avoid
+	 * exposing stale data.
+	 * The page is currently locked and not marked for writeback
+	 */
+	bh = head;
+	/* Recovery: lock and submit the mapped buffers */
+	do {
+		get_bh(bh);
+		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+			lock_buffer(bh);
+			mark_buffer_async_write(bh);
+		} else {
+			/*
+			 * The buffer may have been set dirty during
+			 * attachment to a dirty page.
+			 */
+			clear_buffer_dirty(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+	SetPageError(page);
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+	unlock_page(page);
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			clear_buffer_dirty(bh);
+			submit_bh(WRITE, bh);
+			nr_underway++;
+		}
+		put_bh(bh);
+		bh = next;
+	} while (bh != head);
+	goto done;
+}
+
+static int __block_prepare_write(struct inode *inode, struct page *page,
+		unsigned from, unsigned to, get_block_t *get_block)
+{
+	unsigned block_start, block_end;
+	sector_t block;
+	int err = 0;
+	unsigned blocksize, bbits;
+	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(from > PAGE_CACHE_SIZE);
+	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > to);
+
+	blocksize = 1 << inode->i_blkbits;
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+	head = page_buffers(page);
+
+	bbits = inode->i_blkbits;
+	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+	for(bh = head, block_start = 0; bh != head || !block_start;
+	    block++, block_start=block_end, bh = bh->b_this_page) {
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (PageUptodate(page)) {
+				if (!buffer_uptodate(bh))
+					set_buffer_uptodate(bh);
+			}
+			continue;
+		}
+		if (buffer_new(bh))
+			clear_buffer_new(bh);
+		if (!buffer_mapped(bh)) {
+			err = get_block(inode, block, bh, 1);
+			if (err)
+				goto out;
+			if (buffer_new(bh)) {
+				clear_buffer_new(bh);
+				unmap_underlying_metadata(bh->b_bdev,
+							bh->b_blocknr);
+				if (PageUptodate(page)) {
+					set_buffer_uptodate(bh);
+					continue;
+				}
+				if (block_end > to || block_start < from) {
+					void *kaddr;
+
+					kaddr = kmap_atomic(page, KM_USER0);
+					if (block_end > to)
+						memset(kaddr+to, 0,
+							block_end-to);
+					if (block_start < from)
+						memset(kaddr+block_start,
+							0, from-block_start);
+					flush_dcache_page(page);
+					kunmap_atomic(kaddr, KM_USER0);
+				}
+				continue;
+			}
+		}
+		if (PageUptodate(page)) {
+			if (!buffer_uptodate(bh))
+				set_buffer_uptodate(bh);
+			continue; 
+		}
+		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+		     (block_start < from || block_end > to)) {
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++=bh;
+		}
+	}
+	/*
+	 * If we issued read requests - let them complete.
+	 */
+	while(wait_bh > wait) {
+		wait_on_buffer(*--wait_bh);
+		if (!buffer_uptodate(*wait_bh))
+			return -EIO;
+	}
+	return 0;
+out:
+	/*
+	 * Zero out any newly allocated blocks to avoid exposing stale
+	 * data.  If BH_New is set, we know that the block was newly
+	 * allocated in the above loop.
+	 */
+	bh = head;
+	block_start = 0;
+	do {
+		block_end = block_start+blocksize;
+		if (block_end <= from)
+			goto next_bh;
+		if (block_start >= to)
+			break;
+		if (buffer_new(bh)) {
+			void *kaddr;
+
+			clear_buffer_new(bh);
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr+block_start, 0, bh->b_size);
+			kunmap_atomic(kaddr, KM_USER0);
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+		}
+next_bh:
+		block_start = block_end;
+		bh = bh->b_this_page;
+	} while (bh != head);
+	return err;
+}
+
+static int __block_commit_write(struct inode *inode, struct page *page,
+		unsigned from, unsigned to)
+{
+	unsigned block_start, block_end;
+	int partial = 0;
+	unsigned blocksize;
+	struct buffer_head *bh, *head;
+
+	blocksize = 1 << inode->i_blkbits;
+
+	for(bh = head = page_buffers(page), block_start = 0;
+	    bh != head || !block_start;
+	    block_start=block_end, bh = bh->b_this_page) {
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (!buffer_uptodate(bh))
+				partial = 1;
+		} else {
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+		}
+	}
+
+	/*
+	 * If this is a partial write which happened to make all buffers
+	 * uptodate then we can optimize away a bogus readpage() for
+	 * the next read(). Here we 'discover' whether the page went
+	 * uptodate as a result of this (potentially partial) write.
+	 */
+	if (!partial)
+		SetPageUptodate(page);
+	return 0;
+}
+
+/*
+ * Generic "read page" function for block devices that have the normal
+ * get_block functionality. This is most of the block device filesystems.
+ * Reads the page asynchronously --- the unlock_buffer() and
+ * set/clear_buffer_uptodate() functions propagate buffer state into the
+ * page struct once IO has completed.
+ */
+int block_read_full_page(struct page *page, get_block_t *get_block)
+{
+	struct inode *inode = page->mapping->host;
+	sector_t iblock, lblock;
+	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+	unsigned int blocksize;
+	int nr, i;
+	int fully_mapped = 1;
+
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+	blocksize = 1 << inode->i_blkbits;
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+	head = page_buffers(page);
+
+	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
+	bh = head;
+	nr = 0;
+	i = 0;
+
+	do {
+		if (buffer_uptodate(bh))
+			continue;
+
+		if (!buffer_mapped(bh)) {
+			fully_mapped = 0;
+			if (iblock < lblock) {
+				if (get_block(inode, iblock, bh, 0))
+					SetPageError(page);
+			}
+			if (!buffer_mapped(bh)) {
+				void *kaddr = kmap_atomic(page, KM_USER0);
+				memset(kaddr + i * blocksize, 0, blocksize);
+				flush_dcache_page(page);
+				kunmap_atomic(kaddr, KM_USER0);
+				set_buffer_uptodate(bh);
+				continue;
+			}
+			/*
+			 * get_block() might have updated the buffer
+			 * synchronously
+			 */
+			if (buffer_uptodate(bh))
+				continue;
+		}
+		arr[nr++] = bh;
+	} while (i++, iblock++, (bh = bh->b_this_page) != head);
+
+	if (fully_mapped)
+		SetPageMappedToDisk(page);
+
+	if (!nr) {
+		/*
+		 * All buffers are uptodate - we can set the page uptodate
+		 * as well. But not if get_block() returned an error.
+		 */
+		if (!PageError(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+		return 0;
+	}
+
+	/* Stage two: lock the buffers */
+	for (i = 0; i < nr; i++) {
+		bh = arr[i];
+		lock_buffer(bh);
+		mark_buffer_async_read(bh);
+	}
+
+	/*
+	 * Stage 3: start the IO.  Check for uptodateness
+	 * inside the buffer lock in case another process reading
+	 * the underlying blockdev brought it uptodate (the sct fix).
+	 */
+	for (i = 0; i < nr; i++) {
+		bh = arr[i];
+		if (buffer_uptodate(bh))
+			end_buffer_async_read(bh, 1);
+		else
+			submit_bh(READ, bh);
+	}
+	return 0;
+}
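+
+/*
+ * Minimal sketch of how a filesystem plugs in here ("myfs_get_block" is
+ * hypothetical): ->readpage simply forwards to block_read_full_page()
+ * with the filesystem's own block-mapping routine:
+ *
+ *	static int myfs_readpage(struct file *file, struct page *page)
+ *	{
+ *		return block_read_full_page(page, myfs_get_block);
+ *	}
+ */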
+
+/* utility function for filesystems that need to do work on expanding
+ * truncates.  Uses prepare/commit_write to allow the filesystem to
+ * deal with the hole.  
+ */
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page;
+	unsigned long index, offset, limit;
+	int err;
+
+	err = -EFBIG;
+        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+		send_sig(SIGXFSZ, current, 0);
+		goto out;
+	}
+	if (size > inode->i_sb->s_maxbytes)
+		goto out;
+
+	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+
+	/* ugh.  in prepare/commit_write, if from==to==start of block, we 
+	** skip the prepare.  make sure we never send an offset for the start
+	** of a block
+	*/
+	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+		offset++;
+	}
+	index = size >> PAGE_CACHE_SHIFT;
+	err = -ENOMEM;
+	page = grab_cache_page(mapping, index);
+	if (!page)
+		goto out;
+	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
+	if (!err) {
+		err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+	}
+	unlock_page(page);
+	page_cache_release(page);
+	if (err > 0)
+		err = 0;
+out:
+	return err;
+}
+
+/*
+ * For moronic filesystems that do not allow holes in files.
+ * We may have to extend the file.
+ */
+
+int cont_prepare_write(struct page *page, unsigned offset,
+		unsigned to, get_block_t *get_block, loff_t *bytes)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	struct page *new_page;
+	pgoff_t pgpos;
+	long status;
+	unsigned zerofrom;
+	unsigned blocksize = 1 << inode->i_blkbits;
+	void *kaddr;
+
+	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
+		status = -ENOMEM;
+		new_page = grab_cache_page(mapping, pgpos);
+		if (!new_page)
+			goto out;
+		/* we might sleep */
+		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
+			unlock_page(new_page);
+			page_cache_release(new_page);
+			continue;
+		}
+		zerofrom = *bytes & ~PAGE_CACHE_MASK;
+		if (zerofrom & (blocksize-1)) {
+			*bytes |= (blocksize-1);
+			(*bytes)++;
+		}
+		status = __block_prepare_write(inode, new_page, zerofrom,
+						PAGE_CACHE_SIZE, get_block);
+		if (status)
+			goto out_unmap;
+		kaddr = kmap_atomic(new_page, KM_USER0);
+		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
+		flush_dcache_page(new_page);
+		kunmap_atomic(kaddr, KM_USER0);
+		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
+		unlock_page(new_page);
+		page_cache_release(new_page);
+	}
+
+	if (page->index < pgpos) {
+		/* completely inside the area */
+		zerofrom = offset;
+	} else {
+		/* page covers the boundary, find the boundary offset */
+		zerofrom = *bytes & ~PAGE_CACHE_MASK;
+
+		/* if we will expand the thing last block will be filled */
+		if (to > zerofrom && (zerofrom & (blocksize-1))) {
+			*bytes |= (blocksize-1);
+			(*bytes)++;
+		}
+
+		/* starting below the boundary? Nothing to zero out */
+		if (offset <= zerofrom)
+			zerofrom = offset;
+	}
+	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
+	if (status)
+		goto out1;
+	if (zerofrom < offset) {
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr+zerofrom, 0, offset-zerofrom);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		__block_commit_write(inode, page, zerofrom, offset);
+	}
+	return 0;
+out1:
+	ClearPageUptodate(page);
+	return status;
+
+out_unmap:
+	ClearPageUptodate(new_page);
+	unlock_page(new_page);
+	page_cache_release(new_page);
+out:
+	return status;
+}
+
+int block_prepare_write(struct page *page, unsigned from, unsigned to,
+			get_block_t *get_block)
+{
+	struct inode *inode = page->mapping->host;
+	int err = __block_prepare_write(inode, page, from, to, get_block);
+	if (err)
+		ClearPageUptodate(page);
+	return err;
+}
+
+int block_commit_write(struct page *page, unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	__block_commit_write(inode,page,from,to);
+	return 0;
+}
+
+int generic_commit_write(struct file *file, struct page *page,
+		unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+	__block_commit_write(inode,page,from,to);
+	/*
+	 * No need to use i_size_read() here, the i_size
+	 * cannot change under us because we hold i_sem.
+	 */
+	if (pos > inode->i_size) {
+		i_size_write(inode, pos);
+		mark_inode_dirty(inode);
+	}
+	return 0;
+}
+
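+/*
+ * Putting the prepare/commit pair together (illustrative only; the
+ * "myfs_" names are not real): a buffer-backed filesystem typically
+ * wraps block_prepare_write() with its own get_block routine and uses
+ * generic_commit_write() unchanged in its address_space_operations:
+ *
+ *	static int myfs_prepare_write(struct file *file, struct page *page,
+ *				unsigned from, unsigned to)
+ *	{
+ *		return block_prepare_write(page, from, to, myfs_get_block);
+ *	}
+ *
+ *	static struct address_space_operations myfs_aops = {
+ *		.readpage	= myfs_readpage,
+ *		.writepage	= myfs_writepage,
+ *		.sync_page	= block_sync_page,
+ *		.prepare_write	= myfs_prepare_write,
+ *		.commit_write	= generic_commit_write,
+ *	};
+ */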
+
+/*
+ * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
+ * immediately, while under the page lock.  So it needs a special end_io
+ * handler which does not touch the bh after unlocking it.
+ *
+ * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
+ * a race there is benign: unlock_buffer() only uses the bh's address for
+ * hashing after unlocking the buffer, so it doesn't actually touch the bh
+ * itself.
+ */
+static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
+{
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		/* This happens, due to failed READA attempts. */
+		clear_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
+}
+
+/*
+ * On entry, the page is fully not uptodate.
+ * On exit the page is fully uptodate in the areas outside (from,to)
+ */
+int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
+			get_block_t *get_block)
+{
+	struct inode *inode = page->mapping->host;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	struct buffer_head map_bh;
+	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
+	unsigned block_in_page;
+	unsigned block_start;
+	sector_t block_in_file;
+	char *kaddr;
+	int nr_reads = 0;
+	int i;
+	int ret = 0;
+	int is_mapped_to_disk = 1;
+	int dirtied_it = 0;
+
+	if (PageMappedToDisk(page))
+		return 0;
+
+	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	map_bh.b_page = page;
+
+	/*
+	 * We loop across all blocks in the page, whether or not they are
+	 * part of the affected region.  This is so we can discover if the
+	 * page is fully mapped-to-disk.
+	 */
+	for (block_start = 0, block_in_page = 0;
+		  block_start < PAGE_CACHE_SIZE;
+		  block_in_page++, block_start += blocksize) {
+		unsigned block_end = block_start + blocksize;
+		int create;
+
+		map_bh.b_state = 0;
+		create = 1;
+		if (block_start >= to)
+			create = 0;
+		ret = get_block(inode, block_in_file + block_in_page,
+					&map_bh, create);
+		if (ret)
+			goto failed;
+		if (!buffer_mapped(&map_bh))
+			is_mapped_to_disk = 0;
+		if (buffer_new(&map_bh))
+			unmap_underlying_metadata(map_bh.b_bdev,
+							map_bh.b_blocknr);
+		if (PageUptodate(page))
+			continue;
+		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
+			kaddr = kmap_atomic(page, KM_USER0);
+			if (block_start < from) {
+				memset(kaddr+block_start, 0, from-block_start);
+				dirtied_it = 1;
+			}
+			if (block_end > to) {
+				memset(kaddr + to, 0, block_end - to);
+				dirtied_it = 1;
+			}
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+			continue;
+		}
+		if (buffer_uptodate(&map_bh))
+			continue;	/* reiserfs does this */
+		if (block_start < from || block_end > to) {
+			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
+
+			if (!bh) {
+				ret = -ENOMEM;
+				goto failed;
+			}
+			bh->b_state = map_bh.b_state;
+			atomic_set(&bh->b_count, 0);
+			bh->b_this_page = NULL;
+			bh->b_page = page;
+			bh->b_blocknr = map_bh.b_blocknr;
+			bh->b_size = blocksize;
+			bh->b_data = (char *)(long)block_start;
+			bh->b_bdev = map_bh.b_bdev;
+			bh->b_private = NULL;
+			read_bh[nr_reads++] = bh;
+		}
+	}
+
+	if (nr_reads) {
+		struct buffer_head *bh;
+
+		/*
+		 * The page is locked, so these buffers are protected from
+		 * any VM or truncate activity.  Hence we don't need to care
+		 * for the buffer_head refcounts.
+		 */
+		for (i = 0; i < nr_reads; i++) {
+			bh = read_bh[i];
+			lock_buffer(bh);
+			bh->b_end_io = end_buffer_read_nobh;
+			submit_bh(READ, bh);
+		}
+		for (i = 0; i < nr_reads; i++) {
+			bh = read_bh[i];
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh))
+				ret = -EIO;
+			free_buffer_head(bh);
+			read_bh[i] = NULL;
+		}
+		if (ret)
+			goto failed;
+	}
+
+	if (is_mapped_to_disk)
+		SetPageMappedToDisk(page);
+	SetPageUptodate(page);
+
+	/*
+	 * Setting the page dirty here isn't necessary for the prepare_write
+	 * function - commit_write will do that.  But if/when this function is
+	 * used within the pagefault handler to ensure that all mmapped pages
+	 * have backing space in the filesystem, we will need to dirty the page
+	 * if its contents were altered.
+	 */
+	if (dirtied_it)
+		set_page_dirty(page);
+
+	return 0;
+
+failed:
+	for (i = 0; i < nr_reads; i++) {
+		if (read_bh[i])
+			free_buffer_head(read_bh[i]);
+	}
+
+	/*
+	 * Error recovery is pretty slack.  Clear the page and mark it dirty
+	 * so we'll later zero out any blocks which _were_ allocated.
+	 */
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	kunmap_atomic(kaddr, KM_USER0);
+	SetPageUptodate(page);
+	set_page_dirty(page);
+	return ret;
+}
+EXPORT_SYMBOL(nobh_prepare_write);
+
+int nobh_commit_write(struct file *file, struct page *page,
+		unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+
+	set_page_dirty(page);
+	if (pos > inode->i_size) {
+		i_size_write(inode, pos);
+		mark_inode_dirty(inode);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(nobh_commit_write);
+
+/*
+ * nobh_writepage() - based on block_write_full_page() except
+ * that it tries to operate without attaching bufferheads to
+ * the page.
+ */
+int nobh_writepage(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	struct inode * const inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	unsigned offset;
+	void *kaddr;
+	int ret;
+
+	/* Is the page fully inside i_size? */
+	if (page->index < end_index)
+		goto out;
+
+	/* Is the page fully outside i_size? (truncate in progress) */
+	offset = i_size & (PAGE_CACHE_SIZE-1);
+	if (page->index >= end_index+1 || !offset) {
+		/*
+		 * The page may have dirty, unmapped buffers.  For example,
+		 * they may have been added in ext3_writepage().  Make them
+		 * freeable here, so the page does not leak.
+		 */
+#if 0
+		/* Not really sure about this  - do we need this ? */
+		if (page->mapping->a_ops->invalidatepage)
+			page->mapping->a_ops->invalidatepage(page, offset);
+#endif
+		unlock_page(page);
+		return 0; /* don't care */
+	}
+
+	/*
+	 * The page straddles i_size.  It must be zeroed out on each and every
+	 * writepage invocation because it may be mmapped.  "A file is mapped
+	 * in multiples of the page size.  For a file that is not a multiple of
+	 * the  page size, the remaining memory is zeroed when mapped, and
+	 * writes to that region are not written out to the file."
+	 */
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+out:
+	ret = mpage_writepage(page, get_block, wbc);
+	if (ret == -EAGAIN)
+		ret = __block_write_full_page(inode, page, get_block, wbc);
+	return ret;
+}
+EXPORT_SYMBOL(nobh_writepage);
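+
+/*
+ * Illustrative sketch (not part of this patch): a filesystem that wants the
+ * nobh path typically wraps nobh_prepare_write()/nobh_writepage() around its
+ * own get_block function and uses nobh_commit_write() directly as
+ * ->commit_write.  "example_get_block" is a hypothetical get_block_t.
+ */
+#if 0
+static int example_get_block(struct inode *inode, sector_t iblock,
+				struct buffer_head *bh_result, int create);
+
+static int example_nobh_prepare_write(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	return nobh_prepare_write(page, from, to, example_get_block);
+}
+
+static int example_nobh_writepage(struct page *page,
+				struct writeback_control *wbc)
+{
+	return nobh_writepage(page, example_get_block, wbc);
+}
+#endif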
+
+/*
+ * This function assumes that ->prepare_write() uses nobh_prepare_write().
+ */
+int nobh_truncate_page(struct address_space *mapping, loff_t from)
+{
+	struct inode *inode = mapping->host;
+	unsigned blocksize = 1 << inode->i_blkbits;
+	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned to;
+	struct page *page;
+	struct address_space_operations *a_ops = mapping->a_ops;
+	char *kaddr;
+	int ret = 0;
+
+	if ((offset & (blocksize - 1)) == 0)
+		goto out;
+
+	ret = -ENOMEM;
+	page = grab_cache_page(mapping, index);
+	if (!page)
+		goto out;
+
+	to = (offset + blocksize) & ~(blocksize - 1);
+	ret = a_ops->prepare_write(NULL, page, offset, to);
+	if (ret == 0) {
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		set_page_dirty(page);
+	}
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(nobh_truncate_page);
+
+int block_truncate_page(struct address_space *mapping,
+			loff_t from, get_block_t *get_block)
+{
+	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned blocksize;
+	pgoff_t iblock;
+	unsigned length, pos;
+	struct inode *inode = mapping->host;
+	struct page *page;
+	struct buffer_head *bh;
+	void *kaddr;
+	int err;
+
+	blocksize = 1 << inode->i_blkbits;
+	length = offset & (blocksize - 1);
+
+	/* Block boundary? Nothing to do */
+	if (!length)
+		return 0;
+
+	length = blocksize - length;
+	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	
+	page = grab_cache_page(mapping, index);
+	err = -ENOMEM;
+	if (!page)
+		goto out;
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+
+	/* Find the buffer that contains "offset" */
+	bh = page_buffers(page);
+	pos = blocksize;
+	while (offset >= pos) {
+		bh = bh->b_this_page;
+		iblock++;
+		pos += blocksize;
+	}
+
+	err = 0;
+	if (!buffer_mapped(bh)) {
+		err = get_block(inode, iblock, bh, 0);
+		if (err)
+			goto unlock;
+		/* unmapped? It's a hole - nothing to do */
+		if (!buffer_mapped(bh))
+			goto unlock;
+	}
+
+	/* Ok, it's mapped. Make sure it's up-to-date */
+	if (PageUptodate(page))
+		set_buffer_uptodate(bh);
+
+	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+		err = -EIO;
+		ll_rw_block(READ, 1, &bh);
+		wait_on_buffer(bh);
+		/* Uhhuh. Read error. Complain and punt. */
+		if (!buffer_uptodate(bh))
+			goto unlock;
+	}
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	mark_buffer_dirty(bh);
+	err = 0;
+
+unlock:
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return err;
+}
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	struct inode * const inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	unsigned offset;
+	void *kaddr;
+
+	/* Is the page fully inside i_size? */
+	if (page->index < end_index)
+		return __block_write_full_page(inode, page, get_block, wbc);
+
+	/* Is the page fully outside i_size? (truncate in progress) */
+	offset = i_size & (PAGE_CACHE_SIZE-1);
+	if (page->index >= end_index+1 || !offset) {
+		/*
+		 * The page may have dirty, unmapped buffers.  For example,
+		 * they may have been added in ext3_writepage().  Make them
+		 * freeable here, so the page does not leak.
+		 */
+		block_invalidatepage(page, 0);
+		unlock_page(page);
+		return 0; /* don't care */
+	}
+
+	/*
+	 * The page straddles i_size.  It must be zeroed out on each and every
+	 * writepage invocation because it may be mmapped.  "A file is mapped
+	 * in multiples of the page size.  For a file that is not a multiple of
+	 * the  page size, the remaining memory is zeroed when mapped, and
+	 * writes to that region are not written out to the file."
+	 */
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+	return __block_write_full_page(inode, page, get_block, wbc);
+}
+
+sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
+			    get_block_t *get_block)
+{
+	struct buffer_head tmp;
+	struct inode *inode = mapping->host;
+	tmp.b_state = 0;
+	tmp.b_blocknr = 0;
+	get_block(inode, block, &tmp, 0);
+	return tmp.b_blocknr;
+}
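+
+/*
+ * Illustrative sketch (not part of this patch): how a simple filesystem
+ * might plug the buffer-head based helpers above into its
+ * address_space_operations.  "example_get_block" is a hypothetical
+ * get_block_t supplied by the filesystem.
+ */
+#if 0
+static int example_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, example_get_block);
+}
+
+static int example_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, example_get_block, wbc);
+}
+
+static int example_prepare_write(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	return block_prepare_write(page, from, to, example_get_block);
+}
+
+static sector_t example_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, example_get_block);
+}
+
+static struct address_space_operations example_aops = {
+	.readpage	= example_readpage,
+	.writepage	= example_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= example_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= example_bmap,
+};
+#endif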
+
+static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
+{
+	struct buffer_head *bh = bio->bi_private;
+
+	if (bio->bi_size)
+		return 1;
+
+	if (err == -EOPNOTSUPP) {
+		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		set_bit(BH_Eopnotsupp, &bh->b_state);
+	}
+
+	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
+	bio_put(bio);
+	return 0;
+}
+
+int submit_bh(int rw, struct buffer_head * bh)
+{
+	struct bio *bio;
+	int ret = 0;
+
+	BUG_ON(!buffer_locked(bh));
+	BUG_ON(!buffer_mapped(bh));
+	BUG_ON(!bh->b_end_io);
+
+	if (buffer_ordered(bh) && (rw == WRITE))
+		rw = WRITE_BARRIER;
+
+	/*
+	 * Only clear out a write error when rewriting, should this
+	 * include WRITE_SYNC as well?
+	 */
+	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
+		clear_buffer_write_io_error(bh);
+
+	/*
+	 * from here on down, it's all bio -- do the initial mapping,
+	 * submit_bio -> generic_make_request may further map this bio around
+	 */
+	bio = bio_alloc(GFP_NOIO, 1);
+
+	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_bdev = bh->b_bdev;
+	bio->bi_io_vec[0].bv_page = bh->b_page;
+	bio->bi_io_vec[0].bv_len = bh->b_size;
+	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
+
+	bio->bi_vcnt = 1;
+	bio->bi_idx = 0;
+	bio->bi_size = bh->b_size;
+
+	bio->bi_end_io = end_bio_bh_io_sync;
+	bio->bi_private = bh;
+
+	bio_get(bio);
+	submit_bio(rw, bio);
+
+	if (bio_flagged(bio, BIO_EOPNOTSUPP))
+		ret = -EOPNOTSUPP;
+
+	bio_put(bio);
+	return ret;
+}
+
+/**
+ * ll_rw_block: low-level access to block devices (DEPRECATED)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @nr: number of &struct buffer_heads in the array
+ * @bhs: array of pointers to &struct buffer_head
+ *
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads,
+ * and requests an I/O operation on them, either a %READ or a %WRITE.
+ * The third %READA option is described in the documentation for
+ * generic_make_request() which ll_rw_block() calls.
+ *
+ * This function drops any buffer that it cannot get a lock on (with the
+ * BH_Lock state bit), any buffer that appears to be clean when doing a
+ * write request, and any buffer that appears to be up-to-date when doing a
+ * read request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean until
+ * the buffer gets unlocked).
+ *
+ * ll_rw_block sets b_end_io to a simple completion handler that marks
+ * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
+ * any waiters.
+ *
+ * All of the buffers must be for the same device, and must also be a
+ * multiple of the current approved size for the device.
+ */
+void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		struct buffer_head *bh = bhs[i];
+
+		if (test_set_buffer_locked(bh))
+			continue;
+
+		get_bh(bh);
+		if (rw == WRITE) {
+			bh->b_end_io = end_buffer_write_sync;
+			if (test_clear_buffer_dirty(bh)) {
+				submit_bh(WRITE, bh);
+				continue;
+			}
+		} else {
+			bh->b_end_io = end_buffer_read_sync;
+			if (!buffer_uptodate(bh)) {
+				submit_bh(rw, bh);
+				continue;
+			}
+		}
+		unlock_buffer(bh);
+		put_bh(bh);
+	}
+}
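+
+/*
+ * Illustrative sketch (not part of this patch): the usual caller pattern
+ * for reading a single metadata buffer through ll_rw_block() and waiting
+ * for the result, as block_truncate_page() does above.
+ */
+#if 0
+static int example_read_buffer(struct buffer_head *bh)
+{
+	if (!buffer_uptodate(bh)) {
+		ll_rw_block(READ, 1, &bh);
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh))
+			return -EIO;		/* read error */
+	}
+	return 0;
+}
+#endif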
+
+/*
+ * For a data-integrity writeout, we need to wait upon any in-progress I/O
+ * and then start new I/O and then wait upon it.  The caller must have a ref on
+ * the buffer_head.
+ */
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+	int ret = 0;
+
+	WARN_ON(atomic_read(&bh->b_count) < 1);
+	lock_buffer(bh);
+	if (test_clear_buffer_dirty(bh)) {
+		get_bh(bh);
+		bh->b_end_io = end_buffer_write_sync;
+		ret = submit_bh(WRITE, bh);
+		wait_on_buffer(bh);
+		if (buffer_eopnotsupp(bh)) {
+			clear_buffer_eopnotsupp(bh);
+			ret = -EOPNOTSUPP;
+		}
+		if (!ret && !buffer_uptodate(bh))
+			ret = -EIO;
+	} else {
+		unlock_buffer(bh);
+	}
+	return ret;
+}
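+
+/*
+ * Illustrative sketch (not part of this patch): the common data-integrity
+ * pattern for a single buffer the caller already holds a reference on.
+ */
+#if 0
+static int example_write_buffer_sync(struct buffer_head *bh)
+{
+	mark_buffer_dirty(bh);
+	return sync_dirty_buffer(bh);	/* submits and waits for the write */
+}
+#endif
+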
+
+/*
+ * try_to_free_buffers() checks if all the buffers on this particular page
+ * are unused, and releases them if so.
+ *
+ * Exclusion against try_to_free_buffers may be obtained by either
+ * locking the page or by holding its mapping's private_lock.
+ *
+ * If the page is dirty but all the buffers are clean then we need to
+ * be sure to mark the page clean as well.  This is because the page
+ * may be against a block device, and a later reattachment of buffers
+ * to a dirty page will set *all* buffers dirty.  Which would corrupt
+ * filesystem data on the same device.
+ *
+ * The same applies to regular filesystem pages: if all the buffers are
+ * clean then we set the page clean and proceed.  To do that, we require
+ * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * private_lock.
+ *
+ * try_to_free_buffers() is non-blocking.
+ */
+static inline int buffer_busy(struct buffer_head *bh)
+{
+	return atomic_read(&bh->b_count) |
+		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
+}
+
+static int
+drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
+{
+	struct buffer_head *head = page_buffers(page);
+	struct buffer_head *bh;
+
+	bh = head;
+	do {
+		if (buffer_write_io_error(bh))
+			set_bit(AS_EIO, &page->mapping->flags);
+		if (buffer_busy(bh))
+			goto failed;
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	do {
+		struct buffer_head *next = bh->b_this_page;
+
+		if (!list_empty(&bh->b_assoc_buffers))
+			__remove_assoc_queue(bh);
+		bh = next;
+	} while (bh != head);
+	*buffers_to_free = head;
+	__clear_page_buffers(page);
+	return 1;
+failed:
+	return 0;
+}
+
+int try_to_free_buffers(struct page *page)
+{
+	struct address_space * const mapping = page->mapping;
+	struct buffer_head *buffers_to_free = NULL;
+	int ret = 0;
+
+	BUG_ON(!PageLocked(page));
+	if (PageWriteback(page))
+		return 0;
+
+	if (mapping == NULL) {		/* can this still happen? */
+		ret = drop_buffers(page, &buffers_to_free);
+		goto out;
+	}
+
+	spin_lock(&mapping->private_lock);
+	ret = drop_buffers(page, &buffers_to_free);
+	if (ret) {
+		/*
+		 * If the filesystem writes its buffers by hand (eg ext3)
+		 * then we can have clean buffers against a dirty page.  We
+		 * clean the page here; otherwise later reattachment of buffers
+		 * could encounter a non-uptodate page, which is unresolvable.
+		 * This only applies in the rare case where try_to_free_buffers
+		 * succeeds but the page is not freed.
+		 */
+		clear_page_dirty(page);
+	}
+	spin_unlock(&mapping->private_lock);
+out:
+	if (buffers_to_free) {
+		struct buffer_head *bh = buffers_to_free;
+
+		do {
+			struct buffer_head *next = bh->b_this_page;
+			free_buffer_head(bh);
+			bh = next;
+		} while (bh != buffers_to_free);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(try_to_free_buffers);
+
+int block_sync_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	smp_mb();
+	mapping = page_mapping(page);
+	if (mapping)
+		blk_run_backing_dev(mapping->backing_dev_info, page);
+	return 0;
+}
+
+/*
+ * There are no bdflush tunables left.  But distributions are
+ * still running obsolete flush daemons, so we terminate them here.
+ *
+ * Use of bdflush() is deprecated and will be removed in a future kernel.
+ * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ */
+asmlinkage long sys_bdflush(int func, long data)
+{
+	static int msg_count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (msg_count < 5) {
+		msg_count++;
+		printk(KERN_INFO
+			"warning: process `%s' used the obsolete bdflush"
+			" system call\n", current->comm);
+		printk(KERN_INFO "Fix your initscripts?\n");
+	}
+
+	if (func == 1)
+		do_exit(0);
+	return 0;
+}
+
+/*
+ * Buffer-head allocation
+ */
+static kmem_cache_t *bh_cachep;
+
+/*
+ * Once the number of bh's in the machine exceeds this level, we start
+ * stripping them in writeback.
+ */
+static int max_buffer_heads;
+
+int buffer_heads_over_limit;
+
+struct bh_accounting {
+	int nr;			/* Number of live bh's */
+	int ratelimit;		/* Limit cacheline bouncing */
+};
+
+static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
+
+static void recalc_bh_state(void)
+{
+	int i;
+	int tot = 0;
+
+	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
+		return;
+	__get_cpu_var(bh_accounting).ratelimit = 0;
+	for_each_cpu(i)
+		tot += per_cpu(bh_accounting, i).nr;
+	buffer_heads_over_limit = (tot > max_buffer_heads);
+}
+	
+struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+{
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+	if (ret) {
+		preempt_disable();
+		__get_cpu_var(bh_accounting).nr++;
+		recalc_bh_state();
+		preempt_enable();
+	}
+	return ret;
+}
+EXPORT_SYMBOL(alloc_buffer_head);
+
+void free_buffer_head(struct buffer_head *bh)
+{
+	BUG_ON(!list_empty(&bh->b_assoc_buffers));
+	kmem_cache_free(bh_cachep, bh);
+	preempt_disable();
+	__get_cpu_var(bh_accounting).nr--;
+	recalc_bh_state();
+	preempt_enable();
+}
+EXPORT_SYMBOL(free_buffer_head);
+
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+{
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+			    SLAB_CTOR_CONSTRUCTOR) {
+		struct buffer_head * bh = (struct buffer_head *)data;
+
+		memset(bh, 0, sizeof(*bh));
+		INIT_LIST_HEAD(&bh->b_assoc_buffers);
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void buffer_exit_cpu(int cpu)
+{
+	int i;
+	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		brelse(b->bhs[i]);
+		b->bhs[i] = NULL;
+	}
+}
+
+static int buffer_cpu_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	if (action == CPU_DEAD)
+		buffer_exit_cpu((unsigned long)hcpu);
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void __init buffer_init(void)
+{
+	int nrpages;
+
+	bh_cachep = kmem_cache_create("buffer_head",
+			sizeof(struct buffer_head), 0,
+			SLAB_PANIC, init_buffer_head, NULL);
+
+	/*
+	 * Limit the bh occupancy to 10% of ZONE_NORMAL
+	 */
+	nrpages = (nr_free_buffer_pages() * 10) / 100;
+	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
+	hotcpu_notifier(buffer_cpu_notify, 0);
+}
+
+EXPORT_SYMBOL(__bforget);
+EXPORT_SYMBOL(__brelse);
+EXPORT_SYMBOL(__wait_on_buffer);
+EXPORT_SYMBOL(block_commit_write);
+EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(block_read_full_page);
+EXPORT_SYMBOL(block_sync_page);
+EXPORT_SYMBOL(block_truncate_page);
+EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(cont_prepare_write);
+EXPORT_SYMBOL(end_buffer_async_write);
+EXPORT_SYMBOL(end_buffer_read_sync);
+EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(file_fsync);
+EXPORT_SYMBOL(fsync_bdev);
+EXPORT_SYMBOL(generic_block_bmap);
+EXPORT_SYMBOL(generic_commit_write);
+EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(init_buffer);
+EXPORT_SYMBOL(invalidate_bdev);
+EXPORT_SYMBOL(ll_rw_block);
+EXPORT_SYMBOL(mark_buffer_dirty);
+EXPORT_SYMBOL(submit_bh);
+EXPORT_SYMBOL(sync_dirty_buffer);
+EXPORT_SYMBOL(unlock_buffer);
diff --git a/fs/char_dev.c b/fs/char_dev.c
new file mode 100644
index 0000000..7357a91
--- /dev/null
+++ b/fs/char_dev.c
@@ -0,0 +1,449 @@
+/*
+ *  linux/fs/char_dev.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <linux/kobject.h>
+#include <linux/kobj_map.h>
+#include <linux/cdev.h>
+
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+static struct kobj_map *cdev_map;
+
+/* degrade to linked list for small systems */
+#define MAX_PROBE_HASH (CONFIG_BASE_SMALL ? 1 : 255)
+
+static DECLARE_MUTEX(chrdevs_lock);
+
+static struct char_device_struct {
+	struct char_device_struct *next;
+	unsigned int major;
+	unsigned int baseminor;
+	int minorct;
+	const char *name;
+	struct file_operations *fops;
+	struct cdev *cdev;		/* will die */
+} *chrdevs[MAX_PROBE_HASH];
+
+/* index in the above */
+static inline int major_to_index(int major)
+{
+	return major % MAX_PROBE_HASH;
+}
+
+/* get char device names in somewhat random order */
+int get_chrdev_list(char *page)
+{
+	struct char_device_struct *cd;
+	int i, len;
+
+	len = sprintf(page, "Character devices:\n");
+
+	down(&chrdevs_lock);
+	for (i = 0; i < ARRAY_SIZE(chrdevs) ; i++) {
+		for (cd = chrdevs[i]; cd; cd = cd->next)
+			len += sprintf(page+len, "%3d %s\n",
+				       cd->major, cd->name);
+	}
+	up(&chrdevs_lock);
+
+	return len;
+}
+
+/*
+ * Register a single major with a specified minor range.
+ *
+ * If major == 0 this function will dynamically allocate a major and return
+ * its number.
+ *
+ * If major > 0 this function will attempt to reserve the passed range of
+ * minors and will return zero on success.
+ *
+ * Returns a -ve errno on failure.
+ */
+static struct char_device_struct *
+__register_chrdev_region(unsigned int major, unsigned int baseminor,
+			   int minorct, const char *name)
+{
+	struct char_device_struct *cd, **cp;
+	int ret = 0;
+	int i;
+
+	cd = kmalloc(sizeof(struct char_device_struct), GFP_KERNEL);
+	if (cd == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	memset(cd, 0, sizeof(struct char_device_struct));
+
+	down(&chrdevs_lock);
+
+	/* temporary */
+	if (major == 0) {
+		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
+			if (chrdevs[i] == NULL)
+				break;
+		}
+
+		if (i == 0) {
+			ret = -EBUSY;
+			goto out;
+		}
+		major = i;
+		ret = major;
+	}
+
+	cd->major = major;
+	cd->baseminor = baseminor;
+	cd->minorct = minorct;
+	cd->name = name;
+
+	i = major_to_index(major);
+
+	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+		if ((*cp)->major > major ||
+		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
+			break;
+	if (*cp && (*cp)->major == major &&
+	    (*cp)->baseminor < baseminor + minorct) {
+		ret = -EBUSY;
+		goto out;
+	}
+	cd->next = *cp;
+	*cp = cd;
+	up(&chrdevs_lock);
+	return cd;
+out:
+	up(&chrdevs_lock);
+	kfree(cd);
+	return ERR_PTR(ret);
+}
+
+static struct char_device_struct *
+__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
+{
+	struct char_device_struct *cd = NULL, **cp;
+	int i = major_to_index(major);
+
+	down(&chrdevs_lock);
+	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+		if ((*cp)->major == major &&
+		    (*cp)->baseminor == baseminor &&
+		    (*cp)->minorct == minorct)
+			break;
+	if (*cp) {
+		cd = *cp;
+		*cp = cd->next;
+	}
+	up(&chrdevs_lock);
+	return cd;
+}
+
+int register_chrdev_region(dev_t from, unsigned count, const char *name)
+{
+	struct char_device_struct *cd;
+	dev_t to = from + count;
+	dev_t n, next;
+
+	for (n = from; n < to; n = next) {
+		next = MKDEV(MAJOR(n)+1, 0);
+		if (next > to)
+			next = to;
+		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
+			       next - n, name);
+		if (IS_ERR(cd))
+			goto fail;
+	}
+	return 0;
+fail:
+	to = n;
+	for (n = from; n < to; n = next) {
+		next = MKDEV(MAJOR(n)+1, 0);
+		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
+	}
+	return PTR_ERR(cd);
+}
+
+int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
+			const char *name)
+{
+	struct char_device_struct *cd;
+	cd = __register_chrdev_region(0, baseminor, count, name);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+	*dev = MKDEV(cd->major, cd->baseminor);
+	return 0;
+}
+
+int register_chrdev(unsigned int major, const char *name,
+		    struct file_operations *fops)
+{
+	struct char_device_struct *cd;
+	struct cdev *cdev;
+	char *s;
+	int err = -ENOMEM;
+
+	cd = __register_chrdev_region(major, 0, 256, name);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+	
+	cdev = cdev_alloc();
+	if (!cdev)
+		goto out2;
+
+	cdev->owner = fops->owner;
+	cdev->ops = fops;
+	kobject_set_name(&cdev->kobj, "%s", name);
+	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
+		*s = '!';
+		
+	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
+	if (err)
+		goto out;
+
+	cd->cdev = cdev;
+
+	return major ? 0 : cd->major;
+out:
+	kobject_put(&cdev->kobj);
+out2:
+	kfree(__unregister_chrdev_region(cd->major, 0, 256));
+	return err;
+}
+
+void unregister_chrdev_region(dev_t from, unsigned count)
+{
+	dev_t to = from + count;
+	dev_t n, next;
+
+	for (n = from; n < to; n = next) {
+		next = MKDEV(MAJOR(n)+1, 0);
+		if (next > to)
+			next = to;
+		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
+	}
+}
+
+int unregister_chrdev(unsigned int major, const char *name)
+{
+	struct char_device_struct *cd;
+	cd = __unregister_chrdev_region(major, 0, 256);
+	if (cd && cd->cdev)
+		cdev_del(cd->cdev);
+	kfree(cd);
+	return 0;
+}
+
+static DEFINE_SPINLOCK(cdev_lock);
+
+static struct kobject *cdev_get(struct cdev *p)
+{
+	struct module *owner = p->owner;
+	struct kobject *kobj;
+
+	if (owner && !try_module_get(owner))
+		return NULL;
+	kobj = kobject_get(&p->kobj);
+	if (!kobj)
+		module_put(owner);
+	return kobj;
+}
+
+void cdev_put(struct cdev *p)
+{
+	if (p) {
+		kobject_put(&p->kobj);
+		module_put(p->owner);
+	}
+}
+
+/*
+ * Called every time a character special file is opened
+ */
+int chrdev_open(struct inode * inode, struct file * filp)
+{
+	struct cdev *p;
+	struct cdev *new = NULL;
+	int ret = 0;
+
+	spin_lock(&cdev_lock);
+	p = inode->i_cdev;
+	if (!p) {
+		struct kobject *kobj;
+		int idx;
+		spin_unlock(&cdev_lock);
+		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
+		if (!kobj)
+			return -ENXIO;
+		new = container_of(kobj, struct cdev, kobj);
+		spin_lock(&cdev_lock);
+		p = inode->i_cdev;
+		if (!p) {
+			inode->i_cdev = p = new;
+			inode->i_cindex = idx;
+			list_add(&inode->i_devices, &p->list);
+			new = NULL;
+		} else if (!cdev_get(p))
+			ret = -ENXIO;
+	} else if (!cdev_get(p))
+		ret = -ENXIO;
+	spin_unlock(&cdev_lock);
+	cdev_put(new);
+	if (ret)
+		return ret;
+	filp->f_op = fops_get(p->ops);
+	if (!filp->f_op) {
+		cdev_put(p);
+		return -ENXIO;
+	}
+	if (filp->f_op->open) {
+		lock_kernel();
+		ret = filp->f_op->open(inode,filp);
+		unlock_kernel();
+	}
+	if (ret)
+		cdev_put(p);
+	return ret;
+}
+
+void cd_forget(struct inode *inode)
+{
+	spin_lock(&cdev_lock);
+	list_del_init(&inode->i_devices);
+	inode->i_cdev = NULL;
+	spin_unlock(&cdev_lock);
+}
+
+void cdev_purge(struct cdev *cdev)
+{
+	spin_lock(&cdev_lock);
+	while (!list_empty(&cdev->list)) {
+		struct inode *inode;
+		inode = container_of(cdev->list.next, struct inode, i_devices);
+		list_del_init(&inode->i_devices);
+		inode->i_cdev = NULL;
+	}
+	spin_unlock(&cdev_lock);
+}
+
+/*
+ * Dummy default file-operations: the only thing this does
+ * is contain the open that then fills in the correct operations
+ * depending on the special file...
+ */
+struct file_operations def_chr_fops = {
+	.open = chrdev_open,
+};
+
+static struct kobject *exact_match(dev_t dev, int *part, void *data)
+{
+	struct cdev *p = data;
+	return &p->kobj;
+}
+
+static int exact_lock(dev_t dev, void *data)
+{
+	struct cdev *p = data;
+	return cdev_get(p) ? 0 : -1;
+}
+
+int cdev_add(struct cdev *p, dev_t dev, unsigned count)
+{
+	p->dev = dev;
+	p->count = count;
+	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
+}
+
+static void cdev_unmap(dev_t dev, unsigned count)
+{
+	kobj_unmap(cdev_map, dev, count);
+}
+
+void cdev_del(struct cdev *p)
+{
+	cdev_unmap(p->dev, p->count);
+	kobject_put(&p->kobj);
+}
+
+
+static void cdev_default_release(struct kobject *kobj)
+{
+	struct cdev *p = container_of(kobj, struct cdev, kobj);
+	cdev_purge(p);
+}
+
+static void cdev_dynamic_release(struct kobject *kobj)
+{
+	struct cdev *p = container_of(kobj, struct cdev, kobj);
+	cdev_purge(p);
+	kfree(p);
+}
+
+static struct kobj_type ktype_cdev_default = {
+	.release	= cdev_default_release,
+};
+
+static struct kobj_type ktype_cdev_dynamic = {
+	.release	= cdev_dynamic_release,
+};
+
+struct cdev *cdev_alloc(void)
+{
+	struct cdev *p = kmalloc(sizeof(struct cdev), GFP_KERNEL);
+	if (p) {
+		memset(p, 0, sizeof(struct cdev));
+		p->kobj.ktype = &ktype_cdev_dynamic;
+		INIT_LIST_HEAD(&p->list);
+		kobject_init(&p->kobj);
+	}
+	return p;
+}
+
+void cdev_init(struct cdev *cdev, struct file_operations *fops)
+{
+	memset(cdev, 0, sizeof *cdev);
+	INIT_LIST_HEAD(&cdev->list);
+	cdev->kobj.ktype = &ktype_cdev_default;
+	kobject_init(&cdev->kobj);
+	cdev->ops = fops;
+}
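+
+/*
+ * Illustrative sketch (not part of this patch): a driver typically combines
+ * the pieces above like this.  "example_fops" and the "example" name are
+ * hypothetical.
+ */
+#if 0
+static struct cdev example_cdev;
+
+static int example_chrdev_setup(struct file_operations *example_fops)
+{
+	dev_t dev;
+	int err;
+
+	err = alloc_chrdev_region(&dev, 0, 1, "example");
+	if (err)
+		return err;
+
+	cdev_init(&example_cdev, example_fops);
+	example_cdev.owner = THIS_MODULE;
+
+	err = cdev_add(&example_cdev, dev, 1);
+	if (err)
+		unregister_chrdev_region(dev, 1);
+	return err;
+}
+#endif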
+
+static struct kobject *base_probe(dev_t dev, int *part, void *data)
+{
+	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
+		/* Make old-style 2.4 aliases work */
+		request_module("char-major-%d", MAJOR(dev));
+	return NULL;
+}
+
+void __init chrdev_init(void)
+{
+	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
+}
+
+
+/* Let modules do char dev stuff */
+EXPORT_SYMBOL(register_chrdev_region);
+EXPORT_SYMBOL(unregister_chrdev_region);
+EXPORT_SYMBOL(alloc_chrdev_region);
+EXPORT_SYMBOL(cdev_init);
+EXPORT_SYMBOL(cdev_alloc);
+EXPORT_SYMBOL(cdev_del);
+EXPORT_SYMBOL(cdev_add);
+EXPORT_SYMBOL(register_chrdev);
+EXPORT_SYMBOL(unregister_chrdev);
diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
new file mode 100644
index 0000000..acce36e
--- /dev/null
+++ b/fs/cifs/AUTHORS
@@ -0,0 +1,42 @@
+Original Author
+===============
+Steve French (sfrench@samba.org)
+
+The author wishes to express his appreciation and thanks to:
+Andrew Tridgell (Samba team) for his early suggestions about smb/cifs VFS
+improvements. Thanks to IBM for allowing me the time and test resources to pursue
+this project. Jim McDonough from IBM (and the Samba Team) for his help.
+The IBM Linux JFS team for explaining many esoteric Linux filesystem features.
+Dave Boutcher of IBM Rochester (author of the OS/400 smb/cifs filesystem client)
+for proving years ago that a very good smb/cifs client could be done on a Unix like 
+operating system.  Volker Lendecke, Andrew Tridgell, Urban Widmark, John Newbigin 
+and others for their work on the Linux smbfs module over the years. Thanks to
+the other members of the Storage Network Industry Association CIFS Technical
+Workgroup for their work specifying this highly complex protocol and finally
+thanks to the Samba team for their technical advice and encouragement.
+
+Patch Contributors
+------------------
+Zwane Mwaikambo
+Andi Kleen
+Amrut Joshi
+Shobhit Dayal
+Sergey Vlasov
+Richard Hughes
+Yury Umanets
+Mark Hamzy
+Domen Puncer
+Jesper Juhl
+
+Test case and Bug Report contributors
+-------------------------------------
+Thanks to those in the community who have submitted detailed bug reports
+and debug of problems they have found:  Jochen Dolze, David Blaine,
+Rene Scharfe, Martin Josefsson, Alexander Wild, Anthony Liguori,
+Lars Muller, Urban Widmark, Massimiliano Ferrero, Howard Owen,
+Olaf Kirch, Kieron Briggs, Nick Millington and others. Also special
+mention to the Stanford Checker (SWAT) which pointed out many minor
+bugs in error paths.
+
+And thanks to the IBM LTC and Power test teams and SuSE testers for
+finding multiple bugs during excellent stress test runs.
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
new file mode 100644
index 0000000..5316c8d
--- /dev/null
+++ b/fs/cifs/CHANGES
@@ -0,0 +1,671 @@
+Version 1.31
+------------
+Fix oops in ls when Transact2 FindFirst (or FindNext) returns more than one
+transact response for an SMB request and search entry split across two frames.
+Fix updates of DOS attributes and time fields so that files on NT4 servers
+do not get marked delete on close. Display sizes of cifs buffer pools in
+cifs stats. Fix oops in unmount when cifsd thread being killed by 
+shutdown. Add generic readv/writev and aio support. Report inode numbers 
+consistently in readdir and lookup (when serverino mount option is
+specified use the inode number that the server reports - for both lookup
+and readdir, otherwise by default the locally generated inode number is used
+for inodes created in either path since servers are not always able to 
+provide unique inode numbers when exporting multiple volumes from under one
+sharename).
+
+Version 1.30
+------------
+Allow new nouser_xattr mount parm to disable xattr support for user namespace.
+Do not flag user_xattr mount parm in dmesg.  Retry failures setting file time
+(mostly affects NT4 servers) by retrying with a handle based network operation.
+Add new POSIX Query FS Info for returning statfs info more accurately.
+Handle passwords with multiple commas in them.
+
+Version 1.29
+------------
+Fix default mode in sysfs of cifs module parms.  Remove old readdir routine.
+Fix capabilities flags for large readx so as to allow reads larger than 64K.
+
+Version 1.28
+------------
+Add module init parm for large SMB buffer size (to allow it to be changed
+from its default of 16K) which is especially useful for large file copy
+when mounting with the directio mount option. Fix oops after 
+returning from mount when experimental ExtendedSecurity enabled and
+SpnegoNegotiated returning invalid error. Fix case to retry better when 
+peek returns from 1 to 3 bytes on socket which should have more data.
+Fixed path based calls (such as cifs lookup) to handle path names
+longer than 530 (now can handle PATH_MAX). Fix pass through authentication
+from Samba server to DC (Samba required dummy LM password).
+
+Version 1.27
+------------
+Turn off DNOTIFY (directory change notification support) by default
+(unless built with the experimental flag) to fix hang with KDE
+file browser. Fix DNOTIFY flag mappings.  Fix hang (in wait_event
+waiting on an SMB response) in SendReceive when session dies but
+reconnects quickly from another task.  Add module init  parms for
+minimum number of large and small network buffers in the buffer pools,
+and for the maximum number of simultaneous requests.
+
+Version 1.26
+------------
+Add setfacl support to allow setting of ACLs remotely to Samba 3.10 and later
+and other POSIX CIFS compliant servers.  Fix error mapping for getfacl 
+to EOPNOTSUPP when server does not support posix acls on the wire. Fix 
+improperly zeroed buffer in CIFS Unix extensions set times call. 
+
+Version 1.25
+------------
+Fix internationalization problem in cifs readdir with filenames that map to 
+longer UTF8 strings than the string on the wire was in Unicode.  Add workaround
+for readdir to netapp servers. Fix search rewind (seek into readdir to return 
+non-consecutive entries).  Do not do readdir when server negotiates 
+buffer size too small to fit filename. Add support for reading POSIX ACLs from
+the server (add also acl and noacl mount options).
+
+Version 1.24
+------------
+Optionally allow using server side inode numbers, rather than client generated
+ones by specifying mount option "serverino" - this is required for some apps
+to work which double check hardlinked files and have persistent inode numbers.
+
+Version 1.23
+------------
+Multiple bigendian fixes. On little endian systems (for reconnect after
+network failure) fix tcp session reconnect code so we do not try first
+to reconnect on reverse of port 445. Treat reparse points (NTFS junctions)
+as directories rather than symlinks because we can do follow link on them.
+
+Version 1.22
+------------
+Add config option to enable XATTR (extended attribute) support, mapping
+xattr names in the "user." namespace space to SMB/CIFS EAs. Lots of
+minor fixes pointed out by the Stanford SWAT checker (mostly missing
+or out of order NULL pointer checks in little used error paths).
+
+Version 1.21
+------------
+Add new mount parm to control whether mode check (generic_permission) is done
+on the client.  If Unix extensions are enabled and the uids on the client
+and server do not match, client permission checks are meaningless on
+server uids that do not exist on the client (this does not affect the
+normal ACL check which occurs on the server).  Fix default uid
+on mknod to match create and mkdir. Add optional mount parm to allow
+override of the default uid behavior (in which the server sets the uid
+and gid of newly created files). Normally for network filesystem mounts
+users want the server to set the uid/gid on newly created files (rather than
+using the uid of the client processes as you would in a local filesystem).
+
+Version 1.20
+------------
+Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
+info into /proc/fs/cifs/DebugData.  Fix oops in rare oops in readdir 
+(in build_wildcard_path_from_dentry).  Fix mknod to pass type field
+(block/char/fifo) properly.  Remove spurious mount warning log entry when
+credentials passed as mount argument. Set major/minor device number in
+inode for block and char devices when unix extensions enabled.
+
+Version 1.19
+------------
+Fix /proc/fs/cifs/Stats and DebugData display to handle larger
+amounts of return data. Properly limit requests to MAX_REQ (50
+is the usual maximum active multiplex SMB/CIFS requests per server).
+Do not kill cifsd (and thus hurt the other SMB session) when more than one
+session to the same server (but with different userids) exists and one
+of the two users' smb sessions is being removed while leaving the other.
+Do not loop reconnecting in cifsd demultiplex thread when admin
+kills the thread without going through unmount.
+
+Version 1.18
+------------
+Do not rename hardlinked files (since that should be a noop). Flush
+cached write behind data when reopening a file after session abend,
+except when already in write. Grab per socket sem during reconnect 
+to avoid oops in sendmsg if overlapping with reconnect. Do not
+reset cached inode file size on readdir for files open for write on 
+client.
+
+
+Version 1.17
+------------
+Update number of blocks in file so du command is happier (in Linux a fake
+blocksize of 512 is required for calculating number of blocks in inode).
+Fix prepare write of partial pages to read in data from server if possible.
+Fix race on tcpStatus field between unmount and reconnection code, causing
+cifsd process sometimes to hang around forever. Improve out of memory
+checks in cifs_filldir
+
+Version 1.16
+------------
+Fix incorrect file size in file handle based setattr on big endian hardware.
+Fix oops in build_path_from_dentry when out of memory.  Add checks for invalid
+and closing file structs in writepage/partialpagewrite.  Add statistics
+for each mounted share (new menuconfig option). Fix endianness problem in
+volume information displayed in /proc/fs/cifs/DebugData (only affects
+big endian architectures). Prevent renames while constructing
+path names for open, mkdir and rmdir.
+
+Version 1.15
+------------
+Change to mempools for alloc smb request buffers and multiplex structs
+to better handle low memory problems (and potential deadlocks).
+
+Version 1.14
+------------
+Fix incomplete listings of large directories on Samba servers when Unix
+extensions enabled.  Fix oops when smb_buffer can not be allocated. Fix
+rename deadlock when writing out dirty pages at same time.
+
+Version 1.13
+------------
+Fix open of files in which O_CREAT can cause the mode to change in
+some cases. Fix case in which retry of write overlaps file close.
+Fix PPC64 build error.  Reduce excessive stack usage in smb password
+hashing. Fix overwrite of Linux user's view of file mode to Windows servers.
+
+Version 1.12
+------------
+Fixes for large file copy, signal handling, socket retry, buffer
+allocation and low memory situations.
+
+Version 1.11
+------------
+Better port 139 support to Windows servers (RFC1001/RFC1002 Session_Initialize)
+also now allowing support for specifying client netbiosname.  NT4 support added.
+
+Version 1.10
+------------
+Fix reconnection (and certain failed mounts) to properly wake up the
+blocked users thread so it does not seem hung (in some cases was blocked
+until the cifs receive timeout expired). Fix spurious error logging
+to kernel log when application with open network files killed. 
+
+Version 1.09
+------------
+Fix /proc/fs module unload warning message (that could be logged
+to the kernel log). Fix intermittent failure in connectathon
+test7 (hardlink count not immediately refreshed in case in which
+inode metadata can be incorrectly kept cached when time near zero)
+
+Version 1.08
+------------
+Allow file_mode and dir_mode (specified at mount time) to be enforced
+locally (the server already enforced its own ACLs too) for servers
+that do not report the correct mode (do not support the 
+CIFS Unix Extensions).
+
+Version 1.07
+------------
+Fix some small memory leaks in some unmount error paths. Fix major leak
+of cache pages in readpages causing multiple read oriented stress
+testcases (including fsx, and even large file copy) to fail over time. 
+
+Version 1.06
+------------
+Send NTCreateX with ATTR_POSIX if Linux/Unix extensions negotiated with server.
+This allows files that differ only in case and improves performance of file
+creation and file open to such servers.  Fix semaphore conflict which causes 
+slow delete of open file to Samba (which unfortunately can cause an oplock
+break to self while vfs_unlink held i_sem) which can hang for 20 seconds.
+
+Version 1.05
+------------
+fixes to cifs_readpages for fsx test case
+
+Version 1.04
+------------
+Fix caching data integrity bug when extending file size especially when no
+oplock on file.  Fix spurious logging of valid already parsed mount options
+that are parsed outside of the cifs vfs such as nosuid.
+
+
+Version 1.03
+------------
+Connect to server when port number override not specified, and tcp port
+unitialized.  Reset search to restart at correct file when kernel routine
+filldir returns error during large directory searches (readdir). 
+
+Version 1.02
+------------
+Fix caching problem when files opened by multiple clients in which 
+page cache could contain stale data, and write through did
+not occur often enough while file was still open when read ahead
+(read oplock) not allowed.  Treat "sep=" when first mount option
+as an override of comma as the default separator between mount
+options. 
+
+Version 1.01
+------------
+Allow passwords longer than 16 bytes. Allow null password string.
+
+Version 1.00
+------------
+Gracefully clean up failed mounts when attempting to mount to servers such as
+Windows 98 that terminate tcp sessions during protocol negotiation.  Handle
+embedded commas in mount parsing of passwords.
+
+Version 0.99
+------------
+Invalidate local inode cached pages on oplock break and when last file
+instance is closed so that the client does not continue using stale local
+copy rather than later modified server copy of file.  Do not reconnect
+when server drops the tcp session prematurely before negotiate
+protocol response.  Fix oops in reopen_file when dentry freed.  Allow
+the support for CIFS Unix Extensions to be disabled via proc interface.
+
+Version 0.98
+------------
+Fix hang in commit_write during reconnection of open files under heavy load.
+Fix unload_nls oops in a mount failure path. Serialize writes to same socket
+which also fixes any possible races when cifs signatures are enabled in SMBs
+being sent out of signature sequence number order.    
+
+Version 0.97
+------------
+Fix byte range locking bug (endian problem) causing bad offset and
+length.
+
+Version 0.96
+------------
+Fix oops (in send_sig) caused by CIFS unmount code trying to
+wake up the demultiplex thread after it had exited. Do not log
+error on harmless oplock release of closed handle.
+
+Version 0.95
+------------
+Fix unsafe global variable usage and password hash failure on gcc 3.3.1
+Fix problem reconnecting secondary mounts to same server after session 
+failure.  Fix invalid dentry - race in mkdir when directory gets created
+by another client between the lookup and mkdir.
+ 
+Version 0.94
+------------
+Fix to list processing in reopen_files. Fix reconnection when server hung
+but tcpip session still alive.  Set proper timeout on socket read.
+
+Version 0.93
+------------
+Add missing mount options including iocharset.  SMP fixes in write and open. 
+Fix errors in reconnecting after TCP session failure.  Fix module unloading
+of default nls codepage
+
+Version 0.92
+------------
+Active smb transactions should never go negative (fix double FreeXid). Fix
+list processing in file routines. Check return code on kmalloc in open.
+Fix spinlock usage for SMP.
+
+Version 0.91
+------------
+Fix oops in reopen_files when invalid dentry. drop dentry on server rename 
+and on revalidate errors. Fix cases where pid is now tgid.  Fix return code
+on create hard link when server does not support them. 
+
+Version 0.90
+------------
+Fix scheduling while atomic error in getting inode info on newly created file. 
+Fix truncate of existing files opened with O_CREAT but not O_TRUNC set.
+
+Version 0.89
+------------
+Fix oops on write to dead tcp session. Remove error log write for case when file open
+O_CREAT but not O_EXCL
+
+Version 0.88
+------------
+Fix non-POSIX behavior on rename of open file and delete of open file by taking 
+advantage of trans2 SetFileInfo rename facility if available on target server.
+Retry on ENOSPC and EAGAIN socket errors.
+
+Version 0.87
+------------
+Fix oops on big endian readdir.  Set blksize to be even power of two (2**blkbits) to fix
+allocation size miscalculation. After oplock token lost do not read through
+cache. 
+
+Version 0.86
+------------
+Fix oops on empty file readahead.  Fix for file size handling for locally cached files.
+
+Version 0.85
+------------
+Fix oops in mkdir when server fails to return inode info. Fix oops in reopen_files
+during auto reconnection to server after server recovered from failure.
+
+Version 0.84
+------------
+Finish support for Linux 2.5 open/create changes, which removes the
+redundant NTCreate/QPathInfo/close that was sent during file create.
+Enable oplock by default. Enable packet signing by default (needed to 
+access many recent Windows servers)
+
+Version 0.83
+------------
+Fix oops when mounting to long server names caused by inverted parms to kmalloc.
+Fix MultiuserMount (/proc/fs/cifs configuration setting) so that when enabled
+we will choose a cifs user session (smb uid) that better matches the local
+uid if a) the mount uid does not match the current uid and b) we have another
+session to the same server (ip address) for a different mount which
+matches the current local uid.
+
+Version 0.82
+------------
+Add support for mknod of block or character devices.  Fix oplock
+code (distributed caching) to properly send response to oplock
+break from server.
+
+Version 0.81
+------------
+Finish up CIFS packet digital signing for the default
+NTLM security case. This should help Windows 2003
+network interoperability since it is common for
+packet signing to be required now. Fix statfs (stat -f)
+which recently started returning errors due to 
+invalid value (-1 instead of 0) being set in the
+struct kstatfs f_ffiles field.
+
+Version 0.80
+------------
+Fix oops on stopping oplock thread when removing cifs when
+built as module.
+
+Version 0.79
+------------
+Fix mount options for ro (readonly), uid, gid and file and directory mode. 
+
+Version 0.78
+------------
+Fix errors displayed on failed mounts to be more understandable.
+Fixed various incorrect or misleading smb to posix error code mappings.
+
+Version 0.77
+------------
+Fix display of NTFS DFS junctions to display as symlinks.
+They are the network equivalent.  Fix oops in 
+cifs_partialpagewrite caused by missing spinlock protection
+of openfile linked list.  Allow writebehind caching errors to 
+be returned to the application at file close.
+
+Version 0.76
+------------
+Clean up options displayed in /proc/mounts by show_options to
+be more consistent with other filesystems.
+
+Version 0.75
+------------
+Fix delete of readonly file to Windows servers.  Reflect
+presence or absence of read only dos attribute in mode
+bits for servers that do not support CIFS Unix extensions.
+Fix shortened results on readdir of large directories to
+servers supporting CIFS Unix extensions (caused by
+incorrect resume key).
+
+Version 0.74
+------------
+Fix truncate bug (set file size) that could cause hangs e.g. running fsx
+
+Version 0.73
+------------
+unload nls if mount fails.
+
+Version 0.72
+------------
+Add resume key support to search (readdir) code to workaround
+Windows bug.  Add /proc/fs/cifs/LookupCacheEnable which
+allows disabling caching of attribute information for
+lookups.
+
+Version 0.71
+------------
+Add more oplock handling (distributed caching code).  Remove
+dead code.  Remove excessive stack space utilization from
+symlink routines.
+
+Version 0.70
+------------
+Fix oops in get dfs referral (triggered when null path sent in to
+mount).  Add support for overriding rsize at mount time.
+
+Version 0.69
+------------
+Fix buffer overrun in readdir which caused intermittent kernel oopses.
+Fix writepage code to release kmap on write data.  Allow the new "-ip="
+mount option to be passed in on a parameter distinct from the first part
+(the server name portion) of the UNC name.  Allow override of the
+tcp port of the target server via new mount option "-port="  
+
+Version 0.68
+------------
+Fix search handle leak on rewind.  Fix setuid and gid so that they are 
+reflected in the local inode immediately.  Cleanup of whitespace
+to make 2.4 and 2.5 versions more consistent.
+
+
+Version 0.67
+------------
+Fix signal sending so that captive thread (cifsd) exits on umount 
+(which was causing the warning in kmem_cache_free of the request buffers
+at rmmod time).  This had broken as a side effect of the recent global
+kernel change to daemonize.  Fix memory leak in readdir code which
+showed up in "ls -R" (and applications that did search rewinding).
+
+Version 0.66
+------------
+Reconnect tids and fids after session reconnection (still do not
+reconnect byte range locks though).  Fix problem caching
+lookup information for directory inodes, improving performance,
+especially in deep directory trees.  Fix various build warnings.
+
+Version 0.65
+------------
+Finish fixes to commit write for caching/readahead consistency.  fsx 
+now works to Samba servers.  Fix oops caused when readahead
+was interrupted by a signal.
+
+Version 0.64
+------------
+Fix data corruption (in partial page after truncate) that caused fsx to
+fail to Windows servers.  Cleaned up some extraneous error logging in
+common error paths.  Add generic sendfile support.
+
+Version 0.63
+------------
+Fix memory leak in AllocMidQEntry.
+Finish reconnection logic, so connection with server can be dropped
+(or server rebooted) and the cifs client will reconnect.  
+
+Version 0.62
+------------
+Fix temporary socket leak when bad userid or password specified 
+(or other SMBSessSetup failure).  Increase maximum buffer size to slightly
+over 16K to allow negotiation of up to Samba and Windows server default read 
+sizes.  Add support for readpages
+
+Version 0.61
+------------
+Fix oops when username not passed in on mount.  Extensive fixes and improvements
+to error logging (strip redundant newlines, change debug macros to ensure newline
+passed in and to be more consistent).  Fix writepage wrong file handle problem,
+a readonly file handle could be incorrectly used to attempt to write out
+file updates through the page cache to multiply open files.  This could cause
+the iozone benchmark to fail on the fwrite test. Fix bug mounting two different
+shares to the same Windows server when using different usernames
+(doing this to Samba servers worked but Windows was rejecting it) - now it is
+possible to use different userids when connecting to the same server from a
+Linux client. Fix oops when treeDisconnect called during unmount on
+previously freed socket.
+
+Version 0.60
+------------
+Fix oops in readpages caused by not setting address space operations in inode in 
+rare code path. 
+
+Version 0.59
+------------
+Includes support for deleting of open files and renaming over existing files (per POSIX
+requirement).  Add readlink support for Windows junction points (directory symlinks).
+
+Version 0.58
+------------
+Changed read and write to go through pagecache. Added additional address space operations.
+Memory mapped operations now working.
+
+Version 0.57
+------------
+Added writepage code for additional memory mapping support.  Fixed leak in xids causing
+the simultaneous operations counter (/proc/fs/cifs/SimultaneousOps) to increase on 
+every stat call.  Additional formatting cleanup. 
+
+Version 0.56
+------------
+Fix bigendian bug in order of time conversion. Merge 2.5 to 2.4 version.  Formatting cleanup.   
+
+Version 0.55
+------------
+Fixes from Zwane Mwaikambo for adding missing return code checking in a few places.
+Also included a modified version of his fix to protect global list manipulation of
+the smb session and tree connection and mid related global variables.
+
+Version 0.54
+------------
+Fix problem with captive thread hanging around at unmount time.  Adjust to 2.5.42-pre
+changes to superblock layout.   Remove wasteful allocation of smb buffers (now the send 
+buffer is reused for responses).  Add more oplock handling. Additional minor cleanup.
+
+Version 0.53
+------------
+More stylistic updates to better match kernel style.  Add additional statistics
+for filesystem which can be viewed via /proc/fs/cifs.  Add more pieces of NTLMv2
+and CIFS Packet Signing enablement.
+
+Version 0.52
+------------
+Replace call to sleep_on with safer wait_on_event.
+Make stylistic changes to better match kernel style recommendations.
+Remove most typedef usage (except for the PDUs themselves).
+
+Version 0.51
+------------
+Update mount so the -unc mount option is no longer required (the ip address can be specified
+in a UNC style device name).  Implementation of readpage/writepage started.
+
+Version 0.50
+------------
+Fix intermittent problem with incorrect smb header checking on badly
+fragmented tcp responses.
+
+Version 0.49
+------------
+Fixes to setting of allocation size and file size.
+
+Version 0.48
+------------
+Various 2.5.38 fixes.  Now works on 2.5.38.
+
+Version 0.47
+------------
+Prepare for 2.5 kernel merge.  Remove ifdefs.
+
+Version 0.46
+------------
+Socket buffer management fixes.  Fix double free.
+
+Version 0.45
+------------
+Various big endian fixes for hardlinks and symlinks and also for dfs.
+
+Version 0.44
+------------
+Various big endian fixes for servers with Unix extensions such as Samba.
+
+Version 0.43
+------------
+Various FindNext fixes for incorrect filenames on large directory searches on big endian
+clients.  Basic POSIX file i/o tests now work on big endian machines, not just little endian.
+
+Version 0.42
+------------
+SessionSetup and NegotiateProtocol now work from Big Endian machines.
+Various Big Endian fixes found during testing on Linux on 390.  Various fixes for compatibility with older
+versions of the 2.4 kernel (now builds and works again on kernels at least as early as 2.4.7).
+
+Version 0.41
+------------
+Various minor fixes for Connectathon Posix "basic" file i/o test suite.  Directory caching fixed so hardlinked
+files now return the correct number of links on fstat as they are repeatedly linked and unlinked.
+
+Version 0.40
+------------
+Implemented "Raw" (i.e. not encapsulated in SPNEGO) NTLMSSP (i.e. the Security Provider Interface used to negotiate
+session advanced session authentication).  Raw NTLMSSP is preferred by Windows 2000 Professional and Windows XP.
+Began implementing support for SPNEGO encapsulation of NTLMSSP based session authentication blobs
+(which is the mechanism preferred by Windows 2000 server in the absence of Kerberos).
+
+Version 0.38
+------------
+Introduced optional mount helper utility mount.cifs and made corequisite changes to the cifs vfs to enable
+it. Fixed a few bugs in the DFS code (e.g. bcc two bytes too short and incorrect uid in PDU).
+
+Version 0.37
+------------
+Rewrote much of connection and mount/unmount logic to handle bugs with
+multiple uses of the same share, multiple users to the same server, etc.
+
+Version 0.36
+------------
+Fixed major problem with dentry corruption (missing call to dput)
+
+Version 0.35
+------------
+Rewrite of readdir code to fix bug. Various fixes for bigendian machines.
+Begin adding oplock support.  Multiusermount and oplockEnabled flags added to /proc/fs/cifs
+although the corresponding functionality is not fully implemented in the vfs yet.
+
+Version 0.34
+------------
+Fixed dentry caching bug, misc. cleanup 
+
+Version 0.33
+------------
+Fixed 2.5 support to handle build and configure changes as well as misc. 2.5 changes.  Now can build
+on the current 2.5 beta version (2.5.24) of the Linux kernel as well as on 2.4 Linux kernels.
+Support for STATUS codes (newer 32 bit NT error codes) added.  Began adding DFS support.
+
+Version 0.32
+------------
+Unix extensions (symlink, readlink, hardlink, chmod and some chgrp and chown) implemented
+and tested against Samba 2.2.5
+
+
+Version 0.31
+------------
+1) Fixed lockrange to be correct (it was one byte too short)
+
+2) Fixed GETLK (i.e. the fcntl call to test a range of bytes in a file to see if locked) to correctly 
+show range as locked when there is a conflict with an existing lock.
+
+3) default file perms are now 2767 (indicating support for mandatory locks) instead of 777 for directories
+in most cases.  Eventually will offer optional ability to query server for the correct perms.
+
+4) Fixed eventual trap when mounting twice to different shares on the same server when the first succeeded
+but the second one was invalid and failed (the second one was incorrectly disconnecting the tcp and smb
+session)
+
+5) Fixed error logging of valid mount options
+
+6) Removed logging of password field.
+
+7) Moved negotiate, treeDisconnect and uloggoffX (only tConx and SessSetup remain in connect.c) to cifssmb.c
+and cleaned them up and made them more consistent with other cifs functions.
+
+8) Server support for Unix extensions is now fully detected and FindFirst is implemented both ways
+(with or without Unix extensions) but FindNext and QueryPathInfo with the Unix extensions are not completed,
+nor is the symlink support using the Unix extensions.
+
+9) Started adding the readlink and follow_link code
+
+Version 0.3 
+-----------
+Initial drop
+
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
new file mode 100644
index 0000000..7384947
--- /dev/null
+++ b/fs/cifs/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Linux CIFS VFS client 
+#
+obj-$(CONFIG_CIFS) += cifs.o
+
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o
diff --git a/fs/cifs/README b/fs/cifs/README
new file mode 100644
index 0000000..0f20edc
--- /dev/null
+++ b/fs/cifs/README
@@ -0,0 +1,475 @@
+The CIFS VFS support for Linux supports many advanced network filesystem
+features such as a hierarchical dfs-like namespace, hardlinks, locking and more.
+It was designed to comply with the SNIA CIFS Technical Reference (which
+supersedes the 1992 X/Open SMB Standard) as well as to provide practical,
+best-practice interoperability with Windows 2000, Windows XP, Samba and
+equivalent servers.
+
+For questions or bug reports please contact:
+    sfrench@samba.org (sfrench@us.ibm.com) 
+
+Build instructions:
+==================
+For Linux 2.4:
+1) Get the kernel source (e.g. from http://www.kernel.org) and download the
+cifs vfs source (see the project page at
+http://us1.samba.org/samba/Linux_CIFS_client.html), then change directory
+into the top of the kernel directory tree.  Patch the kernel (e.g.
+"patch -p1 < cifs_24.patch") to add the cifs vfs to your kernel configure
+options if it has not already been added (e.g. current SuSE and UL users do
+not need to apply the cifs_24.patch since the cifs vfs is already in the
+kernel configure menu).  Then mkdir linux/fs/cifs and copy the current cifs
+vfs files from the cifs download to your kernel build directory e.g.
+
+	cp <cifs_download_dir>/fs/cifs/* to <kernel_download_dir>/fs/cifs
+	
+2) make menuconfig (or make xconfig)
+3) select cifs from within the network filesystem choices
+4) save and exit
+5) make dep
+6) make modules (or "make" if CIFS VFS not to be built as a module)
+
+For Linux 2.6:
+1) Download the kernel (e.g. from http://www.kernel.org or from bitkeeper
+at bk://linux.bkbits.net/linux-2.5) and change directory into the top
+of the kernel directory tree (e.g. /usr/src/linux-2.5.73)
+2) make menuconfig (or make xconfig)
+3) select cifs from within the network filesystem choices
+4) save and exit
+5) make
+
+
+Installation instructions:
+=========================
+If you have built the CIFS vfs as a module (successfully), simply
+type "make modules_install" (or, if you prefer, manually copy the file to
+the modules directory e.g. /lib/modules/2.4.10-4GB/kernel/fs/cifs/cifs.o).
+
+If you have built the CIFS vfs into the kernel itself, follow the instructions
+for your distribution on how to install a new kernel (usually you
+would simply type "make install").
+
+If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on 
+the CIFS VFS web site) copy it to the same directory in which mount.smbfs and 
+similar files reside (usually /sbin).  Although the helper software is not  
+required, mount.cifs is recommended.  Eventually the Samba 3.0 utility program
+"net" may also be helpful since it may someday provide easier mount syntax for
+users who are used to Windows, e.g. "net use <mount point> <UNC name or cifs URL>".
+Note that running the Winbind pam/nss module (logon service) on all of your
+Linux clients is useful in mapping Uids and Gids consistently across the
+domain to the proper network user.  The mount.cifs mount helper can be
+trivially built from Samba 3.0 or later source e.g. by executing:
+
+	gcc samba/source/client/mount.cifs.c -o mount.cifs
+
+If cifs is built as a module, then the size and number of network buffers
+and maximum number of simultaneous requests to one server can be configured.
+Changing these from their defaults is not recommended. By executing
+	modinfo kernel/fs/cifs/cifs.ko
+the list of configuration options that can be changed at module
+initialization time (by running insmod cifs.ko) can be seen.
+
+Allowing User Mounts
+====================
+With the cifs vfs it is possible to permit users to mount and unmount shares
+over directories they own.  A way to enable such mounting is to mark the
+mount.cifs utility as suid (e.g. "chmod +s /sbin/mount.cifs").  To enable
+users to umount shares they have mounted requires
+1) mount.cifs version 1.4 or later
+2) an entry for the share in /etc/fstab indicating that a user may
+unmount it e.g.
+//server/usersharename  /mnt/username cifs user 0 0
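+
+With such an fstab entry in place, an unprivileged user could then, for
+example, mount and unmount the share by mount point alone (the share and
+mount point names here are just placeholders):
+
+	mount /mnt/username
+	umount /mnt/username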
+
+Note that when the mount.cifs utility is run suid (allowing user mounts), 
+in order to reduce risks, the "nosuid" mount flag is passed in on mount to
+disallow execution of an suid program mounted on the remote target.
+When mount is executed as root, nosuid is not passed in by default,
+and execution of suid programs on the remote target would be enabled
+by default. This can be changed, as with nfs and other filesystems,
+by simply specifying "nosuid" among the mount options. For user mounts,
+though, being able to pass the suid flag to mount requires rebuilding
+mount.cifs with the following flag:
+ 
+        gcc samba/source/client/mount.cifs.c -DCIFS_ALLOW_USR_SUID -o mount.cifs
+
+There is a corresponding manual page for cifs mounting in the Samba 3.0 and
+later source tree in docs/manpages/mount.cifs.8 
+
+Samba Considerations 
+==================== 
+To get the maximum benefit from the CIFS VFS, we recommend using a server that 
+supports the SNIA CIFS Unix Extensions standard (e.g.  Samba 2.2.5 or later or 
+Samba 3.0) but the CIFS vfs works fine with a wide variety of CIFS servers.  
+Note that uid, gid and file permissions will display default values if you do 
+not have a server that supports the Unix extensions for CIFS (such as Samba 
+2.2.5 or later).  To enable the Unix CIFS Extensions in the Samba server, add 
+the line: 
+
+	unix extensions = yes
+	
+to your smb.conf file on the server.  Note that the following smb.conf settings 
+are also useful (on the Samba server) when the majority of clients are Unix or 
+Linux: 
+
+	case sensitive = yes
+	delete readonly = yes 
+	ea support = yes
+
+Note that server ea support is required for supporting xattrs from the Linux
+cifs client, and that EA support is present in later versions of Samba (e.g.
+3.0.6 and later).  EA support also works in all versions of Windows, at least
+to shares on NTFS filesystems.  Extended Attribute (xattr) support is an
+optional feature of most Linux filesystems which may require enabling via
+make menuconfig. Client support for extended attributes (user xattr) can be
+disabled on a per-mount basis by specifying "nouser_xattr" on mount.
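+
+As a quick, illustrative check of xattr support (the attribute name and path
+below are just placeholder examples), the standard attr utilities can be used
+on files in a cifs mount:
+
+	setfattr -n user.comment -v "some value" /mnt/somefile
+	getfattr -n user.comment /mnt/somefile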
+
+The CIFS client can get and set POSIX ACLs (getfacl, setfacl) to Samba servers
+version 3.10 and later.  Setting POSIX ACLs requires enabling both XATTR and 
+then POSIX support in the CIFS configuration options when building the cifs
+module.  POSIX ACL support can be disabled on a per mount basis by specifying
+"noacl" on mount.
+ 
+Some administrators may want to change Samba's smb.conf "map archive" and 
+"create mask" parameters from the default.  Unless the create mask is changed
+newly created files can end up with an unnecessarily restrictive default mode,
+which may not be what you want, although if the CIFS Unix extensions are
+enabled on the server and client, subsequent setattr calls (e.g. chmod) can
+fix the mode.  Note that creating special devices (mknod) remotely 
+may require specifying a mkdev function to Samba if you are not using 
+Samba 3.0.6 or later.  For more information on these see the manual pages
+("man smb.conf") on the Samba server system.  Note that the cifs vfs,
+unlike the smbfs vfs, does not read the smb.conf on the client system 
+(the few optional settings are passed in on mount via -o parameters instead).  
+Note that Samba 2.2.7 or later includes a fix that allows the CIFS VFS to delete
+open files (required for strict POSIX compliance).  Windows Servers already 
+supported this feature. Samba server does not allow symlinks that refer to files
+outside of the share, so in Samba versions prior to 3.0.6, most symlinks to
+files with absolute paths (ie beginning with slash) such as:
+	 ln -s /mnt/foo bar
+would be forbidden. Samba 3.0.6 server or later includes the ability to create
+such symlinks safely by converting unsafe symlinks (ie symlinks to server
+files that are outside of the share) to a samba specific format on the server
+that is ignored by local server applications and non-cifs clients and that will
+not be traversed by the Samba server.  This is opaque to the Linux client
+application using the cifs vfs. Absolute symlinks will work to Samba 3.0.5 or
+later, but only for remote clients using the CIFS Unix extensions, and will
+be invisible to Windows clients and typically will not affect local
+applications running on the same server as Samba.
+
+Use instructions:
+================
+Once the CIFS VFS support is built into the kernel or installed as a module 
+(cifs.o), you can use mount syntax like the following to access Samba or Windows 
+servers: 
+
+  mount -t cifs //9.53.216.11/e$ /mnt -o user=myname,pass=mypassword
+
+Before -o the option -v may be specified to make the mount.cifs
+mount helper display the mount steps more verbosely.  
+After -o the following commonly used cifs vfs specific options
+are supported:
+
+  user=<username>
+  pass=<password>
+  domain=<domain name>
+  
+Other cifs mount options are described below.  Use of TCP names (in addition to
+ip addresses) is available if the mount helper (mount.cifs) is installed. If
+you do not trust the server to which you are mounted, or if you do not have
+cifs signing enabled (and the physical network is insecure), consider use
+of the standard mount options "noexec" and "nosuid" to reduce the risk of
+running an altered binary on your local system (downloaded from a hostile server
+or altered by a hostile router).
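+
+For example (the server, share and user name below are placeholders), such a
+hardened mount might look like:
+
+  mount -t cifs //server/share /mnt -o user=myname,pass=mypassword,noexec,nosuid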
+
+Although mounting using a format corresponding to the CIFS URL specification is
+not possible in mount.cifs yet, it is possible to use an alternate format
+for the server and sharename (which is somewhat similar to NFS style mount
+syntax) instead of the more widely used UNC format (i.e. \\server\share):
+  mount -t cifs tcp_name_of_server:share_name /mnt -o user=myname,pass=mypasswd
+
+When using the mount helper mount.cifs, passwords may be specified via
+alternate mechanisms, instead of being specified after -o using the normal
+"pass=" syntax on the command line:
+1) By including it in a credential file. Specify credentials=filename as one
+of the mount options. Credential files contain two lines:
+        username=someuser
+        password=your_password
+2) By specifying the password in the PASSWD environment variable (similarly
+the user name can be taken from the USER environment variable).
+3) By specifying the password in a file by name via PASSWD_FILE
+4) By specifying the password in a file by file descriptor via PASSWD_FD
+
+If no password is provided, mount.cifs will prompt for password entry.
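+
+As an illustration of the first two mechanisms above (the file name, server
+and share below are placeholders), either of the following could be used:
+
+	mount -t cifs //server/share /mnt -o credentials=/etc/cifs.creds
+	PASSWD=your_password mount -t cifs //server/share /mnt -o user=someuser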
+
+Restrictions
+============
+Servers must support the NTLM SMB dialect (which is the most recent, supported
+by Samba and Windows NT version 4, 2000 and XP and many other SMB/CIFS servers).
+Servers must support either "pure-TCP" (port 445 TCP/IP CIFS connections) or RFC
+1001/1002 support for "Netbios-Over-TCP/IP." Neither of these is likely to be a
+problem as most servers support at least one of them.  IPv6 support is planned
+for the future, and is almost complete.
+
+Valid filenames differ between Windows and Linux.  Windows typically restricts
+filenames which contain certain reserved characters (e.g. the character :
+which is used to delimit the beginning of a stream name by Windows), while
+Linux allows a slightly wider set of valid characters in filenames. Windows
+servers can remap such characters when an explicit mapping is specified in
+the Server's registry.  Samba starting with version 3.10 will allow such 
+filenames (ie those which contain valid Linux characters, which normally
+would be forbidden for Windows/CIFS semantics) as long as the server is
+configured for Unix Extensions (and the client has not disabled
+/proc/fs/cifs/LinuxExtensionsEnabled).
+  
+
+CIFS VFS Mount Options
+======================
+A partial list of the supported mount options follows:
+  user		The user name to use when trying to establish
+		the CIFS session.
+  password	The user password.  If the mount helper is
+		installed, the user will be prompted for password
+		if it is not supplied.
+  ip		The ip address of the target server
+  unc		The target server Universal Network Name (export) to 
+		mount.	
+  domain	Set the SMB/CIFS workgroup name prepended to the
+		username during CIFS session establishment
+  uid		If CIFS Unix extensions are not supported by the server
+		this overrides the default uid for inodes. For mounts to
+		servers which do support the CIFS Unix extensions, such
+		as a properly configured Samba server, the server provides
+		the uid, gid and mode.  For servers which do not support
+		the Unix extensions, the default uid (and gid) returned on
+		lookup of existing files is the uid (gid) of the person
+		who executed the mount (root, except when mount.cifs
+		is configured setuid for user mounts) unless the "uid=" 
+		(gid) mount option is specified.  For the uid (gid) of newly
+		created files and directories, ie files created since 
+		the last mount of the server share, the expected uid 
+		(gid) is cached as long as the inode remains in
+		memory on the client.   Also note that permission
+		checks (authorization checks) on accesses to a file occur
+		at the server, but there are cases in which an administrator
+		may want to restrict at the client as well.  For those
+		servers which do not report a uid/gid owner
+		(such as Windows), permissions can also be checked at the
+		client, and a crude form of client side permission checking 
+		can be enabled by specifying file_mode and dir_mode on 
+		the client
+  gid		If CIFS Unix extensions are not supported by the server
+		this overrides the default gid for inodes.
+  file_mode     If CIFS Unix extensions are not supported by the server
+		this overrides the default mode for file inodes.
+  dir_mode      If CIFS Unix extensions are not supported by the server 
+		this overrides the default mode for directory inodes.
+  port		attempt to contact the server on this tcp port, before
+		trying the usual ports (port 445, then 139).
+  iocharset     Codepage used to convert local path names to and from
+		Unicode. Unicode is used by default for network path
+		names if the server supports it.  If iocharset is
+		not specified then the nls_default specified
+		during the local client kernel build will be used.
+		If server does not support Unicode, this parameter is
+		unused.
+  rsize		default read size
+  wsize		default write size
+  rw		mount the network share read-write (note that the
+		server may still consider the share read-only)
+  ro		mount network share read-only
+  version	used to distinguish different versions of the
+		mount helper utility (not typically needed)
+  sep		if first mount option (after the -o), overrides
+		the comma as the separator between the mount
+		parms. e.g.
+			-o user=myname,password=mypassword,domain=mydom
+		could be passed instead with period as the separator by
+			-o sep=.user=myname.password=mypassword.domain=mydom
+		this might be useful when comma is contained within username
+		or password or domain. This option is less important
+		when the cifs mount helper mount.cifs (version 1.1 or later)
+		is used.
+  nosuid        Do not allow remote executables with the suid bit
+		set to be executed.  This is only meaningful for mounts
+		to servers such as Samba which support the CIFS Unix Extensions.
+		If you do not trust the servers in your network (your mount
+		targets) it is recommended that you specify this option for
+		greater security.
+  exec		Permit execution of binaries on the mount.
+  noexec	Do not permit execution of binaries on the mount.
+  dev		Recognize block devices on the remote mount.
+  nodev		Do not recognize devices on the remote mount.
+  suid          Allow remote files on this mountpoint with suid enabled to 
+		be executed (default for mounts when executed as root,
+		nosuid is default for user mounts).
+  credentials   Although ignored by the cifs kernel component, it is used by 
+		the mount helper, mount.cifs. When mount.cifs is installed it
+		opens and reads the credential file specified in order  
+		to obtain the userid and password arguments which are passed to
+		the cifs vfs.
+  guest         Although ignored by the kernel component, the mount.cifs
+		mount helper will not prompt the user for a password
+		if guest is specified on the mount options.  If no
+		password is specified a null password will be used.
+  perm          Client does permission checks (vfs_permission check of uid
+		and gid of the file against the mode and desired operation).
+		Note that this is in addition to the normal ACL check on the
+		target machine done by the server software. 
+		Client permission checking is enabled by default.
+  noperm        Client does not do permission checks.  This can expose
+		files on this mount to access by other users on the local
+		client system. It is typically only needed when the server
+		supports the CIFS Unix Extensions but the UIDs/GIDs on the
+		client and server system do not match closely enough to allow
+		access by the user doing the mount.
+		Note that this does not affect the normal ACL check on the
+		target machine done by the server software (of the server
+		ACL against the user name provided at mount time).
+  serverino	Use the server's inode numbers instead of generating automatically
+		incrementing inode numbers on the client.  Although this will
+		make it easier to spot hardlinked files (as they will have
+		the same inode numbers) and inode numbers may be persistent,
+		note that the server does not guarantee that the inode numbers
+		are unique if multiple server side mounts are exported under a
+		single share (since inode numbers on the servers might not
+		be unique if multiple filesystems are mounted under the same
+		shared higher level directory).  Note that this requires that
+		the server support the CIFS Unix Extensions as other servers
+		do not return a unique IndexNumber on SMB FindFirst (most
+		servers return zero as the IndexNumber).  This parameter has no
+		effect for Windows servers and others which do not support the
+		CIFS Unix Extensions.
+  noserverino   Client generates inode numbers (rather than using the actual one
+		from the server) by default.
+  setuids       If the CIFS Unix extensions are negotiated with the server
+		the client will attempt to set the effective uid and gid of
+		the local process on newly created files, directories, and
+		devices (create, mkdir, mknod).
+  nosetuids     The client will not attempt to set the uid and gid on
+		newly created files, directories, and devices (create,
+		mkdir, mknod) which will result in the server setting the
+		uid and gid to the default (usually the server uid of the
+		user who mounted the share).  Letting the server (rather than
+		the client) set the uid and gid is the default. This
+		parameter has no effect if the CIFS Unix Extensions are not
+		negotiated.
+  netbiosname   When mounting to servers via port 139, specifies the RFC1001
+		source name to use to represent the client netbios machine 
+		name when doing the RFC1001 netbios session initialize.
+  direct        Do not do inode data caching on files opened on this mount.
+		This precludes mmapping files on this mount. In some cases
+		with fast networks and little or no caching benefits on the
+		client (e.g. when the application is doing large sequential
+		reads bigger than page size without rereading the same data) 
+		this can provide better performance than the default
+		behavior which caches reads (readahead) and writes
+		(writebehind) through the local Linux client pagecache 
+		if oplock (caching token) is granted and held. Note that
+		direct allows write operations larger than page size
+		to be sent to the server.
+  acl   	Allow setfacl and getfacl to manage posix ACLs if server
+		supports them.  (default)
+  noacl 	Do not allow setfacl and getfacl calls on this mount
+  user_xattr    Allow getting and setting user xattrs as OS/2 EAs (extended
+		attributes) to the server (default) e.g. via setfattr 
+		and getfattr utilities. 
+  nouser_xattr  Do not allow getfattr/setfattr to get/set xattrs 
+		
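+As an illustration combining several of the options above (all of the names
+below are placeholders), a mount to a server without the CIFS Unix Extensions
+might look like:
+
+  mount -t cifs //server/share /mnt -o user=myname,file_mode=0644,dir_mode=0755
+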
+The mount.cifs mount helper also accepts a few mount options before -o
+including:
+
+	-S      take password from stdin (equivalent to setting the environment
+		variable "PASSWD_FD=0")
+	-V      print mount.cifs version
+	-?      display simple usage information
+
+With recent 2.6 kernel versions of modutils, the version of the cifs kernel
+module can be displayed via modinfo.
+
+Misc /proc/fs/cifs Flags and Debug Info
+=======================================
+Informational pseudo-files:
+DebugData		Displays information about active CIFS sessions
+			and shares.
+Stats			Lists summary resource usage information as well as per
+			share statistics, if CONFIG_CIFS_STATS is enabled
+			in the kernel configuration.
+
+Configuration pseudo-files:
+MultiuserMount		If set to one, more than one CIFS session to 
+			the same server ip address can be established
+			if more than one uid accesses the same mount
+			point and if the uids' user/password mapping
+			information is available. (default is 0)
+PacketSigningEnabled	If set to one, cifs packet signing is enabled
+			and will be used if the server requires 
+			it.  If set to two, cifs packet signing is
+			required even if the server considers packet
+			signing optional. (default 1)
+cifsFYI			If set to one, additional debug information is
+			logged to the system error log. (default 0)
+ExtendedSecurity	If set to one, SPNEGO session establishment
+			is allowed which enables more advanced 
+			secure CIFS session establishment (default 0)
+NTLMV2Enabled		If set to one, more secure password hashes
+			are used when the server supports them and
+			when kerberos is not negotiated (default 0)
+traceSMB		If set to one, debug information is logged to the
+			system error log with the start of smb requests
+			and responses (default 0)
+LookupCacheEnable	If set to one, inode information is kept cached
+			for one second improving performance of lookups
+			(default 1)
+OplockEnabled		If set to one, safe distributed caching is enabled.
+			(default 1)
+LinuxExtensionsEnabled	If set to one then the client will attempt to
+			use the CIFS "UNIX" extensions which are optional
+			protocol enhancements that allow CIFS servers
+			to return accurate UID/GID information as well
+			as support symbolic links. If you use servers
+			such as Samba that support the CIFS Unix
+			extensions but do not want to use symbolic link
+			support and want to map the uid and gid fields 
+			to values supplied at mount (rather than the
+			actual values), then set this to zero. (default 1)
+
+These experimental features and tracing can be enabled by changing flags in 
+/proc/fs/cifs (after the cifs module has been installed or built into the 
+kernel, e.g.  insmod cifs).  To enable a feature set it to 1 e.g.  to enable 
+tracing to the kernel message log type: 
+
+	echo 1 > /proc/fs/cifs/cifsFYI
+	
+and for more extensive tracing, including the start of smb requests and responses:
+
+	echo 1 > /proc/fs/cifs/traceSMB
+
+Two other experimental features are under development; testing them
+requires enabling CONFIG_CIFS_EXPERIMENTAL:
+
+	More efficient write operations and SMB buffer handling
+
+	DNOTIFY fcntl: needed for support of directory change
+			    notification and perhaps later for file leases
+
+Per share (per client mount) statistics are available in /proc/fs/cifs/Stats
+if the kernel was configured with cifs statistics enabled.  The statistics
+represent the number of successful (ie non-error return code from the server)
+SMB responses to some of the more common commands (open, delete, mkdir etc.).
+Also recorded is the total bytes read and bytes written to the server for
+that share.  Note that due to client caching effects this can be less than the
+number of bytes read and written by the application running on the client.
+The statistics for the number of total SMBs and oplock breaks are different in
+that they represent all for that share, not just those for which the server
+returned success.
+	
+Also note that "cat /proc/fs/cifs/DebugData" will display information about
+the active sessions and the shares that are mounted.  Note: NTLMv2 enablement
+will not work since its implementation is not quite complete yet.
+Do not alter these configuration values unless you are doing specific testing.
+Enabling extended security works to Windows 2000 workstations and XP but not to
+Windows 2000 server or Samba since it does not usually send "raw NTLMSSP"
+(instead it sends NTLMSSP encapsulated in SPNEGO/GSSAPI, support for which is
+not complete in the CIFS VFS yet).
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
new file mode 100644
index 0000000..f4e3e1f
--- /dev/null
+++ b/fs/cifs/TODO
@@ -0,0 +1,104 @@
+version 1.22 July 30, 2004 
+
+A Partial List of Missing Features
+==================================
+
+Contributions are welcome.  There are plenty of opportunities
+for visible, important contributions to this module.  Here
+is a partial list of the known problems and missing features:
+
+a) Support for SecurityDescriptors for chmod/chgrp/chown so
+these can be supported for Windows servers
+
+b) Better pam/winbind integration (e.g. to handle uid mapping
+better)
+
+c) multi-user mounts - multiplexed sessionsetups over single vc
+(ie tcp session) - prettying up needed, and more testing needed
+
+d) Kerberos/SPNEGO session setup support - (started)
+
+e) NTLMv2 authentication (mostly implemented)
+
+f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup
+used (Kerberos or NTLMSSP). Signing is already implemented for NTLM
+and raw NTLMSSP. This is important when enabling
+extended security and mounting to Windows 2003 Servers
+
+g) Directory entry caching relies on a 1 second timer, rather than
+using FindNotify or equivalent.  - (started)
+
+h) A few byte range testcases fail due to POSIX vs. Windows/CIFS
+style byte range lock differences
+
+i) quota support
+
+j) finish writepages support (multi-page write behind for improved
+performance) and syncpage
+
+k) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
+extra copy in/out of the socket buffers in some cases.
+
+l) finish support for IPv6.  This is mostly complete but
+needs a simple conversion of ipv6 to sin6_addr from the
+address in string representation.
+
+m) Better optimize open (and pathbased setfilesize) to reduce the
+oplock breaks coming from windows srv.  Piggyback identical file
+opens on top of each other by incrementing reference count rather
+than resending (helps reduce server resource utilization and avoid
+spurious oplock breaks).
+
+n) Improve performance of readpages by sending more than one read
+at a time when 8 pages or more are requested. In conjunction
+add support for async_cifs_readpages.
+
+o) Add support for storing symlink and fifo info to Windows servers
+in the Extended Attribute format their SFU clients would recognize.
+
+p) Finish fcntl D_NOTIFY support so kde and gnome file list windows
+will autorefresh (started)
+
+q) Add GUI tool to configure /proc/fs/cifs settings and for display of
+the CIFS statistics (started)
+
+r) implement support for security and trusted categories of xattrs
+(requires minor protocol extension) to enable better support for SELINUX
+
+s) Implement O_DIRECT flag on open (already supported on mount)
+
+KNOWN BUGS (updated December 10, 2004)
+====================================
+1) existing symbolic links (Windows reparse points) are recognized but
+can not be created remotely. They are implemented for Samba and those that
+support the CIFS Unix extensions, but Samba currently has a bug handling
+symlink text beginning with slash
+2) follow_link and readdir code does not follow dfs junctions
+but recognizes them
+3) create of new files to FAT partitions on Windows servers can
+succeed but still return access denied (appears to be a Windows
+server problem, not a cifs client problem) and has not been reproduced recently.
+NTFS partitions do not have this problem.
+4) debug connectathon lock test case 10 which fails against
+Samba (may be unmappable due to POSIX to Windows lock model
+differences but worth investigating).  Also debug Samba to 
+see why lock test case 7 takes longer to complete to Samba
+than to Windows.
+
+Misc testing to do
+==================
+1) check out max path names and max path name components against various server
+types. Try nested symlinks (8 deep). Return max path name in stat -f information
+
+2) Modify file portion of ltp so it can run against a mounted network
+share and run it against cifs vfs.
+
+3) Additional performance testing and optimization using iozone and similar - 
+there are some easy changes that can be done to parallelize sequential writes,
+and when signing is disabled to request larger read sizes (larger than 
+negotiated size) and send larger write sizes to modern servers.
+
+4) More exhaustively test the recently added NT4 support against various
+NT4 service pack levels, and fix cifs_setattr for setting file times and
+size to fall back to level 1 when the error "invalid level" is returned.
+
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
new file mode 100644
index 0000000..e02010d
--- /dev/null
+++ b/fs/cifs/asn1.c
@@ -0,0 +1,618 @@
+/* 
+ * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
+ * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
+ *      
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifs_debug.h"
+#include "cifsproto.h"
+
+/*****************************************************************************
+ *
+ * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
+ *
+ *****************************************************************************/
+
+/* Class */
+#define ASN1_UNI	0	/* Universal */
+#define ASN1_APL	1	/* Application */
+#define ASN1_CTX	2	/* Context */
+#define ASN1_PRV	3	/* Private */
+
+/* Tag */
+#define ASN1_EOC	0	/* End Of Contents or N/A */
+#define ASN1_BOL	1	/* Boolean */
+#define ASN1_INT	2	/* Integer */
+#define ASN1_BTS	3	/* Bit String */
+#define ASN1_OTS	4	/* Octet String */
+#define ASN1_NUL	5	/* Null */
+#define ASN1_OJI	6	/* Object Identifier  */
+#define ASN1_OJD	7	/* Object Description */
+#define ASN1_EXT	8	/* External */
+#define ASN1_SEQ	16	/* Sequence */
+#define ASN1_SET	17	/* Set */
+#define ASN1_NUMSTR	18	/* Numerical String */
+#define ASN1_PRNSTR	19	/* Printable String */
+#define ASN1_TEXSTR	20	/* Teletext String */
+#define ASN1_VIDSTR	21	/* Video String */
+#define ASN1_IA5STR	22	/* IA5 String */
+#define ASN1_UNITIM	23	/* Universal Time */
+#define ASN1_GENTIM	24	/* General Time */
+#define ASN1_GRASTR	25	/* Graphical String */
+#define ASN1_VISSTR	26	/* Visible String */
+#define ASN1_GENSTR	27	/* General String */
+
+/* Primitive / Constructed methods*/
+#define ASN1_PRI	0	/* Primitive */
+#define ASN1_CON	1	/* Constructed */
+
+/*
+ * Error codes.
+ */
+#define ASN1_ERR_NOERROR		0
+#define ASN1_ERR_DEC_EMPTY		2
+#define ASN1_ERR_DEC_EOC_MISMATCH	3
+#define ASN1_ERR_DEC_LENGTH_MISMATCH	4
+#define ASN1_ERR_DEC_BADVALUE		5
+
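+/*
+ * Mechanism OIDs, in dotted form: SPNEGO is 1.3.6.1.5.5.2 and
+ * NTLMSSP is 1.3.6.1.4.1.311.2.2.10
+ */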
+#define SPNEGO_OID_LEN 7
+#define NTLMSSP_OID_LEN  10
+static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
+static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
+
+/* 
+ * ASN.1 context.
+ */
+struct asn1_ctx {
+	int error;		/* Error condition */
+	unsigned char *pointer;	/* Octet just to be decoded */
+	unsigned char *begin;	/* First octet */
+	unsigned char *end;	/* Octet after last octet */
+};
+
+/*
+ * Octet string (not null terminated)
+ */
+struct asn1_octstr {
+	unsigned char *data;
+	unsigned int len;
+};
+
+static void
+asn1_open(struct asn1_ctx *ctx, unsigned char *buf, unsigned int len)
+{
+	ctx->begin = buf;
+	ctx->end = buf + len;
+	ctx->pointer = buf;
+	ctx->error = ASN1_ERR_NOERROR;
+}
+
+static unsigned char
+asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
+{
+	if (ctx->pointer >= ctx->end) {
+		ctx->error = ASN1_ERR_DEC_EMPTY;
+		return 0;
+	}
+	*ch = *(ctx->pointer)++;
+	return 1;
+}
+
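+/*
+ * Decode a multi-octet (high tag number) tag: base-128 encoding, with the
+ * high bit set on every octet except the last
+ */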
+static unsigned char
+asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
+{
+	unsigned char ch;
+
+	*tag = 0;
+
+	do {
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+		*tag <<= 7;
+		*tag |= ch & 0x7F;
+	} while ((ch & 0x80) == 0x80);
+	return 1;
+}
+
+static unsigned char
+asn1_id_decode(struct asn1_ctx *ctx,
+	       unsigned int *cls, unsigned int *con, unsigned int *tag)
+{
+	unsigned char ch;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*cls = (ch & 0xC0) >> 6;
+	*con = (ch & 0x20) >> 5;
+	*tag = (ch & 0x1F);
+
+	if (*tag == 0x1F) {
+		if (!asn1_tag_decode(ctx, tag))
+			return 0;
+	}
+	return 1;
+}
+
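+/*
+ * Decode a BER length field: 0x80 means indefinite length (def = 0), values
+ * below 0x80 are the length itself (short form), otherwise the low seven
+ * bits give the number of subsequent length octets (long form)
+ */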
+static unsigned char
+asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len)
+{
+	unsigned char ch, cnt;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	if (ch == 0x80)
+		*def = 0;
+	else {
+		*def = 1;
+
+		if (ch < 0x80)
+			*len = ch;
+		else {
+			cnt = (unsigned char) (ch & 0x7F);
+			*len = 0;
+
+			while (cnt > 0) {
+				if (!asn1_octet_decode(ctx, &ch))
+					return 0;
+				*len <<= 8;
+				*len |= ch;
+				cnt--;
+			}
+		}
+	}
+	return 1;
+}
+
+static unsigned char
+asn1_header_decode(struct asn1_ctx *ctx,
+		   unsigned char **eoc,
+		   unsigned int *cls, unsigned int *con, unsigned int *tag)
+{
+	unsigned int def, len;
+
+	if (!asn1_id_decode(ctx, cls, con, tag))
+		return 0;
+
+	if (!asn1_length_decode(ctx, &def, &len))
+		return 0;
+
+	if (def)
+		*eoc = ctx->pointer + len;
+	else
+		*eoc = NULL;
+	return 1;
+}
+
+static unsigned char
+asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
+{
+	unsigned char ch;
+
+	if (eoc == NULL) {
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		if (ch != 0x00) {
+			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		if (ch != 0x00) {
+			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
+			return 0;
+		}
+		return 1;
+	} else {
+		if (ctx->pointer != eoc) {
+			ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
+			return 0;
+		}
+		return 1;
+	}
+}
+
+/* static unsigned char asn1_null_decode(struct asn1_ctx *ctx,
+				      unsigned char *eoc)
+{
+	ctx->pointer = eoc;
+	return 1;
+}
+
+static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
+				      unsigned char *eoc, long *integer)
+{
+	unsigned char ch;
+	unsigned int len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = (signed char) ch;
+	len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof(long)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+}
+
+static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
+				      unsigned char *eoc,
+				      unsigned int *integer)
+{
+	unsigned char ch;
+	unsigned int len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = ch;
+	if (ch == 0)
+		len = 0;
+	else
+		len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof(unsigned int)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+}
+
+static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
+				       unsigned char *eoc,
+				       unsigned long *integer)
+{
+	unsigned char ch;
+	unsigned int len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = ch;
+	if (ch == 0)
+		len = 0;
+	else
+		len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof(unsigned long)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+} 
+
+static unsigned char
+asn1_octets_decode(struct asn1_ctx *ctx,
+		   unsigned char *eoc,
+		   unsigned char **octets, unsigned int *len)
+{
+	unsigned char *ptr;
+
+	*len = 0;
+
+	*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
+	if (*octets == NULL) {
+		return 0;
+	}
+
+	ptr = *octets;
+	while (ctx->pointer < eoc) {
+		if (!asn1_octet_decode(ctx, (unsigned char *) ptr++)) {
+			kfree(*octets);
+			*octets = NULL;
+			return 0;
+		}
+		(*len)++;
+	}
+	return 1;
+} */
+
+static unsigned char
+asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
+{
+	unsigned char ch;
+
+	*subid = 0;
+
+	do {
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*subid <<= 7;
+		*subid |= ch & 0x7F;
+	} while ((ch & 0x80) == 0x80);
+	return 1;
+}
+
+static int 
+asn1_oid_decode(struct asn1_ctx *ctx,
+		unsigned char *eoc, unsigned long **oid, unsigned int *len)
+{
+	unsigned long subid;
+	unsigned int size;
+	unsigned long *optr;
+
+	size = eoc - ctx->pointer + 1;
+	*oid = kmalloc(size * sizeof (unsigned long), GFP_ATOMIC);
+	if (*oid == NULL) {
+		return 0;
+	}
+
+	optr = *oid;
+
+	if (!asn1_subid_decode(ctx, &subid)) {
+		kfree(*oid);
+		*oid = NULL;
+		return 0;
+	}
+
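+	/* the first subidentifier encodes the first two OID arcs
+	   as 40 * arc1 + arc2 */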
+	if (subid < 40) {
+		optr[0] = 0;
+		optr[1] = subid;
+	} else if (subid < 80) {
+		optr[0] = 1;
+		optr[1] = subid - 40;
+	} else {
+		optr[0] = 2;
+		optr[1] = subid - 80;
+	}
+
+	*len = 2;
+	optr += 2;
+
+	while (ctx->pointer < eoc) {
+		if (++(*len) > size) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			kfree(*oid);
+			*oid = NULL;
+			return 0;
+		}
+
+		if (!asn1_subid_decode(ctx, optr++)) {
+			kfree(*oid);
+			*oid = NULL;
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static int
+compare_oid(unsigned long *oid1, unsigned int oid1len,
+	    unsigned long *oid2, unsigned int oid2len)
+{
+	unsigned int i;
+
+	if (oid1len != oid2len)
+		return 0;
+	else {
+		for (i = 0; i < oid1len; i++) {
+			if (oid1[i] != oid2[i])
+				return 0;
+		}
+		return 1;
+	}
+}
+
+	/* BB check for endian conversion issues here */
+
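+/*
+ * Decode the SPNEGO negTokenInit security blob sent by the server and set
+ * *secType to NTLMSSP if the raw NTLMSSP mechanism OID is advertised
+ */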
+int
+decode_negTokenInit(unsigned char *security_blob, int length,
+		    enum securityEnum *secType)
+{
+	struct asn1_ctx ctx;
+	unsigned char *end;
+	unsigned char *sequence_end;
+	unsigned long *oid = NULL;
+	unsigned int cls, con, tag, oidlen, rc;
+	int use_ntlmssp = FALSE;
+
+	*secType = NTLM; /* BB eventually make Kerberos or NTLMSSP the default */
+
+	/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
+
+	asn1_open(&ctx, security_blob, length);
+
+	if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+		cFYI(1, ("Error decoding negTokenInit header "));
+		return 0;
+	} else if ((cls != ASN1_APL) || (con != ASN1_CON)
+		   || (tag != ASN1_EOC)) {
+		cFYI(1, ("cls = %d con = %d tag = %d", cls, con, tag));
+		return 0;
+	} else {
+		/*      remember to free obj->oid */
+		rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
+		if (rc) {
+			if ((tag == ASN1_OJI) && (cls == ASN1_PRI)) {
+				rc = asn1_oid_decode(&ctx, end, &oid, &oidlen);
+				if (rc) {
+					rc = compare_oid(oid, oidlen,
+							 SPNEGO_OID,
+							 SPNEGO_OID_LEN);
+					kfree(oid);
+				}
+			} else
+				rc = 0;
+		}
+
+		if (!rc) {
+			cFYI(1, ("Error decoding negTokenInit header"));
+			return 0;
+		}
+
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1, ("Error decoding negTokenInit "));
+			return 0;
+		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+			   || (tag != ASN1_EOC)) {
+			cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 0",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1, ("Error decoding negTokenInit "));
+			return 0;
+		} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+			   || (tag != ASN1_SEQ)) {
+			cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 1",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+			return 0;
+		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+			   || (tag != ASN1_EOC)) {
+			cFYI(1,
+			     ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+
+		if (asn1_header_decode
+		    (&ctx, &sequence_end, &cls, &con, &tag) == 0) {
+			cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+			return 0;
+		} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+			   || (tag != ASN1_SEQ)) {
+			cFYI(1,
+			     ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+
+		while (!asn1_eoc_decode(&ctx, sequence_end)) {
+			rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
+			if (!rc) {
+				cFYI(1,
+				     ("Error 1 decoding negTokenInit header exit 2"));
+				return 0;
+			}
+			if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
+				rc = asn1_oid_decode(&ctx, end, &oid, &oidlen);
+				if(rc) {		
+					cFYI(1,
+					  ("OID len = %d oid = 0x%lx 0x%lx 0x%lx 0x%lx",
+					   oidlen, *oid, *(oid + 1), *(oid + 2),
+					   *(oid + 3)));
+					rc = compare_oid(oid, oidlen, NTLMSSP_OID,
+						 NTLMSSP_OID_LEN);
+					if(oid)
+						kfree(oid);
+					if (rc)
+						use_ntlmssp = TRUE;
+				}
+			} else {
+				cFYI(1,("This should be an oid what is going on? "));
+			}
+		}
+
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1,
+			     ("Error decoding last part of negTokenInit exit 3"));
+			return 0;
+		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {	/* tag = 3 indicating mechListMIC */
+			cFYI(1,
+			     ("Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1,
+			     ("Error decoding last part of negTokenInit exit 5"));
+			return 0;
+		} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+			   || (tag != ASN1_SEQ)) {
+			cFYI(1,
+			     ("Exit 6 cls = %d con = %d tag = %d end = %p (%d)",
+			      cls, con, tag, end, *end));
+		}
+
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1,
+			     ("Error decoding last part of negTokenInit exit 7"));
+			return 0;
+		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+			cFYI(1,
+			     ("Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+			cFYI(1,
+			     ("Error decoding last part of negTokenInit exit 9"));
+			return 0;
+		} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
+			   || (tag != ASN1_GENSTR)) {
+			cFYI(1,
+			     ("Exit 10 cls = %d con = %d tag = %d end = %p (%d)",
+			      cls, con, tag, end, *end));
+			return 0;
+		}
+		cFYI(1, ("Need to call asn1_octets_decode() function for this %s", ctx.pointer));	/* is this UTF-8 or ASCII? */
+	}
+
+	/* if (use_kerberos) 
+	   *secType = Kerberos 
+	   else */
+	if (use_ntlmssp) {
+		*secType = NTLMSSP;
+	}
+
+	return 1;
+}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
new file mode 100644
index 0000000..db28b56
--- /dev/null
+++ b/fs/cifs/cifs_debug.c
@@ -0,0 +1,805 @@
+/*
+ *   fs/cifs/cifs_debug.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2000,2003
+ *
+ *   Modified by Steve French (sfrench@us.ibm.com)
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+
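+/* print a combined hex and ASCII dump of a buffer to the kernel log */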
+void
+cifs_dump_mem(char *label, void *data, int length)
+{
+	int i, j;
+	int *intptr = data;
+	char *charptr = data;
+	char buf[10], line[80];
+
+	printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n\n", 
+		label, length, data);
+	for (i = 0; i < length; i += 16) {
+		line[0] = 0;
+		for (j = 0; (j < 4) && (i + j * 4 < length); j++) {
+			sprintf(buf, " %08x", intptr[i / 4 + j]);
+			strcat(line, buf);
+		}
+		buf[0] = ' ';
+		buf[2] = 0;
+		for (j = 0; (j < 16) && (i + j < length); j++) {
+			buf[1] = isprint(charptr[i + j]) ? charptr[i + j] : '.';
+			strcat(line, buf);
+		}
+		printk(KERN_DEBUG "%s\n", line);
+	}
+}
+
+#ifdef CONFIG_PROC_FS
+static int
+cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
+		     int count, int *eof, void *data)
+{
+	struct list_head *tmp;
+	struct list_head *tmp1;
+	struct mid_q_entry * mid_entry;
+	struct cifsSesInfo *ses;
+	struct cifsTconInfo *tcon;
+	int i;
+	int length = 0;
+	char * original_buf = buf;
+
+	*beginBuffer = buf + offset;
+
+	
+	length =
+	    sprintf(buf,
+		    "Display Internal CIFS Data Structures for Debugging\n"
+		    "---------------------------------------------------\n");
+	buf += length;
+
+	length = sprintf(buf, "Servers:\n");
+	buf += length;
+
+	i = 0;
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalSMBSessionList) {
+		i++;
+		ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
+		length =
+		    sprintf(buf,
+			    "\n%d) Name: %s  Domain: %s Mounts: %d ServerOS: %s  \n\tServerNOS: %s\tCapabilities: 0x%x\n\tSMB session status: %d\t",
+				i, ses->serverName, ses->serverDomain, atomic_read(&ses->inUse),
+				ses->serverOS, ses->serverNOS, ses->capabilities,ses->status);
+		buf += length;
+		if(ses->server) {
+			buf += sprintf(buf, "TCP status: %d\n\tLocal Users To Server: %d SecMode: 0x%x Req Active: %d",
+				ses->server->tcpStatus,
+				atomic_read(&ses->server->socketUseCount),
+				ses->server->secMode,
+				atomic_read(&ses->server->inFlight));
+			
+			length = sprintf(buf, "\nMIDs: \n");
+			buf += length;
+
+			spin_lock(&GlobalMid_Lock);
+			list_for_each(tmp1, &ses->server->pending_mid_q) {
+				mid_entry = list_entry(tmp1, struct
+					mid_q_entry,
+					qhead);
+				if(mid_entry) {
+					length = sprintf(buf,"State: %d com: %d pid: %d tsk: %p mid %d\n",mid_entry->midState,mid_entry->command,mid_entry->pid,mid_entry->tsk,mid_entry->mid);
+					buf += length;
+				}
+			}
+			spin_unlock(&GlobalMid_Lock); 
+		}
+
+	}
+	read_unlock(&GlobalSMBSeslock);
+	sprintf(buf, "\n");
+	buf++;
+
+	length = sprintf(buf, "\nShares:\n");
+	buf += length;
+
+	i = 0;
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalTreeConnectionList) {
+		__u32 dev_type;
+		i++;
+		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
+		dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+		length =
+		    sprintf(buf,
+			    "\n%d) %s Uses: %d Type: %s Characteristics: 0x%x Attributes: 0x%x\nPathComponentMax: %d Status: %d",
+			    i, tcon->treeName,
+			    atomic_read(&tcon->useCount),
+			    tcon->nativeFileSystem,
+			    le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
+			    le32_to_cpu(tcon->fsAttrInfo.Attributes),
+			    le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
+			    tcon->tidStatus);
+		buf += length;        
+		if (dev_type == FILE_DEVICE_DISK)
+			length = sprintf(buf, " type: DISK ");
+		else if (dev_type == FILE_DEVICE_CD_ROM)
+			length = sprintf(buf, " type: CDROM ");
+		else
+			length =
+			    sprintf(buf, " type: %d ", dev_type);
+		buf += length;
+		if(tcon->tidStatus == CifsNeedReconnect) {
+			buf += sprintf(buf, "\tDISCONNECTED ");
+			length += 14;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+
+	length = sprintf(buf, "\n");
+	buf += length;
+
+	/* BB add code to dump additional info such as TCP session info now */
+	/* Now calculate total size of returned data */
+	length = buf - original_buf;
+
+	if(offset + count >= length)
+		*eof = 1;
+	if(length < offset) {
+		*eof = 1;
+		return 0;
+	} else {
+		length = length - offset;
+	}
+	if (length > count)
+		length = count;
+
+	return length;
+}
+
+#ifdef CONFIG_CIFS_STATS
+static int
+cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
+		  int count, int *eof, void *data)
+{
+	int item_length,i,length;
+	struct list_head *tmp;
+	struct cifsTconInfo *tcon;
+
+	*beginBuffer = buf + offset;
+
+	length = sprintf(buf,
+			"Resources in use\nCIFS Session: %d\n",
+			sesInfoAllocCount.counter);
+	buf += length;
+	item_length = 
+		sprintf(buf,"Share (unique mount targets): %d\n",
+			tconInfoAllocCount.counter);
+	length += item_length;
+	buf += item_length;      
+	item_length = 
+		sprintf(buf,"SMB Request/Response Buffer: %d Pool size: %d\n",
+			bufAllocCount.counter,cifs_min_rcv + tcpSesAllocCount.counter);
+	length += item_length;
+	buf += item_length;
+	item_length = 
+		sprintf(buf,"SMB Small Req/Resp Buffer: %d Pool size: %d\n",
+			smBufAllocCount.counter,cifs_min_small);
+	length += item_length;
+	buf += item_length;
+	item_length = 
+		sprintf(buf,"Operations (MIDs): %d\n",
+			midCount.counter);
+	length += item_length;
+	buf += item_length;
+	item_length = sprintf(buf,
+		"\n%d session %d share reconnects\n",
+		tcpSesReconnectCount.counter,tconInfoReconnectCount.counter);
+	length += item_length;
+	buf += item_length;
+
+	item_length = sprintf(buf,
+		"Total vfs operations: %d maximum at one time: %d\n",
+		GlobalCurrentXid,GlobalMaxActiveXid);
+	length += item_length;
+	buf += item_length;
+
+	i = 0;
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalTreeConnectionList) {
+		i++;
+		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
+		item_length = sprintf(buf,"\n%d) %s",i, tcon->treeName);
+		buf += item_length;
+		length += item_length;
+		if(tcon->tidStatus == CifsNeedReconnect) {
+			buf += sprintf(buf, "\tDISCONNECTED ");
+			length += 14;
+		}
+		item_length = sprintf(buf,"\nSMBs: %d Oplock Breaks: %d",
+			atomic_read(&tcon->num_smbs_sent),
+			atomic_read(&tcon->num_oplock_brks));
+		buf += item_length;
+		length += item_length;
+		item_length = sprintf(buf,"\nReads: %d Bytes %lld",
+			atomic_read(&tcon->num_reads),
+			(long long)(tcon->bytes_read));
+		buf += item_length;
+		length += item_length;
+		item_length = sprintf(buf,"\nWrites: %d Bytes: %lld",
+			atomic_read(&tcon->num_writes),
+			(long long)(tcon->bytes_written));
+		buf += item_length;
+		length += item_length;
+		item_length = sprintf(buf,
+			"\nOpens: %d Deletes: %d\nMkdirs: %d Rmdirs: %d",
+			atomic_read(&tcon->num_opens),
+			atomic_read(&tcon->num_deletes),
+			atomic_read(&tcon->num_mkdirs),
+			atomic_read(&tcon->num_rmdirs));
+		buf += item_length;
+		length += item_length;
+		item_length = sprintf(buf,
+			"\nRenames: %d T2 Renames %d",
+			atomic_read(&tcon->num_renames),
+			atomic_read(&tcon->num_t2renames));
+		buf += item_length;
+		length += item_length;
+	}
+	read_unlock(&GlobalSMBSeslock);
+
+	buf += sprintf(buf,"\n");
+	length++;
+
+	if(offset + count >= length)
+		*eof = 1;
+	if(length < offset) {
+		*eof = 1;
+		return 0;
+	} else {
+		length = length - offset;
+	}
+	if (length > count)
+		length = count;
+		
+	return length;
+}
+#endif
+
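+/* /proc handler declarations; most of these are wired up in cifs_proc_init()
+   below */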
+static struct proc_dir_entry *proc_fs_cifs;
+read_proc_t cifs_txanchor_read;
+static read_proc_t cifsFYI_read;
+static write_proc_t cifsFYI_write;
+static read_proc_t oplockEnabled_read;
+static write_proc_t oplockEnabled_write;
+static read_proc_t lookupFlag_read;
+static write_proc_t lookupFlag_write;
+static read_proc_t traceSMB_read;
+static write_proc_t traceSMB_write;
+static read_proc_t multiuser_mount_read;
+static write_proc_t multiuser_mount_write;
+static read_proc_t extended_security_read;
+static write_proc_t extended_security_write;
+static read_proc_t ntlmv2_enabled_read;
+static write_proc_t ntlmv2_enabled_write;
+static read_proc_t packet_signing_enabled_read;
+static write_proc_t packet_signing_enabled_write;
+static read_proc_t quotaEnabled_read;
+static write_proc_t quotaEnabled_write;
+static read_proc_t linuxExtensionsEnabled_read;
+static write_proc_t linuxExtensionsEnabled_write;
+
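+/* Create /proc/fs/cifs and the tunable entries under it; entries that can
+   also be written get their write_proc hook set right after creation. */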
+void
+cifs_proc_init(void)
+{
+	struct proc_dir_entry *pde;
+
+	proc_fs_cifs = proc_mkdir("cifs", proc_root_fs);
+	if (proc_fs_cifs == NULL)
+		return;
+
+	proc_fs_cifs->owner = THIS_MODULE;
+	create_proc_read_entry("DebugData", 0, proc_fs_cifs,
+				cifs_debug_data_read, NULL);
+
+#ifdef CONFIG_CIFS_STATS
+	create_proc_read_entry("Stats", 0, proc_fs_cifs,
+				cifs_stats_read, NULL);
+#endif
+	pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs,
+				cifsFYI_read, NULL);
+	if (pde)
+		pde->write_proc = cifsFYI_write;
+
+	pde =
+	    create_proc_read_entry("traceSMB", 0, proc_fs_cifs,
+				traceSMB_read, NULL);
+	if (pde)
+		pde->write_proc = traceSMB_write;
+
+	pde = create_proc_read_entry("OplockEnabled", 0, proc_fs_cifs,
+				oplockEnabled_read, NULL);
+	if (pde)
+		pde->write_proc = oplockEnabled_write;
+
+	pde = create_proc_read_entry("ReenableOldCifsReaddirCode", 0, proc_fs_cifs,
+				quotaEnabled_read, NULL);
+	if (pde)
+		pde->write_proc = quotaEnabled_write;
+
+	pde = create_proc_read_entry("LinuxExtensionsEnabled", 0, proc_fs_cifs,
+				linuxExtensionsEnabled_read, NULL);
+	if (pde)
+		pde->write_proc = linuxExtensionsEnabled_write;
+
+	pde =
+	    create_proc_read_entry("MultiuserMount", 0, proc_fs_cifs,
+				multiuser_mount_read, NULL);
+	if (pde)
+		pde->write_proc = multiuser_mount_write;
+
+	pde =
+	    create_proc_read_entry("ExtendedSecurity", 0, proc_fs_cifs,
+				extended_security_read, NULL);
+	if (pde)
+		pde->write_proc = extended_security_write;
+
+	pde =
+	create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs,
+				lookupFlag_read, NULL);
+	if (pde)
+		pde->write_proc = lookupFlag_write;
+
+	pde =
+	    create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs,
+				ntlmv2_enabled_read, NULL);
+	if (pde)
+		pde->write_proc = ntlmv2_enabled_write;
+
+	pde =
+	    create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs,
+				packet_signing_enabled_read, NULL);
+	if (pde)
+		pde->write_proc = packet_signing_enabled_write;
+}
+
+void
+cifs_proc_clean(void)
+{
+	if (proc_fs_cifs == NULL)
+		return;
+
+	remove_proc_entry("DebugData", proc_fs_cifs);
+	remove_proc_entry("cifsFYI", proc_fs_cifs);
+	remove_proc_entry("traceSMB", proc_fs_cifs);
+#ifdef CONFIG_CIFS_STATS
+	remove_proc_entry("Stats", proc_fs_cifs);
+#endif
+	remove_proc_entry("MultiuserMount", proc_fs_cifs);
+	remove_proc_entry("OplockEnabled", proc_fs_cifs);
+	remove_proc_entry("NTLMV2Enabled",proc_fs_cifs);
+	remove_proc_entry("ExtendedSecurity",proc_fs_cifs);
+	remove_proc_entry("PacketSigningEnabled",proc_fs_cifs);
+	remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs);
+	remove_proc_entry("ReenableOldCifsReaddirCode",proc_fs_cifs);
+	remove_proc_entry("LookupCacheEnabled",proc_fs_cifs);
+	remove_proc_entry("cifs", proc_root_fs);
+}
+
+static int
+cifsFYI_read(char *page, char **start, off_t off, int count,
+	     int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", cifsFYI);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
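+/* Each of the *_write handlers below looks only at the first character
+   written: '0', 'n' or 'N' clears the flag, '1', 'y' or 'Y' sets it
+   (PacketSigningEnabled additionally accepts '2'). */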
+static int
+cifsFYI_write(struct file *file, const char __user *buffer,
+	      unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		cifsFYI = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		cifsFYI = 1;
+
+	return count;
+}
+
+static int
+oplockEnabled_read(char *page, char **start, off_t off,
+		   int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", oplockEnabled);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+oplockEnabled_write(struct file *file, const char __user *buffer,
+		    unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		oplockEnabled = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		oplockEnabled = 1;
+
+	return count;
+}
+
+/* despite the function names, this read/write pair backs the
+   /proc/fs/cifs/ReenableOldCifsReaddirCode entry and toggles experimEnabled */
+static int
+quotaEnabled_read(char *page, char **start, off_t off,
+		   int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", experimEnabled);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+quotaEnabled_write(struct file *file, const char __user *buffer,
+		    unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		experimEnabled = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		experimEnabled = 1;
+
+	return count;
+}
+
+static int
+linuxExtensionsEnabled_read(char *page, char **start, off_t off,
+		   int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", linuxExtEnabled);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+linuxExtensionsEnabled_write(struct file *file, const char __user *buffer,
+		    unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		linuxExtEnabled = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		linuxExtEnabled = 1;
+
+	return count;
+}
+
+
+static int
+lookupFlag_read(char *page, char **start, off_t off,
+		   int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", lookupCacheEnabled);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+lookupFlag_write(struct file *file, const char __user *buffer,
+		    unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		lookupCacheEnabled = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		lookupCacheEnabled = 1;
+
+	return count;
+}
+static int
+traceSMB_read(char *page, char **start, off_t off, int count,
+	      int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", traceSMB);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+traceSMB_write(struct file *file, const char __user *buffer,
+	       unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		traceSMB = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		traceSMB = 1;
+
+	return count;
+}
+
+static int
+multiuser_mount_read(char *page, char **start, off_t off,
+		     int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", multiuser_mount);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+multiuser_mount_write(struct file *file, const char __user *buffer,
+		      unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		multiuser_mount = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		multiuser_mount = 1;
+
+	return count;
+}
+
+static int
+extended_security_read(char *page, char **start, off_t off,
+		       int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", extended_security);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+extended_security_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		extended_security = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		extended_security = 1;
+
+	return count;
+}
+
+static int
+ntlmv2_enabled_read(char *page, char **start, off_t off,
+		       int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", ntlmv2_support);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+ntlmv2_enabled_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		ntlmv2_support = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		ntlmv2_support = 1;
+
+	return count;
+}
+
+static int
+packet_signing_enabled_read(char *page, char **start, off_t off,
+		       int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", sign_CIFS_PDUs);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+static int
+packet_signing_enabled_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char c;
+	int rc;
+
+	rc = get_user(c, buffer);
+	if (rc)
+		return rc;
+	if (c == '0' || c == 'n' || c == 'N')
+		sign_CIFS_PDUs = 0;
+	else if (c == '1' || c == 'y' || c == 'Y')
+		sign_CIFS_PDUs = 1;
+	else if (c == '2')
+		sign_CIFS_PDUs = 2;
+
+	return count;
+}
+
+
+#endif
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
new file mode 100644
index 0000000..bf24d28
--- /dev/null
+++ b/fs/cifs/cifs_debug.h
@@ -0,0 +1,66 @@
+/*
+ *
+ *   Copyright (c) International Business Machines  Corp., 2000,2002
+ *   Modified by Steve French (sfrench@us.ibm.com)
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+#define CIFS_DEBUG		/* BB temporary */
+
+#ifndef _H_CIFS_DEBUG
+#define _H_CIFS_DEBUG
+
+void cifs_dump_mem(char *label, void *data, int length);
+extern int traceSMB;		/* flag which enables the function below */
+void dump_smb(struct smb_hdr *, int);
+
+/*
+ *	debug ON
+ *	--------
+ */
+#ifdef CIFS_DEBUG
+
+
+/* information message: e.g., configuration, major event */
+extern int cifsFYI;
+#define cifsfyi(format,arg...) if (cifsFYI) printk(KERN_DEBUG " " __FILE__ ": " format "\n" "" , ## arg)
+
+#define cFYI(button,prspec) if (button) cifsfyi prspec
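+/* callers supply an extra set of parentheses, e.g. cFYI(1, ("rc = %d", rc)) */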
+
+#define cifswarn(format, arg...) printk(KERN_WARNING ": " format "\n" , ## arg)
+
+/* debug event message: */
+extern int cifsERROR;
+
+#define cEVENT(format,arg...) if (cifsERROR) printk(KERN_EVENT __FILE__ ": " format "\n" , ## arg)
+
+/* error event message: e.g., i/o error */
+#define cifserror(format,arg...) if (cifsERROR) printk(KERN_ERR " CIFS VFS: " format "\n" "" , ## arg)
+
+#define cERROR(button, prspec) if (button) cifserror prspec
+
+/*
+ *	debug OFF
+ *	---------
+ */
+#else		/* CIFS_DEBUG */
+#define cERROR(button,prspec)
+#define cEVENT(format,arg...)
+#define cFYI(button, prspec)
+#define cifserror(format,arg...)
+#endif		/* CIFS_DEBUG */
+
+#endif				/* _H_CIFS_DEBUG */
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
new file mode 100644
index 0000000..77da902
--- /dev/null
+++ b/fs/cifs/cifs_fs_sb.h
@@ -0,0 +1,39 @@
+/*
+ *   fs/cifs/cifs_fs_sb.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ */
+#ifndef _CIFS_FS_SB_H
+#define _CIFS_FS_SB_H
+
+#define CIFS_MOUNT_NO_PERM      1 /* do not do client vfs_perm check */
+#define CIFS_MOUNT_SET_UID      2 /* set current->euid in create etc. */
+#define CIFS_MOUNT_SERVER_INUM  4 /* inode numbers from uniqueid from server */
+#define CIFS_MOUNT_DIRECT_IO    8 /* do not write nor read through page cache */
+#define CIFS_MOUNT_NO_XATTR  0x10 /* if set - disable xattr support */
+
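+/* mnt_cifs_flags below holds a bitmask of the CIFS_MOUNT_* values above */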
+struct cifs_sb_info {
+	struct cifsTconInfo *tcon;	/* primary mount */
+	struct list_head nested_tcon_q;
+	struct nls_table *local_nls;
+	unsigned int rsize;
+	unsigned int wsize;
+	uid_t	mnt_uid;
+	gid_t	mnt_gid;
+	mode_t	mnt_file_mode;
+	mode_t	mnt_dir_mode;
+	int     mnt_cifs_flags;
+};
+#endif				/* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
new file mode 100644
index 0000000..a17adf4
--- /dev/null
+++ b/fs/cifs/cifs_unicode.c
@@ -0,0 +1,87 @@
+/*
+ *   fs/cifs/cifs_unicode.c
+ *
+ *   Copyright (c) International Business Machines  Corp., 2000,2002
+ *   Modified by Steve French (sfrench@us.ibm.com)
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include "cifs_unicode.h"
+#include "cifs_uniupr.h"
+#include "cifspdu.h"
+#include "cifs_debug.h"
+
+/*
+ * NAME:	cifs_strfromUCS_le()
+ *
+ * FUNCTION:	Convert little-endian Unicode string to character string;
+ *		returns the number of bytes written, not counting the null
+ *
+ */
+int
+cifs_strfromUCS_le(char *to, const wchar_t * from,	/* LITTLE ENDIAN */
+		   int len, const struct nls_table *codepage)
+{
+	int i;
+	int outlen = 0;
+
+	for (i = 0; (i < len) && from[i]; i++) {
+		int charlen;
+		/* 2.4.0 kernel or greater */
+		charlen =
+		    codepage->uni2char(le16_to_cpu(from[i]), &to[outlen],
+				       NLS_MAX_CHARSET_SIZE);
+		if (charlen > 0) {
+			outlen += charlen;
+		} else {
+			to[outlen++] = '?';
+		}
+	}
+	to[outlen] = 0;
+	return outlen;
+}
+
+/*
+ * NAME:	cifs_strtoUCS()
+ *
+ * FUNCTION:	Convert character string to little-endian Unicode string;
+ *		returns the number of wide characters written, not counting the null
+ *
+ */
+int
+cifs_strtoUCS(wchar_t * to, const char *from, int len,
+	      const struct nls_table *codepage)
+{
+	int charlen;
+	int i;
+
+	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
+
+		/* works for 2.4.0 kernel or later */
+		charlen = codepage->char2uni(from, len, &to[i]);
+		if (charlen < 1) {
+			cERROR(1,
+			       ("cifs_strtoUCS: char2uni returned %d",
+				charlen));
+			to[i] = 0x003f;	/* a question mark; converted to LE with the rest below */
+			charlen = 1;
+		}
+		to[i] = cpu_to_le16(to[i]);
+
+	}
+
+	to[i] = 0;
+	return i;
+}
+
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
new file mode 100644
index 0000000..da8dde9
--- /dev/null
+++ b/fs/cifs/cifs_unicode.h
@@ -0,0 +1,353 @@
+/*
+ * cifs_unicode:  Unicode kernel case support
+ *
+ * Function:
+ *     Convert a unicode character to upper or lower case using
+ *     compressed tables.
+ *
+ *   Copyright (c) International Business Machines  Corp., 2000,2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Notes:
+ *     These APIs are based on the C library functions.  The semantics
+ *     should match the C functions but with expanded size operands.
+ *
+ *     The upper/lower functions are based on a table created by mkupr.
+ *     This is a compressed table of upper and lower case conversion.
+ *
+ */
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+
+#define  UNIUPR_NOLOWER		/* Example to not expand lower case tables */
+
+/* Just define what we want from uniupr.h.  We don't want to define the tables
+ * in each source file.
+ */
+#ifndef	UNICASERANGE_DEFINED
+struct UniCaseRange {
+	wchar_t start;
+	wchar_t end;
+	signed char *table;
+};
+#endif				/* UNICASERANGE_DEFINED */
+
+#ifndef UNIUPR_NOUPPER
+extern signed char CifsUniUpperTable[512];
+extern const struct UniCaseRange CifsUniUpperRange[];
+#endif				/* UNIUPR_NOUPPER */
+
+#ifndef UNIUPR_NOLOWER
+extern signed char UniLowerTable[512];
+extern struct UniCaseRange UniLowerRange[];
+#endif				/* UNIUPR_NOLOWER */
+
+#ifdef __KERNEL__
+int cifs_strfromUCS_le(char *, const wchar_t *, int, const struct nls_table *);
+int cifs_strtoUCS(wchar_t *, const char *, int, const struct nls_table *);
+#endif
+
+/*
+ * UniStrcat:  Concatenate the second string to the first
+ *
+ * Returns:
+ *     Address of the first string
+ */
+static inline wchar_t *
+UniStrcat(wchar_t * ucs1, const wchar_t * ucs2)
+{
+	wchar_t *anchor = ucs1;	/* save a pointer to start of ucs1 */
+
+	while (*ucs1++) ;	/* To end of first string */
+	ucs1--;			/* Return to the null */
+	while ((*ucs1++ = *ucs2++)) ;	/* copy string 2 over */
+	return anchor;
+}
+
+/*
+ * UniStrchr:  Find a character in a string
+ *
+ * Returns:
+ *     Address of first occurrence of character in string
+ *     or NULL if the character is not in the string
+ */
+static inline wchar_t *
+UniStrchr(const wchar_t * ucs, wchar_t uc)
+{
+	while ((*ucs != uc) && *ucs)
+		ucs++;
+
+	if (*ucs == uc)
+		return (wchar_t *) ucs;
+	return NULL;
+}
+
+/*
+ * UniStrcmp:  Compare two strings
+ *
+ * Returns:
+ *     < 0:  First string is less than second
+ *     = 0:  Strings are equal
+ *     > 0:  First string is greater than second
+ */
+static inline int
+UniStrcmp(const wchar_t * ucs1, const wchar_t * ucs2)
+{
+	while ((*ucs1 == *ucs2) && *ucs1) {
+		ucs1++;
+		ucs2++;
+	}
+	return (int) *ucs1 - (int) *ucs2;
+}
+
+/*
+ * UniStrcpy:  Copy a string
+ */
+static inline wchar_t *
+UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
+{
+	wchar_t *anchor = ucs1;	/* save the start of result string */
+
+	while ((*ucs1++ = *ucs2++)) ;
+	return anchor;
+}
+
+/*
+ * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
+ */
+static inline size_t
+UniStrlen(const wchar_t * ucs1)
+{
+	int i = 0;
+
+	while (*ucs1++)
+		i++;
+	return i;
+}
+
+/*
+ * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a string (length limited)
+ */
+static inline size_t
+UniStrnlen(const wchar_t * ucs1, int maxlen)
+{
+	int i = 0;
+
+	while (*ucs1++) {
+		i++;
+		if (i >= maxlen)
+			break;
+	}
+	return i;
+}
+
+/*
+ * UniStrncat:  Concatenate length limited string
+ */
+static inline wchar_t *
+UniStrncat(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+{
+	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
+
+	while (*ucs1++) ;
+	ucs1--;			/* point to null terminator of s1 */
+	while (n-- && (*ucs1 = *ucs2)) {	/* copy s2 after s1 */
+		ucs1++;
+		ucs2++;
+	}
+	*ucs1 = 0;		/* Null terminate the result */
+	return (anchor);
+}
+
+/*
+ * UniStrncmp:  Compare length limited string
+ */
+static inline int
+UniStrncmp(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+{
+	if (!n)
+		return 0;	/* Null strings are equal */
+	while ((*ucs1 == *ucs2) && *ucs1 && --n) {
+		ucs1++;
+		ucs2++;
+	}
+	return (int) *ucs1 - (int) *ucs2;
+}
+
+/*
+ * UniStrncmp_le:  Compare length limited string - native to little-endian
+ */
+static inline int
+UniStrncmp_le(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+{
+	if (!n)
+		return 0;	/* Null strings are equal */
+	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
+		ucs1++;
+		ucs2++;
+	}
+	return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
+}
+
+/*
+ * UniStrncpy:  Copy length limited string with pad
+ */
+static inline wchar_t *
+UniStrncpy(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+{
+	wchar_t *anchor = ucs1;
+
+	while (n-- && *ucs2)	/* Copy the strings */
+		*ucs1++ = *ucs2++;
+
+	n++;
+	while (n--)		/* Pad with nulls */
+		*ucs1++ = 0;
+	return anchor;
+}
+
+/*
+ * UniStrncpy_le:  Copy length limited string with pad to little-endian
+ */
+static inline wchar_t *
+UniStrncpy_le(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+{
+	wchar_t *anchor = ucs1;
+
+	while (n-- && *ucs2)	/* Copy the strings */
+		*ucs1++ = __le16_to_cpu(*ucs2++);
+
+	n++;
+	while (n--)		/* Pad with nulls */
+		*ucs1++ = 0;
+	return anchor;
+}
+
+/*
+ * UniStrstr:  Find a string in a string
+ *
+ * Returns:
+ *     Address of first match found
+ *     NULL if no matching string is found
+ */
+static inline wchar_t *
+UniStrstr(const wchar_t * ucs1, const wchar_t * ucs2)
+{
+	const wchar_t *anchor1 = ucs1;
+	const wchar_t *anchor2 = ucs2;
+
+	while (*ucs1) {
+		if (*ucs1 == *ucs2) {	/* Partial match found */
+			ucs1++;
+			ucs2++;
+		} else {
+			if (!*ucs2)	/* Match found */
+				return (wchar_t *) anchor1;
+			ucs1 = ++anchor1;	/* No match */
+			ucs2 = anchor2;
+		}
+	}
+
+	if (!*ucs2)		/* Both end together */
+		return (wchar_t *) anchor1;	/* Match found */
+	return NULL;		/* No match */
+}
+
+#ifndef UNIUPR_NOUPPER
+/*
+ * UniToupper:  Convert a unicode character to upper case
+ */
+static inline wchar_t
+UniToupper(register wchar_t uc)
+{
+	register const struct UniCaseRange *rp;
+
+	if (uc < sizeof (CifsUniUpperTable)) {	/* Latin characters */
+		return uc + CifsUniUpperTable[uc];	/* Use base tables */
+	} else {
+		rp = CifsUniUpperRange;	/* Use range tables */
+		while (rp->start) {
+			if (uc < rp->start)	/* Before start of range */
+				return uc;	/* Uppercase = input */
+			if (uc <= rp->end)	/* In range */
+				return uc + rp->table[uc - rp->start];
+			rp++;	/* Try next range */
+		}
+	}
+	return uc;		/* Past last range */
+}
+
+/*
+ * UniStrupr:  Upper case a unicode string
+ */
+static inline wchar_t *
+UniStrupr(register wchar_t * upin)
+{
+	register wchar_t *up;
+
+	up = upin;
+	while (*up) {		/* For all characters */
+		*up = UniToupper(*up);
+		up++;
+	}
+	return upin;		/* Return input pointer */
+}
+#endif				/* UNIUPR_NOUPPER */
+
+#ifndef UNIUPR_NOLOWER
+/*
+ * UniTolower:  Convert a unicode character to lower case
+ */
+static inline wchar_t
+UniTolower(wchar_t uc)
+{
+	register struct UniCaseRange *rp;
+
+	if (uc < sizeof (UniLowerTable)) {	/* Latin characters */
+		return uc + UniLowerTable[uc];	/* Use base tables */
+	} else {
+		rp = UniLowerRange;	/* Use range tables */
+		while (rp->start) {
+			if (uc < rp->start)	/* Before start of range */
+				return uc;	/* Lowercase = input */
+			if (uc <= rp->end)	/* In range */
+				return uc + rp->table[uc - rp->start];
+			rp++;	/* Try next range */
+		}
+	}
+	return uc;		/* Past last range */
+}
+
+/*
+ * UniStrlwr:  Lower case a unicode string
+ */
+static inline wchar_t *
+UniStrlwr(register wchar_t * upin)
+{
+	register wchar_t *up;
+
+	up = upin;
+	while (*up) {		/* For all characters */
+		*up = UniTolower(*up);
+		up++;
+	}
+	return upin;		/* Return input pointer */
+}
+
+#endif
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
new file mode 100644
index 0000000..decd138
--- /dev/null
+++ b/fs/cifs/cifs_uniupr.h
@@ -0,0 +1,253 @@
+/*
+ *   Copyright (c) International Business Machines  Corp., 2000,2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * uniupr.h - Unicode compressed case ranges
+ *
+*/
+
+#ifndef UNIUPR_NOUPPER
+/*
+ * Latin upper case
+ */
+signed char CifsUniUpperTable[512] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 040-04f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 050-05f */
+	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 060-06f */
+	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, 0, 0, 0, 0, 0,	/* 070-07f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0c0-0cf */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0d0-0df */
+	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 0e0-0ef */
+	-32, -32, -32, -32, -32, -32, -32, 0, -32, -32, -32, -32, -32, -32, -32, 121,	/* 0f0-0ff */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 100-10f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 110-11f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 120-12f */
+	0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 130-13f */
+	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1,	/* 140-14f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 150-15f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 160-16f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 170-17f */
+	0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,	/* 180-18f */
+	0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
+	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
+	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
+	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1,	/* 1d0-1df */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
+	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
+};
+
+/* Upper case range - Greek */
+static signed char UniCaseRangeU03a0[47] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37,	/* 3a0-3af */
+	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 3b0-3bf */
+	-32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
+	-63, -63,
+};
+
+/* Upper case range - Cyrillic */
+static signed char UniCaseRangeU0430[48] = {
+	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 430-43f */
+	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 440-44f */
+	0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, 0, -80, -80,	/* 450-45f */
+};
+
+/* Upper case range - Extended cyrillic */
+static signed char UniCaseRangeU0490[61] = {
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 490-49f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4a0-4af */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4b0-4bf */
+	0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
+};
+
+/* Upper case range - Extended latin and greek */
+static signed char UniCaseRangeU1e00[509] = {
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e00-1e0f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e10-1e1f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e20-1e2f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e30-1e3f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e40-1e4f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e50-1e5f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e60-1e6f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e70-1e7f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e80-1e8f */
+	0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,	/* 1e90-1e9f */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ea0-1eaf */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1eb0-1ebf */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ec0-1ecf */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ed0-1edf */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ee0-1eef */
+	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f00-1f0f */
+	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f10-1f1f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f20-1f2f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f30-1f3f */
+	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f40-1f4f */
+	0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f50-1f5f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f60-1f6f */
+	74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112, 126, 126, 0, 0,	/* 1f70-1f7f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f80-1f8f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f90-1f9f */
+	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fa0-1faf */
+	8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fb0-1fbf */
+	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fc0-1fcf */
+	8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fd0-1fdf */
+	8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fe0-1fef */
+	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Upper case range - Wide latin */
+static signed char UniCaseRangeUff40[27] = {
+	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* ff40-ff4f */
+	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+};
+
+/*
+ * Upper Case Range
+ */
+const struct UniCaseRange CifsUniUpperRange[] = {
+	{0x03a0, 0x03ce, UniCaseRangeU03a0},
+	{0x0430, 0x045f, UniCaseRangeU0430},
+	{0x0490, 0x04cc, UniCaseRangeU0490},
+	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
+	{0xff40, 0xff5a, UniCaseRangeUff40},
+	{0}
+};
+#endif
+
+#ifndef UNIUPR_NOLOWER
+/*
+ * Latin lower case
+ */
+static signed char CifsUniLowerTable[512] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 040-04f */
+	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0,	/* 050-05f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 060-06f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 070-07f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 0c0-0cf */
+	32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 0,	/* 0d0-0df */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0e0-0ef */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0f0-0ff */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 100-10f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 110-11f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 120-12f */
+	0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,	/* 130-13f */
+	0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0,	/* 140-14f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 150-15f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 160-16f */
+	1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0, 0,	/* 170-17f */
+	0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79, 0,	/* 180-18f */
+	0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+	1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,	/* 1a0-1af */
+	0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,	/* 1b0-1bf */
+	0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1,	/* 1c0-1cf */
+	0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0,	/* 1d0-1df */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e0-1ef */
+	0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1f0-1ff */
+};
+
+/* Lower case range - Greek */
+static signed char UniCaseRangeL0380[44] = {
+	0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,	/* 380-38f */
+	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 390-39f */
+	32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+};
+
+/* Lower case range - Cyrillic */
+static signed char UniCaseRangeL0400[48] = {
+	0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 0, 80, 80,	/* 400-40f */
+	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 410-41f */
+	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 420-42f */
+};
+
+/* Lower case range - Extended cyrillic */
+static signed char UniCaseRangeL0490[60] = {
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 490-49f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4a0-4af */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4b0-4bf */
+	0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
+};
+
+/* Lower case range - Extended latin and greek */
+static signed char UniCaseRangeL1e00[504] = {
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e00-1e0f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e10-1e1f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e20-1e2f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e30-1e3f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e40-1e4f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e50-1e5f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e60-1e6f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e70-1e7f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e80-1e8f */
+	1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,	/* 1e90-1e9f */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ea0-1eaf */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1eb0-1ebf */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ec0-1ecf */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ed0-1edf */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ee0-1eef */
+	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f00-1f0f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f10-1f1f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f20-1f2f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f30-1f3f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f40-1f4f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,	/* 1f50-1f5f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f60-1f6f */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f70-1f7f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f80-1f8f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f90-1f9f */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1fa0-1faf */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,	/* 1fb0-1fbf */
+	0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0, 0, 0,	/* 1fc0-1fcf */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0,	/* 1fd0-1fdf */
+	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0, 0, 0,	/* 1fe0-1fef */
+	0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Lower case range - Wide latin */
+static signed char UniCaseRangeLff20[27] = {
+	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* ff20-ff2f */
+	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+};
+
+/*
+ * Lower Case Range
+ */
+static const struct UniCaseRange CifsUniLowerRange[] = {
+	{0x0380, 0x03ab, UniCaseRangeL0380},
+	{0x0400, 0x042f, UniCaseRangeL0400},
+	{0x0490, 0x04cb, UniCaseRangeL0490},
+	{0x1e00, 0x1ff7, UniCaseRangeL1e00},
+	{0xff20, 0xff3a, UniCaseRangeLff20},
+	{0}
+};
+#endif
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
new file mode 100644
index 0000000..78829e7
--- /dev/null
+++ b/fs/cifs/cifsencrypt.c
@@ -0,0 +1,209 @@
+/*
+ *   fs/cifs/cifsencrypt.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include "cifspdu.h"
+#include "cifsglob.h" 
+#include "cifs_debug.h"
+#include "md5.h"
+#include "cifs_unicode.h"
+#include "cifsproto.h"
+
+/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
+/* the 16 byte signature must be allocated by the caller  */
+/* Note we only use the 1st eight bytes */
+/* Note that the smb header signature field on input contains the  
+	sequence number before this function is called */
+
+extern void mdfour(unsigned char *out, unsigned char *in, int n);
+extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
+	
+static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, const char * key, char * signature)
+{
+	struct	MD5Context context;
+
+	if((cifs_pdu == NULL) || (signature == NULL))
+		return -EINVAL;
+
+	MD5Init(&context);
+	MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+	MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
+	MD5Final(signature,&context);
+	return 0;
+}
+
+int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct cifsSesInfo * ses,
+	__u32 * pexpected_response_sequence_number)
+{
+	int rc = 0;
+	char smb_signature[20];
+
+	/* BB remember to initialize sequence number elsewhere and initialize mac_signing key elsewhere BB */
+	/* BB remember to add code to save expected sequence number in midQ entry BB */
+
+	if((cifs_pdu == NULL) || (ses == NULL))
+		return -EINVAL;
+
+	if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0) 
+		return rc;
+
+	spin_lock(&GlobalMid_Lock);
+	cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(ses->sequence_number);
+	cifs_pdu->Signature.Sequence.Reserved = 0;
+	
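+	/* a signed request and its response each consume a sequence number,
+	   which is why the counter is incremented twice here */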
+	*pexpected_response_sequence_number = ses->sequence_number++;
+	ses->sequence_number++;
+	spin_unlock(&GlobalMid_Lock);
+
+	rc = cifs_calculate_signature(cifs_pdu, ses->mac_signing_key,smb_signature);
+	if(rc)
+		memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
+	else
+		memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
+
+	return rc;
+}
+
+int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
+	__u32 expected_sequence_number)
+{
+	unsigned int rc;
+	char server_response_sig[8];
+	char what_we_think_sig_should_be[20];
+
+	if((cifs_pdu == NULL) || (mac_key == NULL))
+		return -EINVAL;
+
+	if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
+		return 0;
+
+	if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
+		struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)cifs_pdu;
+		if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
+			return 0;
+	}
+
+	/* BB what if signatures are supposed to be on for session but server does not
+		send one? BB */
+	
+	/* Do not need to verify session setups with signature "BSRSPYL "  */
+	if(memcmp(cifs_pdu->Signature.SecuritySignature,"BSRSPYL ",8)==0)
+		cFYI(1,("dummy signature received for smb command 0x%x",cifs_pdu->Command));
+
+	/* save off the original signature so we can modify the smb and check
+	   its signature against what the server sent */
+	memcpy(server_response_sig,cifs_pdu->Signature.SecuritySignature,8);
+
+	cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(expected_sequence_number);
+	cifs_pdu->Signature.Sequence.Reserved = 0;
+
+	rc = cifs_calculate_signature(cifs_pdu, mac_key,
+		what_we_think_sig_should_be);
+
+	if(rc)
+		return rc;
+
+	
+/*	cifs_dump_mem("what we think it should be: ",what_we_think_sig_should_be,16); */
+
+	if(memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+		return -EACCES;
+	else
+		return 0;
+
+}
+
+/* Fill in the MAC key; the 40 byte key array is allocated by the caller */
+int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
+{
+	char temp_key[16];
+	if ((key == NULL) || (rn == NULL))
+		return -EINVAL;
+
+	E_md4hash(password, temp_key);
+	mdfour(key,temp_key,16);
+	memcpy(key+16,rn, CIFS_SESSION_KEY_SIZE);
+	return 0;
+}
+
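+/* Build the partial NTLMv2 signing key: HMAC-MD5 keyed by the MD4 (NT) hash
+   of the password, over the uppercased user name and domain name converted
+   to little-endian Unicode */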
+int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_info)
+{
+	char temp_hash[16];
+	struct HMACMD5Context ctx;
+	char * ucase_buf;
+	wchar_t * unicode_buf;
+	unsigned int i,user_name_len,dom_name_len;
+
+	if(ses == NULL)
+		return -EINVAL;
+
+	E_md4hash(ses->password, temp_hash);
+
+	hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
+	user_name_len = strlen(ses->userName);
+	if(user_name_len > MAX_USERNAME_SIZE)
+		return -EINVAL;
+	dom_name_len = strlen(ses->domainName);
+	if(dom_name_len > MAX_USERNAME_SIZE)
+		return -EINVAL;
+  
+	ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
+	if(ucase_buf == NULL)
+		return -ENOMEM;
+	unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
+	if(unicode_buf == NULL) {
+		kfree(ucase_buf);
+		return -ENOMEM;
+	}
+   
+	for(i=0;i<user_name_len;i++)
+		ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
+	ucase_buf[i] = 0;
+	user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+	unicode_buf[user_name_len] = 0;
+	user_name_len++;
+
+	for(i=0;i<dom_name_len;i++)
+		ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
+	ucase_buf[i] = 0;
+	dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+
+	unicode_buf[user_name_len + dom_name_len] = 0;
+	hmac_md5_update((const unsigned char *) unicode_buf,
+		(user_name_len+dom_name_len)*2,&ctx);
+
+	hmac_md5_final(ses->mac_signing_key,&ctx);
+	kfree(ucase_buf);
+	kfree(unicode_buf);
+	return 0;
+}
+void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_response)
+{
+	struct HMACMD5Context context;
+	memcpy(v2_session_response + 8, ses->server->cryptKey,8);
+	/* gen_blob(v2_session_response + 16); */
+	hmac_md5_init_limK_to_64(ses->mac_signing_key, 16, &context);
+
+	hmac_md5_update(ses->server->cryptKey,8,&context);
+/*	hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
+
+	hmac_md5_final(v2_session_response,&context);
+}
diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h
new file mode 100644
index 0000000..03e359b
--- /dev/null
+++ b/fs/cifs/cifsencrypt.h
@@ -0,0 +1,34 @@
+/*
+ *   fs/cifs/cifsencrypt.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   Externs for misc. small encryption routines
+ *   so we do not have to put them in cifsproto.h
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* md4.c */
+extern void mdfour(unsigned char *out, unsigned char *in, int n);
+/* smbdes.c */
+extern void E_P16(unsigned char *p14, unsigned char *p16);
+extern void E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24);
+extern void D_P16(unsigned char *p14, unsigned char *in, unsigned char *out);
+extern void E_old_pw_hash(unsigned char *, unsigned char *, unsigned char *);
+
+
+
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
new file mode 100644
index 0000000..5082fce
--- /dev/null
+++ b/fs/cifs/cifsfs.c
@@ -0,0 +1,913 @@
+/*
+ *   fs/cifs/cifsfs.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   Common Internet FileSystem (CIFS) client
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Note that BB means BUGBUG (ie something to fix eventually) */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/vfs.h>
+#include <linux/mempool.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#define DECLARE_GLOBALS_HERE
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+#include <linux/mm.h>
+#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
+
+#ifdef CONFIG_CIFS_QUOTA
+static struct quotactl_ops cifs_quotactl_ops;
+#endif
+
+int cifsFYI = 0;
+int cifsERROR = 1;
+int traceSMB = 0;
+unsigned int oplockEnabled = 1;
+unsigned int experimEnabled = 0;
+unsigned int linuxExtEnabled = 1;
+unsigned int lookupCacheEnabled = 1;
+unsigned int multiuser_mount = 0;
+unsigned int extended_security = 0;
+unsigned int ntlmv2_support = 0;
+unsigned int sign_CIFS_PDUs = 1;
+extern struct task_struct * oplockThread; /* remove sparse warning */
+struct task_struct * oplockThread = NULL;
+unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
+module_param(CIFSMaxBufSize, int, 0);
+MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
+unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
+module_param(cifs_min_rcv, int, 0);
+MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
+unsigned int cifs_min_small = 30;
+module_param(cifs_min_small, int, 0);
+MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
+unsigned int cifs_max_pending = CIFS_MAX_REQ;
+module_param(cifs_max_pending, int, 0);
+MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
+
+static DECLARE_COMPLETION(cifs_oplock_exited);
+
+extern mempool_t *cifs_sm_req_poolp;
+extern mempool_t *cifs_req_poolp;
+extern mempool_t *cifs_mid_poolp;
+
+extern kmem_cache_t *cifs_oplock_cachep;
+
+static int
+cifs_read_super(struct super_block *sb, void *data,
+		const char *devname, int silent)
+{
+	struct inode *inode;
+	struct cifs_sb_info *cifs_sb;
+	int rc = 0;
+
+	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
+	sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
+	cifs_sb = CIFS_SB(sb);
+	if(cifs_sb == NULL)
+		return -ENOMEM;
+	else
+		memset(cifs_sb,0,sizeof(struct cifs_sb_info));
+	
+
+	rc = cifs_mount(sb, cifs_sb, data, devname);
+
+	if (rc) {
+		if (!silent)
+			cERROR(1,
+			       ("cifs_mount failed w/return code = %d", rc));
+		goto out_mount_failed;
+	}
+
+	sb->s_magic = CIFS_MAGIC_NUMBER;
+	sb->s_op = &cifs_super_ops;
+/*	if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
+	    sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
+#ifdef CONFIG_CIFS_QUOTA
+	sb->s_qcop = &cifs_quotactl_ops;
+#endif
+	sb->s_blocksize = CIFS_MAX_MSGSIZE;
+	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
+	inode = iget(sb, ROOT_I);
+
+	if (!inode) {
+		rc = -ENOMEM;
+		goto out_no_root;
+	}
+
+	sb->s_root = d_alloc_root(inode);
+
+	if (!sb->s_root) {
+		rc = -ENOMEM;
+		goto out_no_root;
+	}
+
+	return 0;
+
+out_no_root:
+	cERROR(1, ("cifs_read_super: get root inode failed"));
+	if (inode)
+		iput(inode);
+
+out_mount_failed:
+	if(cifs_sb) {
+		if(cifs_sb->local_nls)
+			unload_nls(cifs_sb->local_nls);	
+		kfree(cifs_sb);
+	}
+	return rc;
+}
+
+static void
+cifs_put_super(struct super_block *sb)
+{
+	int rc = 0;
+	struct cifs_sb_info *cifs_sb;
+
+	cFYI(1, ("In cifs_put_super"));
+	cifs_sb = CIFS_SB(sb);
+	if(cifs_sb == NULL) {
+		cFYI(1,("Empty cifs superblock info passed to unmount"));
+		return;
+	}
+	rc = cifs_umount(sb, cifs_sb); 
+	if (rc) {
+		cERROR(1, ("cifs_umount failed with return code %d", rc));
+	}
+	unload_nls(cifs_sb->local_nls);
+	kfree(cifs_sb);
+	return;
+}
+
+static int
+cifs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	int xid, rc = -EOPNOTSUPP;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(sb);
+	pTcon = cifs_sb->tcon;
+
+	buf->f_type = CIFS_MAGIC_NUMBER;
+
+	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
+	buf->f_namelen = PATH_MAX;	/* PATH_MAX may be too long - it would presumably
+					   be length of total path, note that some servers may be 
+					   able to support more than this, but best to be safe
+					   since Win2k and others can not handle very long filenames */
+	buf->f_files = 0;	/* undefined */
+	buf->f_ffree = 0;	/* unlimited */
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+/* BB we could add a second check for a QFS Unix capability bit */
+	if (pTcon->ses->capabilities & CAP_UNIX)
+		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf, cifs_sb->local_nls);
+
+	/* Only need to call the old QFSInfo if the newer one failed */
+	if (rc)
+#endif /* CIFS_EXPERIMENTAL */
+	rc = CIFSSMBQFSInfo(xid, pTcon, buf, cifs_sb->local_nls);
+
+	/*     
+	   int f_type;
+	   __fsid_t f_fsid;
+	   int f_namelen;  */
+	/* BB get from info put in tcon struct at mount time with call to QFSAttrInfo */
+	FreeXid(xid);
+	return 0;		/* always return success? what if volume is no longer available? */
+}
+
+static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
+{
+	struct cifs_sb_info *cifs_sb;
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
+		return 0;
+	} else /* file mode might have been restricted at mount time 
+		on the client (above and beyond ACL on servers) for  
+		servers which do not support setting and viewing mode bits,
+		so allowing client to check permissions is useful */ 
+		return generic_permission(inode, mask, NULL);
+}
+
+static kmem_cache_t *cifs_inode_cachep;
+static kmem_cache_t *cifs_req_cachep;
+static kmem_cache_t *cifs_mid_cachep;
+kmem_cache_t *cifs_oplock_cachep;
+static kmem_cache_t *cifs_sm_req_cachep;
+mempool_t *cifs_sm_req_poolp;
+mempool_t *cifs_req_poolp;
+mempool_t *cifs_mid_poolp;
+
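+/* per-inode CIFS state (struct cifsInodeInfo) comes from its own slab cache;
+   the VFS inode is embedded within it */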
+static struct inode *
+cifs_alloc_inode(struct super_block *sb)
+{
+	struct cifsInodeInfo *cifs_inode;
+	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+	if (!cifs_inode)
+		return NULL;
+	cifs_inode->cifsAttrs = 0x20;	/* default: ATTR_ARCHIVE */
+	atomic_set(&cifs_inode->inUse, 0);
+	cifs_inode->time = 0;
+	/* Until the file is open and we have gotten oplock
+	info back from the server, can not assume caching of
+	file data or metadata */
+	cifs_inode->clientCanCacheRead = FALSE;
+	cifs_inode->clientCanCacheAll = FALSE;
+	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
+	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
+
+	INIT_LIST_HEAD(&cifs_inode->openFileList);
+	return &cifs_inode->vfs_inode;
+}
+
+static void
+cifs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
+}
+
+/*
+ * cifs_show_options() is for displaying mount options in /proc/mounts.
+ * Not all settable options are displayed but most of the important
+ * ones are.
+ */
+static int
+cifs_show_options(struct seq_file *s, struct vfsmount *m)
+{
+	struct cifs_sb_info *cifs_sb;
+
+	cifs_sb = CIFS_SB(m->mnt_sb);
+
+	if (cifs_sb) {
+		if (cifs_sb->tcon) {
+			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
+			if (cifs_sb->tcon->ses) {
+				if (cifs_sb->tcon->ses->userName)
+					seq_printf(s, ",username=%s",
+					   cifs_sb->tcon->ses->userName);
+				if(cifs_sb->tcon->ses->domainName)
+					seq_printf(s, ",domain=%s",
+					   cifs_sb->tcon->ses->domainName);
+			}
+		}
+		seq_printf(s, ",rsize=%d",cifs_sb->rsize);
+		seq_printf(s, ",wsize=%d",cifs_sb->wsize);
+	}
+	return 0;
+}
+
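+/* the quota operations below are placeholders: they validate the tree
+   connection and log the request but do not yet send quota SMBs to the server */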
+#ifdef CONFIG_CIFS_QUOTA
+int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
+		struct fs_disk_quota * pdquota)
+{
+	int xid;
+	int rc = 0;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifsTconInfo *pTcon;
+	
+	if(cifs_sb)
+		pTcon = cifs_sb->tcon;
+	else
+		return -EIO;
+
+
+	xid = GetXid();
+	if(pTcon) {
+		cFYI(1,("set type: 0x%x id: %d",quota_type,qid));		
+	} else {
+		return -EIO;
+	}
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
+                struct fs_disk_quota * pdquota)
+{
+	int xid;
+	int rc = 0;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifsTconInfo *pTcon;
+
+	if(cifs_sb)
+		pTcon = cifs_sb->tcon;
+	else
+		return -EIO;
+
+	xid = GetXid();
+	if(pTcon) {
+		cFYI(1,("get type: 0x%x id: %d",quota_type,qid));
+	} else {
+		rc = -EIO;
+	}
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
+{
+	int xid; 
+	int rc = 0;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifsTconInfo *pTcon;
+
+	if(cifs_sb)
+		pTcon = cifs_sb->tcon;
+	else
+		return -EIO;
+
+	xid = GetXid();
+	if(pTcon) {
+		cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
+	} else {
+		rc = -EIO;
+	}
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
+{
+	int xid;
+	int rc = 0;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifsTconInfo *pTcon;
+
+	if(cifs_sb) {
+		pTcon = cifs_sb->tcon;
+	} else {
+		return -EIO;
+	}
+	xid = GetXid();
+	if(pTcon) {
+		cFYI(1,("pqstats %p",qstats));		
+	} else {
+		rc = -EIO;
+	}
+
+	FreeXid(xid);
+	return rc;
+}
+
+static struct quotactl_ops cifs_quotactl_ops = {
+	.set_xquota	= cifs_xquota_set,
+	.get_xquota	= cifs_xquota_get,
+	.set_xstate	= cifs_xstate_set,
+	.get_xstate	= cifs_xstate_get,
+};
+#endif
+
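+/* remount currently only forces nodiratime; mount options are not reparsed */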
+static int cifs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+struct super_operations cifs_super_ops = {
+	.read_inode = cifs_read_inode,
+	.put_super = cifs_put_super,
+	.statfs = cifs_statfs,
+	.alloc_inode = cifs_alloc_inode,
+	.destroy_inode = cifs_destroy_inode,
+/*	.drop_inode	= generic_delete_inode,
+	.delete_inode	= cifs_delete_inode, */
+/* the above two functions are not needed unless we later add lazy close of
+   inodes or unless the kernel forgets to call us with the same number of
+   releases (closes) as opens */
+	.show_options = cifs_show_options,
+/*    .umount_begin   = cifs_umount_begin, *//* consider adding in the future */
+	.remount_fs = cifs_remount,
+};
+
+static struct super_block *
+cifs_get_sb(struct file_system_type *fs_type,
+	    int flags, const char *dev_name, void *data)
+{
+	int rc;
+	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
+
+	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
+
+	if (IS_ERR(sb))
+		return sb;
+
+	sb->s_flags = flags;
+
+	rc = cifs_read_super(sb, data, dev_name, flags & MS_VERBOSE ? 1 : 0);
+	if (rc) {
+		up_write(&sb->s_umount);
+		deactivate_super(sb);
+		return ERR_PTR(rc);
+	}
+	sb->s_flags |= MS_ACTIVE;
+	return sb;
+}
+
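+/* read through the page cache; the BB comments below mark where revalidation
+   and invalidation of cached pages still need to be wired in for the case in
+   which no read oplock is held */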
+static ssize_t
+cifs_read_wrapper(struct file * file, char __user *read_data, size_t read_size,
+          loff_t * poffset)
+{
+	if(file->f_dentry == NULL)
+		return -EIO;
+	else if(file->f_dentry->d_inode == NULL)
+		return -EIO;
+
+	cFYI(1,("In read_wrapper size %zd at %lld",read_size,*poffset));
+
+	if(CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
+		return generic_file_read(file,read_data,read_size,poffset);
+	} else {
+		/* BB do we need to lock inode from here until after invalidate? */
+/*		if(file->f_dentry->d_inode->i_mapping) {
+			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
+			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
+		}*/
+/*		cifs_revalidate(file->f_dentry);*/ /* BB fixme */
+
+		/* BB we should make timer configurable - perhaps 
+		   by simply calling cifs_revalidate here */
+		/* invalidate_remote_inode(file->f_dentry->d_inode);*/
+		return generic_file_read(file,read_data,read_size,poffset);
+	}
+}
+
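+/* write through the page cache, then start writeback immediately unless the
+   server granted a write (batch) oplock (clientCanCacheAll) */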
+static ssize_t
+cifs_write_wrapper(struct file * file, const char __user *write_data,
+           size_t write_size, loff_t * poffset) 
+{
+	ssize_t written;
+
+	if(file->f_dentry == NULL)
+		return -EIO;
+	else if(file->f_dentry->d_inode == NULL)
+		return -EIO;
+
+	cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));
+
+	written = generic_file_write(file,write_data,write_size,poffset);
+	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll)  {
+		if(file->f_dentry->d_inode->i_mapping) {
+			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
+		}
+	}
+	return written;
+}
+
+
+static struct file_system_type cifs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "cifs",
+	.get_sb = cifs_get_sb,
+	.kill_sb = kill_anon_super,
+	/*  .fs_flags */
+};
+struct inode_operations cifs_dir_inode_ops = {
+	.create = cifs_create,
+	.lookup = cifs_lookup,
+	.getattr = cifs_getattr,
+	.unlink = cifs_unlink,
+	.link = cifs_hardlink,
+	.mkdir = cifs_mkdir,
+	.rmdir = cifs_rmdir,
+	.rename = cifs_rename,
+	.permission = cifs_permission,
+/*	revalidate:cifs_revalidate,   */
+	.setattr = cifs_setattr,
+	.symlink = cifs_symlink,
+	.mknod   = cifs_mknod,
+#ifdef CONFIG_CIFS_XATTR
+	.setxattr = cifs_setxattr,
+	.getxattr = cifs_getxattr,
+	.listxattr = cifs_listxattr,
+	.removexattr = cifs_removexattr,
+#endif
+};
+
+struct inode_operations cifs_file_inode_ops = {
+/*	revalidate:cifs_revalidate, */
+	.setattr = cifs_setattr,
+	.getattr = cifs_getattr, /* do we need this anymore? */
+	.rename = cifs_rename,
+	.permission = cifs_permission,
+#ifdef CONFIG_CIFS_XATTR
+	.setxattr = cifs_setxattr,
+	.getxattr = cifs_getxattr,
+	.listxattr = cifs_listxattr,
+	.removexattr = cifs_removexattr,
+#endif 
+};
+
+struct inode_operations cifs_symlink_inode_ops = {
+	.readlink = generic_readlink, 
+	.follow_link = cifs_follow_link,
+	.put_link = cifs_put_link,
+	.permission = cifs_permission,
+	/* BB add the following two eventually */
+	/* revalidate: cifs_revalidate,
+	   setattr:    cifs_notify_change, *//* BB do we need notify change */
+#ifdef CONFIG_CIFS_XATTR
+	.setxattr = cifs_setxattr,
+	.getxattr = cifs_getxattr,
+	.listxattr = cifs_listxattr,
+	.removexattr = cifs_removexattr,
+#endif 
+};
+
+struct file_operations cifs_file_ops = {
+	.read = cifs_read_wrapper,
+	.write = cifs_write_wrapper, 
+	.open = cifs_open,
+	.release = cifs_close,
+	.lock = cifs_lock,
+	.fsync = cifs_fsync,
+	.flush = cifs_flush,
+	.mmap  = cifs_file_mmap,
+	.sendfile = generic_file_sendfile,
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.readv = generic_file_readv,
+	.writev = generic_file_writev,
+	.aio_read = generic_file_aio_read,
+	.aio_write = generic_file_aio_write,
+	.dir_notify = cifs_dir_notify,
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+};
+
+struct file_operations cifs_file_direct_ops = {
+	/* no mmap, no aio, no readv - 
+	   BB reevaluate whether they can be done with directio, no cache */
+	.read = cifs_user_read,
+	.write = cifs_user_write,
+	.open = cifs_open,
+	.release = cifs_close,
+	.lock = cifs_lock,
+	.fsync = cifs_fsync,
+	.flush = cifs_flush,
+	.sendfile = generic_file_sendfile, /* BB removeme BB */
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.dir_notify = cifs_dir_notify,
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+};
+
+struct file_operations cifs_dir_ops = {
+	.readdir = cifs_readdir,
+	.release = cifs_closedir,
+	.read    = generic_read_dir,
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.dir_notify = cifs_dir_notify,
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+};
+
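+/* slab constructor: initialize the embedded VFS inode and the per-inode lock
+   list once per slab object */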
+static void
+cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct cifsInodeInfo *cifsi = inode;
+
+	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		inode_init_once(&cifsi->vfs_inode);
+		INIT_LIST_HEAD(&cifsi->lockList);
+	}
+}
+
+static int
+cifs_init_inodecache(void)
+{
+	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
+					      sizeof (struct cifsInodeInfo),
+					      0, SLAB_RECLAIM_ACCOUNT,
+					      cifs_init_once, NULL);
+	if (cifs_inode_cachep == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+cifs_destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(cifs_inode_cachep))
+		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
+}
+
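+/* set up two request buffer pools: a large pool (CIFSMaxBufSize plus header)
+   for full size SMB frames and a small pool (MAX_CIFS_HDR_SIZE) for short
+   requests, each backed by a mempool sized by cifs_min_rcv / cifs_min_small */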
+static int
+cifs_init_request_bufs(void)
+{
+	if(CIFSMaxBufSize < 8192) {
+	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
+	Unicode path name has to fit in any SMB/CIFS path based frame */
+		CIFSMaxBufSize = 8192;
+	} else if (CIFSMaxBufSize > 1024*127) {
+		CIFSMaxBufSize = 1024 * 127;
+	} else {
+		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
+	}
+/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
+	cifs_req_cachep = kmem_cache_create("cifs_request",
+					    CIFSMaxBufSize +
+					    MAX_CIFS_HDR_SIZE, 0,
+					    SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (cifs_req_cachep == NULL)
+		return -ENOMEM;
+
+	if(cifs_min_rcv < 1)
+		cifs_min_rcv = 1;
+	else if (cifs_min_rcv > 64) {
+		cifs_min_rcv = 64;
+		cERROR(1,("cifs_min_rcv set to maximum (64)"));
+	}
+
+	cifs_req_poolp = mempool_create(cifs_min_rcv,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					cifs_req_cachep);
+
+	if(cifs_req_poolp == NULL) {
+		kmem_cache_destroy(cifs_req_cachep);
+		return -ENOMEM;
+	}
+	/* 256 (MAX_CIFS_HDR_SIZE) bytes is enough for most SMB responses and
+	almost all handle based requests (but not the write response, nor is it
+	sufficient for path based requests).  A smaller size would have
+	been more efficient (compacting multiple slab items on one 4k page)
+	for the case in which debug was on, but this larger size allows
+	more SMBs to use the small buffer alloc and is still much more
+	efficient to alloc 1 per page off the slab compared to a 17K (5 page)
+	alloc of large cifs buffers even when page debugging is on */
+	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+			MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (cifs_sm_req_cachep == NULL) {
+		mempool_destroy(cifs_req_poolp);
+		kmem_cache_destroy(cifs_req_cachep);
+		return -ENOMEM;              
+	}
+
+	if(cifs_min_small < 2)
+		cifs_min_small = 2;
+	else if (cifs_min_small > 256) {
+		cifs_min_small = 256;
+		cFYI(1,("cifs_min_small set to maximum (256)"));
+	}
+
+	cifs_sm_req_poolp = mempool_create(cifs_min_small,
+				mempool_alloc_slab,
+				mempool_free_slab,
+				cifs_sm_req_cachep);
+
+	if(cifs_sm_req_poolp == NULL) {
+		mempool_destroy(cifs_req_poolp);
+		kmem_cache_destroy(cifs_req_cachep);
+		kmem_cache_destroy(cifs_sm_req_cachep);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+cifs_destroy_request_bufs(void)
+{
+	mempool_destroy(cifs_req_poolp);
+	if (kmem_cache_destroy(cifs_req_cachep))
+		printk(KERN_WARNING
+		       "cifs_destroy_request_cache: error not all structures were freed\n");
+	mempool_destroy(cifs_sm_req_poolp);
+	if (kmem_cache_destroy(cifs_sm_req_cachep))
+		printk(KERN_WARNING
+		      "cifs_destroy_request_cache: cifs_small_rq free error\n");
+}
+
+static int
+cifs_init_mids(void)
+{
+	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
+				sizeof (struct mid_q_entry), 0,
+				SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (cifs_mid_cachep == NULL)
+		return -ENOMEM;
+
+	cifs_mid_poolp = mempool_create(3 /* a reasonable min number of simultaneous operations */,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					cifs_mid_cachep);
+	if(cifs_mid_poolp == NULL) {
+		kmem_cache_destroy(cifs_mid_cachep);
+		return -ENOMEM;
+	}
+
+	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
+				sizeof (struct oplock_q_entry), 0,
+				SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (cifs_oplock_cachep == NULL) {
+		mempool_destroy(cifs_mid_poolp);
+		kmem_cache_destroy(cifs_mid_cachep);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+cifs_destroy_mids(void)
+{
+	mempool_destroy(cifs_mid_poolp);
+	if (kmem_cache_destroy(cifs_mid_cachep))
+		printk(KERN_WARNING
+		       "cifs_destroy_mids: error not all structures were freed\n");
+
+	if (kmem_cache_destroy(cifs_oplock_cachep))
+		printk(KERN_WARNING
+		       "error not all oplock structures were freed\n");
+}
+
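+/* background thread: when an oplock break is queued, flush (and if necessary
+   invalidate) the cached data for the inode and acknowledge the break with a
+   LOCKING_ANDX oplock release */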
+static int cifs_oplock_thread(void * dummyarg)
+{
+	struct oplock_q_entry * oplock_item;
+	struct cifsTconInfo *pTcon;
+	struct inode * inode;
+	__u16  netfid;
+	int rc;
+
+	daemonize("cifsoplockd");
+	allow_signal(SIGTERM);
+
+	oplockThread = current;
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+		
+		schedule_timeout(1*HZ);  
+		spin_lock(&GlobalMid_Lock);
+		if(list_empty(&GlobalOplock_Q)) {
+			spin_unlock(&GlobalMid_Lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(39*HZ);
+		} else {
+			oplock_item = list_entry(GlobalOplock_Q.next, 
+				struct oplock_q_entry, qhead);
+			if(oplock_item) {
+				cFYI(1,("found oplock item to write out")); 
+				pTcon = oplock_item->tcon;
+				inode = oplock_item->pinode;
+				netfid = oplock_item->netfid;
+				spin_unlock(&GlobalMid_Lock);
+				DeleteOplockQEntry(oplock_item);
+				/* cannot grab the inode sem here since it would
+				deadlock when an oplock is received on a delete,
+				because vfs_unlink holds the i_sem across
+				the call */
+				/* down(&inode->i_sem);*/
+				if (S_ISREG(inode->i_mode)) {
+					rc = filemap_fdatawrite(inode->i_mapping);
+					if(CIFS_I(inode)->clientCanCacheRead == 0) {
+						filemap_fdatawait(inode->i_mapping);
+						invalidate_remote_inode(inode);
+					}
+				} else
+					rc = 0;
+				/* up(&inode->i_sem);*/
+				if (rc)
+					CIFS_I(inode)->write_behind_rc = rc;
+				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
+
+				/* releasing a stale oplock after a recent reconnect
+				of the smb session using a now incorrect file
+				handle is not a data integrity issue, but do
+				not bother sending an oplock release if the session
+				to the server is still disconnected, since the oplock
+				was already released by the server in that case */
+				if(pTcon->tidStatus != CifsNeedReconnect) {
+					rc = CIFSSMBLock(0, pTcon, netfid,
+						0 /* len */, 0 /* offset */, 0,
+						0, LOCKING_ANDX_OPLOCK_RELEASE,
+						0 /* wait flag */);
+					cFYI(1,("Oplock release rc = %d",rc));
+				}
+			} else
+				spin_unlock(&GlobalMid_Lock);
+		}
+	} while(!signal_pending(current));
+	complete_and_exit (&cifs_oplock_exited, 0);
+}
+
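+/* module init: create /proc entries, initialize the global lists, counters and
+   locks, set up the caches and mempools, register the filesystem and start the
+   cifsoplockd thread */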
+static int __init
+init_cifs(void)
+{
+	int rc = 0;
+#ifdef CONFIG_PROC_FS
+	cifs_proc_init();
+#endif
+	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
+	INIT_LIST_HEAD(&GlobalSMBSessionList);
+	INIT_LIST_HEAD(&GlobalTreeConnectionList);
+	INIT_LIST_HEAD(&GlobalOplock_Q);
+/*
+ *  Initialize Global counters
+ */
+	atomic_set(&sesInfoAllocCount, 0);
+	atomic_set(&tconInfoAllocCount, 0);
+	atomic_set(&tcpSesAllocCount,0);
+	atomic_set(&tcpSesReconnectCount, 0);
+	atomic_set(&tconInfoReconnectCount, 0);
+
+	atomic_set(&bufAllocCount, 0);
+	atomic_set(&midCount, 0);
+	GlobalCurrentXid = 0;
+	GlobalTotalActiveXid = 0;
+	GlobalMaxActiveXid = 0;
+	rwlock_init(&GlobalSMBSeslock);
+	spin_lock_init(&GlobalMid_Lock);
+
+	if(cifs_max_pending < 2) {
+		cifs_max_pending = 2;
+		cFYI(1,("cifs_max_pending set to min of 2"));
+	} else if(cifs_max_pending > 256) {
+		cifs_max_pending = 256;
+		cFYI(1,("cifs_max_pending set to max of 256"));
+	}
+
+	rc = cifs_init_inodecache();
+	if (!rc) {
+		rc = cifs_init_mids();
+		if (!rc) {
+			rc = cifs_init_request_bufs();
+			if (!rc) {
+				rc = register_filesystem(&cifs_fs_type);
+				if (!rc) {                
+					rc = (int)kernel_thread(cifs_oplock_thread, NULL, 
+						CLONE_FS | CLONE_FILES | CLONE_VM);
+					if(rc > 0)
+						return 0;
+					else {
+						cERROR(1,("error %d creating oplock thread",rc));
+						unregister_filesystem(&cifs_fs_type);
+					}
+				}
+				cifs_destroy_request_bufs();
+			}
+			cifs_destroy_mids();
+		}
+		cifs_destroy_inodecache();
+	}
+#ifdef CONFIG_PROC_FS
+	cifs_proc_clean();
+#endif
+	return rc;
+}
+
+static void __exit
+exit_cifs(void)
+{
+	cFYI(0, ("In unregister ie exit_cifs"));
+#ifdef CONFIG_PROC_FS
+	cifs_proc_clean();
+#endif
+	unregister_filesystem(&cifs_fs_type);
+	cifs_destroy_inodecache();
+	cifs_destroy_mids();
+	cifs_destroy_request_bufs();
+	if(oplockThread) {
+		send_sig(SIGTERM, oplockThread, 1);
+		wait_for_completion(&cifs_oplock_exited);
+	}
+}
+
+MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
+MODULE_LICENSE("GPL");		/* combination of LGPL + GPL source behaves as GPL */
+MODULE_DESCRIPTION
+    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
+MODULE_VERSION(CIFS_VERSION);
+module_init(init_cifs)
+module_exit(exit_cifs)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
new file mode 100644
index 0000000..451f18a
--- /dev/null
+++ b/fs/cifs/cifsfs.h
@@ -0,0 +1,98 @@
+/*
+ *   fs/cifs/cifsfs.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#ifndef _CIFSFS_H
+#define _CIFSFS_H
+
+#define ROOT_I 2
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+extern struct address_space_operations cifs_addr_ops;
+
+/* Functions related to super block operations */
+extern struct super_operations cifs_super_ops;
+extern void cifs_read_inode(struct inode *);
+extern void cifs_delete_inode(struct inode *);
+/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
+
+/* Functions related to inodes */
+extern struct inode_operations cifs_dir_inode_ops;
+extern int cifs_create(struct inode *, struct dentry *, int, 
+		       struct nameidata *);
+extern struct dentry * cifs_lookup(struct inode *, struct dentry *,
+				  struct nameidata *);
+extern int cifs_unlink(struct inode *, struct dentry *);
+extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
+extern int cifs_mknod(struct inode *, struct dentry *, int, dev_t);
+extern int cifs_mkdir(struct inode *, struct dentry *, int);
+extern int cifs_rmdir(struct inode *, struct dentry *);
+extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
+		       struct dentry *);
+extern int cifs_revalidate(struct dentry *);
+extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int cifs_setattr(struct dentry *, struct iattr *);
+
+extern struct inode_operations cifs_file_inode_ops;
+extern struct inode_operations cifs_symlink_inode_ops;
+
+/* Functions related to files and directories */
+extern struct file_operations cifs_file_ops;
+extern struct file_operations cifs_file_direct_ops; /* if directio mount */
+extern int cifs_open(struct inode *inode, struct file *file);
+extern int cifs_close(struct inode *inode, struct file *file);
+extern int cifs_closedir(struct inode *inode, struct file *file);
+extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
+			 size_t read_size, loff_t * poffset);
+extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
+			 size_t write_size, loff_t * poffset);
+extern int cifs_lock(struct file *, int, struct file_lock *);
+extern int cifs_fsync(struct file *, struct dentry *, int);
+extern int cifs_flush(struct file *);
+extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+extern struct file_operations cifs_dir_ops;
+extern int cifs_dir_open(struct inode *inode, struct file *file);
+extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
+extern int cifs_dir_notify(struct file *, unsigned long arg);
+
+/* Functions related to dir entries */
+extern struct dentry_operations cifs_dentry_ops;
+
+/* Functions related to symlinks */
+extern int cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
+extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd);
+extern int cifs_readlink(struct dentry *direntry, char __user *buffer, 
+			 int buflen);
+extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
+			const char *symname);
+extern int	cifs_removexattr(struct dentry *, const char *);
+extern int 	cifs_setxattr(struct dentry *, const char *, const void *,
+			 size_t, int);
+extern ssize_t	cifs_getxattr(struct dentry *, const char *, void *, size_t);
+extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
+#define CIFS_VERSION   "1.31"
+#endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
new file mode 100644
index 0000000..69aff1a7
--- /dev/null
+++ b/fs/cifs/cifsglob.h
@@ -0,0 +1,439 @@
+/*
+ *   fs/cifs/cifsglob.h
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ * 
+ */
+#include <linux/in.h>
+#include <linux/in6.h>
+#include "cifs_fs_sb.h"
+/*
+ * The sizes of various internal tables and strings
+ */
+#define MAX_UID_INFO 16
+#define MAX_SES_INFO 2
+#define MAX_TCON_INFO 4
+
+#define MAX_TREE_SIZE 2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1
+#define MAX_SERVER_SIZE 15
+#define MAX_SHARE_SIZE  64	/* used to be 20 - this should still be enough */
+#define MAX_USERNAME_SIZE 32	/* 32 is to allow for 15 char names + null
+				   termination then *2 for unicode versions */
+#define MAX_PASSWORD_SIZE 16
+
+#define CIFS_MIN_RCV_POOL 4
+
+/*
+ * MAX_REQ is the maximum number of requests that WE will send
+ * on one socket concurrently. It also matches the most common
+ * value of max multiplex returned by servers.  We may
+ * eventually want to use the negotiated value (in case
+ * future servers can handle more) when we are more confident that
+ * we will not have problems overloading the socket with pending
+ * write data.
+ */
+#define CIFS_MAX_REQ 50 
+
+#define SERVER_NAME_LENGTH 15
+#define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
+
+/* used to define string lengths for reserving space for unicode strings */
+/*         (256+1)*2 = 514                                     */
+/*           (max path length + 1 for null) * 2 for unicode    */
+#define MAX_NAME 514
+
+#include "cifspdu.h"
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef XATTR_DOS_ATTRIB
+#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
+#endif
+
+/*
+ * This information is kept on every Server we know about.
+ */
+#define SERVER_NAME_LEN_WITH_NULL	(SERVER_NAME_LENGTH + 1)
+
+/*
+ * CIFS vfs client Status information (based on what we know.)
+ */
+
+ /* associated with each tcp and smb session */
+enum statusEnum {
+	CifsNew = 0,
+	CifsGood,
+	CifsExiting,
+	CifsNeedReconnect
+};
+
+enum securityEnum {
+	NTLM = 0,		/* Legacy NTLM012 auth with NTLM hash */
+	NTLMv2,			/* Legacy NTLM auth with NTLMv2 hash */
+	RawNTLMSSP,		/* NTLMSSP without SPNEGO */
+	NTLMSSP,		/* NTLMSSP via SPNEGO */
+	Kerberos		/* Kerberos via SPNEGO */
+};
+
+enum protocolEnum {
+	IPV4 = 0,
+	IPV6,
+	SCTP
+	/* Netbios frames protocol not supported at this time */
+};
+
+/*
+ *****************************************************************
+ * Except the CIFS PDUs themselves all the
+ * globally interesting structs should go here
+ *****************************************************************
+ */
+
+struct TCP_Server_Info {
+	char server_Name[SERVER_NAME_LEN_WITH_NULL];	/* 15 chars + X'20' in 16th */
+	char unicode_server_Name[SERVER_NAME_LEN_WITH_NULL * 2];	/* Unicode version of server_Name */
+	struct socket *ssocket;
+	union {
+		struct sockaddr_in sockAddr;
+		struct sockaddr_in6 sockAddr6;
+	} addr;
+	wait_queue_head_t response_q; 
+	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
+	struct list_head pending_mid_q;
+	void *Server_NlsInfo;	/* BB - placeholder for future NLS info  */
+	unsigned short server_codepage;	/* codepage for the server    */
+	unsigned long ip_address;	/* IP addr for the server if known     */
+	enum protocolEnum protocolType;	
+	char versionMajor;
+	char versionMinor;
+	unsigned svlocal:1;	/* local server or remote */
+	atomic_t socketUseCount; /* number of open cifs sessions on socket */
+	atomic_t inFlight;  /* number of requests on the wire to server */
+	enum statusEnum tcpStatus; /* what we think the status is */
+	struct semaphore tcpSem;
+	struct task_struct *tsk;
+	char server_GUID[16];
+	char secMode;
+	enum securityEnum secType;
+	unsigned int maxReq;	/* Clients should submit no more */
+	/* than maxReq distinct unanswered SMBs to the server when using  */
+	/* multiplexed reads or writes */
+	unsigned int maxBuf;	/* maxBuf specifies the maximum */
+	/* message size the server can send or receive for non-raw SMBs */
+	unsigned int maxRw;	/* maxRw specifies the maximum */
+	/* message size the server can send or receive for */
+	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
+	char sessid[4];		/* unique token id for this session */
+	/* (returned on Negotiate) */
+	int capabilities; /* allow selective disabling of caps by smb sess */
+	__u16 timeZone;
+	char cryptKey[CIFS_CRYPTO_KEY_SIZE];
+	char workstation_RFC1001_name[16]; /* 16th byte is always zero */
+};
+
+/*
+ * The following is our shortcut to user information.  We surface the uid,
+ * and name. We always get the password on the fly in case it
+ * has changed. We also hang a list of sessions owned by this user off here. 
+ */
+struct cifsUidInfo {
+	struct list_head userList;
+	struct list_head sessionList; /* SMB sessions for this user */
+	uid_t linux_uid;
+	char user[MAX_USERNAME_SIZE + 1];	/* ascii name of user */
+	/* BB may need ptr or callback for PAM or WinBind info */
+};
+
+/*
+ * Session structure.  One of these for each uid session with a particular host
+ */
+struct cifsSesInfo {
+	struct list_head cifsSessionList;
+	struct semaphore sesSem;
+	struct cifsUidInfo *uidInfo;	/* pointer to user info */
+	struct TCP_Server_Info *server;	/* pointer to server info */
+	atomic_t inUse; /* # of mounts (tree connections) on this ses */
+	enum statusEnum status;
+	__u32 sequence_number;  /* needed for CIFS PDU signature */
+	__u16 ipc_tid;		/* special tid for connection to IPC share */
+	__u16 flags;
+	char mac_signing_key[CIFS_SESSION_KEY_SIZE + 16];	
+	char *serverOS;		/* name of operating system underlying the server */
+	char *serverNOS;	/* name of network operating system that the server is running */
+	char *serverDomain;	/* security realm of server */
+	int Suid;		/* remote smb uid  */
+	uid_t linux_uid;        /* local Linux uid */
+	int capabilities;
+	char serverName[SERVER_NAME_LEN_WITH_NULL * 2];	/* BB make bigger for tcp names - will ipv6 and sctp addresses fit here?? */
+	char userName[MAX_USERNAME_SIZE + 1];
+	char domainName[MAX_USERNAME_SIZE + 1];
+	char * password;
+};
+/* session flags */
+#define CIFS_SES_NT4 1
+
+/*
+ * there is one of these for each connection to a resource on a particular
+ * session 
+ */
+struct cifsTconInfo {
+	struct list_head cifsConnectionList;
+	struct list_head openFileList;
+	struct semaphore tconSem;
+	struct cifsSesInfo *ses;	/* pointer to session associated with */
+	char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource (in ASCII not UTF) */
+	char *nativeFileSystem;
+	__u16 tid;		/* The 2 byte tree id */
+	__u16 Flags;		/* optional support bits */
+	enum statusEnum tidStatus;
+	atomic_t useCount;	/* how many mounts (explicit or implicit) to this share */
+#ifdef CONFIG_CIFS_STATS
+	atomic_t num_smbs_sent;
+	atomic_t num_writes;
+	atomic_t num_reads;
+	atomic_t num_oplock_brks;
+	atomic_t num_opens;
+	atomic_t num_deletes;
+	atomic_t num_mkdirs;
+	atomic_t num_rmdirs;
+	atomic_t num_renames;
+	atomic_t num_t2renames;
+	__u64    bytes_read;
+	__u64    bytes_written;
+	spinlock_t stat_lock;
+#endif
+	FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+	FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo;	/* ok if file system name truncated */
+	FILE_SYSTEM_UNIX_INFO fsUnixInfo;
+	unsigned retry:1;
+	/* BB add field for back pointer to sb struct? */
+};
+
+/*
+ * This info hangs off the cifsFileInfo structure.  This is used to track
+ * byte stream locks on the file
+ */
+struct cifsLockInfo {
+	struct cifsLockInfo *next;
+	int start;
+	int length;
+	int type;
+};
+
+/*
+ * One of these for each open instance of a file
+ */
+struct cifs_search_info {
+	loff_t index_of_last_entry;
+	__u16 entries_in_buffer;
+	__u16 info_level;
+	__u32 resume_key;
+	char * ntwrk_buf_start;
+	char * srch_entries_start;
+	char * presume_name;
+	unsigned int resume_name_len;
+	unsigned endOfSearch:1;
+	unsigned emptyDir:1;
+	unsigned unicode:1;
+};
+
+struct cifsFileInfo {
+	struct list_head tlist;	/* pointer to next fid owned by tcon */
+	struct list_head flist;	/* next fid (file instance) for this inode */
+	unsigned int uid;	/* allows finding which FileInfo structure */
+	__u32 pid;		/* process id who opened file */
+	__u16 netfid;		/* file id from remote */
+	/* BB add lock scope info here if needed */
+	/* lock scope id (0 if none) */
+	struct file * pfile; /* needed for writepage */
+	struct inode * pInode; /* needed for oplock break */
+	unsigned closePend:1;	/* file is marked to close */
+	unsigned invalidHandle:1;  /* file closed via session abend */
+	struct semaphore fh_sem; /* prevents reopen race after dead ses*/
+	char * search_resume_name; /* BB removeme BB */
+	unsigned int resume_name_length; /* BB removeme - field renamed and moved BB */
+	struct cifs_search_info srch_inf;
+};
+
+/*
+ * One of these for each file inode
+ */
+
+struct cifsInodeInfo {
+	struct list_head lockList;
+	/* BB add in lists for dirty pages - i.e. write caching info for oplock */
+	struct list_head openFileList;
+	int write_behind_rc;
+	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
+	atomic_t inUse;	 /* num concurrent users (local openers cifs) of file*/
+	unsigned long time;	/* jiffies of last update/check of inode */
+	unsigned clientCanCacheRead:1; /* read oplock */
+	unsigned clientCanCacheAll:1;  /* read and writebehind oplock */
+	unsigned oplockPending:1;
+	struct inode vfs_inode;
+};
+
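+/* helpers to get from the VFS inode / superblock to the cifs private info
+   embedded in (or hanging off) them */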
+static inline struct cifsInodeInfo *
+CIFS_I(struct inode *inode)
+{
+	return container_of(inode, struct cifsInodeInfo, vfs_inode);
+}
+
+static inline struct cifs_sb_info *
+CIFS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+
+/* one of these for every pending CIFS request to the server */
+struct mid_q_entry {
+	struct list_head qhead;	/* mids waiting on reply from this server */
+	__u16 mid;		/* multiplex id */
+	__u16 pid;		/* process id */
+	__u32 sequence_number;  /* for CIFS signing */
+	__u16 command;		/* smb command code */
+	struct timeval when_sent;	/* time when smb sent */
+	struct cifsSesInfo *ses;	/* smb was sent to this server */
+	struct task_struct *tsk;	/* task waiting for response */
+	struct smb_hdr *resp_buf;	/* response buffer */
+	int midState;	/* wish this were enum but can not pass to wait_event */
+};
+
+struct oplock_q_entry {
+	struct list_head qhead;
+	struct inode * pinode;
+	struct cifsTconInfo * tcon; 
+	__u16 netfid;
+};
+
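+/* state and flag values used in mid_q_entry.midState */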
+#define   MID_FREE 0
+#define   MID_REQUEST_ALLOCATED 1
+#define   MID_REQUEST_SUBMITTED 2
+#define   MID_RESPONSE_RECEIVED 4
+#define   MID_RETRY_NEEDED      8 /* session closed while this request out */
+#define   MID_NO_RESP_NEEDED 0x10
+#define   MID_SMALL_BUFFER   0x20 /* 112 byte response buffer instead of 4K */
+
+/*
+ *****************************************************************
+ * All constants go here
+ *****************************************************************
+ */
+
+#define UID_HASH (16)
+
+/*
+ * Note that ONE module should define _DECLARE_GLOBALS_HERE to cause the
+ * following to be declared.
+ */
+
+/****************************************************************************
+ *  Locking notes.  All updates to global variables and lists should be
+ *                  protected by spinlocks or semaphores.
+ *
+ *  Spinlocks
+ *  ---------
+ *  GlobalMid_Lock protects:
+ *	list operations on pending_mid_q and oplockQ
+ *      updates to XID counters, multiplex id  and SMB sequence numbers
+ *  GlobalSMBSesLock protects:
+ *	list operations on tcp and SMB session lists and tCon lists
+ *  f_owner.lock protects certain per file struct operations
+ *  mapping->page_lock protects certain per page operations
+ *
+ *  Semaphores
+ *  ----------
+ *  sesSem     operations on smb session
+ *  tconSem    operations on tree connection
+ *  fh_sem      file handle reconnection operations 
+ *
+ ****************************************************************************/
+
+#ifdef DECLARE_GLOBALS_HERE
+#define GLOBAL_EXTERN
+#else
+#define GLOBAL_EXTERN extern
+#endif
+
+/*
+ * The list of servers that did not respond with NT LM 0.12.
+ * This list helps improve performance and eliminate the messages indicating
+ * that we had a communications error talking to the server in this list. 
+ */
+GLOBAL_EXTERN struct servers_not_supported *NotSuppList;	/*@z4a */
+
+/*
+ * The following is a hash table of all the users we know about.
+ */
+GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH];
+
+GLOBAL_EXTERN struct list_head GlobalServerList; /* BB not implemented yet */
+GLOBAL_EXTERN struct list_head GlobalSMBSessionList;
+GLOBAL_EXTERN struct list_head GlobalTreeConnectionList;
+GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;  /* protects list inserts on 3 above */
+
+GLOBAL_EXTERN struct list_head GlobalOplock_Q;
+
+/*
+ * Global transaction id (XID) information
+ */
+GLOBAL_EXTERN unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
+GLOBAL_EXTERN unsigned int GlobalTotalActiveXid;	/* prot by GlobalMid_Sem */
+GLOBAL_EXTERN unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
+GLOBAL_EXTERN spinlock_t GlobalMid_Lock;  /* protects above and list operations */
+					/* on midQ entries */
+GLOBAL_EXTERN char Local_System_Name[15];
+
+/*
+ *  Global counters, updated atomically
+ */
+GLOBAL_EXTERN atomic_t sesInfoAllocCount;
+GLOBAL_EXTERN atomic_t tconInfoAllocCount;
+GLOBAL_EXTERN atomic_t tcpSesAllocCount;
+GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
+GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+
+/* Various Debug counters to remove someday (BB) */
+GLOBAL_EXTERN atomic_t bufAllocCount;
+GLOBAL_EXTERN atomic_t smBufAllocCount;      
+GLOBAL_EXTERN atomic_t midCount;
+
+/* Misc globals */
+GLOBAL_EXTERN unsigned int multiuser_mount;	/* if enabled allows new sessions
+				to be established on existing mount if we
+				have the uid/password or Kerberos credential 
+				or equivalent for current user */
+GLOBAL_EXTERN unsigned int oplockEnabled;
+GLOBAL_EXTERN unsigned int experimEnabled;
+GLOBAL_EXTERN unsigned int lookupCacheEnabled;
+GLOBAL_EXTERN unsigned int extended_security;	/* if on, session setup sent 
+				with more secure ntlmssp2 challenge/resp */
+GLOBAL_EXTERN unsigned int ntlmv2_support;  /* better optional password hash */
+GLOBAL_EXTERN unsigned int sign_CIFS_PDUs;  /* enable smb packet signing */
+GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
+GLOBAL_EXTERN unsigned int CIFSMaxBufSize;  /* max size not including hdr */
+GLOBAL_EXTERN unsigned int cifs_min_rcv;    /* min size of big ntwrk buf pool */
+GLOBAL_EXTERN unsigned int cifs_min_small;  /* min size of small buf pool */
+GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
new file mode 100644
index 0000000..bcd4a61
--- /dev/null
+++ b/fs/cifs/cifspdu.h
@@ -0,0 +1,1987 @@
+/*
+ *   fs/cifs/cifspdu.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#ifndef _CIFSPDU_H
+#define _CIFSPDU_H
+
+#include <net/sock.h>
+
+#define CIFS_PROT   0
+#define BAD_PROT    CIFS_PROT+1
+
+/* SMB command codes */
+/* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+ (ie which include no useful data other than the SMB error code itself).
+ Knowing this helps avoid response buffer allocations and copy in some cases */
+#define SMB_COM_CREATE_DIRECTORY      0x00 /* trivial response */
+#define SMB_COM_DELETE_DIRECTORY      0x01 /* trivial response */
+#define SMB_COM_CLOSE                 0x04 /* triv req/rsp, timestamp ignored */
+#define SMB_COM_DELETE                0x06 /* trivial response */
+#define SMB_COM_RENAME                0x07 /* trivial response */
+#define SMB_COM_SETATTR               0x09 /* trivial response */
+#define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
+#define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_READ_ANDX             0x2E
+#define SMB_COM_WRITE_ANDX            0x2F
+#define SMB_COM_TRANSACTION2          0x32
+#define SMB_COM_TRANSACTION2_SECONDARY 0x33
+#define SMB_COM_FIND_CLOSE2           0x34 /* trivial response */
+#define SMB_COM_TREE_DISCONNECT       0x71 /* trivial response */
+#define SMB_COM_NEGOTIATE             0x72
+#define SMB_COM_SESSION_SETUP_ANDX    0x73
+#define SMB_COM_LOGOFF_ANDX           0x74 /* trivial response */
+#define SMB_COM_TREE_CONNECT_ANDX     0x75
+#define SMB_COM_NT_TRANSACT           0xA0
+#define SMB_COM_NT_TRANSACT_SECONDARY 0xA1
+#define SMB_COM_NT_CREATE_ANDX        0xA2
+#define SMB_COM_NT_RENAME             0xA5 /* trivial response */
+
+/* Transact2 subcommand codes */
+#define TRANS2_OPEN                   0x00
+#define TRANS2_FIND_FIRST             0x01
+#define TRANS2_FIND_NEXT              0x02
+#define TRANS2_QUERY_FS_INFORMATION   0x03
+#define TRANS2_QUERY_PATH_INFORMATION 0x05
+#define TRANS2_SET_PATH_INFORMATION   0x06
+#define TRANS2_QUERY_FILE_INFORMATION 0x07
+#define TRANS2_SET_FILE_INFORMATION   0x08
+#define TRANS2_GET_DFS_REFERRAL       0x10
+#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
+
+/* NT Transact subcommand codes */
+#define NT_TRANSACT_CREATE            0x01
+#define NT_TRANSACT_IOCTL             0x02
+#define NT_TRANSACT_SET_SECURITY_DESC 0x03
+#define NT_TRANSACT_NOTIFY_CHANGE     0x04
+#define NT_TRANSACT_RENAME            0x05
+#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
+#define NT_TRANSACT_GET_USER_QUOTA    0x07
+#define NT_TRANSACT_SET_USER_QUOTA    0x08
+
+#define MAX_CIFS_HDR_SIZE 256	/* chained NTCreateXReadX will probably be biggest */
+
+/* internal cifs vfs structures */
+/*****************************************************************
+ * All constants go here
+ *****************************************************************
+ */
+
+/*
+ * Starting value for maximum SMB size negotiation
+ */
+#define CIFS_MAX_MSGSIZE (4*4096)
+
+/*
+ * Size of encrypted user password in bytes
+ */
+#define CIFS_ENCPWD_SIZE (16)
+
+/*
+ * Size of the crypto key returned on the negotiate SMB in bytes
+ */
+#define CIFS_CRYPTO_KEY_SIZE (8)
+
+/*
+ * Size of the session key (crypto key encrypted with the password
+ */
+#define CIFS_SESSION_KEY_SIZE (24)
+
+/*
+ * Maximum user name length
+ */
+#define CIFS_UNLEN (20)
+
+/*
+ * Flags on SMB open
+ */
+#define SMBOPEN_WRITE_THROUGH 0x4000
+#define SMBOPEN_DENY_ALL      0x0010
+#define SMBOPEN_DENY_WRITE    0x0020
+#define SMBOPEN_DENY_READ     0x0030
+#define SMBOPEN_DENY_NONE     0x0040
+#define SMBOPEN_READ          0x0000
+#define SMBOPEN_WRITE         0x0001
+#define SMBOPEN_READWRITE     0x0002
+#define SMBOPEN_EXECUTE       0x0003
+
+#define SMBOPEN_OCREATE       0x0010
+#define SMBOPEN_OTRUNC        0x0002
+#define SMBOPEN_OAPPEND       0x0001
+
+/*
+ * SMB flag definitions 
+ */
+#define SMBFLG_EXTD_LOCK 0x01	/* server supports lock-read write-unlock primitives */
+#define SMBFLG_RCV_POSTED 0x02	/* obsolete */
+#define SMBFLG_RSVD 0x04
+#define SMBFLG_CASELESS 0x08	/* all pathnames treated as caseless (off implies case sensitive file handling requested) */
+#define SMBFLG_CANONICAL_PATH_FORMAT 0x10	/* obsolete */
+#define SMBFLG_OLD_OPLOCK 0x20	/* obsolete */
+#define SMBFLG_OLD_OPLOCK_NOTIFY 0x40	/* obsolete */
+#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
+
+/*
+ * SMB flag2 definitions 
+ */
+#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1)	/* can send long (non-8.3) path names in response */
+#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
+#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
+#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+#define SMBFLG2_DFS cpu_to_le16(0x1000)
+#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
+#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+
+/*
+ * These are the file access permission bits defined in CIFS for the
+ * NTCreateAndX as well as the level 0x107
+ * TRANS2_QUERY_PATH_INFORMATION API.  The level 0x107, SMB_QUERY_FILE_ALL_INFO
+ * responds with the AccessFlags.
+ * The AccessFlags specifies the access permissions a caller has to the
+ * file and can have any suitable combination of the following values:
+ */
+
+#define FILE_READ_DATA        0x00000001	/* Data can be read from the file   */
+#define FILE_WRITE_DATA       0x00000002	/* Data can be written to the file  */
+#define FILE_APPEND_DATA      0x00000004	/* Data can be appended to the file */
+#define FILE_READ_EA          0x00000008	/* Extended attributes associated   */
+					 /* with the file can be read        */
+#define FILE_WRITE_EA         0x00000010	/* Extended attributes associated   */
+					 /* with the file can be written     */
+#define FILE_EXECUTE          0x00000020	/*Data can be read into memory from */
+					 /* the file using system paging I/O */
+#define FILE_DELETE_CHILD     0x00000040
+#define FILE_READ_ATTRIBUTES  0x00000080	/* Attributes associated with the   */
+					 /* file can be read                 */
+#define FILE_WRITE_ATTRIBUTES 0x00000100	/* Attributes associated with the   */
+					 /* file can be written              */
+#define DELETE                0x00010000	/* The file can be deleted          */
+#define READ_CONTROL          0x00020000	/* The access control list and      */
+					 /* ownership associated with the    */
+					 /* file can be read                 */
+#define WRITE_DAC             0x00040000	/* The access control list and      */
+					 /* ownership associated with the    */
+					 /* file can be written.             */
+#define WRITE_OWNER           0x00080000	/* Ownership information associated */
+					 /* with the file can be written     */
+#define SYNCHRONIZE           0x00100000	/* The file handle can waited on to */
+					 /* synchronize with the completion  */
+					 /* of an input/output request       */
+#define GENERIC_ALL           0x10000000
+#define GENERIC_EXECUTE       0x20000000
+#define GENERIC_WRITE         0x40000000
+#define GENERIC_READ          0x80000000
+					 /* In summary - Relevant file       */
+					 /* access flags from CIFS are       */
+					 /* file_read_data, file_write_data  */
+					 /* file_execute, file_read_attributes */
+					 /* write_dac, and delete.           */
+
+/*
+ * Invalid readdir handle
+ */
+#define CIFS_NO_HANDLE        0xFFFF
+
+/* IPC$ in ASCII */
+#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
+
+/* IPC$ in Unicode */
+#define CIFS_IPC_UNICODE_RESOURCE "\x00\x49\x00\x50\x00\x43\x00\x24\x00\x00"
+
+/* Unicode Null terminate 2 bytes of 0 */
+#define UNICODE_NULL "\x00\x00"
+#define ASCII_NULL 0x00
+
+/*
+ * Server type values (returned on EnumServer API)
+ */
+#define CIFS_SV_TYPE_DC     0x00000008
+#define CIFS_SV_TYPE_BACKDC 0x00000010
+
+/*
+ * Alias type flags (from EnumAlias API call)
+ */
+#define CIFS_ALIAS_TYPE_FILE 0x0001
+#define CIFS_SHARE_TYPE_FILE 0x0000
+
+/*
+ * File Attribute flags
+ */
+#define ATTR_READONLY  0x0001
+#define ATTR_HIDDEN    0x0002
+#define ATTR_SYSTEM    0x0004
+#define ATTR_VOLUME    0x0008
+#define ATTR_DIRECTORY 0x0010
+#define ATTR_ARCHIVE   0x0020
+#define ATTR_DEVICE    0x0040
+#define ATTR_NORMAL    0x0080
+#define ATTR_TEMPORARY 0x0100
+#define ATTR_SPARSE    0x0200
+#define ATTR_REPARSE   0x0400
+#define ATTR_COMPRESSED 0x0800
+#define ATTR_OFFLINE    0x1000	/* ie file not immediately available - offline storage */
+#define ATTR_NOT_CONTENT_INDEXED 0x2000
+#define ATTR_ENCRYPTED  0x4000
+#define ATTR_POSIX_SEMANTICS 0x01000000
+#define ATTR_BACKUP_SEMANTICS 0x02000000
+#define ATTR_DELETE_ON_CLOSE 0x04000000
+#define ATTR_SEQUENTIAL_SCAN 0x08000000
+#define ATTR_RANDOM_ACCESS   0x10000000
+#define ATTR_NO_BUFFERING    0x20000000
+#define ATTR_WRITE_THROUGH   0x80000000
+
+/* ShareAccess flags */
+#define FILE_NO_SHARE     0x00000000
+#define FILE_SHARE_READ   0x00000001
+#define FILE_SHARE_WRITE  0x00000002
+#define FILE_SHARE_DELETE 0x00000004
+#define FILE_SHARE_ALL    0x00000007
+
+/* CreateDisposition flags */
+#define FILE_SUPERSEDE    0x00000000
+#define FILE_OPEN         0x00000001
+#define FILE_CREATE       0x00000002
+#define FILE_OPEN_IF      0x00000003
+#define FILE_OVERWRITE    0x00000004
+#define FILE_OVERWRITE_IF 0x00000005
+
+/* CreateOptions */
+#define CREATE_NOT_FILE		0x00000001	/* if set must not be file */
+#define CREATE_WRITE_THROUGH	0x00000002
+#define CREATE_NOT_DIR		0x00000040	/* if set must not be directory */
+#define CREATE_RANDOM_ACCESS	0x00000800
+#define CREATE_DELETE_ON_CLOSE	0x00001000
+#define OPEN_REPARSE_POINT	0x00200000
+
+/* ImpersonationLevel flags */
+#define SECURITY_ANONYMOUS      0
+#define SECURITY_IDENTIFICATION 1
+#define SECURITY_IMPERSONATION  2
+#define SECURITY_DELEGATION     3
+
+/* SecurityFlags */
+#define SECURITY_CONTEXT_TRACKING 0x01
+#define SECURITY_EFFECTIVE_ONLY   0x02
+
+/*
+ * Default PID value, used in all SMBs where the PID is not important
+ */
+#define CIFS_DFT_PID  0x1234
+
+/*
+ * We use the same routine for Copy and Move SMBs.  This flag is used to
+ * distinguish
+ */
+#define CIFS_COPY_OP 1
+#define CIFS_RENAME_OP 2
+
+#define GETU16(var)  (*((__u16 *)var))	/* BB check for endian issues */
+#define GETU32(var)  (*((__u32 *)var))	/* BB check for endian issues */
+
+#pragma pack(1)
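+/* the SMB wire structures below are byte packed to match the on the wire layout */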
+
+struct smb_hdr {
+	__u32 smb_buf_length;	/* big endian on wire *//* BB length is only two or three bytes - with one or two byte type preceding it but that is always zero - we could mask the type byte off just in case BB */
+	__u8 Protocol[4];
+	__u8 Command;
+	union {
+		struct {
+			__u8 ErrorClass;
+			__u8 Reserved;
+			__le16 Error;
+		} DosError;
+		__le32 CifsError;
+	} Status;
+	__u8 Flags;
+	__le16 Flags2;		/* note: le */
+	__le16 PidHigh;
+	union {
+		struct {
+			__le32 SequenceNumber;  /* le */
+			__u32 Reserved; /* zero */
+		} Sequence;
+		__u8 SecuritySignature[8];	/* le */
+	} Signature;
+	__u8 pad[2];
+	__u16 Tid;
+	__le16 Pid;
+	__u16 Uid;
+	__u16 Mid;
+	__u8 WordCount;
+};
+/* given a pointer to an smb_hdr retrieve the value of byte count */
+#define BCC(smb_var) ( *(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2* smb_var->WordCount) ) )
+
+/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
+#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2* smb_var->WordCount) + 2 )
+
+/*
+ * Computer Name Length
+ */
+#define CNLEN 15
+
+/*
+ * Share Name Length					  @S8A
+ * Note:  This length is limited by the SMB used to get   @S8A
+ *        the Share info.   NetShareEnum only returns 13  @S8A
+ *        chars, including the null termination.          @S8A 
+ */
+#define SNLEN 12		/*@S8A */
+
+/*
+ * Comment Length
+ */
+#define MAXCOMMENTLEN 40
+
+/*
+ * The OS/2 maximum path name
+ */
+#define MAX_PATHCONF 256
+
+/*
+ *  SMB frame definitions  (following must be packed structs)
+ *  See the SNIA CIFS Specification for details.
+ *
+ *  The Naming convention is the lower case version of the
+ *  smb command code name for the struct and this is typedefed to the
+ *  uppercase version of the same name with the prefix SMB_ removed 
+ *  for brevity.  Although typedefs are not commonly used for 
+ *  structure definitions in the Linux kernel, their use in the
+ *  CIFS standards document, which this code is based on, may
+ *  make this one of the cases where typedefs for structures make
+ *  sense to improve readability for readers of the standards doc.
+ *  Typedefs can always be removed later if they are too distracting
+ *  and they are only used for the CIFS PDUs themselves, not
+ *  internal cifs vfs structures
+ *  
+ */
+
+typedef struct negotiate_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	unsigned char DialectsArray[1];
+} NEGOTIATE_REQ;
+
+typedef struct negotiate_rsp {
+	struct smb_hdr hdr;	/* wct = 17 */
+	__le16 DialectIndex;
+	__u8 SecurityMode;
+	__le16 MaxMpxCount;
+	__le16 MaxNumberVcs;
+	__le32 MaxBufferSize;
+	__le32 MaxRawSize;
+	__le32 SessionKey;
+	__le32 Capabilities;	/* see below */
+	__le32 SystemTimeLow;
+	__le32 SystemTimeHigh;
+	__le16 ServerTimeZone;
+	__u8 EncryptionKeyLength;
+	__u16 ByteCount;
+	union {
+		unsigned char EncryptionKey[1];	/* if cap extended security is off */
+		/* followed by Domain name - if extended security is off */
+		/* followed by 16 bytes of server GUID */
+		/* followed by security blob if cap_extended_security negotiated */
+		struct {
+			unsigned char GUID[16];
+			unsigned char SecurityBlob[1];
+		} extended_response;
+	} u;
+} NEGOTIATE_RSP;
+
+/* SecurityMode bits */
+#define SECMODE_USER          0x01	/* off indicates share level security */
+#define SECMODE_PW_ENCRYPT    0x02
+#define SECMODE_SIGN_ENABLED  0x04	/* SMB security signatures enabled */
+#define SECMODE_SIGN_REQUIRED 0x08	/* SMB security signatures required */
+
+/* Negotiate response Capabilities */
+#define CAP_RAW_MODE           0x00000001
+#define CAP_MPX_MODE           0x00000002
+#define CAP_UNICODE            0x00000004
+#define CAP_LARGE_FILES        0x00000008
+#define CAP_NT_SMBS            0x00000010	/* implies CAP_NT_FIND */
+#define CAP_RPC_REMOTE_APIS    0x00000020
+#define CAP_STATUS32           0x00000040
+#define CAP_LEVEL_II_OPLOCKS   0x00000080
+#define CAP_LOCK_AND_READ      0x00000100
+#define CAP_NT_FIND            0x00000200
+#define CAP_DFS                0x00001000
+#define CAP_INFOLEVEL_PASSTHRU 0x00002000
+#define CAP_LARGE_READ_X       0x00004000
+#define CAP_LARGE_WRITE_X      0x00008000
+#define CAP_UNIX               0x00800000
+#define CAP_RESERVED           0x02000000
+#define CAP_BULK_TRANSFER      0x20000000
+#define CAP_COMPRESSED_DATA    0x40000000
+#define CAP_EXTENDED_SECURITY  0x80000000
+
+typedef union smb_com_session_setup_andx {
+	struct {		/* request format */
+		struct smb_hdr hdr;	/* wct = 12 */
+		__u8 AndXCommand;
+		__u8 AndXReserved;
+		__le16 AndXOffset;
+		__le16 MaxBufferSize;
+		__le16 MaxMpxCount;
+		__le16 VcNumber;
+		__u32 SessionKey;
+		__le16 SecurityBlobLength;
+		__u32 Reserved;
+		__le32 Capabilities;	/* see below */
+		__le16 ByteCount;
+		unsigned char SecurityBlob[1];	/* followed by */
+		/* STRING NativeOS */
+		/* STRING NativeLanMan */
+	} req;			/* NTLM request format (with extended security) */
+
+	struct {		/* request format */
+		struct smb_hdr hdr;	/* wct = 13 */
+		__u8 AndXCommand;
+		__u8 AndXReserved;
+		__le16 AndXOffset;
+		__le16 MaxBufferSize;
+		__le16 MaxMpxCount;
+		__le16 VcNumber;
+		__u32 SessionKey;
+		__le16 CaseInsensitivePasswordLength;	/* ASCII password length */
+		__le16 CaseSensitivePasswordLength;	/* Unicode password length */
+		__u32 Reserved;	/* see below */
+		__le32 Capabilities;
+		__le16 ByteCount;
+		unsigned char CaseInsensitivePassword[1];	/* followed by: */
+		/* unsigned char * CaseSensitivePassword; */
+		/* STRING AccountName */
+		/* STRING PrimaryDomain */
+		/* STRING NativeOS */
+		/* STRING NativeLanMan */
+	} req_no_secext;	/* NTLM request format (without extended security) */
+
+	struct {		/* default (NTLM) response format */
+		struct smb_hdr hdr;	/* wct = 4 */
+		__u8 AndXCommand;
+		__u8 AndXReserved;
+		__le16 AndXOffset;
+		__le16 Action;	/* see below */
+		__le16 SecurityBlobLength;
+		__u16 ByteCount;
+		unsigned char SecurityBlob[1];	/* followed by */
+/*      unsigned char  * NativeOS;      */
+/*	unsigned char  * NativeLanMan;  */
+/*      unsigned char  * PrimaryDomain; */
+	} resp;			/* NTLM response format (with or without extended security) */
+
+	struct {		/* request format */
+		struct smb_hdr hdr;	/* wct = 10 */
+		__u8 AndXCommand;
+		__u8 AndXReserved;
+		__le16 AndXOffset;
+		__le16 MaxBufferSize;
+		__le16 MaxMpxCount;
+		__le16 VcNumber;
+		__u32 SessionKey;
+		__le16 PassswordLength;
+		__u32 Reserved;
+		__le16 ByteCount;
+		unsigned char AccountPassword[1];	/* followed by */
+		/* STRING AccountName */
+		/* STRING PrimaryDomain */
+		/* STRING NativeOS */
+		/* STRING NativeLanMan */
+	} old_req;		/* pre-NTLM (LANMAN2.1) request format */
+
+	struct {		/* default (NTLM) response format */
+		struct smb_hdr hdr;	/* wct = 3 */
+		__u8 AndXCommand;
+		__u8 AndXReserved;
+		__le16 AndXOffset;
+		__le16 Action;	/* see below */
+		__u16 ByteCount;
+		unsigned char NativeOS[1];	/* followed by */
+/*	unsigned char * NativeLanMan; */
+/*      unsigned char * PrimaryDomain; */
+	} old_resp;		/* pre-NTLM (LANMAN2.1) response format */
+} SESSION_SETUP_ANDX;
+
+#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
+
+/* Capabilities bits (for NTLM SessSetup request) */
+#define CAP_UNICODE            0x00000004
+#define CAP_LARGE_FILES        0x00000008
+#define CAP_NT_SMBS            0x00000010
+#define CAP_STATUS32           0x00000040
+#define CAP_LEVEL_II_OPLOCKS   0x00000080
+#define CAP_NT_FIND            0x00000200	/* reserved should be zero (presumably because NT_SMBs implies the same thing) */
+#define CAP_BULK_TRANSFER      0x20000000
+#define CAP_EXTENDED_SECURITY  0x80000000
+
+/* Action bits */
+#define GUEST_LOGIN 1
+
+typedef struct smb_com_tconx_req {
+	struct smb_hdr hdr;	/* wct = 4 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Flags;		/* see below */
+	__le16 PasswordLength;
+	__le16 ByteCount;
+	unsigned char Password[1];	/* followed by */
+/* STRING Path    *//* \\server\share name */
+	/* STRING Service */
+} TCONX_REQ;
+
+typedef struct smb_com_tconx_rsp {
+	struct smb_hdr hdr;	/* wct = 3 *//* note that Win2000 has sent wct=7 in some cases on responses. Four unspecified words followed OptionalSupport */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OptionalSupport;	/* see below */
+	__u16 ByteCount;
+	unsigned char Service[1];	/* always ASCII, not Unicode */
+	/* STRING NativeFileSystem */
+} TCONX_RSP;
+
+/* tree connect Flags */
+#define DISCONNECT_TID          0x0001
+#define TCON_EXTENDED_SECINFO   0x0008
+/* OptionalSupport bits */
+#define SMB_SUPPORT_SEARCH_BITS 0x0001	/* must have bits (exclusive searches supported) */
+#define SMB_SHARE_IS_IN_DFS     0x0002
+
+typedef struct smb_com_logoff_andx_req {
+	struct smb_hdr hdr;	/* wct = 2 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__u16 AndXOffset;
+	__u16 ByteCount;
+} LOGOFF_ANDX_REQ;
+
+typedef struct smb_com_logoff_andx_rsp {
+	struct smb_hdr hdr;	/* wct = 2 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__u16 AndXOffset;
+	__u16 ByteCount;
+} LOGOFF_ANDX_RSP;
+
+typedef union smb_com_tree_disconnect {	/* as an alternative can use flag on tree_connect PDU to effect disconnect *//* probably the simplest SMB PDU */
+	struct {
+		struct smb_hdr hdr;	/* wct = 0 */
+		__u16 ByteCount;	/* bcc = 0 */
+	} req;
+	struct {
+		struct smb_hdr hdr;	/* wct = 0 */
+		__u16 ByteCount;	/* bcc = 0 */
+	} resp;
+} TREE_DISCONNECT;
+
+typedef struct smb_com_close_req {
+	struct smb_hdr hdr;	/* wct = 3 */
+	__u16 FileID;
+	__u32 LastWriteTime;	/* should be zero */
+	__u16 ByteCount;	/* 0 */
+} CLOSE_REQ;
+
+typedef struct smb_com_close_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__u16 ByteCount;	/* bct = 0 */
+} CLOSE_RSP;
+
+typedef struct smb_com_findclose_req {
+	struct smb_hdr hdr; /* wct = 1 */
+	__u16 FileID;
+	__u16 ByteCount;    /* 0 */
+} FINDCLOSE_REQ;
+
+/* OpenFlags */
+#define REQ_OPLOCK         0x00000002
+#define REQ_BATCHOPLOCK    0x00000004
+#define REQ_OPENDIRONLY    0x00000008
+
+typedef struct smb_com_open_req {	/* also handles create */
+	struct smb_hdr hdr;	/* wct = 24 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 Reserved;		/* Must Be Zero */
+	__le16 NameLength;
+	__le32 OpenFlags;
+	__le32 RootDirectoryFid;
+	__le32 DesiredAccess;
+	__le64 AllocationSize;
+	__le32 FileAttributes;
+	__le32 ShareAccess;
+	__le32 CreateDisposition;
+	__le32 CreateOptions;
+	__le32 ImpersonationLevel;
+	__u8 SecurityFlags;
+	__le16 ByteCount;
+	char fileName[1];
+} OPEN_REQ;
+
+/* open response: oplock levels */
+#define OPLOCK_NONE  	 0
+#define OPLOCK_EXCLUSIVE 1
+#define OPLOCK_BATCH	 2
+#define OPLOCK_READ	 3  /* level 2 oplock */
+
+/* open response for CreateAction shifted left */
+#define CIFS_CREATE_ACTION 0x20000 /* file created */
+
+typedef struct smb_com_open_rsp {
+	struct smb_hdr hdr;	/* wct = 34 BB */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 OplockLevel;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 FileAttributes;
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le16 FileType;
+	__le16 DeviceState;
+	__u8 DirectoryFlag;
+	__u16 ByteCount;	/* bct = 0 */
+} OPEN_RSP;
+
+typedef struct smb_com_write_req {
+	struct smb_hdr hdr;	/* wct = 14 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__u32 Reserved;
+	__le16 WriteMode;
+	__le16 Remaining;
+	__le16 DataLengthHigh;
+	__le16 DataLengthLow;
+	__le16 DataOffset;
+	__le32 OffsetHigh;
+	__le16 ByteCount;
+	__u8 Pad;		/* BB check whether padding to a DWORD boundary here gives optimum performance */
+	char Data[0];
+} WRITE_REQ;
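+
+/* Illustrative sketch only (not part of the original header): one way the
+   64-bit file offset would be split across the OffsetLow/OffsetHigh words
+   of the WRITE_REQ above.  The helper name is hypothetical. */
+static inline void cifs_sketch_set_write_offset(WRITE_REQ *req, __u64 offset)
+{
+	req->OffsetLow  = cpu_to_le32((__u32)(offset & 0xFFFFFFFF));
+	req->OffsetHigh = cpu_to_le32((__u32)(offset >> 32));
+}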
+
+typedef struct smb_com_write_rsp {
+	struct smb_hdr hdr;	/* wct = 6 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Count;
+	__le16 Remaining;
+	__le16 CountHigh;
+	__u16  Reserved;
+	__u16 ByteCount;
+} WRITE_RSP;
+
+typedef struct smb_com_read_req {
+	struct smb_hdr hdr;	/* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__le16 MaxCount;
+	__le16 MinCount;		/* obsolete */
+	__le32 MaxCountHigh;
+	__le16 Remaining;
+	__le32 OffsetHigh;
+	__le16 ByteCount;
+} READ_REQ;
+
+typedef struct smb_com_read_rsp {
+	struct smb_hdr hdr;	/* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Remaining;
+	__le16 DataCompactionMode;
+	__le16 Reserved;
+	__le16 DataLength;
+	__le16 DataOffset;
+	__le16 DataLengthHigh;
+	__u64 Reserved2;
+	__u16 ByteCount;
+	__u8 Pad;		/* BB check whether padding to a DWORD boundary here gives optimum performance */
+	char Data[1];
+} READ_RSP;
+
+typedef struct locking_andx_range {
+	__le16 Pid;
+	__le16 Pad;
+	__le32 OffsetHigh;
+	__le32 OffsetLow;
+	__le32 LengthHigh;
+	__le32 LengthLow;
+} LOCKING_ANDX_RANGE;
+
+#define LOCKING_ANDX_SHARED_LOCK     0x01
+#define LOCKING_ANDX_OPLOCK_RELEASE  0x02
+#define LOCKING_ANDX_CHANGE_LOCKTYPE 0x04
+#define LOCKING_ANDX_CANCEL_LOCK     0x08
+#define LOCKING_ANDX_LARGE_FILES     0x10	/* always on for us */
+
+typedef struct smb_com_lock_req {
+	struct smb_hdr hdr;	/* wct = 8 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__u8 LockType;
+	__u8 OplockLevel;
+	__le32 Timeout;
+	__le16 NumberOfUnlocks;
+	__le16 NumberOfLocks;
+	__le16 ByteCount;
+	LOCKING_ANDX_RANGE Locks[1];
+} LOCK_REQ;
+
+typedef struct smb_com_lock_rsp {
+	struct smb_hdr hdr;	/* wct = 2 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 ByteCount;
+} LOCK_RSP;
+
+typedef struct smb_com_rename_req {
+	struct smb_hdr hdr;	/* wct = 1 */
+	__le16 SearchAttributes;	/* target file attributes */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} RENAME_REQ;
+
+	/* copy request flags */
+#define COPY_MUST_BE_FILE      0x0001
+#define COPY_MUST_BE_DIR       0x0002
+#define COPY_TARGET_MODE_ASCII 0x0004 /* if not set, binary */
+#define COPY_SOURCE_MODE_ASCII 0x0008 /* if not set, binary */
+#define COPY_VERIFY_WRITES     0x0010
+#define COPY_TREE              0x0020 
+
+typedef struct smb_com_copy_req {
+	struct smb_hdr hdr;	/* wct = 3 */
+	__u16 Tid2;
+	__le16 OpenFunction;
+	__le16 Flags;
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII or Unicode */ 
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName string */
+} COPY_REQ;
+
+typedef struct smb_com_copy_rsp {
+	struct smb_hdr hdr;     /* wct = 1 */
+	__le16 CopyCount;    /* number of files copied */
+	__u16 ByteCount;    /* may be zero */
+	__u8 BufferFormat;  /* 0x04 - only present if errored file follows */
+	unsigned char ErrorFileName[1]; /* only present if error in copy */
+} COPY_RSP;
+
+#define CREATE_HARD_LINK		0x103
+#define MOVEFILE_COPY_ALLOWED		0x0002
+#define MOVEFILE_REPLACE_EXISTING	0x0001
+
+typedef struct smb_com_nt_rename_req {	/* A5 - also used for create hardlink */
+	struct smb_hdr hdr;	/* wct = 4 */
+	__le16 SearchAttributes;	/* target file attributes */
+	__le16 Flags;		/* spec says Information Level */
+	__le32 ClusterCount;
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} NT_RENAME_REQ;
+
+typedef struct smb_com_rename_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__u16 ByteCount;	/* bct = 0 */
+} RENAME_RSP;
+
+typedef struct smb_com_delete_file_req {
+	struct smb_hdr hdr;	/* wct = 1 */
+	__le16 SearchAttributes;
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char fileName[1];
+} DELETE_FILE_REQ;
+
+typedef struct smb_com_delete_file_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__u16 ByteCount;	/* bct = 0 */
+} DELETE_FILE_RSP;
+
+typedef struct smb_com_delete_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} DELETE_DIRECTORY_REQ;
+
+typedef struct smb_com_delete_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__u16 ByteCount;	/* bct = 0 */
+} DELETE_DIRECTORY_RSP;
+
+typedef struct smb_com_create_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} CREATE_DIRECTORY_REQ;
+
+typedef struct smb_com_create_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__u16 ByteCount;	/* bct = 0 */
+} CREATE_DIRECTORY_RSP;
+
+typedef struct smb_com_setattr_req {
+	struct smb_hdr hdr; /* wct = 8 */
+	__le16 attr;
+	__le16 time_low;
+	__le16 time_high;
+	__le16 reserved[5]; /* must be zero */
+	__u16  ByteCount;
+	__u8   BufferFormat; /* 4 = ASCII */
+	unsigned char fileName[1];
+} SETATTR_REQ;
+
+typedef struct smb_com_setattr_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__u16 ByteCount;        /* bct = 0 */
+} SETATTR_RSP;
+
+/* empty wct response to setattr */
+
+/***************************************************/
+/* NT Transact structure definitions follow        */
+/* Currently only ioctl and notify are implemented */
+/***************************************************/
+typedef struct smb_com_transaction_ioctl_req {
+	struct smb_hdr hdr;	/* wct = 23 */
+	__u8 MaxSetupCount;
+	__u16 Reserved;
+	__le32 TotalParameterCount;
+	__le32 TotalDataCount;
+	__le32 MaxParameterCount;
+	__le32 MaxDataCount;
+	__le32 ParameterCount;
+	__le32 ParameterOffset;
+	__le32 DataCount;
+	__le32 DataOffset;
+	__u8 SetupCount; /* four setup words follow subcommand */
+	/* SNIA spec incorrectly included spurious pad here */
+	__le16 SubCommand;/* 2 = IOCTL/FSCTL */
+	__le32 FunctionCode;
+	__u16 Fid;
+	__u8 IsFsctl;    /* 1 = File System Control, 0 = device control (IOCTL)*/
+	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS share)*/
+	__le16 ByteCount;
+	__u8 Pad[3];
+	__u8 Data[1];
+} TRANSACT_IOCTL_REQ;
+
+typedef struct smb_com_transaction_ioctl_rsp {
+	struct smb_hdr hdr;	/* wct = 19 */
+	__u8 Reserved[3];
+	__le32 TotalParameterCount;
+	__le32 TotalDataCount;
+	__le32 ParameterCount;
+	__le32 ParameterOffset;
+	__le32 ParameterDisplacement;
+	__le32 DataCount;
+	__le32 DataOffset;
+	__le32 DataDisplacement;
+	__u8 SetupCount;	/* 1 */
+	__le16 ReturnedDataLen;
+	__u16 ByteCount;
+	__u8 Pad[3];
+} TRANSACT_IOCTL_RSP;
+
+typedef struct smb_com_transaction_change_notify_req {
+	struct smb_hdr hdr;     /* wct = 23 */
+	__u8 MaxSetupCount;
+	__u16 Reserved;
+	__le32 TotalParameterCount;
+	__le32 TotalDataCount;
+	__le32 MaxParameterCount;
+	__le32 MaxDataCount;
+	__le32 ParameterCount;
+	__le32 ParameterOffset;
+	__le32 DataCount;
+	__le32 DataOffset;
+	__u8 SetupCount; /* four setup words follow subcommand */
+	/* SNIA spec incorrectly included spurious pad here */
+	__le16 SubCommand;/* 4 = Change Notify */
+	__le32 CompletionFilter;  /* operation to monitor */
+	__u16 Fid;
+	__u8 WatchTree;  /* 1 = Monitor subdirectories */
+	__u8 Reserved2;
+	__le16 ByteCount;
+/* __u8 Pad[3];*/
+/*	__u8 Data[1];*/
+} TRANSACT_CHANGE_NOTIFY_REQ;
+
+typedef struct smb_com_transaction_change_notify_rsp {
+	struct smb_hdr hdr;	/* wct = 18 */
+	__u8 Reserved[3];
+	__le32 TotalParameterCount;
+	__le32 TotalDataCount;
+	__le32 ParameterCount;
+	__le32 ParameterOffset;
+	__le32 ParameterDisplacement;
+	__le32 DataCount;
+	__le32 DataOffset;
+	__le32 DataDisplacement;
+	__u8 SetupCount;   /* 0 */
+	__u16 ByteCount;
+	/* __u8 Pad[3]; */
+} TRANSACT_CHANGE_NOTIFY_RSP;
+/* Completion Filter flags for Notify */
+#define FILE_NOTIFY_CHANGE_FILE_NAME    0x00000001
+#define FILE_NOTIFY_CHANGE_DIR_NAME     0x00000002
+#define FILE_NOTIFY_CHANGE_NAME         0x00000003
+#define FILE_NOTIFY_CHANGE_ATTRIBUTES   0x00000004
+#define FILE_NOTIFY_CHANGE_SIZE         0x00000008
+#define FILE_NOTIFY_CHANGE_LAST_WRITE   0x00000010
+#define FILE_NOTIFY_CHANGE_LAST_ACCESS  0x00000020
+#define FILE_NOTIFY_CHANGE_CREATION     0x00000040
+#define FILE_NOTIFY_CHANGE_EA           0x00000080
+#define FILE_NOTIFY_CHANGE_SECURITY     0x00000100
+#define FILE_NOTIFY_CHANGE_STREAM_NAME  0x00000200
+#define FILE_NOTIFY_CHANGE_STREAM_SIZE  0x00000400
+#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
+
+#define FILE_ACTION_ADDED		0x00000001
+#define FILE_ACTION_REMOVED		0x00000002
+#define FILE_ACTION_MODIFIED		0x00000003
+#define FILE_ACTION_RENAMED_OLD_NAME	0x00000004
+#define FILE_ACTION_RENAMED_NEW_NAME	0x00000005
+#define FILE_ACTION_ADDED_STREAM	0x00000006
+#define FILE_ACTION_REMOVED_STREAM	0x00000007
+#define FILE_ACTION_MODIFIED_STREAM	0x00000008
+
+/* response contains array of the following structures */
+struct file_notify_information {
+	__le32 NextEntryOffset;
+	__le32 Action;
+	__le32 FileNameLength;
+	__u8  FileName[0];
+}; 
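+
+/* Illustrative sketch only (hypothetical helper, not in the original
+   header): walking that array entry by entry via NextEntryOffset, which
+   is zero on the last entry. */
+static inline struct file_notify_information *
+cifs_sketch_next_notify_entry(struct file_notify_information *entry)
+{
+	__u32 next = le32_to_cpu(entry->NextEntryOffset);
+
+	if (next == 0)
+		return NULL;	/* last entry in the response buffer */
+	return (struct file_notify_information *)((char *)entry + next);
+}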
+
+struct reparse_data {
+	__u32	ReparseTag;
+	__u16	ReparseDataLength;
+	__u16	Reserved;
+	__u16	AltNameOffset;
+	__u16	AltNameLen;
+	__u16	TargetNameOffset;
+	__u16	TargetNameLen;
+	char	LinkNamesBuf[1];
+};
+
+struct cifs_quota_data {
+	__u32	rsrvd1;  /* 0 */
+	__u32	sid_size;
+	__u64	rsrvd2;  /* 0 */
+	__u64	space_used;
+	__u64	soft_limit;
+	__u64	hard_limit;
+	char	sid[1];  /* variable size? */
+};
+
+/* quota sub commands */
+#define QUOTA_LIST_CONTINUE	    0
+#define QUOTA_LIST_START	0x100
+#define QUOTA_FOR_SID		0x101
+
+struct trans2_req {
+	/* struct smb_hdr hdr precedes. Set wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand; /* 1st setup word - SetupCount words follow */
+	__le16 ByteCount;
+};
+
+struct smb_t2_req {
+	struct smb_hdr hdr;
+	struct trans2_req t2_req;
+};
+
+struct trans2_resp {
+	/* struct smb_hdr hdr precedes. Note wct = 10 + setup count */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__u16 Reserved;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 ParameterDisplacement;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__le16 DataDisplacement;
+	__u8 SetupCount;
+	__u8 Reserved1;
+	/* SetupWords[SetupCount];
+	__u16 ByteCount;
+	__u16 Reserved2;*/	
+	/* data area follows */
+};
+
+struct smb_t2_rsp {
+	struct smb_hdr hdr;
+	struct trans2_resp t2_rsp;
+};
+
+/* PathInfo/FileInfo infolevels */
+#define SMB_INFO_STANDARD                   1
+#define SMB_SET_FILE_EA                     2
+#define SMB_QUERY_FILE_EA_SIZE              2
+#define SMB_INFO_QUERY_EAS_FROM_LIST        3
+#define SMB_INFO_QUERY_ALL_EAS              4
+#define SMB_INFO_IS_NAME_VALID              6
+#define SMB_QUERY_FILE_BASIC_INFO       0x101
+#define SMB_QUERY_FILE_STANDARD_INFO    0x102
+#define SMB_QUERY_FILE_EA_INFO          0x103
+#define SMB_QUERY_FILE_NAME_INFO        0x104
+#define SMB_QUERY_FILE_ALLOCATION_INFO  0x105
+#define SMB_QUERY_FILE_END_OF_FILEINFO  0x106
+#define SMB_QUERY_FILE_ALL_INFO         0x107
+#define SMB_QUERY_ALT_NAME_INFO         0x108
+#define SMB_QUERY_FILE_STREAM_INFO      0x109
+#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
+#define SMB_QUERY_FILE_UNIX_BASIC       0x200
+#define SMB_QUERY_FILE_UNIX_LINK        0x201
+#define SMB_QUERY_POSIX_ACL             0x204
+#define SMB_QUERY_XATTR                 0x205
+#define SMB_QUERY_ATTR_FLAGS            0x206  /* append,immutable etc. */
+#define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
+#define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
+#define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
+#define SMB_QUERY_FILE_POSITION_INFO    0x3f6 
+#define SMB_QUERY_FILE_MODE_INFO        0x3f8
+#define SMB_QUERY_FILE_ALGN_INFO        0x3f9 
+
+
+#define SMB_SET_FILE_BASIC_INFO	        0x101
+#define SMB_SET_FILE_DISPOSITION_INFO   0x102
+#define SMB_SET_FILE_ALLOCATION_INFO    0x103
+#define SMB_SET_FILE_END_OF_FILE_INFO   0x104
+#define SMB_SET_FILE_UNIX_BASIC         0x200
+#define SMB_SET_FILE_UNIX_LINK          0x201
+#define SMB_SET_FILE_UNIX_HLINK         0x203
+#define SMB_SET_POSIX_ACL               0x204
+#define SMB_SET_XATTR                   0x205
+#define SMB_SET_ATTR_FLAGS              0x206  /* append, immutable etc. */
+#define SMB_SET_FILE_BASIC_INFO2        0x3ec
+#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2 /* BB check if qpathinfo level too */
+#define SMB_FILE_ALL_INFO2              0x3fa
+#define SMB_SET_FILE_ALLOCATION_INFO2   0x3fb
+#define SMB_SET_FILE_END_OF_FILE_INFO2  0x3fc
+#define SMB_FILE_MOVE_CLUSTER_INFO      0x407
+#define SMB_FILE_QUOTA_INFO             0x408
+#define SMB_FILE_REPARSEPOINT_INFO      0x409
+#define SMB_FILE_MAXIMUM_INFO           0x40d
+
+/* Find File infolevels */
+#define SMB_FIND_FILE_DIRECTORY_INFO      0x101
+#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
+#define SMB_FIND_FILE_NAMES_INFO          0x103
+#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
+#define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
+#define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
+#define SMB_FIND_FILE_UNIX                0x202
+
+typedef struct smb_com_transaction2_qpi_req {
+	struct smb_hdr hdr;	/* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} TRANSACTION2_QPI_REQ;
+
+typedef struct smb_com_transaction2_qpi_rsp {
+	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u16 Reserved2;	/* parameter word reserved - present for infolevels > 100 */
+} TRANSACTION2_QPI_RSP;
+
+typedef struct smb_com_transaction2_spi_req {
+	struct smb_hdr hdr;	/* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} TRANSACTION2_SPI_REQ;
+
+typedef struct smb_com_transaction2_spi_rsp {
+	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u16 Reserved2;	/* parameter word reserved - present for infolevels > 100 */
+} TRANSACTION2_SPI_RSP;
+
+struct set_file_rename {
+	__le32 overwrite;   /* 1 = overwrite dest */
+	__u32 root_fid;   /* zero */
+	__le32 target_name_len;
+	char  target_name[0];  /* Must be unicode */
+};
+
+struct smb_com_transaction2_sfi_req {
+	struct smb_hdr hdr;	/* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__u16 Fid;
+	__le16 InformationLevel;
+	__u16 Reserved4;	
+};
+
+struct smb_com_transaction2_sfi_rsp {
+	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u16 Reserved2;	/* parameter word reserved - present for infolevels > 100 */
+};
+
+
+/*
+ * Flags on T2 FINDFIRST and FINDNEXT 
+ */
+#define CIFS_SEARCH_CLOSE_ALWAYS  0x0001
+#define CIFS_SEARCH_CLOSE_AT_END  0x0002
+#define CIFS_SEARCH_RETURN_RESUME 0x0004
+#define CIFS_SEARCH_CONTINUE_FROM_LAST 0x0008
+#define CIFS_SEARCH_BACKUP_SEARCH 0x0010
+
+/*
+ * Size of the resume key on FINDFIRST and FINDNEXT calls
+ */
+#define CIFS_SMB_RESUME_KEY_SIZE 4
+
+typedef struct smb_com_transaction2_ffirst_req {
+	struct smb_hdr hdr;	/* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;	/* one */
+	__u8 Reserved3;
+	__le16 SubCommand;	/* TRANS2_FIND_FIRST */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 SearchAttributes;
+	__le16 SearchCount;
+	__le16 SearchFlags;
+	__le16 InformationLevel;
+	__le32 SearchStorageType;
+	char FileName[1];
+} TRANSACTION2_FFIRST_REQ;
+
+typedef struct smb_com_transaction2_ffirst_rsp {
+	struct smb_hdr hdr;	/* wct = 10 */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+} TRANSACTION2_FFIRST_RSP;
+
+typedef struct smb_com_transaction2_ffirst_rsp_parms {
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} T2_FFIRST_RSP_PARMS;
+
+typedef struct smb_com_transaction2_fnext_req {
+	struct smb_hdr hdr;	/* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;	/* one */
+	__u8 Reserved3;
+	__le16 SubCommand;	/* TRANS2_FIND_NEXT */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 InformationLevel;
+	__u32 ResumeKey;
+	__le16 SearchFlags;
+	char ResumeFileName[1];
+} TRANSACTION2_FNEXT_REQ;
+
+typedef struct smb_com_transaction2_fnext_rsp {
+	struct smb_hdr hdr;	/* wct = 10 */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+} TRANSACTION2_FNEXT_RSP;
+
+typedef struct smb_com_transaction2_fnext_rsp_parms {
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} T2_FNEXT_RSP_PARMS;
+
+/* QFSInfo Levels */
+#define SMB_INFO_ALLOCATION         1
+#define SMB_INFO_VOLUME             2
+#define SMB_QUERY_FS_VOLUME_INFO    0x102
+#define SMB_QUERY_FS_SIZE_INFO      0x103
+#define SMB_QUERY_FS_DEVICE_INFO    0x104
+#define SMB_QUERY_FS_ATTRIBUTE_INFO 0x105
+#define SMB_QUERY_CIFS_UNIX_INFO    0x200
+#define SMB_QUERY_POSIX_FS_INFO     0x201
+#define SMB_QUERY_LABEL_INFO        0x3ea
+#define SMB_QUERY_FS_QUOTA_INFO     0x3ee
+#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
+#define SMB_QUERY_OBJECTID_INFO     0x3f0
+
+typedef struct smb_com_transaction2_qfsi_req {
+	struct smb_hdr hdr;	/* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;
+} TRANSACTION2_QFSI_REQ;
+
+typedef struct smb_com_transaction_qfsi_rsp {
+	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u8 Pad;		/* may be three bytes *//* followed by data area */
+} TRANSACTION2_QFSI_RSP;
+
+typedef struct smb_com_transaction2_get_dfs_refer_req {
+	struct smb_hdr hdr;	/* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* one setup word */
+	__le16 ByteCount;
+	__u8 Pad[3];		/* Win2K has sent 0x0F01 (max resp length perhaps?) followed by one byte pad - doesn't seem to matter though */
+	__le16 MaxReferralLevel;
+	char RequestFileName[1];
+} TRANSACTION2_GET_DFS_REFER_REQ;
+
+typedef struct dfs_referral_level_3 {
+	__le16 VersionNumber;
+	__le16 ReferralSize;
+	__le16 ServerType;	/* 0x0001 = CIFS server */
+	__le16 ReferralFlags;	/* or proximity - not clear which since always set to zero - SNIA spec says 0x01 means strip off PathConsumed chars before submitting RequestFileName to remote node */
+	__le16 TimeToLive;
+	__le16 Proximity;
+	__le16 DfsPathOffset;
+	__le16 DfsAlternatePathOffset;
+	__le16 NetworkAddressOffset;
+} REFERRAL3;
+
+typedef struct smb_com_transaction_get_dfs_refer_rsp {
+	struct smb_hdr hdr;	/* wct = 10 */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u8 Pad;
+	__le16 PathConsumed;
+	__le16 NumberOfReferrals;
+	__le16 DFSFlags;
+	__u16 Pad2;
+	REFERRAL3 referrals[1];	/* array of level 3 dfs_referral structures */
+	/* followed by the strings pointed to by the referral structures */
+} TRANSACTION2_GET_DFS_REFER_RSP;
+
+/* DFS Flags */
+#define DFSREF_REFERRAL_SERVER  0x0001
+#define DFSREF_STORAGE_SERVER   0x0002
+
+/* IOCTL information */
+/* List of ioctl function codes that look to be of interest to remote clients like this. */
+/* Need to do some experimentation to make sure they all work remotely.                  */
+/* Some of the following such as the encryption/compression ones would be                */
+/* invoked from tools via a specialized hook into the VFS rather than via the            */
+/* standard vfs entry points */
+#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
+#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
+#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
+#define FSCTL_LOCK_VOLUME            0x00090018
+#define FSCTL_UNLOCK_VOLUME          0x0009001C
+#define FSCTL_GET_COMPRESSION        0x0009003C
+#define FSCTL_SET_COMPRESSION        0x0009C040
+#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
+#define FSCTL_FILESYS_GET_STATISTICS 0x00090090
+#define FSCTL_SET_REPARSE_POINT      0x000900A4
+#define FSCTL_GET_REPARSE_POINT      0x000900A8
+#define FSCTL_DELETE_REPARSE_POINT   0x000900AC
+#define FSCTL_SET_SPARSE             0x000900C4
+#define FSCTL_SET_ZERO_DATA          0x000900C8
+#define FSCTL_SET_ENCRYPTION         0x000900D7
+#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB
+#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF
+#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3
+#define FSCTL_SIS_COPYFILE           0x00090100
+#define FSCTL_SIS_LINK_FILES         0x0009C104
+
+#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
+#define IO_REPARSE_TAG_HSM           0xC0000004
+#define IO_REPARSE_TAG_SIS           0x80000007
+
+/*
+ ************************************************************************
+ * All structs for everything above the SMB PDUs themselves
+ * (such as the T2 level specific data) go here                  
+ ************************************************************************
+ */
+
+/*
+ * Information on a server
+ */
+
+struct serverInfo {
+	char name[16];
+	unsigned char versionMajor;
+	unsigned char versionMinor;
+	unsigned long type;
+	unsigned int commentOffset;
+};
+
+/*
+ * The following structure is the format of the data returned on a NetShareEnum
+ * with level "90" (x5A)
+ */
+
+struct shareInfo {
+	char shareName[13];
+	char pad;
+	unsigned short type;
+	unsigned int commentOffset;
+};
+
+struct aliasInfo {
+	char aliasName[9];
+	char pad;
+	unsigned int commentOffset;
+	unsigned char type[2];
+};
+
+struct aliasInfo92 {
+	int aliasNameOffset;
+	int serverNameOffset;
+	int shareNameOffset;
+};
+
+typedef struct {
+	__le64 TotalAllocationUnits;
+	__le64 FreeAllocationUnits;
+	__le32 SectorsPerAllocationUnit;
+	__le32 BytesPerSector;
+} FILE_SYSTEM_INFO;		/* size info, level 0x103 */
+
+typedef struct {
+	__le16 MajorVersionNumber;
+	__le16 MinorVersionNumber;
+	__le64 Capability;
+} FILE_SYSTEM_UNIX_INFO;	/* Unix extensions info, level 0x200 */
+/* Linux/Unix extensions capability flags */
+#define CIFS_UNIX_FCNTL_CAP             0x00000001 /* support for fcntl locks */
+#define CIFS_UNIX_POSIX_ACL_CAP         0x00000002
+#define CIFS_UNIX_XATTR_CAP             0x00000004 /*support for new namespace*/
+
+typedef struct {
+	/* For undefined recommended transfer size return -1 in that field */
+	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
+	__le32 BlockSize; 
+    /* The next three fields are in terms of the block size
+	(above).  If the block size is unknown, 4096 would be a
+	reasonable block size for a server to report.
+	Note that returning TotalBlocks/BlocksAvail removes the need
+	to make a second call (to QFSInfo level 0x103) to get this info.
+	UserBlocksAvail is typically less than or equal to BlocksAvail;
+	if no distinction is made, return the same value in each */
+	__le64 TotalBlocks;
+	__le64 BlocksAvail;       /* bfree */
+	__le64 UserBlocksAvail;   /* bavail */
+    /* For undefined Node fields or FSID return -1 */
+	__le64 TotalFileNodes;
+	__le64 FreeFileNodes;
+	__le64 FileSysIdentifier;   /* fsid */
+	/* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
+	/* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
+} FILE_SYSTEM_POSIX_INFO;
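+
+/* Illustrative sketch only (hypothetical helper, not in the original
+   header): how the fields above would map onto struct kstatfs, following
+   the bfree/bavail notes in the comments.  Error handling, the -1
+   "undefined" convention and the namelen/flags lookups are elided. */
+static inline void cifs_sketch_posix_info_to_kstatfs(
+			const FILE_SYSTEM_POSIX_INFO *info, struct kstatfs *buf)
+{
+	buf->f_bsize  = le32_to_cpu(info->BlockSize);
+	buf->f_blocks = le64_to_cpu(info->TotalBlocks);
+	buf->f_bfree  = le64_to_cpu(info->BlocksAvail);
+	buf->f_bavail = le64_to_cpu(info->UserBlocksAvail);
+	buf->f_files  = le64_to_cpu(info->TotalFileNodes);
+	buf->f_ffree  = le64_to_cpu(info->FreeFileNodes);
+}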
+
+/* DeviceType Flags */
+#define FILE_DEVICE_CD_ROM              0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
+#define FILE_DEVICE_DFS                 0x00000006
+#define FILE_DEVICE_DISK                0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
+#define FILE_DEVICE_FILE_SYSTEM         0x00000009
+#define FILE_DEVICE_NAMED_PIPE          0x00000011
+#define FILE_DEVICE_NETWORK             0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL                0x00000015
+#define FILE_DEVICE_PARALLEL_PORT       0x00000016
+#define FILE_DEVICE_PRINTER             0x00000018
+#define FILE_DEVICE_SERIAL_PORT         0x0000001b
+#define FILE_DEVICE_STREAMS             0x0000001e
+#define FILE_DEVICE_TAPE                0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
+
+typedef struct {
+	__le32 DeviceType;
+	__le32 DeviceCharacteristics;
+} FILE_SYSTEM_DEVICE_INFO;	/* device info, level 0x104 */
+
+typedef struct {
+	__le32 Attributes;
+	__le32 MaxPathNameComponentLength;
+	__le32 FileSystemNameLen;
+	char FileSystemName[52]; /* we do not really need to save this - so potentially only a subset of the name is fetched */
+} FILE_SYSTEM_ATTRIBUTE_INFO;
+
+/******************************************************************************/
+/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+/******************************************************************************/
+typedef struct { /* data block encoding of response to level 263 QPathInfo */
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad1;
+	__le64 AllocationSize;
+	__le64 EndOfFile;	/* size ie offset to first free byte in file */
+	__le32 NumberOfLinks;	/* hard links */
+	__u8 DeletePending;
+	__u8 Directory;
+	__u16 Pad2;
+	__u64 IndexNumber;
+	__le32 EASize;
+	__le32 AccessFlags;
+	__u64 IndexNumber1;
+	__le64 CurrentByteOffset;
+	__le32 Mode;
+	__le32 AlignmentRequirement;
+	__le32 FileNameLength;
+	char FileName[1];
+} FILE_ALL_INFO;		/* level 0x107 QPathInfo */
+
+/* defines for enumerating possible values of the Unix type field below */
+#define UNIX_FILE      0
+#define UNIX_DIR       1
+#define UNIX_SYMLINK   2
+#define UNIX_CHARDEV   3
+#define UNIX_BLOCKDEV  4
+#define UNIX_FIFO      5
+#define UNIX_SOCKET    6
+typedef struct {
+	__le64 EndOfFile;
+	__le64 NumOfBytes;
+	__le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
+	__le64 LastAccessTime;
+	__le64 LastModificationTime;
+	__le64 Uid;
+	__le64 Gid;
+	__le32 Type;
+	__le64 DevMajor;
+	__le64 DevMinor;
+	__u64 UniqueId;
+	__le64 Permissions;
+	__le64 Nlinks;
+} FILE_UNIX_BASIC_INFO;		/* level 0x200 QPathInfo */
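+
+/* Illustrative sketch only (hypothetical helper, not in the original
+   header): one plausible mapping from the UNIX_* Type values above to
+   Linux S_IF* mode bits; the actual client mapping may differ. */
+static inline umode_t cifs_sketch_unix_type_to_ifmt(__u32 type)
+{
+	switch (type) {
+	case UNIX_FILE:		return S_IFREG;
+	case UNIX_DIR:		return S_IFDIR;
+	case UNIX_SYMLINK:	return S_IFLNK;
+	case UNIX_CHARDEV:	return S_IFCHR;
+	case UNIX_BLOCKDEV:	return S_IFBLK;
+	case UNIX_FIFO:		return S_IFIFO;
+	case UNIX_SOCKET:	return S_IFSOCK;
+	default:		return S_IFREG;
+	}
+}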
+
+typedef struct {
+	char LinkDest[1];
+} FILE_UNIX_LINK_INFO;		/* level 0x201 QPathInfo */
+
+/* The following three structures are needed only for
+	setting time to NT4 and some older servers via
+	the primitive DOS time format */
+typedef struct {
+	__u16 Day:5;
+	__u16 Month:4;
+	__u16 Year:7;
+} SMB_DATE;
+
+typedef struct {
+	__u16 TwoSeconds:5;
+	__u16 Minutes:6;
+	__u16 Hours:5;
+} SMB_TIME;
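+
+/* Illustrative sketch only (hypothetical helpers, not in the original
+   header): packing a calendar date/time into the 16-bit DOS format that
+   the two bitfield structs above describe, assuming the usual DOS layout
+   (year relative to 1980, seconds stored at two-second granularity). */
+static inline __u16 cifs_sketch_dos_date(int year, int month, int day)
+{
+	return (__u16)(((year - 1980) << 9) | (month << 5) | day);
+}
+
+static inline __u16 cifs_sketch_dos_time(int hours, int minutes, int seconds)
+{
+	return (__u16)((hours << 11) | (minutes << 5) | (seconds / 2));
+}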
+
+typedef struct {
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le32 EASize;
+} FILE_INFO_STANDARD;  /* level 1 SetPath/FileInfo */
+
+typedef struct {
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad;
+} FILE_BASIC_INFO;		/* size info, level 0x101 */
+
+struct file_allocation_info {
+	__le64 AllocationSize; /* Note: old Samba servers round this up too much */
+};	/* size used on disk, level 0x103 for set, 0x105 for query */
+
+struct file_end_of_file_info {
+	__le64 FileSize;		/* offset to end of file */
+};	/* size info, level 0x104 for set, 0x106 for query */
+
+struct file_alt_name_info {
+	__u8   alt_name[1];
+};      /* level 0x0108 */
+
+struct file_stream_info {
+	__le32 number_of_streams;  /* BB check sizes and verify location */
+	/* followed by info on streams themselves 
+		u64 size;
+		u64 allocation_size 
+		stream info */
+};      /* level 0x109 */
+
+struct file_compression_info {
+	__le64 compressed_size;
+	__le16 format;
+	__u8   unit_shift;
+	__u8   ch_shift;
+	__u8   cl_shift;
+	__u8   pad[3];
+};      /* level 0x10b */
+
+/* POSIX ACL set/query path info structures */
+#define CIFS_ACL_VERSION 1
+struct cifs_posix_ace { /* access control entry (ACE) */
+	__u8  cifs_e_tag;
+	__u8  cifs_e_perm;
+	__le64 cifs_uid; /* or gid */
+}; 
+
+struct cifs_posix_acl { /* access control list (ACL) */
+	__le16	version;
+	__le16	access_entry_count;  /* access ACL - count of entries */
+	__le16	default_entry_count; /* default ACL - count of entries */
+	struct cifs_posix_ace ace_array[0];
+	/* followed by
+	struct cifs_posix_ace default_ace_array[] */
+};  /* level 0x204 */
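+
+/* Illustrative sketch only (hypothetical helper, not in the original
+   header): total on-the-wire size of the level 0x204 blob above - the
+   fixed header followed by the access ACEs and then the default ACEs. */
+static inline size_t cifs_sketch_posix_acl_size(const struct cifs_posix_acl *acl)
+{
+	return sizeof(struct cifs_posix_acl) +
+		(le16_to_cpu(acl->access_entry_count) +
+		 le16_to_cpu(acl->default_entry_count)) *
+		sizeof(struct cifs_posix_ace);
+}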
+
+/* types of access control entries already defined in posix_acl.h */
+/* #define CIFS_POSIX_ACL_USER_OBJ	 0x01
+#define CIFS_POSIX_ACL_USER      0x02
+#define CIFS_POSIX_ACL_GROUP_OBJ 0x04
+#define CIFS_POSIX_ACL_GROUP     0x08
+#define CIFS_POSIX_ACL_MASK      0x10
+#define CIFS_POSIX_ACL_OTHER     0x20 */
+
+/* types of perms */
+/* #define CIFS_POSIX_ACL_EXECUTE   0x01
+#define CIFS_POSIX_ACL_WRITE     0x02
+#define CIFS_POSIX_ACL_READ	     0x04 */
+
+/* end of POSIX ACL definitions */
+
+struct file_internal_info {
+	__u64  UniqueId; /* inode number */
+};      /* level 0x3ee */
+struct file_mode_info {
+	__le32	Mode;
+};      /* level 0x3f8 */
+
+struct file_attrib_tag {
+	__le32 Attribute;
+	__le32 ReparseTag;
+};      /* level 0x40b */
+
+
+/********************************************************/
+/*  FindFirst/FindNext transact2 data buffer formats    */ 
+/********************************************************/
+
+typedef struct {
+	__le32 NextEntryOffset;
+	__u32 ResumeKey; /* as with FileIndex - no need to convert */
+	__le64 EndOfFile;
+	__le64 NumOfBytes;
+	__le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
+	__le64 LastAccessTime;
+	__le64 LastModificationTime;
+	__le64 Uid;
+	__le64 Gid;
+	__le32 Type;
+	__le64 DevMajor;
+	__le64 DevMinor;
+	__u64 UniqueId;
+	__le64 Permissions;
+	__le64 Nlinks;
+	char FileName[1];
+} FILE_UNIX_INFO; /* level 0x202 */
+
+typedef struct {
+	__le32 NextEntryOffset;
+	__u32 FileIndex;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le64 EndOfFile;
+	__le64 AllocationSize;
+	__le32 ExtFileAttributes;
+	__le32 FileNameLength;
+	char FileName[1];
+} FILE_DIRECTORY_INFO;   /* level 0x101 FF response data area */
+
+typedef struct {
+	__le32 NextEntryOffset;
+	__u32 FileIndex;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le64 EndOfFile;
+	__le64 AllocationSize;
+	__le32 ExtFileAttributes;
+	__le32 FileNameLength;
+	__le32 EaSize; /* length of the xattrs */
+	char FileName[1];
+} FILE_FULL_DIRECTORY_INFO;   /* level 0x102 FF response data area */
+
+typedef struct {
+	__le32 NextEntryOffset;
+	__u32 FileIndex;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le64 EndOfFile;
+	__le64 AllocationSize;
+	__le32 ExtFileAttributes;
+	__le32 FileNameLength;
+	__le32 EaSize; /* EA size */
+	__le32 Reserved;
+	__u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bits */
+	char FileName[1];
+} SEARCH_ID_FULL_DIR_INFO;   /* level 0x105 FF response data area */
+
+typedef struct {
+	__le32 NextEntryOffset;
+	__u32 FileIndex;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le64 EndOfFile;
+	__le64 AllocationSize;
+	__le32 ExtFileAttributes;
+	__le32 FileNameLength; 
+	__le32 EaSize; /* length of the xattrs */
+	__u8   ShortNameLength;
+	__u8   Reserved;
+	__u8   ShortName[12];
+	char FileName[1];
+} FILE_BOTH_DIRECTORY_INFO;   /* level 0x104 FF response data area */
+
+
+struct gea {
+	unsigned char name_len;
+	char name[1];
+};
+
+struct gealist {
+	unsigned long list_len;
+	struct gea list[1];
+};
+
+struct fea {
+	unsigned char EA_flags;
+	__u8 name_len;
+	__le16 value_len;
+	char name[1];
+	/* optionally followed by value */
+};
+/* flags for fea.EA_flags (_FEA.fEA in the OS/2 spec) */
+#define FEA_NEEDEA         0x80	/* need EA bit */
+
+struct fealist {
+	__le32 list_len;
+	struct fea list[1];
+};
+
+/* used to hold an arbitrary blob of data */
+struct data_blob {
+	__u8 *data;
+	size_t length;
+	void (*free) (struct data_blob * data_blob);
+};
+
+
+#ifdef CONFIG_CIFS_POSIX
+/* 
+	For better POSIX semantics from Linux client, (even better
+	than the existing CIFS Unix Extensions) we need updated PDUs for:
+	
+	1) PosixCreateX - to set and return the mode, inode#, device info and
+	perhaps add a CreateDevice - to create Pipes and other special inodes
+	Also note POSIX open flags
+	2) Close - to return the last write time so that caching across close is safer
+	3) PosixQFSInfo - to return statfs info
+	4) FindFirst return unique inode number - what about resume key, two forms short (matches readdir) and full (enough info to cache inodes)
+	5) Mkdir - set mode
+	
+	And under consideration: 
+	6) FindClose2 (return nanosecond timestamp ??)
+	7) Use nanosecond timestamps throughout all time fields if 
+	   corresponding attribute flag is set
+	8) sendfile - handle based copy
+	9) Direct i/o
+	10) "POSIX ACL" support
+	11) Misc fcntls?
+	
+	what about fixing 64 bit alignment
+	
+	There are also various legacy SMB/CIFS requests used as is
+	
+	From existing Lanman and NTLM dialects:
+	--------------------------------------
+	NEGOTIATE
+	SESSION_SETUP_ANDX (BB which?)
+	TREE_CONNECT_ANDX (BB which wct?)
+	TREE_DISCONNECT (BB add volume timestamp on response)
+	LOGOFF_ANDX
+	DELETE (note delete open file behavior)
+	DELETE_DIRECTORY
+	READ_AND_X
+	WRITE_AND_X
+	LOCKING_AND_X (note posix lock semantics)
+	RENAME (note rename across dirs and open file rename posix behaviors)
+	NT_RENAME (for hardlinks) Is this good enough for all features?
+	FIND_CLOSE2
+	TRANSACTION2 (18 cases)
+		SMB_SET_FILE_END_OF_FILE_INFO2 SMB_SET_PATH_END_OF_FILE_INFO2
+		(BB verify that never need to set allocation size)
+		SMB_SET_FILE_BASIC_INFO2 (setting times - BB can it be done via Unix ext?)
+	
+	COPY (note support for copy across directories) - FUTURE, OPTIONAL
+	setting/getting OS/2 EAs - FUTURE (BB can this handle
+	setting Linux xattrs perfectly)         - OPTIONAL
+	dnotify                                 - FUTURE, OPTIONAL
+	quota                                   - FUTURE, OPTIONAL
+			
+	Note that various requests implemented for NT interop such as 
+		NT_TRANSACT (IOCTL) QueryReparseInfo
+	are unneeded to servers compliant with the CIFS POSIX extensions
+	
+	From CIFS Unix Extensions:
+	-------------------------
+	T2 SET_PATH_INFO (SMB_SET_FILE_UNIX_LINK) for symlinks
+	T2 SET_PATH_INFO (SMB_SET_FILE_BASIC_INFO2)
+	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_LINK)
+	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC) - BB check for missing inode fields
+					Actually need QUERY_FILE_UNIX_INFO since has inode num
+					BB what about a) blksize/blkbits/blocks
+								  b) i_version
+								  c) i_rdev
+								  d) notify mask?
+								  e) generation
+								  f) size_seqcount
+	T2 FIND_FIRST/FIND_NEXT FIND_FILE_UNIX
+	TRANS2_GET_DFS_REFERRAL				  - OPTIONAL but recommended
+	T2_QFS_INFO QueryDevice/AttributeInfo - OPTIONAL
+	
+	
+ */
+
+/* xsymlink is a symlink format that can be used
+   to save symlink info in a regular file when 
+   mounted to operating systems that do not
+   support the cifs Unix extensions or EAs (for xattr
+   based symlinks).  For such a file to be recognized
+   as containing symlink data: 
+
+   1) file size must be 1067, 
+   2) signature must begin file data,
+   3) length field must be set to ASCII representation
+	of a number which is less than or equal to 1024, 
+   4) md5 must match that of the path data */
+
+struct xsymlink {
+	/* 1067 bytes */
+	char signature[4]; /* XSym */ /* not null terminated */
+	char cr0;         /* \n */
+/* ASCII representation of length (4 bytes decimal) terminated by \n not null */
+	char length[4];
+	char cr1;         /* \n */
+/* md5 of valid subset of path ie path[0] through path[length-1] */
+	__u8 md5[32];    
+	char cr2;        /* \n */
+/* if room left, then end with \n then 0x20s by convention but not required */
+	char path[1024];  
+};
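+
+/* Illustrative sketch only (hypothetical helper, not in the original
+   header): the recognition checks described in the comment above, minus
+   the md5 comparison, which would need the crypto layer.  Returns the
+   decoded path length, or -1 if the buffer does not look like an
+   xsymlink. */
+static inline int cifs_sketch_xsymlink_path_len(const struct xsymlink *x)
+{
+	unsigned int len = 0;
+	int i;
+
+	if (memcmp(x->signature, "XSym", 4) != 0)
+		return -1;
+	for (i = 0; i < 4; i++) {	/* 4 ASCII decimal digits */
+		if (x->length[i] < '0' || x->length[i] > '9')
+			break;
+		len = len * 10 + (x->length[i] - '0');
+	}
+	if (i == 0 || len > 1024)
+		return -1;
+	return len;	/* caller would still verify the md5 over path[0..len-1] */
+}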
+
+typedef struct {
+	/* BB do we need another field for flags? BB */
+	__u32 xattr_name_len;
+	__u32 xattr_value_len;
+	char  xattr_name[0];
+	/* followed by xattr_value[xattr_value_len], no pad */
+} FILE_XATTR_INFO;	/* extended attribute, info level 205 */
+
+
+#endif 
+
+#pragma pack()			/* resume default structure packing */
+
+#endif				/* _CIFSPDU_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
new file mode 100644
index 0000000..787eef4
--- /dev/null
+++ b/fs/cifs/cifsproto.h
@@ -0,0 +1,269 @@
+/*
+ *   fs/cifs/cifsproto.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002,2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+#ifndef _CIFSPROTO_H
+#define _CIFSPROTO_H
+#include <linux/nls.h>
+
+struct statfs;
+
+/*
+ *****************************************************************
+ * All Prototypes
+ *****************************************************************
+ */
+
+extern struct smb_hdr *cifs_buf_get(void);
+extern void cifs_buf_release(void *);
+extern struct smb_hdr *cifs_small_buf_get(void);
+extern void cifs_small_buf_release(void *);
+extern int smb_send(struct socket *, struct smb_hdr *,
+			unsigned int /* length */ , struct sockaddr *);
+extern unsigned int _GetXid(void);
+extern void _FreeXid(unsigned int);
+#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__FUNCTION__, xid,current->fsuid));
+#define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));}
+extern char *build_path_from_dentry(struct dentry *);
+extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+extern void renew_parental_timestamps(struct dentry *direntry);
+extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
+			struct smb_hdr * /* input */ ,
+			struct smb_hdr * /* out */ ,
+			int * /* bytes returned */ , const int long_op);
+extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
+extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
+extern int is_valid_oplock_break(struct smb_hdr *smb);
+extern int is_size_safe_to_change(struct cifsInodeInfo *);
+extern unsigned int smbCalcSize(struct smb_hdr *ptr);
+extern int decode_negTokenInit(unsigned char *security_blob, int length,
+			enum securityEnum *secType);
+extern int cifs_inet_pton(int, char * source, void *dst);
+extern int map_smb_to_linux_error(struct smb_hdr *smb);
+extern void header_assemble(struct smb_hdr *, char /* command */ ,
+			const struct cifsTconInfo *, int
+			/* length of fixed section (word count) in two byte units  */
+			);
+extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, struct cifsTconInfo *);
+extern void DeleteOplockQEntry(struct oplock_q_entry *);
+extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
+extern u64 cifs_UnixTimeToNT(struct timespec);
+extern int cifs_get_inode_info(struct inode **pinode,
+			const unsigned char *search_path, 
+			FILE_ALL_INFO * pfile_info,
+			struct super_block *sb, int xid);
+extern int cifs_get_inode_info_unix(struct inode **pinode,
+			const unsigned char *search_path,
+			struct super_block *sb,int xid);
+
+extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
+			const char *);
+extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
+void cifs_proc_init(void);
+void cifs_proc_clean(void);
+
+extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, 
+			struct nls_table * nls_info);
+extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
+
+extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
+			const char *tree, struct cifsTconInfo *tcon,
+			const struct nls_table *);
+
+extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+            const char *searchName, const struct nls_table *nls_codepage,
+            __u16 *searchHandle, struct cifs_search_info * psrch_inf);
+
+extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+            __u16 searchHandle, struct cifs_search_info * psrch_inf);
+
+extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
+			const __u16 search_handle);
+
+extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			FILE_ALL_INFO * findData,
+			const struct nls_table *nls_codepage);
+
+extern int CIFSSMBUnixQPathInfo(const int xid,
+			struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			FILE_UNIX_BASIC_INFO * pFindData,
+			const struct nls_table *nls_codepage);
+
+extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+			const unsigned char *searchName,
+			unsigned char **targetUNCs,
+			unsigned int *number_of_UNC_in_array,
+			const struct nls_table *nls_codepage);
+
+extern int connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+			const char *old_path,
+			const struct nls_table *nls_codepage);
+extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+			const char *old_path, const struct nls_table *nls_codepage, 
+			unsigned int *pnum_referrals, unsigned char ** preferrals);
+extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
+			struct kstatfs *FSData,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBQFSAttributeInfo(const int xid,
+			struct cifsTconInfo *tcon,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+			struct kstatfs *FSData, const struct nls_table *nls_codepage);
+
+extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
+			const char *fileName, const FILE_BASIC_INFO * data,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
+			const FILE_BASIC_INFO * data, __u16 fid);
+#if 0
+extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
+			char *fileName, __u16 dos_attributes,
+			const struct nls_table *nls_codepage);
+#endif /* possibly unneeded function */
+extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
+			const char *fileName, __u64 size,int setAllocationSizeFlag,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
+			 __u64 size, __u16 fileHandle,__u32 opener_pid, int AllocSizeFlag);
+extern int CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *pTcon,
+			char *full_path, __u64 mode, __u64 uid,
+			__u64 gid, dev_t dev, const struct nls_table *nls_codepage);
+
+extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+			const char *newName,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
+			const char *name, const struct nls_table *nls_codepage);
+
+extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
+			const char *name,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+			const char *fromName, const char *toName,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
+			int netfid, char * target_name, const struct nls_table *nls_codepage);
+extern int CIFSCreateHardLink(const int xid,
+			struct cifsTconInfo *tcon,
+			const char *fromName, const char *toName,
+			const struct nls_table *nls_codepage);
+extern int CIFSUnixCreateHardLink(const int xid,
+			struct cifsTconInfo *tcon,
+			const char *fromName, const char *toName,
+			const struct nls_table *nls_codepage);
+extern int CIFSUnixCreateSymLink(const int xid,
+			struct cifsTconInfo *tcon,
+			const char *fromName, const char *toName,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBUnixQuerySymLink(const int xid,
+			struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			char *syminfo, const int buflen,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBQueryReparseLinkInfo(const int xid, 
+			struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			char *symlinkinfo, const int buflen, __u16 fid,
+			const struct nls_table *nls_codepage);
+
+extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+			const char *fileName, const int disposition,
+			const int access_flags, const int omode,
+			__u16 * netfid, int *pOplock, FILE_ALL_INFO *,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
+			const int smb_file_id);
+
+extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
+			const int netfid, unsigned int count,
+			const __u64 lseek, unsigned int *nbytes, char **buf);
+extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+			const int netfid, const unsigned int count,
+			const __u64 lseek, unsigned int *nbytes,
+			const char *buf, const char __user *ubuf, 
+			const int long_op);
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
+			const int netfid, const unsigned int count,
+			const __u64 offset, unsigned int *nbytes, 
+			const char __user *buf,const int long_op);
+extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+			const unsigned char *searchName, __u64 * inode_number,
+			const struct nls_table *nls_codepage);
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+
+extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+			const __u16 netfid, const __u64 len,
+			const __u64 offset, const __u32 numUnlock,
+			const __u32 numLock, const __u8 lockType,
+			const int waitFlag);
+
+extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+
+extern struct cifsSesInfo *sesInfoAlloc(void);
+extern void sesInfoFree(struct cifsSesInfo *);
+extern struct cifsTconInfo *tconInfoAlloc(void);
+extern void tconInfoFree(struct cifsTconInfo *);
+
+extern int cifs_reconnect(struct TCP_Server_Info *server);
+
+extern int cifs_sign_smb(struct smb_hdr *, struct cifsSesInfo *,__u32 *);
+extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
+	__u32 expected_sequence_number);
+extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
+extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
+extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
+extern int CIFSSMBCopy(int xid,
+			struct cifsTconInfo *source_tcon,
+			const char *fromName,
+			const __u16 target_tid,
+			const char *toName, const int flags,
+			const struct nls_table *nls_codepage);
+extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 
+			const int notify_subdirs,const __u16 netfid,__u32 filter,
+			const struct nls_table *nls_codepage);
+extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+			const unsigned char *searchName, char * EAData,
+			size_t bufsize, const struct nls_table *nls_codepage);
+extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
+		const unsigned char * searchName,const unsigned char * ea_name,
+		unsigned char * ea_value, size_t buf_size, 
+		const struct nls_table *nls_codepage);
+extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, 
+		const char *fileName, const char * ea_name, 
+		const void * ea_value, const __u16 ea_value_len, 
+		const struct nls_table *nls_codepage);
+extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+		const unsigned char *searchName,
+		char *acl_inf, const int buflen,const int acl_type,
+		const struct nls_table *nls_codepage);
+extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+		const unsigned char *fileName,
+		const char *local_acl, const int buflen, const int acl_type,
+		const struct nls_table *nls_codepage);
+int cifs_ioctl (struct inode * inode, struct file * filep,
+                unsigned int command, unsigned long arg);
+#endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
new file mode 100644
index 0000000..df6a619
--- /dev/null
+++ b/fs/cifs/cifssmb.c
@@ -0,0 +1,4186 @@
+/*
+ *   fs/cifs/cifssmb.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   Contains the routines for constructing the SMB PDUs themselves
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ /* SMB/CIFS PDU handling routines here - except for leftovers in connect.c   */
+ /* These are mostly routines that operate on a pathname, or on a tree id     */
+ /* (mounted volume), but there are eight handle based routines which must be */
+ /* treated slightly differently for reconnection purposes since we never want */
+ /* to reuse a stale file handle and the caller knows the file handle */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/vfs.h>
+#include <linux/posix_acl_xattr.h>
+#include <asm/uaccess.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+
+#ifdef CONFIG_CIFS_POSIX
+static struct {
+	int index;
+	char *name;
+} protocols[] = {
+	{CIFS_PROT, "\2NT LM 0.12"}, 
+	{CIFS_PROT, "\2POSIX 2"},
+	{BAD_PROT, "\2"}
+};
+#else
+static struct {
+	int index;
+	char *name;
+} protocols[] = {
+	{CIFS_PROT, "\2NT LM 0.12"}, 
+	{BAD_PROT, "\2"}
+};
+#endif
+
+
+/* Mark all open files on tree connections as invalid, since they
+   were closed when the session to the server was lost */
+static void mark_open_files_invalid(struct cifsTconInfo * pTcon)
+{
+	struct cifsFileInfo *open_file = NULL;
+	struct list_head * tmp;
+	struct list_head * tmp1;
+
+/* list all files open on tree connection and mark them invalid */
+	write_lock(&GlobalSMBSeslock);
+	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
+		open_file = list_entry(tmp,struct cifsFileInfo, tlist);
+		if(open_file) {
+			open_file->invalidHandle = TRUE;
+		}
+	}
+	write_unlock(&GlobalSMBSeslock);
+	/* BB Add call to invalidate_inodes(sb) for all superblocks mounted to this tcon */
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+	 void **request_buf /* returned */)
+{
+	int rc = 0;
+
+	/* SMBs NegProt, SessSetup, uLogoff do not have a tcon yet, so the
+	   check for tcp and smb session status is done differently
+	   for those three - in the calling routine */
+	if(tcon) {
+		if((tcon->ses) && (tcon->ses->server)){
+			struct nls_table *nls_codepage;
+				/* Give the demultiplex thread up to 10 seconds to
+					reconnect; this should be greater than the cifs
+					socket timeout, which is 7 seconds */
+			while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+				wait_event_interruptible_timeout(tcon->ses->server->response_q,
+					(tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
+				if(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+					/* on "soft" mounts we wait once */
+					if((tcon->retry == FALSE) || 
+					   (tcon->ses->status == CifsExiting)) {
+						cFYI(1,("gave up waiting on reconnect in smb_init"));
+						return -EHOSTDOWN;
+					} /* else "hard" mount - keep retrying until 
+					process is killed or server comes back up */
+				} else /* TCP session is reestablished now */
+					break;
+				 
+			}
+			
+			nls_codepage = load_nls_default();
+		/* need to prevent multiple threads trying to
+		simultaneously reconnect the same SMB session */
+			down(&tcon->ses->sesSem);
+			if(tcon->ses->status == CifsNeedReconnect)
+				rc = cifs_setup_session(0, tcon->ses, nls_codepage);
+			if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+				mark_open_files_invalid(tcon);
+				rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon,
+					nls_codepage);
+				up(&tcon->ses->sesSem);
+				if(rc == 0)
+					atomic_inc(&tconInfoReconnectCount);
+
+				cFYI(1, ("reconnect tcon rc = %d", rc));
+				/* Removed call to reopen open files here - 
+					it is safer (and faster) to reopen files
+					one at a time as needed in read and write */
+
+				/* Check if this is a handle based operation, so we
+					know whether we can continue or not without
+					returning to the caller to reset the file handle */
+				switch(smb_command) {
+					case SMB_COM_READ_ANDX:
+					case SMB_COM_WRITE_ANDX:
+					case SMB_COM_CLOSE:
+					case SMB_COM_FIND_CLOSE2:
+					case SMB_COM_LOCKING_ANDX: {
+						unload_nls(nls_codepage);
+						return -EAGAIN;
+					}
+				}
+			} else {
+				up(&tcon->ses->sesSem);
+			}
+			unload_nls(nls_codepage);
+
+		} else {
+			return -EIO;
+		}
+	}
+	if(rc)
+		return rc;
+
+	*request_buf = cifs_small_buf_get();
+	if (*request_buf == NULL) {
+		/* BB should we add a retry in here if not a writepage? */
+		return -ENOMEM;
+	}
+
+	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,wct);
+
+#ifdef CONFIG_CIFS_STATS
+        if(tcon != NULL) {
+                atomic_inc(&tcon->num_smbs_sent);
+        }
+#endif /* CONFIG_CIFS_STATS */
+	return rc;
+}  
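+
+/* A condensed sketch of the retry convention that the path based callers in
+   this file follow: on -EAGAIN (returned once the tcon has been reconnected)
+   the request is simply rebuilt and resent, since no stale file handle is
+   involved; handle based calls instead pass -EAGAIN back up so the file can
+   be reopened first.  The routine name and generic smb_command below are
+   illustrative only. */
+#if 0
+static int example_path_based_call(const int xid, struct cifsTconInfo *tcon,
+				   int smb_command)
+{
+	struct smb_hdr *smb_buffer;
+	int bytes_returned;
+	int rc;
+
+retry:
+	rc = small_smb_init(smb_command, 0 /* wct */, tcon,
+			    (void **) &smb_buffer);
+	if (rc)
+		return rc;
+	/* ... fill in the rest of the PDU here ... */
+	rc = SendReceive(xid, tcon->ses, smb_buffer, smb_buffer,
+			 &bytes_returned, 0);
+	cifs_small_buf_release(smb_buffer);
+	if (rc == -EAGAIN)
+		goto retry;	/* tcon reconnected - rebuild and resend */
+	return rc;
+}
+#endif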
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+	 void **request_buf /* returned */ ,
+	 void **response_buf /* returned */ )
+{
+	int rc = 0;
+
+	/* SMBs NegProt, SessSetup, uLogoff do not have a tcon yet, so the
+	   check for tcp and smb session status is done differently
+	   for those three - in the calling routine */
+	if(tcon) {
+		if((tcon->ses) && (tcon->ses->server)){
+			struct nls_table *nls_codepage;
+				/* Give the demultiplex thread up to 10 seconds to
+					reconnect; this should be greater than the cifs
+					socket timeout, which is 7 seconds */
+			while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+				wait_event_interruptible_timeout(tcon->ses->server->response_q,
+					(tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
+				if(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+					/* on "soft" mounts we wait once */
+					if((tcon->retry == FALSE) || 
+					   (tcon->ses->status == CifsExiting)) {
+						cFYI(1,("gave up waiting on reconnect in smb_init"));
+						return -EHOSTDOWN;
+					} /* else "hard" mount - keep retrying until 
+					process is killed or server comes back up */
+				} else /* TCP session is reestablished now */
+					break;
+				 
+			}
+			
+			nls_codepage = load_nls_default();
+		/* need to prevent multiple threads trying to
+		simultaneously reconnect the same SMB session */
+			down(&tcon->ses->sesSem);
+			if(tcon->ses->status == CifsNeedReconnect)
+				rc = cifs_setup_session(0, tcon->ses, nls_codepage);
+			if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+				mark_open_files_invalid(tcon);
+				rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon,
+					nls_codepage);
+				up(&tcon->ses->sesSem);
+				if(rc == 0)
+					atomic_inc(&tconInfoReconnectCount);
+
+				cFYI(1, ("reconnect tcon rc = %d", rc));
+				/* Removed call to reopen open files here - 
+					it is safer (and faster) to reopen files
+					one at a time as needed in read and write */
+
+				/* Check if this is a handle based operation, so we
+					know whether we can continue or not without
+					returning to the caller to reset the file handle */
+				switch(smb_command) {
+					case SMB_COM_READ_ANDX:
+					case SMB_COM_WRITE_ANDX:
+					case SMB_COM_CLOSE:
+					case SMB_COM_FIND_CLOSE2:
+					case SMB_COM_LOCKING_ANDX: {
+						unload_nls(nls_codepage);
+						return -EAGAIN;
+					}
+				}
+			} else {
+				up(&tcon->ses->sesSem);
+			}
+			unload_nls(nls_codepage);
+
+		} else {
+			return -EIO;
+		}
+	}
+	if(rc)
+		return rc;
+
+	*request_buf = cifs_buf_get();
+	if (*request_buf == NULL) {
+		/* BB should we add a retry in here if not a writepage? */
+		return -ENOMEM;
+	}
+    /* Although the original thought was that we needed the response buf */
+    /* for potential retries of smb operations, it turns out we can      */
+    /* determine from the mid flags when the request buffer can be       */
+    /* resent without having to use a second distinct buffer for the     */
+    /* response                                                          */
+	*response_buf = *request_buf; 
+
+	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
+			wct /*wct */ );
+
+#ifdef CONFIG_CIFS_STATS
+        if(tcon != NULL) {
+                atomic_inc(&tcon->num_smbs_sent);
+        }
+#endif /* CONFIG_CIFS_STATS */
+	return rc;
+}
+
+static int validate_t2(struct smb_t2_rsp * pSMB) 
+{
+	int rc = -EINVAL;
+	int total_size;
+	char * pBCC;
+
+	/* check for plausible wct, bcc and t2 data and parm sizes */
+	/* check for parm and data offset going beyond end of smb */
+	if(pSMB->hdr.WordCount >= 10) {
+		if((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
+		   (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
+			/* check that bcc is at least as big as parms + data */
+			/* check that bcc is less than negotiated smb buffer */
+			total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
+			if(total_size < 512) {
+				total_size+=le16_to_cpu(pSMB->t2_rsp.DataCount);
+				/* BCC le converted in SendReceive */
+				pBCC = (pSMB->hdr.WordCount * 2) + sizeof(struct smb_hdr) + 
+					(char *)pSMB;
+				if((total_size <= (*(u16 *)pBCC)) && 
+				   (total_size < 
+					CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
+					return 0;
+				}
+				
+			}
+		}
+	}
+	cifs_dump_mem("Invalid transact2 SMB: ",(char *)pSMB,
+		sizeof(struct smb_t2_rsp) + 16);
+	return rc;
+}
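+
+/* A small sketch of where the byte count (BCC) sits in an SMB response, as
+   used by validate_t2() above: immediately after the variable length word
+   area, i.e. at sizeof(struct smb_hdr) + 2 * WordCount bytes from the start
+   of the PDU, followed by ParameterCount + DataCount bytes of payload.
+   The helper name is hypothetical. */
+#if 0
+static char *example_locate_bcc(struct smb_hdr *hdr)
+{
+	/* e.g. for a WordCount of 10 the BCC starts 20 bytes past the
+	   fixed header */
+	return (char *)hdr + sizeof(struct smb_hdr) + (hdr->WordCount * 2);
+}
+#endif
+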
+int
+CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
+{
+	NEGOTIATE_REQ *pSMB;
+	NEGOTIATE_RSP *pSMBr;
+	int rc = 0;
+	int bytes_returned;
+	struct TCP_Server_Info * server;
+	u16 count;
+
+	if(ses->server)
+		server = ses->server;
+	else {
+		rc = -EIO;
+		return rc;
+	}
+	rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ ,
+		      (void **) &pSMB, (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
+	if (extended_security)
+		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
+
+	count = strlen(protocols[0].name) + 1;
+	strncpy(pSMB->DialectsArray, protocols[0].name, 30);	
+    /* null guaranteed to be at end of source and target buffers anyway */
+
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc == 0) {
+		server->secMode = pSMBr->SecurityMode;	
+		server->secType = NTLM; /* BB override default for NTLMv2 or krb*/
+		/* one byte - no need to convert this or EncryptionKeyLen from le,*/
+		server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
+		/* probably no need to store and check maxvcs */
+		server->maxBuf =
+			min(le32_to_cpu(pSMBr->MaxBufferSize),
+			(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
+		server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
+		cFYI(0, ("Max buf = %d ", ses->server->maxBuf));
+		GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
+		server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+		server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);	
+        /* BB with UTC do we ever need to be using srvr timezone? */
+		if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+			memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
+			       CIFS_CRYPTO_KEY_SIZE);
+		} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
+			   && (pSMBr->EncryptionKeyLength == 0)) {
+			/* decode security blob */
+		} else
+			rc = -EIO;
+
+		/* BB might be helpful to save off the domain of server here */
+
+		if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && 
+			(server->capabilities & CAP_EXTENDED_SECURITY)) {
+			count = pSMBr->ByteCount;
+			if (count < 16)
+				rc = -EIO;
+			else if (count == 16) {
+				server->secType = RawNTLMSSP;
+				if (server->socketUseCount.counter > 1) {
+					if (memcmp
+						(server->server_GUID,
+						pSMBr->u.extended_response.
+						GUID, 16) != 0) {
+						cFYI(1,
+							("UID of server does not match previous connection to same ip address"));
+						memcpy(server->
+							server_GUID,
+							pSMBr->u.
+							extended_response.
+							GUID, 16);
+					}
+				} else
+					memcpy(server->server_GUID,
+					       pSMBr->u.extended_response.
+					       GUID, 16);
+			} else {
+				rc = decode_negTokenInit(pSMBr->u.
+							 extended_response.
+							 SecurityBlob,
+							 count - 16,
+							 &server->secType);
+				if(rc == 1) {
+				/* BB Need to fill struct for sessetup here */
+					rc = -EOPNOTSUPP;
+				} else {
+					rc = -EINVAL;
+				}
+			}
+		} else
+			server->capabilities &= ~CAP_EXTENDED_SECURITY;
+		if(sign_CIFS_PDUs == FALSE) {        
+			if(server->secMode & SECMODE_SIGN_REQUIRED)
+				cERROR(1,
+				 ("Server requires /proc/fs/cifs/PacketSigningEnabled"));
+			server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+		} else if(sign_CIFS_PDUs == 1) {
+			if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+				server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+		}
+				
+	}
+	if (pSMB)
+		cifs_buf_release(pSMB);
+	return rc;
+}
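+
+/* A short sketch of how the dialect byte count above is derived: each entry
+   in protocols[] begins with the 0x02 dialect marker byte, and the count
+   sent on the wire covers the marker, the dialect string and its trailing
+   null.  Purely illustrative. */
+#if 0
+static __u16 example_dialect_count(void)
+{
+	/* "\2NT LM 0.12" -> 1 marker byte + 10 characters + 1 null = 12 */
+	return (__u16)(strlen(protocols[0].name) + 1);
+}
+#endif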
+
+int
+CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response; /* BB removeme BB */
+	int rc = 0;
+	int length;
+
+	cFYI(1, ("In tree disconnect"));
+	/*
+	 *  If this is the last user of the connection and the connection is
+	 *  still alive, disconnect it.  If this is the last connection on the
+	 *  server session, disconnect the session as well (and inside session
+	 *  disconnect we should check if the tcp socket needs to be freed and
+	 *  the kernel thread woken up).
+	 */
+	if (tcon)
+		down(&tcon->tconSem);
+	else
+		return -EIO;
+
+	atomic_dec(&tcon->useCount);
+	if (atomic_read(&tcon->useCount) > 0) {
+		up(&tcon->tconSem);
+		return -EBUSY;
+	}
+
+	/* No need to return an error on this operation if the tid was
+	invalidated and already closed on the server, e.g. due to the tcp
+	session crashing */
+	if(tcon->tidStatus == CifsNeedReconnect) {
+		up(&tcon->tconSem);
+		return 0;  
+	}
+
+	if((tcon->ses == NULL) || (tcon->ses->server == NULL)) {    
+		up(&tcon->tconSem);
+		return -EIO;
+	}
+	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, (void **)&smb_buffer);
+	if (rc) {
+		up(&tcon->tconSem);
+		return rc;
+	} else {
+		smb_buffer_response = smb_buffer; /* BB removeme BB */
+    }
+	rc = SendReceive(xid, tcon->ses, smb_buffer, smb_buffer_response,
+			 &length, 0);
+	if (rc)
+		cFYI(1, (" Tree disconnect failed %d", rc));
+
+	if (smb_buffer)
+		cifs_small_buf_release(smb_buffer);
+	up(&tcon->tconSem);
+
+	/* No need to return an error on this operation if the tid was
+	invalidated and already closed on the server, e.g. due to the tcp
+	session crashing */
+	if (rc == -EAGAIN)
+		rc = 0;
+
+	return rc;
+}
+
+int
+CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
+{
+	struct smb_hdr *smb_buffer_response;
+	LOGOFF_ANDX_REQ *pSMB;
+	int rc = 0;
+	int length;
+
+	cFYI(1, ("In SMBLogoff for session disconnect"));
+	if (ses)
+		down(&ses->sesSem);
+	else
+		return -EIO;
+
+	atomic_dec(&ses->inUse);
+	if (atomic_read(&ses->inUse) > 0) {
+		up(&ses->sesSem);
+		return -EBUSY;
+	}
+	rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
+	if (rc) {
+		up(&ses->sesSem);
+		return rc;
+	}
+
+	smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */
+	
+	if(ses->server) {
+		if(ses->server->secMode & 
+		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+			pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+	}
+
+	pSMB->hdr.Uid = ses->Suid;
+
+	pSMB->AndXCommand = 0xFF;
+	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+			 smb_buffer_response, &length, 0);
+	if (ses->server) {
+		atomic_dec(&ses->server->socketUseCount);
+		if (atomic_read(&ses->server->socketUseCount) == 0) {
+			spin_lock(&GlobalMid_Lock);
+			ses->server->tcpStatus = CifsExiting;
+			spin_unlock(&GlobalMid_Lock);
+			rc = -ESHUTDOWN;
+		}
+	}
+	if (pSMB)
+		cifs_small_buf_release(pSMB);
+	up(&ses->sesSem);
+
+	/* if the session is dead then we do not need to do ulogoff,
+		since the server closed the smb session; no sense reporting
+		an error */
+	if (rc == -EAGAIN)
+		rc = 0;
+	return rc;
+}
+
+int
+CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
+	       const char *fileName, const struct nls_table *nls_codepage)
+{
+	DELETE_FILE_REQ *pSMB = NULL;
+	DELETE_FILE_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+
+DelFileRetry:
+	rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->fileName, fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->fileName, fileName, name_len);
+	}
+	pSMB->SearchAttributes =
+	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
+	pSMB->BufferFormat = 0x04;
+	pSMB->hdr.smb_buf_length += name_len + 1;
+	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Error in RMFile = %d", rc));
+	} 
+#ifdef CONFIG_CIFS_STATS
+        else {
+		atomic_inc(&tcon->num_deletes);
+        }
+#endif
+
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto DelFileRetry;
+
+	return rc;
+}
+
+int
+CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
+	     const char *dirName, const struct nls_table *nls_codepage)
+{
+	DELETE_DIRECTORY_REQ *pSMB = NULL;
+	DELETE_DIRECTORY_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+
+	cFYI(1, ("In CIFSSMBRmDir"));
+RmDirRetry:
+	rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len = cifs_strtoUCS((wchar_t *) pSMB->DirName, dirName, PATH_MAX
+				/* find define for this maxpathcomponent */
+				, nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(dirName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->DirName, dirName, name_len);
+	}
+
+	pSMB->BufferFormat = 0x04;
+	pSMB->hdr.smb_buf_length += name_len + 1;
+	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Error in RMDir = %d", rc));
+	}
+#ifdef CONFIG_CIFS_STATS
+        else {
+		atomic_inc(&tcon->num_rmdirs);
+        }
+#endif
+
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto RmDirRetry;
+	return rc;
+}
+
+int
+CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+	     const char *name, const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	CREATE_DIRECTORY_REQ *pSMB = NULL;
+	CREATE_DIRECTORY_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int name_len;
+
+	cFYI(1, ("In CIFSSMBMkDir"));
+MkDirRetry:
+	rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len = cifs_strtoUCS((wchar_t *) pSMB->DirName, name, PATH_MAX
+					 /* find define for this maxpathcomponent */
+					 , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(name, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->DirName, name, name_len);
+	}
+
+	pSMB->BufferFormat = 0x04;
+	pSMB->hdr.smb_buf_length += name_len + 1;
+	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Error in Mkdir = %d", rc));
+	}
+#ifdef CONFIG_CIFS_STATS
+        else {
+		atomic_inc(&tcon->num_mkdirs);
+        }
+#endif
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto MkDirRetry;
+	return rc;
+}
+
+int
+CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+	    const char *fileName, const int openDisposition,
+	    const int access_flags, const int create_options, __u16 * netfid,
+	    int *pOplock, FILE_ALL_INFO * pfile_info, 
+	    const struct nls_table *nls_codepage)
+{
+	int rc = -EACCES;
+	OPEN_REQ *pSMB = NULL;
+	OPEN_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int name_len;
+	__u16 count;
+
+openRetry:
+	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->AndXCommand = 0xFF;	/* none */
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		count = 1;	/* account for one byte pad to word boundary */
+		name_len =
+		    cifs_strtoUCS((wchar_t *) (pSMB->fileName + 1),
+				  fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+		pSMB->NameLength = cpu_to_le16(name_len);
+	} else {		/* BB improve the check for buffer overruns BB */
+		count = 0;	/* no pad */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		pSMB->NameLength = cpu_to_le16(name_len);
+		strncpy(pSMB->fileName, fileName, name_len);
+	}
+	if (*pOplock & REQ_OPLOCK)
+		pSMB->OpenFlags = cpu_to_le32(REQ_OPLOCK);
+	else if (*pOplock & REQ_BATCHOPLOCK) {
+		pSMB->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
+	}
+	pSMB->DesiredAccess = cpu_to_le32(access_flags);
+	pSMB->AllocationSize = 0;
+	pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);
+	/* XP does not handle ATTR_POSIX_SEMANTICS */
+	/* but it helps speed up case sensitive checks for other
+	servers such as Samba */
+	if (tcon->ses->capabilities & CAP_UNIX)
+		pSMB->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
+
+	/* if ((omode & S_IWUGO) == 0)
+		pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);*/
+	/*  The above line causes problems because the vfs splits create into
+		two pieces - we need to set the mode after the file is created,
+		not while it is being created */
+	pSMB->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
+	pSMB->CreateDisposition = cpu_to_le32(openDisposition);
+	pSMB->CreateOptions = cpu_to_le32(create_options);
+	pSMB->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);	/* BB ??*/
+	pSMB->SecurityFlags =
+	    SECURITY_CONTEXT_TRACKING | SECURITY_EFFECTIVE_ONLY;
+
+	count += name_len;
+	pSMB->hdr.smb_buf_length += count;
+
+	pSMB->ByteCount = cpu_to_le16(count);
+	/* long_op set to 1 to allow for oplock break timeouts */
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 1);
+	if (rc) {
+		cFYI(1, ("Error in Open = %d", rc));
+	} else {
+		*pOplock = pSMBr->OplockLevel;	/* one byte no need to le_to_cpu */
+		*netfid = pSMBr->Fid;	/* cifs fid stays in le */
+		/* Let caller know file was created so we can set the mode. */
+		/* Do we care about the CreateAction in any other cases? */
+		if(cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+			*pOplock |= CIFS_CREATE_ACTION; 
+		if(pfile_info) {
+		    memcpy((char *)pfile_info,(char *)&pSMBr->CreationTime,
+			36 /* CreationTime to Attributes */);
+		    /* the file_info buf is endian converted by caller */
+		    pfile_info->AllocationSize = pSMBr->AllocationSize;
+		    pfile_info->EndOfFile = pSMBr->EndOfFile;
+		    pfile_info->NumberOfLinks = cpu_to_le32(1);
+		}
+
+#ifdef CONFIG_CIFS_STATS
+		atomic_inc(&tcon->num_opens);
+#endif
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto openRetry;
+	return rc;
+}
+
+/* If no buffer is passed in, then the caller wants to do the copy
+	itself, as in the case of readpages, so the SMB buffer must be
+	freed by the caller */
+
+int
+CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
+	    const int netfid, const unsigned int count,
+	    const __u64 lseek, unsigned int *nbytes, char **buf)
+{
+	int rc = -EACCES;
+	READ_REQ *pSMB = NULL;
+	READ_RSP *pSMBr = NULL;
+	char *pReadData = NULL;
+	int bytes_returned;
+
+	cFYI(1,("Reading %d bytes on fid %d",count,netfid));
+
+	*nbytes = 0;
+	rc = smb_init(SMB_COM_READ_ANDX, 12, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	/* tcon and ses pointer are checked in smb_init */
+	if (tcon->ses->server == NULL)
+		return -ECONNABORTED;
+
+	pSMB->AndXCommand = 0xFF;	/* none */
+	pSMB->Fid = netfid;
+	pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
+	pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
+	pSMB->Remaining = 0;
+	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
+	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
+	pSMB->ByteCount = 0;  /* no need to do le conversion since it is 0 */
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cERROR(1, ("Send error in read = %d", rc));
+	} else {
+		int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
+		data_length = data_length << 16;
+		data_length += le16_to_cpu(pSMBr->DataLength);
+		*nbytes = data_length;
+
+		/* check that DataLength would not go beyond the end of the SMB */
+		if ((data_length > CIFSMaxBufSize) 
+				|| (data_length > count)) {
+			cFYI(1,("bad length %d for count %d",data_length,count));
+			rc = -EIO;
+			*nbytes = 0;
+		} else {
+			pReadData =
+			    (char *) (&pSMBr->hdr.Protocol) +
+			    le16_to_cpu(pSMBr->DataOffset);
+/*			if(rc = copy_to_user(buf, pReadData, data_length)) {
+				cERROR(1,("Faulting on read rc = %d",rc));
+				rc = -EFAULT;
+			}*/ /* can not use copy_to_user when using page cache*/
+			if(*buf)
+			    memcpy(*buf,pReadData,data_length);
+		}
+	}
+	if(*buf)
+		cifs_buf_release(pSMB);
+	else
+		*buf = (char *)pSMB;
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+		since file handle passed in no longer valid */
+	return rc;
+}
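+
+/* A minimal sketch of the second calling convention described above
+   CIFSSMBRead: pass a NULL buffer pointer, let the routine hand back the
+   raw response buffer, and release it once the data has been consumed.
+   The wrapper name and the page cache comment are hypothetical. */
+#if 0
+static int example_read_zero_copy(const int xid, struct cifsTconInfo *tcon,
+				  int netfid, unsigned int len, __u64 offset)
+{
+	unsigned int bytes_read = 0;
+	char *smb_read_data = NULL;	/* NULL asks for the raw response buffer */
+	int rc;
+
+	rc = CIFSSMBRead(xid, tcon, netfid, len, offset, &bytes_read,
+			 &smb_read_data);
+	if (smb_read_data) {
+		/* the data starts at DataOffset past the Protocol field;
+		   copy bytes_read bytes out (e.g. into the page cache) */
+		cifs_buf_release(smb_read_data);
+	}
+	return rc;
+}
+#endif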
+
+int
+CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
+	     const int netfid, const unsigned int count,
+	     const __u64 offset, unsigned int *nbytes, const char *buf,
+	     const char __user * ubuf, const int long_op)
+{
+	int rc = -EACCES;
+	WRITE_REQ *pSMB = NULL;
+	WRITE_RSP *pSMBr = NULL;
+	int bytes_returned;
+	__u32 bytes_sent;
+	__u16 byte_count;
+
+	/* cFYI(1,("write at %lld %d bytes",offset,count));*/
+	rc = smb_init(SMB_COM_WRITE_ANDX, 14, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+	/* tcon and ses pointer are checked in smb_init */
+	if (tcon->ses->server == NULL)
+		return -ECONNABORTED;
+
+	pSMB->AndXCommand = 0xFF;	/* none */
+	pSMB->Fid = netfid;
+	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
+	pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
+	pSMB->Reserved = 0xFFFFFFFF;
+	pSMB->WriteMode = 0;
+	pSMB->Remaining = 0;
+
+	/* We can increase the buffer size in some cases - i.e. we can send
+	more if the LARGE_WRITE_X capability is returned by the server and
+	our buffer is big enough, or if we convert to iovecs on socket writes
+	and eliminate the copy to the CIFS buffer */
+	if(tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
+		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
+	} else {
+		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
+			 & ~0xFF;
+	}
+
+	if (bytes_sent > count)
+		bytes_sent = count;
+	pSMB->DataOffset =
+	    cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
+	if(buf)
+	    memcpy(pSMB->Data,buf,bytes_sent);
+	else if(ubuf) {
+		if(copy_from_user(pSMB->Data,ubuf,bytes_sent)) {
+			cifs_buf_release(pSMB);
+			return -EFAULT;
+		}
+	} else {
+		/* No buffer */
+		cifs_buf_release(pSMB);
+		return -EINVAL;
+	}
+
+	byte_count = bytes_sent + 1 /* pad */ ; /* BB fix this for sends > 64K */
+	pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
+	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
+	pSMB->hdr.smb_buf_length += bytes_sent+1;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, long_op);
+	if (rc) {
+		cFYI(1, ("Send error in write = %d", rc));
+		*nbytes = 0;
+	} else {
+		*nbytes = le16_to_cpu(pSMBr->CountHigh);
+		*nbytes = (*nbytes) << 16;
+		*nbytes += le16_to_cpu(pSMBr->Count);
+	}
+
+	cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+		since file handle passed in no longer valid */
+
+	return rc;
+}
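+
+/* A small sketch of the split/reassembly arithmetic used by the read and
+   write paths above: 64 bit offsets and 32 bit lengths travel as low/high
+   halves on the wire, and the count of bytes written comes back split
+   across Count and CountHigh.  Illustrative only. */
+#if 0
+static unsigned int example_split_and_join(__u64 offset, unsigned int count)
+{
+	__u32 off_low  = (__u32)(offset & 0xFFFFFFFF);	/* -> OffsetLow */
+	__u32 off_high = (__u32)(offset >> 32);		/* -> OffsetHigh */
+	__u16 len_low  = (__u16)(count & 0xFFFF);	/* -> DataLengthLow */
+	__u16 len_high = (__u16)(count >> 16);		/* -> DataLengthHigh */
+
+	/* reassembling the response count mirrors the split */
+	return ((unsigned int)len_high << 16) + len_low;
+}
+#endif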
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
+	     const int netfid, const unsigned int count,
+	     const __u64 offset, unsigned int *nbytes, const char __user *buf,
+	     const int long_op)
+{
+	int rc = -EACCES;
+	WRITE_REQ *pSMB = NULL;
+	WRITE_RSP *pSMBr = NULL;
+	/*int bytes_returned;*/
+	unsigned bytes_sent;
+	__u16 byte_count;
+
+	rc = small_smb_init(SMB_COM_WRITE_ANDX, 14, tcon, (void **) &pSMB);
+    
+	if (rc)
+		return rc;
+	
+	pSMBr = (WRITE_RSP *)pSMB; /* BB removeme BB */
+
+	/* tcon and ses pointer are checked in smb_init */
+	if (tcon->ses->server == NULL)
+		return -ECONNABORTED;
+
+	pSMB->AndXCommand = 0xFF; /* none */
+	pSMB->Fid = netfid;
+	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
+	pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
+	pSMB->Reserved = 0xFFFFFFFF;
+	pSMB->WriteMode = 0;
+	pSMB->Remaining = 0;
+	bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & ~0xFF;
+	if (bytes_sent > count)
+		bytes_sent = count;
+	pSMB->DataLengthHigh = 0;
+	pSMB->DataOffset =
+	    cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
+
+	byte_count = bytes_sent + 1 /* pad */ ;
+	pSMB->DataLengthLow = cpu_to_le16(bytes_sent);
+	pSMB->DataLengthHigh = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+/*	rc = SendReceive2(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, buf, buflen, &bytes_returned, long_op); */  /* BB fixme BB */
+	if (rc) {
+		cFYI(1, ("Send error in write2 (large write) = %d", rc));
+		*nbytes = 0;
+	} else
+		*nbytes = le16_to_cpu(pSMBr->Count);
+
+	cifs_small_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+		since file handle passed in no longer valid */
+
+	return rc;
+}
+#endif /* CIFS_EXPERIMENTAL */
+
+int
+CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+	    const __u16 smb_file_id, const __u64 len,
+	    const __u64 offset, const __u32 numUnlock,
+	    const __u32 numLock, const __u8 lockType, const int waitFlag)
+{
+	int rc = 0;
+	LOCK_REQ *pSMB = NULL;
+	LOCK_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int timeout = 0;
+	__u16 count;
+
+	cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock));
+	rc = smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
+		timeout = -1; /* no response expected */
+		pSMB->Timeout = 0;
+	} else if (waitFlag == TRUE) {
+		timeout = 3;  /* blocking operation, no timeout */
+		pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
+	} else {
+		pSMB->Timeout = 0;
+	}
+
+	pSMB->NumberOfLocks = cpu_to_le16(numLock);
+	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
+	pSMB->LockType = lockType;
+	pSMB->AndXCommand = 0xFF;	/* none */
+	pSMB->Fid = smb_file_id; /* netfid stays le */
+
+	if((numLock != 0) || (numUnlock != 0)) {
+		pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
+		/* BB where to store pid high? */
+		pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
+		pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
+		pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset);
+		pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32));
+		count = sizeof(LOCKING_ANDX_RANGE);
+	} else {
+		/* oplock break */
+		count = 0;
+	}
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
+
+	if (rc) {
+		cFYI(1, ("Send error in Lock = %d", rc));
+	}
+	cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	since file handle passed in no longer valid */
+	return rc;
+}
+
+int
+CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+{
+	int rc = 0;
+	CLOSE_REQ *pSMB = NULL;
+	CLOSE_RSP *pSMBr = NULL;
+	int bytes_returned;
+	cFYI(1, ("In CIFSSMBClose"));
+
+/* do not retry on dead session on close */
+	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
+	if(rc == -EAGAIN)
+		return 0;
+	if (rc)
+		return rc;
+
+	pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
+
+	pSMB->FileID = (__u16) smb_file_id;
+	pSMB->LastWriteTime = 0;
+	pSMB->ByteCount = 0;
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		if(rc!=-EINTR) {
+			/* EINTR is expected when the user hits ctrl-c to kill the app */
+			cERROR(1, ("Send error in Close = %d", rc));
+		}
+	}
+
+	cifs_small_buf_release(pSMB);
+
+	/* Since session is dead, file will be closed on server already */
+	if(rc == -EAGAIN)
+		rc = 0;
+
+	return rc;
+}
+
+int
+CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+	      const char *fromName, const char *toName,
+	      const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	RENAME_REQ *pSMB = NULL;
+	RENAME_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int name_len, name_len2;
+	__u16 count;
+
+	cFYI(1, ("In CIFSSMBRename"));
+renameRetry:
+	rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->BufferFormat = 0x04;
+	pSMB->SearchAttributes =
+	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+			ATTR_DIRECTORY);
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->OldFileName, fromName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+		pSMB->OldFileName[name_len] = 0x04;	/* pad */
+	/* protocol requires ASCII signature byte on Unicode string */
+		pSMB->OldFileName[name_len + 1] = 0x00;
+		name_len2 =
+		    cifs_strtoUCS((wchar_t *) & pSMB->
+				  OldFileName[name_len + 2], toName, PATH_MAX,
+				  nls_codepage);
+		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+		name_len2 *= 2;	/* convert to bytes */
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fromName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->OldFileName, fromName, name_len);
+		name_len2 = strnlen(toName, PATH_MAX);
+		name_len2++;	/* trailing null */
+		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
+		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
+		name_len2++;	/* trailing null */
+		name_len2++;	/* signature byte */
+	}
+
+	count = 1 /* 1st signature byte */  + name_len + name_len2;
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in rename = %d", rc));
+	} 
+
+#ifdef CONFIG_CIFS_STATS
+	  else {
+		atomic_inc(&tcon->num_renames);
+	}
+#endif
+
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto renameRetry;
+
+	return rc;
+}
+
+int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon, 
+		int netfid, char * target_name, const struct nls_table * nls_codepage) 
+{
+	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+	struct set_file_rename * rename_info;
+	char *data_offset;
+	char dummy_string[30];
+	int rc = 0;
+	int bytes_returned = 0;
+	int len_of_str;
+	__u16 params, param_offset, offset, count, byte_count;
+
+	cFYI(1, ("Rename to File by handle"));
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
+			(void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 6;
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+	rename_info = (struct set_file_rename *) data_offset;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+	byte_count = 3 /* pad */  + params;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	/* construct random name ".cifs_tmp<inodenum><mid>" */
+	rename_info->overwrite = cpu_to_le32(1);
+	rename_info->root_fid  = 0;
+	/* unicode only call */
+	if(target_name == NULL) {
+		sprintf(dummy_string,"cifs%x",pSMB->hdr.Mid);
+	        len_of_str = cifs_strtoUCS((wchar_t *) rename_info->target_name, dummy_string, 24, nls_codepage);
+	} else {
+		len_of_str = cifs_strtoUCS((wchar_t *) rename_info->target_name, target_name, PATH_MAX, nls_codepage);
+	}
+	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
+	count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str) + 2;
+	byte_count += count;
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->Fid = netfid;
+	pSMB->InformationLevel =
+		cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
+                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1,("Send error in Rename (by file handle) = %d", rc));
+	}
+#ifdef CONFIG_CIFS_STATS
+	  else {
+		atomic_inc(&pTcon->num_t2renames);
+	}
+#endif
+	cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
+		since file handle passed in no longer valid */
+
+	return rc;
+}
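+
+/* A compact sketch of the TRANSACTION2 layout arithmetic repeated by the
+   setters in this file: wire offsets are relative to the SMB signature
+   (the Protocol field), so the 4 byte length field at the front of
+   struct smb_hdr is subtracted, and the data area directly follows the
+   parameter area.  The helper is illustrative; the field names mirror the
+   requests above. */
+#if 0
+static void example_t2_layout(struct smb_com_transaction2_sfi_req *pSMB,
+			      __u16 params)
+{
+	__u16 param_offset, offset;
+	char *data_area;
+
+	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+	offset = param_offset + params;		/* data follows the parms */
+	data_area = (char *)(&pSMB->hdr.Protocol) + offset;
+	/* byte_count then covers 3 pad bytes + params + the data length */
+}
+#endif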
+
+int
+CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char * fromName, 
+            const __u16 target_tid, const char *toName, const int flags,
+            const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	COPY_REQ *pSMB = NULL;
+	COPY_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int name_len, name_len2;
+	__u16 count;
+
+	cFYI(1, ("In CIFSSMBCopy"));
+copyRetry:
+	rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
+			(void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->BufferFormat = 0x04;
+	pSMB->Tid2 = target_tid;
+
+	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len = cifs_strtoUCS((wchar_t *) pSMB->OldFileName, 
+				fromName, 
+				PATH_MAX /* find define for this maxpathcomponent */,
+				nls_codepage);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+		pSMB->OldFileName[name_len] = 0x04;     /* pad */
+		/* protocol requires ASCII signature byte on Unicode string */
+		pSMB->OldFileName[name_len + 1] = 0x00;
+		name_len2 = cifs_strtoUCS((wchar_t *) & pSMB->
+				OldFileName[name_len + 2], toName, PATH_MAX,
+				nls_codepage);
+		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+		name_len2 *= 2; /* convert to bytes */
+	} else {                /* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fromName, PATH_MAX);
+		name_len++;     /* trailing null */
+		strncpy(pSMB->OldFileName, fromName, name_len);
+		name_len2 = strnlen(toName, PATH_MAX);
+		name_len2++;    /* trailing null */
+		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
+		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
+		name_len2++;    /* trailing null */
+		name_len2++;    /* signature byte */
+	}
+
+	count = 1 /* 1st signature byte */  + name_len + name_len2;
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in copy = %d with %d files copied",
+			rc, le16_to_cpu(pSMBr->CopyCount)));
+	}
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto copyRetry;
+
+	return rc;
+}
+
+int
+CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
+		      const char *fromName, const char *toName,
+		      const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	char *data_offset;
+	int name_len;
+	int name_len_target;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count;
+
+	cFYI(1, ("In Symlink Unix style"));
+createSymLinkRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fromName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fromName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fromName, name_len);
+	}
+	params = 6 + name_len;
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len_target =
+		    cifs_strtoUCS((wchar_t *) data_offset, toName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len_target++;	/* trailing null */
+		name_len_target *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len_target = strnlen(toName, PATH_MAX);
+		name_len_target++;	/* trailing null */
+		strncpy(data_offset, toName, name_len_target);
+	}
+
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	/* BB find exact max on data count below from sess */
+	pSMB->MaxDataCount = cpu_to_le16(1000);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + name_len_target;
+	pSMB->DataCount = cpu_to_le16(name_len_target);
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1,
+		     ("Send error in SetPathInfo (create symlink) = %d",
+		      rc));
+	}
+
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto createSymLinkRetry;
+
+	return rc;
+}
+
+int
+CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+		       const char *fromName, const char *toName,
+		       const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	char *data_offset;
+	int name_len;
+	int name_len_target;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count;
+
+	cFYI(1, ("In Create Hard link Unix style"));
+createHardLinkRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len = cifs_strtoUCS((wchar_t *) pSMB->FileName, toName, PATH_MAX
+					 /* find define for this maxpathcomponent */
+					 , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(toName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, toName, name_len);
+	}
+	params = 6 + name_len;
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len_target =
+		    cifs_strtoUCS((wchar_t *) data_offset, fromName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len_target++;	/* trailing null */
+		name_len_target *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len_target = strnlen(fromName, PATH_MAX);
+		name_len_target++;	/* trailing null */
+		strncpy(data_offset, fromName, name_len_target);
+	}
+
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	/* BB find exact max on data count below from sess*/
+	pSMB->MaxDataCount = cpu_to_le16(1000);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + name_len_target;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->DataCount = cpu_to_le16(name_len_target);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc));
+	}
+
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto createHardLinkRetry;
+
+	return rc;
+}
+
+int
+CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+		   const char *fromName, const char *toName,
+		   const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	NT_RENAME_REQ *pSMB = NULL;
+	RENAME_RSP *pSMBr = NULL;
+	int bytes_returned;
+	int name_len, name_len2;
+	__u16 count;
+
+	cFYI(1, ("In CIFSCreateHardLink"));
+winCreateHardLinkRetry:
+
+	rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->SearchAttributes =
+	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+			ATTR_DIRECTORY);
+	pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
+	pSMB->ClusterCount = 0;
+
+	pSMB->BufferFormat = 0x04;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->OldFileName, fromName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+		pSMB->OldFileName[name_len] = 0;	/* pad */
+		pSMB->OldFileName[name_len + 1] = 0x04; 
+		name_len2 =
+		    cifs_strtoUCS((wchar_t *) & pSMB->
+				  OldFileName[name_len + 2], toName, PATH_MAX,
+				  nls_codepage);
+		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+		name_len2 *= 2;	/* convert to bytes */
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fromName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->OldFileName, fromName, name_len);
+		name_len2 = strnlen(toName, PATH_MAX);
+		name_len2++;	/* trailing null */
+		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
+		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
+		name_len2++;	/* trailing null */
+		name_len2++;	/* signature byte */
+	}
+
+	count = 1 /* string type byte */  + name_len + name_len2;
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in hard link (NT rename) = %d", rc));
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto winCreateHardLinkRetry;
+
+	return rc;
+}
+
+int
+CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			char *symlinkinfo, const int buflen,
+			const struct nls_table *nls_codepage)
+{
+/* SMB_QUERY_FILE_UNIX_LINK */
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QPathSymLinkInfo (Unix) for path %s", searchName));
+
+querySymLinkRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	/* BB find exact max data count below from sess structure BB */
+	pSMB->MaxDataCount = cpu_to_le16(4000);
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QuerySymLinkInfo = %d", rc));
+	} else {
+		/* decode response */
+
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		if (rc || (pSMBr->ByteCount < 2))
+		/* BB also check enough total bytes returned */
+			rc = -EIO;	/* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+
+			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
+				name_len = UniStrnlen((wchar_t *) ((char *)
+					&pSMBr->hdr.Protocol +data_offset),
+					min_t(const int, buflen,count) / 2);
+				cifs_strfromUCS_le(symlinkinfo,
+					(wchar_t *) ((char *)&pSMBr->hdr.Protocol +
+						data_offset),
+					name_len, nls_codepage);
+			} else {
+				strncpy(symlinkinfo,
+					(char *) &pSMBr->hdr.Protocol + 
+						data_offset,
+					min_t(const int, buflen, count));
+			}
+			symlinkinfo[buflen] = 0;
+	/* just in case so calling code does not go off the end of buffer */
+		}
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto querySymLinkRetry;
+	return rc;
+}
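+
+/* A small sketch of how the TRANSACT2 response decode above locates its
+   payload: DataOffset is again relative to the SMB signature, so the
+   returned data starts at &hdr.Protocol + DataOffset and is DataCount
+   bytes long.  The helper name is hypothetical. */
+#if 0
+static char *example_t2_response_data(TRANSACTION2_QPI_RSP *pSMBr,
+				      __u16 *len)
+{
+	__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+
+	*len = le16_to_cpu(pSMBr->t2.DataCount);
+	return (char *)&pSMBr->hdr.Protocol + data_offset;
+}
+#endif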
+
+int
+CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
+			const unsigned char *searchName,
+			char *symlinkinfo, const int buflen,__u16 fid,
+			const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	struct smb_com_transaction_ioctl_req * pSMB;
+	struct smb_com_transaction_ioctl_rsp * pSMBr;
+
+	cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName));
+	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->TotalParameterCount = 0 ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le32(2);
+	/* BB find exact data count max from sess structure BB */
+	pSMB->MaxDataCount = cpu_to_le32(4000);
+	pSMB->MaxSetupCount = 4;
+	pSMB->Reserved = 0;
+	pSMB->ParameterOffset = 0;
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 4;
+	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
+	pSMB->IsFsctl = 1; /* FSCTL */
+	pSMB->IsRootFlag = 0;
+	pSMB->Fid = fid; /* file handle always le */
+	pSMB->ByteCount = 0;
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QueryReparseLinkInfo = %d", rc));
+	} else {		/* decode response */
+		__u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
+		__u32 data_count = le32_to_cpu(pSMBr->DataCount);
+		if ((pSMBr->ByteCount < 2) || (data_offset > 512))
+		/* BB also check enough total bytes returned */
+			rc = -EIO;	/* bad smb */
+		else {
+			if(data_count && (data_count < 2048)) {
+				char * end_of_smb = pSMBr->ByteCount + (char *)&pSMBr->ByteCount;
+
+				struct reparse_data * reparse_buf = (struct reparse_data *)
+					((char *)&pSMBr->hdr.Protocol + data_offset);
+				if((char*)reparse_buf >= end_of_smb) {
+					rc = -EIO;
+					goto qreparse_out;
+				}
+				if((reparse_buf->LinkNamesBuf + 
+					reparse_buf->TargetNameOffset +
+					reparse_buf->TargetNameLen) >
+						end_of_smb) {
+					cFYI(1,("reparse buf extended beyond SMB"));
+					rc = -EIO;
+					goto qreparse_out;
+				}
+				
+				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
+					name_len = UniStrnlen((wchar_t *)
+							(reparse_buf->LinkNamesBuf + 
+							reparse_buf->TargetNameOffset),
+							min(buflen/2, reparse_buf->TargetNameLen / 2)); 
+					cifs_strfromUCS_le(symlinkinfo,
+						(wchar_t *) (reparse_buf->LinkNamesBuf + 
+						reparse_buf->TargetNameOffset),
+						name_len, nls_codepage);
+				} else { /* ASCII names */
+					strncpy(symlinkinfo,reparse_buf->LinkNamesBuf + 
+						reparse_buf->TargetNameOffset, 
+						min_t(const int, buflen, reparse_buf->TargetNameLen));
+				}
+			} else {
+				rc = -EIO;
+				cFYI(1,("Invalid return data count on get reparse info ioctl"));
+			}
+			symlinkinfo[buflen] = 0; /* just in case so the caller
+					does not go off the end of the buffer */
+			cFYI(1,("readlink result - %s ",symlinkinfo));
+		}
+	}
+qreparse_out:
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
+		since file handle passed in no longer valid */
+
+	return rc;
+}
+
+#ifdef CONFIG_CIFS_POSIX
+
+/* Convert an Access Control Entry from wire format to local POSIX xattr format */
+static void cifs_convert_ace(posix_acl_xattr_entry * ace, struct cifs_posix_ace * cifs_ace)
+{
+	/* u8 cifs fields do not need le conversion */
+	ace->e_perm = (__u16)cifs_ace->cifs_e_perm; 
+	ace->e_tag  = (__u16)cifs_ace->cifs_e_tag;
+	ace->e_id   = (__u32)le64_to_cpu(cifs_ace->cifs_uid);
+	/* cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id)); */
+
+	return;
+}
+
+/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
+static int cifs_copy_posix_acl(char * trgt,char * src, const int buflen,const int acl_type,const int size_of_data_area)
+{
+	int size =  0;
+	int i;
+	__u16 count;
+	struct cifs_posix_ace * pACE;
+	struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)src;
+	posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)trgt;
+
+	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
+		return -EOPNOTSUPP;
+
+	if(acl_type & ACL_TYPE_ACCESS) {
+		count = le16_to_cpu(cifs_acl->access_entry_count);
+		pACE = &cifs_acl->ace_array[0];
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if(size_of_data_area < size) {
+			cFYI(1,("bad CIFS POSIX ACL size %d vs. %d",size_of_data_area,size));
+			return -EINVAL;
+		}
+	} else if(acl_type & ACL_TYPE_DEFAULT) {
+		count = le16_to_cpu(cifs_acl->access_entry_count);
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+/* skip past access ACEs to get to default ACEs */
+		pACE = &cifs_acl->ace_array[count];
+		count = le16_to_cpu(cifs_acl->default_entry_count);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if(size_of_data_area < size)
+			return -EINVAL;
+	} else {
+		/* illegal type */
+		return -EINVAL;
+	}
+
+	size = posix_acl_xattr_size(count);
+	if((buflen == 0) || (local_acl == NULL)) {
+		/* used to query ACL EA size */				
+	} else if(size > buflen) {
+		return -ERANGE;
+	} else /* buffer big enough */ {
+		local_acl->a_version = POSIX_ACL_XATTR_VERSION;
+		for(i = 0;i < count ;i++) {
+			cifs_convert_ace(&local_acl->a_entries[i],pACE);
+			pACE ++;
+		}
+	}
+	return size;
+}
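+
+/* A minimal sketch of the two pass pattern the sizing logic above allows:
+   call once with a zero length buffer to learn the local xattr size, then
+   call again with a buffer of that size.  The wrapper name is
+   hypothetical; cifs_copy_posix_acl() is the routine above. */
+#if 0
+static int example_two_pass_acl_copy(char *wire_acl, int wire_len,
+				     int acl_type)
+{
+	int needed, written;
+	char *xattr_buf;
+
+	/* pass 1: a zero buflen only reports how big the xattr would be */
+	needed = cifs_copy_posix_acl(NULL, wire_acl, 0, acl_type, wire_len);
+	if (needed <= 0)
+		return needed;
+
+	xattr_buf = kmalloc(needed, GFP_KERNEL);
+	if (xattr_buf == NULL)
+		return -ENOMEM;
+
+	/* pass 2: the buffer is big enough, so the entries get converted */
+	written = cifs_copy_posix_acl(xattr_buf, wire_acl, needed, acl_type,
+				      wire_len);
+	kfree(xattr_buf);
+	return written;
+}
+#endif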
+
+static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace * cifs_ace,
+			const posix_acl_xattr_entry * local_ace)
+{
+	__u16 rc = 0; /* 0 = ACL converted ok */
+
+	cifs_ace->cifs_e_perm = (__u8)cpu_to_le16(local_ace->e_perm);
+	cifs_ace->cifs_e_tag =  (__u8)cpu_to_le16(local_ace->e_tag);
+	/* BB is there a better way to handle the large uid? */
+	if(local_ace->e_id == -1) {
+	/* Probably no need to le convert -1 on any arch but can not hurt */
+		cifs_ace->cifs_uid = cpu_to_le64(-1);
+	} else 
+		cifs_ace->cifs_uid = (__u64)cpu_to_le32(local_ace->e_id);
+        /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
+	return rc;
+}
+
+/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
+static __u16 ACL_to_cifs_posix(char * parm_data,const char * pACL,const int buflen,
+		const int acl_type)
+{
+	__u16 rc = 0;
+        struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)parm_data;
+        posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)pACL;
+	int count;
+	int i;
+
+	if((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
+		return 0;
+
+	count = posix_acl_xattr_count((size_t)buflen);
+	cFYI(1,("setting acl with %d entries from buf of length %d and version of %d",
+		count,buflen,le32_to_cpu(local_acl->a_version)));
+	if(le32_to_cpu(local_acl->a_version) != 2) {
+		cFYI(1,("unknown POSIX ACL version %d",
+			le32_to_cpu(local_acl->a_version)));
+		return 0;
+	}
+	cifs_acl->version = cpu_to_le16(1);
+	if(acl_type == ACL_TYPE_ACCESS) 
+		cifs_acl->access_entry_count = cpu_to_le16(count);
+	else if(acl_type == ACL_TYPE_DEFAULT)
+		cifs_acl->default_entry_count = cpu_to_le16(count);
+	else {
+		cFYI(1,("unknown ACL type %d",acl_type));
+		return 0;
+	}
+	for(i=0;i<count;i++) {
+		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
+					&local_acl->a_entries[i]);
+		if(rc != 0) {
+			/* ACE not converted */
+			break;
+		}
+	}
+	if(rc == 0) {
+		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
+		rc += sizeof(struct cifs_posix_acl);
+		/* BB add check to make sure ACL does not overflow SMB */
+	}
+	return rc;
+}
+
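+/* Query the POSIX ACL for the given path via TRANS2 QUERY_PATH_INFORMATION
+   (SMB_QUERY_POSIX_ACL); on success returns the size of the converted ACL */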
+int
+CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+                        const unsigned char *searchName,
+                        char *acl_inf, const int buflen, const int acl_type,
+                        const struct nls_table *nls_codepage)
+{
+/* SMB_QUERY_POSIX_ACL */
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName));
+
+queryAclRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		(void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+			cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				, nls_codepage);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+		pSMB->FileName[name_len] = 0;
+		pSMB->FileName[name_len+1] = 0;
+	} else {                /* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;     /* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+        /* BB find exact max data count below from sess structure BB */
+	pSMB->MaxDataCount = cpu_to_le16(4000);
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(
+		offsetof(struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
+	} else {
+		/* decode response */
+ 
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		if (rc || (pSMBr->ByteCount < 2))
+		/* BB also check enough total bytes returned */
+			rc = -EIO;      /* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+			rc = cifs_copy_posix_acl(acl_inf,
+				(char *)&pSMBr->hdr.Protocol+data_offset,
+				buflen,acl_type,count);
+		}
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto queryAclRetry;
+	return rc;
+}
+
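+/* Set the POSIX ACL on the given path via TRANS2 SET_PATH_INFORMATION
+   (SMB_SET_POSIX_ACL) */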
+int
+CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+                        const unsigned char *fileName,
+                        const char *local_acl, const int buflen, const int acl_type,
+                        const struct nls_table *nls_codepage)
+{
+	struct smb_com_transaction2_spi_req *pSMB = NULL;
+	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+	char *parm_data;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count, data_count, param_offset, offset;
+
+	cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName));
+setAclRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+                      (void **) &pSMBr);
+	if (rc)
+		return rc;
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+			cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, PATH_MAX
+				, nls_codepage);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+	} else {                /* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;     /* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+	params = 6 + name_len;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+
+	/* convert to on the wire format for POSIX ACL */
+	data_count = ACL_to_cifs_posix(parm_data,local_acl,buflen,acl_type);
+
+	if(data_count == 0) {
+		rc = -EOPNOTSUPP;
+		goto setACLerrorExit;
+	}
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
+	byte_count = 3 /* pad */  + params + data_count;
+	pSMB->DataCount = cpu_to_le16(data_count);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Set POSIX ACL returned %d", rc));
+	}
+
+setACLerrorExit:
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto setAclRetry;
+	return rc;
+}
+
+#endif
+
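+/* Query SMB_QUERY_FILE_ALL_INFO for a path and copy the result into pFindData */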
+int
+CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+		 const unsigned char *searchName,
+		 FILE_ALL_INFO * pFindData,
+		 const struct nls_table *nls_codepage)
+{
+/* level 263 SMB_QUERY_FILE_ALL_INFO */
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	__u16 params, byte_count;
+
+/* cFYI(1, ("In QPathInfo path %s", searchName)); */
+QPathInfoRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QPathInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 40)) 
+			rc = -EIO;	/* bad smb */
+		else if (pFindData){
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			memcpy((char *) pFindData,
+			       (char *) &pSMBr->hdr.Protocol +
+			       data_offset, sizeof (FILE_ALL_INFO));
+		} else
+		    rc = -ENOMEM;
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto QPathInfoRetry;
+
+	return rc;
+}
+
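+/* Query SMB_QUERY_FILE_UNIX_BASIC (Unix extensions) for a path into pFindData */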
+int
+CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
+		     const unsigned char *searchName,
+		     FILE_UNIX_BASIC_INFO * pFindData,
+		     const struct nls_table *nls_codepage)
+{
+/* SMB_QUERY_FILE_UNIX_BASIC */
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned = 0;
+	int name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QPathInfo (Unix) the path %s", searchName));
+UnixQPathInfoRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxDataCount = cpu_to_le16(4000); 
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QPathInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
+			rc = -EIO;	/* bad smb */
+		} else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			memcpy((char *) pFindData,
+			       (char *) &pSMBr->hdr.Protocol +
+			       data_offset,
+			       sizeof (FILE_UNIX_BASIC_INFO));
+		}
+	}
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto UnixQPathInfoRetry;
+
+	return rc;
+}
+
+#if 0  /* function unused at present */
+int CIFSFindSingle(const int xid, struct cifsTconInfo *tcon,
+	       const char *searchName, FILE_ALL_INFO * findData,
+	       const struct nls_table *nls_codepage)
+{
+/* level 257 SMB_ */
+	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
+	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In FindUnique"));
+findUniqueRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 12 + name_len /* includes null */ ;
+	pSMB->TotalDataCount = 0;	/* no EAs */
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(
+         offsetof(struct smb_com_transaction2_ffirst_req,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;	/* one byte, no need to le convert */
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->SearchAttributes =
+	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+			ATTR_DIRECTORY);
+	pSMB->SearchCount = cpu_to_le16(16);	/* BB increase */
+	pSMB->SearchFlags = cpu_to_le16(1);
+	pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
+	pSMB->SearchStorageType = 0;	/* BB what should we set this to? BB */
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+
+	if (rc) {
+		cFYI(1, ("Send error in FindFileDirInfo = %d", rc));
+	} else {		/* decode response */
+
+		/* BB fill in */
+	}
+
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto findUniqueRetry;
+
+	return rc;
+}
+#endif /* end unused (temporarily) function */
+
+/* xid, tcon, searchName and codepage are input parms, rest are returned */
+int
+CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+	      const char *searchName, 
+	      const struct nls_table *nls_codepage,
+	      __u16 *	pnetfid,
+	      struct cifs_search_info * psrch_inf)
+{
+/* level 257 SMB_ */
+	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
+	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
+	T2_FFIRST_RSP_PARMS * parms;
+	int rc = 0;
+	int bytes_returned = 0;
+	int name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In FindFirst"));
+
+findFirstRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName,searchName,
+				 PATH_MAX, nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+		pSMB->FileName[name_len] = 0; /* null terminate just in case */
+		pSMB->FileName[name_len+1] = 0;
+	} else {	/* BB add check for overrun of SMB buf BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+/* BB fix here and in unicode clause above ie
+		if(name_len > buffersize-header)
+			free buffer exit; BB */
+		strncpy(pSMB->FileName, searchName, name_len);
+		pSMB->FileName[name_len] = 0; /* just in case */
+	}
+
+	params = 12 + name_len /* includes null */ ;
+	pSMB->TotalDataCount = 0;	/* no EAs */
+	pSMB->MaxParameterCount = cpu_to_le16(10);
+	pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf -
+					  MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(
+	  offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
+	pSMB->SearchAttributes =
+	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+			ATTR_DIRECTORY);
+	pSMB->SearchCount= cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+	pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | 
+		CIFS_SEARCH_RETURN_RESUME);
+	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+
+	/* BB what should we set StorageType to? Does it matter? BB */
+	pSMB->SearchStorageType = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+
+	if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */
+		/* BB Add code to handle unsupported level rc */
+		cFYI(1, ("Error in FindFirst = %d", rc));
+
+		if (pSMB)
+			cifs_buf_release(pSMB);
+
+		/* BB eventually could optimize out free and realloc of buf */
+		/*    for this case */
+		if (rc == -EAGAIN)
+			goto findFirstRetry;
+	} else { /* decode response */
+		/* BB remember to free buffer if error BB */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		if(rc == 0) {
+			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+				psrch_inf->unicode = TRUE;
+			else
+				psrch_inf->unicode = FALSE;
+
+			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
+			psrch_inf->srch_entries_start = 
+				(char *) &pSMBr->hdr.Protocol + 
+					le16_to_cpu(pSMBr->t2.DataOffset);
+
+			parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
+			       le16_to_cpu(pSMBr->t2.ParameterOffset));
+
+			if(parms->EndofSearch)
+				psrch_inf->endOfSearch = TRUE;
+			else
+				psrch_inf->endOfSearch = FALSE;
+
+			psrch_inf->entries_in_buffer  = le16_to_cpu(parms->SearchCount);
+			psrch_inf->index_of_last_entry = 
+				psrch_inf->entries_in_buffer;
+/*cFYI(1,("entries in buf %d index_of_last %d",psrch_inf->entries_in_buffer,psrch_inf->index_of_last_entry));  */
+			*pnetfid = parms->SearchHandle;
+		} else {
+			cifs_buf_release(pSMB);
+		}
+	}
+
+	return rc;
+}
+
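+/* Continue an open search: send TRANS2_FIND_NEXT for searchHandle, release the
+   previous network buffer and point psrch_inf at the new batch of entries */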
+int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+            __u16 searchHandle, struct cifs_search_info * psrch_inf)
+{
+	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
+	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
+	T2_FNEXT_RSP_PARMS * parms;
+	char *response_data;
+	int rc = 0;
+	int bytes_returned, name_len;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In FindNext"));
+
+	if(psrch_inf->endOfSearch == TRUE)
+		return -ENOENT;
+
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		(void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 14;    /* includes 2 bytes of null string, converted to LE below */
+	byte_count = 0;
+	pSMB->TotalDataCount = 0;       /* no EAs */
+	pSMB->MaxParameterCount = cpu_to_le16(8);
+	pSMB->MaxDataCount =
+            cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset =  cpu_to_le16(
+	      offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
+	pSMB->SearchHandle = searchHandle;      /* always kept as le */
+	pSMB->SearchCount =
+		cpu_to_le16(CIFSMaxBufSize / sizeof (FILE_UNIX_INFO));
+	/* test for Unix extensions */
+/*	if (tcon->ses->capabilities & CAP_UNIX) {
+		pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_UNIX);
+		psrch_inf->info_level = SMB_FIND_FILE_UNIX;
+	} else {
+		pSMB->InformationLevel =
+		   cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
+		psrch_inf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+	} */
+	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+	pSMB->ResumeKey = psrch_inf->resume_key;
+	pSMB->SearchFlags =
+	      cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
+
+	name_len = psrch_inf->resume_name_len;
+	params += name_len;
+	if(name_len < PATH_MAX) {
+		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
+		byte_count += name_len;
+	} else {
+		rc = -EINVAL;
+		goto FNext2_err_exit;
+	}
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+
+	if (rc) {
+		if (rc == -EBADF) {
+			psrch_inf->endOfSearch = TRUE;
+			rc = 0; /* search probably was closed at end of search above */
+		} else
+			cFYI(1, ("FindNext returned = %d", rc));
+	} else {                /* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		
+		if(rc == 0) {
+			/* BB fixme add lock for file (srch_info) struct here */
+			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+				psrch_inf->unicode = TRUE;
+			else
+				psrch_inf->unicode = FALSE;
+			response_data = (char *) &pSMBr->hdr.Protocol +
+			       le16_to_cpu(pSMBr->t2.ParameterOffset);
+			parms = (T2_FNEXT_RSP_PARMS *)response_data;
+			response_data = (char *)&pSMBr->hdr.Protocol +
+				le16_to_cpu(pSMBr->t2.DataOffset);
+			cifs_buf_release(psrch_inf->ntwrk_buf_start);
+			psrch_inf->srch_entries_start = response_data;
+			psrch_inf->ntwrk_buf_start = (char *)pSMB;
+			if(parms->EndofSearch)
+				psrch_inf->endOfSearch = TRUE;
+			else
+				psrch_inf->endOfSearch = FALSE;
+
+			psrch_inf->entries_in_buffer  = le16_to_cpu(parms->SearchCount);
+			psrch_inf->index_of_last_entry +=
+				psrch_inf->entries_in_buffer;
+/*  cFYI(1,("fnxt2 entries in buf %d index_of_last %d",psrch_inf->entries_in_buffer,psrch_inf->index_of_last_entry)); */
+
+			/* BB fixme add unlock here */
+		}
+
+	}
+
+	/* BB On error, should we leave previous search buf (and count and
+	last entry fields) intact or free the previous one? */
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
+	since file handle passed in no longer valid */
+FNext2_err_exit:
+	if (rc != 0)
+		cifs_buf_release(pSMB);
+
+	return rc;
+}
+
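+/* Close the search handle on the server via SMB_COM_FIND_CLOSE2 */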
+int
+CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle)
+{
+	int rc = 0;
+	FINDCLOSE_REQ *pSMB = NULL;
+	CLOSE_RSP *pSMBr = NULL; /* BB removeme BB */
+	int bytes_returned;
+
+	cFYI(1, ("In CIFSSMBFindClose"));
+	rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
+
+	/* no sense returning error if session restarted
+		as file handle has been closed */
+	if(rc == -EAGAIN)
+		return 0;
+	if (rc)
+		return rc;
+
+	pSMBr = (CLOSE_RSP *)pSMB;  /* BB removeme BB */
+	pSMB->FileID = searchHandle;
+	pSMB->ByteCount = 0;
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cERROR(1, ("Send error in FindClose = %d", rc));
+	}
+	cifs_small_buf_release(pSMB);
+
+	/* Since session is dead, search handle closed on server already */
+	if (rc == -EAGAIN)
+		rc = 0;
+
+	return rc;
+}
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
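+/* Query SMB_QUERY_FILE_INTERNAL_INFO to retrieve the server assigned
+   unique id (inode number) for the given path */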
+int
+CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+                const unsigned char *searchName,
+                __u64 * inode_number,
+                const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int name_len, bytes_returned;
+	__u16 params, byte_count;
+
+	cFYI(1,("In GetSrvInodeNum for %s",searchName));
+	if(tcon == NULL)
+		return -ENODEV; 
+
+GetInodeNumberRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+                      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+			cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, 
+				PATH_MAX,nls_codepage);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+	} else {                /* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;     /* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	/* BB find exact max data count below from sess structure BB */
+	pSMB->MaxDataCount = cpu_to_le16(4000);
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+		struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("error %d in QueryInternalInfo", rc));
+	} else {
+		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		if (rc || (pSMBr->ByteCount < 2))
+		/* BB also check enough total bytes returned */
+			/* If rc should we check for EOPNOTSUPP and
+			disable the srvino flag? or in caller? */
+			rc = -EIO;      /* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+			struct file_internal_info * pfinfo;
+			/* BB Do we need a cast or hash here ? */
+			if(count < 8) {
+				cFYI(1, ("Illegal size ret in QryIntrnlInf"));
+				rc = -EIO;
+				goto GetInodeNumOut;
+			}
+			pfinfo = (struct file_internal_info *)
+				(data_offset + (char *) &pSMBr->hdr.Protocol);
+			*inode_number = pfinfo->UniqueId;
+		}
+	}
+GetInodeNumOut:
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto GetInodeNumberRetry;
+	return rc;
+}
+#endif /* CIFS_EXPERIMENTAL */
+
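+/* Request up to level 3 DFS referrals for searchName over the IPC tid and
+   return the target UNC strings (currently only the first referral is copied) */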
+int
+CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+		const unsigned char *searchName,
+		unsigned char **targetUNCs,
+		unsigned int *number_of_UNC_in_array,
+		const struct nls_table *nls_codepage)
+{
+/* TRANS2_GET_DFS_REFERRAL */
+	TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
+	TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
+	struct dfs_referral_level_3 * referrals = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	unsigned int i;
+	char * temp;
+	__u16 params, byte_count;
+	*number_of_UNC_in_array = 0;
+	*targetUNCs = NULL;
+
+	cFYI(1, ("In GetDFSRefer the path %s", searchName));
+	if (ses == NULL)
+		return -ENODEV;
+getDFSRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->hdr.Tid = ses->ipc_tid;
+	pSMB->hdr.Uid = ses->Suid;
+	if (ses->capabilities & CAP_STATUS32) {
+		pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		pSMB->hdr.Flags2 |= SMBFLG2_DFS;
+	}
+
+	if (ses->capabilities & CAP_UNICODE) {
+		pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->RequestFileName,
+				  searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->RequestFileName, searchName, name_len);
+	}
+
+	params = 2 /* level */  + name_len /*includes null */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->MaxParameterCount = 0;
+	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
+	byte_count = params + 3 /* pad */ ;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->MaxReferralLevel = cpu_to_le16(3);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in GetDFSRefer = %d", rc));
+	} else {		/* decode response */
+/* BB Add logic to parse referrals here */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 17))      /* BB also check enough total bytes returned */
+			rc = -EIO;      /* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); 
+			__u16 data_count = le16_to_cpu(pSMBr->t2.DataCount);
+
+			cFYI(1,
+			     ("Decoding GetDFSRefer response.  BCC: %d  Offset %d",
+			      pSMBr->ByteCount, data_offset));
+			referrals = 
+			    (struct dfs_referral_level_3 *) 
+					(8 /* sizeof start of data block */ +
+					data_offset +
+					(char *) &pSMBr->hdr.Protocol); 
+			cFYI(1,("num_referrals: %d dfs flags: 0x%x ... \nfor referral one refer size: 0x%x srv type: 0x%x refer flags: 0x%x ttl: 0x%x",
+				le16_to_cpu(pSMBr->NumberOfReferrals),le16_to_cpu(pSMBr->DFSFlags), le16_to_cpu(referrals->ReferralSize),le16_to_cpu(referrals->ServerType),le16_to_cpu(referrals->ReferralFlags),le16_to_cpu(referrals->TimeToLive)));
+			/* BB This field is actually two bytes in from start of
+			   data block so we could do safety check that DataBlock
+			   begins at address of pSMBr->NumberOfReferrals */
+			*number_of_UNC_in_array = le16_to_cpu(pSMBr->NumberOfReferrals);
+
+			/* BB Fix below so can return more than one referral */
+			if(*number_of_UNC_in_array > 1)
+				*number_of_UNC_in_array = 1;
+
+			/* get the length of the strings describing refs */
+			name_len = 0;
+			for(i=0;i<*number_of_UNC_in_array;i++) {
+				/* make sure that DfsPathOffset not past end */
+				__u16 offset = le16_to_cpu(referrals->DfsPathOffset);
+				if (offset > data_count) {
+					/* if invalid referral, stop here and do 
+					not try to copy any more */
+					*number_of_UNC_in_array = i;
+					break;
+				} 
+				temp = ((char *)referrals) + offset;
+
+				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
+					name_len += UniStrnlen((wchar_t *)temp,data_count);
+				} else {
+					name_len += strnlen(temp,data_count);
+				}
+				referrals++;
+				/* BB add check that referral pointer does not fall off end PDU */
+				
+			}
+			/* BB add check for name_len bigger than bcc */
+			*targetUNCs = 
+				kmalloc(name_len+1+ (*number_of_UNC_in_array),GFP_KERNEL);
+			if(*targetUNCs == NULL) {
+				rc = -ENOMEM;
+				goto GetDFSRefExit;
+			}
+			/* copy the ref strings */
+			referrals =  
+			    (struct dfs_referral_level_3 *) 
+					(8 /* sizeof data hdr */ +
+					data_offset + 
+					(char *) &pSMBr->hdr.Protocol);
+
+			for(i=0;i<*number_of_UNC_in_array;i++) {
+				temp = ((char *)referrals) + le16_to_cpu(referrals->DfsPathOffset);
+				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
+					cifs_strfromUCS_le(*targetUNCs,
+						(wchar_t *) temp, name_len, nls_codepage);
+				} else {
+					strncpy(*targetUNCs,temp,name_len);
+				}
+				/*  BB update target_uncs pointers */
+				referrals++;
+			}
+			temp = *targetUNCs;
+			temp[name_len] = 0;
+		}
+
+	}
+GetDFSRefExit:
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto getDFSRetry;
+
+	return rc;
+}
+
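+/* Query SMB_QUERY_FS_SIZE_INFO and fill in the block counts in kstatfs */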
+int
+CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
+	       struct kstatfs *FSData, const struct nls_table *nls_codepage)
+{
+/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
+	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+	FILE_SYSTEM_INFO *response_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QFSInfo"));
+QFSInfoRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 2;	/* level */
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cERROR(1, ("Send error in QFSInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 24)) /* BB also check enough total bytes returned */
+			rc = -EIO;	/* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			cFYI(1,
+				("Decoding qfsinfo response.  BCC: %d  Offset %d",
+				pSMBr->ByteCount, data_offset));
+
+			response_data =
+			    (FILE_SYSTEM_INFO
+			     *) (((char *) &pSMBr->hdr.Protocol) +
+				 data_offset);
+			FSData->f_bsize =
+			    le32_to_cpu(response_data->BytesPerSector) *
+			    le32_to_cpu(response_data->
+					SectorsPerAllocationUnit);
+			FSData->f_blocks =
+			    le64_to_cpu(response_data->TotalAllocationUnits);
+			FSData->f_bfree = FSData->f_bavail =
+			    le64_to_cpu(response_data->FreeAllocationUnits);
+			cFYI(1,
+			     ("Blocks: %lld  Free: %lld Block size %ld",
+			      (unsigned long long)FSData->f_blocks,
+			      (unsigned long long)FSData->f_bfree,
+			      FSData->f_bsize));
+		}
+	}
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto QFSInfoRetry;
+
+	return rc;
+}
+
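+/* Query SMB_QUERY_FS_ATTRIBUTE_INFO and cache the result in tcon->fsAttrInfo */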
+int
+CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon,
+			const struct nls_table *nls_codepage)
+{
+/* level 0x105  SMB_QUERY_FILE_SYSTEM_INFO */
+	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+	FILE_SYSTEM_ATTRIBUTE_INFO *response_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QFSAttributeInfo"));
+QFSAttributeRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 2;	/* level */
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cERROR(1, ("Send error in QFSAttributeInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 13)) {	/* BB also check enough bytes returned */
+			rc = -EIO;	/* bad smb */
+		} else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			response_data =
+			    (FILE_SYSTEM_ATTRIBUTE_INFO
+			     *) (((char *) &pSMBr->hdr.Protocol) +
+				 data_offset);
+			memcpy(&tcon->fsAttrInfo, response_data,
+			       sizeof (FILE_SYSTEM_ATTRIBUTE_INFO));
+		}
+	}
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto QFSAttributeRetry;
+
+	return rc;
+}
+
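+/* Query SMB_QUERY_FS_DEVICE_INFO and cache the result in tcon->fsDevInfo */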
+int
+CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon,
+		     const struct nls_table *nls_codepage)
+{
+/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
+	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+	FILE_SYSTEM_DEVICE_INFO *response_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QFSDeviceInfo"));
+QFSDeviceRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 2;	/* level */
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QFSDeviceInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < sizeof (FILE_SYSTEM_DEVICE_INFO)))
+			rc = -EIO;	/* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			response_data =
+			    (FILE_SYSTEM_DEVICE_INFO
+			     *) (((char *) &pSMBr->hdr.Protocol) +
+				 data_offset);
+			memcpy(&tcon->fsDevInfo, response_data,
+			       sizeof (FILE_SYSTEM_DEVICE_INFO));
+		}
+	}
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto QFSDeviceRetry;
+
+	return rc;
+}
+
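+/* Query SMB_QUERY_CIFS_UNIX_INFO and cache the result in tcon->fsUnixInfo */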
+int
+CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+		   const struct nls_table *nls_codepage)
+{
+/* level 0x200  SMB_QUERY_CIFS_UNIX_INFO */
+	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+	FILE_SYSTEM_UNIX_INFO *response_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QFSUnixInfo"));
+QFSUnixRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 2;	/* level */
+	pSMB->TotalDataCount = 0;
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(100);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct 
+        smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cERROR(1, ("Send error in QFSUnixInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 13)) {
+			rc = -EIO;	/* bad smb */
+		} else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			response_data =
+			    (FILE_SYSTEM_UNIX_INFO
+			     *) (((char *) &pSMBr->hdr.Protocol) +
+				 data_offset);
+			memcpy(&tcon->fsUnixInfo, response_data,
+			       sizeof (FILE_SYSTEM_UNIX_INFO));
+		}
+	}
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto QFSUnixRetry;
+
+
+	return rc;
+}
+
+
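+/* Query SMB_QUERY_POSIX_FS_INFO and fill in block and inode counts in kstatfs */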
+int
+CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+		   struct kstatfs *FSData, const struct nls_table *nls_codepage)
+{
+/* level 0x201  SMB_QUERY_CIFS_POSIX_INFO */
+	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+	FILE_SYSTEM_POSIX_INFO *response_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In QFSPosixInfo"));
+QFSPosixRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	params = 2;	/* level */
+	pSMB->TotalDataCount = 0;
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(100);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	byte_count = params + 1 /* pad */ ;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct 
+        smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QFSPosixInfo = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		if (rc || (pSMBr->ByteCount < 13)) {
+			rc = -EIO;	/* bad smb */
+		} else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			response_data =
+			    (FILE_SYSTEM_POSIX_INFO
+			     *) (((char *) &pSMBr->hdr.Protocol) +
+				 data_offset);
+			FSData->f_bsize =
+					le32_to_cpu(response_data->BlockSize);
+			FSData->f_blocks =
+					le64_to_cpu(response_data->TotalBlocks);
+			FSData->f_bfree =
+			    le64_to_cpu(response_data->BlocksAvail);
+			if(response_data->UserBlocksAvail == -1) {
+				FSData->f_bavail = FSData->f_bfree;
+			} else {
+				FSData->f_bavail =
+					le64_to_cpu(response_data->UserBlocksAvail);
+			}
+			if(response_data->TotalFileNodes != -1)
+				FSData->f_files =
+					le64_to_cpu(response_data->TotalFileNodes);
+			if(response_data->FreeFileNodes != -1)
+				FSData->f_ffree =
+					le64_to_cpu(response_data->FreeFileNodes);
+		}
+	}
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto QFSPosixRetry;
+
+	return rc;
+}
+
+
+/* We cannot use the write of zero bytes trick to set the file size,
+   since we need large file support.  Also note that this SetPathInfo
+   based call is preferred to the SetFileInfo based method in the next
+   routine, which is only needed to work around a sharing violation bug
+   in Samba that this routine can run into */
+
+int
+CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+	      __u64 size, int SetAllocation, const struct nls_table *nls_codepage)
+{
+	struct smb_com_transaction2_spi_req *pSMB = NULL;
+	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+	struct file_end_of_file_info *parm_data;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, byte_count, data_count, param_offset, offset;
+
+	cFYI(1, ("In SetEOF"));
+SetEOFRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+	params = 6 + name_len;
+	data_count = sizeof (struct file_end_of_file_info);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB size from sess */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+	if(SetAllocation) {
+		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+		else
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+	} else /* Set File Size */  {
+		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+		else
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+	}
+
+	parm_data =
+	    (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
+				       offset);
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + data_count;
+	pSMB->DataCount = cpu_to_le16(data_count);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	parm_data->FileSize = cpu_to_le64(size);
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("SetPathInfo (file size) returned %d", rc));
+	}
+
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto SetEOFRetry;
+
+	return rc;
+}
+
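+/* Handle based (TRANS2_SET_FILE_INFORMATION) version of the size set above,
+   used when a path based SetEOF could hit a sharing violation (e.g. on Samba) */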
+int
+CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, 
+                   __u16 fid, __u32 pid_of_opener, int SetAllocation)
+{
+	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+	char *data_offset;
+	struct file_end_of_file_info *parm_data;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count, count;
+
+	cFYI(1, ("SetFileSize (via SetFileInfo) %lld",
+			(long long)size));
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+    
+	params = 6;
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;	
+
+	count = sizeof(struct file_end_of_file_info);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB PDU from sess */
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	parm_data =
+		(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
+			offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	parm_data->FileSize = cpu_to_le64(size);
+	pSMB->Fid = fid;
+	if(SetAllocation) {
+		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+		else
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+	} else /* Set File Size */  {
+		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+		else
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+	}
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1,
+		     ("Send error in SetFileInfo (SetFileSize) = %d",
+		      rc));
+	}
+
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+		since file handle passed in no longer valid */
+
+	return rc;
+}
+
+/* Some legacy servers such as NT4 require that the file times be set on 
+   an open handle, rather than by pathname - this is awkward due to
+   potential access conflicts on the open, but it is unavoidable for these
+   old servers since the only other choice is to go from 100 nanosecond DCE
+   time and resort to the original setpathinfo level which takes the ancient
+   DOS time format with 2 second granularity */
+int
+CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_INFO * data, 
+                   __u16 fid)
+{
+	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+	char *data_offset;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count, count;
+
+	cFYI(1, ("Set Times (via SetFileInfo)"));
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	/* At this point there is no need to override the current pid
+	with the pid of the opener, but that could change if we someday
+	use an existing handle (rather than opening one on the fly) */
+	/* pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));*/
+    
+	params = 6;
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+	offset = param_offset + params;
+
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 
+
+	count = sizeof (FILE_BASIC_INFO);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB PDU from sess */
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->Fid = fid;
+	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
+	else
+		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	memcpy(data_offset,data,sizeof(FILE_BASIC_INFO));
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc));
+	}
+
+	cifs_buf_release(pSMB);
+
+	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+		since file handle passed in no longer valid */
+
+	return rc;
+}
+
+
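+/* Path based (TRANS2_SET_PATH_INFORMATION) set of the FILE_BASIC_INFO times
+   and attributes */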
+int
+CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+		const FILE_BASIC_INFO * data, 
+		const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	char *data_offset;
+	__u16 params, param_offset, offset, byte_count, count;
+
+	cFYI(1, ("In SetTimes"));
+
+SetTimesRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+
+	params = 6 + name_len;
+	count = sizeof (FILE_BASIC_INFO);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
+	else
+		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	memcpy(data_offset, data, sizeof (FILE_BASIC_INFO));
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("SetPathInfo (times) returned %d", rc));
+	}
+
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto SetTimesRetry;
+
+	return rc;
+}
+
+/* Can not be used to set time stamps yet (due to old DOS time format) */
+/* Can be used to set attributes */
+#if 0  /* Possibly not needed - since it turns out that strangely NT4 has a bug
+	  handling it anyway and NT4 was what we thought it would be needed for
+	  Do not delete it until we prove whether needed for Win9x though */
+int
+CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
+		__u16 dos_attrs, const struct nls_table *nls_codepage)
+{
+	SETATTR_REQ *pSMB = NULL;
+	SETATTR_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+
+	cFYI(1, ("In SetAttrLegacy"));
+
+SetAttrLgcyRetry:
+	rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+			cifs_strtoUCS((wchar_t *) pSMB->fileName, fileName, 
+				PATH_MAX, nls_codepage);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+	} else {                /* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;     /* trailing null */
+		strncpy(pSMB->fileName, fileName, name_len);
+	}
+	pSMB->attr = cpu_to_le16(dos_attrs);
+	pSMB->BufferFormat = 0x04;
+	pSMB->hdr.smb_buf_length += name_len + 1;
+	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Error in LegacySetAttr = %d", rc));
+	}
+
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto SetAttrLgcyRetry;
+
+	return rc;
+}
+#endif /* temporarily unneeded SetAttr legacy function */
+
+int
+CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
+		    char *fileName, __u64 mode, __u64 uid, __u64 gid,
+		    dev_t device, const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	FILE_UNIX_BASIC_INFO *data_offset;
+	__u16 params, param_offset, offset, count, byte_count;
+
+	cFYI(1, ("In SetUID/GID/Mode"));
+setPermsRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+
+	params = 6 + name_len;
+	count = sizeof (FILE_UNIX_BASIC_INFO);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+	data_offset =
+	    (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol +
+				      offset);
+	memset(data_offset, 0, count);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->DataCount = cpu_to_le16(count);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	data_offset->Uid = cpu_to_le64(uid);
+	data_offset->Gid = cpu_to_le64(gid);
+	/* better to leave device as zero when it is not a device file */
+	data_offset->DevMajor = cpu_to_le64(MAJOR(device));
+	data_offset->DevMinor = cpu_to_le64(MINOR(device));
+	data_offset->Permissions = cpu_to_le64(mode);
+    
+	if(S_ISREG(mode))
+		data_offset->Type = cpu_to_le32(UNIX_FILE);
+	else if(S_ISDIR(mode))
+		data_offset->Type = cpu_to_le32(UNIX_DIR);
+	else if(S_ISLNK(mode))
+		data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
+	else if(S_ISCHR(mode))
+		data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
+	else if(S_ISBLK(mode))
+		data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
+	else if(S_ISFIFO(mode))
+		data_offset->Type = cpu_to_le32(UNIX_FIFO);
+	else if(S_ISSOCK(mode))
+		data_offset->Type = cpu_to_le32(UNIX_SOCKET);
+
+
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("SetPathInfo (perms) returned %d", rc));
+	}
+
+	if (pSMB)
+		cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto setPermsRetry;
+	return rc;
+}
+
+int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 
+			const int notify_subdirs, const __u16 netfid,
+			__u32 filter, const struct nls_table *nls_codepage)
+{
+	int rc = 0;
+	struct smb_com_transaction_change_notify_req * pSMB = NULL;
+	struct smb_com_transaction_change_notify_rsp * pSMBr = NULL;
+	int bytes_returned;
+
+	cFYI(1, ("In CIFSSMBNotify for file handle %d",(int)netfid));
+	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+                      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	pSMB->TotalParameterCount = 0;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le32(2);
+	/* BB find exact data count max from sess structure BB */
+	pSMB->MaxDataCount = 0; /* same in little endian or be */
+	pSMB->MaxSetupCount = 4;
+	pSMB->Reserved = 0;
+	pSMB->ParameterOffset = 0;
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 4; /* single byte does not need le conversion */
+	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	if(notify_subdirs)
+		pSMB->WatchTree = 1; /* one byte - no le conversion needed */
+	pSMB->Reserved2 = 0;
+	pSMB->CompletionFilter = cpu_to_le32(filter);
+	pSMB->Fid = netfid; /* file handle always le */
+	pSMB->ByteCount = 0;
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			(struct smb_hdr *) pSMBr, &bytes_returned, -1);
+	if (rc) {
+		cFYI(1, ("Error in Notify = %d", rc));
+	}
+	cifs_buf_release(pSMB);
+	return rc;	
+}
+#ifdef CONFIG_CIFS_XATTR
+ssize_t
+CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+		 const unsigned char *searchName,
+		 char * EAData, size_t buf_size,
+		 const struct nls_table *nls_codepage)
+{
+		/* BB assumes one setup word */
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	struct fea * temp_fea;
+	char * temp_ptr;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In Query All EAs path %s", searchName));
+QAllEAsRetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {	/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in QueryAllEAs = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		/* BB also check enough total bytes returned */
+		/* BB we need to improve the validity checking
+		of these trans2 responses */
+		if (rc || (pSMBr->ByteCount < 4)) 
+			rc = -EIO;	/* bad smb */
+	   /* else if (pFindData){
+			memcpy((char *) pFindData,
+			       (char *) &pSMBr->hdr.Protocol +
+			       data_offset, kl);
+		}*/ else {
+			/* check that length of list is not more than bcc */
+			/* check that each entry does not go beyond length
+			   of list */
+			/* check that each element of each entry does not
+			   go beyond end of list */
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			struct fealist * ea_response_data;
+			rc = 0;
+			/* validate_trans2_offsets() */
+			/* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+			ea_response_data = (struct fealist *)
+				(((char *) &pSMBr->hdr.Protocol) +
+				data_offset);
+			name_len = le32_to_cpu(ea_response_data->list_len);
+			cFYI(1,("ea length %d", name_len));
+			if(name_len <= 8) {
+			/* returned EA size zeroed at top of function */
+				cFYI(1,("empty EA list returned from server"));
+			} else {
+				/* account for ea list len */
+				name_len -= 4;
+				temp_fea = ea_response_data->list;
+				temp_ptr = (char *)temp_fea;
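+				/* Each FEA entry on the wire is: 1 byte of flags,
+				   1 byte name_len, 2 byte little endian value_len,
+				   then the name, a trailing null, and the value
+				   bytes - the loop below walks that layout */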
+				while(name_len > 0) {
+					__u16 value_len;
+					name_len -= 4;
+					temp_ptr += 4;
+					rc += temp_fea->name_len;
+				/* account for prefix user. and trailing null */
+					rc = rc + 5 + 1; 
+					if(rc<(int)buf_size) {
+						memcpy(EAData,"user.",5);
+						EAData+=5;
+						memcpy(EAData,temp_ptr,temp_fea->name_len);
+						EAData+=temp_fea->name_len;
+						/* null terminate name */
+						*EAData = 0;
+						EAData = EAData + 1;
+					} else if(buf_size == 0) {
+						/* skip copy - calc size only */
+					} else {
+						/* stop before overrun buffer */
+						rc = -ERANGE;
+						break;
+					}
+					name_len -= temp_fea->name_len;
+					temp_ptr += temp_fea->name_len;
+					/* account for trailing null */
+					name_len--;
+					temp_ptr++;
+					value_len = le16_to_cpu(temp_fea->value_len);
+					name_len -= value_len;
+					temp_ptr += value_len;
+					/* BB check that temp_ptr is still within smb BB*/
+				/* no trailing null to account for in value len */
+					/* go on to next EA */
+					temp_fea = (struct fea *)temp_ptr;
+				}
+			}
+		}
+	}
+	if (pSMB)
+		cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto QAllEAsRetry;
+
+	return (ssize_t)rc;
+}
+
+ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
+		const unsigned char * searchName,const unsigned char * ea_name,
+		unsigned char * ea_value, size_t buf_size, 
+		const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_QPI_REQ *pSMB = NULL;
+	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int rc = 0;
+	int bytes_returned;
+	int name_len;
+	struct fea * temp_fea;
+	char * temp_ptr;
+	__u16 params, byte_count;
+
+	cFYI(1, ("In Query EA path %s", searchName));
+QEARetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {	/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(searchName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, searchName, name_len);
+	}
+
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	pSMB->TotalDataCount = 0;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	pSMB->DataCount = 0;
+	pSMB->DataOffset = 0;
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->TotalParameterCount = cpu_to_le16(params);
+	pSMB->ParameterCount = pSMB->TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Send error in Query EA = %d", rc));
+	} else {		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+
+		/* BB also check enough total bytes returned */
+		/* BB we need to improve the validity checking
+		of these trans2 responses */
+		if (rc || (pSMBr->ByteCount < 4)) 
+			rc = -EIO;	/* bad smb */
+	   /* else if (pFindData){
+			memcpy((char *) pFindData,
+			       (char *) &pSMBr->hdr.Protocol +
+			       data_offset, kl);
+		}*/ else {
+			/* check that length of list is not more than bcc */
+			/* check that each entry does not go beyond length
+			   of list */
+			/* check that each element of each entry does not
+			   go beyond end of list */
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			struct fealist * ea_response_data;
+			rc = -ENODATA;
+			/* validate_trans2_offsets() */
+			/* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+			ea_response_data = (struct fealist *)
+				(((char *) &pSMBr->hdr.Protocol) +
+				data_offset);
+			name_len = le32_to_cpu(ea_response_data->list_len);
+			cFYI(1,("ea length %d", name_len));
+			if(name_len <= 8) {
+			/* returned EA size zeroed at top of function */
+				cFYI(1,("empty EA list returned from server"));
+			} else {
+				/* account for ea list len */
+				name_len -= 4;
+				temp_fea = ea_response_data->list;
+				temp_ptr = (char *)temp_fea;
+				/* loop through checking if we have a matching
+				name and then return the associated value */
+				while(name_len > 0) {
+					__u16 value_len;
+					name_len -= 4;
+					temp_ptr += 4;
+					value_len = le16_to_cpu(temp_fea->value_len);
+				/* BB validate that value_len falls within SMB, 
+				even though maximum for name_len is 255 */ 
+					if(memcmp(temp_fea->name,ea_name,
+						  temp_fea->name_len) == 0) {
+						/* found a match */
+						rc = value_len;
+				/* account for prefix user. and trailing null */
+						if(rc<=(int)buf_size) {
+							memcpy(ea_value,
+								temp_fea->name+temp_fea->name_len+1,
+								rc);
+							/* ea values, unlike ea names,
+							are not null terminated */
+						} else if(buf_size == 0) {
+						/* skip copy - calc size only */
+						} else {
+							/* stop before overrun buffer */
+							rc = -ERANGE;
+						}
+						break;
+					}
+					name_len -= temp_fea->name_len;
+					temp_ptr += temp_fea->name_len;
+					/* account for trailing null */
+					name_len--;
+					temp_ptr++;
+					name_len -= value_len;
+					temp_ptr += value_len;
+				/* no trailing null to account for in value len */
+					/* go on to next EA */
+					temp_fea = (struct fea *)temp_ptr;
+				}
+			} 
+		}
+	}
+	if (pSMB)
+		cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto QEARetry;
+
+	return (ssize_t)rc;
+}
+
+int
+CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+		const char * ea_name, const void * ea_value, 
+		const __u16 ea_value_len, const struct nls_table *nls_codepage)
+{
+	struct smb_com_transaction2_spi_req *pSMB = NULL;
+	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+	struct fealist *parm_data;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, byte_count, offset, count;
+
+	cFYI(1, ("In SetEA"));
+SetEARetry:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, PATH_MAX
+				  /* find define for this maxpathcomponent */
+				  , nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else {		/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+
+	params = 6 + name_len;
+
+	/* done calculating parms using name_len of file name,
+	now use name_len to calculate length of ea name
+	we are going to create in the inode xattrs */
+	if(ea_name == NULL)
+		name_len = 0;
+	else
+		name_len = strnlen(ea_name,255);
+
+	count = sizeof(*parm_data) + ea_value_len + name_len + 1;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB size from sess */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+                                     InformationLevel) - 4;
+	offset = param_offset + params;
+	pSMB->InformationLevel =
+		cpu_to_le16(SMB_SET_FILE_EA);
+
+	parm_data =
+		(struct fealist *) (((char *) &pSMB->hdr.Protocol) +
+				       offset);
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + count;
+	pSMB->DataCount = cpu_to_le16(count);
+	parm_data->list_len = cpu_to_le32(count);
+	parm_data->list[0].EA_flags = 0;
+	/* we checked above that name len is less than 255 */
+	parm_data->list[0].name_len = (__u8)name_len;
+	/* EA names are always ASCII */
+	if(ea_name)
+		strncpy(parm_data->list[0].name,ea_name,name_len);
+	parm_data->list[0].name[name_len] = 0;
+	parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
+	/* caller ensures that ea_value_len is less than 64K but
+	we need to ensure that it fits within the smb */
+
+	/*BB add length check that it would fit in negotiated SMB buffer size BB */
+	/* if(ea_value_len > buffer_size - 512 (enough for header)) */
+	if(ea_value_len)
+		memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
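+	/* parm_data->list[0] now holds flags, name_len, value_len, the null
+	terminated name, then the value bytes immediately after the null */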
+
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("SetPathInfo (EA) returned %d", rc));
+	}
+
+	cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto SetEARetry;
+
+	return rc;
+}
+
+#endif
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
new file mode 100644
index 0000000..40470b9
--- /dev/null
+++ b/fs/cifs/connect.c
@@ -0,0 +1,3064 @@
+/*
+ *   fs/cifs/connect.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/ipv6.h>
+#include <linux/pagemap.h>
+#include <linux/ctype.h>
+#include <linux/utsname.h>
+#include <linux/mempool.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+#include "ntlmssp.h"
+#include "nterr.h"
+#include "rfc1002pdu.h"
+
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
+extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
+		       unsigned char *p24);
+extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
+			 unsigned char *p24);
+
+extern mempool_t *cifs_req_poolp;
+
+struct smb_vol {
+	char *username;
+	char *password;
+	char *domainname;
+	char *UNC;
+	char *UNCip;
+	char *in6_addr;  /* ipv6 address as human readable form of in6_addr */
+	char *iocharset;  /* local code page for mapping to and from Unicode */
+	char source_rfc1001_name[16]; /* netbios name of client */
+	uid_t linux_uid;
+	gid_t linux_gid;
+	mode_t file_mode;
+	mode_t dir_mode;
+	unsigned rw:1;
+	unsigned retry:1;
+	unsigned intr:1;
+	unsigned setuids:1;
+	unsigned noperm:1;
+	unsigned no_psx_acl:1; /* set if posix acl support should be disabled */
+	unsigned no_xattr:1;   /* set if xattr (EA) support should be disabled*/
+	unsigned server_ino:1; /* use inode numbers from server ie UniqueId */
+	unsigned direct_io:1;
+	unsigned int rsize;
+	unsigned int wsize;
+	unsigned int sockopt;
+	unsigned short int port;
+};
+
+static int ipv4_connect(struct sockaddr_in *psin_server, 
+			struct socket **csocket,
+			char * netb_name);
+static int ipv6_connect(struct sockaddr_in6 *psin_server, 
+			struct socket **csocket);
+
+
+	/* 
+	 * cifs tcp session reconnection
+	 * 
+	 * mark tcp session as reconnecting so it is temporarily locked
+	 * mark all smb sessions as reconnecting for tcp session
+	 * reconnect tcp session
+	 * wake up waiters on reconnection? - (not needed currently)
+	 */
+
+int
+cifs_reconnect(struct TCP_Server_Info *server)
+{
+	int rc = 0;
+	struct list_head *tmp;
+	struct cifsSesInfo *ses;
+	struct cifsTconInfo *tcon;
+	struct mid_q_entry * mid_entry;
+	
+	spin_lock(&GlobalMid_Lock);
+	if(server->tcpStatus == CifsExiting) {
+		/* the demux thread will exit normally 
+		next time through the loop */
+		spin_unlock(&GlobalMid_Lock);
+		return rc;
+	} else
+		server->tcpStatus = CifsNeedReconnect;
+	spin_unlock(&GlobalMid_Lock);
+	server->maxBuf = 0;
+
+	cFYI(1, ("Reconnecting tcp session "));
+
+	/* before reconnecting the tcp session, mark the smb session (uid)
+		and the tid bad so they are not used until reconnected */
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalSMBSessionList) {
+		ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
+		if (ses->server) {
+			if (ses->server == server) {
+				ses->status = CifsNeedReconnect;
+				ses->ipc_tid = 0;
+			}
+		}
+		/* else tcp and smb sessions need reconnection */
+	}
+	list_for_each(tmp, &GlobalTreeConnectionList) {
+		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
+		if((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
+			tcon->tidStatus = CifsNeedReconnect;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	/* do not want to be sending data on a socket we are freeing */
+	down(&server->tcpSem); 
+	if(server->ssocket) {
+		cFYI(1,("State: 0x%x Flags: 0x%lx", server->ssocket->state,
+			server->ssocket->flags));
+		server->ssocket->ops->shutdown(server->ssocket,SEND_SHUTDOWN);
+		cFYI(1,("Post shutdown state: 0x%x Flags: 0x%lx", server->ssocket->state,
+			server->ssocket->flags));
+		sock_release(server->ssocket);
+		server->ssocket = NULL;
+	}
+
+	spin_lock(&GlobalMid_Lock);
+	list_for_each(tmp, &server->pending_mid_q) {
+		mid_entry = list_entry(tmp, struct
+					mid_q_entry,
+					qhead);
+		if(mid_entry) {
+			if(mid_entry->midState == MID_REQUEST_SUBMITTED) {
+				/* Mark other in-transit requests as needing retry so 
+				  we do not immediately mark the session bad again 
+				  (ie after we reconnect below) as they time out too */
+				mid_entry->midState = MID_RETRY_NEEDED;
+			}
+		}
+	}
+	spin_unlock(&GlobalMid_Lock);
+	up(&server->tcpSem); 
+
+	while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood))
+	{
+		if(server->protocolType == IPV6) {
+			rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket);
+		} else {
+			rc = ipv4_connect(&server->addr.sockAddr, 
+					&server->ssocket,
+					server->workstation_RFC1001_name);
+		}
+		if(rc) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(3 * HZ);
+		} else {
+			atomic_inc(&tcpSesReconnectCount);
+			spin_lock(&GlobalMid_Lock);
+			if(server->tcpStatus != CifsExiting)
+				server->tcpStatus = CifsGood;
+			spin_unlock(&GlobalMid_Lock);
+	/*		atomic_set(&server->inFlight,0);*/
+			wake_up(&server->response_q);
+		}
+	}
+	return rc;
+}
+
+static int
+cifs_demultiplex_thread(struct TCP_Server_Info *server)
+{
+	int length;
+	unsigned int pdu_length, total_read;
+	struct smb_hdr *smb_buffer = NULL;
+	struct msghdr smb_msg;
+	struct kvec iov;
+	struct socket *csocket = server->ssocket;
+	struct list_head *tmp;
+	struct cifsSesInfo *ses;
+	struct task_struct *task_to_wake = NULL;
+	struct mid_q_entry *mid_entry;
+	char *temp;
+
+	daemonize("cifsd");
+	allow_signal(SIGKILL);
+	current->flags |= PF_MEMALLOC;
+	server->tsk = current;	/* save process info to wake at shutdown */
+	cFYI(1, ("Demultiplex PID: %d", current->pid));
+	write_lock(&GlobalSMBSeslock); 
+	atomic_inc(&tcpSesAllocCount);
+	length = atomic_read(&tcpSesAllocCount);
+	write_unlock(&GlobalSMBSeslock);
+	if(length  > 1) {
+		mempool_resize(cifs_req_poolp,
+			length + cifs_min_rcv,
+			GFP_KERNEL);
+	}
+
+	while (server->tcpStatus != CifsExiting) {
+		if (smb_buffer == NULL)
+			smb_buffer = cifs_buf_get();
+		else
+			memset(smb_buffer, 0, sizeof (struct smb_hdr));
+
+		if (smb_buffer == NULL) {
+			cERROR(1,("Can not get memory for SMB response"));
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ * 3); /* give system time to free memory */
+			continue;
+		}
+		iov.iov_base = smb_buffer;
+		iov.iov_len = 4;
+		smb_msg.msg_control = NULL;
+		smb_msg.msg_controllen = 0;
+		length =
+		    kernel_recvmsg(csocket, &smb_msg,
+				 &iov, 1, 4, 0 /* BB see socket.h flags */);
+
+		if(server->tcpStatus == CifsExiting) {
+			break;
+		} else if (server->tcpStatus == CifsNeedReconnect) {
+			cFYI(1,("Reconnecting after server stopped responding"));
+			cifs_reconnect(server);
+			cFYI(1,("call to reconnect done"));
+			csocket = server->ssocket;
+			continue;
+		} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(1); /* minimum sleep to prevent looping
+				allowing socket to clear and app threads to set
+				tcpStatus CifsNeedReconnect if server hung */
+			continue;
+		} else if (length <= 0) {
+			if(server->tcpStatus == CifsNew) {
+				cFYI(1,("tcp session abended prematurely (after SMBnegprot)"));
+				/* some servers kill tcp session rather than returning
+					smb negprot error in which case reconnecting here is
+					not going to help - return error to mount */
+				break;
+			}
+			if(length == -EINTR) { 
+				cFYI(1,("cifsd thread killed"));
+				break;
+			}
+			cFYI(1,("Reconnecting after unexpected peek error %d",length));
+			cifs_reconnect(server);
+			csocket = server->ssocket;
+			wake_up(&server->response_q);
+			continue;
+		} else if (length > 3) {
+			pdu_length = ntohl(smb_buffer->smb_buf_length);
+		/* Only trust pdu_length after the below checks for too short (due
+		   to e.g. int overflow) and too long ie beyond end of buf */
+			cFYI(1,("rfc1002 length (big endian) 0x%x", pdu_length+4));
+
+			temp = (char *) smb_buffer;
+			if (temp[0] == (char) RFC1002_SESSION_KEEP_ALIVE) {
+				cFYI(0,("Received 4 byte keep alive packet"));
+			} else if (temp[0] == (char) RFC1002_POSITIVE_SESSION_RESPONSE) {
+					cFYI(1,("Good RFC 1002 session rsp"));
+			} else if (temp[0] == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
+				/* we get this from Windows 98 instead of error on SMB negprot response */
+				cFYI(1,("Negative RFC 1002 Session Response Error 0x%x",temp[4]));
+				if(server->tcpStatus == CifsNew) {
+					/* if nack on negprot (rather than 
+					ret of smb negprot error) reconnecting
+					not going to help, ret error to mount */
+					break;
+				} else {
+					/* give server a second to
+					clean up before reconnect attempt */
+					set_current_state(TASK_INTERRUPTIBLE);
+					schedule_timeout(HZ);
+					/* always try 445 first on reconnect
+					since we get NACK on some if we ever
+					connected to port 139 (the NACK is 
+					since we do not begin with RFC1001
+					session initialize frame) */
+					server->addr.sockAddr.sin_port = htons(CIFS_PORT);
+					cifs_reconnect(server);
+					csocket = server->ssocket;
+					wake_up(&server->response_q);
+					continue;
+				}
+			} else if (temp[0] != (char) 0) {
+				cERROR(1,("Unknown RFC 1002 frame"));
+				cifs_dump_mem(" Received Data: ", temp, length);
+				cifs_reconnect(server);
+				csocket = server->ssocket;
+				continue;
+			} else {
+				if((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
+				    || (pdu_length < sizeof (struct smb_hdr) - 1 - 4)) {
+					cERROR(1,
+					    ("Invalid size SMB length %d and pdu_length %d",
+						length, pdu_length+4));
+					cifs_reconnect(server);
+					csocket = server->ssocket;
+					wake_up(&server->response_q);
+					continue;
+				} else { /* length ok */
+					length = 0;
+					iov.iov_base = 4 + (char *)smb_buffer;
+					iov.iov_len = pdu_length;
+					for (total_read = 0; 
+					     total_read < pdu_length;
+					     total_read += length) {
+						length = kernel_recvmsg(csocket, &smb_msg, 
+							&iov, 1,
+							pdu_length - total_read, 0);
+						if (length == 0) {
+							cERROR(1,
+							       ("Zero length receive when expecting %d ",
+								pdu_length - total_read));
+							cifs_reconnect(server);
+							csocket = server->ssocket;
+							wake_up(&server->response_q);
+							continue;
+						}
+					}
+					length += 4; /* account for rfc1002 hdr */
+				}
+
+				dump_smb(smb_buffer, length);
+				if (checkSMB
+				    (smb_buffer, smb_buffer->Mid, total_read+4)) {
+					cERROR(1, ("Bad SMB Received "));
+					continue;
+				}
+
+				task_to_wake = NULL;
+				spin_lock(&GlobalMid_Lock);
+				list_for_each(tmp, &server->pending_mid_q) {
+					mid_entry = list_entry(tmp, struct
+							       mid_q_entry,
+							       qhead);
+
+					if ((mid_entry->mid == smb_buffer->Mid) && (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+						cFYI(1,
+						     (" Mid 0x%x matched - waking up ",mid_entry->mid));
+						task_to_wake = mid_entry->tsk;
+						mid_entry->resp_buf =
+						    smb_buffer;
+						mid_entry->midState =
+						    MID_RESPONSE_RECEIVED;
+					}
+				}
+				spin_unlock(&GlobalMid_Lock);
+				if (task_to_wake) {
+					smb_buffer = NULL;	/* will be freed by user's thread after it is done */
+					wake_up_process(task_to_wake);
+				} else if (is_valid_oplock_break(smb_buffer) == FALSE) {                          
+					cERROR(1, ("No task to wake, unknown frame rcvd!"));
+					cifs_dump_mem("Received Data is: ",temp,sizeof(struct smb_hdr));
+				}
+			}
+		} else {
+			cFYI(1,
+			    ("Frame less than four bytes received, %d bytes long.",
+			      length));
+			cifs_reconnect(server);
+			csocket = server->ssocket;
+			wake_up(&server->response_q);
+			continue;
+		}
+	}
+	spin_lock(&GlobalMid_Lock);
+	server->tcpStatus = CifsExiting;
+	server->tsk = NULL;
+	atomic_set(&server->inFlight, 0);
+	spin_unlock(&GlobalMid_Lock);
+	/* Although there should not be any requests blocked on 
+	this queue it can not hurt to be paranoid and try to wake up requests
+	that may have been blocked when more than 50 at a time were on the wire 
+	to the same server - they now will see the session is in exit state
+	and get out of SendReceive.  */
+	wake_up_all(&server->request_q);
+	/* give those requests time to exit */
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(HZ/8);
+
+	if(server->ssocket) {
+		sock_release(csocket);
+		server->ssocket = NULL;
+	}
+	if (smb_buffer) /* buffer usually freed in free_mid - need to free it on error or exit */
+		cifs_buf_release(smb_buffer);
+
+	read_lock(&GlobalSMBSeslock);
+	if (list_empty(&server->pending_mid_q)) {
+		/* loop through server session structures attached to this and mark them dead */
+		list_for_each(tmp, &GlobalSMBSessionList) {
+			ses =
+			    list_entry(tmp, struct cifsSesInfo,
+				       cifsSessionList);
+			if (ses->server == server) {
+				ses->status = CifsExiting;
+				ses->server = NULL;
+			}
+		}
+		read_unlock(&GlobalSMBSeslock);
+	} else {
+		spin_lock(&GlobalMid_Lock);
+		list_for_each(tmp, &server->pending_mid_q) {
+		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
+				cFYI(1,
+					 (" Clearing Mid 0x%x - waking up ",mid_entry->mid));
+				task_to_wake = mid_entry->tsk;
+				if(task_to_wake) {
+					wake_up_process(task_to_wake);
+				}
+			}
+		}
+		spin_unlock(&GlobalMid_Lock);
+		read_unlock(&GlobalSMBSeslock);
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* 1/8th of sec is more than enough time for them to exit */
+		schedule_timeout(HZ/8); 
+	}
+
+	if (list_empty(&server->pending_mid_q)) {
+		/* mpx threads have not exited yet - give them 
+		at least the smb send timeout time for long ops */
+		cFYI(1, ("Wait for exit from demultiplex thread"));
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(46 * HZ);	
+		/* if threads still have not exited they are probably never
+		coming home - not much else we can do but free the memory */
+	}
+	kfree(server);
+
+	write_lock(&GlobalSMBSeslock);
+	atomic_dec(&tcpSesAllocCount);
+	length = atomic_read(&tcpSesAllocCount);
+	write_unlock(&GlobalSMBSeslock);
+	if(length  > 0) {
+		mempool_resize(cifs_req_poolp,
+			length + cifs_min_rcv,
+			GFP_KERNEL);
+	}
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(HZ/4);
+	return 0;
+}
+
+static void * 
+cifs_kcalloc(size_t size, unsigned int __nocast type)
+{
+	void *addr;
+	addr = kmalloc(size, type);
+	if (addr)
+		memset(addr, 0, size);
+	return addr;
+}
+
+static int
+cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
+{
+	char *value;
+	char *data;
+	unsigned int  temp_len, i, j;
+	char separator[2];
+
+	separator[0] = ',';
+	separator[1] = 0; 
+
+	memset(vol->source_rfc1001_name,0x20,15);
+	for(i=0;i < strnlen(system_utsname.nodename,15);i++) {
+		/* does not have to be a perfect mapping since the field is
+		informational, only used for servers that do not support
+		port 445 and it can be overridden at mount time */
+		vol->source_rfc1001_name[i] = toupper(system_utsname.nodename[i]);
+	}
+	vol->source_rfc1001_name[15] = 0;
+
+	vol->linux_uid = current->uid;	/* current->euid instead? */
+	vol->linux_gid = current->gid;
+	vol->dir_mode = S_IRWXUGO;
+	/* 2767 perms indicate mandatory locking support */
+	vol->file_mode = S_IALLUGO & ~(S_ISUID | S_IXGRP);
+
+	/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
+	vol->rw = TRUE;
+
+	if (!options)
+		return 1;
+
+	if(strncmp(options,"sep=",4) == 0) {
+		if(options[4] != 0) {
+			separator[0] = options[4];
+			options += 5;
+		} else {
+			cFYI(1,("Null separator not allowed"));
+		}
+	}
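+	/* e.g. "sep=;user=foo;pass=bar" switches the option separator to ';' */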
+		
+	while ((data = strsep(&options, separator)) != NULL) {
+		if (!*data)
+			continue;
+		if ((value = strchr(data, '=')) != NULL)
+			*value++ = '\0';
+
+		if (strnicmp(data, "user_xattr",10) == 0) {/*parse before user*/
+			vol->no_xattr = 0;
+		} else if (strnicmp(data, "nouser_xattr",12) == 0) {
+			vol->no_xattr = 1;
+		} else if (strnicmp(data, "user", 4) == 0) {
+			if (!value || !*value) {
+				printk(KERN_WARNING
+				       "CIFS: invalid or missing username\n");
+				return 1;	/* needs_arg; */
+			}
+			if (strnlen(value, 200) < 200) {
+				vol->username = value;
+			} else {
+				printk(KERN_WARNING "CIFS: username too long\n");
+				return 1;
+			}
+		} else if (strnicmp(data, "pass", 4) == 0) {
+			if (!value) {
+				vol->password = NULL;
+				continue;
+			} else if(value[0] == 0) {
+				/* check if string begins with double comma
+				   since that would mean the password really
+				   does start with a comma, and would not
+				   indicate an empty string */
+				if(value[1] != separator[0]) {
+					vol->password = NULL;
+					continue;
+				}
+			}
+			temp_len = strlen(value);
+			/* removed password length check, NTLM passwords
+				can be arbitrarily long */
+
+			/* if comma in password, the string will be 
+			prematurely null terminated.  Commas in password are
+			specified across the cifs mount interface by a double
+			comma ie ,, and a comma used as in other cases ie ','
+			as a parameter delimiter/separator is single and due
+			to the strsep above is temporarily zeroed. */
+
+			/* NB: password legally can have multiple commas and
+			the only illegal character in a password is null */
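+			/* e.g. mounting with pass=a,,b yields the single
+			password "a,b" after the condensing loop below */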
+
+			if ((value[temp_len] == 0) && (value[temp_len+1] == separator[0])) {
+				/* reinsert comma */
+				value[temp_len] = separator[0];
+				temp_len+=2;  /* move after the second comma */
+				while(value[temp_len] != 0)  {
+					if (value[temp_len] == separator[0]) {
+						if (value[temp_len+1] == separator[0]) {
+							temp_len++; /* skip second comma */
+						} else { 
+						/* single comma indicating start
+							 of next parm */
+							break;
+						}
+					}
+					temp_len++;
+				}
+				if(value[temp_len] == 0) {
+					options = NULL;
+				} else {
+					value[temp_len] = 0;
+					/* point option to start of next parm */
+					options = value + temp_len + 1;
+				}
+				/* go from value to value + temp_len condensing 
+				double commas to singles. Note that this ends up
+				allocating a few bytes too many, which is ok */
+				vol->password = cifs_kcalloc(temp_len, GFP_KERNEL);
+				for(i=0,j=0;i<temp_len;i++,j++) {
+					vol->password[j] = value[i];
+					if(value[i] == separator[0] && value[i+1] == separator[0]) {
+						/* skip second comma */
+						i++;
+					}
+				}
+				vol->password[j] = 0;
+			} else {
+				vol->password = cifs_kcalloc(temp_len + 1, GFP_KERNEL);
+				strcpy(vol->password, value);
+			}
+		} else if (strnicmp(data, "ip", 2) == 0) {
+			if (!value || !*value) {
+				vol->UNCip = NULL;
+			} else if (strnlen(value, 35) < 35) {
+				vol->UNCip = value;
+			} else {
+				printk(KERN_WARNING "CIFS: ip address too long\n");
+				return 1;
+			}
+		} else if ((strnicmp(data, "unc", 3) == 0)
+			   || (strnicmp(data, "target", 6) == 0)
+			   || (strnicmp(data, "path", 4) == 0)) {
+			if (!value || !*value) {
+				printk(KERN_WARNING
+				       "CIFS: invalid path to network resource\n");
+				return 1;	/* needs_arg; */
+			}
+			if ((temp_len = strnlen(value, 300)) < 300) {
+				vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+				if(vol->UNC == NULL)
+					return 1;
+				strcpy(vol->UNC,value);
+				if (strncmp(vol->UNC, "//", 2) == 0) {
+					vol->UNC[0] = '\\';
+					vol->UNC[1] = '\\';
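+					/* note that only the leading // is converted
+					   here; later slashes pass through as typed */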
+				} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {	                   
+					printk(KERN_WARNING
+					       "CIFS: UNC Path does not begin with // or \\\\ \n");
+					return 1;
+				}
+			} else {
+				printk(KERN_WARNING "CIFS: UNC name too long\n");
+				return 1;
+			}
+		} else if ((strnicmp(data, "domain", 3) == 0)
+			   || (strnicmp(data, "workgroup", 5) == 0)) {
+			if (!value || !*value) {
+				printk(KERN_WARNING "CIFS: invalid domain name\n");
+				return 1;	/* needs_arg; */
+			}
+			/* BB are there cases in which a comma can be valid in
+			a domain name and need special handling? */
+			if (strnlen(value, 65) < 65) {
+				vol->domainname = value;
+				cFYI(1, ("Domain name set"));
+			} else {
+				printk(KERN_WARNING "CIFS: domain name too long\n");
+				return 1;
+			}
+		} else if (strnicmp(data, "iocharset", 9) == 0) {
+			if (!value || !*value) {
+				printk(KERN_WARNING "CIFS: invalid iocharset specified\n");
+				return 1;	/* needs_arg; */
+			}
+			if (strnlen(value, 65) < 65) {
+				if(strnicmp(value,"default",7))
+					vol->iocharset = value;
+				/* if iocharset not set load_nls_default used by caller */
+				cFYI(1, ("iocharset set to %s",value));
+			} else {
+				printk(KERN_WARNING "CIFS: iocharset name too long.\n");
+				return 1;
+			}
+		} else if (strnicmp(data, "uid", 3) == 0) {
+			if (value && *value) {
+				vol->linux_uid =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "gid", 3) == 0) {
+			if (value && *value) {
+				vol->linux_gid =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "file_mode", 4) == 0) {
+			if (value && *value) {
+				vol->file_mode =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "dir_mode", 4) == 0) {
+			if (value && *value) {
+				vol->dir_mode =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "dirmode", 4) == 0) {
+			if (value && *value) {
+				vol->dir_mode =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "port", 4) == 0) {
+			if (value && *value) {
+				vol->port =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "rsize", 5) == 0) {
+			if (value && *value) {
+				vol->rsize =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "wsize", 5) == 0) {
+			if (value && *value) {
+				vol->wsize =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "sockopt", 5) == 0) {
+			if (value && *value) {
+				vol->sockopt =
+					simple_strtoul(value, &value, 0);
+			}
+		} else if (strnicmp(data, "netbiosname", 4) == 0) {
+			if (!value || !*value || (*value == ' ')) {
+				cFYI(1,("invalid (empty) netbiosname specified"));
+			} else {
+				memset(vol->source_rfc1001_name,0x20,15);
+				for(i=0;i<15;i++) {
+				/* BB are there cases in which a comma can be 
+				valid in this workstation netbios name (and need
+				special handling)? */
+
+				/* We do not uppercase netbiosname for user */
+					if (value[i]==0)
+						break;
+					else 
+						vol->source_rfc1001_name[i] = value[i];
+				}
+				/* The string still has its 16th byte zeroed from
+				the memset at the top of the function  */
+				if((i==15) && (value[i] != 0))
+					printk(KERN_WARNING "CIFS: netbiosname longer than 15 and was truncated.\n");
+			}
+		} else if (strnicmp(data, "credentials", 4) == 0) {
+			/* ignore */
+		} else if (strnicmp(data, "version", 3) == 0) {
+			/* ignore */
+		} else if (strnicmp(data, "guest",5) == 0) {
+			/* ignore */
+		} else if (strnicmp(data, "rw", 2) == 0) {
+			vol->rw = TRUE;
+		} else if ((strnicmp(data, "suid", 4) == 0) ||
+				   (strnicmp(data, "nosuid", 6) == 0) ||
+				   (strnicmp(data, "exec", 4) == 0) ||
+				   (strnicmp(data, "noexec", 6) == 0) ||
+				   (strnicmp(data, "nodev", 5) == 0) ||
+				   (strnicmp(data, "noauto", 6) == 0) ||
+				   (strnicmp(data, "dev", 3) == 0)) {
+			/*  The mount tool or mount.cifs helper (if present)
+				uses these opts to set flags, and the flags are read
+				by the kernel vfs layer before we get here (ie
+				before read super) so there is no point trying to
+				parse these options again and set anything and it
+				is ok to just ignore them */
+			continue;
+		} else if (strnicmp(data, "ro", 2) == 0) {
+			vol->rw = FALSE;
+		} else if (strnicmp(data, "hard", 4) == 0) {
+			vol->retry = 1;
+		} else if (strnicmp(data, "soft", 4) == 0) {
+			vol->retry = 0;
+		} else if (strnicmp(data, "perm", 4) == 0) {
+			vol->noperm = 0;
+		} else if (strnicmp(data, "noperm", 6) == 0) {
+			vol->noperm = 1;
+		} else if (strnicmp(data, "setuids", 7) == 0) {
+			vol->setuids = 1;
+		} else if (strnicmp(data, "nosetuids", 9) == 0) {
+			vol->setuids = 0;
+		} else if (strnicmp(data, "nohard", 6) == 0) {
+			vol->retry = 0;
+		} else if (strnicmp(data, "nosoft", 6) == 0) {
+			vol->retry = 1;
+		} else if (strnicmp(data, "nointr", 6) == 0) {
+			vol->intr = 0;
+		} else if (strnicmp(data, "intr", 4) == 0) {
+			vol->intr = 1;
+		} else if (strnicmp(data, "serverino",7) == 0) {
+			vol->server_ino = 1;
+		} else if (strnicmp(data, "noserverino",9) == 0) {
+			vol->server_ino = 0;
+		} else if (strnicmp(data, "acl",3) == 0) {
+			vol->no_psx_acl = 0;
+		} else if (strnicmp(data, "noacl",5) == 0) {
+			vol->no_psx_acl = 1;
+		} else if (strnicmp(data, "direct",6) == 0) {
+			vol->direct_io = 1;
+		} else if (strnicmp(data, "forcedirectio",13) == 0) {
+			vol->direct_io = 1;
+		} else if (strnicmp(data, "in6_addr",8) == 0) {
+			if (!value || !*value) {
+				vol->in6_addr = NULL;
+			} else if (strnlen(value, 49) == 48) {
+				vol->in6_addr = value;
+			} else {
+				printk(KERN_WARNING "CIFS: ip v6 address not 48 characters long\n");
+				return 1;
+			}
+		} else if (strnicmp(data, "noac", 4) == 0) {
+			printk(KERN_WARNING "CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+		} else
+			printk(KERN_WARNING "CIFS: Unknown mount option %s\n",data);
+	}
+	if (vol->UNC == NULL) {
+		if(devname == NULL) {
+			printk(KERN_WARNING "CIFS: Missing UNC name for mount target\n");
+			return 1;
+		}
+		if ((temp_len = strnlen(devname, 300)) < 300) {
+			vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+			if(vol->UNC == NULL)
+				return 1;
+			strcpy(vol->UNC,devname);
+			if (strncmp(vol->UNC, "//", 2) == 0) {
+				vol->UNC[0] = '\\';
+				vol->UNC[1] = '\\';
+			} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
+				printk(KERN_WARNING "CIFS: UNC Path does not begin with // or \\\\ \n");
+				return 1;
+			}
+		} else {
+			printk(KERN_WARNING "CIFS: UNC name too long\n");
+			return 1;
+		}
+	}
+	if(vol->UNCip == NULL)
+		vol->UNCip = &vol->UNC[2];
+
+	return 0;
+}
+
+static struct cifsSesInfo *
+cifs_find_tcp_session(struct in_addr * target_ip_addr, 
+		struct in6_addr *target_ip6_addr,
+		 char *userName, struct TCP_Server_Info **psrvTcp)
+{
+	struct list_head *tmp;
+	struct cifsSesInfo *ses;
+	*psrvTcp = NULL;
+	read_lock(&GlobalSMBSeslock);
+
+	list_for_each(tmp, &GlobalSMBSessionList) {
+		ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
+		if (ses->server) {
+			if((target_ip_addr && 
+				(ses->server->addr.sockAddr.sin_addr.s_addr
+				  == target_ip_addr->s_addr)) || (target_ip6_addr
+				&& (memcmp(&ses->server->addr.sockAddr6.sin6_addr,
+					target_ip6_addr,sizeof(*target_ip6_addr)) == 0))){
+				/* BB lock server and tcp session and increment use count here?? */
+				*psrvTcp = ses->server;	/* found a match on the TCP session */
+				/* BB check if reconnection needed */
+				if (strncmp
+				    (ses->userName, userName,
+				     MAX_USERNAME_SIZE) == 0){
+					read_unlock(&GlobalSMBSeslock);
+					return ses;	/* found exact match on both tcp and SMB sessions */
+				}
+			}
+		}
+		/* else tcp and smb sessions need reconnection */
+	}
+	read_unlock(&GlobalSMBSeslock);
+	return NULL;
+}
+
+static struct cifsTconInfo *
+find_unc(__be32 new_target_ip_addr, char *uncName, char *userName)
+{
+	struct list_head *tmp;
+	struct cifsTconInfo *tcon;
+
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalTreeConnectionList) {
+		cFYI(1, ("Next tcon - "));
+		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
+		if (tcon->ses) {
+			if (tcon->ses->server) {
+				cFYI(1,
+				     (" old ip addr: %x == new ip %x ?",
+				      tcon->ses->server->addr.sockAddr.sin_addr.
+				      s_addr, new_target_ip_addr));
+				if (tcon->ses->server->addr.sockAddr.sin_addr.
+				    s_addr == new_target_ip_addr) {
+	/* BB lock tcon and server and tcp session and increment use count here? */
+					/* found a match on the TCP session */
+					/* BB check if reconnection needed */
+					cFYI(1,("Matched ip, old UNC: %s == new: %s ?",
+					      tcon->treeName, uncName));
+					if (strncmp
+					    (tcon->treeName, uncName,
+					     MAX_TREE_SIZE) == 0) {
+						cFYI(1,
+						     ("Matched UNC, old user: %s == new: %s ?",
+						      tcon->ses->userName, userName));
+						if (strncmp
+						    (tcon->ses->userName,
+						     userName,
+						     MAX_USERNAME_SIZE) == 0) {
+							read_unlock(&GlobalSMBSeslock);
+							return tcon;/* also matched user (smb session)*/
+						}
+					}
+				}
+			}
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	return NULL;
+}
+
+int
+connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+		    const char *old_path, const struct nls_table *nls_codepage)
+{
+	unsigned char *referrals = NULL;
+	unsigned int num_referrals;
+	int rc = 0;
+
+	rc = get_dfs_path(xid, pSesInfo,old_path, nls_codepage, 
+			&num_referrals, &referrals);
+
+	/* BB Add in code to: if valid referral, if not an ip address, contact
+		the helper that resolves tcp names, mount to it, try to 
+		tcon to it, unmount it if that fails */
+
+	if(referrals)
+		kfree(referrals);
+
+	return rc;
+}
+
+int
+get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+			const char *old_path, const struct nls_table *nls_codepage, 
+			unsigned int *pnum_referrals, unsigned char ** preferrals)
+{
+	char *temp_unc;
+	int rc = 0;
+
+	*pnum_referrals = 0;
+
+	if (pSesInfo->ipc_tid == 0) {
+		temp_unc = kmalloc(2 /* for slashes */ +
+			strnlen(pSesInfo->serverName,SERVER_NAME_LEN_WITH_NULL * 2)
+				 + 1 + 4 /* slash IPC$ */  + 2,
+				GFP_KERNEL);
+		if (temp_unc == NULL)
+			return -ENOMEM;
+		temp_unc[0] = '\\';
+		temp_unc[1] = '\\';
+		strcpy(temp_unc + 2, pSesInfo->serverName);
+		strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
+		rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
+		cFYI(1,
+		     ("CIFS Tcon rc = %d ipc_tid = %d", rc,pSesInfo->ipc_tid));
+		kfree(temp_unc);
+	}
+	if (rc == 0)
+		rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals,
+				     pnum_referrals, nls_codepage);
+
+	return rc;
+}
+
+/* See RFC1001 section 14 on representation of Netbios names */
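+/* e.g. each byte is split into two nibbles and each nibble is encoded as
+   'A' + nibble, so the source byte 0x41 ('A') becomes the two bytes "EB" */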
+static void rfc1002mangle(char * target,char * source, unsigned int length)
+{
+	unsigned int i,j;
+
+	for(i=0,j=0;i<(length);i++) {
+		/* mask a nibble at a time and encode */
+		target[j] = 'A' + (0x0F & (source[i] >> 4));
+		target[j+1] = 'A' + (0x0F & source[i]);
+		j+=2;
+	}
+
+}
+
+
+static int
+ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, 
+			 char * netbios_name)
+{
+	int rc = 0;
+	int connected = 0;
+	__be16 orig_port = 0;
+
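+	/* connection order: any user specified port first, then the default
+	CIFS port 445, and finally the RFC1001 port 139 */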
+	if(*csocket == NULL) {
+		rc = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, csocket);
+		if (rc < 0) {
+			cERROR(1, ("Error %d creating socket",rc));
+			*csocket = NULL;
+			return rc;
+		} else {
+		/* BB other socket options to set KEEPALIVE, NODELAY? */
+			cFYI(1,("Socket created"));
+			(*csocket)->sk->sk_allocation = GFP_NOFS; 
+		}
+	}
+
+	psin_server->sin_family = AF_INET;
+	if(psin_server->sin_port) { /* user overrode default port */
+		rc = (*csocket)->ops->connect(*csocket,
+				(struct sockaddr *) psin_server,
+				sizeof (struct sockaddr_in),0);
+		if (rc >= 0)
+			connected = 1;
+	} 
+
+	if(!connected) {
+		/* save original port so we can retry user specified port  
+			later if fall back ports fail this time  */
+		orig_port = psin_server->sin_port;
+
+		/* do not retry on the same port we just failed on */
+		if(psin_server->sin_port != htons(CIFS_PORT)) {
+			psin_server->sin_port = htons(CIFS_PORT);
+
+			rc = (*csocket)->ops->connect(*csocket,
+					(struct sockaddr *) psin_server,
+					sizeof (struct sockaddr_in),0);
+			if (rc >= 0)
+				connected = 1;
+		}
+	}
+	if (!connected) {
+		psin_server->sin_port = htons(RFC1001_PORT);
+		rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
+					      psin_server, sizeof (struct sockaddr_in),0);
+		if (rc >= 0) 
+			connected = 1;
+	}
+
+	/* give up here - unless we want to retry on different
+		protocol families some day */
+	if (!connected) {
+		if(orig_port)
+			psin_server->sin_port = orig_port;
+		cFYI(1,("Error %d connecting to server via ipv4",rc));
+		sock_release(*csocket);
+		*csocket = NULL;
+		return rc;
+	}
+	/* Eventually check for other socket options to change from 
+		the default. sock_setsockopt not used because it expects 
+		user space buffer */
+	(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
+
+	/* send RFC1001 sessinit */
+
+	if(psin_server->sin_port == htons(RFC1001_PORT)) {
+		/* some servers require RFC1001 sessinit before sending
+		negprot - BB check reconnection in case where second 
+		sessinit is sent but no second negprot */
+		struct rfc1002_session_packet * ses_init_buf;
+		struct smb_hdr * smb_buf;
+		ses_init_buf = cifs_kcalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL);
+		if(ses_init_buf) {
+			ses_init_buf->trailer.session_req.called_len = 32;
+			rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
+				DEFAULT_CIFS_CALLED_NAME,16);
+			ses_init_buf->trailer.session_req.calling_len = 32;
+			/* calling name ends in null (byte 16) from old smb
+			convention. */
+			if(netbios_name && (netbios_name[0] !=0)) {
+				rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
+					netbios_name,16);
+			} else {
+				rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
+					"LINUX_CIFS_CLNT",16);
+			}
+			ses_init_buf->trailer.session_req.scope1 = 0;
+			ses_init_buf->trailer.session_req.scope2 = 0;
+			smb_buf = (struct smb_hdr *)ses_init_buf;
+			/* sizeof RFC1002_SESSION_REQUEST with no scope */
+			smb_buf->smb_buf_length = 0x81000044;
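+			/* 0x81 is the RFC1002 session request type and 0x44 (68)
+			bytes covers the two encoded netbios names above */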
+			rc = smb_send(*csocket, smb_buf, 0x44,
+				(struct sockaddr *)psin_server);
+			kfree(ses_init_buf);
+		}
+		/* else the negprot may still work without this 
+		even though malloc failed */
+		
+	}
+		
+	return rc;
+}
+
+static int
+ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
+{
+	int rc = 0;
+	int connected = 0;
+	__be16 orig_port = 0;
+
+	if(*csocket == NULL) {
+		rc = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, csocket);
+		if (rc < 0) {
+			cERROR(1, ("Error %d creating ipv6 socket",rc));
+			*csocket = NULL;
+			return rc;
+		} else {
+		/* BB other socket options to set KEEPALIVE, NODELAY? */
+			 cFYI(1,("ipv6 Socket created"));
+			(*csocket)->sk->sk_allocation = GFP_NOFS;
+		}
+	}
+
+	psin_server->sin6_family = AF_INET6;
+
+	if(psin_server->sin6_port) { /* user overrode default port */
+		rc = (*csocket)->ops->connect(*csocket,
+				(struct sockaddr *) psin_server,
+				sizeof (struct sockaddr_in6),0);
+		if (rc >= 0)
+			connected = 1;
+	} 
+
+	if(!connected) {
+		/* save original port so we can retry user specified port  
+			later if fall back ports fail this time  */
+
+		orig_port = psin_server->sin6_port;
+		/* do not retry on the same port we just failed on */
+		if(psin_server->sin6_port != htons(CIFS_PORT)) {
+			psin_server->sin6_port = htons(CIFS_PORT);
+
+			rc = (*csocket)->ops->connect(*csocket,
+					(struct sockaddr *) psin_server,
+					sizeof (struct sockaddr_in6),0);
+			if (rc >= 0)
+				connected = 1;
+		}
+	}
+	if (!connected) {
+		psin_server->sin6_port = htons(RFC1001_PORT);
+		rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
+					 psin_server, sizeof (struct sockaddr_in6),0);
+		if (rc >= 0) 
+			connected = 1;
+	}
+
+	/* give up here - unless we want to retry on different
+		protocol families some day */
+	if (!connected) {
+		if(orig_port)
+			psin_server->sin6_port = orig_port;
+		cFYI(1,("Error %d connecting to server via ipv6",rc));
+		sock_release(*csocket);
+		*csocket = NULL;
+		return rc;
+	}
+	/* Eventually check for other socket options to change from 
+		the default. sock_setsockopt not used because it expects 
+		user space buffer */
+	(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
+		
+	return rc;
+}
+
+int
+cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+	   char *mount_data, const char *devname)
+{
+	int rc = 0;
+	int xid;
+	int address_type = AF_INET;
+	struct socket *csocket = NULL;
+	struct sockaddr_in sin_server;
+	struct sockaddr_in6 sin_server6;
+	struct smb_vol volume_info;
+	struct cifsSesInfo *pSesInfo = NULL;
+	struct cifsSesInfo *existingCifsSes = NULL;
+	struct cifsTconInfo *tcon = NULL;
+	struct TCP_Server_Info *srvTcp = NULL;
+
+	xid = GetXid();
+
+/* cFYI(1, ("Entering cifs_mount. Xid: %d with: %s", xid, mount_data)); */
+	
+	memset(&volume_info,0,sizeof(struct smb_vol));
+	if (cifs_parse_mount_options(mount_data, devname, &volume_info)) {
+		if(volume_info.UNC)
+			kfree(volume_info.UNC);
+		if(volume_info.password)
+			kfree(volume_info.password);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	if (volume_info.username) {
+		/* BB fixme parse for domain name here */
+		cFYI(1, ("Username: %s ", volume_info.username));
+
+	} else {
+		cifserror("No username specified ");
+        /* In userspace mount helper we can get user name from alternate
+           locations such as env variables and files on disk */
+		if(volume_info.UNC)
+			kfree(volume_info.UNC);
+		if(volume_info.password)
+			kfree(volume_info.password);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	if (volume_info.UNCip && volume_info.UNC) {
+		rc = cifs_inet_pton(AF_INET, volume_info.UNCip,&sin_server.sin_addr.s_addr);
+
+		if(rc <= 0) {
+			/* not ipv4 address, try ipv6 */
+			rc = cifs_inet_pton(AF_INET6,volume_info.UNCip,&sin_server6.sin6_addr.in6_u); 
+			if(rc > 0)
+				address_type = AF_INET6;
+		} else {
+			address_type = AF_INET;
+		}
+       
+		if(rc <= 0) {
+			/* we failed translating address */
+			if(volume_info.UNC)
+				kfree(volume_info.UNC);
+			if(volume_info.password)
+				kfree(volume_info.password);
+			FreeXid(xid);
+			return -EINVAL;
+		}
+
+		cFYI(1, ("UNC: %s ip: %s", volume_info.UNC, volume_info.UNCip));
+		/* success */
+		rc = 0;
+	} else if (volume_info.UNCip){
+		/* BB using ip addr as server name connect to the DFS root below */
+		cERROR(1,("Connecting to DFS root not implemented yet"));
+		if(volume_info.UNC)
+			kfree(volume_info.UNC);
+		if(volume_info.password)
+			kfree(volume_info.password);
+		FreeXid(xid);
+		return -EINVAL;
+	} else /* which server's DFS root would we connect to */ {
+		cERROR(1,
+		       ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified  "));
+		if(volume_info.UNC)
+			kfree(volume_info.UNC);
+		if(volume_info.password)
+			kfree(volume_info.password);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	/* this is needed for ASCII codepage to Unicode conversions */
+	if(volume_info.iocharset == NULL) {
+		cifs_sb->local_nls = load_nls_default();
+		/* load_nls_default cannot return null */
+	} else {
+		cifs_sb->local_nls = load_nls(volume_info.iocharset);
+		if(cifs_sb->local_nls == NULL) {
+			cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset));
+			if(volume_info.UNC)
+				kfree(volume_info.UNC);
+			if(volume_info.password)
+				kfree(volume_info.password);
+			FreeXid(xid);
+			return -ELIBACC;
+		}
+	}
+
+	if(address_type == AF_INET)
+		existingCifsSes = cifs_find_tcp_session(&sin_server.sin_addr,
+			NULL /* no ipv6 addr */,
+			volume_info.username, &srvTcp);
+	else if(address_type == AF_INET6)
+		existingCifsSes = cifs_find_tcp_session(NULL /* no ipv4 addr */,
+			&sin_server6.sin6_addr,
+			volume_info.username, &srvTcp);
+	else {
+		if(volume_info.UNC)
+			kfree(volume_info.UNC);
+		if(volume_info.password)
+			kfree(volume_info.password);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+
+	if (srvTcp) {
+		cFYI(1, ("Existing tcp session with server found "));
+	} else {	/* create socket */
+		if(volume_info.port)
+			sin_server.sin_port = htons(volume_info.port);
+		else
+			sin_server.sin_port = 0;
+		rc = ipv4_connect(&sin_server,&csocket,volume_info.source_rfc1001_name);
+		if (rc < 0) {
+			cERROR(1,
+			       ("Error connecting to IPv4 socket. Aborting operation"));
+			if(csocket != NULL)
+				sock_release(csocket);
+			if(volume_info.UNC)
+				kfree(volume_info.UNC);
+			if(volume_info.password)
+				kfree(volume_info.password);
+			FreeXid(xid);
+			return rc;
+		}
+
+		srvTcp = kmalloc(sizeof (struct TCP_Server_Info), GFP_KERNEL);
+		if (srvTcp == NULL) {
+			rc = -ENOMEM;
+			sock_release(csocket);
+			if(volume_info.UNC)
+				kfree(volume_info.UNC);
+			if(volume_info.password)
+				kfree(volume_info.password);
+			FreeXid(xid);
+			return rc;
+		} else {
+			memset(srvTcp, 0, sizeof (struct TCP_Server_Info));
+			memcpy(&srvTcp->addr.sockAddr, &sin_server, sizeof (struct sockaddr_in));
+			atomic_set(&srvTcp->inFlight,0);
+			/* BB Add code for ipv6 case too */
+			srvTcp->ssocket = csocket;
+			srvTcp->protocolType = IPV4;
+			init_waitqueue_head(&srvTcp->response_q);
+			init_waitqueue_head(&srvTcp->request_q);
+			INIT_LIST_HEAD(&srvTcp->pending_mid_q);
+			/* at this point we are the only ones with a pointer to
+			   the struct since the kernel thread is not created yet,
+			   so there is no need to spinlock this init of tcpStatus */
+			srvTcp->tcpStatus = CifsNew;
+			init_MUTEX(&srvTcp->tcpSem);
+			rc = (int)kernel_thread((void *)(void *)cifs_demultiplex_thread, srvTcp,
+				      CLONE_FS | CLONE_FILES | CLONE_VM);
+			if(rc < 0) {
+				rc = -ENOMEM;
+				sock_release(csocket);
+				if(volume_info.UNC)
+					kfree(volume_info.UNC);
+				if(volume_info.password)
+					kfree(volume_info.password);
+				FreeXid(xid);
+				return rc;
+			} else
+				rc = 0;
+			memcpy(srvTcp->workstation_RFC1001_name, volume_info.source_rfc1001_name,16);
+		}
+	}
+
+	if (existingCifsSes) {
+		pSesInfo = existingCifsSes;
+		cFYI(1, ("Existing smb sess found "));
+		if(volume_info.password)
+			kfree(volume_info.password);
+		/* volume_info.UNC freed at end of function */
+	} else if (!rc) {
+		cFYI(1, ("Existing smb sess not found "));
+		pSesInfo = sesInfoAlloc();
+		if (pSesInfo == NULL)
+			rc = -ENOMEM;
+		else {
+			pSesInfo->server = srvTcp;
+			sprintf(pSesInfo->serverName, "%u.%u.%u.%u",
+				NIPQUAD(sin_server.sin_addr.s_addr));
+		}
+
+		if (!rc){
+			/* volume_info.password freed at unmount */   
+			if (volume_info.password)
+				pSesInfo->password = volume_info.password;
+			if (volume_info.username)
+				strncpy(pSesInfo->userName,
+					volume_info.username,MAX_USERNAME_SIZE);
+			if (volume_info.domainname)
+				strncpy(pSesInfo->domainName,
+					volume_info.domainname,MAX_USERNAME_SIZE);
+			pSesInfo->linux_uid = volume_info.linux_uid;
+			down(&pSesInfo->sesSem);
+			rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
+			up(&pSesInfo->sesSem);
+			if(!rc)
+				atomic_inc(&srvTcp->socketUseCount);
+		} else
+			if(volume_info.password)
+				kfree(volume_info.password);
+	}
+    
+	/* search for existing tcon to this server share */
+	if (!rc) {
+		if((volume_info.rsize) && (volume_info.rsize <= CIFSMaxBufSize))
+			cifs_sb->rsize = volume_info.rsize;
+		else
+			cifs_sb->rsize = srvTcp->maxBuf - MAX_CIFS_HDR_SIZE; /* default */
+		if((volume_info.wsize) && (volume_info.wsize <= CIFSMaxBufSize))
+			cifs_sb->wsize = volume_info.wsize;
+		else
+			cifs_sb->wsize = CIFSMaxBufSize; /* default */
+		if(cifs_sb->rsize < PAGE_CACHE_SIZE) {
+			cifs_sb->rsize = PAGE_CACHE_SIZE;
+			cERROR(1,("Attempt to set readsize for mount to less than one page (4096)"));
+		}
+		cifs_sb->mnt_uid = volume_info.linux_uid;
+		cifs_sb->mnt_gid = volume_info.linux_gid;
+		cifs_sb->mnt_file_mode = volume_info.file_mode;
+		cifs_sb->mnt_dir_mode = volume_info.dir_mode;
+		cFYI(1,("file mode: 0x%x  dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
+
+		if(volume_info.noperm)
+			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
+		if(volume_info.setuids)
+			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+		if(volume_info.server_ino)
+			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
+		if(volume_info.no_xattr)
+			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
+		if(volume_info.direct_io) {
+			cERROR(1,("mounting share using direct i/o"));
+			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
+		}
+
+		tcon =
+		    find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
+			     volume_info.username);
+		if (tcon) {
+			cFYI(1, ("Found match on UNC path "));
+			/* we can have only one retry value for a connection
+			   to a share so for resources mounted more than once
+			   to the same server share the last value passed in 
+			   for the retry flag is used */
+			tcon->retry = volume_info.retry;
+		} else {
+			tcon = tconInfoAlloc();
+			if (tcon == NULL)
+				rc = -ENOMEM;
+			else {
+				/* check for null share name, i.e. connect to dfs root */
+
+				/* BB check if this works for exactly length three strings */
+				if ((strchr(volume_info.UNC + 3, '\\') == NULL)
+				    && (strchr(volume_info.UNC + 3, '/') ==
+					NULL)) {
+					rc = connect_to_dfs_path(xid,
+								 pSesInfo,
+								 "",
+								 cifs_sb->
+								 local_nls);
+					if(volume_info.UNC)
+						kfree(volume_info.UNC);
+					FreeXid(xid);
+					return -ENODEV;
+				} else {
+					rc = CIFSTCon(xid, pSesInfo, 
+						volume_info.UNC,
+						tcon, cifs_sb->local_nls);
+					cFYI(1, ("CIFS Tcon rc = %d", rc));
+				}
+				if (!rc) {
+					atomic_inc(&pSesInfo->inUse);
+					tcon->retry = volume_info.retry;
+				}
+			}
+		}
+	}
+	if(pSesInfo) {
+		if (pSesInfo->capabilities & CAP_LARGE_FILES) {
+			sb->s_maxbytes = (u64) 1 << 63;
+		} else
+			sb->s_maxbytes = (u64) 1 << 31;	/* 2 GB */
+	}
+
+	sb->s_time_gran = 100;
+
+/* on error free sesinfo and tcon struct if needed */
+	if (rc) {
+		/* if session setup failed, use count is zero but
+		   we still need to shut down the cifsd thread */
+		if(atomic_read(&srvTcp->socketUseCount) == 0) {
+			spin_lock(&GlobalMid_Lock);
+			srvTcp->tcpStatus = CifsExiting;
+			spin_unlock(&GlobalMid_Lock);
+			if(srvTcp->tsk)
+				send_sig(SIGKILL,srvTcp->tsk,1);
+		}
+		/* If find_unc succeeded then rc == 0 so we can not end up
+		   accidentally freeing someone else's tcon struct */
+		if (tcon)
+			tconInfoFree(tcon);
+		if (existingCifsSes == NULL) {
+			if (pSesInfo) {
+				if ((pSesInfo->server) && 
+				    (pSesInfo->status == CifsGood)) {
+					int temp_rc;
+					temp_rc = CIFSSMBLogoff(xid, pSesInfo);
+					/* if the socketUseCount is now zero */
+					if((temp_rc == -ESHUTDOWN) &&
+					   (pSesInfo->server->tsk))
+						send_sig(SIGKILL,pSesInfo->server->tsk,1);
+				} else
+					cFYI(1, ("No session or bad tcon"));
+				sesInfoFree(pSesInfo);
+				/* pSesInfo = NULL; */
+			}
+		}
+	} else {
+		atomic_inc(&tcon->useCount);
+		cifs_sb->tcon = tcon;
+		tcon->ses = pSesInfo;
+
+		/* do not care if following two calls succeed - informational only */
+		CIFSSMBQFSDeviceInfo(xid, tcon, cifs_sb->local_nls);
+		CIFSSMBQFSAttributeInfo(xid, tcon, cifs_sb->local_nls);
+		if (tcon->ses->capabilities & CAP_UNIX) {
+			if(!CIFSSMBQFSUnixInfo(xid, tcon, cifs_sb->local_nls)) {
+				if(!volume_info.no_psx_acl) {
+					if(CIFS_UNIX_POSIX_ACL_CAP &
+					   le64_to_cpu(tcon->fsUnixInfo.Capability)) {
+						cFYI(1,("server negotiated posix acl support"));
+						sb->s_flags |= MS_POSIXACL;
+					}
+				}
+			}
+		}
+	}
+
+	/* volume_info.password is freed above when an existing session is found
+	   (in which case it is not needed anymore) but when a new session is
+	   created the password ptr is put in the new session structure (in which
+	   case the password will be freed at unmount time) */
+	if(volume_info.UNC)
+		kfree(volume_info.UNC);
+	FreeXid(xid);
+	return rc;
+}
+
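+/* Plain (non extended security) NTLM session setup: the NTLM session key is
+   sent in both the case insensitive and case sensitive password fields,
+   followed by the user, domain and OS strings; the reply is parsed for the
+   server OS, NOS and domain strings */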
+static int
+CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
+	      char session_key[CIFS_SESSION_KEY_SIZE],
+	      const struct nls_table *nls_codepage)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response;
+	SESSION_SETUP_ANDX *pSMB;
+	SESSION_SETUP_ANDX *pSMBr;
+	char *bcc_ptr;
+	char *user;
+	char *domain;
+	int rc = 0;
+	int remaining_words = 0;
+	int bytes_returned = 0;
+	int len;
+	__u32 capabilities;
+	__u16 count;
+
+	cFYI(1, ("In sesssetup "));
+	if(ses == NULL)
+		return -EINVAL;
+	user = ses->userName;
+	domain = ses->domainName;
+	smb_buffer = cifs_buf_get();
+	if (smb_buffer == NULL) {
+		return -ENOMEM;
+	}
+	smb_buffer_response = smb_buffer;
+	pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
+
+	/* send SMBsessionSetup here */
+	header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
+			NULL /* no tCon exists yet */ , 13 /* wct */ );
+
+	pSMB->req_no_secext.AndXCommand = 0xFF;
+	pSMB->req_no_secext.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+	pSMB->req_no_secext.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+		CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+	if (ses->capabilities & CAP_UNICODE) {
+		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+		capabilities |= CAP_UNICODE;
+	}
+	if (ses->capabilities & CAP_STATUS32) {
+		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+		capabilities |= CAP_STATUS32;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		smb_buffer->Flags2 |= SMBFLG2_DFS;
+		capabilities |= CAP_DFS;
+	}
+	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+
+	pSMB->req_no_secext.CaseInsensitivePasswordLength = 
+		cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+
+	pSMB->req_no_secext.CaseSensitivePasswordLength =
+	    cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+	bcc_ptr = pByteArea(smb_buffer);
+	memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
+	bcc_ptr += CIFS_SESSION_KEY_SIZE;
+	memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
+	bcc_ptr += CIFS_SESSION_KEY_SIZE;
+
+	if (ses->capabilities & CAP_UNICODE) {
+		if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		if(user == NULL)
+			bytes_returned = 0; /* skip null user */
+		else
+			bytes_returned =
+				cifs_strtoUCS((wchar_t *) bcc_ptr, user, 100,
+					nls_codepage);
+		/* convert number of 16 bit words to bytes */
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;	/* trailing null */
+		if (domain == NULL)
+			bytes_returned =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr,
+					  "CIFS_LINUX_DOM", 32, nls_codepage);
+		else
+			bytes_returned =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr, domain, 64,
+					  nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, "Linux version ",
+				  32, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, system_utsname.release,
+				  32, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, CIFS_NETWORK_OPSYS,
+				  64, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+	} else {
+		if(user != NULL) {                
+		    strncpy(bcc_ptr, user, 200);
+		    bcc_ptr += strnlen(user, 200);
+		}
+		*bcc_ptr = 0;
+		bcc_ptr++;
+		if (domain == NULL) {
+			strcpy(bcc_ptr, "CIFS_LINUX_DOM");
+			bcc_ptr += strlen("CIFS_LINUX_DOM") + 1;
+		} else {
+			strncpy(bcc_ptr, domain, 64);
+			bcc_ptr += strnlen(domain, 64);
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		strcpy(bcc_ptr, "Linux version ");
+		bcc_ptr += strlen("Linux version ");
+		strcpy(bcc_ptr, system_utsname.release);
+		bcc_ptr += strlen(system_utsname.release) + 1;
+		strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+		bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+	}
+	count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
+	smb_buffer->smb_buf_length += count;
+	pSMB->req_no_secext.ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+			 &bytes_returned, 1);
+	if (rc) {
+/* rc = map_smb_to_linux_error(smb_buffer_response); now done in SendReceive */
+	} else if ((smb_buffer_response->WordCount == 3)
+		   || (smb_buffer_response->WordCount == 4)) {
+		__u16 action = le16_to_cpu(pSMBr->resp.Action);
+		__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
+		if (action & GUEST_LOGIN)
+			cFYI(1, (" Guest login"));	/* do we want to mark SesInfo struct ? */
+		ses->Suid = smb_buffer_response->Uid;	/* UID left in wire format (le) */
+		cFYI(1, ("UID = %d ", ses->Suid));
+         /* response can have either 3 or 4 word count - Samba sends 3 */
+		bcc_ptr = pByteArea(smb_buffer_response);	
+		if ((pSMBr->resp.hdr.WordCount == 3)
+		    || ((pSMBr->resp.hdr.WordCount == 4)
+			&& (blob_len < pSMBr->resp.ByteCount))) {
+			if (pSMBr->resp.hdr.WordCount == 4)
+				bcc_ptr += blob_len;
+
+			if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
+				if ((long) (bcc_ptr) % 2) {
+					remaining_words =
+					    (BCC(smb_buffer_response) - 1) /2;
+					bcc_ptr++;	/* Unicode strings must be word aligned */
+				} else {
+					remaining_words =
+						BCC(smb_buffer_response) / 2;
+				}
+				len =
+				    UniStrnlen((wchar_t *) bcc_ptr,
+					       remaining_words - 1);
+/* We look for obvious messed up bcc or strings in response so we do not go off
+   the end since (at least) WIN2K and Windows XP have a major bug in not null
+   terminating last Unicode string in response  */
+				ses->serverOS = cifs_kcalloc(2 * (len + 1), GFP_KERNEL);
+				cifs_strfromUCS_le(ses->serverOS,
+					   (wchar_t *)bcc_ptr, len,nls_codepage);
+				bcc_ptr += 2 * (len + 1);
+				remaining_words -= len + 1;
+				ses->serverOS[2 * len] = 0;
+				ses->serverOS[1 + (2 * len)] = 0;
+				if (remaining_words > 0) {
+					len = UniStrnlen((wchar_t *)bcc_ptr,
+							 remaining_words-1);
+					ses->serverNOS =cifs_kcalloc(2 * (len + 1),GFP_KERNEL);
+					cifs_strfromUCS_le(ses->serverNOS,
+							   (wchar_t *)bcc_ptr,len,nls_codepage);
+					bcc_ptr += 2 * (len + 1);
+					ses->serverNOS[2 * len] = 0;
+					ses->serverNOS[1 + (2 * len)] = 0;
+					if(strncmp(ses->serverNOS,
+						"NT LAN Manager 4",16) == 0) {
+						cFYI(1,("NT4 server"));
+						ses->flags |= CIFS_SES_NT4;
+					}
+					remaining_words -= len + 1;
+					if (remaining_words > 0) {
+						len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
+          /* last string is not always null terminated (e.g. for Windows XP & 2000) */
+						ses->serverDomain =
+						    cifs_kcalloc(2*(len+1),GFP_KERNEL);
+						cifs_strfromUCS_le(ses->serverDomain,
+						     (wchar_t *)bcc_ptr,len,nls_codepage);
+						bcc_ptr += 2 * (len + 1);
+						ses->serverDomain[2*len] = 0;
+						ses->serverDomain[1+(2*len)] = 0;
+					} /* else no more room so create dummy domain string */
+					else
+						ses->serverDomain =
+						    cifs_kcalloc(2,
+							    GFP_KERNEL);
+				} else {	/* no room so create dummy domain and NOS string */
+					ses->serverDomain =
+					    cifs_kcalloc(2, GFP_KERNEL);
+					ses->serverNOS =
+					    cifs_kcalloc(2, GFP_KERNEL);
+				}
+			} else {	/* ASCII */
+				len = strnlen(bcc_ptr, 1024);
+				if (((long) bcc_ptr + len) - (long)
+				    pByteArea(smb_buffer_response)
+					    <= BCC(smb_buffer_response)) {
+					ses->serverOS = cifs_kcalloc(len + 1,GFP_KERNEL);
+					strncpy(ses->serverOS,bcc_ptr, len);
+
+					bcc_ptr += len;
+					bcc_ptr[0] = 0;	/* null terminate the string */
+					bcc_ptr++;
+
+					len = strnlen(bcc_ptr, 1024);
+					ses->serverNOS = cifs_kcalloc(len + 1,GFP_KERNEL);
+					strncpy(ses->serverNOS, bcc_ptr, len);
+					bcc_ptr += len;
+					bcc_ptr[0] = 0;
+					bcc_ptr++;
+
+					len = strnlen(bcc_ptr, 1024);
+					ses->serverDomain = cifs_kcalloc(len + 1,GFP_KERNEL);
+					strncpy(ses->serverDomain, bcc_ptr, len);
+					bcc_ptr += len;
+					bcc_ptr[0] = 0;
+					bcc_ptr++;
+				} else
+					cFYI(1,
+					     ("Variable field of length %d extends beyond end of smb ",
+					      len));
+			}
+		} else {
+			cERROR(1,
+			       (" Security Blob Length extends beyond end of SMB"));
+		}
+	} else {
+		cERROR(1,
+		       (" Invalid Word count %d: ",
+			smb_buffer_response->WordCount));
+		rc = -EIO;
+	}
+	
+	if (smb_buffer)
+		cifs_buf_release(smb_buffer);
+
+	return rc;
+}
+
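+/* Extended security (SPNEGO) session setup: the caller supplied security
+   blob is copied into the request ahead of the user, domain and OS strings */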
+static int
+CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
+		char *SecurityBlob,int SecurityBlobLength,
+		const struct nls_table *nls_codepage)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response;
+	SESSION_SETUP_ANDX *pSMB;
+	SESSION_SETUP_ANDX *pSMBr;
+	char *bcc_ptr;
+	char *user;
+	char *domain;
+	int rc = 0;
+	int remaining_words = 0;
+	int bytes_returned = 0;
+	int len;
+	__u32 capabilities;
+	__u16 count;
+
+	cFYI(1, ("In spnego sesssetup "));
+	if(ses == NULL)
+		return -EINVAL;
+	user = ses->userName;
+	domain = ses->domainName;
+
+	smb_buffer = cifs_buf_get();
+	if (smb_buffer == NULL) {
+		return -ENOMEM;
+	}
+	smb_buffer_response = smb_buffer;
+	pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
+
+	/* send SMBsessionSetup here */
+	header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
+			NULL /* no tCon exists yet */ , 12 /* wct */ );
+	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+	pSMB->req.AndXCommand = 0xFF;
+	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+	    CAP_EXTENDED_SECURITY;
+	if (ses->capabilities & CAP_UNICODE) {
+		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+		capabilities |= CAP_UNICODE;
+	}
+	if (ses->capabilities & CAP_STATUS32) {
+		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+		capabilities |= CAP_STATUS32;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		smb_buffer->Flags2 |= SMBFLG2_DFS;
+		capabilities |= CAP_DFS;
+	}
+	pSMB->req.Capabilities = cpu_to_le32(capabilities);
+
+	pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
+	bcc_ptr = pByteArea(smb_buffer);
+	memcpy(bcc_ptr, SecurityBlob, SecurityBlobLength);
+	bcc_ptr += SecurityBlobLength;
+
+	if (ses->capabilities & CAP_UNICODE) {
+		if ((long) bcc_ptr % 2) {	/* must be word aligned for Unicode strings */
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, user, 100, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;	/* convert num of 16 bit words to bytes */
+		bcc_ptr += 2;	/* trailing null */
+		if (domain == NULL)
+			bytes_returned =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr,
+					  "CIFS_LINUX_DOM", 32, nls_codepage);
+		else
+			bytes_returned =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr, domain, 64,
+					  nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, "Linux version ",
+				  32, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, system_utsname.release, 32,
+				  nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, CIFS_NETWORK_OPSYS,
+				  64, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;
+	} else {
+		strncpy(bcc_ptr, user, 200);
+		bcc_ptr += strnlen(user, 200);
+		*bcc_ptr = 0;
+		bcc_ptr++;
+		if (domain == NULL) {
+			strcpy(bcc_ptr, "CIFS_LINUX_DOM");
+			bcc_ptr += strlen("CIFS_LINUX_DOM") + 1;
+		} else {
+			strncpy(bcc_ptr, domain, 64);
+			bcc_ptr += strnlen(domain, 64);
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		strcpy(bcc_ptr, "Linux version ");
+		bcc_ptr += strlen("Linux version ");
+		strcpy(bcc_ptr, system_utsname.release);
+		bcc_ptr += strlen(system_utsname.release) + 1;
+		strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+		bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+	}
+	count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
+	smb_buffer->smb_buf_length += count;
+	pSMB->req.ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+			 &bytes_returned, 1);
+	if (rc) {
+/*    rc = map_smb_to_linux_error(smb_buffer_response);  *//* done in SendReceive now */
+	} else if ((smb_buffer_response->WordCount == 3)
+		   || (smb_buffer_response->WordCount == 4)) {
+		__u16 action = le16_to_cpu(pSMBr->resp.Action);
+		__u16 blob_len =
+		    le16_to_cpu(pSMBr->resp.SecurityBlobLength);
+		if (action & GUEST_LOGIN)
+			cFYI(1, (" Guest login"));	/* BB do we want to set anything in SesInfo struct ? */
+		if (ses) {
+			ses->Suid = smb_buffer_response->Uid;	/* UID left in wire format (le) */
+			cFYI(1, ("UID = %d ", ses->Suid));
+			bcc_ptr = pByteArea(smb_buffer_response);	/* response can have either 3 or 4 word count - Samba sends 3 */
+
+			/* BB Fix below to make endian neutral !! */
+
+			if ((pSMBr->resp.hdr.WordCount == 3)
+			    || ((pSMBr->resp.hdr.WordCount == 4)
+				&& (blob_len <
+				    pSMBr->resp.ByteCount))) {
+				if (pSMBr->resp.hdr.WordCount == 4) {
+					bcc_ptr +=
+					    blob_len;
+					cFYI(1,
+					     ("Security Blob Length %d ",
+					      blob_len));
+				}
+
+				if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
+					if ((long) (bcc_ptr) % 2) {
+						remaining_words =
+						    (BCC(smb_buffer_response)
+						     - 1) / 2;
+						bcc_ptr++;	/* Unicode strings must be word aligned */
+					} else {
+						remaining_words =
+						    BCC(smb_buffer_response) / 2;
+					}
+					len =
+					    UniStrnlen((wchar_t *) bcc_ptr,
+						       remaining_words - 1);
+/* We look for obvious messed up bcc or strings in response so we do not go off
+   the end since (at least) WIN2K and Windows XP have a major bug in not null
+   terminating last Unicode string in response  */
+					ses->serverOS =
+					    cifs_kcalloc(2 * (len + 1), GFP_KERNEL);
+					cifs_strfromUCS_le(ses->serverOS,
+							   (wchar_t *)
+							   bcc_ptr, len,
+							   nls_codepage);
+					bcc_ptr += 2 * (len + 1);
+					remaining_words -= len + 1;
+					ses->serverOS[2 * len] = 0;
+					ses->serverOS[1 + (2 * len)] = 0;
+					if (remaining_words > 0) {
+						len = UniStrnlen((wchar_t *)bcc_ptr,
+								 remaining_words
+								 - 1);
+						ses->serverNOS =
+						    cifs_kcalloc(2 * (len + 1),
+							    GFP_KERNEL);
+						cifs_strfromUCS_le(ses->serverNOS,
+								   (wchar_t *)bcc_ptr,
+								   len,
+								   nls_codepage);
+						bcc_ptr += 2 * (len + 1);
+						ses->serverNOS[2 * len] = 0;
+						ses->serverNOS[1 + (2 * len)] = 0;
+						remaining_words -= len + 1;
+						if (remaining_words > 0) {
+							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
+                            /* last string is not always null terminated (e.g. for Windows XP & 2000) */
+							ses->serverDomain = cifs_kcalloc(2*(len+1),GFP_KERNEL);
+							cifs_strfromUCS_le(ses->serverDomain,
+							     (wchar_t *)bcc_ptr, 
+                                 len,
+							     nls_codepage);
+							bcc_ptr += 2*(len+1);
+							ses->serverDomain[2*len] = 0;
+							ses->serverDomain[1+(2*len)] = 0;
+						} /* else no more room so create dummy domain string */
+						else
+							ses->serverDomain =
+							    cifs_kcalloc(2,GFP_KERNEL);
+					} else {	/* no room so create dummy domain and NOS string */
+						ses->serverDomain = cifs_kcalloc(2, GFP_KERNEL);
+						ses->serverNOS = cifs_kcalloc(2, GFP_KERNEL);
+					}
+				} else {	/* ASCII */
+
+					len = strnlen(bcc_ptr, 1024);
+					if (((long) bcc_ptr + len) - (long)
+					    pByteArea(smb_buffer_response)
+					    <= BCC(smb_buffer_response)) {
+						ses->serverOS = cifs_kcalloc(len + 1, GFP_KERNEL);
+						strncpy(ses->serverOS, bcc_ptr, len);
+
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;	/* null terminate the string */
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverNOS = cifs_kcalloc(len + 1,GFP_KERNEL);
+						strncpy(ses->serverNOS, bcc_ptr, len);
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverDomain = cifs_kcalloc(len + 1, GFP_KERNEL);
+						strncpy(ses->serverDomain, bcc_ptr, len);
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+					} else
+						cFYI(1,
+						     ("Variable field of length %d extends beyond end of smb ",
+						      len));
+				}
+			} else {
+				cERROR(1,
+				       (" Security Blob Length extends beyond end of SMB"));
+			}
+		} else {
+			cERROR(1, ("No session structure passed in."));
+		}
+	} else {
+		cERROR(1,
+		       (" Invalid Word count %d: ",
+			smb_buffer_response->WordCount));
+		rc = -EIO;
+	}
+
+	if (smb_buffer)
+		cifs_buf_release(smb_buffer);
+
+	return rc;
+}
+
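+/* First (negotiate) leg of raw NTLMSSP session setup: send an NTLMSSP
+   NEGOTIATE_MESSAGE and parse the returned CHALLENGE_MESSAGE, saving the
+   server challenge and signing flags for the authenticate leg */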
+static int
+CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
+			      struct cifsSesInfo *ses, int * pNTLMv2_flag,
+			      const struct nls_table *nls_codepage)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response;
+	SESSION_SETUP_ANDX *pSMB;
+	SESSION_SETUP_ANDX *pSMBr;
+	char *bcc_ptr;
+	char *domain;
+	int rc = 0;
+	int remaining_words = 0;
+	int bytes_returned = 0;
+	int len;
+	int SecurityBlobLength = sizeof (NEGOTIATE_MESSAGE);
+	PNEGOTIATE_MESSAGE SecurityBlob;
+	PCHALLENGE_MESSAGE SecurityBlob2;
+	__u32 negotiate_flags, capabilities;
+	__u16 count;
+
+	cFYI(1, ("In NTLMSSP sesssetup (negotiate) "));
+	if(ses == NULL)
+		return -EINVAL;
+	domain = ses->domainName;
+	*pNTLMv2_flag = FALSE;
+	smb_buffer = cifs_buf_get();
+	if (smb_buffer == NULL) {
+		return -ENOMEM;
+	}
+	smb_buffer_response = smb_buffer;
+	pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
+	pSMBr = (SESSION_SETUP_ANDX *) smb_buffer_response;
+
+	/* send SMBsessionSetup here */
+	header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
+			NULL /* no tCon exists yet */ , 12 /* wct */ );
+	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+	pSMB->req.hdr.Flags |= (SMBFLG_CASELESS | SMBFLG_CANONICAL_PATH_FORMAT);
+
+	pSMB->req.AndXCommand = 0xFF;
+	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+	    CAP_EXTENDED_SECURITY;
+	if (ses->capabilities & CAP_UNICODE) {
+		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+		capabilities |= CAP_UNICODE;
+	}
+	if (ses->capabilities & CAP_STATUS32) {
+		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+		capabilities |= CAP_STATUS32;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		smb_buffer->Flags2 |= SMBFLG2_DFS;
+		capabilities |= CAP_DFS;
+	}
+	pSMB->req.Capabilities = cpu_to_le32(capabilities);
+
+	bcc_ptr = (char *) &pSMB->req.SecurityBlob;
+	SecurityBlob = (PNEGOTIATE_MESSAGE) bcc_ptr;
+	strncpy(SecurityBlob->Signature, NTLMSSP_SIGNATURE, 8);
+	SecurityBlob->MessageType = NtLmNegotiate;
+	negotiate_flags =
+	    NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM |
+	    NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM | 0x80000000 |
+	    /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
+	if(sign_CIFS_PDUs)
+		negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
+	if(ntlmv2_support)
+		negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
+	/* setup pointers to domain name and workstation name */
+	bcc_ptr += SecurityBlobLength;
+
+	SecurityBlob->WorkstationName.Buffer = 0;
+	SecurityBlob->WorkstationName.Length = 0;
+	SecurityBlob->WorkstationName.MaximumLength = 0;
+
+	if (domain == NULL) {
+		SecurityBlob->DomainName.Buffer = 0;
+		SecurityBlob->DomainName.Length = 0;
+		SecurityBlob->DomainName.MaximumLength = 0;
+	} else {
+		__u16 len;
+		negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
+		strncpy(bcc_ptr, domain, 63);
+		len = strnlen(domain, 64);
+		SecurityBlob->DomainName.MaximumLength =
+		    cpu_to_le16(len);
+		SecurityBlob->DomainName.Buffer =
+		    cpu_to_le32((long) &SecurityBlob->
+				DomainString -
+				(long) &SecurityBlob->Signature);
+		bcc_ptr += len;
+		SecurityBlobLength += len;
+		SecurityBlob->DomainName.Length =
+		    cpu_to_le16(len);
+	}
+	if (ses->capabilities & CAP_UNICODE) {
+		if ((long) bcc_ptr % 2) {
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, "Linux version ",
+				  32, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, system_utsname.release, 32,
+				  nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;	/* null terminate Linux version */
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, CIFS_NETWORK_OPSYS,
+				  64, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		*(bcc_ptr + 1) = 0;
+		*(bcc_ptr + 2) = 0;
+		bcc_ptr += 2;	/* null terminate network opsys string */
+		*(bcc_ptr + 1) = 0;
+		*(bcc_ptr + 2) = 0;
+		bcc_ptr += 2;	/* null domain */
+	} else {		/* ASCII */
+		strcpy(bcc_ptr, "Linux version ");
+		bcc_ptr += strlen("Linux version ");
+		strcpy(bcc_ptr, system_utsname.release);
+		bcc_ptr += strlen(system_utsname.release) + 1;
+		strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+		bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+		bcc_ptr++;	/* empty domain field */
+		*bcc_ptr = 0;
+	}
+	SecurityBlob->NegotiateFlags = cpu_to_le32(negotiate_flags);
+	pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
+	count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
+	smb_buffer->smb_buf_length += count;
+	pSMB->req.ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+			 &bytes_returned, 1);
+
+	if (smb_buffer_response->Status.CifsError ==
+	    cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
+		rc = 0;
+
+	if (rc) {
+/*    rc = map_smb_to_linux_error(smb_buffer_response);  *//* done in SendReceive now */
+	} else if ((smb_buffer_response->WordCount == 3)
+		   || (smb_buffer_response->WordCount == 4)) {
+		__u16 action = le16_to_cpu(pSMBr->resp.Action);
+		__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
+
+		if (action & GUEST_LOGIN)
+			cFYI(1, (" Guest login"));	
+        /* Do we want to set anything in SesInfo struct when guest login? */
+
+		bcc_ptr = pByteArea(smb_buffer_response);	
+        /* response can have either 3 or 4 word count - Samba sends 3 */
+
+		SecurityBlob2 = (PCHALLENGE_MESSAGE) bcc_ptr;
+		if (SecurityBlob2->MessageType != NtLmChallenge) {
+			cFYI(1,
+			     ("Unexpected NTLMSSP message type received %d",
+			      SecurityBlob2->MessageType));
+		} else if (ses) {
+			ses->Suid = smb_buffer_response->Uid; /* UID left in le format */ 
+			cFYI(1, ("UID = %d ", ses->Suid));
+			if ((pSMBr->resp.hdr.WordCount == 3)
+			    || ((pSMBr->resp.hdr.WordCount == 4)
+				&& (blob_len <
+				    pSMBr->resp.ByteCount))) {
+
+				if (pSMBr->resp.hdr.WordCount == 4) {
+					bcc_ptr += blob_len;
+					cFYI(1,
+					     ("Security Blob Length %d ",
+					      blob_len));
+				}
+
+				cFYI(1, ("NTLMSSP Challenge rcvd "));
+
+				memcpy(ses->server->cryptKey,
+				       SecurityBlob2->Challenge,
+				       CIFS_CRYPTO_KEY_SIZE);
+				if(SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
+					*pNTLMv2_flag = TRUE;
+
+				if((SecurityBlob2->NegotiateFlags & 
+					cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN)) 
+					|| (sign_CIFS_PDUs > 1))
+						ses->server->secMode |= 
+							SECMODE_SIGN_REQUIRED;	
+				if ((SecurityBlob2->NegotiateFlags & 
+					cpu_to_le32(NTLMSSP_NEGOTIATE_SIGN)) && (sign_CIFS_PDUs))
+						ses->server->secMode |= 
+							SECMODE_SIGN_ENABLED;
+
+				if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
+					if ((long) (bcc_ptr) % 2) {
+						remaining_words =
+						    (BCC(smb_buffer_response)
+						     - 1) / 2;
+						bcc_ptr++;	/* Unicode strings must be word aligned */
+					} else {
+						remaining_words =
+						    BCC(smb_buffer_response) / 2;
+					}
+					len =
+					    UniStrnlen((wchar_t *) bcc_ptr,
+						       remaining_words - 1);
+/* We look for obvious messed up bcc or strings in response so we do not go off
+   the end since (at least) WIN2K and Windows XP have a major bug in not null
+   terminating last Unicode string in response  */
+					ses->serverOS =
+					    cifs_kcalloc(2 * (len + 1), GFP_KERNEL);
+					cifs_strfromUCS_le(ses->serverOS,
+							   (wchar_t *)
+							   bcc_ptr, len,
+							   nls_codepage);
+					bcc_ptr += 2 * (len + 1);
+					remaining_words -= len + 1;
+					ses->serverOS[2 * len] = 0;
+					ses->serverOS[1 + (2 * len)] = 0;
+					if (remaining_words > 0) {
+						len = UniStrnlen((wchar_t *)
+								 bcc_ptr,
+								 remaining_words
+								 - 1);
+						ses->serverNOS =
+						    cifs_kcalloc(2 * (len + 1),
+							    GFP_KERNEL);
+						cifs_strfromUCS_le(ses->
+								   serverNOS,
+								   (wchar_t *)
+								   bcc_ptr,
+								   len,
+								   nls_codepage);
+						bcc_ptr += 2 * (len + 1);
+						ses->serverNOS[2 * len] = 0;
+						ses->serverNOS[1 +
+							       (2 * len)] = 0;
+						remaining_words -= len + 1;
+						if (remaining_words > 0) {
+							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
+           /* last string is not always null terminated (e.g. for Windows XP & 2000) */
+							ses->serverDomain =
+							    cifs_kcalloc(2 * (len + 1),
+									 GFP_KERNEL);
+							cifs_strfromUCS_le(ses->serverDomain,
+									   (wchar_t *)bcc_ptr,
+									   len, nls_codepage);
+							bcc_ptr += 2 * (len + 1);
+							ses->serverDomain[2*len] = 0;
+							ses->serverDomain[1+(2*len)] = 0;
+						} /* else no more room so create dummy domain string */
+						else
+							ses->serverDomain =
+							    cifs_kcalloc(2,
+								    GFP_KERNEL);
+					} else {	/* no room so create dummy domain and NOS string */
+						ses->serverDomain =
+						    cifs_kcalloc(2, GFP_KERNEL);
+						ses->serverNOS =
+						    cifs_kcalloc(2, GFP_KERNEL);
+					}
+				} else {	/* ASCII */
+					len = strnlen(bcc_ptr, 1024);
+					if (((long) bcc_ptr + len) - (long)
+					    pByteArea(smb_buffer_response)
+					    <= BCC(smb_buffer_response)) {
+						ses->serverOS =
+						    cifs_kcalloc(len + 1,
+							    GFP_KERNEL);
+						strncpy(ses->serverOS,
+							bcc_ptr, len);
+
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;	/* null terminate string */
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverNOS =
+						    cifs_kcalloc(len + 1,
+							    GFP_KERNEL);
+						strncpy(ses->serverNOS, bcc_ptr, len);
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverDomain =
+						    cifs_kcalloc(len + 1,
+							    GFP_KERNEL);
+						strncpy(ses->serverDomain, bcc_ptr, len);	
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+					} else
+						cFYI(1,
+						     ("Variable field of length %d extends beyond end of smb ",
+						      len));
+				}
+			} else {
+				cERROR(1,
+				       (" Security Blob Length extends beyond end of SMB"));
+			}
+		} else {
+			cERROR(1, ("No session structure passed in."));
+		}
+	} else {
+		cERROR(1,
+		       (" Invalid Word count %d: ",
+			smb_buffer_response->WordCount));
+		rc = -EIO;
+	}
+
+	if (smb_buffer)
+		cifs_buf_release(smb_buffer);
+
+	return rc;
+}
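+
+/* Second (authenticate) leg of raw NTLMSSP session setup: send an NTLMSSP
+   AUTHENTICATE_MESSAGE carrying the NT challenge response computed from the
+   challenge returned by the negotiate leg */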
+static int
+CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
+		char *ntlm_session_key, int ntlmv2_flag,
+		const struct nls_table *nls_codepage)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response;
+	SESSION_SETUP_ANDX *pSMB;
+	SESSION_SETUP_ANDX *pSMBr;
+	char *bcc_ptr;
+	char *user;
+	char *domain;
+	int rc = 0;
+	int remaining_words = 0;
+	int bytes_returned = 0;
+	int len;
+	int SecurityBlobLength = sizeof (AUTHENTICATE_MESSAGE);
+	PAUTHENTICATE_MESSAGE SecurityBlob;
+	__u32 negotiate_flags, capabilities;
+	__u16 count;
+
+	cFYI(1, ("In NTLMSSPSessSetup (Authenticate)"));
+	if(ses == NULL)
+		return -EINVAL;
+	user = ses->userName;
+	domain = ses->domainName;
+	smb_buffer = cifs_buf_get();
+	if (smb_buffer == NULL) {
+		return -ENOMEM;
+	}
+	smb_buffer_response = smb_buffer;
+	pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
+	pSMBr = (SESSION_SETUP_ANDX *) smb_buffer_response;
+
+	/* send SMBsessionSetup here */
+	header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
+			NULL /* no tCon exists yet */ , 12 /* wct */ );
+	pSMB->req.hdr.Flags |= (SMBFLG_CASELESS | SMBFLG_CANONICAL_PATH_FORMAT);
+	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+	pSMB->req.AndXCommand = 0xFF;
+	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+	pSMB->req.hdr.Uid = ses->Suid;
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+	    CAP_EXTENDED_SECURITY;
+	if (ses->capabilities & CAP_UNICODE) {
+		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+		capabilities |= CAP_UNICODE;
+	}
+	if (ses->capabilities & CAP_STATUS32) {
+		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+		capabilities |= CAP_STATUS32;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		smb_buffer->Flags2 |= SMBFLG2_DFS;
+		capabilities |= CAP_DFS;
+	}
+	pSMB->req.Capabilities = cpu_to_le32(capabilities);
+
+	bcc_ptr = (char *) &pSMB->req.SecurityBlob;
+	SecurityBlob = (PAUTHENTICATE_MESSAGE) bcc_ptr;
+	strncpy(SecurityBlob->Signature, NTLMSSP_SIGNATURE, 8);
+	SecurityBlob->MessageType = NtLmAuthenticate;
+	bcc_ptr += SecurityBlobLength;
+	negotiate_flags = 
+	    NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_REQUEST_TARGET |
+	    NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_TARGET_INFO |
+	    0x80000000 | NTLMSSP_NEGOTIATE_128;
+	if(sign_CIFS_PDUs)
+		negotiate_flags |= /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN |*/ NTLMSSP_NEGOTIATE_SIGN;
+	if(ntlmv2_flag)
+		negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
+
+/* setup pointers to domain name and workstation name */
+
+	SecurityBlob->WorkstationName.Buffer = 0;
+	SecurityBlob->WorkstationName.Length = 0;
+	SecurityBlob->WorkstationName.MaximumLength = 0;
+	SecurityBlob->SessionKey.Length = 0;
+	SecurityBlob->SessionKey.MaximumLength = 0;
+	SecurityBlob->SessionKey.Buffer = 0;
+
+	SecurityBlob->LmChallengeResponse.Length = 0;
+	SecurityBlob->LmChallengeResponse.MaximumLength = 0;
+	SecurityBlob->LmChallengeResponse.Buffer = 0;
+
+	SecurityBlob->NtChallengeResponse.Length =
+	    cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+	SecurityBlob->NtChallengeResponse.MaximumLength =
+	    cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+	memcpy(bcc_ptr, ntlm_session_key, CIFS_SESSION_KEY_SIZE);
+	SecurityBlob->NtChallengeResponse.Buffer =
+	    cpu_to_le32(SecurityBlobLength);
+	SecurityBlobLength += CIFS_SESSION_KEY_SIZE;
+	bcc_ptr += CIFS_SESSION_KEY_SIZE;
+
+	if (ses->capabilities & CAP_UNICODE) {
+		if (domain == NULL) {
+			SecurityBlob->DomainName.Buffer = 0;
+			SecurityBlob->DomainName.Length = 0;
+			SecurityBlob->DomainName.MaximumLength = 0;
+		} else {
+			__u16 len =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr, domain, 64,
+					  nls_codepage);
+			len *= 2;
+			SecurityBlob->DomainName.MaximumLength =
+			    cpu_to_le16(len);
+			SecurityBlob->DomainName.Buffer =
+			    cpu_to_le32(SecurityBlobLength);
+			bcc_ptr += len;
+			SecurityBlobLength += len;
+			SecurityBlob->DomainName.Length =
+			    cpu_to_le16(len);
+		}
+		if (user == NULL) {
+			SecurityBlob->UserName.Buffer = 0;
+			SecurityBlob->UserName.Length = 0;
+			SecurityBlob->UserName.MaximumLength = 0;
+		} else {
+			__u16 len =
+			    cifs_strtoUCS((wchar_t *) bcc_ptr, user, 64,
+					  nls_codepage);
+			len *= 2;
+			SecurityBlob->UserName.MaximumLength =
+			    cpu_to_le16(len);
+			SecurityBlob->UserName.Buffer =
+			    cpu_to_le32(SecurityBlobLength);
+			bcc_ptr += len;
+			SecurityBlobLength += len;
+			SecurityBlob->UserName.Length =
+			    cpu_to_le16(len);
+		}
+
+		/* SecurityBlob->WorkstationName.Length = cifs_strtoUCS((wchar_t *) bcc_ptr, "AMACHINE",64, nls_codepage);
+		   SecurityBlob->WorkstationName.Length *= 2;
+		   SecurityBlob->WorkstationName.MaximumLength = cpu_to_le16(SecurityBlob->WorkstationName.Length);
+		   SecurityBlob->WorkstationName.Buffer = cpu_to_le32(SecurityBlobLength);
+		   bcc_ptr += SecurityBlob->WorkstationName.Length;
+		   SecurityBlobLength += SecurityBlob->WorkstationName.Length;
+		   SecurityBlob->WorkstationName.Length = cpu_to_le16(SecurityBlob->WorkstationName.Length);  */
+
+		if ((long) bcc_ptr % 2) {
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, "Linux version ",
+				  32, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, system_utsname.release, 32,
+				  nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		bcc_ptr += 2;	/* null term version string */
+		bytes_returned =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, CIFS_NETWORK_OPSYS,
+				  64, nls_codepage);
+		bcc_ptr += 2 * bytes_returned;
+		*(bcc_ptr + 1) = 0;
+		*(bcc_ptr + 2) = 0;
+		bcc_ptr += 2;	/* null terminate network opsys string */
+		*(bcc_ptr + 1) = 0;
+		*(bcc_ptr + 2) = 0;
+		bcc_ptr += 2;	/* null domain */
+	} else {		/* ASCII */
+		if (domain == NULL) {
+			SecurityBlob->DomainName.Buffer = 0;
+			SecurityBlob->DomainName.Length = 0;
+			SecurityBlob->DomainName.MaximumLength = 0;
+		} else {
+			__u16 len;
+			negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
+			strncpy(bcc_ptr, domain, 63);
+			len = strnlen(domain, 64);
+			SecurityBlob->DomainName.MaximumLength =
+			    cpu_to_le16(len);
+			SecurityBlob->DomainName.Buffer =
+			    cpu_to_le32(SecurityBlobLength);
+			bcc_ptr += len;
+			SecurityBlobLength += len;
+			SecurityBlob->DomainName.Length = cpu_to_le16(len);
+		}
+		if (user == NULL) {
+			SecurityBlob->UserName.Buffer = 0;
+			SecurityBlob->UserName.Length = 0;
+			SecurityBlob->UserName.MaximumLength = 0;
+		} else {
+			__u16 len;
+			strncpy(bcc_ptr, user, 63);
+			len = strnlen(user, 64);
+			SecurityBlob->UserName.MaximumLength =
+			    cpu_to_le16(len);
+			SecurityBlob->UserName.Buffer =
+			    cpu_to_le32(SecurityBlobLength);
+			bcc_ptr += len;
+			SecurityBlobLength += len;
+			SecurityBlob->UserName.Length = cpu_to_le16(len);
+		}
+		/* BB fill in our workstation name if known BB */
+
+		strcpy(bcc_ptr, "Linux version ");
+		bcc_ptr += strlen("Linux version ");
+		strcpy(bcc_ptr, system_utsname.release);
+		bcc_ptr += strlen(system_utsname.release) + 1;
+		strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+		bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+		bcc_ptr++;	/* null domain */
+		*bcc_ptr = 0;
+	}
+	SecurityBlob->NegotiateFlags = cpu_to_le32(negotiate_flags);
+	pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
+	count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
+	smb_buffer->smb_buf_length += count;
+	pSMB->req.ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+			 &bytes_returned, 1);
+	if (rc) {
+/*    rc = map_smb_to_linux_error(smb_buffer_response);  *//* done in SendReceive now */
+	} else if ((smb_buffer_response->WordCount == 3)
+		   || (smb_buffer_response->WordCount == 4)) {
+		__u16 action = le16_to_cpu(pSMBr->resp.Action);
+		__u16 blob_len =
+		    le16_to_cpu(pSMBr->resp.SecurityBlobLength);
+		if (action & GUEST_LOGIN)
+			cFYI(1, (" Guest login"));	/* BB do we want to set anything in SesInfo struct ? */
+/*        if(SecurityBlob2->MessageType != NtLm??){                               
+                 cFYI("Unexpected message type on auth response is %d ")); 
+        } */
+		if (ses) {
+			cFYI(1,
+			     ("Does UID on challenge %d match auth response UID %d ",
+			      ses->Suid, smb_buffer_response->Uid));
+			ses->Suid = smb_buffer_response->Uid; /* UID left in wire format */
+			bcc_ptr = pByteArea(smb_buffer_response);	
+            /* response can have either 3 or 4 word count - Samba sends 3 */
+			if ((pSMBr->resp.hdr.WordCount == 3)
+			    || ((pSMBr->resp.hdr.WordCount == 4)
+				&& (blob_len <
+				    pSMBr->resp.ByteCount))) {
+				if (pSMBr->resp.hdr.WordCount == 4) {
+					bcc_ptr +=
+					    blob_len;
+					cFYI(1,
+					     ("Security Blob Length %d ",
+					      blob_len));
+				}
+
+				cFYI(1,
+				     ("NTLMSSP response to Authenticate "));
+
+				if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
+					if ((long) (bcc_ptr) % 2) {
+						remaining_words =
+						    (BCC(smb_buffer_response)
+						     - 1) / 2;
+						bcc_ptr++;	/* Unicode strings must be word aligned */
+					} else {
+						remaining_words = BCC(smb_buffer_response) / 2;
+					}
+					len =
+					    UniStrnlen((wchar_t *) bcc_ptr,remaining_words - 1);
+/* We look for obvious messed up bcc or strings in response so we do not go off
+  the end since (at least) WIN2K and Windows XP have a major bug in not null
+  terminating last Unicode string in response  */
+					ses->serverOS =
+					    cifs_kcalloc(2 * (len + 1), GFP_KERNEL);
+					cifs_strfromUCS_le(ses->serverOS,
+							   (wchar_t *)
+							   bcc_ptr, len,
+							   nls_codepage);
+					bcc_ptr += 2 * (len + 1);
+					remaining_words -= len + 1;
+					ses->serverOS[2 * len] = 0;
+					ses->serverOS[1 + (2 * len)] = 0;
+					if (remaining_words > 0) {
+						len = UniStrnlen((wchar_t *)
+								 bcc_ptr,
+								 remaining_words
+								 - 1);
+						ses->serverNOS =
+						    cifs_kcalloc(2 * (len + 1),
+							    GFP_KERNEL);
+						cifs_strfromUCS_le(ses->
+								   serverNOS,
+								   (wchar_t *)
+								   bcc_ptr,
+								   len,
+								   nls_codepage);
+						bcc_ptr += 2 * (len + 1);
+						ses->serverNOS[2 * len] = 0;
+						ses->serverNOS[1+(2*len)] = 0;
+						remaining_words -= len + 1;
+						if (remaining_words > 0) {
+							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
+     /* last string not always null terminated (e.g. for Windows XP & 2000) */
+							ses->serverDomain =
+							    cifs_kcalloc(2 * (len + 1),
+									 GFP_KERNEL);
+							cifs_strfromUCS_le(ses->serverDomain,
+									   (wchar_t *)bcc_ptr,
+									   len, nls_codepage);
+							bcc_ptr += 2 * (len + 1);
+							ses->serverDomain[2*len] = 0;
+							ses->serverDomain[1+(2*len)] = 0;
+						} /* else no more room so create dummy domain string */
+						else
+							ses->serverDomain = cifs_kcalloc(2,GFP_KERNEL);
+					} else {  /* no room so create dummy domain and NOS string */
+						ses->serverDomain = cifs_kcalloc(2, GFP_KERNEL);
+						ses->serverNOS = cifs_kcalloc(2, GFP_KERNEL);
+					}
+				} else {	/* ASCII */
+					len = strnlen(bcc_ptr, 1024);
+					if (((long) bcc_ptr + len) -
+					    (long) pByteArea(smb_buffer_response)
+					    <= BCC(smb_buffer_response)) {
+						ses->serverOS = cifs_kcalloc(len + 1,GFP_KERNEL);
+						strncpy(ses->serverOS,bcc_ptr, len);
+
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;	/* null terminate the string */
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverNOS = cifs_kcalloc(len+1,GFP_KERNEL);
+						strncpy(ses->serverNOS, bcc_ptr, len);	
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+
+						len = strnlen(bcc_ptr, 1024);
+						ses->serverDomain = cifs_kcalloc(len+1,GFP_KERNEL);
+						strncpy(ses->serverDomain, bcc_ptr, len);
+						bcc_ptr += len;
+						bcc_ptr[0] = 0;
+						bcc_ptr++;
+					} else
+						cFYI(1,
+						     ("Variable field of length %d extends beyond end of smb ",
+						      len));
+				}
+			} else {
+				cERROR(1,
+				       (" Security Blob Length extends beyond end of SMB"));
+			}
+		} else {
+			cERROR(1, ("No session structure passed in."));
+		}
+	} else {
+		cERROR(1,
+		       (" Invalid Word count %d: ",
+			smb_buffer_response->WordCount));
+		rc = -EIO;
+	}
+
+	if (smb_buffer)
+		cifs_buf_release(smb_buffer);
+
+	return rc;
+}
+
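+/* Send an SMB tree connect (TCONX) for the passed UNC path, saving the
+   returned tid and native file system name in the tcon, or just the IPC$
+   tid in the session when no tcon is passed in */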
+int
+CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
+	 const char *tree, struct cifsTconInfo *tcon,
+	 const struct nls_table *nls_codepage)
+{
+	struct smb_hdr *smb_buffer;
+	struct smb_hdr *smb_buffer_response;
+	TCONX_REQ *pSMB;
+	TCONX_RSP *pSMBr;
+	unsigned char *bcc_ptr;
+	int rc = 0;
+	int length;
+	__u16 count;
+
+	if (ses == NULL)
+		return -EIO;
+
+	smb_buffer = cifs_buf_get();
+	if (smb_buffer == NULL) {
+		return -ENOMEM;
+	}
+	smb_buffer_response = smb_buffer;
+
+	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
+			NULL /*no tid */ , 4 /*wct */ );
+	smb_buffer->Uid = ses->Suid;
+	pSMB = (TCONX_REQ *) smb_buffer;
+	pSMBr = (TCONX_RSP *) smb_buffer_response;
+
+	pSMB->AndXCommand = 0xFF;
+	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
+	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
+	bcc_ptr = &pSMB->Password[0];
+	bcc_ptr++;		/* skip password */
+
+	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	if (ses->capabilities & CAP_STATUS32) {
+		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+	}
+	if (ses->capabilities & CAP_DFS) {
+		smb_buffer->Flags2 |= SMBFLG2_DFS;
+	}
+	if (ses->capabilities & CAP_UNICODE) {
+		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+		length =
+		    cifs_strtoUCS((wchar_t *) bcc_ptr, tree, 100, nls_codepage);
+		bcc_ptr += 2 * length;	/* convert num of 16 bit words to bytes */
+		bcc_ptr += 2;	/* skip trailing null */
+	} else {		/* ASCII */
+
+		strcpy(bcc_ptr, tree);
+		bcc_ptr += strlen(tree) + 1;
+	}
+	strcpy(bcc_ptr, "?????");
+	bcc_ptr += strlen("?????");
+	bcc_ptr += 1;
+	count = bcc_ptr - &pSMB->Password[0];
+	pSMB->hdr.smb_buf_length += count;
+	pSMB->ByteCount = cpu_to_le16(count);
+
+	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0);
+
+	/* if (rc) rc = map_smb_to_linux_error(smb_buffer_response); */
+	/* above now done in SendReceive */
+	if ((rc == 0) && (tcon != NULL)) {
+		tcon->tidStatus = CifsGood;
+		tcon->tid = smb_buffer_response->Tid;
+		bcc_ptr = pByteArea(smb_buffer_response);
+		length = strnlen(bcc_ptr, BCC(smb_buffer_response) - 2);
+        /* skip service field (NB: this field is always ASCII) */
+		bcc_ptr += length + 1;	
+		strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
+		if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
+			length = UniStrnlen((wchar_t *) bcc_ptr, 512);
+			if ((bcc_ptr + (2 * length)) -
+			     pByteArea(smb_buffer_response) <=
+			    BCC(smb_buffer_response)) {
+				if(tcon->nativeFileSystem)
+					kfree(tcon->nativeFileSystem);
+				tcon->nativeFileSystem =
+				    cifs_kcalloc(length + 2, GFP_KERNEL);
+				cifs_strfromUCS_le(tcon->nativeFileSystem,
+						   (wchar_t *) bcc_ptr,
+						   length, nls_codepage);
+				bcc_ptr += 2 * length;
+				bcc_ptr[0] = 0;	/* null terminate the string */
+				bcc_ptr[1] = 0;
+				bcc_ptr += 2;
+			}
+			/* else do not bother copying these informational fields */
+		} else {
+			length = strnlen(bcc_ptr, 1024);
+			if ((bcc_ptr + length) -
+			    pByteArea(smb_buffer_response) <=
+			    BCC(smb_buffer_response)) {
+				if(tcon->nativeFileSystem)
+					kfree(tcon->nativeFileSystem);
+				tcon->nativeFileSystem =
+				    cifs_kcalloc(length + 1, GFP_KERNEL);
+				strncpy(tcon->nativeFileSystem, bcc_ptr,
+					length);
+			}
+			/* else do not bother copying these informational fields */
+		}
+		tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+		cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
+	} else if ((rc == 0) && tcon == NULL) {
+        /* all we need to save for IPC$ connection */
+		ses->ipc_tid = smb_buffer_response->Tid;
+	}
+
+	if (smb_buffer)
+		cifs_buf_release(smb_buffer);
+	return rc;
+}
+
+int
+cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
+{
+	int rc = 0;
+	int xid;
+	struct cifsSesInfo *ses = NULL;
+	struct task_struct *cifsd_task;
+
+	xid = GetXid();
+
+	if (cifs_sb->tcon) {
+		ses = cifs_sb->tcon->ses; /* save ptr to ses before we delete the tcon */
+		rc = CIFSSMBTDis(xid, cifs_sb->tcon);
+		if (rc == -EBUSY) {
+			FreeXid(xid);
+			return 0;
+		}
+		tconInfoFree(cifs_sb->tcon);
+		if ((ses) && (ses->server)) {
+			/* save off task so we do not refer to ses later */
+			cifsd_task = ses->server->tsk;
+			cFYI(1, ("About to do SMBLogoff "));
+			rc = CIFSSMBLogoff(xid, ses);
+			if (rc == -EBUSY) {
+				FreeXid(xid);
+				return 0;
+			} else if (rc == -ESHUTDOWN) {
+				cFYI(1,("Waking up socket by sending it signal"));
+				if(cifsd_task)
+					send_sig(SIGKILL,cifsd_task,1);
+				rc = 0;
+			} /* else - we have an smb session
+				left on this socket, do not kill cifsd */
+		} else
+			cFYI(1, ("No session or bad tcon"));
+	}
+	
+	cifs_sb->tcon = NULL;
+	if (ses) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ / 2);
+	}
+	if (ses)
+		sesInfoFree(ses);
+
+	FreeXid(xid);
+	return rc;		/* BB check if we should always return zero here */
+}
+
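+/* Negotiate the SMB dialect if this is a new tcp session, then choose the
+   session setup variant (SPNEGO, raw NTLMSSP or plain NTLM) based on the
+   security type negotiated with the server */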
+int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+					   struct nls_table * nls_info)
+{
+	int rc = 0;
+	char ntlm_session_key[CIFS_SESSION_KEY_SIZE];
+	int ntlmv2_flag = FALSE;
+
+	/* what if server changes its buffer size after dropping the session? */
+	if(pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
+		rc = CIFSSMBNegotiate(xid, pSesInfo);
+		if(rc == -EAGAIN) /* retry only once on 1st time connection */ {
+			rc = CIFSSMBNegotiate(xid, pSesInfo);
+			if(rc == -EAGAIN) 
+				rc = -EHOSTDOWN;
+		}
+		if(rc == 0) {
+			spin_lock(&GlobalMid_Lock);
+			if(pSesInfo->server->tcpStatus != CifsExiting)
+				pSesInfo->server->tcpStatus = CifsGood;
+			else
+				rc = -EHOSTDOWN;
+			spin_unlock(&GlobalMid_Lock);
+
+		}
+	}
+	if (!rc) {
+		pSesInfo->capabilities = pSesInfo->server->capabilities;
+		if(linuxExtEnabled == 0)
+			pSesInfo->capabilities &= (~CAP_UNIX);
+		pSesInfo->sequence_number = 0;
+		cFYI(1,("Security Mode: 0x%x Capabilities: 0x%x Time Zone: %d",
+			pSesInfo->server->secMode,
+			pSesInfo->server->capabilities,
+			pSesInfo->server->timeZone));
+		if (extended_security
+				&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
+				&& (pSesInfo->server->secType == NTLMSSP)) {
+			cFYI(1, ("New style sesssetup "));
+			rc = CIFSSpnegoSessSetup(xid, pSesInfo,
+				NULL /* security blob */, 
+				0 /* blob length */,
+				nls_info);
+		} else if (extended_security
+			   && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
+			   && (pSesInfo->server->secType == RawNTLMSSP)) {
+			cFYI(1, ("NTLMSSP sesssetup "));
+			rc = CIFSNTLMSSPNegotiateSessSetup(xid,
+						pSesInfo,
+						&ntlmv2_flag,
+						nls_info);
+			if (!rc) {
+				if(ntlmv2_flag) {
+					char * v2_response;
+					cFYI(1,("Can use more secure NTLM version 2 password hash"));
+					if(CalcNTLMv2_partial_mac_key(pSesInfo, 
+						nls_info)) {
+						rc = -ENOMEM;
+						goto ss_err_exit;
+					} else
+						v2_response = kmalloc(16 + 64 /* blob */, GFP_KERNEL);
+					if(v2_response) {
+						CalcNTLMv2_response(pSesInfo,v2_response);
+/*						cifs_calculate_ntlmv2_mac_key(pSesInfo->mac_signing_key, response, ntlm_session_key, */
+						kfree(v2_response);
+					/* BB Put dummy sig in SessSetup PDU? */
+					} else {
+						rc = -ENOMEM;
+						goto ss_err_exit;
+					}
+
+				} else {
+					SMBNTencrypt(pSesInfo->password,
+						pSesInfo->server->cryptKey,
+						ntlm_session_key);
+
+					cifs_calculate_mac_key(pSesInfo->mac_signing_key,
+						ntlm_session_key,
+						pSesInfo->password);
+				}
+			/* for better security the weaker lanman hash is not
+			   sent in AuthSessSetup so we no longer calculate it */
+
+				rc = CIFSNTLMSSPAuthSessSetup(xid,
+					pSesInfo,
+					ntlm_session_key,
+					ntlmv2_flag,
+					nls_info);
+			}
+		} else { /* old style NTLM 0.12 session setup */
+			SMBNTencrypt(pSesInfo->password,
+				pSesInfo->server->cryptKey,
+				ntlm_session_key);
+
+			cifs_calculate_mac_key(pSesInfo->mac_signing_key, 
+				ntlm_session_key, pSesInfo->password);
+			rc = CIFSSessSetup(xid, pSesInfo,
+				ntlm_session_key, nls_info);
+		}
+		if (rc) {
+			cERROR(1,("Send error in SessSetup = %d",rc));
+		} else {
+			cFYI(1,("CIFS Session Established successfully"));
+			pSesInfo->status = CifsGood;
+		}
+	}
+ss_err_exit:
+	return rc;
+}
+
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
new file mode 100644
index 0000000..f54e186
--- /dev/null
+++ b/fs/cifs/dir.c
@@ -0,0 +1,523 @@
+/*
+ *   fs/cifs/dir.c
+ *
+ *   vfs operations that deal with dentries
+ * 
+ *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+
+void
+renew_parental_timestamps(struct dentry *direntry)
+{
+	/* BB check if there is a way to get the kernel to do this or if we really need this */
+	do {
+		direntry->d_time = jiffies;
+		direntry = direntry->d_parent;
+	} while (!IS_ROOT(direntry));	
+}
+
+/* Note: caller must free return buffer */
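+/* Builds the server-relative path by walking d_parent back to the root of
+   the mount and filling the buffer from the end, so a dentry for
+   dir1/file1 under the mount point yields "\dir1\file1" (backslash
+   separated, as sent to the server) */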
+char *
+build_path_from_dentry(struct dentry *direntry)
+{
+	struct dentry *temp;
+	int namelen = 0;
+	char *full_path;
+
+	if(direntry == NULL)
+		return NULL;  /* not much we can do if dentry is freed and
+		we need to reopen the file after it was closed implicitly
+		when the server crashed */
+
+cifs_bp_rename_retry:
+	for (temp = direntry; !IS_ROOT(temp);) {
+		namelen += (1 + temp->d_name.len);
+		temp = temp->d_parent;
+		if(temp == NULL) {
+			cERROR(1,("corrupt dentry"));
+			return NULL;
+		}
+	}
+
+	full_path = kmalloc(namelen+1, GFP_KERNEL);
+	if(full_path == NULL)
+		return full_path;
+	full_path[namelen] = 0;	/* trailing null */
+
+	for (temp = direntry; !IS_ROOT(temp);) {
+		namelen -= 1 + temp->d_name.len;
+		if (namelen < 0) {
+			break;
+		} else {
+			full_path[namelen] = '\\';
+			strncpy(full_path + namelen + 1, temp->d_name.name,
+				temp->d_name.len);
+			cFYI(0, (" name: %s ", full_path + namelen));
+		}
+		temp = temp->d_parent;
+		if(temp == NULL) {
+			cERROR(1,("corrupt dentry"));
+			kfree(full_path);
+			return NULL;
+		}
+	}
+	if (namelen != 0) {
+		cERROR(1,
+		       ("We did not end path lookup where we expected, namelen is %d",
+			namelen));
+		/* presumably this is only possible if we were racing with a rename 
+		of one of the parent directories  (we can not lock the dentries
+		above us to prevent this, but retrying should be harmless) */
+		kfree(full_path);
+		namelen = 0;
+		goto cifs_bp_rename_retry;
+	}
+
+	return full_path;
+}
+
+/* Note: caller must free return buffer */
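+/* As build_path_from_dentry above, but with "\*" appended (e.g. "\dir1\*")
+   so the result can be used as the wildcard pattern when enumerating a
+   directory */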
+char *
+build_wildcard_path_from_dentry(struct dentry *direntry)
+{
+	struct dentry *temp;
+	int namelen = 0;
+	char *full_path;
+
+	if(direntry == NULL)
+		return NULL;  /* not much we can do if dentry is freed and
+		we need to reopen the file after it was closed implicitly
+		when the server crashed */
+
+cifs_bwp_rename_retry:
+	for (temp = direntry; !IS_ROOT(temp);) {
+		namelen += (1 + temp->d_name.len);
+		temp = temp->d_parent;
+		if(temp == NULL) {
+			cERROR(1,("corrupt dentry"));
+			return NULL;
+		}
+	}
+
+	full_path = kmalloc(namelen+3, GFP_KERNEL);
+	if(full_path == NULL)
+		return full_path;
+
+	full_path[namelen] = '\\';
+	full_path[namelen+1] = '*';
+	full_path[namelen+2] = 0;  /* trailing null */
+
+	for (temp = direntry; !IS_ROOT(temp);) {
+		namelen -= 1 + temp->d_name.len;
+		if (namelen < 0) {
+			break;
+		} else {
+			full_path[namelen] = '\\';
+			strncpy(full_path + namelen + 1, temp->d_name.name,
+				temp->d_name.len);
+			cFYI(0, (" name: %s ", full_path + namelen));
+		}
+		temp = temp->d_parent;
+		if(temp == NULL) {
+			cERROR(1,("corrupt dentry"));
+			kfree(full_path);
+			return NULL;
+		}
+	}
+	if (namelen != 0) {
+		cERROR(1,
+		       ("We did not end path lookup where we expected, namelen is %d",
+			namelen));
+		/* presumably this is only possible if we were racing with a rename 
+		of one of the parent directories  (we can not lock the dentries
+		above us to prevent this, but retrying should be harmless) */
+		kfree(full_path);
+		namelen = 0;
+		goto cifs_bwp_rename_retry;
+	}
+
+	return full_path;
+}
+
+/* Inode operations in similar order to how they appear in the Linux fs.h */
+
+int
+cifs_create(struct inode *inode, struct dentry *direntry, int mode,
+		struct nameidata *nd)
+{
+	int rc = -ENOENT;
+	int xid;
+	int oplock = 0;
+	int desiredAccess = GENERIC_READ | GENERIC_WRITE;
+	__u16 fileHandle;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	FILE_ALL_INFO * buf = NULL;
+	struct inode *newinode = NULL;
+	struct cifsFileInfo * pCifsFile = NULL;
+	struct cifsInodeInfo * pCifsInode;
+	int disposition = FILE_OVERWRITE_IF;
+	int write_only = FALSE;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&direntry->d_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&direntry->d_sb->s_vfs_rename_sem);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	if(nd) {
+		if ((nd->intent.open.flags & O_ACCMODE) == O_RDONLY)
+			desiredAccess = GENERIC_READ;
+		else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY) {
+			desiredAccess = GENERIC_WRITE;
+			write_only = TRUE;
+		} else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
+			/* GENERIC_ALL is too much permission to request */
+			/* can cause unnecessary access denied on create */
+			/* desiredAccess = GENERIC_ALL; */
+			desiredAccess = GENERIC_READ | GENERIC_WRITE;
+		}
+
+		if((nd->intent.open.flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			disposition = FILE_CREATE;
+		else if((nd->intent.open.flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
+			disposition = FILE_OVERWRITE_IF;
+		else if((nd->intent.open.flags & O_CREAT) == O_CREAT)
+			disposition = FILE_OPEN_IF;
+		else {
+			cFYI(1,("Create flag not set in create function"));
+		}
+	}
+
+	/* BB add processing to set equivalent of mode - e.g. via CreateX with ACLs */
+	if (oplockEnabled)
+		oplock = REQ_OPLOCK;
+
+	buf = kmalloc(sizeof(FILE_ALL_INFO),GFP_KERNEL);
+	if(buf == NULL) {
+		kfree(full_path);
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+			 desiredAccess, CREATE_NOT_DIR,
+			 &fileHandle, &oplock, buf, cifs_sb->local_nls);
+	if (rc) {
+		cFYI(1, ("cifs_create returned 0x%x ", rc));
+	} else {
+		/* If Open reported that we actually created a file
+		then we now have to set the mode if possible */
+		if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+			(oplock & CIFS_CREATE_ACTION))
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+				CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+					(__u64)current->euid,
+					(__u64)current->egid,
+					0 /* dev */,
+					cifs_sb->local_nls);
+			} else {
+				CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+					(__u64)-1,
+					(__u64)-1,
+					0 /* dev */,
+					cifs_sb->local_nls);
+			}
+		else {
+			/* BB implement via Windows security descriptors */
+			/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
+			/* could set r/o dos attribute if mode & 0222 == 0 */
+		}
+
+	/* BB server might mask mode so we have to query for Unix case*/
+		if (pTcon->ses->capabilities & CAP_UNIX)
+			rc = cifs_get_inode_info_unix(&newinode, full_path,
+						 inode->i_sb,xid);
+		else {
+			rc = cifs_get_inode_info(&newinode, full_path,
+						 buf, inode->i_sb,xid);
+			if(newinode)
+				newinode->i_mode = mode;
+		}
+
+		if (rc != 0) {
+			cFYI(1,("Create worked but get_inode_info failed with rc = %d",
+			      rc));
+		} else {
+			direntry->d_op = &cifs_dentry_ops;
+			d_instantiate(direntry, newinode);
+		}
+		if((nd == NULL) || ((nd->flags & LOOKUP_OPEN) == FALSE)) {
+			/* mknod case - do not leave file open */
+			CIFSSMBClose(xid, pTcon, fileHandle);
+		} else if(newinode) {
+			pCifsFile = (struct cifsFileInfo *)
+			   kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
+		
+			if (pCifsFile) {
+				memset((char *)pCifsFile, 0,
+				       sizeof (struct cifsFileInfo));
+				pCifsFile->netfid = fileHandle;
+				pCifsFile->pid = current->tgid;
+				pCifsFile->pInode = newinode;
+				pCifsFile->invalidHandle = FALSE;
+				pCifsFile->closePend     = FALSE;
+				init_MUTEX(&pCifsFile->fh_sem);
+				/* put the following in at open now */
+				/* pCifsFile->pfile = file; */ 
+				write_lock(&GlobalSMBSeslock);
+				list_add(&pCifsFile->tlist,&pTcon->openFileList);
+				pCifsInode = CIFS_I(newinode);
+				if(pCifsInode) {
+				/* if readable file instance put first in list*/
+					if (write_only == TRUE) {
+						list_add_tail(&pCifsFile->flist,
+							&pCifsInode->openFileList);
+					} else {
+						list_add(&pCifsFile->flist,
+							&pCifsInode->openFileList);
+					}
+					if((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+						pCifsInode->clientCanCacheAll = TRUE;
+						pCifsInode->clientCanCacheRead = TRUE;
+						cFYI(1,("Exclusive Oplock granted on inode %p",
+							newinode));
+					} else if((oplock & 0xF) == OPLOCK_READ)
+						pCifsInode->clientCanCacheRead = TRUE;
+				}
+				write_unlock(&GlobalSMBSeslock);
+			}
+		}
+	} 
+
+	kfree(buf);
+	kfree(full_path);
+	FreeXid(xid);
+
+	return rc;
+}
+
+int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, dev_t device_number) 
+{
+	int rc = -EPERM;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	struct inode * newinode = NULL;
+
+	if (!old_valid_dev(device_number))
+		return -EINVAL;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&direntry->d_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&direntry->d_sb->s_vfs_rename_sem);
+	if(full_path == NULL)
+		rc = -ENOMEM;
+	
+	if (full_path && (pTcon->ses->capabilities & CAP_UNIX)) {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+			rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+				mode,(__u64)current->euid,(__u64)current->egid,
+				device_number, cifs_sb->local_nls);
+		} else {
+			rc = CIFSSMBUnixSetPerms(xid, pTcon,
+				full_path, mode, (__u64)-1, (__u64)-1,
+				device_number, cifs_sb->local_nls);
+		}
+
+		if(!rc) {
+			rc = cifs_get_inode_info_unix(&newinode, full_path,
+						inode->i_sb,xid);
+			direntry->d_op = &cifs_dentry_ops;
+			if(rc == 0)
+				d_instantiate(direntry, newinode);
+		}
+	}
+
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+
+	return rc;
+}
+
+
+struct dentry *
+cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, struct nameidata *nd)
+{
+	int xid;
+	int rc = 0; /* to get around spurious gcc warning, set to zero here */
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct inode *newInode = NULL;
+	char *full_path = NULL;
+
+	xid = GetXid();
+
+	cFYI(1,
+	     (" parent inode = 0x%p name is: %s and dentry = 0x%p",
+	      parent_dir_inode, direntry->d_name.name, direntry));
+
+	/* BB Add check of incoming data - e.g. frame not longer than maximum SMB - let server check the namelen BB */
+
+	/* check whether path exists */
+
+	cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	/* can not grab the rename sem here since it would
+	deadlock in the cases (beginning of sys_rename itself)
+	in which we already have the sb rename sem */
+	full_path = build_path_from_dentry(direntry);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (direntry->d_inode != NULL) {
+		cFYI(1, (" non-NULL inode in lookup"));
+	} else {
+		cFYI(1, (" NULL inode in lookup"));
+	}
+	cFYI(1,
+	     (" Full path: %s inode = 0x%p", full_path, direntry->d_inode));
+
+	if (pTcon->ses->capabilities & CAP_UNIX)
+		rc = cifs_get_inode_info_unix(&newInode, full_path,
+					      parent_dir_inode->i_sb,xid);
+	else
+		rc = cifs_get_inode_info(&newInode, full_path, NULL,
+					 parent_dir_inode->i_sb,xid);
+
+	if ((rc == 0) && (newInode != NULL)) {
+		direntry->d_op = &cifs_dentry_ops;
+		d_add(direntry, newInode);
+
+		/* since paths are not looked up by component - the parent directories are presumed to be good here */
+		renew_parental_timestamps(direntry);
+
+	} else if (rc == -ENOENT) {
+		rc = 0;
+		d_add(direntry, NULL);
+	} else {
+		cERROR(1,("Error %d on cifs_get_inode_info in lookup", rc));
+		/* BB special case check for Access Denied - watch security 
+		exposure of returning dir info implicitly via different rc 
+		if file exists or not but no access BB */
+	}
+
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+	return ERR_PTR(rc);
+}
+
+int
+cifs_dir_open(struct inode *inode, struct file *file)
+{				/* NB: currently unused since searches are opened in readdir */
+	int rc = 0;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	if(file->f_dentry) {
+		down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+		full_path = build_wildcard_path_from_dentry(file->f_dentry);
+		up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	} else {
+		FreeXid(xid);
+		return -EIO;
+	}
+
+	cFYI(1, ("inode = 0x%p and full path is %s", inode, full_path));
+
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+static int
+cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
+{
+	int isValid = 1;
+
+/*	lock_kernel(); *//* surely we do not want to lock the kernel for a whole network round trip which could take seconds */
+
+	if (direntry->d_inode) {
+		if (cifs_revalidate(direntry)) {
+			/* unlock_kernel(); */
+			return 0;
+		}
+	} else {
+		cFYI(1,
+		     ("In cifs_d_revalidate with no inode but name = %s and dentry 0x%p",
+		      direntry->d_name.name, direntry));
+	}
+
+/*    unlock_kernel(); */
+
+	return isValid;
+}
+
+/* static int cifs_d_delete(struct dentry *direntry)
+{
+	int rc = 0;
+
+	cFYI(1, ("In cifs d_delete, name = %s", direntry->d_name.name));
+
+	return rc;
+}     */
+
+struct dentry_operations cifs_dentry_ops = {
+	.d_revalidate = cifs_d_revalidate,
+/* d_delete:       cifs_d_delete,       *//* not needed except for debugging */
+	/* no need for d_hash, d_compare, d_release, d_iput ... yet. BB confirm this BB */
+};
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
new file mode 100644
index 0000000..9d24c40
--- /dev/null
+++ b/fs/cifs/fcntl.c
@@ -0,0 +1,117 @@
+/*
+ *   fs/cifs/fcntl.c
+ *
+ *   vfs operations that deal with the file control API
+ * 
+ *   Copyright (C) International Business Machines  Corp., 2003,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "cifsfs.h"
+
+static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
+{
+	__u32 cifs_ntfy_flags = 0;
+
+	/* No way on Linux VFS to ask to monitor xattr
+	   changes (and no stream support either) */
+	if(fcntl_notify_flags & DN_ACCESS) {
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS;
+	}
+	if(fcntl_notify_flags & DN_MODIFY) {
+		/* What does this mean on directories? */
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE |
+			FILE_NOTIFY_CHANGE_SIZE;
+	}
+	if(fcntl_notify_flags & DN_CREATE) {
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION | 
+			FILE_NOTIFY_CHANGE_LAST_WRITE;
+	}
+	if(fcntl_notify_flags & DN_DELETE) {
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE;
+	}
+	if(fcntl_notify_flags & DN_RENAME) {
+		/* BB review this - checking various server behaviors */
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME | 
+			FILE_NOTIFY_CHANGE_FILE_NAME;
+	}
+	if(fcntl_notify_flags & DN_ATTRIB) {
+		cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_SECURITY | 
+			FILE_NOTIFY_CHANGE_ATTRIBUTES;
+	}
+/*	if(fcntl_notify_flags & DN_MULTISHOT) {
+		cifs_ntfy_flags |= ;
+	} */ /* BB fixme - not sure how to handle this with CIFS yet */
+
+
+	return cifs_ntfy_flags;
+}
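+
+/* For example, fcntl(fd, F_NOTIFY, DN_CREATE | DN_DELETE) is converted
+   above to FILE_NOTIFY_CHANGE_CREATION | FILE_NOTIFY_CHANGE_LAST_WRITE,
+   which cifs_dir_notify below passes to the server as the change
+   notification filter */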
+
+int cifs_dir_notify(struct file * file, unsigned long arg)
+{
+	int xid;
+	int rc = -EINVAL;
+	int oplock = FALSE;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	__u32 filter = FILE_NOTIFY_CHANGE_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES;
+	__u16 netfid;
+
+	xid = GetXid();
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(file->f_dentry);
+	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+
+	if(full_path == NULL) {
+		rc = -ENOMEM;
+	} else {
+		cERROR(1,("cifs dir notify on file %s with arg 0x%lx",full_path,arg)); /* BB removeme BB */
+		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, 
+			GENERIC_READ | SYNCHRONIZE, 0 /* create options */,
+			&netfid, &oplock,NULL, cifs_sb->local_nls);
+		/* BB fixme - add this handle to a notify handle list */
+		if(rc) {
+			cERROR(1,("Could not open directory for notify"));  /* BB remove BB */
+		} else {
+			filter = convert_to_cifs_notify_flags(arg);
+			if(filter != 0) {
+				rc = CIFSSMBNotify(xid, pTcon, 0 /* no subdirs */, netfid, 
+					filter, cifs_sb->local_nls);
+			} else {
+				rc = -EINVAL;
+			}
+			/* BB add code to close file eventually (at unmount
+			it would close automatically but may be a way
+			to do it easily when inode freed or when
+			notify info is cleared/changed */
+			cERROR(1,("notify rc %d",rc));
+		}
+	}
+	
+	FreeXid(xid);
+	return rc;
+}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
new file mode 100644
index 0000000..dcab7cf
--- /dev/null
+++ b/fs/cifs/file.c
@@ -0,0 +1,1675 @@
+/*
+ *   fs/cifs/file.c
+ *
+ *   vfs operations that deal with files
+ * 
+ *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/smp_lock.h>
+#include <asm/div64.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+
+static inline struct cifsFileInfo *cifs_init_private(
+	struct cifsFileInfo *private_data, struct inode *inode,
+	struct file *file, __u16 netfid)
+{
+	memset(private_data, 0, sizeof(struct cifsFileInfo));
+	private_data->netfid = netfid;
+	private_data->pid = current->tgid;	
+	init_MUTEX(&private_data->fh_sem);
+	private_data->pfile = file; /* needed for writepage */
+	private_data->pInode = inode;
+	private_data->invalidHandle = FALSE;
+	private_data->closePend = FALSE;
+
+	return private_data;
+}
+
+static inline int cifs_convert_flags(unsigned int flags)
+{
+	if ((flags & O_ACCMODE) == O_RDONLY)
+		return GENERIC_READ;
+	else if ((flags & O_ACCMODE) == O_WRONLY)
+		return GENERIC_WRITE;
+	else if ((flags & O_ACCMODE) == O_RDWR) {
+		/* GENERIC_ALL is too much permission to request
+		   can cause unnecessary access denied on create */
+		/* return GENERIC_ALL; */
+		return (GENERIC_READ | GENERIC_WRITE);
+	}
+
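+	/* default: request an explicit set of access bits rather than
+	   GENERIC_ALL, ie READ_CONTROL | FILE_READ_ATTRIBUTES |
+	   FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA |
+	   FILE_WRITE_DATA | FILE_READ_DATA (0x20197) */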
+	return 0x20197;
+}
+
+static inline int cifs_get_disposition(unsigned int flags)
+{
+	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+		return FILE_CREATE;
+	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
+		return FILE_OVERWRITE_IF;
+	else if ((flags & O_CREAT) == O_CREAT)
+		return FILE_OPEN_IF;
+	else
+		return FILE_OPEN;
+}
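+
+/* For example, an open with O_RDWR | O_CREAT | O_TRUNC maps above to
+   desiredAccess GENERIC_READ | GENERIC_WRITE and disposition
+   FILE_OVERWRITE_IF (see the fuller mapping table in cifs_open below) */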
+
+/* all arguments to this function must be checked for validity in caller */
+static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
+	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
+	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
+	char *full_path, int xid)
+{
+	struct timespec temp;
+	int rc;
+
+	/* want handles we can use to read with first
+	   in the list so we do not have to walk the
+	   list to search for one in prepare_write */
+	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+		list_add_tail(&pCifsFile->flist, 
+			      &pCifsInode->openFileList);
+	} else {
+		list_add(&pCifsFile->flist,
+			 &pCifsInode->openFileList);
+	}
+	write_unlock(&GlobalSMBSeslock);
+	write_unlock(&file->f_owner.lock);
+	if (pCifsInode->clientCanCacheRead) {
+		/* we have the inode open somewhere else
+		   no need to discard cache data */
+		goto client_can_cache;
+	}
+
+	/* BB need same check in cifs_create too? */
+	/* if not oplocked, invalidate inode pages if mtime or file
+	   size changed */
+	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
+	if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) && 
+			   (file->f_dentry->d_inode->i_size == 
+			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
+		cFYI(1, ("inode unchanged on server"));
+	} else {
+		if (file->f_dentry->d_inode->i_mapping) {
+		/* BB no need to lock inode until after invalidate
+		   since namei code should already have it locked? */
+			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
+			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
+		}
+		cFYI(1, ("invalidating remote inode since open detected it "
+			 "changed"));
+		invalidate_remote_inode(file->f_dentry->d_inode);
+	}
+
+client_can_cache:
+	if (pTcon->ses->capabilities & CAP_UNIX)
+		rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
+			full_path, inode->i_sb, xid);
+	else
+		rc = cifs_get_inode_info(&file->f_dentry->d_inode,
+			full_path, buf, inode->i_sb, xid);
+
+	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+		pCifsInode->clientCanCacheAll = TRUE;
+		pCifsInode->clientCanCacheRead = TRUE;
+		cFYI(1, ("Exclusive Oplock granted on inode %p",
+			 file->f_dentry->d_inode));
+	} else if ((*oplock & 0xF) == OPLOCK_READ)
+		pCifsInode->clientCanCacheRead = TRUE;
+
+	return rc;
+}
+
+int cifs_open(struct inode *inode, struct file *file)
+{
+	int rc = -EACCES;
+	int xid, oplock;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct cifsFileInfo *pCifsFile;
+	struct cifsInodeInfo *pCifsInode;
+	struct list_head *tmp;
+	char *full_path = NULL;
+	int desiredAccess;
+	int disposition;
+	__u16 netfid;
+	FILE_ALL_INFO *buf = NULL;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	if (file->f_flags & O_CREAT) {
+		/* search inode for this file and fill in file->private_data */
+		pCifsInode = CIFS_I(file->f_dentry->d_inode);
+		read_lock(&GlobalSMBSeslock);
+		list_for_each(tmp, &pCifsInode->openFileList) {
+			pCifsFile = list_entry(tmp, struct cifsFileInfo,
+					       flist);
+			if ((pCifsFile->pfile == NULL) &&
+			    (pCifsFile->pid == current->tgid)) {
+				/* mode set in cifs_create */
+
+				/* needed for writepage */
+				pCifsFile->pfile = file;
+				
+				file->private_data = pCifsFile;
+				break;
+			}
+		}
+		read_unlock(&GlobalSMBSeslock);
+		if (file->private_data != NULL) {
+			rc = 0;
+			FreeXid(xid);
+			return rc;
+		} else {
+			if (file->f_flags & O_EXCL)
+				cERROR(1, ("could not find file instance for "
+					   "new file %p ", file));
+		}
+	}
+
+	down(&inode->i_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(file->f_dentry);
+	up(&inode->i_sb->s_vfs_rename_sem);
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
+		 inode, file->f_flags, full_path));
+	desiredAccess = cifs_convert_flags(file->f_flags);
+
+/*********************************************************************
+ *  open flag mapping table:
+ *  
+ *	POSIX Flag            CIFS Disposition
+ *	----------            ---------------- 
+ *	O_CREAT               FILE_OPEN_IF
+ *	O_CREAT | O_EXCL      FILE_CREATE
+ *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
+ *	O_TRUNC               FILE_OVERWRITE
+ *	none of the above     FILE_OPEN
+ *
+ *	Note that no POSIX flag maps directly to the disposition
+ *	FILE_SUPERSEDE (ie create whether or not the file exists).
+ *	O_CREAT | O_TRUNC is similar, but it truncates the existing
+ *	file rather than creating a new file as FILE_SUPERSEDE does
+ *	(which uses the attributes / metadata passed in on open call).
+ *?
+ *?  O_SYNC is a reasonable match to CIFS writethrough flag  
+ *?  and the read write flags match reasonably.  O_LARGEFILE
+ *?  is irrelevant because largefile support is always used
+ *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
+ *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
+ *********************************************************************/
+
+	disposition = cifs_get_disposition(file->f_flags);
+
+	if (oplockEnabled)
+		oplock = REQ_OPLOCK;
+	else
+		oplock = FALSE;
+
+	/* BB pass O_SYNC flag through on file attributes .. BB */
+
+	/* Also refresh inode by passing in file_info buf returned by SMBOpen
+	   and calling get_inode_info with returned buf (at least helps
+	   non-Unix server case) */
+
+	/* BB we can not do this if this is the second open of a file 
+	   and the first handle has writebehind data, we might be 
+	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
+	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
+			 CREATE_NOT_DIR, &netfid, &oplock, buf,
+			 cifs_sb->local_nls);
+	if (rc) {
+		cFYI(1, ("cifs_open returned 0x%x ", rc));
+		goto out;
+	}
+	file->private_data =
+		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+	if (file->private_data == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
+	write_lock(&file->f_owner.lock);
+	write_lock(&GlobalSMBSeslock);
+	list_add(&pCifsFile->tlist, &pTcon->openFileList);
+
+	pCifsInode = CIFS_I(file->f_dentry->d_inode);
+	if (pCifsInode) {
+		rc = cifs_open_inode_helper(inode, file, pCifsInode,
+					    pCifsFile, pTcon,
+					    &oplock, buf, full_path, xid);
+	} else {
+		write_unlock(&GlobalSMBSeslock);
+		write_unlock(&file->f_owner.lock);
+	}
+
+	if (oplock & CIFS_CREATE_ACTION) {           
+		/* time to set mode which we can not set earlier due to
+		   problems creating new read-only files */
+		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+					    inode->i_mode,
+					    (__u64)-1, (__u64)-1, 0 /* dev */,
+					    cifs_sb->local_nls);
+		} else {
+			/* BB implement via Windows security descriptors eg
+			   CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
+					      -1, -1, local_nls);
+			   in the meantime could set r/o dos attribute when
+			   perms are eg: mode & 0222 == 0 */
+		}
+	}
+
+out:
+	kfree(buf);
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+/* Try to reacquire byte range locks that were released when the session
+   to the server was lost */
+static int cifs_relock_file(struct cifsFileInfo *cifsFile)
+{
+	int rc = 0;
+
+/* BB list all locks open on this file and relock */
+
+	return rc;
+}
+
+static int cifs_reopen_file(struct inode *inode, struct file *file, 
+	int can_flush)
+{
+	int rc = -EACCES;
+	int xid, oplock;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct cifsFileInfo *pCifsFile;
+	struct cifsInodeInfo *pCifsInode;
+	char *full_path = NULL;
+	int desiredAccess;
+	int disposition = FILE_OPEN;
+	__u16 netfid;
+
+	if (inode == NULL)
+		return -EBADF;
+	if (file->private_data) {
+		pCifsFile = (struct cifsFileInfo *)file->private_data;
+	} else
+		return -EBADF;
+
+	xid = GetXid();
+	down(&pCifsFile->fh_sem);
+	if (pCifsFile->invalidHandle == FALSE) {
+		up(&pCifsFile->fh_sem);
+		FreeXid(xid);
+		return 0;
+	}
+
+	if (file->f_dentry == NULL) {
+		up(&pCifsFile->fh_sem);
+		cFYI(1, ("failed file reopen, no valid name if dentry freed"));
+		FreeXid(xid);
+		return -EBADF;
+	}
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+/* can not grab rename sem here because various ops, including
+   those that already have the rename sem can end up causing writepage
+   to get called and if the server was down that means we end up here,
+   and we can never tell if the caller already has the rename_sem */
+	full_path = build_path_from_dentry(file->f_dentry);
+	if (full_path == NULL) {
+		up(&pCifsFile->fh_sem);
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
+		 inode, file->f_flags,full_path));
+	desiredAccess = cifs_convert_flags(file->f_flags);
+
+	if (oplockEnabled)
+		oplock = REQ_OPLOCK;
+	else
+		oplock = FALSE;
+
+	/* Can not refresh inode by passing in file_info buf to be returned
+	   by SMBOpen and then calling get_inode_info with returned buf 
+	   since file might have write behind data that needs to be flushed 
+	   and server version of file size can be stale. If we knew for sure
+	   that inode was not dirty locally we could do this */
+
+/*	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+	if (buf == 0) {
+		up(&pCifsFile->fh_sem);
+		kfree(full_path);
+		FreeXid(xid);
+		return -ENOMEM;
+	} */
+	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
+			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
+			 cifs_sb->local_nls);
+	if (rc) {
+		up(&pCifsFile->fh_sem);
+		cFYI(1, ("cifs_open returned 0x%x ", rc));
+		cFYI(1, ("oplock: %d ", oplock));
+	} else {
+		pCifsFile->netfid = netfid;
+		pCifsFile->invalidHandle = FALSE;
+		up(&pCifsFile->fh_sem);
+		pCifsInode = CIFS_I(inode);
+		if (pCifsInode) {
+			if (can_flush) {
+				filemap_fdatawrite(inode->i_mapping);
+				filemap_fdatawait(inode->i_mapping);
+			/* temporarily disable caching while we
+			   go to server to get inode info */
+				pCifsInode->clientCanCacheAll = FALSE;
+				pCifsInode->clientCanCacheRead = FALSE;
+				if (pTcon->ses->capabilities & CAP_UNIX)
+					rc = cifs_get_inode_info_unix(&inode,
+						full_path, inode->i_sb, xid);
+				else
+					rc = cifs_get_inode_info(&inode,
+						full_path, NULL, inode->i_sb,
+						xid);
+			} /* else we are writing out data to server already
+			     and could deadlock if we tried to flush data, and
+			     since we do not know if we have data that would
+			     invalidate the current end of file on the server
+			     we can not go to the server to get the new inode
+			     info */
+			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+				pCifsInode->clientCanCacheAll = TRUE;
+				pCifsInode->clientCanCacheRead = TRUE;
+				cFYI(1, ("Exclusive Oplock granted on inode %p",
+					 file->f_dentry->d_inode));
+			} else if ((oplock & 0xF) == OPLOCK_READ) {
+				pCifsInode->clientCanCacheRead = TRUE;
+				pCifsInode->clientCanCacheAll = FALSE;
+			} else {
+				pCifsInode->clientCanCacheRead = FALSE;
+				pCifsInode->clientCanCacheAll = FALSE;
+			}
+			cifs_relock_file(pCifsFile);
+		}
+	}
+
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_close(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct cifsFileInfo *pSMBFile =
+		(struct cifsFileInfo *)file->private_data;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+	if (pSMBFile) {
+		pSMBFile->closePend = TRUE;
+		write_lock(&file->f_owner.lock);
+		if (pTcon) {
+			/* no sense reconnecting to close a file that is
+			   already closed */
+			if (pTcon->tidStatus != CifsNeedReconnect) {
+				write_unlock(&file->f_owner.lock);
+				rc = CIFSSMBClose(xid, pTcon,
+						  pSMBFile->netfid);
+				write_lock(&file->f_owner.lock);
+			}
+		}
+		list_del(&pSMBFile->flist);
+		list_del(&pSMBFile->tlist);
+		write_unlock(&file->f_owner.lock);
+		kfree(pSMBFile->search_resume_name);
+		kfree(file->private_data);
+		file->private_data = NULL;
+	} else
+		rc = -EBADF;
+
+	if (list_empty(&(CIFS_I(inode)->openFileList))) {
+		cFYI(1, ("closing last open instance for inode %p", inode));
+		/* if the file is not open we do not know if we can cache info
+		   on this inode, much less write behind and read ahead */
+		CIFS_I(inode)->clientCanCacheRead = FALSE;
+		CIFS_I(inode)->clientCanCacheAll  = FALSE;
+	}
+	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
+		rc = CIFS_I(inode)->write_behind_rc;
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_closedir(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+	int xid;
+	struct cifsFileInfo *pCFileStruct =
+	    (struct cifsFileInfo *)file->private_data;
+	char *ptmp;
+
+	cFYI(1, ("Closedir inode = 0x%p", inode));
+
+	xid = GetXid();
+
+	if (pCFileStruct) {
+		struct cifsTconInfo *pTcon;
+		struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+
+		pTcon = cifs_sb->tcon;
+
+		cFYI(1, ("Freeing private data in close dir"));
+		if (pCFileStruct->srch_inf.endOfSearch == FALSE) {
+			pCFileStruct->invalidHandle = TRUE;
+			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
+			cFYI(1, ("Closing uncompleted readdir with rc %d",
+				 rc));
+			/* not much we can do if it fails anyway, ignore rc */
+			rc = 0;
+		}
+		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
+		if (ptmp) {
+   /* BB removeme BB */	cFYI(1, ("freeing smb buf in srch struct in closedir"));
+			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
+			cifs_buf_release(ptmp);
+		}
+		ptmp = pCFileStruct->search_resume_name;
+		if (ptmp) {
+   /* BB removeme BB */	cFYI(1, ("freeing resume name in closedir"));
+			pCFileStruct->search_resume_name = NULL;
+			kfree(ptmp);
+		}
+		kfree(file->private_data);
+		file->private_data = NULL;
+	}
+	/* BB can we lock the filestruct while this is going on? */
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
+{
+	int rc, xid;
+	__u32 lockType = LOCKING_ANDX_LARGE_FILES;
+	__u32 numLock = 0;
+	__u32 numUnlock = 0;
+	__u64 length;
+	int wait_flag = FALSE;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+
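+	/* POSIX byte range locks are inclusive of fl_end, so a lock covering
+	   a single byte (fl_end == fl_start) has an SMB length of 1 */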
+	length = 1 + pfLock->fl_end - pfLock->fl_start;
+	rc = -EACCES;
+	xid = GetXid();
+
+	cFYI(1, ("Lock parm: 0x%x flockflags: "
+		 "0x%x flocktype: 0x%x start: %lld end: %lld",
+	        cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
+	        pfLock->fl_end));
+
+	if (pfLock->fl_flags & FL_POSIX)
+		cFYI(1, ("Posix "));
+	if (pfLock->fl_flags & FL_FLOCK)
+		cFYI(1, ("Flock "));
+	if (pfLock->fl_flags & FL_SLEEP) {
+		cFYI(1, ("Blocking lock "));
+		wait_flag = TRUE;
+	}
+	if (pfLock->fl_flags & FL_ACCESS)
+		cFYI(1, ("Process suspended by mandatory locking - "
+			 "not implemented yet "));
+	if (pfLock->fl_flags & FL_LEASE)
+		cFYI(1, ("Lease on file - not implemented yet"));
+	if (pfLock->fl_flags & 
+	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
+		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
+
+	if (pfLock->fl_type == F_WRLCK) {
+		cFYI(1, ("F_WRLCK "));
+		numLock = 1;
+	} else if (pfLock->fl_type == F_UNLCK) {
+		cFYI(1, ("F_UNLCK "));
+		numUnlock = 1;
+	} else if (pfLock->fl_type == F_RDLCK) {
+		cFYI(1, ("F_RDLCK "));
+		lockType |= LOCKING_ANDX_SHARED_LOCK;
+		numLock = 1;
+	} else if (pfLock->fl_type == F_EXLCK) {
+		cFYI(1, ("F_EXLCK "));
+		numLock = 1;
+	} else if (pfLock->fl_type == F_SHLCK) {
+		cFYI(1, ("F_SHLCK "));
+		lockType |= LOCKING_ANDX_SHARED_LOCK;
+		numLock = 1;
+	} else
+		cFYI(1, ("Unknown type of lock "));
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	if (file->private_data == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+
+	if (IS_GETLK(cmd)) {
+		rc = CIFSSMBLock(xid, pTcon,
+				 ((struct cifsFileInfo *)file->
+				  private_data)->netfid,
+				 length,
+				 pfLock->fl_start, 0, 1, lockType,
+				 0 /* wait flag */ );
+		if (rc == 0) {
+			rc = CIFSSMBLock(xid, pTcon,
+					 ((struct cifsFileInfo *) file->
+					  private_data)->netfid,
+					 length,
+					 pfLock->fl_start, 1 /* numUnlock */ ,
+					 0 /* numLock */ , lockType,
+					 0 /* wait flag */ );
+			pfLock->fl_type = F_UNLCK;
+			if (rc != 0)
+				cERROR(1, ("Error unlocking previously locked "
+					   "range %d during test of lock ",
+					   rc));
+			rc = 0;
+
+		} else {
+			/* if rc == ERR_SHARING_VIOLATION ? */
+			rc = 0;	/* do not change lock type to unlock
+				   since range in use */
+		}
+
+		FreeXid(xid);
+		return rc;
+	}
+
+	rc = CIFSSMBLock(xid, pTcon,
+			 ((struct cifsFileInfo *) file->private_data)->
+			 netfid, length,
+			 pfLock->fl_start, numUnlock, numLock, lockType,
+			 wait_flag);
+	if (rc == 0 && (pfLock->fl_flags & FL_POSIX))
+		posix_lock_file_wait(file, pfLock);
+	FreeXid(xid);
+	return rc;
+}
+
+ssize_t cifs_user_write(struct file *file, const char __user *write_data,
+	size_t write_size, loff_t *poffset)
+{
+	int rc = 0;
+	unsigned int bytes_written = 0;
+	unsigned int total_written;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int xid, long_op;
+	struct cifsFileInfo *open_file;
+
+	if (file->f_dentry == NULL)
+		return -EBADF;
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	if (cifs_sb == NULL)
+		return -EBADF;
+
+	pTcon = cifs_sb->tcon;
+
+	/* cFYI(1,
+	   (" write %d bytes to offset %lld of %s", write_size,
+	   *poffset, file->f_dentry->d_name.name)); */
+
+	if (file->private_data == NULL)
+		return -EBADF;
+	else
+		open_file = (struct cifsFileInfo *) file->private_data;
+	
+	xid = GetXid();
+	if (file->f_dentry->d_inode == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+
+	if (*poffset > file->f_dentry->d_inode->i_size)
+		long_op = 2; /* writes past end of file can take a long time */
+	else
+		long_op = 1;
+
+	for (total_written = 0; write_size > total_written;
+	     total_written += bytes_written) {
+		rc = -EAGAIN;
+		while (rc == -EAGAIN) {
+			if (file->private_data == NULL) {
+				/* file has been closed on us */
+				FreeXid(xid);
+			/* if we have gotten here we have written some data
+			   and blocked, and the file has been freed on us while
+			   we blocked so return what we managed to write */
+				return total_written;
+			} 
+			if (open_file->closePend) {
+				FreeXid(xid);
+				if (total_written)
+					return total_written;
+				else
+					return -EBADF;
+			}
+			if (open_file->invalidHandle) {
+				if ((file->f_dentry == NULL) ||
+				    (file->f_dentry->d_inode == NULL)) {
+					FreeXid(xid);
+					return total_written;
+				}
+				/* we could deadlock if we called
+				   filemap_fdatawait from here so tell
+				   reopen_file not to flush data to server
+				   now */
+				rc = cifs_reopen_file(file->f_dentry->d_inode,
+					file, FALSE);
+				if (rc != 0)
+					break;
+			}
+
+			rc = CIFSSMBWrite(xid, pTcon,
+				open_file->netfid,
+				min_t(const int, cifs_sb->wsize,
+				      write_size - total_written),
+				*poffset, &bytes_written,
+				NULL, write_data + total_written, long_op);
+		}
+		if (rc || (bytes_written == 0)) {
+			if (total_written)
+				break;
+			else {
+				FreeXid(xid);
+				return rc;
+			}
+		} else
+			*poffset += bytes_written;
+		long_op = FALSE; /* subsequent writes fast -
+				    15 seconds is plenty */
+	}
+
+#ifdef CONFIG_CIFS_STATS
+	if (total_written > 0) {
+		atomic_inc(&pTcon->num_writes);
+		spin_lock(&pTcon->stat_lock);
+		pTcon->bytes_written += total_written;
+		spin_unlock(&pTcon->stat_lock);
+	}
+#endif		
+
+	/* since the write may have blocked check these pointers again */
+	if (file->f_dentry) {
+		if (file->f_dentry->d_inode) {
+			struct inode *inode = file->f_dentry->d_inode;
+			inode->i_ctime = inode->i_mtime =
+				current_fs_time(inode->i_sb);
+			if (total_written > 0) {
+				if (*poffset > file->f_dentry->d_inode->i_size)
+					i_size_write(file->f_dentry->d_inode,
+					*poffset);
+			}
+			mark_inode_dirty_sync(file->f_dentry->d_inode);
+		}
+	}
+	FreeXid(xid);
+	return total_written;
+}
+
+static ssize_t cifs_write(struct file *file, const char *write_data,
+	size_t write_size, loff_t *poffset)
+{
+	int rc = 0;
+	unsigned int bytes_written = 0;
+	unsigned int total_written;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int xid, long_op;
+	struct cifsFileInfo *open_file;
+
+	if (file->f_dentry == NULL)
+		return -EBADF;
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	if (cifs_sb == NULL)
+		return -EBADF;
+
+	pTcon = cifs_sb->tcon;
+
+	/* cFYI(1,
+	   (" write %d bytes to offset %lld of %s", write_size,
+	   *poffset, file->f_dentry->d_name.name)); */
+
+	if (file->private_data == NULL)
+		return -EBADF;
+	else
+		open_file = (struct cifsFileInfo *)file->private_data;
+	
+	xid = GetXid();
+	if (file->f_dentry->d_inode == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+
+	if (*poffset > file->f_dentry->d_inode->i_size)
+		long_op = 2; /* writes past end of file can take a long time */
+	else
+		long_op = 1;
+
+	for (total_written = 0; write_size > total_written;
+	     total_written += bytes_written) {
+		rc = -EAGAIN;
+		while (rc == -EAGAIN) {
+			if (file->private_data == NULL) {
+				/* file has been closed on us */
+				FreeXid(xid);
+			/* if we have gotten here we have written some data
+			   and blocked, and the file has been freed on us
+			   while we blocked so return what we managed to 
+			   write */
+				return total_written;
+			} 
+			if (open_file->closePend) {
+				FreeXid(xid);
+				if (total_written)
+					return total_written;
+				else
+					return -EBADF;
+			}
+			if (open_file->invalidHandle) {
+				if ((file->f_dentry == NULL) ||
+				   (file->f_dentry->d_inode == NULL)) {
+					FreeXid(xid);
+					return total_written;
+				}
+				/* we could deadlock if we called
+				   filemap_fdatawait from here so tell
+				   reopen_file not to flush data to 
+				   server now */
+				rc = cifs_reopen_file(file->f_dentry->d_inode,
+					file, FALSE);
+				if (rc != 0)
+					break;
+			}
+
+			rc = CIFSSMBWrite(xid, pTcon,
+				 open_file->netfid,
+				 min_t(const int, cifs_sb->wsize, 
+				       write_size - total_written),
+				 *poffset, &bytes_written,
+				 write_data + total_written, NULL, long_op);
+		}
+		if (rc || (bytes_written == 0)) {
+			if (total_written)
+				break;
+			else {
+				FreeXid(xid);
+				return rc;
+			}
+		} else
+			*poffset += bytes_written;
+		long_op = FALSE; /* subsequent writes fast - 
+				    15 seconds is plenty */
+	}
+
+#ifdef CONFIG_CIFS_STATS
+	if (total_written > 0) {
+		atomic_inc(&pTcon->num_writes);
+		spin_lock(&pTcon->stat_lock);
+		pTcon->bytes_written += total_written;
+		spin_unlock(&pTcon->stat_lock);
+	}
+#endif		
+
+	/* since the write may have blocked check these pointers again */
+	if (file->f_dentry) {
+		if (file->f_dentry->d_inode) {
+			file->f_dentry->d_inode->i_ctime = 
+			file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
+			if (total_written > 0) {
+				if (*poffset > file->f_dentry->d_inode->i_size)
+					i_size_write(file->f_dentry->d_inode, 
+						     *poffset);
+			}
+			mark_inode_dirty_sync(file->f_dentry->d_inode);
+		}
+	}
+	FreeXid(xid);
+	return total_written;
+}
+
+static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+{
+	struct address_space *mapping = page->mapping;
+	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	char *write_data;
+	int rc = -EFAULT;
+	int bytes_written = 0;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct inode *inode;
+	struct cifsInodeInfo *cifsInode;
+	struct cifsFileInfo *open_file = NULL;
+	struct list_head *tmp;
+	struct list_head *tmp1;
+
+	if (!mapping || !mapping->host)
+		return -EFAULT;
+
+	inode = page->mapping->host;
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	offset += (loff_t)from;
+	write_data = kmap(page);
+	write_data += from;
+
+	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
+		kunmap(page);
+		return -EIO;
+	}
+
+	/* racing with truncate? */
+	if (offset > mapping->host->i_size) {
+		kunmap(page);
+		return 0; /* don't care */
+	}
+
+	/* check to make sure that we are not extending the file */
+	if (mapping->host->i_size - offset < (loff_t)to)
+		to = (unsigned)(mapping->host->i_size - offset); 
+
+	cifsInode = CIFS_I(mapping->host);
+	read_lock(&GlobalSMBSeslock); 
+	/* BB we should start at the end */
+	list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {            
+		open_file = list_entry(tmp, struct cifsFileInfo, flist);
+		if (open_file->closePend)
+			continue;
+		/* We check if file is open for writing first */
+		if ((open_file->pfile) && 
+		   ((open_file->pfile->f_flags & O_RDWR) || 
+			(open_file->pfile->f_flags & O_WRONLY))) {
+			read_unlock(&GlobalSMBSeslock);
+			bytes_written = cifs_write(open_file->pfile,
+						write_data, to-from,
+						&offset);
+			read_lock(&GlobalSMBSeslock);
+		/* Does mm or vfs already set times? */
+			inode->i_atime = 
+			inode->i_mtime = current_fs_time(inode->i_sb);
+			if ((bytes_written > 0) && (offset)) {
+				rc = 0;
+			} else if (bytes_written < 0) {
+				if (bytes_written == -EBADF) {
+				/* have seen a case in which kernel seemed to
+				   have closed/freed a file even with writes
+				   active so we might as well see if there are
+				   other file structs to try for the same
+				   inode before giving up */
+					continue;
+				} else
+					rc = bytes_written;
+			}
+			break;  /* now that we found a valid file handle and
+				   tried to write to it we are done, no sense
+				   continuing to loop looking for another */
+		}
+		if (tmp->next == NULL) {
+			cFYI(1, ("File instance %p removed", tmp));
+			break;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	if (open_file == NULL) {
+		cFYI(1, ("No writeable filehandles for inode"));
+		rc = -EIO;
+	}
+
+	kunmap(page);
+	return rc;
+}
+
+#if 0
+static int cifs_writepages(struct address_space *mapping,
+	struct writeback_control *wbc)
+{
+	int rc = -EFAULT;
+	int xid;
+
+	xid = GetXid();
+
+	/* Find contiguous pages then iterate through repeating
+	   call 16K write then Setpageuptodate or if LARGE_WRITE_X
+	   support then send larger writes via kevec so as to eliminate
+	   a memcpy */
+	FreeXid(xid);
+	return rc;
+}
+#endif
+
+static int cifs_writepage(struct page* page, struct writeback_control *wbc)
+{
+	int rc = -EFAULT;
+	int xid;
+
+	xid = GetXid();
+/* BB add check for wbc flags */
+	page_cache_get(page);
+	if (!PageUptodate(page)) {
+		cFYI(1, ("ppw - page not up to date"));
+	}
+	
+	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
+	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
+	unlock_page(page);
+	page_cache_release(page);	
+	FreeXid(xid);
+	return rc;
+}
+
+static int cifs_commit_write(struct file *file, struct page *page,
+	unsigned offset, unsigned to)
+{
+	int xid;
+	int rc = 0;
+	struct inode *inode = page->mapping->host;
+	loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+	char *page_data;
+
+	xid = GetXid();
+	cFYI(1, ("commit write for page %p up to position %lld for %d", 
+		 page, position, to));
+	if (position > inode->i_size) {
+		i_size_write(inode, position);
+		/* if (file->private_data == NULL) {
+			rc = -EBADF;
+		} else {
+			open_file = (struct cifsFileInfo *)file->private_data;
+			cifs_sb = CIFS_SB(inode->i_sb);
+			rc = -EAGAIN;
+			while (rc == -EAGAIN) {
+				if ((open_file->invalidHandle) && 
+				    (!open_file->closePend)) {
+					rc = cifs_reopen_file(
+						file->f_dentry->d_inode, file);
+					if (rc != 0)
+						break;
+				}
+				if (!open_file->closePend) {
+					rc = CIFSSMBSetFileSize(xid,
+						cifs_sb->tcon, position,
+						open_file->netfid,
+						open_file->pid, FALSE);
+				} else {
+					rc = -EBADF;
+					break;
+				}
+			}
+			cFYI(1, (" SetEOF (commit write) rc = %d", rc));
+		} */
+	}
+	if (!PageUptodate(page)) {
+		position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
+		/* can not rely on (or let) writepage write this data */
+		if (to < offset) {
+			cFYI(1, ("Illegal offsets, can not copy from %d to %d",
+				offset, to));
+			FreeXid(xid);
+			return rc;
+		}
+		/* this is probably better than directly calling
+		   partialpage_write since in this function the file handle is
+		   known which we might as well	leverage */
+		/* BB check if anything else missing out of ppw
+		   such as updating last write time */
+		page_data = kmap(page);
+		rc = cifs_write(file, page_data + offset, to-offset,
+				&position);
+		if (rc > 0)
+			rc = 0;
+		/* else if (rc < 0) should we set writebehind rc? */
+		kunmap(page);
+	} else {	
+		set_page_dirty(page);
+	}
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	int xid;
+	int rc = 0;
+	struct inode *inode = file->f_dentry->d_inode;
+
+	xid = GetXid();
+
+	cFYI(1, ("Sync file - name: %s datasync: 0x%x ", 
+		dentry->d_name.name, datasync));
+	
+	rc = filemap_fdatawrite(inode->i_mapping);
+	if (rc == 0)
+		CIFS_I(inode)->write_behind_rc = 0;
+	FreeXid(xid);
+	return rc;
+}
+
+/* static int cifs_sync_page(struct page *page)
+{
+	struct address_space *mapping;
+	struct inode *inode;
+	unsigned long index = page->index;
+	unsigned int rpages = 0;
+	int rc = 0;
+
+	cFYI(1, ("sync page %p",page));
+	mapping = page->mapping;
+	if (!mapping)
+		return 0;
+	inode = mapping->host;
+	if (!inode)
+		return 0; */
+
+/*	fill in rpages then 
+	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
+
+/*	cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
+
+	if (rc < 0)
+		return rc;
+	return 0;
+} */
+
+/*
+ * As file closes, flush all cached write data for this inode checking
+ * for write behind errors.
+ */
+int cifs_flush(struct file *file)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	int rc = 0;
+
+	/* Rather than do the steps manually:
+	   lock the inode for writing
+	   loop through pages looking for write behind data (dirty pages)
+	   coalesce into contiguous 16K (or smaller) chunks to write to server
+	   send to server (prefer in parallel)
+	   deal with writebehind errors
+	   unlock inode for writing
+	   filemapfdatawrite appears easier for the time being */
+
+	rc = filemap_fdatawrite(inode->i_mapping);
+	if (!rc) /* reset wb rc if we were able to write out dirty pages */
+		CIFS_I(inode)->write_behind_rc = 0;
+		
+	cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
+
+	return rc;
+}
+
+ssize_t cifs_user_read(struct file *file, char __user *read_data,
+	size_t read_size, loff_t *poffset)
+{
+	int rc = -EACCES;
+	unsigned int bytes_read = 0;
+	unsigned int total_read = 0;
+	unsigned int current_read_size;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int xid;
+	struct cifsFileInfo *open_file;
+	char *smb_read_data;
+	char __user *current_offset;
+	struct smb_com_read_rsp *pSMBr;
+
+	xid = GetXid();
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	if (file->private_data == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+	open_file = (struct cifsFileInfo *)file->private_data;
+
+	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+		cFYI(1, ("attempting read on write only file instance"));
+	}
+	for (total_read = 0, current_offset = read_data;
+	     read_size > total_read;
+	     total_read += bytes_read, current_offset += bytes_read) {
+		current_read_size = min_t(const int, read_size - total_read, 
+					  cifs_sb->rsize);
+		rc = -EAGAIN;
+		smb_read_data = NULL;
+		while (rc == -EAGAIN) {
+			if ((open_file->invalidHandle) && 
+			    (!open_file->closePend)) {
+				rc = cifs_reopen_file(file->f_dentry->d_inode,
+					file, TRUE);
+				if (rc != 0)
+					break;
+			}
+
+			rc = CIFSSMBRead(xid, pTcon,
+				 open_file->netfid,
+				 current_read_size, *poffset,
+				 &bytes_read, &smb_read_data);
+
+			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
+			if (copy_to_user(current_offset, 
+					 smb_read_data + 4 /* RFC1001 hdr */
+					 + le16_to_cpu(pSMBr->DataOffset), 
+					 bytes_read)) {
+				rc = -EFAULT;
+				FreeXid(xid);
+				return rc;
+			}
+			if (smb_read_data) {
+				cifs_buf_release(smb_read_data);
+				smb_read_data = NULL;
+			}
+		}
+		if (rc || (bytes_read == 0)) {
+			if (total_read) {
+				break;
+			} else {
+				FreeXid(xid);
+				return rc;
+			}
+		} else {
+#ifdef CONFIG_CIFS_STATS
+			atomic_inc(&pTcon->num_reads);
+			spin_lock(&pTcon->stat_lock);
+			pTcon->bytes_read += total_read;
+			spin_unlock(&pTcon->stat_lock);
+#endif
+			*poffset += bytes_read;
+		}
+	}
+	FreeXid(xid);
+	return total_read;
+}
+
+
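+/* Read file data into a kernel buffer supplied by the caller (for example a
+   kmapped page in cifs_readpage_worker); no copy_to_user is needed here */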
+static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
+	loff_t *poffset)
+{
+	int rc = -EACCES;
+	unsigned int bytes_read = 0;
+	unsigned int total_read;
+	unsigned int current_read_size;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int xid;
+	char *current_offset;
+	struct cifsFileInfo *open_file;
+
+	xid = GetXid();
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	if (file->private_data == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+	open_file = (struct cifsFileInfo *)file->private_data;
+
+	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+		cFYI(1, ("attempting read on write only file instance"));
+
+	for (total_read = 0, current_offset = read_data; 
+	     read_size > total_read;
+	     total_read += bytes_read, current_offset += bytes_read) {
+		current_read_size = min_t(const int, read_size - total_read,
+					  cifs_sb->rsize);
+		rc = -EAGAIN;
+		while (rc == -EAGAIN) {
+			if ((open_file->invalidHandle) && 
+			    (!open_file->closePend)) {
+				rc = cifs_reopen_file(file->f_dentry->d_inode,
+					file, TRUE);
+				if (rc != 0)
+					break;
+			}
+
+			rc = CIFSSMBRead(xid, pTcon,
+				 open_file->netfid,
+				 current_read_size, *poffset,
+				 &bytes_read, &current_offset);
+		}
+		if (rc || (bytes_read == 0)) {
+			if (total_read) {
+				break;
+			} else {
+				FreeXid(xid);
+				return rc;
+			}
+		} else {
+#ifdef CONFIG_CIFS_STATS
+			atomic_inc(&pTcon->num_reads);
+			spin_lock(&pTcon->stat_lock);
+			pTcon->bytes_read += bytes_read;
+			spin_unlock(&pTcon->stat_lock);
+#endif
+			*poffset += bytes_read;
+		}
+	}
+	FreeXid(xid);
+	return total_read;
+}
+
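+/* Revalidate the inode before mapping it so that cached pages reflect the
+   current server copy of the file */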
+int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct dentry *dentry = file->f_dentry;
+	int rc, xid;
+
+	xid = GetXid();
+	rc = cifs_revalidate(dentry);
+	if (rc) {
+		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
+		FreeXid(xid);
+		return rc;
+	}
+	rc = generic_file_mmap(file, vma);
+	FreeXid(xid);
+	return rc;
+}
+
+
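+/* Copy data from a contiguous SMB read response buffer into the page cache
+   pages on the list, adding each page to the page cache and LRU and zero
+   filling the tail of a partial final page */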
+static void cifs_copy_cache_pages(struct address_space *mapping, 
+	struct list_head *pages, int bytes_read, char *data,
+	struct pagevec *plru_pvec)
+{
+	struct page *page;
+	char *target;
+
+	while (bytes_read > 0) {
+		if (list_empty(pages))
+			break;
+
+		page = list_entry(pages->prev, struct page, lru);
+		list_del(&page->lru);
+
+		if (add_to_page_cache(page, mapping, page->index,
+				      GFP_KERNEL)) {
+			page_cache_release(page);
+			cFYI(1, ("Add page cache failed"));
+			continue;
+		}
+
+		target = kmap_atomic(page,KM_USER0);
+
+		if (PAGE_CACHE_SIZE > bytes_read) {
+			memcpy(target, data, bytes_read);
+			/* zero the tail end of this partial page */
+			memset(target + bytes_read, 0, 
+			       PAGE_CACHE_SIZE - bytes_read);
+			bytes_read = 0;
+		} else {
+			memcpy(target, data, PAGE_CACHE_SIZE);
+			bytes_read -= PAGE_CACHE_SIZE;
+		}
+		kunmap_atomic(target, KM_USER0);
+
+		flush_dcache_page(page);
+		SetPageUptodate(page);
+		unlock_page(page);
+		if (!pagevec_add(plru_pvec, page))
+			__pagevec_lru_add(plru_pvec);
+		data += PAGE_CACHE_SIZE;
+	}
+	return;
+}
+
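+/* Fill the readahead pages on page_list with data from the server, reading
+   contiguous runs of pages in chunks of at most rsize bytes */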
+static int cifs_readpages(struct file *file, struct address_space *mapping,
+	struct list_head *page_list, unsigned num_pages)
+{
+	int rc = -EACCES;
+	int xid;
+	loff_t offset;
+	struct page *page;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int bytes_read = 0;
+	unsigned int read_size,i;
+	char *smb_read_data = NULL;
+	struct smb_com_read_rsp *pSMBr;
+	struct pagevec lru_pvec;
+	struct cifsFileInfo *open_file;
+
+	xid = GetXid();
+	if (file->private_data == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+	open_file = (struct cifsFileInfo *)file->private_data;
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	pagevec_init(&lru_pvec, 0);
+
+	for (i = 0; i < num_pages; ) {
+		unsigned contig_pages;
+		struct page *tmp_page;
+		unsigned long expected_index;
+
+		if (list_empty(page_list))
+			break;
+
+		page = list_entry(page_list->prev, struct page, lru);
+		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+
+		/* count adjacent pages that we will read into */
+		contig_pages = 0;
+		expected_index =
+			list_entry(page_list->prev, struct page, lru)->index;
+		list_for_each_entry_reverse(tmp_page, page_list, lru) {
+			if (tmp_page->index == expected_index) {
+				contig_pages++;
+				expected_index++;
+			} else
+				break; 
+		}
+		if (contig_pages + i >  num_pages)
+			contig_pages = num_pages - i;
+
+		/* for reads over a certain size could initiate async
+		   read ahead */
+
+		read_size = contig_pages * PAGE_CACHE_SIZE;
+		/* Read size needs to be in multiples of one page */
+		read_size = min_t(const unsigned int, read_size,
+				  cifs_sb->rsize & PAGE_CACHE_MASK);
+
+		rc = -EAGAIN;
+		while (rc == -EAGAIN) {
+			if ((open_file->invalidHandle) && 
+			    (!open_file->closePend)) {
+				rc = cifs_reopen_file(file->f_dentry->d_inode,
+					file, TRUE);
+				if (rc != 0)
+					break;
+			}
+
+			rc = CIFSSMBRead(xid, pTcon,
+				open_file->netfid,
+				read_size, offset,
+				&bytes_read, &smb_read_data);
+			/* BB need to check return code here */
+			if (rc== -EAGAIN) {
+				if (smb_read_data) {
+					cifs_buf_release(smb_read_data);
+					smb_read_data = NULL;
+				}
+			}
+		}
+		if ((rc < 0) || (smb_read_data == NULL)) {
+			cFYI(1, ("Read error in readpages: %d", rc));
+			/* clean up remaining pages left on the list */
+			while (!list_empty(page_list) && (i < num_pages)) {
+				page = list_entry(page_list->prev, struct page,
+						  lru);
+				list_del(&page->lru);
+				page_cache_release(page);
+			}
+			break;
+		} else if (bytes_read > 0) {
+			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
+			cifs_copy_cache_pages(mapping, page_list, bytes_read,
+				smb_read_data + 4 /* RFC1001 hdr */ +
+				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
+
+			i +=  bytes_read >> PAGE_CACHE_SHIFT;
+#ifdef CONFIG_CIFS_STATS
+			atomic_inc(&pTcon->num_reads);
+			spin_lock(&pTcon->stat_lock);
+			pTcon->bytes_read += bytes_read;
+			spin_unlock(&pTcon->stat_lock);
+#endif
+			if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
+				i++; /* account for partial page */
+
+				/* server copy of file can have smaller size 
+				   than client */
+				/* BB do we need to verify this common case ? 
+				   this case is ok - if we are at server EOF 
+				   we will hit it on next read */
+
+			/* while (!list_empty(page_list) && (i < num_pages)) {
+					page = list_entry(page_list->prev, 
+							  struct page, list);
+					list_del(&page->list);
+					page_cache_release(page);
+				}
+				break; */
+			}
+		} else {
+			cFYI(1, ("No bytes read (%d) at offset %lld . "
+				 "Cleaning remaining pages from readahead list",
+				 bytes_read, offset));
+			/* BB turn off caching and do new lookup on 
+			   file size at server? */
+			while (!list_empty(page_list) && (i < num_pages)) {
+				page = list_entry(page_list->prev, struct page,
+						  lru);
+				list_del(&page->lru);
+
+				/* BB removeme - replace with zero of page? */
+				page_cache_release(page);
+			}
+			break;
+		}
+		if (smb_read_data) {
+			cifs_buf_release(smb_read_data);
+			smb_read_data = NULL;
+		}
+		bytes_read = 0;
+	}
+
+	pagevec_lru_add(&lru_pvec);
+
+/* need to free smb_read_data buf before exit */
+	if (smb_read_data) {
+		cifs_buf_release(smb_read_data);
+		smb_read_data = NULL;
+	} 
+
+	FreeXid(xid);
+	return rc;
+}
+
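+/* Fill a single locked page from the server at *poffset and mark it up to
+   date; the caller is responsible for unlocking the page */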
+static int cifs_readpage_worker(struct file *file, struct page *page,
+	loff_t *poffset)
+{
+	char *read_data;
+	int rc;
+
+	page_cache_get(page);
+	read_data = kmap(page);
+	/* for reads over a certain size could initiate async read ahead */
+
+	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
+
+	if (rc < 0)
+		goto io_error;
+	else
+		cFYI(1, ("Bytes read %d", rc));
+
+	file->f_dentry->d_inode->i_atime =
+		current_fs_time(file->f_dentry->d_inode->i_sb);
+
+	if (PAGE_CACHE_SIZE > rc)
+		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
+
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	rc = 0;
+
+io_error:
+	kunmap(page);
+	page_cache_release(page);
+	return rc;
+}
+
+static int cifs_readpage(struct file *file, struct page *page)
+{
+	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	int rc = -EACCES;
+	int xid;
+
+	xid = GetXid();
+
+	if (file->private_data == NULL) {
+		FreeXid(xid);
+		return -EBADF;
+	}
+
+	cFYI(1, ("readpage %p at offset %d 0x%x\n", 
+		 page, (int)offset, (int)offset));
+
+	rc = cifs_readpage_worker(file, page, &offset);
+
+	unlock_page(page);
+
+	FreeXid(xid);
+	return rc;
+}
+
+/* We do not want to update the file size from server for inodes
+   open for write - to avoid races with writepage extending
+   the file - in the future we could consider allowing
+   refreshing the inode only on increases in the file size 
+   but this is tricky to do without racing with writebehind
+   page caching in the current Linux kernel design */
+int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
+{
+	struct list_head *tmp;
+	struct list_head *tmp1;
+	struct cifsFileInfo *open_file = NULL;
+	int rc = TRUE;
+
+	if (cifsInode == NULL)
+		return rc;
+
+	read_lock(&GlobalSMBSeslock); 
+	list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {            
+		open_file = list_entry(tmp, struct cifsFileInfo, flist);
+		if (open_file == NULL)
+			break;
+		if (open_file->closePend)
+			continue;
+	/* We check if file is open for writing,   
+	   BB we could supplement this with a check to see if file size
+	   changes have been flushed to server - ie inode metadata dirty */
+		if ((open_file->pfile) && 
+		    ((open_file->pfile->f_flags & O_RDWR) || 
+		    (open_file->pfile->f_flags & O_WRONLY))) {
+			rc = FALSE;
+			break;
+		}
+		if (tmp->next == NULL) {
+			cFYI(1, ("File instance %p removed", tmp));
+			break;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	return rc;
+}
+
+
+static int cifs_prepare_write(struct file *file, struct page *page,
+	unsigned from, unsigned to)
+{
+	int rc = 0;
+	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+
+	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
+	if (!PageUptodate(page)) {
+	/*	if (to - from != PAGE_CACHE_SIZE) {
+			void *kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr, 0, from);
+			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+		} */
+		/* If we are writing a full page it will be up to date,
+		   no need to read from the server */
+		if ((to == PAGE_CACHE_SIZE) && (from == 0))
+			SetPageUptodate(page);
+
+		/* might as well read a page, it is fast enough */
+		if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+			rc = cifs_readpage_worker(file, page, &offset);
+		} else {
+		/* should we try using another file handle if there is one -
+		   how would we lock it to prevent close of that handle
+		   racing with this read?
+		   In any case this will be written out by commit_write */
+		}
+	}
+
+	/* BB should we pass any errors back? 
+	   e.g. if we do not have read access to the file */
+	return 0;
+}
+
+struct address_space_operations cifs_addr_ops = {
+	.readpage = cifs_readpage,
+	.readpages = cifs_readpages,
+	.writepage = cifs_writepage,
+	.prepare_write = cifs_prepare_write,
+	.commit_write = cifs_commit_write,
+	.set_page_dirty = __set_page_dirty_nobuffers,
+	/* .sync_page = cifs_sync_page, */
+	/* .direct_IO = */
+};
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
new file mode 100644
index 0000000..d73b0aa
--- /dev/null
+++ b/fs/cifs/inode.c
@@ -0,0 +1,1096 @@
+/*
+ *   fs/cifs/inode.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/stat.h>
+#include <linux/pagemap.h>
+#include <asm/div64.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+
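+/* Fill in (or allocate) an inode from the Unix extensions QPathInfo data
+   returned by the server for the given path */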
+int cifs_get_inode_info_unix(struct inode **pinode,
+	const unsigned char *search_path, struct super_block *sb, int xid)
+{
+	int rc = 0;
+	FILE_UNIX_BASIC_INFO findData;
+	struct cifsTconInfo *pTcon;
+	struct inode *inode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	char *tmp_path;
+
+	pTcon = cifs_sb->tcon;
+	cFYI(1, (" Getting info on %s ", search_path));
+	/* could have done a find first instead but this returns more info */
+	rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData,
+				  cifs_sb->local_nls);
+/*	dump_mem("\nUnixQPathInfo return data", &findData,
+		 sizeof(findData)); */
+	if (rc) {
+		if (rc == -EREMOTE) {
+			tmp_path =
+			    kmalloc(strnlen(pTcon->treeName,
+					    MAX_TREE_SIZE + 1) +
+				    strnlen(search_path, MAX_PATHCONF) + 1,
+				    GFP_KERNEL);
+			if (tmp_path == NULL) {
+				return -ENOMEM;
+			}
+			/* have to skip the first of the double backslashes
+			   in the UNC name */
+			strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
+			strncat(tmp_path, search_path, MAX_PATHCONF);
+			rc = connect_to_dfs_path(xid, pTcon->ses,
+						 /* treename + */ tmp_path,
+						 cifs_sb->local_nls);
+			kfree(tmp_path);
+
+			/* BB fix up inode etc. */
+		} else if (rc) {
+			return rc;
+		}
+	} else {
+		struct cifsInodeInfo *cifsInfo;
+		__u32 type = le32_to_cpu(findData.Type);
+		__u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes);
+		__u64 end_of_file = le64_to_cpu(findData.EndOfFile);
+
+		/* get new inode */
+		if (*pinode == NULL) {
+			*pinode = new_inode(sb);
+			if (*pinode == NULL)
+				return -ENOMEM;
+			/* Is an i_ino of zero legal? */
+			/* Are there sanity checks we can use to ensure that
+			   the server is really filling in that field? */
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+				(*pinode)->i_ino =
+					(unsigned long)findData.UniqueId;
+			} /* note ino incremented to unique num in new_inode */
+			insert_inode_hash(*pinode);
+		}
+
+		inode = *pinode;
+		cifsInfo = CIFS_I(inode);
+
+		cFYI(1, (" Old time %ld ", cifsInfo->time));
+		cifsInfo->time = jiffies;
+		cFYI(1, (" New time %ld ", cifsInfo->time));
+		/* this is ok to set on every inode revalidate */
+		atomic_set(&cifsInfo->inUse,1);
+
+		inode->i_atime =
+		    cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime));
+		inode->i_mtime =
+		    cifs_NTtimeToUnix(le64_to_cpu
+				(findData.LastModificationTime));
+		inode->i_ctime =
+		    cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange));
+		inode->i_mode = le64_to_cpu(findData.Permissions);
+		if (type == UNIX_FILE) {
+			inode->i_mode |= S_IFREG;
+		} else if (type == UNIX_SYMLINK) {
+			inode->i_mode |= S_IFLNK;
+		} else if (type == UNIX_DIR) {
+			inode->i_mode |= S_IFDIR;
+		} else if (type == UNIX_CHARDEV) {
+			inode->i_mode |= S_IFCHR;
+			inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
+				le64_to_cpu(findData.DevMinor) & MINORMASK);
+		} else if (type == UNIX_BLOCKDEV) {
+			inode->i_mode |= S_IFBLK;
+			inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
+				le64_to_cpu(findData.DevMinor) & MINORMASK);
+		} else if (type == UNIX_FIFO) {
+			inode->i_mode |= S_IFIFO;
+		} else if (type == UNIX_SOCKET) {
+			inode->i_mode |= S_IFSOCK;
+		}
+		inode->i_uid = le64_to_cpu(findData.Uid);
+		inode->i_gid = le64_to_cpu(findData.Gid);
+		inode->i_nlink = le64_to_cpu(findData.Nlinks);
+
+		if(is_size_safe_to_change(cifsInfo)) {
+		/* can not safely change the file size here if the
+		   client is writing to it due to potential races */
+
+			i_size_write(inode, end_of_file);
+
+		/* blksize needs to be multiple of two. So safer to default to
+		blksize and blkbits set in superblock so 2**blkbits and blksize
+		will match rather than setting to:
+		(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
+
+		/* This seems incredibly stupid but it turns out that i_blocks
+		   is not related to (i_size / i_blksize), instead 512 byte size
+		   is required for calculating num blocks */
+
+		/* 512 bytes (2**9) is the fake blocksize that must be used */
+		/* for this calculation */
+			inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
+		}
+
+		if (num_of_bytes < end_of_file)
+			cFYI(1, ("allocation size less than end of file "));
+		cFYI(1,
+		     ("Size %ld and blocks %ld",
+		      (unsigned long) inode->i_size, inode->i_blocks));
+		if (S_ISREG(inode->i_mode)) {
+			cFYI(1, (" File inode "));
+			inode->i_op = &cifs_file_inode_ops;
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+				inode->i_fop = &cifs_file_direct_ops;
+			else
+				inode->i_fop = &cifs_file_ops;
+			inode->i_data.a_ops = &cifs_addr_ops;
+		} else if (S_ISDIR(inode->i_mode)) {
+			cFYI(1, (" Directory inode"));
+			inode->i_op = &cifs_dir_inode_ops;
+			inode->i_fop = &cifs_dir_ops;
+		} else if (S_ISLNK(inode->i_mode)) {
+			cFYI(1, (" Symbolic Link inode "));
+			inode->i_op = &cifs_symlink_inode_ops;
+		/* tmp_inode->i_fop = */ /* do not need to set to anything */
+		} else {
+			cFYI(1, (" Init special inode "));
+			init_special_inode(inode, inode->i_mode,
+					   inode->i_rdev);
+		}
+	}
+	return rc;
+}
+
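+/* Fill in (or allocate) an inode from CIFS FILE_ALL_INFO data, querying the
+   server if the caller did not pass the info in */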
+int cifs_get_inode_info(struct inode **pinode,
+	const unsigned char *search_path, FILE_ALL_INFO *pfindData,
+	struct super_block *sb, int xid)
+{
+	int rc = 0;
+	struct cifsTconInfo *pTcon;
+	struct inode *inode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	char *tmp_path;
+	char *buf = NULL;
+
+	pTcon = cifs_sb->tcon;
+	cFYI(1,("Getting info on %s ", search_path));
+
+	if((pfindData == NULL) && (*pinode != NULL)) {
+		if(CIFS_I(*pinode)->clientCanCacheRead) {
+			cFYI(1,("No need to revalidate cached inode sizes"));
+			return rc;
+		}
+	}
+
+	/* if file info not passed in then get it from server */
+	if(pfindData == NULL) {
+		buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+		if(buf == NULL)
+			return -ENOMEM;
+		pfindData = (FILE_ALL_INFO *)buf;
+		/* could do find first instead but this returns more info */
+		rc = CIFSSMBQPathInfo(xid, pTcon, search_path, pfindData,
+			      cifs_sb->local_nls);
+	}
+	/* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
+	if (rc) {
+		if (rc == -EREMOTE) {
+			tmp_path =
+			    kmalloc(strnlen
+				    (pTcon->treeName,
+				     MAX_TREE_SIZE + 1) +
+				    strnlen(search_path, MAX_PATHCONF) + 1,
+				    GFP_KERNEL);
+			if (tmp_path == NULL) {
+				kfree(buf);
+				return -ENOMEM;
+			}
+
+			strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
+			strncat(tmp_path, search_path, MAX_PATHCONF);
+			rc = connect_to_dfs_path(xid, pTcon->ses,
+						 /* treename + */ tmp_path,
+						 cifs_sb->local_nls);
+			kfree(tmp_path);
+			/* BB fix up inode etc. */
+		} else if (rc) {
+			kfree(buf);
+			return rc;
+		}
+	} else {
+		struct cifsInodeInfo *cifsInfo;
+		__u32 attr = le32_to_cpu(pfindData->Attributes);
+
+		/* get new inode */
+		if (*pinode == NULL) {
+			*pinode = new_inode(sb);
+			if (*pinode == NULL)
+				return -ENOMEM;
+			/* Is an i_ino of zero legal? Can we use that to check
+			   if the server supports returning inode numbers?  Are
+			   there other sanity checks we can use to ensure that
+			   the server is really filling in that field? */
+
+			/* We can not use the IndexNumber field by default from
+			   Windows or Samba (in ALL_INFO buf) but we can request
+			   it explicitly.  It may not be unique presumably if
+			   the server has multiple devices mounted under one
+			   share */
+
+			/* There may be higher info levels that work but are
+			   there Windows server or network appliances for which
+			   IndexNumber field is not guaranteed unique? */
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL		
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM){
+				int rc1 = 0;
+				__u64 inode_num;
+
+				rc1 = CIFSGetSrvInodeNumber(xid, pTcon, 
+					search_path, &inode_num, 
+					cifs_sb->local_nls);
+				if(rc1) {
+					cFYI(1,("GetSrvInodeNum rc %d", rc1));
+					/* BB EOPNOSUPP disable SERVER_INUM? */
+				} else /* do we need cast or hash to ino? */
+					(*pinode)->i_ino = inode_num;
+			} /* else ino incremented to unique num in new_inode*/
+#endif /* CIFS_EXPERIMENTAL */
+			insert_inode_hash(*pinode);
+		}
+		inode = *pinode;
+		cifsInfo = CIFS_I(inode);
+		cifsInfo->cifsAttrs = attr;
+		cFYI(1, (" Old time %ld ", cifsInfo->time));
+		cifsInfo->time = jiffies;
+		cFYI(1, (" New time %ld ", cifsInfo->time));
+
+		/* blksize needs to be multiple of two. So safer to default to
+		blksize and blkbits set in superblock so 2**blkbits and blksize
+		will match rather than setting to:
+		(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
+
+		/* Linux can not store file creation time unfortunately so we ignore it */
+		inode->i_atime =
+		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+		inode->i_mtime =
+		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
+		inode->i_ctime =
+		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+		cFYI(0, (" Attributes came in as 0x%x ", attr));
+
+		/* set default mode. will override for dirs below */
+		if (atomic_read(&cifsInfo->inUse) == 0)
+			/* new inode, can safely set these fields */
+			inode->i_mode = cifs_sb->mnt_file_mode;
+
+/*		if (attr & ATTR_REPARSE)  */
+		/* We no longer handle these as symlinks because we could not
+		   follow them due to the absolute path with drive letter */
+		if (attr & ATTR_DIRECTORY) {
+		/* override default perms since we do not do byte range locking
+		   on dirs */
+			inode->i_mode = cifs_sb->mnt_dir_mode;
+			inode->i_mode |= S_IFDIR;
+		} else {
+			inode->i_mode |= S_IFREG;
+			/* treat the dos attribute of read-only as read-only
+			   mode e.g. 555 */
+			if (cifsInfo->cifsAttrs & ATTR_READONLY)
+				inode->i_mode &= ~(S_IWUGO);
+		/* BB add code here -
+		   validate if device or weird share or device type? */
+		}
+		if (is_size_safe_to_change(cifsInfo)) {
+			/* can not safely change the file size here if the
+			   client is writing to it due to potential races */
+			i_size_write(inode,le64_to_cpu(pfindData->EndOfFile));
+
+			/* 512 bytes (2**9) is the fake blocksize that must be
+			   used for this calculation */
+			inode->i_blocks = (512 - 1 + le64_to_cpu(
+					   pfindData->AllocationSize)) >> 9;
+		}
+
+		inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
+
+		/* BB fill in uid and gid here? with help from winbind? 
+		   or retrieve from NTFS stream extended attribute */
+		if (atomic_read(&cifsInfo->inUse) == 0) {
+			inode->i_uid = cifs_sb->mnt_uid;
+			inode->i_gid = cifs_sb->mnt_gid;
+			/* set so we do not keep refreshing these fields with
+			   bad data after user has changed them in memory */
+			atomic_set(&cifsInfo->inUse,1);
+		}
+
+		if (S_ISREG(inode->i_mode)) {
+			cFYI(1, (" File inode "));
+			inode->i_op = &cifs_file_inode_ops;
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+				inode->i_fop = &cifs_file_direct_ops;
+			else
+				inode->i_fop = &cifs_file_ops;
+			inode->i_data.a_ops = &cifs_addr_ops;
+		} else if (S_ISDIR(inode->i_mode)) {
+			cFYI(1, (" Directory inode "));
+			inode->i_op = &cifs_dir_inode_ops;
+			inode->i_fop = &cifs_dir_ops;
+		} else if (S_ISLNK(inode->i_mode)) {
+			cFYI(1, (" Symbolic Link inode "));
+			inode->i_op = &cifs_symlink_inode_ops;
+		} else {
+			init_special_inode(inode, inode->i_mode,
+					   inode->i_rdev);
+		}
+	}
+	kfree(buf);
+	return rc;
+}
+
+/* gets root inode */
+void cifs_read_inode(struct inode *inode)
+{
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	xid = GetXid();
+	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+		cifs_get_inode_info_unix(&inode, "", inode->i_sb,xid);
+	else
+		cifs_get_inode_info(&inode, "", NULL, inode->i_sb,xid);
+	/* can not call macro FreeXid here since in a void func */
+	_FreeXid(xid);
+}
+
+int cifs_unlink(struct inode *inode, struct dentry *direntry)
+{
+	int rc = 0;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	struct cifsInodeInfo *cifsInode;
+	FILE_BASIC_INFO *pinfo_buf;
+
+	cFYI(1, (" cifs_unlink, inode = 0x%p with ", inode));
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	/* Unlink can be called from rename so we can not grab the sem here
+	   since we deadlock otherwise */
+/*	down(&direntry->d_sb->s_vfs_rename_sem);*/
+	full_path = build_path_from_dentry(direntry);
+/*	up(&direntry->d_sb->s_vfs_rename_sem);*/
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls);
+
+	if (!rc) {
+		direntry->d_inode->i_nlink--;
+	} else if (rc == -ENOENT) {
+		d_drop(direntry);
+	} else if (rc == -ETXTBSY) {
+		int oplock = FALSE;
+		__u16 netfid;
+
+		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE,
+				 CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
+				 &netfid, &oplock, NULL, cifs_sb->local_nls);
+		if (rc==0) {
+			CIFSSMBRenameOpenFile(xid, pTcon, netfid, NULL,
+					      cifs_sb->local_nls);
+			CIFSSMBClose(xid, pTcon, netfid);
+			direntry->d_inode->i_nlink--;
+		}
+	} else if (rc == -EACCES) {
+		/* try only if r/o attribute set in local lookup data? */
+		pinfo_buf = kmalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
+		if (pinfo_buf) {
+			memset(pinfo_buf, 0, sizeof(FILE_BASIC_INFO));
+			/* ATTRS set to normal clears r/o bit */
+			pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
+			if (!(pTcon->ses->flags & CIFS_SES_NT4))
+				rc = CIFSSMBSetTimes(xid, pTcon, full_path,
+						     pinfo_buf,
+						     cifs_sb->local_nls);
+			else
+				rc = -EOPNOTSUPP;
+
+			if (rc == -EOPNOTSUPP) {
+				int oplock = FALSE;
+				__u16 netfid;
+			/*	rc = CIFSSMBSetAttrLegacy(xid, pTcon,
+							  full_path,
+							  (__u16)ATTR_NORMAL,
+							  cifs_sb->local_nls); 
+			   For some strange reason NT4 appears to accept the
+			   old setattr call without actually setting the
+			   attributes, so move on to the third workaround
+			   */
+
+			/* BB could scan to see if we already have it open
+			   and pass in pid of opener to function */
+				rc = CIFSSMBOpen(xid, pTcon, full_path,
+						 FILE_OPEN, SYNCHRONIZE |
+						 FILE_WRITE_ATTRIBUTES, 0,
+						 &netfid, &oplock, NULL,
+						 cifs_sb->local_nls);
+				if (rc==0) {
+					rc = CIFSSMBSetFileTimes(xid, pTcon,
+								 pinfo_buf,
+								 netfid);
+					CIFSSMBClose(xid, pTcon, netfid);
+				}
+			}
+			kfree(pinfo_buf);
+		}
+		if (rc==0) {
+			rc = CIFSSMBDelFile(xid, pTcon, full_path,
+					    cifs_sb->local_nls);
+			if (!rc) {
+				direntry->d_inode->i_nlink--;
+			} else if (rc == -ETXTBSY) {
+				int oplock = FALSE;
+				__u16 netfid;
+
+				rc = CIFSSMBOpen(xid, pTcon, full_path,
+						 FILE_OPEN, DELETE,
+						 CREATE_NOT_DIR |
+						 CREATE_DELETE_ON_CLOSE,
+						 &netfid, &oplock, NULL,
+						 cifs_sb->local_nls);
+				if (rc==0) {
+					CIFSSMBRenameOpenFile(xid, pTcon,
+						netfid, NULL,
+						cifs_sb->local_nls);
+					CIFSSMBClose(xid, pTcon, netfid);
+					direntry->d_inode->i_nlink--;
+				}
+			/* BB if rc = -ETXTBUSY goto the rename logic BB */
+			}
+		}
+	}
+	cifsInode = CIFS_I(direntry->d_inode);
+	cifsInode->time = 0;	/* will force revalidate to get info when
+				   needed */
+	direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
+		current_fs_time(inode->i_sb);
+	cifsInode = CIFS_I(inode);
+	cifsInode->time = 0;	/* force revalidate of dir as well */
+
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
+{
+	int rc = 0;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	struct inode *newinode = NULL;
+
+	cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p ", mode, inode));
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&inode->i_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&inode->i_sb->s_vfs_rename_sem);
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	/* BB add setting the equivalent of mode via CreateX w/ACLs */
+	rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls);
+	if (rc) {
+		cFYI(1, ("cifs_mkdir returned 0x%x ", rc));
+		d_drop(direntry);
+	} else {
+		inode->i_nlink++;
+		if (pTcon->ses->capabilities & CAP_UNIX)
+			rc = cifs_get_inode_info_unix(&newinode, full_path,
+						      inode->i_sb,xid);
+		else
+			rc = cifs_get_inode_info(&newinode, full_path, NULL,
+						 inode->i_sb,xid);
+
+		direntry->d_op = &cifs_dentry_ops;
+		d_instantiate(direntry, newinode);
+		if (direntry->d_inode)
+			direntry->d_inode->i_nlink = 2;
+		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+				CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+						    mode,
+						    (__u64)current->euid,
+						    (__u64)current->egid,
+						    0 /* dev_t */,
+						    cifs_sb->local_nls);
+			} else {
+				CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+						    mode, (__u64)-1,
+						    (__u64)-1, 0 /* dev_t */,
+						    cifs_sb->local_nls);
+			}
+		else {
+			/* BB to be implemented via Windows security descriptors
+			   eg CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
+						 -1, -1, local_nls); */
+		}
+	}
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_rmdir(struct inode *inode, struct dentry *direntry)
+{
+	int rc = 0;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	struct cifsInodeInfo *cifsInode;
+
+	cFYI(1, (" cifs_rmdir, inode = 0x%p with ", inode));
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&inode->i_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&inode->i_sb->s_vfs_rename_sem);
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls);
+
+	if (!rc) {
+		inode->i_nlink--;
+		i_size_write(direntry->d_inode,0);
+		direntry->d_inode->i_nlink = 0;
+	}
+
+	cifsInode = CIFS_I(direntry->d_inode);
+	cifsInode->time = 0;	/* force revalidate to go get info when
+				   needed */
+	direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
+		current_fs_time(inode->i_sb);
+
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
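+/* Rename within a single mounted share; an existing target is removed first
+   (unless source and target are hardlinks to the same file) to give POSIX
+   rather than Windows semantics */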
+int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
+	struct inode *target_inode, struct dentry *target_direntry)
+{
+	char *fromName;
+	char *toName;
+	struct cifs_sb_info *cifs_sb_source;
+	struct cifs_sb_info *cifs_sb_target;
+	struct cifsTconInfo *pTcon;
+	int xid;
+	int rc = 0;
+
+	xid = GetXid();
+
+	cifs_sb_target = CIFS_SB(target_inode->i_sb);
+	cifs_sb_source = CIFS_SB(source_inode->i_sb);
+	pTcon = cifs_sb_source->tcon;
+
+	if (pTcon != cifs_sb_target->tcon) {
+		FreeXid(xid);
+		return -EXDEV;	/* BB actually could be allowed if same server,
+				   but different share.
+				   Might eventually add support for this */
+	}
+
+	/* we already have the rename sem so we do not need to grab it again
+	   here to protect the path integrity */
+	fromName = build_path_from_dentry(source_direntry);
+	toName = build_path_from_dentry(target_direntry);
+	if ((fromName == NULL) || (toName == NULL)) {
+		rc = -ENOMEM;
+		goto cifs_rename_exit;
+	}
+
+	rc = CIFSSMBRename(xid, pTcon, fromName, toName,
+			   cifs_sb_source->local_nls);
+	if (rc == -EEXIST) {
+		/* check if they are the same file because rename of hardlinked
+		   files is a noop */
+		FILE_UNIX_BASIC_INFO *info_buf_source;
+		FILE_UNIX_BASIC_INFO *info_buf_target;
+
+		info_buf_source =
+			kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+		if (info_buf_source != NULL) {
+			info_buf_target = info_buf_source + 1;
+			rc = CIFSSMBUnixQPathInfo(xid, pTcon, fromName,
+				info_buf_source, cifs_sb_source->local_nls);
+			if (rc == 0) {
+				rc = CIFSSMBUnixQPathInfo(xid, pTcon, toName,
+						info_buf_target,
+						cifs_sb_target->local_nls);
+			}
+			if ((rc == 0) &&
+			    (info_buf_source->UniqueId ==
+			     info_buf_target->UniqueId)) {
+			/* do not rename since the files are hardlinked which
+			   is a noop */
+			} else {
+			/* we either can not tell the files are hardlinked
+			   (as with Windows servers) or files are not
+			   hardlinked so delete the target manually before
+			   renaming to follow POSIX rather than Windows
+			   semantics */
+				cifs_unlink(target_inode, target_direntry);
+				rc = CIFSSMBRename(xid, pTcon, fromName,
+						   toName,
+						   cifs_sb_source->local_nls);
+			}
+			kfree(info_buf_source);
+		} /* if we can not get memory just leave rc as EEXIST */
+	}
+
+	if (rc) {
+		cFYI(1, ("rename rc %d", rc));
+	}
+
+	if ((rc == -EIO) || (rc == -EEXIST)) {
+		int oplock = FALSE;
+		__u16 netfid;
+
+		/* BB FIXME Is Generic Read correct for rename? */
+		/* if renaming a directory we should not say CREATE_NOT_DIR;
+		   need to test renaming an open directory, and GENERIC_READ
+		   might not be the right access to request */
+		rc = CIFSSMBOpen(xid, pTcon, fromName, FILE_OPEN, GENERIC_READ,
+				 CREATE_NOT_DIR, &netfid, &oplock, NULL,
+				 cifs_sb_source->local_nls);
+		if (rc==0) {
+			CIFSSMBRenameOpenFile(xid, pTcon, netfid, toName,
+					      cifs_sb_source->local_nls);
+			CIFSSMBClose(xid, pTcon, netfid);
+		}
+	}
+
+cifs_rename_exit:
+	kfree(fromName);
+	kfree(toName);
+	FreeXid(xid);
+	return rc;
+}
+
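+/* Refresh inode metadata from the server; dirty pages are written back and,
+   for files with no open instances, cached pages are invalidated if the file
+   changed on the server */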
+int cifs_revalidate(struct dentry *direntry)
+{
+	int xid;
+	int rc = 0;
+	char *full_path;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsInodeInfo *cifsInode;
+	loff_t local_size;
+	struct timespec local_mtime;
+	int invalidate_inode = FALSE;
+
+	if (direntry->d_inode == NULL)
+		return -ENOENT;
+
+	cifsInode = CIFS_I(direntry->d_inode);
+
+	if (cifsInode == NULL)
+		return -ENOENT;
+
+	/* no sense revalidating inode info on file that no one can write */
+	if (CIFS_I(direntry->d_inode)->clientCanCacheRead)
+		return rc;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(direntry->d_sb);
+
+	/* can not safely grab the rename sem here if rename calls revalidate
+	   since that would deadlock */
+	full_path = build_path_from_dentry(direntry);
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
+		 "jiffies %ld", full_path, direntry->d_inode,
+		 direntry->d_inode->i_count.counter, direntry,
+		 direntry->d_time, jiffies));
+
+	if (cifsInode->time == 0) {
+		/* was set to zero previously to force revalidate */
+	} else if (time_before(jiffies, cifsInode->time + HZ) &&
+		   lookupCacheEnabled) {
+		if ((S_ISREG(direntry->d_inode->i_mode) == 0) ||
+		    (direntry->d_inode->i_nlink == 1)) {
+			kfree(full_path);
+			FreeXid(xid);
+			return rc;
+		} else {
+			cFYI(1, ("Have to revalidate file due to hardlinks"));
+		}
+	}
+
+	/* save mtime and size */
+	local_mtime = direntry->d_inode->i_mtime;
+	local_size = direntry->d_inode->i_size;
+
+	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+		rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
+					      direntry->d_sb,xid);
+		if (rc) {
+			cFYI(1, ("error on getting revalidate info %d", rc));
+/*			if (rc != -ENOENT)
+				rc = 0; */	/* BB should we cache info on
+						   certain errors? */
+		}
+	} else {
+		rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
+					 direntry->d_sb,xid);
+		if (rc) {
+			cFYI(1, ("error on getting revalidate info %d", rc));
+/*			if (rc != -ENOENT)
+				rc = 0; */	/* BB should we cache info on
+						   certain errors? */
+		}
+	}
+	/* should we remap certain errors, access denied?, to zero */
+
+	/* if not oplocked, we invalidate inode pages if mtime or file size
+	   had changed on server */
+
+	if (timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) && 
+	    (local_size == direntry->d_inode->i_size)) {
+		cFYI(1, ("cifs_revalidate - inode unchanged"));
+	} else {
+		/* file may have changed on server */
+		if (cifsInode->clientCanCacheRead) {
+			/* no need to invalidate inode pages since we were the
+			   only ones who could have modified the file and the
+			   server copy is staler than ours */
+		} else {
+			invalidate_inode = TRUE;
+		}
+	}
+
+	/* can not grab this sem since kernel filesys locking documentation
+	   indicates i_sem may be taken by the kernel on lookup and rename
+	   which could deadlock if we grab the i_sem here as well */
+/*	down(&direntry->d_inode->i_sem);*/
+	/* need to write out dirty pages here  */
+	if (direntry->d_inode->i_mapping) {
+		/* do we need to lock inode until after invalidate completes
+		   below? */
+		filemap_fdatawrite(direntry->d_inode->i_mapping);
+	}
+	if (invalidate_inode) {
+		if (direntry->d_inode->i_mapping)
+			filemap_fdatawait(direntry->d_inode->i_mapping);
+		/* may eventually have to do this for open files too */
+		if (list_empty(&(cifsInode->openFileList))) {
+			/* Has changed on server - flush read ahead pages */
+			cFYI(1, ("Invalidating read ahead data on "
+				 "closed file"));
+			invalidate_remote_inode(direntry->d_inode);
+		}
+	}
+/*	up(&direntry->d_inode->i_sem); */
+
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+	struct kstat *stat)
+{
+	int err = cifs_revalidate(dentry);
+	if (!err)
+		generic_fillattr(dentry->d_inode, stat);
+	return err;
+}
+
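+/* Zero the portion of the page cache page beyond the new end of file so that
+   stale data past EOF is not left in the page cache after a truncate */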
+static int cifs_truncate_page(struct address_space *mapping, loff_t from)
+{
+	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+	struct page *page;
+	char *kaddr;
+	int rc = 0;
+
+	page = grab_cache_page(mapping, index);
+	if (!page)
+		return -ENOMEM;
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+	unlock_page(page);
+	page_cache_release(page);
+	return rc;
+}
+
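+/* Change file attributes; the size is set by handle when a writeable open
+   file is available (preserving the oplock), otherwise by pathname */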
+int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+{
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	int rc = -EACCES;
+	int found = FALSE;
+	struct cifsFileInfo *open_file = NULL;
+	FILE_BASIC_INFO time_buf;
+	int set_time = FALSE;
+	__u64 mode = 0xFFFFFFFFFFFFFFFFULL;
+	__u64 uid = 0xFFFFFFFFFFFFFFFFULL;
+	__u64 gid = 0xFFFFFFFFFFFFFFFFULL;
+	struct cifsInodeInfo *cifsInode;
+	struct list_head *tmp;
+
+	xid = GetXid();
+
+	cFYI(1, (" In cifs_setattr, name = %s attrs->ia_valid 0x%x ",
+		 direntry->d_name.name, attrs->ia_valid));
+	cifs_sb = CIFS_SB(direntry->d_inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&direntry->d_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&direntry->d_sb->s_vfs_rename_sem);
+	if (full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	cifsInode = CIFS_I(direntry->d_inode);
+
+	/* BB check if we need to refresh inode from server now ? BB */
+
+	/* need to flush data before changing file size on server */
+	filemap_fdatawrite(direntry->d_inode->i_mapping);
+	filemap_fdatawait(direntry->d_inode->i_mapping);
+
+	if (attrs->ia_valid & ATTR_SIZE) {
+		read_lock(&GlobalSMBSeslock);
+		/* To avoid spurious oplock breaks from server, in the case of
+		   inodes that we already have open, avoid doing path based
+		   setting of file size if we can do it by handle.
+		   This keeps our caching token (oplock) and avoids timeouts
+		   when the local oplock break takes longer to flush
+		   writebehind data than the SMB timeout for the SetPathInfo
+		   request would allow */
+		list_for_each(tmp, &cifsInode->openFileList) {
+			open_file = list_entry(tmp, struct cifsFileInfo,
+					       flist);
+			/* We check if file is open for writing first */
+			if ((open_file->pfile) &&
+			    ((open_file->pfile->f_flags & O_RDWR) ||
+			    (open_file->pfile->f_flags & O_WRONLY))) {
+				if (open_file->invalidHandle == FALSE) {
+					/* we found a valid, writeable network
+					   file handle to use to try to set the
+					   file size */
+					__u16 nfid = open_file->netfid;
+					__u32 npid = open_file->pid;
+					read_unlock(&GlobalSMBSeslock);
+					found = TRUE;
+					rc = CIFSSMBSetFileSize(xid, pTcon,
+						attrs->ia_size, nfid, npid,
+						FALSE);
+					cFYI(1, ("SetFileSize by handle "
+						 "(setattrs) rc = %d", rc));
+					/* Do not need reopen and retry on
+					   EAGAIN since we will retry by
+					   pathname below */
+
+					/* now that we found one valid file
+					   handle no sense continuing to loop
+					   trying others, so break here */
+					break;
+				}
+			}
+		}
+		if (found == FALSE)
+			read_unlock(&GlobalSMBSeslock);
+
+		if (rc != 0) {
+			/* Set file size by pathname rather than by handle
+			   either because no valid, writeable file handle for
+			   it was found or because there was an error setting
+			   it by handle */
+			rc = CIFSSMBSetEOF(xid, pTcon, full_path,
+					   attrs->ia_size, FALSE,
+					   cifs_sb->local_nls);
+			cFYI(1, (" SetEOF by path (setattrs) rc = %d", rc));
+		}
+
+		/* Server is ok setting allocation size implicitly - no need
+		   to call:
+		CIFSSMBSetEOF(xid, pTcon, full_path, attrs->ia_size, TRUE,
+			 cifs_sb->local_nls);
+		   */
+
+		if (rc == 0) {
+			rc = vmtruncate(direntry->d_inode, attrs->ia_size);
+			cifs_truncate_page(direntry->d_inode->i_mapping,
+					   direntry->d_inode->i_size);
+		}
+	}
+	if (attrs->ia_valid & ATTR_UID) {
+		cFYI(1, (" CIFS - UID changed to %d", attrs->ia_uid));
+		uid = attrs->ia_uid;
+		/* entry->uid = cpu_to_le16(attr->ia_uid); */
+	}
+	if (attrs->ia_valid & ATTR_GID) {
+		cFYI(1, (" CIFS - GID changed to %d", attrs->ia_gid));
+		gid = attrs->ia_gid;
+		/* entry->gid = cpu_to_le16(attr->ia_gid); */
+	}
+
+	time_buf.Attributes = 0;
+	if (attrs->ia_valid & ATTR_MODE) {
+		cFYI(1, (" CIFS - Mode changed to 0x%x", attrs->ia_mode));
+		mode = attrs->ia_mode;
+		/* entry->mode = cpu_to_le16(attr->ia_mode); */
+	}
+
+	if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+	    && (attrs->ia_valid & (ATTR_MODE | ATTR_GID | ATTR_UID)))
+		rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode, uid, gid,
+					 0 /* dev_t */, cifs_sb->local_nls);
+	else if (attrs->ia_valid & ATTR_MODE) {
+		if ((mode & S_IWUGO) == 0) /* not writeable */ {
+			if ((cifsInode->cifsAttrs & ATTR_READONLY) == 0)
+				time_buf.Attributes =
+					cpu_to_le32(cifsInode->cifsAttrs |
+						    ATTR_READONLY);
+		} else if ((mode & S_IWUGO) == S_IWUGO) {
+			if (cifsInode->cifsAttrs & ATTR_READONLY)
+				time_buf.Attributes =
+					cpu_to_le32(cifsInode->cifsAttrs &
+						    (~ATTR_READONLY));
+		}
+		/* BB to be implemented -
+		   via Windows security descriptors or streams */
+		/* CIFSSMBWinSetPerms(xid, pTcon, full_path, mode, uid, gid,
+				      cifs_sb->local_nls); */
+	}
+
+	if (attrs->ia_valid & ATTR_ATIME) {
+		set_time = TRUE;
+		time_buf.LastAccessTime =
+		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
+	} else
+		time_buf.LastAccessTime = 0;
+
+	if (attrs->ia_valid & ATTR_MTIME) {
+		set_time = TRUE;
+		time_buf.LastWriteTime =
+		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
+	} else
+		time_buf.LastWriteTime = 0;
+
+	if (attrs->ia_valid & ATTR_CTIME) {
+		set_time = TRUE;
+		cFYI(1, (" CIFS - CTIME changed ")); /* BB probably no need */
+		time_buf.ChangeTime =
+		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
+	} else
+		time_buf.ChangeTime = 0;
+
+	if (set_time || time_buf.Attributes) {
+		/* BB what if setting one attribute fails (such as size) but
+		   time setting works? */
+		time_buf.CreationTime = 0;	/* do not change */
+		/* In the future we should experiment - try setting timestamps
+		   via Handle (SetFileInfo) instead of by path */
+		if (!(pTcon->ses->flags & CIFS_SES_NT4))
+			rc = CIFSSMBSetTimes(xid, pTcon, full_path, &time_buf,
+					     cifs_sb->local_nls);
+		else
+			rc = -EOPNOTSUPP;
+
+		if (rc == -EOPNOTSUPP) {
+			int oplock = FALSE;
+			__u16 netfid;
+
+			cFYI(1, ("calling SetFileInfo since SetPathInfo for "
+				 "times not supported by this server"));
+			/* BB we could scan to see if we already have it open
+			   and pass in pid of opener to function */
+			rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
+					 SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
+					 CREATE_NOT_DIR, &netfid, &oplock,
+					 NULL, cifs_sb->local_nls);
+			if (rc==0) {
+				rc = CIFSSMBSetFileTimes(xid, pTcon, &time_buf,
+							 netfid);
+				CIFSSMBClose(xid, pTcon, netfid);
+			} else {
+			/* BB For even older servers we could convert time_buf
+			   into old DOS style which uses two second
+			   granularity */
+
+			/* rc = CIFSSMBSetTimesLegacy(xid, pTcon, full_path,
+        	        		&time_buf, cifs_sb->local_nls); */
+			}
+		}
+	}
+
+	/* do not need local check to inode_check_ok since the server does
+	   that */
+	if (!rc)
+		rc = inode_setattr(direntry->d_inode, attrs);
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+void cifs_delete_inode(struct inode *inode)
+{
+	cFYI(1, ("In cifs_delete_inode, inode = 0x%p ", inode));
+	/* may have to add back in if and when safe distributed caching of
+	   directories added e.g. via FindNotify */
+}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
new file mode 100644
index 0000000..b4b8e20
--- /dev/null
+++ b/fs/cifs/ioctl.c
@@ -0,0 +1,49 @@
+/*
+ *   fs/cifs/ioctl.c
+ *
+ *   vfs operations that deal with io control
+ *
+ *   Copyright (C) International Business Machines  Corp., 2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/ext2_fs.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+
+int cifs_ioctl (struct inode * inode, struct file * filep, 
+		unsigned int command, unsigned long arg)
+{
+	int rc = -ENOTTY; /* strange error - but the precedent */
+#ifdef CONFIG_CIFS_POSIX
+	cFYI(1,("ioctl file %p  cmd %u  arg %lu",filep,command,arg));
+	switch(command) {
+		case EXT2_IOC_GETFLAGS:
+			cFYI(1,("get flags not implemented yet"));
+			return -EOPNOTSUPP;
+		case EXT2_IOC_SETFLAGS:
+			cFYI(1,("set flags not implemented yet"));
+			return -EOPNOTSUPP;
+		default:
+			cFYI(1,("unsupported ioctl"));
+			return rc;
+	}
+#endif /* CONFIG_CIFS_POSIX */
+	return rc;
+}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
new file mode 100644
index 0000000..1455810
--- /dev/null
+++ b/fs/cifs/link.c
@@ -0,0 +1,328 @@
+/*
+ *   fs/cifs/link.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/namei.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+
+int
+cifs_hardlink(struct dentry *old_file, struct inode *inode,
+	      struct dentry *direntry)
+{
+	int rc = -EACCES;
+	int xid;
+	char *fromName = NULL;
+	char *toName = NULL;
+	struct cifs_sb_info *cifs_sb_target;
+	struct cifsTconInfo *pTcon;
+	struct cifsInodeInfo *cifsInode;
+
+	xid = GetXid();
+
+	cifs_sb_target = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb_target->tcon;
+
+/* No need to check for cross device links since server will do that
+   BB note DFS case in future though (when we may have to check) */
+
+	down(&inode->i_sb->s_vfs_rename_sem);
+	fromName = build_path_from_dentry(old_file);
+	toName = build_path_from_dentry(direntry);
+	up(&inode->i_sb->s_vfs_rename_sem);
+	if((fromName == NULL) || (toName == NULL)) {
+		rc = -ENOMEM;
+		goto cifs_hl_exit;
+	}
+
+	if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)
+		rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName,
+					    cifs_sb_target->local_nls);
+	else {
+		rc = CIFSCreateHardLink(xid, pTcon, fromName, toName,
+					cifs_sb_target->local_nls);
+		if(rc == -EIO)
+			rc = -EOPNOTSUPP;  
+	}
+
+/* if (!rc)     */
+	{
+		/*   renew_parental_timestamps(old_file);
+		   inode->i_nlink++;
+		   mark_inode_dirty(inode);
+		   d_instantiate(direntry, inode); */
+		/* BB add call to either mark inode dirty or refresh its data and timestamp to current time */
+	}
+	d_drop(direntry);	/* force new lookup from server */
+	cifsInode = CIFS_I(old_file->d_inode);
+	cifsInode->time = 0;	/* will force revalidate to go get info when needed */
+
+cifs_hl_exit:
+	if (fromName)
+		kfree(fromName);
+	if (toName)
+		kfree(toName);
+	FreeXid(xid);
+	return rc;
+}
+
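+/* Resolve a symlink target (via the Unix extensions query when available)
+   and hand the kmalloc'd string to the VFS; cifs_put_link frees it later */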
+int
+cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
+{
+	struct inode *inode = direntry->d_inode;
+	int rc = -EACCES;
+	int xid;
+	char *full_path = NULL;
+	char * target_path = ERR_PTR(-ENOMEM);
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+
+	xid = GetXid();
+
+	down(&direntry->d_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&direntry->d_sb->s_vfs_rename_sem);
+
+	if (!full_path)
+		goto out_no_free;
+
+	cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode));
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!target_path) {
+		target_path = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+	if (pTcon->ses->capabilities & CAP_UNIX)
+		rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
+					     target_path,
+					     PATH_MAX-1,
+					     cifs_sb->local_nls);
+	else {
+		/* rc = CIFSSMBQueryReparseLinkInfo */
+		/* BB Add code to Query ReparsePoint info */
+		/* BB Add MAC style xsymlink check here if enabled */
+	}
+
+	if (rc == 0) {
+
+/* BB Add special case check for Samba DFS symlinks */
+
+		target_path[PATH_MAX-1] = 0;
+	} else {
+		kfree(target_path);
+		target_path = ERR_PTR(rc);
+	}
+
+out:
+	kfree(full_path);
+out_no_free:
+	FreeXid(xid);
+	nd_set_link(nd, target_path);
+	return 0;
+}
+
+int
+cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
+{
+	int rc = -EOPNOTSUPP;
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	struct inode *newinode = NULL;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&inode->i_sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&inode->i_sb->s_vfs_rename_sem);
+
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	cFYI(1, ("Full path: %s ", full_path));
+	cFYI(1, ("symname is %s", symname));
+
+	/* BB what if DFS and this volume is on different share? BB */
+	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
+					   cifs_sb->local_nls);
+	/* else
+	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,cifs_sb_target->local_nls); */
+
+	if (rc == 0) {
+		if (pTcon->ses->capabilities & CAP_UNIX)
+			rc = cifs_get_inode_info_unix(&newinode, full_path,
+						      inode->i_sb,xid);
+		else
+			rc = cifs_get_inode_info(&newinode, full_path, NULL,
+						 inode->i_sb,xid);
+
+		if (rc != 0) {
+			cFYI(1,
+			     ("Create symlink worked but get_inode_info failed with rc = %d ",
+			      rc));
+		} else {
+			direntry->d_op = &cifs_dentry_ops;
+			d_instantiate(direntry, newinode);
+		}
+	}
+
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+int
+cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
+{
+	struct inode *inode = direntry->d_inode;
+	int rc = -EACCES;
+	int xid;
+	int oplock = FALSE;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	char *full_path = NULL;
+	char *tmp_path =  NULL;
+	char * tmpbuffer;
+	unsigned char * referrals = NULL;
+	int num_referrals = 0;
+	int len;
+	__u16 fid;
+
+	xid = GetXid();
+	cifs_sb = CIFS_SB(inode->i_sb);
+	pTcon = cifs_sb->tcon;
+
+/* BB would it be safe against deadlock to grab this sem 
+      even though rename itself grabs the sem and calls lookup? */
+/*       down(&inode->i_sb->s_vfs_rename_sem);*/
+	full_path = build_path_from_dentry(direntry);
+/*       up(&inode->i_sb->s_vfs_rename_sem);*/
+
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+	cFYI(1,
+	     ("Full path: %s inode = 0x%p pBuffer = 0x%p buflen = %d",
+	      full_path, inode, pBuffer, buflen));
+	if(buflen > PATH_MAX)
+		len = PATH_MAX;
+	else
+		len = buflen;
+	tmpbuffer = kmalloc(len,GFP_KERNEL);   
+	if(tmpbuffer == NULL) {
+		if (full_path)
+			kfree(full_path);
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+
+/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+		rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
+				tmpbuffer,
+				len - 1,
+				cifs_sb->local_nls);
+	else {
+		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, GENERIC_READ,
+				OPEN_REPARSE_POINT,&fid, &oplock, NULL, cifs_sb->local_nls);
+		if(!rc) {
+			rc = CIFSSMBQueryReparseLinkInfo(xid, pTcon, full_path,
+				tmpbuffer,
+				len - 1, 
+				fid,
+				cifs_sb->local_nls);
+			if(CIFSSMBClose(xid, pTcon, fid)) {
+				cFYI(1,("Error closing junction point (open for ioctl)"));
+			}
+			if(rc == -EIO) {
+				/* Query if DFS Junction */
+				tmp_path =
+					kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1,
+						GFP_KERNEL);
+				if (tmp_path) {
+					strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
+					strncat(tmp_path, full_path, MAX_PATHCONF);
+					rc = get_dfs_path(xid, pTcon->ses, tmp_path,
+						cifs_sb->local_nls, &num_referrals, &referrals);
+					cFYI(1,("Get DFS for %s rc = %d ",tmp_path, rc));
+					if((num_referrals == 0) && (rc == 0))
+						rc = -EACCES;
+					else {
+						cFYI(1,("num referral: %d",num_referrals));
+						if(referrals) {
+							cFYI(1,("referral string: %s ",referrals));
+							strncpy(tmpbuffer, referrals, len-1);
+						}
+					}
+					if (referrals)
+						kfree(referrals);
+					kfree(tmp_path);
+				}
+				/* BB add code like else decode referrals then memcpy to
+				  tmpbuffer and free referrals string array BB */
+			}
+		}
+	}
+	/* BB Anything else to do to handle recursive links? */
+	/* BB Should we be using page ops here? */
+
+	/* BB null terminate returned string in pBuffer? BB */
+	if (rc == 0) {
+		rc = vfs_readlink(direntry, pBuffer, len, tmpbuffer);
+		cFYI(1,
+		     ("vfs_readlink called from cifs_readlink returned %d",
+		      rc));
+	}
+
+	kfree(tmpbuffer);
+	kfree(full_path);
+	FreeXid(xid);
+	return rc;
+}
+
+void cifs_put_link(struct dentry *direntry, struct nameidata *nd)
+{
+	char *p = nd_get_link(nd);
+	if (!IS_ERR(p))
+		kfree(p);
+}
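
The symlink entry points above all share the same bracket: take an xid for the duration of the vfs call, build the share-relative path from the dentry, issue the SMB request, then free the path and release the xid. A minimal sketch of that shape, for illustration only (cifs_example_op and do_smb_op are hypothetical stand-ins, the latter for one of the CIFSSMB* calls, and error handling is trimmed):

	int cifs_example_op(struct inode *inode, struct dentry *direntry)
	{
		int rc = -ENOMEM;
		int xid = GetXid();
		struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
		char *full_path = build_path_from_dentry(direntry);

		if (full_path) {
			/* do_smb_op() stands in for one of the CIFSSMB* calls */
			rc = do_smb_op(xid, cifs_sb->tcon, full_path,
				       cifs_sb->local_nls);
			kfree(full_path);
		}
		FreeXid(xid);
		return rc;
	}
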
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
new file mode 100644
index 0000000..46d62c9
--- /dev/null
+++ b/fs/cifs/md4.c
@@ -0,0 +1,205 @@
+/* 
+   Unix SMB/Netbios implementation.
+   Version 1.9.
+   an implementation of MD4 designed for use in the SMB authentication protocol
+   Copyright (C) Andrew Tridgell 1997-1998.
+   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
+   
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+   
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+   
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+#include <linux/module.h>
+#include <linux/fs.h>
+#include "cifsencrypt.h"
+
+/* NOTE: This code makes no attempt to be fast! */
+
+static __u32
+F(__u32 X, __u32 Y, __u32 Z)
+{
+	return (X & Y) | ((~X) & Z);
+}
+
+static __u32
+G(__u32 X, __u32 Y, __u32 Z)
+{
+	return (X & Y) | (X & Z) | (Y & Z);
+}
+
+static __u32
+H(__u32 X, __u32 Y, __u32 Z)
+{
+	return X ^ Y ^ Z;
+}
+
+static __u32
+lshift(__u32 x, int s)
+{
+	x &= 0xFFFFFFFF;
+	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
+}
+
+#define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s)
+#define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s)
+#define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s)
+
+/* this applies md4 to 64 byte chunks */
+static void
+mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D)
+{
+	int j;
+	__u32 AA, BB, CC, DD;
+	__u32 X[16];
+
+
+	for (j = 0; j < 16; j++)
+		X[j] = M[j];
+
+	AA = *A;
+	BB = *B;
+	CC = *C;
+	DD = *D;
+
+	ROUND1(A, B, C, D, 0, 3);
+	ROUND1(D, A, B, C, 1, 7);
+	ROUND1(C, D, A, B, 2, 11);
+	ROUND1(B, C, D, A, 3, 19);
+	ROUND1(A, B, C, D, 4, 3);
+	ROUND1(D, A, B, C, 5, 7);
+	ROUND1(C, D, A, B, 6, 11);
+	ROUND1(B, C, D, A, 7, 19);
+	ROUND1(A, B, C, D, 8, 3);
+	ROUND1(D, A, B, C, 9, 7);
+	ROUND1(C, D, A, B, 10, 11);
+	ROUND1(B, C, D, A, 11, 19);
+	ROUND1(A, B, C, D, 12, 3);
+	ROUND1(D, A, B, C, 13, 7);
+	ROUND1(C, D, A, B, 14, 11);
+	ROUND1(B, C, D, A, 15, 19);
+
+	ROUND2(A, B, C, D, 0, 3);
+	ROUND2(D, A, B, C, 4, 5);
+	ROUND2(C, D, A, B, 8, 9);
+	ROUND2(B, C, D, A, 12, 13);
+	ROUND2(A, B, C, D, 1, 3);
+	ROUND2(D, A, B, C, 5, 5);
+	ROUND2(C, D, A, B, 9, 9);
+	ROUND2(B, C, D, A, 13, 13);
+	ROUND2(A, B, C, D, 2, 3);
+	ROUND2(D, A, B, C, 6, 5);
+	ROUND2(C, D, A, B, 10, 9);
+	ROUND2(B, C, D, A, 14, 13);
+	ROUND2(A, B, C, D, 3, 3);
+	ROUND2(D, A, B, C, 7, 5);
+	ROUND2(C, D, A, B, 11, 9);
+	ROUND2(B, C, D, A, 15, 13);
+
+	ROUND3(A, B, C, D, 0, 3);
+	ROUND3(D, A, B, C, 8, 9);
+	ROUND3(C, D, A, B, 4, 11);
+	ROUND3(B, C, D, A, 12, 15);
+	ROUND3(A, B, C, D, 2, 3);
+	ROUND3(D, A, B, C, 10, 9);
+	ROUND3(C, D, A, B, 6, 11);
+	ROUND3(B, C, D, A, 14, 15);
+	ROUND3(A, B, C, D, 1, 3);
+	ROUND3(D, A, B, C, 9, 9);
+	ROUND3(C, D, A, B, 5, 11);
+	ROUND3(B, C, D, A, 13, 15);
+	ROUND3(A, B, C, D, 3, 3);
+	ROUND3(D, A, B, C, 11, 9);
+	ROUND3(C, D, A, B, 7, 11);
+	ROUND3(B, C, D, A, 15, 15);
+
+	*A += AA;
+	*B += BB;
+	*C += CC;
+	*D += DD;
+
+	*A &= 0xFFFFFFFF;
+	*B &= 0xFFFFFFFF;
+	*C &= 0xFFFFFFFF;
+	*D &= 0xFFFFFFFF;
+
+	for (j = 0; j < 16; j++)
+		X[j] = 0;
+}
+
+static void
+copy64(__u32 * M, unsigned char *in)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) |
+		    (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0);
+}
+
+static void
+copy4(unsigned char *out, __u32 x)
+{
+	out[0] = x & 0xFF;
+	out[1] = (x >> 8) & 0xFF;
+	out[2] = (x >> 16) & 0xFF;
+	out[3] = (x >> 24) & 0xFF;
+}
+
+/* produce a md4 message digest from data of length n bytes */
+void
+mdfour(unsigned char *out, unsigned char *in, int n)
+{
+	unsigned char buf[128];
+	__u32 M[16];
+	__u32 b = n * 8;
+	int i;
+	__u32 A = 0x67452301;
+	__u32 B = 0xefcdab89;
+	__u32 C = 0x98badcfe;
+	__u32 D = 0x10325476;
+
+	while (n > 64) {
+		copy64(M, in);
+		mdfour64(M,&A,&B, &C, &D);
+		in += 64;
+		n -= 64;
+	}
+
+	for (i = 0; i < 128; i++)
+		buf[i] = 0;
+	memcpy(buf, in, n);
+	buf[n] = 0x80;
+
+	if (n <= 55) {
+		copy4(buf + 56, b);
+		copy64(M, buf);
+		mdfour64(M, &A, &B, &C, &D);
+	} else {
+		copy4(buf + 120, b);
+		copy64(M, buf);
+		mdfour64(M, &A, &B, &C, &D);
+		copy64(M, buf + 64);
+		mdfour64(M, &A, &B, &C, &D);
+	}
+
+	for (i = 0; i < 128; i++)
+		buf[i] = 0;
+	copy64(M, buf);
+
+	copy4(out, A);
+	copy4(out + 4, B);
+	copy4(out + 8, C);
+	copy4(out + 12, D);
+
+	A = B = C = D = 0;
+}
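
mdfour() is the only entry point this file exposes: it hashes an arbitrary byte string into a 16-byte MD4 digest, which the SMB authentication code uses when deriving the NT hash from a password. A minimal call looks like the sketch below (illustrative only):

	unsigned char digest[16];
	unsigned char data[] = "abc";

	/* hash 3 bytes of input; digest receives the 16-byte MD4 value
	   (RFC 1320 lists a448017aaf21d8525fc10ae87aa6729d for "abc") */
	mdfour(digest, data, 3);
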
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
new file mode 100644
index 0000000..7aa2349
--- /dev/null
+++ b/fs/cifs/md5.c
@@ -0,0 +1,363 @@
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest.  This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+
+/* This code was slightly modified to fit into Samba by
+   abartlet@samba.org, Jun 2001,
+   and to fit the cifs vfs by
+   Steve French (sfrench@us.ibm.com) */
+
+#include <linux/string.h>
+#include "md5.h"
+
+static void MD5Transform(__u32 buf[4], __u32 const in[16]);
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+static void
+byteReverse(unsigned char *buf, unsigned longs)
+{
+	__u32 t;
+	do {
+		t = (__u32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
+		    ((unsigned) buf[1] << 8 | buf[0]);
+		*(__u32 *) buf = t;
+		buf += 4;
+	} while (--longs);
+}
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void
+MD5Init(struct MD5Context *ctx)
+{
+	ctx->buf[0] = 0x67452301;
+	ctx->buf[1] = 0xefcdab89;
+	ctx->buf[2] = 0x98badcfe;
+	ctx->buf[3] = 0x10325476;
+
+	ctx->bits[0] = 0;
+	ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void
+MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
+{
+	register __u32 t;
+
+	/* Update bitcount */
+
+	t = ctx->bits[0];
+	if ((ctx->bits[0] = t + ((__u32) len << 3)) < t)
+		ctx->bits[1]++;	/* Carry from low to high */
+	ctx->bits[1] += len >> 29;
+
+	t = (t >> 3) & 0x3f;	/* Bytes already in shsInfo->data */
+
+	/* Handle any leading odd-sized chunks */
+
+	if (t) {
+		unsigned char *p = (unsigned char *) ctx->in + t;
+
+		t = 64 - t;
+		if (len < t) {
+			memmove(p, buf, len);
+			return;
+		}
+		memmove(p, buf, t);
+		byteReverse(ctx->in, 16);
+		MD5Transform(ctx->buf, (__u32 *) ctx->in);
+		buf += t;
+		len -= t;
+	}
+	/* Process data in 64-byte chunks */
+
+	while (len >= 64) {
+		memmove(ctx->in, buf, 64);
+		byteReverse(ctx->in, 16);
+		MD5Transform(ctx->buf, (__u32 *) ctx->in);
+		buf += 64;
+		len -= 64;
+	}
+
+	/* Handle any remaining bytes of data. */
+
+	memmove(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern 
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void
+MD5Final(unsigned char digest[16], struct MD5Context *ctx)
+{
+	unsigned int count;
+	unsigned char *p;
+
+	/* Compute number of bytes mod 64 */
+	count = (ctx->bits[0] >> 3) & 0x3F;
+
+	/* Set the first char of padding to 0x80.  This is safe since there is
+	   always at least one byte free */
+	p = ctx->in + count;
+	*p++ = 0x80;
+
+	/* Bytes of padding needed to make 64 bytes */
+	count = 64 - 1 - count;
+
+	/* Pad out to 56 mod 64 */
+	if (count < 8) {
+		/* Two lots of padding:  Pad the first block to 64 bytes */
+		memset(p, 0, count);
+		byteReverse(ctx->in, 16);
+		MD5Transform(ctx->buf, (__u32 *) ctx->in);
+
+		/* Now fill the next block with 56 bytes */
+		memset(ctx->in, 0, 56);
+	} else {
+		/* Pad block to 56 bytes */
+		memset(p, 0, count - 8);
+	}
+	byteReverse(ctx->in, 14);
+
+	/* Append length in bits and transform */
+	((__u32 *) ctx->in)[14] = ctx->bits[0];
+	((__u32 *) ctx->in)[15] = ctx->bits[1];
+
+	MD5Transform(ctx->buf, (__u32 *) ctx->in);
+	byteReverse((unsigned char *) ctx->buf, 4);
+	memmove(digest, ctx->buf, 16);
+	memset(ctx, 0, sizeof(*ctx));	/* In case it's sensitive */
+}
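
As the header comment describes, the plain MD5 half of this file is driven in the Init/Update/Final style: initialize a context, feed it any number of buffers, then extract the 16-byte digest. A minimal sketch (illustrative only):

	struct MD5Context ctx;
	unsigned char digest[16];

	MD5Init(&ctx);
	MD5Update(&ctx, (unsigned char *) "hello ", 6);
	MD5Update(&ctx, (unsigned char *) "world", 5);	/* Update may be called repeatedly */
	MD5Final(digest, &ctx);		/* digest now holds MD5("hello world") */
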
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+	( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void
+MD5Transform(__u32 buf[4], __u32 const in[16])
+{
+	register __u32 a, b, c, d;
+
+	a = buf[0];
+	b = buf[1];
+	c = buf[2];
+	d = buf[3];
+
+	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+	buf[0] += a;
+	buf[1] += b;
+	buf[2] += c;
+	buf[3] += d;
+}
+
+/***********************************************************************
+ the rfc 2104 version of hmac_md5 initialisation.
+***********************************************************************/
+void
+hmac_md5_init_rfc2104(unsigned char *key, int key_len,
+		      struct HMACMD5Context *ctx)
+{
+	int i;
+
+	/* if key is longer than 64 bytes reset it to key=MD5(key) */
+	if (key_len > 64) {
+		unsigned char tk[16];
+		struct MD5Context tctx;
+
+		MD5Init(&tctx);
+		MD5Update(&tctx, key, key_len);
+		MD5Final(tk, &tctx);
+
+		key = tk;
+		key_len = 16;
+	}
+
+	/* start out by storing key in pads */
+	memset(ctx->k_ipad, 0, sizeof (ctx->k_ipad));
+	memset(ctx->k_opad, 0, sizeof (ctx->k_opad));
+	memcpy(ctx->k_ipad, key, key_len);
+	memcpy(ctx->k_opad, key, key_len);
+
+	/* XOR key with ipad and opad values */
+	for (i = 0; i < 64; i++) {
+		ctx->k_ipad[i] ^= 0x36;
+		ctx->k_opad[i] ^= 0x5c;
+	}
+
+	MD5Init(&ctx->ctx);
+	MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+}
+
+/***********************************************************************
+ the microsoft version of hmac_md5 initialisation.
+***********************************************************************/
+void
+hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
+			 struct HMACMD5Context *ctx)
+{
+	int i;
+
+	/* if key is longer than 64 bytes truncate it */
+	if (key_len > 64) {
+		key_len = 64;
+	}
+
+	/* start out by storing key in pads */
+	memset(ctx->k_ipad, 0, sizeof (ctx->k_ipad));
+	memset(ctx->k_opad, 0, sizeof (ctx->k_opad));
+	memcpy(ctx->k_ipad, key, key_len);
+	memcpy(ctx->k_opad, key, key_len);
+
+	/* XOR key with ipad and opad values */
+	for (i = 0; i < 64; i++) {
+		ctx->k_ipad[i] ^= 0x36;
+		ctx->k_opad[i] ^= 0x5c;
+	}
+
+	MD5Init(&ctx->ctx);
+	MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+}
+
+/***********************************************************************
+ update hmac_md5 "inner" buffer
+***********************************************************************/
+void
+hmac_md5_update(const unsigned char *text, int text_len,
+		struct HMACMD5Context *ctx)
+{
+	MD5Update(&ctx->ctx, text, text_len);	/* then text of datagram */
+}
+
+/***********************************************************************
+ finish off hmac_md5 "inner" buffer and generate outer one.
+***********************************************************************/
+void
+hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
+{
+	struct MD5Context ctx_o;
+
+	MD5Final(digest, &ctx->ctx);
+
+	MD5Init(&ctx_o);
+	MD5Update(&ctx_o, ctx->k_opad, 64);
+	MD5Update(&ctx_o, digest, 16);
+	MD5Final(digest, &ctx_o);
+}
+
+/***********************************************************
+ single function to calculate an HMAC MD5 digest from data.
+ use the microsoft hmacmd5 init method because the key is 16 bytes.
+************************************************************/
+void
+hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
+	 unsigned char *digest)
+{
+	struct HMACMD5Context ctx;
+	hmac_md5_init_limK_to_64(key, 16, &ctx);
+	if (data_len != 0) {
+		hmac_md5_update(data, data_len, &ctx);
+	}
+	hmac_md5_final(digest, &ctx);
+}
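
For callers that already have a 16-byte key, hmac_md5() above wraps the whole init/update/final sequence into one call (using the Microsoft-style init, since the key fits within 64 bytes). A minimal sketch with a made-up key and message (illustrative only):

	unsigned char key[16];	/* e.g. a session key, assumed filled in elsewhere */
	unsigned char msg[] = "some bytes to authenticate";
	unsigned char mac[16];

	hmac_md5(key, msg, sizeof(msg) - 1, mac);	/* mac receives HMAC-MD5(key, msg) */
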
diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h
new file mode 100644
index 0000000..00e1c53
--- /dev/null
+++ b/fs/cifs/md5.h
@@ -0,0 +1,38 @@
+#ifndef MD5_H
+#define MD5_H
+#ifndef HEADER_MD5_H
+/* Try to avoid clashes with OpenSSL */
+#define HEADER_MD5_H
+#endif
+
+struct MD5Context {
+	__u32 buf[4];
+	__u32 bits[2];
+	unsigned char in[64];
+};
+#endif				/* !MD5_H */
+
+#ifndef _HMAC_MD5_H
+struct HMACMD5Context {
+	struct MD5Context ctx;
+	unsigned char k_ipad[65];
+	unsigned char k_opad[65];
+};
+#endif				/* _HMAC_MD5_H */
+
+void MD5Init(struct MD5Context *context);
+void MD5Update(struct MD5Context *context, unsigned char const *buf,
+			unsigned len);
+void MD5Final(unsigned char digest[16], struct MD5Context *context);
+
+/* The following definitions come from lib/hmacmd5.c  */
+
+void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
+			struct HMACMD5Context *ctx);
+void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
+			struct HMACMD5Context *ctx);
+void hmac_md5_update(const unsigned char *text, int text_len,
+			struct HMACMD5Context *ctx);
+void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
+void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
+			unsigned char *digest);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
new file mode 100644
index 0000000..7b38d30
--- /dev/null
+++ b/fs/cifs/misc.c
@@ -0,0 +1,516 @@
+/*
+ *   fs/cifs/misc.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/mempool.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "smberr.h"
+#include "nterr.h"
+
+extern mempool_t *cifs_sm_req_poolp;
+extern mempool_t *cifs_req_poolp;
+extern struct task_struct * oplockThread;
+
+static __u16 GlobalMid;		/* multiplex id - rotating counter */
+
+/* The xid serves as a useful identifier for each incoming vfs request,
+   in a similar way to the mid, which is useful for tracking each smb
+   sent.  CurrentXid also provides a running counter (although it will
+   eventually wrap past zero) of the total vfs operations handled since
+   the cifs fs was mounted. */
+
+unsigned int
+_GetXid(void)
+{
+	unsigned int xid;
+
+	spin_lock(&GlobalMid_Lock);
+	GlobalTotalActiveXid++;
+	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
+		GlobalMaxActiveXid = GlobalTotalActiveXid;	/* keep high water mark for number of simultaneous vfs ops in our filesystem */
+	xid = GlobalCurrentXid++;
+	spin_unlock(&GlobalMid_Lock);
+	return xid;
+}
+
+void
+_FreeXid(unsigned int xid)
+{
+	spin_lock(&GlobalMid_Lock);
+	/* if(GlobalTotalActiveXid == 0)
+		BUG(); */
+	GlobalTotalActiveXid--;
+	spin_unlock(&GlobalMid_Lock);
+}
+
+struct cifsSesInfo *
+sesInfoAlloc(void)
+{
+	struct cifsSesInfo *ret_buf;
+
+	ret_buf =
+	    (struct cifsSesInfo *) kmalloc(sizeof (struct cifsSesInfo),
+					   GFP_KERNEL);
+	if (ret_buf) {
+		memset(ret_buf, 0, sizeof (struct cifsSesInfo));
+		write_lock(&GlobalSMBSeslock);
+		atomic_inc(&sesInfoAllocCount);
+		ret_buf->status = CifsNew;
+		list_add(&ret_buf->cifsSessionList, &GlobalSMBSessionList);
+		init_MUTEX(&ret_buf->sesSem);
+		write_unlock(&GlobalSMBSeslock);
+	}
+	return ret_buf;
+}
+
+void
+sesInfoFree(struct cifsSesInfo *buf_to_free)
+{
+	if (buf_to_free == NULL) {
+		cFYI(1, ("Null buffer passed to sesInfoFree"));
+		return;
+	}
+
+	write_lock(&GlobalSMBSeslock);
+	atomic_dec(&sesInfoAllocCount);
+	list_del(&buf_to_free->cifsSessionList);
+	write_unlock(&GlobalSMBSeslock);
+	kfree(buf_to_free->serverOS);
+	kfree(buf_to_free->serverDomain);
+	kfree(buf_to_free->serverNOS);
+	kfree(buf_to_free->password);
+	kfree(buf_to_free);
+}
+
+struct cifsTconInfo *
+tconInfoAlloc(void)
+{
+	struct cifsTconInfo *ret_buf;
+	ret_buf =
+	    (struct cifsTconInfo *) kmalloc(sizeof (struct cifsTconInfo),
+					    GFP_KERNEL);
+	if (ret_buf) {
+		memset(ret_buf, 0, sizeof (struct cifsTconInfo));
+		write_lock(&GlobalSMBSeslock);
+		atomic_inc(&tconInfoAllocCount);
+		list_add(&ret_buf->cifsConnectionList,
+			 &GlobalTreeConnectionList);
+		ret_buf->tidStatus = CifsNew;
+		INIT_LIST_HEAD(&ret_buf->openFileList);
+		init_MUTEX(&ret_buf->tconSem);
+#ifdef CONFIG_CIFS_STATS
+		spin_lock_init(&ret_buf->stat_lock);
+#endif
+		write_unlock(&GlobalSMBSeslock);
+	}
+	return ret_buf;
+}
+
+void
+tconInfoFree(struct cifsTconInfo *buf_to_free)
+{
+	if (buf_to_free == NULL) {
+		cFYI(1, ("Null buffer passed to tconInfoFree"));
+		return;
+	}
+	write_lock(&GlobalSMBSeslock);
+	atomic_dec(&tconInfoAllocCount);
+	list_del(&buf_to_free->cifsConnectionList);
+	write_unlock(&GlobalSMBSeslock);
+	kfree(buf_to_free->nativeFileSystem);
+	kfree(buf_to_free);
+}
+
+struct smb_hdr *
+cifs_buf_get(void)
+{
+	struct smb_hdr *ret_buf = NULL;
+
+/* We could use the negotiated size instead of max_msgsize, but it may
+   be more efficient to always allocate the same size, albeit slightly
+   larger than necessary; maxbuffersize defaults to this and cannot be
+   bigger. */
+	ret_buf =
+	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+
+	/* clear the first few header bytes */
+	/* for most paths, more is cleared in header_assemble */
+	if (ret_buf) {
+		memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
+		atomic_inc(&bufAllocCount);
+	}
+
+	return ret_buf;
+}
+
+void
+cifs_buf_release(void *buf_to_free)
+{
+
+	if (buf_to_free == NULL) {
+		/* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/
+		return;
+	}
+	mempool_free(buf_to_free,cifs_req_poolp);
+
+	atomic_dec(&bufAllocCount);
+	return;
+}
+
+struct smb_hdr *
+cifs_small_buf_get(void)
+{
+	struct smb_hdr *ret_buf = NULL;
+
+/* We could use the negotiated size instead of max_msgsize, but it may
+   be more efficient to always allocate the same size, albeit slightly
+   larger than necessary; maxbuffersize defaults to this and cannot be
+   bigger. */
+	ret_buf =
+	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	if (ret_buf) {
+	/* No need to clear memory here, cleared in header assemble */
+	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+		atomic_inc(&smBufAllocCount);
+	}
+	return ret_buf;
+}
+
+void
+cifs_small_buf_release(void *buf_to_free)
+{
+
+	if (buf_to_free == NULL) {
+		cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
+		return;
+	}
+	mempool_free(buf_to_free,cifs_sm_req_poolp);
+
+	atomic_dec(&smBufAllocCount);
+	return;
+}
+
+void
+header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
+		const struct cifsTconInfo *treeCon, int word_count
+		/* length of fixed section (word count) in two byte units  */)
+{
+	struct list_head* temp_item;
+	struct cifsSesInfo * ses;
+	char *temp = (char *) buffer;
+
+	memset(temp,0,MAX_CIFS_HDR_SIZE);
+
+	buffer->smb_buf_length =
+	    (2 * word_count) + sizeof (struct smb_hdr) -
+	    4 /*  RFC 1001 length field does not count */  +
+	    2 /* for bcc field itself */ ;
+	/* Note that this is the only network field that has to be converted to big endian and it is done just before we send it */
+
+	buffer->Protocol[0] = 0xFF;
+	buffer->Protocol[1] = 'S';
+	buffer->Protocol[2] = 'M';
+	buffer->Protocol[3] = 'B';
+	buffer->Command = smb_command;
+	buffer->Flags = 0x00;	/* case sensitive */
+	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
+	buffer->Pid = cpu_to_le16((__u16)current->tgid);
+	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
+	spin_lock(&GlobalMid_Lock);
+	GlobalMid++;
+	buffer->Mid = GlobalMid;
+	spin_unlock(&GlobalMid_Lock);
+	if (treeCon) {
+		buffer->Tid = treeCon->tid;
+		if (treeCon->ses) {
+			if (treeCon->ses->capabilities & CAP_UNICODE)
+				buffer->Flags2 |= SMBFLG2_UNICODE;
+			if (treeCon->ses->capabilities & CAP_STATUS32) {
+				buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+			}
+
+			buffer->Uid = treeCon->ses->Suid;	/* always in LE format */
+			if(multiuser_mount != 0) {
+		/*
+		 * For the multiuser case, there are a few technically
+		 * possible mechanisms to match the local linux user (uid)
+		 * to a valid remote smb user (smb_uid):
+		 *	1) Query Winbind (or another local pam/nss daemon)
+		 *	   for userid/password/logon_domain or credential
+		 *	2) Query Winbind for the uid to sid to username
+		 *	   mapping and see if we have a matching password
+		 *	   for an existing session for that user, perhaps
+		 *	   getting the password by adding a new pam_cifs
+		 *	   module that stores passwords so that the cifs
+		 *	   vfs can get at them for all logged-on users
+		 *	3) (Which is the mechanism we have chosen)
+		 *	   Search through sessions to the same server for
+		 *	   a match on the uid that was passed in on mount
+		 *	   with the current process's uid (or euid?) and
+		 *	   use that smb uid.  If no existing smb session
+		 *	   for that uid is found, use the default smb
+		 *	   session, i.e. the smb session for the mounted
+		 *	   volume, which is the same as would be used if
+		 *	   the multiuser mount flag were disabled.
+		 */
+
+		/*  BB Add support for establishing new tCon and SMB Session  */
+		/*      with userid/password pairs found on the smb session   */ 
+		/*	for other target tcp/ip addresses 		BB    */
+				if(current->uid != treeCon->ses->linux_uid) {
+					cFYI(1,("Multiuser mode and UID did not match tcon uid "));
+					read_lock(&GlobalSMBSeslock);
+					list_for_each(temp_item, &GlobalSMBSessionList) {
+						ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList);
+						if(ses->linux_uid == current->uid) {
+							if(ses->server == treeCon->ses->server) {
+								cFYI(1,("found matching uid substitute right smb_uid"));  
+								buffer->Uid = ses->Suid;
+								break;
+							} else {
+								/* BB eventually call cifs_setup_session here */
+								cFYI(1,("local UID found but smb sess with this server does not exist"));  
+							}
+						}
+					}
+					read_unlock(&GlobalSMBSeslock);
+				}
+			}
+		}
+		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
+			buffer->Flags2 |= SMBFLG2_DFS;
+		if((treeCon->ses) && (treeCon->ses->server))
+			if(treeCon->ses->server->secMode & 
+			  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+	}
+
+/*  endian conversion of flags is now done just before sending */
+	buffer->WordCount = (char) word_count;
+	return;
+}
+
+int
+checkSMBhdr(struct smb_hdr *smb, __u16 mid)
+{
+	/* Make sure that this really is an SMB, that it is a response, 
+	   and that the message ids match */
+	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) && 
+		(mid == smb->Mid)) {    
+		if(smb->Flags & SMBFLG_RESPONSE)
+			return 0;                    
+		else {        
+		/* only one valid case where server sends us request */
+			if(smb->Command == SMB_COM_LOCKING_ANDX)
+				return 0;
+			else
+				cERROR(1, ("Received request, not response"));
+		}
+	} else { /* bad signature or mid */
+		if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
+			cERROR(1,
+			       ("Bad protocol string signature header %x ",
+				*(unsigned int *) smb->Protocol));
+		if (mid != smb->Mid)
+			cERROR(1, ("Mids do not match"));
+	}
+	cERROR(1, ("bad smb detected. The Mid=%d", smb->Mid));
+	return 1;
+}
+
+int
+checkSMB(struct smb_hdr *smb, __u16 mid, int length)
+{
+	__u32 len = be32_to_cpu(smb->smb_buf_length);
+	cFYI(0,
+	     ("Entering checkSMB with Length: %x, smb_buf_length: %x ",
+	      length, len));
+	if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
+	    (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
+		if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
+			if (((unsigned int)length >= 
+				sizeof (struct smb_hdr) - 1)
+			    && (smb->Status.CifsError != 0)) {
+				smb->WordCount = 0;
+				return 0;	/* some error cases do not return wct and bcc */
+			} else {
+				cERROR(1, ("Length less than smb header size"));
+			}
+
+		}
+		if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
+			cERROR(1,
+			       ("smb_buf_length greater than MaxBufSize"));
+		cERROR(1,
+		       ("bad smb detected. Illegal length. The mid=%d",
+			smb->Mid));
+		return 1;
+	}
+
+	if (checkSMBhdr(smb, mid))
+		return 1;
+
+	if ((4 + len != smbCalcSize(smb))
+	    || (4 + len != (unsigned int)length)) {
+		return 0;
+	} else {
+		cERROR(1, ("smbCalcSize %x ", smbCalcSize(smb)));
+		cERROR(1,
+		       ("bad smb size detected. The Mid=%d", smb->Mid));
+		return 1;
+	}
+}
+
+int
+is_valid_oplock_break(struct smb_hdr *buf)
+{
+	struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)buf;
+	struct list_head *tmp;
+	struct list_head *tmp1;
+	struct cifsTconInfo *tcon;
+	struct cifsFileInfo *netfile;
+
+	cFYI(1,("Checking for oplock break or dnotify response"));
+	if((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
+	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
+		struct smb_com_transaction_change_notify_rsp * pSMBr =
+			(struct smb_com_transaction_change_notify_rsp *)buf;
+		struct file_notify_information * pnotify;
+		__u32 data_offset = 0;
+		if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+			data_offset = le32_to_cpu(pSMBr->DataOffset);
+
+			pnotify = (struct file_notify_information *)((char *)&pSMBr->hdr.Protocol
+				+ data_offset);
+			cFYI(1,("dnotify on %s with action: 0x%x",pnotify->FileName,
+				pnotify->Action));  /* BB removeme BB */
+	             /*   cifs_dump_mem("Received notify Data is: ",buf,sizeof(struct smb_hdr)+60); */
+			return TRUE;
+		}
+		if(pSMBr->hdr.Status.CifsError) {
+			cFYI(1, ("notify err 0x%x", pSMBr->hdr.Status.CifsError));
+			return TRUE;
+		}
+		return FALSE;
+	}  
+	if(pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
+		return FALSE;
+	if(pSMB->hdr.Flags & SMBFLG_RESPONSE) {
+		/* no sense logging error on invalid handle on oplock
+		   break - harmless race between close request and oplock
+		   break response is expected from time to time writing out
+		   large dirty files cached on the client */
+		if ((NT_STATUS_INVALID_HANDLE) == 
+		   le32_to_cpu(pSMB->hdr.Status.CifsError)) { 
+			cFYI(1,("invalid handle on oplock break"));
+			return TRUE;
+		} else if (ERRbadfid == 
+		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
+			return TRUE;	  
+		} else {
+			return FALSE; /* on valid oplock brk we get "request" */
+		}
+	}
+	if(pSMB->hdr.WordCount != 8)
+		return FALSE;
+
+	cFYI(1, ("oplock type 0x%x level 0x%x", pSMB->LockType, pSMB->OplockLevel));
+	if(!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
+		return FALSE;    
+
+	/* look up tcon based on tid & uid */
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &GlobalTreeConnectionList) {
+		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
+		if (tcon->tid == buf->Tid) {
+#ifdef CONFIG_CIFS_STATS
+			atomic_inc(&tcon->num_oplock_brks);
+#endif
+			list_for_each(tmp1,&tcon->openFileList){
+				netfile = list_entry(tmp1,struct cifsFileInfo,tlist);
+				if(pSMB->Fid == netfile->netfid) {
+					struct cifsInodeInfo *pCifsInode;
+					read_unlock(&GlobalSMBSeslock);
+					cFYI(1,("Matching file id, processing oplock break"));
+					pCifsInode = 
+						CIFS_I(netfile->pInode);
+					pCifsInode->clientCanCacheAll = FALSE;
+					if(pSMB->OplockLevel == 0)
+						pCifsInode->clientCanCacheRead = FALSE;
+					pCifsInode->oplockPending = TRUE;
+					AllocOplockQEntry(netfile->pInode, netfile->netfid, tcon);
+					cFYI(1,("about to wake up oplock thd"));
+					wake_up_process(oplockThread);               
+					return TRUE;
+				}
+			}
+			read_unlock(&GlobalSMBSeslock);
+			cFYI(1,("No matching file for oplock break on connection"));
+			return TRUE;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	cFYI(1,("Can not process oplock break for non-existent connection"));
+	return TRUE;
+}
+
+void
+dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
+{
+	int i, j;
+	char debug_line[17];
+	unsigned char *buffer;
+
+	if (traceSMB == 0)
+		return;
+
+	buffer = (unsigned char *) smb_buf;
+	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
+		if (i % 8 == 0) {	/* we have reached the beginning of line  */
+			printk(KERN_DEBUG "| ");
+			j = 0;
+		}
+		printk("%0#4x ", buffer[i]);
+		debug_line[2 * j] = ' ';
+		if (isprint(buffer[i]))
+			debug_line[1 + (2 * j)] = buffer[i];
+		else
+			debug_line[1 + (2 * j)] = '_';
+
+		if (i % 8 == 7) {	/* we have reached end of line, time to print ascii */
+			debug_line[16] = 0;
+			printk(" | %s\n", debug_line);
+		}
+	}
+	for (; j < 8; j++) {
+		printk("     ");
+		debug_line[2 * j] = ' ';
+		debug_line[1 + (2 * j)] = ' ';
+	}
+	printk( " | %s\n", debug_line);
+	return;
+}
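
cifs_buf_get()/cifs_buf_release() (and their small-buffer twins) are intended to be used strictly in pairs around a single request/response, with header_assemble() filling in the SMB header before the send. A minimal sketch of the pairing (illustrative only; the send/receive step is elided):

	struct smb_hdr *buf = cifs_buf_get();

	if (buf == NULL)
		return -ENOMEM;
	/* ... header_assemble(buf, ...), send the request,
	   parse the response in place ... */
	cifs_buf_release(buf);
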
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
new file mode 100644
index 0000000..4e34c89
--- /dev/null
+++ b/fs/cifs/netmisc.c
@@ -0,0 +1,904 @@
+/*
+ *   fs/cifs/netmisc.c
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ * 
+ *   Error mapping routines from Samba libsmb/errormap.c
+ *   Copyright (C) Andrew Tridgell 2001
+ *
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <asm/div64.h>
+#include <asm/byteorder.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "smberr.h"
+#include "cifs_debug.h"
+#include "nterr.h"
+
+struct smb_to_posix_error {
+	__u16 smb_err;
+	int posix_code;
+};
+
+static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
+	{ERRbadfunc, -EINVAL},
+	{ERRbadfile, -ENOENT},
+	{ERRbadpath, -ENOTDIR},
+	{ERRnofids, -EMFILE},
+	{ERRnoaccess, -EACCES},
+	{ERRbadfid, -EBADF},
+	{ERRbadmcb, -EIO},
+	{ERRnomem, -ENOMEM},
+	{ERRbadmem, -EFAULT},
+	{ERRbadenv, -EFAULT},
+	{ERRbadformat, -EINVAL},
+	{ERRbadaccess, -EACCES},
+	{ERRbaddata, -EIO},
+	{ERRbaddrive, -ENXIO},
+	{ERRremcd, -EACCES},
+	{ERRdiffdevice, -EXDEV},
+	{ERRnofiles, -ENOENT},
+	{ERRbadshare, -ETXTBSY},
+	{ERRlock, -EACCES},
+	{ERRunsup, -EINVAL},
+	{ERRnosuchshare,-ENXIO},
+	{ERRfilexists, -EEXIST},
+	{ERRinvparm, -EINVAL},
+	{ERRdiskfull, -ENOSPC},
+	{ERRinvname, -ENOENT},
+	{ERRinvlevel,-EOPNOTSUPP},
+	{ERRdirnotempty, -ENOTEMPTY},
+	{ERRnotlocked, -ENOLCK},
+	{ERRalreadyexists, -EEXIST},
+	{ERRmoredata, -EOVERFLOW},
+	{ERReasnotsupported,-EOPNOTSUPP},
+	{ErrQuota, -EDQUOT},
+	{ErrNotALink, -ENOLINK},
+	{ERRnetlogonNotStarted,-ENOPROTOOPT},
+	{0, 0}
+};
+
+static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
+	{ERRerror, -EIO},
+	{ERRbadpw, -EPERM},
+	{ERRbadtype, -EREMOTE},
+	{ERRaccess, -EACCES},
+	{ERRinvtid, -ENXIO},
+	{ERRinvnetname, -ENODEV},
+	{ERRinvdevice, -ENXIO},
+	{ERRqfull, -ENOSPC},
+	{ERRqtoobig, -ENOSPC},
+	{ERRqeof, -EIO},
+	{ERRinvpfid, -EBADF},
+	{ERRsmbcmd, -EBADRQC},
+	{ERRsrverror, -EIO},
+	{ERRbadBID, -EIO},
+	{ERRfilespecs, -EINVAL},
+	{ERRbadLink, -EIO},
+	{ERRbadpermits, -EINVAL},
+	{ERRbadPID, -ESRCH},
+	{ERRsetattrmode, -EINVAL},
+	{ERRpaused, -EHOSTDOWN},
+	{ERRmsgoff, -EHOSTDOWN},
+	{ERRnoroom, -ENOSPC},
+	{ERRrmuns, -EUSERS},
+	{ERRtimeout, -ETIME},
+	{ERRnoresource, -ENOBUFS},
+	{ERRtoomanyuids, -EUSERS},
+	{ERRbaduid, -EACCES},
+	{ERRusempx, -EIO},
+	{ERRusestd, -EIO},
+	{ERR_NOTIFY_ENUM_DIR, -ENOBUFS},
+	{ERRaccountexpired, -EACCES},
+	{ERRbadclient, -EACCES},
+	{ERRbadLogonTime, -EACCES},
+	{ERRpasswordExpired, -EACCES},
+	{ERRnosupport, -EINVAL},
+	{0, 0}
+};
+
+static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
+	{0, 0}
+};
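
Each of the tables above is terminated by a {0, 0} sentinel, so the mapping code can scan it linearly until it either finds the SMB error or runs off the end. A minimal sketch of that kind of lookup (illustrative only, not necessarily the exact helper used later in this file):

	static int map_smb_err(const struct smb_to_posix_error *map, __u16 err)
	{
		int i;

		for (i = 0; map[i].smb_err != 0; i++)
			if (map[i].smb_err == err)
				return map[i].posix_code;
		return -EIO;	/* fallback when no entry matches */
	}
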
+
+/* Convert string containing dotted ip address to binary form */
+/* returns 0 if invalid address */
+
+/* BB add address family, change rc to status flag and return union or for ipv6 */
+/*  will need parent to call something like inet_pton to convert ipv6 address  BB */
+int
+cifs_inet_pton(int address_family, char *cp,void *dst)
+{
+	struct in_addr address;
+	int value;
+	int digit;
+	int i;
+	char temp;
+	char bytes[4];
+	char *end = bytes;
+	static const int addr_class_max[4] =
+	    { 0xffffffff, 0xffffff, 0xffff, 0xff };
+
+	if(address_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	for (i = 0; i < 4; i++) {
+		bytes[i] = 0;
+	}
+
+	temp = *cp;
+
+	while (TRUE) {
+		if (!isdigit(temp))
+			return 0;
+
+		value = 0;
+		digit = 0;
+		for (;;) {
+			if (isascii(temp) && isdigit(temp)) {
+				value = (value * 10) + temp - '0';
+				temp = *++cp;
+				digit = 1;
+			} else
+				break;
+		}
+
+		if (temp == '.') {
+			if ((end > bytes + 2) || (value > 255))
+				return 0;
+			*end++ = value;
+			temp = *++cp;
+		} else if (temp == ':') {
+			cFYI(1,("IPv6 addresses not supported for CIFS mounts yet"));
+			return -1;
+		} else
+			break;
+	}
+
+	/* check for last characters */
+	if (temp != '\0' && (!isascii(temp) || !isspace(temp)))
+		if (temp != '\\') {
+			if (temp != '/')
+				return 0;
+			else
+				(*cp = '\\');	/* switch the slash the expected way */
+		}
+	if (value > addr_class_max[end - bytes])
+		return 0;
+
+	address.s_addr = *((__be32 *) bytes) | htonl(value);
+	*((__be32 *)dst) = address.s_addr;
+	return 1; /* success */
+}
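
A caller converts a dotted-quad string with the helper above roughly as follows; it returns 1 on success, 0 for a malformed address, and -EAFNOSUPPORT for any family other than AF_INET (illustrative sketch only):

	char ipaddr[] = "192.168.1.10";	/* writable buffer, since the parser may touch it */
	__be32 addr;

	if (cifs_inet_pton(AF_INET, ipaddr, &addr) <= 0)
		return -EINVAL;		/* not a usable IPv4 address */
	/* addr now holds the address in network byte order */
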
+
+/*****************************************************************************
+convert a NT status code to a dos class/code
+ *****************************************************************************/
+/* NT status -> dos error map */
+static const struct {
+	__u8 dos_class;
+	__u16 dos_code;
+	__u32 ntstatus;
+} ntstatus_to_dos_map[] = {
+	{
+	ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, {
+	ERRDOS, 87, NT_STATUS_INVALID_INFO_CLASS}, {
+	ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, {
+	ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, {
+	ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, {
+	ERRDOS, 87, NT_STATUS_INVALID_CID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, {
+	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, {
+	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, {
+	ERRDOS, 38, NT_STATUS_END_OF_FILE}, {
+	ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, {
+	ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
+	ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK 
+	 during the session setup } */
+	{
+	ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
+	ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, {
+	ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, {
+	ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, {
+	ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, {
+	ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, {
+	ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 
+	 during the session setup }   */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
+	ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, {
+	ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, {
+	ERRDOS, 158, NT_STATUS_NOT_LOCKED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, {
+	ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {
+	ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
+	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {	/* mapping changed since shell does lookup on * and expects file not found */
+	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
+	ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
+	ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, {
+	ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, {
+	ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, {
+	ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, {
+	ERRDOS, 23, NT_STATUS_DATA_ERROR}, {
+	ERRDOS, 23, NT_STATUS_CRC_ERROR}, {
+	ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, {
+	ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, {
+	ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, {
+	ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, {
+	ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {
+	ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, {
+	ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, {
+	ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, {
+	ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, {
+	ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, {
+	ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, {
+	ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, {
+	ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, {
+	ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, {
+	ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, {
+	ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, {
+	ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, {
+	ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE 
+	 during the session setup } */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE 
+	 during the session setup } */
+	{
+	ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, {
+	ERRSRV, 2241, NT_STATUS_INVALID_LOGON_HOURS}, {
+	ERRSRV, 2240, NT_STATUS_INVALID_WORKSTATION}, {
+	ERRSRV, 2242, NT_STATUS_PASSWORD_EXPIRED}, {
+	ERRSRV, 2239, NT_STATUS_ACCOUNT_DISABLED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, {
+	ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, {
+	ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, {
+	ERRDOS, 112, NT_STATUS_DISK_FULL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {
+	ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, {
+	ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, {
+	ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, {
+	ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, {
+	ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {
+	ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, {
+	ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES 
+	 during the session setup } */
+	{
+	ERRDOS, ERRnomem, NT_STATUS_INSUFFICIENT_RESOURCES}, {
+	ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
+	ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, {
+	ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, {
+	ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, {
+	ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, {
+	ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, {
+	ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, {
+	ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, {
+	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, {
+	ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, {
+	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, {
+	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, {
+	ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, {
+	ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, {
+	ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, {
+	ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, {
+	ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, {
+	ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, {
+	ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, {
+	ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, {
+	ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, {
+	ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, {
+	ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {
+	ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, {
+	ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, {
+	ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, {
+	ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, {
+	ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, {
+	ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, {
+	ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, {
+	ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, {
+	ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, {
+	ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, {
+	ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, {
+	ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, {
+	ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, {
+	ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, {
+	ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, {
+	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {
+	ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, {
+	ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, {
+	ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, {
+	ERRDOS, 203, 0xc0000100}, {
+	ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, {
+	ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, {
+	ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, {
+	ERRDOS, 2401, NT_STATUS_FILES_OPEN}, {
+	ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {
+	ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, {
+	ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, {
+	ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, {
+	ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, {
+	ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, {
+	ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, {
+	ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, {
+	ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, {
+	ERRDOS, 59, NT_STATUS_LINK_FAILED}, {
+	ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, {
+	ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, {
+	ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, {
+	ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, {
+	ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {
+	ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, {
+	ERRHRD, ERRgeneral, 0xc000016e}, {
+	ERRHRD, ERRgeneral, 0xc000016f}, {
+	ERRHRD, ERRgeneral, 0xc0000170}, {
+	ERRHRD, ERRgeneral, 0xc0000171}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, {
+	ERRHRD, ERRgeneral, 0xc0000179}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, {
+	ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, {
+	ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, {
+	ERRDOS, 19, NT_STATUS_TOO_LATE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
+	 during the session setup } */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {
+	ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, {
+	ERRSRV, 2239, NT_STATUS_ACCOUNT_EXPIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
+/*	{ This NT error code was 'squashed'
+	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
+	 during the session setup } */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, {
+	ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {
+	ERRDOS, ERRnomem, NT_STATUS_INSUFF_SERVER_RESOURCES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, {
+	ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, {
+	ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, {
+	ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, {
+	ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, {
+	ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {
+	ERRSRV, 2242, NT_STATUS_PASSWORD_MUST_CHANGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RETRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, {
+	ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {
+	ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, {
+	ERRHRD, ERRgeneral, 0xc000024a}, {
+	ERRHRD, ERRgeneral, 0xc000024b}, {
+	ERRHRD, ERRgeneral, 0xc000024c}, {
+	ERRHRD, ERRgeneral, 0xc000024d}, {
+	ERRHRD, ERRgeneral, 0xc000024e}, {
+	ERRHRD, ERRgeneral, 0xc000024f}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, {
+	ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, {
+	ERRHRD, ERRgeneral, 0xc000025d}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {
+	ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {
+	ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {
+	ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LINKS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, {
+	ERRDOS, 21, 0xc000026e}, {
+	ERRDOS, 161, 0xc0000281}, {
+	ERRDOS, ERRnoaccess, 0xc000028a}, {
+	ERRDOS, ERRnoaccess, 0xc000028b}, {
+	ERRHRD, ERRgeneral, 0xc000028c}, {
+	ERRDOS, ERRnoaccess, 0xc000028d}, {
+	ERRDOS, ERRnoaccess, 0xc000028e}, {
+	ERRDOS, ERRnoaccess, 0xc000028f}, {
+	ERRDOS, ERRnoaccess, 0xc0000290}, {
+	ERRDOS, ERRbadfunc, 0xc000029c}, {
+	ERRDOS, ERRinvlevel, 0x007c0001}, };
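+
+/*
+ * The bare numeric values above are Win32/DOS error numbers (for example,
+ * 87 is ERROR_INVALID_PARAMETER and 193 is ERROR_BAD_EXE_FORMAT).
+ */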
+
+/*****************************************************************************
+ Print an error message from the status code
+ *****************************************************************************/
+static void
+cifs_print_status(__u32 status_code)
+{
+	int idx = 0;
+
+	while (nt_errs[idx].nt_errstr != NULL) {
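+		/* only the low 24 bits are compared, so the 0xC0000000
+		   severity prefix does not have to match exactly */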
+		if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
+		    (status_code & 0xFFFFFF)) {
+			printk(KERN_NOTICE "Status code returned 0x%08x %s\n",
+				   status_code,nt_errs[idx].nt_errstr);
+		}
+		idx++;
+	}
+	return;
+}
+
+
+static void
+ntstatus_to_dos(__u32 ntstatus, __u8 * eclass, __u16 * ecode)
+{
+	int i;
+	if (ntstatus == 0) {
+		*eclass = 0;
+		*ecode = 0;
+		return;
+	}
+	for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) {
+		if (ntstatus == ntstatus_to_dos_map[i].ntstatus) {
+			*eclass = ntstatus_to_dos_map[i].dos_class;
+			*ecode = ntstatus_to_dos_map[i].dos_code;
+			return;
+		}
+	}
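+	/* not in the table - fall back to a generic hardware error */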
+	*eclass = ERRHRD;
+	*ecode = ERRgeneral;
+}
+
+int
+map_smb_to_linux_error(struct smb_hdr *smb)
+{
+	unsigned int i;
+	int rc = -EIO;		/* if transport error, smb error may not be set */
+	__u8 smberrclass;
+	__u16 smberrcode;
+
+	/* BB if NT Status codes - map NT BB */
+
+	/* old style smb error codes */
+	if (smb->Status.CifsError == 0)
+		return 0;
+
+	if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
+		/* translate the newer STATUS codes to old style errors
+		   and then to POSIX errors */
+		__u32 err = le32_to_cpu(smb->Status.CifsError);
+		if (cifsFYI)
+			cifs_print_status(err);
+		ntstatus_to_dos(err, &smberrclass, &smberrcode);
+	} else {
+		smberrclass = smb->Status.DosError.ErrorClass;
+		smberrcode = le16_to_cpu(smb->Status.DosError.Error);
+	}
+
+	/* old style errors */
+
+	/* DOS class smb error codes - map DOS */
+	if (smberrclass == ERRDOS) {
+		/* one byte field, no need to byte reverse */
+		for (i = 0;
+		     i < sizeof(mapping_table_ERRDOS) /
+			 sizeof(struct smb_to_posix_error); i++) {
+			if (mapping_table_ERRDOS[i].smb_err == 0)
+				break;
+			else if (mapping_table_ERRDOS[i].smb_err == smberrcode) {
+				rc = mapping_table_ERRDOS[i].posix_code;
+				break;
+			}
+			/* else try the next error mapping to see if it matches */
+		}
+	} else if (smberrclass == ERRSRV) {
+		/* server class of error codes */
+		for (i = 0;
+		     i < sizeof(mapping_table_ERRSRV) /
+			 sizeof(struct smb_to_posix_error); i++) {
+			if (mapping_table_ERRSRV[i].smb_err == 0)
+				break;
+			else if (mapping_table_ERRSRV[i].smb_err == smberrcode) {
+				rc = mapping_table_ERRSRV[i].posix_code;
+				break;
+			}
+			/* else try the next error mapping to see if it matches */
+		}
+	}
+	/* else ERRHRD class errors or junk - return -EIO */
+
+	cFYI(1, ("Mapping smb error code %d to POSIX err %d", smberrcode, rc));
+
+	/* generic corrective action, e.g. reconnect the SMB session on
+	   ERRbaduid, could be added here */
+
+	return rc;
+}
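+
+/*
+ * Illustrative use (a sketch only, not taken from this file): a caller that
+ * has just received a response might do
+ *
+ *	rc = map_smb_to_linux_error((struct smb_hdr *)in_buf);
+ *	if (rc)
+ *		return rc;
+ *
+ * where in_buf stands for whatever buffer the transport layer filled in.
+ */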
+
+/*
+ * calculate the size of the SMB message based on the fixed header
+ * portion, the number of word parameters and the data portion of the message
+ */
+unsigned int
+smbCalcSize(struct smb_hdr *ptr)
+{
+	return (sizeof (struct smb_hdr) + (2 * ptr->WordCount) +
+		BCC(ptr));
+}
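+
+/*
+ * For example, a message with WordCount == 10 and a byte count (BCC) of 50
+ * is sizeof(struct smb_hdr) + 2 * 10 + 50 bytes long.
+ */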
+
+/* The following are taken from fs/ntfs/util.c */
+
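+/*
+ * Number of 100-nanosecond units between the NT epoch (1601-01-01) and the
+ * Unix epoch (1970-01-01): 369 years, i.e. 369 * 365 days plus 89 leap days.
+ */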
+#define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000)
+
+/*
+ * Convert NT UTC (based 1601-01-01, in hundred-nanosecond units)
+ * into Unix UTC (based 1970-01-01, in seconds).
+ */
+struct timespec
+cifs_NTtimeToUnix(u64 ntutc)
+{
+	struct timespec ts;
+	/* BB what about the timezone? BB */
+
+	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+	u64 t;
+
+	t = ntutc - NTFS_TIME_OFFSET;
+	ts.tv_nsec = do_div(t, 10000000) * 100;
+	ts.tv_sec = t;
+	return ts;
+}
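+
+/*
+ * For example, ntutc == NTFS_TIME_OFFSET maps to tv_sec == 0, tv_nsec == 0
+ * (the Unix epoch), and every further 10,000,000 units adds one second.
+ */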
+
+/* Convert the Unix UTC into NT UTC. */
+u64
+cifs_UnixTimeToNT(struct timespec t)
+{
+	/* Convert to 100ns intervals and then add the NTFS time offset. */
+	return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
+}
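+
+/*
+ * Note that NT time has only 100ns resolution: tv_nsec is truncated to a
+ * multiple of 100 here, so a Unix -> NT -> Unix round trip is exact only to
+ * that granularity.
+ */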
diff --git a/fs/cifs/nterr.c b/fs/cifs/nterr.c
new file mode 100644
index 0000000..4da50cd
--- /dev/null
+++ b/fs/cifs/nterr.c
@@ -0,0 +1,687 @@
+/* 
+ *  Unix SMB/Netbios implementation.
+ *  Version 1.9.
+ *  RPC Pipe client / server routines
+ *  Copyright (C) Luke Kenneth Casson Leighton 1997-2001.
+ *  
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *  
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *  
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* NT error codes - see nterr.h */
+#include <linux/types.h>
+#include <linux/fs.h>
+#include "nterr.h"
+
+const struct nt_err_code_struct nt_errs[] = {
+	{"NT_STATUS_OK", NT_STATUS_OK},
+	{"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL},
+	{"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED},
+	{"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS},
+	{"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH},
+	{"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION},
+	{"STATUS_BUFFER_OVERFLOW", STATUS_BUFFER_OVERFLOW},
+	{"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR},
+	{"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA},
+	{"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE},
+	{"NT_STATUS_BAD_INITIAL_STACK", NT_STATUS_BAD_INITIAL_STACK},
+	{"NT_STATUS_BAD_INITIAL_PC", NT_STATUS_BAD_INITIAL_PC},
+	{"NT_STATUS_INVALID_CID", NT_STATUS_INVALID_CID},
+	{"NT_STATUS_TIMER_NOT_CANCELED", NT_STATUS_TIMER_NOT_CANCELED},
+	{"NT_STATUS_INVALID_PARAMETER", NT_STATUS_INVALID_PARAMETER},
+	{"NT_STATUS_NO_SUCH_DEVICE", NT_STATUS_NO_SUCH_DEVICE},
+	{"NT_STATUS_NO_SUCH_FILE", NT_STATUS_NO_SUCH_FILE},
+	{"NT_STATUS_INVALID_DEVICE_REQUEST",
+	 NT_STATUS_INVALID_DEVICE_REQUEST},
+	{"NT_STATUS_END_OF_FILE", NT_STATUS_END_OF_FILE},
+	{"NT_STATUS_WRONG_VOLUME", NT_STATUS_WRONG_VOLUME},
+	{"NT_STATUS_NO_MEDIA_IN_DEVICE", NT_STATUS_NO_MEDIA_IN_DEVICE},
+	{"NT_STATUS_UNRECOGNIZED_MEDIA", NT_STATUS_UNRECOGNIZED_MEDIA},
+	{"NT_STATUS_NONEXISTENT_SECTOR", NT_STATUS_NONEXISTENT_SECTOR},
+	{"NT_STATUS_MORE_PROCESSING_REQUIRED",
+	 NT_STATUS_MORE_PROCESSING_REQUIRED},
+	{"NT_STATUS_NO_MEMORY", NT_STATUS_NO_MEMORY},
+	{"NT_STATUS_CONFLICTING_ADDRESSES",
+	 NT_STATUS_CONFLICTING_ADDRESSES},
+	{"NT_STATUS_NOT_MAPPED_VIEW", NT_STATUS_NOT_MAPPED_VIEW},
+	{"NT_STATUS_UNABLE_TO_FREE_VM", NT_STATUS_UNABLE_TO_FREE_VM},
+	{"NT_STATUS_UNABLE_TO_DELETE_SECTION",
+	 NT_STATUS_UNABLE_TO_DELETE_SECTION},
+	{"NT_STATUS_INVALID_SYSTEM_SERVICE",
+	 NT_STATUS_INVALID_SYSTEM_SERVICE},
+	{"NT_STATUS_ILLEGAL_INSTRUCTION", NT_STATUS_ILLEGAL_INSTRUCTION},
+	{"NT_STATUS_INVALID_LOCK_SEQUENCE",
+	 NT_STATUS_INVALID_LOCK_SEQUENCE},
+	{"NT_STATUS_INVALID_VIEW_SIZE", NT_STATUS_INVALID_VIEW_SIZE},
+	{"NT_STATUS_INVALID_FILE_FOR_SECTION",
+	 NT_STATUS_INVALID_FILE_FOR_SECTION},
+	{"NT_STATUS_ALREADY_COMMITTED", NT_STATUS_ALREADY_COMMITTED},
+	{"NT_STATUS_ACCESS_DENIED", NT_STATUS_ACCESS_DENIED},
+	{"NT_STATUS_BUFFER_TOO_SMALL", NT_STATUS_BUFFER_TOO_SMALL},
+	{"NT_STATUS_OBJECT_TYPE_MISMATCH", NT_STATUS_OBJECT_TYPE_MISMATCH},
+	{"NT_STATUS_NONCONTINUABLE_EXCEPTION",
+	 NT_STATUS_NONCONTINUABLE_EXCEPTION},
+	{"NT_STATUS_INVALID_DISPOSITION", NT_STATUS_INVALID_DISPOSITION},
+	{"NT_STATUS_UNWIND", NT_STATUS_UNWIND},
+	{"NT_STATUS_BAD_STACK", NT_STATUS_BAD_STACK},
+	{"NT_STATUS_INVALID_UNWIND_TARGET",
+	 NT_STATUS_INVALID_UNWIND_TARGET},
+	{"NT_STATUS_NOT_LOCKED", NT_STATUS_NOT_LOCKED},
+	{"NT_STATUS_PARITY_ERROR", NT_STATUS_PARITY_ERROR},
+	{"NT_STATUS_UNABLE_TO_DECOMMIT_VM",
+	 NT_STATUS_UNABLE_TO_DECOMMIT_VM},
+	{"NT_STATUS_NOT_COMMITTED", NT_STATUS_NOT_COMMITTED},
+	{"NT_STATUS_INVALID_PORT_ATTRIBUTES",
+	 NT_STATUS_INVALID_PORT_ATTRIBUTES},
+	{"NT_STATUS_PORT_MESSAGE_TOO_LONG",
+	 NT_STATUS_PORT_MESSAGE_TOO_LONG},
+	{"NT_STATUS_INVALID_PARAMETER_MIX",
+	 NT_STATUS_INVALID_PARAMETER_MIX},
+	{"NT_STATUS_INVALID_QUOTA_LOWER", NT_STATUS_INVALID_QUOTA_LOWER},
+	{"NT_STATUS_DISK_CORRUPT_ERROR", NT_STATUS_DISK_CORRUPT_ERROR},
+	{"NT_STATUS_OBJECT_NAME_INVALID", NT_STATUS_OBJECT_NAME_INVALID},
+	{"NT_STATUS_OBJECT_NAME_NOT_FOUND",
+	 NT_STATUS_OBJECT_NAME_NOT_FOUND},
+	{"NT_STATUS_OBJECT_NAME_COLLISION",
+	 NT_STATUS_OBJECT_NAME_COLLISION},
+	{"NT_STATUS_HANDLE_NOT_WAITABLE", NT_STATUS_HANDLE_NOT_WAITABLE},
+	{"NT_STATUS_PORT_DISCONNECTED", NT_STATUS_PORT_DISCONNECTED},
+	{"NT_STATUS_DEVICE_ALREADY_ATTACHED",
+	 NT_STATUS_DEVICE_ALREADY_ATTACHED},
+	{"NT_STATUS_OBJECT_PATH_INVALID", NT_STATUS_OBJECT_PATH_INVALID},
+	{"NT_STATUS_OBJECT_PATH_NOT_FOUND",
+	 NT_STATUS_OBJECT_PATH_NOT_FOUND},
+	{"NT_STATUS_OBJECT_PATH_SYNTAX_BAD",
+	 NT_STATUS_OBJECT_PATH_SYNTAX_BAD},
+	{"NT_STATUS_DATA_OVERRUN", NT_STATUS_DATA_OVERRUN},
+	{"NT_STATUS_DATA_LATE_ERROR", NT_STATUS_DATA_LATE_ERROR},
+	{"NT_STATUS_DATA_ERROR", NT_STATUS_DATA_ERROR},
+	{"NT_STATUS_CRC_ERROR", NT_STATUS_CRC_ERROR},
+	{"NT_STATUS_SECTION_TOO_BIG", NT_STATUS_SECTION_TOO_BIG},
+	{"NT_STATUS_PORT_CONNECTION_REFUSED",
+	 NT_STATUS_PORT_CONNECTION_REFUSED},
+	{"NT_STATUS_INVALID_PORT_HANDLE", NT_STATUS_INVALID_PORT_HANDLE},
+	{"NT_STATUS_SHARING_VIOLATION", NT_STATUS_SHARING_VIOLATION},
+	{"NT_STATUS_QUOTA_EXCEEDED", NT_STATUS_QUOTA_EXCEEDED},
+	{"NT_STATUS_INVALID_PAGE_PROTECTION",
+	 NT_STATUS_INVALID_PAGE_PROTECTION},
+	{"NT_STATUS_MUTANT_NOT_OWNED", NT_STATUS_MUTANT_NOT_OWNED},
+	{"NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED",
+	 NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED},
+	{"NT_STATUS_PORT_ALREADY_SET", NT_STATUS_PORT_ALREADY_SET},
+	{"NT_STATUS_SECTION_NOT_IMAGE", NT_STATUS_SECTION_NOT_IMAGE},
+	{"NT_STATUS_SUSPEND_COUNT_EXCEEDED",
+	 NT_STATUS_SUSPEND_COUNT_EXCEEDED},
+	{"NT_STATUS_THREAD_IS_TERMINATING",
+	 NT_STATUS_THREAD_IS_TERMINATING},
+	{"NT_STATUS_BAD_WORKING_SET_LIMIT",
+	 NT_STATUS_BAD_WORKING_SET_LIMIT},
+	{"NT_STATUS_INCOMPATIBLE_FILE_MAP",
+	 NT_STATUS_INCOMPATIBLE_FILE_MAP},
+	{"NT_STATUS_SECTION_PROTECTION", NT_STATUS_SECTION_PROTECTION},
+	{"NT_STATUS_EAS_NOT_SUPPORTED", NT_STATUS_EAS_NOT_SUPPORTED},
+	{"NT_STATUS_EA_TOO_LARGE", NT_STATUS_EA_TOO_LARGE},
+	{"NT_STATUS_NONEXISTENT_EA_ENTRY", NT_STATUS_NONEXISTENT_EA_ENTRY},
+	{"NT_STATUS_NO_EAS_ON_FILE", NT_STATUS_NO_EAS_ON_FILE},
+	{"NT_STATUS_EA_CORRUPT_ERROR", NT_STATUS_EA_CORRUPT_ERROR},
+	{"NT_STATUS_FILE_LOCK_CONFLICT", NT_STATUS_FILE_LOCK_CONFLICT},
+	{"NT_STATUS_LOCK_NOT_GRANTED", NT_STATUS_LOCK_NOT_GRANTED},
+	{"NT_STATUS_DELETE_PENDING", NT_STATUS_DELETE_PENDING},
+	{"NT_STATUS_CTL_FILE_NOT_SUPPORTED",
+	 NT_STATUS_CTL_FILE_NOT_SUPPORTED},
+	{"NT_STATUS_UNKNOWN_REVISION", NT_STATUS_UNKNOWN_REVISION},
+	{"NT_STATUS_REVISION_MISMATCH", NT_STATUS_REVISION_MISMATCH},
+	{"NT_STATUS_INVALID_OWNER", NT_STATUS_INVALID_OWNER},
+	{"NT_STATUS_INVALID_PRIMARY_GROUP",
+	 NT_STATUS_INVALID_PRIMARY_GROUP},
+	{"NT_STATUS_NO_IMPERSONATION_TOKEN",
+	 NT_STATUS_NO_IMPERSONATION_TOKEN},
+	{"NT_STATUS_CANT_DISABLE_MANDATORY",
+	 NT_STATUS_CANT_DISABLE_MANDATORY},
+	{"NT_STATUS_NO_LOGON_SERVERS", NT_STATUS_NO_LOGON_SERVERS},
+	{"NT_STATUS_NO_SUCH_LOGON_SESSION",
+	 NT_STATUS_NO_SUCH_LOGON_SESSION},
+	{"NT_STATUS_NO_SUCH_PRIVILEGE", NT_STATUS_NO_SUCH_PRIVILEGE},
+	{"NT_STATUS_PRIVILEGE_NOT_HELD", NT_STATUS_PRIVILEGE_NOT_HELD},
+	{"NT_STATUS_INVALID_ACCOUNT_NAME", NT_STATUS_INVALID_ACCOUNT_NAME},
+	{"NT_STATUS_USER_EXISTS", NT_STATUS_USER_EXISTS},
+	{"NT_STATUS_NO_SUCH_USER", NT_STATUS_NO_SUCH_USER},
+	{"NT_STATUS_GROUP_EXISTS", NT_STATUS_GROUP_EXISTS},
+	{"NT_STATUS_NO_SUCH_GROUP", NT_STATUS_NO_SUCH_GROUP},
+	{"NT_STATUS_MEMBER_IN_GROUP", NT_STATUS_MEMBER_IN_GROUP},
+	{"NT_STATUS_MEMBER_NOT_IN_GROUP", NT_STATUS_MEMBER_NOT_IN_GROUP},
+	{"NT_STATUS_LAST_ADMIN", NT_STATUS_LAST_ADMIN},
+	{"NT_STATUS_WRONG_PASSWORD", NT_STATUS_WRONG_PASSWORD},
+	{"NT_STATUS_ILL_FORMED_PASSWORD", NT_STATUS_ILL_FORMED_PASSWORD},
+	{"NT_STATUS_PASSWORD_RESTRICTION", NT_STATUS_PASSWORD_RESTRICTION},
+	{"NT_STATUS_LOGON_FAILURE", NT_STATUS_LOGON_FAILURE},
+	{"NT_STATUS_ACCOUNT_RESTRICTION", NT_STATUS_ACCOUNT_RESTRICTION},
+	{"NT_STATUS_INVALID_LOGON_HOURS", NT_STATUS_INVALID_LOGON_HOURS},
+	{"NT_STATUS_INVALID_WORKSTATION", NT_STATUS_INVALID_WORKSTATION},
+	{"NT_STATUS_PASSWORD_EXPIRED", NT_STATUS_PASSWORD_EXPIRED},
+	{"NT_STATUS_ACCOUNT_DISABLED", NT_STATUS_ACCOUNT_DISABLED},
+	{"NT_STATUS_NONE_MAPPED", NT_STATUS_NONE_MAPPED},
+	{"NT_STATUS_TOO_MANY_LUIDS_REQUESTED",
+	 NT_STATUS_TOO_MANY_LUIDS_REQUESTED},
+	{"NT_STATUS_LUIDS_EXHAUSTED", NT_STATUS_LUIDS_EXHAUSTED},
+	{"NT_STATUS_INVALID_SUB_AUTHORITY",
+	 NT_STATUS_INVALID_SUB_AUTHORITY},
+	{"NT_STATUS_INVALID_ACL", NT_STATUS_INVALID_ACL},
+	{"NT_STATUS_INVALID_SID", NT_STATUS_INVALID_SID},
+	{"NT_STATUS_INVALID_SECURITY_DESCR",
+	 NT_STATUS_INVALID_SECURITY_DESCR},
+	{"NT_STATUS_PROCEDURE_NOT_FOUND", NT_STATUS_PROCEDURE_NOT_FOUND},
+	{"NT_STATUS_INVALID_IMAGE_FORMAT", NT_STATUS_INVALID_IMAGE_FORMAT},
+	{"NT_STATUS_NO_TOKEN", NT_STATUS_NO_TOKEN},
+	{"NT_STATUS_BAD_INHERITANCE_ACL", NT_STATUS_BAD_INHERITANCE_ACL},
+	{"NT_STATUS_RANGE_NOT_LOCKED", NT_STATUS_RANGE_NOT_LOCKED},
+	{"NT_STATUS_DISK_FULL", NT_STATUS_DISK_FULL},
+	{"NT_STATUS_SERVER_DISABLED", NT_STATUS_SERVER_DISABLED},
+	{"NT_STATUS_SERVER_NOT_DISABLED", NT_STATUS_SERVER_NOT_DISABLED},
+	{"NT_STATUS_TOO_MANY_GUIDS_REQUESTED",
+	 NT_STATUS_TOO_MANY_GUIDS_REQUESTED},
+	{"NT_STATUS_GUIDS_EXHAUSTED", NT_STATUS_GUIDS_EXHAUSTED},
+	{"NT_STATUS_INVALID_ID_AUTHORITY", NT_STATUS_INVALID_ID_AUTHORITY},
+	{"NT_STATUS_AGENTS_EXHAUSTED", NT_STATUS_AGENTS_EXHAUSTED},
+	{"NT_STATUS_INVALID_VOLUME_LABEL", NT_STATUS_INVALID_VOLUME_LABEL},
+	{"NT_STATUS_SECTION_NOT_EXTENDED", NT_STATUS_SECTION_NOT_EXTENDED},
+	{"NT_STATUS_NOT_MAPPED_DATA", NT_STATUS_NOT_MAPPED_DATA},
+	{"NT_STATUS_RESOURCE_DATA_NOT_FOUND",
+	 NT_STATUS_RESOURCE_DATA_NOT_FOUND},
+	{"NT_STATUS_RESOURCE_TYPE_NOT_FOUND",
+	 NT_STATUS_RESOURCE_TYPE_NOT_FOUND},
+	{"NT_STATUS_RESOURCE_NAME_NOT_FOUND",
+	 NT_STATUS_RESOURCE_NAME_NOT_FOUND},
+	{"NT_STATUS_ARRAY_BOUNDS_EXCEEDED",
+	 NT_STATUS_ARRAY_BOUNDS_EXCEEDED},
+	{"NT_STATUS_FLOAT_DENORMAL_OPERAND",
+	 NT_STATUS_FLOAT_DENORMAL_OPERAND},
+	{"NT_STATUS_FLOAT_DIVIDE_BY_ZERO", NT_STATUS_FLOAT_DIVIDE_BY_ZERO},
+	{"NT_STATUS_FLOAT_INEXACT_RESULT", NT_STATUS_FLOAT_INEXACT_RESULT},
+	{"NT_STATUS_FLOAT_INVALID_OPERATION",
+	 NT_STATUS_FLOAT_INVALID_OPERATION},
+	{"NT_STATUS_FLOAT_OVERFLOW", NT_STATUS_FLOAT_OVERFLOW},
+	{"NT_STATUS_FLOAT_STACK_CHECK", NT_STATUS_FLOAT_STACK_CHECK},
+	{"NT_STATUS_FLOAT_UNDERFLOW", NT_STATUS_FLOAT_UNDERFLOW},
+	{"NT_STATUS_INTEGER_DIVIDE_BY_ZERO",
+	 NT_STATUS_INTEGER_DIVIDE_BY_ZERO},
+	{"NT_STATUS_INTEGER_OVERFLOW", NT_STATUS_INTEGER_OVERFLOW},
+	{"NT_STATUS_PRIVILEGED_INSTRUCTION",
+	 NT_STATUS_PRIVILEGED_INSTRUCTION},
+	{"NT_STATUS_TOO_MANY_PAGING_FILES",
+	 NT_STATUS_TOO_MANY_PAGING_FILES},
+	{"NT_STATUS_FILE_INVALID", NT_STATUS_FILE_INVALID},
+	{"NT_STATUS_ALLOTTED_SPACE_EXCEEDED",
+	 NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
+	{"NT_STATUS_INSUFFICIENT_RESOURCES",
+	 NT_STATUS_INSUFFICIENT_RESOURCES},
+	{"NT_STATUS_DFS_EXIT_PATH_FOUND", NT_STATUS_DFS_EXIT_PATH_FOUND},
+	{"NT_STATUS_DEVICE_DATA_ERROR", NT_STATUS_DEVICE_DATA_ERROR},
+	{"NT_STATUS_DEVICE_NOT_CONNECTED", NT_STATUS_DEVICE_NOT_CONNECTED},
+	{"NT_STATUS_DEVICE_POWER_FAILURE", NT_STATUS_DEVICE_POWER_FAILURE},
+	{"NT_STATUS_FREE_VM_NOT_AT_BASE", NT_STATUS_FREE_VM_NOT_AT_BASE},
+	{"NT_STATUS_MEMORY_NOT_ALLOCATED", NT_STATUS_MEMORY_NOT_ALLOCATED},
+	{"NT_STATUS_WORKING_SET_QUOTA", NT_STATUS_WORKING_SET_QUOTA},
+	{"NT_STATUS_MEDIA_WRITE_PROTECTED",
+	 NT_STATUS_MEDIA_WRITE_PROTECTED},
+	{"NT_STATUS_DEVICE_NOT_READY", NT_STATUS_DEVICE_NOT_READY},
+	{"NT_STATUS_INVALID_GROUP_ATTRIBUTES",
+	 NT_STATUS_INVALID_GROUP_ATTRIBUTES},
+	{"NT_STATUS_BAD_IMPERSONATION_LEVEL",
+	 NT_STATUS_BAD_IMPERSONATION_LEVEL},
+	{"NT_STATUS_CANT_OPEN_ANONYMOUS", NT_STATUS_CANT_OPEN_ANONYMOUS},
+	{"NT_STATUS_BAD_VALIDATION_CLASS", NT_STATUS_BAD_VALIDATION_CLASS},
+	{"NT_STATUS_BAD_TOKEN_TYPE", NT_STATUS_BAD_TOKEN_TYPE},
+	{"NT_STATUS_BAD_MASTER_BOOT_RECORD",
+	 NT_STATUS_BAD_MASTER_BOOT_RECORD},
+	{"NT_STATUS_INSTRUCTION_MISALIGNMENT",
+	 NT_STATUS_INSTRUCTION_MISALIGNMENT},
+	{"NT_STATUS_INSTANCE_NOT_AVAILABLE",
+	 NT_STATUS_INSTANCE_NOT_AVAILABLE},
+	{"NT_STATUS_PIPE_NOT_AVAILABLE", NT_STATUS_PIPE_NOT_AVAILABLE},
+	{"NT_STATUS_INVALID_PIPE_STATE", NT_STATUS_INVALID_PIPE_STATE},
+	{"NT_STATUS_PIPE_BUSY", NT_STATUS_PIPE_BUSY},
+	{"NT_STATUS_ILLEGAL_FUNCTION", NT_STATUS_ILLEGAL_FUNCTION},
+	{"NT_STATUS_PIPE_DISCONNECTED", NT_STATUS_PIPE_DISCONNECTED},
+	{"NT_STATUS_PIPE_CLOSING", NT_STATUS_PIPE_CLOSING},
+	{"NT_STATUS_PIPE_CONNECTED", NT_STATUS_PIPE_CONNECTED},
+	{"NT_STATUS_PIPE_LISTENING", NT_STATUS_PIPE_LISTENING},
+	{"NT_STATUS_INVALID_READ_MODE", NT_STATUS_INVALID_READ_MODE},
+	{"NT_STATUS_IO_TIMEOUT", NT_STATUS_IO_TIMEOUT},
+	{"NT_STATUS_FILE_FORCED_CLOSED", NT_STATUS_FILE_FORCED_CLOSED},
+	{"NT_STATUS_PROFILING_NOT_STARTED",
+	 NT_STATUS_PROFILING_NOT_STARTED},
+	{"NT_STATUS_PROFILING_NOT_STOPPED",
+	 NT_STATUS_PROFILING_NOT_STOPPED},
+	{"NT_STATUS_COULD_NOT_INTERPRET", NT_STATUS_COULD_NOT_INTERPRET},
+	{"NT_STATUS_FILE_IS_A_DIRECTORY", NT_STATUS_FILE_IS_A_DIRECTORY},
+	{"NT_STATUS_NOT_SUPPORTED", NT_STATUS_NOT_SUPPORTED},
+	{"NT_STATUS_REMOTE_NOT_LISTENING", NT_STATUS_REMOTE_NOT_LISTENING},
+	{"NT_STATUS_DUPLICATE_NAME", NT_STATUS_DUPLICATE_NAME},
+	{"NT_STATUS_BAD_NETWORK_PATH", NT_STATUS_BAD_NETWORK_PATH},
+	{"NT_STATUS_NETWORK_BUSY", NT_STATUS_NETWORK_BUSY},
+	{"NT_STATUS_DEVICE_DOES_NOT_EXIST",
+	 NT_STATUS_DEVICE_DOES_NOT_EXIST},
+	{"NT_STATUS_TOO_MANY_COMMANDS", NT_STATUS_TOO_MANY_COMMANDS},
+	{"NT_STATUS_ADAPTER_HARDWARE_ERROR",
+	 NT_STATUS_ADAPTER_HARDWARE_ERROR},
+	{"NT_STATUS_INVALID_NETWORK_RESPONSE",
+	 NT_STATUS_INVALID_NETWORK_RESPONSE},
+	{"NT_STATUS_UNEXPECTED_NETWORK_ERROR",
+	 NT_STATUS_UNEXPECTED_NETWORK_ERROR},
+	{"NT_STATUS_BAD_REMOTE_ADAPTER", NT_STATUS_BAD_REMOTE_ADAPTER},
+	{"NT_STATUS_PRINT_QUEUE_FULL", NT_STATUS_PRINT_QUEUE_FULL},
+	{"NT_STATUS_NO_SPOOL_SPACE", NT_STATUS_NO_SPOOL_SPACE},
+	{"NT_STATUS_PRINT_CANCELLED", NT_STATUS_PRINT_CANCELLED},
+	{"NT_STATUS_NETWORK_NAME_DELETED", NT_STATUS_NETWORK_NAME_DELETED},
+	{"NT_STATUS_NETWORK_ACCESS_DENIED",
+	 NT_STATUS_NETWORK_ACCESS_DENIED},
+	{"NT_STATUS_BAD_DEVICE_TYPE", NT_STATUS_BAD_DEVICE_TYPE},
+	{"NT_STATUS_BAD_NETWORK_NAME", NT_STATUS_BAD_NETWORK_NAME},
+	{"NT_STATUS_TOO_MANY_NAMES", NT_STATUS_TOO_MANY_NAMES},
+	{"NT_STATUS_TOO_MANY_SESSIONS", NT_STATUS_TOO_MANY_SESSIONS},
+	{"NT_STATUS_SHARING_PAUSED", NT_STATUS_SHARING_PAUSED},
+	{"NT_STATUS_REQUEST_NOT_ACCEPTED", NT_STATUS_REQUEST_NOT_ACCEPTED},
+	{"NT_STATUS_REDIRECTOR_PAUSED", NT_STATUS_REDIRECTOR_PAUSED},
+	{"NT_STATUS_NET_WRITE_FAULT", NT_STATUS_NET_WRITE_FAULT},
+	{"NT_STATUS_PROFILING_AT_LIMIT", NT_STATUS_PROFILING_AT_LIMIT},
+	{"NT_STATUS_NOT_SAME_DEVICE", NT_STATUS_NOT_SAME_DEVICE},
+	{"NT_STATUS_FILE_RENAMED", NT_STATUS_FILE_RENAMED},
+	{"NT_STATUS_VIRTUAL_CIRCUIT_CLOSED",
+	 NT_STATUS_VIRTUAL_CIRCUIT_CLOSED},
+	{"NT_STATUS_NO_SECURITY_ON_OBJECT",
+	 NT_STATUS_NO_SECURITY_ON_OBJECT},
+	{"NT_STATUS_CANT_WAIT", NT_STATUS_CANT_WAIT},
+	{"NT_STATUS_PIPE_EMPTY", NT_STATUS_PIPE_EMPTY},
+	{"NT_STATUS_CANT_ACCESS_DOMAIN_INFO",
+	 NT_STATUS_CANT_ACCESS_DOMAIN_INFO},
+	{"NT_STATUS_CANT_TERMINATE_SELF", NT_STATUS_CANT_TERMINATE_SELF},
+	{"NT_STATUS_INVALID_SERVER_STATE", NT_STATUS_INVALID_SERVER_STATE},
+	{"NT_STATUS_INVALID_DOMAIN_STATE", NT_STATUS_INVALID_DOMAIN_STATE},
+	{"NT_STATUS_INVALID_DOMAIN_ROLE", NT_STATUS_INVALID_DOMAIN_ROLE},
+	{"NT_STATUS_NO_SUCH_DOMAIN", NT_STATUS_NO_SUCH_DOMAIN},
+	{"NT_STATUS_DOMAIN_EXISTS", NT_STATUS_DOMAIN_EXISTS},
+	{"NT_STATUS_DOMAIN_LIMIT_EXCEEDED",
+	 NT_STATUS_DOMAIN_LIMIT_EXCEEDED},
+	{"NT_STATUS_OPLOCK_NOT_GRANTED", NT_STATUS_OPLOCK_NOT_GRANTED},
+	{"NT_STATUS_INVALID_OPLOCK_PROTOCOL",
+	 NT_STATUS_INVALID_OPLOCK_PROTOCOL},
+	{"NT_STATUS_INTERNAL_DB_CORRUPTION",
+	 NT_STATUS_INTERNAL_DB_CORRUPTION},
+	{"NT_STATUS_INTERNAL_ERROR", NT_STATUS_INTERNAL_ERROR},
+	{"NT_STATUS_GENERIC_NOT_MAPPED", NT_STATUS_GENERIC_NOT_MAPPED},
+	{"NT_STATUS_BAD_DESCRIPTOR_FORMAT",
+	 NT_STATUS_BAD_DESCRIPTOR_FORMAT},
+	{"NT_STATUS_INVALID_USER_BUFFER", NT_STATUS_INVALID_USER_BUFFER},
+	{"NT_STATUS_UNEXPECTED_IO_ERROR", NT_STATUS_UNEXPECTED_IO_ERROR},
+	{"NT_STATUS_UNEXPECTED_MM_CREATE_ERR",
+	 NT_STATUS_UNEXPECTED_MM_CREATE_ERR},
+	{"NT_STATUS_UNEXPECTED_MM_MAP_ERROR",
+	 NT_STATUS_UNEXPECTED_MM_MAP_ERROR},
+	{"NT_STATUS_UNEXPECTED_MM_EXTEND_ERR",
+	 NT_STATUS_UNEXPECTED_MM_EXTEND_ERR},
+	{"NT_STATUS_NOT_LOGON_PROCESS", NT_STATUS_NOT_LOGON_PROCESS},
+	{"NT_STATUS_LOGON_SESSION_EXISTS", NT_STATUS_LOGON_SESSION_EXISTS},
+	{"NT_STATUS_INVALID_PARAMETER_1", NT_STATUS_INVALID_PARAMETER_1},
+	{"NT_STATUS_INVALID_PARAMETER_2", NT_STATUS_INVALID_PARAMETER_2},
+	{"NT_STATUS_INVALID_PARAMETER_3", NT_STATUS_INVALID_PARAMETER_3},
+	{"NT_STATUS_INVALID_PARAMETER_4", NT_STATUS_INVALID_PARAMETER_4},
+	{"NT_STATUS_INVALID_PARAMETER_5", NT_STATUS_INVALID_PARAMETER_5},
+	{"NT_STATUS_INVALID_PARAMETER_6", NT_STATUS_INVALID_PARAMETER_6},
+	{"NT_STATUS_INVALID_PARAMETER_7", NT_STATUS_INVALID_PARAMETER_7},
+	{"NT_STATUS_INVALID_PARAMETER_8", NT_STATUS_INVALID_PARAMETER_8},
+	{"NT_STATUS_INVALID_PARAMETER_9", NT_STATUS_INVALID_PARAMETER_9},
+	{"NT_STATUS_INVALID_PARAMETER_10", NT_STATUS_INVALID_PARAMETER_10},
+	{"NT_STATUS_INVALID_PARAMETER_11", NT_STATUS_INVALID_PARAMETER_11},
+	{"NT_STATUS_INVALID_PARAMETER_12", NT_STATUS_INVALID_PARAMETER_12},
+	{"NT_STATUS_REDIRECTOR_NOT_STARTED",
+	 NT_STATUS_REDIRECTOR_NOT_STARTED},
+	{"NT_STATUS_REDIRECTOR_STARTED", NT_STATUS_REDIRECTOR_STARTED},
+	{"NT_STATUS_STACK_OVERFLOW", NT_STATUS_STACK_OVERFLOW},
+	{"NT_STATUS_NO_SUCH_PACKAGE", NT_STATUS_NO_SUCH_PACKAGE},
+	{"NT_STATUS_BAD_FUNCTION_TABLE", NT_STATUS_BAD_FUNCTION_TABLE},
+	{"NT_STATUS_DIRECTORY_NOT_EMPTY", NT_STATUS_DIRECTORY_NOT_EMPTY},
+	{"NT_STATUS_FILE_CORRUPT_ERROR", NT_STATUS_FILE_CORRUPT_ERROR},
+	{"NT_STATUS_NOT_A_DIRECTORY", NT_STATUS_NOT_A_DIRECTORY},
+	{"NT_STATUS_BAD_LOGON_SESSION_STATE",
+	 NT_STATUS_BAD_LOGON_SESSION_STATE},
+	{"NT_STATUS_LOGON_SESSION_COLLISION",
+	 NT_STATUS_LOGON_SESSION_COLLISION},
+	{"NT_STATUS_NAME_TOO_LONG", NT_STATUS_NAME_TOO_LONG},
+	{"NT_STATUS_FILES_OPEN", NT_STATUS_FILES_OPEN},
+	{"NT_STATUS_CONNECTION_IN_USE", NT_STATUS_CONNECTION_IN_USE},
+	{"NT_STATUS_MESSAGE_NOT_FOUND", NT_STATUS_MESSAGE_NOT_FOUND},
+	{"NT_STATUS_PROCESS_IS_TERMINATING",
+	 NT_STATUS_PROCESS_IS_TERMINATING},
+	{"NT_STATUS_INVALID_LOGON_TYPE", NT_STATUS_INVALID_LOGON_TYPE},
+	{"NT_STATUS_NO_GUID_TRANSLATION", NT_STATUS_NO_GUID_TRANSLATION},
+	{"NT_STATUS_CANNOT_IMPERSONATE", NT_STATUS_CANNOT_IMPERSONATE},
+	{"NT_STATUS_IMAGE_ALREADY_LOADED", NT_STATUS_IMAGE_ALREADY_LOADED},
+	{"NT_STATUS_ABIOS_NOT_PRESENT", NT_STATUS_ABIOS_NOT_PRESENT},
+	{"NT_STATUS_ABIOS_LID_NOT_EXIST", NT_STATUS_ABIOS_LID_NOT_EXIST},
+	{"NT_STATUS_ABIOS_LID_ALREADY_OWNED",
+	 NT_STATUS_ABIOS_LID_ALREADY_OWNED},
+	{"NT_STATUS_ABIOS_NOT_LID_OWNER", NT_STATUS_ABIOS_NOT_LID_OWNER},
+	{"NT_STATUS_ABIOS_INVALID_COMMAND",
+	 NT_STATUS_ABIOS_INVALID_COMMAND},
+	{"NT_STATUS_ABIOS_INVALID_LID", NT_STATUS_ABIOS_INVALID_LID},
+	{"NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE",
+	 NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE},
+	{"NT_STATUS_ABIOS_INVALID_SELECTOR",
+	 NT_STATUS_ABIOS_INVALID_SELECTOR},
+	{"NT_STATUS_NO_LDT", NT_STATUS_NO_LDT},
+	{"NT_STATUS_INVALID_LDT_SIZE", NT_STATUS_INVALID_LDT_SIZE},
+	{"NT_STATUS_INVALID_LDT_OFFSET", NT_STATUS_INVALID_LDT_OFFSET},
+	{"NT_STATUS_INVALID_LDT_DESCRIPTOR",
+	 NT_STATUS_INVALID_LDT_DESCRIPTOR},
+	{"NT_STATUS_INVALID_IMAGE_NE_FORMAT",
+	 NT_STATUS_INVALID_IMAGE_NE_FORMAT},
+	{"NT_STATUS_RXACT_INVALID_STATE", NT_STATUS_RXACT_INVALID_STATE},
+	{"NT_STATUS_RXACT_COMMIT_FAILURE", NT_STATUS_RXACT_COMMIT_FAILURE},
+	{"NT_STATUS_MAPPED_FILE_SIZE_ZERO",
+	 NT_STATUS_MAPPED_FILE_SIZE_ZERO},
+	{"NT_STATUS_TOO_MANY_OPENED_FILES",
+	 NT_STATUS_TOO_MANY_OPENED_FILES},
+	{"NT_STATUS_CANCELLED", NT_STATUS_CANCELLED},
+	{"NT_STATUS_CANNOT_DELETE", NT_STATUS_CANNOT_DELETE},
+	{"NT_STATUS_INVALID_COMPUTER_NAME",
+	 NT_STATUS_INVALID_COMPUTER_NAME},
+	{"NT_STATUS_FILE_DELETED", NT_STATUS_FILE_DELETED},
+	{"NT_STATUS_SPECIAL_ACCOUNT", NT_STATUS_SPECIAL_ACCOUNT},
+	{"NT_STATUS_SPECIAL_GROUP", NT_STATUS_SPECIAL_GROUP},
+	{"NT_STATUS_SPECIAL_USER", NT_STATUS_SPECIAL_USER},
+	{"NT_STATUS_MEMBERS_PRIMARY_GROUP",
+	 NT_STATUS_MEMBERS_PRIMARY_GROUP},
+	{"NT_STATUS_FILE_CLOSED", NT_STATUS_FILE_CLOSED},
+	{"NT_STATUS_TOO_MANY_THREADS", NT_STATUS_TOO_MANY_THREADS},
+	{"NT_STATUS_THREAD_NOT_IN_PROCESS",
+	 NT_STATUS_THREAD_NOT_IN_PROCESS},
+	{"NT_STATUS_TOKEN_ALREADY_IN_USE", NT_STATUS_TOKEN_ALREADY_IN_USE},
+	{"NT_STATUS_PAGEFILE_QUOTA_EXCEEDED",
+	 NT_STATUS_PAGEFILE_QUOTA_EXCEEDED},
+	{"NT_STATUS_COMMITMENT_LIMIT", NT_STATUS_COMMITMENT_LIMIT},
+	{"NT_STATUS_INVALID_IMAGE_LE_FORMAT",
+	 NT_STATUS_INVALID_IMAGE_LE_FORMAT},
+	{"NT_STATUS_INVALID_IMAGE_NOT_MZ", NT_STATUS_INVALID_IMAGE_NOT_MZ},
+	{"NT_STATUS_INVALID_IMAGE_PROTECT",
+	 NT_STATUS_INVALID_IMAGE_PROTECT},
+	{"NT_STATUS_INVALID_IMAGE_WIN_16", NT_STATUS_INVALID_IMAGE_WIN_16},
+	{"NT_STATUS_LOGON_SERVER_CONFLICT",
+	 NT_STATUS_LOGON_SERVER_CONFLICT},
+	{"NT_STATUS_TIME_DIFFERENCE_AT_DC",
+	 NT_STATUS_TIME_DIFFERENCE_AT_DC},
+	{"NT_STATUS_SYNCHRONIZATION_REQUIRED",
+	 NT_STATUS_SYNCHRONIZATION_REQUIRED},
+	{"NT_STATUS_DLL_NOT_FOUND", NT_STATUS_DLL_NOT_FOUND},
+	{"NT_STATUS_OPEN_FAILED", NT_STATUS_OPEN_FAILED},
+	{"NT_STATUS_IO_PRIVILEGE_FAILED", NT_STATUS_IO_PRIVILEGE_FAILED},
+	{"NT_STATUS_ORDINAL_NOT_FOUND", NT_STATUS_ORDINAL_NOT_FOUND},
+	{"NT_STATUS_ENTRYPOINT_NOT_FOUND", NT_STATUS_ENTRYPOINT_NOT_FOUND},
+	{"NT_STATUS_CONTROL_C_EXIT", NT_STATUS_CONTROL_C_EXIT},
+	{"NT_STATUS_LOCAL_DISCONNECT", NT_STATUS_LOCAL_DISCONNECT},
+	{"NT_STATUS_REMOTE_DISCONNECT", NT_STATUS_REMOTE_DISCONNECT},
+	{"NT_STATUS_REMOTE_RESOURCES", NT_STATUS_REMOTE_RESOURCES},
+	{"NT_STATUS_LINK_FAILED", NT_STATUS_LINK_FAILED},
+	{"NT_STATUS_LINK_TIMEOUT", NT_STATUS_LINK_TIMEOUT},
+	{"NT_STATUS_INVALID_CONNECTION", NT_STATUS_INVALID_CONNECTION},
+	{"NT_STATUS_INVALID_ADDRESS", NT_STATUS_INVALID_ADDRESS},
+	{"NT_STATUS_DLL_INIT_FAILED", NT_STATUS_DLL_INIT_FAILED},
+	{"NT_STATUS_MISSING_SYSTEMFILE", NT_STATUS_MISSING_SYSTEMFILE},
+	{"NT_STATUS_UNHANDLED_EXCEPTION", NT_STATUS_UNHANDLED_EXCEPTION},
+	{"NT_STATUS_APP_INIT_FAILURE", NT_STATUS_APP_INIT_FAILURE},
+	{"NT_STATUS_PAGEFILE_CREATE_FAILED",
+	 NT_STATUS_PAGEFILE_CREATE_FAILED},
+	{"NT_STATUS_NO_PAGEFILE", NT_STATUS_NO_PAGEFILE},
+	{"NT_STATUS_INVALID_LEVEL", NT_STATUS_INVALID_LEVEL},
+	{"NT_STATUS_WRONG_PASSWORD_CORE", NT_STATUS_WRONG_PASSWORD_CORE},
+	{"NT_STATUS_ILLEGAL_FLOAT_CONTEXT",
+	 NT_STATUS_ILLEGAL_FLOAT_CONTEXT},
+	{"NT_STATUS_PIPE_BROKEN", NT_STATUS_PIPE_BROKEN},
+	{"NT_STATUS_REGISTRY_CORRUPT", NT_STATUS_REGISTRY_CORRUPT},
+	{"NT_STATUS_REGISTRY_IO_FAILED", NT_STATUS_REGISTRY_IO_FAILED},
+	{"NT_STATUS_NO_EVENT_PAIR", NT_STATUS_NO_EVENT_PAIR},
+	{"NT_STATUS_UNRECOGNIZED_VOLUME", NT_STATUS_UNRECOGNIZED_VOLUME},
+	{"NT_STATUS_SERIAL_NO_DEVICE_INITED",
+	 NT_STATUS_SERIAL_NO_DEVICE_INITED},
+	{"NT_STATUS_NO_SUCH_ALIAS", NT_STATUS_NO_SUCH_ALIAS},
+	{"NT_STATUS_MEMBER_NOT_IN_ALIAS", NT_STATUS_MEMBER_NOT_IN_ALIAS},
+	{"NT_STATUS_MEMBER_IN_ALIAS", NT_STATUS_MEMBER_IN_ALIAS},
+	{"NT_STATUS_ALIAS_EXISTS", NT_STATUS_ALIAS_EXISTS},
+	{"NT_STATUS_LOGON_NOT_GRANTED", NT_STATUS_LOGON_NOT_GRANTED},
+	{"NT_STATUS_TOO_MANY_SECRETS", NT_STATUS_TOO_MANY_SECRETS},
+	{"NT_STATUS_SECRET_TOO_LONG", NT_STATUS_SECRET_TOO_LONG},
+	{"NT_STATUS_INTERNAL_DB_ERROR", NT_STATUS_INTERNAL_DB_ERROR},
+	{"NT_STATUS_FULLSCREEN_MODE", NT_STATUS_FULLSCREEN_MODE},
+	{"NT_STATUS_TOO_MANY_CONTEXT_IDS", NT_STATUS_TOO_MANY_CONTEXT_IDS},
+	{"NT_STATUS_LOGON_TYPE_NOT_GRANTED",
+	 NT_STATUS_LOGON_TYPE_NOT_GRANTED},
+	{"NT_STATUS_NOT_REGISTRY_FILE", NT_STATUS_NOT_REGISTRY_FILE},
+	{"NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED",
+	 NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED},
+	{"NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR",
+	 NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR},
+	{"NT_STATUS_FT_MISSING_MEMBER", NT_STATUS_FT_MISSING_MEMBER},
+	{"NT_STATUS_ILL_FORMED_SERVICE_ENTRY",
+	 NT_STATUS_ILL_FORMED_SERVICE_ENTRY},
+	{"NT_STATUS_ILLEGAL_CHARACTER", NT_STATUS_ILLEGAL_CHARACTER},
+	{"NT_STATUS_UNMAPPABLE_CHARACTER", NT_STATUS_UNMAPPABLE_CHARACTER},
+	{"NT_STATUS_UNDEFINED_CHARACTER", NT_STATUS_UNDEFINED_CHARACTER},
+	{"NT_STATUS_FLOPPY_VOLUME", NT_STATUS_FLOPPY_VOLUME},
+	{"NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND",
+	 NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND},
+	{"NT_STATUS_FLOPPY_WRONG_CYLINDER",
+	 NT_STATUS_FLOPPY_WRONG_CYLINDER},
+	{"NT_STATUS_FLOPPY_UNKNOWN_ERROR", NT_STATUS_FLOPPY_UNKNOWN_ERROR},
+	{"NT_STATUS_FLOPPY_BAD_REGISTERS", NT_STATUS_FLOPPY_BAD_REGISTERS},
+	{"NT_STATUS_DISK_RECALIBRATE_FAILED",
+	 NT_STATUS_DISK_RECALIBRATE_FAILED},
+	{"NT_STATUS_DISK_OPERATION_FAILED",
+	 NT_STATUS_DISK_OPERATION_FAILED},
+	{"NT_STATUS_DISK_RESET_FAILED", NT_STATUS_DISK_RESET_FAILED},
+	{"NT_STATUS_SHARED_IRQ_BUSY", NT_STATUS_SHARED_IRQ_BUSY},
+	{"NT_STATUS_FT_ORPHANING", NT_STATUS_FT_ORPHANING},
+	{"NT_STATUS_PARTITION_FAILURE", NT_STATUS_PARTITION_FAILURE},
+	{"NT_STATUS_INVALID_BLOCK_LENGTH", NT_STATUS_INVALID_BLOCK_LENGTH},
+	{"NT_STATUS_DEVICE_NOT_PARTITIONED",
+	 NT_STATUS_DEVICE_NOT_PARTITIONED},
+	{"NT_STATUS_UNABLE_TO_LOCK_MEDIA", NT_STATUS_UNABLE_TO_LOCK_MEDIA},
+	{"NT_STATUS_UNABLE_TO_UNLOAD_MEDIA",
+	 NT_STATUS_UNABLE_TO_UNLOAD_MEDIA},
+	{"NT_STATUS_EOM_OVERFLOW", NT_STATUS_EOM_OVERFLOW},
+	{"NT_STATUS_NO_MEDIA", NT_STATUS_NO_MEDIA},
+	{"NT_STATUS_NO_SUCH_MEMBER", NT_STATUS_NO_SUCH_MEMBER},
+	{"NT_STATUS_INVALID_MEMBER", NT_STATUS_INVALID_MEMBER},
+	{"NT_STATUS_KEY_DELETED", NT_STATUS_KEY_DELETED},
+	{"NT_STATUS_NO_LOG_SPACE", NT_STATUS_NO_LOG_SPACE},
+	{"NT_STATUS_TOO_MANY_SIDS", NT_STATUS_TOO_MANY_SIDS},
+	{"NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED",
+	 NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED},
+	{"NT_STATUS_KEY_HAS_CHILDREN", NT_STATUS_KEY_HAS_CHILDREN},
+	{"NT_STATUS_CHILD_MUST_BE_VOLATILE",
+	 NT_STATUS_CHILD_MUST_BE_VOLATILE},
+	{"NT_STATUS_DEVICE_CONFIGURATION_ERROR",
+	 NT_STATUS_DEVICE_CONFIGURATION_ERROR},
+	{"NT_STATUS_DRIVER_INTERNAL_ERROR",
+	 NT_STATUS_DRIVER_INTERNAL_ERROR},
+	{"NT_STATUS_INVALID_DEVICE_STATE", NT_STATUS_INVALID_DEVICE_STATE},
+	{"NT_STATUS_IO_DEVICE_ERROR", NT_STATUS_IO_DEVICE_ERROR},
+	{"NT_STATUS_DEVICE_PROTOCOL_ERROR",
+	 NT_STATUS_DEVICE_PROTOCOL_ERROR},
+	{"NT_STATUS_BACKUP_CONTROLLER", NT_STATUS_BACKUP_CONTROLLER},
+	{"NT_STATUS_LOG_FILE_FULL", NT_STATUS_LOG_FILE_FULL},
+	{"NT_STATUS_TOO_LATE", NT_STATUS_TOO_LATE},
+	{"NT_STATUS_NO_TRUST_LSA_SECRET", NT_STATUS_NO_TRUST_LSA_SECRET},
+	{"NT_STATUS_NO_TRUST_SAM_ACCOUNT", NT_STATUS_NO_TRUST_SAM_ACCOUNT},
+	{"NT_STATUS_TRUSTED_DOMAIN_FAILURE",
+	 NT_STATUS_TRUSTED_DOMAIN_FAILURE},
+	{"NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE",
+	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE},
+	{"NT_STATUS_EVENTLOG_FILE_CORRUPT",
+	 NT_STATUS_EVENTLOG_FILE_CORRUPT},
+	{"NT_STATUS_EVENTLOG_CANT_START", NT_STATUS_EVENTLOG_CANT_START},
+	{"NT_STATUS_TRUST_FAILURE", NT_STATUS_TRUST_FAILURE},
+	{"NT_STATUS_MUTANT_LIMIT_EXCEEDED",
+	 NT_STATUS_MUTANT_LIMIT_EXCEEDED},
+	{"NT_STATUS_NETLOGON_NOT_STARTED", NT_STATUS_NETLOGON_NOT_STARTED},
+	{"NT_STATUS_ACCOUNT_EXPIRED", NT_STATUS_ACCOUNT_EXPIRED},
+	{"NT_STATUS_POSSIBLE_DEADLOCK", NT_STATUS_POSSIBLE_DEADLOCK},
+	{"NT_STATUS_NETWORK_CREDENTIAL_CONFLICT",
+	 NT_STATUS_NETWORK_CREDENTIAL_CONFLICT},
+	{"NT_STATUS_REMOTE_SESSION_LIMIT", NT_STATUS_REMOTE_SESSION_LIMIT},
+	{"NT_STATUS_EVENTLOG_FILE_CHANGED",
+	 NT_STATUS_EVENTLOG_FILE_CHANGED},
+	{"NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT",
+	 NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT},
+	{"NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT",
+	 NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT},
+	{"NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT",
+	 NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
+	{"NT_STATUS_DOMAIN_TRUST_INCONSISTENT",
+	 NT_STATUS_DOMAIN_TRUST_INCONSISTENT},
+	{"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED},
+	{"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY},
+	{"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED},
+	{"NT_STATUS_RESOURCE_LANG_NOT_FOUND",
+	 NT_STATUS_RESOURCE_LANG_NOT_FOUND},
+	{"NT_STATUS_INSUFF_SERVER_RESOURCES",
+	 NT_STATUS_INSUFF_SERVER_RESOURCES},
+	{"NT_STATUS_INVALID_BUFFER_SIZE", NT_STATUS_INVALID_BUFFER_SIZE},
+	{"NT_STATUS_INVALID_ADDRESS_COMPONENT",
+	 NT_STATUS_INVALID_ADDRESS_COMPONENT},
+	{"NT_STATUS_INVALID_ADDRESS_WILDCARD",
+	 NT_STATUS_INVALID_ADDRESS_WILDCARD},
+	{"NT_STATUS_TOO_MANY_ADDRESSES", NT_STATUS_TOO_MANY_ADDRESSES},
+	{"NT_STATUS_ADDRESS_ALREADY_EXISTS",
+	 NT_STATUS_ADDRESS_ALREADY_EXISTS},
+	{"NT_STATUS_ADDRESS_CLOSED", NT_STATUS_ADDRESS_CLOSED},
+	{"NT_STATUS_CONNECTION_DISCONNECTED",
+	 NT_STATUS_CONNECTION_DISCONNECTED},
+	{"NT_STATUS_CONNECTION_RESET", NT_STATUS_CONNECTION_RESET},
+	{"NT_STATUS_TOO_MANY_NODES", NT_STATUS_TOO_MANY_NODES},
+	{"NT_STATUS_TRANSACTION_ABORTED", NT_STATUS_TRANSACTION_ABORTED},
+	{"NT_STATUS_TRANSACTION_TIMED_OUT",
+	 NT_STATUS_TRANSACTION_TIMED_OUT},
+	{"NT_STATUS_TRANSACTION_NO_RELEASE",
+	 NT_STATUS_TRANSACTION_NO_RELEASE},
+	{"NT_STATUS_TRANSACTION_NO_MATCH", NT_STATUS_TRANSACTION_NO_MATCH},
+	{"NT_STATUS_TRANSACTION_RESPONDED",
+	 NT_STATUS_TRANSACTION_RESPONDED},
+	{"NT_STATUS_TRANSACTION_INVALID_ID",
+	 NT_STATUS_TRANSACTION_INVALID_ID},
+	{"NT_STATUS_TRANSACTION_INVALID_TYPE",
+	 NT_STATUS_TRANSACTION_INVALID_TYPE},
+	{"NT_STATUS_NOT_SERVER_SESSION", NT_STATUS_NOT_SERVER_SESSION},
+	{"NT_STATUS_NOT_CLIENT_SESSION", NT_STATUS_NOT_CLIENT_SESSION},
+	{"NT_STATUS_CANNOT_LOAD_REGISTRY_FILE",
+	 NT_STATUS_CANNOT_LOAD_REGISTRY_FILE},
+	{"NT_STATUS_DEBUG_ATTACH_FAILED", NT_STATUS_DEBUG_ATTACH_FAILED},
+	{"NT_STATUS_SYSTEM_PROCESS_TERMINATED",
+	 NT_STATUS_SYSTEM_PROCESS_TERMINATED},
+	{"NT_STATUS_DATA_NOT_ACCEPTED", NT_STATUS_DATA_NOT_ACCEPTED},
+	{"NT_STATUS_NO_BROWSER_SERVERS_FOUND",
+	 NT_STATUS_NO_BROWSER_SERVERS_FOUND},
+	{"NT_STATUS_VDM_HARD_ERROR", NT_STATUS_VDM_HARD_ERROR},
+	{"NT_STATUS_DRIVER_CANCEL_TIMEOUT",
+	 NT_STATUS_DRIVER_CANCEL_TIMEOUT},
+	{"NT_STATUS_REPLY_MESSAGE_MISMATCH",
+	 NT_STATUS_REPLY_MESSAGE_MISMATCH},
+	{"NT_STATUS_MAPPED_ALIGNMENT", NT_STATUS_MAPPED_ALIGNMENT},
+	{"NT_STATUS_IMAGE_CHECKSUM_MISMATCH",
+	 NT_STATUS_IMAGE_CHECKSUM_MISMATCH},
+	{"NT_STATUS_LOST_WRITEBEHIND_DATA",
+	 NT_STATUS_LOST_WRITEBEHIND_DATA},
+	{"NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID",
+	 NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID},
+	{"NT_STATUS_PASSWORD_MUST_CHANGE", NT_STATUS_PASSWORD_MUST_CHANGE},
+	{"NT_STATUS_NOT_FOUND", NT_STATUS_NOT_FOUND},
+	{"NT_STATUS_NOT_TINY_STREAM", NT_STATUS_NOT_TINY_STREAM},
+	{"NT_STATUS_RECOVERY_FAILURE", NT_STATUS_RECOVERY_FAILURE},
+	{"NT_STATUS_STACK_OVERFLOW_READ", NT_STATUS_STACK_OVERFLOW_READ},
+	{"NT_STATUS_FAIL_CHECK", NT_STATUS_FAIL_CHECK},
+	{"NT_STATUS_DUPLICATE_OBJECTID", NT_STATUS_DUPLICATE_OBJECTID},
+	{"NT_STATUS_OBJECTID_EXISTS", NT_STATUS_OBJECTID_EXISTS},
+	{"NT_STATUS_CONVERT_TO_LARGE", NT_STATUS_CONVERT_TO_LARGE},
+	{"NT_STATUS_RETRY", NT_STATUS_RETRY},
+	{"NT_STATUS_FOUND_OUT_OF_SCOPE", NT_STATUS_FOUND_OUT_OF_SCOPE},
+	{"NT_STATUS_ALLOCATE_BUCKET", NT_STATUS_ALLOCATE_BUCKET},
+	{"NT_STATUS_PROPSET_NOT_FOUND", NT_STATUS_PROPSET_NOT_FOUND},
+	{"NT_STATUS_MARSHALL_OVERFLOW", NT_STATUS_MARSHALL_OVERFLOW},
+	{"NT_STATUS_INVALID_VARIANT", NT_STATUS_INVALID_VARIANT},
+	{"NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND",
+	 NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND},
+	{"NT_STATUS_ACCOUNT_LOCKED_OUT", NT_STATUS_ACCOUNT_LOCKED_OUT},
+	{"NT_STATUS_HANDLE_NOT_CLOSABLE", NT_STATUS_HANDLE_NOT_CLOSABLE},
+	{"NT_STATUS_CONNECTION_REFUSED", NT_STATUS_CONNECTION_REFUSED},
+	{"NT_STATUS_GRACEFUL_DISCONNECT", NT_STATUS_GRACEFUL_DISCONNECT},
+	{"NT_STATUS_ADDRESS_ALREADY_ASSOCIATED",
+	 NT_STATUS_ADDRESS_ALREADY_ASSOCIATED},
+	{"NT_STATUS_ADDRESS_NOT_ASSOCIATED",
+	 NT_STATUS_ADDRESS_NOT_ASSOCIATED},
+	{"NT_STATUS_CONNECTION_INVALID", NT_STATUS_CONNECTION_INVALID},
+	{"NT_STATUS_CONNECTION_ACTIVE", NT_STATUS_CONNECTION_ACTIVE},
+	{"NT_STATUS_NETWORK_UNREACHABLE", NT_STATUS_NETWORK_UNREACHABLE},
+	{"NT_STATUS_HOST_UNREACHABLE", NT_STATUS_HOST_UNREACHABLE},
+	{"NT_STATUS_PROTOCOL_UNREACHABLE", NT_STATUS_PROTOCOL_UNREACHABLE},
+	{"NT_STATUS_PORT_UNREACHABLE", NT_STATUS_PORT_UNREACHABLE},
+	{"NT_STATUS_REQUEST_ABORTED", NT_STATUS_REQUEST_ABORTED},
+	{"NT_STATUS_CONNECTION_ABORTED", NT_STATUS_CONNECTION_ABORTED},
+	{"NT_STATUS_BAD_COMPRESSION_BUFFER",
+	 NT_STATUS_BAD_COMPRESSION_BUFFER},
+	{"NT_STATUS_USER_MAPPED_FILE", NT_STATUS_USER_MAPPED_FILE},
+	{"NT_STATUS_AUDIT_FAILED", NT_STATUS_AUDIT_FAILED},
+	{"NT_STATUS_TIMER_RESOLUTION_NOT_SET",
+	 NT_STATUS_TIMER_RESOLUTION_NOT_SET},
+	{"NT_STATUS_CONNECTION_COUNT_LIMIT",
+	 NT_STATUS_CONNECTION_COUNT_LIMIT},
+	{"NT_STATUS_LOGIN_TIME_RESTRICTION",
+	 NT_STATUS_LOGIN_TIME_RESTRICTION},
+	{"NT_STATUS_LOGIN_WKSTA_RESTRICTION",
+	 NT_STATUS_LOGIN_WKSTA_RESTRICTION},
+	{"NT_STATUS_IMAGE_MP_UP_MISMATCH", NT_STATUS_IMAGE_MP_UP_MISMATCH},
+	{"NT_STATUS_INSUFFICIENT_LOGON_INFO",
+	 NT_STATUS_INSUFFICIENT_LOGON_INFO},
+	{"NT_STATUS_BAD_DLL_ENTRYPOINT", NT_STATUS_BAD_DLL_ENTRYPOINT},
+	{"NT_STATUS_BAD_SERVICE_ENTRYPOINT",
+	 NT_STATUS_BAD_SERVICE_ENTRYPOINT},
+	{"NT_STATUS_LPC_REPLY_LOST", NT_STATUS_LPC_REPLY_LOST},
+	{"NT_STATUS_IP_ADDRESS_CONFLICT1", NT_STATUS_IP_ADDRESS_CONFLICT1},
+	{"NT_STATUS_IP_ADDRESS_CONFLICT2", NT_STATUS_IP_ADDRESS_CONFLICT2},
+	{"NT_STATUS_REGISTRY_QUOTA_LIMIT", NT_STATUS_REGISTRY_QUOTA_LIMIT},
+	{"NT_STATUS_PATH_NOT_COVERED", NT_STATUS_PATH_NOT_COVERED},
+	{"NT_STATUS_NO_CALLBACK_ACTIVE", NT_STATUS_NO_CALLBACK_ACTIVE},
+	{"NT_STATUS_LICENSE_QUOTA_EXCEEDED",
+	 NT_STATUS_LICENSE_QUOTA_EXCEEDED},
+	{"NT_STATUS_PWD_TOO_SHORT", NT_STATUS_PWD_TOO_SHORT},
+	{"NT_STATUS_PWD_TOO_RECENT", NT_STATUS_PWD_TOO_RECENT},
+	{"NT_STATUS_PWD_HISTORY_CONFLICT", NT_STATUS_PWD_HISTORY_CONFLICT},
+	{"NT_STATUS_PLUGPLAY_NO_DEVICE", NT_STATUS_PLUGPLAY_NO_DEVICE},
+	{"NT_STATUS_UNSUPPORTED_COMPRESSION",
+	 NT_STATUS_UNSUPPORTED_COMPRESSION},
+	{"NT_STATUS_INVALID_HW_PROFILE", NT_STATUS_INVALID_HW_PROFILE},
+	{"NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH",
+	 NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH},
+	{"NT_STATUS_DRIVER_ORDINAL_NOT_FOUND",
+	 NT_STATUS_DRIVER_ORDINAL_NOT_FOUND},
+	{"NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND",
+	 NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND},
+	{"NT_STATUS_RESOURCE_NOT_OWNED", NT_STATUS_RESOURCE_NOT_OWNED},
+	{"NT_STATUS_TOO_MANY_LINKS", NT_STATUS_TOO_MANY_LINKS},
+	{"NT_STATUS_QUOTA_LIST_INCONSISTENT",
+	 NT_STATUS_QUOTA_LIST_INCONSISTENT},
+	{"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE},
+	{"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES},
+	{"STATUS_MORE_ENTRIES", STATUS_MORE_ENTRIES},
+	{"STATUS_SOME_UNMAPPED", STATUS_SOME_UNMAPPED},
+	{NULL, 0}
+};
diff --git a/fs/cifs/nterr.h b/fs/cifs/nterr.h
new file mode 100644
index 0000000..d2fb06c
--- /dev/null
+++ b/fs/cifs/nterr.h
@@ -0,0 +1,556 @@
+/* 
+   Unix SMB/Netbios implementation.
+   Version 1.9.
+   NT error code constants
+   Copyright (C) Andrew Tridgell              1992-2000
+   Copyright (C) John H Terpstra              1996-2000
+   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+   Copyright (C) Paul Ashton                  1998-2000
+   
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+   
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+   
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+
+
+#ifndef _NTERR_H
+#define _NTERR_H
+
+struct nt_err_code_struct {
+	char *nt_errstr;
+	__u32 nt_errcode;
+};
+
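+/* The array is terminated by an entry whose nt_errstr is NULL (see nterr.c). */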
+extern const struct nt_err_code_struct nt_errs[];
+
+/* Win32 Status codes. */
+
+#define STATUS_BUFFER_OVERFLOW            0x80000005
+#define STATUS_MORE_ENTRIES               0x0105
+#define ERROR_INVALID_PARAMETER		  0x0057
+#define ERROR_INSUFFICIENT_BUFFER	  0x007a
+#define STATUS_1804	                  0x070c
+#define STATUS_NOTIFY_ENUM_DIR            0x010c
+
+/* Win32 error codes, extracted by running a loop in smbclient and then
+   printing a netmon sniff to a file. */
+
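+/*
+ * Note: these macros expand without surrounding parentheses (for example,
+ * NT_STATUS_UNSUCCESSFUL becomes 0xC0000000 | 0x0001), so an expression such
+ * as (x == NT_STATUS_UNSUCCESSFUL) would not test what it appears to; use
+ * them as standalone values or add parentheses at the point of use.
+ */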
+#define NT_STATUS_OK 0x0000
+#define STATUS_SOME_UNMAPPED       0x0107
+#define STATUS_BUFFER_OVERFLOW     0x80000005
+#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
+#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001
+#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002
+#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003
+#define NT_STATUS_INFO_LENGTH_MISMATCH 0xC0000000 | 0x0004
+#define NT_STATUS_ACCESS_VIOLATION 0xC0000000 | 0x0005
+#define NT_STATUS_IN_PAGE_ERROR 0xC0000000 | 0x0006
+#define NT_STATUS_PAGEFILE_QUOTA 0xC0000000 | 0x0007
+#define NT_STATUS_INVALID_HANDLE 0xC0000000 | 0x0008
+#define NT_STATUS_BAD_INITIAL_STACK 0xC0000000 | 0x0009
+#define NT_STATUS_BAD_INITIAL_PC 0xC0000000 | 0x000a
+#define NT_STATUS_INVALID_CID 0xC0000000 | 0x000b
+#define NT_STATUS_TIMER_NOT_CANCELED 0xC0000000 | 0x000c
+#define NT_STATUS_INVALID_PARAMETER 0xC0000000 | 0x000d
+#define NT_STATUS_NO_SUCH_DEVICE 0xC0000000 | 0x000e
+#define NT_STATUS_NO_SUCH_FILE 0xC0000000 | 0x000f
+#define NT_STATUS_INVALID_DEVICE_REQUEST 0xC0000000 | 0x0010
+#define NT_STATUS_END_OF_FILE 0xC0000000 | 0x0011
+#define NT_STATUS_WRONG_VOLUME 0xC0000000 | 0x0012
+#define NT_STATUS_NO_MEDIA_IN_DEVICE 0xC0000000 | 0x0013
+#define NT_STATUS_UNRECOGNIZED_MEDIA 0xC0000000 | 0x0014
+#define NT_STATUS_NONEXISTENT_SECTOR 0xC0000000 | 0x0015
+#define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000000 | 0x0016
+#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017
+#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018
+#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019
+#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a
+#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b
+#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c
+#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d
+#define NT_STATUS_INVALID_LOCK_SEQUENCE 0xC0000000 | 0x001e
+#define NT_STATUS_INVALID_VIEW_SIZE 0xC0000000 | 0x001f
+#define NT_STATUS_INVALID_FILE_FOR_SECTION 0xC0000000 | 0x0020
+#define NT_STATUS_ALREADY_COMMITTED 0xC0000000 | 0x0021
+#define NT_STATUS_ACCESS_DENIED 0xC0000000 | 0x0022
+#define NT_STATUS_BUFFER_TOO_SMALL 0xC0000000 | 0x0023
+#define NT_STATUS_OBJECT_TYPE_MISMATCH 0xC0000000 | 0x0024
+#define NT_STATUS_NONCONTINUABLE_EXCEPTION 0xC0000000 | 0x0025
+#define NT_STATUS_INVALID_DISPOSITION 0xC0000000 | 0x0026
+#define NT_STATUS_UNWIND 0xC0000000 | 0x0027
+#define NT_STATUS_BAD_STACK 0xC0000000 | 0x0028
+#define NT_STATUS_INVALID_UNWIND_TARGET 0xC0000000 | 0x0029
+#define NT_STATUS_NOT_LOCKED 0xC0000000 | 0x002a
+#define NT_STATUS_PARITY_ERROR 0xC0000000 | 0x002b
+#define NT_STATUS_UNABLE_TO_DECOMMIT_VM 0xC0000000 | 0x002c
+#define NT_STATUS_NOT_COMMITTED 0xC0000000 | 0x002d
+#define NT_STATUS_INVALID_PORT_ATTRIBUTES 0xC0000000 | 0x002e
+#define NT_STATUS_PORT_MESSAGE_TOO_LONG 0xC0000000 | 0x002f
+#define NT_STATUS_INVALID_PARAMETER_MIX 0xC0000000 | 0x0030
+#define NT_STATUS_INVALID_QUOTA_LOWER 0xC0000000 | 0x0031
+#define NT_STATUS_DISK_CORRUPT_ERROR 0xC0000000 | 0x0032
+#define NT_STATUS_OBJECT_NAME_INVALID 0xC0000000 | 0x0033
+#define NT_STATUS_OBJECT_NAME_NOT_FOUND 0xC0000000 | 0x0034
+#define NT_STATUS_OBJECT_NAME_COLLISION 0xC0000000 | 0x0035
+#define NT_STATUS_HANDLE_NOT_WAITABLE 0xC0000000 | 0x0036
+#define NT_STATUS_PORT_DISCONNECTED 0xC0000000 | 0x0037
+#define NT_STATUS_DEVICE_ALREADY_ATTACHED 0xC0000000 | 0x0038
+#define NT_STATUS_OBJECT_PATH_INVALID 0xC0000000 | 0x0039
+#define NT_STATUS_OBJECT_PATH_NOT_FOUND 0xC0000000 | 0x003a
+#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD 0xC0000000 | 0x003b
+#define NT_STATUS_DATA_OVERRUN 0xC0000000 | 0x003c
+#define NT_STATUS_DATA_LATE_ERROR 0xC0000000 | 0x003d
+#define NT_STATUS_DATA_ERROR 0xC0000000 | 0x003e
+#define NT_STATUS_CRC_ERROR 0xC0000000 | 0x003f
+#define NT_STATUS_SECTION_TOO_BIG 0xC0000000 | 0x0040
+#define NT_STATUS_PORT_CONNECTION_REFUSED 0xC0000000 | 0x0041
+#define NT_STATUS_INVALID_PORT_HANDLE 0xC0000000 | 0x0042
+#define NT_STATUS_SHARING_VIOLATION 0xC0000000 | 0x0043
+#define NT_STATUS_QUOTA_EXCEEDED 0xC0000000 | 0x0044
+#define NT_STATUS_INVALID_PAGE_PROTECTION 0xC0000000 | 0x0045
+#define NT_STATUS_MUTANT_NOT_OWNED 0xC0000000 | 0x0046
+#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED 0xC0000000 | 0x0047
+#define NT_STATUS_PORT_ALREADY_SET 0xC0000000 | 0x0048
+#define NT_STATUS_SECTION_NOT_IMAGE 0xC0000000 | 0x0049
+#define NT_STATUS_SUSPEND_COUNT_EXCEEDED 0xC0000000 | 0x004a
+#define NT_STATUS_THREAD_IS_TERMINATING 0xC0000000 | 0x004b
+#define NT_STATUS_BAD_WORKING_SET_LIMIT 0xC0000000 | 0x004c
+#define NT_STATUS_INCOMPATIBLE_FILE_MAP 0xC0000000 | 0x004d
+#define NT_STATUS_SECTION_PROTECTION 0xC0000000 | 0x004e
+#define NT_STATUS_EAS_NOT_SUPPORTED 0xC0000000 | 0x004f
+#define NT_STATUS_EA_TOO_LARGE 0xC0000000 | 0x0050
+#define NT_STATUS_NONEXISTENT_EA_ENTRY 0xC0000000 | 0x0051
+#define NT_STATUS_NO_EAS_ON_FILE 0xC0000000 | 0x0052
+#define NT_STATUS_EA_CORRUPT_ERROR 0xC0000000 | 0x0053
+#define NT_STATUS_FILE_LOCK_CONFLICT 0xC0000000 | 0x0054
+#define NT_STATUS_LOCK_NOT_GRANTED 0xC0000000 | 0x0055
+#define NT_STATUS_DELETE_PENDING 0xC0000000 | 0x0056
+#define NT_STATUS_CTL_FILE_NOT_SUPPORTED 0xC0000000 | 0x0057
+#define NT_STATUS_UNKNOWN_REVISION 0xC0000000 | 0x0058
+#define NT_STATUS_REVISION_MISMATCH 0xC0000000 | 0x0059
+#define NT_STATUS_INVALID_OWNER 0xC0000000 | 0x005a
+#define NT_STATUS_INVALID_PRIMARY_GROUP 0xC0000000 | 0x005b
+#define NT_STATUS_NO_IMPERSONATION_TOKEN 0xC0000000 | 0x005c
+#define NT_STATUS_CANT_DISABLE_MANDATORY 0xC0000000 | 0x005d
+#define NT_STATUS_NO_LOGON_SERVERS 0xC0000000 | 0x005e
+#define NT_STATUS_NO_SUCH_LOGON_SESSION 0xC0000000 | 0x005f
+#define NT_STATUS_NO_SUCH_PRIVILEGE 0xC0000000 | 0x0060
+#define NT_STATUS_PRIVILEGE_NOT_HELD 0xC0000000 | 0x0061
+#define NT_STATUS_INVALID_ACCOUNT_NAME 0xC0000000 | 0x0062
+#define NT_STATUS_USER_EXISTS 0xC0000000 | 0x0063
+#define NT_STATUS_NO_SUCH_USER 0xC0000000 | 0x0064
+#define NT_STATUS_GROUP_EXISTS 0xC0000000 | 0x0065
+#define NT_STATUS_NO_SUCH_GROUP 0xC0000000 | 0x0066
+#define NT_STATUS_MEMBER_IN_GROUP 0xC0000000 | 0x0067
+#define NT_STATUS_MEMBER_NOT_IN_GROUP 0xC0000000 | 0x0068
+#define NT_STATUS_LAST_ADMIN 0xC0000000 | 0x0069
+#define NT_STATUS_WRONG_PASSWORD 0xC0000000 | 0x006a
+#define NT_STATUS_ILL_FORMED_PASSWORD 0xC0000000 | 0x006b
+#define NT_STATUS_PASSWORD_RESTRICTION 0xC0000000 | 0x006c
+#define NT_STATUS_LOGON_FAILURE 0xC0000000 | 0x006d
+#define NT_STATUS_ACCOUNT_RESTRICTION 0xC0000000 | 0x006e
+#define NT_STATUS_INVALID_LOGON_HOURS 0xC0000000 | 0x006f
+#define NT_STATUS_INVALID_WORKSTATION 0xC0000000 | 0x0070
+#define NT_STATUS_PASSWORD_EXPIRED 0xC0000000 | 0x0071
+#define NT_STATUS_ACCOUNT_DISABLED 0xC0000000 | 0x0072
+#define NT_STATUS_NONE_MAPPED 0xC0000000 | 0x0073
+#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED 0xC0000000 | 0x0074
+#define NT_STATUS_LUIDS_EXHAUSTED 0xC0000000 | 0x0075
+#define NT_STATUS_INVALID_SUB_AUTHORITY 0xC0000000 | 0x0076
+#define NT_STATUS_INVALID_ACL 0xC0000000 | 0x0077
+#define NT_STATUS_INVALID_SID 0xC0000000 | 0x0078
+#define NT_STATUS_INVALID_SECURITY_DESCR 0xC0000000 | 0x0079
+#define NT_STATUS_PROCEDURE_NOT_FOUND 0xC0000000 | 0x007a
+#define NT_STATUS_INVALID_IMAGE_FORMAT 0xC0000000 | 0x007b
+#define NT_STATUS_NO_TOKEN 0xC0000000 | 0x007c
+#define NT_STATUS_BAD_INHERITANCE_ACL 0xC0000000 | 0x007d
+#define NT_STATUS_RANGE_NOT_LOCKED 0xC0000000 | 0x007e
+#define NT_STATUS_DISK_FULL 0xC0000000 | 0x007f
+#define NT_STATUS_SERVER_DISABLED 0xC0000000 | 0x0080
+#define NT_STATUS_SERVER_NOT_DISABLED 0xC0000000 | 0x0081
+#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED 0xC0000000 | 0x0082
+#define NT_STATUS_GUIDS_EXHAUSTED 0xC0000000 | 0x0083
+#define NT_STATUS_INVALID_ID_AUTHORITY 0xC0000000 | 0x0084
+#define NT_STATUS_AGENTS_EXHAUSTED 0xC0000000 | 0x0085
+#define NT_STATUS_INVALID_VOLUME_LABEL 0xC0000000 | 0x0086
+#define NT_STATUS_SECTION_NOT_EXTENDED 0xC0000000 | 0x0087
+#define NT_STATUS_NOT_MAPPED_DATA 0xC0000000 | 0x0088
+#define NT_STATUS_RESOURCE_DATA_NOT_FOUND 0xC0000000 | 0x0089
+#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND 0xC0000000 | 0x008a
+#define NT_STATUS_RESOURCE_NAME_NOT_FOUND 0xC0000000 | 0x008b
+#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED 0xC0000000 | 0x008c
+#define NT_STATUS_FLOAT_DENORMAL_OPERAND 0xC0000000 | 0x008d
+#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO 0xC0000000 | 0x008e
+#define NT_STATUS_FLOAT_INEXACT_RESULT 0xC0000000 | 0x008f
+#define NT_STATUS_FLOAT_INVALID_OPERATION 0xC0000000 | 0x0090
+#define NT_STATUS_FLOAT_OVERFLOW 0xC0000000 | 0x0091
+#define NT_STATUS_FLOAT_STACK_CHECK 0xC0000000 | 0x0092
+#define NT_STATUS_FLOAT_UNDERFLOW 0xC0000000 | 0x0093
+#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO 0xC0000000 | 0x0094
+#define NT_STATUS_INTEGER_OVERFLOW 0xC0000000 | 0x0095
+#define NT_STATUS_PRIVILEGED_INSTRUCTION 0xC0000000 | 0x0096
+#define NT_STATUS_TOO_MANY_PAGING_FILES 0xC0000000 | 0x0097
+#define NT_STATUS_FILE_INVALID 0xC0000000 | 0x0098
+#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED 0xC0000000 | 0x0099
+#define NT_STATUS_INSUFFICIENT_RESOURCES 0xC0000000 | 0x009a
+#define NT_STATUS_DFS_EXIT_PATH_FOUND 0xC0000000 | 0x009b
+#define NT_STATUS_DEVICE_DATA_ERROR 0xC0000000 | 0x009c
+#define NT_STATUS_DEVICE_NOT_CONNECTED 0xC0000000 | 0x009d
+#define NT_STATUS_DEVICE_POWER_FAILURE 0xC0000000 | 0x009e
+#define NT_STATUS_FREE_VM_NOT_AT_BASE 0xC0000000 | 0x009f
+#define NT_STATUS_MEMORY_NOT_ALLOCATED 0xC0000000 | 0x00a0
+#define NT_STATUS_WORKING_SET_QUOTA 0xC0000000 | 0x00a1
+#define NT_STATUS_MEDIA_WRITE_PROTECTED 0xC0000000 | 0x00a2
+#define NT_STATUS_DEVICE_NOT_READY 0xC0000000 | 0x00a3
+#define NT_STATUS_INVALID_GROUP_ATTRIBUTES 0xC0000000 | 0x00a4
+#define NT_STATUS_BAD_IMPERSONATION_LEVEL 0xC0000000 | 0x00a5
+#define NT_STATUS_CANT_OPEN_ANONYMOUS 0xC0000000 | 0x00a6
+#define NT_STATUS_BAD_VALIDATION_CLASS 0xC0000000 | 0x00a7
+#define NT_STATUS_BAD_TOKEN_TYPE 0xC0000000 | 0x00a8
+#define NT_STATUS_BAD_MASTER_BOOT_RECORD 0xC0000000 | 0x00a9
+#define NT_STATUS_INSTRUCTION_MISALIGNMENT 0xC0000000 | 0x00aa
+#define NT_STATUS_INSTANCE_NOT_AVAILABLE 0xC0000000 | 0x00ab
+#define NT_STATUS_PIPE_NOT_AVAILABLE 0xC0000000 | 0x00ac
+#define NT_STATUS_INVALID_PIPE_STATE 0xC0000000 | 0x00ad
+#define NT_STATUS_PIPE_BUSY 0xC0000000 | 0x00ae
+#define NT_STATUS_ILLEGAL_FUNCTION 0xC0000000 | 0x00af
+#define NT_STATUS_PIPE_DISCONNECTED 0xC0000000 | 0x00b0
+#define NT_STATUS_PIPE_CLOSING 0xC0000000 | 0x00b1
+#define NT_STATUS_PIPE_CONNECTED 0xC0000000 | 0x00b2
+#define NT_STATUS_PIPE_LISTENING 0xC0000000 | 0x00b3
+#define NT_STATUS_INVALID_READ_MODE 0xC0000000 | 0x00b4
+#define NT_STATUS_IO_TIMEOUT 0xC0000000 | 0x00b5
+#define NT_STATUS_FILE_FORCED_CLOSED 0xC0000000 | 0x00b6
+#define NT_STATUS_PROFILING_NOT_STARTED 0xC0000000 | 0x00b7
+#define NT_STATUS_PROFILING_NOT_STOPPED 0xC0000000 | 0x00b8
+#define NT_STATUS_COULD_NOT_INTERPRET 0xC0000000 | 0x00b9
+#define NT_STATUS_FILE_IS_A_DIRECTORY 0xC0000000 | 0x00ba
+#define NT_STATUS_NOT_SUPPORTED 0xC0000000 | 0x00bb
+#define NT_STATUS_REMOTE_NOT_LISTENING 0xC0000000 | 0x00bc
+#define NT_STATUS_DUPLICATE_NAME 0xC0000000 | 0x00bd
+#define NT_STATUS_BAD_NETWORK_PATH 0xC0000000 | 0x00be
+#define NT_STATUS_NETWORK_BUSY 0xC0000000 | 0x00bf
+#define NT_STATUS_DEVICE_DOES_NOT_EXIST 0xC0000000 | 0x00c0
+#define NT_STATUS_TOO_MANY_COMMANDS 0xC0000000 | 0x00c1
+#define NT_STATUS_ADAPTER_HARDWARE_ERROR 0xC0000000 | 0x00c2
+#define NT_STATUS_INVALID_NETWORK_RESPONSE 0xC0000000 | 0x00c3
+#define NT_STATUS_UNEXPECTED_NETWORK_ERROR 0xC0000000 | 0x00c4
+#define NT_STATUS_BAD_REMOTE_ADAPTER 0xC0000000 | 0x00c5
+#define NT_STATUS_PRINT_QUEUE_FULL 0xC0000000 | 0x00c6
+#define NT_STATUS_NO_SPOOL_SPACE 0xC0000000 | 0x00c7
+#define NT_STATUS_PRINT_CANCELLED 0xC0000000 | 0x00c8
+#define NT_STATUS_NETWORK_NAME_DELETED 0xC0000000 | 0x00c9
+#define NT_STATUS_NETWORK_ACCESS_DENIED 0xC0000000 | 0x00ca
+#define NT_STATUS_BAD_DEVICE_TYPE 0xC0000000 | 0x00cb
+#define NT_STATUS_BAD_NETWORK_NAME 0xC0000000 | 0x00cc
+#define NT_STATUS_TOO_MANY_NAMES 0xC0000000 | 0x00cd
+#define NT_STATUS_TOO_MANY_SESSIONS 0xC0000000 | 0x00ce
+#define NT_STATUS_SHARING_PAUSED 0xC0000000 | 0x00cf
+#define NT_STATUS_REQUEST_NOT_ACCEPTED 0xC0000000 | 0x00d0
+#define NT_STATUS_REDIRECTOR_PAUSED 0xC0000000 | 0x00d1
+#define NT_STATUS_NET_WRITE_FAULT 0xC0000000 | 0x00d2
+#define NT_STATUS_PROFILING_AT_LIMIT 0xC0000000 | 0x00d3
+#define NT_STATUS_NOT_SAME_DEVICE 0xC0000000 | 0x00d4
+#define NT_STATUS_FILE_RENAMED 0xC0000000 | 0x00d5
+#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED 0xC0000000 | 0x00d6
+#define NT_STATUS_NO_SECURITY_ON_OBJECT 0xC0000000 | 0x00d7
+#define NT_STATUS_CANT_WAIT 0xC0000000 | 0x00d8
+#define NT_STATUS_PIPE_EMPTY 0xC0000000 | 0x00d9
+#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO 0xC0000000 | 0x00da
+#define NT_STATUS_CANT_TERMINATE_SELF 0xC0000000 | 0x00db
+#define NT_STATUS_INVALID_SERVER_STATE 0xC0000000 | 0x00dc
+#define NT_STATUS_INVALID_DOMAIN_STATE 0xC0000000 | 0x00dd
+#define NT_STATUS_INVALID_DOMAIN_ROLE 0xC0000000 | 0x00de
+#define NT_STATUS_NO_SUCH_DOMAIN 0xC0000000 | 0x00df
+#define NT_STATUS_DOMAIN_EXISTS 0xC0000000 | 0x00e0
+#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED 0xC0000000 | 0x00e1
+#define NT_STATUS_OPLOCK_NOT_GRANTED 0xC0000000 | 0x00e2
+#define NT_STATUS_INVALID_OPLOCK_PROTOCOL 0xC0000000 | 0x00e3
+#define NT_STATUS_INTERNAL_DB_CORRUPTION 0xC0000000 | 0x00e4
+#define NT_STATUS_INTERNAL_ERROR 0xC0000000 | 0x00e5
+#define NT_STATUS_GENERIC_NOT_MAPPED 0xC0000000 | 0x00e6
+#define NT_STATUS_BAD_DESCRIPTOR_FORMAT 0xC0000000 | 0x00e7
+#define NT_STATUS_INVALID_USER_BUFFER 0xC0000000 | 0x00e8
+#define NT_STATUS_UNEXPECTED_IO_ERROR 0xC0000000 | 0x00e9
+#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR 0xC0000000 | 0x00ea
+#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR 0xC0000000 | 0x00eb
+#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR 0xC0000000 | 0x00ec
+#define NT_STATUS_NOT_LOGON_PROCESS 0xC0000000 | 0x00ed
+#define NT_STATUS_LOGON_SESSION_EXISTS 0xC0000000 | 0x00ee
+#define NT_STATUS_INVALID_PARAMETER_1 0xC0000000 | 0x00ef
+#define NT_STATUS_INVALID_PARAMETER_2 0xC0000000 | 0x00f0
+#define NT_STATUS_INVALID_PARAMETER_3 0xC0000000 | 0x00f1
+#define NT_STATUS_INVALID_PARAMETER_4 0xC0000000 | 0x00f2
+#define NT_STATUS_INVALID_PARAMETER_5 0xC0000000 | 0x00f3
+#define NT_STATUS_INVALID_PARAMETER_6 0xC0000000 | 0x00f4
+#define NT_STATUS_INVALID_PARAMETER_7 0xC0000000 | 0x00f5
+#define NT_STATUS_INVALID_PARAMETER_8 0xC0000000 | 0x00f6
+#define NT_STATUS_INVALID_PARAMETER_9 0xC0000000 | 0x00f7
+#define NT_STATUS_INVALID_PARAMETER_10 0xC0000000 | 0x00f8
+#define NT_STATUS_INVALID_PARAMETER_11 0xC0000000 | 0x00f9
+#define NT_STATUS_INVALID_PARAMETER_12 0xC0000000 | 0x00fa
+#define NT_STATUS_REDIRECTOR_NOT_STARTED 0xC0000000 | 0x00fb
+#define NT_STATUS_REDIRECTOR_STARTED 0xC0000000 | 0x00fc
+#define NT_STATUS_STACK_OVERFLOW 0xC0000000 | 0x00fd
+#define NT_STATUS_NO_SUCH_PACKAGE 0xC0000000 | 0x00fe
+#define NT_STATUS_BAD_FUNCTION_TABLE 0xC0000000 | 0x00ff
+#define NT_STATUS_DIRECTORY_NOT_EMPTY 0xC0000000 | 0x0101
+#define NT_STATUS_FILE_CORRUPT_ERROR 0xC0000000 | 0x0102
+#define NT_STATUS_NOT_A_DIRECTORY 0xC0000000 | 0x0103
+#define NT_STATUS_BAD_LOGON_SESSION_STATE 0xC0000000 | 0x0104
+#define NT_STATUS_LOGON_SESSION_COLLISION 0xC0000000 | 0x0105
+#define NT_STATUS_NAME_TOO_LONG 0xC0000000 | 0x0106
+#define NT_STATUS_FILES_OPEN 0xC0000000 | 0x0107
+#define NT_STATUS_CONNECTION_IN_USE 0xC0000000 | 0x0108
+#define NT_STATUS_MESSAGE_NOT_FOUND 0xC0000000 | 0x0109
+#define NT_STATUS_PROCESS_IS_TERMINATING 0xC0000000 | 0x010a
+#define NT_STATUS_INVALID_LOGON_TYPE 0xC0000000 | 0x010b
+#define NT_STATUS_NO_GUID_TRANSLATION 0xC0000000 | 0x010c
+#define NT_STATUS_CANNOT_IMPERSONATE 0xC0000000 | 0x010d
+#define NT_STATUS_IMAGE_ALREADY_LOADED 0xC0000000 | 0x010e
+#define NT_STATUS_ABIOS_NOT_PRESENT 0xC0000000 | 0x010f
+#define NT_STATUS_ABIOS_LID_NOT_EXIST 0xC0000000 | 0x0110
+#define NT_STATUS_ABIOS_LID_ALREADY_OWNED 0xC0000000 | 0x0111
+#define NT_STATUS_ABIOS_NOT_LID_OWNER 0xC0000000 | 0x0112
+#define NT_STATUS_ABIOS_INVALID_COMMAND 0xC0000000 | 0x0113
+#define NT_STATUS_ABIOS_INVALID_LID 0xC0000000 | 0x0114
+#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE 0xC0000000 | 0x0115
+#define NT_STATUS_ABIOS_INVALID_SELECTOR 0xC0000000 | 0x0116
+#define NT_STATUS_NO_LDT 0xC0000000 | 0x0117
+#define NT_STATUS_INVALID_LDT_SIZE 0xC0000000 | 0x0118
+#define NT_STATUS_INVALID_LDT_OFFSET 0xC0000000 | 0x0119
+#define NT_STATUS_INVALID_LDT_DESCRIPTOR 0xC0000000 | 0x011a
+#define NT_STATUS_INVALID_IMAGE_NE_FORMAT 0xC0000000 | 0x011b
+#define NT_STATUS_RXACT_INVALID_STATE 0xC0000000 | 0x011c
+#define NT_STATUS_RXACT_COMMIT_FAILURE 0xC0000000 | 0x011d
+#define NT_STATUS_MAPPED_FILE_SIZE_ZERO 0xC0000000 | 0x011e
+#define NT_STATUS_TOO_MANY_OPENED_FILES 0xC0000000 | 0x011f
+#define NT_STATUS_CANCELLED 0xC0000000 | 0x0120
+#define NT_STATUS_CANNOT_DELETE 0xC0000000 | 0x0121
+#define NT_STATUS_INVALID_COMPUTER_NAME 0xC0000000 | 0x0122
+#define NT_STATUS_FILE_DELETED 0xC0000000 | 0x0123
+#define NT_STATUS_SPECIAL_ACCOUNT 0xC0000000 | 0x0124
+#define NT_STATUS_SPECIAL_GROUP 0xC0000000 | 0x0125
+#define NT_STATUS_SPECIAL_USER 0xC0000000 | 0x0126
+#define NT_STATUS_MEMBERS_PRIMARY_GROUP 0xC0000000 | 0x0127
+#define NT_STATUS_FILE_CLOSED 0xC0000000 | 0x0128
+#define NT_STATUS_TOO_MANY_THREADS 0xC0000000 | 0x0129
+#define NT_STATUS_THREAD_NOT_IN_PROCESS 0xC0000000 | 0x012a
+#define NT_STATUS_TOKEN_ALREADY_IN_USE 0xC0000000 | 0x012b
+#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED 0xC0000000 | 0x012c
+#define NT_STATUS_COMMITMENT_LIMIT 0xC0000000 | 0x012d
+#define NT_STATUS_INVALID_IMAGE_LE_FORMAT 0xC0000000 | 0x012e
+#define NT_STATUS_INVALID_IMAGE_NOT_MZ 0xC0000000 | 0x012f
+#define NT_STATUS_INVALID_IMAGE_PROTECT 0xC0000000 | 0x0130
+#define NT_STATUS_INVALID_IMAGE_WIN_16 0xC0000000 | 0x0131
+#define NT_STATUS_LOGON_SERVER_CONFLICT 0xC0000000 | 0x0132
+#define NT_STATUS_TIME_DIFFERENCE_AT_DC 0xC0000000 | 0x0133
+#define NT_STATUS_SYNCHRONIZATION_REQUIRED 0xC0000000 | 0x0134
+#define NT_STATUS_DLL_NOT_FOUND 0xC0000000 | 0x0135
+#define NT_STATUS_OPEN_FAILED 0xC0000000 | 0x0136
+#define NT_STATUS_IO_PRIVILEGE_FAILED 0xC0000000 | 0x0137
+#define NT_STATUS_ORDINAL_NOT_FOUND 0xC0000000 | 0x0138
+#define NT_STATUS_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0139
+#define NT_STATUS_CONTROL_C_EXIT 0xC0000000 | 0x013a
+#define NT_STATUS_LOCAL_DISCONNECT 0xC0000000 | 0x013b
+#define NT_STATUS_REMOTE_DISCONNECT 0xC0000000 | 0x013c
+#define NT_STATUS_REMOTE_RESOURCES 0xC0000000 | 0x013d
+#define NT_STATUS_LINK_FAILED 0xC0000000 | 0x013e
+#define NT_STATUS_LINK_TIMEOUT 0xC0000000 | 0x013f
+#define NT_STATUS_INVALID_CONNECTION 0xC0000000 | 0x0140
+#define NT_STATUS_INVALID_ADDRESS 0xC0000000 | 0x0141
+#define NT_STATUS_DLL_INIT_FAILED 0xC0000000 | 0x0142
+#define NT_STATUS_MISSING_SYSTEMFILE 0xC0000000 | 0x0143
+#define NT_STATUS_UNHANDLED_EXCEPTION 0xC0000000 | 0x0144
+#define NT_STATUS_APP_INIT_FAILURE 0xC0000000 | 0x0145
+#define NT_STATUS_PAGEFILE_CREATE_FAILED 0xC0000000 | 0x0146
+#define NT_STATUS_NO_PAGEFILE 0xC0000000 | 0x0147
+#define NT_STATUS_INVALID_LEVEL 0xC0000000 | 0x0148
+#define NT_STATUS_WRONG_PASSWORD_CORE 0xC0000000 | 0x0149
+#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT 0xC0000000 | 0x014a
+#define NT_STATUS_PIPE_BROKEN 0xC0000000 | 0x014b
+#define NT_STATUS_REGISTRY_CORRUPT 0xC0000000 | 0x014c
+#define NT_STATUS_REGISTRY_IO_FAILED 0xC0000000 | 0x014d
+#define NT_STATUS_NO_EVENT_PAIR 0xC0000000 | 0x014e
+#define NT_STATUS_UNRECOGNIZED_VOLUME 0xC0000000 | 0x014f
+#define NT_STATUS_SERIAL_NO_DEVICE_INITED 0xC0000000 | 0x0150
+#define NT_STATUS_NO_SUCH_ALIAS 0xC0000000 | 0x0151
+#define NT_STATUS_MEMBER_NOT_IN_ALIAS 0xC0000000 | 0x0152
+#define NT_STATUS_MEMBER_IN_ALIAS 0xC0000000 | 0x0153
+#define NT_STATUS_ALIAS_EXISTS 0xC0000000 | 0x0154
+#define NT_STATUS_LOGON_NOT_GRANTED 0xC0000000 | 0x0155
+#define NT_STATUS_TOO_MANY_SECRETS 0xC0000000 | 0x0156
+#define NT_STATUS_SECRET_TOO_LONG 0xC0000000 | 0x0157
+#define NT_STATUS_INTERNAL_DB_ERROR 0xC0000000 | 0x0158
+#define NT_STATUS_FULLSCREEN_MODE 0xC0000000 | 0x0159
+#define NT_STATUS_TOO_MANY_CONTEXT_IDS 0xC0000000 | 0x015a
+#define NT_STATUS_LOGON_TYPE_NOT_GRANTED 0xC0000000 | 0x015b
+#define NT_STATUS_NOT_REGISTRY_FILE 0xC0000000 | 0x015c
+#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x015d
+#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR 0xC0000000 | 0x015e
+#define NT_STATUS_FT_MISSING_MEMBER 0xC0000000 | 0x015f
+#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY 0xC0000000 | 0x0160
+#define NT_STATUS_ILLEGAL_CHARACTER 0xC0000000 | 0x0161
+#define NT_STATUS_UNMAPPABLE_CHARACTER 0xC0000000 | 0x0162
+#define NT_STATUS_UNDEFINED_CHARACTER 0xC0000000 | 0x0163
+#define NT_STATUS_FLOPPY_VOLUME 0xC0000000 | 0x0164
+#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND 0xC0000000 | 0x0165
+#define NT_STATUS_FLOPPY_WRONG_CYLINDER 0xC0000000 | 0x0166
+#define NT_STATUS_FLOPPY_UNKNOWN_ERROR 0xC0000000 | 0x0167
+#define NT_STATUS_FLOPPY_BAD_REGISTERS 0xC0000000 | 0x0168
+#define NT_STATUS_DISK_RECALIBRATE_FAILED 0xC0000000 | 0x0169
+#define NT_STATUS_DISK_OPERATION_FAILED 0xC0000000 | 0x016a
+#define NT_STATUS_DISK_RESET_FAILED 0xC0000000 | 0x016b
+#define NT_STATUS_SHARED_IRQ_BUSY 0xC0000000 | 0x016c
+#define NT_STATUS_FT_ORPHANING 0xC0000000 | 0x016d
+#define NT_STATUS_PARTITION_FAILURE 0xC0000000 | 0x0172
+#define NT_STATUS_INVALID_BLOCK_LENGTH 0xC0000000 | 0x0173
+#define NT_STATUS_DEVICE_NOT_PARTITIONED 0xC0000000 | 0x0174
+#define NT_STATUS_UNABLE_TO_LOCK_MEDIA 0xC0000000 | 0x0175
+#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA 0xC0000000 | 0x0176
+#define NT_STATUS_EOM_OVERFLOW 0xC0000000 | 0x0177
+#define NT_STATUS_NO_MEDIA 0xC0000000 | 0x0178
+#define NT_STATUS_NO_SUCH_MEMBER 0xC0000000 | 0x017a
+#define NT_STATUS_INVALID_MEMBER 0xC0000000 | 0x017b
+#define NT_STATUS_KEY_DELETED 0xC0000000 | 0x017c
+#define NT_STATUS_NO_LOG_SPACE 0xC0000000 | 0x017d
+#define NT_STATUS_TOO_MANY_SIDS 0xC0000000 | 0x017e
+#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x017f
+#define NT_STATUS_KEY_HAS_CHILDREN 0xC0000000 | 0x0180
+#define NT_STATUS_CHILD_MUST_BE_VOLATILE 0xC0000000 | 0x0181
+#define NT_STATUS_DEVICE_CONFIGURATION_ERROR 0xC0000000 | 0x0182
+#define NT_STATUS_DRIVER_INTERNAL_ERROR 0xC0000000 | 0x0183
+#define NT_STATUS_INVALID_DEVICE_STATE 0xC0000000 | 0x0184
+#define NT_STATUS_IO_DEVICE_ERROR 0xC0000000 | 0x0185
+#define NT_STATUS_DEVICE_PROTOCOL_ERROR 0xC0000000 | 0x0186
+#define NT_STATUS_BACKUP_CONTROLLER 0xC0000000 | 0x0187
+#define NT_STATUS_LOG_FILE_FULL 0xC0000000 | 0x0188
+#define NT_STATUS_TOO_LATE 0xC0000000 | 0x0189
+#define NT_STATUS_NO_TRUST_LSA_SECRET 0xC0000000 | 0x018a
+#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 0xC0000000 | 0x018b
+#define NT_STATUS_TRUSTED_DOMAIN_FAILURE 0xC0000000 | 0x018c
+#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 0xC0000000 | 0x018d
+#define NT_STATUS_EVENTLOG_FILE_CORRUPT 0xC0000000 | 0x018e
+#define NT_STATUS_EVENTLOG_CANT_START 0xC0000000 | 0x018f
+#define NT_STATUS_TRUST_FAILURE 0xC0000000 | 0x0190
+#define NT_STATUS_MUTANT_LIMIT_EXCEEDED 0xC0000000 | 0x0191
+#define NT_STATUS_NETLOGON_NOT_STARTED 0xC0000000 | 0x0192
+#define NT_STATUS_ACCOUNT_EXPIRED 0xC0000000 | 0x0193
+#define NT_STATUS_POSSIBLE_DEADLOCK 0xC0000000 | 0x0194
+#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT 0xC0000000 | 0x0195
+#define NT_STATUS_REMOTE_SESSION_LIMIT 0xC0000000 | 0x0196
+#define NT_STATUS_EVENTLOG_FILE_CHANGED 0xC0000000 | 0x0197
+#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT 0xC0000000 | 0x0198
+#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT 0xC0000000 | 0x0199
+#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT 0xC0000000 | 0x019a
+#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT 0xC0000000 | 0x019b
+#define NT_STATUS_FS_DRIVER_REQUIRED 0xC0000000 | 0x019c
+#define NT_STATUS_NO_USER_SESSION_KEY 0xC0000000 | 0x0202
+#define NT_STATUS_USER_SESSION_DELETED 0xC0000000 | 0x0203
+#define NT_STATUS_RESOURCE_LANG_NOT_FOUND 0xC0000000 | 0x0204
+#define NT_STATUS_INSUFF_SERVER_RESOURCES 0xC0000000 | 0x0205
+#define NT_STATUS_INVALID_BUFFER_SIZE 0xC0000000 | 0x0206
+#define NT_STATUS_INVALID_ADDRESS_COMPONENT 0xC0000000 | 0x0207
+#define NT_STATUS_INVALID_ADDRESS_WILDCARD 0xC0000000 | 0x0208
+#define NT_STATUS_TOO_MANY_ADDRESSES 0xC0000000 | 0x0209
+#define NT_STATUS_ADDRESS_ALREADY_EXISTS 0xC0000000 | 0x020a
+#define NT_STATUS_ADDRESS_CLOSED 0xC0000000 | 0x020b
+#define NT_STATUS_CONNECTION_DISCONNECTED 0xC0000000 | 0x020c
+#define NT_STATUS_CONNECTION_RESET 0xC0000000 | 0x020d
+#define NT_STATUS_TOO_MANY_NODES 0xC0000000 | 0x020e
+#define NT_STATUS_TRANSACTION_ABORTED 0xC0000000 | 0x020f
+#define NT_STATUS_TRANSACTION_TIMED_OUT 0xC0000000 | 0x0210
+#define NT_STATUS_TRANSACTION_NO_RELEASE 0xC0000000 | 0x0211
+#define NT_STATUS_TRANSACTION_NO_MATCH 0xC0000000 | 0x0212
+#define NT_STATUS_TRANSACTION_RESPONDED 0xC0000000 | 0x0213
+#define NT_STATUS_TRANSACTION_INVALID_ID 0xC0000000 | 0x0214
+#define NT_STATUS_TRANSACTION_INVALID_TYPE 0xC0000000 | 0x0215
+#define NT_STATUS_NOT_SERVER_SESSION 0xC0000000 | 0x0216
+#define NT_STATUS_NOT_CLIENT_SESSION 0xC0000000 | 0x0217
+#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE 0xC0000000 | 0x0218
+#define NT_STATUS_DEBUG_ATTACH_FAILED 0xC0000000 | 0x0219
+#define NT_STATUS_SYSTEM_PROCESS_TERMINATED 0xC0000000 | 0x021a
+#define NT_STATUS_DATA_NOT_ACCEPTED 0xC0000000 | 0x021b
+#define NT_STATUS_NO_BROWSER_SERVERS_FOUND 0xC0000000 | 0x021c
+#define NT_STATUS_VDM_HARD_ERROR 0xC0000000 | 0x021d
+#define NT_STATUS_DRIVER_CANCEL_TIMEOUT 0xC0000000 | 0x021e
+#define NT_STATUS_REPLY_MESSAGE_MISMATCH 0xC0000000 | 0x021f
+#define NT_STATUS_MAPPED_ALIGNMENT 0xC0000000 | 0x0220
+#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH 0xC0000000 | 0x0221
+#define NT_STATUS_LOST_WRITEBEHIND_DATA 0xC0000000 | 0x0222
+#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID 0xC0000000 | 0x0223
+#define NT_STATUS_PASSWORD_MUST_CHANGE 0xC0000000 | 0x0224
+#define NT_STATUS_NOT_FOUND 0xC0000000 | 0x0225
+#define NT_STATUS_NOT_TINY_STREAM 0xC0000000 | 0x0226
+#define NT_STATUS_RECOVERY_FAILURE 0xC0000000 | 0x0227
+#define NT_STATUS_STACK_OVERFLOW_READ 0xC0000000 | 0x0228
+#define NT_STATUS_FAIL_CHECK 0xC0000000 | 0x0229
+#define NT_STATUS_DUPLICATE_OBJECTID 0xC0000000 | 0x022a
+#define NT_STATUS_OBJECTID_EXISTS 0xC0000000 | 0x022b
+#define NT_STATUS_CONVERT_TO_LARGE 0xC0000000 | 0x022c
+#define NT_STATUS_RETRY 0xC0000000 | 0x022d
+#define NT_STATUS_FOUND_OUT_OF_SCOPE 0xC0000000 | 0x022e
+#define NT_STATUS_ALLOCATE_BUCKET 0xC0000000 | 0x022f
+#define NT_STATUS_PROPSET_NOT_FOUND 0xC0000000 | 0x0230
+#define NT_STATUS_MARSHALL_OVERFLOW 0xC0000000 | 0x0231
+#define NT_STATUS_INVALID_VARIANT 0xC0000000 | 0x0232
+#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND 0xC0000000 | 0x0233
+#define NT_STATUS_ACCOUNT_LOCKED_OUT 0xC0000000 | 0x0234
+#define NT_STATUS_HANDLE_NOT_CLOSABLE 0xC0000000 | 0x0235
+#define NT_STATUS_CONNECTION_REFUSED 0xC0000000 | 0x0236
+#define NT_STATUS_GRACEFUL_DISCONNECT 0xC0000000 | 0x0237
+#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED 0xC0000000 | 0x0238
+#define NT_STATUS_ADDRESS_NOT_ASSOCIATED 0xC0000000 | 0x0239
+#define NT_STATUS_CONNECTION_INVALID 0xC0000000 | 0x023a
+#define NT_STATUS_CONNECTION_ACTIVE 0xC0000000 | 0x023b
+#define NT_STATUS_NETWORK_UNREACHABLE 0xC0000000 | 0x023c
+#define NT_STATUS_HOST_UNREACHABLE 0xC0000000 | 0x023d
+#define NT_STATUS_PROTOCOL_UNREACHABLE 0xC0000000 | 0x023e
+#define NT_STATUS_PORT_UNREACHABLE 0xC0000000 | 0x023f
+#define NT_STATUS_REQUEST_ABORTED 0xC0000000 | 0x0240
+#define NT_STATUS_CONNECTION_ABORTED 0xC0000000 | 0x0241
+#define NT_STATUS_BAD_COMPRESSION_BUFFER 0xC0000000 | 0x0242
+#define NT_STATUS_USER_MAPPED_FILE 0xC0000000 | 0x0243
+#define NT_STATUS_AUDIT_FAILED 0xC0000000 | 0x0244
+#define NT_STATUS_TIMER_RESOLUTION_NOT_SET 0xC0000000 | 0x0245
+#define NT_STATUS_CONNECTION_COUNT_LIMIT 0xC0000000 | 0x0246
+#define NT_STATUS_LOGIN_TIME_RESTRICTION 0xC0000000 | 0x0247
+#define NT_STATUS_LOGIN_WKSTA_RESTRICTION 0xC0000000 | 0x0248
+#define NT_STATUS_IMAGE_MP_UP_MISMATCH 0xC0000000 | 0x0249
+#define NT_STATUS_INSUFFICIENT_LOGON_INFO 0xC0000000 | 0x0250
+#define NT_STATUS_BAD_DLL_ENTRYPOINT 0xC0000000 | 0x0251
+#define NT_STATUS_BAD_SERVICE_ENTRYPOINT 0xC0000000 | 0x0252
+#define NT_STATUS_LPC_REPLY_LOST 0xC0000000 | 0x0253
+#define NT_STATUS_IP_ADDRESS_CONFLICT1 0xC0000000 | 0x0254
+#define NT_STATUS_IP_ADDRESS_CONFLICT2 0xC0000000 | 0x0255
+#define NT_STATUS_REGISTRY_QUOTA_LIMIT 0xC0000000 | 0x0256
+#define NT_STATUS_PATH_NOT_COVERED 0xC0000000 | 0x0257
+#define NT_STATUS_NO_CALLBACK_ACTIVE 0xC0000000 | 0x0258
+#define NT_STATUS_LICENSE_QUOTA_EXCEEDED 0xC0000000 | 0x0259
+#define NT_STATUS_PWD_TOO_SHORT 0xC0000000 | 0x025a
+#define NT_STATUS_PWD_TOO_RECENT 0xC0000000 | 0x025b
+#define NT_STATUS_PWD_HISTORY_CONFLICT 0xC0000000 | 0x025c
+#define NT_STATUS_PLUGPLAY_NO_DEVICE 0xC0000000 | 0x025e
+#define NT_STATUS_UNSUPPORTED_COMPRESSION 0xC0000000 | 0x025f
+#define NT_STATUS_INVALID_HW_PROFILE 0xC0000000 | 0x0260
+#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH 0xC0000000 | 0x0261
+#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND 0xC0000000 | 0x0262
+#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0263
+#define NT_STATUS_RESOURCE_NOT_OWNED 0xC0000000 | 0x0264
+#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265
+#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266
+#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267
+#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE	/* scheduler */
+
+#endif				/* _NTERR_H */
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
new file mode 100644
index 0000000..6facb41
--- /dev/null
+++ b/fs/cifs/ntlmssp.h
@@ -0,0 +1,101 @@
+/*
+ *   fs/cifs/ntlmssp.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#pragma pack(1)
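+/* the structures below describe the on-the-wire NTLMSSP message layout,
+   so they must stay byte packed with no compiler padding */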
+
+#define NTLMSSP_SIGNATURE "NTLMSSP"
+/* Message Types */
+#define NtLmNegotiate     cpu_to_le32(1)
+#define NtLmChallenge     cpu_to_le32(2)
+#define NtLmAuthenticate  cpu_to_le32(3)
+#define UnknownMessage    cpu_to_le32(8)
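+/* Typical exchange: the client sends NtLmNegotiate, the server replies
+   with NtLmChallenge (carrying an 8 byte challenge), and the client
+   completes authentication with NtLmAuthenticate containing the LM and
+   NT challenge responses */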
+
+/* Negotiate Flags */
+#define NTLMSSP_NEGOTIATE_UNICODE       0x01	// Text strings are in unicode
+#define NTLMSSP_NEGOTIATE_OEM           0x02	// Text strings are in OEM
+#define NTLMSSP_REQUEST_TARGET          0x04	// Server returns its auth realm
+#define NTLMSSP_NEGOTIATE_SIGN        0x0010	// Request signature capability
+#define NTLMSSP_NEGOTIATE_SEAL        0x0020	// Request confidentiality
+#define NTLMSSP_NEGOTIATE_DGRAM       0x0040
+#define NTLMSSP_NEGOTIATE_LM_KEY      0x0080 // Use LM session key for sign/seal
+#define NTLMSSP_NEGOTIATE_NTLM        0x0200	// NTLM authentication
+#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000
+#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
+#define NTLMSSP_NEGOTIATE_LOCAL_CALL  0x4000	// client/server on same machine
+#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000	// Sign for all security levels
+#define NTLMSSP_TARGET_TYPE_DOMAIN   0x10000
+#define NTLMSSP_TARGET_TYPE_SERVER   0x20000
+#define NTLMSSP_TARGET_TYPE_SHARE    0x40000
+#define NTLMSSP_NEGOTIATE_NTLMV2     0x80000
+#define NTLMSSP_REQUEST_INIT_RESP   0x100000
+#define NTLMSSP_REQUEST_ACCEPT_RESP 0x200000
+#define NTLMSSP_REQUEST_NOT_NT_KEY  0x400000
+#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
+#define NTLMSSP_NEGOTIATE_128     0x20000000
+#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
+#define NTLMSSP_NEGOTIATE_56      0x80000000
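+/* For example, a client requesting a signed Unicode NTLM session would
+   typically set NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_REQUEST_TARGET |
+   NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_ALWAYS_SIGN (an illustrative
+   combination, not necessarily the exact flags this client sends) */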
+
+/* Although typedefs are not commonly used for structure definitions */
+/* in the Linux kernel, in this particular case they are useful      */
+/* to more closely match the standards document for NTLMSSP from     */
+/* OpenGroup and to make the code more closely match the standard in */
+/* appearance */
+
+typedef struct _SECURITY_BUFFER {
+	__le16 Length;
+	__le16 MaximumLength;
+	__le32 Buffer;		/* offset to buffer */
+} SECURITY_BUFFER;
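+/* Length and MaximumLength are byte counts; Buffer is the byte offset of
+   the described data from the start of the containing NTLMSSP message */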
+
+typedef struct _NEGOTIATE_MESSAGE {
+	__u8 Signature[sizeof (NTLMSSP_SIGNATURE)];
+	__le32 MessageType;     /* 1 */
+	__le32 NegotiateFlags;
+	SECURITY_BUFFER DomainName;	/* RFC 1001 style and ASCII */
+	SECURITY_BUFFER WorkstationName;	/* RFC 1001 and ASCII */
+	char DomainString[0];
+	/* followed by WorkstationString */
+} NEGOTIATE_MESSAGE, *PNEGOTIATE_MESSAGE;
+
+typedef struct _CHALLENGE_MESSAGE {
+	__u8 Signature[sizeof (NTLMSSP_SIGNATURE)];
+	__le32 MessageType;   /* 2 */
+	SECURITY_BUFFER TargetName;
+	__le32 NegotiateFlags;
+	__u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
+	__u8 Reserved[8];
+	SECURITY_BUFFER TargetInfoArray;
+} CHALLENGE_MESSAGE, *PCHALLENGE_MESSAGE;
+
+typedef struct _AUTHENTICATE_MESSAGE {
+	__u8 Signature[sizeof (NTLMSSP_SIGNATURE)];
+	__le32 MessageType;  /* 3 */
+	SECURITY_BUFFER LmChallengeResponse;
+	SECURITY_BUFFER NtChallengeResponse;
+	SECURITY_BUFFER DomainName;
+	SECURITY_BUFFER UserName;
+	SECURITY_BUFFER WorkstationName;
+	SECURITY_BUFFER SessionKey;
+	__le32 NegotiateFlags;
+	char UserString[0];
+} AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
+
+#pragma pack()			/* resume default structure packing */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
new file mode 100644
index 0000000..f8bea39
--- /dev/null
+++ b/fs/cifs/readdir.c
@@ -0,0 +1,867 @@
+/*
+ *   fs/cifs/readdir.c
+ *
+ *   Directory search handling
+ * 
+ *   Copyright (C) International Business Machines  Corp., 2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/smp_lock.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+#include "cifsfs.h"
+
+/* BB fixme - add debug wrappers around this function to disable it fixme BB */
+/* static void dump_cifs_file_struct(struct file *file, char *label)
+{
+	struct cifsFileInfo * cf;
+
+	if(file) {
+		cf = file->private_data;
+		if(cf == NULL) {
+			cFYI(1,("empty cifs private file data"));
+			return;
+		}
+		if(cf->invalidHandle) {
+			cFYI(1,("invalid handle"));
+		}
+		if(cf->srch_inf.endOfSearch) {
+			cFYI(1,("end of search"));
+		}
+		if(cf->srch_inf.emptyDir) {
+			cFYI(1,("empty dir"));
+		}
+		
+	}
+} */
+
+/* Returns one if new inode created (which therefore needs to be hashed) */
+/* Might check in the future if inode number changed so we can rehash inode */
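+/* (the hashing is done by the caller, cifs_filldir, via insert_inode_hash) */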
+static int construct_dentry(struct qstr *qstring, struct file *file,
+	struct inode **ptmp_inode, struct dentry **pnew_dentry)
+{
+	struct dentry *tmp_dentry;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	int rc = 0;
+
+	cFYI(1, ("For %s ", qstring->name));
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+
+	qstring->hash = full_name_hash(qstring->name, qstring->len);
+	tmp_dentry = d_lookup(file->f_dentry, qstring);
+	if (tmp_dentry) {
+		cFYI(0, (" existing dentry with inode 0x%p", tmp_dentry->d_inode));
+		*ptmp_inode = tmp_dentry->d_inode;
+/* BB overwrite old name? i.e. tmp_dentry->d_name and tmp_dentry->d_name.len??*/
+		if(*ptmp_inode == NULL) {
+			*ptmp_inode = new_inode(file->f_dentry->d_sb);
+			if(*ptmp_inode == NULL)
+				return rc;
+			rc = 1;
+			d_instantiate(tmp_dentry, *ptmp_inode);
+		}
+	} else {
+		tmp_dentry = d_alloc(file->f_dentry, qstring);
+		if(tmp_dentry == NULL) {
+			cERROR(1,("Failed allocating dentry"));
+			*ptmp_inode = NULL;
+			return rc;
+		}
+
+		*ptmp_inode = new_inode(file->f_dentry->d_sb);
+		tmp_dentry->d_op = &cifs_dentry_ops;
+		if(*ptmp_inode == NULL)
+			return rc;
+		rc = 1;
+		d_instantiate(tmp_dentry, *ptmp_inode);
+		d_rehash(tmp_dentry);
+	}
+
+	tmp_dentry->d_time = jiffies;
+	*pnew_dentry = tmp_dentry;
+	return rc;
+}
+
+static void fill_in_inode(struct inode *tmp_inode,
+	FILE_DIRECTORY_INFO *pfindData, int *pobject_type)
+{
+	struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
+	__u32 attr = le32_to_cpu(pfindData->ExtFileAttributes);
+	__u64 allocation_size = le64_to_cpu(pfindData->AllocationSize);
+	__u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
+
+	cifsInfo->cifsAttrs = attr;
+	cifsInfo->time = jiffies;
+
+	/* Linux can not store file creation time unfortunately so ignore it */
+	tmp_inode->i_atime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+	tmp_inode->i_mtime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
+	tmp_inode->i_ctime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+	/* treat dos attribute of read-only as read-only mode bit e.g. 555? */
+	/* 2767 perms - indicate mandatory locking */
+		/* BB fill in uid and gid here? with help from winbind? 
+		   or retrieve from NTFS stream extended attribute */
+	if (atomic_read(&cifsInfo->inUse) == 0) {
+		tmp_inode->i_uid = cifs_sb->mnt_uid;
+		tmp_inode->i_gid = cifs_sb->mnt_gid;
+		/* set default mode. will override for dirs below */
+		tmp_inode->i_mode = cifs_sb->mnt_file_mode;
+	}
+
+	cFYI(0,("CIFS FFIRST: Attributes came in as 0x%x",attr));
+	if (attr & ATTR_DIRECTORY) {
+		*pobject_type = DT_DIR;
+		/* override default perms since we do not lock dirs */
+		if(atomic_read(&cifsInfo->inUse) == 0) {
+			tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
+		}
+		tmp_inode->i_mode |= S_IFDIR;
+/* we no longer mark these because we could not follow them */
+/*        } else if (attr & ATTR_REPARSE) {
+                *pobject_type = DT_LNK;
+                tmp_inode->i_mode |= S_IFLNK; */
+	} else {
+		*pobject_type = DT_REG;
+		tmp_inode->i_mode |= S_IFREG;
+		if (attr & ATTR_READONLY)
+			tmp_inode->i_mode &= ~(S_IWUGO);
+	} /* could add code here - to validate if device or weird share type? */
+
+	/* can not fill in nlink here as in the qpathinfo version and Unix search */
+	if (atomic_read(&cifsInfo->inUse) == 0) {
+		atomic_set(&cifsInfo->inUse, 1);
+	}
+
+	if (is_size_safe_to_change(cifsInfo)) {
+		/* can not safely change the file size here if the 
+		client is writing to it due to potential races */
+		i_size_write(tmp_inode, end_of_file);
+
+	/* 512 bytes (2**9) is the fake blocksize that must be used */
+	/* for this calculation, even though the reported blocksize is larger */
+		tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9;
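+		/* e.g. a 4096 byte allocation becomes (511 + 4096) >> 9 = 8 blocks
+		   of 512 bytes; even a 1 byte allocation accounts for 1 block */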
+	}
+
+	if (allocation_size < end_of_file)
+		cFYI(1, ("May be sparse file, allocation less than file size"));
+	cFYI(1,
+	     ("File Size %ld and blocks %ld and blocksize %ld",
+	      (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks,
+	      tmp_inode->i_blksize));
+	if (S_ISREG(tmp_inode->i_mode)) {
+		cFYI(1, (" File inode "));
+		tmp_inode->i_op = &cifs_file_inode_ops;
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+			tmp_inode->i_fop = &cifs_file_direct_ops;
+		else
+			tmp_inode->i_fop = &cifs_file_ops;
+		tmp_inode->i_data.a_ops = &cifs_addr_ops;
+	} else if (S_ISDIR(tmp_inode->i_mode)) {
+		cFYI(1, (" Directory inode"));
+		tmp_inode->i_op = &cifs_dir_inode_ops;
+		tmp_inode->i_fop = &cifs_dir_ops;
+	} else if (S_ISLNK(tmp_inode->i_mode)) {
+		cFYI(1, (" Symbolic Link inode "));
+		tmp_inode->i_op = &cifs_symlink_inode_ops;
+	} else {
+		cFYI(1, (" Init special inode "));
+		init_special_inode(tmp_inode, tmp_inode->i_mode,
+				   tmp_inode->i_rdev);
+	}
+}
+
+static void unix_fill_in_inode(struct inode *tmp_inode,
+	FILE_UNIX_INFO *pfindData, int *pobject_type)
+{
+	struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
+
+	__u32 type = le32_to_cpu(pfindData->Type);
+	__u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes);
+	__u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
+	cifsInfo->time = jiffies;
+	atomic_inc(&cifsInfo->inUse);
+
+	tmp_inode->i_atime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+	tmp_inode->i_mtime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastModificationTime));
+	tmp_inode->i_ctime =
+	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastStatusChange));
+
+	tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
+	if (type == UNIX_FILE) {
+		*pobject_type = DT_REG;
+		tmp_inode->i_mode |= S_IFREG;
+	} else if (type == UNIX_SYMLINK) {
+		*pobject_type = DT_LNK;
+		tmp_inode->i_mode |= S_IFLNK;
+	} else if (type == UNIX_DIR) {
+		*pobject_type = DT_DIR;
+		tmp_inode->i_mode |= S_IFDIR;
+	} else if (type == UNIX_CHARDEV) {
+		*pobject_type = DT_CHR;
+		tmp_inode->i_mode |= S_IFCHR;
+		tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
+				le64_to_cpu(pfindData->DevMinor) & MINORMASK);
+	} else if (type == UNIX_BLOCKDEV) {
+		*pobject_type = DT_BLK;
+		tmp_inode->i_mode |= S_IFBLK;
+		tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
+				le64_to_cpu(pfindData->DevMinor) & MINORMASK);
+	} else if (type == UNIX_FIFO) {
+		*pobject_type = DT_FIFO;
+		tmp_inode->i_mode |= S_IFIFO;
+	} else if (type == UNIX_SOCKET) {
+		*pobject_type = DT_SOCK;
+		tmp_inode->i_mode |= S_IFSOCK;
+	}
+
+	tmp_inode->i_uid = le64_to_cpu(pfindData->Uid);
+	tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
+	tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
+
+	if (is_size_safe_to_change(cifsInfo)) {
+		/* can not safely change the file size here if the 
+		client is writing to it due to potential races */
+		i_size_write(tmp_inode,end_of_file);
+
+	/* 512 bytes (2**9) is the fake blocksize that must be used */
+	/* for this calculation, not the real blocksize */
+		tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
+	}
+
+	if (S_ISREG(tmp_inode->i_mode)) {
+		cFYI(1, ("File inode"));
+		tmp_inode->i_op = &cifs_file_inode_ops;
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+			tmp_inode->i_fop = &cifs_file_direct_ops;
+		else
+			tmp_inode->i_fop = &cifs_file_ops;
+		tmp_inode->i_data.a_ops = &cifs_addr_ops;
+	} else if (S_ISDIR(tmp_inode->i_mode)) {
+		cFYI(1, ("Directory inode"));
+		tmp_inode->i_op = &cifs_dir_inode_ops;
+		tmp_inode->i_fop = &cifs_dir_ops;
+	} else if (S_ISLNK(tmp_inode->i_mode)) {
+		cFYI(1, ("Symbolic Link inode"));
+		tmp_inode->i_op = &cifs_symlink_inode_ops;
+/* tmp_inode->i_fop = *//* do not need to set to anything */
+	} else {
+		cFYI(1, ("Special inode")); 
+		init_special_inode(tmp_inode, tmp_inode->i_mode,
+				   tmp_inode->i_rdev);
+	}
+}
+
+static int initiate_cifs_search(const int xid, struct file *file)
+{
+	int rc = 0;
+	char * full_path;
+	struct cifsFileInfo * cifsFile;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+
+	if(file->private_data == NULL) {
+		file->private_data = 
+			kmalloc(sizeof(struct cifsFileInfo),GFP_KERNEL);
+	}
+
+	if(file->private_data == NULL) {
+		return -ENOMEM;
+	} else {
+		memset(file->private_data,0,sizeof(struct cifsFileInfo));
+	}
+	cifsFile = file->private_data;
+	cifsFile->invalidHandle = TRUE;
+	cifsFile->srch_inf.endOfSearch = FALSE;
+
+	if(file->f_dentry == NULL)
+		return -ENOENT;
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	if(cifs_sb == NULL)
+		return -EINVAL;
+
+	pTcon = cifs_sb->tcon;
+	if(pTcon == NULL)
+		return -EINVAL;
+
+	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	full_path = build_wildcard_path_from_dentry(file->f_dentry);
+	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
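+	/* full_path now holds the directory path with the wildcard search
+	   pattern appended for the FindFirst request */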
+
+	if(full_path == NULL) {
+		return -ENOMEM;
+	}
+
+	cFYI(1, ("Full path: %s start at: %lld ", full_path, file->f_pos));
+
+	/* test for Unix extensions */
+	if (pTcon->ses->capabilities & CAP_UNIX) {
+		cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; 
+	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+		cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+	} else /* not srvinos - BB fixme add check for backlevel? */ {
+		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+	}
+
+	rc = CIFSFindFirst(xid, pTcon,full_path,cifs_sb->local_nls, 
+		&cifsFile->netfid, &cifsFile->srch_inf); 
+	if(rc == 0)
+		cifsFile->invalidHandle = FALSE;
+	kfree(full_path);
+	return rc;
+}
+
+/* return length of unicode string in bytes, not counting the 2 byte null terminator */
+static int cifs_unicode_bytelen(char *str)
+{
+	int len;
+	__le16 * ustr = (__le16 *)str;
+
+	for(len=0;len <= PATH_MAX;len++) {
+		if(ustr[len] == 0)
+			return len << 1;
+	}
+	cFYI(1,("Unicode string longer than PATH_MAX found"));
+	return len << 1;
+}
+
+static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
+{
+	char * new_entry;
+	FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
+
+	new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+	cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
+	/* validate that new_entry is not past end of SMB */
+	if(new_entry >= end_of_smb) {
+		cFYI(1,("search entry %p began after end of SMB %p old entry %p",
+			new_entry,end_of_smb,old_entry)); 
+		return NULL;
+	} else
+		return new_entry;
+
+}
+
+#define UNICODE_DOT cpu_to_le16(0x2e)
+
+/* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */
+static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
+{
+	int rc = 0;
+	char * filename = NULL;
+	int len = 0; 
+
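+	/* the info_level literals below correspond to the levels used by name
+	   elsewhere in this file: 0x202 SMB_FIND_FILE_UNIX,
+	   0x101 SMB_FIND_FILE_DIRECTORY_INFO,
+	   0x102 SMB_FIND_FILE_FULL_DIRECTORY_INFO,
+	   0x105 SMB_FIND_FILE_ID_FULL_DIR_INFO and
+	   0x104 SMB_FIND_FILE_BOTH_DIRECTORY_INFO */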
+	if(cfile->srch_inf.info_level == 0x202) {
+		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		if(cfile->srch_inf.unicode) {
+			len = cifs_unicode_bytelen(filename);
+		} else {
+			/* BB should we make this strnlen of PATH_MAX? */
+			len = strnlen(filename, 5);
+		}
+	} else if(cfile->srch_inf.info_level == 0x101) {
+		FILE_DIRECTORY_INFO * pFindData = 
+			(FILE_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else if(cfile->srch_inf.info_level == 0x102) {
+		FILE_FULL_DIRECTORY_INFO * pFindData = 
+			(FILE_FULL_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else if(cfile->srch_inf.info_level == 0x105) {
+		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else if(cfile->srch_inf.info_level == 0x104) {
+		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else {
+		cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
+	}
+
+	if(filename) {
+		if(cfile->srch_inf.unicode) {
+			__le16 *ufilename = (__le16 *)filename;
+			if(len == 2) {
+				/* check for . */
+				if(ufilename[0] == UNICODE_DOT)
+					rc = 1;
+			} else if(len == 4) {
+				/* check for .. */
+				if((ufilename[0] == UNICODE_DOT)
+				   &&(ufilename[1] == UNICODE_DOT))
+					rc = 2;
+			}
+		} else /* ASCII */ {
+			if(len == 1) {
+				if(filename[0] == '.') 
+					rc = 1;
+			} else if(len == 2) {
+				if((filename[0] == '.') && (filename[1] == '.')) 
+					rc = 2;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* find the corresponding entry in the search */
+/* Note that the SMB server returns search entries for . and .., which
+   complicates the logic here if we choose to parse for them, and we do
+   not assume that they are located in the findfirst return buffer. */
+/* We start counting in the buffer with entry 2 and increment for every
+   entry (do not increment for the . or .. entries) */
+static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
+	struct file *file, char **ppCurrentEntry, int *num_to_ret) 
+{
+	int rc = 0;
+	int pos_in_buf = 0;
+	loff_t first_entry_in_buffer;
+	loff_t index_to_find = file->f_pos;
+	struct cifsFileInfo * cifsFile = file->private_data;
+	/* check if index in the buffer */
+	
+	if((cifsFile == NULL) || (ppCurrentEntry == NULL) || (num_to_ret == NULL))
+		return -ENOENT;
+	
+	*ppCurrentEntry = NULL;
+	first_entry_in_buffer = 
+		cifsFile->srch_inf.index_of_last_entry - 
+			cifsFile->srch_inf.entries_in_buffer;
+/*	dump_cifs_file_struct(file, "In fce ");*/
+	if(index_to_find < first_entry_in_buffer) {
+		/* close and restart search */
+		cFYI(1,("search backing up - close and restart search"));
+		cifsFile->invalidHandle = TRUE;
+		CIFSFindClose(xid, pTcon, cifsFile->netfid);
+		kfree(cifsFile->search_resume_name);
+		cifsFile->search_resume_name = NULL;
+		if(cifsFile->srch_inf.ntwrk_buf_start) {
+			cFYI(1,("freeing SMB ff cache buf on search rewind"));
+			cifs_buf_release(cifsFile->srch_inf.ntwrk_buf_start);
+		}
+		rc = initiate_cifs_search(xid,file);
+		if(rc) {
+			cFYI(1,("error %d reinitiating a search on rewind",rc));
+			return rc;
+		}
+	}
+
+	while((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && 
+	      (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)){
+	 	cFYI(1,("calling findnext2"));
+		rc = CIFSFindNext(xid,pTcon,cifsFile->netfid, &cifsFile->srch_inf);
+		if(rc)
+			return -ENOENT;
+	}
+	if(index_to_find < cifsFile->srch_inf.index_of_last_entry) {
+		/* we found the buffer that contains the entry */
+		/* scan and find it */
+		int i;
+		char * current_entry;
+		char * end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + 
+			smbCalcSize((struct smb_hdr *)
+				cifsFile->srch_inf.ntwrk_buf_start);
+/*	dump_cifs_file_struct(file,"found entry in fce "); */
+		first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
+					- cifsFile->srch_inf.entries_in_buffer;
+		pos_in_buf = index_to_find - first_entry_in_buffer;
+		cFYI(1,("found entry - pos_in_buf %d",pos_in_buf)); 
+		current_entry = cifsFile->srch_inf.srch_entries_start;
+		for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
+			/* step from entry to entry to figure out which one we need to start with */
+			/* if( . or ..)
+				skip */
+			rc = cifs_entry_is_dot(current_entry,cifsFile);
+			if(rc == 1) /* is . or .. so skip */ {
+				cFYI(1,("Entry is .")); /* BB removeme BB */
+				/* continue; */
+			} else if (rc == 2 ) {
+				cFYI(1,("Entry is ..")); /* BB removeme BB */
+				/* continue; */
+			}
+			current_entry = nxt_dir_entry(current_entry,end_of_smb);
+		}
+		if((current_entry == NULL) && (i < pos_in_buf)) {
+			/* BB fixme - check if we should flag this error */
+			cERROR(1,("reached end of buf searching for pos in buf"
+			  " %d index to find %lld rc %d",
+			  pos_in_buf,index_to_find,rc));
+		}
+		rc = 0;
+		*ppCurrentEntry = current_entry;
+	} else {
+		cFYI(1,("index not in buffer - could not findnext into it"));
+		return 0;
+	}
+
+	if(pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
+		cFYI(1,("can not return entries when pos_in_buf beyond last entry"));
+		*num_to_ret = 0;
+	} else
+		*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;
+/*	dump_cifs_file_struct(file, "end fce ");*/
+
+	return rc;
+}
+
+/* inode number and filename returned */
+static int cifs_get_name_from_search_buf(struct qstr *pqst,
+	char *current_entry, __u16 level, unsigned int unicode,
+	struct cifs_sb_info * cifs_sb, ino_t *pinum)
+{
+	int rc = 0;
+	unsigned int len = 0;
+	char * filename;
+	struct nls_table * nlt = cifs_sb->local_nls;
+
+	*pinum = 0;
+
+	if(level == SMB_FIND_FILE_UNIX) {
+		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+
+		filename = &pFindData->FileName[0];
+		if(unicode) {
+			len = cifs_unicode_bytelen(filename);
+		} else {
+			/* BB should we make this strnlen of PATH_MAX? */
+			len = strnlen(filename, PATH_MAX);
+		}
+
+		/* BB fixme - hash low and high 32 bits if not 64 bit arch BB fixme */
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+			*pinum = pFindData->UniqueId;
+	} else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
+		FILE_DIRECTORY_INFO * pFindData = 
+			(FILE_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+		FILE_FULL_DIRECTORY_INFO * pFindData = 
+			(FILE_FULL_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+		*pinum = pFindData->UniqueId;
+	} else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+	} else {
+		cFYI(1,("Unknown findfirst level %d",level));
+		return -EINVAL;
+	}
+	if(unicode) {
+		/* BB fixme - test with long names */
+		/* Note converted filename can be longer than in unicode */
+		pqst->len = cifs_strfromUCS_le((char *)pqst->name,(wchar_t *)filename,len/2,nlt);
+	} else {
+		pqst->name = filename;
+		pqst->len = len;
+	}
+	pqst->hash = full_name_hash(pqst->name,pqst->len);
+/*	cFYI(1,("filldir on %s",pqst->name));  */
+	return rc;
+}
+
+static int cifs_filldir(char *pfindEntry, struct file *file,
+	filldir_t filldir, void *direntry, char *scratch_buf)
+{
+	int rc = 0;
+	struct qstr qstring;
+	struct cifsFileInfo * pCifsF;
+	unsigned obj_type;
+	ino_t  inum;
+	struct cifs_sb_info * cifs_sb;
+	struct inode *tmp_inode;
+	struct dentry *tmp_dentry;
+
+	/* get filename and len into qstring */
+	/* get dentry */
+	/* decide whether to create and populate inode */
+	if((direntry == NULL) || (file == NULL))
+		return -EINVAL;
+
+	pCifsF = file->private_data;
+	
+	if((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
+		return -ENOENT;
+
+	if(file->f_dentry == NULL)
+		return -ENOENT;
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+
+	qstring.name = scratch_buf;
+	rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
+			pCifsF->srch_inf.info_level,
+			pCifsF->srch_inf.unicode,cifs_sb,
+			&inum /* returned */);
+
+	if(rc)
+		return rc;
+
+	rc = construct_dentry(&qstring,file,&tmp_inode, &tmp_dentry);
+	if((tmp_inode == NULL) || (tmp_dentry == NULL))
+		return -ENOMEM;
+
+	if(rc) {
+		/* inode created, we need to hash it with right inode number */
+		if(inum != 0) {
+			/* BB fixme - hash the 2 32 quantities bits together if necessary BB */
+			tmp_inode->i_ino = inum;
+		}
+		insert_inode_hash(tmp_inode);
+	}
+
+	if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
+		unix_fill_in_inode(tmp_inode,(FILE_UNIX_INFO *)pfindEntry,&obj_type);
+	} else {
+		fill_in_inode(tmp_inode,(FILE_DIRECTORY_INFO *)pfindEntry,&obj_type);
+	}
+	
+	rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,tmp_inode->i_ino,obj_type);
+	if(rc) {
+		cFYI(1,("filldir rc = %d",rc));
+	}
+
+	dput(tmp_dentry);
+	return rc;
+}
+
+static int cifs_save_resume_key(const char *current_entry,
+	struct cifsFileInfo *cifsFile)
+{
+	int rc = 0;
+	unsigned int len = 0;
+	__u16 level;
+	char * filename;
+
+	if((cifsFile == NULL) || (current_entry == NULL))
+		return -EINVAL;
+
+	level = cifsFile->srch_inf.info_level;
+
+	if(level == SMB_FIND_FILE_UNIX) {
+		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+
+		filename = &pFindData->FileName[0];
+		if(cifsFile->srch_inf.unicode) {
+			len = cifs_unicode_bytelen(filename);
+		} else {
+			/* BB should we make this strnlen of PATH_MAX? */
+			len = strnlen(filename, PATH_MAX);
+		}
+		cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
+	} else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
+		FILE_DIRECTORY_INFO * pFindData = 
+			(FILE_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+	} else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+		FILE_FULL_DIRECTORY_INFO * pFindData = 
+			(FILE_FULL_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+	} else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+	} else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
+		filename = &pFindData->FileName[0];
+		len = le32_to_cpu(pFindData->FileNameLength);
+		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+	} else {
+		cFYI(1,("Unknown findfirst level %d",level));
+		return -EINVAL;
+	}
+	cifsFile->srch_inf.resume_name_len = len;
+	cifsFile->srch_inf.presume_name = filename;
+	return rc;
+}
+
+int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
+{
+	int rc = 0;
+	int xid,i;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct cifsFileInfo *cifsFile = NULL;
+	char * current_entry;
+	int num_to_fill = 0;
+	char * tmp_buf = NULL;
+	char * end_of_smb;
+
+	xid = GetXid();
+
+	if(file->f_dentry == NULL) {
+		FreeXid(xid);
+		return -EIO;
+	}
+/*	dump_cifs_file_struct(file, "Begin rdir "); */
+
+	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+	pTcon = cifs_sb->tcon;
+	if(pTcon == NULL)
+		return -EINVAL;
+
+/*	cFYI(1,("readdir2 pos: %lld",file->f_pos)); */
+
+	switch ((int) file->f_pos) {
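+	/* offsets 0 and 1 would be the . and .. entries (emission of those is
+	   currently commented out below); the cases deliberately fall through
+	   so that every starting offset reaches the common search path */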
+	case 0:
+		/*if (filldir(direntry, ".", 1, file->f_pos,
+		     file->f_dentry->d_inode->i_ino, DT_DIR) < 0) {
+			cERROR(1, ("Filldir for current dir failed "));
+			rc = -ENOMEM;
+			break;
+		}
+		file->f_pos++; */
+	case 1:
+		/* if (filldir(direntry, "..", 2, file->f_pos,
+		     file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
+			cERROR(1, ("Filldir for parent dir failed "));
+			rc = -ENOMEM;
+			break;
+		}
+		file->f_pos++; */
+	case 2:
+		/* 1) If search is active, 
+			is in current search buffer? 
+			if it before then restart search
+			if after then keep searching till find it */
+
+		if(file->private_data == NULL) {
+			rc = initiate_cifs_search(xid,file);
+			cFYI(1,("initiate cifs search rc %d",rc));
+			if(rc) {
+				FreeXid(xid);
+				return rc;
+			}
+		}
+	default:
+		if(file->private_data == NULL) {
+			rc = -EINVAL;
+			FreeXid(xid);
+			return rc;
+		}
+		cifsFile = file->private_data;
+		if (cifsFile->srch_inf.endOfSearch) {
+			if(cifsFile->srch_inf.emptyDir) {
+				cFYI(1, ("End of search, empty dir"));
+				rc = 0;
+				break;
+			}
+		} /* else {
+			cifsFile->invalidHandle = TRUE;
+			CIFSFindClose(xid, pTcon, cifsFile->netfid);
+		} 
+		kfree(cifsFile->search_resume_name);
+		cifsFile->search_resume_name = NULL; */
+
+		/* BB account for . and .. in f_pos as special case */
+		/* dump_cifs_file_struct(file, "rdir after default ");*/
+
+		rc = find_cifs_entry(xid,pTcon, file,
+				&current_entry,&num_to_fill);
+		if(rc) {
+			cFYI(1,("fce error %d",rc)); 
+			goto rddir2_exit;
+		} else if (current_entry != NULL) {
+			cFYI(1,("entry %lld found",file->f_pos));
+		} else {
+			cFYI(1,("could not find entry"));
+			goto rddir2_exit;
+		}
+		cFYI(1,("loop through %d times filling dir for net buf %p",
+			num_to_fill,cifsFile->srch_inf.ntwrk_buf_start)); 
+		end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
+			smbCalcSize((struct smb_hdr *)
+				    cifsFile->srch_inf.ntwrk_buf_start);
+		tmp_buf = kmalloc(NAME_MAX+1,GFP_KERNEL);
+		for(i=0;(i<num_to_fill) && (rc == 0);i++) {
+			if(current_entry == NULL) {
+				/* evaluate whether this case is an error */
+				cERROR(1,("past end of SMB num to fill %d i %d",
+					  num_to_fill, i));
+				break;
+			}
+
+			/* BB FIXME - need to enable the below code BB */
+
+		/* if((!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) ||
+			   (cifsFile->srch_inf.info_level != 
+				   something that supports server inodes)) {
+				create dentry
+				create inode
+				fill in inode new_inode (getting local i_ino)
+			}
+			also create local inode for performance reasons (so we 
+			have a cache of inode metadata) unless this new mount 
+			parm says otherwise */
+
+			rc = cifs_filldir(current_entry, file, 
+					filldir, direntry,tmp_buf);
+			file->f_pos++;
+			if(file->f_pos == cifsFile->srch_inf.index_of_last_entry) {
+				cFYI(1,("last entry in buf at pos %lld %s",
+					file->f_pos,tmp_buf)); /* BB removeme BB */
+				cifs_save_resume_key(current_entry,cifsFile);
+				break;
+			} else 
+				current_entry = nxt_dir_entry(current_entry,end_of_smb);
+		}
+		kfree(tmp_buf);
+		break;
+	} /* end switch */
+
+rddir2_exit:
+	/* dump_cifs_file_struct(file, "end rdir ");  */
+	FreeXid(xid);
+	return rc;
+}
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
new file mode 100644
index 0000000..806c0ed
--- /dev/null
+++ b/fs/cifs/rfc1002pdu.h
@@ -0,0 +1,79 @@
+/*
+ *   fs/cifs/rfc1002pdu.h
+ *
+ *   Protocol Data Unit definitions for RFC 1001/1002 support
+ *
+ *   Copyright (c) International Business Machines  Corp., 2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#pragma pack(1)
+
+/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
+
+	/* RFC 1002 session packet types */
+#define RFC1002_SESSION_MESSAGE 0x00
+#define RFC1002_SESSION_REQUEST  0x81
+#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
+#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
+#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
+#define RFC1002_SESSION_KEEP_ALIVE 0x85
+
+	/* RFC 1002 flags (only one defined) */
+#define RFC1002_LENGTH_EXTEND 0x80 /* high order bit of length (ie +64K) */
+
+struct rfc1002_session_packet {
+	__u8	type;
+	__u8	flags;
+	__u16	length;
+	union {
+		struct {
+			__u8 called_len;
+			__u8 called_name[32];
+			__u8 scope1; /* null */
+			__u8 calling_len;
+			__u8 calling_name[32];
+			__u8 scope2; /* null */
+		} session_req;
+		struct {
+			__u32 retarget_ip_addr;
+			__u16 port;
+		} retarget_resp;
+		__u8 neg_ses_resp_error_code;
+		/* POSITIVE_SESSION_RESPONSE packet does not include trailer.
+		SESSION_KEEP_ALIVE packet also does not include a trailer.
+		Trailer for the SESSION_MESSAGE packet is SMB/CIFS header */
+	} trailer;
+};
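+
+/* The called_name and calling_name fields carry the RFC 1001 encoded form
+   of the 16 character space padded NetBIOS name (such as
+   DEFAULT_CIFS_CALLED_NAME below): each of the 16 bytes is split into two
+   nibbles and 'A' is added to each nibble, giving the 32 byte on the wire
+   form. */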
+
+/* Negative Session Response error codes */
+#define RFC1002_NOT_LISTENING_CALLED  0x80 /* not listening on called name */
+#define RFC1002_NOT_LISTENING_CALLING 0x81 /* not listening on calling name */
+#define RFC1002_NOT_PRESENT           0x82 /* called name not present */
+#define RFC1002_INSUFFICIENT_RESOURCE 0x83
+#define RFC1002_UNSPECIFIED_ERROR     0x8F
+
+/* RFC 1002 Datagram service packets are not defined here as they
+are not needed for the network filesystem client unless we plan on
+implementing broadcast resolution of the server ip address (from
+server netbios name). Currently server names are resolved only via DNS
+(tcp name) or ip address or an /etc/hosts equivalent mapping to ip address.*/
+
+#define DEFAULT_CIFS_CALLED_NAME  "*SMBSERVER      "
+
+#pragma pack()		/* resume default structure packing */
+
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
new file mode 100644
index 0000000..efaa044
--- /dev/null
+++ b/fs/cifs/smbdes.c
@@ -0,0 +1,412 @@
+/* 
+   Unix SMB/Netbios implementation.
+   Version 1.9.
+
+   a partial implementation of DES designed for use in the 
+   SMB authentication protocol
+
+   Copyright (C) Andrew Tridgell 1998
+   Modified by Steve French (sfrench@us.ibm.com) 2002,2004
+   
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+   
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+   
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/* NOTES: 
+
+   This code makes no attempt to be fast! In fact, it is a very
+   slow implementation 
+
+   This code is NOT a complete DES implementation. It implements only
+   the minimum necessary for SMB authentication, as used by all SMB
+   products (including every copy of Microsoft Windows95 ever sold)
+
+   In particular, it can only do an unchained forward DES pass. This
+   means it is not possible to use this code for encryption/decryption
+   of data, instead it is only useful as a "hash" algorithm.
+
+   There is no entry point into this code that allows normal DES operation.
+
+   I believe this means that this code does not come under ITAR
+   regulations but this is NOT a legal opinion. If you are concerned
+   about the applicability of ITAR regulations to this code then you
+   should confirm it for yourself (and maybe let me know if you come
+   up with a different answer to the one above)
+*/
+#include <linux/slab.h>
+#include "cifsencrypt.h"
+#define uchar unsigned char
+
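+/* The tables below are the standard DES tables: perm1 is PC-1 (the key
+   permutation), perm2 is PC-2 (round key selection), perm3 is the initial
+   permutation IP, perm4 is the expansion function E, perm5 is the P
+   permutation, perm6 is the final permutation IP^-1, sc holds the per
+   round key rotation counts and sbox holds the eight DES S-boxes. */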
+static uchar perm1[56] = { 57, 49, 41, 33, 25, 17, 9,
+	1, 58, 50, 42, 34, 26, 18,
+	10, 2, 59, 51, 43, 35, 27,
+	19, 11, 3, 60, 52, 44, 36,
+	63, 55, 47, 39, 31, 23, 15,
+	7, 62, 54, 46, 38, 30, 22,
+	14, 6, 61, 53, 45, 37, 29,
+	21, 13, 5, 28, 20, 12, 4
+};
+
+static uchar perm2[48] = { 14, 17, 11, 24, 1, 5,
+	3, 28, 15, 6, 21, 10,
+	23, 19, 12, 4, 26, 8,
+	16, 7, 27, 20, 13, 2,
+	41, 52, 31, 37, 47, 55,
+	30, 40, 51, 45, 33, 48,
+	44, 49, 39, 56, 34, 53,
+	46, 42, 50, 36, 29, 32
+};
+
+static uchar perm3[64] = { 58, 50, 42, 34, 26, 18, 10, 2,
+	60, 52, 44, 36, 28, 20, 12, 4,
+	62, 54, 46, 38, 30, 22, 14, 6,
+	64, 56, 48, 40, 32, 24, 16, 8,
+	57, 49, 41, 33, 25, 17, 9, 1,
+	59, 51, 43, 35, 27, 19, 11, 3,
+	61, 53, 45, 37, 29, 21, 13, 5,
+	63, 55, 47, 39, 31, 23, 15, 7
+};
+
+static uchar perm4[48] = { 32, 1, 2, 3, 4, 5,
+	4, 5, 6, 7, 8, 9,
+	8, 9, 10, 11, 12, 13,
+	12, 13, 14, 15, 16, 17,
+	16, 17, 18, 19, 20, 21,
+	20, 21, 22, 23, 24, 25,
+	24, 25, 26, 27, 28, 29,
+	28, 29, 30, 31, 32, 1
+};
+
+static uchar perm5[32] = { 16, 7, 20, 21,
+	29, 12, 28, 17,
+	1, 15, 23, 26,
+	5, 18, 31, 10,
+	2, 8, 24, 14,
+	32, 27, 3, 9,
+	19, 13, 30, 6,
+	22, 11, 4, 25
+};
+
+static uchar perm6[64] = { 40, 8, 48, 16, 56, 24, 64, 32,
+	39, 7, 47, 15, 55, 23, 63, 31,
+	38, 6, 46, 14, 54, 22, 62, 30,
+	37, 5, 45, 13, 53, 21, 61, 29,
+	36, 4, 44, 12, 52, 20, 60, 28,
+	35, 3, 43, 11, 51, 19, 59, 27,
+	34, 2, 42, 10, 50, 18, 58, 26,
+	33, 1, 41, 9, 49, 17, 57, 25
+};
+
+static uchar sc[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 };
+
+static uchar sbox[8][4][16] = {
+	{{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7},
+	 {0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8},
+	 {4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0},
+	 {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13}},
+
+	{{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10},
+	 {3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5},
+	 {0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15},
+	 {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9}},
+
+	{{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8},
+	 {13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1},
+	 {13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7},
+	 {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12}},
+
+	{{7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15},
+	 {13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9},
+	 {10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4},
+	 {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14}},
+
+	{{2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9},
+	 {14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6},
+	 {4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14},
+	 {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3}},
+
+	{{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11},
+	 {10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8},
+	 {9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6},
+	 {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13}},
+
+	{{4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1},
+	 {13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6},
+	 {1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2},
+	 {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12}},
+
+	{{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7},
+	 {1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2},
+	 {7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8},
+	 {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11}}
+};
+
+static void
+permute(char *out, char *in, uchar * p, int n)
+{
+	int i;
+	for (i = 0; i < n; i++)
+		out[i] = in[p[i] - 1];
+}
+
+static void
+lshift(char *d, int count, int n)
+{
+	char out[64];
+	int i;
+	for (i = 0; i < n; i++)
+		out[i] = d[(i + count) % n];
+	for (i = 0; i < n; i++)
+		d[i] = out[i];
+}
+
+static void
+concat(char *out, char *in1, char *in2, int l1, int l2)
+{
+	while (l1--)
+		*out++ = *in1++;
+	while (l2--)
+		*out++ = *in2++;
+}
+
+static void
+xor(char *out, char *in1, char *in2, int n)
+{
+	int i;
+	for (i = 0; i < n; i++)
+		out[i] = in1[i] ^ in2[i];
+}
+
+static void
+dohash(char *out, char *in, char *key, int forw)
+{
+	int i, j, k;
+	char *pk1;
+	char c[28];
+	char d[28];
+	char *cd;
+	char ki[16][48];
+	char *pd1;
+	char l[32], r[32];
+	char *rl;
+
+	/* Have to reduce stack usage */
+	pk1 = kmalloc(56+56+64+64,GFP_KERNEL);
+	if(pk1 == NULL)
+		return;
+
+	cd = pk1 + 56;
+	pd1= cd  + 56;
+	rl = pd1 + 64;
+
+	permute(pk1, key, perm1, 56);
+
+	for (i = 0; i < 28; i++)
+		c[i] = pk1[i];
+	for (i = 0; i < 28; i++)
+		d[i] = pk1[i + 28];
+
+	for (i = 0; i < 16; i++) {
+		lshift(c, sc[i], 28);
+		lshift(d, sc[i], 28);
+
+		concat(cd, c, d, 28, 28);
+		permute(ki[i], cd, perm2, 48);
+	}
+
+	permute(pd1, in, perm3, 64);
+
+	for (j = 0; j < 32; j++) {
+		l[j] = pd1[j];
+		r[j] = pd1[j + 32];
+	}
+
+	for (i = 0; i < 16; i++) {
+		char *er;  /* er[48]  */
+		char *erk; /* erk[48] */
+		char b[8][6];
+		char *cb;  /* cb[32]  */
+		char *pcb; /* pcb[32] */
+		char *r2;  /* r2[32]  */
+
+		er = kmalloc(48+48+32+32+32, GFP_KERNEL);
+		if(er == NULL) {
+			kfree(pk1);
+			return;
+		}
+		erk = er+48;
+		cb  = erk+48;
+		pcb = cb+32;
+		r2  = pcb+32;
+
+		permute(er, r, perm4, 48);
+
+		xor(erk, er, ki[forw ? i : 15 - i], 48);
+
+		for (j = 0; j < 8; j++)
+			for (k = 0; k < 6; k++)
+				b[j][k] = erk[j * 6 + k];
+
+		for (j = 0; j < 8; j++) {
+			int m, n;
+			m = (b[j][0] << 1) | b[j][5];
+
+			n = (b[j][1] << 3) | (b[j][2] << 2) | (b[j][3] <<
+							       1) | b[j][4];
+
+			for (k = 0; k < 4; k++)
+				b[j][k] =
+				    (sbox[j][m][n] & (1 << (3 - k))) ? 1 : 0;
+		}
+
+		for (j = 0; j < 8; j++)
+			for (k = 0; k < 4; k++)
+				cb[j * 4 + k] = b[j][k];
+		permute(pcb, cb, perm5, 32);
+
+		xor(r2, l, pcb, 32);
+
+		for (j = 0; j < 32; j++)
+			l[j] = r[j];
+
+		for (j = 0; j < 32; j++)
+			r[j] = r2[j];
+
+		kfree(er);
+	}
+
+	concat(rl, r, l, 32, 32);
+
+	permute(out, rl, perm6, 64);
+	kfree(pk1);
+}
+
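+/* Spread the 7 byte str across the 8 DES key bytes, 7 bits per byte; the
+   low order (parity) bit of each resulting key byte is left clear. */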
+static void
+str_to_key(unsigned char *str, unsigned char *key)
+{
+	int i;
+
+	key[0] = str[0] >> 1;
+	key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
+	key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
+	key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
+	key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
+	key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
+	key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
+	key[7] = str[6] & 0x7F;
+	for (i = 0; i < 8; i++) {
+		key[i] = (key[i] << 1);
+	}
+}
+
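+/* DES encrypt (or decrypt when forw is zero) the 8 byte block "in" with
+   the 7 byte key: the input and expanded key are unpacked into one bit
+   per byte arrays, run through dohash, and the 64 result bits are packed
+   back into the 8 byte "out". */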
+static void
+smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw)
+{
+	int i;
+	char *outb; /* outb[64] */
+	char *inb;  /* inb[64]  */
+	char *keyb; /* keyb[64] */
+	unsigned char key2[8];
+
+	outb = kmalloc(64 * 3,GFP_KERNEL);
+	if(outb == NULL)
+		return;
+
+	inb  = outb + 64;
+	keyb = inb +  64;
+
+	str_to_key(key, key2);
+
+	for (i = 0; i < 64; i++) {
+		inb[i] = (in[i / 8] & (1 << (7 - (i % 8)))) ? 1 : 0;
+		keyb[i] = (key2[i / 8] & (1 << (7 - (i % 8)))) ? 1 : 0;
+		outb[i] = 0;
+	}
+
+	dohash(outb, inb, keyb, forw);
+
+	for (i = 0; i < 8; i++) {
+		out[i] = 0;
+	}
+
+	for (i = 0; i < 64; i++) {
+		if (outb[i])
+			out[i / 8] |= (1 << (7 - (i % 8)));
+	}
+	kfree(outb);
+}
+
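+/* E_P16 computes the 16 byte LanMan hash of a 14 byte padded password:
+   the well known constant "KGS!@#$%" (sp8 below) is DES encrypted with
+   each 7 byte half of the password to produce the two 8 byte halves of
+   the hash. */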
+void
+E_P16(unsigned char *p14, unsigned char *p16)
+{
+	unsigned char sp8[8] =
+	    { 0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25 };
+	smbhash(p16, sp8, p14, 1);
+	smbhash(p16 + 8, sp8, p14 + 7, 1);
+}
+
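+/* E_P24 builds the 24 byte challenge response: the 8 byte server
+   challenge c8 is DES encrypted with each of the three 7 byte pieces of
+   the 21 byte padded hash p21. */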
+void
+E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24)
+{
+	smbhash(p24, c8, p21, 1);
+	smbhash(p24 + 8, c8, p21 + 7, 1);
+	smbhash(p24 + 16, c8, p21 + 14, 1);
+}
+
+void
+D_P16(unsigned char *p14, unsigned char *in, unsigned char *out)
+{
+	smbhash(out, in, p14, 0);
+	smbhash(out + 8, in + 8, p14 + 7, 0);
+}
+
+void
+E_old_pw_hash(unsigned char *p14, unsigned char *in, unsigned char *out)
+{
+	smbhash(out, in, p14, 1);
+	smbhash(out + 8, in + 8, p14 + 7, 1);
+}
+#if 0
+/* these routines are currently unneeded, but may be
+	needed later */
+void
+cred_hash1(unsigned char *out, unsigned char *in, unsigned char *key)
+{
+	unsigned char buf[8];
+
+	smbhash(buf, in, key, 1);
+	smbhash(out, buf, key + 9, 1);
+}
+
+void
+cred_hash2(unsigned char *out, unsigned char *in, unsigned char *key)
+{
+	unsigned char buf[8];
+	static unsigned char key2[8];
+
+	smbhash(buf, in, key, 1);
+	key2[0] = key[7];
+	smbhash(out, buf, key2, 1);
+}
+
+void
+cred_hash3(unsigned char *out, unsigned char *in, unsigned char *key, int forw)
+{
+	static unsigned char key2[8];
+
+	smbhash(out, in, key, forw);
+	key2[0] = key[7];
+	smbhash(out + 8, in + 8, key2, forw);
+}
+#endif /* unneeded routines */
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
new file mode 100644
index 0000000..6103bcd
--- /dev/null
+++ b/fs/cifs/smbencrypt.c
@@ -0,0 +1,285 @@
+/* 
+   Unix SMB/Netbios implementation.
+   Version 1.9.
+   SMB parameters and setup
+   Copyright (C) Andrew Tridgell 1992-2000
+   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+   Modified by Jeremy Allison 1995.
+   Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003
+   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
+   
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+   
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+   
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include "cifs_unicode.h"
+#include "cifspdu.h"
+#include "md5.h"
+#include "cifs_debug.h"
+#include "cifsencrypt.h"
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+/* following came from the other byteorder.h to avoid include conflicts */
+#define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
+#define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
+#define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
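+/* SSVAL stores a 16 bit value into buf at offset pos in little endian
+   byte order regardless of host endianness (used below when converting
+   the password to UCS-2LE before hashing). */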
+
+/*The following definitions come from  libsmb/smbencrypt.c  */
+
+void SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+void E_md4hash(const unsigned char *passwd, unsigned char *p16);
+void nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16]);
+static void SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8,
+		   unsigned char p24[24]);
+void NTLMSSPOWFencrypt(unsigned char passwd[8],
+		       unsigned char *ntlmchalresp, unsigned char p24[24]);
+void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+
+/*
+   This implements the X/Open SMB password encryption
+   It takes a password, an 8 byte "crypt key" and puts 24 bytes of
+   encrypted password into p24 */
+/* Note that password must be uppercased and null terminated */
+void
+SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
+{
+	unsigned char p14[15], p21[21];
+
+	memset(p21, '\0', 21);
+	memset(p14, '\0', 14);
+	strncpy((char *) p14, (char *) passwd, 14);
+
+/*	strupper((char *)p14); *//* BB at least uppercase the easy range */
+	E_P16(p14, p21);
+
+	SMBOWFencrypt(p21, c8, p24);
+	
+	memset(p14,0,15);
+	memset(p21,0,21);
+}
+
+/* Routines for Windows NT MD4 Hash functions. */
+static int
+_my_wcslen(__u16 * str)
+{
+	int len = 0;
+	while (*str++ != 0)
+		len++;
+	return len;
+}
+
+/*
+ * Convert a string into an NT UNICODE string.
+ * Note that regardless of processor type 
+ * this must be in intel (little-endian)
+ * format.
+ */
+
+static int
+_my_mbstowcs(__u16 * dst, const unsigned char *src, int len)
+{				/* not a very good conversion routine - change/fix */
+	int i;
+	__u16 val;
+
+	for (i = 0; i < len; i++) {
+		val = *src;
+		SSVAL(dst, 0, val);
+		dst++;
+		src++;
+		if (val == 0)
+			break;
+	}
+	return i;
+}
+
+/* 
+ * Creates the MD4 Hash of the users password in NT UNICODE.
+ */
+
+void
+E_md4hash(const unsigned char *passwd, unsigned char *p16)
+{
+	int len;
+	__u16 wpwd[129];
+
+	/* Password cannot be longer than 128 characters */
+	if(passwd) {
+		len = strlen((char *) passwd);
+		if (len > 128) {
+			len = 128;
+		}
+		/* Password must be converted to NT unicode */
+		_my_mbstowcs(wpwd, passwd, len);
+	} else
+		len = 0;
+
+	wpwd[len] = 0;	/* Ensure string is null terminated */
+	/* Calculate length in bytes */
+	len = _my_wcslen(wpwd) * sizeof (__u16);
+
+	mdfour(p16, (unsigned char *) wpwd, len);
+	memset(wpwd,0,129 * 2);
+}
+
+/* Does both the NT and LM owfs of a user's password */
+void
+nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
+{
+	char passwd[514];
+
+	memset(passwd, '\0', 514);
+	if (strlen(pwd) < 513)
+		strcpy(passwd, pwd);
+	else
+		memcpy(passwd, pwd, 512);
+	/* Calculate the MD4 hash (NT compatible) of the password */
+	memset(nt_p16, '\0', 16);
+	E_md4hash(passwd, nt_p16);
+
+	/* Mangle the passwords into Lanman format */
+	passwd[14] = '\0';
+/*	strupper(passwd); */
+
+	/* Calculate the SMB (lanman) hash functions of the password */
+
+	memset(p16, '\0', 16);
+	E_P16((unsigned char *) passwd, (unsigned char *) p16);
+
+	/* clear out local copy of user's password (just being paranoid). */
+	memset(passwd, '\0', sizeof (passwd));
+}
+
+/* Does the NTLMv2 owfs of a user's password */
+#if 0  /* function not needed yet - but will be soon */
+static void
+ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
+		const char *domain_n, unsigned char kr_buf[16],
+		const struct nls_table *nls_codepage)
+{
+	wchar_t * user_u;
+	wchar_t * dom_u;
+	int user_l, domain_l;
+	struct HMACMD5Context ctx;
+
+	/* might as well do one alloc to hold both (user_u and dom_u) */
+	user_u = kmalloc(2048 * sizeof(wchar_t),GFP_KERNEL); 
+	if(user_u == NULL)
+		return;
+	dom_u = user_u + 1024;
+    
+	/* push_ucs2(NULL, user_u, user_n, (user_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
+	   push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */
+
+	/* BB user and domain may need to be uppercased */
+	user_l = cifs_strtoUCS(user_u, user_n, 511, nls_codepage);
+	domain_l = cifs_strtoUCS(dom_u, domain_n, 511, nls_codepage);
+
+	user_l++;		/* trailing null */
+	domain_l++;
+
+	hmac_md5_init_limK_to_64(owf, 16, &ctx);
+	hmac_md5_update((const unsigned char *) user_u, user_l * 2, &ctx);
+	hmac_md5_update((const unsigned char *) dom_u, domain_l * 2, &ctx);
+	hmac_md5_final(kr_buf, &ctx);
+
+	kfree(user_u);
+}
+#endif 
+
+/* Does the des encryption from the NT or LM MD4 hash. */
+static void
+SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8,
+	      unsigned char p24[24])
+{
+	unsigned char p21[21];
+
+	memset(p21, '\0', 21);
+
+	memcpy(p21, passwd, 16);
+	E_P24(p21, c8, p24);
+}
+
+/* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
+void
+NTLMSSPOWFencrypt(unsigned char passwd[8],
+		  unsigned char *ntlmchalresp, unsigned char p24[24])
+{
+	unsigned char p21[21];
+
+	memset(p21, '\0', 21);
+	memcpy(p21, passwd, 8);
+	memset(p21 + 8, 0xbd, 8);
+
+	E_P24(p21, ntlmchalresp, p24);
+}
+
+/* Does the NT MD4 hash then des encryption. */
+
+void
+SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
+{
+	unsigned char p21[21];
+
+	memset(p21, '\0', 21);
+
+	E_md4hash(passwd, p21);
+	SMBOWFencrypt(p21, c8, p24);
+}
+
+
+/* Does the md5 encryption from the NT hash for NTLMv2. */
+/* These routines will be needed later */
+#if 0
+static void
+SMBOWFencrypt_ntv2(const unsigned char kr[16],
+                   const struct data_blob * srv_chal,
+                   const struct data_blob * cli_chal, unsigned char resp_buf[16])
+{
+        struct HMACMD5Context ctx;
+
+        hmac_md5_init_limK_to_64(kr, 16, &ctx);
+        hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
+        hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
+        hmac_md5_final(resp_buf, &ctx);
+}
+
+static void
+SMBsesskeygen_ntv2(const unsigned char kr[16],
+		   const unsigned char *nt_resp, __u8 sess_key[16])
+{
+	struct HMACMD5Context ctx;
+
+	hmac_md5_init_limK_to_64(kr, 16, &ctx);
+	hmac_md5_update(nt_resp, 16, &ctx);
+	hmac_md5_final((unsigned char *) sess_key, &ctx);
+}
+
+static void
+SMBsesskeygen_ntv1(const unsigned char kr[16],
+		   const unsigned char *nt_resp, __u8 sess_key[16])
+{
+	mdfour((unsigned char *) sess_key, (unsigned char *) kr, 16);
+}
+#endif
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
new file mode 100644
index 0000000..e21f138
--- /dev/null
+++ b/fs/cifs/smberr.h
@@ -0,0 +1,115 @@
+/*
+ *   fs/cifs/smberr.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   See Error Codes section of the SNIA CIFS Specification 
+ *   for more information 
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#define SUCCESS 0		/* The request was successful. */
+#define ERRDOS 0x01		/* Error is from the core DOS operating system set */
+#define ERRSRV 0x02		/* Error is generated by the file server daemon */
+#define ERRHRD 0x03		/* Error is a hardware error. */
+#define ERRCMD 0xFF		/*  Command was not in the "SMB" format. */
+
+/* The following error codes may be generated with the SUCCESS error class.*/
+
+#define SUCCESS 0		/* The request was successful. */
+
+/* The following error codes may be generated with the ERRDOS error class.*/
+
+#define ERRbadfunc 1		/* Invalid function. The server did not recognize or could not perform a system call generated by the server, e.g., set the DIRECTORY attribute on a data file, invalid seek mode. */
+#define ERRbadfile 2		/*File not found. The last component of a file's pathname could not be found. */
+#define ERRbadpath 3		/* Directory invalid. A directory component in a pathname could not be found. */
+#define ERRnofids 4		/* Too many open files. The server has no file handles available. */
+#define ERRnoaccess 5		/* Access denied, the client's context does not permit the requested function. This includes the following conditions: invalid rename command, write to Fid open for read only, read on Fid open for write only, attempt to delete a non-empty directory */
+#define ERRbadfid 6		/* Invalid file handle. The file handle specified was not recognized by the server. */
+#define ERRbadmcb 7		/* Memory control blocks destroyed. */
+#define ERRnomem 8		/* Insufficient server memory to perform the requested function. */
+#define ERRbadmem 9		/* Invalid memory block address. */
+#define ERRbadenv 10		/* Invalid environment. */
+#define ERRbadformat 11		/* Invalid format. */
+#define ERRbadaccess 12		/* Invalid open mode. */
+#define ERRbaddata 13		/* Invalid data (generated only by IOCTL calls within the server). */
+#define ERRbaddrive 15		/* Invalid drive specified. */
+#define ERRremcd 16		/* A Delete Directory request attempted to remove the server's current directory. */
+#define ERRdiffdevice 17	/* Not same device (e.g., a cross volume rename was attempted) */
+#define ERRnofiles 18		/* A File Search command can find no more files matching the specified criteria. */
+#define ERRgeneral 31
+#define ERRbadshare 32		/* The sharing mode specified for an Open conflicts with existing FIDs on the file. */
+#define ERRlock 33		/* A Lock request conflicted with an existing lock or specified an invalid mode, or an Unlock request attempted to remove a lock held by another process. */
+#define ERRunsup     50
+#define ERRnosuchshare 67
+#define ERRfilexists 80		/* The file named in the request already exists. */
+#define ERRinvparm   87
+#define ERRdiskfull  112
+#define ERRinvname   123
+#define ERRinvlevel  124
+#define ERRdirnotempty 145
+#define ERRnotlocked   158
+#define ERRalreadyexists 183
+#define ERRbadpipe 230
+#define ERRpipebusy 231
+#define ERRpipeclosing 232
+#define ERRnotconnected 233
+#define ERRmoredata    234
+#define ERReasnotsupported 282
+#define ErrQuota 0x200		/* The operation would cause a quota limit to be exceeded. */
+#define ErrNotALink 0x201	/* A link operation was performed on a pathname that
+				   was not a link. */
+
+/* Following error codes may be generated with the ERRSRV error
+class.*/
+
+#define ERRerror 1		/* Non-specific error code. It is returned under the following conditions: resource other than disk space exhausted (e.g. TIDs), first SMB command was not negotiate, multiple negotiates attempted, and internal server error. */
+#define ERRbadpw 2		/* Bad password - name/password pair in a TreeConnect or Session Setup are invalid. */
+#define ERRbadtype 3		/* used for indicating DFS referral needed */
+#define ERRaccess 4		/* The client does not have the necessary access rights within the specified context for requested function. */
+#define ERRinvtid 5		/* The Tid specified in a command was invalid. */
+#define ERRinvnetname 6		/* Invalid network name in tree connect. */
+#define ERRinvdevice 7		/* Invalid device - printer request made to non-printer connection or non-printer request made to printer connection. */
+#define ERRqfull 49		/* Print queue full (files) -- returned by open print file. */
+#define ERRqtoobig 50		/* Print queue full -- no space. */
+#define ERRqeof         51	/* EOF on print queue dump */
+#define ERRinvpfid      52	/* Invalid print file FID. */
+#define ERRsmbcmd       64	/* The server did not recognize the command received. */
+#define ERRsrverror     65	/* The server encountered an internal error, e.g., system file unavailable. */
+#define ERRbadBID       66	/* (obsolete) */
+#define ERRfilespecs    67	/* The Fid and pathname parameters contained an invalid combination of values. */
+#define ERRbadLink      68	/* (obsolete) */
+#define ERRbadpermits   69	/* The access permissions specified for a file or directory are not a valid combination. */
+#define ERRbadPID       70
+#define ERRsetattrmode  71	/* attribute (mode) is invalid */
+#define ERRpaused       81	/* Server is paused */
+#define ERRmsgoff	82	/* reserved - messaging off */
+#define ERRnoroom       83	/* reserved - no room for message */
+#define ERRrmuns        87	/* reserved - too many remote names */
+#define ERRtimeout      88	/* operation timed out */
+#define ERRnoresource   89	/* No resources available for request */
+#define ERRtoomanyuids  90	/* Too many UIDs active on this session */
+#define ERRbaduid       91	/* The UID is not known as a valid user */
+#define ERRusempx      250	/* temporarily unable to use raw */
+#define ERRusestd      251	/* temporarily unable to use either raw or mpx */
+#define ERR_NOTIFY_ENUM_DIR 1024
+#define ERRaccountexpired 2239
+#define ERRbadclient      2240
+#define ERRbadLogonTime   2241
+#define ERRpasswordExpired 2242
+#define ERRnetlogonNotStarted 2455
+#define ERRnosupport       0xFFFF
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
new file mode 100644
index 0000000..af13e52
--- /dev/null
+++ b/fs/cifs/transport.c
@@ -0,0 +1,619 @@
+/*
+ *   fs/cifs/transport.c
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ */
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <linux/mempool.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+
+extern mempool_t *cifs_mid_poolp;
+extern kmem_cache_t *cifs_oplock_cachep;
+
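+/* Allocate a mid (multiplex id) queue entry for the request in smb_buffer
+   and link it onto the server's pending_mid_q so that the response can
+   later be matched back to this request. */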
+static struct mid_q_entry *
+AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
+{
+	struct mid_q_entry *temp;
+
+	if (ses == NULL) {
+		cERROR(1, ("Null session passed in to AllocMidQEntry "));
+		return NULL;
+	}
+	if (ses->server == NULL) {
+		cERROR(1, ("Null TCP session in AllocMidQEntry"));
+		return NULL;
+	}
+	
+	temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp, SLAB_NOFS);
+	if (temp == NULL)
+		return temp;
+	else {
+		memset(temp, 0, sizeof (struct mid_q_entry));
+		temp->mid = smb_buffer->Mid;	/* always LE */
+		temp->pid = current->pid;
+		temp->command = smb_buffer->Command;
+		cFYI(1, ("For smb_command %d", temp->command));
+		do_gettimeofday(&temp->when_sent);
+		temp->ses = ses;
+		temp->tsk = current;
+	}
+
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&temp->qhead, &ses->server->pending_mid_q);
+	atomic_inc(&midCount);
+	temp->midState = MID_REQUEST_ALLOCATED;
+	spin_unlock(&GlobalMid_Lock);
+	return temp;
+}
+
+static void
+DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
+	spin_lock(&GlobalMid_Lock);
+	midEntry->midState = MID_FREE;
+	list_del(&midEntry->qhead);
+	atomic_dec(&midCount);
+	spin_unlock(&GlobalMid_Lock);
+	cifs_buf_release(midEntry->resp_buf);
+	mempool_free(midEntry, cifs_mid_poolp);
+}
+
+struct oplock_q_entry *
+AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
+{
+	struct oplock_q_entry *temp;
+	if ((pinode== NULL) || (tcon == NULL)) {
+		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
+		return NULL;
+	}
+	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
+						       SLAB_KERNEL);
+	if (temp == NULL)
+		return temp;
+	else {
+		temp->pinode = pinode;
+		temp->tcon = tcon;
+		temp->netfid = fid;
+		spin_lock(&GlobalMid_Lock);
+		list_add_tail(&temp->qhead, &GlobalOplock_Q);
+		spin_unlock(&GlobalMid_Lock);
+	}
+	return temp;
+
+}
+
+void DeleteOplockQEntry(struct oplock_q_entry * oplockEntry)
+{
+	spin_lock(&GlobalMid_Lock);
+	/* should we check if list empty first? */
+	list_del(&oplockEntry->qhead);
+	spin_unlock(&GlobalMid_Lock);
+	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
+}
+
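+/* Write the already assembled SMB request (4 byte RFC1001 length prefix
+   plus smb_buf_length bytes) to the server socket, retrying for up to
+   roughly 30 seconds if the socket send buffer is temporarily full. */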
+int
+smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
+	 unsigned int smb_buf_length, struct sockaddr *sin)
+{
+	int rc = 0;
+	int i = 0;
+	struct msghdr smb_msg;
+	struct kvec iov;
+	unsigned len = smb_buf_length + 4;
+
+	if(ssocket == NULL)
+		return -ENOTSOCK; /* BB eventually add reconnect code here */
+	iov.iov_base = smb_buffer;
+	iov.iov_len = len;
+
+	smb_msg.msg_name = sin;
+	smb_msg.msg_namelen = sizeof (struct sockaddr);
+	smb_msg.msg_control = NULL;
+	smb_msg.msg_controllen = 0;
+	smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; /* BB add more flags?*/
+
+	/* smb header is converted in header_assemble. bcc and rest of SMB word
+	   area, and byte area if necessary, is converted to littleendian in 
+	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send 
+	   Flags2 is converted in SendReceive */
+
+	smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
+	cFYI(1, ("Sending smb of length %d ", smb_buf_length));
+	dump_smb(smb_buffer, len);
+
+	while (len > 0) {
+		rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len);
+		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
+			i++;
+			if(i > 60) {
+				cERROR(1,
+				   ("sends on sock %p stuck for 30 seconds",
+				    ssocket));
+				rc = -EAGAIN;
+				break;
+			}
+			msleep(500);
+			continue;
+		}
+		if (rc < 0) 
+			break;
+		iov.iov_base += rc;
+		iov.iov_len -= rc;
+		len -= rc;
+	}
+
+	if (rc < 0) {
+		cERROR(1,("Error %d sending data on socket to server.", rc));
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+#ifdef CIFS_EXPERIMENTAL
+/* BB finish off this function, adding support for writing set of pages as iovec */
+/* and also adding support for operations that need to parse the response smb    */
+
+int
+smb_sendv(struct socket *ssocket, struct smb_hdr *smb_buffer,
+	 unsigned int smb_buf_length, struct kvec * write_vector /* page list */, struct sockaddr *sin)
+{
+	int rc = 0;
+	int i = 0;
+	struct msghdr smb_msg;
+	struct kvec iov;
+	unsigned len = smb_buf_length + 4;
+	/* BB still need to build a kvec array from write_vector (plus an
+	   extra entry for the SMB header itself) and pass the whole array
+	   to kernel_sendmsg below instead of sending only the header BB */
+
+	if(ssocket == NULL)
+		return -ENOTSOCK; /* BB eventually add reconnect code here */
+	iov.iov_base = smb_buffer;
+	iov.iov_len = len;
+
+	smb_msg.msg_name = sin;
+	smb_msg.msg_namelen = sizeof (struct sockaddr);
+	smb_msg.msg_control = NULL;
+	smb_msg.msg_controllen = 0;
+	smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; /* BB add more flags?*/
+
+	/* smb header is converted in header_assemble. bcc and rest of SMB word
+	   area, and byte area if necessary, is converted to littleendian in 
+	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send 
+	   Flags2 is converted in SendReceive */
+
+	smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
+	cFYI(1, ("Sending smb of length %d ", smb_buf_length));
+	dump_smb(smb_buffer, len);
+
+	while (len > 0) {
+		rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len);
+		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
+			i++;
+			if(i > 60) {
+				cERROR(1,
+				   ("sends on sock %p stuck for 30 seconds",
+				    ssocket));
+				rc = -EAGAIN;
+				break;
+			}
+			msleep(500);
+			continue;
+		}
+		if (rc < 0) 
+			break;
+		iov.iov_base += rc;
+		iov.iov_len -= rc;
+		len -= rc;
+	}
+
+	if (rc < 0) {
+		cERROR(1,("Error %d sending data on socket to server.", rc));
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+
+int
+CIFSSendRcv(const unsigned int xid, struct cifsSesInfo *ses,
+	    struct smb_hdr *in_buf, struct kvec * write_vector /* page list */, int *pbytes_returned, const int long_op)
+{
+	int rc = 0;
+	unsigned long timeout = 15 * HZ;
+	struct mid_q_entry *midQ = NULL;
+
+	if (ses == NULL) {
+		cERROR(1,("Null smb session"));
+		return -EIO;
+	}
+	if(ses->server == NULL) {
+		cERROR(1,("Null tcp session"));
+		return -EIO;
+	}
+	if(pbytes_returned == NULL)
+		return -EIO;
+	else
+		*pbytes_returned = 0;
+
+	/* Ensure that we do not send more than 50 overlapping requests 
+	   to the same server. We may make this configurable later or
+	   use ses->maxReq */
+	if(long_op == -1) {
+		/* oplock breaks must not be held up */
+		atomic_inc(&ses->server->inFlight);
+	} else {
+		spin_lock(&GlobalMid_Lock); 
+		while(1) {        
+			if(atomic_read(&ses->server->inFlight) >= cifs_max_pending){
+				spin_unlock(&GlobalMid_Lock);
+				wait_event(ses->server->request_q,
+					atomic_read(&ses->server->inFlight)
+					 < cifs_max_pending);
+				spin_lock(&GlobalMid_Lock);
+			} else {
+				if(ses->server->tcpStatus == CifsExiting) {
+					spin_unlock(&GlobalMid_Lock);
+					return -ENOENT;
+				}
+
+			/* can not count locking commands against total since
+			   they are allowed to block on server */
+					
+				if(long_op < 3) {
+				/* update # of requests on the wire to server */
+					atomic_inc(&ses->server->inFlight);
+				}
+				spin_unlock(&GlobalMid_Lock);
+				break;
+			}
+		}
+	}
+	/* make sure that we sign in the same order that we send on this socket 
+	   and avoid races inside tcp sendmsg code that could cause corruption
+	   of smb data */
+
+	down(&ses->server->tcpSem); 
+
+	if (ses->server->tcpStatus == CifsExiting) {
+		rc = -ENOENT;
+		goto cifs_out_label;
+	} else if (ses->server->tcpStatus == CifsNeedReconnect) {
+		cFYI(1,("tcp session dead - return to caller to retry"));
+		rc = -EAGAIN;
+		goto cifs_out_label;
+	} else if (ses->status != CifsGood) {
+		/* check if SMB session is bad because we are setting it up */
+		if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 
+			(in_buf->Command != SMB_COM_NEGOTIATE)) {
+			rc = -EAGAIN;
+			goto cifs_out_label;
+		} /* else ok - we are setting up session */
+	}
+	midQ = AllocMidQEntry(in_buf, ses);
+	if (midQ == NULL) {
+		up(&ses->server->tcpSem);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return -ENOMEM;
+	}
+
+	if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+		up(&ses->server->tcpSem);
+		cERROR(1,
+		       ("Illegal length, greater than maximum frame, %d ",
+			in_buf->smb_buf_length));
+		DeleteMidQEntry(midQ);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return -EIO;
+	}
+
+	/* BB can we sign efficiently in this path? */
+	rc = cifs_sign_smb(in_buf, ses, &midQ->sequence_number);
+
+	midQ->midState = MID_REQUEST_SUBMITTED;
+/*	rc = smb_sendv(ses->server->ssocket, in_buf, in_buf->smb_buf_length, piovec,
+		      (struct sockaddr *) &(ses->server->addr.sockAddr));*/
+	if(rc < 0) {
+		DeleteMidQEntry(midQ);
+		up(&ses->server->tcpSem);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return rc;
+	}
+
+cifs_out_label:
+	/* release the socket send semaphore taken above (still held both on
+	   the error gotos and on the normal fall through after sending) */
+	up(&ses->server->tcpSem);
+	if(midQ)
+		DeleteMidQEntry(midQ);
+
+	if(long_op < 3) {
+		atomic_dec(&ses->server->inFlight);
+		wake_up(&ses->server->request_q);
+	}
+
+	return rc;
+}
+
+
+#endif /* CIFS_EXPERIMENTAL */
+
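+/* Send the SMB request in in_buf and wait for the server response, which
+   is copied into out_buf.  long_op selects the timeout class: -1 means do
+   not wait for a response (e.g. oplock break acks), 1 uses a 45 second
+   timeout, 2 allows long writes (300 seconds), values greater than 2 wait
+   indefinitely (blocking locks), anything else waits 15 seconds. */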
+int
+SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
+	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
+	    int *pbytes_returned, const int long_op)
+{
+	int rc = 0;
+	unsigned int receive_len;
+	unsigned long timeout;
+	struct mid_q_entry *midQ;
+
+	if (ses == NULL) {
+		cERROR(1,("Null smb session"));
+		return -EIO;
+	}
+	if(ses->server == NULL) {
+		cERROR(1,("Null tcp session"));
+		return -EIO;
+	}
+
+	/* Ensure that we do not send more than 50 overlapping requests 
+	   to the same server. We may make this configurable later or
+	   use ses->maxReq */
+	if(long_op == -1) {
+		/* oplock breaks must not be held up */
+		atomic_inc(&ses->server->inFlight);
+	} else {
+		spin_lock(&GlobalMid_Lock); 
+		while(1) {        
+			if(atomic_read(&ses->server->inFlight) >= cifs_max_pending){
+				spin_unlock(&GlobalMid_Lock);
+				wait_event(ses->server->request_q,
+					atomic_read(&ses->server->inFlight)
+					 < cifs_max_pending);
+				spin_lock(&GlobalMid_Lock);
+			} else {
+				if(ses->server->tcpStatus == CifsExiting) {
+					spin_unlock(&GlobalMid_Lock);
+					return -ENOENT;
+				}
+
+			/* can not count locking commands against total since
+			   they are allowed to block on server */
+					
+				if(long_op < 3) {
+				/* update # of requests on the wire to server */
+					atomic_inc(&ses->server->inFlight);
+				}
+				spin_unlock(&GlobalMid_Lock);
+				break;
+			}
+		}
+	}
+	/* make sure that we sign in the same order that we send on this socket 
+	   and avoid races inside tcp sendmsg code that could cause corruption
+	   of smb data */
+
+	down(&ses->server->tcpSem); 
+
+	if (ses->server->tcpStatus == CifsExiting) {
+		rc = -ENOENT;
+		goto out_unlock;
+	} else if (ses->server->tcpStatus == CifsNeedReconnect) {
+		cFYI(1,("tcp session dead - return to caller to retry"));
+		rc = -EAGAIN;
+		goto out_unlock;
+	} else if (ses->status != CifsGood) {
+		/* check if SMB session is bad because we are setting it up */
+		if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 
+			(in_buf->Command != SMB_COM_NEGOTIATE)) {
+			rc = -EAGAIN;
+			goto out_unlock;
+		} /* else ok - we are setting up session */
+	}
+	midQ = AllocMidQEntry(in_buf, ses);
+	if (midQ == NULL) {
+		up(&ses->server->tcpSem);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return -ENOMEM;
+	}
+
+	if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+		up(&ses->server->tcpSem);
+		cERROR(1,
+		       ("Illegal length, greater than maximum frame, %d ",
+			in_buf->smb_buf_length));
+		DeleteMidQEntry(midQ);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return -EIO;
+	}
+
+	rc = cifs_sign_smb(in_buf, ses, &midQ->sequence_number);
+
+	midQ->midState = MID_REQUEST_SUBMITTED;
+	rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
+		      (struct sockaddr *) &(ses->server->addr.sockAddr));
+	if(rc < 0) {
+		DeleteMidQEntry(midQ);
+		up(&ses->server->tcpSem);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return rc;
+	} else
+		up(&ses->server->tcpSem);
+	if (long_op == -1)
+		goto cifs_no_response_exit;
+	else if (long_op == 2) /* writes past end of file can take looooong time */
+		timeout = 300 * HZ;
+	else if (long_op == 1)
+		timeout = 45 * HZ; /* should be greater than the
+			server's oplock break timeout (about 43 seconds) */
+	else if (long_op > 2) {
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	} else
+		timeout = 15 * HZ;
+	/* wait for 15 seconds or until woken up due to response arriving or 
+	   due to last connection to this server being unmounted */
+	if (signal_pending(current)) {
+		/* if signal pending do not hold up user for full smb timeout
+		but we still give the response a chance to complete */
+		timeout = 2 * HZ;
+	}   
+
+	/* No user interrupts in wait - wreaks havoc with performance */
+	if(timeout != MAX_SCHEDULE_TIMEOUT) {
+		timeout += jiffies;
+		wait_event(ses->server->response_q,
+			(!(midQ->midState & MID_REQUEST_SUBMITTED)) || 
+			time_after(jiffies, timeout) || 
+			((ses->server->tcpStatus != CifsGood) &&
+			 (ses->server->tcpStatus != CifsNew)));
+	} else {
+		wait_event(ses->server->response_q,
+			(!(midQ->midState & MID_REQUEST_SUBMITTED)) || 
+			((ses->server->tcpStatus != CifsGood) &&
+			 (ses->server->tcpStatus != CifsNew)));
+	}
+
+	spin_lock(&GlobalMid_Lock);
+	if (midQ->resp_buf) {
+		spin_unlock(&GlobalMid_Lock);
+		receive_len = be32_to_cpu(*(__be32 *)midQ->resp_buf);
+	} else {
+		cERROR(1,("No response buffer"));
+		if(midQ->midState == MID_REQUEST_SUBMITTED) {
+			if(ses->server->tcpStatus == CifsExiting)
+				rc = -EHOSTDOWN;
+			else {
+				ses->server->tcpStatus = CifsNeedReconnect;
+				midQ->midState = MID_RETRY_NEEDED;
+			}
+		}
+
+		if (rc != -EHOSTDOWN) {
+			if(midQ->midState == MID_RETRY_NEEDED) {
+				rc = -EAGAIN;
+				cFYI(1,("marking request for retry"));
+			} else {
+				rc = -EIO;
+			}
+		}
+		spin_unlock(&GlobalMid_Lock);
+		DeleteMidQEntry(midQ);
+		/* If not lock req, update # of requests on wire to server */
+		if(long_op < 3) {
+			atomic_dec(&ses->server->inFlight); 
+			wake_up(&ses->server->request_q);
+		}
+		return rc;
+	}
+  
+	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
+		cERROR(1,
+		       ("Frame too large received.  Length: %d  Xid: %d",
+			receive_len, xid));
+		rc = -EIO;
+	} else {		/* rcvd frame is ok */
+
+		if (midQ->resp_buf && out_buf
+		    && (midQ->midState == MID_RESPONSE_RECEIVED)) {
+			out_buf->smb_buf_length = receive_len;
+			memcpy((char *)out_buf + 4,
+			       (char *)midQ->resp_buf + 4,
+			       receive_len);
+
+			dump_smb(out_buf, 92);
+			/* convert the length into a more usable form */
+			if((receive_len > 24) &&
+			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) {
+				rc = cifs_verify_signature(out_buf, ses->mac_signing_key,midQ->sequence_number); /* BB fix BB */
+				if(rc)
+					cFYI(1,("Unexpected signature received from server"));
+			}
+
+			*pbytes_returned = out_buf->smb_buf_length;
+
+			/* BB special case reconnect tid and reconnect uid here? */
+			rc = map_smb_to_linux_error(out_buf);
+
+			/* convert ByteCount if necessary */
+			if (receive_len >=
+			    sizeof (struct smb_hdr) -
+			    4 /* do not count RFC1001 header */  +
+			    (2 * out_buf->WordCount) + 2 /* bcc */ )
+				BCC(out_buf) = le16_to_cpu(BCC(out_buf));
+		} else {
+			rc = -EIO;
+			cFYI(1,("Bad MID state? "));
+		}
+	}
+cifs_no_response_exit:
+	DeleteMidQEntry(midQ);
+
+	if(long_op < 3) {
+		atomic_dec(&ses->server->inFlight); 
+		wake_up(&ses->server->request_q);
+	}
+
+	return rc;
+
+out_unlock:
+	up(&ses->server->tcpSem);
+	/* If not lock req, update # of requests on wire to server */
+	if(long_op < 3) {
+		atomic_dec(&ses->server->inFlight); 
+		wake_up(&ses->server->request_q);
+	}
+
+	return rc;
+}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
new file mode 100644
index 0000000..549afa1
--- /dev/null
+++ b/fs/cifs/xattr.c
@@ -0,0 +1,334 @@
+/*
+ *   fs/cifs/xattr.c
+ *
+ *   Copyright (c) International Business Machines  Corp., 2003
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+
+#define MAX_EA_VALUE_SIZE 65535
+#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
+#define CIFS_XATTR_USER_PREFIX "user."
+#define CIFS_XATTR_SYSTEM_PREFIX "system."
+#define CIFS_XATTR_OS2_PREFIX "os2."
+#define CIFS_XATTR_SECURITY_PREFIX "security."
+#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
+#define XATTR_TRUSTED_PREFIX_LEN  8
+#define XATTR_SECURITY_PREFIX_LEN 9
+/* BB need to add server (Samba e.g) support for security and trusted prefix */
+
+int cifs_removexattr(struct dentry * direntry, const char * ea_name)
+{
+	int rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct super_block * sb;
+	char * full_path;
+
+	if(direntry == NULL)
+		return -EIO;
+	if(direntry->d_inode == NULL)
+		return -EIO;
+	sb = direntry->d_inode->i_sb;
+	if(sb == NULL)
+		return -EIO;
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&sb->s_vfs_rename_sem);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	if(ea_name == NULL) {
+		cFYI(1,("Null xattr names not supported"));
+	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)
+		&& (strncmp(ea_name,CIFS_XATTR_OS2_PREFIX,4))) {
+		cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+		/* BB what if no namespace prefix? */
+		/* Should we just pass them to server, except for
+		system and perhaps security prefixes? */
+	} else {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+			goto remove_ea_exit;
+
+		if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0)
+			ea_name += 5; /* skip past user. prefix */
+		else
+			ea_name += 4; /* skip past os2. prefix */
+		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
+			(__u16)0, cifs_sb->local_nls);
+	}
+remove_ea_exit:
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+#endif
+	return rc;
+}
+
+int cifs_setxattr(struct dentry * direntry, const char * ea_name,
+        const void * ea_value, size_t value_size, int flags)
+{
+	int rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct super_block * sb;
+	char * full_path;
+
+	if(direntry == NULL)
+		return -EIO;
+	if(direntry->d_inode == NULL)
+		return -EIO;
+	sb = direntry->d_inode->i_sb;
+	if(sb == NULL)
+		return -EIO;
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&sb->s_vfs_rename_sem);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	/* return dos attributes as pseudo xattr */
+	/* return alt name if available as pseudo attr */
+
+	/* if proc/fs/cifs/streamstoxattr is set then
+		search server for EAs or streams to
+		return as xattrs */
+	if(value_size > MAX_EA_VALUE_SIZE) {
+		cFYI(1,("size of EA value too large"));
+		if(full_path)
+			kfree(full_path);
+		FreeXid(xid);
+		return -EOPNOTSUPP;
+	}
+
+	if(ea_name == NULL) {
+		cFYI(1,("Null xattr names not supported"));
+	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+			goto set_ea_exit;
+		if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
+			cFYI(1,("attempt to set cifs inode metadata"));
+		}
+		ea_name += 5; /* skip past user. prefix */
+		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+			(__u16)value_size, cifs_sb->local_nls);
+	} else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+			goto set_ea_exit;
+
+		ea_name += 4; /* skip past os2. prefix */
+		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+			(__u16)value_size, cifs_sb->local_nls);
+	} else {
+		int temp; 
+		temp = strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+			strlen(POSIX_ACL_XATTR_ACCESS));
+		if (temp == 0) {
+#ifdef CONFIG_CIFS_POSIX
+			rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,ea_value,
+				(const int)value_size, ACL_TYPE_ACCESS,
+				cifs_sb->local_nls);
+			cFYI(1,("set POSIX ACL rc %d",rc));
+#else
+			cFYI(1,("set POSIX ACL not supported"));
+#endif
+		} else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
+#ifdef CONFIG_CIFS_POSIX
+			rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,ea_value,
+				(const int)value_size, ACL_TYPE_DEFAULT,
+				cifs_sb->local_nls);
+			cFYI(1,("set POSIX default ACL rc %d",rc));
+#else
+			cFYI(1,("set default POSIX ACL not supported"));
+#endif
+		} else {
+			cFYI(1,("illegal xattr request %s (only user namespace supported)",ea_name));
+		  /* BB what if no namespace prefix? */
+		  /* Should we just pass them to server, except for 
+		  system and perhaps security prefixes? */
+		}
+	}
+
+set_ea_exit:
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+#endif
+	return rc;
+}
+
+ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
+         void * ea_value, size_t buf_size)
+{
+	ssize_t rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct super_block * sb;
+	char * full_path;
+
+	if(direntry == NULL)
+		return -EIO;
+	if(direntry->d_inode == NULL)
+		return -EIO;
+	sb = direntry->d_inode->i_sb;
+	if(sb == NULL)
+		return -EIO;
+
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&sb->s_vfs_rename_sem);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	/* return dos attributes as pseudo xattr */
+	/* return alt name if available as pseudo attr */
+	if(ea_name == NULL) {
+		cFYI(1,("Null xattr names not supported"));
+	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+			goto get_ea_exit;
+
+		if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
+			cFYI(1,("attempt to query cifs inode metadata"));
+			/* revalidate/getattr then populate from inode */
+		} /* BB add else when above is implemented */
+		ea_name += 5; /* skip past user. prefix */
+		rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+			buf_size, cifs_sb->local_nls);
+	} else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
+		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+			goto get_ea_exit;
+
+		ea_name += 4; /* skip past os2. prefix */
+		rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+			buf_size, cifs_sb->local_nls);
+	} else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
+#ifdef CONFIG_CIFS_POSIX
+		rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+				ea_value, buf_size, ACL_TYPE_ACCESS, 
+				cifs_sb->local_nls);
+#else 
+		cFYI(1,("query POSIX ACL not supported yet"));
+#endif /* CONFIG_CIFS_POSIX */
+	} else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
+#ifdef CONFIG_CIFS_POSIX
+		rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+				ea_value, buf_size, ACL_TYPE_DEFAULT, 
+				cifs_sb->local_nls);
+#else 
+		cFYI(1,("query POSIX default ACL not supported yet"));
+#endif
+	} else if(strncmp(ea_name,
+		  CIFS_XATTR_TRUSTED_PREFIX,XATTR_TRUSTED_PREFIX_LEN) == 0) {
+		cFYI(1,("Trusted xattr namespace not supported yet"));
+	} else if(strncmp(ea_name,
+		  CIFS_XATTR_SECURITY_PREFIX,XATTR_SECURITY_PREFIX_LEN) == 0) {
+		cFYI(1,("Security xattr namespace not supported yet"));
+	} else {
+		cFYI(1,("illegal xattr name request %s (only user namespace supported)",ea_name));
+	}
+
+	/* We could add an additional check for streams i.e.
+	    if proc/fs/cifs/streamstoxattr is set then
+		search server for EAs or streams to
+		return as xattrs */
+
+	if(rc == -EINVAL)
+		rc = -EOPNOTSUPP; 
+
+get_ea_exit:
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+#endif
+	return rc;
+}
+
+ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
+{
+	ssize_t rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+	int xid;
+	struct cifs_sb_info *cifs_sb;
+	struct cifsTconInfo *pTcon;
+	struct super_block * sb;
+	char * full_path;
+
+	if(direntry == NULL)
+		return -EIO;
+	if(direntry->d_inode == NULL)
+		return -EIO;
+	sb = direntry->d_inode->i_sb;
+	if(sb == NULL)
+		return -EIO;
+	xid = GetXid();
+
+	cifs_sb = CIFS_SB(sb);
+	pTcon = cifs_sb->tcon;
+
+	down(&sb->s_vfs_rename_sem);
+	full_path = build_path_from_dentry(direntry);
+	up(&sb->s_vfs_rename_sem);
+	if(full_path == NULL) {
+		FreeXid(xid);
+		return -ENOMEM;
+	}
+	/* return dos attributes as pseudo xattr */
+	/* return alt name if available as pseudo attr */
+
+	/* if proc/fs/cifs/streamstoxattr is set then
+		search server for EAs or streams to
+		return as xattrs */
+	rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
+				cifs_sb->local_nls);
+
+	if (full_path)
+		kfree(full_path);
+	FreeXid(xid);
+#endif
+	return rc;
+}
diff --git a/fs/coda/Makefile b/fs/coda/Makefile
new file mode 100644
index 0000000..6c22e61
--- /dev/null
+++ b/fs/coda/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Linux Coda filesystem routines.
+#
+
+obj-$(CONFIG_CODA_FS) += coda.o
+
+coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \
+	     coda_linux.o symlink.o pioctl.o sysctl.o 
+
+# If you want debugging output, please uncomment the following line.
+
+# EXTRA_CFLAGS += -DDEBUG -DDEBUG_SMB_MALLOC=1
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
new file mode 100644
index 0000000..80072fd
--- /dev/null
+++ b/fs/coda/cache.c
@@ -0,0 +1,120 @@
+/*
+ * Cache operations for Coda.
+ * For Linux 2.1: (C) 1997 Carnegie Mellon University
+ * For Linux 2.3: (C) 2000 Carnegie Mellon University
+ *
+ * Carnegie Mellon encourages users of this code to contribute improvements
+ * to the Coda project http://www.coda.cs.cmu.edu/ <coda@cs.cmu.edu>.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/list.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_cache.h>
+
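+/* bumping permission_epoch invalidates every cached permission at once;
+   coda_cache_check() only trusts entries whose c_cached_epoch still matches */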
+static atomic_t permission_epoch = ATOMIC_INIT(0);
+
+/* replace or extend an acl cache hit */
+void coda_cache_enter(struct inode *inode, int mask)
+{
+	struct coda_inode_info *cii = ITOC(inode);
+
+	cii->c_cached_epoch = atomic_read(&permission_epoch);
+	if (cii->c_uid != current->fsuid) {
+                cii->c_uid = current->fsuid;
+                cii->c_cached_perm = mask;
+        } else
+                cii->c_cached_perm |= mask;
+}
+
+/* remove cached acl from an inode */
+void coda_cache_clear_inode(struct inode *inode)
+{
+	struct coda_inode_info *cii = ITOC(inode);
+        cii->c_cached_perm = 0;
+}
+
+/* remove all acl caches */
+void coda_cache_clear_all(struct super_block *sb)
+{
+        struct coda_sb_info *sbi;
+
+        sbi = coda_sbp(sb);
+        BUG_ON(!sbi);
+
+	atomic_inc(&permission_epoch);
+}
+
+
+/* check if the mask has been matched against the acl already */
+int coda_cache_check(struct inode *inode, int mask)
+{
+	struct coda_inode_info *cii = ITOC(inode);
+        int hit;
+	
+        hit = (mask & cii->c_cached_perm) == mask &&
+		cii->c_uid == current->fsuid &&
+		cii->c_cached_epoch == atomic_read(&permission_epoch);
+
+        return hit;
+}
+
+
+/* Purging dentries and children */
+/* The following routines drop dentries which are not
+   in use and flag dentries which are in use to be 
+   zapped later.
+
+   The flags are detected by:
+   - coda_dentry_revalidate (for lookups) if the flag is C_PURGE
+   - coda_dentry_delete: to remove dentry from the cache when d_count
+     falls to zero
+   - an inode method coda_revalidate (for attributes) if the 
+     flag is C_VATTR
+*/
+
+/* this won't do any harm: just flag all children */
+static void coda_flag_children(struct dentry *parent, int flag)
+{
+	struct list_head *child;
+	struct dentry *de;
+
+	spin_lock(&dcache_lock);
+	list_for_each(child, &parent->d_subdirs)
+	{
+		de = list_entry(child, struct dentry, d_child);
+		/* don't know what to do with negative dentries */
+		if ( ! de->d_inode ) 
+			continue;
+		coda_flag_inode(de->d_inode, flag);
+	}
+	spin_unlock(&dcache_lock);
+	return; 
+}
+
+void coda_flag_inode_children(struct inode *inode, int flag)
+{
+	struct dentry *alias_de;
+
+	if ( !inode || !S_ISDIR(inode->i_mode)) 
+		return; 
+
+	alias_de = d_find_alias(inode);
+	if (!alias_de)
+		return;
+	coda_flag_children(alias_de, flag);
+	shrink_dcache_parent(alias_de);
+	dput(alias_de);
+}
+
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
new file mode 100644
index 0000000..23aeef5
--- /dev/null
+++ b/fs/coda/cnode.c
@@ -0,0 +1,172 @@
+/* cnode related routines for the coda kernel code
+   (C) 1996 Peter Braam
+   */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/time.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_psdev.h>
+
+static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
+{
+	return memcmp(fid1, fid2, sizeof(*fid1)) == 0;
+}
+
+static struct inode_operations coda_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.setattr	= coda_setattr,
+};
+
+/* cnode.c */
+static void coda_fill_inode(struct inode *inode, struct coda_vattr *attr)
+{
+        coda_vattr_to_iattr(inode, attr);
+
+        if (S_ISREG(inode->i_mode)) {
+                inode->i_op = &coda_file_inode_operations;
+                inode->i_fop = &coda_file_operations;
+        } else if (S_ISDIR(inode->i_mode)) {
+                inode->i_op = &coda_dir_inode_operations;
+                inode->i_fop = &coda_dir_operations;
+        } else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &coda_symlink_inode_operations;
+		inode->i_data.a_ops = &coda_symlink_aops;
+		inode->i_mapping = &inode->i_data;
+	} else
+                init_special_inode(inode, inode->i_mode, huge_decode_dev(attr->va_rdev));
+}
+
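+/* comparison and initialization callbacks for iget5_locked(): inodes are
+   matched on the full CodaFid, not just the hash used as i_ino */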
+static int coda_test_inode(struct inode *inode, void *data)
+{
+	struct CodaFid *fid = (struct CodaFid *)data;
+	return coda_fideq(&(ITOC(inode)->c_fid), fid);
+}
+
+static int coda_set_inode(struct inode *inode, void *data)
+{
+	struct CodaFid *fid = (struct CodaFid *)data;
+	ITOC(inode)->c_fid = *fid;
+	return 0;
+}
+
+static int coda_fail_inode(struct inode *inode, void *data)
+{
+	return -1;
+}
+
+struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid,
+			 struct coda_vattr * attr)
+{
+	struct inode *inode;
+	struct coda_inode_info *cii;
+	unsigned long hash = coda_f2i(fid);
+
+	inode = iget5_locked(sb, hash, coda_test_inode, coda_set_inode, fid);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW) {
+		cii = ITOC(inode);
+		/* we still need to set i_ino for things like stat(2) */
+		inode->i_ino = hash;
+		cii->c_mapcount = 0;
+		unlock_new_inode(inode);
+	}
+
+	/* always replace the attributes, type might have changed */
+	coda_fill_inode(inode, attr);
+	return inode;
+}
+
+/* this is effectively coda_iget:
+   - get attributes (might be cached)
+   - get the inode for the fid using vfs iget
+   - link the two up if this is needed
+   - fill in the attributes
+*/
+int coda_cnode_make(struct inode **inode, struct CodaFid *fid, struct super_block *sb)
+{
+        struct coda_vattr attr;
+        int error;
+        
+	/* We get inode numbers from Venus -- see venus source */
+	error = venus_getattr(sb, fid, &attr);
+	if ( error ) {
+	    *inode = NULL;
+	    return error;
+	} 
+
+	*inode = coda_iget(sb, fid, &attr);
+	if ( IS_ERR(*inode) ) {
+		printk("coda_cnode_make: coda_iget failed\n");
+                return PTR_ERR(*inode);
+        }
+	return 0;
+}
+
+
+void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid, 
+		      struct CodaFid *newfid)
+{
+	struct coda_inode_info *cii;
+	unsigned long hash = coda_f2i(newfid);
+	
+	cii = ITOC(inode);
+
+	BUG_ON(!coda_fideq(&cii->c_fid, oldfid));
+
+	/* replace fid and rehash inode */
+	/* XXX we probably need to hold some lock here! */
+	remove_inode_hash(inode);
+	cii->c_fid = *newfid;
+	inode->i_ino = hash;
+	__insert_inode_hash(inode, hash);
+}
+
+/* convert a fid to an inode. */
+struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb) 
+{
+	struct inode *inode;
+	unsigned long hash = coda_f2i(fid);
+
+	if ( !sb ) {
+		printk("coda_fid_to_inode: no sb!\n");
+		return NULL;
+	}
+
+	inode = iget5_locked(sb, hash, coda_test_inode, coda_fail_inode, fid);
+	if ( !inode )
+		return NULL;
+
+	/* we should never see newly created inodes because we intentionally
+	 * fail in the initialization callback */
+	BUG_ON(inode->i_state & I_NEW);
+
+	return inode;
+}
+
+/* the CONTROL inode is made without asking attributes from Venus */
+int coda_cnode_makectl(struct inode **inode, struct super_block *sb)
+{
+	int error = -ENOMEM;
+
+	*inode = new_inode(sb);
+	if (*inode) {
+		(*inode)->i_ino = CTL_INO;
+		(*inode)->i_op = &coda_ioctl_inode_operations;
+		(*inode)->i_fop = &coda_ioctl_operations;
+		(*inode)->i_mode = 0444;
+		error = 0;
+	}
+
+	return error;
+}
+
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
new file mode 100644
index 0000000..5597080
--- /dev/null
+++ b/fs/coda/coda_linux.c
@@ -0,0 +1,197 @@
+/*
+ * Inode operations for Coda filesystem
+ * Original version: (C) 1996 P. Braam and M. Callahan
+ * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University
+ * 
+ * Carnegie Mellon encourages users to contribute improvements to
+ * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+
+/* initialize the debugging variables */
+int coda_fake_statfs;
+
+/* print a fid */
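+/* note: formats into a static buffer, so the result does not survive a
+   subsequent call */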
+char * coda_f2s(struct CodaFid *f)
+{
+	static char s[60];
+#ifdef CONFIG_CODA_FS_OLD_API
+ 	sprintf(s, "(%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2]);
+#else
+ 	sprintf(s, "(%08x.%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2], f->opaque[3]);
+#endif
+	return s;
+}
+
+/* recognize special .CONTROL name */
+int coda_iscontrol(const char *name, size_t length)
+{
+	return ((CODA_CONTROLLEN == length) && 
+                (strncmp(name, CODA_CONTROL, CODA_CONTROLLEN) == 0));
+}
+
+/* recognize /coda inode */
+int coda_isroot(struct inode *i)
+{
+    return ( i->i_sb->s_root->d_inode == i );
+}
+
+unsigned short coda_flags_to_cflags(unsigned short flags)
+{
+	unsigned short coda_flags = 0;
+	
+	if ((flags & O_ACCMODE) == O_RDONLY)
+		coda_flags |= C_O_READ;
+
+	if ((flags & O_ACCMODE) == O_RDWR)
+		coda_flags |= C_O_READ | C_O_WRITE;
+
+	if ((flags & O_ACCMODE) == O_WRONLY)
+		coda_flags |= C_O_WRITE;
+
+	if (flags & O_TRUNC)
+		coda_flags |= C_O_TRUNC;
+
+	if (flags & O_CREAT)
+		coda_flags |= C_O_CREAT;
+
+	if (flags & O_EXCL)
+		coda_flags |= C_O_EXCL;
+
+	return coda_flags;
+}
+
+
+/* utility functions below */
+void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
+{
+        int inode_type;
+        /* inode's i_flags, i_ino are set by iget 
+           XXX: is this all we need ??
+           */
+        switch (attr->va_type) {
+        case C_VNON:
+                inode_type  = 0;
+                break;
+        case C_VREG:
+                inode_type = S_IFREG;
+                break;
+        case C_VDIR:
+                inode_type = S_IFDIR;
+                break;
+        case C_VLNK:
+                inode_type = S_IFLNK;
+                break;
+        default:
+                inode_type = 0;
+        }
+	inode->i_mode |= inode_type;
+
+	if (attr->va_mode != (u_short) -1)
+	        inode->i_mode = attr->va_mode | inode_type;
+        if (attr->va_uid != -1) 
+	        inode->i_uid = (uid_t) attr->va_uid;
+        if (attr->va_gid != -1)
+	        inode->i_gid = (gid_t) attr->va_gid;
+	if (attr->va_nlink != -1)
+	        inode->i_nlink = attr->va_nlink;
+	if (attr->va_size != -1)
+	        inode->i_size = attr->va_size;
+	if (attr->va_blocksize != -1)
+		inode->i_blksize = attr->va_blocksize;
+	if (attr->va_size != -1)
+		inode->i_blocks = (attr->va_size + 511) >> 9;
+	if (attr->va_atime.tv_sec != -1) 
+	        inode->i_atime = attr->va_atime;
+	if (attr->va_mtime.tv_sec != -1)
+	        inode->i_mtime = attr->va_mtime;
+        if (attr->va_ctime.tv_sec != -1)
+	        inode->i_ctime = attr->va_ctime;
+}
+
+
+/* 
+ * BSD sets attributes that need not be modified to -1. 
+ * Linux uses the valid field to indicate what should be
+ * looked at.  The BSD type field needs to be deduced from linux 
+ * mode.
+ * So we have to do some translations here.
+ */
+
+void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr)
+{
+        unsigned int valid;
+
+        /* clean out */        
+        vattr->va_mode = (umode_t) -1;
+        vattr->va_uid = (vuid_t) -1; 
+        vattr->va_gid = (vgid_t) -1;
+        vattr->va_size = (off_t) -1;
+	vattr->va_atime.tv_sec = (time_t) -1;
+	vattr->va_atime.tv_nsec =  (time_t) -1;
+        vattr->va_mtime.tv_sec = (time_t) -1;
+        vattr->va_mtime.tv_nsec = (time_t) -1;
+	vattr->va_ctime.tv_sec = (time_t) -1;
+	vattr->va_ctime.tv_nsec = (time_t) -1;
+        vattr->va_type = C_VNON;
+	vattr->va_fileid = -1;
+	vattr->va_gen = -1;
+	vattr->va_bytes = -1;
+	vattr->va_nlink = -1;
+	vattr->va_blocksize = -1;
+	vattr->va_rdev = -1;
+        vattr->va_flags = 0;
+
+        /* determine the type */
+#if 0
+        mode = iattr->ia_mode;
+        if ( S_ISDIR(mode) ) {
+                vattr->va_type = C_VDIR; 
+        } else if ( S_ISREG(mode) ) {
+                vattr->va_type = C_VREG;
+        } else if ( S_ISLNK(mode) ) {
+                vattr->va_type = C_VLNK;
+        } else {
+                /* don't do others */
+                vattr->va_type = C_VNON;
+        }
+#endif 
+
+        /* set those vattrs that need change */
+        valid = iattr->ia_valid;
+        if ( valid & ATTR_MODE ) {
+                vattr->va_mode = iattr->ia_mode;
+	}
+        if ( valid & ATTR_UID ) {
+                vattr->va_uid = (vuid_t) iattr->ia_uid;
+	}
+        if ( valid & ATTR_GID ) {
+                vattr->va_gid = (vgid_t) iattr->ia_gid;
+	}
+        if ( valid & ATTR_SIZE ) {
+                vattr->va_size = iattr->ia_size;
+	}
+        if ( valid & ATTR_ATIME ) {
+                vattr->va_atime = iattr->ia_atime;
+	}
+        if ( valid & ATTR_MTIME ) {
+                vattr->va_mtime = iattr->ia_mtime;
+	}
+        if ( valid & ATTR_CTIME ) {
+                vattr->va_ctime = iattr->ia_ctime;
+	}
+}
+
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
new file mode 100644
index 0000000..2391766
--- /dev/null
+++ b/fs/coda/dir.c
@@ -0,0 +1,704 @@
+
+/*
+ * Directory operations for Coda filesystem
+ * Original version: (C) 1996 P. Braam and M. Callahan
+ * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University
+ * 
+ * Carnegie Mellon encourages users to contribute improvements to
+ * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_cache.h>
+#include <linux/coda_proc.h>
+
+/* dir inode-ops */
+static int coda_create(struct inode *dir, struct dentry *new, int mode, struct nameidata *nd);
+static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd);
+static int coda_link(struct dentry *old_dentry, struct inode *dir_inode, 
+		     struct dentry *entry);
+static int coda_unlink(struct inode *dir_inode, struct dentry *entry);
+static int coda_symlink(struct inode *dir_inode, struct dentry *entry,
+			const char *symname);
+static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, int mode);
+static int coda_rmdir(struct inode *dir_inode, struct dentry *entry);
+static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, 
+                       struct inode *new_inode, struct dentry *new_dentry);
+
+/* dir file-ops */
+static int coda_readdir(struct file *file, void *dirent, filldir_t filldir);
+
+/* dentry ops */
+static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
+static int coda_dentry_delete(struct dentry *);
+
+/* support routines */
+static int coda_venus_readdir(struct file *filp, filldir_t filldir,
+			      void *dirent, struct dentry *dir);
+int coda_fsync(struct file *, struct dentry *dentry, int datasync);
+
+/* same as fs/bad_inode.c */
+static int coda_return_EIO(void)
+{
+	return -EIO;
+}
+#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
+
+static struct dentry_operations coda_dentry_operations =
+{
+	.d_revalidate	= coda_dentry_revalidate,
+	.d_delete	= coda_dentry_delete,
+};
+
+struct inode_operations coda_dir_inode_operations =
+{
+	.create		= coda_create,
+	.lookup		= coda_lookup,
+	.link		= coda_link,
+	.unlink		= coda_unlink,
+	.symlink	= coda_symlink,
+	.mkdir		= coda_mkdir,
+	.rmdir		= coda_rmdir,
+	.mknod		= CODA_EIO_ERROR,
+	.rename		= coda_rename,
+	.permission	= coda_permission,
+	.getattr	= coda_getattr,
+	.setattr	= coda_setattr,
+};
+
+struct file_operations coda_dir_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.readdir	= coda_readdir,
+	.open		= coda_open,
+	.flush		= coda_flush,
+	.release	= coda_release,
+	.fsync		= coda_fsync,
+};
+
+
+/* inode operations for directories */
+/* access routines: lookup, readlink, permission */
+static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd)
+{
+	struct inode *res_inode = NULL;
+	struct CodaFid resfid = { { 0, } };
+	int dropme = 0; /* to indicate entry should not be cached */
+	int type = 0;
+	int error = 0;
+	const char *name = entry->d_name.name;
+	size_t length = entry->d_name.len;
+	
+	if ( length > CODA_MAXNAMLEN ) {
+	        printk("name too long: lookup, %s (%*s)\n", 
+		       coda_i2s(dir), (int)length, name);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+
+	lock_kernel();
+        /* control object, create inode on the fly */
+        if (coda_isroot(dir) && coda_iscontrol(name, length)) {
+	        error = coda_cnode_makectl(&res_inode, dir->i_sb);
+		dropme = 1;
+                goto exit;
+        }
+
+	error = venus_lookup(dir->i_sb, coda_i2f(dir), 
+			     (const char *)name, length, &type, &resfid);
+
+	res_inode = NULL;
+	if (!error) {
+		if (type & CODA_NOCACHE) {
+			type &= (~CODA_NOCACHE);
+			dropme = 1;
+		}
+
+	    	error = coda_cnode_make(&res_inode, &resfid, dir->i_sb);
+		if (error) {
+			unlock_kernel();
+			return ERR_PTR(error);
+		}
+	} else if (error != -ENOENT) {
+		unlock_kernel();
+		return ERR_PTR(error);
+	}
+
+exit:
+	entry->d_time = 0;
+	entry->d_op = &coda_dentry_operations;
+	d_add(entry, res_inode);
+	if ( dropme ) {
+		d_drop(entry);
+		coda_flag_inode(res_inode, C_VATTR);
+	}
+	unlock_kernel();
+        return NULL;
+}
+
+
+int coda_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+        int error = 0;
+ 
+	if (!mask)
+		return 0; 
+
+	lock_kernel();
+
+	coda_vfs_stat.permission++;
+
+	if (coda_cache_check(inode, mask))
+		goto out; 
+
+        error = venus_access(inode->i_sb, coda_i2f(inode), mask);
+    
+	if (!error)
+		coda_cache_enter(inode, mask);
+
+ out:
+	unlock_kernel();
+
+        return error; 
+}
+
+
+static inline void coda_dir_changed(struct inode *dir, int link)
+{
+#ifdef REQUERY_VENUS_FOR_MTIME
+	/* invalidate the directory cnode's attributes so we refetch the
+	 * attributes from venus next time the inode is referenced */
+	coda_flag_inode(dir, C_VATTR);
+#else
+	/* optimistically we can also act as if our nose bleeds. The
+         * granularity of the mtime is coarse anyway, so we might actually be
+         * right most of the time. Note: we only do this for directories. */
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+#endif
+	if (link)
+		dir->i_nlink += link;
+}
+
+/* creation routines: create, mknod, mkdir, link, symlink */
+static int coda_create(struct inode *dir, struct dentry *de, int mode, struct nameidata *nd)
+{
+        int error=0;
+	const char *name=de->d_name.name;
+	int length=de->d_name.len;
+	struct inode *inode;
+	struct CodaFid newfid;
+	struct coda_vattr attrs;
+
+	lock_kernel();
+	coda_vfs_stat.create++;
+
+	if (coda_isroot(dir) && coda_iscontrol(name, length)) {
+		unlock_kernel();
+		return -EPERM;
+	}
+
+	error = venus_create(dir->i_sb, coda_i2f(dir), name, length, 
+				0, mode, &newfid, &attrs);
+
+        if ( error ) {
+		unlock_kernel();
+		d_drop(de);
+		return error;
+	}
+
+	inode = coda_iget(dir->i_sb, &newfid, &attrs);
+	if ( IS_ERR(inode) ) {
+		unlock_kernel();
+		d_drop(de);
+		return PTR_ERR(inode);
+	}
+
+	/* invalidate the directory cnode's attributes */
+	coda_dir_changed(dir, 0);
+	unlock_kernel();
+	d_instantiate(de, inode);
+        return 0;
+}
+
+static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
+{
+	struct inode *inode;
+	struct coda_vattr attrs;
+	const char *name = de->d_name.name;
+	int len = de->d_name.len;
+	int error;
+	struct CodaFid newfid;
+
+	lock_kernel();
+	coda_vfs_stat.mkdir++;
+
+	if (coda_isroot(dir) && coda_iscontrol(name, len)) {
+		unlock_kernel();
+		return -EPERM;
+	}
+
+	attrs.va_mode = mode;
+	error = venus_mkdir(dir->i_sb, coda_i2f(dir), 
+			       name, len, &newfid, &attrs);
+        
+        if ( error ) {
+		unlock_kernel();
+		d_drop(de);
+		return error;
+        }
+         
+	inode = coda_iget(dir->i_sb, &newfid, &attrs);
+	if ( IS_ERR(inode) ) {
+		unlock_kernel();
+		d_drop(de);
+		return PTR_ERR(inode);
+	}
+	
+	/* invalidate the directory cnode's attributes */
+	coda_dir_changed(dir, 1);
+	unlock_kernel();
+	d_instantiate(de, inode);
+        return 0;
+}
+
+/* try to make de an entry in dir_inode linked to source_de */
+static int coda_link(struct dentry *source_de, struct inode *dir_inode, 
+	  struct dentry *de)
+{
+	struct inode *inode = source_de->d_inode;
+        const char * name = de->d_name.name;
+	int len = de->d_name.len;
+	int error;
+
+	lock_kernel();
+	coda_vfs_stat.link++;
+
+	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
+		unlock_kernel();
+		return -EPERM;
+	}
+
+	error = venus_link(dir_inode->i_sb, coda_i2f(inode),
+			   coda_i2f(dir_inode), (const char *)name, len);
+
+	if (error) { 
+		d_drop(de);
+		goto out;
+	}
+
+	coda_dir_changed(dir_inode, 0);
+	atomic_inc(&inode->i_count);
+	d_instantiate(de, inode);
+	inode->i_nlink++;
+        
+out:
+	unlock_kernel();
+	return(error);
+}
+
+
+static int coda_symlink(struct inode *dir_inode, struct dentry *de,
+			const char *symname)
+{
+        const char *name = de->d_name.name;
+	int len = de->d_name.len;
+	int symlen;
+        int error=0;
+        
+	lock_kernel();
+	coda_vfs_stat.symlink++;
+
+	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
+		unlock_kernel();
+		return -EPERM;
+	}
+
+	symlen = strlen(symname);
+	if ( symlen > CODA_MAXPATHLEN ) {
+		unlock_kernel();
+		return -ENAMETOOLONG;
+	}
+
+	/*
+	 * This entry is now negative. Since we do not create
+	 * an inode for the entry we have to drop it. 
+	 */
+	d_drop(de);
+	error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len, 
+			      symname, symlen);
+
+	/* mtime is no good anymore */
+	if ( !error )
+		coda_dir_changed(dir_inode, 0);
+
+	unlock_kernel();
+        return error;
+}
+
+/* destruction routines: unlink, rmdir */
+int coda_unlink(struct inode *dir, struct dentry *de)
+{
+        int error;
+	const char *name = de->d_name.name;
+	int len = de->d_name.len;
+
+	lock_kernel();
+	coda_vfs_stat.unlink++;
+
+        error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
+        if ( error ) {
+		unlock_kernel();
+                return error;
+        }
+
+	coda_dir_changed(dir, 0);
+	de->d_inode->i_nlink--;
+	unlock_kernel();
+
+        return 0;
+}
+
+int coda_rmdir(struct inode *dir, struct dentry *de)
+{
+	const char *name = de->d_name.name;
+	int len = de->d_name.len;
+        int error;
+
+	lock_kernel();
+	coda_vfs_stat.rmdir++;
+
+	if (!d_unhashed(de)) {
+		unlock_kernel();
+		return -EBUSY;
+	}
+	error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
+
+        if ( error ) {
+		unlock_kernel();
+                return error;
+        }
+
+	coda_dir_changed(dir, -1);
+	de->d_inode->i_nlink--;
+	d_delete(de);
+	unlock_kernel();
+
+        return 0;
+}
+
+/* rename */
+static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, 
+		       struct inode *new_dir, struct dentry *new_dentry)
+{
+        const char *old_name = old_dentry->d_name.name;
+        const char *new_name = new_dentry->d_name.name;
+	int old_length = old_dentry->d_name.len;
+	int new_length = new_dentry->d_name.len;
+        int link_adjust = 0;
+        int error;
+
+	lock_kernel();
+	coda_vfs_stat.rename++;
+
+        error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), 
+			     coda_i2f(new_dir), old_length, new_length, 
+			     (const char *) old_name, (const char *)new_name);
+
+        if ( !error ) {
+		if ( new_dentry->d_inode ) {
+			if ( S_ISDIR(new_dentry->d_inode->i_mode) )
+                        	link_adjust = 1;
+
+                        coda_dir_changed(old_dir, -link_adjust);
+                        coda_dir_changed(new_dir,  link_adjust);
+			coda_flag_inode(new_dentry->d_inode, C_VATTR);
+		} else {
+			coda_flag_inode(old_dir, C_VATTR);
+			coda_flag_inode(new_dir, C_VATTR);
+                }
+	}
+	unlock_kernel();
+
+	return error;
+}
+
+
+/* file operations for directories */
+int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
+{
+	struct dentry *coda_dentry = coda_file->f_dentry;
+	struct coda_file_info *cfi;
+	struct file *host_file;
+	struct inode *host_inode;
+	int ret;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	coda_vfs_stat.readdir++;
+
+	host_inode = host_file->f_dentry->d_inode;
+	down(&host_inode->i_sem);
+	host_file->f_pos = coda_file->f_pos;
+
+	if (!host_file->f_op->readdir) {
+		/* Venus: we must read Venus dirents from the file */
+		ret = coda_venus_readdir(host_file, filldir, dirent, coda_dentry);
+	} else {
+		/* potemkin case: we were handed a directory inode. */
+		/* Yuk, we can't call vfs_readdir because we are already
+		 * holding the inode semaphore. */
+		ret = -ENOTDIR;
+		if (!host_file->f_op || !host_file->f_op->readdir)
+			goto out;
+
+		ret = -ENOENT;
+		if (!IS_DEADDIR(host_inode)) {
+			ret = host_file->f_op->readdir(host_file, filldir, dirent);
+			file_accessed(host_file);
+		}
+	}
+out:
+	coda_file->f_pos = host_file->f_pos;
+	up(&host_inode->i_sem);
+
+	return ret;
+}
+
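+/* translate Coda dirent types into the VFS DT_* values handed to filldir */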
+static inline unsigned int CDT2DT(unsigned char cdt)
+{
+	unsigned int dt;
+
+	switch(cdt) {
+	case CDT_UNKNOWN: dt = DT_UNKNOWN; break;
+	case CDT_FIFO:	  dt = DT_FIFO;    break;
+	case CDT_CHR:	  dt = DT_CHR;     break;
+	case CDT_DIR:	  dt = DT_DIR;     break;
+	case CDT_BLK:	  dt = DT_BLK;     break;
+	case CDT_REG:	  dt = DT_REG;     break;
+	case CDT_LNK:	  dt = DT_LNK;     break;
+	case CDT_SOCK:	  dt = DT_SOCK;    break;
+	case CDT_WHT:	  dt = DT_WHT;     break;
+	default:	  dt = DT_UNKNOWN; break;
+	}
+	return dt;
+}
+
+/* support routines */
+static int coda_venus_readdir(struct file *filp, filldir_t filldir,
+			      void *dirent, struct dentry *dir)
+{
+	int result = 0; /* # of entries returned */
+	struct venus_dirent *vdir;
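+	/* offset of d_name within struct venus_dirent, i.e. the size of the
+	   fixed-length dirent header */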
+	unsigned long vdir_size =
+	    (unsigned long)(&((struct venus_dirent *)0)->d_name);
+	unsigned int type;
+	struct qstr name;
+	ino_t ino;
+	int ret, i;
+
+	vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
+	if (!vdir)
+		return -ENOMEM;
+
+	i = filp->f_pos;
+	switch(i) {
+	case 0:
+		ret = filldir(dirent, ".", 1, 0, dir->d_inode->i_ino, DT_DIR);
+		if (ret < 0) break;
+		result++;
+		filp->f_pos++;
+		/* fallthrough */
+	case 1:
+		ret = filldir(dirent, "..", 2, 1, dir->d_parent->d_inode->i_ino, DT_DIR);
+		if (ret < 0) break;
+		result++;
+		filp->f_pos++;
+		/* fallthrough */
+	default:
+	while (1) {
+		/* read entries from the directory file */
+		ret = kernel_read(filp, filp->f_pos - 2, (char *)vdir,
+				  sizeof(*vdir));
+		if (ret < 0) {
+			printk("coda_venus_readdir: read dir failed %d\n", ret);
+			break;
+		}
+		if (ret == 0) break; /* end of directory file reached */
+
+		/* catch truncated reads */
+		if (ret < vdir_size || ret < vdir_size + vdir->d_namlen) {
+			printk("coda_venus_readdir: short read: %ld\n",
+			       filp->f_dentry->d_inode->i_ino);
+			ret = -EBADF;
+			break;
+		}
+		/* validate whether the directory file actually makes sense */
+		if (vdir->d_reclen < vdir_size + vdir->d_namlen) {
+			printk("coda_venus_readdir: Invalid dir: %ld\n",
+			       filp->f_dentry->d_inode->i_ino);
+			ret = -EBADF;
+			break;
+		}
+
+		name.len = vdir->d_namlen;
+		name.name = vdir->d_name;
+
+		/* Make sure we skip '.' and '..', we already got those */
+		if (name.name[0] == '.' && (name.len == 1 ||
+		    (vdir->d_name[1] == '.' && name.len == 2)))
+			vdir->d_fileno = name.len = 0;
+
+		/* skip null entries */
+		if (vdir->d_fileno && name.len) {
+			/* try to look up this entry in the dcache, that way
+			 * userspace doesn't have to worry about breaking
+			 * getcwd by having mismatched inode numbers for
+			 * internal volume mountpoints. */
+			ino = find_inode_number(dir, &name);
+			if (!ino) ino = vdir->d_fileno;
+
+			type = CDT2DT(vdir->d_type);
+			ret = filldir(dirent, name.name, name.len, filp->f_pos,
+				      ino, type); 
+			/* failure means no space for filling in this round */
+			if (ret < 0) break;
+			result++;
+		}
+		/* we'll always have progress because d_reclen is unsigned and
+		 * we've already established it is non-zero. */
+		filp->f_pos += vdir->d_reclen;
+	}
+	} 
+	kfree(vdir);
+	return result ? result : ret;
+}
+
+/* called when a cache lookup succeeds */
+static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
+{
+	struct inode *inode = de->d_inode;
+	struct coda_inode_info *cii;
+
+	if (!inode)
+		return 1;
+	lock_kernel();
+	if (coda_isroot(inode))
+		goto out;
+	if (is_bad_inode(inode))
+		goto bad;
+
+	cii = ITOC(de->d_inode);
+	if (!(cii->c_flags & (C_PURGE | C_FLUSH)))
+		goto out;
+
+	shrink_dcache_parent(de);
+
+	/* propagate for a flush */
+	if (cii->c_flags & C_FLUSH) 
+		coda_flag_inode_children(inode, C_FLUSH);
+
+	if (atomic_read(&de->d_count) > 1)
+		/* pretend it's valid, but don't change the flags */
+		goto out;
+
+	/* clear the flags. */
+	cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
+
+bad:
+	unlock_kernel();
+	return 0;
+out:
+	unlock_kernel();
+	return 1;
+}
+
+/*
+ * This is the callback from dput() when d_count is going to 0.
+ * We use this to unhash dentries with bad inodes.
+ */
+static int coda_dentry_delete(struct dentry * dentry)
+{
+	int flags;
+
+	if (!dentry->d_inode) 
+		return 0;
+
+	flags = (ITOC(dentry->d_inode)->c_flags) & C_PURGE;
+	if (is_bad_inode(dentry->d_inode) || flags) {
+		return 1;
+	}
+	return 0;
+}
+
+
+
+/*
+ * This is called when we want to check if the inode has
+ * changed on the server.  Coda makes this easy since the
+ * cache manager Venus issues a downcall to the kernel when this 
+ * happens 
+ */
+int coda_revalidate_inode(struct dentry *dentry)
+{
+	struct coda_vattr attr;
+	int error = 0;
+	int old_mode;
+	ino_t old_ino;
+	struct inode *inode = dentry->d_inode;
+	struct coda_inode_info *cii = ITOC(inode);
+
+	lock_kernel();
+	if ( !cii->c_flags )
+		goto ok;
+
+	if (cii->c_flags & (C_VATTR | C_PURGE | C_FLUSH)) {
+		error = venus_getattr(inode->i_sb, &(cii->c_fid), &attr);
+		if ( error )
+			goto return_bad;
+
+		/* this inode may be lost if:
+		   - its ino changed
+		   - type changes must be permitted for repair and
+		   missing mount points.
+		*/
+		old_mode = inode->i_mode;
+		old_ino = inode->i_ino;
+		coda_vattr_to_iattr(inode, &attr);
+
+		if ((old_mode & S_IFMT) != (inode->i_mode & S_IFMT)) {
+			printk("Coda: inode %ld, fid %s changed type!\n",
+			       inode->i_ino, coda_f2s(&(cii->c_fid)));
+		}
+
+		/* the following can happen when a local fid is replaced 
+		   with a global one, here we lose and declare the inode bad */
+		if (inode->i_ino != old_ino)
+			goto return_bad;
+		
+		coda_flag_inode_children(inode, C_FLUSH);
+		cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
+	}
+
+ok:
+	unlock_kernel();
+	return 0;
+
+return_bad:
+	unlock_kernel();
+	return -EIO;
+}
diff --git a/fs/coda/file.c b/fs/coda/file.c
new file mode 100644
index 0000000..e6bc022
--- /dev/null
+++ b/fs/coda/file.c
@@ -0,0 +1,300 @@
+/*
+ * File operations for Coda.
+ * Original version: (C) 1996 Peter Braam 
+ * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
+ *
+ * Carnegie Mellon encourages users of this code to contribute improvements
+ * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/smp_lock.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_proc.h>
+
+/* if CODA_STORE fails with EOPNOTSUPP, venus clearly doesn't support
+ * CODA_STORE/CODA_RELEASE and we fall back on using the CODA_CLOSE upcall */
+static int use_coda_close;
+
+static ssize_t
+coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct coda_file_info *cfi;
+	struct file *host_file;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	if (!host_file->f_op || !host_file->f_op->read)
+		return -EINVAL;
+
+	return host_file->f_op->read(host_file, buf, count, ppos);
+}
+
+static ssize_t
+coda_file_sendfile(struct file *coda_file, loff_t *ppos, size_t count,
+		   read_actor_t actor, void *target)
+{
+	struct coda_file_info *cfi;
+	struct file *host_file;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	if (!host_file->f_op || !host_file->f_op->sendfile)
+		return -EINVAL;
+
+	return host_file->f_op->sendfile(host_file, ppos, count, actor, target);
+}
+
+static ssize_t
+coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct inode *host_inode, *coda_inode = coda_file->f_dentry->d_inode;
+	struct coda_file_info *cfi;
+	struct file *host_file;
+	ssize_t ret;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	if (!host_file->f_op || !host_file->f_op->write)
+		return -EINVAL;
+
+	host_inode = host_file->f_dentry->d_inode;
+	down(&coda_inode->i_sem);
+
+	ret = host_file->f_op->write(host_file, buf, count, ppos);
+
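+	/* update the coda inode's size, block count and times to reflect the
+	 * write that just went to the container file */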
+	coda_inode->i_size = host_inode->i_size;
+	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
+	coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
+	up(&coda_inode->i_sem);
+
+	return ret;
+}
+
+static int
+coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+{
+	struct coda_file_info *cfi;
+	struct coda_inode_info *cii;
+	struct file *host_file;
+	struct inode *coda_inode, *host_inode;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	if (!host_file->f_op || !host_file->f_op->mmap)
+		return -ENODEV;
+
+	coda_inode = coda_file->f_dentry->d_inode;
+	host_inode = host_file->f_dentry->d_inode;
+	coda_file->f_mapping = host_file->f_mapping;
+	if (coda_inode->i_mapping == &coda_inode->i_data)
+		coda_inode->i_mapping = host_inode->i_mapping;
+
+	/* only allow additional mmaps as long as userspace isn't changing
+	 * the container file on us! */
+	else if (coda_inode->i_mapping != host_inode->i_mapping)
+		return -EBUSY;
+
+	/* keep track of how often the coda_inode/host_file has been mmapped */
+	cii = ITOC(coda_inode);
+	cii->c_mapcount++;
+	cfi->cfi_mapcount++;
+
+	return host_file->f_op->mmap(host_file, vma);
+}
+
+int coda_open(struct inode *coda_inode, struct file *coda_file)
+{
+	struct file *host_file = NULL;
+	int error;
+	unsigned short flags = coda_file->f_flags & (~O_EXCL);
+	unsigned short coda_flags = coda_flags_to_cflags(flags);
+	struct coda_file_info *cfi;
+
+	coda_vfs_stat.open++;
+
+	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
+	if (!cfi)
+		return -ENOMEM;
+
+	lock_kernel();
+
+	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
+			   &host_file); 
+	if (error || !host_file) {
+		kfree(cfi);
+		unlock_kernel();
+		return error;
+	}
+
+	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);
+
+	cfi->cfi_magic = CODA_MAGIC;
+	cfi->cfi_mapcount = 0;
+	cfi->cfi_container = host_file;
+
+	BUG_ON(coda_file->private_data != NULL);
+	coda_file->private_data = cfi;
+
+	unlock_kernel();
+	return 0;
+}
+
+int coda_flush(struct file *coda_file)
+{
+	unsigned short flags = coda_file->f_flags & ~O_EXCL;
+	unsigned short coda_flags = coda_flags_to_cflags(flags);
+	struct coda_file_info *cfi;
+	struct inode *coda_inode;
+	int err = 0, fcnt;
+
+	lock_kernel();
+
+	coda_vfs_stat.flush++;
+
+	/* last close semantics */
+	fcnt = file_count(coda_file);
+	if (fcnt > 1)
+		goto out;
+
+	/* No need to make an upcall when we have not made any modifications
+	 * to the file */
+	if ((coda_file->f_flags & O_ACCMODE) == O_RDONLY)
+		goto out;
+
+	if (use_coda_close)
+		goto out;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+
+	coda_inode = coda_file->f_dentry->d_inode;
+
+	err = venus_store(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
+			  coda_file->f_uid);
+
+	if (err == -EOPNOTSUPP) {
+		use_coda_close = 1;
+		err = 0;
+	}
+
+out:
+	unlock_kernel();
+	return err;
+}
+
+int coda_release(struct inode *coda_inode, struct file *coda_file)
+{
+	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
+	unsigned short coda_flags = coda_flags_to_cflags(flags);
+	struct coda_file_info *cfi;
+	struct coda_inode_info *cii;
+	struct inode *host_inode;
+	int err = 0;
+
+	lock_kernel();
+	coda_vfs_stat.release++;
+ 
+	if (!use_coda_close) {
+		err = venus_release(coda_inode->i_sb, coda_i2f(coda_inode),
+				    coda_flags);
+		if (err == -EOPNOTSUPP) {
+			use_coda_close = 1;
+			err = 0;
+		}
+	}
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+
+	if (use_coda_close)
+		err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
+				  coda_flags, coda_file->f_uid);
+
+	host_inode = cfi->cfi_container->f_dentry->d_inode;
+	cii = ITOC(coda_inode);
+
+	/* did we mmap this file? */
+	if (coda_inode->i_mapping == &host_inode->i_data) {
+		cii->c_mapcount -= cfi->cfi_mapcount;
+		if (!cii->c_mapcount)
+			coda_inode->i_mapping = &coda_inode->i_data;
+	}
+
+	fput(cfi->cfi_container);
+	kfree(coda_file->private_data);
+	coda_file->private_data = NULL;
+
+	unlock_kernel();
+	return err;
+}
+
+int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
+{
+	struct file *host_file;
+	struct dentry *host_dentry;
+	struct inode *host_inode, *coda_inode = coda_dentry->d_inode;
+	struct coda_file_info *cfi;
+	int err = 0;
+
+	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
+	      S_ISLNK(coda_inode->i_mode)))
+		return -EINVAL;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	coda_vfs_stat.fsync++;
+
+	if (host_file->f_op && host_file->f_op->fsync) {
+		host_dentry = host_file->f_dentry;
+		host_inode = host_dentry->d_inode;
+		down(&host_inode->i_sem);
+		err = host_file->f_op->fsync(host_file, host_dentry, datasync);
+		up(&host_inode->i_sem);
+	}
+
+	if ( !err && !datasync ) {
+		lock_kernel();
+		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
+		unlock_kernel();
+	}
+
+	return err;
+}
+
+struct file_operations coda_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= coda_file_read,
+	.write		= coda_file_write,
+	.mmap		= coda_file_mmap,
+	.open		= coda_open,
+	.flush		= coda_flush,
+	.release	= coda_release,
+	.fsync		= coda_fsync,
+	.sendfile	= coda_file_sendfile,
+};
+
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
new file mode 100644
index 0000000..04a73fb
--- /dev/null
+++ b/fs/coda/inode.c
@@ -0,0 +1,321 @@
+/*
+ * Super block/filesystem wide operations
+ *
+ * Copyright (C) 1996 Peter J. Braam <braam@maths.ox.ac.uk> and 
+ * Michael Callahan <callahan@maths.ox.ac.uk> 
+ * 
+ * Rewritten for Linux 2.1.  Peter Braam <braam@cs.cmu.edu>
+ * Copyright (C) Carnegie Mellon University
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/smp_lock.h>
+#include <linux/file.h>
+#include <linux/vfs.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_cache.h>
+
+/* VFS super_block ops */
+static void coda_clear_inode(struct inode *);
+static void coda_put_super(struct super_block *);
+static int coda_statfs(struct super_block *sb, struct kstatfs *buf);
+
+static kmem_cache_t * coda_inode_cachep;
+
+static struct inode *coda_alloc_inode(struct super_block *sb)
+{
+	struct coda_inode_info *ei;
+	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
+	ei->c_flags = 0;
+	ei->c_uid = 0;
+	ei->c_cached_perm = 0;
+	return &ei->vfs_inode;
+}
+
+static void coda_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(coda_inode_cachep, ITOC(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+int coda_init_inodecache(void)
+{
+	coda_inode_cachep = kmem_cache_create("coda_inode_cache",
+				sizeof(struct coda_inode_info),
+				0, SLAB_RECLAIM_ACCOUNT,
+				init_once, NULL);
+	if (coda_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void coda_destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(coda_inode_cachep))
+		printk(KERN_INFO "coda_inode_cache: not all structures were freed\n");
+}
+
+static int coda_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+/* exported operations */
+static struct super_operations coda_super_operations =
+{
+	.alloc_inode	= coda_alloc_inode,
+	.destroy_inode	= coda_destroy_inode,
+	.clear_inode	= coda_clear_inode,
+	.put_super	= coda_put_super,
+	.statfs		= coda_statfs,
+	.remount_fs	= coda_remount,
+};
+
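+/* map the character-device fd passed in the mount data to a coda_comms[]
+   index; returns -1 if the mount data or the device is unusable */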
+static int get_device_index(struct coda_mount_data *data)
+{
+	struct file *file;
+	struct inode *inode;
+	int idx;
+
+	if(data == NULL) {
+		printk("coda_read_super: Bad mount data\n");
+		return -1;
+	}
+
+	if(data->version != CODA_MOUNT_VERSION) {
+		printk("coda_read_super: Bad mount version\n");
+		return -1;
+	}
+
+	file = fget(data->fd);
+	inode = NULL;
+	if(file)
+		inode = file->f_dentry->d_inode;
+	
+	if(!inode || !S_ISCHR(inode->i_mode) ||
+	   imajor(inode) != CODA_PSDEV_MAJOR) {
+		if(file)
+			fput(file);
+
+		printk("coda_read_super: Bad file\n");
+		return -1;
+	}
+
+	idx = iminor(inode);
+	fput(file);
+
+	if(idx < 0 || idx >= MAX_CODADEVS) {
+		printk("coda_read_super: Bad minor number\n");
+		return -1;
+	}
+
+	return idx;
+}
+
+static int coda_fill_super(struct super_block *sb, void *data, int silent)
+{
+        struct inode *root = NULL; 
+	struct coda_sb_info *sbi = NULL;
+	struct venus_comm *vc = NULL;
+	struct CodaFid fid;
+        int error;
+	int idx;
+
+	idx = get_device_index((struct coda_mount_data *) data);
+
+	/* Ignore errors in data, for backward compatibility */
+	if(idx == -1)
+		idx = 0;
+	
+	printk(KERN_INFO "coda_read_super: device index: %i\n", idx);
+
+	vc = &coda_comms[idx];
+	if (!vc->vc_inuse) {
+		printk("coda_read_super: No pseudo device\n");
+		return -EINVAL;
+	}
+
+        if ( vc->vc_sb ) {
+		printk("coda_read_super: Device already mounted\n");
+		return -EBUSY;
+	}
+
+	sbi = kmalloc(sizeof(struct coda_sb_info), GFP_KERNEL);
+	if(!sbi) {
+		return -ENOMEM;
+	}
+
+	vc->vc_sb = sb;
+
+	sbi->sbi_vcomm = vc;
+
+        sb->s_fs_info = sbi;
+	sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
+        sb->s_blocksize = 1024;	/* XXXXX  what do we put here?? */
+        sb->s_blocksize_bits = 10;
+        sb->s_magic = CODA_SUPER_MAGIC;
+        sb->s_op = &coda_super_operations;
+
+	/* get root fid from Venus: this needs the root inode */
+	error = venus_rootfid(sb, &fid);
+	if ( error ) {
+	        printk("coda_read_super: coda_get_rootfid failed with %d\n",
+		       error);
+		goto error;
+	}
+	printk("coda_read_super: rootfid is %s\n", coda_f2s(&fid));
+	
+	/* make root inode */
+        error = coda_cnode_make(&root, &fid, sb);
+        if ( error || !root ) {
+	    printk("Failure of coda_cnode_make for root: error %d\n", error);
+	    goto error;
+	} 
+
+	printk("coda_read_super: rootinode is %ld dev %s\n", 
+	       root->i_ino, root->i_sb->s_id);
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root)
+		goto error;
+        return 0;
+
+ error:
+	if (sbi) {
+		kfree(sbi);
+		if(vc)
+			vc->vc_sb = NULL;		
+	}
+	if (root)
+                iput(root);
+
+        return -EINVAL;
+}
+
+static void coda_put_super(struct super_block *sb)
+{
+        struct coda_sb_info *sbi;
+
+	sbi = coda_sbp(sb);
+	sbi->sbi_vcomm->vc_sb = NULL;
+
+	printk("Coda: Bye bye.\n");
+	kfree(sbi);
+}
+
+static void coda_clear_inode(struct inode *inode)
+{
+	coda_cache_clear_inode(inode);
+}
+
+int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	int err = coda_revalidate_inode(dentry);
+	if (!err)
+		generic_fillattr(dentry->d_inode, stat);
+	return err;
+}
+
+int coda_setattr(struct dentry *de, struct iattr *iattr)
+{
+	struct inode *inode = de->d_inode;
+	struct coda_vattr vattr;
+	int error;
+
+	lock_kernel();
+	
+	memset(&vattr, 0, sizeof(vattr)); 
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	coda_iattr_to_vattr(iattr, &vattr);
+	vattr.va_type = C_VNON; /* cannot set type */
+
+	/* Venus is responsible for truncating the container-file!!! */
+	error = venus_setattr(inode->i_sb, coda_i2f(inode), &vattr);
+
+	if ( !error ) {
+	        coda_vattr_to_iattr(inode, &vattr); 
+		coda_cache_clear_inode(inode);
+	}
+
+	unlock_kernel();
+
+	return error;
+}
+
+struct inode_operations coda_file_inode_operations = {
+	.permission	= coda_permission,
+	.getattr	= coda_getattr,
+	.setattr	= coda_setattr,
+};
+
+static int coda_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	int error;
+	
+	lock_kernel();
+
+	error = venus_statfs(sb, buf);
+
+	unlock_kernel();
+
+	if (error) {
+		/* fake something like AFS does */
+		buf->f_blocks = 9000000;
+		buf->f_bfree  = 9000000;
+		buf->f_bavail = 9000000;
+		buf->f_files  = 9000000;
+		buf->f_ffree  = 9000000;
+	}
+
+	/* and fill in the rest */
+	buf->f_type = CODA_SUPER_MAGIC;
+	buf->f_bsize = 1024;
+	buf->f_namelen = CODA_MAXNAMLEN;
+
+	return 0; 
+}
+
+/* init_coda: used by filesystems.c to register coda */
+
+static struct super_block *coda_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, coda_fill_super);
+}
+
+struct file_system_type coda_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "coda",
+	.get_sb		= coda_get_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags	= FS_BINARY_MOUNTDATA,
+};
+
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
new file mode 100644
index 0000000..1277149
--- /dev/null
+++ b/fs/coda/pioctl.c
@@ -0,0 +1,95 @@
+/*
+ * Pioctl operations for Coda.
+ * Original version: (C) 1996 Peter Braam 
+ * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
+ *
+ * Carnegie Mellon encourages users of this code to contribute improvements
+ * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/namei.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_psdev.h>
+
+/* pioctl ops */
+static int coda_ioctl_permission(struct inode *inode, int mask,
+				 struct nameidata *nd);
+static int coda_pioctl(struct inode * inode, struct file * filp, 
+                       unsigned int cmd, unsigned long user_data);
+
+/* exported from this file */
+struct inode_operations coda_ioctl_inode_operations =
+{
+	.permission	= coda_ioctl_permission,
+	.setattr	= coda_setattr,
+};
+
+struct file_operations coda_ioctl_operations = {
+	.owner		= THIS_MODULE,
+	.ioctl		= coda_pioctl,
+};
+
+/* the coda pioctl inode ops */
+static int coda_ioctl_permission(struct inode *inode, int mask,
+				 struct nameidata *nd)
+{
+        return 0;
+}
+
+static int coda_pioctl(struct inode * inode, struct file * filp, 
+                       unsigned int cmd, unsigned long user_data)
+{
+	struct nameidata nd;
+        int error;
+	struct PioctlData data;
+        struct inode *target_inode = NULL;
+        struct coda_inode_info *cnp;
+
+        /* get the Pioctl data arguments from user space */
+        if (copy_from_user(&data, (void __user *)user_data, sizeof(data))) {
+	    return -EINVAL;
+	}
+       
+        /* 
+         * Look up the pathname. Note that the pathname is in 
+         * user memory, and namei takes care of this
+         */
+        if ( data.follow ) {
+                error = user_path_walk(data.path, &nd);
+	} else {
+	        error = user_path_walk_link(data.path, &nd);
+	}
+		
+	if ( error ) {
+		return error;
+        } else {
+	        target_inode = nd.dentry->d_inode;
+	}
+	
+	/* return if it is not a Coda inode */
+	if ( target_inode->i_sb != inode->i_sb ) {
+		path_release(&nd);
+	        return  -EINVAL;
+	}
+
+	/* now proceed to make the upcall */
+        cnp = ITOC(target_inode);
+
+	error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data);
+
+	path_release(&nd);
+        return error;
+}
+
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
new file mode 100644
index 0000000..ef001a9
--- /dev/null
+++ b/fs/coda/psdev.c
@@ -0,0 +1,462 @@
+/*
+ *      	An implementation of a loadable kernel mode driver providing
+ *		multiple kernel/user space bidirectional communications links.
+ *
+ * 		Author: 	Alan Cox <alan@redhat.com>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ * 
+ *              Adapted to become the Linux 2.0 Coda pseudo device
+ *              Peter  Braam  <braam@maths.ox.ac.uk> 
+ *              Michael Callahan <mjc@emmy.smith.edu>           
+ *
+ *              Changes for Linux 2.1
+ *              Copyright (c) 1997 Carnegie-Mellon University
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/poll.h>
+#include <asm/uaccess.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_proc.h>
+
+#define upc_free(r) kfree(r)
+
+/* 
+ * Coda stuff
+ */
+extern struct file_system_type coda_fs_type;
+
+/* statistics */
+int           coda_hard;         /* allows signals during upcalls */
+unsigned long coda_timeout = 30; /* .. secs, then signals will dequeue */
+
+
+struct venus_comm coda_comms[MAX_CODADEVS];
+static struct class_simple *coda_psdev_class;
+
+/*
+ * Device operations
+ */
+
+static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
+{
+        struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+	unsigned int mask = POLLOUT | POLLWRNORM;
+
+	poll_wait(file, &vcp->vc_waitq, wait);
+	if (!list_empty(&vcp->vc_pending))
+                mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+static int coda_psdev_ioctl(struct inode * inode, struct file * filp, 
+			    unsigned int cmd, unsigned long arg)
+{
+	unsigned int data;
+
+	switch(cmd) {
+	case CIOC_KERNEL_VERSION:
+		data = CODA_KERNEL_VERSION;
+		return put_user(data, (int __user *) arg);
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+/*
+ *	Receive a message written by Venus to the psdev
+ */
+ 
+static ssize_t coda_psdev_write(struct file *file, const char __user *buf, 
+				size_t nbytes, loff_t *off)
+{
+        struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+        struct upc_req *req = NULL;
+        struct upc_req *tmp;
+	struct list_head *lh;
+	struct coda_in_hdr hdr;
+	ssize_t retval = 0, count = 0;
+	int error;
+
+        /* Peek at the opcode, uniquefier */
+	if (copy_from_user(&hdr, buf, 2 * sizeof(u_long)))
+	        return -EFAULT;
+
+        if (DOWNCALL(hdr.opcode)) {
+		struct super_block *sb = NULL;
+                union outputArgs *dcbuf;
+		int size = sizeof(*dcbuf);
+
+		sb = vcp->vc_sb;
+		if ( !sb ) {
+                        count = nbytes;
+                        goto out;
+		}
+
+		if  ( nbytes < sizeof(struct coda_out_hdr) ) {
+		        printk("coda_downcall opc %d uniq %d, not enough!\n",
+			       hdr.opcode, hdr.unique);
+			count = nbytes;
+			goto out;
+		}
+		if ( nbytes > size ) {
+		        printk("Coda: downcall opc %d, uniq %d, too much!",
+			       hdr.opcode, hdr.unique);
+		        nbytes = size;
+		}
+		CODA_ALLOC(dcbuf, union outputArgs *, nbytes);
+		if (copy_from_user(dcbuf, buf, nbytes)) {
+			CODA_FREE(dcbuf, nbytes);
+			retval = -EFAULT;
+			goto out;
+		}
+
+		/* what downcall errors does Venus handle ? */
+		lock_kernel();
+		error = coda_downcall(hdr.opcode, dcbuf, sb);
+		unlock_kernel();
+
+		CODA_FREE(dcbuf, nbytes);
+		if (error) {
+		        printk("psdev_write: coda_downcall error: %d\n", error);
+			retval = error;
+			goto out;
+		}
+		count = nbytes;
+		goto out;
+	}
+        
+	/* Look for the message on the processing queue. */
+	lock_kernel();
+	list_for_each(lh, &vcp->vc_processing) {
+		tmp = list_entry(lh, struct upc_req , uc_chain);
+		if (tmp->uc_unique == hdr.unique) {
+			req = tmp;
+			list_del(&req->uc_chain);
+			break;
+		}
+	}
+	unlock_kernel();
+
+	if (!req) {
+		printk("psdev_write: msg (%d, %d) not found\n", 
+			hdr.opcode, hdr.unique);
+		retval = -ESRCH;
+		goto out;
+	}
+
+        /* move data into response buffer. */
+	if (req->uc_outSize < nbytes) {
+                printk("psdev_write: too much cnt: %d, cnt: %ld, opc: %d, uniq: %d.\n",
+		       req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique);
+		nbytes = req->uc_outSize; /* don't have more space! */
+	}
+        if (copy_from_user(req->uc_data, buf, nbytes)) {
+		req->uc_flags |= REQ_ABORT;
+		wake_up(&req->uc_sleep);
+		retval = -EFAULT;
+		goto out;
+	}
+
+	/* adjust outsize. is this useful ?? */
+        req->uc_outSize = nbytes;	
+        req->uc_flags |= REQ_WRITE;
+	count = nbytes;
+
+	/* Convert file descriptor into a file handle */
+	if (req->uc_opcode == CODA_OPEN_BY_FD) {
+		struct coda_open_by_fd_out *outp =
+			(struct coda_open_by_fd_out *)req->uc_data;
+		outp->fh = fget(outp->fd);
+	}
+
+        wake_up(&req->uc_sleep);
+out:
+        return(count ? count : retval);  
+}
+
+/*
+ *	Read a message from the kernel to Venus
+ */
+
+static ssize_t coda_psdev_read(struct file * file, char __user * buf, 
+			       size_t nbytes, loff_t *off)
+{
+	DECLARE_WAITQUEUE(wait, current);
+        struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+        struct upc_req *req;
+	ssize_t retval = 0, count = 0;
+
+	if (nbytes == 0)
+		return 0;
+
+	lock_kernel();
+
+	add_wait_queue(&vcp->vc_waitq, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	while (list_empty(&vcp->vc_pending)) {
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	}
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&vcp->vc_waitq, &wait);
+
+	if (retval)
+		goto out;
+
+	req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain);
+	list_del(&req->uc_chain);
+
+	/* Move the input args into userspace */
+	count = req->uc_inSize;
+	if (nbytes < req->uc_inSize) {
+                printk ("psdev_read: Venus read %ld bytes of %d in message\n",
+			(long)nbytes, req->uc_inSize);
+		count = nbytes;
+        }
+
+	if (copy_to_user(buf, req->uc_data, count))
+	        retval = -EFAULT;
+        
+	/* If request was not a signal, enqueue and don't free */
+	if (!(req->uc_flags & REQ_ASYNC)) {
+		req->uc_flags |= REQ_READ;
+		list_add(&(req->uc_chain), vcp->vc_processing.prev);
+		goto out;
+	}
+
+	CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
+	upc_free(req);
+out:
+	unlock_kernel();
+	return (count ? count : retval);
+}
+
+static int coda_psdev_open(struct inode * inode, struct file * file)
+{
+        struct venus_comm *vcp;
+	int idx;
+
+	lock_kernel();
+	idx = iminor(inode);
+	if(idx >= MAX_CODADEVS) {
+		unlock_kernel();
+		return -ENODEV;
+	}
+
+	vcp = &coda_comms[idx];
+	if(vcp->vc_inuse) {
+		unlock_kernel();
+		return -EBUSY;
+	}
+	
+	if (!vcp->vc_inuse++) {
+		INIT_LIST_HEAD(&vcp->vc_pending);
+		INIT_LIST_HEAD(&vcp->vc_processing);
+		init_waitqueue_head(&vcp->vc_waitq);
+		vcp->vc_sb = NULL;
+		vcp->vc_seq = 0;
+	}
+	
+	file->private_data = vcp;
+
+	unlock_kernel();
+        return 0;
+}
+
+
+static int coda_psdev_release(struct inode * inode, struct file * file)
+{
+        struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+        struct upc_req *req, *tmp;
+
+	lock_kernel();
+	if ( !vcp->vc_inuse ) {
+		unlock_kernel();
+		printk("psdev_release: Not open.\n");
+		return -1;
+	}
+
+	if (--vcp->vc_inuse) {
+		unlock_kernel();
+		return 0;
+	}
+        
+        /* Wakeup clients so they can return. */
+	list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
+		/* Async requests need to be freed here */
+		if (req->uc_flags & REQ_ASYNC) {
+			CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
+			upc_free(req);
+			continue;
+		}
+		req->uc_flags |= REQ_ABORT;
+		wake_up(&req->uc_sleep);
+        }
+        
+	list_for_each_entry(req, &vcp->vc_processing, uc_chain) {
+		req->uc_flags |= REQ_ABORT;
+	        wake_up(&req->uc_sleep);
+        }
+
+	unlock_kernel();
+	return 0;
+}
+
+
+static struct file_operations coda_psdev_fops = {
+	.owner		= THIS_MODULE,
+	.read		= coda_psdev_read,
+	.write		= coda_psdev_write,
+	.poll		= coda_psdev_poll,
+	.ioctl		= coda_psdev_ioctl,
+	.open		= coda_psdev_open,
+	.release	= coda_psdev_release,
+};
+
+static int init_coda_psdev(void)
+{
+	int i, err = 0;
+	if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) {
+              printk(KERN_ERR "coda_psdev: unable to get major %d\n", 
+		     CODA_PSDEV_MAJOR);
+              return -EIO;
+	}
+	coda_psdev_class = class_simple_create(THIS_MODULE, "coda");
+	if (IS_ERR(coda_psdev_class)) {
+		err = PTR_ERR(coda_psdev_class);
+		goto out_chrdev;
+	}		
+	devfs_mk_dir ("coda");
+	for (i = 0; i < MAX_CODADEVS; i++) {
+		class_simple_device_add(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR,i), 
+				NULL, "cfs%d", i);
+		err = devfs_mk_cdev(MKDEV(CODA_PSDEV_MAJOR, i),
+				S_IFCHR|S_IRUSR|S_IWUSR, "coda/%d", i);
+		if (err)
+			goto out_class;
+	}
+	coda_sysctl_init();
+	goto out;
+
+out_class:
+	for (i = 0; i < MAX_CODADEVS; i++) 
+		class_simple_device_remove(MKDEV(CODA_PSDEV_MAJOR, i));
+	class_simple_destroy(coda_psdev_class);
+out_chrdev:
+	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
+out:
+	return err;
+}
+
+
+MODULE_AUTHOR("Peter J. Braam <braam@cs.cmu.edu>");
+MODULE_LICENSE("GPL");
+
+extern int coda_init_inodecache(void);
+extern void coda_destroy_inodecache(void);
+static int __init init_coda(void)
+{
+	int status;
+	int i;
+	printk(KERN_INFO "Coda Kernel/Venus communications, "
+#ifdef CONFIG_CODA_FS_OLD_API
+	       "v5.3.20"
+#else
+	       "v6.0.0"
+#endif
+	       ", coda@cs.cmu.edu\n");
+
+	status = coda_init_inodecache();
+	if (status)
+		goto out2;
+	status = init_coda_psdev();
+	if ( status ) {
+		printk("Problem (%d) in init_coda_psdev\n", status);
+		goto out1;
+	}
+	
+	status = register_filesystem(&coda_fs_type);
+	if (status) {
+		printk("coda: failed to register filesystem!\n");
+		goto out;
+	}
+	return 0;
+out:
+	for (i = 0; i < MAX_CODADEVS; i++) {
+		class_simple_device_remove(MKDEV(CODA_PSDEV_MAJOR, i));
+		devfs_remove("coda/%d", i);
+	}
+	class_simple_destroy(coda_psdev_class);
+	devfs_remove("coda");
+	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
+	coda_sysctl_clean();
+out1:
+	coda_destroy_inodecache();
+out2:
+	return status;
+}
+
+static void __exit exit_coda(void)
+{
+        int err, i;
+
+	err = unregister_filesystem(&coda_fs_type);
+        if ( err != 0 ) {
+                printk("coda: failed to unregister filesystem\n");
+        }
+	for (i = 0; i < MAX_CODADEVS; i++) {
+		class_simple_device_remove(MKDEV(CODA_PSDEV_MAJOR, i));
+		devfs_remove("coda/%d", i);
+	}
+	class_simple_destroy(coda_psdev_class);
+	devfs_remove("coda");
+	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
+	coda_sysctl_clean();
+	coda_destroy_inodecache();
+}
+
+module_init(init_coda);
+module_exit(exit_coda);
+
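The read/write pair above is the wire protocol between the kernel and the
userspace cache manager: Venus reads a pending upcall from the character
device and later writes back a reply whose opcode/unique header matches the
request, so coda_psdev_write() can find it on the processing queue (a
downcall opcode is routed to coda_downcall() instead).  The sketch below
shows the minimal shape of that exchange from userspace; it is illustrative
only and not part of this patch.  The device path and the simplified header
layouts are assumptions; a real Venus uses the structures from
<linux/coda.h> and sends a full outputArgs body, not just the header.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	/* simplified stand-ins for coda_in_hdr / coda_out_hdr */
	struct req_hdr { uint32_t opcode; uint32_t unique; };
	struct rep_hdr { uint32_t opcode; uint32_t unique; uint32_t result; };

	static void serve_one_upcall(void)
	{
		struct pollfd pfd;
		struct req_hdr req;
		struct rep_hdr rep;
		char buf[4096];
		int fd = open("/dev/cfs0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return;

		pfd.fd = fd;
		pfd.events = POLLIN;
		poll(&pfd, 1, -1);	/* block until an upcall is pending */

		if (read(fd, buf, sizeof(buf)) >= (ssize_t)sizeof(req)) {
			memcpy(&req, buf, sizeof(req));
			/* echo opcode/unique so the kernel finds the waiting request */
			rep.opcode = req.opcode;
			rep.unique = req.unique;
			rep.result = 0;	/* success, no output body */
			write(fd, &rep, sizeof(rep));
		}
		close(fd);
	}
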
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
new file mode 100644
index 0000000..b35e5bb
--- /dev/null
+++ b/fs/coda/symlink.c
@@ -0,0 +1,55 @@
+/*
+ * Symlink inode operations for Coda filesystem
+ * Original version: (C) 1996 P. Braam and M. Callahan
+ * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University
+ * 
+ * Carnegie Mellon encourages users to contribute improvements to
+ * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_proc.h>
+
+static int coda_symlink_filler(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	int error;
+	struct coda_inode_info *cii;
+	unsigned int len = PAGE_SIZE;
+	char *p = kmap(page);
+
+	lock_kernel();
+	cii = ITOC(inode);
+	coda_vfs_stat.follow_link++;
+
+	error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
+	unlock_kernel();
+	if (error)
+		goto fail;
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+
+fail:
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return error;
+}
+
+struct address_space_operations coda_symlink_aops = {
+	.readpage	= coda_symlink_filler,
+};
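
coda_symlink_filler() above follows the usual readpage contract for
page-cache backed symlinks: map the page, fill it with the link target,
mark it up to date (or flag an error) and unlock it, so that later symlink
resolution can take the target straight from the page cache.  A
stripped-down skeleton of that contract, independent of Coda, is sketched
below; fill_link_from_backend() is a hypothetical helper standing in for
venus_readlink().

	#include <linux/fs.h>
	#include <linux/highmem.h>
	#include <linux/pagemap.h>

	/* hypothetical: copy the link target into buf, at most len bytes */
	extern int fill_link_from_backend(struct inode *inode, char *buf,
					  unsigned int len);

	static int example_symlink_filler(struct file *file, struct page *page)
	{
		char *p = kmap(page);
		int error = fill_link_from_backend(page->mapping->host, p, PAGE_SIZE);

		kunmap(page);
		if (error)
			SetPageError(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
		return error;
	}
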
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
new file mode 100644
index 0000000..f0b1075
--- /dev/null
+++ b/fs/coda/sysctl.c
@@ -0,0 +1,254 @@
+/*
+ * Sysctl operations for Coda filesystem
+ * Original version: (C) 1996 P. Braam and M. Callahan
+ * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University
+ * 
+ * Carnegie Mellon encourages users to contribute improvements to
+ * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
+ * 
+ * CODA operation statistics
+ * (c) March, 1998 Zhanyong Wan <zhanyong.wan@yale.edu>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_cache.h>
+#include <linux/coda_proc.h>
+
+static struct ctl_table_header *fs_table_header;
+
+#define FS_CODA         1       /* Coda file system */
+
+#define CODA_TIMEOUT    3       /* timeout on upcalls to become interruptible */
+#define CODA_HARD       5       /* mount type "hard" or "soft" */
+#define CODA_VFS 	 6       /* vfs statistics */
+#define CODA_CACHE_INV 	 9       /* cache invalidation statistics */
+#define CODA_FAKE_STATFS 10	 /* don't query venus for actual cache usage */
+
+struct coda_vfs_stats		coda_vfs_stat;
+static struct coda_cache_inv_stats	coda_cache_inv_stat;
+
+static void reset_coda_vfs_stats( void )
+{
+	memset( &coda_vfs_stat, 0, sizeof( coda_vfs_stat ) );
+}
+
+static void reset_coda_cache_inv_stats( void )
+{
+	memset( &coda_cache_inv_stat, 0, sizeof( coda_cache_inv_stat ) );
+}
+
+static int do_reset_coda_vfs_stats( ctl_table * table, int write,
+				    struct file * filp, void __user * buffer,
+				    size_t * lenp, loff_t * ppos )
+{
+	if ( write ) {
+		reset_coda_vfs_stats();
+
+		*ppos += *lenp;
+	} else {
+		*lenp = 0;
+	}
+
+	return 0;
+}
+
+static int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
+					  struct file * filp,
+					  void __user * buffer,
+					  size_t * lenp, loff_t * ppos )
+{
+	if ( write ) {
+		reset_coda_cache_inv_stats();
+
+		*ppos += *lenp;
+	} else {
+		*lenp = 0;
+	}
+  
+	return 0;
+}
+
+static int coda_vfs_stats_get_info( char * buffer, char ** start,
+				    off_t offset, int length)
+{
+	int len=0;
+	off_t begin;
+	struct coda_vfs_stats * ps = & coda_vfs_stat;
+  
+	/* this works as long as we are below 1024 characters! */
+	len += sprintf( buffer,
+			"Coda VFS statistics\n"
+			"===================\n\n"
+			"File Operations:\n"
+			"\topen\t\t%9d\n"
+			"\tflush\t\t%9d\n"
+			"\trelease\t\t%9d\n"
+			"\tfsync\t\t%9d\n\n"
+			"Dir Operations:\n"
+			"\treaddir\t\t%9d\n\n"
+			"Inode Operations\n"
+			"\tcreate\t\t%9d\n"
+			"\tlookup\t\t%9d\n"
+			"\tlink\t\t%9d\n"
+			"\tunlink\t\t%9d\n"
+			"\tsymlink\t\t%9d\n"
+			"\tmkdir\t\t%9d\n"
+			"\trmdir\t\t%9d\n"
+			"\trename\t\t%9d\n"
+			"\tpermission\t%9d\n",
+
+			/* file operations */
+			ps->open,
+			ps->flush,
+			ps->release,
+			ps->fsync,
+
+			/* dir operations */
+			ps->readdir,
+		  
+			/* inode operations */
+			ps->create,
+			ps->lookup,
+			ps->link,
+			ps->unlink,
+			ps->symlink,
+			ps->mkdir,
+			ps->rmdir,
+			ps->rename,
+			ps->permission); 
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if ( len > length )
+		len = length;
+	if ( len < 0 )
+		len = 0;
+
+	return len;
+}
+
+static int coda_cache_inv_stats_get_info( char * buffer, char ** start,
+					  off_t offset, int length)
+{
+	int len=0;
+	off_t begin;
+	struct coda_cache_inv_stats * ps = & coda_cache_inv_stat;
+  
+	/* this works as long as we are below 1024 characters! */
+	len += sprintf( buffer,
+			"Coda cache invalidation statistics\n"
+			"==================================\n\n"
+			"flush\t\t%9d\n"
+			"purge user\t%9d\n"
+			"zap_dir\t\t%9d\n"
+			"zap_file\t%9d\n"
+			"zap_vnode\t%9d\n"
+			"purge_fid\t%9d\n"
+			"replace\t\t%9d\n",
+			ps->flush,
+			ps->purge_user,
+			ps->zap_dir,
+			ps->zap_file,
+			ps->zap_vnode,
+			ps->purge_fid,
+			ps->replace );
+  
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if ( len > length )
+		len = length;
+	if ( len < 0 )
+		len = 0;
+
+	return len;
+}
+
+static ctl_table coda_table[] = {
+ 	{CODA_TIMEOUT, "timeout", &coda_timeout, sizeof(int), 0644, NULL, &proc_dointvec},
+ 	{CODA_HARD, "hard", &coda_hard, sizeof(int), 0644, NULL, &proc_dointvec},
+ 	{CODA_VFS, "vfs_stats", NULL, 0, 0644, NULL, &do_reset_coda_vfs_stats},
+ 	{CODA_CACHE_INV, "cache_inv_stats", NULL, 0, 0644, NULL, &do_reset_coda_cache_inv_stats},
+ 	{CODA_FAKE_STATFS, "fake_statfs", &coda_fake_statfs, sizeof(int), 0600, NULL, &proc_dointvec},
+	{ 0 }
+};
+
+static ctl_table fs_table[] = {
+       {FS_CODA, "coda",    NULL, 0, 0555, coda_table},
+       {0}
+};
+
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ target directory structure:
+   /proc/fs  (see linux/fs/proc/root.c)
+   /proc/fs/coda
+   /proc/fs/coda/{vfs_stats, cache_inv_stats}
+
+*/
+
+static struct proc_dir_entry* proc_fs_coda;
+
+#endif
+
+#define coda_proc_create(name,get_info) \
+	create_proc_info_entry(name, 0, proc_fs_coda, get_info)
+
+void coda_sysctl_init(void)
+{
+	reset_coda_vfs_stats();
+	reset_coda_cache_inv_stats();
+
+#ifdef CONFIG_PROC_FS
+	proc_fs_coda = proc_mkdir("coda", proc_root_fs);
+	if (proc_fs_coda) {
+		proc_fs_coda->owner = THIS_MODULE;
+		coda_proc_create("vfs_stats", coda_vfs_stats_get_info);
+		coda_proc_create("cache_inv_stats", coda_cache_inv_stats_get_info);
+	}
+#endif
+
+#ifdef CONFIG_SYSCTL
+	if ( !fs_table_header )
+		fs_table_header = register_sysctl_table(fs_table, 0);
+#endif 
+}
+
+void coda_sysctl_clean(void) 
+{
+
+#ifdef CONFIG_SYSCTL
+	if ( fs_table_header ) {
+		unregister_sysctl_table(fs_table_header);
+		fs_table_header = NULL;
+	}
+#endif
+
+#ifdef CONFIG_PROC_FS
+        remove_proc_entry("cache_inv_stats", proc_fs_coda);
+        remove_proc_entry("vfs_stats", proc_fs_coda);
+	remove_proc_entry("coda", proc_root_fs);
+#endif 
+}
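
coda_sysctl_init() above leaves two read-only files, /proc/fs/coda/vfs_stats
and /proc/fs/coda/cache_inv_stats, plus sysctl knobs for the upcall timeout,
hard/soft mount behaviour, fake_statfs and the write-to-reset statistics
handlers.  A trivial userspace dump of the VFS statistics file might look
like the sketch below (illustrative only, not part of this patch):

	#include <stdio.h>

	/* Print /proc/fs/coda/vfs_stats as produced by coda_vfs_stats_get_info(). */
	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/fs/coda/vfs_stats", "r");

		if (!f) {
			perror("vfs_stats");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
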
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
new file mode 100644
index 0000000..1bae996
--- /dev/null
+++ b/fs/coda/upcall.c
@@ -0,0 +1,914 @@
+/*
+ * Mostly platform independent upcall operations to Venus:
+ *  -- upcalls
+ *  -- upcall routines
+ *
+ * Linux 2.0 version
+ * Copyright (C) 1996 Peter J. Braam <braam@maths.ox.ac.uk>, 
+ * Michael Callahan <callahan@maths.ox.ac.uk> 
+ * 
+ * Redone for Linux 2.1
+ * Copyright (C) 1997 Carnegie Mellon University
+ *
+ * Carnegie Mellon University encourages users of this code to contribute
+ * improvements to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
+ */
+
+#include <asm/system.h>
+#include <linux/signal.h>
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vfs.h>
+
+#include <linux/coda.h>
+#include <linux/coda_linux.h>
+#include <linux/coda_psdev.h>
+#include <linux/coda_fs_i.h>
+#include <linux/coda_cache.h>
+#include <linux/coda_proc.h> 
+
+#define upc_alloc() kmalloc(sizeof(struct upc_req), GFP_KERNEL)
+#define upc_free(r) kfree(r)
+
+static int coda_upcall(struct coda_sb_info *mntinfo, int inSize, int *outSize, 
+		       union inputArgs *buffer);
+
+static void *alloc_upcall(int opcode, int size)
+{
+	union inputArgs *inp;
+
+	CODA_ALLOC(inp, union inputArgs *, size);
+        if (!inp)
+		return ERR_PTR(-ENOMEM);
+
+        inp->ih.opcode = opcode;
+	inp->ih.pid = current->pid;
+	inp->ih.pgid = process_group(current);
+#ifdef CONFIG_CODA_FS_OLD_API
+	memset(&inp->ih.cred, 0, sizeof(struct coda_cred));
+	inp->ih.cred.cr_fsuid = current->fsuid;
+#else
+	inp->ih.uid = current->fsuid;
+#endif
+	return (void*)inp;
+}
+
+#define UPARG(op)\
+do {\
+	inp = (union inputArgs *)alloc_upcall(op, insize); \
+        if (IS_ERR(inp)) { return PTR_ERR(inp); }\
+        outp = (union outputArgs *)(inp); \
+        outsize = insize; \
+} while (0)
+
+#define INSIZE(tag) sizeof(struct coda_ ## tag ## _in)
+#define OUTSIZE(tag) sizeof(struct coda_ ## tag ## _out)
+#define SIZE(tag)  max_t(unsigned int, INSIZE(tag), OUTSIZE(tag))
+
+
+/* the upcalls */
+int venus_rootfid(struct super_block *sb, struct CodaFid *fidp)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+
+        insize = SIZE(root);
+        UPARG(CODA_ROOT);
+
+	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	
+	if (error) {
+	        printk("coda_get_rootfid: error %d\n", error);
+	} else {
+		*fidp = outp->coda_root.VFid;
+	}
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_getattr(struct super_block *sb, struct CodaFid *fid, 
+		     struct coda_vattr *attr) 
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+
+        insize = SIZE(getattr); 
+	UPARG(CODA_GETATTR);
+        inp->coda_getattr.VFid = *fid;
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	
+	*attr = outp->coda_getattr.attr;
+
+	CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_setattr(struct super_block *sb, struct CodaFid *fid, 
+		  struct coda_vattr *vattr)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+	
+	insize = SIZE(setattr);
+	UPARG(CODA_SETATTR);
+
+        inp->coda_setattr.VFid = *fid;
+	inp->coda_setattr.attr = *vattr;
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+        CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_lookup(struct super_block *sb, struct CodaFid *fid, 
+		    const char *name, int length, int * type, 
+		    struct CodaFid *resfid)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+	int offset;
+
+	offset = INSIZE(lookup);
+        insize = max_t(unsigned int, offset + length +1, OUTSIZE(lookup));
+	UPARG(CODA_LOOKUP);
+
+        inp->coda_lookup.VFid = *fid;
+	inp->coda_lookup.name = offset;
+	inp->coda_lookup.flags = CLU_CASE_SENSITIVE;
+        /* send Venus a null terminated string */
+        memcpy((char *)(inp) + offset, name, length);
+        *((char *)inp + offset + length) = '\0';
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	*resfid = outp->coda_lookup.VFid;
+	*type = outp->coda_lookup.vtype;
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_store(struct super_block *sb, struct CodaFid *fid, int flags,
+                vuid_t uid)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+#ifdef CONFIG_CODA_FS_OLD_API
+	struct coda_cred cred = { 0, };
+	cred.cr_fsuid = uid;
+#endif
+	
+	insize = SIZE(store);
+	UPARG(CODA_STORE);
+	
+#ifdef CONFIG_CODA_FS_OLD_API
+	memcpy(&(inp->ih.cred), &cred, sizeof(cred));
+#else
+	inp->ih.uid = uid;
+#endif
+	
+        inp->coda_store.VFid = *fid;
+        inp->coda_store.flags = flags;
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_release(struct super_block *sb, struct CodaFid *fid, int flags)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+	
+	insize = SIZE(release);
+	UPARG(CODA_RELEASE);
+	
+	inp->coda_release.VFid = *fid;
+	inp->coda_release.flags = flags;
+
+	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
+                vuid_t uid)
+{
+	union inputArgs *inp;
+	union outputArgs *outp;
+	int insize, outsize, error;
+#ifdef CONFIG_CODA_FS_OLD_API
+	struct coda_cred cred = { 0, };
+	cred.cr_fsuid = uid;
+#endif
+	
+	insize = SIZE(release);
+	UPARG(CODA_CLOSE);
+	
+#ifdef CONFIG_CODA_FS_OLD_API
+	memcpy(&(inp->ih.cred), &cred, sizeof(cred));
+#else
+	inp->ih.uid = uid;
+#endif
+	
+        inp->coda_close.VFid = *fid;
+        inp->coda_close.flags = flags;
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_open(struct super_block *sb, struct CodaFid *fid,
+		  int flags, struct file **fh)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+       
+	insize = SIZE(open_by_fd);
+	UPARG(CODA_OPEN_BY_FD);
+
+        inp->coda_open.VFid = *fid;
+        inp->coda_open.flags = flags;
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	*fh = outp->coda_open_by_fd.fh;
+
+	CODA_FREE(inp, insize);
+	return error;
+}	
+
+int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, 
+		   const char *name, int length, 
+		   struct CodaFid *newfid, struct coda_vattr *attrs)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int offset;
+
+	offset = INSIZE(mkdir);
+	insize = max_t(unsigned int, offset + length + 1, OUTSIZE(mkdir));
+	UPARG(CODA_MKDIR);
+
+        inp->coda_mkdir.VFid = *dirfid;
+        inp->coda_mkdir.attr = *attrs;
+	inp->coda_mkdir.name = offset;
+        /* Venus must get null terminated string */
+        memcpy((char *)(inp) + offset, name, length);
+        *((char *)inp + offset + length) = '\0';
+        
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	*attrs = outp->coda_mkdir.attr;
+	*newfid = outp->coda_mkdir.VFid;
+
+	CODA_FREE(inp, insize);
+	return error;        
+}
+
+
+int venus_rename(struct super_block *sb, struct CodaFid *old_fid, 
+		 struct CodaFid *new_fid, size_t old_length, 
+		 size_t new_length, const char *old_name, 
+		 const char *new_name)
+{
+	union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error; 
+	int offset, s;
+	
+	offset = INSIZE(rename);
+	insize = max_t(unsigned int, offset + new_length + old_length + 8,
+		     OUTSIZE(rename)); 
+ 	UPARG(CODA_RENAME);
+
+        inp->coda_rename.sourceFid = *old_fid;
+        inp->coda_rename.destFid =  *new_fid;
+        inp->coda_rename.srcname = offset;
+
+        /* Venus must receive a null terminated string */
+        s = ( old_length & ~0x3) +4; /* round up to word boundary */
+        memcpy((char *)(inp) + offset, old_name, old_length);
+        *((char *)inp + offset + old_length) = '\0';
+
+        /* another null terminated string for Venus */
+        offset += s;
+        inp->coda_rename.destname = offset;
+        s = ( new_length & ~0x3) +4; /* round up to word boundary */
+        memcpy((char *)(inp) + offset, new_name, new_length);
+        *((char *)inp + offset + new_length) = '\0';
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_create(struct super_block *sb, struct CodaFid *dirfid, 
+		 const char *name, int length, int excl, int mode,
+		 struct CodaFid *newfid, struct coda_vattr *attrs) 
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int offset;
+
+        offset = INSIZE(create);
+	insize = max_t(unsigned int, offset + length + 1, OUTSIZE(create));
+	UPARG(CODA_CREATE);
+
+        inp->coda_create.VFid = *dirfid;
+        inp->coda_create.attr.va_mode = mode;
+	inp->coda_create.excl = excl;
+        inp->coda_create.mode = mode;
+        inp->coda_create.name = offset;
+
+        /* Venus must get null terminated string */
+        memcpy((char *)(inp) + offset, name, length);
+        *((char *)inp + offset + length) = '\0';
+                
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	*attrs = outp->coda_create.attr;
+	*newfid = outp->coda_create.VFid;
+
+	CODA_FREE(inp, insize);
+	return error;        
+}
+
+int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, 
+		    const char *name, int length)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int offset;
+
+        offset = INSIZE(rmdir);
+	insize = max_t(unsigned int, offset + length + 1, OUTSIZE(rmdir));
+	UPARG(CODA_RMDIR);
+
+        inp->coda_rmdir.VFid = *dirfid;
+        inp->coda_rmdir.name = offset;
+        memcpy((char *)(inp) + offset, name, length);
+	*((char *)inp + offset + length) = '\0';
+        
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_remove(struct super_block *sb, struct CodaFid *dirfid, 
+		    const char *name, int length)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int error=0, insize, outsize, offset;
+
+        offset = INSIZE(remove);
+	insize = max_t(unsigned int, offset + length + 1, OUTSIZE(remove));
+	UPARG(CODA_REMOVE);
+
+        inp->coda_remove.VFid = *dirfid;
+        inp->coda_remove.name = offset;
+        memcpy((char *)(inp) + offset, name, length);
+	*((char *)inp + offset + length) = '\0';
+        
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_readlink(struct super_block *sb, struct CodaFid *fid, 
+		      char *buffer, int *length)
+{ 
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int retlen;
+        char *result;
+        
+	insize = max_t(unsigned int,
+		     INSIZE(readlink), OUTSIZE(readlink)+ *length + 1);
+	UPARG(CODA_READLINK);
+
+        inp->coda_readlink.VFid = *fid;
+    
+        error =  coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	
+	if (! error) {
+                retlen = outp->coda_readlink.count;
+		if ( retlen > *length )
+		        retlen = *length;
+		*length = retlen;
+		result =  (char *)outp + (long)outp->coda_readlink.data;
+		memcpy(buffer, result, retlen);
+		*(buffer + retlen) = '\0';
+	}
+        
+        CODA_FREE(inp, insize);
+        return error;
+}
+
+
+
+int venus_link(struct super_block *sb, struct CodaFid *fid, 
+		  struct CodaFid *dirfid, const char *name, int len )
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int offset;
+
+	offset = INSIZE(link);
+	insize = max_t(unsigned int, offset  + len + 1, OUTSIZE(link));
+        UPARG(CODA_LINK);
+
+        inp->coda_link.sourceFid = *fid;
+        inp->coda_link.destFid = *dirfid;
+        inp->coda_link.tname = offset;
+
+        /* make sure strings are null terminated */
+        memcpy((char *)(inp) + offset, name, len);
+        *((char *)inp + offset + len) = '\0';
+        
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_symlink(struct super_block *sb, struct CodaFid *fid,
+		     const char *name, int len,
+		     const char *symname, int symlen)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        int offset, s;
+
+        offset = INSIZE(symlink);
+	insize = max_t(unsigned int, offset + len + symlen + 8, OUTSIZE(symlink));
+	UPARG(CODA_SYMLINK);
+        
+        /*        inp->coda_symlink.attr = *tva; XXXXXX */ 
+        inp->coda_symlink.VFid = *fid;
+
+	/* Round up to word boundary and null terminate */
+        inp->coda_symlink.srcname = offset;
+        s = ( symlen  & ~0x3 ) + 4; 
+        memcpy((char *)(inp) + offset, symname, symlen);
+        *((char *)inp + offset + symlen) = '\0';
+        
+	/* Round up to word boundary and null terminate */
+        offset += s;
+        inp->coda_symlink.tname = offset;
+        s = (len & ~0x3) + 4;
+        memcpy((char *)(inp) + offset, name, len);
+        *((char *)inp + offset + len) = '\0';
+
+	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+        return error;
+}
+
+int venus_fsync(struct super_block *sb, struct CodaFid *fid)
+{
+        union inputArgs *inp;
+        union outputArgs *outp; 
+	int insize, outsize, error;
+	
+	insize=SIZE(fsync);
+	UPARG(CODA_FSYNC);
+
+        inp->coda_fsync.VFid = *fid;
+        error = coda_upcall(coda_sbp(sb), sizeof(union inputArgs), 
+                            &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_access(struct super_block *sb, struct CodaFid *fid, int mask)
+{
+        union inputArgs *inp;
+        union outputArgs *outp; 
+	int insize, outsize, error;
+
+	insize = SIZE(access);
+	UPARG(CODA_ACCESS);
+
+        inp->coda_access.VFid = *fid;
+        inp->coda_access.flags = mask;
+
+	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+
+int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
+		 unsigned int cmd, struct PioctlData *data)
+{
+        union inputArgs *inp;
+        union outputArgs *outp;  
+	int insize, outsize, error;
+	int iocsize;
+
+	insize = VC_MAXMSGSIZE;
+	UPARG(CODA_IOCTL);
+
+        /* build packet for Venus */
+        if (data->vi.in_size > VC_MAXDATASIZE) {
+		error = -EINVAL;
+		goto exit;
+        }
+
+        if (data->vi.out_size > VC_MAXDATASIZE) {
+		error = -EINVAL;
+		goto exit;
+	}
+
+        inp->coda_ioctl.VFid = *fid;
+    
+        /* the cmd field was mutated by increasing its size field to
+         * reflect the path and follow args. We need to subtract that
+         * out before sending the command to Venus.  */
+        inp->coda_ioctl.cmd = (cmd & ~(PIOCPARM_MASK << 16));	
+        iocsize = ((cmd >> 16) & PIOCPARM_MASK) - sizeof(char *) - sizeof(int);
+        inp->coda_ioctl.cmd |= (iocsize & PIOCPARM_MASK) <<	16;	
+    
+        /* in->coda_ioctl.rwflag = flag; */
+        inp->coda_ioctl.len = data->vi.in_size;
+        inp->coda_ioctl.data = (char *)(INSIZE(ioctl));
+     
+        /* get the data out of user space */
+        if ( copy_from_user((char*)inp + (long)inp->coda_ioctl.data,
+			    data->vi.in, data->vi.in_size) ) {
+		error = -EINVAL;
+	        goto exit;
+	}
+
+        error = coda_upcall(coda_sbp(sb), SIZE(ioctl) + data->vi.in_size,
+                            &outsize, inp);
+        
+        if (error) {
+	        printk("coda_pioctl: Venus returns: %d for %s\n", 
+		       error, coda_f2s(fid));
+		goto exit; 
+	}
+
+	if (outsize < (long)outp->coda_ioctl.data + outp->coda_ioctl.len) {
+		error = -EINVAL;
+		goto exit;
+	}
+        
+	/* Check that the returned data fits in the caller's OUT buffer. */
+        if (outp->coda_ioctl.len > data->vi.out_size) {
+		error = -EINVAL;
+		goto exit;
+        }
+
+	/* Copy out the OUT buffer. */
+	if (copy_to_user(data->vi.out,
+			 (char *)outp + (long)outp->coda_ioctl.data,
+			 outp->coda_ioctl.len)) {
+		error = -EFAULT;
+		goto exit;
+	}
+
+ exit:
+	CODA_FREE(inp, insize);
+	return error;
+}
+
+int venus_statfs(struct super_block *sb, struct kstatfs *sfs) 
+{ 
+        union inputArgs *inp;
+        union outputArgs *outp;
+        int insize, outsize, error;
+        
+	insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
+	UPARG(CODA_STATFS);
+
+        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	
+        if (!error) {
+		sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
+		sfs->f_bfree  = outp->coda_statfs.stat.f_bfree;
+		sfs->f_bavail = outp->coda_statfs.stat.f_bavail;
+		sfs->f_files  = outp->coda_statfs.stat.f_files;
+		sfs->f_ffree  = outp->coda_statfs.stat.f_ffree;
+	} else {
+		printk("coda_statfs: Venus returns: %d\n", error);
+	}
+
+        CODA_FREE(inp, insize);
+        return error;
+}
+
+/*
+ * coda_upcall and coda_downcall routines.
+ * 
+ */
+
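+/*
+ * Wait for Venus to answer an upcall.  The sleep is interruptible unless
+ * "hard" mounts are enabled or the operation is CODA_CLOSE.  We bail out
+ * early if Venus dies (vc_inuse drops to zero), once a reply or abort is
+ * flagged on the request, on SIGKILL/SIGINT, or, with some other signal
+ * pending, once the request has been outstanding longer than coda_timeout
+ * seconds.
+ */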
+static inline void coda_waitfor_upcall(struct upc_req *vmp,
+				       struct venus_comm *vcommp)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	vmp->uc_posttime = jiffies;
+
+	add_wait_queue(&vmp->uc_sleep, &wait);
+	for (;;) {
+		if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE ) 
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+                /* venus died */
+                if ( !vcommp->vc_inuse )
+                        break;
+
+		/* got a reply */
+		if ( vmp->uc_flags & ( REQ_WRITE | REQ_ABORT ) )
+			break;
+
+		if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE && signal_pending(current) ) {
+			/* if this process really wants to die, let it go */
+			if ( sigismember(&(current->pending.signal), SIGKILL) ||
+			     sigismember(&(current->pending.signal), SIGINT) )
+				break;
+			/* signal is present: after the timeout always return
+			   (really smart idea, probably useless ...) */
+			if ( jiffies - vmp->uc_posttime > coda_timeout * HZ )
+				break; 
+		}
+		schedule();
+	}
+	remove_wait_queue(&vmp->uc_sleep, &wait);
+	set_current_state(TASK_RUNNING);
+
+	return;
+}
+
+
+/* 
+ * coda_upcall will return an error in the case of 
+ * failed communication with Venus _or_ will peek at Venus
+ * reply and return Venus' error.
+ *
+ * As Venus has two types of errors, normal errors (positive) and internal
+ * errors (negative): normal errors are negated, while internal errors are
+ * all mapped to -EINTR and a warning message is printed. (jh)
+ * 
+ */
+static int coda_upcall(struct coda_sb_info *sbi, 
+		int inSize, int *outSize, 
+		union inputArgs *buffer) 
+{
+	struct venus_comm *vcommp;
+	union outputArgs *out;
+	struct upc_req *req;
+	int error = 0;
+
+	vcommp = sbi->sbi_vcomm;
+	if ( !vcommp->vc_inuse ) {
+		printk("No pseudo device in upcall comms at %p\n", vcommp);
+                return -ENXIO;
+	}
+
+	/* Format the request message. */
+	req = upc_alloc();
+	if (!req) {
+		printk("Failed to allocate upc_req structure\n");
+		return -ENOMEM;
+	}
+	req->uc_data = (void *)buffer;
+	req->uc_flags = 0;
+	req->uc_inSize = inSize;
+	req->uc_outSize = *outSize ? *outSize : inSize;
+	req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode;
+	req->uc_unique = ++vcommp->vc_seq;
+	init_waitqueue_head(&req->uc_sleep);
+	
+	/* Fill in the common input args. */
+	((union inputArgs *)buffer)->ih.unique = req->uc_unique;
+
+	/* Append msg to pending queue and poke Venus. */
+	list_add(&(req->uc_chain), vcommp->vc_pending.prev);
+        
+	wake_up_interruptible(&vcommp->vc_waitq);
+	/* We can be interrupted while we wait for Venus to process
+	 * our request.  If the interrupt occurs before Venus has read
+	 * the request, we dequeue and return. If it occurs after the
+	 * read but before the reply, we dequeue, send a signal
+	 * message, and return. If it occurs after the reply we ignore
+	 * it. In no case do we want to restart the syscall.  If it
+	 * was interrupted by a venus shutdown (psdev_close), return
+	 * ENODEV.  */
+
+	/* Go to sleep.  Wake up on signals only after the timeout. */
+	coda_waitfor_upcall(req, vcommp);
+
+	if (vcommp->vc_inuse) {      /* i.e. Venus is still alive */
+	    /* Op went through, interrupt or not... */
+	    if (req->uc_flags & REQ_WRITE) {
+		out = (union outputArgs *)req->uc_data;
+		/* here we map positive Venus errors to kernel errors */
+		error = -out->oh.result;
+		*outSize = req->uc_outSize;
+		goto exit;
+	    }
+	    if ( !(req->uc_flags & REQ_READ) && signal_pending(current)) { 
+		/* Interrupted before venus read it. */
+		list_del(&(req->uc_chain));
+		/* perhaps the best way to convince the app to
+		   give up? */
+		error = -EINTR;
+		goto exit;
+	    } 
+	    if ( (req->uc_flags & REQ_READ) && signal_pending(current) ) {
+		    /* interrupted after Venus did its read, send signal */
+		    union inputArgs *sig_inputArgs;
+		    struct upc_req *sig_req;
+		    
+		    list_del(&(req->uc_chain));
+		    error = -ENOMEM;
+		    sig_req = upc_alloc();
+		    if (!sig_req) goto exit;
+
+		    CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
+		    if (!sig_req->uc_data) {
+			upc_free(sig_req);
+			goto exit;
+		    }
+		    
+		    error = -EINTR;
+		    sig_inputArgs = (union inputArgs *)sig_req->uc_data;
+		    sig_inputArgs->ih.opcode = CODA_SIGNAL;
+		    sig_inputArgs->ih.unique = req->uc_unique;
+		    
+		    sig_req->uc_flags = REQ_ASYNC;
+		    sig_req->uc_opcode = sig_inputArgs->ih.opcode;
+		    sig_req->uc_unique = sig_inputArgs->ih.unique;
+		    sig_req->uc_inSize = sizeof(struct coda_in_hdr);
+		    sig_req->uc_outSize = sizeof(struct coda_in_hdr);
+		    
+		    /* insert at head of queue! */
+		    list_add(&(sig_req->uc_chain), &vcommp->vc_pending);
+		    wake_up_interruptible(&vcommp->vc_waitq);
+	    } else {
+		    printk("Coda: Strange interruption..\n");
+		    error = -EINTR;
+	    }
+	} else {	/* If venus died i.e. !VC_OPEN(vcommp) */
+	        printk("coda_upcall: Venus dead on (op,un) (%d.%d) flags %d\n",
+		       req->uc_opcode, req->uc_unique, req->uc_flags);
+		error = -ENODEV;
+	}
+
+ exit:
+	upc_free(req);
+	return error;
+}
+
+/*  
+    The statements below are part of the Coda opportunistic
+    programming -- taken from the Mach/BSD kernel code for Coda. 
+    You don't get correct semantics by stating what needs to be
+    done without guaranteeing the invariants needed for it to happen.
+    When will we have time to find out what exactly is going on?  (pjb)
+*/
+
+
+/*
+ * There are 7 cases where cache invalidations occur.  The semantics
+ * of each is listed here:
+ *
+ * CODA_FLUSH     -- flush all entries from the name cache and the cnode cache.
+ * CODA_PURGEUSER -- flush all entries from the name cache for a specific user.
+ *                  This call is a result of token expiration.
+ *
+ * The next arise as the result of callbacks on a file or directory.
+ * CODA_ZAPFILE   -- flush the cached attributes for a file.
+ * CODA_ZAPDIR    -- flush the attributes for the dir and force a new lookup
+ *                  for all the children of this dir.
+ *
+ * The next is a result of Venus detecting an inconsistent file.
+ * CODA_PURGEFID  -- flush the attributes for the file and purge it and its
+ *                  children from the dcache.
+ *
+ * The last allows Venus to replace local fids with global ones
+ * during reintegration.
+ *
+ * CODA_REPLACE   -- replace one CodaFid with another throughout the name cache.
+ */
+
+int coda_downcall(int opcode, union outputArgs * out, struct super_block *sb)
+{
+	/* Handle invalidation requests. */
+          if ( !sb || !sb->s_root || !sb->s_root->d_inode)
+		  return 0; 
+
+	  switch (opcode) {
+
+	  case CODA_FLUSH : {
+		   coda_cache_clear_all(sb);
+		   shrink_dcache_sb(sb);
+		   coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
+		   return(0);
+	  }
+
+	  case CODA_PURGEUSER : {
+		   coda_cache_clear_all(sb);
+		   return(0);
+	  }
+
+	  case CODA_ZAPDIR : {
+	          struct inode *inode;
+		  struct CodaFid *fid = &out->coda_zapdir.CodaFid;
+
+		  inode = coda_fid_to_inode(fid, sb);
+		  if (inode) {
+			  coda_flag_inode_children(inode, C_PURGE);
+	                  coda_flag_inode(inode, C_VATTR);
+			  iput(inode);
+		  }
+		  
+		  return(0);
+	  }
+
+	  case CODA_ZAPFILE : {
+	          struct inode *inode;
+		  struct CodaFid *fid = &out->coda_zapfile.CodaFid;
+		  inode = coda_fid_to_inode(fid, sb);
+		  if ( inode ) {
+	                  coda_flag_inode(inode, C_VATTR);
+			  iput(inode);
+		  }
+		  return 0;
+	  }
+
+	  case CODA_PURGEFID : {
+	          struct inode *inode;
+		  struct CodaFid *fid = &out->coda_purgefid.CodaFid;
+		  inode = coda_fid_to_inode(fid, sb);
+		  if ( inode ) { 
+			coda_flag_inode_children(inode, C_PURGE);
+
+			/* catch the dentries later if some are still busy */
+			coda_flag_inode(inode, C_PURGE);
+			d_prune_aliases(inode);
+
+			iput(inode);
+		  }
+		  return 0;
+	  }
+
+	  case CODA_REPLACE : {
+	          struct inode *inode;
+		  struct CodaFid *oldfid = &out->coda_replace.OldFid;
+		  struct CodaFid *newfid = &out->coda_replace.NewFid;
+		  inode = coda_fid_to_inode(oldfid, sb);
+		  if ( inode ) { 
+			  coda_replace_fid(inode, oldfid, newfid);
+			  iput(inode);
+		  }
+		  return 0;
+	  }
+	  }
+	  return 0;
+}
+
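All of the name-carrying upcalls above (lookup, mkdir, create, rename,
remove, rmdir, link, symlink) marshal their arguments into one flat buffer:
the fixed-size input structure comes first and each string is appended
after it, NUL-terminated and padded to a word boundary when a second string
follows, with the structure storing byte offsets into the buffer rather
than pointers.  The sketch below shows that packing pattern on a made-up
request type; the names are hypothetical and only the layout mirrors the
code above.

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	/* hypothetical fixed part of a name-carrying request */
	struct fake_lookup_in {
		uint32_t opcode;
		uint32_t unique;
		uint32_t name;	/* byte offset of the name within the buffer */
	};

	/* Pack a request the way venus_lookup() does: struct, then name + NUL. */
	static void *pack_lookup(const char *name, size_t len, size_t *insize)
	{
		size_t offset = sizeof(struct fake_lookup_in);
		struct fake_lookup_in *inp;
		char *buf;

		*insize = offset + len + 1;	/* room for the terminating NUL */
		buf = calloc(1, *insize);
		if (!buf)
			return NULL;

		inp = (struct fake_lookup_in *)buf;
		inp->name = offset;		/* an offset, not a pointer */
		memcpy(buf + offset, name, len);
		buf[offset + len] = '\0';
		return buf;
	}

The real callers additionally take max(offset + length + 1, OUTSIZE(op)) so
that the same buffer is large enough to hold Venus' reply.
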
diff --git a/fs/compat.c b/fs/compat.c
new file mode 100644
index 0000000..a912bdf
--- /dev/null
+++ b/fs/compat.c
@@ -0,0 +1,1950 @@
+/*
+ *  linux/fs/compat.c
+ *
+ *  Kernel compatibility routines for e.g. 32 bit syscall support
+ *  on 64 bit kernels.
+ *
+ *  Copyright (C) 2002       Stephen Rothwell, IBM Corporation
+ *  Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
+ *  Copyright (C) 1998       Eddie C. Dost  (ecd@skynet.be)
+ *  Copyright (C) 2001,2002  Andi Kleen, SuSE Labs 
+ *  Copyright (C) 2003       Pavel Machek (pavel@suse.cz)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/namei.h>
+#include <linux/file.h>
+#include <linux/vfs.h>
+#include <linux/ioctl32.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/sockios.h>	/* for SIOCDEVPRIVATE */
+#include <linux/smb.h>
+#include <linux/smb_mount.h>
+#include <linux/ncp_mount.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/dirent.h>
+#include <linux/dnotify.h>
+#include <linux/highuid.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/personality.h>
+#include <linux/rwsem.h>
+
+#include <net/sock.h>		/* siocdevprivate_ioctl */
+
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/ioctls.h>
+
+/*
+ * Not all architectures have sys_utime, so implement this in terms
+ * of sys_utimes.
+ */
+asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __user *t)
+{
+	struct timeval tv[2];
+
+	if (t) {
+		if (get_user(tv[0].tv_sec, &t->actime) ||
+		    get_user(tv[1].tv_sec, &t->modtime))
+			return -EFAULT;
+		tv[0].tv_usec = 0;
+		tv[1].tv_usec = 0;
+	}
+	return do_utimes(filename, t ? tv : NULL);
+}
+
+asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
+{
+	struct timeval tv[2];
+
+	if (t) { 
+		if (get_user(tv[0].tv_sec, &t[0].tv_sec) ||
+		    get_user(tv[0].tv_usec, &t[0].tv_usec) ||
+		    get_user(tv[1].tv_sec, &t[1].tv_sec) ||
+		    get_user(tv[1].tv_usec, &t[1].tv_usec))
+			return -EFAULT; 
+	} 
+	return do_utimes(filename, t ? tv : NULL);
+}
+
+asmlinkage long compat_sys_newstat(char __user * filename,
+		struct compat_stat __user *statbuf)
+{
+	struct kstat stat;
+	int error = vfs_stat(filename, &stat);
+
+	if (!error)
+		error = cp_compat_stat(&stat, statbuf);
+	return error;
+}
+
+asmlinkage long compat_sys_newlstat(char __user * filename,
+		struct compat_stat __user *statbuf)
+{
+	struct kstat stat;
+	int error = vfs_lstat(filename, &stat);
+
+	if (!error)
+		error = cp_compat_stat(&stat, statbuf);
+	return error;
+}
+
+asmlinkage long compat_sys_newfstat(unsigned int fd,
+		struct compat_stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_fstat(fd, &stat);
+
+	if (!error)
+		error = cp_compat_stat(&stat, statbuf);
+	return error;
+}
+
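+/*
+ * Copy a native statfs result into the 32 bit layout.  When the compat
+ * fields are only 32 bits wide, block counts that do not fit are reported
+ * as -EOVERFLOW; f_files and f_ffree may legitimately be all ones
+ * (meaning "undefined"), so only real truncation is rejected.
+ */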
+static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf)
+{
+	
+	if (sizeof ubuf->f_blocks == 4) {
+		if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) &
+		    0xffffffff00000000ULL)
+			return -EOVERFLOW;
+		/* f_files and f_ffree may be -1; it's okay
+		 * to stuff that into 32 bits */
+		if (kbuf->f_files != 0xffffffffffffffffULL
+		 && (kbuf->f_files & 0xffffffff00000000ULL))
+			return -EOVERFLOW;
+		if (kbuf->f_ffree != 0xffffffffffffffffULL
+		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
+			return -EOVERFLOW;
+	}
+	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) ||
+	    __put_user(kbuf->f_type, &ubuf->f_type) ||
+	    __put_user(kbuf->f_bsize, &ubuf->f_bsize) ||
+	    __put_user(kbuf->f_blocks, &ubuf->f_blocks) ||
+	    __put_user(kbuf->f_bfree, &ubuf->f_bfree) ||
+	    __put_user(kbuf->f_bavail, &ubuf->f_bavail) ||
+	    __put_user(kbuf->f_files, &ubuf->f_files) ||
+	    __put_user(kbuf->f_ffree, &ubuf->f_ffree) ||
+	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
+	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
+	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
+	    __put_user(kbuf->f_frsize, &ubuf->f_frsize) ||
+	    __put_user(0, &ubuf->f_spare[0]) || 
+	    __put_user(0, &ubuf->f_spare[1]) || 
+	    __put_user(0, &ubuf->f_spare[2]) || 
+	    __put_user(0, &ubuf->f_spare[3]) || 
+	    __put_user(0, &ubuf->f_spare[4]))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * The following statfs calls are copies of code from fs/open.c and
+ * should be checked against those from time to time
+ */
+asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs __user *buf)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(path, &nd);
+	if (!error) {
+		struct kstatfs tmp;
+		error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+		if (!error && put_compat_statfs(buf, &tmp))
+			error = -EFAULT;
+		path_release(&nd);
+	}
+	return error;
+}
+
+asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf)
+{
+	struct file * file;
+	struct kstatfs tmp;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+	error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+	if (!error && put_compat_statfs(buf, &tmp))
+		error = -EFAULT;
+	fput(file);
+out:
+	return error;
+}
+
+static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
+{
+	if (sizeof ubuf->f_blocks == 4) {
+		if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail) &
+		    0xffffffff00000000ULL)
+			return -EOVERFLOW;
+		/* f_files and f_ffree may be -1; it's okay
+		 * to stuff that into 32 bits */
+		if (kbuf->f_files != 0xffffffffffffffffULL
+		 && (kbuf->f_files & 0xffffffff00000000ULL))
+			return -EOVERFLOW;
+		if (kbuf->f_ffree != 0xffffffffffffffffULL
+		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
+			return -EOVERFLOW;
+	}
+	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) ||
+	    __put_user(kbuf->f_type, &ubuf->f_type) ||
+	    __put_user(kbuf->f_bsize, &ubuf->f_bsize) ||
+	    __put_user(kbuf->f_blocks, &ubuf->f_blocks) ||
+	    __put_user(kbuf->f_bfree, &ubuf->f_bfree) ||
+	    __put_user(kbuf->f_bavail, &ubuf->f_bavail) ||
+	    __put_user(kbuf->f_files, &ubuf->f_files) ||
+	    __put_user(kbuf->f_ffree, &ubuf->f_ffree) ||
+	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
+	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
+	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
+	    __put_user(kbuf->f_frsize, &ubuf->f_frsize))
+		return -EFAULT;
+	return 0;
+}
+
+asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, struct compat_statfs64 __user *buf)
+{
+	struct nameidata nd;
+	int error;
+
+	if (sz != sizeof(*buf))
+		return -EINVAL;
+
+	error = user_path_walk(path, &nd);
+	if (!error) {
+		struct kstatfs tmp;
+		error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+		if (!error && put_compat_statfs64(buf, &tmp))
+			error = -EFAULT;
+		path_release(&nd);
+	}
+	return error;
+}
+
+asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf)
+{
+	struct file * file;
+	struct kstatfs tmp;
+	int error;
+
+	if (sz != sizeof(*buf))
+		return -EINVAL;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+	error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+	if (!error && put_compat_statfs64(buf, &tmp))
+		error = -EFAULT;
+	fput(file);
+out:
+	return error;
+}
+
+/* ioctl32 stuff, used by sparc64, parisc, s390x, ppc64, x86_64, MIPS */
+
+#define IOCTL_HASHSIZE 256
+static struct ioctl_trans *ioctl32_hash_table[IOCTL_HASHSIZE];
+static DECLARE_RWSEM(ioctl32_sem);
+
+extern struct ioctl_trans ioctl_start[];
+extern int ioctl_table_size;
+
+static inline unsigned long ioctl32_hash(unsigned long cmd)
+{
+	return (((cmd >> 6) ^ (cmd >> 4) ^ cmd)) % IOCTL_HASHSIZE;
+}
+
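+/*
+ * Translation entries live in a 256-bucket hash table keyed on the full
+ * ioctl command word; collisions are chained through ->next and new
+ * entries are appended at the tail of their chain.
+ */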
+static void ioctl32_insert_translation(struct ioctl_trans *trans)
+{
+	unsigned long hash;
+	struct ioctl_trans *t;
+
+	hash = ioctl32_hash (trans->cmd);
+	if (!ioctl32_hash_table[hash])
+		ioctl32_hash_table[hash] = trans;
+	else {
+		t = ioctl32_hash_table[hash];
+		while (t->next)
+			t = t->next;
+		trans->next = NULL;
+		t->next = trans;
+	}
+}
+
+static int __init init_sys32_ioctl(void)
+{
+	int i;
+
+	for (i = 0; i < ioctl_table_size; i++) {
+		if (ioctl_start[i].next != 0) { 
+			printk("ioctl translation %d bad\n",i); 
+			return -1;
+		}
+
+		ioctl32_insert_translation(&ioctl_start[i]);
+	}
+	return 0;
+}
+
+__initcall(init_sys32_ioctl);
+
+int register_ioctl32_conversion(unsigned int cmd,
+				ioctl_trans_handler_t handler)
+{
+	struct ioctl_trans *t;
+	struct ioctl_trans *new_t;
+	unsigned long hash = ioctl32_hash(cmd);
+
+	new_t = kmalloc(sizeof(*new_t), GFP_KERNEL);
+	if (!new_t)
+		return -ENOMEM;
+
+	down_write(&ioctl32_sem);
+	for (t = ioctl32_hash_table[hash]; t; t = t->next) {
+		if (t->cmd == cmd) {
+			printk(KERN_ERR "Trying to register duplicated ioctl32 "
+					"handler %x\n", cmd);
+			up_write(&ioctl32_sem);
+			kfree(new_t);
+			return -EINVAL; 
+		}
+	}
+	new_t->next = NULL;
+	new_t->cmd = cmd;
+	new_t->handler = handler;
+	ioctl32_insert_translation(new_t);
+
+	up_write(&ioctl32_sem);
+	return 0;
+}
+EXPORT_SYMBOL(register_ioctl32_conversion);
+
+static inline int builtin_ioctl(struct ioctl_trans *t)
+{ 
+	return t >= ioctl_start && t < (ioctl_start + ioctl_table_size);
+} 
+
+/* Problem:
+   This function cannot unregister duplicate ioctls, because they are not
+   unique.  If duplicates ever need to be unregistered, the prototype must
+   be extended to pass the handler in as well. */
+
+int unregister_ioctl32_conversion(unsigned int cmd)
+{
+	unsigned long hash = ioctl32_hash(cmd);
+	struct ioctl_trans *t, *t1;
+
+	down_write(&ioctl32_sem);
+
+	t = ioctl32_hash_table[hash];
+	if (!t) { 
+		up_write(&ioctl32_sem);
+		return -EINVAL;
+	} 
+
+	if (t->cmd == cmd) { 
+		if (builtin_ioctl(t)) {
+			printk("%p tried to unregister builtin ioctl %x\n",
+			       __builtin_return_address(0), cmd);
+		} else { 
+			ioctl32_hash_table[hash] = t->next;
+			up_write(&ioctl32_sem);
+			kfree(t);
+			return 0;
+		}
+	} 
+	while (t->next) {
+		t1 = t->next;
+		if (t1->cmd == cmd) { 
+			if (builtin_ioctl(t1)) {
+				printk("%p tried to unregister builtin "
+					"ioctl %x\n",
+					__builtin_return_address(0), cmd);
+				goto out;
+			} else { 
+				t->next = t1->next;
+				up_write(&ioctl32_sem);
+				kfree(t1);
+				return 0;
+			}
+		}
+		t = t1;
+	}
+	printk(KERN_ERR "Trying to free unknown 32bit ioctl handler %x\n",
+				cmd);
+out:
+	up_write(&ioctl32_sem);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(unregister_ioctl32_conversion); 
+
+static void compat_ioctl_error(struct file *filp, unsigned int fd,
+		unsigned int cmd, unsigned long arg)
+{
+	char buf[10];
+	char *fn = "?";
+	char *path;
+
+	/* find the name of the device. */
+	path = (char *)__get_free_page(GFP_KERNEL);
+	if (path) {
+		fn = d_path(filp->f_dentry, filp->f_vfsmnt, path, PAGE_SIZE);
+		if (IS_ERR(fn))
+			fn = "?";
+	}
+
+	sprintf(buf,"'%c'", (cmd>>24) & 0x3f);
+	if (!isprint(buf[1]))
+		sprintf(buf, "%02x", buf[1]);
+	printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
+			"cmd(%08x){%s} arg(%08x) on %s\n",
+			current->comm, current->pid,
+			(int)fd, (unsigned int)cmd, buf,
+			(unsigned int)arg, fn);
+
+	if (path)
+		free_page((unsigned long)path);
+}
+
+asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+				unsigned long arg)
+{
+	struct file *filp;
+	int error = -EBADF;
+	struct ioctl_trans *t;
+	int fput_needed;
+
+	filp = fget_light(fd, &fput_needed);
+	if (!filp)
+		goto out;
+
+	/* RED-PEN how should LSM module know it's handling 32bit? */
+	error = security_file_ioctl(filp, cmd, arg);
+	if (error)
+		goto out_fput;
+
+	/*
+	 * To allow the compat_ioctl handlers to be self contained
+	 * we need to check the common ioctls here first.
+	 * Just handle them with the standard handlers below.
+	 */
+	switch (cmd) {
+	case FIOCLEX:
+	case FIONCLEX:
+	case FIONBIO:
+	case FIOASYNC:
+	case FIOQSIZE:
+		break;
+
+	case FIBMAP:
+	case FIGETBSZ:
+	case FIONREAD:
+		if (S_ISREG(filp->f_dentry->d_inode->i_mode))
+			break;
+		/*FALL THROUGH*/
+
+	default:
+		if (filp->f_op && filp->f_op->compat_ioctl) {
+			error = filp->f_op->compat_ioctl(filp, cmd, arg);
+			if (error != -ENOIOCTLCMD)
+				goto out_fput;
+		}
+
+		if (!filp->f_op ||
+		    (!filp->f_op->ioctl && !filp->f_op->unlocked_ioctl))
+			goto do_ioctl;
+		break;
+	}
+
+	/* When register_ioctl32_conversion is finally gone remove
+	   this lock! -AK */
+	down_read(&ioctl32_sem);
+	for (t = ioctl32_hash_table[ioctl32_hash(cmd)]; t; t = t->next) {
+		if (t->cmd == cmd)
+			goto found_handler;
+	}
+	up_read(&ioctl32_sem);
+
+	if (S_ISSOCK(filp->f_dentry->d_inode->i_mode) &&
+	    cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
+		error = siocdevprivate_ioctl(fd, cmd, arg);
+	} else {
+		static int count;
+
+		if (++count <= 50)
+			compat_ioctl_error(filp, fd, cmd, arg);
+		error = -EINVAL;
+	}
+
+	goto out_fput;
+
+ found_handler:
+	if (t->handler) {
+		lock_kernel();
+		error = t->handler(fd, cmd, arg, filp);
+		unlock_kernel();
+		up_read(&ioctl32_sem);
+		goto out_fput;
+	}
+
+	up_read(&ioctl32_sem);
+ do_ioctl:
+	error = vfs_ioctl(filp, fd, cmd, arg);
+ out_fput:
+	fput_light(filp, fput_needed);
+ out:
+	return error;
+}
+
+static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
+{
+	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
+	    __get_user(kfl->l_type, &ufl->l_type) ||
+	    __get_user(kfl->l_whence, &ufl->l_whence) ||
+	    __get_user(kfl->l_start, &ufl->l_start) ||
+	    __get_user(kfl->l_len, &ufl->l_len) ||
+	    __get_user(kfl->l_pid, &ufl->l_pid))
+		return -EFAULT;
+	return 0;
+}
+
+static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
+{
+	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
+	    __put_user(kfl->l_type, &ufl->l_type) ||
+	    __put_user(kfl->l_whence, &ufl->l_whence) ||
+	    __put_user(kfl->l_start, &ufl->l_start) ||
+	    __put_user(kfl->l_len, &ufl->l_len) ||
+	    __put_user(kfl->l_pid, &ufl->l_pid))
+		return -EFAULT;
+	return 0;
+}
+
+#ifndef HAVE_ARCH_GET_COMPAT_FLOCK64
+static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
+{
+	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
+	    __get_user(kfl->l_type, &ufl->l_type) ||
+	    __get_user(kfl->l_whence, &ufl->l_whence) ||
+	    __get_user(kfl->l_start, &ufl->l_start) ||
+	    __get_user(kfl->l_len, &ufl->l_len) ||
+	    __get_user(kfl->l_pid, &ufl->l_pid))
+		return -EFAULT;
+	return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_PUT_COMPAT_FLOCK64
+static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
+{
+	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
+	    __put_user(kfl->l_type, &ufl->l_type) ||
+	    __put_user(kfl->l_whence, &ufl->l_whence) ||
+	    __put_user(kfl->l_start, &ufl->l_start) ||
+	    __put_user(kfl->l_len, &ufl->l_len) ||
+	    __put_user(kfl->l_pid, &ufl->l_pid))
+		return -EFAULT;
+	return 0;
+}
+#endif
+
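+/*
+ * The locking fcntls convert the 32 bit flock layout to the native one,
+ * call sys_fcntl() under KERNEL_DS so the on-stack struct passes the user
+ * pointer checks, and on F_GETLK convert the result back, returning
+ * -EOVERFLOW if the lock range does not fit in the compat off_t.
+ */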
+asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
+		unsigned long arg)
+{
+	mm_segment_t old_fs;
+	struct flock f;
+	long ret;
+
+	switch (cmd) {
+	case F_GETLK:
+	case F_SETLK:
+	case F_SETLKW:
+		ret = get_compat_flock(&f, compat_ptr(arg));
+		if (ret != 0)
+			break;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		ret = sys_fcntl(fd, cmd, (unsigned long)&f);
+		set_fs(old_fs);
+		if (cmd == F_GETLK && ret == 0) {
+			if ((f.l_start >= COMPAT_OFF_T_MAX) ||
+			    ((f.l_start + f.l_len) > COMPAT_OFF_T_MAX))
+				ret = -EOVERFLOW;
+			if (ret == 0)
+				ret = put_compat_flock(&f, compat_ptr(arg));
+		}
+		break;
+
+	case F_GETLK64:
+	case F_SETLK64:
+	case F_SETLKW64:
+		ret = get_compat_flock64(&f, compat_ptr(arg));
+		if (ret != 0)
+			break;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		ret = sys_fcntl(fd, (cmd == F_GETLK64) ? F_GETLK :
+				((cmd == F_SETLK64) ? F_SETLK : F_SETLKW),
+				(unsigned long)&f);
+		set_fs(old_fs);
+		if (cmd == F_GETLK64 && ret == 0) {
+			if ((f.l_start >= COMPAT_LOFF_T_MAX) ||
+			    ((f.l_start + f.l_len) > COMPAT_LOFF_T_MAX))
+				ret = -EOVERFLOW;
+			if (ret == 0)
+				ret = put_compat_flock64(&f, compat_ptr(arg));
+		}
+		break;
+
+	default:
+		ret = sys_fcntl(fd, cmd, arg);
+		break;
+	}
+	return ret;
+}
+
+asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
+		unsigned long arg)
+{
+	if ((cmd == F_GETLK64) || (cmd == F_SETLK64) || (cmd == F_SETLKW64))
+		return -EINVAL;
+	return compat_sys_fcntl64(fd, cmd, arg);
+}
+
+asmlinkage long
+compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
+{
+	long ret;
+	aio_context_t ctx64;
+
+	mm_segment_t oldfs = get_fs();
+	if (unlikely(get_user(ctx64, ctx32p)))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	/* The __user pointer cast is valid because of the set_fs() */
+	ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
+	set_fs(oldfs);
+	/* truncating is ok because it's a user address */
+	if (!ret)
+		ret = put_user((u32) ctx64, ctx32p);
+	return ret;
+}
+
+asmlinkage long
+compat_sys_io_getevents(aio_context_t ctx_id,
+				 unsigned long min_nr,
+				 unsigned long nr,
+				 struct io_event __user *events,
+				 struct compat_timespec __user *timeout)
+{
+	long ret;
+	struct timespec t;
+	struct timespec __user *ut = NULL;
+
+	ret = -EFAULT;
+	if (unlikely(!access_ok(VERIFY_WRITE, events, 
+				nr * sizeof(struct io_event))))
+		goto out;
+	if (timeout) {
+		if (get_compat_timespec(&t, timeout))
+			goto out;
+
+		ut = compat_alloc_user_space(sizeof(*ut));
+		if (copy_to_user(ut, &t, sizeof(t)) )
+			goto out;
+	} 
+	ret = sys_io_getevents(ctx_id, min_nr, nr, events, ut);
+out:
+	return ret;
+}
+
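+/*
+ * Widen an array of 32-bit iocb pointers into an array of native user
+ * pointers (in a compat_alloc_user_space() buffer) so that sys_io_submit()
+ * can consume it directly.
+ */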
+static inline long
+copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
+{
+	compat_uptr_t uptr;
+	int i;
+
+	for (i = 0; i < nr; ++i) {
+		if (get_user(uptr, ptr32 + i))
+			return -EFAULT;
+		if (put_user(compat_ptr(uptr), ptr64 + i))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+#define MAX_AIO_SUBMITS 	(PAGE_SIZE/sizeof(struct iocb *))
+
+asmlinkage long
+compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
+{
+	struct iocb __user * __user *iocb64; 
+	long ret;
+
+	if (unlikely(nr < 0))
+		return -EINVAL;
+
+	if (nr > MAX_AIO_SUBMITS)
+		nr = MAX_AIO_SUBMITS;
+	
+	iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
+	ret = copy_iocb(nr, iocb, iocb64);
+	if (!ret)
+		ret = sys_io_submit(ctx_id, nr, iocb64);
+	return ret;
+}
+
+struct compat_ncp_mount_data {
+	compat_int_t version;
+	compat_uint_t ncp_fd;
+	compat_uid_t mounted_uid;
+	compat_pid_t wdog_pid;
+	unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
+	compat_uint_t time_out;
+	compat_uint_t retry_count;
+	compat_uint_t flags;
+	compat_uid_t uid;
+	compat_gid_t gid;
+	compat_mode_t file_mode;
+	compat_mode_t dir_mode;
+};
+
+struct compat_ncp_mount_data_v4 {
+	compat_int_t version;
+	compat_ulong_t flags;
+	compat_ulong_t mounted_uid;
+	compat_long_t wdog_pid;
+	compat_uint_t ncp_fd;
+	compat_uint_t time_out;
+	compat_uint_t retry_count;
+	compat_ulong_t uid;
+	compat_ulong_t gid;
+	compat_ulong_t file_mode;
+	compat_ulong_t dir_mode;
+};
+
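+/*
+ * Convert the ncpfs mount data in place from the compat layout to the
+ * native one.  Fields are rewritten from the end of the structure
+ * backwards so that the wider native fields never overwrite 32-bit
+ * fields that have not been read yet; for version 3 the memmove shifts
+ * mounted_vol together with the three unsigned ints that follow it
+ * (time_out, retry_count, flags) as one block, since those fields have
+ * the same size in both layouts.
+ */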
+static void *do_ncp_super_data_conv(void *raw_data)
+{
+	int version = *(unsigned int *)raw_data;
+
+	if (version == 3) {
+		struct compat_ncp_mount_data *c_n = raw_data;
+		struct ncp_mount_data *n = raw_data;
+
+		n->dir_mode = c_n->dir_mode;
+		n->file_mode = c_n->file_mode;
+		n->gid = c_n->gid;
+		n->uid = c_n->uid;
+		memmove (n->mounted_vol, c_n->mounted_vol, (sizeof (c_n->mounted_vol) + 3 * sizeof (unsigned int)));
+		n->wdog_pid = c_n->wdog_pid;
+		n->mounted_uid = c_n->mounted_uid;
+	} else if (version == 4) {
+		struct compat_ncp_mount_data_v4 *c_n = raw_data;
+		struct ncp_mount_data_v4 *n = raw_data;
+
+		n->dir_mode = c_n->dir_mode;
+		n->file_mode = c_n->file_mode;
+		n->gid = c_n->gid;
+		n->uid = c_n->uid;
+		n->retry_count = c_n->retry_count;
+		n->time_out = c_n->time_out;
+		n->ncp_fd = c_n->ncp_fd;
+		n->wdog_pid = c_n->wdog_pid;
+		n->mounted_uid = c_n->mounted_uid;
+		n->flags = c_n->flags;
+	} else if (version != 5) {
+		return NULL;
+	}
+
+	return raw_data;
+}
+
+struct compat_smb_mount_data {
+	compat_int_t version;
+	compat_uid_t mounted_uid;
+	compat_uid_t uid;
+	compat_gid_t gid;
+	compat_mode_t file_mode;
+	compat_mode_t dir_mode;
+};
+
+static void *do_smb_super_data_conv(void *raw_data)
+{
+	struct smb_mount_data *s = raw_data;
+	struct compat_smb_mount_data *c_s = raw_data;
+
+	if (c_s->version != SMB_MOUNT_OLDVERSION)
+		goto out;
+	s->dir_mode = c_s->dir_mode;
+	s->file_mode = c_s->file_mode;
+	s->gid = c_s->gid;
+	s->uid = c_s->uid;
+	s->mounted_uid = c_s->mounted_uid;
+ out:
+	return raw_data;
+}
+
+extern int copy_mount_options (const void __user *, unsigned long *);
+
+#define SMBFS_NAME      "smbfs"
+#define NCPFS_NAME      "ncpfs"
+
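+/*
+ * Mount options are normally an opaque page of text, but smbfs and ncpfs
+ * take a binary structure whose layout differs for 32-bit callers, so
+ * their data page is converted in place before calling do_mount().
+ */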
+asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
+				 char __user * type, unsigned long flags,
+				 void __user * data)
+{
+	unsigned long type_page;
+	unsigned long data_page;
+	unsigned long dev_page;
+	char *dir_page;
+	int retval;
+
+	retval = copy_mount_options (type, &type_page);
+	if (retval < 0)
+		goto out;
+
+	dir_page = getname(dir_name);
+	retval = PTR_ERR(dir_page);
+	if (IS_ERR(dir_page))
+		goto out1;
+
+	retval = copy_mount_options (dev_name, &dev_page);
+	if (retval < 0)
+		goto out2;
+
+	retval = copy_mount_options (data, &data_page);
+	if (retval < 0)
+		goto out3;
+
+	retval = -EINVAL;
+
+	if (type_page) {
+		if (!strcmp((char *)type_page, SMBFS_NAME)) {
+			do_smb_super_data_conv((void *)data_page);
+		} else if (!strcmp((char *)type_page, NCPFS_NAME)) {
+			do_ncp_super_data_conv((void *)data_page);
+		}
+	}
+
+	lock_kernel();
+	retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
+			flags, (void*)data_page);
+	unlock_kernel();
+
+	free_page(data_page);
+ out3:
+	free_page(dev_page);
+ out2:
+	putname(dir_page);
+ out1:
+	free_page(type_page);
+ out:
+	return retval;
+}
+
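+/*
+ * NAME_OFFSET() is pure pointer arithmetic (the offset of d_name within a
+ * dirent), which is why the filldir callbacks below may apply it to a
+ * pointer they have not initialized yet; COMPAT_ROUND_UP() pads record
+ * lengths to a multiple of compat_long_t.
+ */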
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+#define COMPAT_ROUND_UP(x) (((x)+sizeof(compat_long_t)-1) & \
+				~(sizeof(compat_long_t)-1))
+
+struct compat_old_linux_dirent {
+	compat_ulong_t	d_ino;
+	compat_ulong_t	d_offset;
+	unsigned short	d_namlen;
+	char		d_name[1];
+};
+
+struct compat_readdir_callback {
+	struct compat_old_linux_dirent __user *dirent;
+	int result;
+};
+
+static int compat_fillonedir(void *__buf, const char *name, int namlen,
+			loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct compat_readdir_callback *buf = __buf;
+	struct compat_old_linux_dirent __user *dirent;
+
+	if (buf->result)
+		return -EINVAL;
+	buf->result++;
+	dirent = buf->dirent;
+	if (!access_ok(VERIFY_WRITE, dirent,
+			(unsigned long)(dirent->d_name + namlen + 1) -
+				(unsigned long)dirent))
+		goto efault;
+	if (	__put_user(ino, &dirent->d_ino) ||
+		__put_user(offset, &dirent->d_offset) ||
+		__put_user(namlen, &dirent->d_namlen) ||
+		__copy_to_user(dirent->d_name, name, namlen) ||
+		__put_user(0, dirent->d_name + namlen))
+		goto efault;
+	return 0;
+efault:
+	buf->result = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+	struct compat_old_linux_dirent __user *dirent, unsigned int count)
+{
+	int error;
+	struct file *file;
+	struct compat_readdir_callback buf;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.result = 0;
+	buf.dirent = dirent;
+
+	error = vfs_readdir(file, compat_fillonedir, &buf);
+	if (error >= 0)
+		error = buf.result;
+
+	fput(file);
+out:
+	return error;
+}
+
+struct compat_linux_dirent {
+	compat_ulong_t	d_ino;
+	compat_ulong_t	d_off;
+	unsigned short	d_reclen;
+	char		d_name[1];
+};
+
+struct compat_getdents_callback {
+	struct compat_linux_dirent __user *current_dir;
+	struct compat_linux_dirent __user *previous;
+	int count;
+	int error;
+};
+
+static int compat_filldir(void *__buf, const char *name, int namlen,
+		loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct compat_linux_dirent __user * dirent;
+	struct compat_getdents_callback *buf = __buf;
+	int reclen = COMPAT_ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	if (dirent) {
+		if (__put_user(offset, &dirent->d_off))
+			goto efault;
+	}
+	dirent = buf->current_dir;
+	if (__put_user(ino, &dirent->d_ino))
+		goto efault;
+	if (__put_user(reclen, &dirent->d_reclen))
+		goto efault;
+	if (copy_to_user(dirent->d_name, name, namlen))
+		goto efault;
+	if (__put_user(0, dirent->d_name + namlen))
+		goto efault;
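+	/* d_type is stored in the very last byte of the record, after the name. */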
+	if (__put_user(d_type, (char  __user *) dirent + reclen - 1))
+		goto efault;
+	buf->previous = dirent;
+	dirent = (void __user *)dirent + reclen;
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+efault:
+	buf->error = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long compat_sys_getdents(unsigned int fd,
+		struct compat_linux_dirent __user *dirent, unsigned int count)
+{
+	struct file * file;
+	struct compat_linux_dirent __user * lastdirent;
+	struct compat_getdents_callback buf;
+	int error;
+
+	error = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, dirent, count))
+		goto out;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.current_dir = dirent;
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, compat_filldir, &buf);
+	if (error < 0)
+		goto out_putf;
+	error = buf.error;
+	lastdirent = buf.previous;
+	if (lastdirent) {
+		if (put_user(file->f_pos, &lastdirent->d_off))
+			error = -EFAULT;
+		else
+			error = count - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+#ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
+#define COMPAT_ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
+
+struct compat_getdents_callback64 {
+	struct linux_dirent64 __user *current_dir;
+	struct linux_dirent64 __user *previous;
+	int count;
+	int error;
+};
+
+static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+		     ino_t ino, unsigned int d_type)
+{
+	struct linux_dirent64 __user *dirent;
+	struct compat_getdents_callback64 *buf = __buf;
+	int jj = NAME_OFFSET(dirent);
+	int reclen = COMPAT_ROUND_UP64(jj + namlen + 1);
+	u64 off;
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+
+	if (dirent) {
+		if (__put_user_unaligned(offset, &dirent->d_off))
+			goto efault;
+	}
+	dirent = buf->current_dir;
+	if (__put_user_unaligned(ino, &dirent->d_ino))
+		goto efault;
+	off = 0;
+	if (__put_user_unaligned(off, &dirent->d_off))
+		goto efault;
+	if (__put_user(reclen, &dirent->d_reclen))
+		goto efault;
+	if (__put_user(d_type, &dirent->d_type))
+		goto efault;
+	if (copy_to_user(dirent->d_name, name, namlen))
+		goto efault;
+	if (__put_user(0, dirent->d_name + namlen))
+		goto efault;
+	buf->previous = dirent;
+	dirent = (void __user *)dirent + reclen;
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+efault:
+	buf->error = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long compat_sys_getdents64(unsigned int fd,
+		struct linux_dirent64 __user * dirent, unsigned int count)
+{
+	struct file * file;
+	struct linux_dirent64 __user * lastdirent;
+	struct compat_getdents_callback64 buf;
+	int error;
+
+	error = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, dirent, count))
+		goto out;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.current_dir = dirent;
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, compat_filldir64, &buf);
+	if (error < 0)
+		goto out_putf;
+	error = buf.error;
+	lastdirent = buf.previous;
+	if (lastdirent) {
+		typeof(lastdirent->d_off) d_off = file->f_pos;
+		__put_user_unaligned(d_off, &lastdirent->d_off);
+		error = count - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
+
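+/*
+ * Core of compat readv/writev: copy the 32-bit iovec array into native
+ * struct iovecs (on the stack for up to UIO_FASTIOV segments, kmalloc()ed
+ * otherwise), validate the lengths, and then either hand the whole vector
+ * to the file's readv/writev method or fall back to looping over plain
+ * read/write.
+ */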
+static ssize_t compat_do_readv_writev(int type, struct file *file,
+			       const struct compat_iovec __user *uvector,
+			       unsigned long nr_segs, loff_t *pos)
+{
+	typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
+	typedef ssize_t (*iov_fn_t)(struct file *, const struct iovec *, unsigned long, loff_t *);
+
+	compat_ssize_t tot_len;
+	struct iovec iovstack[UIO_FASTIOV];
+	struct iovec *iov=iovstack, *vector;
+	ssize_t ret;
+	int seg;
+	io_fn_t fn;
+	iov_fn_t fnv;
+
+	/*
+	 * SuS says "The readv() function *may* fail if the iovcnt argument
+	 * was less than or equal to 0, or greater than {IOV_MAX}."  Linux has
+	 * traditionally returned zero for zero segments, so...
+	 */
+	ret = 0;
+	if (nr_segs == 0)
+		goto out;
+
+	/*
+	 * First get the "struct iovec" from user memory and
+	 * verify all the pointers
+	 */
+	ret = -EINVAL;
+	if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
+		goto out;
+	if (!file->f_op)
+		goto out;
+	if (nr_segs > UIO_FASTIOV) {
+		ret = -ENOMEM;
+		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+		if (!iov)
+			goto out;
+	}
+	ret = -EFAULT;
+	if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
+		goto out;
+
+	/*
+	 * Single Unix Specification:
+	 * We should return -EINVAL if an element length is negative or does
+	 * not fit in an ssize_t, and the total length must also fit in an
+	 * ssize_t.
+	 *
+	 * Be careful here because iov_len is a size_t, not an ssize_t.
+	 */
+	tot_len = 0;
+	vector = iov;
+	ret = -EINVAL;
+	for (seg = 0 ; seg < nr_segs; seg++) {
+		compat_ssize_t tmp = tot_len;
+		compat_ssize_t len;
+		compat_uptr_t buf;
+
+		if (__get_user(len, &uvector->iov_len) ||
+		    __get_user(buf, &uvector->iov_base)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		if (len < 0)	/* size_t not fitting a compat_ssize_t .. */
+			goto out;
+		tot_len += len;
+		if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
+			goto out;
+		vector->iov_base = compat_ptr(buf);
+		vector->iov_len = (compat_size_t) len;
+		uvector++;
+		vector++;
+	}
+	if (tot_len == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = rw_verify_area(type, file, pos, tot_len);
+	if (ret)
+		goto out;
+
+	fnv = NULL;
+	if (type == READ) {
+		fn = file->f_op->read;
+		fnv = file->f_op->readv;
+	} else {
+		fn = (io_fn_t)file->f_op->write;
+		fnv = file->f_op->writev;
+	}
+	if (fnv) {
+		ret = fnv(file, iov, nr_segs, pos);
+		goto out;
+	}
+
+	/* Do it by hand, with file-ops */
+	ret = 0;
+	vector = iov;
+	while (nr_segs > 0) {
+		void __user * base;
+		size_t len;
+		ssize_t nr;
+
+		base = vector->iov_base;
+		len = vector->iov_len;
+		vector++;
+		nr_segs--;
+
+		nr = fn(file, base, len, pos);
+
+		if (nr < 0) {
+			if (!ret) ret = nr;
+			break;
+		}
+		ret += nr;
+		if (nr != len)
+			break;
+	}
+out:
+	if (iov != iovstack)
+		kfree(iov);
+	if ((ret + (type == READ)) > 0)
+		dnotify_parent(file->f_dentry,
+				(type == READ) ? DN_ACCESS : DN_MODIFY);
+	return ret;
+}
+
+asmlinkage ssize_t
+compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+
+	file = fget(fd);
+	if (!file)
+		return -EBADF;
+
+	if (!(file->f_mode & FMODE_READ))
+		goto out;
+
+	ret = -EINVAL;
+	if (!file->f_op || (!file->f_op->readv && !file->f_op->read))
+		goto out;
+
+	ret = compat_do_readv_writev(READ, file, vec, vlen, &file->f_pos);
+
+out:
+	fput(file);
+	return ret;
+}
+
+asmlinkage ssize_t
+compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+
+	file = fget(fd);
+	if (!file)
+		return -EBADF;
+	if (!(file->f_mode & FMODE_WRITE))
+		goto out;
+
+	ret = -EINVAL;
+	if (!file->f_op || (!file->f_op->writev && !file->f_op->write))
+		goto out;
+
+	ret = compat_do_readv_writev(WRITE, file, vec, vlen, &file->f_pos);
+
+out:
+	fput(file);
+	return ret;
+}
+
+/*
+ * compat_count() counts the number of argument/environment strings. It is
+ * basically a copy of count() from fs/exec.c, except that it works with
+ * 32 bit argv and envp pointers.
+ */
+static int compat_count(compat_uptr_t __user *argv, int max)
+{
+	int i = 0;
+
+	if (argv != NULL) {
+		for (;;) {
+			compat_uptr_t p;
+
+			if (get_user(p, argv))
+				return -EFAULT;
+			if (!p)
+				break;
+			argv++;
+			if(++i > max)
+				return -E2BIG;
+		}
+	}
+	return i;
+}
+
+/*
+ * compat_copy_strings() is basically a copy of copy_strings() from fs/exec.c
+ * except that it works with 32 bit argv and envp pointers.
+ */
+static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
+				struct linux_binprm *bprm)
+{
+	struct page *kmapped_page = NULL;
+	char *kaddr = NULL;
+	int ret;
+
+	while (argc-- > 0) {
+		compat_uptr_t str;
+		int len;
+		unsigned long pos;
+
+		if (get_user(str, argv+argc) ||
+			!(len = strnlen_user(compat_ptr(str), bprm->p))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		if (bprm->p < len)  {
+			ret = -E2BIG;
+			goto out;
+		}
+
+		bprm->p -= len;
+		/* XXX: add architecture specific overflow check here. */
+		pos = bprm->p;
+
+		while (len > 0) {
+			int i, new, err;
+			int offset, bytes_to_copy;
+			struct page *page;
+
+			offset = pos % PAGE_SIZE;
+			i = pos/PAGE_SIZE;
+			page = bprm->page[i];
+			new = 0;
+			if (!page) {
+				page = alloc_page(GFP_HIGHUSER);
+				bprm->page[i] = page;
+				if (!page) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				new = 1;
+			}
+
+			if (page != kmapped_page) {
+				if (kmapped_page)
+					kunmap(kmapped_page);
+				kmapped_page = page;
+				kaddr = kmap(kmapped_page);
+			}
+			if (new && offset)
+				memset(kaddr, 0, offset);
+			bytes_to_copy = PAGE_SIZE - offset;
+			if (bytes_to_copy > len) {
+				bytes_to_copy = len;
+				if (new)
+					memset(kaddr+offset+len, 0,
+						PAGE_SIZE-offset-len);
+			}
+			err = copy_from_user(kaddr+offset, compat_ptr(str),
+						bytes_to_copy);
+			if (err) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			pos += bytes_to_copy;
+			str += bytes_to_copy;
+			len -= bytes_to_copy;
+		}
+	}
+	ret = 0;
+out:
+	if (kmapped_page)
+		kunmap(kmapped_page);
+	return ret;
+}
+
+#ifdef CONFIG_MMU
+
+#define free_arg_pages(bprm) do { } while (0)
+
+#else
+
+static inline void free_arg_pages(struct linux_binprm *bprm)
+{
+	int i;
+
+	for (i = 0; i < MAX_ARG_PAGES; i++) {
+		if (bprm->page[i])
+			__free_page(bprm->page[i]);
+		bprm->page[i] = NULL;
+	}
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * compat_do_execve() is mostly a copy of do_execve(), with the exception
+ * that it processes 32 bit argv and envp pointers.
+ */
+int compat_do_execve(char * filename,
+	compat_uptr_t __user *argv,
+	compat_uptr_t __user *envp,
+	struct pt_regs * regs)
+{
+	struct linux_binprm *bprm;
+	struct file *file;
+	int retval;
+	int i;
+
+	retval = -ENOMEM;
+	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
+	if (!bprm)
+		goto out_ret;
+	memset(bprm, 0, sizeof(*bprm));
+
+	file = open_exec(filename);
+	retval = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out_kfree;
+
+	sched_exec();
+
+	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+	bprm->file = file;
+	bprm->filename = filename;
+	bprm->interp = filename;
+	bprm->mm = mm_alloc();
+	retval = -ENOMEM;
+	if (!bprm->mm)
+		goto out_file;
+
+	retval = init_new_context(current, bprm->mm);
+	if (retval < 0)
+		goto out_mm;
+
+	bprm->argc = compat_count(argv, bprm->p / sizeof(compat_uptr_t));
+	if ((retval = bprm->argc) < 0)
+		goto out_mm;
+
+	bprm->envc = compat_count(envp, bprm->p / sizeof(compat_uptr_t));
+	if ((retval = bprm->envc) < 0)
+		goto out_mm;
+
+	retval = security_bprm_alloc(bprm);
+	if (retval)
+		goto out;
+
+	retval = prepare_binprm(bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings_kernel(1, &bprm->filename, bprm);
+	if (retval < 0)
+		goto out;
+
+	bprm->exec = bprm->p;
+	retval = compat_copy_strings(bprm->envc, envp, bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = compat_copy_strings(bprm->argc, argv, bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = search_binary_handler(bprm, regs);
+	if (retval >= 0) {
+		free_arg_pages(bprm);
+
+		/* execve success */
+		security_bprm_free(bprm);
+		kfree(bprm);
+		return retval;
+	}
+
+out:
+	/* Something went wrong: free the argument pages and other resources */
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page * page = bprm->page[i];
+		if (page)
+			__free_page(page);
+	}
+
+	if (bprm->security)
+		security_bprm_free(bprm);
+
+out_mm:
+	if (bprm->mm)
+		mmdrop(bprm->mm);
+
+out_file:
+	if (bprm->file) {
+		allow_write_access(bprm->file);
+		fput(bprm->file);
+	}
+
+out_kfree:
+	kfree(bprm);
+
+out_ret:
+	return retval;
+}
+
+#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))
+
+#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
+
+/*
+ * Ooo, nasty.  We need to frob 32-bit unsigned longs into
+ * 64-bit unsigned longs here.
+ */
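+/*
+ * A compat task lays its fd_set out as an array of 32-bit words, while the
+ * 64-bit kernel bitmap uses 64-bit longs: each pair of user words (l, h) is
+ * merged into one kernel long as (h << 32 | l) on the way in, and split
+ * back the same way on the way out.
+ */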
+static inline
+int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+			unsigned long *fdset)
+{
+	nr = ROUND_UP(nr, __COMPAT_NFDBITS);
+	if (ufdset) {
+		unsigned long odd;
+
+		if (!access_ok(VERIFY_WRITE, ufdset, nr*sizeof(compat_ulong_t)))
+			return -EFAULT;
+
+		odd = nr & 1UL;
+		nr &= ~1UL;
+		while (nr) {
+			unsigned long h, l;
+			__get_user(l, ufdset);
+			__get_user(h, ufdset+1);
+			ufdset += 2;
+			*fdset++ = h << 32 | l;
+			nr -= 2;
+		}
+		if (odd)
+			__get_user(*fdset, ufdset);
+	} else {
+		/* Tricky: we must clear the full trailing unsigned long
+		 * in the kernel fdset; rounding nr up to an even number
+		 * of 32-bit words makes sure that actually happens.
+		 */
+		memset(fdset, 0, ((nr + 1) & ~1)*sizeof(compat_ulong_t));
+	}
+	return 0;
+}
+
+static inline
+void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+			unsigned long *fdset)
+{
+	unsigned long odd;
+	nr = ROUND_UP(nr, __COMPAT_NFDBITS);
+
+	if (!ufdset)
+		return;
+
+	odd = nr & 1UL;
+	nr &= ~1UL;
+	while (nr) {
+		unsigned long h, l;
+		l = *fdset++;
+		h = l >> 32;
+		__put_user(l, ufdset);
+		__put_user(h, ufdset+1);
+		ufdset += 2;
+		nr -= 2;
+	}
+	if (odd)
+		__put_user(*fdset, ufdset);
+}
+
+
+/*
+ * This is a near copy of sys_select() from fs/select.c and should
+ * probably be compared to it from time to time.
+ */
+static void *select_bits_alloc(int size)
+{
+	return kmalloc(6 * size, GFP_KERNEL);
+}
+
+static void select_bits_free(void *bits, int size)
+{
+	kfree(bits);
+}
+
+/*
+ * We can actually return ERESTARTSYS instead of EINTR, but I'd
+ * like to be certain this leads to no problems. So I return
+ * EINTR just for safety.
+ *
+ * Update: ERESTARTSYS breaks at least the xview clock binary, so
+ * I'm trying ERESTARTNOHAND, which restarts only when you want to.
+ */
+#define MAX_SELECT_SECONDS \
+	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
+
+asmlinkage long
+compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp,
+		compat_ulong_t __user *exp, struct compat_timeval __user *tvp)
+{
+	fd_set_bits fds;
+	char *bits;
+	long timeout;
+	int size, max_fdset, ret = -EINVAL;
+
+	timeout = MAX_SCHEDULE_TIMEOUT;
+	if (tvp) {
+		time_t sec, usec;
+
+		if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
+		    || __get_user(sec, &tvp->tv_sec)
+		    || __get_user(usec, &tvp->tv_usec)) {
+			ret = -EFAULT;
+			goto out_nofds;
+		}
+
+		if (sec < 0 || usec < 0)
+			goto out_nofds;
+
+		if ((unsigned long) sec < MAX_SELECT_SECONDS) {
+			timeout = ROUND_UP(usec, 1000000/HZ);
+			timeout += sec * (unsigned long) HZ;
+		}
+	}
+
+	if (n < 0)
+		goto out_nofds;
+
+	/* max_fdset can increase, so grab it once to avoid race */
+	max_fdset = current->files->max_fdset;
+	if (n > max_fdset)
+		n = max_fdset;
+
+	/*
+	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
+	 * since we use an fd_set, we need to allocate memory in units of
+	 * long-words.
+	 */
+	ret = -ENOMEM;
+	size = FDS_BYTES(n);
+	bits = select_bits_alloc(size);
+	if (!bits)
+		goto out_nofds;
+	fds.in      = (unsigned long *)  bits;
+	fds.out     = (unsigned long *) (bits +   size);
+	fds.ex      = (unsigned long *) (bits + 2*size);
+	fds.res_in  = (unsigned long *) (bits + 3*size);
+	fds.res_out = (unsigned long *) (bits + 4*size);
+	fds.res_ex  = (unsigned long *) (bits + 5*size);
+
+	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
+	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
+	    (ret = compat_get_fd_set(n, exp, fds.ex)))
+		goto out;
+	zero_fd_set(n, fds.res_in);
+	zero_fd_set(n, fds.res_out);
+	zero_fd_set(n, fds.res_ex);
+
+	ret = do_select(n, &fds, &timeout);
+
+	if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
+		time_t sec = 0, usec = 0;
+		if (timeout) {
+			sec = timeout / HZ;
+			usec = timeout % HZ;
+			usec *= (1000000/HZ);
+		}
+		if (put_user(sec, &tvp->tv_sec) ||
+		    put_user(usec, &tvp->tv_usec))
+			ret = -EFAULT;
+	}
+
+	if (ret < 0)
+		goto out;
+	if (!ret) {
+		ret = -ERESTARTNOHAND;
+		if (signal_pending(current))
+			goto out;
+		ret = 0;
+	}
+
+	compat_set_fd_set(n, inp, fds.res_in);
+	compat_set_fd_set(n, outp, fds.res_out);
+	compat_set_fd_set(n, exp, fds.res_ex);
+
+out:
+	select_bits_free(bits, size);
+out_nofds:
+	return ret;
+}
+
+#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
+/* Stuff for NFS server syscalls... */
+struct compat_nfsctl_svc {
+	u16			svc32_port;
+	s32			svc32_nthreads;
+};
+
+struct compat_nfsctl_client {
+	s8			cl32_ident[NFSCLNT_IDMAX+1];
+	s32			cl32_naddr;
+	struct in_addr		cl32_addrlist[NFSCLNT_ADDRMAX];
+	s32			cl32_fhkeytype;
+	s32			cl32_fhkeylen;
+	u8			cl32_fhkey[NFSCLNT_KEYMAX];
+};
+
+struct compat_nfsctl_export {
+	char		ex32_client[NFSCLNT_IDMAX+1];
+	char		ex32_path[NFS_MAXPATHLEN+1];
+	compat_dev_t	ex32_dev;
+	compat_ino_t	ex32_ino;
+	compat_int_t	ex32_flags;
+	compat_uid_t	ex32_anon_uid;
+	compat_gid_t	ex32_anon_gid;
+};
+
+struct compat_nfsctl_fdparm {
+	struct sockaddr		gd32_addr;
+	s8			gd32_path[NFS_MAXPATHLEN+1];
+	compat_int_t		gd32_version;
+};
+
+struct compat_nfsctl_fsparm {
+	struct sockaddr		gd32_addr;
+	s8			gd32_path[NFS_MAXPATHLEN+1];
+	compat_int_t		gd32_maxlen;
+};
+
+struct compat_nfsctl_arg {
+	compat_int_t		ca32_version;	/* safeguard */
+	union {
+		struct compat_nfsctl_svc	u32_svc;
+		struct compat_nfsctl_client	u32_client;
+		struct compat_nfsctl_export	u32_export;
+		struct compat_nfsctl_fdparm	u32_getfd;
+		struct compat_nfsctl_fsparm	u32_getfs;
+	} u;
+#define ca32_svc	u.u32_svc
+#define ca32_client	u.u32_client
+#define ca32_export	u.u32_export
+#define ca32_getfd	u.u32_getfd
+#define ca32_getfs	u.u32_getfs
+};
+
+union compat_nfsctl_res {
+	__u8			cr32_getfh[NFS_FHSIZE];
+	struct knfsd_fh		cr32_getfs;
+};
+
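+/*
+ * The nfsctl translators below all follow the same pattern: check the
+ * relevant sub-union with access_ok(), copy each 32-bit field into the
+ * native nfsctl_arg, and OR the individual results together so that a
+ * single test at the end can return -EFAULT if anything failed.
+ */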
+static int compat_nfs_svc_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+{
+	int err;
+
+	/* access_ok() returns true on success; invert it so err is non-zero only on failure */
+	err = !access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc));
+	err |= get_user(karg->ca_version, &arg->ca32_version);
+	err |= __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port);
+	err |= __get_user(karg->ca_svc.svc_nthreads, &arg->ca32_svc.svc32_nthreads);
+	return (err) ? -EFAULT : 0;
+}
+
+static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+{
+	int err;
+
+	err = !access_ok(VERIFY_READ, &arg->ca32_client, sizeof(arg->ca32_client));
+	err |= get_user(karg->ca_version, &arg->ca32_version);
+	err |= __copy_from_user(&karg->ca_client.cl_ident[0],
+			  &arg->ca32_client.cl32_ident[0],
+			  NFSCLNT_IDMAX);
+	err |= __get_user(karg->ca_client.cl_naddr, &arg->ca32_client.cl32_naddr);
+	err |= __copy_from_user(&karg->ca_client.cl_addrlist[0],
+			  &arg->ca32_client.cl32_addrlist[0],
+			  (sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
+	err |= __get_user(karg->ca_client.cl_fhkeytype,
+		      &arg->ca32_client.cl32_fhkeytype);
+	err |= __get_user(karg->ca_client.cl_fhkeylen,
+		      &arg->ca32_client.cl32_fhkeylen);
+	err |= __copy_from_user(&karg->ca_client.cl_fhkey[0],
+			  &arg->ca32_client.cl32_fhkey[0],
+			  NFSCLNT_KEYMAX);
+
+	return (err) ? -EFAULT : 0;
+}
+
+static int compat_nfs_exp_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+{
+	int err;
+
+	err = !access_ok(VERIFY_READ, &arg->ca32_export, sizeof(arg->ca32_export));
+	err |= get_user(karg->ca_version, &arg->ca32_version);
+	err |= __copy_from_user(&karg->ca_export.ex_client[0],
+			  &arg->ca32_export.ex32_client[0],
+			  NFSCLNT_IDMAX);
+	err |= __copy_from_user(&karg->ca_export.ex_path[0],
+			  &arg->ca32_export.ex32_path[0],
+			  NFS_MAXPATHLEN);
+	err |= __get_user(karg->ca_export.ex_dev,
+		      &arg->ca32_export.ex32_dev);
+	err |= __get_user(karg->ca_export.ex_ino,
+		      &arg->ca32_export.ex32_ino);
+	err |= __get_user(karg->ca_export.ex_flags,
+		      &arg->ca32_export.ex32_flags);
+	err |= __get_user(karg->ca_export.ex_anon_uid,
+		      &arg->ca32_export.ex32_anon_uid);
+	err |= __get_user(karg->ca_export.ex_anon_gid,
+		      &arg->ca32_export.ex32_anon_gid);
+	SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid);
+	SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid);
+
+	return (err) ? -EFAULT : 0;
+}
+
+static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+{
+	int err;
+
+	err = !access_ok(VERIFY_READ, &arg->ca32_getfd, sizeof(arg->ca32_getfd));
+	err |= get_user(karg->ca_version, &arg->ca32_version);
+	err |= __copy_from_user(&karg->ca_getfd.gd_addr,
+			  &arg->ca32_getfd.gd32_addr,
+			  (sizeof(struct sockaddr)));
+	err |= __copy_from_user(&karg->ca_getfd.gd_path,
+			  &arg->ca32_getfd.gd32_path,
+			  (NFS_MAXPATHLEN+1));
+	err |= __get_user(karg->ca_getfd.gd_version,
+		      &arg->ca32_getfd.gd32_version);
+
+	return (err) ? -EFAULT : 0;
+}
+
+static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+{
+	int err;
+
+	err = !access_ok(VERIFY_READ, &arg->ca32_getfs, sizeof(arg->ca32_getfs));
+	err |= get_user(karg->ca_version, &arg->ca32_version);
+	err |= __copy_from_user(&karg->ca_getfs.gd_addr,
+			  &arg->ca32_getfs.gd32_addr,
+			  (sizeof(struct sockaddr)));
+	err |= __copy_from_user(&karg->ca_getfs.gd_path,
+			  &arg->ca32_getfs.gd32_path,
+			  (NFS_MAXPATHLEN+1));
+	err |= __get_user(karg->ca_getfs.gd_maxlen,
+		      &arg->ca32_getfs.gd32_maxlen);
+
+	return (err) ? -EFAULT : 0;
+}
+
+/* This really doesn't need translation; we are only passing
+ * back a union which contains opaque NFS file handle data.
+ */
+static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsctl_res __user *res)
+{
+	int err;
+
+	err = copy_to_user(res, kres, sizeof(*res));
+
+	return (err) ? -EFAULT : 0;
+}
+
+asmlinkage long compat_sys_nfsservctl(int cmd, struct compat_nfsctl_arg __user *arg,
+					union compat_nfsctl_res __user *res)
+{
+	struct nfsctl_arg *karg;
+	union nfsctl_res *kres;
+	mm_segment_t oldfs;
+	int err;
+
+	karg = kmalloc(sizeof(*karg), GFP_USER);
+	kres = kmalloc(sizeof(*kres), GFP_USER);
+	if(!karg || !kres) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	switch(cmd) {
+	case NFSCTL_SVC:
+		err = compat_nfs_svc_trans(karg, arg);
+		break;
+
+	case NFSCTL_ADDCLIENT:
+		err = compat_nfs_clnt_trans(karg, arg);
+		break;
+
+	case NFSCTL_DELCLIENT:
+		err = compat_nfs_clnt_trans(karg, arg);
+		break;
+
+	case NFSCTL_EXPORT:
+	case NFSCTL_UNEXPORT:
+		err = compat_nfs_exp_trans(karg, arg);
+		break;
+
+	case NFSCTL_GETFD:
+		err = compat_nfs_getfd_trans(karg, arg);
+		break;
+
+	case NFSCTL_GETFS:
+		err = compat_nfs_getfs_trans(karg, arg);
+		break;
+
+	default:
+		err = -EINVAL;
+		goto done;
+	}
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	/* The __user pointer casts are valid because of the set_fs() */
+	err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
+	set_fs(oldfs);
+
+	if (err)
+		goto done;
+
+	if((cmd == NFSCTL_GETFD) ||
+	   (cmd == NFSCTL_GETFS))
+		err = compat_nfs_getfh_res_trans(kres, res);
+
+done:
+	kfree(karg);
+	kfree(kres);
+	return err;
+}
+#else /* !NFSD */
+asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
+{
+	return sys_ni_syscall();
+}
+#endif
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
new file mode 100644
index 0000000..155e612
--- /dev/null
+++ b/fs/compat_ioctl.c
@@ -0,0 +1,3082 @@
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
+ * Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 2001,2002  Andi Kleen, SuSE Labs 
+ * Copyright (C) 2003       Pavel Machek (pavel@suse.cz)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
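+/*
+ * Note: the INCLUDES and CODE guards let this file be textually included
+ * in pieces by the ioctl translation code (the header list first, the
+ * conversion routines second) rather than being compiled on its own.
+ */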
+#ifdef INCLUDES
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/if.h>
+#include <linux/if_bridge.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/raid/md.h>
+#include <linux/kd.h>
+#include <linux/dirent.h>
+#include <linux/route.h>
+#include <linux/in6.h>
+#include <linux/ipv6_route.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/vt.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fd.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/mtio.h>
+#include <linux/cdrom.h>
+#include <linux/loop.h>
+#include <linux/auto_fs.h>
+#include <linux/auto_fs4.h>
+#include <linux/devfs_fs.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/fb.h>
+#include <linux/ext2_fs.h>
+#include <linux/videodev.h>
+#include <linux/netdevice.h>
+#include <linux/raw.h>
+#include <linux/smb_fs.h>
+#include <linux/blkpg.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/rtc.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/if_tun.h>
+#include <linux/ctype.h>
+#include <linux/ioctl32.h>
+#include <linux/syscalls.h>
+#include <linux/ncp_fs.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+#include <linux/wireless.h>
+#include <linux/atalk.h>
+
+#include <net/sock.h>          /* siocdevprivate_ioctl */
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/rfcomm.h>
+
+#include <linux/capi.h>
+
+#include <scsi/scsi.h>
+/* Ugly hack. */
+#undef __KERNEL__
+#include <scsi/scsi_ioctl.h>
+#define __KERNEL__
+#include <scsi/sg.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_bonding.h>
+#include <linux/watchdog.h>
+#include <linux/dm-ioctl.h>
+
+#include <asm/module.h>
+#include <linux/soundcard.h>
+#include <linux/lp.h>
+#include <linux/ppdev.h>
+
+#include <linux/atm.h>
+#include <linux/atmarp.h>
+#include <linux/atmclip.h>
+#include <linux/atmdev.h>
+#include <linux/atmioc.h>
+#include <linux/atmlec.h>
+#include <linux/atmmpc.h>
+#include <linux/atmsvc.h>
+#include <linux/atm_tcp.h>
+#include <linux/sonet.h>
+#include <linux/atm_suni.h>
+#include <linux/mtd/mtd.h>
+
+#include <linux/usb.h>
+#include <linux/usbdevice_fs.h>
+#include <linux/nbd.h>
+#include <linux/random.h>
+#include <linux/filter.h>
+#include <linux/msdos_fs.h>
+#include <linux/pktcdvd.h>
+
+#include <linux/hiddev.h>
+
+#undef INCLUDES
+#endif
+
+#ifdef CODE
+
+/* Aiee. Someone cannot tell the difference between int and long */
+#define EXT2_IOC32_GETFLAGS               _IOR('f', 1, int)
+#define EXT2_IOC32_SETFLAGS               _IOW('f', 2, int)
+#define EXT2_IOC32_GETVERSION             _IOR('v', 1, int)
+#define EXT2_IOC32_SETVERSION             _IOW('v', 2, int)
+
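+/*
+ * Generic helpers for ioctls whose argument is a single long: run the
+ * native ioctl against a kernel unsigned long under set_fs(KERNEL_DS),
+ * then copy the value out to (w_long), or in from and back out to
+ * (rw_long), the 32-bit word supplied by the compat caller.
+ */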
+static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	int err;
+	unsigned long val;
+	
+	set_fs (KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&val);
+	set_fs (old_fs);
+	if (!err && put_user(val, (u32 __user *)compat_ptr(arg)))
+		return -EFAULT;
+	return err;
+}
+ 
+static int rw_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	u32 __user *argptr = compat_ptr(arg);
+	int err;
+	unsigned long val;
+	
+	if(get_user(val, argptr))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&val);
+	set_fs (old_fs);
+	if (!err && put_user(val, argptr))
+		return -EFAULT;
+	return err;
+}
+
+static int do_ext2_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	/* These are just misnamed; they actually get/put an int from/to user space */
+	switch (cmd) {
+	case EXT2_IOC32_GETFLAGS: cmd = EXT2_IOC_GETFLAGS; break;
+	case EXT2_IOC32_SETFLAGS: cmd = EXT2_IOC_SETFLAGS; break;
+	case EXT2_IOC32_GETVERSION: cmd = EXT2_IOC_GETVERSION; break;
+	case EXT2_IOC32_SETVERSION: cmd = EXT2_IOC_SETVERSION; break;
+	}
+	return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
+}
+
+struct video_tuner32 {
+	compat_int_t tuner;
+	char name[32];
+	compat_ulong_t rangelow, rangehigh;
+	u32 flags;	/* It is really u32 in videodev.h */
+	u16 mode, signal;
+};
+
+static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
+{
+	int i;
+
+	if(get_user(kp->tuner, &up->tuner))
+		return -EFAULT;
+	for(i = 0; i < 32; i++)
+		__get_user(kp->name[i], &up->name[i]);
+	__get_user(kp->rangelow, &up->rangelow);
+	__get_user(kp->rangehigh, &up->rangehigh);
+	__get_user(kp->flags, &up->flags);
+	__get_user(kp->mode, &up->mode);
+	__get_user(kp->signal, &up->signal);
+	return 0;
+}
+
+static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
+{
+	int i;
+
+	if(put_user(kp->tuner, &up->tuner))
+		return -EFAULT;
+	for(i = 0; i < 32; i++)
+		__put_user(kp->name[i], &up->name[i]);
+	__put_user(kp->rangelow, &up->rangelow);
+	__put_user(kp->rangehigh, &up->rangehigh);
+	__put_user(kp->flags, &up->flags);
+	__put_user(kp->mode, &up->mode);
+	__put_user(kp->signal, &up->signal);
+	return 0;
+}
+
+struct video_buffer32 {
+	compat_caddr_t base;
+	compat_int_t height, width, depth, bytesperline;
+};
+
+static int get_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
+{
+	u32 tmp;
+
+	if (get_user(tmp, &up->base))
+		return -EFAULT;
+
+	/* This is actually a physical address stored
+	 * as a void pointer.
+	 */
+	kp->base = (void *)(unsigned long) tmp;
+
+	__get_user(kp->height, &up->height);
+	__get_user(kp->width, &up->width);
+	__get_user(kp->depth, &up->depth);
+	__get_user(kp->bytesperline, &up->bytesperline);
+	return 0;
+}
+
+static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
+{
+	u32 tmp = (u32)((unsigned long)kp->base);
+
+	if(put_user(tmp, &up->base))
+		return -EFAULT;
+	__put_user(kp->height, &up->height);
+	__put_user(kp->width, &up->width);
+	__put_user(kp->depth, &up->depth);
+	__put_user(kp->bytesperline, &up->bytesperline);
+	return 0;
+}
+
+struct video_clip32 {
+	s32 x, y, width, height;	/* It's really s32 in videodev.h */
+	compat_caddr_t next;
+};
+
+struct video_window32 {
+	u32 x, y, width, height, chromakey, flags;
+	compat_caddr_t clips;
+	compat_int_t clipcount;
+};
+
+/* You get back everything except the clips... */
+static int put_video_window32(struct video_window *kp, struct video_window32 __user *up)
+{
+	if(put_user(kp->x, &up->x))
+		return -EFAULT;
+	__put_user(kp->y, &up->y);
+	__put_user(kp->width, &up->width);
+	__put_user(kp->height, &up->height);
+	__put_user(kp->chromakey, &up->chromakey);
+	__put_user(kp->flags, &up->flags);
+	__put_user(kp->clipcount, &up->clipcount);
+	return 0;
+}
+
+#define VIDIOCGTUNER32		_IOWR('v',4, struct video_tuner32)
+#define VIDIOCSTUNER32		_IOW('v',5, struct video_tuner32)
+#define VIDIOCGWIN32		_IOR('v',9, struct video_window32)
+#define VIDIOCSWIN32		_IOW('v',10, struct video_window32)
+#define VIDIOCGFBUF32		_IOR('v',11, struct video_buffer32)
+#define VIDIOCSFBUF32		_IOW('v',12, struct video_buffer32)
+#define VIDIOCGFREQ32		_IOR('v',14, u32)
+#define VIDIOCSFREQ32		_IOW('v',15, u32)
+
+enum {
+	MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip)
+};
+
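+/*
+ * VIDIOCSWIN takes a clip list hanging off the window structure, so the
+ * 32-bit window and every clip have to be rebuilt with native pointers
+ * in a compat_alloc_user_space() area before calling the real ioctl.
+ */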
+static int do_set_window(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct video_window32 __user *up = compat_ptr(arg);
+	struct video_window __user *vw;
+	struct video_clip __user *p;
+	int nclips;
+	u32 n;
+
+	if (get_user(nclips, &up->clipcount))
+		return -EFAULT;
+
+	/* Peculiar interface... */
+	if (nclips < 0)
+		nclips = VIDEO_CLIPMAP_SIZE;
+
+	if (nclips > MaxClips)
+		return -ENOMEM;
+
+	vw = compat_alloc_user_space(sizeof(struct video_window) +
+				    nclips * sizeof(struct video_clip));
+
+	p = nclips ? (struct video_clip __user *)(vw + 1) : NULL;
+
+	if (get_user(n, &up->x) || put_user(n, &vw->x) ||
+	    get_user(n, &up->y) || put_user(n, &vw->y) ||
+	    get_user(n, &up->width) || put_user(n, &vw->width) ||
+	    get_user(n, &up->height) || put_user(n, &vw->height) ||
+	    get_user(n, &up->chromakey) || put_user(n, &vw->chromakey) ||
+	    get_user(n, &up->flags) || put_user(n, &vw->flags) ||
+	    get_user(n, &up->clipcount) || put_user(n, &vw->clipcount) ||
+	    get_user(n, &up->clips) || put_user(p, &vw->clips))
+		return -EFAULT;
+
+	if (nclips) {
+		struct video_clip32 __user *u = compat_ptr(n);
+		int i;
+		if (!u)
+			return -EINVAL;
+		for (i = 0; i < nclips; i++, u++, p++) {
+			s32 v;
+			if (get_user(v, &u->x) ||
+			    put_user(v, &p->x) ||
+			    get_user(v, &u->y) ||
+			    put_user(v, &p->y) ||
+			    get_user(v, &u->width) ||
+			    put_user(v, &p->width) ||
+			    get_user(v, &u->height) ||
+			    put_user(v, &p->height) ||
+			    put_user(NULL, &p->next))
+				return -EFAULT;
+		}
+	}
+
+	/* Pass the converted window structure, not the clip array, to the native ioctl */
+	return sys_ioctl(fd, VIDIOCSWIN, (unsigned long)vw);
+}
+
+static int do_video_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	union {
+		struct video_tuner vt;
+		struct video_buffer vb;
+		struct video_window vw;
+		unsigned long vx;
+	} karg;
+	mm_segment_t old_fs = get_fs();
+	void __user *up = compat_ptr(arg);
+	int err = 0;
+
+	/* First, convert the command. */
+	switch(cmd) {
+	case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
+	case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
+	case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
+	case VIDIOCGFBUF32: cmd = VIDIOCGFBUF; break;
+	case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
+	case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
+	case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
+	};
+
+	switch(cmd) {
+	case VIDIOCSTUNER:
+	case VIDIOCGTUNER:
+		err = get_video_tuner32(&karg.vt, up);
+		break;
+
+	case VIDIOCSFBUF:
+		err = get_video_buffer32(&karg.vb, up);
+		break;
+
+	case VIDIOCSFREQ:
+		err = get_user(karg.vx, (u32 __user *)up);
+		break;
+	};
+	if(err)
+		goto out;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&karg);
+	set_fs(old_fs);
+
+	if(err == 0) {
+		switch(cmd) {
+		case VIDIOCGTUNER:
+			err = put_video_tuner32(&karg.vt, up);
+			break;
+
+		case VIDIOCGWIN:
+			err = put_video_window32(&karg.vw, up);
+			break;
+
+		case VIDIOCGFBUF:
+			err = put_video_buffer32(&karg.vb, up);
+			break;
+
+		case VIDIOCGFREQ:
+			err = put_user(((u32)karg.vx), (u32 __user *)up);
+			break;
+		};
+	}
+out:
+	return err;
+}
+
+#ifdef CONFIG_NET
+static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct compat_timeval __user *up = compat_ptr(arg);
+	struct timeval ktv;
+	mm_segment_t old_fs = get_fs();
+	int err;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&ktv);
+	set_fs(old_fs);
+	if(!err) {
+		err = put_user(ktv.tv_sec, &up->tv_sec);
+		err |= __put_user(ktv.tv_usec, &up->tv_usec);
+	}
+	return err;
+}
+
+struct ifmap32 {
+	compat_ulong_t mem_start;
+	compat_ulong_t mem_end;
+	unsigned short base_addr;
+	unsigned char irq;
+	unsigned char dma;
+	unsigned char port;
+};
+
+struct ifreq32 {
+#define IFHWADDRLEN     6
+#define IFNAMSIZ        16
+        union {
+                char    ifrn_name[IFNAMSIZ];            /* if name, e.g. "en0" */
+        } ifr_ifrn;
+        union {
+                struct  sockaddr ifru_addr;
+                struct  sockaddr ifru_dstaddr;
+                struct  sockaddr ifru_broadaddr;
+                struct  sockaddr ifru_netmask;
+                struct  sockaddr ifru_hwaddr;
+                short   ifru_flags;
+                compat_int_t     ifru_ivalue;
+                compat_int_t     ifru_mtu;
+                struct  ifmap32 ifru_map;
+                char    ifru_slave[IFNAMSIZ];   /* Just fits the size */
+		char	ifru_newname[IFNAMSIZ];
+                compat_caddr_t ifru_data;
+	    /* XXXX? ifru_settings should be here */
+        } ifr_ifru;
+};
+
+struct ifconf32 {
+        compat_int_t	ifc_len;                        /* size of buffer       */
+        compat_caddr_t  ifcbuf;
+};
+
+static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct net_device *dev;
+	struct ifreq32 ifr32;
+	int err;
+
+	if (copy_from_user(&ifr32, compat_ptr(arg), sizeof(ifr32)))
+		return -EFAULT;
+
+	dev = dev_get_by_index(ifr32.ifr_ifindex);
+	if (!dev)
+		return -ENODEV;
+
+	strlcpy(ifr32.ifr_name, dev->name, sizeof(ifr32.ifr_name));
+	dev_put(dev);
+	
+	err = copy_to_user(compat_ptr(arg), &ifr32, sizeof(ifr32));
+	return (err ? -EFAULT : 0);
+}
+
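+/*
+ * SIOCGIFCONF: build a native struct ifconf (with a correspondingly larger
+ * ifreq buffer) in compat_alloc_user_space(), run the real ioctl, copy the
+ * returned entries back into the caller's 32-bit ifreq array and adjust
+ * ifc_len for the 32-bit record size.
+ */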
+static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ifconf32 ifc32;
+	struct ifconf ifc;
+	struct ifconf __user *uifc;
+	struct ifreq32 __user *ifr32;
+	struct ifreq __user *ifr;
+	unsigned int i, j;
+	int err;
+
+	if (copy_from_user(&ifc32, compat_ptr(arg), sizeof(struct ifconf32)))
+		return -EFAULT;
+
+	if (ifc32.ifcbuf == 0) {
+		ifc32.ifc_len = 0;
+		ifc.ifc_len = 0;
+		ifc.ifc_req = NULL;
+		uifc = compat_alloc_user_space(sizeof(struct ifconf));
+	} else {
+		size_t len =((ifc32.ifc_len / sizeof (struct ifreq32)) + 1) *
+			sizeof (struct ifreq);
+		uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
+		ifc.ifc_len = len;
+		ifr = ifc.ifc_req = (void __user *)(uifc + 1);
+		ifr32 = compat_ptr(ifc32.ifcbuf);
+		for (i = 0; i < ifc32.ifc_len; i += sizeof (struct ifreq32)) {
+			if (copy_in_user(ifr, ifr32, sizeof(struct ifreq32)))
+				return -EFAULT;
+			ifr++;
+			ifr32++; 
+		}
+	}
+	if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
+		return -EFAULT;
+
+	err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)uifc);	
+	if (err)
+		return err;
+
+	if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) 
+		return -EFAULT;
+
+	ifr = ifc.ifc_req;
+	ifr32 = compat_ptr(ifc32.ifcbuf);
+	for (i = 0, j = 0; i < ifc32.ifc_len && j < ifc.ifc_len;
+	     i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) {
+		if (copy_in_user(ifr32, ifr, sizeof (struct ifreq32)))
+			return -EFAULT;
+		ifr32++;
+		ifr++;
+	}
+
+	if (ifc32.ifcbuf == 0) {
+		/* Translate the returned length from a multiple of the
+		 * 64-bit struct ifreq size to a multiple of the 32-bit one.
+		 */
+		i = ifc.ifc_len;
+		i = ((i / sizeof(struct ifreq)) * sizeof(struct ifreq32));
+		ifc32.ifc_len = i;
+	} else {
+		if (i <= ifc32.ifc_len)
+			ifc32.ifc_len = i;
+		else
+			ifc32.ifc_len = i - sizeof (struct ifreq32);
+	}
+	if (copy_to_user(compat_ptr(arg), &ifc32, sizeof(struct ifconf32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ifreq __user *ifr;
+	struct ifreq32 __user *ifr32;
+	u32 data;
+	void __user *datap;
+	
+	ifr = compat_alloc_user_space(sizeof(*ifr));
+	ifr32 = compat_ptr(arg);
+
+	if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+		return -EFAULT;
+
+	if (get_user(data, &ifr32->ifr_ifru.ifru_data))
+		return -EFAULT;
+
+	datap = compat_ptr(data);
+	if (put_user(datap, &ifr->ifr_ifru.ifru_data))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long) ifr);
+}
+
+static int bond_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ifreq kifr;
+	struct ifreq __user *uifr;
+	struct ifreq32 __user *ifr32 = compat_ptr(arg);
+	mm_segment_t old_fs;
+	int err;
+	u32 data;
+	void __user *datap;
+
+	switch (cmd) {
+	case SIOCBONDENSLAVE:
+	case SIOCBONDRELEASE:
+	case SIOCBONDSETHWADDR:
+	case SIOCBONDCHANGEACTIVE:
+		if (copy_from_user(&kifr, ifr32, sizeof(struct ifreq32)))
+			return -EFAULT;
+
+		old_fs = get_fs();
+		set_fs (KERNEL_DS);
+		err = sys_ioctl (fd, cmd, (unsigned long)&kifr);
+		set_fs (old_fs);
+
+		return err;
+	case SIOCBONDSLAVEINFOQUERY:
+	case SIOCBONDINFOQUERY:
+		uifr = compat_alloc_user_space(sizeof(*uifr));
+		if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+			return -EFAULT;
+
+		if (get_user(data, &ifr32->ifr_ifru.ifru_data))
+			return -EFAULT;
+
+		datap = compat_ptr(data);
+		if (put_user(datap, &uifr->ifr_ifru.ifru_data))
+			return -EFAULT;
+
+		return sys_ioctl (fd, cmd, (unsigned long)uifr);
+	default:
+		return -EINVAL;
+	};
+}
+
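+/*
+ * SIOCDEVPRIVATE ioctls carry a driver-defined payload, so the best we can
+ * do generically is rebuild the ifreq with the ifru_data pointer widened
+ * from 32 to 64 bits and pass the request through unchanged.
+ */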
+int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ifreq __user *u_ifreq64;
+	struct ifreq32 __user *u_ifreq32 = compat_ptr(arg);
+	char tmp_buf[IFNAMSIZ];
+	void __user *data64;
+	u32 data32;
+
+	if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
+			   IFNAMSIZ))
+		return -EFAULT;
+	if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+		return -EFAULT;
+	data64 = compat_ptr(data32);
+
+	u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
+
+	/* Don't check these user accesses, just let that get trapped
+	 * in the ioctl handler instead.
+	 */
+	if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
+			 IFNAMSIZ))
+		return -EFAULT;
+	if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long) u_ifreq64);
+}
+
+static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ifreq ifr;
+	struct ifreq32 __user *uifr32;
+	struct ifmap32 __user *uifmap32;
+	mm_segment_t old_fs;
+	int err;
+	
+	uifr32 = compat_ptr(arg);
+	uifmap32 = &uifr32->ifr_ifru.ifru_map;
+	switch (cmd) {
+	case SIOCSIFMAP:
+		err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
+		err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+		err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+		err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+		err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
+		err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
+		err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
+		if (err)
+			return -EFAULT;
+		break;
+	default:
+		if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
+			return -EFAULT;
+		break;
+	}
+	old_fs = get_fs();
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
+	set_fs (old_fs);
+	if (!err) {
+		switch (cmd) {
+		/* TUNSETIFF is defined as _IOW; it should be _IOWR,
+		 * as the data is copied back to user space, but that
+		 * cannot be fixed without breaking all existing apps.
+		 */
+		case TUNSETIFF:
+		case SIOCGIFFLAGS:
+		case SIOCGIFMETRIC:
+		case SIOCGIFMTU:
+		case SIOCGIFMEM:
+		case SIOCGIFHWADDR:
+		case SIOCGIFINDEX:
+		case SIOCGIFADDR:
+		case SIOCGIFBRDADDR:
+		case SIOCGIFDSTADDR:
+		case SIOCGIFNETMASK:
+		case SIOCGIFTXQLEN:
+			if (copy_to_user(uifr32, &ifr, sizeof(*uifr32)))
+				return -EFAULT;
+			break;
+		case SIOCGIFMAP:
+			err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
+			err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+			err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+			err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+			err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
+			err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
+			err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
+			if (err)
+				err = -EFAULT;
+			break;
+		}
+	}
+	return err;
+}
+
+struct rtentry32 {
+        u32   		rt_pad1;
+        struct sockaddr rt_dst;         /* target address               */
+        struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
+        struct sockaddr rt_genmask;     /* target network mask (IP)     */
+        unsigned short  rt_flags;
+        short           rt_pad2;
+        u32   		rt_pad3;
+        unsigned char   rt_tos;
+        unsigned char   rt_class;
+        short           rt_pad4;
+        short           rt_metric;      /* +1 for binary compatibility! */
+        /* char * */ u32 rt_dev;        /* forcing the device at add    */
+        u32   		rt_mtu;         /* per route MTU/Window         */
+        u32   		rt_window;      /* Window clamping              */
+        unsigned short  rt_irtt;        /* Initial RTT                  */
+
+};
+
+struct in6_rtmsg32 {
+	struct in6_addr		rtmsg_dst;
+	struct in6_addr		rtmsg_src;
+	struct in6_addr		rtmsg_gateway;
+	u32			rtmsg_type;
+	u16			rtmsg_dst_len;
+	u16			rtmsg_src_len;
+	u32			rtmsg_metric;
+	u32			rtmsg_info;
+	u32			rtmsg_flags;
+	s32			rtmsg_ifindex;
+};
+
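+/*
+ * Route ioctls take either a struct rtentry (IPv4) or a struct in6_rtmsg
+ * (IPv6); peek at the socket's address family to decide which 32-bit
+ * layout the caller passed, convert it onto the kernel stack and issue
+ * the native ioctl under KERNEL_DS.
+ */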
+static int routing_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	void *r = NULL;
+	struct in6_rtmsg r6;
+	struct rtentry r4;
+	char devname[16];
+	u32 rtdev;
+	mm_segment_t old_fs = get_fs();
+	
+	struct socket *mysock = sockfd_lookup(fd, &ret);
+
+	if (mysock && mysock->sk && mysock->sk->sk_family == AF_INET6) { /* ipv6 */
+		struct in6_rtmsg32 __user *ur6 = compat_ptr(arg);
+		ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
+			3 * sizeof(struct in6_addr));
+		ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
+		ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+		ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+		ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
+		ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
+		ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
+		ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+		
+		r = (void *) &r6;
+	} else { /* ipv4 */
+		struct rtentry32 __user *ur4 = compat_ptr(arg);
+		ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
+					3 * sizeof(struct sockaddr));
+		ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
+		ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
+		ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
+		ret |= __get_user (r4.rt_window, &(ur4->rt_window));
+		ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
+		ret |= __get_user (rtdev, &(ur4->rt_dev));
+		if (rtdev) {
+			ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
+			r4.rt_dev = devname; devname[15] = 0;
+		} else
+			r4.rt_dev = NULL;
+
+		r = (void *) &r4;
+	}
+
+	if (ret)
+		return -EFAULT;
+
+	set_fs (KERNEL_DS);
+	ret = sys_ioctl (fd, cmd, (unsigned long) r);
+	set_fs (old_fs);
+
+	if (mysock)
+		sockfd_put(mysock);
+
+	return ret;
+}
+#endif
+
+struct hd_geometry32 {
+	unsigned char heads;
+	unsigned char sectors;
+	unsigned short cylinders;
+	u32 start;
+};
+                        
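+/*
+ * struct hd_geometry and hd_geometry32 differ only in the type of 'start',
+ * so copying the first four bytes covers heads/sectors/cylinders and
+ * 'start' is narrowed to a u32 separately.
+ */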
+static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct hd_geometry geo;
+	struct hd_geometry32 __user *ugeo;
+	int err;
+	
+	set_fs (KERNEL_DS);
+	err = sys_ioctl(fd, HDIO_GETGEO, (unsigned long)&geo);
+	set_fs (old_fs);
+	ugeo = compat_ptr(arg);
+	if (!err) {
+		err = copy_to_user (ugeo, &geo, 4);
+		err |= __put_user (geo.start, &ugeo->start);
+	}
+	return err ? -EFAULT : 0;
+}
+
+struct fb_fix_screeninfo32 {
+	char			id[16];
+        compat_caddr_t	smem_start;
+	u32			smem_len;
+	u32			type;
+	u32			type_aux;
+	u32			visual;
+	u16			xpanstep;
+	u16			ypanstep;
+	u16			ywrapstep;
+	u32			line_length;
+        compat_caddr_t	mmio_start;
+	u32			mmio_len;
+	u32			accel;
+	u16			reserved[3];
+};
+
+struct fb_cmap32 {
+	u32			start;
+	u32			len;
+	compat_caddr_t	red;
+	compat_caddr_t	green;
+	compat_caddr_t	blue;
+	compat_caddr_t	transp;
+};
+
+static int fb_getput_cmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct fb_cmap_user __user *cmap;
+	struct fb_cmap32 __user *cmap32;
+	__u32 data;
+	int err;
+
+	cmap = compat_alloc_user_space(sizeof(*cmap));
+	cmap32 = compat_ptr(arg);
+
+	if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, &cmap32->red) ||
+	    put_user(compat_ptr(data), &cmap->red) ||
+	    get_user(data, &cmap32->green) ||
+	    put_user(compat_ptr(data), &cmap->green) ||
+	    get_user(data, &cmap32->blue) ||
+	    put_user(compat_ptr(data), &cmap->blue) ||
+	    get_user(data, &cmap32->transp) ||
+	    put_user(compat_ptr(data), &cmap->transp))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, cmd, (unsigned long) cmap);
+
+	if (!err) {
+		if (copy_in_user(&cmap32->start,
+				 &cmap->start,
+				 2 * sizeof(__u32)))
+			err = -EFAULT;
+	}
+	return err;
+}
+
+static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
+				  struct fb_fix_screeninfo32 __user *fix32)
+{
+	__u32 data;
+	int err;
+
+	err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
+
+	data = (__u32) (unsigned long) fix->smem_start;
+	err |= put_user(data, &fix32->smem_start);
+
+	err |= put_user(fix->smem_len, &fix32->smem_len);
+	err |= put_user(fix->type, &fix32->type);
+	err |= put_user(fix->type_aux, &fix32->type_aux);
+	err |= put_user(fix->visual, &fix32->visual);
+	err |= put_user(fix->xpanstep, &fix32->xpanstep);
+	err |= put_user(fix->ypanstep, &fix32->ypanstep);
+	err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
+	err |= put_user(fix->line_length, &fix32->line_length);
+
+	data = (__u32) (unsigned long) fix->mmio_start;
+	err |= put_user(data, &fix32->mmio_start);
+
+	err |= put_user(fix->mmio_len, &fix32->mmio_len);
+	err |= put_user(fix->accel, &fix32->accel);
+	err |= copy_to_user(fix32->reserved, fix->reserved,
+			    sizeof(fix->reserved));
+
+	return err;
+}
+
+static int fb_get_fscreeninfo(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs;
+	struct fb_fix_screeninfo fix;
+	struct fb_fix_screeninfo32 __user *fix32;
+	int err;
+
+	fix32 = compat_ptr(arg);
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long) &fix);
+	set_fs(old_fs);
+
+	if (!err)
+		err = do_fscreeninfo_to_user(&fix, fix32);
+
+	return err;
+}
+
+static int fb_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	int err;
+
+	switch (cmd) {
+	case FBIOGET_FSCREENINFO:
+		err = fb_get_fscreeninfo(fd, cmd, arg);
+		break;
+
+  	case FBIOGETCMAP:
+	case FBIOPUTCMAP:
+		err = fb_getput_cmap(fd, cmd, arg);
+		break;
+
+	default:
+		do {
+			static int count;
+			if (++count <= 20)
+				printk("%s: Unknown fb ioctl cmd fd(%d) "
+				       "cmd(%08x) arg(%08lx)\n",
+				       __FUNCTION__, fd, cmd, arg);
+		} while(0);
+		err = -ENOSYS;
+		break;
+	};
+
+	return err;
+}
+
+static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	unsigned long kval;
+	unsigned int __user *uvp;
+	int error;
+
+	set_fs(KERNEL_DS);
+	error = sys_ioctl(fd, cmd, (long)&kval);
+	set_fs(old_fs);
+
+	if(error == 0) {
+		uvp = compat_ptr(arg);
+		if(put_user(kval, uvp))
+			error = -EFAULT;
+	}
+	return error;
+}
+
+
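+/*
+ * SG_IO: sg_io_hdr_t mixes integers with pointers, so build a native header
+ * (plus a widened iovec array when iovec_count is non-zero) in compat user
+ * space, forward the ioctl, and copy the output fields back afterwards.
+ */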
+typedef struct sg_io_hdr32 {
+	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
+	compat_int_t dxfer_direction;	/* [i] data transfer direction  */
+	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
+	unsigned char mx_sb_len;		/* [i] max length to write to sbp */
+	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
+	compat_uint_t dxfer_len;		/* [i] byte count of data transfer */
+	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
+					      or scatter gather list */
+	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
+	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
+	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
+	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
+	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
+	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
+	unsigned char status;		/* [o] scsi status */
+	unsigned char masked_status;	/* [o] shifted, masked scsi status */
+	unsigned char msg_status;		/* [o] messaging level data (optional) */
+	unsigned char sb_len_wr;		/* [o] byte count actually written to sbp */
+	unsigned short host_status;	/* [o] errors from host adapter */
+	unsigned short driver_status;	/* [o] errors from software driver */
+	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
+	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
+	compat_uint_t info;		/* [o] auxiliary information */
+} sg_io_hdr32_t;  /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+	compat_uint_t iov_base;
+	compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+	sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+	sg_iovec32_t __user *iov32 = dxferp;
+	int i;
+
+	for (i = 0; i < iovec_count; i++) {
+		u32 base, len;
+
+		if (get_user(base, &iov32[i].iov_base) ||
+		    get_user(len, &iov32[i].iov_len) ||
+		    put_user(compat_ptr(base), &iov[i].iov_base) ||
+		    put_user(len, &iov[i].iov_len))
+			return -EFAULT;
+	}
+
+	if (put_user(iov, &sgio->dxferp))
+		return -EFAULT;
+	return 0;
+}
+
+static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	sg_io_hdr_t __user *sgio;
+	sg_io_hdr32_t __user *sgio32;
+	u16 iovec_count;
+	u32 data;
+	void __user *dxferp;
+	int err;
+
+	sgio32 = compat_ptr(arg);
+	if (get_user(iovec_count, &sgio32->iovec_count))
+		return -EFAULT;
+
+	{
+		void __user *top = compat_alloc_user_space(0);
+		void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+				       (iovec_count * sizeof(sg_iovec_t)));
+		if (new > top)
+			return -EINVAL;
+
+		sgio = new;
+	}
+
+	/* Ok, now construct.  */
+	if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+			 (2 * sizeof(int)) +
+			 (2 * sizeof(unsigned char)) +
+			 (1 * sizeof(unsigned short)) +
+			 (1 * sizeof(unsigned int))))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->dxferp))
+		return -EFAULT;
+	dxferp = compat_ptr(data);
+	if (iovec_count) {
+		if (sg_build_iovec(sgio, dxferp, iovec_count))
+			return -EFAULT;
+	} else {
+		if (put_user(dxferp, &sgio->dxferp))
+			return -EFAULT;
+	}
+
+	{
+		unsigned char __user *cmdp;
+		unsigned char __user *sbp;
+
+		if (get_user(data, &sgio32->cmdp))
+			return -EFAULT;
+		cmdp = compat_ptr(data);
+
+		if (get_user(data, &sgio32->sbp))
+			return -EFAULT;
+		sbp = compat_ptr(data);
+
+		if (put_user(cmdp, &sgio->cmdp) ||
+		    put_user(sbp, &sgio->sbp))
+			return -EFAULT;
+	}
+
+	if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+			 3 * sizeof(int)))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->usr_ptr))
+		return -EFAULT;
+	if (put_user(compat_ptr(data), &sgio->usr_ptr))
+		return -EFAULT;
+
+	if (copy_in_user(&sgio->status, &sgio32->status,
+			 (4 * sizeof(unsigned char)) +
+			 (2 * sizeof(unsigned short)) +
+			 (3 * sizeof(int))))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, cmd, (unsigned long) sgio);
+
+	if (err >= 0) {
+		void __user *datap;
+
+		if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+				 sizeof(int)) ||
+		    get_user(datap, &sgio->usr_ptr) ||
+		    put_user((u32)(unsigned long)datap,
+			     &sgio32->usr_ptr) ||
+		    copy_in_user(&sgio32->status, &sgio->status,
+				 (4 * sizeof(unsigned char)) +
+				 (2 * sizeof(unsigned short)) +
+				 (3 * sizeof(int))))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
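+/*
+ * PPPIOCSPASS/PPPIOCSACTIVE pass a struct sock_fprog whose 'filter' member
+ * is a pointer, so the size encoded in the 32-bit ioctl number differs from
+ * the native one.  Widen the pointer and remap the command.
+ */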
+struct sock_fprog32 {
+	unsigned short	len;
+	compat_caddr_t	filter;
+};
+
+#define PPPIOCSPASS32	_IOW('t', 71, struct sock_fprog32)
+#define PPPIOCSACTIVE32	_IOW('t', 70, struct sock_fprog32)
+
+static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct sock_fprog32 __user *u_fprog32 = compat_ptr(arg);
+	struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
+	void __user *fptr64;
+	u32 fptr32;
+	u16 flen;
+
+	if (get_user(flen, &u_fprog32->len) ||
+	    get_user(fptr32, &u_fprog32->filter))
+		return -EFAULT;
+
+	fptr64 = compat_ptr(fptr32);
+
+	if (put_user(flen, &u_fprog64->len) ||
+	    put_user(fptr64, &u_fprog64->filter))
+		return -EFAULT;
+
+	if (cmd == PPPIOCSPASS32)
+		cmd = PPPIOCSPASS;
+	else
+		cmd = PPPIOCSACTIVE;
+
+	return sys_ioctl(fd, cmd, (unsigned long) u_fprog64);
+}
+
+struct ppp_option_data32 {
+	compat_caddr_t	ptr;
+	u32			length;
+	compat_int_t		transmit;
+};
+#define PPPIOCSCOMPRESS32	_IOW('t', 77, struct ppp_option_data32)
+
+struct ppp_idle32 {
+	compat_time_t xmit_idle;
+	compat_time_t recv_idle;
+};
+#define PPPIOCGIDLE32		_IOR('t', 63, struct ppp_idle32)
+
+static int ppp_gidle(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ppp_idle __user *idle;
+	struct ppp_idle32 __user *idle32;
+	__kernel_time_t xmit, recv;
+	int err;
+
+	idle = compat_alloc_user_space(sizeof(*idle));
+	idle32 = compat_ptr(arg);
+
+	err = sys_ioctl(fd, PPPIOCGIDLE, (unsigned long) idle);
+
+	if (!err) {
+		if (get_user(xmit, &idle->xmit_idle) ||
+		    get_user(recv, &idle->recv_idle) ||
+		    put_user(xmit, &idle32->xmit_idle) ||
+		    put_user(recv, &idle32->recv_idle))
+			err = -EFAULT;
+	}
+	return err;
+}
+
+static int ppp_scompress(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ppp_option_data __user *odata;
+	struct ppp_option_data32 __user *odata32;
+	__u32 data;
+	void __user *datap;
+
+	odata = compat_alloc_user_space(sizeof(*odata));
+	odata32 = compat_ptr(arg);
+
+	if (get_user(data, &odata32->ptr))
+		return -EFAULT;
+
+	datap = compat_ptr(data);
+	if (put_user(datap, &odata->ptr))
+		return -EFAULT;
+
+	if (copy_in_user(&odata->length, &odata32->length,
+			 sizeof(__u32) + sizeof(int)))
+		return -EFAULT;
+
+	return sys_ioctl(fd, PPPIOCSCOMPRESS, (unsigned long) odata);
+}
+
+static int ppp_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	int err;
+
+	switch (cmd) {
+	case PPPIOCGIDLE32:
+		err = ppp_gidle(fd, cmd, arg);
+		break;
+
+	case PPPIOCSCOMPRESS32:
+		err = ppp_scompress(fd, cmd, arg);
+		break;
+
+	default:
+		do {
+			static int count;
+			if (++count <= 20)
+				printk("ppp_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while(0);
+		err = -EINVAL;
+		break;
+	};
+
+	return err;
+}
+
+
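+/*
+ * Tape ioctls: struct mtget and struct mtpos are built from longs and
+ * daddr_t, which are 32 bits wide for compat tasks.  Run the ioctl on a
+ * kernel copy and store the result field by field in the 32-bit layout.
+ */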
+struct mtget32 {
+	compat_long_t	mt_type;
+	compat_long_t	mt_resid;
+	compat_long_t	mt_dsreg;
+	compat_long_t	mt_gstat;
+	compat_long_t	mt_erreg;
+	compat_daddr_t	mt_fileno;
+	compat_daddr_t	mt_blkno;
+};
+#define MTIOCGET32	_IOR('m', 2, struct mtget32)
+
+struct mtpos32 {
+	compat_long_t	mt_blkno;
+};
+#define MTIOCPOS32	_IOR('m', 3, struct mtpos32)
+
+static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct mtget get;
+	struct mtget32 __user *umget32;
+	struct mtpos pos;
+	struct mtpos32 __user *upos32;
+	unsigned long kcmd;
+	void *karg;
+	int err = 0;
+
+	switch(cmd) {
+	case MTIOCPOS32:
+		kcmd = MTIOCPOS;
+		karg = &pos;
+		break;
+	case MTIOCGET32:
+		kcmd = MTIOCGET;
+		karg = &get;
+		break;
+	default:
+		do {
+			static int count;
+			if (++count <= 20)
+				printk("mt_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while(0);
+		return -EINVAL;
+	}
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, kcmd, (unsigned long)karg);
+	set_fs (old_fs);
+	if (err)
+		return err;
+	switch (cmd) {
+	case MTIOCPOS32:
+		upos32 = compat_ptr(arg);
+		err = __put_user(pos.mt_blkno, &upos32->mt_blkno);
+		break;
+	case MTIOCGET32:
+		umget32 = compat_ptr(arg);
+		err = __put_user(get.mt_type, &umget32->mt_type);
+		err |= __put_user(get.mt_resid, &umget32->mt_resid);
+		err |= __put_user(get.mt_dsreg, &umget32->mt_dsreg);
+		err |= __put_user(get.mt_gstat, &umget32->mt_gstat);
+		err |= __put_user(get.mt_erreg, &umget32->mt_erreg);
+		err |= __put_user(get.mt_fileno, &umget32->mt_fileno);
+		err |= __put_user(get.mt_blkno, &umget32->mt_blkno);
+		break;
+	}
+	return err ? -EFAULT: 0;
+}
+
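+/*
+ * CDROMREADAUDIO and CDROM_SEND_PACKET both embed pointers (audio buffer,
+ * packet buffer, sense data), so rebuild the native structures in compat
+ * user space with those members widened before forwarding the ioctl.
+ */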
+struct cdrom_read_audio32 {
+	union cdrom_addr	addr;
+	u8			addr_format;
+	compat_int_t		nframes;
+	compat_caddr_t		buf;
+};
+
+struct cdrom_generic_command32 {
+	unsigned char	cmd[CDROM_PACKET_SIZE];
+	compat_caddr_t	buffer;
+	compat_uint_t	buflen;
+	compat_int_t	stat;
+	compat_caddr_t	sense;
+	unsigned char	data_direction;
+	compat_int_t	quiet;
+	compat_int_t	timeout;
+	compat_caddr_t	reserved[1];
+};
+  
+static int cdrom_do_read_audio(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct cdrom_read_audio __user *cdread_audio;
+	struct cdrom_read_audio32 __user *cdread_audio32;
+	__u32 data;
+	void __user *datap;
+
+	cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
+	cdread_audio32 = compat_ptr(arg);
+
+	if (copy_in_user(&cdread_audio->addr,
+			 &cdread_audio32->addr,
+			 (sizeof(*cdread_audio32) -
+			  sizeof(compat_caddr_t))))
+	 	return -EFAULT;
+
+	if (get_user(data, &cdread_audio32->buf))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(datap, &cdread_audio->buf))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long) cdread_audio);
+}
+
+static int cdrom_do_generic_command(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct cdrom_generic_command __user *cgc;
+	struct cdrom_generic_command32 __user *cgc32;
+	u32 data;
+	unsigned char dir;
+	int itmp;
+
+	cgc = compat_alloc_user_space(sizeof(*cgc));
+	cgc32 = compat_ptr(arg);
+
+	if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
+	    get_user(data, &cgc32->buffer) ||
+	    put_user(compat_ptr(data), &cgc->buffer) ||
+	    copy_in_user(&cgc->buflen, &cgc32->buflen,
+			 (sizeof(unsigned int) + sizeof(int))) ||
+	    get_user(data, &cgc32->sense) ||
+	    put_user(compat_ptr(data), &cgc->sense) ||
+	    get_user(dir, &cgc32->data_direction) ||
+	    put_user(dir, &cgc->data_direction) ||
+	    get_user(itmp, &cgc32->quiet) ||
+	    put_user(itmp, &cgc->quiet) ||
+	    get_user(itmp, &cgc32->timeout) ||
+	    put_user(itmp, &cgc->timeout) ||
+	    get_user(data, &cgc32->reserved[0]) ||
+	    put_user(compat_ptr(data), &cgc->reserved[0]))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long) cgc);
+}
+
+static int cdrom_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	int err;
+
+	switch(cmd) {
+	case CDROMREADAUDIO:
+		err = cdrom_do_read_audio(fd, cmd, arg);
+		break;
+
+	case CDROM_SEND_PACKET:
+		err = cdrom_do_generic_command(fd, cmd, arg);
+		break;
+
+	default:
+		do {
+			static int count;
+			if (++count <= 20)
+				printk("cdrom_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while(0);
+		err = -EINVAL;
+		break;
+	};
+
+	return err;
+}
+
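+/*
+ * LOOP_SET_STATUS/LOOP_GET_STATUS: struct loop_info mixes members whose
+ * sizes differ between the 32-bit and native ABIs, so translate through a
+ * kernel struct loop_info under KERNEL_DS.
+ */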
+struct loop_info32 {
+	compat_int_t	lo_number;      /* ioctl r/o */
+	compat_dev_t	lo_device;      /* ioctl r/o */
+	compat_ulong_t	lo_inode;       /* ioctl r/o */
+	compat_dev_t	lo_rdevice;     /* ioctl r/o */
+	compat_int_t	lo_offset;
+	compat_int_t	lo_encrypt_type;
+	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
+	compat_int_t	lo_flags;       /* ioctl r/o */
+	char		lo_name[LO_NAME_SIZE];
+	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
+	compat_ulong_t	lo_init[2];
+	char		reserved[4];
+};
+
+static int loop_status(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct loop_info l;
+	struct loop_info32 __user *ul;
+	int err = -EINVAL;
+
+	ul = compat_ptr(arg);
+	switch(cmd) {
+	case LOOP_SET_STATUS:
+		err = get_user(l.lo_number, &ul->lo_number);
+		err |= __get_user(l.lo_device, &ul->lo_device);
+		err |= __get_user(l.lo_inode, &ul->lo_inode);
+		err |= __get_user(l.lo_rdevice, &ul->lo_rdevice);
+		err |= __copy_from_user(&l.lo_offset, &ul->lo_offset,
+		        8 + (unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+		if (err) {
+			err = -EFAULT;
+		} else {
+			set_fs (KERNEL_DS);
+			err = sys_ioctl (fd, cmd, (unsigned long)&l);
+			set_fs (old_fs);
+		}
+		break;
+	case LOOP_GET_STATUS:
+		set_fs (KERNEL_DS);
+		err = sys_ioctl (fd, cmd, (unsigned long)&l);
+		set_fs (old_fs);
+		if (!err) {
+			err = put_user(l.lo_number, &ul->lo_number);
+			err |= __put_user(l.lo_device, &ul->lo_device);
+			err |= __put_user(l.lo_inode, &ul->lo_inode);
+			err |= __put_user(l.lo_rdevice, &ul->lo_rdevice);
+			err |= __copy_to_user(&ul->lo_offset, &l.lo_offset,
+				(unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+			if (err)
+				err = -EFAULT;
+		}
+		break;
+	default: {
+		static int count;
+		if (++count <= 20)
+			printk("%s: Unknown loop ioctl cmd, fd(%d) "
+			       "cmd(%08x) arg(%08lx)\n",
+			       __FUNCTION__, fd, cmd, arg);
+	}
+	}
+	return err;
+}
+
+extern int tty_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_VT
+
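+/*
+ * vt_check() returns a negative errno if the file is not a VT tty, 0 if the
+ * caller lacks permission (the callers then allow only "get" operations),
+ * and 1 if the caller owns the tty or has CAP_SYS_ADMIN.
+ */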
+static int vt_check(struct file *file)
+{
+	struct tty_struct *tty;
+	struct inode *inode = file->f_dentry->d_inode;
+	
+	if (file->f_op->ioctl != tty_ioctl)
+		return -EINVAL;
+	                
+	tty = (struct tty_struct *)file->private_data;
+	if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+		return -EINVAL;
+	                                                
+	if (tty->driver->ioctl != vt_ioctl)
+		return -EINVAL;
+	
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or super-user.
+	 */
+	if (current->signal->tty == tty || capable(CAP_SYS_ADMIN))
+		return 1;
+	return 0;                                                    
+}
+
+struct consolefontdesc32 {
+	unsigned short charcount;       /* characters in font (256 or 512) */
+	unsigned short charheight;      /* scan lines per character (1-32) */
+	compat_caddr_t chardata;	/* font data in expanded form */
+};
+
+static int do_fontx_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
+{
+	struct consolefontdesc32 __user *user_cfd = compat_ptr(arg);
+	struct console_font_op op;
+	compat_caddr_t data;
+	int i, perm;
+
+	perm = vt_check(file);
+	if (perm < 0) return perm;
+	
+	switch (cmd) {
+	case PIO_FONTX:
+		if (!perm)
+			return -EPERM;
+		op.op = KD_FONT_OP_SET;
+		op.flags = 0;
+		op.width = 8;
+		if (get_user(op.height, &user_cfd->charheight) ||
+		    get_user(op.charcount, &user_cfd->charcount) ||
+		    get_user(data, &user_cfd->chardata))
+			return -EFAULT;
+		op.data = compat_ptr(data);
+		return con_font_op(vc_cons[fg_console].d, &op);
+	case GIO_FONTX:
+		op.op = KD_FONT_OP_GET;
+		op.flags = 0;
+		op.width = 8;
+		if (get_user(op.height, &user_cfd->charheight) ||
+		    get_user(op.charcount, &user_cfd->charcount) ||
+		    get_user(data, &user_cfd->chardata))
+			return -EFAULT;
+		if (!data)
+			return 0;
+		op.data = compat_ptr(data);
+		i = con_font_op(vc_cons[fg_console].d, &op);
+		if (i)
+			return i;
+		if (put_user(op.height, &user_cfd->charheight) ||
+		    put_user(op.charcount, &user_cfd->charcount) ||
+		    put_user((compat_caddr_t)(unsigned long)op.data,
+				&user_cfd->chardata))
+			return -EFAULT;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+struct console_font_op32 {
+	compat_uint_t op;        /* operation code KD_FONT_OP_* */
+	compat_uint_t flags;     /* KD_FONT_FLAG_* */
+	compat_uint_t width, height;     /* font size */
+	compat_uint_t charcount;
+	compat_caddr_t data;    /* font data with height fixed to 32 */
+};
+                                        
+static int do_kdfontop_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
+{
+	struct console_font_op op;
+	struct console_font_op32 __user *fontop = compat_ptr(arg);
+	int perm = vt_check(file), i;
+	struct vc_data *vc;
+	
+	if (perm < 0) return perm;
+	
+	if (copy_from_user(&op, fontop, sizeof(struct console_font_op32)))
+		return -EFAULT;
+	if (!perm && op.op != KD_FONT_OP_GET)
+		return -EPERM;
+	op.data = compat_ptr(((struct console_font_op32 *)&op)->data);
+	op.flags |= KD_FONT_FLAG_OLD;
+	vc = ((struct tty_struct *)file->private_data)->driver_data;
+	i = con_font_op(vc, &op);
+	if (i)
+		return i;
+	((struct console_font_op32 *)&op)->data = (unsigned long)op.data;
+	if (copy_to_user(fontop, &op, sizeof(struct console_font_op32)))
+		return -EFAULT;
+	return 0;
+}
+
+struct unimapdesc32 {
+	unsigned short entry_ct;
+	compat_caddr_t entries;
+};
+
+static int do_unimap_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
+{
+	struct unimapdesc32 tmp;
+	struct unimapdesc32 __user *user_ud = compat_ptr(arg);
+	int perm = vt_check(file);
+	
+	if (perm < 0) return perm;
+	if (copy_from_user(&tmp, user_ud, sizeof tmp))
+		return -EFAULT;
+	switch (cmd) {
+	case PIO_UNIMAP:
+		if (!perm) return -EPERM;
+		return con_set_unimap(vc_cons[fg_console].d, tmp.entry_ct, compat_ptr(tmp.entries));
+	case GIO_UNIMAP:
+		return con_get_unimap(vc_cons[fg_console].d, tmp.entry_ct, &(user_ud->entry_ct), compat_ptr(tmp.entries));
+	}
+	return 0;
+}
+
+#endif /* CONFIG_VT */
+
+static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	__kernel_uid_t kuid;
+	int err;
+
+	cmd = SMB_IOC_GETMOUNTUID;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&kuid);
+	set_fs(old_fs);
+
+	if (err >= 0)
+		err = put_user(kuid, (compat_uid_t __user *)compat_ptr(arg));
+
+	return err;
+}
+
+struct atmif_sioc32 {
+        compat_int_t	number;
+        compat_int_t	length;
+        compat_caddr_t	arg;
+};
+
+struct atm_iobuf32 {
+	compat_int_t	length;
+	compat_caddr_t	buffer;
+};
+
+#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32)
+#define ATM_GETNAMES32    _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32)
+#define ATM_GETTYPE32     _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32)
+#define ATM_GETESI32	  _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32)
+#define ATM_GETADDR32	  _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32)
+#define ATM_RSTADDR32	  _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32)
+#define ATM_ADDADDR32	  _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32)
+#define ATM_DELADDR32	  _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32)
+#define ATM_GETCIRANGE32  _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32)
+#define ATM_SETCIRANGE32  _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32)
+#define ATM_SETESI32      _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32)
+#define ATM_SETESIF32     _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32)
+#define ATM_GETSTAT32     _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32)
+#define ATM_GETSTATZ32    _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32)
+#define ATM_GETLOOP32	  _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32)
+#define ATM_SETLOOP32	  _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32)
+#define ATM_QUERYLOOP32	  _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32)
+
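+/*
+ * Most ATM ioctls pass a struct atmif_sioc or struct atm_iobuf, which differ
+ * from the native layouts only in the width of the embedded pointer (and so
+ * in the size encoded in the ioctl number).  Remap the command through
+ * atm_ioctl_map[] and widen the pointer in compat user space.
+ */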
+static struct {
+        unsigned int cmd32;
+        unsigned int cmd;
+} atm_ioctl_map[] = {
+        { ATM_GETLINKRATE32, ATM_GETLINKRATE },
+	{ ATM_GETNAMES32,    ATM_GETNAMES },
+        { ATM_GETTYPE32,     ATM_GETTYPE },
+        { ATM_GETESI32,      ATM_GETESI },
+        { ATM_GETADDR32,     ATM_GETADDR },
+        { ATM_RSTADDR32,     ATM_RSTADDR },
+        { ATM_ADDADDR32,     ATM_ADDADDR },
+        { ATM_DELADDR32,     ATM_DELADDR },
+        { ATM_GETCIRANGE32,  ATM_GETCIRANGE },
+	{ ATM_SETCIRANGE32,  ATM_SETCIRANGE },
+	{ ATM_SETESI32,      ATM_SETESI },
+	{ ATM_SETESIF32,     ATM_SETESIF },
+	{ ATM_GETSTAT32,     ATM_GETSTAT },
+	{ ATM_GETSTATZ32,    ATM_GETSTATZ },
+	{ ATM_GETLOOP32,     ATM_GETLOOP },
+	{ ATM_SETLOOP32,     ATM_SETLOOP },
+	{ ATM_QUERYLOOP32,   ATM_QUERYLOOP }
+};
+
+#define NR_ATM_IOCTL (sizeof(atm_ioctl_map)/sizeof(atm_ioctl_map[0]))
+
+
+static int do_atm_iobuf(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct atm_iobuf   __user *iobuf;
+	struct atm_iobuf32 __user *iobuf32;
+	u32 data;
+	void __user *datap;
+	int len, err;
+
+	iobuf = compat_alloc_user_space(sizeof(*iobuf));
+	iobuf32 = compat_ptr(arg);
+
+	if (get_user(len, &iobuf32->length) ||
+	    get_user(data, &iobuf32->buffer))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(len, &iobuf->length) ||
+	    put_user(datap, &iobuf->buffer))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, cmd, (unsigned long)iobuf);
+
+	if (!err) {
+		if (copy_in_user(&iobuf32->length, &iobuf->length,
+				 sizeof(int)))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
+static int do_atmif_sioc(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+        struct atmif_sioc   __user *sioc;
+	struct atmif_sioc32 __user *sioc32;
+	u32 data;
+	void __user *datap;
+	int err;
+        
+	sioc = compat_alloc_user_space(sizeof(*sioc));
+	sioc32 = compat_ptr(arg);
+
+	if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
+	    get_user(data, &sioc32->arg))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(datap, &sioc->arg))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, cmd, (unsigned long) sioc);
+
+	if (!err) {
+		if (copy_in_user(&sioc32->length, &sioc->length,
+				 sizeof(int)))
+			err = -EFAULT;
+	}
+	return err;
+}
+
+static int do_atm_ioctl(unsigned int fd, unsigned int cmd32, unsigned long arg)
+{
+        int i;
+        unsigned int cmd = 0;
+        
+	switch (cmd32) {
+	case SONET_GETSTAT:
+	case SONET_GETSTATZ:
+	case SONET_GETDIAG:
+	case SONET_SETDIAG:
+	case SONET_CLRDIAG:
+	case SONET_SETFRAMING:
+	case SONET_GETFRAMING:
+	case SONET_GETFRSENSE:
+		return do_atmif_sioc(fd, cmd32, arg);
+	}
+
+	for (i = 0; i < NR_ATM_IOCTL; i++) {
+		if (cmd32 == atm_ioctl_map[i].cmd32) {
+			cmd = atm_ioctl_map[i].cmd;
+			break;
+		}
+	}
+	if (i == NR_ATM_IOCTL)
+	        return -EINVAL;
+        
+        switch (cmd) {
+	case ATM_GETNAMES:
+		return do_atm_iobuf(fd, cmd, arg);
+	    
+	case ATM_GETLINKRATE:
+        case ATM_GETTYPE:
+        case ATM_GETESI:
+        case ATM_GETADDR:
+        case ATM_RSTADDR:
+        case ATM_ADDADDR:
+        case ATM_DELADDR:
+        case ATM_GETCIRANGE:
+	case ATM_SETCIRANGE:
+	case ATM_SETESI:
+	case ATM_SETESIF:
+	case ATM_GETSTAT:
+	case ATM_GETSTATZ:
+	case ATM_GETLOOP:
+	case ATM_SETLOOP:
+	case ATM_QUERYLOOP:
+                return do_atmif_sioc(fd, cmd, arg);
+        }
+
+        return -EINVAL;
+}
+
+static __attribute_used__ int 
+ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	/* The mkswap binary hard codes it to Intel value :-((( */
+	return w_long(fd, BLKGETSIZE, arg);
+}
+
+struct blkpg_ioctl_arg32 {
+	compat_int_t op;
+	compat_int_t flags;
+	compat_int_t datalen;
+	compat_caddr_t data;
+};
+
+static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct blkpg_ioctl_arg32 __user *ua32 = compat_ptr(arg);
+	struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
+	compat_caddr_t udata;
+	compat_int_t n;
+	int err;
+	
+	err = get_user(n, &ua32->op);
+	err |= put_user(n, &a->op);
+	err |= get_user(n, &ua32->flags);
+	err |= put_user(n, &a->flags);
+	err |= get_user(n, &ua32->datalen);
+	err |= put_user(n, &a->datalen);
+	err |= get_user(udata, &ua32->data);
+	err |= put_user(compat_ptr(udata), &a->data);
+	if (err)
+		return err;
+
+	return sys_ioctl(fd, cmd, (unsigned long)a);
+}
+
+static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
+}
+
+/* Fix sizeof(sizeof()) breakage */
+#define BLKBSZGET_32   _IOR(0x12,112,int)
+#define BLKBSZSET_32   _IOW(0x12,113,int)
+#define BLKGETSIZE64_32        _IOR(0x12,114,int)
+
+static int do_blkbszget(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+       return sys_ioctl(fd, BLKBSZGET, (unsigned long)compat_ptr(arg));
+}
+
+static int do_blkbszset(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+       return sys_ioctl(fd, BLKBSZSET, (unsigned long)compat_ptr(arg));
+}
+
+static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
+                          unsigned long arg)
+{
+       return sys_ioctl(fd, BLKGETSIZE64, (unsigned long)compat_ptr(arg));
+}
+
+/* Bluetooth ioctls */
+#define HCIUARTSETPROTO	_IOW('U', 200, int)
+#define HCIUARTGETPROTO	_IOR('U', 201, int)
+
+#define BNEPCONNADD	_IOW('B', 200, int)
+#define BNEPCONNDEL	_IOW('B', 201, int)
+#define BNEPGETCONNLIST	_IOR('B', 210, int)
+#define BNEPGETCONNINFO	_IOR('B', 211, int)
+
+#define CMTPCONNADD	_IOW('C', 200, int)
+#define CMTPCONNDEL	_IOW('C', 201, int)
+#define CMTPGETCONNLIST	_IOR('C', 210, int)
+#define CMTPGETCONNINFO	_IOR('C', 211, int)
+
+#define HIDPCONNADD	_IOW('H', 200, int)
+#define HIDPCONNDEL	_IOW('H', 201, int)
+#define HIDPGETCONNLIST	_IOR('H', 210, int)
+#define HIDPGETCONNINFO	_IOR('H', 211, int)
+
+struct floppy_struct32 {
+	compat_uint_t	size;
+	compat_uint_t	sect;
+	compat_uint_t	head;
+	compat_uint_t	track;
+	compat_uint_t	stretch;
+	unsigned char	gap;
+	unsigned char	rate;
+	unsigned char	spec1;
+	unsigned char	fmt_gap;
+	const compat_caddr_t name;
+};
+
+struct floppy_drive_params32 {
+	char		cmos;
+	compat_ulong_t	max_dtr;
+	compat_ulong_t	hlt;
+	compat_ulong_t	hut;
+	compat_ulong_t	srt;
+	compat_ulong_t	spinup;
+	compat_ulong_t	spindown;
+	unsigned char	spindown_offset;
+	unsigned char	select_delay;
+	unsigned char	rps;
+	unsigned char	tracks;
+	compat_ulong_t	timeout;
+	unsigned char	interleave_sect;
+	struct floppy_max_errors max_errors;
+	char		flags;
+	char		read_track;
+	short		autodetect[8];
+	compat_int_t	checkfreq;
+	compat_int_t	native_format;
+};
+
+struct floppy_drive_struct32 {
+	signed char	flags;
+	compat_ulong_t	spinup_date;
+	compat_ulong_t	select_date;
+	compat_ulong_t	first_read_date;
+	short		probed_format;
+	short		track;
+	short		maxblock;
+	short		maxtrack;
+	compat_int_t	generation;
+	compat_int_t	keep_data;
+	compat_int_t	fd_ref;
+	compat_int_t	fd_device;
+	compat_int_t	last_checked;
+	compat_caddr_t dmabuf;
+	compat_int_t	bufblocks;
+};
+
+struct floppy_fdc_state32 {
+	compat_int_t	spec1;
+	compat_int_t	spec2;
+	compat_int_t	dtr;
+	unsigned char	version;
+	unsigned char	dor;
+	compat_ulong_t	address;
+	unsigned int	rawcmd:2;
+	unsigned int	reset:1;
+	unsigned int	need_configure:1;
+	unsigned int	perp_mode:2;
+	unsigned int	has_fifo:1;
+	unsigned int	driver_version;
+	unsigned char	track[4];
+};
+
+struct floppy_write_errors32 {
+	unsigned int	write_errors;
+	compat_ulong_t	first_error_sector;
+	compat_int_t	first_error_generation;
+	compat_ulong_t	last_error_sector;
+	compat_int_t	last_error_generation;
+	compat_uint_t	badness;
+};
+
+#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
+#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
+#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct floppy_drive_struct32)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct floppy_drive_struct32)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct floppy_fdc_state32)
+#define FDWERRORGET32  _IOR(2, 0x17, struct floppy_write_errors32)
+
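+/*
+ * Floppy ioctls: the table below maps the 32-bit command numbers onto the
+ * native ones.  fd_ioctl_trans() kmallocs the native structure, converts
+ * input fields for the SET commands, issues the ioctl under KERNEL_DS, and
+ * converts output fields back for the GET commands.
+ */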
+static struct {
+	unsigned int	cmd32;
+	unsigned int	cmd;
+} fd_ioctl_trans_table[] = {
+	{ FDSETPRM32, FDSETPRM },
+	{ FDDEFPRM32, FDDEFPRM },
+	{ FDGETPRM32, FDGETPRM },
+	{ FDSETDRVPRM32, FDSETDRVPRM },
+	{ FDGETDRVPRM32, FDGETDRVPRM },
+	{ FDGETDRVSTAT32, FDGETDRVSTAT },
+	{ FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
+	{ FDGETFDCSTAT32, FDGETFDCSTAT },
+	{ FDWERRORGET32, FDWERRORGET }
+};
+
+#define NR_FD_IOCTL_TRANS (sizeof(fd_ioctl_trans_table)/sizeof(fd_ioctl_trans_table[0]))
+
+static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	void *karg = NULL;
+	unsigned int kcmd = 0;
+	int i, err;
+
+	for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
+		if (cmd == fd_ioctl_trans_table[i].cmd32) {
+			kcmd = fd_ioctl_trans_table[i].cmd;
+			break;
+		}
+	if (!kcmd)
+		return -EINVAL;
+
+	switch (cmd) {
+		case FDSETPRM32:
+		case FDDEFPRM32:
+		case FDGETPRM32:
+		{
+			compat_uptr_t name;
+			struct floppy_struct32 __user *uf;
+			struct floppy_struct *f;
+
+			uf = compat_ptr(arg);
+			f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
+			if (!karg)
+				return -ENOMEM;
+			if (cmd == FDGETPRM32)
+				break;
+			err = __get_user(f->size, &uf->size);
+			err |= __get_user(f->sect, &uf->sect);
+			err |= __get_user(f->head, &uf->head);
+			err |= __get_user(f->track, &uf->track);
+			err |= __get_user(f->stretch, &uf->stretch);
+			err |= __get_user(f->gap, &uf->gap);
+			err |= __get_user(f->rate, &uf->rate);
+			err |= __get_user(f->spec1, &uf->spec1);
+			err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+			err |= __get_user(name, &uf->name);
+			f->name = compat_ptr(name);
+			if (err) {
+				err = -EFAULT;
+				goto out;
+			}
+			break;
+		}
+		case FDSETDRVPRM32:
+		case FDGETDRVPRM32:
+		{
+			struct floppy_drive_params32 __user *uf;
+			struct floppy_drive_params *f;
+
+			uf = compat_ptr(arg);
+			f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
+			if (!karg)
+				return -ENOMEM;
+			if (cmd == FDGETDRVPRM32)
+				break;
+			err = __get_user(f->cmos, &uf->cmos);
+			err |= __get_user(f->max_dtr, &uf->max_dtr);
+			err |= __get_user(f->hlt, &uf->hlt);
+			err |= __get_user(f->hut, &uf->hut);
+			err |= __get_user(f->srt, &uf->srt);
+			err |= __get_user(f->spinup, &uf->spinup);
+			err |= __get_user(f->spindown, &uf->spindown);
+			err |= __get_user(f->spindown_offset, &uf->spindown_offset);
+			err |= __get_user(f->select_delay, &uf->select_delay);
+			err |= __get_user(f->rps, &uf->rps);
+			err |= __get_user(f->tracks, &uf->tracks);
+			err |= __get_user(f->timeout, &uf->timeout);
+			err |= __get_user(f->interleave_sect, &uf->interleave_sect);
+			err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
+			err |= __get_user(f->flags, &uf->flags);
+			err |= __get_user(f->read_track, &uf->read_track);
+			err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
+			err |= __get_user(f->checkfreq, &uf->checkfreq);
+			err |= __get_user(f->native_format, &uf->native_format);
+			if (err) {
+				err = -EFAULT;
+				goto out;
+			}
+			break;
+		}
+		case FDGETDRVSTAT32:
+		case FDPOLLDRVSTAT32:
+			karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
+			if (!karg)
+				return -ENOMEM;
+			break;
+		case FDGETFDCSTAT32:
+			karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
+			if (!karg)
+				return -ENOMEM;
+			break;
+		case FDWERRORGET32:
+			karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
+			if (!karg)
+				return -ENOMEM;
+			break;
+		default:
+			return -EINVAL;
+	}
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, kcmd, (unsigned long)karg);
+	set_fs (old_fs);
+	if (err)
+		goto out;
+	switch (cmd) {
+		case FDGETPRM32:
+		{
+			struct floppy_struct *f = karg;
+			struct floppy_struct32 __user *uf = compat_ptr(arg);
+
+			err = __put_user(f->size, &uf->size);
+			err |= __put_user(f->sect, &uf->sect);
+			err |= __put_user(f->head, &uf->head);
+			err |= __put_user(f->track, &uf->track);
+			err |= __put_user(f->stretch, &uf->stretch);
+			err |= __put_user(f->gap, &uf->gap);
+			err |= __put_user(f->rate, &uf->rate);
+			err |= __put_user(f->spec1, &uf->spec1);
+			err |= __put_user(f->fmt_gap, &uf->fmt_gap);
+			err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
+			break;
+		}
+		case FDGETDRVPRM32:
+		{
+			struct floppy_drive_params32 __user *uf;
+			struct floppy_drive_params *f = karg;
+
+			uf = compat_ptr(arg);
+			err = __put_user(f->cmos, &uf->cmos);
+			err |= __put_user(f->max_dtr, &uf->max_dtr);
+			err |= __put_user(f->hlt, &uf->hlt);
+			err |= __put_user(f->hut, &uf->hut);
+			err |= __put_user(f->srt, &uf->srt);
+			err |= __put_user(f->spinup, &uf->spinup);
+			err |= __put_user(f->spindown, &uf->spindown);
+			err |= __put_user(f->spindown_offset, &uf->spindown_offset);
+			err |= __put_user(f->select_delay, &uf->select_delay);
+			err |= __put_user(f->rps, &uf->rps);
+			err |= __put_user(f->tracks, &uf->tracks);
+			err |= __put_user(f->timeout, &uf->timeout);
+			err |= __put_user(f->interleave_sect, &uf->interleave_sect);
+			err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
+			err |= __put_user(f->flags, &uf->flags);
+			err |= __put_user(f->read_track, &uf->read_track);
+			err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
+			err |= __put_user(f->checkfreq, &uf->checkfreq);
+			err |= __put_user(f->native_format, &uf->native_format);
+			break;
+		}
+		case FDGETDRVSTAT32:
+		case FDPOLLDRVSTAT32:
+		{
+			struct floppy_drive_struct32 __user *uf;
+			struct floppy_drive_struct *f = karg;
+
+			uf = compat_ptr(arg);
+			err = __put_user(f->flags, &uf->flags);
+			err |= __put_user(f->spinup_date, &uf->spinup_date);
+			err |= __put_user(f->select_date, &uf->select_date);
+			err |= __put_user(f->first_read_date, &uf->first_read_date);
+			err |= __put_user(f->probed_format, &uf->probed_format);
+			err |= __put_user(f->track, &uf->track);
+			err |= __put_user(f->maxblock, &uf->maxblock);
+			err |= __put_user(f->maxtrack, &uf->maxtrack);
+			err |= __put_user(f->generation, &uf->generation);
+			err |= __put_user(f->keep_data, &uf->keep_data);
+			err |= __put_user(f->fd_ref, &uf->fd_ref);
+			err |= __put_user(f->fd_device, &uf->fd_device);
+			err |= __put_user(f->last_checked, &uf->last_checked);
+			err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
+			err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
+			break;
+		}
+		case FDGETFDCSTAT32:
+		{
+			struct floppy_fdc_state32 __user *uf;
+			struct floppy_fdc_state *f = karg;
+
+			uf = compat_ptr(arg);
+			err = __put_user(f->spec1, &uf->spec1);
+			err |= __put_user(f->spec2, &uf->spec2);
+			err |= __put_user(f->dtr, &uf->dtr);
+			err |= __put_user(f->version, &uf->version);
+			err |= __put_user(f->dor, &uf->dor);
+			err |= __put_user(f->address, &uf->address);
+			err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
+					   (char *)&f->address + sizeof(f->address), sizeof(int));
+			err |= __put_user(f->driver_version, &uf->driver_version);
+			err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
+			break;
+		}
+		case FDWERRORGET32:
+		{
+			struct floppy_write_errors32 __user *uf;
+			struct floppy_write_errors *f = karg;
+
+			uf = compat_ptr(arg);
+			err = __put_user(f->write_errors, &uf->write_errors);
+			err |= __put_user(f->first_error_sector, &uf->first_error_sector);
+			err |= __put_user(f->first_error_generation, &uf->first_error_generation);
+			err |= __put_user(f->last_error_sector, &uf->last_error_sector);
+			err |= __put_user(f->last_error_generation, &uf->last_error_generation);
+			err |= __put_user(f->badness, &uf->badness);
+			break;
+		}
+		default:
+			break;
+	}
+	if (err)
+		err = -EFAULT;
+
+out:	if (karg) kfree(karg);
+	return err;
+}
+
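+/*
+ * MEMREADOOB/MEMWRITEOOB pass a struct mtd_oob_buf whose 'ptr' member is a
+ * pointer; rebuild it in compat user space with the pointer widened and
+ * copy start/length back on success.
+ */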
+struct mtd_oob_buf32 {
+	u_int32_t start;
+	u_int32_t length;
+	compat_caddr_t ptr;	/* unsigned char* */
+};
+
+#define MEMWRITEOOB32 	_IOWR('M',3,struct mtd_oob_buf32)
+#define MEMREADOOB32 	_IOWR('M',4,struct mtd_oob_buf32)
+
+static int mtd_rw_oob(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct mtd_oob_buf __user *buf = compat_alloc_user_space(sizeof(*buf));
+	struct mtd_oob_buf32 __user *buf32 = compat_ptr(arg);
+	u32 data;
+	char __user *datap;
+	unsigned int real_cmd;
+	int err;
+
+	real_cmd = (cmd == MEMREADOOB32) ?
+		MEMREADOOB : MEMWRITEOOB;
+
+	if (copy_in_user(&buf->start, &buf32->start,
+			 2 * sizeof(u32)) ||
+	    get_user(data, &buf32->ptr))
+		return -EFAULT;
+	datap = compat_ptr(data);
+	if (put_user(datap, &buf->ptr))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, real_cmd, (unsigned long) buf);
+
+	if (!err) {
+		if (copy_in_user(&buf32->start, &buf->start,
+				 2 * sizeof(u32)))
+			err = -EFAULT;
+	}
+
+	return err;
+}	
+
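+/*
+ * VFAT readdir ioctls return two struct dirent entries.  Run the ioctl on a
+ * kernel array under KERNEL_DS and write both entries back to userspace in
+ * compat_dirent format.
+ */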
+#define	VFAT_IOCTL_READDIR_BOTH32	_IOR('r', 1, struct compat_dirent[2])
+#define	VFAT_IOCTL_READDIR_SHORT32	_IOR('r', 2, struct compat_dirent[2])
+
+static long
+put_dirent32 (struct dirent *d, struct compat_dirent __user *d32)
+{
+        if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
+                return -EFAULT;
+
+        __put_user(d->d_ino, &d32->d_ino);
+        __put_user(d->d_off, &d32->d_off);
+        __put_user(d->d_reclen, &d32->d_reclen);
+        if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
+		return -EFAULT;
+
+        return 0;
+}
+
+static int vfat_ioctl32(unsigned fd, unsigned cmd, unsigned long arg)
+{
+	struct compat_dirent __user *p = compat_ptr(arg);
+	int ret;
+	mm_segment_t oldfs = get_fs();
+	struct dirent d[2];
+
+	switch(cmd)
+	{
+        	case VFAT_IOCTL_READDIR_BOTH32:
+                	cmd = VFAT_IOCTL_READDIR_BOTH;
+                	break;
+        	case VFAT_IOCTL_READDIR_SHORT32:
+                	cmd = VFAT_IOCTL_READDIR_SHORT;
+                	break;
+	}
+
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd,cmd,(unsigned long)&d);
+	set_fs(oldfs);
+	if (ret >= 0) {
+		ret |= put_dirent32(&d[0], p);
+		ret |= put_dirent32(&d[1], p + 1);
+	}
+	return ret;
+}
+
+#define REISERFS_IOC_UNPACK32               _IOW(0xCD,1,int)
+
+static int reiserfs_ioctl32(unsigned fd, unsigned cmd, unsigned long ptr)
+{
+        if (cmd == REISERFS_IOC_UNPACK32)
+                cmd = REISERFS_IOC_UNPACK;
+
+        return sys_ioctl(fd,cmd,ptr);
+}
+
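+/*
+ * RAW_SETBIND/RAW_GETBIND: the packed 32-bit raw_config_request differs in
+ * alignment from the native structure, so copy it field by field through a
+ * kernel struct and, for RAW_GETBIND, write the result back the same way.
+ */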
+struct raw32_config_request
+{
+        compat_int_t    raw_minor;
+        __u64   block_major;
+        __u64   block_minor;
+} __attribute__((packed));
+
+static int get_raw32_request(struct raw_config_request *req, struct raw32_config_request __user *user_req)
+{
+        int ret;
+
+        if (!access_ok(VERIFY_READ, user_req, sizeof(struct raw32_config_request)))
+                return -EFAULT;
+
+        ret = __get_user(req->raw_minor, &user_req->raw_minor);
+        ret |= __get_user(req->block_major, &user_req->block_major);
+        ret |= __get_user(req->block_minor, &user_req->block_minor);
+
+        return ret ? -EFAULT : 0;
+}
+
+static int set_raw32_request(struct raw_config_request *req, struct raw32_config_request __user *user_req)
+{
+	int ret;
+
+        if (!access_ok(VERIFY_WRITE, user_req, sizeof(struct raw32_config_request)))
+                return -EFAULT;
+
+        ret = __put_user(req->raw_minor, &user_req->raw_minor);
+        ret |= __put_user(req->block_major, &user_req->block_major);
+        ret |= __put_user(req->block_minor, &user_req->block_minor);
+
+        return ret ? -EFAULT : 0;
+}
+
+static int raw_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+{
+        int ret;
+
+        switch (cmd) {
+        case RAW_SETBIND:
+        case RAW_GETBIND: {
+                struct raw_config_request req;
+                struct raw32_config_request __user *user_req = compat_ptr(arg);
+                mm_segment_t oldfs = get_fs();
+
+                if ((ret = get_raw32_request(&req, user_req)))
+                        return ret;
+
+                set_fs(KERNEL_DS);
+                ret = sys_ioctl(fd,cmd,(unsigned long)&req);
+                set_fs(oldfs);
+
+                if ((!ret) && (cmd == RAW_GETBIND)) {
+                        ret = set_raw32_request(&req, user_req);
+                }
+                break;
+        }
+        default:
+                ret = sys_ioctl(fd, cmd, arg);
+                break;
+        }
+        return ret;
+}
+
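+/*
+ * TIOCGSERIAL/TIOCSSERIAL: struct serial_struct matches the 32-bit layout up
+ * to iomem_base, which is a pointer natively.  Copy the common prefix in one
+ * go and handle iomem_base, iomem_reg_shift and port_high by hand.
+ */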
+struct serial_struct32 {
+        compat_int_t    type;
+        compat_int_t    line;
+        compat_uint_t   port;
+        compat_int_t    irq;
+        compat_int_t    flags;
+        compat_int_t    xmit_fifo_size;
+        compat_int_t    custom_divisor;
+        compat_int_t    baud_base;
+        unsigned short  close_delay;
+        char    io_type;
+        char    reserved_char[1];
+        compat_int_t    hub6;
+        unsigned short  closing_wait; /* time to wait before closing */
+        unsigned short  closing_wait2; /* no longer used... */
+        compat_uint_t   iomem_base;
+        unsigned short  iomem_reg_shift;
+        unsigned int    port_high;
+     /* compat_ulong_t  iomap_base FIXME */
+        compat_int_t    reserved[1];
+};
+
+static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+{
+        typedef struct serial_struct SS;
+        typedef struct serial_struct32 SS32;
+        struct serial_struct32 __user *ss32 = compat_ptr(arg);
+        int err;
+        struct serial_struct ss;
+        mm_segment_t oldseg = get_fs();
+        __u32 udata;
+
+        if (cmd == TIOCSSERIAL) {
+                if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
+                        return -EFAULT;
+                if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
+			return -EFAULT;
+                __get_user(udata, &ss32->iomem_base);
+                ss.iomem_base = compat_ptr(udata);
+                __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
+                __get_user(ss.port_high, &ss32->port_high);
+                ss.iomap_base = 0UL;
+        }
+        set_fs(KERNEL_DS);
+        err = sys_ioctl(fd,cmd,(unsigned long)(&ss));
+        set_fs(oldseg);
+        if (cmd == TIOCGSERIAL && err >= 0) {
+                if (!access_ok(VERIFY_WRITE, ss32, sizeof(SS32)))
+                        return -EFAULT;
+                if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
+			return -EFAULT;
+                __put_user((unsigned long)ss.iomem_base  >> 32 ?
+                            0xffffffff : (unsigned)(unsigned long)ss.iomem_base,
+                            &ss32->iomem_base);
+                __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
+                __put_user(ss.port_high, &ss32->port_high);
+
+        }
+        return err;
+}
+
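+/*
+ * usbdevfs control and bulk transfers carry a data pointer in an otherwise
+ * fixed-layout structure; rebuild the native request in compat user space
+ * with that pointer widened.
+ */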
+struct usbdevfs_ctrltransfer32 {
+        u8 bRequestType;
+        u8 bRequest;
+        u16 wValue;
+        u16 wIndex;
+        u16 wLength;
+        u32 timeout;  /* in milliseconds */
+        compat_caddr_t data;
+};
+
+#define USBDEVFS_CONTROL32           _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
+
+static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+        struct usbdevfs_ctrltransfer32 __user *p32 = compat_ptr(arg);
+        struct usbdevfs_ctrltransfer __user *p;
+        __u32 udata;
+        p = compat_alloc_user_space(sizeof(*p));
+        if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
+            get_user(udata, &p32->data) ||
+	    put_user(compat_ptr(udata), &p->data))
+		return -EFAULT;
+        return sys_ioctl(fd, USBDEVFS_CONTROL, (unsigned long)p);
+}
+
+
+struct usbdevfs_bulktransfer32 {
+        compat_uint_t ep;
+        compat_uint_t len;
+        compat_uint_t timeout; /* in milliseconds */
+        compat_caddr_t data;
+};
+
+#define USBDEVFS_BULK32              _IOWR('U', 2, struct usbdevfs_bulktransfer32)
+
+static int do_usbdevfs_bulk(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+        struct usbdevfs_bulktransfer32 __user *p32 = compat_ptr(arg);
+        struct usbdevfs_bulktransfer __user *p;
+        compat_uint_t n;
+        compat_caddr_t addr;
+
+        p = compat_alloc_user_space(sizeof(*p));
+
+        if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
+            get_user(n, &p32->len) || put_user(n, &p->len) ||
+            get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
+            get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
+                return -EFAULT;
+
+        return sys_ioctl(fd, USBDEVFS_BULK, (unsigned long)p);
+}
+
+
+/*
+ *  USBDEVFS_SUBMITURB, USBDEVFS_REAPURB and USBDEVFS_REAPURBNDELAY
+ *  are handled in usbdevfs core.			-Christopher Li
+ */
+
+struct usbdevfs_disconnectsignal32 {
+        compat_int_t signr;
+        compat_caddr_t context;
+};
+
+#define USBDEVFS_DISCSIGNAL32      _IOR('U', 14, struct usbdevfs_disconnectsignal32)
+
+static int do_usbdevfs_discsignal(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+        struct usbdevfs_disconnectsignal kdis;
+        struct usbdevfs_disconnectsignal32 __user *udis;
+        mm_segment_t old_fs;
+        u32 uctx;
+        int err;
+
+        udis = compat_ptr(arg);
+
+        if (get_user(kdis.signr, &udis->signr) ||
+            __get_user(uctx, &udis->context))
+                return -EFAULT;
+
+        kdis.context = compat_ptr(uctx);
+
+        old_fs = get_fs();
+        set_fs(KERNEL_DS);
+        err = sys_ioctl(fd, USBDEVFS_DISCSIGNAL, (unsigned long) &kdis);
+        set_fs(old_fs);
+
+        return err;
+}
+
+/*
+ * I2C layer ioctls
+ */
+
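+/*
+ * I2C_RDWR passes an array of struct i2c_msg, each carrying a buffer
+ * pointer.  The handler allocates a single compat user-space block holding
+ * the native i2c_rdwr_ioctl_data plus the widened message array; I2C_SMBUS
+ * only needs its data pointer widened.
+ */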
+struct i2c_msg32 {
+	u16 addr;
+	u16 flags;
+	u16 len;
+	compat_caddr_t buf;
+};
+
+struct i2c_rdwr_ioctl_data32 {
+	compat_caddr_t msgs; /* struct i2c_msg __user *msgs */
+	u32 nmsgs;
+};
+
+struct i2c_smbus_ioctl_data32 {
+	u8 read_write;
+	u8 command;
+	u32 size;
+	compat_caddr_t data; /* union i2c_smbus_data *data */
+};
+
+struct i2c_rdwr_aligned {
+	struct i2c_rdwr_ioctl_data cmd;
+	struct i2c_msg msgs[0];
+};
+
+static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct i2c_rdwr_ioctl_data32	__user *udata = compat_ptr(arg);
+	struct i2c_rdwr_aligned		__user *tdata;
+	struct i2c_msg			__user *tmsgs;
+	struct i2c_msg32		__user *umsgs;
+	compat_caddr_t			datap;
+	int				nmsgs, i;
+
+	if (get_user(nmsgs, &udata->nmsgs))
+		return -EFAULT;
+	if (nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
+		return -EINVAL;
+
+	if (get_user(datap, &udata->msgs))
+		return -EFAULT;
+	umsgs = compat_ptr(datap);
+
+	tdata = compat_alloc_user_space(sizeof(*tdata) +
+				      nmsgs * sizeof(struct i2c_msg));
+	tmsgs = &tdata->msgs[0];
+
+	if (put_user(nmsgs, &tdata->cmd.nmsgs) ||
+	    put_user(tmsgs, &tdata->cmd.msgs))
+		return -EFAULT;
+
+	for (i = 0; i < nmsgs; i++) {
+		if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
+			return -EFAULT;
+		if (get_user(datap, &umsgs[i].buf) ||
+		    put_user(compat_ptr(datap), &tmsgs[i].buf))
+			return -EFAULT;
+	}
+	return sys_ioctl(fd, cmd, (unsigned long)tdata);
+}
+
+static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct i2c_smbus_ioctl_data	__user *tdata;
+	struct i2c_smbus_ioctl_data32	__user *udata;
+	compat_caddr_t			datap;
+
+	tdata = compat_alloc_user_space(sizeof(*tdata));
+	if (tdata == NULL)
+		return -ENOMEM;
+	if (!access_ok(VERIFY_WRITE, tdata, sizeof(*tdata)))
+		return -EFAULT;
+
+	udata = compat_ptr(arg);
+	if (!access_ok(VERIFY_READ, udata, sizeof(*udata)))
+		return -EFAULT;
+
+	if (__copy_in_user(&tdata->read_write, &udata->read_write, 2 * sizeof(u8)))
+		return -EFAULT;
+	if (__copy_in_user(&tdata->size, &udata->size, 2 * sizeof(u32)))
+		return -EFAULT;
+	if (__get_user(datap, &udata->data) ||
+	    __put_user(compat_ptr(datap), &tdata->data))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long)tdata);
+}
+
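+/*
+ * Wireless extensions: struct iwreq embeds an iw_point whose 'pointer'
+ * member must be widened; the interface name and the 16-bit length/flags
+ * fields are copied through unchanged.
+ */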
+struct compat_iw_point {
+	compat_caddr_t pointer;
+	__u16 length;
+	__u16 flags;
+};
+
+static int do_wireless_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct iwreq __user *iwr;
+	struct iwreq __user *iwr_u;
+	struct iw_point __user *iwp;
+	struct compat_iw_point __user *iwp_u;
+	compat_caddr_t pointer;
+	__u16 length, flags;
+
+	iwr_u = compat_ptr(arg);
+	iwp_u = (struct compat_iw_point __user *) &iwr_u->u.data;
+	iwr = compat_alloc_user_space(sizeof(*iwr));
+	if (iwr == NULL)
+		return -ENOMEM;
+
+	iwp = &iwr->u.data;
+
+	if (!access_ok(VERIFY_WRITE, iwr, sizeof(*iwr)))
+		return -EFAULT;
+
+	if (__copy_in_user(&iwr->ifr_ifrn.ifrn_name[0],
+			   &iwr_u->ifr_ifrn.ifrn_name[0],
+			   sizeof(iwr->ifr_ifrn.ifrn_name)))
+		return -EFAULT;
+
+	if (__get_user(pointer, &iwp_u->pointer) ||
+	    __get_user(length, &iwp_u->length) ||
+	    __get_user(flags, &iwp_u->flags))
+		return -EFAULT;
+
+	if (__put_user(compat_ptr(pointer), &iwp->pointer) ||
+	    __put_user(length, &iwp->length) ||
+	    __put_user(flags, &iwp->flags))
+		return -EFAULT;
+
+	return sys_ioctl(fd, cmd, (unsigned long) iwr);
+}
+
+/* Since old style bridge ioctls end up using SIOCDEVPRIVATE
+ * for some operations, this forces use of the newer bridge-utils that
+ * use compatible ioctls.
+ */
+static int old_bridge_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	u32 tmp;
+
+	if (get_user(tmp, (u32 __user *) arg))
+		return -EFAULT;
+	if (tmp == BRCTL_GET_VERSION)
+		return BRCTL_VERSION + 1;
+	return -EINVAL;
+}
+
+#if defined(CONFIG_NCP_FS) || defined(CONFIG_NCP_FS_MODULE)
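+/*
+ * NCP ioctls: each request structure carries a user pointer (request data,
+ * object name or private data), so the handlers rebuild the native structure
+ * in compat user space.  GETMOUNTUID2 returns the uid through a kernel
+ * variable under KERNEL_DS.
+ */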
+struct ncp_ioctl_request_32 {
+	u32 function;
+	u32 size;
+	compat_caddr_t data;
+};
+
+struct ncp_fs_info_v2_32 {
+	s32 version;
+	u32 mounted_uid;
+	u32 connection;
+	u32 buffer_size;
+
+	u32 volume_number;
+	u32 directory_id;
+
+	u32 dummy1;
+	u32 dummy2;
+	u32 dummy3;
+};
+
+struct ncp_objectname_ioctl_32
+{
+	s32		auth_type;
+	u32		object_name_len;
+	compat_caddr_t	object_name;	/* userspace data, in most cases the user name */
+};
+
+struct ncp_privatedata_ioctl_32
+{
+	u32		len;
+	compat_caddr_t	data;		/* ~1000 for NDS */
+};
+
+#define	NCP_IOC_NCPREQUEST_32		_IOR('n', 1, struct ncp_ioctl_request_32)
+#define NCP_IOC_GETMOUNTUID2_32		_IOW('n', 2, u32)
+#define NCP_IOC_GET_FS_INFO_V2_32	_IOWR('n', 4, struct ncp_fs_info_v2_32)
+#define NCP_IOC_GETOBJECTNAME_32	_IOWR('n', 9, struct ncp_objectname_ioctl_32)
+#define NCP_IOC_SETOBJECTNAME_32	_IOR('n', 9, struct ncp_objectname_ioctl_32)
+#define NCP_IOC_GETPRIVATEDATA_32	_IOWR('n', 10, struct ncp_privatedata_ioctl_32)
+#define NCP_IOC_SETPRIVATEDATA_32	_IOR('n', 10, struct ncp_privatedata_ioctl_32)
+
+static int do_ncp_ncprequest(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ncp_ioctl_request_32 n32;
+	struct ncp_ioctl_request __user *p = compat_alloc_user_space(sizeof(*p));
+
+	if (copy_from_user(&n32, compat_ptr(arg), sizeof(n32)) ||
+	    put_user(n32.function, &p->function) ||
+	    put_user(n32.size, &p->size) ||
+	    put_user(compat_ptr(n32.data), &p->data))
+		return -EFAULT;
+
+	return sys_ioctl(fd, NCP_IOC_NCPREQUEST, (unsigned long)p);
+}
+
+static int do_ncp_getmountuid2(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	__kernel_uid_t kuid;
+	int err;
+
+	cmd = NCP_IOC_GETMOUNTUID2;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (unsigned long)&kuid);
+	set_fs(old_fs);
+
+	if (!err)
+		err = put_user(kuid,
+			       (unsigned int __user *) compat_ptr(arg));
+
+	return err;
+}
+
+static int do_ncp_getfsinfo2(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct ncp_fs_info_v2_32 n32;
+	struct ncp_fs_info_v2 n;
+	int err;
+
+	if (copy_from_user(&n32, compat_ptr(arg), sizeof(n32)))
+		return -EFAULT;
+	if (n32.version != NCP_GET_FS_INFO_VERSION_V2)
+		return -EINVAL;
+	n.version = NCP_GET_FS_INFO_VERSION_V2;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, NCP_IOC_GET_FS_INFO_V2, (unsigned long)&n);
+	set_fs(old_fs);
+
+	if (!err) {
+		n32.version = n.version;
+		n32.mounted_uid = n.mounted_uid;
+		n32.connection = n.connection;
+		n32.buffer_size = n.buffer_size;
+		n32.volume_number = n.volume_number;
+		n32.directory_id = n.directory_id;
+		n32.dummy1 = n.dummy1;
+		n32.dummy2 = n.dummy2;
+		n32.dummy3 = n.dummy3;
+		err = copy_to_user(compat_ptr(arg), &n32, sizeof(n32)) ? -EFAULT : 0;
+	}
+	return err;
+}
+
+static int do_ncp_getobjectname(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ncp_objectname_ioctl_32 n32, __user *p32 = compat_ptr(arg);
+	struct ncp_objectname_ioctl __user *p = compat_alloc_user_space(sizeof(*p));
+	s32 auth_type;
+	u32 name_len;
+	int err;
+
+	if (copy_from_user(&n32, p32, sizeof(n32)) ||
+	    put_user(n32.object_name_len, &p->object_name_len) ||
+	    put_user(compat_ptr(n32.object_name), &p->object_name))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, NCP_IOC_GETOBJECTNAME, (unsigned long)p);
+        if (err)
+		return err;
+
+	if (get_user(auth_type, &p->auth_type) ||
+	    put_user(auth_type, &p32->auth_type) ||
+	    get_user(name_len, &p->object_name_len) ||
+	    put_user(name_len, &p32->object_name_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int do_ncp_setobjectname(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ncp_objectname_ioctl_32 n32, __user *p32 = compat_ptr(arg);
+	struct ncp_objectname_ioctl __user *p = compat_alloc_user_space(sizeof(*p));
+
+	if (copy_from_user(&n32, p32, sizeof(n32)) ||
+	    put_user(n32.auth_type, &p->auth_type) ||
+	    put_user(n32.object_name_len, &p->object_name_len) ||
+	    put_user(compat_ptr(n32.object_name), &p->object_name))
+		return -EFAULT;
+
+	return sys_ioctl(fd, NCP_IOC_SETOBJECTNAME, (unsigned long)p);
+}
+
+static int do_ncp_getprivatedata(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ncp_privatedata_ioctl_32 n32, __user *p32 = compat_ptr(arg);
+	struct ncp_privatedata_ioctl __user *p =
+		compat_alloc_user_space(sizeof(*p));
+	u32 len;
+	int err;
+
+	if (copy_from_user(&n32, p32, sizeof(n32)) ||
+	    put_user(n32.len, &p->len) ||
+	    put_user(compat_ptr(n32.data), &p->data))
+		return -EFAULT;
+
+	err = sys_ioctl(fd, NCP_IOC_GETPRIVATEDATA, (unsigned long)p);
+        if (err)
+		return err;
+
+	if (get_user(len, &p->len) ||
+	    put_user(len, &p32->len))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int do_ncp_setprivatedata(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct ncp_privatedata_ioctl_32 n32;
+	struct ncp_privatedata_ioctl_32 __user *p32 = compat_ptr(arg);
+	struct ncp_privatedata_ioctl __user *p =
+		compat_alloc_user_space(sizeof(*p));
+
+	if (copy_from_user(&n32, p32, sizeof(n32)) ||
+	    put_user(n32.len, &p->len) ||
+	    put_user(compat_ptr(n32.data), &p->data))
+		return -EFAULT;
+
+	return sys_ioctl(fd, NCP_IOC_SETPRIVATEDATA, (unsigned long)p);
+}
+#endif
+
+#undef CODE
+#endif
+
+#ifdef DECLARES
+HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
+HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
+#ifdef CONFIG_NET
+HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
+HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
+HANDLE_IOCTL(SIOCGIFFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMETRIC, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMETRIC, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMTU, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMTU, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMEM, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMEM, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFHWADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFHWADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCADDMULTI, dev_ifsioc)
+HANDLE_IOCTL(SIOCDELMULTI, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFINDEX, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
+
+/* ioctls used by appletalk ddp.c */
+HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCDIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSARP, dev_ifsioc)
+HANDLE_IOCTL(SIOCDARP, dev_ifsioc)
+
+HANDLE_IOCTL(SIOCGIFBRDADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFBRDADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFDSTADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFDSTADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFNETMASK, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFNETMASK, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFPFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
+HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
+HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
+HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
+HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
+HANDLE_IOCTL(SIOCBONDSETHWADDR, bond_ioctl)
+HANDLE_IOCTL(SIOCBONDSLAVEINFOQUERY, bond_ioctl)
+HANDLE_IOCTL(SIOCBONDINFOQUERY, bond_ioctl)
+HANDLE_IOCTL(SIOCBONDCHANGEACTIVE, bond_ioctl)
+HANDLE_IOCTL(SIOCADDRT, routing_ioctl)
+HANDLE_IOCTL(SIOCDELRT, routing_ioctl)
+HANDLE_IOCTL(SIOCBRADDIF, dev_ifsioc)
+HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc)
+/* Note SIOCRTMSG is no longer supported, so this is safe and
+ * the user would have seen just an -EINVAL anyways. */
+HANDLE_IOCTL(SIOCRTMSG, ret_einval)
+HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
+#endif
+HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
+HANDLE_IOCTL(BLKRAGET, w_long)
+HANDLE_IOCTL(BLKGETSIZE, w_long)
+HANDLE_IOCTL(0x1260, broken_blkgetsize)
+HANDLE_IOCTL(BLKFRAGET, w_long)
+HANDLE_IOCTL(BLKSECTGET, w_long)
+HANDLE_IOCTL(FBIOGET_FSCREENINFO, fb_ioctl_trans)
+HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
+HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
+HANDLE_IOCTL(FBIOPUTCMAP, fb_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
+HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
+HANDLE_IOCTL(SG_IO, sg_ioctl_trans)
+HANDLE_IOCTL(PPPIOCGIDLE32, ppp_ioctl_trans)
+HANDLE_IOCTL(PPPIOCSCOMPRESS32, ppp_ioctl_trans)
+HANDLE_IOCTL(PPPIOCSPASS32, ppp_sock_fprog_ioctl_trans)
+HANDLE_IOCTL(PPPIOCSACTIVE32, ppp_sock_fprog_ioctl_trans)
+HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
+HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
+HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
+HANDLE_IOCTL(LOOP_SET_STATUS, loop_status)
+HANDLE_IOCTL(LOOP_GET_STATUS, loop_status)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
+HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
+#ifdef CONFIG_VT
+HANDLE_IOCTL(PIO_FONTX, do_fontx_ioctl)
+HANDLE_IOCTL(GIO_FONTX, do_fontx_ioctl)
+HANDLE_IOCTL(PIO_UNIMAP, do_unimap_ioctl)
+HANDLE_IOCTL(GIO_UNIMAP, do_unimap_ioctl)
+HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl)
+#endif
+HANDLE_IOCTL(EXT2_IOC32_GETFLAGS, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_SETFLAGS, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_GETVERSION, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_SETVERSION, do_ext2_ioctl)
+HANDLE_IOCTL(VIDIOCGTUNER32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSTUNER32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCGWIN32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSWIN32, do_set_window)
+HANDLE_IOCTL(VIDIOCGFBUF32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSFBUF32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCGFREQ32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSFREQ32, do_video_ioctl)
+/* One SMB ioctl needs translations. */
+#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
+HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
+HANDLE_IOCTL(ATM_GETLINKRATE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETNAMES32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETTYPE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETESI32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_RSTADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_ADDADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_DELADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETCIRANGE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETCIRANGE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETESI32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETESIF32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETSTAT32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETSTATZ32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETLOOP32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETLOOP32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_QUERYLOOP32, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETSTAT, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETSTATZ, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_SETDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_CLRDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_SETFRAMING, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
+/* block stuff */
+HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
+HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
+HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
+/* vfat */
+HANDLE_IOCTL(VFAT_IOCTL_READDIR_BOTH32, vfat_ioctl32)
+HANDLE_IOCTL(VFAT_IOCTL_READDIR_SHORT32, vfat_ioctl32)
+HANDLE_IOCTL(REISERFS_IOC_UNPACK32, reiserfs_ioctl32)
+/* Raw devices */
+HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
+HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
+/* Serial */
+HANDLE_IOCTL(TIOCGSERIAL, serial_struct_ioctl)
+HANDLE_IOCTL(TIOCSSERIAL, serial_struct_ioctl)
+/* Usbdevfs */
+HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control)
+HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk)
+HANDLE_IOCTL(USBDEVFS_DISCSIGNAL32, do_usbdevfs_discsignal)
+/* i2c */
+HANDLE_IOCTL(I2C_FUNCS, w_long)
+HANDLE_IOCTL(I2C_RDWR, do_i2c_rdwr_ioctl)
+HANDLE_IOCTL(I2C_SMBUS, do_i2c_smbus_ioctl)
+/* wireless */
+HANDLE_IOCTL(SIOCGIWRANGE, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIWSPY, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWSPY, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIWTHRSPY, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWTHRSPY, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWAPLIST, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWSCAN, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIWESSID, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWESSID, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIWNICKN, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWNICKN, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIWENCODE, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCGIWENCODE, do_wireless_ioctl)
+HANDLE_IOCTL(SIOCSIFBR, old_bridge_ioctl)
+HANDLE_IOCTL(SIOCGIFBR, old_bridge_ioctl)
+
+#if defined(CONFIG_NCP_FS) || defined(CONFIG_NCP_FS_MODULE)
+HANDLE_IOCTL(NCP_IOC_NCPREQUEST_32, do_ncp_ncprequest)
+HANDLE_IOCTL(NCP_IOC_GETMOUNTUID2_32, do_ncp_getmountuid2)
+HANDLE_IOCTL(NCP_IOC_GET_FS_INFO_V2_32, do_ncp_getfsinfo2)
+HANDLE_IOCTL(NCP_IOC_GETOBJECTNAME_32, do_ncp_getobjectname)
+HANDLE_IOCTL(NCP_IOC_SETOBJECTNAME_32, do_ncp_setobjectname)
+HANDLE_IOCTL(NCP_IOC_GETPRIVATEDATA_32, do_ncp_getprivatedata)
+HANDLE_IOCTL(NCP_IOC_SETPRIVATEDATA_32, do_ncp_setprivatedata)
+#endif
+
+#undef DECLARES
+#endif
diff --git a/fs/cramfs/Makefile b/fs/cramfs/Makefile
new file mode 100644
index 0000000..92ebb46
--- /dev/null
+++ b/fs/cramfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux cramfs routines.
+#
+
+obj-$(CONFIG_CRAMFS) += cramfs.o
+
+cramfs-objs := inode.o uncompress.o
diff --git a/fs/cramfs/README b/fs/cramfs/README
new file mode 100644
index 0000000..445d1c2
--- /dev/null
+++ b/fs/cramfs/README
@@ -0,0 +1,168 @@
+Notes on Filesystem Layout
+--------------------------
+
+These notes describe what mkcramfs generates.  Kernel requirements are
+a bit looser, e.g. it doesn't care if the <file_data> items are
+swapped around (though it does care that directory entries (inodes) in
+a given directory are contiguous, as this is used by readdir).
+
+All data is currently in host-endian format; neither mkcramfs nor the
+kernel ever does swabbing.  (See section `Block Size' below.)
+
+<filesystem>:
+	<superblock>
+	<directory_structure>
+	<data>
+
+<superblock>: struct cramfs_super (see cramfs_fs.h).
+
+<directory_structure>:
+	For each file:
+		struct cramfs_inode (see cramfs_fs.h).
+		Filename.  Not generally null-terminated, but it is
+		 null-padded to a multiple of 4 bytes.
+
+The order of inode traversal is described as "width-first" (not to be
+confused with breadth-first); i.e. like depth-first but listing all of
+a directory's entries before recursing down its subdirectories: the
+same order as `ls -AUR' (but without the /^\..*:$/ directory header
+lines); put another way, the same order as `find -type d -exec
+ls -AU1 {} \;'.
+
+Beginning in 2.4.7, directory entries are sorted.  This optimization
+allows cramfs_lookup to return more quickly when a filename does not
+exist, speeds up user-space directory sorts, etc.
+
+<data>:
+	One <file_data> for each file that's either a symlink or a
+	 regular file of non-zero st_size.
+
+<file_data>:
+	nblocks * <block_pointer>
+	 (where nblocks = (st_size - 1) / blksize + 1)
+	nblocks * <block>
+	padding to multiple of 4 bytes
+
+The i'th <block_pointer> for a file stores the byte offset of the
+*end* of the i'th <block> (i.e. one past the last byte, which is the
+same as the start of the (i+1)'th <block> if there is one).  The first
+<block> immediately follows the last <block_pointer> for the file.
+<block_pointer>s are each 32 bits long.
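+
+As a minimal sketch (host-endian, no error handling; here `fs' is
+assumed to point at the start of the image, `blkptr_offset' is the
+offset of the file's first <block_pointer>, and nblocks is computed as
+above), the i'th <block> of a file can be located like this:
+
+	u32 *blkptr   = (u32 *)(fs + blkptr_offset);
+	u32 blk_end   = blkptr[i];	/* one past the block's last byte */
+	u32 blk_start = i ? blkptr[i - 1]
+			  : blkptr_offset + nblocks * 4;
+	u32 blk_len   = blk_end - blk_start;	/* compressed length */
+	/* zlib-inflate fs[blk_start .. blk_end) into a blksize buffer */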
+
+The order of <file_data>'s is a depth-first descent of the directory
+tree, i.e. the same order as `find -size +0 \( -type f -o -type l \)
+-print'.
+
+
+<block>: The i'th <block> is the output of zlib's compress function
+applied to the i'th blksize-sized chunk of the input data.
+(For the last <block> of the file, the input may of course be smaller.)
+Each <block> may be a different size.  (See <block_pointer> above.)
+<block>s are merely byte-aligned, not generally u32-aligned.
+
+
+Holes
+-----
+
+This kernel supports cramfs holes (i.e. [efficient representation of]
+blocks in uncompressed data consisting entirely of NUL bytes), but by
+default mkcramfs doesn't test for & create holes, since cramfs in
+kernels up to at least 2.3.39 didn't support holes.  Run mkcramfs
+with -z if you want it to create files that can have holes in them.
+
+
+Tools
+-----
+
+The cramfs user-space tools, including mkcramfs and cramfsck, are
+located at <http://sourceforge.net/projects/cramfs/>.
+
+
+Future Development
+==================
+
+Block Size
+----------
+
+(Block size in cramfs refers to the size of input data that is
+compressed at a time.  It's intended to be somewhere around
+PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+
+The superblock ought to indicate the block size that the fs was
+written for, since comments in <linux/pagemap.h> indicate that
+PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+correctly).
+
+Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
+This discrepancy is a bug, though it's not clear which should be
+changed.
+
+One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+<asm/page.h>.  Personally I don't like this option, but it does
+require the least amount of change: just change `#define
+PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'.  The disadvantage
+is that the generated cramfs cannot always be shared between different
+kernels, not even necessarily kernels of the same architecture if
+PAGE_CACHE_SIZE is subject to change between kernel versions
+(currently possible with arm and ia64).
+
+The remaining options try to make cramfs more sharable.
+
+One part of that is addressing endianness.  The two options here are
+`always use little-endian' (like ext2fs) or `writer chooses
+endianness; kernel adapts at runtime'.  Little-endian wins because of
+code simplicity and little CPU overhead even on big-endian machines.
+
+The cost of swabbing is changing the code to use the le32_to_cpu
+etc. macros as used by ext2fs.  We don't need to swab the compressed
+data, only the superblock, inodes and block pointers.
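+
+A sketch of what that would look like when filling in the in-core info
+from the on-disk superblock (field names as in the current cramfs
+code):
+
+	sbi->size   = le32_to_cpu(super.size);
+	sbi->blocks = le32_to_cpu(super.fsid.blocks);
+	sbi->files  = le32_to_cpu(super.fsid.files);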
+
+
+The other part of making cramfs more sharable is choosing a block
+size.  The options are:
+
+  1. Always 4096 bytes.
+
+  2. Writer chooses blocksize; kernel adapts but rejects blocksize >
+     PAGE_CACHE_SIZE.
+
+  3. Writer chooses blocksize; kernel adapts even to blocksize >
+     PAGE_CACHE_SIZE.
+
+It's easy enough to change the kernel to use a smaller value than
+PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
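+
+Roughly (a sketch only; block_data(i) and block_len(i) stand in for
+looking up the i'th sub-block via its <block_pointer>, and error
+handling is omitted):
+
+	for (i = 0; i < PAGE_CACHE_SIZE / blksize; i++)
+		bytes_filled += cramfs_uncompress_block(pgdata + i * blksize,
+					blksize, block_data(i), block_len(i));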
+
+The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+value don't get as good compression as they can.
+
+The cost of option 2 relative to option 1 is that the code uses
+variables instead of #define'd constants.  The gain is that people
+with kernels having larger PAGE_CACHE_SIZE can make use of that if
+they don't mind their cramfs being inaccessible to kernels with
+smaller PAGE_CACHE_SIZE values.
+
+Option 3 is easy to implement if we don't mind being CPU-inefficient:
+e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
+must be no larger than 32KB) and discard what it doesn't need.
+Getting readpage to read into all the covered pages is harder.
+
+The main advantage of option 3 over options 1 and 2 is better
+compression.  The cost is greater complexity.  Probably not worth it,
+but I hope someone
+will disagree.  (If it is implemented, then I'll re-use that code in
+e2compr.)
+
+
+Another cost of options 2 and 3 over option 1 is making mkcramfs use a
+different block size, but that just means adding and parsing a -b
+option.
+
+
+Inode Size
+----------
+
+Given that cramfs will probably be used for CDs etc. as well as just
+silicon ROMs, it might make sense to expand the inode a little from
+its current 12 bytes.  Inodes other than the root inode are followed
+by filename, so the expansion doesn't even have to be a multiple of 4
+bytes.
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
new file mode 100644
index 0000000..6c285efa
--- /dev/null
+++ b/fs/cramfs/inode.c
@@ -0,0 +1,525 @@
+/*
+ * Compressed rom filesystem for Linux.
+ *
+ * Copyright (C) 1999 Linus Torvalds.
+ *
+ * This file is released under the GPL.
+ */
+
+/*
+ * These are the VFS interfaces to the compressed rom filesystem.
+ * The actual compression is based on zlib, see the other files.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/cramfs_fs.h>
+#include <linux/slab.h>
+#include <linux/cramfs_fs_sb.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <asm/semaphore.h>
+
+#include <asm/uaccess.h>
+
+static struct super_operations cramfs_ops;
+static struct inode_operations cramfs_dir_inode_operations;
+static struct file_operations cramfs_directory_operations;
+static struct address_space_operations cramfs_aops;
+
+static DECLARE_MUTEX(read_mutex);
+
+
+/* These two macros may change in future, to provide better st_ino
+   semantics. */
+#define CRAMINO(x)	((x)->offset?(x)->offset<<2:1)
+#define OFFSET(x)	((x)->i_ino)
+
+static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inode * cramfs_inode)
+{
+	struct inode * inode = new_inode(sb);
+	static struct timespec zerotime;
+
+	if (inode) {
+		inode->i_mode = cramfs_inode->mode;
+		inode->i_uid = cramfs_inode->uid;
+		inode->i_size = cramfs_inode->size;
+		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_gid = cramfs_inode->gid;
+		/* Struct copy intentional */
+		inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
+		inode->i_ino = CRAMINO(cramfs_inode);
+		/* inode->i_nlink is left 1 - arguably wrong for directories,
+		   but it's the best we can do without reading the directory
+	           contents.  1 yields the right result in GNU find, even
+		   without -noleaf option. */
+		insert_inode_hash(inode);
+		if (S_ISREG(inode->i_mode)) {
+			inode->i_fop = &generic_ro_fops;
+			inode->i_data.a_ops = &cramfs_aops;
+		} else if (S_ISDIR(inode->i_mode)) {
+			inode->i_op = &cramfs_dir_inode_operations;
+			inode->i_fop = &cramfs_directory_operations;
+		} else if (S_ISLNK(inode->i_mode)) {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_data.a_ops = &cramfs_aops;
+		} else {
+			inode->i_size = 0;
+			inode->i_blocks = 0;
+			init_special_inode(inode, inode->i_mode,
+				old_decode_dev(cramfs_inode->size));
+		}
+	}
+	return inode;
+}
+
+/*
+ * We have our own block cache: don't fill up the buffer cache
+ * with the rom-image, because the way the filesystem is set
+ * up the accesses should be fairly regular and cached in the
+ * page cache and dentry tree anyway..
+ *
+ * This also acts as a way to guarantee contiguous areas of up to
+ * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * worry about end-of-buffer issues even when decompressing a full
+ * page cache.
+ */
+#define READ_BUFFERS (2)
+/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
+#define NEXT_BUFFER(_ix) ((_ix) ^ 1)
+
+/*
+ * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
+ * data that takes up more space than the original and with unlucky
+ * alignment.
+ */
+#define BLKS_PER_BUF_SHIFT	(2)
+#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
+#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_CACHE_SIZE)
+
+static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
+static unsigned buffer_blocknr[READ_BUFFERS];
+static struct super_block * buffer_dev[READ_BUFFERS];
+static int next_buffer;
+
+/*
+ * Returns a pointer to a buffer containing at least LEN bytes of
+ * filesystem starting at byte offset OFFSET into the filesystem.
+ */
+static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
+{
+	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+	struct page *pages[BLKS_PER_BUF];
+	unsigned i, blocknr, buffer, unread;
+	unsigned long devsize;
+	char *data;
+
+	if (!len)
+		return NULL;
+	blocknr = offset >> PAGE_CACHE_SHIFT;
+	offset &= PAGE_CACHE_SIZE - 1;
+
+	/* Check if an existing buffer already has the data.. */
+	for (i = 0; i < READ_BUFFERS; i++) {
+		unsigned int blk_offset;
+
+		if (buffer_dev[i] != sb)
+			continue;
+		if (blocknr < buffer_blocknr[i])
+			continue;
+		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+		blk_offset += offset;
+		if (blk_offset + len > BUFFER_SIZE)
+			continue;
+		return read_buffers[i] + blk_offset;
+	}
+
+	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
+
+	/* Ok, read in BLKS_PER_BUF pages completely first. */
+	unread = 0;
+	for (i = 0; i < BLKS_PER_BUF; i++) {
+		struct page *page = NULL;
+
+		if (blocknr + i < devsize) {
+			page = read_cache_page(mapping, blocknr + i,
+				(filler_t *)mapping->a_ops->readpage,
+				NULL);
+			/* synchronous error? */
+			if (IS_ERR(page))
+				page = NULL;
+		}
+		pages[i] = page;
+	}
+
+	for (i = 0; i < BLKS_PER_BUF; i++) {
+		struct page *page = pages[i];
+		if (page) {
+			wait_on_page_locked(page);
+			if (!PageUptodate(page)) {
+				/* asynchronous error */
+				page_cache_release(page);
+				pages[i] = NULL;
+			}
+		}
+	}
+
+	buffer = next_buffer;
+	next_buffer = NEXT_BUFFER(buffer);
+	buffer_blocknr[buffer] = blocknr;
+	buffer_dev[buffer] = sb;
+
+	data = read_buffers[buffer];
+	for (i = 0; i < BLKS_PER_BUF; i++) {
+		struct page *page = pages[i];
+		if (page) {
+			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+			kunmap(page);
+			page_cache_release(page);
+		} else
+			memset(data, 0, PAGE_CACHE_SIZE);
+		data += PAGE_CACHE_SIZE;
+	}
+	return read_buffers[buffer] + offset;
+}
+
+static void cramfs_put_super(struct super_block *sb)
+{
+	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
+}
+
+static int cramfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int i;
+	struct cramfs_super super;
+	unsigned long root_offset;
+	struct cramfs_sb_info *sbi;
+	struct inode *root;
+
+	sb->s_flags |= MS_RDONLY;
+
+	sbi = kmalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct cramfs_sb_info));
+
+	/* Invalidate the read buffers on mount: think disk change.. */
+	down(&read_mutex);
+	for (i = 0; i < READ_BUFFERS; i++)
+		buffer_blocknr[i] = -1;
+
+	/* Read the first block and get the superblock from it */
+	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
+	up(&read_mutex);
+
+	/* Do sanity checks on the superblock */
+	if (super.magic != CRAMFS_MAGIC) {
+		/* check at 512 byte offset */
+		down(&read_mutex);
+		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
+		up(&read_mutex);
+		if (super.magic != CRAMFS_MAGIC) {
+			if (!silent)
+				printk(KERN_ERR "cramfs: wrong magic\n");
+			goto out;
+		}
+	}
+
+	/* get feature flags first */
+	if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
+		printk(KERN_ERR "cramfs: unsupported filesystem features\n");
+		goto out;
+	}
+
+	/* Check that the root inode is in a sane state */
+	if (!S_ISDIR(super.root.mode)) {
+		printk(KERN_ERR "cramfs: root is not a directory\n");
+		goto out;
+	}
+	root_offset = super.root.offset << 2;
+	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
+		sbi->size=super.size;
+		sbi->blocks=super.fsid.blocks;
+		sbi->files=super.fsid.files;
+	} else {
+		sbi->size=1<<28;
+		sbi->blocks=0;
+		sbi->files=0;
+	}
+	sbi->magic=super.magic;
+	sbi->flags=super.flags;
+	if (root_offset == 0)
+		printk(KERN_INFO "cramfs: empty filesystem");
+	else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
+		 ((root_offset != sizeof(struct cramfs_super)) &&
+		  (root_offset != 512 + sizeof(struct cramfs_super))))
+	{
+		printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
+		goto out;
+	}
+
+	/* Set it all up.. */
+	sb->s_op = &cramfs_ops;
+	root = get_cramfs_inode(sb, &super.root);
+	if (!root)
+		goto out;
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		iput(root);
+		goto out;
+	}
+	return 0;
+out:
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static int cramfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = CRAMFS_MAGIC;
+	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_blocks = CRAMFS_SB(sb)->blocks;
+	buf->f_bfree = 0;
+	buf->f_bavail = 0;
+	buf->f_files = CRAMFS_SB(sb)->files;
+	buf->f_ffree = 0;
+	buf->f_namelen = CRAMFS_MAXPATHLEN;
+	return 0;
+}
+
+/*
+ * Read a cramfs directory entry.
+ */
+static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	char *buf;
+	unsigned int offset;
+	int copied;
+
+	/* Offset within the thing. */
+	offset = filp->f_pos;
+	if (offset >= inode->i_size)
+		return 0;
+	/* Directory entries are always 4-byte aligned */
+	if (offset & 3)
+		return -EINVAL;
+
+	buf = kmalloc(256, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	copied = 0;
+	while (offset < inode->i_size) {
+		struct cramfs_inode *de;
+		unsigned long nextoffset;
+		char *name;
+		ino_t ino;
+		mode_t mode;
+		int namelen, error;
+
+		down(&read_mutex);
+		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
+		name = (char *)(de+1);
+
+		/*
+		 * Namelengths on disk are shifted by two
+		 * and the name padded out to 4-byte boundaries
+		 * with zeroes.
+		 */
+		namelen = de->namelen << 2;
+		memcpy(buf, name, namelen);
+		ino = CRAMINO(de);
+		mode = de->mode;
+		up(&read_mutex);
+		nextoffset = offset + sizeof(*de) + namelen;
+		for (;;) {
+			if (!namelen) {
+				kfree(buf);
+				return -EIO;
+			}
+			if (buf[namelen-1])
+				break;
+			namelen--;
+		}
+		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
+		if (error)
+			break;
+
+		offset = nextoffset;
+		filp->f_pos = offset;
+		copied++;
+	}
+	kfree(buf);
+	return 0;
+}
+
+/*
+ * Lookup and fill in the inode data..
+ */
+static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	unsigned int offset = 0;
+	int sorted;
+
+	down(&read_mutex);
+	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
+	while (offset < dir->i_size) {
+		struct cramfs_inode *de;
+		char *name;
+		int namelen, retval;
+
+		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+256);
+		name = (char *)(de+1);
+
+		/* Try to take advantage of sorted directories */
+		if (sorted && (dentry->d_name.name[0] < name[0]))
+			break;
+
+		namelen = de->namelen << 2;
+		offset += sizeof(*de) + namelen;
+
+		/* Quick check that the name is roughly the right length */
+		if (((dentry->d_name.len + 3) & ~3) != namelen)
+			continue;
+
+		for (;;) {
+			if (!namelen) {
+				up(&read_mutex);
+				return ERR_PTR(-EIO);
+			}
+			if (name[namelen-1])
+				break;
+			namelen--;
+		}
+		if (namelen != dentry->d_name.len)
+			continue;
+		retval = memcmp(dentry->d_name.name, name, namelen);
+		if (retval > 0)
+			continue;
+		if (!retval) {
+			struct cramfs_inode entry = *de;
+			up(&read_mutex);
+			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
+			return NULL;
+		}
+		/* else (retval < 0) */
+		if (sorted)
+			break;
+	}
+	up(&read_mutex);
+	d_add(dentry, NULL);
+	return NULL;
+}
+
+static int cramfs_readpage(struct file *file, struct page * page)
+{
+	struct inode *inode = page->mapping->host;
+	u32 maxblock, bytes_filled;
+	void *pgdata;
+
+	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	bytes_filled = 0;
+	if (page->index < maxblock) {
+		struct super_block *sb = inode->i_sb;
+		u32 blkptr_offset = OFFSET(inode) + page->index*4;
+		u32 start_offset, compr_len;
+
+		start_offset = OFFSET(inode) + maxblock*4;
+		down(&read_mutex);
+		if (page->index)
+			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
+		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
+		up(&read_mutex);
+		pgdata = kmap(page);
+		if (compr_len == 0)
+			; /* hole */
+		else {
+			down(&read_mutex);
+			bytes_filled = cramfs_uncompress_block(pgdata,
+				 PAGE_CACHE_SIZE,
+				 cramfs_read(sb, start_offset, compr_len),
+				 compr_len);
+			up(&read_mutex);
+		}
+	} else
+		pgdata = kmap(page);
+	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+	kunmap(page);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+	return 0;
+}
+
+static struct address_space_operations cramfs_aops = {
+	.readpage = cramfs_readpage
+};
+
+/*
+ * Our operations:
+ */
+
+/*
+ * A directory can only readdir
+ */
+static struct file_operations cramfs_directory_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.readdir	= cramfs_readdir,
+};
+
+static struct inode_operations cramfs_dir_inode_operations = {
+	.lookup		= cramfs_lookup,
+};
+
+static struct super_operations cramfs_ops = {
+	.put_super	= cramfs_put_super,
+	.remount_fs	= cramfs_remount,
+	.statfs		= cramfs_statfs,
+};
+
+static struct super_block *cramfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
+}
+
+static struct file_system_type cramfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "cramfs",
+	.get_sb		= cramfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_cramfs_fs(void)
+{
+	cramfs_uncompress_init();
+	return register_filesystem(&cramfs_fs_type);
+}
+
+static void __exit exit_cramfs_fs(void)
+{
+	cramfs_uncompress_exit();
+	unregister_filesystem(&cramfs_fs_type);
+}
+
+module_init(init_cramfs_fs)
+module_exit(exit_cramfs_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/cramfs/uncompress.c b/fs/cramfs/uncompress.c
new file mode 100644
index 0000000..5034365
--- /dev/null
+++ b/fs/cramfs/uncompress.c
@@ -0,0 +1,77 @@
+/*
+ * uncompress.c
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * cramfs interfaces to the uncompression library. There are really
+ * just three entrypoints:
+ *
+ *  - cramfs_uncompress_init() - called to initialize the thing.
+ *  - cramfs_uncompress_exit() - tell me when you're done
+ *  - cramfs_uncompress_block() - uncompress a block.
+ *
+ * NOTE NOTE NOTE! The uncompression is entirely single-threaded. We
+ * only have one stream, and we'll initialize it only once even if it
+ * then is used by multiple filesystems.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+static z_stream stream;
+static int initialized;
+
+/* Returns length of decompressed data. */
+int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen)
+{
+	int err;
+
+	stream.next_in = src;
+	stream.avail_in = srclen;
+
+	stream.next_out = dst;
+	stream.avail_out = dstlen;
+
+	err = zlib_inflateReset(&stream);
+	if (err != Z_OK) {
+		printk("zlib_inflateReset error %d\n", err);
+		zlib_inflateEnd(&stream);
+		zlib_inflateInit(&stream);
+	}
+
+	err = zlib_inflate(&stream, Z_FINISH);
+	if (err != Z_STREAM_END)
+		goto err;
+	return stream.total_out;
+
+err:
+	printk("Error %d while decompressing!\n", err);
+	printk("%p(%d)->%p(%d)\n", src, srclen, dst, dstlen);
+	return 0;
+}
+
+int cramfs_uncompress_init(void)
+{
+	if (!initialized++) {
+		stream.workspace = vmalloc(zlib_inflate_workspacesize());
+		if (!stream.workspace) {
+			initialized = 0;
+			return -ENOMEM;
+		}
+		stream.next_in = NULL;
+		stream.avail_in = 0;
+		zlib_inflateInit(&stream);
+	}
+	return 0;
+}
+
+int cramfs_uncompress_exit(void)
+{
+	if (!--initialized) {
+		zlib_inflateEnd(&stream);
+		vfree(stream.workspace);
+	}
+	return 0;
+}
diff --git a/fs/dcache.c b/fs/dcache.c
new file mode 100644
index 0000000..496a4e0
--- /dev/null
+++ b/fs/dcache.c
@@ -0,0 +1,1764 @@
+/*
+ * fs/dcache.c
+ *
+ * Complete reimplementation
+ * (C) 1997 Thomas Schoebel-Theuer,
+ * with heavy changes by Linus Torvalds
+ */
+
+/*
+ * Notes on the allocation strategy:
+ *
+ * The dcache is a master of the icache - whenever a dcache entry
+ * exists, the inode will always exist. "iput()" is done either when
+ * the dcache entry is deleted or garbage collected.
+ */
+
+#include <linux/config.h>
+#include <linux/syscalls.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/file.h>
+#include <asm/uaccess.h>
+#include <linux/security.h>
+#include <linux/seqlock.h>
+#include <linux/swap.h>
+#include <linux/bootmem.h>
+
+/* #define DCACHE_DEBUG 1 */
+
+int sysctl_vfs_cache_pressure = 100;
+EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
+
+ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
+seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+
+EXPORT_SYMBOL(dcache_lock);
+
+static kmem_cache_t *dentry_cache; 
+
+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
+
+/*
+ * This is the single most critical data structure when it comes
+ * to the dcache: the hashtable for lookups. Somebody should try
+ * to make this good - I've just made it work.
+ *
+ * This hash-function tries to avoid losing too many bits of hash
+ * information, yet avoid using a prime hash-size or similar.
+ */
+#define D_HASHBITS     d_hash_shift
+#define D_HASHMASK     d_hash_mask
+
+static unsigned int d_hash_mask;
+static unsigned int d_hash_shift;
+static struct hlist_head *dentry_hashtable;
+static LIST_HEAD(dentry_unused);
+
+/* Statistics gathering. */
+struct dentry_stat_t dentry_stat = {
+	.age_limit = 45,
+};
+
+static void d_callback(struct rcu_head *head)
+{
+	struct dentry * dentry = container_of(head, struct dentry, d_rcu);
+
+	if (dname_external(dentry))
+		kfree(dentry->d_name.name);
+	kmem_cache_free(dentry_cache, dentry); 
+}
+
+/*
+ * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
+ * inside dcache_lock.
+ */
+static void d_free(struct dentry *dentry)
+{
+	if (dentry->d_op && dentry->d_op->d_release)
+		dentry->d_op->d_release(dentry);
+ 	call_rcu(&dentry->d_rcu, d_callback);
+}
+
+/*
+ * Release the dentry's inode, using the filesystem
+ * d_iput() operation if defined.
+ * Called with dcache_lock and per dentry lock held, drops both.
+ */
+static inline void dentry_iput(struct dentry * dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	if (inode) {
+		dentry->d_inode = NULL;
+		list_del_init(&dentry->d_alias);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&dcache_lock);
+		if (dentry->d_op && dentry->d_op->d_iput)
+			dentry->d_op->d_iput(dentry, inode);
+		else
+			iput(inode);
+	} else {
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&dcache_lock);
+	}
+}
+
+/* 
+ * This is dput
+ *
+ * This is complicated by the fact that we do not want to put
+ * dentries that are no longer on any hash chain on the unused
+ * list: we'd much rather just get rid of them immediately.
+ *
+ * However, that implies that we have to traverse the dentry
+ * tree upwards to the parents which might _also_ now be
+ * scheduled for deletion (it may have been only waiting for
+ * its last child to go away).
+ *
+ * This tail recursion is done by hand as we don't want to depend
+ * on the compiler to always get this right (gcc generally doesn't).
+ * Real recursion would eat up our stack space.
+ */
+
+/*
+ * dput - release a dentry
+ * @dentry: dentry to release 
+ *
+ * Release a dentry. This will drop the usage count and if appropriate
+ * call the dentry unlink method as well as removing it from the queues and
+ * releasing its resources. If the parent dentries were scheduled for release
+ * they too may now get deleted.
+ *
+ * no dcache lock, please.
+ */
+
+void dput(struct dentry *dentry)
+{
+	if (!dentry)
+		return;
+
+repeat:
+	if (atomic_read(&dentry->d_count) == 1)
+		might_sleep();
+	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
+		return;
+
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count)) {
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&dcache_lock);
+		return;
+	}
+
+	/*
+	 * AV: ->d_delete() is _NOT_ allowed to block now.
+	 */
+	if (dentry->d_op && dentry->d_op->d_delete) {
+		if (dentry->d_op->d_delete(dentry))
+			goto unhash_it;
+	}
+	/* Unreachable? Get rid of it */
+ 	if (d_unhashed(dentry))
+		goto kill_it;
+  	if (list_empty(&dentry->d_lru)) {
+  		dentry->d_flags |= DCACHE_REFERENCED;
+  		list_add(&dentry->d_lru, &dentry_unused);
+  		dentry_stat.nr_unused++;
+  	}
+ 	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+	return;
+
+unhash_it:
+	__d_drop(dentry);
+
+kill_it: {
+		struct dentry *parent;
+
+		/* If dentry was on d_lru list
+		 * delete it from there
+		 */
+  		if (!list_empty(&dentry->d_lru)) {
+  			list_del(&dentry->d_lru);
+  			dentry_stat.nr_unused--;
+  		}
+  		list_del(&dentry->d_child);
+		dentry_stat.nr_dentry--;	/* For d_free, below */
+		/*drops the locks, at that point nobody can reach this dentry */
+		dentry_iput(dentry);
+		parent = dentry->d_parent;
+		d_free(dentry);
+		if (dentry == parent)
+			return;
+		dentry = parent;
+		goto repeat;
+	}
+}
+
+/**
+ * d_invalidate - invalidate a dentry
+ * @dentry: dentry to invalidate
+ *
+ * Try to invalidate the dentry if it turns out to be
+ * possible. If there are other dentries that can be
+ * reached through this one we can't delete it and we
+ * return -EBUSY. On success we return 0.
+ *
+ * no dcache lock.
+ */
+ 
+int d_invalidate(struct dentry * dentry)
+{
+	/*
+	 * If it's already been dropped, return OK.
+	 */
+	spin_lock(&dcache_lock);
+	if (d_unhashed(dentry)) {
+		spin_unlock(&dcache_lock);
+		return 0;
+	}
+	/*
+	 * Check whether to do a partial shrink_dcache
+	 * to get rid of unused child entries.
+	 */
+	if (!list_empty(&dentry->d_subdirs)) {
+		spin_unlock(&dcache_lock);
+		shrink_dcache_parent(dentry);
+		spin_lock(&dcache_lock);
+	}
+
+	/*
+	 * Somebody else still using it?
+	 *
+	 * If it's a directory, we can't drop it
+	 * for fear of somebody re-populating it
+	 * with children (even though dropping it
+	 * would make it unreachable from the root,
+	 * we might still populate it if it was a
+	 * working directory or similar).
+	 */
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count) > 1) {
+		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&dcache_lock);
+			return -EBUSY;
+		}
+	}
+
+	__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+	return 0;
+}
+
+/* This should be called _only_ with dcache_lock held */
+
+static inline struct dentry * __dget_locked(struct dentry *dentry)
+{
+	atomic_inc(&dentry->d_count);
+	if (!list_empty(&dentry->d_lru)) {
+		dentry_stat.nr_unused--;
+		list_del_init(&dentry->d_lru);
+	}
+	return dentry;
+}
+
+struct dentry * dget_locked(struct dentry *dentry)
+{
+	return __dget_locked(dentry);
+}
+
+/**
+ * d_find_alias - grab a hashed alias of inode
+ * @inode: inode in question
+ * @want_discon:  flag, used by d_splice_alias, to request
+ *          that only a DISCONNECTED alias be returned.
+ *
+ * If inode has a hashed alias, or is a directory and has any alias,
+ * acquire the reference to alias and return it. Otherwise return NULL.
+ * Notice that if inode is a directory there can be only one alias and
+ * it can be unhashed only if it has no children, or if it is the root
+ * of a filesystem.
+ *
+ * If the inode has a DCACHE_DISCONNECTED alias, then prefer
+ * any other hashed alias over that one unless @want_discon is set,
+ * in which case only return a DCACHE_DISCONNECTED alias.
+ */
+
+static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
+{
+	struct list_head *head, *next, *tmp;
+	struct dentry *alias, *discon_alias=NULL;
+
+	head = &inode->i_dentry;
+	next = inode->i_dentry.next;
+	while (next != head) {
+		tmp = next;
+		next = tmp->next;
+		prefetch(next);
+		alias = list_entry(tmp, struct dentry, d_alias);
+ 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+			if (alias->d_flags & DCACHE_DISCONNECTED)
+				discon_alias = alias;
+			else if (!want_discon) {
+				__dget_locked(alias);
+				return alias;
+			}
+		}
+	}
+	if (discon_alias)
+		__dget_locked(discon_alias);
+	return discon_alias;
+}
+
+struct dentry * d_find_alias(struct inode *inode)
+{
+	struct dentry *de;
+	spin_lock(&dcache_lock);
+	de = __d_find_alias(inode, 0);
+	spin_unlock(&dcache_lock);
+	return de;
+}
+
+/*
+ *	Try to kill dentries associated with this inode.
+ * WARNING: you must own a reference to inode.
+ */
+void d_prune_aliases(struct inode *inode)
+{
+	struct list_head *tmp, *head = &inode->i_dentry;
+restart:
+	spin_lock(&dcache_lock);
+	tmp = head;
+	while ((tmp = tmp->next) != head) {
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
+		spin_lock(&dentry->d_lock);
+		if (!atomic_read(&dentry->d_count)) {
+			__dget_locked(dentry);
+			__d_drop(dentry);
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&dcache_lock);
+			dput(dentry);
+			goto restart;
+		}
+		spin_unlock(&dentry->d_lock);
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
+ * Throw away a dentry - free the inode, dput the parent.
+ * This requires that the LRU list has already been
+ * removed.
+ * Called with dcache_lock, drops it and then regains.
+ */
+static inline void prune_one_dentry(struct dentry * dentry)
+{
+	struct dentry * parent;
+
+	__d_drop(dentry);
+	list_del(&dentry->d_child);
+	dentry_stat.nr_dentry--;	/* For d_free, below */
+	dentry_iput(dentry);
+	parent = dentry->d_parent;
+	d_free(dentry);
+	if (parent != dentry)
+		dput(parent);
+	spin_lock(&dcache_lock);
+}
+
+/**
+ * prune_dcache - shrink the dcache
+ * @count: number of entries to try and free
+ *
+ * Shrink the dcache. This is done when we need
+ * more memory, or simply when we need to unmount
+ * something (at which point we need to unuse
+ * all dentries).
+ *
+ * This function may fail to free any resources if
+ * all the dentries are in use.
+ */
+ 
+static void prune_dcache(int count)
+{
+	spin_lock(&dcache_lock);
+	for (; count ; count--) {
+		struct dentry *dentry;
+		struct list_head *tmp;
+
+		cond_resched_lock(&dcache_lock);
+
+		tmp = dentry_unused.prev;
+		if (tmp == &dentry_unused)
+			break;
+		list_del_init(tmp);
+		prefetch(dentry_unused.prev);
+ 		dentry_stat.nr_unused--;
+		dentry = list_entry(tmp, struct dentry, d_lru);
+
+ 		spin_lock(&dentry->d_lock);
+		/*
+		 * We found an inuse dentry which was not removed from
+		 * dentry_unused because of laziness during lookup.  Do not free
+		 * it - just keep it off the dentry_unused list.
+		 */
+ 		if (atomic_read(&dentry->d_count)) {
+ 			spin_unlock(&dentry->d_lock);
+			continue;
+		}
+		/* If the dentry was recently referenced, don't free it. */
+		if (dentry->d_flags & DCACHE_REFERENCED) {
+			dentry->d_flags &= ~DCACHE_REFERENCED;
+ 			list_add(&dentry->d_lru, &dentry_unused);
+ 			dentry_stat.nr_unused++;
+ 			spin_unlock(&dentry->d_lock);
+			continue;
+		}
+		prune_one_dentry(dentry);
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
+ * Shrink the dcache for the specified super block.
+ * This allows us to unmount a device without disturbing
+ * the dcache for the other devices.
+ *
+ * This implementation makes just two traversals of the
+ * unused list.  On the first pass we move the selected
+ * dentries to the most recent end, and on the second
+ * pass we free them.  The second pass must restart after
+ * each dput(), but since the target dentries are all at
+ * the end, it's really just a single traversal.
+ */
+
+/**
+ * shrink_dcache_sb - shrink dcache for a superblock
+ * @sb: superblock
+ *
+ * Shrink the dcache for the specified super block. This
+ * is used to free the dcache before unmounting a file
+ * system
+ */
+
+void shrink_dcache_sb(struct super_block * sb)
+{
+	struct list_head *tmp, *next;
+	struct dentry *dentry;
+
+	/*
+	 * Pass one ... move the dentries for the specified
+	 * superblock to the most recent end of the unused list.
+	 */
+	spin_lock(&dcache_lock);
+	next = dentry_unused.next;
+	while (next != &dentry_unused) {
+		tmp = next;
+		next = tmp->next;
+		dentry = list_entry(tmp, struct dentry, d_lru);
+		if (dentry->d_sb != sb)
+			continue;
+		list_del(tmp);
+		list_add(tmp, &dentry_unused);
+	}
+
+	/*
+	 * Pass two ... free the dentries for this superblock.
+	 */
+repeat:
+	next = dentry_unused.next;
+	while (next != &dentry_unused) {
+		tmp = next;
+		next = tmp->next;
+		dentry = list_entry(tmp, struct dentry, d_lru);
+		if (dentry->d_sb != sb)
+			continue;
+		dentry_stat.nr_unused--;
+		list_del_init(tmp);
+		spin_lock(&dentry->d_lock);
+		if (atomic_read(&dentry->d_count)) {
+			spin_unlock(&dentry->d_lock);
+			continue;
+		}
+		prune_one_dentry(dentry);
+		goto repeat;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
+ * Search for at least 1 mount point in the dentry's subdirs.
+ * We descend to the next level whenever the d_subdirs
+ * list is non-empty and continue searching.
+ */
+ 
+/**
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
+ *
+ * Return true if the parent or its subdirectories contain
+ * a mount point
+ */
+ 
+int have_submounts(struct dentry *parent)
+{
+	struct dentry *this_parent = parent;
+	struct list_head *next;
+
+	spin_lock(&dcache_lock);
+	if (d_mountpoint(parent))
+		goto positive;
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct list_head *tmp = next;
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+		next = tmp->next;
+		/* Have we found a mount point ? */
+		if (d_mountpoint(dentry))
+			goto positive;
+		if (!list_empty(&dentry->d_subdirs)) {
+			this_parent = dentry;
+			goto repeat;
+		}
+	}
+	/*
+	 * All done at this level ... ascend and resume the search.
+	 */
+	if (this_parent != parent) {
+		next = this_parent->d_child.next; 
+		this_parent = this_parent->d_parent;
+		goto resume;
+	}
+	spin_unlock(&dcache_lock);
+	return 0; /* No mount points found in tree */
+positive:
+	spin_unlock(&dcache_lock);
+	return 1;
+}
+
+/*
+ * Search the dentry child list for the specified parent,
+ * and move any unused dentries to the end of the unused
+ * list for prune_dcache(). We descend to the next level
+ * whenever the d_subdirs list is non-empty and continue
+ * searching.
+ *
+ * It returns zero iff there are no unused children;
+ * otherwise it returns the number of children moved to
+ * the end of the unused list. This may not be the total
+ * number of unused children, because select_parent can
+ * drop the lock and return early due to latency
+ * constraints.
+ */
+static int select_parent(struct dentry * parent)
+{
+	struct dentry *this_parent = parent;
+	struct list_head *next;
+	int found = 0;
+
+	spin_lock(&dcache_lock);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct list_head *tmp = next;
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+		next = tmp->next;
+
+		if (!list_empty(&dentry->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&dentry->d_lru);
+		}
+		/* 
+		 * move only zero ref count dentries to the end 
+		 * of the unused list for prune_dcache
+		 */
+		if (!atomic_read(&dentry->d_count)) {
+			list_add(&dentry->d_lru, dentry_unused.prev);
+			dentry_stat.nr_unused++;
+			found++;
+		}
+
+		/*
+		 * We can return to the caller if we have found some (this
+		 * ensures forward progress). We'll be coming back to find
+		 * the rest.
+		 */
+		if (found && need_resched())
+			goto out;
+
+		/*
+		 * Descend a level if the d_subdirs list is non-empty.
+		 */
+		if (!list_empty(&dentry->d_subdirs)) {
+			this_parent = dentry;
+#ifdef DCACHE_DEBUG
+printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
+dentry->d_parent->d_name.name, dentry->d_name.name, found);
+#endif
+			goto repeat;
+		}
+	}
+	/*
+	 * All done at this level ... ascend and resume the search.
+	 */
+	if (this_parent != parent) {
+		next = this_parent->d_child.next; 
+		this_parent = this_parent->d_parent;
+#ifdef DCACHE_DEBUG
+printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
+this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
+#endif
+		goto resume;
+	}
+out:
+	spin_unlock(&dcache_lock);
+	return found;
+}
+
+/**
+ * shrink_dcache_parent - prune dcache
+ * @parent: parent of entries to prune
+ *
+ * Prune the dcache to remove unused children of the parent dentry.
+ */
+ 
+void shrink_dcache_parent(struct dentry * parent)
+{
+	int found;
+
+	while ((found = select_parent(parent)) != 0)
+		prune_dcache(found);
+}
+
+/**
+ * shrink_dcache_anon - further prune the cache
+ * @head: head of d_hash list of dentries to prune
+ *
+ * Prune the dentries that are anonymous
+ *
+ * Parsing of the d_hash list does not use hlist_for_each_rcu() as it
+ * is done under dcache_lock.
+ *
+ */
+void shrink_dcache_anon(struct hlist_head *head)
+{
+	struct hlist_node *lp;
+	int found;
+	do {
+		found = 0;
+		spin_lock(&dcache_lock);
+		hlist_for_each(lp, head) {
+			struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
+			if (!list_empty(&this->d_lru)) {
+				dentry_stat.nr_unused--;
+				list_del_init(&this->d_lru);
+			}
+
+			/* 
+			 * move only zero ref count dentries to the end 
+			 * of the unused list for prune_dcache
+			 */
+			if (!atomic_read(&this->d_count)) {
+				list_add_tail(&this->d_lru, &dentry_unused);
+				dentry_stat.nr_unused++;
+				found++;
+			}
+		}
+		spin_unlock(&dcache_lock);
+		prune_dcache(found);
+	} while(found);
+}
+
+/*
+ * Scan `nr' dentries and return the number which remain.
+ *
+ * We need to avoid reentering the filesystem if the caller is performing a
+ * GFP_NOFS allocation attempt.  One example deadlock is:
+ *
+ * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
+ * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
+ * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
+ *
+ * In this case we return -1 to tell the caller that we bailed.
+ */
+static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
+{
+	if (nr) {
+		if (!(gfp_mask & __GFP_FS))
+			return -1;
+		prune_dcache(nr);
+	}
+	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+}
+
+/**
+ * d_alloc	-	allocate a dcache entry
+ * @parent: parent of entry to allocate
+ * @name: qstr of the name
+ *
+ * Allocates a dentry. It returns %NULL if there is insufficient memory
+ * available. On a success the dentry is returned. The name passed in is
+ * copied and the copy passed in may be reused after this call.
+ */
+ 
+struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+{
+	struct dentry *dentry;
+	char *dname;
+
+	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+	if (!dentry)
+		return NULL;
+
+	if (name->len > DNAME_INLINE_LEN-1) {
+		dname = kmalloc(name->len + 1, GFP_KERNEL);
+		if (!dname) {
+			kmem_cache_free(dentry_cache, dentry); 
+			return NULL;
+		}
+	} else  {
+		dname = dentry->d_iname;
+	}	
+	dentry->d_name.name = dname;
+
+	dentry->d_name.len = name->len;
+	dentry->d_name.hash = name->hash;
+	memcpy(dname, name->name, name->len);
+	dname[name->len] = 0;
+
+	atomic_set(&dentry->d_count, 1);
+	dentry->d_flags = DCACHE_UNHASHED;
+	spin_lock_init(&dentry->d_lock);
+	dentry->d_inode = NULL;
+	dentry->d_parent = NULL;
+	dentry->d_sb = NULL;
+	dentry->d_op = NULL;
+	dentry->d_fsdata = NULL;
+	dentry->d_mounted = 0;
+	dentry->d_cookie = NULL;
+	INIT_HLIST_NODE(&dentry->d_hash);
+	INIT_LIST_HEAD(&dentry->d_lru);
+	INIT_LIST_HEAD(&dentry->d_subdirs);
+	INIT_LIST_HEAD(&dentry->d_alias);
+
+	if (parent) {
+		dentry->d_parent = dget(parent);
+		dentry->d_sb = parent->d_sb;
+	} else {
+		INIT_LIST_HEAD(&dentry->d_child);
+	}
+
+	spin_lock(&dcache_lock);
+	if (parent)
+		list_add(&dentry->d_child, &parent->d_subdirs);
+	dentry_stat.nr_dentry++;
+	spin_unlock(&dcache_lock);
+
+	return dentry;
+}
+
+struct dentry *d_alloc_name(struct dentry *parent, const char *name)
+{
+	struct qstr q;
+
+	q.name = name;
+	q.len = strlen(name);
+	q.hash = full_name_hash(q.name, q.len);
+	return d_alloc(parent, &q);
+}
+
+/**
+ * d_instantiate - fill in inode information for a dentry
+ * @entry: dentry to complete
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry.
+ *
+ * This turns negative dentries into productive full members
+ * of society.
+ *
+ * NOTE! This assumes that the inode count has been incremented
+ * (or otherwise set) by the caller to indicate that it is now
+ * in use by the dcache.
+ */
+ 
+void d_instantiate(struct dentry *entry, struct inode * inode)
+{
+	if (!list_empty(&entry->d_alias)) BUG();
+	spin_lock(&dcache_lock);
+	if (inode)
+		list_add(&entry->d_alias, &inode->i_dentry);
+	entry->d_inode = inode;
+	spin_unlock(&dcache_lock);
+	security_d_instantiate(entry, inode);
+}
+
+/**
+ * d_instantiate_unique - instantiate a non-aliased dentry
+ * @entry: dentry to instantiate
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry. On success, it returns NULL.
+ * If an unhashed alias of "entry" already exists, then we return the
+ * aliased dentry instead.
+ *
+ * Note that in order to avoid conflicts with rename() etc, the caller
+ * had better be holding the parent directory semaphore.
+ */
+struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
+{
+	struct dentry *alias;
+	int len = entry->d_name.len;
+	const char *name = entry->d_name.name;
+	unsigned int hash = entry->d_name.hash;
+
+	BUG_ON(!list_empty(&entry->d_alias));
+	spin_lock(&dcache_lock);
+	if (!inode)
+		goto do_negative;
+	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+		struct qstr *qstr = &alias->d_name;
+
+		if (qstr->hash != hash)
+			continue;
+		if (alias->d_parent != entry->d_parent)
+			continue;
+		if (qstr->len != len)
+			continue;
+		if (memcmp(qstr->name, name, len))
+			continue;
+		dget_locked(alias);
+		spin_unlock(&dcache_lock);
+		BUG_ON(!d_unhashed(alias));
+		return alias;
+	}
+	list_add(&entry->d_alias, &inode->i_dentry);
+do_negative:
+	entry->d_inode = inode;
+	spin_unlock(&dcache_lock);
+	security_d_instantiate(entry, inode);
+	return NULL;
+}
+EXPORT_SYMBOL(d_instantiate_unique);
+
+/**
+ * d_alloc_root - allocate root dentry
+ * @root_inode: inode to allocate the root for
+ *
+ * Allocate a root ("/") dentry for the inode given. The inode is
+ * instantiated and returned. %NULL is returned if there is insufficient
+ * memory or the inode passed is %NULL.
+ */
+ 
+struct dentry * d_alloc_root(struct inode * root_inode)
+{
+	struct dentry *res = NULL;
+
+	if (root_inode) {
+		static const struct qstr name = { .name = "/", .len = 1 };
+
+		res = d_alloc(NULL, &name);
+		if (res) {
+			res->d_sb = root_inode->i_sb;
+			res->d_parent = res;
+			d_instantiate(res, root_inode);
+		}
+	}
+	return res;
+}
+
+static inline struct hlist_head *d_hash(struct dentry *parent,
+					unsigned long hash)
+{
+	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
+	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+	return dentry_hashtable + (hash & D_HASHMASK);
+}
+
+/**
+ * d_alloc_anon - allocate an anonymous dentry
+ * @inode: inode to allocate the dentry for
+ *
+ * This is similar to d_alloc_root.  It is used by filesystems when
+ * creating a dentry for a given inode, often in the process of 
+ * mapping a filehandle to a dentry.  The returned dentry may be
+ * anonymous, or may have a full name (if the inode was already
+ * in the cache).  The file system may need to make further
+ * efforts to connect this dentry into the dcache properly.
+ *
+ * When called on a directory inode, we must ensure that
+ * the inode only ever has one dentry.  If a dentry is
+ * found, that is returned instead of allocating a new one.
+ *
+ * On successful return, the reference to the inode has been transferred
+ * to the dentry.  If %NULL is returned (indicating kmalloc failure),
+ * the reference on the inode has not been released.
+ */
+
+struct dentry * d_alloc_anon(struct inode *inode)
+{
+	static const struct qstr anonstring = { .name = "" };
+	struct dentry *tmp;
+	struct dentry *res;
+
+	if ((res = d_find_alias(inode))) {
+		iput(inode);
+		return res;
+	}
+
+	tmp = d_alloc(NULL, &anonstring);
+	if (!tmp)
+		return NULL;
+
+	tmp->d_parent = tmp; /* make sure dput doesn't croak */
+	
+	spin_lock(&dcache_lock);
+	res = __d_find_alias(inode, 0);
+	if (!res) {
+		/* attach a disconnected dentry */
+		res = tmp;
+		tmp = NULL;
+		spin_lock(&res->d_lock);
+		res->d_sb = inode->i_sb;
+		res->d_parent = res;
+		res->d_inode = inode;
+		res->d_flags |= DCACHE_DISCONNECTED;
+		res->d_flags &= ~DCACHE_UNHASHED;
+		list_add(&res->d_alias, &inode->i_dentry);
+		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
+		spin_unlock(&res->d_lock);
+
+		inode = NULL; /* don't drop reference */
+	}
+	spin_unlock(&dcache_lock);
+
+	if (inode)
+		iput(inode);
+	if (tmp)
+		dput(tmp);
+	return res;
+}
+
+
+/**
+ * d_splice_alias - splice a disconnected dentry into the tree if one exists
+ * @inode:  the inode which may have a disconnected dentry
+ * @dentry: a negative dentry which we want to point to the inode.
+ *
+ * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
+ * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
+ * and return it, else simply d_add the inode to the dentry and return NULL.
+ *
+ * This is needed in the lookup routine of any filesystem that is exportable
+ * (via knfsd) so that we can build dcache paths to directories effectively.
+ *
+ * If a dentry was found and moved, then it is returned.  Otherwise NULL
+ * is returned.  This matches the expected return value of ->lookup.
+ *
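+ * A sketch of an exportable filesystem's ->lookup() using this helper
+ * (the foofs_* names are purely illustrative, not part of this file):
+ *
+ *	static struct dentry *foofs_lookup(struct inode *dir,
+ *					   struct dentry *dentry,
+ *					   struct nameidata *nd)
+ *	{
+ *		struct inode *inode = foofs_iget_by_name(dir, &dentry->d_name);
+ *
+ *		if (IS_ERR(inode))
+ *			return ERR_PTR(PTR_ERR(inode));
+ *		/* inode may be NULL here, giving a negative dentry */
+ *		return d_splice_alias(inode, dentry);
+ *	}
+ *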
+ */
+struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+{
+	struct dentry *new = NULL;
+
+	if (inode) {
+		spin_lock(&dcache_lock);
+		new = __d_find_alias(inode, 1);
+		if (new) {
+			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
+			spin_unlock(&dcache_lock);
+			security_d_instantiate(new, inode);
+			d_rehash(dentry);
+			d_move(new, dentry);
+			iput(inode);
+		} else {
+			/* d_instantiate takes dcache_lock, so we do it by hand */
+			list_add(&dentry->d_alias, &inode->i_dentry);
+			dentry->d_inode = inode;
+			spin_unlock(&dcache_lock);
+			security_d_instantiate(dentry, inode);
+			d_rehash(dentry);
+		}
+	} else
+		d_add(dentry, inode);
+	return new;
+}
+
+
+/**
+ * d_lookup - search for a dentry
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ *
+ * Searches the children of the parent dentry for the name in question. If
+ * the dentry is found, its reference count is incremented and the dentry
+ * is returned. The caller must use dput() to release the entry when it has
+ * finished using it. %NULL is returned on failure.
+ *
+ * __d_lookup is dcache_lock free. The hash list is protected using RCU.
+ * Memory barriers are used while updating and doing lockless traversal. 
+ * To avoid races with d_move while rename is happening, d_lock is used.
+ *
+ * Overflows in memcmp() while a d_move() is in progress are avoided by
+ * keeping the length and name pointer together in one qstr structure.
+ *
+ * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
+ * lookup is going on.
+ *
+ * dentry_unused list is not updated even if lookup finds the required dentry
+ * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
+ * select_parent and __dget_locked. This laziness saves lookup from having
+ * to take dcache_lock.
+ *
+ * d_lookup() is protected against concurrent renames in some unrelated
+ * directory by using the seqlock_t rename_lock.
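+ *
+ * A minimal illustrative caller (hypothetical, not from this file):
+ *
+ *	struct qstr this = { .name = "foo", .len = 3 };
+ *	struct dentry *child;
+ *
+ *	this.hash = full_name_hash(this.name, this.len);
+ *	child = d_lookup(parent, &this);
+ *	if (child) {
+ *		...
+ *		dput(child);
+ *	}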
+ */
+
+struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
+{
+	struct dentry * dentry = NULL;
+	unsigned long seq;
+
+	do {
+		seq = read_seqbegin(&rename_lock);
+		dentry = __d_lookup(parent, name);
+		if (dentry)
+			break;
+	} while (read_seqretry(&rename_lock, seq));
+	return dentry;
+}
+
+struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
+{
+	unsigned int len = name->len;
+	unsigned int hash = name->hash;
+	const unsigned char *str = name->name;
+	struct hlist_head *head = d_hash(parent,hash);
+	struct dentry *found = NULL;
+	struct hlist_node *node;
+
+	rcu_read_lock();
+	
+	hlist_for_each_rcu(node, head) {
+		struct dentry *dentry; 
+		struct qstr *qstr;
+
+		dentry = hlist_entry(node, struct dentry, d_hash);
+
+		if (dentry->d_name.hash != hash)
+			continue;
+		if (dentry->d_parent != parent)
+			continue;
+
+		spin_lock(&dentry->d_lock);
+
+		/*
+		 * Recheck the dentry after taking the lock - d_move may have
+		 * changed things.  Don't bother checking the hash because we're
+		 * about to compare the whole name anyway.
+		 */
+		if (dentry->d_parent != parent)
+			goto next;
+
+		/*
+		 * It is safe to compare names since d_move() cannot
+		 * change the qstr (protected by d_lock).
+		 */
+		qstr = &dentry->d_name;
+		if (parent->d_op && parent->d_op->d_compare) {
+			if (parent->d_op->d_compare(parent, qstr, name))
+				goto next;
+		} else {
+			if (qstr->len != len)
+				goto next;
+			if (memcmp(qstr->name, str, len))
+				goto next;
+		}
+
+		if (!d_unhashed(dentry)) {
+			atomic_inc(&dentry->d_count);
+			found = dentry;
+		}
+		spin_unlock(&dentry->d_lock);
+		break;
+next:
+		spin_unlock(&dentry->d_lock);
+ 	}
+ 	rcu_read_unlock();
+
+ 	return found;
+}
+
+/**
+ * d_validate - verify dentry provided from insecure source
+ * @dentry: The dentry alleged to be valid child of @dparent
+ * @dparent: The parent dentry (known to be valid)
+ *
+ * An insecure source has sent us a dentry; here we verify it and dget() it.
+ * This is used by ncpfs in its readdir implementation.
+ * Zero is returned if the dentry is invalid.
+ */
+ 
+int d_validate(struct dentry *dentry, struct dentry *dparent)
+{
+	struct hlist_head *base;
+	struct hlist_node *lhp;
+
+	/* Check whether the ptr might be valid at all.. */
+	if (!kmem_ptr_validate(dentry_cache, dentry))
+		goto out;
+
+	if (dentry->d_parent != dparent)
+		goto out;
+
+	spin_lock(&dcache_lock);
+	base = d_hash(dparent, dentry->d_name.hash);
+	hlist_for_each(lhp,base) { 
+		/* hlist_for_each_rcu() not required for d_hash list
+		 * as it is parsed under dcache_lock
+		 */
+		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
+			__dget_locked(dentry);
+			spin_unlock(&dcache_lock);
+			return 1;
+		}
+	}
+	spin_unlock(&dcache_lock);
+out:
+	return 0;
+}
+
+/*
+ * When a file is deleted, we have two options:
+ * - turn this dentry into a negative dentry
+ * - unhash this dentry and free it.
+ *
+ * Usually, we want to just turn this into
+ * a negative dentry, but if anybody else is
+ * currently using the dentry or the inode
+ * we can't do that; instead we fall back on removing
+ * it from the hash queues and waiting for
+ * it to be deleted later when it has no users.
+ */
+ 
+/**
+ * d_delete - delete a dentry
+ * @dentry: The dentry to delete
+ *
+ * Turn the dentry into a negative dentry if possible, otherwise
+ * remove it from the hash queues so it can be deleted later
+ */
+ 
+void d_delete(struct dentry * dentry)
+{
+	/*
+	 * Are we the only user?
+	 */
+	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count) == 1) {
+		dentry_iput(dentry);
+		return;
+	}
+
+	if (!d_unhashed(dentry))
+		__d_drop(dentry);
+
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+}
+
+static void __d_rehash(struct dentry * entry, struct hlist_head *list)
+{
+
+ 	entry->d_flags &= ~DCACHE_UNHASHED;
+ 	hlist_add_head_rcu(&entry->d_hash, list);
+}
+
+/**
+ * d_rehash	- add an entry back to the hash
+ * @entry: dentry to add to the hash
+ *
+ * Adds a dentry to the hash according to its name.
+ */
+ 
+void d_rehash(struct dentry * entry)
+{
+	struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
+
+	spin_lock(&dcache_lock);
+	spin_lock(&entry->d_lock);
+	__d_rehash(entry, list);
+	spin_unlock(&entry->d_lock);
+	spin_unlock(&dcache_lock);
+}
+
+#define do_switch(x,y) do { \
+	__typeof__ (x) __tmp = x; \
+	x = y; y = __tmp; } while (0)
+
+/*
+ * When switching names, the actual string doesn't strictly have to
+ * be preserved in the target - because we're dropping the target
+ * anyway. As such, we can just do a simple memcpy() to copy over
+ * the new name before we switch.
+ *
+ * Note that we have to be a lot more careful about getting the hash
+ * switched - we have to switch the hash value properly even if it
+ * then no longer matches the actual (corrupted) string of the target.
+ * The hash value has to match the hash queue that the dentry is on..
+ */
+static void switch_names(struct dentry *dentry, struct dentry *target)
+{
+	if (dname_external(target)) {
+		if (dname_external(dentry)) {
+			/*
+			 * Both external: swap the pointers
+			 */
+			do_switch(target->d_name.name, dentry->d_name.name);
+		} else {
+			/*
+			 * dentry:internal, target:external.  Steal target's
+			 * storage and make target internal.
+			 */
+			dentry->d_name.name = target->d_name.name;
+			target->d_name.name = target->d_iname;
+		}
+	} else {
+		if (dname_external(dentry)) {
+			/*
+			 * dentry:external, target:internal.  Give dentry's
+			 * storage to target and make dentry internal
+			 */
+			memcpy(dentry->d_iname, target->d_name.name,
+					target->d_name.len + 1);
+			target->d_name.name = dentry->d_name.name;
+			dentry->d_name.name = dentry->d_iname;
+		} else {
+			/*
+			 * Both are internal.  Just copy target to dentry
+			 */
+			memcpy(dentry->d_iname, target->d_name.name,
+					target->d_name.len + 1);
+		}
+	}
+}
+
+/*
+ * We cannibalize "target" when moving dentry on top of it,
+ * because it's going to be thrown away anyway. We could be more
+ * polite about it, though.
+ *
+ * This forceful removal will result in ugly /proc output if
+ * somebody holds a file open that got deleted due to a rename.
+ * We could be nicer about the deleted file, and let it show
+ * up under the name it got deleted rather than the name that
+ * deleted it.
+ */
+ 
+/**
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+
+void d_move(struct dentry * dentry, struct dentry * target)
+{
+	struct hlist_head *list;
+
+	if (!dentry->d_inode)
+		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
+
+	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
+	/*
+	 * XXXX: do we really need to take target->d_lock?
+	 */
+	if (target < dentry) {
+		spin_lock(&target->d_lock);
+		spin_lock(&dentry->d_lock);
+	} else {
+		spin_lock(&dentry->d_lock);
+		spin_lock(&target->d_lock);
+	}
+
+	/* Move the dentry to the target hash queue, if on different bucket */
+	if (dentry->d_flags & DCACHE_UNHASHED)
+		goto already_unhashed;
+
+	hlist_del_rcu(&dentry->d_hash);
+
+already_unhashed:
+	list = d_hash(target->d_parent, target->d_name.hash);
+	__d_rehash(dentry, list);
+
+	/* Unhash the target: dput() will then get rid of it */
+	__d_drop(target);
+
+	list_del(&dentry->d_child);
+	list_del(&target->d_child);
+
+	/* Switch the names.. */
+	switch_names(dentry, target);
+	do_switch(dentry->d_name.len, target->d_name.len);
+	do_switch(dentry->d_name.hash, target->d_name.hash);
+
+	/* ... and switch the parents */
+	if (IS_ROOT(dentry)) {
+		dentry->d_parent = target->d_parent;
+		target->d_parent = target;
+		INIT_LIST_HEAD(&target->d_child);
+	} else {
+		do_switch(dentry->d_parent, target->d_parent);
+
+		/* And add them back to the (new) parent lists */
+		list_add(&target->d_child, &target->d_parent->d_subdirs);
+	}
+
+	list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
+	spin_unlock(&target->d_lock);
+	spin_unlock(&dentry->d_lock);
+	write_sequnlock(&rename_lock);
+	spin_unlock(&dcache_lock);
+}
+
+/**
+ * d_path - return the path of a dentry
+ * @dentry: dentry to report
+ * @vfsmnt: vfsmnt to which the dentry belongs
+ * @root: root dentry
+ * @rootmnt: vfsmnt to which the root dentry belongs
+ * @buffer: buffer to return value in
+ * @buflen: buffer length
+ *
+ * Convert a dentry into an ASCII path name. If the entry has been deleted
+ * the string " (deleted)" is appended. Note that this is ambiguous.
+ *
+ * Returns the buffer or an error code if the path was too long.
+ *
+ * "buflen" should be positive. Caller holds the dcache_lock.
+ */
+static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+			struct dentry *root, struct vfsmount *rootmnt,
+			char *buffer, int buflen)
+{
+	char * end = buffer+buflen;
+	char * retval;
+	int namelen;
+
+	*--end = '\0';
+	buflen--;
+	if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
+		buflen -= 10;
+		end -= 10;
+		if (buflen < 0)
+			goto Elong;
+		memcpy(end, " (deleted)", 10);
+	}
+
+	if (buflen < 1)
+		goto Elong;
+	/* Get '/' right */
+	retval = end-1;
+	*retval = '/';
+
+	for (;;) {
+		struct dentry * parent;
+
+		if (dentry == root && vfsmnt == rootmnt)
+			break;
+		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+			/* Global root? */
+			spin_lock(&vfsmount_lock);
+			if (vfsmnt->mnt_parent == vfsmnt) {
+				spin_unlock(&vfsmount_lock);
+				goto global_root;
+			}
+			dentry = vfsmnt->mnt_mountpoint;
+			vfsmnt = vfsmnt->mnt_parent;
+			spin_unlock(&vfsmount_lock);
+			continue;
+		}
+		parent = dentry->d_parent;
+		prefetch(parent);
+		namelen = dentry->d_name.len;
+		buflen -= namelen + 1;
+		if (buflen < 0)
+			goto Elong;
+		end -= namelen;
+		memcpy(end, dentry->d_name.name, namelen);
+		*--end = '/';
+		retval = end;
+		dentry = parent;
+	}
+
+	return retval;
+
+global_root:
+	namelen = dentry->d_name.len;
+	buflen -= namelen;
+	if (buflen < 0)
+		goto Elong;
+	retval -= namelen-1;	/* hit the slash */
+	memcpy(retval, dentry->d_name.name, namelen);
+	return retval;
+Elong:
+	return ERR_PTR(-ENAMETOOLONG);
+}
+
+/* write full pathname into buffer and return start of pathname */
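+/*
+ * An illustrative caller (hypothetical; error handling trimmed):
+ *
+ *	char *page = (char *)__get_free_page(GFP_KERNEL);
+ *	char *p = d_path(file->f_dentry, file->f_vfsmnt, page, PAGE_SIZE);
+ *
+ *	if (!IS_ERR(p))
+ *		printk(KERN_DEBUG "path: %s\n", p);
+ *	free_page((unsigned long)page);
+ */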
+char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+				char *buf, int buflen)
+{
+	char *res;
+	struct vfsmount *rootmnt;
+	struct dentry *root;
+
+	read_lock(&current->fs->lock);
+	rootmnt = mntget(current->fs->rootmnt);
+	root = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	spin_lock(&dcache_lock);
+	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+	spin_unlock(&dcache_lock);
+	dput(root);
+	mntput(rootmnt);
+	return res;
+}
+
+/*
+ * NOTE! The user-level library version returns a
+ * character pointer. The kernel system call just
+ * returns the length of the buffer filled (which
+ * includes the ending '\0' character), or a negative
+ * error value. So libc would do something like
+ *
+ *	char *getcwd(char * buf, size_t size)
+ *	{
+ *		int retval;
+ *
+ *		retval = sys_getcwd(buf, size);
+ *		if (retval >= 0)
+ *			return buf;
+ *		errno = -retval;
+ *		return NULL;
+ *	}
+ */
+asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
+{
+	int error;
+	struct vfsmount *pwdmnt, *rootmnt;
+	struct dentry *pwd, *root;
+	char *page = (char *) __get_free_page(GFP_USER);
+
+	if (!page)
+		return -ENOMEM;
+
+	read_lock(&current->fs->lock);
+	pwdmnt = mntget(current->fs->pwdmnt);
+	pwd = dget(current->fs->pwd);
+	rootmnt = mntget(current->fs->rootmnt);
+	root = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+
+	error = -ENOENT;
+	/* Has the current directory been unlinked? */
+	spin_lock(&dcache_lock);
+	if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
+		unsigned long len;
+		char * cwd;
+
+		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
+		spin_unlock(&dcache_lock);
+
+		error = PTR_ERR(cwd);
+		if (IS_ERR(cwd))
+			goto out;
+
+		error = -ERANGE;
+		len = PAGE_SIZE + page - cwd;
+		if (len <= size) {
+			error = len;
+			if (copy_to_user(buf, cwd, len))
+				error = -EFAULT;
+		}
+	} else
+		spin_unlock(&dcache_lock);
+
+out:
+	dput(pwd);
+	mntput(pwdmnt);
+	dput(root);
+	mntput(rootmnt);
+	free_page((unsigned long) page);
+	return error;
+}
+
+/*
+ * Test whether new_dentry is a subdirectory of old_dentry.
+ *
+ * Trivially implemented using the dcache structure
+ */
+
+/**
+ * is_subdir - is new dentry a subdirectory of old_dentry
+ * @new_dentry: new dentry
+ * @old_dentry: old dentry
+ *
+ * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
+ * Returns 0 otherwise.
+ * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
+ */
+  
+int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
+{
+	int result;
+	struct dentry * saved = new_dentry;
+	unsigned long seq;
+
+	/* need rcu_read_lock() to protect against d_parent changing due to a
+	 * concurrent d_move()
+	 */
+	rcu_read_lock();
+	do {
+		/* for restarting inner loop in case of seq retry */
+		new_dentry = saved;
+		result = 0;
+		seq = read_seqbegin(&rename_lock);
+		for (;;) {
+			if (new_dentry != old_dentry) {
+				struct dentry * parent = new_dentry->d_parent;
+				if (parent == new_dentry)
+					break;
+				new_dentry = parent;
+				continue;
+			}
+			result = 1;
+			break;
+		}
+	} while (read_seqretry(&rename_lock, seq));
+	rcu_read_unlock();
+
+	return result;
+}
+
+void d_genocide(struct dentry *root)
+{
+	struct dentry *this_parent = root;
+	struct list_head *next;
+
+	spin_lock(&dcache_lock);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct list_head *tmp = next;
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+		next = tmp->next;
+		if (d_unhashed(dentry)||!dentry->d_inode)
+			continue;
+		if (!list_empty(&dentry->d_subdirs)) {
+			this_parent = dentry;
+			goto repeat;
+		}
+		atomic_dec(&dentry->d_count);
+	}
+	if (this_parent != root) {
+		next = this_parent->d_child.next; 
+		atomic_dec(&this_parent->d_count);
+		this_parent = this_parent->d_parent;
+		goto resume;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/**
+ * find_inode_number - check for dentry with name
+ * @dir: directory to check
+ * @name: Name to find.
+ *
+ * Check whether a dentry already exists for the given name,
+ * and return the inode number if it has an inode. Otherwise
+ * 0 is returned.
+ *
+ * This routine is used to post-process directory listings for
+ * filesystems using synthetic inode numbers, and is necessary
+ * to keep getcwd() working.
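+ *
+ * An illustrative readdir-side caller (hypothetical; "de" stands for a
+ * filesystem-specific directory entry):
+ *
+ *	struct qstr name = { .name = de->name, .len = de->name_len };
+ *	ino_t ino = find_inode_number(filp->f_dentry, &name);
+ *	if (!ino)
+ *		ino = de->ino;	/* fall back to the on-media inode number */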
+ */
+ 
+ino_t find_inode_number(struct dentry *dir, struct qstr *name)
+{
+	struct dentry * dentry;
+	ino_t ino = 0;
+
+	/*
+	 * Check for a fs-specific hash function. Note that we must
+	 * calculate the standard hash first, as the d_op->d_hash()
+	 * routine may choose to leave the hash value unchanged.
+	 */
+	name->hash = full_name_hash(name->name, name->len);
+	if (dir->d_op && dir->d_op->d_hash) {
+		if (dir->d_op->d_hash(dir, name) != 0)
+			goto out;
+	}
+
+	dentry = d_lookup(dir, name);
+	if (dentry) {
+		if (dentry->d_inode)
+			ino = dentry->d_inode->i_ino;
+		dput(dentry);
+	}
+out:
+	return ino;
+}
+
+static __initdata unsigned long dhash_entries;
+static int __init set_dhash_entries(char *str)
+{
+	if (!str)
+		return 0;
+	dhash_entries = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("dhash_entries=", set_dhash_entries);
+
+static void __init dcache_init_early(void)
+{
+	int loop;
+
+	/* If hashes are distributed across NUMA nodes, defer
+	 * hash allocation until vmalloc space is available.
+	 */
+	if (hashdist)
+		return;
+
+	dentry_hashtable =
+		alloc_large_system_hash("Dentry cache",
+					sizeof(struct hlist_head),
+					dhash_entries,
+					13,
+					HASH_EARLY,
+					&d_hash_shift,
+					&d_hash_mask,
+					0);
+
+	for (loop = 0; loop < (1 << d_hash_shift); loop++)
+		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+}
+
+static void __init dcache_init(unsigned long mempages)
+{
+	int loop;
+
+	/* 
+	 * A constructor could be added for stable state like the lists,
+	 * but it is probably not worth it because of the cache nature
+	 * of the dcache. 
+	 */
+	dentry_cache = kmem_cache_create("dentry_cache",
+					 sizeof(struct dentry),
+					 0,
+					 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+					 NULL, NULL);
+	
+	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+
+	/* Hash may have been set up in dcache_init_early */
+	if (!hashdist)
+		return;
+
+	dentry_hashtable =
+		alloc_large_system_hash("Dentry cache",
+					sizeof(struct hlist_head),
+					dhash_entries,
+					13,
+					0,
+					&d_hash_shift,
+					&d_hash_mask,
+					0);
+
+	for (loop = 0; loop < (1 << d_hash_shift); loop++)
+		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+}
+
+/* SLAB cache for __getname() consumers */
+kmem_cache_t *names_cachep;
+
+/* SLAB cache for file structures */
+kmem_cache_t *filp_cachep;
+
+EXPORT_SYMBOL(d_genocide);
+
+extern void bdev_cache_init(void);
+extern void chrdev_init(void);
+
+void __init vfs_caches_init_early(void)
+{
+	dcache_init_early();
+	inode_init_early();
+}
+
+void __init vfs_caches_init(unsigned long mempages)
+{
+	unsigned long reserve;
+
+	/* Base hash sizes on available memory, with a reserve equal to
+           150% of current kernel size */
+
+	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
+	mempages -= reserve;
+
+	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
+
+	dcache_init(mempages);
+	inode_init(mempages);
+	files_init(mempages);
+	mnt_init(mempages);
+	bdev_cache_init();
+	chrdev_init();
+}
+
+EXPORT_SYMBOL(d_alloc);
+EXPORT_SYMBOL(d_alloc_anon);
+EXPORT_SYMBOL(d_alloc_root);
+EXPORT_SYMBOL(d_delete);
+EXPORT_SYMBOL(d_find_alias);
+EXPORT_SYMBOL(d_instantiate);
+EXPORT_SYMBOL(d_invalidate);
+EXPORT_SYMBOL(d_lookup);
+EXPORT_SYMBOL(d_move);
+EXPORT_SYMBOL(d_path);
+EXPORT_SYMBOL(d_prune_aliases);
+EXPORT_SYMBOL(d_rehash);
+EXPORT_SYMBOL(d_splice_alias);
+EXPORT_SYMBOL(d_validate);
+EXPORT_SYMBOL(dget_locked);
+EXPORT_SYMBOL(dput);
+EXPORT_SYMBOL(find_inode_number);
+EXPORT_SYMBOL(have_submounts);
+EXPORT_SYMBOL(names_cachep);
+EXPORT_SYMBOL(shrink_dcache_parent);
+EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/dcookies.c b/fs/dcookies.c
new file mode 100644
index 0000000..581aac9
--- /dev/null
+++ b/fs/dcookies.c
@@ -0,0 +1,330 @@
+/*
+ * dcookies.c
+ *
+ * Copyright 2002 John Levon <levon@movementarian.org>
+ *
+ * Persistent cookie-path mappings. These are used by
+ * profilers to convert a per-task EIP value into something
+ * non-transitory that can be processed at a later date.
+ * This is done by locking the dentry/vfsmnt pair in the
+ * kernel until released by the tasks needing the persistent
+ * objects. The tag is simply an unsigned long that refers
+ * to the pair and can be looked up from userspace.
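+ *
+ * A rough lifecycle sketch for an in-kernel user of this facility (for
+ * example a profiler); error handling is omitted:
+ *
+ *	unsigned long cookie;
+ *	struct dcookie_user *u = dcookie_register();
+ *	...
+ *	get_dcookie(file->f_dentry, file->f_vfsmnt, &cookie);
+ *	...
+ *	dcookie_unregister(u);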
+ */
+
+#include <linux/config.h>
+#include <linux/syscalls.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mount.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/dcookies.h>
+#include <asm/uaccess.h>
+
+/* The dcookies are allocated from a kmem_cache and
+ * hashed onto a small number of lists. None of the
+ * code here is particularly performance critical
+ */
+struct dcookie_struct {
+	struct dentry * dentry;
+	struct vfsmount * vfsmnt;
+	struct list_head hash_list;
+};
+
+static LIST_HEAD(dcookie_users);
+static DECLARE_MUTEX(dcookie_sem);
+static kmem_cache_t * dcookie_cache;
+static struct list_head * dcookie_hashtable;
+static size_t hash_size;
+
+static inline int is_live(void)
+{
+	return !(list_empty(&dcookie_users));
+}
+
+
+/* The dentry is locked; its address will do for the cookie */
+static inline unsigned long dcookie_value(struct dcookie_struct * dcs)
+{
+	return (unsigned long)dcs->dentry;
+}
+
+
+static size_t dcookie_hash(unsigned long dcookie)
+{
+	return (dcookie >> L1_CACHE_SHIFT) & (hash_size - 1);
+}
+
+
+static struct dcookie_struct * find_dcookie(unsigned long dcookie)
+{
+	struct dcookie_struct *found = NULL;
+	struct dcookie_struct * dcs;
+	struct list_head * pos;
+	struct list_head * list;
+
+	list = dcookie_hashtable + dcookie_hash(dcookie);
+
+	list_for_each(pos, list) {
+		dcs = list_entry(pos, struct dcookie_struct, hash_list);
+		if (dcookie_value(dcs) == dcookie) {
+			found = dcs;
+			break;
+		}
+	}
+
+	return found;
+}
+
+
+static void hash_dcookie(struct dcookie_struct * dcs)
+{
+	struct list_head * list = dcookie_hashtable + dcookie_hash(dcookie_value(dcs));
+	list_add(&dcs->hash_list, list);
+}
+
+
+static struct dcookie_struct * alloc_dcookie(struct dentry * dentry,
+	struct vfsmount * vfsmnt)
+{
+	struct dcookie_struct * dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL);
+	if (!dcs)
+		return NULL;
+
+	atomic_inc(&dentry->d_count);
+	atomic_inc(&vfsmnt->mnt_count);
+	dentry->d_cookie = dcs;
+
+	dcs->dentry = dentry;
+	dcs->vfsmnt = vfsmnt;
+	hash_dcookie(dcs);
+
+	return dcs;
+}
+
+
+/* This is the main kernel-side routine that retrieves the cookie
+ * value for a dentry/vfsmnt pair.
+ */
+int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
+	unsigned long * cookie)
+{
+	int err = 0;
+	struct dcookie_struct * dcs;
+
+	down(&dcookie_sem);
+
+	if (!is_live()) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	dcs = dentry->d_cookie;
+
+	if (!dcs)
+		dcs = alloc_dcookie(dentry, vfsmnt);
+
+	if (!dcs) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	*cookie = dcookie_value(dcs);
+
+out:
+	up(&dcookie_sem);
+	return err;
+}
+
+
+/* And here is where the userspace process can look up the cookie value
+ * to retrieve the path.
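+ *
+ * Roughly, a userspace consumer would do something like the following
+ * (illustrative only, using the raw syscall number):
+ *
+ *	char buf[PATH_MAX];
+ *	long n = syscall(__NR_lookup_dcookie, cookie, buf, sizeof(buf));
+ *	if (n >= 0)
+ *		printf("%.*s\n", (int)n, buf);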
+ */
+asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
+{
+	unsigned long cookie = (unsigned long)cookie64;
+	int err = -EINVAL;
+	char * kbuf;
+	char * path;
+	size_t pathlen;
+	struct dcookie_struct * dcs;
+
+	/* Without this check we could leak path information to users
+	 * who lack read permission on the directory.
+	 */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	down(&dcookie_sem);
+
+	if (!is_live()) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!(dcs = find_dcookie(cookie)))
+		goto out;
+
+	err = -ENOMEM;
+	kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!kbuf)
+		goto out;
+
+	/* FIXME: (deleted) ? */
+	path = d_path(dcs->dentry, dcs->vfsmnt, kbuf, PAGE_SIZE);
+
+	if (IS_ERR(path)) {
+		err = PTR_ERR(path);
+		goto out_free;
+	}
+
+	err = -ERANGE;
+ 
+	pathlen = kbuf + PAGE_SIZE - path;
+	if (pathlen <= len) {
+		err = pathlen;
+		if (copy_to_user(buf, path, pathlen))
+			err = -EFAULT;
+	}
+
+out_free:
+	kfree(kbuf);
+out:
+	up(&dcookie_sem);
+	return err;
+}
+
+
+static int dcookie_init(void)
+{
+	struct list_head * d;
+	unsigned int i, hash_bits;
+	int err = -ENOMEM;
+
+	dcookie_cache = kmem_cache_create("dcookie_cache",
+		sizeof(struct dcookie_struct),
+		0, 0, NULL, NULL);
+
+	if (!dcookie_cache)
+		goto out;
+
+	dcookie_hashtable = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!dcookie_hashtable)
+		goto out_kmem;
+
+	err = 0;
+
+	/*
+	 * Find the power-of-two list-heads that can fit into the allocation..
+	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
+	 * a power-of-two.
+	 */
+	hash_size = PAGE_SIZE / sizeof(struct list_head);
+	hash_bits = 0;
+	do {
+		hash_bits++;
+	} while ((hash_size >> hash_bits) != 0);
+	hash_bits--;
+
+	/*
+	 * Re-calculate the actual number of entries and the mask
+	 * from the number of bits we can fit.
+	 */
+	hash_size = 1UL << hash_bits;
+
+	/* And initialize the newly allocated array */
+	d = dcookie_hashtable;
+	i = hash_size;
+	do {
+		INIT_LIST_HEAD(d);
+		d++;
+		i--;
+	} while (i);
+
+out:
+	return err;
+out_kmem:
+	kmem_cache_destroy(dcookie_cache);
+	goto out;
+}
+
+
+static void free_dcookie(struct dcookie_struct * dcs)
+{
+	dcs->dentry->d_cookie = NULL;
+	dput(dcs->dentry);
+	mntput(dcs->vfsmnt);
+	kmem_cache_free(dcookie_cache, dcs);
+}
+
+
+static void dcookie_exit(void)
+{
+	struct list_head * list;
+	struct list_head * pos;
+	struct list_head * pos2;
+	struct dcookie_struct * dcs;
+	size_t i;
+
+	for (i = 0; i < hash_size; ++i) {
+		list = dcookie_hashtable + i;
+		list_for_each_safe(pos, pos2, list) {
+			dcs = list_entry(pos, struct dcookie_struct, hash_list);
+			list_del(&dcs->hash_list);
+			free_dcookie(dcs);
+		}
+	}
+
+	kfree(dcookie_hashtable);
+	kmem_cache_destroy(dcookie_cache);
+}
+
+
+struct dcookie_user {
+	struct list_head next;
+};
+ 
+struct dcookie_user * dcookie_register(void)
+{
+	struct dcookie_user * user;
+
+	down(&dcookie_sem);
+
+	user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
+	if (!user)
+		goto out;
+
+	if (!is_live() && dcookie_init())
+		goto out_free;
+
+	list_add(&user->next, &dcookie_users);
+
+out:
+	up(&dcookie_sem);
+	return user;
+out_free:
+	kfree(user);
+	user = NULL;
+	goto out;
+}
+
+
+void dcookie_unregister(struct dcookie_user * user)
+{
+	down(&dcookie_sem);
+
+	list_del(&user->next);
+	kfree(user);
+
+	if (!is_live())
+		dcookie_exit();
+
+	up(&dcookie_sem);
+}
+
+EXPORT_SYMBOL_GPL(dcookie_register);
+EXPORT_SYMBOL_GPL(dcookie_unregister);
+EXPORT_SYMBOL_GPL(get_dcookie);
diff --git a/fs/debugfs/Makefile b/fs/debugfs/Makefile
new file mode 100644
index 0000000..840c456
--- /dev/null
+++ b/fs/debugfs/Makefile
@@ -0,0 +1,4 @@
+debugfs-objs	:= inode.o file.o
+
+obj-$(CONFIG_DEBUG_FS)	+= debugfs.o
+
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
new file mode 100644
index 0000000..548556f
--- /dev/null
+++ b/fs/debugfs/file.c
@@ -0,0 +1,262 @@
+/*
+ *  file.c - part of debugfs, a tiny little debug file system
+ *
+ *  Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *  Copyright (C) 2004 IBM Inc.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ *
+ *  debugfs is for people to use instead of /proc or /sys.
+ *  See Documentation/DocBook/kernel-api for more details.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+
+static ssize_t default_read_file(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t default_write_file(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	return count;
+}
+
+static int default_open(struct inode *inode, struct file *file)
+{
+	if (inode->u.generic_ip)
+		file->private_data = inode->u.generic_ip;
+
+	return 0;
+}
+
+struct file_operations debugfs_file_operations = {
+	.read =		default_read_file,
+	.write =	default_write_file,
+	.open =		default_open,
+};
+
+#define simple_type(type, format, temptype, strtolfn)				\
+static ssize_t read_file_##type(struct file *file, char __user *user_buf,	\
+				size_t count, loff_t *ppos)			\
+{										\
+	char buf[32];								\
+	type *val = file->private_data;						\
+										\
+	snprintf(buf, sizeof(buf), format "\n", *val);				\
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));\
+}										\
+static ssize_t write_file_##type(struct file *file, const char __user *user_buf,\
+				 size_t count, loff_t *ppos)			\
+{										\
+	char *endp;								\
+	char buf[32];								\
+	int buf_size;								\
+	type *val = file->private_data;						\
+	temptype tmp;								\
+										\
+	memset(buf, 0x00, sizeof(buf));						\
+	buf_size = min(count, (sizeof(buf)-1));					\
+	if (copy_from_user(buf, user_buf, buf_size))				\
+		return -EFAULT;							\
+										\
+	tmp = strtolfn(buf, &endp, 0);						\
+	if ((endp == buf) || ((type)tmp != tmp))				\
+		return -EINVAL;							\
+	*val = tmp;								\
+	return count;								\
+}										\
+static struct file_operations fops_##type = {					\
+	.read =		read_file_##type,					\
+	.write =	write_file_##type,					\
+	.open =		default_open,						\
+};
+simple_type(u8, "%c", unsigned long, simple_strtoul);
+simple_type(u16, "%hi", unsigned long, simple_strtoul);
+simple_type(u32, "%i", unsigned long, simple_strtoul);
+
+/**
+ * debugfs_create_u8 - create a file in the debugfs filesystem that is used to read and write an unsigned 8-bit value.
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and write
+ *         to.
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value.  If the @mode variable is so
+ * set, it can be read from, and written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
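+ *
+ * A minimal usage sketch (the my_* names are illustrative, not part of
+ * this file):
+ *
+ *	static u8 my_u8;
+ *	static struct dentry *my_u8_file;
+ *
+ *	my_u8_file = debugfs_create_u8("my_u8", 0644, NULL, &my_u8);
+ *	...
+ *	debugfs_remove(my_u8_file);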
+ */
+struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+				 struct dentry *parent, u8 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_u8);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u8);
+
+/**
+ * debugfs_create_u16 - create a file in the debugfs filesystem that is used to read and write an unsigned 16-bit value.
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and write
+ *         to.
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value.  If the @mode variable is so
+ * set, it can be read from, and written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
+ */
+struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+				  struct dentry *parent, u16 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_u16);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u16);
+
+/**
+ * debugfs_create_u32 - create a file in the debugfs filesystem that is used to read and write an unsigned 32-bit value.
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and write
+ *         to.
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value.  If the @mode variable is so
+ * set, it can be read from, and written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
+ */
+struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+				 struct dentry *parent, u32 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_u32);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u32);
+
+static ssize_t read_file_bool(struct file *file, char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	char buf[3];
+	u32 *val = file->private_data;
+	
+	if (*val)
+		buf[0] = 'Y';
+	else
+		buf[0] = 'N';
+	buf[1] = '\n';
+	buf[2] = 0x00;
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool(struct file *file, const char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size;
+	u32 *val = file->private_data;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		*val = 1;
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		*val = 0;
+		break;
+	}
+	
+	return count;
+}
+
+static struct file_operations fops_bool = {
+	.read =		read_file_bool,
+	.write =	write_file_bool,
+	.open =		default_open,
+};
+
+/**
+ * debugfs_create_bool - create a file in the debugfs filesystem that is used to read and write a boolean value.
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and write
+ *         to.
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value.  If the @mode variable is so
+ * set, it can be read from, and written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
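+ *
+ * For example, a driver might expose a "verbose" toggle like this (the
+ * names here are illustrative only):
+ *
+ *	static u32 verbose;
+ *
+ *	debugfs_create_bool("verbose", 0644, my_debug_dir, &verbose);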
+ */
+struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+				   struct dentry *parent, u32 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_bool);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_bool);
+
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
new file mode 100644
index 0000000..b529786
--- /dev/null
+++ b/fs/debugfs/inode.c
@@ -0,0 +1,328 @@
+/*
+ *  inode.c - part of debugfs, a tiny little debug file system
+ *
+ *  Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *  Copyright (C) 2004 IBM Inc.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ *
+ *  debugfs is for people to use instead of /proc or /sys.
+ *  See Documentation/DocBook/kernel-api for more details.
+ *
+ */
+
+/* uncomment to get debug messages from the debug filesystem, ah the irony. */
+/* #define DEBUG */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+#define DEBUGFS_MAGIC	0x64626720
+
+/* declared over in file.c */
+extern struct file_operations debugfs_file_operations;
+
+static struct vfsmount *debugfs_mount;
+static int debugfs_mount_count;
+
+static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
+{
+	struct inode *inode = new_inode(sb);
+
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = 0;
+		inode->i_gid = 0;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		switch (mode & S_IFMT) {
+		default:
+			init_special_inode(inode, mode, dev);
+			break;
+		case S_IFREG:
+			inode->i_fop = &debugfs_file_operations;
+			break;
+		case S_IFDIR:
+			inode->i_op = &simple_dir_inode_operations;
+			inode->i_fop = &simple_dir_operations;
+
+			/* directory inodes start off with i_nlink == 2 (for "." entry) */
+			inode->i_nlink++;
+			break;
+		}
+	}
+	return inode; 
+}
+
+/* SMP-safe */
+static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
+			 int mode, dev_t dev)
+{
+	struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+	int error = -EPERM;
+
+	if (dentry->d_inode)
+		return -EEXIST;
+
+	if (inode) {
+		d_instantiate(dentry, inode);
+		dget(dentry);
+		error = 0;
+	}
+	return error;
+}
+
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	int res;
+
+	mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
+	res = debugfs_mknod(dir, dentry, mode, 0);
+	if (!res)
+		dir->i_nlink++;
+	return res;
+}
+
+static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode)
+{
+	mode = (mode & S_IALLUGO) | S_IFREG;
+	return debugfs_mknod(dir, dentry, mode, 0);
+}
+
+static inline int debugfs_positive(struct dentry *dentry)
+{
+	return dentry->d_inode && !d_unhashed(dentry);
+}
+
+static int debug_fill_super(struct super_block *sb, void *data, int silent)
+{
+	static struct tree_descr debug_files[] = {{""}};
+
+	return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
+}
+
+static struct dentry *get_dentry(struct dentry *parent, const char *name)
+{
+	struct qstr qstr;
+
+	qstr.name = name;
+	qstr.len = strlen(name);
+	qstr.hash = full_name_hash(name, qstr.len);
+	return lookup_hash(&qstr, parent);
+}
+
+static struct super_block *debug_get_sb(struct file_system_type *fs_type,
+				        int flags, const char *dev_name,
+					void *data)
+{
+	return get_sb_single(fs_type, flags, data, debug_fill_super);
+}
+
+static struct file_system_type debug_fs_type = {
+	.owner =	THIS_MODULE,
+	.name =		"debugfs",
+	.get_sb =	debug_get_sb,
+	.kill_sb =	kill_litter_super,
+};
+
+static int debugfs_create_by_name(const char *name, mode_t mode,
+				  struct dentry *parent,
+				  struct dentry **dentry)
+{
+	int error = 0;
+
+	/* If the parent is not specified, we create it in the root.
+	 * We need the root dentry to do this, which is in the super 
+	 * block. A pointer to that is in the struct vfsmount that we
+	 * have around.
+	 */
+	if (!parent) {
+		if (debugfs_mount && debugfs_mount->mnt_sb) {
+			parent = debugfs_mount->mnt_sb->s_root;
+		}
+	}
+	if (!parent) {
+		pr_debug("debugfs: Ah! can not find a parent!\n");
+		return -EFAULT;
+	}
+
+	*dentry = NULL;
+	down(&parent->d_inode->i_sem);
+	*dentry = get_dentry(parent, name);
+	if (!IS_ERR(*dentry)) {
+		if ((mode & S_IFMT) == S_IFDIR)
+			error = debugfs_mkdir(parent->d_inode, *dentry, mode);
+		else 
+			error = debugfs_create(parent->d_inode, *dentry, mode);
+	} else
+		error = PTR_ERR(*dentry);
+	up(&parent->d_inode->i_sem);
+
+	return error;
+}
+
+/**
+ * debugfs_create_file - create a file in the debugfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ *        on.  The inode.u.generic_ip pointer will point to this value on
+ *        the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ *        this file.
+ *
+ * This is the basic "create a file" function for debugfs.  It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you
+ * want to create a directory, the debugfs_create_dir() function is
+ * recommended to be used instead.)
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
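+ *
+ * A sketch of a typical caller with custom file operations (all foo_*
+ * names are illustrative):
+ *
+ *	static struct file_operations foo_fops = {
+ *		.open = foo_open,
+ *		.read = foo_read,
+ *	};
+ *
+ *	foo_dentry = debugfs_create_file("foo", 0444, NULL, &foo_data,
+ *					 &foo_fops);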
+ */
+struct dentry *debugfs_create_file(const char *name, mode_t mode,
+				   struct dentry *parent, void *data,
+				   struct file_operations *fops)
+{
+	struct dentry *dentry = NULL;
+	int error;
+
+	pr_debug("debugfs: creating file '%s'\n",name);
+
+	error = simple_pin_fs("debugfs", &debugfs_mount, &debugfs_mount_count);
+	if (error)
+		goto exit;
+
+	error = debugfs_create_by_name(name, mode, parent, &dentry);
+	if (error) {
+		dentry = NULL;
+		goto exit;
+	}
+
+	if (dentry->d_inode) {
+		if (data)
+			dentry->d_inode->u.generic_ip = data;
+		if (fops)
+			dentry->d_inode->i_fop = fops;
+	}
+exit:
+	return dentry;
+}
+EXPORT_SYMBOL_GPL(debugfs_create_file);
+
+/**
+ * debugfs_create_dir - create a directory in the debugfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the directory to
+ *        create.
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          directory will be created in the root of the debugfs filesystem.
+ *
+ * This function creates a directory in debugfs with the given name.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
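+ *
+ * For example, to group several files under one directory (illustrative):
+ *
+ *	struct dentry *dir = debugfs_create_dir("mydriver", NULL);
+ *	if (dir)
+ *		debugfs_create_u32("counter", 0444, dir, &my_counter);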
+ */
+struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+{
+	return debugfs_create_file(name, 
+				   S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+				   parent, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_dir);
+
+/**
+ * debugfs_remove - removes a file or directory from the debugfs filesystem
+ *
+ * @dentry: a pointer to the dentry of the file or directory to be
+ *          removed.
+ *
+ * This function removes a file or directory in debugfs that was previously
+ * created with a call to another debugfs function (like
+ * debugfs_create_file() or variants thereof.)
+ *
+ * This function is required to be called in order for the file to be
+ * removed; no automatic cleanup of files will happen when a module is
+ * removed, so you are responsible here.
+ */
+void debugfs_remove(struct dentry *dentry)
+{
+	struct dentry *parent;
+	
+	if (!dentry)
+		return;
+
+	parent = dentry->d_parent;
+	if (!parent || !parent->d_inode)
+		return;
+
+	down(&parent->d_inode->i_sem);
+	if (debugfs_positive(dentry)) {
+		if (dentry->d_inode) {
+			if (S_ISDIR(dentry->d_inode->i_mode))
+				simple_rmdir(parent->d_inode, dentry);
+			else
+				simple_unlink(parent->d_inode, dentry);
+			dput(dentry);
+		}
+	}
+	up(&parent->d_inode->i_sem);
+	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+}
+EXPORT_SYMBOL_GPL(debugfs_remove);
+
+static decl_subsys(debug, NULL, NULL);
+
+static int __init debugfs_init(void)
+{
+	int retval;
+
+	kset_set_kset_s(&debug_subsys, kernel_subsys);
+	retval = subsystem_register(&debug_subsys);
+	if (retval)
+		return retval;
+
+	retval = register_filesystem(&debug_fs_type);
+	if (retval)
+		subsystem_unregister(&debug_subsys);
+	return retval;
+}
+
+static void __exit debugfs_exit(void)
+{
+	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+	unregister_filesystem(&debug_fs_type);
+	subsystem_unregister(&debug_subsys);
+}
+
+core_initcall(debugfs_init);
+module_exit(debugfs_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/fs/devfs/Makefile b/fs/devfs/Makefile
new file mode 100644
index 0000000..6dd8d12
--- /dev/null
+++ b/fs/devfs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux devfs-filesystem routines.
+#
+
+obj-$(CONFIG_DEVFS_FS) += devfs.o
+
+devfs-objs := base.o util.o
+
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
new file mode 100644
index 0000000..1ecfe1f
--- /dev/null
+++ b/fs/devfs/base.c
@@ -0,0 +1,2838 @@
+/*  devfs (Device FileSystem) driver.
+
+    Copyright (C) 1998-2002  Richard Gooch
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free
+    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
+    The postal address is:
+      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+
+    ChangeLog
+
+    19980110   Richard Gooch <rgooch@atnf.csiro.au>
+               Original version.
+  v0.1
+    19980111   Richard Gooch <rgooch@atnf.csiro.au>
+               Created per-fs inode table rather than using inode->u.generic_ip
+  v0.2
+    19980111   Richard Gooch <rgooch@atnf.csiro.au>
+               Created .epoch inode which has a ctime of 0.
+	       Fixed loss of named pipes when dentries lost.
+	       Fixed loss of inode data when devfs_register() follows mknod().
+  v0.3
+    19980111   Richard Gooch <rgooch@atnf.csiro.au>
+               Fix for when compiling with CONFIG_KERNELD.
+    19980112   Richard Gooch <rgooch@atnf.csiro.au>
+               Fix for readdir() which sometimes didn't show entries.
+	       Added <<tolerant>> option to <devfs_register>.
+  v0.4
+    19980113   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_fill_file> function.
+  v0.5
+    19980115   Richard Gooch <rgooch@atnf.csiro.au>
+               Added subdirectory support. Major restructuring.
+    19980116   Richard Gooch <rgooch@atnf.csiro.au>
+               Fixed <find_by_dev> to not search major=0,minor=0.
+	       Added symlink support.
+  v0.6
+    19980120   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_mk_dir> function and support directory unregister
+    19980120   Richard Gooch <rgooch@atnf.csiro.au>
+               Auto-ownership uses real uid/gid rather than effective uid/gid.
+  v0.7
+    19980121   Richard Gooch <rgooch@atnf.csiro.au>
+               Supported creation of sockets.
+  v0.8
+    19980122   Richard Gooch <rgooch@atnf.csiro.au>
+               Added DEVFS_FL_HIDE_UNREG flag.
+	       Interface change to <devfs_mk_symlink>.
+               Created <devfs_symlink> to support symlink(2).
+  v0.9
+    19980123   Richard Gooch <rgooch@atnf.csiro.au>
+               Added check to <devfs_fill_file> to check inode is in devfs.
+	       Added optional traversal of symlinks.
+  v0.10
+    19980124   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_get_flags> and <devfs_set_flags>.
+  v0.11
+    19980125   C. Scott Ananian <cananian@alumni.princeton.edu>
+               Created <devfs_find_handle>.
+    19980125   Richard Gooch <rgooch@atnf.csiro.au>
+               Allow removal of symlinks.
+  v0.12
+    19980125   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_set_symlink_destination>.
+    19980126   Richard Gooch <rgooch@atnf.csiro.au>
+               Moved DEVFS_SUPER_MAGIC into header file.
+	       Added DEVFS_FL_HIDE flag.
+	       Created <devfs_get_maj_min>.
+	       Created <devfs_get_handle_from_inode>.
+	       Fixed minor bug in <find_by_dev>.
+    19980127   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed interface to <find_by_dev>, <find_entry>,
+	       <devfs_unregister>, <devfs_fill_file> and <devfs_find_handle>.
+	       Fixed inode times when symlink created with symlink(2).
+  v0.13
+    19980129   C. Scott Ananian <cananian@alumni.princeton.edu>
+               Exported <devfs_set_symlink_destination>, <devfs_get_maj_min>
+	       and <devfs_get_handle_from_inode>.
+    19980129   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_unlink> to support unlink(2).
+  v0.14
+    19980129   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed kerneld support for entries in devfs subdirectories.
+    19980130   Richard Gooch <rgooch@atnf.csiro.au>
+	       Bugfixes in <call_kerneld>.
+  v0.15
+    19980207   Richard Gooch <rgooch@atnf.csiro.au>
+	       Call kerneld when looking up unregistered entries.
+  v0.16
+    19980326   Richard Gooch <rgooch@atnf.csiro.au>
+	       Modified interface to <devfs_find_handle> for symlink traversal.
+  v0.17
+    19980331   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed persistence bug with device numbers for manually created
+	       device files.
+	       Fixed problem with recreating symlinks with different content.
+  v0.18
+    19980401   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed to CONFIG_KMOD.
+	       Hide entries which are manually unlinked.
+	       Always invalidate devfs dentry cache when registering entries.
+	       Created <devfs_rmdir> to support rmdir(2).
+	       Ensure directories created by <devfs_mk_dir> are visible.
+  v0.19
+    19980402   Richard Gooch <rgooch@atnf.csiro.au>
+	       Invalidate devfs dentry cache when making directories.
+	       Invalidate devfs dentry cache when removing entries.
+	       Fixed persistence bug with fifos.
+  v0.20
+    19980421   Richard Gooch <rgooch@atnf.csiro.au>
+	       Print process command when debugging kerneld/kmod.
+	       Added debugging for register/unregister/change operations.
+    19980422   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added "devfs=" boot options.
+  v0.21
+    19980426   Richard Gooch <rgooch@atnf.csiro.au>
+	       No longer lock/unlock superblock in <devfs_put_super>.
+	       Drop negative dentries when they are released.
+	       Manage dcache more efficiently.
+  v0.22
+    19980427   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFS_FL_AUTO_DEVNUM flag.
+  v0.23
+    19980430   Richard Gooch <rgooch@atnf.csiro.au>
+	       No longer set unnecessary methods.
+  v0.24
+    19980504   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added PID display to <call_kerneld> debugging message.
+	       Added "after" debugging message to <call_kerneld>.
+    19980519   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added "diread" and "diwrite" boot options.
+    19980520   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed persistence problem with permissions.
+  v0.25
+    19980602   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support legacy device nodes.
+	       Fixed bug where recreated inodes were hidden.
+  v0.26
+    19980602   Richard Gooch <rgooch@atnf.csiro.au>
+	       Improved debugging in <get_vfs_inode>.
+    19980607   Richard Gooch <rgooch@atnf.csiro.au>
+	       No longer free old dentries in <devfs_mk_dir>.
+	       Free all dentries for a given entry when deleting inodes.
+  v0.27
+    19980627   Richard Gooch <rgooch@atnf.csiro.au>
+	       Limit auto-device numbering to majors 128 to 239.
+  v0.28
+    19980629   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed inode times persistence problem.
+  v0.29
+    19980704   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed spelling in <devfs_readlink> debug.
+	       Fixed bug in <devfs_setup> parsing "dilookup".
+  v0.30
+    19980705   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed devfs inode leak when manually recreating inodes.
+	       Fixed permission persistence problem when recreating inodes.
+  v0.31
+    19980727   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed harmless "unused variable" compiler warning.
+	       Fixed modes for manually recreated device nodes.
+  v0.32
+    19980728   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added NULL devfs inode warning in <devfs_read_inode>.
+	       Force all inode nlink values to 1.
+  v0.33
+    19980730   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added "dimknod" boot option.
+	       Set inode nlink to 0 when freeing dentries.
+	       Fixed modes for manually recreated symlinks.
+  v0.34
+    19980802   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bugs in recreated directories and symlinks.
+  v0.35
+    19980806   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bugs in recreated device nodes.
+    19980807   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bug in currently unused <devfs_get_handle_from_inode>.
+	       Defined new <devfs_handle_t> type.
+	       Improved debugging when getting entries.
+	       Fixed bug where directories could be emptied.
+  v0.36
+    19980809   Richard Gooch <rgooch@atnf.csiro.au>
+	       Replaced dummy .epoch inode with .devfsd character device.
+    19980810   Richard Gooch <rgooch@atnf.csiro.au>
+	       Implemented devfsd protocol revision 0.
+  v0.37
+    19980819   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added soothing message to warning in <devfs_d_iput>.
+  v0.38
+    19980829   Richard Gooch <rgooch@atnf.csiro.au>
+	       Use GCC extensions for structure initialisations.
+	       Implemented async open notification.
+	       Incremented devfsd protocol revision to 1.
+  v0.39
+    19980908   Richard Gooch <rgooch@atnf.csiro.au>
+	       Moved async open notification to end of <devfs_open>.
+  v0.40
+    19980910   Richard Gooch <rgooch@atnf.csiro.au>
+	       Prepended "/dev/" to module load request.
+	       Renamed <call_kerneld> to <call_kmod>.
+  v0.41
+    19980910   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed typo "AYSNC" -> "ASYNC".
+  v0.42
+    19980910   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added open flag for files.
+  v0.43
+    19980927   Richard Gooch <rgooch@atnf.csiro.au>
+	       Set i_blocks=0 and i_blksize=1024 in <devfs_read_inode>.
+  v0.44
+    19981005   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added test for empty <<name>> in <devfs_find_handle>.
+	       Renamed <generate_path> to <devfs_generate_path> and published.
+  v0.45
+    19981006   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_get_fops>.
+  v0.46
+    19981007   Richard Gooch <rgooch@atnf.csiro.au>
+	       Limit auto-device numbering to majors 144 to 239.
+  v0.47
+    19981010   Richard Gooch <rgooch@atnf.csiro.au>
+	       Updated <devfs_follow_link> for VFS change in 2.1.125.
+  v0.48
+    19981022   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created DEVFS_FL_COMPAT flag.
+  v0.49
+    19981023   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created "nocompat" boot option.
+  v0.50
+    19981025   Richard Gooch <rgooch@atnf.csiro.au>
+	       Replaced "mount" boot option with "nomount".
+  v0.51
+    19981110   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created "only" boot option.
+  v0.52
+    19981112   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFS_FL_REMOVABLE flag.
+  v0.53
+    19981114   Richard Gooch <rgooch@atnf.csiro.au>
+	       Only call <scan_dir_for_removable> on first call to
+	       <devfs_readdir>.
+  v0.54
+    19981205   Richard Gooch <rgooch@atnf.csiro.au>
+	       Updated <devfs_rmdir> for VFS change in 2.1.131.
+  v0.55
+    19981218   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_mk_compat>.
+    19981220   Richard Gooch <rgooch@atnf.csiro.au>
+	       Check for partitions on removable media in <devfs_lookup>.
+  v0.56
+    19990118   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added support for registering regular files.
+	       Created <devfs_set_file_size>.
+	       Update devfs inodes from entries if not changed through FS.
+  v0.57
+    19990124   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed <devfs_fill_file> to only initialise temporary inodes.
+	       Trap for NULL fops in <devfs_register>.
+	       Return -ENODEV in <devfs_fill_file> for non-driver inodes.
+  v0.58
+    19990126   Richard Gooch <rgooch@atnf.csiro.au>
+	       Switched from PATH_MAX to DEVFS_PATHLEN.
+  v0.59
+    19990127   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created "nottycompat" boot option.
+  v0.60
+    19990318   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed <devfsd_read> to not overrun event buffer.
+  v0.61
+    19990329   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_auto_unregister>.
+  v0.62
+    19990330   Richard Gooch <rgooch@atnf.csiro.au>
+	       Don't return unregistered entries in <devfs_find_handle>.
+	       Panic in <devfs_unregister> if entry unregistered.
+    19990401   Richard Gooch <rgooch@atnf.csiro.au>
+	       Don't panic in <devfs_auto_unregister> for duplicates.
+  v0.63
+    19990402   Richard Gooch <rgooch@atnf.csiro.au>
+	       Don't unregister already unregistered entries in <unregister>.
+  v0.64
+    19990510   Richard Gooch <rgooch@atnf.csiro.au>
+	       Disable warning messages when unable to read partition table for
+	       removable media.
+  v0.65
+    19990512   Richard Gooch <rgooch@atnf.csiro.au>
+	       Updated <devfs_lookup> for VFS change in 2.3.1-pre1.
+	       Created "oops-on-panic" boot option.
+	       Improved debugging in <devfs_register> and <devfs_unregister>.
+  v0.66
+    19990519   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added documentation for some functions.
+    19990525   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed "oops-on-panic" boot option: now always Oops.
+  v0.67
+    19990531   Richard Gooch <rgooch@atnf.csiro.au>
+	       Improved debugging in <devfs_register>.
+  v0.68
+    19990604   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added "diunlink" and "nokmod" boot options.
+	       Removed superfluous warning message in <devfs_d_iput>.
+  v0.69
+    19990611   Richard Gooch <rgooch@atnf.csiro.au>
+	       Took account of change to <d_alloc_root>.
+  v0.70
+    19990614   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created separate event queue for each mounted devfs.
+	       Removed <devfs_invalidate_dcache>.
+	       Created new ioctl()s.
+	       Incremented devfsd protocol revision to 3.
+	       Fixed bug when re-creating directories: contents were lost.
+	       Block access to inodes until devfsd updates permissions.
+    19990615   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support 2.2.x kernels.
+  v0.71
+    19990623   Richard Gooch <rgooch@atnf.csiro.au>
+	       Switched to sending process uid/gid to devfsd.
+	       Renamed <call_kmod> to <try_modload>.
+	       Added DEVFSD_NOTIFY_LOOKUP event.
+    19990624   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFSD_NOTIFY_CHANGE event.
+	       Incremented devfsd protocol revision to 4.
+  v0.72
+    19990713   Richard Gooch <rgooch@atnf.csiro.au>
+	       Return EISDIR rather than EINVAL for read(2) on directories.
+  v0.73
+    19990809   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed <devfs_setup> to new __init scheme.
+  v0.74
+    19990901   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed remaining function declarations to new __init scheme.
+  v0.75
+    19991013   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_get_info>, <devfs_set_info>,
+	       <devfs_get_first_child> and <devfs_get_next_sibling>.
+	       Added <<dir>> parameter to <devfs_register>, <devfs_mk_compat>,
+	       <devfs_mk_dir> and <devfs_find_handle>.
+	       Work sponsored by SGI.
+  v0.76
+    19991017   Richard Gooch <rgooch@atnf.csiro.au>
+	       Allow multiple unregistrations.
+	       Work sponsored by SGI.
+  v0.77
+    19991026   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added major and minor number to devfsd protocol.
+	       Incremented devfsd protocol revision to 5.
+	       Work sponsored by SGI.
+  v0.78
+    19991030   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support info pointer for all devfs entry types.
+	       Added <<info>> parameter to <devfs_mk_dir> and
+	       <devfs_mk_symlink>.
+	       Work sponsored by SGI.
+  v0.79
+    19991031   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support "../" when searching devfs namespace.
+	       Work sponsored by SGI.
+  v0.80
+    19991101   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_get_unregister_slave>.
+	       Work sponsored by SGI.
+  v0.81
+    19991103   Richard Gooch <rgooch@atnf.csiro.au>
+	       Exported <devfs_get_parent>.
+	       Work sponsored by SGI.
+  v0.82
+    19991104   Richard Gooch <rgooch@atnf.csiro.au>
+               Removed unused <devfs_set_symlink_destination>.
+    19991105   Richard Gooch <rgooch@atnf.csiro.au>
+               Do not hide entries from devfsd or children.
+	       Removed DEVFS_FL_TTY_COMPAT flag.
+	       Removed "nottycompat" boot option.
+	       Removed <devfs_mk_compat>.
+	       Work sponsored by SGI.
+  v0.83
+    19991107   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFS_FL_WAIT flag.
+	       Work sponsored by SGI.
+  v0.84
+    19991107   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support new "disc" naming scheme in <get_removable_partition>.
+	       Allow NULL fops in <devfs_register>.
+	       Work sponsored by SGI.
+  v0.85
+    19991110   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fall back to major table if NULL fops given to <devfs_register>.
+	       Work sponsored by SGI.
+  v0.86
+    19991204   Richard Gooch <rgooch@atnf.csiro.au>
+	       Support fifos when unregistering.
+	       Work sponsored by SGI.
+  v0.87
+    19991209   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed obsolete DEVFS_FL_COMPAT and DEVFS_FL_TOLERANT flags.
+	       Work sponsored by SGI.
+  v0.88
+    19991214   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed kmod support.
+	       Work sponsored by SGI.
+  v0.89
+    19991216   Richard Gooch <rgooch@atnf.csiro.au>
+	       Improved debugging in <get_vfs_inode>.
+	       Ensure dentries created by devfsd will be cleaned up.
+	       Work sponsored by SGI.
+  v0.90
+    19991223   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_get_name>.
+	       Work sponsored by SGI.
+  v0.91
+    20000203   Richard Gooch <rgooch@atnf.csiro.au>
+	       Ported to kernel 2.3.42.
+	       Removed <devfs_fill_file>.
+	       Work sponsored by SGI.
+  v0.92
+    20000306   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFS_FL_NO_PERSISTENCE flag.
+	       Removed unnecessary call to <update_devfs_inode_from_entry> in
+	       <devfs_readdir>.
+	       Work sponsored by SGI.
+  v0.93
+    20000413   Richard Gooch <rgooch@atnf.csiro.au>
+	       Set inode->i_size to correct size for symlinks.
+    20000414   Richard Gooch <rgooch@atnf.csiro.au>
+	       Only give lookup() method to directories to comply with new VFS
+	       assumptions.
+	       Work sponsored by SGI.
+    20000415   Richard Gooch <rgooch@atnf.csiro.au>
+	       Remove unnecessary tests in symlink methods.
+	       Don't kill existing block ops in <devfs_read_inode>.
+	       Work sponsored by SGI.
+  v0.94
+    20000424   Richard Gooch <rgooch@atnf.csiro.au>
+	       Don't create missing directories in <devfs_find_handle>.
+	       Work sponsored by SGI.
+  v0.95
+    20000430   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added CONFIG_DEVFS_MOUNT.
+	       Work sponsored by SGI.
+  v0.96
+    20000608   Richard Gooch <rgooch@atnf.csiro.au>
+	       Disabled multi-mount capability (use VFS bindings instead).
+	       Work sponsored by SGI.
+  v0.97
+    20000610   Richard Gooch <rgooch@atnf.csiro.au>
+	       Switched to FS_SINGLE to disable multi-mounts.
+    20000612   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed module support.
+	       Removed multi-mount code.
+	       Removed compatibility macros: VFS has changed too much.
+	       Work sponsored by SGI.
+  v0.98
+    20000614   Richard Gooch <rgooch@atnf.csiro.au>
+	       Merged devfs inode into devfs entry.
+	       Work sponsored by SGI.
+  v0.99
+    20000619   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed dead code in <devfs_register> which used to call
+	       <free_dentries>.
+	       Work sponsored by SGI.
+  v0.100
+    20000621   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed interface to <devfs_register>.
+	       Work sponsored by SGI.
+  v0.101
+    20000622   Richard Gooch <rgooch@atnf.csiro.au>
+	       Simplified interface to <devfs_mk_symlink> and <devfs_mk_dir>.
+	       Simplified interface to <devfs_find_handle>.
+	       Work sponsored by SGI.
+  v0.102
+    20010519   Richard Gooch <rgooch@atnf.csiro.au>
+	       Ensure <devfs_generate_path> terminates string for root entry.
+	       Exported <devfs_get_name> to modules.
+    20010520   Richard Gooch <rgooch@atnf.csiro.au>
+	       Make <devfs_mk_symlink> send events to devfsd.
+	       Cleaned up option processing in <devfs_setup>.
+    20010521   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bugs in handling symlinks: could leak or cause Oops.
+    20010522   Richard Gooch <rgooch@atnf.csiro.au>
+	       Cleaned up directory handling by separating fops.
+  v0.103
+    20010601   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed handling of inverted options in <devfs_setup>.
+  v0.104
+    20010604   Richard Gooch <rgooch@atnf.csiro.au>
+	       Adjusted <try_modload> to account for <devfs_generate_path> fix.
+  v0.105
+    20010617   Richard Gooch <rgooch@atnf.csiro.au>
+	       Answered question posed by Al Viro and removed his comments.
+	       Moved setting of registered flag after other fields are changed.
+	       Fixed race between <devfsd_close> and <devfsd_notify_one>.
+	       Global VFS changes added bogus BKL to <devfsd_close>: removed.
+	       Widened locking in <devfs_readlink> and <devfs_follow_link>.
+	       Replaced <devfsd_read> stack usage with <devfsd_ioctl> kmalloc.
+	       Simplified locking in <devfsd_ioctl> and fixed memory leak.
+  v0.106
+    20010709   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed broken devnum allocation and use <devfs_alloc_devnum>.
+	       Fixed old devnum leak by calling new <devfs_dealloc_devnum>.
+  v0.107
+    20010712   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bug in <devfs_setup> which could hang boot process.
+  v0.108
+    20010730   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added DEVFSD_NOTIFY_DELETE event.
+    20010801   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed #include <asm/segment.h>.
+  v0.109
+    20010807   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed inode table races by removing it and using
+	       inode->u.generic_ip instead.
+	       Moved <devfs_read_inode> into <get_vfs_inode>.
+	       Moved <devfs_write_inode> into <devfs_notify_change>.
+  v0.110
+    20010808   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed race in <devfs_do_symlink> for uni-processor.
+  v0.111
+    20010818   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed remnant of multi-mount support in <devfs_mknod>.
+               Removed unused DEVFS_FL_SHOW_UNREG flag.
+  v0.112
+    20010820   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed nlink field from struct devfs_inode.
+  v0.113
+    20010823   Richard Gooch <rgooch@atnf.csiro.au>
+	       Replaced BKL with global rwsem to protect symlink data (quick
+	       and dirty hack).
+  v0.114
+    20010827   Richard Gooch <rgooch@atnf.csiro.au>
+	       Replaced global rwsem for symlink with per-link refcount.
+  v0.115
+    20010919   Richard Gooch <rgooch@atnf.csiro.au>
+	       Set inode->i_mapping->a_ops for block nodes in <get_vfs_inode>.
+  v0.116
+    20011008   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed overrun in <devfs_link> by removing function (not needed).
+    20011009   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed buffer underrun in <try_modload>.
+    20011029   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed race in <devfsd_ioctl> when setting event mask.
+    20011114   Richard Gooch <rgooch@atnf.csiro.au>
+	       First release of new locking code.
+  v1.0
+    20011117   Richard Gooch <rgooch@atnf.csiro.au>
+	       Discard temporary buffer, now use "%s" for dentry names.
+    20011118   Richard Gooch <rgooch@atnf.csiro.au>
+	       Don't generate path in <try_modload>: use fake entry instead.
+	       Use "existing" directory in <_devfs_make_parent_for_leaf>.
+    20011122   Richard Gooch <rgooch@atnf.csiro.au>
+	       Use slab cache rather than fixed buffer for devfsd events.
+  v1.1
+    20011125   Richard Gooch <rgooch@atnf.csiro.au>
+	       Send DEVFSD_NOTIFY_REGISTERED events in <devfs_mk_dir>.
+    20011127   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed locking bug in <devfs_d_revalidate_wait> due to typo.
+	       Do not send CREATE, CHANGE, ASYNC_OPEN or DELETE events from
+	       devfsd or children.
+  v1.2
+    20011202   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bug in <devfsd_read>: was dereferencing freed pointer.
+  v1.3
+    20011203   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed bug in <devfsd_close>: was dereferencing freed pointer.
+	       Added process group check for devfsd privileges.
+  v1.4
+    20011204   Richard Gooch <rgooch@atnf.csiro.au>
+	       Use SLAB_ATOMIC in <devfsd_notify_de> from <devfs_d_delete>.
+  v1.5
+    20011211   Richard Gooch <rgooch@atnf.csiro.au>
+	       Return old entry in <devfs_mk_dir> for 2.4.x kernels.
+    20011212   Richard Gooch <rgooch@atnf.csiro.au>
+	       Increment refcount on module in <check_disc_changed>.
+    20011215   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_get_handle> and exported <devfs_put>.
+	       Increment refcount on module in <devfs_get_ops>.
+	       Created <devfs_put_ops>.
+  v1.6
+    20011216   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added poisoning to <devfs_put>.
+	       Improved debugging messages.
+  v1.7
+    20011221   Richard Gooch <rgooch@atnf.csiro.au>
+	       Corrected (made useful) debugging message in <unregister>.
+	       Moved <kmem_cache_create> in <mount_devfs_fs> to <init_devfs_fs>
+    20011224   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added magic number to guard against scribbling drivers.
+    20011226   Richard Gooch <rgooch@atnf.csiro.au>
+	       Only return old entry in <devfs_mk_dir> if a directory.
+	       Defined macros for error and debug messages.
+  v1.8
+    20020113   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed (rare, old) race in <devfs_lookup>.
+  v1.9
+    20020120   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed deadlock bug in <devfs_d_revalidate_wait>.
+	       Tag VFS deletable in <devfs_mk_symlink> if handle ignored.
+  v1.10
+    20020129   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added KERN_* to remaining messages.
+	       Cleaned up declaration of <stat_read>.
+  v1.11
+    20020219   Richard Gooch <rgooch@atnf.csiro.au>
+	       Changed <devfs_rmdir> to allow later additions if not yet empty.
+  v1.12
+    20020406   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed silently introduced calls to lock_kernel() and
+	       unlock_kernel() due to recent VFS locking changes. BKL isn't
+	       required in devfs.
+  v1.13
+    20020428   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed 2.4.x compatibility code.
+  v1.14
+    20020510   Richard Gooch <rgooch@atnf.csiro.au>
+	       Added BKL to <devfs_open> because drivers still need it.
+  v1.15
+    20020512   Richard Gooch <rgooch@atnf.csiro.au>
+	       Protected <scan_dir_for_removable> and <get_removable_partition>
+	       from changing directory contents.
+  v1.16
+    20020514   Richard Gooch <rgooch@atnf.csiro.au>
+	       Minor cleanup of <scan_dir_for_removable>.
+  v1.17
+    20020721   Richard Gooch <rgooch@atnf.csiro.au>
+	       Switched to ISO C structure field initialisers.
+	       Switch to set_current_state() and move before add_wait_queue().
+    20020722   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed devfs entry leak in <devfs_readdir> when *readdir fails.
+  v1.18
+    20020725   Richard Gooch <rgooch@atnf.csiro.au>
+	       Created <devfs_find_and_unregister>.
+  v1.19
+    20020728   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed deprecated <devfs_find_handle>.
+  v1.20
+    20020820   Richard Gooch <rgooch@atnf.csiro.au>
+	       Fixed module unload race in <devfs_open>.
+  v1.21
+    20021013   Richard Gooch <rgooch@atnf.csiro.au>
+	       Removed DEVFS_FL_AUTO_OWNER.
+	       Switched lingering structure field initialiser to ISO C.
+	       Added locking when updating FCB flags.
+  v1.22
+*/
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#define DEVFS_VERSION            "2004-01-31"
+
+#define DEVFS_NAME "devfs"
+
+#define FIRST_INODE 1
+
+#define STRING_LENGTH 256
+#define FAKE_BLOCK_SIZE 1024
+#define POISON_PTR ( *(void **) poison_array )
+#define MAGIC_VALUE 0x327db823
+
+#ifndef TRUE
+#  define TRUE 1
+#  define FALSE 0
+#endif
+
+#define MODE_DIR (S_IFDIR | S_IWUSR | S_IRUGO | S_IXUGO)
+
+#define DEBUG_NONE         0x0000000
+#define DEBUG_MODULE_LOAD  0x0000001
+#define DEBUG_REGISTER     0x0000002
+#define DEBUG_UNREGISTER   0x0000004
+#define DEBUG_FREE         0x0000008
+#define DEBUG_SET_FLAGS    0x0000010
+#define DEBUG_S_READ       0x0000100	/*  Break  */
+#define DEBUG_I_LOOKUP     0x0001000	/*  Break  */
+#define DEBUG_I_CREATE     0x0002000
+#define DEBUG_I_GET        0x0004000
+#define DEBUG_I_CHANGE     0x0008000
+#define DEBUG_I_UNLINK     0x0010000
+#define DEBUG_I_RLINK      0x0020000
+#define DEBUG_I_FLINK      0x0040000
+#define DEBUG_I_MKNOD      0x0080000
+#define DEBUG_F_READDIR    0x0100000	/*  Break  */
+#define DEBUG_D_DELETE     0x1000000	/*  Break  */
+#define DEBUG_D_RELEASE    0x2000000
+#define DEBUG_D_IPUT       0x4000000
+#define DEBUG_ALL          0xfffffff
+#define DEBUG_DISABLED     DEBUG_NONE
+
+#define OPTION_NONE             0x00
+#define OPTION_MOUNT            0x01
+
+#define PRINTK(format, args...) \
+   {printk (KERN_ERR "%s" format, __FUNCTION__ , ## args);}
+
+#define OOPS(format, args...) \
+   {printk (KERN_CRIT "%s" format, __FUNCTION__ , ## args); \
+    printk ("Forcing Oops\n"); \
+    BUG();}
+
+#ifdef CONFIG_DEVFS_DEBUG
+#  define VERIFY_ENTRY(de) \
+   {if ((de) && (de)->magic_number != MAGIC_VALUE) \
+        OOPS ("(%p): bad magic value: %x\n", (de), (de)->magic_number);}
+#  define WRITE_ENTRY_MAGIC(de,magic) (de)->magic_number = (magic)
+#  define DPRINTK(flag, format, args...) \
+   {if (devfs_debug & flag) \
+	printk (KERN_INFO "%s" format, __FUNCTION__ , ## args);}
+#else
+#  define VERIFY_ENTRY(de)
+#  define WRITE_ENTRY_MAGIC(de,magic)
+#  define DPRINTK(flag, format, args...)
+#endif
+
+typedef struct devfs_entry *devfs_handle_t;
+
+struct directory_type {
+	rwlock_t lock;		/*  Lock for searching(R)/updating(W)   */
+	struct devfs_entry *first;
+	struct devfs_entry *last;
+	unsigned char no_more_additions:1;
+};
+
+struct symlink_type {
+	unsigned int length;	/*  Not including the NULL-terminator       */
+	char *linkname;		/*  This is NULL-terminated                 */
+};
+
+struct devfs_inode {		/*  This structure is for "persistent" inode storage  */
+	struct dentry *dentry;
+	struct timespec atime;
+	struct timespec mtime;
+	struct timespec ctime;
+	unsigned int ino;	/*  Inode number as seen in the VFS         */
+	uid_t uid;
+	gid_t gid;
+};
+
+struct devfs_entry {
+#ifdef CONFIG_DEVFS_DEBUG
+	unsigned int magic_number;
+#endif
+	void *info;
+	atomic_t refcount;	/*  When this drops to zero, it's unused    */
+	union {
+		struct directory_type dir;
+		dev_t dev;
+		struct symlink_type symlink;
+		const char *name;	/*  Only used for (mode == 0)               */
+	} u;
+	struct devfs_entry *prev;	/*  Previous entry in the parent directory  */
+	struct devfs_entry *next;	/*  Next entry in the parent directory      */
+	struct devfs_entry *parent;	/*  The parent directory                    */
+	struct devfs_inode inode;
+	umode_t mode;
+	unsigned short namelen;	/*  I think 64k+ filenames are a way off... */
+	unsigned char vfs:1;	/*  Whether the VFS may delete the entry   */
+	char name[1];		/*  This is just a dummy: the allocated array
+				   is bigger. This is NULL-terminated      */
+};
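+
+/*
+ * Illustrative sketch (not part of the original source): a directory's
+ * children form a doubly linked list headed by u.dir.first/u.dir.last,
+ * and every child points back at the directory through ->parent:
+ *
+ *	dir (S_ISDIR)
+ *	  u.dir.first --> childA <-> childB <-> childC <-- u.dir.last
+ *	                  (each child's ->parent == dir)
+ *
+ * An entry that has been unhooked from its parent is marked by
+ * de->prev == de (see _devfs_unhook() below).
+ */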
+
+/*  The root of the device tree  */
+static struct devfs_entry *root_entry;
+
+struct devfsd_buf_entry {
+	struct devfs_entry *de;	/*  The name is generated with this         */
+	unsigned short type;	/*  The type of event                       */
+	umode_t mode;
+	uid_t uid;
+	gid_t gid;
+	struct devfsd_buf_entry *next;
+};
+
+struct fs_info {		/*  This structure is for the mounted devfs  */
+	struct super_block *sb;
+	spinlock_t devfsd_buffer_lock;	/*  Lock when inserting/deleting events  */
+	struct devfsd_buf_entry *devfsd_first_event;
+	struct devfsd_buf_entry *devfsd_last_event;
+	volatile int devfsd_sleeping;
+	volatile struct task_struct *devfsd_task;
+	volatile pid_t devfsd_pgrp;
+	volatile struct file *devfsd_file;
+	struct devfsd_notify_struct *devfsd_info;
+	volatile unsigned long devfsd_event_mask;
+	atomic_t devfsd_overrun_count;
+	wait_queue_head_t devfsd_wait_queue;	/*  Wake devfsd on input       */
+	wait_queue_head_t revalidate_wait_queue;	/*  Wake when devfsd sleeps    */
+};
+
+static struct fs_info fs_info = {.devfsd_buffer_lock = SPIN_LOCK_UNLOCKED };
+static kmem_cache_t *devfsd_buf_cache;
+#ifdef CONFIG_DEVFS_DEBUG
+static unsigned int devfs_debug_init __initdata = DEBUG_NONE;
+static unsigned int devfs_debug = DEBUG_NONE;
+static DEFINE_SPINLOCK(stat_lock);
+static unsigned int stat_num_entries;
+static unsigned int stat_num_bytes;
+#endif
+static unsigned char poison_array[8] =
+    { 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a };
+
+#ifdef CONFIG_DEVFS_MOUNT
+static unsigned int boot_options = OPTION_MOUNT;
+#else
+static unsigned int boot_options = OPTION_NONE;
+#endif
+
+/*  Forward function declarations  */
+static devfs_handle_t _devfs_walk_path(struct devfs_entry *dir,
+				       const char *name, int namelen,
+				       int traverse_symlink);
+static ssize_t devfsd_read(struct file *file, char __user *buf, size_t len,
+			   loff_t * ppos);
+static int devfsd_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg);
+static int devfsd_close(struct inode *inode, struct file *file);
+#ifdef CONFIG_DEVFS_DEBUG
+static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
+			 loff_t * ppos);
+static struct file_operations stat_fops = {
+	.open = nonseekable_open,
+	.read = stat_read,
+};
+#endif
+
+/*  Devfs daemon file operations  */
+static struct file_operations devfsd_fops = {
+	.open = nonseekable_open,
+	.read = devfsd_read,
+	.ioctl = devfsd_ioctl,
+	.release = devfsd_close,
+};
+
+/*  Support functions follow  */
+
+/**
+ *	devfs_get - Get a reference to a devfs entry.
+ *	@de:  The devfs entry.
+ */
+
+static struct devfs_entry *devfs_get(struct devfs_entry *de)
+{
+	VERIFY_ENTRY(de);
+	if (de)
+		atomic_inc(&de->refcount);
+	return de;
+}				/*  End Function devfs_get  */
+
+/**
+ *	devfs_put - Put (release) a reference to a devfs entry.
+ *	@de:  The handle to the devfs entry.
+ */
+
+static void devfs_put(devfs_handle_t de)
+{
+	if (!de)
+		return;
+	VERIFY_ENTRY(de);
+	if (de->info == POISON_PTR)
+		OOPS("(%p): poisoned pointer\n", de);
+	if (!atomic_dec_and_test(&de->refcount))
+		return;
+	if (de == root_entry)
+		OOPS("(%p): root entry being freed\n", de);
+	DPRINTK(DEBUG_FREE, "(%s): de: %p, parent: %p \"%s\"\n",
+		de->name, de, de->parent,
+		de->parent ? de->parent->name : "no parent");
+	if (S_ISLNK(de->mode))
+		kfree(de->u.symlink.linkname);
+	WRITE_ENTRY_MAGIC(de, 0);
+#ifdef CONFIG_DEVFS_DEBUG
+	spin_lock(&stat_lock);
+	--stat_num_entries;
+	stat_num_bytes -= sizeof *de + de->namelen;
+	if (S_ISLNK(de->mode))
+		stat_num_bytes -= de->u.symlink.length + 1;
+	spin_unlock(&stat_lock);
+#endif
+	de->info = POISON_PTR;
+	kfree(de);
+}				/*  End Function devfs_put  */
+
+/**
+ *	_devfs_search_dir - Search for a devfs entry in a directory.
+ *	@dir:  The directory to search.
+ *	@name:  The name of the entry to search for.
+ *	@namelen:  The number of characters in @name.
+ *
+ *  Searches for a devfs entry in a directory and returns a pointer to the
+ *   entry on success, else %NULL. The directory must already be locked.
+ *   An implicit devfs_get() is performed on the returned entry.
+ */
+
+static struct devfs_entry *_devfs_search_dir(struct devfs_entry *dir,
+					     const char *name,
+					     unsigned int namelen)
+{
+	struct devfs_entry *curr;
+
+	if (!S_ISDIR(dir->mode)) {
+		PRINTK("(%s): not a directory\n", dir->name);
+		return NULL;
+	}
+	for (curr = dir->u.dir.first; curr != NULL; curr = curr->next) {
+		if (curr->namelen != namelen)
+			continue;
+		if (memcmp(curr->name, name, namelen) == 0)
+			break;
+		/*  Not found: try the next one  */
+	}
+	return devfs_get(curr);
+}				/*  End Function _devfs_search_dir  */
+
+/**
+ *	_devfs_alloc_entry - Allocate a devfs entry.
+ *	@name:     the name of the entry
+ *	@namelen:  the number of characters in @name
+ *	@mode:     the mode for the entry
+ *
+ *  Allocates a devfs entry and returns a pointer to the entry on success,
+ *   else %NULL.
+ */
+
+static struct devfs_entry *_devfs_alloc_entry(const char *name,
+					      unsigned int namelen,
+					      umode_t mode)
+{
+	struct devfs_entry *new;
+	static unsigned long inode_counter = FIRST_INODE;
+	static DEFINE_SPINLOCK(counter_lock);
+
+	if (name && (namelen < 1))
+		namelen = strlen(name);
+	if ((new = kmalloc(sizeof *new + namelen, GFP_KERNEL)) == NULL)
+		return NULL;
+	memset(new, 0, sizeof *new + namelen);	/*  Will set '\0' on name  */
+	new->mode = mode;
+	if (S_ISDIR(mode))
+		rwlock_init(&new->u.dir.lock);
+	atomic_set(&new->refcount, 1);
+	spin_lock(&counter_lock);
+	new->inode.ino = inode_counter++;
+	spin_unlock(&counter_lock);
+	if (name)
+		memcpy(new->name, name, namelen);
+	new->namelen = namelen;
+	WRITE_ENTRY_MAGIC(new, MAGIC_VALUE);
+#ifdef CONFIG_DEVFS_DEBUG
+	spin_lock(&stat_lock);
+	++stat_num_entries;
+	stat_num_bytes += sizeof *new + namelen;
+	spin_unlock(&stat_lock);
+#endif
+	return new;
+}				/*  End Function _devfs_alloc_entry  */
+
+/**
+ *	_devfs_append_entry - Append a devfs entry to a directory's child list.
+ *	@dir:  The directory to add to.
+ *	@de:  The devfs entry to append.
+ *	@old_de: If an entry of the same name already exists, it will be written
+ *		 here. This may be %NULL. An implicit devfs_get() is performed
+ *		 on this entry.
+ *
+ *  Appends a devfs entry to a directory's list of children, checking first
+ *   whether an entry of the same name already exists. The directory will be
+ *   locked by this function.
+ *   The value 0 is returned on success, else a negative error code.
+ *   On failure, an implicit devfs_put() is performed on @de.
+ */
+
+static int _devfs_append_entry(devfs_handle_t dir, devfs_handle_t de,
+			       devfs_handle_t * old_de)
+{
+	int retval;
+
+	if (old_de)
+		*old_de = NULL;
+	if (!S_ISDIR(dir->mode)) {
+		PRINTK("(%s): dir: \"%s\" is not a directory\n", de->name,
+		       dir->name);
+		devfs_put(de);
+		return -ENOTDIR;
+	}
+	write_lock(&dir->u.dir.lock);
+	if (dir->u.dir.no_more_additions)
+		retval = -ENOENT;
+	else {
+		struct devfs_entry *old;
+
+		old = _devfs_search_dir(dir, de->name, de->namelen);
+		if (old_de)
+			*old_de = old;
+		else
+			devfs_put(old);
+		if (old == NULL) {
+			de->parent = dir;
+			de->prev = dir->u.dir.last;
+			/*  Append to the directory's list of children  */
+			if (dir->u.dir.first == NULL)
+				dir->u.dir.first = de;
+			else
+				dir->u.dir.last->next = de;
+			dir->u.dir.last = de;
+			retval = 0;
+		} else
+			retval = -EEXIST;
+	}
+	write_unlock(&dir->u.dir.lock);
+	if (retval)
+		devfs_put(de);
+	return retval;
+}				/*  End Function _devfs_append_entry  */
+
+/**
+ *	_devfs_get_root_entry - Get the root devfs entry.
+ *
+ *	Returns the root devfs entry on success, else %NULL.
+ *
+ *	TODO: this must be callable asynchronously, because devfs is
+ *	initialized relatively late. The proper fix is to remove module_init()
+ *	from init_devfs_fs() and call it manually, early enough during
+ *	system init.
+ */
+
+static struct devfs_entry *_devfs_get_root_entry(void)
+{
+	struct devfs_entry *new;
+	static DEFINE_SPINLOCK(root_lock);
+
+	if (root_entry)
+		return root_entry;
+
+	new = _devfs_alloc_entry(NULL, 0, MODE_DIR);
+	if (new == NULL)
+		return NULL;
+
+	spin_lock(&root_lock);
+	if (root_entry) {
+		spin_unlock(&root_lock);
+		devfs_put(new);
+		return root_entry;
+	}
+	root_entry = new;
+	spin_unlock(&root_lock);
+
+	return root_entry;
+}				/*  End Function _devfs_get_root_entry  */
+
+/**
+ *	_devfs_descend - Descend down a tree using the next component name.
+ *	@dir:  The directory to search.
+ *	@name:  The component name to search for.
+ *	@namelen:  The length of @name.
+ *	@next_pos:  The position of the next '/' or '\0' is written here.
+ *
+ *  Descends into a directory, searching for a component. This function forms
+ *   the core of a tree-walking algorithm. The directory will be locked.
+ *   The devfs entry corresponding to the component is returned. If there is
+ *   no matching entry, %NULL is returned.
+ *   An implicit devfs_get() is performed on the returned entry.
+ */
+
+static struct devfs_entry *_devfs_descend(struct devfs_entry *dir,
+					  const char *name, int namelen,
+					  int *next_pos)
+{
+	const char *stop, *ptr;
+	struct devfs_entry *entry;
+
+	if ((namelen >= 3) && (strncmp(name, "../", 3) == 0)) {	/*  Special-case going to parent directory  */
+		*next_pos = 3;
+		return devfs_get(dir->parent);
+	}
+	stop = name + namelen;
+	/*  Search for a possible '/'  */
+	for (ptr = name; (ptr < stop) && (*ptr != '/'); ++ptr) ;
+	*next_pos = ptr - name;
+	read_lock(&dir->u.dir.lock);
+	entry = _devfs_search_dir(dir, name, *next_pos);
+	read_unlock(&dir->u.dir.lock);
+	return entry;
+}				/*  End Function _devfs_descend  */
+
+static devfs_handle_t _devfs_make_parent_for_leaf(struct devfs_entry *dir,
+						  const char *name,
+						  int namelen, int *leaf_pos)
+{
+	int next_pos = 0;
+
+	if (dir == NULL)
+		dir = _devfs_get_root_entry();
+	if (dir == NULL)
+		return NULL;
+	devfs_get(dir);
+	/*  Search for possible trailing component and ignore it  */
+	for (--namelen; (namelen > 0) && (name[namelen] != '/'); --namelen) ;
+	*leaf_pos = (name[namelen] == '/') ? (namelen + 1) : 0;
+	for (; namelen > 0; name += next_pos, namelen -= next_pos) {
+		struct devfs_entry *de, *old = NULL;
+
+		if ((de =
+		     _devfs_descend(dir, name, namelen, &next_pos)) == NULL) {
+			de = _devfs_alloc_entry(name, next_pos, MODE_DIR);
+			devfs_get(de);
+			if (!de || _devfs_append_entry(dir, de, &old)) {
+				devfs_put(de);
+				if (!old || !S_ISDIR(old->mode)) {
+					devfs_put(old);
+					devfs_put(dir);
+					return NULL;
+				}
+				de = old;	/*  Use the existing directory  */
+			}
+		}
+		if (de == dir->parent) {
+			devfs_put(dir);
+			devfs_put(de);
+			return NULL;
+		}
+		devfs_put(dir);
+		dir = de;
+		if (name[next_pos] == '/')
+			++next_pos;
+	}
+	return dir;
+}				/*  End Function _devfs_make_parent_for_leaf  */
+
+static devfs_handle_t _devfs_prepare_leaf(devfs_handle_t * dir,
+					  const char *name, umode_t mode)
+{
+	int namelen, leaf_pos;
+	struct devfs_entry *de;
+
+	namelen = strlen(name);
+	if ((*dir = _devfs_make_parent_for_leaf(*dir, name, namelen,
+						&leaf_pos)) == NULL) {
+		PRINTK("(%s): could not create parent path\n", name);
+		return NULL;
+	}
+	if ((de = _devfs_alloc_entry(name + leaf_pos, namelen - leaf_pos, mode))
+	    == NULL) {
+		PRINTK("(%s): could not allocate entry\n", name);
+		devfs_put(*dir);
+		return NULL;
+	}
+	return de;
+}				/*  End Function _devfs_prepare_leaf  */
+
+static devfs_handle_t _devfs_walk_path(struct devfs_entry *dir,
+				       const char *name, int namelen,
+				       int traverse_symlink)
+{
+	int next_pos = 0;
+
+	if (dir == NULL)
+		dir = _devfs_get_root_entry();
+	if (dir == NULL)
+		return NULL;
+	devfs_get(dir);
+	for (; namelen > 0; name += next_pos, namelen -= next_pos) {
+		struct devfs_entry *de, *link;
+
+		if (!S_ISDIR(dir->mode)) {
+			devfs_put(dir);
+			return NULL;
+		}
+
+		if ((de =
+		     _devfs_descend(dir, name, namelen, &next_pos)) == NULL) {
+			devfs_put(dir);
+			return NULL;
+		}
+		if (S_ISLNK(de->mode) && traverse_symlink) {	/*  Need to follow the link: this is a stack chomper  */
+			/* FIXME: what if it points outside of the mounted tree? */
+			link = _devfs_walk_path(dir, de->u.symlink.linkname,
+						de->u.symlink.length, TRUE);
+			devfs_put(de);
+			if (!link) {
+				devfs_put(dir);
+				return NULL;
+			}
+			de = link;
+		}
+		devfs_put(dir);
+		dir = de;
+		if (name[next_pos] == '/')
+			++next_pos;
+	}
+	return dir;
+}				/*  End Function _devfs_walk_path  */
+
+/**
+ *	_devfs_find_entry - Find a devfs entry.
+ *	@dir: The handle to the parent devfs directory entry. If this is %NULL
+ *		the name is relative to the root of the devfs.
+ *	@name: The name of the entry. This must not be %NULL.
+ *	@traverse_symlink: If %TRUE then symbolic links are traversed.
+ *
+ *	Returns the devfs_entry pointer on success, else %NULL. An implicit
+ *	devfs_get() is performed.
+ */
+
+static struct devfs_entry *_devfs_find_entry(devfs_handle_t dir,
+					     const char *name,
+					     int traverse_symlink)
+{
+	unsigned int namelen = strlen(name);
+
+	if (name[0] == '/') {
+		/*  Skip leading pathname component  */
+		if (namelen < 2) {
+			PRINTK("(%s): too short\n", name);
+			return NULL;
+		}
+		for (++name, --namelen; (*name != '/') && (namelen > 0);
+		     ++name, --namelen) ;
+		if (namelen < 2) {
+			PRINTK("(%s): too short\n", name);
+			return NULL;
+		}
+		++name;
+		--namelen;
+	}
+	return _devfs_walk_path(dir, name, namelen, traverse_symlink);
+}				/*  End Function _devfs_find_entry  */
+
+static struct devfs_entry *get_devfs_entry_from_vfs_inode(struct inode *inode)
+{
+	if (inode == NULL)
+		return NULL;
+	VERIFY_ENTRY((struct devfs_entry *)inode->u.generic_ip);
+	return inode->u.generic_ip;
+}				/*  End Function get_devfs_entry_from_vfs_inode  */
+
+/**
+ *	free_dentry - Free the dentry for a device entry and invalidate inode.
+ *	@de: The entry.
+ *
+ *	This must only be called after the entry has been unhooked from its
+ *	 parent directory.
+ */
+
+static void free_dentry(struct devfs_entry *de)
+{
+	struct dentry *dentry = de->inode.dentry;
+
+	if (!dentry)
+		return;
+	spin_lock(&dcache_lock);
+	dget_locked(dentry);
+	spin_unlock(&dcache_lock);
+	/*  Forcefully remove the inode  */
+	if (dentry->d_inode != NULL)
+		dentry->d_inode->i_nlink = 0;
+	d_drop(dentry);
+	dput(dentry);
+}				/*  End Function free_dentry  */
+
+/**
+ *	is_devfsd_or_child - Test if the current process is devfsd or one of its children.
+ *	@fs_info: The filesystem information.
+ *
+ *	Returns %TRUE if devfsd or child, else %FALSE.
+ */
+
+static int is_devfsd_or_child(struct fs_info *fs_info)
+{
+	struct task_struct *p = current;
+
+	if (p == fs_info->devfsd_task)
+		return (TRUE);
+	if (process_group(p) == fs_info->devfsd_pgrp)
+		return (TRUE);
+	read_lock(&tasklist_lock);
+	for (; p != &init_task; p = p->real_parent) {
+		if (p == fs_info->devfsd_task) {
+			read_unlock(&tasklist_lock);
+			return (TRUE);
+		}
+	}
+	read_unlock(&tasklist_lock);
+	return (FALSE);
+}				/*  End Function is_devfsd_or_child  */
+
+/**
+ *	devfsd_queue_empty - Test if devfsd has work pending in its event queue.
+ *	@fs_info: The filesystem information.
+ *
+ *	Returns %TRUE if the queue is empty, else %FALSE.
+ */
+
+static inline int devfsd_queue_empty(struct fs_info *fs_info)
+{
+	return (fs_info->devfsd_last_event) ? FALSE : TRUE;
+}				/*  End Function devfsd_queue_empty  */
+
+/**
+ *	wait_for_devfsd_finished - Wait for devfsd to finish processing its event queue.
+ *	@fs_info: The filesystem information.
+ *
+ *	Returns %TRUE if no more waiting will be required, else %FALSE.
+ */
+
+static int wait_for_devfsd_finished(struct fs_info *fs_info)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	if (fs_info->devfsd_task == NULL)
+		return (TRUE);
+	if (devfsd_queue_empty(fs_info) && fs_info->devfsd_sleeping)
+		return TRUE;
+	if (is_devfsd_or_child(fs_info))
+		return (FALSE);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(&fs_info->revalidate_wait_queue, &wait);
+	if (!devfsd_queue_empty(fs_info) || !fs_info->devfsd_sleeping)
+		if (fs_info->devfsd_task)
+			schedule();
+	remove_wait_queue(&fs_info->revalidate_wait_queue, &wait);
+	__set_current_state(TASK_RUNNING);
+	return (TRUE);
+}				/*  End Function wait_for_devfsd_finished  */
+
+/**
+ *	devfsd_notify_de - Notify the devfsd daemon of a change.
+ *	@de: The devfs entry that has changed. This and all parent entries will
+ *            have their reference counts incremented if the event was queued.
+ *	@type: The type of change.
+ *	@mode: The mode of the entry.
+ *	@uid: The user ID.
+ *	@gid: The group ID.
+ *	@fs_info: The filesystem info.
+ *
+ *	Returns %TRUE if an event was queued and devfsd woken up, else %FALSE.
+ */
+
+static int devfsd_notify_de(struct devfs_entry *de,
+			    unsigned short type, umode_t mode,
+			    uid_t uid, gid_t gid, struct fs_info *fs_info)
+{
+	struct devfsd_buf_entry *entry;
+	struct devfs_entry *curr;
+
+	if (!(fs_info->devfsd_event_mask & (1 << type)))
+		return (FALSE);
+	if ((entry = kmem_cache_alloc(devfsd_buf_cache, SLAB_KERNEL)) == NULL) {
+		atomic_inc(&fs_info->devfsd_overrun_count);
+		return (FALSE);
+	}
+	for (curr = de; curr != NULL; curr = curr->parent)
+		devfs_get(curr);
+	entry->de = de;
+	entry->type = type;
+	entry->mode = mode;
+	entry->uid = uid;
+	entry->gid = gid;
+	entry->next = NULL;
+	spin_lock(&fs_info->devfsd_buffer_lock);
+	if (!fs_info->devfsd_first_event)
+		fs_info->devfsd_first_event = entry;
+	if (fs_info->devfsd_last_event)
+		fs_info->devfsd_last_event->next = entry;
+	fs_info->devfsd_last_event = entry;
+	spin_unlock(&fs_info->devfsd_buffer_lock);
+	wake_up_interruptible(&fs_info->devfsd_wait_queue);
+	return (TRUE);
+}				/*  End Function devfsd_notify_de  */
+
+/**
+ *	devfsd_notify - Notify the devfsd daemon of a change.
+ *	@de: The devfs entry that has changed.
+ *	@type: The type of change event.
+ */
+
+static void devfsd_notify(struct devfs_entry *de, unsigned short type)
+{
+	devfsd_notify_de(de, type, de->mode, current->euid,
+			 current->egid, &fs_info);
+}
+
+static int devfs_mk_dev(dev_t dev, umode_t mode, const char *fmt, va_list args)
+{
+	struct devfs_entry *dir = NULL, *de;
+	char buf[64];
+	int error, n;
+
+	n = vsnprintf(buf, sizeof(buf), fmt, args);
+	if (n >= sizeof(buf) || !buf[0]) {
+		printk(KERN_WARNING "%s: invalid format string %s\n",
+		       __FUNCTION__, fmt);
+		return -EINVAL;
+	}
+
+	de = _devfs_prepare_leaf(&dir, buf, mode);
+	if (!de) {
+		printk(KERN_WARNING "%s: could not prepare leaf for %s\n",
+		       __FUNCTION__, buf);
+		return -ENOMEM;	/* could be more accurate... */
+	}
+
+	de->u.dev = dev;
+
+	error = _devfs_append_entry(dir, de, NULL);
+	if (error) {
+		printk(KERN_WARNING "%s: could not append to parent for %s\n",
+		       __FUNCTION__, buf);
+		goto out;
+	}
+
+	devfsd_notify(de, DEVFSD_NOTIFY_REGISTERED);
+      out:
+	devfs_put(dir);
+	return error;
+}
+
+int devfs_mk_bdev(dev_t dev, umode_t mode, const char *fmt, ...)
+{
+	va_list args;
+
+	if (!S_ISBLK(mode)) {
+		printk(KERN_WARNING "%s: invalid mode (%u) for %s\n",
+		       __FUNCTION__, mode, fmt);
+		return -EINVAL;
+	}
+
+	va_start(args, fmt);
+	return devfs_mk_dev(dev, mode, fmt, args);
+}
+
+EXPORT_SYMBOL(devfs_mk_bdev);
+
+int devfs_mk_cdev(dev_t dev, umode_t mode, const char *fmt, ...)
+{
+	va_list args;
+
+	if (!S_ISCHR(mode)) {
+		printk(KERN_WARNING "%s: invalid mode (%u) for %s\n",
+		       __FUNCTION__, mode, fmt);
+		return -EINVAL;
+	}
+
+	va_start(args, fmt);
+	return devfs_mk_dev(dev, mode, fmt, args);
+}
+
+EXPORT_SYMBOL(devfs_mk_cdev);
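+
+/*
+ * Usage sketch (illustrative only; the driver name, major number and
+ * permissions below are hypothetical, not taken from the original source):
+ *
+ *	devfs_mk_dir("mydrv");
+ *	devfs_mk_cdev(MKDEV(MY_MAJOR, 0), S_IFCHR | S_IRUGO | S_IWUSR,
+ *		      "mydrv/ctl");
+ *
+ * Intermediate directories in the printf-style name are created
+ * automatically by _devfs_prepare_leaf(), so the devfs_mk_dir() call above
+ * is optional.
+ */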
+
+/**
+ *	_devfs_unhook - Unhook a device entry from its parent's list.
+ *	@de: The entry to unhook.
+ *
+ *	Returns %TRUE if the entry was unhooked, else %FALSE if it was
+ *		already unhooked.
+ *	The caller must have a write lock on the parent directory.
+ */
+
+static int _devfs_unhook(struct devfs_entry *de)
+{
+	struct devfs_entry *parent;
+
+	if (!de || (de->prev == de))
+		return FALSE;
+	parent = de->parent;
+	if (de->prev == NULL)
+		parent->u.dir.first = de->next;
+	else
+		de->prev->next = de->next;
+	if (de->next == NULL)
+		parent->u.dir.last = de->prev;
+	else
+		de->next->prev = de->prev;
+	de->prev = de;		/*  Indicate we're unhooked                      */
+	de->next = NULL;	/*  Force early termination for <devfs_readdir>  */
+	return TRUE;
+}				/*  End Function _devfs_unhook  */
+
+/**
+ *	_devfs_unregister - Unregister a device entry from its parent.
+ *	@dir: The parent directory.
+ *	@de: The entry to unregister.
+ *
+ *	The caller must have a write lock on the parent directory, which is
+ *	unlocked by this function.
+ */
+
+static void _devfs_unregister(struct devfs_entry *dir, struct devfs_entry *de)
+{
+	int unhooked = _devfs_unhook(de);
+
+	write_unlock(&dir->u.dir.lock);
+	if (!unhooked)
+		return;
+	devfs_get(dir);
+	devfsd_notify(de, DEVFSD_NOTIFY_UNREGISTERED);
+	free_dentry(de);
+	devfs_put(dir);
+	if (!S_ISDIR(de->mode))
+		return;
+	while (TRUE) {		/*  Recursively unregister: this is a stack chomper  */
+		struct devfs_entry *child;
+
+		write_lock(&de->u.dir.lock);
+		de->u.dir.no_more_additions = TRUE;
+		child = de->u.dir.first;
+		VERIFY_ENTRY(child);
+		_devfs_unregister(de, child);
+		if (!child)
+			break;
+		DPRINTK(DEBUG_UNREGISTER, "(%s): child: %p  refcount: %d\n",
+			child->name, child, atomic_read(&child->refcount));
+		devfs_put(child);
+	}
+}				/*  End Function _devfs_unregister  */
+
+static int devfs_do_symlink(devfs_handle_t dir, const char *name,
+			    const char *link, devfs_handle_t * handle)
+{
+	int err;
+	unsigned int linklength;
+	char *newlink;
+	struct devfs_entry *de;
+
+	if (handle != NULL)
+		*handle = NULL;
+	if (name == NULL) {
+		PRINTK("(): NULL name pointer\n");
+		return -EINVAL;
+	}
+	if (link == NULL) {
+		PRINTK("(%s): NULL link pointer\n", name);
+		return -EINVAL;
+	}
+	linklength = strlen(link);
+	if ((newlink = kmalloc(linklength + 1, GFP_KERNEL)) == NULL)
+		return -ENOMEM;
+	memcpy(newlink, link, linklength);
+	newlink[linklength] = '\0';
+	if ((de = _devfs_prepare_leaf(&dir, name, S_IFLNK | S_IRUGO | S_IXUGO))
+	    == NULL) {
+		PRINTK("(%s): could not prepare leaf\n", name);
+		kfree(newlink);
+		return -ENOTDIR;
+	}
+	de->info = NULL;
+	de->u.symlink.linkname = newlink;
+	de->u.symlink.length = linklength;
+	if ((err = _devfs_append_entry(dir, de, NULL)) != 0) {
+		PRINTK("(%s): could not append to parent, err: %d\n", name,
+		       err);
+		devfs_put(dir);
+		return err;
+	}
+	devfs_put(dir);
+#ifdef CONFIG_DEVFS_DEBUG
+	spin_lock(&stat_lock);
+	stat_num_bytes += linklength + 1;
+	spin_unlock(&stat_lock);
+#endif
+	if (handle != NULL)
+		*handle = de;
+	return 0;
+}				/*  End Function devfs_do_symlink  */
+
+/**
+ *	devfs_mk_symlink - Create a symbolic link in the devfs namespace.
+ *	@from: The name of the entry.
+ *	@to: The name of the destination.
+ *
+ *	Returns 0 on success, else a negative error code is returned.
+ */
+
+int devfs_mk_symlink(const char *from, const char *to)
+{
+	devfs_handle_t de;
+	int err;
+
+	err = devfs_do_symlink(NULL, from, to, &de);
+	if (!err) {
+		de->vfs = TRUE;
+		devfsd_notify(de, DEVFSD_NOTIFY_REGISTERED);
+	}
+
+	return err;
+}
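+
+/*
+ * Usage sketch (illustrative only; both names are hypothetical):
+ *
+ *	devfs_mk_symlink("mydisc", "discs/disc0/disc");
+ *
+ * The link target is simply copied into de->u.symlink.linkname and is
+ * resolved later, when the link is followed.
+ */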
+
+/**
+ *	devfs_mk_dir - Create a directory in the devfs namespace.
+ *	@fmt: A printf-style format string for the name of the entry. The name
+ *		is relative to the root of the devfs.
+ *
+ *	Use of this function is optional: the node-creation functions above
+ *	automatically create intermediate directories as needed. It is provided
+ *	so that empty or shared parent directories can be created explicitly.
+ *	Returns 0 on success, else a negative error code.
+ */
+
+int devfs_mk_dir(const char *fmt, ...)
+{
+	struct devfs_entry *dir = NULL, *de = NULL, *old;
+	char buf[64];
+	va_list args;
+	int error, n;
+
+	va_start(args, fmt);
+	n = vsnprintf(buf, sizeof(buf), fmt, args);
+	if (n >= sizeof(buf) || !buf[0]) {
+		printk(KERN_WARNING "%s: invalid format string %s\n",
+		       __FUNCTION__, fmt);
+		return -EINVAL;
+	}
+
+	de = _devfs_prepare_leaf(&dir, buf, MODE_DIR);
+	if (!de) {
+		PRINTK("(%s): could not prepare leaf\n", buf);
+		return -EINVAL;
+	}
+
+	error = _devfs_append_entry(dir, de, &old);
+	if (error == -EEXIST && S_ISDIR(old->mode)) {
+		/*
+		 * devfs_mk_dir() of an already-existing directory will
+		 * return success.
+		 */
+		error = 0;
+		goto out_put;
+	} else if (error) {
+		PRINTK("(%s): could not append to dir: %p \"%s\"\n",
+		       buf, dir, dir->name);
+		devfs_put(old);
+		goto out_put;
+	}
+
+	devfsd_notify(de, DEVFSD_NOTIFY_REGISTERED);
+
+      out_put:
+	devfs_put(dir);
+	return error;
+}
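+
+/*
+ * Usage sketch (illustrative only; the directory name is hypothetical):
+ *
+ *	devfs_mk_dir("mydrv/config");
+ *
+ * Creating a directory that already exists is not an error: the -EEXIST
+ * from _devfs_append_entry() is converted to 0 above, so drivers may call
+ * this unconditionally for shared parent directories.
+ */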
+
+void devfs_remove(const char *fmt, ...)
+{
+	char buf[64];
+	va_list args;
+	int n;
+
+	va_start(args, fmt);
+	n = vsnprintf(buf, sizeof(buf), fmt, args);
+	if (n < sizeof(buf) && buf[0]) {
+		devfs_handle_t de = _devfs_find_entry(NULL, buf, 0);
+
+		if (!de) {
+			printk(KERN_ERR "%s: %s not found, cannot remove\n",
+			       __FUNCTION__, buf);
+			dump_stack();
+			return;
+		}
+
+		write_lock(&de->parent->u.dir.lock);
+		_devfs_unregister(de->parent, de);
+		devfs_put(de);
+		devfs_put(de);
+	}
+}
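+
+/*
+ * Usage sketch (illustrative only, reusing the hypothetical names from the
+ * earlier examples):
+ *
+ *	devfs_remove("mydrv/ctl");
+ *	devfs_remove("mydrv");
+ *
+ * Removing a directory recursively unregisters all of its children via
+ * _devfs_unregister(), so removing "mydrv" alone would also suffice.
+ */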
+
+/**
+ *	devfs_generate_path - Generate a pathname for an entry, relative to the devfs root.
+ *	@de: The devfs entry.
+ *	@path: The buffer to write the pathname to. The pathname and '\0'
+ *		terminator will be written at the end of the buffer.
+ *	@buflen: The length of the buffer.
+ *
+ *	Returns the offset in the buffer where the pathname starts on success,
+ *	else a negative error code.
+ */
+
+static int devfs_generate_path(devfs_handle_t de, char *path, int buflen)
+{
+	int pos;
+#define NAMEOF(de) ( (de)->mode ? (de)->name : (de)->u.name )
+
+	if (de == NULL)
+		return -EINVAL;
+	VERIFY_ENTRY(de);
+	if (de->namelen >= buflen)
+		return -ENAMETOOLONG;	/*  Must be first       */
+	path[buflen - 1] = '\0';
+	if (de->parent == NULL)
+		return buflen - 1;	/*  Don't prepend root  */
+	pos = buflen - de->namelen - 1;
+	memcpy(path + pos, NAMEOF(de), de->namelen);
+	for (de = de->parent; de->parent != NULL; de = de->parent) {
+		if (pos - de->namelen - 1 < 0)
+			return -ENAMETOOLONG;
+		path[--pos] = '/';
+		pos -= de->namelen;
+		memcpy(path + pos, NAMEOF(de), de->namelen);
+	}
+	return pos;
+}				/*  End Function devfs_generate_path  */
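+
+/*
+ * Worked example (illustrative, hypothetical names): for an entry "disc"
+ * whose parents are "disc0" and "discs" (directly under the root), and
+ * buflen = 20, the path is assembled backwards from the end of the buffer:
+ *
+ *	path[19]     = '\0'
+ *	path[15..18] = "disc"
+ *	path[14]     = '/',  path[9..13] = "disc0"
+ *	path[8]      = '/',  path[3..7]  = "discs"
+ *
+ * and 3 is returned, so &path[3] is the string "discs/disc0/disc".
+ */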
+
+/**
+ *	devfs_setup - Process kernel boot options.
+ *	@str: The boot options after the "devfs=".
+ */
+
+static int __init devfs_setup(char *str)
+{
+	static struct {
+		char *name;
+		unsigned int mask;
+		unsigned int *opt;
+	} devfs_options_tab[] __initdata = {
+#ifdef CONFIG_DEVFS_DEBUG
+		{"dall",     DEBUG_ALL,         &devfs_debug_init},
+		{"dmod",     DEBUG_MODULE_LOAD, &devfs_debug_init},
+		{"dreg",     DEBUG_REGISTER,    &devfs_debug_init},
+		{"dunreg",   DEBUG_UNREGISTER,  &devfs_debug_init},
+		{"dfree",    DEBUG_FREE,        &devfs_debug_init},
+		{"diget",    DEBUG_I_GET,       &devfs_debug_init},
+		{"dchange",  DEBUG_SET_FLAGS,   &devfs_debug_init},
+		{"dsread",   DEBUG_S_READ,      &devfs_debug_init},
+		{"dichange", DEBUG_I_CHANGE,    &devfs_debug_init},
+		{"dimknod",  DEBUG_I_MKNOD,     &devfs_debug_init},
+		{"dilookup", DEBUG_I_LOOKUP,    &devfs_debug_init},
+		{"diunlink", DEBUG_I_UNLINK,    &devfs_debug_init},
+#endif				/*  CONFIG_DEVFS_DEBUG  */
+		{"mount",    OPTION_MOUNT,      &boot_options},
+		{NULL, 0, NULL}
+	};
+
+	while ((*str != '\0') && !isspace(*str)) {
+		int i, found = 0, invert = 0;
+
+		if (strncmp(str, "no", 2) == 0) {
+			invert = 1;
+			str += 2;
+		}
+		for (i = 0; devfs_options_tab[i].name != NULL; i++) {
+			int len = strlen(devfs_options_tab[i].name);
+
+			if (strncmp(str, devfs_options_tab[i].name, len) == 0) {
+				if (invert)
+					*devfs_options_tab[i].opt &=
+					    ~devfs_options_tab[i].mask;
+				else
+					*devfs_options_tab[i].opt |=
+					    devfs_options_tab[i].mask;
+				str += len;
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			return 0;	/*  No match         */
+		if (*str != ',')
+			return 0;	/*  No more options  */
+		++str;
+	}
+	return 1;
+}				/*  End Function devfs_setup  */
+
+__setup("devfs=", devfs_setup);
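+
+/*
+ * Example kernel command line fragment (illustrative):
+ *
+ *	devfs=nomount,dall
+ *
+ * clears OPTION_MOUNT in boot_options and, when CONFIG_DEVFS_DEBUG is set,
+ * turns on all DEBUG_* flags in devfs_debug_init. Parsing stops at the
+ * first unrecognised option or at a missing ',' separator.
+ */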
+
+EXPORT_SYMBOL(devfs_mk_dir);
+EXPORT_SYMBOL(devfs_remove);
+
+/**
+ *	try_modload - Notify devfsd of an inode lookup by a non-devfsd process.
+ *	@parent: The parent devfs entry.
+ *	@fs_info: The filesystem info.
+ *	@name: The device name.
+ *	@namelen: The number of characters in @name.
+ *	@buf: A working area that will be used. This must not go out of scope
+ *            until devfsd is idle again.
+ *
+ *	Returns 0 on success (event was queued), else a negative error code.
+ */
+
+static int try_modload(struct devfs_entry *parent, struct fs_info *fs_info,
+		       const char *name, unsigned namelen,
+		       struct devfs_entry *buf)
+{
+	if (!(fs_info->devfsd_event_mask & (1 << DEVFSD_NOTIFY_LOOKUP)))
+		return -ENOENT;
+	if (is_devfsd_or_child(fs_info))
+		return -ENOENT;
+	memset(buf, 0, sizeof *buf);
+	atomic_set(&buf->refcount, 1);
+	buf->parent = parent;
+	buf->namelen = namelen;
+	buf->u.name = name;
+	WRITE_ENTRY_MAGIC(buf, MAGIC_VALUE);
+	if (!devfsd_notify_de(buf, DEVFSD_NOTIFY_LOOKUP, 0,
+			      current->euid, current->egid, fs_info))
+		return -ENOENT;
+	/*  Possible success: event has been queued  */
+	return 0;
+}				/*  End Function try_modload  */
+
+/*  Superblock operations follow  */
+
+static struct inode_operations devfs_iops;
+static struct inode_operations devfs_dir_iops;
+static struct file_operations devfs_fops;
+static struct file_operations devfs_dir_fops;
+static struct inode_operations devfs_symlink_iops;
+
+static int devfs_notify_change(struct dentry *dentry, struct iattr *iattr)
+{
+	int retval;
+	struct devfs_entry *de;
+	struct inode *inode = dentry->d_inode;
+	struct fs_info *fs_info = inode->i_sb->s_fs_info;
+
+	de = get_devfs_entry_from_vfs_inode(inode);
+	if (de == NULL)
+		return -ENODEV;
+	retval = inode_change_ok(inode, iattr);
+	if (retval != 0)
+		return retval;
+	retval = inode_setattr(inode, iattr);
+	if (retval != 0)
+		return retval;
+	DPRINTK(DEBUG_I_CHANGE, "(%d): VFS inode: %p  devfs_entry: %p\n",
+		(int)inode->i_ino, inode, de);
+	DPRINTK(DEBUG_I_CHANGE, "():   mode: 0%o  uid: %d  gid: %d\n",
+		(int)inode->i_mode, (int)inode->i_uid, (int)inode->i_gid);
+	/*  Inode is not on hash chains, thus must save permissions here rather
+	   than in a write_inode() method  */
+	de->mode = inode->i_mode;
+	de->inode.uid = inode->i_uid;
+	de->inode.gid = inode->i_gid;
+	de->inode.atime = inode->i_atime;
+	de->inode.mtime = inode->i_mtime;
+	de->inode.ctime = inode->i_ctime;
+	if ((iattr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) &&
+	    !is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_CHANGE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	return 0;
+}				/*  End Function devfs_notify_change  */
+
+static struct super_operations devfs_sops = {
+	.drop_inode = generic_delete_inode,
+	.statfs = simple_statfs,
+};
+
+/**
+ *	_devfs_get_vfs_inode - Get a VFS inode.
+ *	@sb: The super block.
+ *	@de: The devfs entry.
+ *	@dentry: The dentry to register with the devfs inode.
+ *
+ *	Returns the inode on success, else %NULL. An implicit devfs_get() is
+ *       performed if the inode is created.
+ */
+
+static struct inode *_devfs_get_vfs_inode(struct super_block *sb,
+					  struct devfs_entry *de,
+					  struct dentry *dentry)
+{
+	struct inode *inode;
+
+	if (de->prev == de)
+		return NULL;	/*  Quick check to see if unhooked  */
+	if ((inode = new_inode(sb)) == NULL) {
+		PRINTK("(%s): new_inode() failed, de: %p\n", de->name, de);
+		return NULL;
+	}
+	if (de->parent) {
+		read_lock(&de->parent->u.dir.lock);
+		if (de->prev != de)
+			de->inode.dentry = dentry;	/*      Not unhooked  */
+		read_unlock(&de->parent->u.dir.lock);
+	} else
+		de->inode.dentry = dentry;	/*  Root: no locking needed  */
+	if (de->inode.dentry != dentry) {	/*  Must have been unhooked  */
+		iput(inode);
+		return NULL;
+	}
+	/* FIXME where is devfs_put? */
+	inode->u.generic_ip = devfs_get(de);
+	inode->i_ino = de->inode.ino;
+	DPRINTK(DEBUG_I_GET, "(%d): VFS inode: %p  devfs_entry: %p\n",
+		(int)inode->i_ino, inode, de);
+	inode->i_blocks = 0;
+	inode->i_blksize = FAKE_BLOCK_SIZE;
+	inode->i_op = &devfs_iops;
+	inode->i_mode = de->mode;
+	if (S_ISDIR(de->mode)) {
+		inode->i_op = &devfs_dir_iops;
+		inode->i_fop = &devfs_dir_fops;
+	} else if (S_ISLNK(de->mode)) {
+		inode->i_op = &devfs_symlink_iops;
+		inode->i_size = de->u.symlink.length;
+	} else if (S_ISCHR(de->mode) || S_ISBLK(de->mode)) {
+		init_special_inode(inode, de->mode, de->u.dev);
+	} else if (S_ISFIFO(de->mode) || S_ISSOCK(de->mode)) {
+		init_special_inode(inode, de->mode, 0);
+	} else {
+		PRINTK("(%s): unknown mode %o de: %p\n",
+		       de->name, de->mode, de);
+		iput(inode);
+		devfs_put(de);
+		return NULL;
+	}
+
+	inode->i_uid = de->inode.uid;
+	inode->i_gid = de->inode.gid;
+	inode->i_atime = de->inode.atime;
+	inode->i_mtime = de->inode.mtime;
+	inode->i_ctime = de->inode.ctime;
+	DPRINTK(DEBUG_I_GET, "():   mode: 0%o  uid: %d  gid: %d\n",
+		(int)inode->i_mode, (int)inode->i_uid, (int)inode->i_gid);
+	return inode;
+}				/*  End Function _devfs_get_vfs_inode  */
+
+/*  File operations for device entries follow  */
+
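+/*
+ *	devfs_readdir - readdir method for devfs directories.
+ *
+ *	f_pos values 0 and 1 emit the ".." and "." entries; from 2 onwards,
+ *	f_pos - 2 indexes into the parent's chain of child entries.  Each
+ *	entry is held with devfs_get() while it is being emitted so it cannot
+ *	go away under us, and the next entry is picked up under the parent
+ *	directory's read lock.
+ */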
+static int devfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+	int err, count;
+	int stored = 0;
+	struct fs_info *fs_info;
+	struct devfs_entry *parent, *de, *next = NULL;
+	struct inode *inode = file->f_dentry->d_inode;
+
+	fs_info = inode->i_sb->s_fs_info;
+	parent = get_devfs_entry_from_vfs_inode(file->f_dentry->d_inode);
+	if ((long)file->f_pos < 0)
+		return -EINVAL;
+	DPRINTK(DEBUG_F_READDIR, "(%s): fs_info: %p  pos: %ld\n",
+		parent->name, fs_info, (long)file->f_pos);
+	switch ((long)file->f_pos) {
+	case 0:
+		err = (*filldir) (dirent, "..", 2, file->f_pos,
+				  parent_ino(file->f_dentry), DT_DIR);
+		if (err == -EINVAL)
+			break;
+		if (err < 0)
+			return err;
+		file->f_pos++;
+		++stored;
+		/*  Fall through  */
+	case 1:
+		err =
+		    (*filldir) (dirent, ".", 1, file->f_pos, inode->i_ino,
+				DT_DIR);
+		if (err == -EINVAL)
+			break;
+		if (err < 0)
+			return err;
+		file->f_pos++;
+		++stored;
+		/*  Fall through  */
+	default:
+		/*  Skip entries  */
+		count = file->f_pos - 2;
+		read_lock(&parent->u.dir.lock);
+		for (de = parent->u.dir.first; de && (count > 0); de = de->next)
+			--count;
+		devfs_get(de);
+		read_unlock(&parent->u.dir.lock);
+		/*  Now add all remaining entries  */
+		while (de) {
+			err = (*filldir) (dirent, de->name, de->namelen,
+					  file->f_pos, de->inode.ino,
+					  de->mode >> 12);
+			if (err < 0)
+				devfs_put(de);
+			else {
+				file->f_pos++;
+				++stored;
+			}
+			if (err == -EINVAL)
+				break;
+			if (err < 0)
+				return err;
+			read_lock(&parent->u.dir.lock);
+			next = devfs_get(de->next);
+			read_unlock(&parent->u.dir.lock);
+			devfs_put(de);
+			de = next;
+		}
+		break;
+	}
+	return stored;
+}				/*  End Function devfs_readdir  */
+
+/* Open devfs-specific special files */
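+/*  Minor 0 is /dev/.devfsd (the event queue read by devfsd) and minor 1 is
+    /dev/.stat (debugging statistics, CONFIG_DEVFS_DEBUG only).  The matching
+    file_operations are swapped in before calling their open() method  */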
+static int devfs_open(struct inode *inode, struct file *file)
+{
+	int err;
+	int minor = MINOR(inode->i_rdev);
+	struct file_operations *old_fops, *new_fops;
+
+	switch (minor) {
+	case 0:		/* /dev/.devfsd */
+		new_fops = fops_get(&devfsd_fops);
+		break;
+#ifdef CONFIG_DEVFS_DEBUG
+	case 1:		/* /dev/.stat */
+		new_fops = fops_get(&stat_fops);
+		break;
+#endif
+	default:
+		return -ENODEV;
+	}
+
+	if (new_fops == NULL)
+		return -ENODEV;
+	old_fops = file->f_op;
+	file->f_op = new_fops;
+	err = new_fops->open ? new_fops->open(inode, file) : 0;
+	if (err) {
+		file->f_op = old_fops;
+		fops_put(new_fops);
+	} else
+		fops_put(old_fops);
+	return err;
+}				/*  End Function devfs_open  */
+
+static struct file_operations devfs_fops = {
+	.open = devfs_open,
+};
+
+static struct file_operations devfs_dir_fops = {
+	.read = generic_read_dir,
+	.readdir = devfs_readdir,
+};
+
+/*  Dentry operations for device entries follow  */
+
+/**
+ *	devfs_d_release - Callback for when a dentry is freed.
+ *	@dentry: The dentry.
+ */
+
+static void devfs_d_release(struct dentry *dentry)
+{
+	DPRINTK(DEBUG_D_RELEASE, "(%p): inode: %p\n", dentry, dentry->d_inode);
+}				/*  End Function devfs_d_release  */
+
+/**
+ *	devfs_d_iput - Callback for when a dentry loses its inode.
+ *	@dentry: The dentry.
+ *	@inode:	The inode.
+ */
+
+static void devfs_d_iput(struct dentry *dentry, struct inode *inode)
+{
+	struct devfs_entry *de;
+
+	de = get_devfs_entry_from_vfs_inode(inode);
+	DPRINTK(DEBUG_D_IPUT,
+		"(%s): dentry: %p inode: %p de: %p de->dentry: %p\n", de->name,
+		dentry, inode, de, de->inode.dentry);
+	if (de->inode.dentry && (de->inode.dentry != dentry))
+		OOPS("(%s): de: %p dentry: %p de->dentry: %p\n",
+		     de->name, de, dentry, de->inode.dentry);
+	de->inode.dentry = NULL;
+	iput(inode);
+	devfs_put(de);
+}				/*  End Function devfs_d_iput  */
+
+static int devfs_d_delete(struct dentry *dentry);
+
+static struct dentry_operations devfs_dops = {
+	.d_delete = devfs_d_delete,
+	.d_release = devfs_d_release,
+	.d_iput = devfs_d_iput,
+};
+
+static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *);
+
+static struct dentry_operations devfs_wait_dops = {
+	.d_delete = devfs_d_delete,
+	.d_release = devfs_d_release,
+	.d_iput = devfs_d_iput,
+	.d_revalidate = devfs_d_revalidate_wait,
+};
+
+/**
+ *	devfs_d_delete - Callback for when all files for a dentry are closed.
+ *	@dentry: The dentry.
+ */
+
+static int devfs_d_delete(struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (dentry->d_op == &devfs_wait_dops)
+		dentry->d_op = &devfs_dops;
+	/*  Unhash dentry if negative (has no inode)  */
+	if (inode == NULL) {
+		DPRINTK(DEBUG_D_DELETE, "(%p): dropping negative dentry\n",
+			dentry);
+		return 1;
+	}
+	return 0;
+}				/*  End Function devfs_d_delete  */
+
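+/*
+ *	While a lookup is waiting for devfsd, dentry->d_fsdata points at one
+ *	of these (it lives on the stack of devfs_lookup()).  The de field is
+ *	filled in once the devfs entry is known, and waiters in
+ *	devfs_d_revalidate_wait() sleep on wait_queue until devfs_lookup()
+ *	wakes them.
+ */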
+struct devfs_lookup_struct {
+	devfs_handle_t de;
+	wait_queue_head_t wait_queue;
+};
+
+/* XXX: this doesn't handle the case where we got a negative dentry
+        but a devfs entry has been registered in the meantime */
+static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	devfs_handle_t parent = get_devfs_entry_from_vfs_inode(dir);
+	struct devfs_lookup_struct *lookup_info = dentry->d_fsdata;
+	DECLARE_WAITQUEUE(wait, current);
+	int need_lock;
+
+	/*
+	 * FIXME HACK
+	 *
+	 * We must make sure that:
+	 *   d_instantiate always runs under the lock
+	 *   we release the i_sem lock before going to sleep
+	 *
+	 * Unfortunately, d_revalidate is sometimes called with the i_sem
+	 * lock held and sometimes without it.  The following checks attempt
+	 * to deduce when we need to take (and correspondingly drop) the
+	 * lock here.  This relies on the current (2.6.2) calling
+	 * conventions:
+	 *
+	 *   lookup_hash is always run under i_sem and is passing NULL
+	 *   as nd
+	 *
+	 *   open(..., O_CREAT, ...) calls _lookup_hash under i_sem
+	 *   and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
+	 *
+	 *   all other invocations of ->d_revalidate seem to happen
+	 *   outside of i_sem
+	 */
+	need_lock = nd &&
+	    (!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
+
+	if (need_lock)
+		down(&dir->i_sem);
+
+	if (is_devfsd_or_child(fs_info)) {
+		devfs_handle_t de = lookup_info->de;
+		struct inode *inode;
+
+		DPRINTK(DEBUG_I_LOOKUP,
+			"(%s): dentry: %p inode: %p de: %p by: \"%s\"\n",
+			dentry->d_name.name, dentry, dentry->d_inode, de,
+			current->comm);
+		if (dentry->d_inode)
+			goto out;
+		if (de == NULL) {
+			read_lock(&parent->u.dir.lock);
+			de = _devfs_search_dir(parent, dentry->d_name.name,
+					       dentry->d_name.len);
+			read_unlock(&parent->u.dir.lock);
+			if (de == NULL)
+				goto out;
+			lookup_info->de = de;
+		}
+		/*  Create an inode, now that the driver information is available  */
+		inode = _devfs_get_vfs_inode(dir->i_sb, de, dentry);
+		if (!inode)
+			goto out;
+		DPRINTK(DEBUG_I_LOOKUP,
+			"(%s): new VFS inode(%u): %p de: %p by: \"%s\"\n",
+			de->name, de->inode.ino, inode, de, current->comm);
+		d_instantiate(dentry, inode);
+		goto out;
+	}
+	if (lookup_info == NULL)
+		goto out;	/*  Early termination  */
+	read_lock(&parent->u.dir.lock);
+	if (dentry->d_fsdata) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&lookup_info->wait_queue, &wait);
+		read_unlock(&parent->u.dir.lock);
+		/* at this point it is always (hopefully) locked */
+		up(&dir->i_sem);
+		schedule();
+		down(&dir->i_sem);
+		/*
+		 * We neither need to nor should remove wait from the wait
+		 * queue here.  The wait queue head is never reused: nothing
+		 * is ever added to it after all waiters have been woken up,
+		 * and the head itself disappears very soon after that.
+		 * Moreover, it is a local variable on the stack and is likely
+		 * to have already gone away, so any reference to it at this
+		 * point would be a bug.
+		 */
+
+	} else
+		read_unlock(&parent->u.dir.lock);
+
+      out:
+	if (need_lock)
+		up(&dir->i_sem);
+	return 1;
+}				/*  End Function devfs_d_revalidate_wait  */
+
+/*  Inode operations for device entries follow  */
+
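+/*
+ *	devfs_lookup - lookup method for devfs directories.
+ *
+ *	The directory is searched first; if no entry is found, a lookup event
+ *	is queued to devfsd via try_modload().  The dentry is hashed with
+ *	devfs_wait_dops so that concurrent lookups block in
+ *	devfs_d_revalidate_wait() until this lookup has finished.  i_sem is
+ *	dropped while waiting for devfsd and re-taken afterwards.
+ */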
+static struct dentry *devfs_lookup(struct inode *dir, struct dentry *dentry,
+				   struct nameidata *nd)
+{
+	struct devfs_entry tmp;	/*  Must stay in scope until devfsd idle again  */
+	struct devfs_lookup_struct lookup_info;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	struct devfs_entry *parent, *de;
+	struct inode *inode;
+	struct dentry *retval = NULL;
+
+	/*  Set up the dentry operations before anything else, to ensure cleaning
+	   up on any error  */
+	dentry->d_op = &devfs_dops;
+	/*  First try to get the devfs entry for this directory  */
+	parent = get_devfs_entry_from_vfs_inode(dir);
+	DPRINTK(DEBUG_I_LOOKUP, "(%s): dentry: %p parent: %p by: \"%s\"\n",
+		dentry->d_name.name, dentry, parent, current->comm);
+	if (parent == NULL)
+		return ERR_PTR(-ENOENT);
+	read_lock(&parent->u.dir.lock);
+	de = _devfs_search_dir(parent, dentry->d_name.name, dentry->d_name.len);
+	read_unlock(&parent->u.dir.lock);
+	lookup_info.de = de;
+	init_waitqueue_head(&lookup_info.wait_queue);
+	dentry->d_fsdata = &lookup_info;
+	if (de == NULL) {	/*  Try with devfsd. For any kind of failure, leave a negative dentry
+				   so someone else can deal with it (in the case where the sysadmin
+				   does a mknod()). It's important to do this before hashing the
+				   dentry, so that the devfsd queue is filled before revalidates
+				   can start  */
+		if (try_modload(parent, fs_info, dentry->d_name.name, dentry->d_name.len, &tmp) < 0) {	/*  Lookup event was not queued to devfsd  */
+			d_add(dentry, NULL);
+			return NULL;
+		}
+	}
+	dentry->d_op = &devfs_wait_dops;
+	d_add(dentry, NULL);	/*  Open the floodgates  */
+	/*  Unlock directory semaphore, which will release any waiters. They
+	   will get the hashed dentry, and may be forced to wait for
+	   revalidation  */
+	up(&dir->i_sem);
+	wait_for_devfsd_finished(fs_info);	/*  If I'm not devfsd, must wait  */
+	down(&dir->i_sem);	/*  Grab it again because them's the rules  */
+	de = lookup_info.de;
+	/*  If someone else has been so kind as to make the inode, we go home
+	   early  */
+	if (dentry->d_inode)
+		goto out;
+	if (de == NULL) {
+		read_lock(&parent->u.dir.lock);
+		de = _devfs_search_dir(parent, dentry->d_name.name,
+				       dentry->d_name.len);
+		read_unlock(&parent->u.dir.lock);
+		if (de == NULL)
+			goto out;
+		/*  OK, there's an entry now, but no VFS inode yet  */
+	}
+	/*  Create an inode, now that the driver information is available  */
+	inode = _devfs_get_vfs_inode(dir->i_sb, de, dentry);
+	if (!inode) {
+		retval = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	DPRINTK(DEBUG_I_LOOKUP,
+		"(%s): new VFS inode(%u): %p de: %p by: \"%s\"\n", de->name,
+		de->inode.ino, inode, de, current->comm);
+	d_instantiate(dentry, inode);
+      out:
+	write_lock(&parent->u.dir.lock);
+	dentry->d_op = &devfs_dops;
+	dentry->d_fsdata = NULL;
+	wake_up(&lookup_info.wait_queue);
+	write_unlock(&parent->u.dir.lock);
+	devfs_put(de);
+	return retval;
+}				/*  End Function devfs_lookup  */
+
+static int devfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int unhooked;
+	struct devfs_entry *de;
+	struct inode *inode = dentry->d_inode;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+
+	de = get_devfs_entry_from_vfs_inode(inode);
+	DPRINTK(DEBUG_I_UNLINK, "(%s): de: %p\n", dentry->d_name.name, de);
+	if (de == NULL)
+		return -ENOENT;
+	if (!de->vfs)
+		return -EPERM;
+	write_lock(&de->parent->u.dir.lock);
+	unhooked = _devfs_unhook(de);
+	write_unlock(&de->parent->u.dir.lock);
+	if (!unhooked)
+		return -ENOENT;
+	if (!is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_DELETE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	free_dentry(de);
+	devfs_put(de);
+	return 0;
+}				/*  End Function devfs_unlink  */
+
+static int devfs_symlink(struct inode *dir, struct dentry *dentry,
+			 const char *symname)
+{
+	int err;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	struct devfs_entry *parent, *de;
+	struct inode *inode;
+
+	/*  First try to get the devfs entry for this directory  */
+	parent = get_devfs_entry_from_vfs_inode(dir);
+	if (parent == NULL)
+		return -ENOENT;
+	err = devfs_do_symlink(parent, dentry->d_name.name, symname, &de);
+	DPRINTK(DEBUG_DISABLED, "(%s): errcode from <devfs_do_symlink>: %d\n",
+		dentry->d_name.name, err);
+	if (err < 0)
+		return err;
+	de->vfs = TRUE;
+	de->inode.uid = current->euid;
+	de->inode.gid = current->egid;
+	de->inode.atime = CURRENT_TIME;
+	de->inode.mtime = CURRENT_TIME;
+	de->inode.ctime = CURRENT_TIME;
+	if ((inode = _devfs_get_vfs_inode(dir->i_sb, de, dentry)) == NULL)
+		return -ENOMEM;
+	DPRINTK(DEBUG_DISABLED, "(%s): new VFS inode(%u): %p  dentry: %p\n",
+		dentry->d_name.name, de->inode.ino, inode, dentry);
+	d_instantiate(dentry, inode);
+	if (!is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_CREATE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	return 0;
+}				/*  End Function devfs_symlink  */
+
+static int devfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	int err;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	struct devfs_entry *parent, *de;
+	struct inode *inode;
+
+	mode = (mode & ~S_IFMT) | S_IFDIR;	/*  VFS doesn't pass S_IFMT part  */
+	parent = get_devfs_entry_from_vfs_inode(dir);
+	if (parent == NULL)
+		return -ENOENT;
+	de = _devfs_alloc_entry(dentry->d_name.name, dentry->d_name.len, mode);
+	if (!de)
+		return -ENOMEM;
+	de->vfs = TRUE;
+	if ((err = _devfs_append_entry(parent, de, NULL)) != 0)
+		return err;
+	de->inode.uid = current->euid;
+	de->inode.gid = current->egid;
+	de->inode.atime = CURRENT_TIME;
+	de->inode.mtime = CURRENT_TIME;
+	de->inode.ctime = CURRENT_TIME;
+	if ((inode = _devfs_get_vfs_inode(dir->i_sb, de, dentry)) == NULL)
+		return -ENOMEM;
+	DPRINTK(DEBUG_DISABLED, "(%s): new VFS inode(%u): %p  dentry: %p\n",
+		dentry->d_name.name, de->inode.ino, inode, dentry);
+	d_instantiate(dentry, inode);
+	if (!is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_CREATE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	return 0;
+}				/*  End Function devfs_mkdir  */
+
+static int devfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int err = 0;
+	struct devfs_entry *de;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	struct inode *inode = dentry->d_inode;
+
+	if (dir->i_sb->s_fs_info != inode->i_sb->s_fs_info)
+		return -EINVAL;
+	de = get_devfs_entry_from_vfs_inode(inode);
+	if (de == NULL)
+		return -ENOENT;
+	if (!S_ISDIR(de->mode))
+		return -ENOTDIR;
+	if (!de->vfs)
+		return -EPERM;
+	/*  First ensure the directory is empty and will stay that way  */
+	write_lock(&de->u.dir.lock);
+	if (de->u.dir.first)
+		err = -ENOTEMPTY;
+	else
+		de->u.dir.no_more_additions = TRUE;
+	write_unlock(&de->u.dir.lock);
+	if (err)
+		return err;
+	/*  Now unhook the directory from its parent  */
+	write_lock(&de->parent->u.dir.lock);
+	if (!_devfs_unhook(de))
+		err = -ENOENT;
+	write_unlock(&de->parent->u.dir.lock);
+	if (err)
+		return err;
+	if (!is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_DELETE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	free_dentry(de);
+	devfs_put(de);
+	return 0;
+}				/*  End Function devfs_rmdir  */
+
+static int devfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		       dev_t rdev)
+{
+	int err;
+	struct fs_info *fs_info = dir->i_sb->s_fs_info;
+	struct devfs_entry *parent, *de;
+	struct inode *inode;
+
+	DPRINTK(DEBUG_I_MKNOD, "(%s): mode: 0%o  dev: %u:%u\n",
+		dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
+	parent = get_devfs_entry_from_vfs_inode(dir);
+	if (parent == NULL)
+		return -ENOENT;
+	de = _devfs_alloc_entry(dentry->d_name.name, dentry->d_name.len, mode);
+	if (!de)
+		return -ENOMEM;
+	de->vfs = TRUE;
+	if (S_ISCHR(mode) || S_ISBLK(mode))
+		de->u.dev = rdev;
+	if ((err = _devfs_append_entry(parent, de, NULL)) != 0)
+		return err;
+	de->inode.uid = current->euid;
+	de->inode.gid = current->egid;
+	de->inode.atime = CURRENT_TIME;
+	de->inode.mtime = CURRENT_TIME;
+	de->inode.ctime = CURRENT_TIME;
+	if ((inode = _devfs_get_vfs_inode(dir->i_sb, de, dentry)) == NULL)
+		return -ENOMEM;
+	DPRINTK(DEBUG_I_MKNOD, ":   new VFS inode(%u): %p  dentry: %p\n",
+		de->inode.ino, inode, dentry);
+	d_instantiate(dentry, inode);
+	if (!is_devfsd_or_child(fs_info))
+		devfsd_notify_de(de, DEVFSD_NOTIFY_CREATE, inode->i_mode,
+				 inode->i_uid, inode->i_gid, fs_info);
+	return 0;
+}				/*  End Function devfs_mknod  */
+
+static int devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct devfs_entry *p = get_devfs_entry_from_vfs_inode(dentry->d_inode);
+	nd_set_link(nd, p ? p->u.symlink.linkname : ERR_PTR(-ENODEV));
+	return 0;
+}				/*  End Function devfs_follow_link  */
+
+static struct inode_operations devfs_iops = {
+	.setattr = devfs_notify_change,
+};
+
+static struct inode_operations devfs_dir_iops = {
+	.lookup = devfs_lookup,
+	.unlink = devfs_unlink,
+	.symlink = devfs_symlink,
+	.mkdir = devfs_mkdir,
+	.rmdir = devfs_rmdir,
+	.mknod = devfs_mknod,
+	.setattr = devfs_notify_change,
+};
+
+static struct inode_operations devfs_symlink_iops = {
+	.readlink = generic_readlink,
+	.follow_link = devfs_follow_link,
+	.setattr = devfs_notify_change,
+};
+
+static int devfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode *root_inode = NULL;
+
+	if (_devfs_get_root_entry() == NULL)
+		goto out_no_root;
+	atomic_set(&fs_info.devfsd_overrun_count, 0);
+	init_waitqueue_head(&fs_info.devfsd_wait_queue);
+	init_waitqueue_head(&fs_info.revalidate_wait_queue);
+	fs_info.sb = sb;
+	sb->s_fs_info = &fs_info;
+	sb->s_blocksize = 1024;
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = DEVFS_SUPER_MAGIC;
+	sb->s_op = &devfs_sops;
+	sb->s_time_gran = 1;
+	if ((root_inode = _devfs_get_vfs_inode(sb, root_entry, NULL)) == NULL)
+		goto out_no_root;
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root)
+		goto out_no_root;
+	DPRINTK(DEBUG_S_READ, "(): made devfs ptr: %p\n", sb->s_fs_info);
+	return 0;
+
+      out_no_root:
+	PRINTK("(): get root inode failed\n");
+	if (root_inode)
+		iput(root_inode);
+	return -EINVAL;
+}				/*  End Function devfs_fill_super  */
+
+static struct super_block *devfs_get_sb(struct file_system_type *fs_type,
+					int flags, const char *dev_name,
+					void *data)
+{
+	return get_sb_single(fs_type, flags, data, devfs_fill_super);
+}
+
+static struct file_system_type devfs_fs_type = {
+	.name = DEVFS_NAME,
+	.get_sb = devfs_get_sb,
+	.kill_sb = kill_anon_super,
+};
+
+/*  File operations for devfsd follow  */
+
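+/*
+ *	devfsd_read - read method for /dev/.devfsd.
+ *
+ *	Only the task that grabbed the event queue (via
+ *	DEVFSDIOC_SET_EVENT_MASK) may read.  Each event is presented as a
+ *	devfsd_notify_struct header followed by the NUL-terminated device
+ *	name; a short read leaves *ppos inside the record, and the event is
+ *	dequeued and freed only once the whole record has been consumed.
+ */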
+static ssize_t devfsd_read(struct file *file, char __user *buf, size_t len,
+			   loff_t * ppos)
+{
+	int done = FALSE;
+	int ival;
+	loff_t pos, devname_offset, tlen, rpos;
+	devfs_handle_t de;
+	struct devfsd_buf_entry *entry;
+	struct fs_info *fs_info = file->f_dentry->d_inode->i_sb->s_fs_info;
+	struct devfsd_notify_struct *info = fs_info->devfsd_info;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/*  Verify the task has grabbed the queue  */
+	if (fs_info->devfsd_task != current)
+		return -EPERM;
+	info->major = 0;
+	info->minor = 0;
+	/*  Block for a new entry  */
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&fs_info->devfsd_wait_queue, &wait);
+	while (devfsd_queue_empty(fs_info)) {
+		fs_info->devfsd_sleeping = TRUE;
+		wake_up(&fs_info->revalidate_wait_queue);
+		schedule();
+		fs_info->devfsd_sleeping = FALSE;
+		if (signal_pending(current)) {
+			remove_wait_queue(&fs_info->devfsd_wait_queue, &wait);
+			__set_current_state(TASK_RUNNING);
+			return -EINTR;
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&fs_info->devfsd_wait_queue, &wait);
+	__set_current_state(TASK_RUNNING);
+	/*  Now play with the data  */
+	ival = atomic_read(&fs_info->devfsd_overrun_count);
+	info->overrun_count = ival;
+	entry = fs_info->devfsd_first_event;
+	info->type = entry->type;
+	info->mode = entry->mode;
+	info->uid = entry->uid;
+	info->gid = entry->gid;
+	de = entry->de;
+	if (S_ISCHR(de->mode) || S_ISBLK(de->mode)) {
+		info->major = MAJOR(de->u.dev);
+		info->minor = MINOR(de->u.dev);
+	}
+	pos = devfs_generate_path(de, info->devname, DEVFS_PATHLEN);
+	if (pos < 0)
+		return pos;
+	info->namelen = DEVFS_PATHLEN - pos - 1;
+	if (info->mode == 0)
+		info->mode = de->mode;
+	devname_offset = info->devname - (char *)info;
+	rpos = *ppos;
+	if (rpos < devname_offset) {
+		/*  Copy parts of the header  */
+		tlen = devname_offset - rpos;
+		if (tlen > len)
+			tlen = len;
+		if (copy_to_user(buf, (char *)info + rpos, tlen)) {
+			return -EFAULT;
+		}
+		rpos += tlen;
+		buf += tlen;
+		len -= tlen;
+	}
+	if ((rpos >= devname_offset) && (len > 0)) {
+		/*  Copy the name  */
+		tlen = info->namelen + 1;
+		if (tlen > len)
+			tlen = len;
+		else
+			done = TRUE;
+		if (copy_to_user
+		    (buf, info->devname + pos + rpos - devname_offset, tlen)) {
+			return -EFAULT;
+		}
+		rpos += tlen;
+	}
+	tlen = rpos - *ppos;
+	if (done) {
+		devfs_handle_t parent;
+
+		spin_lock(&fs_info->devfsd_buffer_lock);
+		fs_info->devfsd_first_event = entry->next;
+		if (entry->next == NULL)
+			fs_info->devfsd_last_event = NULL;
+		spin_unlock(&fs_info->devfsd_buffer_lock);
+		for (; de != NULL; de = parent) {
+			parent = de->parent;
+			devfs_put(de);
+		}
+		kmem_cache_free(devfsd_buf_cache, entry);
+		if (ival > 0)
+			atomic_sub(ival, &fs_info->devfsd_overrun_count);
+		*ppos = 0;
+	} else
+		*ppos = rpos;
+	return tlen;
+}				/*  End Function devfsd_read  */
+
+static int devfsd_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ival;
+	struct fs_info *fs_info = inode->i_sb->s_fs_info;
+
+	switch (cmd) {
+	case DEVFSDIOC_GET_PROTO_REV:
+		ival = DEVFSD_PROTOCOL_REVISION_KERNEL;
+		if (copy_to_user((void __user *)arg, &ival, sizeof ival))
+			return -EFAULT;
+		break;
+	case DEVFSDIOC_SET_EVENT_MASK:
+		/*  Ensure only one reader has access to the queue. This scheme
+		   works even without the global kernel lock, because it doesn't
+		   matter who gets in first, as long as only one task gets it  */
+		if (fs_info->devfsd_task == NULL) {
+			static DEFINE_SPINLOCK(lock);
+
+			if (!spin_trylock(&lock))
+				return -EBUSY;
+			if (fs_info->devfsd_task != NULL) {	/*  We lost the race...  */
+				spin_unlock(&lock);
+				return -EBUSY;
+			}
+			fs_info->devfsd_task = current;
+			spin_unlock(&lock);
+			fs_info->devfsd_pgrp =
+			    (process_group(current) ==
+			     current->pid) ? process_group(current) : 0;
+			fs_info->devfsd_file = file;
+			fs_info->devfsd_info =
+			    kmalloc(sizeof *fs_info->devfsd_info, GFP_KERNEL);
+			if (!fs_info->devfsd_info) {
+				devfsd_close(inode, file);
+				return -ENOMEM;
+			}
+		} else if (fs_info->devfsd_task != current)
+			return -EBUSY;
+		fs_info->devfsd_event_mask = arg;	/*  Let the masses come forth  */
+		break;
+	case DEVFSDIOC_RELEASE_EVENT_QUEUE:
+		if (fs_info->devfsd_file != file)
+			return -EPERM;
+		return devfsd_close(inode, file);
+		/*break; */
+#ifdef CONFIG_DEVFS_DEBUG
+	case DEVFSDIOC_SET_DEBUG_MASK:
+		if (copy_from_user(&ival, (void __user *)arg, sizeof ival))
+			return -EFAULT;
+		devfs_debug = ival;
+		break;
+#endif
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}				/*  End Function devfsd_ioctl  */
+
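+/*
+ *	devfsd_close - release the devfsd event queue.
+ *
+ *	Frees any queued events, clears the daemon registration and wakes
+ *	tasks blocked in devfs_d_revalidate_wait().  It is a no-op unless
+ *	called for the file that currently owns the queue.
+ */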
+static int devfsd_close(struct inode *inode, struct file *file)
+{
+	struct devfsd_buf_entry *entry, *next;
+	struct fs_info *fs_info = inode->i_sb->s_fs_info;
+
+	if (fs_info->devfsd_file != file)
+		return 0;
+	fs_info->devfsd_event_mask = 0;
+	fs_info->devfsd_file = NULL;
+	spin_lock(&fs_info->devfsd_buffer_lock);
+	entry = fs_info->devfsd_first_event;
+	fs_info->devfsd_first_event = NULL;
+	fs_info->devfsd_last_event = NULL;
+	if (fs_info->devfsd_info) {
+		kfree(fs_info->devfsd_info);
+		fs_info->devfsd_info = NULL;
+	}
+	spin_unlock(&fs_info->devfsd_buffer_lock);
+	fs_info->devfsd_pgrp = 0;
+	fs_info->devfsd_task = NULL;
+	wake_up(&fs_info->revalidate_wait_queue);
+	for (; entry; entry = next) {
+		next = entry->next;
+		kmem_cache_free(devfsd_buf_cache, entry);
+	}
+	return 0;
+}				/*  End Function devfsd_close  */
+
+#ifdef CONFIG_DEVFS_DEBUG
+static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
+			 loff_t * ppos)
+{
+	ssize_t num;
+	char txt[80];
+
+	num = sprintf(txt, "Number of entries: %u  number of bytes: %u\n",
+		      stat_num_entries, stat_num_bytes) + 1;
+	if (*ppos >= num)
+		return 0;
+	if (*ppos + len > num)
+		len = num - *ppos;
+	if (copy_to_user(buf, txt + *ppos, len))
+		return -EFAULT;
+	*ppos += len;
+	return len;
+}				/*  End Function stat_read  */
+#endif
+
+static int __init init_devfs_fs(void)
+{
+	int err;
+	int major;
+	struct devfs_entry *devfsd;
+#ifdef CONFIG_DEVFS_DEBUG
+	struct devfs_entry *stat;
+#endif
+
+	if (_devfs_get_root_entry() == NULL)
+		return -ENOMEM;
+
+	printk(KERN_INFO "%s: %s Richard Gooch (rgooch@atnf.csiro.au)\n",
+	       DEVFS_NAME, DEVFS_VERSION);
+	devfsd_buf_cache = kmem_cache_create("devfsd_event",
+					     sizeof(struct devfsd_buf_entry),
+					     0, 0, NULL, NULL);
+	if (!devfsd_buf_cache)
+		OOPS("(): unable to allocate event slab\n");
+#ifdef CONFIG_DEVFS_DEBUG
+	devfs_debug = devfs_debug_init;
+	printk(KERN_INFO "%s: devfs_debug: 0x%0x\n", DEVFS_NAME, devfs_debug);
+#endif
+	printk(KERN_INFO "%s: boot_options: 0x%0x\n", DEVFS_NAME, boot_options);
+
+	/* register special device for devfsd communication */
+	major = register_chrdev(0, "devfs", &devfs_fops);
+	if (major < 0)
+		return major;
+
+	/*  And create the entry for ".devfsd"  */
+	devfsd = _devfs_alloc_entry(".devfsd", 0, S_IFCHR | S_IRUSR | S_IWUSR);
+	if (devfsd == NULL)
+		return -ENOMEM;
+	devfsd->u.dev = MKDEV(major, 0);
+	_devfs_append_entry(root_entry, devfsd, NULL);
+
+#ifdef CONFIG_DEVFS_DEBUG
+	stat = _devfs_alloc_entry(".stat", 0, S_IFCHR | S_IRUGO);
+	if (stat == NULL)
+		return -ENOMEM;
+	stat->u.dev = MKDEV(major, 1);
+	_devfs_append_entry(root_entry, stat, NULL);
+#endif
+
+	err = register_filesystem(&devfs_fs_type);
+	return err;
+}				/*  End Function init_devfs_fs  */
+
+void __init mount_devfs_fs(void)
+{
+	int err;
+
+	if (!(boot_options & OPTION_MOUNT))
+		return;
+	err = do_mount("none", "/dev", "devfs", 0, NULL);
+	if (err == 0)
+		printk(KERN_INFO "Mounted devfs on /dev\n");
+	else
+		PRINTK("(): unable to mount devfs, err: %d\n", err);
+}				/*  End Function mount_devfs_fs  */
+
+module_init(init_devfs_fs)
diff --git a/fs/devfs/util.c b/fs/devfs/util.c
new file mode 100644
index 0000000..db06d38
--- /dev/null
+++ b/fs/devfs/util.c
@@ -0,0 +1,97 @@
+/*  devfs (Device FileSystem) utilities.
+
+    Copyright (C) 1999-2002  Richard Gooch
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free
+    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
+    The postal address is:
+      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+
+    ChangeLog
+
+    19991031   Richard Gooch <rgooch@atnf.csiro.au>
+               Created.
+    19991103   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <_devfs_convert_name> and supported SCSI and IDE CD-ROMs
+    20000203   Richard Gooch <rgooch@atnf.csiro.au>
+               Changed operations pointer type to void *.
+    20000621   Richard Gooch <rgooch@atnf.csiro.au>
+               Changed interface to <devfs_register_series>.
+    20000622   Richard Gooch <rgooch@atnf.csiro.au>
+               Took account of interface change to <devfs_mk_symlink>.
+               Took account of interface change to <devfs_mk_dir>.
+    20010519   Richard Gooch <rgooch@atnf.csiro.au>
+               Documentation cleanup.
+    20010709   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_*alloc_major> and <devfs_*alloc_devnum>.
+    20010710   Richard Gooch <rgooch@atnf.csiro.au>
+               Created <devfs_*alloc_unique_number>.
+    20010730   Richard Gooch <rgooch@atnf.csiro.au>
+               Documentation typo fix.
+    20010806   Richard Gooch <rgooch@atnf.csiro.au>
+               Made <block_semaphore> and <char_semaphore> private.
+    20010813   Richard Gooch <rgooch@atnf.csiro.au>
+               Fixed bug in <devfs_alloc_unique_number>: limited to 128 numbers
+    20010818   Richard Gooch <rgooch@atnf.csiro.au>
+               Updated major masks up to Linus' "no new majors" proclamation.
+	       Block: were 126 now 122 free, char: were 26 now 19 free.
+    20020324   Richard Gooch <rgooch@atnf.csiro.au>
+               Fixed bug in <devfs_alloc_unique_number>: was clearing beyond
+	       bitfield.
+    20020326   Richard Gooch <rgooch@atnf.csiro.au>
+               Fixed bitfield data type for <devfs_*alloc_devnum>.
+               Made major bitfield type and initialiser 64 bit safe.
+    20020413   Richard Gooch <rgooch@atnf.csiro.au>
+               Fixed shift warning on 64 bit machines.
+    20020428   Richard Gooch <rgooch@atnf.csiro.au>
+               Copied and used macro for error messages from fs/devfs/base.c 
+    20021013   Richard Gooch <rgooch@atnf.csiro.au>
+               Documentation fix.
+    20030101   Adam J. Richter <adam@yggdrasil.com>
+               Eliminate DEVFS_SPECIAL_{CHR,BLK}.  Use mode_t instead.
+    20030106   Christoph Hellwig <hch@infradead.org>
+               Rewrite devfs_{,de}alloc_devnum to look like C code.
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/genhd.h>
+#include <linux/bitops.h>
+
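+/*
+ * Create a "tapes/tapeN" symlink pointing at ../<name> and return the tape
+ * number N allocated for it.
+ */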
+int devfs_register_tape(const char *name)
+{
+	char tname[32], dest[64];
+	static unsigned int tape_counter;
+	unsigned int n = tape_counter++;
+
+	sprintf(dest, "../%s", name);
+	sprintf(tname, "tapes/tape%u", n);
+	devfs_mk_symlink(tname, dest);
+
+	return n;
+}
+
+EXPORT_SYMBOL(devfs_register_tape);
+
+void devfs_unregister_tape(int num)
+{
+	if (num >= 0)
+		devfs_remove("tapes/tape%u", num);
+}
+
+EXPORT_SYMBOL(devfs_unregister_tape);
diff --git a/fs/devpts/Makefile b/fs/devpts/Makefile
new file mode 100644
index 0000000..5800df2
--- /dev/null
+++ b/fs/devpts/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux /dev/pts virtual filesystem.
+#
+
+obj-$(CONFIG_UNIX98_PTYS)		+= devpts.o
+
+devpts-$(CONFIG_UNIX98_PTYS)		:= inode.o
+devpts-$(CONFIG_DEVPTS_FS_SECURITY)	+= xattr_security.o
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
new file mode 100644
index 0000000..1571c8d
--- /dev/null
+++ b/fs/devpts/inode.c
@@ -0,0 +1,242 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/fs/devpts/inode.c
+ *
+ *  Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/tty.h>
+#include <linux/devpts_fs.h>
+#include <linux/xattr.h>
+
+#define DEVPTS_SUPER_MAGIC 0x1cd1
+
+extern struct xattr_handler devpts_xattr_security_handler;
+
+static struct xattr_handler *devpts_xattr_handlers[] = {
+#ifdef CONFIG_DEVPTS_FS_SECURITY
+	&devpts_xattr_security_handler,
+#endif
+	NULL
+};
+
+static struct inode_operations devpts_file_inode_operations = {
+#ifdef CONFIG_DEVPTS_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+};
+
+static struct vfsmount *devpts_mnt;
+static struct dentry *devpts_root;
+
+static struct {
+	int setuid;
+	int setgid;
+	uid_t   uid;
+	gid_t   gid;
+	umode_t mode;
+} config = {.mode = 0600};
+
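+/*
+ * Parse the comma-separated mount options "uid=", "gid=" and "mode="
+ * (octal) on remount; any unrecognised option makes the remount fail
+ * with -EINVAL.
+ */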
+static int devpts_remount(struct super_block *sb, int *flags, char *data)
+{
+	int setuid = 0;
+	int setgid = 0;
+	uid_t uid = 0;
+	gid_t gid = 0;
+	umode_t mode = 0600;
+	char *this_char;
+
+	this_char = NULL;
+	while ((this_char = strsep(&data, ",")) != NULL) {
+		int n;
+		char dummy;
+		if (!*this_char)
+			continue;
+		if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) {
+			setuid = 1;
+			uid = n;
+		} else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) {
+			setgid = 1;
+			gid = n;
+		} else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
+			mode = n & ~S_IFMT;
+		else {
+			printk("devpts: called with bogus options\n");
+			return -EINVAL;
+		}
+	}
+	config.setuid  = setuid;
+	config.setgid  = setgid;
+	config.uid     = uid;
+	config.gid     = gid;
+	config.mode    = mode;
+
+	return 0;
+}
+
+static struct super_operations devpts_sops = {
+	.statfs		= simple_statfs,
+	.remount_fs	= devpts_remount,
+};
+
+static int
+devpts_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct inode * inode;
+
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = DEVPTS_SUPER_MAGIC;
+	s->s_op = &devpts_sops;
+	s->s_xattr = devpts_xattr_handlers;
+	s->s_time_gran = 1;
+
+	inode = new_inode(s);
+	if (!inode)
+		goto fail;
+	inode->i_ino = 1;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_blocks = 0;
+	inode->i_blksize = 1024;
+	inode->i_uid = inode->i_gid = 0;
+	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
+	inode->i_op = &simple_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+	inode->i_nlink = 2;
+
+	devpts_root = s->s_root = d_alloc_root(inode);
+	if (s->s_root)
+		return 0;
+	
+	printk("devpts: get root dentry failed\n");
+	iput(inode);
+fail:
+	return -ENOMEM;
+}
+
+static struct super_block *devpts_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, devpts_fill_super);
+}
+
+static struct file_system_type devpts_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "devpts",
+	.get_sb		= devpts_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+/*
+ * The normal naming convention is simply /dev/pts/<number>; this conforms
+ * to the System V naming convention
+ */
+
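+/*
+ * Look up the dentry for pty number "num" under the devpts root.  Note that
+ * this takes the root directory's i_sem and returns with it still held; the
+ * callers below are responsible for dropping it.
+ */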
+static struct dentry *get_node(int num)
+{
+	char s[12];
+	struct dentry *root = devpts_root;
+	down(&root->d_inode->i_sem);
+	return lookup_one_len(s, root, sprintf(s, "%d", num));
+}
+
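+/*
+ * Create the /dev/pts/<number> node for a newly allocated slave pty.  Inode
+ * number 1 is the devpts root directory, so slave ptys use number + 2.  The
+ * tty_struct is stored in inode->u.generic_ip so devpts_get_tty() can
+ * retrieve it.
+ */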
+int devpts_pty_new(struct tty_struct *tty)
+{
+	int number = tty->index;
+	struct tty_driver *driver = tty->driver;
+	dev_t device = MKDEV(driver->major, driver->minor_start+number);
+	struct dentry *dentry;
+	struct inode *inode = new_inode(devpts_mnt->mnt_sb);
+
+	/* We're supposed to be given the slave end of a pty */
+	BUG_ON(driver->type != TTY_DRIVER_TYPE_PTY);
+	BUG_ON(driver->subtype != PTY_TYPE_SLAVE);
+
+	if (!inode)
+		return -ENOMEM;
+
+	inode->i_ino = number+2;
+	inode->i_blksize = 1024;
+	inode->i_uid = config.setuid ? config.uid : current->fsuid;
+	inode->i_gid = config.setgid ? config.gid : current->fsgid;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	init_special_inode(inode, S_IFCHR|config.mode, device);
+	inode->i_op = &devpts_file_inode_operations;
+	inode->u.generic_ip = tty;
+
+	dentry = get_node(number);
+	if (!IS_ERR(dentry) && !dentry->d_inode)
+		d_instantiate(dentry, inode);
+
+	up(&devpts_root->d_inode->i_sem);
+
+	return 0;
+}
+
+struct tty_struct *devpts_get_tty(int number)
+{
+	struct dentry *dentry = get_node(number);
+	struct tty_struct *tty;
+
+	tty = NULL;
+	if (!IS_ERR(dentry)) {
+		if (dentry->d_inode)
+			tty = dentry->d_inode->u.generic_ip;
+		dput(dentry);
+	}
+
+	up(&devpts_root->d_inode->i_sem);
+
+	return tty;
+}
+
+void devpts_pty_kill(int number)
+{
+	struct dentry *dentry = get_node(number);
+
+	if (!IS_ERR(dentry)) {
+		struct inode *inode = dentry->d_inode;
+		if (inode) {
+			inode->i_nlink--;
+			d_delete(dentry);
+			dput(dentry);
+		}
+		dput(dentry);
+	}
+	up(&devpts_root->d_inode->i_sem);
+}
+
+static int __init init_devpts_fs(void)
+{
+	int err = register_filesystem(&devpts_fs_type);
+	if (!err) {
+		devpts_mnt = kern_mount(&devpts_fs_type);
+		if (IS_ERR(devpts_mnt))
+			err = PTR_ERR(devpts_mnt);
+	}
+	return err;
+}
+
+static void __exit exit_devpts_fs(void)
+{
+	unregister_filesystem(&devpts_fs_type);
+	mntput(devpts_mnt);
+}
+
+module_init(init_devpts_fs)
+module_exit(exit_devpts_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/devpts/xattr_security.c b/fs/devpts/xattr_security.c
new file mode 100644
index 0000000..864cb5c
--- /dev/null
+++ b/fs/devpts/xattr_security.c
@@ -0,0 +1,47 @@
+/*
+ * Security xattr support for devpts.
+ *
+ * Author: Stephen Smalley <sds@epoch.ncsc.mil>
+ * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/xattr.h>
+
+static size_t
+devpts_xattr_security_list(struct inode *inode, char *list, size_t list_len,
+			   const char *name, size_t name_len)
+{
+	return security_inode_listsecurity(inode, list, list_len);
+}
+
+static int
+devpts_xattr_security_get(struct inode *inode, const char *name,
+			  void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return security_inode_getsecurity(inode, name, buffer, size);
+}
+
+static int
+devpts_xattr_security_set(struct inode *inode, const char *name,
+			  const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return security_inode_setsecurity(inode, name, value, size, flags);
+}
+
+struct xattr_handler devpts_xattr_security_handler = {
+	.prefix	= XATTR_SECURITY_PREFIX,
+	.list	= devpts_xattr_security_list,
+	.get	= devpts_xattr_security_get,
+	.set	= devpts_xattr_security_set,
+};
diff --git a/fs/direct-io.c b/fs/direct-io.c
new file mode 100644
index 0000000..5a674a0
--- /dev/null
+++ b/fs/direct-io.c
@@ -0,0 +1,1258 @@
+/*
+ * fs/direct-io.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ *
+ * O_DIRECT
+ *
+ * 04Jul2002	akpm@zip.com.au
+ *		Initial version
+ * 11Sep2002	janetinc@us.ibm.com
+ * 		added readv/writev support.
+ * 29Oct2002	akpm@zip.com.au
+ *		rewrote bio_add_page() support.
+ * 30Oct2002	pbadari@us.ibm.com
+ *		added support for non-aligned IO.
+ * 06Nov2002	pbadari@us.ibm.com
+ *		added asynchronous IO support.
+ * 21Jul2003	nathans@sgi.com
+ *		added IO completion notifier.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/rwsem.h>
+#include <linux/uio.h>
+#include <asm/atomic.h>
+
+/*
+ * How many user pages to map in one call to get_user_pages().  This determines
+ * the size of a structure on the stack.
+ */
+#define DIO_PAGES	64
+
+/*
+ * This code generally works in units of "dio_blocks".  A dio_block is
+ * somewhere between the hard sector size and the filesystem block size; it
+ * is determined on a per-invocation basis.  When talking to the filesystem
+ * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
+ * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
+ * to dio_block quantities by shifting left by blkfactor.
+ *
+ * If blkfactor is zero then the user's request was aligned to the filesystem's
+ * blocksize.
+ *
+ * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
+ * This determines whether we need to do the fancy locking which prevents
+ * direct-IO from being able to read uninitialised disk blocks.  If it is zero
+ * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING then i_sem
+ * is not held for the entire direct write (it is taken briefly, initially,
+ * during a direct read, but it is never held for the duration of a direct-IO).
+ */
+
+struct dio {
+	/* BIO submission state */
+	struct bio *bio;		/* bio under assembly */
+	struct inode *inode;
+	int rw;
+	int lock_type;			/* doesn't change */
+	unsigned blkbits;		/* doesn't change */
+	unsigned blkfactor;		/* When we're using an alignment which
+					   is finer than the filesystem's soft
+					   blocksize, this specifies how much
+					   finer.  blkfactor=2 means 1/4-block
+					   alignment.  Does not change */
+	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
+					   been performed at the start of a
+					   write */
+	int pages_in_io;		/* approximate total IO pages */
+	size_t	size;			/* total request size (doesn't change)*/
+	sector_t block_in_file;		/* Current offset into the underlying
+					   file in dio_block units. */
+	unsigned blocks_available;	/* At block_in_file.  changes */
+	sector_t final_block_in_request;/* doesn't change */
+	unsigned first_block_in_page;	/* doesn't change, Used only once */
+	int boundary;			/* prev block is at a boundary */
+	int reap_counter;		/* rate limit reaping */
+	get_blocks_t *get_blocks;	/* block mapping function */
+	dio_iodone_t *end_io;		/* IO completion function */
+	sector_t final_block_in_bio;	/* current final block in bio + 1 */
+	sector_t next_block_for_io;	/* next block to be put under IO,
+					   in dio_blocks units */
+	struct buffer_head map_bh;	/* last get_blocks() result */
+
+	/*
+	 * Deferred addition of a page to the dio.  These variables are
+	 * private to dio_send_cur_page(), submit_page_section() and
+	 * dio_bio_add_page().
+	 */
+	struct page *cur_page;		/* The page */
+	unsigned cur_page_offset;	/* Offset into it, in bytes */
+	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
+	sector_t cur_page_block;	/* Where it starts */
+
+	/*
+	 * Page fetching state. These variables belong to dio_refill_pages().
+	 */
+	int curr_page;			/* changes */
+	int total_pages;		/* doesn't change */
+	unsigned long curr_user_address;/* changes */
+
+	/*
+	 * Page queue.  These variables belong to dio_refill_pages() and
+	 * dio_get_page().
+	 */
+	struct page *pages[DIO_PAGES];	/* page buffer */
+	unsigned head;			/* next page to process */
+	unsigned tail;			/* last valid page + 1 */
+	int page_errors;		/* errno from get_user_pages() */
+
+	/* BIO completion state */
+	spinlock_t bio_lock;		/* protects BIO fields below */
+	int bio_count;			/* nr bios to be completed */
+	int bios_in_flight;		/* nr bios in flight */
+	struct bio *bio_list;		/* singly linked via bi_private */
+	struct task_struct *waiter;	/* waiting task (NULL if none) */
+
+	/* AIO related stuff */
+	struct kiocb *iocb;		/* kiocb */
+	int is_async;			/* is IO async ? */
+	ssize_t result;                 /* IO result */
+};
+
+/*
+ * How many pages are in the queue?
+ */
+static inline unsigned dio_pages_present(struct dio *dio)
+{
+	return dio->tail - dio->head;
+}
+
+/*
+ * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
+ */
+static int dio_refill_pages(struct dio *dio)
+{
+	int ret;
+	int nr_pages;
+
+	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(
+		current,			/* Task for fault accounting */
+		current->mm,			/* whose pages? */
+		dio->curr_user_address,		/* Where from? */
+		nr_pages,			/* How many pages? */
+		dio->rw == READ,		/* Write to memory? */
+		0,				/* force (?) */
+		&dio->pages[0],
+		NULL);				/* vmas */
+	up_read(&current->mm->mmap_sem);
+
+	if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
+		/*
+		 * A memory fault, but the filesystem has some outstanding
+		 * mapped blocks.  We need to use those blocks up to avoid
+		 * leaking stale data in the file.
+		 */
+		if (dio->page_errors == 0)
+			dio->page_errors = ret;
+		dio->pages[0] = ZERO_PAGE(dio->curr_user_address);
+		dio->head = 0;
+		dio->tail = 1;
+		ret = 0;
+		goto out;
+	}
+
+	if (ret >= 0) {
+		dio->curr_user_address += ret * PAGE_SIZE;
+		dio->curr_page += ret;
+		dio->head = 0;
+		dio->tail = ret;
+		ret = 0;
+	}
+out:
+	return ret;	
+}
+
+/*
+ * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
+ * buffered inside the dio so that we can call get_user_pages() against a
+ * decent number of pages at a time, less frequently.  This also gives
+ * nicer use of the L1 cache.
+ */
+static struct page *dio_get_page(struct dio *dio)
+{
+	if (dio_pages_present(dio) == 0) {
+		int ret;
+
+		ret = dio_refill_pages(dio);
+		if (ret)
+			return ERR_PTR(ret);
+		BUG_ON(dio_pages_present(dio) == 0);
+	}
+	return dio->pages[dio->head++];
+}
+
+/*
+ * Called when all DIO BIO I/O has been completed - let the filesystem
+ * know, if it registered an interest earlier via get_blocks.  Pass the
+ * private field of the map buffer_head so that filesystems can use it
+ * to hold additional state between get_blocks calls and dio_complete.
+ */
+static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
+{
+	if (dio->end_io && dio->result)
+		dio->end_io(dio->inode, offset, bytes, dio->map_bh.b_private);
+	if (dio->lock_type == DIO_LOCKING)
+		up_read(&dio->inode->i_alloc_sem);
+}
+
+/*
+ * Called when a BIO has been processed.  If the count goes to zero then IO is
+ * complete and we can signal this to the AIO layer.
+ */
+static void finished_one_bio(struct dio *dio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dio->bio_lock, flags);
+	if (dio->bio_count == 1) {
+		if (dio->is_async) {
+			/*
+			 * Last reference to the dio is going away.
+			 * Drop spinlock and complete the DIO.
+			 */
+			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			dio_complete(dio, dio->block_in_file << dio->blkbits,
+					dio->result);
+			/* Complete AIO later if falling back to buffered i/o */
+			if (dio->result == dio->size ||
+				((dio->rw == READ) && dio->result)) {
+				aio_complete(dio->iocb, dio->result, 0);
+				kfree(dio);
+				return;
+			} else {
+				/*
+				 * Falling back to buffered
+				 */
+				spin_lock_irqsave(&dio->bio_lock, flags);
+				dio->bio_count--;
+				if (dio->waiter)
+					wake_up_process(dio->waiter);
+				spin_unlock_irqrestore(&dio->bio_lock, flags);
+				return;
+			}
+		}
+	}
+	dio->bio_count--;
+	spin_unlock_irqrestore(&dio->bio_lock, flags);
+}
+
+static int dio_bio_complete(struct dio *dio, struct bio *bio);
+/*
+ * Asynchronous IO callback. 
+ */
+static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+{
+	struct dio *dio = bio->bi_private;
+
+	if (bio->bi_size)
+		return 1;
+
+	/* cleanup the bio */
+	dio_bio_complete(dio, bio);
+	return 0;
+}
+
+/*
+ * The BIO completion handler simply queues the BIO up for the process-context
+ * handler.
+ *
+ * During I/O bi_private points at the dio.  After I/O, bi_private is used to
+ * implement a singly-linked list of completed BIOs, at dio->bio_list.
+ */
+static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+{
+	struct dio *dio = bio->bi_private;
+	unsigned long flags;
+
+	if (bio->bi_size)
+		return 1;
+
+	spin_lock_irqsave(&dio->bio_lock, flags);
+	bio->bi_private = dio->bio_list;
+	dio->bio_list = bio;
+	dio->bios_in_flight--;
+	if (dio->waiter && dio->bios_in_flight == 0)
+		wake_up_process(dio->waiter);
+	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	return 0;
+}
+
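+/*
+ * Allocate a bio for up to nr_vecs pages, aimed at the given device and
+ * starting sector, and hook up the asynchronous or synchronous completion
+ * handler as appropriate.  The new bio becomes the dio's bio under assembly.
+ */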
+static int
+dio_bio_alloc(struct dio *dio, struct block_device *bdev,
+		sector_t first_sector, int nr_vecs)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(GFP_KERNEL, nr_vecs);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio->bi_bdev = bdev;
+	bio->bi_sector = first_sector;
+	if (dio->is_async)
+		bio->bi_end_io = dio_bio_end_aio;
+	else
+		bio->bi_end_io = dio_bio_end_io;
+
+	dio->bio = bio;
+	return 0;
+}
+
+/*
+ * In the AIO read case we speculatively dirty the pages before starting IO.
+ * During IO completion, any of these pages which happen to have been written
+ * back will be redirtied by bio_check_pages_dirty().
+ */
+static void dio_bio_submit(struct dio *dio)
+{
+	struct bio *bio = dio->bio;
+	unsigned long flags;
+
+	bio->bi_private = dio;
+	spin_lock_irqsave(&dio->bio_lock, flags);
+	dio->bio_count++;
+	dio->bios_in_flight++;
+	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	if (dio->is_async && dio->rw == READ)
+		bio_set_pages_dirty(bio);
+	submit_bio(dio->rw, bio);
+
+	dio->bio = NULL;
+	dio->boundary = 0;
+}
+
+/*
+ * Release any resources in case of a failure
+ */
+static void dio_cleanup(struct dio *dio)
+{
+	while (dio_pages_present(dio))
+		page_cache_release(dio_get_page(dio));
+}
+
+/*
+ * Wait for the next BIO to complete.  Remove it and return it.
+ */
+static struct bio *dio_await_one(struct dio *dio)
+{
+	unsigned long flags;
+	struct bio *bio;
+
+	spin_lock_irqsave(&dio->bio_lock, flags);
+	while (dio->bio_list == NULL) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (dio->bio_list == NULL) {
+			dio->waiter = current;
+			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			blk_run_address_space(dio->inode->i_mapping);
+			io_schedule();
+			spin_lock_irqsave(&dio->bio_lock, flags);
+			dio->waiter = NULL;
+		}
+		set_current_state(TASK_RUNNING);
+	}
+	bio = dio->bio_list;
+	dio->bio_list = bio->bi_private;
+	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	return bio;
+}
+
+/*
+ * Process one completed BIO.  No locks are held.
+ */
+static int dio_bio_complete(struct dio *dio, struct bio *bio)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int page_no;
+
+	if (!uptodate)
+		dio->result = -EIO;
+
+	if (dio->is_async && dio->rw == READ) {
+		bio_check_pages_dirty(bio);	/* transfers ownership */
+	} else {
+		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
+			struct page *page = bvec[page_no].bv_page;
+
+			if (dio->rw == READ && !PageCompound(page))
+				set_page_dirty_lock(page);
+			page_cache_release(page);
+		}
+		bio_put(bio);
+	}
+	finished_one_bio(dio);
+	return uptodate ? 0 : -EIO;
+}
+
+/*
+ * Wait on and process all in-flight BIOs.
+ */
+static int dio_await_completion(struct dio *dio)
+{
+	int ret = 0;
+
+	if (dio->bio)
+		dio_bio_submit(dio);
+
+	/*
+	 * The bio_lock is not held for the read of bio_count.
+	 * This is ok since it is the dio_bio_complete() that changes
+	 * bio_count.
+	 */
+	while (dio->bio_count) {
+		struct bio *bio = dio_await_one(dio);
+		int ret2;
+
+		ret2 = dio_bio_complete(dio, bio);
+		if (ret == 0)
+			ret = ret2;
+	}
+	return ret;
+}
+
+/*
+ * A really large O_DIRECT read or write can generate a lot of BIOs.  So
+ * to keep the memory consumption sane we periodically reap any completed BIOs
+ * during the BIO generation phase.
+ *
+ * This also helps to limit the peak amount of pinned userspace memory.
+ */
+static int dio_bio_reap(struct dio *dio)
+{
+	int ret = 0;
+
+	if (dio->reap_counter++ >= 64) {
+		while (dio->bio_list) {
+			unsigned long flags;
+			struct bio *bio;
+			int ret2;
+
+			spin_lock_irqsave(&dio->bio_lock, flags);
+			bio = dio->bio_list;
+			dio->bio_list = bio->bi_private;
+			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			ret2 = dio_bio_complete(dio, bio);
+			if (ret == 0)
+				ret = ret2;
+		}
+		dio->reap_counter = 0;
+	}
+	return ret;
+}
+
+/*
+ * Call into the fs to map some more disk blocks.  We record the current number
+ * of available blocks at dio->blocks_available.  These are in units of the
+ * fs blocksize, (1 << inode->i_blkbits).
+ *
+ * The fs is allowed to map lots of blocks at once.  If it wants to do that,
+ * it uses the passed inode-relative block number as the file offset, as usual.
+ *
+ * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
+ * has remaining to do.  The fs should not map more than this number of blocks.
+ *
+ * If the fs has mapped a lot of blocks, it should populate bh->b_size to
+ * indicate how much contiguous disk space has been made available at
+ * bh->b_blocknr.
+ *
+ * If *any* of the mapped blocks are new, then the fs must set buffer_new().
+ * This isn't very efficient...
+ *
+ * In the case of filesystem holes: the fs may return an arbitrarily-large
+ * hole by returning an appropriate value in b_size and by clearing
+ * buffer_mapped().  However the direct-io code will only process holes one
+ * block at a time - it will repeatedly call get_blocks() as it walks the hole.
+ */
+static int get_more_blocks(struct dio *dio)
+{
+	int ret;
+	struct buffer_head *map_bh = &dio->map_bh;
+	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
+	unsigned long fs_count;	/* Number of filesystem-sized blocks */
+	unsigned long dio_count;/* Number of dio_block-sized blocks */
+	unsigned long blkmask;
+	int create;
+
+	/*
+	 * If there was a memory error and we've overwritten all the
+	 * mapped blocks then we can now return that memory error
+	 */
+	ret = dio->page_errors;
+	if (ret == 0) {
+		map_bh->b_state = 0;
+		map_bh->b_size = 0;
+		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
+		fs_startblk = dio->block_in_file >> dio->blkfactor;
+		dio_count = dio->final_block_in_request - dio->block_in_file;
+		fs_count = dio_count >> dio->blkfactor;
+		blkmask = (1 << dio->blkfactor) - 1;
+		if (dio_count & blkmask)	
+			fs_count++;
+
+		create = dio->rw == WRITE;
+		if (dio->lock_type == DIO_LOCKING) {
+			if (dio->block_in_file < (i_size_read(dio->inode) >>
+							dio->blkbits))
+				create = 0;
+		} else if (dio->lock_type == DIO_NO_LOCKING) {
+			create = 0;
+		}
+		/*
+		 * For writes inside i_size we forbid block creations: only
+		 * overwrites are permitted.  We fall back to buffered writes
+		 * at a higher level for inside-i_size block-instantiating
+		 * writes.
+		 */
+		ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
+						map_bh, create);
+	}
+	return ret;
+}
+
+/*
+ * There is no bio.  Make one now.
+ */
+static int dio_new_bio(struct dio *dio, sector_t start_sector)
+{
+	sector_t sector;
+	int ret, nr_pages;
+
+	ret = dio_bio_reap(dio);
+	if (ret)
+		goto out;
+	sector = start_sector << (dio->blkbits - 9);
+	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+	BUG_ON(nr_pages <= 0);
+	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+	dio->boundary = 0;
+out:
+	return ret;
+}
+
+/*
+ * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
+ * that was successful then update final_block_in_bio and take a ref against
+ * the just-added page.
+ *
+ * Return zero on success.  Non-zero means the caller needs to start a new BIO.
+ */
+static int dio_bio_add_page(struct dio *dio)
+{
+	int ret;
+
+	ret = bio_add_page(dio->bio, dio->cur_page,
+			dio->cur_page_len, dio->cur_page_offset);
+	if (ret == dio->cur_page_len) {
+		/*
+		 * Decrement count only, if we are done with this page
+		 */
+		if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
+			dio->pages_in_io--;
+		page_cache_get(dio->cur_page);
+		dio->final_block_in_bio = dio->cur_page_block +
+			(dio->cur_page_len >> dio->blkbits);
+		ret = 0;
+	} else {
+		ret = 1;
+	}
+	return ret;
+}
+		
+/*
+ * Put cur_page under IO.  The section of cur_page which is described by
+ * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
+ * starts on-disk at cur_page_block.
+ *
+ * We take a ref against the page here (on behalf of its presence in the bio).
+ *
+ * The caller of this function is responsible for removing cur_page from the
+ * dio, and for dropping the refcount which came from that presence.
+ */
+static int dio_send_cur_page(struct dio *dio)
+{
+	int ret = 0;
+
+	if (dio->bio) {
+		/*
+		 * See whether this new request is contiguous with the old
+		 */
+		if (dio->final_block_in_bio != dio->cur_page_block)
+			dio_bio_submit(dio);
+		/*
+		 * Submit now if the underlying fs is about to perform a
+		 * metadata read
+		 */
+		if (dio->boundary)
+			dio_bio_submit(dio);
+	}
+
+	if (dio->bio == NULL) {
+		ret = dio_new_bio(dio, dio->cur_page_block);
+		if (ret)
+			goto out;
+	}
+
+	if (dio_bio_add_page(dio) != 0) {
+		dio_bio_submit(dio);
+		ret = dio_new_bio(dio, dio->cur_page_block);
+		if (ret == 0) {
+			ret = dio_bio_add_page(dio);
+			BUG_ON(ret != 0);
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * An autonomous function to put a chunk of a page under deferred IO.
+ *
+ * The caller doesn't actually know (or care) whether this piece of page is in
+ * a BIO, or is under IO or whatever.  We just take care of all possible
+ * situations here.  The separation between the logic of do_direct_IO() and
+ * that of submit_page_section() is important for clarity.  Please don't
+ * break it.
+ *
+ * The chunk of page starts on-disk at blocknr.
+ *
+ * We perform deferred IO, by recording the last-submitted page inside our
+ * private part of the dio structure.  If possible, we just expand the IO
+ * across that page here.
+ *
+ * If that doesn't work out then we put the old page into the bio and add this
+ * page to the dio instead.
+ */
+static int
+submit_page_section(struct dio *dio, struct page *page,
+		unsigned offset, unsigned len, sector_t blocknr)
+{
+	int ret = 0;
+
+	/*
+	 * Can we just grow the current page's presence in the dio?
+	 */
+	if (	(dio->cur_page == page) &&
+		(dio->cur_page_offset + dio->cur_page_len == offset) &&
+		(dio->cur_page_block +
+			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
+		dio->cur_page_len += len;
+
+		/*
+		 * If dio->boundary then we want to schedule the IO now to
+		 * avoid metadata seeks.
+		 */
+		if (dio->boundary) {
+			ret = dio_send_cur_page(dio);
+			page_cache_release(dio->cur_page);
+			dio->cur_page = NULL;
+		}
+		goto out;
+	}
+
+	/*
+	 * If there's a deferred page already there then send it.
+	 */
+	if (dio->cur_page) {
+		ret = dio_send_cur_page(dio);
+		page_cache_release(dio->cur_page);
+		dio->cur_page = NULL;
+		if (ret)
+			goto out;
+	}
+
+	page_cache_get(page);		/* It is in dio */
+	dio->cur_page = page;
+	dio->cur_page_offset = offset;
+	dio->cur_page_len = len;
+	dio->cur_page_block = blocknr;
+out:
+	return ret;
+}
+
+/*
+ * Clean any dirty buffers in the blockdev mapping which alias newly-created
+ * file blocks.  Only called for S_ISREG files - blockdevs do not set
+ * buffer_new
+ */
+static void clean_blockdev_aliases(struct dio *dio)
+{
+	unsigned i;
+	unsigned nblocks;
+
+	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
+
+	for (i = 0; i < nblocks; i++) {
+		unmap_underlying_metadata(dio->map_bh.b_bdev,
+					dio->map_bh.b_blocknr + i);
+	}
+}
+
+/*
+ * If we are not writing the entire block and get_block() allocated
+ * the block for us, we need to fill in the unused portion of the
+ * block with zeros.  This happens only if the user buffer, file offset or
+ * IO length is not a multiple of the filesystem block size.
+ *
+ * `end' is zero if we're doing the start of the IO, 1 at the end of the
+ * IO.  (A worked example of the zeroing arithmetic follows the function.)
+ */
+static void dio_zero_block(struct dio *dio, int end)
+{
+	unsigned dio_blocks_per_fs_block;
+	unsigned this_chunk_blocks;	/* In dio_blocks */
+	unsigned this_chunk_bytes;
+	struct page *page;
+
+	dio->start_zero_done = 1;
+	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
+		return;
+
+	dio_blocks_per_fs_block = 1 << dio->blkfactor;
+	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
+
+	if (!this_chunk_blocks)
+		return;
+
+	/*
+	 * We need to zero out part of an fs block.  It is either at the
+	 * beginning or the end of the fs block.
+	 */
+	if (end) 
+		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
+
+	this_chunk_bytes = this_chunk_blocks << dio->blkbits;
+
+	page = ZERO_PAGE(dio->curr_user_address);
+	if (submit_page_section(dio, page, 0, this_chunk_bytes, 
+				dio->next_block_for_io))
+		return;
+
+	dio->next_block_for_io += this_chunk_blocks;
+}
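+
+/*
+ * Worked example of the zeroing arithmetic above (illustrative values only):
+ * with 512-byte dio blocks (blkbits == 9) on a filesystem using 4096-byte
+ * blocks (i_blkbits == 12), blkfactor == 3 and dio_blocks_per_fs_block == 8.
+ * Suppose a write covers dio blocks 11..20 and both fs blocks involved were
+ * newly allocated.  At the start (end == 0), 11 & 7 == 3, so the 3 dio blocks
+ * 8..10 (1536 bytes) at the head of the first fs block are zeroed.  At the
+ * end (end == 1), block_in_file == 21 and 21 & 7 == 5, so 8 - 5 == 3 dio
+ * blocks 21..23 at the tail of the last fs block are zeroed.
+ */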
+
+/*
+ * Walk the user pages, and the file, mapping blocks to disk and generating
+ * a sequence of (page,offset,len,block) mappings.  These mappings are injected
+ * into submit_page_section(), which takes care of the next stage of submission.
+ *
+ * Direct IO against a blockdev is different from direct IO against a file,
+ * because we can happily perform page-sized but 512-byte aligned IOs.  It is
+ * important that blockdev IO be able to have fine alignment and large sizes.
+ *
+ * So what we do is to permit the ->get_blocks function to populate bh.b_size
+ * with the size of IO which is permitted at this offset and this i_blkbits.
+ *
+ * For best results, the blockdev should be set up with 512-byte i_blkbits and
+ * it should set b_size to PAGE_SIZE or more inside get_blocks().  This gives
+ * fine alignment but still allows this function to work in PAGE_SIZE units.
+ * (An illustrative get_blocks() sketch follows this function.)
+ */
+static int do_direct_IO(struct dio *dio)
+{
+	const unsigned blkbits = dio->blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+	struct page *page;
+	unsigned block_in_page;
+	struct buffer_head *map_bh = &dio->map_bh;
+	int ret = 0;
+
+	/* The I/O can start at any block offset within the first page */
+	block_in_page = dio->first_block_in_page;
+
+	while (dio->block_in_file < dio->final_block_in_request) {
+		page = dio_get_page(dio);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto out;
+		}
+
+		while (block_in_page < blocks_per_page) {
+			unsigned offset_in_page = block_in_page << blkbits;
+			unsigned this_chunk_bytes;	/* # of bytes mapped */
+			unsigned this_chunk_blocks;	/* # of blocks */
+			unsigned u;
+
+			if (dio->blocks_available == 0) {
+				/*
+				 * Need to go and map some more disk
+				 */
+				unsigned long blkmask;
+				unsigned long dio_remainder;
+
+				ret = get_more_blocks(dio);
+				if (ret) {
+					page_cache_release(page);
+					goto out;
+				}
+				if (!buffer_mapped(map_bh))
+					goto do_holes;
+
+				dio->blocks_available =
+						map_bh->b_size >> dio->blkbits;
+				dio->next_block_for_io =
+					map_bh->b_blocknr << dio->blkfactor;
+				if (buffer_new(map_bh))
+					clean_blockdev_aliases(dio);
+
+				if (!dio->blkfactor)
+					goto do_holes;
+
+				blkmask = (1 << dio->blkfactor) - 1;
+				dio_remainder = (dio->block_in_file & blkmask);
+
+				/*
+				 * If we are at the start of IO and that IO
+				 * starts partway into a fs-block,
+				 * dio_remainder will be non-zero.  If the IO
+				 * is a read then we can simply advance the IO
+				 * cursor to the first block which is to be
+				 * read.  But if the IO is a write and the
+				 * block was newly allocated we cannot do that;
+				 * the start of the fs block must be zeroed out
+				 * on-disk
+				 */
+				if (!buffer_new(map_bh))
+					dio->next_block_for_io += dio_remainder;
+				dio->blocks_available -= dio_remainder;
+			}
+do_holes:
+			/* Handle holes */
+			if (!buffer_mapped(map_bh)) {
+				char *kaddr;
+
+				/* AKPM: eargh, -ENOTBLK is a hack */
+				if (dio->rw == WRITE) {
+					page_cache_release(page);
+					return -ENOTBLK;
+				}
+
+				if (dio->block_in_file >=
+					i_size_read(dio->inode)>>blkbits) {
+					/* We hit eof */
+					page_cache_release(page);
+					goto out;
+				}
+				kaddr = kmap_atomic(page, KM_USER0);
+				memset(kaddr + (block_in_page << blkbits),
+						0, 1 << blkbits);
+				flush_dcache_page(page);
+				kunmap_atomic(kaddr, KM_USER0);
+				dio->block_in_file++;
+				block_in_page++;
+				goto next_block;
+			}
+
+			/*
+			 * If we're performing IO which has an alignment which
+			 * is finer than the underlying fs, go check to see if
+			 * we must zero out the start of this block.
+			 */
+			if (unlikely(dio->blkfactor && !dio->start_zero_done))
+				dio_zero_block(dio, 0);
+
+			/*
+			 * Work out, in this_chunk_blocks, how much disk we
+			 * can add to this page
+			 */
+			this_chunk_blocks = dio->blocks_available;
+			u = (PAGE_SIZE - offset_in_page) >> blkbits;
+			if (this_chunk_blocks > u)
+				this_chunk_blocks = u;
+			u = dio->final_block_in_request - dio->block_in_file;
+			if (this_chunk_blocks > u)
+				this_chunk_blocks = u;
+			this_chunk_bytes = this_chunk_blocks << blkbits;
+			BUG_ON(this_chunk_bytes == 0);
+
+			dio->boundary = buffer_boundary(map_bh);
+			ret = submit_page_section(dio, page, offset_in_page,
+				this_chunk_bytes, dio->next_block_for_io);
+			if (ret) {
+				page_cache_release(page);
+				goto out;
+			}
+			dio->next_block_for_io += this_chunk_blocks;
+
+			dio->block_in_file += this_chunk_blocks;
+			block_in_page += this_chunk_blocks;
+			dio->blocks_available -= this_chunk_blocks;
+next_block:
+			if (dio->block_in_file > dio->final_block_in_request)
+				BUG();
+			if (dio->block_in_file == dio->final_block_in_request)
+				break;
+		}
+
+		/* Drop the ref which was taken in get_user_pages() */
+		page_cache_release(page);
+		block_in_page = 0;
+	}
+out:
+	return ret;
+}
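+
+/*
+ * Illustrative sketch, not part of the original file: a minimal get_blocks()
+ * callback of the kind described in the comment above do_direct_IO().  It
+ * maps a contiguous run of blocks 1:1 onto the device and reports the size of
+ * that run in b_size so the direct-io code can work in large units.  The 1:1
+ * mapping and the function name are assumptions made only for illustration.
+ */
+static int example_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long max_blocks, struct buffer_head *bh_result,
+		int create)
+{
+	/* Pretend file blocks map 1:1 onto device blocks */
+	bh_result->b_bdev = inode->i_sb->s_bdev;
+	bh_result->b_blocknr = iblock;
+	/* Tell the direct-io code how much contiguous disk is usable here */
+	bh_result->b_size = max_blocks << inode->i_blkbits;
+	set_buffer_mapped(bh_result);
+	/* For a hole on a read we would leave the buffer unmapped instead */
+	return 0;
+}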
+
+/*
+ * Releases both i_sem and i_alloc_sem
+ */
+static ssize_t
+direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 
+	const struct iovec *iov, loff_t offset, unsigned long nr_segs, 
+	unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
+	struct dio *dio)
+{
+	unsigned long user_addr; 
+	int seg;
+	ssize_t ret = 0;
+	ssize_t ret2;
+	size_t bytes;
+
+	dio->bio = NULL;
+	dio->inode = inode;
+	dio->rw = rw;
+	dio->blkbits = blkbits;
+	dio->blkfactor = inode->i_blkbits - blkbits;
+	dio->start_zero_done = 0;
+	dio->size = 0;
+	dio->block_in_file = offset >> blkbits;
+	dio->blocks_available = 0;
+	dio->cur_page = NULL;
+
+	dio->boundary = 0;
+	dio->reap_counter = 0;
+	dio->get_blocks = get_blocks;
+	dio->end_io = end_io;
+	dio->map_bh.b_private = NULL;
+	dio->final_block_in_bio = -1;
+	dio->next_block_for_io = -1;
+
+	dio->page_errors = 0;
+	dio->result = 0;
+	dio->iocb = iocb;
+
+	/*
+	 * BIO completion state.
+	 *
+	 * ->bio_count starts out at one, and we decrement it to zero after all
+	 * BIOs are submitted.  This to avoid the situation where a really fast
+	 * (or synchronous) device could take the count to zero while we're
+	 * still submitting BIOs.
+	 */
+	dio->bio_count = 1;
+	dio->bios_in_flight = 0;
+	spin_lock_init(&dio->bio_lock);
+	dio->bio_list = NULL;
+	dio->waiter = NULL;
+
+	/*
+	 * In the case of non-aligned buffers, we may need 2 more
+	 * pages since we need to zero out the first and last blocks.
+	 */
+	if (unlikely(dio->blkfactor))
+		dio->pages_in_io = 2;
+	else
+		dio->pages_in_io = 0;
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		user_addr = (unsigned long)iov[seg].iov_base;
+		dio->pages_in_io +=
+			((user_addr + iov[seg].iov_len + PAGE_SIZE-1)/PAGE_SIZE
+				- user_addr/PAGE_SIZE);
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		user_addr = (unsigned long)iov[seg].iov_base;
+		dio->size += bytes = iov[seg].iov_len;
+
+		/* Index of the first block within the first page */
+		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+		dio->final_block_in_request = dio->block_in_file +
+						(bytes >> blkbits);
+		/* Page fetching state */
+		dio->head = 0;
+		dio->tail = 0;
+		dio->curr_page = 0;
+
+		dio->total_pages = 0;
+		if (user_addr & (PAGE_SIZE-1)) {
+			dio->total_pages++;
+			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+		}
+		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+		dio->curr_user_address = user_addr;
+
+		ret = do_direct_IO(dio);
+
+		dio->result += iov[seg].iov_len -
+			((dio->final_block_in_request - dio->block_in_file) <<
+					blkbits);
+
+		if (ret) {
+			dio_cleanup(dio);
+			break;
+		}
+	} /* end iovec loop */
+
+	if (ret == -ENOTBLK && rw == WRITE) {
+		/*
+		 * The remaining part of the request will be
+		 * handled by buffered I/O when we return
+		 */
+		ret = 0;
+	}
+	/*
+	 * There may be some unwritten disk at the end of a part-written
+	 * fs-block-sized block.  Go zero that now.
+	 */
+	dio_zero_block(dio, 1);
+
+	if (dio->cur_page) {
+		ret2 = dio_send_cur_page(dio);
+		if (ret == 0)
+			ret = ret2;
+		page_cache_release(dio->cur_page);
+		dio->cur_page = NULL;
+	}
+	if (dio->bio)
+		dio_bio_submit(dio);
+
+	/*
+	 * It is possible that we return a short IO due to end of file.
+	 * In that case we need to release all the pages we got hold of.
+	 */
+	dio_cleanup(dio);
+
+	/*
+	 * All block lookups have been performed. For READ requests
+	 * we can let i_sem go now that it has achieved its purpose
+	 * of protecting us from looking up uninitialized blocks.
+	 */
+	if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
+		up(&dio->inode->i_sem);
+
+	/*
+	 * OK, all BIOs are submitted, so we can decrement bio_count to truly
+	 * reflect the number of to-be-processed BIOs.
+	 */
+	if (dio->is_async) {
+		int should_wait = 0;
+
+		if (dio->result < dio->size && rw == WRITE) {
+			dio->waiter = current;
+			should_wait = 1;
+		}
+		if (ret == 0)
+			ret = dio->result;
+		finished_one_bio(dio);		/* This can free the dio */
+		blk_run_address_space(inode->i_mapping);
+		if (should_wait) {
+			unsigned long flags;
+			/*
+			 * Wait for already-issued I/O to drain out and
+			 * release its references to user-space pages
+			 * before returning to fall back on buffered I/O
+			 */
+
+			spin_lock_irqsave(&dio->bio_lock, flags);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			while (dio->bio_count) {
+				spin_unlock_irqrestore(&dio->bio_lock, flags);
+				io_schedule();
+				spin_lock_irqsave(&dio->bio_lock, flags);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+			}
+			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			set_current_state(TASK_RUNNING);
+			kfree(dio);
+		}
+	} else {
+		ssize_t transferred = 0;
+
+		finished_one_bio(dio);
+		ret2 = dio_await_completion(dio);
+		if (ret == 0)
+			ret = ret2;
+		if (ret == 0)
+			ret = dio->page_errors;
+		if (dio->result) {
+			loff_t i_size = i_size_read(inode);
+
+			transferred = dio->result;
+			/*
+			 * Adjust the return value if the read crossed a
+			 * non-block-aligned EOF.
+			 */
+			if (rw == READ && (offset + transferred > i_size))
+				transferred = i_size - offset;
+		}
+		dio_complete(dio, offset, transferred);
+		if (ret == 0)
+			ret = transferred;
+
+		/* We could have also come here on an AIO file extend */
+		if (!is_sync_kiocb(iocb) && rw == WRITE &&
+		    ret >= 0 && dio->result == dio->size)
+			/*
+			 * For AIO writes where we have completed the
+			 * i/o, we have to mark the aio complete.
+			 */
+			aio_complete(iocb, ret, 0);
+		kfree(dio);
+	}
+	return ret;
+}
+
+/*
+ * This is a library function for use by filesystem drivers.
+ * The locking rules are governed by the dio_lock_type parameter.
+ *
+ * DIO_NO_LOCKING (no locking, for raw block device access)
+ * For writes, i_sem is not held on entry; it is never taken.
+ *
+ * DIO_LOCKING (simple locking for regular files)
+ * For writes we are called under i_sem and return with i_sem held, even though
+ * it is internally dropped.
+ * For reads, i_sem is not held on entry, but it is taken and dropped before
+ * returning.
+ *
+ * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
+ *	uninitialised data, allowing parallel direct readers and writers)
+ * For writes we are called without i_sem, return without it, never touch it.
+ * For reads, i_sem is held on entry and will be released before returning.
+ *
+ * Additional i_alloc_sem locking requirements are described inline below.
+ * (An illustrative DIO_LOCKING caller sketch follows this function.)
+ */
+ssize_t
+__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, const struct iovec *iov, loff_t offset, 
+	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+	int dio_lock_type)
+{
+	int seg;
+	size_t size;
+	unsigned long addr;
+	unsigned blkbits = inode->i_blkbits;
+	unsigned bdev_blkbits = 0;
+	unsigned blocksize_mask = (1 << blkbits) - 1;
+	ssize_t retval = -EINVAL;
+	loff_t end = offset;
+	struct dio *dio;
+	int reader_with_isem = (rw == READ && dio_lock_type == DIO_OWN_LOCKING);
+
+	if (rw & WRITE)
+		current->flags |= PF_SYNCWRITE;
+
+	if (bdev)
+		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
+
+	if (offset & blocksize_mask) {
+		if (bdev)
+			 blkbits = bdev_blkbits;
+		blocksize_mask = (1 << blkbits) - 1;
+		if (offset & blocksize_mask)
+			goto out;
+	}
+
+	/* Check the memory alignment.  Blocks cannot straddle pages */
+	for (seg = 0; seg < nr_segs; seg++) {
+		addr = (unsigned long)iov[seg].iov_base;
+		size = iov[seg].iov_len;
+		end += size;
+		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
+			if (bdev)
+				 blkbits = bdev_blkbits;
+			blocksize_mask = (1 << blkbits) - 1;
+			if ((addr & blocksize_mask) || (size & blocksize_mask))  
+				goto out;
+		}
+	}
+
+	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+	retval = -ENOMEM;
+	if (!dio)
+		goto out;
+
+	/*
+	 * For block device access DIO_NO_LOCKING is used,
+	 *	neither readers nor writers do any locking at all
+	 * For regular files using DIO_LOCKING,
+	 *	readers need to grab i_sem and i_alloc_sem
+	 *	writers need to grab i_alloc_sem only (i_sem is already held)
+	 * For regular files using DIO_OWN_LOCKING,
+	 *	neither readers nor writers take any locks here
+	 *	(i_sem is already held and released for writers here)
+	 */
+	dio->lock_type = dio_lock_type;
+	if (dio_lock_type != DIO_NO_LOCKING) {
+		/* watch out for a 0 len io from a tricksy fs */
+		if (rw == READ && end > offset) {
+			struct address_space *mapping;
+
+			mapping = iocb->ki_filp->f_mapping;
+			if (dio_lock_type != DIO_OWN_LOCKING) {
+				down(&inode->i_sem);
+				reader_with_isem = 1;
+			}
+
+			retval = filemap_write_and_wait_range(mapping, offset,
+							      end - 1);
+			if (retval) {
+				kfree(dio);
+				goto out;
+			}
+
+			if (dio_lock_type == DIO_OWN_LOCKING) {
+				up(&inode->i_sem);
+				reader_with_isem = 0;
+			}
+		}
+
+		if (dio_lock_type == DIO_LOCKING)
+			down_read(&inode->i_alloc_sem);
+	}
+
+	/*
+	 * For file-extending writes, updating i_size before data
+	 * writeouts complete can expose uninitialized blocks. So
+	 * even for AIO, we need to wait for i/o to complete before
+	 * returning in this case.
+	 */
+	dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) &&
+		(end > i_size_read(inode)));
+
+	retval = direct_io_worker(rw, iocb, inode, iov, offset,
+				nr_segs, blkbits, get_blocks, end_io, dio);
+
+	if (rw == READ && dio_lock_type == DIO_LOCKING)
+		reader_with_isem = 0;
+
+out:
+	if (reader_with_isem)
+		up(&inode->i_sem);
+	if (rw & WRITE)
+		current->flags &= ~PF_SYNCWRITE;
+	return retval;
+}
+EXPORT_SYMBOL(__blockdev_direct_IO);
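+
+/*
+ * Illustrative sketch, not part of the original file: how a filesystem's
+ * ->direct_IO address_space operation might call into this library under the
+ * simple DIO_LOCKING rules described above.  example_direct_IO() and
+ * example_get_blocks() are hypothetical names used only for illustration.
+ */
+static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
+		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+
+	/* Let the generic direct-io code do the work with simple locking */
+	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+				iov, offset, nr_segs, example_get_blocks,
+				NULL, DIO_LOCKING);
+}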
diff --git a/fs/dnotify.c b/fs/dnotify.c
new file mode 100644
index 0000000..f3b540d
--- /dev/null
+++ b/fs/dnotify.c
@@ -0,0 +1,183 @@
+/*
+ * Directory notifications for Linux.
+ *
+ * Copyright (C) 2000,2001,2002 Stephen Rothwell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/dnotify.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+int dir_notify_enable = 1;
+
+static kmem_cache_t *dn_cache;
+
+static void redo_inode_mask(struct inode *inode)
+{
+	unsigned long new_mask;
+	struct dnotify_struct *dn;
+
+	new_mask = 0;
+	for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
+		new_mask |= dn->dn_mask & ~DN_MULTISHOT;
+	inode->i_dnotify_mask = new_mask;
+}
+
+void dnotify_flush(struct file *filp, fl_owner_t id)
+{
+	struct dnotify_struct *dn;
+	struct dnotify_struct **prev;
+	struct inode *inode;
+
+	inode = filp->f_dentry->d_inode;
+	if (!S_ISDIR(inode->i_mode))
+		return;
+	spin_lock(&inode->i_lock);
+	prev = &inode->i_dnotify;
+	while ((dn = *prev) != NULL) {
+		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
+			*prev = dn->dn_next;
+			redo_inode_mask(inode);
+			kmem_cache_free(dn_cache, dn);
+			break;
+		}
+		prev = &dn->dn_next;
+	}
+	spin_unlock(&inode->i_lock);
+}
+
+int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+{
+	struct dnotify_struct *dn;
+	struct dnotify_struct *odn;
+	struct dnotify_struct **prev;
+	struct inode *inode;
+	fl_owner_t id = current->files;
+	int error = 0;
+
+	if ((arg & ~DN_MULTISHOT) == 0) {
+		dnotify_flush(filp, id);
+		return 0;
+	}
+	if (!dir_notify_enable)
+		return -EINVAL;
+	inode = filp->f_dentry->d_inode;
+	if (!S_ISDIR(inode->i_mode))
+		return -ENOTDIR;
+	dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+	if (dn == NULL)
+		return -ENOMEM;
+	spin_lock(&inode->i_lock);
+	prev = &inode->i_dnotify;
+	while ((odn = *prev) != NULL) {
+		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
+			odn->dn_fd = fd;
+			odn->dn_mask |= arg;
+			inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
+			goto out_free;
+		}
+		prev = &odn->dn_next;
+	}
+
+	error = f_setown(filp, current->pid, 0);
+	if (error)
+		goto out_free;
+
+	dn->dn_mask = arg;
+	dn->dn_fd = fd;
+	dn->dn_filp = filp;
+	dn->dn_owner = id;
+	inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
+	dn->dn_next = inode->i_dnotify;
+	inode->i_dnotify = dn;
+	spin_unlock(&inode->i_lock);
+
+	if (filp->f_op && filp->f_op->dir_notify)
+		return filp->f_op->dir_notify(filp, arg);
+	return 0;
+
+out_free:
+	spin_unlock(&inode->i_lock);
+	kmem_cache_free(dn_cache, dn);
+	return error;
+}
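+
+/*
+ * Illustrative sketch, not part of the original file: how a user-space
+ * program might drive the F_NOTIFY interface implemented by
+ * fcntl_dirnotify() above.  The signal number and watched directory are
+ * arbitrary choices.
+ *
+ *	#define _GNU_SOURCE
+ *	#include <fcntl.h>
+ *	#include <signal.h>
+ *	#include <stdio.h>
+ *	#include <unistd.h>
+ *
+ *	static volatile int event_fd;
+ *
+ *	static void handler(int sig, siginfo_t *si, void *data)
+ *	{
+ *		event_fd = si->si_fd;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct sigaction act;
+ *		int fd = open(".", O_RDONLY);
+ *
+ *		act.sa_sigaction = handler;
+ *		sigemptyset(&act.sa_mask);
+ *		act.sa_flags = SA_SIGINFO;
+ *		sigaction(SIGRTMIN + 1, &act, NULL);
+ *
+ *		fcntl(fd, F_SETSIG, SIGRTMIN + 1);
+ *		fcntl(fd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT);
+ *		for (;;) {
+ *			pause();
+ *			printf("event on watched fd %d\n", event_fd);
+ *		}
+ *	}
+ */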
+
+void __inode_dir_notify(struct inode *inode, unsigned long event)
+{
+	struct dnotify_struct *	dn;
+	struct dnotify_struct **prev;
+	struct fown_struct *	fown;
+	int			changed = 0;
+
+	spin_lock(&inode->i_lock);
+	prev = &inode->i_dnotify;
+	while ((dn = *prev) != NULL) {
+		if ((dn->dn_mask & event) == 0) {
+			prev = &dn->dn_next;
+			continue;
+		}
+		fown = &dn->dn_filp->f_owner;
+		send_sigio(fown, dn->dn_fd, POLL_MSG);
+		if (dn->dn_mask & DN_MULTISHOT)
+			prev = &dn->dn_next;
+		else {
+			*prev = dn->dn_next;
+			changed = 1;
+			kmem_cache_free(dn_cache, dn);
+		}
+	}
+	if (changed)
+		redo_inode_mask(inode);
+	spin_unlock(&inode->i_lock);
+}
+
+EXPORT_SYMBOL(__inode_dir_notify);
+
+/*
+ * This is hopelessly wrong, but unfixable without API changes.  At
+ * least it doesn't oops the kernel...
+ *
+ * To safely access ->d_parent we need to keep d_move away from it.  Use the
+ * dentry's d_lock for this.
+ */
+void dnotify_parent(struct dentry *dentry, unsigned long event)
+{
+	struct dentry *parent;
+
+	if (!dir_notify_enable)
+		return;
+
+	spin_lock(&dentry->d_lock);
+	parent = dentry->d_parent;
+	if (parent->d_inode->i_dnotify_mask & event) {
+		dget(parent);
+		spin_unlock(&dentry->d_lock);
+		__inode_dir_notify(parent->d_inode, event);
+		dput(parent);
+	} else {
+		spin_unlock(&dentry->d_lock);
+	}
+}
+EXPORT_SYMBOL_GPL(dnotify_parent);
+
+static int __init dnotify_init(void)
+{
+	dn_cache = kmem_cache_create("dnotify_cache",
+		sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL);
+	return 0;
+}
+
+module_init(dnotify_init)
diff --git a/fs/dquot.c b/fs/dquot.c
new file mode 100644
index 0000000..11048e0
--- /dev/null
+++ b/fs/dquot.c
@@ -0,0 +1,1850 @@
+/*
+ * Implementation of the diskquota system for the LINUX operating system. QUOTA
+ * is implemented using the BSD system call interface as the means of
+ * communication with the user level. This file contains the generic routines
+ * called by the different filesystems on allocation of an inode or block.
+ * These routines take care of the administration needed to have a consistent
+ * diskquota tracking system. The ideas of both user and group quotas are based
+ * on the Melbourne quota system as used on BSD derived systems. The internal
+ * implementation is based on one of the several variants of the LINUX
+ * inode-subsystem with added complexity of the diskquota system.
+ * 
+ * Version: $Id: dquot.c,v 6.3 1996/11/17 18:35:34 mvw Exp mvw $
+ * 
+ * Author:	Marco van Wieringen <mvw@planets.elm.net>
+ *
+ * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
+ *
+ *		Revised list management to avoid races
+ *		-- Bill Hawes, <whawes@star.net>, 9/98
+ *
+ *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ *		As the consequence the locking was moved from dquot_decr_...(),
+ *		dquot_incr_...() to calling functions.
+ *		invalidate_dquots() now writes modified dquots.
+ *		Serialized quota_off() and quota_on() for mount point.
+ *		Fixed a few bugs in grow_dquots().
+ *		Fixed deadlock in write_dquot() - we no longer account quotas on
+ *		quota files
+ *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
+ *		add_dquot_ref() restarts after blocking
+ *		Added check for bogus uid and fixed check for group in quotactl.
+ *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
+ *
+ *		Used struct list_head instead of own list struct
+ *		Invalidation of referenced dquots is no longer possible
+ *		Improved free_dquots list management
+ *		Quota and i_blocks are now updated in one place to avoid races
+ *		Warnings are now delayed so we won't block in critical section
+ *		Write updated not to require dquot lock
+ *		Jan Kara, <jack@suse.cz>, 9/2000
+ *
+ *		Added dynamic quota structure allocation
+ *		Jan Kara <jack@suse.cz> 12/2000
+ *
+ *		Rewritten quota interface. Implemented new quota format and
+ *		formats registering.
+ *		Jan Kara, <jack@suse.cz>, 2001,2002
+ *
+ *		New SMP locking.
+ *		Jan Kara, <jack@suse.cz>, 10/2002
+ *
+ *		Added journalled quota support, fix lock inversion problems
+ *		Jan Kara, <jack@suse.cz>, 2003,2004
+ *
+ * (C) Copyright 1994 - 1997 Marco van Wieringen 
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/kmod.h>
+#include <linux/namei.h>
+#include <linux/buffer_head.h>
+
+#include <asm/uaccess.h>
+
+#define __DQUOT_PARANOIA
+
+/*
+ * There are two quota SMP locks. dq_list_lock protects all lists with quotas
+ * and quota formats and also dqstats structure containing statistics about the
+ * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
+ * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * Updates of i_blocks and i_bytes themselves are guarded by i_lock, acquired
+ * directly in inode_add_bytes() and inode_sub_bytes().
+ *
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
+ *
+ * Note that some things (eg. the sb pointer, type, id) don't change during
+ * the life of the dquot structure and so needn't be protected by a lock.
+ *
+ * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
+ * the operation is just reading pointers from the inode (or not using them at
+ * all) the read lock is enough.  If pointers are altered the function must
+ * hold the write lock (these locking rules also apply for the S_NOQUOTA flag
+ * in the inode - note that for altering the flag i_sem is also needed).  If an
+ * operation is holding a reference to a dquot in some other way (e.g. quotactl
+ * ops) it must be guarded by dqonoff_sem.
+ * This locking assures that:
+ *   a) update/access to dquot pointers in inode is serialized
+ *   b) everyone is guarded against invalidate_dquots()
+ *
+ * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
+ * from inodes (dquot_alloc_space() and such don't check the dq_lock).
+ * Currently dquot is locked only when it is being read to memory (or space for
+ * it is being allocated) on the first dqget() and when it is being released on
+ * the last dqput(). The allocation and release operations are serialized by
+ * the dq_lock and by checking the use count in dquot_release().  Write
+ * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
+ * spinlock to internal buffers before writing.
+ *
+ * Lock ordering (including related VFS locks) is the following:
+ *   i_sem > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
+ *   > dquot->dq_lock > dqio_sem
+ * i_sem on quota files is special (it's below dqio_sem)
+ */
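+
+/*
+ * Illustrative skeleton (assumed, not a real function) of the nesting used by
+ * the charging paths further down, matching the ordering rules above:
+ *
+ *	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);	(i_dquot[] pointers)
+ *	spin_lock(&dq_data_lock);			(dq_dqb usage data)
+ *	... check limits and update usage ...
+ *	spin_unlock(&dq_data_lock);
+ *	... mark dquots dirty (may block when journalling) ...
+ *	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ */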
+
+static DEFINE_SPINLOCK(dq_list_lock);
+DEFINE_SPINLOCK(dq_data_lock);
+
+static char *quotatypes[] = INITQFNAMES;
+static struct quota_format_type *quota_formats;	/* List of registered formats */
+static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+
+/* SLAB cache for dquot structures */
+static kmem_cache_t *dquot_cachep;
+
+int register_quota_format(struct quota_format_type *fmt)
+{
+	spin_lock(&dq_list_lock);
+	fmt->qf_next = quota_formats;
+	quota_formats = fmt;
+	spin_unlock(&dq_list_lock);
+	return 0;
+}
+
+void unregister_quota_format(struct quota_format_type *fmt)
+{
+	struct quota_format_type **actqf;
+
+	spin_lock(&dq_list_lock);
+	for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
+	if (*actqf)
+		*actqf = (*actqf)->qf_next;
+	spin_unlock(&dq_list_lock);
+}
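+
+/*
+ * Illustrative sketch, not part of the original file: how a quota format
+ * module would register itself using the two calls above.  The names, the
+ * (empty) ops table and the choice of format id are assumptions made purely
+ * for illustration.
+ */
+static struct quota_format_ops example_format_ops = {
+	/* read_file_info, write_file_info, read_dqblk, commit_dqblk, ... */
+};
+
+static struct quota_format_type example_quota_format = {
+	.qf_fmt_id = QFMT_VFS_V0,
+	.qf_ops = &example_format_ops,
+	.qf_owner = THIS_MODULE,
+};
+
+static int __init example_quota_format_init(void)
+{
+	return register_quota_format(&example_quota_format);
+}
+
+static void __exit example_quota_format_exit(void)
+{
+	unregister_quota_format(&example_quota_format);
+}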
+
+static struct quota_format_type *find_quota_format(int id)
+{
+	struct quota_format_type *actqf;
+
+	spin_lock(&dq_list_lock);
+	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+	if (!actqf || !try_module_get(actqf->qf_owner)) {
+		int qm;
+
+		spin_unlock(&dq_list_lock);
+		
+		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
+		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+			return NULL;
+
+		spin_lock(&dq_list_lock);
+		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+		if (actqf && !try_module_get(actqf->qf_owner))
+			actqf = NULL;
+	}
+	spin_unlock(&dq_list_lock);
+	return actqf;
+}
+
+static void put_quota_format(struct quota_format_type *fmt)
+{
+	module_put(fmt->qf_owner);
+}
+
+/*
+ * Dquot List Management:
+ * The quota code uses three lists for dquot management: the inuse_list,
+ * free_dquots, and dquot_hash[] array. A single dquot structure may be
+ * on all three lists, depending on its current state.
+ *
+ * All dquots are placed to the end of inuse_list when first created, and this
+ * list is used for invalidate operation, which must look at every dquot.
+ *
+ * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+ * and this list is searched whenever we need an available dquot.  Dquots are
+ * removed from the list as soon as they are used again, and
+ * dqstats.free_dquots gives the number of dquots on the list. When a
+ * dquot is invalidated it is completely released from memory.
+ *
+ * Dquots with a specific identity (device, type and id) are placed on
+ * one of the dquot_hash[] hash chains. This provides an efficient search
+ * mechanism to locate a specific dquot.
+ */
+
+static LIST_HEAD(inuse_list);
+static LIST_HEAD(free_dquots);
+static unsigned int dq_hash_bits, dq_hash_mask;
+static struct hlist_head *dquot_hash;
+
+struct dqstats dqstats;
+
+static void dqput(struct dquot *dquot);
+
+static inline unsigned int
+hashfn(const struct super_block *sb, unsigned int id, int type)
+{
+	unsigned long tmp;
+
+	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
+	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
+}
+
+/*
+ * The following list functions expect dq_list_lock to be held
+ */
+static inline void insert_dquot_hash(struct dquot *dquot)
+{
+	struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+	hlist_add_head(&dquot->dq_hash, head);
+}
+
+static inline void remove_dquot_hash(struct dquot *dquot)
+{
+	hlist_del_init(&dquot->dq_hash);
+}
+
+static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
+{
+	struct hlist_node *node;
+	struct dquot *dquot;
+
+	hlist_for_each (node, dquot_hash+hashent) {
+		dquot = hlist_entry(node, struct dquot, dq_hash);
+		if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
+			return dquot;
+	}
+	return NODQUOT;
+}
+
+/* Add a dquot to the tail of the free list */
+static inline void put_dquot_last(struct dquot *dquot)
+{
+	list_add(&dquot->dq_free, free_dquots.prev);
+	dqstats.free_dquots++;
+}
+
+static inline void remove_free_dquot(struct dquot *dquot)
+{
+	if (list_empty(&dquot->dq_free))
+		return;
+	list_del_init(&dquot->dq_free);
+	dqstats.free_dquots--;
+}
+
+static inline void put_inuse(struct dquot *dquot)
+{
+	/* We add to the back of the inuse list so we don't have to restart
+	 * when we block while traversing this list */
+	list_add(&dquot->dq_inuse, inuse_list.prev);
+	dqstats.allocated_dquots++;
+}
+
+static inline void remove_inuse(struct dquot *dquot)
+{
+	dqstats.allocated_dquots--;
+	list_del(&dquot->dq_inuse);
+}
+/*
+ * End of list functions needing dq_list_lock
+ */
+
+static void wait_on_dquot(struct dquot *dquot)
+{
+	down(&dquot->dq_lock);
+	up(&dquot->dq_lock);
+}
+
+#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
+
+int dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+	spin_lock(&dq_list_lock);
+	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
+				info[dquot->dq_type].dqi_dirty_list);
+	spin_unlock(&dq_list_lock);
+	return 0;
+}
+
+/* This function needs dq_list_lock */
+static inline int clear_dquot_dirty(struct dquot *dquot)
+{
+	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
+		return 0;
+	list_del_init(&dquot->dq_dirty);
+	return 1;
+}
+
+void mark_info_dirty(struct super_block *sb, int type)
+{
+	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+}
+EXPORT_SYMBOL(mark_info_dirty);
+
+/*
+ *	Read dquot from disk and alloc space for it
+ */
+
+int dquot_acquire(struct dquot *dquot)
+{
+	int ret = 0, ret2 = 0;
+	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+	down(&dquot->dq_lock);
+	down(&dqopt->dqio_sem);
+	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
+		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+	if (ret < 0)
+		goto out_iolock;
+	set_bit(DQ_READ_B, &dquot->dq_flags);
+	/* Instantiate dquot if needed */
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
+		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+		/* Write the info if needed */
+		if (info_dirty(&dqopt->info[dquot->dq_type]))
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (ret < 0)
+			goto out_iolock;
+		if (ret2 < 0) {
+			ret = ret2;
+			goto out_iolock;
+		}
+	}
+	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_iolock:
+	up(&dqopt->dqio_sem);
+	up(&dquot->dq_lock);
+	return ret;
+}
+
+/*
+ *	Write dquot to disk
+ */
+int dquot_commit(struct dquot *dquot)
+{
+	int ret = 0, ret2 = 0;
+	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+	down(&dqopt->dqio_sem);
+	spin_lock(&dq_list_lock);
+	if (!clear_dquot_dirty(dquot)) {
+		spin_unlock(&dq_list_lock);
+		goto out_sem;
+	}
+	spin_unlock(&dq_list_lock);
+	/* A dquot can be inactive only if there was an error during read/init,
+	 * so we had better not write it */
+	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+		if (info_dirty(&dqopt->info[dquot->dq_type]))
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (ret >= 0)
+			ret = ret2;
+	}
+out_sem:
+	up(&dqopt->dqio_sem);
+	return ret;
+}
+
+/*
+ *	Release dquot
+ */
+int dquot_release(struct dquot *dquot)
+{
+	int ret = 0, ret2 = 0;
+	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+	down(&dquot->dq_lock);
+	/* Check whether we are not racing with some other dqget() */
+	if (atomic_read(&dquot->dq_count) > 1)
+		goto out_dqlock;
+	down(&dqopt->dqio_sem);
+	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
+		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+		/* Write the info */
+		if (info_dirty(&dqopt->info[dquot->dq_type]))
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (ret >= 0)
+			ret = ret2;
+	}
+	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+	up(&dqopt->dqio_sem);
+out_dqlock:
+	up(&dquot->dq_lock);
+	return ret;
+}
+
+/* Invalidate all dquots on the list. Note that this function is called after
+ * quota is disabled and pointers from inodes removed so there cannot be new
+ * quota users. Also because we hold dqonoff_sem there can be no quota users
+ * for this sb+type at all. */
+static void invalidate_dquots(struct super_block *sb, int type)
+{
+	struct dquot *dquot;
+	struct list_head *head;
+
+	spin_lock(&dq_list_lock);
+	for (head = inuse_list.next; head != &inuse_list;) {
+		dquot = list_entry(head, struct dquot, dq_inuse);
+		head = head->next;
+		if (dquot->dq_sb != sb)
+			continue;
+		if (dquot->dq_type != type)
+			continue;
+#ifdef __DQUOT_PARANOIA
+		if (atomic_read(&dquot->dq_count))
+			BUG();
+#endif
+		/* Quota now has no users and it has been written on last dqput() */
+		remove_dquot_hash(dquot);
+		remove_free_dquot(dquot);
+		remove_inuse(dquot);
+		kmem_cache_free(dquot_cachep, dquot);
+	}
+	spin_unlock(&dq_list_lock);
+}
+
+int vfs_quota_sync(struct super_block *sb, int type)
+{
+	struct list_head *dirty;
+	struct dquot *dquot;
+	struct quota_info *dqopt = sb_dqopt(sb);
+	int cnt;
+
+	down(&dqopt->dqonoff_sem);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (type != -1 && cnt != type)
+			continue;
+		if (!sb_has_quota_enabled(sb, cnt))
+			continue;
+		spin_lock(&dq_list_lock);
+		dirty = &dqopt->info[cnt].dqi_dirty_list;
+		while (!list_empty(dirty)) {
+			dquot = list_entry(dirty->next, struct dquot, dq_dirty);
+			/* A dquot that is dirty but inactive can only be a bad dquot... */
+			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+				clear_dquot_dirty(dquot);
+				continue;
+			}
+			/* Now we have an active dquot to which someone is
+			 * holding a reference, so we can safely just increase
+			 * the use count */
+			atomic_inc(&dquot->dq_count);
+			dqstats.lookups++;
+			spin_unlock(&dq_list_lock);
+			sb->dq_op->write_dquot(dquot);
+			dqput(dquot);
+			spin_lock(&dq_list_lock);
+		}
+		spin_unlock(&dq_list_lock);
+	}
+
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)
+			&& info_dirty(&dqopt->info[cnt]))
+			sb->dq_op->write_info(sb, cnt);
+	spin_lock(&dq_list_lock);
+	dqstats.syncs++;
+	spin_unlock(&dq_list_lock);
+	up(&dqopt->dqonoff_sem);
+
+	return 0;
+}
+
+/* Free unused dquots from cache */
+static void prune_dqcache(int count)
+{
+	struct list_head *head;
+	struct dquot *dquot;
+
+	head = free_dquots.prev;
+	while (head != &free_dquots && count) {
+		dquot = list_entry(head, struct dquot, dq_free);
+		remove_dquot_hash(dquot);
+		remove_free_dquot(dquot);
+		remove_inuse(dquot);
+		kmem_cache_free(dquot_cachep, dquot);
+		count--;
+		head = free_dquots.prev;
+	}
+}
+
+/*
+ * This is called from kswapd when we think we need some
+ * more memory
+ */
+
+static int shrink_dqcache_memory(int nr, unsigned int gfp_mask)
+{
+	if (nr) {
+		spin_lock(&dq_list_lock);
+		prune_dqcache(nr);
+		spin_unlock(&dq_list_lock);
+	}
+	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+}
+
+/*
+ * Put reference to dquot
+ * NOTE: If you change this function please check whether dqput_blocks() works right...
+ * MUST be called with either dqptr_sem or dqonoff_sem held
+ */
+static void dqput(struct dquot *dquot)
+{
+	if (!dquot)
+		return;
+#ifdef __DQUOT_PARANOIA
+	if (!atomic_read(&dquot->dq_count)) {
+		printk("VFS: dqput: trying to free free dquot\n");
+		printk("VFS: device %s, dquot of %s %d\n",
+			dquot->dq_sb->s_id,
+			quotatypes[dquot->dq_type],
+			dquot->dq_id);
+		BUG();
+	}
+#endif
+	
+	spin_lock(&dq_list_lock);
+	dqstats.drops++;
+	spin_unlock(&dq_list_lock);
+we_slept:
+	spin_lock(&dq_list_lock);
+	if (atomic_read(&dquot->dq_count) > 1) {
+		/* We have more than one user... nothing to do */
+		atomic_dec(&dquot->dq_count);
+		spin_unlock(&dq_list_lock);
+		return;
+	}
+	/* Need to release dquot? */
+	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+		spin_unlock(&dq_list_lock);
+		/* Commit dquot before releasing */
+		dquot->dq_sb->dq_op->write_dquot(dquot);
+		goto we_slept;
+	}
+	/* Clear flag in case dquot was inactive (something bad happened) */
+	clear_dquot_dirty(dquot);
+	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		spin_unlock(&dq_list_lock);
+		dquot->dq_sb->dq_op->release_dquot(dquot);
+		goto we_slept;
+	}
+	atomic_dec(&dquot->dq_count);
+#ifdef __DQUOT_PARANOIA
+	/* sanity check */
+	if (!list_empty(&dquot->dq_free))
+		BUG();
+#endif
+	put_dquot_last(dquot);
+	spin_unlock(&dq_list_lock);
+}
+
+static struct dquot *get_empty_dquot(struct super_block *sb, int type)
+{
+	struct dquot *dquot;
+
+	dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
+	if (!dquot)
+		return NODQUOT;
+
+	memset((caddr_t)dquot, 0, sizeof(struct dquot));
+	sema_init(&dquot->dq_lock, 1);
+	INIT_LIST_HEAD(&dquot->dq_free);
+	INIT_LIST_HEAD(&dquot->dq_inuse);
+	INIT_HLIST_NODE(&dquot->dq_hash);
+	INIT_LIST_HEAD(&dquot->dq_dirty);
+	dquot->dq_sb = sb;
+	dquot->dq_type = type;
+	atomic_set(&dquot->dq_count, 1);
+
+	return dquot;
+}
+
+/*
+ * Get reference to dquot
+ * MUST be called with either dqptr_sem or dqonoff_sem held
+ */
+static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+{
+	unsigned int hashent = hashfn(sb, id, type);
+	struct dquot *dquot, *empty = NODQUOT;
+
+	if (!sb_has_quota_enabled(sb, type))
+		return NODQUOT;
+we_slept:
+	spin_lock(&dq_list_lock);
+	if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
+		if (empty == NODQUOT) {
+			spin_unlock(&dq_list_lock);
+			if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
+				schedule();	/* Try to wait for a moment... */
+			goto we_slept;
+		}
+		dquot = empty;
+		dquot->dq_id = id;
+		/* all dquots go on the inuse_list */
+		put_inuse(dquot);
+		/* hash it first so it can be found */
+		insert_dquot_hash(dquot);
+		dqstats.lookups++;
+		spin_unlock(&dq_list_lock);
+	} else {
+		if (!atomic_read(&dquot->dq_count))
+			remove_free_dquot(dquot);
+		atomic_inc(&dquot->dq_count);
+		dqstats.cache_hits++;
+		dqstats.lookups++;
+		spin_unlock(&dq_list_lock);
+		if (empty)
+			kmem_cache_free(dquot_cachep, empty);
+	}
+	/* Wait for dq_lock - after this we know that either dquot_release() is
+	 * already finished or it will be canceled due to the dq_count > 1 test */
+	wait_on_dquot(dquot);
+	/* Read the dquot and instantiate it (everything done only if needed) */
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
+		dqput(dquot);
+		return NODQUOT;
+	}
+#ifdef __DQUOT_PARANOIA
+	if (!dquot->dq_sb)	/* Has somebody invalidated entry under us? */
+		BUG();
+#endif
+
+	return dquot;
+}
+
+static int dqinit_needed(struct inode *inode, int type)
+{
+	int cnt;
+
+	if (IS_NOQUOTA(inode))
+		return 0;
+	if (type != -1)
+		return inode->i_dquot[type] == NODQUOT;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (inode->i_dquot[cnt] == NODQUOT)
+			return 1;
+	return 0;
+}
+
+/* This routine is guarded by dqonoff_sem semaphore */
+static void add_dquot_ref(struct super_block *sb, int type)
+{
+	struct list_head *p;
+
+restart:
+	file_list_lock();
+	list_for_each(p, &sb->s_files) {
+		struct file *filp = list_entry(p, struct file, f_list);
+		struct inode *inode = filp->f_dentry->d_inode;
+		if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
+			struct dentry *dentry = dget(filp->f_dentry);
+			file_list_unlock();
+			sb->dq_op->initialize(inode, type);
+			dput(dentry);
+			/* As we may have blocked we had better restart... */
+			goto restart;
+		}
+	}
+	file_list_unlock();
+}
+
+/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
+static inline int dqput_blocks(struct dquot *dquot)
+{
+	if (atomic_read(&dquot->dq_count) <= 1)
+		return 1;
+	return 0;
+}
+
+/* Remove references to dquots from inode - add dquot to list for freeing if needed */
+/* We can't race with anybody because we hold dqptr_sem for writing... */
+int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head)
+{
+	struct dquot *dquot = inode->i_dquot[type];
+
+	inode->i_dquot[type] = NODQUOT;
+	if (dquot != NODQUOT) {
+		if (dqput_blocks(dquot)) {
+#ifdef __DQUOT_PARANOIA
+			if (atomic_read(&dquot->dq_count) != 1)
+				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
+#endif
+			spin_lock(&dq_list_lock);
+			list_add(&dquot->dq_free, tofree_head);	/* As the dquot currently has users it can't be on the free list... */
+			spin_unlock(&dq_list_lock);
+			return 1;
+		}
+		else
+			dqput(dquot);   /* We have guaranteed we won't block */
+	}
+	return 0;
+}
+
+/* Free list of dquots - called from inode.c */
+/* dquots are removed from inodes, so no new references can be taken and we are the only ones holding a reference */
+static void put_dquot_list(struct list_head *tofree_head)
+{
+	struct list_head *act_head;
+	struct dquot *dquot;
+
+	act_head = tofree_head->next;
+	/* So now we have dquots on the list... Just free them */
+	while (act_head != tofree_head) {
+		dquot = list_entry(act_head, struct dquot, dq_free);
+		act_head = act_head->next;
+		list_del_init(&dquot->dq_free);	/* Remove dquot from the list so we won't have problems... */
+		dqput(dquot);
+	}
+}
+
+/* Gather all references from inodes and drop them */
+static void drop_dquot_ref(struct super_block *sb, int type)
+{
+	LIST_HEAD(tofree_head);
+
+	/* We need to be guarded against prune_icache to reach all the
+	 * inodes - otherwise some can be on the local list of prune_icache */
+	down(&iprune_sem);
+	down_write(&sb_dqopt(sb)->dqptr_sem);
+	remove_dquot_ref(sb, type, &tofree_head);
+	up_write(&sb_dqopt(sb)->dqptr_sem);
+	up(&iprune_sem);
+	put_dquot_list(&tofree_head);
+}
+
+static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
+{
+	dquot->dq_dqb.dqb_curinodes += number;
+}
+
+static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
+{
+	dquot->dq_dqb.dqb_curspace += number;
+}
+
+static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
+{
+	if (dquot->dq_dqb.dqb_curinodes > number)
+		dquot->dq_dqb.dqb_curinodes -= number;
+	else
+		dquot->dq_dqb.dqb_curinodes = 0;
+	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
+		dquot->dq_dqb.dqb_itime = (time_t) 0;
+	clear_bit(DQ_INODES_B, &dquot->dq_flags);
+}
+
+static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
+{
+	if (dquot->dq_dqb.dqb_curspace > number)
+		dquot->dq_dqb.dqb_curspace -= number;
+	else
+		dquot->dq_dqb.dqb_curspace = 0;
+	if (toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit)
+		dquot->dq_dqb.dqb_btime = (time_t) 0;
+	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+}
+
+static int flag_print_warnings = 1;
+
+static inline int need_print_warning(struct dquot *dquot)
+{
+	if (!flag_print_warnings)
+		return 0;
+
+	switch (dquot->dq_type) {
+		case USRQUOTA:
+			return current->fsuid == dquot->dq_id;
+		case GRPQUOTA:
+			return in_group_p(dquot->dq_id);
+	}
+	return 0;
+}
+
+/* Values of warnings */
+#define NOWARN 0
+#define IHARDWARN 1
+#define ISOFTLONGWARN 2
+#define ISOFTWARN 3
+#define BHARDWARN 4
+#define BSOFTLONGWARN 5
+#define BSOFTWARN 6
+
+/* Print a warning to the user who exceeded the quota */
+static void print_warning(struct dquot *dquot, const char warntype)
+{
+	char *msg = NULL;
+	int flag = (warntype == BHARDWARN || warntype == BSOFTLONGWARN) ? DQ_BLKS_B :
+	  ((warntype == IHARDWARN || warntype == ISOFTLONGWARN) ? DQ_INODES_B : 0);
+
+	if (!need_print_warning(dquot) || (flag && test_and_set_bit(flag, &dquot->dq_flags)))
+		return;
+
+	tty_write_message(current->signal->tty, dquot->dq_sb->s_id);
+	if (warntype == ISOFTWARN || warntype == BSOFTWARN)
+		tty_write_message(current->signal->tty, ": warning, ");
+	else
+		tty_write_message(current->signal->tty, ": write failed, ");
+	tty_write_message(current->signal->tty, quotatypes[dquot->dq_type]);
+	switch (warntype) {
+		case IHARDWARN:
+			msg = " file limit reached.\r\n";
+			break;
+		case ISOFTLONGWARN:
+			msg = " file quota exceeded too long.\r\n";
+			break;
+		case ISOFTWARN:
+			msg = " file quota exceeded.\r\n";
+			break;
+		case BHARDWARN:
+			msg = " block limit reached.\r\n";
+			break;
+		case BSOFTLONGWARN:
+			msg = " block quota exceeded too long.\r\n";
+			break;
+		case BSOFTWARN:
+			msg = " block quota exceeded.\r\n";
+			break;
+	}
+	tty_write_message(current->signal->tty, msg);
+}
+
+static inline void flush_warnings(struct dquot **dquots, char *warntype)
+{
+	int i;
+
+	for (i = 0; i < MAXQUOTAS; i++)
+		if (dquots[i] != NODQUOT && warntype[i] != NOWARN)
+			print_warning(dquots[i], warntype[i]);
+}
+
+static inline char ignore_hardlimit(struct dquot *dquot)
+{
+	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+
+	return capable(CAP_SYS_RESOURCE) &&
+	    (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
+}
+
+/* needs dq_data_lock */
+static int check_idq(struct dquot *dquot, ulong inodes, char *warntype)
+{
+	*warntype = NOWARN;
+	if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
+		return QUOTA_OK;
+
+	if (dquot->dq_dqb.dqb_ihardlimit &&
+	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
+            !ignore_hardlimit(dquot)) {
+		*warntype = IHARDWARN;
+		return NO_QUOTA;
+	}
+
+	if (dquot->dq_dqb.dqb_isoftlimit &&
+	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
+	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
+            !ignore_hardlimit(dquot)) {
+		*warntype = ISOFTLONGWARN;
+		return NO_QUOTA;
+	}
+
+	if (dquot->dq_dqb.dqb_isoftlimit &&
+	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
+	    dquot->dq_dqb.dqb_itime == 0) {
+		*warntype = ISOFTWARN;
+		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+	}
+
+	return QUOTA_OK;
+}
+
+/* needs dq_data_lock */
+static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
+{
+	*warntype = 0;
+	if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
+		return QUOTA_OK;
+
+	if (dquot->dq_dqb.dqb_bhardlimit &&
+	   toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit &&
+            !ignore_hardlimit(dquot)) {
+		if (!prealloc)
+			*warntype = BHARDWARN;
+		return NO_QUOTA;
+	}
+
+	if (dquot->dq_dqb.dqb_bsoftlimit &&
+	   toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
+	    dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
+            !ignore_hardlimit(dquot)) {
+		if (!prealloc)
+			*warntype = BSOFTLONGWARN;
+		return NO_QUOTA;
+	}
+
+	if (dquot->dq_dqb.dqb_bsoftlimit &&
+	   toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
+	    dquot->dq_dqb.dqb_btime == 0) {
+		if (!prealloc) {
+			*warntype = BSOFTWARN;
+			dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+		}
+		else
+			/*
+			 * We don't allow preallocation to exceed the softlimit,
+			 * so exceeding it will always be printed
+			 */
+			return NO_QUOTA;
+	}
+
+	return QUOTA_OK;
+}
+
+/*
+ *	Initialize quota pointers in inode
+ *	Transaction must be started at entry
+ */
+int dquot_initialize(struct inode *inode, int type)
+{
+	unsigned int id = 0;
+	int cnt, ret = 0;
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode))
+		return 0;
+	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	/* Having dqptr_sem we know NOQUOTA flags can't be altered... */
+	if (IS_NOQUOTA(inode))
+		goto out_err;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (type != -1 && cnt != type)
+			continue;
+		if (inode->i_dquot[cnt] == NODQUOT) {
+			switch (cnt) {
+				case USRQUOTA:
+					id = inode->i_uid;
+					break;
+				case GRPQUOTA:
+					id = inode->i_gid;
+					break;
+			}
+			inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
+		}
+	}
+out_err:
+	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return ret;
+}
+
+/*
+ * 	Release all quotas referenced by inode
+ *	Transaction must be started at entry
+ */
+int dquot_drop(struct inode *inode)
+{
+	int cnt;
+
+	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] != NODQUOT) {
+			dqput(inode->i_dquot[cnt]);
+			inode->i_dquot[cnt] = NODQUOT;
+		}
+	}
+	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return 0;
+}
+
+/*
+ * The following four functions update the i_blocks+i_bytes fields and
+ * the quota information (together with appropriate checks).
+ * NOTE: We absolutely rely on the fact that the caller dirties
+ * the inode (usually the macros in quotaops.h take care of this) and
+ * holds a handle for the current transaction so that the dquot write and
+ * the inode write go into the same transaction.
+ */
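+
+/*
+ * Illustrative sketch, not part of the original file: how a filesystem block
+ * allocator might charge quota inside its transaction, following the rules
+ * above.  example_alloc_block() and example_allocate_on_disk() are made-up
+ * names; real filesystems normally go through the DQUOT_* wrappers in
+ * quotaops.h rather than calling the helpers directly.
+ */
+static int example_alloc_block(struct inode *inode)
+{
+	/* A journal transaction is assumed to already be open here */
+	if (dquot_alloc_space(inode, inode->i_sb->s_blocksize, 0) == NO_QUOTA)
+		return -EDQUOT;		/* over quota - nothing was charged */
+
+	if (example_allocate_on_disk(inode) != 0) {
+		/* The on-disk allocation failed - give the charge back */
+		dquot_free_space(inode, inode->i_sb->s_blocksize);
+		return -ENOSPC;
+	}
+	/* The caller dirties the inode; the dquot and inode writes then share
+	 * the same transaction, as required above. */
+	return 0;
+}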
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+	int cnt, ret = NO_QUOTA;
+	char warntype[MAXQUOTAS];
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode)) {
+out_add:
+		inode_add_bytes(inode, number);
+		return QUOTA_OK;
+	}
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		warntype[cnt] = NOWARN;
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode)) {	/* Now we can do reliable test... */
+		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		goto out_add;
+	}
+	spin_lock(&dq_data_lock);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
+			goto warn_put_all;
+	}
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		dquot_incr_space(inode->i_dquot[cnt], number);
+	}
+	inode_add_bytes(inode, number);
+	ret = QUOTA_OK;
+warn_put_all:
+	spin_unlock(&dq_data_lock);
+	if (ret == QUOTA_OK)
+		/* Dirtify all the dquots - this can block when journalling */
+		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+			if (inode->i_dquot[cnt])
+				mark_dquot_dirty(inode->i_dquot[cnt]);
+	flush_warnings(inode->i_dquot, warntype);
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return ret;
+}
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_alloc_inode(const struct inode *inode, unsigned long number)
+{
+	int cnt, ret = NO_QUOTA;
+	char warntype[MAXQUOTAS];
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode))
+		return QUOTA_OK;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		warntype[cnt] = NOWARN;
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode)) {
+		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		return QUOTA_OK;
+	}
+	spin_lock(&dq_data_lock);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
+			goto warn_put_all;
+	}
+
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		dquot_incr_inodes(inode->i_dquot[cnt], number);
+	}
+	ret = QUOTA_OK;
+warn_put_all:
+	spin_unlock(&dq_data_lock);
+	if (ret == QUOTA_OK)
+		/* Dirtify all the dquots - this can block when journalling */
+		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+			if (inode->i_dquot[cnt])
+				mark_dquot_dirty(inode->i_dquot[cnt]);
+	flush_warnings((struct dquot **)inode->i_dquot, warntype);
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return ret;
+}
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_free_space(struct inode *inode, qsize_t number)
+{
+	unsigned int cnt;
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode)) {
+out_sub:
+		inode_sub_bytes(inode, number);
+		return QUOTA_OK;
+	}
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	/* Now recheck reliably when holding dqptr_sem */
+	if (IS_NOQUOTA(inode)) {
+		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		goto out_sub;
+	}
+	spin_lock(&dq_data_lock);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		dquot_decr_space(inode->i_dquot[cnt], number);
+	}
+	inode_sub_bytes(inode, number);
+	spin_unlock(&dq_data_lock);
+	/* Dirtify all the dquots - this can block when journalling */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (inode->i_dquot[cnt])
+			mark_dquot_dirty(inode->i_dquot[cnt]);
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return QUOTA_OK;
+}
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_free_inode(const struct inode *inode, unsigned long number)
+{
+	unsigned int cnt;
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode))
+		return QUOTA_OK;
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	/* Now recheck reliably when holding dqptr_sem */
+	if (IS_NOQUOTA(inode)) {
+		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		return QUOTA_OK;
+	}
+	spin_lock(&dq_data_lock);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt] == NODQUOT)
+			continue;
+		dquot_decr_inodes(inode->i_dquot[cnt], number);
+	}
+	spin_unlock(&dq_data_lock);
+	/* Dirtify all the dquots - this can block when journalling */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (inode->i_dquot[cnt])
+			mark_dquot_dirty(inode->i_dquot[cnt]);
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return QUOTA_OK;
+}
+
+/*
+ * Transfer the number of inodes and blocks from one diskquota to another.
+ *
+ * This operation can block, but only after everything is updated
+ * A transaction must be started when entering this function.
+ */
+int dquot_transfer(struct inode *inode, struct iattr *iattr)
+{
+	qsize_t space;
+	struct dquot *transfer_from[MAXQUOTAS];
+	struct dquot *transfer_to[MAXQUOTAS];
+	int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
+	    chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
+	char warntype[MAXQUOTAS];
+
+	/* First test before acquiring semaphore - solves deadlocks when we
+         * re-enter the quota code and are already holding the semaphore */
+	if (IS_NOQUOTA(inode))
+		return QUOTA_OK;
+	/* Clear the arrays */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
+		warntype[cnt] = NOWARN;
+	}
+	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	/* Now recheck reliably when holding dqptr_sem */
+	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
+		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		return QUOTA_OK;
+	}
+	/* First build the transfer_to list - here we can block on
+	 * reading/instantiating of dquots.  We know that the transaction for
+	 * us was already started so we don't violate lock ranking here */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		switch (cnt) {
+			case USRQUOTA:
+				if (!chuid)
+					continue;
+				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
+				break;
+			case GRPQUOTA:
+				if (!chgid)
+					continue;
+				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
+				break;
+		}
+	}
+	spin_lock(&dq_data_lock);
+	space = inode_get_bytes(inode);
+	/* Build the transfer_from list and check the limits */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (transfer_to[cnt] == NODQUOT)
+			continue;
+		transfer_from[cnt] = inode->i_dquot[cnt];
+		if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA ||
+		    check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA)
+			goto warn_put_all;
+	}
+
+	/*
+	 * Finally perform the needed transfer from transfer_from to transfer_to
+	 */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		/*
+		 * Skip changes for same uid or gid or for turned off quota-type.
+		 */
+		if (transfer_to[cnt] == NODQUOT)
+			continue;
+
+		/* Due to IO error we might not have transfer_from[] structure */
+		if (transfer_from[cnt]) {
+			dquot_decr_inodes(transfer_from[cnt], 1);
+			dquot_decr_space(transfer_from[cnt], space);
+		}
+
+		dquot_incr_inodes(transfer_to[cnt], 1);
+		dquot_incr_space(transfer_to[cnt], space);
+
+		inode->i_dquot[cnt] = transfer_to[cnt];
+	}
+	ret = QUOTA_OK;
+warn_put_all:
+	spin_unlock(&dq_data_lock);
+	/* Dirtify all the dquots - this can block when journalling */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (transfer_from[cnt])
+			mark_dquot_dirty(transfer_from[cnt]);
+		if (transfer_to[cnt])
+			mark_dquot_dirty(transfer_to[cnt]);
+	}
+	flush_warnings(transfer_to, warntype);
+	
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
+			dqput(transfer_from[cnt]);
+		if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
+			dqput(transfer_to[cnt]);
+	}
+	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	return ret;
+}
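+/*
+ * Illustrative sketch (hypothetical caller, not part of this file): the
+ * chown/chgrp path reaches this through the DQUOT_TRANSFER() wrapper in
+ * quotaops.h, with the filesystem transaction already started, e.g.
+ *
+ *	if (DQUOT_TRANSFER(inode, attr))
+ *		return -EDQUOT;
+ */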
+
+/*
+ * Write info of quota file to disk
+ */
+int dquot_commit_info(struct super_block *sb, int type)
+{
+	int ret;
+	struct quota_info *dqopt = sb_dqopt(sb);
+
+	down(&dqopt->dqio_sem);
+	ret = dqopt->ops[type]->write_file_info(sb, type);
+	up(&dqopt->dqio_sem);
+	return ret;
+}
+
+/*
+ * Definitions of diskquota operations.
+ */
+struct dquot_operations dquot_operations = {
+	.initialize	= dquot_initialize,
+	.drop		= dquot_drop,
+	.alloc_space	= dquot_alloc_space,
+	.alloc_inode	= dquot_alloc_inode,
+	.free_space	= dquot_free_space,
+	.free_inode	= dquot_free_inode,
+	.transfer	= dquot_transfer,
+	.write_dquot	= dquot_commit,
+	.acquire_dquot	= dquot_acquire,
+	.release_dquot	= dquot_release,
+	.mark_dirty	= dquot_mark_dquot_dirty,
+	.write_info	= dquot_commit_info
+};
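+/*
+ * Example (sketch): a filesystem normally uses this table unchanged, but one
+ * that wants to journal its quota updates can start from a copy and override
+ * only the callbacks it cares about.  The "myfs_*" names are hypothetical.
+ *
+ *	static struct dquot_operations myfs_quota_operations = {
+ *		.initialize	= dquot_initialize,
+ *		.drop		= dquot_drop,
+ *		.alloc_space	= dquot_alloc_space,
+ *		.alloc_inode	= dquot_alloc_inode,
+ *		.free_space	= dquot_free_space,
+ *		.free_inode	= dquot_free_inode,
+ *		.transfer	= dquot_transfer,
+ *		.write_dquot	= myfs_write_dquot,	(wraps dquot_commit in a transaction)
+ *		.acquire_dquot	= dquot_acquire,
+ *		.release_dquot	= dquot_release,
+ *		.mark_dirty	= dquot_mark_dquot_dirty,
+ *		.write_info	= dquot_commit_info
+ *	};
+ */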
+
+static inline void set_enable_flags(struct quota_info *dqopt, int type)
+{
+	switch (type) {
+		case USRQUOTA:
+			dqopt->flags |= DQUOT_USR_ENABLED;
+			break;
+		case GRPQUOTA:
+			dqopt->flags |= DQUOT_GRP_ENABLED;
+			break;
+	}
+}
+
+static inline void reset_enable_flags(struct quota_info *dqopt, int type)
+{
+	switch (type) {
+		case USRQUOTA:
+			dqopt->flags &= ~DQUOT_USR_ENABLED;
+			break;
+		case GRPQUOTA:
+			dqopt->flags &= ~DQUOT_GRP_ENABLED;
+			break;
+	}
+}
+
+/*
+ * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
+ */
+int vfs_quota_off(struct super_block *sb, int type)
+{
+	int cnt;
+	struct quota_info *dqopt = sb_dqopt(sb);
+	struct inode *toputinode[MAXQUOTAS];
+	struct vfsmount *toputmnt[MAXQUOTAS];
+
+	/* We need to serialize quota_off() for device */
+	down(&dqopt->dqonoff_sem);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		toputinode[cnt] = NULL;
+		toputmnt[cnt] = NULL;
+		if (type != -1 && cnt != type)
+			continue;
+		if (!sb_has_quota_enabled(sb, cnt))
+			continue;
+		reset_enable_flags(dqopt, cnt);
+
+		/* Note: these are blocking operations */
+		drop_dquot_ref(sb, cnt);
+		invalidate_dquots(sb, cnt);
+		/*
+		 * Now all dquots should be invalidated and all writes done, so we
+		 * should be the only users of the info. No locks needed.
+		 */
+		if (info_dirty(&dqopt->info[cnt]))
+			sb->dq_op->write_info(sb, cnt);
+		if (dqopt->ops[cnt]->free_file_info)
+			dqopt->ops[cnt]->free_file_info(sb, cnt);
+		put_quota_format(dqopt->info[cnt].dqi_format);
+
+		toputinode[cnt] = dqopt->files[cnt];
+		toputmnt[cnt] = dqopt->mnt[cnt];
+		dqopt->files[cnt] = NULL;
+		dqopt->mnt[cnt] = NULL;
+		dqopt->info[cnt].dqi_flags = 0;
+		dqopt->info[cnt].dqi_igrace = 0;
+		dqopt->info[cnt].dqi_bgrace = 0;
+		dqopt->ops[cnt] = NULL;
+	}
+	up(&dqopt->dqonoff_sem);
+	/* Sync the superblock so that buffers with quota data are written to
+	 * disk (and so userspace sees correct data afterwards).
+	 * The reference to vfsmnt we are still holding protects us from
+	 * umount (we don't have it only when quotas are turned on/off for
+	 * journal replay but in that case we are guarded by the fs anyway). */
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, 1);
+	sync_blockdev(sb->s_bdev);
+	/* Now the quota files are just ordinary files and we can set the
+	 * inode flags back. Moreover we discard the pagecache so that
+	 * userspace sees the writes we did bypassing the pagecache. We
+	 * must also discard the blockdev buffers so that we see the
+	 * changes done by userspace on the next quotaon() */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (toputinode[cnt]) {
+			down(&dqopt->dqonoff_sem);
+			/* If quota was reenabled in the meantime, we have
+			 * nothing to do */
+			if (!sb_has_quota_enabled(sb, cnt)) {
+				down(&toputinode[cnt]->i_sem);
+				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
+				  S_NOATIME | S_NOQUOTA);
+				truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+				up(&toputinode[cnt]->i_sem);
+				mark_inode_dirty(toputinode[cnt]);
+				iput(toputinode[cnt]);
+			}
+			up(&dqopt->dqonoff_sem);
+			/* We don't hold the reference when we turned on quotas
+			 * just for the journal replay... */
+			if (toputmnt[cnt])
+				mntput(toputmnt[cnt]);
+		}
+	if (sb->s_bdev)
+		invalidate_bdev(sb->s_bdev, 0);
+	return 0;
+}
+
+/*
+ *	Turn quotas on for a device
+ */
+
+/* Helper function when we already have the inode */
+static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
+{
+	struct quota_format_type *fmt = find_quota_format(format_id);
+	struct super_block *sb = inode->i_sb;
+	struct quota_info *dqopt = sb_dqopt(sb);
+	int error;
+	int oldflags = -1;
+
+	if (!fmt)
+		return -ESRCH;
+	if (!S_ISREG(inode->i_mode)) {
+		error = -EACCES;
+		goto out_fmt;
+	}
+	if (IS_RDONLY(inode)) {
+		error = -EROFS;
+		goto out_fmt;
+	}
+	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
+		error = -EINVAL;
+		goto out_fmt;
+	}
+
+	/* As we bypass the pagecache we must now flush the inode so that
+	 * we see all the changes from userspace... */
+	write_inode_now(inode, 1);
+	/* And now flush the block cache so that kernel sees the changes */
+	invalidate_bdev(sb->s_bdev, 0);
+	down(&inode->i_sem);
+	down(&dqopt->dqonoff_sem);
+	if (sb_has_quota_enabled(sb, type)) {
+		error = -EBUSY;
+		goto out_lock;
+	}
+	/* We don't want quota and atime on quota files (deadlocks possible).
+	 * Also nobody should write to the file - we use special IO operations
+	 * which ignore the immutable bit. */
+	down_write(&dqopt->dqptr_sem);
+	oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
+	inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
+	up_write(&dqopt->dqptr_sem);
+
+	error = -EIO;
+	dqopt->files[type] = igrab(inode);
+	if (!dqopt->files[type])
+		goto out_lock;
+	error = -EINVAL;
+	if (!fmt->qf_ops->check_quota_file(sb, type))
+		goto out_file_init;
+
+	dqopt->ops[type] = fmt->qf_ops;
+	dqopt->info[type].dqi_format = fmt;
+	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
+	down(&dqopt->dqio_sem);
+	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
+		up(&dqopt->dqio_sem);
+		goto out_file_init;
+	}
+	up(&dqopt->dqio_sem);
+	up(&inode->i_sem);
+	set_enable_flags(dqopt, type);
+
+	add_dquot_ref(sb, type);
+	up(&dqopt->dqonoff_sem);
+
+	return 0;
+
+out_file_init:
+	dqopt->files[type] = NULL;
+	iput(inode);
+out_lock:
+	up(&dqopt->dqonoff_sem);
+	if (oldflags != -1) {
+		down_write(&dqopt->dqptr_sem);
+		/* Set the flags back (in the case of accidental quotaon()
+		 * on a wrong file we don't want to mess up the flags) */
+		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
+		inode->i_flags |= oldflags;
+		up_write(&dqopt->dqptr_sem);
+	}
+	up(&inode->i_sem);
+out_fmt:
+	put_quota_format(fmt);
+
+	return error; 
+}
+
+/* Actual function called from quotactl() */
+int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
+{
+	struct nameidata nd;
+	int error;
+
+	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
+	if (error < 0)
+		return error;
+	error = security_quota_on(nd.dentry);
+	if (error)
+		goto out_path;
+	/* Quota file not on the same filesystem? */
+	if (nd.mnt->mnt_sb != sb)
+		error = -EXDEV;
+	else {
+		error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id);
+		if (!error)
+			sb_dqopt(sb)->mnt[type] = mntget(nd.mnt);
+	}
+out_path:
+	path_release(&nd);
+	return error;
+}
+
+/*
+ * This function is used when a filesystem needs to initialize quotas at
+ * mount time.
+ */
+int vfs_quota_on_mount(int type, int format_id, struct dentry *dentry)
+{
+	int error;
+
+	error = security_quota_on(dentry);
+	if (error)
+		return error;
+	return vfs_quota_on_inode(dentry->d_inode, type, format_id);
+}
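+/*
+ * Example (sketch): a journalled filesystem that must have quotas active
+ * while it replays its journal can look the quota file up itself and call
+ * (with a hypothetical dentry and the QFMT_VFS_V0 format id):
+ *
+ *	err = vfs_quota_on_mount(USRQUOTA, QFMT_VFS_V0, dentry);
+ */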
+
+/* Generic routine for getting common part of quota structure */
+static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
+{
+	struct mem_dqblk *dm = &dquot->dq_dqb;
+
+	spin_lock(&dq_data_lock);
+	di->dqb_bhardlimit = dm->dqb_bhardlimit;
+	di->dqb_bsoftlimit = dm->dqb_bsoftlimit;
+	di->dqb_curspace = dm->dqb_curspace;
+	di->dqb_ihardlimit = dm->dqb_ihardlimit;
+	di->dqb_isoftlimit = dm->dqb_isoftlimit;
+	di->dqb_curinodes = dm->dqb_curinodes;
+	di->dqb_btime = dm->dqb_btime;
+	di->dqb_itime = dm->dqb_itime;
+	di->dqb_valid = QIF_ALL;
+	spin_unlock(&dq_data_lock);
+}
+
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+{
+	struct dquot *dquot;
+
+	down(&sb_dqopt(sb)->dqonoff_sem);
+	if (!(dquot = dqget(sb, id, type))) {
+		up(&sb_dqopt(sb)->dqonoff_sem);
+		return -ESRCH;
+	}
+	do_get_dqblk(dquot, di);
+	dqput(dquot);
+	up(&sb_dqopt(sb)->dqonoff_sem);
+	return 0;
+}
+
+/* Generic routine for setting common part of quota structure */
+static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+{
+	struct mem_dqblk *dm = &dquot->dq_dqb;
+	int check_blim = 0, check_ilim = 0;
+
+	spin_lock(&dq_data_lock);
+	if (di->dqb_valid & QIF_SPACE) {
+		dm->dqb_curspace = di->dqb_curspace;
+		check_blim = 1;
+	}
+	if (di->dqb_valid & QIF_BLIMITS) {
+		dm->dqb_bsoftlimit = di->dqb_bsoftlimit;
+		dm->dqb_bhardlimit = di->dqb_bhardlimit;
+		check_blim = 1;
+	}
+	if (di->dqb_valid & QIF_INODES) {
+		dm->dqb_curinodes = di->dqb_curinodes;
+		check_ilim = 1;
+	}
+	if (di->dqb_valid & QIF_ILIMITS) {
+		dm->dqb_isoftlimit = di->dqb_isoftlimit;
+		dm->dqb_ihardlimit = di->dqb_ihardlimit;
+		check_ilim = 1;
+	}
+	if (di->dqb_valid & QIF_BTIME)
+		dm->dqb_btime = di->dqb_btime;
+	if (di->dqb_valid & QIF_ITIME)
+		dm->dqb_itime = di->dqb_itime;
+
+	if (check_blim) {
+		if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) {
+			dm->dqb_btime = 0;
+			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+		}
+		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
+			dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+	}
+	if (check_ilim) {
+		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
+			dm->dqb_itime = 0;
+			clear_bit(DQ_INODES_B, &dquot->dq_flags);
+		}
+		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
+			dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+	}
+	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
+		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+	else
+		set_bit(DQ_FAKE_B, &dquot->dq_flags);
+	spin_unlock(&dq_data_lock);
+	mark_dquot_dirty(dquot);
+}
+
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+{
+	struct dquot *dquot;
+
+	down(&sb_dqopt(sb)->dqonoff_sem);
+	if (!(dquot = dqget(sb, id, type))) {
+		up(&sb_dqopt(sb)->dqonoff_sem);
+		return -ESRCH;
+	}
+	do_set_dqblk(dquot, di);
+	dqput(dquot);
+	up(&sb_dqopt(sb)->dqonoff_sem);
+	return 0;
+}
+
+/* Generic routine for getting common part of quota file information */
+int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+{
+	struct mem_dqinfo *mi;
+  
+	down(&sb_dqopt(sb)->dqonoff_sem);
+	if (!sb_has_quota_enabled(sb, type)) {
+		up(&sb_dqopt(sb)->dqonoff_sem);
+		return -ESRCH;
+	}
+	mi = sb_dqopt(sb)->info + type;
+	spin_lock(&dq_data_lock);
+	ii->dqi_bgrace = mi->dqi_bgrace;
+	ii->dqi_igrace = mi->dqi_igrace;
+	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
+	ii->dqi_valid = IIF_ALL;
+	spin_unlock(&dq_data_lock);
+	up(&sb_dqopt(sb)->dqonoff_sem);
+	return 0;
+}
+
+/* Generic routine for setting common part of quota file information */
+int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+{
+	struct mem_dqinfo *mi;
+
+	down(&sb_dqopt(sb)->dqonoff_sem);
+	if (!sb_has_quota_enabled(sb, type)) {
+		up(&sb_dqopt(sb)->dqonoff_sem);
+		return -ESRCH;
+	}
+	mi = sb_dqopt(sb)->info + type;
+	spin_lock(&dq_data_lock);
+	if (ii->dqi_valid & IIF_BGRACE)
+		mi->dqi_bgrace = ii->dqi_bgrace;
+	if (ii->dqi_valid & IIF_IGRACE)
+		mi->dqi_igrace = ii->dqi_igrace;
+	if (ii->dqi_valid & IIF_FLAGS)
+		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
+	spin_unlock(&dq_data_lock);
+	mark_info_dirty(sb, type);
+	/* Force write to disk */
+	sb->dq_op->write_info(sb, type);
+	up(&sb_dqopt(sb)->dqonoff_sem);
+	return 0;
+}
+
+struct quotactl_ops vfs_quotactl_ops = {
+	.quota_on	= vfs_quota_on,
+	.quota_off	= vfs_quota_off,
+	.quota_sync	= vfs_quota_sync,
+	.get_info	= vfs_get_dqinfo,
+	.set_info	= vfs_set_dqinfo,
+	.get_dqblk	= vfs_get_dqblk,
+	.set_dqblk	= vfs_set_dqblk
+};
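+/*
+ * Example (sketch): a filesystem relying on the generic quota code points
+ * its superblock at the two tables above while filling the superblock:
+ *
+ *	sb->dq_op = &dquot_operations;
+ *	sb->s_qcop = &vfs_quotactl_ops;
+ */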
+
+static ctl_table fs_dqstats_table[] = {
+	{
+		.ctl_name	= FS_DQ_LOOKUPS,
+		.procname	= "lookups",
+		.data		= &dqstats.lookups,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_DROPS,
+		.procname	= "drops",
+		.data		= &dqstats.drops,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_READS,
+		.procname	= "reads",
+		.data		= &dqstats.reads,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_WRITES,
+		.procname	= "writes",
+		.data		= &dqstats.writes,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_CACHE_HITS,
+		.procname	= "cache_hits",
+		.data		= &dqstats.cache_hits,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_ALLOCATED,
+		.procname	= "allocated_dquots",
+		.data		= &dqstats.allocated_dquots,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_FREE,
+		.procname	= "free_dquots",
+		.data		= &dqstats.free_dquots,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_SYNCS,
+		.procname	= "syncs",
+		.data		= &dqstats.syncs,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= FS_DQ_WARNINGS,
+		.procname	= "warnings",
+		.data		= &flag_print_warnings,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{ .ctl_name = 0 },
+};
+
+static ctl_table fs_table[] = {
+	{
+		.ctl_name	= FS_DQSTATS,
+		.procname	= "quota",
+		.mode		= 0555,
+		.child		= fs_dqstats_table,
+	},
+	{ .ctl_name = 0 },
+};
+
+static ctl_table sys_table[] = {
+	{
+		.ctl_name	= CTL_FS,
+		.procname	= "fs",
+		.mode		= 0555,
+		.child		= fs_table,
+	},
+	{ .ctl_name = 0 },
+};
+
+static int __init dquot_init(void)
+{
+	int i;
+	unsigned long nr_hash, order;
+
+	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
+
+	register_sysctl_table(sys_table, 0);
+
+	dquot_cachep = kmem_cache_create("dquot", 
+			sizeof(struct dquot), sizeof(unsigned long) * 4,
+			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+			NULL, NULL);
+
+	order = 0;
+	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
+	if (!dquot_hash)
+		panic("Cannot create dquot hash table");
+
+	/* Find power-of-two hlist_heads which can fit into allocation */
+	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
+	dq_hash_bits = 0;
+	do {
+		dq_hash_bits++;
+	} while (nr_hash >> dq_hash_bits);
+	dq_hash_bits--;
+
+	nr_hash = 1UL << dq_hash_bits;
+	dq_hash_mask = nr_hash - 1;
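+	/*
+	 * Worked example (assuming 4 KB pages and 4-byte hlist_heads): the
+	 * page holds 4096 / 4 = 1024 heads, the loop above ends with
+	 * dq_hash_bits == 10, so we keep 1 << 10 = 1024 buckets and
+	 * dq_hash_mask == 1023.
+	 */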
+	for (i = 0; i < nr_hash; i++)
+		INIT_HLIST_HEAD(dquot_hash + i);
+
+	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
+			nr_hash, order, (PAGE_SIZE << order));
+
+	set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+
+	return 0;
+}
+module_init(dquot_init);
+
+EXPORT_SYMBOL(register_quota_format);
+EXPORT_SYMBOL(unregister_quota_format);
+EXPORT_SYMBOL(dqstats);
+EXPORT_SYMBOL(dq_data_lock);
+EXPORT_SYMBOL(vfs_quota_on);
+EXPORT_SYMBOL(vfs_quota_on_mount);
+EXPORT_SYMBOL(vfs_quota_off);
+EXPORT_SYMBOL(vfs_quota_sync);
+EXPORT_SYMBOL(vfs_get_dqinfo);
+EXPORT_SYMBOL(vfs_set_dqinfo);
+EXPORT_SYMBOL(vfs_get_dqblk);
+EXPORT_SYMBOL(vfs_set_dqblk);
+EXPORT_SYMBOL(dquot_commit);
+EXPORT_SYMBOL(dquot_commit_info);
+EXPORT_SYMBOL(dquot_acquire);
+EXPORT_SYMBOL(dquot_release);
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+EXPORT_SYMBOL(dquot_initialize);
+EXPORT_SYMBOL(dquot_drop);
+EXPORT_SYMBOL(dquot_alloc_space);
+EXPORT_SYMBOL(dquot_alloc_inode);
+EXPORT_SYMBOL(dquot_free_space);
+EXPORT_SYMBOL(dquot_free_inode);
+EXPORT_SYMBOL(dquot_transfer);
diff --git a/fs/efs/Makefile b/fs/efs/Makefile
new file mode 100644
index 0000000..963543d
--- /dev/null
+++ b/fs/efs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux efs-filesystem routines.
+#
+
+obj-$(CONFIG_EFS_FS) += efs.o
+
+efs-objs := super.o inode.o namei.o dir.o file.o symlink.o
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
new file mode 100644
index 0000000..777c614
--- /dev/null
+++ b/fs/efs/dir.c
@@ -0,0 +1,113 @@
+/*
+ * dir.c
+ *
+ * Copyright (c) 1999 Al Smith
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/efs_fs.h>
+#include <linux/smp_lock.h>
+
+static int efs_readdir(struct file *, void *, filldir_t);
+
+struct file_operations efs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= efs_readdir,
+};
+
+struct inode_operations efs_dir_inode_operations = {
+	.lookup		= efs_lookup,
+};
+
+static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct buffer_head *bh;
+
+	struct efs_dir		*dirblock;
+	struct efs_dentry	*dirslot;
+	efs_ino_t		inodenum;
+	efs_block_t		block;
+	int			slot, namelen;
+	char			*nameptr;
+
+	if (inode->i_size & (EFS_DIRBSIZE-1))
+		printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n");
+
+	lock_kernel();
+
+	/* work out where this entry can be found */
+	block = filp->f_pos >> EFS_DIRBSIZE_BITS;
+
+	/* each block contains at most 256 slots */
+	slot  = filp->f_pos & 0xff;
+
+	/* look at all blocks */
+	while (block < inode->i_blocks) {
+		/* read the dir block */
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
+
+		if (!bh) {
+			printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
+			break;
+		}
+
+		dirblock = (struct efs_dir *) bh->b_data; 
+
+		if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) {
+			printk(KERN_ERR "EFS: readdir(): invalid directory block\n");
+			brelse(bh);
+			break;
+		}
+
+		while (slot < dirblock->slots) {
+			if (dirblock->space[slot] == 0) {
+				slot++;
+				continue;
+			}
+
+			dirslot  = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));
+
+			inodenum = be32_to_cpu(dirslot->inode);
+			namelen  = dirslot->namelen;
+			nameptr  = dirslot->name;
+
+#ifdef DEBUG
+			printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen);
+#endif
+			if (namelen > 0) {
+				/* found the next entry */
+				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+
+				/* copy filename and data in dirslot */
+				filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN);
+
+				/* sanity check */
+				if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
+					printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
+					slot++;
+					continue;
+				}
+
+				/* store position of next slot */
+				if (++slot == dirblock->slots) {
+					slot = 0;
+					block++;
+				}
+				brelse(bh);
+				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+				goto out;
+			}
+			slot++;
+		}
+		brelse(bh);
+
+		slot = 0;
+		block++;
+	}
+
+	filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+out:
+	unlock_kernel();
+	return 0;
+}
+
diff --git a/fs/efs/file.c b/fs/efs/file.c
new file mode 100644
index 0000000..5db2012
--- /dev/null
+++ b/fs/efs/file.c
@@ -0,0 +1,60 @@
+/*
+ * file.c
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/efs_fs.h>
+
+int efs_get_block(struct inode *inode, sector_t iblock,
+		  struct buffer_head *bh_result, int create)
+{
+	int error = -EROFS;
+	long phys;
+
+	if (create)
+		return error;
+	if (iblock >= inode->i_blocks) {
+#ifdef DEBUG
+		/*
+		 * i have no idea why this happens as often as it does
+		 */
+		printk(KERN_WARNING "EFS: bmap(): block %d >= %ld (filesize %ld)\n",
+			block,
+			inode->i_blocks,
+			inode->i_size);
+#endif
+		return 0;
+	}
+	phys = efs_map_block(inode, iblock);
+	if (phys)
+		map_bh(bh_result, inode->i_sb, phys);
+	return 0;
+}
+
+int efs_bmap(struct inode *inode, efs_block_t block) {
+
+	if (block < 0) {
+		printk(KERN_WARNING "EFS: bmap(): block < 0\n");
+		return 0;
+	}
+
+	/* are we about to read past the end of a file ? */
+	if (!(block < inode->i_blocks)) {
+#ifdef DEBUG
+		/*
+		 * i have no idea why this happens as often as it does
+		 */
+		printk(KERN_WARNING "EFS: bmap(): block %d >= %ld (filesize %ld)\n",
+			block,
+			inode->i_blocks,
+			inode->i_size);
+#endif
+		return 0;
+	}
+
+	return efs_map_block(inode, block);
+}
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
new file mode 100644
index 0000000..180607f
--- /dev/null
+++ b/fs/efs/inode.c
@@ -0,0 +1,305 @@
+/*
+ * inode.c
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from work (c) 1995,1996 Christian Vogelgsang,
+ *              and from work (c) 1998 Mike Shaver.
+ */
+
+#include <linux/efs_fs.h>
+#include <linux/efs_fs_sb.h>
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+static int efs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,efs_get_block);
+}
+static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,efs_get_block);
+}
+static struct address_space_operations efs_aops = {
+	.readpage = efs_readpage,
+	.sync_page = block_sync_page,
+	.bmap = _efs_bmap
+};
+
+static inline void extent_copy(efs_extent *src, efs_extent *dst) {
+	/*
+	 * this is slightly evil. it doesn't just copy
+	 * efs_extent from src to dst, it also mangles
+	 * the bits so that dst ends up in cpu byte-order.
+	 */
+
+	dst->cooked.ex_magic  =  (unsigned int) src->raw[0];
+	dst->cooked.ex_bn     = ((unsigned int) src->raw[1] << 16) |
+				((unsigned int) src->raw[2] <<  8) |
+				((unsigned int) src->raw[3] <<  0);
+	dst->cooked.ex_length =  (unsigned int) src->raw[4];
+	dst->cooked.ex_offset = ((unsigned int) src->raw[5] << 16) |
+				((unsigned int) src->raw[6] <<  8) |
+				((unsigned int) src->raw[7] <<  0);
+	return;
+}
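+/*
+ * Worked example (hypothetical raw bytes): src->raw[] =
+ * { 0x00, 0x00, 0x10, 0x20, 0x08, 0x00, 0x00, 0x40 } decodes above to
+ * ex_magic = 0, ex_bn = 0x001020, ex_length = 8 and ex_offset = 0x40.
+ */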
+
+void efs_read_inode(struct inode *inode)
+{
+	int i, inode_index;
+	dev_t device;
+	u32 rdev;
+	struct buffer_head *bh;
+	struct efs_sb_info    *sb = SUPER_INFO(inode->i_sb);
+	struct efs_inode_info *in = INODE_INFO(inode);
+	efs_block_t block, offset;
+	struct efs_dinode *efs_inode;
+  
+	/*
+	** EFS layout:
+	**
+	** |   cylinder group    |   cylinder group    |   cylinder group ..etc
+	** |inodes|data          |inodes|data          |inodes|data       ..etc
+	**
+	** Work out the inode block index (considering initially that the
+	** inodes are stored as consecutive blocks), then work out the block
+	** number of that inode given the above layout, and finally the
+	** offset of the inode within that block.
+	*/
+
+	inode_index = inode->i_ino /
+		(EFS_BLOCKSIZE / sizeof(struct efs_dinode));
+
+	block = sb->fs_start + sb->first_block + 
+		(sb->group_size * (inode_index / sb->inode_blocks)) +
+		(inode_index % sb->inode_blocks);
+
+	offset = (inode->i_ino %
+			(EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
+		sizeof(struct efs_dinode);
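+	/*
+	 * Worked example (assuming 512-byte EFS blocks and 128-byte on-disk
+	 * inodes): there are 4 inodes per block, so inode 10 gives
+	 * inode_index = 10 / 4 = 2 and offset = (10 % 4) * 128 = 256 bytes
+	 * into the computed block.
+	 */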
+
+	bh = sb_bread(inode->i_sb, block);
+	if (!bh) {
+		printk(KERN_WARNING "EFS: bread() failed at block %d\n", block);
+		goto read_inode_error;
+	}
+
+	efs_inode = (struct efs_dinode *) (bh->b_data + offset);
+    
+	inode->i_mode  = be16_to_cpu(efs_inode->di_mode);
+	inode->i_nlink = be16_to_cpu(efs_inode->di_nlink);
+	inode->i_uid   = (uid_t)be16_to_cpu(efs_inode->di_uid);
+	inode->i_gid   = (gid_t)be16_to_cpu(efs_inode->di_gid);
+	inode->i_size  = be32_to_cpu(efs_inode->di_size);
+	inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
+	inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
+	inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime);
+	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+
+	/* this is the number of blocks in the file */
+	if (inode->i_size == 0) {
+		inode->i_blocks = 0;
+	} else {
+		inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1;
+	}
+
+	rdev = be16_to_cpu(efs_inode->di_u.di_dev.odev);
+	if (rdev == 0xffff) {
+		rdev = be32_to_cpu(efs_inode->di_u.di_dev.ndev);
+		if (sysv_major(rdev) > 0xfff)
+			device = 0;
+		else
+			device = MKDEV(sysv_major(rdev), sysv_minor(rdev));
+	} else
+		device = old_decode_dev(rdev);
+
+	/* get the number of extents for this object */
+	in->numextents = be16_to_cpu(efs_inode->di_numextents);
+	in->lastextent = 0;
+
+	/* copy the extents contained within the inode to memory */
+	for(i = 0; i < EFS_DIRECTEXTENTS; i++) {
+		extent_copy(&(efs_inode->di_u.di_extents[i]), &(in->extents[i]));
+		if (i < in->numextents && in->extents[i].cooked.ex_magic != 0) {
+			printk(KERN_WARNING "EFS: extent %d has bad magic number in inode %lu\n", i, inode->i_ino);
+			brelse(bh);
+			goto read_inode_error;
+		}
+	}
+
+	brelse(bh);
+   
+#ifdef DEBUG
+	printk(KERN_DEBUG "EFS: read_inode(): inode %lu, extents %d, mode %o\n",
+		inode->i_ino, in->numextents, inode->i_mode);
+#endif
+
+	switch (inode->i_mode & S_IFMT) {
+		case S_IFDIR: 
+			inode->i_op = &efs_dir_inode_operations; 
+			inode->i_fop = &efs_dir_operations; 
+			break;
+		case S_IFREG:
+			inode->i_fop = &generic_ro_fops;
+			inode->i_data.a_ops = &efs_aops;
+			break;
+		case S_IFLNK:
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_data.a_ops = &efs_symlink_aops;
+			break;
+		case S_IFCHR:
+		case S_IFBLK:
+		case S_IFIFO:
+			init_special_inode(inode, inode->i_mode, device);
+			break;
+		default:
+			printk(KERN_WARNING "EFS: unsupported inode mode %o\n", inode->i_mode);
+			goto read_inode_error;
+			break;
+	}
+
+	return;
+        
+read_inode_error:
+	printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino);
+	make_bad_inode(inode);
+
+	return;
+}
+
+static inline efs_block_t
+efs_extent_check(efs_extent *ptr, efs_block_t block, struct efs_sb_info *sb) {
+	efs_block_t start;
+	efs_block_t length;
+	efs_block_t offset;
+
+	/*
+	 * Given an extent and a logical block within a file,
+	 * can this block be found within this extent?
+	 */
+	start  = ptr->cooked.ex_bn;
+	length = ptr->cooked.ex_length;
+	offset = ptr->cooked.ex_offset;
+
+	if ((block >= offset) && (block < offset+length)) {
+		return(sb->fs_start + start + block - offset);
+	} else {
+		return 0;
+	}
+}
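+/*
+ * Worked example: for an extent with ex_bn = 100, ex_length = 8 and
+ * ex_offset = 16, logical block 20 lies inside it (16 <= 20 < 24) and maps
+ * to physical block sb->fs_start + 100 + (20 - 16).
+ */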
+
+efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
+	struct efs_sb_info    *sb = SUPER_INFO(inode->i_sb);
+	struct efs_inode_info *in = INODE_INFO(inode);
+	struct buffer_head    *bh = NULL;
+
+	int cur, last, first = 1;
+	int ibase, ioffset, dirext, direxts, indext, indexts;
+	efs_block_t iblock, result = 0, lastblock = 0;
+	efs_extent ext, *exts;
+
+	last = in->lastextent;
+
+	if (in->numextents <= EFS_DIRECTEXTENTS) {
+		/* first check the last extent we returned */
+		if ((result = efs_extent_check(&in->extents[last], block, sb)))
+			return result;
+    
+		/* if we only have one extent then nothing can be found */
+		if (in->numextents == 1) {
+			printk(KERN_ERR "EFS: map_block() failed to map (1 extent)\n");
+			return 0;
+		}
+
+		direxts = in->numextents;
+
+		/*
+		 * check the stored extents in the inode
+		 * start with next extent and check forwards
+		 */
+		for(dirext = 1; dirext < direxts; dirext++) {
+			cur = (last + dirext) % in->numextents;
+			if ((result = efs_extent_check(&in->extents[cur], block, sb))) {
+				in->lastextent = cur;
+				return result;
+			}
+		}
+
+		printk(KERN_ERR "EFS: map_block() failed to map block %u (dir)\n", block);
+		return 0;
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "EFS: map_block(): indirect search for logical block %u\n", block);
+#endif
+	direxts = in->extents[0].cooked.ex_offset;
+	indexts = in->numextents;
+
+	for(indext = 0; indext < indexts; indext++) {
+		cur = (last + indext) % indexts;
+
+		/*
+		 * work out which direct extent contains `cur'.
+		 *
+		 * also compute ibase: i.e. the number of the first
+		 * indirect extent contained within direct extent `cur'.
+		 *
+		 */
+		ibase = 0;
+		for(dirext = 0; cur < ibase && dirext < direxts; dirext++) {
+			ibase += in->extents[dirext].cooked.ex_length *
+				(EFS_BLOCKSIZE / sizeof(efs_extent));
+		}
+
+		if (dirext == direxts) {
+			/* should never happen */
+			printk(KERN_ERR "EFS: couldn't find direct extent for indirect extent %d (block %u)\n", cur, block);
+			if (bh) brelse(bh);
+			return 0;
+		}
+		
+		/* work out block number and offset of this indirect extent */
+		iblock = sb->fs_start + in->extents[dirext].cooked.ex_bn +
+			(cur - ibase) /
+			(EFS_BLOCKSIZE / sizeof(efs_extent));
+		ioffset = (cur - ibase) %
+			(EFS_BLOCKSIZE / sizeof(efs_extent));
+
+		if (first || lastblock != iblock) {
+			if (bh) brelse(bh);
+
+			bh = sb_bread(inode->i_sb, iblock);
+			if (!bh) {
+				printk(KERN_ERR "EFS: bread() failed at block %d\n", iblock);
+				return 0;
+			}
+#ifdef DEBUG
+			printk(KERN_DEBUG "EFS: map_block(): read indirect extent block %d\n", iblock);
+#endif
+			first = 0;
+			lastblock = iblock;
+		}
+
+		exts = (efs_extent *) bh->b_data;
+
+		extent_copy(&(exts[ioffset]), &ext);
+
+		if (ext.cooked.ex_magic != 0) {
+			printk(KERN_ERR "EFS: extent %d has bad magic number in block %d\n", cur, iblock);
+			if (bh) brelse(bh);
+			return 0;
+		}
+
+		if ((result = efs_extent_check(&ext, block, sb))) {
+			if (bh) brelse(bh);
+			in->lastextent = cur;
+			return result;
+		}
+	}
+	if (bh) brelse(bh);
+	printk(KERN_ERR "EFS: map_block() failed to map block %u (indir)\n", block);
+	return 0;
+}  
+
+MODULE_LICENSE("GPL");
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
new file mode 100644
index 0000000..ed4a207
--- /dev/null
+++ b/fs/efs/namei.c
@@ -0,0 +1,110 @@
+/*
+ * namei.c
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+#include <linux/efs_fs.h>
+#include <linux/smp_lock.h>
+
+static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) {
+	struct buffer_head *bh;
+
+	int			slot, namelen;
+	char			*nameptr;
+	struct efs_dir		*dirblock;
+	struct efs_dentry	*dirslot;
+	efs_ino_t		inodenum;
+	efs_block_t		block;
+ 
+	if (inode->i_size & (EFS_DIRBSIZE-1))
+		printk(KERN_WARNING "EFS: WARNING: find_entry(): directory size not a multiple of EFS_DIRBSIZE\n");
+
+	for(block = 0; block < inode->i_blocks; block++) {
+
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
+		if (!bh) {
+			printk(KERN_ERR "EFS: find_entry(): failed to read dir block %d\n", block);
+			return 0;
+		}
+    
+		dirblock = (struct efs_dir *) bh->b_data;
+
+		if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) {
+			printk(KERN_ERR "EFS: find_entry(): invalid directory block\n");
+			brelse(bh);
+			return(0);
+		}
+
+		for(slot = 0; slot < dirblock->slots; slot++) {
+			dirslot  = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));
+
+			namelen  = dirslot->namelen;
+			nameptr  = dirslot->name;
+
+			if ((namelen == len) && (!memcmp(name, nameptr, len))) {
+				inodenum = be32_to_cpu(dirslot->inode);
+				brelse(bh);
+				return(inodenum);
+			}
+		}
+		brelse(bh);
+	}
+	return(0);
+}
+
+struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) {
+	efs_ino_t inodenum;
+	struct inode * inode = NULL;
+
+	lock_kernel();
+	inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len);
+	if (inodenum) {
+		if (!(inode = iget(dir->i_sb, inodenum))) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+
+	d_add(dentry, inode);
+	return NULL;
+}
+
+struct dentry *efs_get_parent(struct dentry *child)
+{
+	struct dentry *parent;
+	struct inode *inode;
+	efs_ino_t ino;
+	int error;
+
+	lock_kernel();
+
+	error = -ENOENT;
+	ino = efs_find_entry(child->d_inode, "..", 2);
+	if (!ino)
+		goto fail;
+
+	error = -EACCES;
+	inode = iget(child->d_inode->i_sb, ino);
+	if (!inode)
+		goto fail;
+
+	error = -ENOMEM;
+	parent = d_alloc_anon(inode);
+	if (!parent)
+		goto fail_iput;
+
+	unlock_kernel();
+	return parent;
+
+ fail_iput:
+	iput(inode);
+ fail:
+	unlock_kernel();
+	return ERR_PTR(error);
+}
diff --git a/fs/efs/super.c b/fs/efs/super.c
new file mode 100644
index 0000000..d8d5ea9
--- /dev/null
+++ b/fs/efs/super.c
@@ -0,0 +1,343 @@
+/*
+ * super.c
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/efs_fs.h>
+#include <linux/efs_vh.h>
+#include <linux/efs_fs_sb.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+
+static int efs_statfs(struct super_block *s, struct kstatfs *buf);
+static int efs_fill_super(struct super_block *s, void *d, int silent);
+
+static struct super_block *efs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, efs_fill_super);
+}
+
+static struct file_system_type efs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "efs",
+	.get_sb		= efs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static struct pt_types sgi_pt_types[] = {
+	{0x00,		"SGI vh"},
+	{0x01,		"SGI trkrepl"},
+	{0x02,		"SGI secrepl"},
+	{0x03,		"SGI raw"},
+	{0x04,		"SGI bsd"},
+	{SGI_SYSV,	"SGI sysv"},
+	{0x06,		"SGI vol"},
+	{SGI_EFS,	"SGI efs"},
+	{0x08,		"SGI lv"},
+	{0x09,		"SGI rlv"},
+	{0x0A,		"SGI xfs"},
+	{0x0B,		"SGI xfslog"},
+	{0x0C,		"SGI xlv"},
+	{0x82,		"Linux swap"},
+	{0x83,		"Linux native"},
+	{0,		NULL}
+};
+
+
+static kmem_cache_t * efs_inode_cachep;
+
+static struct inode *efs_alloc_inode(struct super_block *sb)
+{
+	struct efs_inode_info *ei;
+	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void efs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	efs_inode_cachep = kmem_cache_create("efs_inode_cache",
+				sizeof(struct efs_inode_info),
+				0, SLAB_RECLAIM_ACCOUNT,
+				init_once, NULL);
+	if (efs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(efs_inode_cachep))
+		printk(KERN_INFO "efs_inode_cache: not all structures were freed\n");
+}
+
+static void efs_put_super(struct super_block *s)
+{
+	kfree(s->s_fs_info);
+	s->s_fs_info = NULL;
+}
+
+static int efs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+static struct super_operations efs_superblock_operations = {
+	.alloc_inode	= efs_alloc_inode,
+	.destroy_inode	= efs_destroy_inode,
+	.read_inode	= efs_read_inode,
+	.put_super	= efs_put_super,
+	.statfs		= efs_statfs,
+	.remount_fs	= efs_remount,
+};
+
+static struct export_operations efs_export_ops = {
+	.get_parent	= efs_get_parent,
+};
+
+static int __init init_efs_fs(void) {
+	int err;
+	printk("EFS: "EFS_VERSION" - http://aeschi.ch.eu.org/efs/\n");
+	err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&efs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_efs_fs(void) {
+	unregister_filesystem(&efs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_efs_fs)
+module_exit(exit_efs_fs)
+
+static efs_block_t efs_validate_vh(struct volume_header *vh) {
+	int		i;
+	__be32		cs, *ui;
+	int		csum;
+	efs_block_t	sblock = 0; /* shuts up gcc */
+	struct pt_types	*pt_entry;
+	int		pt_type, slice = -1;
+
+	if (be32_to_cpu(vh->vh_magic) != VHMAGIC) {
+		/*
+		 * assume that we're dealing with a partition and allow
+		 * read_super() to try and detect a valid superblock
+		 * on the next block.
+		 */
+		return 0;
+	}
+
+	ui = ((__be32 *) (vh + 1)) - 1;
+	for(csum = 0; ui >= ((__be32 *) vh);) {
+		cs = *ui--;
+		csum += be32_to_cpu(cs);
+	}
+	if (csum) {
+		printk(KERN_INFO "EFS: SGI disklabel: checksum bad, label corrupted\n");
+		return 0;
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "EFS: bf: \"%16s\"\n", vh->vh_bootfile);
+
+	for(i = 0; i < NVDIR; i++) {
+		int	j;
+		char	name[VDNAMESIZE+1];
+
+		for(j = 0; j < VDNAMESIZE; j++) {
+			name[j] = vh->vh_vd[i].vd_name[j];
+		}
+		name[j] = (char) 0;
+
+		if (name[0]) {
+			printk(KERN_DEBUG "EFS: vh: %8s block: 0x%08x size: 0x%08x\n",
+				name,
+				(int) be32_to_cpu(vh->vh_vd[i].vd_lbn),
+				(int) be32_to_cpu(vh->vh_vd[i].vd_nbytes));
+		}
+	}
+#endif
+
+	for(i = 0; i < NPARTAB; i++) {
+		pt_type = (int) be32_to_cpu(vh->vh_pt[i].pt_type);
+		for(pt_entry = sgi_pt_types; pt_entry->pt_name; pt_entry++) {
+			if (pt_type == pt_entry->pt_type) break;
+		}
+#ifdef DEBUG
+		if (be32_to_cpu(vh->vh_pt[i].pt_nblks)) {
+			printk(KERN_DEBUG "EFS: pt %2d: start: %08d size: %08d type: 0x%02x (%s)\n",
+				i,
+				(int) be32_to_cpu(vh->vh_pt[i].pt_firstlbn),
+				(int) be32_to_cpu(vh->vh_pt[i].pt_nblks),
+				pt_type,
+				(pt_entry->pt_name) ? pt_entry->pt_name : "unknown");
+		}
+#endif
+		if (IS_EFS(pt_type)) {
+			sblock = be32_to_cpu(vh->vh_pt[i].pt_firstlbn);
+			slice = i;
+		}
+	}
+
+	if (slice == -1) {
+		printk(KERN_NOTICE "EFS: partition table contained no EFS partitions\n");
+#ifdef DEBUG
+	} else {
+		printk(KERN_INFO "EFS: using slice %d (type %s, offset 0x%x)\n",
+			slice,
+			(pt_entry->pt_name) ? pt_entry->pt_name : "unknown",
+			sblock);
+#endif
+	}
+	return(sblock);
+}
+
+static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) {
+
+	if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic))) return -1;
+
+	sb->fs_magic     = be32_to_cpu(super->fs_magic);
+	sb->total_blocks = be32_to_cpu(super->fs_size);
+	sb->first_block  = be32_to_cpu(super->fs_firstcg);
+	sb->group_size   = be32_to_cpu(super->fs_cgfsize);
+	sb->data_free    = be32_to_cpu(super->fs_tfree);
+	sb->inode_free   = be32_to_cpu(super->fs_tinode);
+	sb->inode_blocks = be16_to_cpu(super->fs_cgisize);
+	sb->total_groups = be16_to_cpu(super->fs_ncg);
+    
+	return 0;    
+}
+
+static int efs_fill_super(struct super_block *s, void *d, int silent)
+{
+	struct efs_sb_info *sb;
+	struct buffer_head *bh;
+	struct inode *root;
+
+ 	sb = kmalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
+	if (!sb)
+		return -ENOMEM;
+	s->s_fs_info = sb;
+	memset(sb, 0, sizeof(struct efs_sb_info));
+ 
+	s->s_magic		= EFS_SUPER_MAGIC;
+	if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
+		printk(KERN_ERR "EFS: device does not support %d byte blocks\n",
+			EFS_BLOCKSIZE);
+		goto out_no_fs_ul;
+	}
+  
+	/* read the vh (volume header) block */
+	bh = sb_bread(s, 0);
+
+	if (!bh) {
+		printk(KERN_ERR "EFS: cannot read volume header\n");
+		goto out_no_fs_ul;
+	}
+
+	/*
+	 * if this returns zero then we didn't find any partition table.
+	 * this isn't (yet) an error - just assume for the moment that
+	 * the device is valid and go on to search for a superblock.
+	 */
+	sb->fs_start = efs_validate_vh((struct volume_header *) bh->b_data);
+	brelse(bh);
+
+	if (sb->fs_start == -1) {
+		goto out_no_fs_ul;
+	}
+
+	bh = sb_bread(s, sb->fs_start + EFS_SUPER);
+	if (!bh) {
+		printk(KERN_ERR "EFS: cannot read superblock\n");
+		goto out_no_fs_ul;
+	}
+		
+	if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) {
+#ifdef DEBUG
+		printk(KERN_WARNING "EFS: invalid superblock at block %u\n", sb->fs_start + EFS_SUPER);
+#endif
+		brelse(bh);
+		goto out_no_fs_ul;
+	}
+	brelse(bh);
+
+	if (!(s->s_flags & MS_RDONLY)) {
+#ifdef DEBUG
+		printk(KERN_INFO "EFS: forcing read-only mode\n");
+#endif
+		s->s_flags |= MS_RDONLY;
+	}
+	s->s_op   = &efs_superblock_operations;
+	s->s_export_op = &efs_export_ops;
+	root = iget(s, EFS_ROOTINODE);
+	s->s_root = d_alloc_root(root);
+ 
+	if (!(s->s_root)) {
+		printk(KERN_ERR "EFS: get root inode failed\n");
+		iput(root);
+		goto out_no_fs;
+	}
+
+	return 0;
+
+out_no_fs_ul:
+out_no_fs:
+	s->s_fs_info = NULL;
+	kfree(sb);
+	return -EINVAL;
+}
+
+static int efs_statfs(struct super_block *s, struct kstatfs *buf) {
+	struct efs_sb_info *sb = SUPER_INFO(s);
+
+	buf->f_type    = EFS_SUPER_MAGIC;	/* efs magic number */
+	buf->f_bsize   = EFS_BLOCKSIZE;		/* blocksize */
+	buf->f_blocks  = sb->total_groups *	/* total data blocks */
+			(sb->group_size - sb->inode_blocks);
+	buf->f_bfree   = sb->data_free;		/* free data blocks */
+	buf->f_bavail  = sb->data_free;		/* free blocks for non-root */
+	buf->f_files   = sb->total_groups *	/* total inodes */
+			sb->inode_blocks *
+			(EFS_BLOCKSIZE / sizeof(struct efs_dinode));
+	buf->f_ffree   = sb->inode_free;	/* free inodes */
+	buf->f_fsid.val[0] = (sb->fs_magic >> 16) & 0xffff; /* fs ID */
+	buf->f_fsid.val[1] =  sb->fs_magic        & 0xffff; /* fs ID */
+	buf->f_namelen = EFS_MAXNAMELEN;	/* max filename length */
+
+	return 0;
+}
+
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
new file mode 100644
index 0000000..3d9a350
--- /dev/null
+++ b/fs/efs/symlink.c
@@ -0,0 +1,58 @@
+/*
+ * symlink.c
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
+ */
+
+#include <linux/string.h>
+#include <linux/efs_fs.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+
+static int efs_symlink_readpage(struct file *file, struct page *page)
+{
+	char *link = kmap(page);
+	struct buffer_head * bh;
+	struct inode * inode = page->mapping->host;
+	efs_block_t size = inode->i_size;
+	int err;
+  
+	err = -ENAMETOOLONG;
+	if (size > 2 * EFS_BLOCKSIZE)
+		goto fail;
+  
+	lock_kernel();
+	/* read first 512 bytes of link target */
+	err = -EIO;
+	bh = sb_bread(inode->i_sb, efs_bmap(inode, 0));
+	if (!bh)
+		goto fail;
+	memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size);
+	brelse(bh);
+	if (size > EFS_BLOCKSIZE) {
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, 1));
+		if (!bh)
+			goto fail;
+		memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE);
+		brelse(bh);
+	}
+	link[size] = '\0';
+	unlock_kernel();
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+fail:
+	unlock_kernel();
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return err;
+}
+
+struct address_space_operations efs_symlink_aops = {
+	.readpage	= efs_symlink_readpage
+};
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
new file mode 100644
index 0000000..05b966c
--- /dev/null
+++ b/fs/eventpoll.c
@@ -0,0 +1,1639 @@
+/*
+ *  fs/eventpoll.c (Efficient event polling implementation)
+ *  Copyright (C) 2001,...,2003	 Davide Libenzi
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/rwsem.h>
+#include <linux/rbtree.h>
+#include <linux/wait.h>
+#include <linux/eventpoll.h>
+#include <linux/mount.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/mman.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+
+/*
+ * LOCKING:
+ * There are three levels of locking required by epoll:
+ *
+ * 1) epsem (semaphore)
+ * 2) ep->sem (rw_semaphore)
+ * 3) ep->lock (rw_lock)
+ *
+ * The acquire order is the one listed above, from 1 to 3.
+ * We need a spinlock (ep->lock) because we manipulate objects
+ * from inside the poll callback, that might be triggered from
+ * a wake_up() that in turn might be called from IRQ context.
+ * So we can't sleep inside the poll callback and hence we need
+ * a spinlock. During the event transfer loop (from kernel to
+ * user space) we could end up sleeping due to a copy_to_user(), so
+ * we need a lock that will allow us to sleep. This lock is a
+ * read-write semaphore (ep->sem). It is acquired on read during
+ * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL)
+ * and during eventpoll_release_file(). Then we also need a global
+ * semaphore to serialize eventpoll_release_file() and ep_free().
+ * This semaphore is acquired by ep_free() during the epoll file
+ * cleanup path and it is also acquired by eventpoll_release_file()
+ * if a file has been pushed inside an epoll set and it is then
+ * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is possible to drop the "ep->sem" and to use the global
+ * semaphore "epsem" (together with "ep->lock") to have it working,
+ * but having "ep->sem" will make the interface more scalable.
+ * Events that require holding "epsem" are very rare, while for
+ * normal operations the epoll private "ep->sem" will guarantee
+ * a greater scalability.
+ */
+
+
+#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */
+
+#define DEBUG_EPOLL 0
+
+#if DEBUG_EPOLL > 0
+#define DPRINTK(x) printk x
+#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
+#else /* #if DEBUG_EPOLL > 0 */
+#define DPRINTK(x) (void) 0
+#define DNPRINTK(n, x) (void) 0
+#endif /* #if DEBUG_EPOLL > 0 */
+
+#define DEBUG_EPI 0
+
+#if DEBUG_EPI != 0
+#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
+#else /* #if DEBUG_EPI != 0 */
+#define EPI_SLAB_DEBUG 0
+#endif /* #if DEBUG_EPI != 0 */
+
+/* Epoll private bits inside the event mask */
+#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)
+
+/* Maximum number of poll wake up nests we are allowing */
+#define EP_MAX_POLLWAKE_NESTS 4
+
+/* Macro to allocate a "struct epitem" from the slab cache */
+#define EPI_MEM_ALLOC()	(struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL)
+
+/* Macro to free a "struct epitem" to the slab cache */
+#define EPI_MEM_FREE(p) kmem_cache_free(epi_cache, p)
+
+/* Macro to allocate a "struct eppoll_entry" from the slab cache */
+#define PWQ_MEM_ALLOC()	(struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL)
+
+/* Macro to free a "struct eppoll_entry" to the slab cache */
+#define PWQ_MEM_FREE(p) kmem_cache_free(pwq_cache, p)
+
+/* Fast test to see if the file is an eventpoll file */
+#define IS_FILE_EPOLL(f) ((f)->f_op == &eventpoll_fops)
+
+/* Setup the structure that is used as key for the rb-tree */
+#define EP_SET_FFD(p, f, d) do { (p)->file = (f); (p)->fd = (d); } while (0)
+
+/* Compare rb-tree keys */
+#define EP_CMP_FFD(p1, p2) ((p1)->file > (p2)->file ? +1: \
+			    ((p1)->file < (p2)->file ? -1: (p1)->fd - (p2)->fd))
+
+/* Special initialization for the rb-tree node to detect linkage */
+#define EP_RB_INITNODE(n) (n)->rb_parent = (n)
+
+/* Removes a node from the rb-tree and marks it for a fast is-linked check */
+#define EP_RB_ERASE(n, r) do { rb_erase(n, r); (n)->rb_parent = (n); } while (0)
+
+/* Fast check to verify that the item is linked to the main rb-tree */
+#define EP_RB_LINKED(n) ((n)->rb_parent != (n))
+
+/*
+ * Remove the item from the list and perform its initialization.
+ * This is useful for us because we can test if the item is linked
+ * using "EP_IS_LINKED(p)".
+ */
+#define EP_LIST_DEL(p) do { list_del(p); INIT_LIST_HEAD(p); } while (0)
+
+/* Tells us if the item is currently linked */
+#define EP_IS_LINKED(p) (!list_empty(p))
+
+/* Get the "struct epitem" from a wait queue pointer */
+#define EP_ITEM_FROM_WAIT(p) ((struct epitem *) container_of(p, struct eppoll_entry, wait)->base)
+
+/* Get the "struct epitem" from an epoll queue wrapper */
+#define EP_ITEM_FROM_EPQUEUE(p) (container_of(p, struct ep_pqueue, pt)->epi)
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+#define EP_OP_HASH_EVENT(op) ((op) != EPOLL_CTL_DEL)
+
+
+struct epoll_filefd {
+	struct file *file;
+	int fd;
+};
+
+/*
+ * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
+ * It is used to keep track of all tasks that are currently inside the wake_up() code
+ * to 1) short-circuit the one coming from the same task and same wait queue head
+ * (a loop), 2) allow a maximum nesting depth of epoll descriptor inclusion and
+ * 3) let through the ones coming from other tasks.
+ */
+struct wake_task_node {
+	struct list_head llink;
+	task_t *task;
+	wait_queue_head_t *wq;
+};
+
+/*
+ * This is used to implement the safe poll wake up, avoiding re-entry into
+ * the poll callback from inside wake_up().
+ */
+struct poll_safewake {
+	struct list_head wake_task_list;
+	spinlock_t lock;
+};
+
+/*
+ * This structure is stored inside the "private_data" member of the file
+ * structure and represents the main data structure for the eventpoll
+ * interface.
+ */
+struct eventpoll {
+	/* Protects access to this structure */
+	rwlock_t lock;
+
+	/*
+	 * This semaphore is used to ensure that files are not removed
+	 * while epoll is using them. This is read-held during the event
+	 * collection loop and it is write-held during the file cleanup
+	 * path, the epoll file exit code and the ctl operations.
+	 */
+	struct rw_semaphore sem;
+
+	/* Wait queue used by sys_epoll_wait() */
+	wait_queue_head_t wq;
+
+	/* Wait queue used by file->poll() */
+	wait_queue_head_t poll_wait;
+
+	/* List of ready file descriptors */
+	struct list_head rdllist;
+
+	/* RB-Tree root used to store monitored fd structs */
+	struct rb_root rbr;
+};
+
+/* Wait structure used by the poll hooks */
+struct eppoll_entry {
+	/* List header used to link this structure to the "struct epitem" */
+	struct list_head llink;
+
+	/* The "base" pointer is set to the container "struct epitem" */
+	void *base;
+
+	/*
+	 * Wait queue item that will be linked to the target file wait
+	 * queue head.
+	 */
+	wait_queue_t wait;
+
+	/* The wait queue head that linked the "wait" wait queue item */
+	wait_queue_head_t *whead;
+};
+
+/*
+ * Each file descriptor added to the eventpoll interface will
+ * have an entry of this type linked into the rb-tree.
+ */
+struct epitem {
+	/* RB-Tree node used to link this structure to the eventpoll rb-tree */
+	struct rb_node rbn;
+
+	/* List header used to link this structure to the eventpoll ready list */
+	struct list_head rdllink;
+
+	/* The file descriptor information this item refers to */
+	struct epoll_filefd ffd;
+
+	/* Number of active wait queues attached to poll operations */
+	int nwait;
+
+	/* List containing poll wait queues */
+	struct list_head pwqlist;
+
+	/* The "container" of this item */
+	struct eventpoll *ep;
+
+	/* The structure that describes the interested events and the source fd */
+	struct epoll_event event;
+
+	/*
+	 * Used to keep track of the usage count of the structure. This prevents
+	 * the structure from disappearing from underneath our processing.
+	 */
+	atomic_t usecnt;
+
+	/* List header used to link this item to the "struct file" items list */
+	struct list_head fllink;
+
+	/* List header used to link the item to the transfer list */
+	struct list_head txlink;
+
+	/*
+	 * This is used during the collection/transfer of events to userspace
+	 * to hold the event set of the item while it sits on the transfer
+	 * list.
+	 */
+	unsigned int revents;
+};
+
+/* Wrapper struct used by poll queueing */
+struct ep_pqueue {
+	poll_table pt;
+	struct epitem *epi;
+};
+
+
+
+static void ep_poll_safewake_init(struct poll_safewake *psw);
+static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
+static int ep_getfd(int *efd, struct inode **einode, struct file **efile);
+static int ep_file_init(struct file *file);
+static void ep_free(struct eventpoll *ep);
+static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
+static void ep_use_epitem(struct epitem *epi);
+static void ep_release_epitem(struct epitem *epi);
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt);
+static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi);
+static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+		     struct file *tfile, int fd);
+static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+		     struct epoll_event *event);
+static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
+static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
+static int ep_remove(struct eventpoll *ep, struct epitem *epi);
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key);
+static int ep_eventpoll_close(struct inode *inode, struct file *file);
+static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
+static int ep_collect_ready_items(struct eventpoll *ep,
+				  struct list_head *txlist, int maxevents);
+static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
+			  struct epoll_event __user *events);
+static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist);
+static int ep_events_transfer(struct eventpoll *ep,
+			      struct epoll_event __user *events,
+			      int maxevents);
+static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+		   int maxevents, long timeout);
+static int eventpollfs_delete_dentry(struct dentry *dentry);
+static struct inode *ep_eventpoll_inode(void);
+static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
+					      int flags, const char *dev_name,
+					      void *data);
+
+/*
+ * This semaphore is used to serialize ep_free() and eventpoll_release_file().
+ */
+struct semaphore epsem;
+
+/* Safe wake up implementation */
+static struct poll_safewake psw;
+
+/* Slab cache used to allocate "struct epitem" */
+static kmem_cache_t *epi_cache;
+
+/* Slab cache used to allocate "struct eppoll_entry" */
+static kmem_cache_t *pwq_cache;
+
+/* Virtual fs used to allocate inodes for eventpoll files */
+static struct vfsmount *eventpoll_mnt;
+
+/* File callbacks that implement the eventpoll file behaviour */
+static struct file_operations eventpoll_fops = {
+	.release	= ep_eventpoll_close,
+	.poll		= ep_eventpoll_poll
+};
+
+/*
+ * This is used to register the virtual file system from where
+ * eventpoll inodes are allocated.
+ */
+static struct file_system_type eventpoll_fs_type = {
+	.name		= "eventpollfs",
+	.get_sb		= eventpollfs_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+/* Very basic directory entry operations for the eventpoll virtual file system */
+static struct dentry_operations eventpollfs_dentry_operations = {
+	.d_delete	= eventpollfs_delete_dentry,
+};
+
+
+
+/* Initialize the poll safe wake up structure */
+static void ep_poll_safewake_init(struct poll_safewake *psw)
+{
+
+	INIT_LIST_HEAD(&psw->wake_task_list);
+	spin_lock_init(&psw->lock);
+}
+
+
+/*
+ * Perform a safe wake up of the poll wait list. The problem is that
+ * with the new callback'd wake up system, it is possible that the
+ * poll callback is reentered from inside the call to wake_up() done
+ * on the poll wait queue head. The rule is that we cannot reenter the
+ * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
+ * and we cannot reenter the same wait queue head at all. This allows
+ * a hierarchy of epoll file descriptors no more than
+ * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
+ * because this one gets called by the poll callback, which in turn is called
+ * from inside a wake_up(), which might be called from irq context.
+ */
+static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
+{
+	int wake_nests = 0;
+	unsigned long flags;
+	task_t *this_task = current;
+	struct list_head *lsthead = &psw->wake_task_list, *lnk;
+	struct wake_task_node *tncur;
+	struct wake_task_node tnode;
+
+	spin_lock_irqsave(&psw->lock, flags);
+
+	/* Try to see if the current task is already inside this wakeup call */
+	list_for_each(lnk, lsthead) {
+		tncur = list_entry(lnk, struct wake_task_node, llink);
+
+		if (tncur->wq == wq ||
+		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
+			/*
+			 * Oops ... loop detected or maximum nest level reached.
+			 * We abort this wake by breaking the cycle itself.
+			 */
+			spin_unlock_irqrestore(&psw->lock, flags);
+			return;
+		}
+	}
+
+	/* Add the current task to the list */
+	tnode.task = this_task;
+	tnode.wq = wq;
+	list_add(&tnode.llink, lsthead);
+
+	spin_unlock_irqrestore(&psw->lock, flags);
+
+	/* Really do the wake up now */
+	wake_up(wq);
+
+	/* Remove the current task from the list */
+	spin_lock_irqsave(&psw->lock, flags);
+	list_del(&tnode.llink);
+	spin_unlock_irqrestore(&psw->lock, flags);
+}
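+
+/*
+ * Illustrative user-space sketch of the nesting handled above: epoll file
+ * descriptors may watch other epoll file descriptors, so a wakeup on the
+ * innermost file propagates through a chain of ep_poll_callback() ->
+ * ep_poll_safewake() calls, bounded to EP_MAX_POLLWAKE_NESTS levels.
+ * Here "sock_fd" is an assumed, already-open socket and error handling
+ * is omitted:
+ *
+ *	struct epoll_event ev = { .events = EPOLLIN };
+ *	int inner = epoll_create(1), outer = epoll_create(1);
+ *
+ *	ev.data.fd = sock_fd;
+ *	epoll_ctl(inner, EPOLL_CTL_ADD, sock_fd, &ev);
+ *	ev.data.fd = inner;
+ *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
+ */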
+
+
+/* Used to initialize the epoll bits inside the "struct file" */
+void eventpoll_init_file(struct file *file)
+{
+
+	INIT_LIST_HEAD(&file->f_ep_links);
+	spin_lock_init(&file->f_ep_lock);
+}
+
+
+/*
+ * This is called from eventpoll_release() to unlink files from the eventpoll
+ * interface. We need this facility to correctly clean up files that are
+ * closed without being removed from the eventpoll interface.
+ */
+void eventpoll_release_file(struct file *file)
+{
+	struct list_head *lsthead = &file->f_ep_links;
+	struct eventpoll *ep;
+	struct epitem *epi;
+
+	/*
+	 * We don't want to get "file->f_ep_lock" because it is not
+	 * necessary. It is not necessary because we're in the "struct file"
+	 * cleanup path, and this means that no one is using this file anymore.
+	 * The only hit might come from ep_free(), but holding the semaphore
+	 * will correctly serialize the operation. We do need to acquire
+	 * "ep->sem" after "epsem" because ep_remove() requires it when called
+	 * from anywhere but ep_free().
+	 */
+	down(&epsem);
+
+	while (!list_empty(lsthead)) {
+		epi = list_entry(lsthead->next, struct epitem, fllink);
+
+		ep = epi->ep;
+		EP_LIST_DEL(&epi->fllink);
+		down_write(&ep->sem);
+		ep_remove(ep, epi);
+		up_write(&ep->sem);
+	}
+
+	up(&epsem);
+}
+
+
+/*
+ * It opens an eventpoll file descriptor by suggesting a storage of "size"
+ * file descriptors. The size parameter is just a hint about how to size
+ * data structures. It won't prevent the user from storing more than "size"
+ * file descriptors inside the epoll interface. It is the kernel part of
+ * the userspace epoll_create(2).
+ */
+asmlinkage long sys_epoll_create(int size)
+{
+	int error, fd;
+	struct inode *inode;
+	struct file *file;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
+		     current, size));
+
+	/* Sanity check on the size parameter */
+	error = -EINVAL;
+	if (size <= 0)
+		goto eexit_1;
+
+	/*
+	 * Creates all the items needed to set up an eventpoll file. That is,
+	 * a file structure, an inode and a free file descriptor.
+	 */
+	error = ep_getfd(&fd, &inode, &file);
+	if (error)
+		goto eexit_1;
+
+	/* Setup the file internal data structure ( "struct eventpoll" ) */
+	error = ep_file_init(file);
+	if (error)
+		goto eexit_2;
+
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
+		     current, size, fd));
+
+	return fd;
+
+eexit_2:
+	sys_close(fd);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
+		     current, size, error));
+	return error;
+}
+
+
+/*
+ * The following function implements the controller interface for
+ * the eventpoll file that enables the insertion/removal/change of
+ * file descriptors inside the interest set.  It represents
+ * the kernel part of the user space epoll_ctl(2).
+ */
+asmlinkage long
+sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
+{
+	int error;
+	struct file *file, *tfile;
+	struct eventpoll *ep;
+	struct epitem *epi;
+	struct epoll_event epds;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
+		     current, epfd, op, fd, event));
+
+	error = -EFAULT;
+	if (EP_OP_HASH_EVENT(op) &&
+	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
+		goto eexit_1;
+
+	/* Get the "struct file *" for the eventpoll file */
+	error = -EBADF;
+	file = fget(epfd);
+	if (!file)
+		goto eexit_1;
+
+	/* Get the "struct file *" for the target file */
+	tfile = fget(fd);
+	if (!tfile)
+		goto eexit_2;
+
+	/* The target file descriptor must support poll */
+	error = -EPERM;
+	if (!tfile->f_op || !tfile->f_op->poll)
+		goto eexit_3;
+
+	/*
+	 * We have to check that the file structure underneath the file descriptor
+	 * the user passed to us _is_ an eventpoll file. Also, we do not permit
+	 * adding an epoll file descriptor to itself.
+	 */
+	error = -EINVAL;
+	if (file == tfile || !IS_FILE_EPOLL(file))
+		goto eexit_3;
+
+	/*
+	 * At this point it is safe to assume that the "private_data" contains
+	 * our own data structure.
+	 */
+	ep = file->private_data;
+
+	down_write(&ep->sem);
+
+	/* Try to look up the file inside our rb-tree */
+	epi = ep_find(ep, tfile, fd);
+
+	error = -EINVAL;
+	switch (op) {
+	case EPOLL_CTL_ADD:
+		if (!epi) {
+			epds.events |= POLLERR | POLLHUP;
+
+			error = ep_insert(ep, &epds, tfile, fd);
+		} else
+			error = -EEXIST;
+		break;
+	case EPOLL_CTL_DEL:
+		if (epi)
+			error = ep_remove(ep, epi);
+		else
+			error = -ENOENT;
+		break;
+	case EPOLL_CTL_MOD:
+		if (epi) {
+			epds.events |= POLLERR | POLLHUP;
+			error = ep_modify(ep, epi, &epds);
+		} else
+			error = -ENOENT;
+		break;
+	}
+
+	/*
+	 * The function ep_find() increments the usage count of the structure
+	 * so, if this is not NULL, we need to release it.
+	 */
+	if (epi)
+		ep_release_epitem(epi);
+
+	up_write(&ep->sem);
+
+eexit_3:
+	fput(tfile);
+eexit_2:
+	fput(file);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
+		     current, epfd, op, fd, event, error));
+
+	return error;
+}
+
+#define MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
+
+/*
+ * Implement the event wait interface for the eventpoll file. It is the kernel
+ * part of the user space epoll_wait(2).
+ */
+asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
+			       int maxevents, int timeout)
+{
+	int error;
+	struct file *file;
+	struct eventpoll *ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
+		     current, epfd, events, maxevents, timeout));
+
+	/* The maximum number of events must be greater than zero */
+	if (maxevents <= 0 || maxevents > MAX_EVENTS)
+		return -EINVAL;
+
+	/* Verify that the area passed by the user is writeable */
+	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
+		error = -EFAULT;
+		goto eexit_1;
+	}
+
+	/* Get the "struct file *" for the eventpoll file */
+	error = -EBADF;
+	file = fget(epfd);
+	if (!file)
+		goto eexit_1;
+
+	/*
+	 * We have to check that the file structure underneath the fd
+	 * the user passed to us _is_ an eventpoll file.
+	 */
+	error = -EINVAL;
+	if (!IS_FILE_EPOLL(file))
+		goto eexit_2;
+
+	/*
+	 * At this point it is safe to assume that the "private_data" contains
+	 * our own data structure.
+	 */
+	ep = file->private_data;
+
+	/* Time to fish for events ... */
+	error = ep_poll(ep, events, maxevents, timeout);
+
+eexit_2:
+	fput(file);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
+		     current, epfd, events, maxevents, timeout, error));
+
+	return error;
+}
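+
+/*
+ * Minimal user-space sketch tying the three system calls above together
+ * ( illustrative only: error handling is omitted, "listen_fd" is an assumed,
+ * already-listening socket and handle_io() is an assumed helper ):
+ *
+ *	#include <sys/epoll.h>
+ *
+ *	struct epoll_event ev, events[64];
+ *	int i, n, epfd = epoll_create(64);	// the size is only a hint
+ *
+ *	ev.events = EPOLLIN;
+ *	ev.data.fd = listen_fd;
+ *	epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
+ *
+ *	for (;;) {
+ *		n = epoll_wait(epfd, events, 64, -1);	// -1 == wait forever
+ *		for (i = 0; i < n; i++)
+ *			handle_io(events[i].data.fd, events[i].events);
+ *	}
+ */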
+
+
+/*
+ * Creates the file descriptor to be used by the epoll interface.
+ */
+static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
+{
+	struct qstr this;
+	char name[32];
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *file;
+	int error, fd;
+
+	/* Get a ready-to-use file */
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto eexit_1;
+
+	/* Allocates an inode from the eventpoll file system */
+	inode = ep_eventpoll_inode();
+	error = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto eexit_2;
+
+	/* Allocates a free descriptor to plug the file onto */
+	error = get_unused_fd();
+	if (error < 0)
+		goto eexit_3;
+	fd = error;
+
+	/*
+	 * Link the inode to a directory entry by creating a unique name
+	 * using the inode number.
+	 */
+	error = -ENOMEM;
+	sprintf(name, "[%lu]", inode->i_ino);
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = inode->i_ino;
+	dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
+	if (!dentry)
+		goto eexit_4;
+	dentry->d_op = &eventpollfs_dentry_operations;
+	d_add(dentry, inode);
+	file->f_vfsmnt = mntget(eventpoll_mnt);
+	file->f_dentry = dentry;
+	file->f_mapping = inode->i_mapping;
+
+	file->f_pos = 0;
+	file->f_flags = O_RDONLY;
+	file->f_op = &eventpoll_fops;
+	file->f_mode = FMODE_READ;
+	file->f_version = 0;
+	file->private_data = NULL;
+
+	/* Install the new setup file into the allocated fd. */
+	fd_install(fd, file);
+
+	*efd = fd;
+	*einode = inode;
+	*efile = file;
+	return 0;
+
+eexit_4:
+	put_unused_fd(fd);
+eexit_3:
+	iput(inode);
+eexit_2:
+	put_filp(file);
+eexit_1:
+	return error;
+}
+
+
+static int ep_file_init(struct file *file)
+{
+	struct eventpoll *ep;
+
+	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
+		return -ENOMEM;
+
+	memset(ep, 0, sizeof(*ep));
+	rwlock_init(&ep->lock);
+	init_rwsem(&ep->sem);
+	init_waitqueue_head(&ep->wq);
+	init_waitqueue_head(&ep->poll_wait);
+	INIT_LIST_HEAD(&ep->rdllist);
+	ep->rbr = RB_ROOT;
+
+	file->private_data = ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_file_init() ep=%p\n",
+		     current, ep));
+	return 0;
+}
+
+
+static void ep_free(struct eventpoll *ep)
+{
+	struct rb_node *rbp;
+	struct epitem *epi;
+
+	/* We need to release all tasks waiting on this file */
+	if (waitqueue_active(&ep->poll_wait))
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	/*
+	 * We need to lock this because we could be hit by
+	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
+	 * We do not need to hold "ep->sem" here because the epoll file
+	 * is on the way to be removed and no one has references to it
+	 * anymore. The only hit might come from eventpoll_release_file() but
+	 * holding "epsem" is sufficient here.
+	 */
+	down(&epsem);
+
+	/*
+	 * Walks through the whole tree, unregistering poll callbacks.
+	 */
+	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+
+		ep_unregister_pollwait(ep, epi);
+	}
+
+	/*
+	 * Walks through the whole tree, freeing each "struct epitem". At this
+	 * point we are sure no poll callbacks will be lingering around, and also,
+	 * by write-holding "sem", we can be sure that no file cleanup code will hit
+	 * us during this operation. So we can avoid the lock on "ep->lock".
+	 */
+	while ((rbp = rb_first(&ep->rbr)) != 0) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+		ep_remove(ep, epi);
+	}
+
+	up(&epsem);
+}
+
+
+/*
+ * Search for the file inside the eventpoll rb-tree. It adds a usage count to
+ * the returned item, so the caller must call ep_release_epitem()
+ * after it has finished using the "struct epitem".
+ */
+static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
+{
+	int kcmp;
+	unsigned long flags;
+	struct rb_node *rbp;
+	struct epitem *epi, *epir = NULL;
+	struct epoll_filefd ffd;
+
+	EP_SET_FFD(&ffd, file, fd);
+	read_lock_irqsave(&ep->lock, flags);
+	for (rbp = ep->rbr.rb_node; rbp; ) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+		kcmp = EP_CMP_FFD(&ffd, &epi->ffd);
+		if (kcmp > 0)
+			rbp = rbp->rb_right;
+		else if (kcmp < 0)
+			rbp = rbp->rb_left;
+		else {
+			ep_use_epitem(epi);
+			epir = epi;
+			break;
+		}
+	}
+	read_unlock_irqrestore(&ep->lock, flags);
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
+		     current, file, epir));
+
+	return epir;
+}
+
+
+/*
+ * Increment the usage count of the "struct epitem", making sure
+ * that the user will have a valid pointer to reference.
+ */
+static void ep_use_epitem(struct epitem *epi)
+{
+
+	atomic_inc(&epi->usecnt);
+}
+
+
+/*
+ * Decrement ( release ) the usage count by signaling that the user
+ * has finished using the structure. It might lead to freeing the
+ * structure itself if the count goes to zero.
+ */
+static void ep_release_epitem(struct epitem *epi)
+{
+
+	if (atomic_dec_and_test(&epi->usecnt))
+		EPI_MEM_FREE(epi);
+}
+
+
+/*
+ * This is the callback that is used to add our wait queue to the
+ * target file wakeup lists.
+ */
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt)
+{
+	struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt);
+	struct eppoll_entry *pwq;
+
+	if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) {
+		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
+		pwq->whead = whead;
+		pwq->base = epi;
+		add_wait_queue(whead, &pwq->wait);
+		list_add_tail(&pwq->llink, &epi->pwqlist);
+		epi->nwait++;
+	} else {
+		/* We have to signal that an error occurred */
+		epi->nwait = -1;
+	}
+}
+
+
+static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
+{
+	int kcmp;
+	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
+	struct epitem *epic;
+
+	while (*p) {
+		parent = *p;
+		epic = rb_entry(parent, struct epitem, rbn);
+		kcmp = EP_CMP_FFD(&epi->ffd, &epic->ffd);
+		if (kcmp > 0)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&epi->rbn, parent, p);
+	rb_insert_color(&epi->rbn, &ep->rbr);
+}
+
+
+static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+		     struct file *tfile, int fd)
+{
+	int error, revents, pwake = 0;
+	unsigned long flags;
+	struct epitem *epi;
+	struct ep_pqueue epq;
+
+	error = -ENOMEM;
+	if (!(epi = EPI_MEM_ALLOC()))
+		goto eexit_1;
+
+	/* Item initialization follow here ... */
+	EP_RB_INITNODE(&epi->rbn);
+	INIT_LIST_HEAD(&epi->rdllink);
+	INIT_LIST_HEAD(&epi->fllink);
+	INIT_LIST_HEAD(&epi->txlink);
+	INIT_LIST_HEAD(&epi->pwqlist);
+	epi->ep = ep;
+	EP_SET_FFD(&epi->ffd, tfile, fd);
+	epi->event = *event;
+	atomic_set(&epi->usecnt, 1);
+	epi->nwait = 0;
+
+	/* Initialize the poll table using the queue callback */
+	epq.epi = epi;
+	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+
+	/*
+	 * Attach the item to the poll hooks and get current event bits.
+	 * We can safely use the file* here because its usage count has
+	 * been increased by the caller of this function.
+	 */
+	revents = tfile->f_op->poll(tfile, &epq.pt);
+
+	/*
+	 * We have to check if something went wrong during the poll wait queue
+	 * install process, namely an allocation for a wait queue failing due
+	 * to high memory pressure.
+	 */
+	if (epi->nwait < 0)
+		goto eexit_2;
+
+	/* Add the current item to the list of active epoll hooks for this file */
+	spin_lock(&tfile->f_ep_lock);
+	list_add_tail(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_ep_lock);
+
+	/* We have to drop the new item inside our item list to keep track of it */
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Add the current item to the rb-tree */
+	ep_rbtree_insert(ep, epi);
+
+	/* If the file is already "ready" we drop it inside the ready list */
+	if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) {
+		list_add_tail(&epi->rdllink, &ep->rdllist);
+
+		/* Notify waiting tasks that events are available */
+		if (waitqueue_active(&ep->wq))
+			wake_up(&ep->wq);
+		if (waitqueue_active(&ep->poll_wait))
+			pwake++;
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
+		     current, ep, tfile, fd));
+
+	return 0;
+
+eexit_2:
+	ep_unregister_pollwait(ep, epi);
+
+	/*
+	 * We need to do this because an event could have arrived on some
+	 * allocated wait queue.
+	 */
+	write_lock_irqsave(&ep->lock, flags);
+	if (EP_IS_LINKED(&epi->rdllink))
+		EP_LIST_DEL(&epi->rdllink);
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	EPI_MEM_FREE(epi);
+eexit_1:
+	return error;
+}
+
+
+/*
+ * Modify the interest event mask by dropping an event if the new mask
+ * has a match in the current file status.
+ */
+static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
+{
+	int pwake = 0;
+	unsigned int revents;
+	unsigned long flags;
+
+	/*
+	 * Set the new event interest mask before calling f_op->poll(), otherwise
+	 * a potential race might occur. In fact if we do this operation inside
+	 * the lock, an event might happen between the f_op->poll() call and the
+	 * new event set registering.
+	 */
+	epi->event.events = event->events;
+
+	/*
+	 * Get current event bits. We can safely use the file* here because
+	 * its usage count has been increased by the caller of this function.
+	 */
+	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Copy the data member from inside the lock */
+	epi->event.data = event->data;
+
+	/*
+	 * If the item is not linked to the rb-tree it means that it's on its
+	 * way to being removed. Do nothing in this case.
+	 */
+	if (EP_RB_LINKED(&epi->rbn)) {
+		/*
+		 * If the item is "hot" and it is not registered inside the ready
+		 * list, push it inside. If the item is not "hot" and it is currently
+		 * registered inside the ready list, unlink it.
+		 */
+		if (revents & event->events) {
+			if (!EP_IS_LINKED(&epi->rdllink)) {
+				list_add_tail(&epi->rdllink, &ep->rdllist);
+
+				/* Notify waiting tasks that events are available */
+				if (waitqueue_active(&ep->wq))
+					wake_up(&ep->wq);
+				if (waitqueue_active(&ep->poll_wait))
+					pwake++;
+			}
+		}
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	return 0;
+}
+
+
+/*
+ * This function unregisters poll callbacks from the associated file descriptor.
+ * Since this must be called without holding "ep->lock", the atomic exchange trick
+ * protects us from multiple unregisters.
+ */
+static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
+{
+	int nwait;
+	struct list_head *lsthead = &epi->pwqlist;
+	struct eppoll_entry *pwq;
+
+	/* This is called without locks, so we need the atomic exchange */
+	nwait = xchg(&epi->nwait, 0);
+
+	if (nwait) {
+		while (!list_empty(lsthead)) {
+			pwq = list_entry(lsthead->next, struct eppoll_entry, llink);
+
+			EP_LIST_DEL(&pwq->llink);
+			remove_wait_queue(pwq->whead, &pwq->wait);
+			PWQ_MEM_FREE(pwq);
+		}
+	}
+}
+
+
+/*
+ * Unlink the "struct epitem" from all places it might have been hooked up.
+ * This function must be called with write IRQ lock on "ep->lock".
+ */
+static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
+{
+	int error;
+
+	/*
+	 * It can happen that this one is called for an item already unlinked.
+	 * The check protects us from doing a double unlink ( crash ).
+	 */
+	error = -ENOENT;
+	if (!EP_RB_LINKED(&epi->rbn))
+		goto eexit_1;
+
+	/*
+	 * Clear the event mask for the unlinked item. This prevents item
+	 * notifications from being sent after the unlink operation, from inside
+	 * the kernel->userspace event transfer loop.
+	 */
+	epi->event.events = 0;
+
+	/*
+	 * At this point it is safe to do the job: unlink the item from our rb-tree.
+	 * This operation, together with the above check, closes the door to
+	 * double unlinks.
+	 */
+	EP_RB_ERASE(&epi->rbn, &ep->rbr);
+
+	/*
+	 * If the item we are going to remove is inside the ready file descriptor
+	 * list, we want to remove it from that list to avoid stale events.
+	 */
+	if (EP_IS_LINKED(&epi->rdllink))
+		EP_LIST_DEL(&epi->rdllink);
+
+	error = 0;
+eexit_1:
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
+		     current, ep, epi->file, error));
+
+	return error;
+}
+
+
+/*
+ * Removes a "struct epitem" from the eventpoll hash and deallocates
+ * all the associated resources.
+ */
+static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+{
+	int error;
+	unsigned long flags;
+	struct file *file = epi->ffd.file;
+
+	/*
+	 * Removes poll wait queue hooks. We _have_ to do this without holding
+	 * the "ep->lock", otherwise a deadlock might occur because of the lock
+	 * acquisition ordering: here we would take "ep->lock" and then the wait
+	 * queue head lock when unregistering the wait queue, while the wakeup
+	 * callback runs holding the wait queue head lock and calls our callback,
+	 * which tries to take "ep->lock".
+	 */
+	ep_unregister_pollwait(ep, epi);
+
+	/* Remove the current item from the list of epoll hooks */
+	spin_lock(&file->f_ep_lock);
+	if (EP_IS_LINKED(&epi->fllink))
+		EP_LIST_DEL(&epi->fllink);
+	spin_unlock(&file->f_ep_lock);
+
+	/* We need to acquire the write IRQ lock before calling ep_unlink() */
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Really unlink the item from the rb-tree */
+	error = ep_unlink(ep, epi);
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	if (error)
+		goto eexit_1;
+
+	/* At this point it is safe to free the eventpoll item */
+	ep_release_epitem(epi);
+
+	error = 0;
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
+		     current, ep, file, error));
+
+	return error;
+}
+
+
+/*
+ * This is the callback that is passed to the wait queue wakeup
+ * mechanism. It is called by the stored file descriptors when they
+ * have events to report.
+ */
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	int pwake = 0;
+	unsigned long flags;
+	struct epitem *epi = EP_ITEM_FROM_WAIT(wait);
+	struct eventpoll *ep = epi->ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
+		     current, epi->file, epi, ep));
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	/*
+	 * If the event mask does not contain any poll(2) event, we consider the
+	 * descriptor to be disabled. This condition is likely the effect of the
+	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
+	 * until the next EPOLL_CTL_MOD is issued.
+	 */
+	if (!(epi->event.events & ~EP_PRIVATE_BITS))
+		goto is_disabled;
+
+	/* If this file is already in the ready list, we don't queue it again */
+	if (EP_IS_LINKED(&epi->rdllink))
+		goto is_linked;
+
+	list_add_tail(&epi->rdllink, &ep->rdllist);
+
+is_linked:
+	/*
+	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
+	 * wait list.
+	 */
+	if (waitqueue_active(&ep->wq))
+		wake_up(&ep->wq);
+	if (waitqueue_active(&ep->poll_wait))
+		pwake++;
+
+is_disabled:
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	return 1;
+}
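+
+/*
+ * User-space sketch of the EPOLLONESHOT behaviour described above
+ * ( illustrative only; "epfd" and "fd" are assumed descriptors ): once the
+ * event has been reported, the descriptor stays disabled until it is
+ * explicitly re-armed with EPOLL_CTL_MOD.
+ *
+ *	struct epoll_event ev = { .events = EPOLLIN | EPOLLONESHOT };
+ *	struct epoll_event out;
+ *
+ *	ev.data.fd = fd;
+ *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
+ *	epoll_wait(epfd, &out, 1, -1);		// reported once, then disabled
+ *	epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);	// re-arm the descriptor
+ */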
+
+
+static int ep_eventpoll_close(struct inode *inode, struct file *file)
+{
+	struct eventpoll *ep = file->private_data;
+
+	if (ep) {
+		ep_free(ep);
+		kfree(ep);
+	}
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
+	return 0;
+}
+
+
+static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+{
+	unsigned int pollflags = 0;
+	unsigned long flags;
+	struct eventpoll *ep = file->private_data;
+
+	/* Insert inside our poll wait queue */
+	poll_wait(file, &ep->poll_wait, wait);
+
+	/* Check our condition */
+	read_lock_irqsave(&ep->lock, flags);
+	if (!list_empty(&ep->rdllist))
+		pollflags = POLLIN | POLLRDNORM;
+	read_unlock_irqrestore(&ep->lock, flags);
+
+	return pollflags;
+}
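+
+/*
+ * Because of the hook above, an eventpoll file is itself pollable: it
+ * reports POLLIN whenever its ready list is not empty. Illustrative
+ * user-space sketch ( "epfd", "events" and "n" are assumed ):
+ *
+ *	struct pollfd pfd = { .fd = epfd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		n = epoll_wait(epfd, events, 64, 0);	// will not block
+ */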
+
+
+/*
+ * Since we have to release the lock during the __copy_to_user() operation and
+ * during the f_op->poll() call, we try to collect the maximum number of items
+ * by reducing the irqlock/irqunlock switching rate.
+ */
+static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
+{
+	int nepi;
+	unsigned long flags;
+	struct list_head *lsthead = &ep->rdllist, *lnk;
+	struct epitem *epi;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	for (nepi = 0, lnk = lsthead->next; lnk != lsthead && nepi < maxevents;) {
+		epi = list_entry(lnk, struct epitem, rdllink);
+
+		lnk = lnk->next;
+
+		/* Collect the item only if it is not already on the transfer list */
+		if (!EP_IS_LINKED(&epi->txlink)) {
+			/*
+			 * This is initialized in this way so that the default
+			 * behaviour of the reinjecting code will be to push back
+			 * the item inside the ready list.
+			 */
+			epi->revents = epi->event.events;
+
+			/* Link the ready item into the transfer list */
+			list_add(&epi->txlink, txlist);
+			nepi++;
+
+			/*
+			 * Unlink the item from the ready list.
+			 */
+			EP_LIST_DEL(&epi->rdllink);
+		}
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	return nepi;
+}
+
+
+/*
+ * This function is called without holding the "ep->lock" since the call to
+ * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ
+ * because of the way poll() is traditionally implemented in Linux.
+ */
+static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
+			  struct epoll_event __user *events)
+{
+	int eventcnt = 0;
+	unsigned int revents;
+	struct list_head *lnk;
+	struct epitem *epi;
+
+	/*
+	 * We can loop without lock because this is a task private list.
+	 * The test done during the collection loop will guarantee us that
+	 * another task will not try to collect this file. Also, items
+	 * cannot vanish during the loop because we are holding "sem".
+	 */
+	list_for_each(lnk, txlist) {
+		epi = list_entry(lnk, struct epitem, txlink);
+
+		/*
+		 * Get the ready file event set. We can safely use the file
+		 * because we are holding the "sem" in read and this will
+		 * guarantee that both the file and the item will not vanish.
+		 */
+		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
+
+		/*
+		 * Set the return event set for the current file descriptor.
+		 * Note that only the task that was successfully able to link
+		 * the item to its "txlist" will write this field.
+		 */
+		epi->revents = revents & epi->event.events;
+
+		if (epi->revents) {
+			if (__put_user(epi->revents,
+				       &events[eventcnt].events) ||
+			    __put_user(epi->event.data,
+				       &events[eventcnt].data))
+				return -EFAULT;
+			if (epi->event.events & EPOLLONESHOT)
+				epi->event.events &= EP_PRIVATE_BITS;
+			eventcnt++;
+		}
+	}
+	return eventcnt;
+}
+
+
+/*
+ * Walk through the transfer list we collected with ep_collect_ready_items()
+ * and, if 1) the item is still "alive" 2) its event set is not empty 3) it's
+ * not already linked, link it to the ready list. Same as above, we are holding
+ * "sem" so items cannot vanish underneath our nose.
+ */
+static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
+{
+	int ricnt = 0, pwake = 0;
+	unsigned long flags;
+	struct epitem *epi;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	while (!list_empty(txlist)) {
+		epi = list_entry(txlist->next, struct epitem, txlink);
+
+		/* Unlink the current item from the transfer list */
+		EP_LIST_DEL(&epi->txlink);
+
+		/*
+		 * If the item is no longer linked to the interest set, we don't
+		 * have to push it inside the ready list because the following
+		 * ep_release_epitem() is going to drop it. Also, if the current
+		 * item is set to have an Edge Triggered behaviour, we don't have
+		 * to push it back either.
+		 */
+		if (EP_RB_LINKED(&epi->rbn) && !(epi->event.events & EPOLLET) &&
+		    (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) {
+			list_add_tail(&epi->rdllink, &ep->rdllist);
+			ricnt++;
+		}
+	}
+
+	if (ricnt) {
+		/*
+		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
+		 * wait list.
+		 */
+		if (waitqueue_active(&ep->wq))
+			wake_up(&ep->wq);
+		if (waitqueue_active(&ep->poll_wait))
+			pwake++;
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+}
+
+
+/*
+ * Perform the transfer of events to user space.
+ */
+static int ep_events_transfer(struct eventpoll *ep,
+			      struct epoll_event __user *events, int maxevents)
+{
+	int eventcnt = 0;
+	struct list_head txlist;
+
+	INIT_LIST_HEAD(&txlist);
+
+	/*
+	 * We need to lock this because we could be hit by
+	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+	 */
+	down_read(&ep->sem);
+
+	/* Collect/extract ready items */
+	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
+		/* Build result set in userspace */
+		eventcnt = ep_send_events(ep, &txlist, events);
+
+		/* Reinject ready items into the ready list */
+		ep_reinject_items(ep, &txlist);
+	}
+
+	up_read(&ep->sem);
+
+	return eventcnt;
+}
+
+
+static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+		   int maxevents, long timeout)
+{
+	int res, eavail;
+	unsigned long flags;
+	long jtimeout;
+	wait_queue_t wait;
+
+	/*
+	 * Calculate the timeout by checking for the "infinite" value ( -1 )
+	 * and the overflow condition. The passed timeout is in milliseconds,
+	 * hence the (t * HZ) / 1000.
+	 */
+	jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ?
+		MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000;
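+	/*
+	 * Worked example of the formula above, assuming HZ == 100: a 500 ms
+	 * timeout becomes (500 * 100 + 999) / 1000 == 50 jiffies, while a 1 ms
+	 * timeout becomes (1 * 100 + 999) / 1000 == 1 jiffy ( 10 ms ); the
+	 * "+ 999" term rounds up, so we never sleep less than requested.
+	 */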
+
+retry:
+	write_lock_irqsave(&ep->lock, flags);
+
+	res = 0;
+	if (list_empty(&ep->rdllist)) {
+		/*
+		 * We don't have any available event to return to the caller.
+		 * We need to sleep here, and we will be woken up by
+		 * ep_poll_callback() when events become available.
+		 */
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&ep->wq, &wait);
+
+		for (;;) {
+			/*
+			 * We don't want to sleep if the ep_poll_callback() sends us
+			 * a wakeup in between. That's why we set the task state
+			 * to TASK_INTERRUPTIBLE before doing the checks.
+			 */
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (!list_empty(&ep->rdllist) || !jtimeout)
+				break;
+			if (signal_pending(current)) {
+				res = -EINTR;
+				break;
+			}
+
+			write_unlock_irqrestore(&ep->lock, flags);
+			jtimeout = schedule_timeout(jtimeout);
+			write_lock_irqsave(&ep->lock, flags);
+		}
+		remove_wait_queue(&ep->wq, &wait);
+
+		set_current_state(TASK_RUNNING);
+	}
+
+	/* Is it worth trying to dig for events ? */
+	eavail = !list_empty(&ep->rdllist);
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/*
+	 * Try to transfer events to user space. In case we get 0 events and
+	 * there's still timeout left over, we try again in search of
+	 * more luck.
+	 */
+	if (!res && eavail &&
+	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
+		goto retry;
+
+	return res;
+}
+
+
+static int eventpollfs_delete_dentry(struct dentry *dentry)
+{
+
+	return 1;
+}
+
+
+static struct inode *ep_eventpoll_inode(void)
+{
+	int error = -ENOMEM;
+	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
+
+	if (!inode)
+		goto eexit_1;
+
+	inode->i_fop = &eventpoll_fops;
+
+	/*
+	 * Mark the inode dirty from the very beginning,
+	 * that way it will never be moved to the dirty
+	 * list because mark_inode_dirty() will think
+	 * that it already _is_ on the dirty list.
+	 */
+	inode->i_state = I_DIRTY;
+	inode->i_mode = S_IRUSR | S_IWUSR;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = current->fsgid;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_blksize = PAGE_SIZE;
+	return inode;
+
+eexit_1:
+	return ERR_PTR(error);
+}
+
+
+static struct super_block *
+eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
+		   const char *dev_name, void *data)
+{
+	return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC);
+}
+
+
+static int __init eventpoll_init(void)
+{
+	int error;
+
+	init_MUTEX(&epsem);
+
+	/* Initialize the structure used to perform safe poll wait head wake ups */
+	ep_poll_safewake_init(&psw);
+
+	/* Allocates slab cache used to allocate "struct epitem" items */
+	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
+			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
+			NULL, NULL);
+
+	/* Allocates slab cache used to allocate "struct eppoll_entry" */
+	pwq_cache = kmem_cache_create("eventpoll_pwq",
+			sizeof(struct eppoll_entry), 0,
+			EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
+
+	/*
+	 * Register the virtual file system that will be the source of inodes
+	 * for the eventpoll files
+	 */
+	error = register_filesystem(&eventpoll_fs_type);
+	if (error)
+		goto epanic;
+
+	/* Mount the virtual file system registered above */
+	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
+	error = PTR_ERR(eventpoll_mnt);
+	if (IS_ERR(eventpoll_mnt))
+		goto epanic;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
+			current));
+	return 0;
+
+epanic:
+	panic("eventpoll_init() failed\n");
+}
+
+
+static void __exit eventpoll_exit(void)
+{
+	/* Undo all operations done inside eventpoll_init() */
+	unregister_filesystem(&eventpoll_fs_type);
+	mntput(eventpoll_mnt);
+	kmem_cache_destroy(pwq_cache);
+	kmem_cache_destroy(epi_cache);
+}
+
+module_init(eventpoll_init);
+module_exit(eventpoll_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/fs/exec.c b/fs/exec.c
new file mode 100644
index 0000000..a839449
--- /dev/null
+++ b/fs/exec.c
@@ -0,0 +1,1498 @@
+/*
+ *  linux/fs/exec.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * #!-checking implemented by tytso.
+ */
+/*
+ * Demand-loading implemented 01.12.91 - no need to read anything but
+ * the header into memory. The inode of the executable is put into
+ * "current->executable", and page faults do the actual loading. Clean.
+ *
+ * Once more I can proudly say that linux stood up to being changed: it
+ * was less than 2 hours work to get demand-loading completely implemented.
+ *
+ * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
+ * current->executable is only used by the procfs.  This allows a dispatch
+ * table to check for several different types  of binary formats.  We keep
+ * trying until we recognize the file or we run out of supported binary
+ * formats. 
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/key.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/swap.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/rmap.h>
+#include <linux/acct.h>
+
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+int core_uses_pid;
+char core_pattern[65] = "core";
+/* The maximal length of core_pattern is also specified in sysctl.c */
+
+static struct linux_binfmt *formats;
+static DEFINE_RWLOCK(binfmt_lock);
+
+int register_binfmt(struct linux_binfmt * fmt)
+{
+	struct linux_binfmt ** tmp = &formats;
+
+	if (!fmt)
+		return -EINVAL;
+	if (fmt->next)
+		return -EBUSY;
+	write_lock(&binfmt_lock);
+	while (*tmp) {
+		if (fmt == *tmp) {
+			write_unlock(&binfmt_lock);
+			return -EBUSY;
+		}
+		tmp = &(*tmp)->next;
+	}
+	fmt->next = formats;
+	formats = fmt;
+	write_unlock(&binfmt_lock);
+	return 0;	
+}
+
+EXPORT_SYMBOL(register_binfmt);
+
+int unregister_binfmt(struct linux_binfmt * fmt)
+{
+	struct linux_binfmt ** tmp = &formats;
+
+	write_lock(&binfmt_lock);
+	while (*tmp) {
+		if (fmt == *tmp) {
+			*tmp = fmt->next;
+			write_unlock(&binfmt_lock);
+			return 0;
+		}
+		tmp = &(*tmp)->next;
+	}
+	write_unlock(&binfmt_lock);
+	return -EINVAL;
+}
+
+EXPORT_SYMBOL(unregister_binfmt);
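+
+/*
+ * Minimal sketch of a binary-format handler registration ( illustrative
+ * only: "my_load_binary" and the ->load_binary hook are assumptions here,
+ * since only ->module, ->next and ->load_shlib appear in this file ):
+ *
+ *	static int my_load_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+ *
+ *	static struct linux_binfmt my_format = {
+ *		.module      = THIS_MODULE,
+ *		.load_binary = my_load_binary,
+ *	};
+ *
+ *	static int __init my_init(void)
+ *	{
+ *		return register_binfmt(&my_format);
+ *	}
+ */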
+
+static inline void put_binfmt(struct linux_binfmt * fmt)
+{
+	module_put(fmt->module);
+}
+
+/*
+ * Note that a shared library must be both readable and executable due to
+ * security reasons.
+ *
+ * Also note that we take the address to load from from the file itself.
+ */
+asmlinkage long sys_uselib(const char __user * library)
+{
+	struct file * file;
+	struct nameidata nd;
+	int error;
+
+	nd.intent.open.flags = FMODE_READ;
+	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+	if (error)
+		goto out;
+
+	error = -EINVAL;
+	if (!S_ISREG(nd.dentry->d_inode->i_mode))
+		goto exit;
+
+	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
+	if (error)
+		goto exit;
+
+	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+	error = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+
+	error = -ENOEXEC;
+	if(file->f_op) {
+		struct linux_binfmt * fmt;
+
+		read_lock(&binfmt_lock);
+		for (fmt = formats ; fmt ; fmt = fmt->next) {
+			if (!fmt->load_shlib)
+				continue;
+			if (!try_module_get(fmt->module))
+				continue;
+			read_unlock(&binfmt_lock);
+			error = fmt->load_shlib(file);
+			read_lock(&binfmt_lock);
+			put_binfmt(fmt);
+			if (error != -ENOEXEC)
+				break;
+		}
+		read_unlock(&binfmt_lock);
+	}
+	fput(file);
+out:
+  	return error;
+exit:
+	path_release(&nd);
+	goto out;
+}
+
+/*
+ * count() counts the number of strings in array ARGV.
+ */
+static int count(char __user * __user * argv, int max)
+{
+	int i = 0;
+
+	if (argv != NULL) {
+		for (;;) {
+			char __user * p;
+
+			if (get_user(p, argv))
+				return -EFAULT;
+			if (!p)
+				break;
+			argv++;
+			if(++i > max)
+				return -E2BIG;
+			cond_resched();
+		}
+	}
+	return i;
+}
+
+/*
+ * 'copy_strings()' copies argument/environment strings from user
+ * memory to free pages in kernel mem. These are in a format ready
+ * to be put directly into the top of new user memory.
+ */
+int copy_strings(int argc,char __user * __user * argv, struct linux_binprm *bprm)
+{
+	struct page *kmapped_page = NULL;
+	char *kaddr = NULL;
+	int ret;
+
+	while (argc-- > 0) {
+		char __user *str;
+		int len;
+		unsigned long pos;
+
+		if (get_user(str, argv+argc) ||
+				!(len = strnlen_user(str, bprm->p))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		if (bprm->p < len)  {
+			ret = -E2BIG;
+			goto out;
+		}
+
+		bprm->p -= len;
+		/* XXX: add architecture specific overflow check here. */
+		pos = bprm->p;
+
+		while (len > 0) {
+			int i, new, err;
+			int offset, bytes_to_copy;
+			struct page *page;
+
+			offset = pos % PAGE_SIZE;
+			i = pos/PAGE_SIZE;
+			page = bprm->page[i];
+			new = 0;
+			if (!page) {
+				page = alloc_page(GFP_HIGHUSER);
+				bprm->page[i] = page;
+				if (!page) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				new = 1;
+			}
+
+			if (page != kmapped_page) {
+				if (kmapped_page)
+					kunmap(kmapped_page);
+				kmapped_page = page;
+				kaddr = kmap(kmapped_page);
+			}
+			if (new && offset)
+				memset(kaddr, 0, offset);
+			bytes_to_copy = PAGE_SIZE - offset;
+			if (bytes_to_copy > len) {
+				bytes_to_copy = len;
+				if (new)
+					memset(kaddr+offset+len, 0,
+						PAGE_SIZE-offset-len);
+			}
+			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
+			if (err) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			pos += bytes_to_copy;
+			str += bytes_to_copy;
+			len -= bytes_to_copy;
+		}
+	}
+	ret = 0;
+out:
+	if (kmapped_page)
+		kunmap(kmapped_page);
+	return ret;
+}
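+
+/*
+ * Worked example of the page arithmetic in the copy loop above, assuming
+ * PAGE_SIZE == 4096: with pos == 8180 we get i == 1 and offset == 4084, so
+ * at most 12 bytes land in bprm->page[1] on that pass; the next pass
+ * continues at pos == 8192, i.e. page 2, offset 0.
+ */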
+
+/*
+ * Like copy_strings, but get argv and its values from kernel memory.
+ */
+int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
+{
+	int r;
+	mm_segment_t oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	r = copy_strings(argc, (char __user * __user *)argv, bprm);
+	set_fs(oldfs);
+	return r;
+}
+
+EXPORT_SYMBOL(copy_strings_kernel);
+
+#ifdef CONFIG_MMU
+/*
+ * This routine is used to map in a page into an address space: needed by
+ * execve() for the initial stack and environment pages.
+ *
+ * vma->vm_mm->mmap_sem is held for writing.
+ */
+void install_arg_page(struct vm_area_struct *vma,
+			struct page *page, unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t * pgd;
+	pud_t * pud;
+	pmd_t * pmd;
+	pte_t * pte;
+
+	if (unlikely(anon_vma_prepare(vma)))
+		goto out_sig;
+
+	flush_dcache_page(page);
+	pgd = pgd_offset(mm, address);
+
+	spin_lock(&mm->page_table_lock);
+	pud = pud_alloc(mm, pgd, address);
+	if (!pud)
+		goto out;
+	pmd = pmd_alloc(mm, pud, address);
+	if (!pmd)
+		goto out;
+	pte = pte_alloc_map(mm, pmd, address);
+	if (!pte)
+		goto out;
+	if (!pte_none(*pte)) {
+		pte_unmap(pte);
+		goto out;
+	}
+	inc_mm_counter(mm, rss);
+	lru_cache_add_active(page);
+	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
+					page, vma->vm_page_prot))));
+	page_add_anon_rmap(page, vma, address);
+	pte_unmap(pte);
+	spin_unlock(&mm->page_table_lock);
+
+	/* no need for flush_tlb */
+	return;
+out:
+	spin_unlock(&mm->page_table_lock);
+out_sig:
+	__free_page(page);
+	force_sig(SIGKILL, current);
+}
+
+#define EXTRA_STACK_VM_PAGES	20	/* random */
+
+int setup_arg_pages(struct linux_binprm *bprm,
+		    unsigned long stack_top,
+		    int executable_stack)
+{
+	unsigned long stack_base;
+	struct vm_area_struct *mpnt;
+	struct mm_struct *mm = current->mm;
+	int i, ret;
+	long arg_size;
+
+#ifdef CONFIG_STACK_GROWSUP
+	/* Move the argument and environment strings to the bottom of the
+	 * stack space.
+	 */
+	int offset, j;
+	char *to, *from;
+
+	/* Start by shifting all the pages down */
+	i = 0;
+	for (j = 0; j < MAX_ARG_PAGES; j++) {
+		struct page *page = bprm->page[j];
+		if (!page)
+			continue;
+		bprm->page[i++] = page;
+	}
+
+	/* Now move them within their pages */
+	offset = bprm->p % PAGE_SIZE;
+	to = kmap(bprm->page[0]);
+	for (j = 1; j < i; j++) {
+		memmove(to, to + offset, PAGE_SIZE - offset);
+		from = kmap(bprm->page[j]);
+		memcpy(to + PAGE_SIZE - offset, from, offset);
+		kunmap(bprm->page[j - 1]);
+		to = from;
+	}
+	memmove(to, to + offset, PAGE_SIZE - offset);
+	kunmap(bprm->page[j - 1]);
+
+	/* Limit stack size to 1GB */
+	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
+	if (stack_base > (1 << 30))
+		stack_base = 1 << 30;
+	stack_base = PAGE_ALIGN(stack_top - stack_base);
+
+	/* Adjust bprm->p to point to the end of the strings. */
+	bprm->p = stack_base + PAGE_SIZE * i - offset;
+
+	mm->arg_start = stack_base;
+	arg_size = i << PAGE_SHIFT;
+
+	/* zero pages that were copied above */
+	while (i < MAX_ARG_PAGES)
+		bprm->page[i++] = NULL;
+#else
+	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
+	stack_base = PAGE_ALIGN(stack_base);
+	bprm->p += stack_base;
+	mm->arg_start = bprm->p;
+	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
+#endif
+
+	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+
+	if (bprm->loader)
+		bprm->loader += stack_base;
+	bprm->exec += stack_base;
+
+	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (!mpnt)
+		return -ENOMEM;
+
+	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
+		kmem_cache_free(vm_area_cachep, mpnt);
+		return -ENOMEM;
+	}
+
+	memset(mpnt, 0, sizeof(*mpnt));
+
+	down_write(&mm->mmap_sem);
+	{
+		mpnt->vm_mm = mm;
+#ifdef CONFIG_STACK_GROWSUP
+		mpnt->vm_start = stack_base;
+		mpnt->vm_end = stack_base + arg_size;
+#else
+		mpnt->vm_end = stack_top;
+		mpnt->vm_start = mpnt->vm_end - arg_size;
+#endif
+		/* Adjust stack execute permissions; explicitly enable
+		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
+		 * and leave alone (arch default) otherwise. */
+		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
+			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
+		else if (executable_stack == EXSTACK_DISABLE_X)
+			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
+		else
+			mpnt->vm_flags = VM_STACK_FLAGS;
+		mpnt->vm_flags |= mm->def_flags;
+		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
+		if ((ret = insert_vm_struct(mm, mpnt))) {
+			up_write(&mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return ret;
+		}
+		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
+	}
+
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page *page = bprm->page[i];
+		if (page) {
+			bprm->page[i] = NULL;
+			install_arg_page(mpnt, page, stack_base);
+		}
+		stack_base += PAGE_SIZE;
+	}
+	up_write(&mm->mmap_sem);
+	
+	return 0;
+}
+
+EXPORT_SYMBOL(setup_arg_pages);
+
+#define free_arg_pages(bprm) do { } while (0)
+
+#else
+
+static inline void free_arg_pages(struct linux_binprm *bprm)
+{
+	int i;
+
+	for (i = 0; i < MAX_ARG_PAGES; i++) {
+		if (bprm->page[i])
+			__free_page(bprm->page[i]);
+		bprm->page[i] = NULL;
+	}
+}
+
+#endif /* CONFIG_MMU */
+
+struct file *open_exec(const char *name)
+{
+	struct nameidata nd;
+	int err;
+	struct file *file;
+
+	nd.intent.open.flags = FMODE_READ;
+	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+	file = ERR_PTR(err);
+
+	if (!err) {
+		struct inode *inode = nd.dentry->d_inode;
+		file = ERR_PTR(-EACCES);
+		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
+		    S_ISREG(inode->i_mode)) {
+			int err = permission(inode, MAY_EXEC, &nd);
+			if (!err && !(inode->i_mode & 0111))
+				err = -EACCES;
+			file = ERR_PTR(err);
+			if (!err) {
+				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+				if (!IS_ERR(file)) {
+					err = deny_write_access(file);
+					if (err) {
+						fput(file);
+						file = ERR_PTR(err);
+					}
+				}
+out:
+				return file;
+			}
+		}
+		path_release(&nd);
+	}
+	goto out;
+}
+
+EXPORT_SYMBOL(open_exec);
+
+int kernel_read(struct file *file, unsigned long offset,
+	char *addr, unsigned long count)
+{
+	mm_segment_t old_fs;
+	loff_t pos = offset;
+	int result;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	/* The cast to a user pointer is valid due to the set_fs() */
+	result = vfs_read(file, (void __user *)addr, count, &pos);
+	set_fs(old_fs);
+	return result;
+}
+
+EXPORT_SYMBOL(kernel_read);
+
+static int exec_mmap(struct mm_struct *mm)
+{
+	struct task_struct *tsk;
+	struct mm_struct * old_mm, *active_mm;
+
+	/* Notify parent that we're no longer interested in the old VM */
+	tsk = current;
+	old_mm = current->mm;
+	mm_release(tsk, old_mm);
+
+	if (old_mm) {
+		/*
+		 * Make sure that if there is a core dump in progress
+		 * for the old mm, we get out and die instead of going
+		 * through with the exec.  We must hold mmap_sem around
+		 * checking core_waiters and changing tsk->mm.  The
+		 * core-inducing thread will increment core_waiters for
+		 * each thread whose ->mm == old_mm.
+		 */
+		down_read(&old_mm->mmap_sem);
+		if (unlikely(old_mm->core_waiters)) {
+			up_read(&old_mm->mmap_sem);
+			return -EINTR;
+		}
+	}
+	task_lock(tsk);
+	active_mm = tsk->active_mm;
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	activate_mm(active_mm, mm);
+	task_unlock(tsk);
+	arch_pick_mmap_layout(mm);
+	if (old_mm) {
+		up_read(&old_mm->mmap_sem);
+		if (active_mm != old_mm) BUG();
+		mmput(old_mm);
+		return 0;
+	}
+	mmdrop(active_mm);
+	return 0;
+}
+
+/*
+ * This function makes sure the current process has its own signal table,
+ * so that flush_signal_handlers can later reset the handlers without
+ * disturbing other processes.  (Other processes might share the signal
+ * table via the CLONE_SIGHAND option to clone().)
+ */
+static inline int de_thread(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
+	spinlock_t *lock = &oldsighand->siglock;
+	int count;
+
+	/*
+	 * If we don't share sighandlers, then we aren't sharing anything
+	 * and we can just re-use it all.
+	 */
+	if (atomic_read(&oldsighand->count) <= 1) {
+		BUG_ON(atomic_read(&sig->count) != 1);
+		exit_itimers(sig);
+		return 0;
+	}
+
+	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+	if (!newsighand)
+		return -ENOMEM;
+
+	if (thread_group_empty(current))
+		goto no_thread_group;
+
+	/*
+	 * Kill all other threads in the thread group.
+	 * We must hold tasklist_lock to call zap_other_threads.
+	 */
+	read_lock(&tasklist_lock);
+	spin_lock_irq(lock);
+	if (sig->flags & SIGNAL_GROUP_EXIT) {
+		/*
+		 * Another group action in progress, just
+		 * return so that the signal is processed.
+		 */
+		spin_unlock_irq(lock);
+		read_unlock(&tasklist_lock);
+		kmem_cache_free(sighand_cachep, newsighand);
+		return -EAGAIN;
+	}
+	zap_other_threads(current);
+	read_unlock(&tasklist_lock);
+
+	/*
+	 * Account for the thread group leader hanging around:
+	 */
+	count = 2;
+	if (thread_group_leader(current))
+		count = 1;
+	while (atomic_read(&sig->count) > count) {
+		sig->group_exit_task = current;
+		sig->notify_count = count;
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(lock);
+		schedule();
+		spin_lock_irq(lock);
+	}
+	sig->group_exit_task = NULL;
+	sig->notify_count = 0;
+	spin_unlock_irq(lock);
+
+	/*
+	 * At this point all other threads have exited, all we have to
+	 * do is to wait for the thread group leader to become inactive,
+	 * and to assume its PID:
+	 */
+	if (!thread_group_leader(current)) {
+		struct task_struct *leader = current->group_leader, *parent;
+		struct dentry *proc_dentry1, *proc_dentry2;
+		unsigned long exit_state, ptrace;
+
+		/*
+		 * Wait for the thread group leader to be a zombie.
+		 * It should already be zombie at this point, most
+		 * of the time.
+		 */
+		while (leader->exit_state != EXIT_ZOMBIE)
+			yield();
+
+		spin_lock(&leader->proc_lock);
+		spin_lock(&current->proc_lock);
+		proc_dentry1 = proc_pid_unhash(current);
+		proc_dentry2 = proc_pid_unhash(leader);
+		write_lock_irq(&tasklist_lock);
+
+		if (leader->tgid != current->tgid)
+			BUG();
+		if (current->pid == current->tgid)
+			BUG();
+		/*
+		 * An exec() starts a new thread group with the
+		 * TGID of the previous thread group. Rehash the
+		 * two threads with a switched PID, and release
+		 * the former thread group leader:
+		 */
+		ptrace = leader->ptrace;
+		parent = leader->parent;
+		if (unlikely(ptrace) && unlikely(parent == current)) {
+			/*
+			 * Joker was ptracing his own group leader,
+			 * and now he wants to be his own parent!
+			 * We can't have that.
+			 */
+			ptrace = 0;
+		}
+
+		ptrace_unlink(current);
+		ptrace_unlink(leader);
+		remove_parent(current);
+		remove_parent(leader);
+
+		switch_exec_pids(leader, current);
+
+		current->parent = current->real_parent = leader->real_parent;
+		leader->parent = leader->real_parent = child_reaper;
+		current->group_leader = current;
+		leader->group_leader = leader;
+
+		add_parent(current, current->parent);
+		add_parent(leader, leader->parent);
+		if (ptrace) {
+			current->ptrace = ptrace;
+			__ptrace_link(current, parent);
+		}
+
+		list_del(&current->tasks);
+		list_add_tail(&current->tasks, &init_task.tasks);
+		current->exit_signal = SIGCHLD;
+		exit_state = leader->exit_state;
+
+		write_unlock_irq(&tasklist_lock);
+		spin_unlock(&leader->proc_lock);
+		spin_unlock(&current->proc_lock);
+		proc_pid_flush(proc_dentry1);
+		proc_pid_flush(proc_dentry2);
+
+		if (exit_state != EXIT_ZOMBIE)
+			BUG();
+		release_task(leader);
+        }
+
+	/*
+	 * Now there are really no other threads at all,
+	 * so it's safe to stop telling them to kill themselves.
+	 */
+	sig->flags = 0;
+
+no_thread_group:
+	BUG_ON(atomic_read(&sig->count) != 1);
+	exit_itimers(sig);
+
+	if (atomic_read(&oldsighand->count) == 1) {
+		/*
+		 * Now that we nuked the rest of the thread group,
+		 * it turns out we are not sharing sighand any more either.
+		 * So we can just keep it.
+		 */
+		kmem_cache_free(sighand_cachep, newsighand);
+	} else {
+		/*
+		 * Move our state over to newsighand and switch it in.
+		 */
+		spin_lock_init(&newsighand->siglock);
+		atomic_set(&newsighand->count, 1);
+		memcpy(newsighand->action, oldsighand->action,
+		       sizeof(newsighand->action));
+
+		write_lock_irq(&tasklist_lock);
+		spin_lock(&oldsighand->siglock);
+		spin_lock(&newsighand->siglock);
+
+		current->sighand = newsighand;
+		recalc_sigpending();
+
+		spin_unlock(&newsighand->siglock);
+		spin_unlock(&oldsighand->siglock);
+		write_unlock_irq(&tasklist_lock);
+
+		if (atomic_dec_and_test(&oldsighand->count))
+			kmem_cache_free(sighand_cachep, oldsighand);
+	}
+
+	if (!thread_group_empty(current))
+		BUG();
+	if (!thread_group_leader(current))
+		BUG();
+	return 0;
+}
+	
+/*
+ * These functions flush out all traces of the currently running executable
+ * so that a new one can be started.
+ */
+
+static inline void flush_old_files(struct files_struct * files)
+{
+	long j = -1;
+
+	spin_lock(&files->file_lock);
+	for (;;) {
+		unsigned long set, i;
+
+		j++;
+		i = j * __NFDBITS;
+		if (i >= files->max_fds || i >= files->max_fdset)
+			break;
+		set = files->close_on_exec->fds_bits[j];
+		if (!set)
+			continue;
+		files->close_on_exec->fds_bits[j] = 0;
+		spin_unlock(&files->file_lock);
+		for ( ; set ; i++,set >>= 1) {
+			if (set & 1) {
+				sys_close(i);
+			}
+		}
+		spin_lock(&files->file_lock);
+
+	}
+	spin_unlock(&files->file_lock);
+}
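+
+/*
+ * Editor's aside (illustrative sketch, not part of the original patch):
+ * flush_old_files() is the kernel side of the close-on-exec flag.  The
+ * userspace view of the same behaviour, using only standard POSIX calls
+ * (the file name is hypothetical):
+ */
+#if 0	/* illustrative only, never compiled */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(void)
+{
+	int fd = open("/tmp/example", O_RDONLY);
+
+	if (fd >= 0) {
+		/* Mark the descriptor close-on-exec; the exec path above
+		 * (flush_old_files) will close it during execve(). */
+		fcntl(fd, F_SETFD, FD_CLOEXEC);
+		execl("/bin/true", "true", (char *)NULL);
+	}
+	return 1;
+}
+#endif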
+
+void get_task_comm(char *buf, struct task_struct *tsk)
+{
+	/* buf must be at least sizeof(tsk->comm) in size */
+	task_lock(tsk);
+	strncpy(buf, tsk->comm, sizeof(tsk->comm));
+	task_unlock(tsk);
+}
+
+void set_task_comm(struct task_struct *tsk, char *buf)
+{
+	task_lock(tsk);
+	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
+	task_unlock(tsk);
+}
+
+int flush_old_exec(struct linux_binprm * bprm)
+{
+	char * name;
+	int i, ch, retval;
+	struct files_struct *files;
+	char tcomm[sizeof(current->comm)];
+
+	/*
+	 * Make sure we have a private signal table and that
+	 * we are unassociated from the previous thread group.
+	 */
+	retval = de_thread(current);
+	if (retval)
+		goto out;
+
+	/*
+	 * Make sure we have private file handles. Ask the
+	 * fork helper to do the work for us and the exit
+	 * helper to do the cleanup of the old one.
+	 */
+	files = current->files;		/* refcounted so safe to hold */
+	retval = unshare_files();
+	if (retval)
+		goto out;
+	/*
+	 * Release all of the old mmap stuff
+	 */
+	retval = exec_mmap(bprm->mm);
+	if (retval)
+		goto mmap_failed;
+
+	bprm->mm = NULL;		/* We're using it now */
+
+	/* This is the point of no return */
+	steal_locks(files);
+	put_files_struct(files);
+
+	current->sas_ss_sp = current->sas_ss_size = 0;
+
+	if (current->euid == current->uid && current->egid == current->gid)
+		current->mm->dumpable = 1;
+	name = bprm->filename;
+	for (i=0; (ch = *(name++)) != '\0';) {
+		if (ch == '/')
+			i = 0;
+		else
+			if (i < (sizeof(tcomm) - 1))
+				tcomm[i++] = ch;
+	}
+	tcomm[i] = '\0';
+	set_task_comm(current, tcomm);
+
+	current->flags &= ~PF_RANDOMIZE;
+	flush_thread();
+
+	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid || 
+	    permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
+	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+		suid_keys(current);
+		current->mm->dumpable = 0;
+	}
+
+	/* An exec changes our domain. We are no longer part of the thread
+	   group */
+
+	current->self_exec_id++;
+			
+	flush_signal_handlers(current, 0);
+	flush_old_files(current->files);
+
+	return 0;
+
+mmap_failed:
+	put_files_struct(current->files);
+	current->files = files;
+out:
+	return retval;
+}
+
+EXPORT_SYMBOL(flush_old_exec);
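+
+/*
+ * Editor's aside (not part of the original patch): the tcomm loop in
+ * flush_old_exec() above keeps only the last path component of
+ * bprm->filename, truncated to fit task_struct.comm (16 bytes including
+ * the terminating NUL here), e.g.
+ *	"/usr/bin/some-very-long-name" -> comm "some-very-long-"
+ */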
+
+/* 
+ * Fill the binprm structure from the inode. 
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+ */
+int prepare_binprm(struct linux_binprm *bprm)
+{
+	int mode;
+	struct inode * inode = bprm->file->f_dentry->d_inode;
+	int retval;
+
+	mode = inode->i_mode;
+	/*
+	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
+	 * generic_permission lets a non-executable through
+	 */
+	if (!(mode & 0111))	/* with at least _one_ execute bit set */
+		return -EACCES;
+	if (bprm->file->f_op == NULL)
+		return -EACCES;
+
+	bprm->e_uid = current->euid;
+	bprm->e_gid = current->egid;
+
+	if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
+		/* Set-uid? */
+		if (mode & S_ISUID) {
+			current->personality &= ~PER_CLEAR_ON_SETID;
+			bprm->e_uid = inode->i_uid;
+		}
+
+		/* Set-gid? */
+		/*
+		 * If setgid is set but no group execute bit then this
+		 * is a candidate for mandatory locking, not a setgid
+		 * executable.
+		 */
+		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+			current->personality &= ~PER_CLEAR_ON_SETID;
+			bprm->e_gid = inode->i_gid;
+		}
+	}
+
+	/* fill in binprm security blob */
+	retval = security_bprm_set(bprm);
+	if (retval)
+		return retval;
+
+	memset(bprm->buf,0,BINPRM_BUF_SIZE);
+	return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
+}
+
+EXPORT_SYMBOL(prepare_binprm);
+
+static inline int unsafe_exec(struct task_struct *p)
+{
+	int unsafe = 0;
+	if (p->ptrace & PT_PTRACED) {
+		if (p->ptrace & PT_PTRACE_CAP)
+			unsafe |= LSM_UNSAFE_PTRACE_CAP;
+		else
+			unsafe |= LSM_UNSAFE_PTRACE;
+	}
+	if (atomic_read(&p->fs->count) > 1 ||
+	    atomic_read(&p->files->count) > 1 ||
+	    atomic_read(&p->sighand->count) > 1)
+		unsafe |= LSM_UNSAFE_SHARE;
+
+	return unsafe;
+}
+
+void compute_creds(struct linux_binprm *bprm)
+{
+	int unsafe;
+
+	if (bprm->e_uid != current->uid)
+		suid_keys(current);
+	exec_keys(current);
+
+	task_lock(current);
+	unsafe = unsafe_exec(current);
+	security_bprm_apply_creds(bprm, unsafe);
+	task_unlock(current);
+	security_bprm_post_apply_creds(bprm);
+}
+
+EXPORT_SYMBOL(compute_creds);
+
+void remove_arg_zero(struct linux_binprm *bprm)
+{
+	if (bprm->argc) {
+		unsigned long offset;
+		char * kaddr;
+		struct page *page;
+
+		offset = bprm->p % PAGE_SIZE;
+		goto inside;
+
+		while (bprm->p++, *(kaddr+offset++)) {
+			if (offset != PAGE_SIZE)
+				continue;
+			offset = 0;
+			kunmap_atomic(kaddr, KM_USER0);
+inside:
+			page = bprm->page[bprm->p/PAGE_SIZE];
+			kaddr = kmap_atomic(page, KM_USER0);
+		}
+		kunmap_atomic(kaddr, KM_USER0);
+		bprm->argc--;
+	}
+}
+
+EXPORT_SYMBOL(remove_arg_zero);
+
+/*
+ * cycle the list of binary formats handler, until one recognizes the image
+ */
+int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+{
+	int try,retval;
+	struct linux_binfmt *fmt;
+#ifdef __alpha__
+	/* handle /sbin/loader.. */
+	{
+	    struct exec * eh = (struct exec *) bprm->buf;
+
+	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
+		(eh->fh.f_flags & 0x3000) == 0x3000)
+	    {
+		struct file * file;
+		unsigned long loader;
+
+		allow_write_access(bprm->file);
+		fput(bprm->file);
+		bprm->file = NULL;
+
+	        loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+		file = open_exec("/sbin/loader");
+		retval = PTR_ERR(file);
+		if (IS_ERR(file))
+			return retval;
+
+		/* Remember if the application is TASO.  */
+		bprm->sh_bang = eh->ah.entry < 0x100000000UL;
+
+		bprm->file = file;
+		bprm->loader = loader;
+		retval = prepare_binprm(bprm);
+		if (retval<0)
+			return retval;
+		/* should call search_binary_handler recursively here,
+		   but it does not matter */
+	    }
+	}
+#endif
+	retval = security_bprm_check(bprm);
+	if (retval)
+		return retval;
+
+	/* kernel module loader fixup */
+	/* so we don't try to run modprobe in kernel space. */
+	set_fs(USER_DS);
+	retval = -ENOENT;
+	for (try=0; try<2; try++) {
+		read_lock(&binfmt_lock);
+		for (fmt = formats ; fmt ; fmt = fmt->next) {
+			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
+			if (!fn)
+				continue;
+			if (!try_module_get(fmt->module))
+				continue;
+			read_unlock(&binfmt_lock);
+			retval = fn(bprm, regs);
+			if (retval >= 0) {
+				put_binfmt(fmt);
+				allow_write_access(bprm->file);
+				if (bprm->file)
+					fput(bprm->file);
+				bprm->file = NULL;
+				current->did_exec = 1;
+				return retval;
+			}
+			read_lock(&binfmt_lock);
+			put_binfmt(fmt);
+			if (retval != -ENOEXEC || bprm->mm == NULL)
+				break;
+			if (!bprm->file) {
+				read_unlock(&binfmt_lock);
+				return retval;
+			}
+		}
+		read_unlock(&binfmt_lock);
+		if (retval != -ENOEXEC || bprm->mm == NULL) {
+			break;
+#ifdef CONFIG_KMOD
+		} else {
+#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
+			if (printable(bprm->buf[0]) &&
+			    printable(bprm->buf[1]) &&
+			    printable(bprm->buf[2]) &&
+			    printable(bprm->buf[3]))
+				break; /* -ENOEXEC */
+			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
+#endif
+		}
+	}
+	return retval;
+}
+
+EXPORT_SYMBOL(search_binary_handler);
+
+/*
+ * sys_execve() executes a new program.
+ */
+int do_execve(char * filename,
+	char __user *__user *argv,
+	char __user *__user *envp,
+	struct pt_regs * regs)
+{
+	struct linux_binprm *bprm;
+	struct file *file;
+	int retval;
+	int i;
+
+	retval = -ENOMEM;
+	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
+	if (!bprm)
+		goto out_ret;
+	memset(bprm, 0, sizeof(*bprm));
+
+	file = open_exec(filename);
+	retval = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out_kfree;
+
+	sched_exec();
+
+	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+	bprm->file = file;
+	bprm->filename = filename;
+	bprm->interp = filename;
+	bprm->mm = mm_alloc();
+	retval = -ENOMEM;
+	if (!bprm->mm)
+		goto out_file;
+
+	retval = init_new_context(current, bprm->mm);
+	if (retval < 0)
+		goto out_mm;
+
+	bprm->argc = count(argv, bprm->p / sizeof(void *));
+	if ((retval = bprm->argc) < 0)
+		goto out_mm;
+
+	bprm->envc = count(envp, bprm->p / sizeof(void *));
+	if ((retval = bprm->envc) < 0)
+		goto out_mm;
+
+	retval = security_bprm_alloc(bprm);
+	if (retval)
+		goto out;
+
+	retval = prepare_binprm(bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings_kernel(1, &bprm->filename, bprm);
+	if (retval < 0)
+		goto out;
+
+	bprm->exec = bprm->p;
+	retval = copy_strings(bprm->envc, envp, bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings(bprm->argc, argv, bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = search_binary_handler(bprm,regs);
+	if (retval >= 0) {
+		free_arg_pages(bprm);
+
+		/* execve success */
+		security_bprm_free(bprm);
+		acct_update_integrals(current);
+		update_mem_hiwater(current);
+		kfree(bprm);
+		return retval;
+	}
+
+out:
+	/* Something went wrong, return the inode and free the argument pages */
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page * page = bprm->page[i];
+		if (page)
+			__free_page(page);
+	}
+
+	if (bprm->security)
+		security_bprm_free(bprm);
+
+out_mm:
+	if (bprm->mm)
+		mmdrop(bprm->mm);
+
+out_file:
+	if (bprm->file) {
+		allow_write_access(bprm->file);
+		fput(bprm->file);
+	}
+
+out_kfree:
+	kfree(bprm);
+
+out_ret:
+	return retval;
+}
+
+int set_binfmt(struct linux_binfmt *new)
+{
+	struct linux_binfmt *old = current->binfmt;
+
+	if (new) {
+		if (!try_module_get(new->module))
+			return -1;
+	}
+	current->binfmt = new;
+	if (old)
+		module_put(old->module);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_binfmt);
+
+#define CORENAME_MAX_SIZE 64
+
+/* format_corename will inspect the pattern parameter, and output a
+ * name into corename, which must have space for at least
+ * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
+ */
+static void format_corename(char *corename, const char *pattern, long signr)
+{
+	const char *pat_ptr = pattern;
+	char *out_ptr = corename;
+	char *const out_end = corename + CORENAME_MAX_SIZE;
+	int rc;
+	int pid_in_pattern = 0;
+
+	/* Repeat as long as we have more pattern to process and more output
+	   space */
+	while (*pat_ptr) {
+		if (*pat_ptr != '%') {
+			if (out_ptr == out_end)
+				goto out;
+			*out_ptr++ = *pat_ptr++;
+		} else {
+			switch (*++pat_ptr) {
+			case 0:
+				goto out;
+			/* Double percent, output one percent */
+			case '%':
+				if (out_ptr == out_end)
+					goto out;
+				*out_ptr++ = '%';
+				break;
+			/* pid */
+			case 'p':
+				pid_in_pattern = 1;
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%d", current->tgid);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			/* uid */
+			case 'u':
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%d", current->uid);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			/* gid */
+			case 'g':
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%d", current->gid);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			/* signal that caused the coredump */
+			case 's':
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%ld", signr);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			/* UNIX time of coredump */
+			case 't': {
+				struct timeval tv;
+				do_gettimeofday(&tv);
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%lu", tv.tv_sec);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			}
+			/* hostname */
+			case 'h':
+				down_read(&uts_sem);
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%s", system_utsname.nodename);
+				up_read(&uts_sem);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			/* executable */
+			case 'e':
+				rc = snprintf(out_ptr, out_end - out_ptr,
+					      "%s", current->comm);
+				if (rc > out_end - out_ptr)
+					goto out;
+				out_ptr += rc;
+				break;
+			default:
+				break;
+			}
+			++pat_ptr;
+		}
+	}
+	/* Backward compatibility with core_uses_pid:
+	 *
+	 * If core_pattern does not include a %p (as is the default)
+	 * and core_uses_pid is set, then .%pid will be appended to
+	 * the filename */
+	if (!pid_in_pattern
+            && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
+		rc = snprintf(out_ptr, out_end - out_ptr,
+			      ".%d", current->tgid);
+		if (rc > out_end - out_ptr)
+			goto out;
+		out_ptr += rc;
+	}
+      out:
+	*out_ptr = 0;
+}
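+
+/*
+ * Editor's aside (illustrative sketch, not part of the original patch):
+ * a stripped-down userspace model of the %-expansion format_corename()
+ * performs, covering only %p and %e.  The pattern and comm values are
+ * hypothetical.
+ */
+#if 0	/* illustrative only, never compiled */
+#include <stdio.h>
+#include <unistd.h>
+
+static void expand_core_pattern(char *out, size_t len,
+				const char *pat, const char *comm)
+{
+	size_t n = 0;
+
+	while (*pat && n + 1 < len) {
+		if (pat[0] == '%' && pat[1] == 'p') {
+			/* %p: pid (tgid in the kernel version above) */
+			n += snprintf(out + n, len - n, "%d", (int)getpid());
+			pat += 2;
+		} else if (pat[0] == '%' && pat[1] == 'e') {
+			/* %e: executable name (current->comm above) */
+			n += snprintf(out + n, len - n, "%s", comm);
+			pat += 2;
+		} else {
+			out[n++] = *pat++;
+		}
+	}
+	out[n < len ? n : len - 1] = '\0';
+}
+
+int main(void)
+{
+	char name[65];	/* CORENAME_MAX_SIZE + 1 */
+
+	expand_core_pattern(name, sizeof(name), "core.%e.%p", "myprog");
+	puts(name);	/* e.g. "core.myprog.1234" */
+	return 0;
+}
+#endif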
+
+static void zap_threads (struct mm_struct *mm)
+{
+	struct task_struct *g, *p;
+	struct task_struct *tsk = current;
+	struct completion *vfork_done = tsk->vfork_done;
+	int traced = 0;
+
+	/*
+	 * Make sure nobody is waiting for us to release the VM,
+	 * otherwise we can deadlock when we wait on each other
+	 */
+	if (vfork_done) {
+		tsk->vfork_done = NULL;
+		complete(vfork_done);
+	}
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g,p)
+		if (mm == p->mm && p != tsk) {
+			force_sig_specific(SIGKILL, p);
+			mm->core_waiters++;
+			if (unlikely(p->ptrace) &&
+			    unlikely(p->parent->mm == mm))
+				traced = 1;
+		}
+	while_each_thread(g,p);
+
+	read_unlock(&tasklist_lock);
+
+	if (unlikely(traced)) {
+		/*
+		 * We are zapping a thread and the thread it ptraces.
+		 * If the tracee went into a ptrace stop for exit tracing,
+		 * we could deadlock since the tracer is waiting for this
+		 * coredump to finish.  Detach them so they can both die.
+		 */
+		write_lock_irq(&tasklist_lock);
+		do_each_thread(g,p) {
+			if (mm == p->mm && p != tsk &&
+			    p->ptrace && p->parent->mm == mm) {
+				__ptrace_unlink(p);
+			}
+		} while_each_thread(g,p);
+		write_unlock_irq(&tasklist_lock);
+	}
+}
+
+static void coredump_wait(struct mm_struct *mm)
+{
+	DECLARE_COMPLETION(startup_done);
+
+	mm->core_waiters++; /* let other threads block */
+	mm->core_startup_done = &startup_done;
+
+	/* give other threads a chance to run: */
+	yield();
+
+	zap_threads(mm);
+	if (--mm->core_waiters) {
+		up_write(&mm->mmap_sem);
+		wait_for_completion(&startup_done);
+	} else
+		up_write(&mm->mmap_sem);
+	BUG_ON(mm->core_waiters);
+}
+
+int do_coredump(long signr, int exit_code, struct pt_regs * regs)
+{
+	char corename[CORENAME_MAX_SIZE + 1];
+	struct mm_struct *mm = current->mm;
+	struct linux_binfmt * binfmt;
+	struct inode * inode;
+	struct file * file;
+	int retval = 0;
+
+	binfmt = current->binfmt;
+	if (!binfmt || !binfmt->core_dump)
+		goto fail;
+	down_write(&mm->mmap_sem);
+	if (!mm->dumpable) {
+		up_write(&mm->mmap_sem);
+		goto fail;
+	}
+	mm->dumpable = 0;
+	init_completion(&mm->core_done);
+	spin_lock_irq(&current->sighand->siglock);
+	current->signal->flags = SIGNAL_GROUP_EXIT;
+	current->signal->group_exit_code = exit_code;
+	spin_unlock_irq(&current->sighand->siglock);
+	coredump_wait(mm);
+
+	/*
+	 * Clear any false indication of pending signals that might
+	 * be seen by the filesystem code called to write the core file.
+	 */
+	current->signal->group_stop_count = 0;
+	clear_thread_flag(TIF_SIGPENDING);
+
+	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+		goto fail_unlock;
+
+	/*
+	 * lock_kernel() because format_corename() is controlled by sysctl, which
+	 * uses lock_kernel()
+	 */
+ 	lock_kernel();
+	format_corename(corename, core_pattern, signr);
+	unlock_kernel();
+	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+	if (IS_ERR(file))
+		goto fail_unlock;
+	inode = file->f_dentry->d_inode;
+	if (inode->i_nlink > 1)
+		goto close_fail;	/* multiple links - don't dump */
+	if (d_unhashed(file->f_dentry))
+		goto close_fail;
+
+	if (!S_ISREG(inode->i_mode))
+		goto close_fail;
+	if (!file->f_op)
+		goto close_fail;
+	if (!file->f_op->write)
+		goto close_fail;
+	if (do_truncate(file->f_dentry, 0) != 0)
+		goto close_fail;
+
+	retval = binfmt->core_dump(signr, regs, file);
+
+	if (retval)
+		current->signal->group_exit_code |= 0x80;
+close_fail:
+	filp_close(file, NULL);
+fail_unlock:
+	complete_all(&mm->core_done);
+fail:
+	return retval;
+}
diff --git a/fs/exportfs/Makefile b/fs/exportfs/Makefile
new file mode 100644
index 0000000..d7c5d4d
--- /dev/null
+++ b/fs/exportfs/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the filesystem export support routines.
+
+obj-$(CONFIG_EXPORTFS) += exportfs.o
+
+exportfs-objs := expfs.o
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
new file mode 100644
index 0000000..c49d625
--- /dev/null
+++ b/fs/exportfs/expfs.c
@@ -0,0 +1,540 @@
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/namei.h>
+
+struct export_operations export_op_default;
+
+#define	CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
+
+#define dprintk(fmt, args...) do{}while(0)
+
+/**
+ * find_exported_dentry - helper routine to implement export_operations->decode_fh
+ * @sb:		The &super_block identifying the filesystem
+ * @obj:	An opaque identifier of the object to be found - passed to
+ *		get_inode
+ * @parent:	An optional opaque identifier of the parent of the object.
+ * @acceptable:	A function used to test possible &dentries to see if they are
+ *		acceptable
+ * @context:	A parameter to @acceptable so that it knows on what basis to
+ *		judge.
+ *
+ * find_exported_dentry is the central helper routine to enable file systems
+ * to provide the decode_fh() export_operation.  Its main task is to take
+ * an &inode, find or create an appropriate &dentry structure, and possibly
+ * splice this into the dcache in the correct place.
+ *
+ * The decode_fh() operation provided by the filesystem should call
+ * find_exported_dentry() with the same parameters that it received except
+ * that instead of the file handle fragment, pointers to opaque identifiers
+ * for the object and optionally its parent are passed.  The default decode_fh
+ * routine passes one pointer to the start of the filehandle fragment, and
+ * one 8 bytes into the fragment.  It is expected that most filesystems will
+ * take this approach, though the offset to the parent identifier may well be
+ * different.
+ *
+ * find_exported_dentry() will call get_dentry to get a dentry pointer from
+ * the file system.  If any &dentry in the d_alias list is acceptable, it will
+ * be returned.  Otherwise find_exported_dentry() will attempt to splice a new
+ * &dentry into the dcache using get_name() and get_parent() to find the
+ * appropriate place.
+ */
+
+struct dentry *
+find_exported_dentry(struct super_block *sb, void *obj, void *parent,
+		     int (*acceptable)(void *context, struct dentry *de),
+		     void *context)
+{
+	struct dentry *result = NULL;
+	struct dentry *target_dir;
+	int err;
+	struct export_operations *nops = sb->s_export_op;
+	struct list_head *le, *head;
+	struct dentry *toput = NULL;
+	int noprogress;
+	char nbuf[NAME_MAX+1];
+
+	/*
+	 * Attempt to find the inode.
+	 */
+	result = CALL(sb->s_export_op,get_dentry)(sb,obj);
+	err = -ESTALE;
+	if (result == NULL)
+		goto err_out;
+	if (IS_ERR(result)) {
+		err = PTR_ERR(result);
+		goto err_out;
+	}
+	if (S_ISDIR(result->d_inode->i_mode) &&
+	    (result->d_flags & DCACHE_DISCONNECTED)) {
+		/* it is an unconnected directory, we must connect it */
+		;
+	} else {
+		if (acceptable(context, result))
+			return result;
+		if (S_ISDIR(result->d_inode->i_mode)) {
+			/* there is no other dentry, so fail */
+			goto err_result;
+		}
+		/* try any other aliases */
+		spin_lock(&dcache_lock);
+		head = &result->d_inode->i_dentry;
+		list_for_each(le, head) {
+			struct dentry *dentry = list_entry(le, struct dentry, d_alias);
+			dget_locked(dentry);
+			spin_unlock(&dcache_lock);
+			if (toput)
+				dput(toput);
+			toput = NULL;
+			if (dentry != result &&
+			    acceptable(context, dentry)) {
+				dput(result);
+				return dentry;
+			}
+			spin_lock(&dcache_lock);
+			toput = dentry;
+		}
+		spin_unlock(&dcache_lock);
+		if (toput)
+			dput(toput);
+	}			
+
+	/* It's a directory, or we are required to confirm the file's
+	 * location in the tree based on the parent information
+ 	 */
+	dprintk("find_exported_dentry: need to look harder for %s/%d\n",sb->s_id,*(int*)obj);
+	if (S_ISDIR(result->d_inode->i_mode))
+		target_dir = dget(result);
+	else {
+		if (parent == NULL)
+			goto err_result;
+
+		target_dir = CALL(sb->s_export_op,get_dentry)(sb,parent);
+		if (IS_ERR(target_dir))
+			err = PTR_ERR(target_dir);
+		if (target_dir == NULL || IS_ERR(target_dir))
+			goto err_result;
+	}
+	/*
+	 * Now we need to make sure that target_dir is properly connected.
+	 * It may already be, as the flag isn't always updated when connection
+	 * happens.
+	 * So, we walk up parent links until we find a connected directory,
+	 * or we run out of directories.  Then we find the parent, find
+	 * the name of the child in that parent, and do a lookup.
+	 * This should connect the child into the parent
+	 * We then repeat.
+	 */
+
+	/* it is possible that a confused file system might not let us complete 
+	 * the path to the root.  For example, if get_parent returns a directory
+	 * in which we cannot find a name for the child.  While this implies a
+	 * very sick filesystem we don't want it to cause knfsd to spin.  Hence
+	 * the noprogress counter.  If we go through the loop 10 times (2 is
+	 * probably enough) without getting anywhere, we just give up
+	 */
+	noprogress= 0;
+	while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
+		struct dentry *pd = target_dir;
+
+		dget(pd);
+		spin_lock(&pd->d_lock);
+		while (!IS_ROOT(pd) &&
+				(pd->d_parent->d_flags&DCACHE_DISCONNECTED)) {
+			struct dentry *parent = pd->d_parent;
+
+			dget(parent);
+			spin_unlock(&pd->d_lock);
+			dput(pd);
+			pd = parent;
+			spin_lock(&pd->d_lock);
+		}
+		spin_unlock(&pd->d_lock);
+
+		if (!IS_ROOT(pd)) {
+			/* must have found a connected parent - great */
+			spin_lock(&pd->d_lock);
+			pd->d_flags &= ~DCACHE_DISCONNECTED;
+			spin_unlock(&pd->d_lock);
+			noprogress = 0;
+		} else if (pd == sb->s_root) {
+			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
+			spin_lock(&pd->d_lock);
+			pd->d_flags &= ~DCACHE_DISCONNECTED;
+			spin_unlock(&pd->d_lock);
+			noprogress = 0;
+		} else {
+			/* we have hit the top of a disconnected path.  Try
+			 * to find parent and connect
+			 * note: racing with some other process renaming a
+			 * directory isn't much of a problem here.  If someone
+			 * renames the directory, it will end up properly
+			 * connected, which is what we want
+			 */
+			struct dentry *ppd;
+			struct dentry *npd;
+
+			down(&pd->d_inode->i_sem);
+			ppd = CALL(nops,get_parent)(pd);
+			up(&pd->d_inode->i_sem);
+
+			if (IS_ERR(ppd)) {
+				err = PTR_ERR(ppd);
+				dprintk("find_exported_dentry: get_parent of %ld failed, err %d\n",
+					pd->d_inode->i_ino, err);
+				dput(pd);
+				break;
+			}
+			dprintk("find_exported_dentry: find name of %lu in %lu\n", pd->d_inode->i_ino, ppd->d_inode->i_ino);
+			err = CALL(nops,get_name)(ppd, nbuf, pd);
+			if (err) {
+				dput(ppd);
+				dput(pd);
+				if (err == -ENOENT)
+					/* some race between get_parent and
+					 * get_name?  just try again
+					 */
+					continue;
+				break;
+			}
+			dprintk("find_exported_dentry: found name: %s\n", nbuf);
+			down(&ppd->d_inode->i_sem);
+			npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
+			up(&ppd->d_inode->i_sem);
+			if (IS_ERR(npd)) {
+				err = PTR_ERR(npd);
+				dprintk("find_exported_dentry: lookup failed: %d\n", err);
+				dput(ppd);
+				dput(pd);
+				break;
+			}
+			/* we didn't really want npd, we really wanted
+			 * a side-effect of the lookup.
+			 * hopefully, npd == pd, though it isn't really
+			 * a problem if it isn't
+			 */
+			if (npd == pd)
+				noprogress = 0;
+			else
+				printk("find_exported_dentry: npd != pd\n");
+			dput(npd);
+			dput(ppd);
+			if (IS_ROOT(pd)) {
+				/* something went wrong, we have to give up */
+				dput(pd);
+				break;
+			}
+		}
+		dput(pd);
+	}
+
+	if (target_dir->d_flags & DCACHE_DISCONNECTED) {
+		/* something went wrong - oh-well */
+		if (!err)
+			err = -ESTALE;
+		goto err_target;
+	}
+	/* if we weren't after a directory, have one more step to go */
+	if (result != target_dir) {
+		struct dentry *nresult;
+		err = CALL(nops,get_name)(target_dir, nbuf, result);
+		if (!err) {
+			down(&target_dir->d_inode->i_sem);
+			nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+			up(&target_dir->d_inode->i_sem);
+			if (!IS_ERR(nresult)) {
+				if (nresult->d_inode) {
+					dput(result);
+					result = nresult;
+				} else
+					dput(nresult);
+			}
+		}
+	}
+	dput(target_dir);
+	/* now result is properly connected, it is our best bet */
+	if (acceptable(context, result))
+		return result;
+	/* one last try of the aliases.. */
+	spin_lock(&dcache_lock);
+	toput = NULL;
+	head = &result->d_inode->i_dentry;
+	list_for_each(le, head) {
+		struct dentry *dentry = list_entry(le, struct dentry, d_alias);
+		dget_locked(dentry);
+		spin_unlock(&dcache_lock);
+		if (toput) dput(toput);
+		if (dentry != result &&
+		    acceptable(context, dentry)) {
+			dput(result);
+			return dentry;
+		}
+		spin_lock(&dcache_lock);
+		toput = dentry;
+	}
+	spin_unlock(&dcache_lock);
+	if (toput)
+		dput(toput);
+
+	/* drat - I just cannot find anything acceptable */
+	dput(result);
+	/* It might be justifiable to return ESTALE here,
+	 * but the filehandle at least looks reasonably good
+	 * and it may just be a permission problem, so returning
+	 * -EACCES is safer
+	 */
+	return ERR_PTR(-EACCES);
+
+ err_target:
+	dput(target_dir);
+ err_result:
+	dput(result);
+ err_out:
+	return ERR_PTR(err);
+}
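+
+/*
+ * Editor's aside (illustrative sketch, not part of the original patch):
+ * as the kerneldoc above describes, a filesystem's decode_fh() normally
+ * just forwards to find_exported_dentry() with pointers into the
+ * filehandle fragment.  A hypothetical "foofs" wrapper, modelled on
+ * export_decode_fh() further down in this file:
+ */
+#if 0	/* illustrative only, never compiled */
+static struct dentry *foofs_decode_fh(struct super_block *sb, __u32 *fh,
+			int fh_len, int fileid_type,
+			int (*acceptable)(void *context, struct dentry *de),
+			void *context)
+{
+	/* words 0-1: object (inode, generation); words 2-3: parent, if any */
+	void *parent = (fileid_type == 2 && fh_len >= 4) ? &fh[2] : NULL;
+
+	return find_exported_dentry(sb, fh, parent, acceptable, context);
+}
+#endif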
+
+
+
+static struct dentry *get_parent(struct dentry *child)
+{
+	/* get_parent cannot be supported generically; the locking
+	 * is too icky.
+	 * Instead, we just return EACCES.  If the server reboots or inodes
+	 * get flushed, you lose.
+	 */
+	return ERR_PTR(-EACCES);
+}
+
+
+struct getdents_callback {
+	char *name;		/* name that was found. It already points to a
+				   buffer of size NAME_MAX+1 */
+	unsigned long ino;	/* the inum we are looking for */
+	int found;		/* inode matched? */
+	int sequence;		/* sequence counter */
+};
+
+/*
+ * A rather strange filldir function to capture
+ * the name matching the specified inode number.
+ */
+static int filldir_one(void * __buf, const char * name, int len,
+			loff_t pos, ino_t ino, unsigned int d_type)
+{
+	struct getdents_callback *buf = __buf;
+	int result = 0;
+
+	buf->sequence++;
+	if (buf->ino == ino) {
+		memcpy(buf->name, name, len);
+		buf->name[len] = '\0';
+		buf->found = 1;
+		result = -1;
+	}
+	return result;
+}
+
+/**
+ * get_name - default export_operations->get_name function
+ * @dentry: the directory in which to find a name
+ * @name:   a pointer to a %NAME_MAX+1 char buffer to store the name
+ * @child:  the dentry for the child directory.
+ *
+ * calls readdir on the parent until it finds an entry with
+ * the same inode number as the child, and returns that.
+ */
+static int get_name(struct dentry *dentry, char *name,
+			struct dentry *child)
+{
+	struct inode *dir = dentry->d_inode;
+	int error;
+	struct file *file;
+	struct getdents_callback buffer;
+
+	error = -ENOTDIR;
+	if (!dir || !S_ISDIR(dir->i_mode))
+		goto out;
+	error = -EINVAL;
+	if (!dir->i_fop)
+		goto out;
+	/*
+	 * Open the directory ...
+	 */
+	file = dentry_open(dget(dentry), NULL, O_RDONLY);
+	error = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+
+	error = -EINVAL;
+	if (!file->f_op->readdir)
+		goto out_close;
+
+	buffer.name = name;
+	buffer.ino = child->d_inode->i_ino;
+	buffer.found = 0;
+	buffer.sequence = 0;
+	while (1) {
+		int old_seq = buffer.sequence;
+
+		error = vfs_readdir(file, filldir_one, &buffer);
+
+		if (error < 0)
+			break;
+
+		error = 0;
+		if (buffer.found)
+			break;
+		error = -ENOENT;
+		if (old_seq == buffer.sequence)
+			break;
+	}
+
+out_close:
+	fput(file);
+out:
+	return error;
+}
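+
+/*
+ * Editor's aside (illustrative sketch, not part of the original patch):
+ * the default get_name() above is the in-kernel equivalent of scanning a
+ * directory with readdir() until an entry with the wanted inode number
+ * turns up.  Userspace analogue (path and inode number are hypothetical):
+ */
+#if 0	/* illustrative only, never compiled */
+#include <sys/types.h>
+#include <dirent.h>
+#include <stdio.h>
+
+static int name_by_ino(const char *dirpath, ino_t ino, char *name, size_t len)
+{
+	DIR *dir = opendir(dirpath);
+	struct dirent *de;
+	int ret = -1;	/* not found */
+
+	if (!dir)
+		return -1;
+	while ((de = readdir(dir)) != NULL) {
+		if (de->d_ino == ino) {
+			snprintf(name, len, "%s", de->d_name);
+			ret = 0;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
+#endif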
+
+
+static struct dentry *export_iget(struct super_block *sb, unsigned long ino, __u32 generation)
+{
+
+	/* iget isn't really right if the inode is currently unallocated!!
+	 * This should really all be done inside each filesystem
+	 *
+	 * ext2fs' read_inode has been strengthened to return a bad_inode if
+	 * the inode had been deleted.
+	 *
+	 * Currently we don't know the generation for parent directory, so
+	 * a generation of 0 means "accept any"
+	 */
+	struct inode *inode;
+	struct dentry *result;
+	if (ino == 0)
+		return ERR_PTR(-ESTALE);
+	inode = iget(sb, ino);
+	if (inode == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (is_bad_inode(inode)
+	    || (generation && inode->i_generation != generation)
+		) {
+		/* we didn't find the right inode.. */
+		dprintk("fh_verify: Inode %lu, Bad count: %d %d or version  %u %u\n",
+			inode->i_ino,
+			inode->i_nlink, atomic_read(&inode->i_count),
+			inode->i_generation,
+			generation);
+
+		iput(inode);
+		return ERR_PTR(-ESTALE);
+	}
+	/* now to find a dentry.
+	 * If possible, get a well-connected one
+	 */
+	result = d_alloc_anon(inode);
+	if (!result) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	return result;
+}
+
+
+static struct dentry *get_object(struct super_block *sb, void *vobjp)
+{
+	__u32 *objp = vobjp;
+	unsigned long ino = objp[0];
+	__u32 generation = objp[1];
+
+	return export_iget(sb, ino, generation);
+}
+
+
+/**
+ * export_encode_fh - default export_operations->encode_fh function
+ * @dentry:  the dentry to encode
+ * @fh:      where to store the file handle fragment
+ * @max_len: maximum length to store there
+ * @connectable: whether to store parent information
+ *
+ * This default encode_fh function assumes that the 32 bit inode number
+ * is suitable for locating an inode, and that the generation number
+ * can be used to check that it is still valid.  It places them in the
+ * filehandle fragment where export_decode_fh expects to find them.
+ */
+static int export_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+		   int connectable)
+{
+	struct inode * inode = dentry->d_inode;
+	int len = *max_len;
+	int type = 1;
+	
+	if (len < 2 || (connectable && len < 4))
+		return 255;
+
+	len = 2;
+	fh[0] = inode->i_ino;
+	fh[1] = inode->i_generation;
+	if (connectable && !S_ISDIR(inode->i_mode)) {
+		struct inode *parent;
+
+		spin_lock(&dentry->d_lock);
+		parent = dentry->d_parent->d_inode;
+		fh[2] = parent->i_ino;
+		fh[3] = parent->i_generation;
+		spin_unlock(&dentry->d_lock);
+		len = 4;
+		type = 2;
+	}
+	*max_len = len;
+	return type;
+}
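+
+/*
+ * Editor's aside (not part of the original patch): when a connectable
+ * handle is requested for a non-directory, export_encode_fh() above emits
+ * four 32-bit words and returns fileid_type 2; otherwise it stops after
+ * two words and returns type 1:
+ *
+ *	fh[0] = inode number		fh[2] = parent inode number
+ *	fh[1] = inode generation	fh[3] = parent generation
+ *
+ * export_decode_fh() below relies on exactly this layout.
+ */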
+
+
+/**
+ * export_decode_fh - default export_operations->decode_fh function
+ * @sb:  The superblock
+ * @fh:  pointer to the file handle fragment
+ * @fh_len: length of file handle fragment
+ * @acceptable: function for testing acceptability of dentries
+ * @context:   context for @acceptable
+ *
+ * This is the default decode_fh() function.
+ * A fileid_type of 1 indicates that the filehandle fragment
+ * just contains an object identifier understood by get_dentry.
+ * A fileid_type of 2 says that there is also a directory
+ * identifier 8 bytes into the filehandle fragment.
+ */
+static struct dentry *export_decode_fh(struct super_block *sb, __u32 *fh, int fh_len,
+			      int fileid_type,
+			 int (*acceptable)(void *context, struct dentry *de),
+			 void *context)
+{
+	__u32 parent[2];
+	parent[0] = parent[1] = 0;
+	if (fh_len < 2 || fileid_type > 2)
+		return NULL;
+	if (fileid_type == 2) {
+		if (fh_len > 2) parent[0] = fh[2];
+		if (fh_len > 3) parent[1] = fh[3];
+	}
+	return find_exported_dentry(sb, fh, parent,
+				   acceptable, context);
+}
+
+struct export_operations export_op_default = {
+	.decode_fh	= export_decode_fh,
+	.encode_fh	= export_encode_fh,
+
+	.get_name	= get_name,
+	.get_parent	= get_parent,
+	.get_dentry	= get_object,
+};
+
+EXPORT_SYMBOL(export_op_default);
+EXPORT_SYMBOL(find_exported_dentry);
+
+MODULE_LICENSE("GPL");
diff --git a/fs/ext2/CHANGES b/fs/ext2/CHANGES
new file mode 100644
index 0000000..aa5aaf0
--- /dev/null
+++ b/fs/ext2/CHANGES
@@ -0,0 +1,157 @@
+Changes from version 0.5a to version 0.5b
+=========================================
+	- Now that we have sysctl(), the immutable flag cannot be changed when
+	  the system is running at security level > 0.
+	- Some cleanups in the code.
+	- More consistency checks on directories.
+	- The ext2.diff patch from Tom May <ftom@netcom.com> has been
+	  integrated.  This patch replaces expensive "/" and "%" with
+	  cheap ">>" and "&" where possible.
+
+Changes from version 0.5 to version 0.5a
+========================================
+	- Zero the partial block following the end of the file when a file
+	  is truncated.
+	- Dates updated in the copyright.
+	- More checks when the filesystem is mounted: the count of blocks,
+	  fragments, and inodes per group is checked against the block size.
+	- The buffers used by the error routines are now static variables, to
+	  avoid using space on the kernel stack, as requested by Linus.
+	- Some cleanups in the error messages (some versions of syslog contain
+	  a bug which truncates an error message if it contains '\n').
+	- Check that no data can be written to a file past the 2GB limit.
+	- The famous readdir() bug has been fixed by Stephen Tweedie.
+	- Added a revision level in the superblock.
+	- Full support for O_SYNC flag of the open system call.
+	- New mount options: `resuid=#uid' and `resgid=#gid'.  `resuid' causes
+	  ext2fs to consider user #uid like root for the reserved blocks.
+	  `resgid' acts the same way with group #gid.  New fields in the
+	  superblock contain default values for resuid and resgid and can
+	  be modified by tune2fs.
+	  Idea comes from Rene Cougnenc <cougnenc@renux.frmug.fr.net>.
+	- New mount options: `bsddf' and `minixdf'.  `bsddf' causes ext2fs
+	  to remove the blocks used for FS structures from the total block
+	  count in statfs.  With `minixdf', ext2fs mimics Minix behavior
+	  in statfs (i.e. it returns the total number of blocks on the
+	  partition).  This is intended to make bde happy :-)
+	- New file attributes:
+	  - Immutable files cannot be modified.  Data cannot be written to
+	    these files.  They cannot be removed, renamed and new links cannot
+	    be created.  Even root cannot modify the files.  He has to remove
+	    the immutable attribute first.
+	  - Append-only files: can only be written in append-mode when writing.
+	    They cannot be removed, renamed and new links cannot be created.
+	    Note: files may only be added to an append-only directory.
+	  - No-dump files: the attribute is not used by the kernel.  My port
+	    of dump uses it to avoid backing up files which are not important.
+	- New check in ext2_check_dir_entry: the inode number is checked.
+	- Support for big file systems: the copy of the FS descriptor is now
+	  dynamically allocated (previous versions used a fixed size array).
+	  This allows mounting 2GB+ filesystems.
+	- Reorganization of the ext2_inode structure to allow other operating
+	  systems to create specific fields if they use ext2fs as their native
+	  file system.  Currently, ext2fs is only implemented in Linux but
+	  will soon be part of Gnu Hurd and of Masix.
+
+Changes from version 0.4b to version 0.5
+========================================
+	- New superblock fields: s_lastcheck and s_checkinterval added
+	  by Uwe Ohse <uwe@tirka.gun.de> to implement time-dependent checks
+	  of the file system
+	- Real random numbers for secure rm added by Pierre del Perugia
+	  <delperug@gla.ecoledoc.ibp.fr>
+	- The mount warnings related to the state of a fs are not printed
+	  if the fs is mounted read-only, idea by Nick Holloway
+	  <alfie@dcs.warwick.ac.uk>
+
+Changes from version 0.4a to version 0.4b
+=========================================
+	- Copyrights changed to include the name of my laboratory.
+	- Clean up of balloc.c and ialloc.c.
+	- More consistency checks.
+	- Block preallocation added by Stephen Tweedie.
+	- Direct reads of directories disallowed.
+	- Readahead implemented in readdir by Stephen Tweedie.
+	- Bugs in block and inodes allocation fixed.
+	- Readahead implemented in ext2_find_entry by Chip Salzenberg.
+	- New mount options:
+	  `check=none|normal|strict'
+	  `debug'
+	  `errors=continue|remount-ro|panic'
+	  `grpid', `bsdgroups'
+	  `nocheck'
+	  `nogrpid', `sysvgroups'
+	- truncate() now tries to deallocate contiguous blocks in a single call
+	  to ext2_free_blocks().
+	- lots of cosmetic changes.
+
+Changes from version 0.4 to version 0.4a
+========================================
+        - the `sync' option support is now complete.  Version 0.4 was not
+          supporting it when truncating a file.  I have tested the synchronous
+          writes and they work but they make the system very slow :-(  I have
+          to work again on this to make it faster.
+        - when detecting an error on a mounted filesystem, version 0.4 used
+          to try to write a flag in the super block even if the filesystem had
+          been mounted read-only.  This is fixed.
+        - the `sb=#' option now causes the kernel code to use the filesystem
+          descriptors located at block #+1.  Version 0.4 used the superblock
+          backup located at block # but used the main copy of the descriptors.
+        - a new file attribute `S' is supported.  This attribute causes
+          synchronous writes but is applied to a file not to the entire file
+          system (thanks to Michael Kraehe <kraehe@bakunin.north.de> for
+          suggesting it).
+        - the directory cache is inhibited by default.  The cache management
+          code seems to be buggy and I have to look at it carefully before
+          using it again.
+        - deleting a file with the `s' attribute (secure deletion) causes its
+          blocks to be overwritten with random values not with zeros (thanks to
+          Michael A. Griffith <grif@cs.ucr.edu> for suggesting it).
+        - lots of cosmetic changes have been made.
+
+Changes from version 0.3 to version 0.4
+=======================================
+        - Three new mount options are supported: `check', `sync' and `sb=#'.
+          `check' tells the kernel code to make more consistency checks
+          when the file system is mounted.  Currently, the kernel code checks
+          that the blocks and inodes bitmaps are consistent with the free
+          blocks and inodes counts.  More checks will be added in future
+          releases.
+          `sync' tells the kernel code to use synchronous writes when updating
+          an inode, a bitmap, a directory entry or an indirect block.  This
+          can make the file system much slower but can be a big win for files
+          recovery in case of a crash (and we can now say to the BSD folks
+          that Linux also supports synchronous updates :-).
+          `sb=#' tells the kernel code to use an alternate super block instead
+          of its master copy.  `#' is the number of the block (counted in
+          1024 bytes blocks) which contains the alternate super block.
+          An ext2 file system typically contains backups of the super block
+          at blocks 8193, 16385, and so on.
+        - I have changed the meaning of the valid flag used by e2fsck.  It
+          now contains the state of the file system.  If the kernel code
+          detects an inconsistency while the file system is mounted, it flags
+          it as erroneous and e2fsck will detect that on next run.
+        - The super block now contains a mount counter.  This counter is
+          incremented each time the file system is mounted read/write.   When
+          this counter becomes bigger than a maximal mount counts (also stored
+          in the super block), e2fsck checks the file system, even if it had
+          been unmounted cleanly, and resets this counter to 0.
+        - File attributes are now supported.  One can associate a set of
+          attributes to a file.  Three attributes are defined:
+          `c': the file is marked for automatic compression,
+          `s': the file is marked for secure deletion: when the file is
+          deleted, its blocks are zeroed and written back to the disk,
+          `u': the file is marked for undeletion: when the file is deleted,
+          its contents are saved to allow a future undeletion.
+          Currently, only the `s' attribute is implemented in the kernel
+          code.  Support for the other attributes will be added in a future
+          release.
+        - a few bugs related to times updates have been fixed by Bruce
+          Evans and me.
+        - a bug related to the links count of deleted inodes has been fixed.
+          Previous versions used to keep the links count set to 1 when a file
+          was deleted.  The new version now sets links_count to 0 when deleting
+          the last link.
+        - a race condition when deallocating an inode has been fixed by
+          Stephen Tweedie.
+
diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile
new file mode 100644
index 0000000..ee240a1
--- /dev/null
+++ b/fs/ext2/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux ext2-filesystem routines.
+#
+
+obj-$(CONFIG_EXT2_FS) += ext2.o
+
+ext2-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+	  ioctl.o namei.o super.o symlink.o
+
+ext2-$(CONFIG_EXT2_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
+ext2-$(CONFIG_EXT2_FS_POSIX_ACL) += acl.o
+ext2-$(CONFIG_EXT2_FS_SECURITY)	 += xattr_security.o
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
new file mode 100644
index 0000000..8369ee8
--- /dev/null
+++ b/fs/ext2/acl.c
@@ -0,0 +1,518 @@
+/*
+ * linux/fs/ext2/acl.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+static struct posix_acl *
+ext2_acl_from_disk(const void *value, size_t size)
+{
+	const char *end = (char *)value + size;
+	int n, count;
+	struct posix_acl *acl;
+
+	if (!value)
+		return NULL;
+	if (size < sizeof(ext2_acl_header))
+		 return ERR_PTR(-EINVAL);
+	if (((ext2_acl_header *)value)->a_version !=
+	    cpu_to_le32(EXT2_ACL_VERSION))
+		return ERR_PTR(-EINVAL);
+	value = (char *)value + sizeof(ext2_acl_header);
+	count = ext2_acl_count(size);
+	if (count < 0)
+		return ERR_PTR(-EINVAL);
+	if (count == 0)
+		return NULL;
+	acl = posix_acl_alloc(count, GFP_KERNEL);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+	for (n=0; n < count; n++) {
+		ext2_acl_entry *entry =
+			(ext2_acl_entry *)value;
+		if ((char *)value + sizeof(ext2_acl_entry_short) > end)
+			goto fail;
+		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
+		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				value = (char *)value +
+					sizeof(ext2_acl_entry_short);
+				acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				value = (char *)value + sizeof(ext2_acl_entry);
+				if ((char *)value > end)
+					goto fail;
+				acl->a_entries[n].e_id =
+					le32_to_cpu(entry->e_id);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	if (value != end)
+		goto fail;
+	return acl;
+
+fail:
+	posix_acl_release(acl);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+static void *
+ext2_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+	ext2_acl_header *ext_acl;
+	char *e;
+	size_t n;
+
+	*size = ext2_acl_size(acl->a_count);
+	ext_acl = (ext2_acl_header *)kmalloc(sizeof(ext2_acl_header) +
+		acl->a_count * sizeof(ext2_acl_entry), GFP_KERNEL);
+	if (!ext_acl)
+		return ERR_PTR(-ENOMEM);
+	ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION);
+	e = (char *)ext_acl + sizeof(ext2_acl_header);
+	for (n=0; n < acl->a_count; n++) {
+		ext2_acl_entry *entry = (ext2_acl_entry *)e;
+		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER:
+			case ACL_GROUP:
+				entry->e_id =
+					cpu_to_le32(acl->a_entries[n].e_id);
+				e += sizeof(ext2_acl_entry);
+				break;
+
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				e += sizeof(ext2_acl_entry_short);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	return (char *)ext_acl;
+
+fail:
+	kfree(ext_acl);
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct posix_acl *
+ext2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
+{
+	struct posix_acl *acl = EXT2_ACL_NOT_CACHED;
+
+	spin_lock(&inode->i_lock);
+	if (*i_acl != EXT2_ACL_NOT_CACHED)
+		acl = posix_acl_dup(*i_acl);
+	spin_unlock(&inode->i_lock);
+
+	return acl;
+}
+
+static inline void
+ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
+		   struct posix_acl *acl)
+{
+	spin_lock(&inode->i_lock);
+	if (*i_acl != EXT2_ACL_NOT_CACHED)
+		posix_acl_release(*i_acl);
+	*i_acl = posix_acl_dup(acl);
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * inode->i_sem: don't care
+ */
+static struct posix_acl *
+ext2_get_acl(struct inode *inode, int type)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	int name_index;
+	char *value = NULL;
+	struct posix_acl *acl;
+	int retval;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return NULL;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			acl = ext2_iget_acl(inode, &ei->i_acl);
+			if (acl != EXT2_ACL_NOT_CACHED)
+				return acl;
+			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			acl = ext2_iget_acl(inode, &ei->i_default_acl);
+			if (acl != EXT2_ACL_NOT_CACHED)
+				return acl;
+			name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+			break;
+
+		default:
+			return ERR_PTR(-EINVAL);
+	}
+	retval = ext2_xattr_get(inode, name_index, "", NULL, 0);
+	if (retval > 0) {
+		value = kmalloc(retval, GFP_KERNEL);
+		if (!value)
+			return ERR_PTR(-ENOMEM);
+		retval = ext2_xattr_get(inode, name_index, "", value, retval);
+	}
+	if (retval > 0)
+		acl = ext2_acl_from_disk(value, retval);
+	else if (retval == -ENODATA || retval == -ENOSYS)
+		acl = NULL;
+	else
+		acl = ERR_PTR(retval);
+	if (value)
+		kfree(value);
+
+	if (!IS_ERR(acl)) {
+		switch(type) {
+			case ACL_TYPE_ACCESS:
+				ext2_iset_acl(inode, &ei->i_acl, acl);
+				break;
+
+			case ACL_TYPE_DEFAULT:
+				ext2_iset_acl(inode, &ei->i_default_acl, acl);
+				break;
+		}
+	}
+	return acl;
+}
+
+/*
+ * inode->i_sem: down
+ */
+static int
+ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	int name_index;
+	void *value = NULL;
+	size_t size;
+	int error;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+			if (acl) {
+				mode_t mode = inode->i_mode;
+				error = posix_acl_equiv_mode(acl, &mode);
+				if (error < 0)
+					return error;
+				else {
+					inode->i_mode = mode;
+					mark_inode_dirty(inode);
+					if (error == 0)
+						acl = NULL;
+				}
+			}
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+			if (!S_ISDIR(inode->i_mode))
+				return acl ? -EACCES : 0;
+			break;
+
+		default:
+			return -EINVAL;
+	}
+ 	if (acl) {
+		value = ext2_acl_to_disk(acl, &size);
+		if (IS_ERR(value))
+			return (int)PTR_ERR(value);
+	}
+
+	error = ext2_xattr_set(inode, name_index, "", value, size, 0);
+
+	if (value)
+		kfree(value);
+	if (!error) {
+		switch(type) {
+			case ACL_TYPE_ACCESS:
+				ext2_iset_acl(inode, &ei->i_acl, acl);
+				break;
+
+			case ACL_TYPE_DEFAULT:
+				ext2_iset_acl(inode, &ei->i_default_acl, acl);
+				break;
+		}
+	}
+	return error;
+}
+
+static int
+ext2_check_acl(struct inode *inode, int mask)
+{
+	struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
+
+	if (acl) {
+		int error = posix_acl_permission(inode, acl, mask);
+		posix_acl_release(acl);
+		return error;
+	}
+
+	return -EAGAIN;
+}
+
+int
+ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	return generic_permission(inode, mask, ext2_check_acl);
+}
+
+/*
+ * Initialize the ACLs of a new inode. Called from ext2_new_inode.
+ *
+ * dir->i_sem: down
+ * inode->i_sem: up (access to inode is still exclusive)
+ */
+int
+ext2_init_acl(struct inode *inode, struct inode *dir)
+{
+	struct posix_acl *acl = NULL;
+	int error = 0;
+
+	if (!S_ISLNK(inode->i_mode)) {
+		if (test_opt(dir->i_sb, POSIX_ACL)) {
+			acl = ext2_get_acl(dir, ACL_TYPE_DEFAULT);
+			if (IS_ERR(acl))
+				return PTR_ERR(acl);
+		}
+		if (!acl)
+			inode->i_mode &= ~current->fs->umask;
+	}
+	if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
+		struct posix_acl *clone;
+		mode_t mode;
+
+		if (S_ISDIR(inode->i_mode)) {
+			error = ext2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
+			if (error)
+				goto cleanup;
+		}
+		clone = posix_acl_clone(acl, GFP_KERNEL);
+		error = -ENOMEM;
+		if (!clone)
+			goto cleanup;
+		mode = inode->i_mode;
+		error = posix_acl_create_masq(clone, &mode);
+		if (error >= 0) {
+			inode->i_mode = mode;
+			if (error > 0) {
+				/* This is an extended ACL */
+				error = ext2_set_acl(inode,
+						     ACL_TYPE_ACCESS, clone);
+			}
+		}
+		posix_acl_release(clone);
+	}
+cleanup:
+	posix_acl_release(acl);
+	return error;
+}
+
+/*
+ * Does chmod for an inode that may have an Access Control List. The
+ * inode->i_mode field must be updated to the desired value by the caller
+ * before calling this function.
+ * Returns 0 on success, or a negative error number.
+ *
+ * We change the ACL rather than storing some ACL entries in the file
+ * mode permission bits (which would be more efficient), because that
+ * would break once additional permissions (like  ACL_APPEND, ACL_DELETE
+ * for directories) are added. There are no more bits available in the
+ * file mode.
+ *
+ * inode->i_sem: down
+ */
+int
+ext2_acl_chmod(struct inode *inode)
+{
+	struct posix_acl *acl, *clone;
+	int error;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+	acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+	clone = posix_acl_clone(acl, GFP_KERNEL);
+	posix_acl_release(acl);
+	if (!clone)
+		return -ENOMEM;
+	error = posix_acl_chmod_masq(clone, inode->i_mode);
+	if (!error)
+		error = ext2_set_acl(inode, ACL_TYPE_ACCESS, clone);
+	posix_acl_release(clone);
+	return error;
+}
+
+/*
+ * Extended attribute handlers
+ */
+static size_t
+ext2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_size,
+			   const char *name, size_t name_len)
+{
+	const size_t size = sizeof(XATTR_NAME_ACL_ACCESS);
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	if (list && size <= list_size)
+		memcpy(list, XATTR_NAME_ACL_ACCESS, size);
+	return size;
+}
+
+static size_t
+ext2_xattr_list_acl_default(struct inode *inode, char *list, size_t list_size,
+			    const char *name, size_t name_len)
+{
+	const size_t size = sizeof(XATTR_NAME_ACL_DEFAULT);
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	if (list && size <= list_size)
+		memcpy(list, XATTR_NAME_ACL_DEFAULT, size);
+	return size;
+}
+
+static int
+ext2_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+{
+	struct posix_acl *acl;
+	int error;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return -EOPNOTSUPP;
+
+	acl = ext2_get_acl(inode, type);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+	error = posix_acl_to_xattr(acl, buffer, size);
+	posix_acl_release(acl);
+
+	return error;
+}
+
+static int
+ext2_xattr_get_acl_access(struct inode *inode, const char *name,
+			  void *buffer, size_t size)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext2_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int
+ext2_xattr_get_acl_default(struct inode *inode, const char *name,
+			   void *buffer, size_t size)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext2_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+static int
+ext2_xattr_set_acl(struct inode *inode, int type, const void *value,
+		   size_t size)
+{
+	struct posix_acl *acl;
+	int error;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return -EOPNOTSUPP;
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		return -EPERM;
+
+	if (value) {
+		acl = posix_acl_from_xattr(value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		else if (acl) {
+			error = posix_acl_valid(acl);
+			if (error)
+				goto release_and_out;
+		}
+	} else
+		acl = NULL;
+
+	error = ext2_set_acl(inode, type, acl);
+
+release_and_out:
+	posix_acl_release(acl);
+	return error;
+}
+
+static int
+ext2_xattr_set_acl_access(struct inode *inode, const char *name,
+			  const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext2_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int
+ext2_xattr_set_acl_default(struct inode *inode, const char *name,
+			   const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext2_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+struct xattr_handler ext2_xattr_acl_access_handler = {
+	.prefix	= XATTR_NAME_ACL_ACCESS,
+	.list	= ext2_xattr_list_acl_access,
+	.get	= ext2_xattr_get_acl_access,
+	.set	= ext2_xattr_set_acl_access,
+};
+
+struct xattr_handler ext2_xattr_acl_default_handler = {
+	.prefix	= XATTR_NAME_ACL_DEFAULT,
+	.list	= ext2_xattr_list_acl_default,
+	.get	= ext2_xattr_get_acl_default,
+	.set	= ext2_xattr_set_acl_default,
+};
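+
+/*
+ * Editor's aside (illustrative sketch, not part of the original patch):
+ * the handlers above back the "system.posix_acl_access" and
+ * "system.posix_acl_default" extended attributes.  Userspace usually goes
+ * through libacl instead of raw getxattr/setxattr; a minimal sketch
+ * (hypothetical path, link with -lacl):
+ */
+#if 0	/* illustrative only, never compiled */
+#include <sys/types.h>
+#include <sys/acl.h>
+#include <stdio.h>
+
+int main(void)
+{
+	acl_t acl = acl_get_file("/mnt/ext2/file", ACL_TYPE_ACCESS);
+
+	if (acl) {
+		char *text = acl_to_text(acl, NULL);	/* "user::rw-\n..." */
+
+		if (text) {
+			fputs(text, stdout);
+			acl_free(text);
+		}
+		acl_free(acl);
+	}
+	return 0;
+}
+#endif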
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
new file mode 100644
index 0000000..fed96ae
--- /dev/null
+++ b/fs/ext2/acl.h
@@ -0,0 +1,82 @@
+/*
+  File: fs/ext2/acl.h
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/xattr_acl.h>
+
+#define EXT2_ACL_VERSION	0x0001
+
+typedef struct {
+	__le16		e_tag;
+	__le16		e_perm;
+	__le32		e_id;
+} ext2_acl_entry;
+
+typedef struct {
+	__le16		e_tag;
+	__le16		e_perm;
+} ext2_acl_entry_short;
+
+typedef struct {
+	__le32		a_version;
+} ext2_acl_header;
+
+static inline size_t ext2_acl_size(int count)
+{
+	if (count <= 4) {
+		return sizeof(ext2_acl_header) +
+		       count * sizeof(ext2_acl_entry_short);
+	} else {
+		return sizeof(ext2_acl_header) +
+		       4 * sizeof(ext2_acl_entry_short) +
+		       (count - 4) * sizeof(ext2_acl_entry);
+	}
+}
+
+static inline int ext2_acl_count(size_t size)
+{
+	ssize_t s;
+	size -= sizeof(ext2_acl_header);
+	s = size - 4 * sizeof(ext2_acl_entry_short);
+	if (s < 0) {
+		if (size % sizeof(ext2_acl_entry_short))
+			return -1;
+		return size / sizeof(ext2_acl_entry_short);
+	} else {
+		if (s % sizeof(ext2_acl_entry))
+			return -1;
+		return s / sizeof(ext2_acl_entry) + 4;
+	}
+}
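+
+/*
+ * Editor's aside (not part of the original patch): worked example of the
+ * size arithmetic above.  A five-entry ACL (user::, group::, mask::,
+ * other:: plus one named user) stores the four "short" entries without an
+ * e_id field:
+ *
+ *	ext2_acl_size(5)   = 4 + 4*4 + 1*8 = 28 bytes
+ *	ext2_acl_count(28) = (28 - 4 - 4*4) / 8 + 4 = 5
+ *
+ * (header = 4 bytes, entry_short = 4 bytes, full entry = 8 bytes on disk.)
+ */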
+
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+
+/* Value for inode->u.ext2_i.i_acl and inode->u.ext2_i.i_default_acl
+   if the ACL has not been cached */
+#define EXT2_ACL_NOT_CACHED ((void *)-1)
+
+/* acl.c */
+extern int ext2_permission (struct inode *, int, struct nameidata *);
+extern int ext2_acl_chmod (struct inode *);
+extern int ext2_init_acl (struct inode *, struct inode *);
+
+#else
+#include <linux/sched.h>
+#define ext2_permission NULL
+#define ext2_get_acl	NULL
+#define ext2_set_acl	NULL
+
+static inline int
+ext2_acl_chmod (struct inode *inode)
+{
+	return 0;
+}
+
+static inline int ext2_init_acl (struct inode *inode, struct inode *dir)
+{
+	return 0;
+}
+#endif
+
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
new file mode 100644
index 0000000..6591abe
--- /dev/null
+++ b/fs/ext2/balloc.c
@@ -0,0 +1,699 @@
+/*
+ *  linux/fs/ext2/balloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/config.h>
+#include "ext2.h"
+#include <linux/quotaops.h>
+#include <linux/sched.h>
+#include <linux/buffer_head.h>
+
+/*
+ * balloc.c contains the blocks allocation and deallocation routines
+ */
+
+/*
+ * The free blocks are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free blocks count in the block.  The descriptors are loaded in memory
+ * when a file system is mounted (see ext2_read_super).
+ */
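+
+/*
+ * Roughly, a block group that carries backups looks like this on disk:
+ *
+ *	[ superblock | group descriptors | block bitmap | inode bitmap |
+ *	  inode table | data blocks ... ]
+ *
+ * Groups without a superblock/descriptor backup start directly with the
+ * block bitmap.
+ */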
+
+
+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
+
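+/*
+ * ext2_get_group_desc() maps a group number onto the cached descriptor
+ * blocks.  For example, with 1024-byte blocks and 32-byte descriptors
+ * there are 32 descriptors per block, so group 70 is entry 70 & 31 = 6
+ * of s_group_desc[70 >> 5] = s_group_desc[2].
+ */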
+struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
+					     unsigned int block_group,
+					     struct buffer_head ** bh)
+{
+	unsigned long group_desc;
+	unsigned long offset;
+	struct ext2_group_desc * desc;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+	if (block_group >= sbi->s_groups_count) {
+		ext2_error (sb, "ext2_get_group_desc",
+			    "block_group >= groups_count - "
+			    "block_group = %d, groups_count = %lu",
+			    block_group, sbi->s_groups_count);
+
+		return NULL;
+	}
+
+	group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb);
+	offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1);
+	if (!sbi->s_group_desc[group_desc]) {
+		ext2_error (sb, "ext2_get_group_desc",
+			    "Group descriptor not loaded - "
+			    "block_group = %d, group_desc = %lu, desc = %lu",
+			     block_group, group_desc, offset);
+		return NULL;
+	}
+
+	desc = (struct ext2_group_desc *) sbi->s_group_desc[group_desc]->b_data;
+	if (bh)
+		*bh = sbi->s_group_desc[group_desc];
+	return desc + offset;
+}
+
+/*
+ * Read the bitmap for a given block_group, reading into the specified 
+ * slot in the superblock's bitmap cache.
+ *
+ * Return buffer_head on success or NULL in case of failure.
+ */
+static struct buffer_head *
+read_block_bitmap(struct super_block *sb, unsigned int block_group)
+{
+	struct ext2_group_desc * desc;
+	struct buffer_head * bh = NULL;
+	
+	desc = ext2_get_group_desc (sb, block_group, NULL);
+	if (!desc)
+		goto error_out;
+	bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
+	if (!bh)
+		ext2_error (sb, "read_block_bitmap",
+			    "Cannot read block bitmap - "
+			    "block_group = %d, block_bitmap = %u",
+			    block_group, le32_to_cpu(desc->bg_block_bitmap));
+error_out:
+	return bh;
+}
+
+/*
+ * Set sb->s_dirt here because the superblock was "logically" altered.  We
+ * need to recalculate its free blocks count and flush it out.
+ */
+static int reserve_blocks(struct super_block *sb, int count)
+{
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
+	unsigned free_blocks;
+	unsigned root_blocks;
+
+	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+	root_blocks = le32_to_cpu(es->s_r_blocks_count);
+
+	if (free_blocks < count)
+		count = free_blocks;
+
+	if (free_blocks < root_blocks + count && !capable(CAP_SYS_RESOURCE) &&
+	    sbi->s_resuid != current->fsuid &&
+	    (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+		/*
+		 * We are too close to reserve and we are not privileged.
+		 * Can we allocate anything at all?
+		 */
+		if (free_blocks > root_blocks)
+			count = free_blocks - root_blocks;
+		else
+			return 0;
+	}
+
+	percpu_counter_mod(&sbi->s_freeblocks_counter, -count);
+	sb->s_dirt = 1;
+	return count;
+}
+
+static void release_blocks(struct super_block *sb, int count)
+{
+	if (count) {
+		struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+		percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+		sb->s_dirt = 1;
+	}
+}
+
+static int group_reserve_blocks(struct ext2_sb_info *sbi, int group_no,
+	struct ext2_group_desc *desc, struct buffer_head *bh, int count)
+{
+	unsigned free_blocks;
+
+	if (!desc->bg_free_blocks_count)
+		return 0;
+
+	spin_lock(sb_bgl_lock(sbi, group_no));
+	free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
+	if (free_blocks < count)
+		count = free_blocks;
+	desc->bg_free_blocks_count = cpu_to_le16(free_blocks - count);
+	spin_unlock(sb_bgl_lock(sbi, group_no));
+	mark_buffer_dirty(bh);
+	return count;
+}
+
+static void group_release_blocks(struct super_block *sb, int group_no,
+	struct ext2_group_desc *desc, struct buffer_head *bh, int count)
+{
+	if (count) {
+		struct ext2_sb_info *sbi = EXT2_SB(sb);
+		unsigned free_blocks;
+
+		spin_lock(sb_bgl_lock(sbi, group_no));
+		free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
+		desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
+		spin_unlock(sb_bgl_lock(sbi, group_no));
+		sb->s_dirt = 1;
+		mark_buffer_dirty(bh);
+	}
+}
+
+/* Free given blocks, update quota and i_blocks field */
+void ext2_free_blocks (struct inode * inode, unsigned long block,
+		       unsigned long count)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head * bh2;
+	unsigned long block_group;
+	unsigned long bit;
+	unsigned long i;
+	unsigned long overflow;
+	struct super_block * sb = inode->i_sb;
+	struct ext2_sb_info * sbi = EXT2_SB(sb);
+	struct ext2_group_desc * desc;
+	struct ext2_super_block * es = sbi->s_es;
+	unsigned freed = 0, group_freed;
+
+	if (block < le32_to_cpu(es->s_first_data_block) ||
+	    block + count < block ||
+	    block + count > le32_to_cpu(es->s_blocks_count)) {
+		ext2_error (sb, "ext2_free_blocks",
+			    "Freeing blocks not in datazone - "
+			    "block = %lu, count = %lu", block, count);
+		goto error_return;
+	}
+
+	ext2_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
+
+do_more:
+	overflow = 0;
+	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
+		      EXT2_BLOCKS_PER_GROUP(sb);
+	bit = (block - le32_to_cpu(es->s_first_data_block)) %
+		      EXT2_BLOCKS_PER_GROUP(sb);
+	/*
+	 * Check to see if we are freeing blocks across a group
+	 * boundary.
+	 */
+	if (bit + count > EXT2_BLOCKS_PER_GROUP(sb)) {
+		overflow = bit + count - EXT2_BLOCKS_PER_GROUP(sb);
+		count -= overflow;
+	}
+	brelse(bitmap_bh);
+	bitmap_bh = read_block_bitmap(sb, block_group);
+	if (!bitmap_bh)
+		goto error_return;
+
+	desc = ext2_get_group_desc (sb, block_group, &bh2);
+	if (!desc)
+		goto error_return;
+
+	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
+	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
+	    in_range (block, le32_to_cpu(desc->bg_inode_table),
+		      sbi->s_itb_per_group) ||
+	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
+		      sbi->s_itb_per_group))
+		ext2_error (sb, "ext2_free_blocks",
+			    "Freeing blocks in system zones - "
+			    "Block = %lu, count = %lu",
+			    block, count);
+
+	for (i = 0, group_freed = 0; i < count; i++) {
+		if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+						bit + i, bitmap_bh->b_data)) {
+			ext2_error(sb, __FUNCTION__,
+				"bit already cleared for block %lu", block + i);
+		} else {
+			group_freed++;
+		}
+	}
+
+	mark_buffer_dirty(bitmap_bh);
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
+
+	group_release_blocks(sb, block_group, desc, bh2, group_freed);
+	freed += group_freed;
+
+	if (overflow) {
+		block += count;
+		count = overflow;
+		goto do_more;
+	}
+error_return:
+	brelse(bitmap_bh);
+	release_blocks(sb, freed);
+	DQUOT_FREE_BLOCK(inode, freed);
+}
+
+static int grab_block(spinlock_t *lock, char *map, unsigned size, int goal)
+{
+	int k;
+	char *p, *r;
+
+	if (!ext2_test_bit(goal, map))
+		goto got_it;
+
+repeat:
+	if (goal) {
+		/*
+		 * The goal was occupied; search forward for a free 
+		 * block within the next 64 blocks.
+		 *
+		 * end_goal is more or less random, but it has to be
+		 * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the
+		 * next 64-bit boundary is simple..
+		 */
+		k = (goal + 63) & ~63;
+		goal = ext2_find_next_zero_bit(map, k, goal);
+		if (goal < k)
+			goto got_it;
+		/*
+		 * Search in the remainder of the current group.
+		 */
+	}
+
+	p = map + (goal >> 3);
+	r = memscan(p, 0, (size - goal + 7) >> 3);
+	k = (r - map) << 3;
+	if (k < size) {
+		/* 
+		 * We have succeeded in finding a free byte in the block
+		 * bitmap.  Now search backwards to find the start of this
+		 * group of free blocks - won't take more than 7 iterations.
+		 */
+		for (goal = k; goal && !ext2_test_bit (goal - 1, map); goal--)
+			;
+		goto got_it;
+	}
+
+	k = ext2_find_next_zero_bit ((u32 *)map, size, goal);
+	if (k < size) {
+		goal = k;
+		goto got_it;
+	}
+	return -1;
+got_it:
+	if (ext2_set_bit_atomic(lock, goal, (void *) map)) 
+		goto repeat;	
+	return goal;
+}
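+
+/*
+ * For example, with goal = 5 the first pass above scans bits 5..63 for a
+ * free block; failing that, memscan() looks for a completely free byte
+ * (8 free blocks) in the rest of the bitmap, and only then do we fall
+ * back to a plain bit-by-bit search.  The winning bit is claimed with
+ * ext2_set_bit_atomic(), retrying if another CPU raced us to it.
+ */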
+
+/*
+ * ext2_new_block uses a goal block to assist allocation.  If the goal is
+ * free, or there is a free block within 64 blocks of the goal, that block
+ * is allocated.  Otherwise a forward search is made for a free block; within 
+ * each block group the search first looks for an entire free byte in the block
+ * bitmap, and then for any free bit if that fails.
+ * This function also updates quota and i_blocks field.
+ */
+int ext2_new_block(struct inode *inode, unsigned long goal,
+			u32 *prealloc_count, u32 *prealloc_block, int *err)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *gdp_bh;	/* bh2 */
+	struct ext2_group_desc *desc;
+	int group_no;			/* i */
+	int ret_block;			/* j */
+	int group_idx;			/* k */
+	int target_block;		/* tmp */
+	int block = 0;
+	struct super_block *sb = inode->i_sb;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
+	unsigned group_size = EXT2_BLOCKS_PER_GROUP(sb);
+	unsigned prealloc_goal = es->s_prealloc_blocks;
+	unsigned group_alloc = 0, es_alloc, dq_alloc;
+	int nr_scanned_groups;
+
+	if (!prealloc_goal--)
+		prealloc_goal = EXT2_DEFAULT_PREALLOC_BLOCKS - 1;
+	if (!prealloc_count || *prealloc_count)
+		prealloc_goal = 0;
+
+	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+		*err = -EDQUOT;
+		goto out;
+	}
+
+	while (prealloc_goal && DQUOT_PREALLOC_BLOCK(inode, prealloc_goal))
+		prealloc_goal--;
+
+	dq_alloc = prealloc_goal + 1;
+	es_alloc = reserve_blocks(sb, dq_alloc);
+	if (!es_alloc) {
+		*err = -ENOSPC;
+		goto out_dquot;
+	}
+
+	ext2_debug ("goal=%lu.\n", goal);
+
+	if (goal < le32_to_cpu(es->s_first_data_block) ||
+	    goal >= le32_to_cpu(es->s_blocks_count))
+		goal = le32_to_cpu(es->s_first_data_block);
+	group_no = (goal - le32_to_cpu(es->s_first_data_block)) / group_size;
+	desc = ext2_get_group_desc (sb, group_no, &gdp_bh);
+	if (!desc) {
+		/*
+		 * gdp_bh may still be uninitialised.  But group_release_blocks
+		 * will not touch it because group_alloc is zero.
+		 */
+		goto io_error;
+	}
+
+	group_alloc = group_reserve_blocks(sbi, group_no, desc,
+					gdp_bh, es_alloc);
+	if (group_alloc) {
+		ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
+					group_size);
+		brelse(bitmap_bh);
+		bitmap_bh = read_block_bitmap(sb, group_no);
+		if (!bitmap_bh)
+			goto io_error;
+		
+		ext2_debug("goal is at %d:%d.\n", group_no, ret_block);
+
+		ret_block = grab_block(sb_bgl_lock(sbi, group_no),
+				bitmap_bh->b_data, group_size, ret_block);
+		if (ret_block >= 0)
+			goto got_block;
+		group_release_blocks(sb, group_no, desc, gdp_bh, group_alloc);
+		group_alloc = 0;
+	}
+
+	ext2_debug ("Bit not found in block group %d.\n", group_no);
+
+	/*
+	 * Now search the rest of the groups.  We assume that 
+	 * group_no and desc correctly point to the last group visited.
+	 */
+	nr_scanned_groups = 0;
+retry:
+	for (group_idx = 0; !group_alloc &&
+			group_idx < sbi->s_groups_count; group_idx++) {
+		group_no++;
+		if (group_no >= sbi->s_groups_count)
+			group_no = 0;
+		desc = ext2_get_group_desc(sb, group_no, &gdp_bh);
+		if (!desc)
+			goto io_error;
+		group_alloc = group_reserve_blocks(sbi, group_no, desc,
+						gdp_bh, es_alloc);
+	}
+	if (!group_alloc) {
+		*err = -ENOSPC;
+		goto out_release;
+	}
+	brelse(bitmap_bh);
+	bitmap_bh = read_block_bitmap(sb, group_no);
+	if (!bitmap_bh)
+		goto io_error;
+
+	ret_block = grab_block(sb_bgl_lock(sbi, group_no), bitmap_bh->b_data,
+				group_size, 0);
+	if (ret_block < 0) {
+		/*
+		 * If a free block counter is corrupted we can loop infinitely.
+		 * Detect that here.
+		 */
+		nr_scanned_groups++;
+		if (nr_scanned_groups > 2 * sbi->s_groups_count) {
+			ext2_error(sb, "ext2_new_block",
+				"corrupted free blocks counters");
+			goto io_error;
+		}
+		/*
+		 * Someone else grabbed the last free block in this blockgroup
+		 * before us.  Retry the scan.
+		 */
+		group_release_blocks(sb, group_no, desc, gdp_bh, group_alloc);
+		group_alloc = 0;
+		goto retry;
+	}
+
+got_block:
+	ext2_debug("using block group %d(%d)\n",
+		group_no, desc->bg_free_blocks_count);
+
+	target_block = ret_block + group_no * group_size +
+			le32_to_cpu(es->s_first_data_block);
+
+	if (target_block == le32_to_cpu(desc->bg_block_bitmap) ||
+	    target_block == le32_to_cpu(desc->bg_inode_bitmap) ||
+	    in_range(target_block, le32_to_cpu(desc->bg_inode_table),
+		      sbi->s_itb_per_group))
+		ext2_error (sb, "ext2_new_block",
+			    "Allocating block in system zone - "
+			    "block = %u", target_block);
+
+	if (target_block >= le32_to_cpu(es->s_blocks_count)) {
+		ext2_error (sb, "ext2_new_block",
+			    "block(%d) >= blocks count(%d) - "
+			    "block_group = %d, es == %p ", ret_block,
+			le32_to_cpu(es->s_blocks_count), group_no, es);
+		goto io_error;
+	}
+	block = target_block;
+
+	/* OK, we _had_ allocated something */
+	ext2_debug("found bit %d\n", ret_block);
+
+	dq_alloc--;
+	es_alloc--;
+	group_alloc--;
+
+	/*
+	 * Do block preallocation now if required.
+	 */
+	write_lock(&EXT2_I(inode)->i_meta_lock);
+	if (group_alloc && !*prealloc_count) {
+		unsigned n;
+
+		for (n = 0; n < group_alloc && ++ret_block < group_size; n++) {
+			if (ext2_set_bit_atomic(sb_bgl_lock(sbi, group_no),
+						ret_block,
+						(void*) bitmap_bh->b_data))
+ 				break;
+		}
+		*prealloc_block = block + 1;
+		*prealloc_count = n;
+		es_alloc -= n;
+		dq_alloc -= n;
+		group_alloc -= n;
+	}
+	write_unlock(&EXT2_I(inode)->i_meta_lock);
+
+	mark_buffer_dirty(bitmap_bh);
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
+
+	ext2_debug ("allocating block %d. ", block);
+
+	*err = 0;
+out_release:
+	group_release_blocks(sb, group_no, desc, gdp_bh, group_alloc);
+	release_blocks(sb, es_alloc);
+out_dquot:
+	DQUOT_FREE_BLOCK(inode, dq_alloc);
+out:
+	brelse(bitmap_bh);
+	return block;
+
+io_error:
+	*err = -EIO;
+	goto out_release;
+}
+
+unsigned long ext2_count_free_blocks (struct super_block * sb)
+{
+	struct ext2_group_desc * desc;
+	unsigned long desc_count = 0;
+	int i;
+#ifdef EXT2FS_DEBUG
+	unsigned long bitmap_count, x;
+	struct ext2_super_block *es;
+
+	lock_super (sb);
+	es = EXT2_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	desc = NULL;
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		struct buffer_head *bitmap_bh;
+		desc = ext2_get_group_desc (sb, i, NULL);
+		if (!desc)
+			continue;
+		desc_count += le16_to_cpu(desc->bg_free_blocks_count);
+		bitmap_bh = read_block_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+		
+		x = ext2_count_free(bitmap_bh, sb->s_blocksize);
+		printk ("group %d: stored = %d, counted = %lu\n",
+			i, le16_to_cpu(desc->bg_free_blocks_count), x);
+		bitmap_count += x;
+		brelse(bitmap_bh);
+	}
+	printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n",
+		(long)le32_to_cpu(es->s_free_blocks_count),
+		desc_count, bitmap_count);
+	unlock_super (sb);
+	return bitmap_count;
+#else
+        for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+                desc = ext2_get_group_desc (sb, i, NULL);
+                if (!desc)
+                        continue;
+                desc_count += le16_to_cpu(desc->bg_free_blocks_count);
+	}
+	return desc_count;
+#endif
+}
+
+static inline int
+block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
+{
+	return ext2_test_bit ((block -
+		le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block)) %
+			 EXT2_BLOCKS_PER_GROUP(sb), map);
+}
+
+static inline int test_root(int a, int b)
+{
+	int num = b;
+
+	while (a > num)
+		num *= b;
+	return num == a;
+}
+
+static int ext2_group_sparse(int group)
+{
+	if (group <= 1)
+		return 1;
+	return (test_root(group, 3) || test_root(group, 5) ||
+		test_root(group, 7));
+}
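+
+/*
+ * With the sparse_super feature this means only groups 0, 1 and powers
+ * of 3, 5 and 7 (3, 5, 7, 9, 25, 27, 49, 81, 125, ...) carry superblock
+ * and group descriptor copies.
+ */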
+
+/**
+ *	ext2_bg_has_super - number of blocks used by the superblock in group
+ *	@sb: superblock for filesystem
+ *	@group: group number to check
+ *
+ *	Return the number of blocks used by the superblock (primary or backup)
+ *	in this group.  Currently this will be only 0 or 1.
+ */
+int ext2_bg_has_super(struct super_block *sb, int group)
+{
+	if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
+	    !ext2_group_sparse(group))
+		return 0;
+	return 1;
+}
+
+/**
+ *	ext2_bg_num_gdb - number of blocks used by the group table in group
+ *	@sb: superblock for filesystem
+ *	@group: group number to check
+ *
+ *	Return the number of blocks used by the group descriptor table
+ *	(primary or backup) in this group.  In the future there may be a
+ *	different number of descriptor blocks in each group.
+ */
+unsigned long ext2_bg_num_gdb(struct super_block *sb, int group)
+{
+	if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
+	    !ext2_group_sparse(group))
+		return 0;
+	return EXT2_SB(sb)->s_gdb_count;
+}
+
+#ifdef CONFIG_EXT2_CHECK
+/* Called at mount-time, super-block is locked */
+void ext2_check_blocks_bitmap (struct super_block * sb)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct ext2_super_block * es;
+	unsigned long desc_count, bitmap_count, x, j;
+	unsigned long desc_blocks;
+	struct ext2_group_desc * desc;
+	int i;
+
+	es = EXT2_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	desc = NULL;
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		desc = ext2_get_group_desc (sb, i, NULL);
+		if (!desc)
+			continue;
+		desc_count += le16_to_cpu(desc->bg_free_blocks_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_block_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+
+		if (ext2_bg_has_super(sb, i) &&
+				!ext2_test_bit(0, bitmap_bh->b_data))
+			ext2_error(sb, __FUNCTION__,
+				   "Superblock in group %d is marked free", i);
+
+		desc_blocks = ext2_bg_num_gdb(sb, i);
+		for (j = 0; j < desc_blocks; j++)
+			if (!ext2_test_bit(j + 1, bitmap_bh->b_data))
+				ext2_error(sb, __FUNCTION__,
+					   "Descriptor block #%ld in group "
+					   "%d is marked free", j, i);
+
+		if (!block_in_use(le32_to_cpu(desc->bg_block_bitmap),
+					sb, bitmap_bh->b_data))
+			ext2_error(sb, "ext2_check_blocks_bitmap",
+				    "Block bitmap for group %d is marked free",
+				    i);
+
+		if (!block_in_use(le32_to_cpu(desc->bg_inode_bitmap),
+					sb, bitmap_bh->b_data))
+			ext2_error(sb, "ext2_check_blocks_bitmap",
+				    "Inode bitmap for group %d is marked free",
+				    i);
+
+		for (j = 0; j < EXT2_SB(sb)->s_itb_per_group; j++)
+			if (!block_in_use(le32_to_cpu(desc->bg_inode_table) + j,
+						sb, bitmap_bh->b_data))
+				ext2_error (sb, "ext2_check_blocks_bitmap",
+					    "Block #%ld of the inode table in "
+					    "group %d is marked free", j, i);
+
+		x = ext2_count_free(bitmap_bh, sb->s_blocksize);
+		if (le16_to_cpu(desc->bg_free_blocks_count) != x)
+			ext2_error (sb, "ext2_check_blocks_bitmap",
+				    "Wrong free blocks count for group %d, "
+				    "stored = %d, counted = %lu", i,
+				    le16_to_cpu(desc->bg_free_blocks_count), x);
+		bitmap_count += x;
+	}
+	if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
+		ext2_error (sb, "ext2_check_blocks_bitmap",
+			"Wrong free blocks count in super block, "
+			"stored = %lu, counted = %lu",
+			(unsigned long)le32_to_cpu(es->s_free_blocks_count),
+			bitmap_count);
+	brelse(bitmap_bh);
+}
+#endif
diff --git a/fs/ext2/bitmap.c b/fs/ext2/bitmap.c
new file mode 100644
index 0000000..20145b7
--- /dev/null
+++ b/fs/ext2/bitmap.c
@@ -0,0 +1,25 @@
+/*
+ *  linux/fs/ext2/bitmap.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include <linux/buffer_head.h>
+
+static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
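+/*
+ * nibblemap[n] is the number of zero bits in the 4-bit value n, so each
+ * byte needs two lookups: 0xa5 -> nibblemap[0x5] + nibblemap[0xa]
+ * = 2 + 2 = 4 free (zero) bits.
+ */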
+unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
+{
+	unsigned int i;
+	unsigned long sum = 0;
+	
+	if (!map) 
+		return (0);
+	for (i = 0; i < numchars; i++)
+		sum += nibblemap[map->b_data[i] & 0xf] +
+			nibblemap[(map->b_data[i] >> 4) & 0xf];
+	return (sum);
+}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
new file mode 100644
index 0000000..5b5f528
--- /dev/null
+++ b/fs/ext2/dir.c
@@ -0,0 +1,673 @@
+/*
+ *  linux/fs/ext2/dir.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/dir.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 directory handling functions
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *
+ * All code that works with directory layout had been switched to pagecache
+ * and moved here. AV
+ */
+
+#include "ext2.h"
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+typedef struct ext2_dir_entry_2 ext2_dirent;
+
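+/*
+ * On disk a directory entry is, roughly:
+ *
+ *	__le32 inode;      inode number (0 marks an unused slot)
+ *	__le16 rec_len;    distance to the next entry in this chunk
+ *	__u8   name_len;   length of the name that follows
+ *	__u8   file_type;  EXT2_FT_* (with the FILETYPE feature)
+ *	char   name[];     not NUL-terminated
+ *
+ * rec_len may be larger than the entry strictly needs; the slack is what
+ * ext2_add_link() carves new entries out of.
+ */
+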
+/*
+ * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
+ * more robust, but we have what we have
+ */
+static inline unsigned ext2_chunk_size(struct inode *inode)
+{
+	return inode->i_sb->s_blocksize;
+}
+
+static inline void ext2_put_page(struct page *page)
+{
+	kunmap(page);
+	page_cache_release(page);
+}
+
+static inline unsigned long dir_pages(struct inode *inode)
+{
+	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
+}
+
+/*
+ * Return the offset into page `page_nr' of the last valid
+ * byte in that page, plus one.
+ */
+static unsigned
+ext2_last_byte(struct inode *inode, unsigned long page_nr)
+{
+	unsigned last_byte = inode->i_size;
+
+	last_byte -= page_nr << PAGE_CACHE_SHIFT;
+	if (last_byte > PAGE_CACHE_SIZE)
+		last_byte = PAGE_CACHE_SIZE;
+	return last_byte;
+}
+
+static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
+{
+	struct inode *dir = page->mapping->host;
+	int err = 0;
+	dir->i_version++;
+	page->mapping->a_ops->commit_write(NULL, page, from, to);
+	if (IS_DIRSYNC(dir))
+		err = write_one_page(page, 1);
+	else
+		unlock_page(page);
+	return err;
+}
+
+static void ext2_check_page(struct page *page)
+{
+	struct inode *dir = page->mapping->host;
+	struct super_block *sb = dir->i_sb;
+	unsigned chunk_size = ext2_chunk_size(dir);
+	char *kaddr = page_address(page);
+	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
+	unsigned offs, rec_len;
+	unsigned limit = PAGE_CACHE_SIZE;
+	ext2_dirent *p;
+	char *error;
+
+	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_CACHE_MASK;
+		if (limit & (chunk_size - 1))
+			goto Ebadsize;
+		if (!limit)
+			goto out;
+	}
+	for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
+		p = (ext2_dirent *)(kaddr + offs);
+		rec_len = le16_to_cpu(p->rec_len);
+
+		if (rec_len < EXT2_DIR_REC_LEN(1))
+			goto Eshort;
+		if (rec_len & 3)
+			goto Ealign;
+		if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+			goto Enamelen;
+		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+			goto Espan;
+		if (le32_to_cpu(p->inode) > max_inumber)
+			goto Einumber;
+	}
+	if (offs != limit)
+		goto Eend;
+out:
+	SetPageChecked(page);
+	return;
+
+	/* Too bad, we had an error */
+
+Ebadsize:
+	ext2_error(sb, "ext2_check_page",
+		"size of directory #%lu is not a multiple of chunk size",
+		dir->i_ino
+	);
+	goto fail;
+Eshort:
+	error = "rec_len is smaller than minimal";
+	goto bad_entry;
+Ealign:
+	error = "unaligned directory entry";
+	goto bad_entry;
+Enamelen:
+	error = "rec_len is too small for name_len";
+	goto bad_entry;
+Espan:
+	error = "directory entry across blocks";
+	goto bad_entry;
+Einumber:
+	error = "inode out of bounds";
+bad_entry:
+	ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - "
+		"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+		dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		(unsigned long) le32_to_cpu(p->inode),
+		rec_len, p->name_len);
+	goto fail;
+Eend:
+	p = (ext2_dirent *)(kaddr + offs);
+	ext2_error (sb, "ext2_check_page",
+		"entry in directory #%lu spans the page boundary"
+		"offset=%lu, inode=%lu",
+		dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		(unsigned long) le32_to_cpu(p->inode));
+fail:
+	SetPageChecked(page);
+	SetPageError(page);
+}
+
+static struct page * ext2_get_page(struct inode *dir, unsigned long n)
+{
+	struct address_space *mapping = dir->i_mapping;
+	struct page *page = read_cache_page(mapping, n,
+				(filler_t*)mapping->a_ops->readpage, NULL);
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		kmap(page);
+		if (!PageUptodate(page))
+			goto fail;
+		if (!PageChecked(page))
+			ext2_check_page(page);
+		if (PageError(page))
+			goto fail;
+	}
+	return page;
+
+fail:
+	ext2_put_page(page);
+	return ERR_PTR(-EIO);
+}
+
+/*
+ * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
+ *
+ * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
+ */
+static inline int ext2_match (int len, const char * const name,
+					struct ext2_dir_entry_2 * de)
+{
+	if (len != de->name_len)
+		return 0;
+	if (!de->inode)
+		return 0;
+	return !memcmp(name, de->name, len);
+}
+
+/*
+ * p is at least 6 bytes before the end of page
+ */
+static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
+{
+	return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
+}
+
+static inline unsigned 
+ext2_validate_entry(char *base, unsigned offset, unsigned mask)
+{
+	ext2_dirent *de = (ext2_dirent*)(base + offset);
+	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
+	while ((char*)p < (char*)de) {
+		if (p->rec_len == 0)
+			break;
+		p = ext2_next_entry(p);
+	}
+	return (char *)p - base;
+}
+
+static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
+	[EXT2_FT_UNKNOWN]	= DT_UNKNOWN,
+	[EXT2_FT_REG_FILE]	= DT_REG,
+	[EXT2_FT_DIR]		= DT_DIR,
+	[EXT2_FT_CHRDEV]	= DT_CHR,
+	[EXT2_FT_BLKDEV]	= DT_BLK,
+	[EXT2_FT_FIFO]		= DT_FIFO,
+	[EXT2_FT_SOCK]		= DT_SOCK,
+	[EXT2_FT_SYMLINK]	= DT_LNK,
+};
+
+#define S_SHIFT 12
+static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
+	[S_IFREG >> S_SHIFT]	= EXT2_FT_REG_FILE,
+	[S_IFDIR >> S_SHIFT]	= EXT2_FT_DIR,
+	[S_IFCHR >> S_SHIFT]	= EXT2_FT_CHRDEV,
+	[S_IFBLK >> S_SHIFT]	= EXT2_FT_BLKDEV,
+	[S_IFIFO >> S_SHIFT]	= EXT2_FT_FIFO,
+	[S_IFSOCK >> S_SHIFT]	= EXT2_FT_SOCK,
+	[S_IFLNK >> S_SHIFT]	= EXT2_FT_SYMLINK,
+};
+
+static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
+{
+	mode_t mode = inode->i_mode;
+	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
+		de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+	else
+		de->file_type = 0;
+}
+
+static int
+ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+	loff_t pos = filp->f_pos;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	unsigned int offset = pos & ~PAGE_CACHE_MASK;
+	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned long npages = dir_pages(inode);
+	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
+	unsigned char *types = NULL;
+	int need_revalidate = (filp->f_version != inode->i_version);
+	int ret;
+
+	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
+		goto success;
+
+	if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
+		types = ext2_filetype_table;
+
+	for ( ; n < npages; n++, offset = 0) {
+		char *kaddr, *limit;
+		ext2_dirent *de;
+		struct page *page = ext2_get_page(inode, n);
+
+		if (IS_ERR(page)) {
+			ext2_error(sb, __FUNCTION__,
+				   "bad page in #%lu",
+				   inode->i_ino);
+			filp->f_pos += PAGE_CACHE_SIZE - offset;
+			ret = -EIO;
+			goto done;
+		}
+		kaddr = page_address(page);
+		if (need_revalidate) {
+			offset = ext2_validate_entry(kaddr, offset, chunk_mask);
+			need_revalidate = 0;
+		}
+		de = (ext2_dirent *)(kaddr+offset);
+		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
+		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
+			if (de->rec_len == 0) {
+				ext2_error(sb, __FUNCTION__,
+					"zero-length directory entry");
+				ret = -EIO;
+				ext2_put_page(page);
+				goto done;
+			}
+			if (de->inode) {
+				int over;
+				unsigned char d_type = DT_UNKNOWN;
+
+				if (types && de->file_type < EXT2_FT_MAX)
+					d_type = types[de->file_type];
+
+				offset = (char *)de - kaddr;
+				over = filldir(dirent, de->name, de->name_len,
+						(n<<PAGE_CACHE_SHIFT) | offset,
+						le32_to_cpu(de->inode), d_type);
+				if (over) {
+					ext2_put_page(page);
+					goto success;
+				}
+			}
+			filp->f_pos += le16_to_cpu(de->rec_len);
+		}
+		ext2_put_page(page);
+	}
+
+success:
+	ret = 0;
+done:
+	filp->f_version = inode->i_version;
+	return ret;
+}
+
+/*
+ *	ext2_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the page in which the entry was found, and the entry itself
+ * (as a parameter - res_dir). Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
+			struct dentry *dentry, struct page ** res_page)
+{
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
+	unsigned long start, n;
+	unsigned long npages = dir_pages(dir);
+	struct page *page = NULL;
+	struct ext2_inode_info *ei = EXT2_I(dir);
+	ext2_dirent * de;
+
+	if (npages == 0)
+		goto out;
+
+	/* OFFSET_CACHE */
+	*res_page = NULL;
+
+	start = ei->i_dir_start_lookup;
+	if (start >= npages)
+		start = 0;
+	n = start;
+	do {
+		char *kaddr;
+		page = ext2_get_page(dir, n);
+		if (!IS_ERR(page)) {
+			kaddr = page_address(page);
+			de = (ext2_dirent *) kaddr;
+			kaddr += ext2_last_byte(dir, n) - reclen;
+			while ((char *) de <= kaddr) {
+				if (de->rec_len == 0) {
+					ext2_error(dir->i_sb, __FUNCTION__,
+						"zero-length directory entry");
+					ext2_put_page(page);
+					goto out;
+				}
+				if (ext2_match (namelen, name, de))
+					goto found;
+				de = ext2_next_entry(de);
+			}
+			ext2_put_page(page);
+		}
+		if (++n >= npages)
+			n = 0;
+	} while (n != start);
+out:
+	return NULL;
+
+found:
+	*res_page = page;
+	ei->i_dir_start_lookup = n;
+	return de;
+}
+
+struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
+{
+	struct page *page = ext2_get_page(dir, 0);
+	ext2_dirent *de = NULL;
+
+	if (!IS_ERR(page)) {
+		de = ext2_next_entry((ext2_dirent *) page_address(page));
+		*p = page;
+	}
+	return de;
+}
+
+ino_t ext2_inode_by_name(struct inode * dir, struct dentry *dentry)
+{
+	ino_t res = 0;
+	struct ext2_dir_entry_2 * de;
+	struct page *page;
+	
+	de = ext2_find_entry (dir, dentry, &page);
+	if (de) {
+		res = le32_to_cpu(de->inode);
+		kunmap(page);
+		page_cache_release(page);
+	}
+	return res;
+}
+
+/* Releases the page */
+void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
+			struct page *page, struct inode *inode)
+{
+	unsigned from = (char *) de - (char *) page_address(page);
+	unsigned to = from + le16_to_cpu(de->rec_len);
+	int err;
+
+	lock_page(page);
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		BUG();
+	de->inode = cpu_to_le32(inode->i_ino);
+	ext2_set_de_type (de, inode);
+	err = ext2_commit_chunk(page, from, to);
+	ext2_put_page(page);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
+	mark_inode_dirty(dir);
+}
+
+/*
+ *	Parent is locked.
+ */
+int ext2_add_link (struct dentry *dentry, struct inode *inode)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	unsigned chunk_size = ext2_chunk_size(dir);
+	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
+	unsigned short rec_len, name_len;
+	struct page *page = NULL;
+	ext2_dirent * de;
+	unsigned long npages = dir_pages(dir);
+	unsigned long n;
+	char *kaddr;
+	unsigned from, to;
+	int err;
+
+	/*
+	 * We take care of directory expansion in the same loop.
+	 * This code plays outside i_size, so it locks the page
+	 * to protect that region.
+	 */
+	for (n = 0; n <= npages; n++) {
+		char *dir_end;
+
+		page = ext2_get_page(dir, n);
+		err = PTR_ERR(page);
+		if (IS_ERR(page))
+			goto out;
+		lock_page(page);
+		kaddr = page_address(page);
+		dir_end = kaddr + ext2_last_byte(dir, n);
+		de = (ext2_dirent *)kaddr;
+		kaddr += PAGE_CACHE_SIZE - reclen;
+		while ((char *)de <= kaddr) {
+			if ((char *)de == dir_end) {
+				/* We hit i_size */
+				name_len = 0;
+				rec_len = chunk_size;
+				de->rec_len = cpu_to_le16(chunk_size);
+				de->inode = 0;
+				goto got_it;
+			}
+			if (de->rec_len == 0) {
+				ext2_error(dir->i_sb, __FUNCTION__,
+					"zero-length directory entry");
+				err = -EIO;
+				goto out_unlock;
+			}
+			err = -EEXIST;
+			if (ext2_match (namelen, name, de))
+				goto out_unlock;
+			name_len = EXT2_DIR_REC_LEN(de->name_len);
+			rec_len = le16_to_cpu(de->rec_len);
+			if (!de->inode && rec_len >= reclen)
+				goto got_it;
+			if (rec_len >= name_len + reclen)
+				goto got_it;
+			de = (ext2_dirent *) ((char *) de + rec_len);
+		}
+		unlock_page(page);
+		ext2_put_page(page);
+	}
+	BUG();
+	return -EINVAL;
+
+got_it:
+	from = (char*)de - (char*)page_address(page);
+	to = from + rec_len;
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		goto out_unlock;
+	if (de->inode) {
+		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
+		de1->rec_len = cpu_to_le16(rec_len - name_len);
+		de->rec_len = cpu_to_le16(name_len);
+		de = de1;
+	}
+	de->name_len = namelen;
+	memcpy (de->name, name, namelen);
+	de->inode = cpu_to_le32(inode->i_ino);
+	ext2_set_de_type (de, inode);
+	err = ext2_commit_chunk(page, from, to);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
+	mark_inode_dirty(dir);
+	/* OFFSET_CACHE */
+out_put:
+	ext2_put_page(page);
+out:
+	return err;
+out_unlock:
+	unlock_page(page);
+	goto out_put;
+}
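+
+/*
+ * Example of the split done under got_it above: EXT2_DIR_REC_LEN() rounds
+ * 8 + name_len up to a multiple of 4, so a live entry with name_len = 5
+ * only needs 16 bytes.  If its rec_len is 40, it is trimmed to 16 bytes
+ * and the remaining 24 bytes become the new entry - enough for any name
+ * of up to 16 characters.
+ */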
+
+/*
+ * ext2_delete_entry deletes a directory entry by merging it with the
+ * previous entry. Page is up-to-date. Releases the page.
+ */
+int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	char *kaddr = page_address(page);
+	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
+	unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
+	ext2_dirent * pde = NULL;
+	ext2_dirent * de = (ext2_dirent *) (kaddr + from);
+	int err;
+
+	while ((char*)de < (char*)dir) {
+		if (de->rec_len == 0) {
+			ext2_error(inode->i_sb, __FUNCTION__,
+				"zero-length directory entry");
+			err = -EIO;
+			goto out;
+		}
+		pde = de;
+		de = ext2_next_entry(de);
+	}
+	if (pde)
+		from = (char*)pde - (char*)page_address(page);
+	lock_page(page);
+	err = mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		BUG();
+	if (pde)
+		pde->rec_len = cpu_to_le16(to-from);
+	dir->inode = 0;
+	err = ext2_commit_chunk(page, from, to);
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
+	mark_inode_dirty(inode);
+out:
+	ext2_put_page(page);
+	return err;
+}
+
+/*
+ * Set the first fragment of directory.
+ */
+int ext2_make_empty(struct inode *inode, struct inode *parent)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page = grab_cache_page(mapping, 0);
+	unsigned chunk_size = ext2_chunk_size(inode);
+	struct ext2_dir_entry_2 * de;
+	int err;
+	void *kaddr;
+
+	if (!page)
+		return -ENOMEM;
+	err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
+	if (err) {
+		unlock_page(page);
+		goto fail;
+	}
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, chunk_size);
+	de = (struct ext2_dir_entry_2 *)kaddr;
+	de->name_len = 1;
+	de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
+	memcpy (de->name, ".\0\0", 4);
+	de->inode = cpu_to_le32(inode->i_ino);
+	ext2_set_de_type (de, inode);
+
+	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
+	de->name_len = 2;
+	de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
+	de->inode = cpu_to_le32(parent->i_ino);
+	memcpy (de->name, "..\0", 4);
+	ext2_set_de_type (de, inode);
+	kunmap_atomic(kaddr, KM_USER0);
+	err = ext2_commit_chunk(page, 0, chunk_size);
+fail:
+	page_cache_release(page);
+	return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int ext2_empty_dir (struct inode * inode)
+{
+	struct page *page = NULL;
+	unsigned long i, npages = dir_pages(inode);
+
+	for (i = 0; i < npages; i++) {
+		char *kaddr;
+		ext2_dirent * de;
+		page = ext2_get_page(inode, i);
+
+		if (IS_ERR(page))
+			continue;
+
+		kaddr = page_address(page);
+		de = (ext2_dirent *)kaddr;
+		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);
+
+		while ((char *)de <= kaddr) {
+			if (de->rec_len == 0) {
+				ext2_error(inode->i_sb, __FUNCTION__,
+					"zero-length directory entry");
+				printk("kaddr=%p, de=%p\n", kaddr, de);
+				goto not_empty;
+			}
+			if (de->inode != 0) {
+				/* check for . and .. */
+				if (de->name[0] != '.')
+					goto not_empty;
+				if (de->name_len > 2)
+					goto not_empty;
+				if (de->name_len < 2) {
+					if (de->inode !=
+					    cpu_to_le32(inode->i_ino))
+						goto not_empty;
+				} else if (de->name[1] != '.')
+					goto not_empty;
+			}
+			de = ext2_next_entry(de);
+		}
+		ext2_put_page(page);
+	}
+	return 1;
+
+not_empty:
+	ext2_put_page(page);
+	return 0;
+}
+
+struct file_operations ext2_dir_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.readdir	= ext2_readdir,
+	.ioctl		= ext2_ioctl,
+	.fsync		= ext2_sync_file,
+};
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
new file mode 100644
index 0000000..9f1a40e
--- /dev/null
+++ b/fs/ext2/ext2.h
@@ -0,0 +1,160 @@
+#include <linux/fs.h>
+#include <linux/ext2_fs.h>
+
+/*
+ * second extended file system inode data in memory
+ */
+struct ext2_inode_info {
+	__le32	i_data[15];
+	__u32	i_flags;
+	__u32	i_faddr;
+	__u8	i_frag_no;
+	__u8	i_frag_size;
+	__u16	i_state;
+	__u32	i_file_acl;
+	__u32	i_dir_acl;
+	__u32	i_dtime;
+
+	/*
+	 * i_block_group is the number of the block group which contains
+	 * this file's inode.  Constant across the lifetime of the inode,
+	 * it is used for making block allocation decisions - we try to
+	 * place a file's data blocks near its inode block, and new inodes
+	 * near to their parent directory's inode.
+	 */
+	__u32	i_block_group;
+
+	/*
+	 * i_next_alloc_block is the logical (file-relative) number of the
+	 * most-recently-allocated block in this file.  Yes, it is misnamed.
+	 * We use this for detecting linearly ascending allocation requests.
+	 */
+	__u32	i_next_alloc_block;
+
+	/*
+	 * i_next_alloc_goal is the *physical* companion to i_next_alloc_block.
+	 * It is the physical block number of the block which was most recently
+	 * allocated to this file.  This gives us the goal (target) for the next
+	 * allocation when we detect linearly ascending requests.
+	 */
+	__u32	i_next_alloc_goal;
+	__u32	i_prealloc_block;
+	__u32	i_prealloc_count;
+	__u32	i_dir_start_lookup;
+#ifdef CONFIG_EXT2_FS_XATTR
+	/*
+	 * Extended attributes can be read independently of the main file
+	 * data. Taking i_sem even when reading would cause contention
+	 * between readers of EAs and writers of regular file data, so
+	 * instead we synchronize on xattr_sem when reading or changing
+	 * EAs.
+	 */
+	struct rw_semaphore xattr_sem;
+#endif
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	struct posix_acl	*i_acl;
+	struct posix_acl	*i_default_acl;
+#endif
+	rwlock_t i_meta_lock;
+	struct inode	vfs_inode;
+};
+
+/*
+ * Inode dynamic state flags
+ */
+#define EXT2_STATE_NEW			0x00000001 /* inode is newly created */
+
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * Ok, these declarations are also in <linux/kernel.h> but none of the
+ * ext2 source programs needs to include it so they are duplicated here.
+ */
+
+static inline struct ext2_inode_info *EXT2_I(struct inode *inode)
+{
+	return container_of(inode, struct ext2_inode_info, vfs_inode);
+}
+
+/* balloc.c */
+extern int ext2_bg_has_super(struct super_block *sb, int group);
+extern unsigned long ext2_bg_num_gdb(struct super_block *sb, int group);
+extern int ext2_new_block (struct inode *, unsigned long,
+			   __u32 *, __u32 *, int *);
+extern void ext2_free_blocks (struct inode *, unsigned long,
+			      unsigned long);
+extern unsigned long ext2_count_free_blocks (struct super_block *);
+extern unsigned long ext2_count_dirs (struct super_block *);
+extern void ext2_check_blocks_bitmap (struct super_block *);
+extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
+						    unsigned int block_group,
+						    struct buffer_head ** bh);
+
+/* dir.c */
+extern int ext2_add_link (struct dentry *, struct inode *);
+extern ino_t ext2_inode_by_name(struct inode *, struct dentry *);
+extern int ext2_make_empty(struct inode *, struct inode *);
+extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,struct dentry *, struct page **);
+extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_empty_dir (struct inode *);
+extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
+extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *);
+
+/* fsync.c */
+extern int ext2_sync_file (struct file *, struct dentry *, int);
+
+/* ialloc.c */
+extern struct inode * ext2_new_inode (struct inode *, int);
+extern void ext2_free_inode (struct inode *);
+extern unsigned long ext2_count_free_inodes (struct super_block *);
+extern void ext2_check_inodes_bitmap (struct super_block *);
+extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
+
+/* inode.c */
+extern void ext2_read_inode (struct inode *);
+extern int ext2_write_inode (struct inode *, int);
+extern void ext2_delete_inode (struct inode *);
+extern int ext2_sync_inode (struct inode *);
+extern void ext2_discard_prealloc (struct inode *);
+extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
+extern void ext2_truncate (struct inode *);
+extern int ext2_setattr (struct dentry *, struct iattr *);
+extern void ext2_set_inode_flags(struct inode *inode);
+
+/* ioctl.c */
+extern int ext2_ioctl (struct inode *, struct file *, unsigned int,
+		       unsigned long);
+
+/* super.c */
+extern void ext2_error (struct super_block *, const char *, const char *, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern void ext2_warning (struct super_block *, const char *, const char *, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern void ext2_update_dynamic_rev (struct super_block *sb);
+extern void ext2_write_super (struct super_block *);
+
+/*
+ * Inodes and files operations
+ */
+
+/* dir.c */
+extern struct file_operations ext2_dir_operations;
+
+/* file.c */
+extern struct inode_operations ext2_file_inode_operations;
+extern struct file_operations ext2_file_operations;
+
+/* inode.c */
+extern struct address_space_operations ext2_aops;
+extern struct address_space_operations ext2_nobh_aops;
+
+/* namei.c */
+extern struct inode_operations ext2_dir_inode_operations;
+extern struct inode_operations ext2_special_inode_operations;
+
+/* symlink.c */
+extern struct inode_operations ext2_fast_symlink_inode_operations;
+extern struct inode_operations ext2_symlink_inode_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
new file mode 100644
index 0000000..f5e8614
--- /dev/null
+++ b/fs/ext2/file.c
@@ -0,0 +1,68 @@
+/*
+ *  linux/fs/ext2/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 fs regular file handling primitives
+ *
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ * 	(jj@sunsite.ms.mff.cuni.cz)
+ */
+
+#include <linux/time.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Called when an inode is released. Note that this is different
+ * from ext2_open_file: open gets called at every open, but release
+ * gets called only when /all/ the files are closed.
+ */
+static int ext2_release_file (struct inode * inode, struct file * filp)
+{
+	if (filp->f_mode & FMODE_WRITE)
+		ext2_discard_prealloc (inode);
+	return 0;
+}
+
+/*
+ * We have mostly NULL's here: the current defaults are ok for
+ * the ext2 filesystem.
+ */
+struct file_operations ext2_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= generic_file_aio_write,
+	.ioctl		= ext2_ioctl,
+	.mmap		= generic_file_mmap,
+	.open		= generic_file_open,
+	.release	= ext2_release_file,
+	.fsync		= ext2_sync_file,
+	.readv		= generic_file_readv,
+	.writev		= generic_file_writev,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations ext2_file_inode_operations = {
+	.truncate	= ext2_truncate,
+#ifdef CONFIG_EXT2_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext2_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.setattr	= ext2_setattr,
+	.permission	= ext2_permission,
+};
diff --git a/fs/ext2/fsync.c b/fs/ext2/fsync.c
new file mode 100644
index 0000000..c9c2e5f
--- /dev/null
+++ b/fs/ext2/fsync.c
@@ -0,0 +1,51 @@
+/*
+ *  linux/fs/ext2/fsync.c
+ *
+ *  Copyright (C) 1993  Stephen Tweedie (sct@dcs.ed.ac.uk)
+ *  from
+ *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
+ *                      Laboratoire MASI - Institut Blaise Pascal
+ *                      Universite Pierre et Marie Curie (Paris VI)
+ *  from
+ *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
+ * 
+ *  ext2fs fsync primitive
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ * 
+ *  Removed unnecessary code duplication for little endian machines
+ *  and excessive __inline__s. 
+ *        Andi Kleen, 1997
+ *
+ * Major simplifications and cleanup - we only need to do the metadata, because
+ * we can depend on generic_block_fdatasync() to sync the data blocks.
+ */
+
+#include "ext2.h"
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>		/* for fsync_inode_buffers() */
+
+
+/*
+ *	File may be NULL when we are called. Perhaps we shouldn't
+ *	even pass file to fsync ?
+ */
+
+int ext2_sync_file(struct file *file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	int err;
+	int ret;
+
+	ret = sync_mapping_buffers(inode->i_mapping);
+	if (!(inode->i_state & I_DIRTY))
+		return ret;
+	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+		return ret;
+
+	err = ext2_sync_inode(inode);
+	if (ret == 0)
+		ret = err;
+	return ret;
+}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
new file mode 100644
index 0000000..77e0591
--- /dev/null
+++ b/fs/ext2/ialloc.c
@@ -0,0 +1,735 @@
+/*
+ *  linux/fs/ext2/ialloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  BSD ufs-inspired inode and directory allocation by 
+ *  Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/config.h>
+#include <linux/quotaops.h>
+#include <linux/sched.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/random.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * ialloc.c contains the inodes allocation and deallocation routines
+ */
+
+/*
+ * The free inodes are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free blocks count in the block.
+ */
+
+
+/*
+ * Read the inode allocation bitmap for a given block_group, reading
+ * into the specified slot in the superblock's bitmap cache.
+ *
+ * Return buffer_head of bitmap on success or NULL.
+ */
+static struct buffer_head *
+read_inode_bitmap(struct super_block * sb, unsigned long block_group)
+{
+	struct ext2_group_desc *desc;
+	struct buffer_head *bh = NULL;
+
+	desc = ext2_get_group_desc(sb, block_group, NULL);
+	if (!desc)
+		goto error_out;
+
+	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
+	if (!bh)
+		ext2_error(sb, "read_inode_bitmap",
+			    "Cannot read inode bitmap - "
+			    "block_group = %lu, inode_bitmap = %u",
+			    block_group, le32_to_cpu(desc->bg_inode_bitmap));
+error_out:
+	return bh;
+}
+
+static void ext2_release_inode(struct super_block *sb, int group, int dir)
+{
+	struct ext2_group_desc * desc;
+	struct buffer_head *bh;
+
+	desc = ext2_get_group_desc(sb, group, &bh);
+	if (!desc) {
+		ext2_error(sb, "ext2_release_inode",
+			"can't get descriptor for group %d", group);
+		return;
+	}
+
+	spin_lock(sb_bgl_lock(EXT2_SB(sb), group));
+	desc->bg_free_inodes_count =
+		cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
+	if (dir)
+		desc->bg_used_dirs_count =
+			cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
+	spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
+	if (dir)
+		percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
+	sb->s_dirt = 1;
+	mark_buffer_dirty(bh);
+}
+
+/*
+ * NOTE! When we get the inode, we're the only people
+ * that have access to it, and as such there are no
+ * race conditions we have to worry about. The inode
+ * is not on the hash-lists, and it cannot be reached
+ * through the filesystem because the directory entry
+ * has been deleted earlier.
+ *
+ * HOWEVER: we must make sure that we get no aliases,
+ * which means that we have to call "clear_inode()"
+ * _before_ we mark the inode not in use in the inode
+ * bitmaps. Otherwise a newly created file might use
+ * the same inode number (not actually the same pointer
+ * though), and then we'd have two inodes sharing the
+ * same inode number and space on the harddisk.
+ */
+void ext2_free_inode (struct inode * inode)
+{
+	struct super_block * sb = inode->i_sb;
+	int is_directory;
+	unsigned long ino;
+	struct buffer_head *bitmap_bh = NULL;
+	unsigned long block_group;
+	unsigned long bit;
+	struct ext2_super_block * es;
+
+	ino = inode->i_ino;
+	ext2_debug ("freeing inode %lu\n", ino);
+
+	/*
+	 * Note: we must free any quota before locking the superblock,
+	 * as writing the quota to disk may need the lock as well.
+	 */
+	if (!is_bad_inode(inode)) {
+		/* Quota is already initialized in iput() */
+		ext2_xattr_delete_inode(inode);
+	    	DQUOT_FREE_INODE(inode);
+		DQUOT_DROP(inode);
+	}
+
+	es = EXT2_SB(sb)->s_es;
+	is_directory = S_ISDIR(inode->i_mode);
+
+	/* Do this BEFORE marking the inode not in use or returning an error */
+	clear_inode (inode);
+
+	if (ino < EXT2_FIRST_INO(sb) ||
+	    ino > le32_to_cpu(es->s_inodes_count)) {
+		ext2_error (sb, "ext2_free_inode",
+			    "reserved or nonexistent inode %lu", ino);
+		goto error_return;
+	}
+	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
+	bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
+	brelse(bitmap_bh);
+	bitmap_bh = read_inode_bitmap(sb, block_group);
+	if (!bitmap_bh)
+		goto error_return;
+
+	/* Ok, now we can actually update the inode bitmaps.. */
+	if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
+				bit, (void *) bitmap_bh->b_data))
+		ext2_error (sb, "ext2_free_inode",
+			      "bit already cleared for inode %lu", ino);
+	else
+		ext2_release_inode(sb, block_group, is_directory);
+	mark_buffer_dirty(bitmap_bh);
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
+error_return:
+	brelse(bitmap_bh);
+}
+
+/*
+ * We perform asynchronous prereading of the new inode's inode block when
+ * we create the inode, in the expectation that the inode will be written
+ * back soon.  There are two reasons:
+ *
+ * - When creating a large number of files, the async prereads will be
+ *   nicely merged into large reads
+ * - When writing out a large number of inodes, we don't need to keep on
+ *   stalling the writes while we read the inode block.
+ *
+ * FIXME: ext2_get_group_desc() needs to be simplified.
+ */
+static void ext2_preread_inode(struct inode *inode)
+{
+	unsigned long block_group;
+	unsigned long offset;
+	unsigned long block;
+	struct buffer_head *bh;
+	struct ext2_group_desc * gdp;
+	struct backing_dev_info *bdi;
+
+	bdi = inode->i_mapping->backing_dev_info;
+	if (bdi_read_congested(bdi))
+		return;
+	if (bdi_write_congested(bdi))
+		return;
+
+	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
+	gdp = ext2_get_group_desc(inode->i_sb, block_group, &bh);
+	if (gdp == NULL)
+		return;
+
+	/*
+	 * Figure out the offset within the block group inode table
+	 */
+	offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
+				EXT2_INODE_SIZE(inode->i_sb);
+	block = le32_to_cpu(gdp->bg_inode_table) +
+				(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
+	sb_breadahead(inode->i_sb, block);
+}
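+
+/*
+ * For example, with 128-byte inodes and 1024-byte blocks, inode 100 (in
+ * the first group) sits at offset 99 * 128 = 12672 into the inode table,
+ * i.e. in block bg_inode_table + 12.
+ */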
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, the group
+ * with the fewest existing directories is chosen from among the groups
+ * with above-average free space.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+static int find_group_dir(struct super_block *sb, struct inode *parent)
+{
+	int ngroups = EXT2_SB(sb)->s_groups_count;
+	int avefreei = ext2_count_free_inodes(sb) / ngroups;
+	struct ext2_group_desc *desc, *best_desc = NULL;
+	struct buffer_head *bh, *best_bh = NULL;
+	int group, best_group = -1;
+
+	for (group = 0; group < ngroups; group++) {
+		desc = ext2_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+			continue;
+		if (!best_desc || 
+		    (le16_to_cpu(desc->bg_free_blocks_count) >
+		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
+			best_group = group;
+			best_desc = desc;
+			best_bh = bh;
+		}
+	}
+	if (!best_desc)
+		return -1;
+
+	return best_group;
+}
+
+/* 
+ * Orlov's allocator for directories. 
+ * 
+ * We always try to spread first-level directories.
+ *
+ * If there are blockgroups with both free inodes and free blocks counts 
+ * not worse than average, we return the one with the smallest directory
+ * count.  Otherwise we simply return a random group.
+ *
+ * For the rest, the rules are as follows:
+ *
+ * It's OK to put a directory into a group unless
+ * it has too many directories already (max_dirs) or
+ * it has too few free inodes left (min_inodes) or
+ * it has too few free blocks left (min_blocks) or
+ * it's already running too large a debt (max_debt).
+ * The parent's group is preferred; if it doesn't satisfy these
+ * conditions we search cyclically through the rest.  If none
+ * of the groups look good we just look for a group with more
+ * free inodes than average (starting at the parent's group).
+ * 
+ * Debt is incremented each time we allocate a directory and decremented 
+ * when we allocate an inode, within 0--255. 
+ */ 
+
+#define INODE_COST 64
+#define BLOCK_COST 256
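+
+/*
+ * For example, with 8192 blocks and 2048 inodes per group and an average
+ * of 300 blocks per directory, max_debt below becomes
+ * 8192 / max(300, BLOCK_COST) = 27; 27 * INODE_COST = 1728 does not
+ * exceed the inodes per group, so max_debt stays 27 after the [1, 255]
+ * clamp.
+ */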
+
+static int find_group_orlov(struct super_block *sb, struct inode *parent)
+{
+	int parent_group = EXT2_I(parent)->i_block_group;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
+	int ngroups = sbi->s_groups_count;
+	int inodes_per_group = EXT2_INODES_PER_GROUP(sb);
+	int freei;
+	int avefreei;
+	int free_blocks;
+	int avefreeb;
+	int blocks_per_dir;
+	int ndirs;
+	int max_debt, max_dirs, min_blocks, min_inodes;
+	int group = -1, i;
+	struct ext2_group_desc *desc;
+	struct buffer_head *bh;
+
+	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
+	avefreei = freei / ngroups;
+	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+	avefreeb = free_blocks / ngroups;
+	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
+
+	if ((parent == sb->s_root->d_inode) ||
+	    (EXT2_I(parent)->i_flags & EXT2_TOPDIR_FL)) {
+		struct ext2_group_desc *best_desc = NULL;
+		struct buffer_head *best_bh = NULL;
+		int best_ndir = inodes_per_group;
+		int best_group = -1;
+
+		get_random_bytes(&group, sizeof(group));
+		parent_group = (unsigned)group % ngroups;
+		for (i = 0; i < ngroups; i++) {
+			group = (parent_group + i) % ngroups;
+			desc = ext2_get_group_desc (sb, group, &bh);
+			if (!desc || !desc->bg_free_inodes_count)
+				continue;
+			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
+				continue;
+			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+				continue;
+			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
+				continue;
+			best_group = group;
+			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
+			best_desc = desc;
+			best_bh = bh;
+		}
+		if (best_group >= 0) {
+			desc = best_desc;
+			bh = best_bh;
+			group = best_group;
+			goto found;
+		}
+		goto fallback;
+	}
+
+	if (ndirs == 0)
+		ndirs = 1;	/* percpu_counters are approximate... */
+
+	blocks_per_dir = (le32_to_cpu(es->s_blocks_count)-free_blocks) / ndirs;
+
+	max_dirs = ndirs / ngroups + inodes_per_group / 16;
+	min_inodes = avefreei - inodes_per_group / 4;
+	min_blocks = avefreeb - EXT2_BLOCKS_PER_GROUP(sb) / 4;
+
+	max_debt = EXT2_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST);
+	if (max_debt * INODE_COST > inodes_per_group)
+		max_debt = inodes_per_group / INODE_COST;
+	if (max_debt > 255)
+		max_debt = 255;
+	if (max_debt == 0)
+		max_debt = 1;
+
+	for (i = 0; i < ngroups; i++) {
+		group = (parent_group + i) % ngroups;
+		desc = ext2_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (sbi->s_debts[group] >= max_debt)
+			continue;
+		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
+			continue;
+		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
+			continue;
+		goto found;
+	}
+
+fallback:
+	for (i = 0; i < ngroups; i++) {
+		group = (parent_group + i) % ngroups;
+		desc = ext2_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
+			goto found;
+	}
+
+	if (avefreei) {
+		/*
+		 * The free-inodes counter is approximate, and for really small
+		 * filesystems the above test can fail to find any block groups.
+		 */
+		avefreei = 0;
+		goto fallback;
+	}
+
+	return -1;
+
+found:
+	return group;
+}
+
+static int find_group_other(struct super_block *sb, struct inode *parent)
+{
+	int parent_group = EXT2_I(parent)->i_block_group;
+	int ngroups = EXT2_SB(sb)->s_groups_count;
+	struct ext2_group_desc *desc;
+	struct buffer_head *bh;
+	int group, i;
+
+	/*
+	 * Try to place the inode in its parent directory
+	 */
+	group = parent_group;
+	desc = ext2_get_group_desc (sb, group, &bh);
+	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+			le16_to_cpu(desc->bg_free_blocks_count))
+		goto found;
+
+	/*
+	 * We're going to place this inode in a different blockgroup from its
+	 * parent.  We want to cause files in a common directory to all land in
+	 * the same blockgroup.  But we want files which are in a different
+	 * directory which shares a blockgroup with our parent to land in a
+	 * different blockgroup.
+	 *
+	 * So add our directory's i_ino into the starting point for the hash.
+	 */
+	group = (group + parent->i_ino) % ngroups;
+
+	/*
+	 * Use a quadratic hash to find a group with a free inode and some
+	 * free blocks.
+	 */
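+	/*
+	 * For example (numbers assumed for illustration): with 32 groups and
+	 * a hashed starting group of 10, the cumulative steps 1, 2, 4, 8, 16
+	 * probe groups 11, 13, 17, 25 and 9 (41 mod 32) before falling back
+	 * to the linear scan below.
+	 */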
+	for (i = 1; i < ngroups; i <<= 1) {
+		group += i;
+		if (group >= ngroups)
+			group -= ngroups;
+		desc = ext2_get_group_desc (sb, group, &bh);
+		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+				le16_to_cpu(desc->bg_free_blocks_count))
+			goto found;
+	}
+
+	/*
+	 * That failed: try linear search for a free inode, even if that group
+	 * has no free blocks.
+	 */
+	group = parent_group;
+	for (i = 0; i < ngroups; i++) {
+		if (++group >= ngroups)
+			group = 0;
+		desc = ext2_get_group_desc (sb, group, &bh);
+		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+			goto found;
+	}
+
+	return -1;
+
+found:
+	return group;
+}
+
+struct inode *ext2_new_inode(struct inode *dir, int mode)
+{
+	struct super_block *sb;
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *bh2;
+	int group, i;
+	ino_t ino = 0;
+	struct inode * inode;
+	struct ext2_group_desc *gdp;
+	struct ext2_super_block *es;
+	struct ext2_inode_info *ei;
+	struct ext2_sb_info *sbi;
+	int err;
+
+	sb = dir->i_sb;
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	ei = EXT2_I(inode);
+	sbi = EXT2_SB(sb);
+	es = sbi->s_es;
+	if (S_ISDIR(mode)) {
+		if (test_opt(sb, OLDALLOC))
+			group = find_group_dir(sb, dir);
+		else
+			group = find_group_orlov(sb, dir);
+	} else 
+		group = find_group_other(sb, dir);
+
+	if (group == -1) {
+		err = -ENOSPC;
+		goto fail;
+	}
+
+	for (i = 0; i < sbi->s_groups_count; i++) {
+		gdp = ext2_get_group_desc(sb, group, &bh2);
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, group);
+		if (!bitmap_bh) {
+			err = -EIO;
+			goto fail;
+		}
+		ino = 0;
+
+repeat_in_this_group:
+		ino = ext2_find_next_zero_bit((unsigned long *)bitmap_bh->b_data,
+					      EXT2_INODES_PER_GROUP(sb), ino);
+		if (ino >= EXT2_INODES_PER_GROUP(sb)) {
+			/*
+			 * Rare race: find_group_xx() decided that there were
+			 * free inodes in this group, but by the time we tried
+			 * to allocate one, they're all gone.  This can also
+			 * occur because the counters which find_group_orlov()
+			 * uses are approximate.  So just go and search the
+			 * next block group.
+			 */
+			if (++group == sbi->s_groups_count)
+				group = 0;
+			continue;
+		}
+		if (ext2_set_bit_atomic(sb_bgl_lock(sbi, group),
+						ino, bitmap_bh->b_data)) {
+			/* we lost this inode */
+			if (++ino >= EXT2_INODES_PER_GROUP(sb)) {
+				/* this group is exhausted, try next group */
+				if (++group == sbi->s_groups_count)
+					group = 0;
+				continue;
+			}
+			/* try to find free inode in the same group */
+			goto repeat_in_this_group;
+		}
+		goto got;
+	}
+
+	/*
+	 * Scanned all blockgroups.
+	 */
+	err = -ENOSPC;
+	goto fail;
+got:
+	mark_buffer_dirty(bitmap_bh);
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
+	brelse(bitmap_bh);
+
+	ino += group * EXT2_INODES_PER_GROUP(sb) + 1;
+	if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+		ext2_error (sb, "ext2_new_inode",
+			    "reserved inode or inode > inodes count - "
+			    "block_group = %d,inode=%lu", group,
+			    (unsigned long) ino);
+		err = -EIO;
+		goto fail;
+	}
+
+	percpu_counter_mod(&sbi->s_freeinodes_counter, -1);
+	if (S_ISDIR(mode))
+		percpu_counter_inc(&sbi->s_dirs_counter);
+
+	spin_lock(sb_bgl_lock(sbi, group));
+	gdp->bg_free_inodes_count =
+                cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+	if (S_ISDIR(mode)) {
+		if (sbi->s_debts[group] < 255)
+			sbi->s_debts[group]++;
+		gdp->bg_used_dirs_count =
+			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+	} else {
+		if (sbi->s_debts[group])
+			sbi->s_debts[group]--;
+	}
+	spin_unlock(sb_bgl_lock(sbi, group));
+
+	sb->s_dirt = 1;
+	mark_buffer_dirty(bh2);
+	inode->i_uid = current->fsuid;
+	if (test_opt (sb, GRPID))
+		inode->i_gid = dir->i_gid;
+	else if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+	inode->i_mode = mode;
+
+	inode->i_ino = ino;
+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
+	inode->i_blocks = 0;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	memset(ei->i_data, 0, sizeof(ei->i_data));
+	ei->i_flags = EXT2_I(dir)->i_flags & ~EXT2_BTREE_FL;
+	if (S_ISLNK(mode))
+		ei->i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
+	/* dirsync is only applied to directories */
+	if (!S_ISDIR(mode))
+		ei->i_flags &= ~EXT2_DIRSYNC_FL;
+	ei->i_faddr = 0;
+	ei->i_frag_no = 0;
+	ei->i_frag_size = 0;
+	ei->i_file_acl = 0;
+	ei->i_dir_acl = 0;
+	ei->i_dtime = 0;
+	ei->i_block_group = group;
+	ei->i_next_alloc_block = 0;
+	ei->i_next_alloc_goal = 0;
+	ei->i_prealloc_block = 0;
+	ei->i_prealloc_count = 0;
+	ei->i_dir_start_lookup = 0;
+	ei->i_state = EXT2_STATE_NEW;
+	ext2_set_inode_flags(inode);
+	spin_lock(&sbi->s_next_gen_lock);
+	inode->i_generation = sbi->s_next_generation++;
+	spin_unlock(&sbi->s_next_gen_lock);
+	insert_inode_hash(inode);
+
+	if (DQUOT_ALLOC_INODE(inode)) {
+		DQUOT_DROP(inode);
+		err = -ENOSPC;
+		goto fail2;
+	}
+	err = ext2_init_acl(inode, dir);
+	if (err) {
+		DQUOT_FREE_INODE(inode);
+		goto fail2;
+	}
+	mark_inode_dirty(inode);
+	ext2_debug("allocating inode %lu\n", inode->i_ino);
+	ext2_preread_inode(inode);
+	return inode;
+
+fail2:
+	inode->i_flags |= S_NOQUOTA;
+	inode->i_nlink = 0;
+	iput(inode);
+	return ERR_PTR(err);
+
+fail:
+	make_bad_inode(inode);
+	iput(inode);
+	return ERR_PTR(err);
+}
+
+unsigned long ext2_count_free_inodes (struct super_block * sb)
+{
+	struct ext2_group_desc *desc;
+	unsigned long desc_count = 0;
+	int i;	
+
+#ifdef EXT2FS_DEBUG
+	struct ext2_super_block *es;
+	unsigned long bitmap_count = 0;
+	struct buffer_head *bitmap_bh = NULL;
+
+	lock_super (sb);
+	es = EXT2_SB(sb)->s_es;
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		unsigned x;
+
+		desc = ext2_get_group_desc (sb, i, NULL);
+		if (!desc)
+			continue;
+		desc_count += le16_to_cpu(desc->bg_free_inodes_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+
+		x = ext2_count_free(bitmap_bh, EXT2_INODES_PER_GROUP(sb) / 8);
+		printk("group %d: stored = %d, counted = %u\n",
+			i, le16_to_cpu(desc->bg_free_inodes_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	printk("ext2_count_free_inodes: stored = %lu, computed = %lu, %lu\n",
+		percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter),
+		desc_count, bitmap_count);
+	unlock_super(sb);
+	return desc_count;
+#else
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		desc = ext2_get_group_desc (sb, i, NULL);
+		if (!desc)
+			continue;
+		desc_count += le16_to_cpu(desc->bg_free_inodes_count);
+	}
+	return desc_count;
+#endif
+}
+
+/* Called at mount-time, super-block is locked */
+unsigned long ext2_count_dirs (struct super_block * sb)
+{
+	unsigned long count = 0;
+	int i;
+
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		struct ext2_group_desc *gdp = ext2_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		count += le16_to_cpu(gdp->bg_used_dirs_count);
+	}
+	return count;
+}
+
+#ifdef CONFIG_EXT2_CHECK
+/* Called at mount-time, super-block is locked */
+void ext2_check_inodes_bitmap (struct super_block * sb)
+{
+	struct ext2_super_block * es = EXT2_SB(sb)->s_es;
+	unsigned long desc_count = 0, bitmap_count = 0;
+	struct buffer_head *bitmap_bh = NULL;
+	int i;
+
+	for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+		struct ext2_group_desc *desc;
+		unsigned x;
+
+		desc = ext2_get_group_desc(sb, i, NULL);
+		if (!desc)
+			continue;
+		desc_count += le16_to_cpu(desc->bg_free_inodes_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+		
+		x = ext2_count_free(bitmap_bh, EXT2_INODES_PER_GROUP(sb) / 8);
+		if (le16_to_cpu(desc->bg_free_inodes_count) != x)
+			ext2_error (sb, "ext2_check_inodes_bitmap",
+				    "Wrong free inodes count in group %d, "
+				    "stored = %d, counted = %lu", i,
+				    le16_to_cpu(desc->bg_free_inodes_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	if (percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter) !=
+				bitmap_count)
+		ext2_error(sb, "ext2_check_inodes_bitmap",
+			    "Wrong free inodes count in super block, "
+			    "stored = %lu, counted = %lu",
+			    (unsigned long)le32_to_cpu(es->s_free_inodes_count),
+			    bitmap_count);
+}
+#endif
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
new file mode 100644
index 0000000..b890be0
--- /dev/null
+++ b/fs/ext2/inode.c
@@ -0,0 +1,1276 @@
+/*
+ *  linux/fs/ext2/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Goal-directed block allocation by Stephen Tweedie
+ * 	(sct@dcs.ed.ac.uk), 1993, 1998
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ * 	(jj@sunsite.ms.mff.cuni.cz)
+ *
+ *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
+ */
+
+#include <linux/smp_lock.h>
+#include <linux/time.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/module.h>
+#include <linux/writeback.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include "ext2.h"
+#include "acl.h"
+
+MODULE_AUTHOR("Remy Card and others");
+MODULE_DESCRIPTION("Second Extended Filesystem");
+MODULE_LICENSE("GPL");
+
+static int ext2_update_inode(struct inode * inode, int do_sync);
+
+/*
+ * Test whether an inode is a fast symlink.
+ */
+static inline int ext2_inode_is_fast_symlink(struct inode *inode)
+{
+	int ea_blocks = EXT2_I(inode)->i_file_acl ?
+		(inode->i_sb->s_blocksize >> 9) : 0;
+
+	return (S_ISLNK(inode->i_mode) &&
+		inode->i_blocks - ea_blocks == 0);
+}
+
+/*
+ * Called at the last iput() if i_nlink is zero.
+ */
+void ext2_delete_inode (struct inode * inode)
+{
+	if (is_bad_inode(inode))
+		goto no_delete;
+	EXT2_I(inode)->i_dtime	= get_seconds();
+	mark_inode_dirty(inode);
+	ext2_update_inode(inode, inode_needs_sync(inode));
+
+	inode->i_size = 0;
+	if (inode->i_blocks)
+		ext2_truncate (inode);
+	ext2_free_inode (inode);
+
+	return;
+no_delete:
+	clear_inode(inode);	/* We must guarantee clearing of inode... */
+}
+
+void ext2_discard_prealloc (struct inode * inode)
+{
+#ifdef EXT2_PREALLOCATE
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	write_lock(&ei->i_meta_lock);
+	if (ei->i_prealloc_count) {
+		unsigned short total = ei->i_prealloc_count;
+		unsigned long block = ei->i_prealloc_block;
+		ei->i_prealloc_count = 0;
+		ei->i_prealloc_block = 0;
+		write_unlock(&ei->i_meta_lock);
+		ext2_free_blocks (inode, block, total);
+		return;
+	} else
+		write_unlock(&ei->i_meta_lock);
+#endif
+}
+
+static int ext2_alloc_block (struct inode * inode, unsigned long goal, int *err)
+{
+#ifdef EXT2FS_DEBUG
+	static unsigned long alloc_hits, alloc_attempts;
+#endif
+	unsigned long result;
+
+
+#ifdef EXT2_PREALLOCATE
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	write_lock(&ei->i_meta_lock);
+	if (ei->i_prealloc_count &&
+	    (goal == ei->i_prealloc_block || goal + 1 == ei->i_prealloc_block))
+	{
+		result = ei->i_prealloc_block++;
+		ei->i_prealloc_count--;
+		write_unlock(&ei->i_meta_lock);
+		ext2_debug ("preallocation hit (%lu/%lu).\n",
+			    ++alloc_hits, ++alloc_attempts);
+	} else {
+		write_unlock(&ei->i_meta_lock);
+		ext2_discard_prealloc (inode);
+		ext2_debug ("preallocation miss (%lu/%lu).\n",
+			    alloc_hits, ++alloc_attempts);
+		if (S_ISREG(inode->i_mode))
+			result = ext2_new_block (inode, goal, 
+				 &ei->i_prealloc_count,
+				 &ei->i_prealloc_block, err);
+		else
+			result = ext2_new_block(inode, goal, NULL, NULL, err);
+	}
+#else
+	result = ext2_new_block (inode, goal, 0, 0, err);
+#endif
+	return result;
+}
+
+typedef struct {
+	__le32	*p;
+	__le32	key;
+	struct buffer_head *bh;
+} Indirect;
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
+{
+	p->key = *(p->p = v);
+	p->bh = bh;
+}
+
+static inline int verify_chain(Indirect *from, Indirect *to)
+{
+	while (from <= to && from->key == *from->p)
+		from++;
+	return (from > to);
+}
+
+/**
+ *	ext2_block_to_path - parse the block number into array of offsets
+ *	@inode: inode in question (we are only interested in its superblock)
+ *	@i_block: block number to be parsed
+ *	@offsets: array to store the offsets in
+ *      @boundary: set this non-zero if the referred-to block is likely to be
+ *             followed (on disk) by an indirect block.
+ *	To store the locations of a file's data, ext2 uses a data structure
+ *	common to UNIX filesystems - a tree of pointers anchored in the inode,
+ *	with data blocks at the leaves and indirect blocks in intermediate
+ *	nodes.  This function translates the block number into a path in that
+ *	tree - the return value is the path length and @offsets[n] is the
+ *	offset of the pointer to the (n+1)th node in the nth one.  If @i_block
+ *	is out of range (negative or too large) a warning is printed and zero
+ *	is returned.
+ *
+ *	Note: this function doesn't find the node addresses, so no IO is
+ *	needed.  All we need to know is the capacity of indirect blocks
+ *	(taken from inode->i_sb).
+ */
+
+/*
+ * Portability note: the last comparison (check that we fit into triple
+ * indirect block) is spelled differently, because otherwise on an
+ * architecture with 32-bit longs and 8Kb pages we might get into trouble
+ * if our filesystem had 8Kb blocks. We might use long long, but that would
+ * kill us on x86. Oh, well, at least the sign propagation does not matter -
+ * i_block would have to be negative in the very beginning, so we would not
+ * get there at all.
+ */
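+
+/*
+ * Worked example (assuming 1024-byte blocks, i.e. 256 pointers per
+ * indirect block): logical block 5 is a direct block, so the path is {5}
+ * with depth 1; logical block 300 becomes 300 - 12 - 256 = 32 within the
+ * double-indirect tree, giving the path {EXT2_DIND_BLOCK, 0, 32} with
+ * depth 3.
+ */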
+
+static int ext2_block_to_path(struct inode *inode,
+			long i_block, int offsets[4], int *boundary)
+{
+	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
+	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
+	const long direct_blocks = EXT2_NDIR_BLOCKS,
+		indirect_blocks = ptrs,
+		double_blocks = (1 << (ptrs_bits * 2));
+	int n = 0;
+	int final = 0;
+
+	if (i_block < 0) {
+		ext2_warning (inode->i_sb, "ext2_block_to_path", "block < 0");
+	} else if (i_block < direct_blocks) {
+		offsets[n++] = i_block;
+		final = direct_blocks;
+	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
+		offsets[n++] = EXT2_IND_BLOCK;
+		offsets[n++] = i_block;
+		final = ptrs;
+	} else if ((i_block -= indirect_blocks) < double_blocks) {
+		offsets[n++] = EXT2_DIND_BLOCK;
+		offsets[n++] = i_block >> ptrs_bits;
+		offsets[n++] = i_block & (ptrs - 1);
+		final = ptrs;
+	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+		offsets[n++] = EXT2_TIND_BLOCK;
+		offsets[n++] = i_block >> (ptrs_bits * 2);
+		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+		offsets[n++] = i_block & (ptrs - 1);
+		final = ptrs;
+	} else {
+		ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big");
+	}
+	if (boundary)
+		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+	return n;
+}
+
+/**
+ *	ext2_get_branch - read the chain of indirect blocks leading to data
+ *	@inode: inode in question
+ *	@depth: depth of the chain (1 - direct pointer, etc.)
+ *	@offsets: offsets of pointers in inode/indirect blocks
+ *	@chain: place to store the result
+ *	@err: here we store the error value
+ *
+ *	Function fills the array of triples <key, p, bh> and returns %NULL
+ *	if everything went OK or the pointer to the last filled triple
+ *	(the incomplete one) otherwise.  Upon return, chain[i].key contains
+ *	the number of (i+1)-th block in the chain (as it is stored in memory,
+ *	i.e. little-endian 32-bit), chain[i].p contains the address of that
+ *	number (it points into struct inode for i==0 and into the bh->b_data
+ *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
+ *	block for i>0 and NULL for i==0. In other words, it holds the block
+ *	numbers of the chain, addresses they were taken from (and where we can
+ *	verify that chain did not change) and buffer_heads hosting these
+ *	numbers.
+ *
+ *	Function stops when it stumbles upon zero pointer (absent block)
+ *		(pointer to last triple returned, *@err == 0)
+ *	or when it gets an IO error reading an indirect block
+ *		(ditto, *@err == -EIO)
+ *	or when it notices that chain had been changed while it was reading
+ *		(ditto, *@err == -EAGAIN)
+ *	or when it reads all @depth-1 indirect blocks successfully and finds
+ *	the whole chain, all the way to the data (returns %NULL, *err == 0).
+ */
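+/*
+ * Continuing the illustrative 1 KB-block example above: mapping logical
+ * block 300 gives depth 3.  chain[0].p points at i_data[EXT2_DIND_BLOCK]
+ * in the inode, chain[1].p at slot 0 of the double-indirect block and
+ * chain[2].p at slot 32 of that indirect block.  If slot 32 is still zero,
+ * the function returns &chain[2] with *err == 0 and the caller knows it
+ * must allocate from that point down.
+ */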
+static Indirect *ext2_get_branch(struct inode *inode,
+				 int depth,
+				 int *offsets,
+				 Indirect chain[4],
+				 int *err)
+{
+	struct super_block *sb = inode->i_sb;
+	Indirect *p = chain;
+	struct buffer_head *bh;
+
+	*err = 0;
+	/* i_data is not going away, no lock needed */
+	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
+	if (!p->key)
+		goto no_block;
+	while (--depth) {
+		bh = sb_bread(sb, le32_to_cpu(p->key));
+		if (!bh)
+			goto failure;
+		read_lock(&EXT2_I(inode)->i_meta_lock);
+		if (!verify_chain(chain, p))
+			goto changed;
+		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
+		read_unlock(&EXT2_I(inode)->i_meta_lock);
+		if (!p->key)
+			goto no_block;
+	}
+	return NULL;
+
+changed:
+	read_unlock(&EXT2_I(inode)->i_meta_lock);
+	brelse(bh);
+	*err = -EAGAIN;
+	goto no_block;
+failure:
+	*err = -EIO;
+no_block:
+	return p;
+}
+
+/**
+ *	ext2_find_near - find a place for allocation with sufficient locality
+ *	@inode: owner
+ *	@ind: descriptor of indirect block.
+ *
+ *	This function returns the preferred place for block allocation.
+ *	It is used when heuristic for sequential allocation fails.
+ *	Rules are:
+ *	  + if there is a block to the left of our position - allocate near it.
+ *	  + if pointer will live in indirect block - allocate near that block.
+ *	  + if pointer will live in inode - allocate in the same cylinder group.
+ *
+ * In the latter case we colour the starting block by the caller's PID to
+ * prevent it from clashing with concurrent allocations for a different inode
+ * in the same block group.   The PID is used here so that functionally related
+ * files will be close-by on-disk.
+ *
+ *	Caller must make sure that @ind is valid and will stay that way.
+ */
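+
+/*
+ * Illustration of the colouring (numbers assumed): with 8192 blocks per
+ * group, a PID of 1234 hashes to 1234 % 16 = 2, so the goal is offset by
+ * 2 * (8192 / 16) = 1024 blocks from the start of the inode's group,
+ * keeping concurrent writers to different inodes apart.
+ */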
+
+static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
+	__le32 *p;
+	unsigned long bg_start;
+	unsigned long colour;
+
+	/* Try to find previous block */
+	for (p = ind->p - 1; p >= start; p--)
+		if (*p)
+			return le32_to_cpu(*p);
+
+	/* No such thing, so let's try location of indirect block */
+	if (ind->bh)
+		return ind->bh->b_blocknr;
+
+	/*
+	 * Is it going to be referred to from the inode itself? OK, just put it into
+	 * the same cylinder group then.
+	 */
+	bg_start = (ei->i_block_group * EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
+		le32_to_cpu(EXT2_SB(inode->i_sb)->s_es->s_first_data_block);
+	colour = (current->pid % 16) *
+			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+	return bg_start + colour;
+}
+
+/**
+ *	ext2_find_goal - find a preferred place for allocation.
+ *	@inode: owner
+ *	@block:  block we want
+ *	@chain:  chain of indirect blocks
+ *	@partial: pointer to the last triple within a chain
+ *	@goal:	place to store the result.
+ *
+ *	Normally this function finds the preferred place for block allocation,
+ *	stores it in *@goal and returns zero. If the branch had been changed
+ *	under us we return -EAGAIN.
+ */
+
+static inline int ext2_find_goal(struct inode *inode,
+				 long block,
+				 Indirect chain[4],
+				 Indirect *partial,
+				 unsigned long *goal)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	write_lock(&ei->i_meta_lock);
+	if ((block == ei->i_next_alloc_block + 1) && ei->i_next_alloc_goal) {
+		ei->i_next_alloc_block++;
+		ei->i_next_alloc_goal++;
+	} 
+	if (verify_chain(chain, partial)) {
+		/*
+		 * try the heuristic for sequential allocation,
+		 * failing that at least try to get decent locality.
+		 */
+		if (block == ei->i_next_alloc_block)
+			*goal = ei->i_next_alloc_goal;
+		if (!*goal)
+			*goal = ext2_find_near(inode, partial);
+		write_unlock(&ei->i_meta_lock);
+		return 0;
+	}
+	write_unlock(&ei->i_meta_lock);
+	return -EAGAIN;
+}
+
+/**
+ *	ext2_alloc_branch - allocate and set up a chain of blocks.
+ *	@inode: owner
+ *	@num: depth of the chain (number of blocks to allocate)
+ *	@offsets: offsets (in the blocks) to store the pointers to next.
+ *	@branch: place to store the chain in.
+ *
+ *	This function allocates @num blocks, zeroes out all but the last one,
+ *	links them into chain and (if we are synchronous) writes them to disk.
+ *	In other words, it prepares a branch that can be spliced onto the
+ *	inode. It stores the information about that chain in the branch[], in
+ *	the same format as ext2_get_branch() would do. We are calling it after
+ *	we had read the existing part of chain and partial points to the last
+ *	triple of that (one with zero ->key). Upon the exit we have the same
+ *	picture as after the successful ext2_get_block(), except that in one
+ *	place chain is disconnected - *branch->p is still zero (we did not
+ *	set the last link), but branch->key contains the number that should
+ *	be placed into *branch->p to fill that gap.
+ *
+ *	If allocation fails we free all blocks we've allocated (and forget
+ *	their buffer_heads) and return the error value from the failed
+ *	ext2_alloc_block() (normally -ENOSPC). Otherwise we set the chain
+ *	as described above and return 0.
+ */
+
+static int ext2_alloc_branch(struct inode *inode,
+			     int num,
+			     unsigned long goal,
+			     int *offsets,
+			     Indirect *branch)
+{
+	int blocksize = inode->i_sb->s_blocksize;
+	int n = 0;
+	int err;
+	int i;
+	int parent = ext2_alloc_block(inode, goal, &err);
+
+	branch[0].key = cpu_to_le32(parent);
+	if (parent) for (n = 1; n < num; n++) {
+		struct buffer_head *bh;
+		/* Allocate the next block */
+		int nr = ext2_alloc_block(inode, parent, &err);
+		if (!nr)
+			break;
+		branch[n].key = cpu_to_le32(nr);
+		/*
+		 * Get the buffer_head for the parent block, zero it out and
+		 * set the pointer to the new one, then send the parent to disk.
+		 */
+		bh = sb_getblk(inode->i_sb, parent);
+		lock_buffer(bh);
+		memset(bh->b_data, 0, blocksize);
+		branch[n].bh = bh;
+		branch[n].p = (__le32 *) bh->b_data + offsets[n];
+		*branch[n].p = branch[n].key;
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		mark_buffer_dirty_inode(bh, inode);
+		/* We used to sync bh here if IS_SYNC(inode).
+		 * But we now rely upon generic_osync_inode()
+		 * and b_inode_buffers.  But not for directories.
+		 */
+		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
+			sync_dirty_buffer(bh);
+		parent = nr;
+	}
+	if (n == num)
+		return 0;
+
+	/* Allocation failed, free what we already allocated */
+	for (i = 1; i < n; i++)
+		bforget(branch[i].bh);
+	for (i = 0; i < n; i++)
+		ext2_free_blocks(inode, le32_to_cpu(branch[i].key), 1);
+	return err;
+}
+
+/**
+ *	ext2_splice_branch - splice the allocated branch onto inode.
+ *	@inode: owner
+ *	@block: (logical) number of block we are adding
+ *	@chain: chain of indirect blocks (with a missing link - see
+ *		ext2_alloc_branch)
+ *	@where: location of missing link
+ *	@num:   number of blocks we are adding
+ *
+ *	This function verifies that chain (up to the missing link) had not
+ *	changed, fills the missing link and does all housekeeping needed in
+ *	inode (->i_blocks, etc.). In case of success we end up with the full
+ *	chain to the new block and return 0. Otherwise (== the chain had been changed)
+ *	we free the new blocks (forgetting their buffer_heads, indeed) and
+ *	return -EAGAIN.
+ */
+
+static inline int ext2_splice_branch(struct inode *inode,
+				     long block,
+				     Indirect chain[4],
+				     Indirect *where,
+				     int num)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	int i;
+
+	/* Verify that place we are splicing to is still there and vacant */
+
+	write_lock(&ei->i_meta_lock);
+	if (!verify_chain(chain, where-1) || *where->p)
+		goto changed;
+
+	/* That's it */
+
+	*where->p = where->key;
+	ei->i_next_alloc_block = block;
+	ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
+
+	write_unlock(&ei->i_meta_lock);
+
+	/* We are done with atomic stuff, now do the rest of housekeeping */
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+
+	/* had we spliced it onto indirect block? */
+	if (where->bh)
+		mark_buffer_dirty_inode(where->bh, inode);
+
+	mark_inode_dirty(inode);
+	return 0;
+
+changed:
+	write_unlock(&ei->i_meta_lock);
+	for (i = 1; i < num; i++)
+		bforget(where[i].bh);
+	for (i = 0; i < num; i++)
+		ext2_free_blocks(inode, le32_to_cpu(where[i].key), 1);
+	return -EAGAIN;
+}
+
+/*
+ * The allocation strategy is simple: if we have to allocate something, we
+ * will have to go the whole way to the leaf.  So let's do it before attaching
+ * anything to the tree, set linkage between the newborn blocks, write them
+ * if sync is required, recheck the path, free and repeat if the check fails,
+ * otherwise
+ * set the last missing link (that will protect us from any truncate-generated
+ * removals - all blocks on the path are immune now) and possibly force the
+ * write on the parent block.
+ * That has a nice additional property: no special recovery from the failed
+ * allocations is needed - we simply release blocks and do not touch anything
+ * reachable from inode.
+ */
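+
+/*
+ * For example (1 KB blocks assumed): a write to logical block 2000 maps
+ * to the path {EXT2_DIND_BLOCK, 6, 196}.  If the double-indirect block
+ * exists but its slot 6 is empty, ext2_get_branch() returns
+ * partial = &chain[1], so we allocate two blocks (a new indirect block
+ * plus the data block) and ext2_splice_branch() then stores the new
+ * indirect block's number in slot 6, making the whole branch visible
+ * at once.
+ */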
+
+int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+{
+	int err = -EIO;
+	int offsets[4];
+	Indirect chain[4];
+	Indirect *partial;
+	unsigned long goal;
+	int left;
+	int boundary = 0;
+	int depth = ext2_block_to_path(inode, iblock, offsets, &boundary);
+
+	if (depth == 0)
+		goto out;
+
+reread:
+	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+
+	/* Simplest case - block found, no allocation needed */
+	if (!partial) {
+got_it:
+		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+		if (boundary)
+			set_buffer_boundary(bh_result);
+		/* Clean up and exit */
+		partial = chain+depth-1; /* the whole chain */
+		goto cleanup;
+	}
+
+	/* Next simple case - plain lookup or failed read of indirect block */
+	if (!create || err == -EIO) {
+cleanup:
+		while (partial > chain) {
+			brelse(partial->bh);
+			partial--;
+		}
+out:
+		return err;
+	}
+
+	/*
+	 * Indirect block might be removed by truncate while we were
+	 * reading it. Handling of that case (forget what we've got and
+	 * reread) is taken out of the main path.
+	 */
+	if (err == -EAGAIN)
+		goto changed;
+
+	goal = 0;
+	if (ext2_find_goal(inode, iblock, chain, partial, &goal) < 0)
+		goto changed;
+
+	left = (chain + depth) - partial;
+	err = ext2_alloc_branch(inode, left, goal,
+					offsets+(partial-chain), partial);
+	if (err)
+		goto cleanup;
+
+	if (ext2_splice_branch(inode, iblock, chain, partial, left) < 0)
+		goto changed;
+
+	set_buffer_new(bh_result);
+	goto got_it;
+
+changed:
+	while (partial > chain) {
+		brelse(partial->bh);
+		partial--;
+	}
+	goto reread;
+}
+
+static int ext2_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, ext2_get_block, wbc);
+}
+
+static int ext2_readpage(struct file *file, struct page *page)
+{
+	return mpage_readpage(page, ext2_get_block);
+}
+
+static int
+ext2_readpages(struct file *file, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
+}
+
+static int
+ext2_prepare_write(struct file *file, struct page *page,
+			unsigned from, unsigned to)
+{
+	return block_prepare_write(page,from,to,ext2_get_block);
+}
+
+static int
+ext2_nobh_prepare_write(struct file *file, struct page *page,
+			unsigned from, unsigned to)
+{
+	return nobh_prepare_write(page,from,to,ext2_get_block);
+}
+
+static int ext2_nobh_writepage(struct page *page,
+			struct writeback_control *wbc)
+{
+	return nobh_writepage(page, ext2_get_block, wbc);
+}
+
+static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,ext2_get_block);
+}
+
+static int
+ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
+			struct buffer_head *bh_result, int create)
+{
+	int ret;
+
+	ret = ext2_get_block(inode, iblock, bh_result, create);
+	if (ret == 0)
+		bh_result->b_size = (1 << inode->i_blkbits);
+	return ret;
+}
+
+static ssize_t
+ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+			loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+
+	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+				offset, nr_segs, ext2_get_blocks, NULL);
+}
+
+static int
+ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, ext2_get_block);
+}
+
+struct address_space_operations ext2_aops = {
+	.readpage		= ext2_readpage,
+	.readpages		= ext2_readpages,
+	.writepage		= ext2_writepage,
+	.sync_page		= block_sync_page,
+	.prepare_write		= ext2_prepare_write,
+	.commit_write		= generic_commit_write,
+	.bmap			= ext2_bmap,
+	.direct_IO		= ext2_direct_IO,
+	.writepages		= ext2_writepages,
+};
+
+struct address_space_operations ext2_nobh_aops = {
+	.readpage		= ext2_readpage,
+	.readpages		= ext2_readpages,
+	.writepage		= ext2_nobh_writepage,
+	.sync_page		= block_sync_page,
+	.prepare_write		= ext2_nobh_prepare_write,
+	.commit_write		= nobh_commit_write,
+	.bmap			= ext2_bmap,
+	.direct_IO		= ext2_direct_IO,
+	.writepages		= ext2_writepages,
+};
+
+/*
+ * Probably it should be a library function... search for first non-zero word
+ * or memcmp with zero_page, whatever is better for particular architecture.
+ * Linus?
+ */
+static inline int all_zeroes(__le32 *p, __le32 *q)
+{
+	while (p < q)
+		if (*p++)
+			return 0;
+	return 1;
+}
+
+/**
+ *	ext2_find_shared - find the indirect blocks for partial truncation.
+ *	@inode:	  inode in question
+ *	@depth:	  depth of the affected branch
+ *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
+ *	@chain:	  place to store the pointers to partial indirect blocks
+ *	@top:	  place to store the (detached) top of branch
+ *
+ *	This is a helper function used by ext2_truncate().
+ *
+ *	When we do truncate() we may have to clean the ends of several indirect
+ *	blocks but leave the blocks themselves alive.  A block is partially
+ *	truncated if some data below the new i_size is referred to from it (and
+ *	it is on the path to the first completely truncated data block, indeed).
+ *	We have to free the top of that path along with everything to the right
+ *	of the path. Since no allocation past the truncation point is possible
+ *	until ext2_truncate() finishes, we may safely do the latter, but top
+ *	of branch may require special attention - pageout below the truncation
+ *	point might try to populate it.
+ *
+ *	We atomically detach the top of branch from the tree, store the block
+ *	number of its root in *@top, pointers to buffer_heads of partially
+ *	truncated blocks - in @chain[].bh and pointers to their last elements
+ *	that should not be removed - in @chain[].p. Return value is the pointer
+ *	to last filled element of @chain.
+ *
+ *	The work left to the caller is the actual freeing of the subtrees:
+ *		a) free the subtree starting from *@top
+ *		b) free the subtrees whose roots are stored in
+ *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
+ *		c) free the subtrees growing from the inode past the @chain[0].p
+ *			(no partially truncated stuff there).
+ */
+
+static Indirect *ext2_find_shared(struct inode *inode,
+				int depth,
+				int offsets[4],
+				Indirect chain[4],
+				__le32 *top)
+{
+	Indirect *partial, *p;
+	int k, err;
+
+	*top = 0;
+	for (k = depth; k > 1 && !offsets[k-1]; k--)
+		;
+	partial = ext2_get_branch(inode, k, offsets, chain, &err);
+	if (!partial)
+		partial = chain + k-1;
+	/*
+	 * If the branch acquired continuation since we've looked at it -
+	 * fine, it should all survive and (new) top doesn't belong to us.
+	 */
+	write_lock(&EXT2_I(inode)->i_meta_lock);
+	if (!partial->key && *partial->p) {
+		write_unlock(&EXT2_I(inode)->i_meta_lock);
+		goto no_top;
+	}
+	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
+		;
+	/*
+	 * OK, we've found the last block that must survive. The rest of our
+	 * branch should be detached before unlocking. However, if that rest
+	 * of branch is all ours and does not grow immediately from the inode
+	 * it's easier to cheat and just decrement partial->p.
+	 */
+	if (p == chain + k - 1 && p > chain) {
+		p->p--;
+	} else {
+		*top = *p->p;
+		*p->p = 0;
+	}
+	write_unlock(&EXT2_I(inode)->i_meta_lock);
+
+	while(partial > p)
+	{
+		brelse(partial->bh);
+		partial--;
+	}
+no_top:
+	return partial;
+}
+
+/**
+ *	ext2_free_data - free a list of data blocks
+ *	@inode:	inode we are dealing with
+ *	@p:	array of block numbers
+ *	@q:	points immediately past the end of array
+ *
+ *	We are freeing all blocks referred to from that array (numbers are
+ *	stored as little-endian 32-bit) and updating @inode->i_blocks
+ *	appropriately.
+ */
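+/*
+ * For instance (block numbers illustrative only), freeing the array
+ * {100, 101, 102, 0, 200} coalesces the contiguous run into a single
+ * ext2_free_blocks(inode, 100, 3) call, skips the hole, and finishes
+ * with ext2_free_blocks(inode, 200, 1).
+ */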
+static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
+{
+	unsigned long block_to_free = 0, count = 0;
+	unsigned long nr;
+
+	for ( ; p < q ; p++) {
+		nr = le32_to_cpu(*p);
+		if (nr) {
+			*p = 0;
+			/* accumulate blocks to free if they're contiguous */
+			if (count == 0)
+				goto free_this;
+			else if (block_to_free == nr - count)
+				count++;
+			else {
+				mark_inode_dirty(inode);
+				ext2_free_blocks (inode, block_to_free, count);
+			free_this:
+				block_to_free = nr;
+				count = 1;
+			}
+		}
+	}
+	if (count > 0) {
+		mark_inode_dirty(inode);
+		ext2_free_blocks (inode, block_to_free, count);
+	}
+}
+
+/**
+ *	ext2_free_branches - free an array of branches
+ *	@inode:	inode we are dealing with
+ *	@p:	array of block numbers
+ *	@q:	pointer immediately past the end of array
+ *	@depth:	depth of the branches to free
+ *
+ *	We are freeing all blocks referred to from these branches (numbers are
+ *	stored as little-endian 32-bit) and updating @inode->i_blocks
+ *	appropriately.
+ */
+static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
+{
+	struct buffer_head * bh;
+	unsigned long nr;
+
+	if (depth--) {
+		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
+		for ( ; p < q ; p++) {
+			nr = le32_to_cpu(*p);
+			if (!nr)
+				continue;
+			*p = 0;
+			bh = sb_bread(inode->i_sb, nr);
+			/*
+			 * A read failure? Report error and clear slot
+			 * (should be rare).
+			 */ 
+			if (!bh) {
+				ext2_error(inode->i_sb, "ext2_free_branches",
+					"Read failure, inode=%ld, block=%ld",
+					inode->i_ino, nr);
+				continue;
+			}
+			ext2_free_branches(inode,
+					   (__le32*)bh->b_data,
+					   (__le32*)bh->b_data + addr_per_block,
+					   depth);
+			bforget(bh);
+			ext2_free_blocks(inode, nr, 1);
+			mark_inode_dirty(inode);
+		}
+	} else
+		ext2_free_data(inode, p, q);
+}
+
+void ext2_truncate (struct inode * inode)
+{
+	__le32 *i_data = EXT2_I(inode)->i_data;
+	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
+	int offsets[4];
+	Indirect chain[4];
+	Indirect *partial;
+	__le32 nr = 0;
+	int n;
+	long iblock;
+	unsigned blocksize;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	    S_ISLNK(inode->i_mode)))
+		return;
+	if (ext2_inode_is_fast_symlink(inode))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+
+	ext2_discard_prealloc(inode);
+
+	blocksize = inode->i_sb->s_blocksize;
+	iblock = (inode->i_size + blocksize-1)
+					>> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
+
+	if (test_opt(inode->i_sb, NOBH))
+		nobh_truncate_page(inode->i_mapping, inode->i_size);
+	else
+		block_truncate_page(inode->i_mapping,
+				inode->i_size, ext2_get_block);
+
+	n = ext2_block_to_path(inode, iblock, offsets, NULL);
+	if (n == 0)
+		return;
+
+	if (n == 1) {
+		ext2_free_data(inode, i_data+offsets[0],
+					i_data + EXT2_NDIR_BLOCKS);
+		goto do_indirects;
+	}
+
+	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
+	/* Kill the top of shared branch (already detached) */
+	if (nr) {
+		if (partial == chain)
+			mark_inode_dirty(inode);
+		else
+			mark_buffer_dirty_inode(partial->bh, inode);
+		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
+	}
+	/* Clear the ends of indirect blocks on the shared branch */
+	while (partial > chain) {
+		ext2_free_branches(inode,
+				   partial->p + 1,
+				   (__le32*)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+		mark_buffer_dirty_inode(partial->bh, inode);
+		brelse (partial->bh);
+		partial--;
+	}
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
+		default:
+			nr = i_data[EXT2_IND_BLOCK];
+			if (nr) {
+				i_data[EXT2_IND_BLOCK] = 0;
+				mark_inode_dirty(inode);
+				ext2_free_branches(inode, &nr, &nr+1, 1);
+			}
+		case EXT2_IND_BLOCK:
+			nr = i_data[EXT2_DIND_BLOCK];
+			if (nr) {
+				i_data[EXT2_DIND_BLOCK] = 0;
+				mark_inode_dirty(inode);
+				ext2_free_branches(inode, &nr, &nr+1, 2);
+			}
+		case EXT2_DIND_BLOCK:
+			nr = i_data[EXT2_TIND_BLOCK];
+			if (nr) {
+				i_data[EXT2_TIND_BLOCK] = 0;
+				mark_inode_dirty(inode);
+				ext2_free_branches(inode, &nr, &nr+1, 3);
+			}
+		case EXT2_TIND_BLOCK:
+			;
+	}
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	if (inode_needs_sync(inode)) {
+		sync_mapping_buffers(inode->i_mapping);
+		ext2_sync_inode (inode);
+	} else {
+		mark_inode_dirty(inode);
+	}
+}
+
+static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
+					struct buffer_head **p)
+{
+	struct buffer_head * bh;
+	unsigned long block_group;
+	unsigned long block;
+	unsigned long offset;
+	struct ext2_group_desc * gdp;
+
+	*p = NULL;
+	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
+	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
+		goto Einval;
+
+	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
+	gdp = ext2_get_group_desc(sb, block_group, &bh);
+	if (!gdp)
+		goto Egdp;
+	/*
+	 * Figure out the offset within the block group inode table
+	 */
+	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
+	block = le32_to_cpu(gdp->bg_inode_table) +
+		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
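+	/*
+	 * Worked example (assumed geometry: 1 KB blocks, 128-byte inodes,
+	 * 2048 inodes per group): inode 5000 lives in group (5000-1)/2048 = 2,
+	 * at byte offset (4999 % 2048) * 128 = 115584 into that group's inode
+	 * table, i.e. 112 full blocks in and 896 bytes into that block.
+	 */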
+	if (!(bh = sb_bread(sb, block)))
+		goto Eio;
+
+	*p = bh;
+	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
+	return (struct ext2_inode *) (bh->b_data + offset);
+
+Einval:
+	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
+		   (unsigned long) ino);
+	return ERR_PTR(-EINVAL);
+Eio:
+	ext2_error(sb, "ext2_get_inode",
+		   "unable to read inode block - inode=%lu, block=%lu",
+		   (unsigned long) ino, block);
+Egdp:
+	return ERR_PTR(-EIO);
+}
+
+void ext2_set_inode_flags(struct inode *inode)
+{
+	unsigned int flags = EXT2_I(inode)->i_flags;
+
+	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	if (flags & EXT2_SYNC_FL)
+		inode->i_flags |= S_SYNC;
+	if (flags & EXT2_APPEND_FL)
+		inode->i_flags |= S_APPEND;
+	if (flags & EXT2_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT2_NOATIME_FL)
+		inode->i_flags |= S_NOATIME;
+	if (flags & EXT2_DIRSYNC_FL)
+		inode->i_flags |= S_DIRSYNC;
+}
+
+void ext2_read_inode (struct inode * inode)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	ino_t ino = inode->i_ino;
+	struct buffer_head * bh;
+	struct ext2_inode * raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
+	int n;
+
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	ei->i_acl = EXT2_ACL_NOT_CACHED;
+	ei->i_default_acl = EXT2_ACL_NOT_CACHED;
+#endif
+	if (IS_ERR(raw_inode))
+ 		goto bad_inode;
+
+	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+	if (!(test_opt (inode->i_sb, NO_UID32))) {
+		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+	}
+	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+	inode->i_size = le32_to_cpu(raw_inode->i_size);
+	inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
+	inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
+	inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
+	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
+	/* We now have enough fields to check if the inode was active or not.
+	 * This is needed because nfsd might try to access dead inodes;
+	 * the test is the same one that e2fsck uses.
+	 * NeilBrown 1999oct15
+	 */
+	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
+		/* this inode is deleted */
+		brelse (bh);
+		goto bad_inode;
+	}
+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
+	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
+	ei->i_frag_no = raw_inode->i_frag;
+	ei->i_frag_size = raw_inode->i_fsize;
+	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
+	ei->i_dir_acl = 0;
+	if (S_ISREG(inode->i_mode))
+		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
+	else
+		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+	ei->i_dtime = 0;
+	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+	ei->i_state = 0;
+	ei->i_next_alloc_block = 0;
+	ei->i_next_alloc_goal = 0;
+	ei->i_prealloc_count = 0;
+	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
+	ei->i_dir_start_lookup = 0;
+
+	/*
+	 * NOTE! The in-memory inode i_data array is in little-endian order
+	 * even on big-endian machines: we do NOT byteswap the block numbers!
+	 */
+	for (n = 0; n < EXT2_N_BLOCKS; n++)
+		ei->i_data[n] = raw_inode->i_block[n];
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ext2_file_inode_operations;
+		inode->i_fop = &ext2_file_operations;
+		if (test_opt(inode->i_sb, NOBH))
+			inode->i_mapping->a_ops = &ext2_nobh_aops;
+		else
+			inode->i_mapping->a_ops = &ext2_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ext2_dir_inode_operations;
+		inode->i_fop = &ext2_dir_operations;
+		if (test_opt(inode->i_sb, NOBH))
+			inode->i_mapping->a_ops = &ext2_nobh_aops;
+		else
+			inode->i_mapping->a_ops = &ext2_aops;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (ext2_inode_is_fast_symlink(inode))
+			inode->i_op = &ext2_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &ext2_symlink_inode_operations;
+			if (test_opt(inode->i_sb, NOBH))
+				inode->i_mapping->a_ops = &ext2_nobh_aops;
+			else
+				inode->i_mapping->a_ops = &ext2_aops;
+		}
+	} else {
+		inode->i_op = &ext2_special_inode_operations;
+		if (raw_inode->i_block[0])
+			init_special_inode(inode, inode->i_mode,
+			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
+		else 
+			init_special_inode(inode, inode->i_mode,
+			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
+	}
+	brelse (bh);
+	ext2_set_inode_flags(inode);
+	return;
+	
+bad_inode:
+	make_bad_inode(inode);
+	return;
+}
+
+static int ext2_update_inode(struct inode * inode, int do_sync)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	struct super_block *sb = inode->i_sb;
+	ino_t ino = inode->i_ino;
+	uid_t uid = inode->i_uid;
+	gid_t gid = inode->i_gid;
+	struct buffer_head * bh;
+	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
+	int n;
+	int err = 0;
+
+	if (IS_ERR(raw_inode))
+ 		return -EIO;
+
+	/* For fields not tracked in the in-memory inode,
+	 * initialise them to zero for new inodes. */
+	if (ei->i_state & EXT2_STATE_NEW)
+		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
+
+	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+	if (!(test_opt(sb, NO_UID32))) {
+		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
+		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
+/*
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+		if (!ei->i_dtime) {
+			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
+			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
+		} else {
+			raw_inode->i_uid_high = 0;
+			raw_inode->i_gid_high = 0;
+		}
+	} else {
+		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
+		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
+		raw_inode->i_uid_high = 0;
+		raw_inode->i_gid_high = 0;
+	}
+	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+	raw_inode->i_size = cpu_to_le32(inode->i_size);
+	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+
+	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
+	raw_inode->i_frag = ei->i_frag_no;
+	raw_inode->i_fsize = ei->i_frag_size;
+	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
+	if (!S_ISREG(inode->i_mode))
+		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+	else {
+		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
+		if (inode->i_size > 0x7fffffffULL) {
+			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
+					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
+			    EXT2_SB(sb)->s_es->s_rev_level ==
+					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
+			       /* If this is the first large file
+				* created, add a flag to the superblock.
+				*/
+				lock_kernel();
+				ext2_update_dynamic_rev(sb);
+				EXT2_SET_RO_COMPAT_FEATURE(sb,
+					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
+				unlock_kernel();
+				ext2_write_super(sb);
+			}
+		}
+	}
+	
+	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		if (old_valid_dev(inode->i_rdev)) {
+			raw_inode->i_block[0] =
+				cpu_to_le32(old_encode_dev(inode->i_rdev));
+			raw_inode->i_block[1] = 0;
+		} else {
+			raw_inode->i_block[0] = 0;
+			raw_inode->i_block[1] =
+				cpu_to_le32(new_encode_dev(inode->i_rdev));
+			raw_inode->i_block[2] = 0;
+		}
+	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
+		raw_inode->i_block[n] = ei->i_data[n];
+	mark_buffer_dirty(bh);
+	if (do_sync) {
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh)) {
+			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
+				sb->s_id, (unsigned long) ino);
+			err = -EIO;
+		}
+	}
+	ei->i_state &= ~EXT2_STATE_NEW;
+	brelse (bh);
+	return err;
+}
+
+int ext2_write_inode(struct inode *inode, int wait)
+{
+	return ext2_update_inode(inode, wait);
+}
+
+int ext2_sync_inode(struct inode *inode)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = 0,	/* sys_fsync did this */
+	};
+	return sync_inode(inode, &wbc);
+}
+
+int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	error = inode_change_ok(inode, iattr);
+	if (error)
+		return error;
+	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
+	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+		error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
+		if (error)
+			return error;
+	}
+	error = inode_setattr(inode, iattr);
+	if (!error && (iattr->ia_valid & ATTR_MODE))
+		error = ext2_acl_chmod(inode);
+	return error;
+}
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
new file mode 100644
index 0000000..709d867
--- /dev/null
+++ b/fs/ext2/ioctl.c
@@ -0,0 +1,81 @@
+/*
+ * linux/fs/ext2/ioctl.c
+ *
+ * Copyright (C) 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include "ext2.h"
+#include <linux/time.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+
+
+int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	unsigned int flags;
+
+	ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+	switch (cmd) {
+	case EXT2_IOC_GETFLAGS:
+		flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
+		return put_user(flags, (int __user *) arg);
+	case EXT2_IOC_SETFLAGS: {
+		unsigned int oldflags;
+
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EACCES;
+
+		if (get_user(flags, (int __user *) arg))
+			return -EFAULT;
+
+		if (!S_ISDIR(inode->i_mode))
+			flags &= ~EXT2_DIRSYNC_FL;
+
+		oldflags = ei->i_flags;
+
+		/*
+		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+		 * the relevant capability.
+		 *
+		 * This test looks nicer. Thanks to Pauline Middelink
+		 */
+		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
+			if (!capable(CAP_LINUX_IMMUTABLE))
+				return -EPERM;
+		}
+
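+		/*
+		 * Merge the request with the old flags: only user-modifiable
+		 * bits are taken from the caller; kernel-internal bits are
+		 * preserved, so a caller cannot set or clear flags it is not
+		 * allowed to touch.
+		 */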
+		flags = flags & EXT2_FL_USER_MODIFIABLE;
+		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
+		ei->i_flags = flags;
+
+		ext2_set_inode_flags(inode);
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		return 0;
+	}
+	case EXT2_IOC_GETVERSION:
+		return put_user(inode->i_generation, (int __user *) arg);
+	case EXT2_IOC_SETVERSION:
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EPERM;
+		if (IS_RDONLY(inode))
+			return -EROFS;
+		if (get_user(inode->i_generation, (int __user *) arg))
+			return -EFAULT;	
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
new file mode 100644
index 0000000..3176b3d
--- /dev/null
+++ b/fs/ext2/namei.c
@@ -0,0 +1,418 @@
+/*
+ * linux/fs/ext2/namei.c
+ *
+ * Rewrite to pagecache. Almost all code had been changed, so blame me
+ * if things go wrong. Please send bug reports to
+ * viro@parcelfarce.linux.theplanet.co.uk
+ *
+ * Stuff here is basically a glue between the VFS and generic UNIXish
+ * filesystem that keeps everything in pagecache. All knowledge of the
+ * directory layout is in fs/ext2/dir.c - it turned out to be easily separable
+ * and it's easier to debug that way. In principle we might want to
+ * generalize that a bit and turn it into a library. Or not.
+ *
+ * The only non-static object here is ext2_dir_inode_operations.
+ *
+ * TODO: get rid of kmap() use, add readahead.
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/pagemap.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Couple of helper functions - make the code slightly cleaner.
+ */
+
+static inline void ext2_inc_count(struct inode *inode)
+{
+	inode->i_nlink++;
+	mark_inode_dirty(inode);
+}
+
+static inline void ext2_dec_count(struct inode *inode)
+{
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+}
+
+static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
+{
+	int err = ext2_add_link(dentry, inode);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	ext2_dec_count(inode);
+	iput(inode);
+	return err;
+}
+
+/*
+ * Methods themselves.
+ */
+
+static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode * inode;
+	ino_t ino;
+	
+	if (dentry->d_name.len > EXT2_NAME_LEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	ino = ext2_inode_by_name(dir, dentry);
+	inode = NULL;
+	if (ino) {
+		inode = iget(dir->i_sb, ino);
+		if (!inode)
+			return ERR_PTR(-EACCES);
+	}
+	if (inode)
+		return d_splice_alias(inode, dentry);
+	d_add(dentry, inode);
+	return NULL;
+}
+
+struct dentry *ext2_get_parent(struct dentry *child)
+{
+	unsigned long ino;
+	struct dentry *parent;
+	struct inode *inode;
+	struct dentry dotdot;
+
+	dotdot.d_name.name = "..";
+	dotdot.d_name.len = 2;
+
+	ino = ext2_inode_by_name(child->d_inode, &dotdot);
+	if (!ino)
+		return ERR_PTR(-ENOENT);
+	inode = iget(child->d_inode->i_sb, ino);
+
+	if (!inode)
+		return ERR_PTR(-EACCES);
+	parent = d_alloc_anon(inode);
+	if (!parent) {
+		iput(inode);
+		parent = ERR_PTR(-ENOMEM);
+	}
+	return parent;
+} 
+
+/*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate(). 
+ */
+static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+{
+	struct inode * inode = ext2_new_inode (dir, mode);
+	int err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		inode->i_op = &ext2_file_inode_operations;
+		inode->i_fop = &ext2_file_operations;
+		if (test_opt(inode->i_sb, NOBH))
+			inode->i_mapping->a_ops = &ext2_nobh_aops;
+		else
+			inode->i_mapping->a_ops = &ext2_aops;
+		mark_inode_dirty(inode);
+		err = ext2_add_nondir(dentry, inode);
+	}
+	return err;
+}
+
+static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	struct inode * inode;
+	int err;
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	inode = ext2_new_inode (dir, mode);
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		init_special_inode(inode, inode->i_mode, rdev);
+#ifdef CONFIG_EXT2_FS_XATTR
+		inode->i_op = &ext2_special_inode_operations;
+#endif
+		mark_inode_dirty(inode);
+		err = ext2_add_nondir(dentry, inode);
+	}
+	return err;
+}
+
+static int ext2_symlink (struct inode * dir, struct dentry * dentry,
+	const char * symname)
+{
+	struct super_block * sb = dir->i_sb;
+	int err = -ENAMETOOLONG;
+	unsigned l = strlen(symname)+1;
+	struct inode * inode;
+
+	if (l > sb->s_blocksize)
+		goto out;
+
+	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	if (l > sizeof (EXT2_I(inode)->i_data)) {
+		/* slow symlink */
+		inode->i_op = &ext2_symlink_inode_operations;
+		if (test_opt(inode->i_sb, NOBH))
+			inode->i_mapping->a_ops = &ext2_nobh_aops;
+		else
+			inode->i_mapping->a_ops = &ext2_aops;
+		err = page_symlink(inode, symname, l);
+		if (err)
+			goto out_fail;
+	} else {
+		/* fast symlink */
+		inode->i_op = &ext2_fast_symlink_inode_operations;
+		memcpy((char*)(EXT2_I(inode)->i_data),symname,l);
+		inode->i_size = l-1;
+	}
+	mark_inode_dirty(inode);
+
+	err = ext2_add_nondir(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	ext2_dec_count(inode);
+	iput (inode);
+	goto out;
+}
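+
+/*
+ * Sizing example (illustrative, assuming the usual 15-slot 32-bit i_data
+ * array): sizeof(EXT2_I(inode)->i_data) is 60 bytes, so a target of up
+ * to 59 characters plus the trailing NUL is stored inline as a fast
+ * symlink, while anything longer goes through page_symlink() above and
+ * occupies a data block of its own.
+ */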
+
+static int ext2_link (struct dentry * old_dentry, struct inode * dir,
+	struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+
+	if (inode->i_nlink >= EXT2_LINK_MAX)
+		return -EMLINK;
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	ext2_inc_count(inode);
+	atomic_inc(&inode->i_count);
+
+	return ext2_add_nondir(dentry, inode);
+}
+
+static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	struct inode * inode;
+	int err = -EMLINK;
+
+	if (dir->i_nlink >= EXT2_LINK_MAX)
+		goto out;
+
+	ext2_inc_count(dir);
+
+	inode = ext2_new_inode (dir, S_IFDIR | mode);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_dir;
+
+	inode->i_op = &ext2_dir_inode_operations;
+	inode->i_fop = &ext2_dir_operations;
+	if (test_opt(inode->i_sb, NOBH))
+		inode->i_mapping->a_ops = &ext2_nobh_aops;
+	else
+		inode->i_mapping->a_ops = &ext2_aops;
+
+	ext2_inc_count(inode);
+
+	err = ext2_make_empty(inode, dir);
+	if (err)
+		goto out_fail;
+
+	err = ext2_add_link(dentry, inode);
+	if (err)
+		goto out_fail;
+
+	d_instantiate(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	ext2_dec_count(inode);
+	ext2_dec_count(inode);
+	iput(inode);
+out_dir:
+	ext2_dec_count(dir);
+	goto out;
+}
+
+static int ext2_unlink(struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	struct ext2_dir_entry_2 * de;
+	struct page * page;
+	int err = -ENOENT;
+
+	de = ext2_find_entry (dir, dentry, &page);
+	if (!de)
+		goto out;
+
+	err = ext2_delete_entry (de, page);
+	if (err)
+		goto out;
+
+	inode->i_ctime = dir->i_ctime;
+	ext2_dec_count(inode);
+	err = 0;
+out:
+	return err;
+}
+
+static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	int err = -ENOTEMPTY;
+
+	if (ext2_empty_dir(inode)) {
+		err = ext2_unlink(dir, dentry);
+		if (!err) {
+			inode->i_size = 0;
+			ext2_dec_count(inode);
+			ext2_dec_count(dir);
+		}
+	}
+	return err;
+}
+
+static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
+	struct inode * new_dir,	struct dentry * new_dentry )
+{
+	struct inode * old_inode = old_dentry->d_inode;
+	struct inode * new_inode = new_dentry->d_inode;
+	struct page * dir_page = NULL;
+	struct ext2_dir_entry_2 * dir_de = NULL;
+	struct page * old_page;
+	struct ext2_dir_entry_2 * old_de;
+	int err = -ENOENT;
+
+	old_de = ext2_find_entry (old_dir, old_dentry, &old_page);
+	if (!old_de)
+		goto out;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+		err = -EIO;
+		dir_de = ext2_dotdot(old_inode, &dir_page);
+		if (!dir_de)
+			goto out_old;
+	}
+
+	if (new_inode) {
+		struct page *new_page;
+		struct ext2_dir_entry_2 *new_de;
+
+		err = -ENOTEMPTY;
+		if (dir_de && !ext2_empty_dir (new_inode))
+			goto out_dir;
+
+		err = -ENOENT;
+		new_de = ext2_find_entry (new_dir, new_dentry, &new_page);
+		if (!new_de)
+			goto out_dir;
+		ext2_inc_count(old_inode);
+		ext2_set_link(new_dir, new_de, new_page, old_inode);
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		if (dir_de)
+			new_inode->i_nlink--;
+		ext2_dec_count(new_inode);
+	} else {
+		if (dir_de) {
+			err = -EMLINK;
+			if (new_dir->i_nlink >= EXT2_LINK_MAX)
+				goto out_dir;
+		}
+		ext2_inc_count(old_inode);
+		err = ext2_add_link(new_dentry, old_inode);
+		if (err) {
+			ext2_dec_count(old_inode);
+			goto out_dir;
+		}
+		if (dir_de)
+			ext2_inc_count(new_dir);
+	}
+
+	/*
+	 * Like most other Unix systems, set the ctime for inodes on a
+	 * rename.
+	 * ext2_dec_count() will mark the inode dirty.
+	 */
+	old_inode->i_ctime = CURRENT_TIME_SEC;
+
+	ext2_delete_entry (old_de, old_page);
+	ext2_dec_count(old_inode);
+
+	if (dir_de) {
+		ext2_set_link(old_inode, dir_de, dir_page, new_dir);
+		ext2_dec_count(old_dir);
+	}
+	return 0;
+
+out_dir:
+	if (dir_de) {
+		kunmap(dir_page);
+		page_cache_release(dir_page);
+	}
+out_old:
+	kunmap(old_page);
+	page_cache_release(old_page);
+out:
+	return err;
+}
+
+struct inode_operations ext2_dir_inode_operations = {
+	.create		= ext2_create,
+	.lookup		= ext2_lookup,
+	.link		= ext2_link,
+	.unlink		= ext2_unlink,
+	.symlink	= ext2_symlink,
+	.mkdir		= ext2_mkdir,
+	.rmdir		= ext2_rmdir,
+	.mknod		= ext2_mknod,
+	.rename		= ext2_rename,
+#ifdef CONFIG_EXT2_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext2_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.setattr	= ext2_setattr,
+	.permission	= ext2_permission,
+};
+
+struct inode_operations ext2_special_inode_operations = {
+#ifdef CONFIG_EXT2_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext2_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.setattr	= ext2_setattr,
+	.permission	= ext2_permission,
+};
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
new file mode 100644
index 0000000..37ca77a
--- /dev/null
+++ b/fs/ext2/super.c
@@ -0,0 +1,1161 @@
+/*
+ *  linux/fs/ext2/super.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <linux/vfs.h>
+#include <asm/uaccess.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+static void ext2_sync_super(struct super_block *sb,
+			    struct ext2_super_block *es);
+static int ext2_remount (struct super_block * sb, int * flags, char * data);
+static int ext2_statfs (struct super_block * sb, struct kstatfs * buf);
+
+void ext2_error (struct super_block * sb, const char * function,
+		 const char * fmt, ...)
+{
+	va_list args;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		sbi->s_mount_state |= EXT2_ERROR_FS;
+		es->s_state =
+			cpu_to_le16(le16_to_cpu(es->s_state) | EXT2_ERROR_FS);
+		ext2_sync_super(sb, es);
+	}
+
+	va_start(args, fmt);
+	printk(KERN_CRIT "EXT2-fs error (device %s): %s: ",sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+
+	if (test_opt(sb, ERRORS_PANIC))
+		panic("EXT2-fs panic from previous error\n");
+	if (test_opt(sb, ERRORS_RO)) {
+		printk("Remounting filesystem read-only\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+}
+
+void ext2_warning (struct super_block * sb, const char * function,
+		   const char * fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	printk(KERN_WARNING "EXT2-fs warning (device %s): %s: ",
+	       sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+}
+
+void ext2_update_dynamic_rev(struct super_block *sb)
+{
+	struct ext2_super_block *es = EXT2_SB(sb)->s_es;
+
+	if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
+		return;
+
+	ext2_warning(sb, __FUNCTION__,
+		     "updating to rev %d because of new feature flag, "
+		     "running e2fsck is recommended",
+		     EXT2_DYNAMIC_REV);
+
+	es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
+	es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
+	es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
+	/* leave es->s_feature_*compat flags alone */
+	/* es->s_uuid will be set by e2fsck if empty */
+
+	/*
+	 * The rest of the superblock fields should be zero, and if not it
+	 * means they are likely already in use, so leave them alone.  We
+	 * can leave it up to e2fsck to clean up any inconsistencies there.
+	 */
+}
+
+static void ext2_put_super (struct super_block * sb)
+{
+	int db_count;
+	int i;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+	ext2_xattr_put_super(sb);
+	if (!(sb->s_flags & MS_RDONLY)) {
+		struct ext2_super_block *es = sbi->s_es;
+
+		es->s_state = cpu_to_le16(sbi->s_mount_state);
+		ext2_sync_super(sb, es);
+	}
+	db_count = sbi->s_gdb_count;
+	for (i = 0; i < db_count; i++)
+		if (sbi->s_group_desc[i])
+			brelse (sbi->s_group_desc[i]);
+	kfree(sbi->s_group_desc);
+	kfree(sbi->s_debts);
+	percpu_counter_destroy(&sbi->s_freeblocks_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	brelse (sbi->s_sbh);
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+
+	return;
+}
+
+static kmem_cache_t * ext2_inode_cachep;
+
+static struct inode *ext2_alloc_inode(struct super_block *sb)
+{
+	struct ext2_inode_info *ei;
+	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	ei->i_acl = EXT2_ACL_NOT_CACHED;
+	ei->i_default_acl = EXT2_ACL_NOT_CACHED;
+#endif
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void ext2_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		rwlock_init(&ei->i_meta_lock);
+#ifdef CONFIG_EXT2_FS_XATTR
+		init_rwsem(&ei->xattr_sem);
+#endif
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+ 
+static int init_inodecache(void)
+{
+	ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
+					     sizeof(struct ext2_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (ext2_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(ext2_inode_cachep))
+		printk(KERN_INFO "ext2_inode_cache: not all structures were freed\n");
+}
+
+static void ext2_clear_inode(struct inode *inode)
+{
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	struct ext2_inode_info *ei = EXT2_I(inode);
+
+	if (ei->i_acl && ei->i_acl != EXT2_ACL_NOT_CACHED) {
+		posix_acl_release(ei->i_acl);
+		ei->i_acl = EXT2_ACL_NOT_CACHED;
+	}
+	if (ei->i_default_acl && ei->i_default_acl != EXT2_ACL_NOT_CACHED) {
+		posix_acl_release(ei->i_default_acl);
+		ei->i_default_acl = EXT2_ACL_NOT_CACHED;
+	}
+#endif
+	if (!is_bad_inode(inode))
+		ext2_discard_prealloc(inode);
+}
+
+
+#ifdef CONFIG_QUOTA
+static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
+static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
+#endif
+
+static struct super_operations ext2_sops = {
+	.alloc_inode	= ext2_alloc_inode,
+	.destroy_inode	= ext2_destroy_inode,
+	.read_inode	= ext2_read_inode,
+	.write_inode	= ext2_write_inode,
+	.delete_inode	= ext2_delete_inode,
+	.put_super	= ext2_put_super,
+	.write_super	= ext2_write_super,
+	.statfs		= ext2_statfs,
+	.remount_fs	= ext2_remount,
+	.clear_inode	= ext2_clear_inode,
+#ifdef CONFIG_QUOTA
+	.quota_read	= ext2_quota_read,
+	.quota_write	= ext2_quota_write,
+#endif
+};
+
+/* Yes, most of these are left as NULL!!
+ * A NULL value implies the default, which works with ext2-like file
+ * systems, but can be improved upon.
+ * Currently only get_parent is required.
+ */
+struct dentry *ext2_get_parent(struct dentry *child);
+static struct export_operations ext2_export_ops = {
+	.get_parent = ext2_get_parent,
+};
+
+static unsigned long get_sb_block(void **data)
+{
+	unsigned long 	sb_block;
+	char 		*options = (char *) *data;
+
+	if (!options || strncmp(options, "sb=", 3) != 0)
+		return 1;	/* Default location */
+	options += 3;
+	sb_block = simple_strtoul(options, &options, 0);
+	if (*options && *options != ',') {
+		printk("EXT2-fs: Invalid sb specification: %s\n",
+		       (char *) *data);
+		return 1;
+	}
+	if (*options == ',')
+		options++;
+	*data = (void *) options;
+	return sb_block;
+}
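+
+/*
+ * Example (illustrative): when the option string begins with "sb=", as in
+ * "sb=8193,grpid", the superblock is read from block 8193 (the first
+ * backup copy on a 1k-block filesystem) instead of the default block 1,
+ * and the remaining options are handed to parse_options() unchanged.
+ */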
+
+enum {
+	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
+	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
+	Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh,
+	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
+	Opt_ignore, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_bsd_df, "bsddf"},
+	{Opt_minix_df, "minixdf"},
+	{Opt_grpid, "grpid"},
+	{Opt_grpid, "bsdgroups"},
+	{Opt_nogrpid, "nogrpid"},
+	{Opt_nogrpid, "sysvgroups"},
+	{Opt_resgid, "resgid=%u"},
+	{Opt_resuid, "resuid=%u"},
+	{Opt_sb, "sb=%u"},
+	{Opt_err_cont, "errors=continue"},
+	{Opt_err_panic, "errors=panic"},
+	{Opt_err_ro, "errors=remount-ro"},
+	{Opt_nouid32, "nouid32"},
+	{Opt_nocheck, "check=none"},
+	{Opt_nocheck, "nocheck"},
+	{Opt_check, "check"},
+	{Opt_debug, "debug"},
+	{Opt_oldalloc, "oldalloc"},
+	{Opt_orlov, "orlov"},
+	{Opt_nobh, "nobh"},
+	{Opt_user_xattr, "user_xattr"},
+	{Opt_nouser_xattr, "nouser_xattr"},
+	{Opt_acl, "acl"},
+	{Opt_noacl, "noacl"},
+	{Opt_ignore, "grpquota"},
+	{Opt_ignore, "noquota"},
+	{Opt_ignore, "quota"},
+	{Opt_ignore, "usrquota"},
+	{Opt_err, NULL}
+};
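+
+/*
+ * Example (illustrative): an option string such as
+ * "errors=remount-ro,resuid=0,user_xattr,acl" is split on commas below,
+ * matched against the table above, and folded into sbi->s_mount_opt,
+ * sbi->s_resuid and sbi->s_resgid.
+ */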
+
+static int parse_options (char * options,
+			  struct ext2_sb_info *sbi)
+{
+	char * p;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long kind = EXT2_MOUNT_ERRORS_CONT;
+	int option;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep (&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_bsd_df:
+			clear_opt (sbi->s_mount_opt, MINIX_DF);
+			break;
+		case Opt_minix_df:
+			set_opt (sbi->s_mount_opt, MINIX_DF);
+			break;
+		case Opt_grpid:
+			set_opt (sbi->s_mount_opt, GRPID);
+			break;
+		case Opt_nogrpid:
+			clear_opt (sbi->s_mount_opt, GRPID);
+			break;
+		case Opt_resuid:
+			if (match_int(&args[0], &option))
+				return 0;
+			sbi->s_resuid = option;
+			break;
+		case Opt_resgid:
+			if (match_int(&args[0], &option))
+				return 0;
+			sbi->s_resgid = option;
+			break;
+		case Opt_sb:
+			/* handled by get_sb_block() instead of here */
+			/* *sb_block = match_int(&args[0]); */
+			break;
+		case Opt_err_panic:
+			kind = EXT2_MOUNT_ERRORS_PANIC;
+			break;
+		case Opt_err_ro:
+			kind = EXT2_MOUNT_ERRORS_RO;
+			break;
+		case Opt_err_cont:
+			kind = EXT2_MOUNT_ERRORS_CONT;
+			break;
+		case Opt_nouid32:
+			set_opt (sbi->s_mount_opt, NO_UID32);
+			break;
+		case Opt_check:
+#ifdef CONFIG_EXT2_CHECK
+			set_opt (sbi->s_mount_opt, CHECK);
+#else
+			printk("EXT2 Check option not supported\n");
+#endif
+			break;
+		case Opt_nocheck:
+			clear_opt (sbi->s_mount_opt, CHECK);
+			break;
+		case Opt_debug:
+			set_opt (sbi->s_mount_opt, DEBUG);
+			break;
+		case Opt_oldalloc:
+			set_opt (sbi->s_mount_opt, OLDALLOC);
+			break;
+		case Opt_orlov:
+			clear_opt (sbi->s_mount_opt, OLDALLOC);
+			break;
+		case Opt_nobh:
+			set_opt (sbi->s_mount_opt, NOBH);
+			break;
+#ifdef CONFIG_EXT2_FS_XATTR
+		case Opt_user_xattr:
+			set_opt (sbi->s_mount_opt, XATTR_USER);
+			break;
+		case Opt_nouser_xattr:
+			clear_opt (sbi->s_mount_opt, XATTR_USER);
+			break;
+#else
+		case Opt_user_xattr:
+		case Opt_nouser_xattr:
+			printk("EXT2 (no)user_xattr options not supported\n");
+			break;
+#endif
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+		case Opt_acl:
+			set_opt(sbi->s_mount_opt, POSIX_ACL);
+			break;
+		case Opt_noacl:
+			clear_opt(sbi->s_mount_opt, POSIX_ACL);
+			break;
+#else
+		case Opt_acl:
+		case Opt_noacl:
+			printk("EXT2 (no)acl options not supported\n");
+			break;
+#endif
+		case Opt_ignore:
+			break;
+		default:
+			return 0;
+		}
+	}
+	sbi->s_mount_opt |= kind;
+	return 1;
+}
+
+static int ext2_setup_super (struct super_block * sb,
+			      struct ext2_super_block * es,
+			      int read_only)
+{
+	int res = 0;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+	if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
+		printk ("EXT2-fs warning: revision level too high, "
+			"forcing read-only mode\n");
+		res = MS_RDONLY;
+	}
+	if (read_only)
+		return res;
+	if (!(sbi->s_mount_state & EXT2_VALID_FS))
+		printk ("EXT2-fs warning: mounting unchecked fs, "
+			"running e2fsck is recommended\n");
+	else if ((sbi->s_mount_state & EXT2_ERROR_FS))
+		printk ("EXT2-fs warning: mounting fs with errors, "
+			"running e2fsck is recommended\n");
+	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+		 le16_to_cpu(es->s_mnt_count) >=
+		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+		printk ("EXT2-fs warning: maximal mount count reached, "
+			"running e2fsck is recommended\n");
+	else if (le32_to_cpu(es->s_checkinterval) &&
+		(le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+		printk ("EXT2-fs warning: checktime reached, "
+			"running e2fsck is recommended\n");
+	if (!le16_to_cpu(es->s_max_mnt_count))
+		es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
+	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+	ext2_write_super(sb);
+	if (test_opt (sb, DEBUG))
+		printk ("[EXT II FS %s, %s, bs=%lu, fs=%lu, gc=%lu, "
+			"bpg=%lu, ipg=%lu, mo=%04lx]\n",
+			EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
+			sbi->s_frag_size,
+			sbi->s_groups_count,
+			EXT2_BLOCKS_PER_GROUP(sb),
+			EXT2_INODES_PER_GROUP(sb),
+			sbi->s_mount_opt);
+#ifdef CONFIG_EXT2_CHECK
+	if (test_opt (sb, CHECK)) {
+		ext2_check_blocks_bitmap (sb);
+		ext2_check_inodes_bitmap (sb);
+	}
+#endif
+	return res;
+}
+
+static int ext2_check_descriptors (struct super_block * sb)
+{
+	int i;
+	int desc_block = 0;
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	unsigned long block = le32_to_cpu(sbi->s_es->s_first_data_block);
+	struct ext2_group_desc * gdp = NULL;
+
+	ext2_debug ("Checking group descriptors");
+
+	for (i = 0; i < sbi->s_groups_count; i++)
+	{
+		if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
+			gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
+		if (le32_to_cpu(gdp->bg_block_bitmap) < block ||
+		    le32_to_cpu(gdp->bg_block_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
+		{
+			ext2_error (sb, "ext2_check_descriptors",
+				    "Block bitmap for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
+			return 0;
+		}
+		if (le32_to_cpu(gdp->bg_inode_bitmap) < block ||
+		    le32_to_cpu(gdp->bg_inode_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
+		{
+			ext2_error (sb, "ext2_check_descriptors",
+				    "Inode bitmap for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
+			return 0;
+		}
+		if (le32_to_cpu(gdp->bg_inode_table) < block ||
+		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >=
+		    block + EXT2_BLOCKS_PER_GROUP(sb))
+		{
+			ext2_error (sb, "ext2_check_descriptors",
+				    "Inode table for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
+			return 0;
+		}
+		block += EXT2_BLOCKS_PER_GROUP(sb);
+		gdp++;
+	}
+	return 1;
+}
+
+#define log2(n) ffz(~(n))
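+/*
+ * Worked example: for a power of two n, ffz(~n) is the index of its single
+ * set bit, e.g. log2(4096) = 12.  The callers below only pass per-block
+ * counts, which are always powers of two.
+ */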
+ 
+/*
+ * Maximal file size.  There is a direct, and {,double-,triple-}indirect
+ * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
+ * We need to be 1 filesystem block less than the 2^32 sector limit.
+ */
+static loff_t ext2_max_size(int bits)
+{
+	loff_t res = EXT2_NDIR_BLOCKS;
+	/* This constant is calculated to be the largest file size for a
+	 * dense, 4k-blocksize file such that the total number of
+	 * sectors in the file, including data and all indirect blocks,
+	 * does not exceed 2^32. */
+	const loff_t upper_limit = 0x1ff7fffd000LL;
+
+	res += 1LL << (bits-2);
+	res += 1LL << (2*(bits-2));
+	res += 1LL << (3*(bits-2));
+	res <<= bits;
+	if (res > upper_limit)
+		res = upper_limit;
+	return res;
+}
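+
+/*
+ * Worked example (illustrative): with 4k blocks (bits = 12) the sum above
+ * is 12 + 2^10 + 2^20 + 2^30 blocks, which is about 4TB after the shift
+ * by 12, so the result is clamped to upper_limit (just under 2TB) to keep
+ * i_blocks, counted in 512-byte sectors, below 2^32.
+ */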
+
+static unsigned long descriptor_loc(struct super_block *sb,
+				    unsigned long logic_sb_block,
+				    int nr)
+{
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	unsigned long bg, first_data_block, first_meta_bg;
+	int has_super = 0;
+	
+	first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
+
+	if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
+	    nr < first_meta_bg)
+		return (logic_sb_block + nr + 1);
+	bg = sbi->s_desc_per_block * nr;
+	if (ext2_bg_has_super(sb, bg))
+		has_super = 1;
+	return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
+}
+
+static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct buffer_head * bh;
+	struct ext2_sb_info * sbi;
+	struct ext2_super_block * es;
+	struct inode *root;
+	unsigned long block;
+	unsigned long sb_block = get_sb_block(&data);
+	unsigned long logic_sb_block;
+	unsigned long offset = 0;
+	unsigned long def_mount_opts;
+	int blocksize = BLOCK_SIZE;
+	int db_count;
+	int i, j;
+	__le32 features;
+
+	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(*sbi));
+
+	/*
+	 * Use the blocksize the device is currently set to, unless it is
+	 * smaller than the default, in which case use the default.
+	 * This is important for devices that have a hardware
+	 * sectorsize that is larger than the default.
+	 */
+	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
+	if (!blocksize) {
+		printk ("EXT2-fs: unable to set blocksize\n");
+		goto failed_sbi;
+	}
+
+	/*
+	 * If the superblock doesn't start on a hardware sector boundary,
+	 * calculate the offset.  
+	 */
+	if (blocksize != BLOCK_SIZE) {
+		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
+		offset = (sb_block*BLOCK_SIZE) % blocksize;
+	} else {
+		logic_sb_block = sb_block;
+	}
+
+	if (!(bh = sb_bread(sb, logic_sb_block))) {
+		printk ("EXT2-fs: unable to read superblock\n");
+		goto failed_sbi;
+	}
+	/*
+	 * Note: s_es must be initialized as soon as possible because
+	 *       some ext2 macro-instructions depend on its value
+	 */
+	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
+	sbi->s_es = es;
+	sb->s_magic = le16_to_cpu(es->s_magic);
+
+	if (sb->s_magic != EXT2_SUPER_MAGIC)
+		goto cantfind_ext2;
+
+	/* Set defaults before we parse the mount options */
+	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+	if (def_mount_opts & EXT2_DEFM_DEBUG)
+		set_opt(sbi->s_mount_opt, DEBUG);
+	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
+		set_opt(sbi->s_mount_opt, GRPID);
+	if (def_mount_opts & EXT2_DEFM_UID16)
+		set_opt(sbi->s_mount_opt, NO_UID32);
+	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+		set_opt(sbi->s_mount_opt, XATTR_USER);
+	if (def_mount_opts & EXT2_DEFM_ACL)
+		set_opt(sbi->s_mount_opt, POSIX_ACL);
+	
+	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
+		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
+		set_opt(sbi->s_mount_opt, ERRORS_RO);
+
+	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
+	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
+	
+	if (!parse_options ((char *) data, sbi))
+		goto failed_mount;
+
+	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
+		 MS_POSIXACL : 0);
+
+	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
+	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
+	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
+	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
+		printk("EXT2-fs warning: feature flags set on rev 0 fs, "
+		       "running e2fsck is recommended\n");
+	/*
+	 * Check feature flags regardless of the revision level, since we
+	 * previously didn't change the revision level when setting the flags,
+	 * so there is a chance incompat flags are set on a rev 0 filesystem.
+	 */
+	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
+	if (features) {
+		printk("EXT2-fs: %s: couldn't mount because of "
+		       "unsupported optional features (%x).\n",
+		       sb->s_id, le32_to_cpu(features));
+		goto failed_mount;
+	}
+	if (!(sb->s_flags & MS_RDONLY) &&
+	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
+		printk("EXT2-fs: %s: couldn't mount RDWR because of "
+		       "unsupported optional features (%x).\n",
+		       sb->s_id, le32_to_cpu(features));
+		goto failed_mount;
+	}
+
+	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+
+	/* If the blocksize doesn't match, re-read the thing.. */
+	if (sb->s_blocksize != blocksize) {
+		brelse(bh);
+
+		if (!sb_set_blocksize(sb, blocksize)) {
+			printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
+			goto failed_sbi;
+		}
+
+		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
+		offset = (sb_block*BLOCK_SIZE) % blocksize;
+		bh = sb_bread(sb, logic_sb_block);
+		if(!bh) {
+			printk("EXT2-fs: Couldn't read superblock on "
+			       "2nd try.\n");
+			goto failed_sbi;
+		}
+		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
+		sbi->s_es = es;
+		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
+			printk ("EXT2-fs: Magic mismatch, very weird !\n");
+			goto failed_mount;
+		}
+	}
+
+	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
+
+	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
+		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
+		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
+	} else {
+		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
+		    (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+		    (sbi->s_inode_size > blocksize)) {
+			printk ("EXT2-fs: unsupported inode size: %d\n",
+				sbi->s_inode_size);
+			goto failed_mount;
+		}
+	}
+
+	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
+				   le32_to_cpu(es->s_log_frag_size);
+	if (sbi->s_frag_size == 0)
+		goto cantfind_ext2;
+	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
+
+	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
+	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+
+	if (EXT2_INODE_SIZE(sb) == 0)
+		goto cantfind_ext2;
+	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
+	if (sbi->s_inodes_per_block == 0)
+		goto cantfind_ext2;
+	sbi->s_itb_per_group = sbi->s_inodes_per_group /
+					sbi->s_inodes_per_block;
+	sbi->s_desc_per_block = sb->s_blocksize /
+					sizeof (struct ext2_group_desc);
+	sbi->s_sbh = bh;
+	sbi->s_mount_state = le16_to_cpu(es->s_state);
+	sbi->s_addr_per_block_bits =
+		log2 (EXT2_ADDR_PER_BLOCK(sb));
+	sbi->s_desc_per_block_bits =
+		log2 (EXT2_DESC_PER_BLOCK(sb));
+
+	if (sb->s_magic != EXT2_SUPER_MAGIC)
+		goto cantfind_ext2;
+
+	if (sb->s_blocksize != bh->b_size) {
+		if (!silent)
+			printk ("VFS: Unsupported blocksize on dev "
+				"%s.\n", sb->s_id);
+		goto failed_mount;
+	}
+
+	if (sb->s_blocksize != sbi->s_frag_size) {
+		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
+			sbi->s_frag_size, sb->s_blocksize);
+		goto failed_mount;
+	}
+
+	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
+		printk ("EXT2-fs: #blocks per group too big: %lu\n",
+			sbi->s_blocks_per_group);
+		goto failed_mount;
+	}
+	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
+		printk ("EXT2-fs: #fragments per group too big: %lu\n",
+			sbi->s_frags_per_group);
+		goto failed_mount;
+	}
+	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+		printk ("EXT2-fs: #inodes per group too big: %lu\n",
+			sbi->s_inodes_per_group);
+		goto failed_mount;
+	}
+
+	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
+		goto cantfind_ext2;
+	sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
+				        le32_to_cpu(es->s_first_data_block) +
+				       EXT2_BLOCKS_PER_GROUP(sb) - 1) /
+				       EXT2_BLOCKS_PER_GROUP(sb);
+	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
+		   EXT2_DESC_PER_BLOCK(sb);
+	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
+	if (sbi->s_group_desc == NULL) {
+		printk ("EXT2-fs: not enough memory\n");
+		goto failed_mount;
+	}
+	percpu_counter_init(&sbi->s_freeblocks_counter);
+	percpu_counter_init(&sbi->s_freeinodes_counter);
+	percpu_counter_init(&sbi->s_dirs_counter);
+	bgl_lock_init(&sbi->s_blockgroup_lock);
+	sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
+			       GFP_KERNEL);
+	if (!sbi->s_debts) {
+		printk ("EXT2-fs: not enough memory\n");
+		goto failed_mount_group_desc;
+	}
+	memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
+	for (i = 0; i < db_count; i++) {
+		block = descriptor_loc(sb, logic_sb_block, i);
+		sbi->s_group_desc[i] = sb_bread(sb, block);
+		if (!sbi->s_group_desc[i]) {
+			for (j = 0; j < i; j++)
+				brelse (sbi->s_group_desc[j]);
+			printk ("EXT2-fs: unable to read group descriptors\n");
+			goto failed_mount_group_desc;
+		}
+	}
+	if (!ext2_check_descriptors (sb)) {
+		printk ("EXT2-fs: group descriptors corrupted!\n");
+		db_count = i;
+		goto failed_mount2;
+	}
+	sbi->s_gdb_count = db_count;
+	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+	spin_lock_init(&sbi->s_next_gen_lock);
+	/*
+	 * set up enough so that it can read an inode
+	 */
+	sb->s_op = &ext2_sops;
+	sb->s_export_op = &ext2_export_ops;
+	sb->s_xattr = ext2_xattr_handlers;
+	root = iget(sb, EXT2_ROOT_INO);
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		iput(root);
+		printk(KERN_ERR "EXT2-fs: get root inode failed\n");
+		goto failed_mount2;
+	}
+	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+		dput(sb->s_root);
+		sb->s_root = NULL;
+		printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
+		goto failed_mount2;
+	}
+	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
+		ext2_warning(sb, __FUNCTION__,
+			"mounting ext3 filesystem as ext2\n");
+	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+	percpu_counter_mod(&sbi->s_freeblocks_counter,
+				ext2_count_free_blocks(sb));
+	percpu_counter_mod(&sbi->s_freeinodes_counter,
+				ext2_count_free_inodes(sb));
+	percpu_counter_mod(&sbi->s_dirs_counter,
+				ext2_count_dirs(sb));
+	return 0;
+
+cantfind_ext2:
+	if (!silent)
+		printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
+		       sb->s_id);
+	goto failed_mount;
+
+failed_mount2:
+	for (i = 0; i < db_count; i++)
+		brelse(sbi->s_group_desc[i]);
+failed_mount_group_desc:
+	kfree(sbi->s_group_desc);
+	kfree(sbi->s_debts);
+failed_mount:
+	brelse(bh);
+failed_sbi:
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+	return -EINVAL;
+}
+
+static void ext2_commit_super (struct super_block * sb,
+			       struct ext2_super_block * es)
+{
+	es->s_wtime = cpu_to_le32(get_seconds());
+	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
+	sb->s_dirt = 0;
+}
+
+static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
+{
+	es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
+	es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
+	es->s_wtime = cpu_to_le32(get_seconds());
+	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
+	sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
+	sb->s_dirt = 0;
+}
+
+/*
+ * In the second extended file system, it is not necessary to
+ * write the super block since we use a mapping of the
+ * disk super block in a buffer.
+ *
+ * However, this function is still used to set the fs valid
+ * flags to 0.  We need to set this flag to 0 since the fs
+ * may have been checked while mounted and e2fsck may have
+ * set s_state to EXT2_VALID_FS after some corrections.
+ */
+
+void ext2_write_super (struct super_block * sb)
+{
+	struct ext2_super_block * es;
+	lock_kernel();
+	if (!(sb->s_flags & MS_RDONLY)) {
+		es = EXT2_SB(sb)->s_es;
+
+		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS) {
+			ext2_debug ("setting valid to 0\n");
+			es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) &
+						  ~EXT2_VALID_FS);
+			es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
+			es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
+			es->s_mtime = cpu_to_le32(get_seconds());
+			ext2_sync_super(sb, es);
+		} else
+			ext2_commit_super (sb, es);
+	}
+	sb->s_dirt = 0;
+	unlock_kernel();
+}
+
+static int ext2_remount (struct super_block * sb, int * flags, char * data)
+{
+	struct ext2_sb_info * sbi = EXT2_SB(sb);
+	struct ext2_super_block * es;
+
+	/*
+	 * Allow the "check" option to be passed as a remount option.
+	 */
+	if (!parse_options (data, sbi))
+		return -EINVAL;
+
+	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+
+	es = sbi->s_es;
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (*flags & MS_RDONLY) {
+		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
+		    !(sbi->s_mount_state & EXT2_VALID_FS))
+			return 0;
+		/*
+		 * OK, we are remounting a valid rw partition rdonly, so set
+		 * the rdonly flag and then mark the partition as valid again.
+		 */
+		es->s_state = cpu_to_le16(sbi->s_mount_state);
+		es->s_mtime = cpu_to_le32(get_seconds());
+	} else {
+		__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
+					       ~EXT2_FEATURE_RO_COMPAT_SUPP);
+		if (ret) {
+			printk("EXT2-fs: %s: couldn't remount RDWR because of "
+			       "unsupported optional features (%x).\n",
+			       sb->s_id, le32_to_cpu(ret));
+			return -EROFS;
+		}
+		/*
+		 * Mounting a RDONLY partition read-write, so reread and
+		 * store the current valid flag.  (It may have been changed
+		 * by e2fsck since we originally mounted the partition.)
+		 */
+		sbi->s_mount_state = le16_to_cpu(es->s_state);
+		if (!ext2_setup_super (sb, es, 0))
+			sb->s_flags &= ~MS_RDONLY;
+	}
+	ext2_sync_super(sb, es);
+	return 0;
+}
+
+static int ext2_statfs (struct super_block * sb, struct kstatfs * buf)
+{
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	unsigned long overhead;
+	int i;
+
+	if (test_opt (sb, MINIX_DF))
+		overhead = 0;
+	else {
+		/*
+		 * Compute the overhead (FS structures)
+		 */
+
+		/*
+		 * All of the blocks before first_data_block are
+		 * overhead
+		 */
+		overhead = le32_to_cpu(sbi->s_es->s_first_data_block);
+
+		/*
+		 * Add the overhead attributed to the superblock and
+		 * block group descriptors.  If the sparse superblocks
+		 * feature is turned on, then not all groups have this.
+		 */
+		for (i = 0; i < sbi->s_groups_count; i++)
+			overhead += ext2_bg_has_super(sb, i) +
+				ext2_bg_num_gdb(sb, i);
+
+		/*
+		 * Every block group has an inode bitmap, a block
+		 * bitmap, and an inode table.
+		 */
+		overhead += (sbi->s_groups_count *
+			     (2 + sbi->s_itb_per_group));
+	}
+
+	buf->f_type = EXT2_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead;
+	buf->f_bfree = ext2_count_free_blocks(sb);
+	buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count);
+	if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count))
+		buf->f_bavail = 0;
+	buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
+	buf->f_ffree = ext2_count_free_inodes (sb);
+	buf->f_namelen = EXT2_NAME_LEN;
+	return 0;
+}
+
+static struct super_block *ext2_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+}
+
+#ifdef CONFIG_QUOTA
+
+/* Read data from quotafile - avoid pagecache and such because we cannot afford
+ * acquiring the locks... As quota files are never truncated and quota code
+ * itself serializes the operations (and no one else should touch the files)
+ * we don't have to be afraid of races */
+static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
+			       size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t toread;
+	struct buffer_head tmp_bh;
+	struct buffer_head *bh;
+	loff_t i_size = i_size_read(inode);
+
+	if (off > i_size)
+		return 0;
+	if (off+len > i_size)
+		len = i_size-off;
+	toread = len;
+	while (toread > 0) {
+		tocopy = sb->s_blocksize - offset < toread ?
+				sb->s_blocksize - offset : toread;
+
+		tmp_bh.b_state = 0;
+		err = ext2_get_block(inode, blk, &tmp_bh, 0);
+		if (err)
+			return err;
+		if (!buffer_mapped(&tmp_bh))	/* A hole? */
+			memset(data, 0, tocopy);
+		else {
+			bh = sb_bread(sb, tmp_bh.b_blocknr);
+			if (!bh)
+				return -EIO;
+			memcpy(data, bh->b_data+offset, tocopy);
+			brelse(bh);
+		}
+		offset = 0;
+		toread -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+	return len;
+}
+
+/* Write to quotafile */
+static ssize_t ext2_quota_write(struct super_block *sb, int type,
+				const char *data, size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t towrite = len;
+	struct buffer_head tmp_bh;
+	struct buffer_head *bh;
+
+	down(&inode->i_sem);
+	while (towrite > 0) {
+		tocopy = sb->s_blocksize - offset < towrite ?
+				sb->s_blocksize - offset : towrite;
+
+		tmp_bh.b_state = 0;
+		err = ext2_get_block(inode, blk, &tmp_bh, 1);
+		if (err)
+			goto out;
+		if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
+			bh = sb_bread(sb, tmp_bh.b_blocknr);
+		else
+			bh = sb_getblk(sb, tmp_bh.b_blocknr);
+		if (!bh) {
+			err = -EIO;
+			goto out;
+		}
+		lock_buffer(bh);
+		memcpy(bh->b_data+offset, data, tocopy);
+		flush_dcache_page(bh->b_page);
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+		unlock_buffer(bh);
+		brelse(bh);
+		offset = 0;
+		towrite -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+out:
+	if (len == towrite)
+		return err;
+	if (inode->i_size < off+len-towrite)
+		i_size_write(inode, off+len-towrite);
+	inode->i_version++;
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(inode);
+	up(&inode->i_sem);
+	return len - towrite;
+}
+
+#endif
+
+static struct file_system_type ext2_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ext2",
+	.get_sb		= ext2_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_ext2_fs(void)
+{
+	int err = init_ext2_xattr();
+	if (err)
+		return err;
+	err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&ext2_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	exit_ext2_xattr();
+	return err;
+}
+
+static void __exit exit_ext2_fs(void)
+{
+	unregister_filesystem(&ext2_fs_type);
+	destroy_inodecache();
+	exit_ext2_xattr();
+}
+
+module_init(init_ext2_fs)
+module_exit(exit_ext2_fs)
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
new file mode 100644
index 0000000..9f7bac0
--- /dev/null
+++ b/fs/ext2/symlink.c
@@ -0,0 +1,52 @@
+/*
+ *  linux/fs/ext2/symlink.c
+ *
+ * Only fast symlinks left here - the rest is done by generic code. AV, 1999
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/symlink.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 symlink handling code
+ */
+
+#include "ext2.h"
+#include "xattr.h"
+#include <linux/namei.h>
+
+static int ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct ext2_inode_info *ei = EXT2_I(dentry->d_inode);
+	nd_set_link(nd, (char *)ei->i_data);
+	return 0;
+}
+
+struct inode_operations ext2_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+#ifdef CONFIG_EXT2_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext2_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+};
+ 
+struct inode_operations ext2_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= ext2_follow_link,
+#ifdef CONFIG_EXT2_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext2_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+};
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
new file mode 100644
index 0000000..27982b5
--- /dev/null
+++ b/fs/ext2/xattr.c
@@ -0,0 +1,1043 @@
+/*
+ * linux/fs/ext2/xattr.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
+ *
+ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
+ * Extended attributes for symlinks and special files added per
+ *  suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ *  Red Hat Inc.
+ *
+ */
+
+/*
+ * Extended attributes are stored on disk blocks allocated outside of
+ * any inode. The i_file_acl field is then made to point to this allocated
+ * block. If all extended attributes of an inode are identical, these
+ * inodes may share the same extended attribute block. Such situations
+ * are automatically detected by keeping a cache of recent attribute block
+ * numbers and hashes over the block's contents in memory.
+ *
+ *
+ * Extended attribute block layout:
+ *
+ *   +------------------+
+ *   | header           |
+ *   | entry 1          | |
+ *   | entry 2          | | growing downwards
+ *   | entry 3          | v
+ *   | four null bytes  |
+ *   | . . .            |
+ *   | value 1          | ^
+ *   | value 3          | | growing upwards
+ *   | value 2          | |
+ *   +------------------+
+ *
+ * The block header is followed by multiple entry descriptors. These entry
+ * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
+ * byte boundaries. The entry descriptors are sorted by attribute name,
+ * so that two extended attribute blocks can be compared efficiently.
+ *
+ * Attribute values are aligned to the end of the block, stored in
+ * no specific order. They are also padded to EXT2_XATTR_PAD byte
+ * boundaries. No additional gaps are left between them.
+ *
+ * Locking strategy
+ * ----------------
+ * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
+ * EA blocks are only changed if they are exclusive to an inode, so
+ * holding xattr_sem also means that nothing but the EA block's reference
+ * count will change. Multiple writers to an EA block are synchronized
+ * by the bh lock. No more than a single bh lock is held at any time
+ * to avoid deadlocks.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mbcache.h>
+#include <linux/quotaops.h>
+#include <linux/rwsem.h>
+#include "ext2.h"
+#include "xattr.h"
+#include "acl.h"
+
+#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
+#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
+#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
+#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
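+
+/*
+ * Entry-walk sketch (illustrative only; the real lookups below add bounds
+ * checks against the end of the block and take xattr_sem first):
+ *
+ *	struct ext2_xattr_entry *entry = FIRST_ENTRY(bh);
+ *	while (!IS_LAST_ENTRY(entry))
+ *		entry = EXT2_XATTR_NEXT(entry);
+ *
+ * EXT2_XATTR_NEXT() (from xattr.h) advances past one aligned entry
+ * descriptor, as used by ext2_xattr_get() and ext2_xattr_list() below.
+ */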
+
+#ifdef EXT2_XATTR_DEBUG
+# define ea_idebug(inode, f...) do { \
+		printk(KERN_DEBUG "inode %s:%ld: ", \
+			inode->i_sb->s_id, inode->i_ino); \
+		printk(f); \
+		printk("\n"); \
+	} while (0)
+# define ea_bdebug(bh, f...) do { \
+		char b[BDEVNAME_SIZE]; \
+		printk(KERN_DEBUG "block %s:%lu: ", \
+			bdevname(bh->b_bdev, b), \
+			(unsigned long) bh->b_blocknr); \
+		printk(f); \
+		printk("\n"); \
+	} while (0)
+#else
+# define ea_idebug(f...)
+# define ea_bdebug(f...)
+#endif
+
+static int ext2_xattr_set2(struct inode *, struct buffer_head *,
+			   struct ext2_xattr_header *);
+
+static int ext2_xattr_cache_insert(struct buffer_head *);
+static struct buffer_head *ext2_xattr_cache_find(struct inode *,
+						 struct ext2_xattr_header *);
+static void ext2_xattr_rehash(struct ext2_xattr_header *,
+			      struct ext2_xattr_entry *);
+
+static struct mb_cache *ext2_xattr_cache;
+
+static struct xattr_handler *ext2_xattr_handler_map[] = {
+	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext2_xattr_acl_access_handler,
+	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
+#endif
+	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
+#ifdef CONFIG_EXT2_FS_SECURITY
+	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
+#endif
+};
+
+struct xattr_handler *ext2_xattr_handlers[] = {
+	&ext2_xattr_user_handler,
+	&ext2_xattr_trusted_handler,
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+	&ext2_xattr_acl_access_handler,
+	&ext2_xattr_acl_default_handler,
+#endif
+#ifdef CONFIG_EXT2_FS_SECURITY
+	&ext2_xattr_security_handler,
+#endif
+	NULL
+};
+
+static inline struct xattr_handler *
+ext2_xattr_handler(int name_index)
+{
+	struct xattr_handler *handler = NULL;
+
+	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
+		handler = ext2_xattr_handler_map[name_index];
+	return handler;
+}
+
+/*
+ * ext2_xattr_get()
+ *
+ * Copy an extended attribute into the buffer
+ * provided, or compute the buffer size required.
+ * Pass a NULL buffer to compute the size of the buffer required.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+int
+ext2_xattr_get(struct inode *inode, int name_index, const char *name,
+	       void *buffer, size_t buffer_size)
+{
+	struct buffer_head *bh = NULL;
+	struct ext2_xattr_entry *entry;
+	size_t name_len, size;
+	char *end;
+	int error;
+
+	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
+		  name_index, name, buffer, (long)buffer_size);
+
+	if (name == NULL)
+		return -EINVAL;
+	down_read(&EXT2_I(inode)->xattr_sem);
+	error = -ENODATA;
+	if (!EXT2_I(inode)->i_file_acl)
+		goto cleanup;
+	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
+	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
+	error = -EIO;
+	if (!bh)
+		goto cleanup;
+	ea_bdebug(bh, "b_count=%d, refcount=%d",
+		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
+	end = bh->b_data + bh->b_size;
+	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
+	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
+bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
+			"inode %ld: bad block %d", inode->i_ino,
+			EXT2_I(inode)->i_file_acl);
+		error = -EIO;
+		goto cleanup;
+	}
+	/* find named attribute */
+	name_len = strlen(name);
+
+	error = -ERANGE;
+	if (name_len > 255)
+		goto cleanup;
+	entry = FIRST_ENTRY(bh);
+	while (!IS_LAST_ENTRY(entry)) {
+		struct ext2_xattr_entry *next =
+			EXT2_XATTR_NEXT(entry);
+		if ((char *)next >= end)
+			goto bad_block;
+		if (name_index == entry->e_name_index &&
+		    name_len == entry->e_name_len &&
+		    memcmp(name, entry->e_name, name_len) == 0)
+			goto found;
+		entry = next;
+	}
+	/* Check the remaining name entries */
+	while (!IS_LAST_ENTRY(entry)) {
+		struct ext2_xattr_entry *next =
+			EXT2_XATTR_NEXT(entry);
+		if ((char *)next >= end)
+			goto bad_block;
+		entry = next;
+	}
+	if (ext2_xattr_cache_insert(bh))
+		ea_idebug(inode, "cache insert failed");
+	error = -ENODATA;
+	goto cleanup;
+found:
+	/* check the buffer size */
+	if (entry->e_value_block != 0)
+		goto bad_block;
+	size = le32_to_cpu(entry->e_value_size);
+	if (size > inode->i_sb->s_blocksize ||
+	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
+		goto bad_block;
+
+	if (ext2_xattr_cache_insert(bh))
+		ea_idebug(inode, "cache insert failed");
+	if (buffer) {
+		error = -ERANGE;
+		if (size > buffer_size)
+			goto cleanup;
+		/* return value of attribute */
+		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
+			size);
+	}
+	error = size;
+
+cleanup:
+	brelse(bh);
+	up_read(&EXT2_I(inode)->xattr_sem);
+
+	return error;
+}
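+
+/*
+ * Usage sketch (illustrative only, error handling trimmed): callers that
+ * do not know the value size in advance typically call twice, first with
+ * a NULL buffer to learn the size, then again with a buffer of that size:
+ *
+ *	int size = ext2_xattr_get(inode, idx, name, NULL, 0);
+ *	if (size > 0) {
+ *		void *buf = kmalloc(size, GFP_KERNEL);
+ *		if (buf)
+ *			size = ext2_xattr_get(inode, idx, name, buf, size);
+ *	}
+ */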
+
+/*
+ * ext2_xattr_list()
+ *
+ * Copy a list of attribute names into the buffer
+ * provided, or compute the buffer size required.
+ * Pass a NULL buffer to compute the size of the buffer required.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+static int
+ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	struct buffer_head *bh = NULL;
+	struct ext2_xattr_entry *entry;
+	char *end;
+	size_t rest = buffer_size;
+	int error;
+
+	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+		  buffer, (long)buffer_size);
+
+	down_read(&EXT2_I(inode)->xattr_sem);
+	error = 0;
+	if (!EXT2_I(inode)->i_file_acl)
+		goto cleanup;
+	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
+	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
+	error = -EIO;
+	if (!bh)
+		goto cleanup;
+	ea_bdebug(bh, "b_count=%d, refcount=%d",
+		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
+	end = bh->b_data + bh->b_size;
+	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
+	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
+bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
+			"inode %ld: bad block %d", inode->i_ino,
+			EXT2_I(inode)->i_file_acl);
+		error = -EIO;
+		goto cleanup;
+	}
+
+	/* check the on-disk data structure */
+	entry = FIRST_ENTRY(bh);
+	while (!IS_LAST_ENTRY(entry)) {
+		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
+
+		if ((char *)next >= end)
+			goto bad_block;
+		entry = next;
+	}
+	if (ext2_xattr_cache_insert(bh))
+		ea_idebug(inode, "cache insert failed");
+
+	/* list the attribute names */
+	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
+	     entry = EXT2_XATTR_NEXT(entry)) {
+		struct xattr_handler *handler =
+			ext2_xattr_handler(entry->e_name_index);
+
+		if (handler) {
+			size_t size = handler->list(inode, buffer, rest,
+						    entry->e_name,
+						    entry->e_name_len);
+			if (buffer) {
+				if (size > rest) {
+					error = -ERANGE;
+					goto cleanup;
+				}
+				buffer += size;
+			}
+			rest -= size;
+		}
+	}
+	error = buffer_size - rest;  /* total size */
+
+cleanup:
+	brelse(bh);
+	up_read(&EXT2_I(inode)->xattr_sem);
+
+	return error;
+}
+
+/*
+ * Inode operation listxattr()
+ *
+ * dentry->d_inode->i_sem: don't care
+ */
+ssize_t
+ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+	return ext2_xattr_list(dentry->d_inode, buffer, size);
+}
+
+/*
+ * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
+ * not set, set it.
+ */
+static void ext2_xattr_update_super_block(struct super_block *sb)
+{
+	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
+		return;
+
+	lock_super(sb);
+	EXT2_SB(sb)->s_es->s_feature_compat |=
+		cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
+	sb->s_dirt = 1;
+	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
+	unlock_super(sb);
+}
+
+/*
+ * ext2_xattr_set()
+ *
+ * Create, replace or remove an extended attribute for this inode.  Value
+ * is NULL to remove an existing extended attribute, and non-NULL to
+ * either replace an existing extended attribute or create a new extended
+ * attribute.  The flags XATTR_REPLACE and XATTR_CREATE specify that an
+ * extended attribute must exist and must not exist prior to the call,
+ * respectively.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int
+ext2_xattr_set(struct inode *inode, int name_index, const char *name,
+	       const void *value, size_t value_len, int flags)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh = NULL;
+	struct ext2_xattr_header *header = NULL;
+	struct ext2_xattr_entry *here, *last;
+	size_t name_len, free, min_offs = sb->s_blocksize;
+	int not_found = 1, error;
+	char *end;
+	
+	/*
+	 * header -- Points either into bh, or to a temporarily
+	 *           allocated buffer.
+	 * here -- The named entry found, or the place for inserting, within
+	 *         the block pointed to by header.
+	 * last -- Points right after the last named entry within the block
+	 *         pointed to by header.
+	 * min_offs -- The offset of the first value (values are aligned
+	 *             towards the end of the block).
+	 * end -- Points right after the block pointed to by header.
+	 */
+	
+	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
+		  name_index, name, value, (long)value_len);
+
+	if (IS_RDONLY(inode))
+		return -EROFS;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		return -EPERM;
+	if (value == NULL)
+		value_len = 0;
+	if (name == NULL)
+		return -EINVAL;
+	name_len = strlen(name);
+	if (name_len > 255 || value_len > sb->s_blocksize)
+		return -ERANGE;
+	down_write(&EXT2_I(inode)->xattr_sem);
+	if (EXT2_I(inode)->i_file_acl) {
+		/* The inode already has an extended attribute block. */
+		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
+		error = -EIO;
+		if (!bh)
+			goto cleanup;
+		ea_bdebug(bh, "b_count=%d, refcount=%d",
+			atomic_read(&(bh->b_count)),
+			le32_to_cpu(HDR(bh)->h_refcount));
+		header = HDR(bh);
+		end = bh->b_data + bh->b_size;
+		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
+		    header->h_blocks != cpu_to_le32(1)) {
+bad_block:		ext2_error(sb, "ext2_xattr_set",
+				"inode %ld: bad block %d", inode->i_ino, 
+				   EXT2_I(inode)->i_file_acl);
+			error = -EIO;
+			goto cleanup;
+		}
+		/* Find the named attribute. */
+		here = FIRST_ENTRY(bh);
+		while (!IS_LAST_ENTRY(here)) {
+			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
+			if ((char *)next >= end)
+				goto bad_block;
+			if (!here->e_value_block && here->e_value_size) {
+				size_t offs = le16_to_cpu(here->e_value_offs);
+				if (offs < min_offs)
+					min_offs = offs;
+			}
+			not_found = name_index - here->e_name_index;
+			if (!not_found)
+				not_found = name_len - here->e_name_len;
+			if (!not_found)
+				not_found = memcmp(name, here->e_name,name_len);
+			if (not_found <= 0)
+				break;
+			here = next;
+		}
+		last = here;
+		/* We still need to compute min_offs and last. */
+		while (!IS_LAST_ENTRY(last)) {
+			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
+			if ((char *)next >= end)
+				goto bad_block;
+			if (!last->e_value_block && last->e_value_size) {
+				size_t offs = le16_to_cpu(last->e_value_offs);
+				if (offs < min_offs)
+					min_offs = offs;
+			}
+			last = next;
+		}
+
+		/* Check whether we have enough space left. */
+		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
+	} else {
+		/* We will use a new extended attribute block. */
+		free = sb->s_blocksize -
+			sizeof(struct ext2_xattr_header) - sizeof(__u32);
+		here = last = NULL;  /* avoid gcc uninitialized warning. */
+	}
+
+	if (not_found) {
+		/* Request to remove a nonexistent attribute? */
+		error = -ENODATA;
+		if (flags & XATTR_REPLACE)
+			goto cleanup;
+		error = 0;
+		if (value == NULL)
+			goto cleanup;
+	} else {
+		/* Request to create an existing attribute? */
+		error = -EEXIST;
+		if (flags & XATTR_CREATE)
+			goto cleanup;
+		if (!here->e_value_block && here->e_value_size) {
+			size_t size = le32_to_cpu(here->e_value_size);
+
+			if (le16_to_cpu(here->e_value_offs) + size > 
+			    sb->s_blocksize || size > sb->s_blocksize)
+				goto bad_block;
+			free += EXT2_XATTR_SIZE(size);
+		}
+		free += EXT2_XATTR_LEN(name_len);
+	}
+	error = -ENOSPC;
+	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
+		goto cleanup;
+
+	/* Here we know that we can set the new attribute. */
+
+	if (header) {
+		struct mb_cache_entry *ce;
+
+		/* assert(header == HDR(bh)); */
+		ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
+					bh->b_blocknr);
+		lock_buffer(bh);
+		if (header->h_refcount == cpu_to_le32(1)) {
+			ea_bdebug(bh, "modifying in-place");
+			if (ce)
+				mb_cache_entry_free(ce);
+			/* keep the buffer locked while modifying it. */
+		} else {
+			int offset;
+
+			if (ce)
+				mb_cache_entry_release(ce);
+			unlock_buffer(bh);
+			ea_bdebug(bh, "cloning");
+			header = kmalloc(bh->b_size, GFP_KERNEL);
+			error = -ENOMEM;
+			if (header == NULL)
+				goto cleanup;
+			memcpy(header, HDR(bh), bh->b_size);
+			header->h_refcount = cpu_to_le32(1);
+
+			offset = (char *)here - bh->b_data;
+			here = ENTRY((char *)header + offset);
+			offset = (char *)last - bh->b_data;
+			last = ENTRY((char *)header + offset);
+		}
+	} else {
+		/* Allocate a buffer where we construct the new block. */
+		header = kmalloc(sb->s_blocksize, GFP_KERNEL);
+		error = -ENOMEM;
+		if (header == NULL)
+			goto cleanup;
+		memset(header, 0, sb->s_blocksize);
+		end = (char *)header + sb->s_blocksize;
+		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
+		header->h_blocks = header->h_refcount = cpu_to_le32(1);
+		last = here = ENTRY(header+1);
+	}
+
+	/* Iff we are modifying the block in-place, bh is locked here. */
+
+	if (not_found) {
+		/* Insert the new name. */
+		size_t size = EXT2_XATTR_LEN(name_len);
+		size_t rest = (char *)last - (char *)here;
+		memmove((char *)here + size, here, rest);
+		memset(here, 0, size);
+		here->e_name_index = name_index;
+		here->e_name_len = name_len;
+		memcpy(here->e_name, name, name_len);
+	} else {
+		if (!here->e_value_block && here->e_value_size) {
+			char *first_val = (char *)header + min_offs;
+			size_t offs = le16_to_cpu(here->e_value_offs);
+			char *val = (char *)header + offs;
+			size_t size = EXT2_XATTR_SIZE(
+				le32_to_cpu(here->e_value_size));
+
+			if (size == EXT2_XATTR_SIZE(value_len)) {
+				/* The old and the new value have the same
+				   size. Just replace. */
+				here->e_value_size = cpu_to_le32(value_len);
+				memset(val + size - EXT2_XATTR_PAD, 0,
+				       EXT2_XATTR_PAD); /* Clear pad bytes. */
+				memcpy(val, value, value_len);
+				goto skip_replace;
+			}
+
+			/* Remove the old value. */
+			memmove(first_val + size, first_val, val - first_val);
+			memset(first_val, 0, size);
+			here->e_value_offs = 0;
+			min_offs += size;
+
+			/* Adjust all value offsets. */
+			last = ENTRY(header+1);
+			while (!IS_LAST_ENTRY(last)) {
+				size_t o = le16_to_cpu(last->e_value_offs);
+				if (!last->e_value_block && o < offs)
+					last->e_value_offs =
+						cpu_to_le16(o + size);
+				last = EXT2_XATTR_NEXT(last);
+			}
+		}
+		if (value == NULL) {
+			/* Remove the old name. */
+			size_t size = EXT2_XATTR_LEN(name_len);
+			last = ENTRY((char *)last - size);
+			memmove(here, (char*)here + size,
+				(char*)last - (char*)here);
+			memset(last, 0, size);
+		}
+	}
+
+	if (value != NULL) {
+		/* Insert the new value. */
+		here->e_value_size = cpu_to_le32(value_len);
+		if (value_len) {
+			size_t size = EXT2_XATTR_SIZE(value_len);
+			char *val = (char *)header + min_offs - size;
+			here->e_value_offs =
+				cpu_to_le16((char *)val - (char *)header);
+			memset(val + size - EXT2_XATTR_PAD, 0,
+			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
+			memcpy(val, value, value_len);
+		}
+	}
+
+skip_replace:
+	if (IS_LAST_ENTRY(ENTRY(header+1))) {
+		/* This block is now empty. */
+		if (bh && header == HDR(bh))
+			unlock_buffer(bh);  /* we were modifying in-place. */
+		error = ext2_xattr_set2(inode, bh, NULL);
+	} else {
+		ext2_xattr_rehash(header, here);
+		if (bh && header == HDR(bh))
+			unlock_buffer(bh);  /* we were modifying in-place. */
+		error = ext2_xattr_set2(inode, bh, header);
+	}
+
+cleanup:
+	brelse(bh);
+	if (!(bh && header == HDR(bh)))
+		kfree(header);
+	up_write(&EXT2_I(inode)->xattr_sem);
+
+	return error;
+}
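+
+/*
+ * Editor's note (illustrative, not part of the original patch): a worked
+ * example of the free-space check above, assuming a 1024-byte block and a
+ * hypothetical attribute with a 3-byte name ("foo") and a 3-byte value.
+ * For a fresh attribute block:
+ *
+ *   free   = 1024 - sizeof(struct ext2_xattr_header) - sizeof(__u32)
+ *          = 1024 - 32 - 4 = 988
+ *   needed = EXT2_XATTR_LEN(3) + EXT2_XATTR_SIZE(3)
+ *          = ((3 + 3 + 16) & ~3) + ((3 + 3) & ~3) = 20 + 4 = 24
+ *
+ * so the attribute fits easily; entry descriptors grow forward from just
+ * after the header while values are packed down from the end of the block.
+ */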
+
+/*
+ * Second half of ext2_xattr_set(): Update the file system.
+ */
+static int
+ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
+		struct ext2_xattr_header *header)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *new_bh = NULL;
+	int error;
+
+	if (header) {
+		new_bh = ext2_xattr_cache_find(inode, header);
+		if (new_bh) {
+			/* We found an identical block in the cache. */
+			if (new_bh == old_bh) {
+				ea_bdebug(new_bh, "keeping this block");
+			} else {
+				/* The old block is released after updating
+				   the inode.  */
+				ea_bdebug(new_bh, "reusing block");
+
+				error = -EDQUOT;
+				if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+					unlock_buffer(new_bh);
+					goto cleanup;
+				}
+				HDR(new_bh)->h_refcount = cpu_to_le32(1 +
+					le32_to_cpu(HDR(new_bh)->h_refcount));
+				ea_bdebug(new_bh, "refcount now=%d",
+					le32_to_cpu(HDR(new_bh)->h_refcount));
+			}
+			unlock_buffer(new_bh);
+		} else if (old_bh && header == HDR(old_bh)) {
+			/* Keep this block. No need to lock the block as we
+			   don't need to change the reference count. */
+			new_bh = old_bh;
+			get_bh(new_bh);
+			ext2_xattr_cache_insert(new_bh);
+		} else {
+			/* We need to allocate a new block */
+			int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
+						           s_first_data_block) +
+				   EXT2_I(inode)->i_block_group *
+				   EXT2_BLOCKS_PER_GROUP(sb);
+			int block = ext2_new_block(inode, goal,
+						   NULL, NULL, &error);
+			if (error)
+				goto cleanup;
+			ea_idebug(inode, "creating block %d", block);
+
+			new_bh = sb_getblk(sb, block);
+			if (!new_bh) {
+				ext2_free_blocks(inode, block, 1);
+				error = -EIO;
+				goto cleanup;
+			}
+			lock_buffer(new_bh);
+			memcpy(new_bh->b_data, header, new_bh->b_size);
+			set_buffer_uptodate(new_bh);
+			unlock_buffer(new_bh);
+			ext2_xattr_cache_insert(new_bh);
+			
+			ext2_xattr_update_super_block(sb);
+		}
+		mark_buffer_dirty(new_bh);
+		if (IS_SYNC(inode)) {
+			sync_dirty_buffer(new_bh);
+			error = -EIO;
+			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
+				goto cleanup;
+		}
+	}
+
+	/* Update the inode. */
+	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	if (IS_SYNC(inode)) {
+		error = ext2_sync_inode (inode);
+		/* In case sync failed due to ENOSPC the inode was actually
+		 * written (only some dirty data were not) so we just proceed
+		 * as if nothing happened and cleanup the unused block */
+		if (error && error != -ENOSPC) {
+			if (new_bh && new_bh != old_bh)
+				DQUOT_FREE_BLOCK(inode, 1);
+			goto cleanup;
+		}
+	} else
+		mark_inode_dirty(inode);
+
+	error = 0;
+	if (old_bh && old_bh != new_bh) {
+		struct mb_cache_entry *ce;
+
+		/*
+		 * If there was an old block and we are no longer using it,
+		 * release the old block.
+		 */
+		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
+					old_bh->b_blocknr);
+		lock_buffer(old_bh);
+		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
+			/* Free the old block. */
+			if (ce)
+				mb_cache_entry_free(ce);
+			ea_bdebug(old_bh, "freeing");
+			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
+			/* Our caller still holds a reference to old_bh
+			 * and will release it; take an extra reference
+			 * before bforget() drops one. */
+			get_bh(old_bh);
+			bforget(old_bh);
+		} else {
+			/* Decrement the refcount only. */
+			HDR(old_bh)->h_refcount = cpu_to_le32(
+				le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
+			if (ce)
+				mb_cache_entry_release(ce);
+			DQUOT_FREE_BLOCK(inode, 1);
+			mark_buffer_dirty(old_bh);
+			ea_bdebug(old_bh, "refcount now=%d",
+				le32_to_cpu(HDR(old_bh)->h_refcount));
+		}
+		unlock_buffer(old_bh);
+	}
+
+cleanup:
+	brelse(new_bh);
+
+	return error;
+}
+
+/*
+ * ext2_xattr_delete_inode()
+ *
+ * Free extended attribute resources associated with this inode. This
+ * is called immediately before an inode is freed.
+ */
+void
+ext2_xattr_delete_inode(struct inode *inode)
+{
+	struct buffer_head *bh = NULL;
+	struct mb_cache_entry *ce;
+
+	down_write(&EXT2_I(inode)->xattr_sem);
+	if (!EXT2_I(inode)->i_file_acl)
+		goto cleanup;
+	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
+	if (!bh) {
+		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
+			"inode %ld: block %d read error", inode->i_ino,
+			EXT2_I(inode)->i_file_acl);
+		goto cleanup;
+	}
+	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
+	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
+	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
+		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
+			"inode %ld: bad block %d", inode->i_ino,
+			EXT2_I(inode)->i_file_acl);
+		goto cleanup;
+	}
+	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
+	lock_buffer(bh);
+	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
+		if (ce)
+			mb_cache_entry_free(ce);
+		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
+		get_bh(bh);
+		bforget(bh);
+	} else {
+		HDR(bh)->h_refcount = cpu_to_le32(
+			le32_to_cpu(HDR(bh)->h_refcount) - 1);
+		if (ce)
+			mb_cache_entry_release(ce);
+		mark_buffer_dirty(bh);
+		if (IS_SYNC(inode))
+			sync_dirty_buffer(bh);
+		DQUOT_FREE_BLOCK(inode, 1);
+	}
+	ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
+	unlock_buffer(bh);
+	EXT2_I(inode)->i_file_acl = 0;
+
+cleanup:
+	brelse(bh);
+	up_write(&EXT2_I(inode)->xattr_sem);
+}
+
+/*
+ * ext2_xattr_put_super()
+ *
+ * This is called when a file system is unmounted.
+ */
+void
+ext2_xattr_put_super(struct super_block *sb)
+{
+	mb_cache_shrink(ext2_xattr_cache, sb->s_bdev);
+}
+
+
+/*
+ * ext2_xattr_cache_insert()
+ *
+ * Create a new entry in the extended attribute cache, and insert
+ * it unless such an entry is already in the cache.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+static int
+ext2_xattr_cache_insert(struct buffer_head *bh)
+{
+	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+	struct mb_cache_entry *ce;
+	int error;
+
+	ce = mb_cache_entry_alloc(ext2_xattr_cache);
+	if (!ce)
+		return -ENOMEM;
+	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+	if (error) {
+		mb_cache_entry_free(ce);
+		if (error == -EBUSY) {
+			ea_bdebug(bh, "already in cache (%d cache entries)",
+				atomic_read(&ext2_xattr_cache->c_entry_count));
+			error = 0;
+		}
+	} else {
+		ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
+			  atomic_read(&ext2_xattr_cache->c_entry_count));
+		mb_cache_entry_release(ce);
+	}
+	return error;
+}
+
+/*
+ * ext2_xattr_cmp()
+ *
+ * Compare two extended attribute blocks for equality.
+ *
+ * Returns 0 if the blocks are equal, 1 if they differ, and
+ * a negative error number on errors.
+ */
+static int
+ext2_xattr_cmp(struct ext2_xattr_header *header1,
+	       struct ext2_xattr_header *header2)
+{
+	struct ext2_xattr_entry *entry1, *entry2;
+
+	entry1 = ENTRY(header1+1);
+	entry2 = ENTRY(header2+1);
+	while (!IS_LAST_ENTRY(entry1)) {
+		if (IS_LAST_ENTRY(entry2))
+			return 1;
+		if (entry1->e_hash != entry2->e_hash ||
+		    entry1->e_name_index != entry2->e_name_index ||
+		    entry1->e_name_len != entry2->e_name_len ||
+		    entry1->e_value_size != entry2->e_value_size ||
+		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
+			return 1;
+		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
+			return -EIO;
+		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
+			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
+			   le32_to_cpu(entry1->e_value_size)))
+			return 1;
+
+		entry1 = EXT2_XATTR_NEXT(entry1);
+		entry2 = EXT2_XATTR_NEXT(entry2);
+	}
+	if (!IS_LAST_ENTRY(entry2))
+		return 1;
+	return 0;
+}
+
+/*
+ * ext2_xattr_cache_find()
+ *
+ * Find an identical extended attribute block.
+ *
+ * Returns a locked buffer head to the block found, or NULL if such
+ * a block was not found or an error occurred.
+ */
+static struct buffer_head *
+ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
+{
+	__u32 hash = le32_to_cpu(header->h_hash);
+	struct mb_cache_entry *ce;
+
+	if (!header->h_hash)
+		return NULL;  /* never share */
+	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+again:
+	ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
+				       inode->i_sb->s_bdev, hash);
+	while (ce) {
+		struct buffer_head *bh;
+
+		if (IS_ERR(ce)) {
+			if (PTR_ERR(ce) == -EAGAIN)
+				goto again;
+			break;
+		}
+
+		bh = sb_bread(inode->i_sb, ce->e_block);
+		if (!bh) {
+			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
+				"inode %ld: block %ld read error",
+				inode->i_ino, (unsigned long) ce->e_block);
+		} else {
+			lock_buffer(bh);
+			if (le32_to_cpu(HDR(bh)->h_refcount) >
+				   EXT2_XATTR_REFCOUNT_MAX) {
+				ea_idebug(inode, "block %ld refcount %d>%d",
+					  (unsigned long) ce->e_block,
+					  le32_to_cpu(HDR(bh)->h_refcount),
+					  EXT2_XATTR_REFCOUNT_MAX);
+			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
+				ea_bdebug(bh, "b_count=%d",
+					  atomic_read(&(bh->b_count)));
+				mb_cache_entry_release(ce);
+				return bh;
+			}
+			unlock_buffer(bh);
+			brelse(bh);
+		}
+		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+	}
+	return NULL;
+}
+
+#define NAME_HASH_SHIFT 5
+#define VALUE_HASH_SHIFT 16
+
+/*
+ * ext2_xattr_hash_entry()
+ *
+ * Compute the hash of an extended attribute.
+ */
+static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
+					 struct ext2_xattr_entry *entry)
+{
+	__u32 hash = 0;
+	char *name = entry->e_name;
+	int n;
+
+	for (n=0; n < entry->e_name_len; n++) {
+		hash = (hash << NAME_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+		       *name++;
+	}
+
+	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+		__le32 *value = (__le32 *)((char *)header +
+			le16_to_cpu(entry->e_value_offs));
+		for (n = (le32_to_cpu(entry->e_value_size) +
+		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
+			hash = (hash << VALUE_HASH_SHIFT) ^
+			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+			       le32_to_cpu(*value++);
+		}
+	}
+	entry->e_hash = cpu_to_le32(hash);
+}
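+
+/*
+ * Editor's note (illustrative, not part of the original patch): tracing the
+ * name loop above for a hypothetical two-character name "ab":
+ *
+ *   hash = (0 << 5) ^ (0 >> 27) ^ 'a'        = 0x61
+ *   hash = (0x61 << 5) ^ (0x61 >> 27) ^ 'b'  = 0xc20 ^ 0 ^ 0x62 = 0xc42
+ *
+ * The value words (if any) are then mixed in the same way, but with the
+ * wider 16-bit VALUE_HASH_SHIFT rotation.
+ */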
+
+#undef NAME_HASH_SHIFT
+#undef VALUE_HASH_SHIFT
+
+#define BLOCK_HASH_SHIFT 16
+
+/*
+ * ext2_xattr_rehash()
+ *
+ * Re-compute the extended attribute hash value after an entry has changed.
+ */
+static void ext2_xattr_rehash(struct ext2_xattr_header *header,
+			      struct ext2_xattr_entry *entry)
+{
+	struct ext2_xattr_entry *here;
+	__u32 hash = 0;
+	
+	ext2_xattr_hash_entry(header, entry);
+	here = ENTRY(header+1);
+	while (!IS_LAST_ENTRY(here)) {
+		if (!here->e_hash) {
+			/* Block is not shared if an entry's hash value == 0 */
+			hash = 0;
+			break;
+		}
+		hash = (hash << BLOCK_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
+		       le32_to_cpu(here->e_hash);
+		here = EXT2_XATTR_NEXT(here);
+	}
+	header->h_hash = cpu_to_le32(hash);
+}
+
+#undef BLOCK_HASH_SHIFT
+
+int __init
+init_ext2_xattr(void)
+{
+	ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
+		sizeof(struct mb_cache_entry) +
+		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+	if (!ext2_xattr_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void
+exit_ext2_xattr(void)
+{
+	mb_cache_destroy(ext2_xattr_cache);
+}
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
new file mode 100644
index 0000000..5f3bfde
--- /dev/null
+++ b/fs/ext2/xattr.h
@@ -0,0 +1,118 @@
+/*
+  File: linux/ext2_xattr.h
+
+  On-disk format of extended attributes for the ext2 filesystem.
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/xattr.h>
+
+/* Magic value in attribute blocks */
+#define EXT2_XATTR_MAGIC		0xEA020000
+
+/* Maximum number of references to one attribute block */
+#define EXT2_XATTR_REFCOUNT_MAX		1024
+
+/* Name indexes */
+#define EXT2_XATTR_INDEX_USER			1
+#define EXT2_XATTR_INDEX_POSIX_ACL_ACCESS	2
+#define EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT	3
+#define EXT2_XATTR_INDEX_TRUSTED		4
+#define	EXT2_XATTR_INDEX_LUSTRE			5
+#define EXT2_XATTR_INDEX_SECURITY	        6
+
+struct ext2_xattr_header {
+	__le32	h_magic;	/* magic number for identification */
+	__le32	h_refcount;	/* reference count */
+	__le32	h_blocks;	/* number of disk blocks used */
+	__le32	h_hash;		/* hash value of all attributes */
+	__u32	h_reserved[4];	/* zero right now */
+};
+
+struct ext2_xattr_entry {
+	__u8	e_name_len;	/* length of name */
+	__u8	e_name_index;	/* attribute name index */
+	__le16	e_value_offs;	/* offset in disk block of value */
+	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
+	__le32	e_value_size;	/* size of attribute value */
+	__le32	e_hash;		/* hash value of name and value */
+	char	e_name[0];	/* attribute name */
+};
+
+#define EXT2_XATTR_PAD_BITS		2
+#define EXT2_XATTR_PAD		(1<<EXT2_XATTR_PAD_BITS)
+#define EXT2_XATTR_ROUND		(EXT2_XATTR_PAD-1)
+#define EXT2_XATTR_LEN(name_len) \
+	(((name_len) + EXT2_XATTR_ROUND + \
+	sizeof(struct ext2_xattr_entry)) & ~EXT2_XATTR_ROUND)
+#define EXT2_XATTR_NEXT(entry) \
+	( (struct ext2_xattr_entry *)( \
+	  (char *)(entry) + EXT2_XATTR_LEN((entry)->e_name_len)) )
+#define EXT2_XATTR_SIZE(size) \
+	(((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND)
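+
+/*
+ * Editor's note (illustrative, not part of the original patch): with the
+ * 4-byte padding above and sizeof(struct ext2_xattr_entry) == 16,
+ *
+ *   EXT2_XATTR_LEN(3)  = ((3 + 3 + 16) & ~3) = 20   (entry with a 3-byte name)
+ *   EXT2_XATTR_SIZE(5) = ((5 + 3) & ~3)      = 8    (a 5-byte value)
+ *
+ * i.e. both entry descriptors and values are rounded up to 4-byte boundaries.
+ */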
+
+# ifdef CONFIG_EXT2_FS_XATTR
+
+extern struct xattr_handler ext2_xattr_user_handler;
+extern struct xattr_handler ext2_xattr_trusted_handler;
+extern struct xattr_handler ext2_xattr_acl_access_handler;
+extern struct xattr_handler ext2_xattr_acl_default_handler;
+extern struct xattr_handler ext2_xattr_security_handler;
+
+extern ssize_t ext2_listxattr(struct dentry *, char *, size_t);
+
+extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t);
+extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
+
+extern void ext2_xattr_delete_inode(struct inode *);
+extern void ext2_xattr_put_super(struct super_block *);
+
+extern int init_ext2_xattr(void);
+extern void exit_ext2_xattr(void);
+
+extern struct xattr_handler *ext2_xattr_handlers[];
+
+# else  /* CONFIG_EXT2_FS_XATTR */
+
+static inline int
+ext2_xattr_get(struct inode *inode, int name_index,
+	       const char *name, void *buffer, size_t size)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+ext2_xattr_set(struct inode *inode, int name_index, const char *name,
+	       const void *value, size_t size, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void
+ext2_xattr_delete_inode(struct inode *inode)
+{
+}
+
+static inline void
+ext2_xattr_put_super(struct super_block *sb)
+{
+}
+
+static inline int
+init_ext2_xattr(void)
+{
+	return 0;
+}
+
+static inline void
+exit_ext2_xattr(void)
+{
+}
+
+#define ext2_xattr_handlers NULL
+
+# endif  /* CONFIG_EXT2_FS_XATTR */
+
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
new file mode 100644
index 0000000..6a6c59f
--- /dev/null
+++ b/fs/ext2/xattr_security.c
@@ -0,0 +1,53 @@
+/*
+ * linux/fs/ext2/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext2_fs.h>
+#include "xattr.h"
+
+static size_t
+ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size,
+			 const char *name, size_t name_len)
+{
+	const int prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext2_xattr_security_get(struct inode *inode, const char *name,
+		       void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_SECURITY, name,
+			      buffer, size);
+}
+
+static int
+ext2_xattr_security_set(struct inode *inode, const char *name,
+		       const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext2_xattr_security_handler = {
+	.prefix	= XATTR_SECURITY_PREFIX,
+	.list	= ext2_xattr_security_list,
+	.get	= ext2_xattr_security_get,
+	.set	= ext2_xattr_security_set,
+};
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
new file mode 100644
index 0000000..52b30ee
--- /dev/null
+++ b/fs/ext2/xattr_trusted.c
@@ -0,0 +1,64 @@
+/*
+ * linux/fs/ext2/xattr_trusted.c
+ * Handler for trusted extended attributes.
+ *
+ * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext2_fs.h>
+#include "xattr.h"
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+
+static size_t
+ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
+			const char *name, size_t name_len)
+{
+	const int prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return 0;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext2_xattr_trusted_get(struct inode *inode, const char *name,
+		       void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name,
+			      buffer, size);
+}
+
+static int
+ext2_xattr_trusted_set(struct inode *inode, const char *name,
+		       const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext2_xattr_trusted_handler = {
+	.prefix	= XATTR_TRUSTED_PREFIX,
+	.list	= ext2_xattr_trusted_list,
+	.get	= ext2_xattr_trusted_get,
+	.set	= ext2_xattr_trusted_set,
+};
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
new file mode 100644
index 0000000..0c03ea1
--- /dev/null
+++ b/fs/ext2/xattr_user.c
@@ -0,0 +1,77 @@
+/*
+ * linux/fs/ext2/xattr_user.c
+ * Handler for extended user attributes.
+ *
+ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "ext2.h"
+#include "xattr.h"
+
+#define XATTR_USER_PREFIX "user."
+
+static size_t
+ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size,
+		     const char *name, size_t name_len)
+{
+	const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return 0;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_USER_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext2_xattr_user_get(struct inode *inode, const char *name,
+		    void *buffer, size_t size)
+{
+	int error;
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return -EOPNOTSUPP;
+	error = permission(inode, MAY_READ, NULL);
+	if (error)
+		return error;
+
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name, buffer, size);
+}
+
+static int
+ext2_xattr_user_set(struct inode *inode, const char *name,
+		    const void *value, size_t size, int flags)
+{
+	int error;
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return -EOPNOTSUPP;
+	if ( !S_ISREG(inode->i_mode) &&
+	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
+		return -EPERM;
+	error = permission(inode, MAY_WRITE, NULL);
+	if (error)
+		return error;
+
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext2_xattr_user_handler = {
+	.prefix	= XATTR_USER_PREFIX,
+	.list	= ext2_xattr_user_list,
+	.get	= ext2_xattr_user_get,
+	.set	= ext2_xattr_user_set,
+};
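+
+/*
+ * Editor's note (illustrative, not part of the original patch): from user
+ * space these handlers are reached through the *xattr(2) system calls, for
+ * example via the attr tools on a filesystem mounted with "user_xattr":
+ *
+ *   mount -o remount,user_xattr /mnt
+ *   setfattr -n user.comment -v "hello" /mnt/file
+ *   getfattr -n user.comment /mnt/file
+ *
+ * (The path and attribute name are hypothetical.)
+ */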
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
new file mode 100644
index 0000000..704cd44
--- /dev/null
+++ b/fs/ext3/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux ext3-filesystem routines.
+#
+
+obj-$(CONFIG_EXT3_FS) += ext3.o
+
+ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+	   ioctl.o namei.o super.o symlink.o hash.o resize.o
+
+ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
+ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+ext3-$(CONFIG_EXT3_FS_SECURITY)	 += xattr_security.o
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
new file mode 100644
index 0000000..328592c
--- /dev/null
+++ b/fs/ext3/acl.c
@@ -0,0 +1,547 @@
+/*
+ * linux/fs/ext3/acl.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+static struct posix_acl *
+ext3_acl_from_disk(const void *value, size_t size)
+{
+	const char *end = (char *)value + size;
+	int n, count;
+	struct posix_acl *acl;
+
+	if (!value)
+		return NULL;
+	if (size < sizeof(ext3_acl_header))
+		 return ERR_PTR(-EINVAL);
+	if (((ext3_acl_header *)value)->a_version !=
+	    cpu_to_le32(EXT3_ACL_VERSION))
+		return ERR_PTR(-EINVAL);
+	value = (char *)value + sizeof(ext3_acl_header);
+	count = ext3_acl_count(size);
+	if (count < 0)
+		return ERR_PTR(-EINVAL);
+	if (count == 0)
+		return NULL;
+	acl = posix_acl_alloc(count, GFP_KERNEL);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+	for (n=0; n < count; n++) {
+		ext3_acl_entry *entry =
+			(ext3_acl_entry *)value;
+		if ((char *)value + sizeof(ext3_acl_entry_short) > end)
+			goto fail;
+		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
+		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				value = (char *)value +
+					sizeof(ext3_acl_entry_short);
+				acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				value = (char *)value + sizeof(ext3_acl_entry);
+				if ((char *)value > end)
+					goto fail;
+				acl->a_entries[n].e_id =
+					le32_to_cpu(entry->e_id);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	if (value != end)
+		goto fail;
+	return acl;
+
+fail:
+	posix_acl_release(acl);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+static void *
+ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+	ext3_acl_header *ext_acl;
+	char *e;
+	size_t n;
+
+	*size = ext3_acl_size(acl->a_count);
+	ext_acl = (ext3_acl_header *)kmalloc(sizeof(ext3_acl_header) +
+		acl->a_count * sizeof(ext3_acl_entry), GFP_KERNEL);
+	if (!ext_acl)
+		return ERR_PTR(-ENOMEM);
+	ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
+	e = (char *)ext_acl + sizeof(ext3_acl_header);
+	for (n=0; n < acl->a_count; n++) {
+		ext3_acl_entry *entry = (ext3_acl_entry *)e;
+		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER:
+			case ACL_GROUP:
+				entry->e_id =
+					cpu_to_le32(acl->a_entries[n].e_id);
+				e += sizeof(ext3_acl_entry);
+				break;
+
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				e += sizeof(ext3_acl_entry_short);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	return (char *)ext_acl;
+
+fail:
+	kfree(ext_acl);
+	return ERR_PTR(-EINVAL);
+}
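+
+/*
+ * Editor's note (illustrative, not part of the original patch): on-disk size
+ * of a hypothetical access ACL "u::rw-,u:500:rw-,g::r--,m::rw-,o::r--".
+ * Four of the five entries (USER_OBJ, GROUP_OBJ, MASK, OTHER) use the short
+ * 4-byte form and the named-user entry uses the full 8-byte form, so
+ *
+ *   size = sizeof(ext3_acl_header) + 4 * 4 + 1 * 8 = 4 + 16 + 8 = 28 bytes
+ *
+ * which is exactly what ext3_acl_size(5) in acl.h returns.
+ */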
+
+static inline struct posix_acl *
+ext3_iget_acl(struct inode *inode, struct posix_acl **i_acl)
+{
+	struct posix_acl *acl = EXT3_ACL_NOT_CACHED;
+
+	spin_lock(&inode->i_lock);
+	if (*i_acl != EXT3_ACL_NOT_CACHED)
+		acl = posix_acl_dup(*i_acl);
+	spin_unlock(&inode->i_lock);
+
+	return acl;
+}
+
+static inline void
+ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
+                  struct posix_acl *acl)
+{
+	spin_lock(&inode->i_lock);
+	if (*i_acl != EXT3_ACL_NOT_CACHED)
+		posix_acl_release(*i_acl);
+	*i_acl = posix_acl_dup(acl);
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Inode operation get_posix_acl().
+ *
+ * inode->i_sem: don't care
+ */
+static struct posix_acl *
+ext3_get_acl(struct inode *inode, int type)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	int name_index;
+	char *value = NULL;
+	struct posix_acl *acl;
+	int retval;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return NULL;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			acl = ext3_iget_acl(inode, &ei->i_acl);
+			if (acl != EXT3_ACL_NOT_CACHED)
+				return acl;
+			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			acl = ext3_iget_acl(inode, &ei->i_default_acl);
+			if (acl != EXT3_ACL_NOT_CACHED)
+				return acl;
+			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
+			break;
+
+		default:
+			return ERR_PTR(-EINVAL);
+	}
+	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
+	if (retval > 0) {
+		value = kmalloc(retval, GFP_KERNEL);
+		if (!value)
+			return ERR_PTR(-ENOMEM);
+		retval = ext3_xattr_get(inode, name_index, "", value, retval);
+	}
+	if (retval > 0)
+		acl = ext3_acl_from_disk(value, retval);
+	else if (retval == -ENODATA || retval == -ENOSYS)
+		acl = NULL;
+	else
+		acl = ERR_PTR(retval);
+	kfree(value);
+
+	if (!IS_ERR(acl)) {
+		switch(type) {
+			case ACL_TYPE_ACCESS:
+				ext3_iset_acl(inode, &ei->i_acl, acl);
+				break;
+
+			case ACL_TYPE_DEFAULT:
+				ext3_iset_acl(inode, &ei->i_default_acl, acl);
+				break;
+		}
+	}
+	return acl;
+}
+
+/*
+ * Set the access or default ACL of an inode.
+ *
+ * inode->i_sem: down unless called from ext3_new_inode
+ */
+static int
+ext3_set_acl(handle_t *handle, struct inode *inode, int type,
+	     struct posix_acl *acl)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	int name_index;
+	void *value = NULL;
+	size_t size;
+	int error;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
+			if (acl) {
+				mode_t mode = inode->i_mode;
+				error = posix_acl_equiv_mode(acl, &mode);
+				if (error < 0)
+					return error;
+				else {
+					inode->i_mode = mode;
+					ext3_mark_inode_dirty(handle, inode);
+					if (error == 0)
+						acl = NULL;
+				}
+			}
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
+			if (!S_ISDIR(inode->i_mode))
+				return acl ? -EACCES : 0;
+			break;
+
+		default:
+			return -EINVAL;
+	}
+ 	if (acl) {
+		value = ext3_acl_to_disk(acl, &size);
+		if (IS_ERR(value))
+			return (int)PTR_ERR(value);
+	}
+
+	error = ext3_xattr_set_handle(handle, inode, name_index, "",
+				      value, size, 0);
+
+	kfree(value);
+	if (!error) {
+		switch(type) {
+			case ACL_TYPE_ACCESS:
+				ext3_iset_acl(inode, &ei->i_acl, acl);
+				break;
+
+			case ACL_TYPE_DEFAULT:
+				ext3_iset_acl(inode, &ei->i_default_acl, acl);
+				break;
+		}
+	}
+	return error;
+}
+
+static int
+ext3_check_acl(struct inode *inode, int mask)
+{
+	struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
+
+	if (acl) {
+		int error = posix_acl_permission(inode, acl, mask);
+		posix_acl_release(acl);
+		return error;
+	}
+
+	return -EAGAIN;
+}
+
+int
+ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	return generic_permission(inode, mask, ext3_check_acl);
+}
+
+/*
+ * Initialize the ACLs of a new inode. Called from ext3_new_inode.
+ *
+ * dir->i_sem: down
+ * inode->i_sem: up (access to inode is still exclusive)
+ */
+int
+ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+	struct posix_acl *acl = NULL;
+	int error = 0;
+
+	if (!S_ISLNK(inode->i_mode)) {
+		if (test_opt(dir->i_sb, POSIX_ACL)) {
+			acl = ext3_get_acl(dir, ACL_TYPE_DEFAULT);
+			if (IS_ERR(acl))
+				return PTR_ERR(acl);
+		}
+		if (!acl)
+			inode->i_mode &= ~current->fs->umask;
+	}
+	if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
+		struct posix_acl *clone;
+		mode_t mode;
+
+		if (S_ISDIR(inode->i_mode)) {
+			error = ext3_set_acl(handle, inode,
+					     ACL_TYPE_DEFAULT, acl);
+			if (error)
+				goto cleanup;
+		}
+		clone = posix_acl_clone(acl, GFP_KERNEL);
+		error = -ENOMEM;
+		if (!clone)
+			goto cleanup;
+
+		mode = inode->i_mode;
+		error = posix_acl_create_masq(clone, &mode);
+		if (error >= 0) {
+			inode->i_mode = mode;
+			if (error > 0) {
+				/* This is an extended ACL */
+				error = ext3_set_acl(handle, inode,
+						     ACL_TYPE_ACCESS, clone);
+			}
+		}
+		posix_acl_release(clone);
+	}
+cleanup:
+	posix_acl_release(acl);
+	return error;
+}
+
+/*
+ * Does chmod for an inode that may have an Access Control List. The
+ * inode->i_mode field must be updated to the desired value by the caller
+ * before calling this function.
+ * Returns 0 on success, or a negative error number.
+ *
+ * We change the ACL rather than storing some ACL entries in the file
+ * mode permission bits (which would be more efficient), because that
+ * would break once additional permissions (like ACL_APPEND, ACL_DELETE
+ * for directories) are added. There are no more bits available in the
+ * file mode.
+ *
+ * inode->i_sem: down
+ */
+int
+ext3_acl_chmod(struct inode *inode)
+{
+	struct posix_acl *acl, *clone;
+	int error;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+	clone = posix_acl_clone(acl, GFP_KERNEL);
+	posix_acl_release(acl);
+	if (!clone)
+		return -ENOMEM;
+	error = posix_acl_chmod_masq(clone, inode->i_mode);
+	if (!error) {
+		handle_t *handle;
+		int retries = 0;
+
+	retry:
+		handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS);
+		if (IS_ERR(handle)) {
+			error = PTR_ERR(handle);
+			ext3_std_error(inode->i_sb, error);
+			goto out;
+		}
+		error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, clone);
+		ext3_journal_stop(handle);
+		if (error == -ENOSPC &&
+		    ext3_should_retry_alloc(inode->i_sb, &retries))
+			goto retry;
+	}
+out:
+	posix_acl_release(clone);
+	return error;
+}
+
+/*
+ * Extended attribute handlers
+ */
+static size_t
+ext3_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
+			   const char *name, size_t name_len)
+{
+	const size_t size = sizeof(XATTR_NAME_ACL_ACCESS);
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	if (list && size <= list_len)
+		memcpy(list, XATTR_NAME_ACL_ACCESS, size);
+	return size;
+}
+
+static size_t
+ext3_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
+			    const char *name, size_t name_len)
+{
+	const size_t size = sizeof(XATTR_NAME_ACL_DEFAULT);
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return 0;
+	if (list && size <= list_len)
+		memcpy(list, XATTR_NAME_ACL_DEFAULT, size);
+	return size;
+}
+
+static int
+ext3_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+{
+	struct posix_acl *acl;
+	int error;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return -EOPNOTSUPP;
+
+	acl = ext3_get_acl(inode, type);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+	error = posix_acl_to_xattr(acl, buffer, size);
+	posix_acl_release(acl);
+
+	return error;
+}
+
+static int
+ext3_xattr_get_acl_access(struct inode *inode, const char *name,
+			  void *buffer, size_t size)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext3_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int
+ext3_xattr_get_acl_default(struct inode *inode, const char *name,
+			   void *buffer, size_t size)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext3_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+static int
+ext3_xattr_set_acl(struct inode *inode, int type, const void *value,
+		   size_t size)
+{
+	handle_t *handle;
+	struct posix_acl *acl;
+	int error, retries = 0;
+
+	if (!test_opt(inode->i_sb, POSIX_ACL))
+		return -EOPNOTSUPP;
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		return -EPERM;
+
+	if (value) {
+		acl = posix_acl_from_xattr(value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		else if (acl) {
+			error = posix_acl_valid(acl);
+			if (error)
+				goto release_and_out;
+		}
+	} else
+		acl = NULL;
+
+retry:
+	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	error = ext3_set_acl(handle, inode, type, acl);
+	ext3_journal_stop(handle);
+	if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+
+release_and_out:
+	posix_acl_release(acl);
+	return error;
+}
+
+static int
+ext3_xattr_set_acl_access(struct inode *inode, const char *name,
+			  const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext3_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int
+ext3_xattr_set_acl_default(struct inode *inode, const char *name,
+			   const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	return ext3_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+struct xattr_handler ext3_xattr_acl_access_handler = {
+	.prefix	= XATTR_NAME_ACL_ACCESS,
+	.list	= ext3_xattr_list_acl_access,
+	.get	= ext3_xattr_get_acl_access,
+	.set	= ext3_xattr_set_acl_access,
+};
+
+struct xattr_handler ext3_xattr_acl_default_handler = {
+	.prefix	= XATTR_NAME_ACL_DEFAULT,
+	.list	= ext3_xattr_list_acl_default,
+	.get	= ext3_xattr_get_acl_default,
+	.set	= ext3_xattr_set_acl_default,
+};
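+
+/*
+ * Editor's note (illustrative, not part of the original patch): these two
+ * handlers back the system.posix_acl_access / system.posix_acl_default
+ * extended attributes that the ACL user-space tools operate on, e.g.
+ *
+ *   setfacl -m u:500:rw /mnt/file
+ *   getfacl /mnt/file
+ *
+ * (The path and uid are hypothetical; the filesystem must be mounted with
+ * the "acl" option for the test_opt(sb, POSIX_ACL) checks above to pass.)
+ */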
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
new file mode 100644
index 0000000..98af0c0
--- /dev/null
+++ b/fs/ext3/acl.h
@@ -0,0 +1,84 @@
+/*
+  File: fs/ext3/acl.h
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/xattr_acl.h>
+
+#define EXT3_ACL_VERSION	0x0001
+
+typedef struct {
+	__le16		e_tag;
+	__le16		e_perm;
+	__le32		e_id;
+} ext3_acl_entry;
+
+typedef struct {
+	__le16		e_tag;
+	__le16		e_perm;
+} ext3_acl_entry_short;
+
+typedef struct {
+	__le32		a_version;
+} ext3_acl_header;
+
+static inline size_t ext3_acl_size(int count)
+{
+	if (count <= 4) {
+		return sizeof(ext3_acl_header) +
+		       count * sizeof(ext3_acl_entry_short);
+	} else {
+		return sizeof(ext3_acl_header) +
+		       4 * sizeof(ext3_acl_entry_short) +
+		       (count - 4) * sizeof(ext3_acl_entry);
+	}
+}
+
+static inline int ext3_acl_count(size_t size)
+{
+	ssize_t s;
+	size -= sizeof(ext3_acl_header);
+	s = size - 4 * sizeof(ext3_acl_entry_short);
+	if (s < 0) {
+		if (size % sizeof(ext3_acl_entry_short))
+			return -1;
+		return size / sizeof(ext3_acl_entry_short);
+	} else {
+		if (s % sizeof(ext3_acl_entry))
+			return -1;
+		return s / sizeof(ext3_acl_entry) + 4;
+	}
+}
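+
+/*
+ * Editor's note (illustrative, not part of the original patch): the two
+ * helpers above are inverses of each other.  For a hypothetical ACL with
+ * six entries (two of them named user/group entries):
+ *
+ *   ext3_acl_size(6)  = 4 + 4 * 4 + 2 * 8 = 36
+ *   ext3_acl_count(36): s = 36 - 4 - 16 = 16, 16 % 8 == 0  =>  16/8 + 4 = 6
+ */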
+
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+
+/* Value for inode->u.ext3_i.i_acl and inode->u.ext3_i.i_default_acl
+   if the ACL has not been cached */
+#define EXT3_ACL_NOT_CACHED ((void *)-1)
+
+/* acl.c */
+extern int ext3_permission (struct inode *, int, struct nameidata *);
+extern int ext3_acl_chmod (struct inode *);
+extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
+
+extern int init_ext3_acl(void);
+extern void exit_ext3_acl(void);
+
+#else  /* CONFIG_EXT3_FS_POSIX_ACL */
+#include <linux/sched.h>
+#define ext3_permission NULL
+
+static inline int
+ext3_acl_chmod(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int
+ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+	return 0;
+}
+#endif  /* CONFIG_EXT3_FS_POSIX_ACL */
+
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
new file mode 100644
index 0000000..ccd632f
--- /dev/null
+++ b/fs/ext3/balloc.c
@@ -0,0 +1,1600 @@
+/*
+ *  linux/fs/ext3/balloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+
+/*
+ * balloc.c contains the blocks allocation and deallocation routines
+ */
+
+/*
+ * The free blocks are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free blocks count in the group.  The descriptors are loaded in memory
+ * when a file system is mounted (see ext3_read_super).
+ */
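+
+/*
+ * Editor's note (illustrative, not part of the original patch): with 4K
+ * blocks a group covers 8 * 4096 = 32768 blocks, so on a hypothetical
+ * filesystem with s_first_data_block == 0, block 100000 lives at
+ *
+ *   block_group = 100000 / 32768 = 3
+ *   bit         = 100000 % 32768 = 1696
+ *
+ * which is exactly the split computed in ext3_free_blocks_sb() below.
+ */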
+
+
+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
+
+struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
+					     unsigned int block_group,
+					     struct buffer_head ** bh)
+{
+	unsigned long group_desc;
+	unsigned long offset;
+	struct ext3_group_desc * desc;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+
+	if (block_group >= sbi->s_groups_count) {
+		ext3_error (sb, "ext3_get_group_desc",
+			    "block_group >= groups_count - "
+			    "block_group = %d, groups_count = %lu",
+			    block_group, sbi->s_groups_count);
+
+		return NULL;
+	}
+	smp_rmb();
+
+	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
+	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
+	if (!sbi->s_group_desc[group_desc]) {
+		ext3_error (sb, "ext3_get_group_desc",
+			    "Group descriptor not loaded - "
+			    "block_group = %d, group_desc = %lu, desc = %lu",
+			     block_group, group_desc, offset);
+		return NULL;
+	}
+
+	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
+	if (bh)
+		*bh = sbi->s_group_desc[group_desc];
+	return desc + offset;
+}
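+
+/*
+ * Editor's note (illustrative, not part of the original patch): each group
+ * descriptor is 32 bytes, so with 4K blocks EXT3_DESC_PER_BLOCK(sb) == 128
+ * and a hypothetical block_group of 300 is located at
+ *
+ *   group_desc = 300 >> 7  = 2    (third descriptor block)
+ *   offset     = 300 & 127 = 44   (45th descriptor within that block)
+ *
+ * matching the shift/mask arithmetic above.
+ */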
+
+/*
+ * Read the block bitmap for the given block_group.
+ *
+ * Return buffer_head on success or NULL in case of failure.
+ */
+static struct buffer_head *
+read_block_bitmap(struct super_block *sb, unsigned int block_group)
+{
+	struct ext3_group_desc * desc;
+	struct buffer_head * bh = NULL;
+
+	desc = ext3_get_group_desc (sb, block_group, NULL);
+	if (!desc)
+		goto error_out;
+	bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
+	if (!bh)
+		ext3_error (sb, "read_block_bitmap",
+			    "Cannot read block bitmap - "
+			    "block_group = %d, block_bitmap = %u",
+			    block_group, le32_to_cpu(desc->bg_block_bitmap));
+error_out:
+	return bh;
+}
+
+/*
+ * The reservation window structure operations
+ * --------------------------------------------
+ * Operations include:
+ * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
+ *
+ * We use sorted double linked list for the per-filesystem reservation
+ * window list. (like in vm_region).
+ *
+ * Initially, we keep those small operations in the abstract functions,
+ * so later if we need a better searching tree than double linked-list,
+ * we could easily switch to that without changing too much
+ * code.
+ */
+#if 0
+static void __rsv_window_dump(struct rb_root *root, int verbose,
+			      const char *fn)
+{
+	struct rb_node *n;
+	struct ext3_reserve_window_node *rsv, *prev;
+	int bad;
+
+restart:
+	n = rb_first(root);
+	bad = 0;
+	prev = NULL;
+
+	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
+	while (n) {
+		rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
+		if (verbose)
+			printk("reservation window 0x%p "
+			       "start:  %d, end:  %d\n",
+			       rsv, rsv->rsv_start, rsv->rsv_end);
+		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
+			printk("Bad reservation %p (start >= end)\n",
+			       rsv);
+			bad = 1;
+		}
+		if (prev && prev->rsv_end >= rsv->rsv_start) {
+			printk("Bad reservation %p (prev->end >= start)\n",
+			       rsv);
+			bad = 1;
+		}
+		if (bad) {
+			if (!verbose) {
+				printk("Restarting reservation walk in verbose mode\n");
+				verbose = 1;
+				goto restart;
+			}
+		}
+		n = rb_next(n);
+		prev = rsv;
+	}
+	printk("Window map complete.\n");
+	if (bad)
+		BUG();
+}
+#define rsv_window_dump(root, verbose) \
+	__rsv_window_dump((root), (verbose), __FUNCTION__)
+#else
+#define rsv_window_dump(root, verbose) do {} while (0)
+#endif
+
+static int
+goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
+			unsigned int group, struct super_block * sb)
+{
+	unsigned long group_first_block, group_last_block;
+
+	group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
+				group * EXT3_BLOCKS_PER_GROUP(sb);
+	group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
+
+	if ((rsv->_rsv_start > group_last_block) ||
+	    (rsv->_rsv_end < group_first_block))
+		return 0;
+	if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start)
+		|| (goal + group_first_block > rsv->_rsv_end)))
+		return 0;
+	return 1;
+}
+
+/*
+ * Find the reserved window which includes the goal, or the previous one
+ * if the goal is not in any window.
+ * Returns NULL if there are no windows or if all windows start after the goal.
+ */
+static struct ext3_reserve_window_node *
+search_reserve_window(struct rb_root *root, unsigned long goal)
+{
+	struct rb_node *n = root->rb_node;
+	struct ext3_reserve_window_node *rsv;
+
+	if (!n)
+		return NULL;
+
+	do {
+		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
+
+		if (goal < rsv->rsv_start)
+			n = n->rb_left;
+		else if (goal > rsv->rsv_end)
+			n = n->rb_right;
+		else
+			return rsv;
+	} while (n);
+	/*
+	 * We've fallen off the end of the tree: the goal wasn't inside
+	 * any particular node.  OK, the previous node must be to one
+	 * side of the interval containing the goal.  If it's the RHS,
+	 * we need to back up one.
+	 */
+	if (rsv->rsv_start > goal) {
+		n = rb_prev(&rsv->rsv_node);
+		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
+	}
+	return rsv;
+}
+
+void ext3_rsv_window_add(struct super_block *sb,
+		    struct ext3_reserve_window_node *rsv)
+{
+	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
+	struct rb_node *node = &rsv->rsv_node;
+	unsigned int start = rsv->rsv_start;
+
+	struct rb_node ** p = &root->rb_node;
+	struct rb_node * parent = NULL;
+	struct ext3_reserve_window_node *this;
+
+	while (*p)
+	{
+		parent = *p;
+		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
+
+		if (start < this->rsv_start)
+			p = &(*p)->rb_left;
+		else if (start > this->rsv_end)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+}
+
+static void rsv_window_remove(struct super_block *sb,
+			      struct ext3_reserve_window_node *rsv)
+{
+	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+	rsv->rsv_alloc_hit = 0;
+	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
+}
+
+static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
+{
+	/* a valid reservation window never ends at block 0 */
+	return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
+}
+
+void ext3_init_block_alloc_info(struct inode *inode)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
+	struct super_block *sb = inode->i_sb;
+
+	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
+	if (block_i) {
+		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
+
+		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+
+		/*
+		 * If the filesystem is mounted with NORESERVATION, the goal
+		 * reservation window size is set to zero to indicate that
+		 * block reservation is off.
+		 */
+		if (!test_opt(sb, RESERVATION))
+			rsv->rsv_goal_size = 0;
+		else
+			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
+		rsv->rsv_alloc_hit = 0;
+		block_i->last_alloc_logical_block = 0;
+		block_i->last_alloc_physical_block = 0;
+	}
+	ei->i_block_alloc_info = block_i;
+}
+
+void ext3_discard_reservation(struct inode *inode)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
+	struct ext3_reserve_window_node *rsv;
+	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
+
+	if (!block_i)
+		return;
+
+	rsv = &block_i->rsv_window_node;
+	if (!rsv_is_empty(&rsv->rsv_window)) {
+		spin_lock(rsv_lock);
+		if (!rsv_is_empty(&rsv->rsv_window))
+			rsv_window_remove(inode->i_sb, rsv);
+		spin_unlock(rsv_lock);
+	}
+}
+
+/*
+ * Free the given blocks and update the free block counts; quota and
+ * i_blocks are adjusted by the caller based on *pdquot_freed_blocks.
+ */
+void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
+			 unsigned long block, unsigned long count,
+			 int *pdquot_freed_blocks)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *gd_bh;
+	unsigned long block_group;
+	unsigned long bit;
+	unsigned long i;
+	unsigned long overflow;
+	struct ext3_group_desc * desc;
+	struct ext3_super_block * es;
+	struct ext3_sb_info *sbi;
+	int err = 0, ret;
+	unsigned group_freed;
+
+	*pdquot_freed_blocks = 0;
+	sbi = EXT3_SB(sb);
+	es = sbi->s_es;
+	if (block < le32_to_cpu(es->s_first_data_block) ||
+	    block + count < block ||
+	    block + count > le32_to_cpu(es->s_blocks_count)) {
+		ext3_error (sb, "ext3_free_blocks",
+			    "Freeing blocks not in datazone - "
+			    "block = %lu, count = %lu", block, count);
+		goto error_return;
+	}
+
+	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
+
+do_more:
+	overflow = 0;
+	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
+		      EXT3_BLOCKS_PER_GROUP(sb);
+	bit = (block - le32_to_cpu(es->s_first_data_block)) %
+		      EXT3_BLOCKS_PER_GROUP(sb);
+	/*
+	 * Check to see if we are freeing blocks across a group
+	 * boundary.
+	 */
+	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
+		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
+		count -= overflow;
+	}
+	brelse(bitmap_bh);
+	bitmap_bh = read_block_bitmap(sb, block_group);
+	if (!bitmap_bh)
+		goto error_return;
+	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
+	if (!desc)
+		goto error_return;
+
+	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
+	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
+	    in_range (block, le32_to_cpu(desc->bg_inode_table),
+		      sbi->s_itb_per_group) ||
+	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
+		      sbi->s_itb_per_group))
+		ext3_error (sb, "ext3_free_blocks",
+			    "Freeing blocks in system zones - "
+			    "Block = %lu, count = %lu",
+			    block, count);
+
+	/*
+	 * We are about to start releasing blocks in the bitmap,
+	 * so we need undo access.
+	 */
+	/* @@@ check errors */
+	BUFFER_TRACE(bitmap_bh, "getting undo access");
+	err = ext3_journal_get_undo_access(handle, bitmap_bh);
+	if (err)
+		goto error_return;
+
+	/*
+	 * We are about to modify some metadata.  Call the journal APIs
+	 * to unshare ->b_data if a currently-committing transaction is
+	 * using it
+	 */
+	BUFFER_TRACE(gd_bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, gd_bh);
+	if (err)
+		goto error_return;
+
+	jbd_lock_bh_state(bitmap_bh);
+
+	for (i = 0, group_freed = 0; i < count; i++) {
+		/*
+		 * An HJ special.  This is expensive...
+		 */
+#ifdef CONFIG_JBD_DEBUG
+		jbd_unlock_bh_state(bitmap_bh);
+		{
+			struct buffer_head *debug_bh;
+			debug_bh = sb_find_get_block(sb, block + i);
+			if (debug_bh) {
+				BUFFER_TRACE(debug_bh, "Deleted!");
+				if (!bh2jh(bitmap_bh)->b_committed_data)
+					BUFFER_TRACE(debug_bh,
+						"No committed data in bitmap");
+				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
+				__brelse(debug_bh);
+			}
+		}
+		jbd_lock_bh_state(bitmap_bh);
+#endif
+		if (need_resched()) {
+			jbd_unlock_bh_state(bitmap_bh);
+			cond_resched();
+			jbd_lock_bh_state(bitmap_bh);
+		}
+		/* @@@ This prevents newly-allocated data from being
+		 * freed and then reallocated within the same
+		 * transaction. 
+		 * 
+		 * Ideally we would want to allow that to happen, but to
+		 * do so requires making journal_forget() capable of
+		 * revoking the queued write of a data block, which
+		 * implies blocking on the journal lock.  *forget()
+		 * cannot block due to truncate races.
+		 *
+		 * Eventually we can fix this by making journal_forget()
+		 * return a status indicating whether or not it was able
+		 * to revoke the buffer.  On successful revoke, it is
+		 * safe not to set the allocation bit in the committed
+		 * bitmap, because we know that there is no outstanding
+		 * activity on the buffer any more and so it is safe to
+		 * reallocate it.  
+		 */
+		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
+		J_ASSERT_BH(bitmap_bh,
+				bh2jh(bitmap_bh)->b_committed_data != NULL);
+		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
+				bh2jh(bitmap_bh)->b_committed_data);
+
+		/*
+		 * We clear the bit in the bitmap after setting the committed
+		 * data bit, because this is the reverse order to that which
+		 * the allocator uses.
+		 */
+		BUFFER_TRACE(bitmap_bh, "clear bit");
+		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+						bit + i, bitmap_bh->b_data)) {
+			jbd_unlock_bh_state(bitmap_bh);
+			ext3_error(sb, __FUNCTION__,
+				"bit already cleared for block %lu", block + i);
+			jbd_lock_bh_state(bitmap_bh);
+			BUFFER_TRACE(bitmap_bh, "bit already cleared");
+		} else {
+			group_freed++;
+		}
+	}
+	jbd_unlock_bh_state(bitmap_bh);
+
+	spin_lock(sb_bgl_lock(sbi, block_group));
+	desc->bg_free_blocks_count =
+		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
+			group_freed);
+	spin_unlock(sb_bgl_lock(sbi, block_group));
+	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
+
+	/* And the group descriptor block */
+	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+	ret = ext3_journal_dirty_metadata(handle, gd_bh);
+	if (!err) err = ret;
+	*pdquot_freed_blocks += group_freed;
+
+	if (overflow && !err) {
+		block += count;
+		count = overflow;
+		goto do_more;
+	}
+	sb->s_dirt = 1;
+error_return:
+	brelse(bitmap_bh);
+	ext3_std_error(sb, err);
+	return;
+}
+
+/* Free given blocks, update quota and i_blocks field */
+void ext3_free_blocks(handle_t *handle, struct inode *inode,
+			unsigned long block, unsigned long count)
+{
+	struct super_block * sb;
+	int dquot_freed_blocks;
+
+	sb = inode->i_sb;
+	if (!sb) {
+		printk ("ext3_free_blocks: nonexistent device");
+		return;
+	}
+	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
+	if (dquot_freed_blocks)
+		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+	return;
+}
+
+/*
+ * For ext3 allocations, we must not reuse any blocks which are
+ * allocated in the bitmap buffer's "last committed data" copy.  This
+ * prevents deletes from freeing up the page for reuse until we have
+ * committed the delete transaction.
+ *
+ * If we didn't do this, then deleting something and reallocating it as
+ * data would allow the old block to be overwritten before the
+ * transaction committed (because we force data to disk before commit).
+ * This would lead to corruption if we crashed between overwriting the
+ * data and committing the delete. 
+ *
+ * @@@ We may want to make this allocation behaviour conditional on
+ * data-writes at some point, and disable it for metadata allocations or
+ * sync-data inodes.
+ */
+static int ext3_test_allocatable(int nr, struct buffer_head *bh)
+{
+	int ret;
+	struct journal_head *jh = bh2jh(bh);
+
+	if (ext3_test_bit(nr, bh->b_data))
+		return 0;
+
+	jbd_lock_bh_state(bh);
+	if (!jh->b_committed_data)
+		ret = 1;
+	else
+		ret = !ext3_test_bit(nr, jh->b_committed_data);
+	jbd_unlock_bh_state(bh);
+	return ret;
+}
+
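+/*
+ * bitmap_search_next_usable_block() -- return the number of the first bit in
+ * [start, maxblocks) that is clear in both the on-disk bitmap and the
+ * last-committed copy (if any), or -1 if no such bit exists.
+ */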
+static int
+bitmap_search_next_usable_block(int start, struct buffer_head *bh,
+					int maxblocks)
+{
+	int next;
+	struct journal_head *jh = bh2jh(bh);
+
+	/*
+	 * The bitmap search --- search forward alternately through the actual
+	 * bitmap and the last-committed copy until we find a bit free in
+	 * both
+	 */
+	while (start < maxblocks) {
+		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
+		if (next >= maxblocks)
+			return -1;
+		if (ext3_test_allocatable(next, bh))
+			return next;
+		jbd_lock_bh_state(bh);
+		if (jh->b_committed_data)
+			start = ext3_find_next_zero_bit(jh->b_committed_data,
+						 	maxblocks, next);
+		jbd_unlock_bh_state(bh);
+	}
+	return -1;
+}
+
+/*
+ * Find an allocatable block in a bitmap.  We honour both the bitmap and
+ * its last-committed copy (if that exists), and perform the "most
+ * appropriate allocation" algorithm of looking for a free block near
+ * the initial goal; then for a free byte somewhere in the bitmap; then
+ * for any free bit in the bitmap.
+ */
+static int
+find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
+{
+	int here, next;
+	char *p, *r;
+
+	if (start > 0) {
+		/*
+		 * The goal was occupied; search forward for a free 
+		 * block within the next XX blocks.
+		 *
+		 * end_goal is more or less random, but it has to be
+		 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
+		 * next 64-bit boundary is simple.
+		 */
+		int end_goal = (start + 63) & ~63;
+		if (end_goal > maxblocks)
+			end_goal = maxblocks;
+		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
+		if (here < end_goal && ext3_test_allocatable(here, bh))
+			return here;
+		ext3_debug("Bit not found near goal\n");
+	}
+
+	here = start;
+	if (here < 0)
+		here = 0;
+
+	p = ((char *)bh->b_data) + (here >> 3);
+	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	next = (r - ((char *)bh->b_data)) << 3;
+
+	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
+		return next;
+
+	/*
+	 * The bitmap search --- search forward alternately through the actual
+	 * bitmap and the last-committed copy until we find a bit free in
+	 * both
+	 */
+	here = bitmap_search_next_usable_block(here, bh, maxblocks);
+	return here;
+}
+
+/*
+ * We think we can allocate this block in this bitmap.  Try to set the bit.
+ * If that succeeds then check that nobody has allocated and then freed the
+ * block since we saw that it was not marked in b_committed_data.  If it _was_
+ * allocated and freed then clear the bit in the bitmap again and return
+ * zero (failure).
+ */
+static inline int
+claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
+{
+	struct journal_head *jh = bh2jh(bh);
+	int ret;
+
+	if (ext3_set_bit_atomic(lock, block, bh->b_data))
+		return 0;
+	jbd_lock_bh_state(bh);
+	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
+		ext3_clear_bit_atomic(lock, block, bh->b_data);
+		ret = 0;
+	} else {
+		ret = 1;
+	}
+	jbd_unlock_bh_state(bh);
+	return ret;
+}
+
+/*
+ * If we failed to allocate the desired block then we may end up crossing to a
+ * new bitmap.  In that case we must release write access to the old one via
+ * ext3_journal_release_buffer(), else we'll run out of credits.
+ */
+static int
+ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
+	struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+{
+	int group_first_block, start, end;
+
+	/* we do allocation within the reservation window if we have a window */
+	if (my_rsv) {
+		group_first_block =
+			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
+			group * EXT3_BLOCKS_PER_GROUP(sb);
+		if (my_rsv->_rsv_start >= group_first_block)
+			start = my_rsv->_rsv_start - group_first_block;
+		else
+			/* reservation window cross group boundary */
+			start = 0;
+		end = my_rsv->_rsv_end - group_first_block + 1;
+		if (end > EXT3_BLOCKS_PER_GROUP(sb))
+			/* reservation window crosses group boundary */
+			end = EXT3_BLOCKS_PER_GROUP(sb);
+		if ((start <= goal) && (goal < end))
+			start = goal;
+		else
+			goal = -1;
+	} else {
+		if (goal > 0)
+			start = goal;
+		else
+			start = 0;
+		end = EXT3_BLOCKS_PER_GROUP(sb);
+	}
+
+	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
+
+repeat:
+	if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
+		goal = find_next_usable_block(start, bitmap_bh, end);
+		if (goal < 0)
+			goto fail_access;
+		if (!my_rsv) {
+			int i;
+
+			for (i = 0; i < 7 && goal > start &&
+					ext3_test_allocatable(goal - 1,
+								bitmap_bh);
+					i++, goal--)
+				;
+		}
+	}
+	start = goal;
+
+	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+		/*
+		 * The block was allocated by another thread, or it was
+		 * allocated and then freed by another thread
+		 */
+		start++;
+		goal++;
+		if (start >= end)
+			goto fail_access;
+		goto repeat;
+	}
+	return goal;
+fail_access:
+	return -1;
+}
+
+/**
+ * 	find_next_reservable_window():
+ *		find a reservable space within the given range.
+ *		It does not allocate the reservation window for now:
+ *		alloc_new_reservation() will do the work later.
+ *
+ * 	@search_head: the head of the searching list;
+ *		This is not necessarily the list head of the whole filesystem
+ *
+ *		We have both head and start_block to assist the search
+ *		for the reservable space. The list starts from head,
+ *		but we will shift to the place where start_block is,
+ *		then start from there, when looking for a reservable space.
+ *
+ * 	@size: the target new reservation window size
+ *
+ * 	@group_first_block: the first block we consider to start
+ *			the real search from
+ *
+ * 	@last_block:
+ *		the maximum block number that our goal reservable space
+ *		could start from. This is normally the last block in this
+ *		group. The search ends when it finds that the start of the
+ *		next possible reservable space is beyond this boundary.
+ *		This handles reservation window requests that cross the
+ *		group boundary.
+ *
+ * 	Basically we search the given range (start_block, last_block),
+ * 	rather than the whole reservation list, to find a free region
+ * 	that is of the requested size and has not been reserved.
+ *
+ *	On success, it returns the reservation window to be appended to.
+ *	On failure, it returns NULL.
+ */
+static struct ext3_reserve_window_node *find_next_reservable_window(
+				struct ext3_reserve_window_node *search_head,
+				unsigned long size, int *start_block,
+				int last_block)
+{
+	struct rb_node *next;
+	struct ext3_reserve_window_node *rsv, *prev;
+	int cur;
+
+	/* TODO: make the start of the reservation window byte-aligned */
+	/* cur = *start_block & ~7;*/
+	cur = *start_block;
+	rsv = search_head;
+	if (!rsv)
+		return NULL;
+
+	while (1) {
+		if (cur <= rsv->rsv_end)
+			cur = rsv->rsv_end + 1;
+
+		/* TODO?
+		 * If we could not find a reservable space of the expected
+		 * size, we could remember, during the re-search, the largest
+		 * reservable space we saw and return that one instead.
+		 *
+		 * For now it will fail if we could not find a reservable
+		 * space of the expected size (or more)...
+		 */
+		if (cur > last_block)
+			return NULL;		/* fail */
+
+		prev = rsv;
+		next = rb_next(&rsv->rsv_node);
+		rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+		/*
+		 * Reached the last reservation, we can just append to the
+		 * previous one.
+		 */
+		if (!next)
+			break;
+
+		if (cur + size <= rsv->rsv_start) {
+			/*
+			 * Found a reservable space big enough.  We could
+			 * have a reservation across the group boundary here.
+			 */
+			break;
+		}
+	}
+	/*
+	 * We get here either when we reach the end of the whole list and
+	 * there is empty reservable space after the last entry in the
+	 * list: append to the end of the list;
+	 *
+	 * or when we have found a reservable space in the middle of the
+	 * list: return the reservation window that we can append to.
+	 */
+	*start_block = cur;
+	return prev;
+}
+
+/**
+ * 	alloc_new_reservation()--allocate a new reservation window
+ *
+ *		To make a new reservation, we search the part of the
+ *		filesystem reservation list that covers this group. We try
+ *		to allocate a new reservation window near the allocation
+ *		goal, or the beginning of the group if there is no goal.
+ *
+ *		We first find a reservable space after the goal, then from
+ *		there we check the bitmap for the first free block after
+ *		it. If there is no free block before the end of the group,
+ *		the whole group is full and we fail. Otherwise, if the free
+ *		block is inside the expected reservable space, we succeed.
+ *		If the first free block is outside the reservable space, we
+ *		restart the search for the next reservable space from that
+ *		free block, and so on.
+ *
+ *	On success, a new reservation window is found and inserted into the
+ *	list.  It contains at least one free block, and it does not overlap
+ *	with other reservation windows.
+ *
+ *	On failure, we failed to find a reservation window in this group.
+ *
+ *	@rsv: the reservation
+ *
+ *	@goal: The goal (group-relative).  It is where the search for a
+ *		free reservable space should start from.
+ *		If we have a goal (goal > 0), we start from there;
+ *		with no goal (goal = -1), we start from the first block
+ *		of the group.
+ *
+ *	@sb: the super block
+ *	@group: the group we are trying to allocate in
+ *	@bitmap_bh: the block group block bitmap
+ */
+static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
+		int goal, struct super_block *sb,
+		unsigned int group, struct buffer_head *bitmap_bh)
+{
+	struct ext3_reserve_window_node *search_head;
+	int group_first_block, group_end_block, start_block;
+	int first_free_block;
+	int reservable_space_start;
+	struct ext3_reserve_window_node *prev_rsv;
+	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
+	unsigned long size;
+
+	group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
+				group * EXT3_BLOCKS_PER_GROUP(sb);
+	group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
+
+	if (goal < 0)
+		start_block = group_first_block;
+	else
+		start_block = goal + group_first_block;
+
+	size = my_rsv->rsv_goal_size;
+	if (!rsv_is_empty(&my_rsv->rsv_window)) {
+		/*
+		 * If the old reservation crosses the group boundary and the
+		 * goal is inside the old reservation window, we get here
+		 * when we have just failed to allocate from the first part
+		 * of the window. We still have another part that belongs to
+		 * the next group. In this case, there is no point in
+		 * discarding our window and trying to allocate a new one in
+		 * this group (which will fail); we should keep the
+		 * reservation window and simply move on.
+		 *
+		 * Maybe we could shift the start block of the reservation
+		 * window to the first block of the next group.
+		 */
+
+		if ((my_rsv->rsv_start <= group_end_block) &&
+				(my_rsv->rsv_end > group_end_block) &&
+				(start_block >= my_rsv->rsv_start))
+			return -1;
+
+		if ((my_rsv->rsv_alloc_hit >
+		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
+			/*
+			 * If the previous allocation hit ratio is greater
+			 * than half, we double the size of the reservation
+			 * window next time; otherwise we keep it the same.
+			 */
+			size = size * 2;
+			if (size > EXT3_MAX_RESERVE_BLOCKS)
+				size = EXT3_MAX_RESERVE_BLOCKS;
+			my_rsv->rsv_goal_size= size;
+		}
+	}
+	/*
+	 * shift the search start to the window near the goal block
+	 */
+	search_head = search_reserve_window(fs_rsv_root, start_block);
+
+	/*
+	 * find_next_reservable_window() simply finds a reservable window
+	 * inside the given range(start_block, group_end_block).
+	 *
+	 * To make sure the reservation window has a free bit inside it, we
+	 * need to check the bitmap after we found a reservable window.
+	 */
+retry:
+	prev_rsv = find_next_reservable_window(search_head, size,
+						&start_block, group_end_block);
+	if (prev_rsv == NULL)
+		goto failed;
+	reservable_space_start = start_block;
+	/*
+	 * On success, find_next_reservable_window() returns the
+	 * reservation window where there is a reservable space after it.
+	 * Before we reserve this reservable space, we need
+	 * to make sure there is at least a free block inside this region.
+	 *
+	 * Search the block bitmap and the copy of the last committed
+	 * bitmap alternately for the first allocatable block, starting
+	 * from the start block of the reservable space we just found.
+	 */
+	first_free_block = bitmap_search_next_usable_block(
+			reservable_space_start - group_first_block,
+			bitmap_bh, group_end_block - group_first_block + 1);
+
+	if (first_free_block < 0) {
+		/*
+		 * no free blocks left in the bitmap, no point
+		 * in reserving the space; return failure.
+		 */
+		goto failed;
+	}
+	start_block = first_free_block + group_first_block;
+	/*
+	 * check if the first free block is within the
+	 * free space we just found
+	 */
+	if ((start_block >= reservable_space_start) &&
+	  (start_block < reservable_space_start + size))
+		goto found_rsv_window;
+	/*
+	 * If the first free bit we found is outside the reservable space,
+	 * there is no free block in the reservable space; continue the
+	 * search for the next reservable space, starting from where that
+	 * free block is, and also shift the list head to where we stopped
+	 * last time.
+	 */
+	search_head = prev_rsv;
+	goto retry;
+
+found_rsv_window:
+	/*
+	 * Great, the reservable space contains some free blocks.
+	 * If the search says the new window should go right where the old
+	 * window already is, we don't need to remove the old window first
+	 * and then add it back to the same place; just update the new
+	 * start and new end.
+	 */
+	if (my_rsv != prev_rsv)  {
+		if (!rsv_is_empty(&my_rsv->rsv_window))
+			rsv_window_remove(sb, my_rsv);
+	}
+	my_rsv->rsv_start = reservable_space_start;
+	my_rsv->rsv_end = my_rsv->rsv_start + size - 1;
+	my_rsv->rsv_alloc_hit = 0;
+	if (my_rsv != prev_rsv)  {
+		ext3_rsv_window_add(sb, my_rsv);
+	}
+	return 0;		/* succeed */
+failed:
+	/*
+	 * failed to find a new reservation window in the current
+	 * group, remove the current (stale) reservation window
+	 * if there is any
+	 */
+	if (!rsv_is_empty(&my_rsv->rsv_window))
+		rsv_window_remove(sb, my_rsv);
+	return -1;		/* failed */
+}
+
+/*
+ * This is the main function used to allocate a new block and its reservation
+ * window.
+ *
+ * Each time a new block allocation is needed, we first try to allocate from
+ * the inode's own reservation.  If it does not have a reservation window,
+ * then instead of first looking for a free bit in the bitmap and then
+ * checking the reservation list to see whether that bit falls inside
+ * somebody else's reservation window, we try to allocate a reservation
+ * window for it starting from the goal, and then do the block allocation
+ * within that reservation window.
+ *
+ * This avoids repeatedly searching the reservation list when somebody is
+ * looking for a free block (without a reservation) and there are lots of
+ * free blocks, but they are all being reserved.
+ *
+ * The per-filesystem reservation windows are kept sorted by start block
+ * (in a red-black tree), so the insert, remove and find-free-space
+ * (non-reserved) operations are fast.
+ *
+ */
+static int
+ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
+			unsigned int group, struct buffer_head *bitmap_bh,
+			int goal, struct ext3_reserve_window_node * my_rsv,
+			int *errp)
+{
+	spinlock_t *rsv_lock;
+	unsigned long group_first_block;
+	int ret = 0;
+	int fatal;
+
+	*errp = 0;
+
+	/*
+	 * Make sure we use undo access for the bitmap, because it is critical
+	 * that we do the frozen_data COW on bitmap buffers in all cases even
+	 * if the buffer is in BJ_Forget state in the committing transaction.
+	 */
+	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
+	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
+	if (fatal) {
+		*errp = fatal;
+		return -1;
+	}
+
+	/*
+	 * We don't deal with reservations when the
+	 * filesystem is mounted without reservations,
+	 * or the file is not a regular file,
+	 * or the last attempt to allocate a block with reservation turned on failed.
+	 */
+	if (my_rsv == NULL ) {
+		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+		goto out;
+	}
+	rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+	/*
+	 * goal is a group-relative block number (if there is a goal):
+	 * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb).
+	 * group_first_block is a filesystem-wide block number: the block
+	 * number of the first block in this group.
+	 */
+	group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
+			group * EXT3_BLOCKS_PER_GROUP(sb);
+
+	/*
+	 * Basically we will allocate a new block from the inode's reservation
+	 * window.
+	 *
+	 * We need to allocate a new reservation window, if:
+	 * a) the inode does not have a reservation window; or
+	 * b) the last attempt to allocate a block from the existing
+	 *    reservation failed; or
+	 * c) we come here with a goal that is not inside the existing
+	 *    reservation window.
+	 *
+	 * We do not need to allocate a new reservation window if we come here
+	 * at the beginning with a goal and the goal is inside the window, or
+	 * if we don't have a goal but already have a reservation window;
+	 * in either case we can allocate from the reservation window directly.
+	 */
+	while (1) {
+		struct ext3_reserve_window rsv_copy;
+
+		rsv_copy._rsv_start = my_rsv->rsv_start;
+		rsv_copy._rsv_end = my_rsv->rsv_end;
+
+		if (rsv_is_empty(&rsv_copy) || (ret < 0) ||
+			!goal_in_my_reservation(&rsv_copy, goal, group, sb)) {
+			spin_lock(rsv_lock);
+			ret = alloc_new_reservation(my_rsv, goal, sb,
+							group, bitmap_bh);
+			rsv_copy._rsv_start = my_rsv->rsv_start;
+			rsv_copy._rsv_end = my_rsv->rsv_end;
+			spin_unlock(rsv_lock);
+			if (ret < 0)
+				break;			/* failed */
+
+			if (!goal_in_my_reservation(&rsv_copy, goal, group, sb))
+				goal = -1;
+		}
+		if ((rsv_copy._rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
+		    || (rsv_copy._rsv_end < group_first_block))
+			BUG();
+		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
+					   &rsv_copy);
+		if (ret >= 0) {
+			my_rsv->rsv_alloc_hit++;
+			break;				/* succeed */
+		}
+	}
+out:
+	if (ret >= 0) {
+		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
+					"bitmap block");
+		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
+		if (fatal) {
+			*errp = fatal;
+			return -1;
+		}
+		return ret;
+	}
+
+	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
+	ext3_journal_release_buffer(handle, bitmap_bh);
+	return ret;
+}
+
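+/*
+ * ext3_has_free_blocks() -- return 1 if the filesystem still has blocks
+ * available to this caller, 0 otherwise.  The blocks reserved for the
+ * super-user (s_r_blocks_count) are only handed out to CAP_SYS_RESOURCE
+ * tasks and to the configured reserved uid/gid.
+ */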
+static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
+{
+	int free_blocks, root_blocks;
+
+	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+		sbi->s_resuid != current->fsuid &&
+		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
+ * it is profitable to retry the operation, this function will wait
+ * for the current or committing transaction to complete, and then
+ * return TRUE.
+ */
+int ext3_should_retry_alloc(struct super_block *sb, int *retries)
+{
+	if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
+		return 0;
+
+	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+
+	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
+}
+
+/*
+ * ext3_new_block uses a goal block to assist allocation.  If the goal is
+ * free, or there is a free block within 32 blocks of the goal, that block
+ * is allocated.  Otherwise a forward search is made for a free block; within 
+ * each block group the search first looks for an entire free byte in the block
+ * bitmap, and then for any free bit if that fails.
+ * This function also updates quota and the i_blocks field.
+ */
+int ext3_new_block(handle_t *handle, struct inode *inode,
+			unsigned long goal, int *errp)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *gdp_bh;
+	int group_no;
+	int goal_group;
+	int ret_block;
+	int bgi;			/* blockgroup iteration index */
+	int target_block;
+	int fatal = 0, err;
+	int performed_allocation = 0;
+	int free_blocks;
+	struct super_block *sb;
+	struct ext3_group_desc *gdp;
+	struct ext3_super_block *es;
+	struct ext3_sb_info *sbi;
+	struct ext3_reserve_window_node *my_rsv = NULL;
+	struct ext3_block_alloc_info *block_i;
+	unsigned short windowsz = 0;
+#ifdef EXT3FS_DEBUG
+	static int goal_hits, goal_attempts;
+#endif
+	unsigned long ngroups;
+
+	*errp = -ENOSPC;
+	sb = inode->i_sb;
+	if (!sb) {
+		printk("ext3_new_block: nonexistent device");
+		return 0;
+	}
+
+	/*
+	 * Check quota for allocation of this block.
+	 */
+	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+		*errp = -EDQUOT;
+		return 0;
+	}
+
+	sbi = EXT3_SB(sb);
+	es = EXT3_SB(sb)->s_es;
+	ext3_debug("goal=%lu.\n", goal);
+	/*
+	 * Allocate a block from the reservation only when the
+	 * filesystem is mounted with reservations (the default, -o reservation),
+	 * it's a regular file, and
+	 * the desired window size is greater than 0 (one could use the ioctl
+	 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
+	 * reservation on that particular file).
+	 */
+	block_i = EXT3_I(inode)->i_block_alloc_info;
+	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
+		my_rsv = &block_i->rsv_window_node;
+
+	if (!ext3_has_free_blocks(sbi)) {
+		*errp = -ENOSPC;
+		goto out;
+	}
+
+	/*
+	 * First, test whether the goal block is free.
+	 */
+	if (goal < le32_to_cpu(es->s_first_data_block) ||
+	    goal >= le32_to_cpu(es->s_blocks_count))
+		goal = le32_to_cpu(es->s_first_data_block);
+	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
+			EXT3_BLOCKS_PER_GROUP(sb);
+	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
+	if (!gdp)
+		goto io_error;
+
+	goal_group = group_no;
+retry:
+	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+	/*
+	 * if there are not enough free blocks to make a new reservation,
+	 * turn off reservation for this allocation
+	 */
+	if (my_rsv && (free_blocks < windowsz)
+		&& (rsv_is_empty(&my_rsv->rsv_window)))
+		my_rsv = NULL;
+
+	if (free_blocks > 0) {
+		ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
+				EXT3_BLOCKS_PER_GROUP(sb));
+		bitmap_bh = read_block_bitmap(sb, group_no);
+		if (!bitmap_bh)
+			goto io_error;
+		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
+					bitmap_bh, ret_block, my_rsv, &fatal);
+		if (fatal)
+			goto out;
+		if (ret_block >= 0)
+			goto allocated;
+	}
+
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+
+	/*
+	 * Now search the rest of the groups.  We assume that
+	 * group_no and gdp correctly point to the last group visited.
+	 */
+	for (bgi = 0; bgi < ngroups; bgi++) {
+		group_no++;
+		if (group_no >= ngroups)
+			group_no = 0;
+		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
+		if (!gdp) {
+			*errp = -EIO;
+			goto out;
+		}
+		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+		/*
+		 * skip this group if the number of
+		 * free blocks is less than half of the reservation
+		 * window size.
+		 */
+		if (free_blocks <= (windowsz/2))
+			continue;
+
+		brelse(bitmap_bh);
+		bitmap_bh = read_block_bitmap(sb, group_no);
+		if (!bitmap_bh)
+			goto io_error;
+		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
+					bitmap_bh, -1, my_rsv, &fatal);
+		if (fatal)
+			goto out;
+		if (ret_block >= 0) 
+			goto allocated;
+	}
+	/*
+	 * We may end up with a bogus earlier ENOSPC error because the
+	 * filesystem is "full" of reservations, while there may in fact
+	 * be free blocks available on disk.
+	 * In this case, we just forget about the reservations and
+	 * do the block allocation as if there were no reservations.
+	 */
+	if (my_rsv) {
+		my_rsv = NULL;
+		group_no = goal_group;
+		goto retry;
+	}
+	/* No space left on the device */
+	*errp = -ENOSPC;
+	goto out;
+
+allocated:
+
+	ext3_debug("using block group %d(%d)\n",
+			group_no, gdp->bg_free_blocks_count);
+
+	BUFFER_TRACE(gdp_bh, "get_write_access");
+	fatal = ext3_journal_get_write_access(handle, gdp_bh);
+	if (fatal)
+		goto out;
+
+	target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
+				+ le32_to_cpu(es->s_first_data_block);
+
+	if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
+	    target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+	    in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+		      EXT3_SB(sb)->s_itb_per_group))
+		ext3_error(sb, "ext3_new_block",
+			    "Allocating block in system zone - "
+			    "block = %u", target_block);
+
+	performed_allocation = 1;
+
+#ifdef CONFIG_JBD_DEBUG
+	{
+		struct buffer_head *debug_bh;
+
+		/* Record bitmap buffer state in the newly allocated block */
+		debug_bh = sb_find_get_block(sb, target_block);
+		if (debug_bh) {
+			BUFFER_TRACE(debug_bh, "state when allocated");
+			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
+			brelse(debug_bh);
+		}
+	}
+	jbd_lock_bh_state(bitmap_bh);
+	spin_lock(sb_bgl_lock(sbi, group_no));
+	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
+		if (ext3_test_bit(ret_block,
+				bh2jh(bitmap_bh)->b_committed_data)) {
+			printk("%s: block was unexpectedly set in "
+				"b_committed_data\n", __FUNCTION__);
+		}
+	}
+	ext3_debug("found bit %d\n", ret_block);
+	spin_unlock(sb_bgl_lock(sbi, group_no));
+	jbd_unlock_bh_state(bitmap_bh);
+#endif
+
+	/* ret_block was blockgroup-relative.  Now it becomes fs-relative */
+	ret_block = target_block;
+
+	if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+		ext3_error(sb, "ext3_new_block",
+			    "block(%d) >= blocks count(%d) - "
+			    "block_group = %d, es == %p ", ret_block,
+			le32_to_cpu(es->s_blocks_count), group_no, es);
+		goto out;
+	}
+
+	/*
+	 * It is up to the caller to add the new buffer to a journal
+	 * list of some description.  We don't know in advance whether
+	 * the caller wants to use it as metadata or data.
+	 */
+	ext3_debug("allocating block %d. Goal hits %d of %d.\n",
+			ret_block, goal_hits, goal_attempts);
+
+	spin_lock(sb_bgl_lock(sbi, group_no));
+	gdp->bg_free_blocks_count =
+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+	spin_unlock(sb_bgl_lock(sbi, group_no));
+	percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+
+	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
+	err = ext3_journal_dirty_metadata(handle, gdp_bh);
+	if (!fatal)
+		fatal = err;
+
+	sb->s_dirt = 1;
+	if (fatal)
+		goto out;
+
+	*errp = 0;
+	brelse(bitmap_bh);
+	return ret_block;
+
+io_error:
+	*errp = -EIO;
+out:
+	if (fatal) {
+		*errp = fatal;
+		ext3_std_error(sb, fatal);
+	}
+	/*
+	 * Undo the block allocation
+	 */
+	if (!performed_allocation)
+		DQUOT_FREE_BLOCK(inode, 1);
+	brelse(bitmap_bh);
+	return 0;
+}
+
+unsigned long ext3_count_free_blocks(struct super_block *sb)
+{
+	unsigned long desc_count;
+	struct ext3_group_desc *gdp;
+	int i;
+	unsigned long ngroups;
+#ifdef EXT3FS_DEBUG
+	struct ext3_super_block *es;
+	unsigned long bitmap_count, x;
+	struct buffer_head *bitmap_bh = NULL;
+
+	lock_super(sb);
+	es = EXT3_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	gdp = NULL;
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		gdp = ext3_get_group_desc(sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_block_bitmap(sb, i);
+		if (bitmap_bh == NULL)
+			continue;
+
+		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
+		printk("group %d: stored = %d, counted = %lu\n",
+			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
+	       le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
+	unlock_super(sb);
+	return bitmap_count;
+#else
+	desc_count = 0;
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+	for (i = 0; i < ngroups; i++) {
+		gdp = ext3_get_group_desc(sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+	}
+
+	return desc_count;
+#endif
+}
+
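+/*
+ * block_in_use() -- test whether the filesystem-wide block number @block is
+ * marked as in use in @map, the block bitmap of the group it belongs to.
+ */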
+static inline int
+block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
+{
+	return ext3_test_bit ((block -
+		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
+			 EXT3_BLOCKS_PER_GROUP(sb), map);
+}
+
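+/*
+ * test_root() -- return true if @a is a positive power of @b
+ * (a == b, b^2, b^3, ...).  Helper for ext3_group_sparse() below.
+ */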
+static inline int test_root(int a, int b)
+{
+	int num = b;
+
+	while (a > num)
+		num *= b;
+	return num == a;
+}
+
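+/*
+ * With the sparse_super feature, only groups 0, 1 and powers of 3, 5 and 7
+ * carry backup copies of the superblock and group descriptors;
+ * ext3_group_sparse() tells whether @group is one of those groups.
+ */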
+static int ext3_group_sparse(int group)
+{
+	if (group <= 1)
+		return 1;
+	if (!(group & 1))
+		return 0;
+	return (test_root(group, 7) || test_root(group, 5) ||
+		test_root(group, 3));
+}
+
+/**
+ *	ext3_bg_has_super - number of blocks used by the superblock in group
+ *	@sb: superblock for filesystem
+ *	@group: group number to check
+ *
+ *	Return the number of blocks used by the superblock (primary or backup)
+ *	in this group.  Currently this will be only 0 or 1.
+ */
+int ext3_bg_has_super(struct super_block *sb, int group)
+{
+	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
+	    !ext3_group_sparse(group))
+		return 0;
+	return 1;
+}
+
+/**
+ *	ext3_bg_num_gdb - number of blocks used by the group table in group
+ *	@sb: superblock for filesystem
+ *	@group: group number to check
+ *
+ *	Return the number of blocks used by the group descriptor table
+ *	(primary or backup) in this group.  In the future there may be a
+ *	different number of descriptor blocks in each group.
+ */
+unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
+{
+	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
+	    !ext3_group_sparse(group))
+		return 0;
+	return EXT3_SB(sb)->s_gdb_count;
+}
+
+#ifdef CONFIG_EXT3_CHECK
+/* Called at mount-time, super-block is locked */
+void ext3_check_blocks_bitmap (struct super_block * sb)
+{
+	struct ext3_super_block *es;
+	unsigned long desc_count, bitmap_count, x, j;
+	unsigned long desc_blocks;
+	struct buffer_head *bitmap_bh = NULL;
+	struct ext3_group_desc *gdp;
+	int i;
+
+	es = EXT3_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	gdp = NULL;
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		gdp = ext3_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_block_bitmap(sb, i);
+		if (bitmap_bh == NULL)
+			continue;
+
+		if (ext3_bg_has_super(sb, i) &&
+				!ext3_test_bit(0, bitmap_bh->b_data))
+			ext3_error(sb, __FUNCTION__,
+				   "Superblock in group %d is marked free", i);
+
+		desc_blocks = ext3_bg_num_gdb(sb, i);
+		for (j = 0; j < desc_blocks; j++)
+			if (!ext3_test_bit(j + 1, bitmap_bh->b_data))
+				ext3_error(sb, __FUNCTION__,
+					   "Descriptor block #%ld in group "
+					   "%d is marked free", j, i);
+
+		if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap),
+						sb, bitmap_bh->b_data))
+			ext3_error (sb, "ext3_check_blocks_bitmap",
+				    "Block bitmap for group %d is marked free",
+				    i);
+
+		if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap),
+						sb, bitmap_bh->b_data))
+			ext3_error (sb, "ext3_check_blocks_bitmap",
+				    "Inode bitmap for group %d is marked free",
+				    i);
+
+		for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
+			if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
+							sb, bitmap_bh->b_data))
+				ext3_error (sb, "ext3_check_blocks_bitmap",
+					    "Block #%d of the inode table in "
+					    "group %d is marked free", j, i);
+
+		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
+		if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
+			ext3_error (sb, "ext3_check_blocks_bitmap",
+				    "Wrong free blocks count for group %d, "
+				    "stored = %d, counted = %lu", i,
+				    le16_to_cpu(gdp->bg_free_blocks_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
+		ext3_error (sb, "ext3_check_blocks_bitmap",
+			"Wrong free blocks count in super block, "
+			"stored = %lu, counted = %lu",
+			(unsigned long)le32_to_cpu(es->s_free_blocks_count),
+			bitmap_count);
+}
+#endif
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
new file mode 100644
index 0000000..6c419b9
--- /dev/null
+++ b/fs/ext3/bitmap.c
@@ -0,0 +1,26 @@
+/*
+ *  linux/fs/ext3/bitmap.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include <linux/buffer_head.h>
+
+
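+/*
+ * nibblemap[n] is the number of zero bits in the 4-bit value n;
+ * ext3_count_free() below uses it to count free (zero) bits in a bitmap
+ * one nibble at a time.
+ */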
+static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
+unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
+{
+	unsigned int i;
+	unsigned long sum = 0;
+
+	if (!map) 
+		return (0);
+	for (i = 0; i < numchars; i++)
+		sum += nibblemap[map->b_data[i] & 0xf] +
+			nibblemap[(map->b_data[i] >> 4) & 0xf];
+	return (sum);
+}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
new file mode 100644
index 0000000..832867a
--- /dev/null
+++ b/fs/ext3/dir.c
@@ -0,0 +1,519 @@
+/*
+ *  linux/fs/ext3/dir.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/dir.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext3 directory handling functions
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *
+ * Hash Tree Directory indexing (c) 2001  Daniel Phillips
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+
+static unsigned char ext3_filetype_table[] = {
+	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static int ext3_readdir(struct file *, void *, filldir_t);
+static int ext3_dx_readdir(struct file * filp,
+			   void * dirent, filldir_t filldir);
+static int ext3_release_dir (struct inode * inode,
+				struct file * filp);
+
+struct file_operations ext3_dir_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.readdir	= ext3_readdir,		/* we take BKL. needed?*/
+	.ioctl		= ext3_ioctl,		/* BKL held */
+	.fsync		= ext3_sync_file,	/* BKL held */
+#ifdef CONFIG_EXT3_INDEX
+	.release	= ext3_release_dir,
+#endif
+};
+
+
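+/*
+ * get_dtype() -- convert the file type stored in an ext3 directory entry
+ * into the DT_* value expected by filldir().  Returns DT_UNKNOWN when the
+ * filesystem does not record file types or the value is out of range.
+ */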
+static unsigned char get_dtype(struct super_block *sb, int filetype)
+{
+	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
+	    (filetype >= EXT3_FT_MAX))
+		return DT_UNKNOWN;
+
+	return (ext3_filetype_table[filetype]);
+}
+			       
+
+int ext3_check_dir_entry (const char * function, struct inode * dir,
+			  struct ext3_dir_entry_2 * de,
+			  struct buffer_head * bh,
+			  unsigned long offset)
+{
+	const char * error_msg = NULL;
+ 	const int rlen = le16_to_cpu(de->rec_len);
+
+	if (rlen < EXT3_DIR_REC_LEN(1))
+		error_msg = "rec_len is smaller than minimal";
+	else if (rlen % 4 != 0)
+		error_msg = "rec_len % 4 != 0";
+	else if (rlen < EXT3_DIR_REC_LEN(de->name_len))
+		error_msg = "rec_len is too small for name_len";
+	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+		error_msg = "directory entry across blocks";
+	else if (le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+		error_msg = "inode out of bounds";
+
+	if (error_msg != NULL)
+		ext3_error (dir->i_sb, function,
+			"bad entry in directory #%lu: %s - "
+			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+			dir->i_ino, error_msg, offset,
+			(unsigned long) le32_to_cpu(de->inode),
+			rlen, de->name_len);
+	return error_msg == NULL ? 1 : 0;
+}
+
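+/*
+ * ext3_readdir() -- the classic linear readdir.  It walks the directory
+ * blocks in file offset order, does block readahead, revalidates its
+ * position if the directory changed under it, and calls filldir() for each
+ * valid entry.  Indexed (htree) directories are handed off to
+ * ext3_dx_readdir().
+ */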
+static int ext3_readdir(struct file * filp,
+			 void * dirent, filldir_t filldir)
+{
+	int error = 0;
+	unsigned long offset, blk;
+	int i, num, stored;
+	struct buffer_head * bh, * tmp, * bha[16];
+	struct ext3_dir_entry_2 * de;
+	struct super_block * sb;
+	int err;
+	struct inode *inode = filp->f_dentry->d_inode;
+	int ret = 0;
+
+	sb = inode->i_sb;
+
+#ifdef CONFIG_EXT3_INDEX
+	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+				    EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+	     ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+		err = ext3_dx_readdir(filp, dirent, filldir);
+		if (err != ERR_BAD_DX_DIR) {
+			ret = err;
+			goto out;
+		}
+		/*
+		 * We don't set the inode dirty flag since it's not
+		 * critical that it get flushed back to the disk.
+		 */
+		EXT3_I(filp->f_dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
+	}
+#endif
+	stored = 0;
+	bh = NULL;
+	offset = filp->f_pos & (sb->s_blocksize - 1);
+
+	while (!error && !stored && filp->f_pos < inode->i_size) {
+		blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb);
+		bh = ext3_bread(NULL, inode, blk, 0, &err);
+		if (!bh) {
+			ext3_error (sb, "ext3_readdir",
+				"directory #%lu contains a hole at offset %lu",
+				inode->i_ino, (unsigned long)filp->f_pos);
+			filp->f_pos += sb->s_blocksize - offset;
+			continue;
+		}
+
+		/*
+		 * Do the readahead
+		 */
+		if (!offset) {
+			for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0;
+			     i > 0; i--) {
+				tmp = ext3_getblk (NULL, inode, ++blk, 0, &err);
+				if (tmp && !buffer_uptodate(tmp) &&
+						!buffer_locked(tmp))
+					bha[num++] = tmp;
+				else
+					brelse (tmp);
+			}
+			if (num) {
+				ll_rw_block (READA, num, bha);
+				for (i = 0; i < num; i++)
+					brelse (bha[i]);
+			}
+		}
+
+revalidate:
+		/* If the dir block has changed since the last call to
+		 * readdir(2), then we might be pointing to an invalid
+		 * dirent right now.  Scan from the start of the block
+		 * to make sure. */
+		if (filp->f_version != inode->i_version) {
+			for (i = 0; i < sb->s_blocksize && i < offset; ) {
+				de = (struct ext3_dir_entry_2 *) 
+					(bh->b_data + i);
+				/* It's too expensive to do a full
+				 * dirent test each time round this
+				 * loop, but we do have to test at
+				 * least that it is non-zero.  A
+				 * failure will be detected in the
+				 * dirent test below. */
+				if (le16_to_cpu(de->rec_len) <
+						EXT3_DIR_REC_LEN(1))
+					break;
+				i += le16_to_cpu(de->rec_len);
+			}
+			offset = i;
+			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+				| offset;
+			filp->f_version = inode->i_version;
+		}
+
+		while (!error && filp->f_pos < inode->i_size 
+		       && offset < sb->s_blocksize) {
+			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
+			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
+						   bh, offset)) {
+				/* On error, skip the f_pos to the
+                                   next block. */
+				filp->f_pos = (filp->f_pos |
+						(sb->s_blocksize - 1)) + 1;
+				brelse (bh);
+				ret = stored;
+				goto out;
+			}
+			offset += le16_to_cpu(de->rec_len);
+			if (le32_to_cpu(de->inode)) {
+				/* We might block in the next section
+				 * if the data destination is
+				 * currently swapped out.  So, use a
+				 * version stamp to detect whether or
+				 * not the directory has been modified
+				 * during the copy operation.
+				 */
+				unsigned long version = filp->f_version;
+
+				error = filldir(dirent, de->name,
+						de->name_len,
+						filp->f_pos,
+						le32_to_cpu(de->inode),
+						get_dtype(sb, de->file_type));
+				if (error)
+					break;
+				if (version != filp->f_version)
+					goto revalidate;
+				stored ++;
+			}
+			filp->f_pos += le16_to_cpu(de->rec_len);
+		}
+		offset = 0;
+		brelse (bh);
+	}
+out:
+	return ret;
+}
+
+#ifdef CONFIG_EXT3_INDEX
+/*
+ * These functions convert from the major/minor hash to an f_pos
+ * value.
+ * 
+ * Currently we only use the major hash number.  This is unfortunate, but
+ * on 32-bit machines, the same VFS interface is used for lseek and
+ * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+ * lseek/telldir/seekdir will blow out spectacularly, and from within
+ * the ext2 low-level routine, we don't know if we're being called by
+ * a 64-bit version of the system call or the 32-bit version of the
+ * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
+ * cookie.  Sigh.
+ */
+#define hash2pos(major, minor)	(major >> 1)
+#define pos2maj_hash(pos)	((pos << 1) & 0xffffffff)
+#define pos2min_hash(pos)	(0)
+
+/*
+ * This structure holds the nodes of the red-black tree used to store
+ * the directory entry in hash order.
+ */
+struct fname {
+	__u32		hash;
+	__u32		minor_hash;
+	struct rb_node	rb_hash; 
+	struct fname	*next;
+	__u32		inode;
+	__u8		name_len;
+	__u8		file_type;
+	char		name[0];
+};
+
+/*
+ * This function implements a non-recursive way of freeing all of the
+ * nodes in the red-black tree.
+ */
+static void free_rb_tree_fname(struct rb_root *root)
+{
+	struct rb_node	*n = root->rb_node;
+	struct rb_node	*parent;
+	struct fname	*fname;
+
+	while (n) {
+		/* Do the node's children first */
+		if ((n)->rb_left) {
+			n = n->rb_left;
+			continue;
+		}
+		if (n->rb_right) {
+			n = n->rb_right;
+			continue;
+		}
+		/*
+		 * The node has no children; free it, and then zero
+		 * out parent's link to it.  Finally go to the
+		 * beginning of the loop and try to free the parent
+		 * node.
+		 */
+		parent = n->rb_parent;
+		fname = rb_entry(n, struct fname, rb_hash);
+		while (fname) {
+			struct fname * old = fname;
+			fname = fname->next;
+			kfree (old);
+		}
+		if (!parent)
+			root->rb_node = NULL;
+		else if (parent->rb_left == n)
+			parent->rb_left = NULL;
+		else if (parent->rb_right == n)
+			parent->rb_right = NULL;
+		n = parent;
+	}
+	root->rb_node = NULL;
+}
+
+
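+/*
+ * create_dir_info() -- allocate and initialise the per-open directory state
+ * used by the htree readdir code, seeding the current hash from @pos.
+ */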
+static struct dir_private_info *create_dir_info(loff_t pos)
+{
+	struct dir_private_info *p;
+
+	p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+	if (!p)
+		return NULL;
+	p->root.rb_node = NULL;
+	p->curr_node = NULL;
+	p->extra_fname = NULL;
+	p->last_pos = 0;
+	p->curr_hash = pos2maj_hash(pos);
+	p->curr_minor_hash = pos2min_hash(pos);
+	p->next_hash = 0;
+	return p;
+}
+
+void ext3_htree_free_dir_info(struct dir_private_info *p)
+{
+	free_rb_tree_fname(&p->root);
+	kfree(p);
+}
+
+/*
+ * Given a directory entry, enter it into the fname rb tree.
+ */
+int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
+			     __u32 minor_hash,
+			     struct ext3_dir_entry_2 *dirent)
+{
+	struct rb_node **p, *parent = NULL;
+	struct fname * fname, *new_fn;
+	struct dir_private_info *info;
+	int len;
+
+	info = (struct dir_private_info *) dir_file->private_data;
+	p = &info->root.rb_node;
+
+	/* Create and allocate the fname structure */
+	len = sizeof(struct fname) + dirent->name_len + 1;
+	new_fn = kmalloc(len, GFP_KERNEL);
+	if (!new_fn)
+		return -ENOMEM;
+	memset(new_fn, 0, len);
+	new_fn->hash = hash;
+	new_fn->minor_hash = minor_hash;
+	new_fn->inode = le32_to_cpu(dirent->inode);
+	new_fn->name_len = dirent->name_len;
+	new_fn->file_type = dirent->file_type;
+	memcpy(new_fn->name, dirent->name, dirent->name_len);
+	new_fn->name[dirent->name_len] = 0;
+
+	while (*p) {
+		parent = *p;
+		fname = rb_entry(parent, struct fname, rb_hash);
+
+		/*
+		 * If the hash and minor hash match up, then we put
+		 * them on a linked list.  This rarely happens...
+		 */
+		if ((new_fn->hash == fname->hash) &&
+		    (new_fn->minor_hash == fname->minor_hash)) {
+			new_fn->next = fname->next;
+			fname->next = new_fn;
+			return 0;
+		}
+
+		if (new_fn->hash < fname->hash)
+			p = &(*p)->rb_left;
+		else if (new_fn->hash > fname->hash)
+			p = &(*p)->rb_right;
+		else if (new_fn->minor_hash < fname->minor_hash)
+			p = &(*p)->rb_left;
+		else /* if (new_fn->minor_hash > fname->minor_hash) */
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(&new_fn->rb_hash, parent, p);
+	rb_insert_color(&new_fn->rb_hash, &info->root);
+	return 0;
+}
+
+
+
+/*
+ * This is a helper function for ext3_dx_readdir.  It calls filldir
+ * for all entries on the fname linked list.  (Normally there is only
+ * one entry on the linked list, unless there are 62 bit hash collisions.)
+ */
+static int call_filldir(struct file * filp, void * dirent,
+			filldir_t filldir, struct fname *fname)
+{
+	struct dir_private_info *info = filp->private_data;
+	loff_t	curr_pos;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block * sb;
+	int error;
+
+	sb = inode->i_sb;
+
+	if (!fname) {
+		printk("call_filldir: called with null fname?!?\n");
+		return 0;
+	}
+	curr_pos = hash2pos(fname->hash, fname->minor_hash);
+	while (fname) {
+		error = filldir(dirent, fname->name,
+				fname->name_len, curr_pos, 
+				fname->inode,
+				get_dtype(sb, fname->file_type));
+		if (error) {
+			filp->f_pos = curr_pos;
+			info->extra_fname = fname->next;
+			return error;
+		}
+		fname = fname->next;
+	}
+	return 0;
+}
+
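+/*
+ * ext3_dx_readdir() -- readdir for hash-indexed directories.  Entries are
+ * read into the red-black tree one hash range at a time and handed to
+ * filldir() in hash order, with f_pos holding the current hash position.
+ */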
+static int ext3_dx_readdir(struct file * filp,
+			 void * dirent, filldir_t filldir)
+{
+	struct dir_private_info *info = filp->private_data;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct fname *fname;
+	int	ret;
+
+	if (!info) {
+		info = create_dir_info(filp->f_pos);
+		if (!info)
+			return -ENOMEM;
+		filp->private_data = info;
+	}
+
+	if (filp->f_pos == EXT3_HTREE_EOF)
+		return 0;	/* EOF */
+
+	/* Someone has messed with f_pos; reset the world */
+	if (info->last_pos != filp->f_pos) {
+		free_rb_tree_fname(&info->root);
+		info->curr_node = NULL;
+		info->extra_fname = NULL;
+		info->curr_hash = pos2maj_hash(filp->f_pos);
+		info->curr_minor_hash = pos2min_hash(filp->f_pos);
+	}
+
+	/*
+	 * If there are any leftover names on the hash collision
+	 * chain, return them first.
+	 */
+	if (info->extra_fname &&
+	    call_filldir(filp, dirent, filldir, info->extra_fname))
+		goto finished;
+
+	if (!info->curr_node)
+		info->curr_node = rb_first(&info->root);
+
+	while (1) {
+		/*
+		 * Fill the rbtree if we have no more entries,
+		 * or the inode has changed since we last read in the
+		 * cached entries. 
+		 */
+		if ((!info->curr_node) ||
+		    (filp->f_version != inode->i_version)) {
+			info->curr_node = NULL;
+			free_rb_tree_fname(&info->root);
+			filp->f_version = inode->i_version;
+			ret = ext3_htree_fill_tree(filp, info->curr_hash,
+						   info->curr_minor_hash,
+						   &info->next_hash);
+			if (ret < 0)
+				return ret;
+			if (ret == 0) {
+				filp->f_pos = EXT3_HTREE_EOF;
+				break;
+			}
+			info->curr_node = rb_first(&info->root);
+		}
+
+		fname = rb_entry(info->curr_node, struct fname, rb_hash);
+		info->curr_hash = fname->hash;
+		info->curr_minor_hash = fname->minor_hash;
+		if (call_filldir(filp, dirent, filldir, fname))
+			break;
+
+		info->curr_node = rb_next(info->curr_node);
+		if (!info->curr_node) {
+			if (info->next_hash == ~0) {
+				filp->f_pos = EXT3_HTREE_EOF;
+				break;
+			}
+			info->curr_hash = info->next_hash;
+			info->curr_minor_hash = 0;
+		}
+	}
+finished:
+	info->last_pos = filp->f_pos;
+	return 0;
+}
+
+static int ext3_release_dir (struct inode * inode, struct file * filp)
+{
+	if (filp->private_data)
+		ext3_htree_free_dir_info(filp->private_data);
+
+	return 0;
+}
+
+#endif
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
new file mode 100644
index 0000000..5ad8cf02
--- /dev/null
+++ b/fs/ext3/file.c
@@ -0,0 +1,131 @@
+/*
+ *  linux/fs/ext3/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext3 fs regular file handling primitives
+ *
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ *	(jj@sunsite.ms.mff.cuni.cz)
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Called when an inode is released. Note that this is different
+ * from ext3_file_open: open gets called at every open, but release
+ * gets called only when /all/ the files are closed.
+ */
+static int ext3_release_file (struct inode * inode, struct file * filp)
+{
+	/* if we are the last writer on the inode, drop the block reservation */
+	if ((filp->f_mode & FMODE_WRITE) &&
+			(atomic_read(&inode->i_writecount) == 1))
+		ext3_discard_reservation(inode);
+	if (is_dx(inode) && filp->private_data)
+		ext3_htree_free_dir_info(filp->private_data);
+
+	return 0;
+}
+
+static ssize_t
+ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_dentry->d_inode;
+	ssize_t ret;
+	int err;
+
+	ret = generic_file_aio_write(iocb, buf, count, pos);
+
+	/*
+	 * Skip flushing if there was an error, or if nothing was written.
+	 */
+	if (ret <= 0)
+		return ret;
+
+	/*
+	 * If the inode is IS_SYNC, or is O_SYNC and we are doing data
+	 * journalling then we need to make sure that we force the transaction
+	 * to disk to keep all metadata uptodate synchronously.
+	 */
+	if (file->f_flags & O_SYNC) {
+		/*
+		 * If we are non-data-journaled, then the dirty data has
+		 * already been flushed to backing store by generic_osync_inode,
+		 * and the inode has been flushed too if there have been any
+		 * modifications other than mere timestamp updates.
+		 *
+		 * Open question --- do we care about flushing timestamps too
+		 * if the inode is IS_SYNC?
+		 */
+		if (!ext3_should_journal_data(inode))
+			return ret;
+
+		goto force_commit;
+	}
+
+	/*
+	 * So we know that there has been no forced data flush.  If the inode
+	 * is marked IS_SYNC, we need to force one ourselves.
+	 */
+	if (!IS_SYNC(inode))
+		return ret;
+
+	/*
+	 * Open question #2 --- should we force data to disk here too?  If we
+	 * don't, the only impact is that data=writeback filesystems won't
+	 * flush data to disk automatically on IS_SYNC, only metadata (but
+	 * historically, that is what ext2 has done.)
+	 */
+
+force_commit:
+	err = ext3_force_commit(inode->i_sb);
+	if (err) 
+		return err;
+	return ret;
+}
+
+struct file_operations ext3_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= ext3_file_write,
+	.readv		= generic_file_readv,
+	.writev		= generic_file_writev,
+	.ioctl		= ext3_ioctl,
+	.mmap		= generic_file_mmap,
+	.open		= generic_file_open,
+	.release	= ext3_release_file,
+	.fsync		= ext3_sync_file,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations ext3_file_inode_operations = {
+	.truncate	= ext3_truncate,
+	.setattr	= ext3_setattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext3_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.permission	= ext3_permission,
+};
+
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
new file mode 100644
index 0000000..49382a20
--- /dev/null
+++ b/fs/ext3/fsync.c
@@ -0,0 +1,88 @@
+/*
+ *  linux/fs/ext3/fsync.c
+ *
+ *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
+ *  from
+ *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
+ *                      Laboratoire MASI - Institut Blaise Pascal
+ *                      Universite Pierre et Marie Curie (Paris VI)
+ *  from
+ *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
+ * 
+ *  ext3fs fsync primitive
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ * 
+ *  Removed unnecessary code duplication for little endian machines
+ *  and excessive __inline__s. 
+ *        Andi Kleen, 1997
+ *
+ * Major simplifications and cleanup - we only need to do the metadata, because
+ * we can depend on generic_block_fdatasync() to sync the data blocks.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+
+/*
+ * akpm: A new design for ext3_sync_file().
+ *
+ * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
+ * There cannot be a transaction open by this task.
+ * Another task could have dirtied this inode.  Its data can be in any
+ * state in the journalling system.
+ *
+ * What we do is just kick off a commit and wait on it.  This will snapshot the
+ * inode to disk.
+ */
+
+int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	int ret = 0;
+
+	J_ASSERT(ext3_journal_current_handle() == 0);
+
+	/*
+	 * data=writeback:
+	 *  The caller's filemap_fdatawrite()/wait will sync the data.
+	 *  sync_inode() will sync the metadata
+	 *
+	 * data=ordered:
+	 *  The caller's filemap_fdatawrite() will write the data and
+	 *  sync_inode() will write the inode if it is dirty.  Then the caller's
+	 *  filemap_fdatawait() will wait on the pages.
+	 *
+	 * data=journal:
+	 *  filemap_fdatawrite won't do anything (the buffers are clean).
+	 *  ext3_force_commit will write the file data into the journal and
+	 *  will wait on that.
+	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
+	 *  (they were dirtied by commit).  But that's OK - the blocks are
+	 *  safe in-journal, which is all fsync() needs to ensure.
+	 */
+	if (ext3_should_journal_data(inode)) {
+		ret = ext3_force_commit(inode->i_sb);
+		goto out;
+	}
+
+	/*
+	 * The VFS has written the file data.  If the inode is unaltered
+	 * then we need not start a commit.
+	 */
+	if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
+		struct writeback_control wbc = {
+			.sync_mode = WB_SYNC_ALL,
+			.nr_to_write = 0, /* sys_fsync did this */
+		};
+		ret = sync_inode(inode, &wbc);
+	}
+out:
+	return ret;
+}
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
new file mode 100644
index 0000000..5a2d123
--- /dev/null
+++ b/fs/ext3/hash.c
@@ -0,0 +1,152 @@
+/*
+ *  linux/fs/ext3/hash.c
+ *
+ * Copyright (C) 2002 by Theodore Ts'o
+ *
+ * This file is released under the GPL v2.
+ * 
+ * This file may be redistributed under the terms of the GNU Public
+ * License.
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/sched.h>
+#include <linux/ext3_fs.h>
+#include <linux/cryptohash.h>
+
+#define DELTA 0x9E3779B9
+
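+/*
+ * TEA_transform() -- mix the 16 bytes at @in into the first two words of
+ * @buf using 16 rounds of the Tiny Encryption Algorithm round function.
+ */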
+static void TEA_transform(__u32 buf[4], __u32 const in[])
+{
+	__u32	sum = 0;
+	__u32	b0 = buf[0], b1 = buf[1];
+	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
+	int	n = 16;
+
+	do {
+		sum += DELTA;
+		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
+		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
+	} while(--n);
+
+	buf[0] += b0;
+	buf[1] += b1;
+}
+
+
+/* The old legacy hash */
+static __u32 dx_hack_hash (const char *name, int len)
+{
+	__u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+	while (len--) {
+		__u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
+
+		if (hash & 0x80000000) hash -= 0x7fffffff;
+		hash1 = hash0;
+		hash0 = hash;
+	}
+	return (hash0 << 1);
+}
+
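+/*
+ * str2hashbuf() -- pack up to @num 32-bit words of @msg into @buf, padding
+ * short input with a value derived from the length, for use as input to
+ * the hash transforms used by ext3fs_dirhash().
+ */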
+static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
+{
+	__u32	pad, val;
+	int	i;
+
+	pad = (__u32)len | ((__u32)len << 8);
+	pad |= pad << 16;
+
+	val = pad;
+	if (len > num*4)
+		len = num * 4;
+	for (i=0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = pad;
+		val = msg[i] + (val << 8);
+		if ((i % 4) == 3) {
+			*buf++ = val;
+			val = pad;
+			num--;
+		}
+	}
+	if (--num >= 0)
+		*buf++ = val;
+	while (--num >= 0)
+		*buf++ = pad;
+}
+
+/*
+ * Returns the hash of a filename.  If len is 0 and name is NULL, then
+ * this function can be used to test whether or not a hash version is
+ * supported.
+ * 
+ * The seed is a 4-longword (32-bit) "secret" which can be used to
+ * uniquify a hash.  If the seed is all zeros, then some default seed
+ * may be used.
+ * 
+ * A particular hash version specifies whether or not the seed is
+ * represented, and whether or not the returned hash is 32 bits or 64
+ * bits.  32 bit hashes will return 0 for the minor hash.
+ */
+int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+{
+	__u32	hash;
+	__u32	minor_hash = 0;
+	const char	*p;
+	int		i;
+	__u32 		in[8], buf[4];
+
+	/* Initialize the default seed for the hash checksum functions */
+	buf[0] = 0x67452301;
+	buf[1] = 0xefcdab89;
+	buf[2] = 0x98badcfe;
+	buf[3] = 0x10325476;
+
+	/* Check to see if the seed is all zeros */
+	if (hinfo->seed) {
+		for (i=0; i < 4; i++) {
+			if (hinfo->seed[i])
+				break;
+		}
+		if (i < 4)
+			memcpy(buf, hinfo->seed, sizeof(buf));
+	}
+
+	switch (hinfo->hash_version) {
+	case DX_HASH_LEGACY:
+		hash = dx_hack_hash(name, len);
+		break;
+	case DX_HASH_HALF_MD4:
+		p = name;
+		while (len > 0) {
+			str2hashbuf(p, len, in, 8);
+			half_md4_transform(buf, in);
+			len -= 32;
+			p += 32;
+		}
+		minor_hash = buf[2];
+		hash = buf[1];
+		break;
+	case DX_HASH_TEA:
+		p = name;
+		while (len > 0) {
+			str2hashbuf(p, len, in, 4);
+			TEA_transform(buf, in);
+			len -= 16;
+			p += 16;
+		}
+		hash = buf[0];
+		minor_hash = buf[1];
+		break;
+	default:
+		hinfo->hash = 0;
+		return -1;
+	}
+	hash = hash & ~1;
+	if (hash == (EXT3_HTREE_EOF << 1))
+		hash = (EXT3_HTREE_EOF-1) << 1;
+	hinfo->hash = hash;
+	hinfo->minor_hash = minor_hash;
+	return 0;
+}
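+
+/*
+ * Usage sketch (illustrative only, not part of the original patch).  The
+ * htree directory code is expected to call this along the following lines,
+ * with the hash version and seed coming from the superblock; the
+ * s_hash_seed field and the exact call site are assumptions here - see
+ * namei.c for the real callers:
+ *
+ *	struct dx_hash_info hinfo;
+ *
+ *	hinfo.hash_version = DX_HASH_TEA;
+ *	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
+ *	if (ext3fs_dirhash(name, namelen, &hinfo) < 0)
+ *		return -1;
+ *
+ * On success hinfo.hash (and, for 64-bit hash versions, hinfo.minor_hash)
+ * identifies the htree leaf block to search for this name.
+ */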
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
new file mode 100644
index 0000000..1e6f3ea
--- /dev/null
+++ b/fs/ext3/ialloc.c
@@ -0,0 +1,794 @@
+/*
+ *  linux/fs/ext3/ialloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  BSD ufs-inspired inode and directory allocation by
+ *  Stephen Tweedie (sct@redhat.com), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/random.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * ialloc.c contains the inode allocation and deallocation routines
+ */
+
+/*
+ * The free inodes are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free blocks count in the group.
+ */
+
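+/*
+ * Rough per-group on-disk layout, for orientation only (whether a given
+ * group carries superblock/descriptor copies depends on the filesystem
+ * features):
+ *
+ *   [ superblock copy | group descriptors | block bitmap | inode bitmap |
+ *     inode table (N blocks) | data blocks ... ]
+ */
+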
+
+/*
+ * Read the inode allocation bitmap for a given block_group, reading
+ * into the specified slot in the superblock's bitmap cache.
+ *
+ * Return buffer_head of bitmap on success or NULL.
+ */
+static struct buffer_head *
+read_inode_bitmap(struct super_block * sb, unsigned long block_group)
+{
+	struct ext3_group_desc *desc;
+	struct buffer_head *bh = NULL;
+
+	desc = ext3_get_group_desc(sb, block_group, NULL);
+	if (!desc)
+		goto error_out;
+
+	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
+	if (!bh)
+		ext3_error(sb, "read_inode_bitmap",
+			    "Cannot read inode bitmap - "
+			    "block_group = %lu, inode_bitmap = %u",
+			    block_group, le32_to_cpu(desc->bg_inode_bitmap));
+error_out:
+	return bh;
+}
+
+/*
+ * NOTE! When we get the inode, we're the only people
+ * that have access to it, and as such there are no
+ * race conditions we have to worry about. The inode
+ * is not on the hash-lists, and it cannot be reached
+ * through the filesystem because the directory entry
+ * has been deleted earlier.
+ *
+ * HOWEVER: we must make sure that we get no aliases,
+ * which means that we have to call "clear_inode()"
+ * _before_ we mark the inode not in use in the inode
+ * bitmaps. Otherwise a newly created file might use
+ * the same inode number (not actually the same pointer
+ * though), and then we'd have two inodes sharing the
+ * same inode number and space on the hard disk.
+ */
+void ext3_free_inode (handle_t *handle, struct inode * inode)
+{
+	struct super_block * sb = inode->i_sb;
+	int is_directory;
+	unsigned long ino;
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *bh2;
+	unsigned long block_group;
+	unsigned long bit;
+	struct ext3_group_desc * gdp;
+	struct ext3_super_block * es;
+	struct ext3_sb_info *sbi;
+	int fatal = 0, err;
+
+	if (atomic_read(&inode->i_count) > 1) {
+		printk ("ext3_free_inode: inode has count=%d\n",
+					atomic_read(&inode->i_count));
+		return;
+	}
+	if (inode->i_nlink) {
+		printk ("ext3_free_inode: inode has nlink=%d\n",
+			inode->i_nlink);
+		return;
+	}
+	if (!sb) {
+		printk("ext3_free_inode: inode on nonexistent device\n");
+		return;
+	}
+	sbi = EXT3_SB(sb);
+
+	ino = inode->i_ino;
+	ext3_debug ("freeing inode %lu\n", ino);
+
+	/*
+	 * Note: we must free any quota before locking the superblock,
+	 * as writing the quota to disk may need the lock as well.
+	 */
+	DQUOT_INIT(inode);
+	ext3_xattr_delete_inode(handle, inode);
+	DQUOT_FREE_INODE(inode);
+	DQUOT_DROP(inode);
+
+	is_directory = S_ISDIR(inode->i_mode);
+
+	/* Do this BEFORE marking the inode not in use or returning an error */
+	clear_inode (inode);
+
+	es = EXT3_SB(sb)->s_es;
+	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+		ext3_error (sb, "ext3_free_inode",
+			    "reserved or nonexistent inode %lu", ino);
+		goto error_return;
+	}
+	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
+	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
+	bitmap_bh = read_inode_bitmap(sb, block_group);
+	if (!bitmap_bh)
+		goto error_return;
+
+	BUFFER_TRACE(bitmap_bh, "get_write_access");
+	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
+	if (fatal)
+		goto error_return;
+
+	/* Ok, now we can actually update the inode bitmaps.. */
+	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+					bit, bitmap_bh->b_data))
+		ext3_error (sb, "ext3_free_inode",
+			      "bit already cleared for inode %lu", ino);
+	else {
+		gdp = ext3_get_group_desc (sb, block_group, &bh2);
+
+		BUFFER_TRACE(bh2, "get_write_access");
+		fatal = ext3_journal_get_write_access(handle, bh2);
+		if (fatal) goto error_return;
+
+		if (gdp) {
+			spin_lock(sb_bgl_lock(sbi, block_group));
+			gdp->bg_free_inodes_count = cpu_to_le16(
+				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
+			if (is_directory)
+				gdp->bg_used_dirs_count = cpu_to_le16(
+				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+			spin_unlock(sb_bgl_lock(sbi, block_group));
+			percpu_counter_inc(&sbi->s_freeinodes_counter);
+			if (is_directory)
+				percpu_counter_dec(&sbi->s_dirs_counter);
+
+		}
+		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, bh2);
+		if (!fatal) fatal = err;
+	}
+	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
+	if (!fatal)
+		fatal = err;
+	sb->s_dirt = 1;
+error_return:
+	brelse(bitmap_bh);
+	ext3_std_error(sb, fatal);
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, then, of
+ * the groups with above-average free space, the one with the fewest
+ * directories is chosen.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+static int find_group_dir(struct super_block *sb, struct inode *parent)
+{
+	int ngroups = EXT3_SB(sb)->s_groups_count;
+	int freei, avefreei;
+	struct ext3_group_desc *desc, *best_desc = NULL;
+	struct buffer_head *bh;
+	int group, best_group = -1;
+
+	freei = percpu_counter_read_positive(&EXT3_SB(sb)->s_freeinodes_counter);
+	avefreei = freei / ngroups;
+
+	for (group = 0; group < ngroups; group++) {
+		desc = ext3_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+			continue;
+		if (!best_desc || 
+		    (le16_to_cpu(desc->bg_free_blocks_count) >
+		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
+			best_group = group;
+			best_desc = desc;
+		}
+	}
+	return best_group;
+}
+
+/*
+ * Orlov's allocator for directories.
+ *
+ * We always try to spread first-level directories.
+ *
+ * If there are block groups with both free inode and free block counts
+ * not worse than average, we return the one with the smallest directory
+ * count.  Otherwise we simply return a random group.
+ *
+ * For the remaining directories the rules are as follows:
+ *
+ * It's OK to put a directory into a group unless
+ * it has too many directories already (max_dirs) or
+ * it has too few free inodes left (min_inodes) or
+ * it has too few free blocks left (min_blocks) or
+ * it is already carrying too large a debt (max_debt).
+ * The parent's group is preferred; if it doesn't satisfy these
+ * conditions we search cyclically through the rest.  If none
+ * of the groups looks good we just look for a group with more
+ * free inodes than average (starting at the parent's group).
+ *
+ * Debt is incremented each time we allocate a directory and decremented
+ * when we allocate an inode, within 0--255.
+ */
+
+#define INODE_COST 64
+#define BLOCK_COST 256
+
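+/*
+ * Worked example (figures are illustrative only): with 32768 blocks and
+ * 8192 inodes per group, and blocks_per_dir below BLOCK_COST, the code
+ * below computes max_debt = 32768 / 256 = 128; since 128 * INODE_COST
+ * equals the 8192 inodes per group it is not reduced further, and it
+ * already lies within the 1..255 clamp.
+ */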
+static int find_group_orlov(struct super_block *sb, struct inode *parent)
+{
+	int parent_group = EXT3_I(parent)->i_block_group;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_super_block *es = sbi->s_es;
+	int ngroups = sbi->s_groups_count;
+	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
+	int freei, avefreei;
+	int freeb, avefreeb;
+	int blocks_per_dir, ndirs;
+	int max_debt, max_dirs, min_blocks, min_inodes;
+	int group = -1, i;
+	struct ext3_group_desc *desc;
+	struct buffer_head *bh;
+
+	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
+	avefreei = freei / ngroups;
+	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+	avefreeb = freeb / ngroups;
+	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
+
+	if ((parent == sb->s_root->d_inode) ||
+	    (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
+		int best_ndir = inodes_per_group;
+		int best_group = -1;
+
+		get_random_bytes(&group, sizeof(group));
+		parent_group = (unsigned)group % ngroups;
+		for (i = 0; i < ngroups; i++) {
+			group = (parent_group + i) % ngroups;
+			desc = ext3_get_group_desc (sb, group, &bh);
+			if (!desc || !desc->bg_free_inodes_count)
+				continue;
+			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
+				continue;
+			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+				continue;
+			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
+				continue;
+			best_group = group;
+			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
+		}
+		if (best_group >= 0)
+			return best_group;
+		goto fallback;
+	}
+
+	blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
+
+	max_dirs = ndirs / ngroups + inodes_per_group / 16;
+	min_inodes = avefreei - inodes_per_group / 4;
+	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
+
+	max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST);
+	if (max_debt * INODE_COST > inodes_per_group)
+		max_debt = inodes_per_group / INODE_COST;
+	if (max_debt > 255)
+		max_debt = 255;
+	if (max_debt == 0)
+		max_debt = 1;
+
+	for (i = 0; i < ngroups; i++) {
+		group = (parent_group + i) % ngroups;
+		desc = ext3_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
+			continue;
+		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
+			continue;
+		return group;
+	}
+
+fallback:
+	for (i = 0; i < ngroups; i++) {
+		group = (parent_group + i) % ngroups;
+		desc = ext3_get_group_desc (sb, group, &bh);
+		if (!desc || !desc->bg_free_inodes_count)
+			continue;
+		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
+			return group;
+	}
+
+	if (avefreei) {
+		/*
+		 * The free-inodes counter is approximate, and for really small
+		 * filesystems the above test can fail to find any blockgroups.
+		 */
+		avefreei = 0;
+		goto fallback;
+	}
+
+	return -1;
+}
+
+static int find_group_other(struct super_block *sb, struct inode *parent)
+{
+	int parent_group = EXT3_I(parent)->i_block_group;
+	int ngroups = EXT3_SB(sb)->s_groups_count;
+	struct ext3_group_desc *desc;
+	struct buffer_head *bh;
+	int group, i;
+
+	/*
+	 * Try to place the inode in its parent directory
+	 */
+	group = parent_group;
+	desc = ext3_get_group_desc (sb, group, &bh);
+	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+			le16_to_cpu(desc->bg_free_blocks_count))
+		return group;
+
+	/*
+	 * We're going to place this inode in a different blockgroup from its
+	 * parent.  We want to cause files in a common directory to all land in
+	 * the same blockgroup.  But we want files which are in a different
+	 * directory which shares a blockgroup with our parent to land in a
+	 * different blockgroup.
+	 *
+	 * So add our directory's i_ino into the starting point for the hash.
+	 */
+	group = (group + parent->i_ino) % ngroups;
+
+	/*
+	 * Use a quadratic hash to find a group with a free inode and some free
+	 * blocks.
+	 */
+	for (i = 1; i < ngroups; i <<= 1) {
+		group += i;
+		if (group >= ngroups)
+			group -= ngroups;
+		desc = ext3_get_group_desc (sb, group, &bh);
+		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+				le16_to_cpu(desc->bg_free_blocks_count))
+			return group;
+	}
+
+	/*
+	 * That failed: try linear search for a free inode, even if that group
+	 * has no free blocks.
+	 */
+	group = parent_group;
+	for (i = 0; i < ngroups; i++) {
+		if (++group >= ngroups)
+			group = 0;
+		desc = ext3_get_group_desc (sb, group, &bh);
+		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+			return group;
+	}
+
+	return -1;
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, then, of
+ * the groups with above-average free space, the one with the fewest
+ * directories is chosen.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
+{
+	struct super_block *sb;
+	struct buffer_head *bitmap_bh = NULL;
+	struct buffer_head *bh2;
+	int group;
+	unsigned long ino = 0;
+	struct inode * inode;
+	struct ext3_group_desc * gdp = NULL;
+	struct ext3_super_block * es;
+	struct ext3_inode_info *ei;
+	struct ext3_sb_info *sbi;
+	int err = 0;
+	struct inode *ret;
+	int i;
+
+	/* Cannot create files in a deleted directory */
+	if (!dir || !dir->i_nlink)
+		return ERR_PTR(-EPERM);
+
+	sb = dir->i_sb;
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	ei = EXT3_I(inode);
+
+	sbi = EXT3_SB(sb);
+	es = sbi->s_es;
+	if (S_ISDIR(mode)) {
+		if (test_opt (sb, OLDALLOC))
+			group = find_group_dir(sb, dir);
+		else
+			group = find_group_orlov(sb, dir);
+	} else 
+		group = find_group_other(sb, dir);
+
+	err = -ENOSPC;
+	if (group == -1)
+		goto out;
+
+	for (i = 0; i < sbi->s_groups_count; i++) {
+		err = -EIO;
+
+		gdp = ext3_get_group_desc(sb, group, &bh2);
+		if (!gdp)
+			goto fail;
+
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, group);
+		if (!bitmap_bh)
+			goto fail;
+
+		ino = 0;
+
+repeat_in_this_group:
+		ino = ext3_find_next_zero_bit((unsigned long *)
+				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
+		if (ino < EXT3_INODES_PER_GROUP(sb)) {
+
+			BUFFER_TRACE(bitmap_bh, "get_write_access");
+			err = ext3_journal_get_write_access(handle, bitmap_bh);
+			if (err)
+				goto fail;
+
+			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
+						ino, bitmap_bh->b_data)) {
+				/* we won it */
+				BUFFER_TRACE(bitmap_bh,
+					"call ext3_journal_dirty_metadata");
+				err = ext3_journal_dirty_metadata(handle,
+								bitmap_bh);
+				if (err)
+					goto fail;
+				goto got;
+			}
+			/* we lost it */
+			journal_release_buffer(handle, bitmap_bh);
+
+			if (++ino < EXT3_INODES_PER_GROUP(sb))
+				goto repeat_in_this_group;
+		}
+
+		/*
+		 * This case is possible in a concurrent environment.  It is very
+		 * rare.  We cannot repeat the find_group_xxx() call because
+		 * that will simply return the same blockgroup, because the
+		 * group descriptor metadata has not yet been updated.
+		 * So we just go on to the next blockgroup.
+		 */
+		if (++group == sbi->s_groups_count)
+			group = 0;
+	}
+	err = -ENOSPC;
+	goto out;
+
+got:
+	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
+	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+		ext3_error (sb, "ext3_new_inode",
+			    "reserved inode or inode > inodes count - "
+			    "block_group = %d, inode=%lu", group, ino);
+		err = -EIO;
+		goto fail;
+	}
+
+	BUFFER_TRACE(bh2, "get_write_access");
+	err = ext3_journal_get_write_access(handle, bh2);
+	if (err) goto fail;
+	spin_lock(sb_bgl_lock(sbi, group));
+	gdp->bg_free_inodes_count =
+		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+	if (S_ISDIR(mode)) {
+		gdp->bg_used_dirs_count =
+			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+	}
+	spin_unlock(sb_bgl_lock(sbi, group));
+	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+	err = ext3_journal_dirty_metadata(handle, bh2);
+	if (err) goto fail;
+
+	percpu_counter_dec(&sbi->s_freeinodes_counter);
+	if (S_ISDIR(mode))
+		percpu_counter_inc(&sbi->s_dirs_counter);
+	sb->s_dirt = 1;
+
+	inode->i_uid = current->fsuid;
+	if (test_opt (sb, GRPID))
+		inode->i_gid = dir->i_gid;
+	else if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+	inode->i_mode = mode;
+
+	inode->i_ino = ino;
+	/* This is the optimal IO size (for stat), not the fs block size */
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = 0;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+
+	memset(ei->i_data, 0, sizeof(ei->i_data));
+	ei->i_dir_start_lookup = 0;
+	ei->i_disksize = 0;
+
+	ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
+	if (S_ISLNK(mode))
+		ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
+	/* dirsync only applies to directories */
+	if (!S_ISDIR(mode))
+		ei->i_flags &= ~EXT3_DIRSYNC_FL;
+#ifdef EXT3_FRAGMENTS
+	ei->i_faddr = 0;
+	ei->i_frag_no = 0;
+	ei->i_frag_size = 0;
+#endif
+	ei->i_file_acl = 0;
+	ei->i_dir_acl = 0;
+	ei->i_dtime = 0;
+	ei->i_block_alloc_info = NULL;
+	ei->i_block_group = group;
+
+	ext3_set_inode_flags(inode);
+	if (IS_DIRSYNC(inode))
+		handle->h_sync = 1;
+	insert_inode_hash(inode);
+	spin_lock(&sbi->s_next_gen_lock);
+	inode->i_generation = sbi->s_next_generation++;
+	spin_unlock(&sbi->s_next_gen_lock);
+
+	ei->i_state = EXT3_STATE_NEW;
+	ei->i_extra_isize =
+		(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
+		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
+
+	ret = inode;
+	if(DQUOT_ALLOC_INODE(inode)) {
+		DQUOT_DROP(inode);
+		err = -EDQUOT;
+		goto fail2;
+	}
+	err = ext3_init_acl(handle, inode, dir);
+	if (err) {
+		DQUOT_FREE_INODE(inode);
+		goto fail2;
+  	}
+	err = ext3_mark_inode_dirty(handle, inode);
+	if (err) {
+		ext3_std_error(sb, err);
+		DQUOT_FREE_INODE(inode);
+		goto fail2;
+	}
+
+	ext3_debug("allocating inode %lu\n", inode->i_ino);
+	goto really_out;
+fail:
+	ext3_std_error(sb, err);
+out:
+	iput(inode);
+	ret = ERR_PTR(err);
+really_out:
+	brelse(bitmap_bh);
+	return ret;
+
+fail2:
+	inode->i_flags |= S_NOQUOTA;
+	inode->i_nlink = 0;
+	iput(inode);
+	brelse(bitmap_bh);
+	return ERR_PTR(err);
+}
+
+/* Verify that we are loading a valid orphan from disk */
+struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
+{
+	unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
+	unsigned long block_group;
+	int bit;
+	struct buffer_head *bitmap_bh = NULL;
+	struct inode *inode = NULL;
+
+	/* Error cases - e2fsck has already cleaned up for us */
+	if (ino > max_ino) {
+		ext3_warning(sb, __FUNCTION__,
+			     "bad orphan ino %lu!  e2fsck was run?\n", ino);
+		goto out;
+	}
+
+	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
+	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
+	bitmap_bh = read_inode_bitmap(sb, block_group);
+	if (!bitmap_bh) {
+		ext3_warning(sb, __FUNCTION__,
+			     "inode bitmap error for orphan %lu\n", ino);
+		goto out;
+	}
+
+	/* Having the inode bit set should be a 100% indicator that this
+	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
+	 * inodes that were being truncated, so we can't check i_nlink==0.
+	 */
+	if (!ext3_test_bit(bit, bitmap_bh->b_data) ||
+			!(inode = iget(sb, ino)) || is_bad_inode(inode) ||
+			NEXT_ORPHAN(inode) > max_ino) {
+		ext3_warning(sb, __FUNCTION__,
+			     "bad orphan inode %lu!  e2fsck was run?\n", ino);
+		printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
+		       bit, (unsigned long long)bitmap_bh->b_blocknr,
+		       ext3_test_bit(bit, bitmap_bh->b_data));
+		printk(KERN_NOTICE "inode=%p\n", inode);
+		if (inode) {
+			printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+			       is_bad_inode(inode));
+			printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+			       NEXT_ORPHAN(inode));
+			printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
+		}
+		/* Avoid freeing blocks if we got a bad deleted inode */
+		if (inode && inode->i_nlink == 0)
+			inode->i_blocks = 0;
+		iput(inode);
+		inode = NULL;
+	}
+out:
+	brelse(bitmap_bh);
+	return inode;
+}
+
+unsigned long ext3_count_free_inodes (struct super_block * sb)
+{
+	unsigned long desc_count;
+	struct ext3_group_desc *gdp;
+	int i;
+#ifdef EXT3FS_DEBUG
+	struct ext3_super_block *es;
+	unsigned long bitmap_count, x;
+	struct buffer_head *bitmap_bh = NULL;
+
+	lock_super (sb);
+	es = EXT3_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	gdp = NULL;
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		gdp = ext3_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+
+		x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
+		printk("group %d: stored = %d, counted = %lu\n",
+			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n",
+		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
+	unlock_super(sb);
+	return desc_count;
+#else
+	desc_count = 0;
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		gdp = ext3_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+		cond_resched();
+	}
+	return desc_count;
+#endif
+}
+
+/* Called at mount-time, super-block is locked */
+unsigned long ext3_count_dirs (struct super_block * sb)
+{
+	unsigned long count = 0;
+	int i;
+
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		struct ext3_group_desc *gdp = ext3_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		count += le16_to_cpu(gdp->bg_used_dirs_count);
+	}
+	return count;
+}
+
+#ifdef CONFIG_EXT3_CHECK
+/* Called at mount-time, super-block is locked */
+void ext3_check_inodes_bitmap (struct super_block * sb)
+{
+	struct ext3_super_block * es;
+	unsigned long desc_count, bitmap_count, x;
+	struct buffer_head *bitmap_bh = NULL;
+	struct ext3_group_desc * gdp;
+	int i;
+
+	es = EXT3_SB(sb)->s_es;
+	desc_count = 0;
+	bitmap_count = 0;
+	gdp = NULL;
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+		gdp = ext3_get_group_desc (sb, i, NULL);
+		if (!gdp)
+			continue;
+		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+		brelse(bitmap_bh);
+		bitmap_bh = read_inode_bitmap(sb, i);
+		if (!bitmap_bh)
+			continue;
+
+		x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
+		if (le16_to_cpu(gdp->bg_free_inodes_count) != x)
+			ext3_error (sb, "ext3_check_inodes_bitmap",
+				    "Wrong free inodes count in group %d, "
+				    "stored = %d, counted = %lu", i,
+				    le16_to_cpu(gdp->bg_free_inodes_count), x);
+		bitmap_count += x;
+	}
+	brelse(bitmap_bh);
+	if (le32_to_cpu(es->s_free_inodes_count) != bitmap_count)
+		ext3_error (sb, "ext3_check_inodes_bitmap",
+			    "Wrong free inodes count in super block, "
+			    "stored = %lu, counted = %lu",
+			    (unsigned long)le32_to_cpu(es->s_free_inodes_count),
+			    bitmap_count);
+}
+#endif
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
new file mode 100644
index 0000000..040eb28
--- /dev/null
+++ b/fs/ext3/inode.c
@@ -0,0 +1,3132 @@
+/*
+ *  linux/fs/ext3/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Goal-directed block allocation by Stephen Tweedie
+ * 	(sct@redhat.com), 1993, 1998
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ * 	(jj@sunsite.ms.mff.cuni.cz)
+ *
+ *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/ext3_jbd.h>
+#include <linux/jbd.h>
+#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/uio.h>
+#include "xattr.h"
+#include "acl.h"
+
+static int ext3_writepage_trans_blocks(struct inode *inode);
+
+/*
+ * Test whether an inode is a fast symlink.
+ */
+static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+{
+	int ea_blocks = EXT3_I(inode)->i_file_acl ?
+		(inode->i_sb->s_blocksize >> 9) : 0;
+
+	return (S_ISLNK(inode->i_mode) &&
+		inode->i_blocks - ea_blocks == 0);
+}
+
+/* The ext3 forget function must perform a revoke if we are freeing data
+ * which has been journaled.  Metadata (e.g. indirect blocks) must be
+ * revoked in all cases.
+ *
+ * "bh" may be NULL: a metadata block may have been freed from memory
+ * but there may still be a record of it in the journal, and that record
+ * still needs to be revoked.
+ */
+
+int ext3_forget(handle_t *handle, int is_metadata,
+		       struct inode *inode, struct buffer_head *bh,
+		       int blocknr)
+{
+	int err;
+
+	might_sleep();
+
+	BUFFER_TRACE(bh, "enter");
+
+	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
+		  "data mode %lx\n",
+		  bh, is_metadata, inode->i_mode,
+		  test_opt(inode->i_sb, DATA_FLAGS));
+
+	/* Never use the revoke function if we are doing full data
+	 * journaling: there is no need to, and a V1 superblock won't
+	 * support it.  Otherwise, only skip the revoke on un-journaled
+	 * data blocks. */
+
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
+	    (!is_metadata && !ext3_should_journal_data(inode))) {
+		if (bh) {
+			BUFFER_TRACE(bh, "call journal_forget");
+			return ext3_journal_forget(handle, bh);
+		}
+		return 0;
+	}
+
+	/*
+	 * data!=journal && (is_metadata || should_journal_data(inode))
+	 */
+	BUFFER_TRACE(bh, "call ext3_journal_revoke");
+	err = ext3_journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext3_abort(inode->i_sb, __FUNCTION__,
+			   "error %d when attempting revoke", err);
+	BUFFER_TRACE(bh, "exit");
+	return err;
+}
+
+/*
+ * Work out how many blocks we need to progress with the next chunk of a
+ * truncate transaction.
+ */
+
+static unsigned long blocks_for_truncate(struct inode *inode) 
+{
+	unsigned long needed;
+
+	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
+
+	/* Give ourselves just enough room to cope with inodes in which
+	 * i_blocks is corrupt: we've seen disk corruptions in the past
+	 * which resulted in random data in an inode which looked enough
+	 * like a regular file for ext3 to try to delete it.  Things
+	 * will go a bit crazy if that happens, but at least we should
+	 * try not to panic the whole kernel. */
+	if (needed < 2)
+		needed = 2;
+
+	/* But we need to bound the transaction so we don't overflow the
+	 * journal. */
+	if (needed > EXT3_MAX_TRANS_DATA) 
+		needed = EXT3_MAX_TRANS_DATA;
+
+	return EXT3_DATA_TRANS_BLOCKS + needed;
+}
+
+/* 
+ * Truncate transactions can be complex and absolutely huge.  So we need to
+ * be able to restart the transaction at a convenient checkpoint to make
+ * sure we don't overflow the journal.
+ *
+ * start_transaction gets us a new handle for a truncate transaction,
+ * and extend_transaction tries to extend the existing one a bit.  If
+ * extend fails, we need to propagate the failure up and restart the
+ * transaction in the top-level truncate loop. --sct 
+ */
+
+static handle_t *start_transaction(struct inode *inode) 
+{
+	handle_t *result;
+
+	result = ext3_journal_start(inode, blocks_for_truncate(inode));
+	if (!IS_ERR(result))
+		return result;
+
+	ext3_std_error(inode->i_sb, PTR_ERR(result));
+	return result;
+}
+
+/*
+ * Try to extend this transaction for the purposes of truncation.
+ *
+ * Returns 0 if we managed to create more room.  If we can't create more
+ * room, and the transaction must be restarted we return 1.
+ */
+static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+{
+	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
+		return 0;
+	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
+		return 0;
+	return 1;
+}
+
+/*
+ * Restart the transaction associated with *handle.  This does a commit,
+ * so before we call here everything must be consistently dirtied against
+ * this transaction.
+ */
+static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
+{
+	jbd_debug(2, "restarting handle %p\n", handle);
+	return ext3_journal_restart(handle, blocks_for_truncate(inode));
+}
+
+/*
+ * Called at the last iput() if i_nlink is zero.
+ */
+void ext3_delete_inode (struct inode * inode)
+{
+	handle_t *handle;
+
+	if (is_bad_inode(inode))
+		goto no_delete;
+
+	handle = start_transaction(inode);
+	if (IS_ERR(handle)) {
+		/* If we're going to skip the normal cleanup, we still
+		 * need to make sure that the in-core orphan linked list
+		 * is properly cleaned up. */
+		ext3_orphan_del(NULL, inode);
+		goto no_delete;
+	}
+
+	if (IS_SYNC(inode))
+		handle->h_sync = 1;
+	inode->i_size = 0;
+	if (inode->i_blocks)
+		ext3_truncate(inode);
+	/*
+	 * Kill off the orphan record which ext3_truncate created.
+	 * AKPM: I think this can be inside the above `if'.
+	 * Note that ext3_orphan_del() has to be able to cope with the
+	 * deletion of a non-existent orphan - this is because we don't
+	 * know if ext3_truncate() actually created an orphan record.
+	 * (Well, we could do this if we need to, but heck - it works)
+	 */
+	ext3_orphan_del(handle, inode);
+	EXT3_I(inode)->i_dtime	= get_seconds();
+
+	/* 
+	 * One subtle ordering requirement: if anything has gone wrong
+	 * (transaction abort, IO errors, whatever), then we can still
+	 * do these next steps (the fs will already have been marked as
+	 * having errors), but we can't free the inode if the mark_dirty
+	 * fails.  
+	 */
+	if (ext3_mark_inode_dirty(handle, inode))
+		/* If that failed, just do the required in-core inode clear. */
+		clear_inode(inode);
+	else
+		ext3_free_inode(handle, inode);
+	ext3_journal_stop(handle);
+	return;
+no_delete:
+	clear_inode(inode);	/* We must guarantee clearing of inode... */
+}
+
+static int ext3_alloc_block (handle_t *handle,
+			struct inode * inode, unsigned long goal, int *err)
+{
+	unsigned long result;
+
+	result = ext3_new_block(handle, inode, goal, err);
+	return result;
+}
+
+
+typedef struct {
+	__le32	*p;
+	__le32	key;
+	struct buffer_head *bh;
+} Indirect;
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
+{
+	p->key = *(p->p = v);
+	p->bh = bh;
+}
+
+static inline int verify_chain(Indirect *from, Indirect *to)
+{
+	while (from <= to && from->key == *from->p)
+		from++;
+	return (from > to);
+}
+
+/**
+ *	ext3_block_to_path - parse the block number into array of offsets
+ *	@inode: inode in question (we are only interested in its superblock)
+ *	@i_block: block number to be parsed
+ *	@offsets: array to store the offsets in
+ *      @boundary: set this non-zero if the referred-to block is likely to be
+ *             followed (on disk) by an indirect block.
+ *
+ *	To store the locations of a file's data, ext3 uses a data structure
+ *	common to UNIX filesystems - a tree of pointers anchored in the inode,
+ *	with data blocks at the leaves and indirect blocks in intermediate
+ *	nodes.  This function translates the block number into a path in that
+ *	tree - the return value is the path length and @offsets[n] is the
+ *	offset of the pointer to the (n+1)th node in the nth one.  If @block
+ *	is out of range (negative or too large), a warning is printed and
+ *	zero is returned.
+ *
+ *	Note: function doesn't find node addresses, so no IO is needed. All
+ *	we need to know is the capacity of indirect blocks (taken from the
+ *	inode->i_sb).
+ */
+
+/*
+ * Portability note: the last comparison (check that we fit into triple
+ * indirect block) is spelled differently, because otherwise on an
+ * architecture with 32-bit longs and 8Kb pages we might get into trouble
+ * if our filesystem had 8Kb blocks. We might use long long, but that would
+ * kill us on x86. Oh, well, at least the sign propagation does not matter -
+ * i_block would have to be negative in the very beginning, so we would not
+ * get there at all.
+ */
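+
+/*
+ * Worked example (assuming 4KB blocks, i.e. 1024 block pointers per
+ * indirect block and EXT3_NDIR_BLOCKS == 12): logical block 5 maps to
+ * offsets = { 5 } (depth 1, a direct block), while logical block 1100
+ * maps to offsets = { EXT3_DIND_BLOCK, 0, 64 } (depth 3), because
+ * 1100 - 12 - 1024 = 64 falls in the double-indirect range.
+ */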
+
+static int ext3_block_to_path(struct inode *inode,
+			long i_block, int offsets[4], int *boundary)
+{
+	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
+	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
+	const long direct_blocks = EXT3_NDIR_BLOCKS,
+		indirect_blocks = ptrs,
+		double_blocks = (1 << (ptrs_bits * 2));
+	int n = 0;
+	int final = 0;
+
+	if (i_block < 0) {
+		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
+	} else if (i_block < direct_blocks) {
+		offsets[n++] = i_block;
+		final = direct_blocks;
+	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
+		offsets[n++] = EXT3_IND_BLOCK;
+		offsets[n++] = i_block;
+		final = ptrs;
+	} else if ((i_block -= indirect_blocks) < double_blocks) {
+		offsets[n++] = EXT3_DIND_BLOCK;
+		offsets[n++] = i_block >> ptrs_bits;
+		offsets[n++] = i_block & (ptrs - 1);
+		final = ptrs;
+	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+		offsets[n++] = EXT3_TIND_BLOCK;
+		offsets[n++] = i_block >> (ptrs_bits * 2);
+		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+		offsets[n++] = i_block & (ptrs - 1);
+		final = ptrs;
+	} else {
+		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
+	}
+	if (boundary)
+		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+	return n;
+}
+
+/**
+ *	ext3_get_branch - read the chain of indirect blocks leading to data
+ *	@inode: inode in question
+ *	@depth: depth of the chain (1 - direct pointer, etc.)
+ *	@offsets: offsets of pointers in inode/indirect blocks
+ *	@chain: place to store the result
+ *	@err: here we store the error value
+ *
+ *	Function fills the array of triples <key, p, bh> and returns %NULL
+ *	if everything went OK or the pointer to the last filled triple
+ *	(incomplete one) otherwise. Upon the return chain[i].key contains
+ *	the number of (i+1)-th block in the chain (as it is stored in memory,
+ *	i.e. little-endian 32-bit), chain[i].p contains the address of that
+ *	number (it points into struct inode for i==0 and into the bh->b_data
+ *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
+ *	block for i>0 and NULL for i==0. In other words, it holds the block
+ *	numbers of the chain, addresses they were taken from (and where we can
+ *	verify that chain did not change) and buffer_heads hosting these
+ *	numbers.
+ *
+ *	Function stops when it stumbles upon zero pointer (absent block)
+ *		(pointer to last triple returned, *@err == 0)
+ *	or when it gets an IO error reading an indirect block
+ *		(ditto, *@err == -EIO)
+ *	or when it notices that chain had been changed while it was reading
+ *		(ditto, *@err == -EAGAIN)
+ *	or when it reads all @depth-1 indirect blocks successfully and finds
+ *	the whole chain, all way to the data (returns %NULL, *err == 0).
+ */
+static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
+				 Indirect chain[4], int *err)
+{
+	struct super_block *sb = inode->i_sb;
+	Indirect *p = chain;
+	struct buffer_head *bh;
+
+	*err = 0;
+	/* i_data is not going away, no lock needed */
+	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
+	if (!p->key)
+		goto no_block;
+	while (--depth) {
+		bh = sb_bread(sb, le32_to_cpu(p->key));
+		if (!bh)
+			goto failure;
+		/* Reader: pointers */
+		if (!verify_chain(chain, p))
+			goto changed;
+		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
+		/* Reader: end */
+		if (!p->key)
+			goto no_block;
+	}
+	return NULL;
+
+changed:
+	brelse(bh);
+	*err = -EAGAIN;
+	goto no_block;
+failure:
+	*err = -EIO;
+no_block:
+	return p;
+}
+
+/**
+ *	ext3_find_near - find a place for allocation with sufficient locality
+ *	@inode: owner
+ *	@ind: descriptor of indirect block.
+ *
+ *	This function returns the preferred place for block allocation.
+ *	It is used when the heuristic for sequential allocation fails.
+ *	Rules are:
+ *	  + if there is a block to the left of our position - allocate near it.
+ *	  + if pointer will live in indirect block - allocate near that block.
+ *	  + if pointer will live in inode - allocate in the same
+ *	    cylinder group. 
+ *
+ * In the latter case we colour the starting block by the caller's PID to
+ * prevent it from clashing with concurrent allocations for a different inode
+ * in the same block group.   The PID is used here so that functionally related
+ * files will be close-by on-disk.
+ *
+ *	Caller must make sure that @ind is valid and will stay that way.
+ */
+
+static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+	__le32 *p;
+	unsigned long bg_start;
+	unsigned long colour;
+
+	/* Try to find previous block */
+	for (p = ind->p - 1; p >= start; p--)
+		if (*p)
+			return le32_to_cpu(*p);
+
+	/* No such thing, so let's try location of indirect block */
+	if (ind->bh)
+		return ind->bh->b_blocknr;
+
+	/*
+	 * Is it going to be referred to from the inode itself?  OK, just put
+	 * it into the same cylinder group then.
+	 */
+	bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
+		le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
+	colour = (current->pid % 16) *
+			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+	return bg_start + colour;
+}
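+
+/*
+ * For illustration (not part of the original code): with 32768 blocks per
+ * group the colouring above spreads callers across 16 slots of 2048 blocks
+ * each, e.g. a task with pid 1234 (1234 % 16 == 2) gets a starting goal of
+ * bg_start + 4096.
+ */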
+
+/**
+ *	ext3_find_goal - find a preferred place for allocation.
+ *	@inode: owner
+ *	@block:  block we want
+ *	@chain:  chain of indirect blocks
+ *	@partial: pointer to the last triple within a chain
+ *	@goal:	place to store the result.
+ *
+ *	Normally this function finds the preferred place for block allocation,
+ *	stores it in *@goal and returns zero.  If the branch had been changed
+ *	under us we return -EAGAIN.
+ */
+
+static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
+			  Indirect *partial, unsigned long *goal)
+{
+	struct ext3_block_alloc_info *block_i =  EXT3_I(inode)->i_block_alloc_info;
+
+	/*
+	 * try the heuristic for sequential allocation,
+	 * failing that at least try to get decent locality.
+	 */
+	if (block_i && (block == block_i->last_alloc_logical_block + 1)
+		&& (block_i->last_alloc_physical_block != 0)) {
+		*goal = block_i->last_alloc_physical_block + 1;
+		return 0;
+	}
+
+	if (verify_chain(chain, partial)) {
+		*goal = ext3_find_near(inode, partial);
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+/**
+ *	ext3_alloc_branch - allocate and set up a chain of blocks.
+ *	@inode: owner
+ *	@num: depth of the chain (number of blocks to allocate)
+ *	@offsets: offsets (in the blocks) to store the pointers to next.
+ *	@branch: place to store the chain in.
+ *
+ *	This function allocates @num blocks, zeroes out all but the last one,
+ *	links them into chain and (if we are synchronous) writes them to disk.
+ *	In other words, it prepares a branch that can be spliced onto the
+ *	inode. It stores the information about that chain in the branch[], in
+ *	the same format as ext3_get_branch() would do.  We are calling it after
+ *	we have read the existing part of the chain and partial points to the
+ *	last triple of that (the one with zero ->key).  Upon exit we have the
+ *	same picture as after a successful ext3_get_block(), except that in one
+ *	place chain is disconnected - *branch->p is still zero (we did not
+ *	set the last link), but branch->key contains the number that should
+ *	be placed into *branch->p to fill that gap.
+ *
+ *	If allocation fails we free all blocks we've allocated (and forget
+ *	their buffer_heads) and return the error value from the failed
+ *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
+ *	as described above and return 0.
+ */
+
+static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
+			     int num,
+			     unsigned long goal,
+			     int *offsets,
+			     Indirect *branch)
+{
+	int blocksize = inode->i_sb->s_blocksize;
+	int n = 0, keys = 0;
+	int err = 0;
+	int i;
+	int parent = ext3_alloc_block(handle, inode, goal, &err);
+
+	branch[0].key = cpu_to_le32(parent);
+	if (parent) {
+		for (n = 1; n < num; n++) {
+			struct buffer_head *bh;
+			/* Allocate the next block */
+			int nr = ext3_alloc_block(handle, inode, parent, &err);
+			if (!nr)
+				break;
+			branch[n].key = cpu_to_le32(nr);
+			keys = n+1;
+
+			/*
+			 * Get buffer_head for parent block, zero it out
+			 * and set the pointer to new one, then send
+			 * parent to disk.  
+			 */
+			bh = sb_getblk(inode->i_sb, parent);
+			branch[n].bh = bh;
+			lock_buffer(bh);
+			BUFFER_TRACE(bh, "call get_create_access");
+			err = ext3_journal_get_create_access(handle, bh);
+			if (err) {
+				unlock_buffer(bh);
+				brelse(bh);
+				break;
+			}
+
+			memset(bh->b_data, 0, blocksize);
+			branch[n].p = (__le32*) bh->b_data + offsets[n];
+			*branch[n].p = branch[n].key;
+			BUFFER_TRACE(bh, "marking uptodate");
+			set_buffer_uptodate(bh);
+			unlock_buffer(bh);
+
+			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+			err = ext3_journal_dirty_metadata(handle, bh);
+			if (err)
+				break;
+
+			parent = nr;
+		}
+	}
+	if (n == num)
+		return 0;
+
+	/* Allocation failed, free what we already allocated */
+	for (i = 1; i < keys; i++) {
+		BUFFER_TRACE(branch[i].bh, "call journal_forget");
+		ext3_journal_forget(handle, branch[i].bh);
+	}
+	for (i = 0; i < keys; i++)
+		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+	return err;
+}
+
+/**
+ *	ext3_splice_branch - splice the allocated branch onto inode.
+ *	@inode: owner
+ *	@block: (logical) number of block we are adding
+ *	@chain: chain of indirect blocks (with a missing link - see
+ *		ext3_alloc_branch)
+ *	@where: location of missing link
+ *	@num:   number of blocks we are adding
+ *
+ *	This function verifies that chain (up to the missing link) had not
+ *	changed, fills the missing link and does all housekeeping needed in
+ *	inode (->i_blocks, etc.). In case of success we end up with the full
+ *	chain to new block and return 0. Otherwise (== chain had been changed)
+ *	we free the new blocks (forgetting their buffer_heads, indeed) and
+ *	return -EAGAIN.
+ */
+
+static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
+			      Indirect chain[4], Indirect *where, int num)
+{
+	int i;
+	int err = 0;
+	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+
+	/*
+	 * If we're splicing into a [td]indirect block (as opposed to the
+	 * inode) then we need to get write access to the [td]indirect block
+	 * before the splice.
+	 */
+	if (where->bh) {
+		BUFFER_TRACE(where->bh, "get_write_access");
+		err = ext3_journal_get_write_access(handle, where->bh);
+		if (err)
+			goto err_out;
+	}
+	/* Verify that place we are splicing to is still there and vacant */
+
+	if (!verify_chain(chain, where-1) || *where->p)
+		/* Writer: end */
+		goto changed;
+
+	/* That's it */
+
+	*where->p = where->key;
+
+	/*
+	 * Update the most recently allocated logical & physical block
+	 * in i_block_alloc_info, to help find the proper goal block for the
+	 * next allocation.
+	 */
+	if (block_i) {
+		block_i->last_alloc_logical_block = block;
+		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+	}
+
+	/* We are done with atomic stuff, now do the rest of housekeeping */
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	ext3_mark_inode_dirty(handle, inode);
+
+	/* had we spliced it onto indirect block? */
+	if (where->bh) {
+		/*
+		 * akpm: If we spliced it onto an indirect block, we haven't
+		 * altered the inode.  Note however that if it is being spliced
+		 * onto an indirect block at the very end of the file (the
+		 * file is growing) then we *will* alter the inode to reflect
+		 * the new i_size.  But that is not done here - it is done in
+		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
+		 */
+		jbd_debug(5, "splicing indirect only\n");
+		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, where->bh);
+		if (err) 
+			goto err_out;
+	} else {
+		/*
+		 * OK, we spliced it into the inode itself on a direct block.
+		 * Inode was dirtied above.
+		 */
+		jbd_debug(5, "splicing direct\n");
+	}
+	return err;
+
+changed:
+	/*
+	 * AKPM: if where[i].bh isn't part of the current updating
+	 * transaction then we explode nastily.  Test this code path.
+	 */
+	jbd_debug(1, "the chain changed: try again\n");
+	err = -EAGAIN;
+
+err_out:
+	for (i = 1; i < num; i++) {
+		BUFFER_TRACE(where[i].bh, "call journal_forget");
+		ext3_journal_forget(handle, where[i].bh);
+	}
+	/* For the normal collision cleanup case, we free up the blocks.
+	 * On genuine filesystem errors we don't even think about doing
+	 * that. */
+	if (err == -EAGAIN)
+		for (i = 0; i < num; i++)
+			ext3_free_blocks(handle, inode, 
+					 le32_to_cpu(where[i].key), 1);
+	return err;
+}
+
+/*
+ * Allocation strategy is simple: if we have to allocate something, we will
+ * have to go the whole way to leaf. So let's do it before attaching anything
+ * to tree, set linkage between the newborn blocks, write them if sync is
+ * required, recheck the path, free and repeat if check fails, otherwise
+ * set the last missing link (that will protect us from any truncate-generated
+ * removals - all blocks on the path are immune now) and possibly force the
+ * write on the parent block.
+ * That has a nice additional property: no special recovery from the failed
+ * allocations is needed - we simply release blocks and do not touch anything
+ * reachable from inode.
+ *
+ * akpm: `handle' can be NULL if create == 0.
+ *
+ * The BKL may not be held on entry here.  Be sure to take it early.
+ */
+
+static int
+ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
+		struct buffer_head *bh_result, int create, int extend_disksize)
+{
+	int err = -EIO;
+	int offsets[4];
+	Indirect chain[4];
+	Indirect *partial;
+	unsigned long goal;
+	int left;
+	int boundary = 0;
+	int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+	struct ext3_inode_info *ei = EXT3_I(inode);
+
+	J_ASSERT(handle != NULL || create == 0);
+
+	if (depth == 0)
+		goto out;
+
+reread:
+	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
+
+	/* Simplest case - block found, no allocation needed */
+	if (!partial) {
+		clear_buffer_new(bh_result);
+got_it:
+		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+		if (boundary)
+			set_buffer_boundary(bh_result);
+		/* Clean up and exit */
+		partial = chain+depth-1; /* the whole chain */
+		goto cleanup;
+	}
+
+	/* Next simple case - plain lookup or failed read of indirect block */
+	if (!create || err == -EIO) {
+cleanup:
+		while (partial > chain) {
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+		BUFFER_TRACE(bh_result, "returned");
+out:
+		return err;
+	}
+
+	/*
+	 * Indirect block might be removed by truncate while we were
+	 * reading it. Handling of that case (forget what we've got and
+	 * reread) is taken out of the main path.
+	 */
+	if (err == -EAGAIN)
+		goto changed;
+
+	goal = 0;
+	down(&ei->truncate_sem);
+
+	/* lazy initialize the block allocation info here if necessary */
+	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) {
+		ext3_init_block_alloc_info(inode);
+	}
+
+	if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
+		up(&ei->truncate_sem);
+		goto changed;
+	}
+
+	left = (chain + depth) - partial;
+
+	/*
+	 * Block out ext3_truncate while we alter the tree
+	 */
+	err = ext3_alloc_branch(handle, inode, left, goal,
+					offsets+(partial-chain), partial);
+
+	/* The ext3_splice_branch call will free and forget any buffers
+	 * on the new chain if there is a failure, but that risks using
+	 * up transaction credits, especially for bitmaps where the
+	 * credits cannot be returned.  Can we handle this somehow?  We
+	 * may need to return -EAGAIN upwards in the worst case.  --sct */
+	if (!err)
+		err = ext3_splice_branch(handle, inode, iblock, chain,
+					 partial, left);
+	/* i_disksize growing is protected by truncate_sem
+	 * don't forget to protect it if you're about to implement
+	 * concurrent ext3_get_block() -bzzz */
+	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
+		ei->i_disksize = inode->i_size;
+	up(&ei->truncate_sem);
+	if (err == -EAGAIN)
+		goto changed;
+	if (err)
+		goto cleanup;
+
+	set_buffer_new(bh_result);
+	goto got_it;
+
+changed:
+	while (partial > chain) {
+		jbd_debug(1, "buffer chain changed, retrying\n");
+		BUFFER_TRACE(partial->bh, "brelsing");
+		brelse(partial->bh);
+		partial--;
+	}
+	goto reread;
+}
+
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	handle_t *handle = NULL;
+	int ret;
+
+	if (create) {
+		handle = ext3_journal_current_handle();
+		J_ASSERT(handle != 0);
+	}
+	ret = ext3_get_block_handle(handle, inode, iblock,
+				bh_result, create, 1);
+	return ret;
+}
+
+#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
+
+static int
+ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long max_blocks, struct buffer_head *bh_result,
+		int create)
+{
+	handle_t *handle = journal_current_handle();
+	int ret = 0;
+
+	if (!handle)
+		goto get_block;		/* A read */
+
+	if (handle->h_transaction->t_state == T_LOCKED) {
+		/*
+		 * Huge direct-io writes can hold off commits for long
+		 * periods of time.  Let this commit run.
+		 */
+		ext3_journal_stop(handle);
+		handle = ext3_journal_start(inode, DIO_CREDITS);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		goto get_block;
+	}
+
+	if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
+		/*
+		 * Getting low on buffer credits...
+		 */
+		ret = ext3_journal_extend(handle, DIO_CREDITS);
+		if (ret > 0) {
+			/*
+			 * Couldn't extend the transaction.  Start a new one.
+			 */
+			ret = ext3_journal_restart(handle, DIO_CREDITS);
+		}
+	}
+
+get_block:
+	if (ret == 0)
+		ret = ext3_get_block_handle(handle, inode, iblock,
+					bh_result, create, 0);
+	bh_result->b_size = (1 << inode->i_blkbits);
+	return ret;
+}
+
+static int ext3_writepages_get_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh, int create)
+{
+	return ext3_direct_io_get_blocks(inode, iblock, 1, bh, create);
+}
+
+/*
+ * `handle' can be NULL if create is zero
+ */
+struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
+				long block, int create, int * errp)
+{
+	struct buffer_head dummy;
+	int fatal = 0, err;
+
+	J_ASSERT(handle != NULL || create == 0);
+
+	dummy.b_state = 0;
+	dummy.b_blocknr = -1000;
+	buffer_trace_init(&dummy.b_history);
+	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
+	if (!*errp && buffer_mapped(&dummy)) {
+		struct buffer_head *bh;
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+		if (buffer_new(&dummy)) {
+			J_ASSERT(create != 0);
+			J_ASSERT(handle != 0);
+
+			/* Now that we do not always journal data, we
+			   should keep in mind whether this should
+			   always journal the new buffer as metadata.
+			   For now, regular file writes use
+			   ext3_get_block instead, so it's not a
+			   problem. */
+			lock_buffer(bh);
+			BUFFER_TRACE(bh, "call get_create_access");
+			fatal = ext3_journal_get_create_access(handle, bh);
+			if (!fatal && !buffer_uptodate(bh)) {
+				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+				set_buffer_uptodate(bh);
+			}
+			unlock_buffer(bh);
+			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+			err = ext3_journal_dirty_metadata(handle, bh);
+			if (!fatal)
+				fatal = err;
+		} else {
+			BUFFER_TRACE(bh, "not a new buffer");
+		}
+		if (fatal) {
+			*errp = fatal;
+			brelse(bh);
+			bh = NULL;
+		}
+		return bh;
+	}
+	return NULL;
+}
+
+struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
+			       int block, int create, int *err)
+{
+	struct buffer_head * bh;
+
+	bh = ext3_getblk(handle, inode, block, create, err);
+	if (!bh)
+		return bh;
+	if (buffer_uptodate(bh))
+		return bh;
+	ll_rw_block(READ, 1, &bh);
+	wait_on_buffer(bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	put_bh(bh);
+	*err = -EIO;
+	return NULL;
+}
+
+static int walk_page_buffers(	handle_t *handle,
+				struct buffer_head *head,
+				unsigned from,
+				unsigned to,
+				int *partial,
+				int (*fn)(	handle_t *handle,
+						struct buffer_head *bh))
+{
+	struct buffer_head *bh;
+	unsigned block_start, block_end;
+	unsigned blocksize = head->b_size;
+	int err, ret = 0;
+	struct buffer_head *next;
+
+	for (	bh = head, block_start = 0;
+		ret == 0 && (bh != head || !block_start);
+	    	block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (partial && !buffer_uptodate(bh))
+				*partial = 1;
+			continue;
+		}
+		err = (*fn)(handle, bh);
+		if (!ret)
+			ret = err;
+	}
+	return ret;
+}
+
+/*
+ * To preserve ordering, it is essential that the hole instantiation and
+ * the data write be encapsulated in a single transaction.  We cannot
+ * close off a transaction and start a new one between the ext3_get_block()
+ * and the commit_write().  So doing the journal_start at the start of
+ * prepare_write() is the right place.
+ *
+ * Also, this function can nest inside ext3_writepage() ->
+ * block_write_full_page(). In that case, we *know* that ext3_writepage()
+ * has generated enough buffer credits to do the whole page.  So we won't
+ * block on the journal in that case, which is good, because the caller may
+ * be PF_MEMALLOC.
+ *
+ * By accident, ext3 can be reentered when a transaction is open via
+ * quota file writes.  If we were to commit the transaction while thus
+ * reentered, there can be a deadlock - we would be holding a quota
+ * lock, and the commit would never complete if another thread had a
+ * transaction open and was blocking on the quota lock - a ranking
+ * violation.
+ *
+ * So what we do is to rely on the fact that journal_stop/journal_start
+ * will _not_ run commit under these circumstances because handle->h_ref
+ * is elevated.  We'll still have enough credits for the tiny quotafile
+ * write.  
+ */
+
+static int do_journal_get_write_access(handle_t *handle, 
+				       struct buffer_head *bh)
+{
+	if (!buffer_mapped(bh) || buffer_freed(bh))
+		return 0;
+	return ext3_journal_get_write_access(handle, bh);
+}
+
+static int ext3_prepare_write(struct file *file, struct page *page,
+			      unsigned from, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+	handle_t *handle;
+	int retries = 0;
+
+retry:
+	handle = ext3_journal_start(inode, needed_blocks);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out;
+	}
+	if (test_opt(inode->i_sb, NOBH))
+		ret = nobh_prepare_write(page, from, to, ext3_get_block);
+	else
+		ret = block_prepare_write(page, from, to, ext3_get_block);
+	if (ret)
+		goto prepare_write_failed;
+
+	if (ext3_should_journal_data(inode)) {
+		ret = walk_page_buffers(handle, page_buffers(page),
+				from, to, NULL, do_journal_get_write_access);
+	}
+prepare_write_failed:
+	if (ret)
+		ext3_journal_stop(handle);
+	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+out:
+	return ret;
+}
+
+int
+ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+{
+	int err = journal_dirty_data(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
+						bh, handle,err);
+	return err;
+}
+
+/* For commit_write() in data=journal mode */
+static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
+{
+	if (!buffer_mapped(bh) || buffer_freed(bh))
+		return 0;
+	set_buffer_uptodate(bh);
+	return ext3_journal_dirty_metadata(handle, bh);
+}
+
+/*
+ * We need to pick up the new inode size which generic_commit_write gave us.
+ * `file' can be NULL - e.g. when called from page_symlink().
+ *
+ * ext3 never places buffers on inode->i_mapping->private_list.  metadata
+ * buffers are managed internally.
+ */
+
+static int ext3_ordered_commit_write(struct file *file, struct page *page,
+			     unsigned from, unsigned to)
+{
+	handle_t *handle = ext3_journal_current_handle();
+	struct inode *inode = page->mapping->host;
+	int ret = 0, ret2;
+
+	ret = walk_page_buffers(handle, page_buffers(page),
+		from, to, NULL, ext3_journal_dirty_data);
+
+	if (ret == 0) {
+		/*
+		 * generic_commit_write() will run mark_inode_dirty() if i_size
+		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
+		 * into that.
+		 */
+		loff_t new_i_size;
+
+		new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+		if (new_i_size > EXT3_I(inode)->i_disksize)
+			EXT3_I(inode)->i_disksize = new_i_size;
+		ret = generic_commit_write(file, page, from, to);
+	}
+	ret2 = ext3_journal_stop(handle);
+	if (!ret)
+		ret = ret2;
+	return ret;
+}
+
+static int ext3_writeback_commit_write(struct file *file, struct page *page,
+			     unsigned from, unsigned to)
+{
+	handle_t *handle = ext3_journal_current_handle();
+	struct inode *inode = page->mapping->host;
+	int ret = 0, ret2;
+	loff_t new_i_size;
+
+	new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+	if (new_i_size > EXT3_I(inode)->i_disksize)
+		EXT3_I(inode)->i_disksize = new_i_size;
+
+	if (test_opt(inode->i_sb, NOBH))
+		ret = nobh_commit_write(file, page, from, to);
+	else
+		ret = generic_commit_write(file, page, from, to);
+
+	ret2 = ext3_journal_stop(handle);
+	if (!ret)
+		ret = ret2;
+	return ret;
+}
+
+static int ext3_journalled_commit_write(struct file *file,
+			struct page *page, unsigned from, unsigned to)
+{
+	handle_t *handle = ext3_journal_current_handle();
+	struct inode *inode = page->mapping->host;
+	int ret = 0, ret2;
+	int partial = 0;
+	loff_t pos;
+
+	/*
+	 * Here we duplicate the generic_commit_write() functionality
+	 */
+	pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+
+	ret = walk_page_buffers(handle, page_buffers(page), from,
+				to, &partial, commit_write_fn);
+	if (!partial)
+		SetPageUptodate(page);
+	if (pos > inode->i_size)
+		i_size_write(inode, pos);
+	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+	if (inode->i_size > EXT3_I(inode)->i_disksize) {
+		EXT3_I(inode)->i_disksize = inode->i_size;
+		ret2 = ext3_mark_inode_dirty(handle, inode);
+		if (!ret) 
+			ret = ret2;
+	}
+	ret2 = ext3_journal_stop(handle);
+	if (!ret)
+		ret = ret2;
+	return ret;
+}
+
+/* 
+ * bmap() is special.  It gets used by applications such as lilo and by
+ * the swapper to find the on-disk block of a specific piece of data.
+ *
+ * Naturally, this is dangerous if the block concerned is still in the
+ * journal.  If somebody makes a swapfile on an ext3 data-journaling
+ * filesystem and enables swap, then they may get a nasty shock when the
+ * data getting swapped to that swapfile suddenly gets overwritten by
+ * the original zeros written out previously to the journal and
+ * awaiting writeback in the kernel's buffer cache. 
+ *
+ * So, if we see any bmap calls here on a modified, data-journaled file,
+ * take extra steps to flush any blocks which might be in the cache. 
+ */
+static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
+{
+	struct inode *inode = mapping->host;
+	journal_t *journal;
+	int err;
+
+	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
+		/* 
+		 * This is a REALLY heavyweight approach, but the use of
+		 * bmap on dirty files is expected to be extremely rare:
+		 * only if we run lilo or swapon on a freshly made file
+		 * do we expect this to happen. 
+		 *
+		 * (bmap requires CAP_SYS_RAWIO so this does not
+		 * represent an unprivileged user DOS attack --- we'd be
+		 * in trouble if mortal users could trigger this path at
+		 * will.) 
+		 *
+		 * NB. EXT3_STATE_JDATA is not set on files other than
+		 * regular files.  If somebody wants to bmap a directory
+		 * or symlink and gets confused because the buffer
+		 * hasn't yet been flushed to disk, they deserve
+		 * everything they get.
+		 */
+
+		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
+		journal = EXT3_JOURNAL(inode);
+		journal_lock_updates(journal);
+		err = journal_flush(journal);
+		journal_unlock_updates(journal);
+
+		if (err)
+			return 0;
+	}
+
+	return generic_block_bmap(mapping,block,ext3_get_block);
+}
+
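+/*
+ * Minimal walk_page_buffers() callbacks: bget_one()/bput_one() just take and
+ * drop a reference on each buffer head, so that ext3_ordered_writepage() can
+ * keep the page's buffers pinned while the page itself may already have been
+ * unlocked by block_write_full_page().
+ */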
+static int bget_one(handle_t *handle, struct buffer_head *bh)
+{
+	get_bh(bh);
+	return 0;
+}
+
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+	put_bh(bh);
+	return 0;
+}
+
+static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
+{
+	if (buffer_mapped(bh))
+		return ext3_journal_dirty_data(handle, bh);
+	return 0;
+}
+
+/*
+ * Note that we always start a transaction even if we're not journalling
+ * data.  This is to preserve ordering: any hole instantiation within
+ * __block_write_full_page -> ext3_get_block() should be journalled
+ * along with the data so we don't crash and then get metadata which
+ * refers to old data.
+ *
+ * In all journalling modes block_write_full_page() will start the I/O.
+ *
+ * Problem:
+ *
+ *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *		ext3_writepage()
+ *
+ * Similar for:
+ *
+ *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
+ *
+ * Same applies to ext3_get_block().  We will deadlock on various things like
+ * lock_journal and i_truncate_sem.
+ *
+ * Setting PF_MEMALLOC here doesn't work - too many internal memory
+ * allocations fail.
+ *
+ * 16May01: If we're reentered then journal_current_handle() will be
+ *	    non-zero. We simply *return*.
+ *
+ * 1 July 2001: @@@ FIXME:
+ *   In journalled data mode, a data buffer may be metadata against the
+ *   current transaction.  But the same file is part of a shared mapping
+ *   and someone does a writepage() on it.
+ *
+ *   We will move the buffer onto the async_data list, but *after* it has
+ *   been dirtied. So there's a small window where we have dirty data on
+ *   BJ_Metadata.
+ *
+ *   Note that this only applies to the last partial page in the file.  The
+ *   bit which block_write_full_page() uses prepare/commit for.  (That's
+ *   broken code anyway: it's wrong for msync()).
+ *
+ *   It's a rare case: affects the final partial page, for journalled data
+ *   where the file is subject to both write() and writepage() in the same
+ *   transaction.  To fix it we'll need a custom block_write_full_page().
+ *   We'll probably need that anyway for journalling writepage() output.
+ *
+ * We don't honour synchronous mounts for writepage().  That would be
+ * disastrous.  Any write() or metadata operation will sync the fs for
+ * us.
+ *
+ * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
+ * we don't need to open a transaction here.
+ */
+static int ext3_ordered_writepage(struct page *page,
+			struct writeback_control *wbc)
+{
+	struct inode *inode = page->mapping->host;
+	struct buffer_head *page_bufs;
+	handle_t *handle = NULL;
+	int ret = 0;
+	int err;
+
+	J_ASSERT(PageLocked(page));
+
+	/*
+	 * We give up here if we're reentered, because it might be for a
+	 * different filesystem.
+	 */
+	if (ext3_journal_current_handle())
+		goto out_fail;
+
+	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
+
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out_fail;
+	}
+
+	if (!page_has_buffers(page)) {
+		create_empty_buffers(page, inode->i_sb->s_blocksize,
+				(1 << BH_Dirty)|(1 << BH_Uptodate));
+	}
+	page_bufs = page_buffers(page);
+	walk_page_buffers(handle, page_bufs, 0,
+			PAGE_CACHE_SIZE, NULL, bget_one);
+
+	ret = block_write_full_page(page, ext3_get_block, wbc);
+
+	/*
+	 * The page can become unlocked at any point now, and
+	 * truncate can then come in and change things.  So we
+	 * can't touch *page from now on.  But *page_bufs is
+	 * safe due to elevated refcount.
+	 */
+
+	/*
+	 * And attach them to the current transaction.  But only if 
+	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
+	 * and generally junk.
+	 */
+	if (ret == 0) {
+		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
+					NULL, journal_dirty_data_fn);
+		if (!ret)
+			ret = err;
+	}
+	walk_page_buffers(handle, page_bufs, 0,
+			PAGE_CACHE_SIZE, NULL, bput_one);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+
+out_fail:
+	redirty_page_for_writepage(wbc, page);
+	unlock_page(page);
+	return ret;
+}
+
+static int
+ext3_writeback_writepage_helper(struct page *page,
+				struct writeback_control *wbc)
+{
+	return block_write_full_page(page, ext3_get_block, wbc);
+}
+
+static int
+ext3_writeback_writepages(struct address_space *mapping,
+				struct writeback_control *wbc)
+{
+	struct inode *inode = mapping->host;
+	handle_t *handle = NULL;
+	int err, ret = 0;
+
+	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		return ret;
+
+	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		return ret;
+	}
+
+	ret = __mpage_writepages(mapping, wbc, ext3_writepages_get_block,
+					ext3_writeback_writepage_helper);
+
+	/*
+	 * Need to reacquire the handle since ext3_writepages_get_block()
+	 * can restart the handle.
+	 */
+	handle = journal_current_handle();
+
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_writeback_writepage(struct page *page,
+				struct writeback_control *wbc)
+{
+	struct inode *inode = page->mapping->host;
+	handle_t *handle = NULL;
+	int ret = 0;
+	int err;
+
+	if (ext3_journal_current_handle())
+		goto out_fail;
+
+	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out_fail;
+	}
+
+	if (test_opt(inode->i_sb, NOBH))
+		ret = nobh_writepage(page, ext3_get_block, wbc);
+	else
+		ret = block_write_full_page(page, ext3_get_block, wbc);
+
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+
+out_fail:
+	redirty_page_for_writepage(wbc, page);
+	unlock_page(page);
+	return ret;
+}
+
+static int ext3_journalled_writepage(struct page *page,
+				struct writeback_control *wbc)
+{
+	struct inode *inode = page->mapping->host;
+	handle_t *handle = NULL;
+	int ret = 0;
+	int err;
+
+	if (ext3_journal_current_handle())
+		goto no_write;
+
+	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto no_write;
+	}
+
+	if (!page_has_buffers(page) || PageChecked(page)) {
+		/*
+		 * It's mmapped pagecache.  Add buffers and journal it.  There
+		 * doesn't seem much point in redirtying the page here.
+		 */
+		ClearPageChecked(page);
+		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+					ext3_get_block);
+		if (ret != 0)
+			goto out_unlock;
+		ret = walk_page_buffers(handle, page_buffers(page), 0,
+			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
+
+		err = walk_page_buffers(handle, page_buffers(page), 0,
+				PAGE_CACHE_SIZE, NULL, commit_write_fn);
+		if (ret == 0)
+			ret = err;
+		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+		unlock_page(page);
+	} else {
+		/*
+		 * It may be a page full of checkpoint-mode buffers.  We don't
+		 * really know unless we go poke around in the buffer_heads.
+		 * But block_write_full_page will do the right thing.
+		 */
+		ret = block_write_full_page(page, ext3_get_block, wbc);
+	}
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+out:
+	return ret;
+
+no_write:
+	redirty_page_for_writepage(wbc, page);
+out_unlock:
+	unlock_page(page);
+	goto out;
+}
+
+static int ext3_readpage(struct file *file, struct page *page)
+{
+	return mpage_readpage(page, ext3_get_block);
+}
+
+static int
+ext3_readpages(struct file *file, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
+}
+
+static int ext3_invalidatepage(struct page *page, unsigned long offset)
+{
+	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
+
+	/*
+	 * If it's a full truncate we just forget about the pending dirtying
+	 */
+	if (offset == 0)
+		ClearPageChecked(page);
+
+	return journal_invalidatepage(journal, page, offset);
+}
+
+static int ext3_releasepage(struct page *page, int wait)
+{
+	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
+
+	WARN_ON(PageChecked(page));
+	if (!page_has_buffers(page))
+		return 0;
+	return journal_try_to_free_buffers(journal, page, wait);
+}
+
+/*
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ * If the O_DIRECT write is instantiating holes inside i_size and the machine
+ * crashes then stale disk data _may_ be exposed inside the file.
+ */
+static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
+			const struct iovec *iov, loff_t offset,
+			unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	handle_t *handle = NULL;
+	ssize_t ret;
+	int orphan = 0;
+	size_t count = iov_length(iov, nr_segs);
+
+	if (rw == WRITE) {
+		loff_t final_size = offset + count;
+
+		handle = ext3_journal_start(inode, DIO_CREDITS);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			goto out;
+		}
+		if (final_size > inode->i_size) {
+			ret = ext3_orphan_add(handle, inode);
+			if (ret)
+				goto out_stop;
+			orphan = 1;
+			ei->i_disksize = inode->i_size;
+		}
+	}
+
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
+				 offset, nr_segs,
+				 ext3_direct_io_get_blocks, NULL);
+
+	/*
+	 * Reacquire the handle: ext3_direct_io_get_blocks() can restart the
+	 * transaction
+	 */
+	handle = journal_current_handle();
+
+out_stop:
+	if (handle) {
+		int err;
+
+		if (orphan && inode->i_nlink)
+			ext3_orphan_del(handle, inode);
+		if (orphan && ret > 0) {
+			loff_t end = offset + ret;
+			if (end > inode->i_size) {
+				ei->i_disksize = end;
+				i_size_write(inode, end);
+				/*
+				 * We're going to return a positive `ret'
+				 * here due to non-zero-length I/O, so there's
+				 * no way of reporting error returns from
+				 * ext3_mark_inode_dirty() to userspace.  So
+				 * ignore it.
+				 */
+				ext3_mark_inode_dirty(handle, inode);
+			}
+		}
+		err = ext3_journal_stop(handle);
+		if (ret == 0)
+			ret = err;
+	}
+out:
+	return ret;
+}
+
+/*
+ * Pages can be marked dirty completely asynchronously from ext3's journalling
+ * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
+ * much here because ->set_page_dirty is called under VFS locks.  The page is
+ * not necessarily locked.
+ *
+ * We cannot just dirty the page and leave attached buffers clean, because the
+ * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
+ * or jbddirty because all the journalling code will explode.
+ *
+ * So what we do is to mark the page "pending dirty" and next time writepage
+ * is called, propagate that into the buffers appropriately.
+ */
+static int ext3_journalled_set_page_dirty(struct page *page)
+{
+	SetPageChecked(page);
+	return __set_page_dirty_nobuffers(page);
+}
+
+static struct address_space_operations ext3_ordered_aops = {
+	.readpage	= ext3_readpage,
+	.readpages	= ext3_readpages,
+	.writepage	= ext3_ordered_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= ext3_prepare_write,
+	.commit_write	= ext3_ordered_commit_write,
+	.bmap		= ext3_bmap,
+	.invalidatepage	= ext3_invalidatepage,
+	.releasepage	= ext3_releasepage,
+	.direct_IO	= ext3_direct_IO,
+};
+
+static struct address_space_operations ext3_writeback_aops = {
+	.readpage	= ext3_readpage,
+	.readpages	= ext3_readpages,
+	.writepage	= ext3_writeback_writepage,
+	.writepages	= ext3_writeback_writepages,
+	.sync_page	= block_sync_page,
+	.prepare_write	= ext3_prepare_write,
+	.commit_write	= ext3_writeback_commit_write,
+	.bmap		= ext3_bmap,
+	.invalidatepage	= ext3_invalidatepage,
+	.releasepage	= ext3_releasepage,
+	.direct_IO	= ext3_direct_IO,
+};
+
+static struct address_space_operations ext3_journalled_aops = {
+	.readpage	= ext3_readpage,
+	.readpages	= ext3_readpages,
+	.writepage	= ext3_journalled_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= ext3_prepare_write,
+	.commit_write	= ext3_journalled_commit_write,
+	.set_page_dirty	= ext3_journalled_set_page_dirty,
+	.bmap		= ext3_bmap,
+	.invalidatepage	= ext3_invalidatepage,
+	.releasepage	= ext3_releasepage,
+};
+
+void ext3_set_aops(struct inode *inode)
+{
+	if (ext3_should_order_data(inode))
+		inode->i_mapping->a_ops = &ext3_ordered_aops;
+	else if (ext3_should_writeback_data(inode))
+		inode->i_mapping->a_ops = &ext3_writeback_aops;
+	else
+		inode->i_mapping->a_ops = &ext3_journalled_aops;
+}
+
+/*
+ * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate. We need to physically zero the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ */
+static int ext3_block_truncate_page(handle_t *handle, struct page *page,
+		struct address_space *mapping, loff_t from)
+{
+	unsigned long index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned blocksize, iblock, length, pos;
+	struct inode *inode = mapping->host;
+	struct buffer_head *bh;
+	int err = 0;
+	void *kaddr;
+
+	blocksize = inode->i_sb->s_blocksize;
+	length = blocksize - (offset & (blocksize - 1));
+	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+
+	/*
+	 * For "nobh" option,  we can only work if we don't need to
+	 * read-in the page - otherwise we create buffers to do the IO.
+	 */
+	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH)) {
+		if (PageUptodate(page)) {
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + offset, 0, length);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+			set_page_dirty(page);
+			goto unlock;
+		}
+	}
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+
+	/* Find the buffer that contains "offset" */
+	bh = page_buffers(page);
+	pos = blocksize;
+	while (offset >= pos) {
+		bh = bh->b_this_page;
+		iblock++;
+		pos += blocksize;
+	}
+
+	err = 0;
+	if (buffer_freed(bh)) {
+		BUFFER_TRACE(bh, "freed: skip");
+		goto unlock;
+	}
+
+	if (!buffer_mapped(bh)) {
+		BUFFER_TRACE(bh, "unmapped");
+		ext3_get_block(inode, iblock, bh, 0);
+		/* unmapped? It's a hole - nothing to do */
+		if (!buffer_mapped(bh)) {
+			BUFFER_TRACE(bh, "still unmapped");
+			goto unlock;
+		}
+	}
+
+	/* Ok, it's mapped. Make sure it's up-to-date */
+	if (PageUptodate(page))
+		set_buffer_uptodate(bh);
+
+	if (!buffer_uptodate(bh)) {
+		err = -EIO;
+		ll_rw_block(READ, 1, &bh);
+		wait_on_buffer(bh);
+		/* Uhhuh. Read error. Complain and punt. */
+		if (!buffer_uptodate(bh))
+			goto unlock;
+	}
+
+	if (ext3_should_journal_data(inode)) {
+		BUFFER_TRACE(bh, "get write access");
+		err = ext3_journal_get_write_access(handle, bh);
+		if (err)
+			goto unlock;
+	}
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	BUFFER_TRACE(bh, "zeroed end of block");
+
+	err = 0;
+	if (ext3_should_journal_data(inode)) {
+		err = ext3_journal_dirty_metadata(handle, bh);
+	} else {
+		if (ext3_should_order_data(inode))
+			err = ext3_journal_dirty_data(handle, bh);
+		mark_buffer_dirty(bh);
+	}
+
+unlock:
+	unlock_page(page);
+	page_cache_release(page);
+	return err;
+}
+
+/*
+ * Probably it should be a library function... search for first non-zero word
+ * or memcmp with zero_page, whatever is better for particular architecture.
+ * Linus?
+ */
+static inline int all_zeroes(__le32 *p, __le32 *q)
+{
+	while (p < q)
+		if (*p++)
+			return 0;
+	return 1;
+}
+
+/**
+ *	ext3_find_shared - find the indirect blocks for partial truncation.
+ *	@inode:	  inode in question
+ *	@depth:	  depth of the affected branch
+ *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
+ *	@chain:	  place to store the pointers to partial indirect blocks
+ *	@top:	  place to the (detached) top of branch
+ *
+ *	This is a helper function used by ext3_truncate().
+ *
+ *	When we do truncate() we may have to clean the ends of several
+ *	indirect blocks but leave the blocks themselves alive. Block is
+ *	partially truncated if some data below the new i_size is referenced
+ *	from it (and it is on the path to the first completely truncated
+ *	data block, indeed).  We have to free the top of that path along
+ *	with everything to the right of the path. Since no allocation
+ *	past the truncation point is possible until ext3_truncate()
+ *	finishes, we may safely do the latter, but top of branch may
+ *	require special attention - pageout below the truncation point
+ *	might try to populate it.
+ *
+ *	We atomically detach the top of branch from the tree, store the
+ *	block number of its root in *@top, pointers to buffer_heads of
+ *	partially truncated blocks - in @chain[].bh and pointers to
+ *	their last elements that should not be removed - in
+ *	@chain[].p. Return value is the pointer to last filled element
+ *	of @chain.
+ *
+ *	The actual freeing of the subtrees is left to the caller:
+ *		a) free the subtree starting from *@top
+ *		b) free the subtrees whose roots are stored in
+ *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
+ *		c) free the subtrees growing from the inode past the @chain[0].
+ *			(no partially truncated stuff there).  */
+
+static Indirect *ext3_find_shared(struct inode *inode,
+				int depth,
+				int offsets[4],
+				Indirect chain[4],
+				__le32 *top)
+{
+	Indirect *partial, *p;
+	int k, err;
+
+	*top = 0;
+	/* Make k index the deepest non-null offset + 1 */
+	for (k = depth; k > 1 && !offsets[k-1]; k--)
+		;
+	partial = ext3_get_branch(inode, k, offsets, chain, &err);
+	/* Writer: pointers */
+	if (!partial)
+		partial = chain + k-1;
+	/*
+	 * If the branch acquired a continuation since we've looked at it -
+	 * fine, it should all survive and (new) top doesn't belong to us.
+	 */
+	if (!partial->key && *partial->p)
+		/* Writer: end */
+		goto no_top;
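+	/*
+	 * Step back up the chain past any indirect block in which every
+	 * pointer before p->p is zero: such a block ends up empty after the
+	 * truncate and will be freed entirely, so the surviving top of the
+	 * branch is one level higher.
+	 */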
+	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
+		;
+	/*
+	 * OK, we've found the last block that must survive. The rest of our
+	 * branch should be detached before unlocking. However, if that rest
+	 * of the branch is all ours and does not grow immediately from the
+	 * inode, it's easier to cheat and just decrement partial->p.
+	 */
+	if (p == chain + k - 1 && p > chain) {
+		p->p--;
+	} else {
+		*top = *p->p;
+		/* Nope, don't do this in ext3.  Must leave the tree intact */
+#if 0
+		*p->p = 0;
+#endif
+	}
+	/* Writer: end */
+
+	while(partial > p)
+	{
+		brelse(partial->bh);
+		partial--;
+	}
+no_top:
+	return partial;
+}
+
+/*
+ * Zero a number of block pointers in either an inode or an indirect block.
+ * If we restart the transaction we must again get write access to the
+ * indirect block for further modification.
+ *
+ * We release `count' blocks on disk, but (last - first) may be greater
+ * than `count' because there can be holes in there.
+ */
+static void
+ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
+		unsigned long block_to_free, unsigned long count,
+		__le32 *first, __le32 *last)
+{
+	__le32 *p;
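+
+	/*
+	 * If this handle is running low on credits, push out what we have so
+	 * far (the indirect block and the inode), restart the transaction and
+	 * re-take write access to the indirect block.
+	 */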
+	if (try_to_extend_transaction(handle, inode)) {
+		if (bh) {
+			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+			ext3_journal_dirty_metadata(handle, bh);
+		}
+		ext3_mark_inode_dirty(handle, inode);
+		ext3_journal_test_restart(handle, inode);
+		if (bh) {
+			BUFFER_TRACE(bh, "retaking write access");
+			ext3_journal_get_write_access(handle, bh);
+		}
+	}
+
+	/*
+	 * Any buffers which are on the journal will be in memory. We find
+	 * them on the hash table so journal_revoke() will run journal_forget()
+	 * on them.  We've already detached each block from the file, so
+	 * bforget() in journal_forget() should be safe.
+	 *
+	 * AKPM: turn on bforget in journal_forget()!!!
+	 */
+	for (p = first; p < last; p++) {
+		u32 nr = le32_to_cpu(*p);
+		if (nr) {
+			struct buffer_head *bh;
+
+			*p = 0;
+			bh = sb_find_get_block(inode->i_sb, nr);
+			ext3_forget(handle, 0, inode, bh, nr);
+		}
+	}
+
+	ext3_free_blocks(handle, inode, block_to_free, count);
+}
+
+/**
+ * ext3_free_data - free a list of data blocks
+ * @handle:	handle for this transaction
+ * @inode:	inode we are dealing with
+ * @this_bh:	indirect buffer_head which contains *@first and *@last
+ * @first:	array of block numbers
+ * @last:	points immediately past the end of array
+ *
+ * We are freeing all blocks referred to from that array (numbers are stored as
+ * little-endian 32-bit) and updating @inode->i_blocks appropriately.
+ *
+ * We accumulate contiguous runs of blocks to free.  Conveniently, if these
+ * blocks are contiguous then releasing them at one time will only affect one
+ * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
+ * actually use a lot of journal space.
+ *
+ * @this_bh will be %NULL if @first and @last point into the inode's direct
+ * block pointers.
+ */
+static void ext3_free_data(handle_t *handle, struct inode *inode,
+			   struct buffer_head *this_bh,
+			   __le32 *first, __le32 *last)
+{
+	unsigned long block_to_free = 0;    /* Starting block # of a run */
+	unsigned long count = 0;	    /* Number of blocks in the run */ 
+	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
+					       corresponding to
+					       block_to_free */
+	unsigned long nr;		    /* Current block # */
+	__le32 *p;			    /* Pointer into inode/ind
+					       for current block */
+	int err;
+
+	if (this_bh) {				/* For indirect block */
+		BUFFER_TRACE(this_bh, "get_write_access");
+		err = ext3_journal_get_write_access(handle, this_bh);
+		/* Important: if we can't update the indirect pointers
+		 * to the blocks, we can't free them. */
+		if (err)
+			return;
+	}
+
+	for (p = first; p < last; p++) {
+		nr = le32_to_cpu(*p);
+		if (nr) {
+			/* accumulate blocks to free if they're contiguous */
+			if (count == 0) {
+				block_to_free = nr;
+				block_to_free_p = p;
+				count = 1;
+			} else if (nr == block_to_free + count) {
+				count++;
+			} else {
+				ext3_clear_blocks(handle, inode, this_bh, 
+						  block_to_free,
+						  count, block_to_free_p, p);
+				block_to_free = nr;
+				block_to_free_p = p;
+				count = 1;
+			}
+		}
+	}
+
+	if (count > 0)
+		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
+				  count, block_to_free_p, p);
+
+	if (this_bh) {
+		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
+		ext3_journal_dirty_metadata(handle, this_bh);
+	}
+}
+
+/**
+ *	ext3_free_branches - free an array of branches
+ *	@handle: JBD handle for this transaction
+ *	@inode:	inode we are dealing with
+ *	@parent_bh: the buffer_head which contains *@first and *@last
+ *	@first:	array of block numbers
+ *	@last:	pointer immediately past the end of array
+ *	@depth:	depth of the branches to free
+ *
+ *	We are freeing all blocks referred to from these branches (numbers are
+ *	stored as little-endian 32-bit) and updating @inode->i_blocks
+ *	appropriately.
+ */
+static void ext3_free_branches(handle_t *handle, struct inode *inode,
+			       struct buffer_head *parent_bh,
+			       __le32 *first, __le32 *last, int depth)
+{
+	unsigned long nr;
+	__le32 *p;
+
+	if (is_handle_aborted(handle))
+		return;
+
+	if (depth--) {
+		struct buffer_head *bh;
+		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
+		p = last;
+		while (--p >= first) {
+			nr = le32_to_cpu(*p);
+			if (!nr)
+				continue;		/* A hole */
+
+			/* Go read the buffer for the next level down */
+			bh = sb_bread(inode->i_sb, nr);
+
+			/*
+			 * A read failure? Report error and clear slot
+			 * (should be rare).
+			 */
+			if (!bh) {
+				ext3_error(inode->i_sb, "ext3_free_branches",
+					   "Read failure, inode=%ld, block=%ld",
+					   inode->i_ino, nr);
+				continue;
+			}
+
+			/* This zaps the entire block.  Bottom up. */
+			BUFFER_TRACE(bh, "free child branches");
+			ext3_free_branches(handle, inode, bh,
+					   (__le32*)bh->b_data,
+					   (__le32*)bh->b_data + addr_per_block,
+					   depth);
+
+			/*
+			 * We've probably journalled the indirect block several
+			 * times during the truncate.  But it's no longer
+			 * needed and we now drop it from the transaction via
+			 * journal_revoke().
+			 *
+			 * That's easy if it's exclusively part of this
+			 * transaction.  But if it's part of the committing
+			 * transaction then journal_forget() will simply
+			 * brelse() it.  That means that if the underlying
+			 * block is reallocated in ext3_get_block(),
+			 * unmap_underlying_metadata() will find this block
+			 * and will try to get rid of it.  damn, damn.
+			 *
+			 * If this block has already been committed to the
+			 * journal, a revoke record will be written.  And
+			 * revoke records must be emitted *before* clearing
+			 * this block's bit in the bitmaps.
+			 */
+			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
+
+			/*
+			 * Everything below this pointer has been
+			 * released.  Now let this top-of-subtree go.
+			 *
+			 * We want the freeing of this indirect block to be
+			 * atomic in the journal with the updating of the
+			 * bitmap block which owns it.  So make some room in
+			 * the journal.
+			 *
+			 * We zero the parent pointer *after* freeing its
+			 * pointee in the bitmaps, so if extend_transaction()
+			 * for some reason fails to put the bitmap changes and
+			 * the release into the same transaction, recovery
+			 * will merely complain about releasing a free block,
+			 * rather than leaking blocks.
+			 */
+			if (is_handle_aborted(handle))
+				return;
+			if (try_to_extend_transaction(handle, inode)) {
+				ext3_mark_inode_dirty(handle, inode);
+				ext3_journal_test_restart(handle, inode);
+			}
+
+			ext3_free_blocks(handle, inode, nr, 1);
+
+			if (parent_bh) {
+				/*
+				 * The block which we have just freed is
+				 * pointed to by an indirect block: journal it
+				 */
+				BUFFER_TRACE(parent_bh, "get_write_access");
+				if (!ext3_journal_get_write_access(handle,
+								   parent_bh)){
+					*p = 0;
+					BUFFER_TRACE(parent_bh,
+					"call ext3_journal_dirty_metadata");
+					ext3_journal_dirty_metadata(handle, 
+								    parent_bh);
+				}
+			}
+		}
+	} else {
+		/* We have reached the bottom of the tree. */
+		BUFFER_TRACE(parent_bh, "free data blocks");
+		ext3_free_data(handle, inode, parent_bh, first, last);
+	}
+}
+
+/*
+ * ext3_truncate()
+ *
+ * We block out ext3_get_block() block instantiations across the entire
+ * transaction, and VFS/VM ensures that ext3_truncate() cannot run
+ * simultaneously on behalf of the same inode.
+ *
+ * As we work through the truncate and commit bits of it to the journal, there
+ * is one core guiding principle: the file's tree must always be consistent on
+ * disk.  We must be able to restart the truncate after a crash.
+ *
+ * The file's tree may be transiently inconsistent in memory (although it
+ * probably isn't), but whenever we close off and commit a journal transaction,
+ * the contents of (the filesystem + the journal) must be consistent and
+ * restartable.  It's pretty simple, really: bottom up, right to left (although
+ * left-to-right works OK too).
+ *
+ * Note that at recovery time, journal replay occurs *before* the restart of
+ * truncate against the orphan inode list.
+ *
+ * The committed inode has the new, desired i_size (which is the same as
+ * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
+ * that this inode's truncate did not complete and it will again call
+ * ext3_truncate() to have another go.  So there will be instantiated blocks
+ * to the right of the truncation point in a crashed ext3 filesystem.  But
+ * that's fine - as long as they are linked from the inode, the post-crash
+ * ext3_truncate() run will find them and release them.
+ */
+
+void ext3_truncate(struct inode * inode)
+{
+	handle_t *handle;
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	__le32 *i_data = ei->i_data;
+	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
+	struct address_space *mapping = inode->i_mapping;
+	int offsets[4];
+	Indirect chain[4];
+	Indirect *partial;
+	__le32 nr = 0;
+	int n;
+	long last_block;
+	unsigned blocksize = inode->i_sb->s_blocksize;
+	struct page *page;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	    S_ISLNK(inode->i_mode)))
+		return;
+	if (ext3_inode_is_fast_symlink(inode))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+
+	/*
+	 * We have to lock the EOF page here, because lock_page() nests
+	 * outside journal_start().
+	 */
+	if ((inode->i_size & (blocksize - 1)) == 0) {
+		/* Block boundary? Nothing to do */
+		page = NULL;
+	} else {
+		page = grab_cache_page(mapping,
+				inode->i_size >> PAGE_CACHE_SHIFT);
+		if (!page)
+			return;
+	}
+
+	handle = start_transaction(inode);
+	if (IS_ERR(handle)) {
+		if (page) {
+			clear_highpage(page);
+			flush_dcache_page(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+		return;		/* AKPM: return what? */
+	}
+
+	last_block = (inode->i_size + blocksize-1)
+					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
+
+	if (page)
+		ext3_block_truncate_page(handle, page, mapping, inode->i_size);
+
+	n = ext3_block_to_path(inode, last_block, offsets, NULL);
+	if (n == 0)
+		goto out_stop;	/* error */
+
+	/*
+	 * OK.  This truncate is going to happen.  We add the inode to the
+	 * orphan list, so that if this truncate spans multiple transactions,
+	 * and we crash, we will resume the truncate when the filesystem
+	 * recovers.  It also marks the inode dirty, to catch the new size.
+	 *
+	 * Implication: the file must always be in a sane, consistent
+	 * truncatable state while each transaction commits.
+	 */
+	if (ext3_orphan_add(handle, inode))
+		goto out_stop;
+
+	/*
+	 * The orphan list entry will now protect us from any crash which
+	 * occurs before the truncate completes, so it is now safe to propagate
+	 * the new, shorter inode size (held for now in i_size) into the
+	 * on-disk inode. We do this via i_disksize, which is the value which
+	 * ext3 *really* writes onto the disk inode.
+	 */
+	ei->i_disksize = inode->i_size;
+
+	/*
+	 * From here we block out all ext3_get_block() callers who want to
+	 * modify the block allocation tree.
+	 */
+	down(&ei->truncate_sem);
+
+	if (n == 1) {		/* direct blocks */
+		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
+			       i_data + EXT3_NDIR_BLOCKS);
+		goto do_indirects;
+	}
+
+	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
+	/* Kill the top of shared branch (not detached) */
+	if (nr) {
+		if (partial == chain) {
+			/* Shared branch grows from the inode */
+			ext3_free_branches(handle, inode, NULL,
+					   &nr, &nr+1, (chain+n-1) - partial);
+			*partial->p = 0;
+			/*
+			 * We mark the inode dirty prior to restart,
+			 * and prior to stop.  No need for it here.
+			 */
+		} else {
+			/* Shared branch grows from an indirect block */
+			BUFFER_TRACE(partial->bh, "get_write_access");
+			ext3_free_branches(handle, inode, partial->bh,
+					partial->p,
+					partial->p+1, (chain+n-1) - partial);
+		}
+	}
+	/* Clear the ends of indirect blocks on the shared branch */
+	while (partial > chain) {
+		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
+				   (__le32*)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+		BUFFER_TRACE(partial->bh, "call brelse");
+		brelse (partial->bh);
+		partial--;
+	}
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
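+		/*
+		 * The cases below fall through on purpose: wherever the
+		 * truncation point sits, all of the (whole) trees at deeper
+		 * indirection levels must be freed as well.
+		 */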
+		default:
+			nr = i_data[EXT3_IND_BLOCK];
+			if (nr) {
+				ext3_free_branches(handle, inode, NULL,
+						   &nr, &nr+1, 1);
+				i_data[EXT3_IND_BLOCK] = 0;
+			}
+		case EXT3_IND_BLOCK:
+			nr = i_data[EXT3_DIND_BLOCK];
+			if (nr) {
+				ext3_free_branches(handle, inode, NULL,
+						   &nr, &nr+1, 2);
+				i_data[EXT3_DIND_BLOCK] = 0;
+			}
+		case EXT3_DIND_BLOCK:
+			nr = i_data[EXT3_TIND_BLOCK];
+			if (nr) {
+				ext3_free_branches(handle, inode, NULL,
+						   &nr, &nr+1, 3);
+				i_data[EXT3_TIND_BLOCK] = 0;
+			}
+		case EXT3_TIND_BLOCK:
+			;
+	}
+
+	ext3_discard_reservation(inode);
+
+	up(&ei->truncate_sem);
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	ext3_mark_inode_dirty(handle, inode);
+
+	/* In a multi-transaction truncate, we only make the final
+	 * transaction synchronous */
+	if (IS_SYNC(inode))
+		handle->h_sync = 1;
+out_stop:
+	/*
+	 * If this was a simple ftruncate(), and the file will remain alive
+	 * then we need to clear up the orphan record which we created above.
+	 * However, if this was a real unlink then we were called by
+	 * ext3_delete_inode(), and we allow that function to clean up the
+	 * orphan info for us.
+	 */
+	if (inode->i_nlink)
+		ext3_orphan_del(handle, inode);
+
+	ext3_journal_stop(handle);
+}
+
+static unsigned long ext3_get_inode_block(struct super_block *sb,
+		unsigned long ino, struct ext3_iloc *iloc)
+{
+	unsigned long desc, group_desc, block_group;
+	unsigned long offset, block;
+	struct buffer_head *bh;
+	struct ext3_group_desc * gdp;
+
+
+	if ((ino != EXT3_ROOT_INO &&
+		ino != EXT3_JOURNAL_INO &&
+		ino != EXT3_RESIZE_INO &&
+		ino < EXT3_FIRST_INO(sb)) ||
+		ino > le32_to_cpu(
+			EXT3_SB(sb)->s_es->s_inodes_count)) {
+		ext3_error (sb, "ext3_get_inode_block",
+			    "bad inode number: %lu", ino);
+		return 0;
+	}
+	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
+	if (block_group >= EXT3_SB(sb)->s_groups_count) {
+		ext3_error (sb, "ext3_get_inode_block",
+			    "group >= groups count");
+		return 0;
+	}
+	smp_rmb();
+	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
+	desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
+	bh = EXT3_SB(sb)->s_group_desc[group_desc];
+	if (!bh) {
+		ext3_error (sb, "ext3_get_inode_block",
+			    "Descriptor not loaded");
+		return 0;
+	}
+
+	gdp = (struct ext3_group_desc *) bh->b_data;
+	/*
+	 * Figure out the offset within the block group inode table
+	 */
+	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
+		EXT3_INODE_SIZE(sb);
+	block = le32_to_cpu(gdp[desc].bg_inode_table) +
+		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
+
+	iloc->block_group = block_group;
+	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
+	return block;
+}
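+
+/*
+ * Worked example for ext3_get_inode_block() (illustrative figures only):
+ * with 1k blocks, 128-byte inodes and 2048 inodes per group, inode 5000
+ * falls in block group (5000 - 1) / 2048 = 2, at byte offset
+ * (4999 % 2048) * 128 = 115584 into that group's inode table, i.e. block
+ * 112 of the table with iloc->offset = 115584 % 1024 = 896.
+ */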
+
+/*
+ * ext3_get_inode_loc returns with an extra refcount against the inode's
+ * underlying buffer_head on success. If 'in_mem' is true, we have all
+ * data in memory that is needed to recreate the on-disk version of this
+ * inode.
+ */
+static int __ext3_get_inode_loc(struct inode *inode,
+				struct ext3_iloc *iloc, int in_mem)
+{
+	unsigned long block;
+	struct buffer_head *bh;
+
+	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
+	if (!block)
+		return -EIO;
+
+	bh = sb_getblk(inode->i_sb, block);
+	if (!bh) {
+		ext3_error (inode->i_sb, "ext3_get_inode_loc",
+				"unable to read inode block - "
+				"inode=%lu, block=%lu", inode->i_ino, block);
+		return -EIO;
+	}
+	if (!buffer_uptodate(bh)) {
+		lock_buffer(bh);
+		if (buffer_uptodate(bh)) {
+			/* someone brought it uptodate while we waited */
+			unlock_buffer(bh);
+			goto has_buffer;
+		}
+
+		/*
+		 * If we have all information of the inode in memory and this
+		 * is the only valid inode in the block, we need not read the
+		 * block.
+		 */
+		if (in_mem) {
+			struct buffer_head *bitmap_bh;
+			struct ext3_group_desc *desc;
+			int inodes_per_buffer;
+			int inode_offset, i;
+			int block_group;
+			int start;
+
+			block_group = (inode->i_ino - 1) /
+					EXT3_INODES_PER_GROUP(inode->i_sb);
+			inodes_per_buffer = bh->b_size /
+				EXT3_INODE_SIZE(inode->i_sb);
+			inode_offset = ((inode->i_ino - 1) %
+					EXT3_INODES_PER_GROUP(inode->i_sb));
+			start = inode_offset & ~(inodes_per_buffer - 1);
+
+			/* Is the inode bitmap in cache? */
+			desc = ext3_get_group_desc(inode->i_sb,
+						block_group, NULL);
+			if (!desc)
+				goto make_io;
+
+			bitmap_bh = sb_getblk(inode->i_sb,
+					le32_to_cpu(desc->bg_inode_bitmap));
+			if (!bitmap_bh)
+				goto make_io;
+
+			/*
+			 * If the inode bitmap isn't in cache then the
+			 * optimisation may end up performing two reads instead
+			 * of one, so skip it.
+			 */
+			if (!buffer_uptodate(bitmap_bh)) {
+				brelse(bitmap_bh);
+				goto make_io;
+			}
+			for (i = start; i < start + inodes_per_buffer; i++) {
+				if (i == inode_offset)
+					continue;
+				if (ext3_test_bit(i, bitmap_bh->b_data))
+					break;
+			}
+			brelse(bitmap_bh);
+			if (i == start + inodes_per_buffer) {
+				/* all other inodes are free, so skip I/O */
+				memset(bh->b_data, 0, bh->b_size);
+				set_buffer_uptodate(bh);
+				unlock_buffer(bh);
+				goto has_buffer;
+			}
+		}
+
+make_io:
+		/*
+		 * There are other valid inodes in the buffer, this inode
+		 * has in-inode xattrs, or we don't have this inode in memory.
+		 * Read the block from disk.
+		 */
+		get_bh(bh);
+		bh->b_end_io = end_buffer_read_sync;
+		submit_bh(READ, bh);
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh)) {
+			ext3_error(inode->i_sb, "ext3_get_inode_loc",
+					"unable to read inode block - "
+					"inode=%lu, block=%lu",
+					inode->i_ino, block);
+			brelse(bh);
+			return -EIO;
+		}
+	}
+has_buffer:
+	iloc->bh = bh;
+	return 0;
+}
+
+int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
+{
+	/* We have all inode data except xattrs in memory here. */
+	return __ext3_get_inode_loc(inode, iloc,
+		!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+}
+
+void ext3_set_inode_flags(struct inode *inode)
+{
+	unsigned int flags = EXT3_I(inode)->i_flags;
+
+	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	if (flags & EXT3_SYNC_FL)
+		inode->i_flags |= S_SYNC;
+	if (flags & EXT3_APPEND_FL)
+		inode->i_flags |= S_APPEND;
+	if (flags & EXT3_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT3_NOATIME_FL)
+		inode->i_flags |= S_NOATIME;
+	if (flags & EXT3_DIRSYNC_FL)
+		inode->i_flags |= S_DIRSYNC;
+}
+
+void ext3_read_inode(struct inode * inode)
+{
+	struct ext3_iloc iloc;
+	struct ext3_inode *raw_inode;
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct buffer_head *bh;
+	int block;
+
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+	ei->i_acl = EXT3_ACL_NOT_CACHED;
+	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+#endif
+	ei->i_block_alloc_info = NULL;
+
+	if (__ext3_get_inode_loc(inode, &iloc, 0))
+		goto bad_inode;
+	bh = iloc.bh;
+	raw_inode = ext3_raw_inode(&iloc);
+	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+	if(!(test_opt (inode->i_sb, NO_UID32))) {
+		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+	}
+	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+	inode->i_size = le32_to_cpu(raw_inode->i_size);
+	inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
+	inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
+	inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
+	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
+
+	ei->i_state = 0;
+	ei->i_dir_start_lookup = 0;
+	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
+	/* We now have enough fields to check if the inode was active or not.
+	 * This is needed because nfsd might try to access dead inodes;
+	 * the test is the same one that e2fsck uses.
+	 * NeilBrown 1999oct15
+	 */
+	if (inode->i_nlink == 0) {
+		if (inode->i_mode == 0 ||
+		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
+			/* this inode is deleted */
+			brelse (bh);
+			goto bad_inode;
+		}
+		/* The only unlinked inodes we let through here have
+		 * valid i_mode and are being read by the orphan
+		 * recovery code: that's fine, we're about to complete
+		 * the process of deleting those. */
+	}
+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size
+					 * (for stat), not the fs block
+					 * size */  
+	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+#ifdef EXT3_FRAGMENTS
+	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
+	ei->i_frag_no = raw_inode->i_frag;
+	ei->i_frag_size = raw_inode->i_fsize;
+#endif
+	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
+	if (!S_ISREG(inode->i_mode)) {
+		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+	} else {
+		inode->i_size |=
+			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
+	}
+	ei->i_disksize = inode->i_size;
+	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+	ei->i_block_group = iloc.block_group;
+	/*
+	 * NOTE! The in-memory inode i_data array is in little-endian order
+	 * even on big-endian machines: we do NOT byteswap the block numbers!
+	 */
+	for (block = 0; block < EXT3_N_BLOCKS; block++)
+		ei->i_data[block] = raw_inode->i_block[block];
+	INIT_LIST_HEAD(&ei->i_orphan);
+
+	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
+	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
+		/*
+		 * When mke2fs creates big inodes it does not zero out
+		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
+		 * so ignore those first few inodes.
+		 */
+		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+		    EXT3_INODE_SIZE(inode->i_sb))
+			goto bad_inode;
+		if (ei->i_extra_isize == 0) {
+			/* The extra space is currently unused. Use it. */
+			ei->i_extra_isize = sizeof(struct ext3_inode) -
+					    EXT3_GOOD_OLD_INODE_SIZE;
+		} else {
+			__le32 *magic = (void *)raw_inode +
+					EXT3_GOOD_OLD_INODE_SIZE +
+					ei->i_extra_isize;
+			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
+				 ei->i_state |= EXT3_STATE_XATTR;
+		}
+	} else
+		ei->i_extra_isize = 0;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ext3_file_inode_operations;
+		inode->i_fop = &ext3_file_operations;
+		ext3_set_aops(inode);
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ext3_dir_inode_operations;
+		inode->i_fop = &ext3_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (ext3_inode_is_fast_symlink(inode))
+			inode->i_op = &ext3_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &ext3_symlink_inode_operations;
+			ext3_set_aops(inode);
+		}
+	} else {
+		inode->i_op = &ext3_special_inode_operations;
+		if (raw_inode->i_block[0])
+			init_special_inode(inode, inode->i_mode,
+			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
+		else 
+			init_special_inode(inode, inode->i_mode,
+			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
+	}
+	brelse (iloc.bh);
+	ext3_set_inode_flags(inode);
+	return;
+
+bad_inode:
+	make_bad_inode(inode);
+	return;
+}
+
+/*
+ * Post the struct inode info into an on-disk inode location in the
+ * buffer-cache.  This gobbles the caller's reference to the
+ * buffer_head in the inode location struct.
+ *
+ * The caller must have write access to iloc->bh.
+ */
+static int ext3_do_update_inode(handle_t *handle, 
+				struct inode *inode, 
+				struct ext3_iloc *iloc)
+{
+	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct buffer_head *bh = iloc->bh;
+	int err = 0, rc, block;
+
+	/* For fields not tracked in the in-memory inode,
+	 * initialise them to zero for new inodes. */
+	if (ei->i_state & EXT3_STATE_NEW)
+		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
+
+	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+	if(!(test_opt(inode->i_sb, NO_UID32))) {
+		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+/*
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+		if(!ei->i_dtime) {
+			raw_inode->i_uid_high =
+				cpu_to_le16(high_16_bits(inode->i_uid));
+			raw_inode->i_gid_high =
+				cpu_to_le16(high_16_bits(inode->i_gid));
+		} else {
+			raw_inode->i_uid_high = 0;
+			raw_inode->i_gid_high = 0;
+		}
+	} else {
+		raw_inode->i_uid_low =
+			cpu_to_le16(fs_high2lowuid(inode->i_uid));
+		raw_inode->i_gid_low =
+			cpu_to_le16(fs_high2lowgid(inode->i_gid));
+		raw_inode->i_uid_high = 0;
+		raw_inode->i_gid_high = 0;
+	}
+	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+#ifdef EXT3_FRAGMENTS
+	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
+	raw_inode->i_frag = ei->i_frag_no;
+	raw_inode->i_fsize = ei->i_frag_size;
+#endif
+	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
+	if (!S_ISREG(inode->i_mode)) {
+		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+	} else {
+		raw_inode->i_size_high =
+			cpu_to_le32(ei->i_disksize >> 32);
+		if (ei->i_disksize > 0x7fffffffULL) {
+			struct super_block *sb = inode->i_sb;
+			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
+					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
+			    EXT3_SB(sb)->s_es->s_rev_level ==
+					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
+			       /* If this is the first large file
+				* created, add a flag to the superblock.
+				*/
+				err = ext3_journal_get_write_access(handle,
+						EXT3_SB(sb)->s_sbh);
+				if (err)
+					goto out_brelse;
+				ext3_update_dynamic_rev(sb);
+				EXT3_SET_RO_COMPAT_FEATURE(sb,
+					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
+				sb->s_dirt = 1;
+				handle->h_sync = 1;
+				err = ext3_journal_dirty_metadata(handle,
+						EXT3_SB(sb)->s_sbh);
+			}
+		}
+	}
+	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		if (old_valid_dev(inode->i_rdev)) {
+			raw_inode->i_block[0] =
+				cpu_to_le32(old_encode_dev(inode->i_rdev));
+			raw_inode->i_block[1] = 0;
+		} else {
+			raw_inode->i_block[0] = 0;
+			raw_inode->i_block[1] =
+				cpu_to_le32(new_encode_dev(inode->i_rdev));
+			raw_inode->i_block[2] = 0;
+		}
+	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
+		raw_inode->i_block[block] = ei->i_data[block];
+
+	if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
+		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
+
+	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+	rc = ext3_journal_dirty_metadata(handle, bh);
+	if (!err)
+		err = rc;
+	ei->i_state &= ~EXT3_STATE_NEW;
+
+out_brelse:
+	brelse (bh);
+	ext3_std_error(inode->i_sb, err);
+	return err;
+}
+
+/*
+ * ext3_write_inode()
+ *
+ * We are called from a few places:
+ *
+ * - Within generic_file_write() for O_SYNC files.
+ *   Here, there will be no transaction running. We wait for any running
+ *   transaction to commit.
+ *
+ * - Within sys_sync(), kupdate and such.
+ *   We wait on commit, if told to.
+ *
+ * - Within prune_icache() (PF_MEMALLOC == true)
+ *   Here we simply return.  We can't afford to block kswapd on the
+ *   journal commit.
+ *
+ * In all cases it is actually safe for us to return without doing anything,
+ * because the inode has been copied into a raw inode buffer in
+ * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
+ * knfsd.
+ *
+ * Note that we are absolutely dependent upon all inode dirtiers doing the
+ * right thing: they *must* call mark_inode_dirty() after dirtying info in
+ * which we are interested.
+ *
+ * It would be a bug for them to not do this.  The code:
+ *
+ *	mark_inode_dirty(inode)
+ *	stuff();
+ *	inode->i_size = expr;
+ *
+ * is in error because a kswapd-driven write_inode() could occur while
+ * `stuff()' is running, and the new i_size will be lost.  Plus the inode
+ * will no longer be on the superblock's dirty inode list.
+ */
+int ext3_write_inode(struct inode *inode, int wait)
+{
+	if (current->flags & PF_MEMALLOC)
+		return 0;
+
+	if (ext3_journal_current_handle()) {
+		jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
+		dump_stack();
+		return -EIO;
+	}
+
+	if (!wait)
+		return 0;
+
+	return ext3_force_commit(inode->i_sb);
+}
+
+/*
+ * ext3_setattr()
+ *
+ * Called from notify_change.
+ *
+ * We want to trap VFS attempts to truncate the file as soon as
+ * possible.  In particular, we want to make sure that when the VFS
+ * shrinks i_size, we put the inode on the orphan list and modify
+ * i_disksize immediately, so that during the subsequent flushing of
+ * dirty pages and freeing of disk blocks, we can guarantee that any
+ * commit will leave the blocks being flushed in an unused state on
+ * disk.  (On recovery, the inode will get truncated and the blocks will
+ * be freed, so we have a strong guarantee that no future commit will
+ * leave these blocks visible to the user.)  
+ *
+ * Called with inode->sem down.
+ */
+int ext3_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error, rc = 0;
+	const unsigned int ia_valid = attr->ia_valid;
+
+	error = inode_change_ok(inode, attr);
+	if (error)
+		return error;
+
+	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+		handle_t *handle;
+
+		/* (user+group)*(old+new) structure, inode write (sb,
+		 * inode block, ? - but truncate inode update has it) */
+		handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
+		if (IS_ERR(handle)) {
+			error = PTR_ERR(handle);
+			goto err_out;
+		}
+		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+		if (error) {
+			ext3_journal_stop(handle);
+			return error;
+		}
+		/* Update corresponding info in inode so that everything is in
+		 * one transaction */
+		if (attr->ia_valid & ATTR_UID)
+			inode->i_uid = attr->ia_uid;
+		if (attr->ia_valid & ATTR_GID)
+			inode->i_gid = attr->ia_gid;
+		error = ext3_mark_inode_dirty(handle, inode);
+		ext3_journal_stop(handle);
+	}
+
+	if (S_ISREG(inode->i_mode) &&
+	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+		handle_t *handle;
+
+		handle = ext3_journal_start(inode, 3);
+		if (IS_ERR(handle)) {
+			error = PTR_ERR(handle);
+			goto err_out;
+		}
+
+		error = ext3_orphan_add(handle, inode);
+		EXT3_I(inode)->i_disksize = attr->ia_size;
+		rc = ext3_mark_inode_dirty(handle, inode);
+		if (!error)
+			error = rc;
+		ext3_journal_stop(handle);
+	}
+
+	rc = inode_setattr(inode, attr);
+
+	/* If inode_setattr's call to ext3_truncate failed to get a
+	 * transaction handle at all, we need to clean up the in-core
+	 * orphan list manually. */
+	if (inode->i_nlink)
+		ext3_orphan_del(NULL, inode);
+
+	if (!rc && (ia_valid & ATTR_MODE))
+		rc = ext3_acl_chmod(inode);
+
+err_out:
+	ext3_std_error(inode->i_sb, error);
+	if (!error)
+		error = rc;
+	return error;
+}
+
+
+/*
+ * akpm: how many blocks doth make a writepage()?
+ *
+ * With N blocks per page, it may be:
+ * N data blocks
+ * 2 indirect blocks
+ * 2 dindirect blocks
+ * 1 tindirect block
+ * N+5 bitmap blocks (from the above)
+ * N+5 group descriptor summary blocks
+ * 1 inode block
+ * 1 superblock.
+ * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
+ *
+ * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
+ *
+ * With ordered or writeback data it's the same, less the N data blocks.
+ *
+ * If the inode's direct blocks can hold an integral number of pages then a
+ * page cannot straddle two indirect blocks, and we can only touch one indirect
+ * and dindirect block, and the "5" above becomes "3".
+ *
+ * This still overestimates under most circumstances.  If we were to pass the
+ * start and end offsets in here as well we could do block_to_path() on each
+ * block and work out the exact number of indirects which are touched.  Pah.
+ */
+
+static int ext3_writepage_trans_blocks(struct inode *inode)
+{
+	int bpp = ext3_journal_blocks_per_page(inode);
+	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
+	int ret;
+
+	if (ext3_should_journal_data(inode))
+		ret = 3 * (bpp + indirects) + 2;
+	else
+		ret = 2 * (bpp + indirects) + 2;
+
+#ifdef CONFIG_QUOTA
+	/* We know that structure was already allocated during DQUOT_INIT so
+	 * we will be updating only the data blocks + inodes */
+	ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
+#endif
+
+	return ret;
+}
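
As a rough worked example of the estimate above -- assuming a 4096-byte page
with 1024-byte blocks (bpp = 4), data journalling enabled and quota compiled
out -- the standalone sketch below redoes the same arithmetic with the
constants fixed; the numbers are illustrative only, not taken from this patch.

	#include <stdio.h>

	/* Mirrors ext3_writepage_trans_blocks() with EXT3_NDIR_BLOCKS = 12,
	 * so 12 % 4 == 0 and "indirects" collapses to 3. */
	int main(void)
	{
		int bpp = 4;			/* assumed: 4K page, 1K blocks */
		int indirects = (12 % bpp) ? 5 : 3;
		int journalled = 3 * (bpp + indirects) + 2;	/* -> 23 credits */
		int ordered    = 2 * (bpp + indirects) + 2;	/* -> 16 credits */

		printf("journalled data: %d credits, ordered/writeback: %d credits\n",
		       journalled, ordered);
		return 0;
	}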
+
+/*
+ * The caller must have previously called ext3_reserve_inode_write().
+ * Given this, we know that the caller already has write access to iloc->bh.
+ */
+int ext3_mark_iloc_dirty(handle_t *handle,
+		struct inode *inode, struct ext3_iloc *iloc)
+{
+	int err = 0;
+
+	/* the do_update_inode consumes one bh->b_count */
+	get_bh(iloc->bh);
+
+	/* ext3_do_update_inode() does journal_dirty_metadata */
+	err = ext3_do_update_inode(handle, inode, iloc);
+	put_bh(iloc->bh);
+	return err;
+}
+
+/* 
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh.  This _must_ be cleaned up later.
+ */
+
+int
+ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
+			 struct ext3_iloc *iloc)
+{
+	int err = 0;
+	if (handle) {
+		err = ext3_get_inode_loc(inode, iloc);
+		if (!err) {
+			BUFFER_TRACE(iloc->bh, "get_write_access");
+			err = ext3_journal_get_write_access(handle, iloc->bh);
+			if (err) {
+				brelse(iloc->bh);
+				iloc->bh = NULL;
+			}
+		}
+	}
+	ext3_std_error(inode->i_sb, err);
+	return err;
+}
+
+/*
+ * akpm: What we do here is to mark the in-core inode as clean
+ * with respect to inode dirtiness (it may still be data-dirty).
+ * This means that the in-core inode may be reaped by prune_icache
+ * without having to perform any I/O.  This is a very good thing,
+ * because *any* task may call prune_icache - even ones which
+ * have a transaction open against a different journal.
+ *
+ * Is this cheating?  Not really.  Sure, we haven't written the
+ * inode out, but prune_icache isn't a user-visible syncing function.
+ * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
+ * we start and wait on commits.
+ *
+ * Is this efficient/effective?  Well, we're being nice to the system
+ * by cleaning up our inodes proactively so they can be reaped
+ * without I/O.  But we are potentially leaving up to five seconds'
+ * worth of inodes floating about which prune_icache wants us to
+ * write out.  One way to fix that would be to get prune_icache()
+ * to do a write_super() to free up some memory.  It has the desired
+ * effect.
+ */
+int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
+{
+	struct ext3_iloc iloc;
+	int err;
+
+	might_sleep();
+	err = ext3_reserve_inode_write(handle, inode, &iloc);
+	if (!err)
+		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+	return err;
+}
+
+/*
+ * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
+ *
+ * We're really interested in the case where a file is being extended.
+ * i_size has been changed by generic_commit_write() and we thus need
+ * to include the updated inode in the current transaction.
+ *
+ * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * are allocated to the file.
+ *
+ * If the inode is marked synchronous, we don't honour that here - doing
+ * so would cause a commit on atime updates, which we don't bother doing.
+ * We handle synchronous inodes at the highest possible level.
+ */
+void ext3_dirty_inode(struct inode *inode)
+{
+	handle_t *current_handle = ext3_journal_current_handle();
+	handle_t *handle;
+
+	handle = ext3_journal_start(inode, 2);
+	if (IS_ERR(handle))
+		goto out;
+	if (current_handle &&
+		current_handle->h_transaction != handle->h_transaction) {
+		/* This task has a transaction open against a different fs */
+		printk(KERN_EMERG "%s: transactions do not match!\n",
+		       __FUNCTION__);
+	} else {
+		jbd_debug(5, "marking dirty.  outer handle=%p\n",
+				current_handle);
+		ext3_mark_inode_dirty(handle, inode);
+	}
+	ext3_journal_stop(handle);
+out:
+	return;
+}
+
+#ifdef AKPM
+/* 
+ * Bind an inode's backing buffer_head into this transaction, to prevent
+ * it from being flushed to disk early.  Unlike
+ * ext3_reserve_inode_write, this leaves behind no bh reference and
+ * returns no iloc structure, so the caller needs to repeat the iloc
+ * lookup to mark the inode dirty later.
+ */
+static inline int
+ext3_pin_inode(handle_t *handle, struct inode *inode)
+{
+	struct ext3_iloc iloc;
+
+	int err = 0;
+	if (handle) {
+		err = ext3_get_inode_loc(inode, &iloc);
+		if (!err) {
+			BUFFER_TRACE(iloc.bh, "get_write_access");
+			err = journal_get_write_access(handle, iloc.bh);
+			if (!err)
+				err = ext3_journal_dirty_metadata(handle, 
+								  iloc.bh);
+			brelse(iloc.bh);
+		}
+	}
+	ext3_std_error(inode->i_sb, err);
+	return err;
+}
+#endif
+
+int ext3_change_inode_journal_flag(struct inode *inode, int val)
+{
+	journal_t *journal;
+	handle_t *handle;
+	int err;
+
+	/*
+	 * We have to be very careful here: changing a data block's
+	 * journaling status dynamically is dangerous.  If we write a
+	 * data block to the journal, change the status and then delete
+	 * that block, we risk forgetting to revoke the old log record
+	 * from the journal and so a subsequent replay can corrupt data.
+	 * So, first we make sure that the journal is empty and that
+	 * nobody is changing anything.
+	 */
+
+	journal = EXT3_JOURNAL(inode);
+	if (is_journal_aborted(journal) || IS_RDONLY(inode))
+		return -EROFS;
+
+	journal_lock_updates(journal);
+	journal_flush(journal);
+
+	/*
+	 * OK, there are no updates running now, and all cached data is
+	 * synced to disk.  We are now in a completely consistent state
+	 * which doesn't have anything in the journal, and we know that
+	 * no filesystem updates are running, so it is safe to modify
+	 * the inode's in-core data-journaling state flag now.
+	 */
+
+	if (val)
+		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
+	else
+		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
+	ext3_set_aops(inode);
+
+	journal_unlock_updates(journal);
+
+	/* Finally we can mark the inode as dirty. */
+
+	handle = ext3_journal_start(inode, 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	err = ext3_mark_inode_dirty(handle, inode);
+	handle->h_sync = 1;
+	ext3_journal_stop(handle);
+	ext3_std_error(inode->i_sb, err);
+
+	return err;
+}
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
new file mode 100644
index 0000000..706d686
--- /dev/null
+++ b/fs/ext3/ioctl.c
@@ -0,0 +1,243 @@
+/*
+ * linux/fs/ext3/ioctl.c
+ *
+ * Copyright (C) 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/time.h>
+#include <asm/uaccess.h>
+
+
+int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	unsigned int flags;
+	unsigned short rsv_window_size;
+
+	ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+	switch (cmd) {
+	case EXT3_IOC_GETFLAGS:
+		flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
+		return put_user(flags, (int __user *) arg);
+	case EXT3_IOC_SETFLAGS: {
+		handle_t *handle = NULL;
+		int err;
+		struct ext3_iloc iloc;
+		unsigned int oldflags;
+		unsigned int jflag;
+
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EACCES;
+
+		if (get_user(flags, (int __user *) arg))
+			return -EFAULT;
+
+		if (!S_ISDIR(inode->i_mode))
+			flags &= ~EXT3_DIRSYNC_FL;
+
+		oldflags = ei->i_flags;
+
+		/* The JOURNAL_DATA flag is modifiable only by root */
+		jflag = flags & EXT3_JOURNAL_DATA_FL;
+
+		/*
+		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+		 * the relevant capability.
+		 *
+		 * This test looks nicer. Thanks to Pauline Middelink
+		 */
+		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
+			if (!capable(CAP_LINUX_IMMUTABLE))
+				return -EPERM;
+		}
+
+		/*
+		 * The JOURNAL_DATA flag can only be changed by
+		 * the relevant capability.
+		 */
+		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
+			if (!capable(CAP_SYS_RESOURCE))
+				return -EPERM;
+		}
+
+
+		handle = ext3_journal_start(inode, 1);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		if (IS_SYNC(inode))
+			handle->h_sync = 1;
+		err = ext3_reserve_inode_write(handle, inode, &iloc);
+		if (err)
+			goto flags_err;
+
+		flags = flags & EXT3_FL_USER_MODIFIABLE;
+		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
+		ei->i_flags = flags;
+
+		ext3_set_inode_flags(inode);
+		inode->i_ctime = CURRENT_TIME_SEC;
+
+		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+		ext3_journal_stop(handle);
+		if (err)
+			return err;
+
+		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
+			err = ext3_change_inode_journal_flag(inode, jflag);
+		return err;
+	}
+	case EXT3_IOC_GETVERSION:
+	case EXT3_IOC_GETVERSION_OLD:
+		return put_user(inode->i_generation, (int __user *) arg);
+	case EXT3_IOC_SETVERSION:
+	case EXT3_IOC_SETVERSION_OLD: {
+		handle_t *handle;
+		struct ext3_iloc iloc;
+		__u32 generation;
+		int err;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EPERM;
+		if (IS_RDONLY(inode))
+			return -EROFS;
+		if (get_user(generation, (int __user *) arg))
+			return -EFAULT;
+
+		handle = ext3_journal_start(inode, 1);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		err = ext3_reserve_inode_write(handle, inode, &iloc);
+		if (err == 0) {
+			inode->i_ctime = CURRENT_TIME_SEC;
+			inode->i_generation = generation;
+			err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+		}
+		ext3_journal_stop(handle);
+		return err;
+	}
+#ifdef CONFIG_JBD_DEBUG
+	case EXT3_IOC_WAIT_FOR_READONLY:
+		/*
+		 * This is racy - by the time we're woken up and running,
+		 * the superblock could be released.  And the module could
+		 * have been unloaded.  So sue me.
+		 *
+		 * Returns 1 if it slept, else zero.
+		 */
+		{
+			struct super_block *sb = inode->i_sb;
+			DECLARE_WAITQUEUE(wait, current);
+			int ret = 0;
+
+			set_current_state(TASK_INTERRUPTIBLE);
+			add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
+			if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) {
+				schedule();
+				ret = 1;
+			}
+			remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
+			return ret;
+		}
+#endif
+	case EXT3_IOC_GETRSVSZ:
+		if (test_opt(inode->i_sb, RESERVATION)
+			&& S_ISREG(inode->i_mode)
+			&& ei->i_block_alloc_info) {
+			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
+			return put_user(rsv_window_size, (int __user *)arg);
+		}
+		return -ENOTTY;
+	case EXT3_IOC_SETRSVSZ: {
+
+		if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
+			return -ENOTTY;
+
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EACCES;
+
+		if (get_user(rsv_window_size, (int __user *)arg))
+			return -EFAULT;
+
+		if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
+			rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;
+
+		/*
+		 * We need to allocate the reservation structure for this
+		 * inode before setting the window size.
+		 */
+		down(&ei->truncate_sem);
+		if (!ei->i_block_alloc_info)
+			ext3_init_block_alloc_info(inode);
+
+		if (ei->i_block_alloc_info) {
+			struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
+			rsv->rsv_goal_size = rsv_window_size;
+		}
+		up(&ei->truncate_sem);
+		return 0;
+	}
+	case EXT3_IOC_GROUP_EXTEND: {
+		unsigned long n_blocks_count;
+		struct super_block *sb = inode->i_sb;
+		int err;
+
+		if (!capable(CAP_SYS_RESOURCE))
+			return -EPERM;
+
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if (get_user(n_blocks_count, (__u32 __user *)arg))
+			return -EFAULT;
+
+		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
+		journal_lock_updates(EXT3_SB(sb)->s_journal);
+		journal_flush(EXT3_SB(sb)->s_journal);
+		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+
+		return err;
+	}
+	case EXT3_IOC_GROUP_ADD: {
+		struct ext3_new_group_data input;
+		struct super_block *sb = inode->i_sb;
+		int err;
+
+		if (!capable(CAP_SYS_RESOURCE))
+			return -EPERM;
+
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
+				sizeof(input)))
+			return -EFAULT;
+
+		err = ext3_group_add(sb, &input);
+		journal_lock_updates(EXT3_SB(sb)->s_journal);
+		journal_flush(EXT3_SB(sb)->s_journal);
+		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+
+		return err;
+	}
+
+
+	default:
+		return -ENOTTY;
+	}
+}
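
For context, a minimal userspace sketch of driving the GETFLAGS/SETFLAGS pair
above might look as follows.  It assumes the EXT3_IOC_* and EXT3_*_FL
definitions are visible to userspace (e.g. through <linux/ext3_fs.h>) and is
illustrative only; as enforced above, flipping EXT3_APPEND_FL needs
CAP_LINUX_IMMUTABLE.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ext3_fs.h>	/* assumed to provide EXT3_IOC_* and EXT3_*_FL */

	int main(int argc, char **argv)
	{
		int flags;
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <file on ext3>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || ioctl(fd, EXT3_IOC_GETFLAGS, &flags) < 0) {
			perror("EXT3_IOC_GETFLAGS");
			return 1;
		}
		flags |= EXT3_APPEND_FL;	/* needs CAP_LINUX_IMMUTABLE */
		if (ioctl(fd, EXT3_IOC_SETFLAGS, &flags) < 0) {
			perror("EXT3_IOC_SETFLAGS");
			return 1;
		}
		printf("flags are now %#x\n", flags);
		return 0;
	}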
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
new file mode 100644
index 0000000..79742d8
--- /dev/null
+++ b/fs/ext3/namei.c
@@ -0,0 +1,2378 @@
+/*
+ *  linux/fs/ext3/namei.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *  Directory entry file type support and forward compatibility hooks
+ *  	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
+ *  Hash Tree Directory indexing (c)
+ *  	Daniel Phillips, 2001
+ *  Hash Tree Directory indexing porting
+ *  	Christopher Li, 2002
+ *  Hash Tree Directory indexing cleanup
+ * 	Theodore Ts'o, 2002
+ */
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/jbd.h>
+#include <linux/time.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * define how far ahead to read directories while searching them.
+ */
+#define NAMEI_RA_CHUNKS  2
+#define NAMEI_RA_BLOCKS  4
+#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
+
+static struct buffer_head *ext3_append(handle_t *handle,
+					struct inode *inode,
+					u32 *block, int *err)
+{
+	struct buffer_head *bh;
+
+	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+
+	if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
+		inode->i_size += inode->i_sb->s_blocksize;
+		EXT3_I(inode)->i_disksize = inode->i_size;
+		ext3_journal_get_write_access(handle,bh);
+	}
+	return bh;
+}
+
+#ifndef assert
+#define assert(test) J_ASSERT(test)
+#endif
+
+#ifndef swap
+#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
+#endif
+
+#ifdef DX_DEBUG
+#define dxtrace(command) command
+#else
+#define dxtrace(command) 
+#endif
+
+struct fake_dirent
+{
+	__le32 inode;
+	__le16 rec_len;
+	u8 name_len;
+	u8 file_type;
+};
+
+struct dx_countlimit
+{
+	__le16 limit;
+	__le16 count;
+};
+
+struct dx_entry
+{
+	__le32 hash;
+	__le32 block;
+};
+
+/*
+ * dx_root_info is laid out so that if it should somehow get overlaid by a
+ * dirent the two low bits of the hash version will be zero.  Therefore, the
+ * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
+ */
+
+struct dx_root
+{
+	struct fake_dirent dot;
+	char dot_name[4];
+	struct fake_dirent dotdot;
+	char dotdot_name[4];
+	struct dx_root_info
+	{
+		__le32 reserved_zero;
+		u8 hash_version;
+		u8 info_length; /* 8 */
+		u8 indirect_levels;
+		u8 unused_flags;
+	}
+	info;
+	struct dx_entry	entries[0];
+};
+
+struct dx_node
+{
+	struct fake_dirent fake;
+	struct dx_entry	entries[0];
+};
+
+
+struct dx_frame
+{
+	struct buffer_head *bh;
+	struct dx_entry *entries;
+	struct dx_entry *at;
+};
+
+struct dx_map_entry
+{
+	u32 hash;
+	u32 offs;
+};
+
+#ifdef CONFIG_EXT3_INDEX
+static inline unsigned dx_get_block (struct dx_entry *entry);
+static void dx_set_block (struct dx_entry *entry, unsigned value);
+static inline unsigned dx_get_hash (struct dx_entry *entry);
+static void dx_set_hash (struct dx_entry *entry, unsigned value);
+static unsigned dx_get_count (struct dx_entry *entries);
+static unsigned dx_get_limit (struct dx_entry *entries);
+static void dx_set_count (struct dx_entry *entries, unsigned value);
+static void dx_set_limit (struct dx_entry *entries, unsigned value);
+static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
+static unsigned dx_node_limit (struct inode *dir);
+static struct dx_frame *dx_probe(struct dentry *dentry,
+				 struct inode *dir,
+				 struct dx_hash_info *hinfo,
+				 struct dx_frame *frame,
+				 int *err);
+static void dx_release (struct dx_frame *frames);
+static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
+			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+static void dx_sort_map(struct dx_map_entry *map, unsigned count);
+static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
+		struct dx_map_entry *offsets, int count);
+static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
+static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
+static int ext3_htree_next_block(struct inode *dir, __u32 hash,
+				 struct dx_frame *frame,
+				 struct dx_frame *frames, 
+				 __u32 *start_hash);
+static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
+		       struct ext3_dir_entry_2 **res_dir, int *err);
+static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
+			     struct inode *inode);
+
+/*
+ * Future: use high four bits of block for coalesce-on-delete flags
+ * Mask them off for now.
+ */
+
+static inline unsigned dx_get_block (struct dx_entry *entry)
+{
+	return le32_to_cpu(entry->block) & 0x00ffffff;
+}
+
+static inline void dx_set_block (struct dx_entry *entry, unsigned value)
+{
+	entry->block = cpu_to_le32(value);
+}
+
+static inline unsigned dx_get_hash (struct dx_entry *entry)
+{
+	return le32_to_cpu(entry->hash);
+}
+
+static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
+{
+	entry->hash = cpu_to_le32(value);
+}
+
+static inline unsigned dx_get_count (struct dx_entry *entries)
+{
+	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
+}
+
+static inline unsigned dx_get_limit (struct dx_entry *entries)
+{
+	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
+}
+
+static inline void dx_set_count (struct dx_entry *entries, unsigned value)
+{
+	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
+}
+
+static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
+{
+	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+}
+
+static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
+{
+	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
+		EXT3_DIR_REC_LEN(2) - infosize;
+	return 0? 20: entry_space / sizeof(struct dx_entry);
+}
+
+static inline unsigned dx_node_limit (struct inode *dir)
+{
+	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
+	return 0? 22: entry_space / sizeof(struct dx_entry);
+}
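
As a worked example of these limits, assuming a 4096-byte filesystem block:
the root block's fixed overhead is EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) +
the 8-byte dx_root_info, i.e. 12 + 12 + 8 = 32 bytes (the '.' and '..' fake
dirents plus the info header laid out in struct dx_root above), leaving
(4096 - 32) / 8 = 508 dx_entry slots; an interior node only pays the 8-byte
fake dirent, giving (4096 - 8) / 8 = 511 slots.  A small standalone sketch of
the same arithmetic, with the rec-len rounding re-derived locally:

	#include <stdio.h>

	#define DIR_ROUND		3
	#define DIR_REC_LEN(len)	(((len) + 8 + DIR_ROUND) & ~DIR_ROUND)

	int main(void)
	{
		unsigned blocksize = 4096;	/* assumed block size */
		unsigned infosize = 8;		/* sizeof dx_root_info */
		unsigned dx_entry_size = 8;	/* two __le32s: hash + block */
		unsigned root_space = blocksize - DIR_REC_LEN(1) -
				      DIR_REC_LEN(2) - infosize;
		unsigned node_space = blocksize - DIR_REC_LEN(0);

		printf("root limit: %u entries, node limit: %u entries\n",
		       root_space / dx_entry_size, node_space / dx_entry_size);
		return 0;
	}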
+
+/*
+ * Debug
+ */
+#ifdef DX_DEBUG
+static void dx_show_index (char * label, struct dx_entry *entries)
+{
+        int i, n = dx_get_count (entries);
+        printk("%s index ", label);
+        for (i = 0; i < n; i++)
+        {
+                printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
+        }
+        printk("\n");
+}
+
+struct stats
+{ 
+	unsigned names;
+	unsigned space;
+	unsigned bcount;
+};
+
+static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
+				 int size, int show_names)
+{
+	unsigned names = 0, space = 0;
+	char *base = (char *) de;
+	struct dx_hash_info h = *hinfo;
+
+	printk("names: ");
+	while ((char *) de < base + size)
+	{
+		if (de->inode)
+		{
+			if (show_names)
+			{
+				int len = de->name_len;
+				char *name = de->name;
+				while (len--) printk("%c", *name++);
+				ext3fs_dirhash(de->name, de->name_len, &h);
+				printk(":%x.%u ", h.hash,
+				       ((char *) de - base));
+			}
+			space += EXT3_DIR_REC_LEN(de->name_len);
+	 		names++;
+		}
+		de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
+	}
+	printk("(%i)\n", names);
+	return (struct stats) { names, space, 1 };
+}
+
+struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
+			     struct dx_entry *entries, int levels)
+{
+	unsigned blocksize = dir->i_sb->s_blocksize;
+	unsigned count = dx_get_count (entries), names = 0, space = 0, i;
+	unsigned bcount = 0;
+	struct buffer_head *bh;
+	int err;
+	printk("%i indexed blocks...\n", count);
+	for (i = 0; i < count; i++, entries++)
+	{
+		u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
+		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
+		struct stats stats;
+		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
+		if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
+		stats = levels?
+		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
+		   dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
+		names += stats.names;
+		space += stats.space;
+		bcount += stats.bcount;
+		brelse (bh);
+	}
+	if (bcount)
+		printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
+			names, space/bcount,(space/bcount)*100/blocksize);
+	return (struct stats) { names, space, bcount};
+}
+#endif /* DX_DEBUG */
+
+/*
+ * Probe for a directory leaf block to search.
+ *
+ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
+ * error in the directory index, and the caller should fall back to
+ * searching the directory normally.  The callers of dx_probe **MUST**
+ * check for this error code, and make sure it never gets reflected
+ * back to userspace.
+ */
+static struct dx_frame *
+dx_probe(struct dentry *dentry, struct inode *dir,
+	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
+{
+	unsigned count, indirect;
+	struct dx_entry *at, *entries, *p, *q, *m;
+	struct dx_root *root;
+	struct buffer_head *bh;
+	struct dx_frame *frame = frame_in;
+	u32 hash;
+
+	frame->bh = NULL;
+	if (dentry)
+		dir = dentry->d_parent->d_inode;
+	if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
+		goto fail;
+	root = (struct dx_root *) bh->b_data;
+	if (root->info.hash_version != DX_HASH_TEA &&
+	    root->info.hash_version != DX_HASH_HALF_MD4 &&
+	    root->info.hash_version != DX_HASH_LEGACY) {
+		ext3_warning(dir->i_sb, __FUNCTION__,
+			     "Unrecognised inode hash code %d",
+			     root->info.hash_version);
+		brelse(bh);
+		*err = ERR_BAD_DX_DIR;
+		goto fail;
+	}
+	hinfo->hash_version = root->info.hash_version;
+	hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
+	if (dentry)
+		ext3fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
+	hash = hinfo->hash;
+
+	if (root->info.unused_flags & 1) {
+		ext3_warning(dir->i_sb, __FUNCTION__,
+			     "Unimplemented inode hash flags: %#06x",
+			     root->info.unused_flags);
+		brelse(bh);
+		*err = ERR_BAD_DX_DIR;
+		goto fail;
+	}
+
+	if ((indirect = root->info.indirect_levels) > 1) {
+		ext3_warning(dir->i_sb, __FUNCTION__,
+			     "Unimplemented inode hash depth: %#06x",
+			     root->info.indirect_levels);
+		brelse(bh);
+		*err = ERR_BAD_DX_DIR;
+		goto fail;
+	}
+
+	entries = (struct dx_entry *) (((char *)&root->info) +
+				       root->info.info_length);
+	assert(dx_get_limit(entries) == dx_root_limit(dir,
+						      root->info.info_length));
+	dxtrace (printk("Look up %x", hash));
+	while (1)
+	{
+		count = dx_get_count(entries);
+		assert (count && count <= dx_get_limit(entries));
+		p = entries + 1;
+		q = entries + count - 1;
+		while (p <= q)
+		{
+			m = p + (q - p)/2;
+			dxtrace(printk("."));
+			if (dx_get_hash(m) > hash)
+				q = m - 1;
+			else
+				p = m + 1;
+		}
+
+		if (0) // linear search cross check
+		{
+			unsigned n = count - 1;
+			at = entries;
+			while (n--)
+			{
+				dxtrace(printk(","));
+				if (dx_get_hash(++at) > hash)
+				{
+					at--;
+					break;
+				}
+			}
+			assert (at == p - 1);
+		}
+
+		at = p - 1;
+		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
+		frame->bh = bh;
+		frame->entries = entries;
+		frame->at = at;
+		if (!indirect--) return frame;
+		if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
+			goto fail2;
+		at = entries = ((struct dx_node *) bh->b_data)->entries;
+		assert (dx_get_limit(entries) == dx_node_limit (dir));
+		frame++;
+	}
+fail2:
+	while (frame >= frame_in) {
+		brelse(frame->bh);
+		frame--;
+	}
+fail:
+	return NULL;
+}
+
+static void dx_release (struct dx_frame *frames)
+{
+	if (frames[0].bh == NULL)
+		return;
+
+	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
+		brelse(frames[1].bh);
+	brelse(frames[0].bh);
+}
+
+/*
+ * This function increments the frame pointer to search the next leaf
 + * block, and reads in any necessary intervening index nodes if the
 + * search needs to continue.  Whether or not it continues is
+ * controlled by the hash parameter.  If the hash value is even, then
+ * the search is only continued if the next block starts with that
+ * hash value.  This is used if we are searching for a specific file.
+ *
+ * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
+ *
+ * This function returns 1 if the caller should continue to search,
+ * or 0 if it should not.  If there is an error reading one of the
+ * index blocks, it will return a negative error code.
+ *
+ * If start_hash is non-null, it will be filled in with the starting
+ * hash of the next page.
+ */
+static int ext3_htree_next_block(struct inode *dir, __u32 hash,
+				 struct dx_frame *frame,
+				 struct dx_frame *frames, 
+				 __u32 *start_hash)
+{
+	struct dx_frame *p;
+	struct buffer_head *bh;
+	int err, num_frames = 0;
+	__u32 bhash;
+
+	p = frame;
+	/*
+	 * Find the next leaf page by incrementing the frame pointer.
+	 * If we run out of entries in the interior node, loop around and
+	 * increment pointer in the parent node.  When we break out of
+	 * this loop, num_frames indicates the number of interior
+	 * nodes that need to be read.
+	 */
+	while (1) {
+		if (++(p->at) < p->entries + dx_get_count(p->entries))
+			break;
+		if (p == frames)
+			return 0;
+		num_frames++;
+		p--;
+	}
+
+	/*
+	 * If the hash is 1, then continue only if the next page has a
+	 * continuation hash of any value.  This is used for readdir
+	 * handling.  Otherwise, check to see if the hash matches the
+	 * desired continuation hash.  If it doesn't match, return, since
+	 * there's no point in reading the successive index pages.
+	 */
+	bhash = dx_get_hash(p->at);
+	if (start_hash)
+		*start_hash = bhash;
+	if ((hash & 1) == 0) {
+		if ((bhash & ~1) != hash)
+			return 0;
+	}
+	/*
+	 * If the hash is HASH_NB_ALWAYS, we always go to the next
+	 * block so no check is necessary
+	 */
+	while (num_frames--) {
+		if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
+				      0, &err)))
+			return err; /* Failure */
+		p++;
+		brelse (p->bh);
+		p->bh = bh;
+		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+	}
+	return 1;
+}
+
+
+/*
+ * p is at least 6 bytes before the end of page
+ */
+static inline struct ext3_dir_entry_2 *ext3_next_entry(struct ext3_dir_entry_2 *p)
+{
+	return (struct ext3_dir_entry_2 *)((char*)p + le16_to_cpu(p->rec_len));
+}
+
+/*
+ * This function fills a red-black tree with information from a
+ * directory block.  It returns the number of directory entries loaded
+ * into the tree.  If there is an error it is returned in err.
+ */
+static int htree_dirblock_to_tree(struct file *dir_file,
+				  struct inode *dir, int block,
+				  struct dx_hash_info *hinfo,
+				  __u32 start_hash, __u32 start_minor_hash)
+{
+	struct buffer_head *bh;
+	struct ext3_dir_entry_2 *de, *top;
+	int err, count = 0;
+
+	dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
+	if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
+		return err;
+
+	de = (struct ext3_dir_entry_2 *) bh->b_data;
+	top = (struct ext3_dir_entry_2 *) ((char *) de +
+					   dir->i_sb->s_blocksize -
+					   EXT3_DIR_REC_LEN(0));
+	for (; de < top; de = ext3_next_entry(de)) {
+		ext3fs_dirhash(de->name, de->name_len, hinfo);
+		if ((hinfo->hash < start_hash) ||
+		    ((hinfo->hash == start_hash) &&
+		     (hinfo->minor_hash < start_minor_hash)))
+			continue;
+		if (de->inode == 0)
+			continue;
+		if ((err = ext3_htree_store_dirent(dir_file,
+				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
+			brelse(bh);
+			return err;
+		}
+		count++;
+	}
+	brelse(bh);
+	return count;
+}
+
+
+/*
+ * This function fills a red-black tree with information from a
+ * directory.  We start scanning the directory in hash order, starting
+ * at start_hash and start_minor_hash.
+ *
+ * This function returns the number of entries inserted into the tree,
+ * or a negative error code.
+ */
+int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+			 __u32 start_minor_hash, __u32 *next_hash)
+{
+	struct dx_hash_info hinfo;
+	struct ext3_dir_entry_2 *de;
+	struct dx_frame frames[2], *frame;
+	struct inode *dir;
+	int block, err;
+	int count = 0;
+	int ret;
+	__u32 hashval;
+
+	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
+		       start_minor_hash));
+	dir = dir_file->f_dentry->d_inode;
+	if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
+		hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
+		hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
+		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
+					       start_hash, start_minor_hash);
+		*next_hash = ~0;
+		return count;
+	}
+	hinfo.hash = start_hash;
+	hinfo.minor_hash = 0;
+	frame = dx_probe(NULL, dir_file->f_dentry->d_inode, &hinfo, frames, &err);
+	if (!frame)
+		return err;
+
+	/* Add '.' and '..' from the htree header */
+	if (!start_hash && !start_minor_hash) {
+		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
+		if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
+			goto errout;
+		count++;
+	}
+	if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
+		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
+		de = ext3_next_entry(de);
+		if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
+			goto errout;
+		count++;
+	}
+
+	while (1) {
+		block = dx_get_block(frame->at);
+		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
+					     start_hash, start_minor_hash);
+		if (ret < 0) {
+			err = ret;
+			goto errout;
+		}
+		count += ret;
+		hashval = ~0;
+		ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, 
+					    frame, frames, &hashval);
+		*next_hash = hashval;
+		if (ret < 0) {
+			err = ret;
+			goto errout;
+		}
+		/*
+		 * Stop if:  (a) there are no more entries, or
+		 * (b) we have inserted at least one entry and the
+		 * next hash value is not a continuation
+		 */
+		if ((ret == 0) ||
+		    (count && ((hashval & 1) == 0)))
+			break;
+	}
+	dx_release(frames);
+	dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n", 
+		       count, *next_hash));
+	return count;
+errout:
+	dx_release(frames);
+	return (err);
+}
+
+
+/*
+ * Directory block splitting, compacting
+ */
+
+static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
+			struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
+{
+	int count = 0;
+	char *base = (char *) de;
+	struct dx_hash_info h = *hinfo;
+
+	while ((char *) de < base + size)
+	{
+		if (de->name_len && de->inode) {
+			ext3fs_dirhash(de->name, de->name_len, &h);
+			map_tail--;
+			map_tail->hash = h.hash;
+			map_tail->offs = (u32) ((char *) de - base);
+			count++;
+			cond_resched();
+		}
+		/* XXX: do we need to check rec_len == 0 case? -Chris */
+		de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
+	}
+	return count;
+}
+
+static void dx_sort_map (struct dx_map_entry *map, unsigned count)
+{
+        struct dx_map_entry *p, *q, *top = map + count - 1;
+        int more;
+        /* Combsort until bubble sort doesn't suck */
+        while (count > 2)
+	{
+                count = count*10/13;
+                if (count - 9 < 2) /* 9, 10 -> 11 */
+                        count = 11;
+                for (p = top, q = p - count; q >= map; p--, q--)
+                        if (p->hash < q->hash)
+                                swap(*p, *q);
+        }
+        /* Garden variety bubble sort */
+        do {
+                more = 0;
+                q = top;
+                while (q-- > map)
+		{
+                        if (q[1].hash >= q[0].hash)
+				continue;
+                        swap(*(q+1), *q);
+                        more = 1;
+		}
+	} while(more);
+}
+
+static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
+{
+	struct dx_entry *entries = frame->entries;
+	struct dx_entry *old = frame->at, *new = old + 1;
+	int count = dx_get_count(entries);
+
+	assert(count < dx_get_limit(entries));
+	assert(old < entries + count);
+	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
+	dx_set_hash(new, hash);
+	dx_set_block(new, block);
+	dx_set_count(entries, count + 1);
+}
+#endif
+
+
+static void ext3_update_dx_flag(struct inode *inode)
+{
+	if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+				     EXT3_FEATURE_COMPAT_DIR_INDEX))
+		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
+}
+
+/*
+ * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
+ *
+ * `len <= EXT3_NAME_LEN' is guaranteed by caller.
+ * `de != NULL' is guaranteed by caller.
+ */
+static inline int ext3_match (int len, const char * const name,
+			      struct ext3_dir_entry_2 * de)
+{
+	if (len != de->name_len)
+		return 0;
+	if (!de->inode)
+		return 0;
+	return !memcmp(name, de->name, len);
+}
+
+/*
+ * Returns 0 if not found, -1 on failure, and 1 on success
+ */
+static inline int search_dirblock(struct buffer_head * bh,
+				  struct inode *dir,
+				  struct dentry *dentry,
+				  unsigned long offset,
+				  struct ext3_dir_entry_2 ** res_dir)
+{
+	struct ext3_dir_entry_2 * de;
+	char * dlimit;
+	int de_len;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+
+	de = (struct ext3_dir_entry_2 *) bh->b_data;
+	dlimit = bh->b_data + dir->i_sb->s_blocksize;
+	while ((char *) de < dlimit) {
+		/* this code is executed quadratically often */
+		/* do minimal checking `by hand' */
+
+		if ((char *) de + namelen <= dlimit &&
+		    ext3_match (namelen, name, de)) {
+			/* found a match - just to be sure, do a full check */
+			if (!ext3_check_dir_entry("ext3_find_entry",
+						  dir, de, bh, offset))
+				return -1;
+			*res_dir = de;
+			return 1;
+		}
+		/* prevent looping on a bad block */
+		de_len = le16_to_cpu(de->rec_len);
+		if (de_len <= 0)
+			return -1;
+		offset += de_len;
+		de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
+	}
+	return 0;
+}
+
+
+/*
+ *	ext3_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the cache buffer in which the entry was found, and the entry
+ * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ *
+ * The returned buffer_head has ->b_count elevated.  The caller is expected
+ * to brelse() it when appropriate.
+ */
+static struct buffer_head * ext3_find_entry (struct dentry *dentry,
+					struct ext3_dir_entry_2 ** res_dir)
+{
+	struct super_block * sb;
+	struct buffer_head * bh_use[NAMEI_RA_SIZE];
+	struct buffer_head * bh, *ret = NULL;
+	unsigned long start, block, b;
+	int ra_max = 0;		/* Number of bh's in the readahead
+				   buffer, bh_use[] */
+	int ra_ptr = 0;		/* Current index into readahead
+				   buffer */
+	int num = 0;
+	int nblocks, i, err;
+	struct inode *dir = dentry->d_parent->d_inode;
+	int namelen;
+	const u8 *name;
+	unsigned blocksize;
+
+	*res_dir = NULL;
+	sb = dir->i_sb;
+	blocksize = sb->s_blocksize;
+	namelen = dentry->d_name.len;
+	name = dentry->d_name.name;
+	if (namelen > EXT3_NAME_LEN)
+		return NULL;
+#ifdef CONFIG_EXT3_INDEX
+	if (is_dx(dir)) {
+		bh = ext3_dx_find_entry(dentry, res_dir, &err);
+		/*
+		 * On success, or if the error was file not found,
+		 * return.  Otherwise, fall back to doing a search the
+		 * old fashioned way.
+		 */
+		if (bh || (err != ERR_BAD_DX_DIR))
+			return bh;
+		dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
+	}
+#endif
+	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+	start = EXT3_I(dir)->i_dir_start_lookup;
+	if (start >= nblocks)
+		start = 0;
+	block = start;
+restart:
+	do {
+		/*
+		 * We deal with the read-ahead logic here.
+		 */
+		if (ra_ptr >= ra_max) {
+			/* Refill the readahead buffer */
+			ra_ptr = 0;
+			b = block;
+			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
+				/*
+				 * Terminate if we reach the end of the
+				 * directory and must wrap, or if our
+				 * search has finished at this block.
+				 */
+				if (b >= nblocks || (num && block == start)) {
+					bh_use[ra_max] = NULL;
+					break;
+				}
+				num++;
+				bh = ext3_getblk(NULL, dir, b++, 0, &err);
+				bh_use[ra_max] = bh;
+				if (bh)
+					ll_rw_block(READ, 1, &bh);
+			}
+		}
+		if ((bh = bh_use[ra_ptr++]) == NULL)
+			goto next;
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh)) {
+			/* read error, skip block & hope for the best */
+			ext3_error(sb, __FUNCTION__, "reading directory #%lu "
+				   "offset %lu", dir->i_ino, block);
+			brelse(bh);
+			goto next;
+		}
+		i = search_dirblock(bh, dir, dentry,
+			    block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
+		if (i == 1) {
+			EXT3_I(dir)->i_dir_start_lookup = block;
+			ret = bh;
+			goto cleanup_and_exit;
+		} else {
+			brelse(bh);
+			if (i < 0)
+				goto cleanup_and_exit;
+		}
+	next:
+		if (++block >= nblocks)
+			block = 0;
+	} while (block != start);
+
+	/*
+	 * If the directory has grown while we were searching, then
+	 * search the last part of the directory before giving up.
+	 */
+	block = nblocks;
+	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+	if (block < nblocks) {
+		start = 0;
+		goto restart;
+	}
+
+cleanup_and_exit:
+	/* Clean up the read-ahead blocks */
+	for (; ra_ptr < ra_max; ra_ptr++)
+		brelse (bh_use[ra_ptr]);
+	return ret;
+}
+
+#ifdef CONFIG_EXT3_INDEX
+static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
+		       struct ext3_dir_entry_2 **res_dir, int *err)
+{
+	struct super_block * sb;
+	struct dx_hash_info	hinfo;
+	u32 hash;
+	struct dx_frame frames[2], *frame;
+	struct ext3_dir_entry_2 *de, *top;
+	struct buffer_head *bh;
+	unsigned long block;
+	int retval;
+	int namelen = dentry->d_name.len;
+	const u8 *name = dentry->d_name.name;
+	struct inode *dir = dentry->d_parent->d_inode;
+
+	sb = dir->i_sb;
+	if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err)))
+		return NULL;
+	hash = hinfo.hash;
+	do {
+		block = dx_get_block(frame->at);
+		if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
+			goto errout;
+		de = (struct ext3_dir_entry_2 *) bh->b_data;
+		top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
+				       EXT3_DIR_REC_LEN(0));
+		for (; de < top; de = ext3_next_entry(de))
+		if (ext3_match (namelen, name, de)) {
+			if (!ext3_check_dir_entry("ext3_find_entry",
+						  dir, de, bh,
+				  (block<<EXT3_BLOCK_SIZE_BITS(sb))
+					  +((char *)de - bh->b_data))) {
+				brelse (bh);
+				goto errout;
+			}
+			*res_dir = de;
+			dx_release (frames);
+			return bh;
+		}
+		brelse (bh);
+		/* Check to see if we should continue to search */
+		retval = ext3_htree_next_block(dir, hash, frame,
+					       frames, NULL);
+		if (retval < 0) {
+			ext3_warning(sb, __FUNCTION__,
+			     "error reading index page in directory #%lu",
+			     dir->i_ino);
+			*err = retval;
+			goto errout;
+		}
+	} while (retval == 1);
+
+	*err = -ENOENT;
+errout:
+	dxtrace(printk("%s not found\n", name));
+	dx_release (frames);
+	return NULL;
+}
+#endif
+
+static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode * inode;
+	struct ext3_dir_entry_2 * de;
+	struct buffer_head * bh;
+
+	if (dentry->d_name.len > EXT3_NAME_LEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	bh = ext3_find_entry(dentry, &de);
+	inode = NULL;
+	if (bh) {
+		unsigned long ino = le32_to_cpu(de->inode);
+		brelse (bh);
+		inode = iget(dir->i_sb, ino);
+
+		if (!inode)
+			return ERR_PTR(-EACCES);
+	}
+	if (inode)
+		return d_splice_alias(inode, dentry);
+	d_add(dentry, inode);
+	return NULL;
+}
+
+
+struct dentry *ext3_get_parent(struct dentry *child)
+{
+	unsigned long ino;
+	struct dentry *parent;
+	struct inode *inode;
+	struct dentry dotdot;
+	struct ext3_dir_entry_2 * de;
+	struct buffer_head *bh;
+
+	dotdot.d_name.name = "..";
+	dotdot.d_name.len = 2;
+	dotdot.d_parent = child; /* confusing, isn't it! */
+
+	bh = ext3_find_entry(&dotdot, &de);
+	inode = NULL;
+	if (!bh)
+		return ERR_PTR(-ENOENT);
+	ino = le32_to_cpu(de->inode);
+	brelse(bh);
+	inode = iget(child->d_inode->i_sb, ino);
+
+	if (!inode)
+		return ERR_PTR(-EACCES);
+
+	parent = d_alloc_anon(inode);
+	if (!parent) {
+		iput(inode);
+		parent = ERR_PTR(-ENOMEM);
+	}
+	return parent;
+} 
+
+#define S_SHIFT 12
+static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
+	[S_IFREG >> S_SHIFT]	= EXT3_FT_REG_FILE,
+	[S_IFDIR >> S_SHIFT]	= EXT3_FT_DIR,
+	[S_IFCHR >> S_SHIFT]	= EXT3_FT_CHRDEV,
+	[S_IFBLK >> S_SHIFT]	= EXT3_FT_BLKDEV,
+	[S_IFIFO >> S_SHIFT]	= EXT3_FT_FIFO,
+	[S_IFSOCK >> S_SHIFT]	= EXT3_FT_SOCK,
+	[S_IFLNK >> S_SHIFT]	= EXT3_FT_SYMLINK,
+};
+
+static inline void ext3_set_de_type(struct super_block *sb,
+				struct ext3_dir_entry_2 *de,
+				umode_t mode) {
+	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
+		de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+}
+
+#ifdef CONFIG_EXT3_INDEX
+static struct ext3_dir_entry_2 *
+dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
+{
+	unsigned rec_len = 0;
+
+	while (count--) {
+		struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
+		rec_len = EXT3_DIR_REC_LEN(de->name_len);
+		memcpy (to, de, rec_len);
+		((struct ext3_dir_entry_2 *) to)->rec_len =
+				cpu_to_le16(rec_len);
+		de->inode = 0;
+		map++;
+		to += rec_len;
+	}
+	return (struct ext3_dir_entry_2 *) (to - rec_len);
+}
+
+static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
+{
+	struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
+	unsigned rec_len = 0;
+
+	prev = to = de;
+	while ((char*)de < base + size) {
+		next = (struct ext3_dir_entry_2 *) ((char *) de +
+						    le16_to_cpu(de->rec_len));
+		if (de->inode && de->name_len) {
+			rec_len = EXT3_DIR_REC_LEN(de->name_len);
+			if (de > to)
+				memmove(to, de, rec_len);
+			to->rec_len = cpu_to_le16(rec_len);
+			prev = to;
+			to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
+		}
+		de = next;
+	}
+	return prev;
+}
+
+static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+			struct buffer_head **bh,struct dx_frame *frame,
+			struct dx_hash_info *hinfo, int *error)
+{
+	unsigned blocksize = dir->i_sb->s_blocksize;
+	unsigned count, continued;
+	struct buffer_head *bh2;
+	u32 newblock;
+	u32 hash2;
+	struct dx_map_entry *map;
+	char *data1 = (*bh)->b_data, *data2;
+	unsigned split;
+	struct ext3_dir_entry_2 *de = NULL, *de2;
+	int	err;
+
+	bh2 = ext3_append (handle, dir, &newblock, error);
+	if (!(bh2)) {
+		brelse(*bh);
+		*bh = NULL;
+		goto errout;
+	}
+
+	BUFFER_TRACE(*bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, *bh);
+	if (err) {
+	journal_error:
+		brelse(*bh);
+		brelse(bh2);
+		*bh = NULL;
+		ext3_std_error(dir->i_sb, err);
+		goto errout;
+	}
+	BUFFER_TRACE(frame->bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, frame->bh);
+	if (err)
+		goto journal_error;
+
+	data2 = bh2->b_data;
+
+	/* create map in the end of data2 block */
+	map = (struct dx_map_entry *) (data2 + blocksize);
+	count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
+			     blocksize, hinfo, map);
+	map -= count;
+	split = count/2; // need to adjust to actual middle
+	dx_sort_map (map, count);
+	hash2 = map[split].hash;
+	continued = hash2 == map[split - 1].hash;
+	dxtrace(printk("Split block %i at %x, %i/%i\n",
+		dx_get_block(frame->at), hash2, split, count-split));
+
+	/* Fancy dance to stay within two buffers */
+	de2 = dx_move_dirents(data1, data2, map + split, count - split);
+	de = dx_pack_dirents(data1,blocksize);
+	de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
+	de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
+	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
+	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
+
+	/* Which block gets the new entry? */
+	if (hinfo->hash >= hash2)
+	{
+		swap(*bh, bh2);
+		de = de2;
+	}
+	dx_insert_block (frame, hash2 + continued, newblock);
+	err = ext3_journal_dirty_metadata (handle, bh2);
+	if (err)
+		goto journal_error;
+	err = ext3_journal_dirty_metadata (handle, frame->bh);
+	if (err)
+		goto journal_error;
+	brelse (bh2);
+	dxtrace(dx_show_index ("frame", frame->entries));
+errout:
+	return de;
+}
+#endif
+
+
+/*
+ * Add a new entry into a directory (leaf) block.  If de is non-NULL,
+ * it points to a directory entry which is guaranteed to be large
+ * enough for the new directory entry.  If de is NULL, then
+ * add_dirent_to_buf will attempt to search the directory block for
+ * space.  It will return -ENOSPC if no space is available, -EIO on
+ * an I/O error, and -EEXIST if the directory entry already exists.
+ * 
+ * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
+ * all other cases bh is released.
+ */
+static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+			     struct inode *inode, struct ext3_dir_entry_2 *de,
+			     struct buffer_head * bh)
+{
+	struct inode	*dir = dentry->d_parent->d_inode;
+	const char	*name = dentry->d_name.name;
+	int		namelen = dentry->d_name.len;
+	unsigned long	offset = 0;
+	unsigned short	reclen;
+	int		nlen, rlen, err;
+	char		*top;
+
+	reclen = EXT3_DIR_REC_LEN(namelen);
+	if (!de) {
+		de = (struct ext3_dir_entry_2 *)bh->b_data;
+		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
+		while ((char *) de <= top) {
+			if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
+						  bh, offset)) {
+				brelse (bh);
+				return -EIO;
+			}
+			if (ext3_match (namelen, name, de)) {
+				brelse (bh);
+				return -EEXIST;
+			}
+			nlen = EXT3_DIR_REC_LEN(de->name_len);
+			rlen = le16_to_cpu(de->rec_len);
+			if ((de->inode? rlen - nlen: rlen) >= reclen)
+				break;
+			de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
+			offset += rlen;
+		}
+		if ((char *) de > top)
+			return -ENOSPC;
+	}
+	BUFFER_TRACE(bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, bh);
+	if (err) {
+		ext3_std_error(dir->i_sb, err);
+		brelse(bh);
+		return err;
+	}
+
+	/* By now the buffer is marked for journaling */
+	nlen = EXT3_DIR_REC_LEN(de->name_len);
+	rlen = le16_to_cpu(de->rec_len);
+	if (de->inode) {
+		struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
+		de1->rec_len = cpu_to_le16(rlen - nlen);
+		de->rec_len = cpu_to_le16(nlen);
+		de = de1;
+	}
+	de->file_type = EXT3_FT_UNKNOWN;
+	if (inode) {
+		de->inode = cpu_to_le32(inode->i_ino);
+		ext3_set_de_type(dir->i_sb, de, inode->i_mode);
+	} else
+		de->inode = 0;
+	de->name_len = namelen;
+	memcpy (de->name, name, namelen);
+	/*
+	 * XXX shouldn't update any times until successful
+	 * completion of syscall, but too many callers depend
+	 * on this.
+	 *
+	 * XXX similarly, too many callers depend on
+	 * ext3_new_inode() setting the times, but error
+	 * recovery deletes the inode, so the worst that can
+	 * happen is that the times are slightly out of date
+	 * and/or different from the directory change time.
+	 */
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	ext3_update_dx_flag(dir);
+	dir->i_version++;
+	ext3_mark_inode_dirty(handle, dir);
+	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+	err = ext3_journal_dirty_metadata(handle, bh);
+	if (err)
+		ext3_std_error(dir->i_sb, err);
+	brelse(bh);
+	return 0;
+}
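
To make the nlen/rlen bookkeeping above concrete: suppose the scan stops at a
live entry whose 3-character name needs EXT3_DIR_REC_LEN(3) = 12 bytes but
whose rec_len is 40 because it has absorbed previously deleted space.  A new
5-character name needs 16 bytes, and 40 - 12 >= 16, so the live entry is
trimmed back to 12 bytes and the new entry inherits the remaining 28.  A tiny
sketch of that arithmetic (the rounding macro is re-derived locally and the
numbers are made up):

	#include <stdio.h>

	#define DIR_ROUND		3
	#define DIR_REC_LEN(len)	(((len) + 8 + DIR_ROUND) & ~DIR_ROUND)

	int main(void)
	{
		unsigned rlen = 40;		/* hypothetical live entry rec_len */
		unsigned nlen = DIR_REC_LEN(3);	/* space its own 3-byte name needs */
		unsigned reclen = DIR_REC_LEN(5); /* space the new 5-byte name needs */

		if (rlen - nlen >= reclen)
			printf("fits: live entry trimmed to %u, new entry gets %u bytes\n",
			       nlen, rlen - nlen);
		else
			printf("does not fit, keep scanning\n");
		return 0;
	}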
+
+#ifdef CONFIG_EXT3_INDEX
+/*
+ * This converts a one block unindexed directory to a 3 block indexed
+ * directory, and adds the dentry to the indexed directory.
+ */
+static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+			    struct inode *inode, struct buffer_head *bh)
+{
+	struct inode	*dir = dentry->d_parent->d_inode;
+	const char	*name = dentry->d_name.name;
+	int		namelen = dentry->d_name.len;
+	struct buffer_head *bh2;
+	struct dx_root	*root;
+	struct dx_frame	frames[2], *frame;
+	struct dx_entry *entries;
+	struct ext3_dir_entry_2	*de, *de2;
+	char		*data1, *top;
+	unsigned	len;
+	int		retval;
+	unsigned	blocksize;
+	struct dx_hash_info hinfo;
+	u32		block;
+	struct fake_dirent *fde;
+
+	blocksize =  dir->i_sb->s_blocksize;
+	dxtrace(printk("Creating index\n"));
+	retval = ext3_journal_get_write_access(handle, bh);
+	if (retval) {
+		ext3_std_error(dir->i_sb, retval);
+		brelse(bh);
+		return retval;
+	}
+	root = (struct dx_root *) bh->b_data;
+
+	bh2 = ext3_append (handle, dir, &block, &retval);
+	if (!(bh2)) {
+		brelse(bh);
+		return retval;
+	}
+	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
+	data1 = bh2->b_data;
+
+	/* The 0th block becomes the root, move the dirents out */
+	fde = &root->dotdot;
+	de = (struct ext3_dir_entry_2 *)((char *)fde + le16_to_cpu(fde->rec_len));
+	len = ((char *) root) + blocksize - (char *) de;
+	memcpy (data1, de, len);
+	de = (struct ext3_dir_entry_2 *) data1;
+	top = data1 + len;
+	while ((char *)(de2=(void*)de+le16_to_cpu(de->rec_len)) < top)
+		de = de2;
+	de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
+	/* Initialize the root; the dot dirents already exist */
+	de = (struct ext3_dir_entry_2 *) (&root->dotdot);
+	de->rec_len = cpu_to_le16(blocksize - EXT3_DIR_REC_LEN(2));
+	memset (&root->info, 0, sizeof(root->info));
+	root->info.info_length = sizeof(root->info);
+	root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
+	entries = root->entries;
+	dx_set_block (entries, 1);
+	dx_set_count (entries, 1);
+	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
+
+	/* Initialize as for dx_probe */
+	hinfo.hash_version = root->info.hash_version;
+	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
+	ext3fs_dirhash(name, namelen, &hinfo);
+	frame = frames;
+	frame->entries = entries;
+	frame->at = entries;
+	frame->bh = bh;
+	bh = bh2;
+	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+	dx_release (frames);
+	if (!(de))
+		return retval;
+
+	return add_dirent_to_buf(handle, dentry, inode, de, bh);
+}
+#endif
+
+/*
+ *	ext3_add_entry()
+ *
+ * adds a file entry to the specified directory, using the same
+ * semantics as ext3_find_entry().  On failure it returns a negative error code.
+ *
+ * NOTE!! The inode part of 'de' is left at 0 - which means you
+ * may not sleep between calling this and putting something into
+ * the entry, as someone else might have used it while you slept.
+ */
+static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
+	struct inode *inode)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	unsigned long offset;
+	struct buffer_head * bh;
+	struct ext3_dir_entry_2 *de;
+	struct super_block * sb;
+	int	retval;
+#ifdef CONFIG_EXT3_INDEX
+	int	dx_fallback=0;
+#endif
+	unsigned blocksize;
+	unsigned nlen, rlen;
+	u32 block, blocks;
+
+	sb = dir->i_sb;
+	blocksize = sb->s_blocksize;
+	if (!dentry->d_name.len)
+		return -EINVAL;
+#ifdef CONFIG_EXT3_INDEX
+	if (is_dx(dir)) {
+		retval = ext3_dx_add_entry(handle, dentry, inode);
+		if (!retval || (retval != ERR_BAD_DX_DIR))
+			return retval;
+		EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
+		dx_fallback++;
+		ext3_mark_inode_dirty(handle, dir);
+	}
+#endif
+	blocks = dir->i_size >> sb->s_blocksize_bits;
+	for (block = 0, offset = 0; block < blocks; block++) {
+		bh = ext3_bread(handle, dir, block, 0, &retval);
+		if (!bh)
+			return retval;
+		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+		if (retval != -ENOSPC)
+			return retval;
+
+#ifdef CONFIG_EXT3_INDEX
+		if (blocks == 1 && !dx_fallback &&
+		    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
+			return make_indexed_dir(handle, dentry, inode, bh);
+#endif
+		brelse(bh);
+	}
+	bh = ext3_append(handle, dir, &block, &retval);
+	if (!bh)
+		return retval;
+	de = (struct ext3_dir_entry_2 *) bh->b_data;
+	de->inode = 0;
+	de->rec_len = cpu_to_le16(rlen = blocksize);
+	nlen = 0;
+	return add_dirent_to_buf(handle, dentry, inode, de, bh);
+}
+
+#ifdef CONFIG_EXT3_INDEX
+/*
+ * Returns 0 for success, or a negative error value
+ */
+static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
+			     struct inode *inode)
+{
+	struct dx_frame frames[2], *frame;
+	struct dx_entry *entries, *at;
+	struct dx_hash_info hinfo;
+	struct buffer_head * bh;
+	struct inode *dir = dentry->d_parent->d_inode;
+	struct super_block * sb = dir->i_sb;
+	struct ext3_dir_entry_2 *de;
+	int err;
+
+	frame = dx_probe(dentry, NULL, &hinfo, frames, &err);
+	if (!frame)
+		return err;
+	entries = frame->entries;
+	at = frame->at;
+
+	if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
+		goto cleanup;
+
+	BUFFER_TRACE(bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, bh);
+	if (err)
+		goto journal_error;
+
+	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+	if (err != -ENOSPC) {
+		bh = NULL;
+		goto cleanup;
+	}
+
+	/* Block full, should compress but for now just split */
+	dxtrace(printk("using %u of %u node entries\n",
+		       dx_get_count(entries), dx_get_limit(entries)));
+	/* Need to split index? */
+	if (dx_get_count(entries) == dx_get_limit(entries)) {
+		u32 newblock;
+		unsigned icount = dx_get_count(entries);
+		int levels = frame - frames;
+		struct dx_entry *entries2;
+		struct dx_node *node2;
+		struct buffer_head *bh2;
+
+		if (levels && (dx_get_count(frames->entries) ==
+			       dx_get_limit(frames->entries))) {
+			ext3_warning(sb, __FUNCTION__,
+				     "Directory index full!\n");
+			err = -ENOSPC;
+			goto cleanup;
+		}
+		bh2 = ext3_append (handle, dir, &newblock, &err);
+		if (!(bh2))
+			goto cleanup;
+		node2 = (struct dx_node *)(bh2->b_data);
+		entries2 = node2->entries;
+		node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
+		node2->fake.inode = 0;
+		BUFFER_TRACE(frame->bh, "get_write_access");
+		err = ext3_journal_get_write_access(handle, frame->bh);
+		if (err)
+			goto journal_error;
+		if (levels) {
+			unsigned icount1 = icount/2, icount2 = icount - icount1;
+			unsigned hash2 = dx_get_hash(entries + icount1);
+			dxtrace(printk("Split index %i/%i\n", icount1, icount2));
+
+			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
+			err = ext3_journal_get_write_access(handle,
+							     frames[0].bh);
+			if (err)
+				goto journal_error;
+
+			memcpy ((char *) entries2, (char *) (entries + icount1),
+				icount2 * sizeof(struct dx_entry));
+			dx_set_count (entries, icount1);
+			dx_set_count (entries2, icount2);
+			dx_set_limit (entries2, dx_node_limit(dir));
+
+			/* Which index block gets the new entry? */
+			if (at - entries >= icount1) {
+				frame->at = at = at - entries - icount1 + entries2;
+				frame->entries = entries = entries2;
+				swap(frame->bh, bh2);
+			}
+			dx_insert_block (frames + 0, hash2, newblock);
+			dxtrace(dx_show_index ("node", frames[1].entries));
+			dxtrace(dx_show_index ("node",
+			       ((struct dx_node *) bh2->b_data)->entries));
+			err = ext3_journal_dirty_metadata(handle, bh2);
+			if (err)
+				goto journal_error;
+			brelse (bh2);
+		} else {
+			dxtrace(printk("Creating second level index...\n"));
+			memcpy((char *) entries2, (char *) entries,
+			       icount * sizeof(struct dx_entry));
+			dx_set_limit(entries2, dx_node_limit(dir));
+
+			/* Set up root */
+			dx_set_count(entries, 1);
+			dx_set_block(entries + 0, newblock);
+			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
+
+			/* Add new access path frame */
+			frame = frames + 1;
+			frame->at = at = at - entries + entries2;
+			frame->entries = entries = entries2;
+			frame->bh = bh2;
+			err = ext3_journal_get_write_access(handle,
+							     frame->bh);
+			if (err)
+				goto journal_error;
+		}
+		ext3_journal_dirty_metadata(handle, frames[0].bh);
+	}
+	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
+	if (!de)
+		goto cleanup;
+	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+	bh = NULL;
+	goto cleanup;
+
+journal_error:
+	ext3_std_error(dir->i_sb, err);
+cleanup:
+	if (bh)
+		brelse(bh);
+	dx_release(frames);
+	return err;
+}
+#endif
+
+/*
+ * ext3_delete_entry deletes a directory entry by merging it with the
+ * previous entry
+ */
+static int ext3_delete_entry (handle_t *handle, 
+			      struct inode * dir,
+			      struct ext3_dir_entry_2 * de_del,
+			      struct buffer_head * bh)
+{
+	struct ext3_dir_entry_2 * de, * pde;
+	int i;
+
+	i = 0;
+	pde = NULL;
+	de = (struct ext3_dir_entry_2 *) bh->b_data;
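+	/*
+	 * Walk the entries in this block: deletion either folds the
+	 * victim's rec_len into the previous entry or, when the victim is
+	 * the first entry in the block, simply clears its inode number.
+	 */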
+	while (i < bh->b_size) {
+		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
+			return -EIO;
+		if (de == de_del)  {
+			BUFFER_TRACE(bh, "get_write_access");
+			ext3_journal_get_write_access(handle, bh);
+			if (pde)
+				pde->rec_len =
+					cpu_to_le16(le16_to_cpu(pde->rec_len) +
+						    le16_to_cpu(de->rec_len));
+			else
+				de->inode = 0;
+			dir->i_version++;
+			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+			ext3_journal_dirty_metadata(handle, bh);
+			return 0;
+		}
+		i += le16_to_cpu(de->rec_len);
+		pde = de;
+		de = (struct ext3_dir_entry_2 *)
+			((char *) de + le16_to_cpu(de->rec_len));
+	}
+	return -ENOENT;
+}
+
+/*
+ * ext3_mark_inode_dirty is somewhat expensive, so unlike ext2 we
+ * do not perform it in these functions.  We perform it at the call site,
+ * if it is needed.
+ */
+static inline void ext3_inc_count(handle_t *handle, struct inode *inode)
+{
+	inode->i_nlink++;
+}
+
+static inline void ext3_dec_count(handle_t *handle, struct inode *inode)
+{
+	inode->i_nlink--;
+}
+
+static int ext3_add_nondir(handle_t *handle,
+		struct dentry *dentry, struct inode *inode)
+{
+	int err = ext3_add_entry(handle, dentry, inode);
+	if (!err) {
+		ext3_mark_inode_dirty(handle, inode);
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	ext3_dec_count(handle, inode);
+	iput(inode);
+	return err;
+}
+
+/*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate(). 
+ */
+static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
+		struct nameidata *nd)
+{
+	handle_t *handle; 
+	struct inode * inode;
+	int err, retries = 0;
+
+retry:
+	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
+					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+					2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode = ext3_new_inode (handle, dir, mode);
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		inode->i_op = &ext3_file_inode_operations;
+		inode->i_fop = &ext3_file_operations;
+		ext3_set_aops(inode);
+		err = ext3_add_nondir(handle, dentry, inode);
+	}
+	ext3_journal_stop(handle);
+	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+}
+
+static int ext3_mknod (struct inode * dir, struct dentry *dentry,
+			int mode, dev_t rdev)
+{
+	handle_t *handle;
+	struct inode *inode;
+	int err, retries = 0;
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+retry:
+	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
+			 		EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+					2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode = ext3_new_inode (handle, dir, mode);
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		init_special_inode(inode, inode->i_mode, rdev);
+#ifdef CONFIG_EXT3_FS_XATTR
+		inode->i_op = &ext3_special_inode_operations;
+#endif
+		err = ext3_add_nondir(handle, dentry, inode);
+	}
+	ext3_journal_stop(handle);
+	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+}
+
+static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	handle_t *handle;
+	struct inode * inode;
+	struct buffer_head * dir_block;
+	struct ext3_dir_entry_2 * de;
+	int err, retries = 0;
+
+	if (dir->i_nlink >= EXT3_LINK_MAX)
+		return -EMLINK;
+
+retry:
+	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
+					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+					2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_stop;
+
+	inode->i_op = &ext3_dir_inode_operations;
+	inode->i_fop = &ext3_dir_operations;
+	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+	dir_block = ext3_bread (handle, inode, 0, 1, &err);
+	if (!dir_block) {
+		inode->i_nlink--; /* is this nlink == 0? */
+		ext3_mark_inode_dirty(handle, inode);
+		iput (inode);
+		goto out_stop;
+	}
+	BUFFER_TRACE(dir_block, "get_write_access");
+	ext3_journal_get_write_access(handle, dir_block);
+	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
+	de->inode = cpu_to_le32(inode->i_ino);
+	de->name_len = 1;
+	de->rec_len = cpu_to_le16(EXT3_DIR_REC_LEN(de->name_len));
+	strcpy (de->name, ".");
+	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
+	de = (struct ext3_dir_entry_2 *)
+			((char *) de + le16_to_cpu(de->rec_len));
+	de->inode = cpu_to_le32(dir->i_ino);
+	de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize-EXT3_DIR_REC_LEN(1));
+	de->name_len = 2;
+	strcpy (de->name, "..");
+	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
+	inode->i_nlink = 2;
+	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
+	ext3_journal_dirty_metadata(handle, dir_block);
+	brelse (dir_block);
+	ext3_mark_inode_dirty(handle, inode);
+	err = ext3_add_entry (handle, dentry, inode);
+	if (err) {
+		inode->i_nlink = 0;
+		ext3_mark_inode_dirty(handle, inode);
+		iput (inode);
+		goto out_stop;
+	}
+	dir->i_nlink++;
+	ext3_update_dx_flag(dir);
+	ext3_mark_inode_dirty(handle, dir);
+	d_instantiate(dentry, inode);
+out_stop:
+	ext3_journal_stop(handle);
+	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+static int empty_dir (struct inode * inode)
+{
+	unsigned long offset;
+	struct buffer_head * bh;
+	struct ext3_dir_entry_2 * de, * de1;
+	struct super_block * sb;
+	int err = 0;
+
+	sb = inode->i_sb;
+	if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
+	    !(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
+		if (err)
+			ext3_error(inode->i_sb, __FUNCTION__,
+				   "error %d reading directory #%lu offset 0",
+				   err, inode->i_ino);
+		else
+			ext3_warning(inode->i_sb, __FUNCTION__,
+				     "bad directory (dir #%lu) - no data block",
+				     inode->i_ino);
+		return 1;
+	}
+	de = (struct ext3_dir_entry_2 *) bh->b_data;
+	de1 = (struct ext3_dir_entry_2 *)
+			((char *) de + le16_to_cpu(de->rec_len));
+	if (le32_to_cpu(de->inode) != inode->i_ino ||
+			!le32_to_cpu(de1->inode) || 
+			strcmp (".", de->name) ||
+			strcmp ("..", de1->name)) {
+		ext3_warning (inode->i_sb, "empty_dir",
+			      "bad directory (dir #%lu) - no `.' or `..'",
+			      inode->i_ino);
+		brelse (bh);
+		return 1;
+	}
+	offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
+	de = (struct ext3_dir_entry_2 *)
+			((char *) de1 + le16_to_cpu(de1->rec_len));
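+	/*
+	 * "." and ".." were consumed above; any remaining entry with a
+	 * non-zero inode number means the directory is not empty.
+	 */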
+	while (offset < inode->i_size ) {
+		if (!bh ||
+			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+			err = 0;
+			brelse (bh);
+			bh = ext3_bread (NULL, inode,
+				offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
+			if (!bh) {
+				if (err)
+					ext3_error(sb, __FUNCTION__,
+						   "error %d reading directory"
+						   " #%lu offset %lu",
+						   err, inode->i_ino, offset);
+				offset += sb->s_blocksize;
+				continue;
+			}
+			de = (struct ext3_dir_entry_2 *) bh->b_data;
+		}
+		if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
+			de = (struct ext3_dir_entry_2 *)(bh->b_data +
+							 sb->s_blocksize);
+			offset = (offset | (sb->s_blocksize - 1)) + 1;
+			continue;
+		}
+		if (le32_to_cpu(de->inode)) {
+			brelse (bh);
+			return 0;
+		}
+		offset += le16_to_cpu(de->rec_len);
+		de = (struct ext3_dir_entry_2 *)
+				((char *) de + le16_to_cpu(de->rec_len));
+	}
+	brelse (bh);
+	return 1;
+}
+
+/* ext3_orphan_add() links an unlinked or truncated inode into a list of
+ * such inodes, starting at the superblock, in case we crash before the
+ * file is closed/deleted, or in case the inode truncate spans multiple
+ * transactions and the last transaction is not recovered after a crash.
+ *
+ * At filesystem recovery time, we walk this list deleting unlinked
+ * inodes and truncating linked inodes in ext3_orphan_cleanup().
+ */
+int ext3_orphan_add(handle_t *handle, struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ext3_iloc iloc;
+	int err = 0, rc;
+
+	lock_super(sb);
+	if (!list_empty(&EXT3_I(inode)->i_orphan))
+		goto out_unlock;
+
+	/* Orphan handling is only valid for files with data blocks
+	 * being truncated, or files being unlinked. */
+
+	/* @@@ FIXME: Observation from aviro:
+	 * I think I can trigger J_ASSERT in ext3_orphan_add().  We block
+	 * here (on lock_super()), so we race with ext3_link(), which might
+	 * bump ->i_nlink for, say, a character device: not a regular file,
+	 * not a directory, not a symlink, and ->i_nlink > 0.
+	 */
+	J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+		S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
+
+	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+	if (err)
+		goto out_unlock;
+
+	err = ext3_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto out_unlock;
+
+	/* Insert this inode at the head of the on-disk orphan list... */
+	NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
+	EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
+	if (!err)
+		err = rc;
+
+	/* Only add to the head of the in-memory list if all the
+	 * previous operations succeeded.  If the orphan_add is going to
+	 * fail (possibly taking the journal offline), we can't risk
+	 * leaving the inode on the orphan list: stray orphan-list
+	 * entries can cause panics at unmount time.
+	 *
+	 * This is safe: on error we're going to ignore the orphan list
+	 * anyway on the next recovery. */
+	if (!err)
+		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
+
+	jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
+	jbd_debug(4, "orphan inode %ld will point to %d\n",
+			inode->i_ino, NEXT_ORPHAN(inode));
+out_unlock:
+	unlock_super(sb);
+	ext3_std_error(inode->i_sb, err);
+	return err;
+}
+
+/*
+ * ext3_orphan_del() removes an unlinked or truncated inode from the list
+ * of such inodes stored on disk, because it is finally being cleaned up.
+ */
+int ext3_orphan_del(handle_t *handle, struct inode *inode)
+{
+	struct list_head *prev;
+	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct ext3_sb_info *sbi;
+	unsigned long ino_next;
+	struct ext3_iloc iloc;
+	int err = 0;
+
+	lock_super(inode->i_sb);
+	if (list_empty(&ei->i_orphan)) {
+		unlock_super(inode->i_sb);
+		return 0;
+	}
+
+	ino_next = NEXT_ORPHAN(inode);
+	prev = ei->i_orphan.prev;
+	sbi = EXT3_SB(inode->i_sb);
+
+	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+
+	list_del_init(&ei->i_orphan);
+
+	/* If we're on an error path, we may not have a valid
+	 * transaction handle with which to update the orphan list on
+	 * disk, but we still need to remove the inode from the linked
+	 * list in memory. */
+	if (!handle)
+		goto out;
+
+	err = ext3_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto out_err;
+
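+	/*
+	 * Unlink this inode from the on-disk chain: if it was at the head,
+	 * repoint s_last_orphan in the superblock; otherwise make the
+	 * previous orphan's NEXT_ORPHAN skip over it.
+	 */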
+	if (prev == &sbi->s_orphan) {
+		jbd_debug(4, "superblock will point to %lu\n", ino_next);
+		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
+		err = ext3_journal_get_write_access(handle, sbi->s_sbh);
+		if (err)
+			goto out_brelse;
+		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+		err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+	} else {
+		struct ext3_iloc iloc2;
+		struct inode *i_prev =
+			&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
+
+		jbd_debug(4, "orphan inode %lu will point to %lu\n",
+			  i_prev->i_ino, ino_next);
+		err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
+		if (err)
+			goto out_brelse;
+		NEXT_ORPHAN(i_prev) = ino_next;
+		err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
+	}
+	if (err)
+		goto out_brelse;
+	NEXT_ORPHAN(inode) = 0;
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+
+out_err:
+	ext3_std_error(inode->i_sb, err);
+out:
+	unlock_super(inode->i_sb);
+	return err;
+
+out_brelse:
+	brelse(iloc.bh);
+	goto out_err;
+}
+
+static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
+{
+	int retval;
+	struct inode * inode;
+	struct buffer_head * bh;
+	struct ext3_dir_entry_2 * de;
+	handle_t *handle;
+
+	/* Initialize quotas before so that eventual writes go in
+	 * separate transaction */
+	DQUOT_INIT(dentry->d_inode);
+	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	retval = -ENOENT;
+	bh = ext3_find_entry (dentry, &de);
+	if (!bh)
+		goto end_rmdir;
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode = dentry->d_inode;
+
+	retval = -EIO;
+	if (le32_to_cpu(de->inode) != inode->i_ino)
+		goto end_rmdir;
+
+	retval = -ENOTEMPTY;
+	if (!empty_dir (inode))
+		goto end_rmdir;
+
+	retval = ext3_delete_entry(handle, dir, de, bh);
+	if (retval)
+		goto end_rmdir;
+	if (inode->i_nlink != 2)
+		ext3_warning (inode->i_sb, "ext3_rmdir",
+			      "empty directory has nlink!=2 (%d)",
+			      inode->i_nlink);
+	inode->i_version++;
+	inode->i_nlink = 0;
+	/* There's no need to set i_disksize: the fact that i_nlink is
+	 * zero will ensure that the right thing happens during any
+	 * recovery. */
+	inode->i_size = 0;
+	ext3_orphan_add(handle, inode);
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	ext3_mark_inode_dirty(handle, inode);
+	dir->i_nlink--;
+	ext3_update_dx_flag(dir);
+	ext3_mark_inode_dirty(handle, dir);
+
+end_rmdir:
+	ext3_journal_stop(handle);
+	brelse (bh);
+	return retval;
+}
+
+static int ext3_unlink(struct inode * dir, struct dentry *dentry)
+{
+	int retval;
+	struct inode * inode;
+	struct buffer_head * bh;
+	struct ext3_dir_entry_2 * de;
+	handle_t *handle;
+
+	/* Initialize quotas before so that eventual writes go
+	 * in separate transaction */
+	DQUOT_INIT(dentry->d_inode);
+	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	retval = -ENOENT;
+	bh = ext3_find_entry (dentry, &de);
+	if (!bh)
+		goto end_unlink;
+
+	inode = dentry->d_inode;
+
+	retval = -EIO;
+	if (le32_to_cpu(de->inode) != inode->i_ino)
+		goto end_unlink;
+
+	if (!inode->i_nlink) {
+		ext3_warning (inode->i_sb, "ext3_unlink",
+			      "Deleting nonexistent file (%lu), %d",
+			      inode->i_ino, inode->i_nlink);
+		inode->i_nlink = 1;
+	}
+	retval = ext3_delete_entry(handle, dir, de, bh);
+	if (retval)
+		goto end_unlink;
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	ext3_update_dx_flag(dir);
+	ext3_mark_inode_dirty(handle, dir);
+	inode->i_nlink--;
+	if (!inode->i_nlink)
+		ext3_orphan_add(handle, inode);
+	inode->i_ctime = dir->i_ctime;
+	ext3_mark_inode_dirty(handle, inode);
+	retval = 0;
+
+end_unlink:
+	ext3_journal_stop(handle);
+	brelse (bh);
+	return retval;
+}
+
+static int ext3_symlink (struct inode * dir,
+		struct dentry *dentry, const char * symname)
+{
+	handle_t *handle;
+	struct inode * inode;
+	int l, err, retries = 0;
+
+	l = strlen(symname)+1;
+	if (l > dir->i_sb->s_blocksize)
+		return -ENAMETOOLONG;
+
+retry:
+	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
+			 		EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
+					2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_stop;
+
+	if (l > sizeof (EXT3_I(inode)->i_data)) {
+		inode->i_op = &ext3_symlink_inode_operations;
+		ext3_set_aops(inode);
+		/*
+		 * page_symlink() calls into ext3_prepare/commit_write.
+		 * We have a transaction open.  All is sweetness.  It also sets
+		 * i_size in generic_commit_write().
+		 */
+		err = page_symlink(inode, symname, l);
+		if (err) {
+			ext3_dec_count(handle, inode);
+			ext3_mark_inode_dirty(handle, inode);
+			iput (inode);
+			goto out_stop;
+		}
+	} else {
+		inode->i_op = &ext3_fast_symlink_inode_operations;
+		memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
+		inode->i_size = l-1;
+	}
+	EXT3_I(inode)->i_disksize = inode->i_size;
+	err = ext3_add_nondir(handle, dentry, inode);
+out_stop:
+	ext3_journal_stop(handle);
+	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+}
+
+static int ext3_link (struct dentry * old_dentry,
+		struct inode * dir, struct dentry *dentry)
+{
+	handle_t *handle;
+	struct inode *inode = old_dentry->d_inode;
+	int err, retries = 0;
+
+	if (inode->i_nlink >= EXT3_LINK_MAX)
+		return -EMLINK;
+
+retry:
+	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
+					EXT3_INDEX_EXTRA_TRANS_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(dir))
+		handle->h_sync = 1;
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	ext3_inc_count(handle, inode);
+	atomic_inc(&inode->i_count);
+
+	err = ext3_add_nondir(handle, dentry, inode);
+	ext3_journal_stop(handle);
+	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+}
+
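+/*
+ * PARENT_INO(buffer) skips the first entry in a directory block (".")
+ * and yields the inode field of the second entry, which is expected to
+ * be "..".  ext3_rename() uses it to re-parent a moved directory.
+ */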
+#define PARENT_INO(buffer) \
+	((struct ext3_dir_entry_2 *) ((char *) buffer + \
+	le16_to_cpu(((struct ext3_dir_entry_2 *) buffer)->rec_len)))->inode
+
+/*
+ * Anybody can rename anything with this: the permission checks are left to the
+ * higher-level routines.
+ */
+static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
+			   struct inode * new_dir,struct dentry *new_dentry)
+{
+	handle_t *handle;
+	struct inode * old_inode, * new_inode;
+	struct buffer_head * old_bh, * new_bh, * dir_bh;
+	struct ext3_dir_entry_2 * old_de, * new_de;
+	int retval;
+
+	old_bh = new_bh = dir_bh = NULL;
+
+	/* Initialize quotas before so that eventual writes go
+	 * in separate transaction */
+	if (new_dentry->d_inode)
+		DQUOT_INIT(new_dentry->d_inode);
+	handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS +
+			 		EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+		handle->h_sync = 1;
+
+	old_bh = ext3_find_entry (old_dentry, &old_de);
+	/*
+	 *  The check of the inode number is _not_ due to possible IO errors:
+	 *  we might rmdir the source, keep it as the pwd of some process,
+	 *  and merrily kill the link to whatever was created under the
+	 *  same name.  Goodbye sticky bit ;-<
+	 */
+	old_inode = old_dentry->d_inode;
+	retval = -ENOENT;
+	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
+		goto end_rename;
+
+	new_inode = new_dentry->d_inode;
+	new_bh = ext3_find_entry (new_dentry, &new_de);
+	if (new_bh) {
+		if (!new_inode) {
+			brelse (new_bh);
+			new_bh = NULL;
+		}
+	}
+	if (S_ISDIR(old_inode->i_mode)) {
+		if (new_inode) {
+			retval = -ENOTEMPTY;
+			if (!empty_dir (new_inode))
+				goto end_rename;
+		}
+		retval = -EIO;
+		dir_bh = ext3_bread (handle, old_inode, 0, 0, &retval);
+		if (!dir_bh)
+			goto end_rename;
+		if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
+			goto end_rename;
+		retval = -EMLINK;
+		if (!new_inode && new_dir!=old_dir &&
+				new_dir->i_nlink >= EXT3_LINK_MAX)
+			goto end_rename;
+	}
+	if (!new_bh) {
+		retval = ext3_add_entry (handle, new_dentry, old_inode);
+		if (retval)
+			goto end_rename;
+	} else {
+		BUFFER_TRACE(new_bh, "get write access");
+		ext3_journal_get_write_access(handle, new_bh);
+		new_de->inode = cpu_to_le32(old_inode->i_ino);
+		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
+					      EXT3_FEATURE_INCOMPAT_FILETYPE))
+			new_de->file_type = old_de->file_type;
+		new_dir->i_version++;
+		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
+		ext3_journal_dirty_metadata(handle, new_bh);
+		brelse(new_bh);
+		new_bh = NULL;
+	}
+
+	/*
+	 * Like most other Unix systems, set the ctime for inodes on a
+	 * rename.
+	 */
+	old_inode->i_ctime = CURRENT_TIME_SEC;
+	ext3_mark_inode_dirty(handle, old_inode);
+
+	/*
+	 * ok, that's it
+	 */
+	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
+	    old_de->name_len != old_dentry->d_name.len ||
+	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
+	    (retval = ext3_delete_entry(handle, old_dir,
+					old_de, old_bh)) == -ENOENT) {
+		/* old_de could have moved from under us during htree split, so
+		 * make sure that we are deleting the right entry.  We might
+		 * also be pointing to a stale entry in the unused part of
+		 * old_bh so just checking inum and the name isn't enough. */
+		struct buffer_head *old_bh2;
+		struct ext3_dir_entry_2 *old_de2;
+
+		old_bh2 = ext3_find_entry(old_dentry, &old_de2);
+		if (old_bh2) {
+			retval = ext3_delete_entry(handle, old_dir,
+						   old_de2, old_bh2);
+			brelse(old_bh2);
+		}
+	}
+	if (retval) {
+		ext3_warning(old_dir->i_sb, "ext3_rename",
+				"Deleting old file (%lu), %d, error=%d",
+				old_dir->i_ino, old_dir->i_nlink, retval);
+	}
+
+	if (new_inode) {
+		new_inode->i_nlink--;
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+	}
+	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+	ext3_update_dx_flag(old_dir);
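+	/*
+	 * If we moved a directory, rewrite its ".." entry to point at the
+	 * new parent; the old parent loses a link and the new parent gains
+	 * one, unless we replaced an existing directory there.
+	 */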
+	if (dir_bh) {
+		BUFFER_TRACE(dir_bh, "get_write_access");
+		ext3_journal_get_write_access(handle, dir_bh);
+		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
+		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
+		ext3_journal_dirty_metadata(handle, dir_bh);
+		old_dir->i_nlink--;
+		if (new_inode) {
+			new_inode->i_nlink--;
+		} else {
+			new_dir->i_nlink++;
+			ext3_update_dx_flag(new_dir);
+			ext3_mark_inode_dirty(handle, new_dir);
+		}
+	}
+	ext3_mark_inode_dirty(handle, old_dir);
+	if (new_inode) {
+		ext3_mark_inode_dirty(handle, new_inode);
+		if (!new_inode->i_nlink)
+			ext3_orphan_add(handle, new_inode);
+	}
+	retval = 0;
+
+end_rename:
+	brelse (dir_bh);
+	brelse (old_bh);
+	brelse (new_bh);
+	ext3_journal_stop(handle);
+	return retval;
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations ext3_dir_inode_operations = {
+	.create		= ext3_create,
+	.lookup		= ext3_lookup,
+	.link		= ext3_link,
+	.unlink		= ext3_unlink,
+	.symlink	= ext3_symlink,
+	.mkdir		= ext3_mkdir,
+	.rmdir		= ext3_rmdir,
+	.mknod		= ext3_mknod,
+	.rename		= ext3_rename,
+	.setattr	= ext3_setattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext3_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.permission	= ext3_permission,
+};
+
+struct inode_operations ext3_special_inode_operations = {
+	.setattr	= ext3_setattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext3_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+	.permission	= ext3_permission,
+}; 
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
new file mode 100644
index 0000000..2c9f812
--- /dev/null
+++ b/fs/ext3/resize.c
@@ -0,0 +1,996 @@
+/*
+ *  linux/fs/ext3/resize.c
+ *
+ * Support for resizing an ext3 filesystem while it is mounted.
+ *
+ * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This could probably be made into a module, because it is not often in use.
+ */
+
+#include <linux/config.h>
+
+#define EXT3FS_DEBUG
+
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/ext3_jbd.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+
+#define outside(b, first, last)	((b) < (first) || (b) >= (last))
+#define inside(b, first, last)	((b) >= (first) && (b) < (last))
+
+static int verify_group_input(struct super_block *sb,
+			      struct ext3_new_group_data *input)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_super_block *es = sbi->s_es;
+	unsigned start = le32_to_cpu(es->s_blocks_count);
+	unsigned end = start + input->blocks_count;
+	unsigned group = input->group;
+	unsigned itend = input->inode_table + EXT3_SB(sb)->s_itb_per_group;
+	unsigned overhead = ext3_bg_has_super(sb, group) ?
+		(1 + ext3_bg_num_gdb(sb, group) +
+		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+	unsigned metaend = start + overhead;
+	struct buffer_head *bh = NULL;
+	int free_blocks_count;
+	int err = -EINVAL;
+
+	input->free_blocks_count = free_blocks_count =
+		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
+
+	if (test_opt(sb, DEBUG))
+		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
+		       "(%d free, %u reserved)\n",
+		       ext3_bg_has_super(sb, input->group) ? "normal" :
+		       "no-super", input->group, input->blocks_count,
+		       free_blocks_count, input->reserved_blocks);
+
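+	/*
+	 * One long if/else-if chain of sanity checks: the first failure
+	 * wins and leaves err at -EINVAL; err is cleared only if every
+	 * check passes.
+	 */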
+	if (group != sbi->s_groups_count)
+		ext3_warning(sb, __FUNCTION__,
+			     "Cannot add at group %u (only %lu groups)",
+			     input->group, sbi->s_groups_count);
+	else if ((start - le32_to_cpu(es->s_first_data_block)) %
+		 EXT3_BLOCKS_PER_GROUP(sb))
+		ext3_warning(sb, __FUNCTION__, "Last group not full");
+	else if (input->reserved_blocks > input->blocks_count / 5)
+		ext3_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+			     input->reserved_blocks);
+	else if (free_blocks_count < 0)
+		ext3_warning(sb, __FUNCTION__, "Bad blocks count %u",
+			     input->blocks_count);
+	else if (!(bh = sb_bread(sb, end - 1)))
+		ext3_warning(sb, __FUNCTION__, "Cannot read last block (%u)",
+			     end - 1);
+	else if (outside(input->block_bitmap, start, end))
+		ext3_warning(sb, __FUNCTION__,
+			     "Block bitmap not in group (block %u)",
+			     input->block_bitmap);
+	else if (outside(input->inode_bitmap, start, end))
+		ext3_warning(sb, __FUNCTION__,
+			     "Inode bitmap not in group (block %u)",
+			     input->inode_bitmap);
+	else if (outside(input->inode_table, start, end) ||
+	         outside(itend - 1, start, end))
+		ext3_warning(sb, __FUNCTION__,
+			     "Inode table not in group (blocks %u-%u)",
+			     input->inode_table, itend - 1);
+	else if (input->inode_bitmap == input->block_bitmap)
+		ext3_warning(sb, __FUNCTION__,
+			     "Block bitmap same as inode bitmap (%u)",
+			     input->block_bitmap);
+	else if (inside(input->block_bitmap, input->inode_table, itend))
+		ext3_warning(sb, __FUNCTION__,
+			     "Block bitmap (%u) in inode table (%u-%u)",
+			     input->block_bitmap, input->inode_table, itend-1);
+	else if (inside(input->inode_bitmap, input->inode_table, itend))
+		ext3_warning(sb, __FUNCTION__,
+			     "Inode bitmap (%u) in inode table (%u-%u)",
+			     input->inode_bitmap, input->inode_table, itend-1);
+	else if (inside(input->block_bitmap, start, metaend))
+		ext3_warning(sb, __FUNCTION__,
+			     "Block bitmap (%u) in GDT table (%u-%u)",
+			     input->block_bitmap, start, metaend - 1);
+	else if (inside(input->inode_bitmap, start, metaend))
+		ext3_warning(sb, __FUNCTION__,
+			     "Inode bitmap (%u) in GDT table (%u-%u)",
+			     input->inode_bitmap, start, metaend - 1);
+	else if (inside(input->inode_table, start, metaend) ||
+	         inside(itend - 1, start, metaend))
+		ext3_warning(sb, __FUNCTION__,
+			     "Inode table (%u-%u) overlaps GDT table (%u-%u)",
+			     input->inode_table, itend - 1, start, metaend - 1);
+	else
+		err = 0;
+	brelse(bh);
+
+	return err;
+}
+
+static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
+				  unsigned long blk)
+{
+	struct buffer_head *bh;
+	int err;
+
+	bh = sb_getblk(sb, blk);
+	if ((err = ext3_journal_get_write_access(handle, bh))) {
+		brelse(bh);
+		bh = ERR_PTR(err);
+	} else {
+		lock_buffer(bh);
+		memset(bh->b_data, 0, sb->s_blocksize);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+	}
+
+	return bh;
+}
+
+/*
+ * To avoid calling the atomic setbit hundreds or thousands of times, we only
+ * need to use it within a single byte (to ensure we get endianness right).
+ * We can use memset for the rest of the bitmap as there are no other users.
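+ *
+ * For example, with start_bit = 13 and end_bit = 32, bits 13-15 are set
+ * one at a time with ext3_set_bit(), and bytes 2 and 3 of the bitmap are
+ * then filled in with a single memset().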
+ */
+static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+{
+	int i;
+
+	if (start_bit >= end_bit)
+		return;
+
+	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
+	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
+		ext3_set_bit(i, bitmap);
+	if (i < end_bit)
+		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+}
+
+/*
+ * Set up the block and inode bitmaps, and the inode table for the new group.
+ * This doesn't need to be part of the main transaction, since we are only
+ * changing blocks outside the actual filesystem.  We still do journaling to
+ * ensure the recovery is correct in case of a failure just after resize.
+ * If any part of this fails, we simply abort the resize.
+ */
+static int setup_new_group_blocks(struct super_block *sb,
+				  struct ext3_new_group_data *input)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	unsigned long start = input->group * sbi->s_blocks_per_group +
+		le32_to_cpu(sbi->s_es->s_first_data_block);
+	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
+		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
+	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
+	struct buffer_head *bh;
+	handle_t *handle;
+	unsigned long block;
+	int bit;
+	int i;
+	int err = 0, err2;
+
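+	/*
+	 * One journal credit for every block we may dirty here: the
+	 * reserved GDT backups, the GDT backups proper, the block and
+	 * inode bitmaps, and the inode table blocks.
+	 */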
+	handle = ext3_journal_start_sb(sb, reserved_gdb + gdblocks +
+				       2 + sbi->s_itb_per_group);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	lock_super(sb);
+	if (input->group != sbi->s_groups_count) {
+		err = -EBUSY;
+		goto exit_journal;
+	}
+
+	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
+		err = PTR_ERR(bh);
+		goto exit_journal;
+	}
+
+	if (ext3_bg_has_super(sb, input->group)) {
+		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
+		ext3_set_bit(0, bh->b_data);
+	}
+
+	/* Copy all of the GDT blocks into the backup in this group */
+	for (i = 0, bit = 1, block = start + 1;
+	     i < gdblocks; i++, block++, bit++) {
+		struct buffer_head *gdb;
+
+		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);
+
+		gdb = sb_getblk(sb, block);
+		if ((err = ext3_journal_get_write_access(handle, gdb))) {
+			brelse(gdb);
+			goto exit_bh;
+		}
+		lock_buffer(gdb);
+		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
+		set_buffer_uptodate(gdb);
+		unlock_buffer(gdb);
+		ext3_journal_dirty_metadata(handle, gdb);
+		ext3_set_bit(bit, bh->b_data);
+		brelse(gdb);
+	}
+
+	/* Zero out all of the reserved backup group descriptor table blocks */
+	for (i = 0, bit = gdblocks + 1, block = start + bit;
+	     i < reserved_gdb; i++, block++, bit++) {
+		struct buffer_head *gdb;
+
+		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);
+
+		if (IS_ERR(gdb = bclean(handle, sb, block))) {
+			err = PTR_ERR(gdb);
+			goto exit_bh;
+		}
+		ext3_journal_dirty_metadata(handle, gdb);
+		ext3_set_bit(bit, bh->b_data);
+		brelse(gdb);
+	}
+	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
+		   input->block_bitmap - start);
+	ext3_set_bit(input->block_bitmap - start, bh->b_data);
+	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
+		   input->inode_bitmap - start);
+	ext3_set_bit(input->inode_bitmap - start, bh->b_data);
+
+	/* Zero out all of the inode table blocks */
+	for (i = 0, block = input->inode_table, bit = block - start;
+	     i < sbi->s_itb_per_group; i++, bit++, block++) {
+		struct buffer_head *it;
+
+		ext3_debug("clear inode block %#04x (+%ld)\n", block, bit);
+		if (IS_ERR(it = bclean(handle, sb, block))) {
+			err = PTR_ERR(it);
+			goto exit_bh;
+		}
+		ext3_journal_dirty_metadata(handle, it);
+		brelse(it);
+		ext3_set_bit(bit, bh->b_data);
+	}
+	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
+			bh->b_data);
+	ext3_journal_dirty_metadata(handle, bh);
+	brelse(bh);
+
+	/* Mark unused entries in inode bitmap used */
+	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
+		   input->inode_bitmap, input->inode_bitmap - start);
+	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
+		err = PTR_ERR(bh);
+		goto exit_journal;
+	}
+
+	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
+			bh->b_data);
+	ext3_journal_dirty_metadata(handle, bh);
+exit_bh:
+	brelse(bh);
+
+exit_journal:
+	unlock_super(sb);
+	if ((err2 = ext3_journal_stop(handle)) && !err)
+		err = err2;
+
+	return err;
+}
+
+/*
+ * Iterate through the groups which hold BACKUP superblock/GDT copies in an
+ * ext3 filesystem.  The counters should be initialized to 1, 5, and 7 before
+ * calling this for the first time.  In a sparse filesystem it will be the
+ * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
+ * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
+ */
+static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
+				  unsigned *five, unsigned *seven)
+{
+	unsigned *min = three;
+	int mult = 3;
+	unsigned ret;
+
+	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
+					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
+		ret = *min;
+		*min += 1;
+		return ret;
+	}
+
+	if (*five < *min) {
+		min = five;
+		mult = 5;
+	}
+	if (*seven < *min) {
+		min = seven;
+		mult = 7;
+	}
+
+	ret = *min;
+	*min *= mult;
+
+	return ret;
+}
+
+/*
+ * Check that all of the backup GDT blocks are held in the primary GDT block.
+ * It is assumed that they are stored in group order.  Returns the number of
+ * groups in current filesystem that have BACKUPS, or -ve error code.
+ */
+static int verify_reserved_gdb(struct super_block *sb,
+			       struct buffer_head *primary)
+{
+	const unsigned long blk = primary->b_blocknr;
+	const unsigned long end = EXT3_SB(sb)->s_groups_count;
+	unsigned three = 1;
+	unsigned five = 5;
+	unsigned seven = 7;
+	unsigned grp;
+	__u32 *p = (__u32 *)primary->b_data;
+	int gdbackups = 0;
+
+	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
+		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
+			ext3_warning(sb, __FUNCTION__,
+				     "reserved GDT %ld missing grp %d (%ld)\n",
+				     blk, grp,
+				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
+			return -EINVAL;
+		}
+		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
+			return -EFBIG;
+	}
+
+	return gdbackups;
+}
+
+/*
+ * Called when we need to bring a reserved group descriptor table block into
+ * use from the resize inode.  The primary copy of the new GDT block currently
+ * is an indirect block (under the double indirect block in the resize inode).
+ * The new backup GDT blocks will be stored as leaf blocks in this indirect
+ * block, in group order.  Even though we know all the block numbers we need,
+ * we check to ensure that the resize inode has actually reserved these blocks.
+ *
+ * Don't need to update the block bitmaps because the blocks are still in use.
+ *
+ * We get all of the error cases out of the way, so that we are sure to not
+ * fail once we start modifying the data on disk, because JBD has no rollback.
+ */
+static int add_new_gdb(handle_t *handle, struct inode *inode,
+		       struct ext3_new_group_data *input,
+		       struct buffer_head **primary)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
+	unsigned long gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
+	struct buffer_head **o_group_desc, **n_group_desc;
+	struct buffer_head *dind;
+	int gdbackups;
+	struct ext3_iloc iloc;
+	__u32 *data;
+	int err;
+
+	if (test_opt(sb, DEBUG))
+		printk(KERN_DEBUG
+		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
+		       gdb_num);
+
+	/*
+	 * If we are not using the primary superblock/GDT copy, don't resize,
+	 * because the user tools have no way of handling this.  Probably a
+	 * bad time to do it anyway.
+	 */
+	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
+	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
+		ext3_warning(sb, __FUNCTION__,
+			"won't resize using backup superblock at %llu\n",
+			(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
+		return -EPERM;
+	}
+
+	*primary = sb_bread(sb, gdblock);
+	if (!*primary)
+		return -EIO;
+
+	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+		err = gdbackups;
+		goto exit_bh;
+	}
+
+	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
+	dind = sb_bread(sb, le32_to_cpu(*data));
+	if (!dind) {
+		err = -EIO;
+		goto exit_bh;
+	}
+
+	data = (__u32 *)dind->b_data;
+	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
+		ext3_warning(sb, __FUNCTION__,
+			     "new group %u GDT block %lu not reserved\n",
+			     input->group, gdblock);
+		err = -EINVAL;
+		goto exit_dind;
+	}
+
+	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
+		goto exit_dind;
+
+	if ((err = ext3_journal_get_write_access(handle, *primary)))
+		goto exit_sbh;
+
+	if ((err = ext3_journal_get_write_access(handle, dind)))
+		goto exit_primary;
+
+	/* ext3_reserve_inode_write() gets a reference on the iloc */
+	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
+		goto exit_dindj;
+
+	n_group_desc = (struct buffer_head **)kmalloc((gdb_num + 1) *
+				sizeof(struct buffer_head *), GFP_KERNEL);
+	if (!n_group_desc) {
+		err = -ENOMEM;
+		ext3_warning (sb, __FUNCTION__,
+			      "not enough memory for %lu groups", gdb_num + 1);
+		goto exit_inode;
+	}
+
+	/*
+	 * Finally, we have all of the possible failures behind us...
+	 *
+	 * Remove new GDT block from inode double-indirect block and clear out
+	 * the new GDT block for use (which also "frees" the backup GDT blocks
+	 * from the reserved inode).  We don't need to change the bitmaps for
+	 * these blocks, because they are marked as in-use from being in the
+	 * reserved inode, and will become GDT blocks (primary and backup).
+	 */
+	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
+	ext3_journal_dirty_metadata(handle, dind);
+	brelse(dind);
+	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
+	ext3_mark_iloc_dirty(handle, inode, &iloc);
+	memset((*primary)->b_data, 0, sb->s_blocksize);
+	ext3_journal_dirty_metadata(handle, *primary);
+
+	o_group_desc = EXT3_SB(sb)->s_group_desc;
+	memcpy(n_group_desc, o_group_desc,
+	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+	n_group_desc[gdb_num] = *primary;
+	EXT3_SB(sb)->s_group_desc = n_group_desc;
+	EXT3_SB(sb)->s_gdb_count++;
+	kfree(o_group_desc);
+
+	es->s_reserved_gdt_blocks =
+		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+
+	return 0;
+
+exit_inode:
+	//ext3_journal_release_buffer(handle, iloc.bh);
+	brelse(iloc.bh);
+exit_dindj:
+	//ext3_journal_release_buffer(handle, dind);
+exit_primary:
+	//ext3_journal_release_buffer(handle, *primary);
+exit_sbh:
+	//ext3_journal_release_buffer(handle, EXT3_SB(sb)->s_sbh);
+exit_dind:
+	brelse(dind);
+exit_bh:
+	brelse(*primary);
+
+	ext3_debug("leaving with error %d\n", err);
+	return err;
+}
+
+/*
+ * Called when we are adding a new group which has a backup copy of each of
+ * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
+ * We need to add these reserved backup GDT blocks to the resize inode, so
+ * that they are kept for future resizing and not allocated to files.
+ *
+ * Each reserved backup GDT block will go into a different indirect block.
+ * The indirect blocks are actually the primary reserved GDT blocks,
+ * so we know in advance what their block numbers are.  We only get the
+ * double-indirect block to verify it is pointing to the primary reserved
+ * GDT blocks so we don't overwrite a data block by accident.  The reserved
+ * backup GDT blocks are stored in their reserved primary GDT block.
+ */
+static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
+			      struct ext3_new_group_data *input)
+{
+	struct super_block *sb = inode->i_sb;
+	int reserved_gdb = le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
+	struct buffer_head **primary;
+	struct buffer_head *dind;
+	struct ext3_iloc iloc;
+	unsigned long blk;
+	__u32 *data, *end;
+	int gdbackups = 0;
+	int res, i;
+	int err;
+
+	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
+	if (!primary)
+		return -ENOMEM;
+
+	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
+	dind = sb_bread(sb, le32_to_cpu(*data));
+	if (!dind) {
+		err = -EIO;
+		goto exit_free;
+	}
+
+	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
+	data = (__u32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
+	end = (__u32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
+
+	/* Get each reserved primary GDT block and verify it holds backups */
+	for (res = 0; res < reserved_gdb; res++, blk++) {
+		if (le32_to_cpu(*data) != blk) {
+			ext3_warning(sb, __FUNCTION__,
+				     "reserved block %lu not at offset %ld\n",
+				     blk, (long)(data - (__u32 *)dind->b_data));
+			err = -EINVAL;
+			goto exit_bh;
+		}
+		primary[res] = sb_bread(sb, blk);
+		if (!primary[res]) {
+			err = -EIO;
+			goto exit_bh;
+		}
+		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
+			brelse(primary[res]);
+			err = gdbackups;
+			goto exit_bh;
+		}
+		if (++data >= end)
+			data = (__u32 *)dind->b_data;
+	}
+
+	for (i = 0; i < reserved_gdb; i++) {
+		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
+			/*
+			int j;
+			for (j = 0; j < i; j++)
+				ext3_journal_release_buffer(handle, primary[j]);
+			 */
+			goto exit_bh;
+		}
+	}
+
+	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
+		goto exit_bh;
+
+	/*
+	 * Finally we can add each of the reserved backup GDT blocks from
+	 * the new group to its reserved primary GDT block.
+	 */
+	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
+	for (i = 0; i < reserved_gdb; i++) {
+		int err2;
+		data = (__u32 *)primary[i]->b_data;
+		/* printk("reserving backup %lu[%u] = %lu\n",
+		       primary[i]->b_blocknr, gdbackups,
+		       blk + primary[i]->b_blocknr); */
+		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
+		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
+		if (!err)
+			err = err2;
+	}
+	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
+	ext3_mark_iloc_dirty(handle, inode, &iloc);
+
+exit_bh:
+	while (--res >= 0)
+		brelse(primary[res]);
+	brelse(dind);
+
+exit_free:
+	kfree(primary);
+
+	return err;
+}
+
+/*
+ * Update the backup copies of the ext3 metadata.  These don't need to be part
+ * of the main resize transaction, because e2fsck will re-write them if there
+ * is a problem (basically only OOM will cause a problem).  However, we
+ * _should_ update the backups if possible, in case the primary gets trashed
+ * for some reason and we need to run e2fsck from a backup superblock.  The
+ * important part is that the new block and inode counts are in the backup
+ * superblocks, and the location of the new group metadata in the GDT backups.
+ *
+ * We do not need lock_super() for this, because these blocks are not
+ * otherwise touched by the filesystem code when it is mounted.  We don't
+ * need to worry about our snapshot of sbi->s_groups_count (last) going
+ * stale, because the worst that can happen is that we do not copy the full
+ * number of backups at this time.  The resize which changed s_groups_count
+ * will back them up again.
+ */
+static void update_backups(struct super_block *sb,
+			   int blk_off, char *data, int size)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	const unsigned long last = sbi->s_groups_count;
+	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
+	unsigned three = 1;
+	unsigned five = 5;
+	unsigned seven = 7;
+	unsigned group;
+	int rest = sb->s_blocksize - size;
+	handle_t *handle;
+	int err = 0, err2;
+
+	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
+	if (IS_ERR(handle)) {
+		group = 1;
+		err = PTR_ERR(handle);
+		goto exit_err;
+	}
+
+	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
+		struct buffer_head *bh;
+
+		/* Out of journal space, and can't get more - abort - so sad */
+		if (handle->h_buffer_credits == 0 &&
+		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
+		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
+			break;
+
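+		/*
+		 * Each backup copy sits at the same offset within its
+		 * group as the primary copy does within group 0.
+		 */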
+		bh = sb_getblk(sb, group * bpg + blk_off);
+		ext3_debug("update metadata backup %#04lx\n",
+			   bh->b_blocknr);
+		if ((err = ext3_journal_get_write_access(handle, bh)))
+			break;
+		lock_buffer(bh);
+		memcpy(bh->b_data, data, size);
+		if (rest)
+			memset(bh->b_data + size, 0, rest);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		ext3_journal_dirty_metadata(handle, bh);
+		brelse(bh);
+	}
+	if ((err2 = ext3_journal_stop(handle)) && !err)
+		err = err2;
+
+	/*
+	 * Ugh! Need to have e2fsck write the backup copies.  It is too
+	 * late to revert the resize, we shouldn't fail just because of
+	 * the backup copies (they are only needed in case of corruption).
+	 *
+	 * However, if we got here we have a journal problem too, so we
+	 * can't really start a transaction to mark the superblock.
+	 * Chicken out and just set the flag on the hope it will be written
+	 * to disk, and if not - we will simply wait until next fsck.
+	 */
+exit_err:
+	if (err) {
+		ext3_warning(sb, __FUNCTION__,
+			     "can't update backup for group %d (err %d), "
+			     "forcing fsck on next reboot\n", group, err);
+		sbi->s_mount_state &= ~EXT3_VALID_FS;
+		sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS);
+		mark_buffer_dirty(sbi->s_sbh);
+	}
+}
+
+/* Add group descriptor data to an existing or new group descriptor block.
+ * Ensure we handle all possible error conditions _before_ we start modifying
+ * the filesystem, because we cannot abort the transaction and not have it
+ * write the data to disk.
+ *
+ * If we are on a GDT block boundary, we need to get the reserved GDT block.
+ * Otherwise, we may need to add backup GDT blocks for a sparse group.
+ *
+ * We only need to hold the superblock lock while we are actually adding
+ * in the new group's counts to the superblock.  Prior to that we have
+ * not really "added" the group at all.  We re-check that we are still
+ * adding in the last group in case things have changed since verifying.
+ */
+int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_super_block *es = sbi->s_es;
+	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
+		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
+	struct buffer_head *primary = NULL;
+	struct ext3_group_desc *gdp;
+	struct inode *inode = NULL;
+	handle_t *handle;
+	int gdb_off, gdb_num;
+	int err, err2;
+
+	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
+	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);
+
+	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
+					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
+		ext3_warning(sb, __FUNCTION__,
+			     "Can't resize non-sparse filesystem further\n");
+		return -EPERM;
+	}
+
+	if (reserved_gdb || gdb_off == 0) {
+		if (!EXT3_HAS_COMPAT_FEATURE(sb,
+					     EXT3_FEATURE_COMPAT_RESIZE_INODE)){
+			ext3_warning(sb, __FUNCTION__,
+				     "No reserved GDT blocks, can't resize\n");
+			return -EPERM;
+		}
+		inode = iget(sb, EXT3_RESIZE_INO);
+		if (!inode || is_bad_inode(inode)) {
+			ext3_warning(sb, __FUNCTION__,
+				     "Error opening resize inode\n");
+			iput(inode);
+			return -ENOENT;
+		}
+	}
+
+	if ((err = verify_group_input(sb, input)))
+		goto exit_put;
+
+	if ((err = setup_new_group_blocks(sb, input)))
+		goto exit_put;
+
+	/*
+	 * We will always be modifying at least the superblock and a GDT
+	 * block.  If we are adding a group past the last current GDT block,
+	 * we will also modify the inode and the dindirect block.  If we
+	 * are adding a group with superblock/GDT backups, we will also
+	 * modify each of the reserved GDT dindirect blocks.
+	 */
+	handle = ext3_journal_start_sb(sb,
+				       ext3_bg_has_super(sb, input->group) ?
+				       3 + reserved_gdb : 4);
+	if (IS_ERR(handle)) {
+		err = PTR_ERR(handle);
+		goto exit_put;
+	}
+
+	lock_super(sb);
+	if (input->group != EXT3_SB(sb)->s_groups_count) {
+		ext3_warning(sb, __FUNCTION__,
+			     "multiple resizers run on filesystem!\n");
+		goto exit_journal;
+	}
+
+	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
+		goto exit_journal;
+
+	/*
+	 * We will only either add reserved group blocks to a backup group
+	 * or remove reserved blocks for the first group in a new group block.
+	 * Doing both would mean more complex code, and sane people don't
+	 * use non-sparse filesystems anymore.  This is already checked above.
+	 */
+	if (gdb_off) {
+		primary = sbi->s_group_desc[gdb_num];
+		if ((err = ext3_journal_get_write_access(handle, primary)))
+			goto exit_journal;
+
+		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
+		    (err = reserve_backup_gdb(handle, inode, input)))
+			goto exit_journal;
+	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
+		goto exit_journal;
+
+	/*
+	 * OK, now we've set up the new group.  Time to make it active.
+	 *
+	 * Current kernels don't lock all allocations via lock_super(),
+	 * so we have to be safe wrt. concurrent accesses to the group
+	 * data.  So we need to be careful to set all of the relevant
+	 * group descriptor data etc. *before* we enable the group.
+	 *
+	 * The key field here is EXT3_SB(sb)->s_groups_count: as long as
+	 * that retains its old value, nobody is going to access the new
+	 * group.
+	 *
+	 * So first we update all the descriptor metadata for the new
+	 * group; then we update the total disk blocks count; then we
+	 * update the groups count to enable the group; then finally we
+	 * update the free space counts so that the system can start
+	 * using the new disk blocks.
+	 */
+
+	/* Update group descriptor block for new group */
+	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;
+
+	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
+	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
+	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
+	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
+
+	/*
+	 * Make the new blocks and inodes valid next.  We do this before
+	 * increasing the group count so that once the group is enabled,
+	 * all of its blocks and inodes are already valid.
+	 *
+	 * We always allocate group-by-group, then block-by-block or
+	 * inode-by-inode within a group, so enabling these
+	 * blocks/inodes before the group is live won't actually let us
+	 * allocate the new space yet.
+	 */
+	es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) +
+		input->blocks_count);
+	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
+		EXT3_INODES_PER_GROUP(sb));
+
+	/*
+	 * We need to protect s_groups_count against other CPUs seeing
+	 * inconsistent state in the superblock.
+	 *
+	 * The precise rules we use are:
+	 *
+	 * * Writers of s_groups_count *must* hold lock_super
+	 * AND
+	 * * Writers must perform a smp_wmb() after updating all dependent
+	 *   data and before modifying the groups count
+	 *
+	 * * Readers must hold lock_super() over the access
+	 * OR
+	 * * Readers must perform an smp_rmb() after reading the groups count
+	 *   and before reading any dependent data.
+	 *
+	 * NB. These rules can be relaxed when checking the group count
+	 * while freeing data, as we can only allocate from a block
+	 * group after serialising against the group count, and we can
+	 * only then free after serialising in turn against that
+	 * allocation.
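+	 *
+	 * A lockless reader would therefore follow the pattern (sketch):
+	 *
+	 *	ngroups = EXT3_SB(sb)->s_groups_count;
+	 *	smp_rmb();
+	 *	... access group descriptors for groups < ngroups ...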
+	 */
+	smp_wmb();
+
+	/* Update the global fs size fields */
+	EXT3_SB(sb)->s_groups_count++;
+
+	ext3_journal_dirty_metadata(handle, primary);
+
+	/* Update the reserved block counts only once the new group is
+	 * active. */
+	es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) +
+		input->reserved_blocks);
+
+	/* Update the free space counts */
+	percpu_counter_mod(&sbi->s_freeblocks_counter,
+			   input->free_blocks_count);
+	percpu_counter_mod(&sbi->s_freeinodes_counter,
+			   EXT3_INODES_PER_GROUP(sb));
+
+	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	sb->s_dirt = 1;
+
+exit_journal:
+	unlock_super(sb);
+	if ((err2 = ext3_journal_stop(handle)) && !err)
+		err = err2;
+	if (!err) {
+		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+			       sizeof(struct ext3_super_block));
+		update_backups(sb, primary->b_blocknr, primary->b_data,
+			       primary->b_size);
+	}
+exit_put:
+	iput(inode);
+	return err;
+} /* ext3_group_add */
+
+/* Extend the filesystem to the new number of blocks specified.  This entry
+ * point is only used to extend the current filesystem to the end of the last
+ * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
+ * for emergencies (because it has no dependencies on reserved blocks).
+ *
+ * If we _really_ wanted, we could use default values to call ext3_group_add()
+ * and allow the "remount" trick to work for arbitrary resizing, assuming enough
+ * GDT blocks are reserved to grow to the desired size.
+ */
+int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
+		      unsigned long n_blocks_count)
+{
+	unsigned long o_blocks_count;
+	unsigned long o_groups_count;
+	unsigned long last;
+	int add;
+	struct buffer_head * bh;
+	handle_t *handle;
+	int err, freed_blocks;
+
+	/* We don't need to worry about locking wrt other resizers just
+	 * yet: we're going to revalidate es->s_blocks_count after
+	 * taking lock_super() below. */
+	o_blocks_count = le32_to_cpu(es->s_blocks_count);
+	o_groups_count = EXT3_SB(sb)->s_groups_count;
+
+	if (test_opt(sb, DEBUG))
+		printk(KERN_DEBUG "EXT3-fs: extending last group from %lu to %lu blocks\n",
+		       o_blocks_count, n_blocks_count);
+
+	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
+		return 0;
+
+	if (n_blocks_count < o_blocks_count) {
+		ext3_warning(sb, __FUNCTION__,
+			     "can't shrink FS - resize aborted");
+		return -EBUSY;
+	}
+
+	/* Handle the remaining blocks in the last group only. */
+	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
+		EXT3_BLOCKS_PER_GROUP(sb);
+
+	if (last == 0) {
+		ext3_warning(sb, __FUNCTION__,
+			     "need to use ext2online to resize further\n");
+		return -EPERM;
+	}
+
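+	/*
+	 * "add" is how many blocks are needed to fill out the last group;
+	 * clamp it below so we never grow past n_blocks_count.
+	 */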
+	add = EXT3_BLOCKS_PER_GROUP(sb) - last;
+
+	if (o_blocks_count + add > n_blocks_count)
+		add = n_blocks_count - o_blocks_count;
+
+	if (o_blocks_count + add < n_blocks_count)
+		ext3_warning(sb, __FUNCTION__,
+			     "will only finish group (%lu blocks, %u new)",
+			     o_blocks_count + add, add);
+
+	/* See if the device is actually as big as what was requested */
+	bh = sb_bread(sb, o_blocks_count + add -1);
+	if (!bh) {
+		ext3_warning(sb, __FUNCTION__,
+			     "can't read last block, resize aborted");
+		return -ENOSPC;
+	}
+	brelse(bh);
+
+	/* We will update the superblock, one block bitmap, and
+	 * one group descriptor via ext3_free_blocks().
+	 */
+	handle = ext3_journal_start_sb(sb, 3);
+	if (IS_ERR(handle)) {
+		err = PTR_ERR(handle);
+		ext3_warning(sb, __FUNCTION__, "error %d on journal start", err);
+		goto exit_put;
+	}
+
+	lock_super(sb);
+	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
+		ext3_warning(sb, __FUNCTION__,
+			     "multiple resizers run on filesystem!\n");
+		err = -EBUSY;
+		goto exit_put;
+	}
+
+	if ((err = ext3_journal_get_write_access(handle,
+						 EXT3_SB(sb)->s_sbh))) {
+		ext3_warning(sb, __FUNCTION__,
+			     "error %d on journal write access", err);
+		unlock_super(sb);
+		ext3_journal_stop(handle);
+		goto exit_put;
+	}
+	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
+	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	sb->s_dirt = 1;
+	unlock_super(sb);
+	ext3_debug("freeing blocks %ld through %ld\n", o_blocks_count,
+		   o_blocks_count + add);
+	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
+	ext3_debug("freed blocks %ld through %ld\n", o_blocks_count,
+		   o_blocks_count + add);
+	if ((err = ext3_journal_stop(handle)))
+		goto exit_put;
+	if (test_opt(sb, DEBUG))
+		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
+		       le32_to_cpu(es->s_blocks_count));
+	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
+		       sizeof(struct ext3_super_block));
+exit_put:
+	return err;
+} /* ext3_group_extend */
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
new file mode 100644
index 0000000..545b440
--- /dev/null
+++ b/fs/ext3/super.c
@@ -0,0 +1,2539 @@
+/*
+ *  linux/fs/ext3/super.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/parser.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/random.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+#include <asm/uaccess.h>
+#include "xattr.h"
+#include "acl.h"
+
+static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
+static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
+			       int);
+static void ext3_commit_super (struct super_block * sb,
+			       struct ext3_super_block * es,
+			       int sync);
+static void ext3_mark_recovery_complete(struct super_block * sb,
+					struct ext3_super_block * es);
+static void ext3_clear_journal_err(struct super_block * sb,
+				   struct ext3_super_block * es);
+static int ext3_sync_fs(struct super_block *sb, int wait);
+static const char *ext3_decode_error(struct super_block * sb, int errno,
+				     char nbuf[16]);
+static int ext3_remount (struct super_block * sb, int * flags, char * data);
+static int ext3_statfs (struct super_block * sb, struct kstatfs * buf);
+static void ext3_unlockfs(struct super_block *sb);
+static void ext3_write_super (struct super_block * sb);
+static void ext3_write_super_lockfs(struct super_block *sb);
+
+/* 
+ * Wrappers for journal_start/end.
+ *
+ * The only special thing we need to do here is to make sure that all
+ * journal_end calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate. 
+ */
+handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
+{
+	journal_t *journal;
+
+	if (sb->s_flags & MS_RDONLY)
+		return ERR_PTR(-EROFS);
+
+	/* Special case here: if the journal has aborted behind our
+	 * backs (e.g. EIO in the commit thread), then we still need to
+	 * cleanly take the FS itself readonly. */
+	journal = EXT3_SB(sb)->s_journal;
+	if (is_journal_aborted(journal)) {
+		ext3_abort(sb, __FUNCTION__,
+			   "Detected aborted journal");
+		return ERR_PTR(-EROFS);
+	}
+
+	return journal_start(journal, nblocks);
+}
+
+/* 
+ * The only special thing we need to do here is to make sure that all
+ * journal_stop calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate. 
+ */
+int __ext3_journal_stop(const char *where, handle_t *handle)
+{
+	struct super_block *sb;
+	int err;
+	int rc;
+
+	sb = handle->h_transaction->t_journal->j_private;
+	err = handle->h_err;
+	rc = journal_stop(handle);
+
+	if (!err)
+		err = rc;
+	if (err)
+		__ext3_std_error(sb, where, err);
+	return err;
+}
+
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err)
+{
+	char nbuf[16];
+	const char *errstr = ext3_decode_error(NULL, err, nbuf);
+
+	if (bh)
+		BUFFER_TRACE(bh, "abort");
+
+	if (!handle->h_err)
+		handle->h_err = err;
+
+	if (is_handle_aborted(handle))
+		return;
+
+	printk(KERN_ERR "%s: aborting transaction: %s in %s\n",
+	       caller, errstr, err_fn);
+
+	journal_abort_handle(handle);
+}
+
+/* Deal with the reporting of failure conditions on a filesystem such as
+ * inconsistencies detected or read IO failures.
+ *
+ * On ext2, we can store the error state of the filesystem in the
+ * superblock.  That is not possible on ext3, because we may have other
+ * write ordering constraints on the superblock which prevent us from
+ * writing it out straight away; and given that the journal is about to
+ * be aborted, we can't rely on the current, or future, transactions to
+ * write out the superblock safely.
+ *
+ * We'll just use the journal_abort() error code to record an error in
+ * the journal instead.  On recovery, the journal will complain about
+ * that error until we've noted it down and cleared it.
+ */
+
+static void ext3_handle_error(struct super_block *sb)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
+	es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	if (test_opt (sb, ERRORS_RO)) {
+		printk (KERN_CRIT "Remounting filesystem read-only\n");
+		sb->s_flags |= MS_RDONLY;
+	} else {
+		journal_t *journal = EXT3_SB(sb)->s_journal;
+
+		EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+		if (journal)
+			journal_abort(journal, -EIO);
+	}
+	if (test_opt(sb, ERRORS_PANIC))
+		panic("EXT3-fs (device %s): panic forced after error\n",
+			sb->s_id);
+	ext3_commit_super(sb, es, 1);
+}
+
+void ext3_error (struct super_block * sb, const char * function,
+		 const char * fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+
+	ext3_handle_error(sb);
+}
+
+static const char *ext3_decode_error(struct super_block * sb, int errno,
+				     char nbuf[16])
+{
+	char *errstr = NULL;
+
+	switch (errno) {
+	case -EIO:
+		errstr = "IO failure";
+		break;
+	case -ENOMEM:
+		errstr = "Out of memory";
+		break;
+	case -EROFS:
+		if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT)
+			errstr = "Journal has aborted";
+		else
+			errstr = "Readonly filesystem";
+		break;
+	default:
+		/* If the caller passed in an extra buffer for unknown
+		 * errors, textualise them now.  Else we just return
+		 * NULL. */
+		if (nbuf) {
+			/* Check for truncated error codes... */
+			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
+				errstr = nbuf;
+		}
+		break;
+	}
+
+	return errstr;
+}
+
+/* __ext3_std_error decodes expected errors from journaling functions
+ * automatically and invokes the appropriate error response.  */
+
+void __ext3_std_error (struct super_block * sb, const char * function,
+		       int errno)
+{
+	char nbuf[16];
+	const char *errstr = ext3_decode_error(sb, errno, nbuf);
+
+	printk (KERN_CRIT "EXT3-fs error (device %s) in %s: %s\n",
+		sb->s_id, function, errstr);
+
+	ext3_handle_error(sb);
+}
+
+/*
+ * ext3_abort is a much stronger failure handler than ext3_error.  The
+ * abort function may be used to deal with unrecoverable failures such
+ * as journal IO errors or ENOMEM at a critical moment in log management.
+ *
+ * We unconditionally force the filesystem into an ABORT|READONLY state,
+ * unless the error response on the fs has been set to panic in which
+ * case we take the easy way out and panic immediately.
+ */
+
+void ext3_abort (struct super_block * sb, const char * function,
+		 const char * fmt, ...)
+{
+	va_list args;
+
+	printk (KERN_CRIT "ext3_abort called.\n");
+
+	va_start(args, fmt);
+	printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+
+	if (test_opt(sb, ERRORS_PANIC))
+		panic("EXT3-fs panic from previous error\n");
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	printk(KERN_CRIT "Remounting filesystem read-only\n");
+	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
+	sb->s_flags |= MS_RDONLY;
+	EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+	journal_abort(EXT3_SB(sb)->s_journal, -EIO);
+}
+
+void ext3_warning (struct super_block * sb, const char * function,
+		   const char * fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	printk(KERN_WARNING "EXT3-fs warning (device %s): %s: ",
+	       sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+}
+
+void ext3_update_dynamic_rev(struct super_block *sb)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+	if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
+		return;
+
+	ext3_warning(sb, __FUNCTION__,
+		     "updating to rev %d because of new feature flag, "
+		     "running e2fsck is recommended",
+		     EXT3_DYNAMIC_REV);
+
+	es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO);
+	es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE);
+	es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV);
+	/* leave es->s_feature_*compat flags alone */
+	/* es->s_uuid will be set by e2fsck if empty */
+
+	/*
+	 * The rest of the superblock fields should be zero, and if not it
+	 * means they are likely already in use, so leave them alone.  We
+	 * can leave it up to e2fsck to clean up any inconsistencies there.
+	 */
+}
+
+/*
+ * Open the external journal device
+ */
+static struct block_device *ext3_blkdev_get(dev_t dev)
+{
+	struct block_device *bdev;
+	char b[BDEVNAME_SIZE];
+
+	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	if (IS_ERR(bdev))
+		goto fail;
+	return bdev;
+
+fail:
+	printk(KERN_ERR "EXT3: failed to open journal device %s: %ld\n",
+			__bdevname(dev, b), PTR_ERR(bdev));
+	return NULL;
+}
+
+/*
+ * Release the journal device
+ */
+static int ext3_blkdev_put(struct block_device *bdev)
+{
+	bd_release(bdev);
+	return blkdev_put(bdev);
+}
+
+static int ext3_blkdev_remove(struct ext3_sb_info *sbi)
+{
+	struct block_device *bdev;
+	int ret = -ENODEV;
+
+	bdev = sbi->journal_bdev;
+	if (bdev) {
+		ret = ext3_blkdev_put(bdev);
+		sbi->journal_bdev = NULL;
+	}
+	return ret;
+}
+
+static inline struct inode *orphan_list_entry(struct list_head *l)
+{
+	return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode;
+}
+
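+/* Debug helper: dump_orphan_list() walks sbi->s_orphan, using
+ * orphan_list_entry() above to map each list link back to the VFS inode
+ * embedded in its ext3_inode_info, and prints whatever is still pending. */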
+static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
+{
+	struct list_head *l;
+
+	printk(KERN_ERR "sb orphan head is %d\n", 
+	       le32_to_cpu(sbi->s_es->s_last_orphan));
+
+	printk(KERN_ERR "sb_info orphan list:\n");
+	list_for_each(l, &sbi->s_orphan) {
+		struct inode *inode = orphan_list_entry(l);
+		printk(KERN_ERR "  "
+		       "inode %s:%ld at %p: mode %o, nlink %d, next %d\n",
+		       inode->i_sb->s_id, inode->i_ino, inode,
+		       inode->i_mode, inode->i_nlink, 
+		       NEXT_ORPHAN(inode));
+	}
+}
+
+static void ext3_put_super (struct super_block * sb)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_super_block *es = sbi->s_es;
+	int i;
+
+	ext3_xattr_put_super(sb);
+	journal_destroy(sbi->s_journal);
+	if (!(sb->s_flags & MS_RDONLY)) {
+		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+		es->s_state = cpu_to_le16(sbi->s_mount_state);
+		BUFFER_TRACE(sbi->s_sbh, "marking dirty");
+		mark_buffer_dirty(sbi->s_sbh);
+		ext3_commit_super(sb, es, 1);
+	}
+
+	for (i = 0; i < sbi->s_gdb_count; i++)
+		brelse(sbi->s_group_desc[i]);
+	kfree(sbi->s_group_desc);
+	percpu_counter_destroy(&sbi->s_freeblocks_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	brelse(sbi->s_sbh);
+#ifdef CONFIG_QUOTA
+	for (i = 0; i < MAXQUOTAS; i++)
+		kfree(sbi->s_qf_names[i]);
+#endif
+
+	/* Debugging code just in case the in-memory inode orphan list
+	 * isn't empty.  The on-disk one can be non-empty if we've
+	 * detected an error and taken the fs readonly, but the
+	 * in-memory list had better be clean by this point. */
+	if (!list_empty(&sbi->s_orphan))
+		dump_orphan_list(sb, sbi);
+	J_ASSERT(list_empty(&sbi->s_orphan));
+
+	invalidate_bdev(sb->s_bdev, 0);
+	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+		/*
+		 * Invalidate the journal device's buffers.  We don't want them
+		 * floating about in memory - the physical journal device may
+		 * hotswapped, and it breaks the `ro-after' testing code.
+		 * be hotswapped, and it breaks the `ro-after' testing code.
+		sync_blockdev(sbi->journal_bdev);
+		invalidate_bdev(sbi->journal_bdev, 0);
+		ext3_blkdev_remove(sbi);
+	}
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+	return;
+}
+
+static kmem_cache_t *ext3_inode_cachep;
+
+/*
+ * Called inside transaction, so use GFP_NOFS
+ */
+static struct inode *ext3_alloc_inode(struct super_block *sb)
+{
+	struct ext3_inode_info *ei;
+
+	ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS);
+	if (!ei)
+		return NULL;
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+	ei->i_acl = EXT3_ACL_NOT_CACHED;
+	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+#endif
+	ei->i_block_alloc_info = NULL;
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void ext3_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		INIT_LIST_HEAD(&ei->i_orphan);
+#ifdef CONFIG_EXT3_FS_XATTR
+		init_rwsem(&ei->xattr_sem);
+#endif
+		init_MUTEX(&ei->truncate_sem);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+ 
+static int init_inodecache(void)
+{
+	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
+					     sizeof(struct ext3_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (ext3_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(ext3_inode_cachep))
+		printk(KERN_INFO "ext3_inode_cache: not all structures were freed\n");
+}
+
+static void ext3_clear_inode(struct inode *inode)
+{
+	struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+       if (EXT3_I(inode)->i_acl &&
+           EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
+               posix_acl_release(EXT3_I(inode)->i_acl);
+               EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
+       }
+       if (EXT3_I(inode)->i_default_acl &&
+           EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
+               posix_acl_release(EXT3_I(inode)->i_default_acl);
+               EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
+       }
+#endif
+	ext3_discard_reservation(inode);
+	EXT3_I(inode)->i_block_alloc_info = NULL;
+	kfree(rsv);
+}
+
+#ifdef CONFIG_QUOTA
+
+#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
+#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
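+/* For illustration: QTYPE2NAME(USRQUOTA) evaluates to "user", while
+ * QTYPE2MOPT(prefix, t) token-pastes its first argument onto USRJQUOTA or
+ * GRPJQUOTA ("prefix" is only a placeholder here), letting callers build
+ * quota-type-specific identifiers without an if/else. */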
+
+static int ext3_dquot_initialize(struct inode *inode, int type);
+static int ext3_dquot_drop(struct inode *inode);
+static int ext3_write_dquot(struct dquot *dquot);
+static int ext3_acquire_dquot(struct dquot *dquot);
+static int ext3_release_dquot(struct dquot *dquot);
+static int ext3_mark_dquot_dirty(struct dquot *dquot);
+static int ext3_write_info(struct super_block *sb, int type);
+static int ext3_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext3_quota_on_mount(struct super_block *sb, int type);
+static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
+			       size_t len, loff_t off);
+static ssize_t ext3_quota_write(struct super_block *sb, int type,
+				const char *data, size_t len, loff_t off);
+
+static struct dquot_operations ext3_quota_operations = {
+	.initialize	= ext3_dquot_initialize,
+	.drop		= ext3_dquot_drop,
+	.alloc_space	= dquot_alloc_space,
+	.alloc_inode	= dquot_alloc_inode,
+	.free_space	= dquot_free_space,
+	.free_inode	= dquot_free_inode,
+	.transfer	= dquot_transfer,
+	.write_dquot	= ext3_write_dquot,
+	.acquire_dquot	= ext3_acquire_dquot,
+	.release_dquot	= ext3_release_dquot,
+	.mark_dirty	= ext3_mark_dquot_dirty,
+	.write_info	= ext3_write_info
+};
+
+static struct quotactl_ops ext3_qctl_operations = {
+	.quota_on	= ext3_quota_on,
+	.quota_off	= vfs_quota_off,
+	.quota_sync	= vfs_quota_sync,
+	.get_info	= vfs_get_dqinfo,
+	.set_info	= vfs_set_dqinfo,
+	.get_dqblk	= vfs_get_dqblk,
+	.set_dqblk	= vfs_set_dqblk
+};
+#endif
+
+static struct super_operations ext3_sops = {
+	.alloc_inode	= ext3_alloc_inode,
+	.destroy_inode	= ext3_destroy_inode,
+	.read_inode	= ext3_read_inode,
+	.write_inode	= ext3_write_inode,
+	.dirty_inode	= ext3_dirty_inode,
+	.delete_inode	= ext3_delete_inode,
+	.put_super	= ext3_put_super,
+	.write_super	= ext3_write_super,
+	.sync_fs	= ext3_sync_fs,
+	.write_super_lockfs = ext3_write_super_lockfs,
+	.unlockfs	= ext3_unlockfs,
+	.statfs		= ext3_statfs,
+	.remount_fs	= ext3_remount,
+	.clear_inode	= ext3_clear_inode,
+#ifdef CONFIG_QUOTA
+	.quota_read	= ext3_quota_read,
+	.quota_write	= ext3_quota_write,
+#endif
+};
+
+struct dentry *ext3_get_parent(struct dentry *child);
+static struct export_operations ext3_export_ops = {
+	.get_parent = ext3_get_parent,
+};
+
+enum {
+	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
+	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
+	Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
+	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
+	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
+	Opt_commit, Opt_journal_update, Opt_journal_inum,
+	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
+	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
+	Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
+};
+
+static match_table_t tokens = {
+	{Opt_bsd_df, "bsddf"},
+	{Opt_minix_df, "minixdf"},
+	{Opt_grpid, "grpid"},
+	{Opt_grpid, "bsdgroups"},
+	{Opt_nogrpid, "nogrpid"},
+	{Opt_nogrpid, "sysvgroups"},
+	{Opt_resgid, "resgid=%u"},
+	{Opt_resuid, "resuid=%u"},
+	{Opt_sb, "sb=%u"},
+	{Opt_err_cont, "errors=continue"},
+	{Opt_err_panic, "errors=panic"},
+	{Opt_err_ro, "errors=remount-ro"},
+	{Opt_nouid32, "nouid32"},
+	{Opt_nocheck, "nocheck"},
+	{Opt_nocheck, "check=none"},
+	{Opt_check, "check"},
+	{Opt_debug, "debug"},
+	{Opt_oldalloc, "oldalloc"},
+	{Opt_orlov, "orlov"},
+	{Opt_user_xattr, "user_xattr"},
+	{Opt_nouser_xattr, "nouser_xattr"},
+	{Opt_acl, "acl"},
+	{Opt_noacl, "noacl"},
+	{Opt_reservation, "reservation"},
+	{Opt_noreservation, "noreservation"},
+	{Opt_noload, "noload"},
+	{Opt_nobh, "nobh"},
+	{Opt_commit, "commit=%u"},
+	{Opt_journal_update, "journal=update"},
+	{Opt_journal_inum, "journal=%u"},
+	{Opt_abort, "abort"},
+	{Opt_data_journal, "data=journal"},
+	{Opt_data_ordered, "data=ordered"},
+	{Opt_data_writeback, "data=writeback"},
+	{Opt_offusrjquota, "usrjquota="},
+	{Opt_usrjquota, "usrjquota=%s"},
+	{Opt_offgrpjquota, "grpjquota="},
+	{Opt_grpjquota, "grpjquota=%s"},
+	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
+	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+	{Opt_ignore, "grpquota"},
+	{Opt_ignore, "noquota"},
+	{Opt_ignore, "quota"},
+	{Opt_ignore, "usrquota"},
+	{Opt_barrier, "barrier=%u"},
+	{Opt_resize, "resize=%u"},
+	{Opt_err, NULL},
+};
+
+static unsigned long get_sb_block(void **data)
+{
+	unsigned long 	sb_block;
+	char 		*options = (char *) *data;
+
+	if (!options || strncmp(options, "sb=", 3) != 0)
+		return 1;	/* Default location */
+	options += 3;
+	sb_block = simple_strtoul(options, &options, 0);
+	if (*options && *options != ',') {
+		printk("EXT3-fs: Invalid sb specification: %s\n",
+		       (char *) *data);
+		return 1;
+	}
+	if (*options == ',')
+		options++;
+	*data = (void *) options;
+	return sb_block;
+}
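+/* Example: for mount data "sb=8193,errors=panic", get_sb_block() returns
+ * 8193 and advances *data past the "sb=8193," prefix, so parse_options()
+ * below only sees "errors=panic".  A malformed value such as "sb=8193x"
+ * falls back to the default superblock location 1. */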
+
+static int parse_options (char * options, struct super_block *sb,
+			  unsigned long * inum, unsigned long *n_blocks_count, int is_remount)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	char * p;
+	substring_t args[MAX_OPT_ARGS];
+	int data_opt = 0;
+	int option;
+#ifdef CONFIG_QUOTA
+	int qtype;
+	char *qname;
+#endif
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep (&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_bsd_df:
+			clear_opt (sbi->s_mount_opt, MINIX_DF);
+			break;
+		case Opt_minix_df:
+			set_opt (sbi->s_mount_opt, MINIX_DF);
+			break;
+		case Opt_grpid:
+			set_opt (sbi->s_mount_opt, GRPID);
+			break;
+		case Opt_nogrpid:
+			clear_opt (sbi->s_mount_opt, GRPID);
+			break;
+		case Opt_resuid:
+			if (match_int(&args[0], &option))
+				return 0;
+			sbi->s_resuid = option;
+			break;
+		case Opt_resgid:
+			if (match_int(&args[0], &option))
+				return 0;
+			sbi->s_resgid = option;
+			break;
+		case Opt_sb:
+			/* handled by get_sb_block() instead of here */
+			/* *sb_block = match_int(&args[0]); */
+			break;
+		case Opt_err_panic:
+			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+			clear_opt (sbi->s_mount_opt, ERRORS_RO);
+			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
+			break;
+		case Opt_err_ro:
+			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+			set_opt (sbi->s_mount_opt, ERRORS_RO);
+			break;
+		case Opt_err_cont:
+			clear_opt (sbi->s_mount_opt, ERRORS_RO);
+			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+			set_opt (sbi->s_mount_opt, ERRORS_CONT);
+			break;
+		case Opt_nouid32:
+			set_opt (sbi->s_mount_opt, NO_UID32);
+			break;
+		case Opt_check:
+#ifdef CONFIG_EXT3_CHECK
+			set_opt (sbi->s_mount_opt, CHECK);
+#else
+			printk(KERN_ERR
+			       "EXT3 Check option not supported\n");
+#endif
+			break;
+		case Opt_nocheck:
+			clear_opt (sbi->s_mount_opt, CHECK);
+			break;
+		case Opt_debug:
+			set_opt (sbi->s_mount_opt, DEBUG);
+			break;
+		case Opt_oldalloc:
+			set_opt (sbi->s_mount_opt, OLDALLOC);
+			break;
+		case Opt_orlov:
+			clear_opt (sbi->s_mount_opt, OLDALLOC);
+			break;
+#ifdef CONFIG_EXT3_FS_XATTR
+		case Opt_user_xattr:
+			set_opt (sbi->s_mount_opt, XATTR_USER);
+			break;
+		case Opt_nouser_xattr:
+			clear_opt (sbi->s_mount_opt, XATTR_USER);
+			break;
+#else
+		case Opt_user_xattr:
+		case Opt_nouser_xattr:
+			printk("EXT3 (no)user_xattr options not supported\n");
+			break;
+#endif
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+		case Opt_acl:
+			set_opt(sbi->s_mount_opt, POSIX_ACL);
+			break;
+		case Opt_noacl:
+			clear_opt(sbi->s_mount_opt, POSIX_ACL);
+			break;
+#else
+		case Opt_acl:
+		case Opt_noacl:
+			printk("EXT3 (no)acl options not supported\n");
+			break;
+#endif
+		case Opt_reservation:
+			set_opt(sbi->s_mount_opt, RESERVATION);
+			break;
+		case Opt_noreservation:
+			clear_opt(sbi->s_mount_opt, RESERVATION);
+			break;
+		case Opt_journal_update:
+			/* @@@ FIXME */
+			/* Eventually we will want to be able to create
+			   a journal file here.  For now, only allow the
+			   user to specify an existing inode to be the
+			   journal file. */
+			if (is_remount) {
+				printk(KERN_ERR "EXT3-fs: cannot specify "
+				       "journal on remount\n");
+				return 0;
+			}
+			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
+			break;
+		case Opt_journal_inum:
+			if (is_remount) {
+				printk(KERN_ERR "EXT3-fs: cannot specify "
+				       "journal on remount\n");
+				return 0;
+			}
+			if (match_int(&args[0], &option))
+				return 0;
+			*inum = option;
+			break;
+		case Opt_noload:
+			set_opt (sbi->s_mount_opt, NOLOAD);
+			break;
+		case Opt_commit:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0)
+				return 0;
+			if (option == 0)
+				option = JBD_DEFAULT_MAX_COMMIT_AGE;
+			sbi->s_commit_interval = HZ * option;
+			break;
+		case Opt_data_journal:
+			data_opt = EXT3_MOUNT_JOURNAL_DATA;
+			goto datacheck;
+		case Opt_data_ordered:
+			data_opt = EXT3_MOUNT_ORDERED_DATA;
+			goto datacheck;
+		case Opt_data_writeback:
+			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
+		datacheck:
+			if (is_remount) {
+				if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
+						!= data_opt) {
+					printk(KERN_ERR
+						"EXT3-fs: cannot change data "
+						"mode on remount\n");
+					return 0;
+				}
+			} else {
+				sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
+				sbi->s_mount_opt |= data_opt;
+			}
+			break;
+#ifdef CONFIG_QUOTA
+		case Opt_usrjquota:
+			qtype = USRQUOTA;
+			goto set_qf_name;
+		case Opt_grpjquota:
+			qtype = GRPQUOTA;
+set_qf_name:
+			if (sb_any_quota_enabled(sb)) {
+				printk(KERN_ERR
+					"EXT3-fs: Cannot change journalled "
+					"quota options when quota turned on.\n");
+				return 0;
+			}
+			qname = match_strdup(&args[0]);
+			if (!qname) {
+				printk(KERN_ERR
+					"EXT3-fs: not enough memory for "
+					"storing quotafile name.\n");
+				return 0;
+			}
+			if (sbi->s_qf_names[qtype] &&
+			    strcmp(sbi->s_qf_names[qtype], qname)) {
+				printk(KERN_ERR
+					"EXT3-fs: %s quota file already "
+					"specified.\n", QTYPE2NAME(qtype));
+				kfree(qname);
+				return 0;
+			}
+			sbi->s_qf_names[qtype] = qname;
+			if (strchr(sbi->s_qf_names[qtype], '/')) {
+				printk(KERN_ERR
+					"EXT3-fs: quotafile must be on "
+					"filesystem root.\n");
+				kfree(sbi->s_qf_names[qtype]);
+				sbi->s_qf_names[qtype] = NULL;
+				return 0;
+			}
+			break;
+		case Opt_offusrjquota:
+			qtype = USRQUOTA;
+			goto clear_qf_name;
+		case Opt_offgrpjquota:
+			qtype = GRPQUOTA;
+clear_qf_name:
+			if (sb_any_quota_enabled(sb)) {
+				printk(KERN_ERR "EXT3-fs: Cannot change "
+					"journalled quota options when "
+					"quota turned on.\n");
+				return 0;
+			}
+			kfree(sbi->s_qf_names[qtype]);
+			sbi->s_qf_names[qtype] = NULL;
+			break;
+		case Opt_jqfmt_vfsold:
+			sbi->s_jquota_fmt = QFMT_VFS_OLD;
+			break;
+		case Opt_jqfmt_vfsv0:
+			sbi->s_jquota_fmt = QFMT_VFS_V0;
+			break;
+#else
+		case Opt_usrjquota:
+		case Opt_grpjquota:
+		case Opt_offusrjquota:
+		case Opt_offgrpjquota:
+		case Opt_jqfmt_vfsold:
+		case Opt_jqfmt_vfsv0:
+			printk(KERN_ERR
+				"EXT3-fs: journalled quota options not "
+				"supported.\n");
+			break;
+#endif
+		case Opt_abort:
+			set_opt(sbi->s_mount_opt, ABORT);
+			break;
+		case Opt_barrier:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option)
+				set_opt(sbi->s_mount_opt, BARRIER);
+			else
+				clear_opt(sbi->s_mount_opt, BARRIER);
+			break;
+		case Opt_ignore:
+			break;
+		case Opt_resize:
+			if (!n_blocks_count) {
+				printk("EXT3-fs: resize option only available "
+					"for remount\n");
+				return 0;
+			}
+			match_int(&args[0], &option);
+			*n_blocks_count = option;
+			break;
+		case Opt_nobh:
+			set_opt(sbi->s_mount_opt, NOBH);
+			break;
+		default:
+			printk (KERN_ERR
+				"EXT3-fs: Unrecognized mount option \"%s\" "
+				"or missing value\n", p);
+			return 0;
+		}
+	}
+#ifdef CONFIG_QUOTA
+	if (!sbi->s_jquota_fmt && (sbi->s_qf_names[USRQUOTA] ||
+	    sbi->s_qf_names[GRPQUOTA])) {
+		printk(KERN_ERR
+			"EXT3-fs: journalled quota format not specified.\n");
+		return 0;
+	}
+#endif
+
+	return 1;
+}
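+/* Roughly: a mount with "data=journal,errors=panic,resuid=0" would set
+ * EXT3_MOUNT_JOURNAL_DATA and ERRORS_PANIC in sbi->s_mount_opt and set
+ * sbi->s_resuid to 0, with parse_options() returning 1; any unknown option
+ * or malformed value makes it return 0 and the mount (or remount) fails. */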
+
+static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
+			    int read_only)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	int res = 0;
+
+	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
+		printk (KERN_ERR "EXT3-fs warning: revision level too high, "
+			"forcing read-only mode\n");
+		res = MS_RDONLY;
+	}
+	if (read_only)
+		return res;
+	if (!(sbi->s_mount_state & EXT3_VALID_FS))
+		printk (KERN_WARNING "EXT3-fs warning: mounting unchecked fs, "
+			"running e2fsck is recommended\n");
+	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
+		printk (KERN_WARNING
+			"EXT3-fs warning: mounting fs with errors, "
+			"running e2fsck is recommended\n");
+	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+		 le16_to_cpu(es->s_mnt_count) >=
+		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+		printk (KERN_WARNING
+			"EXT3-fs warning: maximal mount count reached, "
+			"running e2fsck is recommended\n");
+	else if (le32_to_cpu(es->s_checkinterval) &&
+		(le32_to_cpu(es->s_lastcheck) +
+			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+		printk (KERN_WARNING
+			"EXT3-fs warning: checktime reached, "
+			"running e2fsck is recommended\n");
+#if 0
+		/* @@@ We _will_ want to clear the valid bit if we find
+                   inconsistencies, to force a fsck at reboot.  But for
+                   a plain journaled filesystem we can keep it set as
+                   valid forever! :) */
+	es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT3_VALID_FS);
+#endif
+	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
+		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
+	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+	es->s_mtime = cpu_to_le32(get_seconds());
+	ext3_update_dynamic_rev(sb);
+	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+
+	ext3_commit_super(sb, es, 1);
+	if (test_opt(sb, DEBUG))
+		printk(KERN_INFO "[EXT3 FS bs=%lu, gc=%lu, "
+				"bpg=%lu, ipg=%lu, mo=%04lx]\n",
+			sb->s_blocksize,
+			sbi->s_groups_count,
+			EXT3_BLOCKS_PER_GROUP(sb),
+			EXT3_INODES_PER_GROUP(sb),
+			sbi->s_mount_opt);
+
+	printk(KERN_INFO "EXT3 FS on %s, ", sb->s_id);
+	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
+		char b[BDEVNAME_SIZE];
+
+		printk("external journal on %s\n",
+			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
+	} else {
+		printk("internal journal\n");
+	}
+#ifdef CONFIG_EXT3_CHECK
+	if (test_opt (sb, CHECK)) {
+		ext3_check_blocks_bitmap (sb);
+		ext3_check_inodes_bitmap (sb);
+	}
+#endif
+	return res;
+}
+
+/* Called at mount-time, super-block is locked */
+static int ext3_check_descriptors (struct super_block * sb)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	unsigned long block = le32_to_cpu(sbi->s_es->s_first_data_block);
+	struct ext3_group_desc * gdp = NULL;
+	int desc_block = 0;
+	int i;
+
+	ext3_debug ("Checking group descriptors");
+
+	for (i = 0; i < sbi->s_groups_count; i++)
+	{
+		if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0)
+			gdp = (struct ext3_group_desc *)
+					sbi->s_group_desc[desc_block++]->b_data;
+		if (le32_to_cpu(gdp->bg_block_bitmap) < block ||
+		    le32_to_cpu(gdp->bg_block_bitmap) >=
+				block + EXT3_BLOCKS_PER_GROUP(sb))
+		{
+			ext3_error (sb, "ext3_check_descriptors",
+				    "Block bitmap for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long)
+					le32_to_cpu(gdp->bg_block_bitmap));
+			return 0;
+		}
+		if (le32_to_cpu(gdp->bg_inode_bitmap) < block ||
+		    le32_to_cpu(gdp->bg_inode_bitmap) >=
+				block + EXT3_BLOCKS_PER_GROUP(sb))
+		{
+			ext3_error (sb, "ext3_check_descriptors",
+				    "Inode bitmap for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long)
+					le32_to_cpu(gdp->bg_inode_bitmap));
+			return 0;
+		}
+		if (le32_to_cpu(gdp->bg_inode_table) < block ||
+		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >=
+		    block + EXT3_BLOCKS_PER_GROUP(sb))
+		{
+			ext3_error (sb, "ext3_check_descriptors",
+				    "Inode table for group %d"
+				    " not in group (block %lu)!",
+				    i, (unsigned long)
+					le32_to_cpu(gdp->bg_inode_table));
+			return 0;
+		}
+		block += EXT3_BLOCKS_PER_GROUP(sb);
+		gdp++;
+	}
+
+	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
+	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
+	return 1;
+}
+
+
+/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
+ * the superblock) which were deleted from all directories, but held open by
+ * a process at the time of a crash.  We walk the list and try to delete these
+ * inodes at recovery time (only with a read-write filesystem).
+ *
+ * In order to keep the orphan inode chain consistent during traversal (in
+ * case of crash during recovery), we link each inode into the superblock
+ * orphan list_head and handle it the same way as an inode deletion during
+ * normal operation (which journals the operations for us).
+ *
+ * We only do an iget() and an iput() on each inode, which is very safe if we
+ * accidentally point at an in-use or already deleted inode.  The worst that
+ * can happen in this case is that we get a "bit already cleared" message from
+ * ext3_free_inode().  The only reason we would point at a wrong inode is if
+ * e2fsck was run on this filesystem, and it must have already done the orphan
+ * inode cleanup for us, so we can safely abort without any further action.
+ */
+static void ext3_orphan_cleanup (struct super_block * sb,
+				 struct ext3_super_block * es)
+{
+	unsigned int s_flags = sb->s_flags;
+	int nr_orphans = 0, nr_truncates = 0;
+#ifdef CONFIG_QUOTA
+	int i;
+#endif
+	if (!es->s_last_orphan) {
+		jbd_debug(4, "no orphan inodes to clean up\n");
+		return;
+	}
+
+	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
+		if (es->s_last_orphan)
+			jbd_debug(1, "Errors on filesystem, "
+				  "clearing orphan list.\n");
+		es->s_last_orphan = 0;
+		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+		return;
+	}
+
+	if (s_flags & MS_RDONLY) {
+		printk(KERN_INFO "EXT3-fs: %s: orphan cleanup on readonly fs\n",
+		       sb->s_id);
+		sb->s_flags &= ~MS_RDONLY;
+	}
+#ifdef CONFIG_QUOTA
+	/* Needed for iput() to work correctly and not trash data */
+	sb->s_flags |= MS_ACTIVE;
+	/* Turn on quotas so that they are updated correctly */
+	for (i = 0; i < MAXQUOTAS; i++) {
+		if (EXT3_SB(sb)->s_qf_names[i]) {
+			int ret = ext3_quota_on_mount(sb, i);
+			if (ret < 0)
+				printk(KERN_ERR
+					"EXT3-fs: Cannot turn on journalled "
+					"quota: error %d\n", ret);
+		}
+	}
+#endif
+
+	while (es->s_last_orphan) {
+		struct inode *inode;
+
+		if (!(inode =
+		      ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
+			es->s_last_orphan = 0;
+			break;
+		}
+
+		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
+		DQUOT_INIT(inode);
+		if (inode->i_nlink) {
+			printk(KERN_DEBUG
+				"%s: truncating inode %ld to %Ld bytes\n",
+				__FUNCTION__, inode->i_ino, inode->i_size);
+			jbd_debug(2, "truncating inode %ld to %Ld bytes\n",
+				  inode->i_ino, inode->i_size);
+			ext3_truncate(inode);
+			nr_truncates++;
+		} else {
+			printk(KERN_DEBUG
+				"%s: deleting unreferenced inode %ld\n",
+				__FUNCTION__, inode->i_ino);
+			jbd_debug(2, "deleting unreferenced inode %ld\n",
+				  inode->i_ino);
+			nr_orphans++;
+		}
+		iput(inode);  /* The delete magic happens here! */
+	}
+
+#define PLURAL(x) (x), ((x)==1) ? "" : "s"
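+/* PLURAL(n) expands to two printk arguments -- the count and either "" or
+ * "s" -- matching the "%d ... inode%s" style format strings below. */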
+
+	if (nr_orphans)
+		printk(KERN_INFO "EXT3-fs: %s: %d orphan inode%s deleted\n",
+		       sb->s_id, PLURAL(nr_orphans));
+	if (nr_truncates)
+		printk(KERN_INFO "EXT3-fs: %s: %d truncate%s cleaned up\n",
+		       sb->s_id, PLURAL(nr_truncates));
+#ifdef CONFIG_QUOTA
+	/* Turn quotas off */
+	for (i = 0; i < MAXQUOTAS; i++) {
+		if (sb_dqopt(sb)->files[i])
+			vfs_quota_off(sb, i);
+	}
+#endif
+	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+}
+
+#define log2(n) ffz(~(n))
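+/* ffz(~(n)) is the index of the lowest set bit of n, so this log2() is only
+ * meaningful for power-of-two values, e.g. log2(4096) == 12; it is used for
+ * s_addr_per_block_bits and s_desc_per_block_bits further down. */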
+
+/*
+ * Maximal file size.  There is a direct, and {,double-,triple-}indirect
+ * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
+ * We need to be 1 filesystem block less than the 2^32 sector limit.
+ */
+static loff_t ext3_max_size(int bits)
+{
+	loff_t res = EXT3_NDIR_BLOCKS;
+	/* This constant is calculated to be the largest file size for a
+	 * dense, 4k-blocksize file such that the total number of
+	 * sectors in the file, including data and all indirect blocks,
+	 * does not exceed 2^32. */
+	const loff_t upper_limit = 0x1ff7fffd000LL;
+
+	res += 1LL << (bits-2);
+	res += 1LL << (2*(bits-2));
+	res += 1LL << (3*(bits-2));
+	res <<= bits;
+	if (res > upper_limit)
+		res = upper_limit;
+	return res;
+}
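+/* Worked example for 4k blocks (bits == 12): res counts 12 direct blocks
+ * plus 2^10, 2^20 and 2^30 indirectly addressed blocks; shifted left by 12
+ * that is roughly 2^42 bytes, which exceeds upper_limit, so the result is
+ * clamped to 0x1ff7fffd000 bytes (just under the 2^32-sector, ~2TB mark). */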
+
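+/*
+ * Layout sketch for descriptor_loc(): without META_BG (or for the first
+ * s_first_meta_bg descriptor blocks) descriptor block nr simply follows the
+ * primary superblock at logic_sb_block + nr + 1.  With META_BG, descriptor
+ * block nr instead lives at the start of the first block group it describes
+ * (group nr * s_desc_per_block), right after that group's backup superblock
+ * if ext3_bg_has_super() says it has one.
+ */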
+static unsigned long descriptor_loc(struct super_block *sb,
+				    unsigned long logic_sb_block,
+				    int nr)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	unsigned long bg, first_data_block, first_meta_bg;
+	int has_super = 0;
+
+	first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
+
+	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
+	    nr < first_meta_bg)
+		return (logic_sb_block + nr + 1);
+	bg = sbi->s_desc_per_block * nr;
+	if (ext3_bg_has_super(sb, bg))
+		has_super = 1;
+	return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
+}
+
+
+static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+{
+	struct buffer_head * bh;
+	struct ext3_super_block *es = NULL;
+	struct ext3_sb_info *sbi;
+	unsigned long block;
+	unsigned long sb_block = get_sb_block(&data);
+	unsigned long logic_sb_block;
+	unsigned long offset = 0;
+	unsigned long journal_inum = 0;
+	unsigned long def_mount_opts;
+	struct inode *root;
+	int blocksize;
+	int hblock;
+	int db_count;
+	int i;
+	int needs_recovery;
+	__le32 features;
+
+	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(*sbi));
+	sbi->s_mount_opt = 0;
+	sbi->s_resuid = EXT3_DEF_RESUID;
+	sbi->s_resgid = EXT3_DEF_RESGID;
+
+	unlock_kernel();
+
+	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
+	if (!blocksize) {
+		printk(KERN_ERR "EXT3-fs: unable to set blocksize\n");
+		goto out_fail;
+	}
+
+	/*
+	 * The ext3 superblock will not be buffer aligned for other than 1kB
+	 * block sizes.  We need to calculate the offset from buffer start.
+	 */
+	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
+		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
+		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
+	} else {
+		logic_sb_block = sb_block;
+	}
+
+	if (!(bh = sb_bread(sb, logic_sb_block))) {
+		printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
+		goto out_fail;
+	}
+	/*
+	 * Note: s_es must be initialized as soon as possible because
+	 *       some ext3 macro-instructions depend on its value
+	 */
+	es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+	sbi->s_es = es;
+	sb->s_magic = le16_to_cpu(es->s_magic);
+	if (sb->s_magic != EXT3_SUPER_MAGIC)
+		goto cantfind_ext3;
+
+	/* Set defaults before we parse the mount options */
+	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+	if (def_mount_opts & EXT3_DEFM_DEBUG)
+		set_opt(sbi->s_mount_opt, DEBUG);
+	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
+		set_opt(sbi->s_mount_opt, GRPID);
+	if (def_mount_opts & EXT3_DEFM_UID16)
+		set_opt(sbi->s_mount_opt, NO_UID32);
+	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
+		set_opt(sbi->s_mount_opt, XATTR_USER);
+	if (def_mount_opts & EXT3_DEFM_ACL)
+		set_opt(sbi->s_mount_opt, POSIX_ACL);
+	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
+		sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA;
+	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
+		sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA;
+	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
+		sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA;
+
+	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
+		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO)
+		set_opt(sbi->s_mount_opt, ERRORS_RO);
+
+	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
+	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
+
+	set_opt(sbi->s_mount_opt, RESERVATION);
+
+	if (!parse_options ((char *) data, sb, &journal_inum, NULL, 0))
+		goto failed_mount;
+
+	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+
+	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
+	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
+	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
+	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
+		printk(KERN_WARNING 
+		       "EXT3-fs warning: feature flags set on rev 0 fs, "
+		       "running e2fsck is recommended\n");
+	/*
+	 * Check feature flags regardless of the revision level, since we
+	 * previously didn't change the revision level when setting the flags,
+	 * so there is a chance incompat flags are set on a rev 0 filesystem.
+	 */
+	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
+	if (features) {
+		printk(KERN_ERR "EXT3-fs: %s: couldn't mount because of "
+		       "unsupported optional features (%x).\n",
+		       sb->s_id, le32_to_cpu(features));
+		goto failed_mount;
+	}
+	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
+	if (!(sb->s_flags & MS_RDONLY) && features) {
+		printk(KERN_ERR "EXT3-fs: %s: couldn't mount RDWR because of "
+		       "unsupported optional features (%x).\n",
+		       sb->s_id, le32_to_cpu(features));
+		goto failed_mount;
+	}
+	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
+	    blocksize > EXT3_MAX_BLOCK_SIZE) {
+		printk(KERN_ERR 
+		       "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
+		       blocksize, sb->s_id);
+		goto failed_mount;
+	}
+
+	hblock = bdev_hardsect_size(sb->s_bdev);
+	if (sb->s_blocksize != blocksize) {
+		/*
+		 * Make sure the blocksize for the filesystem is at least
+		 * as large as the hardware sector size for the machine.
+		 */
+		if (blocksize < hblock) {
+			printk(KERN_ERR "EXT3-fs: blocksize %d too small for "
+			       "device blocksize %d.\n", blocksize, hblock);
+			goto failed_mount;
+		}
+
+		brelse (bh);
+		sb_set_blocksize(sb, blocksize);
+		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
+		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
+		bh = sb_bread(sb, logic_sb_block);
+		if (!bh) {
+			printk(KERN_ERR 
+			       "EXT3-fs: Can't read superblock on 2nd try.\n");
+			goto failed_mount;
+		}
+		es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
+		sbi->s_es = es;
+		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
+			printk (KERN_ERR 
+				"EXT3-fs: Magic mismatch, very weird !\n");
+			goto failed_mount;
+		}
+	}
+
+	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);
+
+	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
+		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
+		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
+	} else {
+		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
+		    (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+		    (sbi->s_inode_size > blocksize)) {
+			printk (KERN_ERR
+				"EXT3-fs: unsupported inode size: %d\n",
+				sbi->s_inode_size);
+			goto failed_mount;
+		}
+	}
+	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
+				   le32_to_cpu(es->s_log_frag_size);
+	if (blocksize != sbi->s_frag_size) {
+		printk(KERN_ERR
+		       "EXT3-fs: fragsize %lu != blocksize %u (unsupported)\n",
+		       sbi->s_frag_size, blocksize);
+		goto failed_mount;
+	}
+	sbi->s_frags_per_block = 1;
+	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
+	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+	if (EXT3_INODE_SIZE(sb) == 0)
+		goto cantfind_ext3;
+	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
+	if (sbi->s_inodes_per_block == 0)
+		goto cantfind_ext3;
+	sbi->s_itb_per_group = sbi->s_inodes_per_group /
+					sbi->s_inodes_per_block;
+	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
+	sbi->s_sbh = bh;
+	sbi->s_mount_state = le16_to_cpu(es->s_state);
+	sbi->s_addr_per_block_bits = log2(EXT3_ADDR_PER_BLOCK(sb));
+	sbi->s_desc_per_block_bits = log2(EXT3_DESC_PER_BLOCK(sb));
+	for (i=0; i < 4; i++)
+		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
+	sbi->s_def_hash_version = es->s_def_hash_version;
+
+	if (sbi->s_blocks_per_group > blocksize * 8) {
+		printk (KERN_ERR
+			"EXT3-fs: #blocks per group too big: %lu\n",
+			sbi->s_blocks_per_group);
+		goto failed_mount;
+	}
+	if (sbi->s_frags_per_group > blocksize * 8) {
+		printk (KERN_ERR
+			"EXT3-fs: #fragments per group too big: %lu\n",
+			sbi->s_frags_per_group);
+		goto failed_mount;
+	}
+	if (sbi->s_inodes_per_group > blocksize * 8) {
+		printk (KERN_ERR
+			"EXT3-fs: #inodes per group too big: %lu\n",
+			sbi->s_inodes_per_group);
+		goto failed_mount;
+	}
+
+	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
+		goto cantfind_ext3;
+	sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
+			       le32_to_cpu(es->s_first_data_block) +
+			       EXT3_BLOCKS_PER_GROUP(sb) - 1) /
+			      EXT3_BLOCKS_PER_GROUP(sb);
+	db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
+		   EXT3_DESC_PER_BLOCK(sb);
+	sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
+				    GFP_KERNEL);
+	if (sbi->s_group_desc == NULL) {
+		printk (KERN_ERR "EXT3-fs: not enough memory\n");
+		goto failed_mount;
+	}
+
+	percpu_counter_init(&sbi->s_freeblocks_counter);
+	percpu_counter_init(&sbi->s_freeinodes_counter);
+	percpu_counter_init(&sbi->s_dirs_counter);
+	bgl_lock_init(&sbi->s_blockgroup_lock);
+
+	for (i = 0; i < db_count; i++) {
+		block = descriptor_loc(sb, logic_sb_block, i);
+		sbi->s_group_desc[i] = sb_bread(sb, block);
+		if (!sbi->s_group_desc[i]) {
+			printk (KERN_ERR "EXT3-fs: "
+				"can't read group descriptor %d\n", i);
+			db_count = i;
+			goto failed_mount2;
+		}
+	}
+	if (!ext3_check_descriptors (sb)) {
+		printk (KERN_ERR "EXT3-fs: group descriptors corrupted !\n");
+		goto failed_mount2;
+	}
+	sbi->s_gdb_count = db_count;
+	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+	spin_lock_init(&sbi->s_next_gen_lock);
+	/* per-filesystem reservation list head & lock */
+	spin_lock_init(&sbi->s_rsv_window_lock);
+	sbi->s_rsv_window_root = RB_ROOT;
+	/* Add a single, static dummy reservation to the start of the
+	 * reservation window list --- it gives us a placeholder for
+	 * append-at-start-of-list which makes the allocation logic
+	 * _much_ simpler. */
+	sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+	sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
+	sbi->s_rsv_window_head.rsv_goal_size = 0;
+	ext3_rsv_window_add(sb, &sbi->s_rsv_window_head);
+
+	/*
+	 * set up enough so that it can read an inode
+	 */
+	sb->s_op = &ext3_sops;
+	sb->s_export_op = &ext3_export_ops;
+	sb->s_xattr = ext3_xattr_handlers;
+#ifdef CONFIG_QUOTA
+	sb->s_qcop = &ext3_qctl_operations;
+	sb->dq_op = &ext3_quota_operations;
+#endif
+	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
+
+	sb->s_root = NULL;
+
+	needs_recovery = (es->s_last_orphan != 0 ||
+			  EXT3_HAS_INCOMPAT_FEATURE(sb,
+				    EXT3_FEATURE_INCOMPAT_RECOVER));
+
+	/*
+	 * The first inode we look at is the journal inode.  Don't try
+	 * root first: it may be modified in the journal!
+	 */
+	if (!test_opt(sb, NOLOAD) &&
+	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
+		if (ext3_load_journal(sb, es))
+			goto failed_mount2;
+	} else if (journal_inum) {
+		if (ext3_create_journal(sb, es, journal_inum))
+			goto failed_mount2;
+	} else {
+		if (!silent)
+			printk (KERN_ERR
+				"ext3: No journal on filesystem on %s\n",
+				sb->s_id);
+		goto failed_mount2;
+	}
+
+	/* We have now updated the journal if required, so we can
+	 * validate the data journaling mode. */
+	switch (test_opt(sb, DATA_FLAGS)) {
+	case 0:
+		/* No mode set, assume a default based on the journal
+                   capabilities: ORDERED_DATA if the journal can
+                   cope, else JOURNAL_DATA */
+		if (journal_check_available_features
+		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
+			set_opt(sbi->s_mount_opt, ORDERED_DATA);
+		else
+			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+		break;
+
+	case EXT3_MOUNT_ORDERED_DATA:
+	case EXT3_MOUNT_WRITEBACK_DATA:
+		if (!journal_check_available_features
+		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
+			printk(KERN_ERR "EXT3-fs: Journal does not support "
+			       "requested data journaling mode\n");
+			goto failed_mount3;
+		}
+	default:
+		break;
+	}
+
+	if (test_opt(sb, NOBH)) {
+		if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) {
+			printk(KERN_WARNING "EXT3-fs: Ignoring nobh option "
+				"since filesystem blocksize doesn't match "
+				"pagesize\n");
+			clear_opt(sbi->s_mount_opt, NOBH);
+		}
+		if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
+			printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
+				"it's supported only with writeback mode\n");
+			clear_opt(sbi->s_mount_opt, NOBH);
+		}
+	}
+	/*
+	 * The journal_load will have done any necessary log recovery,
+	 * so we can safely mount the rest of the filesystem now.
+	 */
+
+	root = iget(sb, EXT3_ROOT_INO);
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		printk(KERN_ERR "EXT3-fs: get root inode failed\n");
+		iput(root);
+		goto failed_mount3;
+	}
+	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+		dput(sb->s_root);
+		sb->s_root = NULL;
+		printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
+		goto failed_mount3;
+	}
+
+	ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+	/*
+	 * akpm: core read_super() calls in here with the superblock locked.
+	 * That deadlocks, because orphan cleanup needs to lock the superblock
+	 * in numerous places.  Here we just pop the lock - it's relatively
+	 * harmless, because we are now ready to accept write_super() requests,
+	 * and aviro says that's the only reason for hanging onto the
+	 * superblock lock.
+	 */
+	EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
+	ext3_orphan_cleanup(sb, es);
+	EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
+	if (needs_recovery)
+		printk (KERN_INFO "EXT3-fs: recovery complete.\n");
+	ext3_mark_recovery_complete(sb, es);
+	printk (KERN_INFO "EXT3-fs: mounted filesystem with %s data mode.\n",
+		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
+		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
+		"writeback");
+
+	percpu_counter_mod(&sbi->s_freeblocks_counter,
+		ext3_count_free_blocks(sb));
+	percpu_counter_mod(&sbi->s_freeinodes_counter,
+		ext3_count_free_inodes(sb));
+	percpu_counter_mod(&sbi->s_dirs_counter,
+		ext3_count_dirs(sb));
+
+	lock_kernel();
+	return 0;
+
+cantfind_ext3:
+	if (!silent)
+		printk(KERN_ERR "VFS: Can't find ext3 filesystem on dev %s.\n",
+		       sb->s_id);
+	goto failed_mount;
+
+failed_mount3:
+	journal_destroy(sbi->s_journal);
+failed_mount2:
+	for (i = 0; i < db_count; i++)
+		brelse(sbi->s_group_desc[i]);
+	kfree(sbi->s_group_desc);
+failed_mount:
+#ifdef CONFIG_QUOTA
+	for (i = 0; i < MAXQUOTAS; i++)
+		kfree(sbi->s_qf_names[i]);
+#endif
+	ext3_blkdev_remove(sbi);
+	brelse(bh);
+out_fail:
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+	lock_kernel();
+	return -EINVAL;
+}
+
+/*
+ * Setup any per-fs journal parameters now.  We'll do this both on
+ * initial mount, once the journal has been initialised but before we've
+ * done any recovery; and again on any subsequent remount. 
+ */
+static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+
+	if (sbi->s_commit_interval)
+		journal->j_commit_interval = sbi->s_commit_interval;
+	/* We could also set up an ext3-specific default for the commit
+	 * interval here, but for now we'll just fall back to the jbd
+	 * default. */
+
+	spin_lock(&journal->j_state_lock);
+	if (test_opt(sb, BARRIER))
+		journal->j_flags |= JFS_BARRIER;
+	else
+		journal->j_flags &= ~JFS_BARRIER;
+	spin_unlock(&journal->j_state_lock);
+}
+
+static journal_t *ext3_get_journal(struct super_block *sb, int journal_inum)
+{
+	struct inode *journal_inode;
+	journal_t *journal;
+
+	/* First, test for the existence of a valid inode on disk.  Bad
+	 * things happen if we iget() an unused inode, as the subsequent
+	 * iput() will try to delete it. */
+
+	journal_inode = iget(sb, journal_inum);
+	if (!journal_inode) {
+		printk(KERN_ERR "EXT3-fs: no journal found.\n");
+		return NULL;
+	}
+	if (!journal_inode->i_nlink) {
+		make_bad_inode(journal_inode);
+		iput(journal_inode);
+		printk(KERN_ERR "EXT3-fs: journal inode is deleted.\n");
+		return NULL;
+	}
+
+	jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
+		  journal_inode, journal_inode->i_size);
+	if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) {
+		printk(KERN_ERR "EXT3-fs: invalid journal inode.\n");
+		iput(journal_inode);
+		return NULL;
+	}
+
+	journal = journal_init_inode(journal_inode);
+	if (!journal) {
+		printk(KERN_ERR "EXT3-fs: Could not load journal inode\n");
+		iput(journal_inode);
+		return NULL;
+	}
+	journal->j_private = sb;
+	ext3_init_journal_params(sb, journal);
+	return journal;
+}
+
+static journal_t *ext3_get_dev_journal(struct super_block *sb,
+				       dev_t j_dev)
+{
+	struct buffer_head * bh;
+	journal_t *journal;
+	int start;
+	int len;
+	int hblock, blocksize;
+	unsigned long sb_block;
+	unsigned long offset;
+	struct ext3_super_block * es;
+	struct block_device *bdev;
+
+	bdev = ext3_blkdev_get(j_dev);
+	if (bdev == NULL)
+		return NULL;
+
+	if (bd_claim(bdev, sb)) {
+		printk(KERN_ERR
+		        "EXT3: failed to claim external journal device.\n");
+		blkdev_put(bdev);
+		return NULL;
+	}
+
+	blocksize = sb->s_blocksize;
+	hblock = bdev_hardsect_size(bdev);
+	if (blocksize < hblock) {
+		printk(KERN_ERR
+			"EXT3-fs: blocksize too small for journal device.\n");
+		goto out_bdev;
+	}
+
+	sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
+	offset = EXT3_MIN_BLOCK_SIZE % blocksize;
+	set_blocksize(bdev, blocksize);
+	if (!(bh = __bread(bdev, sb_block, blocksize))) {
+		printk(KERN_ERR "EXT3-fs: couldn't read superblock of "
+		       "external journal\n");
+		goto out_bdev;
+	}
+
+	es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+	if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
+	    !(le32_to_cpu(es->s_feature_incompat) &
+	      EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
+		printk(KERN_ERR "EXT3-fs: external journal has "
+					"bad superblock\n");
+		brelse(bh);
+		goto out_bdev;
+	}
+
+	if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
+		printk(KERN_ERR "EXT3-fs: journal UUID does not match\n");
+		brelse(bh);
+		goto out_bdev;
+	}
+
+	len = le32_to_cpu(es->s_blocks_count);
+	start = sb_block + 1;
+	brelse(bh);	/* we're done with the superblock */
+
+	journal = journal_init_dev(bdev, sb->s_bdev,
+					start, len, blocksize);
+	if (!journal) {
+		printk(KERN_ERR "EXT3-fs: failed to create device journal\n");
+		goto out_bdev;
+	}
+	journal->j_private = sb;
+	ll_rw_block(READ, 1, &journal->j_sb_buffer);
+	wait_on_buffer(journal->j_sb_buffer);
+	if (!buffer_uptodate(journal->j_sb_buffer)) {
+		printk(KERN_ERR "EXT3-fs: I/O error on journal device\n");
+		goto out_journal;
+	}
+	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
+		printk(KERN_ERR "EXT3-fs: External journal has more than one "
+					"user (unsupported) - %d\n",
+			be32_to_cpu(journal->j_superblock->s_nr_users));
+		goto out_journal;
+	}
+	EXT3_SB(sb)->journal_bdev = bdev;
+	ext3_init_journal_params(sb, journal);
+	return journal;
+out_journal:
+	journal_destroy(journal);
+out_bdev:
+	ext3_blkdev_put(bdev);
+	return NULL;
+}
+
+static int ext3_load_journal(struct super_block * sb,
+			     struct ext3_super_block * es)
+{
+	journal_t *journal;
+	int journal_inum = le32_to_cpu(es->s_journal_inum);
+	dev_t journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
+	int err = 0;
+	int really_read_only;
+
+	really_read_only = bdev_read_only(sb->s_bdev);
+
+	/*
+	 * Are we loading a blank journal or performing recovery after a
+	 * crash?  For recovery, we need to check in advance whether we
+	 * can get read-write access to the device.
+	 */
+
+	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) {
+		if (sb->s_flags & MS_RDONLY) {
+			printk(KERN_INFO "EXT3-fs: INFO: recovery "
+					"required on readonly filesystem.\n");
+			if (really_read_only) {
+				printk(KERN_ERR "EXT3-fs: write access "
+					"unavailable, cannot proceed.\n");
+				return -EROFS;
+			}
+			printk (KERN_INFO "EXT3-fs: write access will "
+					"be enabled during recovery.\n");
+		}
+	}
+
+	if (journal_inum && journal_dev) {
+		printk(KERN_ERR "EXT3-fs: filesystem has both journal "
+		       "and inode journals!\n");
+		return -EINVAL;
+	}
+
+	if (journal_inum) {
+		if (!(journal = ext3_get_journal(sb, journal_inum)))
+			return -EINVAL;
+	} else {
+		if (!(journal = ext3_get_dev_journal(sb, journal_dev)))
+			return -EINVAL;
+	}
+
+	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
+		err = journal_update_format(journal);
+		if (err)  {
+			printk(KERN_ERR "EXT3-fs: error updating journal.\n");
+			journal_destroy(journal);
+			return err;
+		}
+	}
+
+	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER))
+		err = journal_wipe(journal, !really_read_only);
+	if (!err)
+		err = journal_load(journal);
+
+	if (err) {
+		printk(KERN_ERR "EXT3-fs: error loading journal.\n");
+		journal_destroy(journal);
+		return err;
+	}
+
+	EXT3_SB(sb)->s_journal = journal;
+	ext3_clear_journal_err(sb, es);
+	return 0;
+}
+
+static int ext3_create_journal(struct super_block * sb,
+			       struct ext3_super_block * es,
+			       int journal_inum)
+{
+	journal_t *journal;
+
+	if (sb->s_flags & MS_RDONLY) {
+		printk(KERN_ERR "EXT3-fs: readonly filesystem when trying to "
+				"create journal.\n");
+		return -EROFS;
+	}
+
+	if (!(journal = ext3_get_journal(sb, journal_inum)))
+		return -EINVAL;
+
+	printk(KERN_INFO "EXT3-fs: creating new journal on inode %d\n",
+	       journal_inum);
+
+	if (journal_create(journal)) {
+		printk(KERN_ERR "EXT3-fs: error creating journal.\n");
+		journal_destroy(journal);
+		return -EIO;
+	}
+
+	EXT3_SB(sb)->s_journal = journal;
+
+	ext3_update_dynamic_rev(sb);
+	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);
+
+	es->s_journal_inum = cpu_to_le32(journal_inum);
+	sb->s_dirt = 1;
+
+	/* Make sure we flush the recovery flag to disk. */
+	ext3_commit_super(sb, es, 1);
+
+	return 0;
+}
+
+static void ext3_commit_super (struct super_block * sb,
+			       struct ext3_super_block * es,
+			       int sync)
+{
+	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
+
+	if (!sbh)
+		return;
+	es->s_wtime = cpu_to_le32(get_seconds());
+	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
+	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
+	BUFFER_TRACE(sbh, "marking dirty");
+	mark_buffer_dirty(sbh);
+	if (sync)
+		sync_dirty_buffer(sbh);
+}
+
+
+/*
+ * Have we just finished recovery?  If so, and if we are mounting (or
+ * remounting) the filesystem readonly, then we will end up with a
+ * consistent fs on disk.  Record that fact.
+ */
+static void ext3_mark_recovery_complete(struct super_block * sb,
+					struct ext3_super_block * es)
+{
+	journal_t *journal = EXT3_SB(sb)->s_journal;
+
+	journal_lock_updates(journal);
+	journal_flush(journal);
+	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
+	    sb->s_flags & MS_RDONLY) {
+		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+		sb->s_dirt = 0;
+		ext3_commit_super(sb, es, 1);
+	}
+	journal_unlock_updates(journal);
+}
+
+/*
+ * If we are mounting (or read-write remounting) a filesystem whose journal
+ * has recorded an error from a previous lifetime, move that error to the
+ * main filesystem now.
+ */
+static void ext3_clear_journal_err(struct super_block * sb,
+				   struct ext3_super_block * es)
+{
+	journal_t *journal;
+	int j_errno;
+	const char *errstr;
+
+	journal = EXT3_SB(sb)->s_journal;
+
+	/*
+	 * Now check for any error status which may have been recorded in the
+	 * journal by a prior ext3_error() or ext3_abort()
+	 */
+
+	j_errno = journal_errno(journal);
+	if (j_errno) {
+		char nbuf[16];
+
+		errstr = ext3_decode_error(sb, j_errno, nbuf);
+		ext3_warning(sb, __FUNCTION__, "Filesystem error recorded "
+			     "from previous mount: %s", errstr);
+		ext3_warning(sb, __FUNCTION__, "Marking fs in need of "
+			     "filesystem check.");
+
+		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
+		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
+		ext3_commit_super (sb, es, 1);
+
+		journal_clear_err(journal);
+	}
+}
+
+/*
+ * Force the running and committing transactions to commit,
+ * and wait on the commit.
+ */
+int ext3_force_commit(struct super_block *sb)
+{
+	journal_t *journal;
+	int ret;
+
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
+	journal = EXT3_SB(sb)->s_journal;
+	sb->s_dirt = 0;
+	ret = ext3_journal_force_commit(journal);
+	return ret;
+}
+
+/*
+ * Ext3 always journals updates to the superblock itself, so we don't
+ * have to propagate any other updates to the superblock on disk at this
+ * point.  Just start an async writeback to get the buffers on their way
+ * to the disk.
+ *
+ * This implicitly triggers the writebehind on sync().
+ */
+
+static void ext3_write_super (struct super_block * sb)
+{
+	if (down_trylock(&sb->s_lock) == 0)
+		BUG();
+	sb->s_dirt = 0;
+}
+
+static int ext3_sync_fs(struct super_block *sb, int wait)
+{
+	tid_t target;
+
+	sb->s_dirt = 0;
+	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
+		if (wait)
+			log_wait_commit(EXT3_SB(sb)->s_journal, target);
+	}
+	return 0;
+}
+
+/*
+ * LVM calls this function before a (read-only) snapshot is created.  This
+ * gives us a chance to flush the journal completely and mark the fs clean.
+ */
+static void ext3_write_super_lockfs(struct super_block *sb)
+{
+	sb->s_dirt = 0;
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		journal_t *journal = EXT3_SB(sb)->s_journal;
+
+		/* Now we set up the journal barrier. */
+		journal_lock_updates(journal);
+		journal_flush(journal);
+
+		/* Journal blocked and flushed, clear needs_recovery flag. */
+		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
+	}
+}
+
+/*
+ * Called by LVM after the snapshot is done.  We need to reset the RECOVER
+ * flag here, even though the filesystem is not technically dirty yet.
+ */
+static void ext3_unlockfs(struct super_block *sb)
+{
+	if (!(sb->s_flags & MS_RDONLY)) {
+		lock_super(sb);
+		/* Restore the needs_recovery flag before the fs is unlocked. */
+		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
+		unlock_super(sb);
+		journal_unlock_updates(EXT3_SB(sb)->s_journal);
+	}
+}
+
+static int ext3_remount (struct super_block * sb, int * flags, char * data)
+{
+	struct ext3_super_block * es;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	unsigned long tmp;
+	unsigned long n_blocks_count = 0;
+
+	/*
+	 * Allow the "check" option to be passed as a remount option.
+	 */
+	if (!parse_options(data, sb, &tmp, &n_blocks_count, 1))
+		return -EINVAL;
+
+	if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+		ext3_abort(sb, __FUNCTION__, "Abort forced by user");
+
+	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+
+	es = sbi->s_es;
+
+	ext3_init_journal_params(sb, sbi->s_journal);
+
+	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
+		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
+		if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+			return -EROFS;
+
+		if (*flags & MS_RDONLY) {
+			/*
+			 * First of all, the unconditional stuff we have to do
+			 * to disable replay of the journal when we next remount
+			 */
+			sb->s_flags |= MS_RDONLY;
+
+			/*
+			 * OK, test if we are remounting a valid rw partition
+			 * readonly, and if so set the rdonly flag and then
+			 * mark the partition as valid again.
+			 */
+			if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) &&
+			    (sbi->s_mount_state & EXT3_VALID_FS))
+				es->s_state = cpu_to_le16(sbi->s_mount_state);
+
+			ext3_mark_recovery_complete(sb, es);
+		} else {
+			__le32 ret;
+			if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
+					~EXT3_FEATURE_RO_COMPAT_SUPP))) {
+				printk(KERN_WARNING "EXT3-fs: %s: couldn't "
+				       "remount RDWR because of unsupported "
+				       "optional features (%x).\n",
+				       sb->s_id, le32_to_cpu(ret));
+				return -EROFS;
+			}
+			/*
+			 * Mounting a RDONLY partition read-write, so reread
+			 * and store the current valid flag.  (It may have
+			 * been changed by e2fsck since we originally mounted
+			 * the partition.)
+			 */
+			ext3_clear_journal_err(sb, es);
+			sbi->s_mount_state = le16_to_cpu(es->s_state);
+			if ((ret = ext3_group_extend(sb, es, n_blocks_count)))
+				return ret;
+			if (!ext3_setup_super (sb, es, 0))
+				sb->s_flags &= ~MS_RDONLY;
+		}
+	}
+	return 0;
+}
+
+static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+	unsigned long overhead;
+	int i;
+
+	if (test_opt (sb, MINIX_DF))
+		overhead = 0;
+	else {
+		unsigned long ngroups;
+		ngroups = EXT3_SB(sb)->s_groups_count;
+		smp_rmb();
+
+		/*
+		 * Compute the overhead (FS structures)
+		 */
+
+		/*
+		 * All of the blocks before first_data_block are
+		 * overhead
+		 */
+		overhead = le32_to_cpu(es->s_first_data_block);
+
+		/*
+		 * Add the overhead attributed to the superblock and
+		 * block group descriptors.  If the sparse superblocks
+		 * feature is turned on, then not all groups have this.
+		 */
+		for (i = 0; i < ngroups; i++) {
+			overhead += ext3_bg_has_super(sb, i) +
+				ext3_bg_num_gdb(sb, i);
+			cond_resched();
+		}
+
+		/*
+		 * Every block group has an inode bitmap, a block
+		 * bitmap, and an inode table.
+		 */
+		overhead += (ngroups * (2 + EXT3_SB(sb)->s_itb_per_group));
+	}
+
+	buf->f_type = EXT3_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
+	buf->f_bfree = ext3_count_free_blocks (sb);
+	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
+	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
+		buf->f_bavail = 0;
+	buf->f_files = le32_to_cpu(es->s_inodes_count);
+	buf->f_ffree = ext3_count_free_inodes (sb);
+	buf->f_namelen = EXT3_NAME_LEN;
+	return 0;
+}
+
+/* Helper functions for writing quotas on sync - we need to start a transaction
+ * before the quota file is locked for write. Otherwise deadlocks are possible:
+ * Process 1                         Process 2
+ * ext3_create()                     quota_sync()
+ *   journal_start()                   write_dquot()
+ *   DQUOT_INIT()                        down(dqio_sem)
+ *     down(dqio_sem)                    journal_start()
+ *
+ */
+
+#ifdef CONFIG_QUOTA
+
+static inline struct inode *dquot_to_inode(struct dquot *dquot)
+{
+	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+}
+
+static int ext3_dquot_initialize(struct inode *inode, int type)
+{
+	handle_t *handle;
+	int ret, err;
+
+	/* We may create a quota structure, so we need to reserve enough blocks */
+	handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_initialize(inode, type);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_dquot_drop(struct inode *inode)
+{
+	handle_t *handle;
+	int ret, err;
+
+	/* We may delete a quota structure, so we need to reserve enough blocks */
+	handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_drop(inode);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_write_dquot(struct dquot *dquot)
+{
+	int ret, err;
+	handle_t *handle;
+	struct inode *inode;
+
+	inode = dquot_to_inode(dquot);
+	handle = ext3_journal_start(inode,
+					EXT3_QUOTA_TRANS_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_commit(dquot);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_acquire_dquot(struct dquot *dquot)
+{
+	int ret, err;
+	handle_t *handle;
+
+	handle = ext3_journal_start(dquot_to_inode(dquot),
+					EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_acquire(dquot);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_release_dquot(struct dquot *dquot)
+{
+	int ret, err;
+	handle_t *handle;
+
+	handle = ext3_journal_start(dquot_to_inode(dquot),
+					EXT3_QUOTA_INIT_BLOCKS);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_release(dquot);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+static int ext3_mark_dquot_dirty(struct dquot *dquot)
+{
+	/* Are we journalling quotas? */
+	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
+	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+		dquot_mark_dquot_dirty(dquot);
+		return ext3_write_dquot(dquot);
+	} else {
+		return dquot_mark_dquot_dirty(dquot);
+	}
+}
+
+static int ext3_write_info(struct super_block *sb, int type)
+{
+	int ret, err;
+	handle_t *handle;
+
+	/* Data block + inode block */
+	handle = ext3_journal_start(sb->s_root->d_inode, 2);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	ret = dquot_commit_info(sb, type);
+	err = ext3_journal_stop(handle);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+/*
+ * Turn on quotas at mount time - we need to find
+ * the quota file and such...
+ */
+static int ext3_quota_on_mount(struct super_block *sb, int type)
+{
+	int err;
+	struct dentry *dentry;
+	struct qstr name = { .name = EXT3_SB(sb)->s_qf_names[type],
+			     .hash = 0,
+			     .len = strlen(EXT3_SB(sb)->s_qf_names[type])};
+
+	dentry = lookup_hash(&name, sb->s_root);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+	err = vfs_quota_on_mount(type, EXT3_SB(sb)->s_jquota_fmt, dentry);
+	/* Now invalidate and put the dentry - quota got its own reference
+	 * to the inode, and the dentry's hash is wrong anyway (we looked it
+	 * up with hash 0), so we had better throw it away */
+	d_invalidate(dentry);
+	dput(dentry);
+	return err;
+}
+
+/*
+ * Standard function to be called on quota_on
+ */
+static int ext3_quota_on(struct super_block *sb, int type, int format_id,
+			 char *path)
+{
+	int err;
+	struct nameidata nd;
+
+	/* Not journalling quota? */
+	if (!EXT3_SB(sb)->s_qf_names[USRQUOTA] &&
+	    !EXT3_SB(sb)->s_qf_names[GRPQUOTA])
+		return vfs_quota_on(sb, type, format_id, path);
+	err = path_lookup(path, LOOKUP_FOLLOW, &nd);
+	if (err)
+		return err;
+	/* Quotafile not on the same filesystem? */
+	if (nd.mnt->mnt_sb != sb) {
+		path_release(&nd);
+		return -EXDEV;
+	}
+	/* Quotafile not in fs root? */
+	if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+		printk(KERN_WARNING
+			"EXT3-fs: Quota file not on filesystem root. "
+			"Journalled quota will not work.\n");
+	path_release(&nd);
+	return vfs_quota_on(sb, type, format_id, path);
+}
+
+/* Read data from quotafile - avoid pagecache and such because we cannot afford
+ * acquiring the locks... As quota files are never truncated and quota code
+ * itself serializes the operations (and no one else should touch the files)
+ * we don't have to be afraid of races */
+static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
+			       size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t toread;
+	struct buffer_head *bh;
+	loff_t i_size = i_size_read(inode);
+
+	if (off > i_size)
+		return 0;
+	if (off+len > i_size)
+		len = i_size-off;
+	toread = len;
+	while (toread > 0) {
+		tocopy = sb->s_blocksize - offset < toread ?
+				sb->s_blocksize - offset : toread;
+		bh = ext3_bread(NULL, inode, blk, 0, &err);
+		if (err)
+			return err;
+		if (!bh)	/* A hole? */
+			memset(data, 0, tocopy);
+		else
+			memcpy(data, bh->b_data+offset, tocopy);
+		brelse(bh);
+		offset = 0;
+		toread -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+	return len;
+}
+
+/* Write to quotafile (we know the transaction is already started and has
+ * enough credits) */
+static ssize_t ext3_quota_write(struct super_block *sb, int type,
+				const char *data, size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
+	size_t towrite = len;
+	struct buffer_head *bh;
+	handle_t *handle = journal_current_handle();
+
+	down(&inode->i_sem);
+	while (towrite > 0) {
+		tocopy = sb->s_blocksize - offset < towrite ?
+				sb->s_blocksize - offset : towrite;
+		bh = ext3_bread(handle, inode, blk, 1, &err);
+		if (!bh)
+			goto out;
+		if (journal_quota) {
+			err = ext3_journal_get_write_access(handle, bh);
+			if (err) {
+				brelse(bh);
+				goto out;
+			}
+		}
+		lock_buffer(bh);
+		memcpy(bh->b_data+offset, data, tocopy);
+		flush_dcache_page(bh->b_page);
+		unlock_buffer(bh);
+		if (journal_quota)
+			err = ext3_journal_dirty_metadata(handle, bh);
+		else {
+			/* Always do at least ordered writes for quotas */
+			err = ext3_journal_dirty_data(handle, bh);
+			mark_buffer_dirty(bh);
+		}
+		brelse(bh);
+		if (err)
+			goto out;
+		offset = 0;
+		towrite -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+out:
+	if (len == towrite)
+		return err;
+	if (inode->i_size < off+len-towrite) {
+		i_size_write(inode, off+len-towrite);
+		EXT3_I(inode)->i_disksize = inode->i_size;
+	}
+	inode->i_version++;
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	ext3_mark_inode_dirty(handle, inode);
+	up(&inode->i_sem);
+	return len - towrite;
+}
+
+#endif
+
+static struct super_block *ext3_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
+}
+
+static struct file_system_type ext3_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ext3",
+	.get_sb		= ext3_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_ext3_fs(void)
+{
+	int err = init_ext3_xattr();
+	if (err)
+		return err;
+	err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&ext3_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	exit_ext3_xattr();
+	return err;
+}
+
+static void __exit exit_ext3_fs(void)
+{
+	unregister_filesystem(&ext3_fs_type);
+	destroy_inodecache();
+	exit_ext3_xattr();
+}
+
+MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
+MODULE_LICENSE("GPL");
+module_init(init_ext3_fs)
+module_exit(exit_ext3_fs)
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
new file mode 100644
index 0000000..8c3e728
--- /dev/null
+++ b/fs/ext3/symlink.c
@@ -0,0 +1,54 @@
+/*
+ *  linux/fs/ext3/symlink.c
+ *
+ * Only fast symlinks left here - the rest is done by generic code. AV, 1999
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/symlink.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext3 symlink handling code
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/namei.h>
+#include "xattr.h"
+
+static int ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
+	nd_set_link(nd, (char*)ei->i_data);
+	return 0;
+}
+
+struct inode_operations ext3_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+#ifdef CONFIG_EXT3_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext3_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+};
+
+struct inode_operations ext3_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= ext3_follow_link,
+#ifdef CONFIG_EXT3_FS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= ext3_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
+};
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
new file mode 100644
index 0000000..4cbc6d0
--- /dev/null
+++ b/fs/ext3/xattr.c
@@ -0,0 +1,1320 @@
+/*
+ * linux/fs/ext3/xattr.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ *
+ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
+ * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
+ * Extended attributes for symlinks and special files added per
+ *  suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ *  Red Hat Inc.
+ * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
+ *  and Andreas Gruenbacher <agruen@suse.de>.
+ */
+
+/*
+ * Extended attributes are stored directly in inodes (on file systems with
+ * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
+ * field contains the block number if an inode uses an additional block. All
+ * attributes must fit in the inode and one additional block. Blocks that
+ * contain the identical set of attributes may be shared among several inodes.
+ * Identical blocks are detected by keeping a cache of blocks that have
+ * recently been accessed.
+ *
+ * The attributes in inodes and on blocks have different headers; the entries
+ * are stored in the same format:
+ *
+ *   +------------------+
+ *   | header           |
+ *   | entry 1          | |
+ *   | entry 2          | | growing downwards
+ *   | entry 3          | v
+ *   | four null bytes  |
+ *   | . . .            |
+ *   | value 1          | ^
+ *   | value 3          | | growing upwards
+ *   | value 2          | |
+ *   +------------------+
+ *
+ * The header is followed by multiple entry descriptors. In disk blocks, the
+ * entry descriptors are kept sorted. In inodes, they are unsorted. The
+ * attribute values are aligned to the end of the block in no specific order.
+ *
+ * Locking strategy
+ * ----------------
+ * EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem.
+ * EA blocks are only changed if they are exclusive to an inode, so
+ * holding xattr_sem also means that nothing but the EA block's reference
+ * count can change. Multiple writers to the same block are synchronized
+ * by the buffer lock.
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
+#include <linux/mbcache.h>
+#include <linux/quotaops.h>
+#include <linux/rwsem.h>
+#include "xattr.h"
+#include "acl.h"
+
+#define BHDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
+#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
+#define BFIRST(bh) ENTRY(BHDR(bh)+1)
+#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
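+
+/*
+ * Purely illustrative sketch of walking the entry descriptors laid out as
+ * described in the comment at the top of this file: descriptors follow the
+ * header and are terminated by four null bytes.  This helper is not used
+ * anywhere in the driver; its name is made up for the example.
+ */
+static inline int ext3_xattr_example_count_entries(struct buffer_head *bh)
+{
+	struct ext3_xattr_entry *entry = BFIRST(bh);	/* first descriptor */
+	int count = 0;
+
+	while (!IS_LAST_ENTRY(entry)) {
+		count++;
+		entry = EXT3_XATTR_NEXT(entry);
+	}
+	return count;
+}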
+
+#define IHDR(inode, raw_inode) \
+	((struct ext3_xattr_ibody_header *) \
+		((void *)raw_inode + \
+		 EXT3_GOOD_OLD_INODE_SIZE + \
+		 EXT3_I(inode)->i_extra_isize))
+#define IFIRST(hdr) ((struct ext3_xattr_entry *)((hdr)+1))
+
+#ifdef EXT3_XATTR_DEBUG
+# define ea_idebug(inode, f...) do { \
+		printk(KERN_DEBUG "inode %s:%ld: ", \
+			inode->i_sb->s_id, inode->i_ino); \
+		printk(f); \
+		printk("\n"); \
+	} while (0)
+# define ea_bdebug(bh, f...) do { \
+		char b[BDEVNAME_SIZE]; \
+		printk(KERN_DEBUG "block %s:%lu: ", \
+			bdevname(bh->b_bdev, b), \
+			(unsigned long) bh->b_blocknr); \
+		printk(f); \
+		printk("\n"); \
+	} while (0)
+#else
+# define ea_idebug(f...)
+# define ea_bdebug(f...)
+#endif
+
+static void ext3_xattr_cache_insert(struct buffer_head *);
+static struct buffer_head *ext3_xattr_cache_find(struct inode *,
+						 struct ext3_xattr_header *,
+						 struct mb_cache_entry **);
+static void ext3_xattr_rehash(struct ext3_xattr_header *,
+			      struct ext3_xattr_entry *);
+
+static struct mb_cache *ext3_xattr_cache;
+
+static struct xattr_handler *ext3_xattr_handler_map[] = {
+	[EXT3_XATTR_INDEX_USER]		     = &ext3_xattr_user_handler,
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+	[EXT3_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext3_xattr_acl_access_handler,
+	[EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext3_xattr_acl_default_handler,
+#endif
+	[EXT3_XATTR_INDEX_TRUSTED]	     = &ext3_xattr_trusted_handler,
+#ifdef CONFIG_EXT3_FS_SECURITY
+	[EXT3_XATTR_INDEX_SECURITY]	     = &ext3_xattr_security_handler,
+#endif
+};
+
+struct xattr_handler *ext3_xattr_handlers[] = {
+	&ext3_xattr_user_handler,
+	&ext3_xattr_trusted_handler,
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+	&ext3_xattr_acl_access_handler,
+	&ext3_xattr_acl_default_handler,
+#endif
+#ifdef CONFIG_EXT3_FS_SECURITY
+	&ext3_xattr_security_handler,
+#endif
+	NULL
+};
+
+static inline struct xattr_handler *
+ext3_xattr_handler(int name_index)
+{
+	struct xattr_handler *handler = NULL;
+
+	if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map))
+		handler = ext3_xattr_handler_map[name_index];
+	return handler;
+}
+
+/*
+ * Inode operation listxattr()
+ *
+ * dentry->d_inode->i_sem: don't care
+ */
+ssize_t
+ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+	return ext3_xattr_list(dentry->d_inode, buffer, size);
+}
+
+static int
+ext3_xattr_check_names(struct ext3_xattr_entry *entry, void *end)
+{
+	while (!IS_LAST_ENTRY(entry)) {
+		struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry);
+		if ((void *)next >= end)
+			return -EIO;
+		entry = next;
+	}
+	return 0;
+}
+
+static inline int
+ext3_xattr_check_block(struct buffer_head *bh)
+{
+	int error;
+
+	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
+	    BHDR(bh)->h_blocks != cpu_to_le32(1))
+		return -EIO;
+	error = ext3_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	return error;
+}
+
+static inline int
+ext3_xattr_check_entry(struct ext3_xattr_entry *entry, size_t size)
+{
+	size_t value_size = le32_to_cpu(entry->e_value_size);
+
+	if (entry->e_value_block != 0 || value_size > size ||
+	    le16_to_cpu(entry->e_value_offs) + value_size > size)
+		return -EIO;
+	return 0;
+}
+
+static int
+ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index,
+		      const char *name, size_t size, int sorted)
+{
+	struct ext3_xattr_entry *entry;
+	size_t name_len;
+	int cmp = 1;
+
+	if (name == NULL)
+		return -EINVAL;
+	name_len = strlen(name);
+	entry = *pentry;
+	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
+		cmp = name_index - entry->e_name_index;
+		if (!cmp)
+			cmp = name_len - entry->e_name_len;
+		if (!cmp)
+			cmp = memcmp(name, entry->e_name, name_len);
+		if (cmp <= 0 && (sorted || cmp == 0))
+			break;
+	}
+	*pentry = entry;
+	if (!cmp && ext3_xattr_check_entry(entry, size))
+		return -EIO;
+	return cmp ? -ENODATA : 0;
+}
+
+int
+ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
+		     void *buffer, size_t buffer_size)
+{
+	struct buffer_head *bh = NULL;
+	struct ext3_xattr_entry *entry;
+	size_t size;
+	int error;
+
+	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
+		  name_index, name, buffer, (long)buffer_size);
+
+	error = -ENODATA;
+	if (!EXT3_I(inode)->i_file_acl)
+		goto cleanup;
+	ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
+	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
+	if (!bh)
+		goto cleanup;
+	ea_bdebug(bh, "b_count=%d, refcount=%d",
+		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+	if (ext3_xattr_check_block(bh)) {
+bad_block:	ext3_error(inode->i_sb, __FUNCTION__,
+			   "inode %ld: bad block %d", inode->i_ino,
+			   EXT3_I(inode)->i_file_acl);
+		error = -EIO;
+		goto cleanup;
+	}
+	ext3_xattr_cache_insert(bh);
+	entry = BFIRST(bh);
+	error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
+	if (error == -EIO)
+		goto bad_block;
+	if (error)
+		goto cleanup;
+	size = le32_to_cpu(entry->e_value_size);
+	if (buffer) {
+		error = -ERANGE;
+		if (size > buffer_size)
+			goto cleanup;
+		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
+		       size);
+	}
+	error = size;
+
+cleanup:
+	brelse(bh);
+	return error;
+}
+
+static int
+ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+		     void *buffer, size_t buffer_size)
+{
+	struct ext3_xattr_ibody_header *header;
+	struct ext3_xattr_entry *entry;
+	struct ext3_inode *raw_inode;
+	struct ext3_iloc iloc;
+	size_t size;
+	void *end;
+	int error;
+
+	if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+		return -ENODATA;
+	error = ext3_get_inode_loc(inode, &iloc);
+	if (error)
+		return error;
+	raw_inode = ext3_raw_inode(&iloc);
+	header = IHDR(inode, raw_inode);
+	entry = IFIRST(header);
+	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
+	error = ext3_xattr_check_names(entry, end);
+	if (error)
+		goto cleanup;
+	error = ext3_xattr_find_entry(&entry, name_index, name,
+				      end - (void *)entry, 0);
+	if (error)
+		goto cleanup;
+	size = le32_to_cpu(entry->e_value_size);
+	if (buffer) {
+		error = -ERANGE;
+		if (size > buffer_size)
+			goto cleanup;
+		memcpy(buffer, (void *)IFIRST(header) +
+		       le16_to_cpu(entry->e_value_offs), size);
+	}
+	error = size;
+
+cleanup:
+	brelse(iloc.bh);
+	return error;
+}
+
+/*
+ * ext3_xattr_get()
+ *
+ * Copy an extended attribute into the buffer
+ * provided, or compute the buffer size required.
+ * Pass a NULL buffer to compute just the size required.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+int
+ext3_xattr_get(struct inode *inode, int name_index, const char *name,
+	       void *buffer, size_t buffer_size)
+{
+	int error;
+
+	down_read(&EXT3_I(inode)->xattr_sem);
+	error = ext3_xattr_ibody_get(inode, name_index, name, buffer,
+				     buffer_size);
+	if (error == -ENODATA)
+		error = ext3_xattr_block_get(inode, name_index, name, buffer,
+					     buffer_size);
+	up_read(&EXT3_I(inode)->xattr_sem);
+	return error;
+}
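+
+/*
+ * Sketch of the calling convention documented above: query with a NULL
+ * buffer to learn the value size, then fetch into an allocation of that
+ * size.  The helper name and the GFP_KERNEL allocation are assumptions made
+ * for this example only; nothing in the driver uses it.
+ */
+static inline int ext3_xattr_get_alloc_example(struct inode *inode,
+	int name_index, const char *name, void **value)
+{
+	int size = ext3_xattr_get(inode, name_index, name, NULL, 0);
+
+	*value = NULL;
+	if (size <= 0)		/* error, or an empty value: nothing to copy */
+		return size;
+	*value = kmalloc(size, GFP_KERNEL);
+	if (!*value)
+		return -ENOMEM;
+	/* The second call copies the value; it can still fail, so recheck. */
+	size = ext3_xattr_get(inode, name_index, name, *value, size);
+	if (size < 0) {
+		kfree(*value);
+		*value = NULL;
+	}
+	return size;
+}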
+
+static int
+ext3_xattr_list_entries(struct inode *inode, struct ext3_xattr_entry *entry,
+			char *buffer, size_t buffer_size)
+{
+	size_t rest = buffer_size;
+
+	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
+		struct xattr_handler *handler =
+			ext3_xattr_handler(entry->e_name_index);
+
+		if (handler) {
+			size_t size = handler->list(inode, buffer, rest,
+						    entry->e_name,
+						    entry->e_name_len);
+			if (buffer) {
+				if (size > rest)
+					return -ERANGE;
+				buffer += size;
+			}
+			rest -= size;
+		}
+	}
+	return buffer_size - rest;
+}
+
+int
+ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	struct buffer_head *bh = NULL;
+	int error;
+
+	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+		  buffer, (long)buffer_size);
+
+	error = 0;
+	if (!EXT3_I(inode)->i_file_acl)
+		goto cleanup;
+	ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
+	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
+	error = -EIO;
+	if (!bh)
+		goto cleanup;
+	ea_bdebug(bh, "b_count=%d, refcount=%d",
+		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+	if (ext3_xattr_check_block(bh)) {
+		ext3_error(inode->i_sb, __FUNCTION__,
+			   "inode %ld: bad block %d", inode->i_ino,
+			   EXT3_I(inode)->i_file_acl);
+		error = -EIO;
+		goto cleanup;
+	}
+	ext3_xattr_cache_insert(bh);
+	error = ext3_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
+
+cleanup:
+	brelse(bh);
+
+	return error;
+}
+
+static int
+ext3_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	struct ext3_xattr_ibody_header *header;
+	struct ext3_inode *raw_inode;
+	struct ext3_iloc iloc;
+	void *end;
+	int error;
+
+	if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+		return 0;
+	error = ext3_get_inode_loc(inode, &iloc);
+	if (error)
+		return error;
+	raw_inode = ext3_raw_inode(&iloc);
+	header = IHDR(inode, raw_inode);
+	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
+	error = ext3_xattr_check_names(IFIRST(header), end);
+	if (error)
+		goto cleanup;
+	error = ext3_xattr_list_entries(inode, IFIRST(header),
+					buffer, buffer_size);
+
+cleanup:
+	brelse(iloc.bh);
+	return error;
+}
+
+/*
+ * ext3_xattr_list()
+ *
+ * Copy a list of attribute names into the buffer
+ * provided, or compute the buffer size required.
+ * Pass a NULL buffer to compute just the size required.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+int
+ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+	int i_error, b_error;
+
+	down_read(&EXT3_I(inode)->xattr_sem);
+	i_error = ext3_xattr_ibody_list(inode, buffer, buffer_size);
+	if (i_error < 0) {
+		b_error = 0;
+	} else {
+		if (buffer) {
+			buffer += i_error;
+			buffer_size -= i_error;
+		}
+		b_error = ext3_xattr_block_list(inode, buffer, buffer_size);
+		if (b_error < 0)
+			i_error = 0;
+	}
+	up_read(&EXT3_I(inode)->xattr_sem);
+	return i_error + b_error;
+}
+
+/*
+ * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
+ * not set, set it.
+ */
+static void ext3_xattr_update_super_block(handle_t *handle,
+					  struct super_block *sb)
+{
+	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
+		return;
+
+	lock_super(sb);
+	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
+		EXT3_SB(sb)->s_es->s_feature_compat |=
+			cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
+		sb->s_dirt = 1;
+		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	}
+	unlock_super(sb);
+}
+
+/*
+ * Release the xattr block BH: If the reference count is > 1, decrement
+ * it; otherwise free the block.
+ */
+static void
+ext3_xattr_release_block(handle_t *handle, struct inode *inode,
+			 struct buffer_head *bh)
+{
+	struct mb_cache_entry *ce = NULL;
+
+	ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
+	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
+		ea_bdebug(bh, "refcount now=0; freeing");
+		if (ce)
+			mb_cache_entry_free(ce);
+		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
+		get_bh(bh);
+		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
+	} else {
+		if (ext3_journal_get_write_access(handle, bh) == 0) {
+			lock_buffer(bh);
+			BHDR(bh)->h_refcount = cpu_to_le32(
+				le32_to_cpu(BHDR(bh)->h_refcount) - 1);
+			ext3_journal_dirty_metadata(handle, bh);
+			if (IS_SYNC(inode))
+				handle->h_sync = 1;
+			DQUOT_FREE_BLOCK(inode, 1);
+			unlock_buffer(bh);
+			ea_bdebug(bh, "refcount now=%d; releasing",
+				  le32_to_cpu(BHDR(bh)->h_refcount));
+		}
+		if (ce)
+			mb_cache_entry_release(ce);
+	}
+}
+
+struct ext3_xattr_info {
+	int name_index;
+	const char *name;
+	const void *value;
+	size_t value_len;
+};
+
+struct ext3_xattr_search {
+	struct ext3_xattr_entry *first;
+	void *base;
+	void *end;
+	struct ext3_xattr_entry *here;
+	int not_found;
+};
+
+static int
+ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s)
+{
+	struct ext3_xattr_entry *last;
+	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
+
+	/* Compute min_offs and last. */
+	last = s->first;
+	for (; !IS_LAST_ENTRY(last); last = EXT3_XATTR_NEXT(last)) {
+		if (!last->e_value_block && last->e_value_size) {
+			size_t offs = le16_to_cpu(last->e_value_offs);
+			if (offs < min_offs)
+				min_offs = offs;
+		}
+	}
+	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
+	if (!s->not_found) {
+		if (!s->here->e_value_block && s->here->e_value_size) {
+			size_t size = le32_to_cpu(s->here->e_value_size);
+			free += EXT3_XATTR_SIZE(size);
+		}
+		free += EXT3_XATTR_LEN(name_len);
+	}
+	if (i->value) {
+		if (free < EXT3_XATTR_SIZE(i->value_len) ||
+		    free < EXT3_XATTR_LEN(name_len) +
+			   EXT3_XATTR_SIZE(i->value_len))
+			return -ENOSPC;
+	}
+
+	if (i->value && s->not_found) {
+		/* Insert the new name. */
+		size_t size = EXT3_XATTR_LEN(name_len);
+		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
+		memmove((void *)s->here + size, s->here, rest);
+		memset(s->here, 0, size);
+		s->here->e_name_index = i->name_index;
+		s->here->e_name_len = name_len;
+		memcpy(s->here->e_name, i->name, name_len);
+	} else {
+		if (!s->here->e_value_block && s->here->e_value_size) {
+			void *first_val = s->base + min_offs;
+			size_t offs = le16_to_cpu(s->here->e_value_offs);
+			void *val = s->base + offs;
+			size_t size = EXT3_XATTR_SIZE(
+				le32_to_cpu(s->here->e_value_size));
+
+			if (i->value && size == EXT3_XATTR_SIZE(i->value_len)) {
+				/* The old and the new value have the same
+				   size. Just replace. */
+				s->here->e_value_size =
+					cpu_to_le32(i->value_len);
+				memset(val + size - EXT3_XATTR_PAD, 0,
+				       EXT3_XATTR_PAD); /* Clear pad bytes. */
+				memcpy(val, i->value, i->value_len);
+				return 0;
+			}
+
+			/* Remove the old value. */
+			memmove(first_val + size, first_val, val - first_val);
+			memset(first_val, 0, size);
+			s->here->e_value_size = 0;
+			s->here->e_value_offs = 0;
+			min_offs += size;
+
+			/* Adjust all value offsets. */
+			last = s->first;
+			while (!IS_LAST_ENTRY(last)) {
+				size_t o = le16_to_cpu(last->e_value_offs);
+				if (!last->e_value_block &&
+				    last->e_value_size && o < offs)
+					last->e_value_offs =
+						cpu_to_le16(o + size);
+				last = EXT3_XATTR_NEXT(last);
+			}
+		}
+		if (!i->value) {
+			/* Remove the old name. */
+			size_t size = EXT3_XATTR_LEN(name_len);
+			last = ENTRY((void *)last - size);
+			memmove(s->here, (void *)s->here + size,
+				(void *)last - (void *)s->here + sizeof(__u32));
+			memset(last, 0, size);
+		}
+	}
+
+	if (i->value) {
+		/* Insert the new value. */
+		s->here->e_value_size = cpu_to_le32(i->value_len);
+		if (i->value_len) {
+			size_t size = EXT3_XATTR_SIZE(i->value_len);
+			void *val = s->base + min_offs - size;
+			s->here->e_value_offs = cpu_to_le16(min_offs - size);
+			memset(val + size - EXT3_XATTR_PAD, 0,
+			       EXT3_XATTR_PAD); /* Clear the pad bytes. */
+			memcpy(val, i->value, i->value_len);
+		}
+	}
+	return 0;
+}
+
+struct ext3_xattr_block_find {
+	struct ext3_xattr_search s;
+	struct buffer_head *bh;
+};
+
+int
+ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
+		      struct ext3_xattr_block_find *bs)
+{
+	struct super_block *sb = inode->i_sb;
+	int error;
+
+	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
+		  i->name_index, i->name, i->value, (long)i->value_len);
+
+	if (EXT3_I(inode)->i_file_acl) {
+		/* The inode already has an extended attribute block. */
+		bs->bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
+		error = -EIO;
+		if (!bs->bh)
+			goto cleanup;
+		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
+			atomic_read(&(bs->bh->b_count)),
+			le32_to_cpu(BHDR(bs->bh)->h_refcount));
+		if (ext3_xattr_check_block(bs->bh)) {
+			ext3_error(sb, __FUNCTION__,
+				"inode %ld: bad block %d", inode->i_ino,
+				EXT3_I(inode)->i_file_acl);
+			error = -EIO;
+			goto cleanup;
+		}
+		/* Find the named attribute. */
+		bs->s.base = BHDR(bs->bh);
+		bs->s.first = BFIRST(bs->bh);
+		bs->s.end = bs->bh->b_data + bs->bh->b_size;
+		bs->s.here = bs->s.first;
+		error = ext3_xattr_find_entry(&bs->s.here, i->name_index,
+					      i->name, bs->bh->b_size, 1);
+		if (error && error != -ENODATA)
+			goto cleanup;
+		bs->s.not_found = error;
+	}
+	error = 0;
+
+cleanup:
+	return error;
+}
+
+static int
+ext3_xattr_block_set(handle_t *handle, struct inode *inode,
+		     struct ext3_xattr_info *i,
+		     struct ext3_xattr_block_find *bs)
+{
+	struct super_block *sb = inode->i_sb;
+	struct buffer_head *new_bh = NULL;
+	struct ext3_xattr_search *s = &bs->s;
+	struct mb_cache_entry *ce = NULL;
+	int error;
+
+#define header(x) ((struct ext3_xattr_header *)(x))
+
+	if (i->value && i->value_len > sb->s_blocksize)
+		return -ENOSPC;
+	if (s->base) {
+		ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
+					bs->bh->b_blocknr);
+		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
+			if (ce) {
+				mb_cache_entry_free(ce);
+				ce = NULL;
+			}
+			ea_bdebug(bs->bh, "modifying in-place");
+			error = ext3_journal_get_write_access(handle, bs->bh);
+			if (error)
+				goto cleanup;
+			lock_buffer(bs->bh);
+			error = ext3_xattr_set_entry(i, s);
+			if (!error) {
+				if (!IS_LAST_ENTRY(s->first))
+					ext3_xattr_rehash(header(s->base),
+							  s->here);
+				ext3_xattr_cache_insert(bs->bh);
+			}
+			unlock_buffer(bs->bh);
+			if (error == -EIO)
+				goto bad_block;
+			if (!error)
+				error = ext3_journal_dirty_metadata(handle,
+								    bs->bh);
+			if (error)
+				goto cleanup;
+			goto inserted;
+		} else {
+			int offset = (char *)s->here - bs->bh->b_data;
+
+			if (ce) {
+				mb_cache_entry_release(ce);
+				ce = NULL;
+			}
+			ea_bdebug(bs->bh, "cloning");
+			s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
+			error = -ENOMEM;
+			if (s->base == NULL)
+				goto cleanup;
+			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
+			s->first = ENTRY(header(s->base)+1);
+			header(s->base)->h_refcount = cpu_to_le32(1);
+			s->here = ENTRY(s->base + offset);
+			s->end = s->base + bs->bh->b_size;
+		}
+	} else {
+		/* Allocate a buffer where we construct the new block. */
+		s->base = kmalloc(sb->s_blocksize, GFP_KERNEL);
+		/* assert(header == s->base) */
+		error = -ENOMEM;
+		if (s->base == NULL)
+			goto cleanup;
+		memset(s->base, 0, sb->s_blocksize);
+		header(s->base)->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
+		header(s->base)->h_blocks = cpu_to_le32(1);
+		header(s->base)->h_refcount = cpu_to_le32(1);
+		s->first = ENTRY(header(s->base)+1);
+		s->here = ENTRY(header(s->base)+1);
+		s->end = s->base + sb->s_blocksize;
+	}
+
+	error = ext3_xattr_set_entry(i, s);
+	if (error == -EIO)
+		goto bad_block;
+	if (error)
+		goto cleanup;
+	if (!IS_LAST_ENTRY(s->first))
+		ext3_xattr_rehash(header(s->base), s->here);
+
+inserted:
+	if (!IS_LAST_ENTRY(s->first)) {
+		new_bh = ext3_xattr_cache_find(inode, header(s->base), &ce);
+		if (new_bh) {
+			/* We found an identical block in the cache. */
+			if (new_bh == bs->bh)
+				ea_bdebug(new_bh, "keeping");
+			else {
+				/* The old block is released after updating
+				   the inode. */
+				error = -EDQUOT;
+				if (DQUOT_ALLOC_BLOCK(inode, 1))
+					goto cleanup;
+				error = ext3_journal_get_write_access(handle,
+								      new_bh);
+				if (error)
+					goto cleanup_dquot;
+				lock_buffer(new_bh);
+				BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
+					le32_to_cpu(BHDR(new_bh)->h_refcount));
+				ea_bdebug(new_bh, "reusing; refcount now=%d",
+					le32_to_cpu(BHDR(new_bh)->h_refcount));
+				unlock_buffer(new_bh);
+				error = ext3_journal_dirty_metadata(handle,
+								    new_bh);
+				if (error)
+					goto cleanup_dquot;
+			}
+			mb_cache_entry_release(ce);
+			ce = NULL;
+		} else if (bs->bh && s->base == bs->bh->b_data) {
+			/* We were modifying this block in-place. */
+			ea_bdebug(bs->bh, "keeping this block");
+			new_bh = bs->bh;
+			get_bh(new_bh);
+		} else {
+			/* We need to allocate a new block */
+			int goal = le32_to_cpu(
+					EXT3_SB(sb)->s_es->s_first_data_block) +
+				EXT3_I(inode)->i_block_group *
+				EXT3_BLOCKS_PER_GROUP(sb);
+			int block = ext3_new_block(handle, inode, goal, &error);
+			if (error)
+				goto cleanup;
+			ea_idebug(inode, "creating block %d", block);
+
+			new_bh = sb_getblk(sb, block);
+			if (!new_bh) {
+getblk_failed:
+				ext3_free_blocks(handle, inode, block, 1);
+				error = -EIO;
+				goto cleanup;
+			}
+			lock_buffer(new_bh);
+			error = ext3_journal_get_create_access(handle, new_bh);
+			if (error) {
+				unlock_buffer(new_bh);
+				goto getblk_failed;
+			}
+			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+			set_buffer_uptodate(new_bh);
+			unlock_buffer(new_bh);
+			ext3_xattr_cache_insert(new_bh);
+			error = ext3_journal_dirty_metadata(handle, new_bh);
+			if (error)
+				goto cleanup;
+		}
+	}
+
+	/* Update the inode. */
+	EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
+
+	/* Drop the previous xattr block. */
+	if (bs->bh && bs->bh != new_bh)
+		ext3_xattr_release_block(handle, inode, bs->bh);
+	error = 0;
+
+cleanup:
+	if (ce)
+		mb_cache_entry_release(ce);
+	brelse(new_bh);
+	if (!(bs->bh && s->base == bs->bh->b_data))
+		kfree(s->base);
+
+	return error;
+
+cleanup_dquot:
+	DQUOT_FREE_BLOCK(inode, 1);
+	goto cleanup;
+
+bad_block:
+	ext3_error(inode->i_sb, __FUNCTION__,
+		   "inode %ld: bad block %d", inode->i_ino,
+		   EXT3_I(inode)->i_file_acl);
+	goto cleanup;
+
+#undef header
+}
+
+struct ext3_xattr_ibody_find {
+	struct ext3_xattr_search s;
+	struct ext3_iloc iloc;
+};
+
+int
+ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
+		      struct ext3_xattr_ibody_find *is)
+{
+	struct ext3_xattr_ibody_header *header;
+	struct ext3_inode *raw_inode;
+	int error;
+
+	if (EXT3_I(inode)->i_extra_isize == 0)
+		return 0;
+	raw_inode = ext3_raw_inode(&is->iloc);
+	header = IHDR(inode, raw_inode);
+	is->s.base = is->s.first = IFIRST(header);
+	is->s.here = is->s.first;
+	is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
+	if (EXT3_I(inode)->i_state & EXT3_STATE_XATTR) {
+		error = ext3_xattr_check_names(IFIRST(header), is->s.end);
+		if (error)
+			return error;
+		/* Find the named attribute. */
+		error = ext3_xattr_find_entry(&is->s.here, i->name_index,
+					      i->name, is->s.end -
+					      (void *)is->s.base, 0);
+		if (error && error != -ENODATA)
+			return error;
+		is->s.not_found = error;
+	}
+	return 0;
+}
+
+static int
+ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
+		     struct ext3_xattr_info *i,
+		     struct ext3_xattr_ibody_find *is)
+{
+	struct ext3_xattr_ibody_header *header;
+	struct ext3_xattr_search *s = &is->s;
+	int error;
+
+	if (EXT3_I(inode)->i_extra_isize == 0)
+		return -ENOSPC;
+	error = ext3_xattr_set_entry(i, s);
+	if (error)
+		return error;
+	header = IHDR(inode, ext3_raw_inode(&is->iloc));
+	if (!IS_LAST_ENTRY(s->first)) {
+		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
+		EXT3_I(inode)->i_state |= EXT3_STATE_XATTR;
+	} else {
+		header->h_magic = cpu_to_le32(0);
+		EXT3_I(inode)->i_state &= ~EXT3_STATE_XATTR;
+	}
+	return 0;
+}
+
+/*
+ * ext3_xattr_set_handle()
+ *
+ * Create, replace or remove an extended attribute for this inode. Value
+ * is NULL to remove an existing extended attribute, and non-NULL to
+ * either replace an existing extended attribute, or create a new extended
+ * attribute. The flags XATTR_REPLACE and XATTR_CREATE
+ * specify that an extended attribute must exist and must not exist
+ * prior to the call, respectively.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int
+ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+		      const char *name, const void *value, size_t value_len,
+		      int flags)
+{
+	struct ext3_xattr_info i = {
+		.name_index = name_index,
+		.name = name,
+		.value = value,
+		.value_len = value_len,
+	};
+	struct ext3_xattr_ibody_find is = {
+		.s = { .not_found = -ENODATA, },
+	};
+	struct ext3_xattr_block_find bs = {
+		.s = { .not_found = -ENODATA, },
+	};
+	int error;
+
+	if (IS_RDONLY(inode))
+		return -EROFS;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		return -EPERM;
+	if (!name)
+		return -EINVAL;
+	if (strlen(name) > 255)
+		return -ERANGE;
+	down_write(&EXT3_I(inode)->xattr_sem);
+	error = ext3_get_inode_loc(inode, &is.iloc);
+	if (error)
+		goto cleanup;
+
+	if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
+		struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
+		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
+		EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
+	}
+
+	error = ext3_xattr_ibody_find(inode, &i, &is);
+	if (error)
+		goto cleanup;
+	if (is.s.not_found)
+		error = ext3_xattr_block_find(inode, &i, &bs);
+	if (error)
+		goto cleanup;
+	if (is.s.not_found && bs.s.not_found) {
+		error = -ENODATA;
+		if (flags & XATTR_REPLACE)
+			goto cleanup;
+		error = 0;
+		if (!value)
+			goto cleanup;
+	} else {
+		error = -EEXIST;
+		if (flags & XATTR_CREATE)
+			goto cleanup;
+	}
+	error = ext3_journal_get_write_access(handle, is.iloc.bh);
+	if (error)
+		goto cleanup;
+	if (!value) {
+		if (!is.s.not_found)
+			error = ext3_xattr_ibody_set(handle, inode, &i, &is);
+		else if (!bs.s.not_found)
+			error = ext3_xattr_block_set(handle, inode, &i, &bs);
+	} else {
+		error = ext3_xattr_ibody_set(handle, inode, &i, &is);
+		if (!error && !bs.s.not_found) {
+			i.value = NULL;
+			error = ext3_xattr_block_set(handle, inode, &i, &bs);
+		} else if (error == -ENOSPC) {
+			error = ext3_xattr_block_set(handle, inode, &i, &bs);
+			if (error)
+				goto cleanup;
+			if (!is.s.not_found) {
+				i.value = NULL;
+				error = ext3_xattr_ibody_set(handle, inode, &i,
+							     &is);
+			}
+		}
+	}
+	if (!error) {
+		ext3_xattr_update_super_block(handle, inode->i_sb);
+		inode->i_ctime = CURRENT_TIME_SEC;
+		error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
+		/*
+		 * The bh is consumed by ext3_mark_iloc_dirty, even with
+		 * error != 0.
+		 */
+		is.iloc.bh = NULL;
+		if (IS_SYNC(inode))
+			handle->h_sync = 1;
+	}
+
+cleanup:
+	brelse(is.iloc.bh);
+	brelse(bs.bh);
+	up_write(&EXT3_I(inode)->xattr_sem);
+	return error;
+}
+
+/*
+ * ext3_xattr_set()
+ *
+ * Like ext3_xattr_set_handle, but start from an inode. This extended
+ * attribute modification is a filesystem transaction by itself.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int
+ext3_xattr_set(struct inode *inode, int name_index, const char *name,
+	       const void *value, size_t value_len, int flags)
+{
+	handle_t *handle;
+	int error, retries = 0;
+
+retry:
+	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS);
+	if (IS_ERR(handle)) {
+		error = PTR_ERR(handle);
+	} else {
+		int error2;
+
+		error = ext3_xattr_set_handle(handle, inode, name_index, name,
+					      value, value_len, flags);
+		error2 = ext3_journal_stop(handle);
+		if (error == -ENOSPC &&
+		    ext3_should_retry_alloc(inode->i_sb, &retries))
+			goto retry;
+		if (error == 0)
+			error = error2;
+	}
+
+	return error;
+}
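+
+/*
+ * Illustration of the flag semantics described for ext3_xattr_set_handle():
+ * XATTR_CREATE fails with -EEXIST if the attribute already exists,
+ * XATTR_REPLACE fails with -ENODATA if it does not, and a NULL value
+ * removes the attribute.  The attribute name below is arbitrary and this
+ * helper is not used anywhere in the driver.
+ */
+static inline int ext3_xattr_set_example(struct inode *inode)
+{
+	static const char value[] = "example value";
+	int err;
+
+	/* Create only: refuse to overwrite an existing attribute. */
+	err = ext3_xattr_set(inode, EXT3_XATTR_INDEX_USER, "example",
+			     value, sizeof(value) - 1, XATTR_CREATE);
+	if (err)
+		return err;
+	/* A NULL value with no flags removes the attribute again. */
+	return ext3_xattr_set(inode, EXT3_XATTR_INDEX_USER, "example",
+			      NULL, 0, 0);
+}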
+
+/*
+ * ext3_xattr_delete_inode()
+ *
+ * Free extended attribute resources associated with this inode. This
+ * is called immediately before an inode is freed. We have exclusive
+ * access to the inode.
+ */
+void
+ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
+{
+	struct buffer_head *bh = NULL;
+
+	if (!EXT3_I(inode)->i_file_acl)
+		goto cleanup;
+	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
+	if (!bh) {
+		ext3_error(inode->i_sb, __FUNCTION__,
+			"inode %ld: block %d read error", inode->i_ino,
+			EXT3_I(inode)->i_file_acl);
+		goto cleanup;
+	}
+	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
+	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
+		ext3_error(inode->i_sb, __FUNCTION__,
+			"inode %ld: bad block %d", inode->i_ino,
+			EXT3_I(inode)->i_file_acl);
+		goto cleanup;
+	}
+	ext3_xattr_release_block(handle, inode, bh);
+	EXT3_I(inode)->i_file_acl = 0;
+
+cleanup:
+	brelse(bh);
+}
+
+/*
+ * ext3_xattr_put_super()
+ *
+ * This is called when a file system is unmounted.
+ */
+void
+ext3_xattr_put_super(struct super_block *sb)
+{
+	mb_cache_shrink(ext3_xattr_cache, sb->s_bdev);
+}
+
+/*
+ * ext3_xattr_cache_insert()
+ *
+ * Create a new entry in the extended attribute cache, and insert
+ * it unless such an entry is already in the cache.
+ *
+ * Errors are ignored; on failure the block simply is not added to the cache.
+ */
+static void
+ext3_xattr_cache_insert(struct buffer_head *bh)
+{
+	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+	struct mb_cache_entry *ce;
+	int error;
+
+	ce = mb_cache_entry_alloc(ext3_xattr_cache);
+	if (!ce) {
+		ea_bdebug(bh, "out of memory");
+		return;
+	}
+	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+	if (error) {
+		mb_cache_entry_free(ce);
+		if (error == -EBUSY) {
+			ea_bdebug(bh, "already in cache");
+			error = 0;
+		}
+	} else {
+		ea_bdebug(bh, "inserting [%x]", (int)hash);
+		mb_cache_entry_release(ce);
+	}
+}
+
+/*
+ * ext3_xattr_cmp()
+ *
+ * Compare two extended attribute blocks for equality.
+ *
+ * Returns 0 if the blocks are equal, 1 if they differ, and
+ * a negative error number on errors.
+ */
+static int
+ext3_xattr_cmp(struct ext3_xattr_header *header1,
+	       struct ext3_xattr_header *header2)
+{
+	struct ext3_xattr_entry *entry1, *entry2;
+
+	entry1 = ENTRY(header1+1);
+	entry2 = ENTRY(header2+1);
+	while (!IS_LAST_ENTRY(entry1)) {
+		if (IS_LAST_ENTRY(entry2))
+			return 1;
+		if (entry1->e_hash != entry2->e_hash ||
+		    entry1->e_name_index != entry2->e_name_index ||
+		    entry1->e_name_len != entry2->e_name_len ||
+		    entry1->e_value_size != entry2->e_value_size ||
+		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
+			return 1;
+		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
+			return -EIO;
+		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
+			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
+			   le32_to_cpu(entry1->e_value_size)))
+			return 1;
+
+		entry1 = EXT3_XATTR_NEXT(entry1);
+		entry2 = EXT3_XATTR_NEXT(entry2);
+	}
+	if (!IS_LAST_ENTRY(entry2))
+		return 1;
+	return 0;
+}
+
+/*
+ * ext3_xattr_cache_find()
+ *
+ * Find an identical extended attribute block.
+ *
+ * Returns a pointer to the block found, or NULL if such a block was
+ * not found or an error occurred.
+ */
+static struct buffer_head *
+ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
+		      struct mb_cache_entry **pce)
+{
+	__u32 hash = le32_to_cpu(header->h_hash);
+	struct mb_cache_entry *ce;
+
+	if (!header->h_hash)
+		return NULL;  /* never share */
+	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+again:
+	ce = mb_cache_entry_find_first(ext3_xattr_cache, 0,
+				       inode->i_sb->s_bdev, hash);
+	while (ce) {
+		struct buffer_head *bh;
+
+		if (IS_ERR(ce)) {
+			if (PTR_ERR(ce) == -EAGAIN)
+				goto again;
+			break;
+		}
+		bh = sb_bread(inode->i_sb, ce->e_block);
+		if (!bh) {
+			ext3_error(inode->i_sb, __FUNCTION__,
+				"inode %ld: block %ld read error",
+				inode->i_ino, (unsigned long) ce->e_block);
+		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
+				EXT3_XATTR_REFCOUNT_MAX) {
+			ea_idebug(inode, "block %ld refcount %d>=%d",
+				  (unsigned long) ce->e_block,
+				  le32_to_cpu(BHDR(bh)->h_refcount),
+					  EXT3_XATTR_REFCOUNT_MAX);
+		} else if (ext3_xattr_cmp(header, BHDR(bh)) == 0) {
+			*pce = ce;
+			return bh;
+		}
+		brelse(bh);
+		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+	}
+	return NULL;
+}
+
+#define NAME_HASH_SHIFT 5
+#define VALUE_HASH_SHIFT 16
+
+/*
+ * ext3_xattr_hash_entry()
+ *
+ * Compute the hash of an extended attribute.
+ */
+static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
+					 struct ext3_xattr_entry *entry)
+{
+	__u32 hash = 0;
+	char *name = entry->e_name;
+	int n;
+
+	for (n=0; n < entry->e_name_len; n++) {
+		hash = (hash << NAME_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+		       *name++;
+	}
+
+	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+		__le32 *value = (__le32 *)((char *)header +
+			le16_to_cpu(entry->e_value_offs));
+		for (n = (le32_to_cpu(entry->e_value_size) +
+		     EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
+			hash = (hash << VALUE_HASH_SHIFT) ^
+			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+			       le32_to_cpu(*value++);
+		}
+	}
+	entry->e_hash = cpu_to_le32(hash);
+}
+
+#undef NAME_HASH_SHIFT
+#undef VALUE_HASH_SHIFT
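+
+/*
+ * Worked example of the name hash above (illustrative only): for the
+ * two-byte name "ab" and an empty value,
+ *   'a' (0x61): hash = (0x00 << 5) ^ (0x00 >> 27) ^ 0x61 = 0x61
+ *   'b' (0x62): hash = (0x61 << 5) ^ (0x61 >> 27) ^ 0x62 = 0xc42
+ * so e_hash would be stored as cpu_to_le32(0xc42).  A non-empty value is
+ * folded in by the second loop using the same rotate-and-xor scheme.
+ */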
+
+#define BLOCK_HASH_SHIFT 16
+
+/*
+ * ext3_xattr_rehash()
+ *
+ * Re-compute the hash of the changed entry, then of the whole attribute block.
+ */
+static void ext3_xattr_rehash(struct ext3_xattr_header *header,
+			      struct ext3_xattr_entry *entry)
+{
+	struct ext3_xattr_entry *here;
+	__u32 hash = 0;
+
+	ext3_xattr_hash_entry(header, entry);
+	here = ENTRY(header+1);
+	while (!IS_LAST_ENTRY(here)) {
+		if (!here->e_hash) {
+			/* Block is not shared if an entry's hash value == 0 */
+			hash = 0;
+			break;
+		}
+		hash = (hash << BLOCK_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
+		       le32_to_cpu(here->e_hash);
+		here = EXT3_XATTR_NEXT(here);
+	}
+	header->h_hash = cpu_to_le32(hash);
+}
+
+#undef BLOCK_HASH_SHIFT
+
+int __init
+init_ext3_xattr(void)
+{
+	ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
+		sizeof(struct mb_cache_entry) +
+		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+	if (!ext3_xattr_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void
+exit_ext3_xattr(void)
+{
+	if (ext3_xattr_cache)
+		mb_cache_destroy(ext3_xattr_cache);
+	ext3_xattr_cache = NULL;
+}
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
new file mode 100644
index 0000000..eb31a69
--- /dev/null
+++ b/fs/ext3/xattr.h
@@ -0,0 +1,135 @@
+/*
+  File: fs/ext3/xattr.h
+
+  On-disk format of extended attributes for the ext3 filesystem.
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/config.h>
+#include <linux/xattr.h>
+
+/* Magic value in attribute blocks */
+#define EXT3_XATTR_MAGIC		0xEA020000
+
+/* Maximum number of references to one attribute block */
+#define EXT3_XATTR_REFCOUNT_MAX		1024
+
+/* Name indexes */
+#define EXT3_XATTR_INDEX_USER			1
+#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS	2
+#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT	3
+#define EXT3_XATTR_INDEX_TRUSTED		4
+#define	EXT3_XATTR_INDEX_LUSTRE			5
+#define EXT3_XATTR_INDEX_SECURITY	        6
+
+struct ext3_xattr_header {
+	__le32	h_magic;	/* magic number for identification */
+	__le32	h_refcount;	/* reference count */
+	__le32	h_blocks;	/* number of disk blocks used */
+	__le32	h_hash;		/* hash value of all attributes */
+	__u32	h_reserved[4];	/* zero right now */
+};
+
+struct ext3_xattr_ibody_header {
+	__le32	h_magic;	/* magic number for identification */
+};
+
+struct ext3_xattr_entry {
+	__u8	e_name_len;	/* length of name */
+	__u8	e_name_index;	/* attribute name index */
+	__le16	e_value_offs;	/* offset in disk block of value */
+	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
+	__le32	e_value_size;	/* size of attribute value */
+	__le32	e_hash;		/* hash value of name and value */
+	char	e_name[0];	/* attribute name */
+};
+
+#define EXT3_XATTR_PAD_BITS		2
+#define EXT3_XATTR_PAD		(1<<EXT3_XATTR_PAD_BITS)
+#define EXT3_XATTR_ROUND		(EXT3_XATTR_PAD-1)
+#define EXT3_XATTR_LEN(name_len) \
+	(((name_len) + EXT3_XATTR_ROUND + \
+	sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
+#define EXT3_XATTR_NEXT(entry) \
+	( (struct ext3_xattr_entry *)( \
+	  (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
+#define EXT3_XATTR_SIZE(size) \
+	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
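+
+/*
+ * For example, with the 16-byte struct ext3_xattr_entry above, an
+ * attribute whose name suffix is 7 bytes long occupies
+ * EXT3_XATTR_LEN(7) == (7 + 3 + 16) & ~3 == 24 bytes of entry space,
+ * and EXT3_XATTR_SIZE() pads the value length to a 4-byte boundary in
+ * the same way.
+ */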
+
+# ifdef CONFIG_EXT3_FS_XATTR
+
+extern struct xattr_handler ext3_xattr_user_handler;
+extern struct xattr_handler ext3_xattr_trusted_handler;
+extern struct xattr_handler ext3_xattr_acl_access_handler;
+extern struct xattr_handler ext3_xattr_acl_default_handler;
+extern struct xattr_handler ext3_xattr_security_handler;
+
+extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
+
+extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
+extern int ext3_xattr_list(struct inode *, char *, size_t);
+extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
+extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
+
+extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
+extern void ext3_xattr_put_super(struct super_block *);
+
+extern int init_ext3_xattr(void);
+extern void exit_ext3_xattr(void);
+
+extern struct xattr_handler *ext3_xattr_handlers[];
+
+# else  /* CONFIG_EXT3_FS_XATTR */
+
+static inline int
+ext3_xattr_get(struct inode *inode, int name_index, const char *name,
+	       void *buffer, size_t size, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+ext3_xattr_list(struct inode *inode, void *buffer, size_t size)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+ext3_xattr_set(struct inode *inode, int name_index, const char *name,
+	       const void *value, size_t size, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+	       const char *name, const void *value, size_t size, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void
+ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
+{
+}
+
+static inline void
+ext3_xattr_put_super(struct super_block *sb)
+{
+}
+
+static inline int
+init_ext3_xattr(void)
+{
+	return 0;
+}
+
+static inline void
+exit_ext3_xattr(void)
+{
+}
+
+#define ext3_xattr_handlers	NULL
+
+# endif  /* CONFIG_EXT3_FS_XATTR */
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
new file mode 100644
index 0000000..ddc1c41
--- /dev/null
+++ b/fs/ext3/xattr_security.c
@@ -0,0 +1,55 @@
+/*
+ * linux/fs/ext3/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
+#include "xattr.h"
+
+static size_t
+ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size,
+			 const char *name, size_t name_len)
+{
+	const size_t prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext3_xattr_security_get(struct inode *inode, const char *name,
+		       void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return ext3_xattr_get(inode, EXT3_XATTR_INDEX_SECURITY, name,
+			      buffer, size);
+}
+
+static int
+ext3_xattr_security_set(struct inode *inode, const char *name,
+		       const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return ext3_xattr_set(inode, EXT3_XATTR_INDEX_SECURITY, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext3_xattr_security_handler = {
+	.prefix	= XATTR_SECURITY_PREFIX,
+	.list	= ext3_xattr_security_list,
+	.get	= ext3_xattr_security_get,
+	.set	= ext3_xattr_security_set,
+};
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
new file mode 100644
index 0000000..f68bfd1
--- /dev/null
+++ b/fs/ext3/xattr_trusted.c
@@ -0,0 +1,65 @@
+/*
+ * linux/fs/ext3/xattr_trusted.c
+ * Handler for trusted extended attributes.
+ *
+ * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
+#include "xattr.h"
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+
+static size_t
+ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
+			const char *name, size_t name_len)
+{
+	const size_t prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return 0;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext3_xattr_trusted_get(struct inode *inode, const char *name,
+		       void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED, name,
+			      buffer, size);
+}
+
+static int
+ext3_xattr_trusted_set(struct inode *inode, const char *name,
+		       const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return ext3_xattr_set(inode, EXT3_XATTR_INDEX_TRUSTED, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext3_xattr_trusted_handler = {
+	.prefix	= XATTR_TRUSTED_PREFIX,
+	.list	= ext3_xattr_trusted_list,
+	.get	= ext3_xattr_trusted_get,
+	.set	= ext3_xattr_trusted_set,
+};
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
new file mode 100644
index 0000000..e907cae
--- /dev/null
+++ b/fs/ext3/xattr_user.c
@@ -0,0 +1,79 @@
+/*
+ * linux/fs/ext3/xattr_user.c
+ * Handler for extended user attributes.
+ *
+ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
+#include "xattr.h"
+
+#define XATTR_USER_PREFIX "user."
+
+static size_t
+ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size,
+		     const char *name, size_t name_len)
+{
+	const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return 0;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_USER_PREFIX, prefix_len);
+		memcpy(list+prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int
+ext3_xattr_user_get(struct inode *inode, const char *name,
+		    void *buffer, size_t size)
+{
+	int error;
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return -EOPNOTSUPP;
+	error = permission(inode, MAY_READ, NULL);
+	if (error)
+		return error;
+
+	return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name, buffer, size);
+}
+
+static int
+ext3_xattr_user_set(struct inode *inode, const char *name,
+		    const void *value, size_t size, int flags)
+{
+	int error;
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (!test_opt(inode->i_sb, XATTR_USER))
+		return -EOPNOTSUPP;
+	if ( !S_ISREG(inode->i_mode) &&
+	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
+		return -EPERM;
+	error = permission(inode, MAY_WRITE, NULL);
+	if (error)
+		return error;
+
+	return ext3_xattr_set(inode, EXT3_XATTR_INDEX_USER, name,
+			      value, size, flags);
+}
+
+struct xattr_handler ext3_xattr_user_handler = {
+	.prefix	= XATTR_USER_PREFIX,
+	.list	= ext3_xattr_user_list,
+	.get	= ext3_xattr_user_get,
+	.set	= ext3_xattr_user_set,
+};
diff --git a/fs/fat/Makefile b/fs/fat/Makefile
new file mode 100644
index 0000000..bfb5f06
--- /dev/null
+++ b/fs/fat/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux fat filesystem support.
+#
+
+obj-$(CONFIG_FAT_FS) += fat.o
+
+fat-objs := cache.o dir.o fatent.o file.o inode.o misc.o
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
new file mode 100644
index 0000000..7c52e46
--- /dev/null
+++ b/fs/fat/cache.c
@@ -0,0 +1,324 @@
+/*
+ *  linux/fs/fat/cache.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *
+ *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
+ *	of inode number.
+ *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
+ */
+
+#include <linux/fs.h>
+#include <linux/msdos_fs.h>
+#include <linux/buffer_head.h>
+
+/* this must be > 0. */
+#define FAT_MAX_CACHE	8
+
+struct fat_cache {
+	struct list_head cache_list;
+	int nr_contig;	/* number of contiguous clusters */
+	int fcluster;	/* cluster number in the file. */
+	int dcluster;	/* cluster number on disk. */
+};
+
+struct fat_cache_id {
+	unsigned int id;
+	int nr_contig;
+	int fcluster;
+	int dcluster;
+};
+
+static inline int fat_max_cache(struct inode *inode)
+{
+	return FAT_MAX_CACHE;
+}
+
+static kmem_cache_t *fat_cache_cachep;
+
+static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct fat_cache *cache = (struct fat_cache *)foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		INIT_LIST_HEAD(&cache->cache_list);
+}
+
+int __init fat_cache_init(void)
+{
+	fat_cache_cachep = kmem_cache_create("fat_cache",
+				sizeof(struct fat_cache),
+				0, SLAB_RECLAIM_ACCOUNT,
+				init_once, NULL);
+	if (fat_cache_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void __exit fat_cache_destroy(void)
+{
+	if (kmem_cache_destroy(fat_cache_cachep))
+		printk(KERN_INFO "fat_cache: not all structures were freed\n");
+}
+
+static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
+{
+	return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
+}
+
+static inline void fat_cache_free(struct fat_cache *cache)
+{
+	BUG_ON(!list_empty(&cache->cache_list));
+	kmem_cache_free(fat_cache_cachep, cache);
+}
+
+static inline void fat_cache_update_lru(struct inode *inode,
+					struct fat_cache *cache)
+{
+	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
+		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
+}
+
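+/*
+ * fat_cache_lookup() returns -1 if no usable cache entry was found.
+ * Otherwise, for example, if the LRU holds { .fcluster = 4,
+ * .dcluster = 100, .nr_contig = 3 } and fclus is 6, the lookup hits
+ * inside the cached contiguous run: offset == 2, *cached_fclus == 6,
+ * *cached_dclus == 102.  For fclus == 10 only the nearest known point
+ * is returned: offset == 3, *cached_fclus == 7, *cached_dclus == 103.
+ */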
+static int fat_cache_lookup(struct inode *inode, int fclus,
+			    struct fat_cache_id *cid,
+			    int *cached_fclus, int *cached_dclus)
+{
+	static struct fat_cache nohit = { .fcluster = 0, };
+
+	struct fat_cache *hit = &nohit, *p;
+	int offset = -1;
+
+	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
+	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
+		/* Find the cache entry for "fclus", or the nearest one before it. */
+		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
+			hit = p;
+			if ((hit->fcluster + hit->nr_contig) < fclus) {
+				offset = hit->nr_contig;
+			} else {
+				offset = fclus - hit->fcluster;
+				break;
+			}
+		}
+	}
+	if (hit != &nohit) {
+		fat_cache_update_lru(inode, hit);
+
+		cid->id = MSDOS_I(inode)->cache_valid_id;
+		cid->nr_contig = hit->nr_contig;
+		cid->fcluster = hit->fcluster;
+		cid->dcluster = hit->dcluster;
+		*cached_fclus = cid->fcluster + offset;
+		*cached_dclus = cid->dcluster + offset;
+	}
+	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
+
+	return offset;
+}
+
+static struct fat_cache *fat_cache_merge(struct inode *inode,
+					 struct fat_cache_id *new)
+{
+	struct fat_cache *p;
+
+	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
+		/* Find the same part as "new" in cluster-chain. */
+		if (p->fcluster == new->fcluster) {
+			BUG_ON(p->dcluster != new->dcluster);
+			if (new->nr_contig > p->nr_contig)
+				p->nr_contig = new->nr_contig;
+			return p;
+		}
+	}
+	return NULL;
+}
+
+static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
+{
+	struct fat_cache *cache, *tmp;
+
+	if (new->fcluster == -1) /* dummy cache */
+		return;
+
+	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
+	if (new->id != FAT_CACHE_VALID &&
+	    new->id != MSDOS_I(inode)->cache_valid_id)
+		goto out;	/* this cache was invalidated */
+
+	cache = fat_cache_merge(inode, new);
+	if (cache == NULL) {
+		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
+			MSDOS_I(inode)->nr_caches++;
+			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
+
+			tmp = fat_cache_alloc(inode);
+			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
+			cache = fat_cache_merge(inode, new);
+			if (cache != NULL) {
+				MSDOS_I(inode)->nr_caches--;
+				fat_cache_free(tmp);
+				goto out_update_lru;
+			}
+			cache = tmp;
+		} else {
+			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
+			cache = list_entry(p, struct fat_cache, cache_list);
+		}
+		cache->fcluster = new->fcluster;
+		cache->dcluster = new->dcluster;
+		cache->nr_contig = new->nr_contig;
+	}
+out_update_lru:
+	fat_cache_update_lru(inode, cache);
+out:
+	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
+}
+
+/*
+ * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
+ * fixes itself after a while.
+ */
+static void __fat_cache_inval_inode(struct inode *inode)
+{
+	struct msdos_inode_info *i = MSDOS_I(inode);
+	struct fat_cache *cache;
+
+	while (!list_empty(&i->cache_lru)) {
+		cache = list_entry(i->cache_lru.next, struct fat_cache, cache_list);
+		list_del_init(&cache->cache_list);
+		i->nr_caches--;
+		fat_cache_free(cache);
+	}
+	/* Bump the id; cached copies taken under an older id are discarded. */
+	i->cache_valid_id++;
+	if (i->cache_valid_id == FAT_CACHE_VALID)
+		i->cache_valid_id++;
+}
+
+void fat_cache_inval_inode(struct inode *inode)
+{
+	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
+	__fat_cache_inval_inode(inode);
+	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
+}
+
+static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
+{
+	cid->nr_contig++;
+	return ((cid->dcluster + cid->nr_contig) == dclus);
+}
+
+static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
+{
+	cid->id = FAT_CACHE_VALID;
+	cid->fcluster = fclus;
+	cid->dcluster = dclus;
+	cid->nr_contig = 0;
+}
+
+int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+{
+	struct super_block *sb = inode->i_sb;
+	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
+	struct fat_entry fatent;
+	struct fat_cache_id cid;
+	int nr;
+
+	BUG_ON(MSDOS_I(inode)->i_start == 0);
+
+	*fclus = 0;
+	*dclus = MSDOS_I(inode)->i_start;
+	if (cluster == 0)
+		return 0;
+
+	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
+		/*
+		 * dummy cache, never contiguous;
+		 * it is reinitialized by cache_init() later.
+		 */
+		cache_init(&cid, -1, -1);
+	}
+
+	fatent_init(&fatent);
+	while (*fclus < cluster) {
+		/* prevent an infinite loop in the cluster chain */
+		if (*fclus > limit) {
+			fat_fs_panic(sb, "%s: detected the cluster chain loop"
+				     " (i_pos %lld)", __FUNCTION__,
+				     MSDOS_I(inode)->i_pos);
+			nr = -EIO;
+			goto out;
+		}
+
+		nr = fat_ent_read(inode, &fatent, *dclus);
+		if (nr < 0)
+			goto out;
+		else if (nr == FAT_ENT_FREE) {
+			fat_fs_panic(sb, "%s: invalid cluster chain"
+				     " (i_pos %lld)", __FUNCTION__,
+				     MSDOS_I(inode)->i_pos);
+			nr = -EIO;
+			goto out;
+		} else if (nr == FAT_ENT_EOF) {
+			fat_cache_add(inode, &cid);
+			goto out;
+		}
+		(*fclus)++;
+		*dclus = nr;
+		if (!cache_contiguous(&cid, *dclus))
+			cache_init(&cid, *fclus, *dclus);
+	}
+	nr = 0;
+	fat_cache_add(inode, &cid);
+out:
+	fatent_brelse(&fatent);
+	return nr;
+}
+
+static int fat_bmap_cluster(struct inode *inode, int cluster)
+{
+	struct super_block *sb = inode->i_sb;
+	int ret, fclus, dclus;
+
+	if (MSDOS_I(inode)->i_start == 0)
+		return 0;
+
+	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
+	if (ret < 0)
+		return ret;
+	else if (ret == FAT_ENT_EOF) {
+		fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)",
+			     __FUNCTION__, MSDOS_I(inode)->i_pos);
+		return -EIO;
+	}
+	return dclus;
+}
+
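+/*
+ * For example, with 512-byte blocks and sec_per_clus == 4
+ * (cluster_bits - s_blocksize_bits == 2), logical sector 10 of a file
+ * lies in file cluster 2 at offset 2 within that cluster, so fat_bmap()
+ * sets *phys to fat_clus_to_blknr(sbi, dclus) + 2, where dclus is the
+ * on-disk cluster that file cluster 2 maps to.
+ */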
+int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	sector_t last_block;
+	int cluster, offset;
+
+	*phys = 0;
+	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
+		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits))
+			*phys = sector + sbi->dir_start;
+		return 0;
+	}
+	last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1))
+		>> sb->s_blocksize_bits;
+	if (sector >= last_block)
+		return 0;
+
+	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
+	offset  = sector & (sbi->sec_per_clus - 1);
+	cluster = fat_bmap_cluster(inode, cluster);
+	if (cluster < 0)
+		return cluster;
+	else if (cluster)
+		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
+	return 0;
+}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
new file mode 100644
index 0000000..e5ae1b7
--- /dev/null
+++ b/fs/fat/dir.c
@@ -0,0 +1,1271 @@
+/*
+ *  linux/fs/fat/dir.c
+ *
+ *  directory handling functions for fat-based filesystems
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *
+ *  Hidden files 1995 by Albert Cahalan <albert@ccs.neu.edu> <adc@coe.neu.edu>
+ *
+ *  VFAT extensions by Gordon Chaffee <chaffee@plateau.cs.berkeley.edu>
+ *  Merged with msdos fs by Henrik Storner <storner@osiris.ping.dk>
+ *  Rewritten for constant inumbers. Plugged buffer overrun in readdir(). AV
+ *  Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/msdos_fs.h>
+#include <linux/dirent.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <asm/uaccess.h>
+
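+/*
+ * With 512-byte blocks and 32-byte directory entries there are 16
+ * entries per block (dir_per_block_bits == 4), so for example the third
+ * entry (index 2) in block 1000 gets i_pos == (1000 << 4) | 2 == 16002.
+ */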
+static inline loff_t fat_make_i_pos(struct super_block *sb,
+				    struct buffer_head *bh,
+				    struct msdos_dir_entry *de)
+{
+	return ((loff_t)bh->b_blocknr << MSDOS_SB(sb)->dir_per_block_bits)
+		| (de - (struct msdos_dir_entry *)bh->b_data);
+}
+
+/* Reads the directory entry at offset pos; returns 0 on success and -1 at
+   end of directory or on error. If bh is non-NULL, it is brelse'd first.
+   Pos is incremented. The buffer head is returned in bh.
+   AV. Most often we do it item-by-item. Makes sense to optimize.
+   AV. OK, there we go: if both bh and de are non-NULL we assume that we just
+   AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
+   AV. It's done in fat_get_entry() (inlined), here the slow case lives.
+   AV. Additionally, when we return -1 (i.e. reached the end of directory)
+   AV. we make bh NULL.
+ */
+static int fat__get_entry(struct inode *dir, loff_t *pos,
+			  struct buffer_head **bh, struct msdos_dir_entry **de)
+{
+	struct super_block *sb = dir->i_sb;
+	sector_t phys, iblock;
+	int offset;
+	int err;
+
+next:
+	if (*bh)
+		brelse(*bh);
+
+	*bh = NULL;
+	iblock = *pos >> sb->s_blocksize_bits;
+	err = fat_bmap(dir, iblock, &phys);
+	if (err || !phys)
+		return -1;	/* beyond EOF or error */
+
+	*bh = sb_bread(sb, phys);
+	if (*bh == NULL) {
+		printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
+		       (unsigned long long)phys);
+		/* skip this block */
+		*pos = (iblock + 1) << sb->s_blocksize_bits;
+		goto next;
+	}
+
+	offset = *pos & (sb->s_blocksize - 1);
+	*pos += sizeof(struct msdos_dir_entry);
+	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
+
+	return 0;
+}
+
+static inline int fat_get_entry(struct inode *dir, loff_t *pos,
+				struct buffer_head **bh,
+				struct msdos_dir_entry **de)
+{
+	/* Fast stuff first */
+	if (*bh && *de &&
+	    (*de - (struct msdos_dir_entry *)(*bh)->b_data) < MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
+		*pos += sizeof(struct msdos_dir_entry);
+		(*de)++;
+		return 0;
+	}
+	return fat__get_entry(dir, pos, bh, de);
+}
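+
+/*
+ * Typical usage, as in fat_get_short_entry() and its callers below:
+ *
+ *	bh = NULL;
+ *	cpos = 0;
+ *	while (fat_get_entry(dir, &cpos, &bh, &de) >= 0) {
+ *		... examine *de ...
+ *	}
+ *	brelse(bh);
+ */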
+
+/*
+ * Convert Unicode 16 to UTF8, translated Unicode, or ASCII.
+ * If uni_xlate is enabled and we can't get a 1:1 conversion, use a
+ * colon as an escape character since it is normally invalid on the vfat
+ * filesystem. The following four characters are the hexadecimal digits
+ * of Unicode value. This lets us do a full dump and restore of Unicode
+ * filenames. We could get into some trouble with long Unicode names,
+ * but ignore that right now.
+ * Ahem... Stack smashing in ring 0 isn't fun. Fixed.
+ */
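+/*
+ * For example, with uni_xlate enabled a character the nls table cannot
+ * map, say U+263A, is emitted as the five bytes ":263a"; without
+ * uni_xlate it becomes a single '?'.
+ */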
+static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
+		       struct nls_table *nls)
+{
+	wchar_t *ip, ec;
+	unsigned char *op, nc;
+	int charlen;
+	int k;
+
+	ip = uni;
+	op = ascii;
+
+	while (*ip) {
+		ec = *ip++;
+		if ( (charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
+			op += charlen;
+		} else {
+			if (uni_xlate == 1) {
+				*op = ':';
+				for (k = 4; k > 0; k--) {
+					nc = ec & 0xF;
+					op[k] = nc > 9	? nc + ('a' - 10)
+							: nc + '0';
+					ec >>= 4;
+				}
+				op += 5;
+			} else {
+				*op++ = '?';
+			}
+		}
+		/* We have some slack there, so it's OK */
+		if (op>ascii+256) {
+			op = ascii + 256;
+			break;
+		}
+	}
+	*op = 0;
+	return (op - ascii);
+}
+
+static inline int
+fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
+{
+	int charlen;
+
+	charlen = t->char2uni(c, clen, uni);
+	if (charlen < 0) {
+		*uni = 0x003f;	/* a question mark */
+		charlen = 1;
+	}
+	return charlen;
+}
+
+static inline int
+fat_short2lower_uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
+{
+	int charlen;
+	wchar_t wc;
+
+	charlen = t->char2uni(c, clen, &wc);
+	if (charlen < 0) {
+		*uni = 0x003f;	/* a question mark */
+		charlen = 1;
+	} else if (charlen <= 1) {
+		unsigned char nc = t->charset2lower[*c];
+
+		if (!nc)
+			nc = *c;
+
+		if ( (charlen = t->char2uni(&nc, 1, uni)) < 0) {
+			*uni = 0x003f;	/* a question mark */
+			charlen = 1;
+		}
+	} else
+		*uni = wc;
+
+	return charlen;
+}
+
+static inline int
+fat_shortname2uni(struct nls_table *nls, unsigned char *buf, int buf_size,
+		  wchar_t *uni_buf, unsigned short opt, int lower)
+{
+	int len = 0;
+
+	if (opt & VFAT_SFN_DISPLAY_LOWER)
+		len =  fat_short2lower_uni(nls, buf, buf_size, uni_buf);
+	else if (opt & VFAT_SFN_DISPLAY_WIN95)
+		len = fat_short2uni(nls, buf, buf_size, uni_buf);
+	else if (opt & VFAT_SFN_DISPLAY_WINNT) {
+		if (lower)
+			len = fat_short2lower_uni(nls, buf, buf_size, uni_buf);
+		else
+			len = fat_short2uni(nls, buf, buf_size, uni_buf);
+	} else
+		len = fat_short2uni(nls, buf, buf_size, uni_buf);
+
+	return len;
+}
+
+/*
+ * Return values: negative -> error (-ENOENT when not found), 0 -> found;
+ * sinfo->nr_slots is set to the total number of slots, including the
+ * shortname entry.
+ */
+int fat_search_long(struct inode *inode, const unsigned char *name,
+		    int name_len, struct fat_slot_info *sinfo)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bh = NULL;
+	struct msdos_dir_entry *de;
+	struct nls_table *nls_io = sbi->nls_io;
+	struct nls_table *nls_disk = sbi->nls_disk;
+	wchar_t bufuname[14];
+	unsigned char xlate_len, nr_slots;
+	wchar_t *unicode = NULL;
+	unsigned char work[8], bufname[260];	/* 256 + 4 */
+	int uni_xlate = sbi->options.unicode_xlate;
+	int utf8 = sbi->options.utf8;
+	int anycase = (sbi->options.name_check != 's');
+	unsigned short opt_shortname = sbi->options.shortname;
+	loff_t cpos = 0;
+	int chl, i, j, last_u, err;
+
+	err = -ENOENT;
+	while(1) {
+		if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
+			goto EODir;
+parse_record:
+		nr_slots = 0;
+		if (de->name[0] == DELETED_FLAG)
+			continue;
+		if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME))
+			continue;
+		if (de->attr != ATTR_EXT && IS_FREE(de->name))
+			continue;
+		if (de->attr == ATTR_EXT) {
+			struct msdos_dir_slot *ds;
+			unsigned char id;
+			unsigned char slot;
+			unsigned char slots;
+			unsigned char sum;
+			unsigned char alias_checksum;
+
+			if (!unicode) {
+				unicode = (wchar_t *)
+					__get_free_page(GFP_KERNEL);
+				if (!unicode) {
+					brelse(bh);
+					return -ENOMEM;
+				}
+			}
+parse_long:
+			slots = 0;
+			ds = (struct msdos_dir_slot *) de;
+			id = ds->id;
+			if (!(id & 0x40))
+				continue;
+			slots = id & ~0x40;
+			if (slots > 20 || !slots)	/* ceil(256 * 2 / 26) */
+				continue;
+			nr_slots = slots;
+			alias_checksum = ds->alias_checksum;
+
+			slot = slots;
+			while (1) {
+				int offset;
+
+				slot--;
+				offset = slot * 13;
+				fat16_towchar(unicode + offset, ds->name0_4, 5);
+				fat16_towchar(unicode + offset + 5, ds->name5_10, 6);
+				fat16_towchar(unicode + offset + 11, ds->name11_12, 2);
+
+				if (ds->id & 0x40) {
+					unicode[offset + 13] = 0;
+				}
+				if (fat_get_entry(inode, &cpos, &bh, &de) < 0)
+					goto EODir;
+				if (slot == 0)
+					break;
+				ds = (struct msdos_dir_slot *) de;
+				if (ds->attr !=  ATTR_EXT)
+					goto parse_record;
+				if ((ds->id & ~0x40) != slot)
+					goto parse_long;
+				if (ds->alias_checksum != alias_checksum)
+					goto parse_long;
+			}
+			if (de->name[0] == DELETED_FLAG)
+				continue;
+			if (de->attr ==  ATTR_EXT)
+				goto parse_long;
+			if (IS_FREE(de->name) || (de->attr & ATTR_VOLUME))
+				continue;
+			for (sum = 0, i = 0; i < 11; i++)
+				sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + de->name[i];
+			if (sum != alias_checksum)
+				nr_slots = 0;
+		}
+
+		memcpy(work, de->name, sizeof(de->name));
+		/* see namei.c, msdos_format_name */
+		if (work[0] == 0x05)
+			work[0] = 0xE5;
+		for (i = 0, j = 0, last_u = 0; i < 8;) {
+			if (!work[i]) break;
+			chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
+						&bufuname[j++], opt_shortname,
+						de->lcase & CASE_LOWER_BASE);
+			if (chl <= 1) {
+				if (work[i] != ' ')
+					last_u = j;
+			} else {
+				last_u = j;
+			}
+			i += chl;
+		}
+		j = last_u;
+		fat_short2uni(nls_disk, ".", 1, &bufuname[j++]);
+		for (i = 0; i < 3;) {
+			if (!de->ext[i]) break;
+			chl = fat_shortname2uni(nls_disk, &de->ext[i], 3 - i,
+						&bufuname[j++], opt_shortname,
+						de->lcase & CASE_LOWER_EXT);
+			if (chl <= 1) {
+				if (de->ext[i] != ' ')
+					last_u = j;
+			} else {
+				last_u = j;
+			}
+			i += chl;
+		}
+		if (!last_u)
+			continue;
+
+		bufuname[last_u] = 0x0000;
+		xlate_len = utf8
+			?utf8_wcstombs(bufname, bufuname, sizeof(bufname))
+			:uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+		if (xlate_len == name_len)
+			if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
+			    (anycase && !nls_strnicmp(nls_io, name, bufname,
+								xlate_len)))
+				goto Found;
+
+		if (nr_slots) {
+			xlate_len = utf8
+				?utf8_wcstombs(bufname, unicode, sizeof(bufname))
+				:uni16_to_x8(bufname, unicode, uni_xlate, nls_io);
+			if (xlate_len != name_len)
+				continue;
+			if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
+			    (anycase && !nls_strnicmp(nls_io, name, bufname,
+								xlate_len)))
+				goto Found;
+		}
+	}
+
+Found:
+	nr_slots++;	/* include the de */
+	sinfo->slot_off = cpos - nr_slots * sizeof(*de);
+	sinfo->nr_slots = nr_slots;
+	sinfo->de = de;
+	sinfo->bh = bh;
+	sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
+	err = 0;
+EODir:
+	if (unicode)
+		free_page((unsigned long)unicode);
+
+	return err;
+}
+
+EXPORT_SYMBOL(fat_search_long);
+
+struct fat_ioctl_filldir_callback {
+	struct dirent __user *dirent;
+	int result;
+	/* for dir ioctl */
+	const char *longname;
+	int long_len;
+	const char *shortname;
+	int short_len;
+};
+
+static int fat_readdirx(struct inode *inode, struct file *filp, void *dirent,
+			filldir_t filldir, int short_only, int both)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bh;
+	struct msdos_dir_entry *de;
+	struct nls_table *nls_io = sbi->nls_io;
+	struct nls_table *nls_disk = sbi->nls_disk;
+	unsigned char long_slots;
+	const char *fill_name;
+	int fill_len;
+	wchar_t bufuname[14];
+	wchar_t *unicode = NULL;
+	unsigned char c, work[8], bufname[56], *ptname = bufname;
+	unsigned long lpos, dummy, *furrfu = &lpos;
+	int uni_xlate = sbi->options.unicode_xlate;
+	int isvfat = sbi->options.isvfat;
+	int utf8 = sbi->options.utf8;
+	int nocase = sbi->options.nocase;
+	unsigned short opt_shortname = sbi->options.shortname;
+	unsigned long inum;
+	int chi, chl, i, i2, j, last, last_u, dotoffset = 0;
+	loff_t cpos;
+	int ret = 0;
+
+	lock_kernel();
+
+	cpos = filp->f_pos;
+	/* Fake . and .. for the root directory. */
+	if (inode->i_ino == MSDOS_ROOT_INO) {
+		while (cpos < 2) {
+			if (filldir(dirent, "..", cpos+1, cpos, MSDOS_ROOT_INO, DT_DIR) < 0)
+				goto out;
+			cpos++;
+			filp->f_pos++;
+		}
+		if (cpos == 2) {
+			dummy = 2;
+			furrfu = &dummy;
+			cpos = 0;
+		}
+	}
+	if (cpos & (sizeof(struct msdos_dir_entry)-1)) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	bh = NULL;
+GetNew:
+	long_slots = 0;
+	if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
+		goto EODir;
+	/* Check for long filename entry */
+	if (isvfat) {
+		if (de->name[0] == DELETED_FLAG)
+			goto RecEnd;
+		if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME))
+			goto RecEnd;
+		if (de->attr != ATTR_EXT && IS_FREE(de->name))
+			goto RecEnd;
+	} else {
+		if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name))
+			goto RecEnd;
+	}
+
+	if (isvfat && de->attr == ATTR_EXT) {
+		struct msdos_dir_slot *ds;
+		unsigned char id;
+		unsigned char slot;
+		unsigned char slots;
+		unsigned char sum;
+		unsigned char alias_checksum;
+
+		if (!unicode) {
+			unicode = (wchar_t *)__get_free_page(GFP_KERNEL);
+			if (!unicode) {
+				filp->f_pos = cpos;
+				brelse(bh);
+				ret = -ENOMEM;
+				goto out;
+			}
+		}
+ParseLong:
+		slots = 0;
+		ds = (struct msdos_dir_slot *) de;
+		id = ds->id;
+		if (!(id & 0x40))
+			goto RecEnd;
+		slots = id & ~0x40;
+		if (slots > 20 || !slots)	/* ceil(256 * 2 / 26) */
+			goto RecEnd;
+		long_slots = slots;
+		alias_checksum = ds->alias_checksum;
+
+		slot = slots;
+		while (1) {
+			int offset;
+
+			slot--;
+			offset = slot * 13;
+			fat16_towchar(unicode + offset, ds->name0_4, 5);
+			fat16_towchar(unicode + offset + 5, ds->name5_10, 6);
+			fat16_towchar(unicode + offset + 11, ds->name11_12, 2);
+
+			if (ds->id & 0x40) {
+				unicode[offset + 13] = 0;
+			}
+			if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
+				goto EODir;
+			if (slot == 0)
+				break;
+			ds = (struct msdos_dir_slot *) de;
+			if (ds->attr !=  ATTR_EXT)
+				goto RecEnd;	/* XXX */
+			if ((ds->id & ~0x40) != slot)
+				goto ParseLong;
+			if (ds->alias_checksum != alias_checksum)
+				goto ParseLong;
+		}
+		if (de->name[0] == DELETED_FLAG)
+			goto RecEnd;
+		if (de->attr ==  ATTR_EXT)
+			goto ParseLong;
+		if (IS_FREE(de->name) || (de->attr & ATTR_VOLUME))
+			goto RecEnd;
+		for (sum = 0, i = 0; i < 11; i++)
+			sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + de->name[i];
+		if (sum != alias_checksum)
+			long_slots = 0;
+	}
+
+	if (sbi->options.dotsOK) {
+		ptname = bufname;
+		dotoffset = 0;
+		if (de->attr & ATTR_HIDDEN) {
+			*ptname++ = '.';
+			dotoffset = 1;
+		}
+	}
+
+	memcpy(work, de->name, sizeof(de->name));
+	/* see namei.c, msdos_format_name */
+	if (work[0] == 0x05)
+		work[0] = 0xE5;
+	for (i = 0, j = 0, last = 0, last_u = 0; i < 8;) {
+		if (!(c = work[i])) break;
+		chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
+					&bufuname[j++], opt_shortname,
+					de->lcase & CASE_LOWER_BASE);
+		if (chl <= 1) {
+			ptname[i++] = (!nocase && c>='A' && c<='Z') ? c+32 : c;
+			if (c != ' ') {
+				last = i;
+				last_u = j;
+			}
+		} else {
+			last_u = j;
+			for (chi = 0; chi < chl && i < 8; chi++) {
+				ptname[i] = work[i];
+				i++; last = i;
+			}
+		}
+	}
+	i = last;
+	j = last_u;
+	fat_short2uni(nls_disk, ".", 1, &bufuname[j++]);
+	ptname[i++] = '.';
+	for (i2 = 0; i2 < 3;) {
+		if (!(c = de->ext[i2])) break;
+		chl = fat_shortname2uni(nls_disk, &de->ext[i2], 3 - i2,
+					&bufuname[j++], opt_shortname,
+					de->lcase & CASE_LOWER_EXT);
+		if (chl <= 1) {
+			i2++;
+			ptname[i++] = (!nocase && c>='A' && c<='Z') ? c+32 : c;
+			if (c != ' ') {
+				last = i;
+				last_u = j;
+			}
+		} else {
+			last_u = j;
+			for (chi = 0; chi < chl && i2 < 3; chi++) {
+				ptname[i++] = de->ext[i2++];
+				last = i;
+			}
+		}
+	}
+	if (!last)
+		goto RecEnd;
+
+	i = last + dotoffset;
+	j = last_u;
+
+	lpos = cpos - (long_slots+1)*sizeof(struct msdos_dir_entry);
+	if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME))
+		inum = inode->i_ino;
+	else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
+		inum = parent_ino(filp->f_dentry);
+	} else {
+		loff_t i_pos = fat_make_i_pos(sb, bh, de);
+		struct inode *tmp = fat_iget(sb, i_pos);
+		if (tmp) {
+			inum = tmp->i_ino;
+			iput(tmp);
+		} else
+			inum = iunique(sb, MSDOS_ROOT_INO);
+	}
+
+	if (isvfat) {
+		bufuname[j] = 0x0000;
+		i = utf8 ? utf8_wcstombs(bufname, bufuname, sizeof(bufname))
+			 : uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+	}
+
+	fill_name = bufname;
+	fill_len = i;
+	if (!short_only && long_slots) {
+		/* Convert the unicode long name.  261 is the maximum size
+		 * of the unicode buffer (13 * 20 slots + nul). */
+		void *longname = unicode + 261;
+		int buf_size = PAGE_SIZE - (261 * sizeof(unicode[0]));
+		int long_len = utf8
+			? utf8_wcstombs(longname, unicode, buf_size)
+			: uni16_to_x8(longname, unicode, uni_xlate, nls_io);
+
+		if (!both) {
+			fill_name = longname;
+			fill_len = long_len;
+		} else {
+			/* hack for fat_ioctl_filldir() */
+			struct fat_ioctl_filldir_callback *p = dirent;
+
+			p->longname = longname;
+			p->long_len = long_len;
+			p->shortname = bufname;
+			p->short_len = i;
+			fill_name = NULL;
+			fill_len = 0;
+		}
+	}
+	if (filldir(dirent, fill_name, fill_len, *furrfu, inum,
+		    (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0)
+		goto FillFailed;
+
+RecEnd:
+	furrfu = &lpos;
+	filp->f_pos = cpos;
+	goto GetNew;
+EODir:
+	filp->f_pos = cpos;
+FillFailed:
+	if (bh)
+		brelse(bh);
+	if (unicode)
+		free_page((unsigned long)unicode);
+out:
+	unlock_kernel();
+	return ret;
+}
+
+static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	return fat_readdirx(inode, filp, dirent, filldir, 0, 0);
+}
+
+static int fat_ioctl_filldir(void *__buf, const char *name, int name_len,
+			     loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct fat_ioctl_filldir_callback *buf = __buf;
+	struct dirent __user *d1 = buf->dirent;
+	struct dirent __user *d2 = d1 + 1;
+
+	if (buf->result)
+		return -EINVAL;
+	buf->result++;
+
+	if (name != NULL) {
+		/* dirent has only short name */
+		if (name_len >= sizeof(d1->d_name))
+			name_len = sizeof(d1->d_name) - 1;
+
+		if (put_user(0, d2->d_name)			||
+		    put_user(0, &d2->d_reclen)			||
+		    copy_to_user(d1->d_name, name, name_len)	||
+		    put_user(0, d1->d_name + name_len)		||
+		    put_user(name_len, &d1->d_reclen))
+			goto efault;
+	} else {
+		/* dirent has short and long name */
+		const char *longname = buf->longname;
+		int long_len = buf->long_len;
+		const char *shortname = buf->shortname;
+		int short_len = buf->short_len;
+
+		if (long_len >= sizeof(d1->d_name))
+			long_len = sizeof(d1->d_name) - 1;
+		if (short_len >= sizeof(d1->d_name))
+			short_len = sizeof(d1->d_name) - 1;
+
+		if (copy_to_user(d2->d_name, longname, long_len)	||
+		    put_user(0, d2->d_name + long_len)			||
+		    put_user(long_len, &d2->d_reclen)			||
+		    put_user(ino, &d2->d_ino)				||
+		    put_user(offset, &d2->d_off)			||
+		    copy_to_user(d1->d_name, shortname, short_len)	||
+		    put_user(0, d1->d_name + short_len)			||
+		    put_user(short_len, &d1->d_reclen))
+			goto efault;
+	}
+	return 0;
+efault:
+	buf->result = -EFAULT;
+	return -EFAULT;
+}
+
+static int fat_dir_ioctl(struct inode * inode, struct file * filp,
+		  unsigned int cmd, unsigned long arg)
+{
+	struct fat_ioctl_filldir_callback buf;
+	struct dirent __user *d1;
+	int ret, short_only, both;
+
+	switch (cmd) {
+	case VFAT_IOCTL_READDIR_SHORT:
+		short_only = 1;
+		both = 0;
+		break;
+	case VFAT_IOCTL_READDIR_BOTH:
+		short_only = 0;
+		both = 1;
+		break;
+	default:
+		return fat_generic_ioctl(inode, filp, cmd, arg);
+	}
+
+	d1 = (struct dirent __user *)arg;
+	if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2])))
+		return -EFAULT;
+	/*
+	 * Strictly speaking, this put_user() isn't needed.  However, the
+	 * old code didn't return the right value, so applications use
+	 * this field to check whether it is EOF.
+	 */
+	if (put_user(0, &d1->d_reclen))
+		return -EFAULT;
+
+	buf.dirent = d1;
+	buf.result = 0;
+	down(&inode->i_sem);
+	ret = -ENOENT;
+	if (!IS_DEADDIR(inode)) {
+		ret = fat_readdirx(inode, filp, &buf, fat_ioctl_filldir,
+				   short_only, both);
+	}
+	up(&inode->i_sem);
+	if (ret >= 0)
+		ret = buf.result;
+	return ret;
+}
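+
+/*
+ * For VFAT_IOCTL_READDIR_SHORT/BOTH, userspace passes a pointer to an
+ * array of two directory entries: d[0] receives the short name and d[1]
+ * the long name (empty if the entry has none).  A d_reclen of 0 in d[0]
+ * signals end of directory.  A minimal sketch of the intended usage,
+ * assuming an open directory fd and the struct dirent layout expected
+ * by this ioctl:
+ *
+ *	struct dirent d[2];
+ *	while (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, d) >= 0 && d[0].d_reclen)
+ *		printf("%s -> %s\n", d[0].d_name, d[1].d_name);
+ */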
+
+struct file_operations fat_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= fat_readdir,
+	.ioctl		= fat_dir_ioctl,
+	.fsync		= file_fsync,
+};
+
+static int fat_get_short_entry(struct inode *dir, loff_t *pos,
+			       struct buffer_head **bh,
+			       struct msdos_dir_entry **de)
+{
+	while (fat_get_entry(dir, pos, bh, de) >= 0) {
+		/* free entry or long name entry or volume label */
+		if (!IS_FREE((*de)->name) && !((*de)->attr & ATTR_VOLUME))
+			return 0;
+	}
+	return -ENOENT;
+}
+
+/*
+ * The ".." entry can not provide the "struct fat_slot_info" information
+ * for the inode, so this function provides only partial information.
+ */
+int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
+			 struct msdos_dir_entry **de, loff_t *i_pos)
+{
+	loff_t offset;
+
+	offset = 0;
+	*bh = NULL;
+	while (fat_get_short_entry(dir, &offset, bh, de) >= 0) {
+		if (!strncmp((*de)->name, MSDOS_DOTDOT, MSDOS_NAME)) {
+			*i_pos = fat_make_i_pos(dir->i_sb, *bh, *de);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+EXPORT_SYMBOL(fat_get_dotdot_entry);
+
+/* See if directory is empty */
+int fat_dir_empty(struct inode *dir)
+{
+	struct buffer_head *bh;
+	struct msdos_dir_entry *de;
+	loff_t cpos;
+	int result = 0;
+
+	bh = NULL;
+	cpos = 0;
+	while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) {
+		if (strncmp(de->name, MSDOS_DOT   , MSDOS_NAME) &&
+		    strncmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
+			result = -ENOTEMPTY;
+			break;
+		}
+	}
+	brelse(bh);
+	return result;
+}
+
+EXPORT_SYMBOL(fat_dir_empty);
+
+/*
+ * fat_subdirs counts the number of sub-directories of dir. It can be run
+ * on directories being created.
+ */
+int fat_subdirs(struct inode *dir)
+{
+	struct buffer_head *bh;
+	struct msdos_dir_entry *de;
+	loff_t cpos;
+	int count = 0;
+
+	bh = NULL;
+	cpos = 0;
+	while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) {
+		if (de->attr & ATTR_DIR)
+			count++;
+	}
+	brelse(bh);
+	return count;
+}
+
+/*
+ * Scans a directory for a given file (name points to its formatted name).
+ * Returns an error code or zero.
+ */
+int fat_scan(struct inode *dir, const unsigned char *name,
+	     struct fat_slot_info *sinfo)
+{
+	struct super_block *sb = dir->i_sb;
+
+	sinfo->slot_off = 0;
+	sinfo->bh = NULL;
+	while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh,
+				   &sinfo->de) >= 0) {
+		if (!strncmp(sinfo->de->name, name, MSDOS_NAME)) {
+			sinfo->slot_off -= sizeof(*sinfo->de);
+			sinfo->nr_slots = 1;
+			sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+EXPORT_SYMBOL(fat_scan);
+
+static int __fat_remove_entries(struct inode *dir, loff_t pos, int nr_slots)
+{
+	struct super_block *sb = dir->i_sb;
+	struct buffer_head *bh;
+	struct msdos_dir_entry *de, *endp;
+	int err = 0, orig_slots;
+
+	while (nr_slots) {
+		bh = NULL;
+		if (fat_get_entry(dir, &pos, &bh, &de) < 0) {
+			err = -EIO;
+			break;
+		}
+
+		orig_slots = nr_slots;
+		endp = (struct msdos_dir_entry *)(bh->b_data + sb->s_blocksize);
+		while (nr_slots && de < endp) {
+			de->name[0] = DELETED_FLAG;
+			de++;
+			nr_slots--;
+		}
+		mark_buffer_dirty(bh);
+		if (IS_DIRSYNC(dir))
+			err = sync_dirty_buffer(bh);
+		brelse(bh);
+		if (err)
+			break;
+
+		/* pos is *next* de's position, so this does `- sizeof(de)' */
+		pos += ((orig_slots - nr_slots) * sizeof(*de)) - sizeof(*de);
+	}
+
+	return err;
+}
+
+int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
+{
+	struct msdos_dir_entry *de;
+	struct buffer_head *bh;
+	int err = 0, nr_slots;
+
+	/*
+	 * First stage: remove the short name entry; this is what
+	 * removes the directory entry itself.
+	 */
+	nr_slots = sinfo->nr_slots;
+	de = sinfo->de;
+	sinfo->de = NULL;
+	bh = sinfo->bh;
+	sinfo->bh = NULL;
+	while (nr_slots && de >= (struct msdos_dir_entry *)bh->b_data) {
+		de->name[0] = DELETED_FLAG;
+		de--;
+		nr_slots--;
+	}
+	mark_buffer_dirty(bh);
+	if (IS_DIRSYNC(dir))
+		err = sync_dirty_buffer(bh);
+	brelse(bh);
+	if (err)
+		return err;
+	dir->i_version++;
+
+	if (nr_slots) {
+		/*
+		 * Second stage: remove the remaining long name slots.
+		 * (The directory entry itself is already removed, so we
+		 * report success even if this fails.)
+		 */
+		err = __fat_remove_entries(dir, sinfo->slot_off, nr_slots);
+		if (err) {
+			printk(KERN_WARNING
+			       "FAT: Couldn't remove the long name slots\n");
+		}
+	}
+
+	dir->i_mtime = dir->i_atime = CURRENT_TIME_SEC;
+	if (IS_DIRSYNC(dir))
+		(void)fat_sync_inode(dir);
+	else
+		mark_inode_dirty(dir);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(fat_remove_entries);
+
+static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
+			      struct buffer_head **bhs, int nr_bhs)
+{
+	struct super_block *sb = dir->i_sb;
+	sector_t last_blknr = blknr + MSDOS_SB(sb)->sec_per_clus;
+	int err, i, n;
+
+	/* Zeroing the unused blocks on this cluster */
+	blknr += nr_used;
+	n = nr_used;
+	while (blknr < last_blknr) {
+		bhs[n] = sb_getblk(sb, blknr);
+		if (!bhs[n]) {
+			err = -ENOMEM;
+			goto error;
+		}
+		memset(bhs[n]->b_data, 0, sb->s_blocksize);
+		set_buffer_uptodate(bhs[n]);
+		mark_buffer_dirty(bhs[n]);
+
+		n++;
+		blknr++;
+		if (n == nr_bhs) {
+			if (IS_DIRSYNC(dir)) {
+				err = fat_sync_bhs(bhs, n);
+				if (err)
+					goto error;
+			}
+			for (i = 0; i < n; i++)
+				brelse(bhs[i]);
+			n = 0;
+		}
+	}
+	if (IS_DIRSYNC(dir)) {
+		err = fat_sync_bhs(bhs, n);
+		if (err)
+			goto error;
+	}
+	for (i = 0; i < n; i++)
+		brelse(bhs[i]);
+
+	return 0;
+
+error:
+	for (i = 0; i < n; i++)
+		bforget(bhs[i]);
+	return err;
+}
+
+int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
+{
+	struct super_block *sb = dir->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+	struct msdos_dir_entry *de;
+	sector_t blknr;
+	__le16 date, time;
+	int err, cluster;
+
+	err = fat_alloc_clusters(dir, &cluster, 1);
+	if (err)
+		goto error;
+
+	blknr = fat_clus_to_blknr(sbi, cluster);
+	bhs[0] = sb_getblk(sb, blknr);
+	if (!bhs[0]) {
+		err = -ENOMEM;
+		goto error_free;
+	}
+
+	fat_date_unix2dos(ts->tv_sec, &time, &date);
+
+	de = (struct msdos_dir_entry *)bhs[0]->b_data;
+	/* filling the new directory slots ("." and ".." entries) */
+	memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
+	memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
+	de->attr = de[1].attr = ATTR_DIR;
+	de[0].lcase = de[1].lcase = 0;
+	de[0].time = de[1].time = time;
+	de[0].date = de[1].date = date;
+	de[0].ctime_cs = de[1].ctime_cs = 0;
+	if (sbi->options.isvfat) {
+		/* extra timestamps */
+		de[0].ctime = de[1].ctime = time;
+		de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = date;
+	} else {
+		de[0].ctime = de[1].ctime = 0;
+		de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0;
+	}
+	de[0].start = cpu_to_le16(cluster);
+	de[0].starthi = cpu_to_le16(cluster >> 16);
+	de[1].start = cpu_to_le16(MSDOS_I(dir)->i_logstart);
+	de[1].starthi = cpu_to_le16(MSDOS_I(dir)->i_logstart >> 16);
+	de[0].size = de[1].size = 0;
+	memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
+	set_buffer_uptodate(bhs[0]);
+	mark_buffer_dirty(bhs[0]);
+
+	err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
+	if (err)
+		goto error_free;
+
+	return cluster;
+
+error_free:
+	fat_free_clusters(dir, cluster);
+error:
+	return err;
+}
+
+EXPORT_SYMBOL(fat_alloc_new_dir);
+
+static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
+			       int *nr_cluster, struct msdos_dir_entry **de,
+			       struct buffer_head **bh, loff_t *i_pos)
+{
+	struct super_block *sb = dir->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+	sector_t blknr, start_blknr, last_blknr;
+	unsigned long size, copy;
+	int err, i, n, offset, cluster[2];
+
+	/*
+	 * The minimum cluster size is 512 bytes, and the maximum entry
+	 * size is 32 * slots (672 bytes).  So only if the cluster size
+	 * is 512 bytes may we need two clusters.
+	 */
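+	/*
+	 * For example, with the maximum of 21 slots (20 long name slots
+	 * plus the short name entry) size is 672 bytes: one cluster of
+	 * 1024 bytes or more is enough, but two 512-byte clusters are
+	 * needed.
+	 */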
+	size = nr_slots * sizeof(struct msdos_dir_entry);
+	*nr_cluster = (size + (sbi->cluster_size - 1)) >> sbi->cluster_bits;
+	BUG_ON(*nr_cluster > 2);
+
+	err = fat_alloc_clusters(dir, cluster, *nr_cluster);
+	if (err)
+		goto error;
+
+	/*
+	 * First stage: fill in the directory entries.  NOTE: This cluster
+	 * is not referenced from any inode yet, so the update order is
+	 * not important.
+	 */
+	i = n = copy = 0;
+	do {
+		start_blknr = blknr = fat_clus_to_blknr(sbi, cluster[i]);
+		last_blknr = start_blknr + sbi->sec_per_clus;
+		while (blknr < last_blknr) {
+			bhs[n] = sb_getblk(sb, blknr);
+			if (!bhs[n]) {
+				err = -ENOMEM;
+				goto error_nomem;
+			}
+
+			/* fill the directory entry */
+			copy = min(size, sb->s_blocksize);
+			memcpy(bhs[n]->b_data, slots, copy);
+			slots += copy;
+			size -= copy;
+			set_buffer_uptodate(bhs[n]);
+			mark_buffer_dirty(bhs[n]);
+			if (!size)
+				break;
+			n++;
+			blknr++;
+		}
+	} while (++i < *nr_cluster);
+
+	memset(bhs[n]->b_data + copy, 0, sb->s_blocksize - copy);
+	offset = copy - sizeof(struct msdos_dir_entry);
+	get_bh(bhs[n]);
+	*bh = bhs[n];
+	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
+	*i_pos = fat_make_i_pos(sb, *bh, *de);
+
+	/* Second stage: clear the rest of the cluster, and write it out */
+	err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE);
+	if (err)
+		goto error_free;
+
+	return cluster[0];
+
+error_free:
+	brelse(*bh);
+	*bh = NULL;
+	n = 0;
+error_nomem:
+	for (i = 0; i < n; i++)
+		bforget(bhs[i]);
+	fat_free_clusters(dir, cluster[0]);
+error:
+	return err;
+}
+
+int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
+		    struct fat_slot_info *sinfo)
+{
+	struct super_block *sb = dir->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
+	struct msdos_dir_entry *de;
+	int err, free_slots, i, nr_bhs;
+	loff_t pos, i_pos;
+
+	sinfo->nr_slots = nr_slots;
+
+	/* First stage: search for free directory entries */
+	free_slots = nr_bhs = 0;
+	bh = prev = NULL;
+	pos = 0;
+	err = -ENOSPC;
+	while (fat_get_entry(dir, &pos, &bh, &de) > -1) {
+		/* check the maximum size of directory */
+		if (pos >= FAT_MAX_DIR_SIZE)
+			goto error;
+
+		if (IS_FREE(de->name)) {
+			if (prev != bh) {
+				get_bh(bh);
+				bhs[nr_bhs] = prev = bh;
+				nr_bhs++;
+			}
+			free_slots++;
+			if (free_slots == nr_slots)
+				goto found;
+		} else {
+			for (i = 0; i < nr_bhs; i++)
+				brelse(bhs[i]);
+			prev = NULL;
+			free_slots = nr_bhs = 0;
+		}
+	}
+	if (dir->i_ino == MSDOS_ROOT_INO) {
+		if (sbi->fat_bits != 32)
+			goto error;
+	} else if (MSDOS_I(dir)->i_start == 0) {
+		printk(KERN_ERR "FAT: Corrupted directory (i_pos %lld)\n",
+		       MSDOS_I(dir)->i_pos);
+		err = -EIO;
+		goto error;
+	}
+
+found:
+	err = 0;
+	pos -= free_slots * sizeof(*de);
+	nr_slots -= free_slots;
+	if (free_slots) {
+		/*
+		 * Second stage: fill the free entries with the new entries.
+		 * NOTE: If these slots include the short name entry, the
+		 * long name slots are written first, then the short name.
+		 */
+		int size = free_slots * sizeof(*de);
+		int offset = pos & (sb->s_blocksize - 1);
+		int long_bhs = nr_bhs - (nr_slots == 0);
+
+		/* Fill the long name slots. */
+		for (i = 0; i < long_bhs; i++) {
+			int copy = min_t(int, sb->s_blocksize - offset, size);
+			memcpy(bhs[i]->b_data + offset, slots, copy);
+			mark_buffer_dirty(bhs[i]);
+			offset = 0;
+			slots += copy;
+			size -= copy;
+		}
+		if (long_bhs && IS_DIRSYNC(dir))
+			err = fat_sync_bhs(bhs, long_bhs);
+		if (!err && i < nr_bhs) {
+			/* Fill the short name slot. */
+			int copy = min_t(int, sb->s_blocksize - offset, size);
+			memcpy(bhs[i]->b_data + offset, slots, copy);
+			mark_buffer_dirty(bhs[i]);
+			if (IS_DIRSYNC(dir))
+				err = sync_dirty_buffer(bhs[i]);
+		}
+		for (i = 0; i < nr_bhs; i++)
+			brelse(bhs[i]);
+		if (err)
+			goto error_remove;
+	}
+
+	if (nr_slots) {
+		int cluster, nr_cluster;
+
+		/*
+		 * Third stage: allocate a cluster for the new entries,
+		 * initialize it with the new entries, then add the
+		 * cluster to the directory.
+		 */
+		cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster,
+					      &de, &bh, &i_pos);
+		if (cluster < 0) {
+			err = cluster;
+			goto error_remove;
+		}
+		err = fat_chain_add(dir, cluster, nr_cluster);
+		if (err) {
+			fat_free_clusters(dir, cluster);
+			goto error_remove;
+		}
+		if (dir->i_size & (sbi->cluster_size - 1)) {
+			fat_fs_panic(sb, "Odd directory size");
+			dir->i_size = (dir->i_size + sbi->cluster_size - 1)
+				& ~((loff_t)sbi->cluster_size - 1);
+		}
+		dir->i_size += nr_cluster << sbi->cluster_bits;
+		MSDOS_I(dir)->mmu_private += nr_cluster << sbi->cluster_bits;
+	}
+	sinfo->slot_off = pos;
+	sinfo->de = de;
+	sinfo->bh = bh;
+	sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
+
+	return 0;
+
+error:
+	brelse(bh);
+	for (i = 0; i < nr_bhs; i++)
+		brelse(bhs[i]);
+	return err;
+
+error_remove:
+	brelse(bh);
+	if (free_slots)
+		__fat_remove_entries(dir, pos, free_slots);
+	return err;
+}
+
+EXPORT_SYMBOL(fat_add_entries);
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
new file mode 100644
index 0000000..4164cd5
--- /dev/null
+++ b/fs/fat/fatent.c
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2004, OGAWA Hirofumi
+ * Released under GPL v2.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/msdos_fs.h>
+
+struct fatent_operations {
+	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
+	void (*ent_set_ptr)(struct fat_entry *, int);
+	int (*ent_bread)(struct super_block *, struct fat_entry *,
+			 int, sector_t);
+	int (*ent_get)(struct fat_entry *);
+	void (*ent_put)(struct fat_entry *, int);
+	int (*ent_next)(struct fat_entry *);
+};
+
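+/*
+ * A FAT12 entry is 12 bits (one and a half bytes), so entry N starts at
+ * byte offset N + N/2 in the FAT; odd-numbered entries share their first
+ * byte with the previous entry.  For example, entry 5 starts at byte 7
+ * and spans bytes 7 and 8.
+ */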
+static void fat12_ent_blocknr(struct super_block *sb, int entry,
+			      int *offset, sector_t *blocknr)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	int bytes = entry + (entry >> 1);
+	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+	*offset = bytes & (sb->s_blocksize - 1);
+	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+}
+
+static void fat_ent_blocknr(struct super_block *sb, int entry,
+			    int *offset, sector_t *blocknr)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	int bytes = (entry << sbi->fatent_shift);
+	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+	*offset = bytes & (sb->s_blocksize - 1);
+	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+}
+
+static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
+{
+	struct buffer_head **bhs = fatent->bhs;
+	if (fatent->nr_bhs == 1) {
+		WARN_ON(offset >= (bhs[0]->b_size - 1));
+		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
+		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
+	} else {
+		WARN_ON(offset != (bhs[0]->b_size - 1));
+		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
+		fatent->u.ent12_p[1] = bhs[1]->b_data;
+	}
+}
+
+static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
+{
+	WARN_ON(offset & (2 - 1));
+	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
+}
+
+static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
+{
+	WARN_ON(offset & (4 - 1));
+	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
+}
+
+static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
+			   int offset, sector_t blocknr)
+{
+	struct buffer_head **bhs = fatent->bhs;
+
+	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
+	bhs[0] = sb_bread(sb, blocknr);
+	if (!bhs[0])
+		goto err;
+
+	if ((offset + 1) < sb->s_blocksize)
+		fatent->nr_bhs = 1;
+	else {
+		/* This entry spans a block boundary, it needs the next block */
+		blocknr++;
+		bhs[1] = sb_bread(sb, blocknr);
+		if (!bhs[1])
+			goto err_brelse;
+		fatent->nr_bhs = 2;
+	}
+	fat12_ent_set_ptr(fatent, offset);
+	return 0;
+
+err_brelse:
+	brelse(bhs[0]);
+err:
+	printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n",
+	       (unsigned long long)blocknr);
+	return -EIO;
+}
+
+static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
+			 int offset, sector_t blocknr)
+{
+	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+
+	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
+	fatent->bhs[0] = sb_bread(sb, blocknr);
+	if (!fatent->bhs[0]) {
+		printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n",
+		       (unsigned long long)blocknr);
+		return -EIO;
+	}
+	fatent->nr_bhs = 1;
+	ops->ent_set_ptr(fatent, offset);
+	return 0;
+}
+
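+/*
+ * For example, if byte 7 of the FAT holds 0xab and byte 8 holds 0xcd,
+ * entry 5 (odd) decodes to (0xab >> 4) | (0xcd << 4) == 0xcda, while
+ * entry 4 (even, bytes 6 and 7) uses byte 7's low nibble as its top
+ * four bits.
+ */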
+static int fat12_ent_get(struct fat_entry *fatent)
+{
+	u8 **ent12_p = fatent->u.ent12_p;
+	int next;
+
+	if (fatent->entry & 1)
+		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
+	else
+		next = (*ent12_p[1] << 8) | *ent12_p[0];
+	next &= 0x0fff;
+	if (next >= BAD_FAT12)
+		next = FAT_ENT_EOF;
+	return next;
+}
+
+static int fat16_ent_get(struct fat_entry *fatent)
+{
+	int next = le16_to_cpu(*fatent->u.ent16_p);
+	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
+	if (next >= BAD_FAT16)
+		next = FAT_ENT_EOF;
+	return next;
+}
+
+static int fat32_ent_get(struct fat_entry *fatent)
+{
+	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
+	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
+	if (next >= BAD_FAT32)
+		next = FAT_ENT_EOF;
+	return next;
+}
+
+static void fat12_ent_put(struct fat_entry *fatent, int new)
+{
+	u8 **ent12_p = fatent->u.ent12_p;
+
+	if (new == FAT_ENT_EOF)
+		new = EOF_FAT12;
+
+	if (fatent->entry & 1) {
+		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
+		*ent12_p[1] = new >> 4;
+	} else {
+		*ent12_p[0] = new & 0xff;
+		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
+	}
+
+	mark_buffer_dirty(fatent->bhs[0]);
+	if (fatent->nr_bhs == 2)
+		mark_buffer_dirty(fatent->bhs[1]);
+}
+
+static void fat16_ent_put(struct fat_entry *fatent, int new)
+{
+	if (new == FAT_ENT_EOF)
+		new = EOF_FAT16;
+
+	*fatent->u.ent16_p = cpu_to_le16(new);
+	mark_buffer_dirty(fatent->bhs[0]);
+}
+
+static void fat32_ent_put(struct fat_entry *fatent, int new)
+{
+	if (new == FAT_ENT_EOF)
+		new = EOF_FAT32;
+
+	WARN_ON(new & 0xf0000000);
+	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
+	*fatent->u.ent32_p = cpu_to_le32(new);
+	mark_buffer_dirty(fatent->bhs[0]);
+}
+
+static int fat12_ent_next(struct fat_entry *fatent)
+{
+	u8 **ent12_p = fatent->u.ent12_p;
+	struct buffer_head **bhs = fatent->bhs;
+	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);
+
+	fatent->entry++;
+	if (fatent->nr_bhs == 1) {
+		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2)));
+		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
+		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
+			ent12_p[0] = nextp - 1;
+			ent12_p[1] = nextp;
+			return 1;
+		}
+	} else {
+		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
+		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
+		ent12_p[0] = nextp - 1;
+		ent12_p[1] = nextp;
+		brelse(bhs[0]);
+		bhs[0] = bhs[1];
+		fatent->nr_bhs = 1;
+		return 1;
+	}
+	ent12_p[0] = NULL;
+	ent12_p[1] = NULL;
+	return 0;
+}
+
+static int fat16_ent_next(struct fat_entry *fatent)
+{
+	const struct buffer_head *bh = fatent->bhs[0];
+	fatent->entry++;
+	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
+		fatent->u.ent16_p++;
+		return 1;
+	}
+	fatent->u.ent16_p = NULL;
+	return 0;
+}
+
+static int fat32_ent_next(struct fat_entry *fatent)
+{
+	const struct buffer_head *bh = fatent->bhs[0];
+	fatent->entry++;
+	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
+		fatent->u.ent32_p++;
+		return 1;
+	}
+	fatent->u.ent32_p = NULL;
+	return 0;
+}
+
+static struct fatent_operations fat12_ops = {
+	.ent_blocknr	= fat12_ent_blocknr,
+	.ent_set_ptr	= fat12_ent_set_ptr,
+	.ent_bread	= fat12_ent_bread,
+	.ent_get	= fat12_ent_get,
+	.ent_put	= fat12_ent_put,
+	.ent_next	= fat12_ent_next,
+};
+
+static struct fatent_operations fat16_ops = {
+	.ent_blocknr	= fat_ent_blocknr,
+	.ent_set_ptr	= fat16_ent_set_ptr,
+	.ent_bread	= fat_ent_bread,
+	.ent_get	= fat16_ent_get,
+	.ent_put	= fat16_ent_put,
+	.ent_next	= fat16_ent_next,
+};
+
+static struct fatent_operations fat32_ops = {
+	.ent_blocknr	= fat_ent_blocknr,
+	.ent_set_ptr	= fat32_ent_set_ptr,
+	.ent_bread	= fat_ent_bread,
+	.ent_get	= fat32_ent_get,
+	.ent_put	= fat32_ent_put,
+	.ent_next	= fat32_ent_next,
+};
+
+static inline void lock_fat(struct msdos_sb_info *sbi)
+{
+	down(&sbi->fat_lock);
+}
+
+static inline void unlock_fat(struct msdos_sb_info *sbi)
+{
+	up(&sbi->fat_lock);
+}
+
+void fat_ent_access_init(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+	init_MUTEX(&sbi->fat_lock);
+
+	switch (sbi->fat_bits) {
+	case 32:
+		sbi->fatent_shift = 2;
+		sbi->fatent_ops = &fat32_ops;
+		break;
+	case 16:
+		sbi->fatent_shift = 1;
+		sbi->fatent_ops = &fat16_ops;
+		break;
+	case 12:
+		sbi->fatent_shift = -1;
+		sbi->fatent_ops = &fat12_ops;
+		break;
+	}
+}
+
+static inline int fat_ent_update_ptr(struct super_block *sb,
+				     struct fat_entry *fatent,
+				     int offset, sector_t blocknr)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct fatent_operations *ops = sbi->fatent_ops;
+	struct buffer_head **bhs = fatent->bhs;
+
+	/* Do this fatent's buffers contain this entry? */
+	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
+		return 0;
+	/* Does this entry need the next block? */
+	if (sbi->fat_bits == 12 && (offset + 1) >= sb->s_blocksize) {
+		if (fatent->nr_bhs != 2 || bhs[1]->b_blocknr != (blocknr + 1))
+			return 0;
+	}
+	ops->ent_set_ptr(fatent, offset);
+	return 1;
+}
+
+int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	struct fatent_operations *ops = sbi->fatent_ops;
+	int err, offset;
+	sector_t blocknr;
+
+	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
+		fatent_brelse(fatent);
+		fat_fs_panic(sb, "invalid access to FAT (entry 0x%08x)", entry);
+		return -EIO;
+	}
+
+	fatent_set_entry(fatent, entry);
+	ops->ent_blocknr(sb, entry, &offset, &blocknr);
+
+	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
+		fatent_brelse(fatent);
+		err = ops->ent_bread(sb, fatent, offset, blocknr);
+		if (err)
+			return err;
+	}
+	return ops->ent_get(fatent);
+}
+
+/* FIXME: We could write these blocks in bigger chunks. */
+static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
+			  int nr_bhs)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *c_bh;
+	int err, n, copy;
+
+	err = 0;
+	for (copy = 1; copy < sbi->fats; copy++) {
+		sector_t backup_fat = sbi->fat_length * copy;
+
+		for (n = 0; n < nr_bhs; n++) {
+			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
+			if (!c_bh) {
+				err = -ENOMEM;
+				goto error;
+			}
+			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
+			set_buffer_uptodate(c_bh);
+			mark_buffer_dirty(c_bh);
+			if (sb->s_flags & MS_SYNCHRONOUS)
+				err = sync_dirty_buffer(c_bh);
+			brelse(c_bh);
+			if (err)
+				goto error;
+		}
+	}
+error:
+	return err;
+}
+
+int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
+		  int new, int wait)
+{
+	struct super_block *sb = inode->i_sb;
+	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+	int err;
+
+	ops->ent_put(fatent, new);
+	if (wait) {
+		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
+		if (err)
+			return err;
+	}
+	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
+}
+
+static inline int fat_ent_next(struct msdos_sb_info *sbi,
+			       struct fat_entry *fatent)
+{
+	if (sbi->fatent_ops->ent_next(fatent)) {
+		if (fatent->entry < sbi->max_cluster)
+			return 1;
+	}
+	return 0;
+}
+
+static inline int fat_ent_read_block(struct super_block *sb,
+				     struct fat_entry *fatent)
+{
+	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+	sector_t blocknr;
+	int offset;
+
+	fatent_brelse(fatent);
+	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
+	return ops->ent_bread(sb, fatent, offset, blocknr);
+}
+
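+/*
+ * Collect the buffer heads referenced by @fatent into the @bhs array,
+ * skipping any already present and taking an extra reference on each
+ * newly added one (the caller releases them with brelse()).
+ */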
+static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
+			    struct fat_entry *fatent)
+{
+	int n, i;
+
+	for (n = 0; n < fatent->nr_bhs; n++) {
+		for (i = 0; i < *nr_bhs; i++) {
+			if (fatent->bhs[n] == bhs[i])
+				break;
+		}
+		if (i == *nr_bhs) {
+			get_bh(fatent->bhs[n]);
+			bhs[i] = fatent->bhs[n];
+			(*nr_bhs)++;
+		}
+	}
+}
+
+int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct fatent_operations *ops = sbi->fatent_ops;
+	struct fat_entry fatent, prev_ent;
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+	int i, count, err, nr_bhs, idx_clus;
+
+	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */
+
+	lock_fat(sbi);
+	if (sbi->free_clusters != -1 && sbi->free_clusters < nr_cluster) {
+		unlock_fat(sbi);
+		return -ENOSPC;
+	}
+
+	err = nr_bhs = idx_clus = 0;
+	count = FAT_START_ENT;
+	fatent_init(&prev_ent);
+	fatent_init(&fatent);
+	fatent_set_entry(&fatent, sbi->prev_free + 1);
+	while (count < sbi->max_cluster) {
+		if (fatent.entry >= sbi->max_cluster)
+			fatent.entry = FAT_START_ENT;
+		fatent_set_entry(&fatent, fatent.entry);
+		err = fat_ent_read_block(sb, &fatent);
+		if (err)
+			goto out;
+
+		/* Find the free entries in a block */
+		do {
+			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
+				int entry = fatent.entry;
+
+				/* make the cluster chain */
+				ops->ent_put(&fatent, FAT_ENT_EOF);
+				if (prev_ent.nr_bhs)
+					ops->ent_put(&prev_ent, entry);
+
+				fat_collect_bhs(bhs, &nr_bhs, &fatent);
+
+				sbi->prev_free = entry;
+				if (sbi->free_clusters != -1)
+					sbi->free_clusters--;
+
+				cluster[idx_clus] = entry;
+				idx_clus++;
+				if (idx_clus == nr_cluster)
+					goto out;
+
+				/*
+				 * fat_collect_bhs() takes a reference on the
+				 * bhs, so we can still use prev_ent.
+				 */
+				prev_ent = fatent;
+			}
+			count++;
+			if (count == sbi->max_cluster)
+				break;
+		} while (fat_ent_next(sbi, &fatent));
+	}
+
+	/* Couldn't allocate the free entries */
+	sbi->free_clusters = 0;
+	err = -ENOSPC;
+
+out:
+	unlock_fat(sbi);
+	fatent_brelse(&fatent);
+	if (!err) {
+		if (inode_needs_sync(inode))
+			err = fat_sync_bhs(bhs, nr_bhs);
+		if (!err)
+			err = fat_mirror_bhs(sb, bhs, nr_bhs);
+	}
+	for (i = 0; i < nr_bhs; i++)
+		brelse(bhs[i]);
+	fat_clusters_flush(sb);
+
+	if (err && idx_clus)
+		fat_free_clusters(inode, cluster[0]);
+
+	return err;
+}
+
+int fat_free_clusters(struct inode *inode, int cluster)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct fatent_operations *ops = sbi->fatent_ops;
+	struct fat_entry fatent;
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+	int i, err, nr_bhs;
+
+	nr_bhs = 0;
+	fatent_init(&fatent);
+	lock_fat(sbi);
+	do {
+		cluster = fat_ent_read(inode, &fatent, cluster);
+		if (cluster < 0) {
+			err = cluster;
+			goto error;
+		} else if (cluster == FAT_ENT_FREE) {
+			fat_fs_panic(sb, "%s: deleting FAT entry beyond EOF",
+				     __FUNCTION__);
+			err = -EIO;
+			goto error;
+		}
+
+		ops->ent_put(&fatent, FAT_ENT_FREE);
+		if (sbi->free_clusters != -1)
+			sbi->free_clusters++;
+
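+		/*
+		 * If collecting this entry's buffers would overflow the
+		 * batch, sync it if the mount is synchronous, mirror it to
+		 * the other FATs, and release it before starting a new one.
+		 */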
+		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
+			if (sb->s_flags & MS_SYNCHRONOUS) {
+				err = fat_sync_bhs(bhs, nr_bhs);
+				if (err)
+					goto error;
+			}
+			err = fat_mirror_bhs(sb, bhs, nr_bhs);
+			if (err)
+				goto error;
+			for (i = 0; i < nr_bhs; i++)
+				brelse(bhs[i]);
+			nr_bhs = 0;
+		}
+		fat_collect_bhs(bhs, &nr_bhs, &fatent);
+	} while (cluster != FAT_ENT_EOF);
+
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		err = fat_sync_bhs(bhs, nr_bhs);
+		if (err)
+			goto error;
+	}
+	err = fat_mirror_bhs(sb, bhs, nr_bhs);
+error:
+	fatent_brelse(&fatent);
+	for (i = 0; i < nr_bhs; i++)
+		brelse(bhs[i]);
+	unlock_fat(sbi);
+
+	fat_clusters_flush(sb);
+
+	return err;
+}
+
+EXPORT_SYMBOL(fat_free_clusters);
+
+int fat_count_free_clusters(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct fatent_operations *ops = sbi->fatent_ops;
+	struct fat_entry fatent;
+	int err = 0, free;
+
+	lock_fat(sbi);
+	if (sbi->free_clusters != -1)
+		goto out;
+
+	free = 0;
+	fatent_init(&fatent);
+	fatent_set_entry(&fatent, FAT_START_ENT);
+	while (fatent.entry < sbi->max_cluster) {
+		err = fat_ent_read_block(sb, &fatent);
+		if (err)
+			goto out;
+
+		do {
+			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
+				free++;
+		} while (fat_ent_next(sbi, &fatent));
+	}
+	sbi->free_clusters = free;
+	fatent_brelse(&fatent);
+out:
+	unlock_fat(sbi);
+	return err;
+}
diff --git a/fs/fat/file.c b/fs/fat/file.c
new file mode 100644
index 0000000..62ffa91
--- /dev/null
+++ b/fs/fat/file.c
@@ -0,0 +1,308 @@
+/*
+ *  linux/fs/fat/file.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *
+ *  regular file handling primitives for fat-based filesystems
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/msdos_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+static ssize_t fat_file_aio_write(struct kiocb *iocb, const char __user *buf,
+				  size_t count, loff_t pos)
+{
+	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
+	int retval;
+
+	retval = generic_file_aio_write(iocb, buf, count, pos);
+	if (retval > 0) {
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+		MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
+		mark_inode_dirty(inode);
+//		check the locking rules
+//		if (IS_SYNC(inode))
+//			fat_sync_inode(inode);
+	}
+	return retval;
+}
+
+static ssize_t fat_file_writev(struct file *filp, const struct iovec *iov,
+			       unsigned long nr_segs, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int retval;
+
+	retval = generic_file_writev(filp, iov, nr_segs, ppos);
+	if (retval > 0) {
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+		MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
+		mark_inode_dirty(inode);
+	}
+	return retval;
+}
+
+int fat_generic_ioctl(struct inode *inode, struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	u32 __user *user_attr = (u32 __user *)arg;
+
+	switch (cmd) {
+	case FAT_IOCTL_GET_ATTRIBUTES:
+	{
+		u32 attr;
+
+		if (inode->i_ino == MSDOS_ROOT_INO)
+			attr = ATTR_DIR;
+		else
+			attr = fat_attr(inode);
+
+		return put_user(attr, user_attr);
+	}
+	case FAT_IOCTL_SET_ATTRIBUTES:
+	{
+		u32 attr, oldattr;
+		int err, is_dir = S_ISDIR(inode->i_mode);
+		struct iattr ia;
+
+		err = get_user(attr, user_attr);
+		if (err)
+			return err;
+
+		down(&inode->i_sem);
+
+		if (IS_RDONLY(inode)) {
+			err = -EROFS;
+			goto up;
+		}
+
+		/*
+		 * ATTR_VOLUME and ATTR_DIR cannot be changed; this also
+		 * prevents the user from turning us into a VFAT
+		 * longname entry.  Also, we obviously can't set
+		 * any of the NTFS attributes in the high 24 bits.
+		 */
+		attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
+		/* Merge in ATTR_VOLUME and ATTR_DIR */
+		attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
+			(is_dir ? ATTR_DIR : 0);
+		oldattr = fat_attr(inode);
+
+		/* Equivalent to a chmod() */
+		ia.ia_valid = ATTR_MODE | ATTR_CTIME;
+		if (is_dir) {
+			ia.ia_mode = MSDOS_MKMODE(attr,
+				S_IRWXUGO & ~sbi->options.fs_dmask)
+				| S_IFDIR;
+		} else {
+			ia.ia_mode = MSDOS_MKMODE(attr,
+				(S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO))
+				& ~sbi->options.fs_fmask)
+				| S_IFREG;
+		}
+
+		/* The root directory has no attributes */
+		if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
+			err = -EINVAL;
+			goto up;
+		}
+
+		if (sbi->options.sys_immutable) {
+			if ((attr | oldattr) & ATTR_SYS) {
+				if (!capable(CAP_LINUX_IMMUTABLE)) {
+					err = -EPERM;
+					goto up;
+				}
+			}
+		}
+
+		/* This MUST be done before doing anything irreversible... */
+		err = notify_change(filp->f_dentry, &ia);
+		if (err)
+			goto up;
+
+		if (sbi->options.sys_immutable) {
+			if (attr & ATTR_SYS)
+				inode->i_flags |= S_IMMUTABLE;
+			else
+				inode->i_flags &= ~S_IMMUTABLE;
+		}
+
+		MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED;
+		mark_inode_dirty(inode);
+	up:
+		up(&inode->i_sem);
+		return err;
+	}
+	default:
+		return -ENOTTY;	/* Inappropriate ioctl for device */
+	}
+}
+
+struct file_operations fat_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
+	.readv		= generic_file_readv,
+	.writev		= fat_file_writev,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= fat_file_aio_write,
+	.mmap		= generic_file_mmap,
+	.ioctl		= fat_generic_ioctl,
+	.fsync		= file_fsync,
+	.sendfile	= generic_file_sendfile,
+};
+
+int fat_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
+	struct inode *inode = dentry->d_inode;
+	int mask, error = 0;
+
+	lock_kernel();
+
+	/* FAT cannot truncate to a longer file */
+	if (attr->ia_valid & ATTR_SIZE) {
+		if (attr->ia_size > inode->i_size) {
+			error = -EPERM;
+			goto out;
+		}
+	}
+
+	error = inode_change_ok(inode, attr);
+	if (error) {
+		if (sbi->options.quiet)
+			error = 0;
+		goto out;
+	}
+	if (((attr->ia_valid & ATTR_UID) &&
+	     (attr->ia_uid != sbi->options.fs_uid)) ||
+	    ((attr->ia_valid & ATTR_GID) &&
+	     (attr->ia_gid != sbi->options.fs_gid)) ||
+	    ((attr->ia_valid & ATTR_MODE) &&
+	     (attr->ia_mode & ~MSDOS_VALID_MODE)))
+		error = -EPERM;
+
+	if (error) {
+		if (sbi->options.quiet)
+			error = 0;
+		goto out;
+	}
+	error = inode_setattr(inode, attr);
+	if (error)
+		goto out;
+
+	if (S_ISDIR(inode->i_mode))
+		mask = sbi->options.fs_dmask;
+	else
+		mask = sbi->options.fs_fmask;
+	inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
+out:
+	unlock_kernel();
+	return error;
+}
+
+EXPORT_SYMBOL(fat_notify_change);
+
+/* Free all clusters after the skip'th cluster. */
+static int fat_free(struct inode *inode, int skip)
+{
+	struct super_block *sb = inode->i_sb;
+	int err, wait, free_start, i_start, i_logstart;
+
+	if (MSDOS_I(inode)->i_start == 0)
+		return 0;
+
+	/*
+	 * Write a new EOF, and get the remaining cluster chain for freeing.
+	 */
+	wait = IS_DIRSYNC(inode);
+	if (skip) {
+		struct fat_entry fatent;
+		int ret, fclus, dclus;
+
+		ret = fat_get_cluster(inode, skip - 1, &fclus, &dclus);
+		if (ret < 0)
+			return ret;
+		else if (ret == FAT_ENT_EOF)
+			return 0;
+
+		fatent_init(&fatent);
+		ret = fat_ent_read(inode, &fatent, dclus);
+		if (ret == FAT_ENT_EOF) {
+			fatent_brelse(&fatent);
+			return 0;
+		} else if (ret == FAT_ENT_FREE) {
+			fat_fs_panic(sb,
+				     "%s: invalid cluster chain (i_pos %lld)",
+				     __FUNCTION__, MSDOS_I(inode)->i_pos);
+			ret = -EIO;
+		} else if (ret > 0) {
+			err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
+			if (err)
+				ret = err;
+		}
+		fatent_brelse(&fatent);
+		if (ret < 0)
+			return ret;
+
+		free_start = ret;
+		i_start = i_logstart = 0;
+		fat_cache_inval_inode(inode);
+	} else {
+		fat_cache_inval_inode(inode);
+
+		i_start = free_start = MSDOS_I(inode)->i_start;
+		i_logstart = MSDOS_I(inode)->i_logstart;
+		MSDOS_I(inode)->i_start = 0;
+		MSDOS_I(inode)->i_logstart = 0;
+	}
+	MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	if (wait) {
+		err = fat_sync_inode(inode);
+		if (err)
+			goto error;
+	} else
+		mark_inode_dirty(inode);
+	inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9);
+
+	/* Free the remaining cluster chain */
+	return fat_free_clusters(inode, free_start);
+
+error:
+	if (i_start) {
+		MSDOS_I(inode)->i_start = i_start;
+		MSDOS_I(inode)->i_logstart = i_logstart;
+	}
+	return err;
+}
+
+void fat_truncate(struct inode *inode)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	const unsigned int cluster_size = sbi->cluster_size;
+	int nr_clusters;
+
+	/*
+	 * This protects against truncating a file to a size bigger than
+	 * it was and then trying to write into the hole.
+	 */
+	if (MSDOS_I(inode)->mmu_private > inode->i_size)
+		MSDOS_I(inode)->mmu_private = inode->i_size;
+
+	nr_clusters = (inode->i_size + (cluster_size - 1)) >> sbi->cluster_bits;
+
+	lock_kernel();
+	fat_free(inode, nr_clusters);
+	unlock_kernel();
+}
+
+struct inode_operations fat_file_inode_operations = {
+	.truncate	= fat_truncate,
+	.setattr	= fat_notify_change,
+};
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
new file mode 100644
index 0000000..8ccee84
--- /dev/null
+++ b/fs/fat/inode.c
@@ -0,0 +1,1351 @@
+/*
+ *  linux/fs/fat/inode.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *  VFAT extensions by Gordon Chaffee, merged with msdos fs by Henrik Storner
+ *  Rewritten for the constant inumbers support by Al Viro
+ *
+ *  Fixes:
+ *
+ *	Max Cohan: Fixed invalid FSINFO offset when info_sector is 0
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
+#include <linux/msdos_fs.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <asm/unaligned.h>
+
+#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
+/* if the user doesn't select VFAT, this is undefined. */
+#define CONFIG_FAT_DEFAULT_IOCHARSET	""
+#endif
+
+static int fat_default_codepage = CONFIG_FAT_DEFAULT_CODEPAGE;
+static char fat_default_iocharset[] = CONFIG_FAT_DEFAULT_IOCHARSET;
+
+
+static int fat_add_cluster(struct inode *inode)
+{
+	int err, cluster;
+
+	err = fat_alloc_clusters(inode, &cluster, 1);
+	if (err)
+		return err;
+	/* FIXME: this cluster should be added after the data of this
+	 * cluster is written */
+	err = fat_chain_add(inode, cluster, 1);
+	if (err)
+		fat_free_clusters(inode, cluster);
+	return err;
+}
+
+static int fat_get_block(struct inode *inode, sector_t iblock,
+			 struct buffer_head *bh_result, int create)
+{
+	struct super_block *sb = inode->i_sb;
+	sector_t phys;
+	int err;
+
+	err = fat_bmap(inode, iblock, &phys);
+	if (err)
+		return err;
+	if (phys) {
+		map_bh(bh_result, sb, phys);
+		return 0;
+	}
+	if (!create)
+		return 0;
+	if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
+		fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)",
+			     MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
+		return -EIO;
+	}
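+	/*
+	 * Only the first block of a cluster needs a new allocation; the
+	 * later blocks of the same cluster are already backed by it.
+	 */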
+	if (!((unsigned long)iblock & (MSDOS_SB(sb)->sec_per_clus - 1))) {
+		err = fat_add_cluster(inode);
+		if (err)
+			return err;
+	}
+	MSDOS_I(inode)->mmu_private += sb->s_blocksize;
+	err = fat_bmap(inode, iblock, &phys);
+	if (err)
+		return err;
+	if (!phys)
+		BUG();
+	set_buffer_new(bh_result);
+	map_bh(bh_result, sb, phys);
+	return 0;
+}
+
+static int fat_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, fat_get_block, wbc);
+}
+
+static int fat_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, fat_get_block);
+}
+
+static int fat_prepare_write(struct file *file, struct page *page,
+			     unsigned from, unsigned to)
+{
+	return cont_prepare_write(page, from, to, fat_get_block,
+				  &MSDOS_I(page->mapping->host)->mmu_private);
+}
+
+static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, fat_get_block);
+}
+
+static struct address_space_operations fat_aops = {
+	.readpage	= fat_readpage,
+	.writepage	= fat_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= fat_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= _fat_bmap
+};
+
+/*
+ * New FAT inode stuff. We do the following:
+ *	a) i_ino is constant and has nothing to do with the on-disk location.
+ *	b) FAT manages its own cache of directory entries.
+ *	c) *This* cache is indexed by on-disk location.
+ *	d) inode has an associated directory entry, all right, but
+ *		it may be unhashed.
+ *	e) currently entries are stored within struct inode. That should
+ *		change.
+ *	f) we deal with races in the following way:
+ *		1. readdir() and lookup() do FAT-dir-cache lookup.
+ *		2. rename() unhashes the F-d-c entry and rehashes it in
+ *			a new place.
+ *		3. unlink() and rmdir() unhash F-d-c entry.
+ *		4. fat_write_inode() checks whether the thing is unhashed.
+ *			If it is we silently return. If it isn't we do bread(),
+ *			check if the location is still valid and retry if it
+ *			isn't. Otherwise we do changes.
+ *		5. Spinlock is used to protect hash/unhash/location check/lookup
+ *		6. fat_clear_inode() unhashes the F-d-c entry.
+ *		7. lookup() and readdir() do igrab() if they find a F-d-c entry
+ *			and consider negative result as cache miss.
+ */
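+
+/*
+ * Note: i_pos encodes the on-disk location of the directory entry backing
+ * an inode: i_pos >> sbi->dir_per_block_bits is the block holding the
+ * entry and i_pos & (sbi->dir_per_block - 1) is its index within that
+ * block (see fat_write_inode() below).  i_pos == 0 means the inode has no
+ * valid on-disk entry at the moment (e.g. after fat_detach()).
+ */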
+
+static void fat_hash_init(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	int i;
+
+	spin_lock_init(&sbi->inode_hash_lock);
+	for (i = 0; i < FAT_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+}
+
+static inline unsigned long fat_hash(struct super_block *sb, loff_t i_pos)
+{
+	unsigned long tmp = (unsigned long)i_pos | (unsigned long) sb;
+	tmp = tmp + (tmp >> FAT_HASH_BITS) + (tmp >> FAT_HASH_BITS * 2);
+	return tmp & FAT_HASH_MASK;
+}
+
+void fat_attach(struct inode *inode, loff_t i_pos)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+	spin_lock(&sbi->inode_hash_lock);
+	MSDOS_I(inode)->i_pos = i_pos;
+	hlist_add_head(&MSDOS_I(inode)->i_fat_hash,
+			sbi->inode_hashtable + fat_hash(sb, i_pos));
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+EXPORT_SYMBOL(fat_attach);
+
+void fat_detach(struct inode *inode)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	spin_lock(&sbi->inode_hash_lock);
+	MSDOS_I(inode)->i_pos = 0;
+	hlist_del_init(&MSDOS_I(inode)->i_fat_hash);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+EXPORT_SYMBOL(fat_detach);
+
+struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct hlist_head *head = sbi->inode_hashtable + fat_hash(sb, i_pos);
+	struct hlist_node *_p;
+	struct msdos_inode_info *i;
+	struct inode *inode = NULL;
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry(i, _p, head, i_fat_hash) {
+		BUG_ON(i->vfs_inode.i_sb != sb);
+		if (i->i_pos != i_pos)
+			continue;
+		inode = igrab(&i->vfs_inode);
+		if (inode)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return inode;
+}
+
+static int is_exec(unsigned char *extension)
+{
+	unsigned char *exe_extensions = "EXECOMBAT", *walk;
+
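+	/*
+	 * "EXECOMBAT" packs the extensions "EXE", "COM" and "BAT" back to
+	 * back; the loop below compares each 3-byte chunk in turn.
+	 */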
+	for (walk = exe_extensions; *walk; walk += 3)
+		if (!strncmp(extension, walk, 3))
+			return 1;
+	return 0;
+}
+
+static int fat_calc_dir_size(struct inode *inode)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	int ret, fclus, dclus;
+
+	inode->i_size = 0;
+	if (MSDOS_I(inode)->i_start == 0)
+		return 0;
+
+	ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
+	if (ret < 0)
+		return ret;
+	inode->i_size = (fclus + 1) << sbi->cluster_bits;
+
+	return 0;
+}
+
+/* doesn't deal with root inode */
+static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	int error;
+
+	MSDOS_I(inode)->i_pos = 0;
+	inode->i_uid = sbi->options.fs_uid;
+	inode->i_gid = sbi->options.fs_gid;
+	inode->i_version++;
+	inode->i_generation = get_seconds();
+
+	if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
+		inode->i_generation &= ~1;
+		inode->i_mode = MSDOS_MKMODE(de->attr,
+			S_IRWXUGO & ~sbi->options.fs_dmask) | S_IFDIR;
+		inode->i_op = sbi->dir_ops;
+		inode->i_fop = &fat_dir_operations;
+
+		MSDOS_I(inode)->i_start = le16_to_cpu(de->start);
+		if (sbi->fat_bits == 32)
+			MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16);
+
+		MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
+		error = fat_calc_dir_size(inode);
+		if (error < 0)
+			return error;
+		MSDOS_I(inode)->mmu_private = inode->i_size;
+
+		inode->i_nlink = fat_subdirs(inode);
+	} else { /* not a directory */
+		inode->i_generation |= 1;
+		inode->i_mode = MSDOS_MKMODE(de->attr,
+		    ((sbi->options.showexec &&
+			!is_exec(de->ext))
+			? S_IRUGO|S_IWUGO : S_IRWXUGO)
+		    & ~sbi->options.fs_fmask) | S_IFREG;
+		MSDOS_I(inode)->i_start = le16_to_cpu(de->start);
+		if (sbi->fat_bits == 32)
+			MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16);
+
+		MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
+		inode->i_size = le32_to_cpu(de->size);
+		inode->i_op = &fat_file_inode_operations;
+		inode->i_fop = &fat_file_operations;
+		inode->i_mapping->a_ops = &fat_aops;
+		MSDOS_I(inode)->mmu_private = inode->i_size;
+	}
+	if (de->attr & ATTR_SYS) {
+		if (sbi->options.sys_immutable)
+			inode->i_flags |= S_IMMUTABLE;
+	}
+	MSDOS_I(inode)->i_attrs = de->attr & ATTR_UNUSED;
+	/* this is as close to the truth as we can get ... */
+	inode->i_blksize = sbi->cluster_size;
+	inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
+			   & ~((loff_t)sbi->cluster_size - 1)) >> 9;
+	inode->i_mtime.tv_sec = inode->i_atime.tv_sec =
+		date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date));
+	inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0;
+	if (sbi->options.isvfat) {
+		int secs = de->ctime_cs / 100;
+		int csecs = de->ctime_cs % 100;
+		inode->i_ctime.tv_sec  =
+			date_dos2unix(le16_to_cpu(de->ctime),
+				      le16_to_cpu(de->cdate)) + secs;
+		inode->i_ctime.tv_nsec = csecs * 10000000;
+	} else
+		inode->i_ctime = inode->i_mtime;
+
+	return 0;
+}
+
+struct inode *fat_build_inode(struct super_block *sb,
+			struct msdos_dir_entry *de, loff_t i_pos)
+{
+	struct inode *inode;
+	int err;
+
+	inode = fat_iget(sb, i_pos);
+	if (inode)
+		goto out;
+	inode = new_inode(sb);
+	if (!inode) {
+		inode = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	inode->i_ino = iunique(sb, MSDOS_ROOT_INO);
+	inode->i_version = 1;
+	err = fat_fill_inode(inode, de);
+	if (err) {
+		iput(inode);
+		inode = ERR_PTR(err);
+		goto out;
+	}
+	fat_attach(inode, i_pos);
+	insert_inode_hash(inode);
+out:
+	return inode;
+}
+
+EXPORT_SYMBOL(fat_build_inode);
+
+static void fat_delete_inode(struct inode *inode)
+{
+	if (!is_bad_inode(inode)) {
+		inode->i_size = 0;
+		fat_truncate(inode);
+	}
+	clear_inode(inode);
+}
+
+static void fat_clear_inode(struct inode *inode)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+
+	if (is_bad_inode(inode))
+		return;
+	lock_kernel();
+	spin_lock(&sbi->inode_hash_lock);
+	fat_cache_inval_inode(inode);
+	hlist_del_init(&MSDOS_I(inode)->i_fat_hash);
+	spin_unlock(&sbi->inode_hash_lock);
+	unlock_kernel();
+}
+
+static void fat_put_super(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+	if (!(sb->s_flags & MS_RDONLY))
+		fat_clusters_flush(sb);
+
+	if (sbi->nls_disk) {
+		unload_nls(sbi->nls_disk);
+		sbi->nls_disk = NULL;
+		sbi->options.codepage = fat_default_codepage;
+	}
+	if (sbi->nls_io) {
+		unload_nls(sbi->nls_io);
+		sbi->nls_io = NULL;
+	}
+	if (sbi->options.iocharset != fat_default_iocharset) {
+		kfree(sbi->options.iocharset);
+		sbi->options.iocharset = fat_default_iocharset;
+	}
+
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+}
+
+static kmem_cache_t *fat_inode_cachep;
+
+static struct inode *fat_alloc_inode(struct super_block *sb)
+{
+	struct msdos_inode_info *ei;
+	ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void fat_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		spin_lock_init(&ei->cache_lru_lock);
+		ei->nr_caches = 0;
+		ei->cache_valid_id = FAT_CACHE_VALID + 1;
+		INIT_LIST_HEAD(&ei->cache_lru);
+		INIT_HLIST_NODE(&ei->i_fat_hash);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static int __init fat_init_inodecache(void)
+{
+	fat_inode_cachep = kmem_cache_create("fat_inode_cache",
+					     sizeof(struct msdos_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (fat_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit fat_destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(fat_inode_cachep))
+		printk(KERN_INFO "fat_inode_cache: not all structures were freed\n");
+}
+
+static int fat_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	*flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+	return 0;
+}
+
+static int fat_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+	/* If the count of free clusters is still unknown, count it here. */
+	if (sbi->free_clusters == -1) {
+		int err = fat_count_free_clusters(sb);
+		if (err)
+			return err;
+	}
+
+	buf->f_type = sb->s_magic;
+	buf->f_bsize = sbi->cluster_size;
+	buf->f_blocks = sbi->max_cluster - FAT_START_ENT;
+	buf->f_bfree = sbi->free_clusters;
+	buf->f_bavail = sbi->free_clusters;
+	buf->f_namelen = sbi->options.isvfat ? 260 : 12;
+
+	return 0;
+}
+
+static int fat_write_inode(struct inode *inode, int wait)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bh;
+	struct msdos_dir_entry *raw_entry;
+	loff_t i_pos;
+	int err = 0;
+
+retry:
+	i_pos = MSDOS_I(inode)->i_pos;
+	if (inode->i_ino == MSDOS_ROOT_INO || !i_pos)
+		return 0;
+
+	lock_kernel();
+	bh = sb_bread(sb, i_pos >> sbi->dir_per_block_bits);
+	if (!bh) {
+		printk(KERN_ERR "FAT: unable to read inode block "
+		       "for updating (i_pos %lld)\n", i_pos);
+		err = -EIO;
+		goto out;
+	}
+	spin_lock(&sbi->inode_hash_lock);
+	if (i_pos != MSDOS_I(inode)->i_pos) {
+		spin_unlock(&sbi->inode_hash_lock);
+		brelse(bh);
+		unlock_kernel();
+		goto retry;
+	}
+
+	raw_entry = &((struct msdos_dir_entry *) (bh->b_data))
+	    [i_pos & (sbi->dir_per_block - 1)];
+	if (S_ISDIR(inode->i_mode))
+		raw_entry->size = 0;
+	else
+		raw_entry->size = cpu_to_le32(inode->i_size);
+	raw_entry->attr = fat_attr(inode);
+	raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart);
+	raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16);
+	fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date);
+	if (sbi->options.isvfat) {
+		fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate);
+		raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 +
+			inode->i_ctime.tv_nsec / 10000000;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	mark_buffer_dirty(bh);
+	if (wait)
+		err = sync_dirty_buffer(bh);
+	brelse(bh);
+out:
+	unlock_kernel();
+	return err;
+}
+
+int fat_sync_inode(struct inode *inode)
+{
+	return fat_write_inode(inode, 1);
+}
+
+EXPORT_SYMBOL(fat_sync_inode);
+
+static int fat_show_options(struct seq_file *m, struct vfsmount *mnt);
+static struct super_operations fat_sops = {
+	.alloc_inode	= fat_alloc_inode,
+	.destroy_inode	= fat_destroy_inode,
+	.write_inode	= fat_write_inode,
+	.delete_inode	= fat_delete_inode,
+	.put_super	= fat_put_super,
+	.statfs		= fat_statfs,
+	.clear_inode	= fat_clear_inode,
+	.remount_fs	= fat_remount,
+
+	.read_inode	= make_bad_inode,
+
+	.show_options	= fat_show_options,
+};
+
+/*
+ * a FAT file handle with fhtype 3 is
+ *  0/  i_ino - for fast, reliable lookup if still in the cache
+ *  1/  i_generation - to see if i_ino is still valid
+ *          bit 0 == 0 iff directory
+ *  2/  i_pos(8-39) - if ino has changed, but still in cache
+ *  3/  i_pos(4-7)|i_logstart - to semi-verify inode found at i_pos
+ *  4/  i_pos(0-3)|parent->i_logstart - maybe used to hunt for the file on disc
+ *
+ * Hack for NFSv2: Maximum FAT entry number is 28bits and maximum
+ * i_pos is 40bits (blocknr(32) + dir offset(8)), so the top 4 bits of
+ * each of the two i_logstart words are used to store the directory
+ * entry offset.
+ */
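+
+/*
+ * Example with made-up values: for i_pos == 0x12345678ab and
+ * i_logstart == 0x123, fat_encode_fh() below stores fh[2] == 0x12345678,
+ * fh[3] == 0xa0000123 and 0xb in the top nibble of fh[4];
+ * fat_get_dentry() reassembles the same 40-bit i_pos from those pieces.
+ */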
+
+static struct dentry *
+fat_decode_fh(struct super_block *sb, __u32 *fh, int len, int fhtype,
+	      int (*acceptable)(void *context, struct dentry *de),
+	      void *context)
+{
+	if (fhtype != 3)
+		return ERR_PTR(-ESTALE);
+	if (len < 5)
+		return ERR_PTR(-ESTALE);
+
+	return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable, context);
+}
+
+static struct dentry *fat_get_dentry(struct super_block *sb, void *inump)
+{
+	struct inode *inode = NULL;
+	struct dentry *result;
+	__u32 *fh = inump;
+
+	inode = iget(sb, fh[0]);
+	if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) {
+		if (inode)
+			iput(inode);
+		inode = NULL;
+	}
+	if (!inode) {
+		loff_t i_pos;
+		int i_logstart = fh[3] & 0x0fffffff;
+
+		i_pos = (loff_t)fh[2] << 8;
+		i_pos |= ((fh[3] >> 24) & 0xf0) | (fh[4] >> 28);
+
+		/* try 2 - see if i_pos is in F-d-c
+		 * require i_logstart to be the same
+		 * Will fail if you truncate and then re-write
+		 */
+
+		inode = fat_iget(sb, i_pos);
+		if (inode && MSDOS_I(inode)->i_logstart != i_logstart) {
+			iput(inode);
+			inode = NULL;
+		}
+	}
+	if (!inode) {
+		/* For now, do nothing
+		 * What we could do is:
+		 * follow the file starting at fh[4], and record
+		 * the ".." entry, and the name of the fh[2] entry.
+		 * Then follow the ".." file, finding the next step up.
+		 * This way we build a path to the root of
+		 * the tree. If this works, we lookup the path and so
+		 * get this inode into the cache.
+		 * Finally try the fat_iget lookup again
+		 * If that fails, then we are totally out of luck
+		 * But all that is for another day
+		 */
+	}
+	if (!inode)
+		return ERR_PTR(-ESTALE);
+
+	/* now to find a dentry.
+	 * If possible, get a well-connected one
+	 */
+	result = d_alloc_anon(inode);
+	if (result == NULL) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	result->d_op = sb->s_root->d_op;
+	return result;
+}
+
+static int
+fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
+{
+	int len = *lenp;
+	struct inode *inode =  de->d_inode;
+	u32 ipos_h, ipos_m, ipos_l;
+
+	if (len < 5)
+		return 255; /* no room */
+
+	ipos_h = MSDOS_I(inode)->i_pos >> 8;
+	ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
+	ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+	*lenp = 5;
+	fh[0] = inode->i_ino;
+	fh[1] = inode->i_generation;
+	fh[2] = ipos_h;
+	fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
+	spin_lock(&de->d_lock);
+	fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
+	spin_unlock(&de->d_lock);
+	return 3;
+}
+
+static struct dentry *fat_get_parent(struct dentry *child)
+{
+	struct buffer_head *bh;
+	struct msdos_dir_entry *de;
+	loff_t i_pos;
+	struct dentry *parent;
+	struct inode *inode;
+	int err;
+
+	lock_kernel();
+
+	err = fat_get_dotdot_entry(child->d_inode, &bh, &de, &i_pos);
+	if (err) {
+		parent = ERR_PTR(err);
+		goto out;
+	}
+	inode = fat_build_inode(child->d_sb, de, i_pos);
+	brelse(bh);
+	if (IS_ERR(inode)) {
+		parent = ERR_PTR(PTR_ERR(inode));
+		goto out;
+	}
+	parent = d_alloc_anon(inode);
+	if (!parent) {
+		iput(inode);
+		parent = ERR_PTR(-ENOMEM);
+	}
+out:
+	unlock_kernel();
+
+	return parent;
+}
+
+static struct export_operations fat_export_ops = {
+	.decode_fh	= fat_decode_fh,
+	.encode_fh	= fat_encode_fh,
+	.get_dentry	= fat_get_dentry,
+	.get_parent	= fat_get_parent,
+};
+
+static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(mnt->mnt_sb);
+	struct fat_mount_options *opts = &sbi->options;
+	int isvfat = opts->isvfat;
+
+	if (opts->fs_uid != 0)
+		seq_printf(m, ",uid=%u", opts->fs_uid);
+	if (opts->fs_gid != 0)
+		seq_printf(m, ",gid=%u", opts->fs_gid);
+	seq_printf(m, ",fmask=%04o", opts->fs_fmask);
+	seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+	if (sbi->nls_disk)
+		seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
+	if (isvfat) {
+		if (sbi->nls_io)
+			seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
+
+		switch (opts->shortname) {
+		case VFAT_SFN_DISPLAY_WIN95 | VFAT_SFN_CREATE_WIN95:
+			seq_puts(m, ",shortname=win95");
+			break;
+		case VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WINNT:
+			seq_puts(m, ",shortname=winnt");
+			break;
+		case VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WIN95:
+			seq_puts(m, ",shortname=mixed");
+			break;
+		case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95:
+			/* seq_puts(m, ",shortname=lower"); */
+			break;
+		default:
+			seq_puts(m, ",shortname=unknown");
+			break;
+		}
+	}
+	if (opts->name_check != 'n')
+		seq_printf(m, ",check=%c", opts->name_check);
+	if (opts->quiet)
+		seq_puts(m, ",quiet");
+	if (opts->showexec)
+		seq_puts(m, ",showexec");
+	if (opts->sys_immutable)
+		seq_puts(m, ",sys_immutable");
+	if (!isvfat) {
+		if (opts->dotsOK)
+			seq_puts(m, ",dotsOK=yes");
+		if (opts->nocase)
+			seq_puts(m, ",nocase");
+	} else {
+		if (opts->utf8)
+			seq_puts(m, ",utf8");
+		if (opts->unicode_xlate)
+			seq_puts(m, ",uni_xlate");
+		if (!opts->numtail)
+			seq_puts(m, ",nonumtail");
+	}
+
+	return 0;
+}
+
+enum {
+	Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
+	Opt_umask, Opt_dmask, Opt_fmask, Opt_codepage, Opt_nocase,
+	Opt_quiet, Opt_showexec, Opt_debug, Opt_immutable,
+	Opt_dots, Opt_nodots,
+	Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
+	Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
+	Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
+	Opt_obsolate, Opt_err,
+};
+
+static match_table_t fat_tokens = {
+	{Opt_check_r, "check=relaxed"},
+	{Opt_check_s, "check=strict"},
+	{Opt_check_n, "check=normal"},
+	{Opt_check_r, "check=r"},
+	{Opt_check_s, "check=s"},
+	{Opt_check_n, "check=n"},
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_umask, "umask=%o"},
+	{Opt_dmask, "dmask=%o"},
+	{Opt_fmask, "fmask=%o"},
+	{Opt_codepage, "codepage=%u"},
+	{Opt_nocase, "nocase"},
+	{Opt_quiet, "quiet"},
+	{Opt_showexec, "showexec"},
+	{Opt_debug, "debug"},
+	{Opt_immutable, "sys_immutable"},
+	{Opt_obsolate, "conv=binary"},
+	{Opt_obsolate, "conv=text"},
+	{Opt_obsolate, "conv=auto"},
+	{Opt_obsolate, "conv=b"},
+	{Opt_obsolate, "conv=t"},
+	{Opt_obsolate, "conv=a"},
+	{Opt_obsolate, "fat=%u"},
+	{Opt_obsolate, "blocksize=%u"},
+	{Opt_obsolate, "cvf_format=%20s"},
+	{Opt_obsolate, "cvf_options=%100s"},
+	{Opt_obsolate, "posix"},
+	{Opt_err, NULL}
+};
+static match_table_t msdos_tokens = {
+	{Opt_nodots, "nodots"},
+	{Opt_nodots, "dotsOK=no"},
+	{Opt_dots, "dots"},
+	{Opt_dots, "dotsOK=yes"},
+	{Opt_err, NULL}
+};
+static match_table_t vfat_tokens = {
+	{Opt_charset, "iocharset=%s"},
+	{Opt_shortname_lower, "shortname=lower"},
+	{Opt_shortname_win95, "shortname=win95"},
+	{Opt_shortname_winnt, "shortname=winnt"},
+	{Opt_shortname_mixed, "shortname=mixed"},
+	{Opt_utf8_no, "utf8=0"},		/* 0 or no or false */
+	{Opt_utf8_no, "utf8=no"},
+	{Opt_utf8_no, "utf8=false"},
+	{Opt_utf8_yes, "utf8=1"},		/* empty or 1 or yes or true */
+	{Opt_utf8_yes, "utf8=yes"},
+	{Opt_utf8_yes, "utf8=true"},
+	{Opt_utf8_yes, "utf8"},
+	{Opt_uni_xl_no, "uni_xlate=0"},		/* 0 or no or false */
+	{Opt_uni_xl_no, "uni_xlate=no"},
+	{Opt_uni_xl_no, "uni_xlate=false"},
+	{Opt_uni_xl_yes, "uni_xlate=1"},	/* empty or 1 or yes or true */
+	{Opt_uni_xl_yes, "uni_xlate=yes"},
+	{Opt_uni_xl_yes, "uni_xlate=true"},
+	{Opt_uni_xl_yes, "uni_xlate"},
+	{Opt_nonumtail_no, "nonumtail=0"},	/* 0 or no or false */
+	{Opt_nonumtail_no, "nonumtail=no"},
+	{Opt_nonumtail_no, "nonumtail=false"},
+	{Opt_nonumtail_yes, "nonumtail=1"},	/* empty or 1 or yes or true */
+	{Opt_nonumtail_yes, "nonumtail=yes"},
+	{Opt_nonumtail_yes, "nonumtail=true"},
+	{Opt_nonumtail_yes, "nonumtail"},
+	{Opt_err, NULL}
+};
+
+static int parse_options(char *options, int is_vfat, int *debug,
+			 struct fat_mount_options *opts)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+	char *iocharset;
+
+	opts->isvfat = is_vfat;
+
+	opts->fs_uid = current->uid;
+	opts->fs_gid = current->gid;
+	opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+	opts->codepage = fat_default_codepage;
+	opts->iocharset = fat_default_iocharset;
+	if (is_vfat)
+		opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95;
+	else
+		opts->shortname = 0;
+	opts->name_check = 'n';
+	opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK =  0;
+	opts->utf8 = opts->unicode_xlate = 0;
+	opts->numtail = 1;
+	opts->nocase = 0;
+	*debug = 0;
+
+	if (!options)
+		return 0;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, fat_tokens, args);
+		if (token == Opt_err) {
+			if (is_vfat)
+				token = match_token(p, vfat_tokens, args);
+			else
+				token = match_token(p, msdos_tokens, args);
+		}
+		switch (token) {
+		case Opt_check_s:
+			opts->name_check = 's';
+			break;
+		case Opt_check_r:
+			opts->name_check = 'r';
+			break;
+		case Opt_check_n:
+			opts->name_check = 'n';
+			break;
+		case Opt_nocase:
+			if (!is_vfat)
+				opts->nocase = 1;
+			else {
+				/* for backward compatibility */
+				opts->shortname = VFAT_SFN_DISPLAY_WIN95
+					| VFAT_SFN_CREATE_WIN95;
+			}
+			break;
+		case Opt_quiet:
+			opts->quiet = 1;
+			break;
+		case Opt_showexec:
+			opts->showexec = 1;
+			break;
+		case Opt_debug:
+			*debug = 1;
+			break;
+		case Opt_immutable:
+			opts->sys_immutable = 1;
+			break;
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->fs_uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->fs_gid = option;
+			break;
+		case Opt_umask:
+			if (match_octal(&args[0], &option))
+				return 0;
+			opts->fs_fmask = opts->fs_dmask = option;
+			break;
+		case Opt_dmask:
+			if (match_octal(&args[0], &option))
+				return 0;
+			opts->fs_dmask = option;
+			break;
+		case Opt_fmask:
+			if (match_octal(&args[0], &option))
+				return 0;
+			opts->fs_fmask = option;
+			break;
+		case Opt_codepage:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->codepage = option;
+			break;
+
+		/* msdos specific */
+		case Opt_dots:
+			opts->dotsOK = 1;
+			break;
+		case Opt_nodots:
+			opts->dotsOK = 0;
+			break;
+
+		/* vfat specific */
+		case Opt_charset:
+			if (opts->iocharset != fat_default_iocharset)
+				kfree(opts->iocharset);
+			iocharset = match_strdup(&args[0]);
+			if (!iocharset)
+				return -ENOMEM;
+			opts->iocharset = iocharset;
+			break;
+		case Opt_shortname_lower:
+			opts->shortname = VFAT_SFN_DISPLAY_LOWER
+					| VFAT_SFN_CREATE_WIN95;
+			break;
+		case Opt_shortname_win95:
+			opts->shortname = VFAT_SFN_DISPLAY_WIN95
+					| VFAT_SFN_CREATE_WIN95;
+			break;
+		case Opt_shortname_winnt:
+			opts->shortname = VFAT_SFN_DISPLAY_WINNT
+					| VFAT_SFN_CREATE_WINNT;
+			break;
+		case Opt_shortname_mixed:
+			opts->shortname = VFAT_SFN_DISPLAY_WINNT
+					| VFAT_SFN_CREATE_WIN95;
+			break;
+		case Opt_utf8_no:		/* 0 or no or false */
+			opts->utf8 = 0;
+			break;
+		case Opt_utf8_yes:		/* empty or 1 or yes or true */
+			opts->utf8 = 1;
+			break;
+		case Opt_uni_xl_no:		/* 0 or no or false */
+			opts->unicode_xlate = 0;
+			break;
+		case Opt_uni_xl_yes:		/* empty or 1 or yes or true */
+			opts->unicode_xlate = 1;
+			break;
+		case Opt_nonumtail_no:		/* 0 or no or false */
+			opts->numtail = 1;	/* negated option */
+			break;
+		case Opt_nonumtail_yes:		/* empty or 1 or yes or true */
+			opts->numtail = 0;	/* negated option */
+			break;
+
+		/* obsolete mount options */
+		case Opt_obsolate:
+			printk(KERN_INFO "FAT: \"%s\" option is obsolete "
+			       "and no longer supported\n", p);
+			break;
+		/* unknown option */
+		default:
+			printk(KERN_ERR "FAT: Unrecognized mount option \"%s\" "
+			       "or missing value\n", p);
+			return -EINVAL;
+		}
+	}
+	/* UTF8 doesn't provide FAT semantics */
+	if (!strcmp(opts->iocharset, "utf8")) {
+		printk(KERN_ERR "FAT: utf8 is not a recommended IO charset"
+		       " for FAT filesystems; the filesystem will be case sensitive!\n");
+	}
+
+	if (opts->unicode_xlate)
+		opts->utf8 = 0;
+
+	return 0;
+}
+
+static int fat_read_root(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	int error;
+
+	MSDOS_I(inode)->i_pos = 0;
+	inode->i_uid = sbi->options.fs_uid;
+	inode->i_gid = sbi->options.fs_gid;
+	inode->i_version++;
+	inode->i_generation = 0;
+	inode->i_mode = (S_IRWXUGO & ~sbi->options.fs_dmask) | S_IFDIR;
+	inode->i_op = sbi->dir_ops;
+	inode->i_fop = &fat_dir_operations;
+	if (sbi->fat_bits == 32) {
+		MSDOS_I(inode)->i_start = sbi->root_cluster;
+		error = fat_calc_dir_size(inode);
+		if (error < 0)
+			return error;
+	} else {
+		MSDOS_I(inode)->i_start = 0;
+		inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry);
+	}
+	inode->i_blksize = sbi->cluster_size;
+	inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
+			   & ~((loff_t)sbi->cluster_size - 1)) >> 9;
+	MSDOS_I(inode)->i_logstart = 0;
+	MSDOS_I(inode)->mmu_private = inode->i_size;
+
+	MSDOS_I(inode)->i_attrs = ATTR_NONE;
+	inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = 0;
+	inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+	inode->i_nlink = fat_subdirs(inode)+2;
+
+	return 0;
+}
+
+/*
+ * Read the super block of an MS-DOS FS.
+ */
+int fat_fill_super(struct super_block *sb, void *data, int silent,
+		   struct inode_operations *fs_dir_inode_ops, int isvfat)
+{
+	struct inode *root_inode = NULL;
+	struct buffer_head *bh;
+	struct fat_boot_sector *b;
+	struct msdos_sb_info *sbi;
+	u16 logical_sector_size;
+	u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
+	int debug;
+	unsigned int media;
+	long error;
+	char buf[50];
+
+	sbi = kmalloc(sizeof(struct msdos_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct msdos_sb_info));
+
+	sb->s_flags |= MS_NODIRATIME;
+	sb->s_magic = MSDOS_SUPER_MAGIC;
+	sb->s_op = &fat_sops;
+	sb->s_export_op = &fat_export_ops;
+	sbi->dir_ops = fs_dir_inode_ops;
+
+	error = parse_options(data, isvfat, &debug, &sbi->options);
+	if (error)
+		goto out_fail;
+
+	error = -EIO;
+	sb_min_blocksize(sb, 512);
+	bh = sb_bread(sb, 0);
+	if (bh == NULL) {
+		printk(KERN_ERR "FAT: unable to read boot sector\n");
+		goto out_fail;
+	}
+
+	b = (struct fat_boot_sector *) bh->b_data;
+	if (!b->reserved) {
+		if (!silent)
+			printk(KERN_ERR "FAT: bogus number of reserved sectors\n");
+		brelse(bh);
+		goto out_invalid;
+	}
+	if (!b->fats) {
+		if (!silent)
+			printk(KERN_ERR "FAT: bogus number of FAT structures\n");
+		brelse(bh);
+		goto out_invalid;
+	}
+
+	/*
+	 * Earlier we checked here that b->secs_track and b->head are nonzero,
+	 * but it turns out valid FAT filesystems can have zero there.
+	 */
+
+	media = b->media;
+	if (!FAT_VALID_MEDIA(media)) {
+		if (!silent)
+			printk(KERN_ERR "FAT: invalid media value (0x%02x)\n",
+			       media);
+		brelse(bh);
+		goto out_invalid;
+	}
+	logical_sector_size =
+		le16_to_cpu(get_unaligned((__le16 *)&b->sector_size));
+	if (!logical_sector_size
+	    || (logical_sector_size & (logical_sector_size - 1))
+	    || (logical_sector_size < 512)
+	    || (PAGE_CACHE_SIZE < logical_sector_size)) {
+		if (!silent)
+			printk(KERN_ERR "FAT: bogus logical sector size %u\n",
+			       logical_sector_size);
+		brelse(bh);
+		goto out_invalid;
+	}
+	sbi->sec_per_clus = b->sec_per_clus;
+	if (!sbi->sec_per_clus
+	    || (sbi->sec_per_clus & (sbi->sec_per_clus - 1))) {
+		if (!silent)
+			printk(KERN_ERR "FAT: bogus sectors per cluster %u\n",
+			       sbi->sec_per_clus);
+		brelse(bh);
+		goto out_invalid;
+	}
+
+	if (logical_sector_size < sb->s_blocksize) {
+		printk(KERN_ERR "FAT: logical sector size too small for device"
+		       " (logical sector size = %u)\n", logical_sector_size);
+		brelse(bh);
+		goto out_fail;
+	}
+	if (logical_sector_size > sb->s_blocksize) {
+		brelse(bh);
+
+		if (!sb_set_blocksize(sb, logical_sector_size)) {
+			printk(KERN_ERR "FAT: unable to set blocksize %u\n",
+			       logical_sector_size);
+			goto out_fail;
+		}
+		bh = sb_bread(sb, 0);
+		if (bh == NULL) {
+			printk(KERN_ERR "FAT: unable to read boot sector"
+			       " (logical sector size = %lu)\n",
+			       sb->s_blocksize);
+			goto out_fail;
+		}
+		b = (struct fat_boot_sector *) bh->b_data;
+	}
+
+	sbi->cluster_size = sb->s_blocksize * sbi->sec_per_clus;
+	sbi->cluster_bits = ffs(sbi->cluster_size) - 1;
+	sbi->fats = b->fats;
+	sbi->fat_bits = 0;		/* Don't know yet */
+	sbi->fat_start = le16_to_cpu(b->reserved);
+	sbi->fat_length = le16_to_cpu(b->fat_length);
+	sbi->root_cluster = 0;
+	sbi->free_clusters = -1;	/* Don't know yet */
+	sbi->prev_free = FAT_START_ENT;
+
+	if (!sbi->fat_length && b->fat32_length) {
+		struct fat_boot_fsinfo *fsinfo;
+		struct buffer_head *fsinfo_bh;
+
+		/* Must be FAT32 */
+		sbi->fat_bits = 32;
+		sbi->fat_length = le32_to_cpu(b->fat32_length);
+		sbi->root_cluster = le32_to_cpu(b->root_cluster);
+
+		sb->s_maxbytes = 0xffffffff;
+
+		/* MC - if info_sector is 0, don't multiply by 0 */
+		sbi->fsinfo_sector = le16_to_cpu(b->info_sector);
+		if (sbi->fsinfo_sector == 0)
+			sbi->fsinfo_sector = 1;
+
+		fsinfo_bh = sb_bread(sb, sbi->fsinfo_sector);
+		if (fsinfo_bh == NULL) {
+			printk(KERN_ERR "FAT: bread failed, FSINFO block"
+			       " (sector = %lu)\n", sbi->fsinfo_sector);
+			brelse(bh);
+			goto out_fail;
+		}
+
+		fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
+		if (!IS_FSINFO(fsinfo)) {
+			printk(KERN_WARNING
+			       "FAT: Did not find valid FSINFO signature.\n"
+			       "     Found signature1 0x%08x signature2 0x%08x"
+			       " (sector = %lu)\n",
+			       le32_to_cpu(fsinfo->signature1),
+			       le32_to_cpu(fsinfo->signature2),
+			       sbi->fsinfo_sector);
+		} else {
+			sbi->free_clusters = le32_to_cpu(fsinfo->free_clusters);
+			sbi->prev_free = le32_to_cpu(fsinfo->next_cluster);
+		}
+
+		brelse(fsinfo_bh);
+	}
+
+	sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
+	sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
+
+	sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length;
+	sbi->dir_entries =
+		le16_to_cpu(get_unaligned((__le16 *)&b->dir_entries));
+	if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
+		if (!silent)
+			printk(KERN_ERR "FAT: bogus directory-entries per block"
+			       " (%u)\n", sbi->dir_entries);
+		brelse(bh);
+		goto out_invalid;
+	}
+
+	rootdir_sectors = sbi->dir_entries
+		* sizeof(struct msdos_dir_entry) / sb->s_blocksize;
+	sbi->data_start = sbi->dir_start + rootdir_sectors;
+	total_sectors = le16_to_cpu(get_unaligned((__le16 *)&b->sectors));
+	if (total_sectors == 0)
+		total_sectors = le32_to_cpu(b->total_sect);
+
+	total_clusters = (total_sectors - sbi->data_start) / sbi->sec_per_clus;
+
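+	/*
+	 * Unless FAT32 was already detected above, the FAT type is decided
+	 * purely by the number of data clusters, as the FAT specification
+	 * requires.
+	 */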
+	if (sbi->fat_bits != 32)
+		sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
+
+	/* check that FAT table does not overflow */
+	fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+	total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
+	if (total_clusters > MAX_FAT(sb)) {
+		if (!silent)
+			printk(KERN_ERR "FAT: count of clusters too big (%u)\n",
+			       total_clusters);
+		brelse(bh);
+		goto out_invalid;
+	}
+
+	sbi->max_cluster = total_clusters + FAT_START_ENT;
+	/* check the free_clusters, it's not necessarily correct */
+	if (sbi->free_clusters != -1 && sbi->free_clusters > total_clusters)
+		sbi->free_clusters = -1;
+	/* check the prev_free, it's not necessarily correct */
+	sbi->prev_free %= sbi->max_cluster;
+	if (sbi->prev_free < FAT_START_ENT)
+		sbi->prev_free = FAT_START_ENT;
+
+	brelse(bh);
+
+	/* set up enough so that it can read an inode */
+	fat_hash_init(sb);
+	fat_ent_access_init(sb);
+
+	/*
+	 * The low byte of the FAT's first entry must have the same value
+	 * as the media field.  But in the real world too many devices
+	 * write the wrong value, so that validity check was removed.
+	 *
+	 * if (FAT_FIRST_ENT(sb, media) != first)
+	 */
+
+	error = -EINVAL;
+	sprintf(buf, "cp%d", sbi->options.codepage);
+	sbi->nls_disk = load_nls(buf);
+	if (!sbi->nls_disk) {
+		printk(KERN_ERR "FAT: codepage %s not found\n", buf);
+		goto out_fail;
+	}
+
+	/* FIXME: utf8 is using iocharset for upper/lower conversion */
+	if (sbi->options.isvfat) {
+		sbi->nls_io = load_nls(sbi->options.iocharset);
+		if (!sbi->nls_io) {
+			printk(KERN_ERR "FAT: IO charset %s not found\n",
+			       sbi->options.iocharset);
+			goto out_fail;
+		}
+	}
+
+	error = -ENOMEM;
+	root_inode = new_inode(sb);
+	if (!root_inode)
+		goto out_fail;
+	root_inode->i_ino = MSDOS_ROOT_INO;
+	root_inode->i_version = 1;
+	error = fat_read_root(root_inode);
+	if (error < 0)
+		goto out_fail;
+	error = -ENOMEM;
+	insert_inode_hash(root_inode);
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root) {
+		printk(KERN_ERR "FAT: get root inode failed\n");
+		goto out_fail;
+	}
+
+	return 0;
+
+out_invalid:
+	error = -EINVAL;
+	if (!silent)
+		printk(KERN_INFO "VFS: Can't find a valid FAT filesystem"
+		       " on dev %s.\n", sb->s_id);
+
+out_fail:
+	if (root_inode)
+		iput(root_inode);
+	if (sbi->nls_io)
+		unload_nls(sbi->nls_io);
+	if (sbi->nls_disk)
+		unload_nls(sbi->nls_disk);
+	if (sbi->options.iocharset != fat_default_iocharset)
+		kfree(sbi->options.iocharset);
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+	return error;
+}
+
+EXPORT_SYMBOL(fat_fill_super);
+
+int __init fat_cache_init(void);
+void __exit fat_cache_destroy(void);
+
+static int __init init_fat_fs(void)
+{
+	int ret;
+
+	ret = fat_cache_init();
+	if (ret < 0)
+		return ret;
+	return fat_init_inodecache();
+}
+
+static void __exit exit_fat_fs(void)
+{
+	fat_cache_destroy();
+	fat_destroy_inodecache();
+}
+
+module_init(init_fat_fs)
+module_exit(exit_fat_fs)
+
+MODULE_LICENSE("GPL");
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
new file mode 100644
index 0000000..2a0df21
--- /dev/null
+++ b/fs/fat/misc.c
@@ -0,0 +1,225 @@
+/*
+ *  linux/fs/fat/misc.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *  22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980
+ *		 and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru)
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/msdos_fs.h>
+#include <linux/buffer_head.h>
+
+/*
+ * fat_fs_panic reports a severe file system problem and sets the file system
+ * read-only. The file system can be made writable again by remounting it.
+ */
+void fat_fs_panic(struct super_block *s, const char *fmt, ...)
+{
+	va_list args;
+
+	printk(KERN_ERR "FAT: Filesystem panic (dev %s)\n", s->s_id);
+
+	printk(KERN_ERR "    ");
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+	printk("\n");
+
+	if (!(s->s_flags & MS_RDONLY)) {
+		s->s_flags |= MS_RDONLY;
+		printk(KERN_ERR "    File system has been set read-only\n");
+	}
+}
+
+EXPORT_SYMBOL(fat_fs_panic);
+
+/* Flushes the number of free clusters on FAT32 */
+/* XXX: Need to write one per FSINFO block.  Currently only writes 1 */
+void fat_clusters_flush(struct super_block *sb)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	struct buffer_head *bh;
+	struct fat_boot_fsinfo *fsinfo;
+
+	if (sbi->fat_bits != 32)
+		return;
+
+	bh = sb_bread(sb, sbi->fsinfo_sector);
+	if (bh == NULL) {
+		printk(KERN_ERR "FAT: bread failed in fat_clusters_flush\n");
+		return;
+	}
+
+	fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
+	/* Sanity check */
+	if (!IS_FSINFO(fsinfo)) {
+		printk(KERN_ERR "FAT: Did not find valid FSINFO signature.\n"
+		       "     Found signature1 0x%08x signature2 0x%08x"
+		       " (sector = %lu)\n",
+		       le32_to_cpu(fsinfo->signature1),
+		       le32_to_cpu(fsinfo->signature2),
+		       sbi->fsinfo_sector);
+	} else {
+		if (sbi->free_clusters != -1)
+			fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters);
+		if (sbi->prev_free != -1)
+			fsinfo->next_cluster = cpu_to_le32(sbi->prev_free);
+		mark_buffer_dirty(bh);
+		if (sb->s_flags & MS_SYNCHRONOUS)
+			sync_dirty_buffer(bh);
+	}
+	brelse(bh);
+}
+
+/*
+ * fat_chain_add() adds a new cluster to the chain of clusters represented
+ * by inode.
+ */
+int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	int ret, new_fclus, last;
+
+	/*
+	 * We must locate the last cluster of the file to add this new
+	 * one (new_dclus) to the end of the linked list (the FAT).
+	 */
+	last = new_fclus = 0;
+	if (MSDOS_I(inode)->i_start) {
+		int fclus, dclus;
+
+		ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
+		if (ret < 0)
+			return ret;
+		new_fclus = fclus + 1;
+		last = dclus;
+	}
+
+	/* add new one to the last of the cluster chain */
+	if (last) {
+		struct fat_entry fatent;
+
+		fatent_init(&fatent);
+		ret = fat_ent_read(inode, &fatent, last);
+		if (ret >= 0) {
+			int wait = inode_needs_sync(inode);
+			ret = fat_ent_write(inode, &fatent, new_dclus, wait);
+			fatent_brelse(&fatent);
+		}
+		if (ret < 0)
+			return ret;
+//		fat_cache_add(inode, new_fclus, new_dclus);
+	} else {
+		MSDOS_I(inode)->i_start = new_dclus;
+		MSDOS_I(inode)->i_logstart = new_dclus;
+		/*
+		 * Since generic_osync_inode() will synchronize later if
+		 * this is not a directory, we don't do it here.
+		 */
+		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
+			ret = fat_sync_inode(inode);
+			if (ret)
+				return ret;
+		} else
+			mark_inode_dirty(inode);
+	}
+	if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
+		fat_fs_panic(sb, "clusters badly computed (%d != %lu)",
+			new_fclus, inode->i_blocks >> (sbi->cluster_bits - 9));
+		fat_cache_inval_inode(inode);
+	}
+	inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
+
+	return 0;
+}
+
+extern struct timezone sys_tz;
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+static int day_n[] = {
+   /* Jan  Feb  Mar  Apr   May  Jun  Jul  Aug  Sep  Oct  Nov  Dec */
+	0,  31,  59,  90,  120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0
+};
+
+/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
+int date_dos2unix(unsigned short time, unsigned short date)
+{
+	int month, year, secs;
+
+	/*
+	 * first subtract and mask after that... Otherwise, if
+	 * date == 0, bad things happen
+	 */
+	month = ((date >> 5) - 1) & 15;
+	year = date >> 9;
+	secs = (time & 31)*2+60*((time >> 5) & 63)+(time >> 11)*3600+86400*
+	    ((date & 31)-1+day_n[month]+(year/4)+year*365-((year & 3) == 0 &&
+	    month < 2 ? 1 : 0)+3653);
+			/* days since 1.1.70 plus 80's leap day */
+	secs += sys_tz.tz_minuteswest*60;
+	return secs;
+}
+
+/* Convert linear UNIX date to a MS-DOS time/date pair. */
+void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
+{
+	int day, year, nl_day, month;
+
+	unix_date -= sys_tz.tz_minuteswest*60;
+
+	/* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
+	if (unix_date < 315532800)
+		unix_date = 315532800;
+
+	*time = cpu_to_le16((unix_date % 60)/2+(((unix_date/60) % 60) << 5)+
+	    (((unix_date/3600) % 24) << 11));
+	day = unix_date/86400-3652;
+	year = day/365;
+	if ((year+3)/4+365*year > day)
+		year--;
+	day -= (year+3)/4+365*year;
+	if (day == 59 && !(year & 3)) {
+		nl_day = day;
+		month = 2;
+	} else {
+		nl_day = (year & 3) || day <= 59 ? day : day-1;
+		for (month = 0; month < 12; month++) {
+			if (day_n[month] > nl_day)
+				break;
+		}
+	}
+	*date = cpu_to_le16(nl_day-day_n[month-1]+1+(month << 5)+(year << 9));
+}
+
+EXPORT_SYMBOL(fat_date_unix2dos);
+
+int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
+{
+	int i, e, err = 0;
+
+	for (i = 0; i < nr_bhs; i++) {
+		lock_buffer(bhs[i]);
+		if (test_clear_buffer_dirty(bhs[i])) {
+			get_bh(bhs[i]);
+			bhs[i]->b_end_io = end_buffer_write_sync;
+			e = submit_bh(WRITE, bhs[i]);
+			if (!err && e)
+				err = e;
+		} else
+			unlock_buffer(bhs[i]);
+	}
+	for (i = 0; i < nr_bhs; i++) {
+		wait_on_buffer(bhs[i]);
+		if (buffer_eopnotsupp(bhs[i])) {
+			clear_buffer_eopnotsupp(bhs[i]);
+			err = -EOPNOTSUPP;
+		} else if (!err && !buffer_uptodate(bhs[i]))
+			err = -EIO;
+	}
+	return err;
+}
+
+EXPORT_SYMBOL(fat_sync_bhs);
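
For reference, the two date helpers above work on the standard MS-DOS timestamp layout: "time" packs hours/minutes/seconds-divided-by-two into bits 15-11/10-5/4-0, and "date" packs year-since-1980/month/day into bits 15-9/8-5/4-0. A minimal userspace sketch of the decoding (illustrative only, not part of this patch; the sample values are made up):

#include <stdio.h>

int main(void)
{
	unsigned short time = 0x7a8f;	/* example: 15:20:30 */
	unsigned short date = 0x3290;	/* example: 2005-04-16 */

	/* same bit layout that date_dos2unix()/fat_date_unix2dos() operate on */
	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       (date >> 9) + 1980, (date >> 5) & 15, date & 31,
	       time >> 11, (time >> 5) & 63, (time & 31) * 2);
	return 0;
}

Unlike this sketch, the kernel helpers also apply the sys_tz minutes-west offset, since FAT timestamps are stored in local time.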
diff --git a/fs/fcntl.c b/fs/fcntl.c
new file mode 100644
index 0000000..c170806
--- /dev/null
+++ b/fs/fcntl.c
@@ -0,0 +1,601 @@
+/*
+ *  linux/fs/fcntl.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/syscalls.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/dnotify.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/ptrace.h>
+
+#include <asm/poll.h>
+#include <asm/siginfo.h>
+#include <asm/uaccess.h>
+
+void fastcall set_close_on_exec(unsigned int fd, int flag)
+{
+	struct files_struct *files = current->files;
+	spin_lock(&files->file_lock);
+	if (flag)
+		FD_SET(fd, files->close_on_exec);
+	else
+		FD_CLR(fd, files->close_on_exec);
+	spin_unlock(&files->file_lock);
+}
+
+static inline int get_close_on_exec(unsigned int fd)
+{
+	struct files_struct *files = current->files;
+	int res;
+	spin_lock(&files->file_lock);
+	res = FD_ISSET(fd, files->close_on_exec);
+	spin_unlock(&files->file_lock);
+	return res;
+}
+
+/*
+ * locate_fd finds a free file descriptor in the open_fds fdset,
+ * expanding the fd arrays if necessary.  Must be called with the
+ * file_lock held for write.
+ */
+
+static int locate_fd(struct files_struct *files, 
+			    struct file *file, unsigned int orig_start)
+{
+	unsigned int newfd;
+	unsigned int start;
+	int error;
+
+	error = -EINVAL;
+	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+		goto out;
+
+repeat:
+	/*
+	 * Someone might have closed fd's in the range
+	 * orig_start..files->next_fd
+	 */
+	start = orig_start;
+	if (start < files->next_fd)
+		start = files->next_fd;
+
+	newfd = start;
+	if (start < files->max_fdset) {
+		newfd = find_next_zero_bit(files->open_fds->fds_bits,
+			files->max_fdset, start);
+	}
+	
+	error = -EMFILE;
+	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+		goto out;
+
+	error = expand_files(files, newfd);
+	if (error < 0)
+		goto out;
+
+	/*
+	 * If we needed to expand the fd array we
+	 * might have blocked - try again.
+	 */
+	if (error)
+		goto repeat;
+
+	if (start <= files->next_fd)
+		files->next_fd = newfd + 1;
+	
+	error = newfd;
+	
+out:
+	return error;
+}
+
+static int dupfd(struct file *file, unsigned int start)
+{
+	struct files_struct * files = current->files;
+	int fd;
+
+	spin_lock(&files->file_lock);
+	fd = locate_fd(files, file, start);
+	if (fd >= 0) {
+		FD_SET(fd, files->open_fds);
+		FD_CLR(fd, files->close_on_exec);
+		spin_unlock(&files->file_lock);
+		fd_install(fd, file);
+	} else {
+		spin_unlock(&files->file_lock);
+		fput(file);
+	}
+
+	return fd;
+}
+
+asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
+{
+	int err = -EBADF;
+	struct file * file, *tofree;
+	struct files_struct * files = current->files;
+
+	spin_lock(&files->file_lock);
+	if (!(file = fcheck(oldfd)))
+		goto out_unlock;
+	err = newfd;
+	if (newfd == oldfd)
+		goto out_unlock;
+	err = -EBADF;
+	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+		goto out_unlock;
+	get_file(file);			/* We are now finished with oldfd */
+
+	err = expand_files(files, newfd);
+	if (err < 0)
+		goto out_fput;
+
+	/* To avoid races with open() and dup(), we will mark the fd as
+	 * in-use in the open-file bitmap throughout the entire dup2()
+	 * process.  This is quite safe: do_close() uses the fd array
+	 * entry, not the bitmap, to decide what work needs to be
+	 * done.  --sct */
+	/* Doesn't work. open() might be there first. --AV */
+
+	/* Yes. It's a race. In user space. Nothing sane to do */
+	err = -EBUSY;
+	tofree = files->fd[newfd];
+	if (!tofree && FD_ISSET(newfd, files->open_fds))
+		goto out_fput;
+
+	files->fd[newfd] = file;
+	FD_SET(newfd, files->open_fds);
+	FD_CLR(newfd, files->close_on_exec);
+	spin_unlock(&files->file_lock);
+
+	if (tofree)
+		filp_close(tofree, files);
+	err = newfd;
+out:
+	return err;
+out_unlock:
+	spin_unlock(&files->file_lock);
+	goto out;
+
+out_fput:
+	spin_unlock(&files->file_lock);
+	fput(file);
+	goto out;
+}
+
+asmlinkage long sys_dup(unsigned int fildes)
+{
+	int ret = -EBADF;
+	struct file * file = fget(fildes);
+
+	if (file)
+		ret = dupfd(file, 0);
+	return ret;
+}
+
+#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
+
+static int setfl(int fd, struct file * filp, unsigned long arg)
+{
+	struct inode * inode = filp->f_dentry->d_inode;
+	int error = 0;
+
+	/* O_APPEND cannot be cleared if the file is marked as append-only */
+	if (!(arg & O_APPEND) && IS_APPEND(inode))
+		return -EPERM;
+
+	/* O_NOATIME can only be set by the owner or superuser */
+	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
+		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+			return -EPERM;
+
+	/* required for strict SunOS emulation */
+	if (O_NONBLOCK != O_NDELAY)
+	       if (arg & O_NDELAY)
+		   arg |= O_NONBLOCK;
+
+	if (arg & O_DIRECT) {
+		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
+			!filp->f_mapping->a_ops->direct_IO)
+				return -EINVAL;
+	}
+
+	if (filp->f_op && filp->f_op->check_flags)
+		error = filp->f_op->check_flags(arg);
+	if (error)
+		return error;
+
+	lock_kernel();
+	if ((arg ^ filp->f_flags) & FASYNC) {
+		if (filp->f_op && filp->f_op->fasync) {
+			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
+			if (error < 0)
+				goto out;
+		}
+	}
+
+	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ out:
+	unlock_kernel();
+	return error;
+}
+
+static void f_modown(struct file *filp, unsigned long pid,
+                     uid_t uid, uid_t euid, int force)
+{
+	write_lock_irq(&filp->f_owner.lock);
+	if (force || !filp->f_owner.pid) {
+		filp->f_owner.pid = pid;
+		filp->f_owner.uid = uid;
+		filp->f_owner.euid = euid;
+	}
+	write_unlock_irq(&filp->f_owner.lock);
+}
+
+int f_setown(struct file *filp, unsigned long arg, int force)
+{
+	int err;
+	
+	err = security_file_set_fowner(filp);
+	if (err)
+		return err;
+
+	f_modown(filp, arg, current->uid, current->euid, force);
+	return 0;
+}
+
+EXPORT_SYMBOL(f_setown);
+
+void f_delown(struct file *filp)
+{
+	f_modown(filp, 0, 0, 0, 1);
+}
+
+static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
+		struct file *filp)
+{
+	long err = -EINVAL;
+
+	switch (cmd) {
+	case F_DUPFD:
+		get_file(filp);
+		err = dupfd(filp, arg);
+		break;
+	case F_GETFD:
+		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
+		break;
+	case F_SETFD:
+		err = 0;
+		set_close_on_exec(fd, arg & FD_CLOEXEC);
+		break;
+	case F_GETFL:
+		err = filp->f_flags;
+		break;
+	case F_SETFL:
+		err = setfl(fd, filp, arg);
+		break;
+	case F_GETLK:
+		err = fcntl_getlk(filp, (struct flock __user *) arg);
+		break;
+	case F_SETLK:
+	case F_SETLKW:
+		err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
+		break;
+	case F_GETOWN:
+		/*
+		 * XXX If f_owner is a process group, the
+		 * negative return value will get converted
+		 * into an error.  Oops.  If we keep the
+		 * current syscall conventions, the only way
+		 * to fix this will be in libc.
+		 */
+		err = filp->f_owner.pid;
+		force_successful_syscall_return();
+		break;
+	case F_SETOWN:
+		err = f_setown(filp, arg, 1);
+		break;
+	case F_GETSIG:
+		err = filp->f_owner.signum;
+		break;
+	case F_SETSIG:
+		/* arg == 0 restores default behaviour. */
+		if (arg < 0 || arg > _NSIG) {
+			break;
+		}
+		err = 0;
+		filp->f_owner.signum = arg;
+		break;
+	case F_GETLEASE:
+		err = fcntl_getlease(filp);
+		break;
+	case F_SETLEASE:
+		err = fcntl_setlease(fd, filp, arg);
+		break;
+	case F_NOTIFY:
+		err = fcntl_dirnotify(fd, filp, arg);
+		break;
+	default:
+		break;
+	}
+	return err;
+}
+
+asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{	
+	struct file *filp;
+	long err = -EBADF;
+
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	err = security_file_fcntl(filp, cmd, arg);
+	if (err) {
+		fput(filp);
+		return err;
+	}
+
+	err = do_fcntl(fd, cmd, arg, filp);
+
+ 	fput(filp);
+out:
+	return err;
+}
+
+#if BITS_PER_LONG == 32
+asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+{	
+	struct file * filp;
+	long err;
+
+	err = -EBADF;
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	err = security_file_fcntl(filp, cmd, arg);
+	if (err) {
+		fput(filp);
+		return err;
+	}
+	err = -EBADF;
+	
+	switch (cmd) {
+		case F_GETLK64:
+			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
+			break;
+		case F_SETLK64:
+		case F_SETLKW64:
+			err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
+			break;
+		default:
+			err = do_fcntl(fd, cmd, arg, filp);
+			break;
+	}
+	fput(filp);
+out:
+	return err;
+}
+#endif
+
+/* Table to convert sigio signal codes into poll band bitmaps */
+
+static long band_table[NSIGPOLL] = {
+	POLLIN | POLLRDNORM,			/* POLL_IN */
+	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
+	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
+	POLLERR,				/* POLL_ERR */
+	POLLPRI | POLLRDBAND,			/* POLL_PRI */
+	POLLHUP | POLLERR			/* POLL_HUP */
+};
+
+static inline int sigio_perm(struct task_struct *p,
+                             struct fown_struct *fown, int sig)
+{
+	return (((fown->euid == 0) ||
+		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
+		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
+		!security_file_send_sigiotask(p, fown, sig));
+}
+
+static void send_sigio_to_task(struct task_struct *p,
+			       struct fown_struct *fown, 
+			       int fd,
+			       int reason)
+{
+	if (!sigio_perm(p, fown, fown->signum))
+		return;
+
+	switch (fown->signum) {
+		siginfo_t si;
+		default:
+			/* Queue a rt signal with the appropriate fd as its
+			   value.  We use SI_SIGIO as the source, not 
+			   SI_KERNEL, since kernel signals always get 
+			   delivered even if we can't queue.  Failure to
+			   queue in this case _should_ be reported; we fall
+			   back to SIGIO in that case. --sct */
+			si.si_signo = fown->signum;
+			si.si_errno = 0;
+		        si.si_code  = reason;
+			/* Make sure we are called with one of the POLL_*
+			   reasons, otherwise we could leak kernel stack into
+			   userspace.  */
+			if ((reason & __SI_MASK) != __SI_POLL)
+				BUG();
+			if (reason - POLL_IN >= NSIGPOLL)
+				si.si_band  = ~0L;
+			else
+				si.si_band = band_table[reason - POLL_IN];
+			si.si_fd    = fd;
+			if (!send_sig_info(fown->signum, &si, p))
+				break;
+		/* fall-through: fall back on the old plain SIGIO signal */
+		case 0:
+			send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
+	}
+}
+
+void send_sigio(struct fown_struct *fown, int fd, int band)
+{
+	struct task_struct *p;
+	int pid;
+	
+	read_lock(&fown->lock);
+	pid = fown->pid;
+	if (!pid)
+		goto out_unlock_fown;
+	
+	read_lock(&tasklist_lock);
+	if (pid > 0) {
+		p = find_task_by_pid(pid);
+		if (p) {
+			send_sigio_to_task(p, fown, fd, band);
+		}
+	} else {
+		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
+			send_sigio_to_task(p, fown, fd, band);
+		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
+	}
+	read_unlock(&tasklist_lock);
+ out_unlock_fown:
+	read_unlock(&fown->lock);
+}
+
+static void send_sigurg_to_task(struct task_struct *p,
+                                struct fown_struct *fown)
+{
+	if (sigio_perm(p, fown, SIGURG))
+		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
+}
+
+int send_sigurg(struct fown_struct *fown)
+{
+	struct task_struct *p;
+	int pid, ret = 0;
+	
+	read_lock(&fown->lock);
+	pid = fown->pid;
+	if (!pid)
+		goto out_unlock_fown;
+
+	ret = 1;
+	
+	read_lock(&tasklist_lock);
+	if (pid > 0) {
+		p = find_task_by_pid(pid);
+		if (p) {
+			send_sigurg_to_task(p, fown);
+		}
+	} else {
+		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
+			send_sigurg_to_task(p, fown);
+		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
+	}
+	read_unlock(&tasklist_lock);
+ out_unlock_fown:
+	read_unlock(&fown->lock);
+	return ret;
+}
+
+static DEFINE_RWLOCK(fasync_lock);
+static kmem_cache_t *fasync_cache;
+
+/*
+ * fasync_helper() is used by some character device drivers (mainly mice)
+ * to set up the fasync queue. It returns negative on error, 0 if it made
+ * no changes and positive if it added/deleted the entry.
+ */
+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
+{
+	struct fasync_struct *fa, **fp;
+	struct fasync_struct *new = NULL;
+	int result = 0;
+
+	if (on) {
+		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+		if (!new)
+			return -ENOMEM;
+	}
+	write_lock_irq(&fasync_lock);
+	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
+		if (fa->fa_file == filp) {
+			if(on) {
+				fa->fa_fd = fd;
+				kmem_cache_free(fasync_cache, new);
+			} else {
+				*fp = fa->fa_next;
+				kmem_cache_free(fasync_cache, fa);
+				result = 1;
+			}
+			goto out;
+		}
+	}
+
+	if (on) {
+		new->magic = FASYNC_MAGIC;
+		new->fa_file = filp;
+		new->fa_fd = fd;
+		new->fa_next = *fapp;
+		*fapp = new;
+		result = 1;
+	}
+out:
+	write_unlock_irq(&fasync_lock);
+	return result;
+}
+
+EXPORT_SYMBOL(fasync_helper);
+
+void __kill_fasync(struct fasync_struct *fa, int sig, int band)
+{
+	while (fa) {
+		struct fown_struct * fown;
+		if (fa->magic != FASYNC_MAGIC) {
+			printk(KERN_ERR "kill_fasync: bad magic number in "
+			       "fasync_struct!\n");
+			return;
+		}
+		fown = &fa->fa_file->f_owner;
+		/* Don't send SIGURG to processes which have not set a
+		   queued signum: SIGURG has its own default signalling
+		   mechanism. */
+		if (!(sig == SIGURG && fown->signum == 0))
+			send_sigio(fown, fa->fa_fd, band);
+		fa = fa->fa_next;
+	}
+}
+
+EXPORT_SYMBOL(__kill_fasync);
+
+void kill_fasync(struct fasync_struct **fp, int sig, int band)
+{
+	/* First a quick test without locking: usually
+	 * the list is empty.
+	 */
+	if (*fp) {
+		read_lock(&fasync_lock);
+		/* reread *fp after obtaining the lock */
+		__kill_fasync(*fp, sig, band);
+		read_unlock(&fasync_lock);
+	}
+}
+EXPORT_SYMBOL(kill_fasync);
+
+static int __init fasync_init(void)
+{
+	fasync_cache = kmem_cache_create("fasync_cache",
+		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
+	return 0;
+}
+
+module_init(fasync_init)
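
The fasync_helper()/kill_fasync() pair above is the whole SIGIO plumbing a character driver needs. A hypothetical driver ("exampledev" and its hooks are made-up names, not from this patch) would wire them up roughly like this:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>

static struct fasync_struct *exampledev_async_queue;

/* hooked into the driver's file_operations.fasync (not shown) */
static int exampledev_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &exampledev_async_queue);
}

/* called by the driver when new data becomes readable */
static void exampledev_data_ready(void)
{
	kill_fasync(&exampledev_async_queue, SIGIO, POLL_IN);
}

On release() the driver would typically call its fasync method with on == 0 so that the entry is removed from the queue again.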
diff --git a/fs/fifo.c b/fs/fifo.c
new file mode 100644
index 0000000..5455916
--- /dev/null
+++ b/fs/fifo.c
@@ -0,0 +1,155 @@
+/*
+ *  linux/fs/fifo.c
+ *
+ *  written by Paul H. Hargrove
+ *
+ *  Fixes:
+ *	10-06-1999, AV: fixed OOM handling in fifo_open(), moved
+ *			initialization there, switched to external
+ *			allocation of pipe_inode_info.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/pipe_fs_i.h>
+
+static void wait_for_partner(struct inode* inode, unsigned int* cnt)
+{
+	int cur = *cnt;	
+	while(cur == *cnt) {
+		pipe_wait(inode);
+		if(signal_pending(current))
+			break;
+	}
+}
+
+static void wake_up_partner(struct inode* inode)
+{
+	wake_up_interruptible(PIPE_WAIT(*inode));
+}
+
+static int fifo_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	ret = -ERESTARTSYS;
+	if (down_interruptible(PIPE_SEM(*inode)))
+		goto err_nolock_nocleanup;
+
+	if (!inode->i_pipe) {
+		ret = -ENOMEM;
+		if(!pipe_new(inode))
+			goto err_nocleanup;
+	}
+	filp->f_version = 0;
+
+	/* We can only do regular read/write on fifos */
+	filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+
+	switch (filp->f_mode) {
+	case 1:
+	/*
+	 *  O_RDONLY
+	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
+	 *  opened, even when there is no process writing the FIFO.
+	 */
+		filp->f_op = &read_fifo_fops;
+		PIPE_RCOUNTER(*inode)++;
+		if (PIPE_READERS(*inode)++ == 0)
+			wake_up_partner(inode);
+
+		if (!PIPE_WRITERS(*inode)) {
+			if ((filp->f_flags & O_NONBLOCK)) {
+				/* suppress POLLHUP until we have
+				 * seen a writer */
+				filp->f_version = PIPE_WCOUNTER(*inode);
+			} else {
+				wait_for_partner(inode, &PIPE_WCOUNTER(*inode));
+				if(signal_pending(current))
+					goto err_rd;
+			}
+		}
+		break;
+	
+	case 2:
+	/*
+	 *  O_WRONLY
+	 *  POSIX.1 says that O_NONBLOCK means return -1 with
+	 *  errno=ENXIO when there is no process reading the FIFO.
+	 */
+		ret = -ENXIO;
+		if ((filp->f_flags & O_NONBLOCK) && !PIPE_READERS(*inode))
+			goto err;
+
+		filp->f_op = &write_fifo_fops;
+		PIPE_WCOUNTER(*inode)++;
+		if (!PIPE_WRITERS(*inode)++)
+			wake_up_partner(inode);
+
+		if (!PIPE_READERS(*inode)) {
+			wait_for_partner(inode, &PIPE_RCOUNTER(*inode));
+			if (signal_pending(current))
+				goto err_wr;
+		}
+		break;
+	
+	case 3:
+	/*
+	 *  O_RDWR
+	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
+	 *  This implementation will NEVER block on a O_RDWR open, since
+	 *  the process can at least talk to itself.
+	 */
+		filp->f_op = &rdwr_fifo_fops;
+
+		PIPE_READERS(*inode)++;
+		PIPE_WRITERS(*inode)++;
+		PIPE_RCOUNTER(*inode)++;
+		PIPE_WCOUNTER(*inode)++;
+		if (PIPE_READERS(*inode) == 1 || PIPE_WRITERS(*inode) == 1)
+			wake_up_partner(inode);
+		break;
+
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Ok! */
+	up(PIPE_SEM(*inode));
+	return 0;
+
+err_rd:
+	if (!--PIPE_READERS(*inode))
+		wake_up_interruptible(PIPE_WAIT(*inode));
+	ret = -ERESTARTSYS;
+	goto err;
+
+err_wr:
+	if (!--PIPE_WRITERS(*inode))
+		wake_up_interruptible(PIPE_WAIT(*inode));
+	ret = -ERESTARTSYS;
+	goto err;
+
+err:
+	if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode))
+		free_pipe_info(inode);
+
+err_nocleanup:
+	up(PIPE_SEM(*inode));
+
+err_nolock_nocleanup:
+	return ret;
+}
+
+/*
+ * Dummy default file-operations: the only thing this does
+ * is contain the open that then fills in the correct operations
+ * depending on the access mode of the file...
+ */
+struct file_operations def_fifo_fops = {
+	.open		= fifo_open,	/* will set read or write pipe_fops */
+};
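
fifo_open() above implements the POSIX.1 rules quoted in its comments; from userspace the visible effect is that a non-blocking read open always returns immediately, while a non-blocking write open fails when no reader exists. A small sketch (illustrative only; the FIFO path is hypothetical and assumed to have been created with mkfifo beforehand):

#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* O_WRONLY | O_NONBLOCK with no reader present fails with ENXIO ... */
	int wfd = open("/tmp/example-fifo", O_WRONLY | O_NONBLOCK);
	if (wfd < 0 && errno == ENXIO)
		printf("write side: no reader yet, ENXIO as POSIX requires\n");

	/* ... while O_RDONLY | O_NONBLOCK succeeds even with no writer. */
	int rfd = open("/tmp/example-fifo", O_RDONLY | O_NONBLOCK);
	if (rfd >= 0)
		printf("read side: opened despite the missing writer\n");
	return 0;
}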
diff --git a/fs/file.c b/fs/file.c
new file mode 100644
index 0000000..92b5f25
--- /dev/null
+++ b/fs/file.c
@@ -0,0 +1,254 @@
+/*
+ *  linux/fs/file.c
+ *
+ *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
+ *
+ *  Manage the dynamic fd arrays in the process files_struct.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/bitops.h>
+
+
+/*
+ * Allocate an fd array, using kmalloc or vmalloc.
+ * Note: the array isn't cleared at allocation time.
+ */
+struct file ** alloc_fd_array(int num)
+{
+	struct file **new_fds;
+	int size = num * sizeof(struct file *);
+
+	if (size <= PAGE_SIZE)
+		new_fds = (struct file **) kmalloc(size, GFP_KERNEL);
+	else 
+		new_fds = (struct file **) vmalloc(size);
+	return new_fds;
+}
+
+void free_fd_array(struct file **array, int num)
+{
+	int size = num * sizeof(struct file *);
+
+	if (!array) {
+		printk (KERN_ERR "free_fd_array: array = 0 (num = %d)\n", num);
+		return;
+	}
+
+	if (num <= NR_OPEN_DEFAULT) /* Don't free the embedded fd array! */
+		return;
+	else if (size <= PAGE_SIZE)
+		kfree(array);
+	else
+		vfree(array);
+}
+
+/*
+ * Expand the fd array in the files_struct.  Called with the files
+ * spinlock held for write.
+ */
+
+static int expand_fd_array(struct files_struct *files, int nr)
+	__releases(files->file_lock)
+	__acquires(files->file_lock)
+{
+	struct file **new_fds;
+	int error, nfds;
+
+	
+	error = -EMFILE;
+	if (files->max_fds >= NR_OPEN || nr >= NR_OPEN)
+		goto out;
+
+	nfds = files->max_fds;
+	spin_unlock(&files->file_lock);
+
+	/* 
+	 * Expand to the max in easy steps, and keep expanding it until
+	 * we have enough for the requested fd array size. 
+	 */
+
+	do {
+#if NR_OPEN_DEFAULT < 256
+		if (nfds < 256)
+			nfds = 256;
+		else 
+#endif
+		if (nfds < (PAGE_SIZE / sizeof(struct file *)))
+			nfds = PAGE_SIZE / sizeof(struct file *);
+		else {
+			nfds = nfds * 2;
+			if (nfds > NR_OPEN)
+				nfds = NR_OPEN;
+		}
+	} while (nfds <= nr);
+
+	error = -ENOMEM;
+	new_fds = alloc_fd_array(nfds);
+	spin_lock(&files->file_lock);
+	if (!new_fds)
+		goto out;
+
+	/* Copy the existing array and install the new pointer */
+
+	if (nfds > files->max_fds) {
+		struct file **old_fds;
+		int i;
+		
+		old_fds = xchg(&files->fd, new_fds);
+		i = xchg(&files->max_fds, nfds);
+
+		/* Don't copy/clear the array if we are creating a new
+		   fd array for fork() */
+		if (i) {
+			memcpy(new_fds, old_fds, i * sizeof(struct file *));
+			/* clear the remainder of the array */
+			memset(&new_fds[i], 0,
+			       (nfds-i) * sizeof(struct file *)); 
+
+			spin_unlock(&files->file_lock);
+			free_fd_array(old_fds, i);
+			spin_lock(&files->file_lock);
+		}
+	} else {
+		/* Somebody expanded the array while we slept ... */
+		spin_unlock(&files->file_lock);
+		free_fd_array(new_fds, nfds);
+		spin_lock(&files->file_lock);
+	}
+	error = 0;
+out:
+	return error;
+}
+
+/*
+ * Allocate an fdset array, using kmalloc or vmalloc.
+ * Note: the array isn't cleared at allocation time.
+ */
+fd_set * alloc_fdset(int num)
+{
+	fd_set *new_fdset;
+	int size = num / 8;
+
+	if (size <= PAGE_SIZE)
+		new_fdset = (fd_set *) kmalloc(size, GFP_KERNEL);
+	else
+		new_fdset = (fd_set *) vmalloc(size);
+	return new_fdset;
+}
+
+void free_fdset(fd_set *array, int num)
+{
+	int size = num / 8;
+
+	if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */
+		return;
+	else if (size <= PAGE_SIZE)
+		kfree(array);
+	else
+		vfree(array);
+}
+
+/*
+ * Expand the fdset in the files_struct.  Called with the files spinlock
+ * held for write.
+ */
+static int expand_fdset(struct files_struct *files, int nr)
+	__releases(files->file_lock)
+	__acquires(files->file_lock)
+{
+	fd_set *new_openset = NULL, *new_execset = NULL;
+	int error, nfds = 0;
+
+	error = -EMFILE;
+	if (files->max_fdset >= NR_OPEN || nr >= NR_OPEN)
+		goto out;
+
+	nfds = files->max_fdset;
+	spin_unlock(&files->file_lock);
+
+	/* Expand to the max in easy steps */
+	do {
+		if (nfds < (PAGE_SIZE * 8))
+			nfds = PAGE_SIZE * 8;
+		else {
+			nfds = nfds * 2;
+			if (nfds > NR_OPEN)
+				nfds = NR_OPEN;
+		}
+	} while (nfds <= nr);
+
+	error = -ENOMEM;
+	new_openset = alloc_fdset(nfds);
+	new_execset = alloc_fdset(nfds);
+	spin_lock(&files->file_lock);
+	if (!new_openset || !new_execset)
+		goto out;
+
+	error = 0;
+	
+	/* Copy the existing tables and install the new pointers */
+	if (nfds > files->max_fdset) {
+		int i = files->max_fdset / (sizeof(unsigned long) * 8);
+		int count = (nfds - files->max_fdset) / 8;
+		
+		/* 
+		 * Don't copy the entire array if the current fdset is
+		 * not yet initialised.  
+		 */
+		if (i) {
+			memcpy (new_openset, files->open_fds, files->max_fdset/8);
+			memcpy (new_execset, files->close_on_exec, files->max_fdset/8);
+			memset (&new_openset->fds_bits[i], 0, count);
+			memset (&new_execset->fds_bits[i], 0, count);
+		}
+		
+		nfds = xchg(&files->max_fdset, nfds);
+		new_openset = xchg(&files->open_fds, new_openset);
+		new_execset = xchg(&files->close_on_exec, new_execset);
+		spin_unlock(&files->file_lock);
+		free_fdset (new_openset, nfds);
+		free_fdset (new_execset, nfds);
+		spin_lock(&files->file_lock);
+		return 0;
+	} 
+	/* Somebody expanded the array while we slept ... */
+
+out:
+	spin_unlock(&files->file_lock);
+	if (new_openset)
+		free_fdset(new_openset, nfds);
+	if (new_execset)
+		free_fdset(new_execset, nfds);
+	spin_lock(&files->file_lock);
+	return error;
+}
+
+/*
+ * Expand files.
+ * Return <0 on error; 0 nothing done; 1 files expanded, we may have blocked.
+ * Should be called with the files->file_lock spinlock held for write.
+ */
+int expand_files(struct files_struct *files, int nr)
+{
+	int err, expand = 0;
+
+	if (nr >= files->max_fdset) {
+		expand = 1;
+		if ((err = expand_fdset(files, nr)))
+			goto out;
+	}
+	if (nr >= files->max_fds) {
+		expand = 1;
+		if ((err = expand_fd_array(files, nr)))
+			goto out;
+	}
+	err = expand;
+out:
+	return err;
+}
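
The sizing loop in expand_fd_array() above grows the table in a fixed progression: the embedded NR_OPEN_DEFAULT slots, then 256, then one page worth of pointers, then doubling, clamped at NR_OPEN (requests at or above NR_OPEN are rejected before the loop is entered). A standalone sketch of that progression, assuming a 4096-byte page, 8-byte pointers and NR_OPEN == 1024*1024; the real code uses the kernel's constants:

#include <stdio.h>

static int next_fd_array_size(int nfds, int nr)
{
	/* caller guarantees nr < 1024*1024, as expand_fd_array() does */
	do {
		if (nfds < 256)
			nfds = 256;
		else if (nfds < 4096 / 8)
			nfds = 4096 / 8;
		else {
			nfds *= 2;
			if (nfds > 1024 * 1024)
				nfds = 1024 * 1024;
		}
	} while (nfds <= nr);
	return nfds;
}

int main(void)
{
	/* e.g. asking for fd 2000 starting from a 32-slot table yields 2048 */
	printf("%d\n", next_fd_array_size(32, 2000));
	return 0;
}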
diff --git a/fs/file_table.c b/fs/file_table.c
new file mode 100644
index 0000000..03d83cb
--- /dev/null
+++ b/fs/file_table.c
@@ -0,0 +1,255 @@
+/*
+ *  linux/fs/file_table.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/eventpoll.h>
+#include <linux/mount.h>
+#include <linux/cdev.h>
+
+/* sysctl tunables... */
+struct files_stat_struct files_stat = {
+	.max_files = NR_FILE
+};
+
+EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
+
+/* public. Not pretty! */
+ __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+
+static DEFINE_SPINLOCK(filp_count_lock);
+
+/* slab constructors and destructors are called from arbitrary
+ * context and must be fully threaded - use a local spinlock
+ * to protect files_stat.nr_files
+ */
+void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
+{
+	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		unsigned long flags;
+		spin_lock_irqsave(&filp_count_lock, flags);
+		files_stat.nr_files++;
+		spin_unlock_irqrestore(&filp_count_lock, flags);
+	}
+}
+
+void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&filp_count_lock, flags);
+	files_stat.nr_files--;
+	spin_unlock_irqrestore(&filp_count_lock, flags);
+}
+
+static inline void file_free(struct file *f)
+{
+	kmem_cache_free(filp_cachep, f);
+}
+
+/* Find an unused file structure and return a pointer to it.
+ * Returns NULL if there are no more free file structures or
+ * we run out of memory.
+ */
+struct file *get_empty_filp(void)
+{
+static int old_max;
+	struct file * f;
+
+	/*
+	 * Privileged users can go above max_files
+	 */
+	if (files_stat.nr_files < files_stat.max_files ||
+				capable(CAP_SYS_ADMIN)) {
+		f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+		if (f) {
+			memset(f, 0, sizeof(*f));
+			if (security_file_alloc(f)) {
+				file_free(f);
+				goto fail;
+			}
+			eventpoll_init_file(f);
+			atomic_set(&f->f_count, 1);
+			f->f_uid = current->fsuid;
+			f->f_gid = current->fsgid;
+			rwlock_init(&f->f_owner.lock);
+			/* f->f_version: 0 */
+			INIT_LIST_HEAD(&f->f_list);
+			f->f_maxcount = INT_MAX;
+			return f;
+		}
+	}
+
+	/* Ran out of filps - report that */
+	if (files_stat.max_files >= old_max) {
+		printk(KERN_INFO "VFS: file-max limit %d reached\n",
+					files_stat.max_files);
+		old_max = files_stat.max_files;
+	} else {
+		/* Big problems... */
+		printk(KERN_WARNING "VFS: filp allocation failed\n");
+	}
+fail:
+	return NULL;
+}
+
+EXPORT_SYMBOL(get_empty_filp);
+
+void fastcall fput(struct file *file)
+{
+	if (atomic_dec_and_test(&file->f_count))
+		__fput(file);
+}
+
+EXPORT_SYMBOL(fput);
+
+/* __fput is called from task context when aio completion releases the last
+ * use of a struct file *.  Do not use otherwise.
+ */
+void fastcall __fput(struct file *file)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct vfsmount *mnt = file->f_vfsmnt;
+	struct inode *inode = dentry->d_inode;
+
+	might_sleep();
+	/*
+	 * The function eventpoll_release() should be the first called
+	 * in the file cleanup chain.
+	 */
+	eventpoll_release(file);
+	locks_remove_flock(file);
+
+	if (file->f_op && file->f_op->release)
+		file->f_op->release(inode, file);
+	security_file_free(file);
+	if (unlikely(inode->i_cdev != NULL))
+		cdev_put(inode->i_cdev);
+	fops_put(file->f_op);
+	if (file->f_mode & FMODE_WRITE)
+		put_write_access(inode);
+	file_kill(file);
+	file->f_dentry = NULL;
+	file->f_vfsmnt = NULL;
+	file_free(file);
+	dput(dentry);
+	mntput(mnt);
+}
+
+struct file fastcall *fget(unsigned int fd)
+{
+	struct file *file;
+	struct files_struct *files = current->files;
+
+	spin_lock(&files->file_lock);
+	file = fcheck_files(files, fd);
+	if (file)
+		get_file(file);
+	spin_unlock(&files->file_lock);
+	return file;
+}
+
+EXPORT_SYMBOL(fget);
+
+/*
+ * Lightweight file lookup - no refcnt increment if fd table isn't shared. 
+ * You can use this only if it is guaranteed that the current task already
+ * holds a refcnt to that file. That check has to be done at fget() only
+ * and a flag is returned to be passed to the corresponding fput_light().
+ * The fd table must not be cloned between an fget_light/fput_light pair.
+ */
+struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
+{
+	struct file *file;
+	struct files_struct *files = current->files;
+
+	*fput_needed = 0;
+	if (likely((atomic_read(&files->count) == 1))) {
+		file = fcheck_files(files, fd);
+	} else {
+		spin_lock(&files->file_lock);
+		file = fcheck_files(files, fd);
+		if (file) {
+			get_file(file);
+			*fput_needed = 1;
+		}
+		spin_unlock(&files->file_lock);
+	}
+	return file;
+}
+
+
+void put_filp(struct file *file)
+{
+	if (atomic_dec_and_test(&file->f_count)) {
+		security_file_free(file);
+		file_kill(file);
+		file_free(file);
+	}
+}
+
+void file_move(struct file *file, struct list_head *list)
+{
+	if (!list)
+		return;
+	file_list_lock();
+	list_move(&file->f_list, list);
+	file_list_unlock();
+}
+
+void file_kill(struct file *file)
+{
+	if (!list_empty(&file->f_list)) {
+		file_list_lock();
+		list_del_init(&file->f_list);
+		file_list_unlock();
+	}
+}
+
+int fs_may_remount_ro(struct super_block *sb)
+{
+	struct list_head *p;
+
+	/* Check that no files are currently opened for writing. */
+	file_list_lock();
+	list_for_each(p, &sb->s_files) {
+		struct file *file = list_entry(p, struct file, f_list);
+		struct inode *inode = file->f_dentry->d_inode;
+
+		/* File with pending delete? */
+		if (inode->i_nlink == 0)
+			goto too_bad;
+
+		/* Writeable file? */
+		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
+			goto too_bad;
+	}
+	file_list_unlock();
+	return 1; /* Tis' cool bro. */
+too_bad:
+	file_list_unlock();
+	return 0;
+}
+
+void __init files_init(unsigned long mempages)
+{ 
+	int n; 
+	/* One file with associated inode and dcache is very roughly 1K. 
+	 * Per default don't use more than 10% of our memory for files. 
+	 */ 
+
+	n = (mempages * (PAGE_SIZE / 1024)) / 10;
+	files_stat.max_files = n; 
+	if (files_stat.max_files < NR_FILE)
+		files_stat.max_files = NR_FILE;
+} 
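
The comment above fget_light() spells out its contract; in practice a syscall uses the pair roughly like this (a hypothetical sys_example, not part of this patch):

#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/errno.h>

asmlinkage long sys_example(unsigned int fd)
{
	struct file *file;
	int fput_needed;
	long err = -EBADF;

	file = fget_light(fd, &fput_needed);
	if (file) {
		/* ... work with *file while the reference is held ... */
		err = 0;
		fput_light(file, fput_needed);
	}
	return err;
}

fput_light() only drops the reference when fget_light() actually took one, which is what keeps the single-threaded fast path cheap.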
diff --git a/fs/filesystems.c b/fs/filesystems.c
new file mode 100644
index 0000000..44082bf
--- /dev/null
+++ b/fs/filesystems.c
@@ -0,0 +1,236 @@
+/*
+ *  linux/fs/filesystems.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  table of configured filesystems
+ */
+
+#include <linux/syscalls.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+/*
+ * Handling of filesystem drivers list.
+ * Rules:
+ *	Inclusion to/removals from/scanning of list are protected by spinlock.
+ *	During unload the module must call unregister_filesystem().
+ *	We can access the fields of list element if:
+ *		1) spinlock is held or
+ *		2) we hold the reference to the module.
+ *	The latter can be guaranteed by call of try_module_get(); if it
+ *	returned 0 we must skip the element, otherwise we got the reference.
+ *	Once the reference is obtained we can drop the spinlock.
+ */
+
+static struct file_system_type *file_systems;
+static DEFINE_RWLOCK(file_systems_lock);
+
+/* WARNING: This can be used only if we _already_ own a reference */
+void get_filesystem(struct file_system_type *fs)
+{
+	__module_get(fs->owner);
+}
+
+void put_filesystem(struct file_system_type *fs)
+{
+	module_put(fs->owner);
+}
+
+static struct file_system_type **find_filesystem(const char *name)
+{
+	struct file_system_type **p;
+	for (p=&file_systems; *p; p=&(*p)->next)
+		if (strcmp((*p)->name,name) == 0)
+			break;
+	return p;
+}
+
+/**
+ *	register_filesystem - register a new filesystem
+ *	@fs: the file system structure
+ *
+ *	Adds the file system passed to the list of file systems the kernel
+ *	is aware of for mount and other syscalls. Returns 0 on success,
+ *	or a negative errno code on an error.
+ *
+ *	The &struct file_system_type that is passed is linked into the kernel 
+ *	structures and must not be freed until the file system has been
+ *	unregistered.
+ */
+ 
+int register_filesystem(struct file_system_type * fs)
+{
+	int res = 0;
+	struct file_system_type ** p;
+
+	if (!fs)
+		return -EINVAL;
+	if (fs->next)
+		return -EBUSY;
+	INIT_LIST_HEAD(&fs->fs_supers);
+	write_lock(&file_systems_lock);
+	p = find_filesystem(fs->name);
+	if (*p)
+		res = -EBUSY;
+	else
+		*p = fs;
+	write_unlock(&file_systems_lock);
+	return res;
+}
+
+EXPORT_SYMBOL(register_filesystem);
+
+/**
+ *	unregister_filesystem - unregister a file system
+ *	@fs: filesystem to unregister
+ *
+ *	Remove a file system that was previously successfully registered
+ *	with the kernel. An error is returned if the file system is not found.
+ *	Zero is returned on a success.
+ *	
+ *	Once this function has returned the &struct file_system_type structure
+ *	may be freed or reused.
+ */
+ 
+int unregister_filesystem(struct file_system_type * fs)
+{
+	struct file_system_type ** tmp;
+
+	write_lock(&file_systems_lock);
+	tmp = &file_systems;
+	while (*tmp) {
+		if (fs == *tmp) {
+			*tmp = fs->next;
+			fs->next = NULL;
+			write_unlock(&file_systems_lock);
+			return 0;
+		}
+		tmp = &(*tmp)->next;
+	}
+	write_unlock(&file_systems_lock);
+	return -EINVAL;
+}
+
+EXPORT_SYMBOL(unregister_filesystem);
+
+static int fs_index(const char __user * __name)
+{
+	struct file_system_type * tmp;
+	char * name;
+	int err, index;
+
+	name = getname(__name);
+	err = PTR_ERR(name);
+	if (IS_ERR(name))
+		return err;
+
+	err = -EINVAL;
+	read_lock(&file_systems_lock);
+	for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) {
+		if (strcmp(tmp->name,name) == 0) {
+			err = index;
+			break;
+		}
+	}
+	read_unlock(&file_systems_lock);
+	putname(name);
+	return err;
+}
+
+static int fs_name(unsigned int index, char __user * buf)
+{
+	struct file_system_type * tmp;
+	int len, res;
+
+	read_lock(&file_systems_lock);
+	for (tmp = file_systems; tmp; tmp = tmp->next, index--)
+		if (index <= 0 && try_module_get(tmp->owner))
+			break;
+	read_unlock(&file_systems_lock);
+	if (!tmp)
+		return -EINVAL;
+
+	/* OK, we got the reference, so we can safely block */
+	len = strlen(tmp->name) + 1;
+	res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0;
+	put_filesystem(tmp);
+	return res;
+}
+
+static int fs_maxindex(void)
+{
+	struct file_system_type * tmp;
+	int index;
+
+	read_lock(&file_systems_lock);
+	for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++)
+		;
+	read_unlock(&file_systems_lock);
+	return index;
+}
+
+/*
+ * Whee.. Weird sysv syscall. 
+ */
+asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2)
+{
+	int retval = -EINVAL;
+
+	switch (option) {
+		case 1:
+			retval = fs_index((const char __user *) arg1);
+			break;
+
+		case 2:
+			retval = fs_name(arg1, (char __user *) arg2);
+			break;
+
+		case 3:
+			retval = fs_maxindex();
+			break;
+	}
+	return retval;
+}
+
+int get_filesystem_list(char * buf)
+{
+	int len = 0;
+	struct file_system_type * tmp;
+
+	read_lock(&file_systems_lock);
+	tmp = file_systems;
+	while (tmp && len < PAGE_SIZE - 80) {
+		len += sprintf(buf+len, "%s\t%s\n",
+			(tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev",
+			tmp->name);
+		tmp = tmp->next;
+	}
+	read_unlock(&file_systems_lock);
+	return len;
+}
+
+struct file_system_type *get_fs_type(const char *name)
+{
+	struct file_system_type *fs;
+
+	read_lock(&file_systems_lock);
+	fs = *(find_filesystem(name));
+	if (fs && !try_module_get(fs->owner))
+		fs = NULL;
+	read_unlock(&file_systems_lock);
+	if (!fs && (request_module("%s", name) == 0)) {
+		read_lock(&file_systems_lock);
+		fs = *(find_filesystem(name));
+		if (fs && !try_module_get(fs->owner))
+			fs = NULL;
+		read_unlock(&file_systems_lock);
+	}
+	return fs;
+}
+
+EXPORT_SYMBOL(get_fs_type);
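
register_filesystem()/unregister_filesystem() above are the entry points every filesystem module uses to hook into this table. A hypothetical minimal module using this kernel's API ("examplefs", example_get_sb and its stub body are made up, not from this patch; a real block-based filesystem would call get_sb_bdev() with its own fill_super routine instead of returning an error):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct super_block *example_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	/* stub: a real filesystem would read its superblock here */
	return ERR_PTR(-ENOSYS);
}

static struct file_system_type example_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.get_sb		= example_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init init_example_fs(void)
{
	return register_filesystem(&example_fs_type);
}

static void __exit exit_example_fs(void)
{
	unregister_filesystem(&example_fs_type);
}

module_init(init_example_fs)
module_exit(exit_example_fs)
MODULE_LICENSE("GPL");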
diff --git a/fs/freevxfs/Makefile b/fs/freevxfs/Makefile
new file mode 100644
index 0000000..87ad097
--- /dev/null
+++ b/fs/freevxfs/Makefile
@@ -0,0 +1,8 @@
+#
+# VxFS Makefile
+#
+
+obj-$(CONFIG_VXFS_FS) += freevxfs.o
+
+freevxfs-objs := vxfs_bmap.o vxfs_fshead.o vxfs_immed.o vxfs_inode.o \
+		 vxfs_lookup.o vxfs_olt.o vxfs_subr.o vxfs_super.o
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
new file mode 100644
index 0000000..8da0252
--- /dev/null
+++ b/fs/freevxfs/vxfs.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_SUPER_H_
+#define _VXFS_SUPER_H_
+
+/*
+ * Veritas filesystem driver - superblock structure.
+ *
+ * This file contains the definition of the disk and core
+ * superblocks of the Veritas Filesystem.
+ */
+#include <linux/types.h>
+#include "vxfs_kcompat.h"
+
+
+/*
+ * Data types for use with the VxFS ondisk format.
+ */
+typedef	int32_t		vx_daddr_t;
+typedef int32_t		vx_ino_t;
+
+/*
+ * Superblock magic number (vxfs_super->vs_magic).
+ */
+#define VXFS_SUPER_MAGIC	0xa501FCF5
+
+/*
+ * The root inode.
+ */
+#define VXFS_ROOT_INO		2
+
+/*
+ * Num of entries in free extent array
+ */
+#define VXFS_NEFREE		32
+
+
+/*
+ * VxFS superblock (disk).
+ */
+struct vxfs_sb {
+	/*
+	 * Readonly fields for the version 1 superblock.
+	 *
+	 * Many of these fields are no longer used by version 2
+	 * and newer filesystems.
+	 */
+	u_int32_t	vs_magic;		/* Magic number */
+	int32_t		vs_version;		/* VxFS version */
+	u_int32_t	vs_ctime;		/* create time - secs */
+	u_int32_t	vs_cutime;		/* create time - usecs */
+	int32_t		__unused1;		/* unused */
+	int32_t		__unused2;		/* unused */
+	vx_daddr_t	vs_old_logstart;	/* obsolete */
+	vx_daddr_t	vs_old_logend;		/* obsolete */
+	int32_t		vs_bsize;		/* block size */
+	int32_t		vs_size;		/* number of blocks */
+	int32_t		vs_dsize;		/* number of data blocks */
+	u_int32_t	vs_old_ninode;		/* obsolete */
+	int32_t		vs_old_nau;		/* obsolete */
+	int32_t		__unused3;		/* unused */
+	int32_t		vs_old_defiextsize;	/* obsolete */
+	int32_t		vs_old_ilbsize;		/* obsolete */
+	int32_t		vs_immedlen;		/* size of immediate data area */
+	int32_t		vs_ndaddr;		/* number of direct extents */
+	vx_daddr_t	vs_firstau;		/* address of first AU */
+	vx_daddr_t	vs_emap;		/* offset of extent map in AU */
+	vx_daddr_t	vs_imap;		/* offset of inode map in AU */
+	vx_daddr_t	vs_iextop;		/* offset of ExtOp. map in AU */
+	vx_daddr_t	vs_istart;		/* offset of inode list in AU */
+	vx_daddr_t	vs_bstart;		/* offset of fdblock in AU */
+	vx_daddr_t	vs_femap;		/* aufirst + emap */
+	vx_daddr_t	vs_fimap;		/* aufirst + imap */
+	vx_daddr_t	vs_fiextop;		/* aufirst + iextop */
+	vx_daddr_t	vs_fistart;		/* aufirst + istart */
+	vx_daddr_t	vs_fbstart;		/* aufirst + bstart */
+	int32_t		vs_nindir;		/* number of entries in indir */
+	int32_t		vs_aulen;		/* length of AU in blocks */
+	int32_t		vs_auimlen;		/* length of imap in blocks */
+	int32_t		vs_auemlen;		/* length of emap in blocks */
+	int32_t		vs_auilen;		/* length of ilist in blocks */
+	int32_t		vs_aupad;		/* length of pad in blocks */
+	int32_t		vs_aublocks;		/* data blocks in AU */
+	int32_t		vs_maxtier;		/* log base 2 of aublocks */
+	int32_t		vs_inopb;		/* number of inodes per blk */
+	int32_t		vs_old_inopau;		/* obsolete */
+	int32_t		vs_old_inopilb;		/* obsolete */
+	int32_t		vs_old_ndiripau;	/* obsolete */
+	int32_t		vs_iaddrlen;		/* size of indirect addr ext. */
+	int32_t		vs_bshift;		/* log base 2 of bsize */
+	int32_t		vs_inoshift;		/* log base 2 of inobp */
+	int32_t		vs_bmask;		/* ~( bsize - 1 ) */
+	int32_t		vs_boffmask;		/* bsize - 1 */
+	int32_t		vs_old_inomask;		/* old_inopilb - 1 */
+	int32_t		vs_checksum;		/* checksum of V1 data */
+	
+	/*
+	 * Version 1, writable
+	 */
+	int32_t		vs_free;		/* number of free blocks */
+	int32_t		vs_ifree;		/* number of free inodes */
+	int32_t		vs_efree[VXFS_NEFREE];	/* number of free extents by size */
+	int32_t		vs_flags;		/* flags ?!? */
+	u_int8_t	vs_mod;			/* filesystem has been changed */
+	u_int8_t	vs_clean;		/* clean FS */
+	u_int16_t	__unused4;		/* unused */
+	u_int32_t	vs_firstlogid;		/* mount time log ID */
+	u_int32_t	vs_wtime;		/* last time written - sec */
+	u_int32_t	vs_wutime;		/* last time written - usec */
+	u_int8_t	vs_fname[6];		/* FS name */
+	u_int8_t	vs_fpack[6];		/* FS pack name */
+	int32_t		vs_logversion;		/* log format version */
+	int32_t		__unused5;		/* unused */
+	
+	/*
+	 * Version 2, Read-only
+	 */
+	vx_daddr_t	vs_oltext[2];		/* OLT extent and replica */
+	int32_t		vs_oltsize;		/* OLT extent size */
+	int32_t		vs_iauimlen;		/* size of inode map */
+	int32_t		vs_iausize;		/* size of IAU in blocks */
+	int32_t		vs_dinosize;		/* size of inode in bytes */
+	int32_t		vs_old_dniaddr;		/* indir levels per inode */
+	int32_t		vs_checksum2;		/* checksum of V2 RO */
+
+	/*
+	 * Actually much more...
+	 */
+};
+
+
+/*
+ * In core superblock filesystem private data for VxFS.
+ */
+struct vxfs_sb_info {
+	struct vxfs_sb		*vsi_raw;	/* raw (on disk) superblock */
+	struct buffer_head	*vsi_bp;	/* buffer for raw superblock*/
+	struct inode		*vsi_fship;	/* fileset header inode */
+	struct inode		*vsi_ilist;	/* inode list inode */
+	struct inode		*vsi_stilist;	/* structural inode list inode */
+	u_long			vsi_iext;	/* initial inode list */
+	ino_t			vsi_fshino;	/* fileset header inode */
+	daddr_t			vsi_oltext;	/* OLT extent */
+	daddr_t			vsi_oltsize;	/* OLT size */
+};
+
+
+/*
+ * File modes.  File types above 0xf000 are vxfs internal only, they should
+ * not be passed back to higher levels of the system.  vxfs file types must
+ * never have one of the regular file type bits set.
+ */
+enum vxfs_mode {
+	VXFS_ISUID = 0x00000800,	/* setuid */
+	VXFS_ISGID = 0x00000400,	/* setgid */
+	VXFS_ISVTX = 0x00000200,	/* sticky bit */
+	VXFS_IREAD = 0x00000100,	/* read */
+	VXFS_IWRITE = 0x00000080,	/* write */
+	VXFS_IEXEC = 0x00000040,	/* exec */
+
+	VXFS_IFIFO = 0x00001000,	/* Named pipe */
+	VXFS_IFCHR = 0x00002000,	/* Character device */
+	VXFS_IFDIR = 0x00004000,	/* Directory */
+	VXFS_IFNAM = 0x00005000,	/* Xenix device ?? */
+	VXFS_IFBLK = 0x00006000,	/* Block device */
+	VXFS_IFREG = 0x00008000,	/* Regular file */
+	VXFS_IFCMP = 0x00009000,	/* Compressed file ?!? */
+	VXFS_IFLNK = 0x0000a000,	/* Symlink */
+	VXFS_IFSOC = 0x0000c000,	/* Socket */
+
+	/* VxFS internal */
+	VXFS_IFFSH = 0x10000000,	/* Fileset header */
+	VXFS_IFILT = 0x20000000,	/* Inode list */
+	VXFS_IFIAU = 0x30000000,	/* Inode allocation unit */
+	VXFS_IFCUT = 0x40000000,	/* Current usage table */
+	VXFS_IFATT = 0x50000000,	/* Attr. inode */
+	VXFS_IFLCT = 0x60000000,	/* Link count table */
+	VXFS_IFIAT = 0x70000000,	/* Indirect attribute file */
+	VXFS_IFEMR = 0x80000000,	/* Extent map reorg file */
+	VXFS_IFQUO = 0x90000000,	/* BSD quota file */
+	VXFS_IFPTI = 0xa0000000,	/* "Pass through" inode */
+	VXFS_IFLAB = 0x11000000,	/* Device label file */
+	VXFS_IFOLT = 0x12000000,	/* OLT file */
+	VXFS_IFLOG = 0x13000000,	/* Log file */
+	VXFS_IFEMP = 0x14000000,	/* Extent map file */
+	VXFS_IFEAU = 0x15000000,	/* Extent AU file */
+	VXFS_IFAUS = 0x16000000,	/* Extent AU summary file */
+	VXFS_IFDEV = 0x17000000,	/* Device config file */
+
+};
+
+#define	VXFS_TYPE_MASK		0xfffff000
+
+#define VXFS_IS_TYPE(ip,type)	(((ip)->vii_mode & VXFS_TYPE_MASK) == (type))
+#define VXFS_ISFIFO(x)		VXFS_IS_TYPE((x),VXFS_IFIFO)
+#define VXFS_ISCHR(x)		VXFS_IS_TYPE((x),VXFS_IFCHR)
+#define VXFS_ISDIR(x)		VXFS_IS_TYPE((x),VXFS_IFDIR)
+#define VXFS_ISNAM(x)		VXFS_IS_TYPE((x),VXFS_IFNAM)
+#define VXFS_ISBLK(x)		VXFS_IS_TYPE((x),VXFS_IFBLK)
+#define VXFS_ISLNK(x)		VXFS_IS_TYPE((x),VXFS_IFLNK)
+#define VXFS_ISREG(x)		VXFS_IS_TYPE((x),VXFS_IFREG)
+#define VXFS_ISCMP(x)		VXFS_IS_TYPE((x),VXFS_IFCMP)
+#define VXFS_ISSOC(x)		VXFS_IS_TYPE((x),VXFS_IFSOC)
+
+#define VXFS_ISFSH(x)		VXFS_IS_TYPE((x),VXFS_IFFSH)
+#define VXFS_ISILT(x)		VXFS_IS_TYPE((x),VXFS_IFILT)
+
+/*
+ * Inode organisation types.
+ */
+enum {
+	VXFS_ORG_NONE	= 0,	/* Inode has *no* format ?!? */
+	VXFS_ORG_EXT4	= 1,	/* Ext4 */
+	VXFS_ORG_IMMED	= 2,	/* All data stored in inode */
+	VXFS_ORG_TYPED	= 3,	/* Typed extents */
+};
+
+#define VXFS_IS_ORG(ip,org)	((ip)->vii_orgtype == (org))
+#define VXFS_ISNONE(ip)		VXFS_IS_ORG((ip), VXFS_ORG_NONE)
+#define VXFS_ISEXT4(ip)		VXFS_IS_ORG((ip), VXFS_ORG_EXT4)
+#define VXFS_ISIMMED(ip)	VXFS_IS_ORG((ip), VXFS_ORG_IMMED)
+#define VXFS_ISTYPED(ip)	VXFS_IS_ORG((ip), VXFS_ORG_TYPED)
+
+
+/*
+ * Get filesystem private data from VFS inode.
+ */
+#define VXFS_INO(ip) \
+	((struct vxfs_inode_info *)(ip)->u.generic_ip)
+
+/*
+ * Get filesystem private data from VFS superblock.
+ */
+#define VXFS_SBI(sbp) \
+	((struct vxfs_sb_info *)(sbp)->s_fs_info)
+
+#endif /* _VXFS_SUPER_H_ */
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
new file mode 100644
index 0000000..bc4b57d
--- /dev/null
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - filesystem to disk block mapping.
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+
+#include "vxfs.h"
+#include "vxfs_inode.h"
+
+
+#ifdef DIAGNOSTIC
+static void
+vxfs_typdump(struct vxfs_typed *typ)
+{
+	printk(KERN_DEBUG "type=%Lu ", typ->vt_hdr >> VXFS_TYPED_TYPESHIFT);
+	printk("offset=%Lx ", typ->vt_hdr & VXFS_TYPED_OFFSETMASK);
+	printk("block=%x ", typ->vt_block);
+	printk("size=%x\n", typ->vt_size);
+}
+#endif
+
+/**
+ * vxfs_bmap_ext4 - do bmap for ext4 extents
+ * @ip:		pointer to the inode we do bmap for
+ * @iblock:	logical block.
+ *
+ * Description:
+ *   vxfs_bmap_ext4 performs the bmap operation for inodes with
+ *   ext4-style extents (which are much like the traditional UNIX
+ *   inode organisation).
+ *
+ * Returns:
+ *   The physical block number on success, else Zero.
+ */
+static daddr_t
+vxfs_bmap_ext4(struct inode *ip, long bn)
+{
+	struct super_block *sb = ip->i_sb;
+	struct vxfs_inode_info *vip = VXFS_INO(ip);
+	unsigned long bsize = sb->s_blocksize;
+	u32 indsize = vip->vii_ext4.ve4_indsize;
+	int i;
+
+	if (indsize > sb->s_blocksize)
+		goto fail_size;
+
+	for (i = 0; i < VXFS_NDADDR; i++) {
+		struct direct *d = vip->vii_ext4.ve4_direct + i;
+		if (bn >= 0 && bn < d->size)
+			return (bn + d->extent);
+		bn -= d->size;
+	}
+
+	if ((bn / (indsize * indsize * bsize / 4)) == 0) {
+		struct buffer_head *buf;
+		daddr_t	bno;
+		u32 *indir;
+
+		buf = sb_bread(sb, vip->vii_ext4.ve4_indir[0]);
+		if (!buf || !buffer_mapped(buf))
+			goto fail_buf;
+
+		indir = (u32 *)buf->b_data;
+		bno = indir[(bn/indsize) % (indsize*bn)] + (bn%indsize);
+
+		brelse(buf);
+		return bno;
+	} else
+		printk(KERN_WARNING "no matching indir?");
+
+	return 0;
+
+fail_size:
+	printk("vxfs: indirect extent to big!\n");
+fail_buf:
+	return 0;
+}
+
+/**
+ * vxfs_bmap_indir - recursion for vxfs_bmap_typed
+ * @ip:		pointer to the inode we do bmap for
+ * @indir:	indirect block we start reading at
+ * @size:	size of the typed area to search
+ * @block:	partial result from further searches
+ *
+ * Description:
+ *   vxfs_bmap_indir reads a &struct vxfs_typed at @indir
+ *   and performs the type-defined action.
+ *
+ * Return Value:
+ *   The physical block number on success, else Zero.
+ *
+ * Note:
+ *   Kernel stack is scarce.  Unrecurse?
+ */
+static daddr_t
+vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
+{
+	struct buffer_head		*bp = NULL;
+	daddr_t				pblock = 0;
+	int				i;
+
+	for (i = 0; i < size * VXFS_TYPED_PER_BLOCK(ip->i_sb); i++) {
+		struct vxfs_typed	*typ;
+		int64_t			off;
+
+		bp = sb_bread(ip->i_sb,
+				indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)));
+		if (!buffer_mapped(bp))
+			return 0;
+
+		typ = ((struct vxfs_typed *)bp->b_data) +
+			(i % VXFS_TYPED_PER_BLOCK(ip->i_sb));
+		off = (typ->vt_hdr & VXFS_TYPED_OFFSETMASK);
+
+		if (block < off) {
+			brelse(bp);
+			continue;
+		}
+
+		switch ((u_int32_t)(typ->vt_hdr >> VXFS_TYPED_TYPESHIFT)) {
+		case VXFS_TYPED_INDIRECT:
+			pblock = vxfs_bmap_indir(ip, typ->vt_block,
+					typ->vt_size, block - off);
+			if (pblock == -2)
+				break;
+			goto out;
+		case VXFS_TYPED_DATA:
+			if ((block - off) >= typ->vt_size)
+				break;
+			pblock = (typ->vt_block + block - off);
+			goto out;
+		case VXFS_TYPED_INDIRECT_DEV4:
+		case VXFS_TYPED_DATA_DEV4: {
+			struct vxfs_typed_dev4	*typ4 =
+				(struct vxfs_typed_dev4 *)typ;
+
+			printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n");
+			printk(KERN_INFO "block: %Lu\tsize: %Ld\tdev: %d\n",
+			       (unsigned long long) typ4->vd4_block,
+			       (unsigned long long) typ4->vd4_size,
+			       typ4->vd4_dev);
+			goto fail;
+		}
+		default:
+			BUG();
+		}
+		brelse(bp);
+	}
+
+fail:
+	pblock = 0;
+out:
+	brelse(bp);
+	return (pblock);
+}
+
+/**
+ * vxfs_bmap_typed - bmap for typed extents
+ * @ip:		pointer to the inode we do bmap for
+ * @iblock:	logical block
+ *
+ * Description:
+ *   Performs the bmap operation for typed extents.
+ *
+ * Return Value:
+ *   The physical block number on success, else Zero.
+ */
+static daddr_t
+vxfs_bmap_typed(struct inode *ip, long iblock)
+{
+	struct vxfs_inode_info		*vip = VXFS_INO(ip);
+	daddr_t				pblock = 0;
+	int				i;
+
+	for (i = 0; i < VXFS_NTYPED; i++) {
+		struct vxfs_typed	*typ = vip->vii_org.typed + i;
+		int64_t			off = (typ->vt_hdr & VXFS_TYPED_OFFSETMASK);
+
+#ifdef DIAGNOSTIC
+		vxfs_typdump(typ);
+#endif
+		if (iblock < off)
+			continue;
+		switch ((u_int32_t)(typ->vt_hdr >> VXFS_TYPED_TYPESHIFT)) {
+		case VXFS_TYPED_INDIRECT:
+			pblock = vxfs_bmap_indir(ip, typ->vt_block,
+					typ->vt_size, iblock - off);
+			if (pblock == -2)
+				break;
+			return (pblock);
+		case VXFS_TYPED_DATA:
+			if ((iblock - off) < typ->vt_size)
+				return (typ->vt_block + iblock - off);
+			break;
+		case VXFS_TYPED_INDIRECT_DEV4:
+		case VXFS_TYPED_DATA_DEV4: {
+			struct vxfs_typed_dev4	*typ4 =
+				(struct vxfs_typed_dev4 *)typ;
+
+			printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n");
+			printk(KERN_INFO "block: %Lu\tsize: %Ld\tdev: %d\n",
+			       (unsigned long long) typ4->vd4_block,
+			       (unsigned long long) typ4->vd4_size,
+			       typ4->vd4_dev);
+			return 0;
+		}
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vxfs_bmap1 - vxfs-internal bmap operation
+ * @ip:			pointer to the inode we do bmap for
+ * @iblock:		logical block
+ *
+ * Description:
+ *   vxfs_bmap1 performs a logical to physical block mapping
+ *   for vxfs-internal purposes.
+ *
+ * Return Value:
+ *   The physical block number on success, else Zero.
+ */
+daddr_t
+vxfs_bmap1(struct inode *ip, long iblock)
+{
+	struct vxfs_inode_info		*vip = VXFS_INO(ip);
+
+	if (VXFS_ISEXT4(vip))
+		return vxfs_bmap_ext4(ip, iblock);
+	if (VXFS_ISTYPED(vip))
+		return vxfs_bmap_typed(ip, iblock);
+	if (VXFS_ISNONE(vip))
+		goto unsupp;
+	if (VXFS_ISIMMED(vip))
+		goto unsupp;
+
+	printk(KERN_WARNING "vxfs: inode %ld has no valid orgtype (%x)\n",
+			ip->i_ino, vip->vii_orgtype);
+	BUG();
+
+unsupp:
+	printk(KERN_WARNING "vxfs: inode %ld has an unsupported orgtype (%x)\n",
+			ip->i_ino, vip->vii_orgtype);
+	return 0;
+}
diff --git a/fs/freevxfs/vxfs_dir.h b/fs/freevxfs/vxfs_dir.h
new file mode 100644
index 0000000..8a4dfef
--- /dev/null
+++ b/fs/freevxfs/vxfs_dir.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_DIR_H_
+#define _VXFS_DIR_H_
+
+/*
+ * Veritas filesystem driver - directory structure.
+ *
+ * This file contains the definition of the vxfs directory format.
+ */
+
+
+/*
+ * VxFS directory block header.
+ *
+ * This entry is the head of every filesystem block in a directory.
+ * It is used for free space management and additionally includes
+ * a hash for speeding up directory search (lookup).
+ *
+ * The hash may be empty and in fact we do not use it at all in
+ * the Linux driver for now.
+ */
+struct vxfs_dirblk {
+	u_int16_t	d_free;		/* free space in dirblock */
+	u_int16_t	d_nhash;	/* no of hash chains */
+	u_int16_t	d_hash[1];	/* hash chain */
+};
+
+/*
+ * VXFS_NAMELEN is the maximum length of the d_name field
+ *	of a VxFS directory entry.
+ */
+#define VXFS_NAMELEN	256
+
+/*
+ * VxFS directory entry.
+ */
+struct vxfs_direct {
+	vx_ino_t	d_ino;			/* inode number */
+	u_int16_t	d_reclen;		/* record length */
+	u_int16_t	d_namelen;		/* d_name length */
+	u_int16_t	d_hashnext;		/* next hash entry */
+	char		d_name[VXFS_NAMELEN];	/* name */
+};
+
+/*
+ * VXFS_DIRPAD defines the directory entry boundaries, it _must_ be
+ *	a multiple of four.
+ * VXFS_NAMEMIN is the length of a directory entry with a NULL d_name.
+ * VXFS_DIRROUND is an internal macro that rounds a length to a value
+ *	usable for directory sizes.
+ * VXFS_DIRLEN calculates the directory entry size for an entry with
+ *	a d_name of size len.
+ */
+#define VXFS_DIRPAD		4
+#define VXFS_NAMEMIN		((int)((struct vxfs_direct *)0)->d_name)
+#define VXFS_DIRROUND(len)	((VXFS_DIRPAD + (len) - 1) & ~(VXFS_DIRPAD -1))
+#define VXFS_DIRLEN(len)	(VXFS_DIRROUND(VXFS_NAMEMIN + (len)))
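+
+/*
+ * Example (assuming a 32-bit vx_ino_t): VXFS_NAMEMIN is the offset of
+ * d_name, i.e. 10 bytes, so VXFS_DIRLEN(1) rounds 11 up to the next
+ * multiple of VXFS_DIRPAD, giving 12.
+ */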
+
+/*
+ * VXFS_DIRBLKOV is the overhead of a specific dirblock.
+ */
+#define VXFS_DIRBLKOV(dbp)	((sizeof(short) * dbp->d_nhash) + 4)
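+/*
+ * i.e. the 4-byte d_free/d_nhash header plus one 16-bit hash slot
+ * per hash chain.
+ */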
+
+#endif /* _VXFS_DIR_H_ */
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
new file mode 100644
index 0000000..d8be917
--- /dev/null
+++ b/fs/freevxfs/vxfs_extern.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_EXTERN_H_
+#define _VXFS_EXTERN_H_
+
+/*
+ * Veritas filesystem driver - external prototypes.
+ *
+ * This file contains prototypes for all vxfs functions used
+ * outside their respective source files.
+ */
+
+
+struct kmem_cache_s;
+struct super_block;
+struct vxfs_inode_info;
+struct inode;
+
+
+/* vxfs_bmap.c */
+extern daddr_t			vxfs_bmap1(struct inode *, long);
+
+/* vxfs_fshead.c */
+extern int			vxfs_read_fshead(struct super_block *);
+
+/* vxfs_inode.c */
+extern struct kmem_cache_s	*vxfs_inode_cachep;
+extern void			vxfs_dumpi(struct vxfs_inode_info *, ino_t);
+extern struct inode *		vxfs_get_fake_inode(struct super_block *,
+					struct vxfs_inode_info *);
+extern void			vxfs_put_fake_inode(struct inode *);
+extern struct vxfs_inode_info *	vxfs_blkiget(struct super_block *, u_long, ino_t);
+extern struct vxfs_inode_info *	vxfs_stiget(struct super_block *, ino_t);
+extern void			vxfs_read_inode(struct inode *);
+extern void			vxfs_clear_inode(struct inode *);
+
+/* vxfs_lookup.c */
+extern struct inode_operations	vxfs_dir_inode_ops;
+extern struct file_operations	vxfs_dir_operations;
+
+/* vxfs_olt.c */
+extern int			vxfs_read_olt(struct super_block *, u_long);
+
+/* vxfs_subr.c */
+extern struct page *		vxfs_get_page(struct address_space *, u_long);
+extern void			vxfs_put_page(struct page *);
+extern struct buffer_head *	vxfs_bread(struct inode *, int);
+
+#endif /* _VXFS_EXTERN_H_ */
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
new file mode 100644
index 0000000..05b19f7
--- /dev/null
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - fileset header routines.
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "vxfs.h"
+#include "vxfs_inode.h"
+#include "vxfs_extern.h"
+#include "vxfs_fshead.h"
+
+
+#ifdef DIAGNOSTIC
+static void
+vxfs_dumpfsh(struct vxfs_fsh *fhp)
+{
+	printk("\n\ndumping fileset header:\n");
+	printk("----------------------------\n");
+	printk("version: %u\n", fhp->fsh_version);
+	printk("fsindex: %u\n", fhp->fsh_fsindex);
+	printk("iauino: %u\tninodes:%u\n",
+			fhp->fsh_iauino, fhp->fsh_ninodes);
+	printk("maxinode: %u\tlctino: %u\n",
+			fhp->fsh_maxinode, fhp->fsh_lctino);
+	printk("nau: %u\n", fhp->fsh_nau);
+	printk("ilistino[0]: %u\tilistino[1]: %u\n",
+			fhp->fsh_ilistino[0], fhp->fsh_ilistino[1]);
+}
+#endif
+
+/**
+ * vxfs_getfsh - read fileset header into memory
+ * @ip:		the (fake) fileset header inode
+ * @which:	0 for the structural, 1 for the primary fsh.
+ *
+ * Description:
+ *   vxfs_getfsh reads either the structural or primary fileset header
+ *   described by @ip into memory.
+ *
+ * Returns:
+ *   The fileset header structure on success, else a NULL pointer.
+ */
+static struct vxfs_fsh *
+vxfs_getfsh(struct inode *ip, int which)
+{
+	struct buffer_head		*bp;
+
+	bp = vxfs_bread(ip, which);
+	if (buffer_mapped(bp)) {
+		struct vxfs_fsh		*fhp;
+
+		if (!(fhp = kmalloc(sizeof(*fhp), SLAB_KERNEL)))
+			return NULL;
+		memcpy(fhp, bp->b_data, sizeof(*fhp));
+
+		brelse(bp);
+		return (fhp);
+	}
+
+	return NULL;
+}
+
+/**
+ * vxfs_read_fshead - read the fileset headers
+ * @sbp:	superblock to which the fileset belongs
+ *
+ * Description:
+ *   vxfs_read_fshead will fill the inode and structural inode list in @sb.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code (-EINVAL).
+ */
+int
+vxfs_read_fshead(struct super_block *sbp)
+{
+	struct vxfs_sb_info		*infp = VXFS_SBI(sbp);
+	struct vxfs_fsh			*pfp, *sfp;
+	struct vxfs_inode_info		*vip, *tip;
+
+	vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
+	if (!vip) {
+		printk(KERN_ERR "vxfs: unable to read fsh inode\n");
+		return -EINVAL;
+	}
+	if (!VXFS_ISFSH(vip)) {
+		printk(KERN_ERR "vxfs: fsh list inode is of wrong type (%x)\n",
+				vip->vii_mode & VXFS_TYPE_MASK); 
+		goto out_free_fship;
+	}
+
+
+#ifdef DIAGNOSTIC
+	printk("vxfs: fsh inode dump:\n");
+	vxfs_dumpi(vip, infp->vsi_fshino);
+#endif
+
+	infp->vsi_fship = vxfs_get_fake_inode(sbp, vip);
+	if (!infp->vsi_fship) {
+		printk(KERN_ERR "vxfs: unable to get fsh inode\n");
+		goto out_free_fship;
+	}
+
+	sfp = vxfs_getfsh(infp->vsi_fship, 0);
+	if (!sfp) {
+		printk(KERN_ERR "vxfs: unable to get structural fsh\n");
+		goto out_iput_fship;
+	} 
+
+#ifdef DIAGNOSTIC
+	vxfs_dumpfsh(sfp);
+#endif
+
+	pfp = vxfs_getfsh(infp->vsi_fship, 1);
+	if (!pfp) {
+		printk(KERN_ERR "vxfs: unable to get primary fsh\n");
+		goto out_free_sfp;
+	}
+
+#ifdef DIAGNOSTIC
+	vxfs_dumpfsh(pfp);
+#endif
+
+	tip = vxfs_blkiget(sbp, infp->vsi_iext, sfp->fsh_ilistino[0]);
+	if (!tip)
+		goto out_free_pfp;
+
+	infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip);
+	if (!infp->vsi_stilist) {
+		printk(KERN_ERR "vxfs: unable to get structural list inode\n");
+		kfree(tip);
+		goto out_free_pfp;
+	}
+	if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
+		printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
+				VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK); 
+		goto out_iput_stilist;
+	}
+
+	tip = vxfs_stiget(sbp, pfp->fsh_ilistino[0]);
+	if (!tip)
+		goto out_iput_stilist;
+	infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip);
+	if (!infp->vsi_ilist) {
+		printk(KERN_ERR "vxfs: unable to get inode list inode\n");
+		kfree(tip);
+		goto out_iput_stilist;
+	}
+	if (!VXFS_ISILT(VXFS_INO(infp->vsi_ilist))) {
+		printk(KERN_ERR "vxfs: inode list inode is of wrong type (%x)\n",
+				VXFS_INO(infp->vsi_ilist)->vii_mode & VXFS_TYPE_MASK);
+		goto out_iput_ilist;
+	}
+
+	return 0;
+
+ out_iput_ilist:
+ 	iput(infp->vsi_ilist);
+ out_iput_stilist:
+ 	iput(infp->vsi_stilist);
+ out_free_pfp:
+	kfree(pfp);
+ out_free_sfp:
+ 	kfree(sfp);
+ out_iput_fship:
+	iput(infp->vsi_fship);
+	return -EINVAL;
+ out_free_fship:
+ 	kfree(vip);
+	return -EINVAL;
+}
diff --git a/fs/freevxfs/vxfs_fshead.h b/fs/freevxfs/vxfs_fshead.h
new file mode 100644
index 0000000..ead0d64
--- /dev/null
+++ b/fs/freevxfs/vxfs_fshead.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_FSHEAD_H_
+#define _VXFS_FSHEAD_H_
+
+/*
+ * Veritas filesystem driver - fileset header structures.
+ *
+ * This file contains the physical structure of the VxFS
+ * fileset header.
+ */
+
+
+/*
+ * Fileset header 
+ */
+struct vxfs_fsh {
+	u_int32_t	fsh_version;		/* fileset header version */
+	u_int32_t	fsh_fsindex;		/* fileset index */
+	u_int32_t	fsh_time;		/* modification time - sec */
+	u_int32_t	fsh_utime;		/* modification time - usec */
+	u_int32_t	fsh_extop;		/* extop flags */
+	vx_ino_t	fsh_ninodes;		/* allocated inodes */
+	u_int32_t	fsh_nau;		/* number of IAUs */
+	u_int32_t	fsh_old_ilesize;	/* old size of ilist */
+	u_int32_t	fsh_dflags;		/* flags */
+	u_int32_t	fsh_quota;		/* quota limit */
+	vx_ino_t	fsh_maxinode;		/* maximum inode number */
+	vx_ino_t	fsh_iauino;		/* IAU inode */
+	vx_ino_t	fsh_ilistino[2];	/* ilist inodes */
+	vx_ino_t	fsh_lctino;		/* link count table inode */
+
+	/*
+	 * Slightly more fields follow, but they
+	 *  a) are not of any interest for us, and
+	 *  b) differ a lot in different vxfs versions/ports
+	 */
+};
+
+#endif /* _VXFS_FSHEAD_H_ */
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
new file mode 100644
index 0000000..ac677ab
--- /dev/null
+++ b/fs/freevxfs/vxfs_immed.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - support for 'immed' inodes.
+ */
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/namei.h>
+
+#include "vxfs.h"
+#include "vxfs_inode.h"
+
+
+static int	vxfs_immed_follow_link(struct dentry *, struct nameidata *);
+
+static int	vxfs_immed_readpage(struct file *, struct page *);
+
+/*
+ * Inode operations for immed symlinks.
+ *
+ * Unlike all other operations, we do not go through the pagecache,
+ * but do all work directly on the inode.
+ */
+struct inode_operations vxfs_immed_symlink_iops = {
+	.readlink =		generic_readlink,
+	.follow_link =		vxfs_immed_follow_link,
+};
+
+/*
+ * Address space operations for immed files and directories.
+ */
+struct address_space_operations vxfs_immed_aops = {
+	.readpage =		vxfs_immed_readpage,
+};
+
+/**
+ * vxfs_immed_follow_link - follow immed symlink
+ * @dp:		dentry for the link
+ * @np:		pathname lookup data for the current path walk
+ *
+ * Description:
+ *   vxfs_immed_follow_link restarts the pathname lookup with
+ *   the data obtained from @dp.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code.
+ */
+static int
+vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np)
+{
+	struct vxfs_inode_info		*vip = VXFS_INO(dp->d_inode);
+	nd_set_link(np, vip->vii_immed.vi_immed);
+	return 0;
+}
+
+/**
+ * vxfs_immed_readpage - read part of an immed inode into pagecache
+ * @fp:		file context (unused)
+ * @pp:		page frame to fill in.
+ *
+ * Description:
+ *   vxfs_immed_readpage reads a part of the immed area of the
+ *   file that hosts @pp into the pagecache.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code.
+ *
+ * Locking status:
+ *   @pp is locked and will be unlocked.
+ */
+static int
+vxfs_immed_readpage(struct file *fp, struct page *pp)
+{
+	struct vxfs_inode_info	*vip = VXFS_INO(pp->mapping->host);
+	u_int64_t		offset = pp->index << PAGE_CACHE_SHIFT;
+	caddr_t			kaddr;
+
+	kaddr = kmap(pp);
+	memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
+	kunmap(pp);
+	
+	flush_dcache_page(pp);
+	SetPageUptodate(pp);
+	unlock_page(pp);
+
+	return 0;
+}
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
new file mode 100644
index 0000000..9672d2f
--- /dev/null
+++ b/fs/freevxfs/vxfs_inode.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - inode routines.
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/pagemap.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vxfs.h"
+#include "vxfs_inode.h"
+#include "vxfs_extern.h"
+
+
+extern struct address_space_operations vxfs_aops;
+extern struct address_space_operations vxfs_immed_aops;
+
+extern struct inode_operations vxfs_immed_symlink_iops;
+
+static struct file_operations vxfs_file_operations = {
+	.open =			generic_file_open,
+	.llseek =		generic_file_llseek,
+	.read =			generic_file_read,
+	.mmap =			generic_file_mmap,
+	.sendfile =		generic_file_sendfile,
+};
+
+
+kmem_cache_t		*vxfs_inode_cachep;
+
+
+#ifdef DIAGNOSTIC
+/*
+ * Dump inode contents (partially).
+ */
+void
+vxfs_dumpi(struct vxfs_inode_info *vip, ino_t ino)
+{
+	printk(KERN_DEBUG "\n\n");
+	if (ino)
+		printk(KERN_DEBUG "dumping vxfs inode %ld\n", ino);
+	else
+		printk(KERN_DEBUG "dumping unknown vxfs inode\n");
+
+	printk(KERN_DEBUG "---------------------------\n");
+	printk(KERN_DEBUG "mode is %x\n", vip->vii_mode);
+	printk(KERN_DEBUG "nlink:%u, uid:%u, gid:%u\n",
+			vip->vii_nlink, vip->vii_uid, vip->vii_gid);
+	printk(KERN_DEBUG "size:%Lx, blocks:%u\n",
+			vip->vii_size, vip->vii_blocks);
+	printk(KERN_DEBUG "orgtype:%u\n", vip->vii_orgtype);
+}
+#endif
+
+
+/**
+ * vxfs_blkiget - find inode based on extent #
+ * @sbp:	superblock of the filesystem we search in
+ * @extent:	number of the extent to search
+ * @ino:	inode number to search
+ *
+ * Description:
+ *  vxfs_blkiget searches inode @ino in the filesystem described by
+ *  @sbp in the extent @extent.
+ *  Returns the matching VxFS inode on success, else a NULL pointer.
+ *
+ * NOTE:
+ *  While __vxfs_iget uses the pagecache vxfs_blkiget uses the
+ *  buffercache.  This function should not be used outside the
+ *  read_super() method, otherwise the data may be incoherent.
+ */
+struct vxfs_inode_info *
+vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
+{
+	struct buffer_head		*bp;
+	u_long				block, offset;
+
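+	/*
+	 * On-disk inodes are VXFS_ISIZE (256) bytes each; e.g. with a
+	 * 1 KB block size inode 5 lives in block @extent + 1 at byte
+	 * offset 256.
+	 */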
+	block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
+	offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE);
+	bp = sb_bread(sbp, block);
+
+	if (buffer_mapped(bp)) {
+		struct vxfs_inode_info	*vip;
+		struct vxfs_dinode	*dip;
+
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+			goto fail;
+		dip = (struct vxfs_dinode *)(bp->b_data + offset);
+		memcpy(vip, dip, sizeof(*vip));
+#ifdef DIAGNOSTIC
+		vxfs_dumpi(vip, ino);
+#endif
+		brelse(bp);
+		return (vip);
+	}
+
+fail:
+	printk(KERN_WARNING "vxfs: unable to read block %ld\n", block);
+	brelse(bp);
+	return NULL;
+}
+
+/**
+ * __vxfs_iget - generic find inode facility
+ * @ino:		inode number
+ * @ilistp:		inode list
+ *
+ * Description:
+ *  Search for inode number @ino in the inode list
+ *  described by @ilistp.
+ *  Returns the matching VxFS inode on success, else a NULL pointer.
+ */
+static struct vxfs_inode_info *
+__vxfs_iget(ino_t ino, struct inode *ilistp)
+{
+	struct page			*pp;
+	u_long				offset;
+
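+	/*
+	 * The inode list is read through the pagecache; e.g. with 4 KB
+	 * pages each page holds 16 on-disk inodes, so inode 20 sits on
+	 * page 1 at byte offset 4 * VXFS_ISIZE == 1024.
+	 */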
+	offset = (ino % (PAGE_SIZE / VXFS_ISIZE)) * VXFS_ISIZE;
+	pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE);
+
+	if (!IS_ERR(pp)) {
+		struct vxfs_inode_info	*vip;
+		struct vxfs_dinode	*dip;
+		caddr_t			kaddr = (char *)page_address(pp);
+
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+			goto fail;
+		dip = (struct vxfs_dinode *)(kaddr + offset);
+		memcpy(vip, dip, sizeof(*vip));
+#ifdef DIAGNOSTIC
+		vxfs_dumpi(vip, ino);
+#endif
+		vxfs_put_page(pp);
+		return (vip);
+	}
+
+	printk(KERN_WARNING "vxfs: error on page %p\n", pp);
+	return NULL;
+
+fail:
+	printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino);
+	vxfs_put_page(pp);
+	return NULL;
+}
+
+/**
+ * vxfs_stiget - find inode using the structural inode list
+ * @sbp:	VFS superblock
+ * @ino:	inode #
+ *
+ * Description:
+ *  Find inode @ino in the filesystem described by @sbp using
+ *  the structural inode list.
+ *  Returns the matching VxFS inode on success, else a NULL pointer.
+ */
+struct vxfs_inode_info *
+vxfs_stiget(struct super_block *sbp, ino_t ino)
+{
+	return __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist);
+}
+
+/**
+ * vxfs_transmod - compute the Linux file mode for a VxFS inode
+ * @vip:	VxFS inode
+ *
+ * Description:
+ *  vxfs_transmod returns a Linux mode_t for a given
+ *  VxFS inode structure.
+ */
+static __inline__ mode_t
+vxfs_transmod(struct vxfs_inode_info *vip)
+{
+	mode_t			ret = vip->vii_mode & ~VXFS_TYPE_MASK;
+
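+	/*
+	 * VxFS keeps the file type in its own bits (VXFS_TYPE_MASK)
+	 * rather than in the Linux S_IFMT positions, so strip those
+	 * bits and set the matching S_IF* flag instead.
+	 */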
+	if (VXFS_ISFIFO(vip))
+		ret |= S_IFIFO;
+	if (VXFS_ISCHR(vip))
+		ret |= S_IFCHR;
+	if (VXFS_ISDIR(vip))
+		ret |= S_IFDIR;
+	if (VXFS_ISBLK(vip))
+		ret |= S_IFBLK;
+	if (VXFS_ISLNK(vip))
+		ret |= S_IFLNK;
+	if (VXFS_ISREG(vip))
+		ret |= S_IFREG;
+	if (VXFS_ISSOC(vip))
+		ret |= S_IFSOCK;
+
+	return (ret);
+}
+
+/**
+ * vxfs_iinit - helper to fill inode fields
+ * @ip:		VFS inode
+ * @vip:	VxFS inode
+ *
+ * Description:
+ *  vxfs_iinit is a helper function to fill in all relevant
+ *  fields in @ip from @vip.
+ */
+static void
+vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip)
+{
+
+	ip->i_mode = vxfs_transmod(vip);
+	ip->i_uid = (uid_t)vip->vii_uid;
+	ip->i_gid = (gid_t)vip->vii_gid;
+
+	ip->i_nlink = vip->vii_nlink;
+	ip->i_size = vip->vii_size;
+
+	ip->i_atime.tv_sec = vip->vii_atime;
+	ip->i_ctime.tv_sec = vip->vii_ctime;
+	ip->i_mtime.tv_sec = vip->vii_mtime;
+	ip->i_atime.tv_nsec = 0;
+	ip->i_ctime.tv_nsec = 0;
+	ip->i_mtime.tv_nsec = 0;
+
+	ip->i_blksize = PAGE_SIZE;
+	ip->i_blocks = vip->vii_blocks;
+	ip->i_generation = vip->vii_gen;
+
+	ip->u.generic_ip = (void *)vip;
+	
+}
+
+/**
+ * vxfs_get_fake_inode - get fake inode structure
+ * @sbp:		filesystem superblock
+ * @vip:		fspriv inode
+ *
+ * Description:
+ *  vxfs_get_fake_inode gets a fake inode (not in the inode hash) for a
+ *  superblock, vxfs_inode pair.
+ *  Returns the filled VFS inode.
+ */
+struct inode *
+vxfs_get_fake_inode(struct super_block *sbp, struct vxfs_inode_info *vip)
+{
+	struct inode			*ip = NULL;
+
+	if ((ip = new_inode(sbp))) {
+		vxfs_iinit(ip, vip);
+		ip->i_mapping->a_ops = &vxfs_aops;
+	}
+	return (ip);
+}
+
+/**
+ * vxfs_put_fake_inode - free faked inode
+ * @ip:			VFS inode
+ *
+ * Description:
+ *  vxfs_put_fake_inode frees all data associated with @ip.
+ */
+void
+vxfs_put_fake_inode(struct inode *ip)
+{
+	iput(ip);
+}
+
+/**
+ * vxfs_read_inode - fill in inode information
+ * @ip:		inode pointer to fill
+ *
+ * Description:
+ *  vxfs_read_inode reads the disk inode for @ip and fills
+ *  in all relevant fields in @ip.
+ */
+void
+vxfs_read_inode(struct inode *ip)
+{
+	struct super_block		*sbp = ip->i_sb;
+	struct vxfs_inode_info		*vip;
+	struct address_space_operations	*aops;
+	ino_t				ino = ip->i_ino;
+
+	if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist)))
+		return;
+
+	vxfs_iinit(ip, vip);
+
+	if (VXFS_ISIMMED(vip))
+		aops = &vxfs_immed_aops;
+	else
+		aops = &vxfs_aops;
+
+	if (S_ISREG(ip->i_mode)) {
+		ip->i_fop = &vxfs_file_operations;
+		ip->i_mapping->a_ops = aops;
+	} else if (S_ISDIR(ip->i_mode)) {
+		ip->i_op = &vxfs_dir_inode_ops;
+		ip->i_fop = &vxfs_dir_operations;
+		ip->i_mapping->a_ops = aops;
+	} else if (S_ISLNK(ip->i_mode)) {
+		if (!VXFS_ISIMMED(vip)) {
+			ip->i_op = &page_symlink_inode_operations;
+			ip->i_mapping->a_ops = &vxfs_aops;
+		} else
+			ip->i_op = &vxfs_immed_symlink_iops;
+	} else
+		init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));
+
+	return;
+}
+
+/**
+ * vxfs_clear_inode - remove inode from main memory
+ * @ip:		inode to discard.
+ *
+ * Description:
+ *  vxfs_clear_inode() is called on the final iput and frees the private
+ *  inode area.
+ */
+void
+vxfs_clear_inode(struct inode *ip)
+{
+	kmem_cache_free(vxfs_inode_cachep, ip->u.generic_ip);
+}
diff --git a/fs/freevxfs/vxfs_inode.h b/fs/freevxfs/vxfs_inode.h
new file mode 100644
index 0000000..240aeb1
--- /dev/null
+++ b/fs/freevxfs/vxfs_inode.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_INODE_H_
+#define _VXFS_INODE_H_
+
+/*
+ * Veritas filesystem driver - inode structure.
+ *
+ * This file contains the definition of the disk and core
+ * inodes of the Veritas Filesystem.
+ */
+
+
+#define VXFS_ISIZE		0x100		/* Inode size */
+
+#define VXFS_NDADDR		10		/* Number of direct addrs in inode */
+#define VXFS_NIADDR		2		/* Number of indirect addrs in inode */
+#define VXFS_NIMMED		96		/* Size of immediate data in inode */
+#define VXFS_NTYPED		6		/* Num of typed extents */
+
+#define VXFS_TYPED_OFFSETMASK	(0x00FFFFFFFFFFFFFFULL)
+#define VXFS_TYPED_TYPEMASK	(0xFF00000000000000ULL)
+#define VXFS_TYPED_TYPESHIFT	56
+
+#define VXFS_TYPED_PER_BLOCK(sbp) \
+	((sbp)->s_blocksize / sizeof(struct vxfs_typed))
+
+/*
+ * Possible extent descriptor types for %VXFS_ORG_TYPED extents.
+ */
+enum {
+	VXFS_TYPED_INDIRECT		= 1,
+	VXFS_TYPED_DATA			= 2,
+	VXFS_TYPED_INDIRECT_DEV4	= 3,
+	VXFS_TYPED_DATA_DEV4		= 4,
+};
+
+/*
+ * Data stored immediately in the inode.
+ */
+struct vxfs_immed {
+	u_int8_t	vi_immed[VXFS_NIMMED];
+};
+
+struct vxfs_ext4 {
+	u_int32_t		ve4_spare;		/* ?? */
+	u_int32_t		ve4_indsize;		/* Indirect extent size */
+	vx_daddr_t		ve4_indir[VXFS_NIADDR];	/* Indirect extents */
+	struct direct {					/* Direct extents */
+		vx_daddr_t	extent;			/* Extent number */
+		int32_t		size;			/* Size of extent */
+	} ve4_direct[VXFS_NDADDR];
+};
+
+struct vxfs_typed {
+	u_int64_t	vt_hdr;		/* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
+	vx_daddr_t	vt_block;	/* Extent block */
+	int32_t		vt_size;	/* Size in blocks */
+};
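+/*
+ * Example: a vt_hdr of 0x0200000000000010ULL describes a
+ * VXFS_TYPED_DATA (type 2) extent starting at logical offset 0x10:
+ * the type lives in the top byte, the offset in the low 56 bits
+ * (see VXFS_TYPED_TYPESHIFT and VXFS_TYPED_OFFSETMASK).
+ */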
+
+struct vxfs_typed_dev4 {
+	u_int64_t	vd4_hdr;	/* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
+	u_int64_t	vd4_block;	/* Extent block */
+	u_int64_t	vd4_size;	/* Size in blocks */
+	int32_t		vd4_dev;	/* Device ID */
+	u_int32_t	__pad1;
+};
+
+/*
+ * The inode as contained on the physical device.
+ */
+struct vxfs_dinode {
+	int32_t		vdi_mode;
+	u_int32_t	vdi_nlink;	/* Link count */
+	u_int32_t	vdi_uid;	/* UID */
+	u_int32_t	vdi_gid;	/* GID */
+	u_int64_t	vdi_size;	/* Inode size in bytes */
+	u_int32_t	vdi_atime;	/* Last time accessed - sec */
+	u_int32_t	vdi_autime;	/* Last time accessed - usec */
+	u_int32_t	vdi_mtime;	/* Last modify time - sec */
+	u_int32_t	vdi_mutime;	/* Last modify time - usec */
+	u_int32_t	vdi_ctime;	/* Create time - sec */
+	u_int32_t	vdi_cutime;	/* Create time - usec */
+	u_int8_t	vdi_aflags;	/* Allocation flags */
+	u_int8_t	vdi_orgtype;	/* Organisation type */
+	u_int16_t	vdi_eopflags;
+	u_int32_t	vdi_eopdata;
+	union {
+		u_int32_t		rdev;
+		u_int32_t		dotdot;
+		struct {
+			u_int32_t	reserved;
+			u_int32_t	fixextsize;
+		} i_regular;
+		struct {
+			u_int32_t	matchino;
+			u_int32_t	fsetindex;
+		} i_vxspec;
+		u_int64_t		align;
+	} vdi_ftarea;
+	u_int32_t	vdi_blocks;	/* How much blocks does inode occupy */
+	u_int32_t	vdi_gen;	/* Inode generation */
+	u_int64_t	vdi_version;	/* Version */
+	union {
+		struct vxfs_immed	immed;
+		struct vxfs_ext4	ext4;
+		struct vxfs_typed	typed[VXFS_NTYPED];
+	} vdi_org;
+	u_int32_t	vdi_iattrino;
+};
+
+#define vdi_rdev	vdi_ftarea.rdev
+#define vdi_dotdot	vdi_ftarea.dotdot
+#define vdi_fixextsize	vdi_ftarea.i_regular.fixextsize
+#define vdi_matchino	vdi_ftarea.i_vxspec.matchino
+#define vdi_fsetindex	vdi_ftarea.i_vxspec.fsetindex
+
+#define vdi_immed	vdi_org.immed
+#define vdi_ext4	vdi_org.ext4
+#define vdi_typed	vdi_org.typed
+
+
+/*
+ * The inode as represented in the main memory.
+ *
+ * TBD: This should become a separate structure...
+ */
+#define vxfs_inode_info	vxfs_dinode
+
+#define vii_mode	vdi_mode
+#define vii_uid		vdi_uid
+#define vii_gid		vdi_gid
+#define vii_nlink	vdi_nlink
+#define vii_size	vdi_size
+#define vii_atime	vdi_atime
+#define vii_ctime	vdi_ctime
+#define vii_mtime	vdi_mtime
+#define vii_blocks	vdi_blocks
+#define vii_org		vdi_org
+#define vii_orgtype	vdi_orgtype
+#define vii_gen		vdi_gen
+
+#define vii_rdev	vdi_ftarea.rdev
+#define vii_dotdot	vdi_ftarea.dotdot
+#define vii_fixextsize	vdi_ftarea.i_regular.fixextsize
+#define vii_matchino	vdi_ftarea.i_vxspec.matchino
+#define vii_fsetindex	vdi_ftarea.i_vxspec.fsetindex
+
+#define vii_immed	vdi_org.immed
+#define vii_ext4	vdi_org.ext4
+#define vii_typed	vdi_org.typed
+
+#endif /* _VXFS_INODE_H_ */
diff --git a/fs/freevxfs/vxfs_kcompat.h b/fs/freevxfs/vxfs_kcompat.h
new file mode 100644
index 0000000..342a4cc
--- /dev/null
+++ b/fs/freevxfs/vxfs_kcompat.h
@@ -0,0 +1,49 @@
+#ifndef _VXFS_KCOMPAT_H
+#define _VXFS_KCOMPAT_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+
+#include <linux/blkdev.h>
+
+typedef long sector_t;
+
+/* From include/linux/fs.h (Linux 2.5.2-pre3)  */
+static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
+{
+	return bread(sb->s_dev, block, sb->s_blocksize);
+}
+
+/* Ditto.  */
+static inline void map_bh(struct buffer_head *bh, struct super_block *sb, int block)
+{
+	bh->b_state |= 1 << BH_Mapped;
+	bh->b_dev = sb->s_dev;
+	bh->b_blocknr = block;
+}
+
+/* From fs/block_dev.c (Linux 2.5.2-pre2)  */
+static inline int sb_set_blocksize(struct super_block *sb, int size)
+{
+	int bits;
+	if (set_blocksize(sb->s_dev, size) < 0)
+		return 0;
+	sb->s_blocksize = size;
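+	/* compute log2 of the block size; e.g. 1024 -> s_blocksize_bits == 10 */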
+	for (bits = 9, size >>= 9; size >>= 1; bits++)
+		;
+	sb->s_blocksize_bits = bits;
+	return sb->s_blocksize;
+}
+
+/* Ditto.  */
+static inline int sb_min_blocksize(struct super_block *sb, int size)
+{
+	int minsize = get_hardsect_size(sb->s_dev);
+	if (size < minsize)
+		size = minsize;
+	return sb_set_blocksize(sb, size);
+}
+
+#endif /* Kernel 2.4 */
+#endif /* _VXFS_KCOMPAT_H */
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
new file mode 100644
index 0000000..506ae25
--- /dev/null
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - lookup and other directory related code.
+ */
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+#include "vxfs.h"
+#include "vxfs_dir.h"
+#include "vxfs_inode.h"
+#include "vxfs_extern.h"
+
+/*
+ * Number of VxFS blocks per page.
+ */
+#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
+
+
+static struct dentry *	vxfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int		vxfs_readdir(struct file *, void *, filldir_t);
+
+struct inode_operations vxfs_dir_inode_ops = {
+	.lookup =		vxfs_lookup,
+};
+
+struct file_operations vxfs_dir_operations = {
+	.readdir =		vxfs_readdir,
+};
+
+ 
+static __inline__ u_long
+dir_pages(struct inode *inode)
+{
+	return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
+ 
+static __inline__ u_long
+dir_blocks(struct inode *ip)
+{
+	u_long			bsize = ip->i_sb->s_blocksize;
+	return (ip->i_size + bsize - 1) & ~(bsize - 1);
+}
+
+/*
+ * NOTE! unlike strncmp, vxfs_match returns 1 for success, 0 for failure.
+ *
+ * len <= VXFS_NAMELEN and de != NULL are guaranteed by caller.
+ */
+static __inline__ int
+vxfs_match(int len, const char * const name, struct vxfs_direct *de)
+{
+	if (len != de->d_namelen)
+		return 0;
+	if (!de->d_ino)
+		return 0;
+	return !memcmp(name, de->d_name, len);
+}
+
+static __inline__ struct vxfs_direct *
+vxfs_next_entry(struct vxfs_direct *de)
+{
+	return ((struct vxfs_direct *)((char*)de + de->d_reclen));
+}
+
+/**
+ * vxfs_find_entry - find a matching directory entry for a dentry
+ * @ip:		directory inode
+ * @dp:		dentry for which we want to find a direct
+ * @ppp:	gets filled with the page the return value sits in
+ *
+ * Description:
+ *   vxfs_find_entry finds a &struct vxfs_direct for the VFS directory
+ *   cache entry @dp.  @ppp will be filled with the page the return
+ *   value resides in.
+ *
+ * Returns:
+ *   The wanted direct on success, else a NULL pointer.
+ */
+static struct vxfs_direct *
+vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
+{
+	u_long				npages, page, nblocks, pblocks, block;
+	u_long				bsize = ip->i_sb->s_blocksize;
+	const char			*name = dp->d_name.name;
+	int				namelen = dp->d_name.len;
+
+	npages = dir_pages(ip);
+	nblocks = dir_blocks(ip);
+	pblocks = VXFS_BLOCK_PER_PAGE(ip->i_sb);
+	
+	for (page = 0; page < npages; page++) {
+		caddr_t			kaddr;
+		struct page		*pp;
+
+		pp = vxfs_get_page(ip->i_mapping, page);
+		if (IS_ERR(pp))
+			continue;
+		kaddr = (caddr_t)page_address(pp);
+
+		for (block = 0; block <= nblocks && block <= pblocks; block++) {
+			caddr_t			baddr, limit;
+			struct vxfs_dirblk	*dbp;
+			struct vxfs_direct	*de;
+
+			baddr = kaddr + (block * bsize);
+			limit = baddr + bsize - VXFS_DIRLEN(1);
+			
+			dbp = (struct vxfs_dirblk *)baddr;
+			de = (struct vxfs_direct *)(baddr + VXFS_DIRBLKOV(dbp));
+
+			for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
+				if (!de->d_reclen)
+					break;
+				if (!de->d_ino)
+					continue;
+				if (vxfs_match(namelen, name, de)) {
+					*ppp = pp;
+					return (de);
+				}
+			}
+		}
+		vxfs_put_page(pp);
+	}
+
+	return NULL;
+}
+
+/**
+ * vxfs_inode_by_name - find inode number for dentry
+ * @dip:	directory to search in
+ * @dp:		dentry we search for
+ *
+ * Description:
+ *   vxfs_inode_by_name finds out the inode number of
+ *   the path component described by @dp in @dip.
+ *
+ * Returns:
+ *   The wanted inode number on success, else Zero.
+ */
+static ino_t
+vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
+{
+	struct vxfs_direct		*de;
+	struct page			*pp;
+	ino_t				ino = 0;
+
+	de = vxfs_find_entry(dip, dp, &pp);
+	if (de) {
+		ino = de->d_ino;
+		kunmap(pp);
+		page_cache_release(pp);
+	}
+	
+	return (ino);
+}
+
+/**
+ * vxfs_lookup - lookup pathname component
+ * @dip:	dir in which we lookup
+ * @dp:		dentry we lookup
+ * @nd:		lookup nameidata
+ *
+ * Description:
+ *   vxfs_lookup tries to lookup the pathname component described
+ *   by @dp in @dip.
+ *
+ * Returns:
+ *   A NULL-pointer on success, else a negative error code encoded
+ *   in the return pointer.
+ */
+static struct dentry *
+vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd)
+{
+	struct inode		*ip = NULL;
+	ino_t			ino;
+			 
+	if (dp->d_name.len > VXFS_NAMELEN)
+		return ERR_PTR(-ENAMETOOLONG);
+				 
+	lock_kernel();
+	ino = vxfs_inode_by_name(dip, dp);
+	if (ino) {
+		ip = iget(dip->i_sb, ino);
+		if (!ip) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	d_add(dp, ip);
+	return NULL;
+}
+
+/**
+ * vxfs_readdir - read a directory
+ * @fp:		the directory to read
+ * @retp:	return buffer
+ * @filler:	filldir callback
+ *
+ * Description:
+ *   vxfs_readdir fills @retp with directory entries from @fp
+ *   using the VFS supplied callback @filler.
+ *
+ * Returns:
+ *   Zero.
+ */
+static int
+vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
+{
+	struct inode		*ip = fp->f_dentry->d_inode;
+	struct super_block	*sbp = ip->i_sb;
+	u_long			bsize = sbp->s_blocksize;
+	u_long			page, npages, block, pblocks, nblocks, offset;
+	loff_t			pos;
+
+	lock_kernel();
+
+	switch ((long)fp->f_pos) {
+	case 0:
+		if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
+			goto out;
+		fp->f_pos++;
+		/* fallthrough */
+	case 1:
+		if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0)
+			goto out;
+		fp->f_pos++;
+		/* fallthrough */
+	}
+
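+	/*
+	 * Positions 0 and 1 are reserved for "." and ".."; from 2
+	 * onwards f_pos encodes the byte offset of the next entry as
+	 * ((page << PAGE_CACHE_SHIFT) | offset) + 2.
+	 */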
+	pos = fp->f_pos - 2;
+	
+	if (pos > VXFS_DIRROUND(ip->i_size)) {
+		unlock_kernel();
+		return 0;
+	}
+
+	npages = dir_pages(ip);
+	nblocks = dir_blocks(ip);
+	pblocks = VXFS_BLOCK_PER_PAGE(sbp);
+
+	page = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_CACHE_MASK;
+	block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
+
+	for (; page < npages; page++, block = 0) {
+		caddr_t			kaddr;
+		struct page		*pp;
+
+		pp = vxfs_get_page(ip->i_mapping, page);
+		if (IS_ERR(pp))
+			continue;
+		kaddr = (caddr_t)page_address(pp);
+
+		for (; block <= nblocks && block <= pblocks; block++) {
+			caddr_t			baddr, limit;
+			struct vxfs_dirblk	*dbp;
+			struct vxfs_direct	*de;
+
+			baddr = kaddr + (block * bsize);
+			limit = baddr + bsize - VXFS_DIRLEN(1);
+	
+			dbp = (struct vxfs_dirblk *)baddr;
+			de = (struct vxfs_direct *)
+				(offset ?
+				 (kaddr + offset) :
+				 (baddr + VXFS_DIRBLKOV(dbp)));
+
+			for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
+				int	over;
+
+				if (!de->d_reclen)
+					break;
+				if (!de->d_ino)
+					continue;
+
+				offset = (caddr_t)de - kaddr;
+				over = filler(retp, de->d_name, de->d_namelen,
+					((page << PAGE_CACHE_SHIFT) | offset) + 2,
+					de->d_ino, DT_UNKNOWN);
+				if (over) {
+					vxfs_put_page(pp);
+					goto done;
+				}
+			}
+			offset = 0;
+		}
+		vxfs_put_page(pp);
+		offset = 0;
+	}
+
+done:
+	fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+out:
+	unlock_kernel();
+	return 0;
+}
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
new file mode 100644
index 0000000..7a204e3
--- /dev/null
+++ b/fs/freevxfs/vxfs_olt.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* 
+ * Veritas filesystem driver - object location table support.
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+
+#include "vxfs.h"
+#include "vxfs_olt.h"
+
+
+static __inline__ void
+vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
+{
+	if (infp->vsi_fshino)
+		BUG();
+	infp->vsi_fshino = fshp->olt_fsino[0];
+}
+
+static __inline__ void
+vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
+{
+	if (infp->vsi_iext)
+		BUG();
+	infp->vsi_iext = ilistp->olt_iext[0]; 
+}
+
+static __inline__ u_long
+vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
+{
+	if (sbp->s_blocksize % bsize)
+		BUG();
+	return (block * (sbp->s_blocksize / bsize));
+}
+
+
+/**
+ * vxfs_read_olt - read olt
+ * @sbp:	superblock of the filesystem
+ * @bsize:	blocksize of the filesystem
+ *
+ * Description:
+ *   vxfs_read_olt reads the olt of the filesystem described by @sbp
+ *   into main memory and does some basic setup.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code.
+ */
+int
+vxfs_read_olt(struct super_block *sbp, u_long bsize)
+{
+	struct vxfs_sb_info	*infp = VXFS_SBI(sbp);
+	struct buffer_head	*bp;
+	struct vxfs_olt		*op;
+	char			*oaddr, *eaddr;
+
+
+	bp = sb_bread(sbp, vxfs_oblock(sbp, infp->vsi_oltext, bsize));
+	if (!bp || !bp->b_data)
+		goto fail;
+
+	op = (struct vxfs_olt *)bp->b_data;
+	if (op->olt_magic != VXFS_OLT_MAGIC) {
+		printk(KERN_NOTICE "vxfs: invalid olt magic number\n");
+		goto fail;
+	}
+
+	/*
+	 * It is in theory possible that vsi_oltsize is > 1.
+	 * I've not seen any such filesystem yet and I'm lazy..  --hch
+	 */
+	if (infp->vsi_oltsize > 1) {
+		printk(KERN_NOTICE "vxfs: oltsize > 1 detected.\n");
+		printk(KERN_NOTICE "vxfs: please notify hch@infradead.org\n");
+		goto fail;
+	}
+
+	oaddr = (char *)bp->b_data + op->olt_size;
+	eaddr = (char *)bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);
+
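+	/*
+	 * Each OLT record starts with a struct vxfs_oltcommon giving
+	 * its type and size; the size is used to step from one record
+	 * to the next until the end of the extent.
+	 */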
+	while (oaddr < eaddr) {
+		struct vxfs_oltcommon	*ocp =
+			(struct vxfs_oltcommon *)oaddr;
+		
+		switch (ocp->olt_type) {
+		case VXFS_OLT_FSHEAD:
+			vxfs_get_fshead((struct vxfs_oltfshead *)oaddr, infp);
+			break;
+		case VXFS_OLT_ILIST:
+			vxfs_get_ilist((struct vxfs_oltilist *)oaddr, infp);
+			break;
+		}
+
+		oaddr += ocp->olt_size;
+	}
+
+	brelse(bp);
+	return 0;
+
+fail:
+	brelse(bp);
+	return -EINVAL;
+}
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
new file mode 100644
index 0000000..d832429
--- /dev/null
+++ b/fs/freevxfs/vxfs_olt.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _VXFS_OLT_H_
+#define _VXFS_OLT_H_
+
+/*
+ * Veritas filesystem driver - Object Location Table data structures.
+ *
+ * This file contains definitions for the Object Location Table used
+ * by the Veritas Filesystem version 2 and newer.
+ */
+
+
+/*
+ * OLT magic number (vxfs_olt->olt_magic).
+ */
+#define VXFS_OLT_MAGIC		0xa504FCF5
+
+/*
+ * VxFS OLT entry types.
+ */
+enum {
+	VXFS_OLT_FREE	= 1,
+	VXFS_OLT_FSHEAD	= 2,
+	VXFS_OLT_CUT	= 3,
+	VXFS_OLT_ILIST	= 4,
+	VXFS_OLT_DEV	= 5,
+	VXFS_OLT_SB	= 6
+};
+
+/*
+ * VxFS OLT header.
+ *
+ * The Object Location Table header is placed at the beginning of each
+ * OLT extent.  It is used to find certain filesystem-wide metadata, e.g.
+ * the initial inode list, the fileset header or the device configuration.
+ */
+struct vxfs_olt {
+	u_int32_t	olt_magic;	/* magic number			*/
+	u_int32_t	olt_size;	/* size of this entry		*/
+	u_int32_t	olt_checksum;	/* checksum of extent		*/
+	u_int32_t	__unused1;	/* ???				*/
+	u_int32_t	olt_mtime;	/* time of last mod. (sec)	*/
+	u_int32_t	olt_mutime;	/* time of last mod. (usec)	*/
+	u_int32_t	olt_totfree;	/* free space in OLT extent	*/
+	vx_daddr_t	olt_extents[2];	/* addr of this extent, replica	*/
+	u_int32_t	olt_esize;	/* size of this extent		*/
+	vx_daddr_t	olt_next[2];    /* addr of next extent, replica	*/
+	u_int32_t	olt_nsize;	/* size of next extent		*/
+	u_int32_t	__unused2;	/* align to 8 byte boundary	*/
+};
+
+/*
+ * VxFS common OLT entry (on disk).
+ */
+struct vxfs_oltcommon {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_size;	/* size of this record		*/
+};
+
+/*
+ * VxFS free OLT entry (on disk).
+ */
+struct vxfs_oltfree {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_fsize;	/* size of this free record	*/
+};
+
+/*
+ * VxFS initial-inode list (on disk).
+ */
+struct vxfs_oltilist {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_size;	/* size of this record		*/
+	vx_ino_t	olt_iext[2];	/* initial inode list, replica	*/
+};
+
+/*
+ * Current Usage Table 
+ */
+struct vxfs_oltcut {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_size;	/* size of this record		*/
+	vx_ino_t	olt_cutino;	/* inode of current usage table	*/
+	u_int32_t	__pad;		/* unused, 8 byte align		*/
+};
+
+/*
+ * Inodes containing Superblock, Intent log and OLTs 
+ */
+struct vxfs_oltsb {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_size;	/* size of this record		*/
+	vx_ino_t	olt_sbino;	/* inode of superblock file	*/
+	u_int32_t	__unused1;	/* ???				*/
+	vx_ino_t	olt_logino[2];	/* inode of log file,replica	*/
+	vx_ino_t	olt_oltino[2];	/* inode of OLT, replica	*/
+};
+
+/*
+ * Inode containing device configuration + its replica
+ */
+struct vxfs_oltdev {
+	u_int32_t	olt_type;	/* type of this record		*/
+	u_int32_t	olt_size;	/* size of this record		*/
+	vx_ino_t	olt_devino[2];	/* inode of device config files	*/
+};
+
+/*
+ * Fileset header 
+ */
+struct vxfs_oltfshead {
+	u_int32_t	olt_type;	/* type number			*/
+	u_int32_t	olt_size;	/* size of this record		*/
+	vx_ino_t	olt_fsino[2];   /* inodes of fileset header	*/
+};
+
+#endif /* _VXFS_OLT_H_ */
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
new file mode 100644
index 0000000..5e30561
--- /dev/null
+++ b/fs/freevxfs/vxfs_subr.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - shared subroutines.
+ */
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+
+#include "vxfs_kcompat.h"
+#include "vxfs_extern.h"
+
+
+static int		vxfs_readpage(struct file *, struct page *);
+static sector_t		vxfs_bmap(struct address_space *, sector_t);
+
+struct address_space_operations vxfs_aops = {
+	.readpage =		vxfs_readpage,
+	.bmap =			vxfs_bmap,
+	.sync_page =		block_sync_page,
+};
+
+inline void
+vxfs_put_page(struct page *pp)
+{
+	kunmap(pp);
+	page_cache_release(pp);
+}
+
+/**
+ * vxfs_get_page - read a page into memory.
+ * @mapping:	address space to read from
+ * @n:		page number
+ *
+ * Description:
+ *   vxfs_get_page reads page @n of @mapping into the pagecache.
+ *
+ * Returns:
+ *   The wanted page on success, else a NULL pointer.
+ */
+struct page *
+vxfs_get_page(struct address_space *mapping, u_long n)
+{
+	struct page *			pp;
+
+	pp = read_cache_page(mapping, n,
+			(filler_t*)mapping->a_ops->readpage, NULL);
+
+	if (!IS_ERR(pp)) {
+		wait_on_page_locked(pp);
+		kmap(pp);
+		if (!PageUptodate(pp))
+			goto fail;
+		/** if (!PageChecked(pp)) **/
+			/** vxfs_check_page(pp); **/
+		if (PageError(pp))
+			goto fail;
+	}
+	
+	return (pp);
+		 
+fail:
+	vxfs_put_page(pp);
+	return ERR_PTR(-EIO);
+}
+
+/**
+ * vxfs_bread - read buffer for a given (inode, block) tuple
+ * @ip:		inode
+ * @block:	logical block
+ *
+ * Description:
+ *   The vxfs_bread function reads block number @block of
+ *   @ip into the buffercache.
+ *
+ * Returns:
+ *   The resulting &struct buffer_head.
+ */
+struct buffer_head *
+vxfs_bread(struct inode *ip, int block)
+{
+	struct buffer_head	*bp;
+	daddr_t			pblock;
+
+	pblock = vxfs_bmap1(ip, block);
+	bp = sb_bread(ip->i_sb, pblock);
+
+	return (bp);
+}
+
+/**
+ * vxfs_getblk - locate buffer for a given (inode, block) tuple
+ * @ip:		inode
+ * @iblock:	logical block
+ * @bp:		buffer skeleton
+ * @create:	%TRUE if blocks may be newly allocated.
+ *
+ * Description:
+ *   The vxfs_getblk function fills @bp with the right physical
+ *   block and device number to perform a lowlevel read/write on
+ *   it.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code (-EIO).
+ */
+static int
+vxfs_getblk(struct inode *ip, sector_t iblock,
+	    struct buffer_head *bp, int create)
+{
+	daddr_t			pblock;
+
+	pblock = vxfs_bmap1(ip, iblock);
+	if (pblock != 0) {
+		map_bh(bp, ip->i_sb, pblock);
+		return 0;
+	}
+
+	return -EIO;
+}
+
+/**
+ * vxfs_readpage - read one page synchronously into the pagecache
+ * @file:	file context (unused)
+ * @page:	page frame to fill in.
+ *
+ * Description:
+ *   The vxfs_readpage routine reads @page synchronously into the
+ *   pagecache.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code.
+ *
+ * Locking status:
+ *   @page is locked and will be unlocked.
+ */
+static int
+vxfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, vxfs_getblk);
+}
+ 
+/**
+ * vxfs_bmap - perform logical to physical block mapping
+ * @mapping:	logical to physical mapping to use
+ * @block:	logical block (relative to @mapping).
+ *
+ * Description:
+ *   vxfs_bmap finds the physical block corresponding to the
+ *   (@mapping, @block) pair.
+ *
+ * Returns:
+ *   Physical block number on success, else Zero.
+ *
+ * Locking status:
+ *   We are under the bkl.
+ */
+static sector_t
+vxfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, vxfs_getblk);
+}
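
All three read paths above funnel through a single logical-to-physical block
mapping; block_read_full_page() and generic_block_bmap() only differ in what
they do with the mapped block. As a rough userspace illustration of that
callback shape (the extent table and every name below are invented for the
sketch, not part of the driver):

/* toy "get block" callback: map a logical block to a physical one */
#include <stdio.h>

typedef long sector_t;

/* pretend the file occupies one contiguous extent starting at block 100 */
static sector_t toy_getblk(sector_t logical)
{
	const sector_t extent_start = 100, extent_len = 8;

	if (logical < extent_len)
		return extent_start + logical;	/* mapped */
	return 0;				/* hole / out of range */
}

/* bmap-style helper: just report the mapping */
static sector_t toy_bmap(sector_t logical)
{
	return toy_getblk(logical);
}

int main(void)
{
	for (sector_t b = 0; b < 10; b++)
		printf("logical %ld -> physical %ld\n", (long)b, (long)toy_bmap(b));
	return 0;
}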
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
new file mode 100644
index 0000000..0ae2c7b
--- /dev/null
+++ b/fs/freevxfs/vxfs_super.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2000-2001 Christoph Hellwig.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Veritas filesystem driver - superblock related routines.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/vfs.h>
+
+#include "vxfs.h"
+#include "vxfs_extern.h"
+#include "vxfs_dir.h"
+#include "vxfs_inode.h"
+
+
+MODULE_AUTHOR("Christoph Hellwig");
+MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_ALIAS("vxfs"); /* makes mount -t vxfs autoload the module */
+
+
+static void		vxfs_put_super(struct super_block *);
+static int		vxfs_statfs(struct super_block *, struct kstatfs *);
+static int		vxfs_remount(struct super_block *, int *, char *);
+
+static struct super_operations vxfs_super_ops = {
+	.read_inode =		vxfs_read_inode,
+	.clear_inode =		vxfs_clear_inode,
+	.put_super =		vxfs_put_super,
+	.statfs =		vxfs_statfs,
+	.remount_fs =		vxfs_remount,
+};
+
+/**
+ * vxfs_put_super - free superblock resources
+ * @sbp:	VFS superblock.
+ *
+ * Description:
+ *   vxfs_put_super frees all resources allocated for @sbp
+ *   after the last instance of the filesystem is unmounted.
+ */
+
+static void
+vxfs_put_super(struct super_block *sbp)
+{
+	struct vxfs_sb_info	*infp = VXFS_SBI(sbp);
+
+	vxfs_put_fake_inode(infp->vsi_fship);
+	vxfs_put_fake_inode(infp->vsi_ilist);
+	vxfs_put_fake_inode(infp->vsi_stilist);
+
+	brelse(infp->vsi_bp);
+	kfree(infp);
+}
+
+/**
+ * vxfs_statfs - get filesystem information
+ * @sbp:	VFS superblock
+ * @bufp:	output buffer
+ *
+ * Description:
+ *   vxfs_statfs fills the statfs buffer @bufp with information
+ *   about the filesystem described by @sbp.
+ *
+ * Returns:
+ *   Zero.
+ *
+ * Locking:
+ *   No locks held.
+ *
+ * Notes:
+ *   This is everything but complete...
+ */
+static int
+vxfs_statfs(struct super_block *sbp, struct kstatfs *bufp)
+{
+	struct vxfs_sb_info		*infp = VXFS_SBI(sbp);
+
+	bufp->f_type = VXFS_SUPER_MAGIC;
+	bufp->f_bsize = sbp->s_blocksize;
+	bufp->f_blocks = infp->vsi_raw->vs_dsize;
+	bufp->f_bfree = infp->vsi_raw->vs_free;
+	bufp->f_bavail = 0;
+	bufp->f_files = 0;
+	bufp->f_ffree = infp->vsi_raw->vs_ifree;
+	bufp->f_namelen = VXFS_NAMELEN;
+
+	return 0;
+}
+
+static int vxfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+/**
+ * vxfs_fill_super - read superblock into memory and initialize filesystem
+ * @sbp:		VFS superblock (to fill)
+ * @dp:			fs private mount data
+ * @silent:		do not complain loudly when something is wrong
+ *
+ * Description:
+ *   We are called on the first mount of a filesystem to read the
+ *   superblock into memory and do some basic setup.
+ *
+ * Returns:
+ *   Zero on success, else a negative error code.
+ *
+ * Locking:
+ *   We are under the bkl and @sbp->s_lock.
+ */
+static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
+{
+	struct vxfs_sb_info	*infp;
+	struct vxfs_sb		*rsbp;
+	struct buffer_head	*bp = NULL;
+	u_long			bsize;
+	struct inode *root;
+
+	sbp->s_flags |= MS_RDONLY;
+
+	infp = kmalloc(sizeof(*infp), GFP_KERNEL);
+	if (!infp) {
+		printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n");
+		return -ENOMEM;
+	}
+	memset(infp, 0, sizeof(*infp));
+
+	bsize = sb_min_blocksize(sbp, BLOCK_SIZE);
+	if (!bsize) {
+		printk(KERN_WARNING "vxfs: unable to set blocksize\n");
+		goto out;
+	}
+
+	bp = sb_bread(sbp, 1);
+	if (!bp || !buffer_mapped(bp)) {
+		if (!silent) {
+			printk(KERN_WARNING
+				"vxfs: unable to read disk superblock\n");
+		}
+		goto out;
+	}
+
+	rsbp = (struct vxfs_sb *)bp->b_data;
+	if (rsbp->vs_magic != VXFS_SUPER_MAGIC) {
+		if (!silent)
+			printk(KERN_NOTICE "vxfs: WRONG superblock magic\n");
+		goto out;
+	}
+
+	if (rsbp->vs_version < 2 || rsbp->vs_version > 4) {
+		if (!silent)
+			printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n",
+			       rsbp->vs_version);
+		goto out;
+	}
+
+#ifdef DIAGNOSTIC
+	printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", rsbp->vs_version);
+	printk(KERN_DEBUG "vxfs: blocksize: %d\n", rsbp->vs_bsize);
+#endif
+
+	sbp->s_magic = rsbp->vs_magic;
+	sbp->s_fs_info = (void *)infp;
+
+	infp->vsi_raw = rsbp;
+	infp->vsi_bp = bp;
+	infp->vsi_oltext = rsbp->vs_oltext[0];
+	infp->vsi_oltsize = rsbp->vs_oltsize;
+
+	if (!sb_set_blocksize(sbp, rsbp->vs_bsize)) {
+		printk(KERN_WARNING "vxfs: unable to set final block size\n");
+		goto out;
+	}
+
+	if (vxfs_read_olt(sbp, bsize)) {
+		printk(KERN_WARNING "vxfs: unable to read olt\n");
+		goto out;
+	}
+
+	if (vxfs_read_fshead(sbp)) {
+		printk(KERN_WARNING "vxfs: unable to read fshead\n");
+		goto out;
+	}
+
+	sbp->s_op = &vxfs_super_ops;
+	root = iget(sbp, VXFS_ROOT_INO);
+	sbp->s_root = d_alloc_root(root);
+	if (!sbp->s_root) {
+		iput(root);
+		printk(KERN_WARNING "vxfs: unable to get root dentry.\n");
+		goto out_free_ilist;
+	}
+
+	return 0;
+	
+out_free_ilist:
+	vxfs_put_fake_inode(infp->vsi_fship);
+	vxfs_put_fake_inode(infp->vsi_ilist);
+	vxfs_put_fake_inode(infp->vsi_stilist);
+out:
+	brelse(bp);
+	kfree(infp);
+	return -EINVAL;
+}
+
+/*
+ * The usual module blurb.
+ */
+static struct super_block *vxfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, vxfs_fill_super);
+}
+
+static struct file_system_type vxfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "vxfs",
+	.get_sb		= vxfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init
+vxfs_init(void)
+{
+	vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
+			sizeof(struct vxfs_inode_info), 0, 
+			SLAB_RECLAIM_ACCOUNT, NULL, NULL);
+	if (vxfs_inode_cachep)
+		return (register_filesystem(&vxfs_fs_type));
+	return -ENOMEM;
+}
+
+static void __exit
+vxfs_cleanup(void)
+{
+	unregister_filesystem(&vxfs_fs_type);
+	kmem_cache_destroy(vxfs_inode_cachep);
+}
+
+module_init(vxfs_init);
+module_exit(vxfs_cleanup);
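
For reference, a hedged sketch of exercising this driver from userspace via
mount(2); the device path is hypothetical, and a real VxFS volume plus root
privileges are assumed. Since vxfs_fill_super() forces MS_RDONLY, the mount
is read-only regardless of the flags passed in.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "vxfs" matches vxfs_fs_type.name, so the module can be autoloaded */
	if (mount("/dev/sdb1", "/mnt/vxfs", "vxfs", MS_RDONLY, NULL) != 0) {
		perror("mount");
		return 1;
	}
	puts("mounted read-only");
	return 0;
}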
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
new file mode 100644
index 0000000..d6efb36
--- /dev/null
+++ b/fs/fs-writeback.c
@@ -0,0 +1,695 @@
+/*
+ * fs/fs-writeback.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ *
+ * Contains all the functions related to writing back and waiting
+ * upon dirty inodes against superblocks, and writing back dirty
+ * pages against inodes.  ie: data writeback.  Writeout of the
+ * inode itself is not handled here.
+ *
+ * 10Apr2002	akpm@zip.com.au
+ *		Split out of fs/inode.c
+ *		Additions for address_space-based writeback
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+
+extern struct super_block *blockdev_superblock;
+
+/**
+ *	__mark_inode_dirty -	internal function
+ *	@inode: inode to mark
+ *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
+ *	Mark an inode as dirty. Callers should use mark_inode_dirty or
+ *  	mark_inode_dirty_sync.
+ *
+ * Put the inode on the super block's dirty list.
+ *
+ * CAREFUL! We mark it dirty unconditionally, but move it onto the
+ * dirty list only if it is hashed or if it refers to a blockdev.
+ * If it was not hashed, it will never be added to the dirty list
+ * even if it is later hashed, as it will have been marked dirty already.
+ *
+ * In short, make sure you hash any inodes _before_ you start marking
+ * them dirty.
+ *
+ * This function *must* be atomic for the I_DIRTY_PAGES case -
+ * set_page_dirty() is called under spinlock in several places.
+ *
+ * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
+ * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
+ * the kernel-internal blockdev inode represents the dirtying time of the
+ * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
+ * page->mapping->host, so the page-dirtying time is recorded in the internal
+ * blockdev inode.
+ */
+void __mark_inode_dirty(struct inode *inode, int flags)
+{
+	struct super_block *sb = inode->i_sb;
+
+	/*
+	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
+	 * dirty the inode itself
+	 */
+	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+		if (sb->s_op->dirty_inode)
+			sb->s_op->dirty_inode(inode);
+	}
+
+	/*
+	 * make sure that changes are seen by all cpus before we test i_state
+	 * -- mikulas
+	 */
+	smp_mb();
+
+	/* avoid the locking if we can */
+	if ((inode->i_state & flags) == flags)
+		return;
+
+	if (unlikely(block_dump)) {
+		struct dentry *dentry = NULL;
+		const char *name = "?";
+
+		if (!list_empty(&inode->i_dentry)) {
+			dentry = list_entry(inode->i_dentry.next,
+					    struct dentry, d_alias);
+			if (dentry && dentry->d_name.name)
+				name = (const char *) dentry->d_name.name;
+		}
+
+		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
+			printk(KERN_DEBUG
+			       "%s(%d): dirtied inode %lu (%s) on %s\n",
+			       current->comm, current->pid, inode->i_ino,
+			       name, inode->i_sb->s_id);
+	}
+
+	spin_lock(&inode_lock);
+	if ((inode->i_state & flags) != flags) {
+		const int was_dirty = inode->i_state & I_DIRTY;
+
+		inode->i_state |= flags;
+
+		/*
+		 * If the inode is locked, just update its dirty state. 
+		 * The unlocker will place the inode on the appropriate
+		 * superblock list, based upon its state.
+		 */
+		if (inode->i_state & I_LOCK)
+			goto out;
+
+		/*
+		 * Only add valid (hashed) inodes to the superblock's
+		 * dirty list.  Add blockdev inodes as well.
+		 */
+		if (!S_ISBLK(inode->i_mode)) {
+			if (hlist_unhashed(&inode->i_hash))
+				goto out;
+		}
+		if (inode->i_state & (I_FREEING|I_CLEAR))
+			goto out;
+
+		/*
+		 * If the inode was already on s_dirty or s_io, don't
+		 * reposition it (that would break s_dirty time-ordering).
+		 */
+		if (!was_dirty) {
+			inode->dirtied_when = jiffies;
+			list_move(&inode->i_list, &sb->s_dirty);
+		}
+	}
+out:
+	spin_unlock(&inode_lock);
+}
+
+EXPORT_SYMBOL(__mark_inode_dirty);
+
+static int write_inode(struct inode *inode, int sync)
+{
+	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
+		return inode->i_sb->s_op->write_inode(inode, sync);
+	return 0;
+}
+
+/*
+ * Write a single inode's dirty pages and inode data out to disk.
+ * If `wait' is set, wait on the writeout.
+ *
+ * The whole writeout design is quite complex and fragile.  We want to avoid
+ * starvation of particular inodes when others are being redirtied, prevent
+ * livelocks, etc.
+ *
+ * Called under inode_lock.
+ */
+static int
+__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	unsigned dirty;
+	struct address_space *mapping = inode->i_mapping;
+	struct super_block *sb = inode->i_sb;
+	int wait = wbc->sync_mode == WB_SYNC_ALL;
+	int ret;
+
+	BUG_ON(inode->i_state & I_LOCK);
+
+	/* Set I_LOCK, reset I_DIRTY */
+	dirty = inode->i_state & I_DIRTY;
+	inode->i_state |= I_LOCK;
+	inode->i_state &= ~I_DIRTY;
+
+	spin_unlock(&inode_lock);
+
+	ret = do_writepages(mapping, wbc);
+
+	/* Don't write the inode if only I_DIRTY_PAGES was set */
+	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+		int err = write_inode(inode, wait);
+		if (ret == 0)
+			ret = err;
+	}
+
+	if (wait) {
+		int err = filemap_fdatawait(mapping);
+		if (ret == 0)
+			ret = err;
+	}
+
+	spin_lock(&inode_lock);
+	inode->i_state &= ~I_LOCK;
+	if (!(inode->i_state & I_FREEING)) {
+		if (!(inode->i_state & I_DIRTY) &&
+		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+			/*
+			 * We didn't write back all the pages.  nfs_writepages()
+			 * sometimes bails out without doing anything. Redirty
+			 * the inode.  It is still on sb->s_io.
+			 */
+			if (wbc->for_kupdate) {
+				/*
+				 * For the kupdate function we leave the inode
+				 * at the head of sb_dirty so it will get more
+				 * writeout as soon as the queue becomes
+				 * uncongested.
+				 */
+				inode->i_state |= I_DIRTY_PAGES;
+				list_move_tail(&inode->i_list, &sb->s_dirty);
+			} else {
+				/*
+				 * Otherwise fully redirty the inode so that
+				 * other inodes on this superblock will get some
+				 * writeout.  Otherwise heavy writing to one
+				 * file would indefinitely suspend writeout of
+				 * all the other files.
+				 */
+				inode->i_state |= I_DIRTY_PAGES;
+				inode->dirtied_when = jiffies;
+				list_move(&inode->i_list, &sb->s_dirty);
+			}
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * Someone redirtied the inode while we were writing back
+			 * the pages.
+			 */
+			list_move(&inode->i_list, &sb->s_dirty);
+		} else if (atomic_read(&inode->i_count)) {
+			/*
+			 * The inode is clean, inuse
+			 */
+			list_move(&inode->i_list, &inode_in_use);
+		} else {
+			/*
+			 * The inode is clean, unused
+			 */
+			list_move(&inode->i_list, &inode_unused);
+			inodes_stat.nr_unused++;
+		}
+	}
+	wake_up_inode(inode);
+	return ret;
+}
+
+/*
+ * Write out an inode's dirty pages.  Called under inode_lock.
+ */
+static int
+__writeback_single_inode(struct inode *inode,
+			struct writeback_control *wbc)
+{
+	wait_queue_head_t *wqh;
+
+	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
+		list_move(&inode->i_list, &inode->i_sb->s_dirty);
+		return 0;
+	}
+
+	/*
+	 * It's a data-integrity sync.  We must wait.
+	 */
+	if (inode->i_state & I_LOCK) {
+		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);
+
+		wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
+		do {
+			__iget(inode);
+			spin_unlock(&inode_lock);
+			__wait_on_bit(wqh, &wq, inode_wait,
+							TASK_UNINTERRUPTIBLE);
+			iput(inode);
+			spin_lock(&inode_lock);
+		} while (inode->i_state & I_LOCK);
+	}
+	return __sync_single_inode(inode, wbc);
+}
+
+/*
+ * Write out a superblock's list of dirty inodes.  A wait will be performed
+ * upon no inodes, all inodes or the final one, depending upon sync_mode.
+ *
+ * If older_than_this is non-NULL, then only write out inodes which
+ * had their first dirtying at a time earlier than *older_than_this.
+ *
+ * If we're a pdflush thread, then implement pdflush collision avoidance
+ * against the entire list.
+ *
+ * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
+ * that it can be located for waiting on in __writeback_single_inode().
+ *
+ * Called under inode_lock.
+ *
+ * If `bdi' is non-zero then we're being asked to writeback a specific queue.
+ * This function assumes that the blockdev superblock's inodes are backed by
+ * a variety of queues, so all inodes are searched.  For other superblocks,
+ * assume that all inodes are backed by the same queue.
+ *
+ * FIXME: this linear search could get expensive with many filesystems.  But
+ * how to fix?  We need to go from an address_space to all inodes which share
+ * a queue with that address_space.  (Easy: have a global "dirty superblocks"
+ * list).
+ *
+ * The inodes to be written are parked on sb->s_io.  They are moved back onto
+ * sb->s_dirty as they are selected for writing.  This way, none can be missed
+ * on the writer throttling path, and we get decent balancing between many
+ * throttled threads: we don't want them all piling up on __wait_on_inode.
+ */
+static void
+sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
+{
+	const unsigned long start = jiffies;	/* livelock avoidance */
+
+	if (!wbc->for_kupdate || list_empty(&sb->s_io))
+		list_splice_init(&sb->s_dirty, &sb->s_io);
+
+	while (!list_empty(&sb->s_io)) {
+		struct inode *inode = list_entry(sb->s_io.prev,
+						struct inode, i_list);
+		struct address_space *mapping = inode->i_mapping;
+		struct backing_dev_info *bdi = mapping->backing_dev_info;
+		long pages_skipped;
+
+		if (!bdi_cap_writeback_dirty(bdi)) {
+			list_move(&inode->i_list, &sb->s_dirty);
+			if (sb == blockdev_superblock) {
+				/*
+				 * Dirty memory-backed blockdev: the ramdisk
+				 * driver does this.  Skip just this inode
+				 */
+				continue;
+			}
+			/*
+			 * Dirty memory-backed inode against a filesystem other
+			 * than the kernel-internal bdev filesystem.  Skip the
+			 * entire superblock.
+			 */
+			break;
+		}
+
+		if (wbc->nonblocking && bdi_write_congested(bdi)) {
+			wbc->encountered_congestion = 1;
+			if (sb != blockdev_superblock)
+				break;		/* Skip a congested fs */
+			list_move(&inode->i_list, &sb->s_dirty);
+			continue;		/* Skip a congested blockdev */
+		}
+
+		if (wbc->bdi && bdi != wbc->bdi) {
+			if (sb != blockdev_superblock)
+				break;		/* fs has the wrong queue */
+			list_move(&inode->i_list, &sb->s_dirty);
+			continue;		/* blockdev has wrong queue */
+		}
+
+		/* Was this inode dirtied after sync_sb_inodes was called? */
+		if (time_after(inode->dirtied_when, start))
+			break;
+
+		/* Was this inode dirtied too recently? */
+		if (wbc->older_than_this && time_after(inode->dirtied_when,
+						*wbc->older_than_this))
+			break;
+
+		/* Is another pdflush already flushing this queue? */
+		if (current_is_pdflush() && !writeback_acquire(bdi))
+			break;
+
+		BUG_ON(inode->i_state & I_FREEING);
+		__iget(inode);
+		pages_skipped = wbc->pages_skipped;
+		__writeback_single_inode(inode, wbc);
+		if (wbc->sync_mode == WB_SYNC_HOLD) {
+			inode->dirtied_when = jiffies;
+			list_move(&inode->i_list, &sb->s_dirty);
+		}
+		if (current_is_pdflush())
+			writeback_release(bdi);
+		if (wbc->pages_skipped != pages_skipped) {
+			/*
+			 * writeback is not making progress due to locked
+			 * buffers.  Skip this inode for now.
+			 */
+			list_move(&inode->i_list, &sb->s_dirty);
+		}
+		spin_unlock(&inode_lock);
+		cond_resched();
+		iput(inode);
+		spin_lock(&inode_lock);
+		if (wbc->nr_to_write <= 0)
+			break;
+	}
+	return;		/* Leave any unwritten inodes on s_io */
+}
+
+/*
+ * Start writeback of dirty pagecache data against all unlocked inodes.
+ *
+ * Note:
+ * We don't need to grab a reference to superblock here. If it has non-empty
+ * ->s_dirty it hasn't been killed yet and kill_super() won't proceed
+ * past sync_inodes_sb() until both the ->s_dirty and ->s_io lists are
+ * empty. Since __sync_single_inode() regains inode_lock before it finally moves
+ * inode from superblock lists we are OK.
+ *
+ * If `older_than_this' is non-zero then only flush inodes which have a
+ * flushtime older than *older_than_this.
+ *
+ * If `bdi' is non-zero then we will scan the first inode against each
+ * superblock until we find the matching ones.  One group will be the dirty
+ * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
+ * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
+ * super-efficient but we're about to do a ton of I/O...
+ */
+void
+writeback_inodes(struct writeback_control *wbc)
+{
+	struct super_block *sb;
+
+	might_sleep();
+	spin_lock(&sb_lock);
+restart:
+	sb = sb_entry(super_blocks.prev);
+	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+		if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
+			/* we're making our own get_super here */
+			sb->s_count++;
+			spin_unlock(&sb_lock);
+			/*
+			 * If we can't get the readlock, there's no sense in
+			 * waiting around, most of the time the FS is going to
+			 * be unmounted by the time it is released.
+			 */
+			if (down_read_trylock(&sb->s_umount)) {
+				if (sb->s_root) {
+					spin_lock(&inode_lock);
+					sync_sb_inodes(sb, wbc);
+					spin_unlock(&inode_lock);
+				}
+				up_read(&sb->s_umount);
+			}
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto restart;
+		}
+		if (wbc->nr_to_write <= 0)
+			break;
+	}
+	spin_unlock(&sb_lock);
+}
+
+/*
+ * writeback and wait upon the filesystem's dirty inodes.  The caller will
+ * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
+ * used to park the written inodes on sb->s_dirty for the wait pass.
+ *
+ * A finite limit is set on the number of pages which will be written, to
+ * prevent infinite livelock of sys_sync().
+ *
+ * We add in the number of potentially dirty inodes, because each inode write
+ * can dirty pagecache in the underlying blockdev.
+ */
+void sync_inodes_sb(struct super_block *sb, int wait)
+{
+	struct writeback_control wbc = {
+		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+	};
+	unsigned long nr_dirty = read_page_state(nr_dirty);
+	unsigned long nr_unstable = read_page_state(nr_unstable);
+
+	wbc.nr_to_write = nr_dirty + nr_unstable +
+			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
+			nr_dirty + nr_unstable;
+	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
+	spin_lock(&inode_lock);
+	sync_sb_inodes(sb, &wbc);
+	spin_unlock(&inode_lock);
+}
+
+/*
+ * Rather lame livelock avoidance.
+ */
+static void set_sb_syncing(int val)
+{
+	struct super_block *sb;
+	spin_lock(&sb_lock);
+	sb = sb_entry(super_blocks.prev);
+	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+		sb->s_syncing = val;
+	}
+	spin_unlock(&sb_lock);
+}
+
+/*
+ * Find a superblock with inodes that need to be synced
+ */
+static struct super_block *get_super_to_sync(void)
+{
+	struct super_block *sb;
+restart:
+	spin_lock(&sb_lock);
+	sb = sb_entry(super_blocks.prev);
+	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+		if (sb->s_syncing)
+			continue;
+		sb->s_syncing = 1;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (!sb->s_root) {
+			drop_super(sb);
+			goto restart;
+		}
+		return sb;
+	}
+	spin_unlock(&sb_lock);
+	return NULL;
+}
+
+/**
+ * sync_inodes - write all dirty inodes to disk
+ * @wait: wait for the writeout to complete
+ *
+ * sync_inodes() goes through each super block's dirty inode list, writes the
+ * inodes out, waits on the writeout and puts the inodes back on the normal
+ * list.
+ *
+ * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
+ * part of the sync functions is that the blockdev "superblock" is processed
+ * last.  This is because the write_inode() function of a typical fs will
+ * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
+ * What we want to do is to perform all that dirtying first, and then write
+ * back all those inode blocks via the blockdev mapping in one sweep.  So the
+ * additional (somewhat redundant) sync_blockdev() calls here are to make
+ * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
+ * outstanding dirty inodes, the writeback goes block-at-a-time within the
+ * filesystem's write_inode().  This is extremely slow.
+ */
+void sync_inodes(int wait)
+{
+	struct super_block *sb;
+
+	set_sb_syncing(0);
+	while ((sb = get_super_to_sync()) != NULL) {
+		sync_inodes_sb(sb, 0);
+		sync_blockdev(sb->s_bdev);
+		drop_super(sb);
+	}
+	if (wait) {
+		set_sb_syncing(0);
+		while ((sb = get_super_to_sync()) != NULL) {
+			sync_inodes_sb(sb, 1);
+			sync_blockdev(sb->s_bdev);
+			drop_super(sb);
+		}
+	}
+}
+
+/**
+ *	write_inode_now	-	write an inode to disk
+ *	@inode: inode to write to disk
+ *	@sync: whether the write should be synchronous or not
+ *
+ *	This function commits an inode to disk immediately if it is
+ *	dirty. This is primarily needed by knfsd.
+ */
+ 
+int write_inode_now(struct inode *inode, int sync)
+{
+	int ret;
+	struct writeback_control wbc = {
+		.nr_to_write = LONG_MAX,
+		.sync_mode = WB_SYNC_ALL,
+	};
+
+	if (!mapping_cap_writeback_dirty(inode->i_mapping))
+		return 0;
+
+	might_sleep();
+	spin_lock(&inode_lock);
+	ret = __writeback_single_inode(inode, &wbc);
+	spin_unlock(&inode_lock);
+	if (sync)
+		wait_on_inode(inode);
+	return ret;
+}
+EXPORT_SYMBOL(write_inode_now);
+
+/**
+ * sync_inode - write an inode and its pages to disk.
+ * @inode: the inode to sync
+ * @wbc: controls the writeback mode
+ *
+ * sync_inode() will write an inode and its pages to disk.  It will also
+ * correctly update the inode on its superblock's dirty inode lists and will
+ * update inode->i_state.
+ *
+ * The caller must have a ref on the inode.
+ */
+int sync_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	int ret;
+
+	spin_lock(&inode_lock);
+	ret = __writeback_single_inode(inode, wbc);
+	spin_unlock(&inode_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sync_inode);
+
+/**
+ * generic_osync_inode - flush all dirty data for a given inode to disk
+ * @inode: inode to write
+ * @what:  what to write and wait upon
+ *
+ * This can be called by file_write functions for files which have the
+ * O_SYNC flag set, to flush dirty writes to disk.
+ *
+ * @what is a bitmask, specifying which part of the inode's data should be
+ * written and waited upon:
+ *
+ *    OSYNC_DATA:     i_mapping's dirty data
+ *    OSYNC_METADATA: the buffers at i_mapping->private_list
+ *    OSYNC_INODE:    the inode itself
+ */
+
+int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
+{
+	int err = 0;
+	int need_write_inode_now = 0;
+	int err2;
+
+	current->flags |= PF_SYNCWRITE;
+	if (what & OSYNC_DATA)
+		err = filemap_fdatawrite(mapping);
+	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
+		err2 = sync_mapping_buffers(mapping);
+		if (!err)
+			err = err2;
+	}
+	if (what & OSYNC_DATA) {
+		err2 = filemap_fdatawait(mapping);
+		if (!err)
+			err = err2;
+	}
+	current->flags &= ~PF_SYNCWRITE;
+
+	spin_lock(&inode_lock);
+	if ((inode->i_state & I_DIRTY) &&
+	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
+		need_write_inode_now = 1;
+	spin_unlock(&inode_lock);
+
+	if (need_write_inode_now) {
+		err2 = write_inode_now(inode, 1);
+		if (!err)
+			err = err2;
+	}
+	else
+		wait_on_inode(inode);
+
+	return err;
+}
+
+EXPORT_SYMBOL(generic_osync_inode);
+
+/**
+ * writeback_acquire: attempt to get exclusive writeback access to a device
+ * @bdi: the device's backing_dev_info structure
+ *
+ * It is a waste of resources to have more than one pdflush thread blocked on
+ * a single request queue.  Exclusion at the request_queue level is obtained
+ * via a flag in the request_queue's backing_dev_info.state.
+ *
+ * Non-request_queue-backed address_spaces will share default_backing_dev_info,
+ * unless they implement their own.  This is somewhat inefficient, as it
+ * may prevent concurrent writeback against multiple devices.
+ */
+int writeback_acquire(struct backing_dev_info *bdi)
+{
+	return !test_and_set_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_in_progress: determine whether there is writeback in progress
+ *                        against a backing device.
+ * @bdi: the device's backing_dev_info structure.
+ */
+int writeback_in_progress(struct backing_dev_info *bdi)
+{
+	return test_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_release: relinquish exclusive writeback access against a device.
+ * @bdi: the device's backing_dev_info structure
+ */
+void writeback_release(struct backing_dev_info *bdi)
+{
+	BUG_ON(!writeback_in_progress(bdi));
+	clear_bit(BDI_pdflush, &bdi->state);
+}
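
The three helpers above reduce per-queue writeback exclusion to a single
test-and-set bit in the backing device's state word. A minimal userspace
analogue using C11 atomics (all names invented, purely illustrative of the
try-acquire/release pattern):

#include <stdatomic.h>
#include <stdio.h>

#define TOY_PDFLUSH_BIT 0x1UL

static atomic_ulong toy_bdi_state;

/* try-acquire: succeeds only if the bit was not already set */
static int toy_writeback_acquire(void)
{
	return !(atomic_fetch_or(&toy_bdi_state, TOY_PDFLUSH_BIT) & TOY_PDFLUSH_BIT);
}

static void toy_writeback_release(void)
{
	atomic_fetch_and(&toy_bdi_state, ~TOY_PDFLUSH_BIT);
}

int main(void)
{
	printf("first acquire: %d\n", toy_writeback_acquire());   /* 1 */
	printf("second acquire: %d\n", toy_writeback_acquire());  /* 0: already held */
	toy_writeback_release();
	printf("after release: %d\n", toy_writeback_acquire());   /* 1 again */
	return 0;
}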
diff --git a/fs/hfs/Makefile b/fs/hfs/Makefile
new file mode 100644
index 0000000..c41f5a85
--- /dev/null
+++ b/fs/hfs/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux hfs filesystem routines.
+#
+
+obj-$(CONFIG_HFS_FS) += hfs.o
+
+hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
+	    catalog.o dir.o extent.o inode.o attr.o mdb.o \
+            part_tbl.o string.o super.o sysdep.o trans.o
+
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
new file mode 100644
index 0000000..e057ec5
--- /dev/null
+++ b/fs/hfs/attr.c
@@ -0,0 +1,121 @@
+/*
+ *  linux/fs/hfs/attr.c
+ *
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Export hfs data via xattr
+ */
+
+
+#include <linux/fs.h>
+#include <linux/xattr.h>
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+int hfs_setxattr(struct dentry *dentry, const char *name,
+		 const void *value, size_t size, int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_find_data fd;
+	hfs_cat_rec rec;
+	struct hfs_cat_file *file;
+	int res;
+
+	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd);
+	if (res)
+		return res;
+	fd.search_key->cat = HFS_I(inode)->cat_key;
+	res = hfs_brec_find(&fd);
+	if (res)
+		goto out;
+	hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+			sizeof(struct hfs_cat_file));
+	file = &rec.file;
+
+	if (!strcmp(name, "hfs.type")) {
+		if (size == 4)
+			memcpy(&file->UsrWds.fdType, value, 4);
+		else
+			res = -ERANGE;
+	} else if (!strcmp(name, "hfs.creator")) {
+		if (size == 4)
+			memcpy(&file->UsrWds.fdCreator, value, 4);
+		else
+			res = -ERANGE;
+	} else
+		res = -EOPNOTSUPP;
+	if (!res)
+		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+				sizeof(struct hfs_cat_file));
+out:
+	hfs_find_exit(&fd);
+	return res;
+}
+
+ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
+			 void *value, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_find_data fd;
+	hfs_cat_rec rec;
+	struct hfs_cat_file *file;
+	ssize_t res = 0;
+
+	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	if (size) {
+		res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd);
+		if (res)
+			return res;
+		fd.search_key->cat = HFS_I(inode)->cat_key;
+		res = hfs_brec_find(&fd);
+		if (res)
+			goto out;
+		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+				sizeof(struct hfs_cat_file));
+	}
+	file = &rec.file;
+
+	if (!strcmp(name, "hfs.type")) {
+		if (size >= 4) {
+			memcpy(value, &file->UsrWds.fdType, 4);
+			res = 4;
+		} else
+			res = size ? -ERANGE : 4;
+	} else if (!strcmp(name, "hfs.creator")) {
+		if (size >= 4) {
+			memcpy(value, &file->UsrWds.fdCreator, 4);
+			res = 4;
+		} else
+			res = size ? -ERANGE : 4;
+	} else
+		res = -ENODATA;
+out:
+	if (size)
+		hfs_find_exit(&fd);
+	return res;
+}
+
+#define HFS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
+
+ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	if (!buffer || !size)
+		return HFS_ATTRLIST_SIZE;
+	if (size < HFS_ATTRLIST_SIZE)
+		return -ERANGE;
+	strcpy(buffer, "hfs.type");
+	strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
+
+	return HFS_ATTRLIST_SIZE;
+}
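
Assuming a mounted HFS volume, these handlers can be exercised with the
standard xattr syscalls; the path below is hypothetical, and both attributes
must be exactly four bytes or the handler returns -ERANGE.

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/hfs/somefile";	/* hypothetical HFS mount */
	char type[5] = { 0 };

	if (setxattr(path, "hfs.type", "TEXT", 4, 0) != 0)
		perror("setxattr hfs.type");
	if (setxattr(path, "hfs.creator", "ttxt", 4, 0) != 0)
		perror("setxattr hfs.creator");

	if (getxattr(path, "hfs.type", type, 4) == 4)
		printf("hfs.type = %.4s\n", type);
	return 0;
}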
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
new file mode 100644
index 0000000..89450ae
--- /dev/null
+++ b/fs/hfs/bfind.c
@@ -0,0 +1,210 @@
+/*
+ *  linux/fs/hfs/bfind.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Search routines for btrees
+ */
+
+#include <linux/slab.h>
+#include "btree.h"
+
+int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+{
+	void *ptr;
+
+	fd->tree = tree;
+	fd->bnode = NULL;
+	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	fd->search_key = ptr;
+	fd->key = ptr + tree->max_key_len + 2;
+	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
+	down(&tree->tree_lock);
+	return 0;
+}
+
+void hfs_find_exit(struct hfs_find_data *fd)
+{
+	hfs_bnode_put(fd->bnode);
+	kfree(fd->search_key);
+	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
+	up(&fd->tree->tree_lock);
+	fd->tree = NULL;
+}
+
+/* Find the record in bnode that best matches key (not greater than...) */
+int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
+{
+	int cmpval;
+	u16 off, len, keylen;
+	int rec;
+	int b, e;
+	int res;
+
+	b = 0;
+	e = bnode->num_recs - 1;
+	res = -ENOENT;
+	do {
+		rec = (e + b) / 2;
+		len = hfs_brec_lenoff(bnode, rec, &off);
+		keylen = hfs_brec_keylen(bnode, rec);
+		hfs_bnode_read(bnode, fd->key, off, keylen);
+		cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
+		if (!cmpval) {
+			e = rec;
+			res = 0;
+			goto done;
+		}
+		if (cmpval < 0)
+			b = rec + 1;
+		else
+			e = rec - 1;
+	} while (b <= e);
+	//printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
+	if (rec != e && e >= 0) {
+		len = hfs_brec_lenoff(bnode, e, &off);
+		keylen = hfs_brec_keylen(bnode, e);
+		hfs_bnode_read(bnode, fd->key, off, keylen);
+	}
+done:
+	fd->record = e;
+	fd->keyoffset = off;
+	fd->keylength = keylen;
+	fd->entryoffset = off + keylen;
+	fd->entrylength = len - keylen;
+	return res;
+}
+
+/* Traverse a B*Tree from the root to a leaf finding best fit to key */
+/* Return allocated copy of node found, set recnum to best record */
+int hfs_brec_find(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	u32 nidx, parent;
+	__be32 data;
+	int height, res;
+
+	tree = fd->tree;
+	if (fd->bnode)
+		hfs_bnode_put(fd->bnode);
+	fd->bnode = NULL;
+	nidx = tree->root;
+	if (!nidx)
+		return -ENOENT;
+	height = tree->depth;
+	res = 0;
+	parent = 0;
+	for (;;) {
+		bnode = hfs_bnode_find(tree, nidx);
+		if (IS_ERR(bnode)) {
+			res = PTR_ERR(bnode);
+			bnode = NULL;
+			break;
+		}
+		if (bnode->height != height)
+			goto invalid;
+		if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF))
+			goto invalid;
+		bnode->parent = parent;
+
+		res = __hfs_brec_find(bnode, fd);
+		if (!height)
+			break;
+		if (fd->record < 0)
+			goto release;
+
+		parent = nidx;
+		hfs_bnode_read(bnode, &data, fd->entryoffset, 4);
+		nidx = be32_to_cpu(data);
+		hfs_bnode_put(bnode);
+	}
+	fd->bnode = bnode;
+	return res;
+
+invalid:
+	printk("HFS: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
+		height, bnode->height, bnode->type, nidx, parent);
+	res = -EIO;
+release:
+	hfs_bnode_put(bnode);
+	return res;
+}
+
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+{
+	int res;
+
+	res = hfs_brec_find(fd);
+	if (res)
+		return res;
+	if (fd->entrylength > rec_len)
+		return -EINVAL;
+	hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength);
+	return 0;
+}
+
+int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	int idx, res = 0;
+	u16 off, len, keylen;
+
+	bnode = fd->bnode;
+	tree = bnode->tree;
+
+	if (cnt < 0) {
+		cnt = -cnt;
+		while (cnt > fd->record) {
+			cnt -= fd->record + 1;
+			fd->record = bnode->num_recs - 1;
+			idx = bnode->prev;
+			if (!idx) {
+				res = -ENOENT;
+				goto out;
+			}
+			hfs_bnode_put(bnode);
+			bnode = hfs_bnode_find(tree, idx);
+			if (IS_ERR(bnode)) {
+				res = PTR_ERR(bnode);
+				bnode = NULL;
+				goto out;
+			}
+		}
+		fd->record -= cnt;
+	} else {
+		while (cnt >= bnode->num_recs - fd->record) {
+			cnt -= bnode->num_recs - fd->record;
+			fd->record = 0;
+			idx = bnode->next;
+			if (!idx) {
+				res = -ENOENT;
+				goto out;
+			}
+			hfs_bnode_put(bnode);
+			bnode = hfs_bnode_find(tree, idx);
+			if (IS_ERR(bnode)) {
+				res = PTR_ERR(bnode);
+				bnode = NULL;
+				goto out;
+			}
+		}
+		fd->record += cnt;
+	}
+
+	len = hfs_brec_lenoff(bnode, fd->record, &off);
+	keylen = hfs_brec_keylen(bnode, fd->record);
+	fd->keyoffset = off;
+	fd->keylength = keylen;
+	fd->entryoffset = off + keylen;
+	fd->entrylength = len - keylen;
+	hfs_bnode_read(bnode, fd->key, off, keylen);
+out:
+	fd->bnode = bnode;
+	return res;
+}
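
The search in __hfs_brec_find() above is a "greatest key not greater than"
binary search over the records of one bnode. A simplified, self-contained
analogue over a plain integer array (not the on-disk record layout; names
are invented):

#include <stdio.h>

/* return index of the greatest element <= key, or -1 if all are greater */
static int toy_brec_find(const int *keys, int nrecs, int key)
{
	int b = 0, e = nrecs - 1, best = -1;

	while (b <= e) {
		int rec = (b + e) / 2;

		if (keys[rec] == key)
			return rec;
		if (keys[rec] < key) {
			best = rec;
			b = rec + 1;
		} else {
			e = rec - 1;
		}
	}
	return best;
}

int main(void)
{
	int keys[] = { 2, 5, 9, 14, 20 };

	printf("%d\n", toy_brec_find(keys, 5, 9));	/* 2: exact hit */
	printf("%d\n", toy_brec_find(keys, 5, 10));	/* 2: best "not greater" fit */
	printf("%d\n", toy_brec_find(keys, 5, 1));	/* -1: everything is greater */
	return 0;
}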
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
new file mode 100644
index 0000000..24e7579
--- /dev/null
+++ b/fs/hfs/bitmap.c
@@ -0,0 +1,243 @@
+/*
+ *  linux/fs/hfs/bitmap.c
+ *
+ * Copyright (C) 1996-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * Based on GPLed code Copyright (C) 1995  Michael Dreher
+ *
+ * This file contains the code to modify the volume bitmap:
+ * search/set/clear bits.
+ */
+
+#include "hfs_fs.h"
+
+/*
+ * hfs_find_set_zero_bits()
+ *
+ * Description:
+ *  Given a block of memory, its length in bits, a starting bit number and
+ *  a maximum run length, find the first run of zero bits (in left-to-right
+ *  ordering) in that range, set up to '*max' of them, and return the number
+ *  of the first bit of the run; '*max' is updated to the length actually set.
+ *
+ *  Returns >= 'size' if no zero bits are found in the range.
+ *
+ *  Accesses memory in 32-bit aligned chunks of 32 bits and thus
+ *  may read beyond the 'size'th bit.
+ */
+static u32 hfs_find_set_zero_bits(__be32 *bitmap, u32 size, u32 offset, u32 *max)
+{
+	__be32 *curr, *end;
+	u32 mask, start, len, n;
+	__be32 val;
+	int i;
+
+	len = *max;
+	if (!len)
+		return size;
+
+	curr = bitmap + (offset / 32);
+	end = bitmap + ((size + 31) / 32);
+
+	/* scan the first partial u32 for zero bits */
+	val = *curr;
+	if (~val) {
+		n = be32_to_cpu(val);
+		i = offset % 32;
+		mask = (1U << 31) >> i;
+		for (; i < 32; mask >>= 1, i++) {
+			if (!(n & mask))
+				goto found;
+		}
+	}
+
+	/* scan complete u32s for the first zero bit */
+	while (++curr < end) {
+		val = *curr;
+		if (~val) {
+			n = be32_to_cpu(val);
+			mask = 1 << 31;
+			for (i = 0; i < 32; mask >>= 1, i++) {
+				if (!(n & mask))
+					goto found;
+			}
+		}
+	}
+	return size;
+
+found:
+	start = (curr - bitmap) * 32 + i;
+	if (start >= size)
+		return start;
+	/* do any partial u32 at the start */
+	len = min(size - start, len);
+	while (1) {
+		n |= mask;
+		if (++i >= 32)
+			break;
+		mask >>= 1;
+		if (!--len || n & mask)
+			goto done;
+	}
+	if (!--len)
+		goto done;
+	*curr++ = cpu_to_be32(n);
+	/* do full u32s */
+	while (1) {
+		n = be32_to_cpu(*curr);
+		if (len < 32)
+			break;
+		if (n) {
+			len = 32;
+			break;
+		}
+		*curr++ = cpu_to_be32(0xffffffff);
+		len -= 32;
+	}
+	/* do any partial u32 at end */
+	mask = 1U << 31;
+	for (i = 0; i < len; i++) {
+		if (n & mask)
+			break;
+		n |= mask;
+		mask >>= 1;
+	}
+done:
+	*curr = cpu_to_be32(n);
+	*max = (curr - bitmap) * 32 + i - start;
+	return start;
+}
+
+/*
+ * hfs_vbm_search_free()
+ *
+ * Description:
+ *   Search for 'num_bits' consecutive cleared bits in the bitmap blocks of
+ *   the hfs MDB. The bitmap had better be locked or the returned range
+ *   may no longer be free when this function returns!
+ *   The search starts at bit 'goal' and wraps around to bit 0 if no
+ *   free range is found past it.
+ * Input Variable(s):
+ *   struct super_block *sb: Pointer to the superblock of the hfs filesystem
+ *   u32 goal: Bit number to start the search at
+ *   u32 *num_bits: Pointer to the number of cleared bits
+ *     to search for
+ * Output Variable(s):
+ *   u32 *num_bits: The number of consecutive clear bits of the
+ *     returned range. If the bitmap is fragmented, this will be less than
+ *     requested and it will be zero when the disk is full.
+ * Returns:
+ *   The number of the first bit of the range of cleared bits which has been
+ *   found. When '*num_bits' is zero, this is invalid!
+ * Preconditions:
+ *   'sb' points to a "valid" (struct super_block).
+ *   'num_bits' points to a variable of type (u32), which contains
+ *	the number of cleared bits to find.
+ * Postconditions:
+ *   'num_bits' is set to the length of the found sequence.
+ */
+u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
+{
+	void *bitmap;
+	u32 pos;
+
+	/* make sure we have actual work to perform */
+	if (!*num_bits)
+		return 0;
+
+	down(&HFS_SB(sb)->bitmap_lock);
+	bitmap = HFS_SB(sb)->bitmap;
+
+	pos = hfs_find_set_zero_bits(bitmap, HFS_SB(sb)->fs_ablocks, goal, num_bits);
+	if (pos >= HFS_SB(sb)->fs_ablocks) {
+		if (goal)
+			pos = hfs_find_set_zero_bits(bitmap, goal, 0, num_bits);
+		if (pos >= HFS_SB(sb)->fs_ablocks) {
+			*num_bits = pos = 0;
+			goto out;
+		}
+	}
+
+	dprint(DBG_BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits);
+	HFS_SB(sb)->free_ablocks -= *num_bits;
+	hfs_bitmap_dirty(sb);
+out:
+	up(&HFS_SB(sb)->bitmap_lock);
+	return pos;
+}
+
+
+/*
+ * hfs_clear_vbm_bits()
+ *
+ * Description:
+ *   Clear the requested bits in the volume bitmap of the hfs filesystem
+ * Input Variable(s):
+ *   struct super_block *sb: Pointer to the superblock of the hfs filesystem
+ *   u16 start: The offset of the first bit
+ *   u16 count: The number of bits
+ * Output Variable(s):
+ *   None
+ * Returns:
+ *    0: no error
+ *   -1: One of the bits was already clear.  This is a strange
+ *	 error and when it happens, the filesystem must be repaired!
+ *   -2: One or more of the bits are out of range of the bitmap.
+ * Preconditions:
+ *   'sb' points to a "valid" (struct super_block).
+ * Postconditions:
+ *   Starting with bit number 'start', 'count' bits in the volume bitmap
+ *   are cleared. The affected bitmap blocks are marked "dirty", the free
+ *   block count of the MDB is updated and the MDB is marked dirty.
+ */
+int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
+{
+	__be32 *curr;
+	u32 mask;
+	int i, len;
+
+	/* is there any actual work to be done? */
+	if (!count)
+		return 0;
+
+	dprint(DBG_BITMAP, "clear_bits: %u,%u\n", start, count);
+	/* are all of the bits in range? */
+	if ((start + count) > HFS_SB(sb)->fs_ablocks)
+		return -2;
+
+	down(&HFS_SB(sb)->bitmap_lock);
+	/* bitmap is always on a 32-bit boundary */
+	curr = HFS_SB(sb)->bitmap + (start / 32);
+	len = count;
+
+	/* do any partial u32 at the start */
+	i = start % 32;
+	if (i) {
+		int j = 32 - i;
+		mask = 0xffffffffU << j;
+		if (j > count) {
+			mask |= 0xffffffffU >> (i + count);
+			*curr &= cpu_to_be32(mask);
+			goto out;
+		}
+		*curr++ &= cpu_to_be32(mask);
+		count -= j;
+	}
+
+	/* do full u32s */
+	while (count >= 32) {
+		*curr++ = 0;
+		count -= 32;
+	}
+	/* do any partial u32 at end */
+	if (count) {
+		mask = 0xffffffffU >> count;
+		*curr &= cpu_to_be32(mask);
+	}
+out:
+	HFS_SB(sb)->free_ablocks += len;
+	up(&HFS_SB(sb)->bitmap_lock);
+	hfs_bitmap_dirty(sb);
+
+	return 0;
+}
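
The scan in hfs_find_set_zero_bits() walks the bitmap MSB-first within each
big-endian 32-bit word. A small userspace sketch of just the first-zero-bit
step, with invented names and a hard-coded example word:

#include <stdio.h>
#include <stdint.h>

/* find the first zero bit (MSB-first numbering) in one 32-bit bitmap word,
 * starting at 'offset'; returns 32 if every remaining bit is set */
static int toy_first_zero_bit(uint32_t word, int offset)
{
	uint32_t mask = 0x80000000u >> offset;
	int i;

	for (i = offset; i < 32; i++, mask >>= 1)
		if (!(word & mask))
			return i;
	return 32;
}

int main(void)
{
	uint32_t word = 0xF4000000u;	/* bits 0-3 and bit 5 set */

	printf("first zero from 0: %d\n", toy_first_zero_bit(word, 0));  /* 4 */
	printf("first zero from 5: %d\n", toy_first_zero_bit(word, 5));  /* 6 */
	return 0;
}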
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
new file mode 100644
index 0000000..6ad1211
--- /dev/null
+++ b/fs/hfs/bnode.c
@@ -0,0 +1,498 @@
+/*
+ *  linux/fs/hfs/bnode.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle basic btree node operations
+ */
+
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include "btree.h"
+
+#define REF_PAGES	0
+
+void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+		int off, int len)
+{
+	struct page *page;
+
+	off += node->page_offset;
+	page = node->page[0];
+
+	memcpy(buf, kmap(page) + off, len);
+	kunmap(page);
+}
+
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+{
+	__be16 data;
+	// optimize later...
+	hfs_bnode_read(node, &data, off, 2);
+	return be16_to_cpu(data);
+}
+
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+{
+	u8 data;
+	// optimize later...
+	hfs_bnode_read(node, &data, off, 1);
+	return data;
+}
+
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+{
+	struct hfs_btree *tree;
+	int key_len;
+
+	tree = node->tree;
+	if (node->type == HFS_NODE_LEAF ||
+	    tree->attributes & HFS_TREE_VARIDXKEYS)
+		key_len = hfs_bnode_read_u8(node, off) + 1;
+	else
+		key_len = tree->max_key_len + 1;
+
+	hfs_bnode_read(node, key, off, key_len);
+}
+
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+{
+	struct page *page;
+
+	off += node->page_offset;
+	page = node->page[0];
+
+	memcpy(kmap(page) + off, buf, len);
+	kunmap(page);
+	set_page_dirty(page);
+}
+
+void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+{
+	__be16 v = cpu_to_be16(data);
+	// optimize later...
+	hfs_bnode_write(node, &v, off, 2);
+}
+
+void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
+{
+	// optimize later...
+	hfs_bnode_write(node, &data, off, 1);
+}
+
+void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+{
+	struct page *page;
+
+	off += node->page_offset;
+	page = node->page[0];
+
+	memset(kmap(page) + off, 0, len);
+	kunmap(page);
+	set_page_dirty(page);
+}
+
+void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
+		struct hfs_bnode *src_node, int src, int len)
+{
+	struct hfs_btree *tree;
+	struct page *src_page, *dst_page;
+
+	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+	if (!len)
+		return;
+	tree = src_node->tree;
+	src += src_node->page_offset;
+	dst += dst_node->page_offset;
+	src_page = src_node->page[0];
+	dst_page = dst_node->page[0];
+
+	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
+	kunmap(src_page);
+	kunmap(dst_page);
+	set_page_dirty(dst_page);
+}
+
+void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+{
+	struct page *page;
+	void *ptr;
+
+	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+	if (!len)
+		return;
+	src += node->page_offset;
+	dst += node->page_offset;
+	page = node->page[0];
+	ptr = kmap(page);
+	memmove(ptr + dst, ptr + src, len);
+	kunmap(page);
+	set_page_dirty(page);
+}
+
+void hfs_bnode_dump(struct hfs_bnode *node)
+{
+	struct hfs_bnode_desc desc;
+	__be32 cnid;
+	int i, off, key_off;
+
+	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
+	hfs_bnode_read(node, &desc, 0, sizeof(desc));
+	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
+		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
+		desc.type, desc.height, be16_to_cpu(desc.num_recs));
+
+	off = node->tree->node_size - 2;
+	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
+		key_off = hfs_bnode_read_u16(node, off);
+		dprint(DBG_BNODE_MOD, " %d", key_off);
+		if (i && node->type == HFS_NODE_INDEX) {
+			int tmp;
+
+			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
+				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
+			else
+				tmp = node->tree->max_key_len + 1;
+			dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off));
+			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
+			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+		} else if (i && node->type == HFS_NODE_LEAF) {
+			int tmp;
+
+			tmp = hfs_bnode_read_u8(node, key_off);
+			dprint(DBG_BNODE_MOD, " (%d)", tmp);
+		}
+	}
+	dprint(DBG_BNODE_MOD, "\n");
+}
+
+void hfs_bnode_unlink(struct hfs_bnode *node)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *tmp;
+	__be32 cnid;
+
+	tree = node->tree;
+	if (node->prev) {
+		tmp = hfs_bnode_find(tree, node->prev);
+		if (IS_ERR(tmp))
+			return;
+		tmp->next = node->next;
+		cnid = cpu_to_be32(tmp->next);
+		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_put(tmp);
+	} else if (node->type == HFS_NODE_LEAF)
+		tree->leaf_head = node->next;
+
+	if (node->next) {
+		tmp = hfs_bnode_find(tree, node->next);
+		if (IS_ERR(tmp))
+			return;
+		tmp->prev = node->prev;
+		cnid = cpu_to_be32(tmp->prev);
+		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_put(tmp);
+	} else if (node->type == HFS_NODE_LEAF)
+		tree->leaf_tail = node->prev;
+
+	// move down?
+	if (!node->prev && !node->next) {
+		printk("hfs_btree_del_level\n");
+	}
+	if (!node->parent) {
+		tree->root = 0;
+		tree->depth = 0;
+	}
+	set_bit(HFS_BNODE_DELETED, &node->flags);
+}
+
+static inline int hfs_bnode_hash(u32 num)
+{
+	num = (num >> 16) + num;
+	num += num >> 8;
+	return num & (NODE_HASH_SIZE - 1);
+}
+
+struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
+{
+	struct hfs_bnode *node;
+
+	if (cnid >= tree->node_count) {
+		printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
+		return NULL;
+	}
+
+	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
+	     node; node = node->next_hash) {
+		if (node->this == cnid) {
+			return node;
+		}
+	}
+	return NULL;
+}
+
+static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+{
+	struct super_block *sb;
+	struct hfs_bnode *node, *node2;
+	struct address_space *mapping;
+	struct page *page;
+	int size, block, i, hash;
+	loff_t off;
+
+	if (cnid >= tree->node_count) {
+		printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
+		return NULL;
+	}
+
+	sb = tree->inode->i_sb;
+	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
+		sizeof(struct page *);
+	node = kmalloc(size, GFP_KERNEL);
+	if (!node)
+		return NULL;
+	memset(node, 0, size);
+	node->tree = tree;
+	node->this = cnid;
+	set_bit(HFS_BNODE_NEW, &node->flags);
+	atomic_set(&node->refcnt, 1);
+	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
+	       node->tree->cnid, node->this);
+	init_waitqueue_head(&node->lock_wq);
+	spin_lock(&tree->hash_lock);
+	node2 = hfs_bnode_findhash(tree, cnid);
+	if (!node2) {
+		hash = hfs_bnode_hash(cnid);
+		node->next_hash = tree->node_hash[hash];
+		tree->node_hash[hash] = node;
+		tree->node_hash_cnt++;
+	} else {
+		spin_unlock(&tree->hash_lock);
+		kfree(node);
+		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		return node2;
+	}
+	spin_unlock(&tree->hash_lock);
+
+	mapping = tree->inode->i_mapping;
+	off = (loff_t)cnid * tree->node_size;
+	block = off >> PAGE_CACHE_SHIFT;
+	node->page_offset = off & ~PAGE_CACHE_MASK;
+	for (i = 0; i < tree->pages_per_bnode; i++) {
+		page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+		if (IS_ERR(page))
+			goto fail;
+		if (PageError(page)) {
+			page_cache_release(page);
+			goto fail;
+		}
+#if !REF_PAGES
+		page_cache_release(page);
+#endif
+		node->page[i] = page;
+	}
+
+	return node;
+fail:
+	set_bit(HFS_BNODE_ERROR, &node->flags);
+	return node;
+}
+
+void hfs_bnode_unhash(struct hfs_bnode *node)
+{
+	struct hfs_bnode **p;
+
+	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
+		node->tree->cnid, node->this, atomic_read(&node->refcnt));
+	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
+	     *p && *p != node; p = &(*p)->next_hash)
+		;
+	if (!*p)
+		BUG();
+	*p = node->next_hash;
+	node->tree->node_hash_cnt--;
+}
+
+/* Load a particular node out of a tree */
+struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
+{
+	struct hfs_bnode *node;
+	struct hfs_bnode_desc *desc;
+	int i, rec_off, off, next_off;
+	int entry_size, key_size;
+
+	spin_lock(&tree->hash_lock);
+	node = hfs_bnode_findhash(tree, num);
+	if (node) {
+		hfs_bnode_get(node);
+		spin_unlock(&tree->hash_lock);
+		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		if (test_bit(HFS_BNODE_ERROR, &node->flags))
+			goto node_error;
+		return node;
+	}
+	spin_unlock(&tree->hash_lock);
+	node = __hfs_bnode_create(tree, num);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags))
+		goto node_error;
+	if (!test_bit(HFS_BNODE_NEW, &node->flags))
+		return node;
+
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	node->prev = be32_to_cpu(desc->prev);
+	node->next = be32_to_cpu(desc->next);
+	node->num_recs = be16_to_cpu(desc->num_recs);
+	node->type = desc->type;
+	node->height = desc->height;
+	kunmap(node->page[0]);
+
+	switch (node->type) {
+	case HFS_NODE_HEADER:
+	case HFS_NODE_MAP:
+		if (node->height != 0)
+			goto node_error;
+		break;
+	case HFS_NODE_LEAF:
+		if (node->height != 1)
+			goto node_error;
+		break;
+	case HFS_NODE_INDEX:
+		if (node->height <= 1 || node->height > tree->depth)
+			goto node_error;
+		break;
+	default:
+		goto node_error;
+	}
+
+	rec_off = tree->node_size - 2;
+	off = hfs_bnode_read_u16(node, rec_off);
+	if (off != sizeof(struct hfs_bnode_desc))
+		goto node_error;
+	for (i = 1; i <= node->num_recs; off = next_off, i++) {
+		rec_off -= 2;
+		next_off = hfs_bnode_read_u16(node, rec_off);
+		if (next_off <= off ||
+		    next_off > tree->node_size ||
+		    next_off & 1)
+			goto node_error;
+		entry_size = next_off - off;
+		if (node->type != HFS_NODE_INDEX &&
+		    node->type != HFS_NODE_LEAF)
+			continue;
+		key_size = hfs_bnode_read_u8(node, off) + 1;
+		if (key_size >= entry_size /*|| key_size & 1*/)
+			goto node_error;
+	}
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+	return node;
+
+node_error:
+	set_bit(HFS_BNODE_ERROR, &node->flags);
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+	hfs_bnode_put(node);
+	return ERR_PTR(-EIO);
+}
+
+void hfs_bnode_free(struct hfs_bnode *node)
+{
+	//int i;
+
+	//for (i = 0; i < node->tree->pages_per_bnode; i++)
+	//	if (node->page[i])
+	//		page_cache_release(node->page[i]);
+	kfree(node);
+}
+
+struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
+{
+	struct hfs_bnode *node;
+	struct page **pagep;
+	int i;
+
+	spin_lock(&tree->hash_lock);
+	node = hfs_bnode_findhash(tree, num);
+	spin_unlock(&tree->hash_lock);
+	if (node)
+		BUG();
+	node = __hfs_bnode_create(tree, num);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
+		hfs_bnode_put(node);
+		return ERR_PTR(-EIO);
+	}
+
+	pagep = node->page;
+	memset(kmap(*pagep) + node->page_offset, 0,
+	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	set_page_dirty(*pagep);
+	kunmap(*pagep);
+	for (i = 1; i < tree->pages_per_bnode; i++) {
+		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		set_page_dirty(*pagep);
+		kunmap(*pagep);
+	}
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+
+	return node;
+}
+
+void hfs_bnode_get(struct hfs_bnode *node)
+{
+	if (node) {
+		atomic_inc(&node->refcnt);
+#if REF_PAGES
+		{
+		int i;
+		for (i = 0; i < node->tree->pages_per_bnode; i++)
+			get_page(node->page[i]);
+		}
+#endif
+		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
+		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+	}
+}
+
+/* Dispose of resources used by a node */
+void hfs_bnode_put(struct hfs_bnode *node)
+{
+	if (node) {
+		struct hfs_btree *tree = node->tree;
+		int i;
+
+		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
+		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+		if (!atomic_read(&node->refcnt))
+			BUG();
+		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
+#if REF_PAGES
+			for (i = 0; i < tree->pages_per_bnode; i++)
+				put_page(node->page[i]);
+#endif
+			return;
+		}
+		for (i = 0; i < tree->pages_per_bnode; i++) {
+			mark_page_accessed(node->page[i]);
+#if REF_PAGES
+			put_page(node->page[i]);
+#endif
+		}
+
+		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
+			hfs_bnode_unhash(node);
+			spin_unlock(&tree->hash_lock);
+			hfs_bmap_free(node);
+			hfs_bnode_free(node);
+			return;
+		}
+		spin_unlock(&tree->hash_lock);
+	}
+}
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
new file mode 100644
index 0000000..7d8fff2
--- /dev/null
+++ b/fs/hfs/brec.c
@@ -0,0 +1,496 @@
+/*
+ *  linux/fs/hfs/brec.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle individual btree records
+ */
+
+#include "btree.h"
+
+static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd);
+static int hfs_brec_update_parent(struct hfs_find_data *fd);
+static int hfs_btree_inc_height(struct hfs_btree *tree);
+
+/* Get the length and offset of the given record in the given node */
+u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off)
+{
+	__be16 retval[2];
+	u16 dataoff;
+
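+	/* record offsets live in a table of __be16s at the end of the node;
+	 * read this record's offset and the next one to get its length */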
+	dataoff = node->tree->node_size - (rec + 2) * 2;
+	hfs_bnode_read(node, retval, dataoff, 4);
+	*off = be16_to_cpu(retval[1]);
+	return be16_to_cpu(retval[0]) - *off;
+}
+
+/* Get the length of the key from a keyed record */
+u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
+{
+	u16 retval, recoff;
+
+	if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF)
+		return 0;
+
+	if ((node->type == HFS_NODE_INDEX) &&
+	   !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
+		if (node->tree->attributes & HFS_TREE_BIGKEYS)
+			retval = node->tree->max_key_len + 2;
+		else
+			retval = node->tree->max_key_len + 1;
+	} else {
+		recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
+		if (!recoff)
+			return 0;
+		if (node->tree->attributes & HFS_TREE_BIGKEYS)
+			retval = hfs_bnode_read_u16(node, recoff) + 2;
+		else
+			retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
+	}
+	return retval;
+}
+
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node;
+	int size, key_len, rec;
+	int data_off, end_off;
+	int idx_rec_off, data_rec_off, end_rec_off;
+	__be32 cnid;
+
+	tree = fd->tree;
+	if (!fd->bnode) {
+		if (!tree->root)
+			hfs_btree_inc_height(tree);
+		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
+		if (IS_ERR(fd->bnode))
+			return PTR_ERR(fd->bnode);
+		fd->record = -1;
+	}
+	new_node = NULL;
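+	/* on-disk key size: length byte plus key, padded to an even size */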
+	key_len = (fd->search_key->key_len | 1) + 1;
+again:
+	/* new record idx and complete record size */
+	rec = fd->record + 1;
+	size = key_len + entry_len;
+
+	node = fd->bnode;
+	hfs_bnode_dump(node);
+	/* get last offset */
+	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
+	end_off = hfs_bnode_read_u16(node, end_rec_off);
+	end_rec_off -= 2;
+	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
+	if (size > end_rec_off - end_off) {
+		if (new_node)
+			panic("not enough room!\n");
+		new_node = hfs_bnode_split(fd);
+		if (IS_ERR(new_node))
+			return PTR_ERR(new_node);
+		goto again;
+	}
+	if (node->type == HFS_NODE_LEAF) {
+		tree->leaf_count++;
+		mark_inode_dirty(tree->inode);
+	}
+	node->num_recs++;
+	/* write new last offset */
+	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+	hfs_bnode_write_u16(node, end_rec_off, end_off + size);
+	data_off = end_off;
+	data_rec_off = end_rec_off + 2;
+	idx_rec_off = tree->node_size - (rec + 1) * 2;
+	if (idx_rec_off == data_rec_off)
+		goto skip;
+	/* move all following entries */
+	do {
+		data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
+		hfs_bnode_write_u16(node, data_rec_off, data_off + size);
+		data_rec_off += 2;
+	} while (data_rec_off < idx_rec_off);
+
+	/* move data away */
+	hfs_bnode_move(node, data_off + size, data_off,
+		       end_off - data_off);
+
+skip:
+	hfs_bnode_write(node, fd->search_key, data_off, key_len);
+	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+	hfs_bnode_dump(node);
+
+	if (new_node) {
+		/* update parent key if we inserted a key
+		 * at the start of the first node
+		 */
+		if (!rec && new_node != node)
+			hfs_brec_update_parent(fd);
+
+		hfs_bnode_put(fd->bnode);
+		if (!new_node->parent) {
+			hfs_btree_inc_height(tree);
+			new_node->parent = tree->root;
+		}
+		fd->bnode = hfs_bnode_find(tree, new_node->parent);
+
+		/* create index data entry */
+		cnid = cpu_to_be32(new_node->this);
+		entry = &cnid;
+		entry_len = sizeof(cnid);
+
+		/* get index key */
+		hfs_bnode_read_key(new_node, fd->search_key, 14);
+		__hfs_brec_find(fd->bnode, fd);
+
+		hfs_bnode_put(new_node);
+		new_node = NULL;
+
+		if (tree->attributes & HFS_TREE_VARIDXKEYS)
+			key_len = fd->search_key->key_len + 1;
+		else {
+			fd->search_key->key_len = tree->max_key_len;
+			key_len = tree->max_key_len + 1;
+		}
+		goto again;
+	}
+
+	if (!rec)
+		hfs_brec_update_parent(fd);
+
+	return 0;
+}
+
+int hfs_brec_remove(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *parent;
+	int end_off, rec_off, data_off, size;
+
+	tree = fd->tree;
+	node = fd->bnode;
+again:
+	rec_off = tree->node_size - (fd->record + 2) * 2;
+	end_off = tree->node_size - (node->num_recs + 1) * 2;
+
+	if (node->type == HFS_NODE_LEAF) {
+		tree->leaf_count--;
+		mark_inode_dirty(tree->inode);
+	}
+	hfs_bnode_dump(node);
+	dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength);
+	if (!--node->num_recs) {
+		hfs_bnode_unlink(node);
+		if (!node->parent)
+			return 0;
+		parent = hfs_bnode_find(tree, node->parent);
+		if (IS_ERR(parent))
+			return PTR_ERR(parent);
+		hfs_bnode_put(node);
+		node = fd->bnode = parent;
+
+		__hfs_brec_find(node, fd);
+		goto again;
+	}
+	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+
+	if (rec_off == end_off)
+		goto skip;
+	size = fd->keylength + fd->entrylength;
+
+	do {
+		data_off = hfs_bnode_read_u16(node, rec_off);
+		hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
+		rec_off -= 2;
+	} while (rec_off >= end_off);
+
+	/* fill hole */
+	hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
+		       data_off - fd->keyoffset - size);
+skip:
+	hfs_bnode_dump(node);
+	if (!fd->record)
+		hfs_brec_update_parent(fd);
+	return 0;
+}
+
+static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node;
+	struct hfs_bnode_desc node_desc;
+	int num_recs, new_rec_off, new_off, old_rec_off;
+	int data_start, data_end, size;
+
+	tree = fd->tree;
+	node = fd->bnode;
+	new_node = hfs_bmap_alloc(tree);
+	if (IS_ERR(new_node))
+		return new_node;
+	hfs_bnode_get(node);
+	dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n",
+		node->this, new_node->this, node->next);
+	new_node->next = node->next;
+	new_node->prev = node->this;
+	new_node->parent = node->parent;
+	new_node->type = node->type;
+	new_node->height = node->height;
+
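+	/* keep records in the old node until their data grows past roughly
+	 * half the node, allowing for the 14-byte descriptor and the 2-byte
+	 * offset table entry of each record */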
+	size = tree->node_size / 2 - node->num_recs * 2 - 14;
+	old_rec_off = tree->node_size - 4;
+	num_recs = 1;
+	for (;;) {
+		data_start = hfs_bnode_read_u16(node, old_rec_off);
+		if (data_start > size)
+			break;
+		old_rec_off -= 2;
+		if (++num_recs < node->num_recs)
+			continue;
+		/* panic? */
+		hfs_bnode_put(node);
+		hfs_bnode_put(new_node);
+		return ERR_PTR(-ENOSPC);
+	}
+
+	if (fd->record + 1 < num_recs) {
+		/* new record is in the lower half,
+		 * so leave some more space there
+		 */
+		old_rec_off += 2;
+		num_recs--;
+		data_start = hfs_bnode_read_u16(node, old_rec_off);
+	} else {
+		hfs_bnode_put(node);
+		hfs_bnode_get(new_node);
+		fd->bnode = new_node;
+		fd->record -= num_recs;
+		fd->keyoffset -= data_start - 14;
+		fd->entryoffset -= data_start - 14;
+	}
+	new_node->num_recs = node->num_recs - num_recs;
+	node->num_recs = num_recs;
+
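+	/* rebuild the new node's offset table; its record data is copied so
+	 * that it starts right after the 14-byte node descriptor */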
+	new_rec_off = tree->node_size - 2;
+	new_off = 14;
+	size = data_start - new_off;
+	num_recs = new_node->num_recs;
+	data_end = data_start;
+	while (num_recs) {
+		hfs_bnode_write_u16(new_node, new_rec_off, new_off);
+		old_rec_off -= 2;
+		new_rec_off -= 2;
+		data_end = hfs_bnode_read_u16(node, old_rec_off);
+		new_off = data_end - size;
+		num_recs--;
+	}
+	hfs_bnode_write_u16(new_node, new_rec_off, new_off);
+	hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);
+
+	/* update new bnode header */
+	node_desc.next = cpu_to_be32(new_node->next);
+	node_desc.prev = cpu_to_be32(new_node->prev);
+	node_desc.type = new_node->type;
+	node_desc.height = new_node->height;
+	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
+	node_desc.reserved = 0;
+	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
+
+	/* update previous bnode header */
+	node->next = new_node->this;
+	hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
+	node_desc.next = cpu_to_be32(node->next);
+	node_desc.num_recs = cpu_to_be16(node->num_recs);
+	hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
+
+	/* update next bnode header */
+	if (new_node->next) {
+		struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+		next_node->prev = new_node->this;
+		hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
+		node_desc.prev = cpu_to_be32(next_node->prev);
+		hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc));
+		hfs_bnode_put(next_node);
+	} else if (node->this == tree->leaf_tail) {
+		/* if there is no next node, this might be the new tail */
+		tree->leaf_tail = new_node->this;
+		mark_inode_dirty(tree->inode);
+	}
+
+	hfs_bnode_dump(node);
+	hfs_bnode_dump(new_node);
+	hfs_bnode_put(node);
+
+	return new_node;
+}
+
+static int hfs_brec_update_parent(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node, *parent;
+	int newkeylen, diff;
+	int rec, rec_off, end_rec_off;
+	int start_off, end_off;
+
+	tree = fd->tree;
+	node = fd->bnode;
+	new_node = NULL;
+	if (!node->parent)
+		return 0;
+
+again:
+	parent = hfs_bnode_find(tree, node->parent);
+	if (IS_ERR(parent))
+		return PTR_ERR(parent);
+	__hfs_brec_find(parent, fd);
+	hfs_bnode_dump(parent);
+	rec = fd->record;
+
+	/* size difference between old and new key */
+	if (tree->attributes & HFS_TREE_VARIDXKEYS)
+		newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1;
+	else
+		fd->keylength = newkeylen = tree->max_key_len + 1;
+	dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen);
+
+	rec_off = tree->node_size - (rec + 2) * 2;
+	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+	diff = newkeylen - fd->keylength;
+	if (!diff)
+		goto skip;
+	if (diff > 0) {
+		end_off = hfs_bnode_read_u16(parent, end_rec_off);
+		if (end_rec_off - end_off < diff) {
+			printk("splitting index node...\n");
+			fd->bnode = parent;
+			new_node = hfs_bnode_split(fd);
+			if (IS_ERR(new_node))
+				return PTR_ERR(new_node);
+			parent = fd->bnode;
+			rec = fd->record;
+			rec_off = tree->node_size - (rec + 2) * 2;
+			end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+		}
+	}
+
+	end_off = start_off = hfs_bnode_read_u16(parent, rec_off);
+	hfs_bnode_write_u16(parent, rec_off, start_off + diff);
+	start_off -= 4;	/* move previous cnid too */
+
+	while (rec_off > end_rec_off) {
+		rec_off -= 2;
+		end_off = hfs_bnode_read_u16(parent, rec_off);
+		hfs_bnode_write_u16(parent, rec_off, end_off + diff);
+	}
+	hfs_bnode_move(parent, start_off + diff, start_off,
+		       end_off - start_off);
+skip:
+	hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen);
+	if (!(tree->attributes & HFS_TREE_VARIDXKEYS))
+		hfs_bnode_write_u8(parent, fd->keyoffset, newkeylen - 1);
+	hfs_bnode_dump(parent);
+
+	hfs_bnode_put(node);
+	node = parent;
+
+	if (new_node) {
+		__be32 cnid;
+
+		fd->bnode = hfs_bnode_find(tree, new_node->parent);
+		/* create index key and entry */
+		hfs_bnode_read_key(new_node, fd->search_key, 14);
+		cnid = cpu_to_be32(new_node->this);
+
+		__hfs_brec_find(fd->bnode, fd);
+		hfs_brec_insert(fd, &cnid, sizeof(cnid));
+		hfs_bnode_put(fd->bnode);
+		hfs_bnode_put(new_node);
+
+		if (!rec) {
+			if (new_node == node)
+				goto out;
+			/* restore search_key */
+			hfs_bnode_read_key(node, fd->search_key, 14);
+		}
+	}
+
+	if (!rec && node->parent)
+		goto again;
+out:
+	fd->bnode = node;
+	return 0;
+}
+
+static int hfs_btree_inc_height(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node, *new_node;
+	struct hfs_bnode_desc node_desc;
+	int key_size, rec;
+	__be32 cnid;
+
+	node = NULL;
+	if (tree->root) {
+		node = hfs_bnode_find(tree, tree->root);
+		if (IS_ERR(node))
+			return PTR_ERR(node);
+	}
+	new_node = hfs_bmap_alloc(tree);
+	if (IS_ERR(new_node)) {
+		hfs_bnode_put(node);
+		return PTR_ERR(new_node);
+	}
+
+	tree->root = new_node->this;
+	if (!tree->depth) {
+		tree->leaf_head = tree->leaf_tail = new_node->this;
+		new_node->type = HFS_NODE_LEAF;
+		new_node->num_recs = 0;
+	} else {
+		new_node->type = HFS_NODE_INDEX;
+		new_node->num_recs = 1;
+	}
+	new_node->parent = 0;
+	new_node->next = 0;
+	new_node->prev = 0;
+	new_node->height = ++tree->depth;
+
+	node_desc.next = cpu_to_be32(new_node->next);
+	node_desc.prev = cpu_to_be32(new_node->prev);
+	node_desc.type = new_node->type;
+	node_desc.height = new_node->height;
+	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
+	node_desc.reserved = 0;
+	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
+
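+	/* the first record starts right after the 14-byte node descriptor */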
+	rec = tree->node_size - 2;
+	hfs_bnode_write_u16(new_node, rec, 14);
+
+	if (node) {
+		/* insert old root idx into new root */
+		node->parent = tree->root;
+		if (node->type == HFS_NODE_LEAF ||
+		    tree->attributes & HFS_TREE_VARIDXKEYS)
+			key_size = hfs_bnode_read_u8(node, 14) + 1;
+		else
+			key_size = tree->max_key_len + 1;
+		hfs_bnode_copy(new_node, 14, node, 14, key_size);
+
+		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+			key_size = tree->max_key_len + 1;
+			hfs_bnode_write_u8(new_node, 14, tree->max_key_len);
+		}
+		key_size = (key_size + 1) & -2;
+		cnid = cpu_to_be32(node->this);
+		hfs_bnode_write(new_node, &cnid, 14 + key_size, 4);
+
+		rec -= 2;
+		hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4);
+
+		hfs_bnode_put(node);
+	}
+	hfs_bnode_put(new_node);
+	mark_inode_dirty(tree->inode);
+
+	return 0;
+}
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
new file mode 100644
index 0000000..394725e
--- /dev/null
+++ b/fs/hfs/btree.c
@@ -0,0 +1,327 @@
+/*
+ *  linux/fs/hfs/btree.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle opening/closing btree
+ */
+
+#include <linux/pagemap.h>
+
+#include "btree.h"
+
+/* Get a reference to a B*Tree and do some initial checks */
+struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
+{
+	struct hfs_btree *tree;
+	struct hfs_btree_header_rec *head;
+	struct address_space *mapping;
+	struct page *page;
+	unsigned int size;
+
+	tree = kmalloc(sizeof(*tree), GFP_KERNEL);
+	if (!tree)
+		return NULL;
+	memset(tree, 0, sizeof(*tree));
+
+	init_MUTEX(&tree->tree_lock);
+	spin_lock_init(&tree->hash_lock);
+	/* Set the correct compare function */
+	tree->sb = sb;
+	tree->cnid = id;
+	tree->keycmp = keycmp;
+
+	tree->inode = iget_locked(sb, id);
+	if (!tree->inode)
+		goto free_tree;
+	if (!(tree->inode->i_state & I_NEW))
+		BUG();
+	{
+	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
+	HFS_I(tree->inode)->flags = 0;
+	init_MUTEX(&HFS_I(tree->inode)->extents_lock);
+	switch (id) {
+	case HFS_EXT_CNID:
+		hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
+				    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
+		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
+		break;
+	case HFS_CAT_CNID:
+		hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
+				    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
+		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
+		break;
+	default:
+		BUG();
+	}
+	}
+	unlock_new_inode(tree->inode);
+
+	mapping = tree->inode->i_mapping;
+	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	if (IS_ERR(page))
+		goto free_tree;
+
+	/* Load the header */
+	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	tree->root = be32_to_cpu(head->root);
+	tree->leaf_count = be32_to_cpu(head->leaf_count);
+	tree->leaf_head = be32_to_cpu(head->leaf_head);
+	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
+	tree->node_count = be32_to_cpu(head->node_count);
+	tree->free_nodes = be32_to_cpu(head->free_nodes);
+	tree->attributes = be32_to_cpu(head->attributes);
+	tree->node_size = be16_to_cpu(head->node_size);
+	tree->max_key_len = be16_to_cpu(head->max_key_len);
+	tree->depth = be16_to_cpu(head->depth);
+
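+	/* the node size must be a non-zero power of two */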
+	size = tree->node_size;
+	if (!size || size & (size - 1))
+		goto fail_page;
+	if (!tree->node_count)
+		goto fail_page;
+	tree->node_size_shift = ffs(size) - 1;
+	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	kunmap(page);
+	page_cache_release(page);
+	return tree;
+
+ fail_page:
+	tree->inode->i_mapping->a_ops = &hfs_aops;
+	page_cache_release(page);
+ free_tree:
+	iput(tree->inode);
+	kfree(tree);
+	return NULL;
+}
+
+/* Release resources used by a btree */
+void hfs_btree_close(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node;
+	int i;
+
+	if (!tree)
+		return;
+
+	for (i = 0; i < NODE_HASH_SIZE; i++) {
+		while ((node = tree->node_hash[i])) {
+			tree->node_hash[i] = node->next_hash;
+			if (atomic_read(&node->refcnt))
+				printk("HFS: node %d:%d still has %d user(s)!\n",
+					node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			hfs_bnode_free(node);
+			tree->node_hash_cnt--;
+		}
+	}
+	iput(tree->inode);
+	kfree(tree);
+}
+
+void hfs_btree_write(struct hfs_btree *tree)
+{
+	struct hfs_btree_header_rec *head;
+	struct hfs_bnode *node;
+	struct page *page;
+
+	node = hfs_bnode_find(tree, 0);
+	if (IS_ERR(node))
+		/* panic? */
+		return;
+	/* Load the header */
+	page = node->page[0];
+	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+
+	head->root = cpu_to_be32(tree->root);
+	head->leaf_count = cpu_to_be32(tree->leaf_count);
+	head->leaf_head = cpu_to_be32(tree->leaf_head);
+	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
+	head->node_count = cpu_to_be32(tree->node_count);
+	head->free_nodes = cpu_to_be32(tree->free_nodes);
+	head->attributes = cpu_to_be32(tree->attributes);
+	head->depth = cpu_to_be16(tree->depth);
+
+	kunmap(page);
+	set_page_dirty(page);
+	hfs_bnode_put(node);
+}
+
+static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
+{
+	struct hfs_btree *tree = prev->tree;
+	struct hfs_bnode *node;
+	struct hfs_bnode_desc desc;
+	__be32 cnid;
+
+	node = hfs_bnode_create(tree, idx);
+	if (IS_ERR(node))
+		return node;
+
+	if (!tree->free_nodes)
+		panic("FIXME!!!");
+	tree->free_nodes--;
+	prev->next = idx;
+	cnid = cpu_to_be32(idx);
+	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+
+	node->type = HFS_NODE_MAP;
+	node->num_recs = 1;
+	hfs_bnode_clear(node, 0, tree->node_size);
+	desc.next = 0;
+	desc.prev = 0;
+	desc.type = HFS_NODE_MAP;
+	desc.height = 0;
+	desc.num_recs = cpu_to_be16(1);
+	desc.reserved = 0;
+	hfs_bnode_write(node, &desc, 0, sizeof(desc));
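+	/* mark the first bit of the new map record (the map node itself) as
+	 * used and set up the record offset table at the end of the node */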
+	hfs_bnode_write_u16(node, 14, 0x8000);
+	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
+	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
+
+	return node;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node, *next_node;
+	struct page **pagep;
+	u32 nidx, idx;
+	u16 off, len;
+	u8 *data, byte, m;
+	int i;
+
+	while (!tree->free_nodes) {
+		struct inode *inode = tree->inode;
+		u32 count;
+		int res;
+
+		res = hfs_extend_file(inode);
+		if (res)
+			return ERR_PTR(res);
+		HFS_I(inode)->phys_size = inode->i_size =
+				(loff_t)HFS_I(inode)->alloc_blocks *
+				HFS_SB(tree->sb)->alloc_blksz;
+		HFS_I(inode)->fs_blocks = inode->i_size >>
+					  tree->sb->s_blocksize_bits;
+		inode_set_bytes(inode, inode->i_size);
+		count = inode->i_size >> tree->node_size_shift;
+		tree->free_nodes = count - tree->node_count;
+		tree->node_count = count;
+	}
+
+	nidx = 0;
+	node = hfs_bnode_find(tree, nidx);
+	if (IS_ERR(node))
+		return node;
+	len = hfs_brec_lenoff(node, 2, &off);
+
+	off += node->page_offset;
+	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	data = kmap(*pagep);
+	off &= ~PAGE_CACHE_MASK;
+	idx = 0;
+
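+	/* scan the allocation bitmap for the first clear bit (MSB first);
+	 * each bit represents one btree node */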
+	for (;;) {
+		while (len) {
+			byte = data[off];
+			if (byte != 0xff) {
+				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
+					if (!(byte & m)) {
+						idx += i;
+						data[off] |= m;
+						set_page_dirty(*pagep);
+						kunmap(*pagep);
+						tree->free_nodes--;
+						mark_inode_dirty(tree->inode);
+						hfs_bnode_put(node);
+						return hfs_bnode_create(tree, idx);
+					}
+				}
+			}
+			if (++off >= PAGE_CACHE_SIZE) {
+				kunmap(*pagep);
+				data = kmap(*++pagep);
+				off = 0;
+			}
+			idx += 8;
+			len--;
+		}
+		kunmap(*pagep);
+		nidx = node->next;
+		if (!nidx) {
+			printk("create new bmap node...\n");
+			next_node = hfs_bmap_new_bmap(node, idx);
+		} else
+			next_node = hfs_bnode_find(tree, nidx);
+		hfs_bnode_put(node);
+		if (IS_ERR(next_node))
+			return next_node;
+		node = next_node;
+
+		len = hfs_brec_lenoff(node, 0, &off);
+		off += node->page_offset;
+		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		data = kmap(*pagep);
+		off &= ~PAGE_CACHE_MASK;
+	}
+}
+
+void hfs_bmap_free(struct hfs_bnode *node)
+{
+	struct hfs_btree *tree;
+	struct page *page;
+	u16 off, len;
+	u32 nidx;
+	u8 *data, byte, m;
+
+	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
+	tree = node->tree;
+	nidx = node->this;
+	node = hfs_bnode_find(tree, 0);
+	if (IS_ERR(node))
+		return;
+	len = hfs_brec_lenoff(node, 2, &off);
+	while (nidx >= len * 8) {
+		u32 i;
+
+		nidx -= len * 8;
+		i = node->next;
+		hfs_bnode_put(node);
+		if (!i) {
+			/* panic */;
+			printk("HFS: unable to free bnode %u. bmap not found!\n", node->this);
+			return;
+		}
+		node = hfs_bnode_find(tree, i);
+		if (IS_ERR(node))
+			return;
+		if (node->type != HFS_NODE_MAP) {
+			/* panic */;
+			printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type);
+			hfs_bnode_put(node);
+			return;
+		}
+		len = hfs_brec_lenoff(node, 0, &off);
+	}
+	off += node->page_offset + nidx / 8;
+	page = node->page[off >> PAGE_CACHE_SHIFT];
+	data = kmap(page);
+	off &= ~PAGE_CACHE_MASK;
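+	/* bitmap bits are assigned MSB first within each byte */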
+	m = 1 << (~nidx & 7);
+	byte = data[off];
+	if (!(byte & m)) {
+		printk("HFS: trying to free free bnode %u(%d)\n", node->this, node->type);
+		kunmap(page);
+		hfs_bnode_put(node);
+		return;
+	}
+	data[off] = byte & ~m;
+	set_page_dirty(page);
+	kunmap(page);
+	hfs_bnode_put(node);
+	tree->free_nodes++;
+	mark_inode_dirty(tree->inode);
+}
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
new file mode 100644
index 0000000..cc51905
--- /dev/null
+++ b/fs/hfs/btree.h
@@ -0,0 +1,168 @@
+/*
+ *  linux/fs/hfs/btree.h
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ */
+
+#include "hfs_fs.h"
+
+typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+
+#define NODE_HASH_SIZE  256
+
+/* A HFS BTree held in memory */
+struct hfs_btree {
+	struct super_block *sb;
+	struct inode *inode;
+	btree_keycmp keycmp;
+
+	u32 cnid;
+	u32 root;
+	u32 leaf_count;
+	u32 leaf_head;
+	u32 leaf_tail;
+	u32 node_count;
+	u32 free_nodes;
+	u32 attributes;
+
+	unsigned int node_size;
+	unsigned int node_size_shift;
+	unsigned int max_key_len;
+	unsigned int depth;
+
+	//unsigned int map1_size, map_size;
+	struct semaphore tree_lock;
+
+	unsigned int pages_per_bnode;
+	spinlock_t hash_lock;
+	struct hfs_bnode *node_hash[NODE_HASH_SIZE];
+	int node_hash_cnt;
+};
+
+/* A HFS BTree node in memory */
+struct hfs_bnode {
+	struct hfs_btree *tree;
+
+	u32 prev;
+	u32 this;
+	u32 next;
+	u32 parent;
+
+	u16 num_recs;
+	u8 type;
+	u8 height;
+
+	struct hfs_bnode *next_hash;
+	unsigned long flags;
+	wait_queue_head_t lock_wq;
+	atomic_t refcnt;
+	unsigned int page_offset;
+	struct page *page[0];
+};
+
+#define HFS_BNODE_ERROR		0
+#define HFS_BNODE_NEW		1
+#define HFS_BNODE_DELETED	2
+
+struct hfs_find_data {
+	btree_key *key;
+	btree_key *search_key;
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	int record;
+	int keyoffset, keylength;
+	int entryoffset, entrylength;
+};
+
+
+/* btree.c */
+extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
+extern void hfs_btree_close(struct hfs_btree *);
+extern void hfs_btree_write(struct hfs_btree *);
+extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
+extern void hfs_bmap_free(struct hfs_bnode *node);
+
+/* bnode.c */
+extern void hfs_bnode_read(struct hfs_bnode *, void *, int, int);
+extern u16 hfs_bnode_read_u16(struct hfs_bnode *, int);
+extern u8 hfs_bnode_read_u8(struct hfs_bnode *, int);
+extern void hfs_bnode_read_key(struct hfs_bnode *, void *, int);
+extern void hfs_bnode_write(struct hfs_bnode *, void *, int, int);
+extern void hfs_bnode_write_u16(struct hfs_bnode *, int, u16);
+extern void hfs_bnode_write_u8(struct hfs_bnode *, int, u8);
+extern void hfs_bnode_clear(struct hfs_bnode *, int, int);
+extern void hfs_bnode_copy(struct hfs_bnode *, int,
+			   struct hfs_bnode *, int, int);
+extern void hfs_bnode_move(struct hfs_bnode *, int, int, int);
+extern void hfs_bnode_dump(struct hfs_bnode *);
+extern void hfs_bnode_unlink(struct hfs_bnode *);
+extern struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *, u32);
+extern struct hfs_bnode *hfs_bnode_find(struct hfs_btree *, u32);
+extern void hfs_bnode_unhash(struct hfs_bnode *);
+extern void hfs_bnode_free(struct hfs_bnode *);
+extern struct hfs_bnode *hfs_bnode_create(struct hfs_btree *, u32);
+extern void hfs_bnode_get(struct hfs_bnode *);
+extern void hfs_bnode_put(struct hfs_bnode *);
+
+/* brec.c */
+extern u16 hfs_brec_lenoff(struct hfs_bnode *, u16, u16 *);
+extern u16 hfs_brec_keylen(struct hfs_bnode *, u16);
+extern int hfs_brec_insert(struct hfs_find_data *, void *, int);
+extern int hfs_brec_remove(struct hfs_find_data *);
+
+/* bfind.c */
+extern int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
+extern void hfs_find_exit(struct hfs_find_data *);
+extern int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
+extern int hfs_brec_find(struct hfs_find_data *);
+extern int hfs_brec_read(struct hfs_find_data *, void *, int);
+extern int hfs_brec_goto(struct hfs_find_data *, int);
+
+
+struct hfs_bnode_desc {
+	__be32 next;		/* (V) Number of the next node at this level */
+	__be32 prev;		/* (V) Number of the prev node at this level */
+	u8 type;		/* (F) The type of node */
+	u8 height;		/* (F) The level of this node (leaves=1) */
+	__be16 num_recs;	/* (V) The number of records in this node */
+	u16 reserved;
+} __packed;
+
+#define HFS_NODE_INDEX	0x00	/* An internal (index) node */
+#define HFS_NODE_HEADER	0x01	/* The tree header node (node 0) */
+#define HFS_NODE_MAP	0x02	/* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF	0xFF	/* A leaf (ndNHeight==1) node */
+
+struct hfs_btree_header_rec {
+	__be16 depth;		/* (V) The number of levels in this B-tree */
+	__be32 root;		/* (V) The node number of the root node */
+	__be32 leaf_count;	/* (V) The number of leaf records */
+	__be32 leaf_head;	/* (V) The number of the first leaf node */
+	__be32 leaf_tail;	/* (V) The number of the last leaf node */
+	__be16 node_size;	/* (F) The number of bytes in a node (=512) */
+	__be16 max_key_len;	/* (F) The length of a key in an index node */
+	__be32 node_count;	/* (V) The total number of nodes */
+	__be32 free_nodes;	/* (V) The number of unused nodes */
+	u16 reserved1;
+	__be32 clump_size;	/* (F) clump size. not usually used. */
+	u8 btree_type;		/* (F) BTree type */
+	u8 reserved2;
+	__be32 attributes;	/* (F) attributes */
+	u32 reserved3[16];
+} __packed;
+
+#define BTREE_ATTR_BADCLOSE	0x00000001	/* b-tree not closed properly. not
+						   used by hfsplus. */
+#define HFS_TREE_BIGKEYS	0x00000002	/* key length is u16 instead of u8.
+						   used by hfsplus. */
+#define HFS_TREE_VARIDXKEYS	0x00000004	/* variable key length instead of
+						   max key length. used in catalog
+						   b-tree but not in extents
+						   b-tree (hfsplus). */
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
new file mode 100644
index 0000000..65dedef
--- /dev/null
+++ b/fs/hfs/catalog.c
@@ -0,0 +1,347 @@
+/*
+ *  linux/fs/hfs/catalog.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains the functions related to the catalog B-tree.
+ *
+ * Cache code shamelessly stolen from
+ *     linux/fs/inode.c Copyright (C) 1991, 1992  Linus Torvalds
+ *     re-shamelessly stolen Copyright (C) 1997 Linus Torvalds
+ */
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+/*
+ * hfs_cat_build_key()
+ *
+ * Given the ID of the parent and the name build a search key.
+ */
+void hfs_cat_build_key(btree_key *key, u32 parent, struct qstr *name)
+{
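+	/* key_len counts everything after the length byte itself:
+	 * reserved (1) + parent CNID (4) + name length byte (1) + name */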
+	key->cat.reserved = 0;
+	key->cat.ParID = cpu_to_be32(parent);
+	if (name) {
+		hfs_triv2mac(&key->cat.CName, name);
+		key->key_len = 6 + key->cat.CName.len;
+	} else {
+		memset(&key->cat.CName, 0, sizeof(struct hfs_name));
+		key->key_len = 6;
+	}
+}
+
+static int hfs_cat_build_record(hfs_cat_rec *rec, u32 cnid, struct inode *inode)
+{
+	__be32 mtime = hfs_mtime();
+
+	memset(rec, 0, sizeof(*rec));
+	if (S_ISDIR(inode->i_mode)) {
+		rec->type = HFS_CDR_DIR;
+		rec->dir.DirID = cpu_to_be32(cnid);
+		rec->dir.CrDat = mtime;
+		rec->dir.MdDat = mtime;
+		rec->dir.BkDat = 0;
+		rec->dir.UsrInfo.frView = cpu_to_be16(0xff);
+		return sizeof(struct hfs_cat_dir);
+	} else {
+		/* init some fields for the file record */
+		rec->type = HFS_CDR_FIL;
+		rec->file.Flags = HFS_FIL_USED | HFS_FIL_THD;
+		if (!(inode->i_mode & S_IWUSR))
+			rec->file.Flags |= HFS_FIL_LOCK;
+		rec->file.FlNum = cpu_to_be32(cnid);
+		rec->file.CrDat = mtime;
+		rec->file.MdDat = mtime;
+		rec->file.BkDat = 0;
+		rec->file.UsrWds.fdType = HFS_SB(inode->i_sb)->s_type;
+		rec->file.UsrWds.fdCreator = HFS_SB(inode->i_sb)->s_creator;
+		return sizeof(struct hfs_cat_file);
+	}
+}
+
+static int hfs_cat_build_thread(hfs_cat_rec *rec, int type,
+				u32 parentid, struct qstr *name)
+{
+	rec->type = type;
+	memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved));
+	rec->thread.ParID = cpu_to_be32(parentid);
+	hfs_triv2mac(&rec->thread.CName, name);
+	return sizeof(struct hfs_cat_thread);
+}
+
+/*
+ * hfs_cat_create()
+ *
+ * Add a new file or directory to the catalog B-tree, creating both
+ * the entry record and its thread record.
+ */
+int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
+{
+	struct hfs_find_data fd;
+	struct super_block *sb;
+	union hfs_cat_rec entry;
+	int entry_size;
+	int err;
+
+	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
+	if (dir->i_size >= HFS_MAX_VALENCE)
+		return -ENOSPC;
+
+	sb = dir->i_sb;
+	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
+
+	hfs_cat_build_key(fd.search_key, cnid, NULL);
+	entry_size = hfs_cat_build_thread(&entry, S_ISDIR(inode->i_mode) ?
+			HFS_CDR_THD : HFS_CDR_FTH,
+			dir->i_ino, str);
+	err = hfs_brec_find(&fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto err2;
+	}
+	err = hfs_brec_insert(&fd, &entry, entry_size);
+	if (err)
+		goto err2;
+
+	hfs_cat_build_key(fd.search_key, dir->i_ino, str);
+	entry_size = hfs_cat_build_record(&entry, cnid, inode);
+	err = hfs_brec_find(&fd);
+	if (err != -ENOENT) {
+		/* panic? */
+		if (!err)
+			err = -EEXIST;
+		goto err1;
+	}
+	err = hfs_brec_insert(&fd, &entry, entry_size);
+	if (err)
+		goto err1;
+
+	dir->i_size++;
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	hfs_find_exit(&fd);
+	return 0;
+
+err1:
+	hfs_cat_build_key(fd.search_key, cnid, NULL);
+	if (!hfs_brec_find(&fd))
+		hfs_brec_remove(&fd);
+err2:
+	hfs_find_exit(&fd);
+	return err;
+}
+
+/*
+ * hfs_cat_compare()
+ *
+ * Description:
+ *   This is the comparison function used for the catalog B-tree.  In
+ *   comparing catalog B-tree entries, the parent id is the most
+ *   significant field (compared as unsigned ints).  The name field is
+ *   the least significant (compared in "Macintosh lexical order",
+ *   see hfs_strcmp() in string.c)
+ * Input Variable(s):
+ *   struct hfs_cat_key *key1: pointer to the first key to compare
+ *   struct hfs_cat_key *key2: pointer to the second key to compare
+ * Output Variable(s):
+ *   NONE
+ * Returns:
+ *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
+ * Preconditions:
+ *   key1 and key2 point to "valid" (struct hfs_cat_key)s.
+ * Postconditions:
+ *   This function has no side-effects
+ */
+int hfs_cat_keycmp(const btree_key *key1, const btree_key *key2)
+{
+	int retval;
+
+	retval = be32_to_cpu(key1->cat.ParID) - be32_to_cpu(key2->cat.ParID);
+	if (!retval)
+		retval = hfs_strcmp(key1->cat.CName.name, key1->cat.CName.len,
+				    key2->cat.CName.name, key2->cat.CName.len);
+
+	return retval;
+}
+
+/* Try to get a catalog entry for given catalog id */
+// move to read_super???
+int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
+		      struct hfs_find_data *fd)
+{
+	hfs_cat_rec rec;
+	int res, len, type;
+
+	hfs_cat_build_key(fd->search_key, cnid, NULL);
+	res = hfs_brec_read(fd, &rec, sizeof(rec));
+	if (res)
+		return res;
+
+	type = rec.type;
+	if (type != HFS_CDR_THD && type != HFS_CDR_FTH) {
+		printk("HFS-fs: Found bad thread record in catalog\n");
+		return -EIO;
+	}
+
+	fd->search_key->cat.ParID = rec.thread.ParID;
+	len = fd->search_key->cat.CName.len = rec.thread.CName.len;
+	memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
+	return hfs_brec_find(fd);
+}
+
+
+/*
+ * hfs_cat_delete()
+ *
+ * Delete the indicated file or directory.
+ * The associated thread record, if present, is removed as well.
+ */
+int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
+{
+	struct super_block *sb;
+	struct hfs_find_data fd;
+	struct list_head *pos;
+	int res, type;
+
+	dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+	sb = dir->i_sb;
+	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
+
+	hfs_cat_build_key(fd.search_key, dir->i_ino, str);
+	res = hfs_brec_find(&fd);
+	if (res)
+		goto out;
+
+	type = hfs_bnode_read_u8(fd.bnode, fd.entryoffset);
+	if (type == HFS_CDR_FIL) {
+		struct hfs_cat_file file;
+		hfs_bnode_read(fd.bnode, &file, fd.entryoffset, sizeof(file));
+		if (be32_to_cpu(file.FlNum) == cnid) {
+#if 0
+			hfs_free_fork(sb, &file, HFS_FK_DATA);
+#endif
+			hfs_free_fork(sb, &file, HFS_FK_RSRC);
+		}
+	}
+
+	list_for_each(pos, &HFS_I(dir)->open_dir_list) {
+		struct hfs_readdir_data *rd =
+			list_entry(pos, struct hfs_readdir_data, list);
+		if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
+			rd->file->f_pos--;
+	}
+
+	res = hfs_brec_remove(&fd);
+	if (res)
+		goto out;
+
+	hfs_cat_build_key(fd.search_key, cnid, NULL);
+	res = hfs_brec_find(&fd);
+	if (!res) {
+		res = hfs_brec_remove(&fd);
+		if (res)
+			goto out;
+	}
+
+	dir->i_size--;
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	res = 0;
+out:
+	hfs_find_exit(&fd);
+
+	return res;
+}
+
+/*
+ * hfs_cat_move()
+ *
+ * Rename a file or directory, possibly to a new directory.
+ * Fails with -EEXIST if an entry already exists at the destination;
+ * the caller is expected to remove any existing destination first.
+ */
+int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
+		 struct inode *dst_dir, struct qstr *dst_name)
+{
+	struct super_block *sb;
+	struct hfs_find_data src_fd, dst_fd;
+	union hfs_cat_rec entry;
+	int entry_size, type;
+	int err;
+
+	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
+		dst_dir->i_ino, dst_name->name);
+	sb = src_dir->i_sb;
+	hfs_find_init(HFS_SB(sb)->cat_tree, &src_fd);
+	dst_fd = src_fd;
+
+	/* find the old dir entry and read the data */
+	hfs_cat_build_key(src_fd.search_key, src_dir->i_ino, src_name);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+
+	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
+			    src_fd.entrylength);
+
+	/* create new dir entry with the data from the old entry */
+	hfs_cat_build_key(dst_fd.search_key, dst_dir->i_ino, dst_name);
+	err = hfs_brec_find(&dst_fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto out;
+	}
+
+	err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength);
+	if (err)
+		goto out;
+	dst_dir->i_size++;
+	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dst_dir);
+
+	/* finally remove the old entry */
+	hfs_cat_build_key(src_fd.search_key, src_dir->i_ino, src_name);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+	err = hfs_brec_remove(&src_fd);
+	if (err)
+		goto out;
+	src_dir->i_size--;
+	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(src_dir);
+
+	type = entry.type;
+	if (type == HFS_CDR_FIL && !(entry.file.Flags & HFS_FIL_THD))
+		goto out;
+
+	/* remove old thread entry */
+	hfs_cat_build_key(src_fd.search_key, cnid, NULL);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+	err = hfs_brec_remove(&src_fd);
+	if (err)
+		goto out;
+
+	/* create new thread entry */
+	hfs_cat_build_key(dst_fd.search_key, cnid, NULL);
+	entry_size = hfs_cat_build_thread(&entry, type == HFS_CDR_FIL ? HFS_CDR_FTH : HFS_CDR_THD,
+					dst_dir->i_ino, dst_name);
+	err = hfs_brec_find(&dst_fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto out;
+	}
+	err = hfs_brec_insert(&dst_fd, &entry, entry_size);
+out:
+	hfs_bnode_put(dst_fd.bnode);
+	hfs_find_exit(&src_fd);
+	return err;
+}
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
new file mode 100644
index 0000000..c559982
--- /dev/null
+++ b/fs/hfs/dir.c
@@ -0,0 +1,330 @@
+/*
+ *  linux/fs/hfs/dir.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains directory-related functions independent of which
+ * scheme is being used to represent forks.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ */
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+/*
+ * hfs_lookup()
+ */
+static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
+				 struct nameidata *nd)
+{
+	hfs_cat_rec rec;
+	struct hfs_find_data fd;
+	struct inode *inode = NULL;
+	int res;
+
+	dentry->d_op = &hfs_dentry_operations;
+
+	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
+	hfs_cat_build_key(fd.search_key, dir->i_ino, &dentry->d_name);
+	res = hfs_brec_read(&fd, &rec, sizeof(rec));
+	if (res) {
+		hfs_find_exit(&fd);
+		if (res == -ENOENT) {
+			/* No such entry */
+			inode = NULL;
+			goto done;
+		}
+		return ERR_PTR(res);
+	}
+	inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec);
+	hfs_find_exit(&fd);
+	if (!inode)
+		return ERR_PTR(-EACCES);
+done:
+	d_add(dentry, inode);
+	return NULL;
+}
+
+/*
+ * hfs_readdir
+ */
+static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	int len, err;
+	char strbuf[HFS_NAMELEN + 1];
+	union hfs_cat_rec entry;
+	struct hfs_find_data fd;
+	struct hfs_readdir_data *rd;
+	u16 type;
+
+	if (filp->f_pos >= inode->i_size)
+		return 0;
+
+	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
+	hfs_cat_build_key(fd.search_key, inode->i_ino, NULL);
+	err = hfs_brec_find(&fd);
+	if (err)
+		goto out;
+
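+	/* positions 0 and 1 are the synthetic "." and ".." entries;
+	 * real catalog records start at position 2 */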
+	switch ((u32)filp->f_pos) {
+	case 0:
+		/* This is completely artificial... */
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+			goto out;
+		filp->f_pos++;
+		/* fall through */
+	case 1:
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		if (entry.type != HFS_CDR_THD) {
+			printk("HFS: bad catalog folder thread\n");
+			err = -EIO;
+			goto out;
+		}
+		//if (fd.entrylength < HFS_MIN_THREAD_SZ) {
+		//	printk("HFS: truncated catalog thread\n");
+		//	err = -EIO;
+		//	goto out;
+		//}
+		if (filldir(dirent, "..", 2, 1,
+			    be32_to_cpu(entry.thread.ParID), DT_DIR))
+			goto out;
+		filp->f_pos++;
+		/* fall through */
+	default:
+		if (filp->f_pos >= inode->i_size)
+			goto out;
+		err = hfs_brec_goto(&fd, filp->f_pos - 1);
+		if (err)
+			goto out;
+	}
+
+	for (;;) {
+		if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
+			printk("HFS: walked past end of dir\n");
+			err = -EIO;
+			goto out;
+		}
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		type = entry.type;
+		len = hfs_mac2triv(strbuf, &fd.key->cat.CName);
+		if (type == HFS_CDR_DIR) {
+			if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
+				printk("HFS: small dir entry\n");
+				err = -EIO;
+				goto out;
+			}
+			if (filldir(dirent, strbuf, len, filp->f_pos,
+				    be32_to_cpu(entry.dir.DirID), DT_DIR))
+				break;
+		} else if (type == HFS_CDR_FIL) {
+			if (fd.entrylength < sizeof(struct hfs_cat_file)) {
+				printk("HFS: small file entry\n");
+				err = -EIO;
+				goto out;
+			}
+			if (filldir(dirent, strbuf, len, filp->f_pos,
+				    be32_to_cpu(entry.file.FlNum), DT_REG))
+				break;
+		} else {
+			printk("HFS: bad catalog entry type %d\n", type);
+			err = -EIO;
+			goto out;
+		}
+		filp->f_pos++;
+		if (filp->f_pos >= inode->i_size)
+			goto out;
+		err = hfs_brec_goto(&fd, 1);
+		if (err)
+			goto out;
+	}
+	rd = filp->private_data;
+	if (!rd) {
+		rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL);
+		if (!rd) {
+			err = -ENOMEM;
+			goto out;
+		}
+		filp->private_data = rd;
+		rd->file = filp;
+		list_add(&rd->list, &HFS_I(inode)->open_dir_list);
+	}
+	memcpy(&rd->key, &fd.key->cat, sizeof(struct hfs_cat_key));
+out:
+	hfs_find_exit(&fd);
+	return err;
+}
+
+static int hfs_dir_release(struct inode *inode, struct file *file)
+{
+	struct hfs_readdir_data *rd = file->private_data;
+	if (rd) {
+		list_del(&rd->list);
+		kfree(rd);
+	}
+	return 0;
+}
+
+/*
+ * hfs_create()
+ *
+ * This is the create() entry in the inode_operations structure for
+ * regular HFS directories.  The purpose is to create a new file in
+ * a directory and return a corresponding inode, given the inode for
+ * the directory and the name (and its length) of the new file.
+ */
+static int hfs_create(struct inode *dir, struct dentry *dentry, int mode,
+		      struct nameidata *nd)
+{
+	struct inode *inode;
+	int res;
+
+	inode = hfs_new_inode(dir, &dentry->d_name, mode);
+	if (!inode)
+		return -ENOSPC;
+
+	res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res) {
+		inode->i_nlink = 0;
+		hfs_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+	d_instantiate(dentry, inode);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * hfs_mkdir()
+ *
+ * This is the mkdir() entry in the inode_operations structure for
+ * regular HFS directories.  The purpose is to create a new directory
+ * in a directory, given the inode for the parent directory and the
+ * name (and its length) of the new directory.
+ */
+static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct inode *inode;
+	int res;
+
+	inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
+	if (!inode)
+		return -ENOSPC;
+
+	res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res) {
+		inode->i_nlink = 0;
+		hfs_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+	d_instantiate(dentry, inode);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * hfs_unlink()
+ *
+ * This is the unlink() entry in the inode_operations structure for
+ * regular HFS directories.  The purpose is to delete an existing
+ * file, given the inode for the parent directory and the name
+ * (and its length) of the existing file.
+ */
+static int hfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode;
+	int res;
+
+	inode = dentry->d_inode;
+	res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
+	if (res)
+		return res;
+
+	inode->i_nlink--;
+	hfs_delete_inode(inode);
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+
+	return res;
+}
+
+/*
+ * hfs_rmdir()
+ *
+ * This is the rmdir() entry in the inode_operations structure for
+ * regular HFS directories.  The purpose is to delete an existing
+ * directory, given the inode for the parent directory and the name
+ * (and its length) of the existing directory.
+ */
+static int hfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode;
+	int res;
+
+	inode = dentry->d_inode;
+	if (inode->i_size != 2)
+		return -ENOTEMPTY;
+	res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
+	if (res)
+		return res;
+	inode->i_nlink = 0;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	hfs_delete_inode(inode);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * hfs_rename()
+ *
+ * This is the rename() entry in the inode_operations structure for
+ * regular HFS directories.  The purpose is to rename an existing
+ * file or directory, given the inode for the current directory and
+ * the name (and its length) of the existing file/directory and the
+ * inode for the new directory and the name (and its length) of the
+ * new file/directory.
+ * XXX: how do you handle must_be_dir?
+ */
+static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+		      struct inode *new_dir, struct dentry *new_dentry)
+{
+	int res;
+
+	/* Unlink destination if it already exists */
+	if (new_dentry->d_inode) {
+		res = hfs_unlink(new_dir, new_dentry);
+		if (res)
+			return res;
+	}
+
+	res = hfs_cat_move(old_dentry->d_inode->i_ino,
+			   old_dir, &old_dentry->d_name,
+			   new_dir, &new_dentry->d_name);
+	if (!res)
+		hfs_cat_build_key((btree_key *)&HFS_I(old_dentry->d_inode)->cat_key,
+				  new_dir->i_ino, &new_dentry->d_name);
+	return res;
+}
+
+struct file_operations hfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= hfs_readdir,
+	.llseek		= generic_file_llseek,
+	.release	= hfs_dir_release,
+};
+
+struct inode_operations hfs_dir_inode_operations = {
+	.create		= hfs_create,
+	.lookup		= hfs_lookup,
+	.unlink		= hfs_unlink,
+	.mkdir		= hfs_mkdir,
+	.rmdir		= hfs_rmdir,
+	.rename		= hfs_rename,
+	.setattr	= hfs_inode_setattr,
+};
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
new file mode 100644
index 0000000..cbc8510
--- /dev/null
+++ b/fs/hfs/extent.c
@@ -0,0 +1,527 @@
+/*
+ *  linux/fs/hfs/extent.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains the functions related to the extents B-tree.
+ */
+
+#include <linux/pagemap.h>
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * build_key
+ */
+static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
+{
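+	/* 7 = fork type (1) + file CNID (4) + allocation block number (2) */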
+	key->key_len = 7;
+	key->ext.FkType = type;
+	key->ext.FNum = cpu_to_be32(cnid);
+	key->ext.FABN = cpu_to_be16(block);
+}
+
+/*
+ * hfs_ext_compare()
+ *
+ * Description:
+ *   This is the comparison function used for the extents B-tree.  In
+ *   comparing extent B-tree entries, the file id is the most
+ *   significant field (compared as unsigned ints); the fork type is
+ *   the second most significant field (compared as unsigned chars);
+ *   and the allocation block number field is the least significant
+ *   (compared as unsigned ints).
+ * Input Variable(s):
+ *   struct hfs_ext_key *key1: pointer to the first key to compare
+ *   struct hfs_ext_key *key2: pointer to the second key to compare
+ * Output Variable(s):
+ *   NONE
+ * Returns:
+ *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
+ * Preconditions:
+ *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
+ * Postconditions:
+ *   This function has no side-effects
+ */
+int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
+{
+	__be32 fnum1, fnum2;
+	__be16 block1, block2;
+
+	fnum1 = key1->ext.FNum;
+	fnum2 = key2->ext.FNum;
+	if (fnum1 != fnum2)
+		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
+	if (key1->ext.FkType != key2->ext.FkType)
+		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;
+
+	block1 = key1->ext.FABN;
+	block2 = key2->ext.FABN;
+	if (block1 == block2)
+		return 0;
+	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
+}
+
+/*
+ * hfs_ext_find_block
+ *
+ * Find a block within an extent record
+ */
+static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
+{
+	int i;
+	u16 count;
+
+	for (i = 0; i < 3; ext++, i++) {
+		count = be16_to_cpu(ext->count);
+		if (off < count)
+			return be16_to_cpu(ext->block) + off;
+		off -= count;
+	}
+	/* panic? */
+	return 0;
+}
+
+static int hfs_ext_block_count(struct hfs_extent *ext)
+{
+	int i;
+	u16 count = 0;
+
+	for (i = 0; i < 3; ext++, i++)
+		count += be16_to_cpu(ext->count);
+	return count;
+}
+
+static u16 hfs_ext_lastblock(struct hfs_extent *ext)
+{
+	int i;
+
+	ext += 2;
+	for (i = 0; i < 2; ext--, i++)
+		if (ext->count)
+			break;
+	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
+}
+
+static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
+{
+	int res;
+
+	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
+			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
+	res = hfs_brec_find(fd);
+	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
+		if (res != -ENOENT)
+			return;
+		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
+		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
+	} else {
+		if (res)
+			return;
+		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
+		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
+	}
+}
+
+void hfs_ext_write_extent(struct inode *inode)
+{
+	struct hfs_find_data fd;
+
+	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
+		hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
+		__hfs_ext_write_extent(inode, &fd);
+		hfs_find_exit(&fd);
+	}
+}
+
+static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
+					u32 cnid, u32 block, u8 type)
+{
+	int res;
+
+	hfs_ext_build_key(fd->search_key, cnid, block, type);
+	fd->key->ext.FNum = 0;
+	res = hfs_brec_find(fd);
+	if (res && res != -ENOENT)
+		return res;
+	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
+	    fd->key->ext.FkType != fd->search_key->ext.FkType)
+		return -ENOENT;
+	if (fd->entrylength != sizeof(hfs_extent_rec))
+		return -EIO;
+	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
+	return 0;
+}
+
+static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
+{
+	int res;
+
+	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY)
+		__hfs_ext_write_extent(inode, fd);
+
+	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
+				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
+	if (!res) {
+		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
+		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
+	} else {
+		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
+		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
+	}
+	return res;
+}
+
+static int hfs_ext_read_extent(struct inode *inode, u16 block)
+{
+	struct hfs_find_data fd;
+	int res;
+
+	if (block >= HFS_I(inode)->cached_start &&
+	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
+		return 0;
+
+	hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
+	res = __hfs_ext_cache_extent(&fd, inode, block);
+	hfs_find_exit(&fd);
+	return res;
+}
+
+static void hfs_dump_extent(struct hfs_extent *extent)
+{
+	int i;
+
+	dprint(DBG_EXTENT, "   ");
+	for (i = 0; i < 3; i++)
+		dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block),
+				 be16_to_cpu(extent[i].count));
+	dprint(DBG_EXTENT, "\n");
+}
+
+static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
+			  u16 alloc_block, u16 block_count)
+{
+	u16 count, start;
+	int i;
+
+	hfs_dump_extent(extent);
+	for (i = 0; i < 3; extent++, i++) {
+		count = be16_to_cpu(extent->count);
+		if (offset == count) {
+			start = be16_to_cpu(extent->block);
+			if (alloc_block != start + count) {
+				if (++i >= 3)
+					return -ENOSPC;
+				extent++;
+				extent->block = cpu_to_be16(alloc_block);
+			} else
+				block_count += count;
+			extent->count = cpu_to_be16(block_count);
+			return 0;
+		} else if (offset < count)
+			break;
+		offset -= count;
+	}
+	/* panic? */
+	return -EIO;
+}
+
+static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
+			    u16 offset, u16 block_nr)
+{
+	u16 count, start;
+	int i;
+
+	hfs_dump_extent(extent);
+	for (i = 0; i < 3; extent++, i++) {
+		count = be16_to_cpu(extent->count);
+		if (offset == count)
+			goto found;
+		else if (offset < count)
+			break;
+		offset -= count;
+	}
+	/* panic? */
+	return -EIO;
+found:
+	for (;;) {
+		start = be16_to_cpu(extent->block);
+		if (count <= block_nr) {
+			hfs_clear_vbm_bits(sb, start, count);
+			extent->block = 0;
+			extent->count = 0;
+			block_nr -= count;
+		} else {
+			count -= block_nr;
+			hfs_clear_vbm_bits(sb, start + count, block_nr);
+			extent->count = cpu_to_be16(count);
+			block_nr = 0;
+		}
+		if (!block_nr || !i)
+			return 0;
+		i--;
+		extent--;
+		count = be16_to_cpu(extent->count);
+	}
+}
+
+int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
+{
+	struct hfs_find_data fd;
+	u32 total_blocks, blocks, start;
+	u32 cnid = be32_to_cpu(file->FlNum);
+	struct hfs_extent *extent;
+	int res, i;
+
+	if (type == HFS_FK_DATA) {
+		total_blocks = be32_to_cpu(file->PyLen);
+		extent = file->ExtRec;
+	} else {
+		total_blocks = be32_to_cpu(file->RPyLen);
+		extent = file->RExtRec;
+	}
+	total_blocks /= HFS_SB(sb)->alloc_blksz;
+	if (!total_blocks)
+		return 0;
+
+	blocks = 0;
+	for (i = 0; i < 3; extent++, i++)
+		blocks += be16_to_cpu(extent[i].count);
+
+	res = hfs_free_extents(sb, extent, blocks, blocks);
+	if (res)
+		return res;
+	if (total_blocks == blocks)
+		return 0;
+
+	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
+	do {
+		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
+		if (res)
+			break;
+		start = be16_to_cpu(fd.key->ext.FABN);
+		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
+		hfs_brec_remove(&fd);
+		total_blocks = start;
+	} while (total_blocks > blocks);
+	hfs_find_exit(&fd);
+
+	return res;
+}
+
+/*
+ * hfs_get_block
+ */
+int hfs_get_block(struct inode *inode, sector_t block,
+		  struct buffer_head *bh_result, int create)
+{
+	struct super_block *sb;
+	u16 dblock, ablock;
+	int res;
+
+	sb = inode->i_sb;
+	/* Convert inode block to disk allocation block */
+	ablock = (u32)block / HFS_SB(sb)->fs_div;
+
+	if (block >= HFS_I(inode)->fs_blocks) {
+		if (block > HFS_I(inode)->fs_blocks || !create)
+			return -EIO;
+		if (ablock >= HFS_I(inode)->alloc_blocks) {
+			res = hfs_extend_file(inode);
+			if (res)
+				return res;
+		}
+	} else
+		create = 0;
+
+	if (ablock < HFS_I(inode)->first_blocks) {
+		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
+		goto done;
+	}
+
+	down(&HFS_I(inode)->extents_lock);
+	res = hfs_ext_read_extent(inode, ablock);
+	if (!res)
+		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
+					    ablock - HFS_I(inode)->cached_start);
+	else {
+		up(&HFS_I(inode)->extents_lock);
+		return -EIO;
+	}
+	up(&HFS_I(inode)->extents_lock);
+
+done:
+	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
+	       dblock * HFS_SB(sb)->fs_div +
+	       (u32)block % HFS_SB(sb)->fs_div);
+
+	if (create) {
+		set_buffer_new(bh_result);
+		HFS_I(inode)->phys_size += sb->s_blocksize;
+		HFS_I(inode)->fs_blocks++;
+		inode_add_bytes(inode, sb->s_blocksize);
+		mark_inode_dirty(inode);
+	}
+	return 0;
+}
+
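+/*
+ * hfs_extend_file()
+ *
+ * Grow a file by up to one clump of allocation blocks: search the
+ * volume bitmap for free blocks near the fork's current last block,
+ * then either merge the new run into the last extent, append a new
+ * extent to the current record, or start a fresh extent record that
+ * will be inserted into the extents B-tree.
+ */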
+int hfs_extend_file(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	u32 start, len, goal;
+	int res;
+
+	down(&HFS_I(inode)->extents_lock);
+	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
+		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
+	else {
+		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
+		if (res)
+			goto out;
+		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
+	}
+
+	len = HFS_I(inode)->clump_blocks;
+	start = hfs_vbm_search_free(sb, goal, &len);
+	if (!len) {
+		res = -ENOSPC;
+		goto out;
+	}
+
+	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
+		if (!HFS_I(inode)->first_blocks) {
+			dprint(DBG_EXTENT, "first extents\n");
+			/* no extents yet */
+			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
+			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
+			res = 0;
+		} else {
+			/* try to append to extents in inode */
+			res = hfs_add_extent(HFS_I(inode)->first_extents,
+					     HFS_I(inode)->alloc_blocks,
+					     start, len);
+			if (res == -ENOSPC)
+				goto insert_extent;
+		}
+		if (!res) {
+			hfs_dump_extent(HFS_I(inode)->first_extents);
+			HFS_I(inode)->first_blocks += len;
+		}
+	} else {
+		res = hfs_add_extent(HFS_I(inode)->cached_extents,
+				     HFS_I(inode)->alloc_blocks -
+				     HFS_I(inode)->cached_start,
+				     start, len);
+		if (!res) {
+			hfs_dump_extent(HFS_I(inode)->cached_extents);
+			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
+			HFS_I(inode)->cached_blocks += len;
+		} else if (res == -ENOSPC)
+			goto insert_extent;
+	}
+out:
+	up(&HFS_I(inode)->extents_lock);
+	if (!res) {
+		HFS_I(inode)->alloc_blocks += len;
+		mark_inode_dirty(inode);
+		if (inode->i_ino < HFS_FIRSTUSER_CNID)
+			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
+		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
+		sb->s_dirt = 1;
+	}
+	return res;
+
+insert_extent:
+	dprint(DBG_EXTENT, "insert new extent\n");
+	hfs_ext_write_extent(inode);
+
+	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
+	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
+	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
+	hfs_dump_extent(HFS_I(inode)->cached_extents);
+	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
+	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
+	HFS_I(inode)->cached_blocks = len;
+
+	res = 0;
+	goto out;
+}
+
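+/*
+ * hfs_file_truncate()
+ *
+ * Bring the on-disk allocation of a file in line with inode->i_size:
+ * when the file grew, push the new size through prepare_write and
+ * commit_write so the added area is zero-filled; when it shrank, free
+ * the allocation blocks beyond the new size and drop extent records
+ * that are no longer needed.
+ */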
+void hfs_file_truncate(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct hfs_find_data fd;
+	u16 blk_cnt, alloc_cnt, start;
+	u32 size;
+	int res;
+
+	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
+	       (long long)HFS_I(inode)->phys_size, inode->i_size);
+	if (inode->i_size > HFS_I(inode)->phys_size) {
+		struct address_space *mapping = inode->i_mapping;
+		struct page *page;
+		int res;
+
+		size = inode->i_size - 1;
+		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
+		if (!page)
+			return;
+		size &= PAGE_CACHE_SIZE - 1;
+		size++;
+		res = mapping->a_ops->prepare_write(NULL, page, size, size);
+		if (!res)
+			res = mapping->a_ops->commit_write(NULL, page, size, size);
+		if (res)
+			inode->i_size = HFS_I(inode)->phys_size;
+		unlock_page(page);
+		page_cache_release(page);
+		mark_inode_dirty(inode);
+		return;
+	}
+	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
+	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
+	alloc_cnt = HFS_I(inode)->alloc_blocks;
+	if (blk_cnt == alloc_cnt)
+		goto out;
+
+	down(&HFS_I(inode)->extents_lock);
+	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
+	while (1) {
+		if (alloc_cnt == HFS_I(inode)->first_blocks) {
+			hfs_free_extents(sb, HFS_I(inode)->first_extents,
+					 alloc_cnt, alloc_cnt - blk_cnt);
+			hfs_dump_extent(HFS_I(inode)->first_extents);
+			HFS_I(inode)->first_blocks = blk_cnt;
+			break;
+		}
+		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
+		if (res)
+			break;
+		start = HFS_I(inode)->cached_start;
+		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
+				 alloc_cnt - start, alloc_cnt - blk_cnt);
+		hfs_dump_extent(HFS_I(inode)->cached_extents);
+		if (blk_cnt > start) {
+			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
+			break;
+		}
+		alloc_cnt = start;
+		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
+		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
+		hfs_brec_remove(&fd);
+	}
+	hfs_find_exit(&fd);
+	up(&HFS_I(inode)->extents_lock);
+
+	HFS_I(inode)->alloc_blocks = blk_cnt;
+out:
+	HFS_I(inode)->phys_size = inode->i_size;
+	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
+	mark_inode_dirty(inode);
+}
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
new file mode 100644
index 0000000..df6b33a
--- /dev/null
+++ b/fs/hfs/hfs.h
@@ -0,0 +1,287 @@
+/*
+ *  linux/fs/hfs/hfs.h
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ */
+
+#ifndef _HFS_H
+#define _HFS_H
+
+/* offsets to various blocks */
+#define HFS_DD_BLK		0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK		1 /* First block of partition map */
+#define HFS_MDB_BLK		2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC	0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC	0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC	0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC		0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC	0xD2D7 /* MFS MDB (super block) */
+
+/* various FIXED size parameters */
+#define HFS_SECTOR_SIZE		512    /* size of an HFS sector */
+#define HFS_SECTOR_SIZE_BITS	9      /* log_2(HFS_SECTOR_SIZE) */
+#define HFS_NAMELEN		31     /* maximum length of an HFS filename */
+#define HFS_MAX_VALENCE		32767U
+
+/* Meanings of the drAtrb field of the MDB,
+ * Reference: _Inside Macintosh: Files_ p. 2-61
+ */
+#define HFS_SB_ATTRIB_HLOCK	(1 << 7)
+#define HFS_SB_ATTRIB_UNMNT	(1 << 8)
+#define HFS_SB_ATTRIB_SPARED	(1 << 9)
+#define HFS_SB_ATTRIB_INCNSTNT	(1 << 11)
+#define HFS_SB_ATTRIB_SLOCK	(1 << 15)
+
+/* Some special File ID numbers */
+#define HFS_POR_CNID		1	/* Parent Of the Root */
+#define HFS_ROOT_CNID		2	/* ROOT directory */
+#define HFS_EXT_CNID		3	/* EXTents B-tree */
+#define HFS_CAT_CNID		4	/* CATalog B-tree */
+#define HFS_BAD_CNID		5	/* BAD blocks file */
+#define HFS_ALLOC_CNID		6	/* ALLOCation file (HFS+) */
+#define HFS_START_CNID		7	/* STARTup file (HFS+) */
+#define HFS_ATTR_CNID		8	/* ATTRibutes file (HFS+) */
+#define HFS_EXCH_CNID		15	/* ExchangeFiles temp id */
+#define HFS_FIRSTUSER_CNID	16
+
+/* values for hfs_cat_rec.cdrType */
+#define HFS_CDR_DIR    0x01    /* folder (directory) */
+#define HFS_CDR_FIL    0x02    /* file */
+#define HFS_CDR_THD    0x03    /* folder (directory) thread */
+#define HFS_CDR_FTH    0x04    /* file thread */
+
+/* legal values for hfs_ext_key.FkType and hfs_file.fork */
+#define HFS_FK_DATA	0x00
+#define HFS_FK_RSRC	0xFF
+
+/* bits in hfs_fil_entry.Flags */
+#define HFS_FIL_LOCK	0x01  /* locked */
+#define HFS_FIL_THD	0x02  /* file thread */
+#define HFS_FIL_DOPEN   0x04  /* data fork open */
+#define HFS_FIL_ROPEN   0x08  /* resource fork open */
+#define HFS_FIL_DIR     0x10  /* directory (always clear) */
+#define HFS_FIL_NOCOPY  0x40  /* copy-protected file */
+#define HFS_FIL_USED	0x80  /* open */
+
+/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
+#define HFS_DIR_LOCK        0x01  /* locked */
+#define HFS_DIR_THD         0x02  /* directory thread */
+#define HFS_DIR_INEXPFOLDER 0x04  /* in a shared area */
+#define HFS_DIR_MOUNTED     0x08  /* mounted */
+#define HFS_DIR_DIR         0x10  /* directory (always set) */
+#define HFS_DIR_EXPFOLDER   0x20  /* share point */
+
+/* bits hfs_finfo.fdFlags */
+#define HFS_FLG_INITED		0x0100
+#define HFS_FLG_LOCKED		0x1000
+#define HFS_FLG_INVISIBLE	0x4000
+
+/*======== HFS structures as they appear on the disk ========*/
+
+#define __packed __attribute__ ((packed))
+
+/* Pascal-style string of up to 31 characters */
+struct hfs_name {
+	u8 len;
+	u8 name[HFS_NAMELEN];
+} __packed;
+
+struct hfs_point {
+	__be16 v;
+	__be16 h;
+} __packed;
+
+struct hfs_rect {
+	__be16 top;
+	__be16 left;
+	__be16 bottom;
+	__be16 right;
+} __packed;
+
+struct hfs_finfo {
+	__be32 fdType;
+	__be32 fdCreator;
+	__be16 fdFlags;
+	struct hfs_point fdLocation;
+	__be16 fdFldr;
+} __packed;
+
+struct hfs_fxinfo {
+	__be16 fdIconID;
+	u8 fdUnused[8];
+	__be16 fdComment;
+	__be32 fdPutAway;
+} __packed;
+
+struct hfs_dinfo {
+	struct hfs_rect frRect;
+	__be16 frFlags;
+	struct hfs_point frLocation;
+	__be16 frView;
+} __packed;
+
+struct hfs_dxinfo {
+	struct hfs_point frScroll;
+	__be32 frOpenChain;
+	__be16 frUnused;
+	__be16 frComment;
+	__be32 frPutAway;
+} __packed;
+
+union hfs_finder_info {
+	struct {
+		struct hfs_finfo finfo;
+		struct hfs_fxinfo fxinfo;
+	} file;
+	struct {
+		struct hfs_dinfo dinfo;
+		struct hfs_dxinfo dxinfo;
+	} dir;
+} __packed;
+
+/* Cast to a pointer to a generic bkey */
+#define	HFS_BKEY(X)	(((void)((X)->KeyLen)), ((struct hfs_bkey *)(X)))
+
+/* The key used in the catalog b-tree: */
+struct hfs_cat_key {
+	u8 key_len;		/* number of bytes in the key */
+	u8 reserved;		/* padding */
+	__be32 ParID;		/* CNID of the parent dir */
+	struct hfs_name	CName;	/* The filename of the entry */
+} __packed;
+
+/* The key used in the extents b-tree: */
+struct hfs_ext_key {
+	u8 key_len;		/* number of bytes in the key */
+	u8 FkType;		/* HFS_FK_{DATA,RSRC} */
+	__be32 FNum;		/* The File ID of the file */
+	__be16 FABN;		/* allocation block number */
+} __packed;
+
+typedef union hfs_btree_key {
+	u8 key_len;			/* number of bytes in the key */
+	struct hfs_cat_key cat;
+	struct hfs_ext_key ext;
+} hfs_btree_key;
+
+typedef union hfs_btree_key btree_key;
+
+struct hfs_extent {
+	__be16 block;
+	__be16 count;
+};
+typedef struct hfs_extent hfs_extent_rec[3];
+
+/* The catalog record for a file */
+struct hfs_cat_file {
+	s8 type;			/* The type of entry */
+	u8 reserved;
+	u8 Flags;			/* Flags such as read-only */
+	s8 Typ;				/* file version number = 0 */
+	struct hfs_finfo UsrWds;	/* data used by the Finder */
+	__be32 FlNum;			/* The CNID */
+	__be16 StBlk;			/* obsolete */
+	__be32 LgLen;			/* The logical EOF of the data fork*/
+	__be32 PyLen;			/* The physical EOF of the data fork */
+	__be16 RStBlk;			/* obsolete */
+	__be32 RLgLen;			/* The logical EOF of the rsrc fork */
+	__be32 RPyLen;			/* The physical EOF of the rsrc fork */
+	__be32 CrDat;			/* The creation date */
+	__be32 MdDat;			/* The modified date */
+	__be32 BkDat;			/* The last backup date */
+	struct hfs_fxinfo FndrInfo;	/* more data for the Finder */
+	__be16 ClpSize;			/* number of bytes to allocate
+					   when extending files */
+	hfs_extent_rec ExtRec;		/* first extent record
+					   for the data fork */
+	hfs_extent_rec RExtRec;		/* first extent record
+					   for the resource fork */
+	u32 Resrv;			/* reserved by Apple */
+} __packed;
+
+/* the catalog record for a directory */
+struct hfs_cat_dir {
+	s8 type;			/* The type of entry */
+	u8 reserved;
+	__be16 Flags;			/* flags */
+	__be16 Val;			/* Valence: number of files and
+					   dirs in the directory */
+	__be32 DirID;			/* The CNID */
+	__be32 CrDat;			/* The creation date */
+	__be32 MdDat;			/* The modification date */
+	__be32 BkDat;			/* The last backup date */
+	struct hfs_dinfo UsrInfo;	/* data used by the Finder */
+	struct hfs_dxinfo FndrInfo;	/* more data used by Finder */
+	u8 Resrv[16];			/* reserved by Apple */
+} __packed;
+
+/* the catalog record for a thread */
+struct hfs_cat_thread {
+	s8 type;			/* The type of entry */
+	u8 reserved[9];			/* reserved by Apple */
+	__be32 ParID;			/* CNID of parent directory */
+	struct hfs_name CName;		/* The name of this entry */
+}  __packed;
+
+/* A catalog tree record */
+typedef union hfs_cat_rec {
+	s8 type;			/* The type of entry */
+	struct hfs_cat_file file;
+	struct hfs_cat_dir dir;
+	struct hfs_cat_thread thread;
+} hfs_cat_rec;
+
+struct hfs_mdb {
+	__be16 drSigWord;		/* Signature word indicating fs type */
+	__be32 drCrDate;		/* fs creation date/time */
+	__be32 drLsMod;			/* fs modification date/time */
+	__be16 drAtrb;			/* fs attributes */
+	__be16 drNmFls;			/* number of files in root directory */
+	__be16 drVBMSt;			/* location (in 512-byte blocks)
+					   of the volume bitmap */
+	__be16 drAllocPtr;		/* location (in allocation blocks)
+					   to begin next allocation search */
+	__be16 drNmAlBlks;		/* number of allocation blocks */
+	__be32 drAlBlkSiz;		/* bytes in an allocation block */
+	__be32 drClpSiz;		/* clumpsize, the number of bytes to
+					   allocate when extending a file */
+	__be16 drAlBlSt;		/* location (in 512-byte blocks)
+					   of the first allocation block */
+	__be32 drNxtCNID;		/* CNID to assign to the next
+					   file or directory created */
+	__be16 drFreeBks;		/* number of free allocation blocks */
+	u8 drVN[28];			/* the volume label */
+	__be32 drVolBkUp;		/* fs backup date/time */
+	__be16 drVSeqNum;		/* backup sequence number */
+	__be32 drWrCnt;			/* fs write count */
+	__be32 drXTClpSiz;		/* clumpsize for the extents B-tree */
+	__be32 drCTClpSiz;		/* clumpsize for the catalog B-tree */
+	__be16 drNmRtDirs;		/* number of directories in
+					   the root directory */
+	__be32 drFilCnt;		/* number of files in the fs */
+	__be32 drDirCnt;		/* number of directories in the fs */
+	u8 drFndrInfo[32];		/* data used by the Finder */
+	__be16 drEmbedSigWord;		/* embedded volume signature */
+	__be32 drEmbedExtent;		/* starting block number (xdrStABN)
+					   and number of allocation blocks
+					   (xdrNumABlks) occupied by embedded
+					   volume */
+	__be32 drXTFlSize;		/* bytes in the extents B-tree */
+	hfs_extent_rec drXTExtRec;	/* extents B-tree's first 3 extents */
+	__be32 drCTFlSize;		/* bytes in the catalog B-tree */
+	hfs_extent_rec drCTExtRec;	/* catalog B-tree's first 3 extents */
+} __packed;
+
+/*======== Data structures kept in memory ========*/
+
+struct hfs_readdir_data {
+	struct list_head list;
+	struct file *file;
+	struct hfs_cat_key key;
+};
+
+#endif
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
new file mode 100644
index 0000000..0dc8ef8
--- /dev/null
+++ b/fs/hfs/hfs_fs.h
@@ -0,0 +1,286 @@
+/*
+ *  linux/fs/hfs/hfs_fs.h
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ */
+
+#ifndef _LINUX_HFS_FS_H
+#define _LINUX_HFS_FS_H
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include "hfs.h"
+
+#define DBG_BNODE_REFS	0x00000001
+#define DBG_BNODE_MOD	0x00000002
+#define DBG_CAT_MOD	0x00000004
+#define DBG_INODE	0x00000008
+#define DBG_SUPER	0x00000010
+#define DBG_EXTENT	0x00000020
+#define DBG_BITMAP	0x00000040
+
+//#define DBG_MASK	(DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD|DBG_CAT_MOD|DBG_BITMAP)
+//#define DBG_MASK	(DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
+//#define DBG_MASK	(DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
+#define DBG_MASK	(0)
+
+#define dprint(flg, fmt, args...) \
+	if (flg & DBG_MASK) printk(fmt , ## args)
+
+#define hfs_warn(format, args...) printk(KERN_WARNING format , ## args)
+#define hfs_error(format, args...) printk(KERN_ERR format , ## args)
+
+/*
+ * struct hfs_inode_info
+ *
+ * The HFS-specific part of a Linux (struct inode)
+ */
+struct hfs_inode_info {
+	atomic_t opencnt;
+
+	unsigned int flags;
+
+	/* to deal with localtime ugliness */
+	int tz_secondswest;
+
+	struct hfs_cat_key cat_key;
+
+	struct list_head open_dir_list;
+	struct inode *rsrc_inode;
+
+	struct semaphore extents_lock;
+
+	u16 alloc_blocks, clump_blocks;
+	sector_t fs_blocks;
+	/* Allocation extents from catalog record or volume header */
+	hfs_extent_rec first_extents;
+	u16 first_blocks;
+	hfs_extent_rec cached_extents;
+	u16 cached_start, cached_blocks;
+
+	loff_t phys_size;
+	struct inode vfs_inode;
+};
+
+#define HFS_FLG_RSRC		0x0001
+#define HFS_FLG_EXT_DIRTY	0x0002
+#define HFS_FLG_EXT_NEW		0x0004
+
+#define HFS_IS_RSRC(inode)	(HFS_I(inode)->flags & HFS_FLG_RSRC)
+
+/*
+ * struct hfs_sb_info
+ *
+ * The HFS-specific part of a Linux (struct super_block)
+ */
+struct hfs_sb_info {
+	struct buffer_head *mdb_bh;		/* The hfs_buffer
+						   holding the real
+						   superblock (aka VIB
+						   or MDB) */
+	struct hfs_mdb *mdb;
+	struct buffer_head *alt_mdb_bh;		/* The hfs_buffer holding
+						   the alternate superblock */
+	struct hfs_mdb *alt_mdb;
+	__be32 *bitmap;				/* The page holding the
+						   allocation bitmap */
+	struct hfs_btree *ext_tree;			/* Information about
+						   the extents b-tree */
+	struct hfs_btree *cat_tree;			/* Information about
+						   the catalog b-tree */
+	u32 file_count;				/* The number of
+						   regular files in
+						   the filesystem */
+	u32 folder_count;			/* The number of
+						   directories in the
+						   filesystem */
+	u32 next_id;				/* The next available
+						   file id number */
+	u32 clumpablks;				/* The number of allocation
+						   blocks to try to add when
+						   extending a file */
+	u32 fs_start;				/* The first 512-byte
+						   block represented
+						   in the bitmap */
+	u32 part_start;
+	u16 root_files;				/* The number of
+						   regular
+						   (non-directory)
+						   files in the root
+						   directory */
+	u16 root_dirs;				/* The number of
+						   directories in the
+						   root directory */
+	u16 fs_ablocks;				/* The number of
+						   allocation blocks
+						   in the filesystem */
+	u16 free_ablocks;			/* the number of unused
+						   allocation blocks
+						   in the filesystem */
+	u32 alloc_blksz;			/* The size of an
+						   "allocation block" */
+	int s_quiet;				/* Silent failure when
+						   changing owner or mode? */
+	__be32 s_type;				/* Type for new files */
+	__be32 s_creator;			/* Creator for new files */
+	umode_t s_file_umask;			/* The umask applied to the
+						   permissions on all files */
+	umode_t s_dir_umask;			/* The umask applied to the
+						   permissions on all dirs */
+	uid_t s_uid;				/* The uid of all files */
+	gid_t s_gid;				/* The gid of all files */
+
+	int session, part;
+
+	struct semaphore bitmap_lock;
+
+	unsigned long flags;
+
+	u16 blockoffset;
+
+	int fs_div;
+
+	struct hlist_head rsrc_inodes;
+};
+
+#define HFS_FLG_BITMAP_DIRTY	0
+#define HFS_FLG_MDB_DIRTY	1
+#define HFS_FLG_ALT_MDB_DIRTY	2
+
+/* bitmap.c */
+extern u32 hfs_vbm_search_free(struct super_block *, u32, u32 *);
+extern int hfs_clear_vbm_bits(struct super_block *, u16, u16);
+
+/* catalog.c */
+extern int hfs_cat_keycmp(const btree_key *, const btree_key *);
+struct hfs_find_data;
+extern int hfs_cat_find_brec(struct super_block *, u32, struct hfs_find_data *);
+extern int hfs_cat_create(u32, struct inode *, struct qstr *, struct inode *);
+extern int hfs_cat_delete(u32, struct inode *, struct qstr *);
+extern int hfs_cat_move(u32, struct inode *, struct qstr *,
+			struct inode *, struct qstr *);
+extern void hfs_cat_build_key(btree_key *, u32, struct qstr *);
+
+/* dir.c */
+extern struct file_operations hfs_dir_operations;
+extern struct inode_operations hfs_dir_inode_operations;
+
+/* extent.c */
+extern int hfs_ext_keycmp(const btree_key *, const btree_key *);
+extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int);
+extern void hfs_ext_write_extent(struct inode *);
+extern int hfs_extend_file(struct inode *);
+extern void hfs_file_truncate(struct inode *);
+
+extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+
+/* inode.c */
+extern struct address_space_operations hfs_aops;
+extern struct address_space_operations hfs_btree_aops;
+
+extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
+extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
+extern int hfs_write_inode(struct inode *, int);
+extern int hfs_inode_setattr(struct dentry *, struct iattr *);
+extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
+			__be32 log_size, __be32 phys_size, u32 clump_size);
+extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *);
+extern void hfs_clear_inode(struct inode *);
+extern void hfs_delete_inode(struct inode *);
+
+/* attr.c */
+extern int hfs_setxattr(struct dentry *dentry, const char *name,
+			const void *value, size_t size, int flags);
+extern ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
+			    void *value, size_t size);
+extern ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+/* mdb.c */
+extern int hfs_mdb_get(struct super_block *);
+extern void hfs_mdb_commit(struct super_block *);
+extern void hfs_mdb_close(struct super_block *);
+extern void hfs_mdb_put(struct super_block *);
+
+/* part_tbl.c */
+extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+
+/* string.c */
+extern struct dentry_operations hfs_dentry_operations;
+
+extern int hfs_hash_dentry(struct dentry *, struct qstr *);
+extern int hfs_strcmp(const unsigned char *, unsigned int,
+		      const unsigned char *, unsigned int);
+extern int hfs_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+
+/* trans.c */
+extern void hfs_triv2mac(struct hfs_name *, struct qstr *);
+extern int hfs_mac2triv(char *, const struct hfs_name *);
+
+extern struct timezone sys_tz;
+
+/*
+ * There are two time systems.  Both are based on seconds since
+ * a particular time/date.
+ *	Unix:	unsigned, in CPU byte order, since 00:00 GMT, Jan. 1, 1970
+ *	mac:	unsigned, big-endian on disk, since 00:00 GMT, Jan. 1, 1904
+ *
+ */
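+/* 2082844800 is the offset between the two epochs: 24107 days
+ * (including 17 leap days) from 1904-01-01 to 1970-01-01, in seconds. */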
+#define __hfs_u_to_mtime(sec)	cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60)
+#define __hfs_m_to_utime(sec)	(be32_to_cpu(sec) - 2082844800U  + sys_tz.tz_minuteswest * 60)
+
+#define HFS_I(inode)	(list_entry(inode, struct hfs_inode_info, vfs_inode))
+#define HFS_SB(sb)	((struct hfs_sb_info *)(sb)->s_fs_info)
+
+#define hfs_m_to_utime(time)	(struct timespec){ .tv_sec = __hfs_m_to_utime(time) }
+#define hfs_u_to_mtime(time)	__hfs_u_to_mtime((time).tv_sec)
+#define hfs_mtime()		__hfs_u_to_mtime(get_seconds())
+
+static inline const char *hfs_mdb_name(struct super_block *sb)
+{
+	return sb->s_id;
+}
+
+static inline void hfs_bitmap_dirty(struct super_block *sb)
+{
+	set_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags);
+	sb->s_dirt = 1;
+}
+
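+/* wait for any in-flight I/O on bh, then write it out synchronously if dirty */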
+static inline void hfs_buffer_sync(struct buffer_head *bh)
+{
+	while (buffer_locked(bh)) {
+		wait_on_buffer(bh);
+	}
+	if (buffer_dirty(bh)) {
+		ll_rw_block(WRITE, 1, &bh);
+		wait_on_buffer(bh);
+	}
+}
+
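+/*
+ * Read the 512-byte HFS sector 'sec' through the buffer cache, which may
+ * be using a larger block size, and point 'data' at that sector within the
+ * buffer.  Evaluates to the buffer_head, which the caller must brelse().
+ */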
+#define sb_bread512(sb, sec, data) ({			\
+	struct buffer_head *__bh;			\
+	sector_t __block;				\
+	loff_t __start;					\
+	int __offset;					\
+							\
+	__start = (loff_t)(sec) << HFS_SECTOR_SIZE_BITS;\
+	__block = __start >> (sb)->s_blocksize_bits;	\
+	__offset = __start & ((sb)->s_blocksize - 1);	\
+	__bh = sb_bread((sb), __block);			\
+	if (likely(__bh != NULL))			\
+		data = (void *)(__bh->b_data + __offset);\
+	else						\
+		data = NULL;				\
+	__bh;						\
+})
+
+#endif
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
new file mode 100644
index 0000000..7519123
--- /dev/null
+++ b/fs/hfs/inode.c
@@ -0,0 +1,636 @@
+/*
+ *  linux/fs/hfs/inode.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains inode-related functions which do not depend on
+ * which scheme is being used to represent forks.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ */
+
+#include <linux/pagemap.h>
+#include <linux/version.h>
+#include <linux/mpage.h>
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+static struct file_operations hfs_file_operations;
+static struct inode_operations hfs_file_inode_operations;
+
+/*================ Variable-like macros ================*/
+
+#define HFS_VALID_MODE_BITS  (S_IFREG | S_IFDIR | S_IRWXUGO)
+
+static int hfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, hfs_get_block, wbc);
+}
+
+static int hfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, hfs_get_block);
+}
+
+static int hfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return cont_prepare_write(page, from, to, hfs_get_block,
+				  &HFS_I(page->mapping->host)->phys_size);
+}
+
+static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, hfs_get_block);
+}
+
+static int hfs_releasepage(struct page *page, int mask)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	struct hfs_btree *tree;
+	struct hfs_bnode *node;
+	u32 nidx;
+	int i, res = 1;
+
+	switch (inode->i_ino) {
+	case HFS_EXT_CNID:
+		tree = HFS_SB(sb)->ext_tree;
+		break;
+	case HFS_CAT_CNID:
+		tree = HFS_SB(sb)->cat_tree;
+		break;
+	default:
+		BUG();
+		return 0;
+	}
+	if (tree->node_size >= PAGE_CACHE_SIZE) {
+		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+		spin_lock(&tree->hash_lock);
+		node = hfs_bnode_findhash(tree, nidx);
+		if (!node)
+			;
+		else if (atomic_read(&node->refcnt))
+			res = 0;
+		if (res && node) {
+			hfs_bnode_unhash(node);
+			hfs_bnode_free(node);
+		}
+		spin_unlock(&tree->hash_lock);
+	} else {
+		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		spin_lock(&tree->hash_lock);
+		do {
+			node = hfs_bnode_findhash(tree, nidx++);
+			if (!node)
+				continue;
+			if (atomic_read(&node->refcnt)) {
+				res = 0;
+				break;
+			}
+			hfs_bnode_unhash(node);
+			hfs_bnode_free(node);
+		} while (--i && nidx < tree->node_count);
+		spin_unlock(&tree->hash_lock);
+	}
+	//printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
+	return res ? try_to_free_buffers(page) : 0;
+}
+
+static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
+			  struct buffer_head *bh_result, int create)
+{
+	int ret;
+
+	ret = hfs_get_block(inode, iblock, bh_result, create);
+	if (!ret)
+		bh_result->b_size = (1 << inode->i_blkbits);
+	return ret;
+}
+
+static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
+		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
+
+	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+				  offset, nr_segs, hfs_get_blocks, NULL);
+}
+
+static int hfs_writepages(struct address_space *mapping,
+			  struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, hfs_get_block);
+}
+
+struct address_space_operations hfs_btree_aops = {
+	.readpage	= hfs_readpage,
+	.writepage	= hfs_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= hfs_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= hfs_bmap,
+	.releasepage	= hfs_releasepage,
+};
+
+struct address_space_operations hfs_aops = {
+	.readpage	= hfs_readpage,
+	.writepage	= hfs_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= hfs_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= hfs_bmap,
+	.direct_IO	= hfs_direct_IO,
+	.writepages	= hfs_writepages,
+};
+
+/*
+ * hfs_new_inode
+ *
+ * Allocate and initialize an in-core inode for a new file or directory
+ * created in 'dir' with the given name and mode, and assign it the
+ * next free catalog node ID.
+ */
+struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	init_MUTEX(&HFS_I(inode)->extents_lock);
+	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
+	hfs_cat_build_key((btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
+	inode->i_ino = HFS_SB(sb)->next_id++;
+	inode->i_mode = mode;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = current->fsgid;
+	inode->i_nlink = 1;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_blksize = HFS_SB(sb)->alloc_blksz;
+	HFS_I(inode)->flags = 0;
+	HFS_I(inode)->rsrc_inode = NULL;
+	HFS_I(inode)->fs_blocks = 0;
+	if (S_ISDIR(mode)) {
+		inode->i_size = 2;
+		HFS_SB(sb)->folder_count++;
+		if (dir->i_ino == HFS_ROOT_CNID)
+			HFS_SB(sb)->root_dirs++;
+		inode->i_op = &hfs_dir_inode_operations;
+		inode->i_fop = &hfs_dir_operations;
+		inode->i_mode |= S_IRWXUGO;
+		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
+	} else if (S_ISREG(mode)) {
+		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
+		HFS_SB(sb)->file_count++;
+		if (dir->i_ino == HFS_ROOT_CNID)
+			HFS_SB(sb)->root_files++;
+		inode->i_op = &hfs_file_inode_operations;
+		inode->i_fop = &hfs_file_operations;
+		inode->i_mapping->a_ops = &hfs_aops;
+		inode->i_mode |= S_IRUGO|S_IXUGO;
+		if (mode & S_IWUSR)
+			inode->i_mode |= S_IWUGO;
+		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
+		HFS_I(inode)->phys_size = 0;
+		HFS_I(inode)->alloc_blocks = 0;
+		HFS_I(inode)->first_blocks = 0;
+		HFS_I(inode)->cached_start = 0;
+		HFS_I(inode)->cached_blocks = 0;
+		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
+		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
+	}
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
+	sb->s_dirt = 1;
+
+	return inode;
+}
+
+void hfs_delete_inode(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino);
+	if (S_ISDIR(inode->i_mode)) {
+		HFS_SB(sb)->folder_count--;
+		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
+			HFS_SB(sb)->root_dirs--;
+		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
+		sb->s_dirt = 1;
+		return;
+	}
+	HFS_SB(sb)->file_count--;
+	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
+		HFS_SB(sb)->root_files--;
+	if (S_ISREG(inode->i_mode)) {
+		if (!inode->i_nlink) {
+			inode->i_size = 0;
+			hfs_file_truncate(inode);
+		}
+	}
+	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
+	sb->s_dirt = 1;
+}
+
+void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
+			 __be32 __log_size, __be32 phys_size, u32 clump_size)
+{
+	struct super_block *sb = inode->i_sb;
+	u32 log_size = be32_to_cpu(__log_size);
+	u16 count;
+	int i;
+
+	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
+	for (count = 0, i = 0; i < 3; i++)
+		count += be16_to_cpu(ext[i].count);
+	HFS_I(inode)->first_blocks = count;
+
+	inode->i_size = HFS_I(inode)->phys_size = log_size;
+	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
+	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
+				     HFS_SB(sb)->alloc_blksz;
+	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
+	if (!HFS_I(inode)->clump_blocks)
+		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
+}
+
+struct hfs_iget_data {
+	struct hfs_cat_key *key;
+	hfs_cat_rec *rec;
+};
+
+static int hfs_test_inode(struct inode *inode, void *data)
+{
+	struct hfs_iget_data *idata = data;
+	hfs_cat_rec *rec;
+
+	rec = idata->rec;
+	switch (rec->type) {
+	case HFS_CDR_DIR:
+		return inode->i_ino == be32_to_cpu(rec->dir.DirID);
+	case HFS_CDR_FIL:
+		return inode->i_ino == be32_to_cpu(rec->file.FlNum);
+	default:
+		BUG();
+		return 1;
+	}
+}
+
+/*
+ * hfs_read_inode
+ *
+ * Fill in a freshly allocated in-core inode from a catalog record
+ * (regular file, resource fork or directory).
+ */
+static int hfs_read_inode(struct inode *inode, void *data)
+{
+	struct hfs_iget_data *idata = data;
+	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
+	hfs_cat_rec *rec;
+
+	HFS_I(inode)->flags = 0;
+	HFS_I(inode)->rsrc_inode = NULL;
+	init_MUTEX(&HFS_I(inode)->extents_lock);
+	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
+
+	/* Initialize the inode */
+	inode->i_uid = hsb->s_uid;
+	inode->i_gid = hsb->s_gid;
+	inode->i_nlink = 1;
+	inode->i_blksize = HFS_SB(inode->i_sb)->alloc_blksz;
+
+	if (idata->key)
+		HFS_I(inode)->cat_key = *idata->key;
+	else
+		HFS_I(inode)->flags |= HFS_FLG_RSRC;
+	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
+
+	rec = idata->rec;
+	switch (rec->type) {
+	case HFS_CDR_FIL:
+		if (!HFS_IS_RSRC(inode)) {
+			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
+					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
+		} else {
+			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
+					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
+		}
+
+		inode->i_ino = be32_to_cpu(rec->file.FlNum);
+		inode->i_mode = S_IRUGO | S_IXUGO;
+		if (!(rec->file.Flags & HFS_FIL_LOCK))
+			inode->i_mode |= S_IWUGO;
+		inode->i_mode &= ~hsb->s_file_umask;
+		inode->i_mode |= S_IFREG;
+		inode->i_ctime = inode->i_atime = inode->i_mtime =
+				hfs_m_to_utime(rec->file.MdDat);
+		inode->i_op = &hfs_file_inode_operations;
+		inode->i_fop = &hfs_file_operations;
+		inode->i_mapping->a_ops = &hfs_aops;
+		break;
+	case HFS_CDR_DIR:
+		inode->i_ino = be32_to_cpu(rec->dir.DirID);
+		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
+		HFS_I(inode)->fs_blocks = 0;
+		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
+		inode->i_ctime = inode->i_atime = inode->i_mtime =
+				hfs_m_to_utime(rec->dir.MdDat);
+		inode->i_op = &hfs_dir_inode_operations;
+		inode->i_fop = &hfs_dir_operations;
+		break;
+	default:
+		make_bad_inode(inode);
+	}
+	return 0;
+}
+
+/*
+ * hfs_iget()
+ *
+ * Given a key and the matching record from the catalog B-tree, look up
+ * (or construct) the in-core inode for that file or directory and
+ * return it, or NULL if the record is of an unexpected type.
+ */
+struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
+{
+	struct hfs_iget_data data = { key, rec };
+	struct inode *inode;
+	u32 cnid;
+
+	switch (rec->type) {
+	case HFS_CDR_DIR:
+		cnid = be32_to_cpu(rec->dir.DirID);
+		break;
+	case HFS_CDR_FIL:
+		cnid = be32_to_cpu(rec->file.FlNum);
+		break;
+	default:
+		return NULL;
+	}
+	inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
+	if (inode && (inode->i_state & I_NEW))
+		unlock_new_inode(inode);
+	return inode;
+}
+
+void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
+			  __be32 *log_size, __be32 *phys_size)
+{
+	memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));
+
+	if (log_size)
+		*log_size = cpu_to_be32(inode->i_size);
+	if (phys_size)
+		*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
+					 HFS_SB(inode->i_sb)->alloc_blksz);
+}
+
+int hfs_write_inode(struct inode *inode, int unused)
+{
+	struct inode *main_inode = inode;
+	struct hfs_find_data fd;
+	hfs_cat_rec rec;
+
+	dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino);
+	hfs_ext_write_extent(inode);
+
+	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
+		switch (inode->i_ino) {
+		case HFS_ROOT_CNID:
+			break;
+		case HFS_EXT_CNID:
+			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
+			return 0;
+		case HFS_CAT_CNID:
+			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
+			return 0;
+		default:
+			BUG();
+			return -EIO;
+		}
+	}
+
+	if (HFS_IS_RSRC(inode))
+		main_inode = HFS_I(inode)->rsrc_inode;
+
+	if (!main_inode->i_nlink)
+		return 0;
+
+	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
+		/* panic? */
+		return -EIO;
+
+	fd.search_key->cat = HFS_I(main_inode)->cat_key;
+	if (hfs_brec_find(&fd))
+		/* panic? */
+		goto out;
+
+	if (S_ISDIR(main_inode->i_mode)) {
+		if (fd.entrylength < sizeof(struct hfs_cat_dir))
+			/* panic? */;
+		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+			   sizeof(struct hfs_cat_dir));
+		if (rec.type != HFS_CDR_DIR ||
+		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
+		}
+
+		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
+		rec.dir.Val = cpu_to_be16(inode->i_size - 2);
+
+		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+			    sizeof(struct hfs_cat_dir));
+	} else if (HFS_IS_RSRC(inode)) {
+		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+			       sizeof(struct hfs_cat_file));
+		hfs_inode_write_fork(inode, rec.file.RExtRec,
+				     &rec.file.RLgLen, &rec.file.RPyLen);
+		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+				sizeof(struct hfs_cat_file));
+	} else {
+		if (fd.entrylength < sizeof(struct hfs_cat_file))
+			/* panic? */;
+		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+			   sizeof(struct hfs_cat_file));
+		if (rec.type != HFS_CDR_FIL ||
+		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
+		}
+
+		if (inode->i_mode & S_IWUSR)
+			rec.file.Flags &= ~HFS_FIL_LOCK;
+		else
+			rec.file.Flags |= HFS_FIL_LOCK;
+		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
+		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);
+
+		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+			    sizeof(struct hfs_cat_file));
+	}
+out:
+	hfs_find_exit(&fd);
+	return 0;
+}
+
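+/*
+ * Looking up the magic name "rsrc" beneath a regular file returns an
+ * inode that refers to the file's resource fork; the data and resource
+ * fork inodes point at each other through rsrc_inode.
+ */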
+static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
+				      struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	hfs_cat_rec rec;
+	struct hfs_find_data fd;
+	int res;
+
+	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
+		goto out;
+
+	inode = HFS_I(dir)->rsrc_inode;
+	if (inode)
+		goto out;
+
+	inode = new_inode(dir->i_sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
+	fd.search_key->cat = HFS_I(dir)->cat_key;
+	res = hfs_brec_read(&fd, &rec, sizeof(rec));
+	if (!res) {
+		struct hfs_iget_data idata = { NULL, &rec };
+		hfs_read_inode(inode, &idata);
+	}
+	hfs_find_exit(&fd);
+	if (res) {
+		iput(inode);
+		return ERR_PTR(res);
+	}
+	HFS_I(inode)->rsrc_inode = dir;
+	HFS_I(dir)->rsrc_inode = inode;
+	igrab(dir);
+	hlist_add_head(&inode->i_hash, &HFS_SB(dir->i_sb)->rsrc_inodes);
+	mark_inode_dirty(inode);
+out:
+	d_add(dentry, inode);
+	return NULL;
+}
+
+void hfs_clear_inode(struct inode *inode)
+{
+	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
+		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
+		iput(HFS_I(inode)->rsrc_inode);
+	}
+}
+
+static int hfs_permission(struct inode *inode, int mask,
+			  struct nameidata *nd)
+{
+	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC)
+		return 0;
+	return generic_permission(inode, mask, NULL);
+}
+
+static int hfs_file_open(struct inode *inode, struct file *file)
+{
+	if (HFS_IS_RSRC(inode))
+		inode = HFS_I(inode)->rsrc_inode;
+	if (atomic_read(&file->f_count) != 1)
+		return 0;
+	atomic_inc(&HFS_I(inode)->opencnt);
+	return 0;
+}
+
+static int hfs_file_release(struct inode *inode, struct file *file)
+{
+	//struct super_block *sb = inode->i_sb;
+
+	if (HFS_IS_RSRC(inode))
+		inode = HFS_I(inode)->rsrc_inode;
+	if (atomic_read(&file->f_count) != 0)
+		return 0;
+	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
+		down(&inode->i_sem);
+		hfs_file_truncate(inode);
+		//if (inode->i_flags & S_DEAD) {
+		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
+		//	hfs_delete_inode(inode);
+		//}
+		up(&inode->i_sem);
+	}
+	return 0;
+}
+
+/*
+ * hfs_inode_setattr()
+ *
+ * Based very closely on fs/msdos/inode.c by Werner Almesberger
+ *
+ * This implements the setattr() inode operation for HFS file systems.
+ * The purpose is to take the changes made to an inode and apply them
+ * in a filesystem-dependent manner.  In this case the process has a
+ * few tasks to do:
+ *  1) prevent changes to the i_uid and i_gid fields.
+ *  2) map file permissions to the closest allowable permissions
+ *  3) Since multiple Linux files can share the same on-disk inode under
+ *     HFS (for instance the data and resource forks of a file) a change
+ *     to permissions must be applied to all other in-core inodes which
+ *     correspond to the same HFS file.
+ */
+
+int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
+	int error;
+
+	error = inode_change_ok(inode, attr); /* basic permission checks */
+	if (error)
+		return error;
+
+	/* no uid/gid changes and limit which mode bits can be set */
+	if (((attr->ia_valid & ATTR_UID) &&
+	     (attr->ia_uid != hsb->s_uid)) ||
+	    ((attr->ia_valid & ATTR_GID) &&
+	     (attr->ia_gid != hsb->s_gid)) ||
+	    ((attr->ia_valid & ATTR_MODE) &&
+	     ((S_ISDIR(inode->i_mode) &&
+	       (attr->ia_mode != inode->i_mode)) ||
+	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
+		return hsb->s_quiet ? 0 : error;
+	}
+
+	if (attr->ia_valid & ATTR_MODE) {
+		/* Only the 'w' bits can ever change and only all together. */
+		if (attr->ia_mode & S_IWUSR)
+			attr->ia_mode = inode->i_mode | S_IWUGO;
+		else
+			attr->ia_mode = inode->i_mode & ~S_IWUGO;
+		attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask: ~hsb->s_file_umask;
+	}
+	error = inode_setattr(inode, attr);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+
+static struct file_operations hfs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.sendfile	= generic_file_sendfile,
+	.fsync		= file_fsync,
+	.open		= hfs_file_open,
+	.release	= hfs_file_release,
+};
+
+static struct inode_operations hfs_file_inode_operations = {
+	.lookup		= hfs_file_lookup,
+	.truncate	= hfs_file_truncate,
+	.setattr	= hfs_inode_setattr,
+	.permission	= hfs_permission,
+	.setxattr	= hfs_setxattr,
+	.getxattr	= hfs_getxattr,
+	.listxattr	= hfs_listxattr,
+};
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
new file mode 100644
index 0000000..4efb640
--- /dev/null
+++ b/fs/hfs/mdb.c
@@ -0,0 +1,343 @@
+/*
+ *  linux/fs/hfs/mdb.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains functions for reading/writing the MDB.
+ */
+
+#include <linux/cdrom.h>
+#include <linux/genhd.h>
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+/*================ File-local data types ================*/
+
+/*
+ * The HFS Master Directory Block (MDB).
+ *
+ * Also known as the Volume Information Block (VIB), this structure is
+ * the HFS equivalent of a superblock.
+ *
+ * Reference: _Inside Macintosh: Files_ pages 2-59 through 2-62
+ *
+ * modified for HFS Extended
+ */
+
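+/*
+ * On CD-ROM media the HFS volume normally lives in the last session (or
+ * in an explicitly requested session); find the first sector of that
+ * session.  For other block devices the defaults (start of the device,
+ * full size) are kept.
+ */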
+static int hfs_get_last_session(struct super_block *sb,
+				sector_t *start, sector_t *size)
+{
+	struct cdrom_multisession ms_info;
+	struct cdrom_tocentry te;
+	int res;
+
+	/* default values */
+	*start = 0;
+	*size = sb->s_bdev->bd_inode->i_size >> 9;
+
+	if (HFS_SB(sb)->session >= 0) {
+		te.cdte_track = HFS_SB(sb)->session;
+		te.cdte_format = CDROM_LBA;
+		res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
+		if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
+			*start = (sector_t)te.cdte_addr.lba << 2;
+			return 0;
+		}
+		printk(KERN_ERR "HFS: Invalid session number or type of track\n");
+		return -EINVAL;
+	}
+	ms_info.addr_format = CDROM_LBA;
+	res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
+	if (!res && ms_info.xa_flag)
+		*start = (sector_t)ms_info.addr.lba << 2;
+	return 0;
+}
+
+/*
+ * hfs_mdb_get()
+ *
+ * Build the in-core MDB for a filesystem, including
+ * the B-trees and the volume bitmap.
+ */
+int hfs_mdb_get(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct hfs_mdb *mdb, *mdb2;
+	unsigned int block;
+	char *ptr;
+	int off2, len, size, sect;
+	sector_t part_start, part_size;
+	loff_t off;
+	__be16 attrib;
+
+	/* set the device driver to 512-byte blocks */
+	size = sb_min_blocksize(sb, HFS_SECTOR_SIZE);
+	if (!size)
+		return -EINVAL;
+
+	if (hfs_get_last_session(sb, &part_start, &part_size))
+		return -EINVAL;
+	while (1) {
+		/* See if this is an HFS filesystem */
+		bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb);
+		if (!bh)
+			goto out;
+
+		if (mdb->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC))
+			break;
+		brelse(bh);
+
+		/* check for a partition block
+		 * (should do this only for cdrom/loop though)
+		 */
+		if (hfs_part_find(sb, &part_start, &part_size))
+			goto out;
+	}
+
+	HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz);
+	if (!size || (size & (HFS_SECTOR_SIZE - 1))) {
+		hfs_warn("hfs_fs: bad allocation block size %d\n", size);
+		goto out_bh;
+	}
+
+	size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE);
+	/* the block size must be a power of two;
+	 * step down in 512-byte units until it is */
+	while (size & (size - 1))
+		size -= HFS_SECTOR_SIZE;
+	sect = be16_to_cpu(mdb->drAlBlSt) + part_start;
+	/* align block size to first sector */
+	while (sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS))
+		size >>= 1;
+	/* align block size to weird alloc size */
+	while (HFS_SB(sb)->alloc_blksz & (size - 1))
+		size >>= 1;
+	brelse(bh);
+	if (!sb_set_blocksize(sb, size)) {
+		printk("hfs_fs: unable to set blocksize to %u\n", size);
+		goto out;
+	}
+
+	bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb);
+	if (!bh)
+		goto out;
+	if (mdb->drSigWord != cpu_to_be16(HFS_SUPER_MAGIC))
+		goto out_bh;
+
+	HFS_SB(sb)->mdb_bh = bh;
+	HFS_SB(sb)->mdb = mdb;
+
+	/* These parameters are read from the MDB, and never written */
+	HFS_SB(sb)->part_start = part_start;
+	HFS_SB(sb)->fs_ablocks = be16_to_cpu(mdb->drNmAlBlks);
+	HFS_SB(sb)->fs_div = HFS_SB(sb)->alloc_blksz >> sb->s_blocksize_bits;
+	HFS_SB(sb)->clumpablks = be32_to_cpu(mdb->drClpSiz) /
+				 HFS_SB(sb)->alloc_blksz;
+	if (!HFS_SB(sb)->clumpablks)
+		HFS_SB(sb)->clumpablks = 1;
+	HFS_SB(sb)->fs_start = (be16_to_cpu(mdb->drAlBlSt) + part_start) >>
+			       (sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS);
+
+	/* These parameters are read from and written to the MDB */
+	HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks);
+	HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID);
+	HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls);
+	HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs);
+	HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt);
+	HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt);
+
+	/* TRY to get the alternate (backup) MDB. */
+	sect = part_start + part_size - 2;
+	bh = sb_bread512(sb, sect, mdb2);
+	if (bh) {
+		if (mdb2->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) {
+			HFS_SB(sb)->alt_mdb_bh = bh;
+			HFS_SB(sb)->alt_mdb = mdb2;
+		} else
+			brelse(bh);
+	}
+
+	if (!HFS_SB(sb)->alt_mdb) {
+		hfs_warn("hfs_fs: unable to locate alternate MDB\n");
+		hfs_warn("hfs_fs: continuing without an alternate MDB\n");
+	}
+
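+	/* the bitmap covers at most 65535 allocation blocks (about 8 KB);
+	 * grab enough pages to hold it */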
+	HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0);
+	if (!HFS_SB(sb)->bitmap)
+		goto out;
+
+	/* read in the bitmap */
+	block = be16_to_cpu(mdb->drVBMSt) + part_start;
+	off = (loff_t)block << HFS_SECTOR_SIZE_BITS;
+	size = (HFS_SB(sb)->fs_ablocks + 8) / 8;
+	ptr = (u8 *)HFS_SB(sb)->bitmap;
+	while (size) {
+		bh = sb_bread(sb, off >> sb->s_blocksize_bits);
+		if (!bh) {
+			hfs_warn("hfs_fs: unable to read volume bitmap\n");
+			goto out;
+		}
+		off2 = off & (sb->s_blocksize - 1);
+		len = min((int)sb->s_blocksize - off2, size);
+		memcpy(ptr, bh->b_data + off2, len);
+		brelse(bh);
+		ptr += len;
+		off += len;
+		size -= len;
+	}
+
+	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
+	if (!HFS_SB(sb)->ext_tree) {
+		hfs_warn("hfs_fs: unable to open extent tree\n");
+		goto out;
+	}
+	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
+	if (!HFS_SB(sb)->cat_tree) {
+		hfs_warn("hfs_fs: unable to open catalog tree\n");
+		goto out;
+	}
+
+	attrib = mdb->drAtrb;
+	if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
+		hfs_warn("HFS-fs warning: Filesystem was not cleanly unmounted, "
+			 "running fsck.hfs is recommended.  mounting read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+	if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
+		hfs_warn("HFS-fs: Filesystem is marked locked, mounting read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+	if (!(sb->s_flags & MS_RDONLY)) {
+		/* Mark the volume uncleanly unmounted in case we crash */
+		attrib &= cpu_to_be16(~HFS_SB_ATTRIB_UNMNT);
+		attrib |= cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT);
+		mdb->drAtrb = attrib;
+		mdb->drWrCnt = cpu_to_be32(be32_to_cpu(mdb->drWrCnt) + 1);
+		mdb->drLsMod = hfs_mtime();
+
+		mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
+		hfs_buffer_sync(HFS_SB(sb)->mdb_bh);
+	}
+
+	return 0;
+
+out_bh:
+	brelse(bh);
+out:
+	hfs_mdb_put(sb);
+	return -EIO;
+}
+
+/*
+ * hfs_mdb_commit()
+ *
+ * Description:
+ *   This updates the MDB on disk (look also at hfs_write_super()).
+ *   It does not check whether the superblock has been modified or
+ *   whether the filesystem has been mounted read-only. It is mainly
+ *   called by hfs_write_super() and hfs_btree_extend().
+ * Input Variable(s):
+ *   struct super_block *sb: Pointer to the superblock of the filesystem
+ * Output Variable(s):
+ *   NONE
+ * Returns:
+ *   void
+ * Preconditions:
+ *   'sb' points to a "valid" (struct super_block) with an in-core MDB.
+ * Postconditions:
+ *   The MDB on disk will be updated by copying the possibly modified
+ *   fields from the in-memory MDB (in native byte order) to the disk
+ *   block buffer.
+ *   If the alternate MDB is marked dirty (HFS_FLG_ALT_MDB_DIRTY) it is
+ *   also written, and the function does not return until it is
+ *   actually on disk.
+ */
+void hfs_mdb_commit(struct super_block *sb)
+{
+	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
+
+	if (test_and_clear_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags)) {
+		/* These parameters may have been modified, so write them back */
+		mdb->drLsMod = hfs_mtime();
+		mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks);
+		mdb->drNxtCNID = cpu_to_be32(HFS_SB(sb)->next_id);
+		mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files);
+		mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs);
+		mdb->drFilCnt = cpu_to_be32(HFS_SB(sb)->file_count);
+		mdb->drDirCnt = cpu_to_be32(HFS_SB(sb)->folder_count);
+
+		/* write MDB to disk */
+		mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
+	}
+
+	/* write the backup MDB, not returning until it is written.
+	 * we only do this when either the catalog or extents overflow
+	 * files grow. */
+	if (test_and_clear_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags) &&
+	    HFS_SB(sb)->alt_mdb) {
+		hfs_inode_write_fork(HFS_SB(sb)->ext_tree->inode, mdb->drXTExtRec,
+				     &mdb->drXTFlSize, NULL);
+		hfs_inode_write_fork(HFS_SB(sb)->cat_tree->inode, mdb->drCTExtRec,
+				     &mdb->drCTFlSize, NULL);
+		memcpy(HFS_SB(sb)->alt_mdb, HFS_SB(sb)->mdb, HFS_SECTOR_SIZE);
+		HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
+		HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
+		mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh);
+		hfs_buffer_sync(HFS_SB(sb)->alt_mdb_bh);
+	}
+
+	if (test_and_clear_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags)) {
+		struct buffer_head *bh;
+		sector_t block;
+		char *ptr;
+		int off, size, len;
+
+		block = be16_to_cpu(HFS_SB(sb)->mdb->drVBMSt) + HFS_SB(sb)->part_start;
+		off = (block << HFS_SECTOR_SIZE_BITS) & (sb->s_blocksize - 1);
+		block >>= sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS;
+		size = (HFS_SB(sb)->fs_ablocks + 7) / 8;
+		ptr = (u8 *)HFS_SB(sb)->bitmap;
+		while (size) {
+			bh = sb_bread(sb, block);
+			if (!bh) {
+				hfs_warn("hfs_fs: unable to read volume bitmap\n");
+				break;
+			}
+			len = min((int)sb->s_blocksize - off, size);
+			memcpy(bh->b_data + off, ptr, len);
+			mark_buffer_dirty(bh);
+			brelse(bh);
+			block++;
+			off = 0;
+			ptr += len;
+			size -= len;
+		}
+	}
+}
+
+void hfs_mdb_close(struct super_block *sb)
+{
+	/* update volume attributes */
+	if (sb->s_flags & MS_RDONLY)
+		return;
+	HFS_SB(sb)->mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
+	HFS_SB(sb)->mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
+	mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
+}
+
+/*
+ * hfs_mdb_put()
+ *
+ * Release the resources associated with the in-core MDB.  */
+void hfs_mdb_put(struct super_block *sb)
+{
+	/* free the B-trees */
+	hfs_btree_close(HFS_SB(sb)->ext_tree);
+	hfs_btree_close(HFS_SB(sb)->cat_tree);
+
+	/* free the buffers holding the primary and alternate MDBs */
+	brelse(HFS_SB(sb)->mdb_bh);
+	brelse(HFS_SB(sb)->alt_mdb_bh);
+}
diff --git a/fs/hfs/part_tbl.c b/fs/hfs/part_tbl.c
new file mode 100644
index 0000000..36add53
--- /dev/null
+++ b/fs/hfs/part_tbl.c
@@ -0,0 +1,117 @@
+/*
+ *  linux/fs/hfs/part_tbl.c
+ *
+ * Copyright (C) 1996-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * Original code to handle the new style Mac partition table based on
+ * a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
+ */
+
+#include "hfs_fs.h"
+
+/*
+ * The new style Mac partition map
+ *
+ * For each partition on the media there is a physical block (512-byte
+ * block) containing one of these structures.  These blocks are
+ * contiguous starting at block 1.
+ */
+struct new_pmap {
+	__be16	pmSig;		/* signature */
+	__be16	reSigPad;	/* padding */
+	__be32	pmMapBlkCnt;	/* partition blocks count */
+	__be32	pmPyPartStart;	/* physical block start of partition */
+	__be32	pmPartBlkCnt;	/* physical block count of partition */
+	u8	pmPartName[32];	/* (null terminated?) string
+				   giving the name of this
+				   partition */
+	u8	pmPartType[32];	/* (null terminated?) string
+				   giving the type of this
+				   partition */
+	/* a bunch more stuff we don't need */
+} __packed;
+
+/*
+ * The old style Mac partition map
+ *
+ * The partition map consists of a 2-byte signature followed by an
+ * array of these structures.  The map is terminated with an all-zero
+ * one of these.
+ */
+struct old_pmap {
+	__be16		pdSig;	/* Signature bytes */
+	struct 	old_pmap_entry {
+		__be32	pdStart;
+		__be32	pdSize;
+		__be32	pdFSID;
+	}	pdEntry[42];
+} __packed;
+
+/*
+ * hfs_part_find()
+ *
+ * Parse the partition map looking for the
+ * start and length of the 'part'th HFS partition.
+ */
+int hfs_part_find(struct super_block *sb,
+		  sector_t *part_start, sector_t *part_size)
+{
+	struct buffer_head *bh;
+	__be16 *data;
+	int i, size, res;
+
+	res = -ENOENT;
+	bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data);
+	if (!bh)
+		return -EIO;
+
+	switch (be16_to_cpu(*data)) {
+	case HFS_OLD_PMAP_MAGIC:
+	  {
+		struct old_pmap *pm;
+		struct old_pmap_entry *p;
+
+		pm = (struct old_pmap *)bh->b_data;
+		p = pm->pdEntry;
+		size = 42;
+		for (i = 0; i < size; p++, i++) {
+			if (p->pdStart && p->pdSize &&
+			    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
+			    (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
+				*part_start += be32_to_cpu(p->pdStart);
+				*part_size = be32_to_cpu(p->pdSize);
+				res = 0;
+			}
+		}
+		break;
+	  }
+	case HFS_NEW_PMAP_MAGIC:
+	  {
+		struct new_pmap *pm;
+
+		pm = (struct new_pmap *)bh->b_data;
+		size = be32_to_cpu(pm->pmMapBlkCnt);
+		for (i = 0; i < size;) {
+			if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
+			    (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
+				*part_start += be32_to_cpu(pm->pmPyPartStart);
+				*part_size = be32_to_cpu(pm->pmPartBlkCnt);
+				res = 0;
+				break;
+			}
+			brelse(bh);
+			bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i, pm);
+			if (!bh)
+				return -EIO;
+			if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC))
+				break;
+		}
+		break;
+	  }
+	}
+	brelse(bh);
+
+	return res;
+}
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
new file mode 100644
index 0000000..927a5af
--- /dev/null
+++ b/fs/hfs/string.c
@@ -0,0 +1,115 @@
+/*
+ *  linux/fs/hfs/string.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains the string comparison function for the
+ * Macintosh character set.
+ *
+ * The code in this file is derived from code which is copyright
+ * 1986, 1989, 1990 by Abacus Research and Development, Inc. (ARDI)
+ * It is used here by the permission of ARDI's president Cliff Matthews.
+ */
+
+#include "hfs_fs.h"
+#include <linux/dcache.h>
+
+/*================ File-local variables ================*/
+
+/*
+ * unsigned char caseorder[]
+ *
+ * Defines the lexical ordering of characters on the Macintosh
+ *
+ * Composition of the 'casefold' and 'order' tables from ARDI's code
+ * with the entry for 0x20 changed to match that for 0xCA to remove
+ * the special case for those two characters.
+ */
+static unsigned char caseorder[256] = {
+	0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
+	0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
+	0x20,0x22,0x23,0x28,0x29,0x2A,0x2B,0x2C,0x2F,0x30,0x31,0x32,0x33,0x34,0x35,0x36,
+	0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,0x40,0x41,0x42,0x43,0x44,0x45,0x46,
+	0x47,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
+	0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xA9,0xAA,0xAB,0xAC,0xAD,
+	0x4E,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
+	0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xAF,0xB0,0xB1,0xB2,0xB3,
+	0x4A,0x4C,0x5A,0x60,0x7B,0x7F,0x98,0x4F,0x49,0x51,0x4A,0x4B,0x4C,0x5A,0x60,0x63,
+	0x64,0x65,0x6E,0x6F,0x70,0x71,0x7B,0x84,0x85,0x86,0x7F,0x80,0x9A,0x9B,0x9C,0x98,
+	0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0x94,0xBB,0xBC,0xBD,0xBE,0xBF,0xC0,0x4D,0x81,
+	0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0x55,0x8A,0xCC,0x4D,0x81,
+	0xCD,0xCE,0xCF,0xD0,0xD1,0xD2,0xD3,0x26,0x27,0xD4,0x20,0x49,0x4B,0x80,0x82,0x82,
+	0xD5,0xD6,0x24,0x25,0x2D,0x2E,0xD7,0xD8,0xA6,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
+	0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
+	0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
+};
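+
+/*
+ * For example, both 'A' (0x41) and 'a' (0x61) map to 0x48 above, so
+ * upper- and lower-case letters hash and compare as equal, while the
+ * table values themselves define the Macintosh collation order.
+ */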
+
+/*================ Global functions ================*/
+
+/*
+ * Hash a string to an integer in a case-independent way
+ */
+int hfs_hash_dentry(struct dentry *dentry, struct qstr *this)
+{
+	const unsigned char *name = this->name;
+	unsigned int hash, len = this->len;
+
+	if (len > HFS_NAMELEN)
+		len = HFS_NAMELEN;
+
+	hash = init_name_hash();
+	for (; len; len--)
+		hash = partial_name_hash(caseorder[*name++], hash);
+	this->hash = end_name_hash(hash);
+	return 0;
+}
+
+/*
+ * Compare two strings in the HFS filename character ordering
+ * Returns positive, negative, or zero, not just 0 or (+/-)1
+ *
+ * Equivalent to ARDI's call:
+ *	ROMlib_RelString(s1+1, s2+1, true, false, (s1[0]<<16) | s2[0])
+ */
+int hfs_strcmp(const unsigned char *s1, unsigned int len1,
+	       const unsigned char *s2, unsigned int len2)
+{
+	int len, tmp;
+
+	len = (len1 > len2) ? len2 : len1;
+
+	while (len--) {
+		tmp = (int)caseorder[*(s1++)] - (int)caseorder[*(s2++)];
+		if (tmp)
+			return tmp;
+	}
+	return len1 - len2;
+}
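+
+/*
+ * Illustrative calls (not part of the driver); with the caseorder[]
+ * folding above:
+ *
+ *	hfs_strcmp("File", 4, "FILE", 4)   returns 0
+ *	hfs_strcmp("ab", 2, "abc", 3)      returns -1 (i.e. len1 - len2)
+ *
+ * so names differing only in case are treated as identical.
+ */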
+
+/*
+ * Test for equality of two strings in the HFS filename character ordering.
+ * Returns 1 on failure and 0 on success.
+ */
+int hfs_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2)
+{
+	const unsigned char *n1, *n2;
+	int len;
+
+	len = s1->len;
+	if (len >= HFS_NAMELEN) {
+		if (s2->len < HFS_NAMELEN)
+			return 1;
+		len = HFS_NAMELEN;
+	} else if (len != s2->len)
+		return 1;
+
+	n1 = s1->name;
+	n2 = s2->name;
+	while (len--) {
+		if (caseorder[*n1++] != caseorder[*n2++])
+			return 1;
+	}
+	return 0;
+}
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
new file mode 100644
index 0000000..1e2c193
--- /dev/null
+++ b/fs/hfs/super.c
@@ -0,0 +1,395 @@
+/*
+ *  linux/fs/hfs/super.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains hfs_fill_super(), some of the super_ops and the
+ * module init/exit routines.  The remaining super_ops are in
+ * inode.c since they deal with inodes.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/parser.h>
+#include <linux/vfs.h>
+
+#include "hfs_fs.h"
+#include "btree.h"
+
+static kmem_cache_t *hfs_inode_cachep;
+
+MODULE_LICENSE("GPL");
+
+/*
+ * hfs_write_super()
+ *
+ * Description:
+ *   This function is called only by the VFS.  When the filesystem
+ *   is mounted r/w, it updates the MDB on disk.
+ * Input Variable(s):
+ *   struct super_block *sb: Pointer to the hfs superblock
+ * Output Variable(s):
+ *   NONE
+ * Returns:
+ *   void
+ * Preconditions:
+ *   'sb' points to a "valid" (struct super_block).
+ * Postconditions:
+ *   The MDB is marked 'unsuccessfully unmounted' by clearing bit 8 of drAtrb
+ *   (hfs_put_super() must set this flag!). Some MDB fields are updated
+ *   and the MDB buffer is written to disk by calling hfs_mdb_commit().
+ */
+static void hfs_write_super(struct super_block *sb)
+{
+	sb->s_dirt = 0;
+	if (sb->s_flags & MS_RDONLY)
+		return;
+	/* sync everything to the buffers */
+	hfs_mdb_commit(sb);
+}
+
+/*
+ * hfs_put_super()
+ *
+ * This is the put_super() entry in the super_operations structure for
+ * HFS filesystems.  The purpose is to release the resources
+ * associated with the superblock sb.
+ */
+static void hfs_put_super(struct super_block *sb)
+{
+	hfs_mdb_close(sb);
+	/* release the MDB's resources */
+	hfs_mdb_put(sb);
+}
+
+/*
+ * hfs_statfs()
+ *
+ * This is the statfs() entry in the super_operations structure for
+ * HFS filesystems.  The purpose is to return various data about the
+ * filesystem.
+ *
+ * Note: f_files/f_ffree are reported as fs_ablocks/free_ablocks.
+ */
+static int hfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = HFS_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div;
+	buf->f_bfree = (u32)HFS_SB(sb)->free_ablocks * HFS_SB(sb)->fs_div;
+	buf->f_bavail = buf->f_bfree;
+	buf->f_files = HFS_SB(sb)->fs_ablocks;
+	buf->f_ffree = HFS_SB(sb)->free_ablocks;
+	buf->f_namelen = HFS_NAMELEN;
+
+	return 0;
+}
+
+static int hfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (!(*flags & MS_RDONLY)) {
+		if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
+			printk("HFS-fs warning: Filesystem was not cleanly unmounted, "
+			       "running fsck.hfs is recommended.  leaving read-only.\n");
+			sb->s_flags |= MS_RDONLY;
+			*flags |= MS_RDONLY;
+		} else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
+			printk("HFS-fs: Filesystem is marked locked, leaving read-only.\n");
+			sb->s_flags |= MS_RDONLY;
+			*flags |= MS_RDONLY;
+		}
+	}
+	return 0;
+}
+
+static struct inode *hfs_alloc_inode(struct super_block *sb)
+{
+	struct hfs_inode_info *i;
+
+	i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL);
+	return i ? &i->vfs_inode : NULL;
+}
+
+static void hfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
+}
+
+static struct super_operations hfs_super_operations = {
+	.alloc_inode	= hfs_alloc_inode,
+	.destroy_inode	= hfs_destroy_inode,
+	.write_inode	= hfs_write_inode,
+	.clear_inode	= hfs_clear_inode,
+	.put_super	= hfs_put_super,
+	.write_super	= hfs_write_super,
+	.statfs		= hfs_statfs,
+	.remount_fs     = hfs_remount,
+};
+
+enum {
+	opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask,
+	opt_part, opt_session, opt_type, opt_creator, opt_quiet,
+	opt_err
+};
+
+static match_table_t tokens = {
+	{ opt_uid, "uid=%u" },
+	{ opt_gid, "gid=%u" },
+	{ opt_umask, "umask=%o" },
+	{ opt_file_umask, "file_umask=%o" },
+	{ opt_dir_umask, "dir_umask=%o" },
+	{ opt_part, "part=%u" },
+	{ opt_session, "session=%u" },
+	{ opt_type, "type=%s" },
+	{ opt_creator, "creator=%s" },
+	{ opt_quiet, "quiet" },
+	{ opt_err, NULL }
+};
+
+static inline int match_fourchar(substring_t *arg, u32 *result)
+{
+	if (arg->to - arg->from != 4)
+		return -EINVAL;
+	memcpy(result, arg->from, 4);
+	return 0;
+}
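+
+/*
+ * For example, the mount option "type=TEXT" copies the four bytes
+ * 'T','E','X','T' verbatim into hsb->s_type, i.e. the value is kept as
+ * a raw Macintosh four-character code rather than a host-order integer.
+ */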
+
+/*
+ * parse_options()
+ *
+ * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger
+ * This function is called by hfs_fill_super() to parse the mount options.
+ */
+static int parse_options(char *options, struct hfs_sb_info *hsb)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int tmp, token;
+
+	/* initialize the sb with defaults */
+	hsb->s_uid = current->uid;
+	hsb->s_gid = current->gid;
+	hsb->s_file_umask = 0133;
+	hsb->s_dir_umask = 0022;
+	hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f);	/* == '????' */
+	hsb->s_quiet = 0;
+	hsb->part = -1;
+	hsb->session = -1;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case opt_uid:
+			if (match_int(&args[0], &tmp)) {
+				printk("HFS: uid requires an argument\n");
+				return 0;
+			}
+			hsb->s_uid = (uid_t)tmp;
+			break;
+		case opt_gid:
+			if (match_int(&args[0], &tmp)) {
+				printk("HFS: gid requires an argument\n");
+				return 0;
+			}
+			hsb->s_gid = (gid_t)tmp;
+			break;
+		case opt_umask:
+			if (match_octal(&args[0], &tmp)) {
+				printk("HFS: umask requires a value\n");
+				return 0;
+			}
+			hsb->s_file_umask = (umode_t)tmp;
+			hsb->s_dir_umask = (umode_t)tmp;
+			break;
+		case opt_file_umask:
+			if (match_octal(&args[0], &tmp)) {
+				printk("HFS: file_umask requires a value\n");
+				return 0;
+			}
+			hsb->s_file_umask = (umode_t)tmp;
+			break;
+		case opt_dir_umask:
+			if (match_octal(&args[0], &tmp)) {
+				printk("HFS: dir_umask requires a value\n");
+				return 0;
+			}
+			hsb->s_dir_umask = (umode_t)tmp;
+			break;
+		case opt_part:
+			if (match_int(&args[0], &hsb->part)) {
+				printk("HFS: part requires an argument\n");
+				return 0;
+			}
+			break;
+		case opt_session:
+			if (match_int(&args[0], &hsb->session)) {
+				printk("HFS: session requires an argument\n");
+				return 0;
+			}
+			break;
+		case opt_type:
+			if (match_fourchar(&args[0], &hsb->s_type)) {
+				printk("HFS+-fs: type requires a 4 character value\n");
+				return 0;
+			}
+			break;
+		case opt_creator:
+			if (match_fourchar(&args[0], &hsb->s_creator)) {
+				printk("HFS+-fs: creator requires a 4 character value\n");
+				return 0;
+			}
+			break;
+		case opt_quiet:
+			hsb->s_quiet = 1;
+			break;
+		default:
+			return 0;
+		}
+	}
+
+	hsb->s_dir_umask &= 0777;
+	hsb->s_file_umask &= 0577;
+
+	return 1;
+}
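+
+/*
+ * As an illustration, the option string
+ *
+ *	"uid=500,file_umask=177,part=0,quiet"
+ *
+ * keeps the defaults set above except for s_uid (500), s_file_umask
+ * (0177), part (0) and s_quiet (1).
+ */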
+
+/*
+ * hfs_fill_super()
+ *
+ * This is the function that is responsible for mounting an HFS
+ * filesystem.	It performs all the tasks necessary to get enough data
+ * from the disk to read the root inode.  This includes parsing the
+ * mount options, dealing with Macintosh partitions, reading the
+ * superblock and the allocation bitmap blocks, calling
+ * hfs_btree_init() to get the necessary data about the extents and
+ * catalog B-trees and, finally, reading the root inode into memory.
+ */
+static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct hfs_sb_info *sbi;
+	struct hfs_find_data fd;
+	hfs_cat_rec rec;
+	struct inode *root_inode;
+	int res;
+
+	sbi = kmalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct hfs_sb_info));
+	INIT_HLIST_HEAD(&sbi->rsrc_inodes);
+
+	res = -EINVAL;
+	if (!parse_options((char *)data, sbi)) {
+		hfs_warn("hfs_fs: unable to parse mount options.\n");
+		goto bail3;
+	}
+
+	sb->s_op = &hfs_super_operations;
+	sb->s_flags |= MS_NODIRATIME;
+	init_MUTEX(&sbi->bitmap_lock);
+
+	res = hfs_mdb_get(sb);
+	if (res) {
+		if (!silent)
+			hfs_warn("VFS: Can't find a HFS filesystem on dev %s.\n",
+				hfs_mdb_name(sb));
+		res = -EINVAL;
+		goto bail2;
+	}
+
+	/* try to get the root inode */
+	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
+	res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
+	if (!res)
+		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+	if (res) {
+		hfs_find_exit(&fd);
+		goto bail_no_root;
+	}
+	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+	hfs_find_exit(&fd);
+	if (!root_inode)
+		goto bail_no_root;
+
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root)
+		goto bail_iput;
+
+	sb->s_root->d_op = &hfs_dentry_operations;
+
+	/* everything's okay */
+	return 0;
+
+bail_iput:
+	iput(root_inode);
+bail_no_root:
+	hfs_warn("hfs_fs: get root inode failed.\n");
+	hfs_mdb_put(sb);
+bail2:
+bail3:
+	kfree(sbi);
+	return res;
+}
+
+static struct super_block *hfs_get_sb(struct file_system_type *fs_type,
+				      int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, hfs_fill_super);
+}
+
+static struct file_system_type hfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "hfs",
+	.get_sb		= hfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct hfs_inode_info *i = p;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&i->vfs_inode);
+}
+
+static int __init init_hfs_fs(void)
+{
+	int err;
+
+	hfs_inode_cachep = kmem_cache_create("hfs_inode_cache",
+		sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN,
+		hfs_init_once, NULL);
+	if (!hfs_inode_cachep)
+		return -ENOMEM;
+	err = register_filesystem(&hfs_fs_type);
+	if (err)
+		kmem_cache_destroy(hfs_inode_cachep);
+	return err;
+}
+
+static void __exit exit_hfs_fs(void)
+{
+	unregister_filesystem(&hfs_fs_type);
+	if (kmem_cache_destroy(hfs_inode_cachep))
+		printk(KERN_INFO "hfs_inode_cache: not all structures were freed\n");
+}
+
+module_init(init_hfs_fs)
+module_exit(exit_hfs_fs)
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
new file mode 100644
index 0000000..5bf89ec
--- /dev/null
+++ b/fs/hfs/sysdep.c
@@ -0,0 +1,40 @@
+/*
+ *  linux/fs/hfs/sysdep.c
+ *
+ * Copyright (C) 1996  Paul H. Hargrove
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains the code to do various system dependent things.
+ */
+
+#include "hfs_fs.h"
+
+/* dentry case-handling: just lowercase everything */
+
+static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	int diff;
+
+	if (!inode)
+		return 1;
+
+	/* fix up inode on a timezone change */
+	diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
+	if (diff) {
+		inode->i_ctime.tv_sec += diff;
+		inode->i_atime.tv_sec += diff;
+		inode->i_mtime.tv_sec += diff;
+		HFS_I(inode)->tz_secondswest += diff;
+	}
+	return 1;
+}
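+
+/*
+ * For example, if the timezone moves 60 minutes further west, diff
+ * becomes +3600 and the cached ctime/atime/mtime values are shifted
+ * one hour later; classic HFS stores timestamps in local time, so the
+ * in-core values are kept in step with the current zone.
+ */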
+
+struct dentry_operations hfs_dentry_operations =
+{
+	.d_revalidate	= hfs_revalidate_dentry,
+	.d_hash		= hfs_hash_dentry,
+	.d_compare	= hfs_compare_dentry,
+};
+
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
new file mode 100644
index 0000000..fb9720a
--- /dev/null
+++ b/fs/hfs/trans.c
@@ -0,0 +1,72 @@
+/*
+ *  linux/fs/hfs/trans.c
+ *
+ * Copyright (C) 1995-1997  Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * This file contains routines for converting between the Macintosh
+ * character set and various other encodings.  This includes dealing
+ * with ':' vs. '/' as the path-element separator.
+ */
+
+#include "hfs_fs.h"
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_mac2triv()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the 'trivial' name-mangling scheme, returning the length of the
+ * mangled filename.  Note that the output string is not NULL
+ * terminated.
+ *
+ * The name-mangling works as follows:
+ * The character '/', which is illegal in Linux filenames, is replaced
+ * by ':', which never appears in HFS filenames.  All other characters
+ * are passed unchanged from input to output.
+ */
+int hfs_mac2triv(char *out, const struct hfs_name *in)
+{
+	const char *p;
+	char c;
+	int i, len;
+
+	len = in->len;
+	p = in->name;
+	for (i = 0; i < len; i++) {
+		c = *p++;
+		*out++ = c == '/' ? ':' : c;
+	}
+	return i;
+}
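+
+/*
+ * For example, the Macintosh name "Notes 6/7/95" becomes the Linux
+ * name "Notes 6:7:95"; hfs_triv2mac() below performs the reverse
+ * substitution.
+ */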
+
+/*
+ * hfs_triv2mac()
+ *
+ * Given an ASCII string (not null-terminated) and its length,
+ * generate the corresponding filename in the Macintosh character set
+ * using the 'trivial' name-mangling scheme; the length of the mangled
+ * filename is stored in out->len.  Note that the output string is not
+ * NULL terminated.
+ *
+ * This routine is the inverse of hfs_mac2triv().
+ * A ':' is replaced by a '/'.
+ */
+void hfs_triv2mac(struct hfs_name *out, struct qstr *in)
+{
+	const char *src;
+	char *dst, c;
+	int i, len;
+
+	out->len = len = min((unsigned int)HFS_NAMELEN, in->len);
+	src = in->name;
+	dst = out->name;
+	for (i = 0; i < len; i++) {
+		c = *src++;
+		*dst++ = c == ':' ? '/' : c;
+	}
+	for (; i < HFS_NAMELEN; i++)
+		*dst++ = 0;
+}
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
new file mode 100644
index 0000000..3cc0df7
--- /dev/null
+++ b/fs/hfsplus/Makefile
@@ -0,0 +1,9 @@
+#
+## Makefile for the linux hfsplus filesystem routines.
+#
+
+obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
+
+hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
+		bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o
+
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
new file mode 100644
index 0000000..257cdde
--- /dev/null
+++ b/fs/hfsplus/bfind.c
@@ -0,0 +1,210 @@
+/*
+ *  linux/fs/hfsplus/bfind.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Search routines for btrees
+ */
+
+#include <linux/slab.h>
+#include "hfsplus_fs.h"
+
+int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+{
+	void *ptr;
+
+	fd->tree = tree;
+	fd->bnode = NULL;
+	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	fd->search_key = ptr;
+	fd->key = ptr + tree->max_key_len + 2;
+	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
+	down(&tree->tree_lock);
+	return 0;
+}
+
+void hfs_find_exit(struct hfs_find_data *fd)
+{
+	hfs_bnode_put(fd->bnode);
+	kfree(fd->search_key);
+	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
+	up(&fd->tree->tree_lock);
+	fd->tree = NULL;
+}
+
+/* Find the record in bnode that best matches key (not greater than...) */
+int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
+{
+	int cmpval;
+	u16 off, len, keylen;
+	int rec;
+	int b, e;
+	int res;
+
+	b = 0;
+	e = bnode->num_recs - 1;
+	res = -ENOENT;
+	do {
+		rec = (e + b) / 2;
+		len = hfs_brec_lenoff(bnode, rec, &off);
+		keylen = hfs_brec_keylen(bnode, rec);
+		hfs_bnode_read(bnode, fd->key, off, keylen);
+		cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
+		if (!cmpval) {
+			e = rec;
+			res = 0;
+			goto done;
+		}
+		if (cmpval < 0)
+			b = rec + 1;
+		else
+			e = rec - 1;
+	} while (b <= e);
+	//printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
+	if (rec != e && e >= 0) {
+		len = hfs_brec_lenoff(bnode, e, &off);
+		keylen = hfs_brec_keylen(bnode, e);
+		hfs_bnode_read(bnode, fd->key, off, keylen);
+	}
+done:
+	fd->record = e;
+	fd->keyoffset = off;
+	fd->keylength = keylen;
+	fd->entryoffset = off + keylen;
+	fd->entrylength = len - keylen;
+	return res;
+}
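+
+/*
+ * The loop above is a binary search for the last record whose key is
+ * not greater than the search key.  A minimal stand-alone sketch of the
+ * same idea, using an integer array instead of bnode records (the name
+ * find_le() is illustrative only):
+ *
+ *	static int find_le(const int *key, int nr, int target)
+ *	{
+ *		int b = 0, e = nr - 1, rec;
+ *
+ *		while (b <= e) {
+ *			rec = (b + e) / 2;
+ *			if (key[rec] == target)
+ *				return rec;
+ *			if (key[rec] < target)
+ *				b = rec + 1;
+ *			else
+ *				e = rec - 1;
+ *		}
+ *		return e;
+ *	}
+ *
+ * On a miss this returns the last index with key[e] < target, or -1 if
+ * every key is greater, which is exactly the 'e' kept in fd->record.
+ */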
+
+/* Traverse a B*Tree from the root to a leaf finding best fit to key */
+/* Return allocated copy of node found, set recnum to best record */
+int hfs_brec_find(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	u32 nidx, parent;
+	__be32 data;
+	int height, res;
+
+	tree = fd->tree;
+	if (fd->bnode)
+		hfs_bnode_put(fd->bnode);
+	fd->bnode = NULL;
+	nidx = tree->root;
+	if (!nidx)
+		return -ENOENT;
+	height = tree->depth;
+	res = 0;
+	parent = 0;
+	for (;;) {
+		bnode = hfs_bnode_find(tree, nidx);
+		if (IS_ERR(bnode)) {
+			res = PTR_ERR(bnode);
+			bnode = NULL;
+			break;
+		}
+		if (bnode->height != height)
+			goto invalid;
+		if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF))
+			goto invalid;
+		bnode->parent = parent;
+
+		res = __hfs_brec_find(bnode, fd);
+		if (!height)
+			break;
+		if (fd->record < 0)
+			goto release;
+
+		parent = nidx;
+		hfs_bnode_read(bnode, &data, fd->entryoffset, 4);
+		nidx = be32_to_cpu(data);
+		hfs_bnode_put(bnode);
+	}
+	fd->bnode = bnode;
+	return res;
+
+invalid:
+	printk("HFS+-fs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
+		height, bnode->height, bnode->type, nidx, parent);
+	res = -EIO;
+release:
+	hfs_bnode_put(bnode);
+	return res;
+}
+
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+{
+	int res;
+
+	res = hfs_brec_find(fd);
+	if (res)
+		return res;
+	if (fd->entrylength > rec_len)
+		return -EINVAL;
+	hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength);
+	return 0;
+}
+
+int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	int idx, res = 0;
+	u16 off, len, keylen;
+
+	bnode = fd->bnode;
+	tree = bnode->tree;
+
+	if (cnt < 0) {
+		cnt = -cnt;
+		while (cnt > fd->record) {
+			cnt -= fd->record + 1;
+			fd->record = bnode->num_recs - 1;
+			idx = bnode->prev;
+			if (!idx) {
+				res = -ENOENT;
+				goto out;
+			}
+			hfs_bnode_put(bnode);
+			bnode = hfs_bnode_find(tree, idx);
+			if (IS_ERR(bnode)) {
+				res = PTR_ERR(bnode);
+				bnode = NULL;
+				goto out;
+			}
+		}
+		fd->record -= cnt;
+	} else {
+		while (cnt >= bnode->num_recs - fd->record) {
+			cnt -= bnode->num_recs - fd->record;
+			fd->record = 0;
+			idx = bnode->next;
+			if (!idx) {
+				res = -ENOENT;
+				goto out;
+			}
+			hfs_bnode_put(bnode);
+			bnode = hfs_bnode_find(tree, idx);
+			if (IS_ERR(bnode)) {
+				res = PTR_ERR(bnode);
+				bnode = NULL;
+				goto out;
+			}
+		}
+		fd->record += cnt;
+	}
+
+	len = hfs_brec_lenoff(bnode, fd->record, &off);
+	keylen = hfs_brec_keylen(bnode, fd->record);
+	fd->keyoffset = off;
+	fd->keylength = keylen;
+	fd->entryoffset = off + keylen;
+	fd->entrylength = len - keylen;
+	hfs_bnode_read(bnode, fd->key, off, keylen);
+out:
+	fd->bnode = bnode;
+	return res;
+}
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
new file mode 100644
index 0000000..c7d3164
--- /dev/null
+++ b/fs/hfsplus/bitmap.c
@@ -0,0 +1,221 @@
+/*
+ *  linux/fs/hfsplus/bitmap.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handling of allocation file
+ */
+
+#include <linux/pagemap.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
+
+int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
+{
+	struct page *page;
+	struct address_space *mapping;
+	__be32 *pptr, *curr, *end;
+	u32 mask, start, len, n;
+	__be32 val;
+	int i;
+
+	len = *max;
+	if (!len)
+		return size;
+
+	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
+	down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
+			       (filler_t *)mapping->a_ops->readpage, NULL);
+	pptr = kmap(page);
+	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
+	i = offset % 32;
+	offset &= ~(PAGE_CACHE_BITS - 1);
+	if ((size ^ offset) / PAGE_CACHE_BITS)
+		end = pptr + PAGE_CACHE_BITS / 32;
+	else
+		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
+
+	/* scan the first partial u32 for zero bits */
+	val = *curr;
+	if (~val) {
+		n = be32_to_cpu(val);
+		mask = (1U << 31) >> i;
+		for (; i < 32; mask >>= 1, i++) {
+			if (!(n & mask))
+				goto found;
+		}
+	}
+	curr++;
+
+	/* scan complete u32s for the first zero bit */
+	while (1) {
+		while (curr < end) {
+			val = *curr;
+			if (~val) {
+				n = be32_to_cpu(val);
+				mask = 1 << 31;
+				for (i = 0; i < 32; mask >>= 1, i++) {
+					if (!(n & mask))
+						goto found;
+				}
+			}
+			curr++;
+		}
+		kunmap(page);
+		offset += PAGE_CACHE_BITS;
+		if (offset >= size)
+			break;
+		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
+				       (filler_t *)mapping->a_ops->readpage, NULL);
+		curr = pptr = kmap(page);
+		if ((size ^ offset) / PAGE_CACHE_BITS)
+			end = pptr + PAGE_CACHE_BITS / 32;
+		else
+			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
+	}
+	dprint(DBG_BITMAP, "bitmap full\n");
+	start = size;
+	goto out;
+
+found:
+	start = offset + (curr - pptr) * 32 + i;
+	if (start >= size) {
+		dprint(DBG_BITMAP, "bitmap full\n");
+		goto out;
+	}
+	/* do any partial u32 at the start */
+	len = min(size - start, len);
+	while (1) {
+		n |= mask;
+		if (++i >= 32)
+			break;
+		mask >>= 1;
+		if (!--len || n & mask)
+			goto done;
+	}
+	if (!--len)
+		goto done;
+	*curr++ = cpu_to_be32(n);
+	/* do full u32s */
+	while (1) {
+		while (curr < end) {
+			n = be32_to_cpu(*curr);
+			if (len < 32)
+				goto last;
+			if (n) {
+				len = 32;
+				goto last;
+			}
+			*curr++ = cpu_to_be32(0xffffffff);
+			len -= 32;
+		}
+		set_page_dirty(page);
+		kunmap(page);
+		offset += PAGE_CACHE_BITS;
+		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
+				       (filler_t *)mapping->a_ops->readpage, NULL);
+		pptr = kmap(page);
+		curr = pptr;
+		end = pptr + PAGE_CACHE_BITS / 32;
+	}
+last:
+	/* do any partial u32 at end */
+	mask = 1U << 31;
+	for (i = 0; i < len; i++) {
+		if (n & mask)
+			break;
+		n |= mask;
+		mask >>= 1;
+	}
+done:
+	*curr = cpu_to_be32(n);
+	set_page_dirty(page);
+	kunmap(page);
+	*max = offset + (curr - pptr) * 32 + i - start;
+	HFSPLUS_SB(sb).free_blocks -= *max;
+	sb->s_dirt = 1;
+	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
+out:
+	up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	return start;
+}
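+
+/*
+ * The scan above treats the allocation file as a big-endian bitmap in
+ * which bit 31 of each 32-bit word covers the lowest-numbered block.
+ * A stand-alone sketch of the inner "find a free block" step
+ * (illustrative only):
+ *
+ *	u32 n = be32_to_cpu(val);
+ *	u32 mask = 1U << 31;
+ *	int i;
+ *
+ *	for (i = 0; i < 32; mask >>= 1, i++)
+ *		if (!(n & mask))
+ *			break;
+ *
+ * On exit i is the bit number of the first free block within the word,
+ * or 32 if the word is fully allocated.
+ */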
+
+int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
+{
+	struct page *page;
+	struct address_space *mapping;
+	__be32 *pptr, *curr, *end;
+	u32 mask, len, pnr;
+	int i;
+
+	/* is there any actual work to be done? */
+	if (!count)
+		return 0;
+
+	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
+	/* are all of the bits in range? */
+	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
+		return -2;
+
+	down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+	pnr = offset / PAGE_CACHE_BITS;
+	page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+	pptr = kmap(page);
+	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
+	end = pptr + PAGE_CACHE_BITS / 32;
+	len = count;
+
+	/* do any partial u32 at the start */
+	i = offset % 32;
+	if (i) {
+		int j = 32 - i;
+		mask = 0xffffffffU << j;
+		if (j > count) {
+			mask |= 0xffffffffU >> (i + count);
+			*curr++ &= cpu_to_be32(mask);
+			goto out;
+		}
+		*curr++ &= cpu_to_be32(mask);
+		count -= j;
+	}
+
+	/* do full u32s */
+	while (1) {
+		while (curr < end) {
+			if (count < 32)
+				goto done;
+			*curr++ = 0;
+			count -= 32;
+		}
+		if (!count)
+			break;
+		set_page_dirty(page);
+		kunmap(page);
+		page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+		pptr = kmap(page);
+		curr = pptr;
+		end = pptr + PAGE_CACHE_BITS / 32;
+	}
+done:
+	/* do any partial u32 at end */
+	if (count) {
+		mask = 0xffffffffU >> count;
+		*curr &= cpu_to_be32(mask);
+	}
+out:
+	set_page_dirty(page);
+	kunmap(page);
+	HFSPLUS_SB(sb).free_blocks += len;
+	sb->s_dirt = 1;
+	up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+
+	return 0;
+}
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
new file mode 100644
index 0000000..267872e
--- /dev/null
+++ b/fs/hfsplus/bnode.c
@@ -0,0 +1,662 @@
+/*
+ *  linux/fs/hfsplus/bnode.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle basic btree node operations
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/swap.h>
+#include <linux/version.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+#define REF_PAGES	0
+
+/* Copy a specified range of bytes from the raw data of a node */
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+{
+	struct page **pagep;
+	int l;
+
+	off += node->page_offset;
+	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	off &= ~PAGE_CACHE_MASK;
+
+	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	memcpy(buf, kmap(*pagep) + off, l);
+	kunmap(*pagep);
+
+	while ((len -= l) != 0) {
+		buf += l;
+		l = min(len, (int)PAGE_CACHE_SIZE);
+		memcpy(buf, kmap(*++pagep), l);
+		kunmap(*pagep);
+	}
+}
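+
+/*
+ * A worked example of the page arithmetic above, assuming 4 KB pages:
+ * a byte offset of 5000 (after adding page_offset) selects page
+ * 5000 >> 12 = 1 with an in-page offset of 5000 & 4095 = 904, and a
+ * read crossing the page boundary is split into per-page memcpy()
+ * calls.
+ */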
+
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+{
+	__be16 data;
+	// optimize later...
+	hfs_bnode_read(node, &data, off, 2);
+	return be16_to_cpu(data);
+}
+
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+{
+	u8 data;
+	// optimize later...
+	hfs_bnode_read(node, &data, off, 1);
+	return data;
+}
+
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+{
+	struct hfs_btree *tree;
+	int key_len;
+
+	tree = node->tree;
+	if (node->type == HFS_NODE_LEAF ||
+	    tree->attributes & HFS_TREE_VARIDXKEYS)
+		key_len = hfs_bnode_read_u16(node, off) + 2;
+	else
+		key_len = tree->max_key_len + 2;
+
+	hfs_bnode_read(node, key, off, key_len);
+}
+
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+{
+	struct page **pagep;
+	int l;
+
+	off += node->page_offset;
+	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	off &= ~PAGE_CACHE_MASK;
+
+	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	memcpy(kmap(*pagep) + off, buf, l);
+	set_page_dirty(*pagep);
+	kunmap(*pagep);
+
+	while ((len -= l) != 0) {
+		buf += l;
+		l = min(len, (int)PAGE_CACHE_SIZE);
+		memcpy(kmap(*++pagep), buf, l);
+		set_page_dirty(*pagep);
+		kunmap(*pagep);
+	}
+}
+
+void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+{
+	__be16 v = cpu_to_be16(data);
+	// optimize later...
+	hfs_bnode_write(node, &v, off, 2);
+}
+
+void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+{
+	struct page **pagep;
+	int l;
+
+	off += node->page_offset;
+	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	off &= ~PAGE_CACHE_MASK;
+
+	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	memset(kmap(*pagep) + off, 0, l);
+	set_page_dirty(*pagep);
+	kunmap(*pagep);
+
+	while ((len -= l) != 0) {
+		l = min(len, (int)PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, l);
+		set_page_dirty(*pagep);
+		kunmap(*pagep);
+	}
+}
+
+void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
+		    struct hfs_bnode *src_node, int src, int len)
+{
+	struct hfs_btree *tree;
+	struct page **src_page, **dst_page;
+	int l;
+
+	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+	if (!len)
+		return;
+	tree = src_node->tree;
+	src += src_node->page_offset;
+	dst += dst_node->page_offset;
+	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
+	src &= ~PAGE_CACHE_MASK;
+	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
+	dst &= ~PAGE_CACHE_MASK;
+
+	if (src == dst) {
+		l = min(len, (int)PAGE_CACHE_SIZE - src);
+		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+		kunmap(*src_page);
+		set_page_dirty(*dst_page);
+		kunmap(*dst_page);
+
+		while ((len -= l) != 0) {
+			l = min(len, (int)PAGE_CACHE_SIZE);
+			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
+			kunmap(*src_page);
+			set_page_dirty(*dst_page);
+			kunmap(*dst_page);
+		}
+	} else {
+		void *src_ptr, *dst_ptr;
+
+		do {
+			src_ptr = kmap(*src_page) + src;
+			dst_ptr = kmap(*dst_page) + dst;
+			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				l = PAGE_CACHE_SIZE - src;
+				src = 0;
+				dst += l;
+			} else {
+				l = PAGE_CACHE_SIZE - dst;
+				src += l;
+				dst = 0;
+			}
+			l = min(len, l);
+			memcpy(dst_ptr, src_ptr, l);
+			kunmap(*src_page);
+			set_page_dirty(*dst_page);
+			kunmap(*dst_page);
+			if (!dst)
+				dst_page++;
+			else
+				src_page++;
+		} while ((len -= l));
+	}
+}
+
+void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+{
+	struct page **src_page, **dst_page;
+	int l;
+
+	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+	if (!len)
+		return;
+	src += node->page_offset;
+	dst += node->page_offset;
+	if (dst > src) {
+		src += len - 1;
+		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
+		src = (src & ~PAGE_CACHE_MASK) + 1;
+		dst += len - 1;
+		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
+		dst = (dst & ~PAGE_CACHE_MASK) + 1;
+
+		if (src == dst) {
+			while (src < len) {
+				memmove(kmap(*dst_page), kmap(*src_page), src);
+				kunmap(*src_page);
+				set_page_dirty(*dst_page);
+				kunmap(*dst_page);
+				len -= src;
+				src = PAGE_CACHE_SIZE;
+				src_page--;
+				dst_page--;
+			}
+			src -= len;
+			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			kunmap(*src_page);
+			set_page_dirty(*dst_page);
+			kunmap(*dst_page);
+		} else {
+			void *src_ptr, *dst_ptr;
+
+			do {
+				src_ptr = kmap(*src_page) + src;
+				dst_ptr = kmap(*dst_page) + dst;
+				if (src < dst) {
+					l = src;
+					src = PAGE_CACHE_SIZE;
+					dst -= l;
+				} else {
+					l = dst;
+					src -= l;
+					dst = PAGE_CACHE_SIZE;
+				}
+				l = min(len, l);
+				memmove(dst_ptr - l, src_ptr - l, l);
+				kunmap(*src_page);
+				set_page_dirty(*dst_page);
+				kunmap(*dst_page);
+				if (dst == PAGE_CACHE_SIZE)
+					dst_page--;
+				else
+					src_page--;
+			} while ((len -= l));
+		}
+	} else {
+		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
+		src &= ~PAGE_CACHE_MASK;
+		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
+		dst &= ~PAGE_CACHE_MASK;
+
+		if (src == dst) {
+			l = min(len, (int)PAGE_CACHE_SIZE - src);
+			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			kunmap(*src_page);
+			set_page_dirty(*dst_page);
+			kunmap(*dst_page);
+
+			while ((len -= l) != 0) {
+				l = min(len, (int)PAGE_CACHE_SIZE);
+				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				kunmap(*src_page);
+				set_page_dirty(*dst_page);
+				kunmap(*dst_page);
+			}
+		} else {
+			void *src_ptr, *dst_ptr;
+
+			do {
+				src_ptr = kmap(*src_page) + src;
+				dst_ptr = kmap(*dst_page) + dst;
+				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+					l = PAGE_CACHE_SIZE - src;
+					src = 0;
+					dst += l;
+				} else {
+					l = PAGE_CACHE_SIZE - dst;
+					src += l;
+					dst = 0;
+				}
+				l = min(len, l);
+				memmove(dst_ptr, src_ptr, l);
+				kunmap(*src_page);
+				set_page_dirty(*dst_page);
+				kunmap(*dst_page);
+				if (!dst)
+					dst_page++;
+				else
+					src_page++;
+			} while ((len -= l));
+		}
+	}
+}
+
+void hfs_bnode_dump(struct hfs_bnode *node)
+{
+	struct hfs_bnode_desc desc;
+	__be32 cnid;
+	int i, off, key_off;
+
+	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
+	hfs_bnode_read(node, &desc, 0, sizeof(desc));
+	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
+		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
+		desc.type, desc.height, be16_to_cpu(desc.num_recs));
+
+	off = node->tree->node_size - 2;
+	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
+		key_off = hfs_bnode_read_u16(node, off);
+		dprint(DBG_BNODE_MOD, " %d", key_off);
+		if (i && node->type == HFS_NODE_INDEX) {
+			int tmp;
+
+			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
+				tmp = hfs_bnode_read_u16(node, key_off) + 2;
+			else
+				tmp = node->tree->max_key_len + 2;
+			dprint(DBG_BNODE_MOD, " (%d", tmp);
+			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
+			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+		} else if (i && node->type == HFS_NODE_LEAF) {
+			int tmp;
+
+			tmp = hfs_bnode_read_u16(node, key_off);
+			dprint(DBG_BNODE_MOD, " (%d)", tmp);
+		}
+	}
+	dprint(DBG_BNODE_MOD, "\n");
+}
+
+void hfs_bnode_unlink(struct hfs_bnode *node)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *tmp;
+	__be32 cnid;
+
+	tree = node->tree;
+	if (node->prev) {
+		tmp = hfs_bnode_find(tree, node->prev);
+		if (IS_ERR(tmp))
+			return;
+		tmp->next = node->next;
+		cnid = cpu_to_be32(tmp->next);
+		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_put(tmp);
+	} else if (node->type == HFS_NODE_LEAF)
+		tree->leaf_head = node->next;
+
+	if (node->next) {
+		tmp = hfs_bnode_find(tree, node->next);
+		if (IS_ERR(tmp))
+			return;
+		tmp->prev = node->prev;
+		cnid = cpu_to_be32(tmp->prev);
+		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_put(tmp);
+	} else if (node->type == HFS_NODE_LEAF)
+		tree->leaf_tail = node->prev;
+
+	// move down?
+	if (!node->prev && !node->next) {
+		printk("hfs_btree_del_level\n");
+	}
+	if (!node->parent) {
+		tree->root = 0;
+		tree->depth = 0;
+	}
+	set_bit(HFS_BNODE_DELETED, &node->flags);
+}
+
+static inline int hfs_bnode_hash(u32 num)
+{
+	num = (num >> 16) + num;
+	num += num >> 8;
+	return num & (NODE_HASH_SIZE - 1);
+}
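+
+/*
+ * A quick worked example of the folding above, assuming NODE_HASH_SIZE
+ * is a power of two such as 256: for cnid 0x12345,
+ *
+ *	(0x12345 >> 16) + 0x12345 = 0x12346
+ *	0x12346 + (0x12346 >> 8)  = 0x12469
+ *	0x12469 & 0xff            = 0x69
+ *
+ * so the node lands in hash bucket 0x69.
+ */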
+
+struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
+{
+	struct hfs_bnode *node;
+
+	if (cnid >= tree->node_count) {
+		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+		return NULL;
+	}
+
+	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
+	     node; node = node->next_hash) {
+		if (node->this == cnid) {
+			return node;
+		}
+	}
+	return NULL;
+}
+
+static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+{
+	struct super_block *sb;
+	struct hfs_bnode *node, *node2;
+	struct address_space *mapping;
+	struct page *page;
+	int size, block, i, hash;
+	loff_t off;
+
+	if (cnid >= tree->node_count) {
+		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+		return NULL;
+	}
+
+	sb = tree->inode->i_sb;
+	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
+		sizeof(struct page *);
+	node = kmalloc(size, GFP_KERNEL);
+	if (!node)
+		return NULL;
+	memset(node, 0, size);
+	node->tree = tree;
+	node->this = cnid;
+	set_bit(HFS_BNODE_NEW, &node->flags);
+	atomic_set(&node->refcnt, 1);
+	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
+	       node->tree->cnid, node->this);
+	init_waitqueue_head(&node->lock_wq);
+	spin_lock(&tree->hash_lock);
+	node2 = hfs_bnode_findhash(tree, cnid);
+	if (!node2) {
+		hash = hfs_bnode_hash(cnid);
+		node->next_hash = tree->node_hash[hash];
+		tree->node_hash[hash] = node;
+		tree->node_hash_cnt++;
+	} else {
+		spin_unlock(&tree->hash_lock);
+		kfree(node);
+		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		return node2;
+	}
+	spin_unlock(&tree->hash_lock);
+
+	mapping = tree->inode->i_mapping;
+	off = (loff_t)cnid << tree->node_size_shift;
+	block = off >> PAGE_CACHE_SHIFT;
+	node->page_offset = off & ~PAGE_CACHE_MASK;
+	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
+		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+		if (IS_ERR(page))
+			goto fail;
+		if (PageError(page)) {
+			page_cache_release(page);
+			goto fail;
+		}
+#if !REF_PAGES
+		page_cache_release(page);
+#endif
+		node->page[i] = page;
+	}
+
+	return node;
+fail:
+	set_bit(HFS_BNODE_ERROR, &node->flags);
+	return node;
+}
+
+void hfs_bnode_unhash(struct hfs_bnode *node)
+{
+	struct hfs_bnode **p;
+
+	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
+		node->tree->cnid, node->this, atomic_read(&node->refcnt));
+	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
+	     *p && *p != node; p = &(*p)->next_hash)
+		;
+	if (!*p)
+		BUG();
+	*p = node->next_hash;
+	node->tree->node_hash_cnt--;
+}
+
+/* Load a particular node out of a tree */
+struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
+{
+	struct hfs_bnode *node;
+	struct hfs_bnode_desc *desc;
+	int i, rec_off, off, next_off;
+	int entry_size, key_size;
+
+	spin_lock(&tree->hash_lock);
+	node = hfs_bnode_findhash(tree, num);
+	if (node) {
+		hfs_bnode_get(node);
+		spin_unlock(&tree->hash_lock);
+		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		if (test_bit(HFS_BNODE_ERROR, &node->flags))
+			goto node_error;
+		return node;
+	}
+	spin_unlock(&tree->hash_lock);
+	node = __hfs_bnode_create(tree, num);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags))
+		goto node_error;
+	if (!test_bit(HFS_BNODE_NEW, &node->flags))
+		return node;
+
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	node->prev = be32_to_cpu(desc->prev);
+	node->next = be32_to_cpu(desc->next);
+	node->num_recs = be16_to_cpu(desc->num_recs);
+	node->type = desc->type;
+	node->height = desc->height;
+	kunmap(node->page[0]);
+
+	switch (node->type) {
+	case HFS_NODE_HEADER:
+	case HFS_NODE_MAP:
+		if (node->height != 0)
+			goto node_error;
+		break;
+	case HFS_NODE_LEAF:
+		if (node->height != 1)
+			goto node_error;
+		break;
+	case HFS_NODE_INDEX:
+		if (node->height <= 1 || node->height > tree->depth)
+			goto node_error;
+		break;
+	default:
+		goto node_error;
+	}
+
+	rec_off = tree->node_size - 2;
+	off = hfs_bnode_read_u16(node, rec_off);
+	if (off != sizeof(struct hfs_bnode_desc))
+		goto node_error;
+	for (i = 1; i <= node->num_recs; off = next_off, i++) {
+		rec_off -= 2;
+		next_off = hfs_bnode_read_u16(node, rec_off);
+		if (next_off <= off ||
+		    next_off > tree->node_size ||
+		    next_off & 1)
+			goto node_error;
+		entry_size = next_off - off;
+		if (node->type != HFS_NODE_INDEX &&
+		    node->type != HFS_NODE_LEAF)
+			continue;
+		key_size = hfs_bnode_read_u16(node, off) + 2;
+		if (key_size >= entry_size || key_size & 1)
+			goto node_error;
+	}
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+	return node;
+
+node_error:
+	set_bit(HFS_BNODE_ERROR, &node->flags);
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+	hfs_bnode_put(node);
+	return ERR_PTR(-EIO);
+}
+
+void hfs_bnode_free(struct hfs_bnode *node)
+{
+	//int i;
+
+	//for (i = 0; i < node->tree->pages_per_bnode; i++)
+	//	if (node->page[i])
+	//		page_cache_release(node->page[i]);
+	kfree(node);
+}
+
+struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
+{
+	struct hfs_bnode *node;
+	struct page **pagep;
+	int i;
+
+	spin_lock(&tree->hash_lock);
+	node = hfs_bnode_findhash(tree, num);
+	spin_unlock(&tree->hash_lock);
+	if (node) {
+		printk("new node %u already hashed?\n", num);
+		BUG();
+	}
+	node = __hfs_bnode_create(tree, num);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
+		hfs_bnode_put(node);
+		return ERR_PTR(-EIO);
+	}
+
+	pagep = node->page;
+	memset(kmap(*pagep) + node->page_offset, 0,
+	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	set_page_dirty(*pagep);
+	kunmap(*pagep);
+	for (i = 1; i < tree->pages_per_bnode; i++) {
+		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		set_page_dirty(*pagep);
+		kunmap(*pagep);
+	}
+	clear_bit(HFS_BNODE_NEW, &node->flags);
+	wake_up(&node->lock_wq);
+
+	return node;
+}
+
+void hfs_bnode_get(struct hfs_bnode *node)
+{
+	if (node) {
+		atomic_inc(&node->refcnt);
+#if REF_PAGES
+		{
+		int i;
+		for (i = 0; i < node->tree->pages_per_bnode; i++)
+			get_page(node->page[i]);
+		}
+#endif
+		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
+		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+	}
+}
+
+/* Dispose of resources used by a node */
+void hfs_bnode_put(struct hfs_bnode *node)
+{
+	if (node) {
+		struct hfs_btree *tree = node->tree;
+		int i;
+
+		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
+		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+		if (!atomic_read(&node->refcnt))
+			BUG();
+		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
+#if REF_PAGES
+			for (i = 0; i < tree->pages_per_bnode; i++)
+				put_page(node->page[i]);
+#endif
+			return;
+		}
+		for (i = 0; i < tree->pages_per_bnode; i++) {
+			mark_page_accessed(node->page[i]);
+#if REF_PAGES
+			put_page(node->page[i]);
+#endif
+		}
+
+		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
+			hfs_bnode_unhash(node);
+			spin_unlock(&tree->hash_lock);
+			hfs_bmap_free(node);
+			hfs_bnode_free(node);
+			return;
+		}
+		spin_unlock(&tree->hash_lock);
+	}
+}
+
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
new file mode 100644
index 0000000..0ccef2a
--- /dev/null
+++ b/fs/hfsplus/brec.c
@@ -0,0 +1,491 @@
+/*
+ *  linux/fs/hfsplus/brec.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle individual btree records
+ */
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd);
+static int hfs_brec_update_parent(struct hfs_find_data *fd);
+static int hfs_btree_inc_height(struct hfs_btree *);
+
+/* Get the length and offset of the given record in the given node */
+u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off)
+{
+	__be16 retval[2];
+	u16 dataoff;
+
+	dataoff = node->tree->node_size - (rec + 2) * 2;
+	hfs_bnode_read(node, retval, dataoff, 4);
+	*off = be16_to_cpu(retval[1]);
+	return be16_to_cpu(retval[0]) - *off;
+}
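+
+/*
+ * The two 16-bit values read above come from the record offset table
+ * that grows backwards from the end of every node; a rough sketch of
+ * the layout (record sizes illustrative):
+ *
+ *	+-----------+-------+-------+-----+----------+------+------+
+ *	| node desc | rec 0 | rec 1 | ... |   free   | off1 | off0 |
+ *	+-----------+-------+-------+-----+----------+------+------+
+ *	0          14                                      node_size
+ *
+ * so the length of record 'rec' is simply off[rec + 1] - off[rec].
+ */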
+
+/* Get the length of the key from a keyed record */
+u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
+{
+	u16 retval, recoff;
+
+	if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF)
+		return 0;
+
+	if ((node->type == HFS_NODE_INDEX) &&
+	   !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
+		retval = node->tree->max_key_len + 2;
+	} else {
+		recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
+		if (!recoff)
+			return 0;
+		if (node->tree->attributes & HFS_TREE_BIGKEYS)
+			retval = hfs_bnode_read_u16(node, recoff) + 2;
+		else
+			retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
+	}
+	return retval;
+}
+
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node;
+	int size, key_len, rec;
+	int data_off, end_off;
+	int idx_rec_off, data_rec_off, end_rec_off;
+	__be32 cnid;
+
+	tree = fd->tree;
+	if (!fd->bnode) {
+		if (!tree->root)
+			hfs_btree_inc_height(tree);
+		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
+		if (IS_ERR(fd->bnode))
+			return PTR_ERR(fd->bnode);
+		fd->record = -1;
+	}
+	new_node = NULL;
+	key_len = be16_to_cpu(fd->search_key->key_len) + 2;
+again:
+	/* new record idx and complete record size */
+	rec = fd->record + 1;
+	size = key_len + entry_len;
+
+	node = fd->bnode;
+	hfs_bnode_dump(node);
+	/* get last offset */
+	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
+	end_off = hfs_bnode_read_u16(node, end_rec_off);
+	end_rec_off -= 2;
+	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
+	if (size > end_rec_off - end_off) {
+		if (new_node)
+			panic("not enough room!\n");
+		new_node = hfs_bnode_split(fd);
+		if (IS_ERR(new_node))
+			return PTR_ERR(new_node);
+		goto again;
+	}
+	if (node->type == HFS_NODE_LEAF) {
+		tree->leaf_count++;
+		mark_inode_dirty(tree->inode);
+	}
+	node->num_recs++;
+	/* write new last offset */
+	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+	hfs_bnode_write_u16(node, end_rec_off, end_off + size);
+	data_off = end_off;
+	data_rec_off = end_rec_off + 2;
+	idx_rec_off = tree->node_size - (rec + 1) * 2;
+	if (idx_rec_off == data_rec_off)
+		goto skip;
+	/* move all following entries */
+	do {
+		data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
+		hfs_bnode_write_u16(node, data_rec_off, data_off + size);
+		data_rec_off += 2;
+	} while (data_rec_off < idx_rec_off);
+
+	/* move data away */
+	hfs_bnode_move(node, data_off + size, data_off,
+		       end_off - data_off);
+
+skip:
+	hfs_bnode_write(node, fd->search_key, data_off, key_len);
+	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+	hfs_bnode_dump(node);
+
+	if (new_node) {
+		/* update parent key if we inserted a key
+		 * at the start of the first node
+		 */
+		if (!rec && new_node != node)
+			hfs_brec_update_parent(fd);
+
+		hfs_bnode_put(fd->bnode);
+		if (!new_node->parent) {
+			hfs_btree_inc_height(tree);
+			new_node->parent = tree->root;
+		}
+		fd->bnode = hfs_bnode_find(tree, new_node->parent);
+
+		/* create index data entry */
+		cnid = cpu_to_be32(new_node->this);
+		entry = &cnid;
+		entry_len = sizeof(cnid);
+
+		/* get index key */
+		hfs_bnode_read_key(new_node, fd->search_key, 14);
+		__hfs_brec_find(fd->bnode, fd);
+
+		hfs_bnode_put(new_node);
+		new_node = NULL;
+
+		if (tree->attributes & HFS_TREE_VARIDXKEYS)
+			key_len = be16_to_cpu(fd->search_key->key_len) + 2;
+		else {
+			fd->search_key->key_len = cpu_to_be16(tree->max_key_len);
+			key_len = tree->max_key_len + 2;
+		}
+		goto again;
+	}
+
+	if (!rec)
+		hfs_brec_update_parent(fd);
+
+	return 0;
+}
+
+int hfs_brec_remove(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *parent;
+	int end_off, rec_off, data_off, size;
+
+	tree = fd->tree;
+	node = fd->bnode;
+again:
+	rec_off = tree->node_size - (fd->record + 2) * 2;
+	end_off = tree->node_size - (node->num_recs + 1) * 2;
+
+	if (node->type == HFS_NODE_LEAF) {
+		tree->leaf_count--;
+		mark_inode_dirty(tree->inode);
+	}
+	hfs_bnode_dump(node);
+	dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength);
+	if (!--node->num_recs) {
+		hfs_bnode_unlink(node);
+		if (!node->parent)
+			return 0;
+		parent = hfs_bnode_find(tree, node->parent);
+		if (IS_ERR(parent))
+			return PTR_ERR(parent);
+		hfs_bnode_put(node);
+		node = fd->bnode = parent;
+
+		__hfs_brec_find(node, fd);
+		goto again;
+	}
+	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+
+	if (rec_off == end_off)
+		goto skip;
+	size = fd->keylength + fd->entrylength;
+
+	do {
+		data_off = hfs_bnode_read_u16(node, rec_off);
+		hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
+		rec_off -= 2;
+	} while (rec_off >= end_off);
+
+	/* fill hole */
+	hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
+		       data_off - fd->keyoffset - size);
+skip:
+	hfs_bnode_dump(node);
+	if (!fd->record)
+		hfs_brec_update_parent(fd);
+	return 0;
+}
+
+static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node;
+	struct hfs_bnode_desc node_desc;
+	int num_recs, new_rec_off, new_off, old_rec_off;
+	int data_start, data_end, size;
+
+	tree = fd->tree;
+	node = fd->bnode;
+	new_node = hfs_bmap_alloc(tree);
+	if (IS_ERR(new_node))
+		return new_node;
+	hfs_bnode_get(node);
+	dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n",
+		node->this, new_node->this, node->next);
+	new_node->next = node->next;
+	new_node->prev = node->this;
+	new_node->parent = node->parent;
+	new_node->type = node->type;
+	new_node->height = node->height;
+
+	size = tree->node_size / 2 - node->num_recs * 2 - 14;
+	old_rec_off = tree->node_size - 4;
+	num_recs = 1;
+	for (;;) {
+		data_start = hfs_bnode_read_u16(node, old_rec_off);
+		if (data_start > size)
+			break;
+		old_rec_off -= 2;
+		if (++num_recs < node->num_recs)
+			continue;
+		/* panic? */
+		hfs_bnode_put(node);
+		hfs_bnode_put(new_node);
+		return ERR_PTR(-ENOSPC);
+	}
+
+	if (fd->record + 1 < num_recs) {
+		/* new record is in the lower half,
+		 * so leave some more space there
+		 */
+		old_rec_off += 2;
+		num_recs--;
+		data_start = hfs_bnode_read_u16(node, old_rec_off);
+	} else {
+		hfs_bnode_put(node);
+		hfs_bnode_get(new_node);
+		fd->bnode = new_node;
+		fd->record -= num_recs;
+		fd->keyoffset -= data_start - 14;
+		fd->entryoffset -= data_start - 14;
+	}
+	new_node->num_recs = node->num_recs - num_recs;
+	node->num_recs = num_recs;
+
+	new_rec_off = tree->node_size - 2;
+	new_off = 14;
+	size = data_start - new_off;
+	num_recs = new_node->num_recs;
+	data_end = data_start;
+	while (num_recs) {
+		hfs_bnode_write_u16(new_node, new_rec_off, new_off);
+		old_rec_off -= 2;
+		new_rec_off -= 2;
+		data_end = hfs_bnode_read_u16(node, old_rec_off);
+		new_off = data_end - size;
+		num_recs--;
+	}
+	hfs_bnode_write_u16(new_node, new_rec_off, new_off);
+	hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);
+
+	/* update new bnode header */
+	node_desc.next = cpu_to_be32(new_node->next);
+	node_desc.prev = cpu_to_be32(new_node->prev);
+	node_desc.type = new_node->type;
+	node_desc.height = new_node->height;
+	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
+	node_desc.reserved = 0;
+	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
+
+	/* update previous bnode header */
+	node->next = new_node->this;
+	hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
+	node_desc.next = cpu_to_be32(node->next);
+	node_desc.num_recs = cpu_to_be16(node->num_recs);
+	hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
+
+	/* update next bnode header */
+	if (new_node->next) {
+		struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+		next_node->prev = new_node->this;
+		hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
+		node_desc.prev = cpu_to_be32(next_node->prev);
+		hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc));
+		hfs_bnode_put(next_node);
+	} else if (node->this == tree->leaf_tail) {
+		/* if there is no next node, this might be the new tail */
+		tree->leaf_tail = new_node->this;
+		mark_inode_dirty(tree->inode);
+	}
+
+	hfs_bnode_dump(node);
+	hfs_bnode_dump(new_node);
+	hfs_bnode_put(node);
+
+	return new_node;
+}
+
+static int hfs_brec_update_parent(struct hfs_find_data *fd)
+{
+	struct hfs_btree *tree;
+	struct hfs_bnode *node, *new_node, *parent;
+	int newkeylen, diff;
+	int rec, rec_off, end_rec_off;
+	int start_off, end_off;
+
+	tree = fd->tree;
+	node = fd->bnode;
+	new_node = NULL;
+	if (!node->parent)
+		return 0;
+
+again:
+	parent = hfs_bnode_find(tree, node->parent);
+	if (IS_ERR(parent))
+		return PTR_ERR(parent);
+	__hfs_brec_find(parent, fd);
+	hfs_bnode_dump(parent);
+	rec = fd->record;
+
+	/* size difference between old and new key */
+	if (tree->attributes & HFS_TREE_VARIDXKEYS)
+		newkeylen = hfs_bnode_read_u16(node, 14) + 2;
+	else
+		fd->keylength = newkeylen = tree->max_key_len + 2;
+	dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen);
+
+	rec_off = tree->node_size - (rec + 2) * 2;
+	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+	diff = newkeylen - fd->keylength;
+	if (!diff)
+		goto skip;
+	if (diff > 0) {
+		end_off = hfs_bnode_read_u16(parent, end_rec_off);
+		if (end_rec_off - end_off < diff) {
+
+			printk("splitting index node...\n");
+			fd->bnode = parent;
+			new_node = hfs_bnode_split(fd);
+			if (IS_ERR(new_node))
+				return PTR_ERR(new_node);
+			parent = fd->bnode;
+			rec = fd->record;
+			rec_off = tree->node_size - (rec + 2) * 2;
+			end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+		}
+	}
+
+	end_off = start_off = hfs_bnode_read_u16(parent, rec_off);
+	hfs_bnode_write_u16(parent, rec_off, start_off + diff);
+	start_off -= 4;	/* move previous cnid too */
+
+	while (rec_off > end_rec_off) {
+		rec_off -= 2;
+		end_off = hfs_bnode_read_u16(parent, rec_off);
+		hfs_bnode_write_u16(parent, rec_off, end_off + diff);
+	}
+	hfs_bnode_move(parent, start_off + diff, start_off,
+		       end_off - start_off);
+skip:
+	hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen);
+	hfs_bnode_dump(parent);
+
+	hfs_bnode_put(node);
+	node = parent;
+
+	if (new_node) {
+		__be32 cnid;
+
+		fd->bnode = hfs_bnode_find(tree, new_node->parent);
+		/* create index key and entry */
+		hfs_bnode_read_key(new_node, fd->search_key, 14);
+		cnid = cpu_to_be32(new_node->this);
+
+		__hfs_brec_find(fd->bnode, fd);
+		hfs_brec_insert(fd, &cnid, sizeof(cnid));
+		hfs_bnode_put(fd->bnode);
+		hfs_bnode_put(new_node);
+
+		if (!rec) {
+			if (new_node == node)
+				goto out;
+			/* restore search_key */
+			hfs_bnode_read_key(node, fd->search_key, 14);
+		}
+	}
+
+	if (!rec && node->parent)
+		goto again;
+out:
+	fd->bnode = node;
+	return 0;
+}
+
+static int hfs_btree_inc_height(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node, *new_node;
+	struct hfs_bnode_desc node_desc;
+	int key_size, rec;
+	__be32 cnid;
+
+	node = NULL;
+	if (tree->root) {
+		node = hfs_bnode_find(tree, tree->root);
+		if (IS_ERR(node))
+			return PTR_ERR(node);
+	}
+	new_node = hfs_bmap_alloc(tree);
+	if (IS_ERR(new_node)) {
+		hfs_bnode_put(node);
+		return PTR_ERR(new_node);
+	}
+
+	tree->root = new_node->this;
+	if (!tree->depth) {
+		tree->leaf_head = tree->leaf_tail = new_node->this;
+		new_node->type = HFS_NODE_LEAF;
+		new_node->num_recs = 0;
+	} else {
+		new_node->type = HFS_NODE_INDEX;
+		new_node->num_recs = 1;
+	}
+	new_node->parent = 0;
+	new_node->next = 0;
+	new_node->prev = 0;
+	new_node->height = ++tree->depth;
+
+	node_desc.next = cpu_to_be32(new_node->next);
+	node_desc.prev = cpu_to_be32(new_node->prev);
+	node_desc.type = new_node->type;
+	node_desc.height = new_node->height;
+	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
+	node_desc.reserved = 0;
+	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
+
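+	/*
+	 * The first record starts right after the 14-byte node descriptor;
+	 * store its offset in the last two bytes of the node, which form the
+	 * first entry of the record offset table.
+	 */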
+	rec = tree->node_size - 2;
+	hfs_bnode_write_u16(new_node, rec, 14);
+
+	if (node) {
+		/* insert old root idx into new root */
+		node->parent = tree->root;
+		if (node->type == HFS_NODE_LEAF ||
+		    tree->attributes & HFS_TREE_VARIDXKEYS)
+			key_size = hfs_bnode_read_u16(node, 14) + 2;
+		else
+			key_size = tree->max_key_len + 2;
+		hfs_bnode_copy(new_node, 14, node, 14, key_size);
+
+		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+			key_size = tree->max_key_len + 2;
+			hfs_bnode_write_u16(new_node, 14, tree->max_key_len);
+		}
+		cnid = cpu_to_be32(node->this);
+		hfs_bnode_write(new_node, &cnid, 14 + key_size, 4);
+
+		rec -= 2;
+		hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4);
+
+		hfs_bnode_put(node);
+	}
+	hfs_bnode_put(new_node);
+	mark_inode_dirty(tree->inode);
+
+	return 0;
+}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
new file mode 100644
index 0000000..44326aa
--- /dev/null
+++ b/fs/hfsplus/btree.c
@@ -0,0 +1,319 @@
+/*
+ *  linux/fs/hfsplus/btree.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handle opening/closing btree
+ */
+
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+
+/* Get a reference to a B*Tree and do some initial checks */
+struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
+{
+	struct hfs_btree *tree;
+	struct hfs_btree_header_rec *head;
+	struct address_space *mapping;
+	struct page *page;
+	unsigned int size;
+
+	tree = kmalloc(sizeof(*tree), GFP_KERNEL);
+	if (!tree)
+		return NULL;
+	memset(tree, 0, sizeof(*tree));
+
+	init_MUTEX(&tree->tree_lock);
+	spin_lock_init(&tree->hash_lock);
+	/* Set the correct compare function */
+	tree->sb = sb;
+	tree->cnid = id;
+	if (id == HFSPLUS_EXT_CNID) {
+		tree->keycmp = hfsplus_ext_cmp_key;
+	} else if (id == HFSPLUS_CAT_CNID) {
+		tree->keycmp = hfsplus_cat_cmp_key;
+	} else {
+		printk("HFS+-fs: unknown B*Tree requested\n");
+		goto free_tree;
+	}
+	tree->inode = iget(sb, id);
+	if (!tree->inode)
+		goto free_tree;
+
+	mapping = tree->inode->i_mapping;
+	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	if (IS_ERR(page))
+		goto free_tree;
+
+	/* Load the header */
+	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	tree->root = be32_to_cpu(head->root);
+	tree->leaf_count = be32_to_cpu(head->leaf_count);
+	tree->leaf_head = be32_to_cpu(head->leaf_head);
+	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
+	tree->node_count = be32_to_cpu(head->node_count);
+	tree->free_nodes = be32_to_cpu(head->free_nodes);
+	tree->attributes = be32_to_cpu(head->attributes);
+	tree->node_size = be16_to_cpu(head->node_size);
+	tree->max_key_len = be16_to_cpu(head->max_key_len);
+	tree->depth = be16_to_cpu(head->depth);
+
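+	/*
+	 * The node size must be a nonzero power of two so that
+	 * node_size_shift can be derived from it with ffs().
+	 */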
+	size = tree->node_size;
+	if (!size || size & (size - 1))
+		goto fail_page;
+	if (!tree->node_count)
+		goto fail_page;
+	tree->node_size_shift = ffs(size) - 1;
+
+	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	kunmap(page);
+	page_cache_release(page);
+	return tree;
+
+ fail_page:
+	tree->inode->i_mapping->a_ops = &hfsplus_aops;
+	page_cache_release(page);
+ free_tree:
+	iput(tree->inode);
+	kfree(tree);
+	return NULL;
+}
+
+/* Release resources used by a btree */
+void hfs_btree_close(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node;
+	int i;
+
+	if (!tree)
+		return;
+
+	for (i = 0; i < NODE_HASH_SIZE; i++) {
+		while ((node = tree->node_hash[i])) {
+			tree->node_hash[i] = node->next_hash;
+			if (atomic_read(&node->refcnt))
+				printk("HFS+: node %d:%d still has %d user(s)!\n",
+					node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			hfs_bnode_free(node);
+			tree->node_hash_cnt--;
+		}
+	}
+	iput(tree->inode);
+	kfree(tree);
+}
+
+void hfs_btree_write(struct hfs_btree *tree)
+{
+	struct hfs_btree_header_rec *head;
+	struct hfs_bnode *node;
+	struct page *page;
+
+	node = hfs_bnode_find(tree, 0);
+	if (IS_ERR(node))
+		/* panic? */
+		return;
+	/* Load the header */
+	page = node->page[0];
+	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+
+	head->root = cpu_to_be32(tree->root);
+	head->leaf_count = cpu_to_be32(tree->leaf_count);
+	head->leaf_head = cpu_to_be32(tree->leaf_head);
+	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
+	head->node_count = cpu_to_be32(tree->node_count);
+	head->free_nodes = cpu_to_be32(tree->free_nodes);
+	head->attributes = cpu_to_be32(tree->attributes);
+	head->depth = cpu_to_be16(tree->depth);
+
+	kunmap(page);
+	set_page_dirty(page);
+	hfs_bnode_put(node);
+}
+
+static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
+{
+	struct hfs_btree *tree = prev->tree;
+	struct hfs_bnode *node;
+	struct hfs_bnode_desc desc;
+	__be32 cnid;
+
+	node = hfs_bnode_create(tree, idx);
+	if (IS_ERR(node))
+		return node;
+
+	tree->free_nodes--;
+	prev->next = idx;
+	cnid = cpu_to_be32(idx);
+	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+
+	node->type = HFS_NODE_MAP;
+	node->num_recs = 1;
+	hfs_bnode_clear(node, 0, tree->node_size);
+	desc.next = 0;
+	desc.prev = 0;
+	desc.type = HFS_NODE_MAP;
+	desc.height = 0;
+	desc.num_recs = cpu_to_be16(1);
+	desc.reserved = 0;
+	hfs_bnode_write(node, &desc, 0, sizeof(desc));
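+	/*
+	 * A map node carries a single bitmap record starting at offset 14.
+	 * The leading 0x8000 sets the first bit, accounting for this map node
+	 * itself; the two u16s at the end of the node form its offset table.
+	 */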
+	hfs_bnode_write_u16(node, 14, 0x8000);
+	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
+	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
+
+	return node;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+	struct hfs_bnode *node, *next_node;
+	struct page **pagep;
+	u32 nidx, idx;
+	u16 off, len;
+	u8 *data, byte, m;
+	int i;
+
+	while (!tree->free_nodes) {
+		struct inode *inode = tree->inode;
+		u32 count;
+		int res;
+
+		res = hfsplus_file_extend(inode);
+		if (res)
+			return ERR_PTR(res);
+		HFSPLUS_I(inode).phys_size = inode->i_size =
+				(loff_t)HFSPLUS_I(inode).alloc_blocks <<
+				HFSPLUS_SB(tree->sb).alloc_blksz_shift;
+		HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
+					     HFSPLUS_SB(tree->sb).fs_shift;
+		inode_set_bytes(inode, inode->i_size);
+		count = inode->i_size >> tree->node_size_shift;
+		tree->free_nodes = count - tree->node_count;
+		tree->node_count = count;
+	}
+
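+	/*
+	 * Scan the allocation bitmap for a clear bit; each bit stands for one
+	 * node of the tree file.  The first map record is record 2 of the
+	 * header node, later map nodes are found through their 'next' links.
+	 */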
+	nidx = 0;
+	node = hfs_bnode_find(tree, nidx);
+	if (IS_ERR(node))
+		return node;
+	len = hfs_brec_lenoff(node, 2, &off);
+
+	off += node->page_offset;
+	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	data = kmap(*pagep);
+	off &= ~PAGE_CACHE_MASK;
+	idx = 0;
+
+	for (;;) {
+		while (len) {
+			byte = data[off];
+			if (byte != 0xff) {
+				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
+					if (!(byte & m)) {
+						idx += i;
+						data[off] |= m;
+						set_page_dirty(*pagep);
+						kunmap(*pagep);
+						tree->free_nodes--;
+						mark_inode_dirty(tree->inode);
+						hfs_bnode_put(node);
+						if (!idx) {
+							printk("unexpected idx %u (%u)\n", idx, node->this);
+							BUG();
+						}
+						return hfs_bnode_create(tree, idx);
+					}
+				}
+			}
+			if (++off >= PAGE_CACHE_SIZE) {
+				kunmap(*pagep);
+				data = kmap(*++pagep);
+				off = 0;
+			}
+			idx += 8;
+			len--;
+		}
+		kunmap(*pagep);
+		nidx = node->next;
+		if (!nidx) {
+			printk("create new bmap node...\n");
+			next_node = hfs_bmap_new_bmap(node, idx);
+		} else
+			next_node = hfs_bnode_find(tree, nidx);
+		hfs_bnode_put(node);
+		if (IS_ERR(next_node))
+			return next_node;
+		node = next_node;
+
+		len = hfs_brec_lenoff(node, 0, &off);
+		off += node->page_offset;
+		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		data = kmap(*pagep);
+		off &= ~PAGE_CACHE_MASK;
+	}
+}
+
+void hfs_bmap_free(struct hfs_bnode *node)
+{
+	struct hfs_btree *tree;
+	struct page *page;
+	u16 off, len;
+	u32 nidx;
+	u8 *data, byte, m;
+
+	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
+	if (!node->this)
+		BUG();
+	tree = node->tree;
+	nidx = node->this;
+	node = hfs_bnode_find(tree, 0);
+	if (IS_ERR(node))
+		return;
+	len = hfs_brec_lenoff(node, 2, &off);
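+	/* Skip whole map records until the one covering bit 'nidx' is reached. */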
+	while (nidx >= len * 8) {
+		u32 i;
+
+		nidx -= len * 8;
+		i = node->next;
+		if (!i) {
+			/* panic */;
+			printk("HFS: unable to free bnode %u. bmap not found!\n", node->this);
+			hfs_bnode_put(node);
+			return;
+		}
+		hfs_bnode_put(node);
+		node = hfs_bnode_find(tree, i);
+		if (IS_ERR(node))
+			return;
+		if (node->type != HFS_NODE_MAP) {
+			/* panic */;
+			printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type);
+			hfs_bnode_put(node);
+			return;
+		}
+		len = hfs_brec_lenoff(node, 0, &off);
+	}
+	off += node->page_offset + nidx / 8;
+	page = node->page[off >> PAGE_CACHE_SHIFT];
+	data = kmap(page);
+	off &= ~PAGE_CACHE_MASK;
+	m = 1 << (~nidx & 7);
+	byte = data[off];
+	if (!(byte & m)) {
+		printk("HFS: trying to free an already-free bnode %u(%d)\n", node->this, node->type);
+		kunmap(page);
+		hfs_bnode_put(node);
+		return;
+	}
+	data[off] = byte & ~m;
+	set_page_dirty(page);
+	kunmap(page);
+	hfs_bnode_put(node);
+	tree->free_nodes++;
+	mark_inode_dirty(tree->inode);
+}
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
new file mode 100644
index 0000000..9471279
--- /dev/null
+++ b/fs/hfsplus/catalog.c
@@ -0,0 +1,358 @@
+/*
+ *  linux/fs/hfsplus/catalog.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handling of catalog records
+ */
+
+#include <linux/sched.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+int hfsplus_cat_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
+{
+	__be32 k1p, k2p;
+
+	k1p = k1->cat.parent;
+	k2p = k2->cat.parent;
+	if (k1p != k2p)
+		return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
+
+	return hfsplus_unistrcmp(&k1->cat.name, &k2->cat.name);
+}
+
+void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
+			   u32 parent, struct qstr *str)
+{
+	int len;
+
+	key->cat.parent = cpu_to_be32(parent);
+	if (str) {
+		hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len);
+		len = be16_to_cpu(key->cat.name.length);
+	} else {
+		key->cat.name.length = 0;
+		len = 0;
+	}
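+	/* key length: 4-byte parent CNID + 2-byte name length + UTF-16 name */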
+	key->key_len = cpu_to_be16(6 + 2 * len);
+}
+
+static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
+				      struct hfsplus_unistr *name)
+{
+	int ustrlen;
+
+	ustrlen = be16_to_cpu(name->length);
+	key->cat.parent = cpu_to_be32(parent);
+	key->cat.name.length = cpu_to_be16(ustrlen);
+	ustrlen *= 2;
+	memcpy(key->cat.name.unicode, name->unicode, ustrlen);
+	key->key_len = cpu_to_be16(6 + ustrlen);
+}
+
+static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
+{
+	if (inode->i_flags & S_IMMUTABLE)
+		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
+	else
+		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
+	if (inode->i_flags & S_APPEND)
+		perms->rootflags |= HFSPLUS_FLG_APPEND;
+	else
+		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
+	HFSPLUS_I(inode).rootflags = perms->rootflags;
+	HFSPLUS_I(inode).userflags = perms->userflags;
+	perms->mode = cpu_to_be16(inode->i_mode);
+	perms->owner = cpu_to_be32(inode->i_uid);
+	perms->group = cpu_to_be32(inode->i_gid);
+}
+
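+/*
+ * Fill in the on-disk catalog record for an inode: a folder record for
+ * directories, a file record for everything else.  Returns the record size.
+ */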
+static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode)
+{
+	if (S_ISDIR(inode->i_mode)) {
+		struct hfsplus_cat_folder *folder;
+
+		folder = &entry->folder;
+		memset(folder, 0, sizeof(*folder));
+		folder->type = cpu_to_be16(HFSPLUS_FOLDER);
+		folder->id = cpu_to_be32(inode->i_ino);
+		folder->create_date = folder->content_mod_date =
+			folder->attribute_mod_date = folder->access_date = hfsp_now2mt();
+		hfsplus_set_perms(inode, &folder->permissions);
+		if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
+			/* invisible and namelocked */
+			folder->user_info.frFlags = cpu_to_be16(0x5000);
+		return sizeof(*folder);
+	} else {
+		struct hfsplus_cat_file *file;
+
+		file = &entry->file;
+		memset(file, 0, sizeof(*file));
+		file->type = cpu_to_be16(HFSPLUS_FILE);
+		file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
+		file->id = cpu_to_be32(cnid);
+		file->create_date = file->content_mod_date =
+			file->attribute_mod_date = file->access_date = hfsp_now2mt();
+		if (cnid == inode->i_ino) {
+			hfsplus_set_perms(inode, &file->permissions);
+			file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
+			file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+			if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
+				file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
+		} else {
+			file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
+			file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
+			file->user_info.fdFlags = cpu_to_be16(0x100);
+			file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
+		}
+		return sizeof(*file);
+	}
+}
+
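+/*
+ * Build a catalog thread record that maps a CNID back to its parent and
+ * name.  The fixed part (record type, reserved, parent CNID, name length)
+ * is 10 bytes, followed by the UTF-16 name.
+ */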
+static int hfsplus_fill_cat_thread(struct super_block *sb,
+				   hfsplus_cat_entry *entry, int type,
+				   u32 parentid, struct qstr *str)
+{
+	entry->type = cpu_to_be16(type);
+	entry->thread.reserved = 0;
+	entry->thread.parentID = cpu_to_be32(parentid);
+	hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len);
+	return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
+}
+
+/* Try to get a catalog entry for a given catalog id */
+int hfsplus_find_cat(struct super_block *sb, u32 cnid,
+		     struct hfs_find_data *fd)
+{
+	hfsplus_cat_entry tmp;
+	int err;
+	u16 type;
+
+	hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
+	err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
+	if (err)
+		return err;
+
+	type = be16_to_cpu(tmp.type);
+	if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
+		printk("HFS+-fs: Found bad thread record in catalog\n");
+		return -EIO;
+	}
+
+	hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID),
+				 &tmp.thread.nodeName);
+	return hfs_brec_find(fd);
+}
+
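+/*
+ * A catalog object consists of two records: a thread record keyed by its own
+ * CNID with an empty name, and a file/folder record keyed by (parent, name).
+ * The thread record is inserted first and rolled back if the second insert
+ * fails.
+ */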
+int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
+{
+	struct hfs_find_data fd;
+	struct super_block *sb;
+	hfsplus_cat_entry entry;
+	int entry_size;
+	int err;
+
+	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
+	sb = dir->i_sb;
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+
+	hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+	entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
+			HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
+			dir->i_ino, str);
+	err = hfs_brec_find(&fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto err2;
+	}
+	err = hfs_brec_insert(&fd, &entry, entry_size);
+	if (err)
+		goto err2;
+
+	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+	entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
+	err = hfs_brec_find(&fd);
+	if (err != -ENOENT) {
+		/* panic? */
+		if (!err)
+			err = -EEXIST;
+		goto err1;
+	}
+	err = hfs_brec_insert(&fd, &entry, entry_size);
+	if (err)
+		goto err1;
+
+	dir->i_size++;
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	hfs_find_exit(&fd);
+	return 0;
+
+err1:
+	hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+	if (!hfs_brec_find(&fd))
+		hfs_brec_remove(&fd);
+err2:
+	hfs_find_exit(&fd);
+	return err;
+}
+
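+/*
+ * Remove both catalog records of an object.  When no name is given, the name
+ * is taken from the thread record so the main record can be located.
+ */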
+int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
+{
+	struct super_block *sb;
+	struct hfs_find_data fd;
+	struct hfsplus_fork_raw fork;
+	struct list_head *pos;
+	int err, off;
+	u16 type;
+
+	dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+	sb = dir->i_sb;
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+
+	if (!str) {
+		int len;
+
+		hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+		err = hfs_brec_find(&fd);
+		if (err)
+			goto out;
+
+		off = fd.entryoffset + offsetof(struct hfsplus_cat_thread, nodeName);
+		fd.search_key->cat.parent = cpu_to_be32(dir->i_ino);
+		hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.length, off, 2);
+		len = be16_to_cpu(fd.search_key->cat.name.length) * 2;
+		hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.unicode, off + 2, len);
+		fd.search_key->key_len = cpu_to_be16(6 + len);
+	} else
+		hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+
+	err = hfs_brec_find(&fd);
+	if (err)
+		goto out;
+
+	type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
+	if (type == HFSPLUS_FILE) {
+#if 0
+		off = fd.entryoffset + offsetof(struct hfsplus_cat_file, data_fork);
+		hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
+		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA);
+#endif
+
+		off = fd.entryoffset + offsetof(struct hfsplus_cat_file, rsrc_fork);
+		hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
+		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
+	}
+
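+	/*
+	 * If the removed entry sorts before the saved position of a readdir
+	 * in progress, pull that reader's f_pos back by one to keep it in
+	 * sync.
+	 */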
+	list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) {
+		struct hfsplus_readdir_data *rd =
+			list_entry(pos, struct hfsplus_readdir_data, list);
+		if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
+			rd->file->f_pos--;
+	}
+
+	err = hfs_brec_remove(&fd);
+	if (err)
+		goto out;
+
+	hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+	err = hfs_brec_find(&fd);
+	if (err)
+		goto out;
+
+	err = hfs_brec_remove(&fd);
+	if (err)
+		goto out;
+
+	dir->i_size--;
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+out:
+	hfs_find_exit(&fd);
+
+	return err;
+}
+
+int hfsplus_rename_cat(u32 cnid,
+		       struct inode *src_dir, struct qstr *src_name,
+		       struct inode *dst_dir, struct qstr *dst_name)
+{
+	struct super_block *sb;
+	struct hfs_find_data src_fd, dst_fd;
+	hfsplus_cat_entry entry;
+	int entry_size, type;
+	int err = 0;
+
+	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
+		dst_dir->i_ino, dst_name->name);
+	sb = src_dir->i_sb;
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd);
+	dst_fd = src_fd;
+
+	/* find the old dir entry and read the data */
+	hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+
+	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
+				src_fd.entrylength);
+
+	/* create new dir entry with the data from the old entry */
+	hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
+	err = hfs_brec_find(&dst_fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto out;
+	}
+
+	err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength);
+	if (err)
+		goto out;
+	dst_dir->i_size++;
+	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dst_dir);
+
+	/* finally remove the old entry */
+	hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+	err = hfs_brec_remove(&src_fd);
+	if (err)
+		goto out;
+	src_dir->i_size--;
+	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(src_dir);
+
+	/* remove old thread entry */
+	hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
+	err = hfs_brec_find(&src_fd);
+	if (err)
+		goto out;
+	type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
+	err = hfs_brec_remove(&src_fd);
+	if (err)
+		goto out;
+
+	/* create new thread entry */
+	hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
+	entry_size = hfsplus_fill_cat_thread(sb, &entry, type, dst_dir->i_ino, dst_name);
+	err = hfs_brec_find(&dst_fd);
+	if (err != -ENOENT) {
+		if (!err)
+			err = -EEXIST;
+		goto out;
+	}
+	err = hfs_brec_insert(&dst_fd, &entry, entry_size);
+out:
+	hfs_bnode_put(dst_fd.bnode);
+	hfs_find_exit(&src_fd);
+	return err;
+}
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
new file mode 100644
index 0000000..7bda766
--- /dev/null
+++ b/fs/hfsplus/dir.c
@@ -0,0 +1,484 @@
+/*
+ *  linux/fs/hfsplus/dir.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handling of directories
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/version.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+static inline void hfsplus_instantiate(struct dentry *dentry,
+				       struct inode *inode, u32 cnid)
+{
+	dentry->d_fsdata = (void *)(unsigned long)cnid;
+	d_instantiate(dentry, inode);
+}
+
+/* Find the entry inside dir named dentry->d_name */
+static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
+				     struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	struct hfs_find_data fd;
+	struct super_block *sb;
+	hfsplus_cat_entry entry;
+	int err;
+	u32 cnid, linkid = 0;
+	u16 type;
+
+	sb = dir->i_sb;
+	dentry->d_fsdata = NULL;
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
+again:
+	err = hfs_brec_read(&fd, &entry, sizeof(entry));
+	if (err) {
+		if (err == -ENOENT) {
+			hfs_find_exit(&fd);
+			/* No such entry */
+			inode = NULL;
+			goto out;
+		}
+		goto fail;
+	}
+	type = be16_to_cpu(entry.type);
+	if (type == HFSPLUS_FOLDER) {
+		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
+			err = -EIO;
+			goto fail;
+		}
+		cnid = be32_to_cpu(entry.folder.id);
+		dentry->d_fsdata = (void *)(unsigned long)cnid;
+	} else if (type == HFSPLUS_FILE) {
+		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
+			err = -EIO;
+			goto fail;
+		}
+		cnid = be32_to_cpu(entry.file.id);
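+		/*
+		 * Hard links are stored as stub files in the hidden metadata
+		 * directory; resolve them by looking up the matching
+		 * "iNode<dev>" entry there.
+		 */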
+		if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
+		    entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR)) {
+			struct qstr str;
+			char name[32];
+
+			if (dentry->d_fsdata) {
+				/* hard link pointing at another hard link */
+				err = -ENOENT;
+				inode = NULL;
+				hfs_find_exit(&fd);
+				goto out;
+			}
+			dentry->d_fsdata = (void *)(unsigned long)cnid;
+			linkid = be32_to_cpu(entry.file.permissions.dev);
+			str.len = sprintf(name, "iNode%d", linkid);
+			str.name = name;
+			hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
+			goto again;
+		} else if (!dentry->d_fsdata)
+			dentry->d_fsdata = (void *)(unsigned long)cnid;
+	} else {
+		printk("HFS+-fs: Illegal catalog entry type in lookup\n");
+		err = -EIO;
+		goto fail;
+	}
+	hfs_find_exit(&fd);
+	inode = iget(dir->i_sb, cnid);
+	if (!inode)
+		return ERR_PTR(-EACCES);
+	if (S_ISREG(inode->i_mode))
+		HFSPLUS_I(inode).dev = linkid;
+out:
+	d_add(dentry, inode);
+	return NULL;
+fail:
+	hfs_find_exit(&fd);
+	return ERR_PTR(err);
+}
+
+static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	int len, err;
+	char strbuf[HFSPLUS_MAX_STRLEN + 1];
+	hfsplus_cat_entry entry;
+	struct hfs_find_data fd;
+	struct hfsplus_readdir_data *rd;
+	u16 type;
+
+	if (filp->f_pos >= inode->i_size)
+		return 0;
+
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
+	err = hfs_brec_find(&fd);
+	if (err)
+		goto out;
+
+	switch ((u32)filp->f_pos) {
+	case 0:
+		/* This is completely artificial... */
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+			goto out;
+		filp->f_pos++;
+		/* fall through */
+	case 1:
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
+			printk("HFS+-fs: bad catalog folder thread\n");
+			err = -EIO;
+			goto out;
+		}
+		if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
+			printk("HFS+-fs: truncated catalog thread\n");
+			err = -EIO;
+			goto out;
+		}
+		if (filldir(dirent, "..", 2, 1,
+			    be32_to_cpu(entry.thread.parentID), DT_DIR))
+			goto out;
+		filp->f_pos++;
+		/* fall through */
+	default:
+		if (filp->f_pos >= inode->i_size)
+			goto out;
+		err = hfs_brec_goto(&fd, filp->f_pos - 1);
+		if (err)
+			goto out;
+	}
+
+	for (;;) {
+		if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
+			printk("HFS+-fs: walked past end of dir\n");
+			err = -EIO;
+			goto out;
+		}
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		type = be16_to_cpu(entry.type);
+		len = HFSPLUS_MAX_STRLEN;
+		err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
+		if (err)
+			goto out;
+		if (type == HFSPLUS_FOLDER) {
+			if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
+				printk("HFS+-fs: small dir entry\n");
+				err = -EIO;
+				goto out;
+			}
+			if (HFSPLUS_SB(sb).hidden_dir &&
+			    HFSPLUS_SB(sb).hidden_dir->i_ino == be32_to_cpu(entry.folder.id))
+				goto next;
+			if (filldir(dirent, strbuf, len, filp->f_pos,
+				    be32_to_cpu(entry.folder.id), DT_DIR))
+				break;
+		} else if (type == HFSPLUS_FILE) {
+			if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
+				printk("HFS+-fs: small file entry\n");
+				err = -EIO;
+				goto out;
+			}
+			if (filldir(dirent, strbuf, len, filp->f_pos,
+				    be32_to_cpu(entry.file.id), DT_REG))
+				break;
+		} else {
+			printk("HFS+-fs: bad catalog entry type\n");
+			err = -EIO;
+			goto out;
+		}
+	next:
+		filp->f_pos++;
+		if (filp->f_pos >= inode->i_size)
+			goto out;
+		err = hfs_brec_goto(&fd, 1);
+		if (err)
+			goto out;
+	}
+	rd = filp->private_data;
+	if (!rd) {
+		rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
+		if (!rd) {
+			err = -ENOMEM;
+			goto out;
+		}
+		filp->private_data = rd;
+		rd->file = filp;
+		list_add(&rd->list, &HFSPLUS_I(inode).open_dir_list);
+	}
+	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
+out:
+	hfs_find_exit(&fd);
+	return err;
+}
+
+static int hfsplus_dir_release(struct inode *inode, struct file *file)
+{
+	struct hfsplus_readdir_data *rd = file->private_data;
+	if (rd) {
+		list_del(&rd->list);
+		kfree(rd);
+	}
+	return 0;
+}
+
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+			  struct nameidata *nd)
+{
+	struct inode *inode;
+	int res;
+
+	inode = hfsplus_new_inode(dir->i_sb, mode);
+	if (!inode)
+		return -ENOSPC;
+
+	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res) {
+		inode->i_nlink = 0;
+		hfsplus_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+	hfsplus_instantiate(dentry, inode, inode->i_ino);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
+			struct dentry *dst_dentry)
+{
+	struct super_block *sb = dst_dir->i_sb;
+	struct inode *inode = src_dentry->d_inode;
+	struct inode *src_dir = src_dentry->d_parent->d_inode;
+	struct qstr str;
+	char name[32];
+	u32 cnid, id;
+	int res;
+
+	if (HFSPLUS_IS_RSRC(inode))
+		return -EPERM;
+
+	if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) {
+		for (;;) {
+			get_random_bytes(&id, sizeof(cnid));
+			id &= 0x3fffffff;
+			str.name = name;
+			str.len = sprintf(name, "iNode%d", id);
+			res = hfsplus_rename_cat(inode->i_ino,
+						 src_dir, &src_dentry->d_name,
+						 HFSPLUS_SB(sb).hidden_dir, &str);
+			if (!res)
+				break;
+			if (res != -EEXIST)
+				return res;
+		}
+		HFSPLUS_I(inode).dev = id;
+		cnid = HFSPLUS_SB(sb).next_cnid++;
+		src_dentry->d_fsdata = (void *)(unsigned long)cnid;
+		res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode);
+		if (res)
+			/* panic? */
+			return res;
+		HFSPLUS_SB(sb).file_count++;
+	}
+	cnid = HFSPLUS_SB(sb).next_cnid++;
+	res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
+	if (res)
+		return res;
+
+	inode->i_nlink++;
+	hfsplus_instantiate(dst_dentry, inode, cnid);
+	atomic_inc(&inode->i_count);
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	HFSPLUS_SB(sb).file_count++;
+	sb->s_dirt = 1;
+
+	return 0;
+}
+
+static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode = dentry->d_inode;
+	struct qstr str;
+	char name[32];
+	u32 cnid;
+	int res;
+
+	if (HFSPLUS_IS_RSRC(inode))
+		return -EPERM;
+
+	cnid = (u32)(unsigned long)dentry->d_fsdata;
+	if (inode->i_ino == cnid &&
+	    atomic_read(&HFSPLUS_I(inode).opencnt)) {
+		str.name = name;
+		str.len = sprintf(name, "temp%lu", inode->i_ino);
+		res = hfsplus_rename_cat(inode->i_ino,
+					 dir, &dentry->d_name,
+					 HFSPLUS_SB(sb).hidden_dir, &str);
+		if (!res)
+			inode->i_flags |= S_DEAD;
+		return res;
+	}
+	res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
+	if (res)
+		return res;
+
+	inode->i_nlink--;
+	hfsplus_delete_inode(inode);
+	if (inode->i_ino != cnid && !inode->i_nlink) {
+		if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
+			res = hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
+			if (!res)
+				hfsplus_delete_inode(inode);
+		} else
+			inode->i_flags |= S_DEAD;
+	}
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+
+	return res;
+}
+
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct inode *inode;
+	int res;
+
+	inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode);
+	if (!inode)
+		return -ENOSPC;
+
+	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res) {
+		inode->i_nlink = 0;
+		hfsplus_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+	hfsplus_instantiate(dentry, inode, inode->i_ino);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode;
+	int res;
+
+	inode = dentry->d_inode;
+	if (inode->i_size != 2)
+		return -ENOTEMPTY;
+	res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
+	if (res)
+		return res;
+	inode->i_nlink = 0;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	hfsplus_delete_inode(inode);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
+			   const char *symname)
+{
+	struct super_block *sb;
+	struct inode *inode;
+	int res;
+
+	sb = dir->i_sb;
+	inode = hfsplus_new_inode(sb, S_IFLNK | S_IRWXUGO);
+	if (!inode)
+		return -ENOSPC;
+
+	res = page_symlink(inode, symname, strlen(symname) + 1);
+	if (res) {
+		inode->i_nlink = 0;
+		hfsplus_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+
+	mark_inode_dirty(inode);
+	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+
+	if (!res) {
+		hfsplus_instantiate(dentry, inode, inode->i_ino);
+		mark_inode_dirty(inode);
+	}
+
+	return res;
+}
+
+static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
+			 int mode, dev_t rdev)
+{
+	struct super_block *sb;
+	struct inode *inode;
+	int res;
+
+	sb = dir->i_sb;
+	inode = hfsplus_new_inode(sb, mode);
+	if (!inode)
+		return -ENOSPC;
+
+	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res) {
+		inode->i_nlink = 0;
+		hfsplus_delete_inode(inode);
+		iput(inode);
+		return res;
+	}
+	init_special_inode(inode, mode, rdev);
+	hfsplus_instantiate(dentry, inode, inode->i_ino);
+	mark_inode_dirty(inode);
+
+	return 0;
+}
+
+static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
+			  struct inode *new_dir, struct dentry *new_dentry)
+{
+	int res;
+
+	/* Unlink destination if it already exists */
+	if (new_dentry->d_inode) {
+		res = hfsplus_unlink(new_dir, new_dentry);
+		if (res)
+			return res;
+	}
+
+	res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata,
+				 old_dir, &old_dentry->d_name,
+				 new_dir, &new_dentry->d_name);
+	if (!res)
+		new_dentry->d_fsdata = old_dentry->d_fsdata;
+	return res;
+}
+
+struct inode_operations hfsplus_dir_inode_operations = {
+	.lookup		= hfsplus_lookup,
+	.create		= hfsplus_create,
+	.link		= hfsplus_link,
+	.unlink		= hfsplus_unlink,
+	.mkdir		= hfsplus_mkdir,
+	.rmdir		= hfsplus_rmdir,
+	.symlink	= hfsplus_symlink,
+	.mknod		= hfsplus_mknod,
+	.rename		= hfsplus_rename,
+};
+
+struct file_operations hfsplus_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= hfsplus_readdir,
+	.ioctl          = hfsplus_ioctl,
+	.llseek		= generic_file_llseek,
+	.release	= hfsplus_dir_release,
+};
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
new file mode 100644
index 0000000..376498c
--- /dev/null
+++ b/fs/hfsplus/extents.c
@@ -0,0 +1,505 @@
+/*
+ *  linux/fs/hfsplus/extents.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handling of extents, both in the catalog and in the extents overflow tree
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/version.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+/* Compare two extent keys; returns 0 if equal, negative/positive otherwise */
+int hfsplus_ext_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
+{
+	__be32 k1id, k2id;
+	__be32 k1s, k2s;
+
+	k1id = k1->ext.cnid;
+	k2id = k2->ext.cnid;
+	if (k1id != k2id)
+		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;
+
+	if (k1->ext.fork_type != k2->ext.fork_type)
+		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;
+
+	k1s = k1->ext.start_block;
+	k2s = k2->ext.start_block;
+	if (k1s == k2s)
+		return 0;
+	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
+}
+
+static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
+				  u32 block, u8 type)
+{
+	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
+	key->ext.cnid = cpu_to_be32(cnid);
+	key->ext.start_block = cpu_to_be32(block);
+	key->ext.fork_type = type;
+	key->ext.pad = 0;
+}
+
+static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
+{
+	int i;
+	u32 count;
+
+	for (i = 0; i < 8; ext++, i++) {
+		count = be32_to_cpu(ext->block_count);
+		if (off < count)
+			return be32_to_cpu(ext->start_block) + off;
+		off -= count;
+	}
+	/* panic? */
+	return 0;
+}
+
+static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
+{
+	int i;
+	u32 count = 0;
+
+	for (i = 0; i < 8; ext++, i++)
+		count += be32_to_cpu(ext->block_count);
+	return count;
+}
+
+static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
+{
+	int i;
+
+	ext += 7;
+	for (i = 0; i < 7; ext--, i++)
+		if (ext->block_count)
+			break;
+	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
+}
+
+static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
+{
+	int res;
+
+	hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
+			      HFSPLUS_IS_RSRC(inode) ?  HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+	res = hfs_brec_find(fd);
+	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
+		if (res != -ENOENT)
+			return;
+		hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
+		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+	} else {
+		if (res)
+			return;
+		hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
+		HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
+	}
+}
+
+void hfsplus_ext_write_extent(struct inode *inode)
+{
+	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
+		struct hfs_find_data fd;
+
+		hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
+		__hfsplus_ext_write_extent(inode, &fd);
+		hfs_find_exit(&fd);
+	}
+}
+
+static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
+					    struct hfsplus_extent *extent,
+					    u32 cnid, u32 block, u8 type)
+{
+	int res;
+
+	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
+	fd->key->ext.cnid = 0;
+	res = hfs_brec_find(fd);
+	if (res && res != -ENOENT)
+		return res;
+	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
+	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
+		return -ENOENT;
+	if (fd->entrylength != sizeof(hfsplus_extent_rec))
+		return -EIO;
+	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
+	return 0;
+}
+
+static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
+{
+	int res;
+
+	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
+		__hfsplus_ext_write_extent(inode, fd);
+
+	res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
+					block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+	if (!res) {
+		HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
+		HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
+	} else {
+		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
+		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+	}
+	return res;
+}
+
+static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
+{
+	struct hfs_find_data fd;
+	int res;
+
+	if (block >= HFSPLUS_I(inode).cached_start &&
+	    block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
+		return 0;
+
+	hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
+	res = __hfsplus_ext_cache_extent(&fd, inode, block);
+	hfs_find_exit(&fd);
+	return res;
+}
+
+/* Get a block at iblock for inode, possibly allocating if create */
+int hfsplus_get_block(struct inode *inode, sector_t iblock,
+		      struct buffer_head *bh_result, int create)
+{
+	struct super_block *sb;
+	int res = -EIO;
+	u32 ablock, dblock, mask;
+	int shift;
+
+	sb = inode->i_sb;
+
+	/* Convert inode block to disk allocation block */
+	shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
+	ablock = iblock >> HFSPLUS_SB(sb).fs_shift;
+
+	if (iblock >= HFSPLUS_I(inode).fs_blocks) {
+		if (iblock > HFSPLUS_I(inode).fs_blocks || !create)
+			return -EIO;
+		if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
+			res = hfsplus_file_extend(inode);
+			if (res)
+				return res;
+		}
+	} else
+		create = 0;
+
+	if (ablock < HFSPLUS_I(inode).first_blocks) {
+		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
+		goto done;
+	}
+
+	down(&HFSPLUS_I(inode).extents_lock);
+	res = hfsplus_ext_read_extent(inode, ablock);
+	if (!res) {
+		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
+					     HFSPLUS_I(inode).cached_start);
+	} else {
+		up(&HFSPLUS_I(inode).extents_lock);
+		return -EIO;
+	}
+	up(&HFSPLUS_I(inode).extents_lock);
+
+done:
+	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
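+	/*
+	 * dblock is an allocation block: scale it to the filesystem block
+	 * size, then add the volume's start offset and the position within
+	 * the allocation block to get the block to map.
+	 */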
+	mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
+	map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
+	if (create) {
+		set_buffer_new(bh_result);
+		HFSPLUS_I(inode).phys_size += sb->s_blocksize;
+		HFSPLUS_I(inode).fs_blocks++;
+		inode_add_bytes(inode, sb->s_blocksize);
+		mark_inode_dirty(inode);
+	}
+	return 0;
+}
+
+static void hfsplus_dump_extent(struct hfsplus_extent *extent)
+{
+	int i;
+
+	dprint(DBG_EXTENT, "   ");
+	for (i = 0; i < 8; i++)
+		dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block),
+				 be32_to_cpu(extent[i].block_count));
+	dprint(DBG_EXTENT, "\n");
+}
+
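+/*
+ * Append 'block_count' freshly allocated blocks at logical offset 'offset'
+ * of an eight-slot extent record, either by growing the slot that ends there
+ * or by starting a new one.  Returns -ENOSPC when all slots are used.
+ */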
+static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
+			      u32 alloc_block, u32 block_count)
+{
+	u32 count, start;
+	int i;
+
+	hfsplus_dump_extent(extent);
+	for (i = 0; i < 8; extent++, i++) {
+		count = be32_to_cpu(extent->block_count);
+		if (offset == count) {
+			start = be32_to_cpu(extent->start_block);
+			if (alloc_block != start + count) {
+				if (++i >= 8)
+					return -ENOSPC;
+				extent++;
+				extent->start_block = cpu_to_be32(alloc_block);
+			} else
+				block_count += count;
+			extent->block_count = cpu_to_be32(block_count);
+			return 0;
+		} else if (offset < count)
+			break;
+		offset -= count;
+	}
+	/* panic? */
+	return -EIO;
+}
+
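+/*
+ * Free 'block_nr' allocation blocks from an extent record, walking backwards
+ * from the slot that ends at logical offset 'offset' within the record.
+ */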
+static int hfsplus_free_extents(struct super_block *sb,
+				struct hfsplus_extent *extent,
+				u32 offset, u32 block_nr)
+{
+	u32 count, start;
+	int i;
+
+	hfsplus_dump_extent(extent);
+	for (i = 0; i < 8; extent++, i++) {
+		count = be32_to_cpu(extent->block_count);
+		if (offset == count)
+			goto found;
+		else if (offset < count)
+			break;
+		offset -= count;
+	}
+	/* panic? */
+	return -EIO;
+found:
+	for (;;) {
+		start = be32_to_cpu(extent->start_block);
+		if (count <= block_nr) {
+			hfsplus_block_free(sb, start, count);
+			extent->block_count = 0;
+			extent->start_block = 0;
+			block_nr -= count;
+		} else {
+			count -= block_nr;
+			hfsplus_block_free(sb, start + count, block_nr);
+			extent->block_count = cpu_to_be32(count);
+			block_nr = 0;
+		}
+		if (!block_nr || !i)
+			return 0;
+		i--;
+		extent--;
+		count = be32_to_cpu(extent->block_count);
+	}
+}
+
+int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
+{
+	struct hfs_find_data fd;
+	hfsplus_extent_rec ext_entry;
+	u32 total_blocks, blocks, start;
+	int res, i;
+
+	total_blocks = be32_to_cpu(fork->total_blocks);
+	if (!total_blocks)
+		return 0;
+
+	blocks = 0;
+	for (i = 0; i < 8; i++)
+		blocks += be32_to_cpu(fork->extents[i].block_count);
+
+	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
+	if (res)
+		return res;
+	if (total_blocks == blocks)
+		return 0;
+
+	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+	do {
+		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
+						total_blocks, type);
+		if (res)
+			break;
+		start = be32_to_cpu(fd.key->ext.start_block);
+		hfsplus_free_extents(sb, ext_entry,
+				     total_blocks - start,
+				     total_blocks);
+		hfs_brec_remove(&fd);
+		total_blocks = start;
+	} while (total_blocks > blocks);
+	hfs_find_exit(&fd);
+
+	return res;
+}
+
+int hfsplus_file_extend(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	u32 start, len, goal;
+	int res;
+
+	if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
+		// extend alloc file
+		printk("extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
+			HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
+		return -ENOSPC;
+		//BUG();
+	}
+
+	down(&HFSPLUS_I(inode).extents_lock);
+	if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
+		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
+	else {
+		res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks);
+		if (res)
+			goto out;
+		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
+	}
+
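+	/*
+	 * Try to allocate one clump of blocks right behind the file's last
+	 * extent; if that area is full, retry from the start of the volume up
+	 * to the original goal.
+	 */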
+	len = HFSPLUS_I(inode).clump_blocks;
+	start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
+	if (start >= HFSPLUS_SB(sb).total_blocks) {
+		start = hfsplus_block_allocate(sb, goal, 0, &len);
+		if (start >= goal) {
+			res = -ENOSPC;
+			goto out;
+		}
+	}
+
+	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+	if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
+		if (!HFSPLUS_I(inode).first_blocks) {
+			dprint(DBG_EXTENT, "first extents\n");
+			/* no extents yet */
+			HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
+			HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
+			res = 0;
+		} else {
+			/* try to append to extents in inode */
+			res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
+						 HFSPLUS_I(inode).alloc_blocks,
+						 start, len);
+			if (res == -ENOSPC)
+				goto insert_extent;
+		}
+		if (!res) {
+			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
+			HFSPLUS_I(inode).first_blocks += len;
+		}
+	} else {
+		res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
+					 HFSPLUS_I(inode).alloc_blocks -
+					 HFSPLUS_I(inode).cached_start,
+					 start, len);
+		if (!res) {
+			hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
+			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
+			HFSPLUS_I(inode).cached_blocks += len;
+		} else if (res == -ENOSPC)
+			goto insert_extent;
+	}
+out:
+	up(&HFSPLUS_I(inode).extents_lock);
+	if (!res) {
+		HFSPLUS_I(inode).alloc_blocks += len;
+		mark_inode_dirty(inode);
+	}
+	return res;
+
+insert_extent:
+	dprint(DBG_EXTENT, "insert new extent\n");
+	hfsplus_ext_write_extent(inode);
+
+	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
+	HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
+	HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
+	hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
+	HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
+	HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
+	HFSPLUS_I(inode).cached_blocks = len;
+
+	res = 0;
+	goto out;
+}
+
+void hfsplus_file_truncate(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct hfs_find_data fd;
+	u32 alloc_cnt, blk_cnt, start;
+	int res;
+
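+	/*
+	 * Growing the file only zero-fills the tail of the last page; when
+	 * shrinking, walk the extent records from the end of the file and
+	 * give the freed allocation blocks back to the volume bitmap.
+	 */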
+	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
+	       (long long)HFSPLUS_I(inode).phys_size, inode->i_size);
+	if (inode->i_size > HFSPLUS_I(inode).phys_size) {
+		struct address_space *mapping = inode->i_mapping;
+		struct page *page;
+		u32 size = inode->i_size - 1;
+		int res;
+
+		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
+		if (!page)
+			return;
+		size &= PAGE_CACHE_SIZE - 1;
+		size++;
+		res = mapping->a_ops->prepare_write(NULL, page, size, size);
+		if (!res)
+			res = mapping->a_ops->commit_write(NULL, page, size, size);
+		if (res)
+			inode->i_size = HFSPLUS_I(inode).phys_size;
+		unlock_page(page);
+		page_cache_release(page);
+		mark_inode_dirty(inode);
+		return;
+	}
+	blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
+	alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
+	if (blk_cnt == alloc_cnt)
+		goto out;
+
+	down(&HFSPLUS_I(inode).extents_lock);
+	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+	while (1) {
+		if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
+			hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents,
+					     alloc_cnt, alloc_cnt - blk_cnt);
+			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
+			HFSPLUS_I(inode).first_blocks = blk_cnt;
+			break;
+		}
+		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+		if (res)
+			break;
+		start = HFSPLUS_I(inode).cached_start;
+		hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents,
+				     alloc_cnt - start, alloc_cnt - blk_cnt);
+		hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
+		if (blk_cnt > start) {
+			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
+			break;
+		}
+		alloc_cnt = start;
+		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
+		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+		hfs_brec_remove(&fd);
+	}
+	hfs_find_exit(&fd);
+	up(&HFSPLUS_I(inode).extents_lock);
+
+	HFSPLUS_I(inode).alloc_blocks = blk_cnt;
+out:
+	HFSPLUS_I(inode).phys_size = inode->i_size;
+	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
+	mark_inode_dirty(inode);
+}
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
new file mode 100644
index 0000000..533094a
--- /dev/null
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -0,0 +1,414 @@
+/*
+ *  linux/include/linux/hfsplus_fs.h
+ *
+ * Copyright (C) 1999
+ * Brad Boyer (flar@pants.nu)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ */
+
+#ifndef _LINUX_HFSPLUS_FS_H
+#define _LINUX_HFSPLUS_FS_H
+
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/buffer_head.h>
+#include "hfsplus_raw.h"
+
+#define DBG_BNODE_REFS	0x00000001
+#define DBG_BNODE_MOD	0x00000002
+#define DBG_CAT_MOD	0x00000004
+#define DBG_INODE	0x00000008
+#define DBG_SUPER	0x00000010
+#define DBG_EXTENT	0x00000020
+#define DBG_BITMAP	0x00000040
+
+//#define DBG_MASK	(DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
+//#define DBG_MASK	(DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
+//#define DBG_MASK	(DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
+#define DBG_MASK	(0)
+
+#define dprint(flg, fmt, args...) \
+	do { if (flg & DBG_MASK) printk(fmt , ## args); } while (0)
+
+/* Runtime config options */
+#define HFSPLUS_DEF_CR_TYPE    0x3F3F3F3F  /* '????' */
+
+#define HFSPLUS_TYPE_DATA 0x00
+#define HFSPLUS_TYPE_RSRC 0xFF
+
+typedef int (*btree_keycmp)(hfsplus_btree_key *, hfsplus_btree_key *);
+
+#define NODE_HASH_SIZE	256
+
+/* An HFS+ BTree held in memory */
+struct hfs_btree {
+	struct super_block *sb;
+	struct inode *inode;
+	btree_keycmp keycmp;
+
+	u32 cnid;
+	u32 root;
+	u32 leaf_count;
+	u32 leaf_head;
+	u32 leaf_tail;
+	u32 node_count;
+	u32 free_nodes;
+	u32 attributes;
+
+	unsigned int node_size;
+	unsigned int node_size_shift;
+	unsigned int max_key_len;
+	unsigned int depth;
+
+	//unsigned int map1_size, map_size;
+	struct semaphore tree_lock;
+
+	unsigned int pages_per_bnode;
+	spinlock_t hash_lock;
+	struct hfs_bnode *node_hash[NODE_HASH_SIZE];
+	int node_hash_cnt;
+};
+
+struct page;
+
+/* An HFS+ BTree node in memory */
+struct hfs_bnode {
+	struct hfs_btree *tree;
+
+	u32 prev;
+	u32 this;
+	u32 next;
+	u32 parent;
+
+	u16 num_recs;
+	u8 type;
+	u8 height;
+
+	struct hfs_bnode *next_hash;
+	unsigned long flags;
+	wait_queue_head_t lock_wq;
+	atomic_t refcnt;
+	unsigned int page_offset;
+	struct page *page[0];
+};
+
+#define HFS_BNODE_LOCK		0
+#define HFS_BNODE_ERROR		1
+#define HFS_BNODE_NEW		2
+#define HFS_BNODE_DIRTY		3
+#define HFS_BNODE_DELETED	4
+
+/*
+ * HFS+ superblock info (built from Volume Header on disk)
+ */
+
+struct hfsplus_vh;
+struct hfs_btree;
+
+struct hfsplus_sb_info {
+	struct buffer_head *s_vhbh;
+	struct hfsplus_vh *s_vhdr;
+	struct hfs_btree *ext_tree;
+	struct hfs_btree *cat_tree;
+	struct hfs_btree *attr_tree;
+	struct inode *alloc_file;
+	struct inode *hidden_dir;
+	struct nls_table *nls;
+
+	/* Runtime variables */
+	u32 blockoffset;
+	u32 sect_count;
+	int fs_shift;
+
+	/* Stuff in host order from Vol Header */
+	u32 alloc_blksz;
+	int alloc_blksz_shift;
+	u32 total_blocks;
+	u32 free_blocks;
+	u32 next_alloc;
+	u32 next_cnid;
+	u32 file_count;
+	u32 folder_count;
+	u32 data_clump_blocks, rsrc_clump_blocks;
+
+	/* Config options */
+	u32 creator;
+	u32 type;
+
+	umode_t umask;
+	uid_t uid;
+	gid_t gid;
+
+	int part, session;
+
+	unsigned long flags;
+
+	atomic_t inode_cnt;
+	u32 last_inode_cnt;
+
+	struct hlist_head rsrc_inodes;
+};
+
+#define HFSPLUS_SB_WRITEBACKUP	0x0001
+#define HFSPLUS_SB_NODECOMPOSE	0x0002
+
+
+struct hfsplus_inode_info {
+	struct semaphore extents_lock;
+	u32 clump_blocks, alloc_blocks;
+	sector_t fs_blocks;
+	/* Allocation extents from catalog record or volume header */
+	hfsplus_extent_rec first_extents;
+	u32 first_blocks;
+	hfsplus_extent_rec cached_extents;
+	u32 cached_start, cached_blocks;
+	atomic_t opencnt;
+
+	struct inode *rsrc_inode;
+	unsigned long flags;
+
+	/* Device number in hfsplus_permissions in catalog */
+	u32 dev;
+	/* BSD system and user file flags */
+	u8 rootflags;
+	u8 userflags;
+
+	struct list_head open_dir_list;
+	loff_t phys_size;
+	struct inode vfs_inode;
+};
+
+#define HFSPLUS_FLG_RSRC	0x0001
+#define HFSPLUS_FLG_EXT_DIRTY	0x0002
+#define HFSPLUS_FLG_EXT_NEW	0x0004
+
+#define HFSPLUS_IS_DATA(inode)   (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC))
+#define HFSPLUS_IS_RSRC(inode)   (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)
+
+struct hfs_find_data {
+	/* filled by caller */
+	hfsplus_btree_key *search_key;
+	hfsplus_btree_key *key;
+	/* filled by find */
+	struct hfs_btree *tree;
+	struct hfs_bnode *bnode;
+	/* filled by findrec */
+	int record;
+	int keyoffset, keylength;
+	int entryoffset, entrylength;
+};
+
+struct hfsplus_readdir_data {
+	struct list_head list;
+	struct file *file;
+	struct hfsplus_cat_key key;
+};
+
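+/*
+ * The b-tree routines below use the same hfs_-prefixed names as the HFS
+ * driver; remap them to hfsplus_ symbols so both modules can be loaded at
+ * the same time.
+ */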
+#define hfs_btree_open hfsplus_btree_open
+#define hfs_btree_close hfsplus_btree_close
+#define hfs_btree_write hfsplus_btree_write
+#define hfs_bmap_alloc hfsplus_bmap_alloc
+#define hfs_bmap_free hfsplus_bmap_free
+#define hfs_bnode_read hfsplus_bnode_read
+#define hfs_bnode_read_u16 hfsplus_bnode_read_u16
+#define hfs_bnode_read_u8 hfsplus_bnode_read_u8
+#define hfs_bnode_read_key hfsplus_bnode_read_key
+#define hfs_bnode_write hfsplus_bnode_write
+#define hfs_bnode_write_u16 hfsplus_bnode_write_u16
+#define hfs_bnode_clear hfsplus_bnode_clear
+#define hfs_bnode_copy hfsplus_bnode_copy
+#define hfs_bnode_move hfsplus_bnode_move
+#define hfs_bnode_dump hfsplus_bnode_dump
+#define hfs_bnode_unlink hfsplus_bnode_unlink
+#define hfs_bnode_findhash hfsplus_bnode_findhash
+#define hfs_bnode_find hfsplus_bnode_find
+#define hfs_bnode_unhash hfsplus_bnode_unhash
+#define hfs_bnode_free hfsplus_bnode_free
+#define hfs_bnode_create hfsplus_bnode_create
+#define hfs_bnode_get hfsplus_bnode_get
+#define hfs_bnode_put hfsplus_bnode_put
+#define hfs_brec_lenoff hfsplus_brec_lenoff
+#define hfs_brec_keylen hfsplus_brec_keylen
+#define hfs_brec_insert hfsplus_brec_insert
+#define hfs_brec_remove hfsplus_brec_remove
+#define hfs_find_init hfsplus_find_init
+#define hfs_find_exit hfsplus_find_exit
+#define __hfs_brec_find __hplusfs_brec_find
+#define hfs_brec_find hfsplus_brec_find
+#define hfs_brec_read hfsplus_brec_read
+#define hfs_brec_goto hfsplus_brec_goto
+#define hfs_part_find hfsplus_part_find
+
+/*
+ * definitions for ext2 flag ioctls (linux really needs a generic
+ * interface for this).
+ */
+
+/* ext2 ioctls (EXT2_IOC_GETFLAGS and EXT2_IOC_SETFLAGS) to support
+ * chattr/lsattr */
+#define HFSPLUS_IOC_EXT2_GETFLAGS	_IOR('f', 1, long)
+#define HFSPLUS_IOC_EXT2_SETFLAGS	_IOW('f', 2, long)
+
+#define EXT2_FLAG_IMMUTABLE		0x00000010 /* Immutable file */
+#define EXT2_FLAG_APPEND		0x00000020 /* writes to file may only append */
+#define EXT2_FLAG_NODUMP		0x00000040 /* do not dump file */
+
+
+/*
+ * Functions in any *.c used in other files
+ */
+
+/* bitmap.c */
+int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *);
+int hfsplus_block_free(struct super_block *, u32, u32);
+
+/* btree.c */
+struct hfs_btree *hfs_btree_open(struct super_block *, u32);
+void hfs_btree_close(struct hfs_btree *);
+void hfs_btree_write(struct hfs_btree *);
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *);
+void hfs_bmap_free(struct hfs_bnode *);
+
+/* bnode.c */
+void hfs_bnode_read(struct hfs_bnode *, void *, int, int);
+u16 hfs_bnode_read_u16(struct hfs_bnode *, int);
+u8 hfs_bnode_read_u8(struct hfs_bnode *, int);
+void hfs_bnode_read_key(struct hfs_bnode *, void *, int);
+void hfs_bnode_write(struct hfs_bnode *, void *, int, int);
+void hfs_bnode_write_u16(struct hfs_bnode *, int, u16);
+void hfs_bnode_clear(struct hfs_bnode *, int, int);
+void hfs_bnode_copy(struct hfs_bnode *, int,
+		    struct hfs_bnode *, int, int);
+void hfs_bnode_move(struct hfs_bnode *, int, int, int);
+void hfs_bnode_dump(struct hfs_bnode *);
+void hfs_bnode_unlink(struct hfs_bnode *);
+struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *, u32);
+struct hfs_bnode *hfs_bnode_find(struct hfs_btree *, u32);
+void hfs_bnode_unhash(struct hfs_bnode *);
+void hfs_bnode_free(struct hfs_bnode *);
+struct hfs_bnode *hfs_bnode_create(struct hfs_btree *, u32);
+void hfs_bnode_get(struct hfs_bnode *);
+void hfs_bnode_put(struct hfs_bnode *);
+
+/* brec.c */
+u16 hfs_brec_lenoff(struct hfs_bnode *, u16, u16 *);
+u16 hfs_brec_keylen(struct hfs_bnode *, u16);
+int hfs_brec_insert(struct hfs_find_data *, void *, int);
+int hfs_brec_remove(struct hfs_find_data *);
+
+/* bfind.c */
+int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
+void hfs_find_exit(struct hfs_find_data *);
+int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
+int hfs_brec_find(struct hfs_find_data *);
+int hfs_brec_read(struct hfs_find_data *, void *, int);
+int hfs_brec_goto(struct hfs_find_data *, int);
+
+/* catalog.c */
+int hfsplus_cat_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *);
+void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *, u32, struct qstr *);
+int hfsplus_find_cat(struct super_block *, u32, struct hfs_find_data *);
+int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
+int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
+int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
+		       struct inode *, struct qstr *);
+
+/* extents.c */
+int hfsplus_ext_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *);
+void hfsplus_ext_write_extent(struct inode *);
+int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
+int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int);
+int hfsplus_file_extend(struct inode *);
+void hfsplus_file_truncate(struct inode *);
+
+/* inode.c */
+extern struct address_space_operations hfsplus_aops;
+extern struct address_space_operations hfsplus_btree_aops;
+
+void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
+void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
+int hfsplus_cat_read_inode(struct inode *, struct hfs_find_data *);
+int hfsplus_cat_write_inode(struct inode *);
+struct inode *hfsplus_new_inode(struct super_block *, int);
+void hfsplus_delete_inode(struct inode *);
+
+/* ioctl.c */
+int hfsplus_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+		  unsigned long arg);
+int hfsplus_setxattr(struct dentry *dentry, const char *name,
+		     const void *value, size_t size, int flags);
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+			 void *value, size_t size);
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+/* options.c */
+int parse_options(char *, struct hfsplus_sb_info *);
+void fill_defaults(struct hfsplus_sb_info *);
+
+/* tables.c */
+extern u16 hfsplus_case_fold_table[];
+extern u16 hfsplus_decompose_table[];
+extern u16 hfsplus_compose_table[];
+
+/* unicode.c */
+int hfsplus_unistrcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
+int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *);
+int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int);
+
+/* wrapper.c */
+int hfsplus_read_wrapper(struct super_block *);
+
+int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+
+/* access macros */
+/*
+static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
+{
+	return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
+}
+*/
+#define HFSPLUS_SB(super)	(*(struct hfsplus_sb_info *)(super)->s_fs_info)
+#define HFSPLUS_I(inode)	(*list_entry(inode, struct hfsplus_inode_info, vfs_inode))
+
+#if 1
+#define hfsplus_kmap(p)		({ struct page *__p = (p); kmap(__p); })
+#define hfsplus_kunmap(p)	({ struct page *__p = (p); kunmap(__p); __p; })
+#else
+#define hfsplus_kmap(p)		kmap(p)
+#define hfsplus_kunmap(p)	kunmap(p)
+#endif
+
+#define sb_bread512(sb, sec, data) ({			\
+	struct buffer_head *__bh;			\
+	sector_t __block;				\
+	loff_t __start;					\
+	int __offset;					\
+							\
+	__start = (loff_t)(sec) << HFSPLUS_SECTOR_SHIFT;\
+	__block = __start >> (sb)->s_blocksize_bits;	\
+	__offset = __start & ((sb)->s_blocksize - 1);	\
+	__bh = sb_bread((sb), __block);			\
+	if (likely(__bh != NULL))			\
+		data = (void *)(__bh->b_data + __offset);\
+	else						\
+		data = NULL;				\
+	__bh;						\
+})
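+
+/*
+ * Example of the arithmetic above, assuming a 4096-byte filesystem block
+ * size (illustration only): reading 512-byte sector 2 gives __start = 1024,
+ * __block = 0 and __offset = 1024, so 'data' ends up pointing 1024 bytes
+ * into the buffer for block 0.  A possible use:
+ *
+ *	struct hfsplus_vh *vhdr;
+ *	struct buffer_head *bh;
+ *
+ *	bh = sb_bread512(sb, HFSPLUS_VOLHEAD_SECTOR, vhdr);
+ *	if (!bh)
+ *		return -EIO;
+ */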
+
+/* time macros */
+#define __hfsp_mt2ut(t)		(be32_to_cpu(t) - 2082844800U)
+#define __hfsp_ut2mt(t)		(cpu_to_be32(t + 2082844800U))
+
+/* compatibility */
+#define hfsp_mt2ut(t)		(struct timespec){ .tv_sec = __hfsp_mt2ut(t) }
+#define hfsp_ut2mt(t)		__hfsp_ut2mt((t).tv_sec)
+#define hfsp_now2mt()		__hfsp_ut2mt(get_seconds())
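+
+/*
+ * The constant 2082844800 is the number of seconds between the HFS+ epoch
+ * (1904-01-01 00:00 UTC) and the Unix epoch (1970-01-01): 66 years of 365
+ * days plus 17 leap days = 24107 days * 86400 s.  A minimal sketch of the
+ * conversion in host byte order (the macros above additionally handle the
+ * big-endian on-disk representation):
+ *
+ *	time_t unix_secs = (time_t)mac_secs - 2082844800U;
+ *	u32 back_to_mac  = (u32)(unix_secs + 2082844800U);
+ */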
+
+#define kdev_t_to_nr(x)		(x)
+
+#endif
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
new file mode 100644
index 0000000..5bad37c
--- /dev/null
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -0,0 +1,326 @@
+/*
+ *  linux/fs/hfsplus/hfsplus_raw.h
+ *
+ * Copyright (C) 1999
+ * Brad Boyer (flar@pants.nu)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Format of structures on disk
+ * Information taken from Apple Technote #1150 (HFS Plus Volume Format)
+ *
+ */
+
+#ifndef _LINUX_HFSPLUS_RAW_H
+#define _LINUX_HFSPLUS_RAW_H
+
+#include <linux/types.h>
+
+#define __packed __attribute__ ((packed))
+
+/* Some constants */
+#define HFSPLUS_SECTOR_SIZE        512
+#define HFSPLUS_SECTOR_SHIFT         9
+#define HFSPLUS_VOLHEAD_SECTOR       2
+#define HFSPLUS_VOLHEAD_SIG     0x482b
+#define HFSPLUS_SUPER_MAGIC     0x482b
+#define HFSPLUS_CURRENT_VERSION      4
+
+#define HFSP_WRAP_MAGIC         0x4244
+#define HFSP_WRAP_ATTRIB_SLOCK  0x8000
+#define HFSP_WRAP_ATTRIB_SPARED 0x0200
+
+#define HFSP_WRAPOFF_SIG          0x00
+#define HFSP_WRAPOFF_ATTRIB       0x0A
+#define HFSP_WRAPOFF_ABLKSIZE     0x14
+#define HFSP_WRAPOFF_ABLKSTART    0x1C
+#define HFSP_WRAPOFF_EMBEDSIG     0x7C
+#define HFSP_WRAPOFF_EMBEDEXT     0x7E
+
+#define HFSP_HIDDENDIR_NAME	"\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
+
+#define HFSP_HARDLINK_TYPE	0x686c6e6b	/* 'hlnk' */
+#define HFSP_HFSPLUS_CREATOR	0x6866732b	/* 'hfs+' */
+
+#define HFSP_MOUNT_VERSION	0x482b4c78	/* 'H+Lx' */
+
+/* Structures used on disk */
+
+typedef __be32 hfsplus_cnid;
+typedef __be16 hfsplus_unichr;
+
+/* A "string" as used in filenames, etc. */
+struct hfsplus_unistr {
+	__be16 length;
+	hfsplus_unichr unicode[255];
+} __packed;
+
+#define HFSPLUS_MAX_STRLEN 255
+
+/* POSIX permissions */
+struct hfsplus_perm {
+	__be32 owner;
+	__be32 group;
+	u8  rootflags;
+	u8  userflags;
+	__be16 mode;
+	__be32 dev;
+} __packed;
+
+#define HFSPLUS_FLG_NODUMP	0x01
+#define HFSPLUS_FLG_IMMUTABLE	0x02
+#define HFSPLUS_FLG_APPEND	0x04
+
+/* A single contiguous area of a file */
+struct hfsplus_extent {
+	__be32 start_block;
+	__be32 block_count;
+} __packed;
+typedef struct hfsplus_extent hfsplus_extent_rec[8];
+
+/* Information for a "Fork" in a file */
+struct hfsplus_fork_raw {
+	__be64 total_size;
+	__be32 clump_size;
+	__be32 total_blocks;
+	hfsplus_extent_rec extents;
+} __packed;
+
+/* HFS+ Volume Header */
+struct hfsplus_vh {
+	__be16 signature;
+	__be16 version;
+	__be32 attributes;
+	__be32 last_mount_vers;
+	u32 reserved;
+
+	__be32 create_date;
+	__be32 modify_date;
+	__be32 backup_date;
+	__be32 checked_date;
+
+	__be32 file_count;
+	__be32 folder_count;
+
+	__be32 blocksize;
+	__be32 total_blocks;
+	__be32 free_blocks;
+
+	__be32 next_alloc;
+	__be32 rsrc_clump_sz;
+	__be32 data_clump_sz;
+	hfsplus_cnid next_cnid;
+
+	__be32 write_count;
+	__be64 encodings_bmp;
+
+	u8 finder_info[32];
+
+	struct hfsplus_fork_raw alloc_file;
+	struct hfsplus_fork_raw ext_file;
+	struct hfsplus_fork_raw cat_file;
+	struct hfsplus_fork_raw attr_file;
+	struct hfsplus_fork_raw start_file;
+} __packed;
+
+/* HFS+ volume attributes */
+#define HFSPLUS_VOL_UNMNT     (1 << 8)
+#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
+#define HFSPLUS_VOL_NOCACHE   (1 << 10)
+#define HFSPLUS_VOL_INCNSTNT  (1 << 11)
+#define HFSPLUS_VOL_SOFTLOCK  (1 << 15)
+
+/* HFS+ BTree node descriptor */
+struct hfs_bnode_desc {
+	__be32 next;
+	__be32 prev;
+	s8 type;
+	u8 height;
+	__be16 num_recs;
+	u16 reserved;
+} __packed;
+
+/* HFS+ BTree node types */
+#define HFS_NODE_INDEX	0x00
+#define HFS_NODE_HEADER	0x01
+#define HFS_NODE_MAP	0x02
+#define HFS_NODE_LEAF	0xFF
+
+/* HFS+ BTree header */
+struct hfs_btree_header_rec {
+	__be16 depth;
+	__be32 root;
+	__be32 leaf_count;
+	__be32 leaf_head;
+	__be32 leaf_tail;
+	__be16 node_size;
+	__be16 max_key_len;
+	__be32 node_count;
+	__be32 free_nodes;
+	u16 reserved1;
+	__be32 clump_size;
+	u8 btree_type;
+	u8 reserved2;
+	__be32 attributes;
+	u32 reserved3[16];
+} __packed;
+
+/* BTree attributes */
+#define HFS_TREE_BIGKEYS	2
+#define HFS_TREE_VARIDXKEYS	4
+
+/* HFS+ BTree misc info */
+#define HFSPLUS_TREE_HEAD 0
+#define HFSPLUS_NODE_MXSZ 32768
+
+/* Some special File ID numbers (stolen from hfs.h) */
+#define HFSPLUS_POR_CNID		1	/* Parent Of the Root */
+#define HFSPLUS_ROOT_CNID		2	/* ROOT directory */
+#define HFSPLUS_EXT_CNID		3	/* EXTents B-tree */
+#define HFSPLUS_CAT_CNID		4	/* CATalog B-tree */
+#define HFSPLUS_BAD_CNID		5	/* BAD blocks file */
+#define HFSPLUS_ALLOC_CNID		6	/* ALLOCation file */
+#define HFSPLUS_START_CNID		7	/* STARTup file */
+#define HFSPLUS_ATTR_CNID		8	/* ATTRibutes file */
+#define HFSPLUS_EXCH_CNID		15	/* ExchangeFiles temp id */
+#define HFSPLUS_FIRSTUSER_CNID		16	/* first available user id */
+
+/* HFS+ catalog entry key */
+struct hfsplus_cat_key {
+	__be16 key_len;
+	hfsplus_cnid parent;
+	struct hfsplus_unistr name;
+} __packed;
+
+
+/* Structs from hfs.h */
+struct hfsp_point {
+	__be16 v;
+	__be16 h;
+} __packed;
+
+struct hfsp_rect {
+	__be16 top;
+	__be16 left;
+	__be16 bottom;
+	__be16 right;
+} __packed;
+
+
+/* HFS directory info (stolen from hfs.h) */
+struct DInfo {
+	struct hfsp_rect frRect;
+	__be16 frFlags;
+	struct hfsp_point frLocation;
+	__be16 frView;
+} __packed;
+
+struct DXInfo {
+	struct hfsp_point frScroll;
+	__be32 frOpenChain;
+	__be16 frUnused;
+	__be16 frComment;
+	__be32 frPutAway;
+} __packed;
+
+/* HFS+ folder data (part of an hfsplus_cat_entry) */
+struct hfsplus_cat_folder {
+	__be16 type;
+	__be16 flags;
+	__be32 valence;
+	hfsplus_cnid id;
+	__be32 create_date;
+	__be32 content_mod_date;
+	__be32 attribute_mod_date;
+	__be32 access_date;
+	__be32 backup_date;
+	struct hfsplus_perm permissions;
+	struct DInfo user_info;
+	struct DXInfo finder_info;
+	__be32 text_encoding;
+	u32 reserved;
+} __packed;
+
+/* HFS file info (stolen from hfs.h) */
+struct FInfo {
+	__be32 fdType;
+	__be32 fdCreator;
+	__be16 fdFlags;
+	struct hfsp_point fdLocation;
+	__be16 fdFldr;
+} __packed;
+
+struct FXInfo {
+	__be16 fdIconID;
+	u8 fdUnused[8];
+	__be16 fdComment;
+	__be32 fdPutAway;
+} __packed;
+
+/* HFS+ file data (part of a cat_entry) */
+struct hfsplus_cat_file {
+	__be16 type;
+	__be16 flags;
+	u32 reserved1;
+	hfsplus_cnid id;
+	__be32 create_date;
+	__be32 content_mod_date;
+	__be32 attribute_mod_date;
+	__be32 access_date;
+	__be32 backup_date;
+	struct hfsplus_perm permissions;
+	struct FInfo user_info;
+	struct FXInfo finder_info;
+	__be32 text_encoding;
+	u32 reserved2;
+
+	struct hfsplus_fork_raw data_fork;
+	struct hfsplus_fork_raw rsrc_fork;
+} __packed;
+
+/* File attribute bits */
+#define HFSPLUS_FILE_LOCKED		0x0001
+#define HFSPLUS_FILE_THREAD_EXISTS	0x0002
+
+/* HFS+ catalog thread (part of a cat_entry) */
+struct hfsplus_cat_thread {
+	__be16 type;
+	s16 reserved;
+	hfsplus_cnid parentID;
+	struct hfsplus_unistr nodeName;
+} __packed;
+
+#define HFSPLUS_MIN_THREAD_SZ 10
+
+/* A data record in the catalog tree */
+typedef union {
+	__be16 type;
+	struct hfsplus_cat_folder folder;
+	struct hfsplus_cat_file file;
+	struct hfsplus_cat_thread thread;
+} __packed hfsplus_cat_entry;
+
+/* HFS+ catalog entry type */
+#define HFSPLUS_FOLDER         0x0001
+#define HFSPLUS_FILE           0x0002
+#define HFSPLUS_FOLDER_THREAD  0x0003
+#define HFSPLUS_FILE_THREAD    0x0004
+
+/* HFS+ extents tree key */
+struct hfsplus_ext_key {
+	__be16 key_len;
+	u8 fork_type;
+	u8 pad;
+	hfsplus_cnid cnid;
+	__be32 start_block;
+} __packed;
+
+#define HFSPLUS_EXT_KEYLEN 12
+
+/* HFS+ generic BTree key */
+typedef union {
+	__be16 key_len;
+	struct hfsplus_cat_key cat;
+	struct hfsplus_ext_key ext;
+} __packed hfsplus_btree_key;
+
+#endif
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
new file mode 100644
index 0000000..d564270
--- /dev/null
+++ b/fs/hfsplus/inode.c
@@ -0,0 +1,555 @@
+/*
+ *  linux/fs/hfsplus/inode.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Inode handling routines
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/version.h>
+#include <linux/mpage.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+static int hfsplus_readpage(struct file *file, struct page *page)
+{
+	//printk("readpage: %lu\n", page->index);
+	return block_read_full_page(page, hfsplus_get_block);
+}
+
+static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
+{
+	//printk("writepage: %lu\n", page->index);
+	return block_write_full_page(page, hfsplus_get_block, wbc);
+}
+
+static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return cont_prepare_write(page, from, to, hfsplus_get_block,
+		&HFSPLUS_I(page->mapping->host).phys_size);
+}
+
+static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, hfsplus_get_block);
+}
+
+static int hfsplus_releasepage(struct page *page, int mask)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	struct hfs_btree *tree;
+	struct hfs_bnode *node;
+	u32 nidx;
+	int i, res = 1;
+
+	switch (inode->i_ino) {
+	case HFSPLUS_EXT_CNID:
+		tree = HFSPLUS_SB(sb).ext_tree;
+		break;
+	case HFSPLUS_CAT_CNID:
+		tree = HFSPLUS_SB(sb).cat_tree;
+		break;
+	case HFSPLUS_ATTR_CNID:
+		tree = HFSPLUS_SB(sb).attr_tree;
+		break;
+	default:
+		BUG();
+		return 0;
+	}
+	if (tree->node_size >= PAGE_CACHE_SIZE) {
+		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+		spin_lock(&tree->hash_lock);
+		node = hfs_bnode_findhash(tree, nidx);
+		if (!node)
+			;
+		else if (atomic_read(&node->refcnt))
+			res = 0;
+		if (res && node) {
+			hfs_bnode_unhash(node);
+			hfs_bnode_free(node);
+		}
+		spin_unlock(&tree->hash_lock);
+	} else {
+		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		spin_lock(&tree->hash_lock);
+		do {
+			node = hfs_bnode_findhash(tree, nidx++);
+			if (!node)
+				continue;
+			if (atomic_read(&node->refcnt)) {
+				res = 0;
+				break;
+			}
+			hfs_bnode_unhash(node);
+			hfs_bnode_free(node);
+		} while (--i && nidx < tree->node_count);
+		spin_unlock(&tree->hash_lock);
+	}
+	//printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
+	return res ? try_to_free_buffers(page) : 0;
+}
+
+static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
+			      struct buffer_head *bh_result, int create)
+{
+	int ret;
+
+	ret = hfsplus_get_block(inode, iblock, bh_result, create);
+	if (!ret)
+		bh_result->b_size = (1 << inode->i_blkbits);
+	return ret;
+}
+
+static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
+		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
+
+	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+				  offset, nr_segs, hfsplus_get_blocks, NULL);
+}
+
+static int hfsplus_writepages(struct address_space *mapping,
+			      struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, hfsplus_get_block);
+}
+
+struct address_space_operations hfsplus_btree_aops = {
+	.readpage	= hfsplus_readpage,
+	.writepage	= hfsplus_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= hfsplus_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= hfsplus_bmap,
+	.releasepage	= hfsplus_releasepage,
+};
+
+struct address_space_operations hfsplus_aops = {
+	.readpage	= hfsplus_readpage,
+	.writepage	= hfsplus_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= hfsplus_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= hfsplus_bmap,
+	.direct_IO	= hfsplus_direct_IO,
+	.writepages	= hfsplus_writepages,
+};
+
+static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
+					  struct nameidata *nd)
+{
+	struct hfs_find_data fd;
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode = NULL;
+	int err;
+
+	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
+		goto out;
+
+	inode = HFSPLUS_I(dir).rsrc_inode;
+	if (inode)
+		goto out;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	inode->i_ino = dir->i_ino;
+	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
+	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
+	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;
+
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
+	if (!err)
+		err = hfsplus_cat_read_inode(inode, &fd);
+	hfs_find_exit(&fd);
+	if (err) {
+		iput(inode);
+		return ERR_PTR(err);
+	}
+	HFSPLUS_I(inode).rsrc_inode = dir;
+	HFSPLUS_I(dir).rsrc_inode = inode;
+	igrab(dir);
+	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
+	mark_inode_dirty(inode);
+	{
+	void hfsplus_inode_check(struct super_block *sb);
+	atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
+	hfsplus_inode_check(sb);
+	}
+out:
+	d_add(dentry, inode);
+	return NULL;
+}
+
+static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
+{
+	struct super_block *sb = inode->i_sb;
+	u16 mode;
+
+	mode = be16_to_cpu(perms->mode);
+
+	inode->i_uid = be32_to_cpu(perms->owner);
+	if (!inode->i_uid && !mode)
+		inode->i_uid = HFSPLUS_SB(sb).uid;
+
+	inode->i_gid = be32_to_cpu(perms->group);
+	if (!inode->i_gid && !mode)
+		inode->i_gid = HFSPLUS_SB(sb).gid;
+
+	if (dir) {
+		mode = mode ? (mode & S_IALLUGO) :
+			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
+		mode |= S_IFDIR;
+	} else if (!mode)
+		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
+			~(HFSPLUS_SB(sb).umask));
+	inode->i_mode = mode;
+
+	HFSPLUS_I(inode).rootflags = perms->rootflags;
+	HFSPLUS_I(inode).userflags = perms->userflags;
+	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
+		inode->i_flags |= S_IMMUTABLE;
+	else
+		inode->i_flags &= ~S_IMMUTABLE;
+	if (perms->rootflags & HFSPLUS_FLG_APPEND)
+		inode->i_flags |= S_APPEND;
+	else
+		inode->i_flags &= ~S_APPEND;
+}
+
+static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
+{
+	if (inode->i_flags & S_IMMUTABLE)
+		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
+	else
+		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
+	if (inode->i_flags & S_APPEND)
+		perms->rootflags |= HFSPLUS_FLG_APPEND;
+	else
+		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
+	perms->userflags = HFSPLUS_I(inode).userflags;
+	perms->mode = cpu_to_be16(inode->i_mode);
+	perms->owner = cpu_to_be32(inode->i_uid);
+	perms->group = cpu_to_be32(inode->i_gid);
+	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
+}
+
+static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	/* MAY_EXEC is also used for lookup; if no x bit is set, allow the
+	 * lookup anyway (open_exec performs the same test, so the file still
+	 * cannot be executed).  If an x bit is set, fall back to the standard
+	 * permission check.
+	 */
+	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
+		return 0;
+	return generic_permission(inode, mask, NULL);
+}
+
+
+static int hfsplus_file_open(struct inode *inode, struct file *file)
+{
+	if (HFSPLUS_IS_RSRC(inode))
+		inode = HFSPLUS_I(inode).rsrc_inode;
+	if (atomic_read(&file->f_count) != 1)
+		return 0;
+	atomic_inc(&HFSPLUS_I(inode).opencnt);
+	return 0;
+}
+
+static int hfsplus_file_release(struct inode *inode, struct file *file)
+{
+	struct super_block *sb = inode->i_sb;
+
+	if (HFSPLUS_IS_RSRC(inode))
+		inode = HFSPLUS_I(inode).rsrc_inode;
+	if (atomic_read(&file->f_count) != 0)
+		return 0;
+	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
+		down(&inode->i_sem);
+		hfsplus_file_truncate(inode);
+		if (inode->i_flags & S_DEAD) {
+			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
+			hfsplus_delete_inode(inode);
+		}
+		up(&inode->i_sem);
+	}
+	return 0;
+}
+
+extern struct inode_operations hfsplus_dir_inode_operations;
+extern struct file_operations hfsplus_dir_operations;
+
+static struct inode_operations hfsplus_file_inode_operations = {
+	.lookup		= hfsplus_file_lookup,
+	.truncate	= hfsplus_file_truncate,
+	.permission	= hfsplus_permission,
+	.setxattr	= hfsplus_setxattr,
+	.getxattr	= hfsplus_getxattr,
+	.listxattr	= hfsplus_listxattr,
+};
+
+static struct file_operations hfsplus_file_operations = {
+	.llseek 	= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.sendfile	= generic_file_sendfile,
+	.fsync		= file_fsync,
+	.open		= hfsplus_file_open,
+	.release	= hfsplus_file_release,
+	.ioctl          = hfsplus_ioctl,
+};
+
+struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
+{
+	struct inode *inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	{
+	void hfsplus_inode_check(struct super_block *sb);
+	atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
+	hfsplus_inode_check(sb);
+	}
+	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
+	inode->i_mode = mode;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = current->fsgid;
+	inode->i_nlink = 1;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_blksize = HFSPLUS_SB(sb).alloc_blksz;
+	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
+	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
+	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
+	HFSPLUS_I(inode).flags = 0;
+	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
+	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
+	HFSPLUS_I(inode).alloc_blocks = 0;
+	HFSPLUS_I(inode).first_blocks = 0;
+	HFSPLUS_I(inode).cached_start = 0;
+	HFSPLUS_I(inode).cached_blocks = 0;
+	HFSPLUS_I(inode).phys_size = 0;
+	HFSPLUS_I(inode).fs_blocks = 0;
+	HFSPLUS_I(inode).rsrc_inode = NULL;
+	if (S_ISDIR(inode->i_mode)) {
+		inode->i_size = 2;
+		HFSPLUS_SB(sb).folder_count++;
+		inode->i_op = &hfsplus_dir_inode_operations;
+		inode->i_fop = &hfsplus_dir_operations;
+	} else if (S_ISREG(inode->i_mode)) {
+		HFSPLUS_SB(sb).file_count++;
+		inode->i_op = &hfsplus_file_inode_operations;
+		inode->i_fop = &hfsplus_file_operations;
+		inode->i_mapping->a_ops = &hfsplus_aops;
+		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
+	} else if (S_ISLNK(inode->i_mode)) {
+		HFSPLUS_SB(sb).file_count++;
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_mapping->a_ops = &hfsplus_aops;
+		HFSPLUS_I(inode).clump_blocks = 1;
+	} else
+		HFSPLUS_SB(sb).file_count++;
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+	sb->s_dirt = 1;
+
+	return inode;
+}
+
+void hfsplus_delete_inode(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	if (S_ISDIR(inode->i_mode)) {
+		HFSPLUS_SB(sb).folder_count--;
+		sb->s_dirt = 1;
+		return;
+	}
+	HFSPLUS_SB(sb).file_count--;
+	if (S_ISREG(inode->i_mode)) {
+		if (!inode->i_nlink) {
+			inode->i_size = 0;
+			hfsplus_file_truncate(inode);
+		}
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_size = 0;
+		hfsplus_file_truncate(inode);
+	}
+	sb->s_dirt = 1;
+}
+
+void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
+{
+	struct super_block *sb = inode->i_sb;
+	u32 count;
+	int i;
+
+	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
+	       sizeof(hfsplus_extent_rec));
+	for (count = 0, i = 0; i < 8; i++)
+		count += be32_to_cpu(fork->extents[i].block_count);
+	HFSPLUS_I(inode).first_blocks = count;
+	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
+	HFSPLUS_I(inode).cached_start = 0;
+	HFSPLUS_I(inode).cached_blocks = 0;
+
+	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
+	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
+	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
+	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
+	if (!HFSPLUS_I(inode).clump_blocks)
+		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
+				HFSPLUS_SB(sb).data_clump_blocks;
+}
+
+void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
+{
+	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
+	       sizeof(hfsplus_extent_rec));
+	fork->total_size = cpu_to_be64(inode->i_size);
+	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
+}
+
+int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+{
+	hfsplus_cat_entry entry;
+	int res = 0;
+	u16 type;
+
+	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
+
+	HFSPLUS_I(inode).dev = 0;
+	inode->i_blksize = HFSPLUS_SB(inode->i_sb).alloc_blksz;
+	if (type == HFSPLUS_FOLDER) {
+		struct hfsplus_cat_folder *folder = &entry.folder;
+
+		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
+			/* panic? */;
+		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+					sizeof(struct hfsplus_cat_folder));
+		hfsplus_get_perms(inode, &folder->permissions, 1);
+		inode->i_nlink = 1;
+		inode->i_size = 2 + be32_to_cpu(folder->valence);
+		inode->i_atime = hfsp_mt2ut(folder->access_date);
+		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
+		inode->i_ctime = inode->i_mtime;
+		HFSPLUS_I(inode).fs_blocks = 0;
+		inode->i_op = &hfsplus_dir_inode_operations;
+		inode->i_fop = &hfsplus_dir_operations;
+	} else if (type == HFSPLUS_FILE) {
+		struct hfsplus_cat_file *file = &entry.file;
+
+		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
+			/* panic? */;
+		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+					sizeof(struct hfsplus_cat_file));
+
+		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
+					&file->data_fork : &file->rsrc_fork);
+		hfsplus_get_perms(inode, &file->permissions, 0);
+		inode->i_nlink = 1;
+		if (S_ISREG(inode->i_mode)) {
+			if (file->permissions.dev)
+				inode->i_nlink = be32_to_cpu(file->permissions.dev);
+			inode->i_op = &hfsplus_file_inode_operations;
+			inode->i_fop = &hfsplus_file_operations;
+			inode->i_mapping->a_ops = &hfsplus_aops;
+		} else if (S_ISLNK(inode->i_mode)) {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &hfsplus_aops;
+		} else {
+			init_special_inode(inode, inode->i_mode,
+					   be32_to_cpu(file->permissions.dev));
+		}
+		inode->i_atime = hfsp_mt2ut(file->access_date);
+		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
+		inode->i_ctime = inode->i_mtime;
+	} else {
+		printk("HFS+-fs: bad catalog entry used to create inode\n");
+		res = -EIO;
+	}
+	return res;
+}
+
+int hfsplus_cat_write_inode(struct inode *inode)
+{
+	struct inode *main_inode = inode;
+	struct hfs_find_data fd;
+	hfsplus_cat_entry entry;
+
+	if (HFSPLUS_IS_RSRC(inode))
+		main_inode = HFSPLUS_I(inode).rsrc_inode;
+
+	if (!main_inode->i_nlink)
+		return 0;
+
+	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
+		/* panic? */
+		return -EIO;
+
+	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
+		/* panic? */
+		goto out;
+
+	if (S_ISDIR(main_inode->i_mode)) {
+		struct hfsplus_cat_folder *folder = &entry.folder;
+
+		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
+			/* panic? */;
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+					sizeof(struct hfsplus_cat_folder));
+		/* simple node checks? */
+		hfsplus_set_perms(inode, &folder->permissions);
+		folder->access_date = hfsp_ut2mt(inode->i_atime);
+		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+		folder->valence = cpu_to_be32(inode->i_size - 2);
+		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
+					 sizeof(struct hfsplus_cat_folder));
+	} else if (HFSPLUS_IS_RSRC(inode)) {
+		struct hfsplus_cat_file *file = &entry.file;
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+			       sizeof(struct hfsplus_cat_file));
+		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
+		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
+				sizeof(struct hfsplus_cat_file));
+	} else {
+		struct hfsplus_cat_file *file = &entry.file;
+
+		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
+			/* panic? */;
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+					sizeof(struct hfsplus_cat_file));
+		hfsplus_inode_write_fork(inode, &file->data_fork);
+		if (S_ISREG(inode->i_mode))
+			HFSPLUS_I(inode).dev = inode->i_nlink;
+		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
+		hfsplus_set_perms(inode, &file->permissions);
+		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
+			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
+		else
+			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
+		file->access_date = hfsp_ut2mt(inode->i_atime);
+		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
+					 sizeof(struct hfsplus_cat_file));
+	}
+out:
+	hfs_find_exit(&fd);
+	return 0;
+}
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
new file mode 100644
index 0000000..e07aa09
--- /dev/null
+++ b/fs/hfsplus/ioctl.c
@@ -0,0 +1,188 @@
+/*
+ *  linux/fs/hfsplus/ioctl.c
+ *
+ * Copyright (C) 2003
+ * Ethan Benson <erbenson@alaska.net>
+ * partially derived from linux/fs/ext2/ioctl.c
+ * Copyright (C) 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * hfsplus ioctls
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/xattr.h>
+#include <asm/uaccess.h>
+#include "hfsplus_fs.h"
+
+int hfsplus_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+		  unsigned long arg)
+{
+	unsigned int flags;
+
+	switch (cmd) {
+	case HFSPLUS_IOC_EXT2_GETFLAGS:
+		flags = 0;
+		if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE)
+			flags |= EXT2_FLAG_IMMUTABLE; /* EXT2_IMMUTABLE_FL */
+		if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND)
+			flags |= EXT2_FLAG_APPEND; /* EXT2_APPEND_FL */
+		if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP)
+			flags |= EXT2_FLAG_NODUMP; /* EXT2_NODUMP_FL */
+		return put_user(flags, (int __user *)arg);
+	case HFSPLUS_IOC_EXT2_SETFLAGS: {
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EACCES;
+
+		if (get_user(flags, (int __user *)arg))
+			return -EFAULT;
+
+		if (flags & (EXT2_FLAG_IMMUTABLE|EXT2_FLAG_APPEND) ||
+		    HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) {
+			if (!capable(CAP_LINUX_IMMUTABLE))
+				return -EPERM;
+		}
+
+		/* don't silently ignore unsupported ext2 flags */
+		if (flags & ~(EXT2_FLAG_IMMUTABLE|EXT2_FLAG_APPEND|
+			      EXT2_FLAG_NODUMP))
+			return -EOPNOTSUPP;
+
+		if (flags & EXT2_FLAG_IMMUTABLE) { /* EXT2_IMMUTABLE_FL */
+			inode->i_flags |= S_IMMUTABLE;
+			HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE;
+		} else {
+			inode->i_flags &= ~S_IMMUTABLE;
+			HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
+		}
+		if (flags & EXT2_FLAG_APPEND) { /* EXT2_APPEND_FL */
+			inode->i_flags |= S_APPEND;
+			HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND;
+		} else {
+			inode->i_flags &= ~S_APPEND;
+			HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND;
+		}
+		if (flags & EXT2_FLAG_NODUMP) /* EXT2_NODUMP_FL */
+			HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP;
+		else
+			HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP;
+
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		return 0;
+	}
+	default:
+		return -ENOTTY;
+	}
+}
+
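+#if 0
+/*
+ * Illustrative user-space sketch (not part of the driver): querying the
+ * ext2-style flags served by the ioctl above.  It assumes the
+ * HFSPLUS_IOC_EXT2_GETFLAGS number and the EXT2_FLAG_* bits from
+ * hfsplus_fs.h are made visible to the application.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+static void show_flags(const char *path)
+{
+	int flags = 0;
+	int fd = open(path, O_RDONLY);
+
+	if (fd < 0)
+		return;
+	if (ioctl(fd, HFSPLUS_IOC_EXT2_GETFLAGS, &flags) == 0)
+		printf("%s: immutable=%d append=%d nodump=%d\n", path,
+		       !!(flags & EXT2_FLAG_IMMUTABLE),
+		       !!(flags & EXT2_FLAG_APPEND),
+		       !!(flags & EXT2_FLAG_NODUMP));
+	close(fd);
+}
+#endif
+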
+int hfsplus_setxattr(struct dentry *dentry, const char *name,
+		     const void *value, size_t size, int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_find_data fd;
+	hfsplus_cat_entry entry;
+	struct hfsplus_cat_file *file;
+	int res;
+
+	if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+	if (res)
+		return res;
+	res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+	if (res)
+		goto out;
+	hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+			sizeof(struct hfsplus_cat_file));
+	file = &entry.file;
+
+	if (!strcmp(name, "hfs.type")) {
+		if (size == 4)
+			memcpy(&file->user_info.fdType, value, 4);
+		else
+			res = -ERANGE;
+	} else if (!strcmp(name, "hfs.creator")) {
+		if (size == 4)
+			memcpy(&file->user_info.fdCreator, value, 4);
+		else
+			res = -ERANGE;
+	} else
+		res = -EOPNOTSUPP;
+	if (!res)
+		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
+				sizeof(struct hfsplus_cat_file));
+out:
+	hfs_find_exit(&fd);
+	return res;
+}
+
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+			 void *value, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_find_data fd;
+	hfsplus_cat_entry entry;
+	struct hfsplus_cat_file *file;
+	ssize_t res = 0;
+
+	if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	if (size) {
+		res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+		if (res)
+			return res;
+		res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+		if (res)
+			goto out;
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+				sizeof(struct hfsplus_cat_file));
+	}
+	file = &entry.file;
+
+	if (!strcmp(name, "hfs.type")) {
+		if (size >= 4) {
+			memcpy(value, &file->user_info.fdType, 4);
+			res = 4;
+		} else
+			res = size ? -ERANGE : 4;
+	} else if (!strcmp(name, "hfs.creator")) {
+		if (size >= 4) {
+			memcpy(value, &file->user_info.fdCreator, 4);
+			res = 4;
+		} else
+			res = size ? -ERANGE : 4;
+	} else
+		res = -ENODATA;
+out:
+	if (size)
+		hfs_find_exit(&fd);
+	return res;
+}
+
+#define HFSPLUS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
+
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+		return -EOPNOTSUPP;
+
+	if (!buffer || !size)
+		return HFSPLUS_ATTRLIST_SIZE;
+	if (size < HFSPLUS_ATTRLIST_SIZE)
+		return -ERANGE;
+	strcpy(buffer, "hfs.type");
+	strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
+
+	return HFSPLUS_ATTRLIST_SIZE;
+}
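+
+#if 0
+/*
+ * Illustrative user-space sketch (not part of the driver): the Finder type
+ * and creator codes handled above are exposed as the "hfs.type" and
+ * "hfs.creator" extended attributes and can be read with the standard
+ * xattr syscalls.
+ */
+#include <stdio.h>
+#include <sys/xattr.h>
+
+static void show_finder_codes(const char *path)
+{
+	char type[5] = "", creator[5] = "";
+
+	if (getxattr(path, "hfs.type", type, 4) == 4 &&
+	    getxattr(path, "hfs.creator", creator, 4) == 4)
+		printf("%s: type '%s' creator '%s'\n", path, type, creator);
+}
+#endif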
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
new file mode 100644
index 0000000..1cca010
--- /dev/null
+++ b/fs/hfsplus/options.c
@@ -0,0 +1,162 @@
+/*
+ *  linux/fs/hfsplus/options.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Option parsing
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/parser.h>
+#include <linux/nls.h>
+#include "hfsplus_fs.h"
+
+enum {
+	opt_creator, opt_type,
+	opt_umask, opt_uid, opt_gid,
+	opt_part, opt_session, opt_nls,
+	opt_nodecompose, opt_decompose,
+	opt_err
+};
+
+static match_table_t tokens = {
+	{ opt_creator, "creator=%s" },
+	{ opt_type, "type=%s" },
+	{ opt_umask, "umask=%o" },
+	{ opt_uid, "uid=%u" },
+	{ opt_gid, "gid=%u" },
+	{ opt_part, "part=%u" },
+	{ opt_session, "session=%u" },
+	{ opt_nls, "nls=%s" },
+	{ opt_decompose, "decompose" },
+	{ opt_nodecompose, "nodecompose" },
+	{ opt_err, NULL }
+};
+
+/* Initialize an options object to reasonable defaults */
+void fill_defaults(struct hfsplus_sb_info *opts)
+{
+	if (!opts)
+		return;
+
+	opts->creator = HFSPLUS_DEF_CR_TYPE;
+	opts->type = HFSPLUS_DEF_CR_TYPE;
+	opts->umask = current->fs->umask;
+	opts->uid = current->uid;
+	opts->gid = current->gid;
+	opts->part = -1;
+	opts->session = -1;
+}
+
+/* convert a "four byte character" to a 32 bit int with error checks */
+static inline int match_fourchar(substring_t *arg, u32 *result)
+{
+	if (arg->to - arg->from != 4)
+		return -EINVAL;
+	memcpy(result, arg->from, 4);
+	return 0;
+}
+
+/* Parse options from mount. Returns 0 on failure */
+/* input is the options passed to mount() as a string */
+int parse_options(char *input, struct hfsplus_sb_info *sbi)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int tmp, token;
+
+	if (!input)
+		goto done;
+
+	while ((p = strsep(&input, ",")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case opt_creator:
+			if (match_fourchar(&args[0], &sbi->creator)) {
+				printk("HFS+-fs: creator requires a 4 character value\n");
+				return 0;
+			}
+			break;
+		case opt_type:
+			if (match_fourchar(&args[0], &sbi->type)) {
+				printk("HFS+-fs: type requires a 4 character value\n");
+				return 0;
+			}
+			break;
+		case opt_umask:
+			if (match_octal(&args[0], &tmp)) {
+				printk("HFS+-fs: umask requires a value\n");
+				return 0;
+			}
+			sbi->umask = (umode_t)tmp;
+			break;
+		case opt_uid:
+			if (match_int(&args[0], &tmp)) {
+				printk("HFS+-fs: uid requires an argument\n");
+				return 0;
+			}
+			sbi->uid = (uid_t)tmp;
+			break;
+		case opt_gid:
+			if (match_int(&args[0], &tmp)) {
+				printk("HFS+-fs: gid requires an argument\n");
+				return 0;
+			}
+			sbi->gid = (gid_t)tmp;
+			break;
+		case opt_part:
+			if (match_int(&args[0], &sbi->part)) {
+				printk("HFS+-fs: part requires an argument\n");
+				return 0;
+			}
+			break;
+		case opt_session:
+			if (match_int(&args[0], &sbi->session)) {
+				printk("HFS+-fs: session requires an argument\n");
+				return 0;
+			}
+			break;
+		case opt_nls:
+			if (sbi->nls) {
+				printk("HFS+-fs: unable to change nls mapping\n");
+				return 0;
+			}
+			p = match_strdup(&args[0]);
+			sbi->nls = load_nls(p);
+			if (!sbi->nls) {
+				printk("HFS+-fs: unable to load nls mapping \"%s\"\n", p);
+				kfree(p);
+				return 0;
+			}
+			kfree(p);
+			break;
+		case opt_decompose:
+			sbi->flags &= ~HFSPLUS_SB_NODECOMPOSE;
+			break;
+		case opt_nodecompose:
+			sbi->flags |= HFSPLUS_SB_NODECOMPOSE;
+			break;
+		default:
+			return 0;
+		}
+	}
+
+done:
+	if (!sbi->nls) {
+		/* try utf8 first, as this is the old default behaviour */
+		sbi->nls = load_nls("utf8");
+		if (!sbi->nls)
+			sbi->nls = load_nls_default();
+		if (!sbi->nls)
+			return 0;
+	}
+
+	return 1;
+}
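+
+#if 0
+/*
+ * Illustrative user-space sketch (not part of the driver): the option
+ * string parsed above is the data argument of mount(2).  The device and
+ * mount point below are placeholders.
+ */
+#include <sys/mount.h>
+
+static int mount_hfsplus_example(void)
+{
+	return mount("/dev/sdb2", "/mnt/mac", "hfsplus", MS_RDONLY,
+		     "umask=022,uid=1000,nls=utf8,nodecompose");
+}
+#endif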
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
new file mode 100644
index 0000000..ae78306
--- /dev/null
+++ b/fs/hfsplus/part_tbl.c
@@ -0,0 +1,133 @@
+/*
+ * linux/fs/hfsplus/part_tbl.c
+ *
+ * Copyright (C) 1996-1997  Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU General Public License.
+ *
+ * Original code to handle the new style Mac partition table based on
+ * a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
+ *
+ * In function preconditions, the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and that the structure
+ * it points to has all of its fields initialized to consistent values.
+ *
+ */
+
+#include "hfsplus_fs.h"
+
+/* offsets to various blocks */
+#define HFS_DD_BLK		0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK		1 /* First block of partition map */
+#define HFS_MDB_BLK		2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC	0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC	0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC	0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC		0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC	0xD2D7 /* MFS MDB (super block) */
+
+/*
+ * The new style Mac partition map
+ *
+ * For each partition on the media there is a physical block (512-byte
+ * block) containing one of these structures.  These blocks are
+ * contiguous starting at block 1.
+ */
+struct new_pmap {
+	__be16	pmSig;		/* signature */
+	__be16	reSigPad;	/* padding */
+	__be32	pmMapBlkCnt;	/* number of blocks in partition map */
+	__be32	pmPyPartStart;	/* physical block start of partition */
+	__be32	pmPartBlkCnt;	/* physical block count of partition */
+	u8	pmPartName[32];	/* (null terminated?) string
+				   giving the name of this
+				   partition */
+	u8	pmPartType[32];	/* (null terminated?) string
+				   giving the type of this
+				   partition */
+	/* a bunch more stuff we don't need */
+} __packed;
+
+/*
+ * The old style Mac partition map
+ *
+ * The partition map consists of a 2-byte signature followed by an
+ * array of these structures.  The map is terminated by an all-zero
+ * entry.
+ */
+struct old_pmap {
+	__be16		pdSig;	/* Signature bytes */
+	struct old_pmap_entry {
+		__be32	pdStart;
+		__be32	pdSize;
+		__be32	pdFSID;
+	}	pdEntry[42];
+} __packed;
+
+/*
+ * hfs_part_find()
+ *
+ * Parse the partition map looking for the
+ * start and length of the 'part'th HFS partition.
+ */
+int hfs_part_find(struct super_block *sb,
+		  sector_t *part_start, sector_t *part_size)
+{
+	struct buffer_head *bh;
+	__be16 *data;
+	int i, size, res;
+
+	res = -ENOENT;
+	bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data);
+	if (!bh)
+		return -EIO;
+
+	switch (be16_to_cpu(*data)) {
+	case HFS_OLD_PMAP_MAGIC:
+	  {
+		struct old_pmap *pm;
+		struct old_pmap_entry *p;
+
+		pm = (struct old_pmap *)bh->b_data;
+		p = pm->pdEntry;
+		size = 42;
+		for (i = 0; i < size; p++, i++) {
+			if (p->pdStart && p->pdSize &&
+			    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
+			    (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+				*part_start += be32_to_cpu(p->pdStart);
+				*part_size = be32_to_cpu(p->pdSize);
+				res = 0;
+			}
+		}
+		break;
+	  }
+	case HFS_NEW_PMAP_MAGIC:
+	  {
+		struct new_pmap *pm;
+
+		pm = (struct new_pmap *)bh->b_data;
+		size = be32_to_cpu(pm->pmMapBlkCnt);
+		for (i = 0; i < size;) {
+			if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
+			    (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+				*part_start += be32_to_cpu(pm->pmPyPartStart);
+				*part_size = be32_to_cpu(pm->pmPartBlkCnt);
+				res = 0;
+				break;
+			}
+			brelse(bh);
+			bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i, pm);
+			if (!bh)
+				return -EIO;
+			if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC))
+				break;
+		}
+		break;
+	  }
+	}
+	brelse(bh);
+
+	return res;
+}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
new file mode 100644
index 0000000..5f80446
--- /dev/null
+++ b/fs/hfsplus/super.c
@@ -0,0 +1,502 @@
+/*
+ *  linux/fs/hfsplus/super.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/vfs.h>
+#include <linux/nls.h>
+
+static struct inode *hfsplus_alloc_inode(struct super_block *sb);
+static void hfsplus_destroy_inode(struct inode *inode);
+
+#include "hfsplus_fs.h"
+
+void hfsplus_inode_check(struct super_block *sb)
+{
+#if 0
+	u32 cnt = atomic_read(&HFSPLUS_SB(sb).inode_cnt);
+	u32 last_cnt = HFSPLUS_SB(sb).last_inode_cnt;
+
+	if (cnt <= (last_cnt / 2) ||
+	    cnt >= (last_cnt * 2)) {
+		HFSPLUS_SB(sb).last_inode_cnt = cnt;
+		printk("inode_check: %u,%u,%u\n", cnt, last_cnt,
+			HFSPLUS_SB(sb).cat_tree ? HFSPLUS_SB(sb).cat_tree->node_hash_cnt : 0);
+	}
+#endif
+}
+
+static void hfsplus_read_inode(struct inode *inode)
+{
+	struct hfs_find_data fd;
+	struct hfsplus_vh *vhdr;
+	int err;
+
+	atomic_inc(&HFSPLUS_SB(inode->i_sb).inode_cnt);
+	hfsplus_inode_check(inode->i_sb);
+	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
+	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
+	HFSPLUS_I(inode).flags = 0;
+	HFSPLUS_I(inode).rsrc_inode = NULL;
+
+	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
+	read_inode:
+		hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+		err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+		if (!err)
+			err = hfsplus_cat_read_inode(inode, &fd);
+		hfs_find_exit(&fd);
+		if (err)
+			goto bad_inode;
+		return;
+	}
+	vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
+	switch(inode->i_ino) {
+	case HFSPLUS_ROOT_CNID:
+		goto read_inode;
+	case HFSPLUS_EXT_CNID:
+		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
+		inode->i_mapping->a_ops = &hfsplus_btree_aops;
+		break;
+	case HFSPLUS_CAT_CNID:
+		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
+		inode->i_mapping->a_ops = &hfsplus_btree_aops;
+		break;
+	case HFSPLUS_ALLOC_CNID:
+		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
+		inode->i_mapping->a_ops = &hfsplus_aops;
+		break;
+	case HFSPLUS_START_CNID:
+		hfsplus_inode_read_fork(inode, &vhdr->start_file);
+		break;
+	case HFSPLUS_ATTR_CNID:
+		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
+		inode->i_mapping->a_ops = &hfsplus_btree_aops;
+		break;
+	default:
+		goto bad_inode;
+	}
+
+	return;
+
+ bad_inode:
+	make_bad_inode(inode);
+}
+
+static int hfsplus_write_inode(struct inode *inode, int unused)
+{
+	struct hfsplus_vh *vhdr;
+	int ret = 0;
+
+	dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+	hfsplus_ext_write_extent(inode);
+	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
+		return hfsplus_cat_write_inode(inode);
+	}
+	vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
+	switch (inode->i_ino) {
+	case HFSPLUS_ROOT_CNID:
+		ret = hfsplus_cat_write_inode(inode);
+		break;
+	case HFSPLUS_EXT_CNID:
+		if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) {
+			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
+			inode->i_sb->s_dirt = 1;
+		}
+		hfsplus_inode_write_fork(inode, &vhdr->ext_file);
+		hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree);
+		break;
+	case HFSPLUS_CAT_CNID:
+		if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) {
+			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
+			inode->i_sb->s_dirt = 1;
+		}
+		hfsplus_inode_write_fork(inode, &vhdr->cat_file);
+		hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree);
+		break;
+	case HFSPLUS_ALLOC_CNID:
+		if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) {
+			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
+			inode->i_sb->s_dirt = 1;
+		}
+		hfsplus_inode_write_fork(inode, &vhdr->alloc_file);
+		break;
+	case HFSPLUS_START_CNID:
+		if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) {
+			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
+			inode->i_sb->s_dirt = 1;
+		}
+		hfsplus_inode_write_fork(inode, &vhdr->start_file);
+		break;
+	case HFSPLUS_ATTR_CNID:
+		if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) {
+			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
+			inode->i_sb->s_dirt = 1;
+		}
+		hfsplus_inode_write_fork(inode, &vhdr->attr_file);
+		hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree);
+		break;
+	}
+	return ret;
+}
+
+static void hfsplus_clear_inode(struct inode *inode)
+{
+	dprint(DBG_INODE, "hfsplus_clear_inode: %lu\n", inode->i_ino);
+	atomic_dec(&HFSPLUS_SB(inode->i_sb).inode_cnt);
+	if (HFSPLUS_IS_RSRC(inode)) {
+		HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
+		iput(HFSPLUS_I(inode).rsrc_inode);
+	}
+	hfsplus_inode_check(inode->i_sb);
+}
+
+static void hfsplus_write_super(struct super_block *sb)
+{
+	struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+
+	dprint(DBG_SUPER, "hfsplus_write_super\n");
+	sb->s_dirt = 0;
+	if (sb->s_flags & MS_RDONLY)
+		/* warn? */
+		return;
+
+	vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks);
+	vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc);
+	vhdr->next_cnid = cpu_to_be32(HFSPLUS_SB(sb).next_cnid);
+	vhdr->folder_count = cpu_to_be32(HFSPLUS_SB(sb).folder_count);
+	vhdr->file_count = cpu_to_be32(HFSPLUS_SB(sb).file_count);
+
+	mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
+	if (HFSPLUS_SB(sb).flags & HFSPLUS_SB_WRITEBACKUP) {
+		if (HFSPLUS_SB(sb).sect_count) {
+			struct buffer_head *bh;
+			u32 block, offset;
+
+			block = HFSPLUS_SB(sb).blockoffset;
+			block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
+			offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
+			printk("backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
+				HFSPLUS_SB(sb).sect_count, block, offset);
+			bh = sb_bread(sb, block);
+			if (bh) {
+				vhdr = (struct hfsplus_vh *)(bh->b_data + offset);
+				if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) {
+					memcpy(vhdr, HFSPLUS_SB(sb).s_vhdr, sizeof(*vhdr));
+					mark_buffer_dirty(bh);
+					brelse(bh);
+				} else
+					printk("backup not found!\n");
+			}
+		}
+		HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
+	}
+}
+
+static void hfsplus_put_super(struct super_block *sb)
+{
+	dprint(DBG_SUPER, "hfsplus_put_super\n");
+	if (!(sb->s_flags & MS_RDONLY)) {
+		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+
+		vhdr->modify_date = hfsp_now2mt();
+		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
+		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
+		mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
+		ll_rw_block(WRITE, 1, &HFSPLUS_SB(sb).s_vhbh);
+		wait_on_buffer(HFSPLUS_SB(sb).s_vhbh);
+	}
+
+	hfs_btree_close(HFSPLUS_SB(sb).cat_tree);
+	hfs_btree_close(HFSPLUS_SB(sb).ext_tree);
+	iput(HFSPLUS_SB(sb).alloc_file);
+	iput(HFSPLUS_SB(sb).hidden_dir);
+	brelse(HFSPLUS_SB(sb).s_vhbh);
+	if (HFSPLUS_SB(sb).nls)
+		unload_nls(HFSPLUS_SB(sb).nls);
+}
+
+static int hfsplus_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = HFSPLUS_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
+	buf->f_bfree = HFSPLUS_SB(sb).free_blocks << HFSPLUS_SB(sb).fs_shift;
+	buf->f_bavail = buf->f_bfree;
+	buf->f_files = 0xFFFFFFFF;
+	buf->f_ffree = 0xFFFFFFFF - HFSPLUS_SB(sb).next_cnid;
+	buf->f_namelen = HFSPLUS_MAX_STRLEN;
+
+	return 0;
+}
+
+static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
+{
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (!(*flags & MS_RDONLY)) {
+		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+
+		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
+			printk("HFS+-fs warning: Filesystem was not cleanly unmounted, "
+			       "running fsck.hfsplus is recommended.  leaving read-only.\n");
+			sb->s_flags |= MS_RDONLY;
+			*flags |= MS_RDONLY;
+		} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
+			printk("HFS+-fs: Filesystem is marked locked, leaving read-only.\n");
+			sb->s_flags |= MS_RDONLY;
+			*flags |= MS_RDONLY;
+		}
+	}
+	return 0;
+}
+
+static struct super_operations hfsplus_sops = {
+	.alloc_inode	= hfsplus_alloc_inode,
+	.destroy_inode	= hfsplus_destroy_inode,
+	.read_inode	= hfsplus_read_inode,
+	.write_inode	= hfsplus_write_inode,
+	.clear_inode	= hfsplus_clear_inode,
+	.put_super	= hfsplus_put_super,
+	.write_super	= hfsplus_write_super,
+	.statfs		= hfsplus_statfs,
+	.remount_fs	= hfsplus_remount,
+};
+
+static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct hfsplus_vh *vhdr;
+	struct hfsplus_sb_info *sbi;
+	hfsplus_cat_entry entry;
+	struct hfs_find_data fd;
+	struct inode *root;
+	struct qstr str;
+	struct nls_table *nls = NULL;
+	int err = -EINVAL;
+
+	sbi = kmalloc(sizeof(struct hfsplus_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+
+	memset(sbi, 0, sizeof(HFSPLUS_SB(sb)));
+	sb->s_fs_info = sbi;
+	INIT_HLIST_HEAD(&sbi->rsrc_inodes);
+	fill_defaults(sbi);
+	if (!parse_options(data, sbi)) {
+		if (!silent)
+			printk("HFS+-fs: unable to parse mount options\n");
+		err = -EINVAL;
+		goto cleanup;
+	}
+
+	/* temporarily use utf8 to correctly find the hidden dir below */
+	nls = sbi->nls;
+	sbi->nls = load_nls("utf8");
+	if (!sbi->nls) {
+		printk("HFS+: unable to load nls for utf8\n");
+		err = -EINVAL;
+		goto cleanup;
+	}
+
+	/* Grab the volume header */
+	if (hfsplus_read_wrapper(sb)) {
+		if (!silent)
+			printk("HFS+-fs: unable to find HFS+ superblock\n");
+		err = -EINVAL;
+		goto cleanup;
+	}
+	vhdr = HFSPLUS_SB(sb).s_vhdr;
+
+	/* Copy parts of the volume header into the superblock */
+	sb->s_magic = be16_to_cpu(vhdr->signature);
+	if (be16_to_cpu(vhdr->version) != HFSPLUS_CURRENT_VERSION) {
+		if (!silent)
+			printk("HFS+-fs: wrong filesystem version\n");
+		goto cleanup;
+	}
+	HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
+	HFSPLUS_SB(sb).free_blocks = be32_to_cpu(vhdr->free_blocks);
+	HFSPLUS_SB(sb).next_alloc = be32_to_cpu(vhdr->next_alloc);
+	HFSPLUS_SB(sb).next_cnid = be32_to_cpu(vhdr->next_cnid);
+	HFSPLUS_SB(sb).file_count = be32_to_cpu(vhdr->file_count);
+	HFSPLUS_SB(sb).folder_count = be32_to_cpu(vhdr->folder_count);
+	HFSPLUS_SB(sb).data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
+	if (!HFSPLUS_SB(sb).data_clump_blocks)
+		HFSPLUS_SB(sb).data_clump_blocks = 1;
+	HFSPLUS_SB(sb).rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
+	if (!HFSPLUS_SB(sb).rsrc_clump_blocks)
+		HFSPLUS_SB(sb).rsrc_clump_blocks = 1;
+
+	/* Set up operations so we can load metadata */
+	sb->s_op = &hfsplus_sops;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
+		if (!silent)
+			printk("HFS+-fs warning: Filesystem was not cleanly unmounted, "
+			       "running fsck.hfsplus is recommended.  mounting read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
+		if (!silent)
+			printk("HFS+-fs: Filesystem is marked locked, mounting read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	/* Load metadata objects (B*Trees) */
+	HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
+	if (!HFSPLUS_SB(sb).ext_tree) {
+		if (!silent)
+			printk("HFS+-fs: failed to load extents file\n");
+		goto cleanup;
+	}
+	HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
+	if (!HFSPLUS_SB(sb).cat_tree) {
+		if (!silent)
+			printk("HFS+-fs: failed to load catalog file\n");
+		goto cleanup;
+	}
+
+	HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID);
+	if (!HFSPLUS_SB(sb).alloc_file) {
+		if (!silent)
+			printk("HFS+-fs: failed to load allocation file\n");
+		goto cleanup;
+	}
+
+	/* Load the root directory */
+	root = iget(sb, HFSPLUS_ROOT_CNID);
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		if (!silent)
+			printk("HFS+-fs: failed to load root directory\n");
+		iput(root);
+		goto cleanup;
+	}
+
+	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
+	str.name = HFSP_HIDDENDIR_NAME;
+	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
+		hfs_find_exit(&fd);
+		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
+			goto cleanup;
+		HFSPLUS_SB(sb).hidden_dir = iget(sb, be32_to_cpu(entry.folder.id));
+		if (!HFSPLUS_SB(sb).hidden_dir)
+			goto cleanup;
+	} else
+		hfs_find_exit(&fd);
+
+	if (sb->s_flags & MS_RDONLY)
+		goto out;
+
+	/* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused;
+	 * all three are registered with Apple for our use.
+	 */
+	vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+	vhdr->modify_date = hfsp_now2mt();
+	vhdr->write_count = cpu_to_be32(be32_to_cpu(vhdr->write_count) + 1);
+	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+	mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
+	ll_rw_block(WRITE, 1, &HFSPLUS_SB(sb).s_vhbh);
+	wait_on_buffer(HFSPLUS_SB(sb).s_vhbh);
+
+	if (!HFSPLUS_SB(sb).hidden_dir) {
+		printk("HFS+: create hidden dir...\n");
+		HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+		hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
+				   &str, HFSPLUS_SB(sb).hidden_dir);
+		mark_inode_dirty(HFSPLUS_SB(sb).hidden_dir);
+	}
+out:
+	unload_nls(sbi->nls);
+	sbi->nls = nls;
+	return 0;
+
+cleanup:
+	hfsplus_put_super(sb);
+	if (nls)
+		unload_nls(nls);
+	return err;
+}
+
+MODULE_AUTHOR("Brad Boyer");
+MODULE_DESCRIPTION("Extended Macintosh Filesystem");
+MODULE_LICENSE("GPL");
+
+static kmem_cache_t *hfsplus_inode_cachep;
+
+static struct inode *hfsplus_alloc_inode(struct super_block *sb)
+{
+	struct hfsplus_inode_info *i;
+
+	i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL);
+	return i ? &i->vfs_inode : NULL;
+}
+
+static void hfsplus_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(hfsplus_inode_cachep, &HFSPLUS_I(inode));
+}
+
+#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)
+
+static struct super_block *hfsplus_get_sb(struct file_system_type *fs_type,
+					  int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
+}
+
+static struct file_system_type hfsplus_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "hfsplus",
+	.get_sb		= hfsplus_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct hfsplus_inode_info *i = p;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&i->vfs_inode);
+}
+
+static int __init init_hfsplus_fs(void)
+{
+	int err;
+
+	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
+		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
+		hfsplus_init_once, NULL);
+	if (!hfsplus_inode_cachep)
+		return -ENOMEM;
+	err = register_filesystem(&hfsplus_fs_type);
+	if (err)
+		kmem_cache_destroy(hfsplus_inode_cachep);
+	return err;
+}
+
+static void __exit exit_hfsplus_fs(void)
+{
+	unregister_filesystem(&hfsplus_fs_type);
+	if (kmem_cache_destroy(hfsplus_inode_cachep))
+		printk(KERN_INFO "hfsplus_inode_cache: not all structures were freed\n");
+}
+
+module_init(init_hfsplus_fs)
+module_exit(exit_hfsplus_fs)
diff --git a/fs/hfsplus/tables.c b/fs/hfsplus/tables.c
new file mode 100644
index 0000000..1b91173
--- /dev/null
+++ b/fs/hfsplus/tables.c
@@ -0,0 +1,3245 @@
+/*
+ * linux/fs/hfsplus/tables.c
+ *
+ * Various data tables
+ */
+
+#include "hfsplus_fs.h"
+
+/*
+ *  Unicode case folding table taken from Apple Technote #1150
+ *  (HFS Plus Volume Format)
+ */
+
+u16 hfsplus_case_fold_table[] = {
+/*
+ *  The lower case table consists of a 256-entry high-byte table followed by
+ *  some number of 256-entry subtables. The high-byte table contains either an
+ *  offset to the subtable for characters with that high byte or zero, which
+ *  means that there are no case mappings or ignored characters in that block.
+ *  Ignored characters are mapped to zero.
+ *  (An illustrative lookup sketch follows this table.)
+ */
+
+    // High-byte indices ( == 0 iff no case mapping and no ignorables )
+
+
+    /* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00,
+
+    // Table 1 (for high byte 0x00)
+
+    /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+            0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+    /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+            0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+    /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+            0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+    /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+            0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+    /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+            0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+    /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+            0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+    /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+            0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+    /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+            0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+    /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+            0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+    /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+            0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+    /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+            0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+    /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+            0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+    /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7,
+            0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+    /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+            0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF,
+    /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+            0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+    /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+            0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF,
+
+    // Table 2 (for high byte 0x01)
+
+    /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107,
+            0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F,
+    /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117,
+            0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F,
+    /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127,
+            0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F,
+    /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137,
+            0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140,
+    /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147,
+            0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F,
+    /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157,
+            0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F,
+    /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167,
+            0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F,
+    /* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177,
+            0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F,
+    /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188,
+            0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259,
+    /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268,
+            0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275,
+    /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8,
+            0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF,
+    /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292,
+            0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF,
+    /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9,
+            0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF,
+    /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7,
+            0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF,
+    /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7,
+            0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF,
+    /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7,
+            0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF,
+
+    // Table 3 (for high byte 0x03)
+
+    /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
+            0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
+    /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+            0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
+    /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
+            0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
+    /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
+            0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
+    /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
+            0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
+    /* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
+            0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
+    /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
+            0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
+    /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
+            0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
+    /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
+            0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F,
+    /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+            0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+    /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+            0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF,
+    /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+            0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+    /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+            0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF,
+    /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7,
+            0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF,
+    /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7,
+            0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF,
+    /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
+            0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF,
+
+    // Table 4 (for high byte 0x04)
+
+    /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407,
+            0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F,
+    /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+            0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+    /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+            0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+    /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+            0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+    /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+            0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+    /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457,
+            0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F,
+    /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467,
+            0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F,
+    /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477,
+            0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F,
+    /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
+            0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F,
+    /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497,
+            0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F,
+    /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7,
+            0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF,
+    /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7,
+            0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF,
+    /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8,
+            0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF,
+    /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7,
+            0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF,
+    /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7,
+            0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF,
+    /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7,
+            0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF,
+
+    // Table 5 (for high byte 0x05)
+
+    /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
+            0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
+    /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
+            0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
+    /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
+            0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
+    /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+            0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+    /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+            0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+    /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557,
+            0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
+    /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+            0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+    /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+            0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+    /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587,
+            0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
+    /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
+            0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
+    /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
+            0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
+    /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
+            0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
+    /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
+            0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
+    /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
+            0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
+    /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
+            0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
+    /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
+            0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF,
+
+    // Table 6 (for high byte 0x10)
+
+    /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+            0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F,
+    /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017,
+            0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F,
+    /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+            0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F,
+    /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037,
+            0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F,
+    /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
+            0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F,
+    /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
+            0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F,
+    /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067,
+            0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F,
+    /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077,
+            0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F,
+    /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087,
+            0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F,
+    /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097,
+            0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F,
+    /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+            0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+    /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+            0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+    /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7,
+            0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF,
+    /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+            0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+    /* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+            0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+    /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7,
+            0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF,
+
+    // Table 7 (for high byte 0x20)
+
+    /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+            0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017,
+            0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F,
+    /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
+            0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F,
+    /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037,
+            0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F,
+    /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047,
+            0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F,
+    /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057,
+            0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F,
+    /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067,
+            0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+    /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077,
+            0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F,
+    /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
+            0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F,
+    /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097,
+            0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F,
+    /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7,
+            0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF,
+    /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7,
+            0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF,
+    /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7,
+            0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF,
+    /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7,
+            0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF,
+    /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7,
+            0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF,
+    /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7,
+            0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF,
+
+    // Table 8 (for high byte 0x21)
+
+    /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
+            0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
+    /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
+            0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
+    /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
+            0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
+    /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
+            0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
+    /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
+            0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
+    /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
+            0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
+    /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+            0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+    /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+            0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+    /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
+            0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
+    /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
+            0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
+    /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
+            0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
+    /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
+            0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
+    /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
+            0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
+    /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
+            0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
+    /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
+            0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
+    /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
+            0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF,
+
+    // Table 9 (for high byte 0xFE)
+
+    /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07,
+            0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F,
+    /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17,
+            0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F,
+    /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27,
+            0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F,
+    /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37,
+            0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F,
+    /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47,
+            0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F,
+    /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57,
+            0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F,
+    /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67,
+            0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F,
+    /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77,
+            0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F,
+    /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87,
+            0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F,
+    /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97,
+            0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F,
+    /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7,
+            0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF,
+    /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7,
+            0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF,
+    /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7,
+            0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF,
+    /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7,
+            0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF,
+    /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7,
+            0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF,
+    /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7,
+            0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000,
+
+    // Table 10 (for high byte 0xFF)
+
+    /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
+            0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
+    /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
+            0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
+    /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+            0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+    /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+            0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
+    /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+            0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+    /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+            0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
+    /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
+            0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
+    /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
+            0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
+    /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
+            0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
+    /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
+            0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
+    /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
+            0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
+    /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
+            0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
+    /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
+            0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
+    /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
+            0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
+    /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
+            0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
+    /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
+            0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF,
+};
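+
+/*
+ * Illustrative sketch (not part of the original patch): one possible lookup
+ * through the two-level table above.  The high-byte entry selects a 256-entry
+ * subtable (zero means the block has no case mappings and no ignorable
+ * characters); the low byte then indexes that subtable, where a result of
+ * zero marks an ignorable character.  The helper name here is hypothetical;
+ * the driver's unicode handling code is expected to perform an equivalent
+ * lookup.
+ */
+static inline u16 example_case_fold(u16 c)
+{
+	u16 off = hfsplus_case_fold_table[c >> 8];
+
+	if (!off)
+		return c;	/* no mappings or ignorables in this block */
+	return hfsplus_case_fold_table[off + (c & 0xff)];	/* 0 == ignorable */
+}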
+
+u16 hfsplus_decompose_table[] = {
+	/* base table */
+	0x0010, 0x04c0, 0x0000, 0x06f0, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x07b0,
+	/* char table 0x0___ */
+	0x0020, 0x0070, 0x0160, 0x0190, 0x0230, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x02d0, 0x0340, 0x0360, 0x03b0, 0x03e0, 0x0400, 0x0430,
+	/* char table 0x00__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0030, 0x0040, 0x0050, 0x0060,
+	/* char values 0x00c_ */
+	0x2042, 0x204a, 0x2052, 0x205a, 0x2062, 0x206a, 0x0000, 0x2072,
+	0x207a, 0x2082, 0x208a, 0x2092, 0x209a, 0x20a2, 0x20aa, 0x20b2,
+	/* char values 0x00d_ */
+	0x0000, 0x20ba, 0x20c2, 0x20ca, 0x20d2, 0x20da, 0x20e2, 0x0000,
+	0x0000, 0x20ea, 0x20f2, 0x20fa, 0x2102, 0x210a, 0x0000, 0x0000,
+	/* char values 0x00e_ */
+	0x2112, 0x211a, 0x2122, 0x212a, 0x2132, 0x213a, 0x0000, 0x2142,
+	0x214a, 0x2152, 0x215a, 0x2162, 0x216a, 0x2172, 0x217a, 0x2182,
+	/* char values 0x00f_ */
+	0x0000, 0x218a, 0x2192, 0x219a, 0x21a2, 0x21aa, 0x21b2, 0x0000,
+	0x0000, 0x21ba, 0x21c2, 0x21ca, 0x21d2, 0x21da, 0x0000, 0x21e2,
+	/* char table 0x01__ */
+	0x0080, 0x0090, 0x00a0, 0x00b0, 0x00c0, 0x00d0, 0x00e0, 0x00f0,
+	0x0000, 0x0000, 0x0100, 0x0110, 0x0120, 0x0130, 0x0140, 0x0150,
+	/* char values 0x010_ */
+	0x21ea, 0x21f2, 0x21fa, 0x2202, 0x220a, 0x2212, 0x221a, 0x2222,
+	0x222a, 0x2232, 0x223a, 0x2242, 0x224a, 0x2252, 0x225a, 0x2262,
+	/* char values 0x011_ */
+	0x0000, 0x0000, 0x226a, 0x2272, 0x227a, 0x2282, 0x228a, 0x2292,
+	0x229a, 0x22a2, 0x22aa, 0x22b2, 0x22ba, 0x22c2, 0x22ca, 0x22d2,
+	/* char values 0x012_ */
+	0x22da, 0x22e2, 0x22ea, 0x22f2, 0x22fa, 0x2302, 0x0000, 0x0000,
+	0x230a, 0x2312, 0x231a, 0x2322, 0x232a, 0x2332, 0x233a, 0x2342,
+	/* char values 0x013_ */
+	0x234a, 0x0000, 0x0000, 0x0000, 0x2352, 0x235a, 0x2362, 0x236a,
+	0x0000, 0x2372, 0x237a, 0x2382, 0x238a, 0x2392, 0x239a, 0x0000,
+	/* char values 0x014_ */
+	0x0000, 0x0000, 0x0000, 0x23a2, 0x23aa, 0x23b2, 0x23ba, 0x23c2,
+	0x23ca, 0x0000, 0x0000, 0x0000, 0x23d2, 0x23da, 0x23e2, 0x23ea,
+	/* char values 0x015_ */
+	0x23f2, 0x23fa, 0x0000, 0x0000, 0x2402, 0x240a, 0x2412, 0x241a,
+	0x2422, 0x242a, 0x2432, 0x243a, 0x2442, 0x244a, 0x2452, 0x245a,
+	/* char values 0x016_ */
+	0x2462, 0x246a, 0x2472, 0x247a, 0x2482, 0x248a, 0x0000, 0x0000,
+	0x2492, 0x249a, 0x24a2, 0x24aa, 0x24b2, 0x24ba, 0x24c2, 0x24ca,
+	/* char values 0x017_ */
+	0x24d2, 0x24da, 0x24e2, 0x24ea, 0x24f2, 0x24fa, 0x2502, 0x250a,
+	0x2512, 0x251a, 0x2522, 0x252a, 0x2532, 0x253a, 0x2542, 0x0000,
+	/* char values 0x01a_ */
+	0x254a, 0x2552, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x255a,
+	/* char values 0x01b_ */
+	0x2562, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x01c_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x256a, 0x2572, 0x257a,
+	/* char values 0x01d_ */
+	0x2582, 0x258a, 0x2592, 0x259a, 0x25a2, 0x25ab, 0x25b7, 0x25c3,
+	0x25cf, 0x25db, 0x25e7, 0x25f3, 0x25ff, 0x0000, 0x260b, 0x2617,
+	/* char values 0x01e_ */
+	0x2623, 0x262f, 0x263a, 0x2642, 0x0000, 0x0000, 0x264a, 0x2652,
+	0x265a, 0x2662, 0x266a, 0x2672, 0x267b, 0x2687, 0x2692, 0x269a,
+	/* char values 0x01f_ */
+	0x26a2, 0x0000, 0x0000, 0x0000, 0x26aa, 0x26b2, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x26bb, 0x26c7, 0x26d2, 0x26da, 0x26e2, 0x26ea,
+	/* char table 0x02__ */
+	0x0170, 0x0180, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x020_ */
+	0x26f2, 0x26fa, 0x2702, 0x270a, 0x2712, 0x271a, 0x2722, 0x272a,
+	0x2732, 0x273a, 0x2742, 0x274a, 0x2752, 0x275a, 0x2762, 0x276a,
+	/* char values 0x021_ */
+	0x2772, 0x277a, 0x2782, 0x278a, 0x2792, 0x279a, 0x27a2, 0x27aa,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x03__ */
+	0x0000, 0x01a0, 0x0000, 0x0000, 0x01b0, 0x0000, 0x0000, 0x01c0,
+	0x01d0, 0x01e0, 0x01f0, 0x0200, 0x0210, 0x0220, 0x0000, 0x0000,
+	/* char values 0x031_ */
+	0x27b2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x034_ */
+	0x27b9, 0x27bd, 0x0000, 0x27c1, 0x27c6, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x037_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x27cd, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x27d1, 0x0000,
+	/* char values 0x038_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x27d6, 0x27de, 0x27e5,
+	0x27ea, 0x27f2, 0x27fa, 0x0000, 0x2802, 0x0000, 0x280a, 0x2812,
+	/* char values 0x039_ */
+	0x281b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x03a_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x2826, 0x282e, 0x2836, 0x283e, 0x2846, 0x284e,
+	/* char values 0x03b_ */
+	0x2857, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x03c_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x2862, 0x286a, 0x2872, 0x287a, 0x2882, 0x0000,
+	/* char values 0x03d_ */
+	0x0000, 0x0000, 0x0000, 0x288a, 0x2892, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x04__ */
+	0x0240, 0x0250, 0x0000, 0x0260, 0x0000, 0x0270, 0x0000, 0x0280,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0290, 0x02a0, 0x02b0, 0x02c0,
+	/* char values 0x040_ */
+	0x0000, 0x289a, 0x0000, 0x28a2, 0x0000, 0x0000, 0x0000, 0x28aa,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x28b2, 0x0000, 0x28ba, 0x0000,
+	/* char values 0x041_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x28c2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x043_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x28ca, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x045_ */
+	0x0000, 0x28d2, 0x0000, 0x28da, 0x0000, 0x0000, 0x0000, 0x28e2,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x28ea, 0x0000, 0x28f2, 0x0000,
+	/* char values 0x047_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x28fa, 0x2902,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x04c_ */
+	0x0000, 0x290a, 0x2912, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x04d_ */
+	0x291a, 0x2922, 0x292a, 0x2932, 0x2939, 0x293d, 0x2942, 0x294a,
+	0x2951, 0x2955, 0x295a, 0x2962, 0x296a, 0x2972, 0x297a, 0x2982,
+	/* char values 0x04e_ */
+	0x2989, 0x298d, 0x2992, 0x299a, 0x29a2, 0x29aa, 0x29b2, 0x29ba,
+	0x29c1, 0x29c5, 0x29ca, 0x29d2, 0x0000, 0x0000, 0x29da, 0x29e2,
+	/* char values 0x04f_ */
+	0x29ea, 0x29f2, 0x29fa, 0x2a02, 0x2a0a, 0x2a12, 0x0000, 0x0000,
+	0x2a1a, 0x2a22, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x09__ */
+	0x0000, 0x0000, 0x02e0, 0x02f0, 0x0000, 0x0300, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0310, 0x0320, 0x0330, 0x0000, 0x0000,
+	/* char values 0x092_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x2a2a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x093_ */
+	0x0000, 0x2a32, 0x0000, 0x0000, 0x2a3a, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x095_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x2a42, 0x2a4a, 0x2a52, 0x2a5a, 0x2a62, 0x2a6a, 0x2a72, 0x2a7a,
+	/* char values 0x09b_ */
+	0x2a82, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x09c_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x2a8a, 0x2a92, 0x0000, 0x0000, 0x0000,
+	/* char values 0x09d_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x2a9a, 0x2aa2, 0x0000, 0x2aaa,
+	/* char table 0x0a__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0350, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0a5_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x2ab2, 0x2aba, 0x2ac2, 0x2aca, 0x0000, 0x2ad2, 0x0000,
+	/* char table 0x0b__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0370, 0x0380, 0x0000, 0x0000,
+	0x0000, 0x0390, 0x0000, 0x0000, 0x03a0, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0b4_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x2ada, 0x0000, 0x0000, 0x2ae2, 0x2aea, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0b5_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x2af2, 0x2afa, 0x0000, 0x2b02,
+	/* char values 0x0b9_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x2b0a, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0bc_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x2b12, 0x2b1a, 0x2b22, 0x0000, 0x0000, 0x0000,
+	/* char table 0x0c__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x03c0, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x03d0, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0c4_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x2b2a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0cc_ */
+	0x2b32, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b3a,
+	0x2b42, 0x0000, 0x2b4a, 0x2b53, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x0d__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x03f0, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0d4_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x2b5e, 0x2b66, 0x2b6e, 0x0000, 0x0000, 0x0000,
+	/* char table 0x0e__ */
+	0x0000, 0x0000, 0x0000, 0x0410, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0420, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0e3_ */
+	0x0000, 0x0000, 0x0000, 0x2b76, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0eb_ */
+	0x0000, 0x0000, 0x0000, 0x2b7e, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x0f__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0440, 0x0450, 0x0460, 0x0470,
+	0x0480, 0x0490, 0x04a0, 0x04b0, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0f4_ */
+	0x0000, 0x0000, 0x0000, 0x2b86, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b8e, 0x0000, 0x0000,
+	/* char values 0x0f5_ */
+	0x0000, 0x0000, 0x2b96, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b9e,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x2ba6, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0f6_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x2bae, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0f7_ */
+	0x0000, 0x0000, 0x0000, 0x2bb6, 0x0000, 0x2bbe, 0x2bc6, 0x2bcf,
+	0x2bda, 0x2be3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0f8_ */
+	0x0000, 0x2bee, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0f9_ */
+	0x0000, 0x0000, 0x0000, 0x2bf6, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2bfe, 0x0000, 0x0000,
+	/* char values 0x0fa_ */
+	0x0000, 0x0000, 0x2c06, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c0e,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x2c16, 0x0000, 0x0000, 0x0000,
+	/* char values 0x0fb_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x2c1e, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x1___ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x04d0, 0x05e0,
+	/* char table 0x1e__ */
+	0x04e0, 0x04f0, 0x0500, 0x0510, 0x0520, 0x0530, 0x0540, 0x0550,
+	0x0560, 0x0570, 0x0580, 0x0590, 0x05a0, 0x05b0, 0x05c0, 0x05d0,
+	/* char values 0x1e0_ */
+	0x2c26, 0x2c2e, 0x2c36, 0x2c3e, 0x2c46, 0x2c4e, 0x2c56, 0x2c5e,
+	0x2c67, 0x2c73, 0x2c7e, 0x2c86, 0x2c8e, 0x2c96, 0x2c9e, 0x2ca6,
+	/* char values 0x1e1_ */
+	0x2cae, 0x2cb6, 0x2cbe, 0x2cc6, 0x2ccf, 0x2cdb, 0x2ce7, 0x2cf3,
+	0x2cfe, 0x2d06, 0x2d0e, 0x2d16, 0x2d1f, 0x2d2b, 0x2d36, 0x2d3e,
+	/* char values 0x1e2_ */
+	0x2d46, 0x2d4e, 0x2d56, 0x2d5e, 0x2d66, 0x2d6e, 0x2d76, 0x2d7e,
+	0x2d86, 0x2d8e, 0x2d96, 0x2d9e, 0x2da6, 0x2dae, 0x2db7, 0x2dc3,
+	/* char values 0x1e3_ */
+	0x2dce, 0x2dd6, 0x2dde, 0x2de6, 0x2dee, 0x2df6, 0x2dfe, 0x2e06,
+	0x2e0f, 0x2e1b, 0x2e26, 0x2e2e, 0x2e36, 0x2e3e, 0x2e46, 0x2e4e,
+	/* char values 0x1e4_ */
+	0x2e56, 0x2e5e, 0x2e66, 0x2e6e, 0x2e76, 0x2e7e, 0x2e86, 0x2e8e,
+	0x2e96, 0x2e9e, 0x2ea6, 0x2eae, 0x2eb7, 0x2ec3, 0x2ecf, 0x2edb,
+	/* char values 0x1e5_ */
+	0x2ee7, 0x2ef3, 0x2eff, 0x2f0b, 0x2f16, 0x2f1e, 0x2f26, 0x2f2e,
+	0x2f36, 0x2f3e, 0x2f46, 0x2f4e, 0x2f57, 0x2f63, 0x2f6e, 0x2f76,
+	/* char values 0x1e6_ */
+	0x2f7e, 0x2f86, 0x2f8e, 0x2f96, 0x2f9f, 0x2fab, 0x2fb7, 0x2fc3,
+	0x2fcf, 0x2fdb, 0x2fe6, 0x2fee, 0x2ff6, 0x2ffe, 0x3006, 0x300e,
+	/* char values 0x1e7_ */
+	0x3016, 0x301e, 0x3026, 0x302e, 0x3036, 0x303e, 0x3046, 0x304e,
+	0x3057, 0x3063, 0x306f, 0x307b, 0x3086, 0x308e, 0x3096, 0x309e,
+	/* char values 0x1e8_ */
+	0x30a6, 0x30ae, 0x30b6, 0x30be, 0x30c6, 0x30ce, 0x30d6, 0x30de,
+	0x30e6, 0x30ee, 0x30f6, 0x30fe, 0x3106, 0x310e, 0x3116, 0x311e,
+	/* char values 0x1e9_ */
+	0x3126, 0x312e, 0x3136, 0x313e, 0x3146, 0x314e, 0x3156, 0x315e,
+	0x3166, 0x316e, 0x0000, 0x3176, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x1ea_ */
+	0x317e, 0x3186, 0x318e, 0x3196, 0x319f, 0x31ab, 0x31b7, 0x31c3,
+	0x31cf, 0x31db, 0x31e7, 0x31f3, 0x31ff, 0x320b, 0x3217, 0x3223,
+	/* char values 0x1eb_ */
+	0x322f, 0x323b, 0x3247, 0x3253, 0x325f, 0x326b, 0x3277, 0x3283,
+	0x328e, 0x3296, 0x329e, 0x32a6, 0x32ae, 0x32b6, 0x32bf, 0x32cb,
+	/* char values 0x1ec_ */
+	0x32d7, 0x32e3, 0x32ef, 0x32fb, 0x3307, 0x3313, 0x331f, 0x332b,
+	0x3336, 0x333e, 0x3346, 0x334e, 0x3356, 0x335e, 0x3366, 0x336e,
+	/* char values 0x1ed_ */
+	0x3377, 0x3383, 0x338f, 0x339b, 0x33a7, 0x33b3, 0x33bf, 0x33cb,
+	0x33d7, 0x33e3, 0x33ef, 0x33fb, 0x3407, 0x3413, 0x341f, 0x342b,
+	/* char values 0x1ee_ */
+	0x3437, 0x3443, 0x344f, 0x345b, 0x3466, 0x346e, 0x3476, 0x347e,
+	0x3487, 0x3493, 0x349f, 0x34ab, 0x34b7, 0x34c3, 0x34cf, 0x34db,
+	/* char values 0x1ef_ */
+	0x34e7, 0x34f3, 0x34fe, 0x3506, 0x350e, 0x3516, 0x351e, 0x3526,
+	0x352e, 0x3536, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x1f__ */
+	0x05f0, 0x0600, 0x0610, 0x0620, 0x0630, 0x0640, 0x0650, 0x0660,
+	0x0670, 0x0680, 0x0690, 0x06a0, 0x06b0, 0x06c0, 0x06d0, 0x06e0,
+	/* char values 0x1f0_ */
+	0x353e, 0x3546, 0x354f, 0x355b, 0x3567, 0x3573, 0x357f, 0x358b,
+	0x3596, 0x359e, 0x35a7, 0x35b3, 0x35bf, 0x35cb, 0x35d7, 0x35e3,
+	/* char values 0x1f1_ */
+	0x35ee, 0x35f6, 0x35ff, 0x360b, 0x3617, 0x3623, 0x0000, 0x0000,
+	0x362e, 0x3636, 0x363f, 0x364b, 0x3657, 0x3663, 0x0000, 0x0000,
+	/* char values 0x1f2_ */
+	0x366e, 0x3676, 0x367f, 0x368b, 0x3697, 0x36a3, 0x36af, 0x36bb,
+	0x36c6, 0x36ce, 0x36d7, 0x36e3, 0x36ef, 0x36fb, 0x3707, 0x3713,
+	/* char values 0x1f3_ */
+	0x371e, 0x3726, 0x372f, 0x373b, 0x3747, 0x3753, 0x375f, 0x376b,
+	0x3776, 0x377e, 0x3787, 0x3793, 0x379f, 0x37ab, 0x37b7, 0x37c3,
+	/* char values 0x1f4_ */
+	0x37ce, 0x37d6, 0x37df, 0x37eb, 0x37f7, 0x3803, 0x0000, 0x0000,
+	0x380e, 0x3816, 0x381f, 0x382b, 0x3837, 0x3843, 0x0000, 0x0000,
+	/* char values 0x1f5_ */
+	0x384e, 0x3856, 0x385f, 0x386b, 0x3877, 0x3883, 0x388f, 0x389b,
+	0x0000, 0x38a6, 0x0000, 0x38af, 0x0000, 0x38bb, 0x0000, 0x38c7,
+	/* char values 0x1f6_ */
+	0x38d2, 0x38da, 0x38e3, 0x38ef, 0x38fb, 0x3907, 0x3913, 0x391f,
+	0x392a, 0x3932, 0x393b, 0x3947, 0x3953, 0x395f, 0x396b, 0x3977,
+	/* char values 0x1f7_ */
+	0x3982, 0x398a, 0x3992, 0x399a, 0x39a2, 0x39aa, 0x39b2, 0x39ba,
+	0x39c2, 0x39ca, 0x39d2, 0x39da, 0x39e2, 0x39ea, 0x0000, 0x0000,
+	/* char values 0x1f8_ */
+	0x39f3, 0x39ff, 0x3a0c, 0x3a1c, 0x3a2c, 0x3a3c, 0x3a4c, 0x3a5c,
+	0x3a6b, 0x3a77, 0x3a84, 0x3a94, 0x3aa4, 0x3ab4, 0x3ac4, 0x3ad4,
+	/* char values 0x1f9_ */
+	0x3ae3, 0x3aef, 0x3afc, 0x3b0c, 0x3b1c, 0x3b2c, 0x3b3c, 0x3b4c,
+	0x3b5b, 0x3b67, 0x3b74, 0x3b84, 0x3b94, 0x3ba4, 0x3bb4, 0x3bc4,
+	/* char values 0x1fa_ */
+	0x3bd3, 0x3bdf, 0x3bec, 0x3bfc, 0x3c0c, 0x3c1c, 0x3c2c, 0x3c3c,
+	0x3c4b, 0x3c57, 0x3c64, 0x3c74, 0x3c84, 0x3c94, 0x3ca4, 0x3cb4,
+	/* char values 0x1fb_ */
+	0x3cc2, 0x3cca, 0x3cd3, 0x3cde, 0x3ce7, 0x0000, 0x3cf2, 0x3cfb,
+	0x3d06, 0x3d0e, 0x3d16, 0x3d1e, 0x3d26, 0x0000, 0x3d2d, 0x0000,
+	/* char values 0x1fc_ */
+	0x0000, 0x3d32, 0x3d3b, 0x3d46, 0x3d4f, 0x0000, 0x3d5a, 0x3d63,
+	0x3d6e, 0x3d76, 0x3d7e, 0x3d86, 0x3d8e, 0x3d96, 0x3d9e, 0x3da6,
+	/* char values 0x1fd_ */
+	0x3dae, 0x3db6, 0x3dbf, 0x3dcb, 0x0000, 0x0000, 0x3dd6, 0x3ddf,
+	0x3dea, 0x3df2, 0x3dfa, 0x3e02, 0x0000, 0x3e0a, 0x3e12, 0x3e1a,
+	/* char values 0x1fe_ */
+	0x3e22, 0x3e2a, 0x3e33, 0x3e3f, 0x3e4a, 0x3e52, 0x3e5a, 0x3e63,
+	0x3e6e, 0x3e76, 0x3e7e, 0x3e86, 0x3e8e, 0x3e96, 0x3e9e, 0x3ea5,
+	/* char values 0x1ff_ */
+	0x0000, 0x0000, 0x3eab, 0x3eb6, 0x3ebf, 0x0000, 0x3eca, 0x3ed3,
+	0x3ede, 0x3ee6, 0x3eee, 0x3ef6, 0x3efe, 0x3f05, 0x0000, 0x0000,
+	/* char table 0x3___ */
+	0x0700, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0x30__ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0710, 0x0720, 0x0730, 0x0740,
+	0x0000, 0x0750, 0x0760, 0x0770, 0x0780, 0x0790, 0x0000, 0x07a0,
+	/* char values 0x304_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x3f0a, 0x0000, 0x3f12, 0x0000,
+	/* char values 0x305_ */
+	0x3f1a, 0x0000, 0x3f22, 0x0000, 0x3f2a, 0x0000, 0x3f32, 0x0000,
+	0x3f3a, 0x0000, 0x3f42, 0x0000, 0x3f4a, 0x0000, 0x3f52, 0x0000,
+	/* char values 0x306_ */
+	0x3f5a, 0x0000, 0x3f62, 0x0000, 0x0000, 0x3f6a, 0x0000, 0x3f72,
+	0x0000, 0x3f7a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x307_ */
+	0x3f82, 0x3f8a, 0x0000, 0x3f92, 0x3f9a, 0x0000, 0x3fa2, 0x3faa,
+	0x0000, 0x3fb2, 0x3fba, 0x0000, 0x3fc2, 0x3fca, 0x0000, 0x0000,
+	/* char values 0x309_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x3fd2, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3fda, 0x0000,
+	/* char values 0x30a_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x3fe2, 0x0000, 0x3fea, 0x0000,
+	/* char values 0x30b_ */
+	0x3ff2, 0x0000, 0x3ffa, 0x0000, 0x4002, 0x0000, 0x400a, 0x0000,
+	0x4012, 0x0000, 0x401a, 0x0000, 0x4022, 0x0000, 0x402a, 0x0000,
+	/* char values 0x30c_ */
+	0x4032, 0x0000, 0x403a, 0x0000, 0x0000, 0x4042, 0x0000, 0x404a,
+	0x0000, 0x4052, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0x30d_ */
+	0x405a, 0x4062, 0x0000, 0x406a, 0x4072, 0x0000, 0x407a, 0x4082,
+	0x0000, 0x408a, 0x4092, 0x0000, 0x409a, 0x40a2, 0x0000, 0x0000,
+	/* char values 0x30f_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x40aa, 0x0000, 0x0000, 0x40b2,
+	0x40ba, 0x40c2, 0x40ca, 0x0000, 0x0000, 0x0000, 0x40d2, 0x0000,
+	/* char table 0xf___ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x07c0, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char table 0xfb__ */
+	0x0000, 0x07d0, 0x07e0, 0x07f0, 0x0800, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	/* char values 0xfb1_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x40da,
+	/* char values 0xfb2_ */
+	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x40e2, 0x40ea, 0x40f3, 0x40ff, 0x410a, 0x4112,
+	/* char values 0xfb3_ */
+	0x411a, 0x4122, 0x412a, 0x4132, 0x413a, 0x4142, 0x414a, 0x0000,
+	0x4152, 0x415a, 0x4162, 0x416a, 0x4172, 0x0000, 0x417a, 0x0000,
+	/* char values 0xfb4_ */
+	0x4182, 0x418a, 0x0000, 0x4192, 0x419a, 0x0000, 0x41a2, 0x41aa,
+	0x41b2, 0x41ba, 0x41c2, 0x41ca, 0x41d2, 0x41da, 0x41e2, 0x0000,
+	/* decomposed characters */
+	0x0041, 0x0300, 0x0041, 0x0301, 0x0041, 0x0302, 0x0041, 0x0303,
+	0x0041, 0x0308, 0x0041, 0x030a, 0x0043, 0x0327, 0x0045, 0x0300,
+	0x0045, 0x0301, 0x0045, 0x0302, 0x0045, 0x0308, 0x0049, 0x0300,
+	0x0049, 0x0301, 0x0049, 0x0302, 0x0049, 0x0308, 0x004e, 0x0303,
+	0x004f, 0x0300, 0x004f, 0x0301, 0x004f, 0x0302, 0x004f, 0x0303,
+	0x004f, 0x0308, 0x0055, 0x0300, 0x0055, 0x0301, 0x0055, 0x0302,
+	0x0055, 0x0308, 0x0059, 0x0301, 0x0061, 0x0300, 0x0061, 0x0301,
+	0x0061, 0x0302, 0x0061, 0x0303, 0x0061, 0x0308, 0x0061, 0x030a,
+	0x0063, 0x0327, 0x0065, 0x0300, 0x0065, 0x0301, 0x0065, 0x0302,
+	0x0065, 0x0308, 0x0069, 0x0300, 0x0069, 0x0301, 0x0069, 0x0302,
+	0x0069, 0x0308, 0x006e, 0x0303, 0x006f, 0x0300, 0x006f, 0x0301,
+	0x006f, 0x0302, 0x006f, 0x0303, 0x006f, 0x0308, 0x0075, 0x0300,
+	0x0075, 0x0301, 0x0075, 0x0302, 0x0075, 0x0308, 0x0079, 0x0301,
+	0x0079, 0x0308, 0x0041, 0x0304, 0x0061, 0x0304, 0x0041, 0x0306,
+	0x0061, 0x0306, 0x0041, 0x0328, 0x0061, 0x0328, 0x0043, 0x0301,
+	0x0063, 0x0301, 0x0043, 0x0302, 0x0063, 0x0302, 0x0043, 0x0307,
+	0x0063, 0x0307, 0x0043, 0x030c, 0x0063, 0x030c, 0x0044, 0x030c,
+	0x0064, 0x030c, 0x0045, 0x0304, 0x0065, 0x0304, 0x0045, 0x0306,
+	0x0065, 0x0306, 0x0045, 0x0307, 0x0065, 0x0307, 0x0045, 0x0328,
+	0x0065, 0x0328, 0x0045, 0x030c, 0x0065, 0x030c, 0x0047, 0x0302,
+	0x0067, 0x0302, 0x0047, 0x0306, 0x0067, 0x0306, 0x0047, 0x0307,
+	0x0067, 0x0307, 0x0047, 0x0327, 0x0067, 0x0327, 0x0048, 0x0302,
+	0x0068, 0x0302, 0x0049, 0x0303, 0x0069, 0x0303, 0x0049, 0x0304,
+	0x0069, 0x0304, 0x0049, 0x0306, 0x0069, 0x0306, 0x0049, 0x0328,
+	0x0069, 0x0328, 0x0049, 0x0307, 0x004a, 0x0302, 0x006a, 0x0302,
+	0x004b, 0x0327, 0x006b, 0x0327, 0x004c, 0x0301, 0x006c, 0x0301,
+	0x004c, 0x0327, 0x006c, 0x0327, 0x004c, 0x030c, 0x006c, 0x030c,
+	0x004e, 0x0301, 0x006e, 0x0301, 0x004e, 0x0327, 0x006e, 0x0327,
+	0x004e, 0x030c, 0x006e, 0x030c, 0x004f, 0x0304, 0x006f, 0x0304,
+	0x004f, 0x0306, 0x006f, 0x0306, 0x004f, 0x030b, 0x006f, 0x030b,
+	0x0052, 0x0301, 0x0072, 0x0301, 0x0052, 0x0327, 0x0072, 0x0327,
+	0x0052, 0x030c, 0x0072, 0x030c, 0x0053, 0x0301, 0x0073, 0x0301,
+	0x0053, 0x0302, 0x0073, 0x0302, 0x0053, 0x0327, 0x0073, 0x0327,
+	0x0053, 0x030c, 0x0073, 0x030c, 0x0054, 0x0327, 0x0074, 0x0327,
+	0x0054, 0x030c, 0x0074, 0x030c, 0x0055, 0x0303, 0x0075, 0x0303,
+	0x0055, 0x0304, 0x0075, 0x0304, 0x0055, 0x0306, 0x0075, 0x0306,
+	0x0055, 0x030a, 0x0075, 0x030a, 0x0055, 0x030b, 0x0075, 0x030b,
+	0x0055, 0x0328, 0x0075, 0x0328, 0x0057, 0x0302, 0x0077, 0x0302,
+	0x0059, 0x0302, 0x0079, 0x0302, 0x0059, 0x0308, 0x005a, 0x0301,
+	0x007a, 0x0301, 0x005a, 0x0307, 0x007a, 0x0307, 0x005a, 0x030c,
+	0x007a, 0x030c, 0x004f, 0x031b, 0x006f, 0x031b, 0x0055, 0x031b,
+	0x0075, 0x031b, 0x0041, 0x030c, 0x0061, 0x030c, 0x0049, 0x030c,
+	0x0069, 0x030c, 0x004f, 0x030c, 0x006f, 0x030c, 0x0055, 0x030c,
+	0x0075, 0x030c, 0x0055, 0x0308, 0x0304, 0x0075, 0x0308, 0x0304,
+	0x0055, 0x0308, 0x0301, 0x0075, 0x0308, 0x0301, 0x0055, 0x0308,
+	0x030c, 0x0075, 0x0308, 0x030c, 0x0055, 0x0308, 0x0300, 0x0075,
+	0x0308, 0x0300, 0x0041, 0x0308, 0x0304, 0x0061, 0x0308, 0x0304,
+	0x0041, 0x0307, 0x0304, 0x0061, 0x0307, 0x0304, 0x00c6, 0x0304,
+	0x00e6, 0x0304, 0x0047, 0x030c, 0x0067, 0x030c, 0x004b, 0x030c,
+	0x006b, 0x030c, 0x004f, 0x0328, 0x006f, 0x0328, 0x004f, 0x0328,
+	0x0304, 0x006f, 0x0328, 0x0304, 0x01b7, 0x030c, 0x0292, 0x030c,
+	0x006a, 0x030c, 0x0047, 0x0301, 0x0067, 0x0301, 0x0041, 0x030a,
+	0x0301, 0x0061, 0x030a, 0x0301, 0x00c6, 0x0301, 0x00e6, 0x0301,
+	0x00d8, 0x0301, 0x00f8, 0x0301, 0x0041, 0x030f, 0x0061, 0x030f,
+	0x0041, 0x0311, 0x0061, 0x0311, 0x0045, 0x030f, 0x0065, 0x030f,
+	0x0045, 0x0311, 0x0065, 0x0311, 0x0049, 0x030f, 0x0069, 0x030f,
+	0x0049, 0x0311, 0x0069, 0x0311, 0x004f, 0x030f, 0x006f, 0x030f,
+	0x004f, 0x0311, 0x006f, 0x0311, 0x0052, 0x030f, 0x0072, 0x030f,
+	0x0052, 0x0311, 0x0072, 0x0311, 0x0055, 0x030f, 0x0075, 0x030f,
+	0x0055, 0x0311, 0x0075, 0x0311, 0x0306, 0x0307, 0x0300, 0x0301,
+	0x0313, 0x0308, 0x030d, 0x02b9, 0x003b, 0x00a8, 0x030d, 0x0391,
+	0x030d, 0x00b7, 0x0395, 0x030d, 0x0397, 0x030d, 0x0399, 0x030d,
+	0x039f, 0x030d, 0x03a5, 0x030d, 0x03a9, 0x030d, 0x03b9, 0x0308,
+	0x030d, 0x0399, 0x0308, 0x03a5, 0x0308, 0x03b1, 0x030d, 0x03b5,
+	0x030d, 0x03b7, 0x030d, 0x03b9, 0x030d, 0x03c5, 0x0308, 0x030d,
+	0x03b9, 0x0308, 0x03c5, 0x0308, 0x03bf, 0x030d, 0x03c5, 0x030d,
+	0x03c9, 0x030d, 0x03d2, 0x030d, 0x03d2, 0x0308, 0x0415, 0x0308,
+	0x0413, 0x0301, 0x0406, 0x0308, 0x041a, 0x0301, 0x0423, 0x0306,
+	0x0418, 0x0306, 0x0438, 0x0306, 0x0435, 0x0308, 0x0433, 0x0301,
+	0x0456, 0x0308, 0x043a, 0x0301, 0x0443, 0x0306, 0x0474, 0x030f,
+	0x0475, 0x030f, 0x0416, 0x0306, 0x0436, 0x0306, 0x0410, 0x0306,
+	0x0430, 0x0306, 0x0410, 0x0308, 0x0430, 0x0308, 0x00c6, 0x00e6,
+	0x0415, 0x0306, 0x0435, 0x0306, 0x018f, 0x0259, 0x018f, 0x0308,
+	0x0259, 0x0308, 0x0416, 0x0308, 0x0436, 0x0308, 0x0417, 0x0308,
+	0x0437, 0x0308, 0x01b7, 0x0292, 0x0418, 0x0304, 0x0438, 0x0304,
+	0x0418, 0x0308, 0x0438, 0x0308, 0x041e, 0x0308, 0x043e, 0x0308,
+	0x019f, 0x0275, 0x019f, 0x0308, 0x0275, 0x0308, 0x0423, 0x0304,
+	0x0443, 0x0304, 0x0423, 0x0308, 0x0443, 0x0308, 0x0423, 0x030b,
+	0x0443, 0x030b, 0x0427, 0x0308, 0x0447, 0x0308, 0x042b, 0x0308,
+	0x044b, 0x0308, 0x0928, 0x093c, 0x0930, 0x093c, 0x0933, 0x093c,
+	0x0915, 0x093c, 0x0916, 0x093c, 0x0917, 0x093c, 0x091c, 0x093c,
+	0x0921, 0x093c, 0x0922, 0x093c, 0x092b, 0x093c, 0x092f, 0x093c,
+	0x09ac, 0x09bc, 0x09c7, 0x09be, 0x09c7, 0x09d7, 0x09a1, 0x09bc,
+	0x09a2, 0x09bc, 0x09af, 0x09bc, 0x0a16, 0x0a3c, 0x0a17, 0x0a3c,
+	0x0a1c, 0x0a3c, 0x0a21, 0x0a3c, 0x0a2b, 0x0a3c, 0x0b47, 0x0b56,
+	0x0b47, 0x0b3e, 0x0b47, 0x0b57, 0x0b21, 0x0b3c, 0x0b22, 0x0b3c,
+	0x0b2f, 0x0b3c, 0x0b92, 0x0bd7, 0x0bc6, 0x0bbe, 0x0bc7, 0x0bbe,
+	0x0bc6, 0x0bd7, 0x0c46, 0x0c56, 0x0cbf, 0x0cd5, 0x0cc6, 0x0cd5,
+	0x0cc6, 0x0cd6, 0x0cc6, 0x0cc2, 0x0cc6, 0x0cc2, 0x0cd5, 0x0d46,
+	0x0d3e, 0x0d47, 0x0d3e, 0x0d46, 0x0d57, 0x0e4d, 0x0e32, 0x0ecd,
+	0x0eb2, 0x0f42, 0x0fb7, 0x0f4c, 0x0fb7, 0x0f51, 0x0fb7, 0x0f56,
+	0x0fb7, 0x0f5b, 0x0fb7, 0x0f40, 0x0fb5, 0x0f72, 0x0f71, 0x0f74,
+	0x0f71, 0x0fb2, 0x0f80, 0x0fb2, 0x0f80, 0x0f71, 0x0fb3, 0x0f80,
+	0x0fb3, 0x0f80, 0x0f71, 0x0f80, 0x0f71, 0x0f92, 0x0fb7, 0x0f9c,
+	0x0fb7, 0x0fa1, 0x0fb7, 0x0fa6, 0x0fb7, 0x0fab, 0x0fb7, 0x0f90,
+	0x0fb5, 0x0041, 0x0325, 0x0061, 0x0325, 0x0042, 0x0307, 0x0062,
+	0x0307, 0x0042, 0x0323, 0x0062, 0x0323, 0x0042, 0x0331, 0x0062,
+	0x0331, 0x0043, 0x0327, 0x0301, 0x0063, 0x0327, 0x0301, 0x0044,
+	0x0307, 0x0064, 0x0307, 0x0044, 0x0323, 0x0064, 0x0323, 0x0044,
+	0x0331, 0x0064, 0x0331, 0x0044, 0x0327, 0x0064, 0x0327, 0x0044,
+	0x032d, 0x0064, 0x032d, 0x0045, 0x0304, 0x0300, 0x0065, 0x0304,
+	0x0300, 0x0045, 0x0304, 0x0301, 0x0065, 0x0304, 0x0301, 0x0045,
+	0x032d, 0x0065, 0x032d, 0x0045, 0x0330, 0x0065, 0x0330, 0x0045,
+	0x0327, 0x0306, 0x0065, 0x0327, 0x0306, 0x0046, 0x0307, 0x0066,
+	0x0307, 0x0047, 0x0304, 0x0067, 0x0304, 0x0048, 0x0307, 0x0068,
+	0x0307, 0x0048, 0x0323, 0x0068, 0x0323, 0x0048, 0x0308, 0x0068,
+	0x0308, 0x0048, 0x0327, 0x0068, 0x0327, 0x0048, 0x032e, 0x0068,
+	0x032e, 0x0049, 0x0330, 0x0069, 0x0330, 0x0049, 0x0308, 0x0301,
+	0x0069, 0x0308, 0x0301, 0x004b, 0x0301, 0x006b, 0x0301, 0x004b,
+	0x0323, 0x006b, 0x0323, 0x004b, 0x0331, 0x006b, 0x0331, 0x004c,
+	0x0323, 0x006c, 0x0323, 0x004c, 0x0323, 0x0304, 0x006c, 0x0323,
+	0x0304, 0x004c, 0x0331, 0x006c, 0x0331, 0x004c, 0x032d, 0x006c,
+	0x032d, 0x004d, 0x0301, 0x006d, 0x0301, 0x004d, 0x0307, 0x006d,
+	0x0307, 0x004d, 0x0323, 0x006d, 0x0323, 0x004e, 0x0307, 0x006e,
+	0x0307, 0x004e, 0x0323, 0x006e, 0x0323, 0x004e, 0x0331, 0x006e,
+	0x0331, 0x004e, 0x032d, 0x006e, 0x032d, 0x004f, 0x0303, 0x0301,
+	0x006f, 0x0303, 0x0301, 0x004f, 0x0303, 0x0308, 0x006f, 0x0303,
+	0x0308, 0x004f, 0x0304, 0x0300, 0x006f, 0x0304, 0x0300, 0x004f,
+	0x0304, 0x0301, 0x006f, 0x0304, 0x0301, 0x0050, 0x0301, 0x0070,
+	0x0301, 0x0050, 0x0307, 0x0070, 0x0307, 0x0052, 0x0307, 0x0072,
+	0x0307, 0x0052, 0x0323, 0x0072, 0x0323, 0x0052, 0x0323, 0x0304,
+	0x0072, 0x0323, 0x0304, 0x0052, 0x0331, 0x0072, 0x0331, 0x0053,
+	0x0307, 0x0073, 0x0307, 0x0053, 0x0323, 0x0073, 0x0323, 0x0053,
+	0x0301, 0x0307, 0x0073, 0x0301, 0x0307, 0x0053, 0x030c, 0x0307,
+	0x0073, 0x030c, 0x0307, 0x0053, 0x0323, 0x0307, 0x0073, 0x0323,
+	0x0307, 0x0054, 0x0307, 0x0074, 0x0307, 0x0054, 0x0323, 0x0074,
+	0x0323, 0x0054, 0x0331, 0x0074, 0x0331, 0x0054, 0x032d, 0x0074,
+	0x032d, 0x0055, 0x0324, 0x0075, 0x0324, 0x0055, 0x0330, 0x0075,
+	0x0330, 0x0055, 0x032d, 0x0075, 0x032d, 0x0055, 0x0303, 0x0301,
+	0x0075, 0x0303, 0x0301, 0x0055, 0x0304, 0x0308, 0x0075, 0x0304,
+	0x0308, 0x0056, 0x0303, 0x0076, 0x0303, 0x0056, 0x0323, 0x0076,
+	0x0323, 0x0057, 0x0300, 0x0077, 0x0300, 0x0057, 0x0301, 0x0077,
+	0x0301, 0x0057, 0x0308, 0x0077, 0x0308, 0x0057, 0x0307, 0x0077,
+	0x0307, 0x0057, 0x0323, 0x0077, 0x0323, 0x0058, 0x0307, 0x0078,
+	0x0307, 0x0058, 0x0308, 0x0078, 0x0308, 0x0059, 0x0307, 0x0079,
+	0x0307, 0x005a, 0x0302, 0x007a, 0x0302, 0x005a, 0x0323, 0x007a,
+	0x0323, 0x005a, 0x0331, 0x007a, 0x0331, 0x0068, 0x0331, 0x0074,
+	0x0308, 0x0077, 0x030a, 0x0079, 0x030a, 0x017f, 0x0307, 0x0041,
+	0x0323, 0x0061, 0x0323, 0x0041, 0x0309, 0x0061, 0x0309, 0x0041,
+	0x0302, 0x0301, 0x0061, 0x0302, 0x0301, 0x0041, 0x0302, 0x0300,
+	0x0061, 0x0302, 0x0300, 0x0041, 0x0302, 0x0309, 0x0061, 0x0302,
+	0x0309, 0x0041, 0x0302, 0x0303, 0x0061, 0x0302, 0x0303, 0x0041,
+	0x0323, 0x0302, 0x0061, 0x0323, 0x0302, 0x0041, 0x0306, 0x0301,
+	0x0061, 0x0306, 0x0301, 0x0041, 0x0306, 0x0300, 0x0061, 0x0306,
+	0x0300, 0x0041, 0x0306, 0x0309, 0x0061, 0x0306, 0x0309, 0x0041,
+	0x0306, 0x0303, 0x0061, 0x0306, 0x0303, 0x0041, 0x0323, 0x0306,
+	0x0061, 0x0323, 0x0306, 0x0045, 0x0323, 0x0065, 0x0323, 0x0045,
+	0x0309, 0x0065, 0x0309, 0x0045, 0x0303, 0x0065, 0x0303, 0x0045,
+	0x0302, 0x0301, 0x0065, 0x0302, 0x0301, 0x0045, 0x0302, 0x0300,
+	0x0065, 0x0302, 0x0300, 0x0045, 0x0302, 0x0309, 0x0065, 0x0302,
+	0x0309, 0x0045, 0x0302, 0x0303, 0x0065, 0x0302, 0x0303, 0x0045,
+	0x0323, 0x0302, 0x0065, 0x0323, 0x0302, 0x0049, 0x0309, 0x0069,
+	0x0309, 0x0049, 0x0323, 0x0069, 0x0323, 0x004f, 0x0323, 0x006f,
+	0x0323, 0x004f, 0x0309, 0x006f, 0x0309, 0x004f, 0x0302, 0x0301,
+	0x006f, 0x0302, 0x0301, 0x004f, 0x0302, 0x0300, 0x006f, 0x0302,
+	0x0300, 0x004f, 0x0302, 0x0309, 0x006f, 0x0302, 0x0309, 0x004f,
+	0x0302, 0x0303, 0x006f, 0x0302, 0x0303, 0x004f, 0x0323, 0x0302,
+	0x006f, 0x0323, 0x0302, 0x004f, 0x031b, 0x0301, 0x006f, 0x031b,
+	0x0301, 0x004f, 0x031b, 0x0300, 0x006f, 0x031b, 0x0300, 0x004f,
+	0x031b, 0x0309, 0x006f, 0x031b, 0x0309, 0x004f, 0x031b, 0x0303,
+	0x006f, 0x031b, 0x0303, 0x004f, 0x031b, 0x0323, 0x006f, 0x031b,
+	0x0323, 0x0055, 0x0323, 0x0075, 0x0323, 0x0055, 0x0309, 0x0075,
+	0x0309, 0x0055, 0x031b, 0x0301, 0x0075, 0x031b, 0x0301, 0x0055,
+	0x031b, 0x0300, 0x0075, 0x031b, 0x0300, 0x0055, 0x031b, 0x0309,
+	0x0075, 0x031b, 0x0309, 0x0055, 0x031b, 0x0303, 0x0075, 0x031b,
+	0x0303, 0x0055, 0x031b, 0x0323, 0x0075, 0x031b, 0x0323, 0x0059,
+	0x0300, 0x0079, 0x0300, 0x0059, 0x0323, 0x0079, 0x0323, 0x0059,
+	0x0309, 0x0079, 0x0309, 0x0059, 0x0303, 0x0079, 0x0303, 0x03b1,
+	0x0313, 0x03b1, 0x0314, 0x03b1, 0x0313, 0x0300, 0x03b1, 0x0314,
+	0x0300, 0x03b1, 0x0313, 0x0301, 0x03b1, 0x0314, 0x0301, 0x03b1,
+	0x0313, 0x0342, 0x03b1, 0x0314, 0x0342, 0x0391, 0x0313, 0x0391,
+	0x0314, 0x0391, 0x0313, 0x0300, 0x0391, 0x0314, 0x0300, 0x0391,
+	0x0313, 0x0301, 0x0391, 0x0314, 0x0301, 0x0391, 0x0313, 0x0342,
+	0x0391, 0x0314, 0x0342, 0x03b5, 0x0313, 0x03b5, 0x0314, 0x03b5,
+	0x0313, 0x0300, 0x03b5, 0x0314, 0x0300, 0x03b5, 0x0313, 0x0301,
+	0x03b5, 0x0314, 0x0301, 0x0395, 0x0313, 0x0395, 0x0314, 0x0395,
+	0x0313, 0x0300, 0x0395, 0x0314, 0x0300, 0x0395, 0x0313, 0x0301,
+	0x0395, 0x0314, 0x0301, 0x03b7, 0x0313, 0x03b7, 0x0314, 0x03b7,
+	0x0313, 0x0300, 0x03b7, 0x0314, 0x0300, 0x03b7, 0x0313, 0x0301,
+	0x03b7, 0x0314, 0x0301, 0x03b7, 0x0313, 0x0342, 0x03b7, 0x0314,
+	0x0342, 0x0397, 0x0313, 0x0397, 0x0314, 0x0397, 0x0313, 0x0300,
+	0x0397, 0x0314, 0x0300, 0x0397, 0x0313, 0x0301, 0x0397, 0x0314,
+	0x0301, 0x0397, 0x0313, 0x0342, 0x0397, 0x0314, 0x0342, 0x03b9,
+	0x0313, 0x03b9, 0x0314, 0x03b9, 0x0313, 0x0300, 0x03b9, 0x0314,
+	0x0300, 0x03b9, 0x0313, 0x0301, 0x03b9, 0x0314, 0x0301, 0x03b9,
+	0x0313, 0x0342, 0x03b9, 0x0314, 0x0342, 0x0399, 0x0313, 0x0399,
+	0x0314, 0x0399, 0x0313, 0x0300, 0x0399, 0x0314, 0x0300, 0x0399,
+	0x0313, 0x0301, 0x0399, 0x0314, 0x0301, 0x0399, 0x0313, 0x0342,
+	0x0399, 0x0314, 0x0342, 0x03bf, 0x0313, 0x03bf, 0x0314, 0x03bf,
+	0x0313, 0x0300, 0x03bf, 0x0314, 0x0300, 0x03bf, 0x0313, 0x0301,
+	0x03bf, 0x0314, 0x0301, 0x039f, 0x0313, 0x039f, 0x0314, 0x039f,
+	0x0313, 0x0300, 0x039f, 0x0314, 0x0300, 0x039f, 0x0313, 0x0301,
+	0x039f, 0x0314, 0x0301, 0x03c5, 0x0313, 0x03c5, 0x0314, 0x03c5,
+	0x0313, 0x0300, 0x03c5, 0x0314, 0x0300, 0x03c5, 0x0313, 0x0301,
+	0x03c5, 0x0314, 0x0301, 0x03c5, 0x0313, 0x0342, 0x03c5, 0x0314,
+	0x0342, 0x03a5, 0x0314, 0x03a5, 0x0314, 0x0300, 0x03a5, 0x0314,
+	0x0301, 0x03a5, 0x0314, 0x0342, 0x03c9, 0x0313, 0x03c9, 0x0314,
+	0x03c9, 0x0313, 0x0300, 0x03c9, 0x0314, 0x0300, 0x03c9, 0x0313,
+	0x0301, 0x03c9, 0x0314, 0x0301, 0x03c9, 0x0313, 0x0342, 0x03c9,
+	0x0314, 0x0342, 0x03a9, 0x0313, 0x03a9, 0x0314, 0x03a9, 0x0313,
+	0x0300, 0x03a9, 0x0314, 0x0300, 0x03a9, 0x0313, 0x0301, 0x03a9,
+	0x0314, 0x0301, 0x03a9, 0x0313, 0x0342, 0x03a9, 0x0314, 0x0342,
+	0x03b1, 0x0300, 0x03b1, 0x0301, 0x03b5, 0x0300, 0x03b5, 0x0301,
+	0x03b7, 0x0300, 0x03b7, 0x0301, 0x03b9, 0x0300, 0x03b9, 0x0301,
+	0x03bf, 0x0300, 0x03bf, 0x0301, 0x03c5, 0x0300, 0x03c5, 0x0301,
+	0x03c9, 0x0300, 0x03c9, 0x0301, 0x03b1, 0x0345, 0x0313, 0x03b1,
+	0x0345, 0x0314, 0x03b1, 0x0345, 0x0313, 0x0300, 0x03b1, 0x0345,
+	0x0314, 0x0300, 0x03b1, 0x0345, 0x0313, 0x0301, 0x03b1, 0x0345,
+	0x0314, 0x0301, 0x03b1, 0x0345, 0x0313, 0x0342, 0x03b1, 0x0345,
+	0x0314, 0x0342, 0x0391, 0x0345, 0x0313, 0x0391, 0x0345, 0x0314,
+	0x0391, 0x0345, 0x0313, 0x0300, 0x0391, 0x0345, 0x0314, 0x0300,
+	0x0391, 0x0345, 0x0313, 0x0301, 0x0391, 0x0345, 0x0314, 0x0301,
+	0x0391, 0x0345, 0x0313, 0x0342, 0x0391, 0x0345, 0x0314, 0x0342,
+	0x03b7, 0x0345, 0x0313, 0x03b7, 0x0345, 0x0314, 0x03b7, 0x0345,
+	0x0313, 0x0300, 0x03b7, 0x0345, 0x0314, 0x0300, 0x03b7, 0x0345,
+	0x0313, 0x0301, 0x03b7, 0x0345, 0x0314, 0x0301, 0x03b7, 0x0345,
+	0x0313, 0x0342, 0x03b7, 0x0345, 0x0314, 0x0342, 0x0397, 0x0345,
+	0x0313, 0x0397, 0x0345, 0x0314, 0x0397, 0x0345, 0x0313, 0x0300,
+	0x0397, 0x0345, 0x0314, 0x0300, 0x0397, 0x0345, 0x0313, 0x0301,
+	0x0397, 0x0345, 0x0314, 0x0301, 0x0397, 0x0345, 0x0313, 0x0342,
+	0x0397, 0x0345, 0x0314, 0x0342, 0x03c9, 0x0345, 0x0313, 0x03c9,
+	0x0345, 0x0314, 0x03c9, 0x0345, 0x0313, 0x0300, 0x03c9, 0x0345,
+	0x0314, 0x0300, 0x03c9, 0x0345, 0x0313, 0x0301, 0x03c9, 0x0345,
+	0x0314, 0x0301, 0x03c9, 0x0345, 0x0313, 0x0342, 0x03c9, 0x0345,
+	0x0314, 0x0342, 0x03a9, 0x0345, 0x0313, 0x03a9, 0x0345, 0x0314,
+	0x03a9, 0x0345, 0x0313, 0x0300, 0x03a9, 0x0345, 0x0314, 0x0300,
+	0x03a9, 0x0345, 0x0313, 0x0301, 0x03a9, 0x0345, 0x0314, 0x0301,
+	0x03a9, 0x0345, 0x0313, 0x0342, 0x03a9, 0x0345, 0x0314, 0x0342,
+	0x03b1, 0x0306, 0x03b1, 0x0304, 0x03b1, 0x0345, 0x0300, 0x03b1,
+	0x0345, 0x03b1, 0x0345, 0x0301, 0x03b1, 0x0342, 0x03b1, 0x0345,
+	0x0342, 0x0391, 0x0306, 0x0391, 0x0304, 0x0391, 0x0300, 0x0391,
+	0x0301, 0x0391, 0x0345, 0x03b9, 0x00a8, 0x0342, 0x03b7, 0x0345,
+	0x0300, 0x03b7, 0x0345, 0x03b7, 0x0345, 0x0301, 0x03b7, 0x0342,
+	0x03b7, 0x0345, 0x0342, 0x0395, 0x0300, 0x0395, 0x0301, 0x0397,
+	0x0300, 0x0397, 0x0301, 0x0397, 0x0345, 0x1fbf, 0x0300, 0x1fbf,
+	0x0301, 0x1fbf, 0x0342, 0x03b9, 0x0306, 0x03b9, 0x0304, 0x03b9,
+	0x0308, 0x0300, 0x03b9, 0x0308, 0x0301, 0x03b9, 0x0342, 0x03b9,
+	0x0308, 0x0342, 0x0399, 0x0306, 0x0399, 0x0304, 0x0399, 0x0300,
+	0x0399, 0x0301, 0x1ffe, 0x0300, 0x1ffe, 0x0301, 0x1ffe, 0x0342,
+	0x03c5, 0x0306, 0x03c5, 0x0304, 0x03c5, 0x0308, 0x0300, 0x03c5,
+	0x0308, 0x0301, 0x03c1, 0x0313, 0x03c1, 0x0314, 0x03c5, 0x0342,
+	0x03c5, 0x0308, 0x0342, 0x03a5, 0x0306, 0x03a5, 0x0304, 0x03a5,
+	0x0300, 0x03a5, 0x0301, 0x03a1, 0x0314, 0x00a8, 0x0300, 0x00a8,
+	0x0301, 0x0060, 0x03c9, 0x0345, 0x0300, 0x03c9, 0x0345, 0x03bf,
+	0x0345, 0x0301, 0x03c9, 0x0342, 0x03c9, 0x0345, 0x0342, 0x039f,
+	0x0300, 0x039f, 0x0301, 0x03a9, 0x0300, 0x03a9, 0x0301, 0x03a9,
+	0x0345, 0x00b4, 0x304b, 0x3099, 0x304d, 0x3099, 0x304f, 0x3099,
+	0x3051, 0x3099, 0x3053, 0x3099, 0x3055, 0x3099, 0x3057, 0x3099,
+	0x3059, 0x3099, 0x305b, 0x3099, 0x305d, 0x3099, 0x305f, 0x3099,
+	0x3061, 0x3099, 0x3064, 0x3099, 0x3066, 0x3099, 0x3068, 0x3099,
+	0x306f, 0x3099, 0x306f, 0x309a, 0x3072, 0x3099, 0x3072, 0x309a,
+	0x3075, 0x3099, 0x3075, 0x309a, 0x3078, 0x3099, 0x3078, 0x309a,
+	0x307b, 0x3099, 0x307b, 0x309a, 0x3046, 0x3099, 0x309d, 0x3099,
+	0x30ab, 0x3099, 0x30ad, 0x3099, 0x30af, 0x3099, 0x30b1, 0x3099,
+	0x30b3, 0x3099, 0x30b5, 0x3099, 0x30b7, 0x3099, 0x30b9, 0x3099,
+	0x30bb, 0x3099, 0x30bd, 0x3099, 0x30bf, 0x3099, 0x30c1, 0x3099,
+	0x30c4, 0x3099, 0x30c6, 0x3099, 0x30c8, 0x3099, 0x30cf, 0x3099,
+	0x30cf, 0x309a, 0x30d2, 0x3099, 0x30d2, 0x309a, 0x30d5, 0x3099,
+	0x30d5, 0x309a, 0x30d8, 0x3099, 0x30d8, 0x309a, 0x30db, 0x3099,
+	0x30db, 0x309a, 0x30a6, 0x3099, 0x30ef, 0x3099, 0x30f0, 0x3099,
+	0x30f1, 0x3099, 0x30f2, 0x3099, 0x30fd, 0x3099, 0x05f2, 0x05b7,
+	0x05e9, 0x05c1, 0x05e9, 0x05c2, 0x05e9, 0x05bc, 0x05c1, 0x05e9,
+	0x05bc, 0x05c2, 0x05d0, 0x05b7, 0x05d0, 0x05b8, 0x05d0, 0x05bc,
+	0x05d1, 0x05bc, 0x05d2, 0x05bc, 0x05d3, 0x05bc, 0x05d4, 0x05bc,
+	0x05d5, 0x05bc, 0x05d6, 0x05bc, 0x05d8, 0x05bc, 0x05d9, 0x05bc,
+	0x05da, 0x05bc, 0x05db, 0x05bc, 0x05dc, 0x05bc, 0x05de, 0x05bc,
+	0x05e0, 0x05bc, 0x05e1, 0x05bc, 0x05e3, 0x05bc, 0x05e4, 0x05bc,
+	0x05e6, 0x05bc, 0x05e7, 0x05bc, 0x05e8, 0x05bc, 0x05e9, 0x05bc,
+	0x05ea, 0x05bc, 0x05d5, 0x05b9, 0x05d1, 0x05bf, 0x05db, 0x05bf,
+	0x05e4, 0x05bf
+};
+
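+/*
+ * Layout sketch, inferred from the data below rather than from a spec:
+ * the compose table is one flat u16 array encoding a search tree.  Each
+ * node is a pair { value, count } followed by `count` sorted child
+ * pairs { key, offset }, where offset is a u16 index back into this
+ * same array.  The root node ("base" below) is keyed by combining
+ * mark; its children are keyed by base character; deeper nodes are
+ * keyed by any further combining marks.  A node's `value` looks like
+ * the character the matched sequence composes to (0x0000 if none), and
+ * the Hangul jamo entries all share the { 0xffff, 0x0000 } marker
+ * node.  A lookup would binary-search the child keys at each level,
+ * roughly:
+ *
+ *	count = node[1];
+ *	binary-search key among node[2], node[4], ..., node[2*count];
+ *	on a match at child i, continue at table + node[2*i + 1];
+ */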
+u16 hfsplus_compose_table[] = {
+	/* base */
+	0x0000, 0x0050,  0x0300, 0x00a4,  0x0301, 0x00e4,  0x0302, 0x015c,
+	0x0303, 0x0192,  0x0304, 0x01b4,  0x0306, 0x01e6,  0x0307, 0x0220,
+	0x0308, 0x0270,  0x0309, 0x02d2,  0x030a, 0x02ec,  0x030b, 0x02fa,
+	0x030c, 0x0308,  0x030d, 0x034c,  0x030f, 0x0370,  0x0311, 0x038e,
+	0x0313, 0x03a8,  0x0314, 0x03c6,  0x031b, 0x03e8,  0x0323, 0x03f2,
+	0x0324, 0x0440,  0x0325, 0x0446,  0x0327, 0x044c,  0x0328, 0x047a,
+	0x032d, 0x0490,  0x032e, 0x04aa,  0x0330, 0x04b0,  0x0331, 0x04be,
+	0x0342, 0x04e2,  0x0345, 0x04f4,  0x05b7, 0x0504,  0x05b8, 0x050a,
+	0x05b9, 0x050e,  0x05bc, 0x0512,  0x05bf, 0x0540,  0x05c1, 0x0548,
+	0x05c2, 0x054c,  0x093c, 0x0550,  0x09bc, 0x0568,  0x09be, 0x0572,
+	0x09d7, 0x0576,  0x0a3c, 0x057a,  0x0b3c, 0x0586,  0x0b3e, 0x058e,
+	0x0b56, 0x0592,  0x0b57, 0x0596,  0x0bbe, 0x059a,  0x0bd7, 0x05a0,
+	0x0c56, 0x05a6,  0x0cc2, 0x05aa,  0x0cd5, 0x05ae,  0x0cd6, 0x05b4,
+	0x0d3e, 0x05b8,  0x0d57, 0x05be,  0x0e32, 0x05c2,  0x0eb2, 0x05c6,
+	0x0f71, 0x05ca,  0x0f80, 0x05d2,  0x0fb5, 0x05d8,  0x0fb7, 0x05de,
+	0x1100, 0x00a2,  0x1101, 0x00a2,  0x1102, 0x00a2,  0x1103, 0x00a2,
+	0x1104, 0x00a2,  0x1105, 0x00a2,  0x1106, 0x00a2,  0x1107, 0x00a2,
+	0x1108, 0x00a2,  0x1109, 0x00a2,  0x110a, 0x00a2,  0x110b, 0x00a2,
+	0x110c, 0x00a2,  0x110d, 0x00a2,  0x110e, 0x00a2,  0x110f, 0x00a2,
+	0x1110, 0x00a2,  0x1111, 0x00a2,  0x1112, 0x00a2,  0x3099, 0x05f4,
+	0x309a, 0x0656,
+	/* hangul marker */
+	0xffff, 0x0000,
+	/* 0x0300 */
+	0x0340, 0x001f,  0x0041, 0x066c,  0x0045, 0x066e,  0x0049, 0x0670,
+	0x004f, 0x0672,  0x0055, 0x0674,  0x0057, 0x0676,  0x0059, 0x0678,
+	0x0061, 0x067a,  0x0065, 0x067c,  0x0069, 0x067e,  0x006f, 0x0680,
+	0x0075, 0x0682,  0x0077, 0x0684,  0x0079, 0x0686,  0x00a8, 0x0688,
+	0x0391, 0x068a,  0x0395, 0x068c,  0x0397, 0x068e,  0x0399, 0x0690,
+	0x039f, 0x0692,  0x03a5, 0x0694,  0x03a9, 0x0696,  0x03b1, 0x0698,
+	0x03b5, 0x069a,  0x03b7, 0x069c,  0x03b9, 0x069e,  0x03bf, 0x06a0,
+	0x03c5, 0x06a2,  0x03c9, 0x06a4,  0x1fbf, 0x06a6,  0x1ffe, 0x06a8,
+	/* 0x0301 */
+	0x0341, 0x003b,  0x0041, 0x06aa,  0x0043, 0x06ac,  0x0045, 0x06ae,
+	0x0047, 0x06b0,  0x0049, 0x06b2,  0x004b, 0x06b4,  0x004c, 0x06b6,
+	0x004d, 0x06b8,  0x004e, 0x06ba,  0x004f, 0x06bc,  0x0050, 0x06be,
+	0x0052, 0x06c0,  0x0053, 0x06c2,  0x0055, 0x06c6,  0x0057, 0x06c8,
+	0x0059, 0x06ca,  0x005a, 0x06cc,  0x0061, 0x06ce,  0x0063, 0x06d0,
+	0x0065, 0x06d2,  0x0067, 0x06d4,  0x0069, 0x06d6,  0x006b, 0x06d8,
+	0x006c, 0x06da,  0x006d, 0x06dc,  0x006e, 0x06de,  0x006f, 0x06e0,
+	0x0070, 0x06e2,  0x0072, 0x06e4,  0x0073, 0x06e6,  0x0075, 0x06ea,
+	0x0077, 0x06ec,  0x0079, 0x06ee,  0x007a, 0x06f0,  0x00a8, 0x06f2,
+	0x00c6, 0x06f4,  0x00d8, 0x06f6,  0x00e6, 0x06f8,  0x00f8, 0x06fa,
+	0x0391, 0x06fc,  0x0395, 0x06fe,  0x0397, 0x0700,  0x0399, 0x0702,
+	0x039f, 0x0704,  0x03a5, 0x0706,  0x03a9, 0x0708,  0x03b1, 0x070a,
+	0x03b5, 0x070c,  0x03b7, 0x070e,  0x03b9, 0x0710,  0x03bf, 0x0712,
+	0x03c5, 0x0714,  0x03c9, 0x0716,  0x0413, 0x0718,  0x041a, 0x071a,
+	0x0433, 0x071c,  0x043a, 0x071e,  0x1fbf, 0x0720,  0x1ffe, 0x0722,
+	/* 0x0302 */
+	0x0000, 0x001a,  0x0041, 0x0724,  0x0043, 0x072e,  0x0045, 0x0730,
+	0x0047, 0x073a,  0x0048, 0x073c,  0x0049, 0x073e,  0x004a, 0x0740,
+	0x004f, 0x0742,  0x0053, 0x074c,  0x0055, 0x074e,  0x0057, 0x0750,
+	0x0059, 0x0752,  0x005a, 0x0754,  0x0061, 0x0756,  0x0063, 0x0760,
+	0x0065, 0x0762,  0x0067, 0x076c,  0x0068, 0x076e,  0x0069, 0x0770,
+	0x006a, 0x0772,  0x006f, 0x0774,  0x0073, 0x077e,  0x0075, 0x0780,
+	0x0077, 0x0782,  0x0079, 0x0784,  0x007a, 0x0786,
+	/* 0x0303 */
+	0x0000, 0x0010,  0x0041, 0x0788,  0x0045, 0x078a,  0x0049, 0x078c,
+	0x004e, 0x078e,  0x004f, 0x0790,  0x0055, 0x0796,  0x0056, 0x079a,
+	0x0059, 0x079c,  0x0061, 0x079e,  0x0065, 0x07a0,  0x0069, 0x07a2,
+	0x006e, 0x07a4,  0x006f, 0x07a6,  0x0075, 0x07ac,  0x0076, 0x07b0,
+	0x0079, 0x07b2,
+	/* 0x0304 */
+	0x0000, 0x0018,  0x0041, 0x07b4,  0x0045, 0x07b6,  0x0047, 0x07bc,
+	0x0049, 0x07be,  0x004f, 0x07c0,  0x0055, 0x07c6,  0x0061, 0x07ca,
+	0x0065, 0x07cc,  0x0067, 0x07d2,  0x0069, 0x07d4,  0x006f, 0x07d6,
+	0x0075, 0x07dc,  0x00c6, 0x07e0,  0x00e6, 0x07e2,  0x0391, 0x07e4,
+	0x0399, 0x07e6,  0x03a5, 0x07e8,  0x03b1, 0x07ea,  0x03b9, 0x07ec,
+	0x03c5, 0x07ee,  0x0418, 0x07f0,  0x0423, 0x07f2,  0x0438, 0x07f4,
+	0x0443, 0x07f6,
+	/* 0x0306 */
+	0x0000, 0x001c,  0x0041, 0x07f8,  0x0045, 0x0802,  0x0047, 0x0804,
+	0x0049, 0x0806,  0x004f, 0x0808,  0x0055, 0x080a,  0x0061, 0x080c,
+	0x0065, 0x0816,  0x0067, 0x0818,  0x0069, 0x081a,  0x006f, 0x081c,
+	0x0075, 0x081e,  0x0391, 0x0820,  0x0399, 0x0822,  0x03a5, 0x0824,
+	0x03b1, 0x0826,  0x03b9, 0x0828,  0x03c5, 0x082a,  0x0410, 0x082c,
+	0x0415, 0x082e,  0x0416, 0x0830,  0x0418, 0x0832,  0x0423, 0x0834,
+	0x0430, 0x0836,  0x0435, 0x0838,  0x0436, 0x083a,  0x0438, 0x083c,
+	0x0443, 0x083e,
+	/* 0x0307 */
+	0x0000, 0x0027,  0x0041, 0x0840,  0x0042, 0x0844,  0x0043, 0x0846,
+	0x0044, 0x0848,  0x0045, 0x084a,  0x0046, 0x084c,  0x0047, 0x084e,
+	0x0048, 0x0850,  0x0049, 0x0852,  0x004d, 0x0854,  0x004e, 0x0856,
+	0x0050, 0x0858,  0x0052, 0x085a,  0x0053, 0x085c,  0x0054, 0x085e,
+	0x0057, 0x0860,  0x0058, 0x0862,  0x0059, 0x0864,  0x005a, 0x0866,
+	0x0061, 0x0868,  0x0062, 0x086c,  0x0063, 0x086e,  0x0064, 0x0870,
+	0x0065, 0x0872,  0x0066, 0x0874,  0x0067, 0x0876,  0x0068, 0x0878,
+	0x006d, 0x087a,  0x006e, 0x087c,  0x0070, 0x087e,  0x0072, 0x0880,
+	0x0073, 0x0882,  0x0074, 0x0884,  0x0077, 0x0886,  0x0078, 0x0888,
+	0x0079, 0x088a,  0x007a, 0x088c,  0x017f, 0x088e,  0x0306, 0x0890,
+	/* 0x0308 */
+	0x0000, 0x0030,  0x0041, 0x0892,  0x0045, 0x0896,  0x0048, 0x0898,
+	0x0049, 0x089a,  0x004f, 0x089e,  0x0055, 0x08a0,  0x0057, 0x08aa,
+	0x0058, 0x08ac,  0x0059, 0x08ae,  0x0061, 0x08b0,  0x0065, 0x08b4,
+	0x0068, 0x08b6,  0x0069, 0x08b8,  0x006f, 0x08bc,  0x0074, 0x08be,
+	0x0075, 0x08c0,  0x0077, 0x08ca,  0x0078, 0x08cc,  0x0079, 0x08ce,
+	0x018f, 0x08d0,  0x019f, 0x08d2,  0x0259, 0x08d4,  0x0275, 0x08d6,
+	0x0399, 0x08d8,  0x03a5, 0x08da,  0x03b9, 0x08dc,  0x03c5, 0x08e6,
+	0x03d2, 0x08f0,  0x0406, 0x08f2,  0x0410, 0x08f4,  0x0415, 0x08f6,
+	0x0416, 0x08f8,  0x0417, 0x08fa,  0x0418, 0x08fc,  0x041e, 0x08fe,
+	0x0423, 0x0900,  0x0427, 0x0902,  0x042b, 0x0904,  0x0430, 0x0906,
+	0x0435, 0x0908,  0x0436, 0x090a,  0x0437, 0x090c,  0x0438, 0x090e,
+	0x043e, 0x0910,  0x0443, 0x0912,  0x0447, 0x0914,  0x044b, 0x0916,
+	0x0456, 0x0918,
+	/* 0x0309 */
+	0x0000, 0x000c,  0x0041, 0x091a,  0x0045, 0x091c,  0x0049, 0x091e,
+	0x004f, 0x0920,  0x0055, 0x0922,  0x0059, 0x0924,  0x0061, 0x0926,
+	0x0065, 0x0928,  0x0069, 0x092a,  0x006f, 0x092c,  0x0075, 0x092e,
+	0x0079, 0x0930,
+	/* 0x030a */
+	0x0000, 0x0006,  0x0041, 0x0932,  0x0055, 0x0936,  0x0061, 0x0938,
+	0x0075, 0x093c,  0x0077, 0x093e,  0x0079, 0x0940,
+	/* 0x030b */
+	0x0000, 0x0006,  0x004f, 0x0942,  0x0055, 0x0944,  0x006f, 0x0946,
+	0x0075, 0x0948,  0x0423, 0x094a,  0x0443, 0x094c,
+	/* 0x030c */
+	0x0000, 0x0021,  0x0041, 0x094e,  0x0043, 0x0950,  0x0044, 0x0952,
+	0x0045, 0x0954,  0x0047, 0x0956,  0x0049, 0x0958,  0x004b, 0x095a,
+	0x004c, 0x095c,  0x004e, 0x095e,  0x004f, 0x0960,  0x0052, 0x0962,
+	0x0053, 0x0964,  0x0054, 0x0968,  0x0055, 0x096a,  0x005a, 0x096c,
+	0x0061, 0x096e,  0x0063, 0x0970,  0x0064, 0x0972,  0x0065, 0x0974,
+	0x0067, 0x0976,  0x0069, 0x0978,  0x006a, 0x097a,  0x006b, 0x097c,
+	0x006c, 0x097e,  0x006e, 0x0980,  0x006f, 0x0982,  0x0072, 0x0984,
+	0x0073, 0x0986,  0x0074, 0x098a,  0x0075, 0x098c,  0x007a, 0x098e,
+	0x01b7, 0x0990,  0x0292, 0x0992,
+	/* 0x030d */
+	0x0000, 0x0011,  0x00a8, 0x0994,  0x0308, 0x0996,  0x0391, 0x0998,
+	0x0395, 0x099a,  0x0397, 0x099c,  0x0399, 0x099e,  0x039f, 0x09a0,
+	0x03a5, 0x09a2,  0x03a9, 0x09a4,  0x03b1, 0x09a6,  0x03b5, 0x09a8,
+	0x03b7, 0x09aa,  0x03b9, 0x09ac,  0x03bf, 0x09ae,  0x03c5, 0x09b0,
+	0x03c9, 0x09b2,  0x03d2, 0x09b4,
+	/* 0x030f */
+	0x0000, 0x000e,  0x0041, 0x09b6,  0x0045, 0x09b8,  0x0049, 0x09ba,
+	0x004f, 0x09bc,  0x0052, 0x09be,  0x0055, 0x09c0,  0x0061, 0x09c2,
+	0x0065, 0x09c4,  0x0069, 0x09c6,  0x006f, 0x09c8,  0x0072, 0x09ca,
+	0x0075, 0x09cc,  0x0474, 0x09ce,  0x0475, 0x09d0,
+	/* 0x0311 */
+	0x0000, 0x000c,  0x0041, 0x09d2,  0x0045, 0x09d4,  0x0049, 0x09d6,
+	0x004f, 0x09d8,  0x0052, 0x09da,  0x0055, 0x09dc,  0x0061, 0x09de,
+	0x0065, 0x09e0,  0x0069, 0x09e2,  0x006f, 0x09e4,  0x0072, 0x09e6,
+	0x0075, 0x09e8,
+	/* 0x0313 */
+	0x0343, 0x000e,  0x0391, 0x09ea,  0x0395, 0x09f2,  0x0397, 0x09f8,
+	0x0399, 0x0a00,  0x039f, 0x0a08,  0x03a9, 0x0a0e,  0x03b1, 0x0a16,
+	0x03b5, 0x0a1e,  0x03b7, 0x0a24,  0x03b9, 0x0a2c,  0x03bf, 0x0a34,
+	0x03c1, 0x0a3a,  0x03c5, 0x0a3c,  0x03c9, 0x0a44,
+	/* 0x0314 */
+	0x0000, 0x0010,  0x0391, 0x0a4c,  0x0395, 0x0a54,  0x0397, 0x0a5a,
+	0x0399, 0x0a62,  0x039f, 0x0a6a,  0x03a1, 0x0a70,  0x03a5, 0x0a72,
+	0x03a9, 0x0a7a,  0x03b1, 0x0a82,  0x03b5, 0x0a8a,  0x03b7, 0x0a90,
+	0x03b9, 0x0a98,  0x03bf, 0x0aa0,  0x03c1, 0x0aa6,  0x03c5, 0x0aa8,
+	0x03c9, 0x0ab0,
+	/* 0x031b */
+	0x0000, 0x0004,  0x004f, 0x0ab8,  0x0055, 0x0ac4,  0x006f, 0x0ad0,
+	0x0075, 0x0adc,
+	/* 0x0323 */
+	0x0000, 0x0026,  0x0041, 0x0ae8,  0x0042, 0x0aee,  0x0044, 0x0af0,
+	0x0045, 0x0af2,  0x0048, 0x0af6,  0x0049, 0x0af8,  0x004b, 0x0afa,
+	0x004c, 0x0afc,  0x004d, 0x0b00,  0x004e, 0x0b02,  0x004f, 0x0b04,
+	0x0052, 0x0b08,  0x0053, 0x0b0c,  0x0054, 0x0b10,  0x0055, 0x0b12,
+	0x0056, 0x0b14,  0x0057, 0x0b16,  0x0059, 0x0b18,  0x005a, 0x0b1a,
+	0x0061, 0x0b1c,  0x0062, 0x0b22,  0x0064, 0x0b24,  0x0065, 0x0b26,
+	0x0068, 0x0b2a,  0x0069, 0x0b2c,  0x006b, 0x0b2e,  0x006c, 0x0b30,
+	0x006d, 0x0b34,  0x006e, 0x0b36,  0x006f, 0x0b38,  0x0072, 0x0b3c,
+	0x0073, 0x0b40,  0x0074, 0x0b44,  0x0075, 0x0b46,  0x0076, 0x0b48,
+	0x0077, 0x0b4a,  0x0079, 0x0b4c,  0x007a, 0x0b4e,
+	/* 0x0324 */
+	0x0000, 0x0002,  0x0055, 0x0b50,  0x0075, 0x0b52,
+	/* 0x0325 */
+	0x0000, 0x0002,  0x0041, 0x0b54,  0x0061, 0x0b56,
+	/* 0x0327 */
+	0x0000, 0x0016,  0x0043, 0x0b58,  0x0044, 0x0b5c,  0x0045, 0x0b5e,
+	0x0047, 0x0b62,  0x0048, 0x0b64,  0x004b, 0x0b66,  0x004c, 0x0b68,
+	0x004e, 0x0b6a,  0x0052, 0x0b6c,  0x0053, 0x0b6e,  0x0054, 0x0b70,
+	0x0063, 0x0b72,  0x0064, 0x0b76,  0x0065, 0x0b78,  0x0067, 0x0b7c,
+	0x0068, 0x0b7e,  0x006b, 0x0b80,  0x006c, 0x0b82,  0x006e, 0x0b84,
+	0x0072, 0x0b86,  0x0073, 0x0b88,  0x0074, 0x0b8a,
+	/* 0x0328 */
+	0x0000, 0x000a,  0x0041, 0x0b8c,  0x0045, 0x0b8e,  0x0049, 0x0b90,
+	0x004f, 0x0b92,  0x0055, 0x0b96,  0x0061, 0x0b98,  0x0065, 0x0b9a,
+	0x0069, 0x0b9c,  0x006f, 0x0b9e,  0x0075, 0x0ba2,
+	/* 0x032d */
+	0x0000, 0x000c,  0x0044, 0x0ba4,  0x0045, 0x0ba6,  0x004c, 0x0ba8,
+	0x004e, 0x0baa,  0x0054, 0x0bac,  0x0055, 0x0bae,  0x0064, 0x0bb0,
+	0x0065, 0x0bb2,  0x006c, 0x0bb4,  0x006e, 0x0bb6,  0x0074, 0x0bb8,
+	0x0075, 0x0bba,
+	/* 0x032e */
+	0x0000, 0x0002,  0x0048, 0x0bbc,  0x0068, 0x0bbe,
+	/* 0x0330 */
+	0x0000, 0x0006,  0x0045, 0x0bc0,  0x0049, 0x0bc2,  0x0055, 0x0bc4,
+	0x0065, 0x0bc6,  0x0069, 0x0bc8,  0x0075, 0x0bca,
+	/* 0x0331 */
+	0x0000, 0x0011,  0x0042, 0x0bcc,  0x0044, 0x0bce,  0x004b, 0x0bd0,
+	0x004c, 0x0bd2,  0x004e, 0x0bd4,  0x0052, 0x0bd6,  0x0054, 0x0bd8,
+	0x005a, 0x0bda,  0x0062, 0x0bdc,  0x0064, 0x0bde,  0x0068, 0x0be0,
+	0x006b, 0x0be2,  0x006c, 0x0be4,  0x006e, 0x0be6,  0x0072, 0x0be8,
+	0x0074, 0x0bea,  0x007a, 0x0bec,
+	/* 0x0342 */
+	0x0000, 0x0008,  0x00a8, 0x0bee,  0x03b1, 0x0bf0,  0x03b7, 0x0bf2,
+	0x03b9, 0x0bf4,  0x03c5, 0x0bf6,  0x03c9, 0x0bf8,  0x1fbf, 0x0bfa,
+	0x1ffe, 0x0bfc,
+	/* 0x0345 */
+	0x0000, 0x0007,  0x0391, 0x0bfe,  0x0397, 0x0c04,  0x03a9, 0x0c0a,
+	0x03b1, 0x0c10,  0x03b7, 0x0c1c,  0x03bf, 0x0c28,  0x03c9, 0x0c2c,
+	/* 0x05b7 */
+	0x0000, 0x0002,  0x05d0, 0x0c36,  0x05f2, 0x0c38,
+	/* 0x05b8 */
+	0x0000, 0x0001,  0x05d0, 0x0c3a,
+	/* 0x05b9 */
+	0x0000, 0x0001,  0x05d5, 0x0c3c,
+	/* 0x05bc */
+	0x0000, 0x0016,  0x05d0, 0x0c3e,  0x05d1, 0x0c40,  0x05d2, 0x0c42,
+	0x05d3, 0x0c44,  0x05d4, 0x0c46,  0x05d5, 0x0c48,  0x05d6, 0x0c4a,
+	0x05d8, 0x0c4c,  0x05d9, 0x0c4e,  0x05da, 0x0c50,  0x05db, 0x0c52,
+	0x05dc, 0x0c54,  0x05de, 0x0c56,  0x05e0, 0x0c58,  0x05e1, 0x0c5a,
+	0x05e3, 0x0c5c,  0x05e4, 0x0c5e,  0x05e6, 0x0c60,  0x05e7, 0x0c62,
+	0x05e8, 0x0c64,  0x05e9, 0x0c66,  0x05ea, 0x0c6c,
+	/* 0x05bf */
+	0x0000, 0x0003,  0x05d1, 0x0c6e,  0x05db, 0x0c70,  0x05e4, 0x0c72,
+	/* 0x05c1 */
+	0x0000, 0x0001,  0x05e9, 0x0c74,
+	/* 0x05c2 */
+	0x0000, 0x0001,  0x05e9, 0x0c76,
+	/* 0x093c */
+	0x0000, 0x000b,  0x0915, 0x0c78,  0x0916, 0x0c7a,  0x0917, 0x0c7c,
+	0x091c, 0x0c7e,  0x0921, 0x0c80,  0x0922, 0x0c82,  0x0928, 0x0c84,
+	0x092b, 0x0c86,  0x092f, 0x0c88,  0x0930, 0x0c8a,  0x0933, 0x0c8c,
+	/* 0x09bc */
+	0x0000, 0x0004,  0x09a1, 0x0c8e,  0x09a2, 0x0c90,  0x09ac, 0x0c92,
+	0x09af, 0x0c94,
+	/* 0x09be */
+	0x0000, 0x0001,  0x09c7, 0x0c96,
+	/* 0x09d7 */
+	0x0000, 0x0001,  0x09c7, 0x0c98,
+	/* 0x0a3c */
+	0x0000, 0x0005,  0x0a16, 0x0c9a,  0x0a17, 0x0c9c,  0x0a1c, 0x0c9e,
+	0x0a21, 0x0ca0,  0x0a2b, 0x0ca2,
+	/* 0x0b3c */
+	0x0000, 0x0003,  0x0b21, 0x0ca4,  0x0b22, 0x0ca6,  0x0b2f, 0x0ca8,
+	/* 0x0b3e */
+	0x0000, 0x0001,  0x0b47, 0x0caa,
+	/* 0x0b56 */
+	0x0000, 0x0001,  0x0b47, 0x0cac,
+	/* 0x0b57 */
+	0x0000, 0x0001,  0x0b47, 0x0cae,
+	/* 0x0bbe */
+	0x0000, 0x0002,  0x0bc6, 0x0cb0,  0x0bc7, 0x0cb2,
+	/* 0x0bd7 */
+	0x0000, 0x0002,  0x0b92, 0x0cb4,  0x0bc6, 0x0cb6,
+	/* 0x0c56 */
+	0x0000, 0x0001,  0x0c46, 0x0cb8,
+	/* 0x0cc2 */
+	0x0000, 0x0001,  0x0cc6, 0x0cba,
+	/* 0x0cd5 */
+	0x0000, 0x0002,  0x0cbf, 0x0cbe,  0x0cc6, 0x0cc0,
+	/* 0x0cd6 */
+	0x0000, 0x0001,  0x0cc6, 0x0cc2,
+	/* 0x0d3e */
+	0x0000, 0x0002,  0x0d46, 0x0cc4,  0x0d47, 0x0cc6,
+	/* 0x0d57 */
+	0x0000, 0x0001,  0x0d46, 0x0cc8,
+	/* 0x0e32 */
+	0x0000, 0x0001,  0x0e4d, 0x0cca,
+	/* 0x0eb2 */
+	0x0000, 0x0001,  0x0ecd, 0x0ccc,
+	/* 0x0f71 */
+	0x0000, 0x0003,  0x0f72, 0x0cce,  0x0f74, 0x0cd0,  0x0f80, 0x0cd2,
+	/* 0x0f80 */
+	0x0000, 0x0002,  0x0fb2, 0x0cd4,  0x0fb3, 0x0cd8,
+	/* 0x0fb5 */
+	0x0000, 0x0002,  0x0f40, 0x0cdc,  0x0f90, 0x0cde,
+	/* 0x0fb7 */
+	0x0000, 0x000a,  0x0f42, 0x0ce0,  0x0f4c, 0x0ce2,  0x0f51, 0x0ce4,
+	0x0f56, 0x0ce6,  0x0f5b, 0x0ce8,  0x0f92, 0x0cea,  0x0f9c, 0x0cec,
+	0x0fa1, 0x0cee,  0x0fa6, 0x0cf0,  0x0fab, 0x0cf2,
+	/* 0x3099 */
+	0x0000, 0x0030,  0x3046, 0x0cf4,  0x304b, 0x0cf6,  0x304d, 0x0cf8,
+	0x304f, 0x0cfa,  0x3051, 0x0cfc,  0x3053, 0x0cfe,  0x3055, 0x0d00,
+	0x3057, 0x0d02,  0x3059, 0x0d04,  0x305b, 0x0d06,  0x305d, 0x0d08,
+	0x305f, 0x0d0a,  0x3061, 0x0d0c,  0x3064, 0x0d0e,  0x3066, 0x0d10,
+	0x3068, 0x0d12,  0x306f, 0x0d14,  0x3072, 0x0d16,  0x3075, 0x0d18,
+	0x3078, 0x0d1a,  0x307b, 0x0d1c,  0x309d, 0x0d1e,  0x30a6, 0x0d20,
+	0x30ab, 0x0d22,  0x30ad, 0x0d24,  0x30af, 0x0d26,  0x30b1, 0x0d28,
+	0x30b3, 0x0d2a,  0x30b5, 0x0d2c,  0x30b7, 0x0d2e,  0x30b9, 0x0d30,
+	0x30bb, 0x0d32,  0x30bd, 0x0d34,  0x30bf, 0x0d36,  0x30c1, 0x0d38,
+	0x30c4, 0x0d3a,  0x30c6, 0x0d3c,  0x30c8, 0x0d3e,  0x30cf, 0x0d40,
+	0x30d2, 0x0d42,  0x30d5, 0x0d44,  0x30d8, 0x0d46,  0x30db, 0x0d48,
+	0x30ef, 0x0d4a,  0x30f0, 0x0d4c,  0x30f1, 0x0d4e,  0x30f2, 0x0d50,
+	0x30fd, 0x0d52,
+	/* 0x309a */
+	0x0000, 0x000a,  0x306f, 0x0d54,  0x3072, 0x0d56,  0x3075, 0x0d58,
+	0x3078, 0x0d5a,  0x307b, 0x0d5c,  0x30cf, 0x0d5e,  0x30d2, 0x0d60,
+	0x30d5, 0x0d62,  0x30d8, 0x0d64,  0x30db, 0x0d66,
+	/* 0x0041 0x0300 */
+	0x00c0, 0x0000,
+	/* 0x0045 0x0300 */
+	0x00c8, 0x0000,
+	/* 0x0049 0x0300 */
+	0x00cc, 0x0000,
+	/* 0x004f 0x0300 */
+	0x00d2, 0x0000,
+	/* 0x0055 0x0300 */
+	0x00d9, 0x0000,
+	/* 0x0057 0x0300 */
+	0x1e80, 0x0000,
+	/* 0x0059 0x0300 */
+	0x1ef2, 0x0000,
+	/* 0x0061 0x0300 */
+	0x00e0, 0x0000,
+	/* 0x0065 0x0300 */
+	0x00e8, 0x0000,
+	/* 0x0069 0x0300 */
+	0x00ec, 0x0000,
+	/* 0x006f 0x0300 */
+	0x00f2, 0x0000,
+	/* 0x0075 0x0300 */
+	0x00f9, 0x0000,
+	/* 0x0077 0x0300 */
+	0x1e81, 0x0000,
+	/* 0x0079 0x0300 */
+	0x1ef3, 0x0000,
+	/* 0x00a8 0x0300 */
+	0x1fed, 0x0000,
+	/* 0x0391 0x0300 */
+	0x1fba, 0x0000,
+	/* 0x0395 0x0300 */
+	0x1fc8, 0x0000,
+	/* 0x0397 0x0300 */
+	0x1fca, 0x0000,
+	/* 0x0399 0x0300 */
+	0x1fda, 0x0000,
+	/* 0x039f 0x0300 */
+	0x1ff8, 0x0000,
+	/* 0x03a5 0x0300 */
+	0x1fea, 0x0000,
+	/* 0x03a9 0x0300 */
+	0x1ffa, 0x0000,
+	/* 0x03b1 0x0300 */
+	0x1f70, 0x0000,
+	/* 0x03b5 0x0300 */
+	0x1f72, 0x0000,
+	/* 0x03b7 0x0300 */
+	0x1f74, 0x0000,
+	/* 0x03b9 0x0300 */
+	0x1f76, 0x0000,
+	/* 0x03bf 0x0300 */
+	0x1f78, 0x0000,
+	/* 0x03c5 0x0300 */
+	0x1f7a, 0x0000,
+	/* 0x03c9 0x0300 */
+	0x1f7c, 0x0000,
+	/* 0x1fbf 0x0300 */
+	0x1fcd, 0x0000,
+	/* 0x1ffe 0x0300 */
+	0x1fdd, 0x0000,
+	/* 0x0041 0x0301 */
+	0x00c1, 0x0000,
+	/* 0x0043 0x0301 */
+	0x0106, 0x0000,
+	/* 0x0045 0x0301 */
+	0x00c9, 0x0000,
+	/* 0x0047 0x0301 */
+	0x01f4, 0x0000,
+	/* 0x0049 0x0301 */
+	0x00cd, 0x0000,
+	/* 0x004b 0x0301 */
+	0x1e30, 0x0000,
+	/* 0x004c 0x0301 */
+	0x0139, 0x0000,
+	/* 0x004d 0x0301 */
+	0x1e3e, 0x0000,
+	/* 0x004e 0x0301 */
+	0x0143, 0x0000,
+	/* 0x004f 0x0301 */
+	0x00d3, 0x0000,
+	/* 0x0050 0x0301 */
+	0x1e54, 0x0000,
+	/* 0x0052 0x0301 */
+	0x0154, 0x0000,
+	/* 0x0053 0x0301 */
+	0x015a, 0x0001,  0x0307, 0x0d68,
+	/* 0x0055 0x0301 */
+	0x00da, 0x0000,
+	/* 0x0057 0x0301 */
+	0x1e82, 0x0000,
+	/* 0x0059 0x0301 */
+	0x00dd, 0x0000,
+	/* 0x005a 0x0301 */
+	0x0179, 0x0000,
+	/* 0x0061 0x0301 */
+	0x00e1, 0x0000,
+	/* 0x0063 0x0301 */
+	0x0107, 0x0000,
+	/* 0x0065 0x0301 */
+	0x00e9, 0x0000,
+	/* 0x0067 0x0301 */
+	0x01f5, 0x0000,
+	/* 0x0069 0x0301 */
+	0x00ed, 0x0000,
+	/* 0x006b 0x0301 */
+	0x1e31, 0x0000,
+	/* 0x006c 0x0301 */
+	0x013a, 0x0000,
+	/* 0x006d 0x0301 */
+	0x1e3f, 0x0000,
+	/* 0x006e 0x0301 */
+	0x0144, 0x0000,
+	/* 0x006f 0x0301 */
+	0x00f3, 0x0000,
+	/* 0x0070 0x0301 */
+	0x1e55, 0x0000,
+	/* 0x0072 0x0301 */
+	0x0155, 0x0000,
+	/* 0x0073 0x0301 */
+	0x015b, 0x0001,  0x0307, 0x0d6a,
+	/* 0x0075 0x0301 */
+	0x00fa, 0x0000,
+	/* 0x0077 0x0301 */
+	0x1e83, 0x0000,
+	/* 0x0079 0x0301 */
+	0x00fd, 0x0000,
+	/* 0x007a 0x0301 */
+	0x017a, 0x0000,
+	/* 0x00a8 0x0301 */
+	0x1fee, 0x0000,
+	/* 0x00c6 0x0301 */
+	0x01fc, 0x0000,
+	/* 0x00d8 0x0301 */
+	0x01fe, 0x0000,
+	/* 0x00e6 0x0301 */
+	0x01fd, 0x0000,
+	/* 0x00f8 0x0301 */
+	0x01ff, 0x0000,
+	/* 0x0391 0x0301 */
+	0x1fbb, 0x0000,
+	/* 0x0395 0x0301 */
+	0x1fc9, 0x0000,
+	/* 0x0397 0x0301 */
+	0x1fcb, 0x0000,
+	/* 0x0399 0x0301 */
+	0x1fdb, 0x0000,
+	/* 0x039f 0x0301 */
+	0x1ff9, 0x0000,
+	/* 0x03a5 0x0301 */
+	0x1feb, 0x0000,
+	/* 0x03a9 0x0301 */
+	0x1ffb, 0x0000,
+	/* 0x03b1 0x0301 */
+	0x1f71, 0x0000,
+	/* 0x03b5 0x0301 */
+	0x1f73, 0x0000,
+	/* 0x03b7 0x0301 */
+	0x1f75, 0x0000,
+	/* 0x03b9 0x0301 */
+	0x1f77, 0x0000,
+	/* 0x03bf 0x0301 */
+	0x1f79, 0x0000,
+	/* 0x03c5 0x0301 */
+	0x1f7b, 0x0000,
+	/* 0x03c9 0x0301 */
+	0x1f7d, 0x0000,
+	/* 0x0413 0x0301 */
+	0x0403, 0x0000,
+	/* 0x041a 0x0301 */
+	0x040c, 0x0000,
+	/* 0x0433 0x0301 */
+	0x0453, 0x0000,
+	/* 0x043a 0x0301 */
+	0x045c, 0x0000,
+	/* 0x1fbf 0x0301 */
+	0x1fce, 0x0000,
+	/* 0x1ffe 0x0301 */
+	0x1fde, 0x0000,
+	/* 0x0041 0x0302 */
+	0x00c2, 0x0004,  0x0300, 0x0d6c,  0x0301, 0x0d6e,  0x0303, 0x0d70,
+	0x0309, 0x0d72,
+	/* 0x0043 0x0302 */
+	0x0108, 0x0000,
+	/* 0x0045 0x0302 */
+	0x00ca, 0x0004,  0x0300, 0x0d74,  0x0301, 0x0d76,  0x0303, 0x0d78,
+	0x0309, 0x0d7a,
+	/* 0x0047 0x0302 */
+	0x011c, 0x0000,
+	/* 0x0048 0x0302 */
+	0x0124, 0x0000,
+	/* 0x0049 0x0302 */
+	0x00ce, 0x0000,
+	/* 0x004a 0x0302 */
+	0x0134, 0x0000,
+	/* 0x004f 0x0302 */
+	0x00d4, 0x0004,  0x0300, 0x0d7c,  0x0301, 0x0d7e,  0x0303, 0x0d80,
+	0x0309, 0x0d82,
+	/* 0x0053 0x0302 */
+	0x015c, 0x0000,
+	/* 0x0055 0x0302 */
+	0x00db, 0x0000,
+	/* 0x0057 0x0302 */
+	0x0174, 0x0000,
+	/* 0x0059 0x0302 */
+	0x0176, 0x0000,
+	/* 0x005a 0x0302 */
+	0x1e90, 0x0000,
+	/* 0x0061 0x0302 */
+	0x00e2, 0x0004,  0x0300, 0x0d84,  0x0301, 0x0d86,  0x0303, 0x0d88,
+	0x0309, 0x0d8a,
+	/* 0x0063 0x0302 */
+	0x0109, 0x0000,
+	/* 0x0065 0x0302 */
+	0x00ea, 0x0004,  0x0300, 0x0d8c,  0x0301, 0x0d8e,  0x0303, 0x0d90,
+	0x0309, 0x0d92,
+	/* 0x0067 0x0302 */
+	0x011d, 0x0000,
+	/* 0x0068 0x0302 */
+	0x0125, 0x0000,
+	/* 0x0069 0x0302 */
+	0x00ee, 0x0000,
+	/* 0x006a 0x0302 */
+	0x0135, 0x0000,
+	/* 0x006f 0x0302 */
+	0x00f4, 0x0004,  0x0300, 0x0d94,  0x0301, 0x0d96,  0x0303, 0x0d98,
+	0x0309, 0x0d9a,
+	/* 0x0073 0x0302 */
+	0x015d, 0x0000,
+	/* 0x0075 0x0302 */
+	0x00fb, 0x0000,
+	/* 0x0077 0x0302 */
+	0x0175, 0x0000,
+	/* 0x0079 0x0302 */
+	0x0177, 0x0000,
+	/* 0x007a 0x0302 */
+	0x1e91, 0x0000,
+	/* 0x0041 0x0303 */
+	0x00c3, 0x0000,
+	/* 0x0045 0x0303 */
+	0x1ebc, 0x0000,
+	/* 0x0049 0x0303 */
+	0x0128, 0x0000,
+	/* 0x004e 0x0303 */
+	0x00d1, 0x0000,
+	/* 0x004f 0x0303 */
+	0x00d5, 0x0002,  0x0301, 0x0d9c,  0x0308, 0x0d9e,
+	/* 0x0055 0x0303 */
+	0x0168, 0x0001,  0x0301, 0x0da0,
+	/* 0x0056 0x0303 */
+	0x1e7c, 0x0000,
+	/* 0x0059 0x0303 */
+	0x1ef8, 0x0000,
+	/* 0x0061 0x0303 */
+	0x00e3, 0x0000,
+	/* 0x0065 0x0303 */
+	0x1ebd, 0x0000,
+	/* 0x0069 0x0303 */
+	0x0129, 0x0000,
+	/* 0x006e 0x0303 */
+	0x00f1, 0x0000,
+	/* 0x006f 0x0303 */
+	0x00f5, 0x0002,  0x0301, 0x0da2,  0x0308, 0x0da4,
+	/* 0x0075 0x0303 */
+	0x0169, 0x0001,  0x0301, 0x0da6,
+	/* 0x0076 0x0303 */
+	0x1e7d, 0x0000,
+	/* 0x0079 0x0303 */
+	0x1ef9, 0x0000,
+	/* 0x0041 0x0304 */
+	0x0100, 0x0000,
+	/* 0x0045 0x0304 */
+	0x0112, 0x0002,  0x0300, 0x0da8,  0x0301, 0x0daa,
+	/* 0x0047 0x0304 */
+	0x1e20, 0x0000,
+	/* 0x0049 0x0304 */
+	0x012a, 0x0000,
+	/* 0x004f 0x0304 */
+	0x014c, 0x0002,  0x0300, 0x0dac,  0x0301, 0x0dae,
+	/* 0x0055 0x0304 */
+	0x016a, 0x0001,  0x0308, 0x0db0,
+	/* 0x0061 0x0304 */
+	0x0101, 0x0000,
+	/* 0x0065 0x0304 */
+	0x0113, 0x0002,  0x0300, 0x0db2,  0x0301, 0x0db4,
+	/* 0x0067 0x0304 */
+	0x1e21, 0x0000,
+	/* 0x0069 0x0304 */
+	0x012b, 0x0000,
+	/* 0x006f 0x0304 */
+	0x014d, 0x0002,  0x0300, 0x0db6,  0x0301, 0x0db8,
+	/* 0x0075 0x0304 */
+	0x016b, 0x0001,  0x0308, 0x0dba,
+	/* 0x00c6 0x0304 */
+	0x01e2, 0x0000,
+	/* 0x00e6 0x0304 */
+	0x01e3, 0x0000,
+	/* 0x0391 0x0304 */
+	0x1fb9, 0x0000,
+	/* 0x0399 0x0304 */
+	0x1fd9, 0x0000,
+	/* 0x03a5 0x0304 */
+	0x1fe9, 0x0000,
+	/* 0x03b1 0x0304 */
+	0x1fb1, 0x0000,
+	/* 0x03b9 0x0304 */
+	0x1fd1, 0x0000,
+	/* 0x03c5 0x0304 */
+	0x1fe1, 0x0000,
+	/* 0x0418 0x0304 */
+	0x04e2, 0x0000,
+	/* 0x0423 0x0304 */
+	0x04ee, 0x0000,
+	/* 0x0438 0x0304 */
+	0x04e3, 0x0000,
+	/* 0x0443 0x0304 */
+	0x04ef, 0x0000,
+	/* 0x0041 0x0306 */
+	0x0102, 0x0004,  0x0300, 0x0dbc,  0x0301, 0x0dbe,  0x0303, 0x0dc0,
+	0x0309, 0x0dc2,
+	/* 0x0045 0x0306 */
+	0x0114, 0x0000,
+	/* 0x0047 0x0306 */
+	0x011e, 0x0000,
+	/* 0x0049 0x0306 */
+	0x012c, 0x0000,
+	/* 0x004f 0x0306 */
+	0x014e, 0x0000,
+	/* 0x0055 0x0306 */
+	0x016c, 0x0000,
+	/* 0x0061 0x0306 */
+	0x0103, 0x0004,  0x0300, 0x0dc4,  0x0301, 0x0dc6,  0x0303, 0x0dc8,
+	0x0309, 0x0dca,
+	/* 0x0065 0x0306 */
+	0x0115, 0x0000,
+	/* 0x0067 0x0306 */
+	0x011f, 0x0000,
+	/* 0x0069 0x0306 */
+	0x012d, 0x0000,
+	/* 0x006f 0x0306 */
+	0x014f, 0x0000,
+	/* 0x0075 0x0306 */
+	0x016d, 0x0000,
+	/* 0x0391 0x0306 */
+	0x1fb8, 0x0000,
+	/* 0x0399 0x0306 */
+	0x1fd8, 0x0000,
+	/* 0x03a5 0x0306 */
+	0x1fe8, 0x0000,
+	/* 0x03b1 0x0306 */
+	0x1fb0, 0x0000,
+	/* 0x03b9 0x0306 */
+	0x1fd0, 0x0000,
+	/* 0x03c5 0x0306 */
+	0x1fe0, 0x0000,
+	/* 0x0410 0x0306 */
+	0x04d0, 0x0000,
+	/* 0x0415 0x0306 */
+	0x04d6, 0x0000,
+	/* 0x0416 0x0306 */
+	0x04c1, 0x0000,
+	/* 0x0418 0x0306 */
+	0x0419, 0x0000,
+	/* 0x0423 0x0306 */
+	0x040e, 0x0000,
+	/* 0x0430 0x0306 */
+	0x04d1, 0x0000,
+	/* 0x0435 0x0306 */
+	0x04d7, 0x0000,
+	/* 0x0436 0x0306 */
+	0x04c2, 0x0000,
+	/* 0x0438 0x0306 */
+	0x0439, 0x0000,
+	/* 0x0443 0x0306 */
+	0x045e, 0x0000,
+	/* 0x0041 0x0307 */
+	0x0000, 0x0001,  0x0304, 0x0dcc,
+	/* 0x0042 0x0307 */
+	0x1e02, 0x0000,
+	/* 0x0043 0x0307 */
+	0x010a, 0x0000,
+	/* 0x0044 0x0307 */
+	0x1e0a, 0x0000,
+	/* 0x0045 0x0307 */
+	0x0116, 0x0000,
+	/* 0x0046 0x0307 */
+	0x1e1e, 0x0000,
+	/* 0x0047 0x0307 */
+	0x0120, 0x0000,
+	/* 0x0048 0x0307 */
+	0x1e22, 0x0000,
+	/* 0x0049 0x0307 */
+	0x0130, 0x0000,
+	/* 0x004d 0x0307 */
+	0x1e40, 0x0000,
+	/* 0x004e 0x0307 */
+	0x1e44, 0x0000,
+	/* 0x0050 0x0307 */
+	0x1e56, 0x0000,
+	/* 0x0052 0x0307 */
+	0x1e58, 0x0000,
+	/* 0x0053 0x0307 */
+	0x1e60, 0x0000,
+	/* 0x0054 0x0307 */
+	0x1e6a, 0x0000,
+	/* 0x0057 0x0307 */
+	0x1e86, 0x0000,
+	/* 0x0058 0x0307 */
+	0x1e8a, 0x0000,
+	/* 0x0059 0x0307 */
+	0x1e8e, 0x0000,
+	/* 0x005a 0x0307 */
+	0x017b, 0x0000,
+	/* 0x0061 0x0307 */
+	0x0000, 0x0001,  0x0304, 0x0dce,
+	/* 0x0062 0x0307 */
+	0x1e03, 0x0000,
+	/* 0x0063 0x0307 */
+	0x010b, 0x0000,
+	/* 0x0064 0x0307 */
+	0x1e0b, 0x0000,
+	/* 0x0065 0x0307 */
+	0x0117, 0x0000,
+	/* 0x0066 0x0307 */
+	0x1e1f, 0x0000,
+	/* 0x0067 0x0307 */
+	0x0121, 0x0000,
+	/* 0x0068 0x0307 */
+	0x1e23, 0x0000,
+	/* 0x006d 0x0307 */
+	0x1e41, 0x0000,
+	/* 0x006e 0x0307 */
+	0x1e45, 0x0000,
+	/* 0x0070 0x0307 */
+	0x1e57, 0x0000,
+	/* 0x0072 0x0307 */
+	0x1e59, 0x0000,
+	/* 0x0073 0x0307 */
+	0x1e61, 0x0000,
+	/* 0x0074 0x0307 */
+	0x1e6b, 0x0000,
+	/* 0x0077 0x0307 */
+	0x1e87, 0x0000,
+	/* 0x0078 0x0307 */
+	0x1e8b, 0x0000,
+	/* 0x0079 0x0307 */
+	0x1e8f, 0x0000,
+	/* 0x007a 0x0307 */
+	0x017c, 0x0000,
+	/* 0x017f 0x0307 */
+	0x1e9b, 0x0000,
+	/* 0x0306 0x0307 */
+	0x0310, 0x0000,
+	/* 0x0041 0x0308 */
+	0x00c4, 0x0001,  0x0304, 0x0dd0,
+	/* 0x0045 0x0308 */
+	0x00cb, 0x0000,
+	/* 0x0048 0x0308 */
+	0x1e26, 0x0000,
+	/* 0x0049 0x0308 */
+	0x00cf, 0x0001,  0x0301, 0x0dd2,
+	/* 0x004f 0x0308 */
+	0x00d6, 0x0000,
+	/* 0x0055 0x0308 */
+	0x00dc, 0x0004,  0x0300, 0x0dd4,  0x0301, 0x0dd6,  0x0304, 0x0dd8,
+	0x030c, 0x0dda,
+	/* 0x0057 0x0308 */
+	0x1e84, 0x0000,
+	/* 0x0058 0x0308 */
+	0x1e8c, 0x0000,
+	/* 0x0059 0x0308 */
+	0x0178, 0x0000,
+	/* 0x0061 0x0308 */
+	0x00e4, 0x0001,  0x0304, 0x0ddc,
+	/* 0x0065 0x0308 */
+	0x00eb, 0x0000,
+	/* 0x0068 0x0308 */
+	0x1e27, 0x0000,
+	/* 0x0069 0x0308 */
+	0x00ef, 0x0001,  0x0301, 0x0dde,
+	/* 0x006f 0x0308 */
+	0x00f6, 0x0000,
+	/* 0x0074 0x0308 */
+	0x1e97, 0x0000,
+	/* 0x0075 0x0308 */
+	0x00fc, 0x0004,  0x0300, 0x0de0,  0x0301, 0x0de2,  0x0304, 0x0de4,
+	0x030c, 0x0de6,
+	/* 0x0077 0x0308 */
+	0x1e85, 0x0000,
+	/* 0x0078 0x0308 */
+	0x1e8d, 0x0000,
+	/* 0x0079 0x0308 */
+	0x00ff, 0x0000,
+	/* 0x018f 0x0308 */
+	0x04da, 0x0000,
+	/* 0x019f 0x0308 */
+	0x04ea, 0x0000,
+	/* 0x0259 0x0308 */
+	0x04db, 0x0000,
+	/* 0x0275 0x0308 */
+	0x04eb, 0x0000,
+	/* 0x0399 0x0308 */
+	0x03aa, 0x0000,
+	/* 0x03a5 0x0308 */
+	0x03ab, 0x0000,
+	/* 0x03b9 0x0308 */
+	0x03ca, 0x0004,  0x0300, 0x0de8,  0x0301, 0x0dea,  0x030d, 0x0dec,
+	0x0342, 0x0dee,
+	/* 0x03c5 0x0308 */
+	0x03cb, 0x0004,  0x0300, 0x0df0,  0x0301, 0x0df2,  0x030d, 0x0df4,
+	0x0342, 0x0df6,
+	/* 0x03d2 0x0308 */
+	0x03d4, 0x0000,
+	/* 0x0406 0x0308 */
+	0x0407, 0x0000,
+	/* 0x0410 0x0308 */
+	0x04d2, 0x0000,
+	/* 0x0415 0x0308 */
+	0x0401, 0x0000,
+	/* 0x0416 0x0308 */
+	0x04dc, 0x0000,
+	/* 0x0417 0x0308 */
+	0x04de, 0x0000,
+	/* 0x0418 0x0308 */
+	0x04e4, 0x0000,
+	/* 0x041e 0x0308 */
+	0x04e6, 0x0000,
+	/* 0x0423 0x0308 */
+	0x04f0, 0x0000,
+	/* 0x0427 0x0308 */
+	0x04f4, 0x0000,
+	/* 0x042b 0x0308 */
+	0x04f8, 0x0000,
+	/* 0x0430 0x0308 */
+	0x04d3, 0x0000,
+	/* 0x0435 0x0308 */
+	0x0451, 0x0000,
+	/* 0x0436 0x0308 */
+	0x04dd, 0x0000,
+	/* 0x0437 0x0308 */
+	0x04df, 0x0000,
+	/* 0x0438 0x0308 */
+	0x04e5, 0x0000,
+	/* 0x043e 0x0308 */
+	0x04e7, 0x0000,
+	/* 0x0443 0x0308 */
+	0x04f1, 0x0000,
+	/* 0x0447 0x0308 */
+	0x04f5, 0x0000,
+	/* 0x044b 0x0308 */
+	0x04f9, 0x0000,
+	/* 0x0456 0x0308 */
+	0x0457, 0x0000,
+	/* 0x0041 0x0309 */
+	0x1ea2, 0x0000,
+	/* 0x0045 0x0309 */
+	0x1eba, 0x0000,
+	/* 0x0049 0x0309 */
+	0x1ec8, 0x0000,
+	/* 0x004f 0x0309 */
+	0x1ece, 0x0000,
+	/* 0x0055 0x0309 */
+	0x1ee6, 0x0000,
+	/* 0x0059 0x0309 */
+	0x1ef6, 0x0000,
+	/* 0x0061 0x0309 */
+	0x1ea3, 0x0000,
+	/* 0x0065 0x0309 */
+	0x1ebb, 0x0000,
+	/* 0x0069 0x0309 */
+	0x1ec9, 0x0000,
+	/* 0x006f 0x0309 */
+	0x1ecf, 0x0000,
+	/* 0x0075 0x0309 */
+	0x1ee7, 0x0000,
+	/* 0x0079 0x0309 */
+	0x1ef7, 0x0000,
+	/* 0x0041 0x030a */
+	0x00c5, 0x0001,  0x0301, 0x0df8,
+	/* 0x0055 0x030a */
+	0x016e, 0x0000,
+	/* 0x0061 0x030a */
+	0x00e5, 0x0001,  0x0301, 0x0dfa,
+	/* 0x0075 0x030a */
+	0x016f, 0x0000,
+	/* 0x0077 0x030a */
+	0x1e98, 0x0000,
+	/* 0x0079 0x030a */
+	0x1e99, 0x0000,
+	/* 0x004f 0x030b */
+	0x0150, 0x0000,
+	/* 0x0055 0x030b */
+	0x0170, 0x0000,
+	/* 0x006f 0x030b */
+	0x0151, 0x0000,
+	/* 0x0075 0x030b */
+	0x0171, 0x0000,
+	/* 0x0423 0x030b */
+	0x04f2, 0x0000,
+	/* 0x0443 0x030b */
+	0x04f3, 0x0000,
+	/* 0x0041 0x030c */
+	0x01cd, 0x0000,
+	/* 0x0043 0x030c */
+	0x010c, 0x0000,
+	/* 0x0044 0x030c */
+	0x010e, 0x0000,
+	/* 0x0045 0x030c */
+	0x011a, 0x0000,
+	/* 0x0047 0x030c */
+	0x01e6, 0x0000,
+	/* 0x0049 0x030c */
+	0x01cf, 0x0000,
+	/* 0x004b 0x030c */
+	0x01e8, 0x0000,
+	/* 0x004c 0x030c */
+	0x013d, 0x0000,
+	/* 0x004e 0x030c */
+	0x0147, 0x0000,
+	/* 0x004f 0x030c */
+	0x01d1, 0x0000,
+	/* 0x0052 0x030c */
+	0x0158, 0x0000,
+	/* 0x0053 0x030c */
+	0x0160, 0x0001,  0x0307, 0x0dfc,
+	/* 0x0054 0x030c */
+	0x0164, 0x0000,
+	/* 0x0055 0x030c */
+	0x01d3, 0x0000,
+	/* 0x005a 0x030c */
+	0x017d, 0x0000,
+	/* 0x0061 0x030c */
+	0x01ce, 0x0000,
+	/* 0x0063 0x030c */
+	0x010d, 0x0000,
+	/* 0x0064 0x030c */
+	0x010f, 0x0000,
+	/* 0x0065 0x030c */
+	0x011b, 0x0000,
+	/* 0x0067 0x030c */
+	0x01e7, 0x0000,
+	/* 0x0069 0x030c */
+	0x01d0, 0x0000,
+	/* 0x006a 0x030c */
+	0x01f0, 0x0000,
+	/* 0x006b 0x030c */
+	0x01e9, 0x0000,
+	/* 0x006c 0x030c */
+	0x013e, 0x0000,
+	/* 0x006e 0x030c */
+	0x0148, 0x0000,
+	/* 0x006f 0x030c */
+	0x01d2, 0x0000,
+	/* 0x0072 0x030c */
+	0x0159, 0x0000,
+	/* 0x0073 0x030c */
+	0x0161, 0x0001,  0x0307, 0x0dfe,
+	/* 0x0074 0x030c */
+	0x0165, 0x0000,
+	/* 0x0075 0x030c */
+	0x01d4, 0x0000,
+	/* 0x007a 0x030c */
+	0x017e, 0x0000,
+	/* 0x01b7 0x030c */
+	0x01ee, 0x0000,
+	/* 0x0292 0x030c */
+	0x01ef, 0x0000,
+	/* 0x00a8 0x030d */
+	0x0385, 0x0000,
+	/* 0x0308 0x030d */
+	0x0344, 0x0000,
+	/* 0x0391 0x030d */
+	0x0386, 0x0000,
+	/* 0x0395 0x030d */
+	0x0388, 0x0000,
+	/* 0x0397 0x030d */
+	0x0389, 0x0000,
+	/* 0x0399 0x030d */
+	0x038a, 0x0000,
+	/* 0x039f 0x030d */
+	0x038c, 0x0000,
+	/* 0x03a5 0x030d */
+	0x038e, 0x0000,
+	/* 0x03a9 0x030d */
+	0x038f, 0x0000,
+	/* 0x03b1 0x030d */
+	0x03ac, 0x0000,
+	/* 0x03b5 0x030d */
+	0x03ad, 0x0000,
+	/* 0x03b7 0x030d */
+	0x03ae, 0x0000,
+	/* 0x03b9 0x030d */
+	0x03af, 0x0000,
+	/* 0x03bf 0x030d */
+	0x03cc, 0x0000,
+	/* 0x03c5 0x030d */
+	0x03cd, 0x0000,
+	/* 0x03c9 0x030d */
+	0x03ce, 0x0000,
+	/* 0x03d2 0x030d */
+	0x03d3, 0x0000,
+	/* 0x0041 0x030f */
+	0x0200, 0x0000,
+	/* 0x0045 0x030f */
+	0x0204, 0x0000,
+	/* 0x0049 0x030f */
+	0x0208, 0x0000,
+	/* 0x004f 0x030f */
+	0x020c, 0x0000,
+	/* 0x0052 0x030f */
+	0x0210, 0x0000,
+	/* 0x0055 0x030f */
+	0x0214, 0x0000,
+	/* 0x0061 0x030f */
+	0x0201, 0x0000,
+	/* 0x0065 0x030f */
+	0x0205, 0x0000,
+	/* 0x0069 0x030f */
+	0x0209, 0x0000,
+	/* 0x006f 0x030f */
+	0x020d, 0x0000,
+	/* 0x0072 0x030f */
+	0x0211, 0x0000,
+	/* 0x0075 0x030f */
+	0x0215, 0x0000,
+	/* 0x0474 0x030f */
+	0x0476, 0x0000,
+	/* 0x0475 0x030f */
+	0x0477, 0x0000,
+	/* 0x0041 0x0311 */
+	0x0202, 0x0000,
+	/* 0x0045 0x0311 */
+	0x0206, 0x0000,
+	/* 0x0049 0x0311 */
+	0x020a, 0x0000,
+	/* 0x004f 0x0311 */
+	0x020e, 0x0000,
+	/* 0x0052 0x0311 */
+	0x0212, 0x0000,
+	/* 0x0055 0x0311 */
+	0x0216, 0x0000,
+	/* 0x0061 0x0311 */
+	0x0203, 0x0000,
+	/* 0x0065 0x0311 */
+	0x0207, 0x0000,
+	/* 0x0069 0x0311 */
+	0x020b, 0x0000,
+	/* 0x006f 0x0311 */
+	0x020f, 0x0000,
+	/* 0x0072 0x0311 */
+	0x0213, 0x0000,
+	/* 0x0075 0x0311 */
+	0x0217, 0x0000,
+	/* 0x0391 0x0313 */
+	0x1f08, 0x0003,  0x0300, 0x0e00,  0x0301, 0x0e02,  0x0342, 0x0e04,
+	/* 0x0395 0x0313 */
+	0x1f18, 0x0002,  0x0300, 0x0e06,  0x0301, 0x0e08,
+	/* 0x0397 0x0313 */
+	0x1f28, 0x0003,  0x0300, 0x0e0a,  0x0301, 0x0e0c,  0x0342, 0x0e0e,
+	/* 0x0399 0x0313 */
+	0x1f38, 0x0003,  0x0300, 0x0e10,  0x0301, 0x0e12,  0x0342, 0x0e14,
+	/* 0x039f 0x0313 */
+	0x1f48, 0x0002,  0x0300, 0x0e16,  0x0301, 0x0e18,
+	/* 0x03a9 0x0313 */
+	0x1f68, 0x0003,  0x0300, 0x0e1a,  0x0301, 0x0e1c,  0x0342, 0x0e1e,
+	/* 0x03b1 0x0313 */
+	0x1f00, 0x0003,  0x0300, 0x0e20,  0x0301, 0x0e22,  0x0342, 0x0e24,
+	/* 0x03b5 0x0313 */
+	0x1f10, 0x0002,  0x0300, 0x0e26,  0x0301, 0x0e28,
+	/* 0x03b7 0x0313 */
+	0x1f20, 0x0003,  0x0300, 0x0e2a,  0x0301, 0x0e2c,  0x0342, 0x0e2e,
+	/* 0x03b9 0x0313 */
+	0x1f30, 0x0003,  0x0300, 0x0e30,  0x0301, 0x0e32,  0x0342, 0x0e34,
+	/* 0x03bf 0x0313 */
+	0x1f40, 0x0002,  0x0300, 0x0e36,  0x0301, 0x0e38,
+	/* 0x03c1 0x0313 */
+	0x1fe4, 0x0000,
+	/* 0x03c5 0x0313 */
+	0x1f50, 0x0003,  0x0300, 0x0e3a,  0x0301, 0x0e3c,  0x0342, 0x0e3e,
+	/* 0x03c9 0x0313 */
+	0x1f60, 0x0003,  0x0300, 0x0e40,  0x0301, 0x0e42,  0x0342, 0x0e44,
+	/* 0x0391 0x0314 */
+	0x1f09, 0x0003,  0x0300, 0x0e46,  0x0301, 0x0e48,  0x0342, 0x0e4a,
+	/* 0x0395 0x0314 */
+	0x1f19, 0x0002,  0x0300, 0x0e4c,  0x0301, 0x0e4e,
+	/* 0x0397 0x0314 */
+	0x1f29, 0x0003,  0x0300, 0x0e50,  0x0301, 0x0e52,  0x0342, 0x0e54,
+	/* 0x0399 0x0314 */
+	0x1f39, 0x0003,  0x0300, 0x0e56,  0x0301, 0x0e58,  0x0342, 0x0e5a,
+	/* 0x039f 0x0314 */
+	0x1f49, 0x0002,  0x0300, 0x0e5c,  0x0301, 0x0e5e,
+	/* 0x03a1 0x0314 */
+	0x1fec, 0x0000,
+	/* 0x03a5 0x0314 */
+	0x1f59, 0x0003,  0x0300, 0x0e60,  0x0301, 0x0e62,  0x0342, 0x0e64,
+	/* 0x03a9 0x0314 */
+	0x1f69, 0x0003,  0x0300, 0x0e66,  0x0301, 0x0e68,  0x0342, 0x0e6a,
+	/* 0x03b1 0x0314 */
+	0x1f01, 0x0003,  0x0300, 0x0e6c,  0x0301, 0x0e6e,  0x0342, 0x0e70,
+	/* 0x03b5 0x0314 */
+	0x1f11, 0x0002,  0x0300, 0x0e72,  0x0301, 0x0e74,
+	/* 0x03b7 0x0314 */
+	0x1f21, 0x0003,  0x0300, 0x0e76,  0x0301, 0x0e78,  0x0342, 0x0e7a,
+	/* 0x03b9 0x0314 */
+	0x1f31, 0x0003,  0x0300, 0x0e7c,  0x0301, 0x0e7e,  0x0342, 0x0e80,
+	/* 0x03bf 0x0314 */
+	0x1f41, 0x0002,  0x0300, 0x0e82,  0x0301, 0x0e84,
+	/* 0x03c1 0x0314 */
+	0x1fe5, 0x0000,
+	/* 0x03c5 0x0314 */
+	0x1f51, 0x0003,  0x0300, 0x0e86,  0x0301, 0x0e88,  0x0342, 0x0e8a,
+	/* 0x03c9 0x0314 */
+	0x1f61, 0x0003,  0x0300, 0x0e8c,  0x0301, 0x0e8e,  0x0342, 0x0e90,
+	/* 0x004f 0x031b */
+	0x01a0, 0x0005,  0x0300, 0x0e92,  0x0301, 0x0e94,  0x0303, 0x0e96,
+	0x0309, 0x0e98,  0x0323, 0x0e9a,
+	/* 0x0055 0x031b */
+	0x01af, 0x0005,  0x0300, 0x0e9c,  0x0301, 0x0e9e,  0x0303, 0x0ea0,
+	0x0309, 0x0ea2,  0x0323, 0x0ea4,
+	/* 0x006f 0x031b */
+	0x01a1, 0x0005,  0x0300, 0x0ea6,  0x0301, 0x0ea8,  0x0303, 0x0eaa,
+	0x0309, 0x0eac,  0x0323, 0x0eae,
+	/* 0x0075 0x031b */
+	0x01b0, 0x0005,  0x0300, 0x0eb0,  0x0301, 0x0eb2,  0x0303, 0x0eb4,
+	0x0309, 0x0eb6,  0x0323, 0x0eb8,
+	/* 0x0041 0x0323 */
+	0x1ea0, 0x0002,  0x0302, 0x0eba,  0x0306, 0x0ebc,
+	/* 0x0042 0x0323 */
+	0x1e04, 0x0000,
+	/* 0x0044 0x0323 */
+	0x1e0c, 0x0000,
+	/* 0x0045 0x0323 */
+	0x1eb8, 0x0001,  0x0302, 0x0ebe,
+	/* 0x0048 0x0323 */
+	0x1e24, 0x0000,
+	/* 0x0049 0x0323 */
+	0x1eca, 0x0000,
+	/* 0x004b 0x0323 */
+	0x1e32, 0x0000,
+	/* 0x004c 0x0323 */
+	0x1e36, 0x0001,  0x0304, 0x0ec0,
+	/* 0x004d 0x0323 */
+	0x1e42, 0x0000,
+	/* 0x004e 0x0323 */
+	0x1e46, 0x0000,
+	/* 0x004f 0x0323 */
+	0x1ecc, 0x0001,  0x0302, 0x0ec2,
+	/* 0x0052 0x0323 */
+	0x1e5a, 0x0001,  0x0304, 0x0ec4,
+	/* 0x0053 0x0323 */
+	0x1e62, 0x0001,  0x0307, 0x0ec6,
+	/* 0x0054 0x0323 */
+	0x1e6c, 0x0000,
+	/* 0x0055 0x0323 */
+	0x1ee4, 0x0000,
+	/* 0x0056 0x0323 */
+	0x1e7e, 0x0000,
+	/* 0x0057 0x0323 */
+	0x1e88, 0x0000,
+	/* 0x0059 0x0323 */
+	0x1ef4, 0x0000,
+	/* 0x005a 0x0323 */
+	0x1e92, 0x0000,
+	/* 0x0061 0x0323 */
+	0x1ea1, 0x0002,  0x0302, 0x0ec8,  0x0306, 0x0eca,
+	/* 0x0062 0x0323 */
+	0x1e05, 0x0000,
+	/* 0x0064 0x0323 */
+	0x1e0d, 0x0000,
+	/* 0x0065 0x0323 */
+	0x1eb9, 0x0001,  0x0302, 0x0ecc,
+	/* 0x0068 0x0323 */
+	0x1e25, 0x0000,
+	/* 0x0069 0x0323 */
+	0x1ecb, 0x0000,
+	/* 0x006b 0x0323 */
+	0x1e33, 0x0000,
+	/* 0x006c 0x0323 */
+	0x1e37, 0x0001,  0x0304, 0x0ece,
+	/* 0x006d 0x0323 */
+	0x1e43, 0x0000,
+	/* 0x006e 0x0323 */
+	0x1e47, 0x0000,
+	/* 0x006f 0x0323 */
+	0x1ecd, 0x0001,  0x0302, 0x0ed0,
+	/* 0x0072 0x0323 */
+	0x1e5b, 0x0001,  0x0304, 0x0ed2,
+	/* 0x0073 0x0323 */
+	0x1e63, 0x0001,  0x0307, 0x0ed4,
+	/* 0x0074 0x0323 */
+	0x1e6d, 0x0000,
+	/* 0x0075 0x0323 */
+	0x1ee5, 0x0000,
+	/* 0x0076 0x0323 */
+	0x1e7f, 0x0000,
+	/* 0x0077 0x0323 */
+	0x1e89, 0x0000,
+	/* 0x0079 0x0323 */
+	0x1ef5, 0x0000,
+	/* 0x007a 0x0323 */
+	0x1e93, 0x0000,
+	/* 0x0055 0x0324 */
+	0x1e72, 0x0000,
+	/* 0x0075 0x0324 */
+	0x1e73, 0x0000,
+	/* 0x0041 0x0325 */
+	0x1e00, 0x0000,
+	/* 0x0061 0x0325 */
+	0x1e01, 0x0000,
+	/* 0x0043 0x0327 */
+	0x00c7, 0x0001,  0x0301, 0x0ed6,
+	/* 0x0044 0x0327 */
+	0x1e10, 0x0000,
+	/* 0x0045 0x0327 */
+	0x0000, 0x0001,  0x0306, 0x0ed8,
+	/* 0x0047 0x0327 */
+	0x0122, 0x0000,
+	/* 0x0048 0x0327 */
+	0x1e28, 0x0000,
+	/* 0x004b 0x0327 */
+	0x0136, 0x0000,
+	/* 0x004c 0x0327 */
+	0x013b, 0x0000,
+	/* 0x004e 0x0327 */
+	0x0145, 0x0000,
+	/* 0x0052 0x0327 */
+	0x0156, 0x0000,
+	/* 0x0053 0x0327 */
+	0x015e, 0x0000,
+	/* 0x0054 0x0327 */
+	0x0162, 0x0000,
+	/* 0x0063 0x0327 */
+	0x00e7, 0x0001,  0x0301, 0x0eda,
+	/* 0x0064 0x0327 */
+	0x1e11, 0x0000,
+	/* 0x0065 0x0327 */
+	0x0000, 0x0001,  0x0306, 0x0edc,
+	/* 0x0067 0x0327 */
+	0x0123, 0x0000,
+	/* 0x0068 0x0327 */
+	0x1e29, 0x0000,
+	/* 0x006b 0x0327 */
+	0x0137, 0x0000,
+	/* 0x006c 0x0327 */
+	0x013c, 0x0000,
+	/* 0x006e 0x0327 */
+	0x0146, 0x0000,
+	/* 0x0072 0x0327 */
+	0x0157, 0x0000,
+	/* 0x0073 0x0327 */
+	0x015f, 0x0000,
+	/* 0x0074 0x0327 */
+	0x0163, 0x0000,
+	/* 0x0041 0x0328 */
+	0x0104, 0x0000,
+	/* 0x0045 0x0328 */
+	0x0118, 0x0000,
+	/* 0x0049 0x0328 */
+	0x012e, 0x0000,
+	/* 0x004f 0x0328 */
+	0x01ea, 0x0001,  0x0304, 0x0ede,
+	/* 0x0055 0x0328 */
+	0x0172, 0x0000,
+	/* 0x0061 0x0328 */
+	0x0105, 0x0000,
+	/* 0x0065 0x0328 */
+	0x0119, 0x0000,
+	/* 0x0069 0x0328 */
+	0x012f, 0x0000,
+	/* 0x006f 0x0328 */
+	0x01eb, 0x0001,  0x0304, 0x0ee0,
+	/* 0x0075 0x0328 */
+	0x0173, 0x0000,
+	/* 0x0044 0x032d */
+	0x1e12, 0x0000,
+	/* 0x0045 0x032d */
+	0x1e18, 0x0000,
+	/* 0x004c 0x032d */
+	0x1e3c, 0x0000,
+	/* 0x004e 0x032d */
+	0x1e4a, 0x0000,
+	/* 0x0054 0x032d */
+	0x1e70, 0x0000,
+	/* 0x0055 0x032d */
+	0x1e76, 0x0000,
+	/* 0x0064 0x032d */
+	0x1e13, 0x0000,
+	/* 0x0065 0x032d */
+	0x1e19, 0x0000,
+	/* 0x006c 0x032d */
+	0x1e3d, 0x0000,
+	/* 0x006e 0x032d */
+	0x1e4b, 0x0000,
+	/* 0x0074 0x032d */
+	0x1e71, 0x0000,
+	/* 0x0075 0x032d */
+	0x1e77, 0x0000,
+	/* 0x0048 0x032e */
+	0x1e2a, 0x0000,
+	/* 0x0068 0x032e */
+	0x1e2b, 0x0000,
+	/* 0x0045 0x0330 */
+	0x1e1a, 0x0000,
+	/* 0x0049 0x0330 */
+	0x1e2c, 0x0000,
+	/* 0x0055 0x0330 */
+	0x1e74, 0x0000,
+	/* 0x0065 0x0330 */
+	0x1e1b, 0x0000,
+	/* 0x0069 0x0330 */
+	0x1e2d, 0x0000,
+	/* 0x0075 0x0330 */
+	0x1e75, 0x0000,
+	/* 0x0042 0x0331 */
+	0x1e06, 0x0000,
+	/* 0x0044 0x0331 */
+	0x1e0e, 0x0000,
+	/* 0x004b 0x0331 */
+	0x1e34, 0x0000,
+	/* 0x004c 0x0331 */
+	0x1e3a, 0x0000,
+	/* 0x004e 0x0331 */
+	0x1e48, 0x0000,
+	/* 0x0052 0x0331 */
+	0x1e5e, 0x0000,
+	/* 0x0054 0x0331 */
+	0x1e6e, 0x0000,
+	/* 0x005a 0x0331 */
+	0x1e94, 0x0000,
+	/* 0x0062 0x0331 */
+	0x1e07, 0x0000,
+	/* 0x0064 0x0331 */
+	0x1e0f, 0x0000,
+	/* 0x0068 0x0331 */
+	0x1e96, 0x0000,
+	/* 0x006b 0x0331 */
+	0x1e35, 0x0000,
+	/* 0x006c 0x0331 */
+	0x1e3b, 0x0000,
+	/* 0x006e 0x0331 */
+	0x1e49, 0x0000,
+	/* 0x0072 0x0331 */
+	0x1e5f, 0x0000,
+	/* 0x0074 0x0331 */
+	0x1e6f, 0x0000,
+	/* 0x007a 0x0331 */
+	0x1e95, 0x0000,
+	/* 0x00a8 0x0342 */
+	0x1fc1, 0x0000,
+	/* 0x03b1 0x0342 */
+	0x1fb6, 0x0000,
+	/* 0x03b7 0x0342 */
+	0x1fc6, 0x0000,
+	/* 0x03b9 0x0342 */
+	0x1fd6, 0x0000,
+	/* 0x03c5 0x0342 */
+	0x1fe6, 0x0000,
+	/* 0x03c9 0x0342 */
+	0x1ff6, 0x0000,
+	/* 0x1fbf 0x0342 */
+	0x1fcf, 0x0000,
+	/* 0x1ffe 0x0342 */
+	0x1fdf, 0x0000,
+	/* 0x0391 0x0345 */
+	0x1fbc, 0x0002,  0x0313, 0x0ee2,  0x0314, 0x0eea,
+	/* 0x0397 0x0345 */
+	0x1fcc, 0x0002,  0x0313, 0x0ef2,  0x0314, 0x0efa,
+	/* 0x03a9 0x0345 */
+	0x1ffc, 0x0002,  0x0313, 0x0f02,  0x0314, 0x0f0a,
+	/* 0x03b1 0x0345 */
+	0x1fb3, 0x0005,  0x0300, 0x0f12,  0x0301, 0x0f14,  0x0313, 0x0f16,
+	0x0314, 0x0f1e,  0x0342, 0x0f26,
+	/* 0x03b7 0x0345 */
+	0x1fc3, 0x0005,  0x0300, 0x0f28,  0x0301, 0x0f2a,  0x0313, 0x0f2c,
+	0x0314, 0x0f34,  0x0342, 0x0f3c,
+	/* 0x03bf 0x0345 */
+	0x0000, 0x0001,  0x0301, 0x0f3e,
+	/* 0x03c9 0x0345 */
+	0x1ff3, 0x0004,  0x0300, 0x0f40,  0x0313, 0x0f42,  0x0314, 0x0f4a,
+	0x0342, 0x0f52,
+	/* 0x05d0 0x05b7 */
+	0xfb2e, 0x0000,
+	/* 0x05f2 0x05b7 */
+	0xfb1f, 0x0000,
+	/* 0x05d0 0x05b8 */
+	0xfb2f, 0x0000,
+	/* 0x05d5 0x05b9 */
+	0xfb4b, 0x0000,
+	/* 0x05d0 0x05bc */
+	0xfb30, 0x0000,
+	/* 0x05d1 0x05bc */
+	0xfb31, 0x0000,
+	/* 0x05d2 0x05bc */
+	0xfb32, 0x0000,
+	/* 0x05d3 0x05bc */
+	0xfb33, 0x0000,
+	/* 0x05d4 0x05bc */
+	0xfb34, 0x0000,
+	/* 0x05d5 0x05bc */
+	0xfb35, 0x0000,
+	/* 0x05d6 0x05bc */
+	0xfb36, 0x0000,
+	/* 0x05d8 0x05bc */
+	0xfb38, 0x0000,
+	/* 0x05d9 0x05bc */
+	0xfb39, 0x0000,
+	/* 0x05da 0x05bc */
+	0xfb3a, 0x0000,
+	/* 0x05db 0x05bc */
+	0xfb3b, 0x0000,
+	/* 0x05dc 0x05bc */
+	0xfb3c, 0x0000,
+	/* 0x05de 0x05bc */
+	0xfb3e, 0x0000,
+	/* 0x05e0 0x05bc */
+	0xfb40, 0x0000,
+	/* 0x05e1 0x05bc */
+	0xfb41, 0x0000,
+	/* 0x05e3 0x05bc */
+	0xfb43, 0x0000,
+	/* 0x05e4 0x05bc */
+	0xfb44, 0x0000,
+	/* 0x05e6 0x05bc */
+	0xfb46, 0x0000,
+	/* 0x05e7 0x05bc */
+	0xfb47, 0x0000,
+	/* 0x05e8 0x05bc */
+	0xfb48, 0x0000,
+	/* 0x05e9 0x05bc */
+	0xfb49, 0x0002,  0x05c1, 0x0f54,  0x05c2, 0x0f56,
+	/* 0x05ea 0x05bc */
+	0xfb4a, 0x0000,
+	/* 0x05d1 0x05bf */
+	0xfb4c, 0x0000,
+	/* 0x05db 0x05bf */
+	0xfb4d, 0x0000,
+	/* 0x05e4 0x05bf */
+	0xfb4e, 0x0000,
+	/* 0x05e9 0x05c1 */
+	0xfb2a, 0x0000,
+	/* 0x05e9 0x05c2 */
+	0xfb2b, 0x0000,
+	/* 0x0915 0x093c */
+	0x0958, 0x0000,
+	/* 0x0916 0x093c */
+	0x0959, 0x0000,
+	/* 0x0917 0x093c */
+	0x095a, 0x0000,
+	/* 0x091c 0x093c */
+	0x095b, 0x0000,
+	/* 0x0921 0x093c */
+	0x095c, 0x0000,
+	/* 0x0922 0x093c */
+	0x095d, 0x0000,
+	/* 0x0928 0x093c */
+	0x0929, 0x0000,
+	/* 0x092b 0x093c */
+	0x095e, 0x0000,
+	/* 0x092f 0x093c */
+	0x095f, 0x0000,
+	/* 0x0930 0x093c */
+	0x0931, 0x0000,
+	/* 0x0933 0x093c */
+	0x0934, 0x0000,
+	/* 0x09a1 0x09bc */
+	0x09dc, 0x0000,
+	/* 0x09a2 0x09bc */
+	0x09dd, 0x0000,
+	/* 0x09ac 0x09bc */
+	0x09b0, 0x0000,
+	/* 0x09af 0x09bc */
+	0x09df, 0x0000,
+	/* 0x09c7 0x09be */
+	0x09cb, 0x0000,
+	/* 0x09c7 0x09d7 */
+	0x09cc, 0x0000,
+	/* 0x0a16 0x0a3c */
+	0x0a59, 0x0000,
+	/* 0x0a17 0x0a3c */
+	0x0a5a, 0x0000,
+	/* 0x0a1c 0x0a3c */
+	0x0a5b, 0x0000,
+	/* 0x0a21 0x0a3c */
+	0x0a5c, 0x0000,
+	/* 0x0a2b 0x0a3c */
+	0x0a5e, 0x0000,
+	/* 0x0b21 0x0b3c */
+	0x0b5c, 0x0000,
+	/* 0x0b22 0x0b3c */
+	0x0b5d, 0x0000,
+	/* 0x0b2f 0x0b3c */
+	0x0b5f, 0x0000,
+	/* 0x0b47 0x0b3e */
+	0x0b4b, 0x0000,
+	/* 0x0b47 0x0b56 */
+	0x0b48, 0x0000,
+	/* 0x0b47 0x0b57 */
+	0x0b4c, 0x0000,
+	/* 0x0bc6 0x0bbe */
+	0x0bca, 0x0000,
+	/* 0x0bc7 0x0bbe */
+	0x0bcb, 0x0000,
+	/* 0x0b92 0x0bd7 */
+	0x0b94, 0x0000,
+	/* 0x0bc6 0x0bd7 */
+	0x0bcc, 0x0000,
+	/* 0x0c46 0x0c56 */
+	0x0c48, 0x0000,
+	/* 0x0cc6 0x0cc2 */
+	0x0cca, 0x0001,  0x0cd5, 0x0f58,
+	/* 0x0cbf 0x0cd5 */
+	0x0cc0, 0x0000,
+	/* 0x0cc6 0x0cd5 */
+	0x0cc7, 0x0000,
+	/* 0x0cc6 0x0cd6 */
+	0x0cc8, 0x0000,
+	/* 0x0d46 0x0d3e */
+	0x0d4a, 0x0000,
+	/* 0x0d47 0x0d3e */
+	0x0d4b, 0x0000,
+	/* 0x0d46 0x0d57 */
+	0x0d4c, 0x0000,
+	/* 0x0e4d 0x0e32 */
+	0x0e33, 0x0000,
+	/* 0x0ecd 0x0eb2 */
+	0x0eb3, 0x0000,
+	/* 0x0f72 0x0f71 */
+	0x0f73, 0x0000,
+	/* 0x0f74 0x0f71 */
+	0x0f75, 0x0000,
+	/* 0x0f80 0x0f71 */
+	0x0f81, 0x0000,
+	/* 0x0fb2 0x0f80 */
+	0x0f76, 0x0001,  0x0f71, 0x0f5a,
+	/* 0x0fb3 0x0f80 */
+	0x0f78, 0x0001,  0x0f71, 0x0f5c,
+	/* 0x0f40 0x0fb5 */
+	0x0f69, 0x0000,
+	/* 0x0f90 0x0fb5 */
+	0x0fb9, 0x0000,
+	/* 0x0f42 0x0fb7 */
+	0x0f43, 0x0000,
+	/* 0x0f4c 0x0fb7 */
+	0x0f4d, 0x0000,
+	/* 0x0f51 0x0fb7 */
+	0x0f52, 0x0000,
+	/* 0x0f56 0x0fb7 */
+	0x0f57, 0x0000,
+	/* 0x0f5b 0x0fb7 */
+	0x0f5c, 0x0000,
+	/* 0x0f92 0x0fb7 */
+	0x0f93, 0x0000,
+	/* 0x0f9c 0x0fb7 */
+	0x0f9d, 0x0000,
+	/* 0x0fa1 0x0fb7 */
+	0x0fa2, 0x0000,
+	/* 0x0fa6 0x0fb7 */
+	0x0fa7, 0x0000,
+	/* 0x0fab 0x0fb7 */
+	0x0fac, 0x0000,
+	/* 0x3046 0x3099 */
+	0x3094, 0x0000,
+	/* 0x304b 0x3099 */
+	0x304c, 0x0000,
+	/* 0x304d 0x3099 */
+	0x304e, 0x0000,
+	/* 0x304f 0x3099 */
+	0x3050, 0x0000,
+	/* 0x3051 0x3099 */
+	0x3052, 0x0000,
+	/* 0x3053 0x3099 */
+	0x3054, 0x0000,
+	/* 0x3055 0x3099 */
+	0x3056, 0x0000,
+	/* 0x3057 0x3099 */
+	0x3058, 0x0000,
+	/* 0x3059 0x3099 */
+	0x305a, 0x0000,
+	/* 0x305b 0x3099 */
+	0x305c, 0x0000,
+	/* 0x305d 0x3099 */
+	0x305e, 0x0000,
+	/* 0x305f 0x3099 */
+	0x3060, 0x0000,
+	/* 0x3061 0x3099 */
+	0x3062, 0x0000,
+	/* 0x3064 0x3099 */
+	0x3065, 0x0000,
+	/* 0x3066 0x3099 */
+	0x3067, 0x0000,
+	/* 0x3068 0x3099 */
+	0x3069, 0x0000,
+	/* 0x306f 0x3099 */
+	0x3070, 0x0000,
+	/* 0x3072 0x3099 */
+	0x3073, 0x0000,
+	/* 0x3075 0x3099 */
+	0x3076, 0x0000,
+	/* 0x3078 0x3099 */
+	0x3079, 0x0000,
+	/* 0x307b 0x3099 */
+	0x307c, 0x0000,
+	/* 0x309d 0x3099 */
+	0x309e, 0x0000,
+	/* 0x30a6 0x3099 */
+	0x30f4, 0x0000,
+	/* 0x30ab 0x3099 */
+	0x30ac, 0x0000,
+	/* 0x30ad 0x3099 */
+	0x30ae, 0x0000,
+	/* 0x30af 0x3099 */
+	0x30b0, 0x0000,
+	/* 0x30b1 0x3099 */
+	0x30b2, 0x0000,
+	/* 0x30b3 0x3099 */
+	0x30b4, 0x0000,
+	/* 0x30b5 0x3099 */
+	0x30b6, 0x0000,
+	/* 0x30b7 0x3099 */
+	0x30b8, 0x0000,
+	/* 0x30b9 0x3099 */
+	0x30ba, 0x0000,
+	/* 0x30bb 0x3099 */
+	0x30bc, 0x0000,
+	/* 0x30bd 0x3099 */
+	0x30be, 0x0000,
+	/* 0x30bf 0x3099 */
+	0x30c0, 0x0000,
+	/* 0x30c1 0x3099 */
+	0x30c2, 0x0000,
+	/* 0x30c4 0x3099 */
+	0x30c5, 0x0000,
+	/* 0x30c6 0x3099 */
+	0x30c7, 0x0000,
+	/* 0x30c8 0x3099 */
+	0x30c9, 0x0000,
+	/* 0x30cf 0x3099 */
+	0x30d0, 0x0000,
+	/* 0x30d2 0x3099 */
+	0x30d3, 0x0000,
+	/* 0x30d5 0x3099 */
+	0x30d6, 0x0000,
+	/* 0x30d8 0x3099 */
+	0x30d9, 0x0000,
+	/* 0x30db 0x3099 */
+	0x30dc, 0x0000,
+	/* 0x30ef 0x3099 */
+	0x30f7, 0x0000,
+	/* 0x30f0 0x3099 */
+	0x30f8, 0x0000,
+	/* 0x30f1 0x3099 */
+	0x30f9, 0x0000,
+	/* 0x30f2 0x3099 */
+	0x30fa, 0x0000,
+	/* 0x30fd 0x3099 */
+	0x30fe, 0x0000,
+	/* 0x306f 0x309a */
+	0x3071, 0x0000,
+	/* 0x3072 0x309a */
+	0x3074, 0x0000,
+	/* 0x3075 0x309a */
+	0x3077, 0x0000,
+	/* 0x3078 0x309a */
+	0x307a, 0x0000,
+	/* 0x307b 0x309a */
+	0x307d, 0x0000,
+	/* 0x30cf 0x309a */
+	0x30d1, 0x0000,
+	/* 0x30d2 0x309a */
+	0x30d4, 0x0000,
+	/* 0x30d5 0x309a */
+	0x30d7, 0x0000,
+	/* 0x30d8 0x309a */
+	0x30da, 0x0000,
+	/* 0x30db 0x309a */
+	0x30dd, 0x0000,
+	/* 0x0307 0x0053 0x0301 */
+	0x1e64, 0x0000,
+	/* 0x0307 0x0073 0x0301 */
+	0x1e65, 0x0000,
+	/* 0x0300 0x0041 0x0302 */
+	0x1ea6, 0x0000,
+	/* 0x0301 0x0041 0x0302 */
+	0x1ea4, 0x0000,
+	/* 0x0303 0x0041 0x0302 */
+	0x1eaa, 0x0000,
+	/* 0x0309 0x0041 0x0302 */
+	0x1ea8, 0x0000,
+	/* 0x0300 0x0045 0x0302 */
+	0x1ec0, 0x0000,
+	/* 0x0301 0x0045 0x0302 */
+	0x1ebe, 0x0000,
+	/* 0x0303 0x0045 0x0302 */
+	0x1ec4, 0x0000,
+	/* 0x0309 0x0045 0x0302 */
+	0x1ec2, 0x0000,
+	/* 0x0300 0x004f 0x0302 */
+	0x1ed2, 0x0000,
+	/* 0x0301 0x004f 0x0302 */
+	0x1ed0, 0x0000,
+	/* 0x0303 0x004f 0x0302 */
+	0x1ed6, 0x0000,
+	/* 0x0309 0x004f 0x0302 */
+	0x1ed4, 0x0000,
+	/* 0x0300 0x0061 0x0302 */
+	0x1ea7, 0x0000,
+	/* 0x0301 0x0061 0x0302 */
+	0x1ea5, 0x0000,
+	/* 0x0303 0x0061 0x0302 */
+	0x1eab, 0x0000,
+	/* 0x0309 0x0061 0x0302 */
+	0x1ea9, 0x0000,
+	/* 0x0300 0x0065 0x0302 */
+	0x1ec1, 0x0000,
+	/* 0x0301 0x0065 0x0302 */
+	0x1ebf, 0x0000,
+	/* 0x0303 0x0065 0x0302 */
+	0x1ec5, 0x0000,
+	/* 0x0309 0x0065 0x0302 */
+	0x1ec3, 0x0000,
+	/* 0x0300 0x006f 0x0302 */
+	0x1ed3, 0x0000,
+	/* 0x0301 0x006f 0x0302 */
+	0x1ed1, 0x0000,
+	/* 0x0303 0x006f 0x0302 */
+	0x1ed7, 0x0000,
+	/* 0x0309 0x006f 0x0302 */
+	0x1ed5, 0x0000,
+	/* 0x0301 0x004f 0x0303 */
+	0x1e4c, 0x0000,
+	/* 0x0308 0x004f 0x0303 */
+	0x1e4e, 0x0000,
+	/* 0x0301 0x0055 0x0303 */
+	0x1e78, 0x0000,
+	/* 0x0301 0x006f 0x0303 */
+	0x1e4d, 0x0000,
+	/* 0x0308 0x006f 0x0303 */
+	0x1e4f, 0x0000,
+	/* 0x0301 0x0075 0x0303 */
+	0x1e79, 0x0000,
+	/* 0x0300 0x0045 0x0304 */
+	0x1e14, 0x0000,
+	/* 0x0301 0x0045 0x0304 */
+	0x1e16, 0x0000,
+	/* 0x0300 0x004f 0x0304 */
+	0x1e50, 0x0000,
+	/* 0x0301 0x004f 0x0304 */
+	0x1e52, 0x0000,
+	/* 0x0308 0x0055 0x0304 */
+	0x1e7a, 0x0000,
+	/* 0x0300 0x0065 0x0304 */
+	0x1e15, 0x0000,
+	/* 0x0301 0x0065 0x0304 */
+	0x1e17, 0x0000,
+	/* 0x0300 0x006f 0x0304 */
+	0x1e51, 0x0000,
+	/* 0x0301 0x006f 0x0304 */
+	0x1e53, 0x0000,
+	/* 0x0308 0x0075 0x0304 */
+	0x1e7b, 0x0000,
+	/* 0x0300 0x0041 0x0306 */
+	0x1eb0, 0x0000,
+	/* 0x0301 0x0041 0x0306 */
+	0x1eae, 0x0000,
+	/* 0x0303 0x0041 0x0306 */
+	0x1eb4, 0x0000,
+	/* 0x0309 0x0041 0x0306 */
+	0x1eb2, 0x0000,
+	/* 0x0300 0x0061 0x0306 */
+	0x1eb1, 0x0000,
+	/* 0x0301 0x0061 0x0306 */
+	0x1eaf, 0x0000,
+	/* 0x0303 0x0061 0x0306 */
+	0x1eb5, 0x0000,
+	/* 0x0309 0x0061 0x0306 */
+	0x1eb3, 0x0000,
+	/* 0x0304 0x0041 0x0307 */
+	0x01e0, 0x0000,
+	/* 0x0304 0x0061 0x0307 */
+	0x01e1, 0x0000,
+	/* 0x0304 0x0041 0x0308 */
+	0x01de, 0x0000,
+	/* 0x0301 0x0049 0x0308 */
+	0x1e2e, 0x0000,
+	/* 0x0300 0x0055 0x0308 */
+	0x01db, 0x0000,
+	/* 0x0301 0x0055 0x0308 */
+	0x01d7, 0x0000,
+	/* 0x0304 0x0055 0x0308 */
+	0x01d5, 0x0000,
+	/* 0x030c 0x0055 0x0308 */
+	0x01d9, 0x0000,
+	/* 0x0304 0x0061 0x0308 */
+	0x01df, 0x0000,
+	/* 0x0301 0x0069 0x0308 */
+	0x1e2f, 0x0000,
+	/* 0x0300 0x0075 0x0308 */
+	0x01dc, 0x0000,
+	/* 0x0301 0x0075 0x0308 */
+	0x01d8, 0x0000,
+	/* 0x0304 0x0075 0x0308 */
+	0x01d6, 0x0000,
+	/* 0x030c 0x0075 0x0308 */
+	0x01da, 0x0000,
+	/* 0x0300 0x03b9 0x0308 */
+	0x1fd2, 0x0000,
+	/* 0x0301 0x03b9 0x0308 */
+	0x1fd3, 0x0000,
+	/* 0x030d 0x03b9 0x0308 */
+	0x0390, 0x0000,
+	/* 0x0342 0x03b9 0x0308 */
+	0x1fd7, 0x0000,
+	/* 0x0300 0x03c5 0x0308 */
+	0x1fe2, 0x0000,
+	/* 0x0301 0x03c5 0x0308 */
+	0x1fe3, 0x0000,
+	/* 0x030d 0x03c5 0x0308 */
+	0x03b0, 0x0000,
+	/* 0x0342 0x03c5 0x0308 */
+	0x1fe7, 0x0000,
+	/* 0x0301 0x0041 0x030a */
+	0x01fa, 0x0000,
+	/* 0x0301 0x0061 0x030a */
+	0x01fb, 0x0000,
+	/* 0x0307 0x0053 0x030c */
+	0x1e66, 0x0000,
+	/* 0x0307 0x0073 0x030c */
+	0x1e67, 0x0000,
+	/* 0x0300 0x0391 0x0313 */
+	0x1f0a, 0x0000,
+	/* 0x0301 0x0391 0x0313 */
+	0x1f0c, 0x0000,
+	/* 0x0342 0x0391 0x0313 */
+	0x1f0e, 0x0000,
+	/* 0x0300 0x0395 0x0313 */
+	0x1f1a, 0x0000,
+	/* 0x0301 0x0395 0x0313 */
+	0x1f1c, 0x0000,
+	/* 0x0300 0x0397 0x0313 */
+	0x1f2a, 0x0000,
+	/* 0x0301 0x0397 0x0313 */
+	0x1f2c, 0x0000,
+	/* 0x0342 0x0397 0x0313 */
+	0x1f2e, 0x0000,
+	/* 0x0300 0x0399 0x0313 */
+	0x1f3a, 0x0000,
+	/* 0x0301 0x0399 0x0313 */
+	0x1f3c, 0x0000,
+	/* 0x0342 0x0399 0x0313 */
+	0x1f3e, 0x0000,
+	/* 0x0300 0x039f 0x0313 */
+	0x1f4a, 0x0000,
+	/* 0x0301 0x039f 0x0313 */
+	0x1f4c, 0x0000,
+	/* 0x0300 0x03a9 0x0313 */
+	0x1f6a, 0x0000,
+	/* 0x0301 0x03a9 0x0313 */
+	0x1f6c, 0x0000,
+	/* 0x0342 0x03a9 0x0313 */
+	0x1f6e, 0x0000,
+	/* 0x0300 0x03b1 0x0313 */
+	0x1f02, 0x0000,
+	/* 0x0301 0x03b1 0x0313 */
+	0x1f04, 0x0000,
+	/* 0x0342 0x03b1 0x0313 */
+	0x1f06, 0x0000,
+	/* 0x0300 0x03b5 0x0313 */
+	0x1f12, 0x0000,
+	/* 0x0301 0x03b5 0x0313 */
+	0x1f14, 0x0000,
+	/* 0x0300 0x03b7 0x0313 */
+	0x1f22, 0x0000,
+	/* 0x0301 0x03b7 0x0313 */
+	0x1f24, 0x0000,
+	/* 0x0342 0x03b7 0x0313 */
+	0x1f26, 0x0000,
+	/* 0x0300 0x03b9 0x0313 */
+	0x1f32, 0x0000,
+	/* 0x0301 0x03b9 0x0313 */
+	0x1f34, 0x0000,
+	/* 0x0342 0x03b9 0x0313 */
+	0x1f36, 0x0000,
+	/* 0x0300 0x03bf 0x0313 */
+	0x1f42, 0x0000,
+	/* 0x0301 0x03bf 0x0313 */
+	0x1f44, 0x0000,
+	/* 0x0300 0x03c5 0x0313 */
+	0x1f52, 0x0000,
+	/* 0x0301 0x03c5 0x0313 */
+	0x1f54, 0x0000,
+	/* 0x0342 0x03c5 0x0313 */
+	0x1f56, 0x0000,
+	/* 0x0300 0x03c9 0x0313 */
+	0x1f62, 0x0000,
+	/* 0x0301 0x03c9 0x0313 */
+	0x1f64, 0x0000,
+	/* 0x0342 0x03c9 0x0313 */
+	0x1f66, 0x0000,
+	/* 0x0300 0x0391 0x0314 */
+	0x1f0b, 0x0000,
+	/* 0x0301 0x0391 0x0314 */
+	0x1f0d, 0x0000,
+	/* 0x0342 0x0391 0x0314 */
+	0x1f0f, 0x0000,
+	/* 0x0300 0x0395 0x0314 */
+	0x1f1b, 0x0000,
+	/* 0x0301 0x0395 0x0314 */
+	0x1f1d, 0x0000,
+	/* 0x0300 0x0397 0x0314 */
+	0x1f2b, 0x0000,
+	/* 0x0301 0x0397 0x0314 */
+	0x1f2d, 0x0000,
+	/* 0x0342 0x0397 0x0314 */
+	0x1f2f, 0x0000,
+	/* 0x0300 0x0399 0x0314 */
+	0x1f3b, 0x0000,
+	/* 0x0301 0x0399 0x0314 */
+	0x1f3d, 0x0000,
+	/* 0x0342 0x0399 0x0314 */
+	0x1f3f, 0x0000,
+	/* 0x0300 0x039f 0x0314 */
+	0x1f4b, 0x0000,
+	/* 0x0301 0x039f 0x0314 */
+	0x1f4d, 0x0000,
+	/* 0x0300 0x03a5 0x0314 */
+	0x1f5b, 0x0000,
+	/* 0x0301 0x03a5 0x0314 */
+	0x1f5d, 0x0000,
+	/* 0x0342 0x03a5 0x0314 */
+	0x1f5f, 0x0000,
+	/* 0x0300 0x03a9 0x0314 */
+	0x1f6b, 0x0000,
+	/* 0x0301 0x03a9 0x0314 */
+	0x1f6d, 0x0000,
+	/* 0x0342 0x03a9 0x0314 */
+	0x1f6f, 0x0000,
+	/* 0x0300 0x03b1 0x0314 */
+	0x1f03, 0x0000,
+	/* 0x0301 0x03b1 0x0314 */
+	0x1f05, 0x0000,
+	/* 0x0342 0x03b1 0x0314 */
+	0x1f07, 0x0000,
+	/* 0x0300 0x03b5 0x0314 */
+	0x1f13, 0x0000,
+	/* 0x0301 0x03b5 0x0314 */
+	0x1f15, 0x0000,
+	/* 0x0300 0x03b7 0x0314 */
+	0x1f23, 0x0000,
+	/* 0x0301 0x03b7 0x0314 */
+	0x1f25, 0x0000,
+	/* 0x0342 0x03b7 0x0314 */
+	0x1f27, 0x0000,
+	/* 0x0300 0x03b9 0x0314 */
+	0x1f33, 0x0000,
+	/* 0x0301 0x03b9 0x0314 */
+	0x1f35, 0x0000,
+	/* 0x0342 0x03b9 0x0314 */
+	0x1f37, 0x0000,
+	/* 0x0300 0x03bf 0x0314 */
+	0x1f43, 0x0000,
+	/* 0x0301 0x03bf 0x0314 */
+	0x1f45, 0x0000,
+	/* 0x0300 0x03c5 0x0314 */
+	0x1f53, 0x0000,
+	/* 0x0301 0x03c5 0x0314 */
+	0x1f55, 0x0000,
+	/* 0x0342 0x03c5 0x0314 */
+	0x1f57, 0x0000,
+	/* 0x0300 0x03c9 0x0314 */
+	0x1f63, 0x0000,
+	/* 0x0301 0x03c9 0x0314 */
+	0x1f65, 0x0000,
+	/* 0x0342 0x03c9 0x0314 */
+	0x1f67, 0x0000,
+	/* 0x0300 0x004f 0x031b */
+	0x1edc, 0x0000,
+	/* 0x0301 0x004f 0x031b */
+	0x1eda, 0x0000,
+	/* 0x0303 0x004f 0x031b */
+	0x1ee0, 0x0000,
+	/* 0x0309 0x004f 0x031b */
+	0x1ede, 0x0000,
+	/* 0x0323 0x004f 0x031b */
+	0x1ee2, 0x0000,
+	/* 0x0300 0x0055 0x031b */
+	0x1eea, 0x0000,
+	/* 0x0301 0x0055 0x031b */
+	0x1ee8, 0x0000,
+	/* 0x0303 0x0055 0x031b */
+	0x1eee, 0x0000,
+	/* 0x0309 0x0055 0x031b */
+	0x1eec, 0x0000,
+	/* 0x0323 0x0055 0x031b */
+	0x1ef0, 0x0000,
+	/* 0x0300 0x006f 0x031b */
+	0x1edd, 0x0000,
+	/* 0x0301 0x006f 0x031b */
+	0x1edb, 0x0000,
+	/* 0x0303 0x006f 0x031b */
+	0x1ee1, 0x0000,
+	/* 0x0309 0x006f 0x031b */
+	0x1edf, 0x0000,
+	/* 0x0323 0x006f 0x031b */
+	0x1ee3, 0x0000,
+	/* 0x0300 0x0075 0x031b */
+	0x1eeb, 0x0000,
+	/* 0x0301 0x0075 0x031b */
+	0x1ee9, 0x0000,
+	/* 0x0303 0x0075 0x031b */
+	0x1eef, 0x0000,
+	/* 0x0309 0x0075 0x031b */
+	0x1eed, 0x0000,
+	/* 0x0323 0x0075 0x031b */
+	0x1ef1, 0x0000,
+	/* 0x0302 0x0041 0x0323 */
+	0x1eac, 0x0000,
+	/* 0x0306 0x0041 0x0323 */
+	0x1eb6, 0x0000,
+	/* 0x0302 0x0045 0x0323 */
+	0x1ec6, 0x0000,
+	/* 0x0304 0x004c 0x0323 */
+	0x1e38, 0x0000,
+	/* 0x0302 0x004f 0x0323 */
+	0x1ed8, 0x0000,
+	/* 0x0304 0x0052 0x0323 */
+	0x1e5c, 0x0000,
+	/* 0x0307 0x0053 0x0323 */
+	0x1e68, 0x0000,
+	/* 0x0302 0x0061 0x0323 */
+	0x1ead, 0x0000,
+	/* 0x0306 0x0061 0x0323 */
+	0x1eb7, 0x0000,
+	/* 0x0302 0x0065 0x0323 */
+	0x1ec7, 0x0000,
+	/* 0x0304 0x006c 0x0323 */
+	0x1e39, 0x0000,
+	/* 0x0302 0x006f 0x0323 */
+	0x1ed9, 0x0000,
+	/* 0x0304 0x0072 0x0323 */
+	0x1e5d, 0x0000,
+	/* 0x0307 0x0073 0x0323 */
+	0x1e69, 0x0000,
+	/* 0x0301 0x0043 0x0327 */
+	0x1e08, 0x0000,
+	/* 0x0306 0x0045 0x0327 */
+	0x1e1c, 0x0000,
+	/* 0x0301 0x0063 0x0327 */
+	0x1e09, 0x0000,
+	/* 0x0306 0x0065 0x0327 */
+	0x1e1d, 0x0000,
+	/* 0x0304 0x004f 0x0328 */
+	0x01ec, 0x0000,
+	/* 0x0304 0x006f 0x0328 */
+	0x01ed, 0x0000,
+	/* 0x0313 0x0391 0x0345 */
+	0x1f88, 0x0003,  0x0300, 0x0f5e,  0x0301, 0x0f60,  0x0342, 0x0f62,
+	/* 0x0314 0x0391 0x0345 */
+	0x1f89, 0x0003,  0x0300, 0x0f64,  0x0301, 0x0f66,  0x0342, 0x0f68,
+	/* 0x0313 0x0397 0x0345 */
+	0x1f98, 0x0003,  0x0300, 0x0f6a,  0x0301, 0x0f6c,  0x0342, 0x0f6e,
+	/* 0x0314 0x0397 0x0345 */
+	0x1f99, 0x0003,  0x0300, 0x0f70,  0x0301, 0x0f72,  0x0342, 0x0f74,
+	/* 0x0313 0x03a9 0x0345 */
+	0x1fa8, 0x0003,  0x0300, 0x0f76,  0x0301, 0x0f78,  0x0342, 0x0f7a,
+	/* 0x0314 0x03a9 0x0345 */
+	0x1fa9, 0x0003,  0x0300, 0x0f7c,  0x0301, 0x0f7e,  0x0342, 0x0f80,
+	/* 0x0300 0x03b1 0x0345 */
+	0x1fb2, 0x0000,
+	/* 0x0301 0x03b1 0x0345 */
+	0x1fb4, 0x0000,
+	/* 0x0313 0x03b1 0x0345 */
+	0x1f80, 0x0003,  0x0300, 0x0f82,  0x0301, 0x0f84,  0x0342, 0x0f86,
+	/* 0x0314 0x03b1 0x0345 */
+	0x1f81, 0x0003,  0x0300, 0x0f88,  0x0301, 0x0f8a,  0x0342, 0x0f8c,
+	/* 0x0342 0x03b1 0x0345 */
+	0x1fb7, 0x0000,
+	/* 0x0300 0x03b7 0x0345 */
+	0x1fc2, 0x0000,
+	/* 0x0301 0x03b7 0x0345 */
+	0x1fc4, 0x0000,
+	/* 0x0313 0x03b7 0x0345 */
+	0x1f90, 0x0003,  0x0300, 0x0f8e,  0x0301, 0x0f90,  0x0342, 0x0f92,
+	/* 0x0314 0x03b7 0x0345 */
+	0x1f91, 0x0003,  0x0300, 0x0f94,  0x0301, 0x0f96,  0x0342, 0x0f98,
+	/* 0x0342 0x03b7 0x0345 */
+	0x1fc7, 0x0000,
+	/* 0x0301 0x03bf 0x0345 */
+	0x1ff4, 0x0000,
+	/* 0x0300 0x03c9 0x0345 */
+	0x1ff2, 0x0000,
+	/* 0x0313 0x03c9 0x0345 */
+	0x1fa0, 0x0003,  0x0300, 0x0f9a,  0x0301, 0x0f9c,  0x0342, 0x0f9e,
+	/* 0x0314 0x03c9 0x0345 */
+	0x1fa1, 0x0003,  0x0300, 0x0fa0,  0x0301, 0x0fa2,  0x0342, 0x0fa4,
+	/* 0x0342 0x03c9 0x0345 */
+	0x1ff7, 0x0000,
+	/* 0x05c1 0x05e9 0x05bc */
+	0xfb2c, 0x0000,
+	/* 0x05c2 0x05e9 0x05bc */
+	0xfb2d, 0x0000,
+	/* 0x0cd5 0x0cc6 0x0cc2 */
+	0x0ccb, 0x0000,
+	/* 0x0f71 0x0fb2 0x0f80 */
+	0x0f77, 0x0000,
+	/* 0x0f71 0x0fb3 0x0f80 */
+	0x0f79, 0x0000,
+	/* 0x0300 0x0313 0x0391 0x0345 */
+	0x1f8a, 0x0000,
+	/* 0x0301 0x0313 0x0391 0x0345 */
+	0x1f8c, 0x0000,
+	/* 0x0342 0x0313 0x0391 0x0345 */
+	0x1f8e, 0x0000,
+	/* 0x0300 0x0314 0x0391 0x0345 */
+	0x1f8b, 0x0000,
+	/* 0x0301 0x0314 0x0391 0x0345 */
+	0x1f8d, 0x0000,
+	/* 0x0342 0x0314 0x0391 0x0345 */
+	0x1f8f, 0x0000,
+	/* 0x0300 0x0313 0x0397 0x0345 */
+	0x1f9a, 0x0000,
+	/* 0x0301 0x0313 0x0397 0x0345 */
+	0x1f9c, 0x0000,
+	/* 0x0342 0x0313 0x0397 0x0345 */
+	0x1f9e, 0x0000,
+	/* 0x0300 0x0314 0x0397 0x0345 */
+	0x1f9b, 0x0000,
+	/* 0x0301 0x0314 0x0397 0x0345 */
+	0x1f9d, 0x0000,
+	/* 0x0342 0x0314 0x0397 0x0345 */
+	0x1f9f, 0x0000,
+	/* 0x0300 0x0313 0x03a9 0x0345 */
+	0x1faa, 0x0000,
+	/* 0x0301 0x0313 0x03a9 0x0345 */
+	0x1fac, 0x0000,
+	/* 0x0342 0x0313 0x03a9 0x0345 */
+	0x1fae, 0x0000,
+	/* 0x0300 0x0314 0x03a9 0x0345 */
+	0x1fab, 0x0000,
+	/* 0x0301 0x0314 0x03a9 0x0345 */
+	0x1fad, 0x0000,
+	/* 0x0342 0x0314 0x03a9 0x0345 */
+	0x1faf, 0x0000,
+	/* 0x0300 0x0313 0x03b1 0x0345 */
+	0x1f82, 0x0000,
+	/* 0x0301 0x0313 0x03b1 0x0345 */
+	0x1f84, 0x0000,
+	/* 0x0342 0x0313 0x03b1 0x0345 */
+	0x1f86, 0x0000,
+	/* 0x0300 0x0314 0x03b1 0x0345 */
+	0x1f83, 0x0000,
+	/* 0x0301 0x0314 0x03b1 0x0345 */
+	0x1f85, 0x0000,
+	/* 0x0342 0x0314 0x03b1 0x0345 */
+	0x1f87, 0x0000,
+	/* 0x0300 0x0313 0x03b7 0x0345 */
+	0x1f92, 0x0000,
+	/* 0x0301 0x0313 0x03b7 0x0345 */
+	0x1f94, 0x0000,
+	/* 0x0342 0x0313 0x03b7 0x0345 */
+	0x1f96, 0x0000,
+	/* 0x0300 0x0314 0x03b7 0x0345 */
+	0x1f93, 0x0000,
+	/* 0x0301 0x0314 0x03b7 0x0345 */
+	0x1f95, 0x0000,
+	/* 0x0342 0x0314 0x03b7 0x0345 */
+	0x1f97, 0x0000,
+	/* 0x0300 0x0313 0x03c9 0x0345 */
+	0x1fa2, 0x0000,
+	/* 0x0301 0x0313 0x03c9 0x0345 */
+	0x1fa4, 0x0000,
+	/* 0x0342 0x0313 0x03c9 0x0345 */
+	0x1fa6, 0x0000,
+	/* 0x0300 0x0314 0x03c9 0x0345 */
+	0x1fa3, 0x0000,
+	/* 0x0301 0x0314 0x03c9 0x0345 */
+	0x1fa5, 0x0000,
+	/* 0x0342 0x0314 0x03c9 0x0345 */
+	0x1fa7, 0x0000,
+};
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
new file mode 100644
index 0000000..060c690
--- /dev/null
+++ b/fs/hfsplus/unicode.c
@@ -0,0 +1,271 @@
+/*
+ *  linux/fs/hfsplus/unicode.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handler routines for unicode strings
+ */
+
+#include <linux/types.h>
+#include <linux/nls.h>
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+/* Fold the case of a unicode char, given the 16 bit value */
+/* Returns folded char, or 0 if ignorable */
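+/* The fold table is two-level: the high byte of the character indexes the */
+/* first level; a non-zero entry there is the offset of a 256-entry block */
+/* indexed by the low byte.  A zero first-level entry means nothing in that */
+/* page folds, so the character is returned unchanged. */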
+static inline u16 case_fold(u16 c)
+{
+	u16 tmp;
+
+	tmp = hfsplus_case_fold_table[c >> 8];
+	if (tmp)
+		tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
+	else
+		tmp = c;
+	return tmp;
+}
+
+/* Compare unicode strings, return values like normal strcmp */
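+/* Comparison is case-insensitive via case_fold(); characters that fold to */
+/* zero (ignorables) are skipped on both sides before comparing. */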
+int hfsplus_unistrcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unistr *s2)
+{
+	u16 len1, len2, c1, c2;
+	const hfsplus_unichr *p1, *p2;
+
+	len1 = be16_to_cpu(s1->length);
+	len2 = be16_to_cpu(s2->length);
+	p1 = s1->unicode;
+	p2 = s2->unicode;
+
+	while (1) {
+		c1 = c2 = 0;
+
+		while (len1 && !c1) {
+			c1 = case_fold(be16_to_cpu(*p1));
+			p1++;
+			len1--;
+		}
+		while (len2 && !c2) {
+			c2 = case_fold(be16_to_cpu(*p2));
+			p2++;
+			len2--;
+		}
+
+		if (c1 != c2)
+			return (c1 < c2) ? -1 : 1;
+		if (!c1 && !c2)
+			return 0;
+	}
+}
+
+#define Hangul_SBase	0xac00
+#define Hangul_LBase	0x1100
+#define Hangul_VBase	0x1161
+#define Hangul_TBase	0x11a7
+#define Hangul_SCount	11172
+#define Hangul_LCount	19
+#define Hangul_VCount	21
+#define Hangul_TCount	28
+#define Hangul_NCount	(Hangul_VCount * Hangul_TCount)
+
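+/*
+ * Hangul syllables are recomposed algorithmically using the standard
+ * Unicode formula
+ *
+ *	S = SBase + (LIndex * VCount + VIndex) * TCount + TIndex
+ *
+ * e.g. U+1100 (L index 0) + U+1161 (V index 0) + U+11A8 (T index 1) gives
+ * 0xac00 + (0 * 21 + 0) * 28 + 1 = U+AC01.  The compose table flags a
+ * leading jamo with the 0xffff marker so hfsplus_uni2asc() knows to do
+ * this arithmetic instead of a plain table lookup.
+ */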
+
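+/*
+ * Look up the combining character 'cc' in the compose-table node 'p'.
+ * A node is laid out as { result, count, char1, off1, char2, off2, ... }
+ * with the (char, offset) pairs sorted by character, which makes the
+ * binary search below possible.  A hit returns a pointer to the child
+ * node at hfsplus_compose_table + offset, a miss returns NULL.
+ */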
+static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
+{
+	int i, s, e;
+
+	s = 1;
+	e = p[1];
+	if (!e || cc < p[s * 2] || cc > p[e * 2])
+		return NULL;
+	do {
+		i = (s + e) / 2;
+		if (cc > p[i * 2])
+			s = i + 1;
+		else if (cc < p[i * 2])
+			e = i - 1;
+		else
+			return hfsplus_compose_table + p[i * 2 + 1];
+	} while (s <= e);
+	return NULL;
+}
+
+int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
+{
+	const hfsplus_unichr *ip;
+	struct nls_table *nls = HFSPLUS_SB(sb).nls;
+	u8 *op;
+	u16 cc, c0, c1;
+	u16 *ce1, *ce2;
+	int i, len, ustrlen, res, compose;
+
+	op = astr;
+	ip = ustr->unicode;
+	ustrlen = be16_to_cpu(ustr->length);
+	len = *len_p;
+	ce1 = NULL;
+	compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+
+	while (ustrlen > 0) {
+		c0 = be16_to_cpu(*ip++);
+		ustrlen--;
+		/* search for single decomposed char */
+		if (likely(compose))
+			ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
+		if (ce1 && (cc = ce1[0])) {
+			/* start of a possibly decomposed Hangul char */
+			if (cc != 0xffff)
+				goto done;
+			if (!ustrlen)
+				goto same;
+			c1 = be16_to_cpu(*ip) - Hangul_VBase;
+			if (c1 < Hangul_VCount) {
+				/* compose the Hangul char */
+				cc = (c0 - Hangul_LBase) * Hangul_VCount;
+				cc = (cc + c1) * Hangul_TCount;
+				cc += Hangul_SBase;
+				ip++;
+				ustrlen--;
+				if (!ustrlen)
+					goto done;
+				c1 = be16_to_cpu(*ip) - Hangul_TBase;
+				if (c1 > 0 && c1 < Hangul_TCount) {
+					cc += c1;
+					ip++;
+					ustrlen--;
+				}
+				goto done;
+			}
+		}
+		while (1) {
+			/* main loop for common case of not composed chars */
+			if (!ustrlen)
+				goto same;
+			c1 = be16_to_cpu(*ip);
+			if (likely(compose))
+				ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c1);
+			if (ce1)
+				break;
+			switch (c0) {
+			case 0:
+				c0 = 0x2400;
+				break;
+			case '/':
+				c0 = ':';
+				break;
+			}
+			res = nls->uni2char(c0, op, len);
+			if (res < 0) {
+				if (res == -ENAMETOOLONG)
+					goto out;
+				*op = '?';
+				res = 1;
+			}
+			op += res;
+			len -= res;
+			c0 = c1;
+			ip++;
+			ustrlen--;
+		}
+		ce2 = hfsplus_compose_lookup(ce1, c0);
+		if (ce2) {
+			i = 1;
+			while (i < ustrlen) {
+				ce1 = hfsplus_compose_lookup(ce2, be16_to_cpu(ip[i]));
+				if (!ce1)
+					break;
+				i++;
+				ce2 = ce1;
+			}
+			if ((cc = ce2[0])) {
+				ip += i;
+				ustrlen -= i;
+				goto done;
+			}
+		}
+	same:
+		switch (c0) {
+		case 0:
+			cc = 0x2400;
+			break;
+		case '/':
+			cc = ':';
+			break;
+		default:
+			cc = c0;
+		}
+	done:
+		res = nls->uni2char(cc, op, len);
+		if (res < 0) {
+			if (res == -ENAMETOOLONG)
+				goto out;
+			*op = '?';
+			res = 1;
+		}
+		op += res;
+		len -= res;
+	}
+	res = 0;
+out:
+	*len_p = (char *)op - astr;
+	return res;
+}
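+
+/*
+ * Note on the conversion above: an on-disk '/' is presented as ':' and an
+ * on-disk NUL as U+2400 so that both can appear in a POSIX name (the
+ * inverse mapping is done in hfsplus_asc2uni() below).  On entry *len_p is
+ * the size of the output buffer; on return it holds the number of bytes
+ * written, and the result is 0 or -ENAMETOOLONG if the buffer ran out.
+ */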
+
+int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr, const char *astr, int len)
+{
+	struct nls_table *nls = HFSPLUS_SB(sb).nls;
+	int size, off, decompose;
+	wchar_t c;
+	u16 outlen = 0;
+
+	decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+
+	while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
+		size = nls->char2uni(astr, len, &c);
+		if (size <= 0) {
+			c = '?';
+			size = 1;
+		}
+		astr += size;
+		len -= size;
+		switch (c) {
+		case 0x2400:
+			c = 0;
+			break;
+		case ':':
+			c = '/';
+			break;
+		}
+		if (c >= 0xc0 && decompose) {
+			off = hfsplus_decompose_table[(c >> 12) & 0xf];
+			if (!off)
+				goto done;
+			if (off == 0xffff) {
+				goto done;
+			}
+			off = hfsplus_decompose_table[off + ((c >> 8) & 0xf)];
+			if (!off)
+				goto done;
+			off = hfsplus_decompose_table[off + ((c >> 4) & 0xf)];
+			if (!off)
+				goto done;
+			off = hfsplus_decompose_table[off + (c & 0xf)];
+			size = off & 3;
+			if (!size)
+				goto done;
+			off /= 4;
+			if (outlen + size > HFSPLUS_MAX_STRLEN)
+				break;
+			do {
+				ustr->unicode[outlen++] = cpu_to_be16(hfsplus_decompose_table[off++]);
+			} while (--size > 0);
+			continue;
+		}
+	done:
+		ustr->unicode[outlen++] = cpu_to_be16(c);
+	}
+	ustr->length = cpu_to_be16(outlen);
+	if (len > 0)
+		return -ENAMETOOLONG;
+	return 0;
+}
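+
+/*
+ * The decomposition lookup above walks hfsplus_decompose_table as a
+ * four-level trie indexed by the nibbles of the character, most
+ * significant nibble first.  A zero entry (or the 0xffff marker at the
+ * first level) means the character has no decomposition.  The leaf packs
+ * the answer: the low two bits give the length of the decomposed sequence
+ * (one to three code points) and the value divided by four is the index
+ * into the same table where that sequence starts.
+ */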
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
new file mode 100644
index 0000000..0c51d63
--- /dev/null
+++ b/fs/hfsplus/wrapper.c
@@ -0,0 +1,171 @@
+/*
+ *  linux/fs/hfsplus/wrapper.c
+ *
+ * Copyright (C) 2001
+ * Brad Boyer (flar@allandria.com)
+ * (C) 2003 Ardis Technologies <roman@ardistech.com>
+ *
+ * Handling of HFS wrappers around HFS+ volumes
+ */
+
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/cdrom.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+#include <asm/unaligned.h>
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+struct hfsplus_wd {
+	u32 ablk_size;
+	u16 ablk_start;
+	u16 embed_start;
+	u16 embed_count;
+};
+
+static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
+{
+	u32 extent;
+	u16 attrib;
+
+	if (be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG)) != HFSPLUS_VOLHEAD_SIG)
+		return 0;
+
+	attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB));
+	if (!(attrib & HFSP_WRAP_ATTRIB_SLOCK) ||
+	   !(attrib & HFSP_WRAP_ATTRIB_SPARED))
+		return 0;
+
+	wd->ablk_size = be32_to_cpu(*(__be32 *)(bufptr + HFSP_WRAPOFF_ABLKSIZE));
+	if (wd->ablk_size < HFSPLUS_SECTOR_SIZE)
+		return 0;
+	if (wd->ablk_size % HFSPLUS_SECTOR_SIZE)
+		return 0;
+	wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
+
+	extent = be32_to_cpu(get_unaligned((__be32 *)(bufptr + HFSP_WRAPOFF_EMBEDEXT)));
+	wd->embed_start = (extent >> 16) & 0xFFFF;
+	wd->embed_count = extent & 0xFFFF;
+
+	return 1;
+}
+
+static int hfsplus_get_last_session(struct super_block *sb,
+				    sector_t *start, sector_t *size)
+{
+	struct cdrom_multisession ms_info;
+	struct cdrom_tocentry te;
+	int res;
+
+	/* default values */
+	*start = 0;
+	*size = sb->s_bdev->bd_inode->i_size >> 9;
+
+	if (HFSPLUS_SB(sb).session >= 0) {
+		te.cdte_track = HFSPLUS_SB(sb).session;
+		te.cdte_format = CDROM_LBA;
+		res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
+		if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
+			*start = (sector_t)te.cdte_addr.lba << 2;
+			return 0;
+		}
+		printk(KERN_ERR "HFS: Invalid session number or type of track\n");
+		return -EINVAL;
+	}
+	ms_info.addr_format = CDROM_LBA;
+	res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
+	if (!res && ms_info.xa_flag)
+		*start = (sector_t)ms_info.addr.lba << 2;
+	return 0;
+}
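+
+/*
+ * The '<< 2' above converts a CD-ROM logical block address, which counts
+ * 2048-byte frames, into the 512-byte sector units used by part_start
+ * (2048 / 512 = 4).
+ */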
+
+/* Find the volume header and fill in some minimum bits in superblock */
+/* Takes in super block, returns 0 if the volume header was read successfully */
+int hfsplus_read_wrapper(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct hfsplus_vh *vhdr;
+	struct hfsplus_wd wd;
+	sector_t part_start, part_size;
+	u32 blocksize;
+
+	blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
+	if (!blocksize)
+		return -EINVAL;
+
+	if (hfsplus_get_last_session(sb, &part_start, &part_size))
+		return -EINVAL;
+	while (1) {
+		bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
+		if (!bh)
+			return -EIO;
+
+		if (vhdr->signature == cpu_to_be16(HFSP_WRAP_MAGIC)) {
+			if (!hfsplus_read_mdb(vhdr, &wd))
+				goto error;
+			wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
+			part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
+			part_size = wd.embed_count * wd.ablk_size;
+			brelse(bh);
+			bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
+			if (!bh)
+				return -EIO;
+		}
+		if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
+			break;
+		brelse(bh);
+
+		/* check for a partition block
+		 * (should do this only for cdrom/loop though)
+		 */
+		if (hfs_part_find(sb, &part_start, &part_size))
+			return -EINVAL;
+	}
+
+	blocksize = be32_to_cpu(vhdr->blocksize);
+	brelse(bh);
+
+	/* block size must be at least as large as a sector
+	 * and a power of 2
+	 */
+	if (blocksize < HFSPLUS_SECTOR_SIZE ||
+	    ((blocksize - 1) & blocksize))
+		return -EINVAL;
+	HFSPLUS_SB(sb).alloc_blksz = blocksize;
+	HFSPLUS_SB(sb).alloc_blksz_shift = 0;
+	while ((blocksize >>= 1) != 0)
+		HFSPLUS_SB(sb).alloc_blksz_shift++;
+	blocksize = min(HFSPLUS_SB(sb).alloc_blksz, (u32)PAGE_SIZE);
+
+	/* align block size to block offset */
+	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
+		blocksize >>= 1;
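+	/* blocksize was capped at PAGE_SIZE above since the buffer layer
+	 * cannot use blocks larger than a page, and is halved here until
+	 * part_start (in 512-byte sectors) is a whole number of blocks, so
+	 * that reads of the volume header stay block-aligned.
+	 */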
+
+	if (sb_set_blocksize(sb, blocksize) != blocksize) {
+		printk("HFS+: unable to set blocksize to %u!\n", blocksize);
+		return -EINVAL;
+	}
+
+	HFSPLUS_SB(sb).blockoffset = part_start >>
+			(sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
+	HFSPLUS_SB(sb).sect_count = part_size;
+	HFSPLUS_SB(sb).fs_shift = HFSPLUS_SB(sb).alloc_blksz_shift -
+			sb->s_blocksize_bits;
+
+	bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
+	if (!bh)
+		return -EIO;
+
+	/* should still be the same... */
+	if (be16_to_cpu(vhdr->signature) != HFSPLUS_VOLHEAD_SIG)
+		goto error;
+	HFSPLUS_SB(sb).s_vhbh = bh;
+	HFSPLUS_SB(sb).s_vhdr = vhdr;
+
+	return 0;
+ error:
+	brelse(bh);
+	return -EINVAL;
+}
diff --git a/fs/hostfs/Makefile b/fs/hostfs/Makefile
new file mode 100644
index 0000000..d5beaff
--- /dev/null
+++ b/fs/hostfs/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+# Licensed under the GPL
+#
+
+hostfs-objs := hostfs_kern.o hostfs_user.o
+
+obj-y :=
+obj-$(CONFIG_HOSTFS) += hostfs.o
+
+include arch/um/scripts/Makefile.rules
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
new file mode 100644
index 0000000..c1516d0
--- /dev/null
+++ b/fs/hostfs/hostfs.h
@@ -0,0 +1,100 @@
+#ifndef __UM_FS_HOSTFS
+#define __UM_FS_HOSTFS
+
+#include "os.h"
+
+/* These are exactly the same definitions as in fs.h, but the names are
+ * changed so that this file can be included in both kernel and user files.
+ */
+
+#define HOSTFS_ATTR_MODE	1
+#define HOSTFS_ATTR_UID 	2
+#define HOSTFS_ATTR_GID 	4
+#define HOSTFS_ATTR_SIZE	8
+#define HOSTFS_ATTR_ATIME	16
+#define HOSTFS_ATTR_MTIME	32
+#define HOSTFS_ATTR_CTIME	64
+#define HOSTFS_ATTR_ATIME_SET	128
+#define HOSTFS_ATTR_MTIME_SET	256
+
+/* These two are unused by hostfs. */
+#define HOSTFS_ATTR_FORCE	512	/* Not a change, but forces the change through */
+#define HOSTFS_ATTR_ATTR_FLAG	1024
+
+/* If you are very careful, you'll notice that these two are missing:
+ *
+ * #define ATTR_KILL_SUID	2048
+ * #define ATTR_KILL_SGID	4096
+ *
+ * and this is because they were added in 2.5 development in this patch:
+ *
+ * http://linux.bkbits.net:8080/linux-2.5/
+ * cset@3caf4a12k4XgDzK7wyK-TGpSZ9u2Ww?nav=index.html
+ * |src/.|src/include|src/include/linux|related/include/linux/fs.h
+ *
+ * Actually, they are not needed by most ->setattr() methods - they are set by
+ * callers of notify_change() to notify that the setuid/setgid bits must be
+ * dropped.
+ * notify_change() will delete those flags, make sure attr->ia_valid & ATTR_MODE
+ * is on, and remove the appropriate bits from attr->ia_mode (attr is a
+ * "struct iattr *"). -BlaisorBlade
+ */
+
+struct hostfs_iattr {
+	unsigned int	ia_valid;
+	mode_t		ia_mode;
+	uid_t		ia_uid;
+	gid_t		ia_gid;
+	loff_t		ia_size;
+	struct timespec	ia_atime;
+	struct timespec	ia_mtime;
+	struct timespec	ia_ctime;
+	unsigned int	ia_attr_flags;
+};
+
+extern int stat_file(const char *path, unsigned long long *inode_out,
+		     int *mode_out, int *nlink_out, int *uid_out, int *gid_out,
+		     unsigned long long *size_out, struct timespec *atime_out,
+		     struct timespec *mtime_out, struct timespec *ctime_out,
+		     int *blksize_out, unsigned long long *blocks_out);
+extern int access_file(char *path, int r, int w, int x);
+extern int open_file(char *path, int r, int w, int append);
+extern int file_type(const char *path, int *maj, int *min);
+extern void *open_dir(char *path, int *err_out);
+extern char *read_dir(void *stream, unsigned long long *pos,
+		      unsigned long long *ino_out, int *len_out);
+extern void close_file(void *stream);
+extern void close_dir(void *stream);
+extern int read_file(int fd, unsigned long long *offset, char *buf, int len);
+extern int write_file(int fd, unsigned long long *offset, const char *buf,
+		      int len);
+extern int lseek_file(int fd, long long offset, int whence);
+extern int file_create(char *name, int ur, int uw, int ux, int gr,
+		       int gw, int gx, int or, int ow, int ox);
+extern int set_attr(const char *file, struct hostfs_iattr *attrs);
+extern int make_symlink(const char *from, const char *to);
+extern int unlink_file(const char *file);
+extern int do_mkdir(const char *file, int mode);
+extern int do_rmdir(const char *file);
+extern int do_mknod(const char *file, int mode, int dev);
+extern int link_file(const char *from, const char *to);
+extern int do_readlink(char *file, char *buf, int size);
+extern int rename_file(char *from, char *to);
+extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
+		     long long *bfree_out, long long *bavail_out,
+		     long long *files_out, long long *ffree_out,
+		     void *fsid_out, int fsid_size, long *namelen_out,
+		     long *spare_out);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
new file mode 100644
index 0000000..a88ad29
--- /dev/null
+++ b/fs/hostfs/hostfs_kern.c
@@ -0,0 +1,1045 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ *
+ * Ported the filesystem routines to 2.5.
+ * 2003-02-10 Petr Baudis <pasky@ucw.cz>
+ */
+
+#include <linux/stddef.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/root_dev.h>
+#include <linux/statfs.h>
+#include <linux/kdev_t.h>
+#include <asm/uaccess.h>
+#include "hostfs.h"
+#include "kern_util.h"
+#include "kern.h"
+#include "user_util.h"
+#include "2_5compat.h"
+#include "init.h"
+
+struct hostfs_inode_info {
+	char *host_filename;
+	int fd;
+	int mode;
+	struct inode vfs_inode;
+};
+
+static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
+{
+	return(list_entry(inode, struct hostfs_inode_info, vfs_inode));
+}
+
+#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_dentry->d_inode)
+
+int hostfs_d_delete(struct dentry *dentry)
+{
+	return(1);
+}
+
+struct dentry_operations hostfs_dentry_ops = {
+	.d_delete		= hostfs_d_delete,
+};
+
+/* Changed in hostfs_args before the kernel starts running */
+static char *root_ino = "/";
+static int append = 0;
+
+#define HOSTFS_SUPER_MAGIC 0x00c0ffee
+
+static struct inode_operations hostfs_iops;
+static struct inode_operations hostfs_dir_iops;
+static struct address_space_operations hostfs_link_aops;
+
+#ifndef MODULE
+static int __init hostfs_args(char *options, int *add)
+{
+	char *ptr;
+
+	ptr = strchr(options, ',');
+	if(ptr != NULL)
+		*ptr++ = '\0';
+	if(*options != '\0')
+		root_ino = options;
+
+	options = ptr;
+	while(options){
+		ptr = strchr(options, ',');
+		if(ptr != NULL)
+			*ptr++ = '\0';
+		if(*options != '\0'){
+			if(!strcmp(options, "append"))
+				append = 1;
+			else printf("hostfs_args - unsupported option - %s\n",
+				    options);
+		}
+		options = ptr;
+	}
+	return(0);
+}
+
+__uml_setup("hostfs=", hostfs_args,
+"hostfs=<root dir>,<flags>,...\n"
+"    This is used to set hostfs parameters.  The root directory argument\n"
+"    is used to confine all hostfs mounts to within the specified directory\n"
+"    tree on the host.  If this isn't specified, then a user inside UML can\n"
+"    mount anything on the host that's accessible to the user that's running\n"
+"    it.\n"
+"    The only flag currently supported is 'append', which specifies that all\n"
+"    files opened by hostfs will be opened in append mode.\n\n"
+);
+#endif
+
+static char *dentry_name(struct dentry *dentry, int extra)
+{
+	struct dentry *parent;
+	char *root, *name;
+	int len;
+
+	len = 0;
+	parent = dentry;
+	while(parent->d_parent != parent){
+		len += parent->d_name.len + 1;
+		parent = parent->d_parent;
+	}
+
+	root = HOSTFS_I(parent->d_inode)->host_filename;
+	len += strlen(root);
+	name = kmalloc(len + extra + 1, GFP_KERNEL);
+	if(name == NULL) return(NULL);
+
+	name[len] = '\0';
+	parent = dentry;
+	while(parent->d_parent != parent){
+		len -= parent->d_name.len + 1;
+		name[len] = '/';
+		strncpy(&name[len + 1], parent->d_name.name,
+			parent->d_name.len);
+		parent = parent->d_parent;
+	}
+	strncpy(name, root, strlen(root));
+	return(name);
+}
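+
+/* dentry_name() makes two passes over the dentry chain: the first walk up
+ * to the root only measures the length, then the buffer is filled in from
+ * the tail backwards and the root's host_filename is copied in last.  For
+ * example, a chain a/b/c under a hostfs root of "/host" (illustrative)
+ * yields "/host/a/b/c".  The 'extra' argument reserves room for a suffix.
+ */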
+
+static char *inode_name(struct inode *ino, int extra)
+{
+	struct dentry *dentry;
+
+	dentry = list_entry(ino->i_dentry.next, struct dentry, d_alias);
+	return(dentry_name(dentry, extra));
+}
+
+static int read_name(struct inode *ino, char *name)
+{
+	/* The non-int inode fields are copied into ints by stat_file and
+	 * then copied into the inode because passing the actual pointers
+	 * in and having them treated as int * breaks on big-endian machines
+	 */
+	int err;
+	int i_mode, i_nlink, i_blksize;
+	unsigned long long i_size;
+	unsigned long long i_ino;
+	unsigned long long i_blocks;
+
+	err = stat_file(name, &i_ino, &i_mode, &i_nlink, &ino->i_uid,
+			&ino->i_gid, &i_size, &ino->i_atime, &ino->i_mtime,
+			&ino->i_ctime, &i_blksize, &i_blocks);
+	if(err)
+		return(err);
+
+	ino->i_ino = i_ino;
+	ino->i_mode = i_mode;
+	ino->i_nlink = i_nlink;
+	ino->i_size = i_size;
+	ino->i_blksize = i_blksize;
+	ino->i_blocks = i_blocks;
+	if((ino->i_sb->s_dev == ROOT_DEV) && (ino->i_uid == getuid()))
+		ino->i_uid = 0;
+	return(0);
+}
+
+static char *follow_link(char *link)
+{
+	int len, n;
+	char *name, *resolved, *end;
+
+	len = 64;
+	while(1){
+		n = -ENOMEM;
+		name = kmalloc(len, GFP_KERNEL);
+		if(name == NULL)
+			goto out;
+
+		n = do_readlink(link, name, len);
+		if(n < len)
+			break;
+		len *= 2;
+		kfree(name);
+	}
+	if(n < 0)
+		goto out_free;
+
+	if(*name == '/')
+		return(name);
+
+	end = strrchr(link, '/');
+	if(end == NULL)
+		return(name);
+
+	*(end + 1) = '\0';
+	len = strlen(link) + strlen(name) + 1;
+
+	resolved = kmalloc(len, GFP_KERNEL);
+	if(resolved == NULL){
+		n = -ENOMEM;
+		goto out_free;
+	}
+
+	sprintf(resolved, "%s%s", link, name);
+	kfree(name);
+	kfree(link);
+	return(resolved);
+
+ out_free:
+	kfree(name);
+ out:
+	return(ERR_PTR(n));
+}
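+
+/* follow_link() reads the link target into a buffer that is doubled until
+ * readlink() fits.  An absolute target is returned as is; a relative one
+ * is spliced onto the directory part of the original link path (everything
+ * up to the last '/') so the caller always gets a full host path.
+ */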
+
+static int read_inode(struct inode *ino)
+{
+	char *name;
+	int err = 0;
+
+	/* Unfortunately, we are called from iget() when we don't have a dentry
+	 * allocated yet.
+	 */
+	if(list_empty(&ino->i_dentry))
+		goto out;
+
+	err = -ENOMEM;
+	name = inode_name(ino, 0);
+	if(name == NULL)
+		goto out;
+
+	if(file_type(name, NULL, NULL) == OS_TYPE_SYMLINK){
+		name = follow_link(name);
+		if(IS_ERR(name)){
+			err = PTR_ERR(name);
+			goto out;
+		}
+	}
+
+	err = read_name(ino, name);
+	kfree(name);
+ out:
+	return(err);
+}
+
+int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
+{
+	/* do_statfs uses struct statfs64 internally, but the linux kernel
+	 * struct statfs still has 32-bit versions for most of these fields,
+	 * so we convert them here
+	 */
+	int err;
+	long long f_blocks;
+	long long f_bfree;
+	long long f_bavail;
+	long long f_files;
+	long long f_ffree;
+
+	err = do_statfs(HOSTFS_I(sb->s_root->d_inode)->host_filename,
+			&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
+			&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
+			&sf->f_namelen, sf->f_spare);
+	if(err) return(err);
+	sf->f_blocks = f_blocks;
+	sf->f_bfree = f_bfree;
+	sf->f_bavail = f_bavail;
+	sf->f_files = f_files;
+	sf->f_ffree = f_ffree;
+	sf->f_type = HOSTFS_SUPER_MAGIC;
+	return(0);
+}
+
+static struct inode *hostfs_alloc_inode(struct super_block *sb)
+{
+	struct hostfs_inode_info *hi;
+
+	hi = kmalloc(sizeof(*hi), GFP_KERNEL);
+	if(hi == NULL)
+		return(NULL);
+
+	*hi = ((struct hostfs_inode_info) { .host_filename	= NULL,
+					    .fd			= -1,
+					    .mode		= 0 });
+	inode_init_once(&hi->vfs_inode);
+	return(&hi->vfs_inode);
+}
+
+static void hostfs_delete_inode(struct inode *inode)
+{
+	if(HOSTFS_I(inode)->fd != -1) {
+		close_file(&HOSTFS_I(inode)->fd);
+		HOSTFS_I(inode)->fd = -1;
+	}
+	clear_inode(inode);
+}
+
+static void hostfs_destroy_inode(struct inode *inode)
+{
+	if(HOSTFS_I(inode)->host_filename)
+		kfree(HOSTFS_I(inode)->host_filename);
+
+	/*XXX: This should not happen, probably. The check is here for
+	 * additional safety.*/
+	if(HOSTFS_I(inode)->fd != -1) {
+		close_file(&HOSTFS_I(inode)->fd);
+		printk(KERN_DEBUG "Closing host fd in .destroy_inode\n");
+	}
+
+	kfree(HOSTFS_I(inode));
+}
+
+static void hostfs_read_inode(struct inode *inode)
+{
+	read_inode(inode);
+}
+
+static struct super_operations hostfs_sbops = {
+	.alloc_inode	= hostfs_alloc_inode,
+	.drop_inode	= generic_delete_inode,
+	.delete_inode   = hostfs_delete_inode,
+	.destroy_inode	= hostfs_destroy_inode,
+	.read_inode	= hostfs_read_inode,
+	.statfs		= hostfs_statfs,
+};
+
+int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
+{
+	void *dir;
+	char *name;
+	unsigned long long next, ino;
+	int error, len;
+
+	name = dentry_name(file->f_dentry, 0);
+	if(name == NULL) return(-ENOMEM);
+	dir = open_dir(name, &error);
+	kfree(name);
+	if(dir == NULL) return(-error);
+	next = file->f_pos;
+	while((name = read_dir(dir, &next, &ino, &len)) != NULL){
+		error = (*filldir)(ent, name, len, file->f_pos,
+				   ino, DT_UNKNOWN);
+		if(error) break;
+		file->f_pos = next;
+	}
+	close_dir(dir);
+	return(0);
+}
+
+int hostfs_file_open(struct inode *ino, struct file *file)
+{
+	char *name;
+	int mode = 0, r = 0, w = 0, fd;
+
+	mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
+	if((mode & HOSTFS_I(ino)->mode) == mode)
+		return(0);
+
+	/* The file may already have been opened, but with the wrong access,
+	 * so this resets things and reopens the file with the new access.
+	 */
+	if(HOSTFS_I(ino)->fd != -1){
+		close_file(&HOSTFS_I(ino)->fd);
+		HOSTFS_I(ino)->fd = -1;
+	}
+
+	HOSTFS_I(ino)->mode |= mode;
+	if(HOSTFS_I(ino)->mode & FMODE_READ)
+		r = 1;
+	if(HOSTFS_I(ino)->mode & FMODE_WRITE)
+		w = 1;
+	if(w)
+		r = 1;
+
+	name = dentry_name(file->f_dentry, 0);
+	if(name == NULL)
+		return(-ENOMEM);
+
+	fd = open_file(name, r, w, append);
+	kfree(name);
+	if(fd < 0) return(fd);
+	FILE_HOSTFS_I(file)->fd = fd;
+
+	return(0);
+}
+
+int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	return(0);
+}
+
+static struct file_operations hostfs_file_fops = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.sendfile	= generic_file_sendfile,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= generic_file_aio_write,
+	.readv		= generic_file_readv,
+	.writev		= generic_file_writev,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.open		= hostfs_file_open,
+	.release	= NULL,
+	.fsync		= hostfs_fsync,
+};
+
+static struct file_operations hostfs_dir_fops = {
+	.llseek		= generic_file_llseek,
+	.readdir	= hostfs_readdir,
+	.read		= generic_read_dir,
+};
+
+int hostfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	char *buffer;
+	unsigned long long base;
+	int count = PAGE_CACHE_SIZE;
+	int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	int err;
+
+	if (page->index >= end_index)
+		count = inode->i_size & (PAGE_CACHE_SIZE-1);
+
+	buffer = kmap(page);
+	base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
+
+	err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
+	if(err != count){
+		ClearPageUptodate(page);
+		goto out;
+	}
+
+	if (base > inode->i_size)
+		inode->i_size = base;
+
+	if (PageError(page))
+		ClearPageError(page);
+	err = 0;
+
+ out:
+	kunmap(page);
+
+	unlock_page(page);
+	return err;
+}
+
+int hostfs_readpage(struct file *file, struct page *page)
+{
+	char *buffer;
+	long long start;
+	int err = 0;
+
+	start = (long long) page->index << PAGE_CACHE_SHIFT;
+	buffer = kmap(page);
+	err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
+			PAGE_CACHE_SIZE);
+	if(err < 0) goto out;
+
+	memset(&buffer[err], 0, PAGE_CACHE_SIZE - err);
+
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	if (PageError(page)) ClearPageError(page);
+	err = 0;
+ out:
+	kunmap(page);
+	unlock_page(page);
+	return(err);
+}
+
+int hostfs_prepare_write(struct file *file, struct page *page,
+			 unsigned int from, unsigned int to)
+{
+	char *buffer;
+	long long start, tmp;
+	int err;
+
+	start = (long long) page->index << PAGE_CACHE_SHIFT;
+	buffer = kmap(page);
+	if(from != 0){
+		tmp = start;
+		err = read_file(FILE_HOSTFS_I(file)->fd, &tmp, buffer,
+				from);
+		if(err < 0) goto out;
+	}
+	if(to != PAGE_CACHE_SIZE){
+		start += to;
+		err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer + to,
+				PAGE_CACHE_SIZE - to);
+		if(err < 0) goto out;
+	}
+	err = 0;
+ out:
+	kunmap(page);
+	return(err);
+}
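+
+/* hostfs_prepare_write() reads the parts of the page outside [from, to)
+ * from the host file so the page is fully up to date, which lets
+ * hostfs_commit_write() below push only the modified byte range back with
+ * write_file().
+ */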
+
+int hostfs_commit_write(struct file *file, struct page *page, unsigned from,
+		 unsigned to)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	char *buffer;
+	long long start;
+	int err = 0;
+
+	start = ((long long) page->index << PAGE_CACHE_SHIFT) + from;
+	buffer = kmap(page);
+	err = write_file(FILE_HOSTFS_I(file)->fd, &start, buffer + from,
+			 to - from);
+	if(err > 0) err = 0;
+	if(!err && (start > inode->i_size))
+		inode->i_size = start;
+
+	kunmap(page);
+	return(err);
+}
+
+static struct address_space_operations hostfs_aops = {
+	.writepage 	= hostfs_writepage,
+	.readpage	= hostfs_readpage,
+/* 	.set_page_dirty = __set_page_dirty_nobuffers, */
+	.prepare_write	= hostfs_prepare_write,
+	.commit_write	= hostfs_commit_write
+};
+
+static int init_inode(struct inode *inode, struct dentry *dentry)
+{
+	char *name;
+	int type, err = -ENOMEM;
+	int maj, min;
+	dev_t rdev = 0;
+
+	if(dentry){
+		name = dentry_name(dentry, 0);
+		if(name == NULL)
+			goto out;
+		type = file_type(name, &maj, &min);
+		/*Reencode maj and min with the kernel encoding.*/
+		rdev = MKDEV(maj, min);
+		kfree(name);
+	}
+	else type = OS_TYPE_DIR;
+
+	err = 0;
+	if(type == OS_TYPE_SYMLINK)
+		inode->i_op = &page_symlink_inode_operations;
+	else if(type == OS_TYPE_DIR)
+		inode->i_op = &hostfs_dir_iops;
+	else inode->i_op = &hostfs_iops;
+
+	if(type == OS_TYPE_DIR) inode->i_fop = &hostfs_dir_fops;
+	else inode->i_fop = &hostfs_file_fops;
+
+	if(type == OS_TYPE_SYMLINK)
+		inode->i_mapping->a_ops = &hostfs_link_aops;
+	else inode->i_mapping->a_ops = &hostfs_aops;
+
+	switch (type) {
+	case OS_TYPE_CHARDEV:
+		init_special_inode(inode, S_IFCHR, rdev);
+		break;
+	case OS_TYPE_BLOCKDEV:
+		init_special_inode(inode, S_IFBLK, rdev);
+		break;
+	case OS_TYPE_FIFO:
+		init_special_inode(inode, S_IFIFO, 0);
+		break;
+	case OS_TYPE_SOCK:
+		init_special_inode(inode, S_IFSOCK, 0);
+		break;
+	}
+ out:
+	return(err);
+}
+
+int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
+                 struct nameidata *nd)
+{
+	struct inode *inode;
+	char *name;
+	int error, fd;
+
+	error = -ENOMEM;
+	inode = iget(dir->i_sb, 0);
+	if(inode == NULL) goto out;
+
+	error = init_inode(inode, dentry);
+	if(error)
+		goto out_put;
+
+	error = -ENOMEM;
+	name = dentry_name(dentry, 0);
+	if(name == NULL)
+		goto out_put;
+
+	fd = file_create(name,
+			 mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR,
+			 mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP,
+			 mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH);
+	if(fd < 0)
+		error = fd;
+	else error = read_name(inode, name);
+
+	kfree(name);
+	if(error)
+		goto out_put;
+
+	HOSTFS_I(inode)->fd = fd;
+	HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE;
+	d_instantiate(dentry, inode);
+	return(0);
+
+ out_put:
+	iput(inode);
+ out:
+	return(error);
+}
+
+struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
+                            struct nameidata *nd)
+{
+	struct inode *inode;
+	char *name;
+	int err;
+
+	err = -ENOMEM;
+	inode = iget(ino->i_sb, 0);
+	if(inode == NULL)
+		goto out;
+
+	err = init_inode(inode, dentry);
+	if(err)
+		goto out_put;
+
+	err = -ENOMEM;
+	name = dentry_name(dentry, 0);
+	if(name == NULL)
+		goto out_put;
+
+	err = read_name(inode, name);
+	kfree(name);
+	if(err == -ENOENT){
+		iput(inode);
+		inode = NULL;
+	}
+	else if(err)
+		goto out_put;
+
+	d_add(dentry, inode);
+	dentry->d_op = &hostfs_dentry_ops;
+	return(NULL);
+
+ out_put:
+	iput(inode);
+ out:
+	return(ERR_PTR(err));
+}
+
+static char *inode_dentry_name(struct inode *ino, struct dentry *dentry)
+{
+	char *file;
+	int len;
+
+	file = inode_name(ino, dentry->d_name.len + 1);
+	if(file == NULL) return(NULL);
+	strcat(file, "/");
+	len = strlen(file);
+	strncat(file, dentry->d_name.name, dentry->d_name.len);
+	file[len + dentry->d_name.len] = '\0';
+	return(file);
+}
+
+int hostfs_link(struct dentry *to, struct inode *ino, struct dentry *from)
+{
+	char *from_name, *to_name;
+	int err;
+
+	if((from_name = inode_dentry_name(ino, from)) == NULL)
+		return(-ENOMEM);
+	to_name = dentry_name(to, 0);
+	if(to_name == NULL){
+		kfree(from_name);
+		return(-ENOMEM);
+	}
+	err = link_file(to_name, from_name);
+	kfree(from_name);
+	kfree(to_name);
+	return(err);
+}
+
+int hostfs_unlink(struct inode *ino, struct dentry *dentry)
+{
+	char *file;
+	int err;
+
+	if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+	if(append)
+		return(-EPERM);
+
+	err = unlink_file(file);
+	kfree(file);
+	return(err);
+}
+
+int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to)
+{
+	char *file;
+	int err;
+
+	if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+	err = make_symlink(file, to);
+	kfree(file);
+	return(err);
+}
+
+int hostfs_mkdir(struct inode *ino, struct dentry *dentry, int mode)
+{
+	char *file;
+	int err;
+
+	if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+	err = do_mkdir(file, mode);
+	kfree(file);
+	return(err);
+}
+
+int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
+{
+	char *file;
+	int err;
+
+	if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+	err = do_rmdir(file);
+	kfree(file);
+	return(err);
+}
+
+int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+	struct inode *inode;
+	char *name;
+	int err = -ENOMEM;
+
+	inode = iget(dir->i_sb, 0);
+	if(inode == NULL)
+		goto out;
+
+	err = init_inode(inode, dentry);
+	if(err)
+		goto out_put;
+
+	err = -ENOMEM;
+	name = dentry_name(dentry, 0);
+	if(name == NULL)
+		goto out_put;
+
+	init_special_inode(inode, mode, dev);
+	err = do_mknod(name, mode, dev);
+	if(err)
+		goto out_free;
+
+	err = read_name(inode, name);
+	kfree(name);
+	if(err)
+		goto out_put;
+
+	d_instantiate(dentry, inode);
+	return(0);
+
+ out_free:
+	kfree(name);
+ out_put:
+	iput(inode);
+ out:
+	return(err);
+}
+
+int hostfs_rename(struct inode *from_ino, struct dentry *from,
+		  struct inode *to_ino, struct dentry *to)
+{
+	char *from_name, *to_name;
+	int err;
+
+	if((from_name = inode_dentry_name(from_ino, from)) == NULL)
+		return(-ENOMEM);
+	if((to_name = inode_dentry_name(to_ino, to)) == NULL){
+		kfree(from_name);
+		return(-ENOMEM);
+	}
+	err = rename_file(from_name, to_name);
+	kfree(from_name);
+	kfree(to_name);
+	return(err);
+}
+
+void hostfs_truncate(struct inode *ino)
+{
+	not_implemented();
+}
+
+int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd)
+{
+	char *name;
+	int r = 0, w = 0, x = 0, err;
+
+	if (desired & MAY_READ) r = 1;
+	if (desired & MAY_WRITE) w = 1;
+	if (desired & MAY_EXEC) x = 1;
+	name = inode_name(ino, 0);
+	if (name == NULL) return(-ENOMEM);
+
+	if (S_ISCHR(ino->i_mode) || S_ISBLK(ino->i_mode) ||
+			S_ISFIFO(ino->i_mode) || S_ISSOCK(ino->i_mode))
+		err = 0;
+	else
+		err = access_file(name, r, w, x);
+	kfree(name);
+	if(!err)
+		err = generic_permission(ino, desired, NULL);
+	return err;
+}
+
+int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct hostfs_iattr attrs;
+	char *name;
+	int err;
+
+	err = inode_change_ok(dentry->d_inode, attr);
+	if (err)
+		return err;
+
+	if(append)
+		attr->ia_valid &= ~ATTR_SIZE;
+
+	attrs.ia_valid = 0;
+	if(attr->ia_valid & ATTR_MODE){
+		attrs.ia_valid |= HOSTFS_ATTR_MODE;
+		attrs.ia_mode = attr->ia_mode;
+	}
+	if(attr->ia_valid & ATTR_UID){
+		if((dentry->d_inode->i_sb->s_dev == ROOT_DEV) &&
+		   (attr->ia_uid == 0))
+			attr->ia_uid = getuid();
+		attrs.ia_valid |= HOSTFS_ATTR_UID;
+		attrs.ia_uid = attr->ia_uid;
+	}
+	if(attr->ia_valid & ATTR_GID){
+		if((dentry->d_inode->i_sb->s_dev == ROOT_DEV) &&
+		   (attr->ia_gid == 0))
+			attr->ia_gid = getgid();
+		attrs.ia_valid |= HOSTFS_ATTR_GID;
+		attrs.ia_gid = attr->ia_gid;
+	}
+	if(attr->ia_valid & ATTR_SIZE){
+		attrs.ia_valid |= HOSTFS_ATTR_SIZE;
+		attrs.ia_size = attr->ia_size;
+	}
+	if(attr->ia_valid & ATTR_ATIME){
+		attrs.ia_valid |= HOSTFS_ATTR_ATIME;
+		attrs.ia_atime = attr->ia_atime;
+	}
+	if(attr->ia_valid & ATTR_MTIME){
+		attrs.ia_valid |= HOSTFS_ATTR_MTIME;
+		attrs.ia_mtime = attr->ia_mtime;
+	}
+	if(attr->ia_valid & ATTR_CTIME){
+		attrs.ia_valid |= HOSTFS_ATTR_CTIME;
+		attrs.ia_ctime = attr->ia_ctime;
+	}
+	if(attr->ia_valid & ATTR_ATIME_SET){
+		attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET;
+	}
+	if(attr->ia_valid & ATTR_MTIME_SET){
+		attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET;
+	}
+	name = dentry_name(dentry, 0);
+	if(name == NULL) return(-ENOMEM);
+	err = set_attr(name, &attrs);
+	kfree(name);
+	if(err)
+		return(err);
+
+	return(inode_setattr(dentry->d_inode, attr));
+}
+
+int hostfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+	   struct kstat *stat)
+{
+	generic_fillattr(dentry->d_inode, stat);
+	return(0);
+}
+
+static struct inode_operations hostfs_iops = {
+	.create		= hostfs_create,
+	.link		= hostfs_link,
+	.unlink		= hostfs_unlink,
+	.symlink	= hostfs_symlink,
+	.mkdir		= hostfs_mkdir,
+	.rmdir		= hostfs_rmdir,
+	.mknod		= hostfs_mknod,
+	.rename		= hostfs_rename,
+	.truncate	= hostfs_truncate,
+	.permission	= hostfs_permission,
+	.setattr	= hostfs_setattr,
+	.getattr	= hostfs_getattr,
+};
+
+static struct inode_operations hostfs_dir_iops = {
+	.create		= hostfs_create,
+	.lookup		= hostfs_lookup,
+	.link		= hostfs_link,
+	.unlink		= hostfs_unlink,
+	.symlink	= hostfs_symlink,
+	.mkdir		= hostfs_mkdir,
+	.rmdir		= hostfs_rmdir,
+	.mknod		= hostfs_mknod,
+	.rename		= hostfs_rename,
+	.truncate	= hostfs_truncate,
+	.permission	= hostfs_permission,
+	.setattr	= hostfs_setattr,
+	.getattr	= hostfs_getattr,
+};
+
+int hostfs_link_readpage(struct file *file, struct page *page)
+{
+	char *buffer, *name;
+	long long start;
+	int err;
+
+	start = page->index << PAGE_CACHE_SHIFT;
+	buffer = kmap(page);
+	name = inode_name(page->mapping->host, 0);
+	if(name == NULL) return(-ENOMEM);
+	err = do_readlink(name, buffer, PAGE_CACHE_SIZE);
+	kfree(name);
+	if(err == PAGE_CACHE_SIZE)
+		err = -E2BIG;
+	else if(err > 0){
+		flush_dcache_page(page);
+		SetPageUptodate(page);
+		if (PageError(page)) ClearPageError(page);
+		err = 0;
+	}
+	kunmap(page);
+	unlock_page(page);
+	return(err);
+}
+
+static struct address_space_operations hostfs_link_aops = {
+	.readpage	= hostfs_link_readpage,
+};
+
+static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+{
+	struct inode *root_inode;
+	char *name, *data = d;
+	int err;
+
+	sb->s_blocksize = 1024;
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = HOSTFS_SUPER_MAGIC;
+	sb->s_op = &hostfs_sbops;
+
+	if((data == NULL) || (*data == '\0'))
+		data = root_ino;
+
+	err = -ENOMEM;
+	name = kmalloc(strlen(data) + 1, GFP_KERNEL);
+	if(name == NULL)
+		goto out;
+
+	strcpy(name, data);
+
+	root_inode = iget(sb, 0);
+	if(root_inode == NULL)
+		goto out_free;
+
+	err = init_inode(root_inode, NULL);
+	if(err)
+		goto out_put;
+
+	HOSTFS_I(root_inode)->host_filename = name;
+
+	err = -ENOMEM;
+	sb->s_root = d_alloc_root(root_inode);
+	if(sb->s_root == NULL)
+		goto out_put;
+
+	err = read_inode(root_inode);
+	if(err)
+		goto out_put;
+
+	return(0);
+
+ out_put:
+	iput(root_inode);
+ out_free:
+	kfree(name);
+ out:
+	return(err);
+}
+
+static struct super_block *hostfs_read_sb(struct file_system_type *type,
+					     int flags, const char *dev_name,
+					     void *data)
+{
+	return(get_sb_nodev(type, flags, data, hostfs_fill_sb_common));
+}
+
+static struct file_system_type hostfs_type = {
+	.owner 		= THIS_MODULE,
+	.name 		= "hostfs",
+	.get_sb 	= hostfs_read_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags 	= 0,
+};
+
+static int __init init_hostfs(void)
+{
+	return(register_filesystem(&hostfs_type));
+}
+
+static void __exit exit_hostfs(void)
+{
+	unregister_filesystem(&hostfs_type);
+}
+
+module_init(init_hostfs)
+module_exit(exit_hostfs)
+MODULE_LICENSE("GPL");
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
new file mode 100644
index 0000000..4796e84
--- /dev/null
+++ b/fs/hostfs/hostfs_user.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <errno.h>
+#include <utime.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/vfs.h>
+#include "hostfs.h"
+#include "kern_util.h"
+#include "user.h"
+
+int stat_file(const char *path, unsigned long long *inode_out, int *mode_out,
+	      int *nlink_out, int *uid_out, int *gid_out,
+	      unsigned long long *size_out, struct timespec *atime_out,
+	      struct timespec *mtime_out, struct timespec *ctime_out,
+	      int *blksize_out, unsigned long long *blocks_out)
+{
+	struct stat64 buf;
+
+	if(lstat64(path, &buf) < 0)
+		return(-errno);
+
+	if(inode_out != NULL) *inode_out = buf.st_ino;
+	if(mode_out != NULL) *mode_out = buf.st_mode;
+	if(nlink_out != NULL) *nlink_out = buf.st_nlink;
+	if(uid_out != NULL) *uid_out = buf.st_uid;
+	if(gid_out != NULL) *gid_out = buf.st_gid;
+	if(size_out != NULL) *size_out = buf.st_size;
+	if(atime_out != NULL) {
+		atime_out->tv_sec = buf.st_atime;
+		atime_out->tv_nsec = 0;
+	}
+	if(mtime_out != NULL) {
+		mtime_out->tv_sec = buf.st_mtime;
+		mtime_out->tv_nsec = 0;
+	}
+	if(ctime_out != NULL) {
+		ctime_out->tv_sec = buf.st_ctime;
+		ctime_out->tv_nsec = 0;
+	}
+	if(blksize_out != NULL) *blksize_out = buf.st_blksize;
+	if(blocks_out != NULL) *blocks_out = buf.st_blocks;
+	return(0);
+}
+
+int file_type(const char *path, int *maj, int *min)
+{
+	struct stat64 buf;
+
+	if(lstat64(path, &buf) < 0)
+		return(-errno);
+	/*We cannot pass rdev as is because glibc and the kernel disagree
+	 *about its definition.*/
+	if(maj != NULL)
+		*maj = major(buf.st_rdev);
+	if(min != NULL)
+		*min = minor(buf.st_rdev);
+
+	if(S_ISDIR(buf.st_mode)) return(OS_TYPE_DIR);
+	else if(S_ISLNK(buf.st_mode)) return(OS_TYPE_SYMLINK);
+	else if(S_ISCHR(buf.st_mode)) return(OS_TYPE_CHARDEV);
+	else if(S_ISBLK(buf.st_mode)) return(OS_TYPE_BLOCKDEV);
+	else if(S_ISFIFO(buf.st_mode)) return(OS_TYPE_FIFO);
+	else if(S_ISSOCK(buf.st_mode)) return(OS_TYPE_SOCK);
+	else return(OS_TYPE_FILE);
+}
+
+int access_file(char *path, int r, int w, int x)
+{
+	int mode = 0;
+
+	if(r) mode = R_OK;
+	if(w) mode |= W_OK;
+	if(x) mode |= X_OK;
+	if(access(path, mode) != 0) return(-errno);
+	else return(0);
+}
+
+int open_file(char *path, int r, int w, int append)
+{
+	int mode = 0, fd;
+
+	if(r && !w)
+		mode = O_RDONLY;
+	else if(!r && w)
+		mode = O_WRONLY;
+	else if(r && w)
+		mode = O_RDWR;
+	else panic("Impossible mode in open_file");
+
+	if(append)
+		mode |= O_APPEND;
+	fd = open64(path, mode);
+	if(fd < 0) return(-errno);
+	else return(fd);
+}
+
+void *open_dir(char *path, int *err_out)
+{
+	DIR *dir;
+
+	dir = opendir(path);
+	*err_out = errno;
+	if(dir == NULL) return(NULL);
+	return(dir);
+}
+
+char *read_dir(void *stream, unsigned long long *pos,
+	       unsigned long long *ino_out, int *len_out)
+{
+	DIR *dir = stream;
+	struct dirent *ent;
+
+	seekdir(dir, *pos);
+	ent = readdir(dir);
+	if(ent == NULL) return(NULL);
+	*len_out = strlen(ent->d_name);
+	*ino_out = ent->d_ino;
+	*pos = telldir(dir);
+	return(ent->d_name);
+}
+
+int read_file(int fd, unsigned long long *offset, char *buf, int len)
+{
+	int n;
+
+	n = pread64(fd, buf, len, *offset);
+	if(n < 0) return(-errno);
+	*offset += n;
+	return(n);
+}
+
+int write_file(int fd, unsigned long long *offset, const char *buf, int len)
+{
+	int n;
+
+	n = pwrite64(fd, buf, len, *offset);
+	if(n < 0) return(-errno);
+	*offset += n;
+	return(n);
+}
+
+int lseek_file(int fd, long long offset, int whence)
+{
+	int ret;
+
+	ret = lseek64(fd, offset, whence);
+	if(ret < 0) return(-errno);
+	return(0);
+}
+
+void close_file(void *stream)
+{
+	close(*((int *) stream));
+}
+
+void close_dir(void *stream)
+{
+	closedir(stream);
+}
+
+int file_create(char *name, int ur, int uw, int ux, int gr,
+		int gw, int gx, int or, int ow, int ox)
+{
+	int mode, fd;
+
+	mode = 0;
+	mode |= ur ? S_IRUSR : 0;
+	mode |= uw ? S_IWUSR : 0;
+	mode |= ux ? S_IXUSR : 0;
+	mode |= gr ? S_IRGRP : 0;
+	mode |= gw ? S_IWGRP : 0;
+	mode |= gx ? S_IXGRP : 0;
+	mode |= or ? S_IROTH : 0;
+	mode |= ow ? S_IWOTH : 0;
+	mode |= ox ? S_IXOTH : 0;
+	fd = open64(name, O_CREAT | O_RDWR, mode);
+	if(fd < 0)
+		return(-errno);
+	return(fd);
+}
+
+int set_attr(const char *file, struct hostfs_iattr *attrs)
+{
+	struct utimbuf buf;
+	int err, ma;
+
+	if(attrs->ia_valid & HOSTFS_ATTR_MODE){
+		if(chmod(file, attrs->ia_mode) != 0) return(-errno);
+	}
+	if(attrs->ia_valid & HOSTFS_ATTR_UID){
+		if(chown(file, attrs->ia_uid, -1)) return(-errno);
+	}
+	if(attrs->ia_valid & HOSTFS_ATTR_GID){
+		if(chown(file, -1, attrs->ia_gid)) return(-errno);
+	}
+	if(attrs->ia_valid & HOSTFS_ATTR_SIZE){
+		if(truncate(file, attrs->ia_size)) return(-errno);
+	}
+	ma = HOSTFS_ATTR_ATIME_SET | HOSTFS_ATTR_MTIME_SET;
+	if((attrs->ia_valid & ma) == ma){
+		buf.actime = attrs->ia_atime.tv_sec;
+		buf.modtime = attrs->ia_mtime.tv_sec;
+		if(utime(file, &buf) != 0) return(-errno);
+	}
+	else {
+		struct timespec ts;
+
+		if(attrs->ia_valid & HOSTFS_ATTR_ATIME_SET){
+			err = stat_file(file, NULL, NULL, NULL, NULL, NULL,
+					NULL, NULL, &ts, NULL, NULL, NULL);
+			if(err != 0)
+				return(err);
+			buf.actime = attrs->ia_atime.tv_sec;
+			buf.modtime = ts.tv_sec;
+			if(utime(file, &buf) != 0)
+				return(-errno);
+		}
+		if(attrs->ia_valid & HOSTFS_ATTR_MTIME_SET){
+			err = stat_file(file, NULL, NULL, NULL, NULL, NULL,
+					NULL, &ts, NULL, NULL, NULL, NULL);
+			if(err != 0)
+				return(err);
+			buf.actime = ts.tv_sec;
+			buf.modtime = attrs->ia_mtime.tv_sec;
+			if(utime(file, &buf) != 0)
+				return(-errno);
+		}
+	}
+	if(attrs->ia_valid & HOSTFS_ATTR_CTIME) ;
+	if(attrs->ia_valid & (HOSTFS_ATTR_ATIME | HOSTFS_ATTR_MTIME)){
+		err = stat_file(file, NULL, NULL, NULL, NULL, NULL, NULL,
+				&attrs->ia_atime, &attrs->ia_mtime, NULL,
+				NULL, NULL);
+		if(err != 0) return(err);
+	}
+	return(0);
+}
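+
+/* When only one of atime/mtime is being set, utime() would clobber the
+ * other, so set_attr() first fetches the current value with stat_file()
+ * and writes it back unchanged.  ctime cannot be set from userspace, so
+ * HOSTFS_ATTR_CTIME is accepted and silently ignored.
+ */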
+
+int make_symlink(const char *from, const char *to)
+{
+	int err;
+
+	err = symlink(to, from);
+	if(err) return(-errno);
+	return(0);
+}
+
+int unlink_file(const char *file)
+{
+	int err;
+
+	err = unlink(file);
+	if(err) return(-errno);
+	return(0);
+}
+
+int do_mkdir(const char *file, int mode)
+{
+	int err;
+
+	err = mkdir(file, mode);
+	if(err) return(-errno);
+	return(0);
+}
+
+int do_rmdir(const char *file)
+{
+	int err;
+
+	err = rmdir(file);
+	if(err) return(-errno);
+	return(0);
+}
+
+int do_mknod(const char *file, int mode, int dev)
+{
+	int err;
+
+	err = mknod(file, mode, dev);
+	if(err) return(-errno);
+	return(0);
+}
+
+int link_file(const char *to, const char *from)
+{
+	int err;
+
+	err = link(to, from);
+	if(err) return(-errno);
+	return(0);
+}
+
+int do_readlink(char *file, char *buf, int size)
+{
+	int n;
+
+	n = readlink(file, buf, size);
+	if(n < 0)
+		return(-errno);
+	if(n < size)
+		buf[n] = '\0';
+	return(n);
+}
+
+int rename_file(char *from, char *to)
+{
+	int err;
+
+	err = rename(from, to);
+	if(err < 0) return(-errno);
+	return(0);
+}
+
+int do_statfs(char *root, long *bsize_out, long long *blocks_out,
+	      long long *bfree_out, long long *bavail_out,
+	      long long *files_out, long long *ffree_out,
+	      void *fsid_out, int fsid_size, long *namelen_out,
+	      long *spare_out)
+{
+	struct statfs64 buf;
+	int err;
+
+	err = statfs64(root, &buf);
+	if(err < 0) return(-errno);
+	*bsize_out = buf.f_bsize;
+	*blocks_out = buf.f_blocks;
+	*bfree_out = buf.f_bfree;
+	*bavail_out = buf.f_bavail;
+	*files_out = buf.f_files;
+	*ffree_out = buf.f_ffree;
+	memcpy(fsid_out, &buf.f_fsid,
+	       sizeof(buf.f_fsid) > fsid_size ? fsid_size :
+	       sizeof(buf.f_fsid));
+	*namelen_out = buf.f_namelen;
+	spare_out[0] = buf.f_spare[0];
+	spare_out[1] = buf.f_spare[1];
+	spare_out[2] = buf.f_spare[2];
+	spare_out[3] = buf.f_spare[3];
+	spare_out[4] = buf.f_spare[4];
+	spare_out[5] = buf.f_spare[5];
+	return(0);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/fs/hpfs/Makefile b/fs/hpfs/Makefile
new file mode 100644
index 0000000..57b786f
--- /dev/null
+++ b/fs/hpfs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux hpfs filesystem routines.
+#
+
+obj-$(CONFIG_HPFS_FS) += hpfs.o
+
+hpfs-objs := alloc.o anode.o buffer.o dentry.o dir.o dnode.o ea.o file.o \
+	     inode.o map.o name.o namei.o super.o
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
new file mode 100644
index 0000000..5503e2c
--- /dev/null
+++ b/fs/hpfs/alloc.c
@@ -0,0 +1,456 @@
+/*
+ *  linux/fs/hpfs/alloc.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  HPFS bitmap operations
+ */
+
+#include "hpfs_fn.h"
+
+static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
+
+/*
+ * Check if a sector is allocated in bitmap
+ * This is really slow. Turned on only if chk==2
+ */
+
+static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
+{
+	struct quad_buffer_head qbh;
+	unsigned *bmp;
+	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
+	if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) {
+		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
+		goto fail1;
+	}
+	hpfs_brelse4(&qbh);
+	if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
+		unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
+		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
+		if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) {
+			hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
+			goto fail1;
+		}
+		hpfs_brelse4(&qbh);
+	}
+	return 0;
+	fail1:
+	hpfs_brelse4(&qbh);
+	fail:
+	return 1;
+}
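+
+/*
+ * A note on the bitmap convention this file relies on (visible in the code
+ * above and in hpfs_free_sectors() below): a set bit means the sector is
+ * free, a clear bit means it is allocated, and each bitmap covers 0x4000
+ * sectors.  Freeing sector 'sec' is therefore
+ *	bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
+ * and allocating it clears the same bit.
+ */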
+
+/*
+ * Check that the sector number(s) are in a valid range and, if sb_chk >= 2,
+ * also check that they are marked allocated in the bitmap.
+ */
+	
+int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
+{
+	if (start + len < start || start < 0x12 ||
+	    start + len > hpfs_sb(s)->sb_fs_size) {
+	    	hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
+		return 1;
+	}
+	if (hpfs_sb(s)->sb_chk>=2) {
+		int i;
+		for (i = 0; i < len; i++)
+			if (chk_if_allocated(s, start + i, msg)) return 1;
+	}
+	return 0;
+}
+
+static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
+{
+	struct quad_buffer_head qbh;
+	unsigned *bmp;
+	unsigned bs = near & ~0x3fff;
+	unsigned nr = (near & 0x3fff) & ~(n - 1);
+	/*unsigned mnr;*/
+	unsigned i, q;
+	int a, b;
+	secno ret = 0;
+	if (n != 1 && n != 4) {
+		hpfs_error(s, "Bad allocation size: %d", n);
+		return 0;
+	}
+	lock_super(s);
+	if (bs != ~0x3fff) {
+		if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
+	} else {
+		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
+	}
+	if (!tstbits(bmp, nr, n + forward)) {
+		ret = bs + nr;
+		goto rt;
+	}
+	/*if (!tstbits(bmp, nr + n, n + forward)) {
+		ret = bs + nr + n;
+		goto rt;
+	}*/
+	q = nr + n; b = 0;
+	while ((a = tstbits(bmp, q, n + forward)) != 0) {
+		q += a;
+		if (n != 1) q = ((q-1)&~(n-1))+n;
+		if (!b) {
+			if (q>>5 != nr>>5) {
+				b = 1;
+				q = nr & 0x1f;
+			}
+		} else if (q > nr) break;
+	}
+	if (!a) {
+		ret = bs + q;
+		goto rt;
+	}
+	nr >>= 5;
+	/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/
+	i = nr;
+	do {
+		if (!bmp[i]) goto cont;
+		if (n + forward >= 0x3f && bmp[i] != -1) goto cont;
+		q = i<<5;
+		if (i > 0) {
+			unsigned k = bmp[i-1];
+			while (k & 0x80000000) {
+				q--; k <<= 1;
+			}
+		}
+		if (n != 1) q = ((q-1)&~(n-1))+n;
+		while ((a = tstbits(bmp, q, n + forward)) != 0) {
+			q += a;
+			if (n != 1) q = ((q-1)&~(n-1))+n;
+			if (q>>5 > i) break;
+		}
+		if (!a) {
+			ret = bs + q;
+			goto rt;
+		}
+		cont:
+		i++, i &= 0x1ff;
+	} while (i != nr);
+	rt:
+	if (ret) {
+		if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
+			hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
+			ret = 0;
+			goto b;
+		}
+		bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f));
+		hpfs_mark_4buffers_dirty(&qbh);
+	}
+	b:
+	hpfs_brelse4(&qbh);
+	uls:
+	unlock_super(s);
+	return ret;
+}
+
+/*
+ * Allocation strategy:	1) search place near the sector specified
+ *			2) search bitmap where free sectors last found
+ *			3) search all bitmaps
+ *			4) search all bitmaps ignoring number of pre-allocated
+ *				sectors
+ */
+
+secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock)
+{
+	secno sec;
+	int i;
+	unsigned n_bmps;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	int f_p = 0;
+	int near_bmp;
+	if (forward < 0) {
+		forward = -forward;
+		f_p = 1;
+	}
+	if (lock) hpfs_lock_creation(s);
+	n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
+	if (near && near < sbi->sb_fs_size) {
+		if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
+		near_bmp = near >> 14;
+	} else near_bmp = n_bmps / 2;
+	/*
+	if (b != -1) {
+		if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
+			b &= 0x0fffffff;
+			goto ret;
+		}
+		if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
+	*/
+	if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
+	less_fwd:
+	for (i = 0; i < n_bmps; i++) {
+		if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
+			sbi->sb_c_bitmap = near_bmp+i;
+			goto ret;
+		}	
+		if (!forward) {
+			if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
+				sbi->sb_c_bitmap = near_bmp-i-1;
+				goto ret;
+			}
+		} else {
+			if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
+				sbi->sb_c_bitmap = near_bmp+i-n_bmps;
+				goto ret;
+			}
+		}
+		if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
+			goto ret;
+		}
+	}
+	if (!f_p) {
+		if (forward) {
+			sbi->sb_max_fwd_alloc = forward * 3 / 4;
+			forward /= 2;
+			goto less_fwd;
+		}
+	}
+	sec = 0;
+	ret:
+	if (sec && f_p) {
+		for (i = 0; i < forward; i++) {
+			if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) {
+				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
+				sec = 0;
+				break;
+			}
+		}
+	}
+	if (lock) hpfs_unlock_creation(s);
+	return sec;
+}
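+
+/*
+ * Usage note, inferred from the code above: a positive 'forward' is only a
+ * hint - it is capped by sb_max_fwd_alloc and halved when nothing suitable
+ * is found - while a negative 'forward' (the f_p case) demands that the
+ * sectors following the returned one really be preallocated and fails the
+ * whole allocation if they cannot be.  A typical call is the one in
+ * hpfs_alloc_fnode() below:
+ *	hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1);
+ */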
+
+static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
+{
+	unsigned nr = near;
+	secno sec;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (nr < sbi->sb_dirband_start)
+		nr = sbi->sb_dirband_start;
+	if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
+		nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
+	nr -= sbi->sb_dirband_start;
+	nr >>= 2;
+	if (lock) hpfs_lock_creation(s);
+	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
+	if (lock) hpfs_unlock_creation(s);
+	if (!sec) return 0;
+	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
+}
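+
+/*
+ * In other words: each bit of the dnode bitmap stands for one 4-sector dnode
+ * slot inside the directory band, and the (~0x3fff) base passed to
+ * alloc_in_bmp() is what makes it pick hpfs_map_dnode_bitmap().  As a made-up
+ * example, with sb_dirband_start == 0x1000, getting bit 5 back means the new
+ * dnode occupies sectors 0x1014..0x1017.
+ */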
+
+/* Alloc sector if it's free */
+
+static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec)
+{
+	struct quad_buffer_head qbh;
+	unsigned *bmp;
+	lock_super(s);
+	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
+	if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) {
+		bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f));
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		unlock_super(s);
+		return 1;
+	}
+	hpfs_brelse4(&qbh);
+	end:
+	unlock_super(s);
+	return 0;
+}
+
+int hpfs_alloc_if_possible(struct super_block *s, secno sec)
+{
+	int r;
+	hpfs_lock_creation(s);
+	r = hpfs_alloc_if_possible_nolock(s, sec);
+	hpfs_unlock_creation(s);
+	return r;
+}
+
+/* Free sectors in bitmaps */
+
+void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
+{
+	struct quad_buffer_head qbh;
+	unsigned *bmp;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	/*printk("2 - ");*/
+	if (!n) return;
+	if (sec < 0x12) {
+		hpfs_error(s, "Trying to free reserved sector %08x", sec);
+		return;
+	}
+	lock_super(s);
+	sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
+	if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
+	new_map:
+	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
+		unlock_super(s);
+		return;
+	}	
+	new_tst:
+	if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) {
+		hpfs_error(s, "sector %08x not allocated", sec);
+		hpfs_brelse4(&qbh);
+		unlock_super(s);
+		return;
+	}
+	bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
+	if (!--n) {
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		unlock_super(s);
+		return;
+	}	
+	if (!(++sec & 0x3fff)) {
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		goto new_map;
+	}
+	goto new_tst;
+}
+
+/*
+ * Check if there are at least n free dnodes on the filesystem.
+ * Called before adding to dnode. If we run out of space while
+ * splitting dnodes, it would corrupt dnode tree.
+ */
+
+int hpfs_check_free_dnodes(struct super_block *s, int n)
+{
+	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
+	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
+	int i, j;
+	unsigned *bmp;
+	struct quad_buffer_head qbh;
+	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
+		for (j = 0; j < 512; j++) {
+			unsigned k;
+			if (!bmp[j]) continue;
+			for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
+				hpfs_brelse4(&qbh);
+				return 0;
+			}
+		}
+	}
+	hpfs_brelse4(&qbh);
+	i = 0;
+	if (hpfs_sb(s)->sb_c_bitmap != -1) {
+		bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
+		goto chk_bmp;
+	}
+	chk_next:
+	if (i == b) i++;
+	if (i >= n_bmps) return 1;
+	bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
+	chk_bmp:
+	if (bmp) {
+		for (j = 0; j < 512; j++) {
+			unsigned k;
+			if (!bmp[j]) continue;
+			for (k = 0xf; k; k <<= 4)
+				if ((bmp[j] & k) == k) {
+					if (!--n) {
+						hpfs_brelse4(&qbh);
+						return 0;
+					}
+				}
+		}
+		hpfs_brelse4(&qbh);
+	}
+	i++;
+	goto chk_next;
+}
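+
+/*
+ * The nibble test above ((bmp[j] & k) == k with k stepping 0xf, 0xf0, ...)
+ * counts aligned groups of four free sectors in the ordinary bitmaps, i.e.
+ * places that could hold a dnode outside the directory band, while the first
+ * loop counts set bits of the dnode bitmap, where every set bit is one free
+ * dnode slot inside the band.
+ */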
+
+void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
+{
+	if (hpfs_sb(s)->sb_chk) if (dno & 3) {
+		hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
+		return;
+	}
+	if (dno < hpfs_sb(s)->sb_dirband_start ||
+	    dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
+		hpfs_free_sectors(s, dno, 4);
+	} else {
+		struct quad_buffer_head qbh;
+		unsigned *bmp;
+		unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
+		lock_super(s);
+		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
+			unlock_super(s);
+			return;
+		}
+		bmp[ssec >> 5] |= 1 << (ssec & 0x1f);
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		unlock_super(s);
+	}
+}
+
+struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
+			 dnode_secno *dno, struct quad_buffer_head *qbh,
+			 int lock)
+{
+	struct dnode *d;
+	if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
+		if (!(*dno = alloc_in_dirband(s, near, lock)))
+			if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL;
+	} else {
+		if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock)))
+			if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL;
+	}
+	if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
+		hpfs_free_dnode(s, *dno);
+		return NULL;
+	}
+	memset(d, 0, 2048);
+	d->magic = DNODE_MAGIC;
+	d->first_free = 52;
+	d->dirent[0] = 32;
+	d->dirent[2] = 8;
+	d->dirent[30] = 1;
+	d->dirent[31] = 255;
+	d->self = *dno;
+	return d;
+}
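+
+/*
+ * The byte pokes above hand-craft the new dnode's single terminating dirent:
+ * first_free == 52 is the 20-byte dnode header plus one 32-byte entry,
+ * dirent[0] == 32 is that entry's length, dirent[2] sets its "last" flag and
+ * dirent[30]/dirent[31] give it namelen 1 and the name byte 0xff - the same
+ * "\377" end marker that hpfs_readdir() in dir.c checks for (namelen 1,
+ * name[0] == 255).
+ */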
+
+struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
+			  struct buffer_head **bh)
+{
+	struct fnode *f;
+	if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL;
+	if (!(f = hpfs_get_sector(s, *fno, bh))) {
+		hpfs_free_sectors(s, *fno, 1);
+		return NULL;
+	}	
+	memset(f, 0, 512);
+	f->magic = FNODE_MAGIC;
+	f->ea_offs = 0xc4;
+	f->btree.n_free_nodes = 8;
+	f->btree.first_free = 8;
+	return f;
+}
+
+struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
+			  struct buffer_head **bh)
+{
+	struct anode *a;
+	if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL;
+	if (!(a = hpfs_get_sector(s, *ano, bh))) {
+		hpfs_free_sectors(s, *ano, 1);
+		return NULL;
+	}
+	memset(a, 0, 512);
+	a->magic = ANODE_MAGIC;
+	a->self = *ano;
+	a->btree.n_free_nodes = 40;
+	a->btree.n_used_nodes = 0;
+	a->btree.first_free = 8;
+	return a;
+}
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
new file mode 100644
index 0000000..1aa88c4
--- /dev/null
+++ b/fs/hpfs/anode.c
@@ -0,0 +1,491 @@
+/*
+ *  linux/fs/hpfs/anode.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  handling HPFS anode tree that contains file allocation info
+ */
+
+#include "hpfs_fn.h"
+
+/* Find a sector in allocation tree */
+
+secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
+		   struct bplus_header *btree, unsigned sec,
+		   struct buffer_head *bh)
+{
+	anode_secno a = -1;
+	struct anode *anode;
+	int i;
+	int c1, c2 = 0;
+	go_down:
+	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
+	if (btree->internal) {
+		for (i = 0; i < btree->n_used_nodes; i++)
+			if (btree->u.internal[i].file_secno > sec) {
+				a = btree->u.internal[i].down;
+				brelse(bh);
+				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
+				btree = &anode->btree;
+				goto go_down;
+			}
+		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
+		brelse(bh);
+		return -1;
+	}
+	for (i = 0; i < btree->n_used_nodes; i++)
+		if (btree->u.external[i].file_secno <= sec &&
+		    btree->u.external[i].file_secno + btree->u.external[i].length > sec) {
+			a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno;
+			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
+				brelse(bh);
+				return -1;
+			}
+			if (inode) {
+				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+				hpfs_inode->i_file_sec = btree->u.external[i].file_secno;
+				hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno;
+				hpfs_inode->i_n_secs = btree->u.external[i].length;
+			}
+			brelse(bh);
+			return a;
+		}
+	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
+	brelse(bh);
+	return -1;
+}
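+
+/*
+ * For orientation in the lookup above: an internal node keeps
+ * (file_secno, down) pairs where file_secno is an upper bound for the
+ * subtree behind 'down' (hence the "> sec" test), while an external node
+ * keeps (file_secno, disk_secno, length) runs, so the answer for file
+ * sector 'sec' inside a run is simply disk_secno + (sec - file_secno).
+ */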
+
+/* Add a sector to tree */
+
+secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
+{
+	struct bplus_header *btree;
+	struct anode *anode = NULL, *ranode = NULL;
+	struct fnode *fnode;
+	anode_secno a, na = -1, ra, up = -1;
+	secno se;
+	struct buffer_head *bh, *bh1, *bh2;
+	int n;
+	unsigned fs;
+	int c1, c2 = 0;
+	if (fnod) {
+		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
+		btree = &fnode->btree;
+	} else {
+		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
+		btree = &anode->btree;
+	}
+	a = node;
+	go_down:
+	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
+		hpfs_error(s, "anode %08x has no entries", a);
+		brelse(bh);
+		return -1;
+	}
+	if (btree->internal) {
+		a = btree->u.internal[n].down;
+		btree->u.internal[n].file_secno = -1;
+		mark_buffer_dirty(bh);
+		brelse(bh);
+		if (hpfs_sb(s)->sb_chk)
+			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
+		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
+		btree = &anode->btree;
+		goto go_down;
+	}
+	if (n >= 0) {
+		if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) {
+			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
+				btree->u.external[n].file_secno + btree->u.external[n].length, fsecno,
+				fnod?'f':'a', node);
+			brelse(bh);
+			return -1;
+		}
+		if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) {
+			btree->u.external[n].length++;
+			mark_buffer_dirty(bh);
+			brelse(bh);
+			return se;
+		}
+	} else {
+		if (fsecno) {
+			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
+			brelse(bh);
+			return -1;
+		}
+		se = !fnod ? node : (node + 16384) & ~16383;
+	}	
+	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) {
+		brelse(bh);
+		return -1;
+	}
+	fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length;
+	if (!btree->n_free_nodes) {
+		up = a != node ? anode->up : -1;
+		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
+			brelse(bh);
+			hpfs_free_sectors(s, se, 1);
+			return -1;
+		}
+		if (a == node && fnod) {
+			anode->up = node;
+			anode->btree.fnode_parent = 1;
+			anode->btree.n_used_nodes = btree->n_used_nodes;
+			anode->btree.first_free = btree->first_free;
+			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
+			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
+			btree->internal = 1;
+			btree->n_free_nodes = 11;
+			btree->n_used_nodes = 1;
+			btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree;
+			btree->u.internal[0].file_secno = -1;
+			btree->u.internal[0].down = na;
+			mark_buffer_dirty(bh);
+		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
+			brelse(bh);
+			brelse(bh1);
+			hpfs_free_sectors(s, se, 1);
+			hpfs_free_sectors(s, na, 1);
+			return -1;
+		}
+		brelse(bh);
+		bh = bh1;
+		btree = &anode->btree;
+	}
+	btree->n_free_nodes--; n = btree->n_used_nodes++;
+	btree->first_free += 12;
+	btree->u.external[n].disk_secno = se;
+	btree->u.external[n].file_secno = fs;
+	btree->u.external[n].length = 1;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	if ((a == node && fnod) || na == -1) return se;
+	c2 = 0;
+	while (up != -1) {
+		struct anode *new_anode;
+		if (hpfs_sb(s)->sb_chk)
+			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
+		if (up != node || !fnod) {
+			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
+			btree = &anode->btree;
+		} else {
+			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
+			btree = &fnode->btree;
+		}
+		if (btree->n_free_nodes) {
+			btree->n_free_nodes--; n = btree->n_used_nodes++;
+			btree->first_free += 8;
+			btree->u.internal[n].file_secno = -1;
+			btree->u.internal[n].down = na;
+			btree->u.internal[n-1].file_secno = fs;
+			mark_buffer_dirty(bh);
+			brelse(bh);
+			brelse(bh2);
+			hpfs_free_sectors(s, ra, 1);
+			if ((anode = hpfs_map_anode(s, na, &bh))) {
+				anode->up = up;
+				anode->btree.fnode_parent = up == node && fnod;
+				mark_buffer_dirty(bh);
+				brelse(bh);
+			}
+			return se;
+		}
+		up = up != node ? anode->up : -1;
+		btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1;
+		mark_buffer_dirty(bh);
+		brelse(bh);
+		a = na;
+		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
+			anode = new_anode;
+			/*anode->up = up != -1 ? up : ra;*/
+			anode->btree.internal = 1;
+			anode->btree.n_used_nodes = 1;
+			anode->btree.n_free_nodes = 59;
+			anode->btree.first_free = 16;
+			anode->btree.u.internal[0].down = a;
+			anode->btree.u.internal[0].file_secno = -1;
+			mark_buffer_dirty(bh);
+			brelse(bh);
+			if ((anode = hpfs_map_anode(s, a, &bh))) {
+				anode->up = na;
+				mark_buffer_dirty(bh);
+				brelse(bh);
+			}
+		} else na = a;
+	}
+	if ((anode = hpfs_map_anode(s, na, &bh))) {
+		anode->up = node;
+		if (fnod) anode->btree.fnode_parent = 1;
+		mark_buffer_dirty(bh);
+		brelse(bh);
+	}
+	if (!fnod) {
+		if (!(anode = hpfs_map_anode(s, node, &bh))) {
+			brelse(bh2);
+			return -1;
+		}
+		btree = &anode->btree;
+	} else {
+		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
+			brelse(bh2);
+			return -1;
+		}
+		btree = &fnode->btree;
+	}
+	ranode->up = node;
+	memcpy(&ranode->btree, btree, btree->first_free);
+	if (fnod) ranode->btree.fnode_parent = 1;
+	ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
+	if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+		struct anode *unode;
+		if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) {
+			unode->up = ra;
+			unode->btree.fnode_parent = 0;
+			mark_buffer_dirty(bh1);
+			brelse(bh1);
+		}
+	}
+	btree->internal = 1;
+	btree->n_free_nodes = fnod ? 10 : 58;
+	btree->n_used_nodes = 2;
+	btree->first_free = (char *)&btree->u.internal[2] - (char *)btree;
+	btree->u.internal[0].file_secno = fs;
+	btree->u.internal[0].down = ra;
+	btree->u.internal[1].file_secno = -1;
+	btree->u.internal[1].down = na;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	mark_buffer_dirty(bh2);
+	brelse(bh2);
+	return se;
+}
+
+/*
+ * Remove allocation tree. Recursion would look much nicer but
+ * I want to avoid it because it can cause stack overflow.
+ */
+
+void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
+{
+	struct bplus_header *btree1 = btree;
+	struct anode *anode = NULL;
+	anode_secno ano = 0, oano;
+	struct buffer_head *bh;
+	int level = 0;
+	int pos = 0;
+	int i;
+	int c1, c2 = 0;
+	int d1, d2;
+	go_down:
+	d2 = 0;
+	while (btree1->internal) {
+		ano = btree1->u.internal[pos].down;
+		if (level) brelse(bh);
+		if (hpfs_sb(s)->sb_chk)
+			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
+				return;
+		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
+		btree1 = &anode->btree;
+		level++;
+		pos = 0;
+	}
+	for (i = 0; i < btree1->n_used_nodes; i++)
+		hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length);
+	go_up:
+	if (!level) return;
+	brelse(bh);
+	if (hpfs_sb(s)->sb_chk)
+		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
+	hpfs_free_sectors(s, ano, 1);
+	oano = ano;
+	ano = anode->up;
+	if (--level) {
+		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
+		btree1 = &anode->btree;
+	} else btree1 = btree;
+	for (i = 0; i < btree1->n_used_nodes; i++) {
+		if (btree1->u.internal[i].down == oano) {
+			if ((pos = i + 1) < btree1->n_used_nodes)
+				goto go_down;
+			else
+				goto go_up;
+		}
+	}
+	hpfs_error(s,
+		   "reference to anode %08x not found in anode %08x "
+		   "(probably bad up pointer)",
+		   oano, level ? ano : -1);
+	if (level)
+		brelse(bh);
+}
+
+/* Just a wrapper around hpfs_bplus_lookup, used for reading extended attributes */
+
+static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
+{
+	struct anode *anode;
+	struct buffer_head *bh;
+	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
+	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
+}
+
+int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
+	    unsigned len, char *buf)
+{
+	struct buffer_head *bh;
+	char *data;
+	secno sec;
+	unsigned l;
+	while (len) {
+		if (ano) {
+			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
+				return -1;
+		} else sec = a + (pos >> 9);
+		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
+		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
+			return -1;
+		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
+		memcpy(buf, data + (pos & 0x1ff), l);
+		brelse(bh);
+		buf += l; pos += l; len -= l;
+	}
+	return 0;
+}
+
+int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
+	     unsigned len, char *buf)
+{
+	struct buffer_head *bh;
+	char *data;
+	secno sec;
+	unsigned l;
+	while (len) {
+		if (ano) {
+			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
+				return -1;
+		} else sec = a + (pos >> 9);
+		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
+		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
+			return -1;
+		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
+		memcpy(data + (pos & 0x1ff), buf, l);
+		mark_buffer_dirty(bh);
+		brelse(bh);
+		buf += l; pos += l; len -= l;
+	}
+	return 0;
+}
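+
+/*
+ * Both helpers above walk a byte range in 512-byte pieces: pos >> 9 picks
+ * the sector, pos & 0x1ff the offset inside it, and l = 0x200 - (pos & 0x1ff)
+ * is how much still fits in that sector.  For example, reading 600 bytes
+ * starting at pos 300 touches two sectors: 212 bytes from offset 300 of the
+ * first and the remaining 388 bytes from the start of the next.
+ */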
+
+void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
+{
+	struct anode *anode;
+	struct buffer_head *bh;
+	if (ano) {
+		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
+		hpfs_remove_btree(s, &anode->btree);
+		brelse(bh);
+		hpfs_free_sectors(s, a, 1);
+	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
+}
+
+/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */
+
+void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
+{
+	struct fnode *fnode;
+	struct anode *anode;
+	struct buffer_head *bh;
+	struct bplus_header *btree;
+	anode_secno node = f;
+	int i, j, nodes;
+	int c1, c2 = 0;
+	if (fno) {
+		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
+		btree = &fnode->btree;
+	} else {
+		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
+		btree = &anode->btree;
+	}
+	if (!secs) {
+		hpfs_remove_btree(s, btree);
+		if (fno) {
+			btree->n_free_nodes = 8;
+			btree->n_used_nodes = 0;
+			btree->first_free = 8;
+			btree->internal = 0;
+			mark_buffer_dirty(bh);
+		} else hpfs_free_sectors(s, f, 1);
+		brelse(bh);
+		return;
+	}
+	while (btree->internal) {
+		nodes = btree->n_used_nodes + btree->n_free_nodes;
+		for (i = 0; i < btree->n_used_nodes; i++)
+			if (btree->u.internal[i].file_secno >= secs) goto f;
+		brelse(bh);
+		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
+		return;
+		f:
+		for (j = i + 1; j < btree->n_used_nodes; j++)
+			hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0);
+		btree->n_used_nodes = i + 1;
+		btree->n_free_nodes = nodes - btree->n_used_nodes;
+		btree->first_free = 8 + 8 * btree->n_used_nodes;
+		mark_buffer_dirty(bh);
+		if (btree->u.internal[i].file_secno == secs) {
+			brelse(bh);
+			return;
+		}
+		node = btree->u.internal[i].down;
+		brelse(bh);
+		if (hpfs_sb(s)->sb_chk)
+			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
+				return;
+		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
+		btree = &anode->btree;
+	}	
+	nodes = btree->n_used_nodes + btree->n_free_nodes;
+	for (i = 0; i < btree->n_used_nodes; i++)
+		if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff;
+	brelse(bh);
+	return;
+	ff:
+	if (secs <= btree->u.external[i].file_secno) {
+		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
+		if (i) i--;
+	}
+	else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) {
+		hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs -
+			btree->u.external[i].file_secno, btree->u.external[i].length
+			- secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */
+		btree->u.external[i].length = secs - btree->u.external[i].file_secno;
+	}
+	for (j = i + 1; j < btree->n_used_nodes; j++)
+		hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length);
+	btree->n_used_nodes = i + 1;
+	btree->n_free_nodes = nodes - btree->n_used_nodes;
+	btree->first_free = 8 + 12 * btree->n_used_nodes;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+}
+
+/* Remove a file or directory and its extended attributes - note that the
+   directory must be empty when this is called. */
+
+void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
+{
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	struct extended_attribute *ea;
+	struct extended_attribute *ea_end;
+	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
+	if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
+	else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno);
+	ea_end = fnode_end_ea(fnode);
+	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
+		if (ea->indirect)
+			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
+	hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l);
+	brelse(bh);
+	hpfs_free_sectors(s, fno, 1);
+}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
new file mode 100644
index 0000000..2807aa8
--- /dev/null
+++ b/fs/hpfs/buffer.c
@@ -0,0 +1,175 @@
+/*
+ *  linux/fs/hpfs/buffer.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  general buffer i/o
+ */
+
+#include "hpfs_fn.h"
+
+void hpfs_lock_creation(struct super_block *s)
+{
+#ifdef DEBUG_LOCKS
+	printk("lock creation\n");
+#endif
+	down(&hpfs_sb(s)->hpfs_creation_de);
+}
+
+void hpfs_unlock_creation(struct super_block *s)
+{
+#ifdef DEBUG_LOCKS
+	printk("unlock creation\n");
+#endif
+	up(&hpfs_sb(s)->hpfs_creation_de);
+}
+
+/* Map a sector into a buffer and return pointers to it and to the buffer. */
+
+void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
+		 int ahead)
+{
+	struct buffer_head *bh;
+
+	cond_resched();
+
+	*bhp = bh = sb_bread(s, secno);
+	if (bh != NULL)
+		return bh->b_data;
+	else {
+		printk("HPFS: hpfs_map_sector: read error\n");
+		return NULL;
+	}
+}
+
+/* Like hpfs_map_sector but don't read anything */
+
+void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
+{
+	struct buffer_head *bh;
+	/*return hpfs_map_sector(s, secno, bhp, 0);*/
+
+	cond_resched();
+
+	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
+		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
+		set_buffer_uptodate(bh);
+		return bh->b_data;
+	} else {
+		printk("HPFS: hpfs_get_sector: getblk failed\n");
+		return NULL;
+	}
+}
+
+/* Map 4 consecutive sectors into a quad buffer and return a pointer to the flat data copy. */
+
+void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
+		   int ahead)
+{
+	struct buffer_head *bh;
+	char *data;
+
+	cond_resched();
+
+	if (secno & 3) {
+		printk("HPFS: hpfs_map_4sectors: unaligned read\n");
+		return NULL;
+	}
+
+	qbh->data = data = (char *)kmalloc(2048, GFP_NOFS);
+	if (!data) {
+		printk("HPFS: hpfs_map_4sectors: out of memory\n");
+		goto bail;
+	}
+
+	qbh->bh[0] = bh = sb_bread(s, secno);
+	if (!bh)
+		goto bail0;
+	memcpy(data, bh->b_data, 512);
+
+	qbh->bh[1] = bh = sb_bread(s, secno + 1);
+	if (!bh)
+		goto bail1;
+	memcpy(data + 512, bh->b_data, 512);
+
+	qbh->bh[2] = bh = sb_bread(s, secno + 2);
+	if (!bh)
+		goto bail2;
+	memcpy(data + 2 * 512, bh->b_data, 512);
+
+	qbh->bh[3] = bh = sb_bread(s, secno + 3);
+	if (!bh)
+		goto bail3;
+	memcpy(data + 3 * 512, bh->b_data, 512);
+
+	return data;
+
+ bail3:
+	brelse(qbh->bh[2]);
+ bail2:
+	brelse(qbh->bh[1]);
+ bail1:
+	brelse(qbh->bh[0]);
+ bail0:
+	kfree(data);
+	printk("HPFS: hpfs_map_4sectors: read error\n");
+ bail:
+	return NULL;
+}
+
+/* Like hpfs_map_4sectors, but don't read the sectors from disk */
+
+void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
+                          struct quad_buffer_head *qbh)
+{
+	cond_resched();
+
+	if (secno & 3) {
+		printk("HPFS: hpfs_get_4sectors: unaligned read\n");
+		return NULL;
+	}
+
+	/*return hpfs_map_4sectors(s, secno, qbh, 0);*/
+	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
+		printk("HPFS: hpfs_get_4sectors: out of memory\n");
+		return NULL;
+	}
+	if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0;
+	if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1;
+	if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2;
+	if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3;
+	memcpy(qbh->data, qbh->bh[0]->b_data, 512);
+	memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512);
+	memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512);
+	memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512);
+	return qbh->data;
+
+	bail3:	brelse(qbh->bh[2]);
+	bail2:	brelse(qbh->bh[1]);
+	bail1:	brelse(qbh->bh[0]);
+	bail0:
+	return NULL;
+}
+	
+
+void hpfs_brelse4(struct quad_buffer_head *qbh)
+{
+	brelse(qbh->bh[3]);
+	brelse(qbh->bh[2]);
+	brelse(qbh->bh[1]);
+	brelse(qbh->bh[0]);
+	kfree(qbh->data);
+}	
+
+void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
+{
+	PRINTK(("hpfs_mark_4buffers_dirty\n"));
+	memcpy(qbh->bh[0]->b_data, qbh->data, 512);
+	memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
+	memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
+	memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
+	mark_buffer_dirty(qbh->bh[0]);
+	mark_buffer_dirty(qbh->bh[1]);
+	mark_buffer_dirty(qbh->bh[2]);
+	mark_buffer_dirty(qbh->bh[3]);
+}
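+
+/*
+ * The quad-buffer pattern used throughout the driver, summarized: a dnode or
+ * bitmap spans four 512-byte sectors, so hpfs_map_4sectors()/hpfs_get_4sectors()
+ * copy them into one 2048-byte kmalloc'ed area (qbh->data), the caller works
+ * on that flat copy, hpfs_mark_4buffers_dirty() copies it back into the four
+ * buffer heads and dirties them, and hpfs_brelse4() releases the buffers and
+ * frees the copy.
+ */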
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
new file mode 100644
index 0000000..0831912
--- /dev/null
+++ b/fs/hpfs/dentry.c
@@ -0,0 +1,60 @@
+/*
+ *  linux/fs/hpfs/dentry.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  dcache operations
+ */
+
+#include "hpfs_fn.h"
+
+/*
+ * Note: the dentry argument is the parent dentry.
+ */
+
+static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+{
+	unsigned long	 hash;
+	int		 i;
+	unsigned l = qstr->len;
+
+	if (l == 1) if (qstr->name[0]=='.') goto x;
+	if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
+	hpfs_adjust_length((char *)qstr->name, &l);
+	/*if (hpfs_chk_name((char *)qstr->name,&l))*/
+		/*return -ENAMETOOLONG;*/
+		/*return -ENOENT;*/
+	x:
+
+	hash = init_name_hash();
+	for (i = 0; i < l; i++)
+		hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash);
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	unsigned al=a->len;
+	unsigned bl=b->len;
+	hpfs_adjust_length((char *)a->name, &al);
+	/*hpfs_adjust_length((char *)b->name, &bl);*/
+	/* 'a' is the qstr of an already existing dentry, so the name
+	 * must be valid. 'b' must be validated first.
+	 */
+
+	if (hpfs_chk_name((char *)b->name, &bl)) return 1;
+	if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1;
+	return 0;
+}
+
+static struct dentry_operations hpfs_dentry_operations = {
+	.d_hash		= hpfs_hash_dentry,
+	.d_compare	= hpfs_compare_dentry,
+};
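+
+/*
+ * Net effect of the two operations above: name lookups are case-insensitive.
+ * hpfs_hash_dentry() upcases every character through the mounted code page
+ * before hashing and hpfs_compare_dentry() defers to hpfs_compare_names(),
+ * so a lookup of "readme" can find an entry stored as "README".
+ */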
+
+void hpfs_set_dentry_operations(struct dentry *dentry)
+{
+	dentry->d_op = &hpfs_dentry_operations;
+}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
new file mode 100644
index 0000000..0217c3a
--- /dev/null
+++ b/fs/hpfs/dir.c
@@ -0,0 +1,320 @@
+/*
+ *  linux/fs/hpfs/dir.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  directory VFS functions
+ */
+
+#include "hpfs_fn.h"
+
+static int hpfs_dir_release(struct inode *inode, struct file *filp)
+{
+	lock_kernel();
+	hpfs_del_pos(inode, &filp->f_pos);
+	/*hpfs_write_if_changed(inode);*/
+	unlock_kernel();
+	return 0;
+}
+
+/* This is slow, but it's not used often */
+
+static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t new_off = off + (whence == 1 ? filp->f_pos : 0);
+	loff_t pos;
+	struct quad_buffer_head qbh;
+	struct inode *i = filp->f_dentry->d_inode;
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	struct super_block *s = i->i_sb;
+
+	lock_kernel();
+
+	/*printk("dir lseek\n");*/
+	if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
+	down(&i->i_sem);
+	pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
+	while (pos != new_off) {
+		if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
+		else goto fail;
+		if (pos == 12) goto fail;
+	}
+	up(&i->i_sem);
+ok:
+	unlock_kernel();
+	return filp->f_pos = new_off;
+fail:
+	up(&i->i_sem);
+	/*printk("illegal lseek: %016llx\n", new_off);*/
+	unlock_kernel();
+	return -ESPIPE;
+}
+
+static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	int lc;
+	long old_pos;
+	char *tempname;
+	int c1, c2 = 0;
+	int ret = 0;
+
+	lock_kernel();
+
+	if (hpfs_sb(inode->i_sb)->sb_chk) {
+		if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
+			ret = -EFSERROR;
+			goto out;
+		}
+		if (hpfs_chk_sectors(inode->i_sb, hpfs_inode->i_dno, 4, "dir_dnode")) {
+			ret = -EFSERROR;
+			goto out;
+		}
+	}
+	if (hpfs_sb(inode->i_sb)->sb_chk >= 2) {
+		struct buffer_head *bh;
+		struct fnode *fno;
+		int e = 0;
+		if (!(fno = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) {
+			ret = -EIOERROR;
+			goto out;
+		}
+		if (!fno->dirflag) {
+			e = 1;
+			hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino);
+		}
+		if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
+			e = 1;
+			hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno);
+		}
+		brelse(bh);
+		if (e) {
+			ret = -EFSERROR;
+			goto out;
+		}
+	}
+	lc = hpfs_sb(inode->i_sb)->sb_lowercase;
+	if (filp->f_pos == 12) { /* diff -r requires this (note that diff -r */
+		filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */
+		goto out;
+	}
+	if (filp->f_pos == 13) {
+		ret = -ENOENT;
+		goto out;
+	}
+	
+	while (1) {
+		again:
+		/* This won't work when the cycle is longer than the number of
+		   dirents accepted by filldir, but what can I do?
+		   Maybe "killall -9 ls" helps. */
+		if (hpfs_sb(inode->i_sb)->sb_chk)
+			if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) {
+				ret = -EFSERROR;
+				goto out;
+			}
+		if (filp->f_pos == 12)
+			goto out;
+		if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) {
+			printk("HPFS: warning: pos==%d\n",(int)filp->f_pos);
+			goto out;
+		}
+		if (filp->f_pos == 0) {
+			if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
+				goto out;
+			filp->f_pos = 11;
+		}
+		if (filp->f_pos == 11) {
+			if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0)
+				goto out;
+			filp->f_pos = 1;
+		}
+		if (filp->f_pos == 1) {
+			filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
+			hpfs_add_pos(inode, &filp->f_pos);
+			filp->f_version = inode->i_version;
+		}
+		old_pos = filp->f_pos;
+		if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) {
+			ret = -EIOERROR;
+			goto out;
+		}
+		if (de->first || de->last) {
+			if (hpfs_sb(inode->i_sb)->sb_chk) {
+				if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos);
+				if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos);
+			}
+			hpfs_brelse4(&qbh);
+			goto again;
+		}
+		tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
+		if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
+			filp->f_pos = old_pos;
+			if (tempname != (char *)de->name) kfree(tempname);
+			hpfs_brelse4(&qbh);
+			goto out;
+		}
+		if (tempname != (char *)de->name) kfree(tempname);
+		hpfs_brelse4(&qbh);
+	}
+out:
+	unlock_kernel();
+	return ret;
+}
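+
+/*
+ * The f_pos bookkeeping used above, gathered in one place: 0 means "emit .",
+ * 11 means "emit ..", 1 means "descend to the first real dirent", 12 and 13
+ * are the end-of-directory states (12 exists only to keep diff -r happy),
+ * and every real position is (dnode_secno << 4) | index-within-dnode, as
+ * produced by get_pos() in dnode.c.
+ */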
+
+/*
+ * lookup.  Search the specified directory for the specified name, set
+ * *result to the corresponding inode.
+ *
+ * lookup uses the inode number to tell read_inode whether it is reading
+ * the inode of a directory or a file -- file ino's are odd, directory
+ * ino's are even.  read_inode avoids i/o for file inodes; everything
+ * needed is up here in the directory.  (And file fnodes are out in
+ * the boondocks.)
+ *
+ *    - M.P.: this is over, sometimes we've got to read file's fnode for eas
+ *	      inode numbers are just fnode sector numbers; iget lock is used
+ *	      to tell read_inode to read fnode or not.
+ */
+
+struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	ino_t ino;
+	int err;
+	struct inode *result = NULL;
+	struct hpfs_inode_info *hpfs_result;
+
+	lock_kernel();
+	if ((err = hpfs_chk_name((char *)name, &len))) {
+		if (err == -ENAMETOOLONG) {
+			unlock_kernel();
+			return ERR_PTR(-ENAMETOOLONG);
+		}
+		goto end_add;
+	}
+
+	/*
+	 * '.' and '..' will never be passed here.
+	 */
+
+	de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh);
+
+	/*
+	 * This is not really a bailout, just means file not found.
+	 */
+
+	if (!de) goto end;
+
+	/*
+	 * Get inode number, what we're after.
+	 */
+
+	ino = de->fnode;
+
+	/*
+	 * Go find or make an inode.
+	 */
+
+	result = iget_locked(dir->i_sb, ino);
+	if (!result) {
+		hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode");
+		goto bail1;
+	}
+	if (result->i_state & I_NEW) {
+		hpfs_init_inode(result);
+		if (de->directory)
+			hpfs_read_inode(result);
+		else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas)
+			hpfs_read_inode(result);
+		else {
+			result->i_mode |= S_IFREG;
+			result->i_mode &= ~0111;
+			result->i_op = &hpfs_file_iops;
+			result->i_fop = &hpfs_file_ops;
+			result->i_nlink = 1;
+		}
+		unlock_new_inode(result);
+	}
+	hpfs_result = hpfs_i(result);
+	if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
+
+	hpfs_decide_conv(result, (char *)name, len);
+
+	if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
+		hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
+		goto bail1;
+	}
+
+	/*
+	 * Fill in the info from the directory if this is a newly created
+	 * inode.
+	 */
+
+	if (!result->i_ctime.tv_sec) {
+		if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date)))
+			result->i_ctime.tv_sec = 1;
+		result->i_ctime.tv_nsec = 0;
+		result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date);
+		result->i_mtime.tv_nsec = 0;
+		result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date);
+		result->i_atime.tv_nsec = 0;
+		hpfs_result->i_ea_size = de->ea_size;
+		if (!hpfs_result->i_ea_mode && de->read_only)
+			result->i_mode &= ~0222;
+		if (!de->directory) {
+			if (result->i_size == -1) {
+				result->i_size = de->file_size;
+				result->i_data.a_ops = &hpfs_aops;
+				hpfs_i(result)->mmu_private = result->i_size;
+			/*
+			 * i_blocks should count the fnode and any anodes.
+			 * We count 1 for the fnode and don't bother about
+			 * anodes -- the disk heads are on the directory band
+			 * and we want them to stay there.
+			 */
+				result->i_blocks = 1 + ((result->i_size + 511) >> 9);
+			}
+		}
+	}
+
+	hpfs_brelse4(&qbh);
+
+	/*
+	 * Made it.
+	 */
+
+	end:
+	end_add:
+	hpfs_set_dentry_operations(dentry);
+	unlock_kernel();
+	d_add(dentry, result);
+	return NULL;
+
+	/*
+	 * Didn't.
+	 */
+	bail1:
+	
+	hpfs_brelse4(&qbh);
+	
+	/*bail:*/
+
+	unlock_kernel();
+	return ERR_PTR(-ENOENT);
+}
+
+struct file_operations hpfs_dir_ops =
+{
+	.llseek		= hpfs_dir_lseek,
+	.read		= generic_read_dir,
+	.readdir	= hpfs_readdir,
+	.release	= hpfs_dir_release,
+	.fsync		= hpfs_file_fsync,
+};
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
new file mode 100644
index 0000000..1d21307
--- /dev/null
+++ b/fs/hpfs/dnode.c
@@ -0,0 +1,1080 @@
+/*
+ *  linux/fs/hpfs/dnode.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  handling directory dnode tree - adding, deleting & searching for dirents
+ */
+
+#include "hpfs_fn.h"
+
+static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde)
+{
+	struct hpfs_dirent *de;
+	struct hpfs_dirent *de_end = dnode_end_de(d);
+	int i = 1;
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
+		if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i;
+		i++;
+	}
+	printk("HPFS: get_pos: not_found\n");
+	return ((loff_t)d->self << 4) | (loff_t)1;
+}
+
+void hpfs_add_pos(struct inode *inode, loff_t *pos)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	int i = 0;
+	loff_t **ppos;
+
+	if (hpfs_inode->i_rddir_off)
+		for (; hpfs_inode->i_rddir_off[i]; i++)
+			if (hpfs_inode->i_rddir_off[i] == pos) return;
+	if (!(i&0x0f)) {
+		if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) {
+			printk("HPFS: out of memory for position list\n");
+			return;
+		}
+		if (hpfs_inode->i_rddir_off) {
+			memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t));
+			kfree(hpfs_inode->i_rddir_off);
+		}
+		hpfs_inode->i_rddir_off = ppos;
+	}
+	hpfs_inode->i_rddir_off[i] = pos;
+	hpfs_inode->i_rddir_off[i + 1] = NULL;
+}
+
+void hpfs_del_pos(struct inode *inode, loff_t *pos)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	loff_t **i, **j;
+
+	if (!hpfs_inode->i_rddir_off) goto not_f;
+	for (i = hpfs_inode->i_rddir_off; *i; i++) if (*i == pos) goto fnd;
+	goto not_f;
+	fnd:
+	for (j = i + 1; *j; j++) ;
+	*i = *(j - 1);
+	*(j - 1) = NULL;
+	if (j - 1 == hpfs_inode->i_rddir_off) {
+		kfree(hpfs_inode->i_rddir_off);
+		hpfs_inode->i_rddir_off = NULL;
+	}
+	return;
+	not_f:
+	/*printk("HPFS: warning: position pointer %p->%08x not found\n", pos, (int)*pos);*/
+	return;
+}
+
+static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t),
+			 loff_t p1, loff_t p2)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	loff_t **i;
+
+	if (!hpfs_inode->i_rddir_off) return;
+	for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2);
+	return;
+}
+
+static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t)
+{
+	if (*p == f) *p = t;
+}
+
+/*void hpfs_hpfs_pos_substd(loff_t *p, loff_t f, loff_t t)
+{
+	if ((*p & ~0x3f) == (f & ~0x3f)) *p = (t & ~0x3f) | (*p & 0x3f);
+}*/
+
+static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c)
+{
+	if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) {
+		int n = (*p & 0x3f) + c;
+		if (n > 0x3f) printk("HPFS: hpfs_pos_ins: %08x + %d\n", (int)*p, (int)c >> 8);
+		else *p = (*p & ~0x3f) | n;
+	}
+}
+
+static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c)
+{
+	if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) {
+		int n = (*p & 0x3f) - c;
+		if (n < 1) printk("HPFS: hpfs_pos_del: %08x - %d\n", (int)*p, (int)c >> 8);
+		else *p = (*p & ~0x3f) | n;
+	}
+}
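+
+/*
+ * The masks above rely on the position encoding: dnodes are 4-aligned, so
+ * (dnode_secno << 4) leaves the low 6 bits free for the entry index -
+ * "pos & ~0x3f" is the dnode part and "pos & 0x3f" the index, which
+ * hpfs_pos_ins()/hpfs_pos_del() shift when dirents are inserted or deleted
+ * at or before an open readdir position in the same dnode.
+ */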
+
+static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d)
+{
+	struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL;
+	de_end = dnode_end_de(d);
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
+		deee = dee; dee = de;
+	}	
+	return deee;
+}
+
+static struct hpfs_dirent *dnode_last_de(struct dnode *d)
+{
+	struct hpfs_dirent *de, *de_end, *dee = NULL;
+	de_end = dnode_end_de(d);
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
+		dee = de;
+	}	
+	return dee;
+}
+
+static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr)
+{
+	struct hpfs_dirent *de;
+	if (!(de = dnode_last_de(d))) {
+		hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self);
+		return;
+	}
+	if (hpfs_sb(s)->sb_chk) {
+		if (de->down) {
+			hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x",
+				d->self, de_down_pointer(de));
+			return;
+		}
+		if (de->length != 32) {
+			hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self);
+			return;
+		}
+	}
+	if (ptr) {
+		if ((d->first_free += 4) > 2048) {
+			hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self);
+			d->first_free -= 4;
+			return;
+		}
+		de->length = 36;
+		de->down = 1;
+		*(dnode_secno *)((char *)de + 32) = ptr;
+	}
+}
+
+/* Add an entry to dnode and don't care if it grows over 2048 bytes */
+
+struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name,
+				unsigned namelen, secno down_ptr)
+{
+	struct hpfs_dirent *de;
+	struct hpfs_dirent *de_end = dnode_end_de(d);
+	unsigned d_size = de_size(namelen, down_ptr);
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
+		int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last);
+		if (!c) {
+			hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, d->self);
+			return NULL;
+		}
+		if (c < 0) break;
+	}
+	memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
+	memset(de, 0, d_size);
+	if (down_ptr) {
+		*(int *)((char *)de + d_size - 4) = down_ptr;
+		de->down = 1;
+	}
+	de->length = d_size;
+	if (down_ptr) de->down = 1;
+	de->not_8x3 = hpfs_is_name_long(name, namelen);
+	de->namelen = namelen;
+	memcpy(de->name, name, namelen);
+	d->first_free += d_size;
+	return de;
+}
+
+/* Delete dirent and don't care about its subtree */
+
+static void hpfs_delete_de(struct super_block *s, struct dnode *d,
+			   struct hpfs_dirent *de)
+{
+	if (de->last) {
+		hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self);
+		return;
+	}
+	d->first_free -= de->length;
+	memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de);
+}
+
+static void fix_up_ptrs(struct super_block *s, struct dnode *d)
+{
+	struct hpfs_dirent *de;
+	struct hpfs_dirent *de_end = dnode_end_de(d);
+	dnode_secno dno = d->self;
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de))
+		if (de->down) {
+			struct quad_buffer_head qbh;
+			struct dnode *dd;
+			if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) {
+				if (dd->up != dno || dd->root_dnode) {
+					dd->up = dno;
+					dd->root_dnode = 0;
+					hpfs_mark_4buffers_dirty(&qbh);
+				}
+				hpfs_brelse4(&qbh);
+			}
+		}
+}
+
+/* Add an entry to dnode and do dnode splitting if required */
+
+static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
+			     unsigned char *name, unsigned namelen,
+			     struct hpfs_dirent *new_de, dnode_secno down_ptr)
+{
+	struct quad_buffer_head qbh, qbh1, qbh2;
+	struct dnode *d, *ad, *rd, *nd = NULL;
+	dnode_secno adno, rdno;
+	struct hpfs_dirent *de;
+	struct hpfs_dirent nde;
+	char *nname;
+	int h;
+	int pos;
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	int c1, c2 = 0;
+	if (!(nname = kmalloc(256, GFP_NOFS))) {
+		printk("HPFS: out of memory, can't add to dnode\n");
+		return 1;
+	}
+	go_up:
+	if (namelen >= 256) {
+		hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen);
+		if (nd) kfree(nd);
+		kfree(nname);
+		return 1;
+	}
+	if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) {
+		if (nd) kfree(nd);
+		kfree(nname);
+		return 1;
+	}
+	go_up_a:
+	if (hpfs_sb(i->i_sb)->sb_chk)
+		if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) {
+			hpfs_brelse4(&qbh);
+			if (nd) kfree(nd);
+			kfree(nname);
+			return 1;
+		}
+	if (d->first_free + de_size(namelen, down_ptr) <= 2048) {
+		loff_t t;
+		copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de);
+		t = get_pos(d, de);
+		for_all_poss(i, hpfs_pos_ins, t, 1);
+		for_all_poss(i, hpfs_pos_subst, 4, t);
+		for_all_poss(i, hpfs_pos_subst, 5, t + 1);
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		if (nd) kfree(nd);
+		kfree(nname);
+		return 0;
+	}
+	if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) {
+		/* 0x924 is a max size of dnode after adding a dirent with
+		   max name length. We alloc this only once. There must
+		   not be any error while splitting dnodes, otherwise the
+		   whole directory, not only the file we're adding, would
+		   be lost. */
+		printk("HPFS: out of memory for dnode splitting\n");
+		hpfs_brelse4(&qbh);
+		kfree(nname);
+		return 1;
+	}	
+	memcpy(nd, d, d->first_free);
+	copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de);
+	for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1);
+	h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10;
+	if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) {
+		hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
+		hpfs_brelse4(&qbh);
+		kfree(nd);
+		kfree(nname);
+		return 1;
+	}
+	i->i_size += 2048;
+	i->i_blocks += 4;
+	pos = 1;
+	for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) {
+		copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de);
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos);
+		pos++;
+	}
+	copy_de(new_de = &nde, de);
+	memcpy(name = nname, de->name, namelen = de->namelen);
+	for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4);
+	down_ptr = adno;
+	set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
+	de = de_next_de(de);
+	memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de);
+	nd->first_free -= (char *)de - (char *)nd - 20;
+	memcpy(d, nd, nd->first_free);
+	for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos);
+	fix_up_ptrs(i->i_sb, ad);
+	if (!d->root_dnode) {
+		dno = ad->up = d->up;
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		hpfs_mark_4buffers_dirty(&qbh1);
+		hpfs_brelse4(&qbh1);
+		goto go_up;
+	}
+	if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) {
+		hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
+		hpfs_brelse4(&qbh);
+		hpfs_brelse4(&qbh1);
+		kfree(nd);
+		kfree(nname);
+		return 1;
+	}
+	i->i_size += 2048;
+	i->i_blocks += 4;
+	rd->root_dnode = 1;
+	rd->up = d->up;
+	if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) {
+		hpfs_free_dnode(i->i_sb, rdno);
+		hpfs_brelse4(&qbh);
+		hpfs_brelse4(&qbh1);
+		hpfs_brelse4(&qbh2);
+		kfree(nd);
+		kfree(nname);
+		return 1;
+	}
+	fnode->u.external[0].disk_secno = rdno;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	d->up = ad->up = hpfs_i(i)->i_dno = rdno;
+	d->root_dnode = ad->root_dnode = 0;
+	hpfs_mark_4buffers_dirty(&qbh);
+	hpfs_brelse4(&qbh);
+	hpfs_mark_4buffers_dirty(&qbh1);
+	hpfs_brelse4(&qbh1);
+	qbh = qbh2;
+	set_last_pointer(i->i_sb, rd, dno);
+	dno = rdno;
+	d = rd;
+	goto go_up_a;
+}
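+
+/*
+ * Rough shape of the split above, for orientation: the over-full dnode is
+ * first built in the 0x924-byte scratch buffer nd, everything below the
+ * midpoint h is copied into the freshly allocated dnode ad, the median
+ * dirent is remembered in nde/nname and re-inserted one level up with its
+ * down pointer aimed at ad, and if the root itself had to split, a new root
+ * dnode rd is allocated and the fnode is pointed at it.
+ */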
+
+/*
+ * Add an entry to directory btree.
+ * I hate such crazy directory structure.
+ * It's easy to read but terrible to write.
+ * I wrote this directory code 4 times.
+ * I hope it's finally bug-free now.
+ */
+
+int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen,
+		    struct hpfs_dirent *new_de, int cdepth)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	struct dnode *d;
+	struct hpfs_dirent *de, *de_end;
+	struct quad_buffer_head qbh;
+	dnode_secno dno;
+	int c;
+	int c1, c2 = 0;
+	dno = hpfs_inode->i_dno;
+	down:
+	if (hpfs_sb(i->i_sb)->sb_chk)
+		if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1;
+	if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1;
+	de_end = dnode_end_de(d);
+	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
+		if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) {
+			hpfs_brelse4(&qbh);
+			return -1;
+		}	
+		if (c < 0) {
+			if (de->down) {
+				dno = de_down_pointer(de);
+				hpfs_brelse4(&qbh);
+				goto down;
+			}
+			break;
+		}
+	}
+	hpfs_brelse4(&qbh);
+	if (!cdepth) hpfs_lock_creation(i->i_sb);
+	if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) {
+		c = 1;
+		goto ret;
+	}	
+	i->i_version++;
+	c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
+	ret:
+	if (!cdepth) hpfs_unlock_creation(i->i_sb);
+	return c;
+}
+
+/* 
+ * Find the dirent with the highest name in the 'from' subtree and move it to the 'to' dnode.
+ * Return the dnode we moved from (to be checked later if it's empty)
+ */
+
+static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
+{
+	dnode_secno dno, ddno;
+	dnode_secno chk_up = to;
+	struct dnode *dnode;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de, *nde;
+	int a;
+	loff_t t;
+	int c1, c2 = 0;
+	dno = from;
+	while (1) {
+		if (hpfs_sb(i->i_sb)->sb_chk)
+			if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top"))
+				return 0;
+		if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0;
+		if (hpfs_sb(i->i_sb)->sb_chk) {
+			if (dnode->up != chk_up) {
+				hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x",
+					dno, chk_up, dnode->up);
+				hpfs_brelse4(&qbh);
+				return 0;
+			}
+			chk_up = dno;
+		}
+		if (!(de = dnode_last_de(dnode))) {
+			hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno);
+			hpfs_brelse4(&qbh);
+			return 0;
+		}
+		if (!de->down) break;
+		dno = de_down_pointer(de);
+		hpfs_brelse4(&qbh);
+	}
+	while (!(de = dnode_pre_last_de(dnode))) {
+		dnode_secno up = dnode->up;
+		hpfs_brelse4(&qbh);
+		hpfs_free_dnode(i->i_sb, dno);
+		i->i_size -= 2048;
+		i->i_blocks -= 4;
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5);
+		if (up == to) return to;
+		if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0;
+		if (dnode->root_dnode) {
+			hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to);
+			hpfs_brelse4(&qbh);
+			return 0;
+		}
+		de = dnode_last_de(dnode);
+		if (!de || !de->down) {
+			hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno);
+			hpfs_brelse4(&qbh);
+			return 0;
+		}
+		dnode->first_free -= 4;
+		de->length -= 4;
+		de->down = 0;
+		hpfs_mark_4buffers_dirty(&qbh);
+		dno = up;
+	}
+	t = get_pos(dnode, de);
+	for_all_poss(i, hpfs_pos_subst, t, 4);
+	for_all_poss(i, hpfs_pos_subst, t + 1, 5);
+	if (!(nde = kmalloc(de->length, GFP_NOFS))) {
+		hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted");
+		hpfs_brelse4(&qbh);
+		return 0;
+	}
+	memcpy(nde, de, de->length);
+	ddno = de->down ? de_down_pointer(de) : 0;
+	hpfs_delete_de(i->i_sb, dnode, de);
+	set_last_pointer(i->i_sb, dnode, ddno);
+	hpfs_mark_4buffers_dirty(&qbh);
+	hpfs_brelse4(&qbh);
+	a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from);
+	kfree(nde);
+	if (a) return 0;
+	return dno;
+}
+
+/* 
+ * Check if a dnode is empty and delete it from the tree
+ * (chkdsk doesn't like empty dnodes)
+ */
+
+static void delete_empty_dnode(struct inode *i, dnode_secno dno)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	struct quad_buffer_head qbh;
+	struct dnode *dnode;
+	dnode_secno down, up, ndown;
+	int p;
+	struct hpfs_dirent *de;
+	int c1, c2 = 0;
+	try_it_again:
+	if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return;
+	if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return;
+	if (dnode->first_free > 56) goto end;
+	if (dnode->first_free == 52 || dnode->first_free == 56) {
+		struct hpfs_dirent *de_end;
+		int root = dnode->root_dnode;
+		up = dnode->up;
+		de = dnode_first_de(dnode);
+		down = de->down ? de_down_pointer(de) : 0;
+		if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) {
+			hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno);
+			goto end;
+		}
+		hpfs_brelse4(&qbh);
+		hpfs_free_dnode(i->i_sb, dno);
+		i->i_size -= 2048;
+		i->i_blocks -= 4;
+		if (root) {
+			struct fnode *fnode;
+			struct buffer_head *bh;
+			struct dnode *d1;
+			struct quad_buffer_head qbh1;
+			if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) {
+				hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino);
+				return;
+			}
+			if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
+				d1->up = up;
+				d1->root_dnode = 1;
+				hpfs_mark_4buffers_dirty(&qbh1);
+				hpfs_brelse4(&qbh1);
+			}
+			if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) {
+				fnode->u.external[0].disk_secno = down;
+				mark_buffer_dirty(bh);
+				brelse(bh);
+			}
+			hpfs_inode->i_dno = down;
+			for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12);
+			return;
+		}
+		if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return;
+		p = 1;
+		de_end = dnode_end_de(dnode);
+		for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++)
+			if (de->down) if (de_down_pointer(de) == dno) goto fnd;
+		hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up);
+		goto end;
+		fnd:
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p);
+		if (!down) {
+			de->down = 0;
+			de->length -= 4;
+			dnode->first_free -= 4;
+			memmove(de_next_de(de), (char *)de_next_de(de) + 4,
+				(char *)dnode + dnode->first_free - (char *)de_next_de(de));
+		} else {
+			struct dnode *d1;
+			struct quad_buffer_head qbh1;
+			*(dnode_secno *) ((void *) de + de->length - 4) = down;
+			if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
+				d1->up = up;
+				hpfs_mark_4buffers_dirty(&qbh1);
+				hpfs_brelse4(&qbh1);
+			}
+		}
+	} else {
+		hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free);
+		goto end;
+	}
+
+	if (!de->last) {
+		struct hpfs_dirent *de_next = de_next_de(de);
+		struct hpfs_dirent *de_cp;
+		struct dnode *d1;
+		struct quad_buffer_head qbh1;
+		if (!de_next->down) goto endm;
+		ndown = de_down_pointer(de_next);
+		if (!(de_cp = kmalloc(de->length, GFP_NOFS))) {
+			printk("HPFS: out of memory for dtree balancing\n");
+			goto endm;
+		}
+		memcpy(de_cp, de, de->length);
+		hpfs_delete_de(i->i_sb, dnode, de);
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4);
+		for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1);
+		if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) {
+			d1->up = ndown;
+			hpfs_mark_4buffers_dirty(&qbh1);
+			hpfs_brelse4(&qbh1);
+		}
+		hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0);
+		/*printk("UP-TO-DNODE: %08x (ndown = %08x, down = %08x, dno = %08x)\n", up, ndown, down, dno);*/
+		dno = up;
+		kfree(de_cp);
+		goto try_it_again;
+	} else {
+		struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode);
+		struct hpfs_dirent *de_cp;
+		struct dnode *d1;
+		struct quad_buffer_head qbh1;
+		dnode_secno dlp;
+		if (!de_prev) {
+			hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up);
+			hpfs_mark_4buffers_dirty(&qbh);
+			hpfs_brelse4(&qbh);
+			dno = up;
+			goto try_it_again;
+		}
+		if (!de_prev->down) goto endm;
+		ndown = de_down_pointer(de_prev);
+		if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) {
+			struct hpfs_dirent *del = dnode_last_de(d1);
+			dlp = del->down ? de_down_pointer(del) : 0;
+			if (!dlp && down) {
+				if (d1->first_free > 2044) {
+					if (hpfs_sb(i->i_sb)->sb_chk >= 2) {
+						printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
+						printk("HPFS: warning: terminating balancing operation\n");
+					}
+					hpfs_brelse4(&qbh1);
+					goto endm;
+				}
+				if (hpfs_sb(i->i_sb)->sb_chk >= 2) {
+					printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
+					printk("HPFS: warning: goin'on\n");
+				}
+				del->length += 4;
+				del->down = 1;
+				d1->first_free += 4;
+			}
+			if (dlp && !down) {
+				del->length -= 4;
+				del->down = 0;
+				d1->first_free -= 4;
+			} else if (down)
+				*(dnode_secno *) ((void *) del + del->length - 4) = down;
+		} else goto endm;
+		if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) {
+			printk("HPFS: out of memory for dtree balancing\n");
+			hpfs_brelse4(&qbh1);
+			goto endm;
+		}
+		hpfs_mark_4buffers_dirty(&qbh1);
+		hpfs_brelse4(&qbh1);
+		memcpy(de_cp, de_prev, de_prev->length);
+		hpfs_delete_de(i->i_sb, dnode, de_prev);
+		if (!de_prev->down) {
+			de_prev->length += 4;
+			de_prev->down = 1;
+			dnode->first_free += 4;
+		}
+		*(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown;
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
+		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1));
+		if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) {
+			d1->up = ndown;
+			hpfs_mark_4buffers_dirty(&qbh1);
+			hpfs_brelse4(&qbh1);
+		}
+		hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp);
+		dno = up;
+		kfree(de_cp);
+		goto try_it_again;
+	}
+	endm:
+	hpfs_mark_4buffers_dirty(&qbh);
+	end:
+	hpfs_brelse4(&qbh);
+}
+
+
+/* Delete dirent from directory */
+
+int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
+		       struct quad_buffer_head *qbh, int depth)
+{
+	struct dnode *dnode = qbh->data;
+	dnode_secno down = 0;
+	int lock = 0;
+	loff_t t;
+	if (de->first || de->last) {
+		hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno);
+		hpfs_brelse4(qbh);
+		return 1;
+	}
+	if (de->down) down = de_down_pointer(de);
+	if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) {
+		lock = 1;
+		hpfs_lock_creation(i->i_sb);
+		if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) {
+			hpfs_brelse4(qbh);
+			hpfs_unlock_creation(i->i_sb);
+			return 2;
+		}
+	}
+	i->i_version++;
+	for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
+	hpfs_delete_de(i->i_sb, dnode, de);
+	hpfs_mark_4buffers_dirty(qbh);
+	hpfs_brelse4(qbh);
+	if (down) {
+		dnode_secno a = move_to_top(i, down, dno);
+		for_all_poss(i, hpfs_pos_subst, 5, t);
+		if (a) delete_empty_dnode(i, a);
+		if (lock) hpfs_unlock_creation(i->i_sb);
+		return !a;
+	}
+	delete_empty_dnode(i, dno);
+	if (lock) hpfs_unlock_creation(i->i_sb);
+	return 0;
+}
+
+void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes,
+		       int *n_subdirs, int *n_items)
+{
+	struct dnode *dnode;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	dnode_secno ptr, odno = 0;
+	int c1, c2 = 0;
+	int d1, d2 = 0;
+	go_down:
+	if (n_dnodes) (*n_dnodes)++;
+	if (hpfs_sb(s)->sb_chk)
+		if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return;
+	ptr = 0;
+	go_up:
+	if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return;
+	if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno)
+		hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up);
+	de = dnode_first_de(dnode);
+	if (ptr) while(1) {
+		if (de->down) if (de_down_pointer(de) == ptr) goto process_de;
+		if (de->last) {
+			hpfs_brelse4(&qbh);
+			hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x",
+				ptr, dno, odno);
+			return;
+		}
+		de = de_next_de(de);
+	}
+	next_de:
+	if (de->down) {
+		odno = dno;
+		dno = de_down_pointer(de);
+		hpfs_brelse4(&qbh);
+		goto go_down;
+	}
+	process_de:
+	if (!de->first && !de->last && de->directory && n_subdirs) (*n_subdirs)++;
+	if (!de->first && !de->last && n_items) (*n_items)++;
+	if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de;
+	ptr = dno;
+	dno = dnode->up;
+	if (dnode->root_dnode) {
+		hpfs_brelse4(&qbh);
+		return;
+	}
+	hpfs_brelse4(&qbh);
+	if (hpfs_sb(s)->sb_chk)
+		if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return;
+	odno = -1;
+	goto go_up;
+}
+
+static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n,
+					  struct quad_buffer_head *qbh, struct dnode **dn)
+{
+	int i;
+	struct hpfs_dirent *de, *de_end;
+	struct dnode *dnode;
+	dnode = hpfs_map_dnode(s, dno, qbh);
+	if (!dnode) return NULL;
+	if (dn) *dn=dnode;
+	de = dnode_first_de(dnode);
+	de_end = dnode_end_de(dnode);
+	for (i = 1; de < de_end; i++, de = de_next_de(de)) {
+		if (i == n) {
+			return de;
+		}	
+		if (de->last) break;
+	}
+	hpfs_brelse4(qbh);
+	hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n);
+	return NULL;
+}
+
+dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno)
+{
+	struct quad_buffer_head qbh;
+	dnode_secno d = dno;
+	dnode_secno up = 0;
+	struct hpfs_dirent *de;
+	int c1, c2 = 0;
+
+	again:
+	if (hpfs_sb(s)->sb_chk)
+		if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible"))
+			return d;
+	if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno;
+	if (hpfs_sb(s)->sb_chk)
+		if (up && ((struct dnode *)qbh.data)->up != up)
+			hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up);
+	if (!de->down) {
+		hpfs_brelse4(&qbh);
+		return d;
+	}
+	up = d;
+	d = de_down_pointer(de);
+	hpfs_brelse4(&qbh);
+	goto again;
+}
+
+struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
+				   struct quad_buffer_head *qbh)
+{
+	loff_t pos;
+	unsigned c;
+	dnode_secno dno;
+	struct hpfs_dirent *de, *d;
+	struct hpfs_dirent *up_de;
+	struct hpfs_dirent *end_up_de;
+	struct dnode *dnode;
+	struct dnode *up_dnode;
+	struct quad_buffer_head qbh0;
+
+	pos = *posp;
+	dno = pos >> 6 << 2;
+	pos &= 077;
+	if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode)))
+		goto bail;
+
+	/* Going to the next dirent */
+	if ((d = de_next_de(de)) < dnode_end_de(dnode)) {
+		if (!(++*posp & 077)) {
+			hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp);
+			goto bail;
+		}
+		/* We're going down the tree */
+		if (d->down) {
+			*posp = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1;
+		}
+	
+		return de;
+	}
+
+	/* Going up */
+	if (dnode->root_dnode) goto bail;
+
+	if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0)))
+		goto bail;
+
+	end_up_de = dnode_end_de(up_dnode);
+	c = 0;
+	for (up_de = dnode_first_de(up_dnode); up_de < end_up_de;
+	     up_de = de_next_de(up_de)) {
+		if (!(++c & 077)) hpfs_error(inode->i_sb,
+			"map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up);
+		if (up_de->down && de_down_pointer(up_de) == dno) {
+			*posp = ((loff_t) dnode->up << 4) + c;
+			hpfs_brelse4(&qbh0);
+			return de;
+		}
+	}
+	
+	hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x",
+		dno, dnode->up);
+	hpfs_brelse4(&qbh0);
+	
+	bail:
+	*posp = 12;
+	return de;
+}
+
+/* Find a dirent in tree */
+
+struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len,
+			       dnode_secno *dd, struct quad_buffer_head *qbh)
+{
+	struct dnode *dnode;
+	struct hpfs_dirent *de;
+	struct hpfs_dirent *de_end;
+	int c1, c2 = 0;
+
+	if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n");
+	again:
+	if (hpfs_sb(inode->i_sb)->sb_chk)
+		if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL;
+	if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL;
+	
+	de_end = dnode_end_de(dnode);
+	for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) {
+		int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last);
+		if (!t) {
+			if (dd) *dd = dno;
+			return de;
+		}
+		if (t < 0) {
+			if (de->down) {
+				dno = de_down_pointer(de);
+				hpfs_brelse4(qbh);
+				goto again;
+			}
+		break;
+		}
+	}
+	hpfs_brelse4(qbh);
+	return NULL;
+}
+
+/*
+ * Remove an empty directory. In the normal case it is a single dnode with
+ * two entries, but we must also handle the obscure case where it is a whole
+ * tree of empty dnodes.
+ */
+
+void hpfs_remove_dtree(struct super_block *s, dnode_secno dno)
+{
+	struct quad_buffer_head qbh;
+	struct dnode *dnode;
+	struct hpfs_dirent *de;
+	dnode_secno d1, d2, rdno = dno;
+	while (1) {
+		if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return;
+		de = dnode_first_de(dnode);
+		if (de->last) {
+			if (de->down) d1 = de_down_pointer(de);
+			else goto error;
+			hpfs_brelse4(&qbh);
+			hpfs_free_dnode(s, dno);
+			dno = d1;
+		} else break;
+	}
+	if (!de->first) goto error;
+	d1 = de->down ? de_down_pointer(de) : 0;
+	de = de_next_de(de);
+	if (!de->last) goto error;
+	d2 = de->down ? de_down_pointer(de) : 0;
+	hpfs_brelse4(&qbh);
+	hpfs_free_dnode(s, dno);
+	do {
+		while (d1) {
+			if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return;
+			de = dnode_first_de(dnode);
+			if (!de->last) goto error;
+			d1 = de->down ? de_down_pointer(de) : 0;
+			hpfs_brelse4(&qbh);
+			hpfs_free_dnode(s, dno);
+		}
+		d1 = d2;
+		d2 = 0;
+	} while (d1);
+	return;
+	error:
+	hpfs_brelse4(&qbh);
+	hpfs_free_dnode(s, dno);
+	hpfs_error(s, "directory %08x is corrupted or not empty", rdno);
+}
+
+/* 
+ * Find the dirent for the specified fnode. The fnode stores only a truncated
+ * 15-character name, which is used to narrow the search: the truncated name
+ * serves as a lower bound and, for longer names, a copy padded with 0xff
+ * bytes serves as an upper bound, so the wanted dirent must lie between the
+ * two comparisons.
+ */
+
+struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
+				     struct fnode *f, struct quad_buffer_head *qbh)
+{
+	char *name1;
+	char *name2;
+	int name1len, name2len;
+	struct dnode *d;
+	dnode_secno dno, downd;
+	struct fnode *upf;
+	struct buffer_head *bh;
+	struct hpfs_dirent *de, *de_end;
+	int c;
+	int c1, c2 = 0;
+	int d1, d2 = 0;
+	name1 = f->name;
+	if (!(name2 = kmalloc(256, GFP_NOFS))) {
+		printk("HPFS: out of memory, can't map dirent\n");
+		return NULL;
+	}
+	if (f->len <= 15)
+		memcpy(name2, name1, name1len = name2len = f->len);
+	else {
+		memcpy(name2, name1, 15);
+		memset(name2 + 15, 0xff, 256 - 15);
+		/*name2[15] = 0xff;*/
+		name1len = 15; name2len = 256;
+	}
+	if (!(upf = hpfs_map_fnode(s, f->up, &bh))) {
+		kfree(name2);
+		return NULL;
+	}	
+	if (!upf->dirflag) {
+		brelse(bh);
+		hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up);
+		kfree(name2);
+		return NULL;
+	}
+	dno = upf->u.external[0].disk_secno;
+	brelse(bh);
+	go_down:
+	downd = 0;
+	go_up:
+	if (!(d = hpfs_map_dnode(s, dno, qbh))) {
+		kfree(name2);
+		return NULL;
+	}
+	de_end = dnode_end_de(d);
+	de = dnode_first_de(d);
+	if (downd) {
+		while (de < de_end) {
+			if (de->down) if (de_down_pointer(de) == downd) goto f;
+			de = de_next_de(de);
+		}
+		hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno);
+		hpfs_brelse4(qbh);
+		kfree(name2);
+		return NULL;
+	}
+	next_de:
+	if (de->fnode == fno) {
+		kfree(name2);
+		return de;
+	}
+	c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last);
+	if (c < 0 && de->down) {
+		dno = de_down_pointer(de);
+		hpfs_brelse4(qbh);
+		if (hpfs_sb(s)->sb_chk)
+			if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) {
+			kfree(name2);
+			return NULL;
+		}
+		goto go_down;
+	}
+	f:
+	if (de->fnode == fno) {
+		kfree(name2);
+		return de;
+	}
+	c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last);
+	if (c < 0 && !de->last) goto not_found;
+	if ((de = de_next_de(de)) < de_end) goto next_de;
+	if (d->root_dnode) goto not_found;
+	downd = dno;
+	dno = d->up;
+	hpfs_brelse4(qbh);
+	if (hpfs_sb(s)->sb_chk)
+		if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) {
+			kfree(name2);
+			return NULL;
+		}
+	goto go_up;
+	not_found:
+	hpfs_brelse4(qbh);
+	hpfs_error(s, "dirent for fnode %08x not found", fno);
+	kfree(name2);
+	return NULL;
+}
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
new file mode 100644
index 0000000..66339dc
--- /dev/null
+++ b/fs/hpfs/ea.c
@@ -0,0 +1,363 @@
+/*
+ *  linux/fs/hpfs/ea.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  handling extended attributes
+ */
+
+#include "hpfs_fn.h"
+
+/* Remove external extended attributes. 'ano' specifies whether 'a' is a
+   direct sector where the EAs start or an anode */
+
+void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
+{
+	unsigned pos = 0;
+	while (pos < len) {
+		char ex[4 + 255 + 1 + 8];
+		struct extended_attribute *ea = (struct extended_attribute *)ex;
+		if (pos + 4 > len) {
+			hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
+				ano ? "anode" : "sectors", a, len);
+			return;
+		}
+		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
+		if (ea->indirect) {
+			if (ea->valuelen != 8) {
+				hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
+					ano ? "anode" : "sectors", a, pos);
+				return;
+			}
+			if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
+				return;
+			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
+		}
+		pos += ea->namelen + ea->valuelen + 5;
+	}
+	if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9);
+	else {
+		struct buffer_head *bh;
+		struct anode *anode;
+		if ((anode = hpfs_map_anode(s, a, &bh))) {
+			hpfs_remove_btree(s, &anode->btree);
+			brelse(bh);
+			hpfs_free_sectors(s, a, 1);
+		}
+	}
+}
+
+static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
+{
+	char *ret;
+	if (!(ret = kmalloc(size + 1, GFP_NOFS))) {
+		printk("HPFS: out of memory for EA\n");
+		return NULL;
+	}
+	if (hpfs_ea_read(s, a, ano, 0, size, ret)) {
+		kfree(ret);
+		return NULL;
+	}
+	ret[size] = 0;
+	return ret;
+}
+
+static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data,
+			    int size)
+{
+	hpfs_ea_write(s, a, ano, 0, size, data);
+}
+
+/* Read an extended attribute named 'key' into the provided buffer */
+
+int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
+		char *buf, int size)
+{
+	unsigned pos;
+	int ano, len;
+	secno a;
+	struct extended_attribute *ea;
+	struct extended_attribute *ea_end = fnode_end_ea(fnode);
+	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect)
+				goto indirect;
+			if (ea->valuelen >= size)
+				return -EINVAL;
+			memcpy(buf, ea_data(ea), ea->valuelen);
+			buf[ea->valuelen] = 0;
+			return 0;
+		}
+	a = fnode->ea_secno;
+	len = fnode->ea_size_l;
+	ano = fnode->ea_anode;
+	pos = 0;
+	while (pos < len) {
+		char ex[4 + 255 + 1 + 8];
+		ea = (struct extended_attribute *)ex;
+		if (pos + 4 > len) {
+			hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
+				ano ? "anode" : "sectors", a, len);
+			return -EIO;
+		}
+		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+			return -EIO;
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect)
+				goto indirect;
+			if (ea->valuelen >= size)
+				return -EINVAL;
+			if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf))
+				return -EIO;
+			buf[ea->valuelen] = 0;
+			return 0;
+		}
+		pos += ea->namelen + ea->valuelen + 5;
+	}
+	return -ENOENT;
+indirect:
+	if (ea_len(ea) >= size)
+		return -EINVAL;
+	if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf))
+		return -EIO;
+	buf[ea_len(ea)] = 0;
+	return 0;
+}
+
+/* Read an extended attribute named 'key' */
+char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *size)
+{
+	char *ret;
+	unsigned pos;
+	int ano, len;
+	secno a;
+	struct extended_attribute *ea;
+	struct extended_attribute *ea_end = fnode_end_ea(fnode);
+	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect)
+				return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+			if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) {
+				printk("HPFS: out of memory for EA\n");
+				return NULL;
+			}
+			memcpy(ret, ea_data(ea), ea->valuelen);
+			ret[ea->valuelen] = 0;
+			return ret;
+		}
+	a = fnode->ea_secno;
+	len = fnode->ea_size_l;
+	ano = fnode->ea_anode;
+	pos = 0;
+	while (pos < len) {
+		char ex[4 + 255 + 1 + 8];
+		ea = (struct extended_attribute *)ex;
+		if (pos + 4 > len) {
+			hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
+				ano ? "anode" : "sectors", a, len);
+			return NULL;
+		}
+		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+			return NULL;
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect)
+				return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+			if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) {
+				printk("HPFS: out of memory for EA\n");
+				return NULL;
+			}
+			if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) {
+				kfree(ret);
+				return NULL;
+			}
+			ret[ea->valuelen] = 0;
+			return ret;
+		}
+		pos += ea->namelen + ea->valuelen + 5;
+	}
+	return NULL;
+}
+
+/* 
+ * Update or create the extended attribute 'key' with value 'data'. Note that
+ * if the EA already exists, it MUST have the same size as 'data'.
+ * This driver can't change the size of an EA ('cause I just don't need it).
+ */
+
+void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size)
+{
+	fnode_secno fno = inode->i_ino;
+	struct super_block *s = inode->i_sb;
+	unsigned pos;
+	int ano, len;
+	secno a;
+	unsigned char h[4];
+	struct extended_attribute *ea;
+	struct extended_attribute *ea_end = fnode_end_ea(fnode);
+	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect) {
+				if (ea_len(ea) == size)
+					set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+			} else if (ea->valuelen == size) {
+				memcpy(ea_data(ea), data, size);
+			}
+			return;
+		}
+	a = fnode->ea_secno;
+	len = fnode->ea_size_l;
+	ano = fnode->ea_anode;
+	pos = 0;
+	while (pos < len) {
+		char ex[4 + 255 + 1 + 8];
+		ea = (struct extended_attribute *)ex;
+		if (pos + 4 > len) {
+			hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
+				ano ? "anode" : "sectors", a, len);
+			return;
+		}
+		if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
+		if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+			return;
+		if (!strcmp(ea->name, key)) {
+			if (ea->indirect) {
+				if (ea_len(ea) == size)
+					set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+			}
+			else {
+				if (ea->valuelen == size)
+					hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data);
+			}
+			return;
+		}
+		pos += ea->namelen + ea->valuelen + 5;
+	}
+	if (!fnode->ea_offs) {
+		/*if (fnode->ea_size_s) {
+			hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0",
+				inode->i_ino, fnode->ea_size_s);
+			return;
+		}*/
+		fnode->ea_offs = 0xc4;
+	}
+	if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
+		hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x",
+			inode->i_ino, fnode->ea_offs, fnode->ea_size_s);
+		return;
+	}
+	if ((fnode->ea_size_s || !fnode->ea_size_l) &&
+	     fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) {
+		ea = fnode_end_ea(fnode);
+		*(char *)ea = 0;
+		ea->namelen = strlen(key);
+		ea->valuelen = size;
+		strcpy(ea->name, key);
+		memcpy(ea_data(ea), data, size);
+		fnode->ea_size_s += strlen(key) + size + 5;
+		goto ret;
+	}
+	/* Most of the code here is 99.9993422% unused. I hope there are no bugs.
+	   But then, HPFS.IFS has bugs in its EA management too. */
+	if (fnode->ea_size_s && !fnode->ea_size_l) {
+		secno n;
+		struct buffer_head *bh;
+		char *data;
+		if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return;
+		if (!(data = hpfs_get_sector(s, n, &bh))) {
+			hpfs_free_sectors(s, n, 1);
+			return;
+		}
+		memcpy(data, fnode_ea(fnode), fnode->ea_size_s);
+		fnode->ea_size_l = fnode->ea_size_s;
+		fnode->ea_size_s = 0;
+		fnode->ea_secno = n;
+		fnode->ea_anode = 0;
+		mark_buffer_dirty(bh);
+		brelse(bh);
+	}
+	pos = fnode->ea_size_l + 5 + strlen(key) + size;
+	len = (fnode->ea_size_l + 511) >> 9;
+	if (pos >= 30000) goto bail;
+	while (((pos + 511) >> 9) > len) {
+		if (!len) {
+			if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1)))
+				goto bail;
+			fnode->ea_anode = 0;
+			len++;
+		} else if (!fnode->ea_anode) {
+			if (hpfs_alloc_if_possible(s, fnode->ea_secno + len)) {
+				len++;
+			} else {
+				/* Aargh... don't know how to create ea anodes :-( */
+				/*struct buffer_head *bh;
+				struct anode *anode;
+				anode_secno a_s;
+				if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh)))
+					goto bail;
+				anode->up = fno;
+				anode->btree.fnode_parent = 1;
+				anode->btree.n_free_nodes--;
+				anode->btree.n_used_nodes++;
+				anode->btree.first_free += 12;
+				anode->u.external[0].disk_secno = fnode->ea_secno;
+				anode->u.external[0].file_secno = 0;
+				anode->u.external[0].length = len;
+				mark_buffer_dirty(bh);
+				brelse(bh);
+				fnode->ea_anode = 1;
+				fnode->ea_secno = a_s;*/
+				secno new_sec;
+				int i;
+				if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1)))
+					goto bail;
+				for (i = 0; i < len; i++) {
+					struct buffer_head *bh1, *bh2;
+					void *b1, *b2;
+					if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) {
+						hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
+						goto bail;
+					}
+					if (!(b2 = hpfs_get_sector(s, new_sec + i, &bh2))) {
+						brelse(bh1);
+						hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
+						goto bail;
+					}
+					memcpy(b2, b1, 512);
+					brelse(bh1);
+					mark_buffer_dirty(bh2);
+					brelse(bh2);
+				}
+				hpfs_free_sectors(s, fnode->ea_secno, len);
+				fnode->ea_secno = new_sec;
+				len = (pos + 511) >> 9;
+			}
+		}
+		if (fnode->ea_anode) {
+			if (hpfs_add_sector_to_btree(s, fnode->ea_secno,
+						     0, len) != -1) {
+				len++;
+			} else {
+				goto bail;
+			}
+		}
+	}
+	h[0] = 0;
+	h[1] = strlen(key);
+	h[2] = size & 0xff;
+	h[3] = size >> 8;
+	if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail;
+	if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail;
+	if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail;
+	fnode->ea_size_l = pos;
+	ret:
+	hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
+	return;
+	bail:
+	if (fnode->ea_secno)
+		if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9);
+		else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9));
+	else fnode->ea_secno = fnode->ea_size_l = 0;
+}
+	
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
new file mode 100644
index 0000000..ab144da
--- /dev/null
+++ b/fs/hpfs/file.c
@@ -0,0 +1,140 @@
+/*
+ *  linux/fs/hpfs/file.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  file VFS functions
+ */
+
+#include "hpfs_fn.h"
+
+#define BLOCKS(size) (((size) + 511) >> 9)
+
+static int hpfs_file_release(struct inode *inode, struct file *file)
+{
+	lock_kernel();
+	hpfs_write_if_changed(inode);
+	unlock_kernel();
+	return 0;
+}
+
+int hpfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	/*return file_fsync(file, dentry);*/
+	return 0; /* Don't fsync :-) */
+}
+
+/*
+ * generic_file_read often calls bmap with a non-existent sector,
+ * so we must ignore such errors.
+ */
+
+static secno hpfs_bmap(struct inode *inode, unsigned file_secno)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	unsigned n, disk_secno;
+	struct fnode *fnode;
+	struct buffer_head *bh;
+	if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
+	n = file_secno - hpfs_inode->i_file_sec;
+	if (n < hpfs_inode->i_n_secs) return hpfs_inode->i_disk_sec + n;
+	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
+	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
+	if (disk_secno == -1) return 0;
+	if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
+	return disk_secno;
+}
+
+static void hpfs_truncate(struct inode *i)
+{
+	if (IS_IMMUTABLE(i)) return /*-EPERM*/;
+	lock_kernel();
+	hpfs_i(i)->i_n_secs = 0;
+	i->i_blocks = 1 + ((i->i_size + 511) >> 9);
+	hpfs_i(i)->mmu_private = i->i_size;
+	hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
+	hpfs_write_inode(i);
+	hpfs_i(i)->i_n_secs = 0;
+	unlock_kernel();
+}
+
+static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+{
+	secno s;
+	s = hpfs_bmap(inode, iblock);
+	if (s) {
+		map_bh(bh_result, inode->i_sb, s);
+		return 0;
+	}
+	if (!create) return 0;
+	if (iblock<<9 != hpfs_i(inode)->mmu_private) {
+		BUG();
+		return -EIO;
+	}
+	if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
+		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
+		return -ENOSPC;
+	}
+	inode->i_blocks++;
+	hpfs_i(inode)->mmu_private += 512;
+	set_buffer_new(bh_result);
+	map_bh(bh_result, inode->i_sb, s);
+	return 0;
+}
+
+static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page,hpfs_get_block, wbc);
+}
+static int hpfs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,hpfs_get_block);
+}
+static int hpfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return cont_prepare_write(page,from,to,hpfs_get_block,
+		&hpfs_i(page->mapping->host)->mmu_private);
+}
+static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,hpfs_get_block);
+}
+struct address_space_operations hpfs_aops = {
+	.readpage = hpfs_readpage,
+	.writepage = hpfs_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = hpfs_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = _hpfs_bmap
+};
+
+static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t retval;
+
+	retval = generic_file_write(file, buf, count, ppos);
+	if (retval > 0) {
+		struct inode *inode = file->f_dentry->d_inode;
+		inode->i_mtime = CURRENT_TIME_SEC;
+		hpfs_i(inode)->i_dirty = 1;
+	}
+	return retval;
+}
+
+struct file_operations hpfs_file_ops =
+{
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= hpfs_file_write,
+	.mmap		= generic_file_mmap,
+	.release	= hpfs_file_release,
+	.fsync		= hpfs_file_fsync,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations hpfs_file_iops =
+{
+	.truncate	= hpfs_truncate,
+	.setattr	= hpfs_notify_change,
+};
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
new file mode 100644
index 0000000..0e84c73
--- /dev/null
+++ b/fs/hpfs/hpfs.h
@@ -0,0 +1,493 @@
+/*
+ *  linux/fs/hpfs/hpfs.h
+ *
+ *  HPFS structures by Chris Smith, 1993
+ *
+ *  a little bit modified by Mikulas Patocka, 1998-1999
+ */
+
+/* The paper
+
+     Duncan, Roy
+     Design goals and implementation of the new High Performance File System
+     Microsoft Systems Journal  Sept 1989  v4 n5 p1(13)
+
+   describes what HPFS looked like when it was new, and it is the source
+   of most of the information given here.  The rest is conjecture.
+
+   For definitive information on the Duncan paper, see it, not this file.
+   For definitive information on HPFS, ask somebody else -- this is guesswork.
+   There are certain to be many mistakes. */
+
+/* Notation */
+
+typedef unsigned secno;			/* sector number, partition relative */
+
+typedef secno dnode_secno;		/* sector number of a dnode */
+typedef secno fnode_secno;		/* sector number of an fnode */
+typedef secno anode_secno;		/* sector number of an anode */
+
+typedef u32 time32_t;		/* 32-bit time_t type */
+
+/* sector 0 */
+
+/* The boot block is very like a FAT boot block, except that the
+   29h signature byte is 28h instead, and the ID string is "HPFS". */
+
+#define BB_MAGIC 0xaa55
+
+struct hpfs_boot_block
+{
+  unsigned char jmp[3];
+  unsigned char oem_id[8];
+  unsigned char bytes_per_sector[2];	/* 512 */
+  unsigned char sectors_per_cluster;
+  unsigned char n_reserved_sectors[2];
+  unsigned char n_fats;
+  unsigned char n_rootdir_entries[2];
+  unsigned char n_sectors_s[2];
+  unsigned char media_byte;
+  unsigned short sectors_per_fat;
+  unsigned short sectors_per_track;
+  unsigned short heads_per_cyl;
+  unsigned int n_hidden_sectors;
+  unsigned int n_sectors_l;		/* size of partition */
+  unsigned char drive_number;
+  unsigned char mbz;
+  unsigned char sig_28h;		/* 28h */
+  unsigned char vol_serno[4];
+  unsigned char vol_label[11];
+  unsigned char sig_hpfs[8];		/* "HPFS    " */
+  unsigned char pad[448];
+  unsigned short magic;			/* aa55 */
+};
+
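+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * the identification test implied by the comment above -- the usual aa55
+ * magic, the 28h signature byte and the "HPFS    " ID string.  The helper
+ * name is invented, memcmp is assumed available from <linux/string.h>, and
+ * the driver performs its own checks at mount time.
+ */
+static inline int hpfs_example_is_hpfs_boot_block(struct hpfs_boot_block *b)
+{
+	return b->magic == BB_MAGIC && b->sig_28h == 0x28 &&
+	       !memcmp(b->sig_hpfs, "HPFS    ", 8);
+}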
+
+/* sector 16 */
+
+/* The super block has the pointer to the root directory. */
+
+#define SB_MAGIC 0xf995e849
+
+struct hpfs_super_block
+{
+  unsigned magic;			/* f995 e849 */
+  unsigned magic1;			/* fa53 e9c5, more magic? */
+  /*unsigned huh202;*/			/* ?? 202 = N. of B. in 1.00390625 S.*/
+  char version;				/* version of the filesystem, usually 2 */
+  char funcversion;			/* functional version - oldest version
+  					   of filesystem that can understand
+					   this disk */
+  unsigned short int zero;		/* 0 */
+  fnode_secno root;			/* fnode of root directory */
+  secno n_sectors;			/* size of filesystem */
+  unsigned n_badblocks;			/* number of bad blocks */
+  secno bitmaps;			/* pointers to free space bit maps */
+  unsigned zero1;			/* 0 */
+  secno badblocks;			/* bad block list */
+  unsigned zero3;			/* 0 */
+  time32_t last_chkdsk;			/* date last checked, 0 if never */
+  /*unsigned zero4;*/			/* 0 */
+  time32_t last_optimize;			/* date last optimized, 0 if never */
+  secno n_dir_band;			/* number of sectors in dir band */
+  secno dir_band_start;			/* first sector in dir band */
+  secno dir_band_end;			/* last sector in dir band */
+  secno dir_band_bitmap;		/* free space map, 1 dnode per bit */
+  char volume_name[32];			/* not used */
+  secno user_id_table;			/* 8 preallocated sectors - user id */
+  unsigned zero6[103];			/* 0 */
+};
+
+
+/* sector 17 */
+
+/* The spare block has pointers to spare sectors.  */
+
+#define SP_MAGIC 0xf9911849
+
+struct hpfs_spare_block
+{
+  unsigned magic;			/* f991 1849 */
+  unsigned magic1;			/* fa52 29c5, more magic? */
+
+  unsigned dirty: 1;			/* 0 clean, 1 "improperly stopped" */
+  /*unsigned flag1234: 4;*/		/* unknown flags */
+  unsigned sparedir_used: 1;		/* spare dirblks used */
+  unsigned hotfixes_used: 1;		/* hotfixes used */
+  unsigned bad_sector: 1;		/* bad sector, corrupted disk (???) */
+  unsigned bad_bitmap: 1;		/* bad bitmap */
+  unsigned fast: 1;			/* partition was fast formatted */
+  unsigned old_wrote: 1;		/* old version wrote to partition */
+  unsigned old_wrote_1: 1;		/* old version wrote to partition (?) */
+  unsigned install_dasd_limits: 1;	/* HPFS386 flags */
+  unsigned resynch_dasd_limits: 1;
+  unsigned dasd_limits_operational: 1;
+  unsigned multimedia_active: 1;
+  unsigned dce_acls_active: 1;
+  unsigned dasd_limits_dirty: 1;
+  unsigned flag67: 2;
+  unsigned char mm_contlgulty;
+  unsigned char unused;
+
+  secno hotfix_map;			/* info about remapped bad sectors */
+  unsigned n_spares_used;		/* number of hotfixes */
+  unsigned n_spares;			/* number of spares in hotfix map */
+  unsigned n_dnode_spares_free;		/* spare dnodes unused */
+  unsigned n_dnode_spares;		/* length of spare_dnodes[] list,
+					   follows in this block*/
+  secno code_page_dir;			/* code page directory block */
+  unsigned n_code_pages;		/* number of code pages */
+  /*unsigned large_numbers[2];*/	/* ?? */
+  unsigned super_crc;			/* on HPFS386 and LAN Server this is
+  					   checksum of superblock, on normal
+					   OS/2 unused */
+  unsigned spare_crc;			/* on HPFS386 checksum of spareblock */
+  unsigned zero1[15];			/* unused */
+  dnode_secno spare_dnodes[100];	/* emergency free dnode list */
+  unsigned zero2[1];			/* room for more? */
+};
+
+/* The bad block list is 4 sectors long.  The first word must be zero,
+   the remaining words give n_badblocks bad block numbers.
+   I bet you can see it coming... */
+
+#define BAD_MAGIC 0
+       
+/* The hotfix map is 4 sectors long.  It looks like
+
+       secno from[n_spares];
+       secno to[n_spares];
+
+   The to[] list is initialized to point to n_spares preallocated empty
+   sectors.  The from[] list contains the sector numbers of bad blocks
+   which have been remapped to corresponding sectors in the to[] list.
+   n_spares_used gives the length of the from[] list. */
+
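+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * how a hotfix lookup could work once the two secno arrays described above
+ * have been read from the 4 sectors at hotfix_map.  The function and
+ * parameter names are invented for the example.
+ */
+static inline secno hpfs_example_remap_hotfix(secno sec, const secno *from,
+					      const secno *to, unsigned n_spares_used)
+{
+	unsigned i;
+	for (i = 0; i < n_spares_used; i++)
+		if (from[i] == sec)
+			return to[i];	/* bad sector, use its spare instead */
+	return sec;			/* not remapped */
+}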
+
+/* Sectors 18 and 19 are preallocated and unused.
+   Maybe they're spares for 16 and 17, but simple substitution fails. */
+
+
+/* The code page info pointed to by the spare block consists of an index
+   block and blocks containing uppercasing tables.  I don't know what
+   these are for (CHKDSK, maybe?) -- OS/2 does not seem to use them
+   itself.  Linux doesn't use them either. */
+
+/* block pointed to by spareblock->code_page_dir */
+
+#define CP_DIR_MAGIC 0x494521f7
+
+struct code_page_directory
+{
+  unsigned magic;			/* 4945 21f7 */
+  unsigned n_code_pages;		/* number of pointers following */
+  unsigned zero1[2];
+  struct {
+    unsigned short ix;			/* index */
+    unsigned short code_page_number;	/* code page number */
+    unsigned bounds;			/* matches corresponding word
+					   in data block */
+    secno code_page_data;		/* sector number of a code_page_data
+					   containing c.p. array */
+    unsigned short index;		/* index in c.p. array in that sector*/
+    unsigned short unknown;		/* some unknown value; usually 0;
+    					   2 in Japanese version */
+  } array[31];				/* unknown length */
+};
+
+/* blocks pointed to by code_page_directory */
+
+#define CP_DATA_MAGIC 0x894521f7
+
+struct code_page_data
+{
+  unsigned magic;			/* 8945 21f7 */
+  unsigned n_used;			/* # elements used in c_p_data[] */
+  unsigned bounds[3];			/* looks a bit like
+					     (beg1,end1), (beg2,end2)
+					   one byte each */
+  unsigned short offs[3];		/* offsets from start of sector
+					   to start of c_p_data[ix] */
+  struct {
+    unsigned short ix;			/* index */
+    unsigned short code_page_number;	/* code page number */
+    unsigned short unknown;		/* the same as in cp directory */
+    unsigned char map[128];		/* upcase table for chars 80..ff */
+    unsigned short zero2;
+  } code_page[3];
+  unsigned char incognita[78];
+};
+
+
+/* Free space bitmaps are 4 sectors long, which is 16384 bits.
+   16384 sectors is 8 meg, and each 8 meg band has a 4-sector bitmap.
+   Bit order in the maps is little-endian.  0 means taken, 1 means free.
+
+   Bit map sectors are marked allocated in the bit maps, and so are sectors 
+   off the end of the partition.
+
+   Band 0 is sectors 0-3fff, its map is in sectors 18-1b.
+   Band 1 is 4000-7fff, its map is in 7ffc-7fff.
+   Band 2 is 8000-ffff, its map is in 8000-8003.
+   The remaining bands have maps in their first (even) or last (odd) 4 sectors
+     -- if the last, partial, band is odd its map is in its last 4 sectors.
+
+   The bitmap locations are given in a table pointed to by the super block.
+   No doubt they aren't constrained to be at 18, 7ffc, 8000, ...; that is
+   just where they usually are.
+
+   The "directory band" is a bunch of sectors preallocated for dnodes.
+   It has a 4-sector free space bitmap of its own.  Each bit in the map
+   corresponds to one 4-sector dnode, bit 0 of the map corresponding to
+   the first 4 sectors of the directory band.  The entire band is marked
+   allocated in the main bitmap.   The super block gives the locations
+   of the directory band and its bitmap.  ("band" doesn't mean it is
+   8 meg long; it isn't.)  */
+
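+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * locating the free-space bit for a sector in the scheme described above.
+ * The band index (sec >> 14) selects one of the 4-sector bitmaps listed in
+ * the table pointed to by the super block; within the band the bit layout
+ * matches tstbits() in hpfs_fn.h.  The helper name is invented.
+ */
+static inline int hpfs_example_sector_is_free(const unsigned *band_bitmap, secno sec)
+{
+	unsigned bit = sec & 0x3fff;	/* 16384 sectors (8 meg) per band */
+	return (band_bitmap[bit >> 5] >> (bit & 0x1f)) & 1;	/* 1 means free */
+}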
+
+/* dnode: directory.  4 sectors long */
+
+/* A directory is a tree of dnodes.  The fnode for a directory
+   contains one pointer, to the root dnode of the tree.  The fnode
+   never moves, the dnodes do the B-tree thing, splitting and merging
+   as files are added and removed.  */
+
+#define DNODE_MAGIC   0x77e40aae
+
+struct dnode {
+  unsigned magic;			/* 77e4 0aae */
+  unsigned first_free;			/* offset from start of dnode to
+					   first free dir entry */
+  unsigned root_dnode:1;		/* Is it root dnode? */
+  unsigned increment_me:31;		/* some kind of activity counter?
+					   Neither HPFS.IFS nor CHKDSK cares
+					   if you change this word */
+  secno up;				/* (root dnode) directory's fnode
+					   (nonroot) parent dnode */
+  dnode_secno self;			/* pointer to this dnode */
+  unsigned char dirent[2028];		/* one or more dirents */
+};
+
+struct hpfs_dirent {
+  unsigned short length;		/* offset to next dirent */
+  unsigned first: 1;			/* set on phony ^A^A (".") entry */
+  unsigned has_acl: 1;
+  unsigned down: 1;			/* down pointer present (after name) */
+  unsigned last: 1;			/* set on phony \377 entry */
+  unsigned has_ea: 1;			/* entry has EA */
+  unsigned has_xtd_perm: 1;		/* has extended perm list (???) */
+  unsigned has_explicit_acl: 1;
+  unsigned has_needea: 1;		/* ?? some EA has NEEDEA set
+					   I have no idea why this is
+					   interesting in a dir entry */
+  unsigned read_only: 1;		/* dos attrib */
+  unsigned hidden: 1;			/* dos attrib */
+  unsigned system: 1;			/* dos attrib */
+  unsigned flag11: 1;			/* would be volume label dos attrib */
+  unsigned directory: 1;		/* dos attrib */
+  unsigned archive: 1;			/* dos attrib */
+  unsigned not_8x3: 1;			/* name is not 8.3 */
+  unsigned flag15: 1;
+  fnode_secno fnode;			/* fnode giving allocation info */
+  time32_t write_date;			/* mtime */
+  unsigned file_size;			/* file length, bytes */
+  time32_t read_date;			/* atime */
+  time32_t creation_date;			/* ctime */
+  unsigned ea_size;			/* total EA length, bytes */
+  unsigned char no_of_acls : 3;		/* number of ACL's */
+  unsigned char reserver : 5;
+  unsigned char ix;			/* code page index (of filename), see
+					   struct code_page_data */
+  unsigned char namelen, name[1];	/* file name */
+  /* dnode_secno down;	  btree down pointer, if present,
+     			  follows name on next word boundary, or maybe it
+			  precedes next dirent, which is on a word boundary. */
+};
+
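+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * walking the dirents packed into a dnode using only the fields defined
+ * above.  The driver's own helpers (dnode_first_de, de_next_de and
+ * de_down_pointer in hpfs_fn.h) do the same arithmetic with extra sanity
+ * checks; the helper name here is invented.
+ */
+static inline unsigned hpfs_example_count_dirents(struct dnode *d)
+{
+	struct hpfs_dirent *de = (struct hpfs_dirent *) d->dirent;
+	struct hpfs_dirent *end = (struct hpfs_dirent *) ((char *) d + d->first_free);
+	unsigned n = 0;
+	while (de < end && !de->last) {	/* the phony \377 entry ends the list */
+		n++;			/* counts the phony "." entry too */
+		de = (struct hpfs_dirent *) ((char *) de + de->length);
+	}
+	return n;
+}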
+
+/* B+ tree: allocation info in fnodes and anodes */
+
+/* dnodes point to fnodes which are responsible for listing the sectors
+   assigned to the file.  This is done with trees of (length,address)
+   pairs.  (Actually triples, of (length, file-address, disk-address)
+   which can represent holes.  Find out if HPFS does that.)
+   At any rate, fnodes contain a small tree; if subtrees are needed
+   they occupy essentially a full block in anodes.  A leaf-level tree node
+   has 3-word entries giving sector runs, a non-leaf node has 2-word
+   entries giving subtree pointers.  A flag in the header says which. */
+
+struct bplus_leaf_node
+{
+  unsigned file_secno;			/* first file sector in extent */
+  unsigned length;			/* length, sectors */
+  secno disk_secno;			/* first corresponding disk sector */
+};
+
+struct bplus_internal_node
+{
+  unsigned file_secno;			/* subtree maps sectors < this  */
+  anode_secno down;			/* pointer to subtree */
+};
+
+struct bplus_header
+{
+  unsigned hbff: 1;	/* high bit of first free entry offset */
+  unsigned flag1: 1;
+  unsigned flag2: 1;
+  unsigned flag3: 1;
+  unsigned flag4: 1;
+  unsigned fnode_parent: 1;		/* ? we're pointed to by an fnode,
+					   the data btree or some ea or the
+					   main ea bootage pointer ea_secno */
+					/* also can get set in fnodes, which
+					   may be a chkdsk glitch or may mean
+					   this bit is irrelevant in fnodes,
+					   or this interpretation is all wet */
+  unsigned binary_search: 1;		/* suggest binary search (unused) */
+  unsigned internal: 1;			/* 1 -> (internal) tree of anodes
+					   0 -> (leaf) list of extents */
+  unsigned char fill[3];
+  unsigned char n_free_nodes;		/* free nodes in following array */
+  unsigned char n_used_nodes;		/* used nodes in following array */
+  unsigned short first_free;		/* offset from start of header to
+					   first free node in array */
+  union {
+    struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
+					       subtree pointers */
+    struct bplus_leaf_node external[0];	    /* (external) 3-word entries giving
+					       sector runs */
+  } u;
+};
+
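+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * resolving a file sector through a leaf-level node as described above.
+ * When btree->internal is set the entries are subtree pointers and would be
+ * followed down instead; the driver's real walk is hpfs_bplus_lookup() in
+ * anode.c.  The helper name is invented.
+ */
+static inline secno hpfs_example_leaf_lookup(struct bplus_header *btree, unsigned file_secno)
+{
+	int i;
+	for (i = 0; i < btree->n_used_nodes; i++) {
+		struct bplus_leaf_node *x = &btree->u.external[i];
+		if (file_secno >= x->file_secno && file_secno < x->file_secno + x->length)
+			return x->disk_secno + (file_secno - x->file_secno);
+	}
+	return 0;	/* this node does not map the requested sector */
+}
+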
+/* fnode: root of allocation b+ tree, and EA's */
+
+/* Every file and every directory has one fnode, pointed to by the directory
+   entry and pointing to the file's sectors or directory's root dnode.  EA's
+   are also stored here, and there are said to be ACL's somewhere here too. */
+
+#define FNODE_MAGIC 0xf7e40aae
+
+struct fnode
+{
+  unsigned magic;			/* f7e4 0aae */
+  unsigned zero1[2];			/* read history */
+  unsigned char len, name[15];		/* true length, truncated name */
+  fnode_secno up;			/* pointer to file's directory fnode */
+  /*unsigned zero2[3];*/
+  secno acl_size_l;
+  secno acl_secno;
+  unsigned short acl_size_s;
+  char acl_anode;
+  char zero2;				/* history bit count */
+  unsigned ea_size_l;			/* length of disk-resident ea's */
+  secno ea_secno;			/* first sector of disk-resident ea's*/
+  unsigned short ea_size_s;		/* length of fnode-resident ea's */
+
+  unsigned flag0: 1;
+  unsigned ea_anode: 1;			/* 1 -> ea_secno is an anode */
+  unsigned flag2: 1;
+  unsigned flag3: 1;
+  unsigned flag4: 1;
+  unsigned flag5: 1;
+  unsigned flag6: 1;
+  unsigned flag7: 1;
+  unsigned dirflag: 1;			/* 1 -> directory.  first & only extent
+					   points to dnode. */
+  unsigned flag9: 1;
+  unsigned flag10: 1;
+  unsigned flag11: 1;
+  unsigned flag12: 1;
+  unsigned flag13: 1;
+  unsigned flag14: 1;
+  unsigned flag15: 1;
+
+  struct bplus_header btree;		/* b+ tree, 8 extents or 12 subtrees */
+  union {
+    struct bplus_leaf_node external[8];
+    struct bplus_internal_node internal[12];
+  } u;
+
+  unsigned file_size;			/* file length, bytes */
+  unsigned n_needea;			/* number of EA's with NEEDEA set */
+  char user_id[16];			/* unused */
+  unsigned short ea_offs;		/* offset from start of fnode
+					   to first fnode-resident ea */
+  char dasd_limit_treshhold;
+  char dasd_limit_delta;
+  unsigned dasd_limit;
+  unsigned dasd_usage;
+  /*unsigned zero5[2];*/
+  unsigned char ea[316];		/* zero or more EA's, packed together
+					   with no alignment padding.
+					   (Do not use this name, get here
+					   via fnode + ea_offs. I think.) */
+};
+
+
+/* anode: 99.44% pure allocation tree */
+
+#define ANODE_MAGIC 0x37e40aae
+
+struct anode
+{
+  unsigned magic;			/* 37e4 0aae */
+  anode_secno self;			/* pointer to this anode */
+  secno up;				/* parent anode or fnode */
+
+  struct bplus_header btree;		/* b+tree, 40 extents or 60 subtrees */
+  union {
+    struct bplus_leaf_node external[40];
+    struct bplus_internal_node internal[60];
+  } u;
+
+  unsigned fill[3];			/* unused */
+};
+
+
+/* extended attributes.
+
+   A file's EA info is stored as a list of (name,value) pairs.  It is
+   usually in the fnode, but (if it's large) it is moved to a single
+   sector run outside the fnode, or to multiple runs with an anode tree
+   that points to them.
+
+   The value of a single EA is stored along with the name, or (if large)
+   it is moved to a single sector run, or multiple runs pointed to by an
+   anode tree, pointed to by the value field of the (name,value) pair.
+
+   Flags in the EA tell whether the value is immediate, in a single sector
+   run, or in multiple runs.  Flags in the fnode tell whether the EA list
+   is immediate, in a single run, or in multiple runs. */
+
+struct extended_attribute
+{
+  unsigned indirect: 1;			/* 1 -> value gives sector number
+					   where real value starts */
+  unsigned anode: 1;			/* 1 -> sector is an anode
+					   that points to fragmented value */
+  unsigned flag2: 1;
+  unsigned flag3: 1;
+  unsigned flag4: 1;
+  unsigned flag5: 1;
+  unsigned flag6: 1;
+  unsigned needea: 1;			/* required ea */
+  unsigned char namelen;		/* length of name, bytes */
+  unsigned short valuelen;		/* length of value, bytes */
+  unsigned char name[0];
+  /*
+    unsigned char name[namelen];	ascii attrib name
+    unsigned char nul;			terminating '\0', not counted
+    unsigned char value[valuelen];	value, arbitrary
+      if this.indirect, valuelen is 8 and the value is
+        unsigned length;		real length of value, bytes
+        secno secno;			sector address where it starts
+      if this.anode, the above sector number is the root of an anode tree
+        which points to the value.
+  */
+};
+
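+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * scanning a packed EA list for a name, stepping by the 4-byte header plus
+ * name, terminating '\0' and value as described above.  Indirect values
+ * (ea->indirect set) are not followed here; hpfs_get_ea()/hpfs_read_ea() in
+ * ea.c handle that.  The helper name is invented and strcmp is assumed to
+ * be available from <linux/string.h>.
+ */
+static inline struct extended_attribute *hpfs_example_find_ea(
+	struct extended_attribute *ea, struct extended_attribute *end, const char *key)
+{
+	while (ea < end) {
+		if (!strcmp((char *) ea->name, key))
+			return ea;
+		ea = (struct extended_attribute *)
+			((char *) ea + 5 + ea->namelen + ea->valuelen);
+	}
+	return NULL;
+}
+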
+/*
+   Local Variables:
+   comment-column: 40
+   End:
+*/
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
new file mode 100644
index 0000000..6628c3b
--- /dev/null
+++ b/fs/hpfs/hpfs_fn.h
@@ -0,0 +1,338 @@
+/*
+ *  linux/fs/hpfs/hpfs_fn.h
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  function headers
+ */
+
+//#define DBG
+//#define DEBUG_LOCKS
+
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/hpfs_fs.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+
+#include "hpfs.h"
+
+#define EIOERROR  EIO
+#define EFSERROR  EPERM
+#define EMEMERROR ENOMEM
+
+#define ANODE_ALLOC_FWD	512
+#define FNODE_ALLOC_FWD	0
+#define ALLOC_FWD_MIN	16
+#define ALLOC_FWD_MAX	128
+#define ALLOC_M		1
+#define FNODE_RD_AHEAD	16
+#define ANODE_RD_AHEAD	16
+#define DNODE_RD_AHEAD	4
+
+#define FREE_DNODES_ADD	58
+#define FREE_DNODES_DEL	29
+
+#define CHKCOND(x,y) if (!(x)) printk y
+
+#ifdef DBG
+#define PRINTK(x) printk x
+#else
+#undef PRINTK
+#define PRINTK(x)
+#endif
+
+struct hpfs_inode_info {
+	loff_t mmu_private;
+	ino_t i_parent_dir;	/* (directories) gives fnode of parent dir */
+	unsigned i_dno;		/* (directories) root dnode */
+	unsigned i_dpos;	/* (directories) temp for readdir */
+	unsigned i_dsubdno;	/* (directories) temp for readdir */
+	unsigned i_file_sec;	/* (files) minimalist cache of alloc info */
+	unsigned i_disk_sec;	/* (files) minimalist cache of alloc info */
+	unsigned i_n_secs;	/* (files) minimalist cache of alloc info */
+	unsigned i_ea_size;	/* size of extended attributes */
+	unsigned i_conv : 2;	/* (files) crlf->newline hackery */
+	unsigned i_ea_mode : 1;	/* file's permission is stored in ea */
+	unsigned i_ea_uid : 1;	/* file's uid is stored in ea */
+	unsigned i_ea_gid : 1;	/* file's gid is stored in ea */
+	unsigned i_dirty : 1;
+	struct semaphore i_sem;
+	struct semaphore i_parent;
+	loff_t **i_rddir_off;
+	struct inode vfs_inode;
+};
+
+struct hpfs_sb_info {
+	ino_t sb_root;			/* inode number of root dir */
+	unsigned sb_fs_size;		/* file system size, sectors */
+	unsigned sb_bitmaps;		/* sector number of bitmap list */
+	unsigned sb_dirband_start;	/* directory band start sector */
+	unsigned sb_dirband_size;	/* directory band size, dnodes */
+	unsigned sb_dmap;		/* sector number of dnode bit map */
+	unsigned sb_n_free;		/* free blocks for statfs, or -1 */
+	unsigned sb_n_free_dnodes;	/* free dnodes for statfs, or -1 */
+	uid_t sb_uid;			/* uid from mount options */
+	gid_t sb_gid;			/* gid from mount options */
+	umode_t sb_mode;		/* mode from mount options */
+	unsigned sb_conv : 2;		/* crlf->newline hackery */
+	unsigned sb_eas : 2;		/* eas: 0-ignore, 1-ro, 2-rw */
+	unsigned sb_err : 2;		/* on errs: 0-cont, 1-ro, 2-panic */
+	unsigned sb_chk : 2;		/* checks: 0-no, 1-normal, 2-strict */
+	unsigned sb_lowercase : 1;	/* downcase filenames hackery */
+	unsigned sb_was_error : 1;	/* there was an error, set dirty flag */
+	unsigned sb_chkdsk : 2;		/* chkdsk: 0-no, 1-on errs, 2-always */
+	unsigned char *sb_cp_table;	/* code page tables: */
+					/* 	128 bytes uppercasing table & */
+					/*	128 bytes lowercasing table */
+	unsigned *sb_bmp_dir;		/* main bitmap directory */
+	unsigned sb_c_bitmap;		/* current bitmap */
+	unsigned sb_max_fwd_alloc;	/* max forward allocation */
+	struct semaphore hpfs_creation_de; /* when creating dirents, nobody else
+					   can alloc blocks */
+	/*unsigned sb_mounting : 1;*/
+	int sb_timeshift;
+};
+
+/*
+ * conv= options
+ */
+
+#define CONV_BINARY 0			/* no conversion */
+#define CONV_TEXT 1			/* crlf->newline */
+#define CONV_AUTO 2			/* decide based on file contents */
+
+/* Four 512-byte buffers and the 2k block obtained by concatenating them */
+
+struct quad_buffer_head {
+	struct buffer_head *bh[4];
+	void *data;
+};
+
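+/*
+ * Editor's note (illustrative, not part of the original patch): the usual
+ * access pattern for one of these 2k blocks, as seen throughout dnode.c
+ * above, is
+ *
+ *	struct quad_buffer_head qbh;
+ *	struct dnode *d = hpfs_map_dnode(s, dno, &qbh);
+ *	if (d) {
+ *		... read or modify the 2k of data at qbh.data ...
+ *		hpfs_mark_4buffers_dirty(&qbh);	(only if modified)
+ *		hpfs_brelse4(&qbh);		(always release the 4 buffers)
+ *	}
+ */
+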
+/* The b-tree down pointer from a dir entry */
+
+static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
+{
+  CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
+  return *(dnode_secno *) ((void *) de + de->length - 4);
+}
+
+/* The first dir entry in a dnode */
+
+static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode)
+{
+  return (void *) dnode->dirent;
+}
+
+/* The end+1 of the dir entries */
+
+static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode)
+{
+  CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free));
+  return (void *) dnode + dnode->first_free;
+}
+
+/* The dir entry after dir entry de */
+
+static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de)
+{
+  CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length));
+  return (void *) de + de->length;
+}
+
+static inline struct extended_attribute *fnode_ea(struct fnode *fnode)
+{
+	return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s);
+}
+
+static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode)
+{
+	return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s);
+}
+
+static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
+{
+	return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen);
+}
+
+static inline secno ea_sec(struct extended_attribute *ea)
+{
+	return *(secno *)((char *)ea + 9 + ea->namelen);
+}
+
+static inline secno ea_len(struct extended_attribute *ea)
+{
+	return *(secno *)((char *)ea + 5 + ea->namelen);
+}
+
+static inline char *ea_data(struct extended_attribute *ea)
+{
+	return (char *)((char *)ea + 5 + ea->namelen);
+}
+
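+/* On-disk size of a dirent: a 0x1f-byte fixed header plus the name, rounded
+   up to a multiple of 4, plus 4 more bytes when a b-tree down pointer is
+   present (this matches the length check in hpfs_map_dnode()) */
+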
+static inline unsigned de_size(int namelen, secno down_ptr)
+{
+	return ((0x1f + namelen + 3) & ~3) + (down_ptr ? 4 : 0);
+}
+
+static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
+{
+	int a;
+	int n;
+	if (!dst || !src) return;
+	a = dst->down;
+	n = dst->not_8x3;
+	memcpy((char *)dst + 2, (char *)src + 2, 28);
+	dst->down = a;
+	dst->not_8x3 = n;
+}
+
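+/* Test whether n consecutive bits starting at bit b are all set in a
+   4-sector (16384-bit) bitmap.  Returns 0 if they are, n if the range falls
+   outside the bitmap, otherwise the 1-based offset of the first clear bit */
+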
+static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n)
+{
+	int i;
+	if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
+	if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1;
+	for (i = 1; i < n; i++)
+		if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1))
+			return i + 1;
+	return 0;
+}
+
+/* alloc.c */
+
+int hpfs_chk_sectors(struct super_block *, secno, int, char *);
+secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int);
+int hpfs_alloc_if_possible(struct super_block *, secno);
+void hpfs_free_sectors(struct super_block *, secno, unsigned);
+int hpfs_check_free_dnodes(struct super_block *, int);
+void hpfs_free_dnode(struct super_block *, secno);
+struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int);
+struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
+struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
+
+/* anode.c */
+
+secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_header *, unsigned, struct buffer_head *);
+secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
+void hpfs_remove_btree(struct super_block *, struct bplus_header *);
+int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
+int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *);
+void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
+void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
+void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
+
+/* buffer.c */
+
+void hpfs_lock_creation(struct super_block *);
+void hpfs_unlock_creation(struct super_block *);
+void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
+void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
+void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int);
+void *hpfs_get_4sectors(struct super_block *, unsigned, struct quad_buffer_head *);
+void hpfs_brelse4(struct quad_buffer_head *);
+void hpfs_mark_4buffers_dirty(struct quad_buffer_head *);
+
+/* dentry.c */
+
+void hpfs_set_dentry_operations(struct dentry *);
+
+/* dir.c */
+
+struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct file_operations hpfs_dir_ops;
+
+/* dnode.c */
+
+void hpfs_add_pos(struct inode *, loff_t *);
+void hpfs_del_pos(struct inode *, loff_t *);
+struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno);
+int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int);
+int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
+void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
+dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
+struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
+struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *);
+void hpfs_remove_dtree(struct super_block *, dnode_secno);
+struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);
+
+/* ea.c */
+
+void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
+int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
+char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
+void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int);
+
+/* file.c */
+
+int hpfs_file_fsync(struct file *, struct dentry *, int);
+extern struct file_operations hpfs_file_ops;
+extern struct inode_operations hpfs_file_iops;
+extern struct address_space_operations hpfs_aops;
+
+/* inode.c */
+
+void hpfs_init_inode(struct inode *);
+void hpfs_read_inode(struct inode *);
+void hpfs_write_inode(struct inode *);
+void hpfs_write_inode_nolock(struct inode *);
+int hpfs_notify_change(struct dentry *, struct iattr *);
+void hpfs_write_if_changed(struct inode *);
+void hpfs_delete_inode(struct inode *);
+
+/* map.c */
+
+unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
+unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
+char *hpfs_load_code_page(struct super_block *, secno);
+secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
+struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
+struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
+dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);
+
+/* name.c */
+
+unsigned char hpfs_upcase(unsigned char *, unsigned char);
+int hpfs_chk_name(unsigned char *, unsigned *);
+char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
+int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int);
+int hpfs_is_name_long(unsigned char *, unsigned);
+void hpfs_adjust_length(unsigned char *, unsigned *);
+void hpfs_decide_conv(struct inode *, unsigned char *, unsigned);
+
+/* namei.c */
+
+extern struct inode_operations hpfs_dir_iops;
+extern struct address_space_operations hpfs_symlink_aops;
+
+static inline struct hpfs_inode_info *hpfs_i(struct inode *inode)
+{
+	return list_entry(inode, struct hpfs_inode_info, vfs_inode);
+}
+
+static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* super.c */
+
+void hpfs_error(struct super_block *, char *, ...);
+int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
+unsigned hpfs_count_one_bitmap(struct super_block *, secno);
+
+/*
+ * local time (HPFS) to GMT (Unix)
+ */
+
+static inline time_t local_to_gmt(struct super_block *s, time32_t t)
+{
+	extern struct timezone sys_tz;
+	return t + sys_tz.tz_minuteswest * 60 + hpfs_sb(s)->sb_timeshift;
+}
+
+static inline time32_t gmt_to_local(struct super_block *s, time_t t)
+{
+	extern struct timezone sys_tz;
+	return t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
+}
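+
+/*
+ * A worked example, assuming sys_tz.tz_minuteswest == 300 (UTC-5) and
+ * timeshift=0: local_to_gmt() turns an on-disk timestamp t into
+ * t + 18000 seconds of Unix time, and gmt_to_local() subtracts the same
+ * 18000 seconds when the timestamp is written back to the dirent.
+ */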
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
new file mode 100644
index 0000000..38b1741
--- /dev/null
+++ b/fs/hpfs/inode.c
@@ -0,0 +1,291 @@
+/*
+ *  linux/fs/hpfs/inode.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  inode VFS functions
+ */
+
+#include "hpfs_fn.h"
+
+void hpfs_init_inode(struct inode *i)
+{
+	struct super_block *sb = i->i_sb;
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+
+	i->i_uid = hpfs_sb(sb)->sb_uid;
+	i->i_gid = hpfs_sb(sb)->sb_gid;
+	i->i_mode = hpfs_sb(sb)->sb_mode;
+	hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
+	i->i_blksize = 512;
+	i->i_size = -1;
+	i->i_blocks = -1;
+	
+	hpfs_inode->i_dno = 0;
+	hpfs_inode->i_n_secs = 0;
+	hpfs_inode->i_file_sec = 0;
+	hpfs_inode->i_disk_sec = 0;
+	hpfs_inode->i_dpos = 0;
+	hpfs_inode->i_dsubdno = 0;
+	hpfs_inode->i_ea_mode = 0;
+	hpfs_inode->i_ea_uid = 0;
+	hpfs_inode->i_ea_gid = 0;
+	hpfs_inode->i_ea_size = 0;
+
+	hpfs_inode->i_rddir_off = NULL;
+	hpfs_inode->i_dirty = 0;
+
+	i->i_ctime.tv_sec = i->i_ctime.tv_nsec = 0;
+	i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
+	i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
+}
+
+void hpfs_read_inode(struct inode *i)
+{
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	struct super_block *sb = i->i_sb;
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	unsigned char *ea;
+	int ea_size;
+
+	if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
+		/*i->i_mode |= S_IFREG;
+		i->i_mode &= ~0111;
+		i->i_op = &hpfs_file_iops;
+		i->i_fop = &hpfs_file_ops;
+		i->i_nlink = 0;*/
+		make_bad_inode(i);
+		return;
+	}
+	if (hpfs_sb(i->i_sb)->sb_eas) {
+		if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) {
+			if (ea_size == 2) {
+				i->i_uid = le16_to_cpu(*(u16*)ea);
+				hpfs_inode->i_ea_uid = 1;
+			}
+			kfree(ea);
+		}
+		if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) {
+			if (ea_size == 2) {
+				i->i_gid = le16_to_cpu(*(u16*)ea);
+				hpfs_inode->i_ea_gid = 1;
+			}
+			kfree(ea);
+		}
+		if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) {
+			kfree(ea);
+			i->i_mode = S_IFLNK | 0777;
+			i->i_op = &page_symlink_inode_operations;
+			i->i_data.a_ops = &hpfs_symlink_aops;
+			i->i_nlink = 1;
+			i->i_size = ea_size;
+			i->i_blocks = 1;
+			brelse(bh);
+			return;
+		}
+		if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) {
+			int rdev = 0;
+			umode_t mode = hpfs_sb(sb)->sb_mode;
+			if (ea_size == 2) {
+				mode = le16_to_cpu(*(u16*)ea);
+				hpfs_inode->i_ea_mode = 1;
+			}
+			kfree(ea);
+			i->i_mode = mode;
+			if (S_ISBLK(mode) || S_ISCHR(mode)) {
+				if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) {
+					if (ea_size == 4)
+						rdev = le32_to_cpu(*(u32*)ea);
+					kfree(ea);
+				}
+			}
+			if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
+				brelse(bh);
+				i->i_nlink = 1;
+				i->i_size = 0;
+				i->i_blocks = 1;
+				init_special_inode(i, mode,
+					new_decode_dev(rdev));
+				return;
+			}
+		}
+	}
+	if (fnode->dirflag) {
+		unsigned n_dnodes, n_subdirs;
+		i->i_mode |= S_IFDIR;
+		i->i_op = &hpfs_dir_iops;
+		i->i_fop = &hpfs_dir_ops;
+		hpfs_inode->i_parent_dir = fnode->up;
+		hpfs_inode->i_dno = fnode->u.external[0].disk_secno;
+		if (hpfs_sb(sb)->sb_chk >= 2) {
+			struct buffer_head *bh0;
+			if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
+		}
+		n_dnodes = 0; n_subdirs = 0;
+		hpfs_count_dnodes(i->i_sb, hpfs_inode->i_dno, &n_dnodes, &n_subdirs, NULL);
+		i->i_blocks = 4 * n_dnodes;
+		i->i_size = 2048 * n_dnodes;
+		i->i_nlink = 2 + n_subdirs;
+	} else {
+		i->i_mode |= S_IFREG;
+		if (!hpfs_inode->i_ea_mode) i->i_mode &= ~0111;
+		i->i_op = &hpfs_file_iops;
+		i->i_fop = &hpfs_file_ops;
+		i->i_nlink = 1;
+		i->i_size = fnode->file_size;
+		i->i_blocks = ((i->i_size + 511) >> 9) + 1;
+		i->i_data.a_ops = &hpfs_aops;
+		hpfs_i(i)->mmu_private = i->i_size;
+	}
+	brelse(bh);
+}
+
+static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	/*if (fnode->acl_size_l || fnode->acl_size_s) {
+		   Some unknown structures like ACL may be in fnode,
+		   we'd better not overwrite them
+		hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
+	} else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
+		u32 ea;
+		if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
+			ea = cpu_to_le32(i->i_uid);
+			hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2);
+			hpfs_inode->i_ea_uid = 1;
+		}
+		if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
+			ea = cpu_to_le32(i->i_gid);
+			hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
+			hpfs_inode->i_ea_gid = 1;
+		}
+		if (!S_ISLNK(i->i_mode))
+			if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111))
+			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))
+			  && i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333))
+			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) {
+				ea = cpu_to_le32(i->i_mode);
+				hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2);
+				hpfs_inode->i_ea_mode = 1;
+			}
+		if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
+			ea = cpu_to_le32(new_encode_dev(i->i_rdev));
+			hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4);
+		}
+	}
+}
+
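+/* Write an inode back to disk.  The timestamps and attributes live in the
+   dirent inside the parent directory, so the parent inode is looked up (and
+   read in if it is new) and i_parent/i_sem are held around
+   hpfs_write_inode_nolock() */
+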
+void hpfs_write_inode(struct inode *i)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	struct inode *parent;
+	if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
+	if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
+		if (*hpfs_inode->i_rddir_off) printk("HPFS: write_inode: some position still there\n");
+		kfree(hpfs_inode->i_rddir_off);
+		hpfs_inode->i_rddir_off = NULL;
+	}
+	down(&hpfs_inode->i_parent);
+	if (!i->i_nlink) {
+		up(&hpfs_inode->i_parent);
+		return;
+	}
+	parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
+	if (parent) {
+		hpfs_inode->i_dirty = 0;
+		if (parent->i_state & I_NEW) {
+			hpfs_init_inode(parent);
+			hpfs_read_inode(parent);
+			unlock_new_inode(parent);
+		}
+		down(&hpfs_inode->i_sem);
+		hpfs_write_inode_nolock(i);
+		up(&hpfs_inode->i_sem);
+		iput(parent);
+	} else {
+		mark_inode_dirty(i);
+	}
+	up(&hpfs_inode->i_parent);
+}
+
+void hpfs_write_inode_nolock(struct inode *i)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
+	if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) return;
+	if (i->i_ino != hpfs_sb(i->i_sb)->sb_root && i->i_nlink) {
+		if (!(de = map_fnode_dirent(i->i_sb, i->i_ino, fnode, &qbh))) {
+			brelse(bh);
+			return;
+		}
+	} else de = NULL;
+	if (S_ISREG(i->i_mode)) {
+		fnode->file_size = i->i_size;
+		if (de) de->file_size = i->i_size;
+	} else if (S_ISDIR(i->i_mode)) {
+		fnode->file_size = 0;
+		if (de) de->file_size = 0;
+	}
+	hpfs_write_inode_ea(i, fnode);
+	if (de) {
+		de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
+		de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
+		de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
+		de->read_only = !(i->i_mode & 0222);
+		de->ea_size = hpfs_inode->i_ea_size;
+		hpfs_mark_4buffers_dirty(&qbh);
+		hpfs_brelse4(&qbh);
+	}
+	if (S_ISDIR(i->i_mode)) {
+		if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
+			de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
+			de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
+			de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
+			de->read_only = !(i->i_mode & 0222);
+			de->ea_size = /*hpfs_inode->i_ea_size*/0;
+			de->file_size = 0;
+			hpfs_mark_4buffers_dirty(&qbh);
+			hpfs_brelse4(&qbh);
+		} else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino);
+	}
+	mark_buffer_dirty(bh);
+	brelse(bh);
+}
+
+int hpfs_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error=0;
+	lock_kernel();
+	if ( ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) ||
+	     (hpfs_sb(inode->i_sb)->sb_root == inode->i_ino) ) {
+		error = -EINVAL;
+	} else if ((error = inode_change_ok(inode, attr))) {
+	} else if ((error = inode_setattr(inode, attr))) {
+	} else {
+		hpfs_write_inode(inode);
+	}
+	unlock_kernel();
+	return error;
+}
+
+void hpfs_write_if_changed(struct inode *inode)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+
+	if (hpfs_inode->i_dirty)
+		hpfs_write_inode(inode);
+}
+
+void hpfs_delete_inode(struct inode *inode)
+{
+	lock_kernel();
+	hpfs_remove_fnode(inode->i_sb, inode->i_ino);
+	unlock_kernel();
+	clear_inode(inode);
+}
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
new file mode 100644
index 0000000..0fecdac
--- /dev/null
+++ b/fs/hpfs/map.c
@@ -0,0 +1,275 @@
+/*
+ *  linux/fs/hpfs/map.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  mapping structures to memory with some minimal checks
+ */
+
+#include "hpfs_fn.h"
+
+unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
+{
+	return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
+}
+
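+/* Map one block of the allocation bitmap.  A bitmap block is 4 sectors
+   (2048 bytes) and thus covers 16384 sectors of the filesystem, which is
+   what the size check against sb_fs_size below relies on */
+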
+unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+			 struct quad_buffer_head *qbh, char *id)
+{
+	secno sec;
+	if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
+		hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
+		return NULL;
+	}
+	sec = hpfs_sb(s)->sb_bmp_dir[bmp_block];
+	if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
+		hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
+		return NULL;
+	}
+	return hpfs_map_4sectors(s, sec, qbh, 4);
+}
+
+/*
+ * Load first code page into kernel memory, return pointer to 256-byte array,
+ * first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are
+ * lowercasing table
+ */
+
+char *hpfs_load_code_page(struct super_block *s, secno cps)
+{
+	struct buffer_head *bh;
+	secno cpds;
+	unsigned cpi;
+	unsigned char *ptr;
+	unsigned char *cp_table;
+	int i;
+	struct code_page_data *cpd;
+	struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
+	if (!cp) return NULL;
+	if (cp->magic != CP_DIR_MAGIC) {
+		printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic);
+		brelse(bh);
+		return NULL;
+	}
+	if (!cp->n_code_pages) {
+		printk("HPFS: n_code_pages == 0\n");
+		brelse(bh);
+		return NULL;
+	}
+	cpds = cp->array[0].code_page_data;
+	cpi = cp->array[0].index;
+	brelse(bh);
+
+	if (cpi >= 3) {
+		printk("HPFS: Code page index out of array\n");
+		return NULL;
+	}
+	
+	if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
+	if ((unsigned)cpd->offs[cpi] > 0x178) {
+		printk("HPFS: Code page index out of sector\n");
+		brelse(bh);
+		return NULL;
+	}
+	ptr = (char *)cpd + cpd->offs[cpi] + 6;
+	if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
+		printk("HPFS: out of memory for code page table\n");
+		brelse(bh);
+		return NULL;
+	}
+	memcpy(cp_table, ptr, 128);
+	brelse(bh);
+
+	/* Try to build the lowercasing table by inverting the uppercasing one:
+	   start from the identity mapping and, for every character whose
+	   uppercase image differs from it, map that image back to the original
+	   character */
+
+	for (i=128; i<256; i++) cp_table[i]=i;
+	for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128)
+		cp_table[cp_table[i-128]] = i;
+	
+	return cp_table;
+}
+
+secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
+{
+	struct buffer_head *bh;
+	int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
+	int i;
+	secno *b;
+	if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
+		printk("HPFS: can't allocate memory for bitmap directory\n");
+		return NULL;
+	}	
+	for (i=0;i<n;i++) {
+		secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
+		if (!d) {
+			kfree(b);
+			return NULL;
+		}
+		memcpy((char *)b + 512 * i, d, 512);
+		brelse(bh);
+	}
+	return b;
+}
+
+/*
+ * Load fnode to memory
+ */
+
+struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp)
+{
+	struct fnode *fnode;
+	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) {
+		return NULL;
+	}
+	if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) {
+		if (hpfs_sb(s)->sb_chk) {
+			struct extended_attribute *ea;
+			struct extended_attribute *ea_end;
+			if (fnode->magic != FNODE_MAGIC) {
+				hpfs_error(s, "bad magic on fnode %08x", ino);
+				goto bail;
+			}
+			if (!fnode->dirflag) {
+				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
+				    (fnode->btree.internal ? 12 : 8)) {
+					hpfs_error(s, "bad number of nodes in fnode %08x", ino);
+					goto bail;
+				}
+				if (fnode->btree.first_free !=
+				    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
+					hpfs_error(s, "bad first_free pointer in fnode %08x", ino);
+					goto bail;
+				}
+			}
+			if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
+			   (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
+				hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x",
+					ino, fnode->ea_offs, fnode->ea_size_s);
+				goto bail;
+			}
+			ea = fnode_ea(fnode);
+			ea_end = fnode_end_ea(fnode);
+			while (ea != ea_end) {
+				if (ea > ea_end) {
+					hpfs_error(s, "bad EA in fnode %08x", ino);
+					goto bail;
+				}
+				ea = next_ea(ea);
+			}
+		}
+	}
+	return fnode;
+	bail:
+	brelse(*bhp);
+	return NULL;
+}
+
+struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp)
+{
+	struct anode *anode;
+	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
+	if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
+		if (hpfs_sb(s)->sb_chk) {
+			if (anode->magic != ANODE_MAGIC || anode->self != ano) {
+				hpfs_error(s, "bad magic on anode %08x", ano);
+				goto bail;
+			}
+			if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
+			    (anode->btree.internal ? 60 : 40)) {
+				hpfs_error(s, "bad number of nodes in anode %08x", ano);
+				goto bail;
+			}
+			if (anode->btree.first_free !=
+			    8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
+				hpfs_error(s, "bad first_free pointer in anode %08x", ano);
+				goto bail;
+			}
+		}
+	return anode;
+	bail:
+	brelse(*bhp);
+	return NULL;
+}
+
+/*
+ * Load dnode to memory and do some checks
+ */
+
+struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
+			     struct quad_buffer_head *qbh)
+{
+	struct dnode *dnode;
+	if (hpfs_sb(s)->sb_chk) {
+		if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL;
+		if (secno & 3) {
+			hpfs_error(s, "dnode %08x not aligned to 4 sectors", secno);
+			return NULL;
+		}	
+	}
+	if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
+		if (hpfs_sb(s)->sb_chk) {
+			unsigned p, pp = 0;
+			unsigned char *d = (char *)dnode;
+			int b = 0;
+			if (dnode->magic != DNODE_MAGIC) {
+				hpfs_error(s, "bad magic on dnode %08x", secno);
+				goto bail;
+			}
+			if (dnode->self != secno)
+				hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self);
+			/* Check dirents - bad dirents would cause infinite
+			   loops or out-of-bounds memory accesses */
+			if (dnode->first_free > 2048/* || dnode->first_free < 84*/) {
+				hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free);
+				goto bail;
+			}
+			for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) {
+				struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
+				if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) {
+					hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
+					goto bail;
+				}
+				if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) {
+					if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok;
+					hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
+					goto bail;
+				}
+				ok:
+				if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down;
+				if (de->down) if (de_down_pointer(de) < 0x10) {
+					hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
+					goto bail;
+				}
+				pp = p;
+				
+			}
+			if (p != dnode->first_free) {
+				hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
+				goto bail;
+			}
+			if (d[pp + 30] != 1 || d[pp + 31] != 255) {
+				hpfs_error(s, "dnode %08x does not end with \\377 entry", secno);
+				goto bail;
+			}
+			if (b == 3) printk("HPFS: warning: unbalanced dnode tree, dnode %08x; see hpfs.txt for more info\n", secno);
+		}
+	return dnode;
+	bail:
+	hpfs_brelse4(qbh);
+	return NULL;
+}
+
+dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
+{
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	dnode_secno dno;
+
+	fnode = hpfs_map_fnode(s, ino, &bh);
+	if (!fnode)
+		return 0;
+
+	dno = fnode->u.external[0].disk_secno;
+	brelse(bh);
+	return dno;
+}
diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c
new file mode 100644
index 0000000..1f4a964
--- /dev/null
+++ b/fs/hpfs/name.c
@@ -0,0 +1,144 @@
+/*
+ *  linux/fs/hpfs/name.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  operations with filenames
+ */
+
+#include "hpfs_fn.h"
+
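+/* Filename suffixes and prefixes that hpfs_decide_conv() below assumes to
+   denote text files when the conv=auto mount option is in effect */
+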
+static char *text_postfix[]={
+".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
+".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
+".RC", ".TEX", ".TXT", ".Y", ""};
+
+static char *text_prefix[]={
+"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
+"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
+
+void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len)
+{
+	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
+	int i;
+	if (hpfs_inode->i_conv != CONV_AUTO) return;
+	for (i = 0; *text_postfix[i]; i++) {
+		int l = strlen(text_postfix[i]);
+		if (l <= len)
+			if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0))
+				goto text;
+	}
+	for (i = 0; *text_prefix[i]; i++) {
+		int l = strlen(text_prefix[i]);
+		if (l <= len)
+			if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0))
+				goto text;
+	}
+	hpfs_inode->i_conv = CONV_BINARY;
+	return;
+	text:
+	hpfs_inode->i_conv = CONV_TEXT;
+	return;
+}
+
+static inline int not_allowed_char(unsigned char c)
+{
+	return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' ||
+	      c=='>' || c=='?' || c=='\\' || c=='|';
+}
+
+static inline int no_dos_char(unsigned char c)
+{	/* Characters that are allowed in HPFS but not in DOS */
+	return c=='+' || c==',' || c==';' || c=='=' || c=='[' || c==']';
+}
+
+static inline unsigned char upcase(unsigned char *dir, unsigned char a)
+{
+	if (a<128 || a==255) return a>='a' && a<='z' ? a - 0x20 : a;
+	if (!dir) return a;
+	return dir[a-128];
+}
+
+unsigned char hpfs_upcase(unsigned char *dir, unsigned char a)
+{
+	return upcase(dir, a);
+}
+
+static inline unsigned char locase(unsigned char *dir, unsigned char a)
+{
+	if (a<128 || a==255) return a>='A' && a<='Z' ? a + 0x20 : a;
+	if (!dir) return a;
+	return dir[a];
+}
+
+int hpfs_chk_name(unsigned char *name, unsigned *len)
+{
+	int i;
+	if (*len > 254) return -ENAMETOOLONG;
+	hpfs_adjust_length(name, len);
+	if (!*len) return -EINVAL;
+	for (i = 0; i < *len; i++) if (not_allowed_char(name[i])) return -EINVAL;
+	if (*len == 1) if (name[0] == '.') return -EINVAL;
+	if (*len == 2) if (name[0] == '.' && name[1] == '.') return -EINVAL;
+	return 0;
+}
+
+char *hpfs_translate_name(struct super_block *s, unsigned char *from,
+			  unsigned len, int lc, int lng)
+{
+	char *to;
+	int i;
+	if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
+		printk("HPFS: Long name flag mismatch - name ");
+		for (i=0; i<len; i++) printk("%c", from[i]);
+		printk(" misidentified as %s.\n", lng ? "short" : "long");
+		printk("HPFS: It's nothing serious. It could happen because of a bug in OS/2.\nHPFS: Set checks=normal to disable this message.\n");
+	}
+	if (!lc) return from;
+	if (!(to = kmalloc(len, GFP_KERNEL))) {
+		printk("HPFS: can't allocate memory for name conversion buffer\n");
+		return from;
+	}
+	for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]);
+	return to;
+}
+
+int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
+		       unsigned char *n2, unsigned l2, int last)
+{
+	unsigned l = l1 < l2 ? l1 : l2;
+	unsigned i;
+	if (last) return -1;
+	for (i = 0; i < l; i++) {
+		unsigned char c1 = upcase(hpfs_sb(s)->sb_cp_table,n1[i]);
+		unsigned char c2 = upcase(hpfs_sb(s)->sb_cp_table,n2[i]);
+		if (c1 < c2) return -1;
+		if (c1 > c2) return 1;
+	}
+	if (l1 < l2) return -1;
+	if (l1 > l2) return 1;
+	return 0;
+}
+
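+/* A name is "long" (not representable in DOS 8.3 form) if its base part is
+   empty, longer than 8 characters or contains a non-DOS character, or if the
+   name has more than one dot, a non-DOS character after the dot, or an
+   extension longer than 3 characters */
+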
+int hpfs_is_name_long(unsigned char *name, unsigned len)
+{
+	int i,j;
+	for (i = 0; i < len && name[i] != '.'; i++)
+		if (no_dos_char(name[i])) return 1;
+	if (!i || i > 8) return 1;
+	if (i == len) return 0;
+	for (j = i + 1; j < len; j++)
+		if (name[j] == '.' || no_dos_char(name[j])) return 1;
+	return j - i > 4;
+}
+
+/* OS/2 clears dots and spaces at the end of file name, so we have to do the same */
+
+void hpfs_adjust_length(unsigned char *name, unsigned *len)
+{
+	if (!*len) return;
+	if (*len == 1 && name[0] == '.') return;
+	if (*len == 2 && name[0] == '.' && name[1] == '.') return;
+	while (*len && (name[*len - 1] == '.' || name[*len - 1] == ' '))
+		(*len)--;
+}
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
new file mode 100644
index 0000000..8ff8fc4
--- /dev/null
+++ b/fs/hpfs/namei.c
@@ -0,0 +1,673 @@
+/*
+ *  linux/fs/hpfs/namei.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  adding & removing files & directories
+ */
+
+#include "hpfs_fn.h"
+
+static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct quad_buffer_head qbh0;
+	struct buffer_head *bh;
+	struct hpfs_dirent *de;
+	struct fnode *fnode;
+	struct dnode *dnode;
+	struct inode *result;
+	fnode_secno fno;
+	dnode_secno dno;
+	int r;
+	struct hpfs_dirent dee;
+	int err;
+	if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+	lock_kernel();
+	err = -ENOSPC;
+	fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
+	if (!fnode)
+		goto bail;
+	dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1);
+	if (!dnode)
+		goto bail1;
+	memset(&dee, 0, sizeof dee);
+	dee.directory = 1;
+	if (!(mode & 0222)) dee.read_only = 1;
+	/*dee.archive = 0;*/
+	dee.hidden = name[0] == '.';
+	dee.fnode = fno;
+	dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+	result = new_inode(dir->i_sb);
+	if (!result)
+		goto bail2;
+	hpfs_init_inode(result);
+	result->i_ino = fno;
+	hpfs_i(result)->i_parent_dir = dir->i_ino;
+	hpfs_i(result)->i_dno = dno;
+	result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+	result->i_ctime.tv_nsec = 0; 
+	result->i_mtime.tv_nsec = 0; 
+	result->i_atime.tv_nsec = 0; 
+	hpfs_i(result)->i_ea_size = 0;
+	result->i_mode |= S_IFDIR;
+	result->i_op = &hpfs_dir_iops;
+	result->i_fop = &hpfs_dir_ops;
+	result->i_blocks = 4;
+	result->i_size = 2048;
+	result->i_nlink = 2;
+	if (dee.read_only)
+		result->i_mode &= ~0222;
+
+	down(&hpfs_i(dir)->i_sem);
+	r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+	if (r == 1)
+		goto bail3;
+	if (r == -1) {
+		err = -EEXIST;
+		goto bail3;
+	}
+	fnode->len = len;
+	memcpy(fnode->name, name, len > 15 ? 15 : len);
+	fnode->up = dir->i_ino;
+	fnode->dirflag = 1;
+	fnode->btree.n_free_nodes = 7;
+	fnode->btree.n_used_nodes = 1;
+	fnode->btree.first_free = 0x14;
+	fnode->u.external[0].disk_secno = dno;
+	fnode->u.external[0].file_secno = -1;
+	dnode->root_dnode = 1;
+	dnode->up = fno;
+	de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0);
+	de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds());
+	if (!(mode & 0222)) de->read_only = 1;
+	de->first = de->directory = 1;
+	/*de->hidden = de->system = 0;*/
+	de->fnode = fno;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	hpfs_mark_4buffers_dirty(&qbh0);
+	hpfs_brelse4(&qbh0);
+	dir->i_nlink++;
+	insert_inode_hash(result);
+
+	if (result->i_uid != current->fsuid ||
+	    result->i_gid != current->fsgid ||
+	    result->i_mode != (mode | S_IFDIR)) {
+		result->i_uid = current->fsuid;
+		result->i_gid = current->fsgid;
+		result->i_mode = mode | S_IFDIR;
+		hpfs_write_inode_nolock(result);
+	}
+	d_instantiate(dentry, result);
+	up(&hpfs_i(dir)->i_sem);
+	unlock_kernel();
+	return 0;
+bail3:
+	up(&hpfs_i(dir)->i_sem);
+	iput(result);
+bail2:
+	hpfs_brelse4(&qbh0);
+	hpfs_free_dnode(dir->i_sb, dno);
+bail1:
+	brelse(bh);
+	hpfs_free_sectors(dir->i_sb, fno, 1);
+bail:
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct inode *result = NULL;
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	fnode_secno fno;
+	int r;
+	struct hpfs_dirent dee;
+	int err;
+	if ((err = hpfs_chk_name((char *)name, &len)))
+		return err==-ENOENT ? -EINVAL : err;
+	lock_kernel();
+	err = -ENOSPC;
+	fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
+	if (!fnode)
+		goto bail;
+	memset(&dee, 0, sizeof dee);
+	if (!(mode & 0222)) dee.read_only = 1;
+	dee.archive = 1;
+	dee.hidden = name[0] == '.';
+	dee.fnode = fno;
+	dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+
+	result = new_inode(dir->i_sb);
+	if (!result)
+		goto bail1;
+	
+	hpfs_init_inode(result);
+	result->i_ino = fno;
+	result->i_mode |= S_IFREG;
+	result->i_mode &= ~0111;
+	result->i_op = &hpfs_file_iops;
+	result->i_fop = &hpfs_file_ops;
+	result->i_nlink = 1;
+	hpfs_decide_conv(result, (char *)name, len);
+	hpfs_i(result)->i_parent_dir = dir->i_ino;
+	result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+	result->i_ctime.tv_nsec = 0;
+	result->i_mtime.tv_nsec = 0;
+	result->i_atime.tv_nsec = 0;
+	hpfs_i(result)->i_ea_size = 0;
+	if (dee.read_only)
+		result->i_mode &= ~0222;
+	result->i_blocks = 1;
+	result->i_size = 0;
+	result->i_data.a_ops = &hpfs_aops;
+	hpfs_i(result)->mmu_private = 0;
+
+	down(&hpfs_i(dir)->i_sem);
+	r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+	if (r == 1)
+		goto bail2;
+	if (r == -1) {
+		err = -EEXIST;
+		goto bail2;
+	}
+	fnode->len = len;
+	memcpy(fnode->name, name, len > 15 ? 15 : len);
+	fnode->up = dir->i_ino;
+	mark_buffer_dirty(bh);
+	brelse(bh);
+
+	insert_inode_hash(result);
+
+	if (result->i_uid != current->fsuid ||
+	    result->i_gid != current->fsgid ||
+	    result->i_mode != (mode | S_IFREG)) {
+		result->i_uid = current->fsuid;
+		result->i_gid = current->fsgid;
+		result->i_mode = mode | S_IFREG;
+		hpfs_write_inode_nolock(result);
+	}
+	d_instantiate(dentry, result);
+	up(&hpfs_i(dir)->i_sem);
+	unlock_kernel();
+	return 0;
+
+bail2:
+	up(&hpfs_i(dir)->i_sem);
+	iput(result);
+bail1:
+	brelse(bh);
+	hpfs_free_sectors(dir->i_sb, fno, 1);
+bail:
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	fnode_secno fno;
+	int r;
+	struct hpfs_dirent dee;
+	struct inode *result = NULL;
+	int err;
+	if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+	if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+	lock_kernel();
+	err = -ENOSPC;
+	fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
+	if (!fnode)
+		goto bail;
+	memset(&dee, 0, sizeof dee);
+	if (!(mode & 0222)) dee.read_only = 1;
+	dee.archive = 1;
+	dee.hidden = name[0] == '.';
+	dee.fnode = fno;
+	dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+
+	result = new_inode(dir->i_sb);
+	if (!result)
+		goto bail1;
+
+	hpfs_init_inode(result);
+	result->i_ino = fno;
+	hpfs_i(result)->i_parent_dir = dir->i_ino;
+	result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+	result->i_ctime.tv_nsec = 0;
+	result->i_mtime.tv_nsec = 0;
+	result->i_atime.tv_nsec = 0;
+	hpfs_i(result)->i_ea_size = 0;
+	result->i_uid = current->fsuid;
+	result->i_gid = current->fsgid;
+	result->i_nlink = 1;
+	result->i_size = 0;
+	result->i_blocks = 1;
+	init_special_inode(result, mode, rdev);
+
+	down(&hpfs_i(dir)->i_sem);
+	r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+	if (r == 1)
+		goto bail2;
+	if (r == -1) {
+		err = -EEXIST;
+		goto bail2;
+	}
+	fnode->len = len;
+	memcpy(fnode->name, name, len > 15 ? 15 : len);
+	fnode->up = dir->i_ino;
+	mark_buffer_dirty(bh);
+
+	insert_inode_hash(result);
+
+	hpfs_write_inode_nolock(result);
+	d_instantiate(dentry, result);
+	up(&hpfs_i(dir)->i_sem);
+	brelse(bh);
+	unlock_kernel();
+	return 0;
+bail2:
+	up(&hpfs_i(dir)->i_sem);
+	iput(result);
+bail1:
+	brelse(bh);
+	hpfs_free_sectors(dir->i_sb, fno, 1);
+bail:
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	fnode_secno fno;
+	int r;
+	struct hpfs_dirent dee;
+	struct inode *result;
+	int err;
+	if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+	lock_kernel();
+	if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
+		unlock_kernel();
+		return -EPERM;
+	}
+	err = -ENOSPC;
+	fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
+	if (!fnode)
+		goto bail;
+	memset(&dee, 0, sizeof dee);
+	dee.archive = 1;
+	dee.hidden = name[0] == '.';
+	dee.fnode = fno;
+	dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+
+	result = new_inode(dir->i_sb);
+	if (!result)
+		goto bail1;
+	result->i_ino = fno;
+	hpfs_init_inode(result);
+	hpfs_i(result)->i_parent_dir = dir->i_ino;
+	result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+	result->i_ctime.tv_nsec = 0;
+	result->i_mtime.tv_nsec = 0;
+	result->i_atime.tv_nsec = 0;
+	hpfs_i(result)->i_ea_size = 0;
+	result->i_mode = S_IFLNK | 0777;
+	result->i_uid = current->fsuid;
+	result->i_gid = current->fsgid;
+	result->i_blocks = 1;
+	result->i_nlink = 1;
+	result->i_size = strlen(symlink);
+	result->i_op = &page_symlink_inode_operations;
+	result->i_data.a_ops = &hpfs_symlink_aops;
+
+	down(&hpfs_i(dir)->i_sem);
+	r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+	if (r == 1)
+		goto bail2;
+	if (r == -1) {
+		err = -EEXIST;
+		goto bail2;
+	}
+	fnode->len = len;
+	memcpy(fnode->name, name, len > 15 ? 15 : len);
+	fnode->up = dir->i_ino;
+	hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink));
+	mark_buffer_dirty(bh);
+	brelse(bh);
+
+	insert_inode_hash(result);
+
+	hpfs_write_inode_nolock(result);
+	d_instantiate(dentry, result);
+	up(&hpfs_i(dir)->i_sem);
+	unlock_kernel();
+	return 0;
+bail2:
+	up(&hpfs_i(dir)->i_sem);
+	iput(result);
+bail1:
+	brelse(bh);
+	hpfs_free_sectors(dir->i_sb, fno, 1);
+bail:
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	struct inode *inode = dentry->d_inode;
+	dnode_secno dno;
+	fnode_secno fno;
+	int r;
+	int rep = 0;
+	int err;
+
+	lock_kernel();
+	hpfs_adjust_length((char *)name, &len);
+again:
+	down(&hpfs_i(inode)->i_parent);
+	down(&hpfs_i(dir)->i_sem);
+	err = -ENOENT;
+	de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+	if (!de)
+		goto out;
+
+	err = -EPERM;
+	if (de->first)
+		goto out1;
+
+	err = -EISDIR;
+	if (de->directory)
+		goto out1;
+
+	fno = de->fnode;
+	r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
+	switch (r) {
+	case 1:
+		hpfs_error(dir->i_sb, "there was an error when removing dirent");
+		err = -EFSERROR;
+		break;
+	case 2:		/* no space for deleting, try to truncate file */
+
+		err = -ENOSPC;
+		if (rep++)
+			break;
+
+		up(&hpfs_i(dir)->i_sem);
+		up(&hpfs_i(inode)->i_parent);
+		d_drop(dentry);
+		spin_lock(&dentry->d_lock);
+		if (atomic_read(&dentry->d_count) > 1 ||
+		    permission(inode, MAY_WRITE, NULL) ||
+		    !S_ISREG(inode->i_mode) ||
+		    get_write_access(inode)) {
+			spin_unlock(&dentry->d_lock);
+			d_rehash(dentry);
+		} else {
+			struct iattr newattrs;
+			spin_unlock(&dentry->d_lock);
+			/*printk("HPFS: truncating file before delete.\n");*/
+			newattrs.ia_size = 0;
+			newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+			err = notify_change(dentry, &newattrs);
+			put_write_access(inode);
+			if (!err)
+				goto again;
+		}
+		unlock_kernel();
+		return -ENOSPC;
+	default:
+		inode->i_nlink--;
+		err = 0;
+	}
+	goto out;
+
+out1:
+	hpfs_brelse4(&qbh);
+out:
+	up(&hpfs_i(dir)->i_sem);
+	up(&hpfs_i(inode)->i_parent);
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	const char *name = dentry->d_name.name;
+	unsigned len = dentry->d_name.len;
+	struct quad_buffer_head qbh;
+	struct hpfs_dirent *de;
+	struct inode *inode = dentry->d_inode;
+	dnode_secno dno;
+	fnode_secno fno;
+	int n_items = 0;
+	int err;
+	int r;
+
+	hpfs_adjust_length((char *)name, &len);
+	lock_kernel();
+	down(&hpfs_i(inode)->i_parent);
+	down(&hpfs_i(dir)->i_sem);
+	err = -ENOENT;
+	de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+	if (!de)
+		goto out;
+
+	err = -EPERM;
+	if (de->first)
+		goto out1;
+
+	err = -ENOTDIR;
+	if (!de->directory)
+		goto out1;
+
+	hpfs_count_dnodes(dir->i_sb, hpfs_i(inode)->i_dno, NULL, NULL, &n_items);
+	err = -ENOTEMPTY;
+	if (n_items)
+		goto out1;
+
+	fno = de->fnode;
+	r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
+	switch (r) {
+	case 1:
+		hpfs_error(dir->i_sb, "there was an error when removing dirent");
+		err = -EFSERROR;
+		break;
+	case 2:
+		err = -ENOSPC;
+		break;
+	default:
+		dir->i_nlink--;
+		inode->i_nlink = 0;
+		err = 0;
+	}
+	goto out;
+out1:
+	hpfs_brelse4(&qbh);
+out:
+	up(&hpfs_i(dir)->i_sem);
+	up(&hpfs_i(inode)->i_parent);
+	unlock_kernel();
+	return err;
+}
+
+static int hpfs_symlink_readpage(struct file *file, struct page *page)
+{
+	char *link = kmap(page);
+	struct inode *i = page->mapping->host;
+	struct fnode *fnode;
+	struct buffer_head *bh;
+	int err;
+
+	err = -EIO;
+	lock_kernel();
+	if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh)))
+		goto fail;
+	err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE);
+	brelse(bh);
+	if (err)
+		goto fail;
+	unlock_kernel();
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+
+fail:
+	unlock_kernel();
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return err;
+}
+
+struct address_space_operations hpfs_symlink_aops = {
+	.readpage	= hpfs_symlink_readpage
+};
+	
+static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry)
+{
+	char *old_name = (char *)old_dentry->d_name.name;
+	int old_len = old_dentry->d_name.len;
+	char *new_name = (char *)new_dentry->d_name.name;
+	int new_len = new_dentry->d_name.len;
+	struct inode *i = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	struct quad_buffer_head qbh, qbh1;
+	struct hpfs_dirent *dep, *nde;
+	struct hpfs_dirent de;
+	dnode_secno dno;
+	int r;
+	struct buffer_head *bh;
+	struct fnode *fnode;
+	int err;
+	if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err;
+	err = 0;
+	hpfs_adjust_length((char *)old_name, &old_len);
+
+	lock_kernel();
+	/* order doesn't matter, due to VFS exclusion */
+	down(&hpfs_i(i)->i_parent);
+	if (new_inode)
+		down(&hpfs_i(new_inode)->i_parent);
+	down(&hpfs_i(old_dir)->i_sem);
+	if (new_dir != old_dir)
+		down(&hpfs_i(new_dir)->i_sem);
+	
+	/* Erm? Moving over the empty non-busy directory is perfectly legal */
+	if (new_inode && S_ISDIR(new_inode->i_mode)) {
+		err = -EINVAL;
+		goto end1;
+	}
+
+	if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+		hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
+		err = -ENOENT;
+		goto end1;
+	}
+	copy_de(&de, dep);
+	de.hidden = new_name[0] == '.';
+
+	if (new_inode) {
+		int r;
+		if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
+			if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) {
+				new_inode->i_nlink = 0;
+				copy_de(nde, &de);
+				memcpy(nde->name, new_name, new_len);
+				hpfs_mark_4buffers_dirty(&qbh1);
+				hpfs_brelse4(&qbh1);
+				goto end;
+			}
+			hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent");
+			err = -EFSERROR;
+			goto end1;
+		}
+		err = r == 2 ? -ENOSPC : r == 1 ? -EFSERROR : 0;
+		goto end1;
+	}
+
+	if (new_dir == old_dir) hpfs_brelse4(&qbh);
+
+	hpfs_lock_creation(i->i_sb);
+	if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) {
+		hpfs_unlock_creation(i->i_sb);
+		if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!");
+		err = r == 1 ? -ENOSPC : -EFSERROR;
+		if (new_dir != old_dir) hpfs_brelse4(&qbh);
+		goto end1;
+	}
+	
+	if (new_dir == old_dir)
+		if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+			hpfs_unlock_creation(i->i_sb);
+			hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
+			err = -ENOENT;
+			goto end1;
+		}
+
+	if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) {
+		hpfs_unlock_creation(i->i_sb);
+		hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent");
+		err = r == 2 ? -ENOSPC : -EFSERROR;
+		goto end1;
+	}
+	hpfs_unlock_creation(i->i_sb);
+	
+	end:
+	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
+	if (S_ISDIR(i->i_mode)) {
+		new_dir->i_nlink++;
+		old_dir->i_nlink--;
+	}
+	if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) {
+		fnode->up = new_dir->i_ino;
+		fnode->len = new_len;
+		memcpy(fnode->name, new_name, new_len>15?15:new_len);
+		if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len);
+		mark_buffer_dirty(bh);
+		brelse(bh);
+	}
+	hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
+	hpfs_decide_conv(i, (char *)new_name, new_len);
+end1:
+	if (old_dir != new_dir)
+		up(&hpfs_i(new_dir)->i_sem);
+	up(&hpfs_i(old_dir)->i_sem);
+	up(&hpfs_i(i)->i_parent);
+	if (new_inode)
+		up(&hpfs_i(new_inode)->i_parent);
+	unlock_kernel();
+	return err;
+}
+
+struct inode_operations hpfs_dir_iops =
+{
+	.create		= hpfs_create,
+	.lookup		= hpfs_lookup,
+	.unlink		= hpfs_unlink,
+	.symlink	= hpfs_symlink,
+	.mkdir		= hpfs_mkdir,
+	.rmdir		= hpfs_rmdir,
+	.mknod		= hpfs_mknod,
+	.rename		= hpfs_rename,
+	.setattr	= hpfs_notify_change,
+};
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
new file mode 100644
index 0000000..8eefa63
--- /dev/null
+++ b/fs/hpfs/super.c
@@ -0,0 +1,701 @@
+/*
+ *  linux/fs/hpfs/super.c
+ *
+ *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
+ *
+ *  mounting, unmounting, error handling
+ */
+
+#include "hpfs_fn.h"
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/init.h>
+#include <linux/statfs.h>
+
+/* Mark the filesystem dirty, so that chkdsk checks it when OS/2 is booted */
+
+static void mark_dirty(struct super_block *s)
+{
+	if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) {
+		struct buffer_head *bh;
+		struct hpfs_spare_block *sb;
+		if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
+			sb->dirty = 1;
+			sb->old_wrote = 0;
+			mark_buffer_dirty(bh);
+			brelse(bh);
+		}
+	}
+}
+
+/* Mark the filesystem clean (mark it dirty for chkdsk if chkdsk==2 or if there
+   were errors) */
+
+static void unmark_dirty(struct super_block *s)
+{
+	struct buffer_head *bh;
+	struct hpfs_spare_block *sb;
+	if (s->s_flags & MS_RDONLY) return;
+	if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
+		sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error;
+		sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error;
+		mark_buffer_dirty(bh);
+		brelse(bh);
+	}
+}
+
+/* Filesystem error... */
+
+#define ERR_BUF_SIZE 1024
+
+void hpfs_error(struct super_block *s, char *m,...)
+{
+	char *buf;
+	va_list l;
+	va_start(l, m);
+	if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL)))
+		printk("HPFS: No memory for error message '%s'\n",m);
+	else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE)
+		printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n");
+	printk("HPFS: filesystem error: ");
+	if (buf) printk("%s", buf);
+	else printk("%s\n",m);
+	if (!hpfs_sb(s)->sb_was_error) {
+		if (hpfs_sb(s)->sb_err == 2) {
+			printk("; crashing the system because you wanted it\n");
+			mark_dirty(s);
+			panic("HPFS panic");
+		} else if (hpfs_sb(s)->sb_err == 1) {
+			if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n");
+			else {
+				printk("; remounting read-only\n");
+				mark_dirty(s);
+				s->s_flags |= MS_RDONLY;
+			}
+		} else if (s->s_flags & MS_RDONLY) printk("; going on - but nothing will be destroyed because it's read-only\n");
+		else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
+	} else printk("\n");
+	if (buf) kfree(buf);
+	hpfs_sb(s)->sb_was_error = 1;
+}
+
+/* 
+ * A little trick to detect cycles in many hpfs structures without letting the
+ * kernel crash on a corrupted filesystem. When first called, set c2 to 0.
+ *
+ * BTW. chkdsk doesn't detect cycles correctly. When I had 2 lost directories
+ * nested in each other, chkdsk locked up happily.
+ */
+
+int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2,
+		char *msg)
+{
+	if (*c2 && *c1 == key) {
+		hpfs_error(s, "cycle detected on key %08x in %s", key, msg);
+		return 1;
+	}
+	(*c2)++;
+	if (!((*c2 - 1) & *c2)) *c1 = key;
+	return 0;
+}
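+
+/*
+ * A typical caller of hpfs_stop_cycles() (just a sketch, not copied from
+ * real code) keeps two locals and calls it on every step of the walk:
+ *
+ *	int c1, c2 = 0;
+ *	while (walking_the_chain) {
+ *		if (hpfs_stop_cycles(s, current_key, &c1, &c2, "my_walk"))
+ *			break;
+ *		...
+ *	}
+ */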
+
+static void hpfs_put_super(struct super_block *s)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_cp_table) kfree(sbi->sb_cp_table);
+	if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir);
+	unmark_dirty(s);
+	s->s_fs_info = NULL;
+	kfree(sbi);
+}
+
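+/* Count the set bits in one 4-sector allocation bitmap; set bits mark free
+   sectors, so the sum over all bitmaps (see count_bitmaps() below) is what
+   hpfs_statfs() reports as the free-block count */
+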
+unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+{
+	struct quad_buffer_head qbh;
+	unsigned *bits;
+	unsigned i, count;
+	if (!(bits = hpfs_map_4sectors(s, secno, &qbh, 4))) return 0;
+	count = 0;
+	for (i = 0; i < 2048 / sizeof(unsigned); i++) {
+		unsigned b; 
+		if (!bits[i]) continue;
+		for (b = bits[i]; b; b>>=1) count += b & 1;
+	}
+	hpfs_brelse4(&qbh);
+	return count;
+}
+
+static unsigned count_bitmaps(struct super_block *s)
+{
+	unsigned n, count, n_bands;
+	n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
+	count = 0;
+	for (n = 0; n < n_bands; n++)
+		count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]);
+	return count;
+}
+
+static int hpfs_statfs(struct super_block *s, struct kstatfs *buf)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	lock_kernel();
+
+	/*if (sbi->sb_n_free == -1) {*/
+		sbi->sb_n_free = count_bitmaps(s);
+		sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+	/*}*/
+	buf->f_type = s->s_magic;
+	buf->f_bsize = 512;
+	buf->f_blocks = sbi->sb_fs_size;
+	buf->f_bfree = sbi->sb_n_free;
+	buf->f_bavail = sbi->sb_n_free;
+	buf->f_files = sbi->sb_dirband_size / 4;
+	buf->f_ffree = sbi->sb_n_free_dnodes;
+	buf->f_namelen = 254;
+
+	unlock_kernel();
+
+	return 0;
+}
+
+static kmem_cache_t * hpfs_inode_cachep;
+
+static struct inode *hpfs_alloc_inode(struct super_block *sb)
+{
+	struct hpfs_inode_info *ei;
+	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS);
+	if (!ei)
+		return NULL;
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void hpfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		init_MUTEX(&ei->i_sem);
+		init_MUTEX(&ei->i_parent);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+ 
+static int init_inodecache(void)
+{
+	hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
+					     sizeof(struct hpfs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (hpfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(hpfs_inode_cachep))
+		printk(KERN_INFO "hpfs_inode_cache: not all structures were freed\n");
+}
+
+/*
+ * A tiny parser for option strings, stolen from dosfs.
+ * Stolen again from read-only hpfs.
+ * And updated for table-driven option parsing.
+ */
+
+enum {
+	Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis,
+	Opt_conv_binary, Opt_conv_text, Opt_conv_auto,
+	Opt_check_none, Opt_check_normal, Opt_check_strict,
+	Opt_err_cont, Opt_err_ro, Opt_err_panic,
+	Opt_eas_no, Opt_eas_ro, Opt_eas_rw,
+	Opt_chkdsk_no, Opt_chkdsk_errors, Opt_chkdsk_always,
+	Opt_timeshift, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_help, "help"},
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_umask, "umask=%o"},
+	{Opt_case_lower, "case=lower"},
+	{Opt_case_asis, "case=asis"},
+	{Opt_conv_binary, "conv=binary"},
+	{Opt_conv_text, "conv=text"},
+	{Opt_conv_auto, "conv=auto"},
+	{Opt_check_none, "check=none"},
+	{Opt_check_normal, "check=normal"},
+	{Opt_check_strict, "check=strict"},
+	{Opt_err_cont, "errors=continue"},
+	{Opt_err_ro, "errors=remount-ro"},
+	{Opt_err_panic, "errors=panic"},
+	{Opt_eas_no, "eas=no"},
+	{Opt_eas_ro, "eas=ro"},
+	{Opt_eas_rw, "eas=rw"},
+	{Opt_chkdsk_no, "chkdsk=no"},
+	{Opt_chkdsk_errors, "chkdsk=errors"},
+	{Opt_chkdsk_always, "chkdsk=always"},
+	{Opt_timeshift, "timeshift=%d"},
+	{Opt_err, NULL},
+};
+
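+/* Parse the mount options into the variables passed by reference.  Returns 1
+   on success, 0 on a malformed option and 2 when "help" was given, so that
+   hpfs_fill_super() and hpfs_remount_fs() can print hpfs_help() */
+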
+static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
+		      int *lowercase, int *conv, int *eas, int *chk, int *errs,
+		      int *chkdsk, int *timeshift)
+{
+	char *p;
+	int option;
+
+	if (!opts)
+		return 1;
+
+	/*printk("Parsing opts: '%s'\n",opts);*/
+
+	while ((p = strsep(&opts, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_help:
+			return 2;
+		case Opt_uid:
+			if (match_int(args, &option))
+				return 0;
+			*uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(args, &option))
+				return 0;
+			*gid = option;
+			break;
+		case Opt_umask:
+			if (match_octal(args, &option))
+				return 0;
+			*umask = option;
+			break;
+		case Opt_case_lower:
+			*lowercase = 1;
+			break;
+		case Opt_case_asis:
+			*lowercase = 0;
+			break;
+		case Opt_conv_binary:
+			*conv = CONV_BINARY;
+			break;
+		case Opt_conv_text:
+			*conv = CONV_TEXT;
+			break;
+		case Opt_conv_auto:
+			*conv = CONV_AUTO;
+			break;
+		case Opt_check_none:
+			*chk = 0;
+			break;
+		case Opt_check_normal:
+			*chk = 1;
+			break;
+		case Opt_check_strict:
+			*chk = 2;
+			break;
+		case Opt_err_cont:
+			*errs = 0;
+			break;
+		case Opt_err_ro:
+			*errs = 1;
+			break;
+		case Opt_err_panic:
+			*errs = 2;
+			break;
+		case Opt_eas_no:
+			*eas = 0;
+			break;
+		case Opt_eas_ro:
+			*eas = 1;
+			break;
+		case Opt_eas_rw:
+			*eas = 2;
+			break;
+		case Opt_chkdsk_no:
+			*chkdsk = 0;
+			break;
+		case Opt_chkdsk_errors:
+			*chkdsk = 1;
+			break;
+		case Opt_chkdsk_always:
+			*chkdsk = 2;
+			break;
+		case Opt_timeshift:
+		{
+			int m = 1;
+			char *rhs = args[0].from;
+			if (!rhs || !*rhs)
+				return 0;
+			if (*rhs == '-') m = -1;
+			if (*rhs == '+' || *rhs == '-') rhs++;
+			*timeshift = simple_strtoul(rhs, &rhs, 0) * m;
+			if (*rhs)
+				return 0;
+			break;
+		}
+		default:
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static inline void hpfs_help(void)
+{
+	printk("\n\
+HPFS filesystem options:\n\
+      help              do not mount and display this text\n\
+      uid=xxx           set uid of files that don't have uid specified in eas\n\
+      gid=xxx           set gid of files that don't have gid specified in eas\n\
+      umask=xxx         set mode of files that don't have mode specified in eas\n\
+      case=lower        lowercase all files\n\
+      case=asis         do not lowercase files (default)\n\
+      conv=binary       do not convert CR/LF -> LF (default)\n\
+      conv=auto         convert only files with known text extensions\n\
+      conv=text         convert all files\n\
+      check=none        no fs checks - kernel may crash on corrupted filesystem\n\
+      check=normal      do some checks - it should not crash (default)\n\
+      check=strict      do extra time-consuming checks, used for debugging\n\
+      errors=continue   continue on errors\n\
+      errors=remount-ro remount read-only if errors found (default)\n\
+      errors=panic      panic on errors\n\
+      chkdsk=no         do not mark fs for chkdsking even if there were errors\n\
+      chkdsk=errors     mark fs dirty if errors found (default)\n\
+      chkdsk=always     always mark fs dirty - used for debugging\n\
+      eas=no            ignore extended attributes\n\
+      eas=ro            read but do not write extended attributes\n\
+      eas=rw            r/w eas => enables chmod, chown, mknod, ln -s (default)\n\
+      timeshift=nnn	add nnn seconds to file times\n\
+\n");
+}
+
+static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+{
+	uid_t uid;
+	gid_t gid;
+	umode_t umask;
+	int lowercase, conv, eas, chk, errs, chkdsk, timeshift;
+	int o;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	
+	*flags |= MS_NOATIME;
+	
+	uid = sbi->sb_uid; gid = sbi->sb_gid;
+	umask = 0777 & ~sbi->sb_mode;
+	lowercase = sbi->sb_lowercase; conv = sbi->sb_conv;
+	eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk;
+	errs = sbi->sb_err; timeshift = sbi->sb_timeshift;
+
+	if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv,
+	    &eas, &chk, &errs, &chkdsk, &timeshift))) {
+		printk("HPFS: bad mount options.\n");
+		return 1;
+	}
+	if (o == 2) {
+		hpfs_help();
+		return 1;
+	}
+	if (timeshift != sbi->sb_timeshift) {
+		printk("HPFS: timeshift can't be changed using remount.\n");
+		return 1;
+	}
+
+	unmark_dirty(s);
+
+	sbi->sb_uid = uid; sbi->sb_gid = gid;
+	sbi->sb_mode = 0777 & ~umask;
+	sbi->sb_lowercase = lowercase; sbi->sb_conv = conv;
+	sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
+	sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
+
+	if (!(*flags & MS_RDONLY)) mark_dirty(s);
+
+	return 0;
+}
+
+/* Super operations */
+
+static struct super_operations hpfs_sops =
+{
+	.alloc_inode	= hpfs_alloc_inode,
+	.destroy_inode	= hpfs_destroy_inode,
+	.delete_inode	= hpfs_delete_inode,
+	.put_super	= hpfs_put_super,
+	.statfs		= hpfs_statfs,
+	.remount_fs	= hpfs_remount_fs,
+};
+
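+/*
+ * Read the boot block (sector 0), superblock (sector 16) and spare block
+ * (sector 17), validate them, load the bitmap directory and the optional
+ * code page table, then read the root directory fnode.
+ */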
+static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+{
+	struct buffer_head *bh0, *bh1, *bh2;
+	struct hpfs_boot_block *bootblock;
+	struct hpfs_super_block *superblock;
+	struct hpfs_spare_block *spareblock;
+	struct hpfs_sb_info *sbi;
+	struct inode *root;
+
+	uid_t uid;
+	gid_t gid;
+	umode_t umask;
+	int lowercase, conv, eas, chk, errs, chkdsk, timeshift;
+
+	dnode_secno root_dno;
+	struct hpfs_dirent *de = NULL;
+	struct quad_buffer_head qbh;
+
+	int o;
+
+	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	s->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(*sbi));
+
+	sbi->sb_bmp_dir = NULL;
+	sbi->sb_cp_table = NULL;
+
+	init_MUTEX(&sbi->hpfs_creation_de);
+
+	uid = current->uid;
+	gid = current->gid;
+	umask = current->fs->umask;
+	lowercase = 0;
+	conv = CONV_BINARY;
+	eas = 2;
+	chk = 1;
+	errs = 1;
+	chkdsk = 1;
+	timeshift = 0;
+
+	if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv,
+	    &eas, &chk, &errs, &chkdsk, &timeshift))) {
+		printk("HPFS: bad mount options.\n");
+		goto bail0;
+	}
+	if (o==2) {
+		hpfs_help();
+		goto bail0;
+	}
+
+	/*sbi->sb_mounting = 1;*/
+	sb_set_blocksize(s, 512);
+	sbi->sb_fs_size = -1;
+	if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1;
+	if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2;
+	if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3;
+
+	/* Check magics */
+	if (/*bootblock->magic != BB_MAGIC
+	    ||*/ superblock->magic != SB_MAGIC
+	    || spareblock->magic != SP_MAGIC) {
+		if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n");
+		goto bail4;
+	}
+
+	/* Check version */
+	if (!(s->s_flags & MS_RDONLY) &&
+	      superblock->funcversion != 2 && superblock->funcversion != 3) {
+		printk("HPFS: Bad version %d,%d. Mount readonly to go around\n",
+			(int)superblock->version, (int)superblock->funcversion);
+		printk("HPFS: please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - mikulas@artax.karlin.mff.cuni.cz\n");
+		goto bail4;
+	}
+
+	s->s_flags |= MS_NOATIME;
+
+	/* Fill superblock stuff */
+	s->s_magic = HPFS_SUPER_MAGIC;
+	s->s_op = &hpfs_sops;
+
+	sbi->sb_root = superblock->root;
+	sbi->sb_fs_size = superblock->n_sectors;
+	sbi->sb_bitmaps = superblock->bitmaps;
+	sbi->sb_dirband_start = superblock->dir_band_start;
+	sbi->sb_dirband_size = superblock->n_dir_band;
+	sbi->sb_dmap = superblock->dir_band_bitmap;
+	sbi->sb_uid = uid;
+	sbi->sb_gid = gid;
+	sbi->sb_mode = 0777 & ~umask;
+	sbi->sb_n_free = -1;
+	sbi->sb_n_free_dnodes = -1;
+	sbi->sb_lowercase = lowercase;
+	sbi->sb_conv = conv;
+	sbi->sb_eas = eas;
+	sbi->sb_chk = chk;
+	sbi->sb_chkdsk = chkdsk;
+	sbi->sb_err = errs;
+	sbi->sb_timeshift = timeshift;
+	sbi->sb_was_error = 0;
+	sbi->sb_cp_table = NULL;
+	sbi->sb_c_bitmap = -1;
+	sbi->sb_max_fwd_alloc = 0xffffff;
+	
+	/* Load bitmap directory */
+	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps)))
+		goto bail4;
+	
+	/* Check for general fs errors*/
+	if (spareblock->dirty && !spareblock->old_wrote) {
+		if (errs == 2) {
+			printk("HPFS: Improperly stopped, not mounted\n");
+			goto bail4;
+		}
+		hpfs_error(s, "improperly stopped");
+	}
+
+	if (!(s->s_flags & MS_RDONLY)) {
+		spareblock->dirty = 1;
+		spareblock->old_wrote = 0;
+		mark_buffer_dirty(bh2);
+	}
+
+	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
+		if (errs >= 2) {
+			printk("HPFS: Hotfixes not supported here, try chkdsk\n");
+			mark_dirty(s);
+			goto bail4;
+		}
+		hpfs_error(s, "hotfixes not supported here, try chkdsk");
+		if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n");
+		else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n");
+	}
+	if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) {
+		if (errs >= 2) {
+			printk("HPFS: Spare dnodes used, try chkdsk\n");
+			mark_dirty(s);
+			goto bail4;
+		}
+		hpfs_error(s, "warning: spare dnodes used, try chkdsk");
+		if (errs == 0) printk("HPFS: Proceeding, but your filesystem could be corrupted if you delete files or directories\n");
+	}
+	if (chk) {
+		unsigned a;
+		if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band ||
+		    superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) {
+			hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x",
+				superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band);
+			goto bail4;
+		}
+		a = sbi->sb_dirband_size;
+		sbi->sb_dirband_size = 0;
+		if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") ||
+		    hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") ||
+		    hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) {
+			mark_dirty(s);
+			goto bail4;
+		}
+		sbi->sb_dirband_size = a;
+	} else printk("HPFS: You really don't want any checks? You are crazy...\n");
+
+	/* Load code page table */
+	if (spareblock->n_code_pages)
+		if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir)))
+			printk("HPFS: Warning: code page support is disabled\n");
+
+	brelse(bh2);
+	brelse(bh1);
+	brelse(bh0);
+
+	root = iget_locked(s, sbi->sb_root);
+	if (!root)
+		goto bail0;
+	hpfs_init_inode(root);
+	hpfs_read_inode(root);
+	unlock_new_inode(root);
+	s->s_root = d_alloc_root(root);
+	if (!s->s_root) {
+		iput(root);
+		goto bail0;
+	}
+	hpfs_set_dentry_operations(s->s_root);
+
+	/*
+	 * find the root directory's . pointer & finish filling in the inode
+	 */
+
+	root_dno = hpfs_fnode_dno(s, sbi->sb_root);
+	if (root_dno)
+		de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh);
+	if (!de)
+		hpfs_error(s, "unable to find root dir");
+	else {
+		root->i_atime.tv_sec = local_to_gmt(s, de->read_date);
+		root->i_atime.tv_nsec = 0;
+		root->i_mtime.tv_sec = local_to_gmt(s, de->write_date);
+		root->i_mtime.tv_nsec = 0;
+		root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date);
+		root->i_ctime.tv_nsec = 0;
+		hpfs_i(root)->i_ea_size = de->ea_size;
+		hpfs_i(root)->i_parent_dir = root->i_ino;
+		if (root->i_size == -1)
+			root->i_size = 2048;
+		if (root->i_blocks == -1)
+			root->i_blocks = 5;
+		hpfs_brelse4(&qbh);
+	}
+	return 0;
+
+bail4:	brelse(bh2);
+bail3:	brelse(bh1);
+bail2:	brelse(bh0);
+bail1:
+bail0:
+	if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir);
+	if (sbi->sb_cp_table) kfree(sbi->sb_cp_table);
+	s->s_fs_info = NULL;
+	kfree(sbi);
+	return -EINVAL;
+}
+
+static struct super_block *hpfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, hpfs_fill_super);
+}
+
+static struct file_system_type hpfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "hpfs",
+	.get_sb		= hpfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_hpfs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&hpfs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_hpfs_fs(void)
+{
+	unregister_filesystem(&hpfs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_hpfs_fs)
+module_exit(exit_hpfs_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/hppfs/Makefile b/fs/hppfs/Makefile
new file mode 100644
index 0000000..6890433
--- /dev/null
+++ b/fs/hppfs/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2002, 2003 Jeff Dike (jdike@karaya.com)
+# Licensed under the GPL
+#
+
+hppfs-objs := hppfs_kern.o
+
+obj-y =
+obj-$(CONFIG_HPPFS) += hppfs.o
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
new file mode 100644
index 0000000..f8e0cbd
--- /dev/null
+++ b/fs/hppfs/hppfs_kern.c
@@ -0,0 +1,815 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+#include <linux/statfs.h>
+#include <asm/uaccess.h>
+#include <asm/fcntl.h>
+#include "os.h"
+
+static int init_inode(struct inode *inode, struct dentry *dentry);
+
+struct hppfs_data {
+	struct list_head list;
+	char contents[PAGE_SIZE - sizeof(struct list_head)];
+};
+
+struct hppfs_private {
+	struct file *proc_file;
+	int host_fd;
+	loff_t len;
+	struct hppfs_data *contents;
+};
+
+struct hppfs_inode_info {
+	struct dentry *proc_dentry;
+	struct inode vfs_inode;
+};
+
+static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode)
+{
+	return(list_entry(inode, struct hppfs_inode_info, vfs_inode));
+}
+
+#define HPPFS_SUPER_MAGIC 0xb00000ee
+
+static struct super_operations hppfs_sbops;
+
+static int is_pid(struct dentry *dentry)
+{
+	struct super_block *sb;
+	int i;
+
+	sb = dentry->d_sb;
+	if((sb->s_op != &hppfs_sbops) || (dentry->d_parent != sb->s_root))
+		return(0);
+
+	for(i = 0; i < dentry->d_name.len; i++){
+		if(!isdigit(dentry->d_name.name[i]))
+			return(0);
+	}
+	return(1);
+}
+
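+/*
+ * Build the name of the underlying procfs file ("proc/...") for a dentry.
+ * The first pass computes the length, the second fills the string in from
+ * the end; any pid directory component is replaced with the literal "pid".
+ */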
+static char *dentry_name(struct dentry *dentry, int extra)
+{
+	struct dentry *parent;
+	char *root, *name;
+	const char *seg_name;
+	int len, seg_len;
+
+	len = 0;
+	parent = dentry;
+	while(parent->d_parent != parent){
+		if(is_pid(parent))
+			len += strlen("pid") + 1;
+		else len += parent->d_name.len + 1;
+		parent = parent->d_parent;
+	}
+
+	root = "proc";
+	len += strlen(root);
+	name = kmalloc(len + extra + 1, GFP_KERNEL);
+	if(name == NULL) return(NULL);
+
+	name[len] = '\0';
+	parent = dentry;
+	while(parent->d_parent != parent){
+		if(is_pid(parent)){
+			seg_name = "pid";
+			seg_len = strlen("pid");
+		}
+		else {
+			seg_name = parent->d_name.name;
+			seg_len = parent->d_name.len;
+		}
+
+		len -= seg_len + 1;
+		name[len] = '/';
+		strncpy(&name[len + 1], seg_name, seg_len);
+		parent = parent->d_parent;
+	}
+	strncpy(name, root, strlen(root));
+	return(name);
+}
+
+struct dentry_operations hppfs_dentry_ops = {
+};
+
+static int file_removed(struct dentry *dentry, const char *file)
+{
+	char *host_file;
+	int extra, fd;
+
+	extra = 0;
+	if(file != NULL) extra += strlen(file) + 1;
+
+	host_file = dentry_name(dentry, extra + strlen("/remove"));
+	if(host_file == NULL){
+		printk("file_removed : allocation failed\n");
+		return(-ENOMEM);
+	}
+
+	if(file != NULL){
+		strcat(host_file, "/");
+		strcat(host_file, file);
+	}
+	strcat(host_file, "/remove");
+
+	fd = os_open_file(host_file, of_read(OPENFLAGS()), 0);
+	kfree(host_file);
+	if(fd > 0){
+		os_close_file(fd);
+		return(1);
+	}
+	return(0);
+}
+
+static void hppfs_read_inode(struct inode *ino)
+{
+	struct inode *proc_ino;
+
+	if(HPPFS_I(ino)->proc_dentry == NULL)
+		return;
+
+	proc_ino = HPPFS_I(ino)->proc_dentry->d_inode;
+	ino->i_uid = proc_ino->i_uid;
+	ino->i_gid = proc_ino->i_gid;
+	ino->i_atime = proc_ino->i_atime;
+	ino->i_mtime = proc_ino->i_mtime;
+	ino->i_ctime = proc_ino->i_ctime;
+	ino->i_ino = proc_ino->i_ino;
+	ino->i_mode = proc_ino->i_mode;
+	ino->i_nlink = proc_ino->i_nlink;
+	ino->i_size = proc_ino->i_size;
+	ino->i_blksize = proc_ino->i_blksize;
+	ino->i_blocks = proc_ino->i_blocks;
+}
+
+static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
+                                  struct nameidata *nd)
+{
+	struct dentry *proc_dentry, *new, *parent;
+	struct inode *inode;
+	int err, deleted;
+
+	deleted = file_removed(dentry, NULL);
+	if(deleted < 0)
+		return(ERR_PTR(deleted));
+	else if(deleted)
+		return(ERR_PTR(-ENOENT));
+
+	err = -ENOMEM;
+	parent = HPPFS_I(ino)->proc_dentry;
+	down(&parent->d_inode->i_sem);
+	proc_dentry = d_lookup(parent, &dentry->d_name);
+	if(proc_dentry == NULL){
+		proc_dentry = d_alloc(parent, &dentry->d_name);
+		if(proc_dentry == NULL){
+			up(&parent->d_inode->i_sem);
+			goto out;
+		}
+		new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
+						       proc_dentry, NULL);
+		if(new){
+			dput(proc_dentry);
+			proc_dentry = new;
+		}
+	}
+	up(&parent->d_inode->i_sem);
+
+	if(IS_ERR(proc_dentry))
+		return(proc_dentry);
+
+	inode = iget(ino->i_sb, 0);
+	if(inode == NULL)
+		goto out_dput;
+
+	err = init_inode(inode, proc_dentry);
+	if(err)
+		goto out_put;
+
+	hppfs_read_inode(inode);
+
+ 	d_add(dentry, inode);
+	dentry->d_op = &hppfs_dentry_ops;
+	return(NULL);
+
+ out_put:
+	iput(inode);
+ out_dput:
+	dput(proc_dentry);
+ out:
+	return(ERR_PTR(err));
+}
+
+static struct inode_operations hppfs_file_iops = {
+};
+
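+/*
+ * Call the underlying procfs read method.  When the buffer is a kernel
+ * pointer (is_user == 0), the address limit is switched to KERNEL_DS for
+ * the call and back to USER_DS afterwards.
+ */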
+static ssize_t read_proc(struct file *file, char *buf, ssize_t count,
+			 loff_t *ppos, int is_user)
+{
+	ssize_t (*read)(struct file *, char *, size_t, loff_t *);
+	ssize_t n;
+
+	read = file->f_dentry->d_inode->i_fop->read;
+
+	if(!is_user)
+		set_fs(KERNEL_DS);
+
+	n = (*read)(file, buf, count, &file->f_pos);
+
+	if(!is_user)
+		set_fs(USER_DS);
+
+	if(ppos) *ppos = file->f_pos;
+	return(n);
+}
+
+static ssize_t hppfs_read_file(int fd, char *buf, ssize_t count)
+{
+	ssize_t n;
+	int cur, err;
+	char *new_buf;
+
+	n = -ENOMEM;
+	new_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if(new_buf == NULL){
+		printk("hppfs_read_file : kmalloc failed\n");
+		goto out;
+	}
+	n = 0;
+	while(count > 0){
+		cur = min_t(ssize_t, count, PAGE_SIZE);
+		err = os_read_file(fd, new_buf, cur);
+		if(err < 0){
+			printk("hppfs_read : read failed, errno = %d\n",
+			       err);
+			n = err;
+			goto out_free;
+		}
+		else if(err == 0)
+			break;
+
+		if(copy_to_user(buf, new_buf, err)){
+			n = -EFAULT;
+			goto out_free;
+		}
+		n += err;
+		count -= err;
+	}
+ out_free:
+	kfree(new_buf);
+ out:
+	return(n);
+}
+
+static ssize_t hppfs_read(struct file *file, char *buf, size_t count,
+			  loff_t *ppos)
+{
+	struct hppfs_private *hppfs = file->private_data;
+	struct hppfs_data *data;
+	loff_t off;
+	int err;
+
+	if(hppfs->contents != NULL){
+		if(*ppos >= hppfs->len) return(0);
+
+		data = hppfs->contents;
+		off = *ppos;
+		while(off >= sizeof(data->contents)){
+			data = list_entry(data->list.next, struct hppfs_data,
+					  list);
+			off -= sizeof(data->contents);
+		}
+
+		if(off + count > hppfs->len)
+			count = hppfs->len - off;
+		copy_to_user(buf, &data->contents[off], count);
+		*ppos += count;
+	}
+	else if(hppfs->host_fd != -1){
+		err = os_seek_file(hppfs->host_fd, *ppos);
+		if(err){
+			printk("hppfs_read : seek failed, errno = %d\n", err);
+			return(err);
+		}
+		count = hppfs_read_file(hppfs->host_fd, buf, count);
+		if(count > 0)
+			*ppos += count;
+	}
+	else count = read_proc(hppfs->proc_file, buf, count, ppos, 1);
+
+	return(count);
+}
+
+static ssize_t hppfs_write(struct file *file, const char *buf, size_t len,
+			   loff_t *ppos)
+{
+	struct hppfs_private *data = file->private_data;
+	struct file *proc_file = data->proc_file;
+	ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
+	int err;
+
+	write = proc_file->f_dentry->d_inode->i_fop->write;
+
+	proc_file->f_pos = file->f_pos;
+	err = (*write)(proc_file, buf, len, &proc_file->f_pos);
+	file->f_pos = proc_file->f_pos;
+
+	return(err);
+}
+
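+/*
+ * Try the host's "/rw" socket first; when it is used, the procfs output is
+ * written to it and the filtered reply is read back (see hppfs_get_data).
+ * Fall back to the read-only "/r" socket.
+ */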
+static int open_host_sock(char *host_file, int *filter_out)
+{
+	char *end;
+	int fd;
+
+	end = &host_file[strlen(host_file)];
+	strcpy(end, "/rw");
+	*filter_out = 1;
+	fd = os_connect_socket(host_file);
+	if(fd > 0)
+		return(fd);
+
+	strcpy(end, "/r");
+	*filter_out = 0;
+	fd = os_connect_socket(host_file);
+	return(fd);
+}
+
+static void free_contents(struct hppfs_data *head)
+{
+	struct hppfs_data *data;
+	struct list_head *ele, *next;
+
+	if(head == NULL) return;
+
+	list_for_each_safe(ele, next, &head->list){
+		data = list_entry(ele, struct hppfs_data, list);
+		kfree(data);
+	}
+	kfree(head);
+}
+
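+/*
+ * Read the host file descriptor into a chain of hppfs_data buffers.  With
+ * filtering enabled, the procfs contents are first written to the socket
+ * and the host's reply is what gets stored.  Returns the head of the chain
+ * (with the total length in *size_out) or an ERR_PTR() on failure.
+ */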
+static struct hppfs_data *hppfs_get_data(int fd, int filter,
+					 struct file *proc_file,
+					 struct file *hppfs_file,
+					 loff_t *size_out)
+{
+	struct hppfs_data *data, *new, *head;
+	int n, err;
+
+	err = -ENOMEM;
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if(data == NULL){
+		printk("hppfs_get_data : head allocation failed\n");
+		goto failed;
+	}
+
+	INIT_LIST_HEAD(&data->list);
+
+	head = data;
+	*size_out = 0;
+
+	if(filter){
+		while((n = read_proc(proc_file, data->contents,
+				     sizeof(data->contents), NULL, 0)) > 0)
+			os_write_file(fd, data->contents, n);
+		err = os_shutdown_socket(fd, 0, 1);
+		if(err){
+			printk("hppfs_get_data : failed to shut down "
+			       "socket\n");
+			goto failed_free;
+		}
+	}
+	while(1){
+		n = os_read_file(fd, data->contents, sizeof(data->contents));
+		if(n < 0){
+			err = n;
+			printk("hppfs_get_data : read failed, errno = %d\n",
+			       err);
+			goto failed_free;
+		}
+		else if(n == 0)
+			break;
+
+		*size_out += n;
+
+		if(n < sizeof(data->contents))
+			break;
+
+		new = kmalloc(sizeof(*data), GFP_KERNEL);
+		if(new == 0){
+			printk("hppfs_get_data : data allocation failed\n");
+			err = -ENOMEM;
+			goto failed_free;
+		}
+
+		INIT_LIST_HEAD(&new->list);
+		list_add(&new->list, &data->list);
+		data = new;
+	}
+	return(head);
+
+ failed_free:
+	free_contents(head);
+ failed:
+	return(ERR_PTR(err));
+}
+
+static struct hppfs_private *hppfs_data(void)
+{
+	struct hppfs_private *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if(data == NULL)
+		return(data);
+
+	*data = ((struct hppfs_private ) { .host_fd  		= -1,
+					   .len  		= -1,
+					   .contents 		= NULL } );
+	return(data);
+}
+
+static int file_mode(int fmode)
+{
+	if(fmode == (FMODE_READ | FMODE_WRITE))
+		return(O_RDWR);
+	if(fmode == FMODE_READ)
+		return(O_RDONLY);
+	if(fmode == FMODE_WRITE)
+		return(O_WRONLY);
+	return(0);
+}
+
+static int hppfs_open(struct inode *inode, struct file *file)
+{
+	struct hppfs_private *data;
+	struct dentry *proc_dentry;
+	char *host_file;
+	int err, fd, type, filter;
+
+	err = -ENOMEM;
+	data = hppfs_data();
+	if(data == NULL)
+		goto out;
+
+	host_file = dentry_name(file->f_dentry, strlen("/rw"));
+	if(host_file == NULL)
+		goto out_free2;
+
+	proc_dentry = HPPFS_I(inode)->proc_dentry;
+
+	/* XXX This isn't closed anywhere */
+	data->proc_file = dentry_open(dget(proc_dentry), NULL,
+				      file_mode(file->f_mode));
+	err = PTR_ERR(data->proc_file);
+	if(IS_ERR(data->proc_file))
+		goto out_free1;
+
+	type = os_file_type(host_file);
+	if(type == OS_TYPE_FILE){
+		fd = os_open_file(host_file, of_read(OPENFLAGS()), 0);
+		if(fd >= 0)
+			data->host_fd = fd;
+		else printk("hppfs_open : failed to open '%s', errno = %d\n",
+			    host_file, -fd);
+
+		data->contents = NULL;
+	}
+	else if(type == OS_TYPE_DIR){
+		fd = open_host_sock(host_file, &filter);
+		if(fd > 0){
+			data->contents = hppfs_get_data(fd, filter,
+							data->proc_file,
+							file, &data->len);
+			if(!IS_ERR(data->contents))
+				data->host_fd = fd;
+		}
+		else printk("hppfs_open : failed to open a socket in "
+			    "'%s', errno = %d\n", host_file, -fd);
+	}
+	kfree(host_file);
+
+	file->private_data = data;
+	return(0);
+
+ out_free1:
+	kfree(host_file);
+ out_free2:
+	free_contents(data->contents);
+	kfree(data);
+ out:
+	return(err);
+}
+
+static int hppfs_dir_open(struct inode *inode, struct file *file)
+{
+	struct hppfs_private *data;
+	struct dentry *proc_dentry;
+	int err;
+
+	err = -ENOMEM;
+	data = hppfs_data();
+	if(data == NULL)
+		goto out;
+
+	proc_dentry = HPPFS_I(inode)->proc_dentry;
+	data->proc_file = dentry_open(dget(proc_dentry), NULL,
+				      file_mode(file->f_mode));
+	err = PTR_ERR(data->proc_file);
+	if(IS_ERR(data->proc_file))
+		goto out_free;
+
+	file->private_data = data;
+	return(0);
+
+ out_free:
+	kfree(data);
+ out:
+	return(err);
+}
+
+static loff_t hppfs_llseek(struct file *file, loff_t off, int where)
+{
+	struct hppfs_private *data = file->private_data;
+	struct file *proc_file = data->proc_file;
+	loff_t (*llseek)(struct file *, loff_t, int);
+	loff_t ret;
+
+	llseek = proc_file->f_dentry->d_inode->i_fop->llseek;
+	if(llseek != NULL){
+		ret = (*llseek)(proc_file, off, where);
+		if(ret < 0)
+			return(ret);
+	}
+
+	return(default_llseek(file, off, where));
+}
+
+static struct file_operations hppfs_file_fops = {
+	.owner		= NULL,
+	.llseek		= hppfs_llseek,
+	.read		= hppfs_read,
+	.write		= hppfs_write,
+	.open		= hppfs_open,
+};
+
+struct hppfs_dirent {
+	void *vfs_dirent;
+	filldir_t filldir;
+	struct dentry *dentry;
+};
+
+static int hppfs_filldir(void *d, const char *name, int size,
+			 loff_t offset, ino_t inode, unsigned int type)
+{
+	struct hppfs_dirent *dirent = d;
+
+	if(file_removed(dirent->dentry, name))
+		return(0);
+
+	return((*dirent->filldir)(dirent->vfs_dirent, name, size, offset,
+				  inode, type));
+}
+
+static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
+{
+	struct hppfs_private *data = file->private_data;
+	struct file *proc_file = data->proc_file;
+	int (*readdir)(struct file *, void *, filldir_t);
+	struct hppfs_dirent dirent = ((struct hppfs_dirent)
+		                      { .vfs_dirent  	= ent,
+					.filldir 	= filldir,
+					.dentry  	= file->f_dentry } );
+	int err;
+
+	readdir = proc_file->f_dentry->d_inode->i_fop->readdir;
+
+	proc_file->f_pos = file->f_pos;
+	err = (*readdir)(proc_file, &dirent, hppfs_filldir);
+	file->f_pos = proc_file->f_pos;
+
+	return(err);
+}
+
+static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	return(0);
+}
+
+static struct file_operations hppfs_dir_fops = {
+	.owner		= NULL,
+	.readdir	= hppfs_readdir,
+	.open		= hppfs_dir_open,
+	.fsync		= hppfs_fsync,
+};
+
+static int hppfs_statfs(struct super_block *sb, struct kstatfs *sf)
+{
+	sf->f_blocks = 0;
+	sf->f_bfree = 0;
+	sf->f_bavail = 0;
+	sf->f_files = 0;
+	sf->f_ffree = 0;
+	sf->f_type = HPPFS_SUPER_MAGIC;
+	return(0);
+}
+
+static struct inode *hppfs_alloc_inode(struct super_block *sb)
+{
+	struct hppfs_inode_info *hi;
+
+	hi = kmalloc(sizeof(*hi), GFP_KERNEL);
+	if(hi == NULL)
+		return(NULL);
+
+	*hi = ((struct hppfs_inode_info) { .proc_dentry	= NULL });
+	inode_init_once(&hi->vfs_inode);
+	return(&hi->vfs_inode);
+}
+
+void hppfs_delete_inode(struct inode *ino)
+{
+	clear_inode(ino);
+}
+
+static void hppfs_destroy_inode(struct inode *inode)
+{
+	kfree(HPPFS_I(inode));
+}
+
+static struct super_operations hppfs_sbops = {
+	.alloc_inode	= hppfs_alloc_inode,
+	.destroy_inode	= hppfs_destroy_inode,
+	.read_inode	= hppfs_read_inode,
+	.delete_inode	= hppfs_delete_inode,
+	.statfs		= hppfs_statfs,
+};
+
+static int hppfs_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+	struct file *proc_file;
+	struct dentry *proc_dentry;
+	int (*readlink)(struct dentry *, char *, int);
+	int err, n;
+
+	proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+	proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
+	err = PTR_ERR(proc_file);
+	if(IS_ERR(proc_file))
+		return(err);
+
+	readlink = proc_dentry->d_inode->i_op->readlink;
+	n = (*readlink)(proc_dentry, buffer, buflen);
+
+	fput(proc_file);
+
+	return(n);
+}
+
+static int hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct file *proc_file;
+	struct dentry *proc_dentry;
+	int (*follow_link)(struct dentry *, struct nameidata *);
+	int err, n;
+
+	proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+	proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
+	err = PTR_ERR(proc_file);
+	if(IS_ERR(proc_file))
+		return(err);
+
+	follow_link = proc_dentry->d_inode->i_op->follow_link;
+	n = (*follow_link)(proc_dentry, nd);
+
+	fput(proc_file);
+
+	return(n);
+}
+
+static struct inode_operations hppfs_dir_iops = {
+	.lookup		= hppfs_lookup,
+};
+
+static struct inode_operations hppfs_link_iops = {
+	.readlink	= hppfs_readlink,
+	.follow_link	= hppfs_follow_link,
+};
+
+static int init_inode(struct inode *inode, struct dentry *dentry)
+{
+	if(S_ISDIR(dentry->d_inode->i_mode)){
+		inode->i_op = &hppfs_dir_iops;
+		inode->i_fop = &hppfs_dir_fops;
+	}
+	else if(S_ISLNK(dentry->d_inode->i_mode)){
+		inode->i_op = &hppfs_link_iops;
+		inode->i_fop = &hppfs_file_fops;
+	}
+	else {
+		inode->i_op = &hppfs_file_iops;
+		inode->i_fop = &hppfs_file_fops;
+	}
+
+	HPPFS_I(inode)->proc_dentry = dentry;
+
+	return(0);
+}
+
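+/*
+ * hppfs keeps no data of its own: it finds the first mounted procfs
+ * superblock and builds its root directory on top of procfs's root dentry.
+ */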
+static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
+{
+	struct inode *root_inode;
+	struct file_system_type *procfs;
+	struct super_block *proc_sb;
+	int err;
+
+	err = -ENOENT;
+	procfs = get_fs_type("proc");
+	if(procfs == NULL)
+		goto out;
+
+	if(list_empty(&procfs->fs_supers))
+		goto out;
+
+	proc_sb = list_entry(procfs->fs_supers.next, struct super_block,
+			     s_instances);
+
+	sb->s_blocksize = 1024;
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = HPPFS_SUPER_MAGIC;
+	sb->s_op = &hppfs_sbops;
+
+	root_inode = iget(sb, 0);
+	if(root_inode == NULL)
+		goto out;
+
+	err = init_inode(root_inode, proc_sb->s_root);
+	if(err)
+		goto out_put;
+
+	err = -ENOMEM;
+	sb->s_root = d_alloc_root(root_inode);
+	if(sb->s_root == NULL)
+		goto out_put;
+
+	hppfs_read_inode(root_inode);
+
+	return(0);
+
+ out_put:
+	iput(root_inode);
+ out:
+	return(err);
+}
+
+static struct super_block *hppfs_read_super(struct file_system_type *type,
+					     int flags, const char *dev_name,
+					     void *data)
+{
+	return(get_sb_nodev(type, flags, data, hppfs_fill_super));
+}
+
+static struct file_system_type hppfs_type = {
+	.owner 		= THIS_MODULE,
+	.name 		= "hppfs",
+	.get_sb 	= hppfs_read_super,
+	.kill_sb	= kill_anon_super,
+	.fs_flags 	= 0,
+};
+
+static int __init init_hppfs(void)
+{
+	return(register_filesystem(&hppfs_type));
+}
+
+static void __exit exit_hppfs(void)
+{
+	unregister_filesystem(&hppfs_type);
+}
+
+module_init(init_hppfs)
+module_exit(exit_hppfs)
+MODULE_LICENSE("GPL");
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/fs/hugetlbfs/Makefile b/fs/hugetlbfs/Makefile
new file mode 100644
index 0000000..6adf870
--- /dev/null
+++ b/fs/hugetlbfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux hugetlbfs routines.
+#
+
+obj-$(CONFIG_HUGETLBFS) += hugetlbfs.o
+
+hugetlbfs-objs := inode.o
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
new file mode 100644
index 0000000..2af3338
--- /dev/null
+++ b/fs/hugetlbfs/inode.c
@@ -0,0 +1,853 @@
+/*
+ * hugetlbpage-backed filesystem.  Based on ramfs.
+ *
+ * William Irwin, 2002
+ *
+ * Copyright (C) 2002 Linus Torvalds.
+ */
+
+#include <linux/module.h>
+#include <linux/thread_info.h>
+#include <asm/current.h>
+#include <linux/sched.h>		/* remove ASAP */
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/file.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/backing-dev.h>
+#include <linux/hugetlb.h>
+#include <linux/pagevec.h>
+#include <linux/quotaops.h>
+#include <linux/slab.h>
+#include <linux/dnotify.h>
+#include <linux/statfs.h>
+#include <linux/security.h>
+
+#include <asm/uaccess.h>
+
+/* some random number */
+#define HUGETLBFS_MAGIC	0x958458f6
+
+static struct super_operations hugetlbfs_ops;
+static struct address_space_operations hugetlbfs_aops;
+struct file_operations hugetlbfs_file_operations;
+static struct inode_operations hugetlbfs_dir_inode_operations;
+static struct inode_operations hugetlbfs_inode_operations;
+
+static struct backing_dev_info hugetlbfs_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+int sysctl_hugetlb_shm_group;
+
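+/*
+ * Mappings must be HPAGE_SIZE-aligned in file offset, start, end and length.
+ * A read-only mapping may not extend past i_size, while a writable one grows
+ * the file; the pages are prefaulted at mmap() time.
+ */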
+static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+	loff_t len, vma_len;
+	int ret;
+
+	if ((vma->vm_flags & (VM_MAYSHARE | VM_WRITE)) == VM_WRITE)
+		return -EINVAL;
+
+	if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
+		return -EINVAL;
+
+	if (vma->vm_start & ~HPAGE_MASK)
+		return -EINVAL;
+
+	if (vma->vm_end & ~HPAGE_MASK)
+		return -EINVAL;
+
+	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+		return -EINVAL;
+
+	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+
+	down(&inode->i_sem);
+	file_accessed(file);
+	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+	vma->vm_ops = &hugetlb_vm_ops;
+
+	ret = -ENOMEM;
+	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
+		goto out;
+
+	ret = hugetlb_prefault(mapping, vma);
+	if (ret)
+		goto out;
+
+	if (inode->i_size < len)
+		inode->i_size = len;
+out:
+	up(&inode->i_sem);
+
+	return ret;
+}
+
+/*
+ * Called under down_write(mmap_sem), page_table_lock is not held
+ */
+
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags);
+#else
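+/*
+ * Generic fallback: linear search of the VMA list for a HPAGE_SIZE-aligned
+ * hole big enough for the request, restarting once from TASK_UNMAPPED_BASE.
+ */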
+static unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long start_addr;
+
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		addr = ALIGN(addr, HPAGE_SIZE);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	start_addr = mm->free_area_cache;
+
+full_search:
+	addr = ALIGN(start_addr, HPAGE_SIZE);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (TASK_SIZE - len < addr) {
+			/*
+			 * Start a new search - just in case we missed
+			 * some holes.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = TASK_UNMAPPED_BASE;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+
+		if (!vma || addr + len <= vma->vm_start)
+			return addr;
+		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+	}
+}
+#endif
+
+/*
+ * Read a page. Again trivial. If it didn't already exist
+ * in the page cache, it is zero-filled.
+ */
+static int hugetlbfs_readpage(struct file *file, struct page * page)
+{
+	unlock_page(page);
+	return -EINVAL;
+}
+
+static int hugetlbfs_prepare_write(struct file *file,
+			struct page *page, unsigned offset, unsigned to)
+{
+	return -EINVAL;
+}
+
+static int hugetlbfs_commit_write(struct file *file,
+			struct page *page, unsigned offset, unsigned to)
+{
+	return -EINVAL;
+}
+
+static void huge_pagevec_release(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); ++i)
+		put_page(pvec->pages[i]);
+
+	pagevec_reinit(pvec);
+}
+
+static void truncate_huge_page(struct page *page)
+{
+	clear_page_dirty(page);
+	ClearPageUptodate(page);
+	remove_from_page_cache(page);
+	put_page(page);
+}
+
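+/*
+ * Drop every page from lstart onwards from the page cache, one pagevec at a
+ * time, returning one unit of quota per page removed.
+ */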
+static void truncate_hugepages(struct address_space *mapping, loff_t lstart)
+{
+	const pgoff_t start = lstart >> HPAGE_SHIFT;
+	struct pagevec pvec;
+	pgoff_t next;
+	int i;
+
+	pagevec_init(&pvec, 0);
+	next = start;
+	while (1) {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+			if (next == start)
+				break;
+			next = start;
+			continue;
+		}
+
+		for (i = 0; i < pagevec_count(&pvec); ++i) {
+			struct page *page = pvec.pages[i];
+
+			lock_page(page);
+			if (page->index > next)
+				next = page->index;
+			++next;
+			truncate_huge_page(page);
+			unlock_page(page);
+			hugetlb_put_quota(mapping);
+		}
+		huge_pagevec_release(&pvec);
+	}
+	BUG_ON(!lstart && mapping->nrpages);
+}
+
+static void hugetlbfs_delete_inode(struct inode *inode)
+{
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(inode->i_sb);
+
+	hlist_del_init(&inode->i_hash);
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state |= I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+
+	if (inode->i_data.nrpages)
+		truncate_hugepages(&inode->i_data, 0);
+
+	security_inode_delete(inode);
+
+	if (sbinfo->free_inodes >= 0) {
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_inodes++;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+
+	clear_inode(inode);
+	destroy_inode(inode);
+}
+
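+/*
+ * Keep the inode cached on the unused list while the superblock stays
+ * active; otherwise unhash it, truncate any remaining huge pages and free
+ * it.  Entered with inode_lock held; the lock is dropped here.
+ */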
+static void hugetlbfs_forget_inode(struct inode *inode)
+{
+	struct super_block *super_block = inode->i_sb;
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(super_block);
+
+	if (hlist_unhashed(&inode->i_hash))
+		goto out_truncate;
+
+	if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
+		list_del(&inode->i_list);
+		list_add(&inode->i_list, &inode_unused);
+	}
+	inodes_stat.nr_unused++;
+	if (!super_block || (super_block->s_flags & MS_ACTIVE)) {
+		spin_unlock(&inode_lock);
+		return;
+	}
+
+	/* write_inode_now() ? */
+	inodes_stat.nr_unused--;
+	hlist_del_init(&inode->i_hash);
+out_truncate:
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state |= I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+	if (inode->i_data.nrpages)
+		truncate_hugepages(&inode->i_data, 0);
+
+	if (sbinfo->free_inodes >= 0) {
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_inodes++;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+
+	clear_inode(inode);
+	destroy_inode(inode);
+}
+
+static void hugetlbfs_drop_inode(struct inode *inode)
+{
+	if (!inode->i_nlink)
+		hugetlbfs_delete_inode(inode);
+	else
+		hugetlbfs_forget_inode(inode);
+}
+
+/*
+ * h_pgoff is in HPAGE_SIZE units.
+ * vma->vm_pgoff is in PAGE_SIZE units.
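+ *
+ * Example (assuming 4 KB base pages and 4 MB huge pages, i.e. a shift of 10):
+ * a VMA with vm_pgoff == 1024 starts at huge-page offset 1, so a truncation
+ * point of h_pgoff == 3 gives a v_offset of two huge pages (8 MB) into it.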
+ */
+static inline void
+hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
+{
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+
+	vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
+		unsigned long h_vm_pgoff;
+		unsigned long v_length;
+		unsigned long v_offset;
+
+		h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
+		v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
+		/*
+		 * Is this VMA fully outside the truncation point?
+		 */
+		if (h_vm_pgoff >= h_pgoff)
+			v_offset = 0;
+
+		v_length = vma->vm_end - vma->vm_start;
+
+		zap_hugepage_range(vma,
+				vma->vm_start + v_offset,
+				v_length - v_offset);
+	}
+}
+
+/*
+ * Expanding truncates are not allowed.
+ */
+static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
+{
+	unsigned long pgoff;
+	struct address_space *mapping = inode->i_mapping;
+
+	if (offset > inode->i_size)
+		return -EINVAL;
+
+	BUG_ON(offset & ~HPAGE_MASK);
+	pgoff = offset >> HPAGE_SHIFT;
+
+	inode->i_size = offset;
+	spin_lock(&mapping->i_mmap_lock);
+	if (!prio_tree_empty(&mapping->i_mmap))
+		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
+	spin_unlock(&mapping->i_mmap_lock);
+	truncate_hugepages(mapping, offset);
+	return 0;
+}
+
+static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+	unsigned int ia_valid = attr->ia_valid;
+
+	BUG_ON(!inode);
+
+	error = inode_change_ok(inode, attr);
+	if (error)
+		goto out;
+
+	if (ia_valid & ATTR_SIZE) {
+		error = -EINVAL;
+		if (!(attr->ia_size & ~HPAGE_MASK))
+			error = hugetlb_vmtruncate(inode, attr->ia_size);
+		if (error)
+			goto out;
+		attr->ia_valid &= ~ATTR_SIZE;
+	}
+	error = inode_setattr(inode, attr);
+out:
+	return error;
+}
+
+static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, 
+					gid_t gid, int mode, dev_t dev)
+{
+	struct inode *inode;
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+
+	if (sbinfo->free_inodes >= 0) {
+		spin_lock(&sbinfo->stat_lock);
+		if (!sbinfo->free_inodes) {
+			spin_unlock(&sbinfo->stat_lock);
+			return NULL;
+		}
+		sbinfo->free_inodes--;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+
+	inode = new_inode(sb);
+	if (inode) {
+		struct hugetlbfs_inode_info *info;
+		inode->i_mode = mode;
+		inode->i_uid = uid;
+		inode->i_gid = gid;
+		inode->i_blksize = HPAGE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_mapping->a_ops = &hugetlbfs_aops;
+		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		info = HUGETLBFS_I(inode);
+		mpol_shared_policy_init(&info->policy);
+		switch (mode & S_IFMT) {
+		default:
+			init_special_inode(inode, mode, dev);
+			break;
+		case S_IFREG:
+			inode->i_op = &hugetlbfs_inode_operations;
+			inode->i_fop = &hugetlbfs_file_operations;
+			break;
+		case S_IFDIR:
+			inode->i_op = &hugetlbfs_dir_inode_operations;
+			inode->i_fop = &simple_dir_operations;
+
+			/* directory inodes start off with i_nlink == 2 (for "." entry) */
+			inode->i_nlink++;
+			break;
+		case S_IFLNK:
+			inode->i_op = &page_symlink_inode_operations;
+			break;
+		}
+	}
+	return inode;
+}
+
+/*
+ * File creation. Allocate an inode, and we're done..
+ */
+static int hugetlbfs_mknod(struct inode *dir,
+			struct dentry *dentry, int mode, dev_t dev)
+{
+	struct inode *inode;
+	int error = -ENOSPC;
+	gid_t gid;
+
+	if (dir->i_mode & S_ISGID) {
+		gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else {
+		gid = current->fsgid;
+	}
+	inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, gid, mode, dev);
+	if (inode) {
+		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+		d_instantiate(dentry, inode);
+		dget(dentry);	/* Extra count - pin the dentry in core */
+		error = 0;
+	}
+	return error;
+}
+
+static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+	if (!retval)
+		dir->i_nlink++;
+	return retval;
+}
+
+static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int hugetlbfs_symlink(struct inode *dir,
+			struct dentry *dentry, const char *symname)
+{
+	struct inode *inode;
+	int error = -ENOSPC;
+	gid_t gid;
+
+	if (dir->i_mode & S_ISGID)
+		gid = dir->i_gid;
+	else
+		gid = current->fsgid;
+
+	inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
+					gid, S_IFLNK|S_IRWXUGO, 0);
+	if (inode) {
+		int l = strlen(symname)+1;
+		error = page_symlink(inode, symname, l);
+		if (!error) {
+			d_instantiate(dentry, inode);
+			dget(dentry);
+		} else
+			iput(inode);
+	}
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+
+	return error;
+}
+
+/*
+ * For direct-IO reads into hugetlb pages
+ */
+static int hugetlbfs_set_page_dirty(struct page *page)
+{
+	return 0;
+}
+
+static int hugetlbfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+
+	buf->f_type = HUGETLBFS_MAGIC;
+	buf->f_bsize = HPAGE_SIZE;
+	if (sbinfo) {
+		spin_lock(&sbinfo->stat_lock);
+		buf->f_blocks = sbinfo->max_blocks;
+		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
+		buf->f_files = sbinfo->max_inodes;
+		buf->f_ffree = sbinfo->free_inodes;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+	buf->f_namelen = NAME_MAX;
+	return 0;
+}
+
+static void hugetlbfs_put_super(struct super_block *sb)
+{
+	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
+
+	if (sbi) {
+		sb->s_fs_info = NULL;
+		kfree(sbi);
+	}
+}
+
+static kmem_cache_t *hugetlbfs_inode_cachep;
+
+static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
+{
+	struct hugetlbfs_inode_info *p;
+
+	p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+	if (!p)
+		return NULL;
+	return &p->vfs_inode;
+}
+
+static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+
+static void hugetlbfs_destroy_inode(struct inode *inode)
+{
+	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
+	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
+}
+
+static struct address_space_operations hugetlbfs_aops = {
+	.readpage	= hugetlbfs_readpage,
+	.prepare_write	= hugetlbfs_prepare_write,
+	.commit_write	= hugetlbfs_commit_write,
+	.set_page_dirty	= hugetlbfs_set_page_dirty,
+};
+
+struct file_operations hugetlbfs_file_operations = {
+	.mmap			= hugetlbfs_file_mmap,
+	.fsync			= simple_sync_file,
+	.get_unmapped_area	= hugetlb_get_unmapped_area,
+};
+
+static struct inode_operations hugetlbfs_dir_inode_operations = {
+	.create		= hugetlbfs_create,
+	.lookup		= simple_lookup,
+	.link		= simple_link,
+	.unlink		= simple_unlink,
+	.symlink	= hugetlbfs_symlink,
+	.mkdir		= hugetlbfs_mkdir,
+	.rmdir		= simple_rmdir,
+	.mknod		= hugetlbfs_mknod,
+	.rename		= simple_rename,
+	.setattr	= hugetlbfs_setattr,
+};
+
+static struct inode_operations hugetlbfs_inode_operations = {
+	.setattr	= hugetlbfs_setattr,
+};
+
+static struct super_operations hugetlbfs_ops = {
+	.alloc_inode    = hugetlbfs_alloc_inode,
+	.destroy_inode  = hugetlbfs_destroy_inode,
+	.statfs		= hugetlbfs_statfs,
+	.drop_inode	= hugetlbfs_drop_inode,
+	.put_super	= hugetlbfs_put_super,
+};
+
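+/*
+ * Parse the mount options: uid=, gid=, mode=, size= (a byte count handled by
+ * memparse(), or a percentage of the huge page pool when suffixed with '%')
+ * and nr_inodes=.  Returns 0 on success, -EINVAL on any unknown or
+ * malformed option.
+ */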
+static int
+hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
+{
+	char *opt, *value, *rest;
+
+	if (!options)
+		return 0;
+	while ((opt = strsep(&options, ",")) != NULL) {
+		if (!*opt)
+			continue;
+
+		value = strchr(opt, '=');
+		if (!value || !*value)
+			return -EINVAL;
+		else
+			*value++ = '\0';
+
+		if (!strcmp(opt, "uid"))
+			pconfig->uid = simple_strtoul(value, &value, 0);
+		else if (!strcmp(opt, "gid"))
+			pconfig->gid = simple_strtoul(value, &value, 0);
+		else if (!strcmp(opt, "mode"))
+			pconfig->mode = simple_strtoul(value,&value,0) & 0777U;
+		else if (!strcmp(opt, "size")) {
+			unsigned long long size = memparse(value, &rest);
+			if (*rest == '%') {
+				size <<= HPAGE_SHIFT;
+				size *= max_huge_pages;
+				do_div(size, 100);
+				rest++;
+			}
+			size &= HPAGE_MASK;
+			pconfig->nr_blocks = (size >> HPAGE_SHIFT);
+			value = rest;
+		} else if (!strcmp(opt,"nr_inodes")) {
+			pconfig->nr_inodes = memparse(value, &rest);
+			value = rest;
+		} else
+			return -EINVAL;
+
+		if (*value)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode * inode;
+	struct dentry * root;
+	int ret;
+	struct hugetlbfs_config config;
+	struct hugetlbfs_sb_info *sbinfo;
+
+	config.nr_blocks = -1; /* No limit on size by default */
+	config.nr_inodes = -1; /* No limit on number of inodes by default */
+	config.uid = current->fsuid;
+	config.gid = current->fsgid;
+	config.mode = 0755;
+	ret = hugetlbfs_parse_options(data, &config);
+
+	if (ret)
+		return ret;
+
+	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
+	if (!sbinfo)
+		return -ENOMEM;
+	sb->s_fs_info = sbinfo;
+	spin_lock_init(&sbinfo->stat_lock);
+	sbinfo->max_blocks = config.nr_blocks;
+	sbinfo->free_blocks = config.nr_blocks;
+	sbinfo->max_inodes = config.nr_inodes;
+	sbinfo->free_inodes = config.nr_inodes;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_blocksize = HPAGE_SIZE;
+	sb->s_blocksize_bits = HPAGE_SHIFT;
+	sb->s_magic = HUGETLBFS_MAGIC;
+	sb->s_op = &hugetlbfs_ops;
+	sb->s_time_gran = 1;
+	inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
+					S_IFDIR | config.mode, 0);
+	if (!inode)
+		goto out_free;
+
+	root = d_alloc_root(inode);
+	if (!root) {
+		iput(inode);
+		goto out_free;
+	}
+	sb->s_root = root;
+	return 0;
+out_free:
+	kfree(sbinfo);
+	return -ENOMEM;
+}
+
+int hugetlb_get_quota(struct address_space *mapping)
+{
+	int ret = 0;
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+
+	if (sbinfo->free_blocks > -1) {
+		spin_lock(&sbinfo->stat_lock);
+		if (sbinfo->free_blocks > 0)
+			sbinfo->free_blocks--;
+		else
+			ret = -ENOMEM;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+
+	return ret;
+}
+
+void hugetlb_put_quota(struct address_space *mapping)
+{
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+
+	if (sbinfo->free_blocks > -1) {
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_blocks++;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+}
+
+static struct super_block *hugetlbfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super);
+}
+
+static struct file_system_type hugetlbfs_fs_type = {
+	.name		= "hugetlbfs",
+	.get_sb		= hugetlbfs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+static struct vfsmount *hugetlbfs_vfsmount;
+
+/*
+ * Return the next identifier for a shm file
+ */
+static unsigned long hugetlbfs_counter(void)
+{
+	static DEFINE_SPINLOCK(lock);
+	static unsigned long counter;
+	unsigned long ret;
+
+	spin_lock(&lock);
+	ret = ++counter;
+	spin_unlock(&lock);
+	return ret;
+}
+
+static int can_do_hugetlb_shm(void)
+{
+	return likely(capable(CAP_IPC_LOCK) ||
+			in_group_p(sysctl_hugetlb_shm_group) ||
+			can_do_mlock());
+}
+
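+/*
+ * Create an unlinked hugetlbfs file of the given size for SysV shared
+ * memory.  The caller needs CAP_IPC_LOCK, membership in the hugetlb shm
+ * group or an adequate mlock limit, and the size is charged against the
+ * user's locked shared memory allowance.
+ */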
+struct file *hugetlb_zero_setup(size_t size)
+{
+	int error = -ENOMEM;
+	struct file *file;
+	struct inode *inode;
+	struct dentry *dentry, *root;
+	struct qstr quick_string;
+	char buf[16];
+
+	if (!can_do_hugetlb_shm())
+		return ERR_PTR(-EPERM);
+
+	if (!is_hugepage_mem_enough(size))
+		return ERR_PTR(-ENOMEM);
+
+	if (!user_shm_lock(size, current->user))
+		return ERR_PTR(-ENOMEM);
+
+	root = hugetlbfs_vfsmount->mnt_root;
+	snprintf(buf, 16, "%lu", hugetlbfs_counter());
+	quick_string.name = buf;
+	quick_string.len = strlen(quick_string.name);
+	quick_string.hash = 0;
+	dentry = d_alloc(root, &quick_string);
+	if (!dentry)
+		goto out_shm_unlock;
+
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto out_dentry;
+
+	error = -ENOSPC;
+	inode = hugetlbfs_get_inode(root->d_sb, current->fsuid,
+				current->fsgid, S_IFREG | S_IRWXUGO, 0);
+	if (!inode)
+		goto out_file;
+
+	d_instantiate(dentry, inode);
+	inode->i_size = size;
+	inode->i_nlink = 0;
+	file->f_vfsmnt = mntget(hugetlbfs_vfsmount);
+	file->f_dentry = dentry;
+	file->f_mapping = inode->i_mapping;
+	file->f_op = &hugetlbfs_file_operations;
+	file->f_mode = FMODE_WRITE | FMODE_READ;
+	return file;
+
+out_file:
+	put_filp(file);
+out_dentry:
+	dput(dentry);
+out_shm_unlock:
+	user_shm_unlock(size, current->user);
+	return ERR_PTR(error);
+}
+
+static int __init init_hugetlbfs_fs(void)
+{
+	int error;
+	struct vfsmount *vfsmount;
+
+	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
+					sizeof(struct hugetlbfs_inode_info),
+					0, 0, init_once, NULL);
+	if (hugetlbfs_inode_cachep == NULL)
+		return -ENOMEM;
+
+	error = register_filesystem(&hugetlbfs_fs_type);
+	if (error)
+		goto out;
+
+	vfsmount = kern_mount(&hugetlbfs_fs_type);
+
+	if (!IS_ERR(vfsmount)) {
+		hugetlbfs_vfsmount = vfsmount;
+		return 0;
+	}
+
+	error = PTR_ERR(vfsmount);
+
+ out:
+	if (error)
+		kmem_cache_destroy(hugetlbfs_inode_cachep);
+	return error;
+}
+
+static void __exit exit_hugetlbfs_fs(void)
+{
+	kmem_cache_destroy(hugetlbfs_inode_cachep);
+	unregister_filesystem(&hugetlbfs_fs_type);
+}
+
+module_init(init_hugetlbfs_fs)
+module_exit(exit_hugetlbfs_fs)
+
+MODULE_LICENSE("GPL");
diff --git a/fs/inode.c b/fs/inode.c
new file mode 100644
index 0000000..af8fd78
--- /dev/null
+++ b/fs/inode.c
@@ -0,0 +1,1377 @@
+/*
+ * linux/fs/inode.c
+ *
+ * (C) 1997 Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/dcache.h>
+#include <linux/init.h>
+#include <linux/quotaops.h>
+#include <linux/slab.h>
+#include <linux/writeback.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/wait.h>
+#include <linux/hash.h>
+#include <linux/swap.h>
+#include <linux/security.h>
+#include <linux/pagemap.h>
+#include <linux/cdev.h>
+#include <linux/bootmem.h>
+
+/*
+ * This is needed for the following functions:
+ *  - inode_has_buffers
+ *  - invalidate_inode_buffers
+ *  - fsync_bdev
+ *  - invalidate_bdev
+ *
+ * FIXME: remove all knowledge of the buffer layer from this file
+ */
+#include <linux/buffer_head.h>
+
+/*
+ * New inode.c implementation.
+ *
+ * This implementation has the basic premise of trying
+ * to be extremely low-overhead and SMP-safe, yet be
+ * simple enough to be "obviously correct".
+ *
+ * Famous last words.
+ */
+
+/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
+
+/* #define INODE_PARANOIA 1 */
+/* #define INODE_DEBUG 1 */
+
+/*
+ * Inode lookup is no longer as critical as it used to be:
+ * most of the lookups are going to be through the dcache.
+ */
+#define I_HASHBITS	i_hash_shift
+#define I_HASHMASK	i_hash_mask
+
+static unsigned int i_hash_mask;
+static unsigned int i_hash_shift;
+
+/*
+ * Each inode can be on two separate lists. One is
+ * the hash list of the inode, used for lookups. The
+ * other linked list is the "type" list:
+ *  "in_use" - valid inode, i_count > 0, i_nlink > 0
+ *  "dirty"  - as "in_use" but also dirty
+ *  "unused" - valid inode, i_count = 0
+ *
+ * A "dirty" list is maintained for each super block,
+ * allowing for low-overhead inode sync() operations.
+ */
+
+LIST_HEAD(inode_in_use);
+LIST_HEAD(inode_unused);
+static struct hlist_head *inode_hashtable;
+
+/*
+ * A simple spinlock to protect the list manipulations.
+ *
+ * NOTE! You also have to own the lock if you change
+ * the i_state of an inode while it is in use..
+ */
+DEFINE_SPINLOCK(inode_lock);
+
+/*
+ * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * icache shrinking path, and the umount path.  Without this exclusion,
+ * by the time prune_icache calls iput for the inode whose pages it has
+ * been invalidating, or by the time it calls clear_inode & destroy_inode
+ * from its final dispose_list, the struct super_block they refer to
+ * (for inode->i_sb->s_op) may already have been freed and reused.
+ */
+DECLARE_MUTEX(iprune_sem);
+
+/*
+ * Statistics gathering..
+ */
+struct inodes_stat_t inodes_stat;
+
+static kmem_cache_t * inode_cachep;
+
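+/*
+ * Allocate an inode through the filesystem's ->alloc_inode() method when
+ * one is provided, otherwise from the generic inode slab, and initialise
+ * the generic fields and the embedded address_space.
+ */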
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	static struct address_space_operations empty_aops;
+	static struct inode_operations empty_iops;
+	static struct file_operations empty_fops;
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+
+	if (inode) {
+		struct address_space * const mapping = &inode->i_data;
+
+		inode->i_sb = sb;
+		inode->i_blkbits = sb->s_blocksize_bits;
+		inode->i_flags = 0;
+		atomic_set(&inode->i_count, 1);
+		inode->i_op = &empty_iops;
+		inode->i_fop = &empty_fops;
+		inode->i_nlink = 1;
+		atomic_set(&inode->i_writecount, 0);
+		inode->i_size = 0;
+		inode->i_blocks = 0;
+		inode->i_bytes = 0;
+		inode->i_generation = 0;
+#ifdef CONFIG_QUOTA
+		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+#endif
+		inode->i_pipe = NULL;
+		inode->i_bdev = NULL;
+		inode->i_cdev = NULL;
+		inode->i_rdev = 0;
+		inode->i_security = NULL;
+		inode->dirtied_when = 0;
+		if (security_inode_alloc(inode)) {
+			if (inode->i_sb->s_op->destroy_inode)
+				inode->i_sb->s_op->destroy_inode(inode);
+			else
+				kmem_cache_free(inode_cachep, (inode));
+			return NULL;
+		}
+
+		mapping->a_ops = &empty_aops;
+ 		mapping->host = inode;
+		mapping->flags = 0;
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+		mapping->assoc_mapping = NULL;
+		mapping->backing_dev_info = &default_backing_dev_info;
+
+		/*
+		 * If the block_device provides a backing_dev_info for client
+		 * inodes then use that.  Otherwise the inode share the bdev's
+		 * backing_dev_info.
+		 */
+		if (sb->s_bdev) {
+			struct backing_dev_info *bdi;
+
+			bdi = sb->s_bdev->bd_inode_backing_dev_info;
+			if (!bdi)
+				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+			mapping->backing_dev_info = bdi;
+		}
+		memset(&inode->u, 0, sizeof(inode->u));
+		inode->i_mapping = mapping;
+	}
+	return inode;
+}
+
+void destroy_inode(struct inode *inode) 
+{
+	if (inode_has_buffers(inode))
+		BUG();
+	security_inode_free(inode);
+	if (inode->i_sb->s_op->destroy_inode)
+		inode->i_sb->s_op->destroy_inode(inode);
+	else
+		kmem_cache_free(inode_cachep, (inode));
+}
+
+
+/*
+ * These are initializations that only need to be done
+ * once, because the fields are idempotent across use
+ * of the inode, so let the slab aware of that.
+ */
+void inode_init_once(struct inode *inode)
+{
+	memset(inode, 0, sizeof(*inode));
+	INIT_HLIST_NODE(&inode->i_hash);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	INIT_LIST_HEAD(&inode->i_devices);
+	sema_init(&inode->i_sem, 1);
+	init_rwsem(&inode->i_alloc_sem);
+	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
+	rwlock_init(&inode->i_data.tree_lock);
+	spin_lock_init(&inode->i_data.i_mmap_lock);
+	INIT_LIST_HEAD(&inode->i_data.private_list);
+	spin_lock_init(&inode->i_data.private_lock);
+	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
+	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+	spin_lock_init(&inode->i_lock);
+	i_size_ordered_init(inode);
+}
+
+EXPORT_SYMBOL(inode_init_once);
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct inode * inode = (struct inode *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(inode);
+}
+
+/*
+ * inode_lock must be held
+ */
+void __iget(struct inode * inode)
+{
+	if (atomic_read(&inode->i_count)) {
+		atomic_inc(&inode->i_count);
+		return;
+	}
+	atomic_inc(&inode->i_count);
+	if (!(inode->i_state & (I_DIRTY|I_LOCK)))
+		list_move(&inode->i_list, &inode_in_use);
+	inodes_stat.nr_unused--;
+}
+
+/**
+ * clear_inode - clear an inode
+ * @inode: inode to clear
+ *
+ * This is called by the filesystem to tell us
+ * that the inode is no longer useful. We just
+ * terminate it with extreme prejudice.
+ */
+void clear_inode(struct inode *inode)
+{
+	might_sleep();
+	invalidate_inode_buffers(inode);
+       
+	if (inode->i_data.nrpages)
+		BUG();
+	if (!(inode->i_state & I_FREEING))
+		BUG();
+	if (inode->i_state & I_CLEAR)
+		BUG();
+	wait_on_inode(inode);
+	DQUOT_DROP(inode);
+	if (inode->i_sb && inode->i_sb->s_op->clear_inode)
+		inode->i_sb->s_op->clear_inode(inode);
+	if (inode->i_bdev)
+		bd_forget(inode);
+	if (inode->i_cdev)
+		cd_forget(inode);
+	inode->i_state = I_CLEAR;
+}
+
+EXPORT_SYMBOL(clear_inode);
+
+/*
+ * dispose_list - dispose of the contents of a local list
+ * @head: the head of the list to free
+ *
+ * Dispose-list gets a local list with local inodes in it, so it doesn't
+ * need to worry about list corruption and SMP locks.
+ */
+static void dispose_list(struct list_head *head)
+{
+	int nr_disposed = 0;
+
+	while (!list_empty(head)) {
+		struct inode *inode;
+
+		inode = list_entry(head->next, struct inode, i_list);
+		list_del(&inode->i_list);
+
+		if (inode->i_data.nrpages)
+			truncate_inode_pages(&inode->i_data, 0);
+		clear_inode(inode);
+		destroy_inode(inode);
+		nr_disposed++;
+	}
+	spin_lock(&inode_lock);
+	inodes_stat.nr_inodes -= nr_disposed;
+	spin_unlock(&inode_lock);
+}
+
+/*
+ * Invalidate all inodes for a device.
+ */
+static int invalidate_list(struct list_head *head, struct list_head *dispose)
+{
+	struct list_head *next;
+	int busy = 0, count = 0;
+
+	next = head->next;
+	for (;;) {
+		struct list_head * tmp = next;
+		struct inode * inode;
+
+		/*
+		 * We can reschedule here without worrying about the list's
+		 * consistency because the per-sb list of inodes must not
+		 * change during umount anymore, and because iprune_sem keeps
+		 * shrink_icache_memory() away.
+		 */
+		cond_resched_lock(&inode_lock);
+
+		next = next->next;
+		if (tmp == head)
+			break;
+		inode = list_entry(tmp, struct inode, i_sb_list);
+		invalidate_inode_buffers(inode);
+		if (!atomic_read(&inode->i_count)) {
+			hlist_del_init(&inode->i_hash);
+			list_del(&inode->i_sb_list);
+			list_move(&inode->i_list, dispose);
+			inode->i_state |= I_FREEING;
+			count++;
+			continue;
+		}
+		busy = 1;
+	}
+	/* only unused inodes may be cached with i_count zero */
+	inodes_stat.nr_unused -= count;
+	return busy;
+}
+
+/*
+ * This is a two-stage process. First we collect all
+ * offending inodes onto the throw-away list, and in
+ * the second stage we actually dispose of them. This
+ * is because we don't want to sleep while messing
+ * with the global lists..
+ */
+ 
+/**
+ *	invalidate_inodes	- discard the inodes on a device
+ *	@sb: superblock
+ *
+ *	Discard all of the inodes for a given superblock. If the discard
+ *	fails because there are busy inodes then a non-zero value is returned.
+ *	If the discard is successful all the inodes have been discarded.
+ */
+int invalidate_inodes(struct super_block * sb)
+{
+	int busy;
+	LIST_HEAD(throw_away);
+
+	down(&iprune_sem);
+	spin_lock(&inode_lock);
+	busy = invalidate_list(&sb->s_inodes, &throw_away);
+	spin_unlock(&inode_lock);
+
+	dispose_list(&throw_away);
+	up(&iprune_sem);
+
+	return busy;
+}
+
+EXPORT_SYMBOL(invalidate_inodes);
+ 
+int __invalidate_device(struct block_device *bdev, int do_sync)
+{
+	struct super_block *sb;
+	int res;
+
+	if (do_sync)
+		fsync_bdev(bdev);
+
+	res = 0;
+	sb = get_super(bdev);
+	if (sb) {
+		/*
+		 * no need to lock the super, get_super holds the
+		 * read semaphore so the filesystem cannot go away
+		 * under us (->put_super runs with the write lock
+		 * held).
+		 */
+		shrink_dcache_sb(sb);
+		res = invalidate_inodes(sb);
+		drop_super(sb);
+	}
+	invalidate_bdev(bdev, 0);
+	return res;
+}
+
+EXPORT_SYMBOL(__invalidate_device);
+
+static int can_unuse(struct inode *inode)
+{
+	if (inode->i_state)
+		return 0;
+	if (inode_has_buffers(inode))
+		return 0;
+	if (atomic_read(&inode->i_count))
+		return 0;
+	if (inode->i_data.nrpages)
+		return 0;
+	return 1;
+}
+
+/*
+ * Scan nr_to_scan inodes on the unused list for freeable ones. They are moved
+ * to a temporary list and then freed outside inode_lock by dispose_list().
+ *
+ * Any inodes which are pinned purely because of attached pagecache have their
+ * pagecache removed.  We expect the final iput() on that inode to add it to
+ * the front of the inode_unused list.  So look for it there and if the
+ * inode is still freeable, proceed.  The right inode is found 99.9% of the
+ * time in testing on a 4-way.
+ *
+ * If the inode has metadata buffers attached to mapping->private_list then
+ * try to remove them.
+ */
+static void prune_icache(int nr_to_scan)
+{
+	LIST_HEAD(freeable);
+	int nr_pruned = 0;
+	int nr_scanned;
+	unsigned long reap = 0;
+
+	down(&iprune_sem);
+	spin_lock(&inode_lock);
+	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
+		struct inode *inode;
+
+		if (list_empty(&inode_unused))
+			break;
+
+		inode = list_entry(inode_unused.prev, struct inode, i_list);
+
+		if (inode->i_state || atomic_read(&inode->i_count)) {
+			list_move(&inode->i_list, &inode_unused);
+			continue;
+		}
+		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
+			__iget(inode);
+			spin_unlock(&inode_lock);
+			if (remove_inode_buffers(inode))
+				reap += invalidate_inode_pages(&inode->i_data);
+			iput(inode);
+			spin_lock(&inode_lock);
+
+			if (inode != list_entry(inode_unused.next,
+						struct inode, i_list))
+				continue;	/* wrong inode or list_empty */
+			if (!can_unuse(inode))
+				continue;
+		}
+		hlist_del_init(&inode->i_hash);
+		list_del_init(&inode->i_sb_list);
+		list_move(&inode->i_list, &freeable);
+		inode->i_state |= I_FREEING;
+		nr_pruned++;
+	}
+	inodes_stat.nr_unused -= nr_pruned;
+	spin_unlock(&inode_lock);
+
+	dispose_list(&freeable);
+	up(&iprune_sem);
+
+	if (current_is_kswapd())
+		mod_page_state(kswapd_inodesteal, reap);
+	else
+		mod_page_state(pginodesteal, reap);
+}
+
+/*
+ * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
+ * "unused" means that no dentries are referring to the inodes: the files are
+ * not open and the dcache references to those inodes have already been
+ * reclaimed.
+ *
+ * This function is passed the number of inodes to scan, and it returns the
+ * total number of remaining possibly-reclaimable inodes.
+ */
+static int shrink_icache_memory(int nr, unsigned int gfp_mask)
+{
+	if (nr) {
+		/*
+		 * Nasty deadlock avoidance.  We may hold various FS locks,
+		 * and we don't want to recurse into the FS that called us
+		 * in clear_inode() and friends..
+	 	 */
+		if (!(gfp_mask & __GFP_FS))
+			return -1;
+		prune_icache(nr);
+	}
+	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+}
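+
+/*
+ * Worked example of the return value above (a sketch, not part of the
+ * original source): with 20000 unused inodes and the default
+ * sysctl_vfs_cache_pressure of 100, the shrinker reports
+ * (20000 / 100) * 100 = 20000 reclaimable objects; doubling the sysctl
+ * to 200 doubles that figure, and with it the reclaim pressure on the
+ * inode cache relative to the page cache.
+ */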
+
+static void __wait_on_freeing_inode(struct inode *inode);
+/*
+ * Called with the inode lock held.
+ * NOTE: we are not increasing the inode refcount; you must call __iget()
+ * by hand after calling find_inode(). This simplifies iunique() and avoids
+ * adding an extra branch to the common code.
+ */
+static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
+{
+	struct hlist_node *node;
+	struct inode * inode = NULL;
+
+repeat:
+	hlist_for_each (node, head) { 
+		inode = hlist_entry(node, struct inode, i_hash);
+		if (inode->i_sb != sb)
+			continue;
+		if (!test(inode, data))
+			continue;
+		if (inode->i_state & (I_FREEING|I_CLEAR)) {
+			__wait_on_freeing_inode(inode);
+			goto repeat;
+		}
+		break;
+	}
+	return node ? inode : NULL;
+}
+
+/*
+ * find_inode_fast is the fast path version of find_inode, see the comment at
+ * iget_locked for details.
+ */
+static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
+{
+	struct hlist_node *node;
+	struct inode * inode = NULL;
+
+repeat:
+	hlist_for_each (node, head) {
+		inode = hlist_entry(node, struct inode, i_hash);
+		if (inode->i_ino != ino)
+			continue;
+		if (inode->i_sb != sb)
+			continue;
+		if (inode->i_state & (I_FREEING|I_CLEAR)) {
+			__wait_on_freeing_inode(inode);
+			goto repeat;
+		}
+		break;
+	}
+	return node ? inode : NULL;
+}
+
+/**
+ *	new_inode 	- obtain an inode
+ *	@sb: superblock
+ *
+ *	Allocates a new inode for the given superblock.
+ */
+struct inode *new_inode(struct super_block *sb)
+{
+	static unsigned long last_ino;
+	struct inode * inode;
+
+	spin_lock_prefetch(&inode_lock);
+	
+	inode = alloc_inode(sb);
+	if (inode) {
+		spin_lock(&inode_lock);
+		inodes_stat.nr_inodes++;
+		list_add(&inode->i_list, &inode_in_use);
+		list_add(&inode->i_sb_list, &sb->s_inodes);
+		inode->i_ino = ++last_ino;
+		inode->i_state = 0;
+		spin_unlock(&inode_lock);
+	}
+	return inode;
+}
+
+EXPORT_SYMBOL(new_inode);
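+
+/*
+ * Illustrative sketch (not part of the original source): pseudo
+ * filesystems with no backing store typically allocate their inodes this
+ * way and fill in the fields by hand, e.g.
+ *
+ *	struct inode *inode = new_inode(sb);
+ *	if (inode) {
+ *		inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
+ *		inode->i_atime = inode->i_mtime = inode->i_ctime =
+ *					current_fs_time(sb);
+ *	}
+ *
+ * Note that the i_ino assigned above is simply last_ino + 1; callers that
+ * need numbers free of collisions can overwrite it via iunique().
+ */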
+
+void unlock_new_inode(struct inode *inode)
+{
+	/*
+	 * This is special!  We do not need the spinlock
+	 * when clearing I_LOCK, because we're guaranteed
+	 * that nobody else tries to do anything about the
+	 * state of the inode when it is locked, as we
+	 * just created it (so there can be no old holders
+	 * that haven't tested I_LOCK).
+	 */
+	inode->i_state &= ~(I_LOCK|I_NEW);
+	wake_up_inode(inode);
+}
+
+EXPORT_SYMBOL(unlock_new_inode);
+
+/*
+ * This is called without the inode lock held.. Be careful.
+ *
+ * We no longer cache the sb_flags in i_flags - see fs.h
+ *	-- rmk@arm.uk.linux.org
+ */
+static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
+{
+	struct inode * inode;
+
+	inode = alloc_inode(sb);
+	if (inode) {
+		struct inode * old;
+
+		spin_lock(&inode_lock);
+		/* We released the lock, so.. */
+		old = find_inode(sb, head, test, data);
+		if (!old) {
+			if (set(inode, data))
+				goto set_failed;
+
+			inodes_stat.nr_inodes++;
+			list_add(&inode->i_list, &inode_in_use);
+			list_add(&inode->i_sb_list, &sb->s_inodes);
+			hlist_add_head(&inode->i_hash, head);
+			inode->i_state = I_LOCK|I_NEW;
+			spin_unlock(&inode_lock);
+
+			/* Return the locked inode with I_NEW set, the
+			 * caller is responsible for filling in the contents
+			 */
+			return inode;
+		}
+
+		/*
+		 * Uhhuh, somebody else created the same inode under
+		 * us. Use the old inode instead of the one we just
+		 * allocated.
+		 */
+		__iget(old);
+		spin_unlock(&inode_lock);
+		destroy_inode(inode);
+		inode = old;
+		wait_on_inode(inode);
+	}
+	return inode;
+
+set_failed:
+	spin_unlock(&inode_lock);
+	destroy_inode(inode);
+	return NULL;
+}
+
+/*
+ * get_new_inode_fast is the fast path version of get_new_inode, see the
+ * comment at iget_locked for details.
+ */
+static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
+{
+	struct inode * inode;
+
+	inode = alloc_inode(sb);
+	if (inode) {
+		struct inode * old;
+
+		spin_lock(&inode_lock);
+		/* We released the lock, so.. */
+		old = find_inode_fast(sb, head, ino);
+		if (!old) {
+			inode->i_ino = ino;
+			inodes_stat.nr_inodes++;
+			list_add(&inode->i_list, &inode_in_use);
+			list_add(&inode->i_sb_list, &sb->s_inodes);
+			hlist_add_head(&inode->i_hash, head);
+			inode->i_state = I_LOCK|I_NEW;
+			spin_unlock(&inode_lock);
+
+			/* Return the locked inode with I_NEW set, the
+			 * caller is responsible for filling in the contents
+			 */
+			return inode;
+		}
+
+		/*
+		 * Uhhuh, somebody else created the same inode under
+		 * us. Use the old inode instead of the one we just
+		 * allocated.
+		 */
+		__iget(old);
+		spin_unlock(&inode_lock);
+		destroy_inode(inode);
+		inode = old;
+		wait_on_inode(inode);
+	}
+	return inode;
+}
+
+static inline unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+/**
+ *	iunique - get a unique inode number
+ *	@sb: superblock
+ *	@max_reserved: highest reserved inode number
+ *
+ *	Obtain an inode number that is unique on the system for a given
+ *	superblock. This is used by file systems that have no natural
+ *	permanent inode numbering system. An inode number is returned that
+ *	is higher than the reserved limit but unique.
+ *
+ *	BUGS:
+ *	With a large number of inodes live on the file system this function
+ *	currently becomes quite slow.
+ */
+ino_t iunique(struct super_block *sb, ino_t max_reserved)
+{
+	static ino_t counter;
+	struct inode *inode;
+	struct hlist_head * head;
+	ino_t res;
+	spin_lock(&inode_lock);
+retry:
+	if (counter > max_reserved) {
+		head = inode_hashtable + hash(sb,counter);
+		res = counter++;
+		inode = find_inode_fast(sb, head, res);
+		if (!inode) {
+			spin_unlock(&inode_lock);
+			return res;
+		}
+	} else {
+		counter = max_reserved + 1;
+	}
+	goto retry;
+	
+}
+
+EXPORT_SYMBOL(iunique);
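+
+/*
+ * Illustrative sketch (not part of the original source): a filesystem
+ * with no natural inode numbering might combine new_inode() with
+ * iunique(), where MYFS_MAX_RESERVED_INO is a hypothetical constant for
+ * its highest reserved number:
+ *
+ *	inode = new_inode(sb);
+ *	if (inode)
+ *		inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);
+ *
+ * The returned number is guaranteed not to clash with any inode
+ * currently hashed for this superblock.
+ */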
+
+struct inode *igrab(struct inode *inode)
+{
+	spin_lock(&inode_lock);
+	if (!(inode->i_state & I_FREEING))
+		__iget(inode);
+	else
+		/*
+		 * Handle the case where s_op->clear_inode has not been
+		 * called yet and somebody calls igrab() while the inode
+		 * is being freed.
+		 */
+		inode = NULL;
+	spin_unlock(&inode_lock);
+	return inode;
+}
+
+EXPORT_SYMBOL(igrab);
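+
+/*
+ * Illustrative sketch (not part of the original source): code holding a
+ * bare inode pointer (for instance from a private list) that wants a
+ * real reference would do
+ *
+ *	struct inode *pinned = igrab(inode);
+ *	if (!pinned)
+ *		return;
+ *	...use pinned, then drop it with iput(pinned)...
+ *
+ * A NULL return means the inode is already being freed and must be left
+ * alone.
+ */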
+
+/**
+ * ifind - internal function, you want ilookup5() or iget5().
+ * @sb:		super block of file system to search
+ * @head:       the head of the list to search
+ * @test:	callback used for comparisons between inodes
+ * @data:	opaque data pointer to pass to @test
+ *
+ * ifind() searches for the inode specified by @data in the inode
+ * cache. This is a generalized version of ifind_fast() for file systems where
+ * the inode number is not sufficient for unique identification of an inode.
+ *
+ * If the inode is in the cache, the inode is returned with an incremented
+ * reference count.
+ *
+ * Otherwise NULL is returned.
+ *
+ * Note, @test is called with the inode_lock held, so can't sleep.
+ */
+static inline struct inode *ifind(struct super_block *sb,
+		struct hlist_head *head, int (*test)(struct inode *, void *),
+		void *data)
+{
+	struct inode *inode;
+
+	spin_lock(&inode_lock);
+	inode = find_inode(sb, head, test, data);
+	if (inode) {
+		__iget(inode);
+		spin_unlock(&inode_lock);
+		wait_on_inode(inode);
+		return inode;
+	}
+	spin_unlock(&inode_lock);
+	return NULL;
+}
+
+/**
+ * ifind_fast - internal function, you want ilookup() or iget().
+ * @sb:		super block of file system to search
+ * @head:       head of the list to search
+ * @ino:	inode number to search for
+ *
+ * ifind_fast() searches for the inode @ino in the inode cache. This is for
+ * file systems where the inode number is sufficient for unique identification
+ * of an inode.
+ *
+ * If the inode is in the cache, the inode is returned with an incremented
+ * reference count.
+ *
+ * Otherwise NULL is returned.
+ */
+static inline struct inode *ifind_fast(struct super_block *sb,
+		struct hlist_head *head, unsigned long ino)
+{
+	struct inode *inode;
+
+	spin_lock(&inode_lock);
+	inode = find_inode_fast(sb, head, ino);
+	if (inode) {
+		__iget(inode);
+		spin_unlock(&inode_lock);
+		wait_on_inode(inode);
+		return inode;
+	}
+	spin_unlock(&inode_lock);
+	return NULL;
+}
+
+/**
+ * ilookup5 - search for an inode in the inode cache
+ * @sb:		super block of file system to search
+ * @hashval:	hash value (usually inode number) to search for
+ * @test:	callback used for comparisons between inodes
+ * @data:	opaque data pointer to pass to @test
+ *
+ * ilookup5() uses ifind() to search for the inode specified by @hashval and
+ * @data in the inode cache. This is a generalized version of ilookup() for
+ * file systems where the inode number is not sufficient for unique
+ * identification of an inode.
+ *
+ * If the inode is in the cache, the inode is returned with an incremented
+ * reference count.
+ *
+ * Otherwise NULL is returned.
+ *
+ * Note, @test is called with the inode_lock held, so can't sleep.
+ */
+struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
+		int (*test)(struct inode *, void *), void *data)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+
+	return ifind(sb, head, test, data);
+}
+
+EXPORT_SYMBOL(ilookup5);
+
+/**
+ * ilookup - search for an inode in the inode cache
+ * @sb:		super block of file system to search
+ * @ino:	inode number to search for
+ *
+ * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
+ * This is for file systems where the inode number is sufficient for unique
+ * identification of an inode.
+ *
+ * If the inode is in the cache, the inode is returned with an incremented
+ * reference count.
+ *
+ * Otherwise NULL is returned.
+ */
+struct inode *ilookup(struct super_block *sb, unsigned long ino)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
+
+	return ifind_fast(sb, head, ino);
+}
+
+EXPORT_SYMBOL(ilookup);
+
+/**
+ * iget5_locked - obtain an inode from a mounted file system
+ * @sb:		super block of file system
+ * @hashval:	hash value (usually inode number) to get
+ * @test:	callback used for comparisons between inodes
+ * @set:	callback used to initialize a new struct inode
+ * @data:	opaque data pointer to pass to @test and @set
+ *
+ * This is iget() without the read_inode() portion of get_new_inode().
+ *
+ * iget5_locked() uses ifind() to search for the inode specified by @hashval
+ * and @data in the inode cache and if present it is returned with an increased
+ * reference count. This is a generalized version of iget_locked() for file
+ * systems where the inode number is not sufficient for unique identification
+ * of an inode.
+ *
+ * If the inode is not in cache, get_new_inode() is called to allocate a new
+ * inode and this is returned locked, hashed, and with the I_NEW flag set. The
+ * file system gets to fill it in before unlocking it via unlock_new_inode().
+ *
+ * Note both @test and @set are called with the inode_lock held, so can't sleep.
+ */
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+		int (*test)(struct inode *, void *),
+		int (*set)(struct inode *, void *), void *data)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *inode;
+
+	inode = ifind(sb, head, test, data);
+	if (inode)
+		return inode;
+	/*
+	 * get_new_inode() will do the right thing, re-trying the search
+	 * in case it had to block at any point.
+	 */
+	return get_new_inode(sb, head, test, set, data);
+}
+
+EXPORT_SYMBOL(iget5_locked);
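+
+/*
+ * Illustrative sketch (not part of the original source): a filesystem
+ * whose inodes cannot be identified by a single number would supply
+ * test/set callbacks roughly like the hypothetical ones below, where
+ * MYFS_I(), i_location and myfs_read_disk_inode() are stand-ins for its
+ * own private helpers:
+ *
+ *	static int myfs_test(struct inode *inode, void *data)
+ *	{
+ *		return MYFS_I(inode)->i_location == *(unsigned long *)data;
+ *	}
+ *	static int myfs_set(struct inode *inode, void *data)
+ *	{
+ *		MYFS_I(inode)->i_location = *(unsigned long *)data;
+ *		return 0;
+ *	}
+ *
+ *	inode = iget5_locked(sb, hashval, myfs_test, myfs_set, &location);
+ *	if (inode && (inode->i_state & I_NEW)) {
+ *		myfs_read_disk_inode(inode);
+ *		unlock_new_inode(inode);
+ *	}
+ *
+ * Remember that both callbacks run under inode_lock and so must not
+ * sleep or take sleeping locks.
+ */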
+
+/**
+ * iget_locked - obtain an inode from a mounted file system
+ * @sb:		super block of file system
+ * @ino:	inode number to get
+ *
+ * This is iget() without the read_inode() portion of get_new_inode_fast().
+ *
+ * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
+ * the inode cache and if present it is returned with an increased reference
+ * count. This is for file systems where the inode number is sufficient for
+ * unique identification of an inode.
+ *
+ * If the inode is not in cache, get_new_inode_fast() is called to allocate a
+ * new inode and this is returned locked, hashed, and with the I_NEW flag set.
+ * The file system gets to fill it in before unlocking it via
+ * unlock_new_inode().
+ */
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
+	struct inode *inode;
+
+	inode = ifind_fast(sb, head, ino);
+	if (inode)
+		return inode;
+	/*
+	 * get_new_inode_fast() will do the right thing, re-trying the search
+	 * in case it had to block at any point.
+	 */
+	return get_new_inode_fast(sb, head, ino);
+}
+
+EXPORT_SYMBOL(iget_locked);
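+
+/*
+ * Illustrative sketch (not part of the original source): the usual
+ * pattern in a filesystem lookup path, with myfs_read_disk_inode()
+ * standing in for whatever routine fills the in-core inode from disk:
+ *
+ *	inode = iget_locked(sb, ino);
+ *	if (!inode)
+ *		return ERR_PTR(-ENOMEM);
+ *	if (inode->i_state & I_NEW) {
+ *		myfs_read_disk_inode(inode);
+ *		unlock_new_inode(inode);
+ *	}
+ *	return inode;
+ */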
+
+/**
+ *	__insert_inode_hash - hash an inode
+ *	@inode: unhashed inode
+ *	@hashval: unsigned long value used to locate this object in the
+ *		inode_hashtable.
+ *
+ *	Add an inode to the inode hash for this superblock.
+ */
+void __insert_inode_hash(struct inode *inode, unsigned long hashval)
+{
+	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
+	spin_lock(&inode_lock);
+	hlist_add_head(&inode->i_hash, head);
+	spin_unlock(&inode_lock);
+}
+
+EXPORT_SYMBOL(__insert_inode_hash);
+
+/**
+ *	remove_inode_hash - remove an inode from the hash
+ *	@inode: inode to unhash
+ *
+ *	Remove an inode from the inode hash table.
+ */
+void remove_inode_hash(struct inode *inode)
+{
+	spin_lock(&inode_lock);
+	hlist_del_init(&inode->i_hash);
+	spin_unlock(&inode_lock);
+}
+
+EXPORT_SYMBOL(remove_inode_hash);
+
+/*
+ * Tell the filesystem that this inode is no longer of any interest and should
+ * be completely destroyed.
+ *
+ * We leave the inode in the inode hash table until *after* the filesystem's
+ * ->delete_inode completes.  This ensures that an iget (such as nfsd might
+ * instigate) will always find up-to-date information either in the hash or on
+ * disk.
+ *
+ * I_FREEING is set so that no-one will take a new reference to the inode while
+ * it is being deleted.
+ */
+void generic_delete_inode(struct inode *inode)
+{
+	struct super_operations *op = inode->i_sb->s_op;
+
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state|=I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+
+	if (inode->i_data.nrpages)
+		truncate_inode_pages(&inode->i_data, 0);
+
+	security_inode_delete(inode);
+
+	if (op->delete_inode) {
+		void (*delete)(struct inode *) = op->delete_inode;
+		if (!is_bad_inode(inode))
+			DQUOT_INIT(inode);
+		/* s_op->delete_inode internally recalls clear_inode() */
+		delete(inode);
+	} else
+		clear_inode(inode);
+	spin_lock(&inode_lock);
+	hlist_del_init(&inode->i_hash);
+	spin_unlock(&inode_lock);
+	wake_up_inode(inode);
+	if (inode->i_state != I_CLEAR)
+		BUG();
+	destroy_inode(inode);
+}
+
+EXPORT_SYMBOL(generic_delete_inode);
+
+static void generic_forget_inode(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	if (!hlist_unhashed(&inode->i_hash)) {
+		if (!(inode->i_state & (I_DIRTY|I_LOCK)))
+			list_move(&inode->i_list, &inode_unused);
+		inodes_stat.nr_unused++;
+		spin_unlock(&inode_lock);
+		if (!sb || (sb->s_flags & MS_ACTIVE))
+			return;
+		write_inode_now(inode, 1);
+		spin_lock(&inode_lock);
+		inodes_stat.nr_unused--;
+		hlist_del_init(&inode->i_hash);
+	}
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state|=I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+	if (inode->i_data.nrpages)
+		truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+	destroy_inode(inode);
+}
+
+/*
+ * Normal UNIX filesystem behaviour: delete the
+ * inode when the usage count drops to zero, and
+ * i_nlink is zero.
+ */
+static void generic_drop_inode(struct inode *inode)
+{
+	if (!inode->i_nlink)
+		generic_delete_inode(inode);
+	else
+		generic_forget_inode(inode);
+}
+
+/*
+ * Called when we're dropping the last reference
+ * to an inode. 
+ *
+ * Call the FS "drop()" function, defaulting to
+ * the legacy UNIX filesystem behaviour..
+ *
+ * NOTE! NOTE! NOTE! We're called with the inode lock
+ * held, and the drop function is supposed to release
+ * the lock!
+ */
+static inline void iput_final(struct inode *inode)
+{
+	struct super_operations *op = inode->i_sb->s_op;
+	void (*drop)(struct inode *) = generic_drop_inode;
+
+	if (op && op->drop_inode)
+		drop = op->drop_inode;
+	drop(inode);
+}
+
+/**
+ *	iput	- put an inode 
+ *	@inode: inode to put
+ *
+ *	Puts an inode, dropping its usage count. If the inode use count hits
+ *	zero, the inode is then freed and may also be destroyed.
+ *
+ *	Consequently, iput() can sleep.
+ */
+void iput(struct inode *inode)
+{
+	if (inode) {
+		struct super_operations *op = inode->i_sb->s_op;
+
+		BUG_ON(inode->i_state == I_CLEAR);
+
+		if (op && op->put_inode)
+			op->put_inode(inode);
+
+		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+			iput_final(inode);
+	}
+}
+
+EXPORT_SYMBOL(iput);
+
+/**
+ *	bmap	- find a block number in a file
+ *	@inode: inode of file
+ *	@block: block to find
+ *
+ *	Returns the block number on the device holding the inode that
+ *	corresponds to the requested block of the file.  That is, if asked
+ *	for block 4 of inode 1, the function returns the disk block,
+ *	relative to the start of the disk, that holds that block of the
+ *	file.
+ */
+sector_t bmap(struct inode * inode, sector_t block)
+{
+	sector_t res = 0;
+	if (inode->i_mapping->a_ops->bmap)
+		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
+	return res;
+}
+
+EXPORT_SYMBOL(bmap);
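+
+/*
+ * Worked example (not part of the original source): if logical block 4
+ * of a file happens to live in on-disk block 12345, bmap(inode, 4)
+ * returns 12345.  A return of 0 can mean either a hole or an unmappable
+ * block, so callers cannot tell the two apart.  The FIBMAP ioctl in
+ * fs/ioctl.c is the user-visible front end to the same ->bmap()
+ * address_space operation.
+ */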
+
+/**
+ *	update_atime	-	update the access time
+ *	@inode: inode accessed
+ *
+ *	Update the accessed time on an inode and mark it for writeback.
+ *	This function automatically handles read only file systems and media,
+ *	as well as the "noatime" flag and inode specific "noatime" markers.
+ */
+void update_atime(struct inode *inode)
+{
+	struct timespec now;
+
+	if (IS_NOATIME(inode))
+		return;
+	if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
+		return;
+	if (IS_RDONLY(inode))
+		return;
+
+	now = current_fs_time(inode->i_sb);
+	if (!timespec_equal(&inode->i_atime, &now)) {
+		inode->i_atime = now;
+		mark_inode_dirty_sync(inode);
+	}
+}
+
+EXPORT_SYMBOL(update_atime);
+
+/**
+ *	inode_update_time	-	update mtime and, optionally, ctime
+ *	@inode: inode accessed
+ *	@ctime_too: update ctime too
+ *
+ *	Update the mtime on an inode and mark it for writeback.
+ *	When @ctime_too is set, update the ctime as well.
+ */
+
+void inode_update_time(struct inode *inode, int ctime_too)
+{
+	struct timespec now;
+	int sync_it = 0;
+
+	if (IS_NOCMTIME(inode))
+		return;
+	if (IS_RDONLY(inode))
+		return;
+
+	now = current_fs_time(inode->i_sb);
+	if (!timespec_equal(&inode->i_mtime, &now))
+		sync_it = 1;
+	inode->i_mtime = now;
+
+	if (ctime_too) {
+		if (!timespec_equal(&inode->i_ctime, &now))
+			sync_it = 1;
+		inode->i_ctime = now;
+	}
+	if (sync_it)
+		mark_inode_dirty_sync(inode);
+}
+
+EXPORT_SYMBOL(inode_update_time);
+
+int inode_needs_sync(struct inode *inode)
+{
+	if (IS_SYNC(inode))
+		return 1;
+	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
+		return 1;
+	return 0;
+}
+
+EXPORT_SYMBOL(inode_needs_sync);
+
+/*
+ *	Quota functions that want to walk the inode lists..
+ */
+#ifdef CONFIG_QUOTA
+
+/* Function back in dquot.c */
+int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
+
+void remove_dquot_ref(struct super_block *sb, int type,
+			struct list_head *tofree_head)
+{
+	struct inode *inode;
+
+	if (!sb->dq_op)
+		return;	/* nothing to do */
+	spin_lock(&inode_lock);	/* This lock is for inodes code */
+
+	/*
+	 * We don't have to lock against the quota code - the IS_NOQUOTA test
+	 * is just for speedup...
+	 */
+	list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
+		if (!IS_NOQUOTA(inode))
+			remove_inode_dquot_ref(inode, type, tofree_head);
+
+	spin_unlock(&inode_lock);
+}
+
+#endif
+
+int inode_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
+/*
+ * If we try to find an inode in the inode hash while it is being deleted, we
+ * have to wait until the filesystem completes its deletion before reporting
+ * that it isn't found.  This is because iget will immediately call
+ * ->read_inode, and we want to be sure that evidence of the deletion is found
+ * by ->read_inode.
+ * This is called with inode_lock held.
+ */
+static void __wait_on_freeing_inode(struct inode *inode)
+{
+	wait_queue_head_t *wq;
+	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
+
+	/*
+	 * I_FREEING and I_CLEAR are cleared in process context under
+	 * inode_lock, so we have to give the tasks who would clear them
+	 * a chance to run and acquire inode_lock.
+	 */
+	if (!(inode->i_state & I_LOCK)) {
+		spin_unlock(&inode_lock);
+		yield();
+		spin_lock(&inode_lock);
+		return;
+	}
+	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+	spin_unlock(&inode_lock);
+	schedule();
+	finish_wait(wq, &wait.wait);
+	spin_lock(&inode_lock);
+}
+
+void wake_up_inode(struct inode *inode)
+{
+	/*
+	 * Prevent speculative execution through spin_unlock(&inode_lock);
+	 */
+	smp_mb();
+	wake_up_bit(&inode->i_state, __I_LOCK);
+}
+
+static __initdata unsigned long ihash_entries;
+static int __init set_ihash_entries(char *str)
+{
+	if (!str)
+		return 0;
+	ihash_entries = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("ihash_entries=", set_ihash_entries);
+
+/*
+ * Initialize the inode hash table.
+ */
+void __init inode_init_early(void)
+{
+	int loop;
+
+	/* If hashes are distributed across NUMA nodes, defer
+	 * hash allocation until vmalloc space is available.
+	 */
+	if (hashdist)
+		return;
+
+	inode_hashtable =
+		alloc_large_system_hash("Inode-cache",
+					sizeof(struct hlist_head),
+					ihash_entries,
+					14,
+					HASH_EARLY,
+					&i_hash_shift,
+					&i_hash_mask,
+					0);
+
+	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+		INIT_HLIST_HEAD(&inode_hashtable[loop]);
+}
+
+void __init inode_init(unsigned long mempages)
+{
+	int loop;
+
+	/* inode slab cache */
+	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
+				0, SLAB_PANIC, init_once, NULL);
+	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+
+	/* Hash may have been set up in inode_init_early */
+	if (!hashdist)
+		return;
+
+	inode_hashtable =
+		alloc_large_system_hash("Inode-cache",
+					sizeof(struct hlist_head),
+					ihash_entries,
+					14,
+					0,
+					&i_hash_shift,
+					&i_hash_mask,
+					0);
+
+	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+		INIT_HLIST_HEAD(&inode_hashtable[loop]);
+}
+
+void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
+{
+	inode->i_mode = mode;
+	if (S_ISCHR(mode)) {
+		inode->i_fop = &def_chr_fops;
+		inode->i_rdev = rdev;
+	} else if (S_ISBLK(mode)) {
+		inode->i_fop = &def_blk_fops;
+		inode->i_rdev = rdev;
+	} else if (S_ISFIFO(mode))
+		inode->i_fop = &def_fifo_fops;
+	else if (S_ISSOCK(mode))
+		inode->i_fop = &bad_sock_fops;
+	else
+		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
+		       mode);
+}
+EXPORT_SYMBOL(init_special_inode);
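+
+/*
+ * Illustrative sketch (not part of the original source): a filesystem's
+ * mknod() method typically ends with something like
+ *
+ *	init_special_inode(inode, mode, rdev);
+ *	mark_inode_dirty(inode);
+ *
+ * so that character and block device nodes pick up the default
+ * file_operations and record their device number in i_rdev.
+ */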
diff --git a/fs/ioctl.c b/fs/ioctl.c
new file mode 100644
index 0000000..5692091
--- /dev/null
+++ b/fs/ioctl.c
@@ -0,0 +1,186 @@
+/*
+ *  linux/fs/ioctl.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/ioctls.h>
+
+static long do_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int error = -ENOTTY;
+
+	if (!filp->f_op)
+		goto out;
+
+	if (filp->f_op->unlocked_ioctl) {
+		error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
+		if (error == -ENOIOCTLCMD)
+			error = -EINVAL;
+		goto out;
+	} else if (filp->f_op->ioctl) {
+		lock_kernel();
+		error = filp->f_op->ioctl(filp->f_dentry->d_inode,
+					  filp, cmd, arg);
+		unlock_kernel();
+	}
+
+ out:
+	return error;
+}
+
+static int file_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int error;
+	int block;
+	struct inode * inode = filp->f_dentry->d_inode;
+	int __user *p = (int __user *)arg;
+
+	switch (cmd) {
+		case FIBMAP:
+		{
+			struct address_space *mapping = filp->f_mapping;
+			int res;
+			/* do we support this mess? */
+			if (!mapping->a_ops->bmap)
+				return -EINVAL;
+			if (!capable(CAP_SYS_RAWIO))
+				return -EPERM;
+			if ((error = get_user(block, p)) != 0)
+				return error;
+
+			lock_kernel();
+			res = mapping->a_ops->bmap(mapping, block);
+			unlock_kernel();
+			return put_user(res, p);
+		}
+		case FIGETBSZ:
+			if (inode->i_sb == NULL)
+				return -EBADF;
+			return put_user(inode->i_sb->s_blocksize, p);
+		case FIONREAD:
+			return put_user(i_size_read(inode) - filp->f_pos, p);
+	}
+
+	return do_ioctl(filp, cmd, arg);
+}
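+
+/*
+ * Illustrative user-space sketch (not part of the original source): the
+ * FIBMAP case above is what backs a call such as
+ *
+ *	int blk = 4;
+ *	if (ioctl(fd, FIBMAP, &blk) == 0)
+ *		printf("file block 4 lives in on-disk block %d\n", blk);
+ *
+ * The caller must have CAP_SYS_RAWIO, and fd must refer to a regular
+ * file on a filesystem whose address_space provides ->bmap().
+ */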
+
+/*
+ * When you add any new common ioctls to the switches above and below
+ * please update compat_sys_ioctl() too.
+ *
+ * vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
+ * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
+ */
+int vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	unsigned int flag;
+	int on, error = 0;
+
+	switch (cmd) {
+		case FIOCLEX:
+			set_close_on_exec(fd, 1);
+			break;
+
+		case FIONCLEX:
+			set_close_on_exec(fd, 0);
+			break;
+
+		case FIONBIO:
+			if ((error = get_user(on, (int __user *)arg)) != 0)
+				break;
+			flag = O_NONBLOCK;
+#ifdef __sparc__
+			/* SunOS compatibility item. */
+			if(O_NONBLOCK != O_NDELAY)
+				flag |= O_NDELAY;
+#endif
+			if (on)
+				filp->f_flags |= flag;
+			else
+				filp->f_flags &= ~flag;
+			break;
+
+		case FIOASYNC:
+			if ((error = get_user(on, (int __user *)arg)) != 0)
+				break;
+			flag = on ? FASYNC : 0;
+
+			/* Did FASYNC state change ? */
+			if ((flag ^ filp->f_flags) & FASYNC) {
+				if (filp->f_op && filp->f_op->fasync) {
+					lock_kernel();
+					error = filp->f_op->fasync(fd, filp, on);
+					unlock_kernel();
+				}
+				else error = -ENOTTY;
+			}
+			if (error != 0)
+				break;
+
+			if (on)
+				filp->f_flags |= FASYNC;
+			else
+				filp->f_flags &= ~FASYNC;
+			break;
+
+		case FIOQSIZE:
+			if (S_ISDIR(filp->f_dentry->d_inode->i_mode) ||
+			    S_ISREG(filp->f_dentry->d_inode->i_mode) ||
+			    S_ISLNK(filp->f_dentry->d_inode->i_mode)) {
+				loff_t res = inode_get_bytes(filp->f_dentry->d_inode);
+				error = copy_to_user((loff_t __user *)arg, &res, sizeof(res)) ? -EFAULT : 0;
+			}
+			else
+				error = -ENOTTY;
+			break;
+		default:
+			if (S_ISREG(filp->f_dentry->d_inode->i_mode))
+				error = file_ioctl(filp, cmd, arg);
+			else
+				error = do_ioctl(filp, cmd, arg);
+			break;
+	}
+	return error;
+}
+
+asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct file * filp;
+	int error = -EBADF;
+	int fput_needed;
+
+	filp = fget_light(fd, &fput_needed);
+	if (!filp)
+		goto out;
+
+	error = security_file_ioctl(filp, cmd, arg);
+	if (error)
+		goto out_fput;
+
+	error = vfs_ioctl(filp, fd, cmd, arg);
+ out_fput:
+	fput_light(filp, fput_needed);
+ out:
+	return error;
+}
+
+/*
+ * Platforms implementing 32 bit compatibility ioctl handlers in
+ * modules need this exported
+ */
+#ifdef CONFIG_COMPAT
+EXPORT_SYMBOL(sys_ioctl);
+#endif
diff --git a/fs/isofs/Makefile b/fs/isofs/Makefile
new file mode 100644
index 0000000..bf162f0
--- /dev/null
+++ b/fs/isofs/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux isofs filesystem routines.
+#
+
+obj-$(CONFIG_ISO9660_FS) += isofs.o
+
+isofs-objs-y 			:= namei.o inode.o dir.o util.o rock.o export.o
+isofs-objs-$(CONFIG_JOLIET)	+= joliet.o
+isofs-objs-$(CONFIG_ZISOFS)	+= compress.o
+isofs-objs			:= $(isofs-objs-y)
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
new file mode 100644
index 0000000..fb42c3f
--- /dev/null
+++ b/fs/isofs/compress.c
@@ -0,0 +1,359 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *   
+ *   Copyright 2001 H. Peter Anvin - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * linux/fs/isofs/compress.c
+ *
+ * Transparent decompression of files on an iso9660 filesystem
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/iso_fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/nls.h>
+#include <linux/ctype.h>
+#include <linux/smp_lock.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+#include <linux/buffer_head.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "zisofs.h"
+
+/* This should probably be global. */
+static char zisofs_sink_page[PAGE_CACHE_SIZE];
+
+/*
+ * The zlib decompression workspace and the semaphore protecting it.
+ * Allocating the workspace up front avoids allocation failures at
+ * block-decompression time.
+ */
+static void *zisofs_zlib_workspace;
+static struct semaphore zisofs_zlib_semaphore;
+
+/*
+ * When decompressing, we typically obtain more than one page
+ * per reference.  We inject the additional pages into the page
+ * cache as a form of readahead.
+ */
+static int zisofs_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned int maxpage, xpage, fpage, blockindex;
+	unsigned long offset;
+	unsigned long blockptr, blockendptr, cstart, cend, csize;
+	struct buffer_head *bh, *ptrbh[2];
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
+	unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
+	unsigned long bufmask  = bufsize - 1;
+	int err = -EIO;
+	int i;
+	unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
+	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
+	/* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
+	unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
+	unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
+	unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
+	struct page *pages[zisofs_block_pages];
+	unsigned long index = page->index;
+	int indexblocks;
+
+	/* We have already been given one page, this is the one
+	   we must do. */
+	xpage = index & zisofs_block_page_mask;
+	pages[xpage] = page;
+ 
+	/* The remaining pages need to be allocated and inserted */
+	offset = index & ~zisofs_block_page_mask;
+	blockindex = offset >> zisofs_block_page_shift;
+	maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	maxpage = min(zisofs_block_pages, maxpage-offset);
+
+	for ( i = 0 ; i < maxpage ; i++, offset++ ) {
+		if ( i != xpage ) {
+			pages[i] = grab_cache_page_nowait(mapping, offset);
+		}
+		page = pages[i];
+		if ( page ) {
+			ClearPageError(page);
+			kmap(page);
+		}
+	}
+
+	/* This is the last page filled, plus one; used in case of abort. */
+	fpage = 0;
+
+	/* Find the pointer to this specific chunk */
+	/* Note: we're not using isonum_731() here because the data is known to be aligned */
+	/* Note: header_size is in 32-bit words (4 bytes) */
+	blockptr = (header_size + blockindex) << 2;
+	blockendptr = blockptr + 4;
+
+	indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
+	ptrbh[0] = ptrbh[1] = NULL;
+
+	if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
+		if ( ptrbh[0] ) brelse(ptrbh[0]);
+		printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
+		       inode->i_ino, blockptr >> bufshift);
+		goto eio;
+	}
+	ll_rw_block(READ, indexblocks, ptrbh);
+
+	bh = ptrbh[0];
+	if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
+		printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
+		       inode->i_ino, blockptr >> bufshift);
+		if ( ptrbh[1] )
+			brelse(ptrbh[1]);
+		goto eio;
+	}
+	cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));
+
+	if ( indexblocks == 2 ) {
+		/* We just crossed a block boundary.  Switch to the next block */
+		brelse(bh);
+		bh = ptrbh[1];
+		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
+			printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
+			       inode->i_ino, blockendptr >> bufshift);
+			goto eio;
+		}
+	}
+	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
+	brelse(bh);
+
+	csize = cend-cstart;
+
+	/* Now pages[] contains an array of pages, any of which can be NULL,
+	   and we hold the locks on all of them.  We should now read the data
+	   and release the pages.  Where an entry is NULL, the decompressed
+	   data for that particular page is discarded. */
+	
+	if ( csize == 0 ) {
+		/* This data block is empty. */
+
+		for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
+			if ( (page = pages[fpage]) != NULL ) {
+				memset(page_address(page), 0, PAGE_CACHE_SIZE);
+				
+				flush_dcache_page(page);
+				SetPageUptodate(page);
+				kunmap(page);
+				unlock_page(page);
+				if ( fpage == xpage )
+					err = 0; /* The critical page */
+				else
+					page_cache_release(page);
+			}
+		}
+	} else {
+		/* This data block is compressed. */
+		z_stream stream;
+		int bail = 0, left_out = -1;
+		int zerr;
+		int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
+		int haveblocks;
+		struct buffer_head *bhs[needblocks+1];
+		struct buffer_head **bhptr;
+
+		/* Because zlib is not thread-safe, do all the I/O at the top. */
+
+		blockptr = cstart >> bufshift;
+		memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
+		haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
+		ll_rw_block(READ, haveblocks, bhs);
+
+		bhptr = &bhs[0];
+		bh = *bhptr++;
+
+		/* First block is special since it may be fractional.
+		   We also wait for it before grabbing the zlib
+		   semaphore; odds are that the subsequent blocks are
+		   going to come in in short order so we don't hold
+		   the zlib semaphore longer than necessary. */
+
+		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
+			printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
+			       fpage, xpage, csize);
+			goto b_eio;
+		}
+		stream.next_in  = bh->b_data + (cstart & bufmask);
+		stream.avail_in = min(bufsize-(cstart & bufmask), csize);
+		csize -= stream.avail_in;
+
+		stream.workspace = zisofs_zlib_workspace;
+		down(&zisofs_zlib_semaphore);
+		
+		zerr = zlib_inflateInit(&stream);
+		if ( zerr != Z_OK ) {
+			if ( err && zerr == Z_MEM_ERROR )
+				err = -ENOMEM;
+			printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
+			       zerr);
+			goto z_eio;
+		}
+
+		while ( !bail && fpage < maxpage ) {
+			page = pages[fpage];
+			if ( page )
+				stream.next_out = page_address(page);
+			else
+				stream.next_out = (void *)&zisofs_sink_page;
+			stream.avail_out = PAGE_CACHE_SIZE;
+
+			while ( stream.avail_out ) {
+				int ao, ai;
+				if ( stream.avail_in == 0 && left_out ) {
+					if ( !csize ) {
+						printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
+						bail = 1;
+						break;
+					} else {
+						bh = *bhptr++;
+						if ( !bh ||
+						     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
+							/* Reached an EIO */
+ 							printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
+							       fpage, xpage, csize);
+							       
+							bail = 1;
+							break;
+						}
+						stream.next_in = bh->b_data;
+						stream.avail_in = min(csize,bufsize);
+						csize -= stream.avail_in;
+					}
+				}
+				ao = stream.avail_out;  ai = stream.avail_in;
+				zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
+				left_out = stream.avail_out;
+				if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
+					continue;
+				if ( zerr != Z_OK ) {
+					/* EOF, error, or trying to read beyond end of input */
+					if ( err && zerr == Z_MEM_ERROR )
+						err = -ENOMEM;
+					if ( zerr != Z_STREAM_END )
+						printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
+						       zerr, inode->i_ino, index,
+						       fpage, xpage,
+						       stream.avail_in, stream.avail_out,
+						       ai, ao);
+					bail = 1;
+					break;
+				}
+			}
+
+			if ( stream.avail_out && zerr == Z_STREAM_END ) {
+				/* Fractional page written before EOF.  This may
+				   be the last page in the file. */
+				memset(stream.next_out, 0, stream.avail_out);
+				stream.avail_out = 0;
+			}
+
+			if ( !stream.avail_out ) {
+				/* This page completed */
+				if ( page ) {
+					flush_dcache_page(page);
+					SetPageUptodate(page);
+					kunmap(page);
+					unlock_page(page);
+					if ( fpage == xpage )
+						err = 0; /* The critical page */
+					else
+						page_cache_release(page);
+				}
+				fpage++;
+			}
+		}
+		zlib_inflateEnd(&stream);
+
+	z_eio:
+		up(&zisofs_zlib_semaphore);
+
+	b_eio:
+		for ( i = 0 ; i < haveblocks ; i++ ) {
+			if ( bhs[i] )
+				brelse(bhs[i]);
+		}
+	}
+
+eio:
+
+	/* Release any residual pages, do not SetPageUptodate */
+	while ( fpage < maxpage ) {
+		page = pages[fpage];
+		if ( page ) {
+			flush_dcache_page(page);
+			if ( fpage == xpage )
+				SetPageError(page);
+			kunmap(page);
+			unlock_page(page);
+			if ( fpage != xpage )
+				page_cache_release(page);
+		}
+		fpage++;
+	}			
+
+	/* At this point, err contains 0 or -EIO depending on the "critical" page */
+	return err;
+}
+
+struct address_space_operations zisofs_aops = {
+	.readpage = zisofs_readpage,
+	/* No sync_page operation supported? */
+	/* No bmap operation supported */
+};
+
+static int initialized;
+
+int __init zisofs_init(void)
+{
+	if ( initialized ) {
+		printk("zisofs_init: called more than once\n");
+		return 0;
+	}
+
+	zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
+	if ( !zisofs_zlib_workspace )
+		return -ENOMEM;
+	init_MUTEX(&zisofs_zlib_semaphore);
+
+	initialized = 1;
+	return 0;
+}
+
+void zisofs_cleanup(void)
+{
+	if ( !initialized ) {
+		printk("zisofs_cleanup: called without initialization\n");
+		return;
+	}
+
+	vfree(zisofs_zlib_workspace);
+	initialized = 0;
+}
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
new file mode 100644
index 0000000..14d86de
--- /dev/null
+++ b/fs/isofs/dir.c
@@ -0,0 +1,280 @@
+/*
+ *  linux/fs/isofs/dir.c
+ *
+ *  (C) 1992, 1993, 1994  Eric Youngdale Modified for ISO 9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ *
+ *  Steve Beynon		       : Missing last directory entries fixed
+ *  (stephen@askone.demon.co.uk)      : 21st June 1996
+ * 
+ *  isofs directory handling functions
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/iso_fs.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/config.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include <asm/uaccess.h>
+
+static int isofs_readdir(struct file *, void *, filldir_t);
+
+struct file_operations isofs_dir_operations =
+{
+	.read		= generic_read_dir,
+	.readdir	= isofs_readdir,
+};
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations isofs_dir_inode_operations =
+{
+	.lookup		= isofs_lookup,
+};
+
+int isofs_name_translate(struct iso_directory_record *de, char *new, struct inode *inode)
+{
+	char * old = de->name;
+	int len = de->name_len[0];
+	int i;
+			
+	for (i = 0; i < len; i++) {
+		unsigned char c = old[i];
+		if (!c)
+			break;
+
+		if (c >= 'A' && c <= 'Z')
+			c |= 0x20;	/* lower case */
+
+		/* Drop trailing '.;1' (ISO 9660:1988 7.5.1 requires period) */
+		if (c == '.' && i == len - 3 && old[i + 1] == ';' && old[i + 2] == '1')
+			break;
+
+		/* Drop trailing ';1' */
+		if (c == ';' && i == len - 2 && old[i + 1] == '1')
+			break;
+
+		/* Convert remaining ';' to '.' */
+		/* Also '/' to '.' (broken Acorn-generated ISO9660 images) */
+		if (c == ';' || c == '/')
+			c = '.';
+
+		new[i] = c;
+	}
+	return i;
+}
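+
+/*
+ * Worked example (not part of the original source): the on-disc name
+ * "README.TXT;1" comes out of the loop above as "readme.txt"; case is
+ * folded and the ";1" version suffix is dropped.  A name such as
+ * "FOO.;1" also loses the trailing period and becomes just "foo".
+ */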
+
+/* Acorn extensions written by Matthew Wilcox <willy@bofh.ai> 1998 */
+int get_acorn_filename(struct iso_directory_record * de,
+			    char * retname, struct inode * inode)
+{
+	int std;
+	unsigned char * chr;
+	int retnamlen = isofs_name_translate(de, retname, inode);
+	if (retnamlen == 0) return 0;
+	std = sizeof(struct iso_directory_record) + de->name_len[0];
+	if (std & 1) std++;
+	if ((*((unsigned char *) de) - std) != 32) return retnamlen;
+	chr = ((unsigned char *) de) + std;
+	if (strncmp(chr, "ARCHIMEDES", 10)) return retnamlen;
+	if ((*retname == '_') && ((chr[19] & 1) == 1)) *retname = '!';
+	if (((de->flags[0] & 2) == 0) && (chr[13] == 0xff)
+		&& ((chr[12] & 0xf0) == 0xf0))
+	{
+		retname[retnamlen] = ',';
+		sprintf(retname+retnamlen+1, "%3.3x",
+			((chr[12] & 0xf) << 8) | chr[11]);
+		retnamlen += 4;
+	}
+	return retnamlen;
+}
+
+/*
+ * This should _really_ be cleaned up some day..
+ */
+static int do_isofs_readdir(struct inode *inode, struct file *filp,
+		void *dirent, filldir_t filldir,
+		char * tmpname, struct iso_directory_record * tmpde)
+{
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
+	unsigned char bufbits = ISOFS_BUFFER_BITS(inode);
+	unsigned long block, offset, block_saved, offset_saved;
+	unsigned long inode_number = 0;	/* Quiet GCC */
+	struct buffer_head *bh = NULL;
+	int len;
+	int map;
+	int first_de = 1;
+	char *p = NULL;		/* Quiet GCC */
+	struct iso_directory_record *de;
+	struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
+
+	offset = filp->f_pos & (bufsize - 1);
+	block = filp->f_pos >> bufbits;
+
+	while (filp->f_pos < inode->i_size) {
+		int de_len;
+
+		if (!bh) {
+			bh = isofs_bread(inode, block);
+			if (!bh)
+				return 0;
+		}
+
+		de = (struct iso_directory_record *) (bh->b_data + offset);
+
+		de_len = *(unsigned char *) de;
+
+		/* If the length byte is zero, we should move on to the next
+		   CDROM sector.  If we are at the end of the directory, we
+		   kick out of the while loop. */
+
+		if (de_len == 0) {
+			brelse(bh);
+			bh = NULL;
+			filp->f_pos = (filp->f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
+			block = filp->f_pos >> bufbits;
+			offset = 0;
+			continue;
+		}
+
+		block_saved = block;
+		offset_saved = offset;
+		offset += de_len;
+
+		/* Make sure we have a full directory entry */
+		if (offset >= bufsize) {
+			int slop = bufsize - offset + de_len;
+			memcpy(tmpde, de, slop);
+			offset &= bufsize - 1;
+			block++;
+			brelse(bh);
+			bh = NULL;
+			if (offset) {
+				bh = isofs_bread(inode, block);
+				if (!bh)
+					return 0;
+				memcpy((void *) tmpde + slop, bh->b_data, offset);
+			}
+			de = tmpde;
+		}
+
+		if (first_de) {
+			isofs_normalize_block_and_offset(de,
+							 &block_saved,
+							 &offset_saved);
+			inode_number = isofs_get_ino(block_saved,
+						     offset_saved,
+						     bufbits);
+		}
+
+		if (de->flags[-sbi->s_high_sierra] & 0x80) {
+			first_de = 0;
+			filp->f_pos += de_len;
+			continue;
+		}
+		first_de = 1;
+
+		/* Handle the case of the '.' directory */
+		if (de->name_len[0] == 1 && de->name[0] == 0) {
+			if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
+				break;
+			filp->f_pos += de_len;
+			continue;
+		}
+
+		len = 0;
+
+		/* Handle the case of the '..' directory */
+		if (de->name_len[0] == 1 && de->name[0] == 1) {
+			inode_number = parent_ino(filp->f_dentry);
+			if (filldir(dirent, "..", 2, filp->f_pos, inode_number, DT_DIR) < 0)
+				break;
+			filp->f_pos += de_len;
+			continue;
+		}
+
+		/* Handle everything else.  Do name translation if there
+		   is no Rock Ridge NM field. */
+		if (sbi->s_unhide == 'n') {
+			/* Do not report hidden or associated files */
+			if (de->flags[-sbi->s_high_sierra] & 5) {
+				filp->f_pos += de_len;
+				continue;
+			}
+		}
+
+		map = 1;
+		if (sbi->s_rock) {
+			len = get_rock_ridge_filename(de, tmpname, inode);
+			if (len != 0) {		/* may be -1 */
+				p = tmpname;
+				map = 0;
+			}
+		}
+		if (map) {
+#ifdef CONFIG_JOLIET
+			if (sbi->s_joliet_level) {
+				len = get_joliet_filename(de, tmpname, inode);
+				p = tmpname;
+			} else
+#endif
+			if (sbi->s_mapping == 'a') {
+				len = get_acorn_filename(de, tmpname, inode);
+				p = tmpname;
+			} else
+			if (sbi->s_mapping == 'n') {
+				len = isofs_name_translate(de, tmpname, inode);
+				p = tmpname;
+			} else {
+				p = de->name;
+				len = de->name_len[0];
+			}
+		}
+		if (len > 0) {
+			if (filldir(dirent, p, len, filp->f_pos, inode_number, DT_UNKNOWN) < 0)
+				break;
+		}
+		filp->f_pos += de_len;
+
+		continue;
+	}
+	if (bh) brelse(bh);
+	return 0;
+}
+
+/*
+ * Handle allocation of temporary space for name translation and
+ * handling split directory entries.. The real work is done by
+ * "do_isofs_readdir()".
+ */
+static int isofs_readdir(struct file *filp,
+		void *dirent, filldir_t filldir)
+{
+	int result;
+	char * tmpname;
+	struct iso_directory_record * tmpde;
+	struct inode *inode = filp->f_dentry->d_inode;
+
+
+	tmpname = (char *)__get_free_page(GFP_KERNEL);
+	if (tmpname == NULL)
+		return -ENOMEM;
+
+	lock_kernel();
+	tmpde = (struct iso_directory_record *) (tmpname+1024);
+
+	result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde);
+
+	free_page((unsigned long) tmpname);
+	unlock_kernel();
+	return result;
+}
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
new file mode 100644
index 0000000..e4252c9
--- /dev/null
+++ b/fs/isofs/export.c
@@ -0,0 +1,228 @@
+/*
+ * fs/isofs/export.c
+ *
+ *  (C) 2004  Paul Serice - The new inode scheme requires switching
+ *                          from iget() to iget5_locked() which means
+ *                          the NFS export operations have to be hand
+ *                          coded because the default routines rely on
+ *                          iget().
+ *
+ * The following files are helpful:
+ *
+ *     Documentation/filesystems/Exporting
+ *     fs/exportfs/expfs.c.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/iso_fs.h>
+#include <linux/kernel.h>
+
+static struct dentry *
+isofs_export_iget(struct super_block *sb,
+		  unsigned long block,
+		  unsigned long offset,
+		  __u32 generation)
+{
+	struct inode *inode;
+	struct dentry *result;
+	if (block == 0)
+		return ERR_PTR(-ESTALE);
+	inode = isofs_iget(sb, block, offset);
+	if (inode == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (is_bad_inode(inode)
+	    || (generation && inode->i_generation != generation))
+	{
+		iput(inode);
+		return ERR_PTR(-ESTALE);
+	}
+	result = d_alloc_anon(inode);
+	if (!result) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	return result;
+}
+
+static struct dentry *
+isofs_export_get_dentry(struct super_block *sb, void *vobjp)
+{
+	__u32 *objp = vobjp;
+	unsigned long block = objp[0];
+	unsigned long offset = objp[1];
+	__u32 generation = objp[2];
+	return isofs_export_iget(sb, block, offset, generation);
+}
+
+/* This function is surprisingly simple.  The trick is understanding
+ * that "child" is always a directory. So, to find its parent, you
+ * simply need to find its ".." entry, normalize its block and offset,
+ * and return the underlying inode.  See the comments for
+ * isofs_normalize_block_and_offset(). */
+static struct dentry *isofs_export_get_parent(struct dentry *child)
+{
+	unsigned long parent_block = 0;
+	unsigned long parent_offset = 0;
+	struct inode *child_inode = child->d_inode;
+	struct iso_inode_info *e_child_inode = ISOFS_I(child_inode);
+	struct inode *parent_inode = NULL;
+	struct iso_directory_record *de = NULL;
+	struct buffer_head * bh = NULL;
+	struct dentry *rv = NULL;
+
+	/* "child" must always be a directory. */
+	if (!S_ISDIR(child_inode->i_mode)) {
+		printk(KERN_ERR "isofs: isofs_export_get_parent(): "
+		       "child is not a directory!\n");
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	/* It is an invariant that the directory offset is zero.  If
+	 * it is not zero, it means the directory failed to be
+	 * normalized for some reason. */
+	if (e_child_inode->i_iget5_offset != 0) {
+		printk(KERN_ERR "isofs: isofs_export_get_parent(): "
+		       "child directory not normalized!\n");
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	/* The child inode has been normalized such that its
+	 * i_iget5_block value points to the "." entry.  Fortunately,
+	 * the ".." entry is located in the same block. */
+	parent_block = e_child_inode->i_iget5_block;
+
+	/* Get the block in question. */
+	bh = sb_bread(child_inode->i_sb, parent_block);
+	if (bh == NULL) {
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	/* This is the "." entry. */
+	de = (struct iso_directory_record*)bh->b_data;
+
+	/* The ".." entry is always the second entry. */
+	parent_offset = (unsigned long)isonum_711(de->length);
+	de = (struct iso_directory_record*)(bh->b_data + parent_offset);
+
+	/* Verify it is in fact the ".." entry. */
+	if ((isonum_711(de->name_len) != 1) || (de->name[0] != 1)) {
+		printk(KERN_ERR "isofs: Unable to find the \"..\" "
+		       "directory for NFS.\n");
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	/* Normalize */
+	isofs_normalize_block_and_offset(de, &parent_block, &parent_offset);
+
+	/* Get the inode. */
+	parent_inode = isofs_iget(child_inode->i_sb,
+				  parent_block,
+				  parent_offset);
+	if (parent_inode == NULL) {
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	/* Allocate the dentry. */
+	rv = d_alloc_anon(parent_inode);
+	if (rv == NULL) {
+		rv = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+ out:
+	if (bh) {
+		brelse(bh);
+	}
+	return rv;
+}
+
+static int
+isofs_export_encode_fh(struct dentry *dentry,
+		       __u32 *fh32,
+		       int *max_len,
+		       int connectable)
+{
+	struct inode * inode = dentry->d_inode;
+	struct iso_inode_info * ei = ISOFS_I(inode);
+	int len = *max_len;
+	int type = 1;
+	__u16 *fh16 = (__u16*)fh32;
+
+	/*
+	 * WARNING: max_len is 5 for NFSv2.  Because of this
+	 * limitation, we use the lower 16 bits of fh32[1] to hold the
+	 * offset of the inode and the upper 16 bits of fh32[1] to
+	 * hold the offset of the parent.
+	 */
+
+	if (len < 3 || (connectable && len < 5))
+		return 255;
+
+	len = 3;
+	fh32[0] = ei->i_iget5_block;
+ 	fh16[2] = (__u16)ei->i_iget5_offset;  /* fh16 [sic] */
+	fh32[2] = inode->i_generation;
+	if (connectable && !S_ISDIR(inode->i_mode)) {
+		struct inode *parent;
+		struct iso_inode_info *eparent;
+		spin_lock(&dentry->d_lock);
+		parent = dentry->d_parent->d_inode;
+		eparent = ISOFS_I(parent);
+		fh32[3] = eparent->i_iget5_block;
+		fh16[3] = (__u16)eparent->i_iget5_offset;  /* fh16 [sic] */
+		fh32[4] = parent->i_generation;
+		spin_unlock(&dentry->d_lock);
+		len = 5;
+		type = 2;
+	}
+	*max_len = len;
+	return type;
+}
+
+
+static struct dentry *
+isofs_export_decode_fh(struct super_block *sb,
+		       __u32 *fh32,
+		       int fh_len,
+		       int fileid_type,
+		       int (*acceptable)(void *context, struct dentry *de),
+		       void *context)
+{
+	__u16 *fh16 = (__u16*)fh32;
+	__u32 child[3];   /* The child is what triggered all this. */
+	__u32 parent[3];  /* The parent is just along for the ride. */
+
+	if (fh_len < 3 || fileid_type > 2)
+		return NULL;
+
+	child[0] = fh32[0];
+	child[1] = fh16[2];  /* fh16 [sic] */
+	child[2] = fh32[2];
+
+	parent[0] = 0;
+	parent[1] = 0;
+	parent[2] = 0;
+	if (fileid_type == 2) {
+		if (fh_len > 2) parent[0] = fh32[3];
+		parent[1] = fh16[3];  /* fh16 [sic] */
+		if (fh_len > 4) parent[2] = fh32[4];
+	}
+
+	return sb->s_export_op->find_exported_dentry(sb, child, parent,
+						     acceptable, context);
+}
+
+
+struct export_operations isofs_export_ops = {
+	.decode_fh	= isofs_export_decode_fh,
+	.encode_fh	= isofs_export_encode_fh,
+	.get_dentry	= isofs_export_get_dentry,
+	.get_parent     = isofs_export_get_parent,
+};
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
new file mode 100644
index 0000000..b9256e6
--- /dev/null
+++ b/fs/isofs/inode.c
@@ -0,0 +1,1503 @@
+/*
+ *  linux/fs/isofs/inode.c
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ *      1992, 1993, 1994  Eric Youngdale Modified for ISO 9660 filesystem.
+ *      1994  Eberhard Moenkeberg - multi session handling.
+ *      1995  Mark Dobie - allow mounting of some weird VideoCDs and PhotoCDs.
+ *	1997  Gordon Chaffee - Joliet CDs
+ *	1998  Eric Lammerts - ISO 9660 Level 3
+ *	2004  Paul Serice - Inode Support pushed out from 4GB to 128GB
+ *	2004  Paul Serice - NFS Export Operations
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/iso_fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <linux/init.h>
+#include <linux/nls.h>
+#include <linux/ctype.h>
+#include <linux/smp_lock.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "zisofs.h"
+
+#define BEQUIET
+
+#ifdef LEAK_CHECK
+static int check_malloc;
+static int check_bread;
+#endif
+
+static int isofs_hashi(struct dentry *parent, struct qstr *qstr);
+static int isofs_hash(struct dentry *parent, struct qstr *qstr);
+static int isofs_dentry_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b);
+static int isofs_dentry_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b);
+
+#ifdef CONFIG_JOLIET
+static int isofs_hashi_ms(struct dentry *parent, struct qstr *qstr);
+static int isofs_hash_ms(struct dentry *parent, struct qstr *qstr);
+static int isofs_dentry_cmpi_ms(struct dentry *dentry, struct qstr *a, struct qstr *b);
+static int isofs_dentry_cmp_ms(struct dentry *dentry, struct qstr *a, struct qstr *b);
+#endif
+
+static void isofs_put_super(struct super_block *sb)
+{
+	struct isofs_sb_info *sbi = ISOFS_SB(sb);
+#ifdef CONFIG_JOLIET
+	if (sbi->s_nls_iocharset) {
+		unload_nls(sbi->s_nls_iocharset);
+		sbi->s_nls_iocharset = NULL;
+	}
+#endif
+
+#ifdef LEAK_CHECK
+	printk("Outstanding mallocs:%d, outstanding buffers: %d\n",
+	       check_malloc, check_bread);
+#endif
+
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return;
+}
+
+static void isofs_read_inode(struct inode *);
+static int isofs_statfs (struct super_block *, struct kstatfs *);
+
+static kmem_cache_t *isofs_inode_cachep;
+
+static struct inode *isofs_alloc_inode(struct super_block *sb)
+{
+	struct iso_inode_info *ei;
+	ei = (struct iso_inode_info *)kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void isofs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct iso_inode_info *ei = (struct iso_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
+					     sizeof(struct iso_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (isofs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(isofs_inode_cachep))
+		printk(KERN_INFO "iso_inode_cache: not all structures were freed\n");
+}
+
+static int isofs_remount(struct super_block *sb, int *flags, char *data)
+{
+	/* we probably want a lot more here */
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+static struct super_operations isofs_sops = {
+	.alloc_inode	= isofs_alloc_inode,
+	.destroy_inode	= isofs_destroy_inode,
+	.read_inode	= isofs_read_inode,
+	.put_super	= isofs_put_super,
+	.statfs		= isofs_statfs,
+	.remount_fs	= isofs_remount,
+};
+
+
+static struct dentry_operations isofs_dentry_ops[] = {
+	{
+		.d_hash		= isofs_hash,
+		.d_compare	= isofs_dentry_cmp,
+	},
+	{
+		.d_hash		= isofs_hashi,
+		.d_compare	= isofs_dentry_cmpi,
+	},
+#ifdef CONFIG_JOLIET
+	{
+		.d_hash		= isofs_hash_ms,
+		.d_compare	= isofs_dentry_cmp_ms,
+	},
+	{
+		.d_hash		= isofs_hashi_ms,
+		.d_compare	= isofs_dentry_cmpi_ms,
+	}
+#endif
+};
+
+struct iso9660_options{
+	char map;
+	char rock;
+	char joliet;
+	char cruft;
+	char unhide;
+	char nocompress;
+	unsigned char check;
+	unsigned int blocksize;
+	mode_t mode;
+	gid_t gid;
+	uid_t uid;
+	char *iocharset;
+	unsigned char utf8;
+	/* LVE */
+	s32 session;
+	s32 sbsector;
+};
+
+/*
+ * Compute the hash for the isofs name corresponding to the dentry.
+ */
+static int
+isofs_hash_common(struct dentry *dentry, struct qstr *qstr, int ms)
+{
+	const char *name;
+	int len;
+
+	len = qstr->len;
+	name = qstr->name;
+	if (ms) {
+		while (len && name[len-1] == '.')
+			len--;
+	}
+
+	qstr->hash = full_name_hash(name, len);
+
+	return 0;
+}
+
+/*
+ * Compute the case-insensitive hash for the isofs name of the dentry.
+ */
+static int
+isofs_hashi_common(struct dentry *dentry, struct qstr *qstr, int ms)
+{
+	const char *name;
+	int len;
+	char c;
+	unsigned long hash;
+
+	len = qstr->len;
+	name = qstr->name;
+	if (ms) {
+		while (len && name[len-1] == '.')
+			len--;
+	}
+
+	hash = init_name_hash();
+	while (len--) {
+		c = tolower(*name++);
+		hash = partial_name_hash(tolower(c), hash);
+	}
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+/*
+ * Case insensitive compare of two isofs names.
+ */
+static int
+isofs_dentry_cmpi_common(struct dentry *dentry,struct qstr *a,struct qstr *b,int ms)
+{
+	int alen, blen;
+
+	/* A filename cannot end in '.'; if it does, treat it as if it had none */
+	alen = a->len;
+	blen = b->len;
+	if (ms) {
+		while (alen && a->name[alen-1] == '.')
+			alen--;
+		while (blen && b->name[blen-1] == '.')
+			blen--;
+	}
+	if (alen == blen) {
+		if (strnicmp(a->name, b->name, alen) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * Case sensitive compare of two isofs names.
+ */
+static int
+isofs_dentry_cmp_common(struct dentry *dentry,struct qstr *a,struct qstr *b,int ms)
+{
+	int alen, blen;
+
+	/* A filename cannot end in '.'; if it does, treat it as if it had none */
+	alen = a->len;
+	blen = b->len;
+	if (ms) {
+		while (alen && a->name[alen-1] == '.')
+			alen--;
+		while (blen && b->name[blen-1] == '.')
+			blen--;
+	}
+	if (alen == blen) {
+		if (strncmp(a->name, b->name, alen) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+static int
+isofs_hash(struct dentry *dentry, struct qstr *qstr)
+{
+	return isofs_hash_common(dentry, qstr, 0);
+}
+
+static int
+isofs_hashi(struct dentry *dentry, struct qstr *qstr)
+{
+	return isofs_hashi_common(dentry, qstr, 0);
+}
+
+static int
+isofs_dentry_cmp(struct dentry *dentry,struct qstr *a,struct qstr *b)
+{
+	return isofs_dentry_cmp_common(dentry, a, b, 0);
+}
+
+static int
+isofs_dentry_cmpi(struct dentry *dentry,struct qstr *a,struct qstr *b)
+{
+	return isofs_dentry_cmpi_common(dentry, a, b, 0);
+}
+
+#ifdef CONFIG_JOLIET
+static int
+isofs_hash_ms(struct dentry *dentry, struct qstr *qstr)
+{
+	return isofs_hash_common(dentry, qstr, 1);
+}
+
+static int
+isofs_hashi_ms(struct dentry *dentry, struct qstr *qstr)
+{
+	return isofs_hashi_common(dentry, qstr, 1);
+}
+
+static int
+isofs_dentry_cmp_ms(struct dentry *dentry,struct qstr *a,struct qstr *b)
+{
+	return isofs_dentry_cmp_common(dentry, a, b, 1);
+}
+
+static int
+isofs_dentry_cmpi_ms(struct dentry *dentry,struct qstr *a,struct qstr *b)
+{
+	return isofs_dentry_cmpi_common(dentry, a, b, 1);
+}
+#endif
+
+enum {
+	Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore,
+	Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet,
+	Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err,
+	Opt_nocompress,
+};
+
+static match_table_t tokens = {
+	{Opt_norock, "norock"},
+	{Opt_nojoliet, "nojoliet"},
+	{Opt_unhide, "unhide"},
+	{Opt_cruft, "cruft"},
+	{Opt_utf8, "utf8"},
+	{Opt_iocharset, "iocharset=%s"},
+	{Opt_map_a, "map=acorn"},
+	{Opt_map_a, "map=a"},
+	{Opt_map_n, "map=normal"},
+	{Opt_map_n, "map=n"},
+	{Opt_map_o, "map=off"},
+	{Opt_map_o, "map=o"},
+	{Opt_session, "session=%u"},
+	{Opt_sb, "sbsector=%u"},
+	{Opt_check_r, "check=relaxed"},
+	{Opt_check_r, "check=r"},
+	{Opt_check_s, "check=strict"},
+	{Opt_check_s, "check=s"},
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_mode, "mode=%u"},
+	{Opt_block, "block=%u"},
+	{Opt_ignore, "conv=binary"},
+	{Opt_ignore, "conv=b"},
+	{Opt_ignore, "conv=text"},
+	{Opt_ignore, "conv=t"},
+	{Opt_ignore, "conv=mtext"},
+	{Opt_ignore, "conv=m"},
+	{Opt_ignore, "conv=auto"},
+	{Opt_ignore, "conv=a"},
+	{Opt_nocompress, "nocompress"},
+	{Opt_err, NULL}
+};
+
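+/*
+ * Fill *popt with the default mount options, then apply whatever was
+ * passed on the command line.  Returns 1 on success, 0 on a bad option.
+ */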
+static int parse_options(char *options, struct iso9660_options * popt)
+{
+	char *p;
+	int option;
+
+	popt->map = 'n';
+	popt->rock = 'y';
+	popt->joliet = 'y';
+	popt->cruft = 'n';
+	popt->unhide = 'n';
+	popt->check = 'u';		/* unset */
+	popt->nocompress = 0;
+	popt->blocksize = 1024;
+	popt->mode = S_IRUGO | S_IXUGO; /* r-x for all.  The disc could
+					   be shared with DOS machines so
+					   virtually anything could be
+					   a valid executable. */
+	popt->gid = 0;
+	popt->uid = 0;
+	popt->iocharset = NULL;
+	popt->utf8 = 0;
+	popt->session=-1;
+	popt->sbsector=-1;
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+		unsigned n;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_norock:
+			popt->rock = 'n';
+			break;
+		case Opt_nojoliet:
+			popt->joliet = 'n';
+			break;
+		case Opt_unhide:
+			popt->unhide = 'y';
+			break;
+		case Opt_cruft:
+			popt->cruft = 'y';
+			break;
+		case Opt_utf8:
+			popt->utf8 = 1;
+			break;
+#ifdef CONFIG_JOLIET
+		case Opt_iocharset:
+			popt->iocharset = match_strdup(&args[0]);
+			break;
+#endif
+		case Opt_map_a:
+			popt->map = 'a';
+			break;
+		case Opt_map_o:
+			popt->map = 'o';
+			break;
+		case Opt_map_n:
+			popt->map = 'n';
+			break;
+		case Opt_session:
+			if (match_int(&args[0], &option))
+				return 0;
+			n = option;
+			if (n > 99)
+				return 0;
+			popt->session = n + 1;
+			break;
+		case Opt_sb:
+			if (match_int(&args[0], &option))
+				return 0;
+			popt->sbsector = option;
+			break;
+		case Opt_check_r:
+			popt->check = 'r';
+			break;
+		case Opt_check_s:
+			popt->check = 's';
+			break;
+		case Opt_ignore:
+			break;
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return 0;
+			popt->uid = option;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return 0;
+			popt->gid = option;
+			break;
+		case Opt_mode:
+			if (match_int(&args[0], &option))
+				return 0;
+			popt->mode = option;
+			break;
+		case Opt_block:
+			if (match_int(&args[0], &option))
+				return 0;
+			n = option;
+			if (n != 512 && n != 1024 && n != 2048)
+				return 0;
+			popt->blocksize = n;
+			break;
+		case Opt_nocompress:
+			popt->nocompress = 1;
+			break;
+		default:
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Check whether the driver can tell us the multisession redirection value.
+ *
+ * Please don't change this unless you know what you are doing!
+ * Multisession is legal only with XA disks.
+ * A non-XA disk with more than one volume descriptor may do it right, but
+ * is usually written in a "multi-partition" manner that is standardized nowhere.
+ * Multisession uses absolute addressing (solely the first frame of the whole
+ * track is #0), multi-partition uses relative addressing (each first frame of
+ * each track is #0), and a track is not a session.
+ *
+ * A broken CDwriter software or drive firmware does not set new standards,
+ * at least not if conflicting with the existing ones.
+ *
+ * emoenke@gwdg.de
+ */
+#define WE_OBEY_THE_WRITTEN_STANDARDS 1
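+/* When set, ms_info.addr is trusted only for XA discs, as the written
+   standards require (see below). */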
+
+static unsigned int isofs_get_last_session(struct super_block *sb,s32 session )
+{
+	struct cdrom_multisession ms_info;
+	unsigned int vol_desc_start;
+	struct block_device *bdev = sb->s_bdev;
+	int i;
+
+	vol_desc_start=0;
+	ms_info.addr_format=CDROM_LBA;
+	if(session >= 0 && session <= 99) {
+		struct cdrom_tocentry Te;
+		Te.cdte_track=session;
+		Te.cdte_format=CDROM_LBA;
+		i = ioctl_by_bdev(bdev, CDROMREADTOCENTRY, (unsigned long) &Te);
+		if (!i) {
+			printk(KERN_DEBUG "Session %d start %d type %d\n",
+			       session, Te.cdte_addr.lba,
+			       Te.cdte_ctrl&CDROM_DATA_TRACK);
+			if ((Te.cdte_ctrl&CDROM_DATA_TRACK) == 4)
+				return Te.cdte_addr.lba;
+		}
+			
+		printk(KERN_ERR "Invalid session number or type of track\n");
+	}
+	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
+	if(session > 0) printk(KERN_ERR "Invalid session number\n");
+#if 0
+	printk("isofs.inode: CDROMMULTISESSION: rc=%d\n",i);
+	if (i==0) {
+		printk("isofs.inode: XA disk: %s\n",ms_info.xa_flag?"yes":"no");
+		printk("isofs.inode: vol_desc_start = %d\n", ms_info.addr.lba);
+	}
+#endif
+	if (i==0)
+#if WE_OBEY_THE_WRITTEN_STANDARDS
+        if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
+#endif
+		vol_desc_start=ms_info.addr.lba;
+	return vol_desc_start;
+}
+
+/*
+ * Initialize the superblock and read the root inode.
+ *
+ * Note: a check_disk_change() has been done immediately prior
+ * to this call, so we don't need to check again.
+ */
+static int isofs_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct buffer_head	      * bh = NULL, *pri_bh = NULL;
+	struct hs_primary_descriptor  * h_pri = NULL;
+	struct iso_primary_descriptor * pri = NULL;
+	struct iso_supplementary_descriptor *sec = NULL;
+	struct iso_directory_record   * rootp;
+	int				joliet_level = 0;
+	int				iso_blknum, block;
+	int				orig_zonesize;
+	int				table;
+	unsigned int			vol_desc_start;
+	unsigned long			first_data_zone;
+	struct inode		      * inode;
+	struct iso9660_options		opt;
+	struct isofs_sb_info	      * sbi;
+
+	sbi = kmalloc(sizeof(struct isofs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	s->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct isofs_sb_info));
+
+	if (!parse_options((char *) data, &opt))
+		goto out_freesbi;
+
+	/*
+	 * First of all, get the hardware blocksize for this device.
+	 * If the blocksize the user specified is smaller than the
+	 * hardware blocksize (or we don't know what it is), then the
+	 * hardware blocksize is used instead.
+	 */
+	/*
+	 * What if bugger tells us to go beyond page size?
+	 */
+	opt.blocksize = sb_min_blocksize(s, opt.blocksize);
+
+	sbi->s_high_sierra = 0; /* default is iso9660 */
+
+	vol_desc_start = (opt.sbsector != -1) ?
+		opt.sbsector : isofs_get_last_session(s,opt.session);
+
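+	/*
+	 * The volume descriptor set starts 16 sectors into the session;
+	 * scan from there up to (but not including) sector 100.
+	 */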
+  	for (iso_blknum = vol_desc_start+16;
+             iso_blknum < vol_desc_start+100; iso_blknum++)
+	{
+	    struct hs_volume_descriptor   * hdp;
+	    struct iso_volume_descriptor  * vdp;
+
+	    block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits);
+	    if (!(bh = sb_bread(s, block)))
+		goto out_no_read;
+
+	    vdp = (struct iso_volume_descriptor *)bh->b_data;
+	    hdp = (struct hs_volume_descriptor *)bh->b_data;
+	    
+	    /* Due to the overlapping physical location of the descriptors, 
+	     * ISO CDs can match hdp->id==HS_STANDARD_ID as well. To ensure 
+	     * proper identification in this case, we first check for ISO.
+	     */
+	    if (strncmp (vdp->id, ISO_STANDARD_ID, sizeof vdp->id) == 0) {
+		if (isonum_711 (vdp->type) == ISO_VD_END)
+		    break;
+		if (isonum_711 (vdp->type) == ISO_VD_PRIMARY) {
+		    if (pri == NULL) {
+			pri = (struct iso_primary_descriptor *)vdp;
+			/* Save the buffer in case we need it ... */
+			pri_bh = bh;
+			bh = NULL;
+		    }
+		}
+#ifdef CONFIG_JOLIET
+		else if (isonum_711 (vdp->type) == ISO_VD_SUPPLEMENTARY) {
+		    sec = (struct iso_supplementary_descriptor *)vdp;
+		    if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
+			if (opt.joliet == 'y') {
+			    if (sec->escape[2] == 0x40) {
+				joliet_level = 1;
+			    } else if (sec->escape[2] == 0x43) {
+				joliet_level = 2;
+			    } else if (sec->escape[2] == 0x45) {
+				joliet_level = 3;
+			    }
+			    printk(KERN_DEBUG"ISO 9660 Extensions: Microsoft Joliet Level %d\n",
+				   joliet_level);
+			}
+			goto root_found;
+		    } else {
+			/* Unknown supplementary volume descriptor */
+			sec = NULL;
+		    }
+		}
+#endif
+	    } else {
+	        if (strncmp (hdp->id, HS_STANDARD_ID, sizeof hdp->id) == 0) {
+		    if (isonum_711 (hdp->type) != ISO_VD_PRIMARY)
+		        goto out_freebh;
+		
+		    sbi->s_high_sierra = 1;
+		    opt.rock = 'n';
+		    h_pri = (struct hs_primary_descriptor *)vdp;
+		    goto root_found;
+		}
+	    }
+
+            /* Just skip any volume descriptors we don't recognize */
+
+	    brelse(bh);
+	    bh = NULL;
+	}
+	/*
+	 * If we fall through, either no volume descriptor was found,
+	 * or else we passed a primary descriptor looking for others.
+	 */
+	if (!pri)
+		goto out_unknown_format;
+	brelse(bh);
+	bh = pri_bh;
+	pri_bh = NULL;
+
+root_found:
+
+	if (joliet_level && (pri == NULL || opt.rock == 'n')) {
+	    /* This is the case of Joliet with the norock mount flag.
+	     * A disc with both Joliet and Rock Ridge is handled later
+	     */
+	    pri = (struct iso_primary_descriptor *) sec;
+	}
+
+	if(sbi->s_high_sierra){
+	  rootp = (struct iso_directory_record *) h_pri->root_directory_record;
+	  sbi->s_nzones = isonum_733 (h_pri->volume_space_size);
+	  sbi->s_log_zone_size = isonum_723 (h_pri->logical_block_size);
+	  sbi->s_max_size = isonum_733(h_pri->volume_space_size);
+	} else {
+	  if (!pri)
+	    goto out_freebh;
+	  rootp = (struct iso_directory_record *) pri->root_directory_record;
+	  sbi->s_nzones = isonum_733 (pri->volume_space_size);
+	  sbi->s_log_zone_size = isonum_723 (pri->logical_block_size);
+	  sbi->s_max_size = isonum_733(pri->volume_space_size);
+	}
+
+	sbi->s_ninodes = 0; /* No way to figure this out easily */
+
+	orig_zonesize = sbi->s_log_zone_size;
+	/*
+	 * If the zone size is smaller than the hardware sector size,
+	 * this is a fatal error.  This would occur if the disc drive
+	 * had sectors that were 2048 bytes, but the filesystem had
+	 * blocks that were 512 bytes (which should only very rarely
+	 * happen.)
+	 */
+	if(orig_zonesize < opt.blocksize)
+		goto out_bad_size;
+
+	/* RDE: convert log zone size to bit shift */
+	switch (sbi->s_log_zone_size)
+	  { case  512: sbi->s_log_zone_size =  9; break;
+	    case 1024: sbi->s_log_zone_size = 10; break;
+	    case 2048: sbi->s_log_zone_size = 11; break;
+
+	    default:
+		goto out_bad_zone_size;
+	  }
+
+	s->s_magic = ISOFS_SUPER_MAGIC;
+	s->s_maxbytes = 0xffffffff; /* We can handle files up to 4 GB */
+
+	/* The CDROM is read-only, has no nodes (devices) on it, and since
+	   all of the files appear to be owned by root, we really do not want
+	   to allow suid.  (suid or devices will not show up unless we have
+	   Rock Ridge extensions) */
+
+	s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
+
+	/* Set this for reference.  It's not currently used except on write,
+	   which we don't have. */
+
+	first_data_zone = isonum_733 (rootp->extent) +
+			  isonum_711 (rootp->ext_attr_length);
+	sbi->s_firstdatazone = first_data_zone;
+#ifndef BEQUIET
+	printk(KERN_DEBUG "Max size:%ld   Log zone size:%ld\n",
+	       sbi->s_max_size,
+	       1UL << sbi->s_log_zone_size);
+	printk(KERN_DEBUG "First datazone:%ld\n", sbi->s_firstdatazone);
+	if(sbi->s_high_sierra)
+		printk(KERN_DEBUG "Disc in High Sierra format.\n");
+#endif
+
+	/*
+	 * If the Joliet level is set, we _may_ decide to use the
+	 * secondary descriptor, but can't be sure until after we
+	 * read the root inode. But before reading the root inode
+	 * we may need to change the device blocksize, and would
+	 * rather release the old buffer first. So, we cache the
+	 * first_data_zone value from the secondary descriptor.
+	 */
+	if (joliet_level) {
+		pri = (struct iso_primary_descriptor *) sec;
+		rootp = (struct iso_directory_record *)
+			pri->root_directory_record;
+		first_data_zone = isonum_733 (rootp->extent) +
+			  	isonum_711 (rootp->ext_attr_length);
+	}
+
+	/*
+	 * We're all done using the volume descriptor, and may need
+	 * to change the device blocksize, so release the buffer now.
+	 */
+	brelse(pri_bh);
+	brelse(bh);
+
+	/*
+	 * Force the blocksize to 512 for 512 byte sectors.  The file
+	 * read primitives really get it wrong in a bad way if we don't
+	 * do this.
+	 *
+	 * Note - we should never be setting the blocksize to something
+	 * less than the hardware sector size for the device.  If we
+	 * do, we would end up having to read larger buffers and split
+	 * out portions to satisfy requests.
+	 *
+	 * Note2- the idea here is that we want to deal with the optimal
+	 * zonesize in the filesystem.  If we have it set to something less,
+	 * then we have horrible problems with trying to piece together
+	 * bits of adjacent blocks in order to properly read directory
+	 * entries.  By forcing the blocksize in this way, we ensure
+	 * that we will never be required to do this.
+	 */
+	sb_set_blocksize(s, orig_zonesize);
+
+	sbi->s_nls_iocharset = NULL;
+
+#ifdef CONFIG_JOLIET
+	if (joliet_level && opt.utf8 == 0) {
+		char * p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
+		sbi->s_nls_iocharset = load_nls(p);
+		if (! sbi->s_nls_iocharset) {
+			/* Fail only if explicit charset specified */
+			if (opt.iocharset)
+				goto out_freesbi;
+			sbi->s_nls_iocharset = load_nls_default();
+		}
+	}
+#endif
+	s->s_op = &isofs_sops;
+	s->s_export_op = &isofs_export_ops;
+	sbi->s_mapping = opt.map;
+	sbi->s_rock = (opt.rock == 'y' ? 2 : 0);
+	sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/
+	sbi->s_cruft = opt.cruft;
+	sbi->s_unhide = opt.unhide;
+	sbi->s_uid = opt.uid;
+	sbi->s_gid = opt.gid;
+	sbi->s_utf8 = opt.utf8;
+	sbi->s_nocompress = opt.nocompress;
+	/*
+	 * It would be incredibly stupid to allow people to mark every file
+	 * on the disk as suid, so we merely allow them to set the default
+	 * permissions.
+	 */
+	sbi->s_mode = opt.mode & 0777;
+
+	/*
+	 * Read the root inode, which _may_ result in changing
+	 * the s_rock flag. Once we have the final s_rock value,
+	 * we then decide whether to use the Joliet descriptor.
+	 */
+	inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+
+	/*
+	 * If this disk has both Rock Ridge and Joliet on it, then we
+	 * want to use Rock Ridge by default.  This can be overridden
+	 * by using the norock mount option.  There is still one other
+	 * possibility that is not taken into account: a Rock Ridge
+	 * CD with Unicode names.  Until someone sees such a beast, it
+	 * will not be supported.
+	 */
+	if (sbi->s_rock == 1) {
+		joliet_level = 0;
+	} else if (joliet_level) {
+		sbi->s_rock = 0;
+		if (sbi->s_firstdatazone != first_data_zone) {
+			sbi->s_firstdatazone = first_data_zone;
+			printk(KERN_DEBUG 
+				"ISOFS: changing to secondary root\n");
+			iput(inode);
+			inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+		}
+	}
+
+	if (opt.check == 'u') {
+		/* Only Joliet is case insensitive by default */
+		if (joliet_level) opt.check = 'r';
+		else opt.check = 's';
+	}
+	sbi->s_joliet_level = joliet_level;
+
+	/* check the root inode */
+	if (!inode)
+		goto out_no_root;
+	if (!inode->i_op)
+		goto out_bad_root;
+	/* get the root dentry */
+	s->s_root = d_alloc_root(inode);
+	if (!(s->s_root))
+		goto out_no_root;
+
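+	/*
+	 * Pick the dentry operations: [0] exact match, [1] case-insensitive,
+	 * and (with Joliet) [2]/[3] the same pair ignoring trailing dots.
+	 */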
+	table = 0;
+	if (joliet_level) table += 2;
+	if (opt.check == 'r') table++;
+	s->s_root->d_op = &isofs_dentry_ops[table];
+
+	if (opt.iocharset)
+		kfree(opt.iocharset);
+
+	return 0;
+
+	/*
+	 * Display error messages and free resources.
+	 */
+out_bad_root:
+	printk(KERN_WARNING "isofs_fill_super: root inode not initialized\n");
+	goto out_iput;
+out_no_root:
+	printk(KERN_WARNING "isofs_fill_super: get root inode failed\n");
+out_iput:
+	iput(inode);
+#ifdef CONFIG_JOLIET
+	if (sbi->s_nls_iocharset)
+		unload_nls(sbi->s_nls_iocharset);
+#endif
+	goto out_freesbi;
+out_no_read:
+	printk(KERN_WARNING "isofs_fill_super: "
+		"bread failed, dev=%s, iso_blknum=%d, block=%d\n",
+		s->s_id, iso_blknum, block);
+	goto out_freesbi;
+out_bad_zone_size:
+	printk(KERN_WARNING "Bad logical zone size %ld\n",
+		sbi->s_log_zone_size);
+	goto out_freebh;
+out_bad_size:
+	printk(KERN_WARNING "Logical zone size(%d) < hardware blocksize(%u)\n",
+		orig_zonesize, opt.blocksize);
+	goto out_freebh;
+out_unknown_format:
+	if (!silent)
+		printk(KERN_WARNING "Unable to identify CD-ROM format.\n");
+
+out_freebh:
+	brelse(bh);
+out_freesbi:
+	if (opt.iocharset)
+		kfree(opt.iocharset);
+	kfree(sbi);
+	s->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static int isofs_statfs (struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = ISOFS_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = (ISOFS_SB(sb)->s_nzones
+                  << (ISOFS_SB(sb)->s_log_zone_size - sb->s_blocksize_bits));
+	buf->f_bfree = 0;
+	buf->f_bavail = 0;
+	buf->f_files = ISOFS_SB(sb)->s_ninodes;
+	buf->f_ffree = 0;
+	buf->f_namelen = NAME_MAX;
+	return 0;
+}
+
+/*
+ * Get a set of blocks: fill in the buffer heads if they are already
+ * allocated, or getblk() them if they are not.  Returns the number of
+ * blocks handled (0 == error).
+ */
+int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
+		     struct buffer_head **bh, unsigned long nblocks)
+{
+	unsigned long b_off;
+	unsigned offset, sect_size;
+	unsigned int firstext;
+	unsigned long nextblk, nextoff;
+	long iblock = (long)iblock_s;
+	int section, rv;
+	struct iso_inode_info *ei = ISOFS_I(inode);
+
+	lock_kernel();
+
+	rv = 0;
+	if (iblock < 0 || iblock != iblock_s) {
+		printk("isofs_get_blocks: block number too large\n");
+		goto abort;
+	}
+
+	b_off = iblock;
+	
+	offset    = 0;
+	firstext  = ei->i_first_extent;
+	sect_size = ei->i_section_size >> ISOFS_BUFFER_BITS(inode);
+	nextblk   = ei->i_next_section_block;
+	nextoff   = ei->i_next_section_offset;
+	section   = 0;
+
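+	/*
+	 * Large (ISO 9660 Level 3) files are stored as a chain of sections;
+	 * walk the chain until we reach the section containing b_off.
+	 */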
+	while ( nblocks ) {
+		/* If we are *way* beyond the end of the file, print a message.
+		 * Access beyond the end of the file up to the next page boundary
+		 * is normal, however, because of the way the page cache works.
+		 * In this case, we just return 0 so that we can properly fill
+		 * the page with useless information without generating any
+		 * I/O errors.
+		 */
+		if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+			printk("isofs_get_blocks: block >= EOF (%ld, %ld)\n",
+			       iblock, (unsigned long) inode->i_size);
+			goto abort;
+		}
+		
+		if (nextblk) {
+			while (b_off >= (offset + sect_size)) {
+				struct inode *ninode;
+				
+				offset += sect_size;
+				if (nextblk == 0)
+					goto abort;
+				ninode = isofs_iget(inode->i_sb, nextblk, nextoff);
+				if (!ninode)
+					goto abort;
+				firstext  = ISOFS_I(ninode)->i_first_extent;
+				sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode);
+				nextblk   = ISOFS_I(ninode)->i_next_section_block;
+				nextoff   = ISOFS_I(ninode)->i_next_section_offset;
+				iput(ninode);
+				
+				if (++section > 100) {
+					printk("isofs_get_blocks: More than 100 file sections ?!?, aborting...\n");
+					printk("isofs_get_blocks: block=%ld firstext=%u sect_size=%u "
+					       "nextblk=%lu nextoff=%lu\n",
+					       iblock, firstext, (unsigned) sect_size,
+					       nextblk, nextoff);
+					goto abort;
+				}
+			}
+		}
+		
+		if ( *bh ) {
+			map_bh(*bh, inode->i_sb, firstext + b_off - offset);
+		} else {
+			*bh = sb_getblk(inode->i_sb, firstext+b_off-offset);
+			if ( !*bh )
+				goto abort;
+		}
+		bh++;	/* Next buffer head */
+		b_off++;	/* Next buffer offset */
+		nblocks--;
+		rv++;
+	}
+
+
+abort:
+	unlock_kernel();
+	return rv;
+}
+
+/*
+ * Used by the standard interfaces.
+ */
+static int isofs_get_block(struct inode *inode, sector_t iblock,
+		    struct buffer_head *bh_result, int create)
+{
+	if ( create ) {
+		printk("isofs_get_block: Kernel tries to allocate a block\n");
+		return -EROFS;
+	}
+
+	return isofs_get_blocks(inode, iblock, &bh_result, 1) ? 0 : -EIO;
+}
+
+static int isofs_bmap(struct inode *inode, sector_t block)
+{
+	struct buffer_head dummy;
+	int error;
+
+	dummy.b_state = 0;
+	dummy.b_blocknr = -1000;
+	error = isofs_get_block(inode, block, &dummy, 0);
+	if (!error)
+		return dummy.b_blocknr;
+	return 0;
+}
+
+struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
+{
+	sector_t blknr = isofs_bmap(inode, block);
+	if (!blknr)
+		return NULL;
+	return sb_bread(inode->i_sb, blknr);
+}
+
+static int isofs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,isofs_get_block);
+}
+
+static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,isofs_get_block);
+}
+
+static struct address_space_operations isofs_aops = {
+	.readpage = isofs_readpage,
+	.sync_page = block_sync_page,
+	.bmap = _isofs_bmap
+};
+
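+/*
+ * Apply a uid=/gid= mount override on top of whatever Rock Ridge set;
+ * a value of 0 means no override was requested.
+ */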
+static inline void test_and_set_uid(uid_t *p, uid_t value)
+{
+	if(value) {
+		*p = value;
+	}
+}
+
+static inline void test_and_set_gid(gid_t *p, gid_t value)
+{
+	if(value) {
+		*p = value;
+	}
+}
+
+static int isofs_read_level3_size(struct inode * inode)
+{
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
+	int high_sierra = ISOFS_SB(inode->i_sb)->s_high_sierra;
+	struct buffer_head * bh = NULL;
+	unsigned long block, offset, block_saved, offset_saved;
+	int i = 0;
+	int more_entries = 0;
+	struct iso_directory_record * tmpde = NULL;
+	struct iso_inode_info *ei = ISOFS_I(inode);
+
+	inode->i_size = 0;
+
+	/* The first 16 blocks are reserved as the System Area.  Thus,
+	 * no inodes can appear in block 0.  We use this to flag that
+	 * this is the last section. */
+	ei->i_next_section_block = 0;
+	ei->i_next_section_offset = 0;
+
+	block = ei->i_iget5_block;
+	offset = ei->i_iget5_offset;
+
+	do {
+		struct iso_directory_record * de;
+		unsigned int de_len;
+
+		if (!bh) {
+			bh = sb_bread(inode->i_sb, block);
+			if (!bh)
+				goto out_noread;
+		}
+		de = (struct iso_directory_record *) (bh->b_data + offset);
+		de_len = *(unsigned char *) de;
+
+		if (de_len == 0) {
+			brelse(bh);
+			bh = NULL;
+			++block;
+			offset = 0;
+			continue;
+		}
+
+		block_saved = block;
+		offset_saved = offset;
+		offset += de_len;
+
+		/* Make sure we have a full directory entry */
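+		/* ("slop" is the part of the record that lies in the current
+		   block; the rest, if any, is spliced in from the next.) */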
+		if (offset >= bufsize) {
+			int slop = bufsize - offset + de_len;
+			if (!tmpde) {
+				tmpde = kmalloc(256, GFP_KERNEL);
+				if (!tmpde)
+					goto out_nomem;
+			}
+			memcpy(tmpde, de, slop);
+			offset &= bufsize - 1;
+			block++;
+			brelse(bh);
+			bh = NULL;
+			if (offset) {
+				bh = sb_bread(inode->i_sb, block);
+				if (!bh)
+					goto out_noread;
+				memcpy((void *) tmpde + slop, bh->b_data, offset);
+			}
+			de = tmpde;
+		}
+
+		inode->i_size += isonum_733(de->size);
+		if (i == 1) {
+			ei->i_next_section_block = block_saved;
+			ei->i_next_section_offset = offset_saved;
+		}
+
+		more_entries = de->flags[-high_sierra] & 0x80;
+
+		i++;
+		if(i > 100)
+			goto out_toomany;
+	} while(more_entries);
+out:
+	if (tmpde)
+		kfree(tmpde);
+	if (bh)
+		brelse(bh);
+	return 0;
+
+out_nomem:
+	if (bh)
+		brelse(bh);
+	return -ENOMEM;
+
+out_noread:
+	printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block);
+	if (tmpde)
+		kfree(tmpde);
+	return -EIO;
+
+out_toomany:
+	printk(KERN_INFO "isofs_read_level3_size: "
+		"More than 100 file sections ?!?, aborting...\n"
+	  	"isofs_read_level3_size: inode=%lu\n",
+		inode->i_ino);
+	goto out;
+}
+
+static void isofs_read_inode(struct inode * inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct isofs_sb_info *sbi = ISOFS_SB(sb);
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
+	unsigned long block;
+	int high_sierra = sbi->s_high_sierra;
+	struct buffer_head * bh = NULL;
+	struct iso_directory_record * de;
+	struct iso_directory_record * tmpde = NULL;
+	unsigned int de_len;
+	unsigned long offset;
+	struct iso_inode_info *ei = ISOFS_I(inode);
+
+	block = ei->i_iget5_block;
+	bh = sb_bread(inode->i_sb, block);
+	if (!bh)
+		goto out_badread;
+
+	offset = ei->i_iget5_offset;
+
+	de = (struct iso_directory_record *) (bh->b_data + offset);
+	de_len = *(unsigned char *) de;
+
+	if (offset + de_len > bufsize) {
+		int frag1 = bufsize - offset;
+
+		tmpde = kmalloc(de_len, GFP_KERNEL);
+		if (tmpde == NULL) {
+			printk(KERN_INFO "isofs_read_inode: out of memory\n");
+			goto fail;
+		}
+		memcpy(tmpde, bh->b_data + offset, frag1);
+		brelse(bh);
+		bh = sb_bread(inode->i_sb, ++block);
+		if (!bh)
+			goto out_badread;
+		memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
+		de = tmpde;
+	}
+
+	inode->i_ino = isofs_get_ino(ei->i_iget5_block,
+				     ei->i_iget5_offset,
+				     ISOFS_BUFFER_BITS(inode));
+
+	/* Assume it is a normal-format file unless told otherwise */
+	ei->i_file_format = isofs_file_normal;
+
+	if (de->flags[-high_sierra] & 2) {
+		inode->i_mode = S_IRUGO | S_IXUGO | S_IFDIR;
+		inode->i_nlink = 1; /* Set to 1.  We know there are 2, but
+				       the find utility tries to optimize
+				       if it is 2, and it screws up.  It is
+				       easier to give 1 which tells find to
+				       do it the hard way. */
+	} else {
+ 		/* Everybody gets to read the file. */
+		inode->i_mode = sbi->s_mode;
+		inode->i_nlink = 1;
+	        inode->i_mode |= S_IFREG;
+	}
+	inode->i_uid = sbi->s_uid;
+	inode->i_gid = sbi->s_gid;
+	inode->i_blocks = inode->i_blksize = 0;
+
+	ei->i_format_parm[0] = 0;
+	ei->i_format_parm[1] = 0;
+	ei->i_format_parm[2] = 0;
+
+	ei->i_section_size = isonum_733 (de->size);
+	if(de->flags[-high_sierra] & 0x80) {
+		if(isofs_read_level3_size(inode)) goto fail;
+	} else {
+		ei->i_next_section_block = 0;
+		ei->i_next_section_offset = 0;
+		inode->i_size = isonum_733 (de->size);
+	}
+
+	/*
+	 * Some dipshit decided to store some other bit of information
+	 * in the high byte of the file length.  Truncate size in case
+	 * this CDROM was mounted with the cruft option.
+	 */
+
+	if (sbi->s_cruft == 'y')
+		inode->i_size &= 0x00ffffff;
+
+	if (de->interleave[0]) {
+		printk("Interleaved files not (yet) supported.\n");
+		inode->i_size = 0;
+	}
+
+	/* I have no idea what file_unit_size is used for, so
+	   we will flag it for now */
+	if (de->file_unit_size[0] != 0) {
+		printk("File unit size != 0 for ISO file (%ld).\n",
+		       inode->i_ino);
+	}
+
+	/* I have no idea what other flag bits are used for, so
+	   we will flag it for now */
+#ifdef DEBUG
+	if((de->flags[-high_sierra] & ~2)!= 0){
+		printk("Unusual flag settings for ISO file (%ld %x).\n",
+		       inode->i_ino, de->flags[-high_sierra]);
+	}
+#endif
+
+	inode->i_mtime.tv_sec =
+	inode->i_atime.tv_sec =
+	inode->i_ctime.tv_sec = iso_date(de->date, high_sierra);
+	inode->i_mtime.tv_nsec =
+	inode->i_atime.tv_nsec =
+	inode->i_ctime.tv_nsec = 0;
+
+	ei->i_first_extent = (isonum_733 (de->extent) +
+			      isonum_711 (de->ext_attr_length));
+
+	/* Set the number of blocks for stat() - should be done before RR */
+	inode->i_blksize = PAGE_CACHE_SIZE; /* For stat() only */
+	inode->i_blocks  = (inode->i_size + 511) >> 9;
+
+	/*
+	 * Now test for possible Rock Ridge extensions which will override
+	 * some of these numbers in the inode structure.
+	 */
+
+	if (!high_sierra) {
+		parse_rock_ridge_inode(de, inode);
+		/* if we want uid/gid set, override the rock ridge setting */
+		test_and_set_uid(&inode->i_uid, sbi->s_uid);
+		test_and_set_gid(&inode->i_gid, sbi->s_gid);
+	}
+
+	/* Install the inode operations vector */
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_fop = &generic_ro_fops;
+		switch ( ei->i_file_format ) {
+#ifdef CONFIG_ZISOFS
+		case isofs_file_compressed:
+			inode->i_data.a_ops = &zisofs_aops;
+			break;
+#endif
+		default:
+			inode->i_data.a_ops = &isofs_aops;
+			break;
+		}
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &isofs_dir_inode_operations;
+		inode->i_fop = &isofs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_data.a_ops = &isofs_symlink_aops;
+	} else
+		/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+ out:
+	if (tmpde)
+		kfree(tmpde);
+	if (bh)
+		brelse(bh);
+	return;
+
+ out_badread:
+	printk(KERN_WARNING "ISOFS: unable to read i-node block\n");
+ fail:
+	make_bad_inode(inode);
+	goto out;
+}
+
+struct isofs_iget5_callback_data {
+	unsigned long block;
+	unsigned long offset;
+};
+
+static int isofs_iget5_test(struct inode *ino, void *data)
+{
+	struct iso_inode_info *i = ISOFS_I(ino);
+	struct isofs_iget5_callback_data *d =
+		(struct isofs_iget5_callback_data*)data;
+	return (i->i_iget5_block == d->block)
+	       && (i->i_iget5_offset == d->offset);
+}
+
+static int isofs_iget5_set(struct inode *ino, void *data)
+{
+	struct iso_inode_info *i = ISOFS_I(ino);
+	struct isofs_iget5_callback_data *d =
+		(struct isofs_iget5_callback_data*)data;
+	i->i_iget5_block = d->block;
+	i->i_iget5_offset = d->offset;
+	return 0;
+}
+
+/* Store, in the inode's containing structure, the block and block
+ * offset that point to the underlying meta-data for the inode.  The
+ * code below is otherwise similar to the iget() code in
+ * include/linux/fs.h */
+struct inode *isofs_iget(struct super_block *sb,
+			 unsigned long block,
+			 unsigned long offset)
+{
+	unsigned long hashval;
+	struct inode *inode;
+	struct isofs_iget5_callback_data data;
+
+	if (offset >= 1ul << sb->s_blocksize_bits)
+		return NULL;
+
+	data.block = block;
+	data.offset = offset;
+
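+	/*
+	 * offset is less than the block size (checked above), so block and
+	 * offset can be packed into a single hash key; the test callback
+	 * still compares the real block/offset values.
+	 */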
+	hashval = (block << sb->s_blocksize_bits) | offset;
+
+	inode = iget5_locked(sb,
+			     hashval,
+			     &isofs_iget5_test,
+			     &isofs_iget5_set,
+			     &data);
+
+	if (inode && (inode->i_state & I_NEW)) {
+		sb->s_op->read_inode(inode);
+		unlock_new_inode(inode);
+	}
+
+	return inode;
+}
+
+#ifdef LEAK_CHECK
+#undef malloc
+#undef free_s
+#undef sb_bread
+#undef brelse
+
+void * leak_check_malloc(unsigned int size){
+  void * tmp;
+  check_malloc++;
+  tmp = kmalloc(size, GFP_KERNEL);
+  return tmp;
+}
+
+void leak_check_free_s(void * obj, int size){
+  check_malloc--;
+  return kfree(obj);
+}
+
+struct buffer_head * leak_check_bread(struct super_block *sb, int block){
+  check_bread++;
+  return sb_bread(sb, block);
+}
+
+void leak_check_brelse(struct buffer_head * bh){
+  check_bread--;
+  return brelse(bh);
+}
+
+#endif
+
+static struct super_block *isofs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+}
+
+static struct file_system_type iso9660_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "iso9660",
+	.get_sb		= isofs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_iso9660_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out;
+#ifdef CONFIG_ZISOFS
+	err = zisofs_init();
+	if (err)
+		goto out1;
+#endif
+	err = register_filesystem(&iso9660_fs_type);
+	if (err)
+		goto out2;
+	return 0;
+out2:
+#ifdef CONFIG_ZISOFS
+	zisofs_cleanup();
+out1:
+#endif
+	destroy_inodecache();
+out:
+	return err;
+}
+
+static void __exit exit_iso9660_fs(void)
+{
+        unregister_filesystem(&iso9660_fs_type);
+#ifdef CONFIG_ZISOFS
+	zisofs_cleanup();
+#endif
+	destroy_inodecache();
+}
+
+module_init(init_iso9660_fs)
+module_exit(exit_iso9660_fs)
+MODULE_LICENSE("GPL");
+/* Actual filesystem name is iso9660, as requested in filesystems.c */
+MODULE_ALIAS("iso9660");
diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
new file mode 100644
index 0000000..86c50e2
--- /dev/null
+++ b/fs/isofs/joliet.c
@@ -0,0 +1,103 @@
+/*
+ *  linux/fs/isofs/joliet.c
+ *
+ *  (C) 1996 Gordon Chaffee
+ *
+ *  Joliet: Microsoft's Unicode extensions to iso9660
+ */
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/mm.h>
+#include <linux/iso_fs.h>
+#include <asm/unaligned.h>
+
+/*
+ * Convert Unicode 16 to UTF8 or ASCII.
+ */
+static int
+uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls)
+{
+	wchar_t *ip, ch;
+	unsigned char *op;
+
+	ip = uni;
+	op = ascii;
+
+	while ((ch = get_unaligned(ip)) && len) {
+		int llen;
+		ch = be16_to_cpu(ch);
+		if ((llen = nls->uni2char(ch, op, NLS_MAX_CHARSET_SIZE)) > 0)
+			op += llen;
+		else
+			*op++ = '?';
+		ip++;
+
+		len--;
+	}
+	*op = 0;
+	return (op - ascii);
+}
+
+/* Convert big endian wide character string to utf8 */
+static int
+wcsntombs_be(__u8 *s, const __u8 *pwcs, int inlen, int maxlen)
+{
+	const __u8 *ip;
+	__u8 *op;
+	int size;
+	__u16 c;
+
+	op = s;
+	ip = pwcs;
+	while ((*ip || ip[1]) && (maxlen > 0) && (inlen > 0)) {
+		c = (*ip << 8) | ip[1];
+		if (c > 0x7f) {
+			size = utf8_wctomb(op, c, maxlen);
+			if (size == -1) {
+				/* Ignore character and move on */
+				maxlen--;
+			} else {
+				op += size;
+				maxlen -= size;
+			}
+		} else {
+			*op++ = (__u8) c;
+		}
+		ip += 2;
+		inlen--;
+	}
+	return (op - s);
+}
+
+int
+get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
+{
+	unsigned char utf8;
+	struct nls_table *nls;
+	unsigned char len = 0;
+
+	utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
+	nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
+
+	if (utf8) {
+		len = wcsntombs_be(outname, de->name,
+				   de->name_len[0] >> 1, PAGE_SIZE);
+	} else {
+		len = uni16_to_x8(outname, (u16 *) de->name,
+				  de->name_len[0] >> 1, nls);
+	}
+	if ((len > 2) && (outname[len-2] == ';') && (outname[len-1] == '1')) {
+		len -= 2;
+	}
+
+	/*
+	 * Windows doesn't like periods at the end of a name,
+	 * so neither do we
+	 */
+	while (len >= 2 && (outname[len-1] == '.')) {
+		len--;
+	}
+
+	return len;
+}
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
new file mode 100644
index 0000000..9569fc4
--- /dev/null
+++ b/fs/isofs/namei.c
@@ -0,0 +1,201 @@
+/*
+ *  linux/fs/isofs/namei.c
+ *
+ *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
+ *
+ *  (C) 1991  Linus Torvalds - minix filesystem
+ */
+
+#include <linux/time.h>
+#include <linux/iso_fs.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/config.h>	/* Joliet? */
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/dcache.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * We cannot use a plain strncmp here, as the name has to be compared
+ * according to the filesystem's own rules.  isofs_cmp() below does that,
+ * and also performs some sanity checks.
+ */
+static int
+isofs_cmp(struct dentry * dentry, const char * compare, int dlen)
+{
+	struct qstr qstr;
+
+	if (!compare)
+		return 1;
+
+	/* check special "." and ".." files */
+	if (dlen == 1) {
+		/* "." */
+		if (compare[0] == 0) {
+			if (!dentry->d_name.len)
+				return 0;
+			compare = ".";
+		} else if (compare[0] == 1) {
+			compare = "..";
+			dlen = 2;
+		}
+	}
+
+	qstr.name = compare;
+	qstr.len = dlen;
+	return dentry->d_op->d_compare(dentry, &dentry->d_name, &qstr);
+}
+
+/*
+ *	isofs_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name.  It
+ * returns 1 and fills in *block_rv / *offset_rv if found, or 0 on failure.
+ */
+static unsigned long
+isofs_find_entry(struct inode *dir, struct dentry *dentry,
+	unsigned long *block_rv, unsigned long* offset_rv,
+	char * tmpname, struct iso_directory_record * tmpde)
+{
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(dir);
+	unsigned char bufbits = ISOFS_BUFFER_BITS(dir);
+	unsigned long block, f_pos, offset, block_saved, offset_saved;
+	struct buffer_head * bh = NULL;
+	struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb);
+
+	if (!ISOFS_I(dir)->i_first_extent)
+		return 0;
+  
+	f_pos = 0;
+	offset = 0;
+	block = 0;
+
+	while (f_pos < dir->i_size) {
+		struct iso_directory_record * de;
+		int de_len, match, i, dlen;
+		char *dpnt;
+
+		if (!bh) {
+			bh = isofs_bread(dir, block);
+			if (!bh)
+				return 0;
+		}
+
+		de = (struct iso_directory_record *) (bh->b_data + offset);
+
+		de_len = *(unsigned char *) de;
+		if (!de_len) {
+			brelse(bh);
+			bh = NULL;
+			f_pos = (f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
+			block = f_pos >> bufbits;
+			offset = 0;
+			continue;
+		}
+
+		block_saved = bh->b_blocknr;
+		offset_saved = offset;
+		offset += de_len;
+		f_pos += de_len;
+
+		/* Make sure we have a full directory entry */
+		if (offset >= bufsize) {
+			int slop = bufsize - offset + de_len;
+			memcpy(tmpde, de, slop);
+			offset &= bufsize - 1;
+			block++;
+			brelse(bh);
+			bh = NULL;
+			if (offset) {
+				bh = isofs_bread(dir, block);
+				if (!bh)
+					return 0;
+				memcpy((void *) tmpde + slop, bh->b_data, offset);
+			}
+			de = tmpde;
+		}
+
+		dlen = de->name_len[0];
+		dpnt = de->name;
+
+		if (sbi->s_rock &&
+		    ((i = get_rock_ridge_filename(de, tmpname, dir)))) {
+			dlen = i; 	/* possibly -1 */
+			dpnt = tmpname;
+#ifdef CONFIG_JOLIET
+		} else if (sbi->s_joliet_level) {
+			dlen = get_joliet_filename(de, tmpname, dir);
+			dpnt = tmpname;
+#endif
+		} else if (sbi->s_mapping == 'a') {
+			dlen = get_acorn_filename(de, tmpname, dir);
+			dpnt = tmpname;
+		} else if (sbi->s_mapping == 'n') {
+			dlen = isofs_name_translate(de, tmpname, dir);
+			dpnt = tmpname;
+		}
+
+		/*
+		 * Skip hidden or associated files unless unhide is set 
+		 */
+		match = 0;
+		if (dlen > 0 &&
+		    (!(de->flags[-sbi->s_high_sierra] & 5)
+		     || sbi->s_unhide == 'y'))
+		{
+			match = (isofs_cmp(dentry,dpnt,dlen) == 0);
+		}
+		if (match) {
+			isofs_normalize_block_and_offset(de,
+							 &block_saved,
+							 &offset_saved);
+			*block_rv = block_saved;
+			*offset_rv = offset_saved;
+			if (bh) brelse(bh);
+			return 1;
+		}
+	}
+	if (bh) brelse(bh);
+	return 0;
+}
+
+struct dentry *isofs_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	int found;
+	unsigned long block, offset;
+	struct inode *inode;
+	struct page *page;
+
+	dentry->d_op = dir->i_sb->s_root->d_op;
+
+	page = alloc_page(GFP_USER);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	lock_kernel();
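+	/*
+	 * The scratch page is split in two: the first 1024 bytes receive the
+	 * translated name, the remainder holds a spliced directory record.
+	 */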
+	found = isofs_find_entry(dir, dentry,
+				 &block, &offset,
+				 page_address(page),
+				 1024 + page_address(page));
+	__free_page(page);
+
+	inode = NULL;
+	if (found) {
+		inode = isofs_iget(dir->i_sb, block, offset);
+		if (!inode) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	if (inode)
+		return d_splice_alias(inode, dentry);
+	d_add(dentry, inode);
+	return NULL;
+}
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
new file mode 100644
index 0000000..8bdd3e4
--- /dev/null
+++ b/fs/isofs/rock.c
@@ -0,0 +1,565 @@
+/*
+ *  linux/fs/isofs/rock.c
+ *
+ *  (C) 1992, 1993  Eric Youngdale
+ *
+ *  Rock Ridge Extensions to iso9660
+ */
+
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/iso_fs.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <asm/page.h>
+
+#include "rock.h"
+
+/* These functions are designed to read the system areas of a directory record
+ * and extract relevant information.  There are different functions provided
+ * depending upon what information we need at the time.  One function fills
+ * out an inode structure, a second one extracts a filename, a third one
+ * returns a symbolic link name, and a fourth one returns the extent number
+ * for the file. */
+
+#define SIG(A,B) ((A) | ((B) << 8)) /* isonum_721() */
+
+
+/* This is a way of ensuring that we have something in the system
+   use fields that is compatible with Rock Ridge */
+#define CHECK_SP(FAIL)	       			\
+      if(rr->u.SP.magic[0] != 0xbe) FAIL;	\
+      if(rr->u.SP.magic[1] != 0xef) FAIL;       \
+      ISOFS_SB(inode->i_sb)->s_rock_offset=rr->u.SP.skip;
+/* We define a series of macros because each function must do exactly the
+   same thing in certain places.  We use the macros to ensure that everything
+   is done correctly */
+
+#define CONTINUE_DECLS \
+  int cont_extent = 0, cont_offset = 0, cont_size = 0;   \
+  void *buffer = NULL
+
+#define CHECK_CE	       			\
+      {cont_extent = isonum_733(rr->u.CE.extent); \
+      cont_offset = isonum_733(rr->u.CE.offset); \
+      cont_size = isonum_733(rr->u.CE.size);}
+
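+/* SETUP_ROCK_RIDGE points CHR at the system use area that follows the
+   (padded) name in the directory record and sets LEN to its length,
+   skipping the per-disc SP offset if one has been recorded. */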
+#define SETUP_ROCK_RIDGE(DE,CHR,LEN)	      		      	\
+  {LEN= sizeof(struct iso_directory_record) + DE->name_len[0];	\
+  if(LEN & 1) LEN++;						\
+  CHR = ((unsigned char *) DE) + LEN;				\
+  LEN = *((unsigned char *) DE) - LEN;                          \
+  if (LEN<0) LEN=0;                                             \
+  if (ISOFS_SB(inode->i_sb)->s_rock_offset!=-1)                \
+  {                                                             \
+     LEN-=ISOFS_SB(inode->i_sb)->s_rock_offset;                \
+     CHR+=ISOFS_SB(inode->i_sb)->s_rock_offset;                \
+     if (LEN<0) LEN=0;                                          \
+  }                                                             \
+}                                     
+
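+/* A CE entry points at a continuation area elsewhere on the disc; read it
+   into a temporary buffer and jump back to LABEL to keep parsing. */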
+#define MAYBE_CONTINUE(LABEL,DEV) \
+  {if (buffer) { kfree(buffer); buffer = NULL; } \
+  if (cont_extent){ \
+    int block, offset, offset1; \
+    struct buffer_head * pbh; \
+    buffer = kmalloc(cont_size,GFP_KERNEL); \
+    if (!buffer) goto out; \
+    block = cont_extent; \
+    offset = cont_offset; \
+    offset1 = 0; \
+    pbh = sb_bread(DEV->i_sb, block); \
+    if(pbh){       \
+      if (offset > pbh->b_size || offset + cont_size > pbh->b_size){	\
+	brelse(pbh); \
+	goto out; \
+      } \
+      memcpy(buffer + offset1, pbh->b_data + offset, cont_size - offset1); \
+      brelse(pbh); \
+      chr = (unsigned char *) buffer; \
+      len = cont_size; \
+      cont_extent = 0; \
+      cont_size = 0; \
+      cont_offset = 0; \
+      goto LABEL; \
+    }    \
+    printk("Unable to read rock-ridge attributes\n");    \
+  }}
+
+/* return length of name field; 0: not found, -1: to be ignored */
+int get_rock_ridge_filename(struct iso_directory_record * de,
+			    char * retname, struct inode * inode)
+{
+  int len;
+  unsigned char * chr;
+  CONTINUE_DECLS;
+  int retnamlen = 0, truncate=0;
+ 
+  if (!ISOFS_SB(inode->i_sb)->s_rock) return 0;
+  *retname = 0;
+
+  SETUP_ROCK_RIDGE(de, chr, len);
+ repeat:
+  {
+    struct rock_ridge * rr;
+    int sig;
+    
+    while (len > 2){ /* There may be one byte for padding somewhere */
+      rr = (struct rock_ridge *) chr;
+      if (rr->len < 3) goto out; /* Something got screwed up here */
+      sig = isonum_721(chr);
+      chr += rr->len; 
+      len -= rr->len;
+      if (len < 0) goto out;	/* corrupted isofs */
+
+      switch(sig){
+      case SIG('R','R'):
+	if((rr->u.RR.flags[0] & RR_NM) == 0) goto out;
+	break;
+      case SIG('S','P'):
+	CHECK_SP(goto out);
+	break;
+      case SIG('C','E'):
+	CHECK_CE;
+	break;
+      case SIG('N','M'):
+	if (truncate) break;
+	if (rr->len < 5) break;
+        /*
+	 * If the flags are 2 or 4, this indicates '.' or '..'.
+	 * We don't want to do anything with this, because it
+	 * screws up the code that calls us.  We don't really
+	 * care anyways, since we can just use the non-RR
+	 * name.
+	 */
+	if (rr->u.NM.flags & 6) {
+	  break;
+	}
+
+	if (rr->u.NM.flags & ~1) {
+	  printk("Unsupported NM flag settings (%d)\n",rr->u.NM.flags);
+	  break;
+	}
+	if((strlen(retname) + rr->len - 5) >= 254) {
+	  truncate = 1;
+	  break;
+	}
+	strncat(retname, rr->u.NM.name, rr->len - 5);
+	retnamlen += rr->len - 5;
+	break;
+      case SIG('R','E'):
+	if (buffer) kfree(buffer);
+	return -1;
+      default:
+	break;
+      }
+    }
+  }
+  MAYBE_CONTINUE(repeat,inode);
+  if (buffer) kfree(buffer);
+  return retnamlen; /* If 0, this file did not have a NM field */
+ out:
+  if(buffer) kfree(buffer);
+  return 0;
+}
+
+static int
+parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+				struct inode *inode, int regard_xa)
+{
+  int len;
+  unsigned char * chr;
+  int symlink_len = 0;
+  CONTINUE_DECLS;
+
+  if (!ISOFS_SB(inode->i_sb)->s_rock) return 0;
+
+  SETUP_ROCK_RIDGE(de, chr, len);
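+  /* If asked to, skip over the 14-byte CD-ROM XA record that may precede
+     the SUSP entries. */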
+  if (regard_xa)
+   {
+     chr+=14;
+     len-=14;
+     if (len<0) len=0;
+   }
+   
+ repeat:
+  {
+    int cnt, sig;
+    struct inode * reloc;
+    struct rock_ridge * rr;
+    int rootflag;
+    
+    while (len > 2){ /* There may be one byte for padding somewhere */
+      rr = (struct rock_ridge *) chr;
+      if (rr->len < 3) goto out; /* Something got screwed up here */
+      sig = isonum_721(chr);
+      chr += rr->len; 
+      len -= rr->len;
+      if (len < 0) goto out;	/* corrupted isofs */
+      
+      switch(sig){
+#ifndef CONFIG_ZISOFS		/* No flag for SF or ZF */
+      case SIG('R','R'):
+	if((rr->u.RR.flags[0] & 
+ 	    (RR_PX | RR_TF | RR_SL | RR_CL)) == 0) goto out;
+	break;
+#endif
+      case SIG('S','P'):
+	CHECK_SP(goto out);
+	break;
+      case SIG('C','E'):
+	CHECK_CE;
+	break;
+      case SIG('E','R'):
+	ISOFS_SB(inode->i_sb)->s_rock = 1;
+	printk(KERN_DEBUG "ISO 9660 Extensions: ");
+	{ int p;
+	  for(p=0;p<rr->u.ER.len_id;p++) printk("%c",rr->u.ER.data[p]);
+	}
+	  printk("\n");
+	break;
+      case SIG('P','X'):
+	inode->i_mode  = isonum_733(rr->u.PX.mode);
+	inode->i_nlink = isonum_733(rr->u.PX.n_links);
+	inode->i_uid   = isonum_733(rr->u.PX.uid);
+	inode->i_gid   = isonum_733(rr->u.PX.gid);
+	break;
+      case SIG('P','N'):
+	{ int high, low;
+	  high = isonum_733(rr->u.PN.dev_high);
+	  low = isonum_733(rr->u.PN.dev_low);
+	  /*
+	   * The Rock Ridge standard specifies that if sizeof(dev_t) <= 4,
+	   * then the high field is unused, and the device number is completely
+	   * stored in the low field.  Some writers may ignore this subtlety,
+	   * and as a result we test to see if the entire device number is
+	   * stored in the low field, and use that.
+	   */
+	  if((low & ~0xff) && high == 0) {
+	    inode->i_rdev = MKDEV(low >> 8, low & 0xff);
+	  } else {
+	    inode->i_rdev = MKDEV(high, low);
+	  }
+	}
+	break;
+      case SIG('T','F'):
+	/* Some RRIP writers incorrectly place ctime in the TF_CREATE field.
+	   Try to handle this correctly for either case. */
+	cnt = 0; /* Rock ridge never appears on a High Sierra disk */
+	if(rr->u.TF.flags & TF_CREATE) { 
+	  inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0);
+	  inode->i_ctime.tv_nsec = 0;
+	}
+	if(rr->u.TF.flags & TF_MODIFY) {
+	  inode->i_mtime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0);
+	  inode->i_mtime.tv_nsec = 0;
+	}
+	if(rr->u.TF.flags & TF_ACCESS) {
+	  inode->i_atime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0);
+	  inode->i_atime.tv_nsec = 0;
+	}
+	if(rr->u.TF.flags & TF_ATTRIBUTES) { 
+	  inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0);
+	  inode->i_ctime.tv_nsec = 0;
+	} 
+	break;
+      case SIG('S','L'):
+	{int slen;
+	 struct SL_component * slp;
+	 struct SL_component * oldslp;
+	 slen = rr->len - 5;
+	 slp = &rr->u.SL.link;
+	 inode->i_size = symlink_len;
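+	 /*
+	  * Walk the SL components to compute the length of the assembled
+	  * symlink target; the text itself is built later by
+	  * get_symlink_chunk().
+	  */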
+	 while (slen > 1){
+	   rootflag = 0;
+	   switch(slp->flags &~1){
+	   case 0:
+	     inode->i_size += slp->len;
+	     break;
+	   case 2:
+	     inode->i_size += 1;
+	     break;
+	   case 4:
+	     inode->i_size += 2;
+	     break;
+	   case 8:
+	     rootflag = 1;
+	     inode->i_size += 1;
+	     break;
+	   default:
+	     printk("Symlink component flag not implemented\n");
+	   }
+	   slen -= slp->len + 2;
+	   oldslp = slp;
+	   slp = (struct SL_component *) (((char *) slp) + slp->len + 2);
+
+	   if(slen < 2) {
+	     if(    ((rr->u.SL.flags & 1) != 0) 
+		    && ((oldslp->flags & 1) == 0) ) inode->i_size += 1;
+	     break;
+	   }
+
+	   /*
+	    * If this component record isn't continued, then append a '/'.
+	    */
+	   if (!rootflag && (oldslp->flags & 1) == 0)
+		   inode->i_size += 1;
+	 }
+	}
+	symlink_len = inode->i_size;
+	break;
+      case SIG('R','E'):
+	printk(KERN_WARNING "Attempt to read inode for relocated directory\n");
+	goto out;
+      case SIG('C','L'):
+	ISOFS_I(inode)->i_first_extent = isonum_733(rr->u.CL.location);
+	reloc = isofs_iget(inode->i_sb, ISOFS_I(inode)->i_first_extent, 0);
+	if (!reloc)
+		goto out;
+	inode->i_mode = reloc->i_mode;
+	inode->i_nlink = reloc->i_nlink;
+	inode->i_uid = reloc->i_uid;
+	inode->i_gid = reloc->i_gid;
+	inode->i_rdev = reloc->i_rdev;
+	inode->i_size = reloc->i_size;
+	inode->i_blocks = reloc->i_blocks;
+	inode->i_atime = reloc->i_atime;
+	inode->i_ctime = reloc->i_ctime;
+	inode->i_mtime = reloc->i_mtime;
+	iput(reloc);
+	break;
+#ifdef CONFIG_ZISOFS
+      case SIG('Z','F'):
+	      if ( !ISOFS_SB(inode->i_sb)->s_nocompress ) {
+		      int algo;
+		      algo = isonum_721(rr->u.ZF.algorithm);
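+		      /* 'pz' denotes zisofs' paged-zlib format (typically
+			 produced by mkzftree) */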
+		      if ( algo == SIG('p','z') ) {
+			      int block_shift = isonum_711(&rr->u.ZF.parms[1]);
+			      if ( block_shift < PAGE_CACHE_SHIFT || block_shift > 17 ) {
+				      printk(KERN_WARNING "isofs: Can't handle ZF block size of 2^%d\n", block_shift);
+			      } else {
+				/* Note: we don't change i_blocks here */
+				      ISOFS_I(inode)->i_file_format = isofs_file_compressed;
+				/* Parameters to compression algorithm (header size, block size) */
+				      ISOFS_I(inode)->i_format_parm[0] = isonum_711(&rr->u.ZF.parms[0]);
+				      ISOFS_I(inode)->i_format_parm[1] = isonum_711(&rr->u.ZF.parms[1]);
+				      inode->i_size = isonum_733(rr->u.ZF.real_size);
+			      }
+		      } else {
+			      printk(KERN_WARNING "isofs: Unknown ZF compression algorithm: %c%c\n",
+				     rr->u.ZF.algorithm[0], rr->u.ZF.algorithm[1]);
+		      }
+	      }
+	      break;
+#endif
+      default:
+	break;
+      }
+    }
+  }
+  MAYBE_CONTINUE(repeat,inode);
+ out:
+  if(buffer) kfree(buffer);
+  return 0;
+}
+
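+/*
+ * Copy the components of one SL (symlink) record into the buffer at
+ * rpnt, expanding the '.', '..' and root flags and never writing at
+ * or beyond plimit.  Returns the updated write pointer, or NULL if
+ * the link text would overflow the buffer.
+ */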
+static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
+{
+	int slen;
+	int rootflag;
+	struct SL_component *oldslp;
+	struct SL_component *slp;
+	slen = rr->len - 5;
+	slp = &rr->u.SL.link;
+	while (slen > 1) {
+		rootflag = 0;
+		switch (slp->flags & ~1) {
+		case 0:
+			if (slp->len > plimit - rpnt)
+				return NULL;
+			memcpy(rpnt, slp->text, slp->len);
+			rpnt+=slp->len;
+			break;
+		case 2:
+			if (rpnt >= plimit)
+				return NULL;
+			*rpnt++='.';
+			break;
+		case 4:
+			if (2 > plimit - rpnt)
+				return NULL;
+			*rpnt++='.';
+			*rpnt++='.';
+			break;
+		case 8:
+			if (rpnt >= plimit)
+				return NULL;
+			rootflag = 1;
+			*rpnt++='/';
+			break;
+		default:
+			printk("Symlink component flag not implemented (%d)\n",
+			     slp->flags);
+		}
+		slen -= slp->len + 2;
+		oldslp = slp;
+		slp = (struct SL_component *) ((char *) slp + slp->len + 2);
+
+		if (slen < 2) {
+			/*
+			 * If there is another SL record, and this component
+			 * record isn't continued, then add a slash.
+			 */
+			if ((!rootflag) && (rr->u.SL.flags & 1) &&
+			    !(oldslp->flags & 1)) {
+				if (rpnt >= plimit)
+					return NULL;
+				*rpnt++='/';
+			}
+			break;
+		}
+
+		/*
+		 * If this component record isn't continued, then append a '/'.
+		 */
+		if (!rootflag && !(oldslp->flags & 1)) {
+			if (rpnt >= plimit)
+				return NULL;
+			*rpnt++='/';
+		}
+	}
+	return rpnt;
+}
+
+int parse_rock_ridge_inode(struct iso_directory_record * de,
+			   struct inode * inode)
+{
+   int result=parse_rock_ridge_inode_internal(de,inode,0);
+   /* If the Rock Ridge flag was reset and we didn't look for attributes
+    * behind possible XA attributes, have a look there */
+   if ((ISOFS_SB(inode->i_sb)->s_rock_offset==-1)
+       &&(ISOFS_SB(inode->i_sb)->s_rock==2))
+     {
+	result=parse_rock_ridge_inode_internal(de,inode,14);
+     }
+   return result;
+}
+
+/* readpage() for symlinks: reads symlink contents into the page and either
+   makes it uptodate and returns 0 or returns error (-EIO) */
+
+static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+        struct iso_inode_info *ei = ISOFS_I(inode);
+	char *link = kmap(page);
+	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
+	struct buffer_head *bh;
+	char *rpnt = link;
+	unsigned char *pnt;
+	struct iso_directory_record *raw_inode;
+	CONTINUE_DECLS;
+	unsigned long block, offset;
+	int sig;
+	int len;
+	unsigned char *chr;
+	struct rock_ridge *rr;
+
+	if (!ISOFS_SB(inode->i_sb)->s_rock)
+		goto error;
+
+	block = ei->i_iget5_block;
+	lock_kernel();
+	bh = sb_bread(inode->i_sb, block);
+	if (!bh)
+		goto out_noread;
+
+        offset = ei->i_iget5_offset;
+	pnt = (unsigned char *) bh->b_data + offset;
+
+	raw_inode = (struct iso_directory_record *) pnt;
+
+	/*
+	 * If we go past the end of the buffer, there is some sort of error.
+	 */
+	if (offset + *pnt > bufsize)
+		goto out_bad_span;
+
+	/* Now test for possible Rock Ridge extensions which will override
+	   some of these numbers in the inode structure. */
+
+	SETUP_ROCK_RIDGE(raw_inode, chr, len);
+
+      repeat:
+	while (len > 2) { /* There may be one byte for padding somewhere */
+		rr = (struct rock_ridge *) chr;
+		if (rr->len < 3)
+			goto out;	/* Something got screwed up here */
+		sig = isonum_721(chr);
+		chr += rr->len;
+		len -= rr->len;
+		if (len < 0)
+			goto out;	/* corrupted isofs */
+
+		switch (sig) {
+		case SIG('R', 'R'):
+			if ((rr->u.RR.flags[0] & RR_SL) == 0)
+				goto out;
+			break;
+		case SIG('S', 'P'):
+			CHECK_SP(goto out);
+			break;
+		case SIG('S', 'L'):
+			rpnt = get_symlink_chunk(rpnt, rr,
+						 link + (PAGE_SIZE - 1));
+			if (rpnt == NULL)
+				goto out;
+			break;
+		case SIG('C', 'E'):
+			/* This tells us if there is a continuation record */
+			CHECK_CE;
+		default:
+			break;
+		}
+	}
+	MAYBE_CONTINUE(repeat, inode);
+	if (buffer)
+		kfree(buffer);
+
+	if (rpnt == link)
+		goto fail;
+	brelse(bh);
+	*rpnt = '\0';
+	unlock_kernel();
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+
+	/* error exit from macro */
+      out:
+	if (buffer)
+		kfree(buffer);
+	goto fail;
+      out_noread:
+	printk("unable to read i-node block\n");
+	goto fail;
+      out_bad_span:
+	printk("symlink spans iso9660 blocks\n");
+      fail:
+	brelse(bh);
+	unlock_kernel();
+      error:
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return -EIO;
+}
+
+struct address_space_operations isofs_symlink_aops = {
+	.readpage	= rock_ridge_symlink_readpage
+};
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
new file mode 100644
index 0000000..deaf5c8
--- /dev/null
+++ b/fs/isofs/rock.h
@@ -0,0 +1,119 @@
+/* These structs are used by the system-use-sharing protocol, in which the
+   Rock Ridge extensions are embedded.  It is quite possible that other
+   extensions are present on the disk, and this is fine as long as they
+   all use SUSP */
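+
+/* Numeric fields below are 8 bytes wide because ISO 9660 records
+   32-bit values in both little- and big-endian byte order (the
+   "733" format decoded by isonum_733()). */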
+
+struct SU_SP{
+  unsigned char magic[2];
+  unsigned char skip;
+} __attribute__((packed));
+
+struct SU_CE{
+  char extent[8];
+  char offset[8];
+  char size[8];
+};
+
+struct SU_ER{
+  unsigned char len_id;
+  unsigned char len_des;
+  unsigned char len_src;
+  unsigned char ext_ver;
+  char data[0];
+} __attribute__((packed));
+
+struct RR_RR{
+  char flags[1];
+} __attribute__((packed));
+
+struct RR_PX{
+  char mode[8];
+  char n_links[8];
+  char uid[8];
+  char gid[8];
+};
+
+struct RR_PN{
+  char dev_high[8];
+  char dev_low[8];
+};
+
+
+struct SL_component{
+  unsigned char flags;
+  unsigned char len;
+  char text[0];
+} __attribute__((packed));
+
+struct RR_SL{
+  unsigned char flags;
+  struct SL_component link;
+} __attribute__((packed));
+
+struct RR_NM{
+  unsigned char flags;
+  char name[0];
+} __attribute__((packed));
+
+struct RR_CL{
+  char location[8];
+};
+
+struct RR_PL{
+  char location[8];
+};
+
+struct stamp{
+  char time[7];
+} __attribute__((packed));
+
+struct RR_TF{
+  char flags;
+  struct stamp times[0];  /* Variable number of these beasts */
+} __attribute__((packed));
+
+/* Linux-specific extension for transparent decompression */
+struct RR_ZF{
+  char algorithm[2];
+  char parms[2];
+  char real_size[8];
+};
+
+/* These are the bits and their meanings for flags in the TF structure. */
+#define TF_CREATE 1
+#define TF_MODIFY 2
+#define TF_ACCESS 4
+#define TF_ATTRIBUTES 8
+#define TF_BACKUP 16
+#define TF_EXPIRATION 32
+#define TF_EFFECTIVE 64
+#define TF_LONG_FORM 128
+
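+/* A single System Use entry: two-character signature, total length,
+   version, and a signature-specific payload overlaid by the union. */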
+struct rock_ridge{
+  char signature[2];
+  unsigned char len;
+  unsigned char version;
+  union{
+    struct SU_SP SP;
+    struct SU_CE CE;
+    struct SU_ER ER;
+    struct RR_RR RR;
+    struct RR_PX PX;
+    struct RR_PN PN;
+    struct RR_SL SL;
+    struct RR_NM NM;
+    struct RR_CL CL;
+    struct RR_PL PL;
+    struct RR_TF TF;
+    struct RR_ZF ZF;
+  } u;
+};
+
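+/* Bits of the RR entry's flags[0] byte, indicating which of the other
+   Rock Ridge fields are recorded for this directory entry. */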
+#define RR_PX 1   /* POSIX attributes */
+#define RR_PN 2   /* POSIX devices */
+#define RR_SL 4   /* Symbolic link */
+#define RR_NM 8   /* Alternate Name */
+#define RR_CL 16  /* Child link */
+#define RR_PL 32  /* Parent link */
+#define RR_RE 64  /* Relocation directory */
+#define RR_TF 128 /* Timestamps */
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
new file mode 100644
index 0000000..3f6d9c1
--- /dev/null
+++ b/fs/isofs/util.c
@@ -0,0 +1,83 @@
+/*
+ *  linux/fs/isofs/util.c
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/iso_fs.h>
+
+/*
+ * We have to convert the on-disc date (years since 1900, month, day,
+ * hour, minute, second, plus a timezone offset in 15-minute units)
+ * to the Unix ctime format, taking leap years into account.
+ * Unfortunately, the kernel does not have the information on hand to
+ * take daylight saving time into account, but it shouldn't matter.
+ * The time stored should be localtime (with or without DST in effect),
+ * and the timezone offset should hold the offset required to get back
+ * to GMT.  Thus we should always be correct.
+ */
+
+int iso_date(char * p, int flag)
+{
+	int year, month, day, hour, minute, second, tz;
+	int crtime, days, i;
+
+	year = p[0] - 70;
+	month = p[1];
+	day = p[2];
+	hour = p[3];
+	minute = p[4];
+	second = p[5];
+	if (flag == 0) tz = p[6]; /* High sierra has no time zone */
+	else tz = 0;
+	
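+	/*
+	 * Example: p = {104, 3, 1, 12, 0, 0, 4} decodes as
+	 * 2004-03-01 12:00:00 local time at GMT+1h (4 * 15 minutes),
+	 * i.e. 11:00:00 GMT once the offset is subtracted below.
+	 */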
+	if (year < 0) {
+		crtime = 0;
+	} else {
+		int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31};
+
+		days = year * 365;
+		if (year > 2)
+			days += (year+1) / 4;
+		for (i = 1; i < month; i++)
+			days += monlen[i-1];
+		if (((year+2) % 4) == 0 && month > 2)
+			days++;
+		days += day - 1;
+		crtime = ((((days * 24) + hour) * 60 + minute) * 60)
+			+ second;
+
+		/* sign extend */
+		if (tz & 0x80)
+			tz |= (-1 << 8);
+		
+		/* 
+		 * The timezone offset is unreliable on some disks,
+		 * so we make a sanity check.  In no case is it ever
+		 * more than 13 hours from GMT, which is 52*15min.
+		 * The time is always stored in localtime with the
+		 * timezone offset being what gets added to GMT to
+		 * get to localtime.  Thus we need to subtract the offset
+		 * to get to true GMT, which is what we store the time
+		 * as internally.  On the local system, the user may set
+		 * their timezone any way they wish, of course, so GMT
+		 * gets converted back to localtime on the receiving
+		 * system.
+		 *
+		 * NOTE: mkisofs in versions prior to mkisofs-1.10 had
+		 * the sign wrong on the timezone offset.  This has now
+		 * been corrected there too, but if you are getting screwy
+		 * results this may be the explanation.  If enough people
+		 * complain, a user configuration option could be added
+		 * to add the timezone offset in with the wrong sign
+		 * for 'compatibility' with older discs, but I cannot see how
+		 * it will matter that much.
+		 *
+		 * Thanks to kuhlmav@elec.canterbury.ac.nz (Volker Kuhlmann)
+		 * for pointing out the sign error.
+		 */
+		if (-52 <= tz && tz <= 52)
+			crtime -= tz * 15 * 60;
+	}
+	return crtime;
+}		
+	
diff --git a/fs/isofs/zisofs.h b/fs/isofs/zisofs.h
new file mode 100644
index 0000000..d78485d
--- /dev/null
+++ b/fs/isofs/zisofs.h
@@ -0,0 +1,21 @@
+/* ----------------------------------------------------------------------- *
+ *   
+ *   Copyright 2001 H. Peter Anvin - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * Prototypes for functions exported from the compressed isofs subsystem
+ */
+
+#ifdef CONFIG_ZISOFS
+extern struct address_space_operations zisofs_aops;
+extern int __init zisofs_init(void);
+extern void zisofs_cleanup(void);
+#endif
diff --git a/fs/jbd/Makefile b/fs/jbd/Makefile
new file mode 100644
index 0000000..54aca48
--- /dev/null
+++ b/fs/jbd/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux journaling routines.
+#
+
+obj-$(CONFIG_JBD) += jbd.o
+
+jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
new file mode 100644
index 0000000..98d8304
--- /dev/null
+++ b/fs/jbd/checkpoint.c
@@ -0,0 +1,636 @@
+/*
+ * linux/fs/jbd/checkpoint.c
+ * 
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1999 Red Hat Software --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Checkpoint routines for the generic filesystem journaling code.  
+ * Part of the ext2fs journaling system.  
+ *
+ * Checkpointing is the process of ensuring that a section of the log is
+ * committed fully to disk, so that that portion of the log can be
+ * reused.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+/*
+ * Unlink a buffer from a transaction. 
+ *
+ * Called with j_list_lock held.
+ */
+
+static inline void __buffer_unlink(struct journal_head *jh)
+{
+	transaction_t *transaction;
+
+	transaction = jh->b_cp_transaction;
+	jh->b_cp_transaction = NULL;
+
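+	/*
+	 * The checkpoint list is circular and doubly linked; if jh was
+	 * its only element the head still points at jh after the unlink,
+	 * so the second test below empties the list completely.
+	 */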
+	jh->b_cpnext->b_cpprev = jh->b_cpprev;
+	jh->b_cpprev->b_cpnext = jh->b_cpnext;
+	if (transaction->t_checkpoint_list == jh)
+		transaction->t_checkpoint_list = jh->b_cpnext;
+	if (transaction->t_checkpoint_list == jh)
+		transaction->t_checkpoint_list = NULL;
+}
+
+/*
+ * Try to release a checkpointed buffer from its transaction.
+ * Returns 1 if we released it.
+ * Requires j_list_lock
+ * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
+ */
+static int __try_to_free_cp_buf(struct journal_head *jh)
+{
+	int ret = 0;
+	struct buffer_head *bh = jh2bh(jh);
+
+	if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
+		JBUFFER_TRACE(jh, "remove from checkpoint list");
+		__journal_remove_checkpoint(jh);
+		jbd_unlock_bh_state(bh);
+		journal_remove_journal_head(bh);
+		BUFFER_TRACE(bh, "release");
+		__brelse(bh);
+		ret = 1;
+	} else {
+		jbd_unlock_bh_state(bh);
+	}
+	return ret;
+}
+
+/*
+ * __log_wait_for_space: wait until there is space in the journal.
+ *
+ * Called under j_state_lock *only*.  It will be unlocked if we have to wait
+ * for a checkpoint to free up some space in the log.
+ */
+void __log_wait_for_space(journal_t *journal)
+{
+	int nblocks;
+	assert_spin_locked(&journal->j_state_lock);
+
+	nblocks = jbd_space_needed(journal);
+	while (__log_space_left(journal) < nblocks) {
+		if (journal->j_flags & JFS_ABORT)
+			return;
+		spin_unlock(&journal->j_state_lock);
+		down(&journal->j_checkpoint_sem);
+
+		/*
+		 * Test again, another process may have checkpointed while we
+		 * were waiting for the checkpoint lock
+		 */
+		spin_lock(&journal->j_state_lock);
+		nblocks = jbd_space_needed(journal);
+		if (__log_space_left(journal) < nblocks) {
+			spin_unlock(&journal->j_state_lock);
+			log_do_checkpoint(journal);
+			spin_lock(&journal->j_state_lock);
+		}
+		up(&journal->j_checkpoint_sem);
+	}
+}
+
+/*
+ * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
+ * The caller must restart a list walk.  Wait for someone else to run
+ * jbd_unlock_bh_state().
+ */
+static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
+{
+	get_bh(bh);
+	spin_unlock(&journal->j_list_lock);
+	jbd_lock_bh_state(bh);
+	jbd_unlock_bh_state(bh);
+	put_bh(bh);
+}
+
+/*
+ * Clean up a transaction's checkpoint list.  
+ *
+ * We wait for any pending IO to complete and make sure any clean
+ * buffers are removed from the transaction. 
+ *
+ * Return 1 if we performed any actions which might have destroyed the
+ * checkpoint.  (journal_remove_checkpoint() deletes the transaction when
+ * the last checkpoint buffer is cleansed)
+ *
+ * Called with j_list_lock held.
+ */
+static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
+{
+	struct journal_head *jh, *next_jh, *last_jh;
+	struct buffer_head *bh;
+	int ret = 0;
+
+	assert_spin_locked(&journal->j_list_lock);
+	jh = transaction->t_checkpoint_list;
+	if (!jh)
+		return 0;
+
+	last_jh = jh->b_cpprev;
+	next_jh = jh;
+	do {
+		jh = next_jh;
+		bh = jh2bh(jh);
+		if (buffer_locked(bh)) {
+			atomic_inc(&bh->b_count);
+			spin_unlock(&journal->j_list_lock);
+			wait_on_buffer(bh);
+			/* the journal_head may have gone by now */
+			BUFFER_TRACE(bh, "brelse");
+			__brelse(bh);
+			goto out_return_1;
+		}
+
+		/*
+		 * This is foul
+		 */
+		if (!jbd_trylock_bh_state(bh)) {
+			jbd_sync_bh(journal, bh);
+			goto out_return_1;
+		}
+
+		if (jh->b_transaction != NULL) {
+			transaction_t *t = jh->b_transaction;
+			tid_t tid = t->t_tid;
+
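+			/*
+			 * The buffer still belongs to a live transaction:
+			 * force that transaction to commit and wait for it,
+			 * so the buffer can be checkpointed on a later pass.
+			 */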
+			spin_unlock(&journal->j_list_lock);
+			jbd_unlock_bh_state(bh);
+			log_start_commit(journal, tid);
+			log_wait_commit(journal, tid);
+			goto out_return_1;
+		}
+
+		/*
+		 * AKPM: I think the buffer_jbddirty test is redundant - it
+		 * shouldn't have NULL b_transaction?
+		 */
+		next_jh = jh->b_cpnext;
+		if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
+			BUFFER_TRACE(bh, "remove from checkpoint");
+			__journal_remove_checkpoint(jh);
+			jbd_unlock_bh_state(bh);
+			journal_remove_journal_head(bh);
+			__brelse(bh);
+			ret = 1;
+		} else {
+			jbd_unlock_bh_state(bh);
+		}
+		jh = next_jh;
+	} while (jh != last_jh);
+
+	return ret;
+out_return_1:
+	spin_lock(&journal->j_list_lock);
+	return 1;
+}
+
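+/* Checkpoint writes are submitted to disk in batches of this many buffers. */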
+#define NR_BATCH	64
+
+static void
+__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
+{
+	int i;
+
+	spin_unlock(&journal->j_list_lock);
+	ll_rw_block(WRITE, *batch_count, bhs);
+	spin_lock(&journal->j_list_lock);
+	for (i = 0; i < *batch_count; i++) {
+		struct buffer_head *bh = bhs[i];
+		clear_buffer_jwrite(bh);
+		BUFFER_TRACE(bh, "brelse");
+		__brelse(bh);
+	}
+	*batch_count = 0;
+}
+
+/*
+ * Try to flush one buffer from the checkpoint list to disk.
+ *
+ * Return 1 if something happened which requires us to abort the current
+ * scan of the checkpoint list.  
+ *
+ * Called with j_list_lock held.
+ * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
+ */
+static int __flush_buffer(journal_t *journal, struct journal_head *jh,
+			struct buffer_head **bhs, int *batch_count,
+			int *drop_count)
+{
+	struct buffer_head *bh = jh2bh(jh);
+	int ret = 0;
+
+	if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
+		J_ASSERT_JH(jh, jh->b_transaction == NULL);
+
+		/*
+		 * Important: we are about to write the buffer, and
+		 * possibly block, while still holding the journal lock.
+		 * We cannot afford to let the transaction logic start
+		 * messing around with this buffer before we write it to
+		 * disk, as that would break recoverability.  
+		 */
+		BUFFER_TRACE(bh, "queue");
+		get_bh(bh);
+		J_ASSERT_BH(bh, !buffer_jwrite(bh));
+		set_buffer_jwrite(bh);
+		bhs[*batch_count] = bh;
+		jbd_unlock_bh_state(bh);
+		(*batch_count)++;
+		if (*batch_count == NR_BATCH) {
+			__flush_batch(journal, bhs, batch_count);
+			ret = 1;
+		}
+	} else {
+		int last_buffer = 0;
+		if (jh->b_cpnext == jh) {
+			/* We may be about to drop the transaction.  Tell the
+			 * caller that the lists have changed.
+			 */
+			last_buffer = 1;
+		}
+		if (__try_to_free_cp_buf(jh)) {
+			(*drop_count)++;
+			ret = last_buffer;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Perform an actual checkpoint.  We don't write out only enough to
+ * satisfy the current blocked requests: rather we submit a reasonably
+ * sized chunk of the outstanding data to disk at once for
+ * efficiency.  __log_wait_for_space() will retry if we didn't free enough.
+ * 
+ * However, we _do_ take into account the amount requested so that once
+ * the IO has been queued, we can return as soon as enough of it has
+ * completed to disk.  
+ *
+ * The journal should be locked before calling this function.
+ */
+int log_do_checkpoint(journal_t *journal)
+{
+	int result;
+	int batch_count = 0;
+	struct buffer_head *bhs[NR_BATCH];
+
+	jbd_debug(1, "Start checkpoint\n");
+
+	/* 
+	 * First thing: if there are any transactions in the log which
+	 * don't need checkpointing, just eliminate them from the
+	 * journal straight away.  
+	 */
+	result = cleanup_journal_tail(journal);
+	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
+	if (result <= 0)
+		return result;
+
+	/*
+	 * OK, we need to start writing disk blocks.  Try to free up a
+	 * quarter of the log in a single checkpoint if we can.
+	 */
+	/*
+	 * AKPM: check this code.  I had a feeling a while back that it
+	 * degenerates into a busy loop at unmount time.
+	 */
+	spin_lock(&journal->j_list_lock);
+	while (journal->j_checkpoint_transactions) {
+		transaction_t *transaction;
+		struct journal_head *jh, *last_jh, *next_jh;
+		int drop_count = 0;
+		int cleanup_ret, retry = 0;
+		tid_t this_tid;
+
+		transaction = journal->j_checkpoint_transactions;
+		this_tid = transaction->t_tid;
+		jh = transaction->t_checkpoint_list;
+		last_jh = jh->b_cpprev;
+		next_jh = jh;
+		do {
+			struct buffer_head *bh;
+
+			jh = next_jh;
+			next_jh = jh->b_cpnext;
+			bh = jh2bh(jh);
+			if (!jbd_trylock_bh_state(bh)) {
+				jbd_sync_bh(journal, bh);
+				spin_lock(&journal->j_list_lock);
+				retry = 1;
+				break;
+			}
+			retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count);
+			if (cond_resched_lock(&journal->j_list_lock)) {
+				retry = 1;
+				break;
+			}
+		} while (jh != last_jh && !retry);
+
+		if (batch_count)
+			__flush_batch(journal, bhs, &batch_count);
+
+		/*
+		 * If someone cleaned up this transaction while we slept, we're
+		 * done
+		 */
+		if (journal->j_checkpoint_transactions != transaction)
+			break;
+		if (retry)
+			continue;
+		/*
+		 * Maybe it's a new transaction, but it fell at the same
+		 * address
+		 */
+		if (transaction->t_tid != this_tid)
+			continue;
+		/*
+		 * We have walked the whole transaction list without
+		 * finding anything to write to disk.  We had better be
+		 * able to make some progress or we are in trouble. 
+		 */
+		cleanup_ret = __cleanup_transaction(journal, transaction);
+		J_ASSERT(drop_count != 0 || cleanup_ret != 0);
+		if (journal->j_checkpoint_transactions != transaction)
+			break;
+	}
+	spin_unlock(&journal->j_list_lock);
+	result = cleanup_journal_tail(journal);
+	if (result < 0)
+		return result;
+
+	return 0;
+}
+
+/*
+ * Check the list of checkpoint transactions for the journal to see if
+ * we have already got rid of any since the last update of the log tail
+ * in the journal superblock.  If so, we can instantly roll the
+ * superblock forward to remove those transactions from the log.
+ * 
+ * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
+ * 
+ * Called with the journal lock held.
+ *
+ * This is the only part of the journaling code which really needs to be
+ * aware of transaction aborts.  Checkpointing involves writing to the
+ * main filesystem area rather than to the journal, so it can proceed
+ * even in abort state, but we must not update the journal superblock if
+ * we have an abort error outstanding.
+ */
+
+int cleanup_journal_tail(journal_t *journal)
+{
+	transaction_t * transaction;
+	tid_t		first_tid;
+	unsigned long	blocknr, freed;
+
+	/* OK, work out the oldest transaction remaining in the log, and
+	 * the log block it starts at. 
+	 * 
+	 * If the log is now empty, we need to work out which is the
+	 * next transaction ID we will write, and where it will
+	 * start. */
+
+	spin_lock(&journal->j_state_lock);
+	spin_lock(&journal->j_list_lock);
+	transaction = journal->j_checkpoint_transactions;
+	if (transaction) {
+		first_tid = transaction->t_tid;
+		blocknr = transaction->t_log_start;
+	} else if ((transaction = journal->j_committing_transaction) != NULL) {
+		first_tid = transaction->t_tid;
+		blocknr = transaction->t_log_start;
+	} else if ((transaction = journal->j_running_transaction) != NULL) {
+		first_tid = transaction->t_tid;
+		blocknr = journal->j_head;
+	} else {
+		first_tid = journal->j_transaction_sequence;
+		blocknr = journal->j_head;
+	}
+	spin_unlock(&journal->j_list_lock);
+	J_ASSERT(blocknr != 0);
+
+	/* If the oldest pinned transaction is at the tail of the log
+           already then there's not much we can do right now. */
+	if (journal->j_tail_sequence == first_tid) {
+		spin_unlock(&journal->j_state_lock);
+		return 1;
+	}
+
+	/* OK, update the superblock to recover the freed space.
+	 * Physical blocks come first: have we wrapped beyond the end of
+	 * the log?  */
+	freed = blocknr - journal->j_tail;
+	if (blocknr < journal->j_tail)
+		freed = freed + journal->j_last - journal->j_first;
+
+	jbd_debug(1,
+		  "Cleaning journal tail from %d to %d (offset %lu), "
+		  "freeing %lu\n",
+		  journal->j_tail_sequence, first_tid, blocknr, freed);
+
+	journal->j_free += freed;
+	journal->j_tail_sequence = first_tid;
+	journal->j_tail = blocknr;
+	spin_unlock(&journal->j_state_lock);
+	if (!(journal->j_flags & JFS_ABORT))
+		journal_update_superblock(journal, 1);
+	return 0;
+}
+
+
+/* Checkpoint list management */
+
+/*
+ * journal_clean_checkpoint_list
+ *
+ * Find all the written-back checkpoint buffers in the journal and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns the number of buffers reaped (for debug)
+ */
+
+int __journal_clean_checkpoint_list(journal_t *journal)
+{
+	transaction_t *transaction, *last_transaction, *next_transaction;
+	int ret = 0;
+
+	transaction = journal->j_checkpoint_transactions;
+	if (transaction == NULL)
+		goto out;
+
+	last_transaction = transaction->t_cpprev;
+	next_transaction = transaction;
+	do {
+		struct journal_head *jh;
+
+		transaction = next_transaction;
+		next_transaction = transaction->t_cpnext;
+		jh = transaction->t_checkpoint_list;
+		if (jh) {
+			struct journal_head *last_jh = jh->b_cpprev;
+			struct journal_head *next_jh = jh;
+
+			do {
+				jh = next_jh;
+				next_jh = jh->b_cpnext;
+				/* Use trylock because of the lock ranking */
+				if (jbd_trylock_bh_state(jh2bh(jh)))
+					ret += __try_to_free_cp_buf(jh);
+				/*
+				 * This function only frees up some memory
+				 * if possible, so we don't have an obligation
+				 * to finish processing. Bail out if preemption
+				 * requested:
+				 */
+				if (need_resched())
+					goto out;
+			} while (jh != last_jh);
+		}
+	} while (transaction != last_transaction);
+out:
+	return ret;
+}
+
+/* 
+ * journal_remove_checkpoint: called after a buffer has been committed
+ * to disk (either by being write-back flushed to disk, or being
+ * committed to the log).
+ *
+ * We cannot safely clean a transaction out of the log until all of the
+ * buffer updates committed in that transaction have safely been stored
+ * elsewhere on disk.  To achieve this, all of the buffers in a
+ * transaction need to be maintained on the transaction's checkpoint
+ * list until they have been rewritten, at which point this function is
+ * called to remove the buffer from the existing transaction's
+ * checkpoint list.  
+ *
+ * This function is called with the journal locked.
+ * This function is called with j_list_lock held.
+ */
+
+void __journal_remove_checkpoint(struct journal_head *jh)
+{
+	transaction_t *transaction;
+	journal_t *journal;
+
+	JBUFFER_TRACE(jh, "entry");
+
+	if ((transaction = jh->b_cp_transaction) == NULL) {
+		JBUFFER_TRACE(jh, "not on transaction");
+		goto out;
+	}
+	journal = transaction->t_journal;
+
+	__buffer_unlink(jh);
+
+	if (transaction->t_checkpoint_list != NULL)
+		goto out;
+	JBUFFER_TRACE(jh, "transaction has no more buffers");
+
+	/*
+	 * There is one special case to worry about: if we have just pulled the
+	 * buffer off a committing transaction's forget list, then even if the
+	 * checkpoint list is empty, the transaction obviously cannot be
+	 * dropped!
+	 *
+	 * The locking here around j_committing_transaction is a bit sleazy.
+	 * See the comment at the end of journal_commit_transaction().
+	 */
+	if (transaction == journal->j_committing_transaction) {
+		JBUFFER_TRACE(jh, "belongs to committing transaction");
+		goto out;
+	}
+
+	/* OK, that was the last buffer for the transaction: we can now
+	   safely remove this transaction from the log */
+
+	__journal_drop_transaction(journal, transaction);
+
+	/* Just in case anybody was waiting for more transactions to be
+           checkpointed... */
+	wake_up(&journal->j_wait_logspace);
+out:
+	JBUFFER_TRACE(jh, "exit");
+}
+
+/*
+ * journal_insert_checkpoint: put a committed buffer onto a checkpoint
+ * list so that we know when it is safe to clean the transaction out of
+ * the log.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ */
+void __journal_insert_checkpoint(struct journal_head *jh, 
+			       transaction_t *transaction)
+{
+	JBUFFER_TRACE(jh, "entry");
+	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
+	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+
+	jh->b_cp_transaction = transaction;
+
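+	/* Link jh into the transaction's circular checkpoint list and make
+	   it the new list head. */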
+	if (!transaction->t_checkpoint_list) {
+		jh->b_cpnext = jh->b_cpprev = jh;
+	} else {
+		jh->b_cpnext = transaction->t_checkpoint_list;
+		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
+		jh->b_cpprev->b_cpnext = jh;
+		jh->b_cpnext->b_cpprev = jh;
+	}
+	transaction->t_checkpoint_list = jh;
+}
+
+/*
+ * We've finished with this transaction structure: adios...
+ * 
+ * The transaction must have no links except for the checkpoint by this
+ * point.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ */
+
+void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
+{
+	assert_spin_locked(&journal->j_list_lock);
+	if (transaction->t_cpnext) {
+		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
+		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
+		if (journal->j_checkpoint_transactions == transaction)
+			journal->j_checkpoint_transactions =
+				transaction->t_cpnext;
+		if (journal->j_checkpoint_transactions == transaction)
+			journal->j_checkpoint_transactions = NULL;
+	}
+
+	J_ASSERT(transaction->t_state == T_FINISHED);
+	J_ASSERT(transaction->t_buffers == NULL);
+	J_ASSERT(transaction->t_sync_datalist == NULL);
+	J_ASSERT(transaction->t_forget == NULL);
+	J_ASSERT(transaction->t_iobuf_list == NULL);
+	J_ASSERT(transaction->t_shadow_list == NULL);
+	J_ASSERT(transaction->t_log_list == NULL);
+	J_ASSERT(transaction->t_checkpoint_list == NULL);
+	J_ASSERT(transaction->t_updates == 0);
+	J_ASSERT(journal->j_committing_transaction != transaction);
+	J_ASSERT(journal->j_running_transaction != transaction);
+
+	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
+	kfree(transaction);
+}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
new file mode 100644
index 0000000..dac720c
--- /dev/null
+++ b/fs/jbd/commit.c
@@ -0,0 +1,844 @@
+/*
+ * linux/fs/jbd/commit.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal commit routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+/*
+ * Default IO end handler for temporary BJ_IO buffer_heads.
+ */
+static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
+{
+	BUFFER_TRACE(bh, "");
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
+	unlock_buffer(bh);
+}
+
+/*
+ * When an ext3-ordered file is truncated, it is possible that many pages are
+ * not successfully freed, because they are attached to a committing transaction.
+ * After the transaction commits, these pages are left on the LRU, with no
+ * ->mapping, and with attached buffers.  These pages are trivially reclaimable
+ * by the VM, but their apparent absence upsets the VM accounting, and it makes
+ * the numbers in /proc/meminfo look odd.
+ *
+ * So here, we have a buffer which has just come off the forget list.  Look to
+ * see if we can strip all buffers from the backing page.
+ *
+ * Called under lock_journal(), and possibly under journal_datalist_lock.  The
+ * caller provided us with a ref against the buffer, and we drop that here.
+ */
+static void release_buffer_page(struct buffer_head *bh)
+{
+	struct page *page;
+
+	if (buffer_dirty(bh))
+		goto nope;
+	if (atomic_read(&bh->b_count) != 1)
+		goto nope;
+	page = bh->b_page;
+	if (!page)
+		goto nope;
+	if (page->mapping)
+		goto nope;
+
+	/* OK, it's a truncated page */
+	if (TestSetPageLocked(page))
+		goto nope;
+
+	page_cache_get(page);
+	__brelse(bh);
+	try_to_free_buffers(page);
+	unlock_page(page);
+	page_cache_release(page);
+	return;
+
+nope:
+	__brelse(bh);
+}
+
+/*
+ * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
+ * held.  For ranking reasons we must trylock.  If we lose, schedule away and
+ * return 0.  j_list_lock is dropped in this case.
+ */
+static int inverted_lock(journal_t *journal, struct buffer_head *bh)
+{
+	if (!jbd_trylock_bh_state(bh)) {
+		spin_unlock(&journal->j_list_lock);
+		schedule();
+		return 0;
+	}
+	return 1;
+}
+
+/* Done it all: now write the commit record.  We should have
+ * cleaned up our previous buffers by now, so if we are in abort
+ * mode we can now just skip the rest of the journal write
+ * entirely.
+ *
+ * Returns 1 if the journal needs to be aborted or 0 on success
+ */
+static int journal_write_commit_record(journal_t *journal,
+					transaction_t *commit_transaction)
+{
+	struct journal_head *descriptor;
+	struct buffer_head *bh;
+	int i, ret;
+	int barrier_done = 0;
+
+	if (is_journal_aborted(journal))
+		return 0;
+
+	descriptor = journal_get_descriptor_buffer(journal);
+	if (!descriptor)
+		return 1;
+
+	bh = jh2bh(descriptor);
+
+	/* AKPM: buglet - add `i' to tmp! */
+	for (i = 0; i < bh->b_size; i += 512) {
+		journal_header_t *tmp = (journal_header_t*)bh->b_data;
+		tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
+		tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
+		tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
+	}
+
+	JBUFFER_TRACE(descriptor, "write commit block");
+	set_buffer_dirty(bh);
+	if (journal->j_flags & JFS_BARRIER) {
+		set_buffer_ordered(bh);
+		barrier_done = 1;
+	}
+	ret = sync_dirty_buffer(bh);
+	/* is it possible for another commit to fail at roughly
+	 * the same time as this one?  If so, we don't want to
+	 * trust the barrier flag in the super, but instead want
+	 * to remember if we sent a barrier request
+	 */
+	if (ret == -EOPNOTSUPP && barrier_done) {
+		char b[BDEVNAME_SIZE];
+
+		printk(KERN_WARNING
+			"JBD: barrier-based sync failed on %s - "
+			"disabling barriers\n",
+			bdevname(journal->j_dev, b));
+		spin_lock(&journal->j_state_lock);
+		journal->j_flags &= ~JFS_BARRIER;
+		spin_unlock(&journal->j_state_lock);
+
+		/* And try again, without the barrier */
+		clear_buffer_ordered(bh);
+		set_buffer_uptodate(bh);
+		set_buffer_dirty(bh);
+		ret = sync_dirty_buffer(bh);
+	}
+	put_bh(bh);		/* One for getblk() */
+	journal_put_journal_head(descriptor);
+
+	return (ret == -EIO);
+}
+
+/*
+ * journal_commit_transaction
+ *
+ * The primary function for committing a transaction to the log.  This
+ * function is called by the journal thread to begin a complete commit.
+ */
+void journal_commit_transaction(journal_t *journal)
+{
+	transaction_t *commit_transaction;
+	struct journal_head *jh, *new_jh, *descriptor;
+	struct buffer_head **wbuf = journal->j_wbuf;
+	int bufs;
+	int flags;
+	int err;
+	unsigned long blocknr;
+	char *tagp = NULL;
+	journal_header_t *header;
+	journal_block_tag_t *tag = NULL;
+	int space_left = 0;
+	int first_tag = 0;
+	int tag_flag;
+	int i;
+
+	/*
+	 * First job: lock down the current transaction and wait for
+	 * all outstanding updates to complete.
+	 */
+
+#ifdef COMMIT_STATS
+	spin_lock(&journal->j_list_lock);
+	summarise_journal_usage(journal);
+	spin_unlock(&journal->j_list_lock);
+#endif
+
+	/* Do we need to erase the effects of a prior journal_flush? */
+	if (journal->j_flags & JFS_FLUSHED) {
+		jbd_debug(3, "super block updated\n");
+		journal_update_superblock(journal, 1);
+	} else {
+		jbd_debug(3, "superblock not updated\n");
+	}
+
+	J_ASSERT(journal->j_running_transaction != NULL);
+	J_ASSERT(journal->j_committing_transaction == NULL);
+
+	commit_transaction = journal->j_running_transaction;
+	J_ASSERT(commit_transaction->t_state == T_RUNNING);
+
+	jbd_debug(1, "JBD: starting commit of transaction %d\n",
+			commit_transaction->t_tid);
+
+	spin_lock(&journal->j_state_lock);
+	commit_transaction->t_state = T_LOCKED;
+
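+	/*
+	 * T_LOCKED stops new handles from joining this transaction; now
+	 * wait for the updates already in progress (t_updates) to drain.
+	 */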
+	spin_lock(&commit_transaction->t_handle_lock);
+	while (commit_transaction->t_updates) {
+		DEFINE_WAIT(wait);
+
+		prepare_to_wait(&journal->j_wait_updates, &wait,
+					TASK_UNINTERRUPTIBLE);
+		if (commit_transaction->t_updates) {
+			spin_unlock(&commit_transaction->t_handle_lock);
+			spin_unlock(&journal->j_state_lock);
+			schedule();
+			spin_lock(&journal->j_state_lock);
+			spin_lock(&commit_transaction->t_handle_lock);
+		}
+		finish_wait(&journal->j_wait_updates, &wait);
+	}
+	spin_unlock(&commit_transaction->t_handle_lock);
+
+	J_ASSERT (commit_transaction->t_outstanding_credits <=
+			journal->j_max_transaction_buffers);
+
+	/*
+	 * First thing we are allowed to do is to discard any remaining
+	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
+	 * that there are no such buffers: if a large filesystem
+	 * operation like a truncate needs to split itself over multiple
+	 * transactions, then it may try to do a journal_restart() while
+	 * there are still BJ_Reserved buffers outstanding.  These must
+	 * be released cleanly from the current transaction.
+	 *
+	 * In this case, the filesystem must still reserve write access
+	 * again before modifying the buffer in the new transaction, but
+	 * we do not require it to remember exactly which old buffers it
+	 * has reserved.  This is consistent with the existing behaviour
+	 * that multiple journal_get_write_access() calls to the same
+	 * buffer are perfectly permissible.
+	 */
+	while (commit_transaction->t_reserved_list) {
+		jh = commit_transaction->t_reserved_list;
+		JBUFFER_TRACE(jh, "reserved, unused: refile");
+		/*
+		 * A journal_get_undo_access()+journal_release_buffer() may
+		 * leave undo-committed data.
+		 */
+		if (jh->b_committed_data) {
+			struct buffer_head *bh = jh2bh(jh);
+
+			jbd_lock_bh_state(bh);
+			if (jh->b_committed_data) {
+				kfree(jh->b_committed_data);
+				jh->b_committed_data = NULL;
+			}
+			jbd_unlock_bh_state(bh);
+		}
+		journal_refile_buffer(journal, jh);
+	}
+
+	/*
+	 * Now try to drop any written-back buffers from the journal's
+	 * checkpoint lists.  We do this *before* commit because it potentially
+	 * frees some memory
+	 */
+	spin_lock(&journal->j_list_lock);
+	__journal_clean_checkpoint_list(journal);
+	spin_unlock(&journal->j_list_lock);
+
+	jbd_debug (3, "JBD: commit phase 1\n");
+
+	/*
+	 * Switch to a new revoke table.
+	 */
+	journal_switch_revoke_table(journal);
+
+	commit_transaction->t_state = T_FLUSH;
+	journal->j_committing_transaction = commit_transaction;
+	journal->j_running_transaction = NULL;
+	commit_transaction->t_log_start = journal->j_head;
+	wake_up(&journal->j_wait_transaction_locked);
+	spin_unlock(&journal->j_state_lock);
+
+	jbd_debug (3, "JBD: commit phase 2\n");
+
+	/*
+	 * First, drop modified flag: all accesses to the buffers
+	 * will be tracked for a new transaction only -bzzz
+	 */
+	spin_lock(&journal->j_list_lock);
+	if (commit_transaction->t_buffers) {
+		new_jh = jh = commit_transaction->t_buffers->b_tnext;
+		do {
+			J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
+					new_jh->b_modified == 0);
+			new_jh->b_modified = 0;
+			new_jh = new_jh->b_tnext;
+		} while (new_jh != jh);
+	}
+	spin_unlock(&journal->j_list_lock);
+
+	/*
+	 * Now start flushing things to disk, in the order they appear
+	 * on the transaction lists.  Data blocks go first.
+	 */
+
+	err = 0;
+	/*
+	 * Whenever we unlock the journal and sleep, things can get added
+	 * onto ->t_sync_datalist, so we have to keep looping back to
+	 * write_out_data until we *know* that the list is empty.
+	 */
+	bufs = 0;
+	/*
+	 * Cleanup any flushed data buffers from the data list.  Even in
+	 * abort mode, we want to flush this out as soon as possible.
+	 */
+write_out_data:
+	cond_resched();
+	spin_lock(&journal->j_list_lock);
+
+	while (commit_transaction->t_sync_datalist) {
+		struct buffer_head *bh;
+
+		jh = commit_transaction->t_sync_datalist;
+		commit_transaction->t_sync_datalist = jh->b_tnext;
+		bh = jh2bh(jh);
+		if (buffer_locked(bh)) {
+			BUFFER_TRACE(bh, "locked");
+			if (!inverted_lock(journal, bh))
+				goto write_out_data;
+			__journal_temp_unlink_buffer(jh);
+			__journal_file_buffer(jh, commit_transaction,
+						BJ_Locked);
+			jbd_unlock_bh_state(bh);
+			if (lock_need_resched(&journal->j_list_lock)) {
+				spin_unlock(&journal->j_list_lock);
+				goto write_out_data;
+			}
+		} else {
+			if (buffer_dirty(bh)) {
+				BUFFER_TRACE(bh, "start journal writeout");
+				get_bh(bh);
+				wbuf[bufs++] = bh;
+				if (bufs == journal->j_wbufsize) {
+					jbd_debug(2, "submit %d writes\n",
+							bufs);
+					spin_unlock(&journal->j_list_lock);
+					ll_rw_block(WRITE, bufs, wbuf);
+					journal_brelse_array(wbuf, bufs);
+					bufs = 0;
+					goto write_out_data;
+				}
+			} else {
+				BUFFER_TRACE(bh, "writeout complete: unfile");
+				if (!inverted_lock(journal, bh))
+					goto write_out_data;
+				__journal_unfile_buffer(jh);
+				jbd_unlock_bh_state(bh);
+				journal_remove_journal_head(bh);
+				put_bh(bh);
+				if (lock_need_resched(&journal->j_list_lock)) {
+					spin_unlock(&journal->j_list_lock);
+					goto write_out_data;
+				}
+			}
+		}
+	}
+
+	if (bufs) {
+		spin_unlock(&journal->j_list_lock);
+		ll_rw_block(WRITE, bufs, wbuf);
+		journal_brelse_array(wbuf, bufs);
+		spin_lock(&journal->j_list_lock);
+	}
+
+	/*
+	 * Wait for all previously submitted IO to complete.
+	 */
+	while (commit_transaction->t_locked_list) {
+		struct buffer_head *bh;
+
+		jh = commit_transaction->t_locked_list->b_tprev;
+		bh = jh2bh(jh);
+		get_bh(bh);
+		if (buffer_locked(bh)) {
+			spin_unlock(&journal->j_list_lock);
+			wait_on_buffer(bh);
+			if (unlikely(!buffer_uptodate(bh)))
+				err = -EIO;
+			spin_lock(&journal->j_list_lock);
+		}
+		if (!inverted_lock(journal, bh)) {
+			put_bh(bh);
+			spin_lock(&journal->j_list_lock);
+			continue;
+		}
+		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
+			__journal_unfile_buffer(jh);
+			jbd_unlock_bh_state(bh);
+			journal_remove_journal_head(bh);
+			put_bh(bh);
+		} else {
+			jbd_unlock_bh_state(bh);
+		}
+		put_bh(bh);
+		cond_resched_lock(&journal->j_list_lock);
+	}
+	spin_unlock(&journal->j_list_lock);
+
+	if (err)
+		__journal_abort_hard(journal);
+
+	journal_write_revoke_records(journal, commit_transaction);
+
+	jbd_debug(3, "JBD: commit phase 2\n");
+
+	/*
+	 * If we found any dirty or locked buffers, then we should have
+	 * looped back up to the write_out_data label.  If there weren't
+	 * any then journal_clean_data_list should have wiped the list
+	 * clean by now, so check that it is in fact empty.
+	 */
+	J_ASSERT (commit_transaction->t_sync_datalist == NULL);
+
+	jbd_debug (3, "JBD: commit phase 3\n");
+
+	/*
+	 * Way to go: we have now written out all of the data for a
+	 * transaction!  Now comes the tricky part: we need to write out
+	 * metadata.  Loop over the transaction's entire buffer list:
+	 */
+	commit_transaction->t_state = T_COMMIT;
+
+	descriptor = NULL;
+	bufs = 0;
+	while (commit_transaction->t_buffers) {
+
+		/* Find the next buffer to be journaled... */
+
+		jh = commit_transaction->t_buffers;
+
+		/* If we're in abort mode, we just un-journal the buffer and
+		   release it for background writing. */
+
+		if (is_journal_aborted(journal)) {
+			JBUFFER_TRACE(jh, "journal is aborting: refile");
+			journal_refile_buffer(journal, jh);
+			/* If that was the last one, we need to clean up
+			 * any descriptor buffers which may have been
+			 * already allocated, even if we are now
+			 * aborting. */
+			if (!commit_transaction->t_buffers)
+				goto start_journal_io;
+			continue;
+		}
+
+		/* Make sure we have a descriptor block in which to
+		   record the metadata buffer. */
+
+		if (!descriptor) {
+			struct buffer_head *bh;
+
+			J_ASSERT (bufs == 0);
+
+			jbd_debug(4, "JBD: get descriptor\n");
+
+			descriptor = journal_get_descriptor_buffer(journal);
+			if (!descriptor) {
+				__journal_abort_hard(journal);
+				continue;
+			}
+
+			bh = jh2bh(descriptor);
+			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
+				(unsigned long long)bh->b_blocknr, bh->b_data);
+			header = (journal_header_t *)&bh->b_data[0];
+			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
+			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
+			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
+
+			tagp = &bh->b_data[sizeof(journal_header_t)];
+			space_left = bh->b_size - sizeof(journal_header_t);
+			first_tag = 1;
+			set_buffer_jwrite(bh);
+			set_buffer_dirty(bh);
+			wbuf[bufs++] = bh;
+
+			/* Record it so that we can wait for IO
+                           completion later */
+			BUFFER_TRACE(bh, "ph3: file as descriptor");
+			journal_file_buffer(descriptor, commit_transaction,
+					BJ_LogCtl);
+		}
+
+		/* Where is the buffer to be written? */
+
+		err = journal_next_log_block(journal, &blocknr);
+		/* If the block mapping failed, just abandon the buffer
+		   and repeat this loop: we'll fall into the
+		   refile-on-abort condition above. */
+		if (err) {
+			__journal_abort_hard(journal);
+			continue;
+		}
+
+		/*
+		 * start_this_handle() uses t_outstanding_credits to determine
+		 * the free space in the log, but this counter is changed
+		 * by journal_next_log_block() also.
+		 */
+		commit_transaction->t_outstanding_credits--;
+
+		/* Bump b_count to prevent truncate from stumbling over
+                   the shadowed buffer!  @@@ This can go if we ever get
+                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
+		atomic_inc(&jh2bh(jh)->b_count);
+
+		/* Make a temporary IO buffer with which to write it out
+                   (this will requeue both the metadata buffer and the
+                   temporary IO buffer). new_bh goes on BJ_IO */
+
+		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
+		/*
+		 * akpm: journal_write_metadata_buffer() sets
+		 * new_bh->b_transaction to commit_transaction.
+		 * We need to clean this up before we release new_bh
+		 * (which is of type BJ_IO)
+		 */
+		JBUFFER_TRACE(jh, "ph3: write metadata");
+		flags = journal_write_metadata_buffer(commit_transaction,
+						      jh, &new_jh, blocknr);
+		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+		wbuf[bufs++] = jh2bh(new_jh);
+
+		/* Record the new block's tag in the current descriptor
+                   buffer */
+
+		tag_flag = 0;
+		if (flags & 1)
+			tag_flag |= JFS_FLAG_ESCAPE;
+		if (!first_tag)
+			tag_flag |= JFS_FLAG_SAME_UUID;
+
+		tag = (journal_block_tag_t *) tagp;
+		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
+		tag->t_flags = cpu_to_be32(tag_flag);
+		tagp += sizeof(journal_block_tag_t);
+		space_left -= sizeof(journal_block_tag_t);
+
+		if (first_tag) {
+			memcpy (tagp, journal->j_uuid, 16);
+			tagp += 16;
+			space_left -= 16;
+			first_tag = 0;
+		}
+
+		/* If there's no more to do, or if the descriptor is full,
+		   let the IO rip! */
+
+		if (bufs == journal->j_wbufsize ||
+		    commit_transaction->t_buffers == NULL ||
+		    space_left < sizeof(journal_block_tag_t) + 16) {
+
+			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
+
+			/* Write an end-of-descriptor marker before
+                           submitting the IOs.  "tag" still points to
+                           the last tag we set up. */
+
+			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
+
+start_journal_io:
+			for (i = 0; i < bufs; i++) {
+				struct buffer_head *bh = wbuf[i];
+				lock_buffer(bh);
+				clear_buffer_dirty(bh);
+				set_buffer_uptodate(bh);
+				bh->b_end_io = journal_end_buffer_io_sync;
+				submit_bh(WRITE, bh);
+			}
+			cond_resched();
+
+			/* Force a new descriptor to be generated next
+                           time round the loop. */
+			descriptor = NULL;
+			bufs = 0;
+		}
+	}
+
+	/* Lo and behold: we have just managed to send a transaction to
+           the log.  Before we can commit it, wait for the IO so far to
+           complete.  Control buffers being written are on the
+           transaction's t_log_list queue, and metadata buffers are on
+           the t_iobuf_list queue.
+
+	   Wait for the buffers in reverse order.  That way we are
+	   less likely to be woken up until all IOs have completed, and
+	   so we incur less scheduling load.
+	*/
+
+	jbd_debug(3, "JBD: commit phase 4\n");
+
+	/*
+	 * akpm: these are BJ_IO, and j_list_lock is not needed.
+	 * See __journal_try_to_free_buffer.
+	 */
+wait_for_iobuf:
+	while (commit_transaction->t_iobuf_list != NULL) {
+		struct buffer_head *bh;
+
+		jh = commit_transaction->t_iobuf_list->b_tprev;
+		bh = jh2bh(jh);
+		if (buffer_locked(bh)) {
+			wait_on_buffer(bh);
+			goto wait_for_iobuf;
+		}
+		if (cond_resched())
+			goto wait_for_iobuf;
+
+		if (unlikely(!buffer_uptodate(bh)))
+			err = -EIO;
+
+		clear_buffer_jwrite(bh);
+
+		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
+		journal_unfile_buffer(journal, jh);
+
+		/*
+		 * ->t_iobuf_list should contain only dummy buffer_heads
+		 * which were created by journal_write_metadata_buffer().
+		 */
+		BUFFER_TRACE(bh, "dumping temporary bh");
+		journal_put_journal_head(jh);
+		__brelse(bh);
+		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
+		free_buffer_head(bh);
+
+		/* We also have to unlock and free the corresponding
+                   shadowed buffer */
+		jh = commit_transaction->t_shadow_list->b_tprev;
+		bh = jh2bh(jh);
+		clear_bit(BH_JWrite, &bh->b_state);
+		J_ASSERT_BH(bh, buffer_jbddirty(bh));
+
+		/* The metadata is now released for reuse, but we need
+                   to remember it against this transaction so that when
+                   we finally commit, we can do any checkpointing
+                   required. */
+		JBUFFER_TRACE(jh, "file as BJ_Forget");
+		journal_file_buffer(jh, commit_transaction, BJ_Forget);
+		/* Wake up any transactions which were waiting for this
+		   IO to complete */
+		wake_up_bit(&bh->b_state, BH_Unshadow);
+		JBUFFER_TRACE(jh, "brelse shadowed buffer");
+		__brelse(bh);
+	}
+
+	J_ASSERT (commit_transaction->t_shadow_list == NULL);
+
+	jbd_debug(3, "JBD: commit phase 5\n");
+
+	/* Here we wait for the revoke record and descriptor record buffers */
+ wait_for_ctlbuf:
+	while (commit_transaction->t_log_list != NULL) {
+		struct buffer_head *bh;
+
+		jh = commit_transaction->t_log_list->b_tprev;
+		bh = jh2bh(jh);
+		if (buffer_locked(bh)) {
+			wait_on_buffer(bh);
+			goto wait_for_ctlbuf;
+		}
+		if (cond_resched())
+			goto wait_for_ctlbuf;
+
+		if (unlikely(!buffer_uptodate(bh)))
+			err = -EIO;
+
+		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
+		clear_buffer_jwrite(bh);
+		journal_unfile_buffer(journal, jh);
+		journal_put_journal_head(jh);
+		__brelse(bh);		/* One for getblk */
+		/* AKPM: bforget here */
+	}
+
+	jbd_debug(3, "JBD: commit phase 6\n");
+
+	if (journal_write_commit_record(journal, commit_transaction))
+		err = -EIO;
+
+	if (err)
+		__journal_abort_hard(journal);
+
+	/* End of a transaction!  Finally, we can do checkpoint
+           processing: any buffers committed as a result of this
+           transaction can be removed from any checkpoint list it was on
+           before. */
+
+	jbd_debug(3, "JBD: commit phase 7\n");
+
+	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
+	J_ASSERT(commit_transaction->t_buffers == NULL);
+	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
+	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
+	J_ASSERT(commit_transaction->t_shadow_list == NULL);
+	J_ASSERT(commit_transaction->t_log_list == NULL);
+
+restart_loop:
+	while (commit_transaction->t_forget) {
+		transaction_t *cp_transaction;
+		struct buffer_head *bh;
+
+		jh = commit_transaction->t_forget;
+		bh = jh2bh(jh);
+		jbd_lock_bh_state(bh);
+		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
+			jh->b_transaction == journal->j_running_transaction);
+
+		/*
+		 * If there is undo-protected committed data against
+		 * this buffer, then we can remove it now.  If it is a
+		 * buffer needing such protection, the old frozen_data
+		 * field now points to a committed version of the
+		 * buffer, so rotate that field to the new committed
+		 * data.
+		 *
+		 * Otherwise, we can just throw away the frozen data now.
+		 */
+		if (jh->b_committed_data) {
+			kfree(jh->b_committed_data);
+			jh->b_committed_data = NULL;
+			if (jh->b_frozen_data) {
+				jh->b_committed_data = jh->b_frozen_data;
+				jh->b_frozen_data = NULL;
+			}
+		} else if (jh->b_frozen_data) {
+			kfree(jh->b_frozen_data);
+			jh->b_frozen_data = NULL;
+		}
+
+		spin_lock(&journal->j_list_lock);
+		cp_transaction = jh->b_cp_transaction;
+		if (cp_transaction) {
+			JBUFFER_TRACE(jh, "remove from old cp transaction");
+			__journal_remove_checkpoint(jh);
+		}
+
+		/* Only re-checkpoint the buffer_head if it is marked
+		 * dirty.  If the buffer was added to the BJ_Forget list
+		 * by journal_forget, it may no longer be dirty and
+		 * there's no point in keeping a checkpoint record for
+		 * it. */
+
+		/* A buffer which has been freed while still being
+		 * journaled by a previous transaction may end up still
+		 * being dirty here, but we want to avoid writing back
+		 * that buffer in the future now that the last use has
+		 * been committed.  That's not only a performance gain,
+		 * it also stops aliasing problems if the buffer is left
+		 * behind for writeback and gets reallocated for another
+		 * use in a different page. */
+		if (buffer_freed(bh)) {
+			clear_buffer_freed(bh);
+			clear_buffer_jbddirty(bh);
+		}
+
+		if (buffer_jbddirty(bh)) {
+			JBUFFER_TRACE(jh, "add to new checkpointing trans");
+			__journal_insert_checkpoint(jh, commit_transaction);
+			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
+			__journal_refile_buffer(jh);
+			jbd_unlock_bh_state(bh);
+		} else {
+			J_ASSERT_BH(bh, !buffer_dirty(bh));
+			J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+			__journal_unfile_buffer(jh);
+			jbd_unlock_bh_state(bh);
+			journal_remove_journal_head(bh);  /* needs a brelse */
+			release_buffer_page(bh);
+		}
+		spin_unlock(&journal->j_list_lock);
+		if (cond_resched())
+			goto restart_loop;
+	}
+
+	/* Done with this transaction! */
+
+	jbd_debug(3, "JBD: commit phase 8\n");
+
+	J_ASSERT(commit_transaction->t_state == T_COMMIT);
+
+	/*
+	 * This is a bit sleazy.  We borrow j_list_lock to protect
+	 * journal->j_committing_transaction in __journal_remove_checkpoint.
+	 * Really, __journal_remove_checkpoint should be using j_state_lock but
+	 * it's a bit of a hassle to hold that across __journal_remove_checkpoint
+	 */
+	spin_lock(&journal->j_state_lock);
+	spin_lock(&journal->j_list_lock);
+	commit_transaction->t_state = T_FINISHED;
+	J_ASSERT(commit_transaction == journal->j_committing_transaction);
+	journal->j_commit_sequence = commit_transaction->t_tid;
+	journal->j_committing_transaction = NULL;
+	spin_unlock(&journal->j_state_lock);
+
+	if (commit_transaction->t_checkpoint_list == NULL) {
+		__journal_drop_transaction(journal, commit_transaction);
+	} else {
+		if (journal->j_checkpoint_transactions == NULL) {
+			journal->j_checkpoint_transactions = commit_transaction;
+			commit_transaction->t_cpnext = commit_transaction;
+			commit_transaction->t_cpprev = commit_transaction;
+		} else {
+			commit_transaction->t_cpnext =
+				journal->j_checkpoint_transactions;
+			commit_transaction->t_cpprev =
+				commit_transaction->t_cpnext->t_cpprev;
+			commit_transaction->t_cpnext->t_cpprev =
+				commit_transaction;
+			commit_transaction->t_cpprev->t_cpnext =
+				commit_transaction;
+		}
+	}
+	spin_unlock(&journal->j_list_lock);
+
+	jbd_debug(1, "JBD: commit %d complete, head %d\n",
+		  journal->j_commit_sequence, journal->j_tail_sequence);
+
+	wake_up(&journal->j_wait_done_commit);
+}
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
new file mode 100644
index 0000000..1e6f2e2
--- /dev/null
+++ b/fs/jbd/journal.c
@@ -0,0 +1,2003 @@
+/*
+ * linux/fs/jbd/journal.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Generic filesystem journal-writing code; part of the ext2fs
+ * journaling system.
+ *
+ * This file manages journals: areas of disk reserved for logging
+ * transactional updates.  This includes the kernel journaling thread
+ * which is responsible for scheduling updates to the log.
+ *
+ * We do not actually manage the physical storage of the journal in this
+ * file: that is left to a per-journal policy function, which allows us
+ * to store the journal within a filesystem-specified area for ext2
+ * journaling (ext2 can use a reserved inode for storing the log).
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <linux/pagemap.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <linux/proc_fs.h>
+
+EXPORT_SYMBOL(journal_start);
+EXPORT_SYMBOL(journal_restart);
+EXPORT_SYMBOL(journal_extend);
+EXPORT_SYMBOL(journal_stop);
+EXPORT_SYMBOL(journal_lock_updates);
+EXPORT_SYMBOL(journal_unlock_updates);
+EXPORT_SYMBOL(journal_get_write_access);
+EXPORT_SYMBOL(journal_get_create_access);
+EXPORT_SYMBOL(journal_get_undo_access);
+EXPORT_SYMBOL(journal_dirty_data);
+EXPORT_SYMBOL(journal_dirty_metadata);
+EXPORT_SYMBOL(journal_release_buffer);
+EXPORT_SYMBOL(journal_forget);
+#if 0
+EXPORT_SYMBOL(journal_sync_buffer);
+#endif
+EXPORT_SYMBOL(journal_flush);
+EXPORT_SYMBOL(journal_revoke);
+
+EXPORT_SYMBOL(journal_init_dev);
+EXPORT_SYMBOL(journal_init_inode);
+EXPORT_SYMBOL(journal_update_format);
+EXPORT_SYMBOL(journal_check_used_features);
+EXPORT_SYMBOL(journal_check_available_features);
+EXPORT_SYMBOL(journal_set_features);
+EXPORT_SYMBOL(journal_create);
+EXPORT_SYMBOL(journal_load);
+EXPORT_SYMBOL(journal_destroy);
+EXPORT_SYMBOL(journal_recover);
+EXPORT_SYMBOL(journal_update_superblock);
+EXPORT_SYMBOL(journal_abort);
+EXPORT_SYMBOL(journal_errno);
+EXPORT_SYMBOL(journal_ack_err);
+EXPORT_SYMBOL(journal_clear_err);
+EXPORT_SYMBOL(log_wait_commit);
+EXPORT_SYMBOL(journal_start_commit);
+EXPORT_SYMBOL(journal_force_commit_nested);
+EXPORT_SYMBOL(journal_wipe);
+EXPORT_SYMBOL(journal_blocks_per_page);
+EXPORT_SYMBOL(journal_invalidatepage);
+EXPORT_SYMBOL(journal_try_to_free_buffers);
+EXPORT_SYMBOL(journal_force_commit);
+
+static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
+
+/*
+ * Helper function used to manage commit timeouts
+ */
+
+static void commit_timeout(unsigned long __data)
+{
+	struct task_struct * p = (struct task_struct *) __data;
+
+	wake_up_process(p);
+}
+
+/* Static check for data structure consistency.  There's no code
+ * invoked --- we'll just get a linker failure if things aren't right.
+ */
+void __journal_internal_check(void)
+{
+	extern void journal_bad_superblock_size(void);
+	if (sizeof(struct journal_superblock_s) != 1024)
+		journal_bad_superblock_size();
+}
+
+/*
+ * kjournald: The main thread function used to manage a logging device
+ * journal.
+ *
+ * This kernel thread is responsible for two things:
+ *
+ * 1) COMMIT:  Every so often we need to commit the current state of the
+ *    filesystem to disk.  The journal thread is responsible for writing
+ *    all of the metadata buffers to disk.
+ *
+ * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
+ *    of the data in that part of the log has been rewritten elsewhere on
+ *    the disk.  Flushing these old buffers to reclaim space in the log is
+ *    known as checkpointing, and this thread is responsible for that job.
+ */
+
+journal_t *current_journal;		// AKPM: debug
+
+int kjournald(void *arg)
+{
+	journal_t *journal = (journal_t *) arg;
+	transaction_t *transaction;
+	struct timer_list timer;
+
+	current_journal = journal;
+
+	daemonize("kjournald");
+
+	/* Set up an interval timer which can be used to trigger a
+           commit wakeup after the commit interval expires */
+	init_timer(&timer);
+	timer.data = (unsigned long) current;
+	timer.function = commit_timeout;
+	journal->j_commit_timer = &timer;
+
+	/* Record that the journal thread is running */
+	journal->j_task = current;
+	wake_up(&journal->j_wait_done_commit);
+
+	printk(KERN_INFO "kjournald starting.  Commit interval %ld seconds\n",
+			journal->j_commit_interval / HZ);
+
+	/*
+	 * And now, wait forever for commit wakeup events.
+	 */
+	spin_lock(&journal->j_state_lock);
+
+loop:
+	if (journal->j_flags & JFS_UNMOUNT)
+		goto end_loop;
+
+	jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
+		journal->j_commit_sequence, journal->j_commit_request);
+
+	if (journal->j_commit_sequence != journal->j_commit_request) {
+		jbd_debug(1, "OK, requests differ\n");
+		spin_unlock(&journal->j_state_lock);
+		del_timer_sync(journal->j_commit_timer);
+		journal_commit_transaction(journal);
+		spin_lock(&journal->j_state_lock);
+		goto loop;
+	}
+
+	wake_up(&journal->j_wait_done_commit);
+	if (current->flags & PF_FREEZE) {
+		/*
+		 * The simpler the better. Flushing journal isn't a
+		 * good idea, because that depends on threads that may
+		 * be already stopped.
+		 */
+		jbd_debug(1, "Now suspending kjournald\n");
+		spin_unlock(&journal->j_state_lock);
+		refrigerator(PF_FREEZE);
+		spin_lock(&journal->j_state_lock);
+	} else {
+		/*
+		 * We assume on resume that commits are already there,
+		 * so we don't sleep
+		 */
+		DEFINE_WAIT(wait);
+		int should_sleep = 1;
+
+		prepare_to_wait(&journal->j_wait_commit, &wait,
+				TASK_INTERRUPTIBLE);
+		if (journal->j_commit_sequence != journal->j_commit_request)
+			should_sleep = 0;
+		transaction = journal->j_running_transaction;
+		if (transaction && time_after_eq(jiffies,
+						transaction->t_expires))
+			should_sleep = 0;
+		if (should_sleep) {
+			spin_unlock(&journal->j_state_lock);
+			schedule();
+			spin_lock(&journal->j_state_lock);
+		}
+		finish_wait(&journal->j_wait_commit, &wait);
+	}
+
+	jbd_debug(1, "kjournald wakes\n");
+
+	/*
+	 * Were we woken up by a commit wakeup event?
+	 */
+	transaction = journal->j_running_transaction;
+	if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
+		journal->j_commit_request = transaction->t_tid;
+		jbd_debug(1, "woke because of timeout\n");
+	}
+	goto loop;
+
+end_loop:
+	spin_unlock(&journal->j_state_lock);
+	del_timer_sync(journal->j_commit_timer);
+	journal->j_task = NULL;
+	wake_up(&journal->j_wait_done_commit);
+	jbd_debug(1, "Journal thread exiting.\n");
+	return 0;
+}
+
+static void journal_start_thread(journal_t *journal)
+{
+	kernel_thread(kjournald, journal, CLONE_VM|CLONE_FS|CLONE_FILES);
+	wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+}
+
+static void journal_kill_thread(journal_t *journal)
+{
+	spin_lock(&journal->j_state_lock);
+	journal->j_flags |= JFS_UNMOUNT;
+
+	while (journal->j_task) {
+		wake_up(&journal->j_wait_commit);
+		spin_unlock(&journal->j_state_lock);
+		wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+		spin_lock(&journal->j_state_lock);
+	}
+	spin_unlock(&journal->j_state_lock);
+}
+
+/*
+ * journal_write_metadata_buffer: write a metadata buffer to the journal.
+ *
+ * Writes a metadata buffer to a given disk block.  The actual IO is not
+ * performed but a new buffer_head is constructed which labels the data
+ * to be written with the correct destination disk block.
+ *
+ * Any magic-number escaping which needs to be done will cause a
+ * copy-out here.  If the buffer happens to start with the
+ * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
+ * magic number is only written to the log for descriptor blocks.  In
+ * this case, we copy the data and replace the first word with 0, and we
+ * return a result code which indicates that this buffer needs to be
+ * marked as an escaped buffer in the corresponding log descriptor
+ * block.  The missing word can then be restored when the block is read
+ * during recovery.
+ *
+ * If the source buffer has already been modified by a new transaction
+ * since we took the last commit snapshot, we use the frozen copy of
+ * that data for IO.  If we end up using the existing buffer_head's data
+ * for the write, then we *have* to lock the buffer to prevent anyone
+ * else from using and possibly modifying it while the IO is in
+ * progress.
+ *
+ * The function returns, via *jh_out, the journal_head to be used for IO.
+ *
+ * We assume that the journal has already been locked in this function.
+ *
+ * Return value:
+ *  <0: Error
+ * >=0: Finished OK
+ *
+ * On success:
+ * Bit 0 set == escape performed on the data
+ * Bit 1 set == buffer copy-out performed (kfree the data after IO)
+ */
+
+int journal_write_metadata_buffer(transaction_t *transaction,
+				  struct journal_head  *jh_in,
+				  struct journal_head **jh_out,
+				  int blocknr)
+{
+	int need_copy_out = 0;
+	int done_copy_out = 0;
+	int do_escape = 0;
+	char *mapped_data;
+	struct buffer_head *new_bh;
+	struct journal_head *new_jh;
+	struct page *new_page;
+	unsigned int new_offset;
+	struct buffer_head *bh_in = jh2bh(jh_in);
+
+	/*
+	 * The buffer really shouldn't be locked: only the current committing
+	 * transaction is allowed to write it, so nobody else is allowed
+	 * to do any IO.
+	 *
+	 * akpm: except if we're journalling data, and write() output is
+	 * also part of a shared mapping, and another thread has
+	 * decided to launch a writepage() against this buffer.
+	 */
+	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
+
+	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+
+	/*
+	 * If a new transaction has already done a buffer copy-out, then
+	 * we use that version of the data for the commit.
+	 */
+	jbd_lock_bh_state(bh_in);
+repeat:
+	if (jh_in->b_frozen_data) {
+		done_copy_out = 1;
+		new_page = virt_to_page(jh_in->b_frozen_data);
+		new_offset = offset_in_page(jh_in->b_frozen_data);
+	} else {
+		new_page = jh2bh(jh_in)->b_page;
+		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
+	}
+
+	mapped_data = kmap_atomic(new_page, KM_USER0);
+	/*
+	 * Check for escaping
+	 */
+	if (*((__be32 *)(mapped_data + new_offset)) ==
+				cpu_to_be32(JFS_MAGIC_NUMBER)) {
+		need_copy_out = 1;
+		do_escape = 1;
+	}
+	kunmap_atomic(mapped_data, KM_USER0);
+
+	/*
+	 * Do we need to do a data copy?
+	 */
+	if (need_copy_out && !done_copy_out) {
+		char *tmp;
+
+		jbd_unlock_bh_state(bh_in);
+		tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
+		jbd_lock_bh_state(bh_in);
+		if (jh_in->b_frozen_data) {
+			kfree(tmp);
+			goto repeat;
+		}
+
+		jh_in->b_frozen_data = tmp;
+		mapped_data = kmap_atomic(new_page, KM_USER0);
+		memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
+		kunmap_atomic(mapped_data, KM_USER0);
+
+		new_page = virt_to_page(tmp);
+		new_offset = offset_in_page(tmp);
+		done_copy_out = 1;
+	}
+
+	/*
+	 * Did we need to escape the data?  Now we've done all the
+	 * copying, we can finally do so.
+	 */
+	if (do_escape) {
+		mapped_data = kmap_atomic(new_page, KM_USER0);
+		*((unsigned int *)(mapped_data + new_offset)) = 0;
+		kunmap_atomic(mapped_data, KM_USER0);
+	}
+
+	/* keep subsequent assertions sane */
+	new_bh->b_state = 0;
+	init_buffer(new_bh, NULL, NULL);
+	atomic_set(&new_bh->b_count, 1);
+	jbd_unlock_bh_state(bh_in);
+
+	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
+
+	set_bh_page(new_bh, new_page, new_offset);
+	new_jh->b_transaction = NULL;
+	new_bh->b_size = jh2bh(jh_in)->b_size;
+	new_bh->b_bdev = transaction->t_journal->j_dev;
+	new_bh->b_blocknr = blocknr;
+	set_buffer_mapped(new_bh);
+	set_buffer_dirty(new_bh);
+
+	*jh_out = new_jh;
+
+	/*
+	 * The to-be-written buffer needs to get moved to the io queue,
+	 * and the original buffer whose contents we are shadowing or
+	 * copying is moved to the transaction's shadow queue.
+	 */
+	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
+	journal_file_buffer(jh_in, transaction, BJ_Shadow);
+	JBUFFER_TRACE(new_jh, "file as BJ_IO");
+	journal_file_buffer(new_jh, transaction, BJ_IO);
+
+	return do_escape | (done_copy_out << 1);
+}
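+
+/*
+ * A caller would typically decode the two result bits along these lines
+ * (flags, tag_flag and the surrounding locals are only illustrative names):
+ *
+ *	flags = journal_write_metadata_buffer(transaction, jh,
+ *					      &new_jh, blocknr);
+ *	if (flags & 1)
+ *		tag_flag |= JFS_FLAG_ESCAPE;	(bit 0: data was escaped)
+ *	if (flags & 2)
+ *		...				(bit 1: kfree the copy after IO)
+ */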
+
+/*
+ * Allocation code for the journal file.  Manage the space left in the
+ * journal, so that we can begin checkpointing when appropriate.
+ */
+
+/*
+ * __log_space_left: Return the number of free blocks left in the journal.
+ *
+ * Called with the journal already locked.
+ *
+ * Called under j_state_lock
+ */
+
+int __log_space_left(journal_t *journal)
+{
+	int left = journal->j_free;
+
+	assert_spin_locked(&journal->j_state_lock);
+
+	/*
+	 * Be pessimistic here about the number of those free blocks which
+	 * might be required for log descriptor control blocks.
+	 */
+
+#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
+
+	left -= MIN_LOG_RESERVED_BLOCKS;
+
+	if (left <= 0)
+		return 0;
+	left -= (left >> 3);
+	return left;
+}
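+
+/*
+ * For example, with j_free == 1024 the estimate above first reserves the
+ * 32 control blocks (leaving 992) and then an eighth of the remainder
+ * (992 >> 3 == 124), so __log_space_left() would report 868 free blocks.
+ */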
+
+/*
+ * Called under j_state_lock.  Returns true if a transaction was started.
+ */
+int __log_start_commit(journal_t *journal, tid_t target)
+{
+	/*
+	 * Are we already doing a recent enough commit?
+	 */
+	if (!tid_geq(journal->j_commit_request, target)) {
+		/*
+		 * We want a new commit: OK, mark the request and wake up the
+		 * commit thread.  We do _not_ do the commit ourselves.
+		 */
+
+		journal->j_commit_request = target;
+		jbd_debug(1, "JBD: requesting commit %d/%d\n",
+			  journal->j_commit_request,
+			  journal->j_commit_sequence);
+		wake_up(&journal->j_wait_commit);
+		return 1;
+	}
+	return 0;
+}
+
+int log_start_commit(journal_t *journal, tid_t tid)
+{
+	int ret;
+
+	spin_lock(&journal->j_state_lock);
+	ret = __log_start_commit(journal, tid);
+	spin_unlock(&journal->j_state_lock);
+	return ret;
+}
+
+/*
+ * Force and wait upon a commit if the calling process is not within a
+ * transaction.  This is used for forcing out undo-protected data which contains
+ * bitmaps, when the fs is running out of space.
+ *
+ * We can only force the running transaction if we don't have an active handle;
+ * otherwise, we will deadlock.
+ *
+ * Returns true if a transaction was started.
+ */
+int journal_force_commit_nested(journal_t *journal)
+{
+	transaction_t *transaction = NULL;
+	tid_t tid;
+
+	spin_lock(&journal->j_state_lock);
+	if (journal->j_running_transaction && !current->journal_info) {
+		transaction = journal->j_running_transaction;
+		__log_start_commit(journal, transaction->t_tid);
+	} else if (journal->j_committing_transaction)
+		transaction = journal->j_committing_transaction;
+
+	if (!transaction) {
+		spin_unlock(&journal->j_state_lock);
+		return 0;	/* Nothing to retry */
+	}
+
+	tid = transaction->t_tid;
+	spin_unlock(&journal->j_state_lock);
+	log_wait_commit(journal, tid);
+	return 1;
+}
+
+/*
+ * Start a commit of the current running transaction (if any).  Returns true
+ * if a transaction was started, and fills its tid in at *ptid
+ */
+int journal_start_commit(journal_t *journal, tid_t *ptid)
+{
+	int ret = 0;
+
+	spin_lock(&journal->j_state_lock);
+	if (journal->j_running_transaction) {
+		tid_t tid = journal->j_running_transaction->t_tid;
+
+		ret = __log_start_commit(journal, tid);
+		if (ret && ptid)
+			*ptid = tid;
+	} else if (journal->j_committing_transaction && ptid) {
+		/*
+		 * If ext3_write_super() recently started a commit, then we
+		 * have to wait for completion of that transaction
+		 */
+		*ptid = journal->j_committing_transaction->t_tid;
+		ret = 1;
+	}
+	spin_unlock(&journal->j_state_lock);
+	return ret;
+}
+
+/*
+ * Wait for a specified commit to complete.
+ * The caller may not hold the journal lock.
+ */
+int log_wait_commit(journal_t *journal, tid_t tid)
+{
+	int err = 0;
+
+#ifdef CONFIG_JBD_DEBUG
+	spin_lock(&journal->j_state_lock);
+	if (!tid_geq(journal->j_commit_request, tid)) {
+		printk(KERN_EMERG
+		       "%s: error: j_commit_request=%d, tid=%d\n",
+		       __FUNCTION__, journal->j_commit_request, tid);
+	}
+	spin_unlock(&journal->j_state_lock);
+#endif
+	spin_lock(&journal->j_state_lock);
+	while (tid_gt(tid, journal->j_commit_sequence)) {
+		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
+				  tid, journal->j_commit_sequence);
+		wake_up(&journal->j_wait_commit);
+		spin_unlock(&journal->j_state_lock);
+		wait_event(journal->j_wait_done_commit,
+				!tid_gt(tid, journal->j_commit_sequence));
+		spin_lock(&journal->j_state_lock);
+	}
+	spin_unlock(&journal->j_state_lock);
+
+	if (unlikely(is_journal_aborted(journal))) {
+		printk(KERN_EMERG "journal commit I/O error\n");
+		err = -EIO;
+	}
+	return err;
+}
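+
+/*
+ * Together, journal_start_commit() and log_wait_commit() give callers a
+ * simple synchronous flush.  A filesystem's sync path might do roughly:
+ *
+ *	tid_t tid;
+ *
+ *	if (journal_start_commit(journal, &tid))
+ *		log_wait_commit(journal, tid);
+ *
+ * i.e. kick the commit thread if there is anything to commit, then sleep
+ * until j_commit_sequence catches up with that tid.
+ */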
+
+/*
+ * Log buffer allocation routines:
+ */
+
+int journal_next_log_block(journal_t *journal, unsigned long *retp)
+{
+	unsigned long blocknr;
+
+	spin_lock(&journal->j_state_lock);
+	J_ASSERT(journal->j_free > 1);
+
+	blocknr = journal->j_head;
+	journal->j_head++;
+	journal->j_free--;
+	if (journal->j_head == journal->j_last)
+		journal->j_head = journal->j_first;
+	spin_unlock(&journal->j_state_lock);
+	return journal_bmap(journal, blocknr, retp);
+}
+
+/*
+ * Conversion of logical to physical block numbers for the journal
+ *
+ * On external journals the journal blocks are identity-mapped, so
+ * this is a no-op.  If needed, we can use j_blk_offset - everything is
+ * ready.
+ */
+int journal_bmap(journal_t *journal, unsigned long blocknr, 
+		 unsigned long *retp)
+{
+	int err = 0;
+	unsigned long ret;
+
+	if (journal->j_inode) {
+		ret = bmap(journal->j_inode, blocknr);
+		if (ret)
+			*retp = ret;
+		else {
+			char b[BDEVNAME_SIZE];
+
+			printk(KERN_ALERT "%s: journal block not found "
+					"at offset %lu on %s\n",
+				__FUNCTION__,
+				blocknr,
+				bdevname(journal->j_dev, b));
+			err = -EIO;
+			__journal_abort_soft(journal, err);
+		}
+	} else {
+		*retp = blocknr; /* +journal->j_blk_offset */
+	}
+	return err;
+}
+
+/*
+ * We play buffer_head aliasing tricks to write data/metadata blocks to
+ * the journal without copying their contents, but for journal
+ * descriptor blocks we do need to generate bona fide buffers.
+ *
+ * After the caller of journal_get_descriptor_buffer() has finished modifying
+ * the buffer's contents they really should run flush_dcache_page(bh->b_page).
+ * But we don't bother doing that, so there will be coherency problems with
+ * mmaps of blockdevs which hold live JBD-controlled filesystems.
+ */
+struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
+{
+	struct buffer_head *bh;
+	unsigned long blocknr;
+	int err;
+
+	err = journal_next_log_block(journal, &blocknr);
+
+	if (err)
+		return NULL;
+
+	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+	lock_buffer(bh);
+	memset(bh->b_data, 0, journal->j_blocksize);
+	set_buffer_uptodate(bh);
+	unlock_buffer(bh);
+	BUFFER_TRACE(bh, "return this buffer");
+	return journal_add_journal_head(bh);
+}
+
+/*
+ * Management for journal control blocks: functions to create and
+ * destroy journal_t structures, and to initialise and read existing
+ * journal blocks from disk.  */
+
+/* First: create and setup a journal_t object in memory.  We initialise
+ * very few fields yet: that has to wait until we have created the
+ * journal structures from scratch, or loaded them from disk. */
+
+static journal_t * journal_init_common (void)
+{
+	journal_t *journal;
+	int err;
+
+	journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+	if (!journal)
+		goto fail;
+	memset(journal, 0, sizeof(*journal));
+
+	init_waitqueue_head(&journal->j_wait_transaction_locked);
+	init_waitqueue_head(&journal->j_wait_logspace);
+	init_waitqueue_head(&journal->j_wait_done_commit);
+	init_waitqueue_head(&journal->j_wait_checkpoint);
+	init_waitqueue_head(&journal->j_wait_commit);
+	init_waitqueue_head(&journal->j_wait_updates);
+	init_MUTEX(&journal->j_barrier);
+	init_MUTEX(&journal->j_checkpoint_sem);
+	spin_lock_init(&journal->j_revoke_lock);
+	spin_lock_init(&journal->j_list_lock);
+	spin_lock_init(&journal->j_state_lock);
+
+	journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
+
+	/* The journal is marked for error until we succeed with recovery! */
+	journal->j_flags = JFS_ABORT;
+
+	/* Set up a default-sized revoke table for the new mount. */
+	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
+	if (err) {
+		kfree(journal);
+		goto fail;
+	}
+	return journal;
+fail:
+	return NULL;
+}
+
+/* journal_init_dev and journal_init_inode:
+ *
+ * Create a journal structure assigned some fixed set of disk blocks to
+ * the journal.  We don't actually touch those disk blocks yet, but we
+ * need to set up all of the mapping information to tell the journaling
+ * system where the journal blocks are.
+ *
+ */
+
+/**
+ *  journal_t * journal_init_dev() - creates and initialises a journal structure
+ *  @bdev: Block device on which to create the journal
+ *  @fs_dev: Device which holds the journalled filesystem for this journal.
+ *  @start: Block nr of the start of the journal.
+ *  @len:  Length of the journal in blocks.
+ *  @blocksize: blocksize of journalling device
+ *  @returns: a newly created journal_t *
+ *  
+ *  journal_init_dev creates a journal which maps a fixed contiguous
+ *  range of blocks on an arbitrary block device.
+ * 
+ */
+journal_t * journal_init_dev(struct block_device *bdev,
+			struct block_device *fs_dev,
+			int start, int len, int blocksize)
+{
+	journal_t *journal = journal_init_common();
+	struct buffer_head *bh;
+	int n;
+
+	if (!journal)
+		return NULL;
+
+	journal->j_dev = bdev;
+	journal->j_fs_dev = fs_dev;
+	journal->j_blk_offset = start;
+	journal->j_maxlen = len;
+	journal->j_blocksize = blocksize;
+
+	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
+	J_ASSERT(bh != NULL);
+	journal->j_sb_buffer = bh;
+	journal->j_superblock = (journal_superblock_t *)bh->b_data;
+
+	/* journal descriptor can store up to n blocks -bzzz */
+	n = journal->j_blocksize / sizeof(journal_block_tag_t);
+	journal->j_wbufsize = n;
+	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
+	if (!journal->j_wbuf) {
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
+			__FUNCTION__);
+		kfree(journal);
+		journal = NULL;
+	}
+
+	return journal;
+}
+ 
+/** 
+ *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
+ *  @inode: An inode to create the journal in
+ *  
+ * journal_init_inode creates a journal which maps an on-disk inode as
+ * the journal.  The inode must exist already, must support bmap() and
+ * must have all data blocks preallocated.
+ */
+journal_t * journal_init_inode (struct inode *inode)
+{
+	struct buffer_head *bh;
+	journal_t *journal = journal_init_common();
+	int err;
+	int n;
+	unsigned long blocknr;
+
+	if (!journal)
+		return NULL;
+
+	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
+	journal->j_inode = inode;
+	jbd_debug(1,
+		  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
+		  journal, inode->i_sb->s_id, inode->i_ino, 
+		  (long long) inode->i_size,
+		  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
+
+	journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
+	journal->j_blocksize = inode->i_sb->s_blocksize;
+
+	/* journal descriptor can store up to n blocks -bzzz */
+	n = journal->j_blocksize / sizeof(journal_block_tag_t);
+	journal->j_wbufsize = n;
+	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
+	if (!journal->j_wbuf) {
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
+			__FUNCTION__);
+		kfree(journal);
+		return NULL;
+	}
+
+	err = journal_bmap(journal, 0, &blocknr);
+	/* If that failed, give up */
+	if (err) {
+		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
+		       __FUNCTION__);
+		kfree(journal);
+		return NULL;
+	}
+
+	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+	J_ASSERT(bh != NULL);
+	journal->j_sb_buffer = bh;
+	journal->j_superblock = (journal_superblock_t *)bh->b_data;
+
+	return journal;
+}
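+
+/*
+ * A typical client pairs this with journal_load(), roughly (the error
+ * handling here is only a sketch):
+ *
+ *	journal = journal_init_inode(journal_inode);
+ *	if (!journal)
+ *		fail;
+ *	if (journal_load(journal)) {
+ *		journal_destroy(journal);
+ *		fail;
+ *	}
+ *
+ * journal_load() runs recovery if needed and clears JFS_ABORT on success.
+ */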
+
+/* 
+ * If the journal init or create aborts, we need to mark the journal
+ * superblock as being NULL to prevent the journal destroy from writing
+ * back a bogus superblock. 
+ */
+static void journal_fail_superblock (journal_t *journal)
+{
+	struct buffer_head *bh = journal->j_sb_buffer;
+	brelse(bh);
+	journal->j_sb_buffer = NULL;
+}
+
+/*
+ * Given a journal_t structure, initialise the various fields for
+ * startup of a new journaling session.  We use this both when creating
+ * a journal, and after recovering an old journal to reset it for
+ * subsequent use.
+ */
+
+static int journal_reset(journal_t *journal)
+{
+	journal_superblock_t *sb = journal->j_superblock;
+	unsigned int first, last;
+
+	first = be32_to_cpu(sb->s_first);
+	last = be32_to_cpu(sb->s_maxlen);
+
+	journal->j_first = first;
+	journal->j_last = last;
+
+	journal->j_head = first;
+	journal->j_tail = first;
+	journal->j_free = last - first;
+
+	journal->j_tail_sequence = journal->j_transaction_sequence;
+	journal->j_commit_sequence = journal->j_transaction_sequence - 1;
+	journal->j_commit_request = journal->j_commit_sequence;
+
+	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
+
+	/* Add the dynamic fields and write it to disk. */
+	journal_update_superblock(journal, 1);
+	journal_start_thread(journal);
+	return 0;
+}
+
+/** 
+ * int journal_create() - Initialise the new journal file
+ * @journal: Journal to create. This structure must have been initialised
+ * 
+ * Given a journal_t structure which tells us which disk blocks we can
+ * use, create a new journal superblock and initialise all of the
+ * journal fields from scratch.  
+ **/
+int journal_create(journal_t *journal)
+{
+	unsigned long blocknr;
+	struct buffer_head *bh;
+	journal_superblock_t *sb;
+	int i, err;
+
+	if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
+		printk (KERN_ERR "Journal length (%d blocks) too short.\n",
+			journal->j_maxlen);
+		journal_fail_superblock(journal);
+		return -EINVAL;
+	}
+
+	if (journal->j_inode == NULL) {
+		/*
+		 * We don't know what block to start at!
+		 */
+		printk(KERN_EMERG
+		       "%s: creation of journal on external device!\n",
+		       __FUNCTION__);
+		BUG();
+	}
+
+	/* Zero out the entire journal on disk.  We cannot afford to
+	   have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
+	jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
+	for (i = 0; i < journal->j_maxlen; i++) {
+		err = journal_bmap(journal, i, &blocknr);
+		if (err)
+			return err;
+		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+		lock_buffer(bh);
+		memset (bh->b_data, 0, journal->j_blocksize);
+		BUFFER_TRACE(bh, "marking dirty");
+		mark_buffer_dirty(bh);
+		BUFFER_TRACE(bh, "marking uptodate");
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		__brelse(bh);
+	}
+
+	sync_blockdev(journal->j_dev);
+	jbd_debug(1, "JBD: journal cleared.\n");
+
+	/* OK, fill in the initial static fields in the new superblock */
+	sb = journal->j_superblock;
+
+	sb->s_header.h_magic	 = cpu_to_be32(JFS_MAGIC_NUMBER);
+	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+
+	sb->s_blocksize	= cpu_to_be32(journal->j_blocksize);
+	sb->s_maxlen	= cpu_to_be32(journal->j_maxlen);
+	sb->s_first	= cpu_to_be32(1);
+
+	journal->j_transaction_sequence = 1;
+
+	journal->j_flags &= ~JFS_ABORT;
+	journal->j_format_version = 2;
+
+	return journal_reset(journal);
+}
+
+/** 
+ * void journal_update_superblock() - Update journal sb on disk.
+ * @journal: The journal to update.
+ * @wait: Set to '0' if you don't want to wait for IO completion.
+ *
+ * Update a journal's dynamic superblock fields and write it to disk,
+ * optionally waiting for the IO to complete.
+ */
+void journal_update_superblock(journal_t *journal, int wait)
+{
+	journal_superblock_t *sb = journal->j_superblock;
+	struct buffer_head *bh = journal->j_sb_buffer;
+
+	/*
+	 * As a special case, if the on-disk copy is already marked as needing
+	 * no recovery (s_start == 0) and there are no outstanding transactions
+	 * in the filesystem, then we can safely defer the superblock update
+	 * until the next commit by setting JFS_FLUSHED.  This avoids
+	 * attempting a write to a potential-readonly device.
+	 */
+	if (sb->s_start == 0 && journal->j_tail_sequence ==
+				journal->j_transaction_sequence) {
+		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+			"(start %ld, seq %d, errno %d)\n",
+			journal->j_tail, journal->j_tail_sequence, 
+			journal->j_errno);
+		goto out;
+	}
+
+	spin_lock(&journal->j_state_lock);
+	jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
+		  journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+
+	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
+	sb->s_start    = cpu_to_be32(journal->j_tail);
+	sb->s_errno    = cpu_to_be32(journal->j_errno);
+	spin_unlock(&journal->j_state_lock);
+
+	BUFFER_TRACE(bh, "marking dirty");
+	mark_buffer_dirty(bh);
+	if (wait)
+		sync_dirty_buffer(bh);
+	else
+		ll_rw_block(WRITE, 1, &bh);
+
+out:
+	/* If we have just flushed the log (by marking s_start==0), then
+	 * any future commit will have to be careful to update the
+	 * superblock again to re-record the true start of the log. */
+
+	spin_lock(&journal->j_state_lock);
+	if (sb->s_start)
+		journal->j_flags &= ~JFS_FLUSHED;
+	else
+		journal->j_flags |= JFS_FLUSHED;
+	spin_unlock(&journal->j_state_lock);
+}
+
+/*
+ * Read the superblock for a given journal, performing initial
+ * validation of the format.
+ */
+
+static int journal_get_superblock(journal_t *journal)
+{
+	struct buffer_head *bh;
+	journal_superblock_t *sb;
+	int err = -EIO;
+
+	bh = journal->j_sb_buffer;
+
+	J_ASSERT(bh != NULL);
+	if (!buffer_uptodate(bh)) {
+		ll_rw_block(READ, 1, &bh);
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh)) {
+			printk (KERN_ERR
+				"JBD: IO error reading journal superblock\n");
+			goto out;
+		}
+	}
+
+	sb = journal->j_superblock;
+
+	err = -EINVAL;
+
+	if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
+	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
+		printk(KERN_WARNING "JBD: no valid journal superblock found\n");
+		goto out;
+	}
+
+	switch(be32_to_cpu(sb->s_header.h_blocktype)) {
+	case JFS_SUPERBLOCK_V1:
+		journal->j_format_version = 1;
+		break;
+	case JFS_SUPERBLOCK_V2:
+		journal->j_format_version = 2;
+		break;
+	default:
+		printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
+		goto out;
+	}
+
+	if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
+		journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
+	else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
+		printk (KERN_WARNING "JBD: journal file too short\n");
+		goto out;
+	}
+
+	return 0;
+
+out:
+	journal_fail_superblock(journal);
+	return err;
+}
+
+/*
+ * Load the on-disk journal superblock and read the key fields into the
+ * journal_t.
+ */
+
+static int load_superblock(journal_t *journal)
+{
+	int err;
+	journal_superblock_t *sb;
+
+	err = journal_get_superblock(journal);
+	if (err)
+		return err;
+
+	sb = journal->j_superblock;
+
+	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
+	journal->j_tail = be32_to_cpu(sb->s_start);
+	journal->j_first = be32_to_cpu(sb->s_first);
+	journal->j_last = be32_to_cpu(sb->s_maxlen);
+	journal->j_errno = be32_to_cpu(sb->s_errno);
+
+	return 0;
+}
+
+
+/**
+ * int journal_load() - Read journal from disk.
+ * @journal: Journal to act on.
+ * 
+ * Given a journal_t structure which tells us which disk blocks contain
+ * a journal, read the journal from disk to initialise the in-memory
+ * structures.
+ */
+int journal_load(journal_t *journal)
+{
+	int err;
+
+	err = load_superblock(journal);
+	if (err)
+		return err;
+
+	/* If this is a V2 superblock, then we have to check the
+	 * features flags on it. */
+
+	if (journal->j_format_version >= 2) {
+		journal_superblock_t *sb = journal->j_superblock;
+
+		if ((sb->s_feature_ro_compat &
+		     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
+		    (sb->s_feature_incompat &
+		     ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
+			printk (KERN_WARNING
+				"JBD: Unrecognised features on journal\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Let the recovery code check whether it needs to recover any
+	 * data from the journal. */
+	if (journal_recover(journal))
+		goto recovery_error;
+
+	/* OK, we've finished with the dynamic journal bits:
+	 * reinitialise the dynamic contents of the superblock in memory
+	 * and reset them on disk. */
+	if (journal_reset(journal))
+		goto recovery_error;
+
+	journal->j_flags &= ~JFS_ABORT;
+	journal->j_flags |= JFS_LOADED;
+	return 0;
+
+recovery_error:
+	printk (KERN_WARNING "JBD: recovery failed\n");
+	return -EIO;
+}
+
+/**
+ * void journal_destroy() - Release a journal_t structure.
+ * @journal: Journal to act on.
+ *
+ * Release a journal_t structure once it is no longer in use by the
+ * journaled object.
+ */
+void journal_destroy(journal_t *journal)
+{
+	/* Wait for the commit thread to wake up and die. */
+	journal_kill_thread(journal);
+
+	/* Force a final log commit */
+	if (journal->j_running_transaction)
+		journal_commit_transaction(journal);
+
+	/* Force any old transactions to disk */
+
+	/* Totally anal locking here... */
+	spin_lock(&journal->j_list_lock);
+	while (journal->j_checkpoint_transactions != NULL) {
+		spin_unlock(&journal->j_list_lock);
+		log_do_checkpoint(journal);
+		spin_lock(&journal->j_list_lock);
+	}
+
+	J_ASSERT(journal->j_running_transaction == NULL);
+	J_ASSERT(journal->j_committing_transaction == NULL);
+	J_ASSERT(journal->j_checkpoint_transactions == NULL);
+	spin_unlock(&journal->j_list_lock);
+
+	/* We can now mark the journal as empty. */
+	journal->j_tail = 0;
+	journal->j_tail_sequence = ++journal->j_transaction_sequence;
+	if (journal->j_sb_buffer) {
+		journal_update_superblock(journal, 1);
+		brelse(journal->j_sb_buffer);
+	}
+
+	if (journal->j_inode)
+		iput(journal->j_inode);
+	if (journal->j_revoke)
+		journal_destroy_revoke(journal);
+	kfree(journal->j_wbuf);
+	kfree(journal);
+}
+
+
+/**
+ * int journal_check_used_features () - Check if features specified are used.
+ * @journal: Journal to check.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ * 
+ * Check whether the journal uses all of a given set of
+ * features.  Return true (non-zero) if it does. 
+ **/
+
+int journal_check_used_features (journal_t *journal, unsigned long compat,
+				 unsigned long ro, unsigned long incompat)
+{
+	journal_superblock_t *sb;
+
+	if (!compat && !ro && !incompat)
+		return 1;
+	if (journal->j_format_version == 1)
+		return 0;
+
+	sb = journal->j_superblock;
+
+	if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
+	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
+	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * int journal_check_available_features() - Check feature set in journalling layer
+ * @journal: Journal to check.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ * 
+ * Check whether the journaling code supports the use of
+ * all of a given set of features on this journal.  Return true
+ * (non-zero) if it can. */
+
+int journal_check_available_features (journal_t *journal, unsigned long compat,
+				      unsigned long ro, unsigned long incompat)
+{
+	journal_superblock_t *sb;
+
+	if (!compat && !ro && !incompat)
+		return 1;
+
+	sb = journal->j_superblock;
+
+	/* We can support any known requested features iff the
+	 * superblock is in version 2.  Otherwise we fail to support any
+	 * extended sb features. */
+
+	if (journal->j_format_version != 2)
+		return 0;
+
+	if ((compat   & JFS_KNOWN_COMPAT_FEATURES) == compat &&
+	    (ro       & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
+	    (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * int journal_set_features () - Mark a given journal feature in the superblock
+ * @journal: Journal to act on.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ *
+ * Mark a given journal feature as present on the
+ * superblock.  Returns true if the requested features could be set. 
+ *
+ */
+
+int journal_set_features (journal_t *journal, unsigned long compat,
+			  unsigned long ro, unsigned long incompat)
+{
+	journal_superblock_t *sb;
+
+	if (journal_check_used_features(journal, compat, ro, incompat))
+		return 1;
+
+	if (!journal_check_available_features(journal, compat, ro, incompat))
+		return 0;
+
+	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
+		  compat, ro, incompat);
+
+	sb = journal->j_superblock;
+
+	sb->s_feature_compat    |= cpu_to_be32(compat);
+	sb->s_feature_ro_compat |= cpu_to_be32(ro);
+	sb->s_feature_incompat  |= cpu_to_be32(incompat);
+
+	return 1;
+}
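+
+/*
+ * For instance, a client that needs an incompatible feature bit
+ * (incompat_bit below stands in for a real JFS_FEATURE_INCOMPAT_* flag)
+ * might do:
+ *
+ *	if (!journal_set_features(journal, 0, 0, incompat_bit))
+ *		refuse the mount, or fall back to not using the feature;
+ *
+ * journal_set_features() returns true either if the bits are already
+ * present or if they are supported and have now been set.
+ */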
+
+
+/**
+ * int journal_update_format () - Update on-disk journal structure.
+ * @journal: Journal to act on.
+ *
+ * Given an initialised but unloaded journal struct, poke about in the
+ * on-disk structure to update it to the most recent supported version.
+ */
+int journal_update_format (journal_t *journal)
+{
+	journal_superblock_t *sb;
+	int err;
+
+	err = journal_get_superblock(journal);
+	if (err)
+		return err;
+
+	sb = journal->j_superblock;
+
+	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
+	case JFS_SUPERBLOCK_V2:
+		return 0;
+	case JFS_SUPERBLOCK_V1:
+		return journal_convert_superblock_v1(journal, sb);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int journal_convert_superblock_v1(journal_t *journal,
+					 journal_superblock_t *sb)
+{
+	int offset, blocksize;
+	struct buffer_head *bh;
+
+	printk(KERN_WARNING
+		"JBD: Converting superblock from version 1 to 2.\n");
+
+	/* Pre-initialise new fields to zero */
+	offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);
+	blocksize = be32_to_cpu(sb->s_blocksize);
+	memset(&sb->s_feature_compat, 0, blocksize-offset);
+
+	sb->s_nr_users = cpu_to_be32(1);
+	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+	journal->j_format_version = 2;
+
+	bh = journal->j_sb_buffer;
+	BUFFER_TRACE(bh, "marking dirty");
+	mark_buffer_dirty(bh);
+	sync_dirty_buffer(bh);
+	return 0;
+}
+
+
+/**
+ * int journal_flush () - Flush journal
+ * @journal: Journal to act on.
+ * 
+ * Flush all data for a given journal to disk and empty the journal.
+ * Filesystems can use this when remounting readonly to ensure that
+ * recovery does not need to happen on remount.
+ */
+
+int journal_flush(journal_t *journal)
+{
+	int err = 0;
+	transaction_t *transaction = NULL;
+	unsigned long old_tail;
+
+	spin_lock(&journal->j_state_lock);
+
+	/* Force everything buffered to the log... */
+	if (journal->j_running_transaction) {
+		transaction = journal->j_running_transaction;
+		__log_start_commit(journal, transaction->t_tid);
+	} else if (journal->j_committing_transaction)
+		transaction = journal->j_committing_transaction;
+
+	/* Wait for the log commit to complete... */
+	if (transaction) {
+		tid_t tid = transaction->t_tid;
+
+		spin_unlock(&journal->j_state_lock);
+		log_wait_commit(journal, tid);
+	} else {
+		spin_unlock(&journal->j_state_lock);
+	}
+
+	/* ...and flush everything in the log out to disk. */
+	spin_lock(&journal->j_list_lock);
+	while (!err && journal->j_checkpoint_transactions != NULL) {
+		spin_unlock(&journal->j_list_lock);
+		err = log_do_checkpoint(journal);
+		spin_lock(&journal->j_list_lock);
+	}
+	spin_unlock(&journal->j_list_lock);
+	cleanup_journal_tail(journal);
+
+	/* Finally, mark the journal as really needing no recovery.
+	 * This sets s_start==0 in the underlying superblock, which is
+	 * the magic code for a fully-recovered superblock.  Any future
+	 * commits of data to the journal will restore the current
+	 * s_start value. */
+	spin_lock(&journal->j_state_lock);
+	old_tail = journal->j_tail;
+	journal->j_tail = 0;
+	spin_unlock(&journal->j_state_lock);
+	journal_update_superblock(journal, 1);
+	spin_lock(&journal->j_state_lock);
+	journal->j_tail = old_tail;
+
+	J_ASSERT(!journal->j_running_transaction);
+	J_ASSERT(!journal->j_committing_transaction);
+	J_ASSERT(!journal->j_checkpoint_transactions);
+	J_ASSERT(journal->j_head == journal->j_tail);
+	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+	spin_unlock(&journal->j_state_lock);
+	return err;
+}
+
+/**
+ * int journal_wipe() - Wipe journal contents
+ * @journal: Journal to act on.
+ * @write: flag (see below)
+ * 
+ * Wipe out all of the contents of a journal, safely.  This will produce
+ * a warning if the journal contains any valid recovery information.
+ * Must be called between journal_init_*() and journal_load().
+ *
+ * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
+ * we merely suppress recovery.
+ */
+
+int journal_wipe(journal_t *journal, int write)
+{
+	journal_superblock_t *sb;
+	int err = 0;
+
+	J_ASSERT (!(journal->j_flags & JFS_LOADED));
+
+	err = load_superblock(journal);
+	if (err)
+		return err;
+
+	sb = journal->j_superblock;
+
+	if (!journal->j_tail)
+		goto no_recovery;
+
+	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
+		write ? "Clearing" : "Ignoring");
+
+	err = journal_skip_recovery(journal);
+	if (write)
+		journal_update_superblock(journal, 1);
+
+ no_recovery:
+	return err;
+}
+
+/*
+ * journal_dev_name: format a character string to describe on what
+ * device this journal is present.
+ */
+
+const char *journal_dev_name(journal_t *journal, char *buffer)
+{
+	struct block_device *bdev;
+
+	if (journal->j_inode)
+		bdev = journal->j_inode->i_sb->s_bdev;
+	else
+		bdev = journal->j_dev;
+
+	return bdevname(bdev, buffer);
+}
+
+/*
+ * Journal abort has very specific semantics; these are described in
+ * the documentation for journal_abort() below.
+ *
+ * Two internal functions, which provide abort to the jbd layer
+ * itself, are here.
+ */
+
+/*
+ * Quick version for internal journal use (doesn't lock the journal).
+ * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
+ * and don't attempt to make any other journal updates.
+ */
+void __journal_abort_hard(journal_t *journal)
+{
+	transaction_t *transaction;
+	char b[BDEVNAME_SIZE];
+
+	if (journal->j_flags & JFS_ABORT)
+		return;
+
+	printk(KERN_ERR "Aborting journal on device %s.\n",
+		journal_dev_name(journal, b));
+
+	spin_lock(&journal->j_state_lock);
+	journal->j_flags |= JFS_ABORT;
+	transaction = journal->j_running_transaction;
+	if (transaction)
+		__log_start_commit(journal, transaction->t_tid);
+	spin_unlock(&journal->j_state_lock);
+}
+
+/* Soft abort: record the abort error status in the journal superblock,
+ * but don't do any other IO. */
+void __journal_abort_soft (journal_t *journal, int errno)
+{
+	if (journal->j_flags & JFS_ABORT)
+		return;
+
+	if (!journal->j_errno)
+		journal->j_errno = errno;
+
+	__journal_abort_hard(journal);
+
+	if (errno)
+		journal_update_superblock(journal, 1);
+}
+
+/**
+ * void journal_abort () - Shutdown the journal immediately.
+ * @journal: the journal to shutdown.
+ * @errno:   an error number to record in the journal indicating
+ *           the reason for the shutdown.
+ *
+ * Perform a complete, immediate shutdown of the ENTIRE
+ * journal (not of a single transaction).  This operation cannot be
+ * undone without closing and reopening the journal.
+ *           
+ * The journal_abort function is intended to support higher level error
+ * recovery mechanisms such as the ext2/ext3 remount-readonly error
+ * mode.
+ *
+ * Journal abort has very specific semantics.  Any existing dirty,
+ * unjournaled buffers in the main filesystem will still be written to
+ * disk by bdflush, but the journaling mechanism will be suspended
+ * immediately and no further transaction commits will be honoured.
+ *
+ * Any dirty, journaled buffers will be written back to disk without
+ * hitting the journal.  Atomicity cannot be guaranteed on an aborted
+ * filesystem, but we _do_ attempt to leave as much data as possible
+ * behind for fsck to use for cleanup.
+ *
+ * Any attempt to get a new transaction handle on a journal which is in
+ * ABORT state will just result in an -EROFS error return.  A
+ * journal_stop on an existing handle will return -EIO if we have
+ * entered abort state during the update.
+ *
+ * Recursive transactions are not disturbed by journal abort until the
+ * final journal_stop, which will receive the -EIO error.
+ *
+ * Finally, the journal_abort call allows the caller to supply an errno
+ * which will be recorded (if possible) in the journal superblock.  This
+ * allows a client to record failure conditions in the middle of a
+ * transaction without having to complete the transaction to record the
+ * failure to disk.  ext3_error, for example, now uses this
+ * functionality.
+ *
+ * Errors which originate from within the journaling layer will NOT
+ * supply an errno; a null errno implies that absolutely no further
+ * writes are done to the journal (unless there are any already in
+ * progress).
+ * 
+ */
+
+void journal_abort(journal_t *journal, int errno)
+{
+	__journal_abort_soft(journal, errno);
+}
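+
+/*
+ * So a filesystem-level error handler reduces to something like:
+ *
+ *	journal_abort(journal, -EIO);
+ *	remount the filesystem read-only;
+ *
+ * after which new handles fail with -EROFS and, because a non-zero errno
+ * was supplied, the error is recorded in the on-disk journal superblock.
+ */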
+
+/** 
+ * int journal_errno () - returns the journal's error state.
+ * @journal: journal to examine.
+ *
+ * This is the errno number set with journal_abort(), the last
+ * time the journal was mounted - if the journal was stopped
+ * without calling abort this will be 0.
+ *
+ * If the journal has been aborted on this mount, -EROFS will
+ * be returned.
+ */
+int journal_errno(journal_t *journal)
+{
+	int err;
+
+	spin_lock(&journal->j_state_lock);
+	if (journal->j_flags & JFS_ABORT)
+		err = -EROFS;
+	else
+		err = journal->j_errno;
+	spin_unlock(&journal->j_state_lock);
+	return err;
+}
+
+/** 
+ * int journal_clear_err () - clears the journal's error state
+ * @journal: journal to act on.
+ *
+ * An error must be cleared or Acked to take a FS out of readonly
+ * mode.
+ */
+int journal_clear_err(journal_t *journal)
+{
+	int err = 0;
+
+	spin_lock(&journal->j_state_lock);
+	if (journal->j_flags & JFS_ABORT)
+		err = -EROFS;
+	else
+		journal->j_errno = 0;
+	spin_unlock(&journal->j_state_lock);
+	return err;
+}
+
+/** 
+ * void journal_ack_err() - Ack journal err.
+ * @journal: journal to act on.
+ *
+ * An error must be cleared or Acked to take a FS out of readonly
+ * mode.
+ */
+void journal_ack_err(journal_t *journal)
+{
+	spin_lock(&journal->j_state_lock);
+	if (journal->j_errno)
+		journal->j_flags |= JFS_ACK_ERR;
+	spin_unlock(&journal->j_state_lock);
+}
+
+int journal_blocks_per_page(struct inode *inode)
+{
+	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+}
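+
+/*
+ * e.g. with 4K pages (PAGE_CACHE_SHIFT == 12) and a 1K-block filesystem
+ * (s_blocksize_bits == 10) this is 1 << 2 == 4 buffers per page.
+ */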
+
+/*
+ * Simple support for retrying memory allocations.  Introduced to help to
+ * debug different VM deadlock avoidance strategies. 
+ */
+void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry)
+{
+	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
+}
+
+/*
+ * Journal_head storage management
+ */
+static kmem_cache_t *journal_head_cache;
+#ifdef CONFIG_JBD_DEBUG
+static atomic_t nr_journal_heads = ATOMIC_INIT(0);
+#endif
+
+static int journal_init_journal_head_cache(void)
+{
+	int retval;
+
+	J_ASSERT(journal_head_cache == 0);
+	journal_head_cache = kmem_cache_create("journal_head",
+				sizeof(struct journal_head),
+				0,		/* offset */
+				0,		/* flags */
+				NULL,		/* ctor */
+				NULL);		/* dtor */
+	retval = 0;
+	if (journal_head_cache == 0) {
+		retval = -ENOMEM;
+		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
+	}
+	return retval;
+}
+
+static void journal_destroy_journal_head_cache(void)
+{
+	J_ASSERT(journal_head_cache != NULL);
+	kmem_cache_destroy(journal_head_cache);
+	journal_head_cache = NULL;
+}
+
+/*
+ * journal_head splicing and dicing
+ */
+static struct journal_head *journal_alloc_journal_head(void)
+{
+	struct journal_head *ret;
+	static unsigned long last_warning;
+
+#ifdef CONFIG_JBD_DEBUG
+	atomic_inc(&nr_journal_heads);
+#endif
+	ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+	if (ret == 0) {
+		jbd_debug(1, "out of memory for journal_head\n");
+		if (time_after(jiffies, last_warning + 5*HZ)) {
+			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
+			       __FUNCTION__);
+			last_warning = jiffies;
+		}
+		while (ret == 0) {
+			yield();
+			ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+		}
+	}
+	return ret;
+}
+
+static void journal_free_journal_head(struct journal_head *jh)
+{
+#ifdef CONFIG_JBD_DEBUG
+	atomic_dec(&nr_journal_heads);
+	memset(jh, 0x5b, sizeof(*jh));
+#endif
+	kmem_cache_free(journal_head_cache, jh);
+}
+
+/*
+ * A journal_head is attached to a buffer_head whenever JBD has an
+ * interest in the buffer.
+ *
+ * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
+ * is set.  This bit is tested in core kernel code where we need to take
+ * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
+ * there.
+ *
+ * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
+ *
+ * When a buffer has its BH_JBD bit set it is immune from being released by
+ * core kernel code, mainly via ->b_count.
+ *
+ * A journal_head may be detached from its buffer_head when the journal_head's
+ * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
+ * Various places in JBD call journal_remove_journal_head() to indicate that the
+ * journal_head can be dropped if needed.
+ *
+ * Various places in the kernel want to attach a journal_head to a buffer_head
+ * _before_ attaching the journal_head to a transaction.  To protect the
+ * journal_head in this situation, journal_add_journal_head elevates the
+ * journal_head's b_jcount refcount by one.  The caller must call
+ * journal_put_journal_head() to undo this.
+ *
+ * So the typical usage would be:
+ *
+ *	(Attach a journal_head if needed.  Increments b_jcount)
+ *	struct journal_head *jh = journal_add_journal_head(bh);
+ *	...
+ *	jh->b_transaction = xxx;
+ *	journal_put_journal_head(jh);
+ *
+ * Now, the journal_head's b_jcount is zero, but it is safe from being released
+ * because it has a non-zero b_transaction.
+ */
+
+/*
+ * Give a buffer_head a journal_head.
+ *
+ * Doesn't need the journal lock.
+ * May sleep.
+ */
+struct journal_head *journal_add_journal_head(struct buffer_head *bh)
+{
+	struct journal_head *jh;
+	struct journal_head *new_jh = NULL;
+
+repeat:
+	if (!buffer_jbd(bh)) {
+		new_jh = journal_alloc_journal_head();
+		memset(new_jh, 0, sizeof(*new_jh));
+	}
+
+	jbd_lock_bh_journal_head(bh);
+	if (buffer_jbd(bh)) {
+		jh = bh2jh(bh);
+	} else {
+		J_ASSERT_BH(bh,
+			(atomic_read(&bh->b_count) > 0) ||
+			(bh->b_page && bh->b_page->mapping));
+
+		if (!new_jh) {
+			jbd_unlock_bh_journal_head(bh);
+			goto repeat;
+		}
+
+		jh = new_jh;
+		new_jh = NULL;		/* We consumed it */
+		set_buffer_jbd(bh);
+		bh->b_private = jh;
+		jh->b_bh = bh;
+		get_bh(bh);
+		BUFFER_TRACE(bh, "added journal_head");
+	}
+	jh->b_jcount++;
+	jbd_unlock_bh_journal_head(bh);
+	if (new_jh)
+		journal_free_journal_head(new_jh);
+	return bh->b_private;
+}
+
+/*
+ * Grab a ref against this buffer_head's journal_head.  If it ended up not
+ * having a journal_head, return NULL
+ */
+struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
+{
+	struct journal_head *jh = NULL;
+
+	jbd_lock_bh_journal_head(bh);
+	if (buffer_jbd(bh)) {
+		jh = bh2jh(bh);
+		jh->b_jcount++;
+	}
+	jbd_unlock_bh_journal_head(bh);
+	return jh;
+}
+
+static void __journal_remove_journal_head(struct buffer_head *bh)
+{
+	struct journal_head *jh = bh2jh(bh);
+
+	J_ASSERT_JH(jh, jh->b_jcount >= 0);
+
+	get_bh(bh);
+	if (jh->b_jcount == 0) {
+		if (jh->b_transaction == NULL &&
+				jh->b_next_transaction == NULL &&
+				jh->b_cp_transaction == NULL) {
+			J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
+			J_ASSERT_BH(bh, buffer_jbd(bh));
+			J_ASSERT_BH(bh, jh2bh(jh) == bh);
+			BUFFER_TRACE(bh, "remove journal_head");
+			if (jh->b_frozen_data) {
+				printk(KERN_WARNING "%s: freeing "
+						"b_frozen_data\n",
+						__FUNCTION__);
+				kfree(jh->b_frozen_data);
+			}
+			if (jh->b_committed_data) {
+				printk(KERN_WARNING "%s: freeing "
+						"b_committed_data\n",
+						__FUNCTION__);
+				kfree(jh->b_committed_data);
+			}
+			bh->b_private = NULL;
+			jh->b_bh = NULL;	/* debug, really */
+			clear_buffer_jbd(bh);
+			__brelse(bh);
+			journal_free_journal_head(jh);
+		} else {
+			BUFFER_TRACE(bh, "journal_head was locked");
+		}
+	}
+}
+
+/*
+ * journal_remove_journal_head(): if the buffer isn't attached to a transaction
+ * and has a zero b_jcount then remove and release its journal_head.   If we did
+ * see that the buffer is not used by any transaction we also "logically"
+ * decrement ->b_count.
+ *
+ * We in fact take an additional increment on ->b_count as a convenience,
+ * because the caller usually wants to do additional things with the bh
+ * after calling here.
+ * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
+ * time.  Once the caller has run __brelse(), the buffer is eligible for
+ * reaping by try_to_free_buffers().
+ */
+void journal_remove_journal_head(struct buffer_head *bh)
+{
+	jbd_lock_bh_journal_head(bh);
+	__journal_remove_journal_head(bh);
+	jbd_unlock_bh_journal_head(bh);
+}
+
+/*
+ * Drop a reference on the passed journal_head.  If it fell to zero then try to
+ * release the journal_head from the buffer_head.
+ */
+void journal_put_journal_head(struct journal_head *jh)
+{
+	struct buffer_head *bh = jh2bh(jh);
+
+	jbd_lock_bh_journal_head(bh);
+	J_ASSERT_JH(jh, jh->b_jcount > 0);
+	--jh->b_jcount;
+	if (!jh->b_jcount && !jh->b_transaction) {
+		__journal_remove_journal_head(bh);
+		__brelse(bh);
+	}
+	jbd_unlock_bh_journal_head(bh);
+}
+
+/*
+ * /proc tunables
+ */
+#if defined(CONFIG_JBD_DEBUG)
+int journal_enable_debug;
+EXPORT_SYMBOL(journal_enable_debug);
+#endif
+
+#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+
+static struct proc_dir_entry *proc_jbd_debug;
+
+int read_jbd_debug(char *page, char **start, off_t off,
+			  int count, int *eof, void *data)
+{
+	int ret;
+
+	ret = sprintf(page + off, "%d\n", journal_enable_debug);
+	*eof = 1;
+	return ret;
+}
+
+int write_jbd_debug(struct file *file, const char __user *buffer,
+			   unsigned long count, void *data)
+{
+	char buf[32];
+
+	if (count > ARRAY_SIZE(buf) - 1)
+		count = ARRAY_SIZE(buf) - 1;
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+	buf[ARRAY_SIZE(buf) - 1] = '\0';
+	journal_enable_debug = simple_strtoul(buf, NULL, 10);
+	return count;
+}
+
+#define JBD_PROC_NAME "sys/fs/jbd-debug"
+
+static void __init create_jbd_proc_entry(void)
+{
+	proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
+	if (proc_jbd_debug) {
+		/* Why is this so hard? */
+		proc_jbd_debug->read_proc = read_jbd_debug;
+		proc_jbd_debug->write_proc = write_jbd_debug;
+	}
+}
+
+static void __exit remove_jbd_proc_entry(void)
+{
+	if (proc_jbd_debug)
+		remove_proc_entry(JBD_PROC_NAME, NULL);
+}
+
+#else
+
+#define create_jbd_proc_entry() do {} while (0)
+#define remove_jbd_proc_entry() do {} while (0)
+
+#endif
+
+kmem_cache_t *jbd_handle_cache;
+
+static int __init journal_init_handle_cache(void)
+{
+	jbd_handle_cache = kmem_cache_create("journal_handle",
+				sizeof(handle_t),
+				0,		/* offset */
+				0,		/* flags */
+				NULL,		/* ctor */
+				NULL);		/* dtor */
+	if (jbd_handle_cache == NULL) {
+		printk(KERN_EMERG "JBD: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void journal_destroy_handle_cache(void)
+{
+	if (jbd_handle_cache)
+		kmem_cache_destroy(jbd_handle_cache);
+}
+
+/*
+ * Module startup and shutdown
+ */
+
+static int __init journal_init_caches(void)
+{
+	int ret;
+
+	ret = journal_init_revoke_caches();
+	if (ret == 0)
+		ret = journal_init_journal_head_cache();
+	if (ret == 0)
+		ret = journal_init_handle_cache();
+	return ret;
+}
+
+static void journal_destroy_caches(void)
+{
+	journal_destroy_revoke_caches();
+	journal_destroy_journal_head_cache();
+	journal_destroy_handle_cache();
+}
+
+static int __init journal_init(void)
+{
+	int ret;
+
+	ret = journal_init_caches();
+	if (ret != 0)
+		journal_destroy_caches();
+	create_jbd_proc_entry();
+	return ret;
+}
+
+static void __exit journal_exit(void)
+{
+#ifdef CONFIG_JBD_DEBUG
+	int n = atomic_read(&nr_journal_heads);
+	if (n)
+		printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
+#endif
+	remove_jbd_proc_entry();
+	journal_destroy_caches();
+}
+
+MODULE_LICENSE("GPL");
+module_init(journal_init);
+module_exit(journal_exit);
+
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
new file mode 100644
index 0000000..103c34e
--- /dev/null
+++ b/fs/jbd/recovery.c
@@ -0,0 +1,591 @@
+/*
+ * linux/fs/recovery.c
+ * 
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal recovery routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.  
+ */
+
+#ifndef __KERNEL__
+#include "jfs_user.h"
+#else
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#endif
+
+/*
+ * Maintain information about the progress of the recovery job, so that
+ * the different passes can carry information between them. 
+ */
+struct recovery_info 
+{
+	tid_t		start_transaction;
+	tid_t		end_transaction;
+
+	int		nr_replays;
+	int		nr_revokes;
+	int		nr_revoke_hits;
+};
+
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+static int do_one_pass(journal_t *journal,
+				struct recovery_info *info, enum passtype pass);
+static int scan_revoke_records(journal_t *, struct buffer_head *,
+				tid_t, struct recovery_info *);
+
+#ifdef __KERNEL__
+
+/* Release readahead buffers after use */
+void journal_brelse_array(struct buffer_head *b[], int n)
+{
+	while (--n >= 0)
+		brelse (b[n]);
+}
+
+
+/*
+ * When reading from the journal, we are going through the block device
+ * layer directly and so there is no readahead being done for us.  We
+ * need to implement any readahead ourselves if we want it to happen at
+ * all.  Recovery is basically one long sequential read, so make sure we
+ * do the IO in reasonably large chunks.
+ *
+ * This is not so critical that we need to be enormously clever about
+ * the readahead size, though.  128K is a purely arbitrary, good-enough
+ * fixed value.
+ */
+
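+/*
+ * For example (illustrative figures only): with a 4K journal block size
+ * the window above spans 128K/4K = 32 blocks, submitted to the block
+ * layer MAXBUF buffer_heads at a time; with 1K blocks it spans 128.
+ */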
+#define MAXBUF 8
+static int do_readahead(journal_t *journal, unsigned int start)
+{
+	int err;
+	unsigned int max, nbufs, next;
+	unsigned long blocknr;
+	struct buffer_head *bh;
+
+	struct buffer_head * bufs[MAXBUF];
+
+	/* Do up to 128K of readahead */
+	max = start + (128 * 1024 / journal->j_blocksize);
+	if (max > journal->j_maxlen)
+		max = journal->j_maxlen;
+
+	/* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
+	 * a time to the block device IO layer. */
+
+	nbufs = 0;
+
+	for (next = start; next < max; next++) {
+		err = journal_bmap(journal, next, &blocknr);
+
+		if (err) {
+			printk (KERN_ERR "JBD: bad block at offset %u\n",
+				next);
+			goto failed;
+		}
+
+		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+		if (!bh) {
+			err = -ENOMEM;
+			goto failed;
+		}
+
+		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
+			bufs[nbufs++] = bh;
+			if (nbufs == MAXBUF) {
+				ll_rw_block(READ, nbufs, bufs);
+				journal_brelse_array(bufs, nbufs);
+				nbufs = 0;
+			}
+		} else
+			brelse(bh);
+	}
+
+	if (nbufs)
+		ll_rw_block(READ, nbufs, bufs);
+	err = 0;
+
+failed:
+	if (nbufs) 
+		journal_brelse_array(bufs, nbufs);
+	return err;
+}
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Read a block from the journal
+ */
+
+static int jread(struct buffer_head **bhp, journal_t *journal, 
+		 unsigned int offset)
+{
+	int err;
+	unsigned long blocknr;
+	struct buffer_head *bh;
+
+	*bhp = NULL;
+
+	if (offset >= journal->j_maxlen) {
+		printk(KERN_ERR "JBD: corrupted journal superblock\n");
+		return -EIO;
+	}
+
+	err = journal_bmap(journal, offset, &blocknr);
+
+	if (err) {
+		printk (KERN_ERR "JBD: bad block at offset %u\n",
+			offset);
+		return err;
+	}
+
+	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+	if (!bh)
+		return -ENOMEM;
+
+	if (!buffer_uptodate(bh)) {
+		/* If this is a brand new buffer, start readahead.
+                   Otherwise, we assume we are already reading it.  */
+		if (!buffer_req(bh))
+			do_readahead(journal, offset);
+		wait_on_buffer(bh);
+	}
+
+	if (!buffer_uptodate(bh)) {
+		printk (KERN_ERR "JBD: Failed to read block at offset %u\n",
+			offset);
+		brelse(bh);
+		return -EIO;
+	}
+
+	*bhp = bh;
+	return 0;
+}
+
+
+/*
+ * Count the number of in-use tags in a journal descriptor block.
+ */
+
+static int count_tags(struct buffer_head *bh, int size)
+{
+	char *			tagp;
+	journal_block_tag_t *	tag;
+	int			nr = 0;
+
+	tagp = &bh->b_data[sizeof(journal_header_t)];
+
+	while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
+		tag = (journal_block_tag_t *) tagp;
+
+		nr++;
+		tagp += sizeof(journal_block_tag_t);
+		if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
+			tagp += 16;
+
+		if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
+			break;
+	}
+
+	return nr;
+}
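+
+/*
+ * Descriptor block layout assumed above (sketch):
+ *
+ *	journal_header_t                 - magic, blocktype, sequence
+ *	journal_block_tag_t              - t_blocknr + t_flags
+ *	[16 bytes of UUID]               - only when SAME_UUID is not set
+ *	journal_block_tag_t ...          - and so on, until LAST_TAG is set
+ */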
+
+
+/* Make sure we wrap around the log correctly! */
+#define wrap(journal, var)						\
+do {									\
+	if (var >= (journal)->j_last)					\
+		var -= ((journal)->j_last - (journal)->j_first);	\
+} while (0)
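+
+/*
+ * Example (illustrative values): with j_first == 1 and j_last == 1024,
+ * a block offset of 1024 wraps back around to 1.
+ */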
+
+/**
+ * int journal_recover(journal_t *journal) - recovers an on-disk journal
+ * @journal: the journal to recover
+ * 
+ * The primary function for recovering the log contents when mounting a
+ * journaled device.  
+ *
+ * Recovery is done in three passes.  In the first pass, we look for the
+ * end of the log.  In the second, we assemble the list of revoke
+ * blocks.  In the third and final pass, we replay any un-revoked blocks
+ * in the log.  
+ */
+int journal_recover(journal_t *journal)
+{
+	int			err;
+	journal_superblock_t *	sb;
+
+	struct recovery_info	info;
+
+	memset(&info, 0, sizeof(info));
+	sb = journal->j_superblock;
+
+	/* 
+	 * The journal superblock's s_start field (the current log head)
+	 * is always zero if, and only if, the journal was cleanly
+	 * unmounted.  
+	 */
+
+	if (!sb->s_start) {
+		jbd_debug(1, "No recovery required, last transaction %d\n",
+			  be32_to_cpu(sb->s_sequence));
+		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
+		return 0;
+	}
+
+	err = do_one_pass(journal, &info, PASS_SCAN);
+	if (!err)
+		err = do_one_pass(journal, &info, PASS_REVOKE);
+	if (!err)
+		err = do_one_pass(journal, &info, PASS_REPLAY);
+
+	jbd_debug(0, "JBD: recovery, exit status %d, "
+		  "recovered transactions %u to %u\n",
+		  err, info.start_transaction, info.end_transaction);
+	jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n", 
+		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
+
+	/* Restart the log at the next transaction ID, thus invalidating
+	 * any existing commit records in the log. */
+	journal->j_transaction_sequence = ++info.end_transaction;
+
+	journal_clear_revoke(journal);
+	sync_blockdev(journal->j_fs_dev);
+	return err;
+}
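+
+/*
+ * For orientation only (not enforced here): journal_load() runs
+ * journal_recover() when mounting, while journal_wipe() uses
+ * journal_skip_recovery() below when the log contents are to be
+ * discarded rather than replayed.
+ */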
+
+/**
+ * int journal_skip_recovery() - Start journal and wipe existing records
+ * @journal: journal to startup
+ * 
+ * Locate any valid recovery information from the journal and set up the
+ * journal structures in memory to ignore it (presumably because the
+ * caller has evidence that it is out of date).  
+ * This function doesn't appear to be exported.
+ *
+ * We perform one pass over the journal to allow us to tell the user how
+ * much recovery information is being erased, and to let us initialise
+ * the journal transaction sequence numbers to the next unused ID. 
+ */
+int journal_skip_recovery(journal_t *journal)
+{
+	int			err;
+	journal_superblock_t *	sb;
+
+	struct recovery_info	info;
+
+	memset (&info, 0, sizeof(info));
+	sb = journal->j_superblock;
+
+	err = do_one_pass(journal, &info, PASS_SCAN);
+
+	if (err) {
+		printk(KERN_ERR "JBD: error %d scanning journal\n", err);
+		++journal->j_transaction_sequence;
+	} else {
+#ifdef CONFIG_JBD_DEBUG
+		int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
+#endif
+		jbd_debug(0, 
+			  "JBD: ignoring %d transaction%s from the journal.\n",
+			  dropped, (dropped == 1) ? "" : "s");
+		journal->j_transaction_sequence = ++info.end_transaction;
+	}
+
+	journal->j_tail = 0;
+	return err;
+}
+
+static int do_one_pass(journal_t *journal,
+			struct recovery_info *info, enum passtype pass)
+{
+	unsigned int		first_commit_ID, next_commit_ID;
+	unsigned long		next_log_block;
+	int			err, success = 0;
+	journal_superblock_t *	sb;
+	journal_header_t * 	tmp;
+	struct buffer_head *	bh;
+	unsigned int		sequence;
+	int			blocktype;
+
+	/* Precompute the maximum metadata descriptors in a descriptor block */
+	int			MAX_BLOCKS_PER_DESC;
+	MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
+			       / sizeof(journal_block_tag_t));
+
+	/* 
+	 * First thing is to establish what we expect to find in the log
+	 * (in terms of transaction IDs), and where (in terms of log
+	 * block offsets): query the superblock.  
+	 */
+
+	sb = journal->j_superblock;
+	next_commit_ID = be32_to_cpu(sb->s_sequence);
+	next_log_block = be32_to_cpu(sb->s_start);
+
+	first_commit_ID = next_commit_ID;
+	if (pass == PASS_SCAN)
+		info->start_transaction = first_commit_ID;
+
+	jbd_debug(1, "Starting recovery pass %d\n", pass);
+
+	/*
+	 * Now we walk through the log, transaction by transaction,
+	 * making sure that each transaction has a commit block in the
+	 * expected place.  Each complete transaction gets replayed back
+	 * into the main filesystem. 
+	 */
+
+	while (1) {
+		int			flags;
+		char *			tagp;
+		journal_block_tag_t *	tag;
+		struct buffer_head *	obh;
+		struct buffer_head *	nbh;
+
+		cond_resched();		/* We're under lock_kernel() */
+
+		/* If we already know where to stop the log traversal,
+		 * check right now that we haven't gone past the end of
+		 * the log. */
+
+		if (pass != PASS_SCAN)
+			if (tid_geq(next_commit_ID, info->end_transaction))
+				break;
+
+		jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+			  next_commit_ID, next_log_block, journal->j_last);
+
+		/* Skip over each chunk of the transaction looking for
+		 * either the next descriptor block or the final commit
+		 * record. */
+
+		jbd_debug(3, "JBD: checking block %ld\n", next_log_block);
+		err = jread(&bh, journal, next_log_block);
+		if (err)
+			goto failed;
+
+		next_log_block++;
+		wrap(journal, next_log_block);
+
+		/* What kind of buffer is it? 
+		 * 
+		 * If it is a descriptor block, check that it has the
+		 * expected sequence number.  Otherwise, we're all done
+		 * here. */
+
+		tmp = (journal_header_t *)bh->b_data;
+
+		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
+			brelse(bh);
+			break;
+		}
+
+		blocktype = be32_to_cpu(tmp->h_blocktype);
+		sequence = be32_to_cpu(tmp->h_sequence);
+		jbd_debug(3, "Found magic %d, sequence %d\n", 
+			  blocktype, sequence);
+
+		if (sequence != next_commit_ID) {
+			brelse(bh);
+			break;
+		}
+
+		/* OK, we have a valid descriptor block which matches
+		 * all of the sequence number checks.  What are we going
+		 * to do with it?  That depends on the pass... */
+
+		switch(blocktype) {
+		case JFS_DESCRIPTOR_BLOCK:
+			/* If it is a valid descriptor block, replay it
+			 * in pass REPLAY; otherwise, just skip over the
+			 * blocks it describes. */
+			if (pass != PASS_REPLAY) {
+				next_log_block +=
+					count_tags(bh, journal->j_blocksize);
+				wrap(journal, next_log_block);
+				brelse(bh);
+				continue;
+			}
+
+			/* A descriptor block: we can now write all of
+			 * the data blocks.  Yay, useful work is finally
+			 * getting done here! */
+
+			tagp = &bh->b_data[sizeof(journal_header_t)];
+			while ((tagp - bh->b_data +sizeof(journal_block_tag_t))
+			       <= journal->j_blocksize) {
+				unsigned long io_block;
+
+				tag = (journal_block_tag_t *) tagp;
+				flags = be32_to_cpu(tag->t_flags);
+
+				io_block = next_log_block++;
+				wrap(journal, next_log_block);
+				err = jread(&obh, journal, io_block);
+				if (err) {
+					/* Recover what we can, but
+					 * report failure at the end. */
+					success = err;
+					printk (KERN_ERR 
+						"JBD: IO error %d recovering "
+						"block %ld in log\n",
+						err, io_block);
+				} else {
+					unsigned long blocknr;
+
+					J_ASSERT(obh != NULL);
+					blocknr = be32_to_cpu(tag->t_blocknr);
+
+					/* If the block has been
+					 * revoked, then we're all done
+					 * here. */
+					if (journal_test_revoke
+					    (journal, blocknr, 
+					     next_commit_ID)) {
+						brelse(obh);
+						++info->nr_revoke_hits;
+						goto skip_write;
+					}
+
+					/* Find a buffer for the new
+					 * data being restored */
+					nbh = __getblk(journal->j_fs_dev,
+							blocknr,
+							journal->j_blocksize);
+					if (nbh == NULL) {
+						printk(KERN_ERR 
+						       "JBD: Out of memory "
+						       "during recovery.\n");
+						err = -ENOMEM;
+						brelse(bh);
+						brelse(obh);
+						goto failed;
+					}
+
+					lock_buffer(nbh);
+					memcpy(nbh->b_data, obh->b_data,
+							journal->j_blocksize);
+					if (flags & JFS_FLAG_ESCAPE) {
+						/* restore escaped magic in
+						 * the recovered block */
+						*((__be32 *)nbh->b_data) =
+						cpu_to_be32(JFS_MAGIC_NUMBER);
+					}
+
+					BUFFER_TRACE(nbh, "marking dirty");
+					set_buffer_uptodate(nbh);
+					mark_buffer_dirty(nbh);
+					BUFFER_TRACE(nbh, "marking uptodate");
+					++info->nr_replays;
+					/* ll_rw_block(WRITE, 1, &nbh); */
+					unlock_buffer(nbh);
+					brelse(obh);
+					brelse(nbh);
+				}
+
+			skip_write:
+				tagp += sizeof(journal_block_tag_t);
+				if (!(flags & JFS_FLAG_SAME_UUID))
+					tagp += 16;
+
+				if (flags & JFS_FLAG_LAST_TAG)
+					break;
+			}
+
+			brelse(bh);
+			continue;
+
+		case JFS_COMMIT_BLOCK:
+			/* Found an expected commit block: not much to
+			 * do other than move on to the next sequence
+			 * number. */
+			brelse(bh);
+			next_commit_ID++;
+			continue;
+
+		case JFS_REVOKE_BLOCK:
+			/* If we aren't in the REVOKE pass, then we can
+			 * just skip over this block. */
+			if (pass != PASS_REVOKE) {
+				brelse(bh);
+				continue;
+			}
+
+			err = scan_revoke_records(journal, bh,
+						  next_commit_ID, info);
+			brelse(bh);
+			if (err)
+				goto failed;
+			continue;
+
+		default:
+			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
+				  blocktype);
+			goto done;
+		}
+	}
+
+ done:
+	/* 
+	 * We broke out of the log scan loop: either we came to the
+	 * known end of the log or we found an unexpected block in the
+	 * log.  If the latter happened, then we know that the "current"
+	 * transaction marks the end of the valid log.
+	 */
+
+	if (pass == PASS_SCAN)
+		info->end_transaction = next_commit_ID;
+	else {
+		/* It's really bad news if different passes end up at
+		 * different places (but possible due to IO errors). */
+		if (info->end_transaction != next_commit_ID) {
+			printk (KERN_ERR "JBD: recovery pass %d ended at "
+				"transaction %u, expected %u\n",
+				pass, next_commit_ID, info->end_transaction);
+			if (!success)
+				success = -EIO;
+		}
+	}
+
+	return success;
+
+ failed:
+	return err;
+}
+
+
+/* Scan a revoke record, marking all blocks mentioned as revoked. */
+
+static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, 
+			       tid_t sequence, struct recovery_info *info)
+{
+	journal_revoke_header_t *header;
+	int offset, max;
+
+	header = (journal_revoke_header_t *) bh->b_data;
+	offset = sizeof(journal_revoke_header_t);
+	max = be32_to_cpu(header->r_count);
+
+	while (offset < max) {
+		unsigned long blocknr;
+		int err;
+
+		blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
+		offset += 4;
+		err = journal_set_revoke(journal, blocknr, sequence);
+		if (err)
+			return err;
+		++info->nr_revokes;
+	}
+	return 0;
+}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
new file mode 100644
index 0000000..d327a59
--- /dev/null
+++ b/fs/jbd/revoke.c
@@ -0,0 +1,702 @@
+/*
+ * linux/fs/revoke.c
+ * 
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
+ *
+ * Copyright 2000 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal revoke routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.
+ *
+ * Revoke is the mechanism used to prevent old log records for deleted
+ * metadata from being replayed on top of newer data using the same
+ * blocks.  The revoke mechanism is used in two separate places:
+ * 
+ * + Commit: during commit we write the entire list of the current
+ *   transaction's revoked blocks to the journal
+ * 
+ * + Recovery: during recovery we record the transaction ID of all
+ *   revoked blocks.  If there are multiple revoke records in the log
+ *   for a single block, only the last one counts, and if there is a log
+ *   entry for a block beyond the last revoke, then that log entry still
+ *   gets replayed.
+ *
+ * We can get interactions between revokes and new log data within a
+ * single transaction:
+ *
+ * Block is revoked and then journaled:
+ *   The desired end result is the journaling of the new block, so we 
+ *   cancel the revoke before the transaction commits.
+ *
+ * Block is journaled and then revoked:
+ *   The revoke must take precedence over the write of the block, so we
+ *   need either to cancel the journal entry or to write the revoke
+ *   later in the log than the log block.  In this case, we choose the
+ *   latter: journaling a block cancels any revoke record for that block
+ *   in the current transaction, so any revoke for that block in the
+ *   transaction must have happened after the block was journaled and so
+ *   the revoke must take precedence.
+ *
+ * Block is revoked and then written as data: 
+ *   The data write is allowed to succeed, but the revoke is _not_
+ *   cancelled.  We still need to prevent old log records from
+ *   overwriting the new data.  We don't even need to clear the revoke
+ *   bit here.
+ *
+ * Revoke information on buffers is a tri-state value:
+ *
+ * RevokeValid clear:	no cached revoke status, need to look it up
+ * RevokeValid set, Revoked clear:
+ *			buffer has not been revoked, and cancel_revoke
+ *			need do nothing.
+ * RevokeValid set, Revoked set:
+ *			buffer has been revoked.  
+ */
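+
+/*
+ * Worked example (illustrative): transaction T frees metadata block B
+ * and calls journal_revoke(B).  If B is reallocated as metadata and
+ * journalled again within T, journal_get_write_access() cancels the
+ * revoke via journal_cancel_revoke() and the new contents win.  If B is
+ * instead reused for ordinary data, the revoke record remains, so
+ * recovery will not replay any stale log copy of B over the new data.
+ */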
+
+#ifndef __KERNEL__
+#include "jfs_user.h"
+#else
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#endif
+
+static kmem_cache_t *revoke_record_cache;
+static kmem_cache_t *revoke_table_cache;
+
+/* Each revoke record represents one single revoked block.  During
+   journal replay, this involves recording the transaction ID of the
+   last transaction to revoke this block. */
+
+struct jbd_revoke_record_s 
+{
+	struct list_head  hash;
+	tid_t		  sequence;	/* Used for recovery only */
+	unsigned long	  blocknr;
+};
+
+
+/* The revoke table is just a simple hash table of revoke records. */
+struct jbd_revoke_table_s
+{
+	/* It is conceivable that we might want a larger hash table
+	 * for recovery.  Must be a power of two. */
+	int		  hash_size; 
+	int		  hash_shift; 
+	struct list_head *hash_table;
+};
+
+
+#ifdef __KERNEL__
+static void write_one_revoke_record(journal_t *, transaction_t *,
+				    struct journal_head **, int *,
+				    struct jbd_revoke_record_s *);
+static void flush_descriptor(journal_t *, struct journal_head *, int);
+#endif
+
+/* Utility functions to maintain the revoke table */
+
+/* Borrowed from buffer.c: this is a tried and tested block hash function */
+static inline int hash(journal_t *journal, unsigned long block)
+{
+	struct jbd_revoke_table_s *table = journal->j_revoke;
+	int hash_shift = table->hash_shift;
+
+	return ((block << (hash_shift - 6)) ^
+		(block >> 13) ^
+		(block << (hash_shift - 12))) & (table->hash_size - 1);
+}
+
+int insert_revoke_hash(journal_t *journal, unsigned long blocknr, tid_t seq)
+{
+	struct list_head *hash_list;
+	struct jbd_revoke_record_s *record;
+
+repeat:
+	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
+	if (!record)
+		goto oom;
+
+	record->sequence = seq;
+	record->blocknr = blocknr;
+	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
+	spin_lock(&journal->j_revoke_lock);
+	list_add(&record->hash, hash_list);
+	spin_unlock(&journal->j_revoke_lock);
+	return 0;
+
+oom:
+	if (!journal_oom_retry)
+		return -ENOMEM;
+	jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+	yield();
+	goto repeat;
+}
+
+/* Find a revoke record in the journal's hash table. */
+
+static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
+						      unsigned long blocknr)
+{
+	struct list_head *hash_list;
+	struct jbd_revoke_record_s *record;
+
+	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
+
+	spin_lock(&journal->j_revoke_lock);
+	record = (struct jbd_revoke_record_s *) hash_list->next;
+	while (&(record->hash) != hash_list) {
+		if (record->blocknr == blocknr) {
+			spin_unlock(&journal->j_revoke_lock);
+			return record;
+		}
+		record = (struct jbd_revoke_record_s *) record->hash.next;
+	}
+	spin_unlock(&journal->j_revoke_lock);
+	return NULL;
+}
+
+int __init journal_init_revoke_caches(void)
+{
+	revoke_record_cache = kmem_cache_create("revoke_record",
+					   sizeof(struct jbd_revoke_record_s),
+					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (revoke_record_cache == 0)
+		return -ENOMEM;
+
+	revoke_table_cache = kmem_cache_create("revoke_table",
+					   sizeof(struct jbd_revoke_table_s),
+					   0, 0, NULL, NULL);
+	if (revoke_table_cache == 0) {
+		kmem_cache_destroy(revoke_record_cache);
+		revoke_record_cache = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void journal_destroy_revoke_caches(void)
+{
+	kmem_cache_destroy(revoke_record_cache);
+	revoke_record_cache = NULL;
+	kmem_cache_destroy(revoke_table_cache);
+	revoke_table_cache = NULL;
+}
+
+/* Initialise the revoke table for a given journal to a given size. */
+
+int journal_init_revoke(journal_t *journal, int hash_size)
+{
+	int shift, tmp;
+
+	J_ASSERT (journal->j_revoke_table[0] == NULL);
+
+	shift = 0;
+	tmp = hash_size;
+	while((tmp >>= 1UL) != 0UL)
+		shift++;
+
+	journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+	if (!journal->j_revoke_table[0])
+		return -ENOMEM;
+	journal->j_revoke = journal->j_revoke_table[0];
+
+	/* Check that the hash_size is a power of two */
+	J_ASSERT ((hash_size & (hash_size-1)) == 0);
+
+	journal->j_revoke->hash_size = hash_size;
+
+	journal->j_revoke->hash_shift = shift;
+
+	journal->j_revoke->hash_table =
+		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
+	if (!journal->j_revoke->hash_table) {
+		kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+		journal->j_revoke = NULL;
+		return -ENOMEM;
+	}
+
+	for (tmp = 0; tmp < hash_size; tmp++)
+		INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+
+	journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+	if (!journal->j_revoke_table[1]) {
+		kfree(journal->j_revoke_table[0]->hash_table);
+		kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+		return -ENOMEM;
+	}
+
+	journal->j_revoke = journal->j_revoke_table[1];
+
+	/* Check that the hash_size is a power of two */
+	J_ASSERT ((hash_size & (hash_size-1)) == 0);
+
+	journal->j_revoke->hash_size = hash_size;
+
+	journal->j_revoke->hash_shift = shift;
+
+	journal->j_revoke->hash_table =
+		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
+	if (!journal->j_revoke->hash_table) {
+		kfree(journal->j_revoke_table[0]->hash_table);
+		kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+		kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
+		journal->j_revoke = NULL;
+		return -ENOMEM;
+	}
+
+	for (tmp = 0; tmp < hash_size; tmp++)
+		INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+
+	spin_lock_init(&journal->j_revoke_lock);
+
+	return 0;
+}
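+
+/*
+ * Note: two tables are allocated so that one can keep collecting revokes
+ * for the running transaction while the other, belonging to the
+ * committing transaction, is written out -- see
+ * journal_switch_revoke_table() and journal_write_revoke_records().
+ */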
+
+/* Destroy a journal's revoke table.  The table must already be empty! */
+
+void journal_destroy_revoke(journal_t *journal)
+{
+	struct jbd_revoke_table_s *table;
+	struct list_head *hash_list;
+	int i;
+
+	table = journal->j_revoke_table[0];
+	if (!table)
+		return;
+
+	for (i=0; i<table->hash_size; i++) {
+		hash_list = &table->hash_table[i];
+		J_ASSERT (list_empty(hash_list));
+	}
+
+	kfree(table->hash_table);
+	kmem_cache_free(revoke_table_cache, table);
+	journal->j_revoke = NULL;
+
+	table = journal->j_revoke_table[1];
+	if (!table)
+		return;
+
+	for (i=0; i<table->hash_size; i++) {
+		hash_list = &table->hash_table[i];
+		J_ASSERT (list_empty(hash_list));
+	}
+
+	kfree(table->hash_table);
+	kmem_cache_free(revoke_table_cache, table);
+	journal->j_revoke = NULL;
+}
+
+
+#ifdef __KERNEL__
+
+/* 
+ * journal_revoke: revoke a given buffer_head from the journal.  This
+ * prevents the block from being replayed during recovery if we take a
+ * crash after this current transaction commits.  Any subsequent
+ * metadata writes of the buffer in this transaction cancel the
+ * revoke.  
+ *
+ * Note that this call may block --- it is up to the caller to make
+ * sure that there are no further calls to journal_write_metadata
+ * before the revoke is complete.  In ext3, this implies calling the
+ * revoke before clearing the block bitmap when we are deleting
+ * metadata. 
+ *
+ * Revoke performs a journal_forget on any buffer_head passed in as a
+ * parameter, but does _not_ forget the buffer_head if the bh was only
+ * found implicitly. 
+ *
+ * bh_in may not be a journalled buffer - it may have come off
+ * the hash tables without an attached journal_head.
+ *
+ * If bh_in is non-zero, journal_revoke() will decrement its b_count
+ * by one.
+ */
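+
+/*
+ * Illustrative ordering for callers (a sketch of the rule above): when a
+ * filesystem deletes a metadata block it should do
+ *
+ *	journal_revoke(handle, blocknr, bh);
+ *	... and only then clear the block in its allocation bitmap ...
+ *
+ * so that recovery cannot replay stale metadata over a reused block.
+ */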
+
+int journal_revoke(handle_t *handle, unsigned long blocknr, 
+		   struct buffer_head *bh_in)
+{
+	struct buffer_head *bh = NULL;
+	journal_t *journal;
+	struct block_device *bdev;
+	int err;
+
+	might_sleep();
+	if (bh_in)
+		BUFFER_TRACE(bh_in, "enter");
+
+	journal = handle->h_transaction->t_journal;
+	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
+		J_ASSERT (!"Cannot set revoke feature!");
+		return -EINVAL;
+	}
+
+	bdev = journal->j_fs_dev;
+	bh = bh_in;
+
+	if (!bh) {
+		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
+		if (bh)
+			BUFFER_TRACE(bh, "found on hash");
+	}
+#ifdef JBD_EXPENSIVE_CHECKING
+	else {
+		struct buffer_head *bh2;
+
+		/* If there is a different buffer_head lying around in
+		 * memory anywhere... */
+		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
+		if (bh2) {
+			/* ... and it has RevokeValid status... */
+			if (bh2 != bh && buffer_revokevalid(bh2))
+				/* ...then it better be revoked too,
+				 * since it's illegal to create a revoke
+				 * record against a buffer_head which is
+				 * not marked revoked --- that would
+				 * risk missing a subsequent revoke
+				 * cancel. */
+				J_ASSERT_BH(bh2, buffer_revoked(bh2));
+			put_bh(bh2);
+		}
+	}
+#endif
+
+	/* We really ought not ever to revoke twice in a row without
+           first having the revoke cancelled: it's illegal to free a
+           block twice without allocating it in between! */
+	if (bh) {
+		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
+				 "inconsistent data on disk")) {
+			if (!bh_in)
+				brelse(bh);
+			return -EIO;
+		}
+		set_buffer_revoked(bh);
+		set_buffer_revokevalid(bh);
+		if (bh_in) {
+			BUFFER_TRACE(bh_in, "call journal_forget");
+			journal_forget(handle, bh_in);
+		} else {
+			BUFFER_TRACE(bh, "call brelse");
+			__brelse(bh);
+		}
+	}
+
+	jbd_debug(2, "insert revoke for block %lu, bh_in=%p\n", blocknr, bh_in);
+	err = insert_revoke_hash(journal, blocknr,
+				handle->h_transaction->t_tid);
+	BUFFER_TRACE(bh_in, "exit");
+	return err;
+}
+
+/*
+ * Cancel an outstanding revoke.  For use only internally by the
+ * journaling code (called from journal_get_write_access).
+ *
+ * We trust buffer_revoked() on the buffer if the buffer is already
+ * being journaled: if there is no revoke pending on the buffer, then we
+ * don't do anything here.
+ *
+ * This would break if it were possible for a buffer to be revoked and
+ * discarded, and then reallocated within the same transaction.  In such
+ * a case we would have lost the revoked bit, but when we arrived here
+ * the second time we would still have a pending revoke to cancel.  So,
+ * do not trust the Revoked bit on buffers unless RevokeValid is also
+ * set.
+ *
+ * The caller must have the journal locked.
+ */
+int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+{
+	struct jbd_revoke_record_s *record;
+	journal_t *journal = handle->h_transaction->t_journal;
+	int need_cancel;
+	int did_revoke = 0;	/* akpm: debug */
+	struct buffer_head *bh = jh2bh(jh);
+
+	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
+
+	/* Is the existing Revoke bit valid?  If so, we trust it, and
+	 * only perform the full cancel if the revoke bit is set.  If
+	 * not, we can't trust the revoke bit, and we need to do the
+	 * full search for a revoke record. */
+	if (test_set_buffer_revokevalid(bh)) {
+		need_cancel = test_clear_buffer_revoked(bh);
+	} else {
+		need_cancel = 1;
+		clear_buffer_revoked(bh);
+	}
+
+	if (need_cancel) {
+		record = find_revoke_record(journal, bh->b_blocknr);
+		if (record) {
+			jbd_debug(4, "cancelled existing revoke on "
+				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
+			spin_lock(&journal->j_revoke_lock);
+			list_del(&record->hash);
+			spin_unlock(&journal->j_revoke_lock);
+			kmem_cache_free(revoke_record_cache, record);
+			did_revoke = 1;
+		}
+	}
+
+#ifdef JBD_EXPENSIVE_CHECKING
+	/* There better not be one left behind by now! */
+	record = find_revoke_record(journal, bh->b_blocknr);
+	J_ASSERT_JH(jh, record == NULL);
+#endif
+
+	/* Finally, have we just cleared revoke on an unhashed
+	 * buffer_head?  If so, we'd better make sure we clear the
+	 * revoked status on any hashed alias too, otherwise the revoke
+	 * state machine will get very upset later on. */
+	if (need_cancel) {
+		struct buffer_head *bh2;
+		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
+		if (bh2) {
+			if (bh2 != bh)
+				clear_buffer_revoked(bh2);
+			__brelse(bh2);
+		}
+	}
+	return did_revoke;
+}
+
+/*
+ * journal_switch_revoke_table(): select j_revoke for the next transaction.
+ * We do not want to suspend any processing until all revokes are
+ * written -bzzz
+ */
+void journal_switch_revoke_table(journal_t *journal)
+{
+	int i;
+
+	if (journal->j_revoke == journal->j_revoke_table[0])
+		journal->j_revoke = journal->j_revoke_table[1];
+	else
+		journal->j_revoke = journal->j_revoke_table[0];
+
+	for (i = 0; i < journal->j_revoke->hash_size; i++) 
+		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
+}
+
+/*
+ * Write revoke records to the journal for all entries in the current
+ * revoke hash, deleting the entries as we go.
+ *
+ * Called with the journal lock held.
+ */
+
+void journal_write_revoke_records(journal_t *journal, 
+				  transaction_t *transaction)
+{
+	struct journal_head *descriptor;
+	struct jbd_revoke_record_s *record;
+	struct jbd_revoke_table_s *revoke;
+	struct list_head *hash_list;
+	int i, offset, count;
+
+	descriptor = NULL; 
+	offset = 0;
+	count = 0;
+
+	/* select revoke table for committing transaction */
+	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
+		journal->j_revoke_table[1] : journal->j_revoke_table[0];
+
+	for (i = 0; i < revoke->hash_size; i++) {
+		hash_list = &revoke->hash_table[i];
+
+		while (!list_empty(hash_list)) {
+			record = (struct jbd_revoke_record_s *) 
+				hash_list->next;
+			write_one_revoke_record(journal, transaction,
+						&descriptor, &offset, 
+						record);
+			count++;
+			list_del(&record->hash);
+			kmem_cache_free(revoke_record_cache, record);
+		}
+	}
+	if (descriptor)
+		flush_descriptor(journal, descriptor, offset);
+	jbd_debug(1, "Wrote %d revoke records\n", count);
+}
+
+/* 
+ * Write out one revoke record.  We need to create a new descriptor
+ * block if the old one is full or if we have not already created one.  
+ */
+
+static void write_one_revoke_record(journal_t *journal, 
+				    transaction_t *transaction,
+				    struct journal_head **descriptorp, 
+				    int *offsetp,
+				    struct jbd_revoke_record_s *record)
+{
+	struct journal_head *descriptor;
+	int offset;
+	journal_header_t *header;
+
+	/* If we are already aborting, this all becomes a noop.  We
+           still need to go round the loop in
+           journal_write_revoke_records in order to free all of the
+           revoke records: only the IO to the journal is omitted. */
+	if (is_journal_aborted(journal))
+		return;
+
+	descriptor = *descriptorp;
+	offset = *offsetp;
+
+	/* Make sure we have a descriptor with space left for the record */
+	if (descriptor) {
+		if (offset == journal->j_blocksize) {
+			flush_descriptor(journal, descriptor, offset);
+			descriptor = NULL;
+		}
+	}
+
+	if (!descriptor) {
+		descriptor = journal_get_descriptor_buffer(journal);
+		if (!descriptor)
+			return;
+		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
+		header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
+		header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
+		header->h_sequence  = cpu_to_be32(transaction->t_tid);
+
+		/* Record it so that we can wait for IO completion later */
+		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
+		journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+
+		offset = sizeof(journal_revoke_header_t);
+		*descriptorp = descriptor;
+	}
+
+	* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = 
+		cpu_to_be32(record->blocknr);
+	offset += 4;
+	*offsetp = offset;
+}
+
+/* 
+ * Flush a revoke descriptor out to the journal.  If we are aborting,
+ * this is a noop; otherwise we are generating a buffer which needs to
+ * be waited for during commit, so it has to go onto the appropriate
+ * journal buffer list.
+ */
+
+static void flush_descriptor(journal_t *journal, 
+			     struct journal_head *descriptor, 
+			     int offset)
+{
+	journal_revoke_header_t *header;
+	struct buffer_head *bh = jh2bh(descriptor);
+
+	if (is_journal_aborted(journal)) {
+		put_bh(bh);
+		return;
+	}
+
+	header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+	header->r_count = cpu_to_be32(offset);
+	set_buffer_jwrite(bh);
+	BUFFER_TRACE(bh, "write");
+	set_buffer_dirty(bh);
+	ll_rw_block(WRITE, 1, &bh);
+}
+#endif
+
+/* 
+ * Revoke support for recovery.
+ *
+ * Recovery needs to be able to:
+ *
+ *  record all revoke records, including the tid of the latest instance
+ *  of each revoke in the journal
+ *
+ *  check whether a given block in a given transaction should be replayed
+ *  (ie. has not been revoked by a revoke record in that or a subsequent
+ *  transaction)
+ * 
+ *  empty the revoke table after recovery.
+ */
+
+/*
+ * First, setting revoke records.  We create a new revoke record for
+ * every block ever revoked in the log as we scan it for recovery, and
+ * we update the existing records if we find multiple revokes for a
+ * single block. 
+ */
+
+int journal_set_revoke(journal_t *journal, 
+		       unsigned long blocknr, 
+		       tid_t sequence)
+{
+	struct jbd_revoke_record_s *record;
+
+	record = find_revoke_record(journal, blocknr);
+	if (record) {
+		/* If we have multiple occurrences, only record the
+		 * latest sequence number in the hashed record */
+		if (tid_gt(sequence, record->sequence))
+			record->sequence = sequence;
+		return 0;
+	} 
+	return insert_revoke_hash(journal, blocknr, sequence);
+}
+
+/* 
+ * Test revoke records.  For a given block referenced in the log, has
+ * that block been revoked?  A revoke record with a given transaction
+ * sequence number revokes all blocks in that transaction and earlier
+ * ones, but later transactions still need to be replayed.
+ */
+
+int journal_test_revoke(journal_t *journal, 
+			unsigned long blocknr,
+			tid_t sequence)
+{
+	struct jbd_revoke_record_s *record;
+
+	record = find_revoke_record(journal, blocknr);
+	if (!record)
+		return 0;
+	if (tid_gt(sequence, record->sequence))
+		return 0;
+	return 1;
+}
+
+/*
+ * Finally, once recovery is over, we need to clear the revoke table so
+ * that it can be reused by the running filesystem.
+ */
+
+void journal_clear_revoke(journal_t *journal)
+{
+	int i;
+	struct list_head *hash_list;
+	struct jbd_revoke_record_s *record;
+	struct jbd_revoke_table_s *revoke;
+
+	revoke = journal->j_revoke;
+
+	for (i = 0; i < revoke->hash_size; i++) {
+		hash_list = &revoke->hash_table[i];
+		while (!list_empty(hash_list)) {
+			record = (struct jbd_revoke_record_s*) hash_list->next;
+			list_del(&record->hash);
+			kmem_cache_free(revoke_record_cache, record);
+		}
+	}
+}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
new file mode 100644
index 0000000..932e7c1
--- /dev/null
+++ b/fs/jbd/transaction.c
@@ -0,0 +1,2062 @@
+/*
+ * linux/fs/transaction.c
+ * 
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Generic filesystem transaction handling code; part of the ext2fs
+ * journaling system.  
+ *
+ * This file manages transactions (compound commits managed by the
+ * journaling code) and handles (individual atomic operations by the
+ * filesystem).
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+/*
+ * get_transaction: obtain a new transaction_t object.
+ *
+ * Simply allocate and initialise a new transaction.  Create it in
+ * RUNNING state and add it to the current journal (which should not
+ * have an existing running transaction: we only make a new transaction
+ * once we have started to commit the old one).
+ *
+ * Preconditions:
+ *	The journal MUST be locked.  We don't perform atomic mallocs on the
+ *	new transaction	and we can't block without protecting against other
+ *	processes trying to touch the journal while it is in transition.
+ *
+ * Called under j_state_lock
+ */
+
+static transaction_t *
+get_transaction(journal_t *journal, transaction_t *transaction)
+{
+	transaction->t_journal = journal;
+	transaction->t_state = T_RUNNING;
+	transaction->t_tid = journal->j_transaction_sequence++;
+	transaction->t_expires = jiffies + journal->j_commit_interval;
+	spin_lock_init(&transaction->t_handle_lock);
+
+	/* Set up the commit timer for the new transaction. */
+	journal->j_commit_timer->expires = transaction->t_expires;
+	add_timer(journal->j_commit_timer);
+
+	J_ASSERT(journal->j_running_transaction == NULL);
+	journal->j_running_transaction = transaction;
+
+	return transaction;
+}
+
+/*
+ * Handle management.
+ *
+ * A handle_t is an object which represents a single atomic update to a
+ * filesystem, and which tracks all of the modifications which form part
+ * of that one update.
+ */
+
+/*
+ * start_this_handle: Given a handle, deal with any locking or stalling
+ * needed to make sure that there is enough journal space for the handle
+ * to begin.  Attach the handle to a transaction and set up the
+ * transaction's buffer credits.  
+ */
+
+static int start_this_handle(journal_t *journal, handle_t *handle)
+{
+	transaction_t *transaction;
+	int needed;
+	int nblocks = handle->h_buffer_credits;
+	transaction_t *new_transaction = NULL;
+	int ret = 0;
+
+	if (nblocks > journal->j_max_transaction_buffers) {
+		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
+		       current->comm, nblocks,
+		       journal->j_max_transaction_buffers);
+		ret = -ENOSPC;
+		goto out;
+	}
+
+alloc_transaction:
+	if (!journal->j_running_transaction) {
+		new_transaction = jbd_kmalloc(sizeof(*new_transaction),
+						GFP_NOFS);
+		if (!new_transaction) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memset(new_transaction, 0, sizeof(*new_transaction));
+	}
+
+	jbd_debug(3, "New handle %p going live.\n", handle);
+
+repeat:
+
+	/*
+	 * We need to hold j_state_lock until t_updates has been incremented,
+	 * for proper journal barrier handling
+	 */
+	spin_lock(&journal->j_state_lock);
+repeat_locked:
+	if (is_journal_aborted(journal) ||
+	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
+		spin_unlock(&journal->j_state_lock);
+		ret = -EROFS; 
+		goto out;
+	}
+
+	/* Wait on the journal's transaction barrier if necessary */
+	if (journal->j_barrier_count) {
+		spin_unlock(&journal->j_state_lock);
+		wait_event(journal->j_wait_transaction_locked,
+				journal->j_barrier_count == 0);
+		goto repeat;
+	}
+
+	if (!journal->j_running_transaction) {
+		if (!new_transaction) {
+			spin_unlock(&journal->j_state_lock);
+			goto alloc_transaction;
+		}
+		get_transaction(journal, new_transaction);
+		new_transaction = NULL;
+	}
+
+	transaction = journal->j_running_transaction;
+
+	/*
+	 * If the current transaction is locked down for commit, wait for the
+	 * lock to be released.
+	 */
+	if (transaction->t_state == T_LOCKED) {
+		DEFINE_WAIT(wait);
+
+		prepare_to_wait(&journal->j_wait_transaction_locked,
+					&wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&journal->j_state_lock);
+		schedule();
+		finish_wait(&journal->j_wait_transaction_locked, &wait);
+		goto repeat;
+	}
+
+	/*
+	 * If there is not enough space left in the log to write all potential
+	 * buffers requested by this operation, we need to stall pending a log
+	 * checkpoint to free some more log space.
+	 */
+	spin_lock(&transaction->t_handle_lock);
+	needed = transaction->t_outstanding_credits + nblocks;
+
+	if (needed > journal->j_max_transaction_buffers) {
+		/*
+		 * If the current transaction is already too large, then start
+		 * to commit it: we can then go back and attach this handle to
+		 * a new transaction.
+		 */
+		DEFINE_WAIT(wait);
+
+		jbd_debug(2, "Handle %p starting new commit...\n", handle);
+		spin_unlock(&transaction->t_handle_lock);
+		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+				TASK_UNINTERRUPTIBLE);
+		__log_start_commit(journal, transaction->t_tid);
+		spin_unlock(&journal->j_state_lock);
+		schedule();
+		finish_wait(&journal->j_wait_transaction_locked, &wait);
+		goto repeat;
+	}
+
+	/* 
+	 * The commit code assumes that it can get enough log space
+	 * without forcing a checkpoint.  This is *critical* for
+	 * correctness: a checkpoint of a buffer which is also
+	 * associated with a committing transaction creates a deadlock,
+	 * so commit simply cannot force through checkpoints.
+	 *
+	 * We must therefore ensure the necessary space in the journal
+	 * *before* starting to dirty potentially checkpointed buffers
+	 * in the new transaction. 
+	 *
+	 * The worst part is, any transaction currently committing can
+	 * reduce the free space arbitrarily.  Be careful to account for
+	 * those buffers when checkpointing.
+	 */
+
+	/*
+	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
+	 * a _lot_ of headroom: 1/4 of the journal plus the size of
+	 * the committing transaction.  Really, we only need to give it
+	 * committing_transaction->t_outstanding_credits plus "enough" for
+	 * the log control blocks.
+ * Also, this test is inconsistent with the matching one in
+	 * journal_extend().
+	 */
+	if (__log_space_left(journal) < jbd_space_needed(journal)) {
+		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
+		spin_unlock(&transaction->t_handle_lock);
+		__log_wait_for_space(journal);
+		goto repeat_locked;
+	}
+
+	/* OK, account for the buffers that this operation expects to
+	 * use and add the handle to the running transaction. */
+
+	handle->h_transaction = transaction;
+	transaction->t_outstanding_credits += nblocks;
+	transaction->t_updates++;
+	transaction->t_handle_count++;
+	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
+		  handle, nblocks, transaction->t_outstanding_credits,
+		  __log_space_left(journal));
+	spin_unlock(&transaction->t_handle_lock);
+	spin_unlock(&journal->j_state_lock);
+out:
+	if (new_transaction)
+		kfree(new_transaction);
+	return ret;
+}
+
+/* Allocate a new handle.  This should probably be in a slab... */
+static handle_t *new_handle(int nblocks)
+{
+	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	if (!handle)
+		return NULL;
+	memset(handle, 0, sizeof(*handle));
+	handle->h_buffer_credits = nblocks;
+	handle->h_ref = 1;
+
+	return handle;
+}
+
+/**
+ * handle_t *journal_start() - Obtain a new handle.  
+ * @journal: Journal to start transaction on.
+ * @nblocks: number of block buffer we might modify
+ *
+ * We make sure that the transaction can guarantee at least nblocks of
+ * modified buffers in the log.  We block until the log can guarantee
+ * that much space.  
+ *
+ * This function is visible to journal users (like ext3fs), so is not
+ * called with the journal already locked.
+ *
+ * Return a pointer to a newly allocated handle, or an ERR_PTR() value on failure
+ */
+handle_t *journal_start(journal_t *journal, int nblocks)
+{
+	handle_t *handle = journal_current_handle();
+	int err;
+
+	if (!journal)
+		return ERR_PTR(-EROFS);
+
+	if (handle) {
+		J_ASSERT(handle->h_transaction->t_journal == journal);
+		handle->h_ref++;
+		return handle;
+	}
+
+	handle = new_handle(nblocks);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+
+	current->journal_info = handle;
+
+	err = start_this_handle(journal, handle);
+	if (err < 0) {
+		jbd_free_handle(handle);
+		current->journal_info = NULL;
+		handle = ERR_PTR(err);
+	}
+	return handle;
+}
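+
+/*
+ * Typical usage (sketch only):
+ *
+ *	handle = journal_start(journal, credits);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	... journal_get_write_access() / journal_dirty_metadata() ...
+ *	journal_stop(handle);
+ *
+ * journal_stop() is defined later in this file.
+ */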
+
+/**
+ * int journal_extend() - extend buffer credits.
+ * @handle:  handle to 'extend'
+ * @nblocks: nr blocks to try to extend by.
+ * 
+ * Some transactions, such as large extends and truncates, can be done
+ * atomically all at once or in several stages.  The operation requests
+ * a credit for a number of buffer modifications in advance, but can
+ * extend its credit if it needs more.
+ *
+ * journal_extend tries to give the running handle more buffer credits.
+ * It does not guarantee the allocation - this is best-effort only.
+ * The calling process MUST be able to deal cleanly with a failure to
+ * extend here.
+ *
+ * Return 0 on success, non-zero on failure.
+ *
+ * return code < 0 implies an error
+ * return code > 0 implies normal transaction-full status.
+ */
+int journal_extend(handle_t *handle, int nblocks)
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	int result;
+	int wanted;
+
+	result = -EIO;
+	if (is_handle_aborted(handle))
+		goto out;
+
+	result = 1;
+
+	spin_lock(&journal->j_state_lock);
+
+	/* Don't extend a locked-down transaction! */
+	if (handle->h_transaction->t_state != T_RUNNING) {
+		jbd_debug(3, "denied handle %p %d blocks: "
+			  "transaction not running\n", handle, nblocks);
+		goto error_out;
+	}
+
+	spin_lock(&transaction->t_handle_lock);
+	wanted = transaction->t_outstanding_credits + nblocks;
+
+	if (wanted > journal->j_max_transaction_buffers) {
+		jbd_debug(3, "denied handle %p %d blocks: "
+			  "transaction too large\n", handle, nblocks);
+		goto unlock;
+	}
+
+	if (wanted > __log_space_left(journal)) {
+		jbd_debug(3, "denied handle %p %d blocks: "
+			  "insufficient log space\n", handle, nblocks);
+		goto unlock;
+	}
+
+	handle->h_buffer_credits += nblocks;
+	transaction->t_outstanding_credits += nblocks;
+	result = 0;
+
+	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
+unlock:
+	spin_unlock(&transaction->t_handle_lock);
+error_out:
+	spin_unlock(&journal->j_state_lock);
+out:
+	return result;
+}
+
+
+/**
+ * int journal_restart() - restart a handle .
+ * @handle:  handle to restart
+ * @nblocks: nr credits requested
+ * 
+ * Restart a handle for a multi-transaction filesystem
+ * operation.
+ *
+ * If the journal_extend() call above fails to grant new buffer credits
+ * to a running handle, a call to journal_restart will commit the
+ * handle's transaction so far and reattach the handle to a new
+ * transaction capable of guaranteeing the requested number of
+ * credits.
+ */
+
+int journal_restart(handle_t *handle, int nblocks)
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	int ret;
+
+	/* If we've had an abort of any type, don't even think about
+	 * actually doing the restart! */
+	if (is_handle_aborted(handle))
+		return 0;
+
+	/*
+	 * First unlink the handle from its current transaction, and start the
+	 * commit on that.
+	 */
+	J_ASSERT(transaction->t_updates > 0);
+	J_ASSERT(journal_current_handle() == handle);
+
+	spin_lock(&journal->j_state_lock);
+	spin_lock(&transaction->t_handle_lock);
+	transaction->t_outstanding_credits -= handle->h_buffer_credits;
+	transaction->t_updates--;
+
+	if (!transaction->t_updates)
+		wake_up(&journal->j_wait_updates);
+	spin_unlock(&transaction->t_handle_lock);
+
+	jbd_debug(2, "restarting handle %p\n", handle);
+	__log_start_commit(journal, transaction->t_tid);
+	spin_unlock(&journal->j_state_lock);
+
+	handle->h_buffer_credits = nblocks;
+	ret = start_this_handle(journal, handle);
+	return ret;
+}
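+
+/*
+ * Illustrative pattern combining journal_extend() and journal_restart()
+ * (sketch):
+ *
+ *	if (journal_extend(handle, nblocks) != 0)
+ *		err = journal_restart(handle, nblocks);
+ *
+ * i.e. try to grow the running handle first, and fall back to committing
+ * the transaction so far and attaching to a new one if that is refused.
+ */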
+
+
+/**
+ * void journal_lock_updates () - establish a transaction barrier.
+ * @journal:  Journal to establish a barrier on.
+ *
+ * This locks out any further updates from being started, and blocks
+ * until all existing updates have completed, returning only once the
+ * journal is in a quiescent state with no updates running.
+ *
+ * The journal lock should not be held on entry.
+ */
+void journal_lock_updates(journal_t *journal)
+{
+	DEFINE_WAIT(wait);
+
+	spin_lock(&journal->j_state_lock);
+	++journal->j_barrier_count;
+
+	/* Wait until there are no running updates */
+	while (1) {
+		transaction_t *transaction = journal->j_running_transaction;
+
+		if (!transaction)
+			break;
+
+		spin_lock(&transaction->t_handle_lock);
+		if (!transaction->t_updates) {
+			spin_unlock(&transaction->t_handle_lock);
+			break;
+		}
+		prepare_to_wait(&journal->j_wait_updates, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_unlock(&transaction->t_handle_lock);
+		spin_unlock(&journal->j_state_lock);
+		schedule();
+		finish_wait(&journal->j_wait_updates, &wait);
+		spin_lock(&journal->j_state_lock);
+	}
+	spin_unlock(&journal->j_state_lock);
+
+	/*
+	 * We have now established a barrier against other normal updates, but
+	 * we also need to barrier against other journal_lock_updates() calls
+	 * to make sure that we serialise special journal-locked operations
+	 * too.
+	 */
+	down(&journal->j_barrier);
+}
+
+/**
+ * void journal_unlock_updates (journal_t* journal) - release barrier
+ * @journal:  Journal to release the barrier on.
+ * 
+ * Release a transaction barrier obtained with journal_lock_updates().
+ *
+ * Should be called without the journal lock held.
+ */
+void journal_unlock_updates (journal_t *journal)
+{
+	J_ASSERT(journal->j_barrier_count != 0);
+
+	up(&journal->j_barrier);
+	spin_lock(&journal->j_state_lock);
+	--journal->j_barrier_count;
+	spin_unlock(&journal->j_state_lock);
+	wake_up(&journal->j_wait_transaction_locked);
+}
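+
+/*
+ * Typical barrier usage (sketch):
+ *
+ *	journal_lock_updates(journal);
+ *	... perform work that must not race with other updates ...
+ *	journal_unlock_updates(journal);
+ */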
+
+/*
+ * Report any unexpected dirty buffers which turn up.  Normally those
+ * indicate an error, but they can occur if the user is running (say)
+ * tune2fs to modify the live filesystem, so we need the option of
+ * continuing as gracefully as possible.
+ *
+ * The caller should already hold the journal lock and
+ * j_list_lock spinlock: most callers will need those anyway
+ * in order to probe the buffer's journaling state safely.
+ */
+static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+{
+	struct buffer_head *bh = jh2bh(jh);
+	int jlist;
+
+	if (buffer_dirty(bh)) {
+		/* If this buffer is one which might reasonably be dirty
+		 * --- ie. data, or not part of this journal --- then
+		 * we're OK to leave it alone, but otherwise we need to
+		 * move the dirty bit to the journal's own internal
+		 * JBDDirty bit. */
+		jlist = jh->b_jlist;
+
+		if (jlist == BJ_Metadata || jlist == BJ_Reserved || 
+		    jlist == BJ_Shadow || jlist == BJ_Forget) {
+			if (test_clear_buffer_dirty(jh2bh(jh))) {
+				set_bit(BH_JBDDirty, &jh2bh(jh)->b_state);
+			}
+		}
+	}
+}
+
+/*
+ * If the buffer is already part of the current transaction, then there
+ * is nothing we need to do.  If it is already part of a prior
+ * transaction which we are still committing to disk, then we need to
+ * make sure that we do not overwrite the old copy: we do copy-out to
+ * preserve the copy going to disk.  We also account the buffer against
+ * the handle's metadata buffer credits (unless the buffer is already
+ * part of the transaction, that is).
+ *
+ */
+static int
+do_get_write_access(handle_t *handle, struct journal_head *jh,
+			int force_copy)
+{
+	struct buffer_head *bh;
+	transaction_t *transaction;
+	journal_t *journal;
+	int error;
+	char *frozen_buffer = NULL;
+	int need_copy = 0;
+
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
+	transaction = handle->h_transaction;
+	journal = transaction->t_journal;
+
+	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+
+	JBUFFER_TRACE(jh, "entry");
+repeat:
+	bh = jh2bh(jh);
+
+	/* @@@ Need to check for errors here at some point. */
+
+	lock_buffer(bh);
+	jbd_lock_bh_state(bh);
+
+	/* We now hold the buffer lock so it is safe to query the buffer
+	 * state.  Is the buffer dirty? 
+	 * 
+	 * If so, there are two possibilities.  The buffer may be
+	 * non-journaled, and undergoing a quite legitimate writeback.
+	 * Otherwise, it is journaled, and we don't expect dirty buffers
+	 * in that state (the buffers should be marked JBD_Dirty
+	 * instead.)  So either the IO is being done under our own
+	 * control and this is a bug, or it's a third party IO such as
+	 * dump(8) (which may leave the buffer scheduled for read ---
+	 * ie. locked but not dirty) or tune2fs (which may actually have
+	 * the buffer dirtied, ugh.)  */
+
+	if (buffer_dirty(bh)) {
+		/*
+		 * First question: is this buffer already part of the current
+		 * transaction or the existing committing transaction?
+		 */
+		if (jh->b_transaction) {
+			J_ASSERT_JH(jh,
+				jh->b_transaction == transaction || 
+				jh->b_transaction ==
+					journal->j_committing_transaction);
+			if (jh->b_next_transaction)
+				J_ASSERT_JH(jh, jh->b_next_transaction ==
+							transaction);
+			JBUFFER_TRACE(jh, "Unexpected dirty buffer");
+			jbd_unexpected_dirty_buffer(jh);
+ 		}
+ 	}
+
+	unlock_buffer(bh);
+
+	error = -EROFS;
+	if (is_handle_aborted(handle)) {
+		jbd_unlock_bh_state(bh);
+		goto out;
+	}
+	error = 0;
+
+	/*
+	 * The buffer is already part of this transaction if b_transaction or
+	 * b_next_transaction points to it
+	 */
+	if (jh->b_transaction == transaction ||
+	    jh->b_next_transaction == transaction)
+		goto done;
+
+	/*
+	 * If there is already a copy-out version of this buffer, then we don't
+	 * need to make another one
+	 */
+	if (jh->b_frozen_data) {
+		JBUFFER_TRACE(jh, "has frozen data");
+		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+		jh->b_next_transaction = transaction;
+		goto done;
+	}
+
+	/* Is there data here we need to preserve? */
+
+	if (jh->b_transaction && jh->b_transaction != transaction) {
+		JBUFFER_TRACE(jh, "owned by older transaction");
+		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+		J_ASSERT_JH(jh, jh->b_transaction ==
+					journal->j_committing_transaction);
+
+		/* There is one case we have to be very careful about.
+		 * If the committing transaction is currently writing
+		 * this buffer out to disk and has NOT made a copy-out,
+		 * then we cannot modify the buffer contents at all
+		 * right now.  The essence of copy-out is that it is the
+		 * extra copy, not the primary copy, which gets
+		 * journaled.  If the primary copy is already going to
+		 * disk then we cannot do copy-out here. */
+
+		if (jh->b_jlist == BJ_Shadow) {
+			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
+			wait_queue_head_t *wqh;
+
+			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
+
+			JBUFFER_TRACE(jh, "on shadow: sleep");
+			jbd_unlock_bh_state(bh);
+			/* commit wakes up all shadow buffers after IO */
+			for ( ; ; ) {
+				prepare_to_wait(wqh, &wait.wait,
+						TASK_UNINTERRUPTIBLE);
+				if (jh->b_jlist != BJ_Shadow)
+					break;
+				schedule();
+			}
+			finish_wait(wqh, &wait.wait);
+			goto repeat;
+		}
+
+		/* Only do the copy if the currently-owning transaction
+		 * still needs it.  If it is on the Forget list, the
+		 * committing transaction is past that stage.  The
+		 * buffer had better remain locked during the kmalloc,
+		 * but that should be true --- we hold the journal lock
+		 * still and the buffer is already on the BUF_JOURNAL
+		 * list so won't be flushed. 
+		 *
+		 * Subtle point, though: if this is a get_undo_access,
+		 * then we will be relying on the frozen_data to contain
+		 * the new value of the committed_data record after the
+		 * transaction, so we HAVE to force the frozen_data copy
+		 * in that case. */
+
+		if (jh->b_jlist != BJ_Forget || force_copy) {
+			JBUFFER_TRACE(jh, "generate frozen data");
+			if (!frozen_buffer) {
+				JBUFFER_TRACE(jh, "allocate memory for buffer");
+				jbd_unlock_bh_state(bh);
+				frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size,
+							    GFP_NOFS);
+				if (!frozen_buffer) {
+					printk(KERN_EMERG
+					       "%s: OOM for frozen_buffer\n",
+					       __FUNCTION__);
+					JBUFFER_TRACE(jh, "oom!");
+					error = -ENOMEM;
+					jbd_lock_bh_state(bh);
+					goto done;
+				}
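+				/* The state lock was dropped for the
+				 * allocation, so the buffer's state may have
+				 * changed: restart the checks from the top. */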
+				goto repeat;
+			}
+			jh->b_frozen_data = frozen_buffer;
+			frozen_buffer = NULL;
+			need_copy = 1;
+		}
+		jh->b_next_transaction = transaction;
+	}
+
+
+	/*
+	 * Finally, if the buffer is not journaled right now, we need to make
+	 * sure it doesn't get written to disk before the caller actually
+	 * commits the new data
+	 */
+	if (!jh->b_transaction) {
+		JBUFFER_TRACE(jh, "no transaction");
+		J_ASSERT_JH(jh, !jh->b_next_transaction);
+		jh->b_transaction = transaction;
+		JBUFFER_TRACE(jh, "file as BJ_Reserved");
+		spin_lock(&journal->j_list_lock);
+		__journal_file_buffer(jh, transaction, BJ_Reserved);
+		spin_unlock(&journal->j_list_lock);
+	}
+
+done:
+	if (need_copy) {
+		struct page *page;
+		int offset;
+		char *source;
+
+		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
+			    "Possible IO failure.\n");
+		page = jh2bh(jh)->b_page;
+		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+		source = kmap_atomic(page, KM_USER0);
+		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
+		kunmap_atomic(source, KM_USER0);
+	}
+	jbd_unlock_bh_state(bh);
+
+	/*
+	 * If we are about to journal a buffer, then any revoke pending on it is
+	 * no longer valid
+	 */
+	journal_cancel_revoke(handle, jh);
+
+out:
+	if (frozen_buffer)
+		kfree(frozen_buffer);
+
+	JBUFFER_TRACE(jh, "exit");
+	return error;
+}
+
+/**
+ * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
+ * @handle: transaction to add buffer modifications to
+ * @bh:     bh to be used for metadata writes
+ *
+ * Returns an error code or 0 on success.
+ *
+ * In full data journalling mode the buffer may be of type BJ_AsyncData,
+ * because we're write()ing a buffer which is also part of a shared mapping.
+ */
+
+int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+{
+	struct journal_head *jh = journal_add_journal_head(bh);
+	int rc;
+
+	/* We do not want to get caught playing with fields which the
+	 * log thread also manipulates.  Make sure that the buffer
+	 * completes any outstanding IO before proceeding. */
+	rc = do_get_write_access(handle, jh, 0);
+	journal_put_journal_head(jh);
+	return rc;
+}
+
+
+/*
+ * When the user wants to journal a newly created buffer_head
+ * (ie. getblk() returned a new buffer and we are going to populate it
+ * manually rather than reading off disk), then we need to keep the
+ * buffer_head locked until it has been completely filled with new
+ * data.  In this case, we should be able to make the assertion that
+ * the bh is not already part of an existing transaction.  
+ * 
+ * The buffer should already be locked by the caller by this point.
+ * There is no lock ranking violation: it was a newly created,
+ * unlocked buffer beforehand. */
+
+/**
+ * int journal_get_create_access () - notify intent to use newly created bh
+ * @handle: transaction to add the new buffer to
+ * @bh: new buffer.
+ *
+ * Call this if you create a new bh.
+ */
+int journal_get_create_access(handle_t *handle, struct buffer_head *bh) 
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	struct journal_head *jh = journal_add_journal_head(bh);
+	int err;
+
+	jbd_debug(5, "journal_head %p\n", jh);
+	err = -EROFS;
+	if (is_handle_aborted(handle))
+		goto out;
+	err = 0;
+
+	JBUFFER_TRACE(jh, "entry");
+	/*
+	 * The buffer may already belong to this transaction due to pre-zeroing
+	 * in the filesystem's new_block code.  It may also be on the previous,
+	 * committing transaction's lists, but it HAS to be in Forget state in
+	 * that case: the transaction must have deleted the buffer for it to be
+	 * reused here.
+	 */
+	jbd_lock_bh_state(bh);
+	spin_lock(&journal->j_list_lock);
+	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
+		jh->b_transaction == NULL ||
+		(jh->b_transaction == journal->j_committing_transaction &&
+			  jh->b_jlist == BJ_Forget)));
+
+	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
+
+	if (jh->b_transaction == NULL) {
+		jh->b_transaction = transaction;
+		JBUFFER_TRACE(jh, "file as BJ_Reserved");
+		__journal_file_buffer(jh, transaction, BJ_Reserved);
+	} else if (jh->b_transaction == journal->j_committing_transaction) {
+		JBUFFER_TRACE(jh, "set next transaction");
+		jh->b_next_transaction = transaction;
+	}
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh);
+
+	/*
+	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
+	 * blocks which contain freed but then revoked metadata.  We need
+	 * to cancel the revoke in case we end up freeing it yet again
+	 * and then reallocating it as data - this would cause a second revoke,
+	 * which hits an assertion error.
+	 */
+	JBUFFER_TRACE(jh, "cancelling revoke");
+	journal_cancel_revoke(handle, jh);
+	journal_put_journal_head(jh);
+out:
+	return err;
+}
+
+/**
+ * int journal_get_undo_access() -  Notify intent to modify metadata with
+ *     non-rewindable consequences
+ * @handle: transaction
+ * @bh: buffer to undo
+ *
+ * Sometimes there is a need to distinguish between metadata which has
+ * been committed to disk and that which has not.  The ext3fs code uses
+ * this for freeing and allocating space, we have to make sure that we
+ * do not reuse freed space until the deallocation has been committed,
+ * since if we overwrote that space we would make the delete
+ * un-rewindable in case of a crash.
+ * 
+ * To deal with that, journal_get_undo_access requests write access to a
+ * buffer for parts of non-rewindable operations such as delete
+ * operations on the bitmaps.  The journaling code must keep a copy of
+ * the buffer's contents prior to the undo_access call until such time
+ * as we know that the buffer has definitely been committed to disk.
+ * 
+ * We never need to know which transaction the committed data is part
+ * of, buffers touched here are guaranteed to be dirtied later and so
+ * will be committed to a new transaction in due course, at which point
+ * we can discard the old committed data pointer.
+ *
+ * Returns error number or 0 on success.
+ */
+int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+{
+	int err;
+	struct journal_head *jh = journal_add_journal_head(bh);
+	char *committed_data = NULL;
+
+	JBUFFER_TRACE(jh, "entry");
+
+	/*
+	 * Do this first --- it can drop the journal lock, so we want to
+	 * make sure that obtaining the committed_data is done
+	 * atomically wrt. completion of any outstanding commits.
+	 */
+	err = do_get_write_access(handle, jh, 1);
+	if (err)
+		goto out;
+
+repeat:
+	if (!jh->b_committed_data) {
+		committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS);
+		if (!committed_data) {
+			printk(KERN_EMERG "%s: No memory for committed data\n",
+				__FUNCTION__);
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+
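+	/*
+	 * The allocation above is done without the buffer state lock held
+	 * (it may sleep), so re-check b_committed_data under the lock
+	 * before installing our copy.
+	 */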
+	jbd_lock_bh_state(bh);
+	if (!jh->b_committed_data) {
+		/* Copy out the current buffer contents into the
+		 * preserved, committed copy. */
+		JBUFFER_TRACE(jh, "generate b_committed data");
+		if (!committed_data) {
+			jbd_unlock_bh_state(bh);
+			goto repeat;
+		}
+
+		jh->b_committed_data = committed_data;
+		committed_data = NULL;
+		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
+	}
+	jbd_unlock_bh_state(bh);
+out:
+	journal_put_journal_head(jh);
+	if (committed_data)
+		kfree(committed_data);
+	return err;
+}
+
+/** 
+ * int journal_dirty_data() -  mark a buffer as containing dirty data which
+ *                             needs to be flushed before we can commit the
+ *                             current transaction.  
+ * @handle: transaction
+ * @bh: bufferhead to mark
+ * 
+ * The buffer is placed on the transaction's data list and is marked as
+ * belonging to the transaction.
+ *
+ * Returns error number or 0 on success.
+ *
+ * journal_dirty_data() can be called via page_launder->ext3_writepage
+ * by kswapd.
+ */
+int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+{
+	journal_t *journal = handle->h_transaction->t_journal;
+	int need_brelse = 0;
+	struct journal_head *jh;
+
+	if (is_handle_aborted(handle))
+		return 0;
+
+	jh = journal_add_journal_head(bh);
+	JBUFFER_TRACE(jh, "entry");
+
+	/*
+	 * The buffer could *already* be dirty.  Writeout can start
+	 * at any time.
+	 */
+	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
+
+	/*
+	 * What if the buffer is already part of a running transaction?
+	 * 
+	 * There are two cases:
+	 * 1) It is part of the current running transaction.  Refile it,
+	 *    just in case we have allocated it as metadata, deallocated
+	 *    it, then reallocated it as data. 
+	 * 2) It is part of the previous, still-committing transaction.
+	 *    If all we want to do is to guarantee that the buffer will be
+	 *    written to disk before this new transaction commits, then
+	 *    being sure that the *previous* transaction has this same 
+	 *    property is sufficient for us!  Just leave it on its old
+	 *    transaction.
+	 *
+	 * In case (2), the buffer must not already exist as metadata
+	 * --- that would violate write ordering (a transaction is free
+	 * to write its data at any point, even before the previous
+	 * committing transaction has committed).  The caller must
+	 * never, ever allow this to happen: there's nothing we can do
+	 * about it in this layer.
+	 */
+	jbd_lock_bh_state(bh);
+	spin_lock(&journal->j_list_lock);
+	if (jh->b_transaction) {
+		JBUFFER_TRACE(jh, "has transaction");
+		if (jh->b_transaction != handle->h_transaction) {
+			JBUFFER_TRACE(jh, "belongs to older transaction");
+			J_ASSERT_JH(jh, jh->b_transaction ==
+					journal->j_committing_transaction);
+
+			/* @@@ IS THIS TRUE  ? */
+			/*
+			 * Not any more.  Scenario: someone does a write()
+			 * in data=journal mode.  The buffer's transaction has
+			 * moved into commit.  Then someone does another
+			 * write() to the file.  We do the frozen data copyout
+			 * and set b_next_transaction to point to j_running_t.
+			 * And while we're in that state, someone does a
+			 * writepage() in an attempt to pageout the same area
+			 * of the file via a shared mapping.  At present that
+			 * calls journal_dirty_data(), and we get right here.
+			 * It may be too late to journal the data.  Simply
+			 * falling through to the next test will suffice: the
+			 * data will be dirty and will be checkpointed.  The
+			 * ordering comments in the next comment block still
+			 * apply.
+			 */
+			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+
+			/*
+			 * If we're journalling data, and this buffer was
+			 * subject to a write(), it could be metadata, forget
+			 * or shadow against the committing transaction.  Now,
+			 * someone has dirtied the same darn page via a mapping
+			 * and it is being writepage()'d.
+			 * We *could* just steal the page from commit, with some
+			 * fancy locking there.  Instead, we just skip it -
+			 * don't tie the page's buffers to the new transaction
+			 * at all.
+			 * Implication: if we crash before the writepage() data
+			 * is written into the filesystem, recovery will replay
+			 * the write() data.
+			 */
+			if (jh->b_jlist != BJ_None &&
+					jh->b_jlist != BJ_SyncData &&
+					jh->b_jlist != BJ_Locked) {
+				JBUFFER_TRACE(jh, "Not stealing");
+				goto no_journal;
+			}
+
+			/*
+			 * This buffer may be undergoing writeout in commit.  We
+			 * can't return from here and let the caller dirty it
+			 * again because that can cause the write-out loop in
+			 * commit to never terminate.
+			 */
+			if (buffer_dirty(bh)) {
+				get_bh(bh);
+				spin_unlock(&journal->j_list_lock);
+				jbd_unlock_bh_state(bh);
+				need_brelse = 1;
+				sync_dirty_buffer(bh);
+				jbd_lock_bh_state(bh);
+				spin_lock(&journal->j_list_lock);
+				/* The buffer may become locked again at any
+				   time if it is redirtied */
+			}
+
+			/* journal_clean_data_list() may have got there first */
+			if (jh->b_transaction != NULL) {
+				JBUFFER_TRACE(jh, "unfile from commit");
+				__journal_temp_unlink_buffer(jh);
+				/* It still points to the committing
+				 * transaction; move it to this one so
+				 * that the refile assert checks are
+				 * happy. */
+				jh->b_transaction = handle->h_transaction;
+			}
+			/* The buffer will be refiled below */
+
+		}
+		/*
+		 * Special case --- the buffer might actually have been
+		 * allocated and then immediately deallocated in the previous,
+		 * committing transaction, so might still be left on that
+		 * transaction's metadata lists.
+		 */
+		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
+			JBUFFER_TRACE(jh, "not on correct data list: unfile");
+			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
+			__journal_temp_unlink_buffer(jh);
+			jh->b_transaction = handle->h_transaction;
+			JBUFFER_TRACE(jh, "file as data");
+			__journal_file_buffer(jh, handle->h_transaction,
+						BJ_SyncData);
+		}
+	} else {
+		JBUFFER_TRACE(jh, "not on a transaction");
+		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
+	}
+no_journal:
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh);
+	if (need_brelse) {
+		BUFFER_TRACE(bh, "brelse");
+		__brelse(bh);
+	}
+	JBUFFER_TRACE(jh, "exit");
+	journal_put_journal_head(jh);
+	return 0;
+}
+
+/** 
+ * int journal_dirty_metadata() -  mark a buffer as containing dirty metadata
+ * @handle: transaction to add buffer to.
+ * @bh: buffer to mark 
+ * 
+ * mark dirty metadata which needs to be journaled as part of the current
+ * transaction.
+ *
+ * The buffer is placed on the transaction's metadata list and is marked
+ * as belonging to the transaction.  
+ *
+ * Returns error number or 0 on success.  
+ *
+ * Special care needs to be taken if the buffer already belongs to the
+ * current committing transaction (in which case we should have frozen
+ * data present for that commit).  In that case, we don't relink the
+ * buffer: that only gets done when the old transaction finally
+ * completes its commit.
+ */
+int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	struct journal_head *jh = bh2jh(bh);
+
+	jbd_debug(5, "journal_head %p\n", jh);
+	JBUFFER_TRACE(jh, "entry");
+	if (is_handle_aborted(handle))
+		goto out;
+
+	jbd_lock_bh_state(bh);
+
+	if (jh->b_modified == 0) {
+		/*
+		 * This buffer has been modified and is becoming part
+		 * of the transaction.  This accounting needs to be done
+		 * only once per transaction -bzzz
+		 */
+		jh->b_modified = 1;
+		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+		handle->h_buffer_credits--;
+	}
+
+	/*
+	 * fastpath, to avoid expensive locking.  If this buffer is already
+	 * on the running transaction's metadata list there is nothing to do.
+	 * Nobody can take it off again because there is a handle open.
+	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
+	 * result in this test being false, so we go in and take the locks.
+	 */
+	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
+		JBUFFER_TRACE(jh, "fastpath");
+		J_ASSERT_JH(jh, jh->b_transaction ==
+					journal->j_running_transaction);
+		goto out_unlock_bh;
+	}
+
+	set_buffer_jbddirty(bh);
+
+	/* 
+	 * Metadata already on the current transaction list doesn't
+	 * need to be filed.  Metadata on another transaction's list must
+	 * be committing, and will be refiled once the commit completes:
+	 * leave it alone for now. 
+	 */
+	if (jh->b_transaction != transaction) {
+		JBUFFER_TRACE(jh, "already on other transaction");
+		J_ASSERT_JH(jh, jh->b_transaction ==
+					journal->j_committing_transaction);
+		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
+		/* And this case is illegal: we can't reuse another
+		 * transaction's data buffer, ever. */
+		goto out_unlock_bh;
+	}
+
+	/* That test should have eliminated the following case: */
+	J_ASSERT_JH(jh, jh->b_frozen_data == 0);
+
+	JBUFFER_TRACE(jh, "file as BJ_Metadata");
+	spin_lock(&journal->j_list_lock);
+	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+	spin_unlock(&journal->j_list_lock);
+out_unlock_bh:
+	jbd_unlock_bh_state(bh);
+out:
+	JBUFFER_TRACE(jh, "exit");
+	return 0;
+}
+
+/* 
+ * journal_release_buffer: undo a get_write_access without any buffer
+ * updates, if the update decided in the end that it didn't need access.
+ *
+ */
+void
+journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+{
+	BUFFER_TRACE(bh, "entry");
+}
+
+/** 
+ * void journal_forget() - bforget() for potentially-journaled buffers.
+ * @handle: transaction handle
+ * @bh:     bh to 'forget'
+ *
+ * We can only do the bforget if there are no commits pending against the
+ * buffer.  If the buffer is dirty in the current running transaction we
+ * can safely unlink it. 
+ *
+ * bh may not be a journalled buffer at all - it may be a non-JBD
+ * buffer which came off the hashtable.  Check for this.
+ *
+ * Decrements bh->b_count by one.
+ * 
+ * Allow this call even if the handle has aborted --- it may be part of
+ * the caller's cleanup after an abort.
+ */
+int journal_forget (handle_t *handle, struct buffer_head *bh)
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	struct journal_head *jh;
+	int drop_reserve = 0;
+	int err = 0;
+
+	BUFFER_TRACE(bh, "entry");
+
+	jbd_lock_bh_state(bh);
+	spin_lock(&journal->j_list_lock);
+
+	if (!buffer_jbd(bh))
+		goto not_jbd;
+	jh = bh2jh(bh);
+
+	/* Critical error: attempting to delete a bitmap buffer, maybe?
+	 * Don't do any jbd operations, and return an error. */
+	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
+			 "inconsistent data on disk")) {
+		err = -EIO;
+		goto not_jbd;
+	}
+
+	/*
+	 * The buffer's going from the transaction, we must drop
+	 * all references -bzzz
+	 */
+	jh->b_modified = 0;
+
+	if (jh->b_transaction == handle->h_transaction) {
+		J_ASSERT_JH(jh, !jh->b_frozen_data);
+
+		/* If we are forgetting a buffer which is already part
+		 * of this transaction, then we can just drop it from
+		 * the transaction immediately. */
+		clear_buffer_dirty(bh);
+		clear_buffer_jbddirty(bh);
+
+		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
+
+		drop_reserve = 1;
+
+		/* 
+		 * We are no longer going to journal this buffer.
+		 * However, the commit of this transaction is still
+		 * important to the buffer: the delete that we are now
+		 * processing might obsolete an old log entry, so by
+		 * committing, we can satisfy the buffer's checkpoint.
+		 *
+		 * So, if we have a checkpoint on the buffer, we should
+		 * now refile the buffer on our BJ_Forget list so that
+		 * we know to remove the checkpoint after we commit. 
+		 */
+
+		if (jh->b_cp_transaction) {
+			__journal_temp_unlink_buffer(jh);
+			__journal_file_buffer(jh, transaction, BJ_Forget);
+		} else {
+			__journal_unfile_buffer(jh);
+			journal_remove_journal_head(bh);
+			__brelse(bh);
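+			/* If that dropped the last journal reference,
+			 * the buffer is no longer journaled and can be
+			 * forgotten directly. */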
+			if (!buffer_jbd(bh)) {
+				spin_unlock(&journal->j_list_lock);
+				jbd_unlock_bh_state(bh);
+				__bforget(bh);
+				goto drop;
+			}
+		}
+	} else if (jh->b_transaction) {
+		J_ASSERT_JH(jh, (jh->b_transaction == 
+				 journal->j_committing_transaction));
+		/* However, if the buffer is still owned by a prior
+		 * (committing) transaction, we can't drop it yet... */
+		JBUFFER_TRACE(jh, "belongs to older transaction");
+		/* ... but we CAN drop it from the new transaction if we
+		 * have also modified it since the original commit. */
+
+		if (jh->b_next_transaction) {
+			J_ASSERT(jh->b_next_transaction == transaction);
+			jh->b_next_transaction = NULL;
+			drop_reserve = 1;
+		}
+	}
+
+not_jbd:
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh);
+	__brelse(bh);
+drop:
+	if (drop_reserve) {
+		/* no need to reserve log space for this block -bzzz */
+		handle->h_buffer_credits++;
+	}
+	return err;
+}
+
+/**
+ * int journal_stop() - complete a transaction
+ * @handle: transaction to complete.
+ * 
+ * All done for a particular handle.
+ *
+ * There is not much action needed here.  We just return any remaining
+ * buffer credits to the transaction and remove the handle.  The only
+ * complication is that we need to start a commit operation if the
+ * filesystem is marked for synchronous update.
+ *
+ * journal_stop itself will not usually return an error, but it may
+ * do so in unusual circumstances.  In particular, expect it to 
+ * return -EIO if a journal_abort has been executed since the
+ * transaction began.
+ */
+int journal_stop(handle_t *handle)
+{
+	transaction_t *transaction = handle->h_transaction;
+	journal_t *journal = transaction->t_journal;
+	int old_handle_count, err;
+
+	J_ASSERT(transaction->t_updates > 0);
+	J_ASSERT(journal_current_handle() == handle);
+
+	if (is_handle_aborted(handle))
+		err = -EIO;
+	else
+		err = 0;
+
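+	/* Handles may be nested: only the final journal_stop() for this
+	 * handle does the real work of releasing credits. */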
+	if (--handle->h_ref > 0) {
+		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+			  handle->h_ref);
+		return err;
+	}
+
+	jbd_debug(4, "Handle %p going down\n", handle);
+
+	/*
+	 * Implement synchronous transaction batching.  If the handle
+	 * was synchronous, don't force a commit immediately.  Let's
+	 * yield and let another thread piggyback onto this transaction.
+	 * Keep doing that while new threads continue to arrive.
+	 * It doesn't cost much - we're about to run a commit and sleep
+	 * on IO anyway.  Speeds up many-threaded, many-dir operations
+	 * by 30x or more...
+	 */
+	if (handle->h_sync) {
+		do {
+			old_handle_count = transaction->t_handle_count;
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(1);
+		} while (old_handle_count != transaction->t_handle_count);
+	}
+
+	current->journal_info = NULL;
+	spin_lock(&journal->j_state_lock);
+	spin_lock(&transaction->t_handle_lock);
+	transaction->t_outstanding_credits -= handle->h_buffer_credits;
+	transaction->t_updates--;
+	if (!transaction->t_updates) {
+		wake_up(&journal->j_wait_updates);
+		if (journal->j_barrier_count)
+			wake_up(&journal->j_wait_transaction_locked);
+	}
+
+	/*
+	 * If the handle is marked SYNC, we need to set another commit
+	 * going!  We also want to force a commit if the current
+	 * transaction is occupying too much of the log, or if the
+	 * transaction is too old now.
+	 */
+	if (handle->h_sync ||
+			transaction->t_outstanding_credits >
+				journal->j_max_transaction_buffers ||
+	    		time_after_eq(jiffies, transaction->t_expires)) {
+		/* Do this even for aborted journals: an abort still
+		 * completes the commit thread, it just doesn't write
+		 * anything to disk. */
+		tid_t tid = transaction->t_tid;
+
+		spin_unlock(&transaction->t_handle_lock);
+		jbd_debug(2, "transaction too old, requesting commit for "
+					"handle %p\n", handle);
+		/* This is non-blocking */
+		__log_start_commit(journal, transaction->t_tid);
+		spin_unlock(&journal->j_state_lock);
+
+		/*
+		 * Special case: JFS_SYNC synchronous updates require us
+		 * to wait for the commit to complete.  
+		 */
+		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
+			err = log_wait_commit(journal, tid);
+	} else {
+		spin_unlock(&transaction->t_handle_lock);
+		spin_unlock(&journal->j_state_lock);
+	}
+
+	jbd_free_handle(handle);
+	return err;
+}
+
+/**
+ * int journal_force_commit() - force any uncommitted transactions
+ * @journal: journal to force
+ *
+ * For synchronous operations: force any uncommitted transactions
+ * to disk.  May seem kludgy, but it reuses all the handle batching
+ * code in a very simple manner.
+ */
+int journal_force_commit(journal_t *journal)
+{
+	handle_t *handle;
+	int ret;
+
+	handle = journal_start(journal, 1);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+	} else {
+		handle->h_sync = 1;
+		ret = journal_stop(handle);
+	}
+	return ret;
+}
+
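+/*
+ * Typical use of the write-access API by a filesystem client, as an
+ * illustrative sketch only (error handling omitted):
+ *
+ *	handle = journal_start(journal, nblocks);
+ *	err = journal_get_write_access(handle, bh);
+ *	... modify the buffer's contents ...
+ *	err = journal_dirty_metadata(handle, bh);
+ *	err = journal_stop(handle);
+ */
+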
+/*
+ *
+ * List management code snippets: various functions for manipulating the
+ * transaction buffer lists.
+ *
+ */
+
+/*
+ * Append a buffer to a transaction list, given the transaction's list head
+ * pointer.
+ *
+ * j_list_lock is held.
+ *
+ * jbd_lock_bh_state(jh2bh(jh)) is held.
+ */
+
+static inline void 
+__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
+{
+	if (!*list) {
+		jh->b_tnext = jh->b_tprev = jh;
+		*list = jh;
+	} else {
+		/* Insert at the tail of the list to preserve order */
+		struct journal_head *first = *list, *last = first->b_tprev;
+		jh->b_tprev = last;
+		jh->b_tnext = first;
+		last->b_tnext = first->b_tprev = jh;
+	}
+}
+
+/* 
+ * Remove a buffer from a transaction list, given the transaction's list
+ * head pointer.
+ *
+ * Called with j_list_lock held, and the journal may not be locked.
+ *
+ * jbd_lock_bh_state(jh2bh(jh)) is held.
+ */
+
+static inline void
+__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
+{
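+	/* If jh is the list head, advance the head; if it wraps straight
+	 * back to jh then jh was the only element and the list is empty. */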
+	if (*list == jh) {
+		*list = jh->b_tnext;
+		if (*list == jh)
+			*list = NULL;
+	}
+	jh->b_tprev->b_tnext = jh->b_tnext;
+	jh->b_tnext->b_tprev = jh->b_tprev;
+}
+
+/* 
+ * Remove a buffer from the appropriate transaction list.
+ *
+ * Note that this function can *change* the value of
+ * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
+ * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
+ * is holding onto a copy of one of these pointers, it could go bad.
+ * Generally the caller needs to re-read the pointer from the transaction_t.
+ *
+ * Called under j_list_lock.  The journal may not be locked.
+ */
+void __journal_temp_unlink_buffer(struct journal_head *jh)
+{
+	struct journal_head **list = NULL;
+	transaction_t *transaction;
+	struct buffer_head *bh = jh2bh(jh);
+
+	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+	transaction = jh->b_transaction;
+	if (transaction)
+		assert_spin_locked(&transaction->t_journal->j_list_lock);
+
+	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
+	if (jh->b_jlist != BJ_None)
+		J_ASSERT_JH(jh, transaction != 0);
+
+	switch (jh->b_jlist) {
+	case BJ_None:
+		return;
+	case BJ_SyncData:
+		list = &transaction->t_sync_datalist;
+		break;
+	case BJ_Metadata:
+		transaction->t_nr_buffers--;
+		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
+		list = &transaction->t_buffers;
+		break;
+	case BJ_Forget:
+		list = &transaction->t_forget;
+		break;
+	case BJ_IO:
+		list = &transaction->t_iobuf_list;
+		break;
+	case BJ_Shadow:
+		list = &transaction->t_shadow_list;
+		break;
+	case BJ_LogCtl:
+		list = &transaction->t_log_list;
+		break;
+	case BJ_Reserved:
+		list = &transaction->t_reserved_list;
+		break;
+	case BJ_Locked:
+		list = &transaction->t_locked_list;
+		break;
+	}
+
+	__blist_del_buffer(list, jh);
+	jh->b_jlist = BJ_None;
+	if (test_clear_buffer_jbddirty(bh))
+		mark_buffer_dirty(bh);	/* Expose it to the VM */
+}
+
+void __journal_unfile_buffer(struct journal_head *jh)
+{
+	__journal_temp_unlink_buffer(jh);
+	jh->b_transaction = NULL;
+}
+
+void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
+{
+	jbd_lock_bh_state(jh2bh(jh));
+	spin_lock(&journal->j_list_lock);
+	__journal_unfile_buffer(jh);
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(jh2bh(jh));
+}
+
+/*
+ * Called from journal_try_to_free_buffers().
+ *
+ * Called under jbd_lock_bh_state(bh)
+ */
+static void
+__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
+{
+	struct journal_head *jh;
+
+	jh = bh2jh(bh);
+
+	if (buffer_locked(bh) || buffer_dirty(bh))
+		goto out;
+
+	if (jh->b_next_transaction != 0)
+		goto out;
+
+	spin_lock(&journal->j_list_lock);
+	if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) {
+		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
+			/* A written-back ordered data buffer */
+			JBUFFER_TRACE(jh, "release data");
+			__journal_unfile_buffer(jh);
+			journal_remove_journal_head(bh);
+			__brelse(bh);
+		}
+	} else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
+		/* written-back checkpointed metadata buffer */
+		if (jh->b_jlist == BJ_None) {
+			JBUFFER_TRACE(jh, "remove from checkpoint list");
+			__journal_remove_checkpoint(jh);
+			journal_remove_journal_head(bh);
+			__brelse(bh);
+		}
+	}
+	spin_unlock(&journal->j_list_lock);
+out:
+	return;
+}
+
+
+/** 
+ * int journal_try_to_free_buffers() - try to free page buffers.
+ * @journal: journal for operation
+ * @page: to try and free
+ * @unused_gfp_mask: unused
+ *
+ * 
+ * For all the buffers on this page,
+ * if they are fully written out ordered data, move them onto BUF_CLEAN
+ * so try_to_free_buffers() can reap them.
+ * 
+ * This function returns non-zero if we wish try_to_free_buffers()
+ * to be called. We do this if the page is releasable by try_to_free_buffers().
+ * We also do it if the page has locked or dirty buffers and the caller wants
+ * us to perform sync or async writeout.
+ *
+ * This complicates JBD locking somewhat.  We aren't protected by the
+ * BKL here.  We wish to remove the buffer from its committing or
+ * running transaction's ->t_datalist via __journal_unfile_buffer.
+ *
+ * This may *change* the value of transaction_t->t_datalist, so anyone
+ * who looks at t_datalist needs to lock against this function.
+ *
+ * Even worse, someone may be doing a journal_dirty_data on this
+ * buffer.  So we need to lock against that.  journal_dirty_data()
+ * will come out of the lock with the buffer dirty, which makes it
+ * ineligible for release here.
+ *
+ * Who else is affected by this?  hmm...  Really the only contender
+ * is do_get_write_access() - it could be looking at the buffer while
+ * journal_try_to_free_buffer() is changing its state.  But that
+ * cannot happen because we never reallocate freed data as metadata
+ * while the data is part of a transaction.  Yes?
+ */
+int journal_try_to_free_buffers(journal_t *journal, 
+				struct page *page, int unused_gfp_mask)
+{
+	struct buffer_head *head;
+	struct buffer_head *bh;
+	int ret = 0;
+
+	J_ASSERT(PageLocked(page));
+
+	head = page_buffers(page);
+	bh = head;
+	do {
+		struct journal_head *jh;
+
+		/*
+		 * We take our own ref against the journal_head here to avoid
+		 * having to add tons of locking around each instance of
+		 * journal_remove_journal_head() and journal_put_journal_head().
+		 */
+		jh = journal_grab_journal_head(bh);
+		if (!jh)
+			continue;
+
+		jbd_lock_bh_state(bh);
+		__journal_try_to_free_buffer(journal, bh);
+		journal_put_journal_head(jh);
+		jbd_unlock_bh_state(bh);
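+		/* If the buffer is still journaled after the attempt above,
+		 * the page as a whole cannot be released. */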
+		if (buffer_jbd(bh))
+			goto busy;
+	} while ((bh = bh->b_this_page) != head);
+	ret = try_to_free_buffers(page);
+busy:
+	return ret;
+}
+
+/*
+ * This buffer is no longer needed.  If it is on an older transaction's
+ * checkpoint list we need to record it on this transaction's forget list
+ * to pin this buffer (and hence its checkpointing transaction) down until
+ * this transaction commits.  If the buffer isn't on a checkpoint list, we
+ * release it.
+ * Returns non-zero if JBD no longer has an interest in the buffer.
+ *
+ * Called under j_list_lock.
+ *
+ * Called under jbd_lock_bh_state(bh).
+ */
+static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+{
+	int may_free = 1;
+	struct buffer_head *bh = jh2bh(jh);
+
+	__journal_unfile_buffer(jh);
+
+	if (jh->b_cp_transaction) {
+		JBUFFER_TRACE(jh, "on running+cp transaction");
+		__journal_file_buffer(jh, transaction, BJ_Forget);
+		clear_buffer_jbddirty(bh);
+		may_free = 0;
+	} else {
+		JBUFFER_TRACE(jh, "on running transaction");
+		journal_remove_journal_head(bh);
+		__brelse(bh);
+	}
+	return may_free;
+}
+
+/*
+ * journal_invalidatepage 
+ *
+ * This code is tricky.  It has a number of cases to deal with.
+ *
+ * There are two invariants which this code relies on:
+ *
+ * i_size must be updated on disk before we start calling invalidatepage on the
+ * data.
+ * 
+ *  This is done in ext3 by defining an ext3_setattr method which
+ *  updates i_size before truncate gets going.  By maintaining this
+ *  invariant, we can be sure that it is safe to throw away any buffers
+ *  attached to the current transaction: once the transaction commits,
+ *  we know that the data will not be needed.
+ * 
+ *  Note however that we can *not* throw away data belonging to the
+ *  previous, committing transaction!  
+ *
+ * Any disk blocks which *are* part of the previous, committing
+ * transaction (and which therefore cannot be discarded immediately) are
+ * not going to be reused in the new running transaction.
+ *
+ *  The bitmap committed_data images guarantee this: any block which is
+ *  allocated in one transaction and removed in the next will be marked
+ *  as in-use in the committed_data bitmap, so cannot be reused until
+ *  the next transaction to delete the block commits.  This means that
+ *  leaving committing buffers dirty is quite safe: the disk blocks
+ *  cannot be reallocated to a different file and so buffer aliasing is
+ *  not possible.
+ *
+ *
+ * The above applies mainly to ordered data mode.  In writeback mode we
+ * don't make guarantees about the order in which data hits disk --- in
+ * particular we don't guarantee that new dirty data is flushed before
+ * transaction commit --- so it is always safe just to discard data
+ * immediately in that mode.  --sct 
+ */
+
+/*
+ * The journal_unmap_buffer helper function returns zero if the buffer
+ * concerned remains pinned as an anonymous buffer belonging to an older
+ * transaction.
+ *
+ * We're outside-transaction here.  Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+{
+	transaction_t *transaction;
+	struct journal_head *jh;
+	int may_free = 1;
+	int ret;
+
+	BUFFER_TRACE(bh, "entry");
+
+	/*
+	 * It is safe to proceed here without the j_list_lock because the
+	 * buffers cannot be stolen by try_to_free_buffers as long as we are
+	 * holding the page lock. --sct
+	 */
+
+	if (!buffer_jbd(bh))
+		goto zap_buffer_unlocked;
+
+	spin_lock(&journal->j_state_lock);
+	jbd_lock_bh_state(bh);
+	spin_lock(&journal->j_list_lock);
+
+	jh = journal_grab_journal_head(bh);
+	if (!jh)
+		goto zap_buffer_no_jh;
+
+	transaction = jh->b_transaction;
+	if (transaction == NULL) {
+		/* First case: not on any transaction.  If it
+		 * has no checkpoint link, then we can zap it:
+		 * it's a writeback-mode buffer so we don't care
+		 * if it hits disk safely. */
+		if (!jh->b_cp_transaction) {
+			JBUFFER_TRACE(jh, "not on any transaction: zap");
+			goto zap_buffer;
+		}
+
+		if (!buffer_dirty(bh)) {
+			/* bdflush has written it.  We can drop it now */
+			goto zap_buffer;
+		}
+
+		/* OK, it must be in the journal but still not
+		 * written fully to disk: it's metadata or
+		 * journaled data... */
+
+		if (journal->j_running_transaction) {
+			/* ... and once the current transaction has
+			 * committed, the buffer won't be needed any
+			 * longer. */
+			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+			ret = __dispose_buffer(jh,
+					journal->j_running_transaction);
+			journal_put_journal_head(jh);
+			spin_unlock(&journal->j_list_lock);
+			jbd_unlock_bh_state(bh);
+			spin_unlock(&journal->j_state_lock);
+			return ret;
+		} else {
+			/* There is no currently-running transaction. So the
+			 * orphan record which we wrote for this file must have
+			 * passed into commit.  We must attach this buffer to
+			 * the committing transaction, if it exists. */
+			if (journal->j_committing_transaction) {
+				JBUFFER_TRACE(jh, "give to committing trans");
+				ret = __dispose_buffer(jh,
+					journal->j_committing_transaction);
+				journal_put_journal_head(jh);
+				spin_unlock(&journal->j_list_lock);
+				jbd_unlock_bh_state(bh);
+				spin_unlock(&journal->j_state_lock);
+				return ret;
+			} else {
+				/* The orphan record's transaction has
+				 * committed.  We can cleanse this buffer */
+				clear_buffer_jbddirty(bh);
+				goto zap_buffer;
+			}
+		}
+	} else if (transaction == journal->j_committing_transaction) {
+		/* If it is committing, we simply cannot touch it.  We
+		 * can remove its next_transaction pointer from the
+		 * running transaction if that is set, but nothing
+		 * else. */
+		JBUFFER_TRACE(jh, "on committing transaction");
+		set_buffer_freed(bh);
+		if (jh->b_next_transaction) {
+			J_ASSERT(jh->b_next_transaction ==
+					journal->j_running_transaction);
+			jh->b_next_transaction = NULL;
+		}
+		journal_put_journal_head(jh);
+		spin_unlock(&journal->j_list_lock);
+		jbd_unlock_bh_state(bh);
+		spin_unlock(&journal->j_state_lock);
+		return 0;
+	} else {
+		/* Good, the buffer belongs to the running transaction.
+		 * We are writing our own transaction's data, not any
+		 * previous one's, so it is safe to throw it away
+		 * (remember that we expect the filesystem to have set
+		 * i_size already for this truncate so recovery will not
+		 * expose the disk blocks we are discarding here.) */
+		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
+		may_free = __dispose_buffer(jh, transaction);
+	}
+
+zap_buffer:
+	journal_put_journal_head(jh);
+zap_buffer_no_jh:
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh);
+	spin_unlock(&journal->j_state_lock);
+zap_buffer_unlocked:
+	clear_buffer_dirty(bh);
+	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
+	clear_buffer_mapped(bh);
+	clear_buffer_req(bh);
+	clear_buffer_new(bh);
+	bh->b_bdev = NULL;
+	return may_free;
+}
+
+/** 
+ * int journal_invalidatepage() 
+ * @journal: journal to use for flush... 
+ * @page:    page to flush
+ * @offset:  length of page to invalidate.
+ *
+ * Reap page buffers containing data after offset in page.
+ *
+ * Return non-zero if the page's buffers were successfully reaped.
+ */
+int journal_invalidatepage(journal_t *journal, 
+		      struct page *page, 
+		      unsigned long offset)
+{
+	struct buffer_head *head, *bh, *next;
+	unsigned int curr_off = 0;
+	int may_free = 1;
+
+	if (!PageLocked(page))
+		BUG();
+	if (!page_has_buffers(page))
+		return 1;
+
+	/* We will potentially be playing with lists other than just the
+	 * data lists (especially for journaled data mode), so be
+	 * cautious in our locking. */
+
+	head = bh = page_buffers(page);
+	do {
+		unsigned int next_off = curr_off + bh->b_size;
+		next = bh->b_this_page;
+
+		/* AKPM: doing lock_buffer here may be overly paranoid */
+		if (offset <= curr_off) {
+		 	/* This block is wholly outside the truncation point */
+			lock_buffer(bh);
+			may_free &= journal_unmap_buffer(journal, bh);
+			unlock_buffer(bh);
+		}
+		curr_off = next_off;
+		bh = next;
+
+	} while (bh != head);
+
+	if (!offset) {
+		if (!may_free || !try_to_free_buffers(page))
+			return 0;
+		J_ASSERT(!page_has_buffers(page));
+	}
+	return 1;
+}
+
+/* 
+ * File a buffer on the given transaction list. 
+ */
+void __journal_file_buffer(struct journal_head *jh,
+			transaction_t *transaction, int jlist)
+{
+	struct journal_head **list = NULL;
+	int was_dirty = 0;
+	struct buffer_head *bh = jh2bh(jh);
+
+	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+	assert_spin_locked(&transaction->t_journal->j_list_lock);
+
+	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
+	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+				jh->b_transaction == 0);
+
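+	/* Nothing to do if the buffer is already filed on the requested
+	 * list of this transaction. */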
+	if (jh->b_transaction && jh->b_jlist == jlist)
+		return;
+
+	/* The following list of buffer states needs to be consistent
+	 * with jbd_unexpected_dirty_buffer()'s handling of dirty
+	 * state. */
+
+	if (jlist == BJ_Metadata || jlist == BJ_Reserved || 
+	    jlist == BJ_Shadow || jlist == BJ_Forget) {
+		if (test_clear_buffer_dirty(bh) ||
+		    test_clear_buffer_jbddirty(bh))
+			was_dirty = 1;
+	}
+
+	if (jh->b_transaction)
+		__journal_temp_unlink_buffer(jh);
+	jh->b_transaction = transaction;
+
+	switch (jlist) {
+	case BJ_None:
+		J_ASSERT_JH(jh, !jh->b_committed_data);
+		J_ASSERT_JH(jh, !jh->b_frozen_data);
+		return;
+	case BJ_SyncData:
+		list = &transaction->t_sync_datalist;
+		break;
+	case BJ_Metadata:
+		transaction->t_nr_buffers++;
+		list = &transaction->t_buffers;
+		break;
+	case BJ_Forget:
+		list = &transaction->t_forget;
+		break;
+	case BJ_IO:
+		list = &transaction->t_iobuf_list;
+		break;
+	case BJ_Shadow:
+		list = &transaction->t_shadow_list;
+		break;
+	case BJ_LogCtl:
+		list = &transaction->t_log_list;
+		break;
+	case BJ_Reserved:
+		list = &transaction->t_reserved_list;
+		break;
+	case BJ_Locked:
+		list =  &transaction->t_locked_list;
+		break;
+	}
+
+	__blist_add_buffer(list, jh);
+	jh->b_jlist = jlist;
+
+	if (was_dirty)
+		set_buffer_jbddirty(bh);
+}
+
+void journal_file_buffer(struct journal_head *jh,
+				transaction_t *transaction, int jlist)
+{
+	jbd_lock_bh_state(jh2bh(jh));
+	spin_lock(&transaction->t_journal->j_list_lock);
+	__journal_file_buffer(jh, transaction, jlist);
+	spin_unlock(&transaction->t_journal->j_list_lock);
+	jbd_unlock_bh_state(jh2bh(jh));
+}
+
+/* 
+ * Remove a buffer from its current buffer list in preparation for
+ * dropping it from its current transaction entirely.  If the buffer has
+ * already started to be used by a subsequent transaction, refile the
+ * buffer on that transaction's metadata list.
+ *
+ * Called under journal->j_list_lock
+ *
+ * Called under jbd_lock_bh_state(jh2bh(jh))
+ */
+void __journal_refile_buffer(struct journal_head *jh)
+{
+	int was_dirty;
+	struct buffer_head *bh = jh2bh(jh);
+
+	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+	if (jh->b_transaction)
+		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
+
+	/* If the buffer is now unused, just drop it. */
+	if (jh->b_next_transaction == NULL) {
+		__journal_unfile_buffer(jh);
+		return;
+	}
+
+	/*
+	 * It has been modified by a later transaction: add it to the new
+	 * transaction's metadata list.
+	 */
+
+	was_dirty = test_clear_buffer_jbddirty(bh);
+	__journal_temp_unlink_buffer(jh);
+	jh->b_transaction = jh->b_next_transaction;
+	jh->b_next_transaction = NULL;
+	__journal_file_buffer(jh, jh->b_transaction, BJ_Metadata);
+	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
+
+	if (was_dirty)
+		set_buffer_jbddirty(bh);
+}
+
+/*
+ * For the unlocked version of this call, also make sure that any
+ * hanging journal_head is cleaned up if necessary.
+ *
+ * __journal_refile_buffer is usually called as part of a single locked
+ * operation on a buffer_head, in which the caller is probably going to
+ * be hooking the journal_head onto other lists.  In that case it is up
+ * to the caller to remove the journal_head if necessary.  For the
+ * unlocked journal_refile_buffer call, the caller isn't going to be
+ * doing anything else to the buffer so we need to do the cleanup
+ * ourselves to avoid a jh leak. 
+ *
+ * *** The journal_head may be freed by this call! ***
+ */
+void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+{
+	struct buffer_head *bh = jh2bh(jh);
+
+	jbd_lock_bh_state(bh);
+	spin_lock(&journal->j_list_lock);
+
+	__journal_refile_buffer(jh);
+	jbd_unlock_bh_state(bh);
+	journal_remove_journal_head(bh);
+
+	spin_unlock(&journal->j_list_lock);
+	__brelse(bh);
+}
diff --git a/fs/jffs/Makefile b/fs/jffs/Makefile
new file mode 100644
index 0000000..9c1c0bb
--- /dev/null
+++ b/fs/jffs/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the linux Journalling Flash FileSystem (JFFS) routines.
+#
+# $Id: Makefile,v 1.11 2001/09/25 20:59:41 dwmw2 Exp $
+#
+
+obj-$(CONFIG_JFFS_FS) += jffs.o
+
+jffs-y 				:= jffs_fm.o intrep.o inode-v23.o
+jffs-$(CONFIG_JFFS_PROC_FS)	+= jffs_proc.o
+jffs-objs			:= $(jffs-y)
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
new file mode 100644
index 0000000..bfbeb4c
--- /dev/null
+++ b/fs/jffs/inode-v23.c
@@ -0,0 +1,1847 @@
+/*
+ * JFFS -- Journalling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 1999, 2000  Axis Communications AB.
+ *
+ * Created by Finn Hakansson <finn@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: inode-v23.c,v 1.70 2001/10/02 09:16:02 dwmw2 Exp $
+ *
+ * Ported to Linux 2.3.x and MTD:
+ * Copyright (C) 2000  Alexander Larsson (alex@cendio.se), Cendio Systems AB
+ *
+ * Copyright 2000, 2001  Red Hat, Inc.
+ */
+
+/* inode.c -- Contains the code that is called from the VFS.  */
+
+/* TODO-ALEX:
+ * uid and gid are just 16 bit.
+ * jffs_file_write reads from user-space pointers without xx_from_user
+ * maybe other stuff does too.
+ */
+
+#include <linux/time.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/jffs.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/stat.h>
+#include <linux/blkdev.h>
+#include <linux/quotaops.h>
+#include <linux/highmem.h>
+#include <linux/vfs.h>
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include "jffs_fm.h"
+#include "intrep.h"
+#ifdef CONFIG_JFFS_PROC_FS
+#include "jffs_proc.h"
+#endif
+
+static int jffs_remove(struct inode *dir, struct dentry *dentry, int type);
+
+static struct super_operations jffs_ops;
+static struct file_operations jffs_file_operations;
+static struct inode_operations jffs_file_inode_operations;
+static struct file_operations jffs_dir_operations;
+static struct inode_operations jffs_dir_inode_operations;
+static struct address_space_operations jffs_address_operations;
+
+kmem_cache_t     *node_cache = NULL;
+kmem_cache_t     *fm_cache = NULL;
+
+/* Called by the VFS at mount time to initialize the whole file system.  */
+static int jffs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode *root_inode;
+	struct jffs_control *c;
+
+	sb->s_flags |= MS_NODIRATIME;
+
+	D1(printk(KERN_NOTICE "JFFS: Trying to mount device %s.\n",
+		  sb->s_id));
+
+	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
+		printk(KERN_WARNING "JFFS: Trying to mount a "
+		       "non-mtd device.\n");
+		return -EINVAL;
+	}
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_fs_info = (void *) 0;
+	sb->s_maxbytes = 0xFFFFFFFF;
+
+	/* Build the file system.  */
+	if (jffs_build_fs(sb) < 0) {
+		goto jffs_sb_err1;
+	}
+
+	/*
+	 * set up enough so that we can read an inode
+	 */
+	sb->s_magic = JFFS_MAGIC_SB_BITMASK;
+	sb->s_op = &jffs_ops;
+
+	root_inode = iget(sb, JFFS_MIN_INO);
+	if (!root_inode)
+	        goto jffs_sb_err2;
+
+	/* Get the root directory of this file system.  */
+	if (!(sb->s_root = d_alloc_root(root_inode))) {
+		goto jffs_sb_err3;
+	}
+
+	c = (struct jffs_control *) sb->s_fs_info;
+
+#ifdef CONFIG_JFFS_PROC_FS
+	/* Set up the jffs proc file system.  */
+	if (jffs_register_jffs_proc_dir(MINOR(sb->s_dev), c) < 0) {
+		printk(KERN_WARNING "JFFS: Failed to initialize the JFFS "
+			"proc file system for device %s.\n",
+			sb->s_id);
+	}
+#endif
+
+	/* Set the Garbage Collection thresholds */
+
+	/* GC if free space goes below 5% of the total size */
+	c->gc_minfree_threshold = c->fmc->flash_size / 20;
+
+	if (c->gc_minfree_threshold < c->fmc->sector_size)
+		c->gc_minfree_threshold = c->fmc->sector_size;
+
+	/* GC if dirty space exceeds 33% of the total size. */
+	c->gc_maxdirty_threshold = c->fmc->flash_size / 3;
+
+	if (c->gc_maxdirty_threshold < c->fmc->sector_size)
+		c->gc_maxdirty_threshold = c->fmc->sector_size;
+
+
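+	/* Start the garbage collection kernel thread for this mount. */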
+	c->thread_pid = kernel_thread (jffs_garbage_collect_thread, 
+				        (void *) c, 
+				        CLONE_KERNEL);
+	D1(printk(KERN_NOTICE "JFFS: GC thread pid=%d.\n", (int) c->thread_pid));
+
+	D1(printk(KERN_NOTICE "JFFS: Successfully mounted device %s.\n",
+	       sb->s_id));
+	return 0;
+
+jffs_sb_err3:
+	iput(root_inode);
+jffs_sb_err2:
+	jffs_cleanup_control((struct jffs_control *)sb->s_fs_info);
+jffs_sb_err1:
+	printk(KERN_WARNING "JFFS: Failed to mount device %s.\n",
+	       sb->s_id);
+	return -EINVAL;
+}
+
+
+/* This function is called when the file system is umounted.  */
+static void
+jffs_put_super(struct super_block *sb)
+{
+	struct jffs_control *c = (struct jffs_control *) sb->s_fs_info;
+
+	D2(printk("jffs_put_super()\n"));
+
+#ifdef CONFIG_JFFS_PROC_FS
+	jffs_unregister_jffs_proc_dir(c);
+#endif
+
+	if (c->gc_task) {
+		D1(printk (KERN_NOTICE "jffs_put_super(): Telling gc thread to die.\n"));
+		send_sig(SIGKILL, c->gc_task, 1);
+	}
+	wait_for_completion(&c->gc_thread_comp);
+
+	D1(printk (KERN_NOTICE "jffs_put_super(): Successfully waited on thread.\n"));
+
+	jffs_cleanup_control((struct jffs_control *)sb->s_fs_info);
+	D1(printk(KERN_NOTICE "JFFS: Successfully unmounted device %s.\n",
+	       sb->s_id));
+}
+
+
+/* This function is called when user commands like chmod, chgrp and
+   chown are executed.  System calls like truncate() result in a call
+   to this function.  */
+static int
+jffs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_fmcontrol *fmc;
+	struct jffs_file *f;
+	struct jffs_node *new_node;
+	int update_all;
+	int res = 0;
+	int recoverable = 0;
+
+	lock_kernel();
+
+	if ((res = inode_change_ok(inode, iattr))) 
+		goto out;
+
+	c = (struct jffs_control *)inode->i_sb->s_fs_info;
+	fmc = c->fmc;
+
+	D3(printk (KERN_NOTICE "notify_change(): down biglock\n"));
+	down(&fmc->biglock);
+
+	f = jffs_find_file(c, inode->i_ino);
+
+	ASSERT(if (!f) {
+		printk("jffs_setattr(): Invalid inode number: %lu\n",
+		       inode->i_ino);
+		D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
+		up(&fmc->biglock);
+		res = -EINVAL;
+		goto out;
+	});
+
+	D1(printk("***jffs_setattr(): file: \"%s\", ino: %u\n",
+		  f->name, f->ino));
+
+	update_all = iattr->ia_valid & ATTR_FORCE;
+
+	if ( (update_all || iattr->ia_valid & ATTR_SIZE)
+	     && (iattr->ia_size + 128 < f->size) ) {
+		/* We're shrinking the file by more than 128 bytes.
+		   We'll be able to GC and recover this space, so
+		   allow it to go into the reserved space. */
+		recoverable = 1;
+        }
+
+	if (!(new_node = jffs_alloc_node())) {
+		D(printk("jffs_setattr(): Allocation failed!\n"));
+		D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
+		up(&fmc->biglock);
+		res = -ENOMEM;
+		goto out;
+	}
+
+	new_node->data_offset = 0;
+	new_node->removed_size = 0;
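+	/* Seed the raw inode with the file's current attributes; the
+	 * fields selected by iattr->ia_valid are overridden below. */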
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = f->ino;
+	raw_inode.pino = f->pino;
+	raw_inode.mode = f->mode;
+	raw_inode.uid = f->uid;
+	raw_inode.gid = f->gid;
+	raw_inode.atime = f->atime;
+	raw_inode.mtime = f->mtime;
+	raw_inode.ctime = f->ctime;
+	raw_inode.dsize = 0;
+	raw_inode.offset = 0;
+	raw_inode.rsize = 0;
+	raw_inode.dsize = 0;
+	raw_inode.nsize = f->nsize;
+	raw_inode.nlink = f->nlink;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
+	if (update_all || iattr->ia_valid & ATTR_MODE) {
+		raw_inode.mode = iattr->ia_mode;
+		inode->i_mode = iattr->ia_mode;
+	}
+	if (update_all || iattr->ia_valid & ATTR_UID) {
+		raw_inode.uid = iattr->ia_uid;
+		inode->i_uid = iattr->ia_uid;
+	}
+	if (update_all || iattr->ia_valid & ATTR_GID) {
+		raw_inode.gid = iattr->ia_gid;
+		inode->i_gid = iattr->ia_gid;
+	}
+	if (update_all || iattr->ia_valid & ATTR_SIZE) {
+		int len;
+		D1(printk("jffs_notify_change(): Changing size "
+			  "to %lu bytes!\n", (long)iattr->ia_size));
+		raw_inode.offset = iattr->ia_size;
+
+		/* Calculate how many bytes need to be removed from
+		   the end.  */
+		if (f->size < iattr->ia_size) {
+			len = 0;
+		}
+		else {
+			len = f->size - iattr->ia_size;
+		}
+
+		raw_inode.rsize = len;
+
+		/* The updated node will be a removal node, with
+		   base at the new size and size of the nbr of bytes
+		   to be removed.  */
+		new_node->data_offset = iattr->ia_size;
+		new_node->removed_size = len;
+		inode->i_size = iattr->ia_size;
+		inode->i_blocks = (inode->i_size + 511) >> 9;
+
+		if (len) {
+			invalidate_inode_pages(inode->i_mapping);
+		}
+		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_mtime = inode->i_ctime;
+	}
+	if (update_all || iattr->ia_valid & ATTR_ATIME) {
+		raw_inode.atime = iattr->ia_atime.tv_sec;
+		inode->i_atime = iattr->ia_atime;
+	}
+	if (update_all || iattr->ia_valid & ATTR_MTIME) {
+		raw_inode.mtime = iattr->ia_mtime.tv_sec;
+		inode->i_mtime = iattr->ia_mtime;
+	}
+	if (update_all || iattr->ia_valid & ATTR_CTIME) {
+		raw_inode.ctime = iattr->ia_ctime.tv_sec;
+		inode->i_ctime = iattr->ia_ctime;
+	}
+
+	/* Write this node to the flash.  */
+	if ((res = jffs_write_node(c, new_node, &raw_inode, f->name, NULL, recoverable, f)) < 0) {
+		D(printk("jffs_notify_change(): The write failed!\n"));
+		jffs_free_node(new_node);
+		D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
+		up(&c->fmc->biglock);
+		goto out;
+	}
+
+	jffs_insert_node(c, f, &raw_inode, NULL, new_node);
+
+	mark_inode_dirty(inode);
+	D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
+	up(&c->fmc->biglock);
+out:
+	unlock_kernel();
+	return res;
+} /* jffs_notify_change()  */
+
+
+static struct inode *
+jffs_new_inode(const struct inode * dir, struct jffs_raw_inode *raw_inode,
+	       int * err)
+{
+	struct super_block * sb;
+	struct inode * inode;
+	struct jffs_control *c;
+	struct jffs_file *f;
+
+	sb = dir->i_sb;
+	inode = new_inode(sb);
+	if (!inode) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	c = (struct jffs_control *)sb->s_fs_info;
+
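+	/* Copy the attributes from the on-flash raw inode into the new
+	 * in-core inode. */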
+	inode->i_ino = raw_inode->ino;
+	inode->i_mode = raw_inode->mode;
+	inode->i_nlink = raw_inode->nlink;
+	inode->i_uid = raw_inode->uid;
+	inode->i_gid = raw_inode->gid;
+	inode->i_size = raw_inode->dsize;
+	inode->i_atime.tv_sec = raw_inode->atime;
+	inode->i_mtime.tv_sec = raw_inode->mtime;
+	inode->i_ctime.tv_sec = raw_inode->ctime;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = (inode->i_size + 511) >> 9;
+
+	f = jffs_find_file(c, raw_inode->ino);
+
+	inode->u.generic_ip = (void *)f;
+	insert_inode_hash(inode);
+
+	return inode;
+}
+
+/* Get statistics of the file system.  */
+static int
+jffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct jffs_control *c = (struct jffs_control *) sb->s_fs_info;
+	struct jffs_fmcontrol *fmc;
+
+	lock_kernel();
+
+	fmc = c->fmc;
+
+	D2(printk("jffs_statfs()\n"));
+
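+	/* Accounting sketch, as computed below: the reported total
+	   excludes the min_free_size reserve, and "free" space also
+	   counts dirty space, since the garbage collector can
+	   presumably reclaim it later. */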
+	buf->f_type = JFFS_MAGIC_SB_BITMASK;
+	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_blocks = (fmc->flash_size / PAGE_CACHE_SIZE)
+		       - (fmc->min_free_size / PAGE_CACHE_SIZE);
+	buf->f_bfree = (jffs_free_size1(fmc) + jffs_free_size2(fmc) +
+		       fmc->dirty_size - fmc->min_free_size)
+			       >> PAGE_CACHE_SHIFT;
+	buf->f_bavail = buf->f_bfree;
+
+	/* Find out how many files there are in the filesystem.  */
+	buf->f_files = jffs_foreach_file(c, jffs_file_count);
+	buf->f_ffree = buf->f_bfree;
+	/* buf->f_fsid = 0; */
+	buf->f_namelen = JFFS_MAX_NAME_LEN;
+
+	unlock_kernel();
+
+	return 0;
+}
+
+
+/* Rename a file.  */
+static int
+jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+	    struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_file *old_dir_f;
+	struct jffs_file *new_dir_f;
+	struct jffs_file *del_f;
+	struct jffs_file *f;
+	struct jffs_node *node;
+	struct inode *inode;
+	int result = 0;
+	__u32 rename_data = 0;
+
+	D2(printk("***jffs_rename()\n"));
+
+	D(printk("jffs_rename(): old_dir: 0x%p, old name: 0x%p, "
+		 "new_dir: 0x%p, new name: 0x%p\n",
+		 old_dir, old_dentry->d_name.name,
+		 new_dir, new_dentry->d_name.name));
+
+	lock_kernel();
+	c = (struct jffs_control *)old_dir->i_sb->s_fs_info;
+	ASSERT(if (!c) {
+		printk(KERN_ERR "jffs_rename(): The old_dir inode "
+		       "didn't have a reference to a jffs_file struct\n");
+		unlock_kernel();
+		return -EIO;
+	});
+
+	result = -ENOTDIR;
+	if (!(old_dir_f = (struct jffs_file *)old_dir->u.generic_ip)) {
+		D(printk("jffs_rename(): Old dir invalid.\n"));
+		goto jffs_rename_end;
+	}
+
+	/* Try to find the file to move.  */
+	result = -ENOENT;
+	if (!(f = jffs_find_child(old_dir_f, old_dentry->d_name.name,
+				  old_dentry->d_name.len))) {
+		goto jffs_rename_end;
+	}
+
+	/* Find the new directory.  */
+	result = -ENOTDIR;
+	if (!(new_dir_f = (struct jffs_file *)new_dir->u.generic_ip)) {
+		D(printk("jffs_rename(): New dir invalid.\n"));
+		goto jffs_rename_end;
+	}
+	D3(printk (KERN_NOTICE "rename(): down biglock\n"));
+	down(&c->fmc->biglock);
+	/* Create a node and initialize as much as needed.  */
+	result = -ENOMEM;
+	if (!(node = jffs_alloc_node())) {
+		D(printk("jffs_rename(): Allocation failed: node == 0\n"));
+		goto jffs_rename_end;
+	}
+	node->data_offset = 0;
+	node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = f->ino;
+	raw_inode.pino = new_dir_f->ino;
+/*  	raw_inode.version = f->highest_version + 1; */
+	raw_inode.mode = f->mode;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = current->fsgid;
+#if 0
+	raw_inode.uid = f->uid;
+	raw_inode.gid = f->gid;
+#endif
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = raw_inode.atime;
+	raw_inode.ctime = f->ctime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = 0;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = new_dentry->d_name.len;
+	raw_inode.nlink = f->nlink;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
+	/* See if there already exists a file with the same name as
+	   new_name.  */
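+	/* If it does, the rename node written below carries the displaced
+	   file's inode number as its data (rename == 1, dsize ==
+	   sizeof(__u32)), so that the old file can be removed once the
+	   new node is safely on flash (see the jffs_remove() call below). */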
+	if ((del_f = jffs_find_child(new_dir_f, new_dentry->d_name.name,
+				     new_dentry->d_name.len))) {
+		raw_inode.rename = 1;
+		raw_inode.dsize = sizeof(__u32);
+		rename_data = del_f->ino;
+	}
+
+	/* Write the new node to the flash memory.  */
+	if ((result = jffs_write_node(c, node, &raw_inode,
+				      new_dentry->d_name.name,
+				      (unsigned char*)&rename_data, 0, f)) < 0) {
+		D(printk("jffs_rename(): Failed to write node to flash.\n"));
+		jffs_free_node(node);
+		goto jffs_rename_end;
+	}
+	raw_inode.dsize = 0;
+
+	if (raw_inode.rename) {
+		/* The file with the same name must be deleted.  */
+		//FIXME deadlock	        down(&c->fmc->gclock);
+		if ((result = jffs_remove(new_dir, new_dentry,
+					  del_f->mode)) < 0) {
+			/* This is really bad.  */
+			printk(KERN_ERR "JFFS: An error occurred in "
+			       "rename().\n");
+		}
+		//		up(&c->fmc->gclock);
+	}
+
+	if (old_dir_f != new_dir_f) {
+		/* Remove the file from its old position in the
+		   filesystem tree.  */
+		jffs_unlink_file_from_tree(f);
+	}
+
+	/* Insert the new node into the file system.  */
+	if ((result = jffs_insert_node(c, f, &raw_inode,
+				       new_dentry->d_name.name, node)) < 0) {
+		D(printk(KERN_ERR "jffs_rename(): jffs_insert_node() "
+			 "failed!\n"));
+	}
+
+	if (old_dir_f != new_dir_f) {
+		/* Insert the file to its new position in the
+		   file system.  */
+		jffs_insert_file_into_tree(f);
+	}
+
+	/* This is a kind of update of the inode we're about to make
+	   here.  This is what they do in ext2fs.  Kind of.  */
+	if ((inode = iget(new_dir->i_sb, f->ino))) {
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		iput(inode);
+	}
+
+jffs_rename_end:
+	D3(printk (KERN_NOTICE "rename(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return result;
+} /* jffs_rename()  */
+
+
+/* Read the contents of a directory.  Used by programs like `ls'
+   for instance.  */
+static int
+jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct jffs_file *f;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct jffs_control *c = (struct jffs_control *)inode->i_sb->s_fs_info;
+	int j;
+	int ddino;
+	lock_kernel();
+	D3(printk (KERN_NOTICE "readdir(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp));
+	if (filp->f_pos == 0) {
+		D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino));
+		if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) {
+			D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+			up(&c->fmc->biglock);
+			unlock_kernel();
+			return 0;
+		}
+		filp->f_pos = 1;
+	}
+	if (filp->f_pos == 1) {
+		if (inode->i_ino == JFFS_MIN_INO) {
+			ddino = JFFS_MIN_INO;
+		}
+		else {
+			ddino = ((struct jffs_file *)
+				 inode->u.generic_ip)->pino;
+		}
+		D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
+		if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
+			D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+			up(&c->fmc->biglock);
+			unlock_kernel();
+			return 0;
+		}
+		filp->f_pos++;
+	}
+	f = ((struct jffs_file *)inode->u.generic_ip)->children;
+
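+	/* Directory positions 0 and 1 are "." and ".." (handled above);
+	   from position 2 onwards f_pos indexes into the list of
+	   non-deleted children, so skip ahead accordingly. */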
+	j = 2;
+	while (f && (f->deleted || j++ < filp->f_pos)) {
+		f = f->sibling_next;
+	}
+
+	while (f) {
+		D3(printk("jffs_readdir(): \"%s\" ino: %u\n",
+			  (f->name ? f->name : ""), f->ino));
+		if (filldir(dirent, f->name, f->nsize,
+			    filp->f_pos , f->ino, DT_UNKNOWN) < 0) {
+		        D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+			up(&c->fmc->biglock);
+			unlock_kernel();
+			return 0;
+		}
+		filp->f_pos++;
+		do {
+			f = f->sibling_next;
+		} while(f && f->deleted);
+	}
+	D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return filp->f_pos;
+} /* jffs_readdir()  */
+
+
+/* Find a file in a directory. If the file exists, return its
+   corresponding dentry.  */
+static struct dentry *
+jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct jffs_file *d;
+	struct jffs_file *f;
+	struct jffs_control *c = (struct jffs_control *)dir->i_sb->s_fs_info;
+	int len;
+	int r = 0;
+	const char *name;
+	struct inode *inode = NULL;
+
+	len = dentry->d_name.len;
+	name = dentry->d_name.name;
+
+	lock_kernel();
+
+	D3({
+		char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+		memcpy(s, name, len);
+		s[len] = '\0';
+		printk("jffs_lookup(): dir: 0x%p, name: \"%s\"\n", dir, s);
+		kfree(s);
+	});
+
+	D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	r = -ENAMETOOLONG;
+	if (len > JFFS_MAX_NAME_LEN) {
+		goto jffs_lookup_end;
+	}
+
+	r = -EACCES;
+	if (!(d = (struct jffs_file *)dir->u.generic_ip)) {
+		D(printk("jffs_lookup(): No such inode! (%lu)\n",
+			 dir->i_ino));
+		goto jffs_lookup_end;
+	}
+
+	/* Get the corresponding inode to the file.  */
+
+	/* iget calls jffs_read_inode, so we need to drop the biglock
+           before calling iget.  Unfortunately, the GC has a tendency
+           to sneak in here, because iget sometimes calls schedule ().
+	*/
+
+	if ((len == 1) && (name[0] == '.')) {
+		D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+		up(&c->fmc->biglock);
+		if (!(inode = iget(dir->i_sb, d->ino))) {
+			D(printk("jffs_lookup(): . iget() ==> NULL\n"));
+			goto jffs_lookup_end_no_biglock;
+		}
+		D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
+		down(&c->fmc->biglock);
+	} else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) {
+	        D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+		up(&c->fmc->biglock);
+ 		if (!(inode = iget(dir->i_sb, d->pino))) {
+			D(printk("jffs_lookup(): .. iget() ==> NULL\n"));
+			goto jffs_lookup_end_no_biglock;
+		}
+		D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
+		down(&c->fmc->biglock);
+	} else if ((f = jffs_find_child(d, name, len))) {
+	        D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+		up(&c->fmc->biglock);
+		if (!(inode = iget(dir->i_sb, f->ino))) {
+			D(printk("jffs_lookup(): iget() ==> NULL\n"));
+			goto jffs_lookup_end_no_biglock;
+		}
+		D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
+		down(&c->fmc->biglock);
+	} else {
+		D3(printk("jffs_lookup(): Couldn't find the file. "
+			  "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n",
+			  f, name, d, d->ino));
+		inode = NULL;
+	}
+
+	d_add(dentry, inode);
+	D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return NULL;
+
+jffs_lookup_end:
+	D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+	up(&c->fmc->biglock);
+
+jffs_lookup_end_no_biglock:
+	unlock_kernel();
+	return ERR_PTR(r);
+} /* jffs_lookup()  */
+
+
+/* Try to read a page of data from a file.  */
+static int
+jffs_do_readpage_nolock(struct file *file, struct page *page)
+{
+	void *buf;
+	unsigned long read_len;
+	int result;
+	struct inode *inode = (struct inode*)page->mapping->host;
+	struct jffs_file *f = (struct jffs_file *)inode->u.generic_ip;
+	struct jffs_control *c = (struct jffs_control *)inode->i_sb->s_fs_info;
+	int r;
+	loff_t offset;
+
+	D2(printk("***jffs_readpage(): file = \"%s\", page->index = %lu\n",
+		  (f->name ? f->name : ""), (long)page->index));
+
+	get_page(page);
+	/* Don't SetPageLocked(page), should be locked already */
+	ClearPageUptodate(page);
+	ClearPageError(page);
+
+	D3(printk (KERN_NOTICE "readpage(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	read_len = 0;
+	result = 0;
+	offset = page->index << PAGE_CACHE_SHIFT;
+
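+	/* Map the page, copy up to one page of file data into it, and
+	   zero-fill whatever part of the page lies beyond the data that
+	   was actually read. */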
+	kmap(page);
+	buf = page_address(page);
+	if (offset < inode->i_size) {
+		read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
+		r = jffs_read_data(f, buf, offset, read_len);
+		if (r != read_len) {
+			result = -EIO;
+			D(
+			        printk("***jffs_readpage(): Read error! "
+				       "Wanted to read %lu bytes but only "
+				       "read %d bytes.\n", read_len, r);
+			  );
+		}
+
+	}
+
+	/* This handles the case of partial or no read in above */
+	if(read_len < PAGE_SIZE)
+	        memset(buf + read_len, 0, PAGE_SIZE - read_len);
+	flush_dcache_page(page);
+	kunmap(page);
+
+	D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
+	up(&c->fmc->biglock);
+
+	if (result) {
+		SetPageError(page);
+	} else {
+		SetPageUptodate(page);
+	}
+
+	page_cache_release(page);
+
+	D3(printk("jffs_readpage(): Leaving...\n"));
+
+	return result;
+} /* jffs_do_readpage_nolock()  */
+
+static int jffs_readpage(struct file *file, struct page *page)
+{
+	int ret = jffs_do_readpage_nolock(file, page);
+	unlock_page(page);
+	return ret;
+}
+
+/* Create a new directory.  */
+static int
+jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_node *node;
+	struct jffs_file *dir_f;
+	struct inode *inode;
+	int dir_mode;
+	int result = 0;
+	int err;
+
+	D1({
+	        int len = dentry->d_name.len;
+		char *_name = (char *) kmalloc(len + 1, GFP_KERNEL);
+		memcpy(_name, dentry->d_name.name, len);
+		_name[len] = '\0';
+		printk("***jffs_mkdir(): dir = 0x%p, name = \"%s\", "
+		       "len = %d, mode = 0x%08x\n", dir, _name, len, mode);
+		kfree(_name);
+	});
+
+	lock_kernel();
+	dir_f = (struct jffs_file *)dir->u.generic_ip;
+
+	ASSERT(if (!dir_f) {
+		printk(KERN_ERR "jffs_mkdir(): No reference to a "
+		       "jffs_file struct in inode.\n");
+		unlock_kernel();
+		return -EIO;
+	});
+
+	c = dir_f->c;
+	D3(printk (KERN_NOTICE "mkdir(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX)
+			      & ~current->fs->umask);
+	if (dir->i_mode & S_ISGID) {
+		dir_mode |= S_ISGID;
+	}
+
+	/* Create a node and initialize it as much as needed.  */
+	if (!(node = jffs_alloc_node())) {
+		D(printk("jffs_mkdir(): Allocation failed: node == 0\n"));
+		result = -ENOMEM;
+		goto jffs_mkdir_end;
+	}
+	node->data_offset = 0;
+	node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = c->next_ino++;
+	raw_inode.pino = dir_f->ino;
+	raw_inode.version = 1;
+	raw_inode.mode = dir_mode;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	/*	raw_inode.gid = current->fsgid; */
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = raw_inode.atime;
+	raw_inode.ctime = raw_inode.atime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = 0;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = dentry->d_name.len;
+	raw_inode.nlink = 1;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
+	/* Write the new node to the flash.  */
+	if ((result = jffs_write_node(c, node, &raw_inode,
+				     dentry->d_name.name, NULL, 0, NULL)) < 0) {
+		D(printk("jffs_mkdir(): jffs_write_node() failed.\n"));
+		jffs_free_node(node);
+		goto jffs_mkdir_end;
+	}
+
+	/* Insert the new node into the file system.  */
+	if ((result = jffs_insert_node(c, NULL, &raw_inode, dentry->d_name.name,
+				       node)) < 0) {
+		goto jffs_mkdir_end;
+	}
+
+	inode = jffs_new_inode(dir, &raw_inode, &err);
+	if (inode == NULL) {
+		result = err;
+		goto jffs_mkdir_end;
+	}
+
+	inode->i_op = &jffs_dir_inode_operations;
+	inode->i_fop = &jffs_dir_operations;
+
+	mark_inode_dirty(dir);
+	d_instantiate(dentry, inode);
+
+	result = 0;
+jffs_mkdir_end:
+	D3(printk (KERN_NOTICE "mkdir(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return result;
+} /* jffs_mkdir()  */
+
+
+/* Remove a directory.  */
+static int
+jffs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct jffs_control *c = (struct jffs_control *)dir->i_sb->s_fs_info;
+	int ret;
+	D3(printk("***jffs_rmdir()\n"));
+	D3(printk (KERN_NOTICE "rmdir(): down biglock\n"));
+	lock_kernel();
+	down(&c->fmc->biglock);
+	ret = jffs_remove(dir, dentry, S_IFDIR);
+	D3(printk (KERN_NOTICE "rmdir(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return ret;
+}
+
+
+/* Remove any kind of file except for directories.  */
+static int
+jffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct jffs_control *c = (struct jffs_control *)dir->i_sb->s_fs_info;
+	int ret; 
+
+	lock_kernel();
+	D3(printk("***jffs_unlink()\n"));
+	D3(printk (KERN_NOTICE "unlink(): down biglock\n"));
+	down(&c->fmc->biglock);
+	ret = jffs_remove(dir, dentry, 0);
+	D3(printk (KERN_NOTICE "unlink(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return ret;
+}
+
+
+/* Remove a JFFS entry, i.e. plain files, directories, etc.  Here we
+   shouldn't test for free space on the device.  */
+static int
+jffs_remove(struct inode *dir, struct dentry *dentry, int type)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_file *dir_f; /* The file-to-remove's parent.  */
+	struct jffs_file *del_f; /* The file to remove.  */
+	struct jffs_node *del_node;
+	struct inode *inode = NULL;
+	int result = 0;
+
+	D1({
+		int len = dentry->d_name.len;
+		const char *name = dentry->d_name.name;
+		char *_name = (char *) kmalloc(len + 1, GFP_KERNEL);
+		memcpy(_name, name, len);
+		_name[len] = '\0';
+		printk("***jffs_remove(): file = \"%s\", ino = %lu\n", _name, dentry->d_inode->i_ino);
+		kfree(_name);
+	});
+
+	dir_f = (struct jffs_file *) dir->u.generic_ip;
+	c = dir_f->c;
+
+	result = -ENOENT;
+	if (!(del_f = jffs_find_child(dir_f, dentry->d_name.name,
+				      dentry->d_name.len))) {
+		D(printk("jffs_remove(): jffs_find_child() failed.\n"));
+		goto jffs_remove_end;
+	}
+
+	if (S_ISDIR(type)) {
+		struct jffs_file *child = del_f->children;
+		while(child) {
+			if( !child->deleted ) {
+				result = -ENOTEMPTY;
+				goto jffs_remove_end;
+			}
+			child = child->sibling_next;
+		}
+	}            
+	else if (S_ISDIR(del_f->mode)) {
+		D(printk("jffs_remove(): node is a directory "
+			 "but it shouldn't be.\n"));
+		result = -EPERM;
+		goto jffs_remove_end;
+	}
+
+	inode = dentry->d_inode;
+
+	result = -EIO;
+	if (del_f->ino != inode->i_ino)
+		goto jffs_remove_end;
+
+	if (!inode->i_nlink) {
+		printk("Deleting nonexistent file inode: %lu, nlink: %d\n",
+		       inode->i_ino, inode->i_nlink);
+		inode->i_nlink=1;
+	}
+
+	/* Create a node for the deletion.  */
+	result = -ENOMEM;
+	if (!(del_node = jffs_alloc_node())) {
+		D(printk("jffs_remove(): Allocation failed!\n"));
+		goto jffs_remove_end;
+	}
+	del_node->data_offset = 0;
+	del_node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = del_f->ino;
+	raw_inode.pino = del_f->pino;
+/*  	raw_inode.version = del_f->highest_version + 1; */
+	raw_inode.mode = del_f->mode;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = current->fsgid;
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = del_f->mtime;
+	raw_inode.ctime = raw_inode.atime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = 0;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = 0;
+	raw_inode.nlink = del_f->nlink;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 1;
+
+	/* Write the new node to the flash memory.  */
+	if (jffs_write_node(c, del_node, &raw_inode, NULL, NULL, 1, del_f) < 0) {
+		jffs_free_node(del_node);
+		result = -EIO;
+		goto jffs_remove_end;
+	}
+
+	/* Update the file.  This operation will make the file disappear
+	   from the in-memory file system structures.  */
+	jffs_insert_node(c, del_f, &raw_inode, NULL, del_node);
+
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	inode->i_nlink--;
+	inode->i_ctime = dir->i_ctime;
+	mark_inode_dirty(inode);
+
+	d_delete(dentry);	/* This also frees the inode */
+
+	result = 0;
+jffs_remove_end:
+	return result;
+} /* jffs_remove()  */
+
+
+static int
+jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_file *dir_f;
+	struct jffs_node *node = NULL;
+	struct jffs_control *c;
+	struct inode *inode;
+	int result = 0;
+	u16 data = old_encode_dev(rdev);
+	int err;
+
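+	/* The device number is stored old-style in two bytes of node data
+	   (hence dsize = 2 below); jffs_read_inode() reads those two bytes
+	   back and decodes them with old_decode_dev(). */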
+	D1(printk("***jffs_mknod()\n"));
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+	lock_kernel();
+	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	c = dir_f->c;
+
+	D3(printk (KERN_NOTICE "mknod(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	/* Create and initialize a new node.  */
+	if (!(node = jffs_alloc_node())) {
+		D(printk("jffs_mknod(): Allocation failed!\n"));
+		result = -ENOMEM;
+		goto jffs_mknod_err;
+	}
+	node->data_offset = 0;
+	node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = c->next_ino++;
+	raw_inode.pino = dir_f->ino;
+	raw_inode.version = 1;
+	raw_inode.mode = mode;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	/*	raw_inode.gid = current->fsgid; */
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = raw_inode.atime;
+	raw_inode.ctime = raw_inode.atime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = 2;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = dentry->d_name.len;
+	raw_inode.nlink = 1;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
+	/* Write the new node to the flash.  */
+	if ((err = jffs_write_node(c, node, &raw_inode, dentry->d_name.name,
+				   (unsigned char *)&data, 0, NULL)) < 0) {
+		D(printk("jffs_mknod(): jffs_write_node() failed.\n"));
+		result = err;
+		goto jffs_mknod_err;
+	}
+
+	/* Insert the new node into the file system.  */
+	if ((err = jffs_insert_node(c, NULL, &raw_inode, dentry->d_name.name,
+				    node)) < 0) {
+		result = err;
+		goto jffs_mknod_end;
+	}
+
+	inode = jffs_new_inode(dir, &raw_inode, &err);
+	if (inode == NULL) {
+		result = err;
+		goto jffs_mknod_end;
+	}
+
+	init_special_inode(inode, mode, rdev);
+
+	d_instantiate(dentry, inode);
+
+	goto jffs_mknod_end;
+
+jffs_mknod_err:
+	if (node) {
+		jffs_free_node(node);
+	}
+
+jffs_mknod_end:
+	D3(printk (KERN_NOTICE "mknod(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return result;
+} /* jffs_mknod()  */
+
+
+static int
+jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_file *dir_f;
+	struct jffs_node *node;
+	struct inode *inode;
+
+	int symname_len = strlen(symname);
+	int err;
+
+	lock_kernel();
+	D1({
+		int len = dentry->d_name.len; 
+		char *_name = (char *)kmalloc(len + 1, GFP_KERNEL);
+		char *_symname = (char *)kmalloc(symname_len + 1, GFP_KERNEL);
+		memcpy(_name, dentry->d_name.name, len);
+		_name[len] = '\0';
+		memcpy(_symname, symname, symname_len);
+		_symname[symname_len] = '\0';
+		printk("***jffs_symlink(): dir = 0x%p, "
+		       "dentry->dname.name = \"%s\", "
+		       "symname = \"%s\"\n", dir, _name, _symname);
+		kfree(_name);
+		kfree(_symname);
+	});
+
+	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	ASSERT(if (!dir_f) {
+		printk(KERN_ERR "jffs_symlink(): No reference to a "
+		       "jffs_file struct in inode.\n");
+		unlock_kernel();
+		return -EIO;
+	});
+
+	c = dir_f->c;
+
+	/* Create a node and initialize it as much as needed.  */
+	if (!(node = jffs_alloc_node())) {
+		D(printk("jffs_symlink(): Allocation failed: node = NULL\n"));
+		unlock_kernel();
+		return -ENOMEM;
+	}
+	D3(printk (KERN_NOTICE "symlink(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	node->data_offset = 0;
+	node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = c->next_ino++;
+	raw_inode.pino = dir_f->ino;
+	raw_inode.version = 1;
+	raw_inode.mode = S_IFLNK | S_IRWXUGO;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = raw_inode.atime;
+	raw_inode.ctime = raw_inode.atime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = symname_len;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = dentry->d_name.len;
+	raw_inode.nlink = 1;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
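+	/* The symlink target string itself becomes the node's data
+	   (dsize = symname_len above); link resolution then goes through
+	   page_symlink_inode_operations and the normal jffs_readpage()
+	   path. */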
+	/* Write the new node to the flash.  */
+	if ((err = jffs_write_node(c, node, &raw_inode, dentry->d_name.name,
+				   (const unsigned char *)symname, 0, NULL)) < 0) {
+		D(printk("jffs_symlink(): jffs_write_node() failed.\n"));
+		jffs_free_node(node);
+		goto jffs_symlink_end;
+	}
+
+	/* Insert the new node into the file system.  */
+	if ((err = jffs_insert_node(c, NULL, &raw_inode, dentry->d_name.name,
+				    node)) < 0) {
+		goto jffs_symlink_end;
+	}
+
+	inode = jffs_new_inode(dir, &raw_inode, &err);
+	if (inode == NULL) {
+		goto jffs_symlink_end;
+	}
+	err = 0;
+	inode->i_op = &page_symlink_inode_operations;
+	inode->i_mapping->a_ops = &jffs_address_operations;
+
+	d_instantiate(dentry, inode);
+ jffs_symlink_end:
+	D3(printk (KERN_NOTICE "symlink(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return err;
+} /* jffs_symlink()  */
+
+
+/* Create an inode inside a JFFS directory (dir) and return it.
+ *
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate().
+ */
+static int
+jffs_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_node *node;
+	struct jffs_file *dir_f; /* JFFS representation of the directory.  */
+	struct inode *inode;
+	int err;
+
+	lock_kernel();
+	D1({
+		int len = dentry->d_name.len;
+		char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+		memcpy(s, dentry->d_name.name, len);
+		s[len] = '\0';
+		printk("jffs_create(): dir: 0x%p, name: \"%s\"\n", dir, s);
+		kfree(s);
+	});
+
+	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	ASSERT(if (!dir_f) {
+		printk(KERN_ERR "jffs_create(): No reference to a "
+		       "jffs_file struct in inode.\n");
+		unlock_kernel();
+		return -EIO;
+	});
+
+	c = dir_f->c;
+
+	/* Create a node and initialize as much as needed.  */
+	if (!(node = jffs_alloc_node())) {
+		D(printk("jffs_create(): Allocation failed: node == 0\n"));
+		unlock_kernel();
+		return -ENOMEM;
+	}
+	D3(printk (KERN_NOTICE "create(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	node->data_offset = 0;
+	node->removed_size = 0;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = c->next_ino++;
+	raw_inode.pino = dir_f->ino;
+	raw_inode.version = 1;
+	raw_inode.mode = mode;
+	raw_inode.uid = current->fsuid;
+	raw_inode.gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	raw_inode.atime = get_seconds();
+	raw_inode.mtime = raw_inode.atime;
+	raw_inode.ctime = raw_inode.atime;
+	raw_inode.offset = 0;
+	raw_inode.dsize = 0;
+	raw_inode.rsize = 0;
+	raw_inode.nsize = dentry->d_name.len;
+	raw_inode.nlink = 1;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = 0;
+
+	/* Write the new node to the flash.  */
+	if ((err = jffs_write_node(c, node, &raw_inode,
+				   dentry->d_name.name, NULL, 0, NULL)) < 0) {
+		D(printk("jffs_create(): jffs_write_node() failed.\n"));
+		jffs_free_node(node);
+		goto jffs_create_end;
+	}
+
+	/* Insert the new node into the file system.  */
+	if ((err = jffs_insert_node(c, NULL, &raw_inode, dentry->d_name.name,
+				    node)) < 0) {
+		goto jffs_create_end;
+	}
+
+	/* Initialize an inode.  */
+	inode = jffs_new_inode(dir, &raw_inode, &err);
+	if (inode == NULL) {
+		goto jffs_create_end;
+	}
+	err = 0;
+	inode->i_op = &jffs_file_inode_operations;
+	inode->i_fop = &jffs_file_operations;
+	inode->i_mapping->a_ops = &jffs_address_operations;
+	inode->i_mapping->nrpages = 0;
+
+	d_instantiate(dentry, inode);
+ jffs_create_end:
+	D3(printk (KERN_NOTICE "create(): up biglock\n"));
+	up(&c->fmc->biglock);
+	unlock_kernel();
+	return err;
+} /* jffs_create()  */
+
+
+/* Write, append or rewrite data to an existing file.  */
+static ssize_t
+jffs_file_write(struct file *filp, const char *buf, size_t count,
+		loff_t *ppos)
+{
+	struct jffs_raw_inode raw_inode;
+	struct jffs_control *c;
+	struct jffs_file *f;
+	struct jffs_node *node;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int recoverable = 0;
+	size_t written = 0;
+	__u32 thiscount = count;
+	loff_t pos = *ppos;
+	int err;
+
+
+	D2(printk("***jffs_file_write(): inode: 0x%p (ino: %lu), "
+		  "filp: 0x%p, buf: 0x%p, count: %d\n",
+		  inode, inode->i_ino, filp, buf, count));
+
+#if 0
+	if (inode->i_sb->s_flags & MS_RDONLY) {
+		D(printk("jffs_file_write(): MS_RDONLY\n"));
+		err = -EROFS;
+		goto out_isem;
+	}
+#endif	
+	err = -EINVAL;
+
+	if (!S_ISREG(inode->i_mode)) {
+		D(printk("jffs_file_write(): inode->i_mode == 0x%08x\n",
+				inode->i_mode));
+		goto out_isem;
+	}
+
+	if (!(f = (struct jffs_file *)inode->u.generic_ip)) {
+		D(printk("jffs_file_write(): inode->u.generic_ip = 0x%p\n",
+				inode->u.generic_ip));
+		goto out_isem;
+	}
+
+	c = f->c;
+
+	/*
+	 * This will never trigger with sane page sizes.  Leave it in
+	 * anyway, since I'm thinking about how to merge larger writes
+	 * (the current idea is to poke a thread that does the actual
+	 * I/O and starts by doing a down(&inode->i_sem); then we
+	 * would need to get the page cache pages and have a list of
+	 * I/O requests and do write-merging here).
+	 * -- prumpf
+	 */
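+	/* Each node written below can carry at most max_chunk_size bytes
+	   including its header, so the data payload per pass is capped at
+	   max_chunk_size - sizeof(struct jffs_raw_inode). */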
+	thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
+
+	D3(printk (KERN_NOTICE "file_write(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	/* Urgh. POSIX says we can do short writes if we feel like it. 
+	 * In practice, we can't. Nothing will cope. So we loop until
+	 * we're done.
+	 *
+	 * <_Anarchy_> posix and reality are not interconnected on this issue
+	 */
+	while (count) {
+		/* Things are going to be written so we could allocate and
+		   initialize the necessary data structures now.  */
+		if (!(node = jffs_alloc_node())) {
+			D(printk("jffs_file_write(): node == 0\n"));
+			err = -ENOMEM;
+			goto out;
+		}
+
+		node->data_offset = pos;
+		node->removed_size = 0;
+
+		/* Initialize the raw inode.  */
+		raw_inode.magic = JFFS_MAGIC_BITMASK;
+		raw_inode.ino = f->ino;
+		raw_inode.pino = f->pino;
+
+		raw_inode.mode = f->mode;
+
+		raw_inode.uid = f->uid;
+		raw_inode.gid = f->gid;
+		raw_inode.atime = get_seconds();
+		raw_inode.mtime = raw_inode.atime;
+		raw_inode.ctime = f->ctime;
+		raw_inode.offset = pos;
+		raw_inode.dsize = thiscount;
+		raw_inode.rsize = 0;
+		raw_inode.nsize = f->nsize;
+		raw_inode.nlink = f->nlink;
+		raw_inode.spare = 0;
+		raw_inode.rename = 0;
+		raw_inode.deleted = 0;
+
+		if (pos < f->size) {
+			node->removed_size = raw_inode.rsize = min(thiscount, (__u32)(f->size - pos));
+
+			/* If this node is going entirely over the top of old data,
+			   we can allow it to go into the reserved space, because
+			   we know that GC can reclaim the space later.
+			*/
+			if (pos + thiscount < f->size) {
+				/* If all the data we're overwriting are _real_,
+				   not just holes, then:
+				   recoverable = 1;
+				*/
+			}
+		}
+
+		/* Write the new node to the flash.  */
+		/* NOTE: We would be quite happy if jffs_write_node() wrote a
+		   smaller node than we were expecting. There's no need for it
+		   to waste the space at the end of the flash just because it's
+		   a little smaller than what we asked for. But that's a whole
+		   new can of worms which I'm not going to open this week. 
+		   -- dwmw2.
+		*/
+		if ((err = jffs_write_node(c, node, &raw_inode, f->name,
+					   (const unsigned char *)buf,
+					   recoverable, f)) < 0) {
+			D(printk("jffs_file_write(): jffs_write_node() failed.\n"));
+			jffs_free_node(node);
+			goto out;
+		}
+
+		written += err;
+		buf += err;
+		count -= err;
+		pos += err;
+
+		/* Insert the new node into the file system.  */
+		if ((err = jffs_insert_node(c, f, &raw_inode, NULL, node)) < 0) {
+			goto out;
+		}
+
+		D3(printk("jffs_file_write(): new f_pos %ld.\n", (long)pos));
+
+		thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
+	}
+ out:
+	D3(printk (KERN_NOTICE "file_write(): up biglock\n"));
+	up(&c->fmc->biglock);
+
+	/* Fix things in the real inode.  */
+	if (pos > inode->i_size) {
+		inode->i_size = pos;
+		inode->i_blocks = (inode->i_size + 511) >> 9;
+	}
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	invalidate_inode_pages(inode->i_mapping);
+
+ out_isem:
+	return err;
+} /* jffs_file_write()  */
+
+static int
+jffs_prepare_write(struct file *filp, struct page *page,
+                  unsigned from, unsigned to)
+{
+	/* FIXME: we should detect some error conditions here */
+
+	/* Bugger that. We should make sure the page is uptodate */
+	if (!PageUptodate(page) && (from || to < PAGE_CACHE_SIZE))
+		return jffs_do_readpage_nolock(filp, page);
+
+	return 0;
+} /* jffs_prepare_write() */
+
+static int
+jffs_commit_write(struct file *filp, struct page *page,
+                 unsigned from, unsigned to)
+{
+       void *addr = page_address(page) + from;
+       /* XXX: PAGE_CACHE_SHIFT or PAGE_SHIFT */
+       loff_t pos = (page->index<<PAGE_CACHE_SHIFT) + from;
+
+       return jffs_file_write(filp, addr, to-from, &pos);
+} /* jffs_commit_write() */
+
+/* This is our ioctl() routine.  */
+static int
+jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+	   unsigned long arg)
+{
+	struct jffs_control *c;
+	int ret = 0;
+
+	D2(printk("***jffs_ioctl(): cmd = 0x%08x, arg = 0x%08lx\n",
+		  cmd, arg));
+
+	if (!(c = (struct jffs_control *)inode->i_sb->s_fs_info)) {
+		printk(KERN_ERR "JFFS: Bad inode in ioctl() call. "
+		       "(cmd = 0x%08x)\n", cmd);
+		return -EIO;
+	}
+	D3(printk (KERN_NOTICE "ioctl(): down biglock\n"));
+	down(&c->fmc->biglock);
+
+	switch (cmd) {
+	case JFFS_PRINT_HASH:
+		jffs_print_hash_table(c);
+		break;
+	case JFFS_PRINT_TREE:
+		jffs_print_tree(c->root, 0);
+		break;
+	case JFFS_GET_STATUS:
+		{
+			struct jffs_flash_status fst;
+			struct jffs_fmcontrol *fmc = c->fmc;
+			printk("Flash status -- ");
+			if (!access_ok(VERIFY_WRITE,
+				       (struct jffs_flash_status __user *)arg,
+				       sizeof(struct jffs_flash_status))) {
+				D(printk("jffs_ioctl(): Bad arg in "
+					 "JFFS_GET_STATUS ioctl!\n"));
+				ret = -EFAULT;
+				break;
+			}
+			fst.size = fmc->flash_size;
+			fst.used = fmc->used_size;
+			fst.dirty = fmc->dirty_size;
+			fst.begin = fmc->head->offset;
+			fst.end = fmc->tail->offset + fmc->tail->size;
+			printk("size: %d, used: %d, dirty: %d, "
+			       "begin: %d, end: %d\n",
+			       fst.size, fst.used, fst.dirty,
+			       fst.begin, fst.end);
+			if (copy_to_user((struct jffs_flash_status __user *)arg,
+					 &fst,
+					 sizeof(struct jffs_flash_status))) {
+				ret = -EFAULT;
+			}
+		}
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+	D3(printk (KERN_NOTICE "ioctl(): up biglock\n"));
+	up(&c->fmc->biglock);
+	return ret;
+} /* jffs_ioctl()  */
+
+
+static struct address_space_operations jffs_address_operations = {
+	.readpage	= jffs_readpage,
+	.prepare_write	= jffs_prepare_write,
+	.commit_write	= jffs_commit_write,
+};
+
+static int jffs_fsync(struct file *f, struct dentry *d, int datasync)
+{
+	/* All writes are currently performed synchronously (O_SYNC
+	   semantics), so there is nothing left to flush here.
+	*/
+	return 0;
+}
+
+
+static struct file_operations jffs_file_operations =
+{
+	.open		= generic_file_open,
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.ioctl		= jffs_ioctl,
+	.mmap		= generic_file_readonly_mmap,
+	.fsync		= jffs_fsync,
+	.sendfile	= generic_file_sendfile,
+};
+
+
+static struct inode_operations jffs_file_inode_operations =
+{
+	.lookup		= jffs_lookup,          /* lookup */
+	.setattr	= jffs_setattr,
+};
+
+
+static struct file_operations jffs_dir_operations =
+{
+	.readdir	= jffs_readdir,
+};
+
+
+static struct inode_operations jffs_dir_inode_operations =
+{
+	.create		= jffs_create,
+	.lookup		= jffs_lookup,
+	.unlink		= jffs_unlink,
+	.symlink	= jffs_symlink,
+	.mkdir		= jffs_mkdir,
+	.rmdir		= jffs_rmdir,
+	.mknod		= jffs_mknod,
+	.rename		= jffs_rename,
+	.setattr	= jffs_setattr,
+};
+
+
+/* Initialize an inode for the VFS.  */
+static void
+jffs_read_inode(struct inode *inode)
+{
+	struct jffs_file *f;
+	struct jffs_control *c;
+
+	D3(printk("jffs_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+
+	if (!inode->i_sb) {
+		D(printk("jffs_read_inode(): !inode->i_sb ==> "
+			 "No super block!\n"));
+		return;
+	}
+	c = (struct jffs_control *)inode->i_sb->s_fs_info;
+	D3(printk (KERN_NOTICE "read_inode(): down biglock\n"));
+	down(&c->fmc->biglock);
+	if (!(f = jffs_find_file(c, inode->i_ino))) {
+		D(printk("jffs_read_inode(): No such inode (%lu).\n",
+			 inode->i_ino));
+		D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
+		up(&c->fmc->biglock);
+		return;
+	}
+	inode->u.generic_ip = (void *)f;
+	inode->i_mode = f->mode;
+	inode->i_nlink = f->nlink;
+	inode->i_uid = f->uid;
+	inode->i_gid = f->gid;
+	inode->i_size = f->size;
+	inode->i_atime.tv_sec = f->atime;
+	inode->i_mtime.tv_sec = f->mtime;
+	inode->i_ctime.tv_sec = f->ctime;
+	inode->i_atime.tv_nsec = 
+	inode->i_mtime.tv_nsec = 
+	inode->i_ctime.tv_nsec = 0;
+
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = (inode->i_size + 511) >> 9;
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &jffs_file_inode_operations;
+		inode->i_fop = &jffs_file_operations;
+		inode->i_mapping->a_ops = &jffs_address_operations;
+	}
+	else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &jffs_dir_inode_operations;
+		inode->i_fop = &jffs_dir_operations;
+	}
+	else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_mapping->a_ops = &jffs_address_operations;
+	}
+	else {
+		/* If the node is a device of some sort, then the number of
+		   the device should be read from the flash memory and then
+		   added to the inode's i_rdev member.  */
+		u16 val;
+		jffs_read_data(f, (char *)&val, 0, 2);
+		init_special_inode(inode, inode->i_mode,
+			old_decode_dev(val));
+	}
+
+	D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
+	up(&c->fmc->biglock);
+}
+
+
+static void
+jffs_delete_inode(struct inode *inode)
+{
+	struct jffs_file *f;
+	struct jffs_control *c;
+	D3(printk("jffs_delete_inode(): inode->i_ino == %lu\n",
+		  inode->i_ino));
+
+	lock_kernel();
+	inode->i_size = 0;
+	inode->i_blocks = 0;
+	inode->u.generic_ip = NULL;
+	clear_inode(inode);
+	if (inode->i_nlink == 0) {
+		c = (struct jffs_control *) inode->i_sb->s_fs_info;
+		f = (struct jffs_file *) jffs_find_file (c, inode->i_ino);
+		jffs_possibly_delete_file(f);
+	}
+
+	unlock_kernel();
+}
+
+
+static void
+jffs_write_super(struct super_block *sb)
+{
+	struct jffs_control *c = (struct jffs_control *)sb->s_fs_info;
+	lock_kernel();
+	jffs_garbage_collect_trigger(c);
+	unlock_kernel();
+}
+
+static int jffs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+static struct super_operations jffs_ops =
+{
+	.read_inode	= jffs_read_inode,
+	.delete_inode 	= jffs_delete_inode,
+	.put_super	= jffs_put_super,
+	.write_super	= jffs_write_super,
+	.statfs		= jffs_statfs,
+	.remount_fs	= jffs_remount,
+};
+
+static struct super_block *jffs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, jffs_fill_super);
+}
+
+static struct file_system_type jffs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "jffs",
+	.get_sb		= jffs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init
+init_jffs_fs(void)
+{
+	printk(KERN_INFO "JFFS version " JFFS_VERSION_STRING
+		", (C) 1999, 2000  Axis Communications AB\n");
+	
+#ifdef CONFIG_JFFS_PROC_FS
+	jffs_proc_root = proc_mkdir("jffs", proc_root_fs);
+	if (!jffs_proc_root) {
+		printk(KERN_WARNING "cannot create /proc/jffs entry\n");
+	}
+#endif
+	fm_cache = kmem_cache_create("jffs_fm", sizeof(struct jffs_fm),
+				     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, 
+				     NULL, NULL);
+	if (!fm_cache) {
+		return -ENOMEM;
+	}
+
+	node_cache = kmem_cache_create("jffs_node",sizeof(struct jffs_node),
+				       0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, 
+				       NULL, NULL);
+	if (!node_cache) {
+		kmem_cache_destroy(fm_cache);
+		return -ENOMEM;
+	}
+
+	return register_filesystem(&jffs_fs_type);
+}
+
+static void __exit
+exit_jffs_fs(void)
+{
+	unregister_filesystem(&jffs_fs_type);
+	kmem_cache_destroy(fm_cache);
+	kmem_cache_destroy(node_cache);
+}
+
+module_init(init_jffs_fs)
+module_exit(exit_jffs_fs)
+
+MODULE_DESCRIPTION("The Journalling Flash File System");
+MODULE_AUTHOR("Axis Communications AB.");
+MODULE_LICENSE("GPL");
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
new file mode 100644
index 0000000..8cc6893
--- /dev/null
+++ b/fs/jffs/intrep.c
@@ -0,0 +1,3457 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 1999, 2000  Axis Communications, Inc.
+ *
+ * Created by Finn Hakansson <finn@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: intrep.c,v 1.102 2001/09/23 23:28:36 dwmw2 Exp $
+ *
+ * Ported to Linux 2.3.x and MTD:
+ * Copyright (C) 2000  Alexander Larsson (alex@cendio.se), Cendio Systems AB
+ *
+ */
+
+/* This file contains the code for the internal structure of the
+   Journaling Flash File System, JFFS.  */
+
+/*
+ * Todo list:
+ *
+ * memcpy_to_flash() and memcpy_from_flash() functions.
+ *
+ * Implementation of hard links.
+ *
+ * Organize the source code in a better way. Against the VFS we could
+ * have jffs_ext.c, and against the block device jffs_int.c.
+ * A better file-internal organization too.
+ *
+ * A better checksum algorithm.
+ *
+ * Consider endianness stuff. ntohl() etc.
+ *
+ * Are we handling the atime, mtime, ctime members of the inode right?
+ *
+ * Remove some duplicated code. Take a look at jffs_write_node() and
+ * jffs_rewrite_data() for instance.
+ *
+ * Implement more meaning of the nlink member in various data structures.
+ * nlink could be used in conjunction with hard links for instance.
+ *
+ * Better memory management. Allocate data structures in larger chunks
+ * if possible.
+ *
+ * If too much meta data is stored, a garbage collect should be issued.
+ * We have experienced problems with too much meta data with, for instance,
+ * log files.
+ *
+ * Improve the calls to jffs_ioctl(). We would like to retrieve more
+ * information to be able to debug (or to supervise) JFFS during run-time.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/jffs.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/pagemap.h>
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+#include <linux/smp_lock.h>
+#include <linux/time.h>
+#include <linux/ctype.h>
+
+#include "intrep.h"
+#include "jffs_fm.h"
+
+long no_jffs_node = 0;
+static long no_jffs_file = 0;
+#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
+long no_jffs_control = 0;
+long no_jffs_raw_inode = 0;
+long no_jffs_node_ref = 0;
+long no_jffs_fm = 0;
+long no_jffs_fmcontrol = 0;
+long no_hash = 0;
+long no_name = 0;
+#endif
+
+static int jffs_scan_flash(struct jffs_control *c);
+static int jffs_update_file(struct jffs_file *f, struct jffs_node *node);
+static int jffs_build_file(struct jffs_file *f);
+static int jffs_free_file(struct jffs_file *f);
+static int jffs_free_node_list(struct jffs_file *f);
+static int jffs_garbage_collect_now(struct jffs_control *c);
+static int jffs_insert_file_into_hash(struct jffs_file *f);
+static int jffs_remove_redundant_nodes(struct jffs_file *f);
+
+/* Is there enough space on the flash?  */
+static inline int JFFS_ENOUGH_SPACE(struct jffs_control *c, __u32 space)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+
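+	/* Roughly: free = flash_size - (used + dirty); we need the
+	   min_free_size reserve plus the requested space.  If the check
+	   fails, garbage collecting only helps while at least one
+	   sector's worth of dirty data remains to be reclaimed. */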
+	while (1) {
+		if ((fmc->flash_size - (fmc->used_size + fmc->dirty_size))
+			>= fmc->min_free_size + space) {
+			return 1;
+		}
+		if (fmc->dirty_size < fmc->sector_size)
+			return 0;
+
+		if (jffs_garbage_collect_now(c)) {
+		  D1(printk("JFFS_ENOUGH_SPACE: jffs_garbage_collect_now() failed.\n"));
+		  return 0;
+		}
+	}
+}
+
+#if CONFIG_JFFS_FS_VERBOSE > 0
+static __u8
+flash_read_u8(struct mtd_info *mtd, loff_t from)
+{
+	size_t retlen;
+	__u8 ret;
+	int res;
+
+	res = MTD_READ(mtd, from, 1, &retlen, &ret);
+	if (retlen != 1) {
+		printk("Didn't read a byte in flash_read_u8(). Returned %d\n", res);
+		return 0;
+	}
+
+	return ret;
+}
+
+static void
+jffs_hexdump(struct mtd_info *mtd, loff_t pos, int size)
+{
+	char line[16];
+	int j = 0;
+
+	while (size > 0) {
+		int i;
+
+		printk("%ld:", (long) pos);
+		for (j = 0; j < 16; j++) {
+			line[j] = flash_read_u8(mtd, pos++);
+		}
+		for (i = 0; i < j; i++) {
+			if (!(i & 1)) {
+				printk(" %.2x", line[i] & 0xff);
+			}
+			else {
+				printk("%.2x", line[i] & 0xff);
+			}
+		}
+
+		/* Print empty space */
+		for (; i < 16; i++) {
+			if (!(i & 1)) {
+				printk("   ");
+			}
+			else {
+				printk("  ");
+			}
+		}
+		printk("  ");
+
+		for (i = 0; i < j; i++) {
+			if (isgraph(line[i])) {
+				printk("%c", line[i]);
+			}
+			else {
+				printk(".");
+			}
+		}
+		printk("\n");
+		size -= 16;
+	}
+}
+
+#endif
+
+#define flash_safe_acquire(arg)
+#define flash_safe_release(arg)
+
+
+static int
+flash_safe_read(struct mtd_info *mtd, loff_t from,
+		u_char *buf, size_t count)
+{
+	size_t retlen;
+	int res;
+
+	D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n",
+		  mtd, (unsigned int) from, buf, count));
+
+	res = MTD_READ(mtd, from, count, &retlen, buf);
+	if (retlen != count) {
+		panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res);
+	}
+	return res?res:retlen;
+}
+
+
+static __u32
+flash_read_u32(struct mtd_info *mtd, loff_t from)
+{
+	size_t retlen;
+	__u32 ret;
+	int res;
+
+	res = MTD_READ(mtd, from, 4, &retlen, (unsigned char *)&ret);
+	if (retlen != 4) {
+		printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res);
+		return 0;
+	}
+
+	return ret;
+}
+
+
+static int
+flash_safe_write(struct mtd_info *mtd, loff_t to,
+		 const u_char *buf, size_t count)
+{
+	size_t retlen;
+	int res;
+
+	D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n",
+		  mtd, (unsigned int) to, buf, count));
+
+	res = MTD_WRITE(mtd, to, count, &retlen, buf);
+	if (retlen != count) {
+		printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res);
+	}
+	return res?res:retlen;
+}
+
+
+static int
+flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs,
+			unsigned long iovec_cnt, loff_t to)
+{
+	size_t retlen, retlen_a;
+	int i;
+	int res;
+
+	D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n",
+		  mtd, (unsigned int) to, vecs));
+	
+	if (mtd->writev) {
+		res = MTD_WRITEV(mtd, vecs, iovec_cnt, to, &retlen);
+		return res ? res : retlen;
+	}
+	/* The driver doesn't implement writev. Fall back to repeated
+	   write calls - on the not so unreasonable assumption that the
+	   mtd driver doesn't care how many write cycles we use. */
+	res=0;
+	retlen=0;
+
+	for (i=0; !res && i<iovec_cnt; i++) {
+		res = MTD_WRITE(mtd, to, vecs[i].iov_len, &retlen_a, vecs[i].iov_base);
+		if (retlen_a != vecs[i].iov_len) {
+			printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res);
+			if (i != iovec_cnt-1)
+				return -EIO;
+		}
+		/* If res is non-zero, retlen_a is undefined, but we don't
+		   care because in that case it's not going to be 
+		   returned anyway.
+		*/
+		to += retlen_a;
+		retlen += retlen_a;
+	}
+	return res?res:retlen;
+}
+
+
+static int
+flash_memset(struct mtd_info *mtd, loff_t to,
+	     const u_char c, size_t size)
+{
+	static unsigned char pattern[64];
+	int i;
+
+	/* fill up pattern */
+
+	for(i = 0; i < 64; i++)
+		pattern[i] = c;
+
+	/* write as many 64-byte chunks as we can */
+
+	while (size >= 64) {
+		flash_safe_write(mtd, to, pattern, 64);
+		size -= 64;
+		to += 64;
+	}
+
+	/* and the rest */
+
+	if(size)
+		flash_safe_write(mtd, to, pattern, size);
+
+	return size;
+}
+
+
+static void
+intrep_erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *wait_q;
+
+	wait_q = (wait_queue_head_t *)done->priv;
+
+	wake_up(wait_q);
+}
+
+
+static int
+flash_erase_region(struct mtd_info *mtd, loff_t start,
+		   size_t size)
+{
+	struct erase_info *erase;
+	DECLARE_WAITQUEUE(wait, current);
+	wait_queue_head_t wait_q;
+
+	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+	if (!erase)
+		return -ENOMEM;
+
+	init_waitqueue_head(&wait_q);
+
+	erase->mtd = mtd;
+	erase->callback = intrep_erase_callback;
+	erase->addr = start;
+	erase->len = size;
+	erase->priv = (u_long)&wait_q;
+
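+	/* The MTD erase is asynchronous: intrep_erase_callback() wakes
+	   wait_q once the erase has completed, so we sleep on it below
+	   until then. */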
+	/* FIXME: Use TASK_INTERRUPTIBLE and deal with being interrupted */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(&wait_q, &wait);
+
+	if (MTD_ERASE(mtd, erase) < 0) {
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&wait_q, &wait);
+		kfree(erase);
+
+		printk(KERN_WARNING "flash: erase of region [0x%lx, 0x%lx] "
+		       "totally failed\n", (long)start, (long)start + size);
+
+		return -1;
+	}
+
+	schedule(); /* Wait for flash to finish. */
+	remove_wait_queue(&wait_q, &wait);
+
+	kfree(erase);
+
+	return 0;
+}
+
+/* This routine calculates the simple additive byte-sum checksum used
+   by JFFS.  */
+static __u32
+jffs_checksum(const void *data, int size)
+{
+	__u32 sum = 0;
+	__u8 *ptr = (__u8 *)data;
+	while (size-- > 0) {
+		sum += *ptr++;
+	}
+	D3(printk(", result: 0x%08x\n", sum));
+	return sum;
+}
+
+
+static int
+jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result)
+{
+	__u32 sum = 0;
+	loff_t ptr = start;
+	__u8 *read_buf;
+	int i, length;
+
+	/* Allocate read buffer */
+	read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL);
+	if (!read_buf) {
+		printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n");
+		return -ENOMEM;
+	}
+	/* Loop until checksum done */
+	while (size) {
+		/* Get amount of data to read */
+		if (size < 4096)
+			length = size;
+		else
+			length = 4096;
+
+		/* Perform flash read */
+		D3(printk(KERN_NOTICE "jffs_checksum_flash\n"));
+		flash_safe_read(mtd, ptr, &read_buf[0], length);
+
+		/* Compute checksum */
+		for (i=0; i < length ; i++)
+			sum += read_buf[i];
+
+		/* Update pointer and size */
+		size -= length;
+		ptr += length;
+	}
+
+	/* Free read buffer */
+	kfree (read_buf);
+
+	/* Return result */
+	D3(printk("checksum result: 0x%08x\n", sum));
+	*result = sum;
+	return 0;
+}
+
+static __inline__ void jffs_fm_write_lock(struct jffs_fmcontrol *fmc)
+{
+  //	down(&fmc->wlock);
+}
+
+static __inline__ void jffs_fm_write_unlock(struct jffs_fmcontrol *fmc)
+{
+  //	up(&fmc->wlock);
+}
+
+
+/* Create and initialize a new struct jffs_file.  */
+static struct jffs_file *
+jffs_create_file(struct jffs_control *c,
+		 const struct jffs_raw_inode *raw_inode)
+{
+	struct jffs_file *f;
+
+	if (!(f = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
+					      GFP_KERNEL))) {
+		D(printk("jffs_create_file(): Failed!\n"));
+		return NULL;
+	}
+	no_jffs_file++;
+	memset(f, 0, sizeof(struct jffs_file));
+	f->ino = raw_inode->ino;
+	f->pino = raw_inode->pino;
+	f->nlink = raw_inode->nlink;
+	f->deleted = raw_inode->deleted;
+	f->c = c;
+
+	return f;
+}
+
+
+/* Build a control block for the file system.  */
+static struct jffs_control *
+jffs_create_control(struct super_block *sb)
+{
+	struct jffs_control *c;
+	register int s = sizeof(struct jffs_control);
+	int i;
+	D(char *t = 0);
+
+	D2(printk("jffs_create_control()\n"));
+
+	if (!(c = (struct jffs_control *)kmalloc(s, GFP_KERNEL))) {
+		goto fail_control;
+	}
+	DJM(no_jffs_control++);
+	c->root = NULL;
+	c->gc_task = NULL;
+	c->hash_len = JFFS_HASH_SIZE;
+	s = sizeof(struct list_head) * c->hash_len;
+	if (!(c->hash = (struct list_head *)kmalloc(s, GFP_KERNEL))) {
+		goto fail_hash;
+	}
+	DJM(no_hash++);
+	for (i = 0; i < c->hash_len; i++)
+		INIT_LIST_HEAD(&c->hash[i]);
+	if (!(c->fmc = jffs_build_begin(c, MINOR(sb->s_dev)))) {
+		goto fail_fminit;
+	}
+	c->next_ino = JFFS_MIN_INO + 1;
+	c->delete_list = (struct jffs_delete_list *) 0;
+	return c;
+
+fail_fminit:
+	D(t = "c->fmc");
+fail_hash:
+	kfree(c);
+	DJM(no_jffs_control--);
+	D(t = t ? t : "c->hash");
+fail_control:
+	D(t = t ? t : "control");
+	D(printk("jffs_create_control(): Allocation failed: (%s)\n", t));
+	return (struct jffs_control *)0;
+}
+
+
+/* Clean up all data structures associated with the file system.  */
+void
+jffs_cleanup_control(struct jffs_control *c)
+{
+	D2(printk("jffs_cleanup_control()\n"));
+
+	if (!c) {
+		D(printk("jffs_cleanup_control(): c == NULL !!!\n"));
+		return;
+	}
+
+	while (c->delete_list) {
+		struct jffs_delete_list *delete_list_element;
+		delete_list_element = c->delete_list;
+		c->delete_list = c->delete_list->next;
+		kfree(delete_list_element);
+	}
+
+	/* Free all files and nodes.  */
+	if (c->hash) {
+		jffs_foreach_file(c, jffs_free_node_list);
+		jffs_foreach_file(c, jffs_free_file);
+		kfree(c->hash);
+		DJM(no_hash--);
+	}
+	jffs_cleanup_fmcontrol(c->fmc);
+	kfree(c);
+	DJM(no_jffs_control--);
+	D3(printk("jffs_cleanup_control(): Leaving...\n"));
+}
+
+
+/* This function adds a virtual root node to the in-RAM representation.
+   Called by jffs_build_fs().  */
+static int
+jffs_add_virtual_root(struct jffs_control *c)
+{
+	struct jffs_file *root;
+	struct jffs_node *node;
+
+	D2(printk("jffs_add_virtual_root(): "
+		  "Creating a virtual root directory.\n"));
+
+	if (!(root = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
+						 GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+	no_jffs_file++;
+	if (!(node = jffs_alloc_node())) {
+		kfree(root);
+		no_jffs_file--;
+		return -ENOMEM;
+	}
+	DJM(no_jffs_node++);
+	memset(node, 0, sizeof(struct jffs_node));
+	node->ino = JFFS_MIN_INO;
+	memset(root, 0, sizeof(struct jffs_file));
+	root->ino = JFFS_MIN_INO;
+	root->mode = S_IFDIR | S_IRWXU | S_IRGRP
+		     | S_IXGRP | S_IROTH | S_IXOTH;
+	root->atime = root->mtime = root->ctime = get_seconds();
+	root->nlink = 1;
+	root->c = c;
+	root->version_head = root->version_tail = node;
+	jffs_insert_file_into_hash(root);
+	return 0;
+}
+
+
+/* This is where the file system is built and initialized.  */
+int
+jffs_build_fs(struct super_block *sb)
+{
+	struct jffs_control *c;
+	int err = 0;
+
+	D2(printk("jffs_build_fs()\n"));
+
+	if (!(c = jffs_create_control(sb))) {
+		return -ENOMEM;
+	}
+	c->building_fs = 1;
+	c->sb = sb;
+	if ((err = jffs_scan_flash(c)) < 0) {
+		if(err == -EAGAIN){
+			/* scan_flash() wants us to try once more. A sector with
+			   flipping bits was detected in the middle of the flash
+			   scan. Clean up the old allocated memory before trying
+			   again.
+			*/
+			D1(printk("jffs_build_fs: Cleaning up all control structures,"
+				  " reallocating them and trying mount again.\n"));
+			jffs_cleanup_control(c);
+			if (!(c = jffs_create_control(sb))) {
+				return -ENOMEM;
+			}
+			c->building_fs = 1;
+			c->sb = sb;
+
+			if ((err = jffs_scan_flash(c)) < 0) {
+				goto jffs_build_fs_fail;
+			}			
+		}else{
+			goto jffs_build_fs_fail;
+		}
+	}
+
+	/* Add a virtual root node if no one exists.  */
+	if (!jffs_find_file(c, JFFS_MIN_INO)) {
+		if ((err = jffs_add_virtual_root(c)) < 0) {
+			goto jffs_build_fs_fail;
+		}
+	}
+
+	while (c->delete_list) {
+		struct jffs_file *f;
+		struct jffs_delete_list *delete_list_element;
+
+		if ((f = jffs_find_file(c, c->delete_list->ino))) {
+			f->deleted = 1;
+		}
+		delete_list_element = c->delete_list;
+		c->delete_list = c->delete_list->next;
+		kfree(delete_list_element);
+	}
+
+	/* Remove deleted nodes.  */
+	if ((err = jffs_foreach_file(c, jffs_possibly_delete_file)) < 0) {
+		printk(KERN_ERR "JFFS: Failed to remove deleted nodes.\n");
+		goto jffs_build_fs_fail;
+	}
+	/* Remove redundant nodes.  (We are not interested in the
+	   return value in this case.)  */
+	jffs_foreach_file(c, jffs_remove_redundant_nodes);
+	/* Try to build a tree from all the nodes.  */
+	if ((err = jffs_foreach_file(c, jffs_insert_file_into_tree)) < 0) {
+		printk("JFFS: Failed to build tree.\n");
+		goto jffs_build_fs_fail;
+	}
+	/* Compute the sizes of all files in the filesystem.  Adjust if
+	   necessary.  */
+	if ((err = jffs_foreach_file(c, jffs_build_file)) < 0) {
+		printk("JFFS: Failed to build file system.\n");
+		goto jffs_build_fs_fail;
+	}
+	sb->s_fs_info = (void *)c;
+	c->building_fs = 0;
+
+	D1(jffs_print_hash_table(c));
+	D1(jffs_print_tree(c->root, 0));
+
+	return 0;
+
+jffs_build_fs_fail:
+	jffs_cleanup_control(c);
+	return err;
+} /* jffs_build_fs()  */
+
+
+/*
+  This checks for sectors that were being erased in their previous 
+  lifetimes and for some reason or other (power failure etc.),
+  the erase cycles never completed.
+  As the flash array would have reverted to read status,
+  these sectors are detected by the symptom of the "flipping bits",
+  i.e. bits being read back differently from the same location in
+  flash if read multiple times.
+  The only solution to this is to re-erase the entire
+  sector.
+  Unfortunately detecting "flipping bits" is not a simple exercise
+  as a bit may be read back at 1 or 0 depending on the alignment 
+  of the stars in the universe.
+  The level of confidence is in direct proportion to the number of 
+  scans done. By power fail testing I (Vipin) have been able to 
+  proove that reading twice is not enough.
+  Maybe 4 times? Change NUM_REREADS to a higher number if you want
+  a (even) higher degree of confidence in your mount process. 
+  A higher number would of course slow down your mount.
+*/
+static int
+check_partly_erased_sectors(struct jffs_fmcontrol *fmc)
+{
+
+#define NUM_REREADS             4 /* see note above */
+#define READ_AHEAD_BYTES        4096 /* must be a multiple of 4, 
+					usually set to kernel page size */
+
+	__u8 *read_buf1;
+	__u8 *read_buf2;
+
+	int err = 0;
+	int retlen;
+	int i;
+	int cnt;
+	__u32 offset;
+	loff_t pos = 0;
+	loff_t end = fmc->flash_size;
+
+
+	/* Allocate read buffers */
+	read_buf1 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
+	if (!read_buf1)
+		return -ENOMEM;
+
+	read_buf2 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
+	if (!read_buf2) {
+		kfree(read_buf1);
+		return -ENOMEM;
+	}
+
+ CHECK_NEXT:
+	while(pos < end){
+		
+		D1(printk("check_partly_erased_sectors(): checking sector which contains"
+			  " offset 0x%x for flipping bits...\n", (__u32)pos));
+		
+		retlen = flash_safe_read(fmc->mtd, pos,
+					 &read_buf1[0], READ_AHEAD_BYTES);
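+		/* Compare only whole 32-bit words below; mask off any
+		   trailing partial word from the amount just read.  */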
+		retlen &= ~3;
+		
+		for(cnt = 0; cnt < NUM_REREADS; cnt++){
+			(void)flash_safe_read(fmc->mtd, pos,
+					      &read_buf2[0], READ_AHEAD_BYTES);
+			
+			for (i=0 ; i < retlen ; i+=4) {
+				/* buffers MUST match, double word for word! */
+				if(*((__u32 *) &read_buf1[i]) !=
+				   *((__u32 *) &read_buf2[i])
+				   ){
+				        /* flipping bits detected, time to erase sector */
+					/* This will help us log some statistics etc. */
+					D1(printk("Flipping bits detected in re-read round:%i of %i\n",
+					       cnt, NUM_REREADS));
+					D1(printk("check_partly_erased_sectors:flipping bits detected"
+						  " @offset:0x%x(0x%x!=0x%x)\n",
+						  (__u32)pos+i, *((__u32 *) &read_buf1[i]), 
+						  *((__u32 *) &read_buf2[i])));
+					
+				        /* calculate start of present sector */
+					offset = (((__u32)pos+i)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+					
+					D1(printk("check_partly_erased_sectors(): erasing sector starting 0x%x.\n",
+						  offset));
+					
+					if (flash_erase_region(fmc->mtd,
+							       offset, fmc->sector_size) < 0) {
+						printk(KERN_ERR "JFFS: Erase of flash failed. "
+						       "offset = %u, erase_size = %d\n",
+						       offset , fmc->sector_size);
+						
+						err = -EIO;
+						goto returnBack;
+
+					}else{
+						D1(printk("JFFS: Erase of flash sector @0x%x successful.\n",
+						       offset));
+						/* skip ahead to the next sector */
+						pos = (((__u32)pos+i)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+						pos += fmc->sector_size;
+						goto CHECK_NEXT;
+					}
+				}
+			}
+		}
+		pos += READ_AHEAD_BYTES;
+	}
+
+ returnBack:
+	kfree(read_buf1);
+	kfree(read_buf2);
+
+	D2(printk("check_partly_erased_sectors(): Done checking all sectors up to offset 0x%x for flipping bits.\n",
+		  (__u32)pos));
+
+	return err;
+
+}/* end check_partly_erased_sectors() */
+
+
+
+/* Scan the whole flash memory in order to find all nodes in the
+   file systems.  */
+static int
+jffs_scan_flash(struct jffs_control *c)
+{
+	char name[JFFS_MAX_NAME_LEN + 2];
+	struct jffs_raw_inode raw_inode;
+	struct jffs_node *node = NULL;
+	struct jffs_fmcontrol *fmc = c->fmc;
+	__u32 checksum;
+	__u8 tmp_accurate;
+	__u16 tmp_chksum;
+	__u32 deleted_file;
+	loff_t pos = 0;
+	loff_t start;
+	loff_t test_start;
+	loff_t end = fmc->flash_size;
+	__u8 *read_buf;
+	int i, len, retlen;
+	__u32 offset;
+
+	__u32 free_chunk_size1;
+	__u32 free_chunk_size2;
+
+	
+#define NUMFREEALLOWED     2        /* 2 chunks of at least erase size space allowed */
+	int num_free_space = 0;       /* Flag an error if more than TWO
+				       free blocks are found. This is NOT allowed
+				       by the current jffs design.
+				    */
+	int num_free_spc_not_accp = 0; /* For debugging purposes, keep count
+					of how much free space was rejected and
+					marked dirty.
+				     */
+
+	D1(printk("jffs_scan_flash(): start pos = 0x%lx, end = 0x%lx\n",
+		  (long)pos, (long)end));
+
+	flash_safe_acquire(fmc->mtd);
+
+	/*
+	  check and make sure that any sector does not suffer
+	  from the "partly erased, bit flipping syndrome" (TM Vipin :)
+	  If so, offending sectors will be erased.
+	*/
+	if(check_partly_erased_sectors(fmc) < 0){
+
+		flash_safe_release(fmc->mtd);
+		return -EIO; /* bad, bad, bad error. Cannot continue.*/
+	}
+
+	/* Allocate read buffer */
+	read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL);
+	if (!read_buf) {
+		flash_safe_release(fmc->mtd);
+		return -ENOMEM;
+	}
+			      
+	/* Start the scan.  */
+	while (pos < end) {
+		deleted_file = 0;
+
+		/* Remember the position from where we started this scan.  */
+		start = pos;
+
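+		/* The first 32-bit word at this position tells us what we
+		   are looking at: erased flash (0xffffffff), dirtied space
+		   (0x00000000), or the magic marker of a raw inode.  */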
+		switch (flash_read_u32(fmc->mtd, pos)) {
+		case JFFS_EMPTY_BITMASK:
+			/* We have found 0xffffffff at this position.  We have to
+			   scan the rest of the flash till the end or till
+			   something other than 0xffffffff is found.
+			   Keep going until we no longer find JFFS_EMPTY_BITMASK. */
+
+			D1(printk("jffs_scan_flash(): 0xffffffff at pos 0x%lx.\n",
+				  (long)pos));
+
+		        while(pos < end){
+
+			      len = end - pos < 4096 ? end - pos : 4096;
+			      
+			      retlen = flash_safe_read(fmc->mtd, pos,
+						 &read_buf[0], len);
+
+			      retlen &= ~3;
+			      
+			      for (i=0 ; i < retlen ; i+=4, pos += 4) {
+				      if(*((__u32 *) &read_buf[i]) !=
+					 JFFS_EMPTY_BITMASK)
+					break;
+			      }
+			      if (i == retlen)
+				    continue;
+			      else
+				    break;
+			}
+
+			D1(printk("jffs_scan_flash():0xffffffff ended at pos 0x%lx.\n",
+				  (long)pos));
+			
+			/* If some free space ends in the middle of a sector,
+			   treat it as dirty rather than clean.
+			   This is to handle the case where one thread 
+			   allocated space for a node, but didn't get to
+			   actually _write_ it before power was lost, leaving
+			   a gap in the log. Shifting all node writes into
+			   a single kernel thread will fix the original problem.
+			*/
+			if ((__u32) pos % fmc->sector_size) {
+				/* If there was free space in previous 
+				   sectors, don't mark that dirty too - 
+				   only from the beginning of this sector
+				   (or from start) 
+				*/
+
+			        test_start = pos & ~(fmc->sector_size-1); /* end of last sector */
+
+				if (start < test_start) {
+
+				        /* free space started in the previous sector! */
+
+					if((num_free_space < NUMFREEALLOWED) && 
+					   ((unsigned int)(test_start - start) >= fmc->sector_size)){
+
+				                /*
+						  Count it in if we are still under NUMFREEALLOWED *and* it is 
+						  at least 1 erase sector in length. This will keep us from 
+						  picking any little ole' space as "free".
+						*/
+					  
+					        D1(printk("Reducing end of free space to 0x%x from 0x%x\n",
+							  (unsigned int)test_start, (unsigned int)pos));
+
+						D1(printk("Free space accepted: Starting 0x%x for 0x%x bytes\n",
+							  (unsigned int) start,
+							  (unsigned int)(test_start - start)));
+
+						/* below, space from "start" to "pos" will be marked dirty. */
+						start = test_start; 
+						
+						/* Being in here means that we have found at least an entire 
+						   erase sector size of free space ending on a sector boundary.
+						   Keep track of free spaces accepted.
+						*/
+						num_free_space++;
+					}else{
+					        num_free_spc_not_accp++;
+					        D1(printk("Free space (#%i) found but *Not* accepted: Starting"
+							  " 0x%x for 0x%x bytes\n",
+							  num_free_spc_not_accp, (unsigned int)start, 
+							  (unsigned int)((unsigned int)(pos & ~(fmc->sector_size-1)) - (unsigned int)start)));
+					        
+					}
+					
+				}
+				if((((__u32)(pos - start)) != 0)){
+
+				        D1(printk("Dirty space: Starting 0x%x for 0x%x bytes\n",
+						  (unsigned int) start, (unsigned int) (pos - start)));
+					jffs_fmalloced(fmc, (__u32) start,
+						       (__u32) (pos - start), NULL);
+				}else{
+					/* "Flipping bits" detected. This means that our scan for them
+					   did not catch this offset. See check_partly_erased_sectors() for
+					   more info.
+					*/
+				        
+					D1(printk("jffs_scan_flash():wants to allocate dirty flash "
+						  "space for 0 bytes.\n"));
+					D1(printk("jffs_scan_flash(): Flipping bits! We will free "
+						  "all allocated memory, erase this sector and remount\n"));
+
+					/* calculate start of present sector */
+					offset = (((__u32)pos)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+					
+					D1(printk("jffs_scan_flash():erasing sector starting 0x%x.\n",
+						  offset));
+					
+					if (flash_erase_region(fmc->mtd,
+							       offset, fmc->sector_size) < 0) {
+						printk(KERN_ERR "JFFS: Erase of flash failed. "
+						       "offset = %u, erase_size = %d\n",
+						       offset , fmc->sector_size);
+
+						flash_safe_release(fmc->mtd);
+						kfree (read_buf);
+						return -1; /* bad, bad, bad! */
+
+					}
+					flash_safe_release(fmc->mtd);
+					kfree (read_buf);
+
+					return -EAGAIN; /* erased offending sector. Try mount one more time please. */
+				}
+			}else{
+			        /* Being in here means that we have found free space that ends on an erase sector
+				   boundary.
+				   Count it in if we are still under NUMFREEALLOWED *and* it is at least 1 erase 
+				   sector in length. This will keep us from picking any little ole' space as "free".
+				 */
+			         if((num_free_space < NUMFREEALLOWED) && 
+				    ((unsigned int)(pos - start) >= fmc->sector_size)){
+				           /* We really don't do anything to mark space as free, except *not* 
+					      mark it dirty and just advance the "pos" location pointer. 
+					      It will automatically be picked up as free space.
+					    */ 
+				           num_free_space++;
+				           D1(printk("Free space accepted: Starting 0x%x for 0x%x bytes\n",
+						     (unsigned int) start, (unsigned int) (pos - start)));
+				 }else{
+				         num_free_spc_not_accp++;
+					 D1(printk("Free space (#%i) found but *Not* accepted: Starting "
+						   "0x%x for 0x%x bytes\n", num_free_spc_not_accp, 
+						   (unsigned int) start, 
+						   (unsigned int) (pos - start)));
+					 
+					 /* Mark this space as dirty. We already have our free space. */
+					 D1(printk("Dirty space: Starting 0x%x for 0x%x bytes\n",
+						   (unsigned int) start, (unsigned int) (pos - start)));
+					 jffs_fmalloced(fmc, (__u32) start,
+							(__u32) (pos - start), NULL);				           
+				 }
+				 
+			}
+			if(num_free_space > NUMFREEALLOWED){
+			         printk(KERN_WARNING "jffs_scan_flash(): Found free space "
+					"number %i. Only %i chunks of free space are allowed.\n",
+					num_free_space, NUMFREEALLOWED);
+			}
+			continue;
+
+		case JFFS_DIRTY_BITMASK:
+			/* We have found 0x00000000 at this position.  Scan as far
+			   as possible to find out how much is dirty.  */
+			D1(printk("jffs_scan_flash(): 0x00000000 at pos 0x%lx.\n",
+				  (long)pos));
+			for (; pos < end
+			       && JFFS_DIRTY_BITMASK == flash_read_u32(fmc->mtd, pos);
+			     pos += 4);
+			D1(printk("jffs_scan_flash(): 0x00 ended at "
+				  "pos 0x%lx.\n", (long)pos));
+			jffs_fmalloced(fmc, (__u32) start,
+				       (__u32) (pos - start), NULL);
+			continue;
+
+		case JFFS_MAGIC_BITMASK:
+			/* We have probably found a new raw inode.  */
+			break;
+
+		default:
+		bad_inode:
+			/* We're f*cked.  This is not solved yet.  We have
+			   to scan for the magic pattern.  */
+			D1(printk("*************** Dirty flash memory or "
+				  "bad inode: "
+				  "hexdump(pos = 0x%lx, len = 128):\n",
+				  (long)pos));
+			D1(jffs_hexdump(fmc->mtd, pos, 128));
+
+			for (pos += 4; pos < end; pos += 4) {
+				switch (flash_read_u32(fmc->mtd, pos)) {
+				case JFFS_MAGIC_BITMASK:
+				case JFFS_EMPTY_BITMASK:
+					/* handle these in the main switch() loop */
+					goto cont_scan;
+
+				default:
+					break;
+				}
+			}
+
+			cont_scan:
+			/* First, mark as dirty the region
+			   which really does contain crap. */
+			jffs_fmalloced(fmc, (__u32) start,
+				       (__u32) (pos - start),
+				       NULL);
+			
+			continue;
+		}/* switch */
+
+		/* We have found the beginning of an inode.  Create a
+		   node for it unless there already is one available.  */
+		if (!node) {
+			if (!(node = jffs_alloc_node())) {
+				/* Free read buffer */
+				kfree (read_buf);
+
+				/* Release the flash device */
+				flash_safe_release(fmc->mtd);
+	
+				return -ENOMEM;
+			}
+			DJM(no_jffs_node++);
+		}
+
+		/* Read the next raw inode.  */
+
+		flash_safe_read(fmc->mtd, pos, (u_char *) &raw_inode,
+				sizeof(struct jffs_raw_inode));
+
+		/* When we compute the checksum for the inode, we never
+		   count the 'accurate' or the 'checksum' fields.  */
+		tmp_accurate = raw_inode.accurate;
+		tmp_chksum = raw_inode.chksum;
+		raw_inode.accurate = 0;
+		raw_inode.chksum = 0;
+		checksum = jffs_checksum(&raw_inode,
+					 sizeof(struct jffs_raw_inode));
+		raw_inode.accurate = tmp_accurate;
+		raw_inode.chksum = tmp_chksum;
+
+		D3(printk("*** We have found this raw inode at pos 0x%lx "
+			  "on the flash:\n", (long)pos));
+		D3(jffs_print_raw_inode(&raw_inode));
+
+		if (checksum != raw_inode.chksum) {
+			D1(printk("jffs_scan_flash(): Bad checksum: "
+				  "checksum = %u, "
+				  "raw_inode.chksum = %u\n",
+				  checksum, raw_inode.chksum));
+			pos += sizeof(struct jffs_raw_inode);
+			jffs_fmalloced(fmc, (__u32) start,
+				       (__u32) (pos - start), NULL);
+			/* Reuse this unused struct jffs_node.  */
+			continue;
+		}
+
+		/* Check the raw inode read so far.  Start with the
+		   maximum length of the filename.  */
+		if (raw_inode.nsize > JFFS_MAX_NAME_LEN) {
+			printk(KERN_WARNING "jffs_scan_flash: Found a "
+			       "JFFS node with name too large\n");
+			goto bad_inode;
+		}
+
+		if (raw_inode.rename && raw_inode.dsize != sizeof(__u32)) {
+			printk(KERN_WARNING "jffs_scan_flash: Found a "
+			       "rename node with dsize %u.\n",
+			       raw_inode.dsize);
+			jffs_print_raw_inode(&raw_inode);
+			goto bad_inode;
+		}
+
+		/* The node's data segment should not exceed a
+		   certain length.  */
+		if (raw_inode.dsize > fmc->max_chunk_size) {
+			printk(KERN_WARNING "jffs_scan_flash: Found a "
+			       "JFFS node with dsize (0x%x) > max_chunk_size (0x%x)\n",
+			       raw_inode.dsize, fmc->max_chunk_size);
+			goto bad_inode;
+		}
+
+		pos += sizeof(struct jffs_raw_inode);
+
+		/* This shouldn't be necessary because a node that
+		   violates the flash boundaries shouldn't be written
+		   in the first place. */
+		if (pos >= end) {
+			goto check_node;
+		}
+
+		/* Read the name.  */
+		*name = 0;
+		if (raw_inode.nsize) {
+		        flash_safe_read(fmc->mtd, pos, name, raw_inode.nsize);
+			name[raw_inode.nsize] = '\0';
+			pos += raw_inode.nsize
+			       + JFFS_GET_PAD_BYTES(raw_inode.nsize);
+			D3(printk("name == \"%s\"\n", name));
+			checksum = jffs_checksum(name, raw_inode.nsize);
+			if (checksum != raw_inode.nchksum) {
+				D1(printk("jffs_scan_flash(): Bad checksum: "
+					  "checksum = %u, "
+					  "raw_inode.nchksum = %u\n",
+					  checksum, raw_inode.nchksum));
+				jffs_fmalloced(fmc, (__u32) start,
+					       (__u32) (pos - start), NULL);
+				/* Reuse this unused struct jffs_node.  */
+				continue;
+			}
+			if (pos >= end) {
+				goto check_node;
+			}
+		}
+
+		/* Read the data, if it exists, in order to be sure it
+		   matches the checksum.  */
+		if (raw_inode.dsize) {
+			if (raw_inode.rename) {
+				deleted_file = flash_read_u32(fmc->mtd, pos);
+			}
+			if (jffs_checksum_flash(fmc->mtd, pos, raw_inode.dsize, &checksum)) {
+				printk("jffs_checksum_flash() failed to calculate a checksum\n");
+				jffs_fmalloced(fmc, (__u32) start,
+					       (__u32) (pos - start), NULL);
+				/* Reuse this unused struct jffs_node.  */
+				continue;
+			}				
+			pos += raw_inode.dsize
+			       + JFFS_GET_PAD_BYTES(raw_inode.dsize);
+
+			if (checksum != raw_inode.dchksum) {
+				D1(printk("jffs_scan_flash(): Bad checksum: "
+					  "checksum = %u, "
+					  "raw_inode.dchksum = %u\n",
+					  checksum, raw_inode.dchksum));
+				jffs_fmalloced(fmc, (__u32) start,
+					       (__u32) (pos - start), NULL);
+				/* Reuse this unused struct jffs_node.  */
+				continue;
+			}
+		}
+
+		check_node:
+
+		/* Remember the highest inode number in the whole file
+		   system.  This information will be used when assigning
+		   new files new inode numbers.  */
+		if (c->next_ino <= raw_inode.ino) {
+			c->next_ino = raw_inode.ino + 1;
+		}
+
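+		/* Only nodes still marked accurate are turned into in-core
+		   nodes; an obsolete raw inode (accurate cleared) simply has
+		   its flash space marked dirty in the else branch below.  */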
+		if (raw_inode.accurate) {
+			int err;
+			node->data_offset = raw_inode.offset;
+			node->data_size = raw_inode.dsize;
+			node->removed_size = raw_inode.rsize;
+			/* Compute the offset to the actual data in the
+			   on-flash node.  */
+			node->fm_offset
+			= sizeof(struct jffs_raw_inode)
+			  + raw_inode.nsize
+			  + JFFS_GET_PAD_BYTES(raw_inode.nsize);
+			node->fm = jffs_fmalloced(fmc, (__u32) start,
+						  (__u32) (pos - start),
+						  node);
+			if (!node->fm) {
+				D(printk("jffs_scan_flash(): !node->fm\n"));
+				jffs_free_node(node);
+				DJM(no_jffs_node--);
+
+				/* Free read buffer */
+				kfree (read_buf);
+
+				/* Release the flash device */
+				flash_safe_release(fmc->mtd);
+
+				return -ENOMEM;
+			}
+			if ((err = jffs_insert_node(c, NULL, &raw_inode,
+						    name, node)) < 0) {
+				printk("JFFS: Failed to handle raw inode. "
+				       "(err = %d)\n", err);
+				break;
+			}
+			if (raw_inode.rename) {
+				struct jffs_delete_list *dl
+				= (struct jffs_delete_list *)
+				  kmalloc(sizeof(struct jffs_delete_list),
+					  GFP_KERNEL);
+				if (!dl) {
+					D(printk("jffs_scan_flash: !dl\n"));
+					jffs_free_node(node);
+					DJM(no_jffs_node--);
+
+					/* Release the flash device */
+					flash_safe_release(fmc->mtd);
+
+					/* Free read buffer */
+					kfree (read_buf);
+
+					return -ENOMEM;
+				}
+				dl->ino = deleted_file;
+				dl->next = c->delete_list;
+				c->delete_list = dl;
+				node->data_size = 0;
+			}
+			D3(jffs_print_node(node));
+			node = NULL; /* Don't free the node!  */
+		}
+		else {
+			jffs_fmalloced(fmc, (__u32) start,
+				       (__u32) (pos - start), NULL);
+			D3(printk("jffs_scan_flash(): Just found an obsolete "
+				  "raw_inode. Continuing the scan...\n"));
+			/* Reuse this unused struct jffs_node.  */
+		}
+	}
+
+	if (node) {
+		jffs_free_node(node);
+		DJM(no_jffs_node--);
+	}
+	jffs_build_end(fmc);
+
+	/* Free read buffer */
+	kfree (read_buf);
+
+	if(!num_free_space){
+	        printk(KERN_WARNING "jffs_scan_flash(): Did not find even a single "
+		       "chunk of free space. This is BAD!\n");
+	}
+
+	/* Return happy */
+	D3(printk("jffs_scan_flash(): Leaving...\n"));
+	flash_safe_release(fmc->mtd);
+
+	/* This is to trap the "free size accounting screwed" error. */
+	free_chunk_size1 = jffs_free_size1(fmc);
+	free_chunk_size2 = jffs_free_size2(fmc);
+
+	if (free_chunk_size1 + free_chunk_size2 != fmc->free_size) {
+
+		printk(KERN_WARNING "jffs_scan_flash(): Free size accounting screwed\n");
+		printk(KERN_WARNING "jffs_scan_flash(): free_chunk_size1 == 0x%x, "
+		       "free_chunk_size2 == 0x%x, fmc->free_size == 0x%x\n", 
+		       free_chunk_size1, free_chunk_size2, fmc->free_size);
+
+		return -1; /* Do NOT mount the f/s so that we can inspect what happened.
+			      Mounting this screwed-up f/s would screw us up anyway.
+			    */
+	}	
+
+	return 0; /* as far as we are concerned, we are happy! */
+} /* jffs_scan_flash()  */
+
+
+/* Insert any kind of node into the file system.  Take care of data
+   insertions and deletions.  Also remove redundant information. The
+   memory allocated for the `name' is regarded as "given away" from the
+   caller's perspective.  */
+int
+jffs_insert_node(struct jffs_control *c, struct jffs_file *f,
+		 const struct jffs_raw_inode *raw_inode,
+		 const char *name, struct jffs_node *node)
+{
+	int update_name = 0;
+	int insert_into_tree = 0;
+
+	D2(printk("jffs_insert_node(): ino = %u, version = %u, "
+		  "name = \"%s\", deleted = %d\n",
+		  raw_inode->ino, raw_inode->version,
+		  ((name && *name) ? name : ""), raw_inode->deleted));
+
+	/* If there doesn't exist an associated jffs_file, then
+	   create, initialize and insert one into the file system.  */
+	if (!f && !(f = jffs_find_file(c, raw_inode->ino))) {
+		if (!(f = jffs_create_file(c, raw_inode))) {
+			return -ENOMEM;
+		}
+		jffs_insert_file_into_hash(f);
+		insert_into_tree = 1;
+	}
+	node->ino = raw_inode->ino;
+	node->version = raw_inode->version;
+	node->data_size = raw_inode->dsize;
+	node->fm_offset = sizeof(struct jffs_raw_inode) + raw_inode->nsize
+			  + JFFS_GET_PAD_BYTES(raw_inode->nsize);
+	node->name_size = raw_inode->nsize;
+
+	/* Now insert the node at the correct position into the file's
+	   version list.  */
+	if (!f->version_head) {
+		/* This is the first node.  */
+		f->version_head = node;
+		f->version_tail = node;
+		node->version_prev = NULL;
+		node->version_next = NULL;
+		f->highest_version = node->version;
+		update_name = 1;
+		f->mode = raw_inode->mode;
+		f->uid = raw_inode->uid;
+		f->gid = raw_inode->gid;
+		f->atime = raw_inode->atime;
+		f->mtime = raw_inode->mtime;
+		f->ctime = raw_inode->ctime;
+	}
+	else if ((f->highest_version < node->version)
+		 || (node->version == 0)) {
+		/* Insert at the end of the list.  I.e. this node is the
+		   newest one so far.  */
+		node->version_prev = f->version_tail;
+		node->version_next = NULL;
+		f->version_tail->version_next = node;
+		f->version_tail = node;
+		f->highest_version = node->version;
+		update_name = 1;
+		f->pino = raw_inode->pino;
+		f->mode = raw_inode->mode;
+		f->uid = raw_inode->uid;
+		f->gid = raw_inode->gid;
+		f->atime = raw_inode->atime;
+		f->mtime = raw_inode->mtime;
+		f->ctime = raw_inode->ctime;
+	}
+	else if (f->version_head->version > node->version) {
+		/* Insert at the bottom of the list.  */
+		node->version_prev = NULL;
+		node->version_next = f->version_head;
+		f->version_head->version_prev = node;
+		f->version_head = node;
+		if (!f->name) {
+			update_name = 1;
+		}
+	}
+	else {
+		struct jffs_node *n;
+		int newer_name = 0;
+		/* Search for the insertion position starting from
+		   the tail (newest node).  */
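+		/* Note whether a node newer than the one being inserted
+		   already carries a name; if so, this older node must not
+		   be allowed to update the file's name.  */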
+		for (n = f->version_tail; n; n = n->version_prev) {
+			if (n->version < node->version) {
+				node->version_prev = n;
+				node->version_next = n->version_next;
+				node->version_next->version_prev = node;
+				n->version_next = node;
+				if (!newer_name) {
+					update_name = 1;
+				}
+				break;
+			}
+			if (n->name_size) {
+				newer_name = 1;
+			}
+		}
+	}
+
+	/* Deletion is irreversible. If any 'deleted' node is ever
+	   written, the file is deleted */
+	if (raw_inode->deleted)
+		f->deleted = raw_inode->deleted;
+
+	/* Perhaps update the name.  */
+	if (raw_inode->nsize && update_name && name && *name && (name != f->name)) {
+		if (f->name) {
+			kfree(f->name);
+			DJM(no_name--);
+		}
+		if (!(f->name = (char *) kmalloc(raw_inode->nsize + 1,
+						 GFP_KERNEL))) {
+			return -ENOMEM;
+		}
+		DJM(no_name++);
+		memcpy(f->name, name, raw_inode->nsize);
+		f->name[raw_inode->nsize] = '\0';
+		f->nsize = raw_inode->nsize;
+		D3(printk("jffs_insert_node(): Updated the name of "
+			  "the file to \"%s\".\n", name));
+	}
+
+	if (!c->building_fs) {
+		D3(printk("jffs_insert_node(): ---------------------------"
+			  "------------------------------------------- 1\n"));
+		if (insert_into_tree) {
+			jffs_insert_file_into_tree(f);
+		}
+		/* Once upon a time, we would call jffs_possibly_delete_file()
+		   here. That causes an oops if someone's still got the file
+		   open, so now we only do it in jffs_delete_inode()
+		   -- dwmw2
+		*/
+		if (node->data_size || node->removed_size) {
+			jffs_update_file(f, node);
+		}
+		jffs_remove_redundant_nodes(f);
+
+		jffs_garbage_collect_trigger(c);
+
+		D3(printk("jffs_insert_node(): ---------------------------"
+			  "------------------------------------------- 2\n"));
+	}
+
+	return 0;
+} /* jffs_insert_node()  */
+
+
+/* Unlink a jffs_node from the version list it is in.  */
+static inline void
+jffs_unlink_node_from_version_list(struct jffs_file *f,
+				   struct jffs_node *node)
+{
+	if (node->version_prev) {
+		node->version_prev->version_next = node->version_next;
+	} else {
+		f->version_head = node->version_next;
+	}
+	if (node->version_next) {
+		node->version_next->version_prev = node->version_prev;
+	} else {
+		f->version_tail = node->version_prev;
+	}
+}
+
+
+/* Unlink a jffs_node from the range list it is in.  */
+static inline void
+jffs_unlink_node_from_range_list(struct jffs_file *f, struct jffs_node *node)
+{
+	if (node->range_prev) {
+		node->range_prev->range_next = node->range_next;
+	}
+	else {
+		f->range_head = node->range_next;
+	}
+	if (node->range_next) {
+		node->range_next->range_prev = node->range_prev;
+	}
+	else {
+		f->range_tail = node->range_prev;
+	}
+}
+
+
+/* Function used by jffs_remove_redundant_nodes() below.  This function
+   classifies what kind of information a node adds to a file.  */
+static inline __u8
+jffs_classify_node(struct jffs_node *node)
+{
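+	/* Every node modifies at least the inode metadata; the name and
+	   data bits are OR'ed in below when the node carries them.  */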
+	__u8 mod_type = JFFS_MODIFY_INODE;
+
+	if (node->name_size) {
+		mod_type |= JFFS_MODIFY_NAME;
+	}
+	if (node->data_size || node->removed_size) {
+		mod_type |= JFFS_MODIFY_DATA;
+	}
+	return mod_type;
+}
+
+
+/* Remove redundant nodes from a file.  Mark the on-flash memory
+   as dirty.  */
+static int
+jffs_remove_redundant_nodes(struct jffs_file *f)
+{
+	struct jffs_node *newest_node;
+	struct jffs_node *cur;
+	struct jffs_node *prev;
+	__u8 newest_type;
+	__u8 mod_type;
+	__u8 node_with_name_later = 0;
+
+	if (!(newest_node = f->version_tail)) {
+		return 0;
+	}
+
+	/* What does the `newest_node' modify?  */
+	newest_type = jffs_classify_node(newest_node);
+	node_with_name_later = newest_type & JFFS_MODIFY_NAME;
+
+	D3(printk("jffs_remove_redundant_nodes(): ino: %u, name: \"%s\", "
+		  "newest_type: %u\n", f->ino, (f->name ? f->name : ""),
+		  newest_type));
+
+	/* Traverse the file's nodes and determine which of them are
+	   superfluous.  A node is redundant if it modifies nothing but the
+	   inode metadata, if the newest node carries a name and this one
+	   modifies at most the inode and the name, or if it is the oldest
+	   node, holds no data, only removes data, and a later node carries
+	   the name.  */
+	for (cur = newest_node->version_prev; cur; cur = prev) {
+		prev = cur->version_prev;
+		mod_type = jffs_classify_node(cur);
+		if ((mod_type <= JFFS_MODIFY_INODE)
+		    || ((newest_type & JFFS_MODIFY_NAME)
+			&& (mod_type
+			    <= (JFFS_MODIFY_INODE + JFFS_MODIFY_NAME)))
+		    || (cur->data_size == 0 && cur->removed_size
+			&& !cur->version_prev && node_with_name_later)) {
+			/* Yes, this node is redundant. Remove it.  */
+			D2(printk("jffs_remove_redundant_nodes(): "
+				  "Removing node: ino: %u, version: %u, "
+				  "mod_type: %u\n", cur->ino, cur->version,
+				  mod_type));
+			jffs_unlink_node_from_version_list(f, cur);
+			jffs_fmfree(f->c->fmc, cur->fm, cur);
+			jffs_free_node(cur);
+			DJM(no_jffs_node--);
+		}
+		else {
+			node_with_name_later |= (mod_type & JFFS_MODIFY_NAME);
+		}
+	}
+
+	return 0;
+}
+
+
+/* Insert a file into the hash table.  */
+static int
+jffs_insert_file_into_hash(struct jffs_file *f)
+{
+	int i = f->ino % f->c->hash_len;
+
+	D3(printk("jffs_insert_file_into_hash(): f->ino: %u\n", f->ino));
+
+	list_add(&f->hash, &f->c->hash[i]);
+	return 0;
+}
+
+
+/* Insert a file into the file system tree.  */
+int
+jffs_insert_file_into_tree(struct jffs_file *f)
+{
+	struct jffs_file *parent;
+
+	D3(printk("jffs_insert_file_into_tree(): name: \"%s\"\n",
+		  (f->name ? f->name : "")));
+
+	if (!(parent = jffs_find_file(f->c, f->pino))) {
+		if (f->pino == 0) {
+			f->c->root = f;
+			f->parent = NULL;
+			f->sibling_prev = NULL;
+			f->sibling_next = NULL;
+			return 0;
+		}
+		else {
+			D1(printk("jffs_insert_file_into_tree(): Found "
+				  "inode with no parent and pino == %u\n",
+				  f->pino));
+			return -1;
+		}
+	}
+	f->parent = parent;
+	f->sibling_next = parent->children;
+	if (f->sibling_next) {
+		f->sibling_next->sibling_prev = f;
+	}
+	f->sibling_prev = NULL;
+	parent->children = f;
+	return 0;
+}
+
+
+/* Remove a file from the hash table.  */
+static int
+jffs_unlink_file_from_hash(struct jffs_file *f)
+{
+	D3(printk("jffs_unlink_file_from_hash(): f: 0x%p, "
+		  "ino %u\n", f, f->ino));
+
+	list_del(&f->hash);
+	return 0;
+}
+
+
+/* Just remove the file from the parent's children.  Don't free
+   any memory.  */
+int
+jffs_unlink_file_from_tree(struct jffs_file *f)
+{
+	D3(printk("jffs_unlink_file_from_tree(): ino: %d, pino: %d, name: "
+		  "\"%s\"\n", f->ino, f->pino, (f->name ? f->name : "")));
+
+	if (f->sibling_prev) {
+		f->sibling_prev->sibling_next = f->sibling_next;
+	}
+	else if (f->parent) {
+	        D3(printk("f->parent=%p\n", f->parent));
+		f->parent->children = f->sibling_next;
+	}
+	if (f->sibling_next) {
+		f->sibling_next->sibling_prev = f->sibling_prev;
+	}
+	return 0;
+}
+
+
+/* Find a file with its inode number.  */
+struct jffs_file *
+jffs_find_file(struct jffs_control *c, __u32 ino)
+{
+	struct jffs_file *f;
+	int i = ino % c->hash_len;
+	struct list_head *tmp;
+
+	D3(printk("jffs_find_file(): ino: %u\n", ino));
+
+	for (tmp = c->hash[i].next; tmp != &c->hash[i]; tmp = tmp->next) {
+		f = list_entry(tmp, struct jffs_file, hash);
+		if (ino != f->ino)
+			continue;
+		D3(printk("jffs_find_file(): Found file with ino "
+			       "%u. (name: \"%s\")\n",
+			       ino, (f->name ? f->name : ""));
+		);
+		return f;
+	}
+	D3(printk("jffs_find_file(): Didn't find file "
+			 "with ino %u.\n", ino);
+	);
+	return NULL;
+}
+
+
+/* Find a file in a directory.  We are comparing the names.  */
+struct jffs_file *
+jffs_find_child(struct jffs_file *dir, const char *name, int len)
+{
+	struct jffs_file *f;
+
+	D3(printk("jffs_find_child()\n"));
+
+	for (f = dir->children; f; f = f->sibling_next) {
+		if (!f->deleted && f->name
+		    && !strncmp(f->name, name, len)
+		    && f->name[len] == '\0') {
+			break;
+		}
+	}
+
+	D3(if (f) {
+		printk("jffs_find_child(): Found \"%s\".\n", f->name);
+	}
+	else {
+		char *copy = (char *) kmalloc(len + 1, GFP_KERNEL);
+		if (copy) {
+			memcpy(copy, name, len);
+			copy[len] = '\0';
+		}
+		printk("jffs_find_child(): Didn't find the file \"%s\".\n",
+		       (copy ? copy : ""));
+		if (copy) {
+			kfree(copy);
+		}
+	});
+
+	return f;
+}
+
+
+/* Write a raw inode that takes up a certain amount of space in the flash
+   memory.  At the end of the flash device, there is often space that is
+   impossible to use.  At these times we want to mark this space as not
+   used.  In cases where the space is greater than or equal to the size
+   of a struct jffs_raw_inode, we write a "dummy node" that takes up this
+   space.  The space after the raw inode, if it exists, is left as it is.
+   Since this space after the raw inode contains JFFS_EMPTY_BITMASK bytes,
+   we can compute the checksum of it; we don't have to manipulate it any
+   further.
+
+   If the space left on the device is less than the size of a struct
+   jffs_raw_inode, this space is filled with JFFS_DIRTY_BITMASK bytes.
+   No raw inode is written this time.  */
+static int
+jffs_write_dummy_node(struct jffs_control *c, struct jffs_fm *dirty_fm)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+	int err;
+
+	D1(printk("jffs_write_dummy_node(): dirty_fm->offset = 0x%08x, "
+		  "dirty_fm->size = %u\n",
+		  dirty_fm->offset, dirty_fm->size));
+
+	if (dirty_fm->size >= sizeof(struct jffs_raw_inode)) {
+		struct jffs_raw_inode raw_inode;
+		memset(&raw_inode, 0, sizeof(struct jffs_raw_inode));
+		raw_inode.magic = JFFS_MAGIC_BITMASK;
+		raw_inode.dsize = dirty_fm->size
+				  - sizeof(struct jffs_raw_inode);
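+		/* The dummy node's data area is left as erased flash, i.e.
+		   dsize bytes of 0xff, so its checksum is simply dsize * 0xff
+		   (see the comment above this function).  */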
+		raw_inode.dchksum = raw_inode.dsize * 0xff;
+		raw_inode.chksum
+		= jffs_checksum(&raw_inode, sizeof(struct jffs_raw_inode));
+
+		if ((err = flash_safe_write(fmc->mtd,
+					    dirty_fm->offset,
+					    (u_char *)&raw_inode,
+					    sizeof(struct jffs_raw_inode)))
+		    < 0) {
+			printk(KERN_ERR "JFFS: jffs_write_dummy_node: "
+			       "flash_safe_write failed!\n");
+			return err;
+		}
+	}
+	else {
+		flash_safe_acquire(fmc->mtd);
+		flash_memset(fmc->mtd, dirty_fm->offset, 0, dirty_fm->size);
+		flash_safe_release(fmc->mtd);
+	}
+
+	D3(printk("jffs_write_dummy_node(): Leaving...\n"));
+	return 0;
+}
+
+
+/* Write a raw inode, possibly its name and possibly some data.  */
+int
+jffs_write_node(struct jffs_control *c, struct jffs_node *node,
+		struct jffs_raw_inode *raw_inode,
+		const char *name, const unsigned char *data,
+		int recoverable,
+		struct jffs_file *f)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+	struct jffs_fm *fm;
+	struct kvec node_iovec[4];
+	unsigned long iovec_cnt;
+
+	__u32 pos;
+	int err;
+	__u32 slack = 0;
+
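+	/* Space reserved on flash for this node: the raw inode header,
+	   the name rounded up to a four-byte boundary, and the data
+	   likewise rounded up.  */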
+	__u32 total_name_size = raw_inode->nsize
+				+ JFFS_GET_PAD_BYTES(raw_inode->nsize);
+	__u32 total_data_size = raw_inode->dsize
+				+ JFFS_GET_PAD_BYTES(raw_inode->dsize);
+	__u32 total_size = sizeof(struct jffs_raw_inode)
+			   + total_name_size + total_data_size;
+	
+	/* If this node isn't something that will eventually let
+	   GC free even more space, then don't allow it unless
+	   there's at least max_chunk_size space still available
+	*/
+	if (!recoverable)
+		slack = fmc->max_chunk_size;
+		
+
+	/* Fire the retrorockets and shoot the fruiton torpedoes, sir!  */
+
+	ASSERT(if (!node) {
+		printk("jffs_write_node(): node == NULL\n");
+		return -EINVAL;
+	});
+	ASSERT(if (raw_inode && raw_inode->nsize && !name) {
+		printk("*** jffs_write_node(): nsize = %u but name == NULL\n",
+		       raw_inode->nsize);
+		return -EINVAL;
+	});
+
+	D1(printk("jffs_write_node(): filename = \"%s\", ino = %u, "
+		  "total_size = %u\n",
+		  (name ? name : ""), raw_inode->ino,
+		  total_size));
+
+	jffs_fm_write_lock(fmc);
+
+retry:
+	fm = NULL;
+	err = 0;
+	while (!fm) {
+
+		/* Deadlocks suck. */
+		while(fmc->free_size < fmc->min_free_size + total_size + slack) {
+			jffs_fm_write_unlock(fmc);
+			if (!JFFS_ENOUGH_SPACE(c, total_size + slack))
+				return -ENOSPC;
+			jffs_fm_write_lock(fmc);
+		}
+
+		/* First try to allocate some flash memory.  */
+		err = jffs_fmalloc(fmc, total_size, node, &fm);
+		
+		if (err == -ENOSPC) {
+			/* Just out of space. GC and try again */
+			if (fmc->dirty_size < fmc->sector_size) {
+				D(printk("jffs_write_node(): jffs_fmalloc(0x%p, %u) "
+					 "failed, no dirty space to GC\n", fmc,
+					 total_size));
+				return err;
+			}
+			
+			D1(printk(KERN_INFO "jffs_write_node(): Calling jffs_garbage_collect_now()\n"));
+			jffs_fm_write_unlock(fmc);
+			if ((err = jffs_garbage_collect_now(c))) {
+				D(printk("jffs_write_node(): jffs_garbage_collect_now() failed\n"));
+				return err;
+			}
+			jffs_fm_write_lock(fmc);
+			continue;
+		} 
+
+		if (err < 0) {
+			jffs_fm_write_unlock(fmc);
+
+			D(printk("jffs_write_node(): jffs_fmalloc(0x%p, %u) "
+				 "failed!\n", fmc, total_size));
+			return err;
+		}
+
+		if (!fm->nodes) {
+			/* The jffs_fm struct that we got is not good enough.
+			   Make that space dirty and try again  */
+			if ((err = jffs_write_dummy_node(c, fm)) < 0) {
+				kfree(fm);
+				DJM(no_jffs_fm--);
+				jffs_fm_write_unlock(fmc);
+				D(printk("jffs_write_node(): "
+					 "jffs_write_dummy_node(): Failed!\n"));
+				return err;
+			}
+			fm = NULL;
+		}
+	} /* while(!fm) */
+	node->fm = fm;
+
+	ASSERT(if (fm->nodes == 0) {
+		printk(KERN_ERR "jffs_write_node(): fm->nodes == 0\n");
+	});
+
+	pos = node->fm->offset;
+
+	/* Increment the version number here. We can't let the caller
+	   set it beforehand, because we might have had to do GC on a node
+	   of this file - and we'd end up reusing version numbers.
+	*/
+	if (f) {
+		raw_inode->version = f->highest_version + 1;
+		D1(printk (KERN_NOTICE "jffs_write_node(): setting version of %s to %d\n", f->name, raw_inode->version));
+
+		/* if the file was deleted, set the deleted bit in the raw inode */
+		if (f->deleted)
+			raw_inode->deleted = 1;
+	}
+
+	/* Compute the checksum for the data and name chunks.  */
+	raw_inode->dchksum = jffs_checksum(data, raw_inode->dsize);
+	raw_inode->nchksum = jffs_checksum(name, raw_inode->nsize);
+
+	/* The checksum is calculated without the chksum and accurate
+	   fields so set them to zero first.  */
+	raw_inode->accurate = 0;
+	raw_inode->chksum = 0;
+	raw_inode->chksum = jffs_checksum(raw_inode,
+					  sizeof(struct jffs_raw_inode));
+	raw_inode->accurate = 0xff;
+
+	D3(printk("jffs_write_node(): About to write this raw inode to the "
+		  "flash at pos 0x%lx:\n", (long)pos));
+	D3(jffs_print_raw_inode(raw_inode));
+
+	/* The actual raw JFFS node */
+	node_iovec[0].iov_base = (void *) raw_inode;
+	node_iovec[0].iov_len = (size_t) sizeof(struct jffs_raw_inode);
+	iovec_cnt = 1;
+
+	/* Get name and size if there is one */
+	if (raw_inode->nsize) {
+		node_iovec[iovec_cnt].iov_base = (void *) name;
+		node_iovec[iovec_cnt].iov_len = (size_t) raw_inode->nsize;
+		iovec_cnt++;
+
+		if (JFFS_GET_PAD_BYTES(raw_inode->nsize)) {
+			static char allff[3]={255,255,255};
+			/* Add some extra padding if necessary */
+			node_iovec[iovec_cnt].iov_base = allff;
+			node_iovec[iovec_cnt].iov_len =
+				JFFS_GET_PAD_BYTES(raw_inode->nsize);
+			iovec_cnt++;
+		}
+	}
+
+	/* Get data and size if there is any */
+	if (raw_inode->dsize) {
+		node_iovec[iovec_cnt].iov_base = (void *) data;
+		node_iovec[iovec_cnt].iov_len = (size_t) raw_inode->dsize;
+		iovec_cnt++;
+		/* No need to pad this because we're not actually putting
+		   anything after it.
+		*/
+	}
+
+	if ((err = flash_safe_writev(fmc->mtd, node_iovec, iovec_cnt,
+				    pos)) < 0) {
+		jffs_fmfree_partly(fmc, fm, 0);
+		jffs_fm_write_unlock(fmc);
+		printk(KERN_ERR "JFFS: jffs_write_node: Failed to write, "
+		       "requested %i, wrote %i\n", total_size, err);
+		goto retry;
+	}
+	if (raw_inode->deleted)
+		f->deleted = 1;
+
+	jffs_fm_write_unlock(fmc);
+	D3(printk("jffs_write_node(): Leaving...\n"));
+	return raw_inode->dsize;
+} /* jffs_write_node()  */
+
+
+/* Read data from the node and write it to the buffer.  'node_offset'
+   is how much we have read from this particular node before and which
+   shouldn't be read again.  'max_size' is how much space there is in
+   the buffer.  */
+static int
+jffs_get_node_data(struct jffs_file *f, struct jffs_node *node, 
+		   unsigned char *buf,__u32 node_offset, __u32 max_size)
+{
+	struct jffs_fmcontrol *fmc = f->c->fmc;
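+	/* Absolute flash position of the wanted data: start of the on-flash
+	   node, plus the offset of its data area within that node, plus the
+	   part of the data that has already been read.  */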
+	__u32 pos = node->fm->offset + node->fm_offset + node_offset;
+	__u32 avail = node->data_size - node_offset;
+	__u32 r;
+
+	D2(printk("  jffs_get_node_data(): file: \"%s\", ino: %u, "
+		  "version: %u, node_offset: %u\n",
+		  f->name, node->ino, node->version, node_offset));
+
+	r = min(avail, max_size);
+	D3(printk(KERN_NOTICE "jffs_get_node_data\n"));
+	flash_safe_read(fmc->mtd, pos, buf, r);
+
+	D3(printk("  jffs_get_node_data(): Read %u byte%s.\n",
+		  r, (r == 1 ? "" : "s")));
+
+	return r;
+}
+
+
+/* Read data from the file's nodes.  Write the data to the buffer
+   'buf'.  'read_offset' tells how much data we should skip.  */
+int
+jffs_read_data(struct jffs_file *f, unsigned char *buf, __u32 read_offset,
+	       __u32 size)
+{
+	struct jffs_node *node;
+	__u32 read_data = 0; /* Total amount of read data.  */
+	__u32 node_offset = 0;
+	__u32 pos = 0; /* Number of bytes traversed.  */
+
+	D2(printk("jffs_read_data(): file = \"%s\", read_offset = %d, "
+		  "size = %u\n",
+		  (f->name ? f->name : ""), read_offset, size));
+
+	if (read_offset >= f->size) {
+		D(printk("  f->size: %d\n", f->size));
+		return 0;
+	}
+
+	/* First find the node to read data from.  */
+	node = f->range_head;
+	while (pos <= read_offset) {
+		node_offset = read_offset - pos;
+		if (node_offset >= node->data_size) {
+			pos += node->data_size;
+			node = node->range_next;
+		}
+		else {
+			break;
+		}
+	}
+
+	/* "Cats are living proof that not everything in nature
+	   has to be useful."
+	   - Garrison Keillor ('97)  */
+
+	/* Fill the buffer.  */
+	while (node && (read_data < size)) {
+		int r;
+		if (!node->fm) {
+			/* This node does not refer to real data.  */
+			r = min(size - read_data,
+				     node->data_size - node_offset);
+			memset(&buf[read_data], 0, r);
+		}
+		else if ((r = jffs_get_node_data(f, node, &buf[read_data],
+						 node_offset,
+						 size - read_data)) < 0) {
+			return r;
+		}
+		read_data += r;
+		node_offset = 0;
+		node = node->range_next;
+	}
+	D3(printk("  jffs_read_data(): Read %u bytes.\n", read_data));
+	return read_data;
+}
+
+
+/* Used for traversing all nodes in the hash table.  */
+int
+jffs_foreach_file(struct jffs_control *c, int (*func)(struct jffs_file *))
+{
+	int pos;
+	int r;
+	int result = 0;
+
+	for (pos = 0; pos < c->hash_len; pos++) {
+		struct list_head *p, *next;
+		for (p = c->hash[pos].next; p != &c->hash[pos]; p = next) {
+			/* We need a reference to the next file in the
+			   list because `func' might remove the current
+			   file `f'.  */
+			next = p->next;
+			r = func(list_entry(p, struct jffs_file, hash));
+			if (r < 0)
+				return r;
+			result += r;
+		}
+	}
+
+	return result;
+}
+
+
+/* Free all nodes associated with a file.  */
+static int
+jffs_free_node_list(struct jffs_file *f)
+{
+	struct jffs_node *node;
+	struct jffs_node *p;
+
+	D3(printk("jffs_free_node_list(): f #%u, \"%s\"\n",
+		  f->ino, (f->name ? f->name : "")));
+	node = f->version_head;
+	while (node) {
+		p = node;
+		node = node->version_next;
+		jffs_free_node(p);
+		DJM(no_jffs_node--);
+	}
+	return 0;
+}
+
+
+/* Free a file and its name.  */
+static int
+jffs_free_file(struct jffs_file *f)
+{
+	D3(printk("jffs_free_file: f #%u, \"%s\"\n",
+		  f->ino, (f->name ? f->name : "")));
+
+	if (f->name) {
+		kfree(f->name);
+		DJM(no_name--);
+	}
+	kfree(f);
+	no_jffs_file--;
+	return 0;
+}
+
+static long
+jffs_get_file_count(void)
+{
+	return no_jffs_file;
+}
+
+/* See if a file is deleted. If so, mark that file's nodes as obsolete.  */
+int
+jffs_possibly_delete_file(struct jffs_file *f)
+{
+	struct jffs_node *n;
+
+	D3(printk("jffs_possibly_delete_file(): ino: %u\n",
+		  f->ino));
+
+	ASSERT(if (!f) {
+		printk(KERN_ERR "jffs_possibly_delete_file(): f == NULL\n");
+		return -1;
+	});
+
+	if (f->deleted) {
+		/* First try to remove all older versions.  Commence with
+		   the oldest node.  */
+		for (n = f->version_head; n; n = n->version_next) {
+			if (!n->fm) {
+				continue;
+			}
+			if (jffs_fmfree(f->c->fmc, n->fm, n) < 0) {
+				break;
+			}
+		}
+		/* Unlink the file from the filesystem.  */
+		if (!f->c->building_fs) {
+			jffs_unlink_file_from_tree(f);
+		}
+		jffs_unlink_file_from_hash(f);
+		jffs_free_node_list(f);
+		jffs_free_file(f);
+	}
+	return 0;
+}
+
+
+/* Used in conjunction with jffs_foreach_file() to count the number
+   of files in the file system.  */
+int
+jffs_file_count(struct jffs_file *f)
+{
+	return 1;
+}
+
+
+/* Build up a file's range list from scratch by going through the
+   version list.  */
+static int
+jffs_build_file(struct jffs_file *f)
+{
+	struct jffs_node *n;
+
+	D3(printk("jffs_build_file(): ino: %u, name: \"%s\"\n",
+		  f->ino, (f->name ? f->name : "")));
+
+	for (n = f->version_head; n; n = n->version_next) {
+		jffs_update_file(f, n);
+	}
+	return 0;
+}
+
+
+/* Remove an amount of data from a file. If this amount of data is
+   zero, that could mean that a node should be split in two parts.
+   We remove or change the appropriate nodes in the lists.
+
+   Starting offset of area to be removed is node->data_offset,
+   and the length of the area is in node->removed_size.   */
+static int
+jffs_delete_data(struct jffs_file *f, struct jffs_node *node)
+{
+	struct jffs_node *n;
+	__u32 offset = node->data_offset;
+	__u32 remove_size = node->removed_size;
+
+	D3(printk("jffs_delete_data(): offset = %u, remove_size = %u\n",
+		  offset, remove_size));
+
+	if (remove_size == 0
+	    && f->range_tail
+	    && f->range_tail->data_offset + f->range_tail->data_size
+	       == offset) {
+		/* A simple append; nothing to remove or no node to split.  */
+		return 0;
+	}
+
+	/* Find the node where we should begin the removal.  */
+	for (n = f->range_head; n; n = n->range_next) {
+		if (n->data_offset + n->data_size > offset) {
+			break;
+		}
+	}
+	if (!n) {
+		/* If there's no data in the file there's no data to
+		   remove either.  */
+		return 0;
+	}
+
+	if (n->data_offset > offset) {
+		/* XXX: Not implemented yet.  */
+		printk(KERN_WARNING "JFFS: An unexpected situation "
+		       "occurred in jffs_delete_data.\n");
+	}
+	else if (n->data_offset < offset) {
+		/* See if the node has to be split into two parts.  */
+		if (n->data_offset + n->data_size > offset + remove_size) {
+			/* Do the split.  */
+			struct jffs_node *new_node;
+			D3(printk("jffs_delete_data(): Split node with "
+				  "version number %u.\n", n->version));
+
+			if (!(new_node = jffs_alloc_node())) {
+				D(printk("jffs_delete_data(): -ENOMEM\n"));
+				return -ENOMEM;
+			}
+			DJM(no_jffs_node++);
+
+			new_node->ino = n->ino;
+			new_node->version = n->version;
+			new_node->data_offset = offset;
+			new_node->data_size = n->data_size - (remove_size + (offset - n->data_offset));
+			new_node->fm_offset = n->fm_offset + (remove_size + (offset - n->data_offset));
+			new_node->name_size = n->name_size;
+			new_node->fm = n->fm;
+			new_node->version_prev = n;
+			new_node->version_next = n->version_next;
+			if (new_node->version_next) {
+				new_node->version_next->version_prev
+				= new_node;
+			}
+			else {
+				f->version_tail = new_node;
+			}
+			n->version_next = new_node;
+			new_node->range_prev = n;
+			new_node->range_next = n->range_next;
+			if (new_node->range_next) {
+				new_node->range_next->range_prev = new_node;
+			}
+			else {
+				f->range_tail = new_node;
+			}
+			/* A very interesting can of worms: link the new node
+			   in right after n and truncate n so that it ends
+			   where the removed region begins.  */
+			n->range_next = new_node;
+			n->data_size = offset - n->data_offset;
+			if (new_node->fm)
+				jffs_add_node(new_node);
+			else {
+				D1(printk(KERN_WARNING "jffs_delete_data(): Splitting an empty node (file hole)!\n"));
+				D1(printk(KERN_WARNING "FIXME: Did dwmw2 do the right thing here?\n"));
+			}
+			n = new_node->range_next;
+			remove_size = 0;
+		}
+		else {
+			/* No.  No need to split the node.  Just remove
+			   the end of the node.  */
+			int r = min(n->data_offset + n->data_size
+					 - offset, remove_size);
+			n->data_size -= r;
+			remove_size -= r;
+			n = n->range_next;
+		}
+	}
+
+	/* Remove as many nodes as necessary.  */
+	while (n && remove_size) {
+		if (n->data_size <= remove_size) {
+			struct jffs_node *p = n;
+			remove_size -= n->data_size;
+			n = n->range_next;
+			D3(printk("jffs_delete_data(): Removing node: "
+				  "ino: %u, version: %u%s\n",
+				  p->ino, p->version,
+				  (p->fm ? "" : " (virtual)")));
+			if (p->fm) {
+				jffs_fmfree(f->c->fmc, p->fm, p);
+			}
+			jffs_unlink_node_from_range_list(f, p);
+			jffs_unlink_node_from_version_list(f, p);
+			jffs_free_node(p);
+			DJM(no_jffs_node--);
+		}
+		else {
+			n->data_size -= remove_size;
+			n->fm_offset += remove_size;
+			n->data_offset -= (node->removed_size - remove_size);
+			n = n->range_next;
+			break;
+		}
+	}
+
+	/* Adjust the following nodes' information about offsets etc.  */
+	while (n && node->removed_size) {
+		n->data_offset -= node->removed_size;
+		n = n->range_next;
+	}
+
+	if (node->removed_size > (f->size - node->data_offset)) {
+		/* It's possible that the removed_size is in fact
+		 * greater than the amount of data we actually thought
+		 * were present in the first place - some of the nodes 
+		 * which this node originally obsoleted may already have
+		 * been deleted from the flash by subsequent garbage 
+		 * collection.
+		 *
+		 * If this is the case, don't let f->size go negative.
+		 * Bad things would happen :)
+		 */
+		f->size = node->data_offset;
+	} else {
+		f->size -= node->removed_size;
+	}
+	D3(printk("jffs_delete_data(): f->size = %d\n", f->size));
+	return 0;
+} /* jffs_delete_data()  */
+
+
+/* Insert some data into a file.  Prior to the call to this function,
+   jffs_delete_data should be called.  */
+static int
+jffs_insert_data(struct jffs_file *f, struct jffs_node *node)
+{
+	D3(printk("jffs_insert_data(): node->data_offset = %u, "
+		  "node->data_size = %u, f->size = %u\n",
+		  node->data_offset, node->data_size, f->size));
+
+	/* Find the position where we should insert data.  */
+	retry:
+	if (node->data_offset == f->size) {
+		/* A simple append.  This is the most common operation.  */
+		node->range_next = NULL;
+		node->range_prev = f->range_tail;
+		if (node->range_prev) {
+			node->range_prev->range_next = node;
+		}
+		f->range_tail = node;
+		f->size += node->data_size;
+		if (!f->range_head) {
+			f->range_head = node;
+		}
+	}
+	else if (node->data_offset < f->size) {
+		/* Trying to insert data into the middle of the file.  This
+		   is no problem because jffs_delete_data() has already
+		   prepared the range list for us.  */
+		struct jffs_node *n;
+
+		/* Find the correct place for the insertion and then insert
+		   the node.  */
+		for (n = f->range_head; n; n = n->range_next) {
+			D2(printk("Cool stuff's happening!\n"));
+
+			if (n->data_offset == node->data_offset) {
+				node->range_prev = n->range_prev;
+				if (node->range_prev) {
+					node->range_prev->range_next = node;
+				}
+				else {
+					f->range_head = node;
+				}
+				node->range_next = n;
+				n->range_prev = node;
+				break;
+			}
+			ASSERT(else if (n->data_offset + n->data_size >
+					node->data_offset) {
+				printk(KERN_ERR "jffs_insert_data(): "
+				       "Couldn't find a place to insert "
+				       "the data!\n");
+				return -1;
+			});
+		}
+
+		/* Adjust later nodes' offsets etc.  */
+		n = node->range_next;
+		while (n) {
+			n->data_offset += node->data_size;
+			n = n->range_next;
+		}
+		f->size += node->data_size;
+	}
+	else if (node->data_offset > f->size) {
+		/* Okay.  This is tricky.  This means that we want to insert
+		   data at a place that is beyond the limits of the file as
+		   it is constructed right now.  This is actually a common
+		   event that could occur, for instance, during the mounting
+		   of the file system if a large file has been truncated,
+		   rewritten and then only partially garbage collected.  */
+
+		struct jffs_node *n;
+
+		/* We need a place holder for the data that is missing in
+		   front of this insertion.  This "virtual node" will not
+		   be associated with any space on the flash device.  */
+		struct jffs_node *virtual_node;
+		if (!(virtual_node = jffs_alloc_node())) {
+			return -ENOMEM;
+		}
+
+		D(printk("jffs_insert_data: Inserting a virtual node.\n"));
+		D(printk("  node->data_offset = %u\n", node->data_offset));
+		D(printk("  f->size = %u\n", f->size));
+
+		virtual_node->ino = node->ino;
+		virtual_node->version = node->version;
+		virtual_node->removed_size = 0;
+		virtual_node->fm_offset = 0;
+		virtual_node->name_size = 0;
+		virtual_node->fm = NULL; /* This is a virtual data holder.  */
+		virtual_node->version_prev = NULL;
+		virtual_node->version_next = NULL;
+		virtual_node->range_next = NULL;
+
+		/* Are there any data at all in the file yet?  */
+		if (f->range_head) {
+			virtual_node->data_offset
+			= f->range_tail->data_offset
+			  + f->range_tail->data_size;
+			virtual_node->data_size
+			= node->data_offset - virtual_node->data_offset;
+			virtual_node->range_prev = f->range_tail;
+			f->range_tail->range_next = virtual_node;
+		}
+		else {
+			virtual_node->data_offset = 0;
+			virtual_node->data_size = node->data_offset;
+			virtual_node->range_prev = NULL;
+			f->range_head = virtual_node;
+		}
+
+		f->range_tail = virtual_node;
+		f->size += virtual_node->data_size;
+
+		/* Insert this virtual node in the version list as well.  */
+		for (n = f->version_head; n ; n = n->version_next) {
+			if (n->version == virtual_node->version) {
+				virtual_node->version_prev = n->version_prev;
+				n->version_prev = virtual_node;
+				if (virtual_node->version_prev) {
+					virtual_node->version_prev
+					->version_next = virtual_node;
+				}
+				else {
+					f->version_head = virtual_node;
+				}
+				virtual_node->version_next = n;
+				break;
+			}
+		}
+
+		D(jffs_print_node(virtual_node));
+
+		/* Make a new try to insert the node.  */
+		goto retry;
+	}
+
+	D3(printk("jffs_insert_data(): f->size = %d\n", f->size));
+	return 0;
+}
+
+
+/* A new node (with data) has been added to the file and now the range
+   list has to be modified.  */
+static int
+jffs_update_file(struct jffs_file *f, struct jffs_node *node)
+{
+	int err;
+
+	D3(printk("jffs_update_file(): ino: %u, version: %u\n",
+		  f->ino, node->version));
+
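+	/* Three cases follow: the node carries neither data nor a removal
+	   (nothing to do to the range list), it only removes data, or it
+	   both removes and inserts data.  */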
+	if (node->data_size == 0) {
+		if (node->removed_size == 0) {
+			/* data_offset == X  */
+			/* data_size == 0  */
+			/* remove_size == 0  */
+		}
+		else {
+			/* data_offset == X  */
+			/* data_size == 0  */
+			/* remove_size != 0  */
+			if ((err = jffs_delete_data(f, node)) < 0) {
+				return err;
+			}
+		}
+	}
+	else {
+		/* data_offset == X  */
+		/* data_size != 0  */
+		/* remove_size == Y  */
+		if ((err = jffs_delete_data(f, node)) < 0) {
+			return err;
+		}
+		if ((err = jffs_insert_data(f, node)) < 0) {
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Print the contents of a node.  */
+void
+jffs_print_node(struct jffs_node *n)
+{
+	D(printk("jffs_node: 0x%p\n", n));
+	D(printk("{\n"));
+	D(printk("        0x%08x, /* version  */\n", n->version));
+	D(printk("        0x%08x, /* data_offset  */\n", n->data_offset));
+	D(printk("        0x%08x, /* data_size  */\n", n->data_size));
+	D(printk("        0x%08x, /* removed_size  */\n", n->removed_size));
+	D(printk("        0x%08x, /* fm_offset  */\n", n->fm_offset));
+	D(printk("        0x%02x,       /* name_size  */\n", n->name_size));
+	D(printk("        0x%p, /* fm,  fm->offset: %u  */\n",
+		 n->fm, (n->fm ? n->fm->offset : 0)));
+	D(printk("        0x%p, /* version_prev  */\n", n->version_prev));
+	D(printk("        0x%p, /* version_next  */\n", n->version_next));
+	D(printk("        0x%p, /* range_prev  */\n", n->range_prev));
+	D(printk("        0x%p, /* range_next  */\n", n->range_next));
+	D(printk("}\n"));
+}
+
+
+/* Print the contents of a raw inode.  */
+void
+jffs_print_raw_inode(struct jffs_raw_inode *raw_inode)
+{
+	D(printk("jffs_raw_inode: inode number: %u\n", raw_inode->ino));
+	D(printk("{\n"));
+	D(printk("        0x%08x, /* magic  */\n", raw_inode->magic));
+	D(printk("        0x%08x, /* ino  */\n", raw_inode->ino));
+	D(printk("        0x%08x, /* pino  */\n", raw_inode->pino));
+	D(printk("        0x%08x, /* version  */\n", raw_inode->version));
+	D(printk("        0x%08x, /* mode  */\n", raw_inode->mode));
+	D(printk("        0x%04x,     /* uid  */\n", raw_inode->uid));
+	D(printk("        0x%04x,     /* gid  */\n", raw_inode->gid));
+	D(printk("        0x%08x, /* atime  */\n", raw_inode->atime));
+	D(printk("        0x%08x, /* mtime  */\n", raw_inode->mtime));
+	D(printk("        0x%08x, /* ctime  */\n", raw_inode->ctime));
+	D(printk("        0x%08x, /* offset  */\n", raw_inode->offset));
+	D(printk("        0x%08x, /* dsize  */\n", raw_inode->dsize));
+	D(printk("        0x%08x, /* rsize  */\n", raw_inode->rsize));
+	D(printk("        0x%02x,       /* nsize  */\n", raw_inode->nsize));
+	D(printk("        0x%02x,       /* nlink  */\n", raw_inode->nlink));
+	D(printk("        0x%02x,       /* spare  */\n",
+		 raw_inode->spare));
+	D(printk("        %u,          /* rename  */\n",
+		 raw_inode->rename));
+	D(printk("        %u,          /* deleted  */\n",
+		 raw_inode->deleted));
+	D(printk("        0x%02x,       /* accurate  */\n",
+		 raw_inode->accurate));
+	D(printk("        0x%08x, /* dchksum  */\n", raw_inode->dchksum));
+	D(printk("        0x%04x,     /* nchksum  */\n", raw_inode->nchksum));
+	D(printk("        0x%04x,     /* chksum  */\n", raw_inode->chksum));
+	D(printk("}\n"));
+}
+
+
+/* Print the contents of a file.  */
+#if 0
+int
+jffs_print_file(struct jffs_file *f)
+{
+	D(int i);
+	D(printk("jffs_file: 0x%p\n", f));
+	D(printk("{\n"));
+	D(printk("        0x%08x, /* ino  */\n", f->ino));
+	D(printk("        0x%08x, /* pino  */\n", f->pino));
+	D(printk("        0x%08x, /* mode  */\n", f->mode));
+	D(printk("        0x%04x,     /* uid  */\n", f->uid));
+	D(printk("        0x%04x,     /* gid  */\n", f->gid));
+	D(printk("        0x%08x, /* atime  */\n", f->atime));
+	D(printk("        0x%08x, /* mtime  */\n", f->mtime));
+	D(printk("        0x%08x, /* ctime  */\n", f->ctime));
+	D(printk("        0x%02x,       /* nsize  */\n", f->nsize));
+	D(printk("        0x%02x,       /* nlink  */\n", f->nlink));
+	D(printk("        0x%02x,       /* deleted  */\n", f->deleted));
+	D(printk("        \"%s\", ", (f->name ? f->name : "")));
+	D(for (i = strlen(f->name ? f->name : ""); i < 8; ++i) {
+		printk(" ");
+	});
+	D(printk("/* name  */\n"));
+	D(printk("        0x%08x, /* size  */\n", f->size));
+	D(printk("        0x%08x, /* highest_version  */\n",
+		 f->highest_version));
+	D(printk("        0x%p, /* c  */\n", f->c));
+	D(printk("        0x%p, /* parent  */\n", f->parent));
+	D(printk("        0x%p, /* children  */\n", f->children));
+	D(printk("        0x%p, /* sibling_prev  */\n", f->sibling_prev));
+	D(printk("        0x%p, /* sibling_next  */\n", f->sibling_next));
+	D(printk("        0x%p, /* hash_prev  */\n", f->hash.prev));
+	D(printk("        0x%p, /* hash_next  */\n", f->hash.next));
+	D(printk("        0x%p, /* range_head  */\n", f->range_head));
+	D(printk("        0x%p, /* range_tail  */\n", f->range_tail));
+	D(printk("        0x%p, /* version_head  */\n", f->version_head));
+	D(printk("        0x%p, /* version_tail  */\n", f->version_tail));
+	D(printk("}\n"));
+	return 0;
+}
+#endif  /*  0  */
+
+void
+jffs_print_hash_table(struct jffs_control *c)
+{
+	int i;
+
+	printk("JFFS: Dumping the file system's hash table...\n");
+	for (i = 0; i < c->hash_len; i++) {
+		struct list_head *p;
+		for (p = c->hash[i].next; p != &c->hash[i]; p = p->next) {
+			struct jffs_file *f=list_entry(p,struct jffs_file,hash);
+			printk("*** c->hash[%u]: \"%s\" "
+			       "(ino: %u, pino: %u)\n",
+			       i, (f->name ? f->name : ""),
+			       f->ino, f->pino);
+		}
+	}
+}
+
+
+void
+jffs_print_tree(struct jffs_file *first_file, int indent)
+{
+	struct jffs_file *f;
+	char *space;
+	int dir;
+
+	if (!first_file) {
+		return;
+	}
+
+	if (!(space = (char *) kmalloc(indent + 1, GFP_KERNEL))) {
+		printk("jffs_print_tree(): Out of memory!\n");
+		return;
+	}
+
+	memset(space, ' ', indent);
+	space[indent] = '\0';
+
+	for (f = first_file; f; f = f->sibling_next) {
+		dir = S_ISDIR(f->mode);
+		printk("%s%s%s (ino: %u, highest_version: %u, size: %u)\n",
+		       space, (f->name ? f->name : ""), (dir ? "/" : ""),
+		       f->ino, f->highest_version, f->size);
+		if (dir) {
+			jffs_print_tree(f->children, indent + 2);
+		}
+	}
+
+	kfree(space);
+}
+
+
+#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
+void
+jffs_print_memory_allocation_statistics(void)
+{
+	static long printout;
+	printk("________ Memory printout #%ld ________\n", ++printout);
+	printk("no_jffs_file = %ld\n", no_jffs_file);
+	printk("no_jffs_node = %ld\n", no_jffs_node);
+	printk("no_jffs_control = %ld\n", no_jffs_control);
+	printk("no_jffs_raw_inode = %ld\n", no_jffs_raw_inode);
+	printk("no_jffs_node_ref = %ld\n", no_jffs_node_ref);
+	printk("no_jffs_fm = %ld\n", no_jffs_fm);
+	printk("no_jffs_fmcontrol = %ld\n", no_jffs_fmcontrol);
+	printk("no_hash = %ld\n", no_hash);
+	printk("no_name = %ld\n", no_name);
+	printk("\n");
+}
+#endif
+
+
+/* Rewrite `size' bytes, and begin at `node'.  */
+static int
+jffs_rewrite_data(struct jffs_file *f, struct jffs_node *node, __u32 size)
+{
+	struct jffs_control *c = f->c;
+	struct jffs_fmcontrol *fmc = c->fmc;
+	struct jffs_raw_inode raw_inode;
+	struct jffs_node *new_node;
+	struct jffs_fm *fm;
+	__u32 pos;
+	__u32 pos_dchksum;
+	__u32 total_name_size;
+	__u32 total_data_size;
+	__u32 total_size;
+	int err;
+
+	D1(printk("***jffs_rewrite_data(): node: %u, name: \"%s\", size: %u\n",
+		  f->ino, (f->name ? f->name : "(null)"), size));
+
+	/* Create and initialize the new node.  */
+	if (!(new_node = jffs_alloc_node())) {
+		D(printk("jffs_rewrite_data(): "
+			 "Failed to allocate node.\n"));
+		return -ENOMEM;
+	}
+	DJM(no_jffs_node++);
+	new_node->data_offset = node->data_offset;
+	new_node->removed_size = size;
+	total_name_size = JFFS_PAD(f->nsize);
+	total_data_size = JFFS_PAD(size);
+	total_size = sizeof(struct jffs_raw_inode)
+		     + total_name_size + total_data_size;
+	new_node->fm_offset = sizeof(struct jffs_raw_inode)
+			      + total_name_size;
+
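+/* Note on the retry label below: on a flash write error the partly written
+   chunk is returned as dirty via jffs_fmfree_partly(), the write lock is
+   dropped, and control jumps back here to allocate fresh flash space and
+   rewrite the whole node from scratch.  */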
+retry:
+	jffs_fm_write_lock(fmc);
+	err = 0;
+
+	if ((err = jffs_fmalloc(fmc, total_size, new_node, &fm)) < 0) {
+		DJM(no_jffs_node--);
+		jffs_fm_write_unlock(fmc);
+		D(printk("jffs_rewrite_data(): Failed to allocate fm.\n"));
+		jffs_free_node(new_node);
+		return err;
+	}
+	else if (!fm->nodes) {
+		/* The jffs_fm struct that we got is not big enough.  */
+		/* This should never happen, because we deal with this case
+		   in jffs_garbage_collect_next().*/
+		printk(KERN_WARNING "jffs_rewrite_data(): Allocated node is too small (%d bytes of %d)\n", fm->size, total_size);
+		if ((err = jffs_write_dummy_node(c, fm)) < 0) {
+			D(printk("jffs_rewrite_data(): "
+				 "jffs_write_dummy_node() Failed!\n"));
+		} else {
+			err = -ENOSPC;
+		}
+		DJM(no_jffs_fm--);
+		jffs_fm_write_unlock(fmc);
+		kfree(fm);
+		
+		return err;
+	}
+	new_node->fm = fm;
+
+	/* Initialize the raw inode.  */
+	raw_inode.magic = JFFS_MAGIC_BITMASK;
+	raw_inode.ino = f->ino;
+	raw_inode.pino = f->pino;
+	raw_inode.version = f->highest_version + 1;
+	raw_inode.mode = f->mode;
+	raw_inode.uid = f->uid;
+	raw_inode.gid = f->gid;
+	raw_inode.atime = f->atime;
+	raw_inode.mtime = f->mtime;
+	raw_inode.ctime = f->ctime;
+	raw_inode.offset = node->data_offset;
+	raw_inode.dsize = size;
+	raw_inode.rsize = size;
+	raw_inode.nsize = f->nsize;
+	raw_inode.nlink = f->nlink;
+	raw_inode.spare = 0;
+	raw_inode.rename = 0;
+	raw_inode.deleted = f->deleted;
+	raw_inode.accurate = 0xff;
+	raw_inode.dchksum = 0;
+	raw_inode.nchksum = 0;
+
+	pos = new_node->fm->offset;
+	pos_dchksum = pos + JFFS_RAW_INODE_DCHKSUM_OFFSET;
+
+	D3(printk("jffs_rewrite_data(): Writing this raw inode "
+		  "to pos 0x%ul.\n", pos));
+	D3(jffs_print_raw_inode(&raw_inode));
+
+	if ((err = flash_safe_write(fmc->mtd, pos,
+				    (u_char *) &raw_inode,
+				    sizeof(struct jffs_raw_inode)
+				    - sizeof(__u32)
+				    - sizeof(__u16) - sizeof(__u16))) < 0) {
+		jffs_fmfree_partly(fmc, fm,
+				   total_name_size + total_data_size);
+		jffs_fm_write_unlock(fmc);
+		printk(KERN_ERR "JFFS: jffs_rewrite_data: Write error during "
+			"rewrite. (raw inode)\n");
+		printk(KERN_ERR "JFFS: jffs_rewrite_data: Now retrying "
+			"rewrite. (raw inode)\n");
+		goto retry;
+	}
+	pos += sizeof(struct jffs_raw_inode);
+
+	/* Write the name to the flash memory.  */
+	if (f->nsize) {
+		D3(printk("jffs_rewrite_data(): Writing name \"%s\" to "
+			  "pos 0x%ul.\n", f->name, (unsigned int) pos));
+		if ((err = flash_safe_write(fmc->mtd, pos,
+					    (u_char *)f->name,
+					    f->nsize)) < 0) {
+			jffs_fmfree_partly(fmc, fm, total_data_size);
+			jffs_fm_write_unlock(fmc);
+			printk(KERN_ERR "JFFS: jffs_rewrite_data: Write "
+				"error during rewrite. (name)\n");
+			printk(KERN_ERR "JFFS: jffs_rewrite_data: Now retrying "
+				"rewrite. (name)\n");
+			goto retry;
+		}
+		pos += total_name_size;
+		raw_inode.nchksum = jffs_checksum(f->name, f->nsize);
+	}
+
+	/* Write the data.  */
+	if (size) {
+		int r;
+		unsigned char *page;
+		__u32 offset = node->data_offset;
+
+		if (!(page = (unsigned char *)__get_free_page(GFP_KERNEL))) {
+			jffs_fmfree_partly(fmc, fm, 0);
+			jffs_fm_write_unlock(fmc);
+			return -1;
+		}
+
+		while (size) {
+			__u32 s = min(size, (__u32)PAGE_SIZE);
+			if ((r = jffs_read_data(f, (char *)page,
+						offset, s)) < s) {
+				free_page((unsigned long)page);
+				jffs_fmfree_partly(fmc, fm, 0);
+				jffs_fm_write_unlock(fmc);
+				printk(KERN_ERR "JFFS: jffs_rewrite_data: "
+					 "jffs_read_data() "
+					 "failed! (r = %d)\n", r);
+				return -1;
+			}
+			if ((err = flash_safe_write(fmc->mtd,
+						    pos, page, r)) < 0) {
+				free_page((unsigned long)page);
+				jffs_fmfree_partly(fmc, fm, 0);
+				jffs_fm_write_unlock(fmc);
+				printk(KERN_ERR "JFFS: jffs_rewrite_data: "
+				       "Write error during rewrite. "
+				       "(data)\n");
+				goto retry;
+			}
+			pos += r;
+			size -= r;
+			offset += r;
+			raw_inode.dchksum += jffs_checksum(page, r);
+		}
+
+		free_page((unsigned long)page);
+	}
+
+	raw_inode.accurate = 0;
+	raw_inode.chksum = jffs_checksum(&raw_inode,
+					 sizeof(struct jffs_raw_inode)
+					 - sizeof(__u16));
+
+	/* Add the checksum.  */
+	if ((err
+	     = flash_safe_write(fmc->mtd, pos_dchksum,
+				&((u_char *)
+				&raw_inode)[JFFS_RAW_INODE_DCHKSUM_OFFSET],
+				sizeof(__u32) + sizeof(__u16)
+				+ sizeof(__u16))) < 0) {
+		jffs_fmfree_partly(fmc, fm, 0);
+		jffs_fm_write_unlock(fmc);
+		printk(KERN_ERR "JFFS: jffs_rewrite_data: Write error during "
+		       "rewrite. (checksum)\n");
+		goto retry;
+	}
+
+	/* Now make the file system aware of the newly written node.  */
+	jffs_insert_node(c, f, &raw_inode, f->name, new_node);
+	jffs_fm_write_unlock(fmc);
+
+	D3(printk("jffs_rewrite_data(): Leaving...\n"));
+	return 0;
+} /* jffs_rewrite_data()  */
+
+
+/* jffs_garbage_collect_next implements one step in the garbage collect
+   process and is often called multiple times at each occasion of a
+   garbage collect.  */
+
+static int
+jffs_garbage_collect_next(struct jffs_control *c)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+	struct jffs_node *node;
+	struct jffs_file *f;
+	int err = 0;
+	__u32 size;
+	__u32 data_size;
+	__u32 total_name_size;
+	__u32 extra_available;
+	__u32 space_needed;
+	__u32 free_chunk_size1 = jffs_free_size1(fmc);
+	D2(__u32 free_chunk_size2 = jffs_free_size2(fmc));
+
+	/* Get the oldest node in the flash.  */
+	node = jffs_get_oldest_node(fmc);
+	ASSERT(if (!node) {
+		printk(KERN_ERR "JFFS: jffs_garbage_collect_next: "
+		       "No oldest node found!\n");
+		err = -1;
+		goto jffs_garbage_collect_next_end;
+	});
+
+	/* Find its corresponding file too.  */
+	f = jffs_find_file(c, node->ino);
+
+	if (!f) {
+		printk(KERN_ERR "JFFS: jffs_garbage_collect_next: "
+		       "No file to garbage collect! "
+		       "(ino = 0x%08x)\n", node->ino);
+		/* FIXME: Free the offending node and recover. */
+		err = -1;
+		goto jffs_garbage_collect_next_end;
+	}
+
+	/* We always write out the name. Theoretically, we don't need
+	   to, but for now it's easier - because otherwise we'd have
+	   to keep track of how many times the current name exists on
+	   the flash and make sure it never reaches zero.
+
+	   The current approach means that it would be possible to cause
+	   the GC to end up eating its tail by writing lots of nodes
+	   with no name for it to garbage-collect. Hence the change in
+	   inode.c to write names with _every_ node.
+
+	   It sucks, but it _should_ work.
+	*/
+	total_name_size = JFFS_PAD(f->nsize);
+
+	D1(printk("jffs_garbage_collect_next(): \"%s\", "
+		  "ino: %u, version: %u, location 0x%x, dsize %u\n",
+		  (f->name ? f->name : ""), node->ino, node->version, 
+		  node->fm->offset, node->data_size));
+
+	/* Compute how much data it's possible to rewrite at the moment.  */
+	data_size = f->size - node->data_offset;
+
+	/* And from that, the total size of the chunk we want to write */
+	size = sizeof(struct jffs_raw_inode) + total_name_size
+	       + data_size + JFFS_GET_PAD_BYTES(data_size);
+
+	/* If that's more than max_chunk_size, reduce it accordingly */
+	if (size > fmc->max_chunk_size) {
+		size = fmc->max_chunk_size;
+		data_size = size - sizeof(struct jffs_raw_inode)
+			    - total_name_size;
+	}
+
+	/* If we're asking to take up more space than free_chunk_size1
+	   but we _could_ fit in it, shrink accordingly.
+	*/
+	if (size > free_chunk_size1) {
+
+		if (free_chunk_size1 <
+		    (sizeof(struct jffs_raw_inode) + total_name_size + BLOCK_SIZE)){
+			/* The space left is too small to be of any
+			   use really.  */
+			struct jffs_fm *dirty_fm
+			= jffs_fmalloced(fmc,
+					 fmc->tail->offset + fmc->tail->size,
+					 free_chunk_size1, NULL);
+			if (!dirty_fm) {
+				printk(KERN_ERR "JFFS: "
+				       "jffs_garbage_collect_next: "
+				       "Failed to allocate `dirty' "
+				       "flash memory!\n");
+				err = -1;
+                                goto jffs_garbage_collect_next_end;
+			}
+			D1(printk("Dirtying end of flash - too small\n"));
+			jffs_write_dummy_node(c, dirty_fm);
+                        err = 0;
+			goto jffs_garbage_collect_next_end;
+		}
+		D1(printk("Reducing size of new node from %d to %d to avoid "
+			  " exceeding free_chunk_size1\n",
+			  size, free_chunk_size1));
+
+		size = free_chunk_size1;
+		data_size = size - sizeof(struct jffs_raw_inode)
+			    - total_name_size;
+	}
+
+
+	/* Calculate the amount of space needed to hold the nodes
+	   which are remaining in the tail */
+	space_needed = fmc->min_free_size - (node->fm->offset % fmc->sector_size);
+
+	/* From that, calculate how much 'extra' space we can use to
+	   increase the size of the node we're writing from the size
+	   of the node we're obsoleting
+	*/
+	if (space_needed > fmc->free_size) {
+		/* If we've gone below min_free_size for some reason,
+		   don't fuck up. This is why we have 
+		   min_free_size > sector_size. Whinge about it though,
+		   just so I can convince myself my maths is right.
+		*/
+		D1(printk(KERN_WARNING "jffs_garbage_collect_next(): "
+			  "space_needed %d exceeded free_size %d\n",
+			  space_needed, fmc->free_size));
+		extra_available = 0;
+	} else {
+		extra_available = fmc->free_size - space_needed;
+	}
+
+	/* Check that we don't use up any more 'extra' space than
+	   what's available */
+	if (size > JFFS_PAD(node->data_size) + total_name_size + 
+	    sizeof(struct jffs_raw_inode) + extra_available) {
+		D1(printk("Reducing size of new node from %d to %ld to avoid "
+		       "catching our tail\n", size, 
+			  (long) (JFFS_PAD(node->data_size) + JFFS_PAD(node->name_size) + 
+			  sizeof(struct jffs_raw_inode) + extra_available)));
+		D1(printk("space_needed = %d, extra_available = %d\n", 
+			  space_needed, extra_available));
+
+		size = JFFS_PAD(node->data_size) + total_name_size + 
+		  sizeof(struct jffs_raw_inode) + extra_available;
+		data_size = size - sizeof(struct jffs_raw_inode)
+			- total_name_size;
+	}
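+	/* At this point `size' is the total on-flash footprint of the
+	   replacement node (raw inode + padded name + padded data) and
+	   `data_size' the payload it will carry, clamped in turn by
+	   max_chunk_size, free_chunk_size1 and extra_available above.  */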
+
+	D2(printk("  total_name_size: %u\n", total_name_size));
+	D2(printk("  data_size: %u\n", data_size));
+	D2(printk("  size: %u\n", size));
+	D2(printk("  f->nsize: %u\n", f->nsize));
+	D2(printk("  f->size: %u\n", f->size));
+	D2(printk("  node->data_offset: %u\n", node->data_offset));
+	D2(printk("  free_chunk_size1: %u\n", free_chunk_size1));
+	D2(printk("  free_chunk_size2: %u\n", free_chunk_size2));
+	D2(printk("  node->fm->offset: 0x%08x\n", node->fm->offset));
+
+	if ((err = jffs_rewrite_data(f, node, data_size))) {
+		printk(KERN_WARNING "jffs_rewrite_data() failed: %d\n", err);
+		return err;
+	}
+	  
+jffs_garbage_collect_next_end:
+	D3(printk("jffs_garbage_collect_next: Leaving...\n"));
+	return err;
+} /* jffs_garbage_collect_next */
+
+
+/* If an obsolete node is partly going to be erased due to garbage
+   collection, the part that isn't going to be erased must be filled
+   with zeroes so that the scan of the flash will work smoothly next
+   time.  (The data in the file could for instance be a JFFS image
+   which could cause enormous confusion during a scan of the flash
+   device if we didn't do this.)
+     There are two phases in this procedure: First, the clearing of
+   the name and data parts of the node. Second, possibly also clearing
+   a part of the raw inode as well.  If the box is power cycled during
+   the first phase, only the checksum of this node-to-be-cleared-at-
+   the-end will be wrong.  If the box is power cycled during, or after,
+   the clearing of the raw inode, the information like the length of
+   the name and data parts are zeroed.  The next time the box is
+   powered up, the scanning algorithm manages this faulty data too
+   because:
+
+   - The checksum is invalid and thus the raw inode must be discarded
+     in any case.
+   - If the lengths of the data part or the name part are zeroed, the
+     scanning just continues after the raw inode.  But after the inode
+     the scanning procedure just finds zeroes which is the same as
+     dirt.
+
+   So, in the end, this could never fail. :-)  Even if it does fail,
+   the scanning algorithm should manage that too.  */
+
+static int
+jffs_clear_end_of_node(struct jffs_control *c, __u32 erase_size)
+{
+	struct jffs_fm *fm;
+	struct jffs_fmcontrol *fmc = c->fmc;
+	__u32 zero_offset;
+	__u32 zero_size;
+	__u32 zero_offset_data;
+	__u32 zero_size_data;
+	__u32 cutting_raw_inode = 0;
+
+	if (!(fm = jffs_cut_node(fmc, erase_size))) {
+		D3(printk("jffs_clear_end_of_node(): fm == NULL\n"));
+		return 0;
+	}
+
+	/* Where and how much shall we clear?  */
+	zero_offset = fmc->head->offset + erase_size;
+	zero_size = fm->offset + fm->size - zero_offset;
+
+	/* Do we have to clear the raw_inode explicitly?  */
+	if (fm->size - zero_size < sizeof(struct jffs_raw_inode)) {
+		cutting_raw_inode = sizeof(struct jffs_raw_inode)
+				    - (fm->size - zero_size);
+	}
+
+	/* First, clear the name and data fields.  */
+	zero_offset_data = zero_offset + cutting_raw_inode;
+	zero_size_data = zero_size - cutting_raw_inode;
+	flash_safe_acquire(fmc->mtd);
+	flash_memset(fmc->mtd, zero_offset_data, 0, zero_size_data);
+	flash_safe_release(fmc->mtd);
+
+	/* Should we clear a part of the raw inode?  */
+	if (cutting_raw_inode) {
+		/* I guess it is ok to clear the raw inode in this order.  */
+		flash_safe_acquire(fmc->mtd);
+		flash_memset(fmc->mtd, zero_offset, 0,
+			     cutting_raw_inode);
+		flash_safe_release(fmc->mtd);
+	}
+
+	return 0;
+} /* jffs_clear_end_of_node()  */
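+
+/* A worked example of the arithmetic above, with made-up sizes: write R for
+   sizeof(struct jffs_raw_inode) and suppose the jffs_fm returned by
+   jffs_cut_node() straddles the erase boundary so that only E < R bytes of
+   its raw inode lie inside the erased area.  Then
+
+      zero_offset          = head->offset + erase_size     (the boundary)
+      zero_size            = fm->offset + fm->size - zero_offset
+      fm->size - zero_size = E  <  R
+      cutting_raw_inode    = R - E
+
+   so the first flash_memset() clears the surviving name/data bytes and the
+   second clears the R - E raw-inode bytes that poke past the boundary.  */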
+
+/* Try to erase as much as possible of the dirt in the flash memory.  */
+static long
+jffs_try_to_erase(struct jffs_control *c)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+	long erase_size;
+	int err;
+	__u32 offset;
+
+	D3(printk("jffs_try_to_erase()\n"));
+
+	erase_size = jffs_erasable_size(fmc);
+
+	D2(printk("jffs_try_to_erase(): erase_size = %ld\n", erase_size));
+
+	if (erase_size == 0) {
+		return 0;
+	}
+	else if (erase_size < 0) {
+		printk(KERN_ERR "JFFS: jffs_try_to_erase: "
+		       "jffs_erasable_size returned %ld.\n", erase_size);
+		return erase_size;
+	}
+
+	if ((err = jffs_clear_end_of_node(c, erase_size)) < 0) {
+		printk(KERN_ERR "JFFS: jffs_try_to_erase: "
+		       "Clearing of node failed.\n");
+		return err;
+	}
+
+	offset = fmc->head->offset;
+
+	/* Now, let's try to do the erase.  */
+	if ((err = flash_erase_region(fmc->mtd,
+				      offset, erase_size)) < 0) {
+		printk(KERN_ERR "JFFS: Erase of flash failed. "
+		       "offset = %u, erase_size = %ld\n",
+		       offset, erase_size);
+		/* XXX: Here we should allocate this area as dirty
+		   with jffs_fmalloced or something similar.  Now
+		   we just report the error.  */
+		return err;
+	}
+
+#if 0
+	/* Check if the erased sectors really got erased.  */
+	{
+		__u32 pos;
+		__u32 end;
+
+		pos = (__u32)flash_get_direct_pointer(to_kdev_t(c->sb->s_dev), offset);
+		end = pos + erase_size;
+
+		D2(printk("JFFS: Checking erased sector(s)...\n"));
+
+		flash_safe_acquire(fmc->mtd);
+
+		for (; pos < end; pos += 4) {
+			if (*(__u32 *)pos != JFFS_EMPTY_BITMASK) {
+				printk("JFFS: Erase failed! pos = 0x%lx\n",
+				       (long)pos);
+				jffs_hexdump(fmc->mtd, pos,
+					     jffs_min(256, end - pos));
+				err = -1;
+				break;
+			}
+		}
+
+		flash_safe_release(fmc->mtd);
+
+		if (!err) {
+			D2(printk("JFFS: Erase succeeded.\n"));
+		}
+		else {
+			/* XXX: Here we should allocate the memory
+			   with jffs_fmalloced() in order to prevent
+			   JFFS from using this area accidentally.  */
+			return err;
+		}
+	}
+#endif
+
+	/* Update the flash memory data structures.  */
+	jffs_sync_erase(fmc, erase_size);
+
+	return erase_size;
+}
+
+
+/* There are different criteria that should trigger a garbage collect:
+
+   1. There is too much dirt in the memory.
+   2. The free space is becoming small.
+   3. There are many versions of a node.
+
+   The garbage collect should always be done in a manner that guarantees
+   that future garbage collects cannot be locked.  E.g. Rewritten chunks
+   should not be too large (span more than one sector in the flash memory
+   for example).  Of course there is a limit on how intelligent this garbage
+   collection can be.  */
+
+
+static int
+jffs_garbage_collect_now(struct jffs_control *c)
+{
+	struct jffs_fmcontrol *fmc = c->fmc;
+	long erased = 0;
+	int result = 0;
+	D1(int i = 1);
+	D2(printk("***jffs_garbage_collect_now(): fmc->dirty_size = %u, fmc->free_size = 0x%x\n, fcs1=0x%x, fcs2=0x%x",
+		  fmc->dirty_size, fmc->free_size, jffs_free_size1(fmc), jffs_free_size2(fmc)));
+	D2(jffs_print_fmcontrol(fmc));
+
+	//	down(&fmc->gclock);
+
+	/* If it is possible to garbage collect, do so.  */
+	
+	while (erased == 0) {
+		D1(printk("***jffs_garbage_collect_now(): round #%u, "
+			  "fmc->dirty_size = %u\n", i++, fmc->dirty_size));
+		D2(jffs_print_fmcontrol(fmc));
+
+		if ((erased = jffs_try_to_erase(c)) < 0) {
+			printk(KERN_WARNING "JFFS: Error in "
+			       "garbage collector.\n");
+			result = erased;
+			goto gc_end;
+		}
+		if (erased)
+			break;
+		
+		if (fmc->free_size == 0) {
+			/* Argh */
+			printk(KERN_ERR "jffs_garbage_collect_now(): free_size == 0. This is BAD.\n");
+			result = -ENOSPC;
+			break;
+		}
+
+		if (fmc->dirty_size < fmc->sector_size) {
+			/* Actually, we _may_ have been able to free some, 
+			 * if there are many overlapping nodes which aren't
+			 * actually marked dirty because they still have
+			 * some valid data in each.
+			 */
+			result = -ENOSPC;
+			break;
+		}
+
+		/* Let's dare to make a garbage collect.  */
+		if ((result = jffs_garbage_collect_next(c)) < 0) {
+			printk(KERN_ERR "JFFS: Something "
+			       "has gone seriously wrong "
+			       "with a garbage collect.\n");
+			goto gc_end;
+		}
+
+		D1(printk("   jffs_garbage_collect_now(): erased: %ld\n", erased));
+		DJM(jffs_print_memory_allocation_statistics());
+	}
+	
+gc_end:
+	//	up(&fmc->gclock);
+
+	D3(printk("   jffs_garbage_collect_now(): Leaving...\n"));
+	D1(if (erased) {
+		printk("jffs_g_c_now(): erased = %ld\n", erased);
+		jffs_print_fmcontrol(fmc);
+	});
+
+	if (!erased && !result)
+		return -ENOSPC;
+
+	return result;
+} /* jffs_garbage_collect_now() */
+
+
+/* Determine if it is reasonable to start garbage collection.
+   We start a gc pass if either:
+   - The number of free bytes < MIN_FREE_BYTES && at least one
+     block is dirty, OR
+   - The number of dirty bytes > MAX_DIRTY_BYTES
+*/
+static inline int thread_should_wake (struct jffs_control *c)
+{
+	D1(printk (KERN_NOTICE "thread_should_wake(): free=%d, dirty=%d, blocksize=%d.\n",
+		   c->fmc->free_size, c->fmc->dirty_size, c->fmc->sector_size));
+
+	/* If there's not enough dirty space to free a block, there's no point. */
+	if (c->fmc->dirty_size < c->fmc->sector_size) {
+		D2(printk(KERN_NOTICE "thread_should_wake(): Not waking. Insufficient dirty space\n"));
+		return 0;
+	}
+#if 1
+	/* If there is too much RAM used by the various structures, GC */
+	if (jffs_get_node_inuse() > (c->fmc->used_size/c->fmc->max_chunk_size * 5 + jffs_get_file_count() * 2 + 50)) {
+		/* FIXME: Provide proof that this test can be satisfied. We
+		   don't want a filesystem doing endless GC just because this
+		   condition cannot ever be false.
+		*/
+		D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to number of nodes\n"));
+		return 1;
+	}
+#endif
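+	/* For scale, with illustrative numbers only: used_size == 1 MiB,
+	   max_chunk_size == 32 KiB and 100 files give a threshold of
+	   32 * 5 + 100 * 2 + 50 == 410 in-use nodes above.  */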
+	/* If there are fewer free bytes than the threshold, GC */
+	if (c->fmc->free_size < c->gc_minfree_threshold) {
+		D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to insufficent free space\n"));
+		return 1;
+	}
+	/* If there are more dirty bytes than the threshold, GC */
+	if (c->fmc->dirty_size > c->gc_maxdirty_threshold) {
+		D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to excessive dirty space\n"));
+		return 1;
+	}	
+	/* FIXME: What about the "There are many versions of a node" condition? */
+
+	return 0;
+}
+
+
+void jffs_garbage_collect_trigger(struct jffs_control *c)
+{
+	/* NOTE: We rely on the fact that we have the BKL here.
+	 * Otherwise, the gc_task could go away between the check
+	 * and the wake_up_process()
+	 */
+	if (c->gc_task && thread_should_wake(c))
+		send_sig(SIGHUP, c->gc_task, 1);
+}
+  
+
+/* Kernel threads take (void *) as arguments.  Thus we pass
+   the jffs_control data as a (void *) and then cast it. */
+int
+jffs_garbage_collect_thread(void *ptr)
+{
+	struct jffs_control *c = (struct jffs_control *) ptr;
+	struct jffs_fmcontrol *fmc = c->fmc;
+	long erased;
+	int result = 0;
+	D1(int i = 1);
+
+	daemonize("jffs_gcd");
+
+	c->gc_task = current;
+
+	lock_kernel();
+	init_completion(&c->gc_thread_comp); /* barrier */ 
+	spin_lock_irq(&current->sighand->siglock);
+	siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): Starting infinite loop.\n"));
+
+	for (;;) {
+
+		/* See if we need to start gc.  If we don't, go to sleep.
+		   
+		   Current implementation is a BAD THING(tm).  If we try 
+		   to unmount the FS, the unmount operation will sleep waiting
+		   for this thread to exit.  We need to arrange to send it a
+		   sig before the umount process sleeps.
+		*/
+
+		if (!thread_should_wake(c))
+			set_current_state (TASK_INTERRUPTIBLE);
+		
+		schedule(); /* Yes, we do this even if we want to go
+				       on immediately - we're a low priority 
+				       background task. */
+
+		/* Put_super will send a SIGKILL and then wait on the sem. 
+		 */
+		while (signal_pending(current)) {
+			siginfo_t info;
+			unsigned long signr = 0;
+
+			spin_lock_irq(&current->sighand->siglock);
+			signr = dequeue_signal(current, &current->blocked, &info);
+			spin_unlock_irq(&current->sighand->siglock);
+
+			switch(signr) {
+			case SIGSTOP:
+				D1(printk("jffs_garbage_collect_thread(): SIGSTOP received.\n"));
+				set_current_state(TASK_STOPPED);
+				schedule();
+				break;
+
+			case SIGKILL:
+				D1(printk("jffs_garbage_collect_thread(): SIGKILL received.\n"));
+				c->gc_task = NULL;
+				complete_and_exit(&c->gc_thread_comp, 0);
+			}
+		}
+
+
+		D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n"));
+
+		D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n"));
+		down(&fmc->biglock);
+		
+		D1(printk("***jffs_garbage_collect_thread(): round #%u, "
+			  "fmc->dirty_size = %u\n", i++, fmc->dirty_size));
+		D2(jffs_print_fmcontrol(fmc));
+
+		if ((erased = jffs_try_to_erase(c)) < 0) {
+			printk(KERN_WARNING "JFFS: Error in "
+			       "garbage collector: %ld.\n", erased);
+		}
+
+		if (erased)
+			goto gc_end;
+
+		if (fmc->free_size == 0) {
+			/* Argh. Might as well commit suicide. */
+			printk(KERN_ERR "jffs_garbage_collect_thread(): free_size == 0. This is BAD.\n");
+			send_sig(SIGQUIT, c->gc_task, 1);
+			// panic()
+			goto gc_end;
+		}
+		
+		/* Let's dare to make a garbage collect.  */
+		if ((result = jffs_garbage_collect_next(c)) < 0) {
+			printk(KERN_ERR "JFFS: Something "
+			       "has gone seriously wrong "
+			       "with a garbage collect: %d\n", result);
+		}
+		
+	gc_end:
+		D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n"));
+		up(&fmc->biglock);
+	} /* for (;;) */
+} /* jffs_garbage_collect_thread() */
diff --git a/fs/jffs/intrep.h b/fs/jffs/intrep.h
new file mode 100644
index 0000000..4ae97b1
--- /dev/null
+++ b/fs/jffs/intrep.h
@@ -0,0 +1,60 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 1999, 2000  Axis Communications AB.
+ *
+ * Created by Finn Hakansson <finn@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: intrep.h,v 1.14 2001/09/23 23:28:37 dwmw2 Exp $
+ *
+ */
+
+#ifndef __LINUX_JFFS_INTREP_H__
+#define __LINUX_JFFS_INTREP_H__
+#include "jffs_fm.h"
+struct jffs_node *jffs_alloc_node(void);
+void jffs_free_node(struct jffs_node *n);
+int jffs_get_node_inuse(void);
+
+void jffs_cleanup_control(struct jffs_control *c);
+int jffs_build_fs(struct super_block *sb);
+
+int jffs_insert_node(struct jffs_control *c, struct jffs_file *f,
+		     const struct jffs_raw_inode *raw_inode,
+		     const char *name, struct jffs_node *node);
+struct jffs_file *jffs_find_file(struct jffs_control *c, __u32 ino);
+struct jffs_file *jffs_find_child(struct jffs_file *dir, const char *name, int len);
+
+int jffs_foreach_file(struct jffs_control *c, int (*func)(struct jffs_file *));
+int jffs_possibly_delete_file(struct jffs_file *f);
+int jffs_insert_file_into_tree(struct jffs_file *f);
+int jffs_unlink_file_from_tree(struct jffs_file *f);
+int jffs_file_count(struct jffs_file *f);
+
+int jffs_write_node(struct jffs_control *c, struct jffs_node *node,
+		    struct jffs_raw_inode *raw_inode,
+		    const char *name, const unsigned char *buf,
+		    int recoverable, struct jffs_file *f);
+int jffs_read_data(struct jffs_file *f, unsigned char *buf, __u32 read_offset, __u32 size);
+
+/* Garbage collection stuff.  */
+int jffs_garbage_collect_thread(void *c);
+void jffs_garbage_collect_trigger(struct jffs_control *c);
+
+/* For debugging purposes.  */
+void jffs_print_node(struct jffs_node *n);
+void jffs_print_raw_inode(struct jffs_raw_inode *raw_inode);
+#if 0
+int jffs_print_file(struct jffs_file *f);
+#endif  /*  0  */
+void jffs_print_hash_table(struct jffs_control *c);
+void jffs_print_tree(struct jffs_file *first_file, int indent);
+
+#endif /* __LINUX_JFFS_INTREP_H__  */
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
new file mode 100644
index 0000000..0cab8da
--- /dev/null
+++ b/fs/jffs/jffs_fm.c
@@ -0,0 +1,795 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 1999, 2000  Axis Communications AB.
+ *
+ * Created by Finn Hakansson <finn@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: jffs_fm.c,v 1.27 2001/09/20 12:29:47 dwmw2 Exp $
+ *
+ * Ported to Linux 2.3.x and MTD:
+ * Copyright (C) 2000  Alexander Larsson (alex@cendio.se), Cendio Systems AB
+ *
+ */
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/jffs.h>
+#include "jffs_fm.h"
+
+#if defined(JFFS_MARK_OBSOLETE) && JFFS_MARK_OBSOLETE
+static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
+#endif
+
+static struct jffs_fm *jffs_alloc_fm(void);
+static void jffs_free_fm(struct jffs_fm *n);
+
+extern kmem_cache_t     *fm_cache;
+extern kmem_cache_t     *node_cache;
+
+/* This function creates a new shiny flash memory control structure.  */
+struct jffs_fmcontrol *
+jffs_build_begin(struct jffs_control *c, int unit)
+{
+	struct jffs_fmcontrol *fmc;
+	struct mtd_info *mtd;
+	
+	D3(printk("jffs_build_begin()\n"));
+	fmc = (struct jffs_fmcontrol *)kmalloc(sizeof(struct jffs_fmcontrol),
+					       GFP_KERNEL);
+	if (!fmc) {
+		D(printk("jffs_build_begin(): Allocation of "
+			 "struct jffs_fmcontrol failed!\n"));
+		return NULL;
+	}
+	DJM(no_jffs_fmcontrol++);
+
+	mtd = get_mtd_device(NULL, unit);
+
+	if (!mtd) {
+		kfree(fmc);
+		DJM(no_jffs_fmcontrol--);
+		return NULL;
+	}
+	
+	/* Retrieve the size of the flash memory.  */
+	fmc->flash_size = mtd->size;
+	D3(printk("  fmc->flash_size = %d bytes\n", fmc->flash_size));
+
+	fmc->used_size = 0;
+	fmc->dirty_size = 0;
+	fmc->free_size = mtd->size;
+	fmc->sector_size = mtd->erasesize;
+	fmc->max_chunk_size = fmc->sector_size >> 1;
+	/* min_free_size:
+	   1 sector, obviously.
+	   + 1 x max_chunk_size, for when a node overlaps the end of a sector
+	   + 1 x max_chunk_size again, which ought to be enough to handle 
+		   the case where a rename causes a name to grow, and GC has
+		   to write out larger nodes than the ones it's obsoleting.
+		   We should fix it so it doesn't have to write the name
+		   _every_ time. Later.
+	   + another 2 sectors because people keep getting GC stuck and
+	           we don't know why. This scares me - I want formal proof
+		   of correctness of whatever number we put here. dwmw2.
+	*/
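+	/* With max_chunk_size == sector_size / 2 as set above, that sum is
+	   1 sector + 2 * (sector_size / 2) + 2 sectors = 4 sectors, which is
+	   the sector_size << 2 used below.  */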
+	fmc->min_free_size = fmc->sector_size << 2;
+	fmc->mtd = mtd;
+	fmc->c = c;
+	fmc->head = NULL;
+	fmc->tail = NULL;
+	fmc->head_extra = NULL;
+	fmc->tail_extra = NULL;
+	init_MUTEX(&fmc->biglock);
+	return fmc;
+}
+
+
+/* When the flash memory scan has completed, this function should be called
+   before use of the control structure.  */
+void
+jffs_build_end(struct jffs_fmcontrol *fmc)
+{
+	D3(printk("jffs_build_end()\n"));
+
+	if (!fmc->head) {
+		fmc->head = fmc->head_extra;
+		fmc->tail = fmc->tail_extra;
+	}
+	else if (fmc->head_extra) {
+		fmc->tail_extra->next = fmc->head;
+		fmc->head->prev = fmc->tail_extra;
+		fmc->head = fmc->head_extra;
+	}
+	fmc->head_extra = NULL; /* These two assignments are not strictly needed.  */
+	fmc->tail_extra = NULL;
+	D3(jffs_print_fmcontrol(fmc));
+}
+
+
+/* Call this function when the file system is unmounted.  This function
+   frees all memory used by this module.  */
+void
+jffs_cleanup_fmcontrol(struct jffs_fmcontrol *fmc)
+{
+	if (fmc) {
+		struct jffs_fm *next = fmc->head;
+		while (next) {
+			struct jffs_fm *cur = next;
+			next = next->next;
+			jffs_free_fm(cur);
+		}
+		put_mtd_device(fmc->mtd);
+		kfree(fmc);
+		DJM(no_jffs_fmcontrol--);
+	}
+}
+
+
+/* This function returns the size of the first chunk of free space on the
+   flash memory.  This function will return something nonzero if the flash
+   memory contains any free space.  */
+__u32
+jffs_free_size1(struct jffs_fmcontrol *fmc)
+{
+	__u32 head;
+	__u32 tail;
+	__u32 end = fmc->flash_size;
+
+	if (!fmc->head) {
+		/* There is nothing on the flash.  */
+		return fmc->flash_size;
+	}
+
+	/* Compute the beginning and ending of the contents of the flash.  */
+	head = fmc->head->offset;
+	tail = fmc->tail->offset + fmc->tail->size;
+	if (tail == end) {
+		tail = 0;
+	}
+	ASSERT(else if (tail > end) {
+		printk(KERN_WARNING "jffs_free_size1(): tail > end\n");
+		tail = 0;
+	});
+
+	if (head <= tail) {
+		return end - tail;
+	}
+	else {
+		return head - tail;
+	}
+}
+
+/* This function will return something nonzero in case there are two free
+   areas on the flash.  Like this:
+
+     +----------------+------------------+----------------+
+     |     FREE 1     |   USED / DIRTY   |     FREE 2     |
+     +----------------+------------------+----------------+
+       fmc->head -----^
+       fmc->tail ------------------------^
+
+   The value returned, will be the size of the first empty area on the
+   flash, in this case marked "FREE 1".  */
+__u32
+jffs_free_size2(struct jffs_fmcontrol *fmc)
+{
+	if (fmc->head) {
+		__u32 head = fmc->head->offset;
+		__u32 tail = fmc->tail->offset + fmc->tail->size;
+		if (tail == fmc->flash_size) {
+			tail = 0;
+		}
+
+		if (tail >= head) {
+			return head;
+		}
+	}
+	return 0;
+}
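+
+/* Illustrative numbers for the two functions above: on a 0x40000 byte flash
+   laid out as in the diagram, head->offset == 0x10000 and the tail ending at
+   0x30000 give jffs_free_size1() == 0x40000 - 0x30000 == 0x10000 (FREE 2)
+   and jffs_free_size2() == 0x10000 (FREE 1).  Once the log wraps so that the
+   tail ends below the head, there is only one free gap: jffs_free_size1()
+   then returns head - tail and jffs_free_size2() returns 0.  */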
+
+
+/* Allocate a chunk of flash memory.  If there is enough space on the
+   device, a reference to the associated node is stored in the jffs_fm
+   struct.  */
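+/* Three outcomes are possible: the request fits in the chunk after the tail
+   (a normal allocation), it fits in neither free chunk (-ENOSPC), or it only
+   fits in the free chunk at the start of the flash, in which case the space
+   left after the tail is handed back as a node-less dirty jffs_fm and the
+   caller is expected to fill it with a dummy node (cf. the !fm->nodes check
+   in jffs_rewrite_data() in intrep.c).  */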
+int
+jffs_fmalloc(struct jffs_fmcontrol *fmc, __u32 size, struct jffs_node *node,
+	     struct jffs_fm **result)
+{
+	struct jffs_fm *fm;
+	__u32 free_chunk_size1;
+	__u32 free_chunk_size2;
+
+	D2(printk("jffs_fmalloc(): fmc = 0x%p, size = %d, "
+		  "node = 0x%p\n", fmc, size, node));
+
+	*result = NULL;
+
+	if (!(fm = jffs_alloc_fm())) {
+		D(printk("jffs_fmalloc(): kmalloc() failed! (fm)\n"));
+		return -ENOMEM;
+	}
+
+	free_chunk_size1 = jffs_free_size1(fmc);
+	free_chunk_size2 = jffs_free_size2(fmc);
+	if (free_chunk_size1 + free_chunk_size2 != fmc->free_size) {
+		printk(KERN_WARNING "Free size accounting screwed\n");
+		printk(KERN_WARNING "free_chunk_size1 == 0x%x, free_chunk_size2 == 0x%x, fmc->free_size == 0x%x\n", free_chunk_size1, free_chunk_size2, fmc->free_size);
+	}
+
+	D3(printk("jffs_fmalloc(): free_chunk_size1 = %u, "
+		  "free_chunk_size2 = %u\n",
+		  free_chunk_size1, free_chunk_size2));
+
+	if (size <= free_chunk_size1) {
+		if (!(fm->nodes = (struct jffs_node_ref *)
+				  kmalloc(sizeof(struct jffs_node_ref),
+					  GFP_KERNEL))) {
+			D(printk("jffs_fmalloc(): kmalloc() failed! "
+				 "(node_ref)\n"));
+			jffs_free_fm(fm);
+			return -ENOMEM;
+		}
+		DJM(no_jffs_node_ref++);
+		fm->nodes->node = node;
+		fm->nodes->next = NULL;
+		if (fmc->tail) {
+			fm->offset = fmc->tail->offset + fmc->tail->size;
+			if (fm->offset == fmc->flash_size) {
+				fm->offset = 0;
+			}
+			ASSERT(else if (fm->offset > fmc->flash_size) {
+				printk(KERN_WARNING "jffs_fmalloc(): "
+				       "offset > flash_end\n");
+				fm->offset = 0;
+			});
+		}
+		else {
+			/* There need not be any files in the file
+			   system yet.  */
+			fm->offset = 0;
+		}
+		fm->size = size;
+		fmc->free_size -= size;
+		fmc->used_size += size;
+	}
+	else if (size > free_chunk_size2) {
+		printk(KERN_WARNING "JFFS: Tried to allocate a too "
+		       "large flash memory chunk. (size = %u)\n", size);
+		jffs_free_fm(fm);
+		return -ENOSPC;
+	}
+	else {
+		fm->offset = fmc->tail->offset + fmc->tail->size;
+		fm->size = free_chunk_size1;
+		fm->nodes = NULL;
+		fmc->free_size -= fm->size;
+		fmc->dirty_size += fm->size; /* Changed by simonk. This seemingly fixes a 
+						bug that caused infinite garbage collection.
+						It previously set fmc->dirty_size to size (which is the
+						size of the requested chunk).
+					     */
+	}
+
+	fm->next = NULL;
+	if (!fmc->head) {
+		fm->prev = NULL;
+		fmc->head = fm;
+		fmc->tail = fm;
+	}
+	else {
+		fm->prev = fmc->tail;
+		fmc->tail->next = fm;
+		fmc->tail = fm;
+	}
+
+	D3(jffs_print_fmcontrol(fmc));
+	D3(jffs_print_fm(fm));
+	*result = fm;
+	return 0;
+}
+
+
+/* The on-flash space is not needed anymore by the passed node.  Remove
+   the reference to the node from the node list.  If the data chunk in
+   the flash memory isn't used by any nodes anymore (fm->nodes == 0),
+   then mark that chunk as dirty.  */
+int
+jffs_fmfree(struct jffs_fmcontrol *fmc, struct jffs_fm *fm, struct jffs_node *node)
+{
+	struct jffs_node_ref *ref;
+	struct jffs_node_ref *prev;
+	ASSERT(int del = 0);
+
+	D2(printk("jffs_fmfree(): node->ino = %u, node->version = %u\n",
+		 node->ino, node->version));
+
+	ASSERT(if (!fmc || !fm || !fm->nodes) {
+		printk(KERN_ERR "jffs_fmfree(): fmc: 0x%p, fm: 0x%p, "
+		       "fm->nodes: 0x%p\n",
+		       fmc, fm, (fm ? fm->nodes : NULL));
+		return -1;
+	});
+
+	/* Find the reference to the node that is going to be removed
+	   and remove it.  */
+	for (ref = fm->nodes, prev = NULL; ref; ref = ref->next) {
+		if (ref->node == node) {
+			if (prev) {
+				prev->next = ref->next;
+			}
+			else {
+				fm->nodes = ref->next;
+			}
+			kfree(ref);
+			DJM(no_jffs_node_ref--);
+			ASSERT(del = 1);
+			break;
+		}
+		prev = ref;
+	}
+
+	/* If the data chunk in the flash memory isn't used anymore
+	   just mark it as obsolete.  */
+	if (!fm->nodes) {
+		/* No node uses this chunk so let's remove it.  */
+		fmc->used_size -= fm->size;
+		fmc->dirty_size += fm->size;
+#if defined(JFFS_MARK_OBSOLETE) && JFFS_MARK_OBSOLETE
+		if (jffs_mark_obsolete(fmc, fm->offset) < 0) {
+			D1(printk("jffs_fmfree(): Failed to mark an on-flash "
+				  "node obsolete!\n"));
+			return -1;
+		}
+#endif
+	}
+
+	ASSERT(if (!del) {
+		printk(KERN_WARNING "***jffs_fmfree(): "
+		       "Didn't delete any node reference!\n");
+	});
+
+	return 0;
+}
+
+
+/* This allocation function is used during the initialization of
+   the file system.  */
+struct jffs_fm *
+jffs_fmalloced(struct jffs_fmcontrol *fmc, __u32 offset, __u32 size,
+	       struct jffs_node *node)
+{
+	struct jffs_fm *fm;
+
+	D3(printk("jffs_fmalloced()\n"));
+
+	if (!(fm = jffs_alloc_fm())) {
+		D(printk("jffs_fmalloced(0x%p, %u, %u, 0x%p): failed!\n",
+			 fmc, offset, size, node));
+		return NULL;
+	}
+	fm->offset = offset;
+	fm->size = size;
+	fm->prev = NULL;
+	fm->next = NULL;
+	fm->nodes = NULL;
+	if (node) {
+		/* `node' exists and it should be associated with the
+		    jffs_fm structure `fm'.  */
+		if (!(fm->nodes = (struct jffs_node_ref *)
+				  kmalloc(sizeof(struct jffs_node_ref),
+					  GFP_KERNEL))) {
+			D(printk("jffs_fmalloced(): !fm->nodes\n"));
+			jffs_free_fm(fm);
+			return NULL;
+		}
+		DJM(no_jffs_node_ref++);
+		fm->nodes->node = node;
+		fm->nodes->next = NULL;
+		fmc->used_size += size;
+		fmc->free_size -= size;
+	}
+	else {
+		/* If there is no node, then this is just a chunk of dirt.  */
+		fmc->dirty_size += size;
+		fmc->free_size -= size;
+	}
+
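+	/* Link the new jffs_fm into the right list: keep appending to the
+	   "extra" list once it exists, start the main list if it is empty,
+	   open the extra list when this chunk does not directly follow the
+	   current tail (the scan has skipped a free gap, see the comment in
+	   jffs_fm.h), and otherwise append to the main list.  */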
+	if (fmc->head_extra) {
+		fm->prev = fmc->tail_extra;
+		fmc->tail_extra->next = fm;
+		fmc->tail_extra = fm;
+	}
+	else if (!fmc->head) {
+		fmc->head = fm;
+		fmc->tail = fm;
+	}
+	else if (fmc->tail->offset + fmc->tail->size < offset) {
+		fmc->head_extra = fm;
+		fmc->tail_extra = fm;
+	}
+	else {
+		fm->prev = fmc->tail;
+		fmc->tail->next = fm;
+		fmc->tail = fm;
+	}
+	D3(jffs_print_fmcontrol(fmc));
+	D3(jffs_print_fm(fm));
+	return fm;
+}
+
+
+/* Add a new node to an already existing jffs_fm struct.  */
+int
+jffs_add_node(struct jffs_node *node)
+{
+	struct jffs_node_ref *ref;
+
+	D3(printk("jffs_add_node(): ino = %u\n", node->ino));
+
+	ref = (struct jffs_node_ref *)kmalloc(sizeof(struct jffs_node_ref),
+					      GFP_KERNEL);
+	if (!ref)
+		return -ENOMEM;
+
+	DJM(no_jffs_node_ref++);
+	ref->node = node;
+	ref->next = node->fm->nodes;
+	node->fm->nodes = ref;
+	return 0;
+}
+
+
+/* Free a part of some allocated space.  */
+void
+jffs_fmfree_partly(struct jffs_fmcontrol *fmc, struct jffs_fm *fm, __u32 size)
+{
+	D1(printk("***jffs_fmfree_partly(): fm = 0x%p, fm->nodes = 0x%p, "
+		  "fm->nodes->node->ino = %u, size = %u\n",
+		  fm, (fm ? fm->nodes : 0),
+		  (!fm ? 0 : (!fm->nodes ? 0 : fm->nodes->node->ino)), size));
+
+	if (fm->nodes) {
+		kfree(fm->nodes);
+		DJM(no_jffs_node_ref--);
+		fm->nodes = NULL;
+	}
+	fmc->used_size -= fm->size;
+	if (fm == fmc->tail) {
+		fm->size -= size;
+		fmc->free_size += size;
+	}
+	fmc->dirty_size += fm->size;
+}
+
+
+/* Find the jffs_fm struct that contains the end of the data chunk that
+   begins at the logical beginning of the flash memory and spans `size'
+   bytes.  If we want to erase a sector of the flash memory, we use this
+   function to find where the sector limit cuts a chunk of data.  */
+struct jffs_fm *
+jffs_cut_node(struct jffs_fmcontrol *fmc, __u32 size)
+{
+	struct jffs_fm *fm;
+	__u32 pos = 0;
+
+	if (size == 0) {
+		return NULL;
+	}
+
+	ASSERT(if (!fmc) {
+		printk(KERN_ERR "jffs_cut_node(): fmc == NULL\n");
+		return NULL;
+	});
+
+	fm = fmc->head;
+
+	while (fm) {
+		pos += fm->size;
+		if (pos < size) {
+			fm = fm->next;
+		}
+		else if (pos > size) {
+			break;
+		}
+		else {
+			fm = NULL;
+			break;
+		}
+	}
+
+	return fm;
+}
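+
+/* Example, with made-up chunk sizes: for a head list of jffs_fm sizes 0x100
+   and 0x200, jffs_cut_node(fmc, 0x250) returns the second jffs_fm (the
+   0x250 byte cut ends inside it), while jffs_cut_node(fmc, 0x300) returns
+   NULL because the cut falls exactly on a chunk boundary.  */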
+
+
+/* Move the head of the fmc structures and delete the obsolete parts.  */
+void
+jffs_sync_erase(struct jffs_fmcontrol *fmc, int erased_size)
+{
+	struct jffs_fm *fm;
+	struct jffs_fm *del;
+
+	ASSERT(if (!fmc) {
+		printk(KERN_ERR "jffs_sync_erase(): fmc == NULL\n");
+		return;
+	});
+
+	fmc->dirty_size -= erased_size;
+	fmc->free_size += erased_size;
+
+	for (fm = fmc->head; fm && (erased_size > 0);) {
+		if (erased_size >= fm->size) {
+			erased_size -= fm->size;
+			del = fm;
+			fm = fm->next;
+			if (fm) {
+				fm->prev = NULL;
+			}
+			fmc->head = fm;
+			jffs_free_fm(del);
+		}
+		else {
+			fm->size -= erased_size;
+			fm->offset += erased_size;
+			break;
+		}
+	}
+}
+
+
+/* Return the oldest used node in the flash memory.  */
+struct jffs_node *
+jffs_get_oldest_node(struct jffs_fmcontrol *fmc)
+{
+	struct jffs_fm *fm;
+	struct jffs_node_ref *nref;
+	struct jffs_node *node = NULL;
+
+	ASSERT(if (!fmc) {
+		printk(KERN_ERR "jffs_get_oldest_node(): fmc == NULL\n");
+		return NULL;
+	});
+
+	for (fm = fmc->head; fm && !fm->nodes; fm = fm->next);
+
+	if (!fm) {
+		return NULL;
+	}
+
+	/* The oldest node is the last one in the reference list.  This list
+	   shouldn't be too long; just one or perhaps two elements.  */
+	for (nref = fm->nodes; nref; nref = nref->next) {
+		node = nref->node;
+	}
+
+	D2(printk("jffs_get_oldest_node(): ino = %u, version = %u\n",
+		  (node ? node->ino : 0), (node ? node->version : 0)));
+
+	return node;
+}
+
+
+#if defined(JFFS_MARK_OBSOLETE) && JFFS_MARK_OBSOLETE
+
+/* Mark an on-flash node as obsolete.
+
+   Note that this is just an optimization that isn't necessary for the
+   filesystem to work.  */
+
+static int
+jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset)
+{
+	/* The `accurate_pos' holds the position of the accurate byte
+	   in the jffs_raw_inode structure that we are going to mark
+	   as obsolete.  */
+	__u32 accurate_pos = fm_offset + JFFS_RAW_INODE_ACCURATE_OFFSET;
+	unsigned char zero = 0x00;
+	size_t len;
+
+	D3(printk("jffs_mark_obsolete(): accurate_pos = %u\n", accurate_pos));
+	ASSERT(if (!fmc) {
+		printk(KERN_ERR "jffs_mark_obsolete(): fmc == NULL\n");
+		return -1;
+	});
+
+	/* Write 0x00 to the raw inode's accurate member.  Don't care
+	   about the return value.  */
+	MTD_WRITE(fmc->mtd, accurate_pos, 1, &len, &zero);
+	return 0;
+}
+
+#endif /* JFFS_MARK_OBSOLETE  */
+
+/* check if it's possible to erase the wanted range, and if not, return
+ * the range that IS erasable, or a negative error code.
+ */
+static long
+jffs_flash_erasable_size(struct mtd_info *mtd, __u32 offset, __u32 size)
+{
+	u_long ssize;
+
+	/* assume that sector size for a partition is constant even
+	 * if it spans more than one chip (you usually put the same
+	 * type of chips in a system)
+	 */
+
+	ssize = mtd->erasesize;
+
+	if (offset % ssize) {
+		printk(KERN_WARNING "jffs_flash_erasable_size() given non-aligned offset %x (erasesize %lx)\n", offset, ssize);
+		/* The offset is not sector size aligned.  */
+		return -1;
+	}
+	else if (offset > mtd->size) {
+		printk(KERN_WARNING "jffs_flash_erasable_size given offset off the end of device (%x > %x)\n", offset, mtd->size);
+		return -2;
+	}
+	else if (offset + size > mtd->size) {
+		printk(KERN_WARNING "jffs_flash_erasable_size() given length which runs off the end of device (ofs %x + len %x = %x, > %x)\n", offset,size, offset+size, mtd->size);
+		return -3;
+	}
+
+	return (size / ssize) * ssize;
+}
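+
+/* For an aligned, in-range request the result above is simply the size
+   rounded down to a whole number of erase sectors; e.g. 0x28000 bytes with
+   a 0x10000 byte erasesize yields 0x20000.  */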
+
+
+/* How much dirty flash memory is possible to erase at the moment?  */
+long
+jffs_erasable_size(struct jffs_fmcontrol *fmc)
+{
+	struct jffs_fm *fm;
+	__u32 size = 0;
+	long ret;
+
+	ASSERT(if (!fmc) {
+		printk(KERN_ERR "jffs_erasable_size(): fmc = NULL\n");
+		return -1;
+	});
+
+	if (!fmc->head) {
+		/* The flash memory is totally empty. No nodes. No dirt.
+		   Just return.  */
+		return 0;
+	}
+
+	/* Calculate how much space is dirty.  */
+	for (fm = fmc->head; fm && !fm->nodes; fm = fm->next) {
+		if (size && fm->offset == 0) {
+			/* We have reached the beginning of the flash.  */
+			break;
+		}
+		size += fm->size;
+	}
+
+	/* Someone's signature contained this:
+	   There's a fine line between fishing and just standing on
+	   the shore like an idiot...  */
+	ret = jffs_flash_erasable_size(fmc->mtd, fmc->head->offset, size);
+
+	ASSERT(if (ret < 0) {
+		printk("jffs_erasable_size: flash_erasable_size() "
+		       "returned something less than zero (%ld).\n", ret);
+		printk("jffs_erasable_size: offset = 0x%08x\n",
+		       fmc->head->offset);
+	});
+
+	/* If there is dirt on the flash (which is the reason why
+	   this function was called in the first place) but no space is
+	   possible to erase right now, the initial part of the list of
+	   jffs_fm structs, that hold place for dirty space, could perhaps
+	   be shortened.  The list's initial "dirty" elements are merged
+	   into just one large dirty jffs_fm struct.  This operation must
+	   only be performed if nothing is possible to erase.  Otherwise,
+	   jffs_clear_end_of_node() won't work as expected.  */
+	if (ret == 0) {
+		struct jffs_fm *head = fmc->head;
+		struct jffs_fm *del;
+		/* While there are two dirty nodes beside each other.*/
+		while (head->nodes == 0
+		       && head->next
+		       && head->next->nodes == 0) {
+			del = head->next;
+			head->size += del->size;
+			head->next = del->next;
+			if (del->next) {
+				del->next->prev = head;
+			}
+			jffs_free_fm(del);
+		}
+	}
+
+	return (ret >= 0 ? ret : 0);
+}
+
+static struct jffs_fm *jffs_alloc_fm(void)
+{
+	struct jffs_fm *fm;
+
+	fm = kmem_cache_alloc(fm_cache,GFP_KERNEL);
+	DJM(if (fm) no_jffs_fm++;);
+	
+	return fm;
+}
+
+static void jffs_free_fm(struct jffs_fm *n)
+{
+	kmem_cache_free(fm_cache,n);
+	DJM(no_jffs_fm--);
+}
+
+
+
+struct jffs_node *jffs_alloc_node(void)
+{
+	struct jffs_node *n;
+
+	n = (struct jffs_node *)kmem_cache_alloc(node_cache,GFP_KERNEL);
+	if(n != NULL)
+		no_jffs_node++;
+	return n;
+}
+
+void jffs_free_node(struct jffs_node *n)
+{
+	kmem_cache_free(node_cache,n);
+	no_jffs_node--;
+}
+
+
+int jffs_get_node_inuse(void)
+{
+	return no_jffs_node;
+}
+
+void
+jffs_print_fmcontrol(struct jffs_fmcontrol *fmc)
+{
+	D(printk("struct jffs_fmcontrol: 0x%p\n", fmc));
+	D(printk("{\n"));
+	D(printk("        %u, /* flash_size  */\n", fmc->flash_size));
+	D(printk("        %u, /* used_size  */\n", fmc->used_size));
+	D(printk("        %u, /* dirty_size  */\n", fmc->dirty_size));
+	D(printk("        %u, /* free_size  */\n", fmc->free_size));
+	D(printk("        %u, /* sector_size  */\n", fmc->sector_size));
+	D(printk("        %u, /* min_free_size  */\n", fmc->min_free_size));
+	D(printk("        %u, /* max_chunk_size  */\n", fmc->max_chunk_size));
+	D(printk("        0x%p, /* mtd  */\n", fmc->mtd));
+	D(printk("        0x%p, /* head  */    "
+		 "(head->offset = 0x%08x)\n",
+		 fmc->head, (fmc->head ? fmc->head->offset : 0)));
+	D(printk("        0x%p, /* tail  */    "
+		 "(tail->offset + tail->size = 0x%08x)\n",
+		 fmc->tail,
+		 (fmc->tail ? fmc->tail->offset + fmc->tail->size : 0)));
+	D(printk("        0x%p, /* head_extra  */\n", fmc->head_extra));
+	D(printk("        0x%p, /* tail_extra  */\n", fmc->tail_extra));
+	D(printk("}\n"));
+}
+
+void
+jffs_print_fm(struct jffs_fm *fm)
+{
+	D(printk("struct jffs_fm: 0x%p\n", fm));
+	D(printk("{\n"));
+	D(printk("       0x%08x, /* offset  */\n", fm->offset));
+	D(printk("       %u, /* size  */\n", fm->size));
+	D(printk("       0x%p, /* prev  */\n", fm->prev));
+	D(printk("       0x%p, /* next  */\n", fm->next));
+	D(printk("       0x%p, /* nodes  */\n", fm->nodes));
+	D(printk("}\n"));
+}
+
+#if 0
+void
+jffs_print_node_ref(struct jffs_node_ref *ref)
+{
+	D(printk("struct jffs_node_ref: 0x%p\n", ref));
+	D(printk("{\n"));
+	D(printk("       0x%p, /* node  */\n", ref->node));
+	D(printk("       0x%p, /* next  */\n", ref->next));
+	D(printk("}\n"));
+}
+#endif  /*  0  */
+
diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h
new file mode 100644
index 0000000..bc291c4
--- /dev/null
+++ b/fs/jffs/jffs_fm.h
@@ -0,0 +1,148 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 1999, 2000  Axis Communications AB.
+ *
+ * Created by Finn Hakansson <finn@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: jffs_fm.h,v 1.13 2001/01/11 12:03:25 dwmw2 Exp $
+ *
+ * Ported to Linux 2.3.x and MTD:
+ * Copyright (C) 2000  Alexander Larsson (alex@cendio.se), Cendio Systems AB
+ *
+ */
+
+#ifndef __LINUX_JFFS_FM_H__
+#define __LINUX_JFFS_FM_H__
+
+#include <linux/types.h>
+#include <linux/jffs.h>
+#include <linux/mtd/mtd.h>
+#include <linux/config.h>
+
+/* The alignment between two nodes in the flash memory.  */
+#define JFFS_ALIGN_SIZE 4
+
+/* Mark the on-flash space as obsolete when appropriate.  */
+#define JFFS_MARK_OBSOLETE 0
+
+#ifndef CONFIG_JFFS_FS_VERBOSE
+#define CONFIG_JFFS_FS_VERBOSE 1
+#endif
+
+#if CONFIG_JFFS_FS_VERBOSE > 0
+#define D(x) x
+#define D1(x) D(x)
+#else
+#define D(x)
+#define D1(x)
+#endif
+
+#if CONFIG_JFFS_FS_VERBOSE > 1
+#define D2(x) D(x)
+#else
+#define D2(x)
+#endif
+
+#if CONFIG_JFFS_FS_VERBOSE > 2
+#define D3(x) D(x)
+#else
+#define D3(x)
+#endif
+
+#define ASSERT(x) x
+
+/* How many padding bytes should be inserted between two chunks of data
+   on the flash?  */
+#define JFFS_GET_PAD_BYTES(size) ( (JFFS_ALIGN_SIZE-1) & -(__u32)(size) )
+#define JFFS_PAD(size) ( (size + (JFFS_ALIGN_SIZE-1)) & ~(JFFS_ALIGN_SIZE-1) )
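+/* For example, with JFFS_ALIGN_SIZE == 4: JFFS_PAD(13) == 16 and
+   JFFS_GET_PAD_BYTES(13) == 3, i.e. JFFS_PAD() rounds a size up to the next
+   alignment boundary and JFFS_GET_PAD_BYTES() gives the number of filler
+   bytes needed to get there (0 for sizes that are already aligned).  */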
+
+
+
+struct jffs_node_ref
+{
+	struct jffs_node *node;
+	struct jffs_node_ref *next;
+};
+
+
+/* The struct jffs_fm represents a chunk of data in the flash memory.  */
+struct jffs_fm
+{
+	__u32 offset;
+	__u32 size;
+	struct jffs_fm *prev;
+	struct jffs_fm *next;
+	struct jffs_node_ref *nodes; /* USED if != 0.  */
+};
+
+struct jffs_fmcontrol
+{
+	__u32 flash_size;
+	__u32 used_size;
+	__u32 dirty_size;
+	__u32 free_size;
+	__u32 sector_size;
+	__u32 min_free_size;  /* The minimum free space needed to be able
+				 to perform garbage collections.  */
+	__u32 max_chunk_size; /* The maximum size of a chunk of data.  */
+	struct mtd_info *mtd;
+	struct jffs_control *c;
+	struct jffs_fm *head;
+	struct jffs_fm *tail;
+	struct jffs_fm *head_extra;
+	struct jffs_fm *tail_extra;
+	struct semaphore biglock;
+};
+
+/* Notice the two members head_extra and tail_extra in the jffs_fmcontrol
+   structure above. Those are only used during the scanning of the flash
+   memory, while the file system is being built. If the data in the flash
+   memory is organized like
+
+      +----------------+------------------+----------------+
+      |  USED / DIRTY  |       FREE       |  USED / DIRTY  |
+      +----------------+------------------+----------------+
+
+   then the scan is split in two parts. The first scanned part of the
+   flash memory is organized through the members head and tail. The
+   second scanned part is organized with head_extra and tail_extra. When
+   the scan is completed, the two lists are merged together. The jffs_fm
+   struct that head_extra references is the logical beginning of the
+   flash memory so it will be referenced by the head member.  */
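+/* The merge described above is performed by jffs_build_end() in jffs_fm.c
+   once the scan of the flash has finished.  */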
+
+
+
+struct jffs_fmcontrol *jffs_build_begin(struct jffs_control *c, int unit);
+void jffs_build_end(struct jffs_fmcontrol *fmc);
+void jffs_cleanup_fmcontrol(struct jffs_fmcontrol *fmc);
+
+int jffs_fmalloc(struct jffs_fmcontrol *fmc, __u32 size,
+		 struct jffs_node *node, struct jffs_fm **result);
+int jffs_fmfree(struct jffs_fmcontrol *fmc, struct jffs_fm *fm,
+		struct jffs_node *node);
+
+__u32 jffs_free_size1(struct jffs_fmcontrol *fmc);
+__u32 jffs_free_size2(struct jffs_fmcontrol *fmc);
+void jffs_sync_erase(struct jffs_fmcontrol *fmc, int erased_size);
+struct jffs_fm *jffs_cut_node(struct jffs_fmcontrol *fmc, __u32 size);
+struct jffs_node *jffs_get_oldest_node(struct jffs_fmcontrol *fmc);
+long jffs_erasable_size(struct jffs_fmcontrol *fmc);
+struct jffs_fm *jffs_fmalloced(struct jffs_fmcontrol *fmc, __u32 offset,
+			       __u32 size, struct jffs_node *node);
+int jffs_add_node(struct jffs_node *node);
+void jffs_fmfree_partly(struct jffs_fmcontrol *fmc, struct jffs_fm *fm,
+			__u32 size);
+
+void jffs_print_fmcontrol(struct jffs_fmcontrol *fmc);
+void jffs_print_fm(struct jffs_fm *fm);
+#if 0
+void jffs_print_node_ref(struct jffs_node_ref *ref);
+#endif  /*  0  */
+
+#endif /* __LINUX_JFFS_FM_H__  */
diff --git a/fs/jffs/jffs_proc.c b/fs/jffs/jffs_proc.c
new file mode 100644
index 0000000..9bdd99a
--- /dev/null
+++ b/fs/jffs/jffs_proc.c
@@ -0,0 +1,261 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 2000  Axis Communications AB.
+ *
+ * Created by Simon Kagstrom <simonk@axis.com>.
+ *
+ * $Id: jffs_proc.c,v 1.5 2001/06/02 14:34:55 dwmw2 Exp $
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ *  Overview:
+ *   This file defines JFFS partition entries in the proc file system.
+ *
+ *  TODO:
+ *   Create some more proc files for different kinds of info, e.g. statistics
+ *   about written and read bytes, number of calls to different routines,
+ *   reports about failures.
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include "jffs_fm.h"
+#include "jffs_proc.h"
+
+/*
+ * Structure for a JFFS partition in the system
+ */
+struct jffs_partition_dir {
+	struct jffs_control *c;
+	struct proc_dir_entry *part_root;
+	struct proc_dir_entry *part_info;
+	struct proc_dir_entry *part_layout;
+	struct jffs_partition_dir *next;
+};
+
+/*
+ * Structure for top-level entry in '/proc/fs' directory
+ */
+struct proc_dir_entry *jffs_proc_root;
+
+/*
+ * Linked list of 'jffs_partition_dirs' to help us track
+ * the mounted JFFS partitions in the system
+ */
+static struct jffs_partition_dir *jffs_part_dirs;
+
+/*
+ * Read functions for entries
+ */
+static int jffs_proc_info_read(char *page, char **start, off_t off,
+		int count, int *eof, void *data);
+static int jffs_proc_layout_read (char *page, char **start, off_t off,
+		int count, int *eof, void *data);
+
+
+/*
+ * Register a JFFS partition directory (called upon mount)
+ */
+int jffs_register_jffs_proc_dir(int mtd, struct jffs_control *c)
+{
+	struct jffs_partition_dir *part_dir;
+	struct proc_dir_entry *part_info = NULL;
+	struct proc_dir_entry *part_layout = NULL;
+	struct proc_dir_entry *part_root = NULL;
+	char name[10];
+
+	sprintf(name, "%d", mtd);
+	/* Allocate structure for local JFFS partition table */
+	part_dir = (struct jffs_partition_dir *)
+		kmalloc(sizeof (struct jffs_partition_dir), GFP_KERNEL);
+	if (!part_dir)
+		goto out;
+
+	/* Create entry for this partition */
+	part_root = proc_mkdir(name, jffs_proc_root);
+	if (!part_root)
+		goto out1;
+
+	/* Create entry for 'info' file */
+	part_info = create_proc_entry ("info", 0, part_root);
+	if (!part_info)
+		goto out2;
+	part_info->read_proc = jffs_proc_info_read;
+	part_info->data = (void *) c;
+
+	/* Create entry for 'layout' file */
+	part_layout = create_proc_entry ("layout", 0, part_root);
+	if (!part_layout)
+		goto out3;
+	part_layout->read_proc = jffs_proc_layout_read;
+	part_layout->data = (void *) c;
+
+	/* Fill in structure for table and insert in the list */
+	part_dir->c = c;
+	part_dir->part_root = part_root;
+	part_dir->part_info = part_info;
+	part_dir->part_layout = part_layout;
+	part_dir->next = jffs_part_dirs;
+	jffs_part_dirs = part_dir;
+
+	/* Return happy */
+	return 0;
+
+out3:
+	remove_proc_entry("info", part_root);
+out2:
+	remove_proc_entry(name, jffs_proc_root);
+out1:
+	kfree(part_dir);
+out:
+	return -ENOMEM;
+}
+
+
+/*
+ * Unregister a JFFS partition directory (called at umount)
+ */
+int jffs_unregister_jffs_proc_dir(struct jffs_control *c)
+{
+	struct jffs_partition_dir *part_dir = jffs_part_dirs;
+	struct jffs_partition_dir *prev_part_dir = NULL;
+
+	while (part_dir) {
+		if (part_dir->c == c) {
+			/* Remove entries for partition */
+			remove_proc_entry (part_dir->part_info->name,
+				part_dir->part_root);
+			remove_proc_entry (part_dir->part_layout->name,
+				part_dir->part_root);
+			remove_proc_entry (part_dir->part_root->name,
+				jffs_proc_root);
+
+			/* Remove entry from list */
+			if (prev_part_dir)
+				prev_part_dir->next = part_dir->next;
+			else
+				jffs_part_dirs = part_dir->next;
+
+			/*
+			 * Check to see if this is the last one
+			 * and remove the entry from '/proc/fs'
+			 * if it is.
+			 */
+			if (jffs_part_dirs == part_dir->next)
+				remove_proc_entry ("jffs", proc_root_fs);
+
+			/* Free memory for entry */
+			kfree(part_dir);
+
+			/* Return happy */
+			return 0;
+		}
+
+		/* Move to next entry */
+		prev_part_dir = part_dir;
+		part_dir = part_dir->next;
+	}
+
+	/* Return unhappy */
+	return -1;
+}
+
+
+/*
+ * Read a JFFS partition's `info' file
+ */
+static int jffs_proc_info_read (char *page, char **start, off_t off,
+		int count, int *eof, void *data)
+{
+	struct jffs_control *c = (struct jffs_control *) data;
+	int len = 0;
+
+	/* Get information on the partition */
+	len += sprintf (page,
+		"partition size:     %08lX (%u)\n"
+		"sector size:        %08lX (%u)\n"
+		"used size:          %08lX (%u)\n"
+		"dirty size:         %08lX (%u)\n"
+		"free size:          %08lX (%u)\n\n",
+		(unsigned long) c->fmc->flash_size, c->fmc->flash_size,
+		(unsigned long) c->fmc->sector_size, c->fmc->sector_size,
+		(unsigned long) c->fmc->used_size, c->fmc->used_size,
+		(unsigned long) c->fmc->dirty_size, c->fmc->dirty_size,
+		(unsigned long) (c->fmc->flash_size -
+			(c->fmc->used_size + c->fmc->dirty_size)),
+		c->fmc->flash_size - (c->fmc->used_size + c->fmc->dirty_size));
+
+	/* We're done */
+	*eof = 1;
+
+	/* Return length */
+	return len;
+}
+
+
+/*
+ * Read a JFFS partition's `layout' file
+ */
+static int jffs_proc_layout_read (char *page, char **start, off_t off,
+		int count, int *eof, void *data)
+{
+	struct jffs_control *c = (struct jffs_control *) data;
+	struct jffs_fm *fm = NULL;
+	struct jffs_fm *last_fm = NULL;
+	int len = 0;
+
+	/* Get the first item in the list */
+ 	fm = c->fmc->head;
+
+	/* Print free space */
+	if (fm && fm->offset) {
+		len += sprintf (page, "00000000 %08lX free\n",
+			(unsigned long) fm->offset);
+	}
+
+	/* Loop through all of the flash control structures */
+	while (fm && (len < (off + count))) {
+		if (fm->nodes) {
+			len += sprintf (page + len,
+				"%08lX %08lX ino=%08lX, ver=%08lX\n",
+				(unsigned long) fm->offset,
+				(unsigned long) fm->size,
+				(unsigned long) fm->nodes->node->ino,
+				(unsigned long) fm->nodes->node->version);
+		}
+		else {
+			len += sprintf (page + len,
+				"%08lX %08lX dirty\n",
+				(unsigned long) fm->offset,
+				(unsigned long) fm->size);
+		}
+		last_fm = fm;
+		fm = fm->next;
+	}
+
+	/* Print free space */
+	if ((len < (off + count)) && last_fm
+	    && (last_fm->offset < c->fmc->flash_size)) {
+		len += sprintf (page + len,
+			       "%08lX %08lX free\n",
+			       (unsigned long) last_fm->offset + 
+				last_fm->size,
+			       (unsigned long) (c->fmc->flash_size -
+						    (last_fm->offset + last_fm->size)));
+	}
+
+	/* We're done */
+	*eof = 1;
+
+	/* Return length */
+	return len;
+}
diff --git a/fs/jffs/jffs_proc.h b/fs/jffs/jffs_proc.h
new file mode 100644
index 0000000..39a1c5d
--- /dev/null
+++ b/fs/jffs/jffs_proc.h
@@ -0,0 +1,28 @@
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 2000  Axis Communications AB.
+ *
+ * Created by Simon Kagstrom <simonk@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: jffs_proc.h,v 1.2 2000/11/15 22:04:12 sjhill Exp $
+ */
+
+/* jffs_proc.h defines a structure for inclusion in the proc-file system.  */
+#ifndef __LINUX_JFFS_PROC_H__
+#define __LINUX_JFFS_PROC_H__
+
+#include <linux/proc_fs.h>
+
+/* The proc_dir_entry for jffs (defined in jffs_proc.c).  */
+extern struct proc_dir_entry *jffs_proc_root;
+
+int jffs_register_jffs_proc_dir(int mtd, struct jffs_control *c);
+int jffs_unregister_jffs_proc_dir(struct jffs_control *c);
+
+#endif /* __LINUX_JFFS_PROC_H__ */
diff --git a/fs/jffs2/LICENCE b/fs/jffs2/LICENCE
new file mode 100644
index 0000000..cd81d83
--- /dev/null
+++ b/fs/jffs2/LICENCE
@@ -0,0 +1,35 @@
+The files in this directory and elsewhere which refer to this LICENCE
+file are part of JFFS2, the Journalling Flash File System v2.
+
+	Copyright (C) 2001, 2002 Red Hat, Inc.
+
+JFFS2 is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 or (at your option) any later 
+version.
+
+JFFS2 is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License along
+with JFFS2; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+As a special exception, if other files instantiate templates or use
+macros or inline functions from these files, or you compile these
+files and link them with other works to produce a work based on these
+files, these files do not by themselves cause the resulting work to be
+covered by the GNU General Public License. However the source code for
+these files must still be made available in accordance with section (3)
+of the GNU General Public License.
+
+This exception does not invalidate any other reasons why a work based on
+this file might be covered by the GNU General Public License.
+
+For information on obtaining alternative licences for JFFS2, see 
+http://sources.redhat.com/jffs2/jffs2-licence.html
+
+
+	$Id: LICENCE,v 1.1 2002/05/20 14:56:37 dwmw2 Exp $
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
new file mode 100644
index 0000000..e3c38cc
--- /dev/null
+++ b/fs/jffs2/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
+#
+# $Id: Makefile.common,v 1.7 2004/11/03 12:57:38 jwboyer Exp $
+#
+
+obj-$(CONFIG_JFFS2_FS) += jffs2.o
+
+jffs2-y	:= compr.o dir.o file.o ioctl.o nodelist.o malloc.o
+jffs2-y	+= read.o nodemgmt.o readinode.o write.o scan.o gc.o
+jffs2-y	+= symlink.o build.o erase.o background.o fs.o writev.o
+jffs2-y	+= super.o
+
+jffs2-$(CONFIG_JFFS2_FS_NAND)	+= wbuf.o
+jffs2-$(CONFIG_JFFS2_FS_NOR_ECC) += wbuf.o
+jffs2-$(CONFIG_JFFS2_RUBIN)	+= compr_rubin.o
+jffs2-$(CONFIG_JFFS2_RTIME)	+= compr_rtime.o
+jffs2-$(CONFIG_JFFS2_ZLIB)	+= compr_zlib.o
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
new file mode 100644
index 0000000..49771cf
--- /dev/null
+++ b/fs/jffs2/README.Locking
@@ -0,0 +1,148 @@
+	$Id: README.Locking,v 1.9 2004/11/20 10:35:40 dwmw2 Exp $
+
+	JFFS2 LOCKING DOCUMENTATION
+	---------------------------
+
+At least theoretically, JFFS2 does not require the Big Kernel Lock
+(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
+code. It has its own locking, as described below.
+
+This document attempts to describe the existing locking rules for
+JFFS2. It is not expected to remain perfectly up to date, but ought to
+be fairly close.
+
+
+	alloc_sem
+	---------
+
+The alloc_sem is a per-filesystem semaphore, used primarily to ensure
+contiguous allocation of space on the medium. It is automatically
+obtained during space allocations (jffs2_reserve_space()) and freed
+upon write completion (jffs2_complete_reservation()). Note that
+the garbage collector will obtain this right at the beginning of
+jffs2_garbage_collect_pass() and release it at the end, thereby
+preventing any other write activity on the file system during a
+garbage collect pass.
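+
+In other words, a writer conceptually does the following (a sketch only;
+argument lists are omitted, and the real callers, e.g. the code in write.c
+and gc.c, also handle errors and size accounting):
+
+	jffs2_reserve_space(c, ...);		/* takes c->alloc_sem */
+	/* ... write the new node and link it into its inode ... */
+	jffs2_complete_reservation(c);		/* releases c->alloc_sem */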
+
+When writing new nodes, the alloc_sem must be held until the new nodes
+have been properly linked into the data structures for the inode to
+which they belong. This is for the benefit of NAND flash - adding new
+nodes to an inode may obsolete old ones, and by holding the alloc_sem
+until this happens we ensure that any data in the write-buffer at the
+time this happens are part of the new node, not just something that
+was written afterwards. Hence, we can ensure the newly-obsoleted nodes
+don't actually get erased until the write-buffer has been flushed to
+the medium.
+
+With the introduction of NAND flash support and the write-buffer, 
+the alloc_sem is also used to protect the wbuf-related members of the
+jffs2_sb_info structure. Atomically reading the wbuf_len member to see
+if the wbuf is currently holding any data is permitted, though.
+
+Ordering constraints: See f->sem.
+
+
+	File Semaphore f->sem
+	---------------------
+
+This is the JFFS2-internal equivalent of the inode semaphore i->i_sem.
+It protects the contents of the jffs2_inode_info private inode data,
+including the linked list of node fragments (but see the notes below on
+erase_completion_lock), etc.
+
+The reason that the i_sem itself isn't used for this purpose is to
+avoid deadlocks with garbage collection -- the VFS will lock the i_sem
+before calling a function which may need to allocate space. The
+allocation may trigger garbage-collection, which may need to move a
+node belonging to the inode which was locked in the first place by the
+VFS. If the garbage collection code were to attempt to lock the i_sem
+of the inode from which it's garbage-collecting a physical node, this
+would lead to deadlock, unless we played games with unlocking the i_sem
+before calling the space allocation functions.
+
+Instead of playing such games, we just have an extra internal
+semaphore, which is obtained by the garbage collection code and also
+by the normal file system code _after_ allocation of space.
+
+Ordering constraints: 
+
+	1. Never attempt to allocate space or lock alloc_sem with 
+	   any f->sem held.
+	2. Never attempt to lock two file semaphores in one thread.
+	   No ordering rules have been made for doing so.
+
+
+	erase_completion_lock spinlock
+	------------------------------
+
+This is used to serialise access to the eraseblock lists, to the
+per-eraseblock lists of physical jffs2_raw_node_ref structures, and
+(NB) the per-inode list of physical nodes. The latter is a special
+case - see below.
+
+As the MTD API no longer permits erase-completion callback functions
+to be called from bottom-half (timer) context (on the basis that nobody
+ever actually implemented such a thing), it's now sufficient to use
+a simple spin_lock() rather than spin_lock_bh().
+
+Note that the per-inode list of physical nodes (f->nodes) is a special
+case. Any changes to _valid_ nodes (i.e. (->flash_offset & 1) == 0) in
+the list are protected by the file semaphore f->sem. But the erase
+code may remove _obsolete_ nodes from the list while holding only the
+erase_completion_lock. So you can walk the list only while holding the
+erase_completion_lock, and can drop the lock temporarily mid-walk as
+long as the pointer you're holding is to a _valid_ node, not an
+obsolete one.
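+
+A walk over that list therefore looks roughly like this (a sketch only; it
+uses the same termination convention, raw != (void *)inocache, as the
+mount-time code in build.c):
+
+	struct jffs2_raw_node_ref *raw;
+
+	spin_lock(&c->erase_completion_lock);
+	raw = f->inocache->nodes;
+	while (raw != (void *)f->inocache) {
+		/* only trust 'raw' across a temporary unlock if it is
+		   a _valid_ node, as described above */
+		raw = raw->next_in_ino;
+	}
+	spin_unlock(&c->erase_completion_lock);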
+
+The erase_completion_lock is also used to protect the c->gc_task
+pointer when the garbage collection thread exits. The code to kill the
+GC thread locks it, sends the signal, then unlocks it - while the GC
+thread itself locks it, zeroes c->gc_task, then unlocks on the exit path.
+
+
+	inocache_lock spinlock
+	----------------------
+
+This spinlock protects the hashed list (c->inocache_list) of the
+in-core jffs2_inode_cache objects (each inode in JFFS2 has a
+corresponding jffs2_inode_cache object). So, the inocache_lock
+has to be locked while walking the c->inocache_list hash buckets.
+
+Note that the f->sem guarantees that the corresponding jffs2_inode_cache
+will not be removed. So, it is allowed to access it without locking
+the inocache_lock spinlock. 
+
+Ordering constraints: 
+
+	If both erase_completion_lock and inocache_lock are needed, the
+	c->erase_completion_lock has to be acquired first.
+
+
+	erase_free_sem
+	--------------
+
+This semaphore is only used by the erase code which frees obsolete
+node references and the jffs2_garbage_collect_deletion_dirent()
+function. The latter function on NAND flash must read _obsolete_ nodes
+to determine whether the 'deletion dirent' under consideration can be
+discarded or whether it is still required to show that an inode has
+been unlinked. Because reading from the flash may sleep, the
+erase_completion_lock cannot be held, so an alternative, more
+heavyweight lock was required to prevent the erase code from freeing
+the jffs2_raw_node_ref structures in question while the garbage
+collection code is looking at them.
+
+Suggestions for alternative solutions to this problem would be welcomed.
+
+
+	wbuf_sem
+	--------
+
+This read/write semaphore protects against concurrent access to the
+write-behind buffer ('wbuf') used for flash chips where we must write
+in blocks. It protects both the contents of the wbuf and the metadata
+which indicates which flash region (if any) is currently covered by 
+the buffer.
+
+Ordering constraints:
+	Lock wbuf_sem last, after the alloc_sem and/or f->sem.
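+
+	Taken together with the constraints above, a thread that needs all
+	three therefore acquires them in the order
+
+		alloc_sem  ->  f->sem  ->  wbuf_sem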
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
new file mode 100644
index 0000000..2bff82f
--- /dev/null
+++ b/fs/jffs2/TODO
@@ -0,0 +1,40 @@
+$Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $
+
+ - disable compression in commit_write()?
+ - fine-tune the allocation / GC thresholds
+ - chattr support - turning on/off and tuning compression per-inode
+ - checkpointing (do we need this? scan is quite fast)
+ - make the scan code populate real inodes so read_inode just after 
+	mount doesn't have to read the flash twice for large files.
+	Make this a per-inode option, changeable with chattr, so you can
+	decide which inodes should be in-core immediately after mount.
+ - test, test, test
+
+ - NAND flash support:
+	- flush_wbuf using GC to fill it, don't just pad.
+	- Deal with write errors. Data don't get lost - we just have to write 
+	  the affected node(s) out again somewhere else.
+	- make fsync flush only if actually required
+	- make sys_sync() work.
+	- reboot notifier
+	- timed flush of old wbuf
+	- fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead.
+
+
+ - Optimisations:
+   - Stop GC from decompressing and immediately recompressing nodes which could
+     just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.)
+   - Furthermore, in the case where it could be copied intact we don't even need
+     to call iget() for it -- if we use (raw_node_ref->flash_offset & 2) as a flag
+     to show a node can be copied intact and it's _not_ in icache, we could just do
+     it, fix up the next_in_ino list and move on. We would need a way to find out
+     _whether_ it's in icache though -- if it's in icache we also need to do the 
+     fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could
+     help. (We have half of this now.)
+   - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in 
+     the full dirent, we only need to go to the flash in lookup() when we think we've
+     got a match, and in readdir(). 
+   - Doubly-linked next_in_ino list to allow us to free obsoleted raw_node_refs immediately?
+   - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into
+	jffs2_mark_node_obsolete(). Can all callers work it out?
+   - Remove size from jffs2_raw_node_frag. 
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
new file mode 100644
index 0000000..1be6de2
--- /dev/null
+++ b/fs/jffs2/background.c
@@ -0,0 +1,140 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: background.c,v 1.50 2004/11/16 20:36:10 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jffs2.h>
+#include <linux/mtd/mtd.h>
+#include <linux/completion.h>
+#include "nodelist.h"
+
+
+static int jffs2_garbage_collect_thread(void *);
+
+void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
+{
+	spin_lock(&c->erase_completion_lock);
+        if (c->gc_task && jffs2_thread_should_wake(c))
+                send_sig(SIGHUP, c->gc_task, 1);
+	spin_unlock(&c->erase_completion_lock);
+}
+
+/* This must only ever be called when no GC thread is currently running */
+int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
+{
+	pid_t pid;
+	int ret = 0;
+
+	if (c->gc_task)
+		BUG();
+
+	init_MUTEX_LOCKED(&c->gc_thread_start);
+	init_completion(&c->gc_thread_exit);
+
+	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
+	if (pid < 0) {
+		printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
+		complete(&c->gc_thread_exit);
+		ret = pid;
+	} else {
+		/* Wait for it... */
+		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
+		down(&c->gc_thread_start);
+	}
+ 
+	return ret;
+}
+
+void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
+{
+	spin_lock(&c->erase_completion_lock);
+	if (c->gc_task) {
+		D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
+		send_sig(SIGKILL, c->gc_task, 1);
+	}
+	spin_unlock(&c->erase_completion_lock);
+	wait_for_completion(&c->gc_thread_exit);
+}
+
+static int jffs2_garbage_collect_thread(void *_c)
+{
+	struct jffs2_sb_info *c = _c;
+
+	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
+	allow_signal(SIGKILL);
+	allow_signal(SIGSTOP);
+	allow_signal(SIGCONT);
+
+	c->gc_task = current;
+	up(&c->gc_thread_start);
+
+	set_user_nice(current, 10);
+
+	for (;;) {
+		allow_signal(SIGHUP);
+
+		if (!jffs2_thread_should_wake(c)) {
+			set_current_state (TASK_INTERRUPTIBLE);
+			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
+			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
+			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+			   matter - We don't care if we miss a wakeup, because the GC thread
+			   is only an optimisation anyway. */
+			schedule();
+		}
+
+		if (try_to_freeze(0))
+			continue;
+
+		cond_resched();
+
+		/* Put_super will send a SIGKILL and then wait on the sem. 
+		 */
+		while (signal_pending(current)) {
+			siginfo_t info;
+			unsigned long signr;
+
+			signr = dequeue_signal_lock(current, &current->blocked, &info);
+
+			switch(signr) {
+			case SIGSTOP:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
+				set_current_state(TASK_STOPPED);
+				schedule();
+				break;
+
+			case SIGKILL:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
+				goto die;
+
+			case SIGHUP:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
+				break;
+			default:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
+			}
+		}
+		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
+		disallow_signal(SIGHUP);
+
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
+		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
+			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
+			goto die;
+		}
+	}
+ die:
+	spin_lock(&c->erase_completion_lock);
+	c->gc_task = NULL;
+	spin_unlock(&c->erase_completion_lock);
+	complete_and_exit(&c->gc_thread_exit, 0);
+}
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
new file mode 100644
index 0000000..a01dd5f
--- /dev/null
+++ b/fs/jffs2/build.c
@@ -0,0 +1,371 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: build.c,v 1.69 2004/12/16 20:22:18 dmarlin Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);
+
+static inline struct jffs2_inode_cache *
+first_inode_chain(int *i, struct jffs2_sb_info *c)
+{
+	for (; *i < INOCACHE_HASHSIZE; (*i)++) {
+		if (c->inocache_list[*i])
+			return c->inocache_list[*i];
+	}
+	return NULL;
+}
+
+static inline struct jffs2_inode_cache *
+next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+{
+	/* More in this chain? */
+	if (ic->next)
+		return ic->next;
+	(*i)++;
+	return first_inode_chain(i, c);
+}
+
+#define for_each_inode(i, c, ic)			\
+	for (i = 0, ic = first_inode_chain(&i, (c));	\
+	     ic;					\
+	     ic = next_inode(&i, ic, (c)))
+
+
+static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+	struct jffs2_full_dirent *fd;
+
+	D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));
+
+	/* For each child, increase nlink */
+	for(fd = ic->scan_dents; fd; fd = fd->next) {
+		struct jffs2_inode_cache *child_ic;
+		if (!fd->ino)
+			continue;
+
+		/* XXX: Can get high latency here with huge directories */
+
+		child_ic = jffs2_get_ino_cache(c, fd->ino);
+		if (!child_ic) {
+			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+				  fd->name, fd->ino, ic->ino);
+			jffs2_mark_node_obsolete(c, fd->raw);
+			continue;
+		}
+
+		if (child_ic->nlink++ && fd->type == DT_DIR) {
+			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
+			if (fd->ino == 1 && ic->ino == 1) {
+				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
+				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
+			}
+			/* What do we do about it? */
+		}
+		D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
+		/* Can't free them. We might need them in pass 2 */
+	}
+}
+
+/* Scan plan:
+ - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
+ - Scan directory tree from top down, setting nlink in inocaches
+ - Scan inocaches for inodes with nlink==0
+*/
+static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+{
+	int ret;
+	int i;
+	struct jffs2_inode_cache *ic;
+	struct jffs2_full_dirent *fd;
+	struct jffs2_full_dirent *dead_fds = NULL;
+
+	/* First, scan the medium and build all the inode caches with
+	   lists of physical nodes */
+
+	c->flags |= JFFS2_SB_FLAG_MOUNTING;
+	ret = jffs2_scan_medium(c);
+	if (ret)
+		goto exit;
+
+	D1(printk(KERN_DEBUG "Scanned flash completely\n"));
+	D2(jffs2_dump_block_lists(c));
+
+	/* Now scan the directory tree, increasing nlink according to every dirent found. */
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
+
+		D1(BUG_ON(ic->ino > c->highest_ino));
+
+		if (ic->scan_dents) {
+			jffs2_build_inode_pass1(c, ic);
+			cond_resched();
+		}
+	}
+	c->flags &= ~JFFS2_SB_FLAG_MOUNTING;
+
+	D1(printk(KERN_DEBUG "Pass 1 complete\n"));
+
+	/* Next, scan for inodes with nlink == 0 and remove them. If
+	   they were directories, then decrement the nlink of their
+	   children too, and repeat the scan. As that's going to be
+	   a fairly uncommon occurrence, it's not so evil to do it this
+	   way. Recursion bad. */
+	D1(printk(KERN_DEBUG "Pass 2 starting\n"));
+
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
+		if (ic->nlink)
+			continue;
+			
+		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+		cond_resched();
+	} 
+
+	D1(printk(KERN_DEBUG "Pass 2a starting\n"));
+
+	while (dead_fds) {
+		fd = dead_fds;
+		dead_fds = fd->next;
+
+		ic = jffs2_get_ino_cache(c, fd->ino);
+		D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));
+
+		if (ic)
+			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+		jffs2_free_full_dirent(fd);
+	}
+
+	D1(printk(KERN_DEBUG "Pass 2 complete\n"));
+	
+	/* Finally, we can scan again and free the dirent structs */
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
+
+		while(ic->scan_dents) {
+			fd = ic->scan_dents;
+			ic->scan_dents = fd->next;
+			jffs2_free_full_dirent(fd);
+		}
+		ic->scan_dents = NULL;
+		cond_resched();
+	}
+	D1(printk(KERN_DEBUG "Pass 3 complete\n"));
+	D2(jffs2_dump_block_lists(c));
+
+	/* Rotate the lists by some number to ensure wear levelling */
+	jffs2_rotate_lists(c);
+
+	ret = 0;
+
+exit:
+	if (ret) {
+		for_each_inode(i, c, ic) {
+			while(ic->scan_dents) {
+				fd = ic->scan_dents;
+				ic->scan_dents = fd->next;
+				jffs2_free_full_dirent(fd);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+
+	D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
+	
+	raw = ic->nodes;
+	while (raw != (void *)ic) {
+		struct jffs2_raw_node_ref *next = raw->next_in_ino;
+		D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
+		jffs2_mark_node_obsolete(c, raw);
+		raw = next;
+	}
+
+	if (ic->scan_dents) {
+		int whinged = 0;
+		D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));
+
+		while(ic->scan_dents) {
+			struct jffs2_inode_cache *child_ic;
+
+			fd = ic->scan_dents;
+			ic->scan_dents = fd->next;
+
+			if (!fd->ino) {
+				/* It's a deletion dirent. Ignore it */
+				D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
+				jffs2_free_full_dirent(fd);
+				continue;
+			}
+			if (!whinged) {
+				whinged = 1;
+				printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
+			}
+
+			D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
+				  fd->name, fd->ino));
+			
+			child_ic = jffs2_get_ino_cache(c, fd->ino);
+			if (!child_ic) {
+				printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
+				jffs2_free_full_dirent(fd);
+				continue;
+			}
+
+			/* Reduce nlink of the child. If it's now zero, stick it on the 
+			   dead_fds list to be cleaned up later. Else just free the fd */
+
+			child_ic->nlink--;
+			
+			if (!child_ic->nlink) {
+				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
+					  fd->ino, fd->name));
+				fd->next = *dead_fds;
+				*dead_fds = fd;
+			} else {
+				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
+					  fd->ino, fd->name, child_ic->nlink));
+				jffs2_free_full_dirent(fd);
+			}
+		}
+	}
+
+	/*
+	   We don't delete the inocache from the hash list and free it yet. 
+	   The erase code will do that, when all the nodes are completely gone.
+	*/
+}
+
+static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
+{
+	uint32_t size;
+
+	/* Deletion should almost _always_ be allowed. We're fairly
+	   buggered once we stop allowing people to delete stuff
+	   because there's not enough free space... */
+	c->resv_blocks_deletion = 2;
+
+	/* Be conservative about how much space we need before we allow writes. 
+	   On top of that which is required for deletia, require an extra 2%
+	   of the medium to be available, for overhead caused by nodes being
+	   split across blocks, etc. */
+
+	size = c->flash_size / 50; /* 2% of flash size */
+	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
+	size += c->sector_size - 1; /* ... and round up */
+
+	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
+
+	/* When do we let the GC thread run in the background */
+
+	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
+
+	/* When do we allow garbage collection to merge nodes to make 
+	   long-term progress at the expense of short-term space exhaustion? */
+	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
+
+	/* When do we allow garbage collection to eat from bad blocks rather
+	   than actually making progress? */
+	c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
+
+	/* If there's less than this amount of dirty space, don't bother
+	   trying to GC to make more space. It'll be a fruitless task */
+	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
+
+	D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
+	D1(printk(KERN_DEBUG "Blocks required to allow deletion:    %d (%d KiB)\n",
+		  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to allow writes:      %d (%d KiB)\n",
+		  c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
+		  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to allow GC merges:   %d (%d KiB)\n",
+		  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to GC bad blocks:     %d (%d KiB)\n",
+		  c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
+		  c->nospc_dirty_size));
+} 
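+
+/* Worked example for a hypothetical 16MiB medium with 64KiB erase blocks
+   (256 blocks), just to make the arithmetic above concrete:
+
+	resv_blocks_deletion  = 2
+	size                  = 16777216/50 + 256*100 + 65535 = 426679
+	resv_blocks_write     = 2 + 426679/65536 = 8	(512 KiB)
+	resv_blocks_gctrigger = 9, resv_blocks_gcmerge = 3, resv_blocks_gcbad = 0
+	nospc_dirty_size      = 65536 + 16777216/100 = 233308 bytes
+*/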
+
+int jffs2_do_mount_fs(struct jffs2_sb_info *c)
+{
+	int i;
+
+	c->free_size = c->flash_size;
+	c->nr_blocks = c->flash_size / c->sector_size;
+ 	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
+	else
+		c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
+	if (!c->blocks)
+		return -ENOMEM;
+	for (i=0; i<c->nr_blocks; i++) {
+		INIT_LIST_HEAD(&c->blocks[i].list);
+		c->blocks[i].offset = i * c->sector_size;
+		c->blocks[i].free_size = c->sector_size;
+		c->blocks[i].dirty_size = 0;
+		c->blocks[i].wasted_size = 0;
+		c->blocks[i].unchecked_size = 0;
+		c->blocks[i].used_size = 0;
+		c->blocks[i].first_node = NULL;
+		c->blocks[i].last_node = NULL;
+		c->blocks[i].bad_count = 0;
+	}
+
+	init_MUTEX(&c->alloc_sem);
+	init_MUTEX(&c->erase_free_sem);
+	init_waitqueue_head(&c->erase_wait);
+	init_waitqueue_head(&c->inocache_wq);
+	spin_lock_init(&c->erase_completion_lock);
+	spin_lock_init(&c->inocache_lock);
+
+	INIT_LIST_HEAD(&c->clean_list);
+	INIT_LIST_HEAD(&c->very_dirty_list);
+	INIT_LIST_HEAD(&c->dirty_list);
+	INIT_LIST_HEAD(&c->erasable_list);
+	INIT_LIST_HEAD(&c->erasing_list);
+	INIT_LIST_HEAD(&c->erase_pending_list);
+	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
+	INIT_LIST_HEAD(&c->erase_complete_list);
+	INIT_LIST_HEAD(&c->free_list);
+	INIT_LIST_HEAD(&c->bad_list);
+	INIT_LIST_HEAD(&c->bad_used_list);
+	c->highest_ino = 1;
+
+	if (jffs2_build_filesystem(c)) {
+		D1(printk(KERN_DEBUG "build_fs failed\n"));
+		jffs2_free_ino_caches(c);
+		jffs2_free_raw_node_refs(c);
+		if (c->mtd->flags & MTD_NO_VIRTBLOCKS) {
+			vfree(c->blocks);
+		} else {
+			kfree(c->blocks);
+		}
+		return -EIO;
+	}
+
+	jffs2_calc_trigger_levels(c);
+
+	return 0;
+}
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
new file mode 100644
index 0000000..af922a9
--- /dev/null
+++ b/fs/jffs2/compr.c
@@ -0,0 +1,469 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ *                    University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $
+ *
+ */
+
+#include "compr.h"
+
+static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
+
+/* Available compressors are on this list */
+static LIST_HEAD(jffs2_compressor_list);
+
+/* Currently selected compression mode */
+static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+
+/* Statistics for blocks stored without compression */
+static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0;
+
+/* jffs2_compress:
+ * @data_in: Pointer to uncompressed data
+ * @cpage_out: Pointer to returned pointer to buffer for compressed data
+ * @datalen: On entry, holds the amount of data available for compression.
+ *	On exit, expected to hold the amount of data actually compressed.
+ * @cdatalen: On entry, holds the amount of space available for compressed
+ *	data. On exit, expected to hold the actual size of the compressed
+ *	data.
+ *
+ * Returns: Lower byte to be stored with data indicating compression type used.
+ * Zero is used to show that the data could not be compressed - the 
+ * compressed version was actually larger than the original.
+ * Upper byte will be used later. (soon)
+ *
+ * If the cdata buffer isn't large enough to hold all the uncompressed data,
+ * jffs2_compress should compress as much as will fit, and should set 
+ * *datalen accordingly to show the amount of data which was compressed.
+ */
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			     unsigned char *data_in, unsigned char **cpage_out, 
+			     uint32_t *datalen, uint32_t *cdatalen)
+{
+	int ret = JFFS2_COMPR_NONE;
+        int compr_ret;
+        struct jffs2_compressor *this, *best=NULL;
+        unsigned char *output_buf = NULL, *tmp_buf;
+        uint32_t orig_slen, orig_dlen;
+        uint32_t best_slen=0, best_dlen=0;
+
+        switch (jffs2_compression_mode) {
+        case JFFS2_COMPR_MODE_NONE:
+                break;
+        case JFFS2_COMPR_MODE_PRIORITY:
+                output_buf = kmalloc(*cdatalen,GFP_KERNEL);
+                if (!output_buf) {
+                        printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+                        goto out;
+                }
+                orig_slen = *datalen;
+                orig_dlen = *cdatalen;
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        /* Skip decompress-only backwards-compatibility and disabled modules */
+                        if ((!this->compress)||(this->disabled))
+                                continue;
+
+                        this->usecount++;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        *datalen  = orig_slen;
+                        *cdatalen = orig_dlen;
+                        compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL);
+                        spin_lock(&jffs2_compressor_list_lock);
+                        this->usecount--;
+                        if (!compr_ret) {
+                                ret = this->compr;
+                                this->stat_compr_blocks++;
+                                this->stat_compr_orig_size += *datalen;
+                                this->stat_compr_new_size  += *cdatalen;
+                                break;
+                        }
+                }
+                spin_unlock(&jffs2_compressor_list_lock);
+                if (ret == JFFS2_COMPR_NONE) kfree(output_buf);
+                break;
+        case JFFS2_COMPR_MODE_SIZE:
+                orig_slen = *datalen;
+                orig_dlen = *cdatalen;
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        /* Skip decompress-only backwards-compatibility and disabled modules */
+                        if ((!this->compress)||(this->disabled))
+                                continue;
+                        /* Allocating memory for output buffer if necessary */
+                        if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) {
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                kfree(this->compr_buf);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                this->compr_buf_size=0;
+                                this->compr_buf=NULL;
+                        }
+                        if (!this->compr_buf) {
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                tmp_buf = kmalloc(orig_dlen,GFP_KERNEL);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                if (!tmp_buf) {
+                                        printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen);
+                                        continue;
+                                }
+                                else {
+                                        this->compr_buf = tmp_buf;
+                                        this->compr_buf_size = orig_dlen;
+                                }
+                        }
+                        this->usecount++;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        *datalen  = orig_slen;
+                        *cdatalen = orig_dlen;
+                        compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL);
+                        spin_lock(&jffs2_compressor_list_lock);
+                        this->usecount--;
+                        if (!compr_ret) {
+                                if ((!best_dlen)||(best_dlen>*cdatalen)) {
+                                        best_dlen = *cdatalen;
+                                        best_slen = *datalen;
+                                        best = this;
+                                }
+                        }
+                }
+                if (best_dlen) {
+                        *cdatalen = best_dlen;
+                        *datalen  = best_slen;
+                        output_buf = best->compr_buf;
+                        best->compr_buf = NULL;
+                        best->compr_buf_size = 0;
+                        best->stat_compr_blocks++;
+                        best->stat_compr_orig_size += best_slen;
+                        best->stat_compr_new_size  += best_dlen;
+                        ret = best->compr;
+                }
+                spin_unlock(&jffs2_compressor_list_lock);
+                break;
+        default:
+                printk(KERN_ERR "JFFS2: unknown compression mode.\n");
+        }
+ out:
+        if (ret == JFFS2_COMPR_NONE) {
+	        *cpage_out = data_in;
+	        *datalen = *cdatalen;
+                none_stat_compr_blocks++;
+                none_stat_compr_size += *datalen;
+        }
+        else {
+                *cpage_out = output_buf;
+        }
+	return ret;
+}
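+
+/* A minimal caller sketch (illustrative only; the real write path also
+   builds the node header and handles error cases):
+
+	comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
+	... store (comprtype & 0xff) as the node's compression type and
+	    write cdatalen bytes from comprbuf ...
+	jffs2_free_comprbuf(comprbuf, buf);
+*/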
+
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     uint16_t comprtype, unsigned char *cdata_in, 
+		     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
+{
+        struct jffs2_compressor *this;
+        int ret;
+
+	/* Older code had a bug where it would write non-zero 'usercompr'
+	   fields. Deal with it. */
+	if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
+		comprtype &= 0xff;
+
+	switch (comprtype & 0xff) {
+	case JFFS2_COMPR_NONE:
+		/* This should be special-cased elsewhere, but we might as well deal with it */
+		memcpy(data_out, cdata_in, datalen);
+                none_stat_decompr_blocks++;
+		break;
+	case JFFS2_COMPR_ZERO:
+		memset(data_out, 0, datalen);
+		break;
+	default:
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        if (comprtype == this->compr) {
+                                this->usecount++;
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                if (ret) {
+                                        printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
+                                }
+                                else {
+                                        this->stat_decompr_blocks++;
+                                }
+                                this->usecount--;
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                return ret;
+                        }
+                }
+		printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
+                spin_unlock(&jffs2_compressor_list_lock);
+		return -EIO;
+	}
+	return 0;
+}
+
+int jffs2_register_compressor(struct jffs2_compressor *comp)
+{
+        struct jffs2_compressor *this;
+
+        if (!comp->name) {
+                printk(KERN_WARNING "NULL compressor name when registering JFFS2 compressor. Failed.\n");
+                return -1;
+        }
+        comp->compr_buf_size=0;
+        comp->compr_buf=NULL;
+        comp->usecount=0;
+        comp->stat_compr_orig_size=0;
+        comp->stat_compr_new_size=0;
+        comp->stat_compr_blocks=0;
+        comp->stat_decompr_blocks=0;
+        D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
+
+        spin_lock(&jffs2_compressor_list_lock);
+
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (this->priority < comp->priority) {
+                        list_add(&comp->list, this->list.prev);
+                        goto out;
+                }
+        }
+        list_add_tail(&comp->list, &jffs2_compressor_list);
+out:
+        D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+                printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+        })
+
+        spin_unlock(&jffs2_compressor_list_lock);
+
+        return 0;
+}
+
+int jffs2_unregister_compressor(struct jffs2_compressor *comp)
+{
+        D2(struct jffs2_compressor *this;)
+
+        D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
+
+        spin_lock(&jffs2_compressor_list_lock);
+
+        if (comp->usecount) {
+                spin_unlock(&jffs2_compressor_list_lock);
+                printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n");
+                return -1;
+        }
+        list_del(&comp->list);
+
+        D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+                printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+        })
+        spin_unlock(&jffs2_compressor_list_lock);
+        return 0;
+}
+
+#ifdef CONFIG_JFFS2_PROC
+
+#define JFFS2_STAT_BUF_SIZE 16000
+
+char *jffs2_list_compressors(void)
+{
+        struct jffs2_compressor *this;
+        char *buf, *act_buf;
+
+        act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority);
+                if ((this->disabled)||(!this->compress))
+                        act_buf += sprintf(act_buf,"disabled");
+                else
+                        act_buf += sprintf(act_buf,"enabled");
+                act_buf += sprintf(act_buf,"\n");
+        }
+        return buf;
+}
+
+char *jffs2_stats(void)
+{
+        struct jffs2_compressor *this;
+        char *buf, *act_buf;
+
+        act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+
+        act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n");
+        act_buf += sprintf(act_buf,"%10s   ","none");
+        act_buf += sprintf(act_buf,"compr: %d blocks (%d)  decompr: %d blocks\n", none_stat_compr_blocks, 
+                           none_stat_compr_size, none_stat_decompr_blocks);
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                act_buf += sprintf(act_buf,"%10s ",this->name);
+                if ((this->disabled)||(!this->compress))
+                        act_buf += sprintf(act_buf,"- ");
+                else
+                        act_buf += sprintf(act_buf,"+ ");
+                act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d)  decompr: %d blocks ", this->stat_compr_blocks, 
+                                   this->stat_compr_new_size, this->stat_compr_orig_size, 
+                                   this->stat_decompr_blocks);
+                act_buf += sprintf(act_buf,"\n");
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+
+        return buf;
+}
+
+char *jffs2_get_compression_mode_name(void) 
+{
+        switch (jffs2_compression_mode) {
+        case JFFS2_COMPR_MODE_NONE:
+                return "none";
+        case JFFS2_COMPR_MODE_PRIORITY:
+                return "priority";
+        case JFFS2_COMPR_MODE_SIZE:
+                return "size";
+        }
+        return "unknown";
+}
+
+int jffs2_set_compression_mode_name(const char *name) 
+{
+        if (!strcmp("none",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+                return 0;
+        }
+        if (!strcmp("priority",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+                return 0;
+        }
+        if (!strcmp("size",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+                return 0;
+        }
+        return 1;
+}
+
+static int jffs2_compressor_Xable(const char *name, int disabled)
+{
+        struct jffs2_compressor *this;
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (!strcmp(this->name, name)) {
+                        this->disabled = disabled;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        return 0;                        
+                }
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+        printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
+        return 1;
+}
+
+int jffs2_enable_compressor_name(const char *name)
+{
+        return jffs2_compressor_Xable(name, 0);
+}
+
+int jffs2_disable_compressor_name(const char *name)
+{
+        return jffs2_compressor_Xable(name, 1);
+}
+
+int jffs2_set_compressor_priority(const char *name, int priority)
+{
+        struct jffs2_compressor *this,*comp;
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (!strcmp(this->name, name)) {
+                        this->priority = priority;
+                        comp = this;
+                        goto reinsert;
+                }
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+        printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);        
+        return 1;
+reinsert:
+        /* list is sorted in the order of priority, so if
+           we change it we have to reinsert it into the
+           good place */
+        list_del(&comp->list);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (this->priority < comp->priority) {
+                        list_add(&comp->list, this->list.prev);
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        return 0;
+                }
+        }
+        list_add_tail(&comp->list, &jffs2_compressor_list);
+        spin_unlock(&jffs2_compressor_list_lock);
+        return 0;
+}
+
+#endif
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
+{
+        if (orig != comprbuf)
+                kfree(comprbuf);
+}
+
+int jffs2_compressors_init(void) 
+{
+/* Registering compressors */
+#ifdef CONFIG_JFFS2_ZLIB
+        jffs2_zlib_init();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+        jffs2_rtime_init();
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+        jffs2_rubinmips_init();
+        jffs2_dynrubin_init();
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+        jffs2_lzari_init();
+#endif
+#ifdef CONFIG_JFFS2_LZO
+        jffs2_lzo_init();
+#endif
+/* Setting default compression mode */
+#ifdef CONFIG_JFFS2_CMODE_NONE
+        jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+        D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
+#else
+#ifdef CONFIG_JFFS2_CMODE_SIZE
+        jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+        D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
+#else
+        D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
+#endif
+#endif
+        return 0;
+}
+
+int jffs2_compressors_exit(void) 
+{
+/* Unregistering compressors */
+#ifdef CONFIG_JFFS2_LZO
+        jffs2_lzo_exit();
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+        jffs2_lzari_exit();
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+        jffs2_dynrubin_exit();
+        jffs2_rubinmips_exit();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+        jffs2_rtime_exit();
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+        jffs2_zlib_exit();
+#endif
+        return 0;
+}
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
new file mode 100644
index 0000000..89ceeed
--- /dev/null
+++ b/fs/jffs2/compr.h
@@ -0,0 +1,115 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ *                    University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in the 
+ * jffs2 directory.
+ *
+ * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+ *
+ */
+
+#ifndef __JFFS2_COMPR_H__
+#define __JFFS2_COMPR_H__
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_i.h>
+#include <linux/jffs2_fs_sb.h>
+#include "nodelist.h"
+
+#define JFFS2_RUBINMIPS_PRIORITY 10
+#define JFFS2_DYNRUBIN_PRIORITY  20
+#define JFFS2_LZARI_PRIORITY     30
+#define JFFS2_LZO_PRIORITY       40
+#define JFFS2_RTIME_PRIORITY     50
+#define JFFS2_ZLIB_PRIORITY      60
+
+#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+#define JFFS2_DYNRUBIN_DISABLED  /*        for decompression */
+
+#define JFFS2_COMPR_MODE_NONE       0
+#define JFFS2_COMPR_MODE_PRIORITY   1
+#define JFFS2_COMPR_MODE_SIZE       2
+
+struct jffs2_compressor {
+        struct list_head list;
+        int priority;              /* used by priority compr. mode */
+        char *name;
+        char compr;                /* JFFS2_COMPR_XXX */
+        int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
+                        uint32_t *srclen, uint32_t *destlen, void *model);
+        int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
+                        uint32_t cdatalen, uint32_t datalen, void *model);
+        int usecount;
+        int disabled;              /* if set, the compressor won't compress */
+        unsigned char *compr_buf;  /* used by size compr. mode */
+        uint32_t compr_buf_size;   /* used by size compr. mode */
+        uint32_t stat_compr_orig_size;
+        uint32_t stat_compr_new_size;
+        uint32_t stat_compr_blocks;
+        uint32_t stat_decompr_blocks;
+};
+
+int jffs2_register_compressor(struct jffs2_compressor *comp);
+int jffs2_unregister_compressor(struct jffs2_compressor *comp);
+
+int jffs2_compressors_init(void);
+int jffs2_compressors_exit(void);
+
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+                             unsigned char *data_in, unsigned char **cpage_out,
+                             uint32_t *datalen, uint32_t *cdatalen);
+
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+                     uint16_t comprtype, unsigned char *cdata_in,
+                     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig);
+
+#ifdef CONFIG_JFFS2_PROC
+int jffs2_enable_compressor_name(const char *name);
+int jffs2_disable_compressor_name(const char *name);
+int jffs2_set_compression_mode_name(const char *mode_name);
+char *jffs2_get_compression_mode_name(void);
+int jffs2_set_compressor_priority(const char *mode_name, int priority);
+char *jffs2_list_compressors(void);
+char *jffs2_stats(void);
+#endif
+
+/* Compressor modules */
+/* These functions will be called by jffs2_compressors_init/exit */
+
+#ifdef CONFIG_JFFS2_RUBIN
+int jffs2_rubinmips_init(void);
+void jffs2_rubinmips_exit(void);
+int jffs2_dynrubin_init(void);
+void jffs2_dynrubin_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+int jffs2_rtime_init(void);
+void jffs2_rtime_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+int jffs2_zlib_init(void);
+void jffs2_zlib_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+int jffs2_lzari_init(void);
+void jffs2_lzari_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_LZO
+int jffs2_lzo_init(void);
+void jffs2_lzo_exit(void);
+#endif
+
+#endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
new file mode 100644
index 0000000..3931294
--- /dev/null
+++ b/fs/jffs2/compr_rtime.c
@@ -0,0 +1,132 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_rtime.c,v 1.14 2004/06/23 16:34:40 havasi Exp $
+ *
+ *
+ * Very simple lz77-ish encoder.
+ *
+ * Theory of operation: Both encoder and decoder have a list of "last
+ * occurrences" for every possible source-value; after sending the
+ * first source-byte, the second byte indicates the "run" length of
+ * matches against the bytes that followed the last occurrence.
+ *
+ * The algorithm is intended to only send "whole bytes", no bit-messing.
+ *
+ */
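+
+/* Worked example (traced from the encoder below): a run of 300 identical
+ * 0x61 bytes encodes to the four bytes 0x61 0xff 0x61 0x2b -- a literal
+ * plus a 255-byte run, then a second literal plus the remaining 43-byte
+ * run.  Input with no repeats gains one run-length byte per literal, in
+ * which case the encoder returns -1 and the data is left for another
+ * compressor (or stored uncompressed). */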
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h> 
+#include <linux/jffs2.h> 
+#include "compr.h"
+
+/* _compress returns 0 on success (updating *sourcelen and *dstlen), or -1 if the output would be bigger */
+static int jffs2_rtime_compress(unsigned char *data_in,
+				unsigned char *cpage_out,
+				uint32_t *sourcelen, uint32_t *dstlen,
+				void *model)
+{
+	short positions[256];
+	int outpos = 0;
+	int pos=0;
+
+	memset(positions,0,sizeof(positions)); 
+	
+	while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
+		int backpos, runlen=0;
+		unsigned char value;
+		
+		value = data_in[pos];
+
+		cpage_out[outpos++] = data_in[pos++];
+		
+		backpos = positions[value];
+		positions[value]=pos;
+		
+		while ((backpos < pos) && (pos < (*sourcelen)) &&
+		       (data_in[pos]==data_in[backpos++]) && (runlen<255)) {
+			pos++;
+			runlen++;
+		}
+		cpage_out[outpos++] = runlen;
+	}
+
+	if (outpos >= pos) {
+		/* We failed */
+		return -1;
+	}
+	
+	/* Tell the caller how much we managed to compress, and how much space it took */
+	*sourcelen = pos;
+	*dstlen = outpos;
+	return 0;
+}		   
+
+
+static int jffs2_rtime_decompress(unsigned char *data_in,
+				  unsigned char *cpage_out,
+				  uint32_t srclen, uint32_t destlen,
+				  void *model)
+{
+	short positions[256];
+	int outpos = 0;
+	int pos=0;
+	
+	memset(positions,0,sizeof(positions)); 
+	
+	while (outpos<destlen) {
+		unsigned char value;
+		int backoffs;
+		int repeat;
+		
+		value = data_in[pos++];
+		cpage_out[outpos++] = value; /* first the verbatim copied byte */
+		repeat = data_in[pos++];
+		backoffs = positions[value];
+		
+		positions[value]=outpos;
+		if (repeat) {
+			if (backoffs + repeat >= outpos) {
+				while(repeat) {
+					cpage_out[outpos++] = cpage_out[backoffs++];
+					repeat--;
+				}
+			} else {
+				memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
+				outpos+=repeat;		
+			}
+		}
+	}
+        return 0;
+}		   
+
+static struct jffs2_compressor jffs2_rtime_comp = {
+    .priority = JFFS2_RTIME_PRIORITY,
+    .name = "rtime",
+    .compr = JFFS2_COMPR_RTIME,
+    .compress = &jffs2_rtime_compress,
+    .decompress = &jffs2_rtime_decompress,
+#ifdef JFFS2_RTIME_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_rtime_init(void)
+{
+    return jffs2_register_compressor(&jffs2_rtime_comp);
+}
+
+void jffs2_rtime_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_rtime_comp);
+}
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
new file mode 100644
index 0000000..450d662
--- /dev/null
+++ b/fs/jffs2/compr_rubin.c
@@ -0,0 +1,373 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_rubin.c,v 1.20 2004/06/23 16:34:40 havasi Exp $
+ *
+ */
+
+ 
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/jffs2.h>
+#include "compr_rubin.h"
+#include "histo_mips.h"
+#include "compr.h"
+
+static void init_rubin(struct rubin_state *rs, int div, int *bits)
+{	
+	int c;
+
+	rs->q = 0;
+	rs->p = (long) (2 * UPPER_BIT_RUBIN);
+	rs->bit_number = (long) 0;
+	rs->bit_divider = div;
+	for (c=0; c<8; c++)
+		rs->bits[c] = bits[c];
+}
+
+
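+/* The functions below implement a simple arithmetic-style (Rubin) coder:
+ * (q, p) describe an interval of width p starting at q within a
+ * RUBIN_REG_SIZE-bit register.  encode() first pushes out any leading
+ * bits of the interval that are already determined, shifting left to
+ * renormalise, then splits the interval in the ratio A:B and keeps the
+ * lower part for symbol 0 or the upper part for symbol 1.  decode()
+ * mirrors this, comparing the bits pulled into rec_q against the split
+ * point to recover each symbol. */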
+static int encode(struct rubin_state *rs, long A, long B, int symbol)
+{
+
+	long i0, i1;
+	int ret;
+
+	while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
+		rs->bit_number++;
+		
+		ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
+		if (ret)
+			return ret;
+		rs->q &= LOWER_BITS_RUBIN;
+		rs->q <<= 1;
+		rs->p <<= 1;
+	}
+	i0 = A * rs->p / (A + B);
+	if (i0 <= 0) {
+		i0 = 1;
+	}
+	if (i0 >= rs->p) {
+		i0 = rs->p - 1;
+	}
+	i1 = rs->p - i0;
+
+	if (symbol == 0)
+		rs->p = i0;
+	else {
+		rs->p = i1;
+		rs->q += i0;
+	}
+	return 0;
+}
+
+
+static void end_rubin(struct rubin_state *rs)
+{				
+
+	int i;
+
+	for (i = 0; i < RUBIN_REG_SIZE; i++) {
+		pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
+		rs->q &= LOWER_BITS_RUBIN;
+		rs->q <<= 1;
+	}
+}
+
+
+static void init_decode(struct rubin_state *rs, int div, int *bits)
+{
+	init_rubin(rs, div, bits);		
+
+	/* except lower */
+	rs->rec_q = 0;
+
+	for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
+		;
+}
+
+static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q)
+{
+	register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
+	unsigned long rec_q;
+	int c, bits = 0;
+
+	/*
+	 * First, work out how many bits we need from the input stream.
+	 * Note that we have already done the initial check on this
+	 * loop prior to calling this function.
+	 */
+	do {
+		bits++;
+		q &= lower_bits_rubin;
+		q <<= 1;
+		p <<= 1;
+	} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
+
+	rs->p = p;
+	rs->q = q;
+
+	rs->bit_number += bits;
+
+	/*
+	 * Now get the bits.  We really want this to be "get n bits".
+	 */
+	rec_q = rs->rec_q;
+	do {
+		c = pullbit(&rs->pp);
+		rec_q &= lower_bits_rubin;
+		rec_q <<= 1;
+		rec_q += c;
+	} while (--bits);
+	rs->rec_q = rec_q;
+}
+
+static int decode(struct rubin_state *rs, long A, long B)
+{
+	unsigned long p = rs->p, q = rs->q;
+	long i0, threshold;
+	int symbol;
+
+	if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
+		__do_decode(rs, p, q);
+
+	i0 = A * rs->p / (A + B);
+	if (i0 <= 0) {
+		i0 = 1;
+	}
+	if (i0 >= rs->p) {
+		i0 = rs->p - 1;
+	}
+
+	threshold = rs->q + i0;
+	symbol = rs->rec_q >= threshold;
+	if (rs->rec_q >= threshold) {
+		rs->q += i0;
+		i0 = rs->p - i0;
+	}
+
+	rs->p = i0;
+
+	return symbol;
+}
+
+
+
+static int out_byte(struct rubin_state *rs, unsigned char byte)
+{
+	int i, ret;
+	struct rubin_state rs_copy;
+	rs_copy = *rs;
+
+	for (i=0;i<8;i++) {
+		ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1);
+		if (ret) {
+			/* Failed. Restore old state */
+			*rs = rs_copy;
+			return ret;
+		}
+		byte=byte>>1;
+	}
+	return 0;
+}
+
+static int in_byte(struct rubin_state *rs)
+{
+	int i, result = 0, bit_divider = rs->bit_divider;
+
+	for (i = 0; i < 8; i++)
+		result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i;
+
+	return result;
+}
+
+
+
+static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, 
+		      unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen)
+{
+	int outpos = 0;
+	int pos=0;
+	struct rubin_state rs;
+
+	init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
+
+	init_rubin(&rs, bit_divider, bits);
+	
+	while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos]))
+		pos++;
+	
+	end_rubin(&rs);
+
+	if (outpos > pos) {
+		/* We failed */
+		return -1;
+	}
+	
+	/* Tell the caller how much we managed to compress, 
+	 * and how much space it took */
+	
+	outpos = (pushedbits(&rs.pp)+7)/8;
+	
+	if (outpos >= pos)
+		return -1; /* We didn't actually compress */
+	*sourcelen = pos;
+	*dstlen = outpos;
+	return 0;
+}		   
+#if 0
+/* _compress returns the compressed size, -1 if bigger */
+int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+}
+#endif
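+/* dynrubin ("dynamic rubin"): rather than the fixed table in histo_mips.h
+ * used by the rubinmips variant, the compressor below builds a per-bit
+ * histogram of the input, scales each bit position's set-frequency to a
+ * value out of 256, stores those eight model bytes at the start of the
+ * output and then Rubin-codes the data with a bit divider of 256.  The
+ * decompressor reads the same eight bytes back to rebuild the model
+ * (see jffs2_dynrubin_decompress below). */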
+int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	int bits[8];
+	unsigned char histo[256];
+	int i;
+	int ret;
+	uint32_t mysrclen, mydstlen;
+
+	mysrclen = *sourcelen;
+	mydstlen = *dstlen - 8;
+
+	if (*dstlen <= 12)
+		return -1;
+
+	memset(histo, 0, 256);
+	for (i=0; i<mysrclen; i++) {
+		histo[data_in[i]]++;
+	}
+	memset(bits, 0, sizeof(int)*8);
+	for (i=0; i<256; i++) {
+		if (i&128)
+			bits[7] += histo[i];
+		if (i&64)
+			bits[6] += histo[i];
+		if (i&32)
+			bits[5] += histo[i];
+		if (i&16)
+			bits[4] += histo[i];
+		if (i&8)
+			bits[3] += histo[i];
+		if (i&4)
+			bits[2] += histo[i];
+		if (i&2)
+			bits[1] += histo[i];
+		if (i&1)
+			bits[0] += histo[i];
+	}
+
+	for (i=0; i<8; i++) {
+		bits[i] = (bits[i] * 256) / mysrclen;
+		if (!bits[i]) bits[i] = 1;
+		if (bits[i] > 255) bits[i] = 255;
+		cpage_out[i] = bits[i];
+	}
+
+	ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
+	if (ret) 
+		return ret;
+
+	/* Add back the 8 bytes we took for the probabilities */
+	mydstlen += 8;
+
+	if (mysrclen <= mydstlen) {
+		/* We didn't compress: output (with its 8-byte model) is no smaller */
+		return -1;
+	}
+
+	*sourcelen = mysrclen;
+	*dstlen = mydstlen;
+	return 0;
+}
+
+static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, 
+			 unsigned char *page_out, uint32_t srclen, uint32_t destlen)
+{
+	int outpos = 0;
+	struct rubin_state rs;
+	
+	init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
+	init_decode(&rs, bit_divider, bits);
+	
+	while (outpos < destlen) {
+		page_out[outpos++] = in_byte(&rs);
+	}
+}		   
+
+
+int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t sourcelen, uint32_t dstlen, void *model)
+{
+	rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+        return 0;
+}
+
+int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t sourcelen, uint32_t dstlen, void *model)
+{
+	int bits[8];
+	int c;
+
+	for (c=0; c<8; c++)
+		bits[c] = data_in[c];
+
+	rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen);
+        return 0;
+}
+
+static struct jffs2_compressor jffs2_rubinmips_comp = {
+    .priority = JFFS2_RUBINMIPS_PRIORITY,
+    .name = "rubinmips",
+    .compr = JFFS2_COMPR_DYNRUBIN,
+    .compress = NULL, /*&jffs2_rubinmips_compress,*/
+    .decompress = &jffs2_rubinmips_decompress,
+#ifdef JFFS2_RUBINMIPS_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_rubinmips_init(void)
+{
+    return jffs2_register_compressor(&jffs2_rubinmips_comp);
+}
+
+void jffs2_rubinmips_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_rubinmips_comp);
+}
+
+static struct jffs2_compressor jffs2_dynrubin_comp = {
+    .priority = JFFS2_DYNRUBIN_PRIORITY,
+    .name = "dynrubin",
+    .compr = JFFS2_COMPR_RUBINMIPS,
+    .compress = jffs2_dynrubin_compress,
+    .decompress = &jffs2_dynrubin_decompress,
+#ifdef JFFS2_DYNRUBIN_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_dynrubin_init(void)
+{
+    return jffs2_register_compressor(&jffs2_dynrubin_comp);
+}
+
+void jffs2_dynrubin_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_dynrubin_comp);
+}
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h
new file mode 100644
index 0000000..cf51e34
--- /dev/null
+++ b/fs/jffs2/compr_rubin.h
@@ -0,0 +1,21 @@
+/* Rubin encoder/decoder header       */
+/* work started at   : aug   3, 1994  */
+/* last modification : aug  15, 1994  */
+/* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */
+
+#include "pushpull.h"
+
+#define RUBIN_REG_SIZE   16
+#define UPPER_BIT_RUBIN    (((long) 1)<<(RUBIN_REG_SIZE-1))
+#define LOWER_BITS_RUBIN   ((((long) 1)<<(RUBIN_REG_SIZE-1))-1)
+
+
+struct rubin_state {
+	unsigned long p;		
+	unsigned long q;	
+	unsigned long rec_q;
+	long bit_number;
+	struct pushpull pp;
+	int bit_divider;
+	int bits[8];
+};
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
new file mode 100644
index 0000000..9f9932c
--- /dev/null
+++ b/fs/jffs2/compr_zlib.c
@@ -0,0 +1,218 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_zlib.c,v 1.29 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#if !defined(__KERNEL__) && !defined(__ECOS)
+#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
+#endif
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+#include <linux/zutil.h>
+#include <asm/semaphore.h>
+#include "nodelist.h"
+#include "compr.h"
+
+	/* Plan: call deflate() with avail_in == *sourcelen, 
+		avail_out = *dstlen - 12 and flush == Z_FINISH. 
+		If it doesn't manage to finish,	call it again with
+		avail_in == 0 and avail_out set to the remaining 12
+		bytes for it to clean up. 
+	   Q: Is 12 bytes sufficient?
+	*/
+#define STREAM_END_SPACE 12
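+/* 12 bytes should be plenty: finishing the stream costs at most a few
+   bits to terminate the final deflate block (or an empty stored block
+   for byte alignment) plus the 4-byte Adler-32 trailer. */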
+
+static DECLARE_MUTEX(deflate_sem);
+static DECLARE_MUTEX(inflate_sem);
+static z_stream inf_strm, def_strm;
+
+#ifdef __KERNEL__ /* Linux-only */
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+static int __init alloc_workspaces(void)
+{
+	def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+	if (!def_strm.workspace) {
+		printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize());
+		return -ENOMEM;
+	}
+	D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize()));
+	inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	if (!inf_strm.workspace) {
+		printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
+		vfree(def_strm.workspace);
+		return -ENOMEM;
+	}
+	D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()));
+	return 0;
+}
+
+static void free_workspaces(void)
+{
+	vfree(def_strm.workspace);
+	vfree(inf_strm.workspace);
+}
+#else
+#define alloc_workspaces() (0)
+#define free_workspaces() do { } while(0)
+#endif /* __KERNEL__ */
+
+int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	int ret;
+
+	if (*dstlen <= STREAM_END_SPACE)
+		return -1;
+
+	down(&deflate_sem);
+
+	if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
+		printk(KERN_WARNING "deflateInit failed\n");
+		up(&deflate_sem);
+		return -1;
+	}
+
+	def_strm.next_in = data_in;
+	def_strm.total_in = 0;
+	
+	def_strm.next_out = cpage_out;
+	def_strm.total_out = 0;
+
+	while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
+		def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
+		def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
+		D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
+			  def_strm.avail_in, def_strm.avail_out));
+		ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
+		D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", 
+			  def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
+		if (ret != Z_OK) {
+			D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
+			zlib_deflateEnd(&def_strm);
+			up(&deflate_sem);
+			return -1;
+		}
+	}
+	def_strm.avail_out += STREAM_END_SPACE;
+	def_strm.avail_in = 0;
+	ret = zlib_deflate(&def_strm, Z_FINISH);
+	zlib_deflateEnd(&def_strm);
+
+	if (ret != Z_STREAM_END) {
+		D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
+		ret = -1;
+		goto out;
+	}
+
+	if (def_strm.total_out >= def_strm.total_in) {
+		D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
+			  def_strm.total_in, def_strm.total_out));
+		ret = -1;
+		goto out;
+	}
+
+	D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
+		  def_strm.total_in, def_strm.total_out));
+
+	*dstlen = def_strm.total_out;
+	*sourcelen = def_strm.total_in;
+	ret = 0;
+ out:
+	up(&deflate_sem);
+	return ret;
+}
+
+int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
+		      uint32_t srclen, uint32_t destlen, void *model)
+{
+	int ret;
+	int wbits = MAX_WBITS;
+
+	down(&inflate_sem);
+
+	inf_strm.next_in = data_in;
+	inf_strm.avail_in = srclen;
+	inf_strm.total_in = 0;
+	
+	inf_strm.next_out = cpage_out;
+	inf_strm.avail_out = destlen;
+	inf_strm.total_out = 0;
+
+	/* If it's deflate, and it's got no preset dictionary, then
+	   we can tell zlib to skip the adler32 check. */
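+	/* The two-byte zlib header (CMF/FLG) is only valid if, read as a
+	   big-endian 16-bit value, it is a multiple of 31; PRESET_DICT is
+	   the FDICT flag bit in FLG.  Passing a negative windowBits to
+	   zlib_inflateInit2() below tells zlib to inflate raw deflate data
+	   with no zlib header or Adler-32 trailer, which is why the two
+	   header bytes are skipped from the input here. */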
+	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
+	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
+	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
+
+		D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
+		wbits = -((data_in[0] >> 4) + 8);
+		inf_strm.next_in += 2;
+		inf_strm.avail_in -= 2;
+	} else {
+		/* Let this remain D1 for now -- it should never happen */
+		D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
+	}
+
+
+	if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
+		printk(KERN_WARNING "inflateInit failed\n");
+		up(&inflate_sem);
+		return 1;
+	}
+
+	while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
+		;
+	if (ret != Z_STREAM_END) {
+		printk(KERN_NOTICE "inflate returned %d\n", ret);
+	}
+	zlib_inflateEnd(&inf_strm);
+	up(&inflate_sem);
+        return 0;
+}
+
+static struct jffs2_compressor jffs2_zlib_comp = {
+    .priority = JFFS2_ZLIB_PRIORITY,
+    .name = "zlib",
+    .compr = JFFS2_COMPR_ZLIB,
+    .compress = &jffs2_zlib_compress,
+    .decompress = &jffs2_zlib_decompress,
+#ifdef JFFS2_ZLIB_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int __init jffs2_zlib_init(void)
+{
+    int ret;
+
+    ret = alloc_workspaces();
+    if (ret)
+        return ret;
+
+    ret = jffs2_register_compressor(&jffs2_zlib_comp);
+    if (ret)
+        free_workspaces();
+
+    return ret;
+}
+
+void jffs2_zlib_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_zlib_comp);
+    free_workspaces();
+}
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c
new file mode 100644
index 0000000..cf51f09
--- /dev/null
+++ b/fs/jffs2/comprtest.c
@@ -0,0 +1,307 @@
+/* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/types.h>
+#if 0
+#define TESTDATA_LEN 512
+static unsigned char testdata[TESTDATA_LEN] = {
+ 0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x83, 0x04, 0x08, 0x34, 0x00, 0x00, 0x00,
+ 0xb0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x20, 0x00, 0x06, 0x00, 0x28, 0x00,
+ 0x1e, 0x00, 0x1b, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x34, 0x80, 0x04, 0x08,
+ 0x34, 0x80, 0x04, 0x08, 0xc0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xf4, 0x00, 0x00, 0x00, 0xf4, 0x80, 0x04, 0x08,
+ 0xf4, 0x80, 0x04, 0x08, 0x13, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x08,
+ 0x00, 0x80, 0x04, 0x08, 0x0d, 0x05, 0x00, 0x00, 0x0d, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x05, 0x00, 0x00, 0x10, 0x95, 0x04, 0x08,
+ 0x10, 0x95, 0x04, 0x08, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x58, 0x05, 0x00, 0x00, 0x58, 0x95, 0x04, 0x08,
+ 0x58, 0x95, 0x04, 0x08, 0xa0, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x08, 0x81, 0x04, 0x08,
+ 0x08, 0x81, 0x04, 0x08, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x6c, 0x64, 0x2d, 0x6c, 0x69, 0x6e, 0x75,
+ 0x78, 0x2e, 0x73, 0x6f, 0x2e, 0x32, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00,
+ 0x0c, 0x83, 0x04, 0x08, 0x81, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x1c, 0x83, 0x04, 0x08, 0xac, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00,
+ 0x2c, 0x83, 0x04, 0x08, 0xdd, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00,
+ 0x3c, 0x83, 0x04, 0x08, 0x2e, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x4c, 0x83, 0x04, 0x08, 0x7d, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x00, 0x85, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x5f, 0x67,
+ 0x6d, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x5f, 0x00, 0x6c, 0x69, 0x62, 0x63,
+ 0x2e, 0x73, 0x6f, 0x2e, 0x36, 0x00, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x00, 0x5f, 0x5f, 0x63};
+#else
+#define TESTDATA_LEN 3481
+static unsigned char testdata[TESTDATA_LEN] = {
+ 0x23, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x22, 0x64, 0x62, 0x65, 0x6e, 0x63, 0x68,
+ 0x2e, 0x68, 0x22, 0x0a, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x4d, 0x41, 0x58,
+ 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x20, 0x31, 0x30, 0x30, 0x30, 0x0a, 0x0a, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x62, 0x75, 0x66, 0x5b, 0x37, 0x30, 0x30,
+ 0x30, 0x30, 0x5d, 0x3b, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 0x0a, 0x09, 0x69, 0x6e,
+ 0x74, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x3b, 0x0a, 0x7d, 0x20, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x4d, 0x41, 0x58, 0x5f,
+ 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5d, 0x3b, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f,
+ 0x5f, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e,
+ 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72,
+ 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x75,
+ 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20,
+ 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28,
+ 0x25, 0x64, 0x29, 0x20, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61,
+ 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09,
+ 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a,
+ 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x3b, 0x0a,
+ 0x09, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a,
+ 0x09, 0x09, 0x73, 0x20, 0x3d, 0x20, 0x4d, 0x49, 0x4e, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x6f, 0x66,
+ 0x28, 0x62, 0x75, 0x66, 0x29, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73,
+ 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x3d, 0x20, 0x73, 0x3b, 0x0a,
+ 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20,
+ 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c,
+ 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x3d,
+ 0x20, 0x4f, 0x5f, 0x52, 0x44, 0x57, 0x52, 0x7c, 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x3b,
+ 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74,
+ 0x3b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28,
+ 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69,
+ 0x7a, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x7c,
+ 0x3d, 0x20, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x64, 0x20,
+ 0x3d, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x2c, 0x20, 0x30, 0x36, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20,
+ 0x28, 0x66, 0x64, 0x20, 0x3d, 0x3d, 0x20, 0x2d, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70,
+ 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x6f, 0x70, 0x65, 0x6e,
+ 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22,
+ 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28,
+ 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x64, 0x2c,
+ 0x20, 0x26, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65,
+ 0x20, 0x3e, 0x20, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b,
+ 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64,
+ 0x69, 0x6e, 0x67, 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f,
+ 0x6d, 0x20, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74,
+ 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x23, 0x65,
+ 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69,
+ 0x6c, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x74,
+ 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x20, 0x65, 0x6c,
+ 0x73, 0x65, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3c, 0x20, 0x73, 0x74,
+ 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72,
+ 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67,
+ 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x25,
+ 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e,
+ 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09,
+ 0x09, 0x66, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69,
+ 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69,
+ 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20,
+ 0x30, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66,
+ 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53,
+ 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x66, 0x69,
+ 0x6c, 0x65, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x66, 0x75, 0x6c, 0x6c, 0x20, 0x66, 0x6f,
+ 0x72, 0x20, 0x25, 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b,
+ 0x0a, 0x09, 0x09, 0x65, 0x78, 0x69, 0x74, 0x28, 0x31, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09,
+ 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x20, 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09,
+ 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2b, 0x2b, 0x20, 0x25, 0x20, 0x31, 0x30,
+ 0x30, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e,
+ 0x74, 0x66, 0x28, 0x22, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76,
+ 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x69, 0x6e, 0x74,
+ 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x7a,
+ 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x0a, 0x7b,
+ 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x62,
+ 0x75, 0x66, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x6d, 0x65, 0x6d, 0x73,
+ 0x65, 0x74, 0x28, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x31, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x6f,
+ 0x66, 0x28, 0x62, 0x75, 0x66, 0x29, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28,
+ 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b,
+ 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d,
+ 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a,
+ 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58,
+ 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x31, 0x0a,
+ 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64,
+ 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20,
+ 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a,
+ 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b,
+ 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x2c,
+ 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20,
+ 0x28, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d,
+ 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20,
+ 0x21, 0x3d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65,
+ 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x28, 0x69,
+ 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29,
+ 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20,
+ 0x28, 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53,
+ 0x3b, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d,
+ 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b,
+ 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41,
+ 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61,
+ 0x64, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73,
+ 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25,
+ 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c,
+ 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x2c, 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09,
+ 0x72, 0x65, 0x61, 0x64, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66,
+ 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28,
+ 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69,
+ 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x3d, 0x30, 0x3b,
+ 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, 0x2b, 0x2b, 0x29,
+ 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b,
+ 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09,
+ 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c,
+ 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22,
+ 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x3a, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74,
+ 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20,
+ 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x29, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x20,
+ 0x30, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6d, 0x6b,
+ 0x64, 0x69, 0x72, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29,
+ 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61,
+ 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x6b, 0x64, 0x69, 0x72,
+ 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x30, 0x37, 0x30, 0x30, 0x29, 0x20, 0x21, 0x3d,
+ 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a,
+ 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x20,
+ 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61,
+ 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72,
+ 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x7d, 0x0a,
+ 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x6d, 0x64, 0x69, 0x72,
+ 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29,
+ 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x28, 0x66, 0x6e,
+ 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70,
+ 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x20, 0x25, 0x73, 0x20,
+ 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20,
+ 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c,
+ 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29,
+ 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f,
+ 0x5f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6f, 0x6c,
+ 0x64, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6e, 0x65, 0x77, 0x29, 0x0a, 0x7b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6f, 0x6c, 0x64, 0x29, 0x3b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6e, 0x65, 0x77, 0x29, 0x3b, 0x0a,
+ 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x6f, 0x6c, 0x64,
+ 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09,
+ 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x20,
+ 0x25, 0x73, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73,
+ 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6f, 0x6c, 0x64, 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x28,
+ 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75,
+ 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69,
+ 0x66, 0x20, 0x28, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x26,
+ 0x73, 0x74, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72,
+ 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x25,
+ 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x53, 0x5f, 0x49,
+ 0x53, 0x44, 0x49, 0x52, 0x28, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x29,
+ 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28,
+ 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x73, 0x69,
+ 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22,
+ 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73,
+ 0x20, 0x77, 0x72, 0x6f, 0x6e, 0x67, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x25, 0x64, 0x20, 0x25,
+ 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a,
+ 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28,
+ 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x35, 0x30, 0x30, 0x30, 0x2c, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28,
+ 0x35, 0x30, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x7d, 0x0a
+};
+#endif
+static unsigned char comprbuf[TESTDATA_LEN];
+static unsigned char decomprbuf[TESTDATA_LEN];
+
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, 
+		     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, 
+			     uint32_t *datalen, uint32_t *cdatalen);
+
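+/* NB: these prototypes follow the older fixed-compressor interface; they
+   do not match the jffs2_compress()/jffs2_decompress() declared in compr.h
+   elsewhere in this tree, so this standalone test would need updating for
+   the pluggable-compressor framework. */
+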
+int init_module(void)
+{
+	unsigned char comprtype;
+	uint32_t c, d;
+	int ret;
+
+	printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       testdata[0],testdata[1],testdata[2],testdata[3], 
+	       testdata[4],testdata[5],testdata[6],testdata[7], 
+	       testdata[8],testdata[9],testdata[10],testdata[11], 
+	       testdata[12],testdata[13],testdata[14],testdata[15]); 
+	d = TESTDATA_LEN;
+	c = TESTDATA_LEN;
+	comprtype = jffs2_compress(testdata, comprbuf, &d, &c);
+
+	printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n",
+	       comprtype, c, d);
+	printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], 
+	       comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], 
+	       comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], 
+	       comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); 
+
+	ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d);
+	printk("jffs2_decompress returned %d\n", ret);
+	printk("Decompressed data:  %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], 
+	       decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], 
+	       decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], 
+	       decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); 
+	if (memcmp(decomprbuf, testdata, d))
+		printk("Compression and decompression corrupted data\n");
+	else
+		printk("Compression good for %d bytes\n", d);
+	return 1;
+}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
new file mode 100644
index 0000000..757306f
--- /dev/null
+++ b/fs/jffs2/dir.c
@@ -0,0 +1,799 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: dir.c,v 1.84 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_i.h>
+#include <linux/jffs2_fs_sb.h>
+#include <linux/time.h>
+#include "nodelist.h"
+
+/* Urgh. Please tell me there's a nicer way of doing these. */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
+typedef int mknod_arg_t;
+#define NAMEI_COMPAT(x) ((void *)x)
+#else
+typedef dev_t mknod_arg_t;
+#define NAMEI_COMPAT(x) (x)
+#endif
+
+static int jffs2_readdir (struct file *, void *, filldir_t);
+
+static int jffs2_create (struct inode *,struct dentry *,int,
+			 struct nameidata *);
+static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
+				    struct nameidata *);
+static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
+static int jffs2_unlink (struct inode *,struct dentry *);
+static int jffs2_symlink (struct inode *,struct dentry *,const char *);
+static int jffs2_mkdir (struct inode *,struct dentry *,int);
+static int jffs2_rmdir (struct inode *,struct dentry *);
+static int jffs2_mknod (struct inode *,struct dentry *,int,mknod_arg_t);
+static int jffs2_rename (struct inode *, struct dentry *,
+                        struct inode *, struct dentry *);
+
+struct file_operations jffs2_dir_operations =
+{
+	.read =		generic_read_dir,
+	.readdir =	jffs2_readdir,
+	.ioctl =	jffs2_ioctl,
+	.fsync =	jffs2_fsync
+};
+
+
+struct inode_operations jffs2_dir_inode_operations =
+{
+	.create =	NAMEI_COMPAT(jffs2_create),
+	.lookup =	NAMEI_COMPAT(jffs2_lookup),
+	.link =		jffs2_link,
+	.unlink =	jffs2_unlink,
+	.symlink =	jffs2_symlink,
+	.mkdir =	jffs2_mkdir,
+	.rmdir =	jffs2_rmdir,
+	.mknod =	jffs2_mknod,
+	.rename =	jffs2_rename,
+	.setattr =	jffs2_setattr,
+};
+
+/***********************************************************************/
+
+
+/* We keep the dirent list sorted in increasing order of name hash,
+   and we use the same hash function as the dentries. Makes this
+   nice and simple.
+*/
+static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
+				   struct nameidata *nd)
+{
+	struct jffs2_inode_info *dir_f;
+	struct jffs2_sb_info *c;
+	struct jffs2_full_dirent *fd = NULL, *fd_list;
+	uint32_t ino = 0;
+	struct inode *inode = NULL;
+
+	D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	down(&dir_f->sem);
+
+	/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
+	for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
+		if (fd_list->nhash == target->d_name.hash && 
+		    (!fd || fd_list->version > fd->version) &&
+		    strlen(fd_list->name) == target->d_name.len &&
+		    !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
+			fd = fd_list;
+		}
+	}
+	if (fd)
+		ino = fd->ino;
+	up(&dir_f->sem);
+	if (ino) {
+		inode = iget(dir_i->i_sb, ino);
+		if (!inode) {
+			printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
+			return (ERR_PTR(-EIO));
+		}
+	}
+
+	d_add(target, inode);
+
+	return NULL;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_sb_info *c;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct jffs2_full_dirent *fd;
+	unsigned long offset, curofs;
+
+	D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_dentry->d_inode->i_ino));
+
+	f = JFFS2_INODE_INFO(inode);
+	c = JFFS2_SB_INFO(inode->i_sb);
+
+	offset = filp->f_pos;
+
+	if (offset == 0) {
+		D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino));
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+			goto out;
+		offset++;
+	}
+	if (offset == 1) {
+		unsigned long pino = parent_ino(filp->f_dentry);
+		D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
+		if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
+			goto out;
+		offset++;
+	}
+
+	curofs=1;
+	down(&f->sem);
+	for (fd = f->dents; fd; fd = fd->next) {
+
+		curofs++;
+		/* First loop: curofs = 2; offset = 2 */
+		if (curofs < offset) {
+			D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", 
+				  fd->name, fd->ino, fd->type, curofs, offset));
+			continue;
+		}
+		if (!fd->ino) {
+			D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
+			offset++;
+			continue;
+		}
+		D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type));
+		if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
+			break;
+		offset++;
+	}
+	up(&f->sem);
+ out:
+	filp->f_pos = offset;
+	return 0;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+			struct nameidata *nd)
+{
+	struct jffs2_raw_inode *ri;
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	int ret;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	D1(printk(KERN_DEBUG "jffs2_create()\n"));
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
+		jffs2_free_raw_inode(ri);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_file_inode_operations;
+	inode->i_fop = &jffs2_file_operations;
+	inode->i_mapping->a_ops = &jffs2_file_address_operations;
+	inode->i_mapping->nrpages = 0;
+
+	f = JFFS2_INODE_INFO(inode);
+	dir_f = JFFS2_INODE_INFO(dir_i);
+
+	ret = jffs2_do_create(c, dir_f, f, ri, 
+			      dentry->d_name.name, dentry->d_name.len);
+
+	if (ret) {
+		make_bad_inode(inode);
+		iput(inode);
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+
+	jffs2_free_raw_inode(ri);
+	d_instantiate(dentry, inode);
+
+	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
+		  inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
+	return 0;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
+	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
+	struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode);
+	int ret;
+
+	ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, 
+			       dentry->d_name.len, dead_f);
+	if (dead_f->inocache)
+		dentry->d_inode->i_nlink = dead_f->inocache->nlink;
+	return ret;
+}
+/***********************************************************************/
+
+
+static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb);
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
+	int ret;
+	uint8_t type;
+
+	/* Don't let people make hard links to bad inodes. */
+	if (!f->inocache)
+		return -EIO;
+
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		return -EPERM;
+
+	/* XXX: This is ugly */
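+	/* The DT_* dirent types are defined as (S_IFMT bits >> 12), so the
+	   shift below maps the inode mode straight onto the dirent type. */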
+	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+	if (!type) type = DT_REG;
+
+	ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len);
+
+	if (!ret) {
+		down(&f->sem);
+		old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
+		up(&f->sem);
+		d_instantiate(dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+	}
+	return ret;
+}
+
+/***********************************************************************/
+
+static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	/* FIXME: If you care. We'd need to use frags for the target
+	   if it grows much more than this */
+	if (strlen(target) > 254)
+		return -EINVAL;
+
+	ri = jffs2_alloc_raw_inode();
+
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+	
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri) + strlen(target), &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_symlink_inode_operations;
+
+	f = JFFS2_INODE_INFO(inode);
+
+	inode->i_size = strlen(target);
+	ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->compr = JFFS2_COMPR_NONE;
+	ri->data_crc = cpu_to_je32(crc32(0, target, strlen(target)));
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, target, strlen(target), phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+	rd->type = DT_LNK;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+
+static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	mode |= S_IFDIR;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_dir_inode_operations;
+	inode->i_fop = &jffs2_dir_operations;
+	/* Directories get nlink 2 at start */
+	inode->i_nlink = 2;
+
+	f = JFFS2_INODE_INFO(inode);
+
+	ri->data_crc = cpu_to_je32(0);
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+	
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+	rd->type = DT_DIR;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+	dir_i->i_nlink++;
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+	struct jffs2_full_dirent *fd;
+	int ret;
+
+	for (fd = f->dents ; fd; fd = fd->next) {
+		if (fd->ino)
+			return -ENOTEMPTY;
+	}
+	ret = jffs2_unlink(dir_i, dentry);
+	if (!ret)
+		dir_i->i_nlink--;
+	return ret;
+}
+
+static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, mknod_arg_t rdev)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	jint16_t dev;
+	int devlen = 0;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+	
+	if (S_ISBLK(mode) || S_ISCHR(mode)) {
+		dev = cpu_to_je16(old_encode_dev(rdev));
+		devlen = sizeof(dev);
+	}
+	
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+	inode->i_op = &jffs2_file_inode_operations;
+	init_special_inode(inode, inode->i_mode, rdev);
+
+	f = JFFS2_INODE_INFO(inode);
+
+	ri->dsize = ri->csize = cpu_to_je32(devlen);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + devlen);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->compr = JFFS2_COMPR_NONE;
+	ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+
+	/* XXX: This is ugly. */
+	rd->type = (mode & S_IFMT) >> 12;
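+	/* (It works, though: the DT_* values are defined as S_IF* >> 12, so
+	   the shift yields exactly the matching directory-entry type.) */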
+
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+
+	return 0;
+}
+
+static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+                        struct inode *new_dir_i, struct dentry *new_dentry)
+{
+	int ret;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
+	struct jffs2_inode_info *victim_f = NULL;
+	uint8_t type;
+
+	/* The VFS will check for us and prevent trying to rename a 
+	 * file over a directory and vice versa, but if it's a directory,
+	 * the VFS can't check whether the victim is empty. The filesystem
+	 * needs to do that for itself.
+	 */
+	if (new_dentry->d_inode) {
+		victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
+		if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+			struct jffs2_full_dirent *fd;
+
+			down(&victim_f->sem);
+			for (fd = victim_f->dents; fd; fd = fd->next) {
+				if (fd->ino) {
+					up(&victim_f->sem);
+					return -ENOTEMPTY;
+				}
+			}
+			up(&victim_f->sem);
+		}
+	}
+
+	/* XXX: We probably ought to alloc enough space for
+	   both nodes at the same time. Writing the new link, 
+	   then getting -ENOSPC, is quite bad :)
+	*/
+
+	/* Make a hard link */
+	
+	/* XXX: This is ugly */
+	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+	if (!type) type = DT_REG;
+
+	ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), 
+			    old_dentry->d_inode->i_ino, type,
+			    new_dentry->d_name.name, new_dentry->d_name.len);
+
+	if (ret)
+		return ret;
+
+	if (victim_f) {
+		/* There was a victim. Kill it off nicely */
+		new_dentry->d_inode->i_nlink--;
+		/* Don't oops if the victim was a dirent pointing to an
+		   inode which didn't exist. */
+		if (victim_f->inocache) {
+			down(&victim_f->sem);
+			victim_f->inocache->nlink--;
+			up(&victim_f->sem);
+		}
+	}
+
+	/* If it was a directory we moved, and there was no victim, 
+	   increase i_nlink on its new parent */
+	if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+		new_dir_i->i_nlink++;
+
+	/* Unlink the original */
+	ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), 
+		      old_dentry->d_name.name, old_dentry->d_name.len, NULL);
+
+	/* We don't touch inode->i_nlink */
+
+	if (ret) {
+		/* Oh shit. We really ought to make a single node which can do both atomically */
+		struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+		down(&f->sem);
+		old_dentry->d_inode->i_nlink++;
+		if (f->inocache)
+			f->inocache->nlink++;
+		up(&f->sem);
+
+		printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
+		/* Might as well let the VFS know */
+		d_instantiate(new_dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+		return ret;
+	}
+
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		old_dir_i->i_nlink--;
+
+	return 0;
+}
+
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
new file mode 100644
index 0000000..41451e8
--- /dev/null
+++ b/fs/jffs2/erase.c
@@ -0,0 +1,442 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: erase.c,v 1.66 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include "nodelist.h"
+
+struct erase_priv_struct {
+	struct jffs2_eraseblock *jeb;
+	struct jffs2_sb_info *c;
+};
+      
+#ifndef __ECOS
+static void jffs2_erase_callback(struct erase_info *);
+#endif
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+
+static void jffs2_erase_block(struct jffs2_sb_info *c,
+			      struct jffs2_eraseblock *jeb)
+{
+	int ret;
+	uint32_t bad_offset;
+#ifdef __ECOS
+       ret = jffs2_flash_erase(c, jeb);
+       if (!ret) {
+               jffs2_erase_succeeded(c, jeb);
+               return;
+       }
+       bad_offset = jeb->offset;
+#else /* Linux */
+	struct erase_info *instr;
+
+	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
+	if (!instr) {
+		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+		spin_lock(&c->erase_completion_lock);
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->erasing_size -= c->sector_size;
+		c->dirty_size += c->sector_size;
+		jeb->dirty_size = c->sector_size;
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	memset(instr, 0, sizeof(*instr));
+
+	instr->mtd = c->mtd;
+	instr->addr = jeb->offset;
+	instr->len = c->sector_size;
+	instr->callback = jffs2_erase_callback;
+	instr->priv = (unsigned long)(&instr[1]);
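+	/* The private data lives in the same allocation, immediately after
+	   the erase_info itself -- hence the &instr[1] above. */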
+	instr->fail_addr = 0xffffffff;
+	
+	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
+	((struct erase_priv_struct *)instr->priv)->c = c;
+
+	ret = c->mtd->erase(c->mtd, instr);
+	if (!ret)
+		return;
+
+	bad_offset = instr->fail_addr;
+	kfree(instr);
+#endif /* __ECOS */
+
+	if (ret == -ENOMEM || ret == -EAGAIN) {
+		/* Erase failed immediately. Refile it on the list */
+		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
+		spin_lock(&c->erase_completion_lock);
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->erasing_size -= c->sector_size;
+		c->dirty_size += c->sector_size;
+		jeb->dirty_size = c->sector_size;
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	if (ret == -EROFS) 
+		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
+	else
+		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
+
+	jffs2_erase_failed(c, jeb, bad_offset);
+}
+
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
+{
+	struct jffs2_eraseblock *jeb;
+
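+	/* A count of zero effectively means "no limit": the !--count test
+	   below then never fires, so we keep going until both lists are
+	   empty. */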
+	down(&c->erase_free_sem);
+
+	spin_lock(&c->erase_completion_lock);
+
+	while (!list_empty(&c->erase_complete_list) ||
+	       !list_empty(&c->erase_pending_list)) {
+
+		if (!list_empty(&c->erase_complete_list)) {
+			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
+			list_del(&jeb->list);
+			spin_unlock(&c->erase_completion_lock);
+			jffs2_mark_erased_block(c, jeb);
+
+			if (!--count) {
+				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
+				goto done;
+			}
+
+		} else if (!list_empty(&c->erase_pending_list)) {
+			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
+			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+			list_del(&jeb->list);
+			c->erasing_size += c->sector_size;
+			c->wasted_size -= jeb->wasted_size;
+			c->free_size -= jeb->free_size;
+			c->used_size -= jeb->used_size;
+			c->dirty_size -= jeb->dirty_size;
+			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
+			jffs2_free_all_node_refs(c, jeb);
+			list_add(&jeb->list, &c->erasing_list);
+			spin_unlock(&c->erase_completion_lock);
+
+			jffs2_erase_block(c, jeb);
+
+		} else {
+			BUG();
+		}
+
+		/* Be nice */
+		cond_resched();
+		spin_lock(&c->erase_completion_lock);
+	}
+
+	spin_unlock(&c->erase_completion_lock);
+ done:
+	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+
+	up(&c->erase_free_sem);
+}
+
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+	spin_lock(&c->erase_completion_lock);
+	list_del(&jeb->list);
+	list_add_tail(&jeb->list, &c->erase_complete_list);
+	spin_unlock(&c->erase_completion_lock);
+	/* Ensure that kupdated calls us again to mark them clean */
+	jffs2_erase_pending_trigger(c);
+}
+
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+	/* For NAND, if the failure did not occur at the device level for a
+	   specific physical page, don't bother updating the bad block table. */
+	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
+		/* We had a device-level failure to erase.  Let's see if we've
+		   failed too many times. */
+		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
+			/* We'd like to give this block another try. */
+			spin_lock(&c->erase_completion_lock);
+			list_del(&jeb->list);
+			list_add(&jeb->list, &c->erase_pending_list);
+			c->erasing_size -= c->sector_size;
+			c->dirty_size += c->sector_size;
+			jeb->dirty_size = c->sector_size;
+			spin_unlock(&c->erase_completion_lock);
+			return;
+		}
+	}
+
+	spin_lock(&c->erase_completion_lock);
+	c->erasing_size -= c->sector_size;
+	c->bad_size += c->sector_size;
+	list_del(&jeb->list);
+	list_add(&jeb->list, &c->bad_list);
+	c->nr_erasing_blocks--;
+	spin_unlock(&c->erase_completion_lock);
+	wake_up(&c->erase_wait);
+}	 
+
+#ifndef __ECOS
+static void jffs2_erase_callback(struct erase_info *instr)
+{
+	struct erase_priv_struct *priv = (void *)instr->priv;
+
+	if(instr->state != MTD_ERASE_DONE) {
+		printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
+		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
+	} else {
+		jffs2_erase_succeeded(priv->c, priv->jeb);
+	}	
+	kfree(instr);
+}
+#endif /* !__ECOS */
+
+/* Hmmm. Maybe we should accept the extra space it takes and make
+   this a standard doubly-linked list? */
+static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
+			struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_inode_cache *ic = NULL;
+	struct jffs2_raw_node_ref **prev;
+
+	prev = &ref->next_in_ino;
+
+	/* Walk the inode's list once, removing any nodes from this eraseblock */
+	while (1) {
+		if (!(*prev)->next_in_ino) {
+			/* We're looking at the jffs2_inode_cache, which is 
+			   at the end of the linked list. Stash it and continue
+			   from the beginning of the list */
+			ic = (struct jffs2_inode_cache *)(*prev);
+			prev = &ic->nodes;
+			continue;
+		} 
+
+		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
+			/* It's in the block we're erasing */
+			struct jffs2_raw_node_ref *this;
+
+			this = *prev;
+			*prev = this->next_in_ino;
+			this->next_in_ino = NULL;
+
+			if (this == ref)
+				break;
+
+			continue;
+		}
+		/* Not to be deleted. Skip */
+		prev = &((*prev)->next_in_ino);
+	}
+
+	/* PARANOIA */
+	if (!ic) {
+		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+		return;
+	}
+
+	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+		  jeb->offset, jeb->offset + c->sector_size, ic->ino));
+
+	D2({
+		int i=0;
+		struct jffs2_raw_node_ref *this;
+		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
+
+		this = ic->nodes;
+	   
+		while(this) {
+			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
+			if (++i == 5) {
+				printk("\n" KERN_DEBUG);
+				i=0;
+			}
+			this = this->next_in_ino;
+		}
+		printk("\n");
+	});
+
+	if (ic->nodes == (void *)ic) {
+		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
+		jffs2_del_ino_cache(c, ic);
+		jffs2_free_inode_cache(ic);
+	}
+}
+
+static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *ref;
+	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
+	while(jeb->first_node) {
+		ref = jeb->first_node;
+		jeb->first_node = ref->next_phys;
+		
+		/* Remove from the inode-list */
+		if (ref->next_in_ino)
+			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
+		/* else it was a non-inode node or already removed, so don't bother */
+
+		jffs2_free_raw_node_ref(ref);
+	}
+	jeb->last_node = NULL;
+}
+
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *marker_ref = NULL;
+	unsigned char *ebuf;
+	size_t retlen;
+	int ret;
+	uint32_t bad_offset;
+
+	if (!jffs2_cleanmarker_oob(c)) {
+		marker_ref = jffs2_alloc_raw_node_ref();
+		if (!marker_ref) {
+			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
+			/* Stick it back on the list from whence it came and come back later */
+			jffs2_erase_pending_trigger(c);
+			spin_lock(&c->erase_completion_lock);
+			list_add(&jeb->list, &c->erase_complete_list);
+			spin_unlock(&c->erase_completion_lock);
+			return;
+		}
+	}
+	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!ebuf) {
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
+	} else {
+		uint32_t ofs = jeb->offset;
+
+		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+		while(ofs < jeb->offset + c->sector_size) {
+			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+			int i;
+
+			bad_offset = ofs;
+
+			ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+			if (ret) {
+				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+				goto bad;
+			}
+			if (retlen != readlen) {
+				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+				goto bad;
+			}
+			for (i=0; i<readlen; i += sizeof(unsigned long)) {
+				/* It's OK. We know it's properly aligned */
+				unsigned long datum = *(unsigned long *)(&ebuf[i]);
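+				/* Erased flash reads back as all 0xFF, so any
+				   word where datum + 1 is non-zero (i.e. the
+				   datum isn't ~0) means the erase didn't take */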
+				if (datum + 1) {
+					bad_offset += i;
+					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
+				bad: 
+					if (!jffs2_cleanmarker_oob(c))
+						jffs2_free_raw_node_ref(marker_ref);
+					kfree(ebuf);
+				bad2:
+					spin_lock(&c->erase_completion_lock);
+					/* Stick it on a list (any list) so
+					   erase_failed can take it right off
+					   again.  Silly, but shouldn't happen
+					   often. */
+					list_add(&jeb->list, &c->erasing_list);
+					spin_unlock(&c->erase_completion_lock);
+					jffs2_erase_failed(c, jeb, bad_offset);
+					return;
+				}
+			}
+			ofs += readlen;
+			cond_resched();
+		}
+		kfree(ebuf);
+	}
+
+	bad_offset = jeb->offset;
+
+	/* Write the erase complete marker */	
+	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
+	if (jffs2_cleanmarker_oob(c)) {
+
+		if (jffs2_write_nand_cleanmarker(c, jeb))
+			goto bad2;
+			
+		jeb->first_node = jeb->last_node = NULL;
+
+		jeb->free_size = c->sector_size;
+		jeb->used_size = 0;
+		jeb->dirty_size = 0;
+		jeb->wasted_size = 0;
+	} else {
+		struct kvec vecs[1];
+		struct jffs2_unknown_node marker = {
+			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
+			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+			.totlen =	cpu_to_je32(c->cleanmarker_size)
+		};
+
+		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
+
+		vecs[0].iov_base = (unsigned char *) &marker;
+		vecs[0].iov_len = sizeof(marker);
+		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
+		
+		if (ret) {
+			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+			       jeb->offset, ret);
+			goto bad2;
+		}
+		if (retlen != sizeof(marker)) {
+			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+			       jeb->offset, sizeof(marker), retlen);
+			goto bad2;
+		}
+
+		marker_ref->next_in_ino = NULL;
+		marker_ref->next_phys = NULL;
+		marker_ref->flash_offset = jeb->offset | REF_NORMAL;
+		marker_ref->__totlen = c->cleanmarker_size;
+			
+		jeb->first_node = jeb->last_node = marker_ref;
+			
+		jeb->free_size = c->sector_size - c->cleanmarker_size;
+		jeb->used_size = c->cleanmarker_size;
+		jeb->dirty_size = 0;
+		jeb->wasted_size = 0;
+	}
+
+	spin_lock(&c->erase_completion_lock);
+	c->erasing_size -= c->sector_size;
+	c->free_size += jeb->free_size;
+	c->used_size += jeb->used_size;
+
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	list_add_tail(&jeb->list, &c->free_list);
+	c->nr_erasing_blocks--;
+	c->nr_free_blocks++;
+	spin_unlock(&c->erase_completion_lock);
+	wake_up(&c->erase_wait);
+}
+
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
new file mode 100644
index 0000000..0c607c1
--- /dev/null
+++ b/fs/jffs2/file.c
@@ -0,0 +1,290 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: file.c,v 1.99 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include "nodelist.h"
+
+extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak));
+
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+			       unsigned start, unsigned end);
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+				unsigned start, unsigned end);
+static int jffs2_readpage (struct file *filp, struct page *pg);
+
+int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+
+	/* Trigger GC to flush any pending writes for this inode */
+	jffs2_flush_wbuf_gc(c, inode->i_ino);
+			
+	return 0;	
+}
+
+struct file_operations jffs2_file_operations =
+{
+	.llseek =	generic_file_llseek,
+	.open =		generic_file_open,
+	.read =		generic_file_read,
+	.write =	generic_file_write,
+	.ioctl =	jffs2_ioctl,
+	.mmap =		generic_file_readonly_mmap,
+	.fsync =	jffs2_fsync,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,29)
+	.sendfile =	generic_file_sendfile
+#endif
+};
+
+/* jffs2_file_inode_operations */
+
+struct inode_operations jffs2_file_inode_operations =
+{
+	.setattr =	jffs2_setattr
+};
+
+struct address_space_operations jffs2_file_address_operations =
+{
+	.readpage =	jffs2_readpage,
+	.prepare_write =jffs2_prepare_write,
+	.commit_write =	jffs2_commit_write
+};
+
+static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	unsigned char *pg_buf;
+	int ret;
+
+	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
+
+	if (!PageLocked(pg))
+                PAGE_BUG(pg);
+
+	pg_buf = kmap(pg);
+	/* FIXME: Can kmap fail? */
+
+	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+	if (ret) {
+		ClearPageUptodate(pg);
+		SetPageError(pg);
+	} else {
+		SetPageUptodate(pg);
+		ClearPageError(pg);
+	}
+
+	flush_dcache_page(pg);
+	kunmap(pg);
+
+	D2(printk(KERN_DEBUG "readpage finished\n"));
+	return 0;
+}
+
+int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
+{
+	int ret = jffs2_do_readpage_nolock(inode, pg);
+	unlock_page(pg);
+	return ret;
+}
+
+
+static int jffs2_readpage (struct file *filp, struct page *pg)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
+	int ret;
+	
+	down(&f->sem);
+	ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
+	up(&f->sem);
+	return ret;
+}
+
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+				unsigned start, unsigned end)
+{
+	struct inode *inode = pg->mapping->host;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
+	int ret = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));
+
+	if (pageofs > inode->i_size) {
+		/* Make new hole frag from old EOF to new page */
+		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+		struct jffs2_raw_inode ri;
+		struct jffs2_full_dnode *fn;
+		uint32_t phys_ofs, alloc_len;
+		
+		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+			  (unsigned int)inode->i_size, pageofs));
+
+		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
+		if (ret)
+			return ret;
+
+		down(&f->sem);
+		memset(&ri, 0, sizeof(ri));
+
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri));
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.mode = cpu_to_jemode(inode->i_mode);
+		ri.uid = cpu_to_je16(inode->i_uid);
+		ri.gid = cpu_to_je16(inode->i_gid);
+		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
+		ri.offset = cpu_to_je32(inode->i_size);
+		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+		ri.csize = cpu_to_je32(0);
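+		/* A COMPR_ZERO node stores no data on the medium; it reads
+		   back as dsize bytes of zeroes, which is how JFFS2
+		   represents a hole */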
+		ri.compr = JFFS2_COMPR_ZERO;
+		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+		ri.data_crc = cpu_to_je32(0);
+		
+		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+		if (IS_ERR(fn)) {
+			ret = PTR_ERR(fn);
+			jffs2_complete_reservation(c);
+			up(&f->sem);
+			return ret;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		if (ret) {
+			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
+			jffs2_mark_node_obsolete(c, fn->raw);
+			jffs2_free_full_dnode(fn);
+			jffs2_complete_reservation(c);
+			up(&f->sem);
+			return ret;
+		}
+		jffs2_complete_reservation(c);
+		inode->i_size = pageofs;
+		up(&f->sem);
+	}
+	
+	/* Read in the page if it wasn't already present, unless it's a whole page */
+	if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
+		down(&f->sem);
+		ret = jffs2_do_readpage_nolock(inode, pg);
+		up(&f->sem);
+	}
+	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
+	return ret;
+}
+
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+			       unsigned start, unsigned end)
+{
+	/* Actually commit the write from the page cache page we're looking at.
+	 * For now, we write out everything from the word-aligned start of the
+	 * changed range up to 'end' each time. It sucks, but it's simple.
+	 */
+	struct inode *inode = pg->mapping->host;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode *ri;
+	unsigned aligned_start = start & ~3;
+	int ret = 0;
+	uint32_t writtenlen = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
+		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
+
+	if (!start && end == PAGE_CACHE_SIZE) {
+		/* We need to avoid deadlock with page_cache_read() in
+		   jffs2_garbage_collect_pass(). So we have to mark the
+		   page up to date, to prevent page_cache_read() from 
+		   trying to re-lock it. */
+		SetPageUptodate(pg);
+	}
+
+	ri = jffs2_alloc_raw_inode();
+
+	if (!ri) {
+		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
+		return -ENOMEM;
+	}
+
+	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
+	ri->ino = cpu_to_je32(inode->i_ino);
+	ri->mode = cpu_to_jemode(inode->i_mode);
+	ri->uid = cpu_to_je16(inode->i_uid);
+	ri->gid = cpu_to_je16(inode->i_gid);
+	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
+	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
+
+	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
+	   hurt to do it again. The alternative is ifdefs, which are ugly. */
+	kmap(pg);
+
+	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
+				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+				      end - aligned_start, &writtenlen);
+
+	kunmap(pg);
+
+	if (ret) {
+		/* There was an error writing. */
+		SetPageError(pg);
+	}
+	
+	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
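+	/* (The write above started at aligned_start = start & ~3, i.e. up to
+	   three bytes before the range the caller asked for.) */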
+	if (writtenlen < (start&3))
+		writtenlen = 0;
+	else
+		writtenlen -= (start&3);
+
+	if (writtenlen) {
+		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
+			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
+			inode->i_blocks = (inode->i_size + 511) >> 9;
+			
+			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
+		}
+	}
+
+	jffs2_free_raw_inode(ri);
+
+	if (start+writtenlen < end) {
+		/* generic_file_write has written more to the page cache than we've
+		   actually written to the medium. Mark the page !Uptodate so that 
+		   it gets reread */
+		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
+		SetPageError(pg);
+		ClearPageUptodate(pg);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret));
+	return writtenlen?writtenlen:ret;
+}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
new file mode 100644
index 0000000..30ab233
--- /dev/null
+++ b/fs/jffs2/fs.c
@@ -0,0 +1,677 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: fs.c,v 1.51 2004/11/28 12:19:37 dedekind Exp $
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/vfs.h>
+#include <linux/crc32.h>
+#include "nodelist.h"
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c);
+
+static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
+{
+	struct jffs2_full_dnode *old_metadata, *new_metadata;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode *ri;
+	unsigned short dev;
+	unsigned char *mdata = NULL;
+	int mdatalen = 0;
+	unsigned int ivalid;
+	uint32_t phys_ofs, alloclen;
+	int ret;
+	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+	ret = inode_change_ok(inode, iattr);
+	if (ret) 
+		return ret;
+
+	/* Special cases - we don't want more than one data node
+	   for these types on the medium at any time. So setattr
+	   must read the original data associated with the node
+	   (i.e. the device numbers or the target name) and write
+	   it out again with the appropriate data attached */
+	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+		/* For these, we don't actually need to read the old node */
+		dev = old_encode_dev(inode->i_rdev);
+		mdata = (char *)&dev;
+		mdatalen = sizeof(dev);
+		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
+	} else if (S_ISLNK(inode->i_mode)) {
+		mdatalen = f->metadata->size;
+		mdata = kmalloc(f->metadata->size, GFP_USER);
+		if (!mdata)
+			return -ENOMEM;
+		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
+		if (ret) {
+			kfree(mdata);
+			return ret;
+		}
+		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
+	}
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri) {
+		if (S_ISLNK(inode->i_mode))
+			kfree(mdata);
+		return -ENOMEM;
+	}
+		
+	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		if (S_ISLNK(inode->i_mode))
+			kfree(mdata);
+		return ret;
+	}
+	down(&f->sem);
+	ivalid = iattr->ia_valid;
+	
+	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->ino = cpu_to_je32(inode->i_ino);
+	ri->version = cpu_to_je32(++f->highest_version);
+
+	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
+	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
+
+	if (ivalid & ATTR_MODE)
+		if (iattr->ia_mode & S_ISGID &&
+		    !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
+			ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
+		else 
+			ri->mode = cpu_to_jemode(iattr->ia_mode);
+	else
+		ri->mode = cpu_to_jemode(inode->i_mode);
+
+
+	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
+	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
+	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
+	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
+
+	ri->offset = cpu_to_je32(0);
+	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
+	ri->compr = JFFS2_COMPR_NONE;
+	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+		/* It's an extension. Make it a hole node */
+		ri->compr = JFFS2_COMPR_ZERO;
+		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
+		ri->offset = cpu_to_je32(inode->i_size);
+	}
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	if (mdatalen)
+		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
+	else
+		ri->data_crc = cpu_to_je32(0);
+
+	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
+	if (S_ISLNK(inode->i_mode))
+		kfree(mdata);
+	
+	if (IS_ERR(new_metadata)) {
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_inode(ri);
+		up(&f->sem);
+		return PTR_ERR(new_metadata);
+	}
+	/* It worked. Update the inode */
+	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+	inode->i_mode = jemode_to_cpu(ri->mode);
+	inode->i_uid = je16_to_cpu(ri->uid);
+	inode->i_gid = je16_to_cpu(ri->gid);
+
+
+	old_metadata = f->metadata;
+
+	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+		jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
+
+	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
+		inode->i_size = iattr->ia_size;
+		f->metadata = NULL;
+	} else {
+		f->metadata = new_metadata;
+	}
+	if (old_metadata) {
+		jffs2_mark_node_obsolete(c, old_metadata->raw);
+		jffs2_free_full_dnode(old_metadata);
+	}
+	jffs2_free_raw_inode(ri);
+
+	up(&f->sem);
+	jffs2_complete_reservation(c);
+
+	/* We have to do the vmtruncate() without f->sem held, since
+	   some pages may be locked and waiting for it in readpage(). 
+	   We are protected from a simultaneous write() extending i_size
+	   back past iattr->ia_size, because do_truncate() holds the
+	   generic inode semaphore. */
+	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+		vmtruncate(inode, iattr->ia_size);
+
+	return 0;
+}
+
+int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	return jffs2_do_setattr(dentry->d_inode, iattr);
+}
+
+int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	unsigned long avail;
+
+	buf->f_type = JFFS2_SUPER_MAGIC;
+	buf->f_bsize = 1 << PAGE_SHIFT;
+	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
+	buf->f_files = 0;
+	buf->f_ffree = 0;
+	buf->f_namelen = JFFS2_MAX_NAME_LEN;
+
+	spin_lock(&c->erase_completion_lock);
+
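+	/* Free space as seen by the user is dirty + free, minus the blocks
+	   JFFS2 keeps in reserve for its own writes, so that df doesn't
+	   advertise space which jffs2_reserve_space() would refuse anyway */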
+	avail = c->dirty_size + c->free_size;
+	if (avail > c->sector_size * c->resv_blocks_write)
+		avail -= c->sector_size * c->resv_blocks_write;
+	else
+		avail = 0;
+
+	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
+
+	D2(jffs2_dump_block_lists(c));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	return 0;
+}
+
+
+void jffs2_clear_inode (struct inode *inode)
+{
+	/* We can forget about this inode for now - drop all 
+	 *  the nodelists associated with it, etc.
+	 */
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	
+	D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+
+	jffs2_do_clear_inode(c, f);
+}
+
+void jffs2_read_inode (struct inode *inode)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_sb_info *c;
+	struct jffs2_raw_inode latest_node;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+
+	f = JFFS2_INODE_INFO(inode);
+	c = JFFS2_SB_INFO(inode->i_sb);
+
+	jffs2_init_inode_info(f);
+	
+	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
+
+	if (ret) {
+		make_bad_inode(inode);
+		up(&f->sem);
+		return;
+	}
+	inode->i_mode = jemode_to_cpu(latest_node.mode);
+	inode->i_uid = je16_to_cpu(latest_node.uid);
+	inode->i_gid = je16_to_cpu(latest_node.gid);
+	inode->i_size = je32_to_cpu(latest_node.isize);
+	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
+	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
+
+	inode->i_nlink = f->inocache->nlink;
+
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = (inode->i_size + 511) >> 9;
+	
+	switch (inode->i_mode & S_IFMT) {
+		jint16_t rdev;
+
+	case S_IFLNK:
+		inode->i_op = &jffs2_symlink_inode_operations;
+		break;
+		
+	case S_IFDIR:
+	{
+		struct jffs2_full_dirent *fd;
+
+		for (fd=f->dents; fd; fd = fd->next) {
+			if (fd->type == DT_DIR && fd->ino)
+				inode->i_nlink++;
+		}
+		/* and '..' */
+		inode->i_nlink++;
+		/* Root dir gets i_nlink 3 for some reason */
+		if (inode->i_ino == 1)
+			inode->i_nlink++;
+
+		inode->i_op = &jffs2_dir_inode_operations;
+		inode->i_fop = &jffs2_dir_operations;
+		break;
+	}
+	case S_IFREG:
+		inode->i_op = &jffs2_file_inode_operations;
+		inode->i_fop = &jffs2_file_operations;
+		inode->i_mapping->a_ops = &jffs2_file_address_operations;
+		inode->i_mapping->nrpages = 0;
+		break;
+
+	case S_IFBLK:
+	case S_IFCHR:
+		/* Read the device numbers from the media */
+		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
+		if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
+			/* Eep */
+			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			make_bad_inode(inode);
+			return;
+		}			
+
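+		/* fall through -- sockets and FIFOs share the special-inode
+		   setup below (the device number is ignored for them) */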
+	case S_IFSOCK:
+	case S_IFIFO:
+		inode->i_op = &jffs2_file_inode_operations;
+		init_special_inode(inode, inode->i_mode,
+				   old_decode_dev((je16_to_cpu(rdev))));
+		break;
+
+	default:
+		printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
+	}
+
+	up(&f->sem);
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+}
+
+void jffs2_dirty_inode(struct inode *inode)
+{
+	struct iattr iattr;
+
+	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
+		D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+		return;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
+
+	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
+	iattr.ia_mode = inode->i_mode;
+	iattr.ia_uid = inode->i_uid;
+	iattr.ia_gid = inode->i_gid;
+	iattr.ia_atime = inode->i_atime;
+	iattr.ia_mtime = inode->i_mtime;
+	iattr.ia_ctime = inode->i_ctime;
+
+	jffs2_do_setattr(inode, &iattr);
+}
+
+int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
+		return -EROFS;
+
+	/* We stop if it was running, then restart if it needs to.
+	   This also catches the case where it was stopped and this
+	   is just a remount to restart it.
+	   Flush the writebuffer, if necessary, else we lose it */
+	if (!(sb->s_flags & MS_RDONLY)) {
+		jffs2_stop_garbage_collect_thread(c);
+		down(&c->alloc_sem);
+		jffs2_flush_wbuf_pad(c);
+		up(&c->alloc_sem);
+	}	
+
+	if (!(*flags & MS_RDONLY))
+		jffs2_start_garbage_collect_thread(c);
+	
+	*flags |= MS_NOATIME;
+
+	return 0;
+}
+
+void jffs2_write_super (struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	sb->s_dirt = 0;
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
+	jffs2_garbage_collect_trigger(c);
+	jffs2_erase_pending_blocks(c, 0);
+	jffs2_flush_wbuf_gc(c, 0);
+}
+
+
+/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
+   fill in the raw_inode while you're at it. */
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
+{
+	struct inode *inode;
+	struct super_block *sb = dir_i->i_sb;
+	struct jffs2_sb_info *c;
+	struct jffs2_inode_info *f;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
+
+	c = JFFS2_SB_INFO(sb);
+	
+	inode = new_inode(sb);
+	
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	f = JFFS2_INODE_INFO(inode);
+	jffs2_init_inode_info(f);
+
+	memset(ri, 0, sizeof(*ri));
+	/* Set OS-specific defaults for new inodes */
+	ri->uid = cpu_to_je16(current->fsuid);
+
+	if (dir_i->i_mode & S_ISGID) {
+		ri->gid = cpu_to_je16(dir_i->i_gid);
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else {
+		ri->gid = cpu_to_je16(current->fsgid);
+	}
+	ri->mode =  cpu_to_jemode(mode);
+	ret = jffs2_do_new_inode (c, f, mode, ri);
+	if (ret) {
+		make_bad_inode(inode);
+		iput(inode);
+		return ERR_PTR(ret);
+	}
+	inode->i_nlink = 1;
+	inode->i_ino = je32_to_cpu(ri->ino);
+	inode->i_mode = jemode_to_cpu(ri->mode);
+	inode->i_gid = je16_to_cpu(ri->gid);
+	inode->i_uid = je16_to_cpu(ri->uid);
+	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = 0;
+	inode->i_size = 0;
+
+	insert_inode_hash(inode);
+
+	return inode;
+}
+
+
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct jffs2_sb_info *c;
+	struct inode *root_i;
+	int ret;
+	size_t blocks;
+
+	c = JFFS2_SB_INFO(sb);
+
+#ifndef CONFIG_JFFS2_FS_NAND
+	if (c->mtd->type == MTD_NANDFLASH) {
+		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
+		return -EINVAL;
+	}
+#endif
+
+	c->flash_size = c->mtd->size;
+
+	/* 
+	 * Check if we have to concatenate physical blocks into larger virtual blocks
+	 * to reduce the memory size of c->blocks (kmalloc allows at most a 128KiB allocation)
+	 */
+	c->sector_size = c->mtd->erasesize; 
+	blocks = c->flash_size / c->sector_size;
+	if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) {
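+		/* Double the virtual sector size (and halve the entry count)
+		   until the c->blocks array fits in a single 128KiB kmalloc */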
+		while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
+			blocks >>= 1;
+			c->sector_size <<= 1;
+		}	
+	}
+
+	/*
+	 * Size alignment check
+	 */
+	if ((c->sector_size * blocks) != c->flash_size) {
+		c->flash_size = c->sector_size * blocks;		
+		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
+			c->flash_size / 1024);
+	}
+
+	if (c->sector_size != c->mtd->erasesize)
+		printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual block size (%dKiB) instead\n", 
+			c->mtd->erasesize / 1024, c->sector_size / 1024);
+
+	if (c->flash_size < 5*c->sector_size) {
+		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
+		return -EINVAL;
+	}
+
+	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
+	/* Joern -- stick alignment for weird 8-byte-page flash here */
+
+	/* NAND (or other bizarre) flash... do setup accordingly */
+	ret = jffs2_flash_setup(c);
+	if (ret)
+		return ret;
+
+	c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
+	if (!c->inocache_list) {
+		ret = -ENOMEM;
+		goto out_wbuf;
+	}
+	memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));
+
+	if ((ret = jffs2_do_mount_fs(c)))
+		goto out_inohash;
+
+	ret = -EINVAL;
+
+	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
+	root_i = iget(sb, 1);
+	if (is_bad_inode(root_i)) {
+		D1(printk(KERN_WARNING "get root inode failed\n"));
+		goto out_nodes;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
+	sb->s_root = d_alloc_root(root_i);
+	if (!sb->s_root)
+		goto out_root_i;
+
+#if LINUX_VERSION_CODE >= 0x20403
+	sb->s_maxbytes = 0xFFFFFFFF;
+#endif
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = JFFS2_SUPER_MAGIC;
+	if (!(sb->s_flags & MS_RDONLY))
+		jffs2_start_garbage_collect_thread(c);
+	return 0;
+
+ out_root_i:
+	iput(root_i);
+ out_nodes:
+	jffs2_free_ino_caches(c);
+	jffs2_free_raw_node_refs(c);
+	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		vfree(c->blocks);
+	else
+		kfree(c->blocks);
+ out_inohash:
+	kfree(c->inocache_list);
+ out_wbuf:
+	jffs2_flash_cleanup(c);
+
+	return ret;
+}
+
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+				   struct jffs2_inode_info *f)
+{
+	iput(OFNI_EDONI_2SFFJ(f));
+}
+
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+						     int inum, int nlink)
+{
+	struct inode *inode;
+	struct jffs2_inode_cache *ic;
+	if (!nlink) {
+		/* The inode has zero nlink but its nodes weren't yet marked
+		   obsolete. This has to be because we're still waiting for 
+		   the final (close() and) iput() to happen.
+
+		   There's a possibility that the final iput() could have 
+		   happened while we were contemplating. In order to ensure
+		   that we don't cause a new read_inode() (which would fail)
+		   for the inode in question, we use ilookup() in this case
+		   instead of iget().
+
+		   The nlink can't _become_ zero at this point because we're 
+		   holding the alloc_sem, and jffs2_do_unlink() would also
+		   need that while decrementing nlink on any inode.
+		*/
+		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
+		if (!inode) {
+			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
+				  inum));
+
+			spin_lock(&c->inocache_lock);
+			ic = jffs2_get_ino_cache(c, inum);
+			if (!ic) {
+				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
+				spin_unlock(&c->inocache_lock);
+				return NULL;
+			}
+			if (ic->state != INO_STATE_CHECKEDABSENT) {
+				/* Wait for progress. Don't just loop */
+				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
+					  ic->ino, ic->state));
+				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			} else {
+				spin_unlock(&c->inocache_lock);
+			}
+
+			return NULL;
+		}
+	} else {
+		/* Inode has links to it still; they're not going away because
+		   jffs2_do_unlink() would need the alloc_sem and we have it.
+		   Just iget() it, and if read_inode() is necessary that's OK.
+		*/
+		inode = iget(OFNI_BS_2SFFJ(c), inum);
+		if (!inode)
+			return ERR_PTR(-ENOMEM);
+	}
+	if (is_bad_inode(inode)) {
+		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
+		       inum, nlink);
+		/* NB. This will happen again. We need to do something appropriate here. */
+		iput(inode);
+		return ERR_PTR(-EIO);
+	}
+
+	return JFFS2_INODE_INFO(inode);
+}
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, 
+				   struct jffs2_inode_info *f, 
+				   unsigned long offset,
+				   unsigned long *priv)
+{
+	struct inode *inode = OFNI_EDONI_2SFFJ(f);
+	struct page *pg;
+
+	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, 
+			     (void *)jffs2_do_readpage_unlock, inode);
+	if (IS_ERR(pg))
+		return (void *)pg;
+	
+	*priv = (unsigned long)pg;
+	return kmap(pg);
+}
+
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+			   unsigned char *ptr,
+			   unsigned long *priv)
+{
+	struct page *pg = (void *)*priv;
+
+	kunmap(pg);
+	page_cache_release(pg);
+}
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c) {
+	int ret = 0;
+	
+	if (jffs2_cleanmarker_oob(c)) {
+		/* NAND flash... do setup accordingly */
+		ret = jffs2_nand_flash_setup(c);
+		if (ret)
+			return ret;
+	}
+
+	/* add setups for other bizarre flashes here... */
+	if (jffs2_nor_ecc(c)) {
+		ret = jffs2_nor_ecc_flash_setup(c);
+		if (ret)
+			return ret;
+	}
+	return ret;
+}
+
+void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
+
+	if (jffs2_cleanmarker_oob(c)) {
+		jffs2_nand_flash_cleanup(c);
+	}
+
+	/* add cleanups for other bizarre flashes here... */
+	if (jffs2_nor_ecc(c)) {
+		jffs2_nor_ecc_flash_cleanup(c);
+	}
+}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
new file mode 100644
index 0000000..87ec74f
--- /dev/null
+++ b/fs/jffs2/gc.c
@@ -0,0 +1,1246 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: gc.c,v 1.144 2004/12/21 11:18:50 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include <linux/stat.h>
+#include "nodelist.h"
+#include "compr.h"
+
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, 
+					  struct jffs2_inode_cache *ic,
+					  struct jffs2_raw_node_ref *raw);
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
+static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				      uint32_t start, uint32_t end);
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				       uint32_t start, uint32_t end);
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
+			       struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
+
+/* Called with erase_completion_lock held */
+static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
+{
+	struct jffs2_eraseblock *ret;
+	struct list_head *nextlist = NULL;
+	int n = jiffies % 128;
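+	/* n is a cheap pseudo-random number in [0,127]; the thresholds below
+	   (50, 110, 126) weight how often each candidate list gets picked */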
+
+	/* Pick an eraseblock to garbage collect next. This is where we'll
+	   put the clever wear-levelling algorithms. Eventually.  */
+	/* We possibly want to favour the dirtier blocks more when the
+	   number of free blocks is low. */
+	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
+		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
+		nextlist = &c->bad_used_list;
+	} else if (n < 50 && !list_empty(&c->erasable_list)) {
+		/* Note that most of them will have gone directly to be erased. 
+		   So don't favour the erasable_list _too_ much. */
+		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+		nextlist = &c->erasable_list;
+	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
+		/* Most of the time, pick one off the very_dirty list */
+		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+		nextlist = &c->very_dirty_list;
+	} else if (n < 126 && !list_empty(&c->dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
+		nextlist = &c->dirty_list;
+	} else if (!list_empty(&c->clean_list)) {
+		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
+		nextlist = &c->clean_list;
+	} else if (!list_empty(&c->dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
+
+		nextlist = &c->dirty_list;
+	} else if (!list_empty(&c->very_dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+		nextlist = &c->very_dirty_list;
+	} else if (!list_empty(&c->erasable_list)) {
+		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+
+		nextlist = &c->erasable_list;
+	} else {
+		/* Eep. All were empty */
+		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+		return NULL;
+	}
+
+	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
+	list_del(&ret->list);
+	c->gcblock = ret;
+	ret->gc_node = ret->first_node;
+	if (!ret->gc_node) {
+		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+		BUG();
+	}
+	
+	/* Have we accidentally picked a clean block with wasted space ? */
+	if (ret->wasted_size) {
+		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+		ret->dirty_size += ret->wasted_size;
+		c->wasted_size -= ret->wasted_size;
+		c->dirty_size += ret->wasted_size;
+		ret->wasted_size = 0;
+	}
+
+	D2(jffs2_dump_block_lists(c));
+	return ret;
+}
+
+/* jffs2_garbage_collect_pass
+ * Make a single attempt to progress GC. Move one node, and possibly
+ * start erasing one eraseblock.
+ */
+int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_inode_cache *ic;
+	struct jffs2_eraseblock *jeb;
+	struct jffs2_raw_node_ref *raw;
+	int ret = 0, inum, nlink;
+
+	if (down_interruptible(&c->alloc_sem))
+		return -EINTR;
+
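+	/* Before GC proper can run, every inode left over from the mount scan
+	   must have had its node CRCs verified. Each trip round this loop runs
+	   the CRC check for at most one inode (skipping absent or already-checked
+	   ones) and then returns, so the work is spread over many passes. */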
+	for (;;) {
+		spin_lock(&c->erase_completion_lock);
+		if (!c->unchecked_size)
+			break;
+
+		/* We can't start doing GC yet. We haven't finished checking
+		   the node CRCs etc. Do it now. */
+		
+		/* checked_ino is protected by the alloc_sem */
+		if (c->checked_ino > c->highest_ino) {
+			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
+			       c->unchecked_size);
+			D2(jffs2_dump_block_lists(c));
+			spin_unlock(&c->erase_completion_lock);
+			BUG();
+		}
+
+		spin_unlock(&c->erase_completion_lock);
+
+		spin_lock(&c->inocache_lock);
+
+		ic = jffs2_get_ino_cache(c, c->checked_ino++);
+
+		if (!ic) {
+			spin_unlock(&c->inocache_lock);
+			continue;
+		}
+
+		if (!ic->nlink) {
+			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
+				  ic->ino));
+			spin_unlock(&c->inocache_lock);
+			continue;
+		}
+		switch(ic->state) {
+		case INO_STATE_CHECKEDABSENT:
+		case INO_STATE_PRESENT:
+			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+			spin_unlock(&c->inocache_lock);
+			continue;
+
+		case INO_STATE_GC:
+		case INO_STATE_CHECKING:
+			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
+			spin_unlock(&c->inocache_lock);
+			BUG();
+
+		case INO_STATE_READING:
+			/* We need to wait for it to finish, lest we move on
+			   and trigger the BUG() above while we haven't yet 
+			   finished checking all its nodes */
+			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+			up(&c->alloc_sem);
+			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			return 0;
+
+		default:
+			BUG();
+
+		case INO_STATE_UNCHECKED:
+			;
+		}
+		ic->state = INO_STATE_CHECKING;
+		spin_unlock(&c->inocache_lock);
+
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+
+		ret = jffs2_do_crccheck_inode(c, ic);
+		if (ret)
+			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
+
+		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
+		up(&c->alloc_sem);
+		return ret;
+	}
+
+	/* First, work out which block we're garbage-collecting */
+	jeb = c->gcblock;
+
+	if (!jeb)
+		jeb = jffs2_find_gc_block(c);
+
+	if (!jeb) {
+		D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+		spin_unlock(&c->erase_completion_lock);
+		up(&c->alloc_sem);
+		return -EIO;
+	}
+
+	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+	D1(if (c->nextblock)
+	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
+
+	if (!jeb->used_size) {
+		up(&c->alloc_sem);
+		goto eraseit;
+	}
+
+	raw = jeb->gc_node;
+			
+	while(ref_obsolete(raw)) {
+		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+		raw = raw->next_phys;
+		if (unlikely(!raw)) {
+			printk(KERN_WARNING "eep. End of raw list while there are supposedly still nodes to GC\n");
+			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", 
+			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
+			jeb->gc_node = raw;
+			spin_unlock(&c->erase_completion_lock);
+			up(&c->alloc_sem);
+			BUG();
+		}
+	}
+	jeb->gc_node = raw;
+
+	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+
+	if (!raw->next_in_ino) {
+		/* Inode-less node. Clean marker, snapshot or something like that */
+		/* FIXME: If it's something that needs to be copied, including something
+		   we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
+		spin_unlock(&c->erase_completion_lock);
+		jffs2_mark_node_obsolete(c, raw);
+		up(&c->alloc_sem);
+		goto eraseit_lock;
+	}
+
+	ic = jffs2_raw_ref_to_ic(raw);
+
+	/* We need to hold the inocache. Either the erase_completion_lock or
+	   the inocache_lock is sufficient; we trade down since the inocache_lock
+	   causes less contention. */
+	spin_lock(&c->inocache_lock);
+
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+
+	/* Three possibilities:
+	   1. Inode is already in-core. We must iget it and do proper
+	      updating to its fragtree, etc.
+	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
+	      inocache to prevent a read_inode(), copy the node intact.
+	   3. Inode is not in-core, node is not pristine. We must iget()
+	      and take the slow path.
+	*/
+
+	switch(ic->state) {
+	case INO_STATE_CHECKEDABSENT:
+		/* It's been checked, but it's not currently in-core. 
+		   We can just copy any pristine nodes, but have
+		   to prevent anyone else from doing read_inode() while
+		   we're at it, so we set the state accordingly */
+		if (ref_flags(raw) == REF_PRISTINE)
+			ic->state = INO_STATE_GC;
+		else {
+			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", 
+				  ic->ino));
+		}
+		break;
+
+	case INO_STATE_PRESENT:
+		/* It's in-core. GC must iget() it. */
+		break;
+
+	case INO_STATE_UNCHECKED:
+	case INO_STATE_CHECKING:
+	case INO_STATE_GC:
+		/* Should never happen. We should have finished checking
+		   by the time we actually start doing any GC, and since 
+		   we're holding the alloc_sem, no other garbage collection 
+		   can happen.
+		*/
+		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
+		       ic->ino, ic->state);
+		up(&c->alloc_sem);
+		spin_unlock(&c->inocache_lock);
+		BUG();
+
+	case INO_STATE_READING:
+		/* Someone's currently trying to read it. We must wait for
+		   them to finish and then go through the full iget() route
+		   to do the GC. However, sometimes read_inode() needs to get
+		   the alloc_sem() (for marking nodes invalid) so we must
+		   drop the alloc_sem before sleeping. */
+
+		up(&c->alloc_sem);
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
+			  ic->ino, ic->state));
+		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+		/* And because we dropped the alloc_sem we must start again from the 
+		   beginning. Ponder chance of livelock here -- we're returning success
+		   without actually making any progress.
+
+		   Q: What are the chances that the inode is back in INO_STATE_READING 
+		   again by the time we next enter this function? And that this happens
+		   enough times to cause a real delay?
+
+		   A: Small enough that I don't care :) 
+		*/
+		return 0;
+	}
+
+	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
+	   node intact, and we don't have to muck about with the fragtree etc. 
+	   because we know it's not in-core. If it _was_ in-core, we go through
+	   all the iget() crap anyway */
+
+	if (ic->state == INO_STATE_GC) {
+		spin_unlock(&c->inocache_lock);
+
+		ret = jffs2_garbage_collect_pristine(c, ic, raw);
+
+		spin_lock(&c->inocache_lock);
+		ic->state = INO_STATE_CHECKEDABSENT;
+		wake_up(&c->inocache_wq);
+
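+		/* -EBADFD from jffs2_garbage_collect_pristine() means the node
+		   couldn't be copied verbatim (CRC failure, or no single chunk
+		   of free space large enough), so fall through to the full
+		   iget() path below. */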
+		if (ret != -EBADFD) {
+			spin_unlock(&c->inocache_lock);
+			goto release_sem;
+		}
+
+		/* Fall through if it wanted us to, with inocache_lock held */
+	}
+
+	/* Prevent the fairly unlikely race where the gcblock is
+	   entirely obsoleted by the final close of a file which had
+	   the only valid nodes in the block, followed by erasure,
+	   followed by freeing of the ic because the erased block(s)
+	   held _all_ the nodes of that inode.... never been seen but
+	   it's vaguely possible. */
+
+	inum = ic->ino;
+	nlink = ic->nlink;
+	spin_unlock(&c->inocache_lock);
+
+	f = jffs2_gc_fetch_inode(c, inum, nlink);
+	if (IS_ERR(f)) {
+		ret = PTR_ERR(f);
+		goto release_sem;
+	}
+	if (!f) {
+		ret = 0;
+		goto release_sem;
+	}
+
+	ret = jffs2_garbage_collect_live(c, jeb, raw, f);
+
+	jffs2_gc_release_inode(c, f);
+
+ release_sem:
+	up(&c->alloc_sem);
+
+ eraseit_lock:
+	/* If we've finished this block, start it erasing */
+	spin_lock(&c->erase_completion_lock);
+
+ eraseit:
+	if (c->gcblock && !c->gcblock->used_size) {
+		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+		/* We're GC'ing an empty block? */
+		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
+		c->gcblock = NULL;
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	return ret;
+}
+
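+/* Work out which of the inode's nodes 'raw' actually is -- metadata, data
+ * node, live dirent or deletion dirent -- and hand it to the matching
+ * helper below. */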
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
+				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dnode *fn = NULL;
+	struct jffs2_full_dirent *fd;
+	uint32_t start = 0, end = 0, nrfrags = 0;
+	int ret = 0;
+
+	down(&f->sem);
+
+	/* Now we have the lock for this inode. Check that it's still the one at the head
+	   of the list. */
+
+	spin_lock(&c->erase_completion_lock);
+
+	if (c->gcblock != jeb) {
+		spin_unlock(&c->erase_completion_lock);
+		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+		goto upnout;
+	}
+	if (ref_obsolete(raw)) {
+		spin_unlock(&c->erase_completion_lock);
+		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
+		/* They'll call again */
+		goto upnout;
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
+	if (f->metadata && f->metadata->raw == raw) {
+		fn = f->metadata;
+		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
+		goto upnout;
+	}
+
+	/* FIXME. Read node and do lookup? */
+	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+		if (frag->node && frag->node->raw == raw) {
+			fn = frag->node;
+			end = frag->ofs + frag->size;
+			if (!nrfrags++)
+				start = frag->ofs;
+			if (nrfrags == frag->node->frags)
+				break; /* We've found them all */
+		}
+	}
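+	/* If we found the node in the fragtree, [start, end) now spans every
+	   frag of it which is still in use. */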
+	if (fn) {
+		if (ref_flags(raw) == REF_PRISTINE) {
+			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
+			if (!ret) {
+				/* Urgh. Return it sensibly. */
+				frag->node->raw = f->inocache->nodes;
+			}	
+			if (ret != -EBADFD)
+				goto upnout;
+		}
+		/* We found a datanode. Do the GC */
+		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+			/* It crosses a page boundary. Therefore, it must be a hole. */
+			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
+		} else {
+			/* It could still be a hole. But we GC the page this way anyway */
+			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
+		}
+		goto upnout;
+	}
+	
+	/* Wasn't a dnode. Try dirent */
+	for (fd = f->dents; fd; fd=fd->next) {
+		if (fd->raw == raw)
+			break;
+	}
+
+	if (fd && fd->ino) {
+		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
+	} else if (fd) {
+		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
+	} else {
+		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
+		       ref_offset(raw), f->inocache->ino);
+		if (ref_obsolete(raw)) {
+			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
+		} else {
+			ret = -EIO;
+		}
+	}
+ upnout:
+	up(&f->sem);
+
+	return ret;
+}
+
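+/* Copy a node verbatim to a new location after verifying its CRCs, without
+ * going through iget(). Returns -EBADFD if the node can't be handled this
+ * way (bad CRC, or no single chunk of free space big enough to hold it),
+ * in which case the caller falls back to the normal GC path. */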
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, 
+					  struct jffs2_inode_cache *ic,
+					  struct jffs2_raw_node_ref *raw)
+{
+	union jffs2_node_union *node;
+	struct jffs2_raw_node_ref *nraw;
+	size_t retlen;
+	int ret;
+	uint32_t phys_ofs, alloclen;
+	uint32_t crc, rawlen;
+	int retried = 0;
+
+	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+
+	rawlen = ref_totlen(c, c->gcblock, raw);
+
+	/* Ask for a small amount of space (or the totlen if smaller) because we
+	   don't want to force wastage of the end of a block if splitting would
+	   work. */
+	ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, 
+					      rawlen), &phys_ofs, &alloclen);
+	if (ret)
+		return ret;
+
+	if (alloclen < rawlen) {
+		/* Doesn't fit untouched. We'll go the old route and split it */
+		return -EBADFD;
+	}
+
+	node = kmalloc(rawlen, GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
+	if (!ret && retlen != rawlen)
+		ret = -EIO;
+	if (ret)
+		goto out_node;
+
+	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
+	if (je32_to_cpu(node->u.hdr_crc) != crc) {
+		printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
+		goto bail;
+	}
+
+	switch(je16_to_cpu(node->u.nodetype)) {
+	case JFFS2_NODETYPE_INODE:
+		crc = crc32(0, node, sizeof(node->i)-8);
+		if (je32_to_cpu(node->i.node_crc) != crc) {
+			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
+			goto bail;
+		}
+
+		if (je32_to_cpu(node->i.dsize)) {
+			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
+			if (je32_to_cpu(node->i.data_crc) != crc) {
+				printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				       ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
+				goto bail;
+			}
+		}
+		break;
+
+	case JFFS2_NODETYPE_DIRENT:
+		crc = crc32(0, node, sizeof(node->d)-8);
+		if (je32_to_cpu(node->d.node_crc) != crc) {
+			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
+			goto bail;
+		}
+
+		if (node->d.nsize) {
+			crc = crc32(0, node->d.name, node->d.nsize);
+			if (je32_to_cpu(node->d.name_crc) != crc) {
+				printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				       ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
+				goto bail;
+			}
+		}
+		break;
+	default:
+		printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", 
+		       ref_offset(raw), je16_to_cpu(node->u.nodetype));
+		goto bail;
+	}
+
+	nraw = jffs2_alloc_raw_node_ref();
+	if (!nraw) {
+		ret = -ENOMEM;
+		goto out_node;
+	}
+
+	/* OK, all the CRCs are good; this node can just be copied as-is. */
+ retry:
+	nraw->flash_offset = phys_ofs;
+	nraw->__totlen = rawlen;
+	nraw->next_phys = NULL;
+
+	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
+
+	if (ret || (retlen != rawlen)) {
+		printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
+                       rawlen, phys_ofs, ret, retlen);
+		if (retlen) {
+                        /* Doesn't belong to any inode */
+			nraw->next_in_ino = NULL;
+
+			nraw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, nraw);
+			jffs2_mark_node_obsolete(c, nraw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset);
+                        jffs2_free_raw_node_ref(nraw);
+		}
+		if (!retried && (nraw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+			
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy);
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(nraw);
+		}
+
+		if (!ret)
+			ret = -EIO;
+		goto out_node;
+	}
+	nraw->flash_offset |= REF_PRISTINE;
+	jffs2_add_physical_node_ref(c, nraw);
+
+	/* Link into per-inode list. This is safe because of the ic
+	   state being INO_STATE_GC. Note that if we're doing this
+	   for an inode which is in-core, the 'nraw' pointer is then
+	   going to be fetched from ic->nodes by our caller. */
+	spin_lock(&c->erase_completion_lock);
+	nraw->next_in_ino = ic->nodes;
+	ic->nodes = nraw;
+	spin_unlock(&c->erase_completion_lock);
+
+	jffs2_mark_node_obsolete(c, raw);
+	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+
+ out_node:
+	kfree(node);
+	return ret;
+ bail:
+	ret = -EBADFD;
+	goto out_node;
+}
+
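+/* GC an inode's metadata node. Device numbers are regenerated from the
+ * in-core inode, a symlink target is re-read from the old node, and anything
+ * else carries no data at all; in each case a fresh node is written with a
+ * new version number and the old one is then obsoleted. */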
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+{
+	struct jffs2_full_dnode *new_fn;
+	struct jffs2_raw_inode ri;
+	jint16_t dev;
+	char *mdata = NULL;
+	int mdatalen = 0;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
+	    S_ISCHR(JFFS2_F_I_MODE(f)) ) {
+		/* For these, we don't actually need to read the old node */
+		/* FIXME: for minor or major > 255. */
+		dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | 
+			JFFS2_F_I_RDEV_MIN(f)));
+		mdata = (char *)&dev;
+		mdatalen = sizeof(dev);
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
+	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
+		mdatalen = fn->size;
+		mdata = kmalloc(fn->size, GFP_KERNEL);
+		if (!mdata) {
+			printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
+			return -ENOMEM;
+		}
+		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
+		if (ret) {
+			printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
+			kfree(mdata);
+			return ret;
+		}
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen));
+
+	}
+	
+	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
+		       sizeof(ri)+ mdatalen, ret);
+		goto out;
+	}
+	
+	memset(&ri, 0, sizeof(ri));
+	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
+	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri.ino = cpu_to_je32(f->inocache->ino);
+	ri.version = cpu_to_je32(++f->highest_version);
+	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+	ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+	ri.offset = cpu_to_je32(0);
+	ri.csize = cpu_to_je32(mdatalen);
+	ri.dsize = cpu_to_je32(mdatalen);
+	ri.compr = JFFS2_COMPR_NONE;
+	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
+
+	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fn)) {
+		printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+		ret = PTR_ERR(new_fn);
+		goto out;
+	}
+	jffs2_mark_node_obsolete(c, fn->raw);
+	jffs2_free_full_dnode(fn);
+	f->metadata = new_fn;
+ out:
+	if (S_ISLNK(JFFS2_F_I_MODE(f)))
+		kfree(mdata);
+	return ret;
+}
+
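+/* GC a live dirent: rebuild the raw dirent from the in-core full_dirent and
+ * write it out with a new version number. The old node is obsoleted when
+ * jffs2_add_fd_to_list() replaces the lower-versioned duplicate in f->dents. */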
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent *new_fd;
+	struct jffs2_raw_dirent rd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd.nsize = strlen(fd->name);
+	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
+	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd.pino = cpu_to_je32(f->inocache->ino);
+	rd.version = cpu_to_je32(++f->highest_version);
+	rd.ino = cpu_to_je32(fd->ino);
+	rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f)));
+	rd.type = fd->type;
+	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
+	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
+	
+	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
+		       sizeof(rd)+rd.nsize, ret);
+		return ret;
+	}
+	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fd)) {
+		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
+		return PTR_ERR(new_fd);
+	}
+	jffs2_add_fd_to_list(c, new_fd, &f->dents);
+	return 0;
+}
+
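+/* GC a deletion dirent. It only needs to be rewritten if, on a medium where
+ * we can't mark nodes obsolete in place (e.g. NAND), an older dirent with
+ * the same name still exists on the flash for it to delete; otherwise it
+ * can simply be dropped. */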
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent **fdp = &f->dents;
+	int found = 0;
+
+	/* On a medium where we can't actually mark nodes obsolete
+	   permanently, such as NAND flash, we need to work out
+	   whether this deletion dirent is still needed to actively
+	   delete a 'real' dirent with the same name that's still
+	   somewhere else on the flash. */
+	if (!jffs2_can_mark_obsolete(c)) {
+		struct jffs2_raw_dirent *rd;
+		struct jffs2_raw_node_ref *raw;
+		int ret;
+		size_t retlen;
+		int name_len = strlen(fd->name);
+		uint32_t name_crc = crc32(0, fd->name, name_len);
+		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
+
+		rd = kmalloc(rawlen, GFP_KERNEL);
+		if (!rd)
+			return -ENOMEM;
+
+		/* Prevent the erase code from nicking the obsolete node refs while
+		   we're looking at them. I really don't like this extra lock but
+		   can't see any alternative. Suggestions on a postcard to... */
+		down(&c->erase_free_sem);
+
+		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
+
+			/* We only care about obsolete ones */
+			if (!(ref_obsolete(raw)))
+				continue;
+
+			/* Any dirent with the same name is going to have the same length... */
+			if (ref_totlen(c, NULL, raw) != rawlen)
+				continue;
+
+			/* Doesn't matter if there's one in the same erase block. We're going to 
+			   delete it too at the same time. */
+			if ((raw->flash_offset & ~(c->sector_size-1)) ==
+			    (fd->raw->flash_offset & ~(c->sector_size-1)))
+				continue;
+
+			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+
+			/* This is an obsolete node belonging to the same directory, and it's of the right
+			   length. We need to take a closer look...*/
+			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
+			if (ret) {
+				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
+				/* If we can't read it, we don't need to continue to obsolete it. Continue */
+				continue;
+			}
+			if (retlen != rawlen) {
+				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
+				       retlen, rawlen, ref_offset(raw));
+				continue;
+			}
+
+			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
+				continue;
+
+			/* If the name CRC doesn't match, skip */
+			if (je32_to_cpu(rd->name_crc) != name_crc)
+				continue;
+
+			/* If the name length doesn't match, or it's another deletion dirent, skip */
+			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
+				continue;
+
+			/* OK, check the actual name now */
+			if (memcmp(rd->name, fd->name, name_len))
+				continue;
+
+			/* OK. The name really does match. There really is still an older node on
+			   the flash which our deletion dirent obsoletes. So we have to write out
+			   a new deletion dirent to replace it */
+			up(&c->erase_free_sem);
+
+			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+			kfree(rd);
+
+			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
+		}
+
+		up(&c->erase_free_sem);
+		kfree(rd);
+	}
+
+	/* No need for it any more. Just mark it obsolete and remove it from the list */
+	while (*fdp) {
+		if ((*fdp) == fd) {
+			found = 1;
+			*fdp = fd->next;
+			break;
+		}
+		fdp = &(*fdp)->next;
+	}
+	if (!found) {
+		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
+	}
+	jffs2_mark_node_obsolete(c, fd->raw);
+	jffs2_free_full_dirent(fd);
+	return 0;
+}
+
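+/* Write a replacement hole node covering [start, end). If the old node still
+ * backs more than one frag it has been partially overwritten, so the
+ * replacement has to keep the _same_ version number; otherwise a brand new
+ * hole node is written with a fresh version. */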
+static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				      uint32_t start, uint32_t end)
+{
+	struct jffs2_raw_inode ri;
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dnode *new_fn;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end));
+	
+	memset(&ri, 0, sizeof(ri));
+
+	if(fn->frags > 1) {
+		size_t readlen;
+		uint32_t crc;
+		/* It's partially obsoleted by a later write. So we have to 
+		   write it out again with the _same_ version as before */
+		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
+		if (readlen != sizeof(ri) || ret) {
+			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
+			goto fill;
+		}
+		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
+			       ref_offset(fn->raw),
+			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
+			return -EIO;
+		}
+		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
+			       ref_offset(fn->raw),
+			       je32_to_cpu(ri.totlen), sizeof(ri));
+			return -EIO;
+		}
+		crc = crc32(0, &ri, sizeof(ri)-8);
+		if (crc != je32_to_cpu(ri.node_crc)) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
+			       ref_offset(fn->raw), 
+			       je32_to_cpu(ri.node_crc), crc);
+			/* FIXME: We could possibly deal with this by writing new holes for each frag */
+			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 
+			       start, end, f->inocache->ino);
+			goto fill;
+		}
+		if (ri.compr != JFFS2_COMPR_ZERO) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
+			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 
+			       start, end, f->inocache->ino);
+			goto fill;
+		}
+	} else {
+	fill:
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri));
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.offset = cpu_to_je32(start);
+		ri.dsize = cpu_to_je32(end - start);
+		ri.csize = cpu_to_je32(0);
+		ri.compr = JFFS2_COMPR_ZERO;
+	}
+	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+	ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+	ri.data_crc = cpu_to_je32(0);
+	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+
+	ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
+		       sizeof(ri), ret);
+		return ret;
+	}
+	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fn)) {
+		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
+		return PTR_ERR(new_fn);
+	}
+	if (je32_to_cpu(ri.version) == f->highest_version) {
+		jffs2_add_full_dnode_to_inode(c, f, new_fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		return 0;
+	}
+
+	/* 
+	 * We should only get here in the case where the node we are
+	 * replacing had more than one frag, so we kept the same version
+	 * number as before. (Except in case of error -- see 'goto fill;' 
+	 * above.)
+	 */
+	D1(if(unlikely(fn->frags <= 1)) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
+		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
+		       je32_to_cpu(ri.ino));
+	});
+
+	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
+	mark_ref_normal(new_fn->raw);
+
+	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); 
+	     frag; frag = frag_next(frag)) {
+		if (frag->ofs > fn->size + fn->ofs)
+			break;
+		if (frag->node == fn) {
+			frag->node = new_fn;
+			new_fn->frags++;
+			fn->frags--;
+		}
+	}
+	if (fn->frags) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
+		BUG();
+	}
+	if (!new_fn->frags) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
+		BUG();
+	}
+		
+	jffs2_mark_node_obsolete(c, fn->raw);
+	jffs2_free_full_dnode(fn);
+	
+	return 0;
+}
+
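+/* GC a data node: pull the affected page in through the page cache, possibly
+ * expand [start, end) to merge with logically adjacent frags whose nodes
+ * already live in dirty blocks, then compress and write out replacement
+ * node(s). */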
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				       uint32_t start, uint32_t end)
+{
+	struct jffs2_full_dnode *new_fn;
+	struct jffs2_raw_inode ri;
+	uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;	
+	int ret = 0;
+	unsigned char *comprbuf = NULL, *writebuf;
+	unsigned long pg;
+	unsigned char *pg_ptr;
+ 
+	memset(&ri, 0, sizeof(ri));
+
+	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end));
+
+	orig_end = end;
+	orig_start = start;
+
+	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
+		/* Attempt to do some merging. But only expand to cover logically
+		   adjacent frags if the block containing them is already considered
+		   to be dirty. Otherwise we end up with GC just going round in 
+		   circles dirtying the nodes it already wrote out, especially 
+		   on NAND where we have small eraseblocks and hence a much higher
+		   chance of nodes having to be split to cross boundaries. */
+
+		struct jffs2_node_frag *frag;
+		uint32_t min, max;
+
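+		/* Never expand beyond the boundaries of the page which contains 'start' */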
+		min = start & ~(PAGE_CACHE_SIZE-1);
+		max = min + PAGE_CACHE_SIZE;
+
+		frag = jffs2_lookup_node_frag(&f->fragtree, start);
+
+		/* BUG_ON(!frag) but that'll happen anyway... */
+
+		BUG_ON(frag->ofs != start);
+
+		/* First grow down... */
+		while((frag = frag_prev(frag)) && frag->ofs >= min) {
+
+			/* If the previous frag doesn't even reach the beginning, there's
+			   excessive fragmentation. Just merge. */
+			if (frag->ofs > min) {
+				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size));
+				start = frag->ofs;
+				continue;
+			}
+			/* OK. This frag holds the first byte of the page. */
+			if (!frag->node || !frag->node->raw) {
+				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+					  frag->ofs, frag->ofs+frag->size));
+				break;
+			} else {
+
+				/* OK, it's a frag which extends to the beginning of the page. Does it live 
+				   in a block which is still considered clean? If so, don't obsolete it.
+				   If not, cover it anyway. */
+
+				struct jffs2_raw_node_ref *raw = frag->node->raw;
+				struct jffs2_eraseblock *jeb;
+
+				jeb = &c->blocks[raw->flash_offset / c->sector_size];
+
+				if (jeb == c->gcblock) {
+					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					start = frag->ofs;
+					break;
+				}
+				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				start = frag->ofs;
+				break;
+			}
+		}
+
+		/* ... then up */
+
+		/* Find last frag which is actually part of the node we're to GC. */
+		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
+
+		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
+
+			/* If the next frag doesn't even reach the end of the page, there's lots
+			   of fragmentation. Just merge. */
+			if (frag->ofs+frag->size < max) {
+				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size));
+				end = frag->ofs + frag->size;
+				continue;
+			}
+
+			if (!frag->node || !frag->node->raw) {
+				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+					  frag->ofs, frag->ofs+frag->size));
+				break;
+			} else {
+
+				/* OK, it's a frag which extends to the end of the page. Does it live
+				   in a block which is still considered clean? If so, don't obsolete it.
+				   If not, cover it anyway. */
+
+				struct jffs2_raw_node_ref *raw = frag->node->raw;
+				struct jffs2_eraseblock *jeb;
+
+				jeb = &c->blocks[raw->flash_offset / c->sector_size];
+
+				if (jeb == c->gcblock) {
+					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					end = frag->ofs + frag->size;
+					break;
+				}
+				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				end = frag->ofs + frag->size;
+				break;
+			}
+		}
+		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", 
+			  orig_start, orig_end, start, end));
+
+		BUG_ON(end > JFFS2_F_I_SIZE(f));
+		BUG_ON(end < orig_end);
+		BUG_ON(start > orig_start);
+	}
+	
+	/* First, use readpage() to read the appropriate page into the page cache */
+	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
+	 *    triggered garbage collection in the first place?
+	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
+	 *    page OK. We'll actually write it out again in commit_write, which is a little
+	 *    suboptimal, but at least we're correct.
+	 */
+	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+
+	if (IS_ERR(pg_ptr)) {
+		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
+		return PTR_ERR(pg_ptr);
+	}
+
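+	/* Write replacement nodes until we've covered at least up to the end of
+	   the original range. Each pass reserves space, compresses as much of
+	   the remaining data as will fit, and writes one new node. */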
+	offset = start;
+	while(offset < orig_end) {
+		uint32_t datalen;
+		uint32_t cdatalen;
+		uint16_t comprtype = JFFS2_COMPR_NONE;
+
+		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);
+
+		if (ret) {
+			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
+			       sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
+			break;
+		}
+		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
+		datalen = end - offset;
+
+		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+
+		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
+
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+		ri.offset = cpu_to_je32(offset);
+		ri.csize = cpu_to_je32(cdatalen);
+		ri.dsize = cpu_to_je32(datalen);
+		ri.compr = comprtype & 0xff;
+		ri.usercompr = (comprtype >> 8) & 0xff;
+		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+	
+		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
+
+		jffs2_free_comprbuf(comprbuf, writebuf);
+
+		if (IS_ERR(new_fn)) {
+			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+			ret = PTR_ERR(new_fn);
+			break;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
+		offset += datalen;
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+	}
+
+	jffs2_gc_release_page(c, pg_ptr, &pg);
+	return ret;
+}
+
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h
new file mode 100644
index 0000000..84f184f
--- /dev/null
+++ b/fs/jffs2/histo.h
@@ -0,0 +1,3 @@
+/* This file provides the bit-probabilities for the input file */
+#define BIT_DIVIDER 629 
+static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h
new file mode 100644
index 0000000..9a44326
--- /dev/null
+++ b/fs/jffs2/histo_mips.h
@@ -0,0 +1,2 @@
+#define BIT_DIVIDER_MIPS 1043 
+static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c
new file mode 100644
index 0000000..238c799
--- /dev/null
+++ b/fs/jffs2/ioctl.c
@@ -0,0 +1,23 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/fs.h>
+
+int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 
+		unsigned long arg)
+{
+	/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
+	   will include compression support etc. */
+	return -ENOTTY;
+}
+	
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
new file mode 100644
index 0000000..5abb431
--- /dev/null
+++ b/fs/jffs2/malloc.c
@@ -0,0 +1,205 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/jffs2.h>
+#include "nodelist.h"
+
+#if 0
+#define JFFS2_SLAB_POISON SLAB_POISON
+#else
+#define JFFS2_SLAB_POISON 0
+#endif
+
+// replace this by #define D3(x) x for cache debugging
+#define D3(x)
+
+/* These are initialised to NULL in the kernel startup code.
+   If you're porting to other operating systems, beware */
+static kmem_cache_t *full_dnode_slab;
+static kmem_cache_t *raw_dirent_slab;
+static kmem_cache_t *raw_inode_slab;
+static kmem_cache_t *tmp_dnode_info_slab;
+static kmem_cache_t *raw_node_ref_slab;
+static kmem_cache_t *node_frag_slab;
+static kmem_cache_t *inode_cache_slab;
+
+int __init jffs2_create_slab_caches(void)
+{
+	full_dnode_slab = kmem_cache_create("jffs2_full_dnode", 
+					    sizeof(struct jffs2_full_dnode),
+					    0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!full_dnode_slab)
+		goto err;
+
+	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
+					    sizeof(struct jffs2_raw_dirent),
+					    0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_dirent_slab)
+		goto err;
+
+	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
+					   sizeof(struct jffs2_raw_inode),
+					   0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_inode_slab)
+		goto err;
+
+	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
+						sizeof(struct jffs2_tmp_dnode_info),
+						0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!tmp_dnode_info_slab)
+		goto err;
+
+	raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
+					      sizeof(struct jffs2_raw_node_ref),
+					      0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_node_ref_slab)
+		goto err;
+
+	node_frag_slab = kmem_cache_create("jffs2_node_frag",
+					   sizeof(struct jffs2_node_frag),
+					   0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!node_frag_slab)
+		goto err;
+
+	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
+					     sizeof(struct jffs2_inode_cache),
+					     0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (inode_cache_slab)
+		return 0;
+ err:
+	jffs2_destroy_slab_caches();
+	return -ENOMEM;
+}
+
+void jffs2_destroy_slab_caches(void)
+{
+	if(full_dnode_slab)
+		kmem_cache_destroy(full_dnode_slab);
+	if(raw_dirent_slab)
+		kmem_cache_destroy(raw_dirent_slab);
+	if(raw_inode_slab)
+		kmem_cache_destroy(raw_inode_slab);
+	if(tmp_dnode_info_slab)
+		kmem_cache_destroy(tmp_dnode_info_slab);
+	if(raw_node_ref_slab)
+		kmem_cache_destroy(raw_node_ref_slab);
+	if(node_frag_slab)
+		kmem_cache_destroy(node_frag_slab);
+	if(inode_cache_slab)
+		kmem_cache_destroy(inode_cache_slab);
+}
+
+struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
+{
+	return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+}
+
+void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
+{
+	kfree(x);
+}
+
+struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
+{
+	struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
+{
+	D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x));
+	kmem_cache_free(full_dnode_slab, x);
+}
+
+struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
+{
+	struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_dirent at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x));
+	kmem_cache_free(raw_dirent_slab, x);
+}
+
+struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
+{
+	struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x));
+	kmem_cache_free(raw_inode_slab, x);
+}
+
+struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
+{
+	struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
+{
+	D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x));
+	kmem_cache_free(tmp_dnode_info_slab, x);
+}
+
+struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
+{
+	struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x));
+	kmem_cache_free(raw_node_ref_slab, x);
+}
+
+struct jffs2_node_frag *jffs2_alloc_node_frag(void)
+{
+	struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_node_frag(struct jffs2_node_frag *x)
+{
+	D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x));
+	kmem_cache_free(node_frag_slab, x);
+}
+
+struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
+{
+	struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
+	D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
+{
+	D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x));
+	kmem_cache_free(inode_cache_slab, x);
+}
+
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
new file mode 100644
index 0000000..cd6a8bd
--- /dev/null
+++ b/fs/jffs2/nodelist.c
@@ -0,0 +1,681 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodelist.c,v 1.90 2004/12/08 17:59:20 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mtd/mtd.h>
+#include <linux/rbtree.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include "nodelist.h"
+
+void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
+{
+	struct jffs2_full_dirent **prev = list;
+	D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));
+
+	while ((*prev) && (*prev)->nhash <= new->nhash) {
+		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
+			/* Duplicate. Free one */
+			if (new->version < (*prev)->version) {
+				D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
+				D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
+				jffs2_mark_node_obsolete(c, new->raw);
+				jffs2_free_full_dirent(new);
+			} else {
+				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
+				new->next = (*prev)->next;
+				jffs2_mark_node_obsolete(c, ((*prev)->raw));
+				jffs2_free_full_dirent(*prev);
+				*prev = new;
+			}
+			goto out;
+		}
+		prev = &((*prev)->next);
+	}
+	new->next = *prev;
+	*prev = new;
+
+ out:
+	D2(while(*list) {
+		printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u)\n", (*list)->name, (*list)->nhash, (*list)->ino);
+		list = &(*list)->next;
+	});
+}
+
+/* Put a new tmp_dnode_info into the list, keeping the list in 
+   order of increasing version
+*/
+static void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list)
+{
+	struct jffs2_tmp_dnode_info **prev = list;
+	
+	while ((*prev) && (*prev)->version < tn->version) {
+		prev = &((*prev)->next);
+	}
+	tn->next = (*prev);
+	*prev = tn;
+}
+
+static void jffs2_free_tmp_dnode_info_list(struct jffs2_tmp_dnode_info *tn)
+{
+	struct jffs2_tmp_dnode_info *next;
+
+	while (tn) {
+		next = tn;
+		tn = tn->next;
+		jffs2_free_full_dnode(next->fn);
+		jffs2_free_tmp_dnode_info(next);
+	}
+}
+
+static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent *next;
+
+	while (fd) {
+		next = fd->next;
+		jffs2_free_full_dirent(fd);
+		fd = next;
+	}
+}
+
+/* Returns first valid node after 'ref'. May return 'ref' */
+static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
+{
+	while (ref && ref->next_in_ino) {
+		if (!ref_obsolete(ref))
+			return ref;
+		D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
+		ref = ref->next_in_ino;
+	}
+	return NULL;
+}
+
+/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
+   with this ino, returning the former in order of version */
+
+int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			  struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
+			  uint32_t *highest_version, uint32_t *latest_mctime,
+			  uint32_t *mctime_ver)
+{
+	struct jffs2_raw_node_ref *ref, *valid_ref;
+	struct jffs2_tmp_dnode_info *tn, *ret_tn = NULL;
+	struct jffs2_full_dirent *fd, *ret_fd = NULL;
+	union jffs2_node_union node;
+	size_t retlen;
+	int err;
+
+	*mctime_ver = 0;
+	
+	D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));
+
+	spin_lock(&c->erase_completion_lock);
+
+	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
+
+	if (!valid_ref)
+		printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);
+
+	while (valid_ref) {
+		/* We can hold a pointer to a non-obsolete node without the spinlock,
+		   but _obsolete_ nodes may disappear at any time, if the block
+		   they're in gets erased. So if we mark 'ref' obsolete while we're
+		   not holding the lock, it can go away immediately. For that reason,
+		   we find the next valid node first, before processing 'ref'.
+		*/
+		ref = valid_ref;
+		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
+		spin_unlock(&c->erase_completion_lock);
+
+		cond_resched();
+
+		/* FIXME: point() */
+		err = jffs2_flash_read(c, (ref_offset(ref)), 
+				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
+				       &retlen, (void *)&node);
+		if (err) {
+			printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
+			goto free_out;
+		}
+
+
+		/* Check we've managed to read at least the common node header */
+		if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
+			printk(KERN_WARNING "short read in get_inode_nodes()\n");
+			err = -EIO;
+			goto free_out;
+		}
+			
+		switch (je16_to_cpu(node.u.nodetype)) {
+		case JFFS2_NODETYPE_DIRENT:
+			D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
+				BUG();
+			}
+			if (retlen < sizeof(node.d)) {
+				printk(KERN_WARNING "short read in get_inode_nodes()\n");
+				err = -EIO;
+				goto free_out;
+			}
+			/* sanity check */
+			if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
+				printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
+				       ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
+				jffs2_mark_node_obsolete(c, ref);
+				spin_lock(&c->erase_completion_lock);
+				continue;
+			}
+			if (je32_to_cpu(node.d.version) > *highest_version)
+				*highest_version = je32_to_cpu(node.d.version);
+			if (ref_obsolete(ref)) {
+				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+				printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n",
+				       ref_offset(ref));
+				BUG();
+			}
+			
+			fd = jffs2_alloc_full_dirent(node.d.nsize+1);
+			if (!fd) {
+				err = -ENOMEM;
+				goto free_out;
+			}
+			fd->raw = ref;
+			fd->version = je32_to_cpu(node.d.version);
+			fd->ino = je32_to_cpu(node.d.ino);
+			fd->type = node.d.type;
+
+			/* Pick out the mctime of the latest dirent */
+			if(fd->version > *mctime_ver) {
+				*mctime_ver = fd->version;
+				*latest_mctime = je32_to_cpu(node.d.mctime);
+			}
+
+			/* memcpy as much of the name as possible from the raw
+			   dirent we've already read from the flash
+			*/
+			if (retlen > sizeof(struct jffs2_raw_dirent))
+				memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
+				
+			/* Do we need to copy any more of the name directly
+			   from the flash?
+			*/
+			if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
+				/* FIXME: point() */
+				int already = retlen - sizeof(struct jffs2_raw_dirent);
+					
+				err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, 
+						   node.d.nsize - already, &retlen, &fd->name[already]);
+				if (!err && retlen != node.d.nsize - already)
+					err = -EIO;
+					
+				if (err) {
+					printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
+					jffs2_free_full_dirent(fd);
+					goto free_out;
+				}
+			}
+			fd->nhash = full_name_hash(fd->name, node.d.nsize);
+			fd->next = NULL;
+			fd->name[node.d.nsize] = '\0';
+			/* Wheee. We now have a complete jffs2_full_dirent structure, with
+			   the name in it and everything. Link it into the list
+			*/
+			D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
+			jffs2_add_fd_to_list(c, fd, &ret_fd);
+			break;
+
+		case JFFS2_NODETYPE_INODE:
+			D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
+			if (retlen < sizeof(node.i)) {
+				printk(KERN_WARNING "read too short for dnode\n");
+				err = -EIO;
+				goto free_out;
+			}
+			if (je32_to_cpu(node.i.version) > *highest_version)
+				*highest_version = je32_to_cpu(node.i.version);
+			D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version));
+
+			if (ref_obsolete(ref)) {
+				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+				printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n",
+				       ref_offset(ref));
+				BUG();
+			}
+
+			/* If we've never checked the CRCs on this node, check them now. */
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				uint32_t crc, len;
+				struct jffs2_eraseblock *jeb;
+
+				crc = crc32(0, &node, sizeof(node.i)-8);
+				if (crc != je32_to_cpu(node.i.node_crc)) {
+					printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+					       ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
+					jffs2_mark_node_obsolete(c, ref);
+					spin_lock(&c->erase_completion_lock);
+					continue;
+				}
+				
+				/* sanity checks */
+				if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
+				     PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) {
+					printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino  %d, version %d, isize %d, csize %d, dsize %d \n",
+						ref_offset(ref),  je32_to_cpu(node.i.totlen),  je32_to_cpu(node.i.ino),
+						je32_to_cpu(node.i.version),  je32_to_cpu(node.i.isize), 
+						je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
+					jffs2_mark_node_obsolete(c, ref);
+					spin_lock(&c->erase_completion_lock);
+					continue;
+				}
+
+				if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
+					unsigned char *buf=NULL;
+					uint32_t pointed = 0;
+#ifndef __ECOS
+					if (c->mtd->point) {
+						err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
+								     &retlen, &buf);
+						if (!err && retlen < je32_to_cpu(node.i.csize)) {
+							D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
+							c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
+						} else if (err){
+							D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
+						} else
+							pointed = 1; /* successfully pointed to device */
+					}
+#endif					
+					if(!pointed){
+						buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
+						if (!buf)
+							return -ENOMEM;
+						
+						err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
+								       &retlen, buf);
+						if (!err && retlen != je32_to_cpu(node.i.csize))
+							err = -EIO;
+						if (err) {
+							kfree(buf);
+							return err;
+						}
+					}
+					crc = crc32(0, buf, je32_to_cpu(node.i.csize));
+					if(!pointed)
+						kfree(buf);
+#ifndef __ECOS
+					else
+						c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
+#endif
+
+					if (crc != je32_to_cpu(node.i.data_crc)) {
+						printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+						       ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
+						jffs2_mark_node_obsolete(c, ref);
+						spin_lock(&c->erase_completion_lock);
+						continue;
+					}
+					
+				}
+
+				/* Mark the node as having been checked and fix the accounting accordingly */
+				spin_lock(&c->erase_completion_lock);
+				jeb = &c->blocks[ref->flash_offset / c->sector_size];
+				len = ref_totlen(c, jeb, ref);
+
+				jeb->used_size += len;
+				jeb->unchecked_size -= len;
+				c->used_size += len;
+				c->unchecked_size -= len;
+
+				/* If node covers at least a whole page, or if it starts at the 
+				   beginning of a page and runs to the end of the file, or if 
+				   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
+
+				   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) 
+				   when the overlapping node(s) get added to the tree anyway. 
+				*/
+				if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
+				    ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
+				      (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) ==  je32_to_cpu(node.i.isize)))) {
+					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
+					ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
+				} else {
+					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
+					ref->flash_offset = ref_offset(ref) | REF_NORMAL;
+				}
+				spin_unlock(&c->erase_completion_lock);
+			}
+
+			tn = jffs2_alloc_tmp_dnode_info();
+			if (!tn) {
+				D1(printk(KERN_DEBUG "alloc tn failed\n"));
+				err = -ENOMEM;
+				goto free_out;
+			}
+
+			tn->fn = jffs2_alloc_full_dnode();
+			if (!tn->fn) {
+				D1(printk(KERN_DEBUG "alloc fn failed\n"));
+				err = -ENOMEM;
+				jffs2_free_tmp_dnode_info(tn);
+				goto free_out;
+			}
+			tn->version = je32_to_cpu(node.i.version);
+			tn->fn->ofs = je32_to_cpu(node.i.offset);
+			/* There was a bug where we wrote hole nodes out with
+			   csize/dsize swapped. Deal with it */
+			if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
+				tn->fn->size = je32_to_cpu(node.i.csize);
+			else // normal case...
+				tn->fn->size = je32_to_cpu(node.i.dsize);
+			tn->fn->raw = ref;
+			D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
+				  ref_offset(ref), je32_to_cpu(node.i.version),
+				  je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
+			jffs2_add_tn_to_list(tn, &ret_tn);
+			break;
+
+		default:
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				struct jffs2_eraseblock *jeb;
+				uint32_t len;
+
+				printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
+				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
+
+				/* Mark the node as having been checked and fix the accounting accordingly */
+				spin_lock(&c->erase_completion_lock);
+				jeb = &c->blocks[ref->flash_offset / c->sector_size];
+				len = ref_totlen(c, jeb, ref);
+
+				jeb->used_size += len;
+				jeb->unchecked_size -= len;
+				c->used_size += len;
+				c->unchecked_size -= len;
+
+				mark_ref_normal(ref);
+				spin_unlock(&c->erase_completion_lock);
+			}
+			node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
+			if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
+				/* Hmmm. This should have been caught at scan time. */
+				printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
+				       ref_offset(ref));
+				printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", 
+				       je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
+				       je32_to_cpu(node.u.hdr_crc));
+				jffs2_mark_node_obsolete(c, ref);
+			} else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
+			case JFFS2_FEATURE_INCOMPAT:
+				printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				/* EEP */
+				BUG();
+				break;
+			case JFFS2_FEATURE_ROCOMPAT:
+				printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				if (!(c->flags & JFFS2_SB_FLAG_RO))
+					BUG();
+				break;
+			case JFFS2_FEATURE_RWCOMPAT_COPY:
+				printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				break;
+			case JFFS2_FEATURE_RWCOMPAT_DELETE:
+				printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				jffs2_mark_node_obsolete(c, ref);
+				break;
+			}
+
+		}
+		spin_lock(&c->erase_completion_lock);
+
+	}
+	spin_unlock(&c->erase_completion_lock);
+	*tnp = ret_tn;
+	*fdp = ret_fd;
+
+	return 0;
+
+ free_out:
+	jffs2_free_tmp_dnode_info_list(ret_tn);
+	jffs2_free_full_dirent_list(ret_fd);
+	return err;
+}
+
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
+{
+	spin_lock(&c->inocache_lock);
+	ic->state = state;
+	wake_up(&c->inocache_wq);
+	spin_unlock(&c->inocache_lock);
+}
+
+/* During mount, this needs no locking. During normal operation, its
+   callers want to do other stuff while still holding the inocache_lock.
+   Rather than introducing special case get_ino_cache functions or 
+   callbacks, we just let the caller do the locking itself. */
+   
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inode_cache *ret;
+
+	D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));
+
+	ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
+	while (ret && ret->ino < ino) {
+		ret = ret->next;
+	}
+	
+	if (ret && ret->ino != ino)
+		ret = NULL;
+
+	D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
+	return ret;
+}
+
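+/* The inocache hash chains are kept sorted by ascending inode number;
+   jffs2_get_ino_cache() above and the add/del helpers below all rely on
+   that ordering to stop walking a chain early. */
+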
+void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
+{
+	struct jffs2_inode_cache **prev;
+	D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
+	spin_lock(&c->inocache_lock);
+	
+	prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
+
+	while ((*prev) && (*prev)->ino < new->ino) {
+		prev = &(*prev)->next;
+	}
+	new->next = *prev;
+	*prev = new;
+
+	spin_unlock(&c->inocache_lock);
+}
+
+void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
+{
+	struct jffs2_inode_cache **prev;
+	D2(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
+	spin_lock(&c->inocache_lock);
+	
+	prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
+	
+	while ((*prev) && (*prev)->ino < old->ino) {
+		prev = &(*prev)->next;
+	}
+	if ((*prev) == old) {
+		*prev = old->next;
+	}
+
+	spin_unlock(&c->inocache_lock);
+}
+
+void jffs2_free_ino_caches(struct jffs2_sb_info *c)
+{
+	int i;
+	struct jffs2_inode_cache *this, *next;
+	
+	for (i=0; i<INOCACHE_HASHSIZE; i++) {
+		this = c->inocache_list[i];
+		while (this) {
+			next = this->next;
+			D2(printk(KERN_DEBUG "jffs2_free_ino_caches: Freeing ino #%u at %p\n", this->ino, this));
+			jffs2_free_inode_cache(this);
+			this = next;
+		}
+		c->inocache_list[i] = NULL;
+	}
+}
+
+void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
+{
+	int i;
+	struct jffs2_raw_node_ref *this, *next;
+
+	for (i=0; i<c->nr_blocks; i++) {
+		this = c->blocks[i].first_node;
+		while(this) {
+			next = this->next_phys;
+			jffs2_free_raw_node_ref(this);
+			this = next;
+		}
+		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
+	}
+}
+	
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
+{
+	/* The common case in lookup is that there will be a node 
+	   which precisely matches. So we go looking for that first */
+	struct rb_node *next;
+	struct jffs2_node_frag *prev = NULL;
+	struct jffs2_node_frag *frag = NULL;
+
+	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));
+
+	next = fragtree->rb_node;
+
+	while(next) {
+		frag = rb_entry(next, struct jffs2_node_frag, rb);
+
+		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
+			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
+		if (frag->ofs + frag->size <= offset) {
+			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
+				  frag->ofs, frag->ofs+frag->size));
+			/* Remember the closest smaller match on the way down */
+			if (!prev || frag->ofs > prev->ofs)
+				prev = frag;
+			next = frag->rb.rb_right;
+		} else if (frag->ofs > offset) {
+			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
+				  frag->ofs, frag->ofs+frag->size));
+			next = frag->rb.rb_left;
+		} else {
+			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
+				  frag->ofs, frag->ofs+frag->size));
+			return frag;
+		}
+	}
+
+	/* Exact match not found. Go back up looking at each parent,
+	   and return the closest smaller one */
+
+	if (prev)
+		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
+			  prev->ofs, prev->ofs+prev->size));
+	else 
+		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));
+	
+	return prev;
+}
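+
+/* In short: jffs2_lookup_node_frag() returns the frag covering 'offset' if
+   one exists, otherwise the closest frag lying entirely below 'offset', or
+   NULL if the tree is empty or every frag starts beyond 'offset'. */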
+
+/* Pass 'c' argument to indicate that nodes should be marked obsolete as
+   they're killed. */
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_node_frag *parent;
+
+	if (!root->rb_node)
+		return;
+
+	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
+
+	while(frag) {
+		if (frag->rb.rb_left) {
+			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", 
+				  frag, frag->ofs, frag->ofs+frag->size));
+			frag = frag_left(frag);
+			continue;
+		}
+		if (frag->rb.rb_right) {
+			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", 
+				  frag, frag->ofs, frag->ofs+frag->size));
+			frag = frag_right(frag);
+			continue;
+		}
+
+		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
+			  frag->ofs, frag->ofs+frag->size, frag->node,
+			  frag->node?frag->node->frags:0));
+			
+		if (frag->node && !(--frag->node->frags)) {
+			/* Not a hole, and it's the final remaining frag 
+			   of this node. Free the node */
+			if (c)
+				jffs2_mark_node_obsolete(c, frag->node->raw);
+			
+			jffs2_free_full_dnode(frag->node);
+		}
+		parent = frag_parent(frag);
+		if (parent) {
+			if (frag_left(parent) == frag)
+				parent->rb.rb_left = NULL;
+			else 
+				parent->rb.rb_right = NULL;
+		}
+
+		jffs2_free_node_frag(frag);
+		frag = parent;
+
+		cond_resched();
+	}
+}
+
+void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
+{
+	struct rb_node *parent = &base->rb;
+	struct rb_node **link = &parent;
+
+	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, 
+		  newfrag->ofs, newfrag->ofs+newfrag->size, base));
+
+	while (*link) {
+		parent = *link;
+		base = rb_entry(parent, struct jffs2_node_frag, rb);
+	
+		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
+		if (newfrag->ofs > base->ofs)
+			link = &base->rb.rb_right;
+		else if (newfrag->ofs < base->ofs)
+			link = &base->rb.rb_left;
+		else {
+			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
+			BUG();
+		}
+	}
+
+	rb_link_node(&newfrag->rb, &base->rb, link);
+}
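+
+/* Note that, as with the usual Linux rbtree API, rb_link_node() only links
+   the new node into place; callers are expected to rebalance the tree with
+   rb_insert_color(&newfrag->rb, root) afterwards. */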
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
new file mode 100644
index 0000000..a4864d0
--- /dev/null
+++ b/fs/jffs2/nodelist.h
@@ -0,0 +1,473 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodelist.h,v 1.126 2004/11/19 15:06:29 dedekind Exp $
+ *
+ */
+
+#ifndef __JFFS2_NODELIST_H__
+#define __JFFS2_NODELIST_H__
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_sb.h>
+#include <linux/jffs2_fs_i.h>
+
+#ifdef __ECOS
+#include "os-ecos.h"
+#else
+#include <linux/mtd/compatmac.h> /* For min/max in older kernels */
+#include "os-linux.h"
+#endif
+
+#ifndef CONFIG_JFFS2_FS_DEBUG
+#define CONFIG_JFFS2_FS_DEBUG 1
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
+#define D1(x) x
+#else
+#define D1(x)
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define D2(x) x
+#else
+#define D2(x)
+#endif
+
+#define JFFS2_NATIVE_ENDIAN
+
+/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
+   whatever OS we're actually running on here too. */
+
+#if defined(JFFS2_NATIVE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){x})
+#define cpu_to_je32(x) ((jint32_t){x})
+#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
+
+#define je16_to_cpu(x) ((x).v16)
+#define je32_to_cpu(x) ((x).v32)
+#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
+#elif defined(JFFS2_BIG_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (be16_to_cpu(x.v16))
+#define je32_to_cpu(x) (be32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
+#elif defined(JFFS2_LITTLE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (le16_to_cpu(x.v16))
+#define je32_to_cpu(x) (le32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
+#else 
+#error wibble
+#endif
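+
+/* On-medium fields are accessed through these wrappers -- e.g.
+   je32_to_cpu(node.i.version) to read a 32-bit field and cpu_to_je32(len)
+   to store one. In the native-endian case they are plain copies; in the
+   big/little-endian cases they byte-swap as appropriate. */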
+
+/*
+  This is all we need to keep in-core for each raw node during normal
+  operation. As and when we do read_inode on a particular inode, we can
+  scan the nodes which are listed for it and build up a proper map of 
+  which nodes are currently valid. JFFSv1 always used to keep that whole
+  map in core for each inode.
+*/
+struct jffs2_raw_node_ref
+{
+	struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
+		for this inode. If this is the last, it points to the inode_cache
+		for this inode instead. The inode_cache will have NULL in the first
+		word so you know when you've got there :) */
+	struct jffs2_raw_node_ref *next_phys;
+	uint32_t flash_offset;
+	uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
+};
+
+        /* flash_offset & 3 always has to be zero, because nodes are
+	   always aligned at 4 bytes. So we have a couple of extra bits
+	   to play with, which indicate the node's status; see below: */ 
+#define REF_UNCHECKED	0	/* We haven't yet checked the CRC or built its inode */
+#define REF_OBSOLETE	1	/* Obsolete, can be completely ignored */
+#define REF_PRISTINE	2	/* Completely clean. GC without looking */
+#define REF_NORMAL	3	/* Possibly overlapped. Read the page and write again on GC */
+#define ref_flags(ref)		((ref)->flash_offset & 3)
+#define ref_offset(ref)		((ref)->flash_offset & ~3)
+#define ref_obsolete(ref)	(((ref)->flash_offset & 3) == REF_OBSOLETE)
+#define mark_ref_normal(ref)    do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
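+/* So, for example, a node at flash offset 0x1000 which has had its CRCs
+   checked but may be overlapped is stored as flash_offset == 0x1003
+   (0x1000 | REF_NORMAL), and ref_offset() masks the two state bits off
+   again to recover the real offset. */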
+
+/* For each inode in the filesystem, we need to keep a record of
+   nlink, because it would be a PITA to scan the whole directory tree
+   at read_inode() time to calculate it, and to keep sufficient information
+   in the raw_node_ref (basically both parent and child inode number for 
+   dirent nodes) would take more space than this does. We also keep
+   a pointer to the first physical node which is part of this inode, too.
+*/
+struct jffs2_inode_cache {
+	struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
+		temporary lists of dirents, and later must be set to
+		NULL to mark the end of the raw_node_ref->next_in_ino
+		chain. */
+	struct jffs2_inode_cache *next;
+	struct jffs2_raw_node_ref *nodes;
+	uint32_t ino;
+	int nlink;
+	int state;
+};
+
+/* Inode states for 'state' above. We need the 'GC' state to prevent
+   someone from doing a read_inode() while we're moving a 'REF_PRISTINE'
+   node without going through all the iget() nonsense */
+#define INO_STATE_UNCHECKED	0	/* CRC checks not yet done */
+#define INO_STATE_CHECKING	1	/* CRC checks in progress */
+#define INO_STATE_PRESENT	2	/* In core */
+#define INO_STATE_CHECKEDABSENT	3	/* Checked, cleared again */
+#define INO_STATE_GC		4	/* GCing a 'pristine' node */
+#define INO_STATE_READING	5	/* In read_inode() */
+
+#define INOCACHE_HASHSIZE 128
+
+/*
+  Larger representation of a raw node, kept in-core only when the 
+  struct inode for this particular ino is instantiated.
+*/
+
+struct jffs2_full_dnode
+{
+	struct jffs2_raw_node_ref *raw;
+	uint32_t ofs; /* The offset to which the data of this node belongs */
+	uint32_t size;
+	uint32_t frags; /* Number of fragments which currently refer
+			to this node. When this reaches zero, 
+			the node is obsolete.  */
+};
+
+/* 
+   Even larger representation of a raw node, kept in-core only while
+   we're actually building up the original map of which nodes go where,
+   in read_inode()
+*/
+struct jffs2_tmp_dnode_info
+{
+	struct jffs2_tmp_dnode_info *next;
+	struct jffs2_full_dnode *fn;
+	uint32_t version;
+};       
+
+struct jffs2_full_dirent
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *next;
+	uint32_t version;
+	uint32_t ino; /* == zero for unlink */
+	unsigned int nhash;
+	unsigned char type;
+	unsigned char name[0];
+};
+
+/*
+  Fragments - used to build a map of which raw node to obtain 
+  data from for each part of the ino
+*/
+struct jffs2_node_frag
+{
+	struct rb_node rb;
+	struct jffs2_full_dnode *node; /* NULL for holes */
+	uint32_t size;
+	uint32_t ofs; /* The offset to which this fragment belongs */
+};
+
+struct jffs2_eraseblock
+{
+	struct list_head list;
+	int bad_count;
+	uint32_t offset;		/* of this block in the MTD */
+
+	uint32_t unchecked_size;
+	uint32_t used_size;
+	uint32_t dirty_size;
+	uint32_t wasted_size;
+	uint32_t free_size;	/* Note that sector_size - free_size
+				   is the address of the first free space */
+	struct jffs2_raw_node_ref *first_node;
+	struct jffs2_raw_node_ref *last_node;
+
+	struct jffs2_raw_node_ref *gc_node;	/* Next node to be garbage collected */
+};
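+
+/* For each eraseblock, used_size + dirty_size + wasted_size + unchecked_size
+   + free_size should always add up to c->sector_size; that is exactly the
+   invariant ACCT_SANITY_CHECK() below verifies. */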
+
+#define ACCT_SANITY_CHECK(c, jeb) do { \
+		struct jffs2_eraseblock *___j = jeb; \
+		if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \
+		printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \
+		printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \
+		___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \
+		BUG(); \
+	} \
+	if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \
+		printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \
+		printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \
+		c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \
+		BUG(); \
+	} \
+} while(0)
+
+static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *ref;
+	int i=0;
+
+	printk(KERN_NOTICE);
+	for (ref = jeb->first_node; ref; ref = ref->next_phys) {
+		printk("%08x->", ref_offset(ref));
+		if (++i == 8) {
+			i = 0;
+			printk("\n" KERN_NOTICE);
+		}
+	}
+	printk("\n");
+}
+
+
+#define ACCT_PARANOIA_CHECK(jeb) do { \
+		uint32_t my_used_size = 0; \
+		uint32_t my_unchecked_size = 0; \
+		struct jffs2_raw_node_ref *ref2 = jeb->first_node; \
+		while (ref2) { \
+			if (unlikely(ref2->flash_offset < jeb->offset || \
+				     ref2->flash_offset > jeb->offset + c->sector_size)) { \
+				printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \
+				       ref_offset(ref2), jeb->offset); \
+				paranoia_failed_dump(jeb); \
+				BUG(); \
+			} \
+			if (ref_flags(ref2) == REF_UNCHECKED) \
+				my_unchecked_size += ref_totlen(c, jeb, ref2); \
+			else if (!ref_obsolete(ref2)) \
+				my_used_size += ref_totlen(c, jeb, ref2); \
+			if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \
+                                if (!ref2->next_phys) \
+				       printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \
+				             ref2, ref_offset(ref2), ref2->next_phys, \
+				             jeb->last_node, ref_offset(jeb->last_node)); \
+                                else \
+                                       printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \
+				             ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \
+				             jeb->last_node, ref_offset(jeb->last_node)); \
+				paranoia_failed_dump(jeb); \
+				BUG(); \
+			} \
+			ref2 = ref2->next_phys; \
+		} \
+		if (my_used_size != jeb->used_size) { \
+			printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \
+			BUG(); \
+		} \
+		if (my_unchecked_size != jeb->unchecked_size) { \
+			printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \
+			BUG(); \
+		} \
+	} while(0)
+
+/* Calculate totlen from surrounding nodes or eraseblock */
+static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
+				    struct jffs2_eraseblock *jeb,
+				    struct jffs2_raw_node_ref *ref)
+{
+	uint32_t ref_end;
+	
+	if (ref->next_phys)
+		ref_end = ref_offset(ref->next_phys);
+	else {
+		if (!jeb)
+			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
+		/* Last node in block. Use free_space */
+		BUG_ON(ref != jeb->last_node);
+		ref_end = jeb->offset + c->sector_size - jeb->free_size;
+	}
+	return ref_end - ref_offset(ref);
+}
+
+static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
+				  struct jffs2_eraseblock *jeb,
+				  struct jffs2_raw_node_ref *ref)
+{
+	uint32_t ret;
+
+	D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
+		printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
+		       jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
+		BUG();
+	})
+
+#if 1
+	ret = ref->__totlen;
+#else
+	/* This doesn't actually work yet */
+	ret = __ref_totlen(c, jeb, ref);
+	if (ret != ref->__totlen) {
+		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
+		       ret, ref->__totlen);
+		if (!jeb)
+			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+		paranoia_failed_dump(jeb);
+		BUG();
+	}
+#endif
+	return ret;
+}
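+
+/* For now ref_totlen() simply returns the cached __totlen; the disabled
+   branch above shows the eventual intent of deriving the length from
+   __ref_totlen() so that the __totlen field can be dropped. */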
+
+
+#define ALLOC_NORMAL	0	/* Normal allocation */
+#define ALLOC_DELETION	1	/* Deletion node. Best to allow it */
+#define ALLOC_GC	2	/* Space requested for GC. Give it or die */
+#define ALLOC_NORETRY	3	/* For jffs2_write_dnode: On failure, return -EAGAIN instead of retrying */
+
+/* How much dirty space before it goes on the very_dirty_list */
+#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))
+
+/* Check whether the dirty space exceeds a raw inode header plus JFFS2_MIN_DATA_LEN */
+#define ISDIRTY(size) ((size) >  sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) 
+
+#define PAD(x) (((x)+3)&~3)
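+/* e.g. PAD(1) == 4, PAD(4) == 4, PAD(5) == 8: rounds up to the 4-byte
+   alignment that the REF_* state bits in flash_offset depend on. */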
+
+static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
+{
+	while(raw->next_in_ino) {
+		raw = raw->next_in_ino;
+	}
+
+	return ((struct jffs2_inode_cache *)raw);
+}
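+
+/* This works because the last raw_node_ref in an inode's next_in_ino chain
+   points at the jffs2_inode_cache itself, whose first word (scan_dents) is
+   NULL once scanning is done -- see the struct comments above. */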
+
+static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
+{
+	struct rb_node *node = root->rb_node;
+
+	if (!node)
+		return NULL;
+	while(node->rb_left)
+		node = node->rb_left;
+	return rb_entry(node, struct jffs2_node_frag, rb);
+}
+#define rb_parent(rb) ((rb)->rb_parent)
+#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
+#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
+#define frag_erase(frag, list) rb_erase(&frag->rb, list);
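+/* Since 'rb' is the first member of struct jffs2_node_frag, rb_entry() of a
+   NULL rb_node is itself NULL, so an in-order walk of a whole fragtree can
+   be written without a separate end-of-tree check:
+
+	for (frag = frag_first(fragtree); frag; frag = frag_next(frag))
+		... deal with frag ...
+*/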
+
+/* nodelist.c */
+D2(void jffs2_print_frag_list(struct jffs2_inode_info *f));
+void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
+int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			  struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
+			  uint32_t *highest_version, uint32_t *latest_mctime,
+			  uint32_t *mctime_ver);
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
+void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
+void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old);
+void jffs2_free_ino_caches(struct jffs2_sb_info *c);
+void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
+void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base);
+struct rb_node *rb_next(struct rb_node *);
+struct rb_node *rb_prev(struct rb_node *);
+void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
+
+/* nodemgmt.c */
+int jffs2_thread_should_wake(struct jffs2_sb_info *c);
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio);
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
+void jffs2_complete_reservation(struct jffs2_sb_info *c);
+void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
+void jffs2_dump_block_lists(struct jffs2_sb_info *c);
+
+/* write.c */
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
+
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode);
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode);
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			    struct jffs2_raw_inode *ri, unsigned char *buf, 
+			    uint32_t offset, uint32_t writelen, uint32_t *retlen);
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen);
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f);
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen);
+
+
+/* readinode.c */
+void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 
+			uint32_t ino, struct jffs2_raw_inode *latest_node);
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
+
+/* malloc.c */
+int jffs2_create_slab_caches(void);
+void jffs2_destroy_slab_caches(void);
+
+struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize);
+void jffs2_free_full_dirent(struct jffs2_full_dirent *);
+struct jffs2_full_dnode *jffs2_alloc_full_dnode(void);
+void jffs2_free_full_dnode(struct jffs2_full_dnode *);
+struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void);
+void jffs2_free_raw_dirent(struct jffs2_raw_dirent *);
+struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
+void jffs2_free_raw_inode(struct jffs2_raw_inode *);
+struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
+void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
+struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void);
+void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *);
+struct jffs2_node_frag *jffs2_alloc_node_frag(void);
+void jffs2_free_node_frag(struct jffs2_node_frag *);
+struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
+void jffs2_free_inode_cache(struct jffs2_inode_cache *);
+
+/* gc.c */
+int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
+
+/* read.c */
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     struct jffs2_full_dnode *fd, unsigned char *buf,
+		     int ofs, int len);
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			   unsigned char *buf, uint32_t offset, uint32_t len);
+char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
+
+/* scan.c */
+int jffs2_scan_medium(struct jffs2_sb_info *c);
+void jffs2_rotate_lists(struct jffs2_sb_info *c);
+
+/* build.c */
+int jffs2_do_mount_fs(struct jffs2_sb_info *c);
+
+/* erase.c */
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+
+#ifdef CONFIG_JFFS2_FS_NAND
+/* wbuf.c */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+#endif
+
+#endif /* __JFFS2_NODELIST_H__ */
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
new file mode 100644
index 0000000..2651135
--- /dev/null
+++ b/fs/jffs2/nodemgmt.c
@@ -0,0 +1,838 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodemgmt.c,v 1.115 2004/11/22 11:07:21 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include <linux/sched.h> /* For cond_resched() */
+#include "nodelist.h"
+
+/**
+ *	jffs2_reserve_space - request physical space to write nodes to flash
+ *	@c: superblock info
+ *	@minsize: Minimum acceptable size of allocation
+ *	@ofs: Returned value of node offset
+ *	@len: Returned value of allocation length
+ *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
+ *
+ *	Requests a block of physical space on the flash. Returns zero for success
+ *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
+ *	or other error if appropriate.
+ *
+ *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
+ *	allocation semaphore, to prevent more than one allocation from being
+ *	active at any time. The semaphore is later released by jffs2_complete_reservation().
+ *
+ *	jffs2_reserve_space() may trigger garbage collection in order to make room
+ *	for the requested allocation.
+ */
+
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len);
+
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
+{
+	int ret = -EAGAIN;
+	int blocksneeded = c->resv_blocks_write;
+	/* align it */
+	minsize = PAD(minsize);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
+	down(&c->alloc_sem);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
+
+	spin_lock(&c->erase_completion_lock);
+
+	/* this needs a little more thought (true <tglx> :)) */
+	while(ret == -EAGAIN) {
+		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
+			int ret;
+			uint32_t dirty, avail;
+
+			/* Calculate the real dirty size.
+			 * dirty_size contains blocks on the erase_pending_list;
+			 * those blocks are also counted in c->nr_erasing_blocks.
+			 * Once a block has actually been erased it is no longer counted
+			 * as dirty_space, but it is still counted in c->nr_erasing_blocks,
+			 * so we add c->erasing_size and subtract
+			 * c->nr_erasing_blocks * c->sector_size again.
+			 * Blocks on the erasable_list are counted in dirty_size, but not
+			 * in c->nr_erasing_blocks. This helps us to force GC and eventually
+			 * pick a clean block to spread the load.
+			 * We add unchecked_size here, as we will hopefully find some space
+			 * to use there. This only affects the sum once, since GC finishes
+			 * checking those nodes first.
+			 */
+			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
+			if (dirty < c->nospc_dirty_size) {
+				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+					printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
+					break;
+				}
+				D1(printk(KERN_DEBUG "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
+					  dirty, c->unchecked_size, c->nospc_dirty_size));
+
+				spin_unlock(&c->erase_completion_lock);
+				up(&c->alloc_sem);
+				return -ENOSPC;
+			}
+			
+			/* Calculate the space that is possibly available, i.e. we don't
+			 * know whether the unchecked size contains obsoleted nodes, which
+			 * could give us some more usable space. This only affects the sum
+			 * once, since GC finishes checking those nodes first.
+			 * Return -ENOSPC if the maximum possibly available space is less
+			 * than or equal to blocksneeded * sector_size.
+			 * This prevents endless GC looping on a nearly full filesystem,
+			 * even if the check above passes.
+			 */
+			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
+			if ( (avail / c->sector_size) <= blocksneeded) {
+				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+					printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
+					  avail, blocksneeded * c->sector_size));
+				spin_unlock(&c->erase_completion_lock);
+				up(&c->alloc_sem);
+				return -ENOSPC;
+			}
+
+			up(&c->alloc_sem);
+
+			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
+				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
+				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
+			spin_unlock(&c->erase_completion_lock);
+			
+			ret = jffs2_garbage_collect_pass(c);
+			if (ret)
+				return ret;
+
+			cond_resched();
+
+			if (signal_pending(current))
+				return -EINTR;
+
+			down(&c->alloc_sem);
+			spin_lock(&c->erase_completion_lock);
+		}
+
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+		if (ret) {
+			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
+		}
+	}
+	spin_unlock(&c->erase_completion_lock);
+	if (ret)
+		up(&c->alloc_sem);
+	return ret;
+}
+
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
+{
+	int ret = -EAGAIN;
+	minsize = PAD(minsize);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+
+	spin_lock(&c->erase_completion_lock);
+	while(ret == -EAGAIN) {
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+		if (ret) {
+		        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+		}
+	}
+	spin_unlock(&c->erase_completion_lock);
+	return ret;
+}
+
+/* Called with alloc sem _and_ erase_completion_lock */
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
+{
+	struct jffs2_eraseblock *jeb = c->nextblock;
+	
+ restart:
+	if (jeb && minsize > jeb->free_size) {
+		/* Skip the end of this block and file it as having some dirty space */
+		/* If there's a pending write to it, flush now */
+		if (jffs2_wbuf_dirty(c)) {
+			spin_unlock(&c->erase_completion_lock);
+			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));			    
+			jffs2_flush_wbuf_pad(c);
+			spin_lock(&c->erase_completion_lock);
+			jeb = c->nextblock;
+			goto restart;
+		}
+		c->wasted_size += jeb->free_size;
+		c->free_size -= jeb->free_size;
+		jeb->wasted_size += jeb->free_size;
+		jeb->free_size = 0;
+		
+		/* Check if we have a dirty block now, or if it was dirty already */
+		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
+			c->dirty_size += jeb->wasted_size;
+			c->wasted_size -= jeb->wasted_size;
+			jeb->dirty_size += jeb->wasted_size;
+			jeb->wasted_size = 0;
+			if (VERYDIRTY(c, jeb->dirty_size)) {
+				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+				list_add_tail(&jeb->list, &c->very_dirty_list);
+			} else {
+				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+				list_add_tail(&jeb->list, &c->dirty_list);
+			}
+		} else { 
+			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+			list_add_tail(&jeb->list, &c->clean_list);
+		}
+		c->nextblock = jeb = NULL;
+	}
+	
+	if (!jeb) {
+		struct list_head *next;
+		/* Take the next block off the 'free' list */
+
+		if (list_empty(&c->free_list)) {
+
+			if (!c->nr_erasing_blocks && 
+			    !list_empty(&c->erasable_list)) {
+				struct jffs2_eraseblock *ejeb;
+
+				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
+				list_del(&ejeb->list);
+				list_add_tail(&ejeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+				jffs2_erase_pending_trigger(c);
+				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
+					  ejeb->offset));
+			}
+
+			if (!c->nr_erasing_blocks && 
+			    !list_empty(&c->erasable_pending_wbuf_list)) {
+				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
+				/* c->nextblock is NULL, no update to c->nextblock allowed */			    
+				spin_unlock(&c->erase_completion_lock);
+				jffs2_flush_wbuf_pad(c);
+				spin_lock(&c->erase_completion_lock);
+				/* Have another go. It'll be on the erasable_list now */
+				return -EAGAIN;
+			}
+
+			if (!c->nr_erasing_blocks) {
+				/* Ouch. We're in GC, or we wouldn't have got here.
+				   And there's no space left. At all. */
+				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
+				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
+				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+				return -ENOSPC;
+			}
+
+			spin_unlock(&c->erase_completion_lock);
+			/* Don't wait for it; just erase one right now */
+			jffs2_erase_pending_blocks(c, 1);
+			spin_lock(&c->erase_completion_lock);
+
+			/* An erase may have failed, decreasing the
+			   amount of free space available. So we must
+			   restart from the beginning */
+			return -EAGAIN;
+		}
+
+		next = c->free_list.next;
+		list_del(next);
+		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
+		c->nr_free_blocks--;
+
+		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
+			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
+			goto restart;
+		}
+	}
+	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
+	   enough space */
+	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
+	*len = jeb->free_size;
+
+	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
+	    !jeb->first_node->next_in_ino) {
+		/* The only node in it beforehand was a CLEANMARKER node (we think).
+		   So mark it obsolete now that there's going to be another node
+		   in the block. This will reduce used_size to zero, but we've
+		   already set c->nextblock so that jffs2_mark_node_obsolete()
+		   won't try to refile it to the dirty_list.
+		*/
+		spin_unlock(&c->erase_completion_lock);
+		jffs2_mark_node_obsolete(c, jeb->first_node);
+		spin_lock(&c->erase_completion_lock);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
+	return 0;
+}
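+
+/* jffs2_do_reserve_space() returns 0 with *ofs/*len filled in, -EAGAIN when
+   the caller should simply retry (e.g. after a write-buffer flush or an
+   erase has changed the picture), or -ENOSPC when nothing is left at all.
+   Both callers above loop on -EAGAIN while holding the erase_completion_lock. */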
+
+/**
+ *	jffs2_add_physical_node_ref - add a physical node reference to the list
+ *	@c: superblock info
+ *	@new: new node reference to add
+ *
+ *	Should only be used to report nodes for which space has been allocated 
+ *	by jffs2_reserve_space.
+ *
+ *	Must be called with the alloc_sem held.
+ */
+ 
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
+{
+	struct jffs2_eraseblock *jeb;
+	uint32_t len;
+
+	jeb = &c->blocks[new->flash_offset / c->sector_size];
+	len = ref_totlen(c, jeb, new);
+
+	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
+#if 1
+	if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
+		printk(KERN_WARNING "argh. node added in wrong place\n");
+		jffs2_free_raw_node_ref(new);
+		return -EINVAL;
+	}
+#endif
+	spin_lock(&c->erase_completion_lock);
+
+	if (!jeb->first_node)
+		jeb->first_node = new;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = new;
+	jeb->last_node = new;
+
+	jeb->free_size -= len;
+	c->free_size -= len;
+	if (ref_obsolete(new)) {
+		jeb->dirty_size += len;
+		c->dirty_size += len;
+	} else {
+		jeb->used_size += len;
+		c->used_size += len;
+	}
+
+	if (!jeb->free_size && !jeb->dirty_size) {
+		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
+		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+		if (jffs2_wbuf_dirty(c)) {
+			/* Flush the last write in the block if it's outstanding */
+			spin_unlock(&c->erase_completion_lock);
+			jffs2_flush_wbuf_pad(c);
+			spin_lock(&c->erase_completion_lock);
+		}
+
+		list_add_tail(&jeb->list, &c->clean_list);
+		c->nextblock = NULL;
+	}
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	return 0;
+}
+
+
+void jffs2_complete_reservation(struct jffs2_sb_info *c)
+{
+	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
+	jffs2_garbage_collect_trigger(c);
+	up(&c->alloc_sem);
+}
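+
+/* This pairs with the down(&c->alloc_sem) taken by a successful
+   jffs2_reserve_space(); each reservation is expected to be completed
+   exactly once. */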
+
+static inline int on_list(struct list_head *obj, struct list_head *head)
+{
+	struct list_head *this;
+
+	list_for_each(this, head) {
+		if (this == obj) {
+			D1(printk("%p is on list at %p\n", obj, head));
+			return 1;
+
+		}
+	}
+	return 0;
+}
+
+void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
+{
+	struct jffs2_eraseblock *jeb;
+	int blocknr;
+	struct jffs2_unknown_node n;
+	int ret, addedsize;
+	size_t retlen;
+
+	if(!ref) {
+		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
+		return;
+	}
+	if (ref_obsolete(ref)) {
+		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
+		return;
+	}
+	blocknr = ref->flash_offset / c->sector_size;
+	if (blocknr >= c->nr_blocks) {
+		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
+		BUG();
+	}
+	jeb = &c->blocks[blocknr];
+
+	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
+	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
+		/* Hm. This may confuse static lock analysis. If any of the above 
+		   three conditions is false, we're going to return from this 
+		   function without actually obliterating any nodes or freeing
+		   any jffs2_raw_node_refs. So we don't need to stop erases from
+		   happening, or protect against people holding an obsolete
+		   jffs2_raw_node_ref without the erase_completion_lock. */
+		down(&c->erase_free_sem);
+	}
+
+	spin_lock(&c->erase_completion_lock);
+
+	if (ref_flags(ref) == REF_UNCHECKED) {
+		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
+			BUG();
+		})
+		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
+		c->unchecked_size -= ref_totlen(c, jeb, ref);
+	} else {
+		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+			BUG();
+		})
+		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->used_size -= ref_totlen(c, jeb, ref);
+		c->used_size -= ref_totlen(c, jeb, ref);
+	}
+
+	/* Take care that wasted size is taken into account */
+	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+		D1(printk("Dirtying\n"));
+		addedsize = ref_totlen(c, jeb, ref);
+		jeb->dirty_size += ref_totlen(c, jeb, ref);
+		c->dirty_size += ref_totlen(c, jeb, ref);
+
+		/* Convert wasted space to dirty, if not a bad block */
+		if (jeb->wasted_size) {
+			if (on_list(&jeb->list, &c->bad_used_list)) {
+				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
+					  jeb->offset));
+				addedsize = 0; /* To fool the refiling code later */
+			} else {
+				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
+					  jeb->wasted_size, jeb->offset));
+				addedsize += jeb->wasted_size;
+				jeb->dirty_size += jeb->wasted_size;
+				c->dirty_size += jeb->wasted_size;
+				c->wasted_size -= jeb->wasted_size;
+				jeb->wasted_size = 0;
+			}
+		}
+	} else {
+		D1(printk("Wasting\n"));
+		addedsize = 0;
+		jeb->wasted_size += ref_totlen(c, jeb, ref);
+		c->wasted_size += ref_totlen(c, jeb, ref);	
+	}
+	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
+	
+	ACCT_SANITY_CHECK(c, jeb);
+
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
+		/* Mount in progress. Don't muck about with the block
+		   lists because they're not ready yet, and don't actually
+		   obliterate nodes that look obsolete. If they weren't 
+		   marked obsolete on the flash at the time they _became_
+		   obsolete, there was probably a reason for that. */
+		spin_unlock(&c->erase_completion_lock);
+		/* We didn't lock the erase_free_sem */
+		return;
+	}
+
+	if (jeb == c->nextblock) {
+		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
+	} else if (!jeb->used_size && !jeb->unchecked_size) {
+		if (jeb == c->gcblock) {
+			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
+			c->gcblock = NULL;
+		} else {
+			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
+			list_del(&jeb->list);
+		}
+		if (jffs2_wbuf_dirty(c)) {
+			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
+			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
+		} else {
+			if (jiffies & 127) {
+				/* Most of the time, we just erase it immediately. Otherwise we
+				   spend ages scanning it on mount, etc. */
+				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+				list_add_tail(&jeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+				jffs2_erase_pending_trigger(c);
+			} else {
+				/* Sometimes, however, we leave it elsewhere so it doesn't get
+				   immediately reused, and we spread the load a bit. */
+				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+				list_add_tail(&jeb->list, &c->erasable_list);
+			}				
+		}
+		D1(printk(KERN_DEBUG "Done OK\n"));
+	} else if (jeb == c->gcblock) {
+		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
+	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
+		list_del(&jeb->list);
+		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
+		list_add_tail(&jeb->list, &c->dirty_list);
+	} else if (VERYDIRTY(c, jeb->dirty_size) &&
+		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
+		list_del(&jeb->list);
+		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
+		list_add_tail(&jeb->list, &c->very_dirty_list);
+	} else {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 
+	}			  	
+
+	spin_unlock(&c->erase_completion_lock);
+
+	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
+		/* We didn't lock the erase_free_sem */
+		return;
+	}
+
+	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
+	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
+	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
+	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */
+
+	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
+	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
+	if (ret) {
+		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		goto out_erase_sem;
+	}
+	if (retlen != sizeof(n)) {
+		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		goto out_erase_sem;
+	}
+	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
+		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+		goto out_erase_sem;
+	}
+	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
+		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+		goto out_erase_sem;
+	}
+	/* XXX FIXME: This is ugly now */
+	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
+	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
+	if (ret) {
+		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		goto out_erase_sem;
+	}
+	if (retlen != sizeof(n)) {
+		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		goto out_erase_sem;
+	}
+
+	/* Nodes which have been marked obsolete no longer need to be
+	   associated with any inode. Remove them from the per-inode list.
+	   
+	   Note we can't do this for NAND at the moment because we need 
+	   obsolete dirent nodes to stay on the lists, because of the
+	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
+	   because we delete the inocache, and on NAND we need that to 
+	   stay around until all the nodes are actually erased, in order
+	   to stop us from giving the same inode number to another newly
+	   created inode. */
+	if (ref->next_in_ino) {
+		struct jffs2_inode_cache *ic;
+		struct jffs2_raw_node_ref **p;
+
+		spin_lock(&c->erase_completion_lock);
+
+		ic = jffs2_raw_ref_to_ic(ref);
+		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
+			;
+
+		*p = ref->next_in_ino;
+		ref->next_in_ino = NULL;
+
+		if (ic->nodes == (void *)ic) {
+			D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
+			jffs2_del_ino_cache(c, ic);
+			jffs2_free_inode_cache(ic);
+		}
+
+		spin_unlock(&c->erase_completion_lock);
+	}
+
+
+	/* Merge with the next node in the physical list, if there is one
+	   and if it's also obsolete and if it doesn't belong to any inode */
+	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
+	    !ref->next_phys->next_in_ino) {
+		struct jffs2_raw_node_ref *n = ref->next_phys;
+		
+		spin_lock(&c->erase_completion_lock);
+
+		ref->__totlen += n->__totlen;
+		ref->next_phys = n->next_phys;
+		if (jeb->last_node == n)
+			jeb->last_node = ref;
+		if (jeb->gc_node == n) {
+			/* gc will be happy continuing gc on this node */
+			jeb->gc_node=ref;
+		}
+		spin_unlock(&c->erase_completion_lock);
+
+		jffs2_free_raw_node_ref(n);
+	}
+	
+	/* Also merge with the previous node in the list, if there is one
+	   and that one is obsolete */
+	if (ref != jeb->first_node ) {
+		struct jffs2_raw_node_ref *p = jeb->first_node;
+
+		spin_lock(&c->erase_completion_lock);
+
+		while (p->next_phys != ref)
+			p = p->next_phys;
+		
+		if (ref_obsolete(p) && !ref->next_in_ino) {
+			p->__totlen += ref->__totlen;
+			if (jeb->last_node == ref) {
+				jeb->last_node = p;
+			}
+			if (jeb->gc_node == ref) {
+				/* gc will be happy continuing gc on this node */
+				jeb->gc_node=p;
+			}
+			p->next_phys = ref->next_phys;
+			jffs2_free_raw_node_ref(ref);
+		}
+		spin_unlock(&c->erase_completion_lock);
+	}
+ out_erase_sem:
+	up(&c->erase_free_sem);
+}
+
+#if CONFIG_JFFS2_FS_DEBUG >= 2
+void jffs2_dump_block_lists(struct jffs2_sb_info *c)
+{
+
+
+	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
+	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
+	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
+	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
+	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
+	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
+	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
+	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
+	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
+	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
+	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
+
+	if (c->nextblock) {
+		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
+	} else {
+		printk(KERN_DEBUG "nextblock: NULL\n");
+	}
+	if (c->gcblock) {
+		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
+	} else {
+		printk(KERN_DEBUG "gcblock: NULL\n");
+	}
+	if (list_empty(&c->clean_list)) {
+		printk(KERN_DEBUG "clean_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->clean_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->wasted_size;
+			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->very_dirty_list)) {
+		printk(KERN_DEBUG "very_dirty_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->very_dirty_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->dirty_size;
+			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+			numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->dirty_list)) {
+		printk(KERN_DEBUG "dirty_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->dirty_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->dirty_size;
+			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+			numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->erasable_list)) {
+		printk(KERN_DEBUG "erasable_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasable_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erasing_list)) {
+		printk(KERN_DEBUG "erasing_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasing_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erase_pending_list)) {
+		printk(KERN_DEBUG "erase_pending_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erase_pending_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erasable_pending_wbuf_list)) {
+		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasable_pending_wbuf_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->free_list)) {
+		printk(KERN_DEBUG "free_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->free_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->bad_list)) {
+		printk(KERN_DEBUG "bad_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->bad_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->bad_used_list)) {
+		printk(KERN_DEBUG "bad_used_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->bad_used_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+}
+#endif /* CONFIG_JFFS2_FS_DEBUG */
+
+int jffs2_thread_should_wake(struct jffs2_sb_info *c)
+{
+	int ret = 0;
+	uint32_t dirty;
+
+	if (c->unchecked_size) {
+		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+			  c->unchecked_size, c->checked_ino));
+		return 1;
+	}
+
+	/* dirty_size includes blocks on the erase_pending_list;
+	 * those blocks are also counted in c->nr_erasing_blocks.
+	 * Once a block is actually being erased it is no longer counted in
+	 * dirty_size but in erasing_size, yet it is still counted in
+	 * c->nr_erasing_blocks, so we add c->erasing_size and then subtract
+	 * c->nr_erasing_blocks * c->sector_size again.
+	 * Blocks on the erasable_list are counted in dirty_size but not in
+	 * c->nr_erasing_blocks. This helps us to force GC and eventually pick
+	 * a clean block, to spread the load.
+	 */
+	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
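+	/* Illustrative example (made-up numbers): with a 0x20000-byte sector,
+	 * one fully-obsoleted block queued for erase (0x20000 in dirty_size,
+	 * counted in nr_erasing_blocks) and one half-dirty block (0x10000 in
+	 * dirty_size), and nothing currently erasing, this gives
+	 * dirty = 0x30000 + 0 - 1*0x20000 = 0x10000: only the half-dirty
+	 * block counts towards the GC trigger. */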
+
+	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && 
+			(dirty > c->nospc_dirty_size)) 
+		ret = 1;
+
+	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", 
+		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
+
+	return ret;
+}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
new file mode 100644
index 0000000..03b0acc
--- /dev/null
+++ b/fs/jffs2/os-linux.h
@@ -0,0 +1,217 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2002-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: os-linux.h,v 1.51 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#ifndef __JFFS2_OS_LINUX_H__
+#define __JFFS2_OS_LINUX_H__
+#include <linux/version.h>
+
+/* JFFS2 uses Linux mode bits natively -- no need for conversion */
+#define os_to_jffs2_mode(x) (x)
+#define jffs2_to_os_mode(x) (x)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,73)
+#define kstatfs statfs
+#endif
+
+struct kstatfs;
+struct kvec;
+
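+/* Conversions between the VFS objects and the JFFS2-private structures.
+   JFFS2_INODE_INFO() and JFFS2_SB_INFO() map a VFS inode/superblock to the
+   private info; the reversed-name macros OFNI_EDONI_2SFFJ() and
+   OFNI_BS_2SFFJ() map back the other way. */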
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+#define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode))
+#define OFNI_EDONI_2SFFJ(f)  (&(f)->vfs_inode)
+#define JFFS2_SB_INFO(sb) (sb->s_fs_info)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *)c->os_priv)
+#elif defined(JFFS2_OUT_OF_KERNEL)
+#define JFFS2_INODE_INFO(i) ((struct jffs2_inode_info *) &(i)->u)
+#define OFNI_EDONI_2SFFJ(f)  ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) )
+#define JFFS2_SB_INFO(sb) ((struct jffs2_sb_info *) &(sb)->u)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) )
+#else
+#define JFFS2_INODE_INFO(i) (&i->u.jffs2_i)
+#define OFNI_EDONI_2SFFJ(f)  ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) )
+#define JFFS2_SB_INFO(sb) (&sb->u.jffs2_sb)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) )
+#endif
+
+
+#define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
+#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
+#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
+#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,1)
+#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f)))
+#else
+#define JFFS2_F_I_RDEV_MIN(f) (MINOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev)))
+#define JFFS2_F_I_RDEV_MAJ(f) (MAJOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev)))
+#endif
+
+/* Urgh. The things we do to keep the 2.4 build working */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,47)
+#define ITIME(sec) ((struct timespec){sec, 0})
+#define I_SEC(tv) ((tv).tv_sec)
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec)
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec)
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec)
+#else
+#define ITIME(x) (x)
+#define I_SEC(x) (x)
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime)
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime)
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime)
+#endif
+
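+/* Queue the current task on 'wq' and sleep uninterruptibly, dropping
+   spinlock 's' only after the task is on the wait queue so that a racing
+   wakeup cannot be missed. */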
+#define sleep_on_spinunlock(wq, s)				\
+	do {							\
+		DECLARE_WAITQUEUE(__wait, current);		\
+		add_wait_queue((wq), &__wait);			\
+		set_current_state(TASK_UNINTERRUPTIBLE);	\
+		spin_unlock(s);					\
+		schedule();					\
+		remove_wait_queue((wq), &__wait);		\
+	} while(0)
+
+static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+	f->highest_version = 0;
+	f->fragtree = RB_ROOT;
+	f->metadata = NULL;
+	f->dents = NULL;
+	f->flags = 0;
+	f->usercompr = 0;
+#else
+	memset(f, 0, sizeof(*f));
+	init_MUTEX_LOCKED(&f->sem);
+#endif
+}
+
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+
+#if (!defined CONFIG_JFFS2_FS_NAND && !defined CONFIG_JFFS2_FS_NOR_ECC)
+#define jffs2_can_mark_obsolete(c) (1)
+#define jffs2_cleanmarker_oob(c) (0)
+#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
+
+#define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; })
+#define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; })
+#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
+#define jffs2_nand_flash_setup(c) (0)
+#define jffs2_nand_flash_cleanup(c) do {} while(0)
+#define jffs2_wbuf_dirty(c) (0)
+#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
+#define jffs2_wbuf_timeout NULL
+#define jffs2_wbuf_process NULL
+#define jffs2_nor_ecc(c) (0)
+#define jffs2_nor_ecc_flash_setup(c) (0)
+#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
+
+#else /* NAND and/or ECC'd NOR support present */
+
+#define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM)
+#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
+
+#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
+
+/* wbuf.c */
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino);
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf);
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf);
+int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,int mode);
+int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+void jffs2_wbuf_timeout(unsigned long data);
+void jffs2_wbuf_process(void *data);
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
+#ifdef CONFIG_JFFS2_FS_NOR_ECC
+#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC))
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
+#else
+#define jffs2_nor_ecc(c) (0)
+#define jffs2_nor_ecc_flash_setup(c) (0)
+#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
+#endif /* NOR ECC */
+#endif /* NAND */
+
+/* erase.c */
+static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+{
+	OFNI_BS_2SFFJ(c)->s_dirt = 1;
+}
+
+/* background.c */
+int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
+
+/* dir.c */
+extern struct file_operations jffs2_dir_operations;
+extern struct inode_operations jffs2_dir_inode_operations;
+
+/* file.c */
+extern struct file_operations jffs2_file_operations;
+extern struct inode_operations jffs2_file_inode_operations;
+extern struct address_space_operations jffs2_file_address_operations;
+int jffs2_fsync(struct file *, struct dentry *, int);
+int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
+
+/* ioctl.c */
+int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+/* symlink.c */
+extern struct inode_operations jffs2_symlink_inode_operations;
+
+/* fs.c */
+int jffs2_setattr (struct dentry *, struct iattr *);
+void jffs2_read_inode (struct inode *);
+void jffs2_clear_inode (struct inode *);
+void jffs2_dirty_inode(struct inode *inode);
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
+			       struct jffs2_raw_inode *ri);
+int jffs2_statfs (struct super_block *, struct kstatfs *);
+void jffs2_write_super (struct super_block *);
+int jffs2_remount_fs (struct super_block *, int *, char *);
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+			    struct jffs2_inode_info *f);
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+					      int inum, int nlink);
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, 
+				   struct jffs2_inode_info *f, 
+				   unsigned long offset,
+				   unsigned long *priv);
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+			   unsigned char *pg,
+			   unsigned long *priv);
+void jffs2_flash_cleanup(struct jffs2_sb_info *c);
+     
+
+/* writev.c */
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, 
+		       unsigned long count, loff_t to, size_t *retlen);
+
+
+#endif /* __JFFS2_OS_LINUX_H__ */
+
+
diff --git a/fs/jffs2/pushpull.h b/fs/jffs2/pushpull.h
new file mode 100644
index 0000000..c0c2a91
--- /dev/null
+++ b/fs/jffs2/pushpull.h
@@ -0,0 +1,72 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: pushpull.h,v 1.10 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#ifndef __PUSHPULL_H__
+#define __PUSHPULL_H__
+
+#include <linux/errno.h>
+
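+/* Simple bit-stream helper. 'buf' holds the data; 'buflen' and 'reserve' are
+   measured in bits; 'ofs' is the current bit offset. pushbit() refuses to
+   write into the final 'reserve' bits unless use_reserved is set, returning
+   -ENOSPC instead. A minimal usage sketch (hypothetical buffer):
+
+	struct pushpull pp;
+	char space[16];
+
+	init_pushpull(&pp, space, sizeof(space) * 8, 0, 8);
+	pushbit(&pp, 1, 0);
+*/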
+struct pushpull {
+	unsigned char *buf;
+	unsigned int buflen;
+	unsigned int ofs;
+	unsigned int reserve;
+};
+
+
+static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve)
+{
+	pp->buf = buf;
+	pp->buflen = buflen;
+	pp->ofs = ofs;
+	pp->reserve = reserve;
+}
+
+static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
+{
+	if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) {
+		return -ENOSPC;
+	}
+
+	if (bit) {
+		pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7)));
+	}
+	else {
+		pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7)));
+	}
+	pp->ofs++;
+
+	return 0;
+}
+
+static inline int pushedbits(struct pushpull *pp)
+{
+	return pp->ofs;
+}
+
+static inline int pullbit(struct pushpull *pp)
+{
+	int bit;
+
+	bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;
+
+	pp->ofs++;
+	return bit;
+}
+
+static inline int pulledbits(struct pushpull *pp)
+{
+	return pp->ofs;
+}
+
+#endif /* __PUSHPULL_H__ */
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
new file mode 100644
index 0000000..eb493dc
--- /dev/null
+++ b/fs/jffs2/read.c
@@ -0,0 +1,246 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: read.c,v 1.38 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+#include "compr.h"
+
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     struct jffs2_full_dnode *fd, unsigned char *buf,
+		     int ofs, int len)
+{
+	struct jffs2_raw_inode *ri;
+	size_t readlen;
+	uint32_t crc;
+	unsigned char *decomprbuf = NULL;
+	unsigned char *readbuf = NULL;
+	int ret = 0;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+
+	ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
+		return ret;
+	}
+	if (readlen != sizeof(*ri)) {
+		jffs2_free_raw_inode(ri);
+		printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", 
+		       ref_offset(fd->raw), sizeof(*ri), readlen);
+		return -EIO;
+	}
+	crc = crc32(0, ri, sizeof(*ri)-8);
+
+	D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
+		  ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
+		  crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
+		  je32_to_cpu(ri->offset), buf));
+	if (crc != je32_to_cpu(ri->node_crc)) {
+		printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
+		       je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
+		ret = -EIO;
+		goto out_ri;
+	}
+	/* There was a bug where we wrote hole nodes out with csize/dsize
+	   swapped. Deal with it */
+	if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && 
+	    je32_to_cpu(ri->csize)) {
+		ri->dsize = ri->csize;
+		ri->csize = cpu_to_je32(0);
+	}
+
+	D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
+		printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
+		       len, ofs, je32_to_cpu(ri->dsize));
+		ret = -EINVAL;
+		goto out_ri;
+	});
+
+	
+	if (ri->compr == JFFS2_COMPR_ZERO) {
+		memset(buf, 0, len);
+		goto out_ri;
+	}
+
+	/* Cases:
+	   Reading whole node and it's uncompressed - read directly to buffer provided, check CRC.
+	   Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided 
+	   Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy 
+	   Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
+	*/
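+	/* In terms of the three buffers below: readbuf receives the raw data
+	   from flash, decomprbuf holds the uncompressed data, buf is the
+	   caller's buffer.
+	     whole + uncompressed:   readbuf == decomprbuf == buf
+	     whole + compressed:     readbuf is allocated, decomprbuf == buf
+	     partial + uncompressed: readbuf == decomprbuf allocated, then memcpy to buf
+	     partial + compressed:   both allocated, then memcpy to buf */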
+	if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
+		readbuf = buf;
+	} else {
+		readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
+		if (!readbuf) {
+			ret = -ENOMEM;
+			goto out_ri;
+		}
+	}
+	if (ri->compr != JFFS2_COMPR_NONE) {
+		if (len < je32_to_cpu(ri->dsize)) {
+			decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
+			if (!decomprbuf) {
+				ret = -ENOMEM;
+				goto out_readbuf;
+			}
+		} else {
+			decomprbuf = buf;
+		}
+	} else {
+		decomprbuf = readbuf;
+	}
+
+	D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
+		  readbuf));
+	ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
+			       je32_to_cpu(ri->csize), &readlen, readbuf);
+
+	if (!ret && readlen != je32_to_cpu(ri->csize))
+		ret = -EIO;
+	if (ret)
+		goto out_decomprbuf;
+
+	crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
+	if (crc != je32_to_cpu(ri->data_crc)) {
+		printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
+		       je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
+		ret = -EIO;
+		goto out_decomprbuf;
+	}
+	D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
+	if (ri->compr != JFFS2_COMPR_NONE) {
+		D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
+			  je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); 
+		ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
+		if (ret) {
+			printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
+			goto out_decomprbuf;
+		}
+	}
+
+	if (len < je32_to_cpu(ri->dsize)) {
+		memcpy(buf, decomprbuf+ofs, len);
+	}
+ out_decomprbuf:
+	if(decomprbuf != buf && decomprbuf != readbuf)
+		kfree(decomprbuf);
+ out_readbuf:
+	if(readbuf != buf)
+		kfree(readbuf);
+ out_ri:
+	jffs2_free_raw_inode(ri);
+
+	return ret;
+}
+
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			   unsigned char *buf, uint32_t offset, uint32_t len)
+{
+	uint32_t end = offset + len;
+	struct jffs2_node_frag *frag;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
+		  f->inocache->ino, offset, offset+len));
+
+	frag = jffs2_lookup_node_frag(&f->fragtree, offset);
+
+	/* XXX FIXME: Where a single physical node actually shows up in two
+	   frags, we read it twice. Don't do that. */
+	/* Now we're pointing at the first frag which overlaps our page */
+	while(offset < end) {
+		D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
+		if (unlikely(!frag || frag->ofs > offset)) {
+			uint32_t holesize = end - offset;
+			if (frag) {
+				D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
+				holesize = min(holesize, frag->ofs - offset);
+				D2(jffs2_print_frag_list(f));
+			}
+			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
+			memset(buf, 0, holesize);
+			buf += holesize;
+			offset += holesize;
+			continue;
+		} else if (unlikely(!frag->node)) {
+			uint32_t holeend = min(end, frag->ofs + frag->size);
+			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
+			memset(buf, 0, holeend - offset);
+			buf += holeend - offset;
+			offset = holeend;
+			frag = frag_next(frag);
+			continue;
+		} else {
+			uint32_t readlen;
+			uint32_t fragofs; /* offset within the frag to start reading */
+			
+			fragofs = offset - frag->ofs;
+			readlen = min(frag->size - fragofs, end - offset);
+			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
+				  frag->ofs+fragofs, frag->ofs+fragofs+readlen,
+				  ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
+			ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
+			D2(printk(KERN_DEBUG "node read done\n"));
+			if (ret) {
+				D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
+				memset(buf, 0, readlen);
+				return ret;
+			}
+			buf += readlen;
+			offset += readlen;
+			frag = frag_next(frag);
+			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
+		}
+	}
+	return 0;
+}
+
+/* Core function to read symlink target. */
+char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
+{
+	char *buf;
+	int ret;
+
+	down(&f->sem);
+
+	if (!f->metadata) {
+		printk(KERN_NOTICE "No metadata for symlink inode #%u\n", f->inocache->ino);
+		up(&f->sem);
+		return ERR_PTR(-EINVAL);
+	}
+	buf = kmalloc(f->metadata->size+1, GFP_USER);
+	if (!buf) {
+		up(&f->sem);
+		return ERR_PTR(-ENOMEM);
+	}
+	buf[f->metadata->size]=0;
+
+	ret = jffs2_read_dnode(c, f, f->metadata, buf, 0, f->metadata->size);
+
+	up(&f->sem);
+
+	if (ret) {
+		kfree(buf);
+		return ERR_PTR(ret);
+	}
+	return buf;
+}
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
new file mode 100644
index 0000000..aca4a0b
--- /dev/null
+++ b/fs/jffs2/readinode.c
@@ -0,0 +1,695 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: readinode.c,v 1.117 2004/11/20 18:06:54 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag);
+
+#if CONFIG_JFFS2_FS_DEBUG >= 2
+static void jffs2_print_fragtree(struct rb_root *list, int permitbug)
+{
+	struct jffs2_node_frag *this = frag_first(list);
+	uint32_t lastofs = 0;
+	int buggy = 0;
+
+	while(this) {
+		if (this->node)
+			printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n",
+			       this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw),
+			       this, frag_left(this), frag_right(this), frag_parent(this));
+		else 
+			printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs, 
+			       this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this));
+		if (this->ofs != lastofs)
+			buggy = 1;
+		lastofs = this->ofs+this->size;
+		this = frag_next(this);
+	}
+	if (buggy && !permitbug) {
+		printk(KERN_CRIT "Frag tree got a hole in it\n");
+		BUG();
+	}
+}
+
+void jffs2_print_frag_list(struct jffs2_inode_info *f)
+{
+	jffs2_print_fragtree(&f->fragtree, 0);
+
+	if (f->metadata) {
+		printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
+	}
+}
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG >= 1
+static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f)
+{
+	struct jffs2_node_frag *frag;
+	int bitched = 0;
+
+	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+
+		struct jffs2_full_dnode *fn = frag->node;
+		if (!fn || !fn->raw)
+			continue;
+
+		if (ref_flags(fn->raw) == REF_PRISTINE) {
+
+			if (fn->frags > 1) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags);
+				bitched = 1;
+			}
+			/* A hole node which isn't multi-page should be garbage-collected
+			   and merged anyway, so we just check for the frag size here,
+			   rather than mucking around with actually reading the node
+			   and checking the compression type, which is the real way
+			   to tell a hole node. */
+			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
+				       ref_offset(fn->raw));
+				bitched = 1;
+			}
+
+			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
+				       ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
+				bitched = 1;
+			}
+		}
+	}
+	
+	if (bitched) {
+		struct jffs2_node_frag *thisfrag;
+
+		printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino);
+		thisfrag = frag_first(&f->fragtree);
+		while (thisfrag) {
+			if (!thisfrag->node) {
+				printk("Frag @0x%x-0x%x; node-less hole\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
+			} else if (!thisfrag->node->raw) {
+				printk("Frag @0x%x-0x%x; raw-less hole\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
+			} else {
+				printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs,
+				       ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw),
+				       thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size);
+			}
+			thisfrag = frag_next(thisfrag);
+		}
+	}
+	return bitched;
+}
+#endif /* D1 */
+
+static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
+{
+	if (this->node) {
+		this->node->frags--;
+		if (!this->node->frags) {
+			/* The node has no valid frags left. It's totally obsoleted */
+			D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
+				  ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size));
+			jffs2_mark_node_obsolete(c, this->node->raw);
+			jffs2_free_full_dnode(this->node);
+		} else {
+			D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
+				  ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size,
+				  this->node->frags));
+			mark_ref_normal(this->node->raw);
+		}
+		
+	}
+	jffs2_free_node_frag(this);
+}
+
+/* Given an inode, probably with existing list of fragments, add the new node
+ * to the fragment list.
+ */
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+{
+	int ret;
+	struct jffs2_node_frag *newfrag;
+
+	D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn));
+
+	newfrag = jffs2_alloc_node_frag();
+	if (unlikely(!newfrag))
+		return -ENOMEM;
+
+	D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n",
+		  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag));
+	
+	if (unlikely(!fn->size)) {
+		jffs2_free_node_frag(newfrag);
+		return 0;
+	}
+
+	newfrag->ofs = fn->ofs;
+	newfrag->size = fn->size;
+	newfrag->node = fn;
+	newfrag->node->frags = 1;
+
+	ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
+	if (ret)
+		return ret;
+
+	/* If we now share a page with other nodes, mark either previous
+	   or next node REF_NORMAL, as appropriate.  */
+	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+		struct jffs2_node_frag *prev = frag_prev(newfrag);
+
+		mark_ref_normal(fn->raw);
+		/* If we don't start at zero there's _always_ a previous */	
+		if (prev->node)
+			mark_ref_normal(prev->node->raw);
+	}
+
+	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+		struct jffs2_node_frag *next = frag_next(newfrag);
+		
+		if (next) {
+			mark_ref_normal(fn->raw);
+			if (next->node)
+				mark_ref_normal(next->node->raw);
+		}
+	}
+	D2(if (jffs2_sanitycheck_fragtree(f)) {
+		   printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n",
+			  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
+		   return 0;
+	   })
+	D2(jffs2_print_frag_list(f));
+	return 0;
+}
+
+/* Doesn't set inode->i_size */
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag)
+{
+	struct jffs2_node_frag *this;
+	uint32_t lastend;
+
+	/* Skip all the nodes which are completed before this one starts */
+	this = jffs2_lookup_node_frag(list, newfrag->node->ofs);
+
+	if (this) {
+		D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
+			  this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
+		lastend = this->ofs + this->size;
+	} else {
+		D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n"));
+		lastend = 0;
+	}
+			  
+	/* See if we ran off the end of the list */
+	if (lastend <= newfrag->ofs) {
+		/* We did */
+
+		/* Check if 'this' node was on the same page as the new node.
+		   If so, both 'this' and the new node get marked REF_NORMAL so
+		   the GC can take a look.
+		*/
+		if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+			if (this->node)
+				mark_ref_normal(this->node->raw);
+			mark_ref_normal(newfrag->node->raw);
+		}
+
+		if (lastend < newfrag->node->ofs) {
+			/* ... and we need to put a hole in before the new node */
+			struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag();
+			if (!holefrag) {
+				jffs2_free_node_frag(newfrag);
+				return -ENOMEM;
+			}
+			holefrag->ofs = lastend;
+			holefrag->size = newfrag->node->ofs - lastend;
+			holefrag->node = NULL;
+			if (this) {
+				/* By definition, the 'this' node has no right-hand child, 
+				   because there are no frags with offset greater than it.
+				   So that's where we want to put the hole */
+				D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this));
+				rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
+			} else {
+				D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag));
+				rb_link_node(&holefrag->rb, NULL, &list->rb_node);
+			}
+			rb_insert_color(&holefrag->rb, list);
+			this = holefrag;
+		}
+		if (this) {
+			/* By definition, the 'this' node has no right-hand child,
+			   because there are no frags with offset greater than it.
+			   So that's where we want to put the new frag */
+			D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this));
+			rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);			
+		} else {
+			D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag));
+			rb_link_node(&newfrag->rb, NULL, &list->rb_node);
+		}
+		rb_insert_color(&newfrag->rb, list);
+		return 0;
+	}
+
+	D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", 
+		  this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
+
+	/* OK. 'this' is pointing at the first frag that the new frag at least partially obsoletes,
+	 * i.e. newfrag->ofs < this->ofs + this->size && newfrag->ofs >= this->ofs
+	 */
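+	/* Illustrative cases (byte ranges are invented for the example):
+	 *   this = 0-100, newfrag = 40-60  -> 'this' is split into 0-40 and 60-100
+	 *   this = 0-100, newfrag = 40-120 -> 'this' shrinks to 0-40
+	 *   this = 0-100, newfrag = 0-60   -> newfrag replaces 'this' in the tree
+	 *                                     and 'this' shrinks to 60-100
+	 */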
+	if (newfrag->ofs > this->ofs) {
+		/* This node isn't completely obsoleted. The start of it remains valid */
+
+		/* Mark the new node and the partially covered node REF_NORMAL -- let
+		   the GC take a look at them */
+		mark_ref_normal(newfrag->node->raw);
+		if (this->node)
+			mark_ref_normal(this->node->raw);
+
+		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
+			/* The new node splits 'this' frag into two */
+			struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag();
+			if (!newfrag2) {
+				jffs2_free_node_frag(newfrag);
+				return -ENOMEM;
+			}
+			D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size);
+			if (this->node)
+				printk("phys 0x%08x\n", ref_offset(this->node->raw));
+			else 
+				printk("hole\n");
+			   )
+			
+			/* New second frag pointing to this's node */
+			newfrag2->ofs = newfrag->ofs + newfrag->size;
+			newfrag2->size = (this->ofs+this->size) - newfrag2->ofs;
+			newfrag2->node = this->node;
+			if (this->node)
+				this->node->frags++;
+
+			/* Adjust size of original 'this' */
+			this->size = newfrag->ofs - this->ofs;
+
+			/* Now, we know there's no node with offset
+			   greater than this->ofs but smaller than
+			   newfrag2->ofs or newfrag->ofs, for obvious
+			   reasons. So we can do a tree insert from
+			   'this' to insert newfrag, and a tree insert
+			   from newfrag to insert newfrag2. */
+			jffs2_fragtree_insert(newfrag, this);
+			rb_insert_color(&newfrag->rb, list);
+			
+			jffs2_fragtree_insert(newfrag2, newfrag);
+			rb_insert_color(&newfrag2->rb, list);
+			
+			return 0;
+		}
+		/* New node just reduces 'this' frag in size, doesn't split it */
+		this->size = newfrag->ofs - this->ofs;
+
+		/* Again, we know it lives down here in the tree */
+		jffs2_fragtree_insert(newfrag, this);
+		rb_insert_color(&newfrag->rb, list);
+	} else {
+		/* New frag starts at the same point as 'this' used to. Replace 
+		   it in the tree without doing a delete and insertion */
+		D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n",
+			  newfrag, newfrag->ofs, newfrag->ofs+newfrag->size,
+			  this, this->ofs, this->ofs+this->size));
+	
+		rb_replace_node(&this->rb, &newfrag->rb, list);
+		
+		if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
+			D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size));
+			jffs2_obsolete_node_frag(c, this);
+		} else {
+			this->ofs += newfrag->size;
+			this->size -= newfrag->size;
+
+			jffs2_fragtree_insert(this, newfrag);
+			rb_insert_color(&this->rb, list);
+			return 0;
+		}
+	}
+	/* OK, now we have newfrag added in the correct place in the tree, but
+	   frag_next(newfrag) may be a fragment which is overlapped by it 
+	*/
+	while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
+		/* 'this' frag is obsoleted completely. */
+		D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size));
+		rb_erase(&this->rb, list);
+		jffs2_obsolete_node_frag(c, this);
+	}
+	/* Now we're pointing at the first frag which isn't totally obsoleted by 
+	   the new frag */
+
+	if (!this || newfrag->ofs + newfrag->size == this->ofs) {
+		return 0;
+	}
+	/* Still some overlap but we don't need to move it in the tree */
+	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
+	this->ofs = newfrag->ofs + newfrag->size;
+
+	/* And mark them REF_NORMAL so the GC takes a look at them */
+	if (this->node)
+		mark_ref_normal(this->node->raw);
+	mark_ref_normal(newfrag->node->raw);
+
+	return 0;
+}
+
+void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
+{
+	struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
+
+	D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size));
+
+	/* We know frag->ofs <= size. That's what lookup does for us */
+	if (frag && frag->ofs != size) {
+		if (frag->ofs+frag->size >= size) {
+			D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
+			frag->size = size - frag->ofs;
+		}
+		frag = frag_next(frag);
+	}
+	while (frag && frag->ofs >= size) {
+		struct jffs2_node_frag *next = frag_next(frag);
+
+		D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
+		frag_erase(frag, list);
+		jffs2_obsolete_node_frag(c, frag);
+		frag = next;
+	}
+}
+
+/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, 
+					struct jffs2_inode_info *f,
+					struct jffs2_raw_inode *latest_node);
+
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 
+			uint32_t ino, struct jffs2_raw_inode *latest_node)
+{
+	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n"));
+
+ retry_inocache:
+	spin_lock(&c->inocache_lock);
+	f->inocache = jffs2_get_ino_cache(c, ino);
+
+	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache));
+
+	if (f->inocache) {
+		/* Check its state. We may need to wait before we can use it */
+		switch(f->inocache->state) {
+		case INO_STATE_UNCHECKED:
+		case INO_STATE_CHECKEDABSENT:
+			f->inocache->state = INO_STATE_READING;
+			break;
+			
+		case INO_STATE_CHECKING:
+		case INO_STATE_GC:
+			/* If it's in either of these states, we need
+			   to wait for whoever's got it to finish and
+			   put it back. */
+			D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n",
+				  ino, f->inocache->state));
+			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			goto retry_inocache;
+
+		case INO_STATE_READING:
+		case INO_STATE_PRESENT:
+			/* Eep. This should never happen. It can
+			   happen if Linux calls read_inode() again
+			   before clear_inode() has finished though. */
+			printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
+			/* Fail. That's probably better than allowing it to succeed */
+			f->inocache = NULL;
+			break;
+
+		default:
+			BUG();
+		}
+	}
+	spin_unlock(&c->inocache_lock);
+
+	if (!f->inocache && ino == 1) {
+		/* Special case - no root inode on medium */
+		f->inocache = jffs2_alloc_inode_cache();
+		if (!f->inocache) {
+			printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n");
+			return -ENOMEM;
+		}
+		D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
+		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
+		f->inocache->ino = f->inocache->nlink = 1;
+		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+		f->inocache->state = INO_STATE_READING;
+		jffs2_add_ino_cache(c, f->inocache);
+	}
+	if (!f->inocache) {
+		printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
+		return -ENOENT;
+	}
+
+	return jffs2_do_read_inode_internal(c, f, latest_node);
+}
+
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+	struct jffs2_raw_inode n;
+	struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
+	int ret;
+
+	if (!f)
+		return -ENOMEM;
+
+	memset(f, 0, sizeof(*f));
+	init_MUTEX_LOCKED(&f->sem);
+	f->inocache = ic;
+
+	ret = jffs2_do_read_inode_internal(c, f, &n);
+	if (!ret) {
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+	}
+	kfree (f);
+	return ret;
+}
+
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, 
+					struct jffs2_inode_info *f,
+					struct jffs2_raw_inode *latest_node)
+{
+	struct jffs2_tmp_dnode_info *tn_list, *tn;
+	struct jffs2_full_dirent *fd_list;
+	struct jffs2_full_dnode *fn = NULL;
+	uint32_t crc;
+	uint32_t latest_mctime, mctime_ver;
+	uint32_t mdata_ver = 0;
+	size_t retlen;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink));
+
+	/* Grab all nodes relevant to this ino */
+	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
+
+	if (ret) {
+		printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret);
+		if (f->inocache->state == INO_STATE_READING)
+			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+		return ret;
+	}
+	f->dents = fd_list;
+
+	while (tn_list) {
+		tn = tn_list;
+
+		fn = tn->fn;
+
+		if (f->metadata) {
+			if (likely(tn->version >= mdata_ver)) {
+				D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)));
+				jffs2_mark_node_obsolete(c, f->metadata->raw);
+				jffs2_free_full_dnode(f->metadata);
+				f->metadata = NULL;
+				
+				mdata_ver = 0;
+			} else {
+				/* This should never happen. */
+				printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
+					  ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
+				jffs2_mark_node_obsolete(c, fn->raw);
+				jffs2_free_full_dnode(fn);
+				/* Fill in latest_node from the metadata, not this one we're about to free... */
+				fn = f->metadata;
+				goto next_tn;
+			}
+		}
+
+		if (fn->size) {
+			jffs2_add_full_dnode_to_inode(c, f, fn);
+		} else {
+			/* Zero-sized node at end of version list. Just a metadata update */
+			D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version));
+			f->metadata = fn;
+			mdata_ver = tn->version;
+		}
+	next_tn:
+		tn_list = tn->next;
+		jffs2_free_tmp_dnode_info(tn);
+	}
+	D1(jffs2_sanitycheck_fragtree(f));
+
+	if (!fn) {
+		/* No data nodes for this inode. */
+		if (f->inocache->ino != 1) {
+			printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino);
+			if (!fd_list) {
+				if (f->inocache->state == INO_STATE_READING)
+					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+				return -EIO;
+			}
+			printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n");
+		}
+		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
+		latest_node->version = cpu_to_je32(0);
+		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
+		latest_node->isize = cpu_to_je32(0);
+		latest_node->gid = cpu_to_je16(0);
+		latest_node->uid = cpu_to_je16(0);
+		if (f->inocache->state == INO_STATE_READING)
+			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
+		return 0;
+	}
+
+	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
+	if (ret || retlen != sizeof(*latest_node)) {
+		printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n",
+		       ret, retlen, sizeof(*latest_node));
+		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+		return ret?ret:-EIO;
+	}
+
+	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
+	if (crc != je32_to_cpu(latest_node->node_crc)) {
+		printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw));
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+		return -EIO;
+	}
+
+	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
+	case S_IFDIR:
+		if (mctime_ver > je32_to_cpu(latest_node->version)) {
+			/* The times in the latest_node are actually older than
+			   mctime in the latest dirent. Cheat. */
+			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
+		}
+		break;
+
+			
+	case S_IFREG:
+		/* If it was a regular file, truncate it to the latest node's isize */
+		jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize));
+		break;
+
+	case S_IFLNK:
+		/* Hack to work around broken isize in old symlink code.
+		   Remove this when dwmw2 comes to his senses and stops
+		   symlinks from being an entirely gratuitous special
+		   case. */
+		if (!je32_to_cpu(latest_node->isize))
+			latest_node->isize = latest_node->dsize;
+		/* fall through... */
+
+	case S_IFBLK:
+	case S_IFCHR:
+		/* Certain inode types should have only one data node, and it's
+		   kept as the metadata node */
+		if (f->metadata) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		if (!frag_first(&f->fragtree)) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		/* ASSERT: f->fragtree is not empty -- checked just above */
+		if (frag_next(frag_first(&f->fragtree))) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		/* OK. We're happy */
+		f->metadata = frag_first(&f->fragtree)->node;
+		jffs2_free_node_frag(frag_first(&f->fragtree));
+		f->fragtree = RB_ROOT;
+		break;
+	}
+	if (f->inocache->state == INO_STATE_READING)
+		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
+
+	return 0;
+}
+
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
+{
+	struct jffs2_full_dirent *fd, *fds;
+	int deleted;
+
+	down(&f->sem);
+	deleted = f->inocache && !f->inocache->nlink;
+
+	if (f->metadata) {
+		if (deleted)
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+		jffs2_free_full_dnode(f->metadata);
+	}
+
+	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
+
+	fds = f->dents;
+
+	while(fds) {
+		fd = fds;
+		fds = fd->next;
+		jffs2_free_full_dirent(fd);
+	}
+
+	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
+		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+
+	up(&f->sem);
+}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
new file mode 100644
index 0000000..ded5358
--- /dev/null
+++ b/fs/jffs2/scan.c
@@ -0,0 +1,916 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: scan.c,v 1.115 2004/11/17 12:59:08 dedekind Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+
+#define EMPTY_SCAN_SIZE 1024
+
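+/* Scan-time accounting helpers: each moves 'x' bytes from the free count into
+   the dirty/used/unchecked count, both in the filesystem-wide totals in 'c'
+   and in the per-eraseblock counters in 'jeb'. */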
+#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->dirty_size += _x; \
+		jeb->free_size -= _x ; jeb->dirty_size += _x; \
+		}while(0)
+#define USED_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->used_size += _x; \
+		jeb->free_size -= _x ; jeb->used_size += _x; \
+		}while(0)
+#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->unchecked_size += _x; \
+		jeb->free_size -= _x ; jeb->unchecked_size += _x; \
+		}while(0)
+
+#define noisy_printk(noise, args...) do { \
+	if (*(noise)) { \
+		printk(KERN_NOTICE args); \
+		 (*(noise))--; \
+		 if (!(*(noise))) { \
+			 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
+		 } \
+	} \
+} while(0)
+
+static uint32_t pseudo_random;
+
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				  unsigned char *buf, uint32_t buf_size);
+
+/* These helper functions _must_ increase ofs and also do the dirty/used space accounting. 
+ * Returning an error will abort the mount - bad checksums etc. should just mark the space
+ * as dirty.
+ */
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				 struct jffs2_raw_inode *ri, uint32_t ofs);
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				 struct jffs2_raw_dirent *rd, uint32_t ofs);
+
+#define BLK_STATE_ALLFF		0
+#define BLK_STATE_CLEAN		1
+#define BLK_STATE_PARTDIRTY	2
+#define BLK_STATE_CLEANMARKER	3
+#define BLK_STATE_ALLDIRTY	4
+#define BLK_STATE_BADBLOCK	5
+
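+/* Smallest amount of free space in an eraseblock that is still worth writing
+   into: room for two raw inode headers or, on write-buffered flash where
+   nodes cannot be marked obsolete in place, at least one write-buffer page. */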
+static inline int min_free(struct jffs2_sb_info *c)
+{
+	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
+#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
+	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
+		return c->wbuf_pagesize;
+#endif
+	return min;
+
+}
+int jffs2_scan_medium(struct jffs2_sb_info *c)
+{
+	int i, ret;
+	uint32_t empty_blocks = 0, bad_blocks = 0;
+	unsigned char *flashbuf = NULL;
+	uint32_t buf_size = 0;
+#ifndef __ECOS
+	size_t pointlen;
+
+	if (c->mtd->point) {
+		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
+		if (!ret && pointlen < c->mtd->size) {
+			/* Don't muck about if it won't let us point to the whole flash */
+			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
+			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+			flashbuf = NULL;
+		}
+		if (ret)
+			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+	}
+#endif
+	if (!flashbuf) {
+		/* For NAND it's quicker to read a whole eraseblock at a time,
+		   apparently */
+		if (jffs2_cleanmarker_oob(c))
+			buf_size = c->sector_size;
+		else
+			buf_size = PAGE_SIZE;
+
+		/* Respect kmalloc limitations */
+		if (buf_size > 128*1024)
+			buf_size = 128*1024;
+
+		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
+		flashbuf = kmalloc(buf_size, GFP_KERNEL);
+		if (!flashbuf)
+			return -ENOMEM;
+	}
+
+	for (i=0; i<c->nr_blocks; i++) {
+		struct jffs2_eraseblock *jeb = &c->blocks[i];
+
+		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);
+
+		if (ret < 0)
+			goto out;
+
+		ACCT_PARANOIA_CHECK(jeb);
+
+		/* Now decide which list to put it on */
+		switch(ret) {
+		case BLK_STATE_ALLFF:
+			/* 
+			 * Empty block.   Since we can't be sure it 
+			 * was entirely erased, we just queue it for erase
+			 * again.  It will be marked as such when the erase
+			 * is complete.  Meanwhile we still count it as empty
+			 * for later checks.
+			 */
+			empty_blocks++;
+			list_add(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			break;
+
+		case BLK_STATE_CLEANMARKER:
+			/* Only a CLEANMARKER node is valid */
+			if (!jeb->dirty_size) {
+				/* It's actually free */
+				list_add(&jeb->list, &c->free_list);
+				c->nr_free_blocks++;
+			} else {
+				/* Dirt */
+				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
+				list_add(&jeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+			}
+			break;
+
+		case BLK_STATE_CLEAN:
+                        /* Full (or almost full) of clean data. Clean list */
+                        list_add(&jeb->list, &c->clean_list);
+			break;
+
+		case BLK_STATE_PARTDIRTY:
+                        /* Some data, but not full. Dirty list. */
+                        /* We want to remember the block with most free space
+                           and stick it in the 'nextblock' position to start writing to it. */
+                        if (jeb->free_size > min_free(c) && 
+			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
+                                /* Better candidate for the next writes to go to */
+                                if (c->nextblock) {
+					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+					c->free_size -= c->nextblock->free_size;
+					c->wasted_size -= c->nextblock->wasted_size;
+					c->nextblock->free_size = c->nextblock->wasted_size = 0;
+					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
+						list_add(&c->nextblock->list, &c->very_dirty_list);
+					} else {
+						list_add(&c->nextblock->list, &c->dirty_list);
+					}
+				}
+                                c->nextblock = jeb;
+                        } else {
+				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
+				c->dirty_size += jeb->free_size + jeb->wasted_size;
+				c->free_size -= jeb->free_size;
+				c->wasted_size -= jeb->wasted_size;
+				jeb->free_size = jeb->wasted_size = 0;
+				if (VERYDIRTY(c, jeb->dirty_size)) {
+					list_add(&jeb->list, &c->very_dirty_list);
+				} else {
+					list_add(&jeb->list, &c->dirty_list);
+				}
+			}
+			break;
+
+		case BLK_STATE_ALLDIRTY:
+			/* Nothing valid - not even a clean marker. Needs erasing. */
+			/* For now we just put it on the erasing list. We'll start the erases later */
+			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
+			list_add(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			break;
+			
+		case BLK_STATE_BADBLOCK:
+			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
+			list_add(&jeb->list, &c->bad_list);
+			c->bad_size += c->sector_size;
+			c->free_size -= c->sector_size;
+			bad_blocks++;
+			break;
+		default:
+			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
+			BUG();	
+		}
+	}
+	
+	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
+	if (c->nextblock && (c->nextblock->dirty_size)) {
+		c->nextblock->wasted_size += c->nextblock->dirty_size;
+		c->wasted_size += c->nextblock->dirty_size;
+		c->dirty_size -= c->nextblock->dirty_size;
+		c->nextblock->dirty_size = 0;
+	}
+#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
+	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
+		/* If we're going to start writing into a block which already 
+		   contains data, and the end of the data isn't page-aligned,
+		   skip a little and align it. */
+
+		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);
+
+		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
+			  skip));
+		c->nextblock->wasted_size += skip;
+		c->wasted_size += skip;
+
+		c->nextblock->free_size -= skip;
+		c->free_size -= skip;
+	}
+#endif
+	if (c->nr_erasing_blocks) {
+		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { 
+			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
+			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
+			ret = -EIO;
+			goto out;
+		}
+		jffs2_erase_pending_trigger(c);
+	}
+	ret = 0;
+ out:
+	if (buf_size)
+		kfree(flashbuf);
+#ifndef __ECOS
+	else 
+		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+#endif
+	return ret;
+}
+
+static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
+				uint32_t ofs, uint32_t len)
+{
+	int ret;
+	size_t retlen;
+
+	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
+	if (ret) {
+		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
+		return ret;
+	}
+	if (retlen < len) {
+		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
+		return -EIO;
+	}
+	D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
+	D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
+	return 0;
+}
+
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				  unsigned char *buf, uint32_t buf_size) {
+	struct jffs2_unknown_node *node;
+	struct jffs2_unknown_node crcnode;
+	uint32_t ofs, prevofs;
+	uint32_t hdr_crc, buf_ofs, buf_len;
+	int err;
+	int noise = 0;
+#ifdef CONFIG_JFFS2_FS_NAND
+	int cleanmarkerfound = 0;
+#endif
+
+	ofs = jeb->offset;
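+	/* prevofs starts one byte before the block so that the "already
+	   been seen" check in the scan loop cannot fire on the first node. */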
+	prevofs = jeb->offset - 1;
+
+	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
+
+#ifdef CONFIG_JFFS2_FS_NAND
+	if (jffs2_cleanmarker_oob(c)) {
+		int ret = jffs2_check_nand_cleanmarker(c, jeb);
+		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n", ret));
+		/* Even if it's not found, we still scan to see
+		   if the block is empty. We use this information
+		   to decide whether to erase it or not. */
+		switch (ret) {
+		case 0:		cleanmarkerfound = 1; break;
+		case 1: 	break;
+		case 2: 	return BLK_STATE_BADBLOCK;
+		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
+		default: 	return ret;
+		}
+	}
+#endif
+	buf_ofs = jeb->offset;
+
+	if (!buf_size) {
+		buf_len = c->sector_size;
+	} else {
+		buf_len = EMPTY_SCAN_SIZE;
+		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
+		if (err)
+			return err;
+	}
+	
+	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+	ofs = 0;
+
+	/* Scan only 4KiB of 0xFF before declaring it's empty */
+	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
+		ofs += 4;
+
+	if (ofs == EMPTY_SCAN_SIZE) {
+#ifdef CONFIG_JFFS2_FS_NAND
+		if (jffs2_cleanmarker_oob(c)) {
+			/* scan oob, take care of cleanmarker */
+			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
+			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
+			switch (ret) {
+			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
+			case 1: 	return BLK_STATE_ALLDIRTY;
+			default: 	return ret;
+			}
+		}
+#endif
+		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
+		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
+	}
+	if (ofs) {
+		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
+			  jeb->offset + ofs));
+		DIRTY_SPACE(ofs);
+	}
+
+	/* Now ofs is a complete physical flash offset as it always was... */
+	ofs += jeb->offset;
+
+	noise = 10;
+
+scan_more:	
+	while(ofs < jeb->offset + c->sector_size) {
+
+		D1(ACCT_PARANOIA_CHECK(jeb));
+
+		cond_resched();
+
+		if (ofs & 3) {
+			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
+			ofs = PAD(ofs);
+			continue;
+		}
+		if (ofs == prevofs) {
+			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		prevofs = ofs;
+
+		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
+			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
+				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
+			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
+			break;
+		}
+
+		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
+			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
+				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
+			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+			if (err)
+				return err;
+			buf_ofs = ofs;
+		}
+
+		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
+
+		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
+			uint32_t inbuf_ofs;
+			uint32_t empty_start;
+
+			empty_start = ofs;
+			ofs += 4;
+
+			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
+		more_empty:
+			inbuf_ofs = ofs - buf_ofs;
+			while (inbuf_ofs < buf_len) {
+				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
+					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
+					       empty_start, ofs);
+					DIRTY_SPACE(ofs-empty_start);
+					goto scan_more;
+				}
+
+				inbuf_ofs+=4;
+				ofs += 4;
+			}
+			/* Ran off end. */
+			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+
+			/* If we're only checking the beginning of a block with a cleanmarker,
+			   bail now */
+			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && 
+			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
+				D1(printk(KERN_DEBUG "%d bytes at start of block seem clean... assuming all clean\n", EMPTY_SCAN_SIZE));
+				return BLK_STATE_CLEANMARKER;
+			}
+
+			/* See how much more there is to read in this eraseblock... */
+			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+			if (!buf_len) {
+				/* No more to read. Break out of main loop without marking 
+				   this range of empty space as dirty (because it's not) */
+				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
+					  empty_start));
+				break;
+			}
+			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
+			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+			if (err)
+				return err;
+			buf_ofs = ofs;
+			goto more_empty;
+		}
+
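+		/* KSAMTIB_CIGAM_2SFFJ is JFFS2_MAGIC_BITMASK with its bytes swapped;
+		   seeing it at the start of the block means the image was written
+		   with the opposite endianness. */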
+		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
+			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
+			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
+			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
+			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
+			/* OK. We're out of possibilities. Whinge and move on */
+			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", 
+				     JFFS2_MAGIC_BITMASK, ofs, 
+				     je16_to_cpu(node->magic));
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		/* We seem to have a node of sorts. Check the CRC */
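+		/* The stored hdr_crc was calculated with the ACCURATE bit set, and
+		   obsoleting a node clears that bit in place without updating the
+		   CRC, so force the bit back on before checking. */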
+		crcnode.magic = node->magic;
+		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
+		crcnode.totlen = node->totlen;
+		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
+
+		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
+			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
+				     ofs, je16_to_cpu(node->magic),
+				     je16_to_cpu(node->nodetype), 
+				     je32_to_cpu(node->totlen),
+				     je32_to_cpu(node->hdr_crc),
+				     hdr_crc);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+
+		if (ofs + je32_to_cpu(node->totlen) > 
+		    jeb->offset + c->sector_size) {
+			/* Eep. Node goes over the end of the erase block. */
+			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
+			       ofs, je32_to_cpu(node->totlen));
+			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+
+		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
+			/* Wheee. This is an obsoleted node */
+			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
+			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+			ofs += PAD(je32_to_cpu(node->totlen));
+			continue;
+		}
+
+		switch(je16_to_cpu(node->nodetype)) {
+		case JFFS2_NODETYPE_INODE:
+			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
+				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
+				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+				if (err)
+					return err;
+				buf_ofs = ofs;
+				node = (void *)buf;
+			}
+			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
+			if (err) return err;
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+			
+		case JFFS2_NODETYPE_DIRENT:
+			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  je32_to_cpu(node->totlen), buf_len, ofs));
+				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+				if (err)
+					return err;
+				buf_ofs = ofs;
+				node = (void *)buf;
+			}
+			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
+			if (err) return err;
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+
+		case JFFS2_NODETYPE_CLEANMARKER:
+			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
+			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
+				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", 
+				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
+				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+				ofs += PAD(sizeof(struct jffs2_unknown_node));
+			} else if (jeb->first_node) {
+				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
+				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+				ofs += PAD(sizeof(struct jffs2_unknown_node));
+			} else {
+				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
+				if (!marker_ref) {
+					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
+					return -ENOMEM;
+				}
+				marker_ref->next_in_ino = NULL;
+				marker_ref->next_phys = NULL;
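+				/* Node offsets are 4-byte aligned, so the two low bits of
+				   flash_offset are free to hold the ref state (REF_NORMAL). */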
+				marker_ref->flash_offset = ofs | REF_NORMAL;
+				marker_ref->__totlen = c->cleanmarker_size;
+				jeb->first_node = jeb->last_node = marker_ref;
+			     
+				USED_SPACE(PAD(c->cleanmarker_size));
+				ofs += PAD(c->cleanmarker_size);
+			}
+			break;
+
+		case JFFS2_NODETYPE_PADDING:
+			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+
+		default:
+			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
+			case JFFS2_FEATURE_ROCOMPAT:
+				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+			        c->flags |= JFFS2_SB_FLAG_RO;
+				if (!(jffs2_is_readonly(c)))
+					return -EROFS;
+				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+
+			case JFFS2_FEATURE_INCOMPAT:
+				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+				return -EINVAL;
+
+			case JFFS2_FEATURE_RWCOMPAT_DELETE:
+				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+
+			case JFFS2_FEATURE_RWCOMPAT_COPY:
+				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+			}
+		}
+	}
+
+
+	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, 
+		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));
+
+	/* mark_node_obsolete can add to wasted !! */
+	if (jeb->wasted_size) {
+		jeb->dirty_size += jeb->wasted_size;
+		c->dirty_size += jeb->wasted_size;
+		c->wasted_size -= jeb->wasted_size;
+		jeb->wasted_size = 0;
+	}
+
+	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size 
+		&& (!jeb->first_node || !jeb->first_node->next_in_ino) )
+		return BLK_STATE_CLEANMARKER;
+		
+	/* move blocks with max 4 byte dirty space to cleanlist */	
+	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
+		c->dirty_size -= jeb->dirty_size;
+		c->wasted_size += jeb->dirty_size; 
+		jeb->wasted_size += jeb->dirty_size;
+		jeb->dirty_size = 0;
+		return BLK_STATE_CLEAN;
+	} else if (jeb->used_size || jeb->unchecked_size)
+		return BLK_STATE_PARTDIRTY;
+	else
+		return BLK_STATE_ALLDIRTY;
+}
+
+static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inode_cache *ic;
+
+	ic = jffs2_get_ino_cache(c, ino);
+	if (ic)
+		return ic;
+
+	if (ino > c->highest_ino)
+		c->highest_ino = ino;
+
+	ic = jffs2_alloc_inode_cache();
+	if (!ic) {
+		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
+		return NULL;
+	}
+	memset(ic, 0, sizeof(*ic));
+
+	ic->ino = ino;
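+	/* With no nodes yet, ->nodes points back at the inocache itself;
+	   the per-inode next_in_ino chain is terminated by this pointer. */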
+	ic->nodes = (void *)ic;
+	jffs2_add_ino_cache(c, ic);
+	if (ino == 1)
+		ic->nlink = 1;
+	return ic;
+}
+
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				 struct jffs2_raw_inode *ri, uint32_t ofs)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_inode_cache *ic;
+	uint32_t ino = je32_to_cpu(ri->ino);
+
+	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
+
+	/* We do very little here now. Just check the ino# to which we should attribute
+	   this node; we can do all the CRC checking etc. later. There's a tradeoff here -- 
+	   we used to scan the flash once only, reading everything we want from it into
+	   memory, then building all our in-core data structures and freeing the extra
+	   information. Now we allow the first part of the mount to complete a lot quicker,
+	   but we have to go _back_ to the flash in order to finish the CRC checking, etc. 
+	   Which means that the _full_ amount of time to get to proper write mode with GC
+	   operational may actually be _longer_ than before. Sucks to be me. */
+
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw) {
+		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
+		return -ENOMEM;
+	}
+
+	ic = jffs2_get_ino_cache(c, ino);
+	if (!ic) {
+		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
+		   first node we found for this inode. Do a CRC check to protect against the former
+		   case */
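+		/* node_crc covers the raw inode apart from its last two fields
+		   (data_crc and node_crc themselves), hence the sizeof(*ri)-8. */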
+		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);
+
+		if (crc != je32_to_cpu(ri->node_crc)) {
+			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ofs, je32_to_cpu(ri->node_crc), crc);
+			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
+			jffs2_free_raw_node_ref(raw);
+			return 0;
+		}
+		ic = jffs2_scan_make_ino_cache(c, ino);
+		if (!ic) {
+			jffs2_free_raw_node_ref(raw);
+			return -ENOMEM;
+		}
+	}
+
+	/* Wheee. It worked */
+
+	raw->flash_offset = ofs | REF_UNCHECKED;
+	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
+	raw->next_phys = NULL;
+	raw->next_in_ino = ic->nodes;
+
+	ic->nodes = raw;
+	if (!jeb->first_node)
+		jeb->first_node = raw;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = raw;
+	jeb->last_node = raw;
+
+	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", 
+		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
+		  je32_to_cpu(ri->offset),
+		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
+
+	pseudo_random += je32_to_cpu(ri->version);
+
+	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
+	return 0;
+}
+
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				  struct jffs2_raw_dirent *rd, uint32_t ofs)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+	struct jffs2_inode_cache *ic;
+	uint32_t crc;
+
+	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
+
+	/* We don't get here unless the node is still valid, so we don't have to
+	   mask in the ACCURATE bit any more. */
+	crc = crc32(0, rd, sizeof(*rd)-8);
+
+	if (crc != je32_to_cpu(rd->node_crc)) {
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ofs, je32_to_cpu(rd->node_crc), crc);
+		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+		return 0;
+	}
+
+	pseudo_random += je32_to_cpu(rd->version);
+
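+	/* nsize+1 leaves room for the NUL terminator appended below. */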
+	fd = jffs2_alloc_full_dirent(rd->nsize+1);
+	if (!fd) {
+		return -ENOMEM;
+	}
+	memcpy(&fd->name, rd->name, rd->nsize);
+	fd->name[rd->nsize] = 0;
+
+	crc = crc32(0, fd->name, rd->nsize);
+	if (crc != je32_to_cpu(rd->name_crc)) {
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ofs, je32_to_cpu(rd->name_crc), crc);	
+		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
+		jffs2_free_full_dirent(fd);
+		/* FIXME: Why do we believe totlen? */
+		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
+		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+		return 0;
+	}
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw) {
+		jffs2_free_full_dirent(fd);
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
+		return -ENOMEM;
+	}
+	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
+	if (!ic) {
+		jffs2_free_full_dirent(fd);
+		jffs2_free_raw_node_ref(raw);
+		return -ENOMEM;
+	}
+	
+	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
+	raw->flash_offset = ofs | REF_PRISTINE;
+	raw->next_phys = NULL;
+	raw->next_in_ino = ic->nodes;
+	ic->nodes = raw;
+	if (!jeb->first_node)
+		jeb->first_node = raw;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = raw;
+	jeb->last_node = raw;
+
+	fd->raw = raw;
+	fd->next = NULL;
+	fd->version = je32_to_cpu(rd->version);
+	fd->ino = je32_to_cpu(rd->ino);
+	fd->nhash = full_name_hash(fd->name, rd->nsize);
+	fd->type = rd->type;
+	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
+	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+
+	return 0;
+}
+
+static int count_list(struct list_head *l)
+{
+	uint32_t count = 0;
+	struct list_head *tmp;
+
+	list_for_each(tmp, l) {
+		count++;
+	}
+	return count;
+}
+
+/* Note: This breaks if list_empty(head). I don't care. You
+   might, if you copy this code and use it elsewhere :) */
+static void rotate_list(struct list_head *head, uint32_t count)
+{
+	struct list_head *n = head->next;
+
+	list_del(head);
+	while(count--) {
+		n = n->next;
+	}
+	list_add(head, n);
+}
+
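+/* Rotate each block list by a pseudo-random amount gathered during the scan,
+   so that allocation and garbage collection don't always start from the same
+   physical blocks after every mount; this helps spread wear across the flash. */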
+void jffs2_rotate_lists(struct jffs2_sb_info *c)
+{
+	uint32_t x;
+	uint32_t rotateby;
+
+	x = count_list(&c->clean_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));
+
+		rotate_list((&c->clean_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
+			  list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
+	}
+
+	x = count_list(&c->very_dirty_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));
+
+		rotate_list((&c->very_dirty_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
+			  list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
+	}
+
+	x = count_list(&c->dirty_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));
+
+		rotate_list((&c->dirty_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
+			  list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
+	}
+
+	x = count_list(&c->erasable_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));
+
+		rotate_list((&c->erasable_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
+			  list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
+	}
+
+	if (c->nr_erasing_blocks) {
+		rotateby = pseudo_random % c->nr_erasing_blocks;
+		D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));
+
+		rotate_list((&c->erase_pending_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
+			  list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
+	}
+
+	if (c->nr_free_blocks) {
+		rotateby = pseudo_random % c->nr_free_blocks;
+		D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));
+
+		rotate_list((&c->free_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
+			  list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
+	}
+}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
new file mode 100644
index 0000000..6b2a441
--- /dev/null
+++ b/fs/jffs2/super.c
@@ -0,0 +1,365 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: super.c,v 1.104 2004/11/23 15:37:31 gleixner Exp $
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/jffs2.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include "compr.h"
+#include "nodelist.h"
+
+static void jffs2_put_super(struct super_block *);
+
+static kmem_cache_t *jffs2_inode_cachep;
+
+static struct inode *jffs2_alloc_inode(struct super_block *sb)
+{
+	struct jffs2_inode_info *ei;
+	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void jffs2_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+}
+
+static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		init_MUTEX_LOCKED(&ei->sem);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static int jffs2_sync_fs(struct super_block *sb, int wait)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	down(&c->alloc_sem);
+	jffs2_flush_wbuf_pad(c);
+	up(&c->alloc_sem);	
+	return 0;
+}
+
+static struct super_operations jffs2_super_operations =
+{
+	.alloc_inode =	jffs2_alloc_inode,
+	.destroy_inode =jffs2_destroy_inode,
+	.read_inode =	jffs2_read_inode,
+	.put_super =	jffs2_put_super,
+	.write_super =	jffs2_write_super,
+	.statfs =	jffs2_statfs,
+	.remount_fs =	jffs2_remount_fs,
+	.clear_inode =	jffs2_clear_inode,
+	.dirty_inode =	jffs2_dirty_inode,
+	.sync_fs =	jffs2_sync_fs,
+};
+
+static int jffs2_sb_compare(struct super_block *sb, void *data)
+{
+	struct jffs2_sb_info *p = data;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	/* The superblocks are considered to be equivalent if the underlying MTD
+	   device is the same one */
+	if (c->mtd == p->mtd) {
+		D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name));
+		return 1;
+	} else {
+		D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+			  c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name));
+		return 0;
+	}
+}
+
+static int jffs2_sb_set(struct super_block *sb, void *data)
+{
+	struct jffs2_sb_info *p = data;
+
+	/* For persistence of NFS exports etc. we use the same s_dev
+	   each time we mount the device, don't just use an anonymous
+	   device */
+	sb->s_fs_info = p;
+	p->os_priv = sb;
+	sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index);
+
+	return 0;
+}
+
+static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
+					      int flags, const char *dev_name, 
+					      void *data, struct mtd_info *mtd)
+{
+	struct super_block *sb;
+	struct jffs2_sb_info *c;
+	int ret;
+
+	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+	memset(c, 0, sizeof(*c));
+	c->mtd = mtd;
+
+	sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
+
+	if (IS_ERR(sb))
+		goto out_put;
+
+	if (sb->s_root) {
+		/* New mountpoint for JFFS2 which is already mounted */
+		D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
+			  mtd->index, mtd->name));
+		goto out_put;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
+		  mtd->index, mtd->name));
+
+	sb->s_op = &jffs2_super_operations;
+	sb->s_flags = flags | MS_NOATIME;
+
+	ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0);
+
+	if (ret) {
+		/* Failure case... */
+		up_write(&sb->s_umount);
+		deactivate_super(sb);
+		return ERR_PTR(ret);
+	}
+
+	sb->s_flags |= MS_ACTIVE;
+	return sb;
+
+ out_put:
+	kfree(c);
+	put_mtd_device(mtd);
+
+	return sb;
+}
+
+static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
+					      int flags, const char *dev_name, 
+					      void *data, int mtdnr)
+{
+	struct mtd_info *mtd;
+
+	mtd = get_mtd_device(NULL, mtdnr);
+	if (!mtd) {
+		D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
+		return ERR_PTR(-EINVAL);
+	}
+
+	return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+}
+
+static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
+					int flags, const char *dev_name,
+					void *data)
+{
+	int err;
+	struct nameidata nd;
+	int mtdnr;
+
+	if (!dev_name)
+		return ERR_PTR(-EINVAL);
+
+	D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
+
+	/* The preferred way of mounting in future; especially when
+	   CONFIG_BLK_DEV is implemented - we specify the underlying
+	   MTD device by number or by name, so that we don't require 
+	   block device support to be present in the kernel. */
+
+	/* FIXME: How to do the root fs this way? */
+
+	if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
+		/* Probably mounting without the blkdev crap */
+		if (dev_name[3] == ':') {
+			struct mtd_info *mtd;
+
+			/* Mount by MTD device name */
+			D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4));
+			for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
+				mtd = get_mtd_device(NULL, mtdnr);
+				if (mtd) {
+					if (!strcmp(mtd->name, dev_name+4))
+						return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+					put_mtd_device(mtd);
+				}
+			}
+			printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4);
+		} else if (isdigit(dev_name[3])) {
+			/* Mount by MTD device number name */
+			char *endptr;
+			
+			mtdnr = simple_strtoul(dev_name+3, &endptr, 0);
+			if (!*endptr) {
+				/* It was a valid number */
+				D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
+				return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+			}
+		}
+	}
+
+	/* Try the old way - the hack where we allowed users to mount 
+	   /dev/mtdblock$(n) but didn't actually _use_ the blkdev */
+
+	err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
+	if (err)
+		return ERR_PTR(err);
+
+	/* Only safe to look at nd once path_lookup() has succeeded */
+	D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n",
+		  err, nd.dentry->d_inode));
+
+	err = -EINVAL;
+
+	if (!S_ISBLK(nd.dentry->d_inode->i_mode))
+		goto out;
+
+	if (nd.mnt->mnt_flags & MNT_NODEV) {
+		err = -EACCES;
+		goto out;
+	}
+
+	if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) {
+		if (!(flags & MS_VERBOSE)) /* Yes I mean this. Strangely */
+			printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n",
+			       dev_name);
+		goto out;
+	}
+
+	mtdnr = iminor(nd.dentry->d_inode);
+	path_release(&nd);
+
+	return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+
+out:
+	path_release(&nd);
+	return ERR_PTR(err);
+}
+
+static void jffs2_put_super (struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+
+	if (!(sb->s_flags & MS_RDONLY))
+		jffs2_stop_garbage_collect_thread(c);
+	down(&c->alloc_sem);
+	jffs2_flush_wbuf_pad(c);
+	up(&c->alloc_sem);
+	jffs2_free_ino_caches(c);
+	jffs2_free_raw_node_refs(c);
+	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		vfree(c->blocks);
+	else
+		kfree(c->blocks);
+	jffs2_flash_cleanup(c);
+	kfree(c->inocache_list);
+	if (c->mtd->sync)
+		c->mtd->sync(c->mtd);
+
+	D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+}
+
+static void jffs2_kill_sb(struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	generic_shutdown_super(sb);
+	put_mtd_device(c->mtd);
+	kfree(c);
+}
+
+static struct file_system_type jffs2_fs_type = {
+	.owner =	THIS_MODULE,
+	.name =		"jffs2",
+	.get_sb =	jffs2_get_sb,
+	.kill_sb =	jffs2_kill_sb,
+};
+
+static int __init init_jffs2_fs(void)
+{
+	int ret;
+
+	printk(KERN_INFO "JFFS2 version 2.2."
+#ifdef CONFIG_JFFS2_FS_NAND
+	       " (NAND)"
+#endif
+	       " (C) 2001-2003 Red Hat, Inc.\n");
+
+	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+					     sizeof(struct jffs2_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     jffs2_i_init_once, NULL);
+	if (!jffs2_inode_cachep) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
+		return -ENOMEM;
+	}
+	ret = jffs2_compressors_init();
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+		goto out;
+	}
+	ret = jffs2_create_slab_caches();
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
+		goto out_compressors;
+	}
+	ret = register_filesystem(&jffs2_fs_type);
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
+		goto out_slab;
+	}
+	return 0;
+
+ out_slab:
+	jffs2_destroy_slab_caches();
+ out_compressors:
+	jffs2_compressors_exit();
+ out:
+	kmem_cache_destroy(jffs2_inode_cachep);
+	return ret;
+}
+
+static void __exit exit_jffs2_fs(void)
+{
+	unregister_filesystem(&jffs2_fs_type);
+	jffs2_destroy_slab_caches();
+	jffs2_compressors_exit();
+	kmem_cache_destroy(jffs2_inode_cachep);
+}
+
+module_init(init_jffs2_fs);
+module_exit(exit_jffs2_fs);
+
+MODULE_DESCRIPTION("The Journalling Flash File System, v2");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for 
+		       // the sake of this tag. It's Free Software.
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
new file mode 100644
index 0000000..7b1820d
--- /dev/null
+++ b/fs/jffs2/symlink.c
@@ -0,0 +1,45 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: symlink.c,v 1.14 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include "nodelist.h"
+
+static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd);
+
+struct inode_operations jffs2_symlink_inode_operations =
+{	
+	.readlink =	generic_readlink,
+	.follow_link =	jffs2_follow_link,
+	.put_link =	jffs2_put_link,
+	.setattr =	jffs2_setattr
+};
+
+static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	unsigned char *buf;
+	buf = jffs2_getlink(JFFS2_SB_INFO(dentry->d_inode->i_sb), JFFS2_INODE_INFO(dentry->d_inode));
+	nd_set_link(nd, buf);
+	return 0;
+}
+
+static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s))
+		kfree(s);
+}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
new file mode 100644
index 0000000..c812806
--- /dev/null
+++ b/fs/jffs2/wbuf.c
@@ -0,0 +1,1184 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: wbuf.c,v 1.82 2004/11/20 22:08:31 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/crc32.h>
+#include <linux/mtd/nand.h>
+#include "nodelist.h"
+
+/* For testing write failures */
+#undef BREAKME
+#undef BREAKMEHEADER
+
+#ifdef BREAKME
+static unsigned char *brokenbuf;
+#endif
+
+/* max. erase failures before we mark a block bad */
+#define MAX_ERASE_FAILURES 	2
+
+/* two seconds timeout for timed wbuf-flushing */
+#define WBUF_FLUSH_TIMEOUT	(2 * HZ)
+
+struct jffs2_inodirty {
+	uint32_t ino;
+	struct jffs2_inodirty *next;
+};
+
+static struct jffs2_inodirty inodirty_nomem;
+
+static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inodirty *this = c->wbuf_inodes;
+
+	/* If a malloc failed, consider _everything_ dirty */
+	if (this == &inodirty_nomem)
+		return 1;
+
+	/* If ino == 0, _any_ non-GC writes mean 'yes' */
+	if (this && !ino)
+		return 1;
+
+	/* Look to see if the inode in question is pending in the wbuf */
+	while (this) {
+		if (this->ino == ino)
+			return 1;
+		this = this->next;
+	}
+	return 0;
+}
+
+static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
+{
+	struct jffs2_inodirty *this;
+
+	this = c->wbuf_inodes;
+
+	if (this != &inodirty_nomem) {
+		while (this) {
+			struct jffs2_inodirty *next = this->next;
+			kfree(this);
+			this = next;
+		}
+	}
+	c->wbuf_inodes = NULL;
+}
+
+static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inodirty *new;
+
+	/* Mark the superblock dirty so that kupdated will flush... */
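+	/* (OFNI_BS_2SFFJ is JFFS2_SB_INFO spelled backwards: it maps from the
+	   JFFS2 private info back to the owning VFS super_block.) */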
+	OFNI_BS_2SFFJ(c)->s_dirt = 1;
+
+	if (jffs2_wbuf_pending_for_ino(c, ino))
+		return;
+
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new) {
+		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+		jffs2_clear_wbuf_ino_list(c);
+		c->wbuf_inodes = &inodirty_nomem;
+		return;
+	}
+	new->ino = ino;
+	new->next = c->wbuf_inodes;
+	c->wbuf_inodes = new;
+	return;
+}
+
+static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
+{
+	struct list_head *this, *next;
+	static int n;
+
+	if (list_empty(&c->erasable_pending_wbuf_list))
+		return;
+
+	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
+		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+		list_del(this);
+		if ((jiffies + (n++)) & 127) {
+			/* Most of the time, we just erase it immediately. Otherwise we
+			   spend ages scanning it on mount, etc. */
+			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+			list_add_tail(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			jffs2_erase_pending_trigger(c);
+		} else {
+			/* Sometimes, however, we leave it elsewhere so it doesn't get
+			   immediately reused, and we spread the load a bit. */
+			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+			list_add_tail(&jeb->list, &c->erasable_list);
+		}
+	}
+}
+
+static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	D1(printk("About to refile bad block at %08x\n", jeb->offset));
+
+	D2(jffs2_dump_block_lists(c));
+	/* File the existing block on the bad_used_list.... */
+	if (c->nextblock == jeb)
+		c->nextblock = NULL;
+	else /* Not sure this should ever happen... need more coffee */
+		list_del(&jeb->list);
+	if (jeb->first_node) {
+		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+		list_add(&jeb->list, &c->bad_used_list);
+	} else {
+		BUG();
+		/* It has to have had some nodes or we couldn't be here */
+		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	D2(jffs2_dump_block_lists(c));
+
+	/* Adjust its size counts accordingly */
+	c->wasted_size += jeb->free_size;
+	c->free_size -= jeb->free_size;
+	jeb->wasted_size += jeb->free_size;
+	jeb->free_size = 0;
+
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+}
+
+/* Recover from failure to write wbuf. Recover the nodes up to the
+ * wbuf, not the one which we were starting to try to write. */
+
+static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
+{
+	struct jffs2_eraseblock *jeb, *new_jeb;
+	struct jffs2_raw_node_ref **first_raw, **raw;
+	size_t retlen;
+	int ret;
+	unsigned char *buf;
+	uint32_t start, end, ofs, len;
+
+	spin_lock(&c->erase_completion_lock);
+
+	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+
+	jffs2_block_refile(c, jeb);
+
+	/* Find the first node to be recovered, by skipping over every
+	   node which ends before the wbuf starts, or which is obsolete. */
+	first_raw = &jeb->first_node;
+	while (*first_raw && 
+	       (ref_obsolete(*first_raw) ||
+		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
+		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+			  ref_offset(*first_raw), ref_flags(*first_raw),
+			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
+			  c->wbuf_ofs));
+		first_raw = &(*first_raw)->next_phys;
+	}
+
+	if (!*first_raw) {
+		/* All nodes were obsolete. Nothing to recover. */
+		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	start = ref_offset(*first_raw);
+	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+
+	/* Find the last node to be recovered */
+	raw = first_raw;
+	while ((*raw)) {
+		if (!ref_obsolete(*raw))
+			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+
+		raw = &(*raw)->next_phys;
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+
+	buf = NULL;
+	if (start < c->wbuf_ofs) {
+		/* First affected node was already partially written.
+		 * Attempt to reread the old data into our buffer. */
+
+		buf = kmalloc(end - start, GFP_KERNEL);
+		if (!buf) {
+			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
+
+			goto read_failed;
+		}
+
+		/* Do the read... */
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+		
+		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
+				c->flags |= JFFS2_SB_FLAG_RO;
+			ret = 0;
+		}
+		if (ret || retlen != c->wbuf_ofs - start) {
+			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
+
+			kfree(buf);
+			buf = NULL;
+		read_failed:
+			first_raw = &(*first_raw)->next_phys;
+			/* If this was the only node to be recovered, give up */
+			if (!(*first_raw))
+				return;
+
+			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
+			start = ref_offset(*first_raw);
+		} else {
+			/* Read succeeded. Copy the remaining data from the wbuf */
+			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
+		}
+	}
+	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
+	   Either 'buf' contains the data, or we find it in the wbuf */
+
+
+	/* ... and get an allocation of space from a shiny new block instead */
+	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
+	if (ret) {
+		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
+		if (buf)
+			kfree(buf);
+		return;
+	}
+	if (end-start >= c->wbuf_pagesize) {
+		/* Need to do another write immediately. This, btw,
+		 means that we'll be writing from 'buf' and not from
+		 the wbuf. Since if we're writing from the wbuf there
+		 won't be more than a wbuf full of data, now will
+		 there? :) */
+
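+		/* Write only whole write-buffer pages now; the sub-page tail is
+		   copied back into the wbuf further down. */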
+		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
+
+		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+			  towrite, ofs));
+	  
+#ifdef BREAKMEHEADER
+		static int breakme;
+		if (breakme++ == 20) {
+			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
+			breakme = 0;
+			c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+					  brokenbuf, NULL, c->oobinfo);
+			ret = -EIO;
+		} else
+#endif
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+						buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, buf);
+
+		if (ret || retlen != towrite) {
+			/* Argh. We tried. Really we did. */
+			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
+			kfree(buf);
+
+			if (retlen) {
+				struct jffs2_raw_node_ref *raw2;
+
+				raw2 = jffs2_alloc_raw_node_ref();
+				if (!raw2)
+					return;
+
+				raw2->flash_offset = ofs | REF_OBSOLETE;
+				raw2->__totlen = ref_totlen(c, jeb, *first_raw);
+				raw2->next_phys = NULL;
+				raw2->next_in_ino = NULL;
+
+				jffs2_add_physical_node_ref(c, raw2);
+			}
+			return;
+		}
+		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
+
+		c->wbuf_len = (end - start) - towrite;
+		c->wbuf_ofs = ofs + towrite;
+		memcpy(c->wbuf, buf + towrite, c->wbuf_len);
+		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
+
+		kfree(buf);
+	} else {
+		/* OK, now we're left with the dregs in whichever buffer we're using */
+		if (buf) {
+			memcpy(c->wbuf, buf, end-start);
+			kfree(buf);
+		} else {
+			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
+		}
+		c->wbuf_ofs = ofs;
+		c->wbuf_len = end - start;
+	}
+
+	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
+	new_jeb = &c->blocks[ofs / c->sector_size];
+
+	spin_lock(&c->erase_completion_lock);
+	if (new_jeb->first_node) {
+		/* Odd, but possible with ST flash later maybe */
+		new_jeb->last_node->next_phys = *first_raw;
+	} else {
+		new_jeb->first_node = *first_raw;
+	}
+
+	raw = first_raw;
+	while (*raw) {
+		uint32_t rawlen = ref_totlen(c, jeb, *raw);
+
+		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
+			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+
+		if (ref_obsolete(*raw)) {
+			/* Shouldn't really happen much */
+			new_jeb->dirty_size += rawlen;
+			new_jeb->free_size -= rawlen;
+			c->dirty_size += rawlen;
+		} else {
+			new_jeb->used_size += rawlen;
+			new_jeb->free_size -= rawlen;
+			jeb->dirty_size += rawlen;
+			jeb->used_size  -= rawlen;
+			c->dirty_size += rawlen;
+		}
+		c->free_size -= rawlen;
+		(*raw)->flash_offset = ofs | ref_flags(*raw);
+		ofs += rawlen;
+		new_jeb->last_node = *raw;
+
+		raw = &(*raw)->next_phys;
+	}
+
+	/* Fix up the original jeb now it's on the bad_list */
+	*first_raw = NULL;
+	if (first_raw == &jeb->first_node) {
+		jeb->last_node = NULL;
+		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	else
+		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
+
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	ACCT_SANITY_CHECK(c,new_jeb);
+	D1(ACCT_PARANOIA_CHECK(new_jeb));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+}
+
+/* Meaning of pad argument:
+   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
+   1: Pad, do not adjust nextblock free_size
+   2: Pad, adjust nextblock free_size
+*/
+#define NOPAD		0
+#define PAD_NOACCOUNT	1
+#define PAD_ACCOUNTING	2
+
+static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
+{
+	int ret;
+	size_t retlen;
+
+	/* Nothing to do if not NAND flash. In particular, we shouldn't
+	   del_timer() the timer we never initialised. */
+	if (jffs2_can_mark_obsolete(c))
+		return 0;
+
+	if (!down_trylock(&c->alloc_sem)) {
+		up(&c->alloc_sem);
+		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
+		BUG();
+	}
+
+	if(!c->wbuf || !c->wbuf_len)
+		return 0;
+
+	/* Claim the remaining space on the page. This happens if we move on
+	   to a new block, or if fsync forces us to flush the write buffer.
+	   If we have just switched to the next page, there is no remaining
+	   space left to claim.
+	*/
+	if (pad) {
+		c->wbuf_len = PAD(c->wbuf_len);
+
+		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
+		   with 8 byte page size */
+		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
+		
+		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
+			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
+			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
+			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
+			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
+		}
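+		/* If fewer than a node header's worth of bytes remain, the zero
+		   fill above is enough; the scan will treat it as dirty space. */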
+	}
+	/* else jffs2_flash_writev has actually filled in the rest of the
+	   buffer for us, and will deal with the node refs etc. later. */
+	
+#ifdef BREAKME
+	static int breakme;
+	if (breakme++ == 20) {
+		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
+		breakme = 0;
+		c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
+					&retlen, brokenbuf, NULL, c->oobinfo);
+		ret = -EIO;
+	} else 
+#endif
+	
+	if (jffs2_cleanmarker_oob(c))
+		ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
+	else
+		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
+
+	if (ret || retlen != c->wbuf_pagesize) {
+		if (ret)
+			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
+		else {
+			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
+				retlen, c->wbuf_pagesize);
+			ret = -EIO;
+		}
+
+		jffs2_wbuf_recover(c);
+
+		return ret;
+	}
+
+	spin_lock(&c->erase_completion_lock);
+
+	/* Adjust free size of the block if we padded. */
+	if (pad) {
+		struct jffs2_eraseblock *jeb;
+
+		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+			  (jeb==c->nextblock)?"next":"", jeb->offset));
+
+		/* wbuf_pagesize - wbuf_len is the amount of space that's to be 
+		   padded. If there is less free space in the block than that,
+		   something screwed up */
+		if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
+			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
+			       c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
+			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
+			       jeb->offset, jeb->free_size);
+			BUG();
+		}
+		jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+		c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+		jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+		c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+	}
+
+	/* Stick any now-obsoleted blocks on the erase_pending_list */
+	jffs2_refile_wbuf_blocks(c);
+	jffs2_clear_wbuf_ino_list(c);
+	spin_unlock(&c->erase_completion_lock);
+
+	memset(c->wbuf,0xff,c->wbuf_pagesize);
+	/* Adjust the write buffer offset, else we get a non-contiguous write bug */
+	c->wbuf_ofs += c->wbuf_pagesize;
+	c->wbuf_len = 0;
+	return 0;
+}
+
+/* Trigger garbage collection to flush the write-buffer. 
+   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
+   outstanding. If ino arg non-zero, do it only if a write for the 
+   given inode is outstanding. */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
+{
+	uint32_t old_wbuf_ofs;
+	uint32_t old_wbuf_len;
+	int ret = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+
+	down(&c->alloc_sem);
+	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
+		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+		up(&c->alloc_sem);
+		return 0;
+	}
+
+	old_wbuf_ofs = c->wbuf_ofs;
+	old_wbuf_len = c->wbuf_len;
+
+	if (c->unchecked_size) {
+		/* GC won't make any progress for a while */
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+		down_write(&c->wbuf_sem);
+		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+		up_write(&c->wbuf_sem);
+	} else while (old_wbuf_len &&
+		      old_wbuf_ofs == c->wbuf_ofs) {
+
+		up(&c->alloc_sem);
+
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+
+		ret = jffs2_garbage_collect_pass(c);
+		if (ret) {
+			/* GC failed. Flush it with padding instead */
+			down(&c->alloc_sem);
+			down_write(&c->wbuf_sem);
+			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+			up_write(&c->wbuf_sem);
+			break;
+		}
+		down(&c->alloc_sem);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+
+	up(&c->alloc_sem);
+	return ret;
+}
+
+/* Pad write-buffer to end and write it, wasting space. */
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
+{
+	int ret;
+
+	down_write(&c->wbuf_sem);
+	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+	up_write(&c->wbuf_sem);
+
+	return ret;
+}
+
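+/* wbuf_pagesize is a power of two: PAGE_DIV rounds an offset down to the start
+   of its write-buffer page and PAGE_MOD gives the offset within that page. */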
+#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
+#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
+{
+	struct kvec outvecs[3];
+	uint32_t totlen = 0;
+	uint32_t split_ofs = 0;
+	uint32_t old_totlen;
+	int ret, splitvec = -1;
+	int invec, outvec;
+	size_t wbuf_retlen;
+	unsigned char *wbuf_ptr;
+	size_t donelen = 0;
+	uint32_t outvec_to = to;
+
+	/* If not NAND flash, don't bother */
+	if (!c->wbuf)
+		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
+	
+	down_write(&c->wbuf_sem);
+
+	/* If wbuf_ofs is not initialized, set it to target address */
+	if (c->wbuf_ofs == 0xFFFFFFFF) {
+		c->wbuf_ofs = PAGE_DIV(to);
+		c->wbuf_len = PAGE_MOD(to);			
+		memset(c->wbuf,0xff,c->wbuf_pagesize);
+	}
+
+	/* Fixup the wbuf if we are moving to a new eraseblock.  The checks below
+	   fail for ECC'd NOR because cleanmarker == 16, so a block starts at
+	   xxx0010.  */
+	if (jffs2_nor_ecc(c)) {
+		if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
+			c->wbuf_ofs = PAGE_DIV(to);
+			c->wbuf_len = PAGE_MOD(to);
+			memset(c->wbuf,0xff,c->wbuf_pagesize);
+		}
+	}
+	
+	/* Sanity checks on target address. 
+	   It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), 
+	   and it's permitted to write at the beginning of a new 
+	   erase block. Anything else, and you die.
+	   New block starts at xxx000c (0-b = block header)
+	*/
+	if ( (to & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) {
+		/* It's a write to a new block */
+		if (c->wbuf_len) {
+			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
+			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+			if (ret) {
+				/* the underlying layer has to check wbuf_len to do the cleanup */
+				D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+				*retlen = 0;
+				goto exit;
+			}
+		}
+		/* set pointer to new block */
+		c->wbuf_ofs = PAGE_DIV(to);
+		c->wbuf_len = PAGE_MOD(to);			
+	} 
+
+	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
+		/* We're not writing immediately after the writebuffer. Bad. */
+		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
+		if (c->wbuf_len)
+			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
+					  c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+		BUG();
+	}
+
+	/* Note outvecs[3] above. We know count is never greater than 2 */
+	if (count > 2) {
+		printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
+		BUG();
+	}
+
+	invec = 0;
+	outvec = 0;
+
+	/* Fill writebuffer first, if already in use */	
+	if (c->wbuf_len) {
+		uint32_t invec_ofs = 0;
+
+		/* adjust alignment offset */ 
+		if (c->wbuf_len != PAGE_MOD(to)) {
+			c->wbuf_len = PAGE_MOD(to);
+			/* take care of alignment to next page */
+			if (!c->wbuf_len)
+				c->wbuf_len = c->wbuf_pagesize;
+		}
+		
+		while(c->wbuf_len < c->wbuf_pagesize) {
+			uint32_t thislen;
+			
+			if (invec == count)
+				goto alldone;
+
+			thislen = c->wbuf_pagesize - c->wbuf_len;
+
+			if (thislen >= invecs[invec].iov_len)
+				thislen = invecs[invec].iov_len;
+	
+			invec_ofs = thislen;
+
+			memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
+			c->wbuf_len += thislen;
+			donelen += thislen;
+			/* Get the next invec if the current one did not fill the buffer */
+			if (c->wbuf_len < c->wbuf_pagesize) 
+				invec++;
+		}			
+		
+		/* write buffer is full, flush buffer */
+		ret = __jffs2_flush_wbuf(c, NOPAD);
+		if (ret) {
+			/* the underlying layer has to check wbuf_len to do the cleanup */
+			D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+			/* Retlen zero to make sure our caller doesn't mark the space dirty.
+			   We've already done everything that's necessary */
+			*retlen = 0;
+			goto exit;
+		}
+		outvec_to += donelen;
+		c->wbuf_ofs = outvec_to;
+
+		/* All invecs done ? */
+		if (invec == count)
+			goto alldone;
+
+		/* Set up the first outvec, containing the remainder of the
+		   invec we partially used */
+		if (invecs[invec].iov_len > invec_ofs) {
+			outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
+			totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
+			if (totlen > c->wbuf_pagesize) {
+				splitvec = outvec;
+				split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
+			}
+			outvec++;
+		}
+		invec++;
+	}
+
+	/* OK, now we've flushed the wbuf and the start of the bits
+	   we have been asked to write, now to write the rest.... */
+
+	/* totlen holds the amount of data still to be written */
+	old_totlen = totlen;
+	for ( ; invec < count; invec++,outvec++ ) {
+		outvecs[outvec].iov_base = invecs[invec].iov_base;
+		totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
+		if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
+			splitvec = outvec;
+			split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
+			old_totlen = totlen;
+		}
+	}
+
+	/* Now the outvecs array holds all the remaining data to write */
+	/* Up to splitvec,split_ofs is to be written immediately. The rest
+	   goes into the (now-empty) wbuf */
+
+	if (splitvec != -1) {
+		uint32_t remainder;
+
+		remainder = outvecs[splitvec].iov_len - split_ofs;
+		outvecs[splitvec].iov_len = split_ofs;
+
+		/* We did cross a page boundary, so we write some now */
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); 
+		else
+			ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
+		
+		if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
+			/* At this point we have no problem,
+			   c->wbuf is empty. 
+			*/
+			*retlen = donelen;
+			goto exit;
+		}
+		
+		donelen += wbuf_retlen;
+		c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
+
+		if (remainder) {
+			outvecs[splitvec].iov_base += split_ofs;
+			outvecs[splitvec].iov_len = remainder;
+		} else {
+			splitvec++;
+		}
+
+	} else {
+		splitvec = 0;
+	}
+
+	/* Now splitvec points to the start of the bits we have to copy
+	   into the wbuf */
+	wbuf_ptr = c->wbuf;
+
+	for ( ; splitvec < outvec; splitvec++) {
+		/* Don't copy the wbuf into itself */
+		if (outvecs[splitvec].iov_base == c->wbuf)
+			continue;
+		memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
+		wbuf_ptr += outvecs[splitvec].iov_len;
+		donelen += outvecs[splitvec].iov_len;
+	}
+	c->wbuf_len = wbuf_ptr - c->wbuf;
+
+	/* If there's a remainder in the wbuf and it's a non-GC write,
+	   remember that the wbuf affects this ino */
+alldone:
+	*retlen = donelen;
+
+	if (c->wbuf_len && ino)
+		jffs2_wbuf_dirties_inode(c, ino);
+
+	ret = 0;
+	
+exit:
+	up_write(&c->wbuf_sem);
+	return ret;
+}
+
+/*
+ *	This is the entry point for flash writes.
+ *	If we are working on NAND flash, build a kvec and write it via writev.
+ */
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+{
+	struct kvec vecs[1];
+
+	if (jffs2_can_mark_obsolete(c))
+		return c->mtd->write(c->mtd, ofs, len, retlen, buf);
+
+	vecs[0].iov_base = (unsigned char *) buf;
+	vecs[0].iov_len = len;
+	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
+}
+
+/*
+ *	Handle readback from writebuffer and ECC failure return
+ */
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
+{
+	loff_t	orbf = 0, owbf = 0, lwbf = 0;
+	int	ret;
+
+	/* Read flash */
+	if (!jffs2_can_mark_obsolete(c)) {
+		down_read(&c->wbuf_sem);
+
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+		if ( (ret == -EBADMSG) && (*retlen == len) ) {
+			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
+			       len, ofs);
+			/*
+			 * We have the raw data without ECC correction in the buffer;
+			 * maybe we are lucky and all or part of it is still correct.
+			 * The node check will sort out any corrupted data.
+			 * We keep this block: it will fail on write or erase, and then
+			 * we mark it bad. Or should we do that now? Better to give it a
+			 * chance; maybe we had a system crash or power loss before the
+			 * ECC write or an erase was completed.
+			 * So we return success. :)
+			 */
+		 	ret = 0;
+		 }	
+	} else
+		return c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+	/* if no writebuffer available or write buffer empty, return */
+	if (!c->wbuf_pagesize || !c->wbuf_len)
+		goto exit;
+
+	/* if we read in a different block, return */
+	if ( (ofs & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) 
+		goto exit;
+
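+	/* The read overlaps the write buffer: copy the buffered, not yet
+	   written data over the stale bytes we just read from flash. */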
+	if (ofs >= c->wbuf_ofs) {
+		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
+		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
+			goto exit;
+		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
+		if (lwbf > len)	
+			lwbf = len;
+	} else {	
+		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
+		if (orbf > len)			/* does the read end before the write buffer ? */
+			goto exit;
+		lwbf = len - orbf; 		/* number of bytes to copy */
+		if (lwbf > c->wbuf_len)	
+			lwbf = c->wbuf_len;
+	}	
+	if (lwbf > 0)
+		memcpy(buf+orbf,c->wbuf+owbf,lwbf);
+
+exit:
+	up_read(&c->wbuf_sem);
+	return ret;
+}
+
+/*
+ *	Check if the out-of-band area is empty
+ */
+int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
+{
+	unsigned char *buf;
+	int 	ret = 0;
+	int	i,len,page;
+	size_t  retlen;
+	int	oob_size;
+
+	/* allocate a buffer for all oob data in this sector */
+	oob_size = c->mtd->oobsize;
+	len = 4 * oob_size;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
+		return -ENOMEM;
+	}
+	/*
+	 * If mode = 0 we scan for a totally empty oob area; otherwise we have
+	 * to take care of the cleanmarker in the first page of the block.
+	 */
+	ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+		goto out;
+	}
+	
+	if (retlen < len) {
+		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB returned short read "
+			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
+		ret = -EIO;
+		goto out;
+	}
+	
+	/* Special check for first page */
+	for(i = 0; i < oob_size ; i++) {
+		/* Yeah, we know about the cleanmarker. */
+		if (mode && i >= c->fsdata_pos && 
+		    i < c->fsdata_pos + c->fsdata_len)
+			continue;
+
+		if (buf[i] != 0xFF) {
+			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
+				  buf[i], i, jeb->offset));
+			ret = 1; 
+			goto out;
+		}
+	}
+
+	/* we know, we are aligned :) */	
+	for (page = oob_size; page < len; page += sizeof(long)) {
+		unsigned long dat = *(unsigned long *)(&buf[page]);
+		if(dat != -1) {
+			ret = 1; 
+			goto out;
+		}
+	}
+
+out:
+	kfree(buf);	
+	
+	return ret;
+}
+
+/*
+ *	Scan for a valid cleanmarker and for bad blocks.
+ *	For virtual blocks (concatenated physical blocks) check the cleanmarker
+ *	only in the first page of the first physical block, but scan for bad
+ *	blocks in all physical blocks.
+ */
+int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_unknown_node n;
+	unsigned char buf[2 * NAND_MAX_OOBSIZE];
+	unsigned char *p;
+	int ret, i, cnt, retval = 0;
+	size_t retlen, offset;
+	int oob_size;
+
+	offset = jeb->offset;
+	oob_size = c->mtd->oobsize;
+
+	/* Loop through the physical blocks */
+	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
+		/* Check first if the block is bad. */
+		if (c->mtd->block_isbad (c->mtd, offset)) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
+			return 2;
+		}
+		/*
+		 * We read oob data from pages 0 and 1 of the block.
+		 * Page 0 contains the cleanmarker and badblock info;
+		 * page 1 contains the failure count of this block.
+		 */
+		ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
+
+		if (ret) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+			return ret;
+		}
+		if (retlen < (oob_size << 1)) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB returned short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
+			return -EIO;
+		}
+
+		/* Check cleanmarker only on the first physical block */
+		if (!cnt) {
+			n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
+			n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
+			n.totlen = cpu_to_je32 (8);
+			p = (unsigned char *) &n;
+
+			for (i = 0; i < c->fsdata_len; i++) {
+				if (buf[c->fsdata_pos + i] != p[i]) {
+					retval = 1;
+				}
+			}
+			D1(if (retval == 1) {
+				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
+				printk(KERN_WARNING "OOB at %08x was ", offset);
+				for (i=0; i < oob_size; i++) {
+					printk("%02x ", buf[i]);
+				}
+				printk("\n");
+			})
+		}
+		offset += c->mtd->erasesize;
+	}
+	return retval;
+}
+
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct 	jffs2_unknown_node n;
+	int 	ret;
+	size_t 	retlen;
+
+	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
+	n.totlen = cpu_to_je32(8);
+
+	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
+	
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		return ret;
+	}
+	if (retlen != c->fsdata_len) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * On NAND we try to mark this block bad. If the erase has failed more
+ * than MAX_ERASE_FAILURES times, we finally mark the block bad.
+ * Don't care about failures here: the block remains on the erase-pending
+ * or badblock list as long as nobody manipulates the flash with
+ * a bootloader or something like that.
+ */
+
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+	int 	ret;
+
+	/* if the count is < max, we try to write the counter to the 2nd page oob area */
+	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
+		return 0;
+
+	if (!c->mtd->block_markbad)
+		return 1; /* What else can we do? */
+
+	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
+	ret = c->mtd->block_markbad(c->mtd, bad_offset);
+	
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		return ret;
+	}
+	return 1;
+}
+
+#define NAND_JFFS2_OOB16_FSDALEN	8
+
+static struct nand_oobinfo jffs2_oobinfo_docecc = {
+	.useecc = MTD_NANDECC_PLACE,
+	.eccbytes = 6,
+	.eccpos = {0,1,2,3,4,5}
+};
+
+
+static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
+{
+	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
+
+	/* Do this only if the device has an oob area */
+	if (!c->mtd->oobsize)
+		return 0;
+	
+	/* Cleanmarker is out-of-band, so inline size zero */
+	c->cleanmarker_size = 0;
+
+	/* Should we use autoplacement ? */
+	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
+		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
+		/* Get the position of the free bytes */
+		if (!oinfo->oobfree[0][1]) {
+			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
+			return -ENOSPC;
+		}
+		c->fsdata_pos = oinfo->oobfree[0][0];
+		c->fsdata_len = oinfo->oobfree[0][1];
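+		/* The abbreviated cleanmarker node (magic, nodetype, totlen)
+		   is only 8 bytes, so never claim more oob space than that */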
+		if (c->fsdata_len > 8)
+			c->fsdata_len = 8;
+	} else {
+		/* This is just a legacy fallback and should go away soon */
+		switch(c->mtd->ecctype) {
+		case MTD_ECC_RS_DiskOnChip:
+			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
+			c->oobinfo = &jffs2_oobinfo_docecc;
+			c->fsdata_pos = 6;
+			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
+			c->badblock_pos = 15;
+			break;
+	
+		default:
+			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+{
+	int res;
+
+	/* Initialise write buffer */
+	init_rwsem(&c->wbuf_sem);
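+	/* mtd->oobblock is the NAND page size, i.e. the granularity at
+	   which we must buffer up writes */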
+	c->wbuf_pagesize = c->mtd->oobblock;
+	c->wbuf_ofs = 0xFFFFFFFF;
+	
+	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf)
+		return -ENOMEM;
+
+	res = jffs2_nand_set_oobinfo(c);
+
+#ifdef BREAKME
+	if (!brokenbuf)
+		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!brokenbuf) {
+		kfree(c->wbuf);
+		return -ENOMEM;
+	}
+	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
+#endif
+	return res;
+}
+
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
+{
+	kfree(c->wbuf);
+}
+
+#ifdef CONFIG_JFFS2_FS_NOR_ECC
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
+	/* Cleanmarker is actually larger on the flashes */
+	c->cleanmarker_size = 16;
+
+	/* Initialize write buffer */
+	init_rwsem(&c->wbuf_sem);
+	c->wbuf_pagesize = c->mtd->eccsize;
+	c->wbuf_ofs = 0xFFFFFFFF;
+
+	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
+	kfree(c->wbuf);
+}
+#endif
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
new file mode 100644
index 0000000..80a5db5
--- /dev/null
+++ b/fs/jffs2/write.c
@@ -0,0 +1,708 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: write.c,v 1.87 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+#include "compr.h"
+
+
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri)
+{
+	struct jffs2_inode_cache *ic;
+
+	ic = jffs2_alloc_inode_cache();
+	if (!ic) {
+		return -ENOMEM;
+	}
+
+	memset(ic, 0, sizeof(*ic));
+
+	f->inocache = ic;
+	f->inocache->nlink = 1;
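+	/* The per-inode list of raw node refs is terminated by a pointer
+	   back to the inocache itself, so an empty list points straight at
+	   the inocache rather than being NULL. */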
+	f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+	f->inocache->ino = ++c->highest_ino;
+	f->inocache->state = INO_STATE_PRESENT;
+
+	ri->ino = cpu_to_je32(f->inocache->ino);
+
+	D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
+	jffs2_add_ino_cache(c, f->inocache);
+
+	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+	ri->mode = cpu_to_jemode(mode);
+
+	f->highest_version = 1;
+	ri->version = cpu_to_je32(f->highest_version);
+
+	return 0;
+}
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
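+/* Debug aid: check that the 16 bytes at 'ofs' still read back as 0xFF
+   (i.e. erased) before we write a node there, and complain if not. */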
+static void writecheck(struct jffs2_sb_info *c, uint32_t ofs)
+{
+	unsigned char buf[16];
+	size_t retlen;
+	int ret, i;
+
+	ret = jffs2_flash_read(c, ofs, 16, &retlen, buf);
+	if (ret || (retlen != 16)) {
+		D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen));
+		return;
+	}
+	ret = 0;
+	for (i=0; i<16; i++) {
+		if (buf[i] != 0xff)
+			ret = 1;
+	}
+	if (ret) {
+		printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs);
+		printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 
+		       ofs,
+		       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
+		       buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]);
+	}
+}
+#endif
+
+
+/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, 
+   write it to the flash, link it into the existing inode/fragment list */
+
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)
+
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dnode *fn;
+	size_t retlen;
+	struct kvec vecs[2];
+	int ret;
+	int retried = 0;
+	unsigned long cnt = 2;
+
+	D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
+		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
+		BUG();
+	}
+	   );
+	vecs[0].iov_base = ri;
+	vecs[0].iov_len = sizeof(*ri);
+	vecs[1].iov_base = (unsigned char *)data;
+	vecs[1].iov_len = datalen;
+
+	D1(writecheck(c, flash_ofs));
+
+	if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
+		printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
+	}
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw)
+		return ERR_PTR(-ENOMEM);
+	
+	fn = jffs2_alloc_full_dnode();
+	if (!fn) {
+		jffs2_free_raw_node_ref(raw);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fn->ofs = je32_to_cpu(ri->offset);
+	fn->size = je32_to_cpu(ri->dsize);
+	fn->frags = 0;
+
+	/* check number of valid vecs */
+	if (!datalen || !data)
+		cnt = 1;
+ retry:
+	fn->raw = raw;
+
+	raw->flash_offset = flash_ofs;
+	raw->__totlen = PAD(sizeof(*ri)+datalen);
+	raw->next_phys = NULL;
+
+	ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
+				 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);
+
+	if (ret || (retlen != sizeof(*ri) + datalen)) {
+		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 
+		       sizeof(*ri)+datalen, flash_ofs, ret, retlen);
+
+		/* Mark the space as dirtied */
+		if (retlen) {
+			/* Doesn't belong to any inode */
+			raw->next_in_ino = NULL;
+
+			/* Don't change raw->size to match retlen. We may have 
+			   written the node header already, and only the data will
+			   seem corrupted, in which case the scan would skip over
+			   any node we write before the original intended end of 
+			   this node */
+			raw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, raw);
+			jffs2_mark_node_obsolete(c, raw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
+			jffs2_free_raw_node_ref(raw);
+		}
+		if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+			
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			if (alloc_mode == ALLOC_GC) {
+				ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy);
+			} else {
+				/* Locking pain */
+				up(&f->sem);
+				jffs2_complete_reservation(c);
+			
+				ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode);
+				down(&f->sem);
+			}
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(raw);
+		}
+		/* Release the full_dnode which is now useless, and return */
+		jffs2_free_full_dnode(fn);
+		return ERR_PTR(ret?ret:-EIO);
+	}
+	/* Mark the space used */
+	/* If node covers at least a whole page, or if it starts at the 
+	   beginning of a page and runs to the end of the file, or if 
+	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
+	*/
+	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
+	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
+		raw->flash_offset |= REF_PRISTINE;
+	} else {
+		raw->flash_offset |= REF_NORMAL;
+	}
+	jffs2_add_physical_node_ref(c, raw);
+
+	/* Link into per-inode list */
+	spin_lock(&c->erase_completion_lock);
+	raw->next_in_ino = f->inocache->nodes;
+	f->inocache->nodes = raw;
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
+		  flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), 
+		  je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
+		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
+
+	if (retried) {
+		ACCT_SANITY_CHECK(c,NULL);
+	}
+
+	return fn;
+}
+
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+	size_t retlen;
+	struct kvec vecs[2];
+	int retried = 0;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", 
+		  je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
+		  je32_to_cpu(rd->name_crc)));
+	D1(writecheck(c, flash_ofs));
+
+	D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
+		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
+		BUG();
+	}
+	   );
+
+	vecs[0].iov_base = rd;
+	vecs[0].iov_len = sizeof(*rd);
+	vecs[1].iov_base = (unsigned char *)name;
+	vecs[1].iov_len = namelen;
+	
+	raw = jffs2_alloc_raw_node_ref();
+
+	if (!raw)
+		return ERR_PTR(-ENOMEM);
+
+	fd = jffs2_alloc_full_dirent(namelen+1);
+	if (!fd) {
+		jffs2_free_raw_node_ref(raw);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd->version = je32_to_cpu(rd->version);
+	fd->ino = je32_to_cpu(rd->ino);
+	fd->nhash = full_name_hash(name, strlen(name));
+	fd->type = rd->type;
+	memcpy(fd->name, name, namelen);
+	fd->name[namelen]=0;
+
+ retry:
+	fd->raw = raw;
+
+	raw->flash_offset = flash_ofs;
+	raw->__totlen = PAD(sizeof(*rd)+namelen);
+	raw->next_phys = NULL;
+
+	ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
+				 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
+	if (ret || (retlen != sizeof(*rd) + namelen)) {
+		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 
+			       sizeof(*rd)+namelen, flash_ofs, ret, retlen);
+		/* Mark the space as dirtied */
+		if (retlen) {
+			raw->next_in_ino = NULL;
+			raw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, raw);
+			jffs2_mark_node_obsolete(c, raw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
+			jffs2_free_raw_node_ref(raw);
+		}
+		if (!retried && (raw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			if (alloc_mode == ALLOC_GC) {
+				ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy);
+			} else {
+				/* Locking pain */
+				up(&f->sem);
+				jffs2_complete_reservation(c);
+			
+				ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode);
+				down(&f->sem);
+			}
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(raw);
+		}
+		/* Release the full_dnode which is now useless, and return */
+		jffs2_free_full_dirent(fd);
+		return ERR_PTR(ret?ret:-EIO);
+	}
+	/* Mark the space used */
+	raw->flash_offset |= REF_PRISTINE;
+	jffs2_add_physical_node_ref(c, raw);
+
+	spin_lock(&c->erase_completion_lock);
+	raw->next_in_ino = f->inocache->nodes;
+	f->inocache->nodes = raw;
+	spin_unlock(&c->erase_completion_lock);
+
+	if (retried) {
+		ACCT_SANITY_CHECK(c,NULL);
+	}
+
+	return fd;
+}
+
+/* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that
+   we don't have to go digging in struct inode or its equivalent. It should set:
+   mode, uid, gid, (starting)isize, atime, ctime, mtime */
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			    struct jffs2_raw_inode *ri, unsigned char *buf, 
+			    uint32_t offset, uint32_t writelen, uint32_t *retlen)
+{
+	int ret = 0;
+	uint32_t writtenlen = 0;
+
+       	D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
+		  f->inocache->ino, offset, writelen));
+		
+	while(writelen) {
+		struct jffs2_full_dnode *fn;
+		unsigned char *comprbuf = NULL;
+		uint16_t comprtype = JFFS2_COMPR_NONE;
+		uint32_t phys_ofs, alloclen;
+		uint32_t datalen, cdatalen;
+		int retried = 0;
+
+	retry:
+		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
+
+		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
+		if (ret) {
+			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
+			break;
+		}
+		down(&f->sem);
+		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
+
+		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
+
+		ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
+		ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri->ino = cpu_to_je32(f->inocache->ino);
+		ri->version = cpu_to_je32(++f->highest_version);
+		ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
+		ri->offset = cpu_to_je32(offset);
+		ri->csize = cpu_to_je32(cdatalen);
+		ri->dsize = cpu_to_je32(datalen);
+		ri->compr = comprtype & 0xff;
+		ri->usercompr = (comprtype >> 8 ) & 0xff;
+		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+		ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+
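+		/* We write with ALLOC_NORETRY: if the write fails we retry it
+		   once here, with a brand-new space reservation, instead of
+		   letting jffs2_write_dnode() retry internally. */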
+		fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
+
+		jffs2_free_comprbuf(comprbuf, buf);
+
+		if (IS_ERR(fn)) {
+			ret = PTR_ERR(fn);
+			up(&f->sem);
+			jffs2_complete_reservation(c);
+			if (!retried) {
+				/* Write error to be retried */
+				retried = 1;
+				D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
+				goto retry;
+			}
+			break;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		if (ret) {
+			/* Eep */
+			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
+			jffs2_mark_node_obsolete(c, fn->raw);
+			jffs2_free_full_dnode(fn);
+
+			up(&f->sem);
+			jffs2_complete_reservation(c);
+			break;
+		}
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		if (!datalen) {
+			printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
+			ret = -EIO;
+			break;
+		}
+		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
+		writtenlen += datalen;
+		offset += datalen;
+		writelen -= datalen;
+		buf += datalen;
+	}
+	*retlen = writtenlen;
+	return ret;
+}
+
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+	D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
+	if (ret) {
+		up(&f->sem);
+		return ret;
+	}
+
+	ri->data_crc = cpu_to_je32(0);
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+	D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
+		  jemode_to_cpu(ri->mode)));
+
+	if (IS_ERR(fn)) {
+		D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+
+	up(&f->sem);
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+		
+	if (ret) {
+		/* Eep. */
+		D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		return -ENOMEM;
+	}
+
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_f->inocache->ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = ri->ino;
+	rd->mctime = ri->ctime;
+	rd->nsize = namelen;
+	rd->type = DT_REG;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_dirent(rd);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		up(&dir_f->sem);
+		return PTR_ERR(fd);
+	}
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	jffs2_complete_reservation(c);
+	up(&dir_f->sem);
+
+	return 0;
+}
+
+
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
+		    const char *name, int namelen, struct jffs2_inode_info *dead_f)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (1 /* alternative branch needs testing */ || 
+	    !jffs2_can_mark_obsolete(c)) {
+		/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
+
+		rd = jffs2_alloc_raw_dirent();
+		if (!rd)
+			return -ENOMEM;
+
+		ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
+		if (ret) {
+			jffs2_free_raw_dirent(rd);
+			return ret;
+		}
+
+		down(&dir_f->sem);
+
+		/* Build a deletion node */
+		rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+		rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+		rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+		
+		rd->pino = cpu_to_je32(dir_f->inocache->ino);
+		rd->version = cpu_to_je32(++dir_f->highest_version);
+		rd->ino = cpu_to_je32(0);
+		rd->mctime = cpu_to_je32(get_seconds());
+		rd->nsize = namelen;
+		rd->type = DT_UNKNOWN;
+		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+		rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+		fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+		
+		jffs2_free_raw_dirent(rd);
+
+		if (IS_ERR(fd)) {
+			jffs2_complete_reservation(c);
+			up(&dir_f->sem);
+			return PTR_ERR(fd);
+		}
+
+		/* File it. This will mark the old one obsolete. */
+		jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+		up(&dir_f->sem);
+	} else {
+		struct jffs2_full_dirent **prev = &dir_f->dents;
+		uint32_t nhash = full_name_hash(name, namelen);
+
+		down(&dir_f->sem);
+
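+		/* The dirent list is kept sorted by name hash; walk it until
+		   we find the matching entry and obsolete it in place. */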
+		while ((*prev) && (*prev)->nhash <= nhash) {
+			if ((*prev)->nhash == nhash && 
+			    !memcmp((*prev)->name, name, namelen) &&
+			    !(*prev)->name[namelen]) {
+				struct jffs2_full_dirent *this = *prev;
+
+				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
+					  this->ino, ref_offset(this->raw)));
+
+				*prev = this->next;
+				jffs2_mark_node_obsolete(c, (this->raw));
+				jffs2_free_full_dirent(this);
+				break;
+			}
+			prev = &((*prev)->next);
+		}
+		up(&dir_f->sem);
+	}
+
+	/* dead_f is NULL if this was a rename not a real unlink */
+	/* Also catch the !f->inocache case, where there was a dirent
+	   pointing to an inode which didn't exist. */
+	if (dead_f && dead_f->inocache) { 
+
+		down(&dead_f->sem);
+
+		while (dead_f->dents) {
+			/* There can be only deleted ones */
+			fd = dead_f->dents;
+			
+			dead_f->dents = fd->next;
+			
+			if (fd->ino) {
+				printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
+				       dead_f->inocache->ino, fd->name, fd->ino);
+			} else {
+				D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, dead_f->inocache->ino));
+			}
+			jffs2_mark_node_obsolete(c, fd->raw);
+			jffs2_free_full_dirent(fd);
+		}
+
+		dead_f->inocache->nlink--;
+		/* NB: Caller must set inode nlink if appropriate */
+		up(&dead_f->sem);
+	}
+
+	jffs2_complete_reservation(c);
+
+	return 0;
+}
+
+
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd)
+		return -ENOMEM;
+
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		jffs2_free_raw_dirent(rd);
+		return ret;
+	}
+	
+	down(&dir_f->sem);
+
+	/* Build a dirent node for the new link */
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_f->inocache->ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+
+	rd->type = type;
+
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	jffs2_free_raw_dirent(rd);
+
+	if (IS_ERR(fd)) {
+		jffs2_complete_reservation(c);
+		up(&dir_f->sem);
+		return PTR_ERR(fd);
+	}
+
+	/* File it. This will mark the old one obsolete. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	jffs2_complete_reservation(c);
+	up(&dir_f->sem);
+
+	return 0;
+}
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c
new file mode 100644
index 0000000..f079f83
--- /dev/null
+++ b/fs/jffs2/writev.c
@@ -0,0 +1,50 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+/* This ought to be in core MTD code. All registered MTD devices
+   without writev should have this put in place. Bug the MTD
+   maintainer */
+static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
+				  unsigned long count, loff_t to, size_t *retlen)
+{
+	unsigned long i;
+	size_t totlen = 0, thislen;
+	int ret = 0;
+
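+	/* Emulate writev with one mtd->write() per vector, stopping at the
+	   first error or short write; *retlen reports what actually made it
+	   to flash. */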
+	for (i=0; i<count; i++) {
+		if (!vecs[i].iov_len)
+			continue;
+		ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
+		totlen += thislen;
+		if (ret || thislen != vecs[i].iov_len)
+			break;
+		to += vecs[i].iov_len;
+	}
+	if (retlen)
+		*retlen = totlen;
+	return ret;
+}
+
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
+			      unsigned long count, loff_t to, size_t *retlen)
+{
+	if (c->mtd->writev)
+		return c->mtd->writev(c->mtd, vecs, count, to, retlen);
+	else
+		return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
+}
+
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile
new file mode 100644
index 0000000..6f1e0e9
--- /dev/null
+++ b/fs/jfs/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the Linux JFS filesystem routines.
+#
+
+obj-$(CONFIG_JFS_FS) += jfs.o
+
+jfs-y    := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
+	    jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \
+	    jfs_unicode.o jfs_dtree.o jfs_inode.o \
+	    jfs_extent.o symlink.o jfs_metapage.o \
+	    jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o resize.o xattr.o
+
+jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
+
+EXTRA_CFLAGS += -D_JFS_4K
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
new file mode 100644
index 0000000..8d2a9ab
--- /dev/null
+++ b/fs/jfs/acl.c
@@ -0,0 +1,234 @@
+/*
+ *   Copyright (C) International Business Machines  Corp., 2002-2004
+ *   Copyright (C) Andreas Gruenbacher, 2001
+ *   Copyright (C) Linus Torvalds, 1991, 1992
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_xattr.h"
+#include "jfs_acl.h"
+
+static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
+{
+	struct posix_acl *acl;
+	char *ea_name;
+	struct jfs_inode_info *ji = JFS_IP(inode);
+	struct posix_acl **p_acl;
+	int size;
+	char *value = NULL;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			ea_name = XATTR_NAME_ACL_ACCESS;
+			p_acl = &ji->i_acl;
+			break;
+		case ACL_TYPE_DEFAULT:
+			ea_name = XATTR_NAME_ACL_DEFAULT;
+			p_acl = &ji->i_default_acl;
+			break;
+		default:
+			return ERR_PTR(-EINVAL);
+	}
+
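+	/* Return a reference to the cached ACL if we have already read it
+	   from the extended attribute. */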
+	if (*p_acl != JFS_ACL_NOT_CACHED)
+		return posix_acl_dup(*p_acl);
+
+	size = __jfs_getxattr(inode, ea_name, NULL, 0);
+
+	if (size > 0) {
+		value = kmalloc(size, GFP_KERNEL);
+		if (!value)
+			return ERR_PTR(-ENOMEM);
+		size = __jfs_getxattr(inode, ea_name, value, size);
+	}
+
+	if (size < 0) {
+		if (size == -ENODATA) {
+			*p_acl = NULL;
+			acl = NULL;
+		} else
+			acl = ERR_PTR(size);
+	} else {
+		acl = posix_acl_from_xattr(value, size);
+		if (!IS_ERR(acl))
+			*p_acl = posix_acl_dup(acl);
+	}
+	if (value)
+		kfree(value);
+	return acl;
+}
+
+static int jfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+	char *ea_name;
+	struct jfs_inode_info *ji = JFS_IP(inode);
+	struct posix_acl **p_acl;
+	int rc;
+	int size = 0;
+	char *value = NULL;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	switch(type) {
+		case ACL_TYPE_ACCESS:
+			ea_name = XATTR_NAME_ACL_ACCESS;
+			p_acl = &ji->i_acl;
+			break;
+		case ACL_TYPE_DEFAULT:
+			ea_name = XATTR_NAME_ACL_DEFAULT;
+			p_acl = &ji->i_default_acl;
+			if (!S_ISDIR(inode->i_mode))
+				return acl ? -EACCES : 0;
+			break;
+		default:
+			return -EINVAL;
+	}
+	if (acl) {
+		size = xattr_acl_size(acl->a_count);
+		value = kmalloc(size, GFP_KERNEL);
+		if (!value)
+			return -ENOMEM;
+		rc = posix_acl_to_xattr(acl, value, size);
+		if (rc < 0)
+			goto out;
+	}
+	rc = __jfs_setxattr(inode, ea_name, value, size, 0);
+out:
+	if (value)
+		kfree(value);
+
+	if (!rc) {
+		if (*p_acl && (*p_acl != JFS_ACL_NOT_CACHED))
+			posix_acl_release(*p_acl);
+		*p_acl = posix_acl_dup(acl);
+	}
+	return rc;
+}
+
+static int jfs_check_acl(struct inode *inode, int mask)
+{
+	struct jfs_inode_info *ji = JFS_IP(inode);
+
+	if (ji->i_acl == JFS_ACL_NOT_CACHED) {
+		struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		posix_acl_release(acl);
+	}
+
+	if (ji->i_acl)
+		return posix_acl_permission(inode, ji->i_acl, mask);
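+	/* No ACL cached: returning -EAGAIN lets generic_permission() fall
+	   back to the standard mode-bit check. */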
+	return -EAGAIN;
+}
+
+int jfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	return generic_permission(inode, mask, jfs_check_acl);
+}
+
+int jfs_init_acl(struct inode *inode, struct inode *dir)
+{
+	struct posix_acl *acl = NULL;
+	struct posix_acl *clone;
+	mode_t mode;
+	int rc = 0;
+
+	if (S_ISLNK(inode->i_mode))
+		return 0;
+
+	acl = jfs_get_acl(dir, ACL_TYPE_DEFAULT);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+
+	if (acl) {
+		if (S_ISDIR(inode->i_mode)) {
+			rc = jfs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
+			if (rc)
+				goto cleanup;
+		}
+		clone = posix_acl_clone(acl, GFP_KERNEL);
+		if (!clone) {
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+		mode = inode->i_mode;
+		rc = posix_acl_create_masq(clone, &mode);
+		if (rc >= 0) {
+			inode->i_mode = mode;
+			if (rc > 0)
+				rc = jfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+		}
+		posix_acl_release(clone);
+cleanup:
+		posix_acl_release(acl);
+	} else
+		inode->i_mode &= ~current->fs->umask;
+
+	return rc;
+}
+
+static int jfs_acl_chmod(struct inode *inode)
+{
+	struct posix_acl *acl, *clone;
+	int rc;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+
+	clone = posix_acl_clone(acl, GFP_KERNEL);
+	posix_acl_release(acl);
+	if (!clone)
+		return -ENOMEM;
+
+	rc = posix_acl_chmod_masq(clone, inode->i_mode);
+	if (!rc)
+		rc = jfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+
+	posix_acl_release(clone);
+	return rc;
+}
+
+int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = dentry->d_inode;
+	int rc;
+
+	rc = inode_change_ok(inode, iattr);
+	if (rc)
+		return rc;
+
+	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
+	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+		if (DQUOT_TRANSFER(inode, iattr))
+			return -EDQUOT;
+	}
+
+	rc = inode_setattr(inode, iattr);
+
+	if (!rc && (iattr->ia_valid & ATTR_MODE))
+		rc = jfs_acl_chmod(inode);
+
+	return rc;
+}
diff --git a/fs/jfs/endian24.h b/fs/jfs/endian24.h
new file mode 100644
index 0000000..ab7cd05
--- /dev/null
+++ b/fs/jfs/endian24.h
@@ -0,0 +1,49 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2001
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_ENDIAN24
+#define	_H_ENDIAN24
+
+/*
+ *	endian24.h:
+ *
+ * Endian conversion for 24-bit data
+ *
+ */
+#define __swab24(x) \
+({ \
+	__u32 __x = (x); \
+	((__u32)( \
+		((__x & (__u32)0x000000ffUL) << 16) | \
+		 (__x & (__u32)0x0000ff00UL)        | \
+		((__x & (__u32)0x00ff0000UL) >> 16) )); \
+})
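+/* Example: __swab24(0x00123456) == 0x00563412 */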
+
+#if (defined(__KERNEL__) && defined(__LITTLE_ENDIAN)) || (defined(__BYTE_ORDER) && (__BYTE_ORDER == __LITTLE_ENDIAN))
+	#define __cpu_to_le24(x) ((__u32)(x))
+	#define __le24_to_cpu(x) ((__u32)(x))
+#else
+	#define __cpu_to_le24(x) __swab24(x)
+	#define __le24_to_cpu(x) __swab24(x)
+#endif
+
+#ifdef __KERNEL__
+	#define cpu_to_le24 __cpu_to_le24
+	#define le24_to_cpu __le24_to_cpu
+#endif
+
+#endif				/* !_H_ENDIAN24 */
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
new file mode 100644
index 0000000..a87b06f
--- /dev/null
+++ b/fs/jfs/file.c
@@ -0,0 +1,119 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *   Portions Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include "jfs_incore.h"
+#include "jfs_dmap.h"
+#include "jfs_txnmgr.h"
+#include "jfs_xattr.h"
+#include "jfs_acl.h"
+#include "jfs_debug.h"
+
+
+extern int jfs_commit_inode(struct inode *, int);
+extern void jfs_truncate(struct inode *);
+
+int jfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	int rc = 0;
+
+	if (!(inode->i_state & I_DIRTY) ||
+	    (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
+		/* Make sure committed changes hit the disk */
+		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
+		return rc;
+	}
+
+	rc |= jfs_commit_inode(inode, 1);
+
+	return rc ? -EIO : 0;
+}
+
+static int jfs_open(struct inode *inode, struct file *file)
+{
+	int rc;
+
+	if ((rc = generic_file_open(inode, file)))
+		return rc;
+
+	/*
+	 * We attempt to allow only one "active" file open per aggregate
+	 * group.  Otherwise, appending to files in parallel can cause
+	 * fragmentation within the files.
+	 *
+	 * If the file is empty, it was probably just created and going
+	 * to be written to.  If it has a size, we'll hold off until the
+	 * file is actually grown.
+	 */
+	if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
+	    (inode->i_size == 0)) {
+		struct jfs_inode_info *ji = JFS_IP(inode);
+		spin_lock_irq(&ji->ag_lock);
+		if (ji->active_ag == -1) {
+			ji->active_ag = ji->agno;
+			atomic_inc(
+			    &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]);
+		}
+		spin_unlock_irq(&ji->ag_lock);
+	}
+
+	return 0;
+}
+static int jfs_release(struct inode *inode, struct file *file)
+{
+	struct jfs_inode_info *ji = JFS_IP(inode);
+
+	spin_lock_irq(&ji->ag_lock);
+	if (ji->active_ag != -1) {
+		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
+		atomic_dec(&bmap->db_active[ji->active_ag]);
+		ji->active_ag = -1;
+	}
+	spin_unlock_irq(&ji->ag_lock);
+
+	return 0;
+}
+
+struct inode_operations jfs_file_inode_operations = {
+	.truncate	= jfs_truncate,
+	.setxattr	= jfs_setxattr,
+	.getxattr	= jfs_getxattr,
+	.listxattr	= jfs_listxattr,
+	.removexattr	= jfs_removexattr,
+#ifdef CONFIG_JFS_POSIX_ACL
+	.setattr	= jfs_setattr,
+	.permission	= jfs_permission,
+#endif
+};
+
+struct file_operations jfs_file_operations = {
+	.open		= jfs_open,
+	.llseek		= generic_file_llseek,
+	.write		= generic_file_write,
+	.read		= generic_file_read,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= generic_file_aio_write,
+	.mmap		= generic_file_mmap,
+	.readv		= generic_file_readv,
+	.writev		= generic_file_writev,
+ 	.sendfile	= generic_file_sendfile,
+	.fsync		= jfs_fsync,
+	.release	= jfs_release,
+};
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
new file mode 100644
index 0000000..7bc9066
--- /dev/null
+++ b/fs/jfs/inode.c
@@ -0,0 +1,384 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_imap.h"
+#include "jfs_extent.h"
+#include "jfs_unicode.h"
+#include "jfs_debug.h"
+
+
+extern struct inode_operations jfs_dir_inode_operations;
+extern struct inode_operations jfs_file_inode_operations;
+extern struct inode_operations jfs_symlink_inode_operations;
+extern struct file_operations jfs_dir_operations;
+extern struct file_operations jfs_file_operations;
+struct address_space_operations jfs_aops;
+extern int freeZeroLink(struct inode *);
+
+void jfs_read_inode(struct inode *inode)
+{
+	if (diRead(inode)) { 
+		make_bad_inode(inode);
+		return;
+	}
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &jfs_file_inode_operations;
+		inode->i_fop = &jfs_file_operations;
+		inode->i_mapping->a_ops = &jfs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &jfs_dir_inode_operations;
+		inode->i_fop = &jfs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (inode->i_size >= IDATASIZE) {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &jfs_aops;
+		} else
+			inode->i_op = &jfs_symlink_inode_operations;
+	} else {
+		inode->i_op = &jfs_file_inode_operations;
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+	}
+}
+
+/*
+ * Workhorse of both fsync & write_inode
+ */
+int jfs_commit_inode(struct inode *inode, int wait)
+{
+	int rc = 0;
+	tid_t tid;
+	static int noisy = 5;
+
+	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);
+
+	/*
+	 * Don't commit if inode has been committed since last being
+	 * marked dirty, or if it has been deleted.
+	 */
+	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
+		return 0;
+
+	if (isReadOnly(inode)) {
+		/* kernel allows writes to devices on read-only
+		 * partitions and may think inode is dirty
+		 */
+		if (!special_file(inode->i_mode) && noisy) {
+			jfs_err("jfs_commit_inode(0x%p) called on "
+				   "read-only volume", inode);
+			jfs_err("Is remount racy?");
+			noisy--;
+		}
+		return 0;
+	}
+
+	tid = txBegin(inode->i_sb, COMMIT_INODE);
+	down(&JFS_IP(inode)->commit_sem);
+
+	/*
+	 * Retest inode state after taking commit_sem
+	 */
+	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
+		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
+
+	txEnd(tid);
+	up(&JFS_IP(inode)->commit_sem);
+	return rc;
+}
+
+int jfs_write_inode(struct inode *inode, int wait)
+{
+	if (test_cflag(COMMIT_Nolink, inode))
+		return 0;
+	/*
+	 * If COMMIT_Dirty is not set, the inode isn't really dirty.
+	 * It has been committed since the last change, but was still
+	 * on the dirty inode list.
+	 */
+	 if (!test_cflag(COMMIT_Dirty, inode)) {
+		/* Make sure committed changes hit the disk */
+		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
+		return 0;
+	 }
+
+	if (jfs_commit_inode(inode, wait)) {
+		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
+		return -EIO;
+	} else
+		return 0;
+}
+
+void jfs_delete_inode(struct inode *inode)
+{
+	jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
+
+	if (test_cflag(COMMIT_Freewmap, inode))
+		freeZeroLink(inode);
+
+	diFree(inode);
+
+	/*
+	 * Free the inode from the quota allocation.
+	 */
+	DQUOT_INIT(inode);
+	DQUOT_FREE_INODE(inode);
+	DQUOT_DROP(inode);
+
+	clear_inode(inode);
+}
+
+void jfs_dirty_inode(struct inode *inode)
+{
+	static int noisy = 5;
+
+	if (isReadOnly(inode)) {
+		if (!special_file(inode->i_mode) && noisy) {
+			/* kernel allows writes to devices on read-only
+			 * partitions and may try to mark inode dirty
+			 */
+			jfs_err("jfs_dirty_inode called on read-only volume");
+			jfs_err("Is remount racy?");
+			noisy--;
+		}
+		return;
+	}
+
+	set_cflag(COMMIT_Dirty, inode);
+}
+
+static int
+jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
+			struct buffer_head *bh_result, int create)
+{
+	s64 lblock64 = lblock;
+	int rc = 0;
+	int take_locks;
+	xad_t xad;
+	s64 xaddr;
+	int xflag;
+	s32 xlen;
+
+	/*
+	 * If this is a special inode (imap, dmap)
+	 * the lock should already be taken
+	 */
+	take_locks = (JFS_IP(ip)->fileset != AGGREGATE_I);
+
+	/*
+	 * Take appropriate lock on inode
+	 */
+	if (take_locks) {
+		if (create)
+			IWRITE_LOCK(ip);
+		else
+			IREAD_LOCK(ip);
+	}
+
+	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
+	    (xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)
+	     == 0) && xlen) {
+		if (xflag & XAD_NOTRECORDED) {
+			if (!create)
+				/*
+				 * Allocated but not recorded, read treats
+				 * this as a hole
+				 */
+				goto unlock;
+#ifdef _JFS_4K
+			XADoffset(&xad, lblock64);
+			XADlength(&xad, xlen);
+			XADaddress(&xad, xaddr);
+#else				/* _JFS_4K */
+			/*
+			 * As long as block size = 4K, this isn't a problem.
+			 * We should mark the whole page not ABNR, but how
+			 * will we know to mark the other blocks BH_New?
+			 */
+			BUG();
+#endif				/* _JFS_4K */
+			rc = extRecord(ip, &xad);
+			if (rc)
+				goto unlock;
+			set_buffer_new(bh_result);
+		}
+
+		map_bh(bh_result, ip->i_sb, xaddr);
+		bh_result->b_size = xlen << ip->i_blkbits;
+		goto unlock;
+	}
+	if (!create)
+		goto unlock;
+
+	/*
+	 * Allocate a new block
+	 */
+#ifdef _JFS_4K
+	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
+		goto unlock;
+	rc = extAlloc(ip, max_blocks, lblock64, &xad, FALSE);
+	if (rc)
+		goto unlock;
+
+	set_buffer_new(bh_result);
+	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
+	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
+
+#else				/* _JFS_4K */
+	/*
+	 * We need to do whatever it takes to keep all but the last buffers
+	 * in 4K pages - see jfs_write.c
+	 */
+	BUG();
+#endif				/* _JFS_4K */
+
+      unlock:
+	/*
+	 * Release lock on inode
+	 */
+	if (take_locks) {
+		if (create)
+			IWRITE_UNLOCK(ip);
+		else
+			IREAD_UNLOCK(ip);
+	}
+	return rc;
+}
+
+static int jfs_get_block(struct inode *ip, sector_t lblock,
+			 struct buffer_head *bh_result, int create)
+{
+	return jfs_get_blocks(ip, lblock, 1, bh_result, create);
+}
+
+static int jfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return nobh_writepage(page, jfs_get_block, wbc);
+}
+
+static int jfs_writepages(struct address_space *mapping,
+			struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, jfs_get_block);
+}
+
+static int jfs_readpage(struct file *file, struct page *page)
+{
+	return mpage_readpage(page, jfs_get_block);
+}
+
+static int jfs_readpages(struct file *file, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+	return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
+}
+
+static int jfs_prepare_write(struct file *file,
+			     struct page *page, unsigned from, unsigned to)
+{
+	return nobh_prepare_write(page, from, to, jfs_get_block);
+}
+
+static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, jfs_get_block);
+}
+
+static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
+	const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+
+	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+				offset, nr_segs, jfs_get_blocks, NULL);
+}
+
+struct address_space_operations jfs_aops = {
+	.readpage	= jfs_readpage,
+	.readpages	= jfs_readpages,
+	.writepage	= jfs_writepage,
+	.writepages	= jfs_writepages,
+	.sync_page	= block_sync_page,
+	.prepare_write	= jfs_prepare_write,
+	.commit_write	= nobh_commit_write,
+	.bmap		= jfs_bmap,
+	.direct_IO	= jfs_direct_IO,
+};
+
+/*
+ * Guts of jfs_truncate.  Called with locks already held.  Can be called
+ * with directory for truncating directory index table.
+ */
+void jfs_truncate_nolock(struct inode *ip, loff_t length)
+{
+	loff_t newsize;
+	tid_t tid;
+
+	ASSERT(length >= 0);
+
+	if (test_cflag(COMMIT_Nolink, ip)) {
+		xtTruncate(0, ip, length, COMMIT_WMAP);
+		return;
+	}
+
+	do {
+		tid = txBegin(ip->i_sb, 0);
+
+		/*
+		 * The commit_sem cannot be taken before txBegin.
+		 * txBegin may block and there is a chance the inode
+		 * could be marked dirty and need to be committed
+		 * before txBegin unblocks
+		 */
+		down(&JFS_IP(ip)->commit_sem);
+
+		newsize = xtTruncate(tid, ip, length,
+				     COMMIT_TRUNCATE | COMMIT_PWMAP);
+		if (newsize < 0) {
+			txEnd(tid);
+			up(&JFS_IP(ip)->commit_sem);
+			break;
+		}
+
+		ip->i_mtime = ip->i_ctime = CURRENT_TIME;
+		mark_inode_dirty(ip);
+
+		txCommit(tid, 1, &ip, 0);
+		txEnd(tid);
+		up(&JFS_IP(ip)->commit_sem);
+	} while (newsize > length);	/* Truncate isn't always atomic */
+}
+
+void jfs_truncate(struct inode *ip)
+{
+	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
+
+	nobh_truncate_page(ip->i_mapping, ip->i_size);
+
+	IWRITE_LOCK(ip);
+	jfs_truncate_nolock(ip, ip->i_size);
+	IWRITE_UNLOCK(ip);
+}
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
new file mode 100644
index 0000000..d2ae430
--- /dev/null
+++ b/fs/jfs/jfs_acl.h
@@ -0,0 +1,30 @@
+/*
+ *   Copyright (c) International Business Machines  Corp., 2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_ACL
+#define _H_JFS_ACL
+
+#ifdef CONFIG_JFS_POSIX_ACL
+
+#include <linux/xattr_acl.h>
+
+int jfs_permission(struct inode *, int, struct nameidata *);
+int jfs_init_acl(struct inode *, struct inode *);
+int jfs_setattr(struct dentry *, struct iattr *);
+
+#endif		/* CONFIG_JFS_POSIX_ACL */
+#endif		/* _H_JFS_ACL */
diff --git a/fs/jfs/jfs_btree.h b/fs/jfs/jfs_btree.h
new file mode 100644
index 0000000..7f3e9ac
--- /dev/null
+++ b/fs/jfs/jfs_btree.h
@@ -0,0 +1,172 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_BTREE
+#define _H_JFS_BTREE
+
+/*
+ *	jfs_btree.h: B+-tree
+ *
+ * JFS B+-tree (dtree and xtree) common definitions
+ */
+
+/*
+ *	basic btree page - btpage
+ *
+struct btpage {
+	s64 next;		right sibling bn
+	s64 prev;		left sibling bn
+
+	u8 flag;
+	u8 rsrvd[7];		type specific
+	s64 self;		self address
+
+	u8 entry[4064];
+};						*/
+
+/* btpage flag */
+#define BT_TYPE		0x07	/* B+-tree index */
+#define	BT_ROOT		0x01	/* root page */
+#define	BT_LEAF		0x02	/* leaf page */
+#define	BT_INTERNAL	0x04	/* internal page */
+#define	BT_RIGHTMOST	0x10	/* rightmost page */
+#define	BT_LEFTMOST	0x20	/* leftmost page */
+#define	BT_SWAPPED	0x80	/* used by fsck for endian swapping */
+
+/* btorder (in inode) */
+#define	BT_RANDOM		0x0000
+#define	BT_SEQUENTIAL		0x0001
+#define	BT_LOOKUP		0x0010
+#define	BT_INSERT		0x0020
+#define	BT_DELETE		0x0040
+
+/*
+ *	btree page buffer cache access
+ */
+#define BT_IS_ROOT(MP) (((MP)->xflag & COMMIT_PAGE) == 0)
+
+/* get page from buffer page */
+#define BT_PAGE(IP, MP, TYPE, ROOT)\
+	(BT_IS_ROOT(MP) ? (TYPE *)&JFS_IP(IP)->ROOT : (TYPE *)(MP)->data)
+
+/* get the page buffer and the page for specified block address */
+#define BT_GETPAGE(IP, BN, MP, TYPE, SIZE, P, RC, ROOT)\
+{\
+	if ((BN) == 0)\
+	{\
+		MP = (struct metapage *)&JFS_IP(IP)->bxflag;\
+		P = (TYPE *)&JFS_IP(IP)->ROOT;\
+		RC = 0;\
+	}\
+	else\
+	{\
+		MP = read_metapage((IP), BN, SIZE, 1);\
+		if (MP) {\
+			RC = 0;\
+			P = (MP)->data;\
+		} else {\
+			P = NULL;\
+			jfs_err("bread failed!");\
+			RC = -EIO;\
+		}\
+	}\
+}
+
+#define BT_MARK_DIRTY(MP, IP)\
+{\
+	if (BT_IS_ROOT(MP))\
+		mark_inode_dirty(IP);\
+	else\
+		mark_metapage_dirty(MP);\
+}
+
+/* put the page buffer */
+#define BT_PUTPAGE(MP)\
+{\
+	if (! BT_IS_ROOT(MP)) \
+		release_metapage(MP); \
+}
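+
+/*
+ * The GET/PUT macros are meant to be used in pairs; a sketch of the
+ * pattern (the xtpage_t type and the i_xtroot field name below merely
+ * stand in for whichever b+-tree flavour the caller works on):
+ *
+ *	BT_GETPAGE(ip, bn, mp, xtpage_t, PSIZE, p, rc, i_xtroot);
+ *	if (rc == 0) {
+ *		... read or modify the page through p ...
+ *		BT_MARK_DIRTY(mp, ip);	  only if it was modified
+ *		BT_PUTPAGE(mp);
+ *	}
+ */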
+
+
+/*
+ *	btree traversal stack
+ *
+ * records the path traversed during the search;
+ * the top frame records the leaf page/entry selected.
+ */
+struct btframe {	/* stack frame */
+	s64 bn;			/* 8: */
+	s16 index;		/* 2: */
+	s16 lastindex;		/* 2: unused */
+	struct metapage *mp;	/* 4/8: */
+};				/* (16/24) */
+
+struct btstack {
+	struct btframe *top;
+	int nsplit;
+	struct btframe stack[MAXTREEHEIGHT];
+};
+
+#define BT_CLR(btstack)\
+	(btstack)->top = (btstack)->stack
+
+#define BT_STACK_FULL(btstack)\
+	( (btstack)->top == &((btstack)->stack[MAXTREEHEIGHT-1]))
+
+#define BT_PUSH(BTSTACK, BN, INDEX)\
+{\
+	assert(!BT_STACK_FULL(BTSTACK));\
+	(BTSTACK)->top->bn = BN;\
+	(BTSTACK)->top->index = INDEX;\
+	++(BTSTACK)->top;\
+}
+
+#define BT_POP(btstack)\
+	( (btstack)->top == (btstack)->stack ? NULL : --(btstack)->top )
+
+#define BT_STACK(btstack)\
+	( (btstack)->top == (btstack)->stack ? NULL : (btstack)->top )
+
+static inline void BT_STACK_DUMP(struct btstack *btstack)
+{
+	int i;
+	printk("btstack dump:\n");
+	for (i = 0; i < MAXTREEHEIGHT; i++)
+		printk(KERN_ERR "bn = %Lx, index = %d\n",
+		       (long long)btstack->stack[i].bn,
+		       btstack->stack[i].index);
+}
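+
+/*
+ * Typical use of the stack (sketch only; the block numbers and indices
+ * are made up): a search pushes one frame per page visited on the way
+ * down and pops them again when propagating changes back up.
+ *
+ *	struct btstack btstack;
+ *	struct btframe *parent;
+ *
+ *	BT_CLR(&btstack);		  empty: top == stack
+ *	BT_PUSH(&btstack, 0, 0);	  root page, slot 0
+ *	BT_PUSH(&btstack, 1234, 5);	  internal page, slot 5
+ *
+ *	while ((parent = BT_POP(&btstack)) != NULL)
+ *		...;			  yields 1234/5 first, then 0/0
+ */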
+
+/* retrieve search results */
+#define BT_GETSEARCH(IP, LEAF, BN, MP, TYPE, P, INDEX, ROOT)\
+{\
+	BN = (LEAF)->bn;\
+	MP = (LEAF)->mp;\
+	if (BN)\
+		P = (TYPE *)MP->data;\
+	else\
+		P = (TYPE *)&JFS_IP(IP)->ROOT;\
+	INDEX = (LEAF)->index;\
+}
+
+/* put the page buffer of search */
+#define BT_PUTSEARCH(BTSTACK)\
+{\
+	if (! BT_IS_ROOT((BTSTACK)->top->mp))\
+		release_metapage((BTSTACK)->top->mp);\
+}
+#endif				/* _H_JFS_BTREE */
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
new file mode 100644
index 0000000..91a0a88
--- /dev/null
+++ b/fs/jfs/jfs_debug.c
@@ -0,0 +1,154 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_debug.h"
+
+#ifdef CONFIG_JFS_DEBUG
+void dump_mem(char *label, void *data, int length)
+{
+	int i, j;
+	int *intptr = data;
+	char *charptr = data;
+	char buf[10], line[80];
+
+	printk("%s: dump of %d bytes of data at 0x%p\n\n", label, length,
+	       data);
+	for (i = 0; i < length; i += 16) {
+		line[0] = 0;
+		for (j = 0; (j < 4) && (i + j * 4 < length); j++) {
+			sprintf(buf, " %08x", intptr[i / 4 + j]);
+			strcat(line, buf);
+		}
+		buf[0] = ' ';
+		buf[2] = 0;
+		for (j = 0; (j < 16) && (i + j < length); j++) {
+			buf[1] =
+			    isprint(charptr[i + j]) ? charptr[i + j] : '.';
+			strcat(line, buf);
+		}
+		printk("%s\n", line);
+	}
+}
+#endif
+
+#ifdef PROC_FS_JFS /* see jfs_debug.h */
+
+static struct proc_dir_entry *base;
+#ifdef CONFIG_JFS_DEBUG
+extern read_proc_t jfs_txanchor_read;
+
+static int loglevel_read(char *page, char **start, off_t off,
+			 int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", jfsloglevel);
+
+	len -= off;
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+static int loglevel_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char c;
+
+	if (get_user(c, buffer))
+		return -EFAULT;
+
+	/* yes, I know this is an ASCIIism.  --hch */
+	if (c < '0' || c > '9')
+		return -EINVAL;
+	jfsloglevel = c - '0';
+	return count;
+}
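+
+/*
+ * Together with the proc entries registered below this gives a small
+ * run-time knob, e.g. (assuming procfs is mounted on /proc):
+ *
+ *	echo 3 > /proc/fs/jfs/loglevel		(JFS_LOGLEVEL_DEBUG)
+ *	cat /proc/fs/jfs/loglevel
+ */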
+#endif
+
+
+#ifdef CONFIG_JFS_STATISTICS
+extern read_proc_t jfs_lmstats_read;
+extern read_proc_t jfs_txstats_read;
+extern read_proc_t jfs_xtstat_read;
+extern read_proc_t jfs_mpstat_read;
+#endif
+
+static struct {
+	const char	*name;
+	read_proc_t	*read_fn;
+	write_proc_t	*write_fn;
+} Entries[] = {
+#ifdef CONFIG_JFS_STATISTICS
+	{ "lmstats",	jfs_lmstats_read, },
+	{ "txstats",	jfs_txstats_read, },
+	{ "xtstat",	jfs_xtstat_read, },
+	{ "mpstat",	jfs_mpstat_read, },
+#endif
+#ifdef CONFIG_JFS_DEBUG
+	{ "TxAnchor",	jfs_txanchor_read, },
+	{ "loglevel",	loglevel_read, loglevel_write }
+#endif
+};
+#define NPROCENT	(sizeof(Entries)/sizeof(Entries[0]))
+
+void jfs_proc_init(void)
+{
+	int i;
+
+	if (!(base = proc_mkdir("jfs", proc_root_fs)))
+		return;
+	base->owner = THIS_MODULE;
+
+	for (i = 0; i < NPROCENT; i++) {
+		struct proc_dir_entry *p;
+		if ((p = create_proc_entry(Entries[i].name, 0, base))) {
+			p->read_proc = Entries[i].read_fn;
+			p->write_proc = Entries[i].write_fn;
+		}
+	}
+}
+
+void jfs_proc_clean(void)
+{
+	int i;
+
+	if (base) {
+		for (i = 0; i < NPROCENT; i++)
+			remove_proc_entry(Entries[i].name, base);
+		remove_proc_entry("jfs", proc_root_fs);
+	}
+}
+
+#endif /* PROC_FS_JFS */
diff --git a/fs/jfs/jfs_debug.h b/fs/jfs/jfs_debug.h
new file mode 100644
index 0000000..a38079a
--- /dev/null
+++ b/fs/jfs/jfs_debug.h
@@ -0,0 +1,122 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *   Portions Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_DEBUG
+#define _H_JFS_DEBUG
+
+/*
+ *	jfs_debug.h
+ *
+ * global debug message, data structure/macro definitions
+ * under control of CONFIG_JFS_DEBUG, CONFIG_JFS_STATISTICS;
+ */
+
+/*
+ * Create /proc/fs/jfs if procfs is enabled and either
+ * CONFIG_JFS_DEBUG or CONFIG_JFS_STATISTICS is defined
+ */
+#if defined(CONFIG_PROC_FS) && (defined(CONFIG_JFS_DEBUG) || defined(CONFIG_JFS_STATISTICS))
+	#define PROC_FS_JFS
+#endif
+
+/*
+ *	assert with traditional printf/panic
+ */
+#ifdef CONFIG_KERNEL_ASSERTS
+/* kgdb stuff */
+#define assert(p) KERNEL_ASSERT(#p, p)
+#else
+#define assert(p) do {	\
+	if (!(p)) {	\
+		printk(KERN_CRIT "BUG at %s:%d assert(%s)\n",	\
+		       __FILE__, __LINE__, #p);			\
+		BUG();	\
+	}		\
+} while (0)
+#endif
+
+/*
+ *	debug ON
+ *	--------
+ */
+#ifdef CONFIG_JFS_DEBUG
+#define ASSERT(p) assert(p)
+
+/* printk verbosity */
+#define JFS_LOGLEVEL_ERR 1
+#define JFS_LOGLEVEL_WARN 2
+#define JFS_LOGLEVEL_DEBUG 3
+#define JFS_LOGLEVEL_INFO 4
+
+extern int jfsloglevel;
+
+/* dump memory contents */
+extern void dump_mem(char *label, void *data, int length);
+
+/* information message: e.g., configuration, major event */
+#define jfs_info(fmt, arg...) do {			\
+	if (jfsloglevel >= JFS_LOGLEVEL_INFO)		\
+		printk(KERN_INFO fmt "\n", ## arg);	\
+} while (0)
+
+/* debug message: ad hoc */
+#define jfs_debug(fmt, arg...) do {			\
+	if (jfsloglevel >= JFS_LOGLEVEL_DEBUG)		\
+		printk(KERN_DEBUG fmt "\n", ## arg);	\
+} while (0)
+
+/* warn message: */
+#define jfs_warn(fmt, arg...) do {			\
+	if (jfsloglevel >= JFS_LOGLEVEL_WARN)		\
+		printk(KERN_WARNING fmt "\n", ## arg);	\
+} while (0)
+
+/* error event message: e.g., i/o error */
+#define jfs_err(fmt, arg...) do {			\
+	if (jfsloglevel >= JFS_LOGLEVEL_ERR)		\
+		printk(KERN_ERR fmt "\n", ## arg);	\
+} while (0)
+
+/*
+ *	debug OFF
+ *	---------
+ */
+#else				/* CONFIG_JFS_DEBUG */
+#define dump_mem(label,data,length) do {} while (0)
+#define ASSERT(p) do {} while (0)
+#define jfs_info(fmt, arg...) do {} while (0)
+#define jfs_debug(fmt, arg...) do {} while (0)
+#define jfs_warn(fmt, arg...) do {} while (0)
+#define jfs_err(fmt, arg...) do {} while (0)
+#endif				/* CONFIG_JFS_DEBUG */
+
+/*
+ *	statistics
+ *	----------
+ */
+#ifdef	CONFIG_JFS_STATISTICS
+#define	INCREMENT(x)		((x)++)
+#define	DECREMENT(x)		((x)--)
+#define	HIGHWATERMARK(x,y)	((x) = max((x), (y)))
+#else
+#define	INCREMENT(x)
+#define	DECREMENT(x)
+#define	HIGHWATERMARK(x,y)
+#endif				/* CONFIG_JFS_STATISTICS */
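+
+/*
+ * Typical use of the statistics helpers (the counter names below are
+ * only illustrative; the real counters live in the individual
+ * subsystems):
+ *
+ *	INCREMENT(mpStat.pagealloc);
+ *	HIGHWATERMARK(lmStat.maxqueued, nqueued);
+ *
+ * With CONFIG_JFS_STATISTICS disabled they compile away to nothing.
+ */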
+
+#endif				/* _H_JFS_DEBUG */
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
new file mode 100644
index 0000000..580a325
--- /dev/null
+++ b/fs/jfs/jfs_dinode.h
@@ -0,0 +1,151 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2001
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_DINODE
+#define _H_JFS_DINODE
+
+/*
+ *      jfs_dinode.h: on-disk inode manager
+ */
+
+#define INODESLOTSIZE           128
+#define L2INODESLOTSIZE         7
+#define log2INODESIZE           9	/* log2(bytes per dinode) */
+
+
+/*
+ *      on-disk inode : 512 bytes
+ *
+ * note: align 64-bit fields on 8-byte boundary.
+ */
+struct dinode {
+	/*
+	 *      I. base area (128 bytes)
+	 *      ------------------------
+	 *
+	 * define generic/POSIX attributes
+	 */
+	__le32 di_inostamp;	/* 4: stamp to show inode belongs to fileset */
+	__le32 di_fileset;	/* 4: fileset number */
+	__le32 di_number;	/* 4: inode number, aka file serial number */
+	__le32 di_gen;		/* 4: inode generation number */
+
+	pxd_t di_ixpxd;		/* 8: inode extent descriptor */
+
+	__le64 di_size;		/* 8: size */
+	__le64 di_nblocks;	/* 8: number of blocks allocated */
+
+	__le32 di_nlink;	/* 4: number of links to the object */
+
+	__le32 di_uid;		/* 4: user id of owner */
+	__le32 di_gid;		/* 4: group id of owner */
+
+	__le32 di_mode;		/* 4: attribute, format and permission */
+
+	struct timestruc_t di_atime;	/* 8: time last data accessed */
+	struct timestruc_t di_ctime;	/* 8: time last status changed */
+	struct timestruc_t di_mtime;	/* 8: time last data modified */
+	struct timestruc_t di_otime;	/* 8: time created */
+
+	dxd_t di_acl;		/* 16: acl descriptor */
+
+	dxd_t di_ea;		/* 16: ea descriptor */
+
+	__le32 di_next_index;	/* 4: Next available dir_table index */
+
+	__le32 di_acltype;	/* 4: Type of ACL */
+
+	/*
+	 *      Extension Areas.
+	 *
+	 *      Historically, the inode was partitioned into 4 128-byte areas,
+	 *      the last 3 being defined as unions which could have multiple
+	 *      uses.  The first 96 bytes had been completely unused until
+	 *      an index table was added to the directory.  It is now more
+	 *      useful to describe the last 3/4 of the inode as a single
+	 *      union.  We would probably be better off redesigning the
+	 *      entire structure from scratch, but we don't want to break
+	 *      commonality with OS/2's JFS at this time.
+	 */
+	union {
+		struct {
+			/*
+			 * This table contains the information needed to
+			 * find a directory entry from a 32-bit index.
+			 * If the index is small enough, the table is inline,
+			 * otherwise, an x-tree root overlays this table
+			 */
+			struct dir_table_slot _table[12]; /* 96: inline */
+
+			dtroot_t _dtroot;		/* 288: dtree root */
+		} _dir;					/* (384) */
+#define di_dirtable	u._dir._table
+#define di_dtroot	u._dir._dtroot
+#define di_parent       di_dtroot.header.idotdot
+#define di_DASD		di_dtroot.header.DASD
+
+		struct {
+			union {
+				u8 _data[96];		/* 96: unused */
+				struct {
+					void *_imap;	/* 4: unused */
+					__le32 _gengen;	/* 4: generator */
+				} _imap;
+			} _u1;				/* 96: */
+#define di_gengen	u._file._u1._imap._gengen
+
+			union {
+				xtpage_t _xtroot;
+				struct {
+					u8 unused[16];	/* 16: */
+					dxd_t _dxd;	/* 16: */
+					union {
+						__le32 _rdev;	/* 4: */
+						u8 _fastsymlink[128];
+					} _u;
+					u8 _inlineea[128];
+				} _special;
+			} _u2;
+		} _file;
+#define di_xtroot	u._file._u2._xtroot
+#define di_dxd		u._file._u2._special._dxd
+#define di_btroot	di_xtroot
+#define di_inlinedata	u._file._u2._special._u
+#define di_rdev		u._file._u2._special._u._rdev
+#define di_fastsymlink	u._file._u2._special._u._fastsymlink
+#define di_inlineea     u._file._u2._special._inlineea
+	} u;
+};
+
+/* extended mode bits (on-disk inode di_mode) */
+#define IFJOURNAL       0x00010000	/* journalled file */
+#define ISPARSE         0x00020000	/* sparse file enabled */
+#define INLINEEA        0x00040000	/* inline EA area free */
+#define ISWAPFILE	0x00800000	/* file open for pager swap space */
+
+/* more extended mode bits: attributes for OS/2 */
+#define IREADONLY	0x02000000	/* no write access to file */
+#define IARCHIVE	0x40000000	/* file archive bit */
+#define ISYSTEM		0x08000000	/* system file */
+#define IHIDDEN		0x04000000	/* hidden file */
+#define IRASH		0x4E000000	/* mask for changeable attributes */
+#define INEWNAME	0x80000000	/* non-8.3 filename format */
+#define IDIRECTORY	0x20000000	/* directory (shadow of real bit) */
+#define ATTRSHIFT	25	/* bits to shift to move attribute
+				   specification to mode position */
+
+#endif /*_H_JFS_DINODE */
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
new file mode 100644
index 0000000..d86e467
--- /dev/null
+++ b/fs/jfs/jfs_dmap.c
@@ -0,0 +1,4272 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_imap.h"
+#include "jfs_lock.h"
+#include "jfs_metapage.h"
+#include "jfs_debug.h"
+
+/*
+ *	Debug code for double-checking block map
+ */
+/* #define	_JFS_DEBUG_DMAP	1 */
+
+#ifdef	_JFS_DEBUG_DMAP
+#define DBINITMAP(size,ipbmap,results) \
+	DBinitmap(size,ipbmap,results)
+#define DBALLOC(dbmap,mapsize,blkno,nblocks) \
+	DBAlloc(dbmap,mapsize,blkno,nblocks)
+#define DBFREE(dbmap,mapsize,blkno,nblocks) \
+	DBFree(dbmap,mapsize,blkno,nblocks)
+#define DBALLOCCK(dbmap,mapsize,blkno,nblocks) \
+	DBAllocCK(dbmap,mapsize,blkno,nblocks)
+#define DBFREECK(dbmap,mapsize,blkno,nblocks) \
+	DBFreeCK(dbmap,mapsize,blkno,nblocks)
+
+static void DBinitmap(s64, struct inode *, u32 **);
+static void DBAlloc(uint *, s64, s64, s64);
+static void DBFree(uint *, s64, s64, s64);
+static void DBAllocCK(uint *, s64, s64, s64);
+static void DBFreeCK(uint *, s64, s64, s64);
+#else
+#define DBINITMAP(size,ipbmap,results)
+#define DBALLOC(dbmap, mapsize, blkno, nblocks)
+#define DBFREE(dbmap, mapsize, blkno, nblocks)
+#define DBALLOCCK(dbmap, mapsize, blkno, nblocks)
+#define DBFREECK(dbmap, mapsize, blkno, nblocks)
+#endif				/* _JFS_DEBUG_DMAP */
+
+/*
+ *	SERIALIZATION of the Block Allocation Map.
+ *
+ *	the working state of the block allocation map is accessed in
+ *	two directions:
+ *	
+ *	1) allocation and free requests that start at the dmap
+ *	   level and move up through the dmap control pages (i.e.
+ *	   the vast majority of requests).
+ * 
+ * 	2) allocation requests that start at dmap control page
+ *	   level and work down towards the dmaps.
+ *	
+ *	the serialization scheme used here is as follows. 
+ *
+ *	requests which start at the bottom are serialized against each
+ *	other through buffers and each request holds onto its buffers
+ *	as it works its way up from a single dmap to the required level
+ *	of dmap control page.
+ *	requests that start at the top are serialized against each other
+ *	and against requests that start from the bottom by the multiple
+ *	read/single write inode lock of the bmap inode. requests starting
+ *	at the top take this lock in write mode while requests starting
+ *	at the bottom take the lock in read mode.  a single top-down
+ *	request may proceed exclusively while multiple bottom-up requests
+ *	may proceed simultaneously (under the protection of busy buffers).
+ *
+ *	in addition to information found in dmaps and dmap control pages,
+ *	the working state of the block allocation map also includes read/
+ *	write information maintained in the bmap descriptor (i.e. total
+ *	free block count, allocation group level free block counts).
+ *	a single exclusive lock (BMAP_LOCK) is used to guard this
+ *	information in the face of multiple bottom-up requests.
+ *	(lock ordering: IREAD_LOCK, BMAP_LOCK)
+ *
+ *	accesses to the persistent state of the block allocation map
+ *	(limited to the persistent bitmaps in dmaps) are guarded by
+ *	(busy) buffers.
+ */
+
+#define BMAP_LOCK_INIT(bmp)	init_MUTEX(&bmp->db_bmaplock)
+#define BMAP_LOCK(bmp)		down(&bmp->db_bmaplock)
+#define BMAP_UNLOCK(bmp)	up(&bmp->db_bmaplock)
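+
+#if 0
+/*
+ * Illustrative only (never called): the lock ordering documented above
+ * for a bottom-up update of the summary counters.
+ */
+static void dbLockOrderExample(struct inode *ipbmap)
+{
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+
+	IREAD_LOCK(ipbmap);	/* many bottom-up requests may hold this */
+	BMAP_LOCK(bmp);		/* guards db_nfree, db_agfree[], etc. */
+
+	/* ... adjust the free block counts here ... */
+
+	BMAP_UNLOCK(bmp);
+	IREAD_UNLOCK(ipbmap);
+}
+#endif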
+
+/*
+ * forward references
+ */
+static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+			int nblocks);
+static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
+static void dbBackSplit(dmtree_t * tp, int leafno);
+static void dbJoin(dmtree_t * tp, int leafno, int newval);
+static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
+static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
+		    int level);
+static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
+static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks);
+static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks,
+		       int l2nb, s64 * results);
+static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks);
+static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
+			  int l2nb,
+			  s64 * results);
+static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
+		     s64 * results);
+static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+		      s64 * results);
+static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+static int dbFindBits(u32 word, int l2nb);
+static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
+static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks);
+static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		      int nblocks);
+static int dbMaxBud(u8 * cp);
+s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
+static int blkstol2(s64 nb);
+
+static int cntlz(u32 value);
+static int cnttz(u32 word);
+
+static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
+			 int nblocks);
+static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
+static int dbInitDmapTree(struct dmap * dp);
+static int dbInitTree(struct dmaptree * dtp);
+static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
+static int dbGetL2AGSize(s64 nblocks);
+
+/*
+ *	buddy table
+ *
+ * table used for determining buddy sizes within characters of 
+ * dmap bitmap words.  the characters themselves serve as indexes
+ * into the table, with the table elements yielding the log2 size of
+ * the maximum binary buddy of free bits within the character.
+ */
+static s8 budtab[256] = {
+	3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
+};
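+
+/*
+ * Reading the table: the index is a byte of the working map with 1-bits
+ * meaning "allocated".  budtab[0x00] = 3 (all 8 bits free, largest buddy
+ * 2^3 = 8 bits), budtab[0x0f] = 2 (one free nibble, largest buddy
+ * 2^2 = 4 bits), budtab[0xff] = -1 (no free buddy at all).
+ */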
+
+
+/*
+ * NAME:    	dbMount()
+ *
+ * FUNCTION:	initialize the block allocation map.
+ *
+ *		memory is allocated for the in-core bmap descriptor and
+ *		the in-core descriptor is initialized from disk.
+ *
+ * PARAMETERS:
+ *      ipbmap	-  pointer to in-core inode for the block map.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOMEM	- insufficient memory
+ *      -EIO	- i/o error
+ */
+int dbMount(struct inode *ipbmap)
+{
+	struct bmap *bmp;
+	struct dbmap_disk *dbmp_le;
+	struct metapage *mp;
+	int i;
+
+	/*
+	 * allocate/initialize the in-memory bmap descriptor
+	 */
+	/* allocate memory for the in-memory bmap descriptor */
+	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
+	if (bmp == NULL)
+		return -ENOMEM;
+
+	/* read the on-disk bmap descriptor. */
+	mp = read_metapage(ipbmap,
+			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
+			   PSIZE, 0);
+	if (mp == NULL) {
+		kfree(bmp);
+		return -EIO;
+	}
+
+	/* copy the on-disk bmap descriptor to its in-memory version. */
+	dbmp_le = (struct dbmap_disk *) mp->data;
+	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
+	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
+	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+	bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
+	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+	for (i = 0; i < MAXAG; i++)
+		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
+	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
+	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
+
+	/* release the buffer. */
+	release_metapage(mp);
+
+	/* bind the bmap inode and the bmap descriptor to each other. */
+	bmp->db_ipbmap = ipbmap;
+	JFS_SBI(ipbmap->i_sb)->bmap = bmp;
+
+	memset(bmp->db_active, 0, sizeof(bmp->db_active));
+	DBINITMAP(bmp->db_mapsize, ipbmap, &bmp->db_DBmap);
+
+	/*
+	 * allocate/initialize the bmap lock
+	 */
+	BMAP_LOCK_INIT(bmp);
+
+	return (0);
+}
+
+
+/*
+ * NAME:    	dbUnmount()
+ *
+ * FUNCTION:	terminate the block allocation map in preparation for
+ *		file system unmount.
+ *
+ * 		the in-core bmap descriptor is written to disk and
+ *		the memory for this descriptor is freed.
+ *
+ * PARAMETERS:
+ *      ipbmap	-  pointer to in-core inode for the block map.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ */
+int dbUnmount(struct inode *ipbmap, int mounterror)
+{
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+	int i;
+
+	if (!(mounterror || isReadOnly(ipbmap)))
+		dbSync(ipbmap);
+
+	/*
+	 * Invalidate the page cache buffers
+	 */
+	truncate_inode_pages(ipbmap->i_mapping, 0);
+
+	/*
+	 * Sanity Check
+	 */
+	for (i = 0; i < bmp->db_numag; i++)
+		if (atomic_read(&bmp->db_active[i]))
+			printk(KERN_ERR "dbUnmount: db_active[%d] = %d\n",
+			       i, atomic_read(&bmp->db_active[i]));
+
+	/* free the memory for the in-memory bmap. */
+	kfree(bmp);
+
+	return (0);
+}
+
+/*
+ *	dbSync()
+ */
+int dbSync(struct inode *ipbmap)
+{
+	struct dbmap_disk *dbmp_le;
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+	struct metapage *mp;
+	int i;
+
+	/*
+	 * write bmap global control page
+	 */
+	/* get the buffer for the on-disk bmap descriptor. */
+	mp = read_metapage(ipbmap,
+			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
+			   PSIZE, 0);
+	if (mp == NULL) {
+		jfs_err("dbSync: read_metapage failed!");
+		return -EIO;
+	}
+	/* copy the in-memory version of the bmap to the on-disk version */
+	dbmp_le = (struct dbmap_disk *) mp->data;
+	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
+	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
+	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
+	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
+	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
+	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
+	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
+	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
+	dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
+	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
+	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
+	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
+	for (i = 0; i < MAXAG; i++)
+		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
+	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
+	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
+
+	/* write the buffer */
+	write_metapage(mp);
+
+	/*
+	 * write out dirty pages of bmap
+	 */
+	filemap_fdatawrite(ipbmap->i_mapping);
+	filemap_fdatawait(ipbmap->i_mapping);
+
+	ipbmap->i_state |= I_DIRTY;
+	diWriteSpecial(ipbmap, 0);
+
+	return (0);
+}
+
+
+/*
+ * NAME:    	dbFree()
+ *
+ * FUNCTION:	free the specified block range from the working block
+ *		allocation map.
+ *
+ *		the blocks will be freed from the working map one dmap
+ *		at a time.
+ *
+ * PARAMETERS:
+ *      ip	-  pointer to in-core inode;
+ *      blkno	-  starting block number to be freed.
+ *      nblocks	-  number of blocks to be freed.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ */
+int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
+{
+	struct metapage *mp;
+	struct dmap *dp;
+	int nb, rc;
+	s64 lblkno, rem;
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+
+	IREAD_LOCK(ipbmap);
+
+	/* block to be freed better be within the mapsize. */
+	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
+		IREAD_UNLOCK(ipbmap);
+		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
+		       (unsigned long long) blkno,
+		       (unsigned long long) nblocks);
+		jfs_error(ip->i_sb,
+			  "dbFree: block to be freed is outside the map");
+		return -EIO;
+	}
+
+	/*
+	 * free the blocks a dmap at a time.
+	 */
+	mp = NULL;
+	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
+		/* release previous dmap if any */
+		if (mp) {
+			write_metapage(mp);
+		}
+
+		/* get the buffer for the current dmap. */
+		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL) {
+			IREAD_UNLOCK(ipbmap);
+			return -EIO;
+		}
+		dp = (struct dmap *) mp->data;
+
+		/* determine the number of blocks to be freed from
+		 * this dmap.
+		 */
+		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
+
+		DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
+
+		/* free the blocks. */
+		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
+			release_metapage(mp);
+			IREAD_UNLOCK(ipbmap);
+			return (rc);
+		}
+
+		DBFREE(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
+	}
+
+	/* write the last buffer. */
+	write_metapage(mp);
+
+	IREAD_UNLOCK(ipbmap);
+
+	return (0);
+}
+
+
+/*
+ * NAME:	dbUpdatePMap()
+ *
+ * FUNCTION:    update the allocation state (free or allocate) of the
+ *		specified block range in the persistent block allocation map.
+ *		
+ *		the blocks will be updated in the persistent map one
+ *		dmap at a time.
+ *
+ * PARAMETERS:
+ *      ipbmap	-  pointer to in-core inode for the block map.
+ *      free	- TRUE if block range is to be freed from the persistent
+ *		  map; FALSE if it is to be allocated.
+ *      blkno	-  starting block number of the range.
+ *      nblocks	-  number of contiguous blocks in the range.
+ *      tblk	-  transaction block;
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ */
+int
+dbUpdatePMap(struct inode *ipbmap,
+	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
+{
+	int nblks, dbitno, wbitno, rbits;
+	int word, nbits, nwords;
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+	s64 lblkno, rem, lastlblkno;
+	u32 mask;
+	struct dmap *dp;
+	struct metapage *mp;
+	struct jfs_log *log;
+	int lsn, difft, diffp;
+
+	/* the blocks better be within the mapsize. */
+	if (blkno + nblocks > bmp->db_mapsize) {
+		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
+		       (unsigned long long) blkno,
+		       (unsigned long long) nblocks);
+		jfs_error(ipbmap->i_sb,
+			  "dbUpdatePMap: blocks are outside the map");
+		return -EIO;
+	}
+
+	/* compute delta of transaction lsn from log syncpt */
+	lsn = tblk->lsn;
+	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
+	logdiff(difft, lsn, log);
+
+	/*
+	 * update the block state a dmap at a time.
+	 */
+	mp = NULL;
+	lastlblkno = 0;
+	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
+		/* get the buffer for the current dmap. */
+		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+		if (lblkno != lastlblkno) {
+			if (mp) {
+				write_metapage(mp);
+			}
+
+			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
+					   0);
+			if (mp == NULL)
+				return -EIO;
+		}
+		dp = (struct dmap *) mp->data;
+
+		/* determine the bit number and word within the dmap of
+		 * the starting block.  also determine how many blocks
+		 * are to be updated within this dmap.
+		 */
+		dbitno = blkno & (BPERDMAP - 1);
+		word = dbitno >> L2DBWORD;
+		nblks = min(rem, (s64)BPERDMAP - dbitno);
+
+		/* update the bits of the dmap words. the first and last
+		 * words may only have a subset of their bits updated. if
+		 * this is the case, we'll work against that word (i.e.
+		 * partial first and/or last) only in a single pass.  a 
+		 * single pass will also be used to update all words that
+		 * are to have all their bits updated.
+		 */
+		for (rbits = nblks; rbits > 0;
+		     rbits -= nbits, dbitno += nbits) {
+			/* determine the bit number within the word and
+			 * the number of bits within the word.
+			 */
+			wbitno = dbitno & (DBWORD - 1);
+			nbits = min(rbits, DBWORD - wbitno);
+
+			/* check if only part of the word is to be updated. */
+			if (nbits < DBWORD) {
+				/* update (free or allocate) the bits
+				 * in this word.
+				 */
+				mask =
+				    (ONES << (DBWORD - nbits) >> wbitno);
+				if (free)
+					dp->pmap[word] &=
+					    cpu_to_le32(~mask);
+				else
+					dp->pmap[word] |=
+					    cpu_to_le32(mask);
+
+				word += 1;
+			} else {
+				/* one or more words are to have all
+				 * their bits updated.  determine how
+				 * many words and how many bits.
+				 */
+				nwords = rbits >> L2DBWORD;
+				nbits = nwords << L2DBWORD;
+
+				/* update (free or allocate) the bits
+				 * in these words.
+				 */
+				if (free)
+					memset(&dp->pmap[word], 0,
+					       nwords * 4);
+				else
+					memset(&dp->pmap[word], (int) ONES,
+					       nwords * 4);
+
+				word += nwords;
+			}
+		}
+
+		/*
+		 * update dmap lsn
+		 */
+		if (lblkno == lastlblkno)
+			continue;
+
+		lastlblkno = lblkno;
+
+		if (mp->lsn != 0) {
+			/* inherit older/smaller lsn */
+			logdiff(diffp, mp->lsn, log);
+			if (difft < diffp) {
+				mp->lsn = lsn;
+
+				/* move bp after tblock in logsync list */
+				LOGSYNC_LOCK(log);
+				list_move(&mp->synclist, &tblk->synclist);
+				LOGSYNC_UNLOCK(log);
+			}
+
+			/* inherit younger/larger clsn */
+			LOGSYNC_LOCK(log);
+			logdiff(difft, tblk->clsn, log);
+			logdiff(diffp, mp->clsn, log);
+			if (difft > diffp)
+				mp->clsn = tblk->clsn;
+			LOGSYNC_UNLOCK(log);
+		} else {
+			mp->log = log;
+			mp->lsn = lsn;
+
+			/* insert bp after tblock in logsync list */
+			LOGSYNC_LOCK(log);
+
+			log->count++;
+			list_add(&mp->synclist, &tblk->synclist);
+
+			mp->clsn = tblk->clsn;
+			LOGSYNC_UNLOCK(log);
+		}
+	}
+
+	/* write the last buffer. */
+	if (mp) {
+		write_metapage(mp);
+	}
+
+	return (0);
+}
+
+
+/*
+ * NAME:	dbNextAG()
+ *
+ * FUNCTION:    find the preferred allocation group for new allocations.
+ *
+ *		Within the allocation groups, we maintain a preferred
+ *		allocation group which consists of a group with at least
+ *		average free space.  It is the preferred group that we target
+ *		new inode allocation towards.  The tie-in between inode
+ *		allocation and block allocation occurs as we allocate the
+ *		first (data) block of an inode and specify the inode (block)
+ *		as the allocation hint for this block.
+ *
+ *		We try to avoid having more than one open file growing in
+ *		an allocation group, as this will lead to fragmentation.
+ *		This differs from the old OS/2 method of trying to keep
+ *		empty ags around for large allocations.
+ *
+ * PARAMETERS:
+ *      ipbmap	-  pointer to in-core inode for the block map.
+ *
+ * RETURN VALUES:
+ *      the preferred allocation group number.
+ */
+int dbNextAG(struct inode *ipbmap)
+{
+	s64 avgfree;
+	int agpref;
+	s64 hwm = 0;
+	int i;
+	int next_best = -1;
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+
+	BMAP_LOCK(bmp);
+
+	/* determine the average number of free blocks within the ags. */
+	avgfree = (u32)bmp->db_nfree / bmp->db_numag;
+
+	/*
+	 * if the current preferred ag does not have an active allocator
+	 * and has at least average freespace, return it
+	 */
+	agpref = bmp->db_agpref;
+	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
+	    (bmp->db_agfree[agpref] >= avgfree))
+		goto unlock;
+
+	/* From the last preferred ag, find the next one with at least
+	 * average free space.
+	 */
+	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
+		if (agpref == bmp->db_numag)
+			agpref = 0;
+
+		if (atomic_read(&bmp->db_active[agpref]))
+			/* open file is currently growing in this ag */
+			continue;
+		if (bmp->db_agfree[agpref] >= avgfree) {
+			/* Return this one */
+			bmp->db_agpref = agpref;
+			goto unlock;
+		} else if (bmp->db_agfree[agpref] > hwm) {
+			/* Less than avg. freespace, but best so far */
+			hwm = bmp->db_agfree[agpref];
+			next_best = agpref;
+		}
+	}
+
+	/*
+	 * If no inactive ag was found with average freespace, use the
+	 * next best
+	 */
+	if (next_best != -1)
+		bmp->db_agpref = next_best;
+	/* else leave db_agpref unchanged */
+unlock:
+	BMAP_UNLOCK(bmp);
+
+	/* return the preferred group.
+	 */
+	return (bmp->db_agpref);
+}
+
+/*
+ * NAME:	dbAlloc()
+ *
+ * FUNCTION:    attempt to allocate a specified number of contiguous free
+ *		blocks from the working allocation block map.
+ *
+ *		the block allocation policy uses hints and a multi-step
+ *		approach.
+ *
+ *	  	for allocation requests smaller than the number of blocks
+ *		per dmap, we first try to allocate the new blocks
+ *		immediately following the hint.  if these blocks are not
+ *		available, we try to allocate blocks near the hint.  if
+ *		no blocks near the hint are available, we next try to 
+ *		allocate within the same dmap as contains the hint.
+ *
+ *		if no blocks are available in the dmap or the allocation
+ *		request is larger than the dmap size, we try to allocate
+ *		within the same allocation group as contains the hint. if
+ *		this does not succeed, we finally try to allocate anywhere
+ *		within the aggregate.
+ *
+ *		we also try to allocate anywhere within the aggregate for
+ *		allocation requests larger than the allocation group
+ *		size or requests that specify no hint value.
+ *
+ * PARAMETERS:
+ *      ip	-  pointer to in-core inode;
+ *      hint	- allocation hint.
+ *      nblocks	- number of contiguous blocks in the range.
+ *      results	- on successful return, set to the starting block number
+ *		  of the newly allocated contiguous range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ */
+int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
+{
+	int rc, agno;
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct bmap *bmp;
+	struct metapage *mp;
+	s64 lblkno, blkno;
+	struct dmap *dp;
+	int l2nb;
+	s64 mapSize;
+	int writers;
+
+	/* assert that nblocks is valid */
+	assert(nblocks > 0);
+
+#ifdef _STILL_TO_PORT
+	/* DASD limit check                                     F226941 */
+	if (OVER_LIMIT(ip, nblocks))
+		return -ENOSPC;
+#endif				/* _STILL_TO_PORT */
+
+	/* get the log2 number of blocks to be allocated.
+	 * if the number of blocks is not a log2 multiple, 
+	 * it will be rounded up to the next log2 multiple.
+	 */
+	l2nb = BLKSTOL2(nblocks);
+
+	bmp = JFS_SBI(ip->i_sb)->bmap;
+
+//retry:        /* serialize w.r.t. extendfs() */
+	mapSize = bmp->db_mapsize;
+
+	/* the hint should be within the map */
+	if (hint >= mapSize) {
+		jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map");
+		return -EIO;
+	}
+
+	/* if the number of blocks to be allocated is greater than the
+	 * allocation group size, try to allocate anywhere.
+	 */
+	if (l2nb > bmp->db_agl2size) {
+		IWRITE_LOCK(ipbmap);
+
+		rc = dbAllocAny(bmp, nblocks, l2nb, results);
+		if (rc == 0) {
+			DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results,
+				nblocks);
+		}
+
+		goto write_unlock;
+	}
+
+	/*
+	 * If no hint, let dbNextAG recommend an allocation group
+	 */
+	if (hint == 0)
+		goto pref_ag;
+
+	/* we would like to allocate close to the hint.  adjust the
+	 * hint to the block following the hint since the allocators
+	 * will start looking for free space starting at this point.
+	 */
+	blkno = hint + 1;
+
+	if (blkno >= bmp->db_mapsize)
+		goto pref_ag;
+
+	agno = blkno >> bmp->db_agl2size;
+
+	/* check if blkno crosses over into a new allocation group.
+	 * if so, check if we should allow allocations within this
+	 * allocation group.
+	 */
+	if ((blkno & (bmp->db_agsize - 1)) == 0)
+		/* check if the AG is currently being written to.
+		 * if so, call dbNextAG() to find a non-busy
+		 * AG with sufficient free space.
+		 */
+		if (atomic_read(&bmp->db_active[agno]))
+			goto pref_ag;
+
+	/* check if the allocation request size can be satisfied from a
+	 * single dmap.  if so, try to allocate from the dmap containing
+	 * the hint using a tiered strategy.
+	 */
+	if (nblocks <= BPERDMAP) {
+		IREAD_LOCK(ipbmap);
+
+		/* get the buffer for the dmap containing the hint.
+		 */
+		rc = -EIO;
+		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL)
+			goto read_unlock;
+
+		dp = (struct dmap *) mp->data;
+
+		/* first, try to satisfy the allocation request with the
+		 * blocks beginning at the hint.
+		 */
+		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
+		    != -ENOSPC) {
+			if (rc == 0) {
+				*results = blkno;
+				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
+					*results, nblocks);
+				mark_metapage_dirty(mp);
+			}
+
+			release_metapage(mp);
+			goto read_unlock;
+		}
+
+		writers = atomic_read(&bmp->db_active[agno]);
+		if ((writers > 1) ||
+		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
+			/*
+			 * Someone else is writing in this allocation
+			 * group.  To avoid fragmenting, try another ag
+			 */
+			release_metapage(mp);
+			IREAD_UNLOCK(ipbmap);
+			goto pref_ag;
+		}
+
+		/* next, try to satisfy the allocation request with blocks
+		 * near the hint.
+		 */
+		if ((rc =
+		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
+		    != -ENOSPC) {
+			if (rc == 0) {
+				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
+					*results, nblocks);
+				mark_metapage_dirty(mp);
+			}
+
+			release_metapage(mp);
+			goto read_unlock;
+		}
+
+		/* try to satisfy the allocation request with blocks within
+		 * the same dmap as the hint.
+		 */
+		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
+		    != -ENOSPC) {
+			if (rc == 0) {
+				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
+					*results, nblocks);
+				mark_metapage_dirty(mp);
+			}
+
+			release_metapage(mp);
+			goto read_unlock;
+		}
+
+		release_metapage(mp);
+		IREAD_UNLOCK(ipbmap);
+	}
+
+	/* try to satisfy the allocation request with blocks within
+	 * the same allocation group as the hint.
+	 */
+	IWRITE_LOCK(ipbmap);
+	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results))
+	    != -ENOSPC) {
+		if (rc == 0)
+			DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
+				*results, nblocks);
+		goto write_unlock;
+	}
+	IWRITE_UNLOCK(ipbmap);
+
+
+      pref_ag:
+	/*
+	 * Let dbNextAG recommend a preferred allocation group
+	 */
+	agno = dbNextAG(ipbmap);
+	IWRITE_LOCK(ipbmap);
+
+	/* Try to allocate within this allocation group.  if that fails, try to
+	 * allocate anywhere in the map.
+	 */
+	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
+		rc = dbAllocAny(bmp, nblocks, l2nb, results);
+	if (rc == 0) {
+		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results, nblocks);
+	}
+
+      write_unlock:
+	IWRITE_UNLOCK(ipbmap);
+
+	return (rc);
+
+      read_unlock:
+	IREAD_UNLOCK(ipbmap);
+
+	return (rc);
+}
+
+#ifdef _NOTYET
+/*
+ * NAME:	dbAllocExact()
+ *
+ * FUNCTION:    try to allocate the requested extent;
+ *
+ * PARAMETERS:
+ *      ip	- pointer to in-core inode;
+ *      blkno	- extent address;
+ *      nblocks	- extent length;
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ */
+int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
+{
+	int rc;
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+	struct dmap *dp;
+	s64 lblkno;
+	struct metapage *mp;
+
+	IREAD_LOCK(ipbmap);
+
+	/*
+	 * validate extent request:
+	 *
+	 * note: defragfs policy:
+	 *  max 64 blocks will be moved.  
+	 *  allocation request size must be satisfied from a single dmap.
+	 */
+	if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
+		IREAD_UNLOCK(ipbmap);
+		return -EINVAL;
+	}
+
+	if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
+		/* the free space is no longer available */
+		IREAD_UNLOCK(ipbmap);
+		return -ENOSPC;
+	}
+
+	/* read in the dmap covering the extent */
+	lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+	if (mp == NULL) {
+		IREAD_UNLOCK(ipbmap);
+		return -EIO;
+	}
+	dp = (struct dmap *) mp->data;
+
+	/* try to allocate the requested extent */
+	rc = dbAllocNext(bmp, dp, blkno, nblocks);
+
+	IREAD_UNLOCK(ipbmap);
+
+	if (rc == 0) {
+		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
+		mark_metapage_dirty(mp);
+	}
+	release_metapage(mp);
+
+	return (rc);
+}
+#endif /* _NOTYET */
+
+/*
+ * NAME:	dbReAlloc()
+ *
+ * FUNCTION:    attempt to extend a current allocation by a specified
+ *		number of blocks.
+ *
+ *		this routine attempts to satisfy the allocation request
+ *		by first trying to extend the existing allocation in
+ *		place by allocating the additional blocks as the blocks
+ *		immediately following the current allocation.  if these
+ *		blocks are not available, this routine will attempt to
+ *		allocate a new set of contiguous blocks large enough
+ *		to cover the existing allocation plus the additional
+ *		number of blocks required.
+ *
+ * PARAMETERS:
+ *      ip	    -  pointer to in-core inode requiring allocation.
+ *      blkno	    -  starting block of the current allocation.
+ *      nblocks	    -  number of contiguous blocks within the current
+ *		       allocation.
+ *      addnblocks  -  number of blocks to add to the allocation.
+ *      results	-      on successful return, set to the starting block number
+ *		       of the existing allocation if the existing allocation
+ *		       was extended in place or to a newly allocated contiguous
+ *		       range if the existing allocation could not be extended
+ *		       in place.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ */
+int
+dbReAlloc(struct inode *ip,
+	  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
+{
+	int rc;
+
+	/* try to extend the allocation in place.
+	 */
+	if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
+		*results = blkno;
+		return (0);
+	} else {
+		if (rc != -ENOSPC)
+			return (rc);
+	}
+
+	/* could not extend the allocation in place, so allocate a
+	 * new set of blocks for the entire request (i.e. try to get
+	 * a range of contiguous blocks large enough to cover the
+	 * existing allocation plus the additional blocks.)
+	 */
+	return (dbAlloc
+		(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
+}
+
+
+/*
+ * NAME:	dbExtend()
+ *
+ * FUNCTION:    attempt to extend a current allocation by a specified
+ *		number of blocks.
+ *
+ *		this routine attempts to satisfy the allocation request
+ *		by first trying to extend the existing allocation in
+ *		place by allocating the additional blocks as the blocks
+ *		immediately following the current allocation.
+ *
+ * PARAMETERS:
+ *      ip	    -  pointer to in-core inode requiring allocation.
+ *      blkno	    -  starting block of the current allocation.
+ *      nblocks	    -  number of contiguous blocks within the current
+ *		       allocation.
+ *      addnblocks  -  number of blocks to add to the allocation.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ */
+static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	s64 lblkno, lastblkno, extblkno;
+	uint rel_block;
+	struct metapage *mp;
+	struct dmap *dp;
+	int rc;
+	struct inode *ipbmap = sbi->ipbmap;
+	struct bmap *bmp;
+
+	/*
+	 * We don't want a non-aligned extent to cross a page boundary
+	 */
+	if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
+	    (rel_block + nblocks + addnblocks > sbi->nbperpage))
+		return -ENOSPC;
+
+	/* get the last block of the current allocation */
+	lastblkno = blkno + nblocks - 1;
+
+	/* determine the block number of the block following
+	 * the existing allocation.
+	 */
+	extblkno = lastblkno + 1;
+
+	IREAD_LOCK(ipbmap);
+
+	/* better be within the file system */
+	bmp = sbi->bmap;
+	if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
+		IREAD_UNLOCK(ipbmap);
+		jfs_error(ip->i_sb,
+			  "dbExtend: the block is outside the filesystem");
+		return -EIO;
+	}
+
+	/* we'll attempt to extend the current allocation in place by
+	 * allocating the additional blocks as the blocks immediately
+	 * following the current allocation.  we only try to extend the
+	 * current allocation in place if the number of additional blocks
+	 * can fit into a dmap, the last block of the current allocation
+	 * is not the last block of the file system, and the start of the
+	 * inplace extension is not on an allocation group boundary.
+	 */
+	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
+	    (extblkno & (bmp->db_agsize - 1)) == 0) {
+		IREAD_UNLOCK(ipbmap);
+		return -ENOSPC;
+	}
+
+	/* get the buffer for the dmap containing the first block
+	 * of the extension.
+	 */
+	lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
+	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+	if (mp == NULL) {
+		IREAD_UNLOCK(ipbmap);
+		return -EIO;
+	}
+
+	DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
+	dp = (struct dmap *) mp->data;
+
+	/* try to allocate the blocks immediately following the
+	 * current allocation.
+	 */
+	rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
+
+	IREAD_UNLOCK(ipbmap);
+
+	/* were we successful ? */
+	if (rc == 0) {
+		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, extblkno,
+			addnblocks);
+		write_metapage(mp);
+	} else
+		/* we were not successful */
+		release_metapage(mp);
+
+
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbAllocNext()
+ *
+ * FUNCTION:    attempt to allocate the blocks of the specified block
+ *		range within a dmap.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap.
+ *      blkno	-  starting block number of the range.
+ *      nblocks	-  number of contiguous free blocks of the range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
+ */
+static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks)
+{
+	int dbitno, word, rembits, nb, nwords, wbitno, nw;
+	int l2size;
+	s8 *leaf;
+	u32 mask;
+
+	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAllocNext: Corrupt dmap page");
+		return -EIO;
+	}
+
+	/* pick up a pointer to the leaves of the dmap tree.
+	 */
+	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
+
+	/* determine the bit number and word within the dmap of the
+	 * starting block.
+	 */
+	dbitno = blkno & (BPERDMAP - 1);
+	word = dbitno >> L2DBWORD;
+
+	/* check if the specified block range is contained within
+	 * this dmap.
+	 */
+	if (dbitno + nblocks > BPERDMAP)
+		return -ENOSPC;
+
+	/* check if the starting leaf indicates that anything
+	 * is free.
+	 */
+	if (leaf[word] == NOFREE)
+		return -ENOSPC;
+
+	/* check the dmap's words corresponding to the block range to see
+	 * if the block range is free.  not all bits of the first and
+	 * last words may be contained within the block range.  if this
+	 * is the case, we'll work against those words (i.e. partial first
+	 * and/or last) on an individual basis (a single pass) and examine
+	 * the actual bits to determine if they are free.  a single pass
+	 * will be used for all dmap words fully contained within the
+	 * specified range.  within this pass, the leaves of the dmap
+	 * tree will be examined to determine if the blocks are free. a
+	 * single leaf may describe the free space of multiple dmap
+	 * words, so we may visit only a subset of the actual leaves
+	 * corresponding to the dmap words of the block range.
+	 */
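+	/* for illustration (assuming DBWORD is 32, one wmap word per 32
+	 * blocks): a request for 96 blocks starting at dbitno 40 checks the
+	 * last 24 bits of word 1 bit by bit, covers words 2 and 3 through
+	 * their dmtree leaves, and finally checks the first 8 bits of word 4.
+	 */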
+	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
+		/* determine the bit number within the word and
+		 * the number of bits within the word.
+		 */
+		wbitno = dbitno & (DBWORD - 1);
+		nb = min(rembits, DBWORD - wbitno);
+
+		/* check if only part of the word is to be examined.
+		 */
+		if (nb < DBWORD) {
+			/* check if the bits are free.
+			 */
+			mask = (ONES << (DBWORD - nb) >> wbitno);
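+			/* e.g. with DBWORD == 32, nb == 4 and wbitno == 8,
+			 * the mask is 0x00f00000, i.e. four one-bits
+			 * starting at bit 8 counted from the high end.
+			 */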
+			if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
+				return -ENOSPC;
+
+			word += 1;
+		} else {
+			/* one or more dmap words are fully contained
+			 * within the block range.  determine how many
+			 * words and how many bits.
+			 */
+			nwords = rembits >> L2DBWORD;
+			nb = nwords << L2DBWORD;
+
+			/* now examine the appropriate leaves to determine
+			 * if the blocks are free.
+			 */
+			while (nwords > 0) {
+				/* does the leaf describe any free space ?
+				 */
+				if (leaf[word] < BUDMIN)
+					return -ENOSPC;
+
+				/* determine the l2 number of bits provided
+				 * by this leaf.
+				 */
+				l2size =
+				    min((int)leaf[word], NLSTOL2BSZ(nwords));
+
+				/* determine how many words were handled.
+				 */
+				nw = BUDSIZE(l2size, BUDMIN);
+
+				nwords -= nw;
+				word += nw;
+			}
+		}
+	}
+
+	/* allocate the blocks.
+	 */
+	return (dbAllocDmap(bmp, dp, blkno, nblocks));
+}
+
+
+/*
+ * NAME:	dbAllocNear()
+ *
+ * FUNCTION:    attempt to allocate a number of contiguous free blocks near
+ *		a specified block (hint) within a dmap.
+ *
+ *		starting with the dmap leaf that covers the hint, we'll
+ *		check the next four contiguous leaves for sufficient free
+ *		space.  if sufficient free space is found, we'll allocate
+ *		the desired free space.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap.
+ *      blkno	-  block number to allocate near.
+ *      nblocks	-  actual number of contiguous free blocks desired.
+ *      l2nb	-  log2 number of contiguous free blocks desired.
+ *      results	-  on successful return, set to the starting block number
+ *		   of the newly allocated range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
+ */
+static int
+dbAllocNear(struct bmap * bmp,
+	    struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
+{
+	int word, lword, rc;
+	s8 *leaf;
+
+	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAllocNear: Corrupt dmap page");
+		return -EIO;
+	}
+
+	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
+
+	/* determine the word within the dmap that holds the hint
+	 * (i.e. blkno).  also, determine the last word in the dmap
+	 * that we'll include in our examination.
+	 */
+	word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
+	lword = min(word + 4, LPERDMAP);
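+	/* note that if the hint falls within the last three words of the
+	 * dmap, fewer than four leaves are examined.
+	 */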
+
+	/* examine the leaves for sufficient free space.
+	 */
+	for (; word < lword; word++) {
+		/* does the leaf describe sufficient free space ?
+		 */
+		if (leaf[word] < l2nb)
+			continue;
+
+		/* determine the block number within the file system
+		 * of the first block described by this dmap word.
+		 */
+		blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
+
+		/* if not all bits of the dmap word are free, get the
+		 * starting bit number within the dmap word of the required
+		 * string of free bits and adjust the block number with the
+		 * value.
+		 */
+		if (leaf[word] < BUDMIN)
+			blkno +=
+			    dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
+
+		/* allocate the blocks.
+		 */
+		if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
+			*results = blkno;
+
+		return (rc);
+	}
+
+	return -ENOSPC;
+}
+
+
+/*
+ * NAME:	dbAllocAG()
+ *
+ * FUNCTION:    attempt to allocate the specified number of contiguous
+ *		free blocks within the specified allocation group.
+ *
+ *		unless the allocation group size is equal to the number
+ *		of blocks per dmap, the dmap control pages will be used to
+ *		find the required free space, if available.  we start the
+ *		search at the highest dmap control page level which
+ *		distinctly describes the allocation group's free space
+ *		(i.e. the highest level at which the allocation group's
+ *		free space is not mixed in with that of any other group).
+ *		in addition, we start the search within this level at a
+ *		height of the dmapctl dmtree at which the nodes distinctly
+ *		describe the allocation group's free space.  at this height,
+ *		the allocation group's free space may be represented by one
+ *		or two sub-trees, depending on the allocation group size.
+ *		we search the top nodes of these subtrees left to right for
+ *		sufficient free space.  if sufficient free space is found,
+ *		the subtree is searched to find the leftmost leaf that 
+ *		has free space.  once we have made it to the leaf, we
+ *		move the search to the next lower level dmap control page
+ *		corresponding to this leaf.  we continue down the dmap control
+ *		pages until we find the dmap that contains or starts the
+ *		sufficient free space and we allocate at this dmap.
+ *
+ *		if the allocation group size is equal to the dmap size,
+ *		we'll start at the dmap corresponding to the allocation
+ *		group and attempt the allocation at this level.
+ *
+ *		the dmap control page search is also not performed if the
+ *		allocation group is completely free and we go to the first
+ *		dmap of the allocation group to do the allocation.  this is
+ *		done because the allocation group may be part (not the first
+ *		part) of a larger binary buddy system, causing the dmap
+ *		control pages to indicate no free space (NOFREE) within
+ *		the allocation group.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *	agno	- allocation group number.
+ *      nblocks	-  actual number of contiguous free blocks desired.
+ *      l2nb	-  log2 number of contiguous free blocks desired.
+ *      results	-  on successful return, set to the starting block number
+ *		   of the newly allocated range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * note: IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int
+dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
+{
+	struct metapage *mp;
+	struct dmapctl *dcp;
+	int rc, ti, i, k, m, n, agperlev;
+	s64 blkno, lblkno;
+	int budmin;
+
+	/* allocation request should not be for more than the
+	 * allocation group size.
+	 */
+	if (l2nb > bmp->db_agl2size) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAllocAG: allocation request is larger than the "
+			  "allocation group size");
+		return -EIO;
+	}
+
+	/* determine the starting block number of the allocation
+	 * group.
+	 */
+	blkno = (s64) agno << bmp->db_agl2size;
+
+	/* check if the allocation group size is the minimum allocation
+	 * group size or if the allocation group is completely free. if
+	 * the allocation group size is the minimum size of BPERDMAP (i.e.
+	 * 1 dmap), there is no need to search the dmap control page (below)
+	 * that fully describes the allocation group since the allocation
+	 * group is already fully described by a dmap.  in this case, we
+	 * just call dbAllocCtl() to search the dmap tree and allocate the
+	 * required space if available.  
+	 *
+	 * if the allocation group is completely free, dbAllocCtl() is
+	 * also called to allocate the required space.  this is done for
+	 * two reasons.  first, it makes no sense searching the dmap control
+	 * pages for free space when we know that free space exists.  second,
+	 * the dmap control pages may indicate that the allocation group
+	 * has no free space if the allocation group is part (not the first
+	 * part) of a larger binary buddy system.
+	 */
+	if (bmp->db_agsize == BPERDMAP
+	    || bmp->db_agfree[agno] == bmp->db_agsize) {
+		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
+		if ((rc == -ENOSPC) &&
+		    (bmp->db_agfree[agno] == bmp->db_agsize)) {
+			printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
+			       (unsigned long long) blkno,
+			       (unsigned long long) nblocks);
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbAllocAG: dbAllocCtl failed in free AG");
+		}
+		return (rc);
+	}
+
+	/* get the buffer for the dmap control page that fully describes the
+	 * allocation group.
+	 */
+	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
+	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+	if (mp == NULL)
+		return -EIO;
+	dcp = (struct dmapctl *) mp->data;
+	budmin = dcp->budmin;
+
+	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAllocAG: Corrupt dmapctl page");
+		release_metapage(mp);
+		return -EIO;
+	}
+
+	/* search the subtree(s) of the dmap control page that describes
+	 * the allocation group, looking for sufficient free space.  to begin,
+	 * determine how many allocation groups are represented in a dmap
+	 * control page at the control page level (i.e. L0, L1, L2) that
+	 * fully describes an allocation group. next, determine the starting
+	 * tree index of this allocation group within the control page.
+	 */
+	agperlev =
+	    (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
+	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
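+	/* for instance, if db_agheigth were 1 and db_agwidth were 1 (both
+	 * depend on the ag size), agperlev would be a quarter of the
+	 * control page's leaf count and ti would select the single
+	 * subtree root describing agno within this page.
+	 */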
+
+	/* dmap control page trees fan-out by 4 and a single allocation 
+	 * group may be described by 1 or 2 subtrees within the ag level
+	 * dmap control page, depending upon the ag size. examine the ag's
+	 * subtrees for sufficient free space, starting with the leftmost
+	 * subtree.
+	 */
+	for (i = 0; i < bmp->db_agwidth; i++, ti++) {
+		/* is there sufficient free space ?
+		 */
+		if (l2nb > dcp->stree[ti])
+			continue;
+
+		/* sufficient free space found in a subtree. now search down
+		 * the subtree to find the leftmost leaf that describes this
+		 * free space.
+		 */
+		for (k = bmp->db_agheigth; k > 0; k--) {
+			for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
+				if (l2nb <= dcp->stree[m + n]) {
+					ti = m + n;
+					break;
+				}
+			}
+			if (n == 4) {
+				jfs_error(bmp->db_ipbmap->i_sb,
+					  "dbAllocAG: failed descending stree");
+				release_metapage(mp);
+				return -EIO;
+			}
+		}
+
+		/* determine the block number within the file system
+		 * that corresponds to this leaf.
+		 */
+		if (bmp->db_aglevel == 2)
+			blkno = 0;
+		else if (bmp->db_aglevel == 1)
+			blkno &= ~(MAXL1SIZE - 1);
+		else		/* bmp->db_aglevel == 0 */
+			blkno &= ~(MAXL0SIZE - 1);
+
+		blkno +=
+		    ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
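+		/* each leaf of this dmapctl covers 2^budmin blocks, so the
+		 * leaf offset converts to a block offset by a budmin shift.
+		 */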
+
+		/* release the buffer in preparation for going down
+		 * the next level of dmap control pages.
+		 */
+		release_metapage(mp);
+
+		/* check if we need to continue to search down the lower
+		 * level dmap control pages.  we need to if the number of
+		 * blocks required is less than maximum number of blocks
+		 * described at the next lower level.
+		 */
+		if (l2nb < budmin) {
+
+			/* search the lower level dmap control pages to get
+			 * the starting block number of the dmap that
+			 * contains or starts off the free space.
+			 */
+			if ((rc =
+			     dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
+				       &blkno))) {
+				if (rc == -ENOSPC) {
+					jfs_error(bmp->db_ipbmap->i_sb,
+						  "dbAllocAG: control page "
+						  "inconsistent");
+					return -EIO;
+				}
+				return (rc);
+			}
+		}
+
+		/* allocate the blocks.
+		 */
+		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
+		if (rc == -ENOSPC) {
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbAllocAG: unable to allocate blocks");
+			rc = -EIO;
+		}
+		return (rc);
+	}
+
+	/* no space in the allocation group.  release the buffer and
+	 * return -ENOSPC.
+	 */
+	release_metapage(mp);
+
+	return -ENOSPC;
+}
+
+
+/*
+ * NAME:	dbAllocAny()
+ *
+ * FUNCTION:    attempt to allocate the specified number of contiguous
+ *		free blocks anywhere in the file system.
+ *
+ *		dbAllocAny() attempts to find sufficient free space by
+ *		searching down the dmap control pages, starting with the
+ *		highest level (i.e. L0, L1, L2) control page.  if free space
+ *		large enough to satisfy the request is found, the desired
+ *		free space is allocated.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      nblocks	 -  actual number of contiguous free blocks desired.
+ *      l2nb	 -  log2 number of contiguous free blocks desired.
+ *      results	-  on successful return, set to the starting block number
+ *		   of the newly allocated range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
+{
+	int rc;
+	s64 blkno = 0;
+
+	/* starting with the top level dmap control page, search
+	 * down the dmap control levels for sufficient free space.
+	 * if free space is found, dbFindCtl() returns the starting
+	 * block number of the dmap that contains or starts off the
+	 * range of free space.
+	 */
+	if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
+		return (rc);
+
+	/* allocate the blocks.
+	 */
+	rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
+	if (rc == -ENOSPC) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAllocAny: unable to allocate blocks");
+		return -EIO;
+	}
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbFindCtl()
+ *
+ * FUNCTION:    starting at a specified dmap control page level and block
+ *		number, search down the dmap control levels for a range of
+ *	        contiguous free blocks large enough to satisfy an allocation
+ *		request for the specified number of free blocks.
+ *
+ *		if sufficient contiguous free blocks are found, this routine
+ *		returns the starting block number within a dmap page that
+ *		contains or starts a range of contiguous free blocks that
+ *		is sufficient in size.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      level	-  starting dmap control page level.
+ *      l2nb	-  log2 number of contiguous free blocks desired.
+ *      *blkno	-  on entry, starting block number for conducting the search.
+ *		   on successful return, the first block within a dmap page
+ *		   that contains or starts a range of contiguous free blocks.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+{
+	int rc, leafidx, lev;
+	s64 b, lblkno;
+	struct dmapctl *dcp;
+	int budmin;
+	struct metapage *mp;
+
+	/* starting at the specified dmap control page level and block
+	 * number, search down the dmap control levels for the starting
+	 * block number of a dmap page that contains or starts off 
+	 * sufficient free blocks.
+	 */
+	for (lev = level, b = *blkno; lev >= 0; lev--) {
+		/* get the buffer of the dmap control page for the block
+		 * number and level (i.e. L0, L1, L2).
+		 */
+		lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
+		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL)
+			return -EIO;
+		dcp = (struct dmapctl *) mp->data;
+		budmin = dcp->budmin;
+
+		if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbFindCtl: Corrupt dmapctl page");
+			release_metapage(mp);
+			return -EIO;
+		}
+
+		/* search the tree within the dmap control page for
+		 * sufficient free space.  if sufficient free space is found,
+		 * dbFindLeaf() returns the index of the leaf at which
+		 * free space was found.
+		 */
+		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
+
+		/* release the buffer.
+		 */
+		release_metapage(mp);
+
+		/* space found ?
+		 */
+		if (rc) {
+			if (lev != level) {
+				jfs_error(bmp->db_ipbmap->i_sb,
+					  "dbFindCtl: dmap inconsistent");
+				return -EIO;
+			}
+			return -ENOSPC;
+		}
+
+		/* adjust the block number to reflect the location within
+		 * the dmap control page (i.e. the leaf) at which free 
+		 * space was found.
+		 */
+		b += (((s64) leafidx) << budmin);
+
+		/* we stop the search at this dmap control page level if
+		 * the number of blocks required is greater than or equal
+		 * to the maximum number of blocks described at the next
+		 * (lower) level.
+		 */
+		if (l2nb >= budmin)
+			break;
+	}
+
+	*blkno = b;
+	return (0);
+}
+
+
+/*
+ * NAME:	dbAllocCtl()
+ *
+ * FUNCTION:    attempt to allocate a specified number of contiguous
+ *		blocks starting within a specific dmap.  
+ *		
+ *		this routine is called by higher level routines that search
+ *		the dmap control pages above the actual dmaps for contiguous
+ *		free space.  the result of successful searches by these
+ * 		routines are the starting block numbers within dmaps, with
+ *		the dmaps themselves containing the desired contiguous free
+ *		space or starting a contiguous free space of desired size
+ *		that is made up of the blocks of one or more dmaps. these
+ *		calls should not fail due to insufficient resources.
+ *
+ *		this routine is called in some cases where it is not known
+ *		whether it will fail due to insufficient resources.  more
+ *		specifically, this occurs when allocating from an allocation
+ *		group whose size is equal to the number of blocks per dmap.
+ *		in this case, the dmap control pages are not examined prior
+ *		to calling this routine (to save pathlength) and the call
+ *		might fail.
+ *
+ *		for a request size that fits within a dmap, this routine relies
+ *		upon the dmap's dmtree to find the requested contiguous free
+ *		space.  for request sizes that are larger than a dmap, the
+ *		requested free space will start at the first block of the
+ *		first dmap (i.e. blkno).
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      nblocks	 -  actual number of contiguous free blocks to allocate.
+ *      l2nb	 -  log2 number of contiguous free blocks to allocate.
+ *      blkno	 -  starting block number of the dmap to start the allocation
+ *		    from.
+ *      results	-  on successful return, set to the starting block number
+ *		   of the newly allocated range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int
+dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
+{
+	int rc, nb;
+	s64 b, lblkno, n;
+	struct metapage *mp;
+	struct dmap *dp;
+
+	/* check if the allocation request is confined to a single dmap.
+	 */
+	if (l2nb <= L2BPERDMAP) {
+		/* get the buffer for the dmap.
+		 */
+		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL)
+			return -EIO;
+		dp = (struct dmap *) mp->data;
+
+		/* try to allocate the blocks.
+		 */
+		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
+		if (rc == 0)
+			mark_metapage_dirty(mp);
+
+		release_metapage(mp);
+
+		return (rc);
+	}
+
+	/* allocation request involving multiple dmaps. it must start on
+	 * a dmap boundary.
+	 */
+	assert((blkno & (BPERDMAP - 1)) == 0);
+
+	/* allocate the blocks dmap by dmap.
+	 */
+	for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
+		/* get the buffer for the dmap.
+		 */
+		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
+		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL) {
+			rc = -EIO;
+			goto backout;
+		}
+		dp = (struct dmap *) mp->data;
+
+		/* the dmap better be all free.
+		 */
+		if (dp->tree.stree[ROOT] != L2BPERDMAP) {
+			release_metapage(mp);
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbAllocCtl: the dmap is not all free");
+			rc = -EIO;
+			goto backout;
+		}
+
+		/* determine how many blocks to allocate from this dmap.
+		 */
+		nb = min(n, (s64)BPERDMAP);
+
+		/* allocate the blocks from the dmap.
+		 */
+		if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
+			release_metapage(mp);
+			goto backout;
+		}
+
+		/* write the buffer.
+		 */
+		write_metapage(mp);
+	}
+
+	/* set the results (starting block number) and return.
+	 */
+	*results = blkno;
+	return (0);
+
+	/* something failed in handling an allocation request involving
+	 * multiple dmaps.  we'll try to clean up by backing out any
+	 * allocation that has already happened for this request.  if
+	 * we fail in backing out the allocation, we'll mark the file
+	 * system to indicate that blocks have been leaked.
+	 */
+      backout:
+
+	/* try to backout the allocations dmap by dmap.
+	 */
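+	/* at this point n still holds the blocks that were not allocated,
+	 * so nblocks - n is the number of blocks already allocated; every
+	 * dmap handled before the failure was fully allocated, hence a
+	 * full BPERDMAP worth of blocks is freed per backed-out dmap.
+	 */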
+	for (n = nblocks - n, b = blkno; n > 0;
+	     n -= BPERDMAP, b += BPERDMAP) {
+		/* get the buffer for this dmap.
+		 */
+		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
+		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL) {
+			/* could not back out.  mark the file system
+			 * to indicate that we have leaked blocks.
+			 */
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbAllocCtl: I/O Error: Block Leakage.");
+			continue;
+		}
+		dp = (struct dmap *) mp->data;
+
+		/* free the blocks of this dmap.
+		 */
+		if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
+			/* could not back out.  mark the file system
+			 * to indicate that we have leaked blocks.
+			 */
+			release_metapage(mp);
+			jfs_error(bmp->db_ipbmap->i_sb,
+				  "dbAllocCtl: Block Leakage.");
+			continue;
+		}
+
+		/* write the buffer.
+		 */
+		write_metapage(mp);
+	}
+
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbAllocDmapLev()
+ *
+ * FUNCTION:    attempt to allocate a specified number of contiguous blocks
+ *		from a specified dmap.
+ *		
+ *		this routine checks if the contiguous blocks are available.
+ *		if so, nblocks blocks are allocated; otherwise, -ENOSPC is
+ *		returned.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap to attempt to allocate blocks from. 
+ *      l2nb	-  log2 number of contiguous block desired.
+ *      nblocks	-  actual number of contiguous block desired.
+ *      results	-  on successful return, set to the starting block number
+ *		   of the newly allocated range.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient disk resources
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or 
+ *	IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
+ */
+static int
+dbAllocDmapLev(struct bmap * bmp,
+	       struct dmap * dp, int nblocks, int l2nb, s64 * results)
+{
+	s64 blkno;
+	int leafidx, rc;
+
+	/* can't be more than a dmap's worth of blocks */
+	assert(l2nb <= L2BPERDMAP);
+
+	/* search the tree within the dmap page for sufficient
+	 * free space.  if sufficient free space is found, dbFindLeaf()
+	 * returns the index of the leaf at which free space was found.
+	 */
+	if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+		return -ENOSPC;
+
+	/* determine the block number within the file system corresponding
+	 * to the leaf at which free space was found.
+	 */
+	blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
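+	/* each dmtree leaf corresponds to one wmap word of 2^L2DBWORD
+	 * blocks; e.g. leafidx 2 places the allocation two words (64
+	 * blocks, assuming 32-bit words) past the start of the dmap.
+	 */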
+
+	/* if not all bits of the dmap word are free, get the starting
+	 * bit number within the dmap word of the required string of free
+	 * bits and adjust the block number with this value.
+	 */
+	if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
+		blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
+
+	/* allocate the blocks */
+	if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
+		*results = blkno;
+
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbAllocDmap()
+ *
+ * FUNCTION:    adjust the disk allocation map to reflect the allocation
+ *		of a specified block range within a dmap.
+ *
+ *		this routine allocates the specified blocks from the dmap
+ *		through a call to dbAllocBits(). if the allocation of the
+ *		block range causes the maximum string of free blocks within
+ *		the dmap to change (i.e. the value of the root of the dmap's
+ *		dmtree), this routine will cause this change to be reflected
+ *		up through the appropriate levels of the dmap control pages
+ *		by a call to dbAdjCtl() for the L0 dmap control page that
+ *		covers this dmap.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap to allocate the block range from.
+ *      blkno	-  starting block number of the block to be allocated.
+ *      nblocks	-  number of blocks to be allocated.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks)
+{
+	s8 oldroot;
+	int rc;
+
+	/* save the current value of the root (i.e. maximum free string)
+	 * of the dmap tree.
+	 */
+	oldroot = dp->tree.stree[ROOT];
+
+	/* allocate the specified (blocks) bits */
+	dbAllocBits(bmp, dp, blkno, nblocks);
+
+	/* if the root has not changed, done. */
+	if (dp->tree.stree[ROOT] == oldroot)
+		return (0);
+
+	/* root changed. bubble the change up to the dmap control pages.
+	 * if the adjustment of the upper level control pages fails,
+	 * backout the bit allocation (thus making everything consistent).
+	 */
+	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
+		dbFreeBits(bmp, dp, blkno, nblocks);
+
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbFreeDmap()
+ *
+ * FUNCTION:    adjust the disk allocation map to reflect the freeing
+ *		of a specified block range within a dmap.
+ *
+ *		this routine frees the specified blocks from the dmap through
+ *		a call to dbFreeBits(). if the deallocation of the block range
+ *		causes the maximum string of free blocks within the dmap to
+ *		change (i.e. the value of the root of the dmap's dmtree), this
+ *		routine will cause this change to be reflected up through the
+ *	        appropriate levels of the dmap control pages by a call to
+ *		dbAdjCtl() for the L0 dmap control page that covers this dmap.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap to free the block range from.
+ *      blkno	-  starting block number of the block to be freed.
+ *      nblocks	-  number of blocks to be freed.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		      int nblocks)
+{
+	s8 oldroot;
+	int rc, word;
+
+	/* save the current value of the root (i.e. maximum free string)
+	 * of the dmap tree.
+	 */
+	oldroot = dp->tree.stree[ROOT];
+
+	/* free the specified (blocks) bits */
+	dbFreeBits(bmp, dp, blkno, nblocks);
+
+	/* if the root has not changed, done. */
+	if (dp->tree.stree[ROOT] == oldroot)
+		return (0);
+
+	/* root changed. bubble the change up to the dmap control pages.
+	 * if the adjustment of the upper level control pages fails,
+	 * backout the deallocation. 
+	 */
+	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
+		word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
+
+		/* as part of backing out the deallocation, we will have
+		 * to back split the dmap tree if the deallocation caused
+		 * the freed blocks to become part of a larger binary buddy
+		 * system.
+		 */
+		if (dp->tree.stree[word] == NOFREE)
+			dbBackSplit((dmtree_t *) & dp->tree, word);
+
+		dbAllocBits(bmp, dp, blkno, nblocks);
+	}
+
+	return (rc);
+}
+
+
+/*
+ * NAME:	dbAllocBits()
+ *
+ * FUNCTION:    allocate a specified block range from a dmap.
+ *
+ *		this routine updates the dmap to reflect the working
+ *		state allocation of the specified block range. it directly
+ *		updates the bits of the working map and causes the adjustment
+ *		of the binary buddy system described by the dmap's dmtree
+ *		leaves to reflect the bits allocated.  it also causes the
+ *		dmap's dmtree, as a whole, to reflect the allocated range.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap to allocate bits from.
+ *      blkno	-  starting block number of the bits to be allocated.
+ *      nblocks	-  number of bits to be allocated.
+ *
+ * RETURN VALUES: none
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+			int nblocks)
+{
+	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
+	dmtree_t *tp = (dmtree_t *) & dp->tree;
+	int size;
+	s8 *leaf;
+
+	/* pick up a pointer to the leaves of the dmap tree */
+	leaf = dp->tree.stree + LEAFIND;
+
+	/* determine the bit number and word within the dmap of the
+	 * starting block.
+	 */
+	dbitno = blkno & (BPERDMAP - 1);
+	word = dbitno >> L2DBWORD;
+
+	/* block range better be within the dmap */
+	assert(dbitno + nblocks <= BPERDMAP);
+
+	/* allocate the bits of the dmap's words corresponding to the block
+	 * range. not all bits of the first and last words may be contained
+	 * within the block range.  if this is the case, we'll work against
+	 * those words (i.e. partial first and/or last) on an individual basis
+	 * (a single pass), allocating the bits of interest by hand and
+	 * updating the leaf corresponding to the dmap word. a single pass
+	 * will be used for all dmap words fully contained within the
+	 * specified range.  within this pass, the bits of all fully contained
+	 * dmap words will be marked as free in a single shot and the leaves
+	 * will be updated. a single leaf may describe the free space of
+	 * multiple dmap words, so we may update only a subset of the actual
+	 * leaves corresponding to the dmap words of the block range.
+	 */
+	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
+		/* determine the bit number within the word and
+		 * the number of bits within the word.
+		 */
+		wbitno = dbitno & (DBWORD - 1);
+		nb = min(rembits, DBWORD - wbitno);
+
+		/* check if only part of a word is to be allocated.
+		 */
+		if (nb < DBWORD) {
+			/* allocate (set to 1) the appropriate bits within
+			 * this dmap word.
+			 */
+			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
+						      >> wbitno);
+
+			/* update the leaf for this dmap word. in addition
+			 * to setting the leaf value to the binary buddy max
+			 * of the updated dmap word, dbSplit() will split
+			 * the binary system of the leaves if need be.
+			 */
+			dbSplit(tp, word, BUDMIN,
+				dbMaxBud((u8 *) & dp->wmap[word]));
+
+			word += 1;
+		} else {
+			/* one or more dmap words are fully contained
+			 * within the block range.  determine how many
+			 * words and allocate (set to 1) the bits of these
+			 * words.
+			 */
+			nwords = rembits >> L2DBWORD;
+			memset(&dp->wmap[word], (int) ONES, nwords * 4);
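+			/* the memset() above fills nwords 32-bit words (4
+			 * bytes each); since every byte of ONES is 0xff,
+			 * this marks all of their blocks as allocated.
+			 */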
+
+			/* determine how many bits.
+			 */
+			nb = nwords << L2DBWORD;
+
+			/* now update the appropriate leaves to reflect
+			 * the allocated words.
+			 */
+			for (; nwords > 0; nwords -= nw) {
+			        if (leaf[word] < BUDMIN) {
+					jfs_error(bmp->db_ipbmap->i_sb,
+						  "dbAllocBits: leaf page "
+						  "corrupt");
+					break;
+				}
+
+				/* determine what the leaf value should be
+				 * updated to as the minimum of the l2 number
+				 * of bits being allocated and the l2 number
+				 * of bits currently described by this leaf.
+				 */
+				size = min((int)leaf[word], NLSTOL2BSZ(nwords));
+
+				/* update the leaf to reflect the allocation.
+				 * in addition to setting the leaf value to
+				 * NOFREE, dbSplit() will split the binary
+				 * system of the leaves to reflect the current
+				 * allocation (size).
+				 */
+				dbSplit(tp, word, size, NOFREE);
+
+				/* get the number of dmap words handled */
+				nw = BUDSIZE(size, BUDMIN);
+				word += nw;
+			}
+		}
+	}
+
+	/* update the free count for this dmap */
+	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
+
+	BMAP_LOCK(bmp);
+
+	/* update the maximum allocation group number if this allocation
+	 * group is the new max.
+	 */
+	agno = blkno >> bmp->db_agl2size;
+	if (agno > bmp->db_maxag)
+		bmp->db_maxag = agno;
+
+	/* update the free count for the allocation group and map */
+	bmp->db_agfree[agno] -= nblocks;
+	bmp->db_nfree -= nblocks;
+
+	BMAP_UNLOCK(bmp);
+}
+
+
+/*
+ * NAME:	dbFreeBits()
+ *
+ * FUNCTION:    free a specified block range from a dmap.
+ *
+ *		this routine updates the dmap to reflect the working
+ *		state allocation of the specified block range. it directly
+ *		updates the bits of the working map and causes the adjustment
+ *		of the binary buddy system described by the dmap's dmtree
+ *		leaves to reflect the bits freed.  it also causes the dmap's
+ *		dmtree, as a whole, to reflect the deallocated range.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      dp	-  pointer to dmap to free bits from.
+ *      blkno	-  starting block number of the bits to be freed.
+ *      nblocks	-  number of bits to be freed.
+ *
+ * RETURN VALUES: none
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+		       int nblocks)
+{
+	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
+	dmtree_t *tp = (dmtree_t *) & dp->tree;
+	int size;
+
+	/* determine the bit number and word within the dmap of the
+	 * starting block.
+	 */
+	dbitno = blkno & (BPERDMAP - 1);
+	word = dbitno >> L2DBWORD;
+
+	/* block range better be within the dmap.
+	 */
+	assert(dbitno + nblocks <= BPERDMAP);
+
+	/* free the bits of the dmap's words corresponding to the block range.
+	 * not all bits of the first and last words may be contained within
+	 * the block range.  if this is the case, we'll work against those
+	 * words (i.e. partial first and/or last) on an individual basis
+	 * (a single pass), freeing the bits of interest by hand and updating
+	 * the leaf corresponding to the dmap word. a single pass will be used
+	 * for all dmap words fully contained within the specified range.  
+	 * within this pass, the bits of all fully contained dmap words will
+	 * be marked as free in a single shot and the leaves will be updated. a
+	 * single leaf may describe the free space of multiple dmap words,
+	 * so we may update only a subset of the actual leaves corresponding
+	 * to the dmap words of the block range.
+	 *
+	 * dbJoin() is used to update leaf values and will join the binary
+	 * buddy system of the leaves if the new leaf values indicate this
+	 * should be done.
+	 */
+	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
+		/* determine the bit number within the word and
+		 * the number of bits within the word.
+		 */
+		wbitno = dbitno & (DBWORD - 1);
+		nb = min(rembits, DBWORD - wbitno);
+
+		/* check if only part of a word is to be freed.
+		 */
+		if (nb < DBWORD) {
+			/* free (zero) the appropriate bits within this
+			 * dmap word. 
+			 */
+			dp->wmap[word] &=
+			    cpu_to_le32(~(ONES << (DBWORD - nb)
+					  >> wbitno));
+
+			/* update the leaf for this dmap word.
+			 */
+			dbJoin(tp, word,
+			       dbMaxBud((u8 *) & dp->wmap[word]));
+
+			word += 1;
+		} else {
+			/* one or more dmap words are fully contained
+			 * within the block range.  determine how many
+			 * words and free (zero) the bits of these words.
+			 */
+			nwords = rembits >> L2DBWORD;
+			memset(&dp->wmap[word], 0, nwords * 4);
+
+			/* determine how many bits.
+			 */
+			nb = nwords << L2DBWORD;
+
+			/* now update the appropriate leaves to reflect
+			 * the freed words.
+			 */
+			for (; nwords > 0; nwords -= nw) {
+				/* determine what the leaf value should be
+				 * updated to as the minimum of the l2 number
+				 * of bits being freed and the l2 (max) number
+				 * of bits that can be described by this leaf.
+				 */
+				size =
+				    min(LITOL2BSZ
+					(word, L2LPERDMAP, BUDMIN),
+					NLSTOL2BSZ(nwords));
+
+				/* update the leaf.
+				 */
+				dbJoin(tp, word, size);
+
+				/* get the number of dmap words handled.
+				 */
+				nw = BUDSIZE(size, BUDMIN);
+				word += nw;
+			}
+		}
+	}
+
+	/* update the free count for this dmap.
+	 */
+	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
+
+	BMAP_LOCK(bmp);
+
+	/* update the free count for the allocation group and 
+	 * map.
+	 */
+	agno = blkno >> bmp->db_agl2size;
+	bmp->db_nfree += nblocks;
+	bmp->db_agfree[agno] += nblocks;
+
+	/* check if this allocation group is now completely free and
+	 * is currently the maximum (rightmost) allocation group.
+	 * if so, establish the new maximum allocation group number by
+	 * searching left for the first allocation group with allocation.
+	 */
+	if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
+	    (agno == bmp->db_numag - 1 &&
+	     bmp->db_agfree[agno] == (bmp->db_mapsize & (BPERDMAP - 1)))) {
+		while (bmp->db_maxag > 0) {
+			bmp->db_maxag -= 1;
+			if (bmp->db_agfree[bmp->db_maxag] !=
+			    bmp->db_agsize)
+				break;
+		}
+
+		/* re-establish the allocation group preference if the
+		 * current preference is right of the maximum allocation
+		 * group.
+		 */
+		if (bmp->db_agpref > bmp->db_maxag)
+			bmp->db_agpref = bmp->db_maxag;
+	}
+
+	BMAP_UNLOCK(bmp);
+}
+
+
+/*
+ * NAME:	dbAdjCtl()
+ *
+ * FUNCTION:	adjust a dmap control page at a specified level to reflect
+ *		the change in a lower level dmap or dmap control page's
+ *		maximum string of free blocks (i.e. a change in the root
+ *		of the lower level object's dmtree) due to the allocation
+ *		or deallocation of a range of blocks with a single dmap.
+ *
+ *		on entry, this routine is provided with the new value of
+ *		the lower level dmap or dmap control page root and the
+ *		starting block number of the block range whose allocation
+ *		or deallocation resulted in the root change.  this range
+ *		is represented by a single leaf of the current dmapctl
+ *		and the leaf will be updated with this value, possibly
+ *		causing a binary buddy system within the leaves to be 
+ *		split or joined.  the update may also cause the dmapctl's
+ *		dmtree to be updated.
+ *
+ *		if the adjustment of the dmap control page, itself, causes its
+ *		root to change, this change will be bubbled up to the next dmap
+ *		control level by a recursive call to this routine, specifying
+ *		the new root value and the next dmap control page level to
+ *		be adjusted.
+ *
+ * PARAMETERS:
+ *      bmp	-  pointer to bmap descriptor
+ *      blkno	-  the first block of a block range within a dmap.  it is
+ *		   the allocation or deallocation of this block range that
+ *		   requires the dmap control page to be adjusted.
+ *      newval	-  the new value of the lower level dmap or dmap control
+ *		   page root.
+ *      alloc	-  TRUE if adjustment is due to an allocation.
+ *      level	-  current level of dmap control page (i.e. L0, L1, L2) to
+ *		   be adjusted.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static int
+dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+{
+	struct metapage *mp;
+	s8 oldroot;
+	int oldval;
+	s64 lblkno;
+	struct dmapctl *dcp;
+	int rc, leafno, ti;
+
+	/* get the buffer for the dmap control page for the specified
+	 * block number and control page level.
+	 */
+	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
+	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
+	if (mp == NULL)
+		return -EIO;
+	dcp = (struct dmapctl *) mp->data;
+
+	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
+		jfs_error(bmp->db_ipbmap->i_sb,
+			  "dbAdjCtl: Corrupt dmapctl page");
+		release_metapage(mp);
+		return -EIO;
+	}
+
+	/* determine the leaf number corresponding to the block and
+	 * the index within the dmap control tree.
+	 */
+	leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
+	ti = leafno + le32_to_cpu(dcp->leafidx);
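+	/* leafno is the leaf whose 2^budmin-block region contains blkno;
+	 * ti is its index within the dmapctl's stree array, where the
+	 * leaves follow the internal nodes starting at leafidx.
+	 */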
+
+	/* save the current leaf value and the current root level (i.e.
+	 * maximum l2 free string described by this dmapctl).
+	 */
+	oldval = dcp->stree[ti];
+	oldroot = dcp->stree[ROOT];
+
+	/* check if this is a control page update for an allocation.
+	 * if so, update the leaf to reflect the new leaf value using
+	 * dbSplit(); otherwise (deallocation), use dbJoin() to update
+	 * the leaf with the new value.  in addition to updating the
+	 * leaf, dbSplit() will also split the binary buddy system of
+	 * the leaves, if required, and bubble new values within the
+	 * dmapctl tree, if required.  similarly, dbJoin() will join
+	 * the binary buddy system of leaves and bubble new values up
+	 * the dmapctl tree as required by the new leaf value.
+	 */
+	if (alloc) {
+		/* check if we are in the middle of a binary buddy
+		 * system.  this happens when we are performing the
+		 * first allocation out of an allocation group that
+		 * is part (not the first part) of a larger binary
+		 * buddy system.  if we are in the middle, back split
+		 * the system prior to calling dbSplit() which assumes
+		 * that it is at the front of a binary buddy system.
+		 */
+		if (oldval == NOFREE) {
+			dbBackSplit((dmtree_t *) dcp, leafno);
+			oldval = dcp->stree[ti];
+		}
+		dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
+	} else {
+		dbJoin((dmtree_t *) dcp, leafno, newval);
+	}
+
+	/* check if the root of the current dmap control page changed due
+	 * to the update and if the current dmap control page is not at
+	 * the current top level (i.e. L0, L1, L2) of the map.  if so (i.e.
+	 * root changed and this is not the top level), call this routine
+	 * again (recursion) for the next higher level of the mapping to
+	 * reflect the change in root for the current dmap control page.
+	 */
+	if (dcp->stree[ROOT] != oldroot) {
+		/* are we below the top level of the map.  if so,
+		 * bubble the root up to the next higher level.
+		 */
+		if (level < bmp->db_maxlevel) {
+			/* bubble up the new root of this dmap control page to
+			 * the next level.
+			 */
+			if ((rc =
+			     dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
+				      level + 1))) {
+				/* something went wrong in bubbling up the new
+				 * root value, so backout the changes to the
+				 * current dmap control page.
+				 */
+				if (alloc) {
+					dbJoin((dmtree_t *) dcp, leafno,
+					       oldval);
+				} else {
+					/* the dbJoin() above might have
+					 * caused a larger binary buddy system
+					 * to form and we may now be in the
+					 * middle of it.  if this is the case,
+					 * back split the buddies.
+					 */
+					if (dcp->stree[ti] == NOFREE)
+						dbBackSplit((dmtree_t *)
+							    dcp, leafno);
+					dbSplit((dmtree_t *) dcp, leafno,
+						dcp->budmin, oldval);
+				}
+
+				/* release the buffer and return the error.
+				 */
+				release_metapage(mp);
+				return (rc);
+			}
+		} else {
+			/* we're at the top level of the map. update
+			 * the bmap control page to reflect the size
+			 * of the maximum free buddy system.
+			 */
+			assert(level == bmp->db_maxlevel);
+			if (bmp->db_maxfreebud != oldroot) {
+				jfs_error(bmp->db_ipbmap->i_sb,
+					  "dbAdjCtl: the maximum free buddy is "
+					  "not the old root");
+			}
+			bmp->db_maxfreebud = dcp->stree[ROOT];
+		}
+	}
+
+	/* write the buffer.
+	 */
+	write_metapage(mp);
+
+	return (0);
+}
+
+
+/*
+ * NAME:	dbSplit()
+ *
+ * FUNCTION:    update the leaf of a dmtree with a new value, splitting
+ *		the leaf from the binary buddy system of the dmtree's
+ *		leaves, as required.
+ *
+ * PARAMETERS:
+ *      tp	- pointer to the tree containing the leaf.
+ *      leafno	- the number of the leaf to be updated.
+ *      splitsz	- the size the binary buddy system starting at the leaf
+ *		  must be split to, specified as the log2 number of blocks.
+ *      newval	- the new value for the leaf.
+ *
+ * RETURN VALUES: none
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+{
+	int budsz;
+	int cursz;
+	s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
+
+	/* check if the leaf needs to be split.
+	 */
+	if (leaf[leafno] > tp->dmt_budmin) {
+		/* the split occurs by cutting the buddy system in half
+		 * at the specified leaf until we reach the specified
+		 * size.  pick up the starting split size (current size
+		 * - 1 in l2) and the corresponding buddy size.
+		 */
+		cursz = leaf[leafno] - 1;
+		budsz = BUDSIZE(cursz, tp->dmt_budmin);
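+		/* for example (assuming a budmin of 5): splitting a leaf
+		 * that describes 2^7 blocks down to splitsz 5 first gives
+		 * its buddy two leaves away the value 6, then its immediate
+		 * buddy the value 5, before the leaf itself takes newval.
+		 */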
+
+		/* split until we reach the specified size.
+		 */
+		while (cursz >= splitsz) {
+			/* update the buddy's leaf with its new value.
+			 */
+			dbAdjTree(tp, leafno ^ budsz, cursz);
+
+			/* on to the next size and buddy.
+			 */
+			cursz -= 1;
+			budsz >>= 1;
+		}
+	}
+
+	/* adjust the dmap tree to reflect the specified leaf's new 
+	 * value.
+	 */
+	dbAdjTree(tp, leafno, newval);
+}
+
+
+/*
+ * NAME:	dbBackSplit()
+ *
+ * FUNCTION:    back split the binary buddy system of dmtree leaves
+ *		that hold a specified leaf until the specified leaf
+ *		starts its own binary buddy system.
+ *
+ *		the allocators typically perform allocations at the start
+ *		of binary buddy systems and dbSplit() is used to accomplish
+ *		any required splits.  in some cases, however, allocation
+ *		may occur in the middle of a binary system and requires a
+ *		back split, with the split proceeding out from the middle of
+ *		the system (less efficient) rather than the start of the
+ *		system (more efficient).  the cases in which a back split
+ *		is required are rare and are limited to the first allocation
+ *		within an allocation group which is a part (not first part)
+ *		of a larger binary buddy system and a few exception cases
+ *		in which a previous join operation must be backed out.
+ *
+ * PARAMETERS:
+ *      tp	- pointer to the tree containing the leaf.
+ *      leafno	- the number of the leaf to be updated.
+ *
+ * RETURN VALUES: none
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+static void dbBackSplit(dmtree_t * tp, int leafno)
+{
+	int budsz, bud, w, bsz, size;
+	int cursz;
+	s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
+
+	/* leaf should be part (not first part) of a binary
+	 * buddy system.
+	 */
+	assert(leaf[leafno] == NOFREE);
+
+	/* the back split is accomplished by iteratively finding the leaf
+	 * that starts the buddy system that contains the specified leaf and
+	 * splitting that system in two.  this iteration continues until
+	 * the specified leaf becomes the start of a buddy system. 
+	 *
+	 * determine maximum possible l2 size for the specified leaf.
+	 */
+	size =
+	    LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
+		      tp->dmt_budmin);
+
+	/* determine the number of leaves covered by this size.  this
+	 * is the buddy size that we will start with as we search for
+	 * the buddy system that contains the specified leaf.
+	 */
+	budsz = BUDSIZE(size, tp->dmt_budmin);
+
+	/* back split.
+	 */
+	while (leaf[leafno] == NOFREE) {
+		/* find the leftmost buddy leaf.
+		 */
+		for (w = leafno, bsz = budsz;; bsz <<= 1,
+		     w = (w < bud) ? w : bud) {
+			assert(bsz < le32_to_cpu(tp->dmt_nleafs));
+
+			/* determine the buddy.
+			 */
+			bud = w ^ bsz;
+
+			/* check if this buddy is the start of the system.
+			 */
+			if (leaf[bud] != NOFREE) {
+				/* split the leaf at the start of the
+				 * system in two.
+				 */
+				cursz = leaf[bud] - 1;
+				dbSplit(tp, bud, cursz, cursz);
+				break;
+			}
+		}
+	}
+
+	assert(leaf[leafno] == size);
+}
+
+
+/*
+ * NAME:	dbJoin()
+ *
+ * FUNCTION:    update the leaf of a dmtree with a new value, joining
+ *		the leaf with other leaves of the dmtree into a multi-leaf
+ *		binary buddy system, as required.
+ *
+ * PARAMETERS:
+ *      tp	- pointer to the tree containing the leaf.
+ *      leafno	- the number of the leaf to be updated.
+ *      newval	- the new value for the leaf.
+ *
+ * RETURN VALUES: none
+ */
+static void dbJoin(dmtree_t * tp, int leafno, int newval)
+{
+	int budsz, buddy;
+	s8 *leaf;
+
+	/* can the new leaf value require a join with other leaves ?
+	 */
+	if (newval >= tp->dmt_budmin) {
+		/* pick up a pointer to the leaves of the tree.
+		 */
+		leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
+
+		/* try to join the specified leaf into a large binary
+		 * buddy system.  the join proceeds by attempting to join
+		 * the specified leafno with its buddy (leaf) at new value.
+		 * if the join occurs, we attempt to join the left leaf
+		 * of the joined buddies with its buddy at new value + 1.
+		 * we continue to join until we find a buddy that cannot be
+		 * joined (does not have a value equal to the size of the
+		 * last join) or until all leaves have been joined into a
+		 * single system.
+		 *
+		 * get the buddy size (number of words covered) of
+		 * the new value.
+		 */
+		budsz = BUDSIZE(newval, tp->dmt_budmin);
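+		/* for example (assuming a budmin of 5): if leaf 3 is being
+		 * set to 5 and leaf 2 already holds 5, leaf 2 claims the
+		 * pair (leaf 3 becomes NOFREE); if leaf 0 then holds 6, it
+		 * claims the resulting quad, describing 2^7 blocks.
+		 */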
+
+		/* try to join.
+		 */
+		while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
+			/* get the buddy leaf.
+			 */
+			buddy = leafno ^ budsz;
+
+			/* if the leaf's new value is greater than its
+			 * buddy's value, we join no more.
+			 */
+			if (newval > leaf[buddy])
+				break;
+
+			assert(newval == leaf[buddy]);
+
+			/* check which (leafno or buddy) is the left buddy.
+			 * the left buddy gets to claim the blocks resulting
+			 * from the join while the right gets to claim none.
+			 * the left buddy is also eligible to participate in
+			 * a join at the next higher level while the right
+			 * is not.
+			 *
+			 */
+			if (leafno < buddy) {
+				/* leafno is the left buddy.
+				 */
+				dbAdjTree(tp, buddy, NOFREE);
+			} else {
+				/* buddy is the left buddy and becomes
+				 * leafno.
+				 */
+				dbAdjTree(tp, leafno, NOFREE);
+				leafno = buddy;
+			}
+
+			/* on to try the next join.
+			 */
+			newval += 1;
+			budsz <<= 1;
+		}
+	}
+
+	/* update the leaf value.
+	 */
+	dbAdjTree(tp, leafno, newval);
+}
+
+
+/*
+ * NAME:	dbAdjTree()
+ *
+ * FUNCTION:    update a leaf of a dmtree with a new value, adjusting
+ *		the dmtree, as required, to reflect the new leaf value.
+ *		the combination of any buddies must already be done before
+ *		this is called.
+ *
+ * PARAMETERS:
+ *      tp	- pointer to the tree to be adjusted.
+ *      leafno	- the number of the leaf to be updated.
+ *      newval	- the new value for the leaf.
+ *
+ * RETURN VALUES: none
+ */
+static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+{
+	int lp, pp, k;
+	int max;
+
+	/* pick up the index of the leaf for this leafno.
+	 */
+	lp = leafno + le32_to_cpu(tp->dmt_leafidx);
+
+	/* is the current value the same as the old value ?  if so,
+	 * there is nothing to do.
+	 */
+	if (tp->dmt_stree[lp] == newval)
+		return;
+
+	/* set the new value.
+	 */
+	tp->dmt_stree[lp] = newval;
+
+	/* bubble the new value up the tree as required.
+	 */
+	for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
+		/* get the index of the first leaf of the 4 leaf
+		 * group containing the specified leaf (leafno).
+		 */
+		lp = ((lp - 1) & ~0x03) + 1;
+
+		/* get the index of the parent of this 4 leaf group.
+		 */
+		pp = (lp - 1) >> 2;
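+		/* e.g. stree nodes 5..8 form one group whose parent is
+		 * node 1, and nodes 1..4 in turn roll up to the root at
+		 * node 0 (children of node i sit at 4i+1 .. 4i+4).
+		 */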
+
+		/* determine the maximum of the 4 leaves.
+		 */
+		max = TREEMAX(&tp->dmt_stree[lp]);
+
+		/* if the maximum of the 4 is the same as the
+		 * parent's value, we're done.
+		 */
+		if (tp->dmt_stree[pp] == max)
+			break;
+
+		/* parent gets new value.
+		 */
+		tp->dmt_stree[pp] = max;
+
+		/* parent becomes leaf for next go-round.
+		 */
+		lp = pp;
+	}
+}
+
+
+/*
+ * NAME:	dbFindLeaf()
+ *
+ * FUNCTION:    search a dmtree_t for sufficient free blocks, returning
+ *		the index of a leaf describing the free blocks if 
+ *		sufficient free blocks are found.
+ *
+ *		the search starts at the top of the dmtree_t tree and
+ *		proceeds down the tree to the leftmost leaf with sufficient
+ *		free space.
+ *
+ * PARAMETERS:
+ *      tp	- pointer to the tree to be searched.
+ *      l2nb	- log2 number of free blocks to search for.
+ *	leafidx	- return pointer to be set to the index of the leaf
+ *		  describing at least l2nb free blocks if sufficient
+ *		  free blocks are found.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -ENOSPC	- insufficient free blocks. 
+ */
+static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+{
+	int ti, n = 0, k, x = 0;
+
+	/* first check the root of the tree to see if there is
+	 * sufficient free space.
+	 */
+	if (l2nb > tp->dmt_stree[ROOT])
+		return -ENOSPC;
+
+	/* sufficient free space available. now search down the tree
+	 * starting at the next level for the leftmost leaf that
+	 * describes sufficient free space.
+	 */
+	for (k = le32_to_cpu(tp->dmt_height), ti = 1;
+	     k > 0; k--, ti = ((ti + n) << 2) + 1) {
+		/* search the four nodes at this level, starting from
+		 * the left.
+		 */
+		for (x = ti, n = 0; n < 4; n++) {
+			/* sufficient free space found.  move to the next
+			 * level (or quit if this is the last level).
+			 */
+			if (l2nb <= tp->dmt_stree[x + n])
+				break;
+		}
+
+		/* better have found something since the higher
+		 * levels of the tree said it was here.
+		 */
+		assert(n < 4);
+	}
+
+	/* set the return to the leftmost leaf describing sufficient
+	 * free space.
+	 */
+	*leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
+
+	return (0);
+}
+
+
+/*
+ * NAME:	dbFindBits()
+ *
+ * FUNCTION:    find a specified number of binary buddy free bits within a
+ *		dmap bitmap word value.
+ *
+ *		this routine searches the bitmap value for (1 << l2nb) free
+ *		bits at (1 << l2nb) alignments within the value.
+ *
+ * PARAMETERS:
+ *      word	-  dmap bitmap word value.
+ *      l2nb	-  number of free bits specified as a log2 number.
+ *
+ * RETURN VALUES:
+ *      starting bit number of free bits.
+ */
+static int dbFindBits(u32 word, int l2nb)
+{
+	int bitno, nb;
+	u32 mask;
+
+	/* get the number of bits.
+	 */
+	nb = 1 << l2nb;
+	assert(nb <= DBWORD);
+
+	/* complement the word so we can use a mask (i.e. 0s represent
+	 * free bits) and compute the mask.
+	 */
+	word = ~word;
+	mask = ONES << (DBWORD - nb);
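+	/* for example, dbFindBits(0xff0fffff, 2) complements the word to
+	 * 0x00f00000, scans with mask 0xf0000000 shifting right by 4 each
+	 * time, and returns bit number 8.
+	 */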
+
+	/* scan the word for nb free bits at nb alignments.
+	 */
+	for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
+		if ((mask & word) == mask)
+			break;
+	}
+
+	ASSERT(bitno < 32);
+
+	/* return the bit number.
+	 */
+	return (bitno);
+}
+
+
+/*
+ * NAME:	dbMaxBud(u8 *cp)
+ *
+ * FUNCTION:    determine the largest binary buddy string of free
+ *		bits within 32-bits of the map.
+ *
+ * PARAMETERS:
+ *      cp	-  pointer to the 32-bit value.
+ *
+ * RETURN VALUES:
+ *      largest binary buddy of free bits within a dmap word.
+ */
+static int dbMaxBud(u8 * cp)
+{
+	signed char tmp1, tmp2;
+
+	/* check if the wmap word is all free. if so, the
+	 * free buddy size is BUDMIN.
+	 */
+	if (*((uint *) cp) == 0)
+		return (BUDMIN);
+
+	/* check if the wmap word is half free. if so, the
+	 * free buddy size is BUDMIN-1.
+	 */
+	if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
+		return (BUDMIN - 1);
+
+	/* not all free or half free. determine the free buddy
+	 * size through table lookup using quarters of the wmap word.
+	 */
+	tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
+	tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
+	return (max(tmp1, tmp2));
+}
+
+
+/*
+ * NAME:	cnttz(uint word)
+ *
+ * FUNCTION:    determine the number of trailing zeros within a 32-bit
+ *		value.
+ *
+ * PARAMETERS:
+ *      word	-  32-bit value to be examined.
+ *
+ * RETURN VALUES:
+ *      count of trailing zeros
+ */
+static int cnttz(u32 word)
+{
+	int n;
+
+	for (n = 0; n < 32; n++, word >>= 1) {
+		if (word & 0x01)
+			break;
+	}
+
+	return (n);
+}
+
+
+/*
+ * NAME:	cntlz(u32 value)
+ *
+ * FUNCTION:    determine the number of leading zeros within a 32-bit
+ *		value.
+ *
+ * PARAMETERS:
+ *      value	-  32-bit value to be examined.
+ *
+ * RETURN VALUES:
+ *      count of leading zeros
+ */
+static int cntlz(u32 value)
+{
+	int n;
+
+	for (n = 0; n < 32; n++, value <<= 1) {
+		if (value & HIGHORDER)
+			break;
+	}
+	return (n);
+}
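+
+/*
+ * Illustrative sketch (not compiled, not part of the original driver):
+ * sample values for the two bit scans above.  Both routines return 32
+ * for an all-zero word.
+ */
+#if 0
+static void example_bit_scans(void)
+{
+	assert(cnttz(0x00000008) == 3);		/* lowest set bit is bit 3 */
+	assert(cntlz(0x00000008) == 28);	/* 28 leading zero bits */
+	assert(cnttz(0) == 32 && cntlz(0) == 32);
+}
+#endif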
+
+
+/*
+ * NAME:	blkstol2(s64 nb)
+ *
+ * FUNCTION:	convert a block count to its log2 value. if the block
+ *	        count is not an l2 multiple, it is rounded up to the next
+ *		larger l2 multiple.
+ *
+ * PARAMETERS:
+ *      nb	-  number of blocks
+ *
+ * RETURN VALUES:
+ *      log2 number of blocks
+ */
+int blkstol2(s64 nb)
+{
+	int l2nb;
+	s64 mask;		/* meant to be signed */
+
+	mask = (s64) 1 << (64 - 1);
+
+	/* count the leading bits.
+	 */
+	for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
+		/* leading bit found.
+		 */
+		if (nb & mask) {
+			/* determine the l2 value.
+			 */
+			l2nb = (64 - 1) - l2nb;
+
+			/* check if we need to round up.
+			 */
+			if (~mask & nb)
+				l2nb++;
+
+			return (l2nb);
+		}
+	}
+	assert(0);
+	return 0;		/* fix compiler warning */
+}
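+
+/*
+ * Illustrative sketch (not compiled, not part of the original driver):
+ * an exact power of two maps to its log2, while anything in between
+ * rounds up to the next log2 value.  Callers normally reach this
+ * routine through the BLKSTOL2() macro in jfs_dmap.h.
+ */
+#if 0
+static void example_blkstol2(void)
+{
+	assert(blkstol2(1) == 0);
+	assert(blkstol2(4096) == 12);
+	assert(blkstol2(4097) == 13);
+}
+#endif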
+
+
+/*
+ * NAME:    	dbAllocBottomUp()
+ *
+ * FUNCTION:	alloc the specified block range from the working block
+ *		allocation map.
+ *
+ *		the blocks will be allocated from the working map one dmap
+ *		at a time.
+ *
+ * PARAMETERS:
+ *      ip	-  pointer to in-core inode;
+ *      blkno	-  starting block number to be allocated.
+ *      nblocks	-  number of blocks to be allocated.
+ *
+ * RETURN VALUES:
+ *      0	- success
+ *      -EIO	- i/o error
+ */
+int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
+{
+	struct metapage *mp;
+	struct dmap *dp;
+	int nb, rc;
+	s64 lblkno, rem;
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+
+	IREAD_LOCK(ipbmap);
+
+	/* block to be allocated better be within the mapsize. */
+	ASSERT(nblocks <= bmp->db_mapsize - blkno);
+
+	/*
+	 * allocate the blocks a dmap at a time.
+	 */
+	mp = NULL;
+	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
+		/* release previous dmap if any */
+		if (mp) {
+			write_metapage(mp);
+		}
+
+		/* get the buffer for the current dmap. */
+		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
+		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL) {
+			IREAD_UNLOCK(ipbmap);
+			return -EIO;
+		}
+		dp = (struct dmap *) mp->data;
+
+		/* determine the number of blocks to be allocated from
+		 * this dmap.
+		 */
+		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
+
+		DBFREECK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
+
+		/* allocate the blocks. */
+		if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
+			release_metapage(mp);
+			IREAD_UNLOCK(ipbmap);
+			return (rc);
+		}
+
+		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
+	}
+
+	/* write the last buffer. */
+	write_metapage(mp);
+
+	IREAD_UNLOCK(ipbmap);
+
+	return (0);
+}
+
+
+static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
+			 int nblocks)
+{
+	int rc;
+	int dbitno, word, rembits, nb, nwords, wbitno, agno;
+	s8 oldroot, *leaf;
+	struct dmaptree *tp = (struct dmaptree *) & dp->tree;
+
+	/* save the current value of the root (i.e. maximum free string)
+	 * of the dmap tree.
+	 */
+	oldroot = tp->stree[ROOT];
+
+	/* pick up a pointer to the leaves of the dmap tree */
+	leaf = tp->stree + LEAFIND;
+
+	/* determine the bit number and word within the dmap of the
+	 * starting block.
+	 */
+	dbitno = blkno & (BPERDMAP - 1);
+	word = dbitno >> L2DBWORD;
+
+	/* block range better be within the dmap */
+	assert(dbitno + nblocks <= BPERDMAP);
+
+	/* allocate the bits of the dmap's words corresponding to the block
+	 * range. not all bits of the first and last words may be contained
+	 * within the block range.  if this is the case, we'll work against
+	 * those words (i.e. partial first and/or last) on an individual basis
+	 * (a single pass), allocating the bits of interest by hand and
+	 * updating the leaf corresponding to the dmap word. a single pass
+	 * will be used for all dmap words fully contained within the
+	 * specified range.  within this pass, the bits of all fully contained
+	 * dmap words will be marked as free in a single shot and the leaves
+	 * will be updated. a single leaf may describe the free space of
+	 * multiple dmap words, so we may update only a subset of the actual
+	 * leaves corresponding to the dmap words of the block range.
+	 */
+	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
+		/* determine the bit number within the word and
+		 * the number of bits within the word.
+		 */
+		wbitno = dbitno & (DBWORD - 1);
+		nb = min(rembits, DBWORD - wbitno);
+
+		/* check if only part of a word is to be allocated.
+		 */
+		if (nb < DBWORD) {
+			/* allocate (set to 1) the appropriate bits within
+			 * this dmap word.
+			 */
+			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
+						      >> wbitno);
+
+			word++;
+		} else {
+			/* one or more dmap words are fully contained
+			 * within the block range.  determine how many
+			 * words and allocate (set to 1) the bits of these
+			 * words.
+			 */
+			nwords = rembits >> L2DBWORD;
+			memset(&dp->wmap[word], (int) ONES, nwords * 4);
+
+			/* determine how many bits */
+			nb = nwords << L2DBWORD;
+			word += nwords;
+		}
+	}
+
+	/* update the free count for this dmap */
+	dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
+
+	/* reconstruct summary tree */
+	dbInitDmapTree(dp);
+
+	BMAP_LOCK(bmp);
+
+	/* update the highest active allocation group number
+	 * if this allocation group is the new maximum.
+	 */
+	agno = blkno >> bmp->db_agl2size;
+	if (agno > bmp->db_maxag)
+		bmp->db_maxag = agno;
+
+	/* update the free count for the allocation group and map */
+	bmp->db_agfree[agno] -= nblocks;
+	bmp->db_nfree -= nblocks;
+
+	BMAP_UNLOCK(bmp);
+
+	/* if the root has not changed, done. */
+	if (tp->stree[ROOT] == oldroot)
+		return (0);
+
+	/* root changed. bubble the change up to the dmap control pages.
+	 * if the adjustment of the upper level control pages fails,
+	 * backout the bit allocation (thus making everything consistent).
+	 */
+	if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
+		dbFreeBits(bmp, dp, blkno, nblocks);
+
+	return (rc);
+}
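+
+/*
+ * Worked example (illustrative only) of the partial-word mask used
+ * above: with wbitno = 4 and nb = 8, ONES << (DBWORD - nb) >> wbitno
+ * is 0xff000000 >> 4 = 0x0ff00000, which marks the eight blocks at
+ * bit positions 4..11 of the word (counting from the high-order end)
+ * as allocated.
+ */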
+
+
+/*
+ * NAME:	dbExtendFS()
+ *
+ * FUNCTION:	extend bmap from blkno for nblocks;
+ * 		dbExtendFS() updates bmap ready for dbAllocBottomUp();
+ *
+ * L2
+ *  |
+ *   L1---------------------------------L1
+ *    |                                  |
+ *     L0---------L0---------L0           L0---------L0---------L0
+ *      |          |          |            |          |          |
+ *       d0,...,dn  d0,...,dn  d0,...,dn    d0,...,dn  d0,...,dn  d0,.,dm;
+ * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
+ *
+ * <---old---><----------------------------extend----------------------->   
+ */
+int dbExtendFS(struct inode *ipbmap, s64 blkno,	s64 nblocks)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
+	int nbperpage = sbi->nbperpage;
+	int i, i0 = TRUE, j, j0 = TRUE, k, n;
+	s64 newsize;
+	s64 p;
+	struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
+	struct dmapctl *l2dcp, *l1dcp, *l0dcp;
+	struct dmap *dp;
+	s8 *l0leaf, *l1leaf, *l2leaf;
+	struct bmap *bmp = sbi->bmap;
+	int agno, l2agsize, oldl2agsize;
+	s64 ag_rem;
+
+	newsize = blkno + nblocks;
+
+	jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
+		 (long long) blkno, (long long) nblocks, (long long) newsize);
+
+	/*
+	 *      initialize bmap control page.
+	 *
+	 * all the data in bmap control page should exclude
+	 * the mkfs hidden dmap page.
+	 */
+
+	/* update mapsize */
+	bmp->db_mapsize = newsize;
+	bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
+
+	/* compute new AG size */
+	l2agsize = dbGetL2AGSize(newsize);
+	oldl2agsize = bmp->db_agl2size;
+
+	bmp->db_agl2size = l2agsize;
+	bmp->db_agsize = 1 << l2agsize;
+
+	/* compute new number of AG */
+	agno = bmp->db_numag;
+	bmp->db_numag = newsize >> l2agsize;
+	bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
+
+	/*
+	 *      reconfigure db_agfree[] 
+	 * from old AG configuration to new AG configuration;
+	 *
+	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
+	 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
+	 * note: new AG size = old AG size * (2**x).
+	 */
+	if (l2agsize == oldl2agsize)
+		goto extend;
+	k = 1 << (l2agsize - oldl2agsize);
+	ag_rem = bmp->db_agfree[0];	/* save agfree[0] */
+	for (i = 0, n = 0; i < agno; n++) {
+		bmp->db_agfree[n] = 0;	/* init collection point */
+
+		/* coalesce contiguous k AGs; */
+		for (j = 0; j < k && i < agno; j++, i++) {
+			/* merge AGi to AGn */
+			bmp->db_agfree[n] += bmp->db_agfree[i];
+		}
+	}
+	bmp->db_agfree[0] += ag_rem;	/* restore agfree[0] */
+
+	for (; n < MAXAG; n++)
+		bmp->db_agfree[n] = 0;
+
+	/*
+	 * update highest active ag number
+	 */
+
+	bmp->db_maxag = bmp->db_maxag / k;
+
+	/*
+	 *      extend bmap
+	 *
+	 * update bit maps and corresponding level control pages;
+	 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
+	 */
+      extend:
+	/* get L2 page */
+	p = BMAPBLKNO + nbperpage;	/* L2 page */
+	l2mp = read_metapage(ipbmap, p, PSIZE, 0);
+	if (!l2mp) {
+		jfs_error(ipbmap->i_sb, "dbExtendFS: L2 page could not be read");
+		return -EIO;
+	}
+	l2dcp = (struct dmapctl *) l2mp->data;
+
+	/* compute start L1 */
+	k = blkno >> L2MAXL1SIZE;
+	l2leaf = l2dcp->stree + CTLLEAFIND + k;
+	p = BLKTOL1(blkno, sbi->l2nbperpage);	/* L1 page */
+
+	/*
+	 * extend each L1 in L2
+	 */
+	for (; k < LPERCTL; k++, p += nbperpage) {
+		/* get L1 page */
+		if (j0) {
+			/* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
+			l1mp = read_metapage(ipbmap, p, PSIZE, 0);
+			if (l1mp == NULL)
+				goto errout;
+			l1dcp = (struct dmapctl *) l1mp->data;
+
+			/* compute start L0 */
+			j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
+			l1leaf = l1dcp->stree + CTLLEAFIND + j;
+			p = BLKTOL0(blkno, sbi->l2nbperpage);
+			j0 = FALSE;
+		} else {
+			/* assign/init L1 page */
+			l1mp = get_metapage(ipbmap, p, PSIZE, 0);
+			if (l1mp == NULL)
+				goto errout;
+
+			l1dcp = (struct dmapctl *) l1mp->data;
+
+			/* compute start L0 */
+			j = 0;
+			l1leaf = l1dcp->stree + CTLLEAFIND;
+			p += nbperpage;	/* 1st L0 of L1.k  */
+		}
+
+		/*
+		 * extend each L0 in L1
+		 */
+		for (; j < LPERCTL; j++) {
+			/* get L0 page */
+			if (i0) {
+				/* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
+
+				l0mp = read_metapage(ipbmap, p, PSIZE, 0);
+				if (l0mp == NULL)
+					goto errout;
+				l0dcp = (struct dmapctl *) l0mp->data;
+
+				/* compute start dmap */
+				i = (blkno & (MAXL0SIZE - 1)) >>
+				    L2BPERDMAP;
+				l0leaf = l0dcp->stree + CTLLEAFIND + i;
+				p = BLKTODMAP(blkno,
+					      sbi->l2nbperpage);
+				i0 = FALSE;
+			} else {
+				/* assign/init L0 page */
+				l0mp = get_metapage(ipbmap, p, PSIZE, 0);
+				if (l0mp == NULL)
+					goto errout;
+
+				l0dcp = (struct dmapctl *) l0mp->data;
+
+				/* compute start dmap */
+				i = 0;
+				l0leaf = l0dcp->stree + CTLLEAFIND;
+				p += nbperpage;	/* 1st dmap of L0.j */
+			}
+
+			/*
+			 * extend each dmap in L0
+			 */
+			for (; i < LPERCTL; i++) {
+				/*
+				 * reconstruct the dmap page, and
+				 * initialize corresponding parent L0 leaf
+				 */
+				if ((n = blkno & (BPERDMAP - 1))) {
+					/* read in dmap page: */
+					mp = read_metapage(ipbmap, p,
+							   PSIZE, 0);
+					if (mp == NULL)
+						goto errout;
+					n = min(nblocks, (s64)BPERDMAP - n);
+				} else {
+					/* assign/init dmap page */
+					mp = read_metapage(ipbmap, p,
+							   PSIZE, 0);
+					if (mp == NULL)
+						goto errout;
+
+					n = min(nblocks, (s64)BPERDMAP);
+				}
+
+				dp = (struct dmap *) mp->data;
+				*l0leaf = dbInitDmap(dp, blkno, n);
+
+				bmp->db_nfree += n;
+				agno = le64_to_cpu(dp->start) >> l2agsize;
+				bmp->db_agfree[agno] += n;
+
+				write_metapage(mp);
+
+				l0leaf++;
+				p += nbperpage;
+
+				blkno += n;
+				nblocks -= n;
+				if (nblocks == 0)
+					break;
+			}	/* for each dmap in a L0 */
+
+			/*
+			 * build current L0 page from its leaves, and 
+			 * initialize corresponding parent L1 leaf
+			 */
+			*l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
+			write_metapage(l0mp);
+			l0mp = NULL;
+
+			if (nblocks)
+				l1leaf++;	/* continue for next L0 */
+			else {
+				/* more than 1 L0 ? */
+				if (j > 0)
+					break;	/* build L1 page */
+				else {
+					/* summarize in global bmap page */
+					bmp->db_maxfreebud = *l1leaf;
+					release_metapage(l1mp);
+					release_metapage(l2mp);
+					goto finalize;
+				}
+			}
+		}		/* for each L0 in a L1 */
+
+		/*
+		 * build current L1 page from its leaves, and 
+		 * initialize corresponding parent L2 leaf
+		 */
+		*l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
+		write_metapage(l1mp);
+		l1mp = NULL;
+
+		if (nblocks)
+			l2leaf++;	/* continue for next L1 */
+		else {
+			/* more than 1 L1 ? */
+			if (k > 0)
+				break;	/* build L2 page */
+			else {
+				/* summarize in global bmap page */
+				bmp->db_maxfreebud = *l2leaf;
+				release_metapage(l2mp);
+				goto finalize;
+			}
+		}
+	}			/* for each L1 in a L2 */
+
+	jfs_error(ipbmap->i_sb,
+		  "dbExtendFS: function has not returned as expected");
+errout:
+	if (l0mp)
+		release_metapage(l0mp);
+	if (l1mp)
+		release_metapage(l1mp);
+	release_metapage(l2mp);
+	return -EIO;
+
+	/*
+	 *      finalize bmap control page
+	 */
+finalize:
+
+	return 0;
+}
+
+
+/*
+ *	dbFinalizeBmap()
+ */
+void dbFinalizeBmap(struct inode *ipbmap)
+{
+	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
+	int actags, inactags, l2nl;
+	s64 ag_rem, actfree, inactfree, avgfree;
+	int i, n;
+
+	/*
+	 *      finalize bmap control page
+	 */
+//finalize:
+	/* 
+	 * compute db_agpref: preferred ag to allocate from
+	 * (the leftmost ag with average free space in it);
+	 */
+//agpref:
+	/* get the number of active ags and inactive ags */
+	actags = bmp->db_maxag + 1;
+	inactags = bmp->db_numag - actags;
+	ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1);	/* ??? */
+
+	/* determine how many blocks are in the inactive allocation
+	 * groups. in doing this, we must account for the fact that
+	 * the rightmost group might be a partial group (i.e. file
+	 * system size is not a multiple of the group size).
+	 */
+	inactfree = (inactags && ag_rem) ?
+	    ((inactags - 1) << bmp->db_agl2size) + ag_rem
+	    : inactags << bmp->db_agl2size;
+
+	/* determine how many free blocks are in the active
+	 * allocation groups plus the average number of free blocks
+	 * within the active ags.
+	 */
+	actfree = bmp->db_nfree - inactfree;
+	avgfree = (u32) actfree / (u32) actags;
+
+	/* if the preferred allocation group does not have at least
+	 * average free space, re-establish the preferred group as the
+	 * leftmost group with average free space.
+	 */
+	if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
+		for (bmp->db_agpref = 0; bmp->db_agpref < actags;
+		     bmp->db_agpref++) {
+			if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
+				break;
+		}
+		if (bmp->db_agpref >= bmp->db_numag) {
+			jfs_error(ipbmap->i_sb,
+				  "cannot find ag with average freespace");
+		}
+	}
+
+	/*
+	 * compute db_aglevel, db_agheigth, db_agwidth, db_agstart:
+	 * an ag is covered by the dmapctl summary tree at level aglevel,
+	 * by agwidth nodes at height agheigth (counted up from the leaves),
+	 * starting at index agstart of the summary tree node array;
+	 */
+	bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
+	l2nl =
+	    bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
+	bmp->db_agheigth = l2nl >> 1;
+	bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
+	for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
+	     i--) {
+		bmp->db_agstart += n;
+		n <<= 2;
+	}
+
+}
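+
+/*
+ * Worked example (illustrative only, assuming db_agl2size = 15 so
+ * that db_aglevel = 0): l2nl = 15 - L2BPERDMAP = 2, hence
+ * db_agheigth = 1 and db_agwidth = 1 << (2 - 2) = 1, and the loop
+ * above accumulates 1 + 4 + 16 + 64 = 85 into db_agstart.  In other
+ * words such an ag is summarized by a single node at height 1 of its
+ * L0 dmapctl tree, whose row begins at index 85 of the stree array.
+ */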
+
+
+/*
+ * NAME:	dbInitDmap()/ujfs_idmap_page()
+ *                                                                    
+ * FUNCTION:	initialize working/persistent bitmap of the dmap page
+ *		for the specified number of blocks:
+ *                                                                    
+ *		at entry, the bitmaps had been initialized as free (ZEROS);
+ *		The number of blocks will only account for the actually 
+ *		existing blocks. Blocks which don't actually exist in 
+ *		the aggregate will be marked as allocated (ONES);
+ *
+ * PARAMETERS:
+ *	dp	- pointer to page of map
+ *	Blkno	- starting block number covered by this page
+ *	nblocks	- number of blocks this page
+ *
+ * RETURNS:	max free string at the root of the dmap's summary tree
+ */
+static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
+{
+	int blkno, w, b, r, nw, nb, i;
+
+	/* starting block number within the dmap */
+	blkno = Blkno & (BPERDMAP - 1);
+
+	if (blkno == 0) {
+		dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
+		dp->start = cpu_to_le64(Blkno);
+
+		if (nblocks == BPERDMAP) {
+			memset(&dp->wmap[0], 0, LPERDMAP * 4);
+			memset(&dp->pmap[0], 0, LPERDMAP * 4);
+			goto initTree;
+		}
+	} else {
+		dp->nblocks =
+		    cpu_to_le32(le32_to_cpu(dp->nblocks) + nblocks);
+		dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
+	}
+
+	/* word number containing start block number */
+	w = blkno >> L2DBWORD;
+
+	/*
+	 * free the bits corresponding to the block range (ZEROS):
+	 * note: not all bits of the first and last words may be contained 
+	 * within the block range.
+	 */
+	for (r = nblocks; r > 0; r -= nb, blkno += nb) {
+		/* number of bits preceding range to be freed in the word */
+		b = blkno & (DBWORD - 1);
+		/* number of bits to free in the word */
+		nb = min(r, DBWORD - b);
+
+		/* is partial word to be freed ? */
+		if (nb < DBWORD) {
+			/* free (set to 0) from the bitmap word */
+			dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
+						     >> b));
+			dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
+						     >> b));
+
+			/* skip the word freed */
+			w++;
+		} else {
+			/* free (set to 0) contiguous bitmap words */
+			nw = r >> L2DBWORD;
+			memset(&dp->wmap[w], 0, nw * 4);
+			memset(&dp->pmap[w], 0, nw * 4);
+
+			/* skip the words freed */
+			nb = nw << L2DBWORD;
+			w += nw;
+		}
+	}
+
+	/*
+	 * mark bits following the range to be freed (non-existing 
+	 * blocks) as allocated (ONES)
+	 */
+
+	if (blkno == BPERDMAP)
+		goto initTree;
+
+	/* the first word beyond the end of existing blocks */
+	w = blkno >> L2DBWORD;
+
+	/* does nblocks fall on a 32-bit boundary ? */
+	b = blkno & (DBWORD - 1);
+	if (b) {
+		/* mark a partial word allocated */
+		dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
+		w++;
+	}
+
+	/* set the rest of the words in the page to allocated (ONES) */
+	for (i = w; i < LPERDMAP; i++)
+		dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
+
+	/*
+	 * init tree
+	 */
+      initTree:
+	return (dbInitDmapTree(dp));
+}
+
+
+/*
+ * NAME:	dbInitDmapTree()/ujfs_complete_dmap()
+ *                                                                    
+ * FUNCTION:	initialize summary tree of the specified dmap:
+ *
+ *		at entry, bitmap of the dmap has been initialized;
+ *                                                                    
+ * PARAMETERS:
+ *	dp	- dmap whose summary tree is to be initialized
+ *
+ * RETURNS:	max free string at the root of the tree
+ */
+static int dbInitDmapTree(struct dmap * dp)
+{
+	struct dmaptree *tp;
+	s8 *cp;
+	int i;
+
+	/* init fixed info of tree */
+	tp = &dp->tree;
+	tp->nleafs = cpu_to_le32(LPERDMAP);
+	tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
+	tp->leafidx = cpu_to_le32(LEAFIND);
+	tp->height = cpu_to_le32(4);
+	tp->budmin = BUDMIN;
+
+	/* init each leaf from corresponding wmap word:
+	 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
+	 * bitmap word are allocated. 
+	 */
+	cp = tp->stree + le32_to_cpu(tp->leafidx);
+	for (i = 0; i < LPERDMAP; i++)
+		*cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
+
+	/* build the dmap's binary buddy summary tree */
+	return (dbInitTree(tp));
+}
+
+
+/*
+ * NAME:	dbInitTree()/ujfs_adjtree()
+ *                                                                    
+ * FUNCTION:	initialize binary buddy summary tree of a dmap or dmapctl.
+ *
+ *		at entry, the leaves of the tree have been initialized
+ *		from corresponding bitmap word or root of summary tree
+ *		of the child control page;
+ *		configure binary buddy system at the leaf level, then
+ *		bubble up the values of the leaf nodes up the tree.
+ *
+ * PARAMETERS:
+ *	dtp	- pointer to the dmap or dmapctl summary tree to initialize
+ *
+ * RETURNS: max free string at the root of the tree
+ */
+static int dbInitTree(struct dmaptree * dtp)
+{
+	int l2max, l2free, bsize, nextb, i;
+	int child, parent, nparent;
+	s8 *tp, *cp, *cp1;
+
+	tp = dtp->stree;
+
+	/* Determine the maximum free string possible for the leaves */
+	l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
+
+	/*
+	 * configure the leaf level into a binary buddy system
+	 *
+	 * Try to combine buddies starting with a buddy size of 1 
+	 * (i.e. two leaves). At a buddy size of 1 two buddy leaves 
+	 * can be combined if both buddies have a maximum free of l2min; 
+	 * the combination will result in the left-most buddy leaf having 
+	 * a maximum free of l2min+1.  
+	 * After processing all buddies for a given size, process buddies 
+	 * at the next higher buddy size (i.e. current size * 2) and 
+	 * the next maximum free (current free + 1).  
+	 * This continues until the maximum possible buddy combination 
+	 * yields maximum free.
+	 */
+	for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
+	     l2free++, bsize = nextb) {
+		/* get next buddy size == current buddy pair size */
+		nextb = bsize << 1;
+
+		/* scan each adjacent buddy pair at current buddy size */
+		for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
+		     i < le32_to_cpu(dtp->nleafs);
+		     i += nextb, cp += nextb) {
+			/* coalesce if both adjacent buddies are max free */
+			if (*cp == l2free && *(cp + bsize) == l2free) {
+				*cp = l2free + 1;	/* left take right */
+				*(cp + bsize) = -1;	/* right give left */
+			}
+		}
+	}
+
+	/*
+	 * bubble summary information of leaves up the tree.
+	 *
+	 * Starting at the leaf node level, the four nodes described by
+	 * the higher level parent node are compared for a maximum free and 
+	 * this maximum becomes the value of the parent node.  
+	 * when all lower level nodes are processed in this fashion then 
+	 * move up to the next level (parent becomes a lower level node) and 
+	 * continue the process for that level.
+	 */
+	for (child = le32_to_cpu(dtp->leafidx),
+	     nparent = le32_to_cpu(dtp->nleafs) >> 2;
+	     nparent > 0; nparent >>= 2, child = parent) {
+		/* get index of 1st node of parent level */
+		parent = (child - 1) >> 2;
+
+		/* set the value of the parent node as the maximum 
+		 * of the four nodes of the current level.
+		 */
+		for (i = 0, cp = tp + child, cp1 = tp + parent;
+		     i < nparent; i++, cp += 4, cp1++)
+			*cp1 = TREEMAX(cp);
+	}
+
+	return (*tp);
+}
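+
+/*
+ * Worked example (illustrative only): for a dmap tree budmin is
+ * BUDMIN (5) and l2max = L2LPERDMAP + BUDMIN = 13.  If every wmap
+ * word is free, each leaf starts at 5; the first pass turns each
+ * adjacent pair into (6, -1), the next pass into (7, -1, -1, -1),
+ * and so on, until leaf 0 holds 13 and every other leaf holds -1.
+ * The bubble-up loop then propagates the maximum upward, so the root
+ * also reports 13, i.e. the whole dmap (2^13 blocks) is a single
+ * free buddy.
+ */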
+
+
+/*
+ *	dbInitDmapCtl()
+ *
+ * function: initialize dmapctl page
+ */
+static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
+{				/* start leaf index not covered by range */
+	s8 *cp;
+
+	dcp->nleafs = cpu_to_le32(LPERCTL);
+	dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
+	dcp->leafidx = cpu_to_le32(CTLLEAFIND);
+	dcp->height = cpu_to_le32(5);
+	dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
+
+	/*
+	 * initialize the leaves of current level that were not covered 
+	 * by the specified input block range (i.e. the leaves have no 
+	 * low level dmapctl or dmap).
+	 */
+	cp = &dcp->stree[CTLLEAFIND + i];
+	for (; i < LPERCTL; i++)
+		*cp++ = NOFREE;
+
+	/* build the dmap's binary buddy summary tree */
+	return (dbInitTree((struct dmaptree *) dcp));
+}
+
+
+/*
+ * NAME:	dbGetL2AGSize()/ujfs_getagl2size()
+ *                                                                    
+ * FUNCTION:	Determine log2(allocation group size) from aggregate size
+ *                                                                    
+ * PARAMETERS:
+ *	nblocks	- Number of blocks in aggregate
+ *
+ * RETURNS: log2(allocation group size) in aggregate blocks
+ */
+static int dbGetL2AGSize(s64 nblocks)
+{
+	s64 sz;
+	s64 m;
+	int l2sz;
+
+	if (nblocks < BPERDMAP * MAXAG)
+		return (L2BPERDMAP);
+
+	/* round up aggregate size to power of 2 */
+	m = ((u64) 1 << (64 - 1));
+	for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
+		if (m & nblocks)
+			break;
+	}
+
+	sz = (s64) 1 << l2sz;
+	if (sz < nblocks)
+		l2sz += 1;
+
+	/* agsize = roundupSize/max_number_of_ag */
+	return (l2sz - L2MAXAG);
+}
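+
+/*
+ * Worked example (illustrative only): for an aggregate of 3,000,000
+ * blocks the size is rounded up to 2^22; dividing by the maximum of
+ * 2^L2MAXAG = 128 allocation groups gives l2(agsize) = 22 - 7 = 15,
+ * i.e. 32768-block allocation groups (about 92 of them here).
+ * Aggregates smaller than BPERDMAP * MAXAG blocks simply use one
+ * dmap (2^L2BPERDMAP blocks) per allocation group.
+ */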
+
+
+/*
+ * NAME:	dbMapFileSizeToMapSize()
+ *                                                                    
+ * FUNCTION:	compute number of blocks the block allocation map file 
+ *		can cover from the map file size;
+ *
+ * RETURNS:	Number of blocks which can be covered by this block map file;
+ */
+
+/*
+ * maximum number of map pages at each level including control pages
+ */
+#define MAXL0PAGES	(1 + LPERCTL)
+#define MAXL1PAGES	(1 + LPERCTL * MAXL0PAGES)
+#define MAXL2PAGES	(1 + LPERCTL * MAXL1PAGES)
+
+/*
+ * convert number of map pages to the zero origin top dmapctl level
+ */
+#define BMAPPGTOLEV(npages)	\
+	(((npages) <= 3 + MAXL0PAGES) ? 0 \
+       : ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
+
+s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
+{
+	struct super_block *sb = ipbmap->i_sb;
+	s64 nblocks;
+	s64 npages, ndmaps;
+	int level, i;
+	int complete, factor;
+
+	nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
+	npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
+	level = BMAPPGTOLEV(npages);
+
+	/* At each level, accumulate the number of dmap pages covered by 
+	 * the number of full child levels below it;
+	 * repeat for the last incomplete child level.
+	 */
+	ndmaps = 0;
+	npages--;		/* skip the first global control page */
+	/* skip higher level control pages above top level covered by map */
+	npages -= (2 - level);
+	npages--;		/* skip top level's control page */
+	for (i = level; i >= 0; i--) {
+		factor =
+		    (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
+		complete = (u32) npages / factor;
+		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL
+				      : ((i == 1) ? LPERCTL : 1));
+
+		/* pages in last/incomplete child */
+		npages = (u32) npages % factor;
+		/* skip incomplete child's level control page */
+		npages--;
+	}
+
+	/* convert the number of dmaps into the number of blocks 
+	 * which can be covered by the dmaps;
+	 */
+	nblocks = ndmaps << L2BPERDMAP;
+
+	return (nblocks);
+}
+
+
+#ifdef	_JFS_DEBUG_DMAP
+/*
+ *	DBinitmap()
+ */
+static void DBinitmap(s64 size, struct inode *ipbmap, u32 ** results)
+{
+	int npages;
+	u32 *dbmap, *d;
+	int n;
+	s64 lblkno, cur_block;
+	struct dmap *dp;
+	struct metapage *mp;
+
+	npages = size / 32768;
+	npages += (size % 32768) ? 1 : 0;
+
+	dbmap = (u32 *) xmalloc(npages * 4096, L2PSIZE, kernel_heap);
+	if (dbmap == NULL)
+		BUG();	/* Not robust since this is only unused debug code */
+
+	for (n = 0, d = dbmap; n < npages; n++, d += 1024)
+		bzero(d, 4096);
+
+	/* Need to initialize from disk map pages
+	 */
+	for (d = dbmap, cur_block = 0; cur_block < size;
+	     cur_block += BPERDMAP, d += LPERDMAP) {
+		lblkno = BLKTODMAP(cur_block,
+				   JFS_SBI(ipbmap->i_sb)->bmap->
+				   db_l2nbperpage);
+		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
+		if (mp == NULL) {
+			jfs_error(ipbmap->i_sb,
+				  "DBinitmap: could not read disk map page");
+			continue;
+		}
+		dp = (struct dmap *) mp->data;
+
+		for (n = 0; n < LPERDMAP; n++)
+			d[n] = le32_to_cpu(dp->wmap[n]);
+
+		release_metapage(mp);
+	}
+
+	*results = dbmap;
+}
+
+
+/*
+ *	DBAlloc()
+ */
+void DBAlloc(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
+{
+	int word, nb, bitno;
+	u32 mask;
+
+	assert(blkno > 0 && blkno < mapsize);
+	assert(nblocks > 0 && nblocks <= mapsize);
+
+	assert(blkno + nblocks <= mapsize);
+
+	dbmap += (blkno / 32);
+	while (nblocks > 0) {
+		bitno = blkno & (32 - 1);
+		nb = min(nblocks, 32 - bitno);
+
+		mask = (0xffffffff << (32 - nb) >> bitno);
+		assert((mask & *dbmap) == 0);
+		*dbmap |= mask;
+
+		dbmap++;
+		blkno += nb;
+		nblocks -= nb;
+	}
+}
+
+
+/*
+ *	DBFree()
+ */
+static void DBFree(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
+{
+	int word, nb, bitno;
+	u32 mask;
+
+	assert(blkno > 0 && blkno < mapsize);
+	assert(nblocks > 0 && nblocks <= mapsize);
+
+	assert(blkno + nblocks <= mapsize);
+
+	dbmap += (blkno / 32);
+	while (nblocks > 0) {
+		bitno = blkno & (32 - 1);
+		nb = min(nblocks, 32 - bitno);
+
+		mask = (0xffffffff << (32 - nb) >> bitno);
+		assert((mask & *dbmap) == mask);
+		*dbmap &= ~mask;
+
+		dbmap++;
+		blkno += nb;
+		nblocks -= nb;
+	}
+}
+
+
+/*
+ *	DBAllocCK()
+ */
+static void DBAllocCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
+{
+	int word, nb, bitno;
+	u32 mask;
+
+	assert(blkno > 0 && blkno < mapsize);
+	assert(nblocks > 0 && nblocks <= mapsize);
+
+	assert(blkno + nblocks <= mapsize);
+
+	dbmap += (blkno / 32);
+	while (nblocks > 0) {
+		bitno = blkno & (32 - 1);
+		nb = min(nblocks, 32 - bitno);
+
+		mask = (0xffffffff << (32 - nb) >> bitno);
+		assert((mask & *dbmap) == mask);
+
+		dbmap++;
+		blkno += nb;
+		nblocks -= nb;
+	}
+}
+
+
+/*
+ *	DBFreeCK()
+ */
+static void DBFreeCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
+{
+	int word, nb, bitno;
+	u32 mask;
+
+	assert(blkno > 0 && blkno < mapsize);
+	assert(nblocks > 0 && nblocks <= mapsize);
+
+	assert(blkno + nblocks <= mapsize);
+
+	dbmap += (blkno / 32);
+	while (nblocks > 0) {
+		bitno = blkno & (32 - 1);
+		nb = min(nblocks, 32 - bitno);
+
+		mask = (0xffffffff << (32 - nb) >> bitno);
+		assert((mask & *dbmap) == 0);
+
+		dbmap++;
+		blkno += nb;
+		nblocks -= nb;
+	}
+}
+
+
+/*
+ *	dbPrtMap()
+ */
+static void dbPrtMap(struct bmap * bmp)
+{
+	printk("   mapsize:   %Ld\n", (long long) bmp->db_mapsize);
+	printk("   nfree:     %Ld\n", (long long) bmp->db_nfree);
+	printk("   numag:     %d\n", bmp->db_numag);
+	printk("   agsize:    %Ld\n", (long long) bmp->db_agsize);
+	printk("   agl2size:  %d\n", bmp->db_agl2size);
+	printk("   agwidth:   %d\n", bmp->db_agwidth);
+	printk("   agstart:   %d\n", bmp->db_agstart);
+	printk("   agheigth:  %d\n", bmp->db_agheigth);
+	printk("   aglevel:   %d\n", bmp->db_aglevel);
+	printk("   maxlevel:  %d\n", bmp->db_maxlevel);
+	printk("   maxag:     %d\n", bmp->db_maxag);
+	printk("   agpref:    %d\n", bmp->db_agpref);
+	printk("   l2nbppg:   %d\n", bmp->db_l2nbperpage);
+}
+
+
+/*
+ *	dbPrtCtl()
+ */
+static void dbPrtCtl(struct dmapctl * dcp)
+{
+	int i, j, n;
+
+	printk("   height:    %08x\n", le32_to_cpu(dcp->height));
+	printk("   leafidx:   %08x\n", le32_to_cpu(dcp->leafidx));
+	printk("   budmin:    %08x\n", dcp->budmin);
+	printk("   nleafs:    %08x\n", le32_to_cpu(dcp->nleafs));
+	printk("   l2nleafs:  %08x\n", le32_to_cpu(dcp->l2nleafs));
+
+	printk("\n Tree:\n");
+	for (i = 0; i < CTLLEAFIND; i += 8) {
+		n = min(8, CTLLEAFIND - i);
+
+		for (j = 0; j < n; j++)
+			printk("  [%03x]: %02x", i + j,
+			       (char) dcp->stree[i + j]);
+		printk("\n");
+	}
+
+	printk("\n Tree Leaves:\n");
+	for (i = 0; i < LPERCTL; i += 8) {
+		n = min(8, LPERCTL - i);
+
+		for (j = 0; j < n; j++)
+			printk("  [%03x]: %02x",
+			       i + j,
+			       (char) dcp->stree[i + j + CTLLEAFIND]);
+		printk("\n");
+	}
+}
+#endif				/* _JFS_DEBUG_DMAP */
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
new file mode 100644
index 0000000..32e2588
--- /dev/null
+++ b/fs/jfs/jfs_dmap.h
@@ -0,0 +1,314 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_DMAP
+#define _H_JFS_DMAP
+
+#include "jfs_txnmgr.h"
+
+#define BMAPVERSION	1	/* version number */
+#define	TREESIZE	(256+64+16+4+1)	/* size of a dmap tree */
+#define	LEAFIND		(64+16+4+1)	/* index of 1st leaf of a dmap tree */
+#define LPERDMAP	256	/* num leaves per dmap tree */
+#define L2LPERDMAP	8	/* l2 number of leaves per dmap tree */
+#define	DBWORD		32	/* # of blks covered by a map word */
+#define	L2DBWORD	5	/* l2 # of blks covered by a mword */
+#define BUDMIN  	L2DBWORD	/* max free string in a map word */
+#define BPERDMAP	(LPERDMAP * DBWORD)	/* num of blks per dmap */
+#define L2BPERDMAP	13	/* l2 num of blks per dmap */
+#define CTLTREESIZE	(1024+256+64+16+4+1)	/* size of a dmapctl tree */
+#define CTLLEAFIND	(256+64+16+4+1)	/* idx of 1st leaf of a dmapctl tree */
+#define LPERCTL		1024	/* num of leaves per dmapctl tree */
+#define L2LPERCTL	10	/* l2 num of leaves per dmapctl tree */
+#define	ROOT		0	/* index of the root of a tree */
+#define	NOFREE		((s8) -1)	/* no blocks free */
+#define	MAXAG		128	/* max number of allocation groups */
+#define L2MAXAG		7	/* l2 max num of AG */
+#define L2MINAGSZ	25	/* l2 of minimum AG size in bytes */
+#define	BMAPBLKNO	0	/* lblkno of bmap within the map */
+
+/*
+ * maximum l2 number of disk blocks at the various dmapctl levels.
+ */
+#define	L2MAXL0SIZE	(L2BPERDMAP + 1 * L2LPERCTL)
+#define	L2MAXL1SIZE	(L2BPERDMAP + 2 * L2LPERCTL)
+#define	L2MAXL2SIZE	(L2BPERDMAP + 3 * L2LPERCTL)
+
+/*
+ * maximum number of disk blocks at the various dmapctl levels.
+ */
+#define	MAXL0SIZE	((s64)1 << L2MAXL0SIZE)
+#define	MAXL1SIZE	((s64)1 << L2MAXL1SIZE)
+#define	MAXL2SIZE	((s64)1 << L2MAXL2SIZE)
+
+#define	MAXMAPSIZE	MAXL2SIZE	/* maximum aggregate map size */
+
+/* 
+ * determine the maximum free string for four (lower level) nodes
+ * of the tree.
+ */
+static __inline signed char TREEMAX(signed char *cp)
+{
+	signed char tmp1, tmp2;
+
+	tmp1 = max(*(cp+2), *(cp+3));
+	tmp2 = max(*(cp), *(cp+1));
+
+	return max(tmp1, tmp2);
+}
+
+/*
+ * convert disk block number to the logical block number of the dmap
+ * describing the disk block.  s is the log2(number of logical blocks per page)
+ *
+ * The calculation figures out how many logical pages are in front of the dmap.
+ *	- the number of dmaps preceding it
+ *	- the number of L0 pages preceding its L0 page
+ *	- the number of L1 pages preceding its L1 page
+ *	- 3 is added to account for the L2, L1, and L0 page for this dmap
+ *	- 1 is added to account for the control page of the map.
+ */
+#define BLKTODMAP(b,s)    \
+        ((((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) << (s))
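+
+/*
+ * Worked example (illustrative only, assuming a 4096-byte aggregate
+ * block size so that s = 0): BLKTODMAP(0,0) = 4, since the dmap for
+ * block 0 sits behind the bmap control page and the L2, L1 and L0
+ * pages; BLKTODMAP(8192,0) = 5 is the next dmap, and the first block
+ * of the second L0 region maps to page 1029 = 1024 dmaps + one extra
+ * L0 page + the 4 leading pages.
+ */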
+
+/*
+ * convert disk block number to the logical block number of the LEVEL 0
+ * dmapctl describing the disk block.  s is the log2(number of logical blocks
+ * per page)
+ *
+ * The calculation figures out how many logical pages are in front of the L0.
+ *	- the number of dmap pages preceding it
+ *	- the number of L0 pages preceding it
+ *	- the number of L1 pages preceding its L1 page
+ *	- 2 is added to account for the L2, and L1 page for this L0
+ *	- 1 is added to account for the control page of the map.
+ */
+#define BLKTOL0(b,s)      \
+        (((((b) >> 23) << 10) + ((b) >> 23) + ((b) >> 33) + 2 + 1) << (s))
+
+/*
+ * convert disk block number to the logical block number of the LEVEL 1
+ * dmapctl describing the disk block.  s is the log2(number of logical blocks
+ * per page)
+ *
+ * The calculation figures out how many logical pages are in front of the L1.
+ *	- the number of dmap pages preceding it
+ *	- the number of L0 pages preceding it
+ *	- the number of L1 pages preceding it
+ *	- 1 is added to account for the L2 page
+ *	- 1 is added to account for the control page of the map.
+ */
+#define BLKTOL1(b,s)      \
+     (((((b) >> 33) << 20) + (((b) >> 33) << 10) + ((b) >> 33) + 1 + 1) << (s))
+
+/*
+ * convert disk block number to the logical block number of the dmapctl
+ * at the specified level which describes the disk block.
+ */
+#define BLKTOCTL(b,s,l)   \
+        (((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s)))
+
+/* 
+ * convert aggregate map size to the zero origin dmapctl level of the
+ * top dmapctl.
+ */
+#define	BMAPSZTOLEV(size)	\
+	(((size) <= MAXL0SIZE) ? 0 : ((size) <= MAXL1SIZE) ? 1 : 2)
+
+/* convert disk block number to allocation group number.
+ */
+#define BLKTOAG(b,sbi)	((b) >> ((sbi)->bmap->db_agl2size))
+
+/* convert allocation group number to starting disk block
+ * number.
+ */
+#define AGTOBLK(a,ip)	\
+	((s64)(a) << (JFS_SBI((ip)->i_sb)->bmap->db_agl2size))
+
+/*
+ *	dmap summary tree
+ *
+ * dmaptree must be consistent with dmapctl.
+ */
+struct dmaptree {
+	__le32 nleafs;		/* 4: number of tree leafs      */
+	__le32 l2nleafs;	/* 4: l2 number of tree leafs   */
+	__le32 leafidx;		/* 4: index of first tree leaf  */
+	__le32 height;		/* 4: height of the tree        */
+	s8 budmin;		/* 1: min l2 tree leaf value to combine */
+	s8 stree[TREESIZE];	/* TREESIZE: tree               */
+	u8 pad[2];		/* 2: pad to word boundary      */
+};				/* - 360 -                      */
+
+/*
+ *	dmap page per 8K blocks bitmap
+ */
+struct dmap {
+	__le32 nblocks;		/* 4: num blks covered by this dmap     */
+	__le32 nfree;		/* 4: num of free blks in this dmap     */
+	__le64 start;		/* 8: starting blkno for this dmap      */
+	struct dmaptree tree;	/* 360: dmap tree                       */
+	u8 pad[1672];		/* 1672: pad to 2048 bytes              */
+	__le32 wmap[LPERDMAP];	/* 1024: bits of the working map        */
+	__le32 pmap[LPERDMAP];	/* 1024: bits of the persistent map     */
+};				/* - 4096 -                             */
+
+/*
+ *	disk map control page per level.
+ *
+ * dmapctl must be consistent with dmaptree.
+ */
+struct dmapctl {
+	__le32 nleafs;		/* 4: number of tree leafs      */
+	__le32 l2nleafs;	/* 4: l2 number of tree leafs   */
+	__le32 leafidx;		/* 4: index of the first tree leaf      */
+	__le32 height;		/* 4: height of tree            */
+	s8 budmin;		/* 1: minimum l2 tree leaf value        */
+	s8 stree[CTLTREESIZE];	/* CTLTREESIZE: dmapctl tree    */
+	u8 pad[2714];		/* 2714: pad to 4096            */
+};				/* - 4096 -                     */
+
+/*
+ *	common definition for dmaptree within dmap and dmapctl
+ */
+typedef union dmtree {
+	struct dmaptree t1;
+	struct dmapctl t2;
+} dmtree_t;
+
+/* macros for accessing fields within dmtree */
+#define	dmt_nleafs	t1.nleafs
+#define	dmt_l2nleafs 	t1.l2nleafs
+#define	dmt_leafidx 	t1.leafidx
+#define	dmt_height 	t1.height
+#define	dmt_budmin 	t1.budmin
+#define	dmt_stree 	t1.stree
+
+/* 
+ *	on-disk aggregate disk allocation map descriptor.
+ */
+struct dbmap_disk {
+	__le64 dn_mapsize;	/* 8: number of blocks in aggregate     */
+	__le64 dn_nfree;	/* 8: num free blks in aggregate map    */
+	__le32 dn_l2nbperpage;	/* 4: number of blks per page           */
+	__le32 dn_numag;	/* 4: total number of ags               */
+	__le32 dn_maxlevel;	/* 4: number of active ags              */
+	__le32 dn_maxag;	/* 4: max active alloc group number     */
+	__le32 dn_agpref;	/* 4: preferred alloc group (hint)      */
+	__le32 dn_aglevel;	/* 4: dmapctl level holding the AG      */
+	__le32 dn_agheigth;	/* 4: height in dmapctl of the AG       */
+	__le32 dn_agwidth;	/* 4: width in dmapctl of the AG        */
+	__le32 dn_agstart;	/* 4: start tree index at AG height     */
+	__le32 dn_agl2size;	/* 4: l2 num of blks per alloc group    */
+	__le64 dn_agfree[MAXAG];/* 8*MAXAG: per AG free count           */
+	__le64 dn_agsize;	/* 8: num of blks per alloc group       */
+	s8 dn_maxfreebud;	/* 1: max free buddy system             */
+	u8 pad[3007];		/* 3007: pad to 4096                    */
+};				/* - 4096 -                             */
+
+struct dbmap {
+	s64 dn_mapsize;		/* number of blocks in aggregate     */
+	s64 dn_nfree;		/* num free blks in aggregate map    */
+	int dn_l2nbperpage;	/* number of blks per page           */
+	int dn_numag;		/* total number of ags               */
+	int dn_maxlevel;	/* number of active ags              */
+	int dn_maxag;		/* max active alloc group number     */
+	int dn_agpref;		/* preferred alloc group (hint)      */
+	int dn_aglevel;		/* dmapctl level holding the AG      */
+	int dn_agheigth;	/* height in dmapctl of the AG       */
+	int dn_agwidth;		/* width in dmapctl of the AG        */
+	int dn_agstart;		/* start tree index at AG height     */
+	int dn_agl2size;	/* l2 num of blks per alloc group    */
+	s64 dn_agfree[MAXAG];	/* per AG free count           */
+	s64 dn_agsize;		/* num of blks per alloc group       */
+	signed char dn_maxfreebud;	/* max free buddy system             */
+};				/* - 4096 -                             */
+/* 
+ *	in-memory aggregate disk allocation map descriptor.
+ */
+struct bmap {
+	struct dbmap db_bmap;		/* on-disk aggregate map descriptor */
+	struct inode *db_ipbmap;	/* ptr to aggregate map incore inode */
+	struct semaphore db_bmaplock;	/* aggregate map lock */
+	atomic_t db_active[MAXAG];	/* count of active, open files in AG */
+	u32 *db_DBmap;
+};
+
+/* macros for accessing fields within in-memory aggregate map descriptor */
+#define	db_mapsize	db_bmap.dn_mapsize
+#define	db_nfree	db_bmap.dn_nfree
+#define	db_agfree	db_bmap.dn_agfree
+#define	db_agsize	db_bmap.dn_agsize
+#define	db_agl2size	db_bmap.dn_agl2size
+#define	db_agwidth	db_bmap.dn_agwidth
+#define	db_agheigth	db_bmap.dn_agheigth
+#define	db_agstart	db_bmap.dn_agstart
+#define	db_numag	db_bmap.dn_numag
+#define	db_maxlevel	db_bmap.dn_maxlevel
+#define	db_aglevel	db_bmap.dn_aglevel
+#define	db_agpref	db_bmap.dn_agpref
+#define	db_maxag	db_bmap.dn_maxag
+#define	db_maxfreebud	db_bmap.dn_maxfreebud
+#define	db_l2nbperpage	db_bmap.dn_l2nbperpage
+
+/*
+ * macros for various conversions needed by the allocators.
+ * blkstol2(), cntlz(), and cnttz() are operating system dependent functions.
+ */
+/* convert number of blocks to log2 number of blocks, rounding up to
+ * the next log2 value if blocks is not a l2 multiple.
+ */
+#define	BLKSTOL2(d)		(blkstol2(d))
+
+/* convert number of leafs to log2 leaf value */
+#define	NLSTOL2BSZ(n)		(31 - cntlz((n)) + BUDMIN)
+
+/* convert leaf index to log2 leaf value */
+#define	LITOL2BSZ(n,m,b)	((((n) == 0) ? (m) : cnttz((n))) + (b))
+
+/* convert a block number to a dmap control leaf index */
+#define BLKTOCTLLEAF(b,m)	\
+	(((b) & (((s64)1 << ((m) + L2LPERCTL)) - 1)) >> (m))
+
+/* convert log2 leaf value to buddy size */
+#define	BUDSIZE(s,m)		(1 << ((s) - (m)))
+
+/*
+ *	external references.
+ */
+extern int dbMount(struct inode *ipbmap);
+
+extern int dbUnmount(struct inode *ipbmap, int mounterror);
+
+extern int dbFree(struct inode *ipbmap, s64 blkno, s64 nblocks);
+
+extern int dbUpdatePMap(struct inode *ipbmap,
+			int free, s64 blkno, s64 nblocks, struct tblock * tblk);
+
+extern int dbNextAG(struct inode *ipbmap);
+
+extern int dbAlloc(struct inode *ipbmap, s64 hint, s64 nblocks, s64 * results);
+
+extern int dbReAlloc(struct inode *ipbmap,
+		     s64 blkno, s64 nblocks, s64 addnblocks, s64 * results);
+
+extern int dbSync(struct inode *ipbmap);
+extern int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks);
+extern int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks);
+extern void dbFinalizeBmap(struct inode *ipbmap);
+extern s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
+#endif				/* _H_JFS_DMAP */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
new file mode 100644
index 0000000..e357890
--- /dev/null
+++ b/fs/jfs/jfs_dtree.c
@@ -0,0 +1,4752 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ *	jfs_dtree.c: directory B+-tree manager
+ *
+ * B+-tree with variable length key directory:
+ *
+ * each directory page is structured as an array of 32-byte
+ * directory entry slots initialized as a freelist
+ * to avoid search/compaction of free space at insertion.
+ * when an entry is inserted, a number of slots are allocated
+ * from the freelist as required to store variable length data
+ * of the entry; when the entry is deleted, slots of the entry
+ * are returned to freelist.
+ *
+ * leaf entry stores full name as key and file serial number
+ * (aka inode number) as data.
+ * internal/router entry stores suffix-compressed name
+ * as key and simple extent descriptor as data.
+ *
+ * each directory page maintains a sorted entry index table
+ * which stores the start slot index of sorted entries
+ * to allow binary search on the table.
+ *
+ * directory starts as a root/leaf page in on-disk inode
+ * inline data area.
+ * when it becomes full, it moves to a leaf in an external extent
+ * of length 1 block. each time this first leaf becomes full,
+ * it is extended rather than split (its size is doubled),
+ * until its length reaches 4 KBytes; from then on a full extent is
+ * split into a new 4 KByte extent,
+ * to reduce external fragmentation of small directories.
+ *
+ * blah, blah, blah, for linear scan of directory in pieces by
+ * readdir().
+ *
+ *
+ *	case-insensitive directory file system
+ *
+ * names are stored in a case-sensitive way in the leaf entry,
+ * but searched and compared in case-insensitive (uppercase) order
+ * (i.e., both search key and entry key are folded for search/compare):
+ * (note that case-sensitive order is BROKEN in storage, e.g.,
+ *  sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad
+ *
+ *  entries which fold to the same key make up an equivalence class
+ *  whose members are stored as a contiguous cluster (which may cross a page
+ *  boundary) but whose order is arbitrary and act as duplicates, e.g.,
+ *  abc, Abc, aBc, abC)
+ *
+ * once a match is found at a leaf, a scan forward/backward is required:
+ * in a case-insensitive search, for duplicates;
+ * in a case-sensitive search, for the exact match
+ *
+ * router entry must be created/stored in case-insensitive way
+ * in internal entry:
+ * (right most key of left page and left most key of right page
+ * are folded, and its suffix compression is propagated as router
+ * key in parent)
+ * (e.g., if a split occurs between <abc> and <aBd>, <ABD> rather than <aB>
+ * should be made the router key for the split)
+ *
+ * case-insensitive search:
+ *
+ * 	fold search key;
+ *
+ *	case-insensitive search of B-tree:
+ *	for internal entry, router key is already folded;
+ *	for leaf entry, fold the entry key before comparison.
+ *
+ *	if (leaf entry case-insensitive match found)
+ *		if (next entry satisfies case-insensitive match)
+ *			return EDUPLICATE;
+ *		if (prev entry satisfies case-insensitive match)
+ *			return EDUPLICATE;
+ *		return match;
+ *	else
+ *		return no match;
+ *
+ * 	serialization:
+ * target directory inode lock is being held on entry/exit
+ * of all main directory service routines.
+ *
+ *	log based recovery:
+ */
+
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_dmap.h"
+#include "jfs_unicode.h"
+#include "jfs_debug.h"
+
+/* dtree split parameter */
+struct dtsplit {
+	struct metapage *mp;
+	s16 index;
+	s16 nslot;
+	struct component_name *key;
+	ddata_t *data;
+	struct pxdlist *pxdlist;
+};
+
+#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)
+
+/* get page buffer for specified block address */
+#define DT_GETPAGE(IP, BN, MP, SIZE, P, RC)\
+{\
+	BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot)\
+	if (!(RC))\
+	{\
+		if (((P)->header.nextindex > (((BN)==0)?DTROOTMAXSLOT:(P)->header.maxslot)) ||\
+		    ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT)))\
+		{\
+			BT_PUTPAGE(MP);\
+			jfs_error((IP)->i_sb, "DT_GETPAGE: dtree page corrupt");\
+			MP = NULL;\
+			RC = -EIO;\
+		}\
+	}\
+}
+
+/* for consistency */
+#define DT_PUTPAGE(MP) BT_PUTPAGE(MP)
+
+#define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
+	BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)
+
+/*
+ * forward references
+ */
+static int dtSplitUp(tid_t tid, struct inode *ip,
+		     struct dtsplit * split, struct btstack * btstack);
+
+static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
+		       struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
+
+static int dtExtendPage(tid_t tid, struct inode *ip,
+			struct dtsplit * split, struct btstack * btstack);
+
+static int dtSplitRoot(tid_t tid, struct inode *ip,
+		       struct dtsplit * split, struct metapage ** rmpp);
+
+static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
+		      dtpage_t * fp, struct btstack * btstack);
+
+static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
+
+static int dtReadFirst(struct inode *ip, struct btstack * btstack);
+
+static int dtReadNext(struct inode *ip,
+		      loff_t * offset, struct btstack * btstack);
+
+static int dtCompare(struct component_name * key, dtpage_t * p, int si);
+
+static int ciCompare(struct component_name * key, dtpage_t * p, int si,
+		     int flag);
+
+static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
+		     int flag);
+
+static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
+			      int ri, struct component_name * key, int flag);
+
+static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
+			  ddata_t * data, struct dt_lock **);
+
+static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
+			struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
+			int do_index);
+
+static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
+
+static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
+
+static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
+
+#define ciToUpper(c)	UniStrupr((c)->name)
+
+/*
+ *	read_index_page()
+ *
+ *	Reads a page of a directory's index table.
+ *	Having metadata mapped into the directory inode's address space
+ *	presents a multitude of problems.  We avoid this by mapping to
+ *	the absolute address space outside of the *_metapage routines
+ */
+static struct metapage *read_index_page(struct inode *inode, s64 blkno)
+{
+	int rc;
+	s64 xaddr;
+	int xflag;
+	s32 xlen;
+
+	rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
+	if (rc || (xlen == 0))
+		return NULL;
+
+	return read_metapage(inode, xaddr, PSIZE, 1);
+}
+
+/*
+ *	get_index_page()
+ *
+ *	Same as read_index_page(), but gets a new page without reading
+ */
+static struct metapage *get_index_page(struct inode *inode, s64 blkno)
+{
+	int rc;
+	s64 xaddr;
+	int xflag;
+	s32 xlen;
+
+	rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
+	if (rc || (xlen == 0))
+		return NULL;
+
+	return get_metapage(inode, xaddr, PSIZE, 1);
+}
+
+/*
+ *	find_index()
+ *
+ *	Returns dtree page containing directory table entry for specified
+ *	index and pointer to its entry.
+ *
+ *	mp must be released by caller.
+ */
+static struct dir_table_slot *find_index(struct inode *ip, u32 index,
+					 struct metapage ** mp, s64 *lblock)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	s64 blkno;
+	s64 offset;
+	int page_offset;
+	struct dir_table_slot *slot;
+	static int maxWarnings = 10;
+
+	if (index < 2) {
+		if (maxWarnings) {
+			jfs_warn("find_index called with index = %d", index);
+			maxWarnings--;
+		}
+		return NULL;
+	}
+
+	if (index >= jfs_ip->next_index) {
+		jfs_warn("find_index called with index >= next_index");
+		return NULL;
+	}
+
+	if (jfs_dirtable_inline(ip)) {
+		/*
+		 * Inline directory table
+		 */
+		*mp = NULL;
+		slot = &jfs_ip->i_dirtable[index - 2];
+	} else {
+		offset = (index - 2) * sizeof(struct dir_table_slot);
+		page_offset = offset & (PSIZE - 1);
+		blkno = ((offset + 1) >> L2PSIZE) <<
+		    JFS_SBI(ip->i_sb)->l2nbperpage;
+
+		if (*mp && (*lblock != blkno)) {
+			release_metapage(*mp);
+			*mp = NULL;
+		}
+		if (*mp == 0) {
+			*lblock = blkno;
+			*mp = read_index_page(ip, blkno);
+		}
+		if (*mp == 0) {
+			jfs_err("find_index: error reading directory table");
+			return NULL;
+		}
+
+		slot =
+		    (struct dir_table_slot *) ((char *) (*mp)->data +
+					       page_offset);
+	}
+	return slot;
+}
+
+static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
+			      u32 index)
+{
+	struct tlock *tlck;
+	struct linelock *llck;
+	struct lv *lv;
+
+	tlck = txLock(tid, ip, mp, tlckDATA);
+	llck = (struct linelock *) tlck->lock;
+
+	if (llck->index >= llck->maxcnt)
+		llck = txLinelock(llck);
+	lv = &llck->lv[llck->index];
+
+	/*
+	 *      Linelock slot size is twice the size of directory table
+	 *      slot size.  512 entries per page.
+	 */
+	lv->offset = ((index - 2) & 511) >> 1;
+	lv->length = 1;
+	llck->index++;
+}
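+
+/*
+ * Worked example (illustrative only): directory table slots are 8
+ * bytes, so a 4 KB page holds 512 of them and one 16-byte linelock
+ * slot covers two table slots.  For index = 600 the entry lives on
+ * the second index page, and ((600 - 2) & 511) >> 1 = 43 is the
+ * linelock offset recorded above.
+ */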
+
+/*
+ *	add_index()
+ *
+ *	Adds an entry to the directory index table.  This is used to provide
+ *	each directory entry with a persistent index in which to resume
+ *	directory traversals
+ */
+static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
+{
+	struct super_block *sb = ip->i_sb;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	u64 blkno;
+	struct dir_table_slot *dirtab_slot;
+	u32 index;
+	struct linelock *llck;
+	struct lv *lv;
+	struct metapage *mp;
+	s64 offset;
+	uint page_offset;
+	struct tlock *tlck;
+	s64 xaddr;
+
+	ASSERT(DO_INDEX(ip));
+
+	if (jfs_ip->next_index < 2) {
+		jfs_warn("add_index: next_index = %d.  Resetting!",
+			   jfs_ip->next_index);
+		jfs_ip->next_index = 2;
+	}
+
+	index = jfs_ip->next_index++;
+
+	if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
+		/*
+		 * i_size reflects size of index table, or 8 bytes per entry.
+		 */
+		ip->i_size = (loff_t) (index - 1) << 3;
+
+		/*
+		 * dir table fits inline within inode
+		 */
+		dirtab_slot = &jfs_ip->i_dirtable[index-2];
+		dirtab_slot->flag = DIR_INDEX_VALID;
+		dirtab_slot->slot = slot;
+		DTSaddress(dirtab_slot, bn);
+
+		set_cflag(COMMIT_Dirtable, ip);
+
+		return index;
+	}
+	if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
+		struct dir_table_slot temp_table[12];
+
+		/*
+		 * It's time to move the inline table to an external
+		 * page and begin to build the xtree
+		 */
+		if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage) ||
+		    dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
+			goto clean_up;	/* No space */
+
+		/*
+		 * Save the table, we're going to overwrite it with the
+		 * xtree root
+		 */
+		memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
+
+		/*
+		 * Initialize empty x-tree
+		 */
+		xtInitRoot(tid, ip);
+
+		/*
+		 * Allocate the first block & add it to the xtree
+		 */
+		if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
+			/* This really shouldn't fail */
+			jfs_warn("add_index: xtInsert failed!");
+			memcpy(&jfs_ip->i_dirtable, temp_table,
+			       sizeof (temp_table));
+			goto clean_up;
+		}
+		ip->i_size = PSIZE;
+
+		if ((mp = get_index_page(ip, 0)) == 0) {
+			jfs_err("add_index: get_metapage failed!");
+			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
+			memcpy(&jfs_ip->i_dirtable, temp_table,
+			       sizeof (temp_table));
+			goto clean_up;
+		}
+		tlck = txLock(tid, ip, mp, tlckDATA);
+		llck = (struct linelock *) & tlck->lock;
+		ASSERT(llck->index == 0);
+		lv = &llck->lv[0];
+
+		lv->offset = 0;
+		lv->length = 6;	/* tlckDATA slot size is 16 bytes */
+		llck->index++;
+
+		memcpy(mp->data, temp_table, sizeof(temp_table));
+
+		mark_metapage_dirty(mp);
+		release_metapage(mp);
+
+		/*
+		 * Logging is now directed by xtree tlocks
+		 */
+		clear_cflag(COMMIT_Dirtable, ip);
+	}
+
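+	/*
+	 * The entry now lives in the external table: compute its byte
+	 * offset, the offset within its metadata page, and the logical
+	 * block number of that page within the directory inode.
+	 */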
+	offset = (index - 2) * sizeof(struct dir_table_slot);
+	page_offset = offset & (PSIZE - 1);
+	blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
+	if (page_offset == 0) {
+		/*
+		 * This will be the beginning of a new page
+		 */
+		xaddr = 0;
+		if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
+			jfs_warn("add_index: xtInsert failed!");
+			goto clean_up;
+		}
+		ip->i_size += PSIZE;
+
+		if ((mp = get_index_page(ip, blkno)))
+			memset(mp->data, 0, PSIZE);	/* Just looks better */
+		else
+			xtTruncate(tid, ip, offset, COMMIT_PWMAP);
+	} else
+		mp = read_index_page(ip, blkno);
+
+	if (mp == 0) {
+		jfs_err("add_index: get/read_metapage failed!");
+		goto clean_up;
+	}
+
+	lock_index(tid, ip, mp, index);
+
+	dirtab_slot =
+	    (struct dir_table_slot *) ((char *) mp->data + page_offset);
+	dirtab_slot->flag = DIR_INDEX_VALID;
+	dirtab_slot->slot = slot;
+	DTSaddress(dirtab_slot, bn);
+
+	mark_metapage_dirty(mp);
+	release_metapage(mp);
+
+	return index;
+
+      clean_up:
+
+	jfs_ip->next_index--;
+
+	return 0;
+}
+
+/*
+ *	free_index()
+ *
+ *	Marks an entry in the directory index table as free.
+ */
+static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
+{
+	struct dir_table_slot *dirtab_slot;
+	s64 lblock;
+	struct metapage *mp = NULL;
+
+	dirtab_slot = find_index(ip, index, &mp, &lblock);
+
+	if (dirtab_slot == 0)
+		return;
+
+	dirtab_slot->flag = DIR_INDEX_FREE;
+	dirtab_slot->slot = dirtab_slot->addr1 = 0;
+	dirtab_slot->addr2 = cpu_to_le32(next);
+
+	if (mp) {
+		lock_index(tid, ip, mp, index);
+		mark_metapage_dirty(mp);
+		release_metapage(mp);
+	} else
+		set_cflag(COMMIT_Dirtable, ip);
+}
+
+/*
+ *	modify_index()
+ *
+ *	Changes an entry in the directory index table
+ */
+static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
+			 int slot, struct metapage ** mp, u64 *lblock)
+{
+	struct dir_table_slot *dirtab_slot;
+
+	dirtab_slot = find_index(ip, index, mp, lblock);
+
+	if (dirtab_slot == 0)
+		return;
+
+	DTSaddress(dirtab_slot, bn);
+	dirtab_slot->slot = slot;
+
+	if (*mp) {
+		lock_index(tid, ip, *mp, index);
+		mark_metapage_dirty(*mp);
+	} else
+		set_cflag(COMMIT_Dirtable, ip);
+}
+
+/*
+ *	read_index()
+ *
+ *	reads a directory table slot
+ */
+static int read_index(struct inode *ip, u32 index,
+		     struct dir_table_slot * dirtab_slot)
+{
+	s64 lblock;
+	struct metapage *mp = NULL;
+	struct dir_table_slot *slot;
+
+	slot = find_index(ip, index, &mp, &lblock);
+	if (slot == 0) {
+		return -EIO;
+	}
+
+	memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
+
+	if (mp)
+		release_metapage(mp);
+
+	return 0;
+}
+
+/*
+ *	dtSearch()
+ *
+ * function:
+ *	Search for the entry with specified key
+ *
+ * parameter:
+ *
+ * return: 0 - search result on stack, leaf page pinned;
+ *	   errno - I/O error
+ */
+int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
+	     struct btstack * btstack, int flag)
+{
+	int rc = 0;
+	int cmp = 1;		/* init for empty page */
+	s64 bn;
+	struct metapage *mp;
+	dtpage_t *p;
+	s8 *stbl;
+	int base, index, lim;
+	struct btframe *btsp;
+	pxd_t *pxd;
+	int psize = 288;	/* initial in-line directory */
+	ino_t inumber;
+	struct component_name ciKey;
+	struct super_block *sb = ip->i_sb;
+
+	ciKey.name =
+	    (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+				GFP_NOFS);
+	if (ciKey.name == 0) {
+		rc = -ENOMEM;
+		goto dtSearch_Exit2;
+	}
+
+
+	/* uppercase search key for c-i directory */
+	UniStrcpy(ciKey.name, key->name);
+	ciKey.namlen = key->namlen;
+
+	/* only uppercase if case-insensitive support is on */
+	if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
+		ciToUpper(&ciKey);
+	}
+	BT_CLR(btstack);	/* reset stack */
+
+	/* init level count for max pages to split */
+	btstack->nsplit = 1;
+
+	/*
+	 *      search down tree from root:
+	 *
+	 * between two consecutive entries <Ki, Pi> and <Kj, Pj> of an
+	 * internal page, child page Pi contains the entries with key K,
+	 * Ki <= K < Kj.
+	 *
+	 * if the entry with search key K is not found:
+	 * internal page search finds the entry with the largest key Ki
+	 * less than K, which points to the child page to search;
+	 * leaf page search finds the entry with the smallest key Kj
+	 * greater than K, so that the returned index is the position of
+	 * the entry to be shifted right for insertion of the new entry.
+	 * for an empty tree, the search key is greater than any key of the tree.
+	 *
+	 * by convention, root bn = 0.
+	 */
+	for (bn = 0;;) {
+		/* get/pin the page to search */
+		DT_GETPAGE(ip, bn, mp, psize, p, rc);
+		if (rc)
+			goto dtSearch_Exit1;
+
+		/* get sorted entry table of the page */
+		stbl = DT_GETSTBL(p);
+
+		/*
+		 * binary search with search key K on the current page.
+		 */
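+		/* <base> is the lowest remaining candidate index and <lim>
+		 * the number of candidates left; each pass probes the middle
+		 * entry and halves <lim>.
+		 */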
+		for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
+			index = base + (lim >> 1);
+
+			if (p->header.flag & BT_LEAF) {
+				/* uppercase leaf name to compare */
+				cmp =
+				    ciCompare(&ciKey, p, stbl[index],
+					      JFS_SBI(sb)->mntflag);
+			} else {
+				/* router key is in uppercase */
+
+				cmp = dtCompare(&ciKey, p, stbl[index]);
+			}
+			if (cmp == 0) {
+				/*
+				 *      search hit
+				 */
+				/* search hit - leaf page:
+				 * return the entry found
+				 */
+				if (p->header.flag & BT_LEAF) {
+					inumber = le32_to_cpu(
+			((struct ldtentry *) & p->slot[stbl[index]])->inumber);
+
+					/*
+					 * search for JFS_LOOKUP
+					 */
+					if (flag == JFS_LOOKUP) {
+						*data = inumber;
+						rc = 0;
+						goto out;
+					}
+
+					/*
+					 * search for JFS_CREATE
+					 */
+					if (flag == JFS_CREATE) {
+						*data = inumber;
+						rc = -EEXIST;
+						goto out;
+					}
+
+					/*
+					 * search for JFS_REMOVE or JFS_RENAME
+					 */
+					if ((flag == JFS_REMOVE ||
+					     flag == JFS_RENAME) &&
+					    *data != inumber) {
+						rc = -ESTALE;
+						goto out;
+					}
+
+					/*
+					 * JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
+					 */
+					/* save search result */
+					*data = inumber;
+					btsp = btstack->top;
+					btsp->bn = bn;
+					btsp->index = index;
+					btsp->mp = mp;
+
+					rc = 0;
+					goto dtSearch_Exit1;
+				}
+
+				/* search hit - internal page:
+				 * descend/search its child page
+				 */
+				goto getChild;
+			}
+
+			if (cmp > 0) {
+				base = index + 1;
+				--lim;
+			}
+		}
+
+		/*
+		 *      search miss
+		 *
+		 * base is the smallest index with key (Kj) greater than the
+		 * search key (K); it may be zero or (maxindex + 1).
+		 */
+		/*
+		 * search miss - leaf page
+		 *
+		 * return location of entry (base) where new entry with
+		 * search key K is to be inserted.
+		 */
+		if (p->header.flag & BT_LEAF) {
+			/*
+			 * search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
+			 */
+			if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
+			    flag == JFS_RENAME) {
+				rc = -ENOENT;
+				goto out;
+			}
+
+			/*
+			 * search for JFS_CREATE|JFS_FINDDIR:
+			 *
+			 * save search result
+			 */
+			*data = 0;
+			btsp = btstack->top;
+			btsp->bn = bn;
+			btsp->index = base;
+			btsp->mp = mp;
+
+			rc = 0;
+			goto dtSearch_Exit1;
+		}
+
+		/*
+		 * search miss - internal page
+		 *
+		 * if base is non-zero, decrement base by one to get the parent
+		 * entry of the child page to search.
+		 */
+		index = base ? base - 1 : base;
+
+		/*
+		 * go down to child page
+		 */
+	      getChild:
+		/* update max. number of pages to split */
+		if (BT_STACK_FULL(btstack)) {
+			/* Something's corrupted, mark the filesystem dirty so
+			 * chkdsk will fix it.
+			 */
+			jfs_error(sb, "stack overrun in dtSearch!");
+			BT_STACK_DUMP(btstack);
+			rc = -EIO;
+			goto out;
+		}
+		btstack->nsplit++;
+
+		/* push (bn, index) of the parent page/entry */
+		BT_PUSH(btstack, bn, index);
+
+		/* get the child page block number */
+		pxd = (pxd_t *) & p->slot[stbl[index]];
+		bn = addressPXD(pxd);
+		psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
+
+		/* unpin the parent page */
+		DT_PUTPAGE(mp);
+	}
+
+      out:
+	DT_PUTPAGE(mp);
+
+      dtSearch_Exit1:
+
+	kfree(ciKey.name);
+
+      dtSearch_Exit2:
+
+	return rc;
+}
+
+
+/*
+ *	dtInsert()
+ *
+ * function: insert an entry to directory tree
+ *
+ * parameter:
+ *
+ * return: 0 - success;
+ *	   errno - failure;
+ */
+int dtInsert(tid_t tid, struct inode *ip,
+	 struct component_name * name, ino_t * fsn, struct btstack * btstack)
+{
+	int rc = 0;
+	struct metapage *mp;	/* meta-page buffer */
+	dtpage_t *p;		/* base B+-tree index page */
+	s64 bn;
+	int index;
+	struct dtsplit split;	/* split information */
+	ddata_t data;
+	struct dt_lock *dtlck;
+	int n;
+	struct tlock *tlck;
+	struct lv *lv;
+
+	/*
+	 *      retrieve search result
+	 *
+	 * dtSearch() returns (leaf page pinned, index at which to insert).
+	 * n.b. dtSearch() may return an index of (maxindex + 1) for
+	 * a full page.
+	 */
+	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
+
+	/*
+	 *      insert entry for new key
+	 */
+	if (DO_INDEX(ip)) {
+		if (JFS_IP(ip)->next_index == DIREND) {
+			DT_PUTPAGE(mp);
+			return -EMLINK;
+		}
+		n = NDTLEAF(name->namlen);
+		data.leaf.tid = tid;
+		data.leaf.ip = ip;
+	} else {
+		n = NDTLEAF_LEGACY(name->namlen);
+		data.leaf.ip = NULL;	/* signifies legacy directory format */
+	}
+	data.leaf.ino = *fsn;
+
+	/*
+	 *      leaf page does not have enough room for new entry:
+	 *
+	 *      extend/split the leaf page;
+	 *
+	 * dtSplitUp() will insert the entry and unpin the leaf page.
+	 */
+	if (n > p->header.freecnt) {
+		split.mp = mp;
+		split.index = index;
+		split.nslot = n;
+		split.key = name;
+		split.data = &data;
+		rc = dtSplitUp(tid, ip, &split, btstack);
+		return rc;
+	}
+
+	/*
+	 *      leaf page does have enough room for new entry:
+	 *
+	 *      insert the new data entry into the leaf page;
+	 */
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire a transaction lock on the leaf page
+	 */
+	tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
+	dtlck = (struct dt_lock *) & tlck->lock;
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+
+	/* linelock header */
+	lv->offset = 0;
+	lv->length = 1;
+	dtlck->index++;
+
+	dtInsertEntry(p, index, name, &data, &dtlck);
+
+	/* linelock stbl of non-root leaf page */
+	if (!(p->header.flag & BT_ROOT)) {
+		if (dtlck->index >= dtlck->maxcnt)
+			dtlck = (struct dt_lock *) txLinelock(dtlck);
+		lv = & dtlck->lv[dtlck->index];
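+		/* the stbl packs one byte per entry into full dtree slots
+		 * (1 << L2DTSLOTSIZE bytes each); lock from the slot holding
+		 * entry <index> through the slot holding the last entry,
+		 * since the insert shifts them.
+		 */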
+		n = index >> L2DTSLOTSIZE;
+		lv->offset = p->header.stblindex + n;
+		lv->length =
+		    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
+		dtlck->index++;
+	}
+
+	/* unpin the leaf page */
+	DT_PUTPAGE(mp);
+
+	return 0;
+}
+
+
+/*
+ *	dtSplitUp()
+ *
+ * function: propagate insertion bottom up;
+ *
+ * parameter:
+ *
+ * return: 0 - success;
+ *	   errno - failure;
+ * 	leaf page unpinned;
+ */
+static int dtSplitUp(tid_t tid,
+	  struct inode *ip, struct dtsplit * split, struct btstack * btstack)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	int rc = 0;
+	struct metapage *smp;
+	dtpage_t *sp;		/* split page */
+	struct metapage *rmp;
+	dtpage_t *rp;		/* new right page split from sp */
+	pxd_t rpxd;		/* new right page extent descriptor */
+	struct metapage *lmp;
+	dtpage_t *lp;		/* left child page */
+	int skip;		/* index of entry of insertion */
+	struct btframe *parent;	/* parent page entry on traverse stack */
+	s64 xaddr, nxaddr;
+	int xlen, xsize;
+	struct pxdlist pxdlist;
+	pxd_t *pxd;
+	struct component_name key = { 0, NULL };
+	ddata_t *data = split->data;
+	int n;
+	struct dt_lock *dtlck;
+	struct tlock *tlck;
+	struct lv *lv;
+	int quota_allocation = 0;
+
+	/* get split page */
+	smp = split->mp;
+	sp = DT_PAGE(ip, smp);
+
+	key.name =
+	    (wchar_t *) kmalloc((JFS_NAME_MAX + 2) * sizeof(wchar_t),
+				GFP_NOFS);
+	if (key.name == 0) {
+		DT_PUTPAGE(smp);
+		rc = -ENOMEM;
+		goto dtSplitUp_Exit;
+	}
+
+	/*
+	 *      split leaf page
+	 *
+	 * The split routines insert the new entry, and
+	 * acquire txLock as appropriate.
+	 */
+	/*
+	 *      split root leaf page:
+	 */
+	if (sp->header.flag & BT_ROOT) {
+		/*
+		 * allocate a single extent child page
+		 */
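+		/* estimate whether one block is enough: slots in one block,
+		 * minus its sorted-entry table, minus what the root already
+		 * holds (header + entries); if what remains cannot safely
+		 * hold the new entry, allocate a second block.
+		 */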
+		xlen = 1;
+		n = sbi->bsize >> L2DTSLOTSIZE;
+		n -= (n + 31) >> L2DTSLOTSIZE;	/* stbl size */
+		n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
+		if (n <= split->nslot)
+			xlen++;
+		if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
+			DT_PUTPAGE(smp);
+			goto freeKeyName;
+		}
+
+		pxdlist.maxnpxd = 1;
+		pxdlist.npxd = 0;
+		pxd = &pxdlist.pxd[0];
+		PXDaddress(pxd, xaddr);
+		PXDlength(pxd, xlen);
+		split->pxdlist = &pxdlist;
+		rc = dtSplitRoot(tid, ip, split, &rmp);
+
+		if (rc)
+			dbFree(ip, xaddr, xlen);
+		else
+			DT_PUTPAGE(rmp);
+
+		DT_PUTPAGE(smp);
+
+		goto freeKeyName;
+	}
+
+	/*
+	 *      extend first leaf page
+	 *
+	 * extend the 1st extent if less than buffer page size
+	 * (dtExtendPage() returns leaf page unpinned)
+	 */
+	pxd = &sp->header.self;
+	xlen = lengthPXD(pxd);
+	xsize = xlen << sbi->l2bsize;
+	if (xsize < PSIZE) {
+		xaddr = addressPXD(pxd);
+		n = xsize >> L2DTSLOTSIZE;
+		n -= (n + 31) >> L2DTSLOTSIZE;	/* stbl size */
+		if ((n + sp->header.freecnt) <= split->nslot)
+			n = xlen + (xlen << 1);
+		else
+			n = xlen;
+
+		/* Allocate blocks to quota. */
+		if (DQUOT_ALLOC_BLOCK(ip, n)) {
+			rc = -EDQUOT;
+			goto extendOut;
+		}
+		quota_allocation += n;
+
+		if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
+				    (s64) n, &nxaddr)))
+			goto extendOut;
+
+		pxdlist.maxnpxd = 1;
+		pxdlist.npxd = 0;
+		pxd = &pxdlist.pxd[0];
+		PXDaddress(pxd, nxaddr);
+		PXDlength(pxd, xlen + n);
+		split->pxdlist = &pxdlist;
+		if ((rc = dtExtendPage(tid, ip, split, btstack))) {
+			nxaddr = addressPXD(pxd);
+			if (xaddr != nxaddr) {
+				/* free relocated extent */
+				xlen = lengthPXD(pxd);
+				dbFree(ip, nxaddr, (s64) xlen);
+			} else {
+				/* free extended delta */
+				xlen = lengthPXD(pxd) - n;
+				xaddr = addressPXD(pxd) + xlen;
+				dbFree(ip, xaddr, (s64) n);
+			}
+		}
+
+	      extendOut:
+		DT_PUTPAGE(smp);
+		goto freeKeyName;
+	}
+
+	/*
+	 *      split leaf page <sp> into <sp> and a new right page <rp>.
+	 *
+	 * return <rp> pinned and its extent descriptor <rpxd>
+	 */
+	/*
+	 * allocate new directory page extent and
+	 * new index page(s) to cover page split(s)
+	 *
+	 * allocation hint: ?
+	 */
+	n = btstack->nsplit;
+	pxdlist.maxnpxd = pxdlist.npxd = 0;
+	xlen = sbi->nbperpage;
+	for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
+		if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
+			PXDaddress(pxd, xaddr);
+			PXDlength(pxd, xlen);
+			pxdlist.maxnpxd++;
+			continue;
+		}
+
+		DT_PUTPAGE(smp);
+
+		/* undo allocation */
+		goto splitOut;
+	}
+
+	split->pxdlist = &pxdlist;
+	if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
+		DT_PUTPAGE(smp);
+
+		/* undo allocation */
+		goto splitOut;
+	}
+
+	/*
+	 * propagate up the router entry for the leaf page just split
+	 *
+	 * insert a router entry for the new page into the parent page,
+	 * propagate the insert/split up the tree by walking back the stack
+	 * of (bn of parent page, index of child page entry in parent page)
+	 * that were traversed during the search for the page that split.
+	 *
+	 * the propagation of insert/split up the tree stops if the root
+	 * splits or the page inserted into doesn't have to split to hold
+	 * the new entry.
+	 *
+	 * the parent entry for the split page remains the same, and
+	 * a new entry is inserted at its right with the first key and
+	 * block number of the new right page.
+	 *
+	 * There are a maximum of 4 pages pinned at any time:
+	 * two children, left parent and right parent (when the parent splits).
+	 * keep the child pages pinned while working on the parent.
+	 * make sure that all pins are released at exit.
+	 */
+	while ((parent = BT_POP(btstack)) != NULL) {
+		/* parent page specified by stack frame <parent> */
+
+		/* keep current child pages (<lp>, <rp>) pinned */
+		lmp = smp;
+		lp = sp;
+
+		/*
+		 * insert router entry in parent for new right child page <rp>
+		 */
+		/* get the parent page <sp> */
+		DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
+		if (rc) {
+			DT_PUTPAGE(lmp);
+			DT_PUTPAGE(rmp);
+			goto splitOut;
+		}
+
+		/*
+		 * The new key entry goes ONE AFTER the index of parent entry,
+		 * because the split was to the right.
+		 */
+		skip = parent->index + 1;
+
+		/*
+		 * compute the key for the router entry
+		 *
+		 * key suffix compression:
+		 * for internal pages that have leaf pages as children,
+		 * retain only what's needed to distinguish between
+		 * the new entry and the entry on the page to its left.
+		 * If the keys compare equal, retain the entire key.
+		 *
+		 * note that compression is performed only when computing
+		 * the router key at the lowest internal level.
+		 * further compression of the key between pairs of higher
+		 * level internal pages loses too much information and
+		 * the search may fail.
+		 * (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
+		 * results in two adjacent parent entries (a)(xx).
+		 * if a split occurs between these two entries, and
+		 * if compression is applied, the router key (x) of the parent
+		 * entry of the right page will divert the search for x into
+		 * the right subtree and miss x in the left subtree.)
+		 *
+		 * the entire key must be retained for the next-to-leftmost
+		 * internal key at any level of the tree, or search may fail
+		 * (e.g., ?)
+		 */
+		switch (rp->header.flag & BT_TYPE) {
+		case BT_LEAF:
+			/*
+			 * compute the length of prefix for suffix compression
+			 * between last entry of left page and first entry
+			 * of right page
+			 */
+			if ((sp->header.flag & BT_ROOT && skip > 1) ||
+			    sp->header.prev != 0 || skip > 1) {
+				/* compute uppercase router prefix key */
+				rc = ciGetLeafPrefixKey(lp,
+							lp->header.nextindex-1,
+							rp, 0, &key,
+							sbi->mntflag);
+				if (rc) {
+					DT_PUTPAGE(lmp);
+					DT_PUTPAGE(rmp);
+					DT_PUTPAGE(smp);
+					goto splitOut;
+				}
+			} else {
+				/* next to leftmost entry of
+				   lowest internal level */
+
+				/* compute uppercase router key */
+				dtGetKey(rp, 0, &key, sbi->mntflag);
+				key.name[key.namlen] = 0;
+
+				if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
+					ciToUpper(&key);
+			}
+
+			n = NDTINTERNAL(key.namlen);
+			break;
+
+		case BT_INTERNAL:
+			dtGetKey(rp, 0, &key, sbi->mntflag);
+			n = NDTINTERNAL(key.namlen);
+			break;
+
+		default:
+			jfs_err("dtSplitUp(): UFO!");
+			break;
+		}
+
+		/* unpin left child page */
+		DT_PUTPAGE(lmp);
+
+		/*
+		 * compute the data for the router entry
+		 */
+		data->xd = rpxd;	/* child page xd */
+
+		/*
+		 * parent page is full - split the parent page
+		 */
+		if (n > sp->header.freecnt) {
+			/* init for parent page split */
+			split->mp = smp;
+			split->index = skip;	/* index at insert */
+			split->nslot = n;
+			split->key = &key;
+			/* split->data = data; */
+
+			/* unpin right child page */
+			DT_PUTPAGE(rmp);
+
+			/* The split routines insert the new entry,
+			 * acquire txLock as appropriate.
+			 * return <rp> pinned and its block number <rbn>.
+			 */
+			rc = (sp->header.flag & BT_ROOT) ?
+			    dtSplitRoot(tid, ip, split, &rmp) :
+			    dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
+			if (rc) {
+				DT_PUTPAGE(smp);
+				goto splitOut;
+			}
+
+			/* smp and rmp are pinned */
+		}
+		/*
+		 * parent page is not full - insert router entry in parent page
+		 */
+		else {
+			BT_MARK_DIRTY(smp, ip);
+			/*
+			 * acquire a transaction lock on the parent page
+			 */
+			tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
+			dtlck = (struct dt_lock *) & tlck->lock;
+			ASSERT(dtlck->index == 0);
+			lv = & dtlck->lv[0];
+
+			/* linelock header */
+			lv->offset = 0;
+			lv->length = 1;
+			dtlck->index++;
+
+			/* linelock stbl of non-root parent page */
+			if (!(sp->header.flag & BT_ROOT)) {
+				lv++;
+				n = skip >> L2DTSLOTSIZE;
+				lv->offset = sp->header.stblindex + n;
+				lv->length =
+				    ((sp->header.nextindex -
+				      1) >> L2DTSLOTSIZE) - n + 1;
+				dtlck->index++;
+			}
+
+			dtInsertEntry(sp, skip, &key, data, &dtlck);
+
+			/* exit propagate up */
+			break;
+		}
+	}
+
+	/* unpin current split and its right page */
+	DT_PUTPAGE(smp);
+	DT_PUTPAGE(rmp);
+
+	/*
+	 * free remaining extents allocated for split
+	 */
+      splitOut:
+	n = pxdlist.npxd;
+	pxd = &pxdlist.pxd[n];
+	for (; n < pxdlist.maxnpxd; n++, pxd++)
+		dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));
+
+      freeKeyName:
+	kfree(key.name);
+
+	/* Rollback quota allocation */
+	if (rc && quota_allocation)
+		DQUOT_FREE_BLOCK(ip, quota_allocation);
+
+      dtSplitUp_Exit:
+
+	return rc;
+}
+
+
+/*
+ *	dtSplitPage()
+ *
+ * function: Split a non-root page of a btree.
+ *
+ * parameter:
+ *
+ * return: 0 - success;
+ *	   errno - failure;
+ *	return split and new page pinned;
+ */
+static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
+	    struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
+{
+	int rc = 0;
+	struct metapage *smp;
+	dtpage_t *sp;
+	struct metapage *rmp;
+	dtpage_t *rp;		/* new right page allocated */
+	s64 rbn;		/* new right page block number */
+	struct metapage *mp;
+	dtpage_t *p;
+	s64 nextbn;
+	struct pxdlist *pxdlist;
+	pxd_t *pxd;
+	int skip, nextindex, half, left, nxt, off, si;
+	struct ldtentry *ldtentry;
+	struct idtentry *idtentry;
+	u8 *stbl;
+	struct dtslot *f;
+	int fsi, stblsize;
+	int n;
+	struct dt_lock *sdtlck, *rdtlck;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct lv *slv, *rlv, *lv;
+
+	/* get split page */
+	smp = split->mp;
+	sp = DT_PAGE(ip, smp);
+
+	/*
+	 * allocate the new right page for the split
+	 */
+	pxdlist = split->pxdlist;
+	pxd = &pxdlist->pxd[pxdlist->npxd];
+	pxdlist->npxd++;
+	rbn = addressPXD(pxd);
+	rmp = get_metapage(ip, rbn, PSIZE, 1);
+	if (rmp == NULL)
+		return -EIO;
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+		release_metapage(rmp);
+		return -EDQUOT;
+	}
+
+	jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
+
+	BT_MARK_DIRTY(rmp, ip);
+	/*
+	 * acquire a transaction lock on the new right page
+	 */
+	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
+	rdtlck = (struct dt_lock *) & tlck->lock;
+
+	rp = (dtpage_t *) rmp->data;
+	*rpp = rp;
+	rp->header.self = *pxd;
+
+	BT_MARK_DIRTY(smp, ip);
+	/*
+	 * acquire a transaction lock on the split page
+	 *
+	 * action:
+	 */
+	tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
+	sdtlck = (struct dt_lock *) & tlck->lock;
+
+	/* linelock header of split page */
+	ASSERT(sdtlck->index == 0);
+	slv = & sdtlck->lv[0];
+	slv->offset = 0;
+	slv->length = 1;
+	sdtlck->index++;
+
+	/*
+	 * initialize/update sibling pointers between sp and rp
+	 */
+	nextbn = le64_to_cpu(sp->header.next);
+	rp->header.next = cpu_to_le64(nextbn);
+	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
+	sp->header.next = cpu_to_le64(rbn);
+
+	/*
+	 * initialize new right page
+	 */
+	rp->header.flag = sp->header.flag;
+
+	/* compute sorted entry table at start of extent data area */
+	rp->header.nextindex = 0;
+	rp->header.stblindex = 1;
+
+	n = PSIZE >> L2DTSLOTSIZE;
+	rp->header.maxslot = n;
+	stblsize = (n + 31) >> L2DTSLOTSIZE;	/* in unit of slot */
+
+	/* init freelist */
+	fsi = rp->header.stblindex + stblsize;
+	rp->header.freelist = fsi;
+	rp->header.freecnt = rp->header.maxslot - fsi;
+
+	/*
+	 *      sequential append at tail: append without split
+	 *
+	 * If splitting the last page on a level because of appending
+	 * an entry to it (skip is maxentry), it's likely that the access is
+	 * sequential. Adding an empty page on the side of the level is less
+	 * work and can push the fill factor much higher than normal.
+	 * If we're wrong it's no big deal, we'll just do the split the right
+	 * way next time.
+	 * (It may look like it's equally easy to do a similar hack for
+	 * reverse sorted data, that is, split the tree left,
+	 * but it's not. Be my guest.)
+	 */
+	if (nextbn == 0 && split->index == sp->header.nextindex) {
+		/* linelock header + stbl (first slot) of new page */
+		rlv = & rdtlck->lv[rdtlck->index];
+		rlv->offset = 0;
+		rlv->length = 2;
+		rdtlck->index++;
+
+		/*
+		 * initialize freelist of new right page
+		 */
+		f = &rp->slot[fsi];
+		for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
+			f->next = fsi;
+		f->next = -1;
+
+		/* insert entry at the first entry of the new right page */
+		dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);
+
+		goto out;
+	}
+
+	/*
+	 *      non-sequential insert (at possibly middle page)
+	 */
+
+	/*
+	 * update prev pointer of previous right sibling page;
+	 */
+	if (nextbn != 0) {
+		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
+		if (rc) {
+			discard_metapage(rmp);
+			return rc;
+		}
+
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the next page
+		 */
+		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
+		jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
+			tlck, ip, mp);
+		dtlck = (struct dt_lock *) & tlck->lock;
+
+		/* linelock header of previous right sibling page */
+		lv = & dtlck->lv[dtlck->index];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		p->header.prev = cpu_to_le64(rbn);
+
+		DT_PUTPAGE(mp);
+	}
+
+	/*
+	 * split the data between the split and right pages.
+	 */
+	skip = split->index;
+	half = (PSIZE >> L2DTSLOTSIZE) >> 1;	/* swag */
+	left = 0;
+
+	/*
+	 *      compute fill factor for split pages
+	 *
+	 * <nxt> traces the next entry to move to rp
+	 * <off> traces the next entry to stay in sp
+	 */
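+	/* accumulate entry sizes (in slots) until roughly half the page's
+	 * worth of slots stays in <sp>; the new entry's size is charged at
+	 * position <skip>.
+	 */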
+	stbl = (u8 *) & sp->slot[sp->header.stblindex];
+	nextindex = sp->header.nextindex;
+	for (nxt = off = 0; nxt < nextindex; ++off) {
+		if (off == skip)
+			/* check for fill factor with new entry size */
+			n = split->nslot;
+		else {
+			si = stbl[nxt];
+			switch (sp->header.flag & BT_TYPE) {
+			case BT_LEAF:
+				ldtentry = (struct ldtentry *) & sp->slot[si];
+				if (DO_INDEX(ip))
+					n = NDTLEAF(ldtentry->namlen);
+				else
+					n = NDTLEAF_LEGACY(ldtentry->
+							   namlen);
+				break;
+
+			case BT_INTERNAL:
+				idtentry = (struct idtentry *) & sp->slot[si];
+				n = NDTINTERNAL(idtentry->namlen);
+				break;
+
+			default:
+				break;
+			}
+
+			++nxt;	/* advance to next entry to move in sp */
+		}
+
+		left += n;
+		if (left >= half)
+			break;
+	}
+
+	/* <nxt> points to the 1st entry to move */
+
+	/*
+	 *      move entries to right page
+	 *
+	 * dtMoveEntry() initializes rp and reserves entry for insertion
+	 *
+	 * split page moved out entries are linelocked;
+	 * new/right page moved in entries are linelocked;
+	 */
+	/* linelock header + stbl of new right page */
+	rlv = & rdtlck->lv[rdtlck->index];
+	rlv->offset = 0;
+	rlv->length = 5;
+	rdtlck->index++;
+
+	dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
+
+	sp->header.nextindex = nxt;
+
+	/*
+	 * finalize freelist of new right page
+	 */
+	fsi = rp->header.freelist;
+	f = &rp->slot[fsi];
+	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
+		f->next = fsi;
+	f->next = -1;
+
+	/*
+	 * Update directory index table for entries now in right page
+	 */
+	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
+		s64 lblock;
+
+		mp = NULL;
+		stbl = DT_GETSTBL(rp);
+		for (n = 0; n < rp->header.nextindex; n++) {
+			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
+			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
+				     rbn, n, &mp, &lblock);
+		}
+		if (mp)
+			release_metapage(mp);
+	}
+
+	/*
+	 * the skipped index was on the left page,
+	 */
+	if (skip <= off) {
+		/* insert the new entry in the split page */
+		dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);
+
+		/* linelock stbl of split page */
+		if (sdtlck->index >= sdtlck->maxcnt)
+			sdtlck = (struct dt_lock *) txLinelock(sdtlck);
+		slv = & sdtlck->lv[sdtlck->index];
+		n = skip >> L2DTSLOTSIZE;
+		slv->offset = sp->header.stblindex + n;
+		slv->length =
+		    ((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
+		sdtlck->index++;
+	}
+	/*
+	 * the skipped index was on the right page,
+	 */
+	else {
+		/* adjust the skip index to reflect the new position */
+		skip -= nxt;
+
+		/* insert the new entry in the right page */
+		dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
+	}
+
+      out:
+	*rmpp = rmp;
+	*rpxdp = *pxd;
+
+	return rc;
+}
+
+
+/*
+ *	dtExtendPage()
+ *
+ * function: extend 1st/only directory leaf page
+ *
+ * parameter:
+ *
+ * return: 0 - success;
+ *	   errno - failure;
+ *	return extended page pinned;
+ */
+static int dtExtendPage(tid_t tid,
+	     struct inode *ip, struct dtsplit * split, struct btstack * btstack)
+{
+	struct super_block *sb = ip->i_sb;
+	int rc;
+	struct metapage *smp, *pmp, *mp;
+	dtpage_t *sp, *pp;
+	struct pxdlist *pxdlist;
+	pxd_t *pxd, *tpxd;
+	int xlen, xsize;
+	int newstblindex, newstblsize;
+	int oldstblindex, oldstblsize;
+	int fsi, last;
+	struct dtslot *f;
+	struct btframe *parent;
+	int n;
+	struct dt_lock *dtlck;
+	s64 xaddr, txaddr;
+	struct tlock *tlck;
+	struct pxd_lock *pxdlock;
+	struct lv *lv;
+	uint type;
+	struct ldtentry *ldtentry;
+	u8 *stbl;
+
+	/* get page to extend */
+	smp = split->mp;
+	sp = DT_PAGE(ip, smp);
+
+	/* get parent/root page */
+	parent = BT_POP(btstack);
+	DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
+	if (rc)
+		return (rc);
+
+	/*
+	 *      extend the extent
+	 */
+	pxdlist = split->pxdlist;
+	pxd = &pxdlist->pxd[pxdlist->npxd];
+	pxdlist->npxd++;
+
+	xaddr = addressPXD(pxd);
+	tpxd = &sp->header.self;
+	txaddr = addressPXD(tpxd);
+	/* in-place extension */
+	if (xaddr == txaddr) {
+		type = tlckEXTEND;
+	}
+	/* relocation */
+	else {
+		type = tlckNEW;
+
+		/* save moved extent descriptor for later free */
+		tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		pxdlock->flag = mlckFREEPXD;
+		pxdlock->pxd = sp->header.self;
+		pxdlock->index = 1;
+
+		/*
+		 * Update directory index table to reflect new page address
+		 */
+		if (DO_INDEX(ip)) {
+			s64 lblock;
+
+			mp = NULL;
+			stbl = DT_GETSTBL(sp);
+			for (n = 0; n < sp->header.nextindex; n++) {
+				ldtentry =
+				    (struct ldtentry *) & sp->slot[stbl[n]];
+				modify_index(tid, ip,
+					     le32_to_cpu(ldtentry->index),
+					     xaddr, n, &mp, &lblock);
+			}
+			if (mp)
+				release_metapage(mp);
+		}
+	}
+
+	/*
+	 *      extend the page
+	 */
+	sp->header.self = *pxd;
+
+	jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);
+
+	BT_MARK_DIRTY(smp, ip);
+	/*
+	 * acquire a transaction lock on the extended/leaf page
+	 */
+	tlck = txLock(tid, ip, smp, tlckDTREE | type);
+	dtlck = (struct dt_lock *) & tlck->lock;
+	lv = & dtlck->lv[0];
+
+	/* update buffer extent descriptor of extended page */
+	xlen = lengthPXD(pxd);
+	xsize = xlen << JFS_SBI(sb)->l2bsize;
+#ifdef _STILL_TO_PORT
+	bmSetXD(smp, xaddr, xsize);
+#endif				/*  _STILL_TO_PORT */
+
+	/*
+	 * copy old stbl to new stbl at start of extended area
+	 */
+	oldstblindex = sp->header.stblindex;
+	oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
+	newstblindex = sp->header.maxslot;
+	n = xsize >> L2DTSLOTSIZE;
+	newstblsize = (n + 31) >> L2DTSLOTSIZE;
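+	/* the stbl stores one byte per entry, so copying nextindex bytes
+	 * moves the whole table
+	 */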
+	memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
+	       sp->header.nextindex);
+
+	/*
+	 * in-line extension: linelock old area of extended page
+	 */
+	if (type == tlckEXTEND) {
+		/* linelock header */
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+		lv++;
+
+		/* linelock new stbl of extended page */
+		lv->offset = newstblindex;
+		lv->length = newstblsize;
+	}
+	/*
+	 * relocation: linelock whole relocated area
+	 */
+	else {
+		lv->offset = 0;
+		lv->length = sp->header.maxslot + newstblsize;
+	}
+
+	dtlck->index++;
+
+	sp->header.maxslot = n;
+	sp->header.stblindex = newstblindex;
+	/* sp->header.nextindex remains the same */
+
+	/*
+	 * add old stbl region at head of freelist
+	 */
+	fsi = oldstblindex;
+	f = &sp->slot[fsi];
+	last = sp->header.freelist;
+	for (n = 0; n < oldstblsize; n++, fsi++, f++) {
+		f->next = last;
+		last = fsi;
+	}
+	sp->header.freelist = last;
+	sp->header.freecnt += oldstblsize;
+
+	/*
+	 * append free region of newly extended area at tail of freelist
+	 */
+	/* init free region of newly extended area */
+	fsi = n = newstblindex + newstblsize;
+	f = &sp->slot[fsi];
+	for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
+		f->next = fsi;
+	f->next = -1;
+
+	/* append new free region at tail of old freelist */
+	fsi = sp->header.freelist;
+	if (fsi == -1)
+		sp->header.freelist = n;
+	else {
+		do {
+			f = &sp->slot[fsi];
+			fsi = f->next;
+		} while (fsi != -1);
+
+		f->next = n;
+	}
+
+	sp->header.freecnt += sp->header.maxslot - n;
+
+	/*
+	 * insert the new entry
+	 */
+	dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);
+
+	BT_MARK_DIRTY(pmp, ip);
+	/*
+	 * linelock any freeslots residing in old extent
+	 */
+	if (type == tlckEXTEND) {
+		n = sp->header.maxslot >> 2;
+		if (sp->header.freelist < n)
+			dtLinelockFreelist(sp, n, &dtlck);
+	}
+
+	/*
+	 *      update parent entry on the parent/root page
+	 */
+	/*
+	 * acquire a transaction lock on the parent/root page
+	 */
+	tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
+	dtlck = (struct dt_lock *) & tlck->lock;
+	lv = & dtlck->lv[dtlck->index];
+
+	/* linelock parent entry - 1st slot */
+	lv->offset = 1;
+	lv->length = 1;
+	dtlck->index++;
+
+	/* update the parent pxd for page extension */
+	tpxd = (pxd_t *) & pp->slot[1];
+	*tpxd = *pxd;
+
+	DT_PUTPAGE(pmp);
+	return 0;
+}
+
+
+/*
+ *	dtSplitRoot()
+ *
+ * function:
+ *	split the full root page into the original/root/split page and a
+ *	new right page, i.e., the root remains fixed in the tree anchor
+ *	(inode) and its contents are copied to a single new right child
+ *	page (since root page << non-root page); the split root page then
+ *	contains a single entry for the new right child page.
+ *
+ * parameter:
+ *
+ * return: 0 - success;
+ *	   errno - failure;
+ *	return new page pinned;
+ */
+static int dtSplitRoot(tid_t tid,
+	    struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
+{
+	struct super_block *sb = ip->i_sb;
+	struct metapage *smp;
+	dtroot_t *sp;
+	struct metapage *rmp;
+	dtpage_t *rp;
+	s64 rbn;
+	int xlen;
+	int xsize;
+	struct dtslot *f;
+	s8 *stbl;
+	int fsi, stblsize, n;
+	struct idtentry *s;
+	pxd_t *ppxd;
+	struct pxdlist *pxdlist;
+	pxd_t *pxd;
+	struct dt_lock *dtlck;
+	struct tlock *tlck;
+	struct lv *lv;
+
+	/* get split root page */
+	smp = split->mp;
+	sp = &JFS_IP(ip)->i_dtroot;
+
+	/*
+	 *      allocate/initialize a single (right) child page
+	 *
+	 * N.B. at the first split, a one (or two) block extent big enough
+	 * to fit the new entry is allocated; at subsequent splits, a full
+	 * page is allocated;
+	 */
+	pxdlist = split->pxdlist;
+	pxd = &pxdlist->pxd[pxdlist->npxd];
+	pxdlist->npxd++;
+	rbn = addressPXD(pxd);
+	xlen = lengthPXD(pxd);
+	xsize = xlen << JFS_SBI(sb)->l2bsize;
+	rmp = get_metapage(ip, rbn, xsize, 1);
+	if (!rmp)
+		return -EIO;
+
+	rp = rmp->data;
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+		release_metapage(rmp);
+		return -EDQUOT;
+	}
+
+	BT_MARK_DIRTY(rmp, ip);
+	/*
+	 * acquire a transaction lock on the new right page
+	 */
+	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
+	dtlck = (struct dt_lock *) & tlck->lock;
+
+	rp->header.flag =
+	    (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
+	rp->header.self = *pxd;
+
+	/* initialize sibling pointers */
+	rp->header.next = 0;
+	rp->header.prev = 0;
+
+	/*
+	 *      move in-line root page into new right page extent
+	 */
+	/* linelock header + copied entries + new stbl (1st slot) in new page */
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+	lv->offset = 0;
+	lv->length = 10;	/* 1 + 8 + 1 */
+	dtlck->index++;
+
+	n = xsize >> L2DTSLOTSIZE;
+	rp->header.maxslot = n;
+	stblsize = (n + 31) >> L2DTSLOTSIZE;
+
+	/* copy old stbl to new stbl at start of extended area */
+	rp->header.stblindex = DTROOTMAXSLOT;
+	stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
+	memcpy(stbl, sp->header.stbl, sp->header.nextindex);
+	rp->header.nextindex = sp->header.nextindex;
+
+	/* copy old data area to start of new data area */
+	memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);
+
+	/*
+	 * append free region of newly extended area at tail of freelist
+	 */
+	/* init free region of newly extended area */
+	fsi = n = DTROOTMAXSLOT + stblsize;
+	f = &rp->slot[fsi];
+	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
+		f->next = fsi;
+	f->next = -1;
+
+	/* append new free region at tail of old freelist */
+	fsi = sp->header.freelist;
+	if (fsi == -1)
+		rp->header.freelist = n;
+	else {
+		rp->header.freelist = fsi;
+
+		do {
+			f = &rp->slot[fsi];
+			fsi = f->next;
+		} while (fsi != -1);
+
+		f->next = n;
+	}
+
+	rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;
+
+	/*
+	 * Update directory index table for entries now in right page
+	 */
+	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
+		s64 lblock;
+		struct metapage *mp = NULL;
+		struct ldtentry *ldtentry;
+
+		stbl = DT_GETSTBL(rp);
+		for (n = 0; n < rp->header.nextindex; n++) {
+			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
+			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
+				     rbn, n, &mp, &lblock);
+		}
+		if (mp)
+			release_metapage(mp);
+	}
+	/*
+	 * insert the new entry into the new right/child page
+	 * (skip index in the new right page will not change)
+	 */
+	dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
+
+	/*
+	 *      reset parent/root page
+	 *
+	 * set the 1st entry offset to 0, which forces the left-most key
+	 * at any level of the tree to be less than any search key.
+	 *
+	 * The btree comparison code guarantees that the left-most key on any
+	 * level of the tree is never used, so it doesn't need to be filled in.
+	 */
+	BT_MARK_DIRTY(smp, ip);
+	/*
+	 * acquire a transaction lock on the root page (in-memory inode)
+	 */
+	tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
+	dtlck = (struct dt_lock *) & tlck->lock;
+
+	/* linelock root */
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+	lv->offset = 0;
+	lv->length = DTROOTMAXSLOT;
+	dtlck->index++;
+
+	/* update page header of root */
+	if (sp->header.flag & BT_LEAF) {
+		sp->header.flag &= ~BT_LEAF;
+		sp->header.flag |= BT_INTERNAL;
+	}
+
+	/* init the first entry */
+	s = (struct idtentry *) & sp->slot[DTENTRYSTART];
+	ppxd = (pxd_t *) s;
+	*ppxd = *pxd;
+	s->next = -1;
+	s->namlen = 0;
+
+	stbl = sp->header.stbl;
+	stbl[0] = DTENTRYSTART;
+	sp->header.nextindex = 1;
+
+	/* init freelist */
+	fsi = DTENTRYSTART + 1;
+	f = &sp->slot[fsi];
+
+	/* init free region of remaining area */
+	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
+		f->next = fsi;
+	f->next = -1;
+
+	sp->header.freelist = DTENTRYSTART + 1;
+	sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);
+
+	*rmpp = rmp;
+
+	return 0;
+}
+
+
+/*
+ *	dtDelete()
+ *
+ * function: delete the entry(s) referenced by a key.
+ *
+ * parameter:
+ *
+ * return:
+ */
+int dtDelete(tid_t tid,
+	 struct inode *ip, struct component_name * key, ino_t * ino, int flag)
+{
+	int rc = 0;
+	s64 bn;
+	struct metapage *mp, *imp;
+	dtpage_t *p;
+	int index;
+	struct btstack btstack;
+	struct dt_lock *dtlck;
+	struct tlock *tlck;
+	struct lv *lv;
+	int i;
+	struct ldtentry *ldtentry;
+	u8 *stbl;
+	u32 table_index, next_index;
+	struct metapage *nmp;
+	dtpage_t *np;
+
+	/*
+	 *      search for the entry to delete:
+	 *
+	 * dtSearch() returns (leaf page pinned, index at which to delete).
+	 */
+	if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
+		return rc;
+
+	/* retrieve search result */
+	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	/*
+	 * We need to put the index of the next entry into the
+	 * directory index table in order to resume a readdir from this
+	 * entry.
+	 */
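+	/* free_index() records <next_index> in the freed slot so that a
+	 * readdir positioned at the deleted entry can resume at the
+	 * following entry.
+	 */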
+	if (DO_INDEX(ip)) {
+		stbl = DT_GETSTBL(p);
+		ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
+		table_index = le32_to_cpu(ldtentry->index);
+		if (index == (p->header.nextindex - 1)) {
+			/*
+			 * Last entry in this leaf page
+			 */
+			if ((p->header.flag & BT_ROOT)
+			    || (p->header.next == 0))
+				next_index = -1;
+			else {
+				/* Read next leaf page */
+				DT_GETPAGE(ip, le64_to_cpu(p->header.next),
+					   nmp, PSIZE, np, rc);
+				if (rc)
+					next_index = -1;
+				else {
+					stbl = DT_GETSTBL(np);
+					ldtentry =
+					    (struct ldtentry *) & np->
+					    slot[stbl[0]];
+					next_index =
+					    le32_to_cpu(ldtentry->index);
+					DT_PUTPAGE(nmp);
+				}
+			}
+		} else {
+			ldtentry =
+			    (struct ldtentry *) & p->slot[stbl[index + 1]];
+			next_index = le32_to_cpu(ldtentry->index);
+		}
+		free_index(tid, ip, table_index, next_index);
+	}
+	/*
+	 * the leaf page becomes empty, delete the page
+	 */
+	if (p->header.nextindex == 1) {
+		/* delete empty page */
+		rc = dtDeleteUp(tid, ip, mp, p, &btstack);
+	}
+	/*
+	 * the leaf page has other entries remaining:
+	 *
+	 * delete the entry from the leaf page.
+	 */
+	else {
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the leaf page
+		 */
+		tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
+		dtlck = (struct dt_lock *) & tlck->lock;
+
+		/*
+		 * Do not assume that dtlck->index will be zero.  During a
+		 * rename within a directory, this transaction may have
+		 * modified this page already when adding the new entry.
+		 */
+
+		/* linelock header */
+		if (dtlck->index >= dtlck->maxcnt)
+			dtlck = (struct dt_lock *) txLinelock(dtlck);
+		lv = & dtlck->lv[dtlck->index];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		/* linelock stbl of non-root leaf page */
+		if (!(p->header.flag & BT_ROOT)) {
+			if (dtlck->index >= dtlck->maxcnt)
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+			lv = & dtlck->lv[dtlck->index];
+			i = index >> L2DTSLOTSIZE;
+			lv->offset = p->header.stblindex + i;
+			lv->length =
+			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
+			    i + 1;
+			dtlck->index++;
+		}
+
+		/* free the leaf entry */
+		dtDeleteEntry(p, index, &dtlck);
+
+		/*
+		 * Update directory index table for entries moved in stbl
+		 */
+		if (DO_INDEX(ip) && index < p->header.nextindex) {
+			s64 lblock;
+
+			imp = NULL;
+			stbl = DT_GETSTBL(p);
+			for (i = index; i < p->header.nextindex; i++) {
+				ldtentry =
+				    (struct ldtentry *) & p->slot[stbl[i]];
+				modify_index(tid, ip,
+					     le32_to_cpu(ldtentry->index),
+					     bn, i, &imp, &lblock);
+			}
+			if (imp)
+				release_metapage(imp);
+		}
+
+		DT_PUTPAGE(mp);
+	}
+
+	return rc;
+}
+
+
+/*
+ *	dtDeleteUp()
+ *
+ * function:
+ *	free empty pages while propagating deletion up the tree
+ *
+ * parameter:
+ *
+ * return:
+ */
+static int dtDeleteUp(tid_t tid, struct inode *ip,
+	   struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
+{
+	int rc = 0;
+	struct metapage *mp;
+	dtpage_t *p;
+	int index, nextindex;
+	int xlen;
+	struct btframe *parent;
+	struct dt_lock *dtlck;
+	struct tlock *tlck;
+	struct lv *lv;
+	struct pxd_lock *pxdlock;
+	int i;
+
+	/*
+	 *      keep the root leaf page which has become empty
+	 */
+	if (BT_IS_ROOT(fmp)) {
+		/*
+		 * reset the root
+		 *
+		 * dtInitRoot() acquires txlock on the root
+		 */
+		dtInitRoot(tid, ip, PARENT(ip));
+
+		DT_PUTPAGE(fmp);
+
+		return 0;
+	}
+
+	/*
+	 *      free the non-root leaf page
+	 */
+	/*
+	 * acquire a transaction lock on the page
+	 *
+	 * write FREEXTENT|NOREDOPAGE log record
+	 * N.B. linelock is overlaid as freed extent descriptor, and
+	 * the buffer page is freed;
+	 */
+	tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
+	pxdlock = (struct pxd_lock *) & tlck->lock;
+	pxdlock->flag = mlckFREEPXD;
+	pxdlock->pxd = fp->header.self;
+	pxdlock->index = 1;
+
+	/* update sibling pointers */
+	if ((rc = dtRelink(tid, ip, fp))) {
+		BT_PUTPAGE(fmp);
+		return rc;
+	}
+
+	xlen = lengthPXD(&fp->header.self);
+
+	/* Free quota allocation. */
+	DQUOT_FREE_BLOCK(ip, xlen);
+
+	/* free/invalidate its buffer page */
+	discard_metapage(fmp);
+
+	/*
+	 *      propagate page deletion up the directory tree
+	 *
+	 * If the delete from the parent page makes it empty,
+	 * continue all the way up the tree.
+	 * stop if the root page is reached (which is never deleted) or
+	 * if the entry deletion does not empty the page.
+	 */
+	while ((parent = BT_POP(btstack)) != NULL) {
+		/* pin the parent page <sp> */
+		DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/*
+		 * free the extent of the child page deleted
+		 */
+		index = parent->index;
+
+		/*
+		 * delete the entry for the child page from parent
+		 */
+		nextindex = p->header.nextindex;
+
+		/*
+		 * the parent has the single entry being deleted:
+		 *
+		 * free the parent page which has become empty.
+		 */
+		if (nextindex == 1) {
+			/*
+			 * keep the root internal page which has become empty
+			 */
+			if (p->header.flag & BT_ROOT) {
+				/*
+				 * reset the root
+				 *
+				 * dtInitRoot() acquires txlock on the root
+				 */
+				dtInitRoot(tid, ip, PARENT(ip));
+
+				DT_PUTPAGE(mp);
+
+				return 0;
+			}
+			/*
+			 * free the parent page
+			 */
+			else {
+				/*
+				 * acquire a transaction lock on the page
+				 *
+				 * write FREEXTENT|NOREDOPAGE log record
+				 */
+				tlck =
+				    txMaplock(tid, ip,
+					      tlckDTREE | tlckFREE);
+				pxdlock = (struct pxd_lock *) & tlck->lock;
+				pxdlock->flag = mlckFREEPXD;
+				pxdlock->pxd = p->header.self;
+				pxdlock->index = 1;
+
+				/* update sibling pointers */
+				if ((rc = dtRelink(tid, ip, p))) {
+					DT_PUTPAGE(mp);
+					return rc;
+				}
+
+				xlen = lengthPXD(&p->header.self);
+
+				/* Free quota allocation */
+				DQUOT_FREE_BLOCK(ip, xlen);
+
+				/* free/invalidate its buffer page */
+				discard_metapage(mp);
+
+				/* propagate up */
+				continue;
+			}
+		}
+
+		/*
+		 * the parent has other entries remaining:
+		 *
+		 * delete the router entry from the parent page.
+		 */
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the page
+		 *
+		 * action: router entry deletion
+		 */
+		tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
+		dtlck = (struct dt_lock *) & tlck->lock;
+
+		/* linelock header */
+		if (dtlck->index >= dtlck->maxcnt)
+			dtlck = (struct dt_lock *) txLinelock(dtlck);
+		lv = & dtlck->lv[dtlck->index];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		/* linelock stbl of non-root leaf page */
+		if (!(p->header.flag & BT_ROOT)) {
+			if (dtlck->index < dtlck->maxcnt)
+				lv++;
+			else {
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+				lv = & dtlck->lv[0];
+			}
+			i = index >> L2DTSLOTSIZE;
+			lv->offset = p->header.stblindex + i;
+			lv->length =
+			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
+			    i + 1;
+			dtlck->index++;
+		}
+
+		/* free the router entry */
+		dtDeleteEntry(p, index, &dtlck);
+
+		/* reset key of new leftmost entry of level (for consistency) */
+		if (index == 0 &&
+		    ((p->header.flag & BT_ROOT) || p->header.prev == 0))
+			dtTruncateEntry(p, 0, &dtlck);
+
+		/* unpin the parent page */
+		DT_PUTPAGE(mp);
+
+		/* exit propagation up */
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef _NOTYET
+/*
+ * NAME:        dtRelocate()
+ *
+ * FUNCTION:    relocate dtpage (internal or leaf) of directory;
+ *              This function is mainly used by defragfs utility.
+ */
+int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
+	       s64 nxaddr)
+{
+	int rc = 0;
+	struct metapage *mp, *pmp, *lmp, *rmp;
+	dtpage_t *p, *pp, *rp = 0, *lp = 0;
+	s64 bn;
+	int index;
+	struct btstack btstack;
+	pxd_t *pxd;
+	s64 oxaddr, nextbn, prevbn;
+	int xlen, xsize;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct pxd_lock *pxdlock;
+	s8 *stbl;
+	struct lv *lv;
+
+	oxaddr = addressPXD(opxd);
+	xlen = lengthPXD(opxd);
+
+	jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
+		   (long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
+		   xlen);
+
+	/*
+	 *      1. get the internal parent dtpage covering the
+	 *      router entry for the target page to be relocated;
+	 */
+	rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
+	if (rc)
+		return rc;
+
+	/* retrieve search result */
+	DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
+	jfs_info("dtRelocate: parent router entry validated.");
+
+	/*
+	 *      2. relocate the target dtpage
+	 */
+	/* read in the target page from src extent */
+	DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
+	if (rc) {
+		/* release the pinned parent page */
+		DT_PUTPAGE(pmp);
+		return rc;
+	}
+
+	/*
+	 * read in sibling pages if any to update sibling pointers;
+	 */
+	rmp = NULL;
+	if (p->header.next) {
+		nextbn = le64_to_cpu(p->header.next);
+		DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
+		if (rc) {
+			DT_PUTPAGE(mp);
+			DT_PUTPAGE(pmp);
+			return (rc);
+		}
+	}
+
+	lmp = NULL;
+	if (p->header.prev) {
+		prevbn = le64_to_cpu(p->header.prev);
+		DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
+		if (rc) {
+			DT_PUTPAGE(mp);
+			DT_PUTPAGE(pmp);
+			if (rmp)
+				DT_PUTPAGE(rmp);
+			return (rc);
+		}
+	}
+
+	/* at this point, all xtpages to be updated are in memory */
+
+	/*
+	 * update sibling pointers of sibling dtpages if any;
+	 */
+	if (lmp) {
+		tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
+		dtlck = (struct dt_lock *) & tlck->lock;
+		/* linelock header */
+		ASSERT(dtlck->index == 0);
+		lv = & dtlck->lv[0];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		lp->header.next = cpu_to_le64(nxaddr);
+		DT_PUTPAGE(lmp);
+	}
+
+	if (rmp) {
+		tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
+		dtlck = (struct dt_lock *) & tlck->lock;
+		/* linelock header */
+		ASSERT(dtlck->index == 0);
+		lv = & dtlck->lv[0];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		rp->header.prev = cpu_to_le64(nxaddr);
+		DT_PUTPAGE(rmp);
+	}
+
+	/*
+	 * update the target dtpage to be relocated
+	 *
+	 * write LOG_REDOPAGE of LOG_NEW type for dst page
+	 * for the whole target page (logredo() will apply
+	 * after image and update bmap for allocation of the
+	 * dst extent), and update bmap for allocation of
+	 * the dst extent;
+	 */
+	tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
+	dtlck = (struct dt_lock *) & tlck->lock;
+	/* linelock header */
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+
+	/* update the self address in the dtpage header */
+	pxd = &p->header.self;
+	PXDaddress(pxd, nxaddr);
+
+	/* the dst page is the same as the src page, i.e.,
+	 * linelock for afterimage of the whole page;
+	 */
+	lv->offset = 0;
+	lv->length = p->header.maxslot;
+	dtlck->index++;
+
+	/* update the buffer extent descriptor of the dtpage */
+	xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
+#ifdef _STILL_TO_PORT
+	bmSetXD(mp, nxaddr, xsize);
+#endif /* _STILL_TO_PORT */
+	/* unpin the relocated page */
+	DT_PUTPAGE(mp);
+	jfs_info("dtRelocate: target dtpage relocated.");
+
+	/* the moved extent is a dtpage, so a LOG_NOREDOPAGE log rec
+	 * needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
+	 * will also force a bmap update).
+	 */
+
+	/*
+	 *      3. acquire maplock for the source extent to be freed;
+	 */
+	/* for dtpage relocation, write a LOG_NOREDOPAGE record
+	 * for the source dtpage (logredo() will init NoRedoPage
+	 * filter and will also update bmap for free of the source
+	 * dtpage), and update bmap for free of the source dtpage;
+	 */
+	tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
+	pxdlock = (struct pxd_lock *) & tlck->lock;
+	pxdlock->flag = mlckFREEPXD;
+	PXDaddress(&pxdlock->pxd, oxaddr);
+	PXDlength(&pxdlock->pxd, xlen);
+	pxdlock->index = 1;
+
+	/*
+	 *      4. update the parent router entry for relocation;
+	 *
+	 * acquire tlck for the parent entry covering the target dtpage;
+	 * write LOG_REDOPAGE to apply after image only;
+	 */
+	jfs_info("dtRelocate: update parent router entry.");
+	tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
+	dtlck = (struct dt_lock *) & tlck->lock;
+	lv = & dtlck->lv[dtlck->index];
+
+	/* update the PXD with the new address */
+	stbl = DT_GETSTBL(pp);
+	pxd = (pxd_t *) & pp->slot[stbl[index]];
+	PXDaddress(pxd, nxaddr);
+	lv->offset = stbl[index];
+	lv->length = 1;
+	dtlck->index++;
+
+	/* unpin the parent dtpage */
+	DT_PUTPAGE(pmp);
+
+	return rc;
+}
+
+/*
+ * NAME:	dtSearchNode()
+ *
+ * FUNCTION:	Search for a dtpage containing a specified address
+ *              This function is mainly used by defragfs utility.
+ *
+ * NOTE:	Search result on stack, the found page is pinned at exit.
+ *		The result page must be an internal dtpage.
+ *		lmxaddr gives the address of the leftmost page of the
+ *		dtree level, in which the required dtpage resides.
+ */
+static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
+			struct btstack * btstack)
+{
+	int rc = 0;
+	s64 bn;
+	struct metapage *mp;
+	dtpage_t *p;
+	int psize = 288;	/* initial in-line directory */
+	s8 *stbl;
+	int i;
+	pxd_t *pxd;
+	struct btframe *btsp;
+
+	BT_CLR(btstack);	/* reset stack */
+
+	/*
+	 *      descend tree to the level with specified leftmost page
+	 *
+	 *  by convention, root bn = 0.
+	 */
+	for (bn = 0;;) {
+		/* get/pin the page to search */
+		DT_GETPAGE(ip, bn, mp, psize, p, rc);
+		if (rc)
+			return rc;
+
+		/* does the xaddr of the leftmost page of the level
+		 * match the level search key?
+		 */
+		if (p->header.flag & BT_ROOT) {
+			if (lmxaddr == 0)
+				break;
+		} else if (addressPXD(&p->header.self) == lmxaddr)
+			break;
+
+		/*
+		 * descend down to leftmost child page
+		 */
+		if (p->header.flag & BT_LEAF) {
+			DT_PUTPAGE(mp);
+			return -ESTALE;
+		}
+
+		/* get the leftmost entry */
+		stbl = DT_GETSTBL(p);
+		pxd = (pxd_t *) & p->slot[stbl[0]];
+
+		/* get the child page block address */
+		bn = addressPXD(pxd);
+		psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
+		/* unpin the parent page */
+		DT_PUTPAGE(mp);
+	}
+
+	/*
+	 *      search each page at the current level
+	 */
+      loop:
+	stbl = DT_GETSTBL(p);
+	for (i = 0; i < p->header.nextindex; i++) {
+		pxd = (pxd_t *) & p->slot[stbl[i]];
+
+		/* found the specified router entry */
+		if (addressPXD(pxd) == addressPXD(kpxd) &&
+		    lengthPXD(pxd) == lengthPXD(kpxd)) {
+			btsp = btstack->top;
+			btsp->bn = bn;
+			btsp->index = i;
+			btsp->mp = mp;
+
+			return 0;
+		}
+	}
+
+	/* get the right sibling page if any */
+	if (p->header.next)
+		bn = le64_to_cpu(p->header.next);
+	else {
+		DT_PUTPAGE(mp);
+		return -ESTALE;
+	}
+
+	/* unpin current page */
+	DT_PUTPAGE(mp);
+
+	/* get the right sibling page */
+	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	goto loop;
+}
+#endif /* _NOTYET */
+
+/*
+ *	dtRelink()
+ *
+ * function:
+ *	link around a freed page.
+ *
+ * parameter:
+ *	fp:	page to be freed
+ *
+ * return:
+ */
+static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
+{
+	int rc;
+	struct metapage *mp;
+	s64 nextbn, prevbn;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct lv *lv;
+
+	nextbn = le64_to_cpu(p->header.next);
+	prevbn = le64_to_cpu(p->header.prev);
+
+	/* update prev pointer of the next page */
+	if (nextbn != 0) {
+		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the next page
+		 *
+		 * action: update prev pointer;
+		 */
+		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
+		jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
+			tlck, ip, mp);
+		dtlck = (struct dt_lock *) & tlck->lock;
+
+		/* linelock header */
+		if (dtlck->index >= dtlck->maxcnt)
+			dtlck = (struct dt_lock *) txLinelock(dtlck);
+		lv = & dtlck->lv[dtlck->index];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		p->header.prev = cpu_to_le64(prevbn);
+		DT_PUTPAGE(mp);
+	}
+
+	/* update next pointer of the previous page */
+	if (prevbn != 0) {
+		DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the prev page
+		 *
+		 * action: update next pointer;
+		 */
+		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
+		jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
+			tlck, ip, mp);
+		dtlck = (struct dt_lock *) & tlck->lock;
+
+		/* linelock header */
+		if (dtlck->index >= dtlck->maxcnt)
+			dtlck = (struct dt_lock *) txLinelock(dtlck);
+		lv = & dtlck->lv[dtlck->index];
+		lv->offset = 0;
+		lv->length = 1;
+		dtlck->index++;
+
+		p->header.next = cpu_to_le64(nextbn);
+		DT_PUTPAGE(mp);
+	}
+
+	return 0;
+}
+
+
+/*
+ *	dtInitRoot()
+ *
+ * initialize directory root (inline in inode)
+ */
+void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	dtroot_t *p;
+	int fsi;
+	struct dtslot *f;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct lv *lv;
+	u16 xflag_save;
+
+	/*
+	 * If this was previously a non-empty directory, we need to remove
+	 * the old directory table.
+	 */
+	if (DO_INDEX(ip)) {
+		if (!jfs_dirtable_inline(ip)) {
+			struct tblock *tblk = tid_to_tblock(tid);
+			/*
+			 * We're playing games with the tid's xflag.  If
+			 * we're removing a regular file, the file's xtree
+			 * is committed with COMMIT_PMAP, but we always
+			 * commit the directory's xtree with COMMIT_PWMAP.
+			 */
+			xflag_save = tblk->xflag;
+			tblk->xflag = 0;
+			/*
+			 * xtTruncate isn't guaranteed to fully truncate
+			 * the xtree.  The caller needs to check i_size
+			 * after committing the transaction to see if
+			 * additional truncation is needed.  The
+			 * COMMIT_Stale flag tells the caller that we
+			 * initiated the truncation.
+			 */
+			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
+			set_cflag(COMMIT_Stale, ip);
+
+			tblk->xflag = xflag_save;
+		} else
+			ip->i_size = 1;
+
+		jfs_ip->next_index = 2;
+	} else
+		ip->i_size = IDATASIZE;
+
+	/*
+	 * acquire a transaction lock on the root
+	 *
+	 * action: directory initialization;
+	 */
+	tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
+		      tlckDTREE | tlckENTRY | tlckBTROOT);
+	dtlck = (struct dt_lock *) & tlck->lock;
+
+	/* linelock root */
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+	lv->offset = 0;
+	lv->length = DTROOTMAXSLOT;
+	dtlck->index++;
+
+	p = &jfs_ip->i_dtroot;
+
+	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
+
+	p->header.nextindex = 0;
+
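+	/*
+	 * The in-line root has DTROOTMAXSLOT (9) slots; slot 0 is taken
+	 * by the header, so slots 1..8 are chained onto the freelist
+	 * below, giving freecnt = 8.
+	 */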
+	/* init freelist */
+	fsi = 1;
+	f = &p->slot[fsi];
+
+	/* init data area of root */
+	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
+		f->next = fsi;
+	f->next = -1;
+
+	p->header.freelist = 1;
+	p->header.freecnt = 8;
+
+	/* init '..' entry */
+	p->header.idotdot = cpu_to_le32(idotdot);
+
+	return;
+}
+
+/*
+ *	add_missing_indices()
+ *
+ * function: Fix a dtree page in which one or more entries have an invalid index.
+ *	     fsck.jfs should really fix this, but it currently does not.
+ *	     Called from jfs_readdir when bad index is detected.
+ */
+static void add_missing_indices(struct inode *inode, s64 bn)
+{
+	struct ldtentry *d;
+	struct dt_lock *dtlck;
+	int i;
+	uint index;
+	struct lv *lv;
+	struct metapage *mp;
+	dtpage_t *p;
+	int rc;
+	s8 *stbl;
+	tid_t tid;
+	struct tlock *tlck;
+
+	tid = txBegin(inode->i_sb, 0);
+
+	DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);
+
+	if (rc) {
+		printk(KERN_ERR "DT_GETPAGE failed!\n");
+		goto end;
+	}
+	BT_MARK_DIRTY(mp, inode);
+
+	ASSERT(p->header.flag & BT_LEAF);
+
+	tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
+	dtlck = (struct dt_lock *) &tlck->lock;
+
+	stbl = DT_GETSTBL(p);
+	for (i = 0; i < p->header.nextindex; i++) {
+		d = (struct ldtentry *) &p->slot[stbl[i]];
+		index = le32_to_cpu(d->index);
+		if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
+			d->index = cpu_to_le32(add_index(tid, inode, bn, i));
+			if (dtlck->index >= dtlck->maxcnt)
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+			lv = &dtlck->lv[dtlck->index];
+			lv->offset = stbl[i];
+			lv->length = 1;
+			dtlck->index++;
+		}
+	}
+
+	DT_PUTPAGE(mp);
+	(void) txCommit(tid, 1, &inode, 0);
+end:
+	txEnd(tid);
+}
+
+/*
+ * Buffer to hold directory entry info while traversing a dtree page
+ * before being fed to the filldir function
+ */
+struct jfs_dirent {
+	loff_t position;
+	int ino;
+	u16 name_len;
+	char name[0];
+};
+
+/*
+ * function to determine next variable-sized jfs_dirent in buffer
+ */
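+/*
+ * Entries are packed back to back and padded so that each one starts on
+ * a loff_t (8-byte) boundary.  For example, assuming a 64-bit build where
+ * sizeof(struct jfs_dirent) is 16, a 5-character name occupies
+ * 16 + 5 + 1 = 22 bytes, which rounds up to 24 for the next entry.
+ */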
+static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
+{
+	return (struct jfs_dirent *)
+		((char *)dirent +
+		 ((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
+		   sizeof (loff_t) - 1) &
+		  ~(sizeof (loff_t) - 1)));
+}
+
+/*
+ *	jfs_readdir()
+ *
+ * function: read directory entries sequentially
+ *	from the specified entry offset
+ *
+ * parameter:
+ *
+ * return: offset = (pn, index) of start entry
+ *	of next jfs_readdir()/dtRead()
+ */
+int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *ip = filp->f_dentry->d_inode;
+	struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
+	int rc = 0;
+	loff_t dtpos;	/* legacy OS/2 style position */
+	struct dtoffset {
+		s16 pn;
+		s16 index;
+		s32 unused;
+	} *dtoffset = (struct dtoffset *) &dtpos;
+	s64 bn;
+	struct metapage *mp;
+	dtpage_t *p;
+	int index;
+	s8 *stbl;
+	struct btstack btstack;
+	int i, next;
+	struct ldtentry *d;
+	struct dtslot *t;
+	int d_namleft, len, outlen;
+	unsigned long dirent_buf;
+	char *name_ptr;
+	u32 dir_index;
+	int do_index = 0;
+	uint loop_count = 0;
+	struct jfs_dirent *jfs_dirent;
+	int jfs_dirents;
+	int overflow, fix_page, page_fixed = 0;
+	static int unique_pos = 2;	/* If we can't fix broken index */
+
+	if (filp->f_pos == DIREND)
+		return 0;
+
+	if (DO_INDEX(ip)) {
+		/*
+		 * persistent index is stored in directory entries.
+		 * Special cases:        0 = .
+		 *                       1 = ..
+		 *                      -1 = End of directory
+		 */
+		do_index = 1;
+
+		dir_index = (u32) filp->f_pos;
+
+		if (dir_index > 1) {
+			struct dir_table_slot dirtab_slot;
+
+			if (dtEmpty(ip) ||
+			    (dir_index >= JFS_IP(ip)->next_index)) {
+				/* Stale position.  Directory has shrunk */
+				filp->f_pos = DIREND;
+				return 0;
+			}
+		      repeat:
+			rc = read_index(ip, dir_index, &dirtab_slot);
+			if (rc) {
+				filp->f_pos = DIREND;
+				return rc;
+			}
+			if (dirtab_slot.flag == DIR_INDEX_FREE) {
+				if (loop_count++ > JFS_IP(ip)->next_index) {
+					jfs_err("jfs_readdir detected "
+						   "infinite loop!");
+					filp->f_pos = DIREND;
+					return 0;
+				}
+				dir_index = le32_to_cpu(dirtab_slot.addr2);
+				if (dir_index == -1) {
+					filp->f_pos = DIREND;
+					return 0;
+				}
+				goto repeat;
+			}
+			bn = addressDTS(&dirtab_slot);
+			index = dirtab_slot.slot;
+			DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc) {
+				filp->f_pos = DIREND;
+				return 0;
+			}
+			if (p->header.flag & BT_INTERNAL) {
+				jfs_err("jfs_readdir: bad index table");
+				DT_PUTPAGE(mp);
+				filp->f_pos = -1;
+				return 0;
+			}
+		} else {
+			if (dir_index == 0) {
+				/*
+				 * self "."
+				 */
+				filp->f_pos = 0;
+				if (filldir(dirent, ".", 1, 0, ip->i_ino,
+					    DT_DIR))
+					return 0;
+			}
+			/*
+			 * parent ".."
+			 */
+			filp->f_pos = 1;
+			if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
+				return 0;
+
+			/*
+			 * Find first entry of left-most leaf
+			 */
+			if (dtEmpty(ip)) {
+				filp->f_pos = DIREND;
+				return 0;
+			}
+
+			if ((rc = dtReadFirst(ip, &btstack)))
+				return rc;
+
+			DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+		}
+	} else {
+		/*
+		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
+		 *
+		 * pn = index = 0:      First entry "."
+		 * pn = 0; index = 1:   Second entry ".."
+		 * pn > 0:              Real entries, pn=1 -> leftmost page
+		 * pn = index = -1:     No more entries
+		 */
+		dtpos = filp->f_pos;
+		if (dtpos == 0) {
+			/* build "." entry */
+
+			if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
+				    DT_DIR))
+				return 0;
+			dtoffset->index = 1;
+			filp->f_pos = dtpos;
+		}
+
+		if (dtoffset->pn == 0) {
+			if (dtoffset->index == 1) {
+				/* build ".." entry */
+
+				if (filldir(dirent, "..", 2, filp->f_pos,
+					    PARENT(ip), DT_DIR))
+					return 0;
+			} else {
+				jfs_err("jfs_readdir called with "
+					"invalid offset!");
+			}
+			dtoffset->pn = 1;
+			dtoffset->index = 0;
+			filp->f_pos = dtpos;
+		}
+
+		if (dtEmpty(ip)) {
+			filp->f_pos = DIREND;
+			return 0;
+		}
+
+		if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
+			jfs_err("jfs_readdir: unexpected rc = %d "
+				"from dtReadNext", rc);
+			filp->f_pos = DIREND;
+			return 0;
+		}
+		/* get start leaf page and index */
+		DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+		/* offset beyond directory eof ? */
+		if (bn < 0) {
+			filp->f_pos = DIREND;
+			return 0;
+		}
+	}
+
+	dirent_buf = __get_free_page(GFP_KERNEL);
+	if (dirent_buf == 0) {
+		DT_PUTPAGE(mp);
+		jfs_warn("jfs_readdir: __get_free_page failed!");
+		filp->f_pos = DIREND;
+		return -ENOMEM;
+	}
+
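+	/*
+	 * Gather the entries of one leaf page into dirent_buf first; the
+	 * page is unpinned before the buffered entries are handed to
+	 * filldir().
+	 */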
+	while (1) {
+		jfs_dirent = (struct jfs_dirent *) dirent_buf;
+		jfs_dirents = 0;
+		overflow = fix_page = 0;
+
+		stbl = DT_GETSTBL(p);
+
+		for (i = index; i < p->header.nextindex; i++) {
+			d = (struct ldtentry *) & p->slot[stbl[i]];
+
+			if (((long) jfs_dirent + d->namlen + 1) >
+			    (dirent_buf + PSIZE)) {
+				/* DBCS codepages could overrun dirent_buf */
+				index = i;
+				overflow = 1;
+				break;
+			}
+
+			d_namleft = d->namlen;
+			name_ptr = jfs_dirent->name;
+			jfs_dirent->ino = le32_to_cpu(d->inumber);
+
+			if (do_index) {
+				len = min(d_namleft, DTLHDRDATALEN);
+				jfs_dirent->position = le32_to_cpu(d->index);
+				/*
+				 * d->index should always be valid, but it
+				 * isn't.  fsck.jfs doesn't create the
+				 * directory index for the lost+found
+				 * directory.  Rather than let it go,
+				 * we can try to fix it.
+				 */
+				if ((jfs_dirent->position < 2) ||
+				    (jfs_dirent->position >=
+				     JFS_IP(ip)->next_index)) {
+					if (!page_fixed && !isReadOnly(ip)) {
+						fix_page = 1;
+						/*
+						 * setting overflow and setting
+						 * index to i will cause the
+						 * same page to be processed
+						 * again starting here
+						 */
+						overflow = 1;
+						index = i;
+						break;
+					}
+					jfs_dirent->position = unique_pos++;
+				}
+			} else {
+				jfs_dirent->position = dtpos;
+				len = min(d_namleft, DTLHDRDATALEN_LEGACY);
+			}
+
+			/* copy the name of head/only segment */
+			outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
+						   codepage);
+			jfs_dirent->name_len = outlen;
+
+			/* copy name in the additional segment(s) */
+			next = d->next;
+			while (next >= 0) {
+				t = (struct dtslot *) & p->slot[next];
+				name_ptr += outlen;
+				d_namleft -= len;
+				/* Sanity Check */
+				if (d_namleft == 0) {
+					jfs_error(ip->i_sb,
+						  "JFS:Dtree error: ino = "
+						  "%ld, bn=%Ld, index = %d",
+						  (long)ip->i_ino,
+						  (long long)bn,
+						  i);
+					goto skip_one;
+				}
+				len = min(d_namleft, DTSLOTDATALEN);
+				outlen = jfs_strfromUCS_le(name_ptr, t->name,
+							   len, codepage);
+				jfs_dirent->name_len += outlen;
+
+				next = t->next;
+			}
+
+			jfs_dirents++;
+			jfs_dirent = next_jfs_dirent(jfs_dirent);
+skip_one:
+			if (!do_index)
+				dtoffset->index++;
+		}
+
+		if (!overflow) {
+			/* Point to next leaf page */
+			if (p->header.flag & BT_ROOT)
+				bn = 0;
+			else {
+				bn = le64_to_cpu(p->header.next);
+				index = 0;
+				/* update offset (pn:index) for new page */
+				if (!do_index) {
+					dtoffset->pn++;
+					dtoffset->index = 0;
+				}
+			}
+			page_fixed = 0;
+		}
+
+		/* unpin previous leaf page */
+		DT_PUTPAGE(mp);
+
+		jfs_dirent = (struct jfs_dirent *) dirent_buf;
+		while (jfs_dirents--) {
+			filp->f_pos = jfs_dirent->position;
+			if (filldir(dirent, jfs_dirent->name,
+				    jfs_dirent->name_len, filp->f_pos,
+				    jfs_dirent->ino, DT_UNKNOWN))
+				goto out;
+			jfs_dirent = next_jfs_dirent(jfs_dirent);
+		}
+
+		if (fix_page) {
+			add_missing_indices(ip, bn);
+			page_fixed = 1;
+		}
+
+		if (!overflow && (bn == 0)) {
+			filp->f_pos = DIREND;
+			break;
+		}
+
+		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc) {
+			free_page(dirent_buf);
+			return rc;
+		}
+	}
+
+      out:
+	free_page(dirent_buf);
+
+	return rc;
+}
+
+
+/*
+ *	dtReadFirst()
+ *
+ * function: get the leftmost page of the directory
+ */
+static int dtReadFirst(struct inode *ip, struct btstack * btstack)
+{
+	int rc = 0;
+	s64 bn;
+	int psize = 288;	/* initial in-line directory */
+	struct metapage *mp;
+	dtpage_t *p;
+	s8 *stbl;
+	struct btframe *btsp;
+	pxd_t *xd;
+
+	BT_CLR(btstack);	/* reset stack */
+
+	/*
+	 *      descend leftmost path of the tree
+	 *
+	 * by convention, root bn = 0.
+	 */
+	for (bn = 0;;) {
+		DT_GETPAGE(ip, bn, mp, psize, p, rc);
+		if (rc)
+			return rc;
+
+		/*
+		 * leftmost leaf page
+		 */
+		if (p->header.flag & BT_LEAF) {
+			/* return leftmost entry */
+			btsp = btstack->top;
+			btsp->bn = bn;
+			btsp->index = 0;
+			btsp->mp = mp;
+
+			return 0;
+		}
+
+		/*
+		 * descend down to leftmost child page
+		 */
+		if (BT_STACK_FULL(btstack)) {
+			DT_PUTPAGE(mp);
+			jfs_error(ip->i_sb, "dtReadFirst: btstack overrun");
+			BT_STACK_DUMP(btstack);
+			return -EIO;
+		}
+		/* push (bn, index) of the parent page/entry */
+		BT_PUSH(btstack, bn, 0);
+
+		/* get the leftmost entry */
+		stbl = DT_GETSTBL(p);
+		xd = (pxd_t *) & p->slot[stbl[0]];
+
+		/* get the child page block address */
+		bn = addressPXD(xd);
+		psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize;
+
+		/* unpin the parent page */
+		DT_PUTPAGE(mp);
+	}
+}
+
+
+/*
+ *	dtReadNext()
+ *
+ * function: get the page of the specified offset (pn:index)
+ *
+ * return: if (offset > eof), bn = -1;
+ *
+ * note: if index > nextindex of the target leaf page,
+ * start with 1st entry of next leaf page;
+ */
+static int dtReadNext(struct inode *ip, loff_t * offset,
+		      struct btstack * btstack)
+{
+	int rc = 0;
+	struct dtoffset {
+		s16 pn;
+		s16 index;
+		s32 unused;
+	} *dtoffset = (struct dtoffset *) offset;
+	s64 bn;
+	struct metapage *mp;
+	dtpage_t *p;
+	int index;
+	int pn;
+	s8 *stbl;
+	struct btframe *btsp, *parent;
+	pxd_t *xd;
+
+	/*
+	 * get leftmost leaf page pinned
+	 */
+	if ((rc = dtReadFirst(ip, btstack)))
+		return rc;
+
+	/* get leaf page */
+	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
+
+	/* get the start offset (pn:index) */
+	pn = dtoffset->pn - 1;	/* Now pn = 0 represents leftmost leaf */
+	index = dtoffset->index;
+
+	/* start at leftmost page ? */
+	if (pn == 0) {
+		/* offset beyond eof ? */
+		if (index < p->header.nextindex)
+			goto out;
+
+		if (p->header.flag & BT_ROOT) {
+			bn = -1;
+			goto out;
+		}
+
+		/* start with 1st entry of next leaf page */
+		dtoffset->pn++;
+		dtoffset->index = index = 0;
+		goto a;
+	}
+
+	/* start at non-leftmost page: scan parent pages for large pn */
+	if (p->header.flag & BT_ROOT) {
+		bn = -1;
+		goto out;
+	}
+
+	/* start after next leaf page ? */
+	if (pn > 1)
+		goto b;
+
+	/* get leaf page pn = 1 */
+      a:
+	bn = le64_to_cpu(p->header.next);
+
+	/* unpin leaf page */
+	DT_PUTPAGE(mp);
+
+	/* offset beyond eof ? */
+	if (bn == 0) {
+		bn = -1;
+		goto out;
+	}
+
+	goto c;
+
+	/*
+	 * scan last internal page level to get target leaf page
+	 */
+      b:
+	/* unpin leftmost leaf page */
+	DT_PUTPAGE(mp);
+
+	/* get left most parent page */
+	btsp = btstack->top;
+	parent = btsp - 1;
+	bn = parent->bn;
+	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* scan parent pages at last internal page level */
+	while (pn >= p->header.nextindex) {
+		pn -= p->header.nextindex;
+
+		/* get next parent page address */
+		bn = le64_to_cpu(p->header.next);
+
+		/* unpin current parent page */
+		DT_PUTPAGE(mp);
+
+		/* offset beyond eof ? */
+		if (bn == 0) {
+			bn = -1;
+			goto out;
+		}
+
+		/* get next parent page */
+		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/* update parent page stack frame */
+		parent->bn = bn;
+	}
+
+	/* get leaf page address */
+	stbl = DT_GETSTBL(p);
+	xd = (pxd_t *) & p->slot[stbl[pn]];
+	bn = addressPXD(xd);
+
+	/* unpin parent page */
+	DT_PUTPAGE(mp);
+
+	/*
+	 * get target leaf page
+	 */
+      c:
+	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/*
+	 * leaf page has been completed:
+	 * start with 1st entry of next leaf page
+	 */
+	if (index >= p->header.nextindex) {
+		bn = le64_to_cpu(p->header.next);
+
+		/* unpin leaf page */
+		DT_PUTPAGE(mp);
+
+		/* offset beyond eof ? */
+		if (bn == 0) {
+			bn = -1;
+			goto out;
+		}
+
+		/* get next leaf page */
+		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/* start with 1st entry of next leaf page */
+		dtoffset->pn++;
+		dtoffset->index = 0;
+	}
+
+      out:
+	/* return target leaf page pinned */
+	btsp = btstack->top;
+	btsp->bn = bn;
+	btsp->index = dtoffset->index;
+	btsp->mp = mp;
+
+	return 0;
+}
+
+
+/*
+ *	dtCompare()
+ *
+ * function: compare search key with an internal entry
+ *
+ * return:
+ *	< 0 if k is < record
+ *	= 0 if k is = record
+ *	> 0 if k is > record
+ */
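+/*
+ * The comparison works like strncmp() over the UniChar segments: e.g. a
+ * search key "beta" against a stored name "bet" ties on the first three
+ * characters, so the final (klen - namlen) = 1 reports the key as greater.
+ */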
+static int dtCompare(struct component_name * key,	/* search key */
+		     dtpage_t * p,	/* directory page */
+		     int si)
+{				/* entry slot index */
+	wchar_t *kname;
+	__le16 *name;
+	int klen, namlen, len, rc;
+	struct idtentry *ih;
+	struct dtslot *t;
+
+	/*
+	 * force the left-most key on internal pages, at any level of
+	 * the tree, to be less than any search key.
+	 * this obviates having to update the leftmost key on an internal
+	 * page when the user inserts a new key in the tree smaller than
+	 * anything that has been stored.
+	 *
+	 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
+	 * at any internal page at any level of the tree,
+	 * it descends to child of the entry anyway -
+	 * ? make the entry as min size dummy entry)
+	 *
+	 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
+	 * return (1);
+	 */
+
+	kname = key->name;
+	klen = key->namlen;
+
+	ih = (struct idtentry *) & p->slot[si];
+	si = ih->next;
+	name = ih->name;
+	namlen = ih->namlen;
+	len = min(namlen, DTIHDRDATALEN);
+
+	/* compare with head/only segment */
+	len = min(klen, len);
+	if ((rc = UniStrncmp_le(kname, name, len)))
+		return rc;
+
+	klen -= len;
+	namlen -= len;
+
+	/* compare with additional segment(s) */
+	kname += len;
+	while (klen > 0 && namlen > 0) {
+		/* compare with next name segment */
+		t = (struct dtslot *) & p->slot[si];
+		len = min(namlen, DTSLOTDATALEN);
+		len = min(klen, len);
+		name = t->name;
+		if ((rc = UniStrncmp_le(kname, name, len)))
+			return rc;
+
+		klen -= len;
+		namlen -= len;
+		kname += len;
+		si = t->next;
+	}
+
+	return (klen - namlen);
+}
+
+
+
+
+/*
+ *	ciCompare()
+ *
+ * function: compare search key with an (leaf/internal) entry
+ *
+ * return:
+ *	< 0 if k is < record
+ *	= 0 if k is = record
+ *	> 0 if k is > record
+ */
+static int ciCompare(struct component_name * key,	/* search key */
+		     dtpage_t * p,	/* directory page */
+		     int si,	/* entry slot index */
+		     int flag)
+{
+	wchar_t *kname, x;
+	__le16 *name;
+	int klen, namlen, len, rc;
+	struct ldtentry *lh;
+	struct idtentry *ih;
+	struct dtslot *t;
+	int i;
+
+	/*
+	 * force the left-most key on internal pages, at any level of
+	 * the tree, to be less than any search key.
+	 * this obviates having to update the leftmost key on an internal
+	 * page when the user inserts a new key in the tree smaller than
+	 * anything that has been stored.
+	 *
+	 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
+	 * at any internal page at any level of the tree,
+	 * it descends to child of the entry anyway -
+	 * ? make the entry as min size dummy entry)
+	 *
+	 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
+	 * return (1);
+	 */
+
+	kname = key->name;
+	klen = key->namlen;
+
+	/*
+	 * leaf page entry
+	 */
+	if (p->header.flag & BT_LEAF) {
+		lh = (struct ldtentry *) & p->slot[si];
+		si = lh->next;
+		name = lh->name;
+		namlen = lh->namlen;
+		if (flag & JFS_DIR_INDEX)
+			len = min(namlen, DTLHDRDATALEN);
+		else
+			len = min(namlen, DTLHDRDATALEN_LEGACY);
+	}
+	/*
+	 * internal page entry
+	 */
+	else {
+		ih = (struct idtentry *) & p->slot[si];
+		si = ih->next;
+		name = ih->name;
+		namlen = ih->namlen;
+		len = min(namlen, DTIHDRDATALEN);
+	}
+
+	/* compare with head/only segment */
+	len = min(klen, len);
+	for (i = 0; i < len; i++, kname++, name++) {
+		/* only uppercase if case-insensitive support is on */
+		if ((flag & JFS_OS2) == JFS_OS2)
+			x = UniToupper(le16_to_cpu(*name));
+		else
+			x = le16_to_cpu(*name);
+		if ((rc = *kname - x))
+			return rc;
+	}
+
+	klen -= len;
+	namlen -= len;
+
+	/* compare with additional segment(s) */
+	while (klen > 0 && namlen > 0) {
+		/* compare with next name segment */
+		t = (struct dtslot *) & p->slot[si];
+		len = min(namlen, DTSLOTDATALEN);
+		len = min(klen, len);
+		name = t->name;
+		for (i = 0; i < len; i++, kname++, name++) {
+			/* only uppercase if case-insensitive support is on */
+			if ((flag & JFS_OS2) == JFS_OS2)
+				x = UniToupper(le16_to_cpu(*name));
+			else
+				x = le16_to_cpu(*name);
+
+			if ((rc = *kname - x))
+				return rc;
+		}
+
+		klen -= len;
+		namlen -= len;
+		si = t->next;
+	}
+
+	return (klen - namlen);
+}
+
+
+/*
+ *	ciGetLeafPrefixKey()
+ *
+ * function: compute prefix of suffix compression
+ *	     from two adjacent leaf entries
+ *	     across page boundary
+ *
+ * return: non-zero on error
+ */
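+/*
+ * Example: for a left key "apple" and right key "apricot" the loop copies
+ * characters of the right key until the first mismatch, yielding the
+ * separator key "apr" (namlen = 3).
+ */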
+static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
+			       int ri, struct component_name * key, int flag)
+{
+	int klen, namlen;
+	wchar_t *pl, *pr, *kname;
+	struct component_name lkey;
+	struct component_name rkey;
+
+	lkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+					GFP_KERNEL);
+	if (lkey.name == NULL)
+		return -ENOSPC;
+
+	rkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+					GFP_KERNEL);
+	if (rkey.name == NULL) {
+		kfree(lkey.name);
+		return -ENOSPC;
+	}
+
+	/* get left and right key */
+	dtGetKey(lp, li, &lkey, flag);
+	lkey.name[lkey.namlen] = 0;
+
+	if ((flag & JFS_OS2) == JFS_OS2)
+		ciToUpper(&lkey);
+
+	dtGetKey(rp, ri, &rkey, flag);
+	rkey.name[rkey.namlen] = 0;
+
+
+	if ((flag & JFS_OS2) == JFS_OS2)
+		ciToUpper(&rkey);
+
+	/* compute prefix */
+	klen = 0;
+	kname = key->name;
+	namlen = min(lkey.namlen, rkey.namlen);
+	for (pl = lkey.name, pr = rkey.name;
+	     namlen; pl++, pr++, namlen--, klen++, kname++) {
+		*kname = *pr;
+		if (*pl != *pr) {
+			key->namlen = klen + 1;
+			goto free_names;
+		}
+	}
+
+	/* l->namlen <= r->namlen since l <= r */
+	if (lkey.namlen < rkey.namlen) {
+		*kname = *pr;
+		key->namlen = klen + 1;
+	} else			/* l->namelen == r->namelen */
+		key->namlen = klen;
+
+free_names:
+	kfree(lkey.name);
+	kfree(rkey.name);
+	return 0;
+}
+
+
+
+/*
+ *	dtGetKey()
+ *
+ * function: get key of the entry
+ */
+static void dtGetKey(dtpage_t * p, int i,	/* entry index */
+		     struct component_name * key, int flag)
+{
+	int si;
+	s8 *stbl;
+	struct ldtentry *lh;
+	struct idtentry *ih;
+	struct dtslot *t;
+	int namlen, len;
+	wchar_t *kname;
+	__le16 *name;
+
+	/* get entry */
+	stbl = DT_GETSTBL(p);
+	si = stbl[i];
+	if (p->header.flag & BT_LEAF) {
+		lh = (struct ldtentry *) & p->slot[si];
+		si = lh->next;
+		namlen = lh->namlen;
+		name = lh->name;
+		if (flag & JFS_DIR_INDEX)
+			len = min(namlen, DTLHDRDATALEN);
+		else
+			len = min(namlen, DTLHDRDATALEN_LEGACY);
+	} else {
+		ih = (struct idtentry *) & p->slot[si];
+		si = ih->next;
+		namlen = ih->namlen;
+		name = ih->name;
+		len = min(namlen, DTIHDRDATALEN);
+	}
+
+	key->namlen = namlen;
+	kname = key->name;
+
+	/*
+	 * move head/only segment
+	 */
+	UniStrncpy_from_le(kname, name, len);
+
+	/*
+	 * move additional segment(s)
+	 */
+	while (si >= 0) {
+		/* get next segment */
+		t = &p->slot[si];
+		kname += len;
+		namlen -= len;
+		len = min(namlen, DTSLOTDATALEN);
+		UniStrncpy_from_le(kname, t->name, len);
+
+		si = t->next;
+	}
+}
+
+
+/*
+ *	dtInsertEntry()
+ *
+ * function: allocate free slot(s) and
+ *	     write a leaf/internal entry
+ *
+ * the head slot index of the new entry is recorded in the page's
+ * sorted entry table (stbl) at the given index.
+ */
+static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
+			  ddata_t * data, struct dt_lock ** dtlock)
+{
+	struct dtslot *h, *t;
+	struct ldtentry *lh = NULL;
+	struct idtentry *ih = NULL;
+	int hsi, fsi, klen, len, nextindex;
+	wchar_t *kname;
+	__le16 *name;
+	s8 *stbl;
+	pxd_t *xd;
+	struct dt_lock *dtlck = *dtlock;
+	struct lv *lv;
+	int xsi, n;
+	s64 bn = 0;
+	struct metapage *mp = NULL;
+
+	klen = key->namlen;
+	kname = key->name;
+
+	/* allocate a free slot */
+	hsi = fsi = p->header.freelist;
+	h = &p->slot[fsi];
+	p->header.freelist = h->next;
+	--p->header.freecnt;
+
+	/* open new linelock */
+	if (dtlck->index >= dtlck->maxcnt)
+		dtlck = (struct dt_lock *) txLinelock(dtlck);
+
+	lv = & dtlck->lv[dtlck->index];
+	lv->offset = hsi;
+
+	/* write head/only segment */
+	if (p->header.flag & BT_LEAF) {
+		lh = (struct ldtentry *) h;
+		lh->next = h->next;
+		lh->inumber = cpu_to_le32(data->leaf.ino);
+		lh->namlen = klen;
+		name = lh->name;
+		if (data->leaf.ip) {
+			len = min(klen, DTLHDRDATALEN);
+			if (!(p->header.flag & BT_ROOT))
+				bn = addressPXD(&p->header.self);
+			lh->index = cpu_to_le32(add_index(data->leaf.tid,
+							  data->leaf.ip,
+							  bn, index));
+		} else
+			len = min(klen, DTLHDRDATALEN_LEGACY);
+	} else {
+		ih = (struct idtentry *) h;
+		ih->next = h->next;
+		xd = (pxd_t *) ih;
+		*xd = data->xd;
+		ih->namlen = klen;
+		name = ih->name;
+		len = min(klen, DTIHDRDATALEN);
+	}
+
+	UniStrncpy_to_le(name, kname, len);
+
+	n = 1;
+	xsi = hsi;
+
+	/* write additional segment(s) */
+	t = h;
+	klen -= len;
+	while (klen) {
+		/* get free slot */
+		fsi = p->header.freelist;
+		t = &p->slot[fsi];
+		p->header.freelist = t->next;
+		--p->header.freecnt;
+
+		/* is next slot contiguous ? */
+		if (fsi != xsi + 1) {
+			/* close current linelock */
+			lv->length = n;
+			dtlck->index++;
+
+			/* open new linelock */
+			if (dtlck->index < dtlck->maxcnt)
+				lv++;
+			else {
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+				lv = & dtlck->lv[0];
+			}
+
+			lv->offset = fsi;
+			n = 0;
+		}
+
+		kname += len;
+		len = min(klen, DTSLOTDATALEN);
+		UniStrncpy_to_le(t->name, kname, len);
+
+		n++;
+		xsi = fsi;
+		klen -= len;
+	}
+
+	/* close current linelock */
+	lv->length = n;
+	dtlck->index++;
+
+	*dtlock = dtlck;
+
+	/* terminate last/only segment */
+	if (h == t) {
+		/* single segment entry */
+		if (p->header.flag & BT_LEAF)
+			lh->next = -1;
+		else
+			ih->next = -1;
+	} else
+		/* multi-segment entry */
+		t->next = -1;
+
+	/* if insert into middle, shift right succeeding entries in stbl */
+	stbl = DT_GETSTBL(p);
+	nextindex = p->header.nextindex;
+	if (index < nextindex) {
+		memmove(stbl + index + 1, stbl + index, nextindex - index);
+
+		if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
+			s64 lblock;
+
+			/*
+			 * Need to update slot number for entries that moved
+			 * in the stbl
+			 */
+			mp = NULL;
+			for (n = index + 1; n <= nextindex; n++) {
+				lh = (struct ldtentry *) & (p->slot[stbl[n]]);
+				modify_index(data->leaf.tid, data->leaf.ip,
+					     le32_to_cpu(lh->index), bn, n,
+					     &mp, &lblock);
+			}
+			if (mp)
+				release_metapage(mp);
+		}
+	}
+
+	stbl[index] = hsi;
+
+	/* advance next available entry index of stbl */
+	++p->header.nextindex;
+}
+
+
+/*
+ *	dtMoveEntry()
+ *
+ * function: move entries from split/left page to new/right page
+ *
+ *	nextindex of dst page and freelist/freecnt of both pages
+ *	are updated.
+ */
+static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
+			struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
+			int do_index)
+{
+	int ssi, next;		/* src slot index */
+	int di;			/* dst entry index */
+	int dsi;		/* dst slot index */
+	s8 *sstbl, *dstbl;	/* sorted entry table */
+	int snamlen, len;
+	struct ldtentry *slh, *dlh = NULL;
+	struct idtentry *sih, *dih = NULL;
+	struct dtslot *h, *s, *d;
+	struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
+	struct lv *slv, *dlv;
+	int xssi, ns, nd;
+	int sfsi;
+
+	sstbl = (s8 *) & sp->slot[sp->header.stblindex];
+	dstbl = (s8 *) & dp->slot[dp->header.stblindex];
+
+	dsi = dp->header.freelist;	/* first (whole page) free slot */
+	sfsi = sp->header.freelist;
+
+	/* linelock destination entry slot */
+	dlv = & ddtlck->lv[ddtlck->index];
+	dlv->offset = dsi;
+
+	/* linelock source entry slot */
+	slv = & sdtlck->lv[sdtlck->index];
+	slv->offset = sstbl[si];
+	xssi = slv->offset - 1;
+
+	/*
+	 * move entries
+	 */
+	ns = nd = 0;
+	for (di = 0; si < sp->header.nextindex; si++, di++) {
+		ssi = sstbl[si];
+		dstbl[di] = dsi;
+
+		/* is next slot contiguous ? */
+		if (ssi != xssi + 1) {
+			/* close current linelock */
+			slv->length = ns;
+			sdtlck->index++;
+
+			/* open new linelock */
+			if (sdtlck->index < sdtlck->maxcnt)
+				slv++;
+			else {
+				sdtlck = (struct dt_lock *) txLinelock(sdtlck);
+				slv = & sdtlck->lv[0];
+			}
+
+			slv->offset = ssi;
+			ns = 0;
+		}
+
+		/*
+		 * move head/only segment of an entry
+		 */
+		/* get dst slot */
+		h = d = &dp->slot[dsi];
+
+		/* get src slot and move */
+		s = &sp->slot[ssi];
+		if (sp->header.flag & BT_LEAF) {
+			/* get source entry */
+			slh = (struct ldtentry *) s;
+			dlh = (struct ldtentry *) h;
+			snamlen = slh->namlen;
+
+			if (do_index) {
+				len = min(snamlen, DTLHDRDATALEN);
+				dlh->index = slh->index; /* little-endian */
+			} else
+				len = min(snamlen, DTLHDRDATALEN_LEGACY);
+
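+			/* 6 == DTLHDRSIZE: copy the fixed head (inumber,
+			 * next, namlen) plus len name characters
+			 */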
+			memcpy(dlh, slh, 6 + len * 2);
+
+			next = slh->next;
+
+			/* update dst head/only segment next field */
+			dsi++;
+			dlh->next = dsi;
+		} else {
+			sih = (struct idtentry *) s;
+			snamlen = sih->namlen;
+
+			len = min(snamlen, DTIHDRDATALEN);
+			dih = (struct idtentry *) h;
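+			/* 10 == DTIHDRSIZE: copy the fixed head (xd, next,
+			 * namlen) plus len name characters
+			 */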
+			memcpy(dih, sih, 10 + len * 2);
+			next = sih->next;
+
+			dsi++;
+			dih->next = dsi;
+		}
+
+		/* free src head/only segment */
+		s->next = sfsi;
+		s->cnt = 1;
+		sfsi = ssi;
+
+		ns++;
+		nd++;
+		xssi = ssi;
+
+		/*
+		 * move additional segment(s) of the entry
+		 */
+		snamlen -= len;
+		while ((ssi = next) >= 0) {
+			/* is next slot contiguous ? */
+			if (ssi != xssi + 1) {
+				/* close current linelock */
+				slv->length = ns;
+				sdtlck->index++;
+
+				/* open new linelock */
+				if (sdtlck->index < sdtlck->maxcnt)
+					slv++;
+				else {
+					sdtlck =
+					    (struct dt_lock *)
+					    txLinelock(sdtlck);
+					slv = & sdtlck->lv[0];
+				}
+
+				slv->offset = ssi;
+				ns = 0;
+			}
+
+			/* get next source segment */
+			s = &sp->slot[ssi];
+
+			/* get next destination free slot */
+			d++;
+
+			len = min(snamlen, DTSLOTDATALEN);
+			UniStrncpy_le(d->name, s->name, len);
+
+			ns++;
+			nd++;
+			xssi = ssi;
+
+			dsi++;
+			d->next = dsi;
+
+			/* free source segment */
+			next = s->next;
+			s->next = sfsi;
+			s->cnt = 1;
+			sfsi = ssi;
+
+			snamlen -= len;
+		}		/* end while */
+
+		/* terminate dst last/only segment */
+		if (h == d) {
+			/* single segment entry */
+			if (dp->header.flag & BT_LEAF)
+				dlh->next = -1;
+			else
+				dih->next = -1;
+		} else
+			/* multi-segment entry */
+			d->next = -1;
+	}			/* end for */
+
+	/* close current linelock */
+	slv->length = ns;
+	sdtlck->index++;
+	*sdtlock = sdtlck;
+
+	dlv->length = nd;
+	ddtlck->index++;
+	*ddtlock = ddtlck;
+
+	/* update source header */
+	sp->header.freelist = sfsi;
+	sp->header.freecnt += nd;
+
+	/* update destination header */
+	dp->header.nextindex = di;
+
+	dp->header.freelist = dsi;
+	dp->header.freecnt -= nd;
+}
+
+
+/*
+ *	dtDeleteEntry()
+ *
+ * function: free a (leaf/internal) entry
+ *
+ * log freelist header, stbl, and each segment slot of entry
+ * (even though last/only segment next field is modified,
+ * physical image logging requires all segment slots of
+ * the entry logged to avoid applying previous updates
+ * to the same slots)
+ */
+static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
+{
+	int fsi;		/* free entry slot index */
+	s8 *stbl;
+	struct dtslot *t;
+	int si, freecnt;
+	struct dt_lock *dtlck = *dtlock;
+	struct lv *lv;
+	int xsi, n;
+
+	/* get free entry slot index */
+	stbl = DT_GETSTBL(p);
+	fsi = stbl[fi];
+
+	/* open new linelock */
+	if (dtlck->index >= dtlck->maxcnt)
+		dtlck = (struct dt_lock *) txLinelock(dtlck);
+	lv = & dtlck->lv[dtlck->index];
+
+	lv->offset = fsi;
+
+	/* get the head/only segment */
+	t = &p->slot[fsi];
+	if (p->header.flag & BT_LEAF)
+		si = ((struct ldtentry *) t)->next;
+	else
+		si = ((struct idtentry *) t)->next;
+	t->next = si;
+	t->cnt = 1;
+
+	n = freecnt = 1;
+	xsi = fsi;
+
+	/* find the last/only segment */
+	while (si >= 0) {
+		/* is next slot contiguous ? */
+		if (si != xsi + 1) {
+			/* close current linelock */
+			lv->length = n;
+			dtlck->index++;
+
+			/* open new linelock */
+			if (dtlck->index < dtlck->maxcnt)
+				lv++;
+			else {
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+				lv = & dtlck->lv[0];
+			}
+
+			lv->offset = si;
+			n = 0;
+		}
+
+		n++;
+		xsi = si;
+		freecnt++;
+
+		t = &p->slot[si];
+		t->cnt = 1;
+		si = t->next;
+	}
+
+	/* close current linelock */
+	lv->length = n;
+	dtlck->index++;
+
+	*dtlock = dtlck;
+
+	/* update freelist */
+	t->next = p->header.freelist;
+	p->header.freelist = fsi;
+	p->header.freecnt += freecnt;
+
+	/* if delete from middle,
+	 * shift left the succeeding entries in the stbl
+	 */
+	si = p->header.nextindex;
+	if (fi < si - 1)
+		memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1);
+
+	p->header.nextindex--;
+}
+
+
+/*
+ *	dtTruncateEntry()
+ *
+ * function: truncate a (leaf/internal) entry
+ *
+ * log freelist header, stbl, and each segment slot of entry
+ * (even though last/only segment next field is modified,
+ * physical image logging requires all segment slots of
+ * the entry logged to avoid applying previous updates
+ * to the same slots)
+ */
+static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
+{
+	int tsi;		/* truncate entry slot index */
+	s8 *stbl;
+	struct dtslot *t;
+	int si, freecnt;
+	struct dt_lock *dtlck = *dtlock;
+	struct lv *lv;
+	int fsi, xsi, n;
+
+	/* get free entry slot index */
+	stbl = DT_GETSTBL(p);
+	tsi = stbl[ti];
+
+	/* open new linelock */
+	if (dtlck->index >= dtlck->maxcnt)
+		dtlck = (struct dt_lock *) txLinelock(dtlck);
+	lv = & dtlck->lv[dtlck->index];
+
+	lv->offset = tsi;
+
+	/* get the head/only segment */
+	t = &p->slot[tsi];
+	ASSERT(p->header.flag & BT_INTERNAL);
+	((struct idtentry *) t)->namlen = 0;
+	si = ((struct idtentry *) t)->next;
+	((struct idtentry *) t)->next = -1;
+
+	n = 1;
+	freecnt = 0;
+	fsi = si;
+	xsi = tsi;
+
+	/* find the last/only segment */
+	while (si >= 0) {
+		/* is next slot contiguous ? */
+		if (si != xsi + 1) {
+			/* close current linelock */
+			lv->length = n;
+			dtlck->index++;
+
+			/* open new linelock */
+			if (dtlck->index < dtlck->maxcnt)
+				lv++;
+			else {
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+				lv = & dtlck->lv[0];
+			}
+
+			lv->offset = si;
+			n = 0;
+		}
+
+		n++;
+		xsi = si;
+		freecnt++;
+
+		t = &p->slot[si];
+		t->cnt = 1;
+		si = t->next;
+	}
+
+	/* close current linelock */
+	lv->length = n;
+	dtlck->index++;
+
+	*dtlock = dtlck;
+
+	/* update freelist */
+	if (freecnt == 0)
+		return;
+	t->next = p->header.freelist;
+	p->header.freelist = fsi;
+	p->header.freecnt += freecnt;
+}
+
+
+/*
+ *	dtLinelockFreelist()
+ */
+static void dtLinelockFreelist(dtpage_t * p,	/* directory page */
+			       int m,	/* max slot index */
+			       struct dt_lock ** dtlock)
+{
+	int fsi;		/* free entry slot index */
+	struct dtslot *t;
+	int si;
+	struct dt_lock *dtlck = *dtlock;
+	struct lv *lv;
+	int xsi, n;
+
+	/* get free entry slot index */
+	fsi = p->header.freelist;
+
+	/* open new linelock */
+	if (dtlck->index >= dtlck->maxcnt)
+		dtlck = (struct dt_lock *) txLinelock(dtlck);
+	lv = & dtlck->lv[dtlck->index];
+
+	lv->offset = fsi;
+
+	n = 1;
+	xsi = fsi;
+
+	t = &p->slot[fsi];
+	si = t->next;
+
+	/* find the last/only segment */
+	while (si < m && si >= 0) {
+		/* is next slot contiguous ? */
+		if (si != xsi + 1) {
+			/* close current linelock */
+			lv->length = n;
+			dtlck->index++;
+
+			/* open new linelock */
+			if (dtlck->index < dtlck->maxcnt)
+				lv++;
+			else {
+				dtlck = (struct dt_lock *) txLinelock(dtlck);
+				lv = & dtlck->lv[0];
+			}
+
+			lv->offset = si;
+			n = 0;
+		}
+
+		n++;
+		xsi = si;
+
+		t = &p->slot[si];
+		si = t->next;
+	}
+
+	/* close current linelock */
+	lv->length = n;
+	dtlck->index++;
+
+	*dtlock = dtlck;
+}
+
+
+/*
+ * NAME: dtModify
+ *
+ * FUNCTION: Modify the inode number part of a directory entry
+ *
+ * PARAMETERS:
+ *	tid	- Transaction id
+ *	ip	- Inode of parent directory
+ *	key	- Name of entry to be modified
+ *	orig_ino	- Original inode number expected in entry
+ *	new_ino	- New inode number to put into entry
+ *	flag	- JFS_RENAME
+ *
+ * RETURNS:
+ *	-ESTALE	- If entry found does not match orig_ino passed in
+ *	-ENOENT	- If no entry can be found to match key
+ *	0	- If successfully modified entry
+ */
+int dtModify(tid_t tid, struct inode *ip,
+	 struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
+{
+	int rc;
+	s64 bn;
+	struct metapage *mp;
+	dtpage_t *p;
+	int index;
+	struct btstack btstack;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct lv *lv;
+	s8 *stbl;
+	int entry_si;		/* entry slot index */
+	struct ldtentry *entry;
+
+	/*
+	 *      search for the entry to modify:
+	 *
+	 * dtSearch() returns (leaf page pinned, index at which to modify).
+	 */
+	if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag)))
+		return rc;
+
+	/* retrieve search result */
+	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire a transaction lock on the leaf page of named entry
+	 */
+	tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
+	dtlck = (struct dt_lock *) & tlck->lock;
+
+	/* get slot index of the entry */
+	stbl = DT_GETSTBL(p);
+	entry_si = stbl[index];
+
+	/* linelock entry */
+	ASSERT(dtlck->index == 0);
+	lv = & dtlck->lv[0];
+	lv->offset = entry_si;
+	lv->length = 1;
+	dtlck->index++;
+
+	/* get the head/only segment */
+	entry = (struct ldtentry *) & p->slot[entry_si];
+
+	/* substitute the inode number of the entry */
+	entry->inumber = cpu_to_le32(new_ino);
+
+	/* unpin the leaf page */
+	DT_PUTPAGE(mp);
+
+	return 0;
+}
+
+#ifdef _JFS_DEBUG_DTREE
+/*
+ *	dtDisplayTree()
+ *
+ * function: traverse forward
+ */
+int dtDisplayTree(struct inode *ip)
+{
+	int rc;
+	struct metapage *mp;
+	dtpage_t *p;
+	s64 bn, pbn;
+	int index, lastindex, v, h;
+	pxd_t *xd;
+	struct btstack btstack;
+	struct btframe *btsp;
+	struct btframe *parent;
+	u8 *stbl;
+	int psize = 256;
+
+	printk("display B+-tree.\n");
+
+	/* clear stack */
+	btsp = btstack.stack;
+
+	/*
+	 * start with root
+	 *
+	 * root resides in the inode
+	 */
+	bn = 0;
+	v = h = 0;
+
+	/*
+	 * first access of each page:
+	 */
+      newPage:
+	DT_GETPAGE(ip, bn, mp, psize, p, rc);
+	if (rc)
+		return rc;
+
+	/* process entries forward from first index */
+	index = 0;
+	lastindex = p->header.nextindex - 1;
+
+	if (p->header.flag & BT_INTERNAL) {
+		/*
+		 * first access of each internal page
+		 */
+		printf("internal page ");
+		dtDisplayPage(ip, bn, p);
+
+		goto getChild;
+	} else {		/* (p->header.flag & BT_LEAF) */
+
+		/*
+		 * first access of each leaf page
+		 */
+		printf("leaf page ");
+		dtDisplayPage(ip, bn, p);
+
+		/*
+		 * process leaf page entries
+		 *
+		 for ( ; index <= lastindex; index++)
+		 {
+		 }
+		 */
+
+		/* unpin the leaf page */
+		DT_PUTPAGE(mp);
+	}
+
+	/*
+	 * go back up to the parent page
+	 */
+      getParent:
+	/* pop/restore parent entry for the current child page */
+	if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
+		/* current page must have been root */
+		return 0;
+
+	/*
+	 * parent page scan completed
+	 */
+	if ((index = parent->index) == (lastindex = parent->lastindex)) {
+		/* go back up to the parent page */
+		goto getParent;
+	}
+
+	/*
+	 * parent page has entries remaining
+	 */
+	/* get back the parent page */
+	bn = parent->bn;
+	/* v = parent->level; */
+	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* get next parent entry */
+	index++;
+
+	/*
+	 * internal page: go down to child page of current entry
+	 */
+      getChild:
+	/* push/save current parent entry for the child page */
+	btsp->bn = pbn = bn;
+	btsp->index = index;
+	btsp->lastindex = lastindex;
+	/* btsp->level = v; */
+	/* btsp->node = h; */
+	++btsp;
+
+	/* get current entry for the child page */
+	stbl = DT_GETSTBL(p);
+	xd = (pxd_t *) & p->slot[stbl[index]];
+
+	/*
+	 * first access of each internal entry:
+	 */
+
+	/* get child page */
+	bn = addressPXD(xd);
+	psize = lengthPXD(xd) << ip->i_ipmnt->i_l2bsize;
+
+	printk("traverse down 0x%Lx[%d]->0x%Lx\n", pbn, index, bn);
+	v++;
+	h = index;
+
+	/* release parent page */
+	DT_PUTPAGE(mp);
+
+	/* process the child page */
+	goto newPage;
+}
+
+
+/*
+ *	dtDisplayPage()
+ *
+ * function: display page
+ */
+int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p)
+{
+	int rc;
+	struct metapage *mp;
+	struct ldtentry *lh;
+	struct idtentry *ih;
+	pxd_t *xd;
+	int i, j;
+	u8 *stbl;
+	wchar_t name[JFS_NAME_MAX + 1];
+	struct component_name key = { 0, name };
+	int freepage = 0;
+
+	if (p == NULL) {
+		freepage = 1;
+		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+	}
+
+	/* display page control */
+	printk("bn:0x%Lx flag:0x%08x nextindex:%d\n",
+	       bn, p->header.flag, p->header.nextindex);
+
+	/* display entries */
+	stbl = DT_GETSTBL(p);
+	for (i = 0, j = 1; i < p->header.nextindex; i++, j++) {
+		dtGetKey(p, i, &key, JFS_SBI(ip->i_sb)->mntflag);
+		key.name[key.namlen] = '\0';
+		if (p->header.flag & BT_LEAF) {
+			lh = (struct ldtentry *) & p->slot[stbl[i]];
+			printf("\t[%d] %s:%d", i, key.name,
+			       le32_to_cpu(lh->inumber));
+		} else {
+			ih = (struct idtentry *) & p->slot[stbl[i]];
+			xd = (pxd_t *) ih;
+			bn = addressPXD(xd);
+			printf("\t[%d] %s:0x%Lx", i, key.name, bn);
+		}
+
+		if (j == 4) {
+			printf("\n");
+			j = 0;
+		}
+	}
+
+	printf("\n");
+
+	if (freepage)
+		DT_PUTPAGE(mp);
+
+	return 0;
+}
+#endif				/* _JFS_DEBUG_DTREE */
diff --git a/fs/jfs/jfs_dtree.h b/fs/jfs/jfs_dtree.h
new file mode 100644
index 0000000..273a801
--- /dev/null
+++ b/fs/jfs/jfs_dtree.h
@@ -0,0 +1,279 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_DTREE
+#define	_H_JFS_DTREE
+
+/*
+ *	jfs_dtree.h: directory B+-tree manager
+ */
+
+#include "jfs_btree.h"
+
+typedef union {
+	struct {
+		tid_t tid;
+		struct inode *ip;
+		u32 ino;
+	} leaf;
+	pxd_t xd;
+} ddata_t;
+
+
+/*
+ *      entry segment/slot
+ *
+ * an entry consists of a type-dependent head/only segment/slot and
+ * additional segments/slots linked via the next field;
+ * N.B. last/only segment of entry is terminated by next = -1;
+ */
+/*
+ *	directory page slot
+ */
+struct dtslot {
+	s8 next;		/* 1: */
+	s8 cnt;			/* 1: */
+	__le16 name[15];	/* 30: */
+};				/* (32) */
+
+
+#define DATASLOTSIZE	16
+#define L2DATASLOTSIZE	4
+#define	DTSLOTSIZE	32
+#define	L2DTSLOTSIZE	5
+#define DTSLOTHDRSIZE	2
+#define DTSLOTDATASIZE	30
+#define DTSLOTDATALEN	15
+
+/*
+ *	 internal node entry head/only segment
+ */
+struct idtentry {
+	pxd_t xd;		/* 8: child extent descriptor */
+
+	s8 next;		/* 1: */
+	u8 namlen;		/* 1: */
+	__le16 name[11];	/* 22: 2-byte aligned */
+};				/* (32) */
+
+#define DTIHDRSIZE	10
+#define DTIHDRDATALEN	11
+
+/* compute number of slots for entry */
+#define	NDTINTERNAL(klen) ( ((4 + (klen)) + (15 - 1)) / 15 )
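+/*
+ * The head segment holds 11 name characters versus 15 for each additional
+ * slot, hence the "4 +" normalization.  e.g. a 20-character key needs
+ * NDTINTERNAL(20) = (4 + 20 + 14) / 15 = 2 slots (11 chars in the head,
+ * 9 in one extra slot).
+ */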
+
+
+/*
+ *	leaf node entry head/only segment
+ *
+ * 	For legacy filesystems, name contains 13 wchars -- no index field
+ */
+struct ldtentry {
+	__le32 inumber;		/* 4: 4-byte aligned */
+	s8 next;		/* 1: */
+	u8 namlen;		/* 1: */
+	__le16 name[11];	/* 22: 2-byte aligned */
+	__le32 index;		/* 4: index into dir_table */
+};				/* (32) */
+
+#define DTLHDRSIZE	6
+#define DTLHDRDATALEN_LEGACY	13	/* Old (OS/2) format */
+#define DTLHDRDATALEN	11
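+/*
+ * The legacy (OS/2) head segment has no 4-byte index field, leaving room
+ * for two more UniChars in the 32-byte slot: 13 instead of 11.
+ */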
+
+/*
+ * dir_table used for directory traversal during readdir
+ */
+
+/*
+ * Keep persistent index for directory entries
+ */
+#define DO_INDEX(INODE) (JFS_SBI((INODE)->i_sb)->mntflag & JFS_DIR_INDEX)
+
+/*
+ * Maximum entry in inline directory table
+ */
+#define MAX_INLINE_DIRTABLE_ENTRY 13
+
+struct dir_table_slot {
+	u8 rsrvd;		/* 1: */
+	u8 flag;		/* 1: 0 if free */
+	u8 slot;		/* 1: slot within leaf page of entry */
+	u8 addr1;		/* 1: upper 8 bits of leaf page address */
+	__le32 addr2;		/* 4: lower 32 bits of leaf page address -OR-
+				   index of next entry when this entry was deleted */
+};				/* (8) */
+
+/*
+ * flag values
+ */
+#define DIR_INDEX_VALID 1
+#define DIR_INDEX_FREE 0
+
+#define DTSaddress(dir_table_slot, address64)\
+{\
+	(dir_table_slot)->addr1 = ((u64)address64) >> 32;\
+	(dir_table_slot)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+}
+
+#define addressDTS(dts)\
+	( ((s64)((dts)->addr1)) << 32 | __le32_to_cpu((dts)->addr2) )
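+/*
+ * The leaf page address is split across addr1/addr2: e.g. block address
+ * 0x123456789 is stored as addr1 = 0x01 and addr2 = cpu_to_le32(0x23456789),
+ * and addressDTS() reassembles (0x01LL << 32) | 0x23456789 = 0x123456789.
+ */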
+
+/* compute number of slots for entry */
+#define	NDTLEAF_LEGACY(klen)	( ((2 + (klen)) + (15 - 1)) / 15 )
+#define	NDTLEAF	NDTINTERNAL
+
+
+/*
+ *	directory root page (in-line in on-disk inode):
+ *
+ * cf. dtpage_t below.
+ */
+typedef union {
+	struct {
+		struct dasd DASD; /* 16: DASD limit/usage info */
+
+		u8 flag;	/* 1: */
+		u8 nextindex;	/* 1: next free entry in stbl */
+		s8 freecnt;	/* 1: free count */
+		s8 freelist;	/* 1: freelist header */
+
+		__le32 idotdot;	/* 4: parent inode number */
+
+		s8 stbl[8];	/* 8: sorted entry index table */
+	} header;		/* (32) */
+
+	struct dtslot slot[9];
+} dtroot_t;
+
+#define PARENT(IP) \
+	(le32_to_cpu(JFS_IP(IP)->i_dtroot.header.idotdot))
+
+#define DTROOTMAXSLOT	9
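+/* 9 slots of 32 bytes = 288 bytes: the in-line root size that dtReadFirst()
+ * starts with before descending to full PSIZE pages.
+ */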
+
+#define	dtEmpty(IP) (JFS_IP(IP)->i_dtroot.header.nextindex == 0)
+
+
+/*
+ *	directory regular page:
+ *
+ *	entry slot array of 32 byte slot
+ *
+ * sorted entry slot index table (stbl):
+ * contiguous slots at slot specified by stblindex,
+ * 1-byte per entry
+ *   512 byte block:  16 entry tbl (1 slot)
+ *  1024 byte block:  32 entry tbl (1 slot)
+ *  2048 byte block:  64 entry tbl (2 slot)
+ *  4096 byte block: 128 entry tbl (4 slot)
+ *
+ * data area:
+ *   512 byte block:  16 - 2 =  14 slot
+ *  1024 byte block:  32 - 2 =  30 slot
+ *  2048 byte block:  64 - 3 =  61 slot
+ *  4096 byte block: 128 - 5 = 123 slot
+ *
+ * N.B. index is 0-based; index fields refer to slot index
+ * except nextindex which refers to entry index in stbl;
+ * end of entry slot list or freelist is marked with -1.
+ */
+typedef union {
+	struct {
+		__le64 next;	/* 8: next sibling */
+		__le64 prev;	/* 8: previous sibling */
+
+		u8 flag;	/* 1: */
+		u8 nextindex;	/* 1: next entry index in stbl */
+		s8 freecnt;	/* 1: */
+		s8 freelist;	/* 1: slot index of head of freelist */
+
+		u8 maxslot;	/* 1: number of slots in page slot[] */
+		u8 stblindex;	/* 1: slot index of start of stbl */
+		u8 rsrvd[2];	/* 2: */
+
+		pxd_t self;	/* 8: self pxd */
+	} header;		/* (32) */
+
+	struct dtslot slot[128];
+} dtpage_t;
+
+#define DTPAGEMAXSLOT        128
+
+#define DT8THPGNODEBYTES     512
+#define DT8THPGNODETSLOTS      1
+#define DT8THPGNODESLOTS      16
+
+#define DTQTRPGNODEBYTES    1024
+#define DTQTRPGNODETSLOTS      1
+#define DTQTRPGNODESLOTS      32
+
+#define DTHALFPGNODEBYTES   2048
+#define DTHALFPGNODETSLOTS     2
+#define DTHALFPGNODESLOTS     64
+
+#define DTFULLPGNODEBYTES   4096
+#define DTFULLPGNODETSLOTS     4
+#define DTFULLPGNODESLOTS    128
+
+#define DTENTRYSTART	1
+
+/* get sorted entry table of the page */
+#define DT_GETSTBL(p) ( ((p)->header.flag & BT_ROOT) ?\
+	((dtroot_t *)(p))->header.stbl : \
+	(s8 *)&(p)->slot[(p)->header.stblindex] )
+
+/*
+ * Flags for dtSearch
+ */
+#define JFS_CREATE 1
+#define JFS_LOOKUP 2
+#define JFS_REMOVE 3
+#define JFS_RENAME 4
+
+#define DIRENTSIZ(namlen) \
+    ( (sizeof(struct dirent) - 2*(JFS_NAME_MAX+1) + 2*((namlen)+1) + 3) &~ 3 )
+
+/*
+ * Maximum file offset for directories.
+ */
+#define DIREND	INT_MAX
+
+/*
+ *	external declarations
+ */
+extern void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot);
+
+extern int dtSearch(struct inode *ip, struct component_name * key,
+		    ino_t * data, struct btstack * btstack, int flag);
+
+extern int dtInsert(tid_t tid, struct inode *ip, struct component_name * key,
+		    ino_t * ino, struct btstack * btstack);
+
+extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key,
+		    ino_t * data, int flag);
+
+extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
+		    ino_t * orig_ino, ino_t new_ino, int flag);
+
+extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);
+
+#ifdef  _JFS_DEBUG_DTREE
+extern int dtDisplayTree(struct inode *ip);
+
+extern int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p);
+#endif				/* _JFS_DEBUG_DTREE */
+
+#endif				/* !_H_JFS_DTREE */
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
new file mode 100644
index 0000000..1953acb
--- /dev/null
+++ b/fs/jfs/jfs_extent.c
@@ -0,0 +1,668 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_extent.h"
+#include "jfs_debug.h"
+
+/*
+ * forward references
+ */
+static int extBalloc(struct inode *, s64, s64 *, s64 *);
+#ifdef _NOTYET
+static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
+#endif
+static s64 extRoundDown(s64 nb);
+
+/*
+ * external references
+ */
+extern int jfs_commit_inode(struct inode *, int);
+
+
+#define DPD(a)          (printk("(a): %d\n",(a)))
+#define DPC(a)          (printk("(a): %c\n",(a)))
+#define DPL1(a)					\
+{						\
+	if ((a) >> 32)				\
+		printk("(a): %x%08x  ",(a));	\
+	else					\
+		printk("(a): %x  ",(a) << 32);	\
+}
+#define DPL(a)					\
+{						\
+	if ((a) >> 32)				\
+		printk("(a): %x%08x\n",(a));	\
+	else					\
+		printk("(a): %x\n",(a) << 32);	\
+}
+
+#define DPD1(a)         (printk("(a): %d  ",(a)))
+#define DPX(a)          (printk("(a): %08x\n",(a)))
+#define DPX1(a)         (printk("(a): %08x  ",(a)))
+#define DPS(a)          (printk("%s\n",(a)))
+#define DPE(a)          (printk("\nENTERING: %s\n",(a)))
+#define DPE1(a)          (printk("\nENTERING: %s",(a)))
+#define DPS1(a)         (printk("  %s  ",(a)))
+
+
+/*
+ * NAME:	extAlloc()
+ *
+ * FUNCTION:    allocate an extent for a specified page range within a
+ *		file.
+ *
+ * PARAMETERS:
+ *	ip	- the inode of the file.
+ *	xlen	- requested extent length.
+ *	pno	- the starting page number within the file.
+ *	xp	- pointer to an xad.  on entry, xad describes an
+ *		  extent that is used as an allocation hint if the
+ *		  xaddr of the xad is non-zero.  on successful exit,
+ *		  the xad describes the newly allocated extent.
+ *	abnr	- boolean_t indicating whether the newly allocated extent
+ *		  should be marked as allocated but not recorded.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+int
+extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	s64 nxlen, nxaddr, xoff, hint, xaddr = 0;
+	int rc;
+	int xflag;
+
+	/* This blocks if we are low on resources */
+	txBeginAnon(ip->i_sb);
+
+	/* Avoid race with jfs_commit_inode() */
+	down(&JFS_IP(ip)->commit_sem);
+
+	/* validate extent length */
+	if (xlen > MAXXLEN)
+		xlen = MAXXLEN;
+
+	/* get the page's starting extent offset */
+	xoff = pno << sbi->l2nbperpage;
+
+	/* check if an allocation hint was provided */
+	if ((hint = addressXAD(xp))) {
+		/* get the size of the extent described by the hint */
+		nxlen = lengthXAD(xp);
+
+		/* check if the hint is for the portion of the file
+		 * immediately previous to the current allocation
+	 * request and if the hint extent has the same abnr
+		 * value as the current request.  if so, we can
+		 * extend the hint extent to include the current
+		 * extent if we can allocate the blocks immediately
+		 * following the hint extent.
+		 */
+		if (offsetXAD(xp) + nxlen == xoff &&
+		    abnr == ((xp->flag & XAD_NOTRECORDED) ? TRUE : FALSE))
+			xaddr = hint + nxlen;
+
+		/* adjust the hint to the last block of the extent */
+		hint += (nxlen - 1);
+	}
+
+	/* allocate the disk blocks for the extent.  initially, extBalloc()
+	 * will try to allocate disk blocks for the requested size (xlen). 
+	 * if this fails (xlen contiguous free blocks not available), it'll
+	 * try to allocate a smaller number of blocks (producing a smaller
+	 * extent), with this smaller number of blocks consisting of the
+	 * requested number of blocks rounded down to the next smaller
+	 * power of 2 number (i.e. 16 -> 8).  it'll continue to round down
+	 * and retry the allocation until the number of blocks to allocate
+	 * is smaller than the number of blocks per page.
+	 */
+	nxlen = xlen;
+	if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) {
+		up(&JFS_IP(ip)->commit_sem);
+		return (rc);
+	}
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+		dbFree(ip, nxaddr, (s64) nxlen);
+		up(&JFS_IP(ip)->commit_sem);
+		return -EDQUOT;
+	}
+
+	/* determine the value of the extent flag */
+	xflag = (abnr == TRUE) ? XAD_NOTRECORDED : 0;
+
+	/* if we can extend the hint extent to cover the current request, 
+	 * extend it.  otherwise, insert a new extent to
+	 * cover the current request.
+	 */
+	if (xaddr && xaddr == nxaddr)
+		rc = xtExtend(0, ip, xoff, (int) nxlen, 0);
+	else
+		rc = xtInsert(0, ip, xflag, xoff, (int) nxlen, &nxaddr, 0);
+
+	/* if the extend or insert failed, 
+	 * free the newly allocated blocks and return the error.
+	 */
+	if (rc) {
+		dbFree(ip, nxaddr, nxlen);
+		DQUOT_FREE_BLOCK(ip, nxlen);
+		up(&JFS_IP(ip)->commit_sem);
+		return (rc);
+	}
+
+	/* set the results of the extent allocation */
+	XADaddress(xp, nxaddr);
+	XADlength(xp, nxlen);
+	XADoffset(xp, xoff);
+	xp->flag = xflag;
+
+	mark_inode_dirty(ip);
+
+	up(&JFS_IP(ip)->commit_sem);
+	/*
+	 * COMMIT_SyncList flags an anonymous tlock on page that is on
+	 * sync list.
+	 * We need to commit the inode to get the page written to disk.
+	 */
+	if (test_and_clear_cflag(COMMIT_Synclist,ip))
+		jfs_commit_inode(ip, 0);
+
+	return (0);
+}
+
+
+#ifdef _NOTYET
+/*
+ * NAME:        extRealloc()
+ *
+ * FUNCTION:    extend the allocation of a file extent containing a
+ *		partially backed last page.
+ *
+ * PARAMETERS:
+ *	ip	- the inode of the file.
+ *	nxlen	- requested size of the resulting extent.
+ *	xp	- pointer to an xad.  on entry, the xad describes the
+ *		  extent of the partially backed last page; on successful
+ *		  exit, it describes the newly allocated extent.
+ *	abnr	- boolean_t indicating whether the newly allocated extent
+ *		  should be marked as allocated but not recorded.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
+{
+	struct super_block *sb = ip->i_sb;
+	s64 xaddr, xlen, nxaddr, delta, xoff;
+	s64 ntail, nextend, ninsert;
+	int rc, nbperpage = JFS_SBI(sb)->nbperpage;
+	int xflag;
+
+	/* This blocks if we are low on resources */
+	txBeginAnon(ip->i_sb);
+
+	down(&JFS_IP(ip)->commit_sem);
+	/* validate extent length */
+	if (nxlen > MAXXLEN)
+		nxlen = MAXXLEN;
+
+	/* get the extend (partial) page's disk block address and
+	 * number of blocks.
+	 */
+	xaddr = addressXAD(xp);
+	xlen = lengthXAD(xp);
+	xoff = offsetXAD(xp);
+
+	/* if the extend page is abnr and if the request is for
+	 * the extent to be allocated and recorded, 
+	 * make the page allocated and recorded.
+	 */
+	if ((xp->flag & XAD_NOTRECORDED) && !abnr) {
+		xp->flag = 0;
+		if ((rc = xtUpdate(0, ip, xp)))
+			goto exit;
+	}
+
+	/* try to allocate the requested number of blocks for the
+	 * extent.  extBrealloc() first tries to satisfy the request
+	 * by extending the allocation in place. otherwise, it will
+	 * try to allocate a new set of blocks large enough for the
+	 * request.  in satisfying a request, extBrealloc() may allocate
+	 * less than what was requested but will always allocate enough
+	 * space to satisfy the extend page.
+	 */
+	if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr)))
+		goto exit;
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+		dbFree(ip, nxaddr, (s64) nxlen);
+		up(&JFS_IP(ip)->commit_sem);
+		return -EDQUOT;
+	}
+
+	delta = nxlen - xlen;
+
+	/* check if the extend page is not abnr but the request is abnr
+	 * and the allocated disk space is for more than one page.  if this
+	 * is the case, there is a mismatch of abnr between the extend page
+	 * and the one or more pages following the extend page.  as a result,
+	 * two extents will have to be manipulated. the first will be that
+	 * of the extent of the extend page and will be manipulated through
+	 * an xtExtend() or an xtTailgate(), depending upon whether the
+	 * disk allocation occurred as an inplace extension.  the second
+	 * extent will be manipulated (created) through an xtInsert() and
+	 * will be for the pages following the extend page.
+	 */
+	if (abnr && (!(xp->flag & XAD_NOTRECORDED)) && (nxlen > nbperpage)) {
+		ntail = nbperpage;
+		nextend = ntail - xlen;
+		ninsert = nxlen - nbperpage;
+
+		xflag = XAD_NOTRECORDED;
+	} else {
+		ntail = nxlen;
+		nextend = delta;
+		ninsert = 0;
+
+		xflag = xp->flag;
+	}
+
+	/* if we were able to extend the disk allocation in place,
+	 * extend the extent.  otherwise, move the extent to a
+	 * new disk location.
+	 */
+	if (xaddr == nxaddr) {
+		/* extend the extent */
+		if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
+			dbFree(ip, xaddr + xlen, delta);
+			DQUOT_FREE_BLOCK(ip, nxlen);
+			goto exit;
+		}
+	} else {
+		/*
+		 * move the extent to a new location:
+		 *
+		 * xtTailgate() accounts for relocated tail extent;
+		 */
+		if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
+			dbFree(ip, nxaddr, nxlen);
+			DQUOT_FREE_BLOCK(ip, nxlen);
+			goto exit;
+		}
+	}
+
+
+	/* check if we need to also insert a new extent */
+	if (ninsert) {
+		/* perform the insert.  if it fails, free the blocks
+		 * to be inserted and make it appear that we only did
+		 * the xtExtend() or xtTailgate() above.
+		 */
+		xaddr = nxaddr + ntail;
+		if (xtInsert (0, ip, xflag, xoff + ntail, (int) ninsert,
+			      &xaddr, 0)) {
+			dbFree(ip, xaddr, (s64) ninsert);
+			delta = nextend;
+			nxlen = ntail;
+			xflag = 0;
+		}
+	}
+
+	/* set the return results */
+	XADaddress(xp, nxaddr);
+	XADlength(xp, nxlen);
+	XADoffset(xp, xoff);
+	xp->flag = xflag;
+
+	mark_inode_dirty(ip);
+exit:
+	up(&JFS_IP(ip)->commit_sem);
+	return (rc);
+}
+#endif			/* _NOTYET */
+
+
+/*
+ * NAME:        extHint()
+ *
+ * FUNCTION:    produce an extent allocation hint for a file offset.
+ *
+ * PARAMETERS:
+ *	ip	- the inode of the file.
+ *	offset  - file offset for which the hint is needed.
+ *	xp	- pointer to the xad that is to be filled in with
+ *		  the hint.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ */
+int extHint(struct inode *ip, s64 offset, xad_t * xp)
+{
+	struct super_block *sb = ip->i_sb;
+	struct xadlist xadl;
+	struct lxdlist lxdl;
+	lxd_t lxd;
+	s64 prev;
+	int rc, nbperpage = JFS_SBI(sb)->nbperpage;
+
+	/* init the hint as "no hint provided" */
+	XADaddress(xp, 0);
+
+	/* determine the starting extent offset of the page previous
+	 * to the page containing the offset.
+	 */
+	prev = ((offset & ~POFFSET) >> JFS_SBI(sb)->l2bsize) - nbperpage;
+
+	/* if the offset is in the first page of the file,
+	 * no hint is provided.
+	 */
+	if (prev < 0)
+		return (0);
+
+	/* prepare to lookup the previous page's extent info */
+	lxdl.maxnlxd = 1;
+	lxdl.nlxd = 1;
+	lxdl.lxd = &lxd;
+	LXDoffset(&lxd, prev);
+	LXDlength(&lxd, nbperpage);
+
+	xadl.maxnxad = 1;
+	xadl.nxad = 0;
+	xadl.xad = xp;
+
+	/* perform the lookup */
+	if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
+		return (rc);
+
+	/* check if no extent exists for the previous page.
+	 * this is possible for sparse files.
+	 */
+	if (xadl.nxad == 0) {
+//              assert(ISSPARSE(ip));
+		return (0);
+	}
+
+	/* only preserve the abnr flag within the xad flags
+	 * of the returned hint.
+	 */
+	xp->flag &= XAD_NOTRECORDED;
+
+	if (xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
+		jfs_error(ip->i_sb, "extHint: corrupt xtree");
+		return -EIO;
+	}
+
+	return (0);
+}
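+
+/*
+ * Worked example of the hint arithmetic in extHint() above, assuming a
+ * 4K file system block size (l2bsize = 12, nbperpage = 1); the figures
+ * are illustrative only:
+ *
+ *	offset = 13312			(a byte inside page 3 of the file)
+ *	offset & ~POFFSET = 12288	(start of page 3, in bytes)
+ *	12288 >> l2bsize  = 3		(first block of page 3)
+ *	3 - nbperpage     = 2 = prev	(first block of page 2)
+ *
+ * so xtLookupList() is asked for the extent backing page 2, and its xad
+ * (if any) becomes the allocation hint.  For any offset inside page 0,
+ * prev goes negative and no hint is provided.
+ */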
+
+
+/*
+ * NAME:        extRecord()
+ *
+ * FUNCTION:    change a page within a file from not recorded to recorded.
+ *
+ * PARAMETERS:
+ *	ip	- inode of the file.
+ *	cp	- cbuf of the file page.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+int extRecord(struct inode *ip, xad_t * xp)
+{
+	int rc;
+
+	txBeginAnon(ip->i_sb);
+
+	down(&JFS_IP(ip)->commit_sem);
+
+	/* update the extent */
+	rc = xtUpdate(0, ip, xp);
+
+	up(&JFS_IP(ip)->commit_sem);
+	return rc;
+}
+
+
+#ifdef _NOTYET
+/*
+ * NAME:        extFill()
+ *
+ * FUNCTION:    allocate disk space for a file page that represents
+ *		a file hole.
+ *
+ * PARAMETERS:
+ *	ip	- the inode of the file.
+ *	cp	- cbuf of the file page representing the hole.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+int extFill(struct inode *ip, xad_t * xp)
+{
+	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
+	s64 blkno = offsetXAD(xp) >> ip->i_blksize;
+
+//      assert(ISSPARSE(ip));
+
+	/* initialize the extent allocation hint */
+	XADaddress(xp, 0);
+
+	/* allocate an extent to fill the hole */
+	if ((rc = extAlloc(ip, nbperpage, blkno, xp, FALSE)))
+		return (rc);
+
+	assert(lengthPXD(xp) == nbperpage);
+
+	return (0);
+}
+#endif			/* _NOTYET */
+
+
+/*
+ * NAME:	extBalloc()
+ *
+ * FUNCTION:    allocate disk blocks to form an extent.
+ *
+ *		initially, we will try to allocate disk blocks for the
+ *		requested size (nblocks).  if this fails (nblocks 
+ *		contiguous free blocks not available), we'll try to allocate
+ *		a smaller number of blocks (producing a smaller extent), with
+ *		this smaller number of blocks consisting of the requested
+ *		number of blocks rounded down to the next smaller power of 2
+ *		number (e.g. 16 -> 8).  we'll continue to round down and
+ *		retry the allocation until the number of blocks to allocate
+ *		is smaller than the number of blocks per page.
+ *		
+ * PARAMETERS:
+ *	ip	 - the inode of the file.
+ *	hint	 - disk block number to be used as an allocation hint.
+ *	*nblocks - pointer to an s64 value.  on entry, this value specifies
+ *		   the desired number of blocks to be allocated. on successful
+ *		   exit, this value is set to the number of blocks actually
+ *		   allocated.
+ *	blkno	 - pointer to a block address that is filled in on successful
+ *		   return with the starting block number of the newly 
+ *		   allocated block range.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+static int
+extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
+{
+	struct jfs_inode_info *ji = JFS_IP(ip);
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	s64 nb, nblks, daddr, max;
+	int rc, nbperpage = sbi->nbperpage;
+	struct bmap *bmp = sbi->bmap;
+	int ag;
+
+	/* get the number of blocks to initially attempt to allocate.
+	 * we'll first try the number of blocks requested unless this
+	 * number is greater than the maximum number of contiguous free
+	 * blocks in the map. in that case, we'll start off with the 
+	 * maximum free.
+	 */
+	max = (s64) 1 << bmp->db_maxfreebud;
+	if (*nblocks >= max && *nblocks > nbperpage)
+		nb = nblks = (max > nbperpage) ? max : nbperpage;
+	else
+		nb = nblks = *nblocks;
+
+	/* try to allocate blocks */
+	while ((rc = dbAlloc(ip, hint, nb, &daddr)) != 0) {
+		/* if something other than an out of space error,
+		 * stop and return this error.
+		 */
+		if (rc != -ENOSPC)
+			return (rc);
+
+		/* decrease the allocation request size */
+		nb = min(nblks, extRoundDown(nb));
+
+		/* give up if we cannot cover a page */
+		if (nb < nbperpage)
+			return (rc);
+	}
+
+	*nblocks = nb;
+	*blkno = daddr;
+
+	if (S_ISREG(ip->i_mode) && (ji->fileset == FILESYSTEM_I)) {
+		ag = BLKTOAG(daddr, sbi);
+		spin_lock_irq(&ji->ag_lock);
+		if (ji->active_ag == -1) {
+			atomic_inc(&bmp->db_active[ag]);
+			ji->active_ag = ag;
+		} else if (ji->active_ag != ag) {
+			atomic_dec(&bmp->db_active[ji->active_ag]);
+			atomic_inc(&bmp->db_active[ag]);
+			ji->active_ag = ag;
+		}
+		spin_unlock_irq(&ji->ag_lock);
+	}
+
+	return (0);
+}
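+
+/*
+ * Example of the clamp-and-retry behaviour in extBalloc() above, with
+ * illustrative figures: assume nbperpage = 8 and a block map whose
+ * largest free buddy is 2^5 blocks (db_maxfreebud = 5, so max = 32):
+ *
+ *	request *nblocks = 100	-> clamped to 32 before the loop
+ *	dbAlloc(32) -> -ENOSPC	-> retry with extRoundDown(32) = 16
+ *	dbAlloc(16) -> -ENOSPC	-> retry with extRoundDown(16) = 8
+ *	dbAlloc(8)  -> -ENOSPC	-> extRoundDown(8) = 4 < nbperpage: give up
+ *
+ * db_maxfreebud is of course taken from the live block allocation map,
+ * not hard coded.
+ */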
+
+
+#ifdef _NOTYET
+/*
+ * NAME:	extBrealloc()
+ *
+ * FUNCTION:    attempt to extend an extent's allocation.
+ *
+ *		initially, we will try to extend the extent's allocation
+ *		in place.  if this fails, we'll try to move the extent
+ *		to a new set of blocks. if moving the extent, we initially
+ *		will try to allocate disk blocks for the requested size
+ *		(nnew).  if this fails (nnew contiguous free blocks not
+ *		available), we'll try to allocate a smaller number of
+ *		blocks (producing a smaller extent), with this smaller
+ *		number of blocks consisting of the requested number of
+ *		blocks rounded down to the next smaller power of 2
+ *		number (e.g. 16 -> 8).  we'll continue to round down and
+ *		retry the allocation until the number of blocks to allocate
+ *		is smaller than the number of blocks per page.
+ *		
+ * PARAMETERS:
+ *	ip	 - the inode of the file.
+ *	blkno    - starting block number of the extent's current allocation.
+ *	nblks    - number of blocks within the extent's current allocation.
+ *	newnblks - pointer to an s64 value.  on entry, this value is the
+ *		   new desired extent size (number of blocks).  on
+ *		   successful exit, this value is set to the extent's actual
+ *		   new size (new number of blocks).
+ *	newblkno - the starting block number of the extent's new allocation.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO	- i/o error.
+ *      -ENOSPC	- insufficient disk resources.
+ */
+static int
+extBrealloc(struct inode *ip,
+	    s64 blkno, s64 nblks, s64 * newnblks, s64 * newblkno)
+{
+	int rc;
+
+	/* try to extend in place */
+	if ((rc = dbExtend(ip, blkno, nblks, *newnblks - nblks)) == 0) {
+		*newblkno = blkno;
+		return (0);
+	} else {
+		if (rc != -ENOSPC)
+			return (rc);
+	}
+
+	/* in place extension not possible.  
+	 * try to move the extent to a new set of blocks.
+	 */
+	return (extBalloc(ip, blkno, newnblks, newblkno));
+}
+#endif			/* _NOTYET */
+
+
+/*
+ * NAME:        extRoundDown()
+ *
+ * FUNCTION:    round down a specified number of blocks to the next
+ *		smallest power of 2 number.
+ *
+ * PARAMETERS:
+ *	nb	- the number of blocks to round down.
+ *
+ * RETURN VALUES:
+ *      next smallest power of 2 number.
+ */
+static s64 extRoundDown(s64 nb)
+{
+	int i;
+	u64 m, k;
+
+	for (i = 0, m = (u64) 1 << 63; i < 64; i++, m >>= 1) {
+		if (m & nb)
+			break;
+	}
+
+	i = 63 - i;
+	k = (u64) 1 << i;
+	k = ((k - 1) & nb) ? k : k >> 1;
+
+	return (k);
+}
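+
+#if 0
+/* For reference only (not used by the driver): an equivalent but less
+ * efficient formulation of extRoundDown() above.  For nb > 1 the result
+ * is the largest power of 2 strictly less than nb, e.g. 20 -> 16,
+ * 9 -> 8, and an exact power of 2 drops to the next one down, 16 -> 8.
+ */
+static s64 example_round_down(s64 nb)
+{
+	u64 k = 1;
+
+	while ((k << 1) < (u64) nb)
+		k <<= 1;
+
+	return (s64) k;
+}
+#endif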
diff --git a/fs/jfs/jfs_extent.h b/fs/jfs/jfs_extent.h
new file mode 100644
index 0000000..e80fc7c
--- /dev/null
+++ b/fs/jfs/jfs_extent.h
@@ -0,0 +1,31 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2001
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_EXTENT
+#define _H_JFS_EXTENT
+
+/* get block allocation hint as location of disk inode */
+#define	INOHINT(ip)	\
+	(addressPXD(&(JFS_IP(ip)->ixpxd)) + lengthPXD(&(JFS_IP(ip)->ixpxd)) - 1)
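+
+/*
+ * e.g. with an inode extent (ixpxd) starting at disk block 1000 and 4
+ * blocks long (illustrative figures), INOHINT(ip) evaluates to
+ * 1000 + 4 - 1 = 1003, the extent's last block; extAlloc() passes this
+ * to extBalloc() as the allocation hint when no better hint exists, so
+ * a file's first data blocks tend to be placed near its on-disk inode.
+ */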
+
+extern int	extAlloc(struct inode *, s64, s64, xad_t *, boolean_t);
+extern int	extFill(struct inode *, xad_t *);
+extern int	extHint(struct inode *, s64, xad_t *);
+extern int	extRealloc(struct inode *, s64, xad_t *, boolean_t);
+extern int	extRecord(struct inode *, xad_t *);
+
+#endif	/* _H_JFS_EXTENT */
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
new file mode 100644
index 0000000..86ccac8
--- /dev/null
+++ b/fs/jfs/jfs_filsys.h
@@ -0,0 +1,280 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2003
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_FILSYS
+#define _H_JFS_FILSYS
+
+/*
+ *	jfs_filsys.h
+ *
+ * file system (implementation-dependent) constants 
+ *
+ * refer to <limits.h> for system wide implementation-dependent constants 
+ */
+
+/*
+ *	 file system option (superblock flag)
+ */
+/* mount time flag to disable journaling to disk */
+#define JFS_NOINTEGRITY 0x00000010
+
+/* mount time flags for error handling */
+#define JFS_ERR_REMOUNT_RO 0x00000002   /* remount read-only */
+#define JFS_ERR_CONTINUE   0x00000004   /* continue */
+#define JFS_ERR_PANIC      0x00000008   /* panic */
+
+/* platform option (conditional compilation) */
+#define JFS_AIX		0x80000000	/* AIX support */
+/*	POSIX name/directory  support */
+
+#define JFS_OS2		0x40000000	/* OS/2 support */
+/*	case-insensitive name/directory support */
+
+#define JFS_DFS		0x20000000	/* DCE DFS LFS support */
+
+#define JFS_LINUX      	0x10000000	/* Linux support */
+/*	case-sensitive name/directory support */
+
+/* directory option */
+#define JFS_UNICODE	0x00000001	/* unicode name */
+
+/* commit option */
+#define	JFS_COMMIT	0x00000f00	/* commit option mask */
+#define	JFS_GROUPCOMMIT	0x00000100	/* group (of 1) commit */
+#define	JFS_LAZYCOMMIT	0x00000200	/* lazy commit */
+#define	JFS_TMPFS	0x00000400	/* temporary file system - 
+					 * do not log/commit:
+					 */
+
+/* log logical volume option */
+#define	JFS_INLINELOG	0x00000800	/* inline log within file system */
+#define JFS_INLINEMOVE	0x00001000	/* inline log being moved */
+
+/* Secondary aggregate inode table */
+#define JFS_BAD_SAIT	0x00010000	/* current secondary ait is bad */
+
+/* sparse regular file support */
+#define JFS_SPARSE	0x00020000	/* sparse regular file */
+
+/* DASD Limits		F226941 */
+#define JFS_DASD_ENABLED	0x00040000	/* DASD limits enabled */
+#define	JFS_DASD_PRIME		0x00080000	/* Prime DASD usage on boot */
+
+/* big endian flag */
+#define	JFS_SWAP_BYTES		0x00100000	/* running on big endian computer */
+
+/* Directory index */
+#define JFS_DIR_INDEX		0x00200000	/* Persistent index for */
+						/* directory entries    */
+
+
+/*
+ *	buffer cache configuration
+ */
+/* page size */
+#ifdef PSIZE
+#undef PSIZE
+#endif
+#define	PSIZE		4096	/* page size (in byte) */
+#define	L2PSIZE		12	/* log2(PSIZE) */
+#define	POFFSET		4095	/* offset within page */
+
+/* buffer page size */
+#define BPSIZE	PSIZE
+
+/*
+ *	fs fundamental size
+ *
+ * PSIZE >= file system block size >= PBSIZE >= DISIZE
+ */
+#define	PBSIZE		512	/* physical block size (in byte) */
+#define	L2PBSIZE	9	/* log2(PBSIZE) */
+
+#define DISIZE		512	/* on-disk inode size (in byte) */
+#define L2DISIZE	9	/* log2(DISIZE) */
+
+#define IDATASIZE	256	/* inode inline data size */
+#define	IXATTRSIZE	128	/* inode inline extended attribute size */
+
+#define XTPAGE_SIZE     4096
+#define log2_PAGESIZE     12
+
+#define IAG_SIZE        4096
+#define IAG_EXTENT_SIZE 4096
+#define	INOSPERIAG	4096	/* number of disk inodes per iag */
+#define	L2INOSPERIAG	12	/* l2 number of disk inodes per iag */
+#define INOSPEREXT	32	/* number of disk inode per extent */
+#define L2INOSPEREXT	5	/* l2 number of disk inode per extent */
+#define	IXSIZE		(DISIZE * INOSPEREXT)	/* inode extent size */
+#define	INOSPERPAGE	8	/* number of disk inodes per 4K page */
+#define	L2INOSPERPAGE	3	/* log2(INOSPERPAGE) */
+
+#define	IAGFREELIST_LWM	64
+
+#define INODE_EXTENT_SIZE	IXSIZE	/* inode extent size */
+#define NUM_INODE_PER_EXTENT	INOSPEREXT
+#define NUM_INODE_PER_IAG	INOSPERIAG
+
+#define MINBLOCKSIZE		512
+#define MAXBLOCKSIZE		4096
+#define	MAXFILESIZE		((s64)1 << 52)
+
+#define JFS_LINK_MAX		0xffffffff
+
+/* Minimum number of bytes supported for a JFS partition */
+#define MINJFS			(0x1000000)
+#define MINJFSTEXT		"16"
+
+/*
+ * file system block size -> physical block size
+ */
+#define LBOFFSET(x)	((x) & (PBSIZE - 1))
+#define LBNUMBER(x)	((x) >> L2PBSIZE)
+#define	LBLK2PBLK(sb,b)	((b) << (sb->s_blocksize_bits - L2PBSIZE))
+#define	PBLK2LBLK(sb,b)	((b) >> (sb->s_blocksize_bits - L2PBSIZE))
+/* size in byte -> last page number */
+#define	SIZE2PN(size)	( ((s64)((size) - 1)) >> (L2PSIZE) )
+/* size in byte -> last file system block number */
+#define	SIZE2BN(size, l2bsize) ( ((s64)((size) - 1)) >> (l2bsize) )
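+
+/*
+ * e.g. for a 4K file system block size (s_blocksize_bits = 12):
+ *	LBLK2PBLK(sb, 5)  = 5 << (12 - L2PBSIZE) = 40	(512-byte blocks)
+ *	PBLK2LBLK(sb, 40) = 40 >> 3              = 5
+ *	SIZE2PN(8192)     = 8191 >> L2PSIZE      = 1	(last page of an 8K file)
+ *	SIZE2BN(8192, 12) = 8191 >> 12           = 1	(last 4K block, likewise)
+ */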
+
+/*
+ * fixed physical block address (physical block size = 512 byte)
+ *
+ * NOTE: since we can't guarantee a physical block size of 512 bytes the use of
+ *	 these macros should be removed and the byte offset macros used instead.
+ */
+#define SUPER1_B	64	/* primary superblock */
+#define	AIMAP_B		(SUPER1_B + 8)	/* 1st extent of aggregate inode map */
+#define	AITBL_B		(AIMAP_B + 16)	/*
+					 * 1st extent of aggregate inode table
+					 */
+#define	SUPER2_B	(AITBL_B + 32)	/* 2ndary superblock pbn */
+#define	BMAP_B		(SUPER2_B + 8)	/* block allocation map */
+
+/*
+ * SIZE_OF_SUPER defines the total amount of space reserved on disk for the
+ * superblock.  This is not the same as the superblock structure, since all of
+ * this space is not currently being used.
+ */
+#define SIZE_OF_SUPER	PSIZE
+
+/*
+ * SIZE_OF_AG_TABLE defines the amount of space reserved to hold the AG table
+ */
+#define SIZE_OF_AG_TABLE	PSIZE
+
+/*
+ * SIZE_OF_MAP_PAGE defines the amount of disk space reserved for each page of
+ * the inode allocation map (to hold iag)
+ */
+#define SIZE_OF_MAP_PAGE	PSIZE
+
+/*
+ * fixed byte offset address
+ */
+#define SUPER1_OFF	0x8000	/* primary superblock */
+#define AIMAP_OFF	(SUPER1_OFF + SIZE_OF_SUPER)
+					/*
+					 * Control page of aggregate inode map
+					 * followed by 1st extent of map
+					 */
+#define AITBL_OFF	(AIMAP_OFF + (SIZE_OF_MAP_PAGE << 1))
+					/* 
+					 * 1st extent of aggregate inode table
+					 */
+#define SUPER2_OFF	(AITBL_OFF + INODE_EXTENT_SIZE)
+					/*
+					 * secondary superblock
+					 */
+#define BMAP_OFF	(SUPER2_OFF + SIZE_OF_SUPER)
+					/*
+					 * block allocation map
+					 */
+
+/*
+ * The following macro is used to indicate the number of reserved disk blocks at
+ * the front of an aggregate, in terms of physical blocks.  This value is
+ * currently defined to be 32K.  This turns out to be the same as the primary
+ * superblock's address, since it directly follows the reserved blocks.
+ */
+#define AGGR_RSVD_BLOCKS	SUPER1_B
+
+/*
+ * The following macro is used to indicate the number of reserved bytes at the
+ * front of an aggregate.  This value is currently defined to be 32K.  This
+ * turns out to be the same as the primary superblock's byte offset, since it
+ * directly follows the reserved blocks.
+ */
+#define AGGR_RSVD_BYTES	SUPER1_OFF
+
+/*
+ * The following macro defines the byte offset for the first inode extent in
+ * the aggregate inode table.  This allows us to find the self inode to find the
+ * rest of the table.  Currently this value is 44K.
+ */
+#define AGGR_INODE_TABLE_START	AITBL_OFF
+
+/*
+ *	fixed reserved inode number
+ */
+/* aggregate inode */
+#define AGGR_RESERVED_I	0	/* aggregate inode (reserved) */
+#define	AGGREGATE_I	1	/* aggregate inode map inode */
+#define	BMAP_I		2	/* aggregate block allocation map inode */
+#define	LOG_I		3	/* aggregate inline log inode */
+#define BADBLOCK_I	4	/* aggregate bad block inode */
+#define	FILESYSTEM_I	16	/* 1st/only fileset inode in ait:
+				 * fileset inode map inode
+				 */
+
+/* per fileset inode */
+#define FILESET_RSVD_I	0	/* fileset inode (reserved) */
+#define FILESET_EXT_I	1	/* fileset inode extension */
+#define	ROOT_I		2	/* fileset root inode */
+#define ACL_I		3	/* fileset ACL inode */
+
+#define FILESET_OBJECT_I 4	/* the first fileset inode available for a file
+				 * or directory or link...
+				 */
+#define FIRST_FILESET_INO 16	/* the first aggregate inode which describes
+				 * an inode.  (To fsck this is also the first
+				 * inode in part 2 of the agg inode table.)
+				 */
+
+/*
+ *	directory configuration
+ */
+#define JFS_NAME_MAX	255
+#define JFS_PATH_MAX	BPSIZE
+
+
+/*
+ *	file system state (superblock state)
+ */
+#define FM_CLEAN 0x00000000	/* file system is unmounted and clean */
+#define FM_MOUNT 0x00000001	/* file system is mounted cleanly */
+#define FM_DIRTY 0x00000002	/* file system was not unmounted and clean 
+				 * when mounted or 
+				 * commit failure occurred while being mounted:
+				 * fsck() must be run to repair 
+				 */
+#define	FM_LOGREDO 0x00000004	/* log based recovery (logredo()) failed:
+				 * fsck() must be run to repair 
+				 */
+#define	FM_EXTENDFS 0x00000008	/* file system extendfs() in progress */
+
+#endif				/* _H_JFS_FILSYS */
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
new file mode 100644
index 0000000..7838313
--- /dev/null
+++ b/fs/jfs/jfs_imap.c
@@ -0,0 +1,3270 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ *	jfs_imap.c: inode allocation map manager
+ *
+ * Serialization:
+ *   Each AG has a simple lock which is used to control the serialization of
+ *	the AG level lists.  This lock should be taken first whenever an AG
+ *	level list will be modified or accessed.
+ *
+ *   Each IAG is locked by obtaining the buffer for the IAG page.
+ *
+ *   There is also an inode lock for the inode map inode.  A read lock needs to
+ *	be taken whenever an IAG is read from the map or the global level
+ *	information is read.  A write lock needs to be taken whenever the global
+ *	level information is modified or an atomic operation needs to be used.
+ *
+ *	If more than one IAG is read at one time, the read lock may not
+ *	be given up until all of the IAG's are read.  Otherwise, a deadlock
+ *	may occur when trying to obtain the read lock while another thread
+ *	holding the read lock is waiting on the IAG already being held.
+ *
+ *   The control page of the inode map is read into memory by diMount().
+ *	Thereafter it should only be modified in memory and then it will be
+ *	written out when the filesystem is unmounted by diUnmount().
+ */
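+
+/*
+ * e.g. diFree() below follows that ordering when it may touch several
+ * IAGs:
+ *
+ *	AG_LOCK(imap, agno);		AG level lists about to change
+ *	IREAD_LOCK(ipimap);		about to read one or more IAGs
+ *	diIAGRead(imap, iagno, &mp);	each IAG page locked via its buffer
+ *	...				(further diIAGRead() calls as needed)
+ *	IREAD_UNLOCK(ipimap);		only after *all* needed IAGs are held
+ *	...
+ *	AG_UNLOCK(imap, agno);
+ */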
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_dinode.h"
+#include "jfs_dmap.h"
+#include "jfs_imap.h"
+#include "jfs_metapage.h"
+#include "jfs_superblock.h"
+#include "jfs_debug.h"
+
+/*
+ * imap locks
+ */
+/* iag free list lock */
+#define IAGFREE_LOCK_INIT(imap)		init_MUTEX(&imap->im_freelock)
+#define IAGFREE_LOCK(imap)		down(&imap->im_freelock)
+#define IAGFREE_UNLOCK(imap)		up(&imap->im_freelock)
+
+/* per ag iag list locks */
+#define AG_LOCK_INIT(imap,index)	init_MUTEX(&(imap->im_aglock[index]))
+#define AG_LOCK(imap,agno)		down(&imap->im_aglock[agno])
+#define AG_UNLOCK(imap,agno)		up(&imap->im_aglock[agno])
+
+/*
+ * external references
+ */
+extern struct address_space_operations jfs_aops;
+
+/*
+ * forward references
+ */
+static int diAllocAG(struct inomap *, int, boolean_t, struct inode *);
+static int diAllocAny(struct inomap *, int, boolean_t, struct inode *);
+static int diAllocBit(struct inomap *, struct iag *, int);
+static int diAllocExt(struct inomap *, int, struct inode *);
+static int diAllocIno(struct inomap *, int, struct inode *);
+static int diFindFree(u32, int);
+static int diNewExt(struct inomap *, struct iag *, int);
+static int diNewIAG(struct inomap *, int *, int, struct metapage **);
+static void duplicateIXtree(struct super_block *, s64, int, s64 *);
+
+static int diIAGRead(struct inomap * imap, int, struct metapage **);
+static int copy_from_dinode(struct dinode *, struct inode *);
+static void copy_to_dinode(struct dinode *, struct inode *);
+
+/*
+ *	debug code for double-checking inode map
+ */
+/* #define	_JFS_DEBUG_IMAP	1 */
+
+#ifdef	_JFS_DEBUG_IMAP
+#define DBG_DIINIT(imap)	DBGdiInit(imap)
+#define DBG_DIALLOC(imap, ino)	DBGdiAlloc(imap, ino)
+#define DBG_DIFREE(imap, ino)	DBGdiFree(imap, ino)
+
+static void *DBGdiInit(struct inomap * imap);
+static void DBGdiAlloc(struct inomap * imap, ino_t ino);
+static void DBGdiFree(struct inomap * imap, ino_t ino);
+#else
+#define DBG_DIINIT(imap)
+#define DBG_DIALLOC(imap, ino)
+#define DBG_DIFREE(imap, ino)
+#endif				/* _JFS_DEBUG_IMAP */
+
+/*
+ * NAME:        diMount()
+ *
+ * FUNCTION:    initialize the incore inode map control structures for
+ *		a fileset or aggregate at init time.
+ *
+ *              the inode map's control structure (dinomap) is 
+ *              brought in from disk and placed in virtual memory.
+ *
+ * PARAMETERS:
+ *      ipimap  - pointer to inode map inode for the aggregate or fileset.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -ENOMEM  - insufficient free virtual memory.
+ *      -EIO  	- i/o error.
+ */
+int diMount(struct inode *ipimap)
+{
+	struct inomap *imap;
+	struct metapage *mp;
+	int index;
+	struct dinomap_disk *dinom_le;
+
+	/*
+	 * allocate/initialize the in-memory inode map control structure
+	 */
+	/* allocate the in-memory inode map control structure. */
+	imap = (struct inomap *) kmalloc(sizeof(struct inomap), GFP_KERNEL);
+	if (imap == NULL) {
+		jfs_err("diMount: kmalloc returned NULL!");
+		return -ENOMEM;
+	}
+
+	/* read the on-disk inode map control structure. */
+
+	mp = read_metapage(ipimap,
+			   IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
+			   PSIZE, 0);
+	if (mp == NULL) {
+		kfree(imap);
+		return -EIO;
+	}
+
+	/* copy the on-disk version to the in-memory version. */
+	dinom_le = (struct dinomap_disk *) mp->data;
+	imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
+	imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
+	atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
+	atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree));
+	imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext);
+	imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext);
+	for (index = 0; index < MAXAG; index++) {
+		imap->im_agctl[index].inofree =
+		    le32_to_cpu(dinom_le->in_agctl[index].inofree);
+		imap->im_agctl[index].extfree =
+		    le32_to_cpu(dinom_le->in_agctl[index].extfree);
+		imap->im_agctl[index].numinos =
+		    le32_to_cpu(dinom_le->in_agctl[index].numinos);
+		imap->im_agctl[index].numfree =
+		    le32_to_cpu(dinom_le->in_agctl[index].numfree);
+	}
+
+	/* release the buffer. */
+	release_metapage(mp);
+
+	/*
+	 * allocate/initialize inode allocation map locks
+	 */
+	/* allocate and init iag free list lock */
+	IAGFREE_LOCK_INIT(imap);
+
+	/* allocate and init ag list locks */
+	for (index = 0; index < MAXAG; index++) {
+		AG_LOCK_INIT(imap, index);
+	}
+
+	/* bind the inode map inode and inode map control structure
+	 * to each other.
+	 */
+	imap->im_ipimap = ipimap;
+	JFS_IP(ipimap)->i_imap = imap;
+
+//      DBG_DIINIT(imap);
+
+	return (0);
+}
+
+
+/*
+ * NAME:        diUnmount()
+ *
+ * FUNCTION:    write to disk the incore inode map control structures for
+ *		a fileset or aggregate at unmount time.
+ *
+ * PARAMETERS:
+ *      ipimap  - pointer to inode map inode for the aggregate or fileset.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -ENOMEM  - insufficient free virtual memory.
+ *      -EIO  	- i/o error.
+ */
+int diUnmount(struct inode *ipimap, int mounterror)
+{
+	struct inomap *imap = JFS_IP(ipimap)->i_imap;
+
+	/*
+	 * update the on-disk inode map control structure
+	 */
+
+	if (!(mounterror || isReadOnly(ipimap)))
+		diSync(ipimap);
+
+	/*
+	 * Invalidate the page cache buffers
+	 */
+	truncate_inode_pages(ipimap->i_mapping, 0);
+
+	/*
+	 * free in-memory control structure
+	 */
+	kfree(imap);
+
+	return (0);
+}
+
+
+/*
+ *	diSync()
+ */
+int diSync(struct inode *ipimap)
+{
+	struct dinomap_disk *dinom_le;
+	struct inomap *imp = JFS_IP(ipimap)->i_imap;
+	struct metapage *mp;
+	int index;
+
+	/*
+	 * write imap global control page
+	 */
+	/* read the on-disk inode map control structure */
+	mp = get_metapage(ipimap,
+			  IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
+			  PSIZE, 0);
+	if (mp == NULL) {
+		jfs_err("diSync: get_metapage failed!");
+		return -EIO;
+	}
+
+	/* copy the in-memory version to the on-disk version */
+	dinom_le = (struct dinomap_disk *) mp->data;
+	dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
+	dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
+	dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
+	dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
+	dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext);
+	dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext);
+	for (index = 0; index < MAXAG; index++) {
+		dinom_le->in_agctl[index].inofree =
+		    cpu_to_le32(imp->im_agctl[index].inofree);
+		dinom_le->in_agctl[index].extfree =
+		    cpu_to_le32(imp->im_agctl[index].extfree);
+		dinom_le->in_agctl[index].numinos =
+		    cpu_to_le32(imp->im_agctl[index].numinos);
+		dinom_le->in_agctl[index].numfree =
+		    cpu_to_le32(imp->im_agctl[index].numfree);
+	}
+
+	/* write out the control structure */
+	write_metapage(mp);
+
+	/*
+	 * write out dirty pages of imap
+	 */
+	filemap_fdatawrite(ipimap->i_mapping);
+	filemap_fdatawait(ipimap->i_mapping);
+
+	diWriteSpecial(ipimap, 0);
+
+	return (0);
+}
+
+
+/*
+ * NAME:        diRead()
+ *
+ * FUNCTION:    initialize an incore inode from disk.
+ *
+ *		on entry, the specified incore inode should itself
+ *		specify the disk inode number corresponding to the
+ *		incore inode (i.e. i_number should be initialized).
+ *		
+ *		this routine handles incore inode initialization for
+ *		both "special" and "regular" inodes.  special inodes
+ *		are those required early in the mount process and
+ *	        require special handling since much of the file system
+ *		is not yet initialized.  these "special" inodes are
+ *		identified by a NULL inode map inode pointer and are
+ *		actually initialized by a call to diReadSpecial().
+ *		
+ *		for regular inodes, the iag describing the disk inode
+ *		is read from disk to determine the inode extent address
+ *		for the disk inode.  with the inode extent address in
+ *		hand, the page of the extent that contains the disk
+ *		inode is read and the disk inode is copied to the
+ *		incore inode.
+ *
+ * PARAMETERS:
+ *      ip  -  pointer to incore inode to be initialized from disk.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO  	- i/o error.
+ *      -ENOMEM	- insufficient memory
+ *      
+ */
+int diRead(struct inode *ip)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	int iagno, ino, extno, rc;
+	struct inode *ipimap;
+	struct dinode *dp;
+	struct iag *iagp;
+	struct metapage *mp;
+	s64 blkno, agstart;
+	struct inomap *imap;
+	int block_offset;
+	int inodes_left;
+	uint pageno;
+	int rel_inode;
+
+	jfs_info("diRead: ino = %ld", ip->i_ino);
+
+	ipimap = sbi->ipimap;
+	JFS_IP(ip)->ipimap = ipimap;
+
+	/* determine the iag number for this inode (number) */
+	iagno = INOTOIAG(ip->i_ino);
+
+	/* read the iag */
+	imap = JFS_IP(ipimap)->i_imap;
+	IREAD_LOCK(ipimap);
+	rc = diIAGRead(imap, iagno, &mp);
+	IREAD_UNLOCK(ipimap);
+	if (rc) {
+		jfs_err("diRead: diIAGRead returned %d", rc);
+		return (rc);
+	}
+
+	iagp = (struct iag *) mp->data;
+
+	/* determine inode extent that holds the disk inode */
+	ino = ip->i_ino & (INOSPERIAG - 1);
+	extno = ino >> L2INOSPEREXT;
+
+	if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
+	    (addressPXD(&iagp->inoext[extno]) == 0)) {
+		release_metapage(mp);
+		return -ESTALE;
+	}
+
+	/* get disk block number of the page within the inode extent
+	 * that holds the disk inode.
+	 */
+	blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage);
+
+	/* get the ag for the iag */
+	agstart = le64_to_cpu(iagp->agstart);
+
+	release_metapage(mp);
+
+	rel_inode = (ino & (INOSPERPAGE - 1));
+	pageno = blkno >> sbi->l2nbperpage;
+
+	if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
+		/*
+		 * OS/2 didn't always align inode extents on page boundaries
+		 */
+		inodes_left =
+		     (sbi->nbperpage - block_offset) << sbi->l2niperblk;
+
+		if (rel_inode < inodes_left)
+			rel_inode += block_offset << sbi->l2niperblk;
+		else {
+			pageno += 1;
+			rel_inode -= inodes_left;
+		}
+	}
+
+	/* read the page of disk inode */
+	mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
+	if (mp == 0) {
+		jfs_err("diRead: read_metapage failed");
+		return -EIO;
+	}
+
+	/* locate the disk inode requested */
+	dp = (struct dinode *) mp->data;
+	dp += rel_inode;
+
+	if (ip->i_ino != le32_to_cpu(dp->di_number)) {
+		jfs_error(ip->i_sb, "diRead: i_ino != di_number");
+		rc = -EIO;
+	} else if (le32_to_cpu(dp->di_nlink) == 0)
+		rc = -ESTALE;
+	else
+		/* copy the disk inode to the in-memory inode */
+		rc = copy_from_dinode(dp, ip);
+
+	release_metapage(mp);
+
+	/* set the ag for the inode */
+	JFS_IP(ip)->agno = BLKTOAG(agstart, sbi);
+	JFS_IP(ip)->active_ag = -1;
+
+	return (rc);
+}
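+
+/*
+ * e.g. for i_ino = 5003 (an illustrative value) the lookup in diRead()
+ * above resolves as follows, with INOSPERIAG = 4096, INOSPEREXT = 32 and
+ * INOSPERPAGE = 8:
+ *
+ *	ino       = 5003 & 4095 = 907	(slot within the IAG)
+ *	extno     = 907 >> 5    = 28	(inode extent within the IAG)
+ *	rel_inode = 907 & 7     = 3	(inode within its 4K page, before
+ *					 the OS/2 alignment correction)
+ *
+ * the pxd in iagp->inoext[28] then gives the extent's disk address.
+ */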
+
+
+/*
+ * NAME:        diReadSpecial()
+ *
+ * FUNCTION:    initialize a 'special' inode from disk.
+ *
+ *		this routine handles aggregate level inodes.  The
+ *		inode cache cannot differentiate between the
+ *		aggregate inodes and the filesystem inodes, so we
+ *		handle these here.  We don't actually use the aggregate
+ *	        inode map, since these inodes are at a fixed location
+ *		and in some cases the aggregate inode map isn't initialized
+ *		yet.
+ *
+ * PARAMETERS:
+ *      sb - filesystem superblock
+ *	inum - aggregate inode number
+ *	secondary - 1 if secondary aggregate inode table
+ *
+ * RETURN VALUES:
+ *      new inode	- success
+ *      NULL		- i/o error.
+ */
+struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	uint address;
+	struct dinode *dp;
+	struct inode *ip;
+	struct metapage *mp;
+
+	ip = new_inode(sb);
+	if (ip == NULL) {
+		jfs_err("diReadSpecial: new_inode returned NULL!");
+		return ip;
+	}
+
+	if (secondary) {
+		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
+		JFS_IP(ip)->ipimap = sbi->ipaimap2;
+	} else {
+		address = AITBL_OFF >> L2PSIZE;
+		JFS_IP(ip)->ipimap = sbi->ipaimap;
+	}
+
+	ASSERT(inum < INOSPEREXT);
+
+	ip->i_ino = inum;
+
+	address += inum >> 3;	/* 8 inodes per 4K page */
+
+	/* read the page of fixed disk inode (AIT) in raw mode */
+	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
+	if (mp == NULL) {
+		ip->i_nlink = 1;	/* Don't want iput() deleting it */
+		iput(ip);
+		return (NULL);
+	}
+
+	/* get the pointer to the disk inode of interest */
+	dp = (struct dinode *) (mp->data);
+	dp += inum % 8;		/* 8 inodes per 4K page */
+
+	/* copy on-disk inode to in-memory inode */
+	if ((copy_from_dinode(dp, ip)) != 0) {
+		/* handle bad return by returning NULL for ip */
+		ip->i_nlink = 1;	/* Don't want iput() deleting it */
+		iput(ip);
+		/* release the page */
+		release_metapage(mp);
+		return (NULL);
+
+	}
+
+	ip->i_mapping->a_ops = &jfs_aops;
+	mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
+
+	/* Allocations to metadata inodes should not affect quotas */
+	ip->i_flags |= S_NOQUOTA;
+
+	if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
+		sbi->gengen = le32_to_cpu(dp->di_gengen);
+		sbi->inostamp = le32_to_cpu(dp->di_inostamp);
+	}
+
+	/* release the page */
+	release_metapage(mp);
+
+	return (ip);
+}
+
+/*
+ * NAME:        diWriteSpecial()
+ *
+ * FUNCTION:    Write the special inode to disk
+ *
+ * PARAMETERS:
+ *      ip - special inode
+ *	secondary - 1 if secondary aggregate inode table
+ *
+ * RETURN VALUES: none
+ */
+
+void diWriteSpecial(struct inode *ip, int secondary)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	uint address;
+	struct dinode *dp;
+	ino_t inum = ip->i_ino;
+	struct metapage *mp;
+
+	ip->i_state &= ~I_DIRTY;
+
+	if (secondary)
+		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
+	else
+		address = AITBL_OFF >> L2PSIZE;
+
+	ASSERT(inum < INOSPEREXT);
+
+	address += inum >> 3;	/* 8 inodes per 4K page */
+
+	/* read the page of fixed disk inode (AIT) in raw mode */
+	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
+	if (mp == NULL) {
+		jfs_err("diWriteSpecial: failed to read aggregate inode "
+			"extent!");
+		return;
+	}
+
+	/* get the pointer to the disk inode of interest */
+	dp = (struct dinode *) (mp->data);
+	dp += inum % 8;		/* 8 inodes per 4K page */
+
+	/* copy in-memory inode to on-disk inode */
+	copy_to_dinode(dp, ip);
+	memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);
+
+	if (inum == FILESYSTEM_I)
+		dp->di_gengen = cpu_to_le32(sbi->gengen);
+
+	/* write the page */
+	write_metapage(mp);
+}
+
+/*
+ * NAME:        diFreeSpecial()
+ *
+ * FUNCTION:    Free allocated space for special inode
+ */
+void diFreeSpecial(struct inode *ip)
+{
+	if (ip == NULL) {
+		jfs_err("diFreeSpecial called with NULL ip!");
+		return;
+	}
+	filemap_fdatawrite(ip->i_mapping);
+	filemap_fdatawait(ip->i_mapping);
+	truncate_inode_pages(ip->i_mapping, 0);
+	iput(ip);
+}
+
+
+
+/*
+ * NAME:        diWrite()
+ *
+ * FUNCTION:    write the on-disk inode portion of the in-memory inode
+ *		to its corresponding on-disk inode.
+ *
+ *		on entry, the specified incore inode should itself
+ *		specify the disk inode number corresponding to the
+ *		incore inode (i.e. i_number should be initialized).
+ *
+ *		the inode contains the inode extent address for the disk
+ *		inode.  with the inode extent address in hand, the
+ *		page of the extent that contains the disk inode is
+ *		read and the disk inode portion of the incore inode
+ *		is copied to the disk inode.
+ *		
+ * PARAMETERS:
+ *	tid -  transaction id
+ *      ip  -  pointer to incore inode to be written to the inode extent.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO  	- i/o error.
+ */
+int diWrite(tid_t tid, struct inode *ip)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	int rc = 0;
+	s32 ino;
+	struct dinode *dp;
+	s64 blkno;
+	int block_offset;
+	int inodes_left;
+	struct metapage *mp;
+	uint pageno;
+	int rel_inode;
+	int dioffset;
+	struct inode *ipimap;
+	uint type;
+	lid_t lid;
+	struct tlock *ditlck, *tlck;
+	struct linelock *dilinelock, *ilinelock;
+	struct lv *lv;
+	int n;
+
+	ipimap = jfs_ip->ipimap;
+
+	ino = ip->i_ino & (INOSPERIAG - 1);
+
+	if (!addressPXD(&(jfs_ip->ixpxd)) ||
+	    (lengthPXD(&(jfs_ip->ixpxd)) !=
+	     JFS_IP(ipimap)->i_imap->im_nbperiext)) {
+		jfs_error(ip->i_sb, "diWrite: ixpxd invalid");
+		return -EIO;
+	}
+
+	/*
+	 * read the page of disk inode containing the specified inode:
+	 */
+	/* compute the block address of the page */
+	blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);
+
+	rel_inode = (ino & (INOSPERPAGE - 1));
+	pageno = blkno >> sbi->l2nbperpage;
+
+	if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
+		/*
+		 * OS/2 didn't always align inode extents on page boundaries
+		 */
+		inodes_left =
+		    (sbi->nbperpage - block_offset) << sbi->l2niperblk;
+
+		if (rel_inode < inodes_left)
+			rel_inode += block_offset << sbi->l2niperblk;
+		else {
+			pageno += 1;
+			rel_inode -= inodes_left;
+		}
+	}
+	/* read the page of disk inode */
+      retry:
+	mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
+	if (mp == 0)
+		return -EIO;
+
+	/* get the pointer to the disk inode */
+	dp = (struct dinode *) mp->data;
+	dp += rel_inode;
+
+	dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;
+
+	/*
+	 * acquire transaction lock on the on-disk inode;
+	 * N.B. tlock is acquired on ipimap not ip;
+	 */
+	if ((ditlck =
+	     txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
+		goto retry;
+	dilinelock = (struct linelock *) & ditlck->lock;
+
+	/*
+	 * copy btree root from in-memory inode to on-disk inode
+	 *
+	 * (tlock is taken from inline B+-tree root in in-memory
+	 * inode when the B+-tree root is updated, which is pointed 
+	 * by jfs_ip->blid as well as being on tx tlock list)
+	 *
+	 * further processing of btree root is based on the copy 
+	 * in in-memory inode, where txLog() will log from, and, 
+	 * for xtree root, txUpdateMap() will update map and reset
+	 * XAD_NEW bit;
+	 */
+
+	if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
+		/*
+		 * This is the special xtree inside the directory for storing
+		 * the directory table
+		 */
+		xtpage_t *p, *xp;
+		xad_t *xad;
+
+		jfs_ip->xtlid = 0;
+		tlck = lid_to_tlock(lid);
+		assert(tlck->type & tlckXTREE);
+		tlck->type |= tlckBTROOT;
+		tlck->mp = mp;
+		ilinelock = (struct linelock *) & tlck->lock;
+
+		/*
+		 * copy xtree root from inode to dinode:
+		 */
+		p = &jfs_ip->i_xtroot;
+		xp = (xtpage_t *) &dp->di_dirtable;
+		lv = ilinelock->lv;
+		for (n = 0; n < ilinelock->index; n++, lv++) {
+			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
+			       lv->length << L2XTSLOTSIZE);
+		}
+
+		/* reset on-disk (metadata page) xtree XAD_NEW bit */
+		xad = &xp->xad[XTENTRYSTART];
+		for (n = XTENTRYSTART;
+		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
+			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
+				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
+	}
+
+	if ((lid = jfs_ip->blid) == 0)
+		goto inlineData;
+	jfs_ip->blid = 0;
+
+	tlck = lid_to_tlock(lid);
+	type = tlck->type;
+	tlck->type |= tlckBTROOT;
+	tlck->mp = mp;
+	ilinelock = (struct linelock *) & tlck->lock;
+
+	/*
+	 *      regular file: 16 byte (XAD slot) granularity
+	 */
+	if (type & tlckXTREE) {
+		xtpage_t *p, *xp;
+		xad_t *xad;
+
+		/*
+		 * copy xtree root from inode to dinode:
+		 */
+		p = &jfs_ip->i_xtroot;
+		xp = &dp->di_xtroot;
+		lv = ilinelock->lv;
+		for (n = 0; n < ilinelock->index; n++, lv++) {
+			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
+			       lv->length << L2XTSLOTSIZE);
+		}
+
+		/* reset on-disk (metadata page) xtree XAD_NEW bit */
+		xad = &xp->xad[XTENTRYSTART];
+		for (n = XTENTRYSTART;
+		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
+			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
+				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
+	}
+	/*
+	 *      directory: 32 byte (directory entry slot) granularity
+	 */
+	else if (type & tlckDTREE) {
+		dtpage_t *p, *xp;
+
+		/*
+		 * copy dtree root from inode to dinode:
+		 */
+		p = (dtpage_t *) &jfs_ip->i_dtroot;
+		xp = (dtpage_t *) & dp->di_dtroot;
+		lv = ilinelock->lv;
+		for (n = 0; n < ilinelock->index; n++, lv++) {
+			memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
+			       lv->length << L2DTSLOTSIZE);
+		}
+	} else {
+		jfs_err("diWrite: UFO tlock");
+	}
+
+      inlineData:
+	/*
+	 * copy inline symlink from in-memory inode to on-disk inode
+	 */
+	if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
+		lv = & dilinelock->lv[dilinelock->index];
+		lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
+		lv->length = 2;
+		memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
+		dilinelock->index++;
+	}
+	/*
+	 * copy inline data from in-memory inode to on-disk inode:
+	 * 128 byte slot granularity
+	 */
+	if (test_cflag(COMMIT_Inlineea, ip)) {
+		lv = & dilinelock->lv[dilinelock->index];
+		lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
+		lv->length = 1;
+		memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
+		dilinelock->index++;
+
+		clear_cflag(COMMIT_Inlineea, ip);
+	}
+
+	/*
+	 *      lock/copy inode base: 128 byte slot granularity
+	 */
+// baseDinode:
+	lv = & dilinelock->lv[dilinelock->index];
+	lv->offset = dioffset >> L2INODESLOTSIZE;
+	copy_to_dinode(dp, ip);
+	if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
+		lv->length = 2;
+		memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
+	} else
+		lv->length = 1;
+	dilinelock->index++;
+
+#ifdef _JFS_FASTDASD
+	/*
+	 * We aren't logging changes to the DASD used in directory inodes,
+	 * but we need to write them to disk.  If we don't unmount cleanly,
+	 * mount will recalculate the DASD used.
+	 */
+	if (S_ISDIR(ip->i_mode)
+	    && (ip->i_ipmnt->i_mntflag & JFS_DASD_ENABLED))
+		memcpy(&dp->di_DASD, &ip->i_DASD, sizeof(struct dasd));
+#endif				/*  _JFS_FASTDASD */
+
+	/* release the buffer holding the updated on-disk inode. 
+	 * the buffer will be later written by commit processing.
+	 */
+	write_metapage(mp);
+
+	return (rc);
+}
+
+
+/*
+ * NAME:        diFree(ip)
+ *
+ * FUNCTION:    free a specified inode from the inode working map
+ *		for a fileset or aggregate.
+ *
+ *		if the inode to be freed represents the first (only)
+ *		free inode within the iag, the iag will be placed on
+ *		the ag free inode list.
+ *	
+ *		freeing the inode will cause the inode extent to be
+ *		freed if the inode is the only allocated inode within
+ *		the extent.  in this case all the disk resource backing
+ *		up the inode extent will be freed. in addition, the iag
+ *		will be placed on the ag extent free list if the extent
+ *		is the first free extent in the iag.  if freeing the
+ *		extent also means that no free inodes will exist for
+ *		the iag, the iag will also be removed from the ag free
+ *		inode list.
+ *
+ *		the iag describing the inode will be freed if the extent
+ *		is to be freed and it is the only backed extent within
+ *		the iag.  in this case, the iag will be removed from the
+ *		ag free extent list and ag free inode list and placed on
+ *		the inode map's free iag list.
+ *
+ *		a careful update approach is used to provide consistency
+ *		in the face of updates to multiple buffers.  under this
+ *		approach, all required buffers are obtained before making
+ *		any updates and are held until all updates are complete.
+ *
+ * PARAMETERS:
+ *      ip  	- inode to be freed.
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -EIO  	- i/o error.
+ */
+int diFree(struct inode *ip)
+{
+	int rc;
+	ino_t inum = ip->i_ino;
+	struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
+	struct metapage *mp, *amp, *bmp, *cmp, *dmp;
+	int iagno, ino, extno, bitno, sword, agno;
+	int back, fwd;
+	u32 bitmap, mask;
+	struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
+	struct inomap *imap = JFS_IP(ipimap)->i_imap;
+	pxd_t freepxd;
+	tid_t tid;
+	struct inode *iplist[3];
+	struct tlock *tlck;
+	struct pxd_lock *pxdlock;
+
+	/*
+	 * This is just to suppress compiler warnings.  The same logic that
+	 * references these variables is used to initialize them.
+	 */
+	aiagp = biagp = ciagp = diagp = NULL;
+
+	/* get the iag number containing the inode.
+	 */
+	iagno = INOTOIAG(inum);
+
+	/* make sure that the iag is contained within 
+	 * the map.
+	 */
+	if (iagno >= imap->im_nextiag) {
+		dump_mem("imap", imap, 32);
+		jfs_error(ip->i_sb,
+			  "diFree: inum = %d, iagno = %d, nextiag = %d",
+			  (uint) inum, iagno, imap->im_nextiag);
+		return -EIO;
+	}
+
+	/* get the allocation group for this ino.
+	 */
+	agno = JFS_IP(ip)->agno;
+
+	/* Lock the AG specific inode map information
+	 */
+	AG_LOCK(imap, agno);
+
+	/* Obtain read lock in imap inode.  Don't release it until we have
+	 * read all of the IAG's that we are going to.
+	 */
+	IREAD_LOCK(ipimap);
+
+	/* read the iag.
+	 */
+	if ((rc = diIAGRead(imap, iagno, &mp))) {
+		IREAD_UNLOCK(ipimap);
+		AG_UNLOCK(imap, agno);
+		return (rc);
+	}
+	iagp = (struct iag *) mp->data;
+
+	/* get the inode number and extent number of the inode within
+	 * the iag and the inode number within the extent.
+	 */
+	ino = inum & (INOSPERIAG - 1);
+	extno = ino >> L2INOSPEREXT;
+	bitno = ino & (INOSPEREXT - 1);
+	mask = HIGHORDER >> bitno;
+
+	if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
+		jfs_error(ip->i_sb,
+			  "diFree: wmap shows inode already free");
+	}
+
+	if (!addressPXD(&iagp->inoext[extno])) {
+		release_metapage(mp);
+		IREAD_UNLOCK(ipimap);
+		AG_UNLOCK(imap, agno);
+		jfs_error(ip->i_sb, "diFree: invalid inoext");
+		return -EIO;
+	}
+
+	/* compute the bitmap for the extent reflecting the freed inode.
+	 */
+	bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;
+
+	if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
+		release_metapage(mp);
+		IREAD_UNLOCK(ipimap);
+		AG_UNLOCK(imap, agno);
+		jfs_error(ip->i_sb, "diFree: numfree > numinos");
+		return -EIO;
+	}
+	/*
+	 *      inode extent still has some inodes or below low water mark:
+	 *      keep the inode extent;
+	 */
+	if (bitmap ||
+	    imap->im_agctl[agno].numfree < 96 ||
+	    (imap->im_agctl[agno].numfree < 288 &&
+	     (((imap->im_agctl[agno].numfree * 100) /
+	       imap->im_agctl[agno].numinos) <= 25))) {
+		/* if the iag currently has no free inodes (i.e.,
+		 * the inode being freed is the first free inode of iag),
+		 * insert the iag at head of the inode free list for the ag.
+		 */
+		if (iagp->nfreeinos == 0) {
+			/* check if there are any iags on the ag inode
+			 * free list.  if so, read the first one so that
+			 * we can link the current iag onto the list at
+			 * the head.
+			 */
+			if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
+				/* read the iag that currently is the head
+				 * of the list.
+				 */
+				if ((rc = diIAGRead(imap, fwd, &amp))) {
+					IREAD_UNLOCK(ipimap);
+					AG_UNLOCK(imap, agno);
+					release_metapage(mp);
+					return (rc);
+				}
+				aiagp = (struct iag *) amp->data;
+
+				/* make current head point back to the iag.
+				 */
+				aiagp->inofreeback = cpu_to_le32(iagno);
+
+				write_metapage(amp);
+			}
+
+			/* iag points forward to current head and iag
+			 * becomes the new head of the list.
+			 */
+			iagp->inofreefwd =
+			    cpu_to_le32(imap->im_agctl[agno].inofree);
+			iagp->inofreeback = cpu_to_le32(-1);
+			imap->im_agctl[agno].inofree = iagno;
+		}
+		IREAD_UNLOCK(ipimap);
+
+		/* update the free inode summary map for the extent if
+		 * freeing the inode means the extent will now have free
+		 * inodes (i.e., the inode being freed is the first free 
+		 * inode of extent),
+		 */
+		if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
+			sword = extno >> L2EXTSPERSUM;
+			bitno = extno & (EXTSPERSUM - 1);
+			iagp->inosmap[sword] &=
+			    cpu_to_le32(~(HIGHORDER >> bitno));
+		}
+
+		/* update the bitmap.
+		 */
+		iagp->wmap[extno] = cpu_to_le32(bitmap);
+		DBG_DIFREE(imap, inum);
+
+		/* update the free inode counts at the iag, ag and
+		 * map level.
+		 */
+		iagp->nfreeinos =
+		    cpu_to_le32(le32_to_cpu(iagp->nfreeinos) + 1);
+		imap->im_agctl[agno].numfree += 1;
+		atomic_inc(&imap->im_numfree);
+
+		/* release the AG inode map lock
+		 */
+		AG_UNLOCK(imap, agno);
+
+		/* write the iag */
+		write_metapage(mp);
+
+		return (0);
+	}
+
+
+	/*
+	 *      inode extent has become free and above low water mark:
+	 *      free the inode extent;
+	 */
+
+	/*
+	 *      prepare to update iag list(s) (careful update step 1)
+	 */
+	amp = bmp = cmp = dmp = NULL;
+	fwd = back = -1;
+
+	/* check if the iag currently has no free extents.  if so,
+	 * it will be placed on the head of the ag extent free list.
+	 */
+	if (iagp->nfreeexts == 0) {
+		/* check if the ag extent free list has any iags.
+		 * if so, read the iag at the head of the list now.
+		 * this (head) iag will be updated later to reflect
+		 * the addition of the current iag at the head of
+		 * the list.
+		 */
+		if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
+			if ((rc = diIAGRead(imap, fwd, &amp)))
+				goto error_out;
+			aiagp = (struct iag *) amp->data;
+		}
+	} else {
+		/* iag has free extents. check if the addition of a free
+		 * extent will cause all extents to be free within this
+		 * iag.  if so, the iag will be removed from the ag extent
+		 * free list and placed on the inode map's free iag list.
+		 */
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
+			/* in preparation for removing the iag from the
+			 * ag extent free list, read the iags preceding
+			 * and following the iag on the ag extent free
+			 * list.
+			 */
+			if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
+				if ((rc = diIAGRead(imap, fwd, &amp)))
+					goto error_out;
+				aiagp = (struct iag *) amp->data;
+			}
+
+			if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
+				if ((rc = diIAGRead(imap, back, &bmp)))
+					goto error_out;
+				biagp = (struct iag *) bmp->data;
+			}
+		}
+	}
+
+	/* remove the iag from the ag inode free list if freeing
+	 * this extent causes the iag to have no free inodes.
+	 */
+	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
+		int inofreeback = le32_to_cpu(iagp->inofreeback);
+		int inofreefwd = le32_to_cpu(iagp->inofreefwd);
+
+		/* in preparation for removing the iag from the
+		 * ag inode free list, read the iags preceding
+		 * and following the iag on the ag inode free
+		 * list.  before reading these iags, we must make
+		 * sure that we don't already have them in hand
+		 * from up above, since re-reading an iag (buffer)
+		 * we are currently holding would cause a deadlock.
+		 */
+		if (inofreefwd >= 0) {
+
+			if (inofreefwd == fwd)
+				ciagp = (struct iag *) amp->data;
+			else if (inofreefwd == back)
+				ciagp = (struct iag *) bmp->data;
+			else {
+				if ((rc =
+				     diIAGRead(imap, inofreefwd, &cmp)))
+					goto error_out;
+				ciagp = (struct iag *) cmp->data;
+			}
+			assert(ciagp != NULL);
+		}
+
+		if (inofreeback >= 0) {
+			if (inofreeback == fwd)
+				diagp = (struct iag *) amp->data;
+			else if (inofreeback == back)
+				diagp = (struct iag *) bmp->data;
+			else {
+				if ((rc =
+				     diIAGRead(imap, inofreeback, &dmp)))
+					goto error_out;
+				diagp = (struct iag *) dmp->data;
+			}
+			assert(diagp != NULL);
+		}
+	}
+
+	IREAD_UNLOCK(ipimap);
+
+	/*
+	 * invalidate any page of the inode extent freed from buffer cache;
+	 */
+	freepxd = iagp->inoext[extno];
+	invalidate_pxd_metapages(ip, freepxd);
+
+	/*
+	 *      update iag list(s) (careful update step 2)
+	 */
+	/* add the iag to the ag extent free list if this is the
+	 * first free extent for the iag.
+	 */
+	if (iagp->nfreeexts == 0) {
+		if (fwd >= 0)
+			aiagp->extfreeback = cpu_to_le32(iagno);
+
+		iagp->extfreefwd =
+		    cpu_to_le32(imap->im_agctl[agno].extfree);
+		iagp->extfreeback = cpu_to_le32(-1);
+		imap->im_agctl[agno].extfree = iagno;
+	} else {
+		/* remove the iag from the ag extent list if all extents
+		 * are now free and place it on the inode map iag free list.
+		 */
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
+			if (fwd >= 0)
+				aiagp->extfreeback = iagp->extfreeback;
+
+			if (back >= 0)
+				biagp->extfreefwd = iagp->extfreefwd;
+			else
+				imap->im_agctl[agno].extfree =
+				    le32_to_cpu(iagp->extfreefwd);
+
+			iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
+
+			IAGFREE_LOCK(imap);
+			iagp->iagfree = cpu_to_le32(imap->im_freeiag);
+			imap->im_freeiag = iagno;
+			IAGFREE_UNLOCK(imap);
+		}
+	}
+
+	/* remove the iag from the ag inode free list if freeing
+	 * this extent causes the iag to have no free inodes.
+	 */
+	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
+		if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
+			ciagp->inofreeback = iagp->inofreeback;
+
+		if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
+			diagp->inofreefwd = iagp->inofreefwd;
+		else
+			imap->im_agctl[agno].inofree =
+			    le32_to_cpu(iagp->inofreefwd);
+
+		iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
+	}
+
+	/* update the inode extent address and working map 
+	 * to reflect the free extent.
+	 * the permanent map should have been updated already 
+	 * for the inode being freed.
+	 */
+	if (iagp->pmap[extno] != 0) {
+		jfs_error(ip->i_sb, "diFree: the pmap does not show inode free");
+	}
+	iagp->wmap[extno] = 0;
+	DBG_DIFREE(imap, inum);
+	PXDlength(&iagp->inoext[extno], 0);
+	PXDaddress(&iagp->inoext[extno], 0);
+
+	/* update the free extent and free inode summary maps
+	 * to reflect the freed extent.
+	 * the inode summary map is marked to indicate no inodes 
+	 * available for the freed extent.
+	 */
+	sword = extno >> L2EXTSPERSUM;
+	bitno = extno & (EXTSPERSUM - 1);
+	mask = HIGHORDER >> bitno;
+	iagp->inosmap[sword] |= cpu_to_le32(mask);
+	iagp->extsmap[sword] &= cpu_to_le32(~mask);
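+	/* illustrative example only (assuming EXTSPERSUM = 32 and
+	 * HIGHORDER = 0x80000000): freeing extent 37 gives sword = 1,
+	 * bitno = 5 and mask = 0x04000000; setting the inosmap bit marks
+	 * the extent as having no allocatable inodes, while clearing the
+	 * extsmap bit marks it as a free extent.
+	 */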
+
+	/* update the number of free inodes and number of free extents
+	 * for the iag.
+	 */
+	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) -
+				      (INOSPEREXT - 1));
+	iagp->nfreeexts = cpu_to_le32(le32_to_cpu(iagp->nfreeexts) + 1);
+
+	/* update the number of free inodes and backed inodes
+	 * at the ag and inode map level.
+	 */
+	imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
+	imap->im_agctl[agno].numinos -= INOSPEREXT;
+	atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
+	atomic_sub(INOSPEREXT, &imap->im_numinos);
+
+	if (amp)
+		write_metapage(amp);
+	if (bmp)
+		write_metapage(bmp);
+	if (cmp)
+		write_metapage(cmp);
+	if (dmp)
+		write_metapage(dmp);
+
+	/*
+	 * start transaction to update block allocation map
+	 * for the inode extent freed;
+	 *
+	 * N.B. AG_LOCK is released and the iag will be released below, and
+	 * another thread may allocate inodes from/reuse the freed ixad,
+	 * BUT with a new/different backing inode extent from the extent
+	 * to be freed by the transaction;
+	 */
+	tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
+	down(&JFS_IP(ipimap)->commit_sem);
+
+	/* acquire tlock of the iag page of the freed ixad 
+	 * to force the page NOHOMEOK (even though no data is
+	 * logged from the iag page) until NOREDOPAGE|FREEXTENT log 
+	 * for the free of the extent is committed;
+	 * write FREEXTENT|NOREDOPAGE log record
+	 * N.B. linelock is overlaid as freed extent descriptor;
+	 */
+	tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
+	pxdlock = (struct pxd_lock *) & tlck->lock;
+	pxdlock->flag = mlckFREEPXD;
+	pxdlock->pxd = freepxd;
+	pxdlock->index = 1;
+
+	write_metapage(mp);
+
+	iplist[0] = ipimap;
+
+	/*
+	 * logredo needs the IAG number and IAG extent index in order
+	 * to ensure that the IMap is consistent.  The least disruptive
+	 * way to pass these values through  to the transaction manager
+	 * is in the iplist array.  
+	 * 
+	 * It's not pretty, but it works.
+	 */
+	iplist[1] = (struct inode *) (size_t)iagno;
+	iplist[2] = (struct inode *) (size_t)extno;
+
+	rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
+
+	txEnd(tid);
+	up(&JFS_IP(ipimap)->commit_sem);
+
+	/* unlock the AG inode map information */
+	AG_UNLOCK(imap, agno);
+
+	return (0);
+
+      error_out:
+	IREAD_UNLOCK(ipimap);
+
+	if (amp)
+		release_metapage(amp);
+	if (bmp)
+		release_metapage(bmp);
+	if (cmp)
+		release_metapage(cmp);
+	if (dmp)
+		release_metapage(dmp);
+
+	AG_UNLOCK(imap, agno);
+
+	release_metapage(mp);
+
+	return (rc);
+}
+
+/*
+ * There are several places in the diAlloc* routines where we initialize
+ * the inode.
+ */
+static inline void
+diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+	ip->i_ino = (iagno << L2INOSPERIAG) + ino;
+	DBG_DIALLOC(JFS_IP(ipimap)->i_imap, ip->i_ino);
+	jfs_ip->ixpxd = iagp->inoext[extno];
+	jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+	jfs_ip->active_ag = -1;
+}
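+/*
+ * For illustration only: with the usual geometry of 4096 inodes per iag
+ * (L2INOSPERIAG = 12, assuming EXTSPERIAG = 128 and INOSPEREXT = 32),
+ * diInitInode() for iagno = 2 and ino = 5 yields i_ino = (2 << 12) + 5
+ * = 8197, and the extent descriptor and ag number are copied from the
+ * iag so the in-memory inode can locate its on-disk home.
+ */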
+
+
+/*
+ * NAME:        diAlloc(pip,dir,ip)
+ *
+ * FUNCTION:    allocate a disk inode from the inode working map 
+ *		for a fileset or aggregate.
+ *
+ * PARAMETERS:
+ *      pip  	- pointer to incore inode for the parent inode.
+ *      dir  	- TRUE if the new disk inode is for a directory.
+ *      ip  	- pointer to a new inode
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+int diAlloc(struct inode *pip, boolean_t dir, struct inode *ip)
+{
+	int rc, ino, iagno, addext, extno, bitno, sword;
+	int nwords, rem, i, agno;
+	u32 mask, inosmap, extsmap;
+	struct inode *ipimap;
+	struct metapage *mp;
+	ino_t inum;
+	struct iag *iagp;
+	struct inomap *imap;
+
+	/* get the pointers to the inode map inode and the
+	 * corresponding imap control structure.
+	 */
+	ipimap = JFS_SBI(pip->i_sb)->ipimap;
+	imap = JFS_IP(ipimap)->i_imap;
+	JFS_IP(ip)->ipimap = ipimap;
+	JFS_IP(ip)->fileset = FILESYSTEM_I;
+
+	/* for a directory, the allocation policy is to start 
+	 * at the ag level using the preferred ag.
+	 */
+	if (dir == TRUE) {
+		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
+		AG_LOCK(imap, agno);
+		goto tryag;
+	}
+
+	/* for files, the policy starts off by trying to allocate from
+	 * the same iag containing the parent disk inode:
+	 * try to allocate the new disk inode close to the parent disk
+	 * inode, using parent disk inode number + 1 as the allocation
+	 * hint.  (we use a left-to-right policy to attempt to avoid
+	 * moving backward on the disk.)  compute the hint within the
+	 * file system and the iag.
+	 */
+
+	/* get the ag number of this iag */
+	agno = JFS_IP(pip)->agno;
+
+	if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+		/*
+		 * There is an open file actively growing.  We want to
+		 * allocate new inodes from a different ag to avoid
+		 * fragmentation problems.
+		 */
+		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
+		AG_LOCK(imap, agno);
+		goto tryag;
+	}
+
+	inum = pip->i_ino + 1;
+	ino = inum & (INOSPERIAG - 1);
+
+	/* back off the hint if it is outside of the iag */
+	if (ino == 0)
+		inum = pip->i_ino;
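+	/* e.g. (assuming INOSPERIAG = 4096): a parent inode number of 4095
+	 * gives inum = 4096 and ino = 0, i.e. the hint would fall into the
+	 * next iag, so it is pulled back to the parent's own inode number.
+	 */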
+
+	/* lock the AG inode map information */
+	AG_LOCK(imap, agno);
+
+	/* Get read lock on imap inode */
+	IREAD_LOCK(ipimap);
+
+	/* get the iag number and read the iag */
+	iagno = INOTOIAG(inum);
+	if ((rc = diIAGRead(imap, iagno, &mp))) {
+		IREAD_UNLOCK(ipimap);
+		AG_UNLOCK(imap, agno);
+		return (rc);
+	}
+	iagp = (struct iag *) mp->data;
+
+	/* determine if new inode extent is allowed to be added to the iag.
+	 * new inode extent can be added to the iag if the ag
+	 * has less than 32 free disk inodes and the iag has free extents.
+	 */
+	addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
+
+	/*
+	 *      try to allocate from the IAG
+	 */
+	/* check if an inode may be allocated from the iag
+	 * (i.e. the iag has free inodes or a new extent can be added).
+	 */
+	if (iagp->nfreeinos || addext) {
+		/* determine the extent number of the hint.
+		 */
+		extno = ino >> L2INOSPEREXT;
+
+		/* check if the extent containing the hint has backed
+		 * inodes.  if so, try to allocate within this extent.
+		 */
+		if (addressPXD(&iagp->inoext[extno])) {
+			bitno = ino & (INOSPEREXT - 1);
+			if ((bitno =
+			     diFindFree(le32_to_cpu(iagp->wmap[extno]),
+					bitno))
+			    < INOSPEREXT) {
+				ino = (extno << L2INOSPEREXT) + bitno;
+
+				/* a free inode (bit) was found within this
+				 * extent, so allocate it.
+				 */
+				rc = diAllocBit(imap, iagp, ino);
+				IREAD_UNLOCK(ipimap);
+				if (rc) {
+					assert(rc == -EIO);
+				} else {
+					/* set the results of the allocation
+					 * and write the iag.
+					 */
+					diInitInode(ip, iagno, ino, extno,
+						    iagp);
+					mark_metapage_dirty(mp);
+				}
+				release_metapage(mp);
+
+				/* free the AG lock and return.
+				 */
+				AG_UNLOCK(imap, agno);
+				return (rc);
+			}
+
+			if (!addext)
+				extno =
+				    (extno ==
+				     EXTSPERIAG - 1) ? 0 : extno + 1;
+		}
+
+		/*
+		 * no free inodes within the extent containing the hint.
+		 *
+		 * try to allocate from the backed extents following
+		 * hint or, if appropriate (i.e. addext is true), allocate
+		 * an extent of free inodes at or following the extent
+		 * containing the hint.
+		 * 
+		 * the free inode and free extent summary maps are used
+		 * here, so determine the starting summary map position
+		 * and the number of words we'll have to examine.  again,
+		 * the approach is to allocate following the hint, so we
+		 * might have to initially ignore prior bits of the summary
+		 * map that represent extents prior to the extent containing
+		 * the hint and later revisit these bits.
+		 */
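+		/* for illustration (assuming EXTSPERSUM = 32 and ONES =
+		 * 0xffffffff): a hint in extent 37 gives sword = 1 and
+		 * bitno = 5, so the mask below covers the 5 high-order
+		 * bits of word 1, hiding extents 32-36, which precede the
+		 * hint and are only revisited on the wrap-around pass.
+		 */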
+		bitno = extno & (EXTSPERSUM - 1);
+		nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
+		sword = extno >> L2EXTSPERSUM;
+
+		/* mask any prior bits for the starting words of the
+		 * summary map.
+		 */
+		mask = ONES << (EXTSPERSUM - bitno);
+		inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
+		extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;
+
+		/* scan the free inode and free extent summary maps for
+		 * free resources.
+		 */
+		for (i = 0; i < nwords; i++) {
+			/* check if this word of the free inode summary
+			 * map describes an extent with free inodes.
+			 */
+			if (~inosmap) {
+				/* an extent with free inodes has been
+				 * found. determine the extent number
+				 * and the inode number within the extent.
+				 */
+				rem = diFindFree(inosmap, 0);
+				extno = (sword << L2EXTSPERSUM) + rem;
+				rem = diFindFree(le32_to_cpu(iagp->wmap[extno]),
+						 0);
+				if (rem >= INOSPEREXT) {
+					IREAD_UNLOCK(ipimap);
+					release_metapage(mp);
+					AG_UNLOCK(imap, agno);
+					jfs_error(ip->i_sb,
+						  "diAlloc: can't find free bit "
+						  "in wmap");
+					return -EIO;
+				}
+
+				/* determine the inode number within the
+				 * iag and allocate the inode from the
+				 * map.
+				 */
+				ino = (extno << L2INOSPEREXT) + rem;
+				rc = diAllocBit(imap, iagp, ino);
+				IREAD_UNLOCK(ipimap);
+				if (rc)
+					assert(rc == -EIO);
+				else {
+					/* set the results of the allocation
+					 * and write the iag.
+					 */
+					diInitInode(ip, iagno, ino, extno,
+						    iagp);
+					mark_metapage_dirty(mp);
+				}
+				release_metapage(mp);
+
+				/* free the AG lock and return.
+				 */
+				AG_UNLOCK(imap, agno);
+				return (rc);
+
+			}
+
+			/* check if we may allocate an extent of free
+			 * inodes and whether this word of the free
+			 * extents summary map describes a free extent.
+			 */
+			if (addext && ~extsmap) {
+				/* a free extent has been found.  determine
+				 * the extent number.
+				 */
+				rem = diFindFree(extsmap, 0);
+				extno = (sword << L2EXTSPERSUM) + rem;
+
+				/* allocate an extent of free inodes.
+				 */
+				if ((rc = diNewExt(imap, iagp, extno))) {
+					/* if there is no disk space for a
+					 * new extent, try to allocate the
+					 * disk inode from somewhere else.
+					 */
+					if (rc == -ENOSPC)
+						break;
+
+					assert(rc == -EIO);
+				} else {
+					/* set the results of the allocation
+					 * and write the iag.
+					 */
+					diInitInode(ip, iagno,
+						    extno << L2INOSPEREXT,
+						    extno, iagp);
+					mark_metapage_dirty(mp);
+				}
+				release_metapage(mp);
+				/* free the imap inode & the AG lock & return.
+				 */
+				IREAD_UNLOCK(ipimap);
+				AG_UNLOCK(imap, agno);
+				return (rc);
+			}
+
+			/* move on to the next set of summary map words.
+			 */
+			sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
+			inosmap = le32_to_cpu(iagp->inosmap[sword]);
+			extsmap = le32_to_cpu(iagp->extsmap[sword]);
+		}
+	}
+	/* unlock imap inode */
+	IREAD_UNLOCK(ipimap);
+
+	/* nothing doing in this iag, so release it. */
+	release_metapage(mp);
+
+      tryag:
+	/*
+	 * try to allocate anywhere within the same AG as the parent inode.
+	 */
+	rc = diAllocAG(imap, agno, dir, ip);
+
+	AG_UNLOCK(imap, agno);
+
+	if (rc != -ENOSPC)
+		return (rc);
+
+	/*
+	 * try to allocate in any AG.
+	 */
+	return (diAllocAny(imap, agno, dir, ip));
+}
+
+
+/*
+ * NAME:        diAllocAG(imap,agno,dir,ip)
+ *
+ * FUNCTION:    allocate a disk inode from the allocation group.
+ *
+ *		this routine first determines if a new extent of free
+ *		inodes should be added for the allocation group, with
+ *		the current request satisfied from this extent. if this
+ *		is the case, an attempt will be made to do just that.  if
+ *		this attempt fails or it has been determined that a new 
+ *		extent should not be added, an attempt is made to satisfy
+ *		the request by allocating an existing (backed) free inode
+ *		from the allocation group.
+ *
+ * PRE CONDITION: Already have the AG lock for this AG.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      agno  	- allocation group to allocate from.
+ *      dir  	- TRUE if the new disk inode is for a directory.
+ *      ip  	- pointer to the new inode to be filled in on successful return
+ *		  with the disk inode number allocated, its extent address
+ *		  and the start of the ag.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int
+diAllocAG(struct inomap * imap, int agno, boolean_t dir, struct inode *ip)
+{
+	int rc, addext, numfree, numinos;
+
+	/* get the number of free and the number of backed disk 
+	 * inodes currently within the ag.
+	 */
+	numfree = imap->im_agctl[agno].numfree;
+	numinos = imap->im_agctl[agno].numinos;
+
+	if (numfree > numinos) {
+		jfs_error(ip->i_sb, "diAllocAG: numfree > numinos");
+		return -EIO;
+	}
+
+	/* determine if we should allocate a new extent of free inodes
+	 * within the ag: for directory inodes, add a new extent
+	 * if there are a small number of free inodes or number of free
+	 * inodes is a small percentage of the number of backed inodes.
+	 */
+	if (dir == TRUE)
+		addext = (numfree < 64 ||
+			  (numfree < 256
+			   && ((numfree * 100) / numinos) <= 20));
+	else
+		addext = (numfree == 0);
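+	/* worked example (illustrative only): with numfree = 50 and
+	 * numinos = 1000, a directory allocation adds an extent because
+	 * numfree < 64; a file allocation does not, since backed free
+	 * inodes are still available (numfree != 0).
+	 */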
+
+	/*
+	 * try to allocate a new extent of free inodes.
+	 */
+	if (addext) {
+		/* if free space is not available for this new extent, try
+		 * below to allocate a free and existing (already backed)
+		 * inode from the ag.
+		 */
+		if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
+			return (rc);
+	}
+
+	/*
+	 * try to allocate an existing free inode from the ag.
+	 */
+	return (diAllocIno(imap, agno, ip));
+}
+
+
+/*
+ * NAME:        diAllocAny(imap,agno,dir,iap)
+ *
+ * FUNCTION:    allocate a disk inode from any other allocation group.
+ *
+ *		this routine is called when an allocation attempt within
+ *		the primary allocation group has failed. if attempts to
+ *		the primary allocation group has failed. it attempts to
+ *		specified primary group.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      agno  	- primary allocation group (to avoid).
+ *      dir  	- TRUE if the new disk inode is for a directory.
+ *      ip  	- pointer to a new inode to be filled in on successful return
+ *		  with the disk inode number allocated, its extent address
+ *		  and the start of the ag.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int
+diAllocAny(struct inomap * imap, int agno, boolean_t dir, struct inode *ip)
+{
+	int ag, rc;
+	int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;
+
+
+	/* try to allocate from the ags following agno up to 
+	 * the maximum ag number.
+	 */
+	for (ag = agno + 1; ag <= maxag; ag++) {
+		AG_LOCK(imap, ag);
+
+		rc = diAllocAG(imap, ag, dir, ip);
+
+		AG_UNLOCK(imap, ag);
+
+		if (rc != -ENOSPC)
+			return (rc);
+	}
+
+	/* try to allocate from the ags in front of agno.
+	 */
+	for (ag = 0; ag < agno; ag++) {
+		AG_LOCK(imap, ag);
+
+		rc = diAllocAG(imap, ag, dir, ip);
+
+		AG_UNLOCK(imap, ag);
+
+		if (rc != -ENOSPC)
+			return (rc);
+	}
+
+	/* no free disk inodes.
+	 */
+	return -ENOSPC;
+}
+
+
+/*
+ * NAME:        diAllocIno(imap,agno,ip)
+ *
+ * FUNCTION:    allocate a disk inode from the allocation group's free
+ *		inode list, returning an error if this free list is
+ *		empty (i.e. no iags on the list).
+ *
+ *		allocation occurs from the first iag on the list using
+ *		the iag's free inode summary map to find the leftmost
+ *		free inode in the iag. 
+ *		
+ * PRE CONDITION: Already have AG lock for this AG.
+ *		
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      agno  	- allocation group.
+ *      ip  	- pointer to new inode to be filled in on successful return
+ *		  with the disk inode number allocated, its extent address
+ *		  and the start of the ag.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
+{
+	int iagno, ino, rc, rem, extno, sword;
+	struct metapage *mp;
+	struct iag *iagp;
+
+	/* check if there are iags on the ag's free inode list.
+	 */
+	if ((iagno = imap->im_agctl[agno].inofree) < 0)
+		return -ENOSPC;
+
+	/* obtain read lock on imap inode */
+	IREAD_LOCK(imap->im_ipimap);
+
+	/* read the iag at the head of the list.
+	 */
+	if ((rc = diIAGRead(imap, iagno, &mp))) {
+		IREAD_UNLOCK(imap->im_ipimap);
+		return (rc);
+	}
+	iagp = (struct iag *) mp->data;
+
+	/* better be free inodes in this iag if it is on the
+	 * list.
+	 */
+	if (!iagp->nfreeinos) {
+		IREAD_UNLOCK(imap->im_ipimap);
+		release_metapage(mp);
+		jfs_error(ip->i_sb,
+			  "diAllocIno: nfreeinos = 0, but iag on freelist");
+		return -EIO;
+	}
+
+	/* scan the free inode summary map to find an extent
+	 * with free inodes.
+	 */
+	for (sword = 0;; sword++) {
+		if (sword >= SMAPSZ) {
+			IREAD_UNLOCK(imap->im_ipimap);
+			release_metapage(mp);
+			jfs_error(ip->i_sb,
+				  "diAllocIno: free inode not found in summary map");
+			return -EIO;
+		}
+
+		if (~iagp->inosmap[sword])
+			break;
+	}
+
+	/* found an extent with free inodes. determine
+	 * the extent number.
+	 */
+	rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
+	if (rem >= EXTSPERSUM) {
+		IREAD_UNLOCK(imap->im_ipimap);
+		release_metapage(mp);
+		jfs_error(ip->i_sb, "diAllocIno: no free extent found");
+		return -EIO;
+	}
+	extno = (sword << L2EXTSPERSUM) + rem;
+
+	/* find the first free inode in the extent.
+	 */
+	rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
+	if (rem >= INOSPEREXT) {
+		IREAD_UNLOCK(imap->im_ipimap);
+		release_metapage(mp);
+		jfs_error(ip->i_sb, "diAllocIno: free inode not found");
+		return -EIO;
+	}
+
+	/* compute the inode number within the iag. 
+	 */
+	ino = (extno << L2INOSPEREXT) + rem;
+
+	/* allocate the inode.
+	 */
+	rc = diAllocBit(imap, iagp, ino);
+	IREAD_UNLOCK(imap->im_ipimap);
+	if (rc) {
+		release_metapage(mp);
+		return (rc);
+	}
+
+	/* set the results of the allocation and write the iag.
+	 */
+	diInitInode(ip, iagno, ino, extno, iagp);
+	write_metapage(mp);
+
+	return (0);
+}
+
+
+/*
+ * NAME:        diAllocExt(imap,agno,ip)
+ *
+ * FUNCTION:   	add a new extent of free inodes to an iag, allocating
+ *	       	an inode from this extent to satisfy the current allocation
+ *	       	request.
+ *		
+ *		this routine first tries to find an existing iag with free
+ *		extents through the ag free extent list.  if the list is not
+ *		empty, the head of the list will be selected as the home
+ *		of the new extent of free inodes.  otherwise (the list is
+ *		empty), a new iag will be allocated for the ag to contain
+ *		the extent.
+ *		
+ *		once an iag has been selected, the free extent summary map
+ *		is used to locate a free extent within the iag and diNewExt()
+ *		is called to initialize the extent, with initialization
+ *		including the allocation of the first inode of the extent
+ *		for the purpose of satisfying this request.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      agno  	- allocation group number.
+ *      ip  	- pointer to new inode to be filled in on successful return
+ *		  with the disk inode number allocated, its extent address
+ *		  and the start of the ag.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
+{
+	int rem, iagno, sword, extno, rc;
+	struct metapage *mp;
+	struct iag *iagp;
+
+	/* check if the ag has any iags with free extents.  if not,
+	 * allocate a new iag for the ag.
+	 */
+	if ((iagno = imap->im_agctl[agno].extfree) < 0) {
+		/* If successful, diNewIAG will obtain the read lock on the
+		 * imap inode.
+		 */
+		if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
+			return (rc);
+		}
+		iagp = (struct iag *) mp->data;
+
+		/* set the ag number if this is a brand new iag
+		 */
+		iagp->agstart =
+		    cpu_to_le64(AGTOBLK(agno, imap->im_ipimap));
+	} else {
+		/* read the iag.
+		 */
+		IREAD_LOCK(imap->im_ipimap);
+		if ((rc = diIAGRead(imap, iagno, &mp))) {
+			IREAD_UNLOCK(imap->im_ipimap);
+			jfs_error(ip->i_sb, "diAllocExt: error reading iag");
+			return rc;
+		}
+		iagp = (struct iag *) mp->data;
+	}
+
+	/* using the free extent summary map, find a free extent.
+	 */
+	for (sword = 0;; sword++) {
+		if (sword >= SMAPSZ) {
+			release_metapage(mp);
+			IREAD_UNLOCK(imap->im_ipimap);
+			jfs_error(ip->i_sb,
+				  "diAllocExt: free ext summary map not found");
+			return -EIO;
+		}
+		if (~iagp->extsmap[sword])
+			break;
+	}
+
+	/* determine the extent number of the free extent.
+	 */
+	rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0);
+	if (rem >= EXTSPERSUM) {
+		release_metapage(mp);
+		IREAD_UNLOCK(imap->im_ipimap);
+		jfs_error(ip->i_sb, "diAllocExt: free extent not found");
+		return -EIO;
+	}
+	extno = (sword << L2EXTSPERSUM) + rem;
+
+	/* initialize the new extent.
+	 */
+	rc = diNewExt(imap, iagp, extno);
+	IREAD_UNLOCK(imap->im_ipimap);
+	if (rc) {
+		/* something bad happened.  if a new iag was allocated,
+		 * place it back on the inode map's iag free list, and
+		 * clear the ag number information.
+		 */
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
+			IAGFREE_LOCK(imap);
+			iagp->iagfree = cpu_to_le32(imap->im_freeiag);
+			imap->im_freeiag = iagno;
+			IAGFREE_UNLOCK(imap);
+		}
+		write_metapage(mp);
+		return (rc);
+	}
+
+	/* set the results of the allocation and write the iag.
+	 */
+	diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp);
+
+	write_metapage(mp);
+
+	return (0);
+}
+
+
+/*
+ * NAME:        diAllocBit(imap,iagp,ino)
+ *
+ * FUNCTION:   	allocate a backed inode from an iag.
+ *
+ *		this routine performs the mechanics of allocating a
+ *		specified inode from a backed extent.
+ *
+ *		if the inode to be allocated represents the last free
+ *		inode within the iag, the iag will be removed from the
+ *		ag free inode list.
+ *
+ *		a careful update approach is used to provide consistency
+ *		in the face of updates to multiple buffers.  under this
+ *		approach, all required buffers are obtained before making
+ *		any updates and are held until all updates are complete.
+ *		
+ * PRE CONDITION: Already have buffer lock on iagp.  Already have AG lock on
+ *	this AG.  Must have read lock on imap inode.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      iagp  	- pointer to iag. 
+ *      ino   	- inode number to be allocated within the iag.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
+{
+	int extno, bitno, agno, sword, rc;
+	struct metapage *amp = NULL, *bmp = NULL;
+	struct iag *aiagp = NULL, *biagp = NULL;
+	u32 mask;
+
+	/* check if this is the last free inode within the iag.
+	 * if so, it will have to be removed from the ag free
+	 * inode list, so get the iags preceding and following
+	 * it on the list.
+	 */
+	if (iagp->nfreeinos == cpu_to_le32(1)) {
+		if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) {
+			if ((rc =
+			     diIAGRead(imap, le32_to_cpu(iagp->inofreefwd),
+				       &amp)))
+				return (rc);
+			aiagp = (struct iag *) amp->data;
+		}
+
+		if ((int) le32_to_cpu(iagp->inofreeback) >= 0) {
+			if ((rc =
+			     diIAGRead(imap,
+				       le32_to_cpu(iagp->inofreeback),
+				       &bmp))) {
+				if (amp)
+					release_metapage(amp);
+				return (rc);
+			}
+			biagp = (struct iag *) bmp->data;
+		}
+	}
+
+	/* get the ag number, extent number, inode number within
+	 * the extent.
+	 */
+	agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb));
+	extno = ino >> L2INOSPEREXT;
+	bitno = ino & (INOSPEREXT - 1);
+
+	/* compute the mask for setting the map.
+	 */
+	mask = HIGHORDER >> bitno;
+
+	/* the inode should be free and backed.
+	 */
+	if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) ||
+	    ((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) ||
+	    (addressPXD(&iagp->inoext[extno]) == 0)) {
+		if (amp)
+			release_metapage(amp);
+		if (bmp)
+			release_metapage(bmp);
+
+		jfs_error(imap->im_ipimap->i_sb,
+			  "diAllocBit: iag inconsistent");
+		return -EIO;
+	}
+
+	/* mark the inode as allocated in the working map.
+	 */
+	iagp->wmap[extno] |= cpu_to_le32(mask);
+
+	/* check if all inodes within the extent are now
+	 * allocated.  if so, update the free inode summary
+	 * map to reflect this.
+	 */
+	if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
+		sword = extno >> L2EXTSPERSUM;
+		bitno = extno & (EXTSPERSUM - 1);
+		iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno);
+	}
+
+	/* if this was the last free inode in the iag, remove the
+	 * iag from the ag free inode list.
+	 */
+	if (iagp->nfreeinos == cpu_to_le32(1)) {
+		if (amp) {
+			aiagp->inofreeback = iagp->inofreeback;
+			write_metapage(amp);
+		}
+
+		if (bmp) {
+			biagp->inofreefwd = iagp->inofreefwd;
+			write_metapage(bmp);
+		} else {
+			imap->im_agctl[agno].inofree =
+			    le32_to_cpu(iagp->inofreefwd);
+		}
+		iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
+	}
+
+	/* update the free inode count at the iag, ag, inode
+	 * map levels.
+	 */
+	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) - 1);
+	imap->im_agctl[agno].numfree -= 1;
+	atomic_dec(&imap->im_numfree);
+
+	return (0);
+}
+
+
+/*
+ * NAME:        diNewExt(imap,iagp,extno)
+ *
+ * FUNCTION:    initialize a new extent of inodes for an iag, allocating
+ *	        the first inode of the extent for use for the current
+ *	        allocation request.
+ *
+ *		disk resources are allocated for the new extent of inodes
+ *		and the inodes themselves are initialized to reflect their
+ *		existence within the extent (i.e. their inode numbers and
+ *		inode extent addresses are set) and their initial state
+ *		(mode and link count are set to zero).
+ *
+ *		if the iag is new, it is not yet on an ag extent free list
+ *		but will now be placed on this list.
+ *
+ *		if the allocation of the new extent causes the iag to
+ *		have no free extent, the iag will be removed from the
+ *		ag extent free list.
+ *
+ *		if the iag has no free backed inodes, it will be placed
+ *		on the ag free inode list, since the addition of the new
+ *		extent will now cause it to have free inodes.
+ *
+ *		a careful update approach is used to provide consistency
+ *		(i.e. list consistency) in the face of updates to multiple
+ *		buffers.  under this approach, all required buffers are
+ *		obtained before making any updates and are held until all
+ *		updates are complete.
+ *		
+ * PRE CONDITION: Already have buffer lock on iagp.  Already have AG lock on
+ *	this AG.  Must have read lock on imap inode.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      iagp  	- pointer to iag. 
+ *      extno  	- extent number.
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ */
+static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
+{
+	int agno, iagno, fwd, back, freei = 0, sword, rc;
+	struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL;
+	struct metapage *amp, *bmp, *cmp, *dmp;
+	struct inode *ipimap;
+	s64 blkno, hint;
+	int i, j;
+	u32 mask;
+	ino_t ino;
+	struct dinode *dp;
+	struct jfs_sb_info *sbi;
+
+	/* better have free extents.
+	 */
+	if (!iagp->nfreeexts) {
+		jfs_error(imap->im_ipimap->i_sb, "diNewExt: no free extents");
+		return -EIO;
+	}
+
+	/* get the inode map inode.
+	 */
+	ipimap = imap->im_ipimap;
+	sbi = JFS_SBI(ipimap->i_sb);
+
+	amp = bmp = cmp = NULL;
+
+	/* get the ag and iag numbers for this iag.
+	 */
+	agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+	iagno = le32_to_cpu(iagp->iagnum);
+
+	/* check if this is the last free extent within the
+	 * iag.  if so, the iag must be removed from the ag
+	 * free extent list, so get the iags preceding and
+	 * following the iag on this list.
+	 */
+	if (iagp->nfreeexts == cpu_to_le32(1)) {
+		if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
+			if ((rc = diIAGRead(imap, fwd, &amp)))
+				return (rc);
+			aiagp = (struct iag *) amp->data;
+		}
+
+		if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
+			if ((rc = diIAGRead(imap, back, &bmp)))
+				goto error_out;
+			biagp = (struct iag *) bmp->data;
+		}
+	} else {
+		/* the iag has free extents.  if all extents are free
+		 * (as is the case for a newly allocated iag), the iag
+		 * must be added to the ag free extent list, so get
+		 * the iag at the head of the list in preparation for
+		 * adding this iag to this list.
+		 */
+		fwd = back = -1;
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
+			if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
+				if ((rc = diIAGRead(imap, fwd, &amp)))
+					goto error_out;
+				aiagp = (struct iag *) amp->data;
+			}
+		}
+	}
+
+	/* check if the iag has no free inodes.  if so, the iag
+	 * will have to be added to the ag free inode list, so get
+	 * the iag at the head of the list in preparation for
+	 * adding this iag to this list.  in doing this, we must
+	 * check if we already have the iag at the head of
+	 * the list in hand.
+	 */
+	if (iagp->nfreeinos == 0) {
+		freei = imap->im_agctl[agno].inofree;
+
+		if (freei >= 0) {
+			if (freei == fwd) {
+				ciagp = aiagp;
+			} else if (freei == back) {
+				ciagp = biagp;
+			} else {
+				if ((rc = diIAGRead(imap, freei, &cmp)))
+					goto error_out;
+				ciagp = (struct iag *) cmp->data;
+			}
+			if (ciagp == NULL) {
+				jfs_error(imap->im_ipimap->i_sb,
+					  "diNewExt: ciagp == NULL");
+				rc = -EIO;
+				goto error_out;
+			}
+		}
+	}
+
+	/* allocate disk space for the inode extent.
+	 */
+	if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0))
+		hint = ((s64) agno << sbi->bmap->db_agl2size) - 1;
+	else
+		hint = addressPXD(&iagp->inoext[extno - 1]) +
+		    lengthPXD(&iagp->inoext[extno - 1]) - 1;
+
+	if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno)))
+		goto error_out;
+
+	/* compute the inode number of the first inode within the
+	 * extent.
+	 */
+	ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT);
+
+	/* initialize the inodes within the newly allocated extent a
+	 * page at a time.
+	 */
+	for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) {
+		/* get a buffer for this page of disk inodes.
+		 */
+		dmp = get_metapage(ipimap, blkno + i, PSIZE, 1);
+		if (dmp == NULL) {
+			rc = -EIO;
+			goto error_out;
+		}
+		dp = (struct dinode *) dmp->data;
+
+		/* initialize the inode number, mode, link count and
+		 * inode extent address.
+		 */
+		for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) {
+			dp->di_inostamp = cpu_to_le32(sbi->inostamp);
+			dp->di_number = cpu_to_le32(ino);
+			dp->di_fileset = cpu_to_le32(FILESYSTEM_I);
+			dp->di_mode = 0;
+			dp->di_nlink = 0;
+			PXDaddress(&(dp->di_ixpxd), blkno);
+			PXDlength(&(dp->di_ixpxd), imap->im_nbperiext);
+		}
+		write_metapage(dmp);
+	}
+
+	/* if this is the last free extent within the iag, remove the
+	 * iag from the ag free extent list.
+	 */
+	if (iagp->nfreeexts == cpu_to_le32(1)) {
+		if (fwd >= 0)
+			aiagp->extfreeback = iagp->extfreeback;
+
+		if (back >= 0)
+			biagp->extfreefwd = iagp->extfreefwd;
+		else
+			imap->im_agctl[agno].extfree =
+			    le32_to_cpu(iagp->extfreefwd);
+
+		iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
+	} else {
+		/* if the iag has all free extents (newly allocated iag),
+		 * add the iag to the ag free extent list.
+		 */
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
+			if (fwd >= 0)
+				aiagp->extfreeback = cpu_to_le32(iagno);
+
+			iagp->extfreefwd = cpu_to_le32(fwd);
+			iagp->extfreeback = cpu_to_le32(-1);
+			imap->im_agctl[agno].extfree = iagno;
+		}
+	}
+
+	/* if the iag has no free inodes, add the iag to the
+	 * ag free inode list.
+	 */
+	if (iagp->nfreeinos == 0) {
+		if (freei >= 0)
+			ciagp->inofreeback = cpu_to_le32(iagno);
+
+		iagp->inofreefwd =
+		    cpu_to_le32(imap->im_agctl[agno].inofree);
+		iagp->inofreeback = cpu_to_le32(-1);
+		imap->im_agctl[agno].inofree = iagno;
+	}
+
+	/* initialize the extent descriptor of the extent. */
+	PXDlength(&iagp->inoext[extno], imap->im_nbperiext);
+	PXDaddress(&iagp->inoext[extno], blkno);
+
+	/* initialize the working and persistent map of the extent.
+	 * the working map will be initialized such that
+	 * it indicates the first inode of the extent is allocated.
+	 */
+	iagp->wmap[extno] = cpu_to_le32(HIGHORDER);
+	iagp->pmap[extno] = 0;
+
+	/* update the free inode and free extent summary maps
+	 * for the extent to indicate the extent has free inodes
+	 * and no longer represents a free extent.
+	 */
+	sword = extno >> L2EXTSPERSUM;
+	mask = HIGHORDER >> (extno & (EXTSPERSUM - 1));
+	iagp->extsmap[sword] |= cpu_to_le32(mask);
+	iagp->inosmap[sword] &= cpu_to_le32(~mask);
+
+	/* update the free inode and free extent counts for the
+	 * iag.
+	 */
+	iagp->nfreeinos = cpu_to_le32(le32_to_cpu(iagp->nfreeinos) +
+				      (INOSPEREXT - 1));
+	iagp->nfreeexts = cpu_to_le32(le32_to_cpu(iagp->nfreeexts) - 1);
+
+	/* update the free and backed inode counts for the ag.
+	 */
+	imap->im_agctl[agno].numfree += (INOSPEREXT - 1);
+	imap->im_agctl[agno].numinos += INOSPEREXT;
+
+	/* update the free and backed inode counts for the inode map.
+	 */
+	atomic_add(INOSPEREXT - 1, &imap->im_numfree);
+	atomic_add(INOSPEREXT, &imap->im_numinos);
+
+	/* write the iags.
+	 */
+	if (amp)
+		write_metapage(amp);
+	if (bmp)
+		write_metapage(bmp);
+	if (cmp)
+		write_metapage(cmp);
+
+	return (0);
+
+      error_out:
+
+	/* release the iags.
+	 */
+	if (amp)
+		release_metapage(amp);
+	if (bmp)
+		release_metapage(bmp);
+	if (cmp)
+		release_metapage(cmp);
+
+	return (rc);
+}
+
+
+/*
+ * NAME:        diNewIAG(imap,iagnop,agno)
+ *
+ * FUNCTION:   	allocate a new iag for an allocation group.
+ *		
+ *		first tries to allocate the iag from the inode map 
+ *		iagfree list:  
+ *		if the list has free iags, the head of the list is removed 
+ *		and returned to satisfy the request.
+ *		if the inode map's iag free list is empty, the inode map
+ *		is extended to hold a new iag. this new iag is initialized
+ *		and returned to satisfy the request.
+ *
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      iagnop 	- pointer to an iag number set with the number of the
+ *		  newly allocated iag upon successful return.
+ *      agno  	- allocation group number.
+ *	mpp	- buffer pointer to be filled in with the new IAG's buffer
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -ENOSPC	- insufficient disk resources.
+ *      -EIO  	- i/o error.
+ *
+ * serialization: 
+ *	AG lock held on entry/exit;
+ *	write lock on the map is held inside;
+ *	read lock on the map is held on successful completion;
+ *
+ * note: new iag transaction: 
+ * . synchronously write iag;
+ * . write log of xtree and inode  of imap;
+ * . commit;
+ * . synchronous write of xtree (right to left, bottom to top);
+ * . at start of logredo(): init in-memory imap with one additional iag page;
+ * . at end of logredo(): re-read imap inode to determine
+ *   new imap size;
+ */
+static int
+diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
+{
+	int rc;
+	int iagno, i, xlen;
+	struct inode *ipimap;
+	struct super_block *sb;
+	struct jfs_sb_info *sbi;
+	struct metapage *mp;
+	struct iag *iagp;
+	s64 xaddr = 0;
+	s64 blkno;
+	tid_t tid;
+#ifdef _STILL_TO_PORT
+	xad_t xad;
+#endif				/*  _STILL_TO_PORT */
+	struct inode *iplist[1];
+
+	/* pick up pointers to the inode map and mount inodes */
+	ipimap = imap->im_ipimap;
+	sb = ipimap->i_sb;
+	sbi = JFS_SBI(sb);
+
+	/* acquire the free iag lock */
+	IAGFREE_LOCK(imap);
+
+	/* if there are any iags on the inode map free iag list, 
+	 * allocate the iag from the head of the list.
+	 */
+	if (imap->im_freeiag >= 0) {
+		/* pick up the iag number at the head of the list */
+		iagno = imap->im_freeiag;
+
+		/* determine the logical block number of the iag */
+		blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
+	} else {
+		/* no free iags. the inode map will have to be extended
+		 * to include a new iag.
+		 */
+
+		/* acquire inode map lock */
+		IWRITE_LOCK(ipimap);
+
+		if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
+			IWRITE_UNLOCK(ipimap);
+			IAGFREE_UNLOCK(imap);
+			jfs_error(imap->im_ipimap->i_sb,
+				  "diNewIAG: ipimap->i_size is wrong");
+			return -EIO;
+		}
+
+
+		/* get the next available iag number */
+		iagno = imap->im_nextiag;
+
+		/* make sure that we have not exceeded the maximum inode
+		 * number limit.
+		 */
+		if (iagno > (MAXIAGS - 1)) {
+			/* release the inode map lock */
+			IWRITE_UNLOCK(ipimap);
+
+			rc = -ENOSPC;
+			goto out;
+		}
+
+		/*
+		 * synchronously append new iag page.
+		 */
+		/* determine the logical address of iag page to append */
+		blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
+
+		/* Allocate extent for new iag page */
+		xlen = sbi->nbperpage;
+		if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) {
+			/* release the inode map lock */
+			IWRITE_UNLOCK(ipimap);
+
+			goto out;
+		}
+
+		/* assign a buffer for the page */
+		mp = get_metapage(ipimap, xaddr, PSIZE, 1);
+		if (!mp) {
+			/* Free the blocks allocated for the iag since it was
+			 * not successfully added to the inode map
+			 */
+			dbFree(ipimap, xaddr, (s64) xlen);
+
+			/* release the inode map lock */
+			IWRITE_UNLOCK(ipimap);
+
+			rc = -EIO;
+			goto out;
+		}
+		iagp = (struct iag *) mp->data;
+
+		/* init the iag */
+		memset(iagp, 0, sizeof(struct iag));
+		iagp->iagnum = cpu_to_le32(iagno);
+		iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
+		iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
+		iagp->iagfree = cpu_to_le32(-1);
+		iagp->nfreeinos = 0;
+		iagp->nfreeexts = cpu_to_le32(EXTSPERIAG);
+
+		/* initialize the free inode summary map (free extent
+		 * summary map initialization is handled by the memset above).
+		 */
+		for (i = 0; i < SMAPSZ; i++)
+			iagp->inosmap[i] = cpu_to_le32(ONES);
+
+		/*
+		 * Invalidate the page after writing and syncing it.
+		 * After it's initialized, we access it in a different
+		 * address space
+		 */
+		set_bit(META_discard, &mp->flag);
+		flush_metapage(mp);
+
+		/*
+		 * start transaction to update the inode map
+		 * addressing structure pointing to the new iag page;
+		 */
+		tid = txBegin(sb, COMMIT_FORCE);
+		down(&JFS_IP(ipimap)->commit_sem);
+
+		/* update the inode map addressing structure to point to it */
+		if ((rc =
+		     xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
+			txEnd(tid);
+			up(&JFS_IP(ipimap)->commit_sem);
+			/* Free the blocks allocated for the iag since it was
+			 * not successfully added to the inode map
+			 */
+			dbFree(ipimap, xaddr, (s64) xlen);
+
+			/* release the inode map lock */
+			IWRITE_UNLOCK(ipimap);
+
+			goto out;
+		}
+
+		/* update the inode map's inode to reflect the extension */
+		ipimap->i_size += PSIZE;
+		inode_add_bytes(ipimap, PSIZE);
+
+		/*
+		 * txCommit(COMMIT_FORCE) will synchronously write address 
+		 * index pages and inode after commit in careful update order 
+		 * of address index pages (right to left, bottom up);
+		 */
+		iplist[0] = ipimap;
+		rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
+
+		txEnd(tid);
+		up(&JFS_IP(ipimap)->commit_sem);
+
+		duplicateIXtree(sb, blkno, xlen, &xaddr);
+
+		/* update the next available iag number */
+		imap->im_nextiag += 1;
+
+		/* Add the iag to the iag free list so we don't lose the iag
+		 * if a failure happens now.
+		 */
+		imap->im_freeiag = iagno;
+
+		/* Until we have logredo working, we want the imap inode &
+		 * control page to be up to date.
+		 */
+		diSync(ipimap);
+
+		/* release the inode map lock */
+		IWRITE_UNLOCK(ipimap);
+	}
+
+	/* obtain read lock on map */
+	IREAD_LOCK(ipimap);
+
+	/* read the iag */
+	if ((rc = diIAGRead(imap, iagno, &mp))) {
+		IREAD_UNLOCK(ipimap);
+		rc = -EIO;
+		goto out;
+	}
+	iagp = (struct iag *) mp->data;
+
+	/* remove the iag from the iag free list */
+	imap->im_freeiag = le32_to_cpu(iagp->iagfree);
+	iagp->iagfree = cpu_to_le32(-1);
+
+	/* set the return iag number and buffer pointer */
+	*iagnop = iagno;
+	*mpp = mp;
+
+      out:
+	/* release the iag free lock */
+	IAGFREE_UNLOCK(imap);
+
+	return (rc);
+}
+
+/*
+ * NAME:        diIAGRead()
+ *
+ * FUNCTION:    get the buffer for the specified iag within a fileset
+ *		or aggregate inode map.
+ *		
+ * PARAMETERS:
+ *      imap  	- pointer to inode map control structure.
+ *      iagno  	- iag number.
+ *      mpp  	- pointer to buffer pointer to be filled in on successful
+ *		  exit.
+ *
+ * SERIALIZATION:
+ *	must have read lock on imap inode
+ *	(When called by diExtendFS, the filesystem is quiesced, therefore
+ *	 the read lock is unnecessary.)
+ *
+ * RETURN VALUES:
+ *      0       - success.
+ *      -EIO  	- i/o error.
+ */
+static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
+{
+	struct inode *ipimap = imap->im_ipimap;
+	s64 blkno;
+
+	/* compute the logical block number of the iag. */
+	blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage);
+
+	/* read the iag. */
+	*mpp = read_metapage(ipimap, blkno, PSIZE, 0);
+	if (*mpp == NULL) {
+		return -EIO;
+	}
+
+	return (0);
+}
+
+/*
+ * NAME:        diFindFree()
+ *
+ * FUNCTION:    find the first free bit in a word starting at
+ *		the specified bit position.
+ *
+ * PARAMETERS:
+ *      word  	- word to be examined.
+ *      start  	- starting bit position.
+ *
+ * RETURN VALUES:
+ *      bit position of first free bit in the word or 32 if
+ *	no free bits were found.
+ */
+static int diFindFree(u32 word, int start)
+{
+	int bitno;
+	assert(start < 32);
+	/* scan the word for the first free bit. */
+	for (word <<= start, bitno = start; bitno < 32;
+	     bitno++, word <<= 1) {
+		if ((word & HIGHORDER) == 0)
+			break;
+	}
+	return (bitno);
+}
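+/*
+ * Example (illustrative, assuming HIGHORDER is the most significant bit
+ * mask, 0x80000000): diFindFree(0xc0000000, 0) skips the two set
+ * high-order bits and returns 2, while diFindFree(0xffffffff, 0) returns
+ * 32 to signal that no free bit exists in the word.
+ */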
+
+/*
+ * NAME:	diUpdatePMap()
+ *                                                                    
+ * FUNCTION: Update the persistent map in an IAG for the allocation or 
+ *	freeing of the specified inode.
+ *                                                                    
+ * PRE CONDITIONS: Working map has already been updated for allocate.
+ *
+ * PARAMETERS:
+ *	ipimap	- Incore inode map inode
+ *	inum	- Number of inode to mark in permanent map
+ *	is_free	- If TRUE indicates inode should be marked freed, otherwise
+ *		  indicates inode should be marked allocated.
+ *
+ * RETURN VALUES: 
+ *		0 for success
+ */
+int
+diUpdatePMap(struct inode *ipimap,
+	     unsigned long inum, boolean_t is_free, struct tblock * tblk)
+{
+	int rc;
+	struct iag *iagp;
+	struct metapage *mp;
+	int iagno, ino, extno, bitno;
+	struct inomap *imap;
+	u32 mask;
+	struct jfs_log *log;
+	int lsn, difft, diffp;
+
+	imap = JFS_IP(ipimap)->i_imap;
+	/* get the iag number containing the inode */
+	iagno = INOTOIAG(inum);
+	/* make sure that the iag is contained within the map */
+	if (iagno >= imap->im_nextiag) {
+		jfs_error(ipimap->i_sb,
+			  "diUpdatePMap: the iag is outside the map");
+		return -EIO;
+	}
+	/* read the iag */
+	IREAD_LOCK(ipimap);
+	rc = diIAGRead(imap, iagno, &mp);
+	IREAD_UNLOCK(ipimap);
+	if (rc)
+		return (rc);
+	iagp = (struct iag *) mp->data;
+	/* get the inode number and extent number of the inode within
+	 * the iag and the inode number within the extent.
+	 */
+	ino = inum & (INOSPERIAG - 1);
+	extno = ino >> L2INOSPEREXT;
+	bitno = ino & (INOSPEREXT - 1);
+	mask = HIGHORDER >> bitno;
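+	/* illustrative example (assuming INOSPERIAG = 4096, INOSPEREXT = 32
+	 * and HIGHORDER = 0x80000000): inum = 70 maps to ino = 70 within
+	 * the iag, extent 2, bit 6, so mask = 0x02000000 selects inode 70
+	 * in pmap[2].
+	 */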
+	/* 
+	 * mark the inode free in persistent map:
+	 */
+	if (is_free == TRUE) {
+		/* The inode should have been allocated both in working
+		 * map and in persistent map;
+		 * the inode will be freed from working map at the release
+		 * the inode will be freed from the working map at the
+		 * release of the last reference;
+		if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
+			jfs_error(ipimap->i_sb, 
+				  "diUpdatePMap: inode %ld not marked as "
+				  "allocated in wmap!", inum);
+		}
+		if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) {
+			jfs_error(ipimap->i_sb,
+				  "diUpdatePMap: inode %ld not marked as "
+				  "allocated in pmap!", inum);
+		}
+		/* update the bitmap for the extent of the freed inode */
+		iagp->pmap[extno] &= cpu_to_le32(~mask);
+	}
+	/*
+	 * mark the inode allocated in persistent map:
+	 */
+	else {
+		/* The inode should be already allocated in the working map
+		 * and should be free in persistent map;
+		 */
+		if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
+			release_metapage(mp);
+			jfs_error(ipimap->i_sb,
+				  "diUpdatePMap: the inode is not allocated in "
+				  "the working map");
+			return -EIO;
+		}
+		if ((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) {
+			release_metapage(mp);
+			jfs_error(ipimap->i_sb,
+				  "diUpdatePMap: the inode is not free in the "
+				  "persistent map");
+			return -EIO;
+		}
+		/* update the bitmap for the extent of the allocated inode */
+		iagp->pmap[extno] |= cpu_to_le32(mask);
+	}
+	/*
+	 * update iag lsn
+	 */
+	lsn = tblk->lsn;
+	log = JFS_SBI(tblk->sb)->log;
+	if (mp->lsn != 0) {
+		/* inherit older/smaller lsn */
+		logdiff(difft, lsn, log);
+		logdiff(diffp, mp->lsn, log);
+		if (difft < diffp) {
+			mp->lsn = lsn;
+			/* move mp after tblock in logsync list */
+			LOGSYNC_LOCK(log);
+			list_move(&mp->synclist, &tblk->synclist);
+			LOGSYNC_UNLOCK(log);
+		}
+		/* inherit younger/larger clsn */
+		LOGSYNC_LOCK(log);
+		assert(mp->clsn);
+		logdiff(difft, tblk->clsn, log);
+		logdiff(diffp, mp->clsn, log);
+		if (difft > diffp)
+			mp->clsn = tblk->clsn;
+		LOGSYNC_UNLOCK(log);
+	} else {
+		mp->log = log;
+		mp->lsn = lsn;
+		/* insert mp after tblock in logsync list */
+		LOGSYNC_LOCK(log);
+		log->count++;
+		list_add(&mp->synclist, &tblk->synclist);
+		mp->clsn = tblk->clsn;
+		LOGSYNC_UNLOCK(log);
+	}
+	write_metapage(mp);
+	return (0);
+}
+
+/*
+ *	diExtendFS()
+ *
+ * function: update imap for extendfs();
+ * 
+ * note: AG size has been increased s.t. each k old contiguous AGs are 
+ * coalesced into a new AG;
+ */
+int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
+{
+	int rc, rcx = 0;
+	struct inomap *imap = JFS_IP(ipimap)->i_imap;
+	struct iag *iagp = NULL, *hiagp = NULL;
+	struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap;
+	struct metapage *bp, *hbp;
+	int i, n, head;
+	int numinos, xnuminos = 0, xnumfree = 0;
+	s64 agstart;
+
+	jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d",
+		   imap->im_nextiag, atomic_read(&imap->im_numinos),
+		   atomic_read(&imap->im_numfree));
+
+	/*
+	 *      reconstruct imap 
+	 *
+	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
+	 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
+	 * note: new AG size = old AG size * (2**x).
+	 */
+
+	/* init per AG control information im_agctl[] */
+	for (i = 0; i < MAXAG; i++) {
+		imap->im_agctl[i].inofree = -1;
+		imap->im_agctl[i].extfree = -1;
+		imap->im_agctl[i].numinos = 0;	/* number of backed inodes */
+		imap->im_agctl[i].numfree = 0;	/* number of free backed inodes */
+	}
+
+	/*
+	 *      process each iag page of the map.
+	 *
+	 * rebuild AG Free Inode List, AG Free Inode Extent List;
+	 */
+	for (i = 0; i < imap->im_nextiag; i++) {
+		if ((rc = diIAGRead(imap, i, &bp))) {
+			rcx = rc;
+			continue;
+		}
+		iagp = (struct iag *) bp->data;
+		if (le32_to_cpu(iagp->iagnum) != i) {
+			release_metapage(bp);
+			jfs_error(ipimap->i_sb,
+				  "diExtendFs: unexpected value of iagnum");
+			return -EIO;
+		}
+
+		/* leave free iag in the free iag list */
+		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {  
+		        release_metapage(bp);
+			continue;
+		}
+
+		/* agstart values that compute to the same ag are treated as the same; */
+		agstart = le64_to_cpu(iagp->agstart);
+		/* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
+		n = agstart >> mp->db_agl2size;
+
+		/* compute backed inodes */
+		numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
+		    << L2INOSPEREXT;
+		if (numinos > 0) {
+			/* merge AG backed inodes */
+			imap->im_agctl[n].numinos += numinos;
+			xnuminos += numinos;
+		}
+
+		/* if any backed free inodes, insert at AG free inode list */
+		if ((int) le32_to_cpu(iagp->nfreeinos) > 0) {
+			if ((head = imap->im_agctl[n].inofree) == -1) {
+				iagp->inofreefwd = cpu_to_le32(-1);
+				iagp->inofreeback = cpu_to_le32(-1);
+			} else {
+				if ((rc = diIAGRead(imap, head, &hbp))) {
+					rcx = rc;
+					goto nextiag;
+				}
+				hiagp = (struct iag *) hbp->data;
+				hiagp->inofreeback = iagp->iagnum;
+				iagp->inofreefwd = cpu_to_le32(head);
+				iagp->inofreeback = cpu_to_le32(-1);
+				write_metapage(hbp);
+			}
+
+			imap->im_agctl[n].inofree =
+			    le32_to_cpu(iagp->iagnum);
+
+			/* merge AG backed free inodes */
+			imap->im_agctl[n].numfree +=
+			    le32_to_cpu(iagp->nfreeinos);
+			xnumfree += le32_to_cpu(iagp->nfreeinos);
+		}
+
+		/* if any free extents, insert at AG free extent list */
+		if (le32_to_cpu(iagp->nfreeexts) > 0) {
+			if ((head = imap->im_agctl[n].extfree) == -1) {
+				iagp->extfreefwd = cpu_to_le32(-1);
+				iagp->extfreeback = cpu_to_le32(-1);
+			} else {
+				if ((rc = diIAGRead(imap, head, &hbp))) {
+					rcx = rc;
+					goto nextiag;
+				}
+				hiagp = (struct iag *) hbp->data;
+				hiagp->extfreeback = iagp->iagnum;
+				iagp->extfreefwd = cpu_to_le32(head);
+				iagp->extfreeback = cpu_to_le32(-1);
+				write_metapage(hbp);
+			}
+
+			imap->im_agctl[n].extfree =
+			    le32_to_cpu(iagp->iagnum);
+		}
+
+	      nextiag:
+		write_metapage(bp);
+	}
+
+	if (xnuminos != atomic_read(&imap->im_numinos) ||
+	    xnumfree != atomic_read(&imap->im_numfree)) {
+		jfs_error(ipimap->i_sb,
+			  "diExtendFs: numinos or numfree incorrect");
+		return -EIO;
+	}
+
+	return rcx;
+}
+
+
+/*
+ *	duplicateIXtree()
+ *
+ * serialization: IWRITE_LOCK held on entry/exit
+ *
+ * note: shadow page with regular inode (rel.2);
+ */
+static void duplicateIXtree(struct super_block *sb, s64 blkno,
+			    int xlen, s64 *xaddr)
+{
+	struct jfs_superblock *j_sb;
+	struct buffer_head *bh;
+	struct inode *ip;
+	tid_t tid;
+
+	/* if AIT2 ipmap2 is bad, do not try to update it */
+	if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT)	/* s_flag */
+		return;
+	ip = diReadSpecial(sb, FILESYSTEM_I, 1);
+	if (ip == NULL) {
+		JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
+		if (readSuper(sb, &bh))
+			return;
+		j_sb = (struct jfs_superblock *)bh->b_data;
+		j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
+
+		mark_buffer_dirty(bh);
+		sync_dirty_buffer(bh);
+		brelse(bh);
+		return;
+	}
+
+	/* start transaction */
+	tid = txBegin(sb, COMMIT_FORCE);
+	/* update the inode map addressing structure to point to it */
+	if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) {
+		JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
+		txAbort(tid, 1);
+		goto cleanup;
+
+	}
+	/* update the inode map's inode to reflect the extension */
+	ip->i_size += PSIZE;
+	inode_add_bytes(ip, PSIZE);
+	txCommit(tid, 1, &ip, COMMIT_FORCE);
+      cleanup:
+	txEnd(tid);
+	diFreeSpecial(ip);
+}
+
+/*
+ * NAME:        copy_from_dinode()
+ *
+ * FUNCTION:    Copies inode info from disk inode to in-memory inode
+ *
+ * RETURN VALUES:
+ *      0       - success
+ *      -ENOMEM	- insufficient memory
+ */
+static int copy_from_dinode(struct dinode * dip, struct inode *ip)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+	jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
+	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
+
+	ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
+	ip->i_nlink = le32_to_cpu(dip->di_nlink);
+	ip->i_uid = le32_to_cpu(dip->di_uid);
+	ip->i_gid = le32_to_cpu(dip->di_gid);
+	ip->i_size = le64_to_cpu(dip->di_size);
+	ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
+	ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
+	ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
+	ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
+	ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
+	ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
+	ip->i_blksize = ip->i_sb->s_blocksize;
+	ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
+	ip->i_generation = le32_to_cpu(dip->di_gen);
+
+	jfs_ip->ixpxd = dip->di_ixpxd;	/* in-memory pxd's are little-endian */
+	jfs_ip->acl = dip->di_acl;	/* as are dxd's */
+	jfs_ip->ea = dip->di_ea;
+	jfs_ip->next_index = le32_to_cpu(dip->di_next_index);
+	jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec);
+	jfs_ip->acltype = le32_to_cpu(dip->di_acltype);
+
+	if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) {
+		jfs_ip->dev = le32_to_cpu(dip->di_rdev);
+		ip->i_rdev = new_decode_dev(jfs_ip->dev);
+	}
+
+	if (S_ISDIR(ip->i_mode)) {
+		memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
+	} else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
+		memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
+	} else
+		memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128);
+
+	/* Zero the in-memory-only stuff */
+	jfs_ip->cflag = 0;
+	jfs_ip->btindex = 0;
+	jfs_ip->btorder = 0;
+	jfs_ip->bxflag = 0;
+	jfs_ip->blid = 0;
+	jfs_ip->atlhead = 0;
+	jfs_ip->atltail = 0;
+	jfs_ip->xtlid = 0;
+	return (0);
+}
+
+/*
+ * NAME:        copy_to_dinode()
+ *
+ * FUNCTION:    Copies inode info from in-memory inode to disk inode
+ */
+static void copy_to_dinode(struct dinode * dip, struct inode *ip)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+	dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
+	dip->di_inostamp = cpu_to_le32(JFS_SBI(ip->i_sb)->inostamp);
+	dip->di_number = cpu_to_le32(ip->i_ino);
+	dip->di_gen = cpu_to_le32(ip->i_generation);
+	dip->di_size = cpu_to_le64(ip->i_size);
+	dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
+	dip->di_nlink = cpu_to_le32(ip->i_nlink);
+	dip->di_uid = cpu_to_le32(ip->i_uid);
+	dip->di_gid = cpu_to_le32(ip->i_gid);
+	/*
+	 * mode2 is only needed for storing the higher order bits.
+	 * Trust i_mode for the lower order ones
+	 */
+	dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | ip->i_mode);
+	dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
+	dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
+	dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
+	dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
+	dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
+	dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
+	dip->di_ixpxd = jfs_ip->ixpxd;	/* in-memory pxd's are little-endian */
+	dip->di_acl = jfs_ip->acl;	/* as are dxd's */
+	dip->di_ea = jfs_ip->ea;
+	dip->di_next_index = cpu_to_le32(jfs_ip->next_index);
+	dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime);
+	dip->di_otime.tv_nsec = 0;
+	dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
+	if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
+		dip->di_rdev = cpu_to_le32(jfs_ip->dev);
+}
+
+#ifdef	_JFS_DEBUG_IMAP
+/*
+ *	DBGdiInit()
+ */
+static void *DBGdiInit(struct inomap * imap)
+{
+	u32 *dimap;
+	int size;
+	size = 64 * 1024;
+	if ((dimap = (u32 *) xmalloc(size, L2PSIZE, kernel_heap)) == NULL)
+		assert(0);
+	bzero((void *) dimap, size);
+	imap->im_DBGdimap = dimap;
+}
+
+/*
+ *	DBGdiAlloc()
+ */
+static void DBGdiAlloc(struct inomap * imap, ino_t ino)
+{
+	u32 *dimap = imap->im_DBGdimap;
+	int w, b;
+	u32 m;
+	w = ino >> 5;
+	b = ino & 31;
+	m = 0x80000000 >> b;
+	assert(w < 64 * 256);
+	if (dimap[w] & m) {
+		printk("DEBUG diAlloc: duplicate alloc ino:0x%x\n", ino);
+	}
+	dimap[w] |= m;
+}
+
+/*
+ *	DBGdiFree()
+ */
+static void DBGdiFree(struct inomap * imap, ino_t ino)
+{
+	u32 *dimap = imap->im_DBGdimap;
+	int w, b;
+	u32 m;
+	w = ino >> 5;
+	b = ino & 31;
+	m = 0x80000000 >> b;
+	assert(w < 64 * 256);
+	if ((dimap[w] & m) == 0) {
+		printk("DEBUG diFree: duplicate free ino:0x%x\n", ino);
+	}
+	dimap[w] &= ~m;
+}
+
+static void dump_cp(struct inomap * ipimap, char *function, int line)
+{
+	printk("\n* ********* *\nControl Page %s %d\n", function, line);
+	printk("FreeIAG %d\tNextIAG %d\n", ipimap->im_freeiag,
+	       ipimap->im_nextiag);
+	printk("NumInos %d\tNumFree %d\n",
+	       atomic_read(&ipimap->im_numinos),
+	       atomic_read(&ipimap->im_numfree));
+	printk("AG InoFree %d\tAG ExtFree %d\n",
+	       ipimap->im_agctl[0].inofree, ipimap->im_agctl[0].extfree);
+	printk("AG NumInos %d\tAG NumFree %d\n",
+	       ipimap->im_agctl[0].numinos, ipimap->im_agctl[0].numfree);
+}
+
+static void dump_iag(struct iag * iag, char *function, int line)
+{
+	printk("\n* ********* *\nIAG %s %d\n", function, line);
+	printk("IagNum %d\tIAG Free %d\n", le32_to_cpu(iag->iagnum),
+	       le32_to_cpu(iag->iagfree));
+	printk("InoFreeFwd %d\tInoFreeBack %d\n",
+	       le32_to_cpu(iag->inofreefwd),
+	       le32_to_cpu(iag->inofreeback));
+	printk("ExtFreeFwd %d\tExtFreeBack %d\n",
+	       le32_to_cpu(iag->extfreefwd),
+	       le32_to_cpu(iag->extfreeback));
+	printk("NFreeInos %d\tNFreeExts %d\n", le32_to_cpu(iag->nfreeinos),
+	       le32_to_cpu(iag->nfreeexts));
+}
+#endif				/* _JFS_DEBUG_IMAP */
diff --git a/fs/jfs/jfs_imap.h b/fs/jfs/jfs_imap.h
new file mode 100644
index 0000000..6b59ade
--- /dev/null
+++ b/fs/jfs/jfs_imap.h
@@ -0,0 +1,175 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_IMAP
+#define _H_JFS_IMAP
+
+#include "jfs_txnmgr.h"
+
+/*
+ *	jfs_imap.h: disk inode manager
+ */
+
+#define	EXTSPERIAG	128	/* number of disk inode extents per iag */
+#define IMAPBLKNO	0	/* lblkno of dinomap within inode map   */
+#define SMAPSZ		4	/* number of words per summary map      */
+#define	EXTSPERSUM	32	/* number of extents per summary map entry */
+#define	L2EXTSPERSUM	5	/* l2 number of extents per summary map */
+#define	PGSPERIEXT	4	/* number of 4K pages per dinode extent */
+#define	MAXIAGS		((1<<20)-1)	/* maximum number of iags       */
+#define	MAXAG		128	/* maximum number of allocation groups  */
+
+#define AMAPSIZE      512	/* bytes in the IAG allocation maps */
+#define SMAPSIZE      16	/* bytes in the IAG summary maps */
+
+/* convert inode number to iag number */
+#define	INOTOIAG(ino)	((ino) >> L2INOSPERIAG)
+
+/* convert iag number to logical block number of the iag page */
+#define IAGTOLBLK(iagno,l2nbperpg)	(((iagno) + 1) << (l2nbperpg))
+
+/* get the starting block number of the 4K page of an inode extent
+ * that contains ino.
+ */
+#define INOPBLK(pxd,ino,l2nbperpg)    	(addressPXD((pxd)) +		\
+	((((ino) & (INOSPEREXT-1)) >> L2INOSPERPAGE) << (l2nbperpg)))
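+
+/* Worked example (illustrative): each iag covers 4096 inodes (see the iag
+ * comment below), i.e. L2INOSPERIAG is 12, so INOTOIAG(5000) == 5000 >> 12
+ * == 1.  IAGTOLBLK() adds 1 to the iag number because logical block
+ * IMAPBLKNO (0) of the map holds the control page rather than an iag.
+ * INOSPEREXT, L2INOSPERIAG and L2INOSPERPAGE are defined in the shared
+ * filesystem headers, not here.
+ */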
+
+/*
+ *	inode allocation map:
+ * 
+ * inode allocation map consists of 
+ * . the inode map control page and
+ * . inode allocation group pages (per 4096 inodes)
+ * which are addressed by standard JFS xtree.
+ */
+/*
+ *	inode allocation group page (per 4096 inodes of an AG)
+ */
+struct iag {
+	__le64 agstart;		/* 8: starting block of ag              */
+	__le32 iagnum;		/* 4: inode allocation group number     */
+	__le32 inofreefwd;	/* 4: ag inode free list forward        */
+	__le32 inofreeback;	/* 4: ag inode free list back           */
+	__le32 extfreefwd;	/* 4: ag inode extent free list forward */
+	__le32 extfreeback;	/* 4: ag inode extent free list back    */
+	__le32 iagfree;		/* 4: iag free list                     */
+
+	/* summary map: 1 bit per inode extent */
+	__le32 inosmap[SMAPSZ];	/* 16: sum map of mapwords w/ free inodes;
+				 *      note: this covers both free and backed
+				 *      inodes: if the extent is not backed,
+				 *      the bit is 1; if the extent is backed
+				 *      but all of its inodes are in use, the
+				 *      bit is 1; if the extent is backed and
+				 *      at least one of its inodes is free,
+				 *      the bit is 0.
+				 */
+	__le32 extsmap[SMAPSZ];	/* 16: sum map of mapwords w/ free extents */
+	__le32 nfreeinos;		/* 4: number of free inodes             */
+	__le32 nfreeexts;		/* 4: number of free extents            */
+	/* (72) */
+	u8 pad[1976];		/* 1976: pad to 2048 bytes */
+	/* allocation bit map: 1 bit per inode (0 - free, 1 - allocated) */
+	__le32 wmap[EXTSPERIAG];	/* 512: working allocation map  */
+	__le32 pmap[EXTSPERIAG];	/* 512: persistent allocation map */
+	pxd_t inoext[EXTSPERIAG];	/* 1024: inode extent addresses */
+};				/* (4096) */
+
+/*
+ *	per AG control information (in inode map control page)
+ */
+struct iagctl_disk {
+	__le32 inofree;		/* 4: free inode list anchor            */
+	__le32 extfree;		/* 4: free extent list anchor           */
+	__le32 numinos;		/* 4: number of backed inodes           */
+	__le32 numfree;		/* 4: number of free inodes             */
+};				/* (16) */
+
+struct iagctl {
+	int inofree;		/* free inode list anchor            */
+	int extfree;		/* free extent list anchor           */
+	int numinos;		/* number of backed inodes           */
+	int numfree;		/* number of free inodes             */
+};
+
+/*
+ *	per fileset/aggregate inode map control page
+ */
+struct dinomap_disk {
+	__le32 in_freeiag;	/* 4: free iag list anchor     */
+	__le32 in_nextiag;	/* 4: next free iag number     */
+	__le32 in_numinos;	/* 4: num of backed inodes */
+	__le32 in_numfree;	/* 4: num of free backed inodes */
+	__le32 in_nbperiext;	/* 4: num of blocks per inode extent */
+	__le32 in_l2nbperiext;	/* 4: l2 of in_nbperiext */
+	__le32 in_diskblock;	/* 4: for standalone test driver  */
+	__le32 in_maxag;	/* 4: for standalone test driver  */
+	u8 pad[2016];		/* 2016: pad to 2048 */
+	struct iagctl_disk in_agctl[MAXAG]; /* 2048: AG control information */
+};				/* (4096) */
+
+struct dinomap {
+	int in_freeiag;		/* free iag list anchor     */
+	int in_nextiag;		/* next free iag number     */
+	int in_numinos;		/* num of backed inodes */
+	int in_numfree;		/* num of free backed inodes */
+	int in_nbperiext;	/* num of blocks per inode extent */
+	int in_l2nbperiext;	/* l2 of in_nbperiext */
+	int in_diskblock;	/* for standalone test driver  */
+	int in_maxag;		/* for standalone test driver  */
+	struct iagctl in_agctl[MAXAG];	/* AG control information */
+};
+
+/*
+ *	In-core inode map control page
+ */
+struct inomap {
+	struct dinomap im_imap;		/* 4096: inode allocation control */
+	struct inode *im_ipimap;	/* 4: ptr to inode for imap   */
+	struct semaphore im_freelock;	/* 4: iag free list lock      */
+	struct semaphore im_aglock[MAXAG];	/* 512: per AG locks          */
+	u32 *im_DBGdimap;
+	atomic_t im_numinos;	/* num of backed inodes */
+	atomic_t im_numfree;	/* num of free backed inodes */
+};
+
+#define	im_freeiag	im_imap.in_freeiag
+#define	im_nextiag	im_imap.in_nextiag
+#define	im_agctl	im_imap.in_agctl
+#define	im_nbperiext	im_imap.in_nbperiext
+#define	im_l2nbperiext	im_imap.in_l2nbperiext
+
+/* for standalone testdriver
+ */
+#define	im_diskblock	im_imap.in_diskblock
+#define	im_maxag	im_imap.in_maxag
+
+extern int diFree(struct inode *);
+extern int diAlloc(struct inode *, boolean_t, struct inode *);
+extern int diSync(struct inode *);
+/* external references */
+extern int diUpdatePMap(struct inode *ipimap, unsigned long inum,
+			boolean_t is_free, struct tblock * tblk);
+extern int diExtendFS(struct inode *ipimap, struct inode *ipbmap);
+extern int diMount(struct inode *);
+extern int diUnmount(struct inode *, int);
+extern int diRead(struct inode *);
+extern struct inode *diReadSpecial(struct super_block *, ino_t, int);
+extern void diWriteSpecial(struct inode *, int);
+extern void diFreeSpecial(struct inode *);
+extern int diWrite(tid_t tid, struct inode *);
+#endif				/* _H_JFS_IMAP */
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
new file mode 100644
index 0000000..ebd77c1
--- /dev/null
+++ b/fs/jfs/jfs_incore.h
@@ -0,0 +1,197 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */ 
+#ifndef _H_JFS_INCORE
+#define _H_JFS_INCORE
+
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include "jfs_types.h"
+#include "jfs_xtree.h"
+#include "jfs_dtree.h"
+
+/*
+ * JFS magic number
+ */
+#define JFS_SUPER_MAGIC 0x3153464a /* "JFS1" */
+
+/*
+ * JFS-private inode information
+ */
+struct jfs_inode_info {
+	int	fileset;	/* fileset number (always 16)*/
+	uint	mode2;		/* jfs-specific mode		*/
+	pxd_t   ixpxd;		/* inode extent descriptor	*/
+	dxd_t	acl;		/* dxd describing acl	*/
+	dxd_t	ea;		/* dxd describing ea	*/
+	time_t	otime;		/* time created	*/
+	uint	next_index;	/* next available directory entry index */
+	int	acltype;	/* Type of ACL	*/
+	short	btorder;	/* access order	*/
+	short	btindex;	/* btpage entry index*/
+	struct inode *ipimap;	/* inode map			*/
+	long	cflag;		/* commit flags		*/
+	u16	bxflag;		/* xflag of pseudo buffer?	*/
+	unchar	agno;		/* ag number			*/
+	signed char active_ag;	/* ag currently allocating from	*/
+	lid_t	blid;		/* lid of pseudo buffer?	*/
+	lid_t	atlhead;	/* anonymous tlock list head	*/
+	lid_t	atltail;	/* anonymous tlock list tail	*/
+	spinlock_t ag_lock;	/* protects active_ag		*/
+	struct list_head anon_inode_list; /* inodes having anonymous txns */
+	/*
+	 * rdwrlock serializes xtree between reads & writes and synchronizes
+	 * changes to special inodes.  It's use would be redundant on
+	 * directories since the i_sem taken in the VFS is sufficient.
+	 */
+	struct rw_semaphore rdwrlock;
+	/*
+	 * commit_sem serializes transaction processing on an inode.
+	 * It must be taken after beginning a transaction (txBegin), since
+	 * dirty inodes may be committed while a new transaction on the
+	 * inode is blocked in txBegin or TxBeginAnon
+	 */
+	struct semaphore commit_sem;
+	/* xattr_sem allows us to access the xattrs without taking i_sem */
+	struct rw_semaphore xattr_sem;
+	lid_t	xtlid;		/* lid of xtree lock on directory */
+#ifdef CONFIG_JFS_POSIX_ACL
+	struct posix_acl *i_acl;
+	struct posix_acl *i_default_acl;
+#endif
+	union {
+		struct {
+			xtpage_t _xtroot;	/* 288: xtree root */
+			struct inomap *_imap;	/* 4: inode map header	*/
+		} file;
+		struct {
+			struct dir_table_slot _table[12]; /* 96: dir index */
+			dtroot_t _dtroot;	/* 288: dtree root */
+		} dir;
+		struct {
+			unchar _unused[16];	/* 16: */
+			dxd_t _dxd;		/* 16: */
+			unchar _inline[128];	/* 128: inline symlink */
+			/* _inline_ea may overlay the last part of
+			 * file._xtroot if maxentry = XTROOTINITSLOT
+			 */
+			unchar _inline_ea[128];	/* 128: inline extended attr */
+		} link;
+	} u;
+	u32 dev;	/* will die when we get wide dev_t */
+	struct inode	vfs_inode;
+};
+#define i_xtroot u.file._xtroot
+#define i_imap u.file._imap
+#define i_dirtable u.dir._table
+#define i_dtroot u.dir._dtroot
+#define i_inline u.link._inline
+#define i_inline_ea u.link._inline_ea
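+
+/*
+ * For orientation: these aliases select the arm of the union that matches
+ * the inode type; copy_from_dinode() in jfs_imap.c fills them from the
+ * on-disk inode (384 bytes of dir table + dtree root, 288 bytes of xtree
+ * root, or 128 bytes of inline EA).
+ */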
+
+#define JFS_ACL_NOT_CACHED ((void *)-1)
+
+#define IREAD_LOCK(ip)		down_read(&JFS_IP(ip)->rdwrlock)
+#define IREAD_UNLOCK(ip)	up_read(&JFS_IP(ip)->rdwrlock)
+#define IWRITE_LOCK(ip)		down_write(&JFS_IP(ip)->rdwrlock)
+#define IWRITE_UNLOCK(ip)	up_write(&JFS_IP(ip)->rdwrlock)
+
+/*
+ * cflag
+ */
+enum cflags {
+	COMMIT_Nolink,		/* inode committed with zero link count */
+	COMMIT_Inlineea,	/* commit inode inline EA */
+	COMMIT_Freewmap,	/* free WMAP at iClose() */
+	COMMIT_Dirty,		/* Inode is really dirty */
+	COMMIT_Dirtable,	/* commit changes to di_dirtable */
+	COMMIT_Stale,		/* data extent is no longer valid */
+	COMMIT_Synclist,	/* metadata pages on group commit synclist */
+};
+
+#define set_cflag(flag, ip)	set_bit(flag, &(JFS_IP(ip)->cflag))
+#define clear_cflag(flag, ip)	clear_bit(flag, &(JFS_IP(ip)->cflag))
+#define test_cflag(flag, ip)	test_bit(flag, &(JFS_IP(ip)->cflag))
+#define test_and_clear_cflag(flag, ip) \
+	test_and_clear_bit(flag, &(JFS_IP(ip)->cflag))
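+
+/*
+ * Usage sketch (illustrative): cflag is a plain bitmap on the in-core
+ * inode, driven through the helpers above, e.g.
+ *
+ *	set_cflag(COMMIT_Dirty, ip);
+ *	...
+ *	if (test_and_clear_cflag(COMMIT_Dirty, ip))
+ *		the inode still needs to be committed
+ */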
+/*
+ * JFS-private superblock information.
+ */
+struct jfs_sb_info {
+	struct super_block *sb;		/* Point back to vfs super block */
+	unsigned long	mntflag;	/* aggregate attributes	*/
+	struct inode	*ipbmap;	/* block map inode		*/
+	struct inode	*ipaimap;	/* aggregate inode map inode	*/
+	struct inode	*ipaimap2;	/* secondary aimap inode	*/
+	struct inode	*ipimap;	/* aggregate inode map inode	*/
+	struct jfs_log	*log;		/* log			*/
+	struct list_head log_list;	/* volumes associated with a journal */
+	short		bsize;		/* logical block size	*/
+	short		l2bsize;	/* log2 logical block size	*/
+	short		nbperpage;	/* blocks per page		*/
+	short		l2nbperpage;	/* log2 blocks per page	*/
+	short		l2niperblk;	/* log2 inodes per page	*/
+	dev_t		logdev;		/* external log device	*/
+	uint		aggregate;	/* volume identifier in log record */
+	pxd_t		logpxd;		/* pxd describing log	*/
+	pxd_t		fsckpxd;	/* pxd describing fsck wkspc */
+	pxd_t		ait2;		/* pxd describing AIT copy	*/
+	char		uuid[16];	/* 128-bit uuid for volume	*/
+	char		loguuid[16];	/* 128-bit uuid for log	*/
+	/*
+	 * commit_state is used for synchronization of the jfs_commit
+	 * threads.  It is protected by LAZY_LOCK().
+	 */
+	int		commit_state;	/* commit state */
+	/* Formerly in ipimap */
+	uint		gengen;		/* inode generation generator*/
+	uint		inostamp;	/* shows inode belongs to fileset*/
+
+        /* Formerly in ipbmap */
+	struct bmap	*bmap;		/* incore bmap descriptor	*/
+	struct nls_table *nls_tab;	/* current codepage		*/
+	uint		state;		/* mount/recovery state	*/
+	unsigned long	flag;		/* mount time flags */
+	uint		p_state;	/* state prior to going no integrity */
+};
+
+/* jfs_sb_info commit_state */
+#define IN_LAZYCOMMIT 1
+
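+/* JFS_IP(): map a VFS inode back to the jfs_inode_info that embeds it
+ * (the container_of pattern, via the vfs_inode member).
+ */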
+static inline struct jfs_inode_info *JFS_IP(struct inode *inode)
+{
+	return list_entry(inode, struct jfs_inode_info, vfs_inode);
+}
+
+static inline int jfs_dirtable_inline(struct inode *inode)
+{
+	return (JFS_IP(inode)->next_index <= (MAX_INLINE_DIRTABLE_ENTRY + 1));
+}
+
+static inline struct jfs_sb_info *JFS_SBI(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline int isReadOnly(struct inode *inode)
+{
+	if (JFS_SBI(inode->i_sb)->log)
+		return 0;
+	return 1;
+}
+#endif /* _H_JFS_INCORE */
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
new file mode 100644
index 0000000..84f2459
--- /dev/null
+++ b/fs/jfs/jfs_inode.c
@@ -0,0 +1,104 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_imap.h"
+#include "jfs_dinode.h"
+#include "jfs_debug.h"
+
+/*
+ * NAME:	ialloc()
+ *
+ * FUNCTION:	Allocate a new inode
+ *
+ */
+struct inode *ialloc(struct inode *parent, umode_t mode)
+{
+	struct super_block *sb = parent->i_sb;
+	struct inode *inode;
+	struct jfs_inode_info *jfs_inode;
+	int rc;
+
+	inode = new_inode(sb);
+	if (!inode) {
+		jfs_warn("ialloc: new_inode returned NULL!");
+		return inode;
+	}
+
+	jfs_inode = JFS_IP(inode);
+
+	rc = diAlloc(parent, S_ISDIR(mode), inode);
+	if (rc) {
+		jfs_warn("ialloc: diAlloc returned %d!", rc);
+		make_bad_inode(inode);
+		iput(inode);
+		return NULL;
+	}
+
+	inode->i_uid = current->fsuid;
+	if (parent->i_mode & S_ISGID) {
+		inode->i_gid = parent->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+
+	/*
+	 * Allocate inode to quota.
+	 */
+	if (DQUOT_ALLOC_INODE(inode)) {
+		DQUOT_DROP(inode);
+		inode->i_flags |= S_NOQUOTA;
+		inode->i_nlink = 0;
+		iput(inode);
+		return NULL;
+	}
+
+	inode->i_mode = mode;
+	if (S_ISDIR(mode))
+		jfs_inode->mode2 = IDIRECTORY | mode;
+	else
+		jfs_inode->mode2 = INLINEEA | ISPARSE | mode;
+	inode->i_blksize = sb->s_blocksize;
+	inode->i_blocks = 0;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	jfs_inode->otime = inode->i_ctime.tv_sec;
+	inode->i_generation = JFS_SBI(sb)->gengen++;
+
+	jfs_inode->cflag = 0;
+
+	/* Zero remaining fields */
+	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
+	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
+	jfs_inode->next_index = 0;
+	jfs_inode->acltype = 0;
+	jfs_inode->btorder = 0;
+	jfs_inode->btindex = 0;
+	jfs_inode->bxflag = 0;
+	jfs_inode->blid = 0;
+	jfs_inode->atlhead = 0;
+	jfs_inode->atltail = 0;
+	jfs_inode->xtlid = 0;
+
+	jfs_info("ialloc returns inode = 0x%p\n", inode);
+
+	return inode;
+}
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
new file mode 100644
index 0000000..3df91fb
--- /dev/null
+++ b/fs/jfs/jfs_inode.h
@@ -0,0 +1,23 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2001
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_INODE
+#define _H_JFS_INODE
+
+extern struct inode *ialloc(struct inode *, umode_t);
+
+#endif				/* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h
new file mode 100644
index 0000000..10ad1d0
--- /dev/null
+++ b/fs/jfs/jfs_lock.h
@@ -0,0 +1,51 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2001
+ *   Portions Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_LOCK
+#define _H_JFS_LOCK
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+
+/*
+ *	jfs_lock.h
+ */
+
+/*
+ * Conditional sleep where condition is protected by spinlock
+ *
+ * lock_cmd and unlock_cmd take and release the spinlock
+ */
+#define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd)	\
+do {							\
+	DECLARE_WAITQUEUE(__wait, current);		\
+							\
+	add_wait_queue(&wq, &__wait);			\
+	for (;;) {					\
+		set_current_state(TASK_UNINTERRUPTIBLE);\
+		if (cond)				\
+			break;				\
+		unlock_cmd;				\
+		schedule();				\
+		lock_cmd;				\
+	}						\
+	current->state = TASK_RUNNING;			\
+	remove_wait_queue(&wq, &__wait);		\
+} while (0)
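+
+/*
+ * Usage sketch (illustrative; the lock, waitqueue and condition names are
+ * made up):
+ *
+ *	spin_lock(&foo_lock);
+ *	__SLEEP_COND(foo_wait, (foo_flag != 0),
+ *		     spin_lock(&foo_lock), spin_unlock(&foo_lock));
+ *	spin_unlock(&foo_lock);
+ *
+ * The macro is entered and left with the lock held; it only drops it
+ * around schedule().  See LCACHE_SLEEP_COND in jfs_logmgr.c for a real
+ * wrapper.
+ */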
+
+#endif				/* _H_JFS_LOCK */
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
new file mode 100644
index 0000000..b6a6869
--- /dev/null
+++ b/fs/jfs/jfs_logmgr.c
@@ -0,0 +1,2524 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ *	jfs_logmgr.c: log manager
+ *
+ * for related information, see transaction manager (jfs_txnmgr.c), and
+ * recovery manager (jfs_logredo.c).
+ *
+ * note: for detail, RTFS.
+ *
+ *	log buffer manager:
+ * special purpose buffer manager supporting log i/o requirements.
+ * per log serial pageout of logpage
+ * queuing i/o requests and redrive i/o at iodone
+ * maintain current logpage buffer
+ * no caching since append only
+ * appropriate jfs buffer cache buffers as needed
+ *
+ *	group commit:
+ * transactions which wrote COMMIT records in the same in-memory
+ * log page during the pageout of previous/current log page(s) are
+ * committed together by the pageout of the page.
+ *
+ *	TBD lazy commit:
+ * transactions are committed asynchronously when the log page
+ * containing its COMMIT is paged out when it becomes full;
+ *
+ *	serialization:
+ * . a per log lock serializes log write.
+ * . a per log lock serializes group commit.
+ * . a per log lock serializes log open/close;
+ *
+ *	TBD log integrity:
+ * careful-write (ping-pong) of last logpage to recover from crash
+ * in overwrite.
+ * detection of split (out-of-order) write of physical sectors
+ * of last logpage via timestamp at end of each sector
+ * (with its mirror data array at the trailer).
+ *
+ *	alternatives:
+ * lsn - 64-bit monotonically increasing integer vs
+ * 32-bit lspn and page eor.
+ */
+
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/completion.h>
+#include <linux/buffer_head.h>		/* for sync_blockdev() */
+#include <linux/bio.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_txnmgr.h"
+#include "jfs_debug.h"
+
+
+/*
+ * lbuf's ready to be redriven.  Protected by log_redrive_lock (jfsIO thread)
+ */
+static struct lbuf *log_redrive_list;
+static DEFINE_SPINLOCK(log_redrive_lock);
+DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
+
+
+/*
+ *	log read/write serialization (per log)
+ */
+#define LOG_LOCK_INIT(log)	init_MUTEX(&(log)->loglock)
+#define LOG_LOCK(log)		down(&((log)->loglock))
+#define LOG_UNLOCK(log)		up(&((log)->loglock))
+
+
+/*
+ *	log group commit serialization (per log)
+ */
+
+#define LOGGC_LOCK_INIT(log)	spin_lock_init(&(log)->gclock)
+#define LOGGC_LOCK(log)		spin_lock_irq(&(log)->gclock)
+#define LOGGC_UNLOCK(log)	spin_unlock_irq(&(log)->gclock)
+#define LOGGC_WAKEUP(tblk)	wake_up_all(&(tblk)->gcwait)
+
+/*
+ *	log sync serialization (per log)
+ */
+#define	LOGSYNC_DELTA(logsize)		min((logsize)/8, 128*LOGPSIZE)
+#define	LOGSYNC_BARRIER(logsize)	((logsize)/4)
+/*
+#define	LOGSYNC_DELTA(logsize)		min((logsize)/4, 256*LOGPSIZE)
+#define	LOGSYNC_BARRIER(logsize)	((logsize)/2)
+*/
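+
+/*
+ * Illustrative numbers (assuming the usual 4K LOGPSIZE): for a 32MB log,
+ * LOGSYNC_DELTA = min(32MB/8, 128 * 4K) = 512KB and LOGSYNC_BARRIER = 8MB.
+ * lmLogSync() uses the former to pick the next syncpt trigger and the
+ * latter to decide when to raise the sync barrier.
+ */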
+
+
+/*
+ *	log buffer cache synchronization
+ */
+static DEFINE_SPINLOCK(jfsLCacheLock);
+
+#define	LCACHE_LOCK(flags)	spin_lock_irqsave(&jfsLCacheLock, flags)
+#define	LCACHE_UNLOCK(flags)	spin_unlock_irqrestore(&jfsLCacheLock, flags)
+
+/*
+ * See __SLEEP_COND in jfs_lock.h
+ */
+#define LCACHE_SLEEP_COND(wq, cond, flags)	\
+do {						\
+	if (cond)				\
+		break;				\
+	__SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \
+} while (0)
+
+#define	LCACHE_WAKEUP(event)	wake_up(event)
+
+
+/*
+ *	lbuf buffer cache (lCache) control
+ */
+/* log buffer manager pageout control (cumulative, inclusive) */
+#define	lbmREAD		0x0001
+#define	lbmWRITE	0x0002	/* enqueue at tail of write queue;
+				 * init pageout if at head of queue;
+				 */
+#define	lbmRELEASE	0x0004	/* remove from write queue
+				 * at completion of pageout;
+				 * do not free/recycle it yet:
+				 * caller will free it;
+				 */
+#define	lbmSYNC		0x0008	/* do not return to freelist
+				 * when removed from write queue;
+				 */
+#define lbmFREE		0x0010	/* return to freelist
+				 * at completion of pageout;
+				 * the buffer may be recycled;
+				 */
+#define	lbmDONE		0x0020
+#define	lbmERROR	0x0040
+#define lbmGC		0x0080	/* lbmIODone to perform post-GC processing
+				 * of log page
+				 */
+#define lbmDIRECT	0x0100
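+
+/*
+ * For orientation, typical combinations used below:
+ *	lbmWRITE | lbmRELEASE | lbmFREE	- full log page; recycle the buffer
+ *					  when the write completes (lmNextPage)
+ *	lbmWRITE | lbmRELEASE | lbmGC	- group commit pageout (lmGCwrite)
+ *	lbmWRITE | lbmSYNC		- synchronous write; the caller then
+ *					  waits in lbmIOWait() (lmLogInit)
+ */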
+
+/*
+ * Global list of active external journals
+ */
+static LIST_HEAD(jfs_external_logs);
+static struct jfs_log *dummy_log = NULL;
+static DECLARE_MUTEX(jfs_log_sem);
+
+/*
+ * external references
+ */
+extern void txLazyUnlock(struct tblock * tblk);
+extern int jfs_stop_threads;
+extern struct completion jfsIOwait;
+extern int jfs_tlocks_low;
+
+/*
+ * forward references
+ */
+static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk,
+			 struct lrd * lrd, struct tlock * tlck);
+
+static int lmNextPage(struct jfs_log * log);
+static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
+			   int activate);
+
+static int open_inline_log(struct super_block *sb);
+static int open_dummy_log(struct super_block *sb);
+static int lbmLogInit(struct jfs_log * log);
+static void lbmLogShutdown(struct jfs_log * log);
+static struct lbuf *lbmAllocate(struct jfs_log * log, int);
+static void lbmFree(struct lbuf * bp);
+static void lbmfree(struct lbuf * bp);
+static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp);
+static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block);
+static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
+static int lbmIOWait(struct lbuf * bp, int flag);
+static bio_end_io_t lbmIODone;
+static void lbmStartIO(struct lbuf * bp);
+static void lmGCwrite(struct jfs_log * log, int cant_block);
+static int lmLogSync(struct jfs_log * log, int nosyncwait);
+
+
+
+/*
+ *	statistics
+ */
+#ifdef CONFIG_JFS_STATISTICS
+static struct lmStat {
+	uint commit;		/* # of commit */
+	uint pagedone;		/* # of page written */
+	uint submitted;		/* # of pages submitted */
+	uint full_page;		/* # of full pages submitted */
+	uint partial_page;	/* # of partial pages submitted */
+} lmStat;
+#endif
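+
+/* INCREMENT() (see the debug header) bumps these, e.g. lmStat.commit in
+ * lmWriteRecord() and full_page/partial_page in lmGCwrite(); since lmStat
+ * only exists under CONFIG_JFS_STATISTICS, INCREMENT() must be a no-op
+ * otherwise.
+ */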
+
+
+/*
+ * NAME:	lmLog()
+ *
+ * FUNCTION:	write a log record;
+ *
+ * PARAMETER:
+ *
+ * RETURN:	lsn - offset to the next log record to write (end-of-log);
+ *		-1  - error;
+ *
+ * note: todo: log error handler
+ */
+int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	  struct tlock * tlck)
+{
+	int lsn;
+	int diffp, difft;
+	struct metapage *mp = NULL;
+
+	jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
+		 log, tblk, lrd, tlck);
+
+	LOG_LOCK(log);
+
+	/* log by (out-of-transaction) JFS ? */
+	if (tblk == NULL)
+		goto writeRecord;
+
+	/* log from page ? */
+	if (tlck == NULL ||
+	    tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL)
+		goto writeRecord;
+
+	/*
+	 *      initialize/update page/transaction recovery lsn
+	 */
+	lsn = log->lsn;
+
+	LOGSYNC_LOCK(log);
+
+	/*
+	 * initialize page lsn if first log write of the page
+	 */
+	if (mp->lsn == 0) {
+		mp->log = log;
+		mp->lsn = lsn;
+		log->count++;
+
+		/* insert page at tail of logsynclist */
+		list_add_tail(&mp->synclist, &log->synclist);
+	}
+
+	/*
+	 *      initialize/update lsn of tblock of the page
+	 *
+	 * transaction inherits oldest lsn of pages associated
+	 * with allocation/deallocation of resources (their
+	 * log records are used to reconstruct allocation map
+	 * at recovery time: inode for inode allocation map,
+	 * B+-tree index of extent descriptors for block
+	 * allocation map);
+	 * allocation map pages inherit transaction lsn at
+	 * commit time to allow forwarding log syncpt past log
+	 * records associated with allocation/deallocation of
+	 * resources only after persistent map of these map pages
+	 * have been updated and propagated to home.
+	 */
+	/*
+	 * initialize transaction lsn:
+	 */
+	if (tblk->lsn == 0) {
+		/* inherit lsn of its first page logged */
+		tblk->lsn = mp->lsn;
+		log->count++;
+
+		/* insert tblock after the page on logsynclist */
+		list_add(&tblk->synclist, &mp->synclist);
+	}
+	/*
+	 * update transaction lsn:
+	 */
+	else {
+		/* inherit oldest/smallest lsn of page */
+		logdiff(diffp, mp->lsn, log);
+		logdiff(difft, tblk->lsn, log);
+		if (diffp < difft) {
+			/* update tblock lsn with page lsn */
+			tblk->lsn = mp->lsn;
+
+			/* move tblock after page on logsynclist */
+			list_move(&tblk->synclist, &mp->synclist);
+		}
+	}
+
+	LOGSYNC_UNLOCK(log);
+
+	/*
+	 *      write the log record
+	 */
+      writeRecord:
+	lsn = lmWriteRecord(log, tblk, lrd, tlck);
+
+	/*
+	 * forward log syncpt if log reached next syncpt trigger
+	 */
+	logdiff(diffp, lsn, log);
+	if (diffp >= log->nextsync)
+		lsn = lmLogSync(log, 0);
+
+	/* update end-of-log lsn */
+	log->lsn = lsn;
+
+	LOG_UNLOCK(log);
+
+	/* return end-of-log address */
+	return lsn;
+}
+
+
+/*
+ * NAME:	lmWriteRecord()
+ *
+ * FUNCTION:	move the log record to current log page
+ *
+ * PARAMETER:	cd	- commit descriptor
+ *
+ * RETURN:	end-of-log address
+ *			
+ * serialization: LOG_LOCK() held on entry/exit
+ */
+static int
+lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	      struct tlock * tlck)
+{
+	int lsn = 0;		/* end-of-log address */
+	struct lbuf *bp;	/* dst log page buffer */
+	struct logpage *lp;	/* dst log page */
+	caddr_t dst;		/* destination address in log page */
+	int dstoffset;		/* end-of-log offset in log page */
+	int freespace;		/* free space in log page */
+	caddr_t p;		/* src meta-data page */
+	caddr_t src;
+	int srclen;
+	int nbytes;		/* number of bytes to move */
+	int i;
+	int len;
+	struct linelock *linelock;
+	struct lv *lv;
+	struct lvd *lvd;
+	int l2linesize;
+
+	len = 0;
+
+	/* retrieve destination log page to write */
+	bp = (struct lbuf *) log->bp;
+	lp = (struct logpage *) bp->l_ldata;
+	dstoffset = log->eor;
+
+	/* any log data to write ? */
+	if (tlck == NULL)
+		goto moveLrd;
+
+	/*
+	 *      move log record data
+	 */
+	/* retrieve source meta-data page to log */
+	if (tlck->flag & tlckPAGELOCK) {
+		p = (caddr_t) (tlck->mp->data);
+		linelock = (struct linelock *) & tlck->lock;
+	}
+	/* retrieve source in-memory inode to log */
+	else if (tlck->flag & tlckINODELOCK) {
+		if (tlck->type & tlckDTREE)
+			p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
+		else
+			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
+		linelock = (struct linelock *) & tlck->lock;
+	}
+#ifdef	_JFS_WIP
+	else if (tlck->flag & tlckINLINELOCK) {
+
+		inlinelock = (struct inlinelock *) & tlck;
+		p = (caddr_t) & inlinelock->pxd;
+		linelock = (struct linelock *) & tlck;
+	}
+#endif				/* _JFS_WIP */
+	else {
+		jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
+		return 0;	/* Probably should trap */
+	}
+	l2linesize = linelock->l2linesize;
+
+      moveData:
+	ASSERT(linelock->index <= linelock->maxcnt);
+
+	lv = linelock->lv;
+	for (i = 0; i < linelock->index; i++, lv++) {
+		if (lv->length == 0)
+			continue;
+
+		/* is page full ? */
+		if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) {
+			/* page become full: move on to next page */
+			lmNextPage(log);
+
+			bp = log->bp;
+			lp = (struct logpage *) bp->l_ldata;
+			dstoffset = LOGPHDRSIZE;
+		}
+
+		/*
+		 * move log vector data
+		 */
+		src = (u8 *) p + (lv->offset << l2linesize);
+		srclen = lv->length << l2linesize;
+		len += srclen;
+		while (srclen > 0) {
+			freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
+			nbytes = min(freespace, srclen);
+			dst = (caddr_t) lp + dstoffset;
+			memcpy(dst, src, nbytes);
+			dstoffset += nbytes;
+
+			/* is page not full ? */
+			if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
+				break;
+
+			/* page become full: move on to next page */
+			lmNextPage(log);
+
+			bp = (struct lbuf *) log->bp;
+			lp = (struct logpage *) bp->l_ldata;
+			dstoffset = LOGPHDRSIZE;
+
+			srclen -= nbytes;
+			src += nbytes;
+		}
+
+		/*
+		 * move log vector descriptor
+		 */
+		len += 4;
+		lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
+		lvd->offset = cpu_to_le16(lv->offset);
+		lvd->length = cpu_to_le16(lv->length);
+		dstoffset += 4;
+		jfs_info("lmWriteRecord: lv offset:%d length:%d",
+			 lv->offset, lv->length);
+	}
+
+	if ((i = linelock->next)) {
+		linelock = (struct linelock *) lid_to_tlock(i);
+		goto moveData;
+	}
+
+	/*
+	 *      move log record descriptor
+	 */
+      moveLrd:
+	lrd->length = cpu_to_le16(len);
+
+	src = (caddr_t) lrd;
+	srclen = LOGRDSIZE;
+
+	while (srclen > 0) {
+		freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
+		nbytes = min(freespace, srclen);
+		dst = (caddr_t) lp + dstoffset;
+		memcpy(dst, src, nbytes);
+
+		dstoffset += nbytes;
+		srclen -= nbytes;
+
+		/* are there more to move than freespace of page ? */
+		if (srclen)
+			goto pageFull;
+
+		/*
+		 * end of log record descriptor
+		 */
+
+		/* update last log record eor */
+		log->eor = dstoffset;
+		bp->l_eor = dstoffset;
+		lsn = (log->page << L2LOGPSIZE) + dstoffset;
+
+		if (lrd->type & cpu_to_le16(LOG_COMMIT)) {
+			tblk->clsn = lsn;
+			jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn,
+				 bp->l_eor);
+
+			INCREMENT(lmStat.commit);	/* # of commit */
+
+			/*
+			 * enqueue tblock for group commit:
+			 *
+			 * enqueue tblock of non-trivial/synchronous COMMIT
+			 * at tail of group commit queue
+			 * (trivial/asynchronous COMMITs are ignored by
+			 * group commit.)
+			 */
+			LOGGC_LOCK(log);
+
+			/* init tblock gc state */
+			tblk->flag = tblkGC_QUEUE;
+			tblk->bp = log->bp;
+			tblk->pn = log->page;
+			tblk->eor = log->eor;
+
+			/* enqueue transaction to commit queue */
+			list_add_tail(&tblk->cqueue, &log->cqueue);
+
+			LOGGC_UNLOCK(log);
+		}
+
+		jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x",
+			le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);
+
+		/* page not full ? */
+		if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
+			return lsn;
+
+	      pageFull:
+		/* page become full: move on to next page */
+		lmNextPage(log);
+
+		bp = (struct lbuf *) log->bp;
+		lp = (struct logpage *) bp->l_ldata;
+		dstoffset = LOGPHDRSIZE;
+		src += nbytes;
+	}
+
+	return lsn;
+}
+
+
+/*
+ * NAME:	lmNextPage()
+ *
+ * FUNCTION:	write current page and allocate next page.
+ *
+ * PARAMETER:	log
+ *
+ * RETURN:	0
+ *			
+ * serialization: LOG_LOCK() held on entry/exit
+ */
+static int lmNextPage(struct jfs_log * log)
+{
+	struct logpage *lp;
+	int lspn;		/* log sequence page number */
+	int pn;			/* current page number */
+	struct lbuf *bp;
+	struct lbuf *nextbp;
+	struct tblock *tblk;
+
+	/* get current log page number and log sequence page number */
+	pn = log->page;
+	bp = log->bp;
+	lp = (struct logpage *) bp->l_ldata;
+	lspn = le32_to_cpu(lp->h.page);
+
+	LOGGC_LOCK(log);
+
+	/*
+	 *      write or queue the full page at the tail of write queue
+	 */
+	/* get the tail tblk on commit queue */
+	if (list_empty(&log->cqueue))
+		tblk = NULL;
+	else
+		tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);
+
+	/* every tblk that has a COMMIT record on the current page,
+	 * and has not been committed, must be on the commit queue,
+	 * since tblk is queued at the commit queue at the time
+	 * of writing its COMMIT record on the page before
+	 * page becomes full (even though the tblk thread
+	 * who wrote COMMIT record may have been suspended
+	 * currently);
+	 */
+
+	/* is page bound with outstanding tail tblk ? */
+	if (tblk && tblk->pn == pn) {
+		/* mark tblk for end-of-page */
+		tblk->flag |= tblkGC_EOP;
+
+		if (log->cflag & logGC_PAGEOUT) {
+			/* if page is not already on write queue,
+			 * just enqueue (no lbmWRITE to prevent redrive)
+			 * buffer to wqueue to ensure correct serial order
+			 * of the pages since log pages will be added
+			 * continuously
+			 */
+			if (bp->l_wqnext == NULL)
+				lbmWrite(log, bp, 0, 0);
+		} else {
+			/*
+			 * No current GC leader, initiate group commit
+			 */
+			log->cflag |= logGC_PAGEOUT;
+			lmGCwrite(log, 0);
+		}
+	}
+	/* page is not bound with outstanding tblk:
+	 * init write or mark it to be redriven (lbmWRITE)
+	 */
+	else {
+		/* finalize the page */
+		bp->l_ceor = bp->l_eor;
+		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
+		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
+	}
+	LOGGC_UNLOCK(log);
+
+	/*
+	 *      allocate/initialize next page
+	 */
+	/* if log wraps, the first data page of log is 2
+	 * (0 never used, 1 is superblock).
+	 */
+	log->page = (pn == log->size - 1) ? 2 : pn + 1;
+	log->eor = LOGPHDRSIZE;	/* ? valid page empty/full at logRedo() */
+
+	/* allocate/initialize next log page buffer */
+	nextbp = lbmAllocate(log, log->page);
+	nextbp->l_eor = log->eor;
+	log->bp = nextbp;
+
+	/* initialize next log page */
+	lp = (struct logpage *) nextbp->l_ldata;
+	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
+	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
+
+	return 0;
+}
+
+
+/*
+ * NAME:	lmGroupCommit()
+ *
+ * FUNCTION:	group commit
+ *	initiate pageout of the pages with COMMIT in the order of
+ *	page number - redrive pageout of the page at the head of
+ *	pageout queue until full page has been written.
+ *
+ * RETURN:	
+ *
+ * NOTE:
+ *	LOGGC_LOCK serializes log group commit queue, and
+ *	transaction blocks on the commit queue.
+ *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
+ */
+int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
+{
+	int rc = 0;
+
+	LOGGC_LOCK(log);
+
+	/* group committed already ? */
+	if (tblk->flag & tblkGC_COMMITTED) {
+		if (tblk->flag & tblkGC_ERROR)
+			rc = -EIO;
+
+		LOGGC_UNLOCK(log);
+		return rc;
+	}
+	jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);
+
+	if (tblk->xflag & COMMIT_LAZY)
+		tblk->flag |= tblkGC_LAZY;
+
+	if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
+	    (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
+	     || jfs_tlocks_low)) {
+		/*
+		 * No pageout in progress
+		 *
+		 * start group commit as its group leader.
+		 */
+		log->cflag |= logGC_PAGEOUT;
+
+		lmGCwrite(log, 0);
+	}
+
+	if (tblk->xflag & COMMIT_LAZY) {
+		/*
+		 * Lazy transactions can leave now
+		 */
+		LOGGC_UNLOCK(log);
+		return 0;
+	}
+
+	/* lmGCwrite gives up LOGGC_LOCK, check again */
+
+	if (tblk->flag & tblkGC_COMMITTED) {
+		if (tblk->flag & tblkGC_ERROR)
+			rc = -EIO;
+
+		LOGGC_UNLOCK(log);
+		return rc;
+	}
+
+	/* upcount transaction waiting for completion
+	 */
+	log->gcrtc++;
+	tblk->flag |= tblkGC_READY;
+
+	__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
+		     LOGGC_LOCK(log), LOGGC_UNLOCK(log));
+
+	/* removed from commit queue */
+	if (tblk->flag & tblkGC_ERROR)
+		rc = -EIO;
+
+	LOGGC_UNLOCK(log);
+	return rc;
+}
+
+/*
+ * NAME:	lmGCwrite()
+ *
+ * FUNCTION:	group commit write
+ *	initiate write of log page, building a group of all transactions
+ *	with commit records on that page.
+ *
+ * RETURN:	None
+ *
+ * NOTE:
+ *	LOGGC_LOCK must be held by caller.
+ *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
+ */
+static void lmGCwrite(struct jfs_log * log, int cant_write)
+{
+	struct lbuf *bp;
+	struct logpage *lp;
+	int gcpn;		/* group commit page number */
+	struct tblock *tblk;
+	struct tblock *xtblk = NULL;
+
+	/*
+	 * build the commit group of a log page
+	 *
+	 * scan commit queue and make a commit group of all
+	 * transactions with COMMIT records on the same log page.
+	 */
+	/* get the head tblk on the commit queue */
+	gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;
+
+	list_for_each_entry(tblk, &log->cqueue, cqueue) {
+		if (tblk->pn != gcpn)
+			break;
+
+		xtblk = tblk;
+
+		/* state transition: (QUEUE, READY) -> COMMIT */
+		tblk->flag |= tblkGC_COMMIT;
+	}
+	tblk = xtblk;		/* last tblk of the page */
+
+	/*
+	 * pageout to commit transactions on the log page.
+	 */
+	bp = (struct lbuf *) tblk->bp;
+	lp = (struct logpage *) bp->l_ldata;
+	/* is page already full ? */
+	if (tblk->flag & tblkGC_EOP) {
+		/* mark page to free at end of group commit of the page */
+		tblk->flag &= ~tblkGC_EOP;
+		tblk->flag |= tblkGC_FREE;
+		bp->l_ceor = bp->l_eor;
+		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
+		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
+			 cant_write);
+		INCREMENT(lmStat.full_page);
+	}
+	/* page is not yet full */
+	else {
+		bp->l_ceor = tblk->eor;	/* ? bp->l_ceor = bp->l_eor; */
+		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
+		lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
+		INCREMENT(lmStat.partial_page);
+	}
+}
+
+/*
+ * NAME:	lmPostGC()
+ *
+ * FUNCTION:	group commit post-processing
+ *	Processes transactions after their commit records have been written
+ *	to disk, redriving log I/O if necessary.
+ *
+ * RETURN:	None
+ *
+ * NOTE:
+ *	This routine is called at interrupt time by lbmIODone
+ */
+static void lmPostGC(struct lbuf * bp)
+{
+	unsigned long flags;
+	struct jfs_log *log = bp->l_log;
+	struct logpage *lp;
+	struct tblock *tblk, *temp;
+
+	//LOGGC_LOCK(log);
+	spin_lock_irqsave(&log->gclock, flags);
+	/*
+	 * current pageout of group commit completed.
+	 *
+	 * remove/wakeup transactions from commit queue who were
+	 * group committed with the current log page
+	 */
+	list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
+		if (!(tblk->flag & tblkGC_COMMIT))
+			break;
+		/* if transaction was marked GC_COMMIT then
+		 * it has been shipped in the current pageout
+		 * and made it to disk - it is committed.
+		 */
+
+		if (bp->l_flag & lbmERROR)
+			tblk->flag |= tblkGC_ERROR;
+
+		/* remove it from the commit queue */
+		list_del(&tblk->cqueue);
+		tblk->flag &= ~tblkGC_QUEUE;
+
+		if (tblk == log->flush_tblk) {
+			/* we can stop flushing the log now */
+			clear_bit(log_FLUSH, &log->flag);
+			log->flush_tblk = NULL;
+		}
+
+		jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
+			 tblk->flag);
+
+		if (!(tblk->xflag & COMMIT_FORCE))
+			/*
+			 * Hand tblk over to lazy commit thread
+			 */
+			txLazyUnlock(tblk);
+		else {
+			/* state transition: COMMIT -> COMMITTED */
+			tblk->flag |= tblkGC_COMMITTED;
+
+			if (tblk->flag & tblkGC_READY)
+				log->gcrtc--;
+
+			LOGGC_WAKEUP(tblk);
+		}
+
+		/* was page full before pageout ?
+		 * (and this is the last tblk bound with the page)
+		 */
+		if (tblk->flag & tblkGC_FREE)
+			lbmFree(bp);
+		/* did page become full after pageout ?
+		 * (and this is the last tblk bound with the page)
+		 */
+		else if (tblk->flag & tblkGC_EOP) {
+			/* finalize the page */
+			lp = (struct logpage *) bp->l_ldata;
+			bp->l_ceor = bp->l_eor;
+			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
+			jfs_info("lmPostGC: calling lbmWrite");
+			lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
+				 1);
+		}
+
+	}
+
+	/* are there any transactions that have entered lmGroupCommit()
+	 * (whose COMMITs are after that of the last log page written)?
+	 * They are waiting for a new group commit (above at (SLEEP 1)),
+	 * or lazy transactions are on a full (queued) log page;
+	 * select the latest ready transaction as new group leader and
+	 * wake her up to lead her group.
+	 */
+	if ((!list_empty(&log->cqueue)) &&
+	    ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
+	     test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
+		/*
+		 * Call lmGCwrite with new group leader
+		 */
+		lmGCwrite(log, 1);
+
+	/* no transactions are ready yet (transactions are only just
+	 * queued (GC_QUEUE) and not entered for group commit yet).
+	 * the first transaction entering group commit
+	 * will elect herself as new group leader.
+	 */
+	else
+		log->cflag &= ~logGC_PAGEOUT;
+
+	//LOGGC_UNLOCK(log);
+	spin_unlock_irqrestore(&log->gclock, flags);
+	return;
+}
+
+/*
+ * NAME:	lmLogSync()
+ *
+ * FUNCTION:	write log SYNCPT record for specified log
+ *	if new sync address is available
+ *	(normally the case if sync() is executed by back-ground
+ *	process).
+ *	if not, explicitly run jfs_blogsync() to initiate
+ *	getting of new sync address.
+ *	calculate new value of i_nextsync which determines when
+ *	this code is called again.
+ *
+ *	this is called only from lmLog().
+ *
+ * PARAMETER:	log		- log to sync
+ *		nosyncwait	- called from outside a transaction
+ *				  (e.g. sync()); skip the syncbarrier check
+ *
+ * RETURN:	lsn	- end-of-log address
+ *			
+ * serialization: LOG_LOCK() held on entry/exit
+ */
+static int lmLogSync(struct jfs_log * log, int nosyncwait)
+{
+	int logsize;
+	int written;		/* written since last syncpt */
+	int free;		/* free space left available */
+	int delta;		/* additional delta to write normally */
+	int more;		/* additional write granted */
+	struct lrd lrd;
+	int lsn;
+	struct logsyncblk *lp;
+
+	/*
+	 *      forward syncpt
+	 */
+	/* if last sync is same as last syncpt,
+	 * invoke sync point forward processing to update sync.
+	 */
+
+	if (log->sync == log->syncpt) {
+		LOGSYNC_LOCK(log);
+		/* ToDo: push dirty metapages out to disk */
+//              bmLogSync(log);
+
+		if (list_empty(&log->synclist))
+			log->sync = log->lsn;
+		else {
+			lp = list_entry(log->synclist.next,
+					struct logsyncblk, synclist);
+			log->sync = lp->lsn;
+		}
+		LOGSYNC_UNLOCK(log);
+
+	}
+
+	/* if sync is different from last syncpt,
+	 * write a SYNCPT record with syncpt = sync.
+	 * reset syncpt = sync
+	 */
+	if (log->sync != log->syncpt) {
+		struct jfs_sb_info *sbi;
+
+		/*
+		 * We need to make sure all of the "written" metapages
+		 * actually make it to disk
+		 */
+		list_for_each_entry(sbi, &log->sb_list, log_list) {
+			if (sbi->flag & JFS_NOINTEGRITY)
+				continue;
+			filemap_fdatawrite(sbi->ipbmap->i_mapping);
+			filemap_fdatawrite(sbi->ipimap->i_mapping);
+			filemap_fdatawrite(sbi->sb->s_bdev->bd_inode->i_mapping);
+		}
+		list_for_each_entry(sbi, &log->sb_list, log_list) {
+			if (sbi->flag & JFS_NOINTEGRITY)
+				continue;
+			filemap_fdatawait(sbi->ipbmap->i_mapping);
+			filemap_fdatawait(sbi->ipimap->i_mapping);
+			filemap_fdatawait(sbi->sb->s_bdev->bd_inode->i_mapping);
+		}
+
+		lrd.logtid = 0;
+		lrd.backchain = 0;
+		lrd.type = cpu_to_le16(LOG_SYNCPT);
+		lrd.length = 0;
+		lrd.log.syncpt.sync = cpu_to_le32(log->sync);
+		lsn = lmWriteRecord(log, NULL, &lrd, NULL);
+
+		log->syncpt = log->sync;
+	} else
+		lsn = log->lsn;
+
+	/*
+	 *      setup next syncpt trigger (SWAG)
+	 */
+	logsize = log->logsize;
+
+	logdiff(written, lsn, log);
+	free = logsize - written;
+	delta = LOGSYNC_DELTA(logsize);
+	more = min(free / 2, delta);
+	if (more < 2 * LOGPSIZE) {
+		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
+		/*
+		 *      log wrapping
+		 *
+		 * option 1 - panic ? No!
+		 * option 2 - shutdown file systems
+		 *            associated with log ?
+		 * option 3 - extend log ?
+		 */
+		/*
+		 * option 4 - second chance
+		 *
+		 * mark log wrapped, and continue.
+		 * when all active transactions are completed,
+		 * mark log valid for recovery.
+		 * if crashed during invalid state, log state
+		 * implies invalid log, forcing fsck().
+		 */
+		/* mark log state log wrap in log superblock */
+		/* log->state = LOGWRAP; */
+
+		/* reset sync point computation */
+		log->syncpt = log->sync = lsn;
+		log->nextsync = delta;
+	} else
+		/* next syncpt trigger = written + more */
+		log->nextsync = written + more;
+
+	/* return if lmLogSync() from outside of transaction, e.g., sync() */
+	if (nosyncwait)
+		return lsn;
+
+	/* if number of bytes written from last sync point is more
+	 * than 1/4 of the log size, stop new transactions from
+	 * starting until all current transactions are completed
+	 * by setting syncbarrier flag.
+	 */
+	if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
+		set_bit(log_SYNCBARRIER, &log->flag);
+		jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
+			 log->syncpt);
+		/*
+		 * We may have to initiate group commit
+		 */
+		jfs_flush_journal(log, 0);
+	}
+
+	return lsn;
+}
+
+
+/*
+ * NAME:	lmLogOpen()
+ *
+ * FUNCTION:    open the log on first open;
+ *	insert filesystem in the active list of the log.
+ *
+ * PARAMETER:	ipmnt	- file system mount inode
+ *		iplog 	- log inode (out)
+ *
+ * RETURN:
+ *
+ * serialization:
+ */
+int lmLogOpen(struct super_block *sb)
+{
+	int rc;
+	struct block_device *bdev;
+	struct jfs_log *log;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+	if (sbi->flag & JFS_NOINTEGRITY)
+		return open_dummy_log(sb);
+	
+	if (sbi->mntflag & JFS_INLINELOG)
+		return open_inline_log(sb);
+
+	down(&jfs_log_sem);
+	list_for_each_entry(log, &jfs_external_logs, journal_list) {
+		if (log->bdev->bd_dev == sbi->logdev) {
+			if (memcmp(log->uuid, sbi->loguuid,
+				   sizeof(log->uuid))) {
+				jfs_warn("wrong uuid on JFS journal\n");
+				up(&jfs_log_sem);
+				return -EINVAL;
+			}
+			/*
+			 * add file system to log active file system list
+			 */
+			if ((rc = lmLogFileSystem(log, sbi, 1))) {
+				up(&jfs_log_sem);
+				return rc;
+			}
+			goto journal_found;
+		}
+	}
+
+	if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
+		up(&jfs_log_sem);
+		return -ENOMEM;
+	}
+	memset(log, 0, sizeof(struct jfs_log));
+	INIT_LIST_HEAD(&log->sb_list);
+	init_waitqueue_head(&log->syncwait);
+
+	/*
+	 *      external log as separate logical volume
+	 *
+	 * file systems to log may have n-to-1 relationship;
+	 */
+
+	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
+	if (IS_ERR(bdev)) {
+		rc = PTR_ERR(bdev);
+		goto free;
+	}
+
+	if ((rc = bd_claim(bdev, log))) {
+		goto close;
+	}
+
+	log->bdev = bdev;
+	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));
+	
+	/*
+	 * initialize log:
+	 */
+	if ((rc = lmLogInit(log)))
+		goto unclaim;
+
+	list_add(&log->journal_list, &jfs_external_logs);
+
+	/*
+	 * add file system to log active file system list
+	 */
+	if ((rc = lmLogFileSystem(log, sbi, 1)))
+		goto shutdown;
+
+journal_found:
+	LOG_LOCK(log);
+	list_add(&sbi->log_list, &log->sb_list);
+	sbi->log = log;
+	LOG_UNLOCK(log);
+
+	up(&jfs_log_sem);
+	return 0;
+
+	/*
+	 *      unwind on error
+	 */
+      shutdown:		/* unwind lbmLogInit() */
+	list_del(&log->journal_list);
+	lbmLogShutdown(log);
+
+      unclaim:
+	bd_release(bdev);
+
+      close:		/* close external log device */
+	blkdev_put(bdev);
+
+      free:		/* free log descriptor */
+	up(&jfs_log_sem);
+	kfree(log);
+
+	jfs_warn("lmLogOpen: exit(%d)", rc);
+	return rc;
+}
+
+static int open_inline_log(struct super_block *sb)
+{
+	struct jfs_log *log;
+	int rc;
+
+	if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL)))
+		return -ENOMEM;
+	memset(log, 0, sizeof(struct jfs_log));
+	INIT_LIST_HEAD(&log->sb_list);
+	init_waitqueue_head(&log->syncwait);
+
+	set_bit(log_INLINELOG, &log->flag);
+	log->bdev = sb->s_bdev;
+	log->base = addressPXD(&JFS_SBI(sb)->logpxd);
+	log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
+	    (L2LOGPSIZE - sb->s_blocksize_bits);
+	log->l2bsize = sb->s_blocksize_bits;
+	ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);
+
+	/*
+	 * initialize log.
+	 */
+	if ((rc = lmLogInit(log))) {
+		kfree(log);
+		jfs_warn("lmLogOpen: exit(%d)", rc);
+		return rc;
+	}
+
+	list_add(&JFS_SBI(sb)->log_list, &log->sb_list);
+	JFS_SBI(sb)->log = log;
+
+	return rc;
+}
+
+static int open_dummy_log(struct super_block *sb)
+{
+	int rc;
+
+	down(&jfs_log_sem);
+	if (!dummy_log) {
+		dummy_log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL);
+		if (!dummy_log) {
+			up(&jfs_log_sem);
+			return -ENOMEM;
+		}
+		memset(dummy_log, 0, sizeof(struct jfs_log));
+		INIT_LIST_HEAD(&dummy_log->sb_list);
+		init_waitqueue_head(&dummy_log->syncwait);
+		dummy_log->no_integrity = 1;
+		/* Make up some stuff */
+		dummy_log->base = 0;
+		dummy_log->size = 1024;
+		rc = lmLogInit(dummy_log);
+		if (rc) {
+			kfree(dummy_log);
+			dummy_log = NULL;
+			up(&jfs_log_sem);
+			return rc;
+		}
+	}
+
+	LOG_LOCK(dummy_log);
+	list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
+	JFS_SBI(sb)->log = dummy_log;
+	LOG_UNLOCK(dummy_log);
+	up(&jfs_log_sem);
+
+	return 0;
+}
+
+/*
+ * NAME:	lmLogInit()
+ *
+ * FUNCTION:	log initialization at first log open.
+ *
+ *	logredo() (or logformat()) should have been run previously.
+ *	initialize the log from log superblock.
+ *	set the log state in the superblock to LOGMOUNT and
+ *	write SYNCPT log record.
+ *		
+ * PARAMETER:	log	- log structure
+ *
+ * RETURN:	0	- if ok
+ *		-EINVAL	- bad log magic number or superblock dirty
+ *		error returned from logwait()
+ *			
+ * serialization: single first open thread
+ */
+int lmLogInit(struct jfs_log * log)
+{
+	int rc = 0;
+	struct lrd lrd;
+	struct logsuper *logsuper;
+	struct lbuf *bpsuper;
+	struct lbuf *bp;
+	struct logpage *lp;
+	int lsn = 0;
+
+	jfs_info("lmLogInit: log:0x%p", log);
+
+	/* initialize the group commit serialization lock */
+	LOGGC_LOCK_INIT(log);
+
+	/* allocate/initialize the log write serialization lock */
+	LOG_LOCK_INIT(log);
+
+	LOGSYNC_LOCK_INIT(log);
+
+	INIT_LIST_HEAD(&log->synclist);
+
+	INIT_LIST_HEAD(&log->cqueue);
+	log->flush_tblk = NULL;
+
+	log->count = 0;
+
+	/*
+	 * initialize log i/o
+	 */
+	if ((rc = lbmLogInit(log)))
+		return rc;
+
+	if (!test_bit(log_INLINELOG, &log->flag))
+		log->l2bsize = L2LOGPSIZE;
+	
+	/* check for disabled journaling to disk */
+	if (log->no_integrity) {
+		/*
+		 * Journal pages will still be filled.  When the time comes
+		 * to actually do the I/O, the write is not done, and the
+		 * endio routine is called directly.
+		 */
+		bp = lbmAllocate(log , 0);
+		log->bp = bp;
+		bp->l_pn = bp->l_eor = 0;
+	} else {
+		/*
+		 * validate log superblock
+		 */
+		if ((rc = lbmRead(log, 1, &bpsuper)))
+			goto errout10;
+
+		logsuper = (struct logsuper *) bpsuper->l_ldata;
+
+		if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
+			jfs_warn("*** Log Format Error ! ***");
+			rc = -EINVAL;
+			goto errout20;
+		}
+
+		/* logredo() should have been run successfully. */
+		if (logsuper->state != cpu_to_le32(LOGREDONE)) {
+			jfs_warn("*** Log Is Dirty ! ***");
+			rc = -EINVAL;
+			goto errout20;
+		}
+
+		/* initialize log from log superblock */
+		if (test_bit(log_INLINELOG,&log->flag)) {
+			if (log->size != le32_to_cpu(logsuper->size)) {
+				rc = -EINVAL;
+				goto errout20;
+			}
+			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
+				 "size:0x%x", log,
+				 (unsigned long long) log->base, log->size);
+		} else {
+			if (memcmp(logsuper->uuid, log->uuid, 16)) {
+				jfs_warn("wrong uuid on JFS log device");
+				goto errout20;
+			}
+			log->size = le32_to_cpu(logsuper->size);
+			log->l2bsize = le32_to_cpu(logsuper->l2bsize);
+			jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
+				 "size:0x%x", log,
+				 (unsigned long long) log->base, log->size);
+		}
+
+		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
+		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
+
+		/*
+		 * initialize for log append write mode
+		 */
+		/* establish current/end-of-log page/buffer */
+		if ((rc = lbmRead(log, log->page, &bp)))
+			goto errout20;
+
+		lp = (struct logpage *) bp->l_ldata;
+
+		jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
+			 le32_to_cpu(logsuper->end), log->page, log->eor,
+			 le16_to_cpu(lp->h.eor));
+
+		log->bp = bp;
+		bp->l_pn = log->page;
+		bp->l_eor = log->eor;
+
+		/* if current page is full, move on to next page */
+		if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
+			lmNextPage(log);
+
+		/*
+		 * initialize log syncpoint
+		 */
+		/*
+		 * write the first SYNCPT record with syncpoint = 0
+		 * (i.e., log redo up to HERE !);
+		 * remove current page from lbm write queue at end of pageout
+		 * (to write log superblock update), but do not release to
+		 * freelist;
+		 */
+		lrd.logtid = 0;
+		lrd.backchain = 0;
+		lrd.type = cpu_to_le16(LOG_SYNCPT);
+		lrd.length = 0;
+		lrd.log.syncpt.sync = 0;
+		lsn = lmWriteRecord(log, NULL, &lrd, NULL);
+		bp = log->bp;
+		bp->l_ceor = bp->l_eor;
+		lp = (struct logpage *) bp->l_ldata;
+		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
+		lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
+		if ((rc = lbmIOWait(bp, 0)))
+			goto errout30;
+
+		/*
+		 * update/write superblock
+		 */
+		logsuper->state = cpu_to_le32(LOGMOUNT);
+		log->serial = le32_to_cpu(logsuper->serial) + 1;
+		logsuper->serial = cpu_to_le32(log->serial);
+		lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
+		if ((rc = lbmIOWait(bpsuper, lbmFREE)))
+			goto errout30;
+	}
+
+	/* initialize logsync parameters */
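+	/*
+	 * log->size is in log pages; pages 0 (reserved) and 1 (log
+	 * superblock) never hold log records, hence the "- 2" when
+	 * computing the usable log data area in bytes below.
+	 */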
+	log->logsize = (log->size - 2) << L2LOGPSIZE;
+	log->lsn = lsn;
+	log->syncpt = lsn;
+	log->sync = log->syncpt;
+	log->nextsync = LOGSYNC_DELTA(log->logsize);
+
+	jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
+		 log->lsn, log->syncpt, log->sync);
+
+	/*
+	 * initialize for lazy/group commit
+	 */
+	log->clsn = lsn;
+
+	return 0;
+
+	/*
+	 *      unwind on error
+	 */
+      errout30:		/* release log page */
+	log->wqueue = NULL;
+	bp->l_wqnext = NULL;
+	lbmFree(bp);
+
+      errout20:		/* release log superblock */
+	lbmFree(bpsuper);
+
+      errout10:		/* unwind lbmLogInit() */
+	lbmLogShutdown(log);
+
+	jfs_warn("lmLogInit: exit(%d)", rc);
+	return rc;
+}
+
+
+/*
+ * NAME:	lmLogClose()
+ *
+ * FUNCTION:	remove file system from the active list of its log
+ *		and close the log on last close.
+ *
+ * PARAMETER:	sb	- superblock
+ *
+ * RETURN:	errors from subroutines
+ *
+ * serialization:
+ */
+int lmLogClose(struct super_block *sb)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_log *log = sbi->log;
+	struct block_device *bdev;
+	int rc = 0;
+
+	jfs_info("lmLogClose: log:0x%p", log);
+
+	down(&jfs_log_sem);
+	LOG_LOCK(log);
+	list_del(&sbi->log_list);
+	LOG_UNLOCK(log);
+	sbi->log = NULL;
+
+	/*
+	 * We need to make sure all of the "written" metapages
+	 * actually make it to disk
+	 */
+	sync_blockdev(sb->s_bdev);
+
+	if (test_bit(log_INLINELOG, &log->flag)) {
+		/*
+		 *      in-line log in host file system
+		 */
+		rc = lmLogShutdown(log);
+		kfree(log);
+		goto out;
+	}
+
+	if (!log->no_integrity)
+		lmLogFileSystem(log, sbi, 0);
+
+	if (!list_empty(&log->sb_list))
+		goto out;
+
+	/*
+	 * TODO: ensure that the dummy_log is in a state to allow
+	 * lbmLogShutdown to deallocate all the buffers and call
+	 * kfree against dummy_log.  For now, leave dummy_log & its
+	 * buffers in memory, and reuse if another no-integrity mount
+	 * is requested.
+	 */
+	if (log->no_integrity)
+		goto out;
+
+	/*
+	 *      external log as separate logical volume
+	 */
+	list_del(&log->journal_list);
+	bdev = log->bdev;
+	rc = lmLogShutdown(log);
+
+	bd_release(bdev);
+	blkdev_put(bdev);
+
+	kfree(log);
+
+      out:
+	up(&jfs_log_sem);
+	jfs_info("lmLogClose: exit(%d)", rc);
+	return rc;
+}
+
+
+/*
+ * NAME:	jfs_flush_journal()
+ *
+ * FUNCTION:	initiate write of any outstanding transactions to the journal
+ *		and optionally wait until they are all written to disk
+ *
+ *		wait == 0  flush until latest txn is committed, don't wait
+ *		wait == 1  flush until latest txn is committed, wait
+ *		wait > 1   flush until all txn's are complete, wait
+ */
+void jfs_flush_journal(struct jfs_log *log, int wait)
+{
+	int i;
+	struct tblock *target = NULL;
+
+	/* jfs_write_inode may call us during read-only mount */
+	if (!log)
+		return;
+
+	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);
+
+	LOGGC_LOCK(log);
+
+	if (!list_empty(&log->cqueue)) {
+		/*
+		 * This ensures that we will keep writing to the journal as long
+		 * as there are unwritten commit records
+		 */
+		target = list_entry(log->cqueue.prev, struct tblock, cqueue);
+
+		if (test_bit(log_FLUSH, &log->flag)) {
+			/*
+			 * We're already flushing.
+			 * if flush_tblk is NULL, we are flushing everything,
+			 * so leave it that way.  Otherwise, update it to the
+			 * latest transaction
+			 */
+			if (log->flush_tblk)
+				log->flush_tblk = target;
+		} else {
+			/* Only flush until latest transaction is committed */
+			log->flush_tblk = target;
+			set_bit(log_FLUSH, &log->flag);
+
+			/*
+			 * Initiate I/O on outstanding transactions
+			 */
+			if (!(log->cflag & logGC_PAGEOUT)) {
+				log->cflag |= logGC_PAGEOUT;
+				lmGCwrite(log, 0);
+			}
+		}
+	}
+	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
+		/* Flush until all activity complete */
+		set_bit(log_FLUSH, &log->flag);
+		log->flush_tblk = NULL;
+	}
+
+	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
+		DECLARE_WAITQUEUE(__wait, current);
+
+		add_wait_queue(&target->gcwait, &__wait);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		LOGGC_UNLOCK(log);
+		schedule();
+		current->state = TASK_RUNNING;
+		LOGGC_LOCK(log);
+		remove_wait_queue(&target->gcwait, &__wait);
+	}
+	LOGGC_UNLOCK(log);
+
+	if (wait < 2)
+		return;
+
+	/*
+	 * If there was recent activity, we may need to wait
+	 * for the lazycommit thread to catch up
+	 */
+	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
+		for (i = 0; i < 800; i++) {	/* Too much? */
+			msleep(250);
+			if (list_empty(&log->cqueue) &&
+			    list_empty(&log->synclist))
+				break;
+		}
+	}
+	assert(list_empty(&log->cqueue));
+	assert(list_empty(&log->synclist));
+	clear_bit(log_FLUSH, &log->flag);
+}
+
+/*
+ * NAME:	lmLogShutdown()
+ *
+ * FUNCTION:	log shutdown at last LogClose().
+ *
+ *		write log syncpt record.
+ *		update super block to set redone flag to 0.
+ *
+ * PARAMETER:	log	- log inode
+ *
+ * RETURN:	0	- success
+ *			
+ * serialization: single last close thread
+ */
+int lmLogShutdown(struct jfs_log * log)
+{
+	int rc;
+	struct lrd lrd;
+	int lsn;
+	struct logsuper *logsuper;
+	struct lbuf *bpsuper;
+	struct lbuf *bp;
+	struct logpage *lp;
+
+	jfs_info("lmLogShutdown: log:0x%p", log);
+
+	jfs_flush_journal(log, 2);
+
+	/*
+	 * write the last SYNCPT record with syncpoint = 0
+	 * (i.e., log redo up to HERE !)
+	 */
+	lrd.logtid = 0;
+	lrd.backchain = 0;
+	lrd.type = cpu_to_le16(LOG_SYNCPT);
+	lrd.length = 0;
+	lrd.log.syncpt.sync = 0;
+	
+	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
+	bp = log->bp;
+	lp = (struct logpage *) bp->l_ldata;
+	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
+	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
+	lbmIOWait(log->bp, lbmFREE);
+
+	/*
+	 * synchronous update log superblock
+	 * mark log state as shutdown cleanly
+	 * (i.e., Log does not need to be replayed).
+	 */
+	if ((rc = lbmRead(log, 1, &bpsuper)))
+		goto out;
+
+	logsuper = (struct logsuper *) bpsuper->l_ldata;
+	logsuper->state = cpu_to_le32(LOGREDONE);
+	logsuper->end = cpu_to_le32(lsn);
+	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
+	rc = lbmIOWait(bpsuper, lbmFREE);
+
+	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
+		 lsn, log->page, log->eor);
+
+      out:    
+	/*
+	 * shutdown per log i/o
+	 */
+	lbmLogShutdown(log);
+
+	if (rc) {
+		jfs_warn("lmLogShutdown: exit(%d)", rc);
+	}
+	return rc;
+}
+
+
+/*
+ * NAME:	lmLogFileSystem()
+ *
+ * FUNCTION:	insert (<activate> = true)/remove (<activate> = false)
+ *	file system into/from log active file system list.
+ *
+ * PARAMETERS:	log	- pointer to the log
+ *		sbi	- jfs_sb_info of the file system being inserted/removed
+ *		activate - insert/remove file system from active list.
+ *
+ * RETURN:	0	- success
+ *		errors returned by vms_iowait().
+ */
+static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
+			   int activate)
+{
+	int rc = 0;
+	int i;
+	struct logsuper *logsuper;
+	struct lbuf *bpsuper;
+	char *uuid = sbi->uuid;
+
+	/*
+	 * insert/remove file system device to log active file system list.
+	 */
+	if ((rc = lbmRead(log, 1, &bpsuper)))
+		return rc;
+
+	logsuper = (struct logsuper *) bpsuper->l_ldata;
+	if (activate) {
+		for (i = 0; i < MAX_ACTIVE; i++)
+			if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
+				memcpy(logsuper->active[i].uuid, uuid, 16);
+				sbi->aggregate = i;
+				break;
+			}
+		if (i == MAX_ACTIVE) {
+			jfs_warn("Too many file systems sharing journal!");
+			lbmFree(bpsuper);
+			return -EMFILE;	/* Is there a better rc? */
+		}
+	} else {
+		for (i = 0; i < MAX_ACTIVE; i++)
+			if (!memcmp(logsuper->active[i].uuid, uuid, 16)) {
+				memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
+				break;
+			}
+		if (i == MAX_ACTIVE) {
+			jfs_warn("Somebody stomped on the journal!");
+			lbmFree(bpsuper);
+			return -EIO;
+		}
+		
+	}
+
+	/*
+	 * synchronous write log superblock:
+	 *
+	 * write sidestream bypassing write queue:
+	 * at file system mount, log super block is updated for
+	 * activation of the file system before any log record
+	 * (MOUNT record) of the file system, and at file system
+	 * unmount, all meta data for the file system has been
+	 * flushed before log super block is updated for deactivation
+	 * of the file system.
+	 */
+	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
+	rc = lbmIOWait(bpsuper, lbmFREE);
+
+	return rc;
+}
+
+/*
+ *		log buffer manager (lbm)
+ *		------------------------
+ *
+ * special purpose buffer manager supporting log i/o requirements.
+ *
+ * per log write queue:
+ * log pageout occurs in serial order via a fifo write queue,
+ * restricted to a single i/o in progress at any one time.
+ * the queue is a circular singly-linked list
+ * (log->wqueue points to the tail, and buffers are linked via
+ * the bp->l_wqnext field), and
+ * holds log pages in pageout or waiting for pageout in serial order.
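+ *
+ * for example, after lbmWrite() has queued buffers A, B and C in that
+ * order, log->wqueue points at C (the tail) and the links run
+ * C -> A -> B -> C, so the head of the queue is always tail->l_wqnext;
+ * lbmIODone() unlinks the head when its full-page pageout completes and
+ * redrives the next buffer.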
+ */
+
+/*
+ *	lbmLogInit()
+ *
+ * initialize per log I/O setup at lmLogInit()
+ */
+static int lbmLogInit(struct jfs_log * log)
+{				/* log inode */
+	int i;
+	struct lbuf *lbuf;
+
+	jfs_info("lbmLogInit: log:0x%p", log);
+
+	/* initialize current buffer cursor */
+	log->bp = NULL;
+
+	/* initialize log device write queue */
+	log->wqueue = NULL;
+
+	/*
+	 * Each log has its own buffer pages allocated to it.  These are
+	 * not managed by the page cache.  This ensures that a transaction
+	 * writing to the log does not block trying to allocate a page from
+	 * the page cache (for the log).  This would be bad, since page
+	 * allocation waits on the kswapd thread that may be committing inodes
+	 * which would cause log activity.  Was that clear?  I'm trying to
+	 * avoid deadlock here.
+	 */
+	init_waitqueue_head(&log->free_wait);
+
+	log->lbuf_free = NULL;
+
+	for (i = 0; i < LOGPAGES; i++) {
+		lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
+		if (lbuf == 0)
+			goto error;
+		lbuf->l_ldata = (char *) get_zeroed_page(GFP_KERNEL);
+		if (lbuf->l_ldata == 0) {
+			kfree(lbuf);
+			goto error;
+		}
+		lbuf->l_log = log;
+		init_waitqueue_head(&lbuf->l_ioevent);
+
+		lbuf->l_freelist = log->lbuf_free;
+		log->lbuf_free = lbuf;
+	}
+
+	return (0);
+
+      error:
+	lbmLogShutdown(log);
+	return -ENOMEM;
+}
+
+
+/*
+ *	lbmLogShutdown()
+ *
+ * finalize per log I/O setup at lmLogShutdown()
+ */
+static void lbmLogShutdown(struct jfs_log * log)
+{
+	struct lbuf *lbuf;
+
+	jfs_info("lbmLogShutdown: log:0x%p", log);
+
+	lbuf = log->lbuf_free;
+	while (lbuf) {
+		struct lbuf *next = lbuf->l_freelist;
+		free_page((unsigned long) lbuf->l_ldata);
+		kfree(lbuf);
+		lbuf = next;
+	}
+
+	log->bp = NULL;
+}
+
+
+/*
+ *	lbmAllocate()
+ *
+ * allocate an empty log buffer
+ */
+static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
+{
+	struct lbuf *bp;
+	unsigned long flags;
+
+	/*
+	 * recycle from log buffer freelist if any
+	 */
+	LCACHE_LOCK(flags);
+	LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
+	log->lbuf_free = bp->l_freelist;
+	LCACHE_UNLOCK(flags);
+
+	bp->l_flag = 0;
+
+	bp->l_wqnext = NULL;
+	bp->l_freelist = NULL;
+
+	bp->l_pn = pn;
+	bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));
+	bp->l_ceor = 0;
+
+	return bp;
+}
+
+
+/*
+ *	lbmFree()
+ *
+ * release a log buffer to freelist
+ */
+static void lbmFree(struct lbuf * bp)
+{
+	unsigned long flags;
+
+	LCACHE_LOCK(flags);
+
+	lbmfree(bp);
+
+	LCACHE_UNLOCK(flags);
+}
+
+static void lbmfree(struct lbuf * bp)
+{
+	struct jfs_log *log = bp->l_log;
+
+	assert(bp->l_wqnext == NULL);
+
+	/*
+	 * return the buffer to head of freelist
+	 */
+	bp->l_freelist = log->lbuf_free;
+	log->lbuf_free = bp;
+
+	wake_up(&log->free_wait);
+	return;
+}
+
+
+/*
+ * NAME:	lbmRedrive
+ *
+ * FUNCTION:	add a log buffer to the log redrive list
+ *
+ * PARAMETER:
+ *     bp	- log buffer
+ *
+ * NOTES:
+ *	Takes log_redrive_lock.
+ */
+static inline void lbmRedrive(struct lbuf *bp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&log_redrive_lock, flags);
+	bp->l_redrive_next = log_redrive_list;
+	log_redrive_list = bp;
+	spin_unlock_irqrestore(&log_redrive_lock, flags);
+
+	wake_up(&jfs_IO_thread_wait);
+}
+
+
+/*
+ *	lbmRead()
+ */
+static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
+{
+	struct bio *bio;
+	struct lbuf *bp;
+
+	/*
+	 * allocate a log buffer
+	 */
+	*bpp = bp = lbmAllocate(log, pn);
+	jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn);
+
+	bp->l_flag |= lbmREAD;
+
+	bio = bio_alloc(GFP_NOFS, 1);
+
+	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_bdev = log->bdev;
+	bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
+	bio->bi_io_vec[0].bv_len = LOGPSIZE;
+	bio->bi_io_vec[0].bv_offset = 0;
+
+	bio->bi_vcnt = 1;
+	bio->bi_idx = 0;
+	bio->bi_size = LOGPSIZE;
+
+	bio->bi_end_io = lbmIODone;
+	bio->bi_private = bp;
+	submit_bio(READ_SYNC, bio);
+
+	wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
+
+	return 0;
+}
+
+
+/*
+ *	lbmWrite()
+ *
+ * buffer at head of pageout queue stays after completion of
+ * partial-page pageout and is redriven by explicit initiation of
+ * pageout by the caller until full-page pageout is completed and
+ * the buffer is released.
+ *
+ * device driver i/o done redrives pageout of new buffer at
+ * head of pageout queue when current buffer at head of pageout
+ * queue is released at the completion of its full-page pageout.
+ *
+ * LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
+ * LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
+ */
+static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
+		     int cant_block)
+{
+	struct lbuf *tail;
+	unsigned long flags;
+
+	jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn);
+
+	/* map the logical block address to physical block address */
+	bp->l_blkno =
+	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
+
+	LCACHE_LOCK(flags);		/* disable+lock */
+
+	/*
+	 * initialize buffer for device driver
+	 */
+	bp->l_flag = flag;
+
+	/*
+	 *      insert bp at tail of write queue associated with log
+	 *
+	 * (request is either for bp already/currently at head of queue
+	 * or new bp to be inserted at tail)
+	 */
+	tail = log->wqueue;
+
+	/* is buffer not already on write queue ? */
+	if (bp->l_wqnext == NULL) {
+		/* insert at tail of wqueue */
+		if (tail == NULL) {
+			log->wqueue = bp;
+			bp->l_wqnext = bp;
+		} else {
+			log->wqueue = bp;
+			bp->l_wqnext = tail->l_wqnext;
+			tail->l_wqnext = bp;
+		}
+
+		tail = bp;
+	}
+
+	/* is buffer at head of wqueue and for write ? */
+	if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) {
+		LCACHE_UNLOCK(flags);	/* unlock+enable */
+		return;
+	}
+
+	LCACHE_UNLOCK(flags);	/* unlock+enable */
+
+	if (cant_block)
+		lbmRedrive(bp);
+	else if (flag & lbmSYNC)
+		lbmStartIO(bp);
+	else {
+		LOGGC_UNLOCK(log);
+		lbmStartIO(bp);
+		LOGGC_LOCK(log);
+	}
+}
+
+
+/*
+ *	lbmDirectWrite()
+ *
+ * initiate pageout bypassing write queue for sidestream
+ * (e.g., log superblock) write;
+ */
+static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
+{
+	jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
+		 bp, flag, bp->l_pn);
+
+	/*
+	 * initialize buffer for device driver
+	 */
+	bp->l_flag = flag | lbmDIRECT;
+
+	/* map the logical block address to physical block address */
+	bp->l_blkno =
+	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
+
+	/*
+	 *      initiate pageout of the page
+	 */
+	lbmStartIO(bp);
+}
+
+
+/*
+ * NAME:	lbmStartIO()
+ *
+ * FUNCTION:	Interface to DD strategy routine
+ *
+ * RETURN:      none
+ *
+ * serialization: LCACHE_LOCK() is NOT held during log i/o;
+ */
+static void lbmStartIO(struct lbuf * bp)
+{
+	struct bio *bio;
+	struct jfs_log *log = bp->l_log;
+
+	jfs_info("lbmStartIO\n");
+
+	bio = bio_alloc(GFP_NOFS, 1);
+	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_bdev = log->bdev;
+	bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
+	bio->bi_io_vec[0].bv_len = LOGPSIZE;
+	bio->bi_io_vec[0].bv_offset = 0;
+
+	bio->bi_vcnt = 1;
+	bio->bi_idx = 0;
+	bio->bi_size = LOGPSIZE;
+
+	bio->bi_end_io = lbmIODone;
+	bio->bi_private = bp;
+
+	/* check if journaling to disk has been disabled */
+	if (!log->no_integrity) {
+		submit_bio(WRITE_SYNC, bio);
+		INCREMENT(lmStat.submitted);
+	}
+	else {
+		bio->bi_size = 0;
+		lbmIODone(bio, 0, 0); /* 2nd argument appears to not be used => 0
+				       *  3rd argument appears to not be used => 0
+				       */
+	}
+}
+
+
+/*
+ *	lbmIOWait()
+ */
+static int lbmIOWait(struct lbuf * bp, int flag)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
+
+	LCACHE_LOCK(flags);		/* disable+lock */
+
+	LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags);
+
+	rc = (bp->l_flag & lbmERROR) ? -EIO : 0;
+
+	if (flag & lbmFREE)
+		lbmfree(bp);
+
+	LCACHE_UNLOCK(flags);	/* unlock+enable */
+
+	jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
+	return rc;
+}
+
+/*
+ *	lbmIODone()
+ *
+ * executed at INTIODONE level
+ */
+static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
+{
+	struct lbuf *bp = bio->bi_private;
+	struct lbuf *nextbp, *tail;
+	struct jfs_log *log;
+	unsigned long flags;
+
+	if (bio->bi_size)
+		return 1;
+
+	/*
+	 * get back jfs buffer bound to the i/o buffer
+	 */
+	jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag);
+
+	LCACHE_LOCK(flags);		/* disable+lock */
+
+	bp->l_flag |= lbmDONE;
+
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+		bp->l_flag |= lbmERROR;
+
+		jfs_err("lbmIODone: I/O error in JFS log");
+	}
+
+	bio_put(bio);
+
+	/*
+	 *      pagein completion
+	 */
+	if (bp->l_flag & lbmREAD) {
+		bp->l_flag &= ~lbmREAD;
+
+		LCACHE_UNLOCK(flags);	/* unlock+enable */
+
+		/* wakeup I/O initiator */
+		LCACHE_WAKEUP(&bp->l_ioevent);
+
+		return 0;
+	}
+
+	/*
+	 *      pageout completion
+	 *
+	 * the bp at the head of write queue has completed pageout.
+	 *
+	 * if single-commit/full-page pageout, remove the current buffer
+	 * from head of pageout queue, and redrive pageout with
+	 * the new buffer at head of pageout queue;
+	 * otherwise, the partial-page pageout buffer stays at
+	 * the head of pageout queue to be redriven for pageout
+	 * by lmGroupCommit() until full-page pageout is completed.
+	 */
+	bp->l_flag &= ~lbmWRITE;
+	INCREMENT(lmStat.pagedone);
+
+	/* update committed lsn */
+	log = bp->l_log;
+	log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor;
+
+	if (bp->l_flag & lbmDIRECT) {
+		LCACHE_WAKEUP(&bp->l_ioevent);
+		LCACHE_UNLOCK(flags);
+		return 0;
+	}
+
+	tail = log->wqueue;
+
+	/* single element queue */
+	if (bp == tail) {
+		/* remove head buffer of full-page pageout
+		 * from log device write queue
+		 */
+		if (bp->l_flag & lbmRELEASE) {
+			log->wqueue = NULL;
+			bp->l_wqnext = NULL;
+		}
+	}
+	/* multi element queue */
+	else {
+		/* remove head buffer of full-page pageout
+		 * from log device write queue
+		 */
+		if (bp->l_flag & lbmRELEASE) {
+			nextbp = tail->l_wqnext = bp->l_wqnext;
+			bp->l_wqnext = NULL;
+
+			/*
+			 * redrive pageout of next page at head of write queue:
+			 * redrive next page without any bound tblk
+			 * (i.e., page w/o any COMMIT records), or
+			 * first page of new group commit which has been
+			 * queued after current page (subsequent pageout
+			 * is performed synchronously, except page without
+			 * any COMMITs) by lmGroupCommit() as indicated
+			 * by lbmWRITE flag;
+			 */
+			if (nextbp->l_flag & lbmWRITE) {
+				/*
+				 * We can't do the I/O at interrupt time.
+				 * The jfsIO thread can do it
+				 */
+				lbmRedrive(nextbp);
+			}
+		}
+	}
+
+	/*
+	 *      synchronous pageout:
+	 *
+	 * buffer has not necessarily been removed from write queue
+	 * (e.g., synchronous write of partial-page with COMMIT):
+	 * leave buffer for i/o initiator to dispose
+	 */
+	if (bp->l_flag & lbmSYNC) {
+		LCACHE_UNLOCK(flags);	/* unlock+enable */
+
+		/* wakeup I/O initiator */
+		LCACHE_WAKEUP(&bp->l_ioevent);
+	}
+
+	/*
+	 *      Group Commit pageout:
+	 */
+	else if (bp->l_flag & lbmGC) {
+		LCACHE_UNLOCK(flags);
+		lmPostGC(bp);
+	}
+
+	/*
+	 *      asynchronous pageout:
+	 *
+	 * buffer must have been removed from write queue:
+	 * insert buffer at head of freelist where it can be recycled
+	 */
+	else {
+		assert(bp->l_flag & lbmRELEASE);
+		assert(bp->l_flag & lbmFREE);
+		lbmfree(bp);
+
+		LCACHE_UNLOCK(flags);	/* unlock+enable */
+	}
+
+	return 0;
+}
+
+int jfsIOWait(void *arg)
+{
+	struct lbuf *bp;
+
+	daemonize("jfsIO");
+
+	complete(&jfsIOwait);
+
+	do {
+		DECLARE_WAITQUEUE(wq, current);
+
+		spin_lock_irq(&log_redrive_lock);
+		while ((bp = log_redrive_list) != 0) {
+			log_redrive_list = bp->l_redrive_next;
+			bp->l_redrive_next = NULL;
+			spin_unlock_irq(&log_redrive_lock);
+			lbmStartIO(bp);
+			spin_lock_irq(&log_redrive_lock);
+		}
+		if (current->flags & PF_FREEZE) {
+			spin_unlock_irq(&log_redrive_lock);
+			refrigerator(PF_FREEZE);
+		} else {
+			add_wait_queue(&jfs_IO_thread_wait, &wq);
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(&log_redrive_lock);
+			schedule();
+			current->state = TASK_RUNNING;
+			remove_wait_queue(&jfs_IO_thread_wait, &wq);
+		}
+	} while (!jfs_stop_threads);
+
+	jfs_info("jfsIOWait being killed!");
+	complete_and_exit(&jfsIOwait, 0);
+}
+
+/*
+ * NAME:	lmLogFormat()/jfs_logform()
+ *
+ * FUNCTION:	format file system log
+ *
+ * PARAMETERS:
+ *      log	- volume log
+ *	logAddress - start address of log space in FS block
+ *	logSize	- length of log space in FS block;
+ *
+ * RETURN:	0	- success
+ *		-EIO	- i/o error
+ *
+ * XXX: We're synchronously writing one page at a time.  This needs to
+ *	be improved by writing multiple pages at once.
+ */
+int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
+{
+	int rc = -EIO;
+	struct jfs_sb_info *sbi;
+	struct logsuper *logsuper;
+	struct logpage *lp;
+	int lspn;		/* log sequence page number */
+	struct lrd *lrd_ptr;
+	int npages = 0;
+	struct lbuf *bp;
+
+	jfs_info("lmLogFormat: logAddress:%Ld logSize:%d",
+		 (long long)logAddress, logSize);
+
+	sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list);
+
+	/* allocate a log buffer */
+	bp = lbmAllocate(log, 1);
+
+	npages = logSize >> sbi->l2nbperpage;
+
+	/*
+	 *      log space:
+	 *
+	 * page 0 - reserved;
+	 * page 1 - log superblock;
+	 * page 2 - log data page: a SYNCPT log record is written
+	 *          into this page at logform time;
+	 * pages 3-N - log data page: set to empty log data pages;
+	 */
+	/*
+	 *      init log superblock: log page 1
+	 */
+	logsuper = (struct logsuper *) bp->l_ldata;
+
+	logsuper->magic = cpu_to_le32(LOGMAGIC);
+	logsuper->version = cpu_to_le32(LOGVERSION);
+	logsuper->state = cpu_to_le32(LOGREDONE);
+	logsuper->flag = cpu_to_le32(sbi->mntflag);	/* ? */
+	logsuper->size = cpu_to_le32(npages);
+	logsuper->bsize = cpu_to_le32(sbi->bsize);
+	logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
+	logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);
+
+	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+	bp->l_blkno = logAddress + sbi->nbperpage;
+	lbmStartIO(bp);
+	if ((rc = lbmIOWait(bp, 0)))
+		goto exit;
+
+	/*
+	 *      init pages 2 to npages-1 as log data pages:
+	 *
+	 * log sequence page number (lspn) initialization:
+	 *
+	 * pn:   0     1     2     3                 n-1
+	 *       +-----+-----+=====+=====+===.....===+=====+
+	 * lspn:             N-1   0     1           N-2
+	 *                   <--- N page circular file ---->
+	 *
+	 * the N (= npages-2) data pages of the log are maintained as
+	 * a circular file for the log records;
+	 * the lspn grows by 1 monotonically as each log page is written
+	 * to the circular file of the log;
+	 * and setLogpage() will not reset the page number even if
+	 * the eor is equal to LOGPHDRSIZE.  In order for the binary search
+	 * to still work in the find-log-end process, we have to simulate
+	 * the log wrap situation at log format time.
+	 * The 1st log page written will have the highest lspn.  Then
+	 * the succeeding log pages will have the lspn in ascending order
+	 * starting from 0, ..., (N-2)
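+	 *
+	 * e.g. for npages = 8 there are N = 6 data pages: page 2 is
+	 * written with lspn = 5 and pages 3..7 then get lspn 0..4.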
+	 */
+	lp = (struct logpage *) bp->l_ldata;
+	/*
+	 * initialize 1st log page to be written: lspn = N - 1;
+	 * a SYNCPT log record is written to this page
+	 */
+	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
+	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);
+
+	lrd_ptr = (struct lrd *) &lp->data;
+	lrd_ptr->logtid = 0;
+	lrd_ptr->backchain = 0;
+	lrd_ptr->type = cpu_to_le16(LOG_SYNCPT);
+	lrd_ptr->length = 0;
+	lrd_ptr->log.syncpt.sync = 0;
+
+	bp->l_blkno += sbi->nbperpage;
+	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+	lbmStartIO(bp);
+	if ((rc = lbmIOWait(bp, 0)))
+		goto exit;
+
+	/*
+	 *      initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
+	 */
+	for (lspn = 0; lspn < npages - 3; lspn++) {
+		lp->h.page = lp->t.page = cpu_to_le32(lspn);
+		lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
+
+		bp->l_blkno += sbi->nbperpage;
+		bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+		lbmStartIO(bp);
+		if ((rc = lbmIOWait(bp, 0)))
+			goto exit;
+	}
+
+	rc = 0;
+exit:
+	/*
+	 *      finalize log
+	 */
+	/* release the buffer */
+	lbmFree(bp);
+
+	return rc;
+}
+
+#ifdef CONFIG_JFS_STATISTICS
+int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
+		      int *eof, void *data)
+{
+	int len = 0;
+	off_t begin;
+
+	len += sprintf(buffer,
+		       "JFS Logmgr stats\n"
+		       "================\n"
+		       "commits = %d\n"
+		       "writes submitted = %d\n"
+		       "writes completed = %d\n"
+		       "full pages submitted = %d\n"
+		       "partial pages submitted = %d\n",
+		       lmStat.commit,
+		       lmStat.submitted,
+		       lmStat.pagedone,
+		       lmStat.full_page,
+		       lmStat.partial_page);
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if (len > length)
+		len = length;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+#endif /* CONFIG_JFS_STATISTICS */
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
new file mode 100644
index 0000000..141ad74
--- /dev/null
+++ b/fs/jfs/jfs_logmgr.h
@@ -0,0 +1,510 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_LOGMGR
+#define _H_JFS_LOGMGR
+
+#include "jfs_filsys.h"
+#include "jfs_lock.h"
+
+/*
+ *	log manager configuration parameters
+ */
+
+/* log page size */
+#define	LOGPSIZE	4096
+#define	L2LOGPSIZE	12
+
+#define LOGPAGES	16	/* Log pages per mounted file system */
+
+/*
+ *	log logical volume
+ *
+ * a log is used to make the commit operation on journalled 
+ * files within the same logical volume group atomic.
+ * a log is implemented with a logical volume.
+ * there is one log per logical volume group. 
+ *
+ * block 0 of the log logical volume is not used (ipl etc).
+ * block 1 contains a log "superblock" and is used by logFormat(),
+ * lmLogInit(), lmLogShutdown(), and logRedo() to record status 
+ * of the log but is not otherwise used during normal processing. 
+ * blocks 2 - (N-1) are used to contain log records.
+ *
+ * when a volume group is varied-on-line, logRedo() must have 
+ * been executed before the file systems (logical volumes) in 
+ * the volume group can be mounted.
+ */
+/*
+ *	log superblock (block 1 of logical volume)
+ */
+#define	LOGSUPER_B	1
+#define	LOGSTART_B	2
+
+#define	LOGMAGIC	0x87654321
+#define	LOGVERSION	1
+
+#define MAX_ACTIVE	128	/* Max active file systems sharing log */
+
+struct logsuper {
+	__le32 magic;		/* 4: log lv identifier */
+	__le32 version;		/* 4: version number */
+	__le32 serial;		/* 4: log open/mount counter */
+	__le32 size;		/* 4: size in number of LOGPSIZE blocks */
+	__le32 bsize;		/* 4: logical block size in byte */
+	__le32 l2bsize;		/* 4: log2 of bsize */
+
+	__le32 flag;		/* 4: option */
+	__le32 state;		/* 4: state - see below */
+
+	__le32 end;		/* 4: addr of last log record set by logredo */
+	char uuid[16];		/* 16: 128-bit journal uuid */
+	char label[16];		/* 16: journal label */
+	struct {
+		char uuid[16];
+	} active[MAX_ACTIVE];	/* 2048: active file systems list */
+};
+
+#define NULL_UUID "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+
+/* log flag: commit option (see jfs_filsys.h) */
+
+/* log state */
+#define	LOGMOUNT	0	/* log mounted by lmLogInit() */
+#define LOGREDONE	1	/* log shutdown by lmLogShutdown().
+				 * log redo completed by logredo().
+				 */
+#define LOGWRAP		2	/* log wrapped */
+#define LOGREADERR	3	/* log read error detected in logredo() */
+
+
+/*
+ *	log logical page
+ *
+ * (this comment should be rewritten !)
+ * the header and trailer structures (h,t) will normally have 
+ * the same page and eor value.
+ * An exception to this occurs when a complete page write is not 
+ * accomplished on a power failure. Since the hardware may "split write"
+ * sectors in the page, any out of order sequence may occur during powerfail 
+ * and needs to be recognized during log replay.  The xor value is
+ * an "exclusive or" of all log words in the page up to eor.  This
+ * 32 bit eor is stored with the top 16 bits in the header and the
+ * bottom 16 bits in the trailer.  logredo can easily recognize pages
+ * that were not completed by reconstructing this eor and checking 
+ * the log page.
+ *
+ * Previous versions of the operating system did not allow split 
+ * writes and detected partially written records in logredo by 
+ * ordering the updates to the header, trailer, and the move of data 
+ * into the logdata area.  The order: (1) data is moved (2) header 
+ * is updated (3) trailer is updated.  In logredo, when the header 
+ * differed from the trailer, the header and trailer were reconciled 
+ * as follows: if h.page != t.page they were set to the smaller of 
+ * the two and h.eor and t.eor set to 8 (i.e. empty page). if (only) 
+ * h.eor != t.eor they were set to the smaller of their two values.
+ */
+struct logpage {
+	struct {		/* header */
+		__le32 page;	/* 4: log sequence page number */
+		__le16 rsrvd;	/* 2: */
+		__le16 eor;	/* 2: end-of-log offset of last record write */
+	} h;
+
+	__le32 data[LOGPSIZE / 4 - 4];	/* log record area */
+
+	struct {		/* trailer */
+		__le32 page;	/* 4: normally the same as h.page */
+		__le16 rsrvd;	/* 2: */
+		__le16 eor;	/* 2: normally the same as h.eor */
+	} t;
+};
+
+#define LOGPHDRSIZE	8	/* log page header size */
+#define LOGPTLRSIZE	8	/* log page trailer size */
+
+
+/*
+ *	log record
+ *
+ * (this comment should be rewritten !)
+ * jfs uses only "after" log records (only a single writer is allowed
+ * in a page, pages are written to temporary paging space
+ * if they must be written to disk before commit, and i/o is
+ * scheduled for modified pages to their home location after
+ * the log records containing the after values and the commit 
+ * record are written to the log on disk; undo discards the copy
+ * in main-memory.)
+ *
+ * a log record consists of a data area of variable length followed by 
+ * a descriptor of fixed size LOGRDSIZE bytes.
+ * the  data area is rounded up to an integral number of 4-bytes and 
+ * must be no longer than LOGPSIZE.
+ * the descriptor is a multiple of 4 bytes in size and aligned on a
+ * 4-byte boundary.
+ * records are packed one after the other in the data area of log pages.
+ * (sometimes a DUMMY record is inserted so that at least one record ends 
+ * on every page or the longest record is placed on at most two pages).
+ * the field eor in page header/trailer points to the byte following 
+ * the last record on a page.
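+ *
+ * for example, a SYNCPT record carries no data (length = 0), so it
+ * occupies exactly LOGRDSIZE bytes; the first data page written by
+ * lmLogFormat() holds a single such record, and its eor is
+ * LOGPHDRSIZE + LOGRDSIZE.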
+ */
+
+/* log record types */
+#define LOG_COMMIT		0x8000
+#define LOG_SYNCPT		0x4000
+#define LOG_MOUNT		0x2000
+#define LOG_REDOPAGE		0x0800
+#define LOG_NOREDOPAGE		0x0080
+#define LOG_NOREDOINOEXT	0x0040
+#define LOG_UPDATEMAP		0x0008
+#define LOG_NOREDOFILE		0x0001
+
+/* REDOPAGE/NOREDOPAGE log record data type */
+#define	LOG_INODE		0x0001
+#define	LOG_XTREE		0x0002
+#define	LOG_DTREE		0x0004
+#define	LOG_BTROOT		0x0010
+#define	LOG_EA			0x0020
+#define	LOG_ACL			0x0040
+#define	LOG_DATA		0x0080
+#define	LOG_NEW			0x0100
+#define	LOG_EXTEND		0x0200
+#define LOG_RELOCATE		0x0400
+#define LOG_DIR_XTREE		0x0800	/* Xtree is in directory inode */
+
+/* UPDATEMAP log record descriptor type */
+#define	LOG_ALLOCXADLIST	0x0080
+#define	LOG_ALLOCPXDLIST	0x0040
+#define	LOG_ALLOCXAD		0x0020
+#define	LOG_ALLOCPXD		0x0010
+#define	LOG_FREEXADLIST		0x0008
+#define	LOG_FREEPXDLIST		0x0004
+#define	LOG_FREEXAD		0x0002
+#define	LOG_FREEPXD		0x0001
+
+
+struct lrd {
+	/*
+	 * type independent area
+	 */
+	__le32 logtid;		/* 4: log transaction identifier */
+	__le32 backchain;	/* 4: ptr to prev record of same transaction */
+	__le16 type;		/* 2: record type */
+	__le16 length;		/* 2: length of data in record (in byte) */
+	__le32 aggregate;	/* 4: file system lv/aggregate */
+	/* (16) */
+
+	/*
+	 * type dependent area (20)
+	 */
+	union {
+
+		/*
+		 *      COMMIT: commit
+		 *
+		 * transaction commit: no type-dependent information;
+		 */
+
+		/*
+		 *      REDOPAGE: after-image
+		 *
+		 * apply after-image;
+		 *
+		 * N.B. REDOPAGE, NOREDOPAGE, and UPDATEMAP must be same format;
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 inode;	/* 4: inode number */
+			__le16 type;	/* 2: REDOPAGE record type */
+			__le16 l2linesize;	/* 2: log2 of line size */
+			pxd_t pxd;	/* 8: on-disk page pxd */
+		} redopage;	/* (20) */
+
+		/*
+		 *      NOREDOPAGE: the page is freed
+		 *
+		 * do not apply after-image records which precede this record
+		 * in the log with the same page block number to this page.
+		 *
+		 * N.B. REDOPAGE, NOREDOPAGE, and UPDATEMAP must be same format;
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 inode;	/* 4: inode number */
+			__le16 type;	/* 2: NOREDOPAGE record type */
+			__le16 rsrvd;	/* 2: reserved */
+			pxd_t pxd;	/* 8: on-disk page pxd */
+		} noredopage;	/* (20) */
+
+		/*
+		 *      UPDATEMAP: update block allocation map
+		 *
+		 * either in-line PXD,
+		 * or     out-of-line  XADLIST;
+		 *
+		 * N.B. REDOPAGE, NOREDOPAGE, and UPDATEMAP must be same format;
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 inode;	/* 4: inode number */
+			__le16 type;	/* 2: UPDATEMAP record type */
+			__le16 nxd;	/* 2: number of extents */
+			pxd_t pxd;	/* 8: pxd */
+		} updatemap;	/* (20) */
+
+		/*
+		 *      NOREDOINOEXT: the inode extent is freed
+		 *
+		 * do not apply after-image records which precede this 
+		 * record in the log with any of the 4 page block
+		 * numbers in this inode extent. 
+		 * 
+		 * NOTE: The fileset and pxd fields MUST remain in 
+		 *       the same fields in the REDOPAGE record format.
+		 *
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 iagnum;	/* 4: IAG number     */
+			__le32 inoext_idx;	/* 4: inode extent index */
+			pxd_t pxd;	/* 8: on-disk page pxd */
+		} noredoinoext;	/* (20) */
+
+		/*
+		 *      SYNCPT: log sync point
+		 *
+		 * replay log up to the syncpt address specified;
+		 */
+		struct {
+			__le32 sync;	/* 4: syncpt address (0 = here) */
+		} syncpt;
+
+		/*
+		 *      MOUNT: file system mount
+		 *
+		 * file system mount: no type-dependent information;
+		 */
+
+		/*
+		 *      ? FREEXTENT: free specified extent(s)
+		 *
+		 * free specified extent(s) from block allocation map
+		 * N.B.: nextents should be length of data/sizeof(xad_t)
+		 */
+		struct {
+			__le32 type;	/* 4: FREEXTENT record type */
+			__le32 nextent;	/* 4: number of extents */
+
+			/* data: PXD or XAD list */
+		} freextent;
+
+		/*
+		 *      ? NOREDOFILE: this file is freed
+		 *
+		 * do not apply records which precede this record in the log
+		 * with the same inode number.
+		 *
+		 * NOREDOFILE must be the first to be written at commit
+		 * (last to be read in logredo()) - it prevents
+		 * replay of preceding updates of all preceding generations
+		 * of the inumber esp. the on-disk inode itself, 
+		 * but does NOT prevent
+		 * replay of the 
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 inode;	/* 4: inode number */
+		} noredofile;
+
+		/*
+		 *      ? NEWPAGE: 
+		 *
+		 * metadata type dependent
+		 */
+		struct {
+			__le32 fileset;	/* 4: fileset number */
+			__le32 inode;	/* 4: inode number */
+			__le32 type;	/* 4: NEWPAGE record type */
+			pxd_t pxd;	/* 8: on-disk page pxd */
+		} newpage;
+
+		/*
+		 *      ? DUMMY: filler
+		 *
+		 * no type-dependent information
+		 */
+	} log;
+};					/* (36) */
+
+#define	LOGRDSIZE	(sizeof(struct lrd))
+
+/*
+ *	line vector descriptor
+ */
+struct lvd {
+	__le16 offset;
+	__le16 length;
+};
+
+
+/*
+ *	log logical volume
+ */
+struct jfs_log {
+
+	struct list_head sb_list;/*  This is used to sync metadata
+				 *    before writing syncpt.
+				 */
+	struct list_head journal_list; /* Global list */
+	struct block_device *bdev; /* 4: log lv pointer */
+	int serial;		/* 4: log mount serial number */
+
+	s64 base;		/* @8: log extent address (inline log ) */
+	int size;		/* 4: log size in log page (in page) */
+	int l2bsize;		/* 4: log2 of bsize */
+
+	long flag;		/* 4: flag */
+
+	struct lbuf *lbuf_free;	/* 4: free lbufs */
+	wait_queue_head_t free_wait;	/* 4: */
+
+	/* log write */
+	int logtid;		/* 4: log tid */
+	int page;		/* 4: page number of eol page */
+	int eor;		/* 4: eor of last record in eol page */
+	struct lbuf *bp;	/* 4: current log page buffer */
+
+	struct semaphore loglock;	/* 4: log write serialization lock */
+
+	/* syncpt */
+	int nextsync;		/* 4: bytes to write before next syncpt */
+	int active;		/* 4: */
+	wait_queue_head_t syncwait;	/* 4: */
+
+	/* commit */
+	uint cflag;		/* 4: */
+	struct list_head cqueue; /* FIFO commit queue */
+	struct tblock *flush_tblk; /* tblk we're waiting on for flush */
+	int gcrtc;		/* 4: GC_READY transaction count */
+	struct tblock *gclrt;	/* 4: latest GC_READY transaction */
+	spinlock_t gclock;	/* 4: group commit lock */
+	int logsize;		/* 4: log data area size in byte */
+	int lsn;		/* 4: end-of-log */
+	int clsn;		/* 4: clsn */
+	int syncpt;		/* 4: addr of last syncpt record */
+	int sync;		/* 4: addr from last logsync() */
+	struct list_head synclist;	/* 8: logsynclist anchor */
+	spinlock_t synclock;	/* 4: synclist lock */
+	struct lbuf *wqueue;	/* 4: log pageout queue */
+	int count;		/* 4: count */
+	char uuid[16];		/* 16: 128-bit uuid of log device */
+
+	int no_integrity;	/* 3: flag to disable journaling to disk */
+};
+
+/*
+ * Log flag
+ */
+#define log_INLINELOG	1
+#define log_SYNCBARRIER	2
+#define log_QUIESCE	3
+#define log_FLUSH	4
+
+/*
+ * group commit flag
+ */
+/* jfs_log */
+#define logGC_PAGEOUT	0x00000001
+
+/* tblock/lbuf */
+#define tblkGC_QUEUE		0x0001
+#define tblkGC_READY		0x0002
+#define tblkGC_COMMIT		0x0004
+#define tblkGC_COMMITTED	0x0008
+#define tblkGC_EOP		0x0010
+#define tblkGC_FREE		0x0020
+#define tblkGC_LEADER		0x0040
+#define tblkGC_ERROR		0x0080
+#define tblkGC_LAZY		0x0100	// D230860
+#define tblkGC_UNLOCKED		0x0200	// D230860
+
+/*
+ *		log cache buffer header
+ */
+struct lbuf {
+	struct jfs_log *l_log;	/* 4: log associated with buffer */
+
+	/*
+	 * data buffer base area
+	 */
+	uint l_flag;		/* 4: pageout control flags */
+
+	struct lbuf *l_wqnext;	/* 4: write queue link */
+	struct lbuf *l_freelist;	/* 4: freelist link */
+
+	int l_pn;		/* 4: log page number */
+	int l_eor;		/* 4: log record eor */
+	int l_ceor;		/* 4: committed log record eor */
+
+	s64 l_blkno;		/* 8: log page block number */
+	caddr_t l_ldata;	/* 4: data page */
+
+	wait_queue_head_t l_ioevent;	/* 4: i/o done event */
+	struct page *l_page;	/* The page itself */
+};
+
+/* Reuse l_freelist for redrive list */
+#define l_redrive_next l_freelist
+
+/*
+ *	logsynclist block
+ *
+ * common logsyncblk prefix for jbuf_t and tblock
+ */
+struct logsyncblk {
+	u16 xflag;		/* flags */
+	u16 flag;		/* only meaningful in tblock */
+	lid_t lid;		/* lock id */
+	s32 lsn;		/* log sequence number */
+	struct list_head synclist;	/* log sync list link */
+};
+
+/*
+ *	logsynclist serialization (per log)
+ */
+
+#define LOGSYNC_LOCK_INIT(log) spin_lock_init(&(log)->synclock)
+#define LOGSYNC_LOCK(log) spin_lock(&(log)->synclock)
+#define LOGSYNC_UNLOCK(log) spin_unlock(&(log)->synclock)
+
+/* compute the difference in bytes of lsn from sync point */
+#define logdiff(diff, lsn, log)\
+{\
+	diff = (lsn) - (log)->syncpt;\
+	if (diff < 0)\
+		diff += (log)->logsize;\
+}
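+/*
+ * e.g. with a 6-page log (logsize = 0x4000), syncpt = 0x5000 (page 5)
+ * and lsn = 0x2800 (page 2, after the log has wrapped), logdiff()
+ * yields 0x2800 - 0x5000 + 0x4000 = 0x1800 bytes logged since the
+ * last sync point.
+ */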
+
+extern int lmLogOpen(struct super_block *sb);
+extern int lmLogClose(struct super_block *sb);
+extern int lmLogShutdown(struct jfs_log * log);
+extern int lmLogInit(struct jfs_log * log);
+extern int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize);
+extern void jfs_flush_journal(struct jfs_log * log, int wait);
+
+#endif				/* _H_JFS_LOGMGR */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
new file mode 100644
index 0000000..4c0a3ac
--- /dev/null
+++ b/fs/jfs/jfs_metapage.c
@@ -0,0 +1,580 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2003
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/buffer_head.h>
+#include <linux/mempool.h>
+#include <linux/delay.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_txnmgr.h"
+#include "jfs_debug.h"
+
+static DEFINE_SPINLOCK(meta_lock);
+
+#ifdef CONFIG_JFS_STATISTICS
+static struct {
+	uint	pagealloc;	/* # of page allocations */
+	uint	pagefree;	/* # of page frees */
+	uint	lockwait;	/* # of sleeping lock_metapage() calls */
+} mpStat;
+#endif
+
+
+#define HASH_BITS 10		/* This makes hash_table 1 4K page */
+#define HASH_SIZE (1 << HASH_BITS)
+static struct metapage **hash_table = NULL;
+static unsigned long hash_order;
+
+
+static inline int metapage_locked(struct metapage *mp)
+{
+	return test_bit(META_locked, &mp->flag);
+}
+
+static inline int trylock_metapage(struct metapage *mp)
+{
+	return test_and_set_bit(META_locked, &mp->flag);
+}
+
+static inline void unlock_metapage(struct metapage *mp)
+{
+	clear_bit(META_locked, &mp->flag);
+	wake_up(&mp->wait);
+}
+
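+/*
+ * Sleep until META_locked can be acquired.  Called with meta_lock held;
+ * meta_lock is dropped while sleeping and reacquired before returning.
+ */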
+static void __lock_metapage(struct metapage *mp)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	INCREMENT(mpStat.lockwait);
+
+	add_wait_queue_exclusive(&mp->wait, &wait);
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (metapage_locked(mp)) {
+			spin_unlock(&meta_lock);
+			schedule();
+			spin_lock(&meta_lock);
+		}
+	} while (trylock_metapage(mp));
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&mp->wait, &wait);
+}
+
+/* needs meta_lock */
+static inline void lock_metapage(struct metapage *mp)
+{
+	if (trylock_metapage(mp))
+		__lock_metapage(mp);
+}
+
+#define METAPOOL_MIN_PAGES 32
+static kmem_cache_t *metapage_cache;
+static mempool_t *metapage_mempool;
+
+static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct metapage *mp = (struct metapage *)foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		mp->lid = 0;
+		mp->lsn = 0;
+		mp->flag = 0;
+		mp->data = NULL;
+		mp->clsn = 0;
+		mp->log = NULL;
+		set_bit(META_free, &mp->flag);
+		init_waitqueue_head(&mp->wait);
+	}
+}
+
+static inline struct metapage *alloc_metapage(int gfp_mask)
+{
+	return mempool_alloc(metapage_mempool, gfp_mask);
+}
+
+static inline void free_metapage(struct metapage *mp)
+{
+	mp->flag = 0;
+	set_bit(META_free, &mp->flag);
+
+	mempool_free(mp, metapage_mempool);
+}
+
+int __init metapage_init(void)
+{
+	/*
+	 * Allocate the metapage structures
+	 */
+	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
+					   0, 0, init_once, NULL);
+	if (metapage_cache == NULL)
+		return -ENOMEM;
+
+	metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab,
+					  mempool_free_slab, metapage_cache);
+
+	if (metapage_mempool == NULL) {
+		kmem_cache_destroy(metapage_cache);
+		return -ENOMEM;
+	}
+	/*
+	 * Now the hash list
+	 */
+	for (hash_order = 0;
+	     ((PAGE_SIZE << hash_order) / sizeof(void *)) < HASH_SIZE;
+	     hash_order++);
+	hash_table =
+	    (struct metapage **) __get_free_pages(GFP_KERNEL, hash_order);
+	assert(hash_table);
+	memset(hash_table, 0, PAGE_SIZE << hash_order);
+
+	return 0;
+}
+
+void metapage_exit(void)
+{
+	mempool_destroy(metapage_mempool);
+	kmem_cache_destroy(metapage_cache);
+}
+
+/*
+ * Basically same hash as in pagemap.h, but using our hash table
+ */
+static struct metapage **meta_hash(struct address_space *mapping,
+				   unsigned long index)
+{
+#define i (((unsigned long)mapping)/ \
+	   (sizeof(struct inode) & ~(sizeof(struct inode) -1 )))
+#define s(x) ((x) + ((x) >> HASH_BITS))
+	return hash_table + (s(i + index) & (HASH_SIZE - 1));
+#undef i
+#undef s
+}
+
+static struct metapage *search_hash(struct metapage ** hash_ptr,
+				    struct address_space *mapping,
+			       unsigned long index)
+{
+	struct metapage *ptr;
+
+	for (ptr = *hash_ptr; ptr; ptr = ptr->hash_next) {
+		if ((ptr->mapping == mapping) && (ptr->index == index))
+			return ptr;
+	}
+
+	return NULL;
+}
+
+static void add_to_hash(struct metapage * mp, struct metapage ** hash_ptr)
+{
+	if (*hash_ptr)
+		(*hash_ptr)->hash_prev = mp;
+
+	mp->hash_prev = NULL;
+	mp->hash_next = *hash_ptr;
+	*hash_ptr = mp;
+}
+
+static void remove_from_hash(struct metapage * mp, struct metapage ** hash_ptr)
+{
+	if (mp->hash_prev)
+		mp->hash_prev->hash_next = mp->hash_next;
+	else {
+		assert(*hash_ptr == mp);
+		*hash_ptr = mp->hash_next;
+	}
+
+	if (mp->hash_next)
+		mp->hash_next->hash_prev = mp->hash_prev;
+}
+
+struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
+				unsigned int size, int absolute,
+				unsigned long new)
+{
+	struct metapage **hash_ptr;
+	int l2BlocksPerPage;
+	int l2bsize;
+	struct address_space *mapping;
+	struct metapage *mp;
+	unsigned long page_index;
+	unsigned long page_offset;
+
+	jfs_info("__get_metapage: inode = 0x%p, lblock = 0x%lx", inode, lblock);
+
+	if (absolute)
+		mapping = inode->i_sb->s_bdev->bd_inode->i_mapping;
+	else {
+		/*
+		 * If an nfs client tries to read an inode that is larger
+		 * than any existing inodes, we may try to read past the
+		 * end of the inode map
+		 */
+		if ((lblock << inode->i_blkbits) >= inode->i_size)
+			return NULL;
+		mapping = inode->i_mapping;
+	}
+
+	hash_ptr = meta_hash(mapping, lblock);
+again:
+	spin_lock(&meta_lock);
+	mp = search_hash(hash_ptr, mapping, lblock);
+	if (mp) {
+	      page_found:
+		if (test_bit(META_stale, &mp->flag)) {
+			spin_unlock(&meta_lock);
+			msleep(1);
+			goto again;
+		}
+		mp->count++;
+		lock_metapage(mp);
+		spin_unlock(&meta_lock);
+		if (test_bit(META_discard, &mp->flag)) {
+			if (!new) {
+				jfs_error(inode->i_sb,
+					  "__get_metapage: using a "
+					  "discarded metapage");
+				release_metapage(mp);
+				return NULL;
+			}
+			clear_bit(META_discard, &mp->flag);
+		}
+		jfs_info("__get_metapage: found 0x%p, in hash", mp);
+		if (mp->logical_size != size) {
+			jfs_error(inode->i_sb,
+				  "__get_metapage: mp->logical_size != size");
+			release_metapage(mp);
+			return NULL;
+		}
+	} else {
+		l2bsize = inode->i_blkbits;
+		l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+		page_index = lblock >> l2BlocksPerPage;
+		page_offset = (lblock - (page_index << l2BlocksPerPage)) <<
+		    l2bsize;
+		if ((page_offset + size) > PAGE_CACHE_SIZE) {
+			spin_unlock(&meta_lock);
+			jfs_err("MetaData crosses page boundary!!");
+			return NULL;
+		}
+		
+		/*
+		 * Locks held on aggregate inode pages are usually
+		 * not held long, and they are taken in critical code
+		 * paths (committing dirty inodes, txCommit thread) 
+		 * 
+		 * Attempt to get metapage without blocking, tapping into
+		 * reserves if necessary.
+		 */
+		mp = NULL;
+		if (JFS_IP(inode)->fileset == AGGREGATE_I) {
+			mp = alloc_metapage(GFP_ATOMIC);
+			if (!mp) {
+				/*
+				 * mempool is supposed to protect us from
+				 * failing here.  We will try a blocking
+				 * call, but a deadlock is possible here
+				 */
+				printk(KERN_WARNING
+				       "__get_metapage: atomic call to mempool_alloc failed.\n");
+				printk(KERN_WARNING
+				       "Will attempt blocking call\n");
+			}
+		}
+		if (!mp) {
+			struct metapage *mp2;
+
+			spin_unlock(&meta_lock);
+			mp = alloc_metapage(GFP_NOFS);
+			spin_lock(&meta_lock);
+
+			/* we dropped the meta_lock, we need to search the
+			 * hash again.
+			 */
+			mp2 = search_hash(hash_ptr, mapping, lblock);
+			if (mp2) {
+				free_metapage(mp);
+				mp = mp2;
+				goto page_found;
+			}
+		}
+		mp->flag = 0;
+		lock_metapage(mp);
+		if (absolute)
+			set_bit(META_absolute, &mp->flag);
+		mp->xflag = COMMIT_PAGE;
+		mp->count = 1;
+		atomic_set(&mp->nohomeok,0);
+		mp->mapping = mapping;
+		mp->index = lblock;
+		mp->page = NULL;
+		mp->logical_size = size;
+		add_to_hash(mp, hash_ptr);
+		spin_unlock(&meta_lock);
+
+		if (new) {
+			jfs_info("__get_metapage: Calling grab_cache_page");
+			mp->page = grab_cache_page(mapping, page_index);
+			if (!mp->page) {
+				jfs_err("grab_cache_page failed!");
+				goto freeit;
+			} else {
+				INCREMENT(mpStat.pagealloc);
+				unlock_page(mp->page);
+			}
+		} else {
+			jfs_info("__get_metapage: Calling read_cache_page");
+			mp->page = read_cache_page(mapping, lblock,
+				    (filler_t *)mapping->a_ops->readpage, NULL);
+			if (IS_ERR(mp->page)) {
+				jfs_err("read_cache_page failed!");
+				goto freeit;
+			} else
+				INCREMENT(mpStat.pagealloc);
+		}
+		mp->data = kmap(mp->page) + page_offset;
+	}
+
+	if (new)
+		memset(mp->data, 0, PSIZE);
+
+	jfs_info("__get_metapage: returning = 0x%p", mp);
+	return mp;
+
+freeit:
+	spin_lock(&meta_lock);
+	remove_from_hash(mp, hash_ptr);
+	free_metapage(mp);
+	spin_unlock(&meta_lock);
+	return NULL;
+}
+
+void hold_metapage(struct metapage * mp, int force)
+{
+	spin_lock(&meta_lock);
+
+	mp->count++;
+
+	if (force) {
+		ASSERT (!(test_bit(META_forced, &mp->flag)));
+		if (trylock_metapage(mp))
+			set_bit(META_forced, &mp->flag);
+	} else
+		lock_metapage(mp);
+
+	spin_unlock(&meta_lock);
+}
+
+static void __write_metapage(struct metapage * mp)
+{
+	int l2bsize = mp->mapping->host->i_blkbits;
+	int l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+	unsigned long page_index;
+	unsigned long page_offset;
+	int rc;
+
+	jfs_info("__write_metapage: mp = 0x%p", mp);
+
+	page_index = mp->page->index;
+	page_offset =
+	    (mp->index - (page_index << l2BlocksPerPage)) << l2bsize;
+
+	lock_page(mp->page);
+	rc = mp->mapping->a_ops->prepare_write(NULL, mp->page, page_offset,
+					       page_offset +
+					       mp->logical_size);
+	if (rc) {
+		jfs_err("prepare_write return %d!", rc);
+		ClearPageUptodate(mp->page);
+		unlock_page(mp->page);
+		clear_bit(META_dirty, &mp->flag);
+		return;
+	}
+	rc = mp->mapping->a_ops->commit_write(NULL, mp->page, page_offset,
+					      page_offset +
+					      mp->logical_size);
+	if (rc) {
+		jfs_err("commit_write returned %d", rc);
+	}
+
+	unlock_page(mp->page);
+	clear_bit(META_dirty, &mp->flag);
+
+	jfs_info("__write_metapage done");
+}
+
+static inline void sync_metapage(struct metapage *mp)
+{
+	struct page *page = mp->page;
+
+	page_cache_get(page);
+	lock_page(page);
+
+	/* we're done with this page - no need to check for errors */
+	if (page_has_buffers(page))
+		write_one_page(page, 1);
+	else
+		unlock_page(page);
+	page_cache_release(page);
+}
+
+void release_metapage(struct metapage * mp)
+{
+	struct jfs_log *log;
+
+	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
+
+	spin_lock(&meta_lock);
+	if (test_bit(META_forced, &mp->flag)) {
+		clear_bit(META_forced, &mp->flag);
+		mp->count--;
+		spin_unlock(&meta_lock);
+		return;
+	}
+
+	assert(mp->count);
+	if (--mp->count || atomic_read(&mp->nohomeok)) {
+		unlock_metapage(mp);
+		spin_unlock(&meta_lock);
+		return;
+	}
+
+	if (mp->page) {
+		set_bit(META_stale, &mp->flag);
+		spin_unlock(&meta_lock);
+		kunmap(mp->page);
+		mp->data = NULL;
+		if (test_bit(META_dirty, &mp->flag))
+			__write_metapage(mp);
+		if (test_bit(META_sync, &mp->flag)) {
+			sync_metapage(mp);
+			clear_bit(META_sync, &mp->flag);
+		}
+
+		if (test_bit(META_discard, &mp->flag)) {
+			lock_page(mp->page);
+			block_invalidatepage(mp->page, 0);
+			unlock_page(mp->page);
+		}
+
+		page_cache_release(mp->page);
+		mp->page = NULL;
+		INCREMENT(mpStat.pagefree);
+		spin_lock(&meta_lock);
+	}
+
+	if (mp->lsn) {
+		/*
+		 * Remove metapage from logsynclist.
+		 */
+		log = mp->log;
+		LOGSYNC_LOCK(log);
+		mp->log = NULL;
+		mp->lsn = 0;
+		mp->clsn = 0;
+		log->count--;
+		list_del(&mp->synclist);
+		LOGSYNC_UNLOCK(log);
+	}
+	remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
+	spin_unlock(&meta_lock);
+
+	free_metapage(mp);
+}
+
+void __invalidate_metapages(struct inode *ip, s64 addr, int len)
+{
+	struct metapage **hash_ptr;
+	unsigned long lblock;
+	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+	/* All callers are interested in block device's mapping */
+	struct address_space *mapping = ip->i_sb->s_bdev->bd_inode->i_mapping;
+	struct metapage *mp;
+	struct page *page;
+
+	/*
+	 * First, mark metapages to discard.  They will eventually be
+	 * released, but should not be written.
+	 */
+	for (lblock = addr; lblock < addr + len;
+	     lblock += 1 << l2BlocksPerPage) {
+		hash_ptr = meta_hash(mapping, lblock);
+again:
+		spin_lock(&meta_lock);
+		mp = search_hash(hash_ptr, mapping, lblock);
+		if (mp) {
+			if (test_bit(META_stale, &mp->flag)) {
+				spin_unlock(&meta_lock);
+				msleep(1);
+				goto again;
+			}
+
+			clear_bit(META_dirty, &mp->flag);
+			set_bit(META_discard, &mp->flag);
+			spin_unlock(&meta_lock);
+		} else {
+			spin_unlock(&meta_lock);
+			page = find_lock_page(mapping, lblock>>l2BlocksPerPage);
+			if (page) {
+				block_invalidatepage(page, 0);
+				unlock_page(page);
+				page_cache_release(page);
+			}
+		}
+	}
+}
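+
+/*
+ * Note: callers normally reach __invalidate_metapages() through the
+ * invalidate_pxd_metapages()/invalidate_dxd_metapages()/
+ * invalidate_xad_metapages() wrappers in jfs_metapage.h, which simply
+ * expand the address and length of the extent descriptor, e.g.
+ * invalidate_pxd_metapages(ip, pxd) becomes
+ * __invalidate_metapages(ip, addressPXD(&pxd), lengthPXD(&pxd)).
+ */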
+
+#ifdef CONFIG_JFS_STATISTICS
+int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
+		    int *eof, void *data)
+{
+	int len = 0;
+	off_t begin;
+
+	len += sprintf(buffer,
+		       "JFS Metapage statistics\n"
+		       "=======================\n"
+		       "page allocations = %d\n"
+		       "page frees = %d\n"
+		       "lock waits = %d\n",
+		       mpStat.pagealloc,
+		       mpStat.pagefree,
+		       mpStat.lockwait);
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if (len > length)
+		len = length;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+#endif
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
new file mode 100644
index 0000000..0e58aba
--- /dev/null
+++ b/fs/jfs/jfs_metapage.h
@@ -0,0 +1,115 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *   Portions Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_METAPAGE
+#define _H_JFS_METAPAGE
+
+#include <linux/pagemap.h>
+
+struct metapage {
+	/* Common logsyncblk prefix (see jfs_logmgr.h) */
+	u16 xflag;
+	u16 unused;
+	lid_t lid;
+	int lsn;
+	struct list_head synclist;
+	/* End of logsyncblk prefix */
+
+	unsigned long flag;	/* See Below */
+	unsigned long count;	/* Reference count */
+	void *data;		/* Data pointer */
+
+	/* list management stuff */
+	struct metapage *hash_prev;
+	struct metapage *hash_next;	/* Also used for free list */
+
+	/*
+	 * mapping & index become redundant, but we need these here to
+	 * add the metapage to the hash before we have the real page
+	 */
+	struct address_space *mapping;
+	unsigned long index;
+	wait_queue_head_t wait;
+
+	/* implementation */
+	struct page *page;
+	unsigned long logical_size;
+
+	/* Journal management */
+	int clsn;
+	atomic_t nohomeok;
+	struct jfs_log *log;
+};
+
+/* metapage flag */
+#define META_locked	0
+#define META_absolute	1
+#define META_free	2
+#define META_dirty	3
+#define META_sync	4
+#define META_discard	5
+#define META_forced	6
+#define META_stale	7
+
+#define mark_metapage_dirty(mp) set_bit(META_dirty, &(mp)->flag)
+
+/* function prototypes */
+extern struct metapage *__get_metapage(struct inode *inode,
+				  unsigned long lblock, unsigned int size,
+				  int absolute, unsigned long new);
+
+#define read_metapage(inode, lblock, size, absolute)\
+	 __get_metapage(inode, lblock, size, absolute, FALSE)
+
+#define get_metapage(inode, lblock, size, absolute)\
+	 __get_metapage(inode, lblock, size, absolute, TRUE)
+
+extern void release_metapage(struct metapage *);
+extern void hold_metapage(struct metapage *, int);
+
+static inline void write_metapage(struct metapage *mp)
+{
+	set_bit(META_dirty, &mp->flag);
+	release_metapage(mp);
+}
+
+static inline void flush_metapage(struct metapage *mp)
+{
+	set_bit(META_sync, &mp->flag);
+	write_metapage(mp);
+}
+
+static inline void discard_metapage(struct metapage *mp)
+{
+	clear_bit(META_dirty, &mp->flag);
+	set_bit(META_discard, &mp->flag);
+	release_metapage(mp);
+}
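+
+/*
+ * Typical caller pattern (an illustrative sketch only; the ip, lblock,
+ * offset, buf and len values below are placeholders): a metapage is
+ * obtained with read_metapage() or get_metapage(), modified through
+ * mp->data, and then handed back with write_metapage() (marks it dirty
+ * and drops the reference), flush_metapage() (dirty plus synchronous
+ * write-out) or release_metapage() (no modification):
+ *
+ *	struct metapage *mp;
+ *
+ *	mp = read_metapage(ip, lblock, PSIZE, 1);
+ *	if (mp == NULL)
+ *		return -EIO;
+ *	memcpy((char *) mp->data + offset, buf, len);
+ *	write_metapage(mp);
+ */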
+
+/*
+ * These routines invalidate all metapages within an extent.
+ */
+extern void __invalidate_metapages(struct inode *, s64, int);
+#define invalidate_pxd_metapages(ip, pxd) \
+	__invalidate_metapages((ip), addressPXD(&(pxd)), lengthPXD(&(pxd)))
+#define invalidate_dxd_metapages(ip, dxd) \
+	__invalidate_metapages((ip), addressDXD(&(dxd)), lengthDXD(&(dxd)))
+#define invalidate_xad_metapages(ip, xad) \
+	__invalidate_metapages((ip), addressXAD(&(xad)), lengthXAD(&(xad)))
+
+#endif				/* _H_JFS_METAPAGE */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
new file mode 100644
index 0000000..c535ffd
--- /dev/null
+++ b/fs/jfs/jfs_mount.c
@@ -0,0 +1,512 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Module: jfs_mount.c
+ *
+ * note: file system in transition to aggregate/fileset:
+ *
+ * file system mount is interpreted as the mount of aggregate, 
+ * if not already mounted, and mount of the single/only fileset in 
+ * the aggregate;
+ *
+ * a file system/aggregate is represented by an internal inode
+ * (aka mount inode) initialized with aggregate superblock;
+ * each vfs represents a fileset, and points to its "fileset inode 
+ * allocation map inode" (aka fileset inode):
+ * (an aggregate itself is structured recursively as a fileset:
+ * an internal vfs is constructed and points to its "fileset inode 
+ * allocation map inode" (aka aggregate inode) where each inode 
+ * represents a fileset inode) so that inode number is mapped to 
+ * on-disk inode in uniform way at both aggregate and fileset level;
+ *
+ * each vnode/inode of a fileset is linked to its vfs (to facilitate
+ * per fileset inode operations, e.g., unmount of a fileset, etc.);
+ * each inode points to the mount inode (to facilitate access to
+ * per aggregate information, e.g., block size, etc.) as well as
+ * its file set inode.
+ *
+ *   aggregate 
+ *   ipmnt
+ *   mntvfs -> fileset ipimap+ -> aggregate ipbmap -> aggregate ipaimap;
+ *             fileset vfs     -> vp(1) <-> ... <-> vp(n) <->vproot;
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_imap.h"
+#include "jfs_metapage.h"
+#include "jfs_debug.h"
+
+
+/*
+ * forward references
+ */
+static int chkSuper(struct super_block *);
+static int logMOUNT(struct super_block *sb);
+
+/*
+ * NAME:	jfs_mount(sb)
+ *
+ * FUNCTION:	vfs_mount()
+ *
+ * PARAMETER:	sb	- super block
+ *
+ * RETURN:	-EBUSY	- device already mounted or open for write
+ *		-EBUSY	- cvrdvp already mounted;
+ *		-EBUSY	- mount table full
+ *		-ENOTDIR- cvrdvp not directory on a device mount
+ *		-ENXIO	- device open failure
+ */
+int jfs_mount(struct super_block *sb)
+{
+	int rc = 0;		/* Return code          */
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct inode *ipaimap = NULL;
+	struct inode *ipaimap2 = NULL;
+	struct inode *ipimap = NULL;
+	struct inode *ipbmap = NULL;
+
+	/*
+	 * read/validate superblock 
+	 * (initialize mount inode from the superblock)
+	 */
+	if ((rc = chkSuper(sb))) {
+		goto errout20;
+	}
+
+	ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
+	if (ipaimap == NULL) {
+		jfs_err("jfs_mount: Faild to read AGGREGATE_I");
+		rc = -EIO;
+		goto errout20;
+	}
+	sbi->ipaimap = ipaimap;
+
+	jfs_info("jfs_mount: ipaimap:0x%p", ipaimap);
+
+	/*
+	 * initialize aggregate inode allocation map
+	 */
+	if ((rc = diMount(ipaimap))) {
+		jfs_err("jfs_mount: diMount(ipaimap) failed w/rc = %d", rc);
+		goto errout21;
+	}
+
+	/*
+	 * open aggregate block allocation map
+	 */
+	ipbmap = diReadSpecial(sb, BMAP_I, 0);
+	if (ipbmap == NULL) {
+		rc = -EIO;
+		goto errout22;
+	}
+
+	jfs_info("jfs_mount: ipbmap:0x%p", ipbmap);
+
+	sbi->ipbmap = ipbmap;
+
+	/*
+	 * initialize aggregate block allocation map
+	 */
+	if ((rc = dbMount(ipbmap))) {
+		jfs_err("jfs_mount: dbMount failed w/rc = %d", rc);
+		goto errout22;
+	}
+
+	/*
+	 * open the secondary aggregate inode allocation map
+	 *
+	 * This is a duplicate of the aggregate inode allocation map.
+	 *
+	 * hand craft a vfs in the same fashion as we did to read ipaimap.
+	 * By adding INOSPEREXT (32) to the inode number, we are telling
+	 * diReadSpecial that we are reading from the secondary aggregate
+	 * inode table.  This also creates a unique entry in the inode hash
+	 * table.
+	 */
+	if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
+		ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
+		if (ipaimap2 == NULL) {
+			jfs_err("jfs_mount: Failed to read AGGREGATE_I");
+			rc = -EIO;
+			goto errout35;
+		}
+		sbi->ipaimap2 = ipaimap2;
+
+		jfs_info("jfs_mount: ipaimap2:0x%p", ipaimap2);
+
+		/*
+		 * initialize secondary aggregate inode allocation map
+		 */
+		if ((rc = diMount(ipaimap2))) {
+			jfs_err("jfs_mount: diMount(ipaimap2) failed, rc = %d",
+				rc);
+			goto errout35;
+		}
+	} else
+		/* Secondary aggregate inode table is not valid */
+		sbi->ipaimap2 = NULL;
+
+	/*
+	 *      mount (the only/single) fileset
+	 */
+	/*
+	 * open fileset inode allocation map (aka fileset inode)
+	 */
+	ipimap = diReadSpecial(sb, FILESYSTEM_I, 0);
+	if (ipimap == NULL) {
+		jfs_err("jfs_mount: Failed to read FILESYSTEM_I");
+		/* open fileset secondary inode allocation map */
+		rc = -EIO;
+		goto errout40;
+	}
+	jfs_info("jfs_mount: ipimap:0x%p", ipimap);
+
+	/* map further access of per fileset inodes by the fileset inode */
+	sbi->ipimap = ipimap;
+
+	/* initialize fileset inode allocation map */
+	if ((rc = diMount(ipimap))) {
+		jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
+		goto errout41;
+	}
+
+	goto out;
+
+	/*
+	 *      unwind on error
+	 */
+      errout41:		/* close fileset inode allocation map inode */
+	diFreeSpecial(ipimap);
+
+      errout40:		/* fileset closed */
+
+	/* close secondary aggregate inode allocation map */
+	if (ipaimap2) {
+		diUnmount(ipaimap2, 1);
+		diFreeSpecial(ipaimap2);
+	}
+
+      errout35:
+
+	/* close aggregate block allocation map */
+	dbUnmount(ipbmap, 1);
+	diFreeSpecial(ipbmap);
+
+      errout22:		/* close aggregate inode allocation map */
+
+	diUnmount(ipaimap, 1);
+
+      errout21:		/* close aggregate inodes */
+	diFreeSpecial(ipaimap);
+      errout20:		/* aggregate closed */
+
+      out:
+
+	if (rc)
+		jfs_err("Mount JFS Failure: %d", rc);
+
+	return rc;
+}
+
+/*
+ * NAME:	jfs_mount_rw(sb, remount)
+ *
+ * FUNCTION:	Completes read-write mount, or remounts read-only volume
+ *		as read-write
+ */
+int jfs_mount_rw(struct super_block *sb, int remount)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);  
+	int rc;
+
+	/*
+	 * If we are re-mounting a previously read-only volume, we want to
+	 * re-read the inode and block maps, since fsck.jfs may have updated
+	 * them.
+	 */
+	if (remount) {
+		if (chkSuper(sb) || (sbi->state != FM_CLEAN))
+			return -EINVAL;
+
+		truncate_inode_pages(sbi->ipimap->i_mapping, 0);
+		truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
+		diUnmount(sbi->ipimap, 1);
+		if ((rc = diMount(sbi->ipimap))) {
+			jfs_err("jfs_mount_rw: diMount failed!");
+			return rc;
+		}
+
+		dbUnmount(sbi->ipbmap, 1);
+		if ((rc = dbMount(sbi->ipbmap))) {
+			jfs_err("jfs_mount_rw: dbMount failed!");
+			return rc;
+		}
+	}
+
+	/*
+	 * open/initialize log
+	 */
+	if ((rc = lmLogOpen(sb)))
+		return rc;
+
+	/*
+	 * update file system superblock;
+	 */
+	if ((rc = updateSuper(sb, FM_MOUNT))) {
+		jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc);
+		lmLogClose(sb);
+		return rc;
+	}
+
+	/*
+	 * write MOUNT log record of the file system
+	 */
+	logMOUNT(sb);
+
+	/*
+	 * Set page cache allocation policy
+	 */
+	mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);
+
+	return rc;
+}
+
+/*
+ *	chkSuper()
+ *
+ * validate the superblock of the file system to be mounted and 
+ * get the file system parameters.
+ *
+ * returns
+ *	0 with fragsize set if check successful
+ *	error code if not successful
+ */
+static int chkSuper(struct super_block *sb)
+{
+	int rc = 0;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_superblock *j_sb;
+	struct buffer_head *bh;
+	int AIM_bytesize, AIT_bytesize;
+	int expected_AIM_bytesize, expected_AIT_bytesize;
+	s64 AIM_byte_addr, AIT_byte_addr, fsckwsp_addr;
+	s64 byte_addr_diff0, byte_addr_diff1;
+	s32 bsize;
+
+	if ((rc = readSuper(sb, &bh)))
+		return rc;
+	j_sb = (struct jfs_superblock *)bh->b_data;
+
+	/*
+	 * validate superblock
+	 */
+	/* validate fs signature */
+	if (strncmp(j_sb->s_magic, JFS_MAGIC, 4) ||
+	    le32_to_cpu(j_sb->s_version) > JFS_VERSION) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	bsize = le32_to_cpu(j_sb->s_bsize);
+#ifdef _JFS_4K
+	if (bsize != PSIZE) {
+		jfs_err("Currently only 4K block size supported!");
+		rc = -EINVAL;
+		goto out;
+	}
+#endif				/* _JFS_4K */
+
+	jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
+		 le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
+		 (unsigned long long) le64_to_cpu(j_sb->s_size));
+
+	/* validate the descriptors for Secondary AIM and AIT */
+	if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) !=
+	    cpu_to_le32(JFS_BAD_SAIT)) {
+		expected_AIM_bytesize = 2 * PSIZE;
+		AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize;
+		expected_AIT_bytesize = 4 * PSIZE;
+		AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize;
+		AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize;
+		AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize;
+		byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr;
+		fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize;
+		byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr;
+		if ((AIM_bytesize != expected_AIM_bytesize) ||
+		    (AIT_bytesize != expected_AIT_bytesize) ||
+		    (byte_addr_diff0 != AIM_bytesize) ||
+		    (byte_addr_diff1 <= AIT_bytesize))
+			j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
+	}
+
+	if ((j_sb->s_flag & cpu_to_le32(JFS_GROUPCOMMIT)) !=
+	    cpu_to_le32(JFS_GROUPCOMMIT))
+		j_sb->s_flag |= cpu_to_le32(JFS_GROUPCOMMIT);
+
+	/* validate fs state */
+	if (j_sb->s_state != cpu_to_le32(FM_CLEAN) &&
+	    !(sb->s_flags & MS_RDONLY)) {
+		jfs_err("jfs_mount: Mount Failure: File System Dirty.");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	sbi->state = le32_to_cpu(j_sb->s_state);
+	sbi->mntflag = le32_to_cpu(j_sb->s_flag);
+
+	/*
+	 * JFS always does I/O by 4K pages.  Don't tell the buffer cache
+	 * that we use anything else (leave s_blocksize alone).
+	 */
+	sbi->bsize = bsize;
+	sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
+
+	/*
+	 * For now, ignore s_pbsize and l2bfactor.  All I/O goes through the
+	 * buffer cache.
+	 */
+	sbi->nbperpage = PSIZE >> sbi->l2bsize;
+	sbi->l2nbperpage = L2PSIZE - sbi->l2bsize;
+	sbi->l2niperblk = sbi->l2bsize - L2DISIZE;
+	if (sbi->mntflag & JFS_INLINELOG)
+		sbi->logpxd = j_sb->s_logpxd;
+	else {
+		sbi->logdev = new_decode_dev(le32_to_cpu(j_sb->s_logdev));
+		memcpy(sbi->uuid, j_sb->s_uuid, sizeof(sbi->uuid));
+		memcpy(sbi->loguuid, j_sb->s_loguuid, sizeof(sbi->uuid));
+	}
+	sbi->fsckpxd = j_sb->s_fsckpxd;
+	sbi->ait2 = j_sb->s_ait2;
+
+      out:
+	brelse(bh);
+	return rc;
+}
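+
+/*
+ * Layout expected by the secondary AIM/AIT check above (an illustrative
+ * sketch; the byte address A is arbitrary and a 4K block size is
+ * assumed):
+ *
+ *	secondary aggregate inode map   (s_aim2): starts at A,
+ *						  length 2 * PSIZE =  8K
+ *	secondary aggregate inode table (s_ait2): starts at A + 8K,
+ *						  length 4 * PSIZE = 16K
+ *	fsck workspace (s_fsckpxd):		  starts more than 16K
+ *						  beyond A + 8K
+ *
+ * If any of these relationships does not hold, JFS_BAD_SAIT is set in
+ * s_flag and jfs_mount() skips the secondary aggregate inode table.
+ */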
+
+
+/*
+ *	updateSuper()
+ *
+ * synchronously update the superblock if the file system is mounted read-write.
+ */
+int updateSuper(struct super_block *sb, uint state)
+{
+	struct jfs_superblock *j_sb;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct buffer_head *bh;
+	int rc;
+
+	if (sbi->flag & JFS_NOINTEGRITY) {
+		if (state == FM_DIRTY) {
+			sbi->p_state = state;
+			return 0;
+		} else if (state == FM_MOUNT) {
+			sbi->p_state = sbi->state;
+			state = FM_DIRTY;
+		} else if (state == FM_CLEAN) {
+			state = sbi->p_state;
+		} else
+			jfs_err("updateSuper: bad state");
+	} else if (sbi->state == FM_DIRTY)
+		return 0;
+	
+	if ((rc = readSuper(sb, &bh)))
+		return rc;
+
+	j_sb = (struct jfs_superblock *)bh->b_data;
+
+	j_sb->s_state = cpu_to_le32(state);
+	sbi->state = state;
+
+	if (state == FM_MOUNT) {
+		/* record log's dev_t and mount serial number */
+		j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
+		j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
+	} else if (state == FM_CLEAN) {
+		/*
+		 * If this volume is shared with OS/2, OS/2 will need to
+		 * recalculate DASD usage, since we don't deal with it.
+		 */
+		if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
+			j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
+	}
+
+	mark_buffer_dirty(bh);
+	sync_dirty_buffer(bh);
+	brelse(bh);
+
+	return 0;
+}
+
+
+/*
+ *	readSuper()
+ *
+ * read superblock by raw sector address
+ */
+int readSuper(struct super_block *sb, struct buffer_head **bpp)
+{
+	/* read in primary superblock */
+	*bpp = sb_bread(sb, SUPER1_OFF >> sb->s_blocksize_bits);
+	if (*bpp)
+		return 0;
+
+	/* read in secondary/replicated superblock */
+	*bpp = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
+	if (*bpp)
+		return 0;
+
+	return -EIO;
+}
+
+
+/*
+ *	logMOUNT()
+ *
+ * function: write a MOUNT log record for file system.
+ *
+ * MOUNT record keeps logredo() from processing log records
+ * for this file system past this point in log.
+ * it is harmless if mount fails.
+ *
+ * note: MOUNT record is at aggregate level, not at fileset level, 
+ * since log records of previous mounts of a fileset
+ * (e.g., AFTER record of extent allocation) have to be processed 
+ * to update block allocation map at aggregate level.
+ */
+static int logMOUNT(struct super_block *sb)
+{
+	struct jfs_log *log = JFS_SBI(sb)->log;
+	struct lrd lrd;
+
+	lrd.logtid = 0;
+	lrd.backchain = 0;
+	lrd.type = cpu_to_le16(LOG_MOUNT);
+	lrd.length = 0;
+	lrd.aggregate = cpu_to_le32(new_encode_dev(sb->s_bdev->bd_dev));
+	lmLog(log, NULL, &lrd, NULL);
+
+	return 0;
+}
diff --git a/fs/jfs/jfs_superblock.h b/fs/jfs/jfs_superblock.h
new file mode 100644
index 0000000..ab0566f
--- /dev/null
+++ b/fs/jfs/jfs_superblock.h
@@ -0,0 +1,113 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2003
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef	_H_JFS_SUPERBLOCK
+#define _H_JFS_SUPERBLOCK
+
+/*
+ * make the magic number something a human could read
+ */
+#define JFS_MAGIC 	"JFS1"	/* Magic word */
+
+#define JFS_VERSION	2	/* Version number: Version 2 */
+
+#define LV_NAME_SIZE	11	/* MUST BE 11 for OS/2 boot sector */
+
+/* 
+ *	aggregate superblock 
+ *
+ * The name superblock is too close to super_block, so the name has been
+ * changed to jfs_superblock.  The utilities are still using the old name.
+ */
+struct jfs_superblock {
+	char s_magic[4];	/* 4: magic number */
+	__le32 s_version;	/* 4: version number */
+
+	__le64 s_size;		/* 8: aggregate size in hardware/LVM blocks;
+				 * VFS: number of blocks
+				 */
+	__le32 s_bsize;		/* 4: aggregate block size in bytes; 
+				 * VFS: fragment size
+				 */
+	__le16 s_l2bsize;	/* 2: log2 of s_bsize */
+	__le16 s_l2bfactor;	/* 2: log2(s_bsize/hardware block size) */
+	__le32 s_pbsize;	/* 4: hardware/LVM block size in bytes */
+	__le16 s_l2pbsize;	/* 2: log2 of s_pbsize */
+	__le16 pad;		/* 2: padding necessary for alignment */
+
+	__le32 s_agsize;	/* 4: allocation group size in aggr. blocks */
+
+	__le32 s_flag;		/* 4: aggregate attributes:
+				 *    see jfs_filsys.h
+				 */
+	__le32 s_state;		/* 4: mount/unmount/recovery state: 
+				 *    see jfs_filsys.h
+				 */
+	__le32 s_compress;		/* 4: > 0 if data compression */
+
+	pxd_t s_ait2;		/* 8: first extent of secondary
+				 *    aggregate inode table
+				 */
+
+	pxd_t s_aim2;		/* 8: first extent of secondary
+				 *    aggregate inode map
+				 */
+	__le32 s_logdev;		/* 4: device address of log */
+	__le32 s_logserial;	/* 4: log serial number at aggregate mount */
+	pxd_t s_logpxd;		/* 8: inline log extent */
+
+	pxd_t s_fsckpxd;	/* 8: inline fsck work space extent */
+
+	struct timestruc_t s_time;	/* 8: time last updated */
+
+	__le32 s_fsckloglen;	/* 4: Number of filesystem blocks reserved for
+				 *    the fsck service log.  
+				 *    N.B. These blocks are divided among the
+				 *         versions kept.  This is not a per
+				 *         version size.
+				 *    N.B. These blocks are included in the 
+				 *         length field of s_fsckpxd.
+				 */
+	s8 s_fscklog;		/* 1: which fsck service log is most recent
+				 *    0 => no service log data yet
+				 *    1 => the first one
+				 *    2 => the 2nd one
+				 */
+	char s_fpack[11];	/* 11: file system volume name 
+				 *     N.B. This must be 11 bytes to
+				 *          conform with the OS/2 BootSector
+				 *          requirements
+				 *          Only used when s_version is 1
+				 */
+
+	/* extendfs() parameter under s_state & FM_EXTENDFS */
+	__le64 s_xsize;		/* 8: extendfs s_size */
+	pxd_t s_xfsckpxd;	/* 8: extendfs fsckpxd */
+	pxd_t s_xlogpxd;	/* 8: extendfs logpxd */
+	/* - 128 byte boundary - */
+
+	char s_uuid[16];	/* 16: 128-bit uuid for volume */
+	char s_label[16];	/* 16: volume label */
+	char s_loguuid[16];	/* 16: 128-bit uuid for log device */
+
+};
+
+extern int readSuper(struct super_block *, struct buffer_head **);
+extern int updateSuper(struct super_block *, uint);
+extern void jfs_error(struct super_block *, const char *, ...);
+
+#endif /*_H_JFS_SUPERBLOCK */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
new file mode 100644
index 0000000..f40301d9
--- /dev/null
+++ b/fs/jfs/jfs_txnmgr.c
@@ -0,0 +1,3131 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2005
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ *      jfs_txnmgr.c: transaction manager
+ *
+ * notes:
+ * transaction starts with txBegin() and ends with txCommit()
+ * or txAbort().
+ *
+ * tlock is acquired at the time of update;
+ * (obviate scan at commit time for xtree and dtree)
+ * tlock and mp point to each other;
+ * (no hashlist for mp -> tlock).
+ *
+ * special cases:
+ * tlock on in-memory inode:
+ * in-place tlock in the in-memory inode itself;
+ * converted to page lock by iWrite() at commit time.
+ *
+ * tlock during write()/mmap() under anonymous transaction (tid = 0):
+ * transferred (?) to transaction at commit time.
+ *
+ * use the page itself to update allocation maps
+ * (obviate intermediate replication of allocation/deallocation data)
+ * hold on to mp+lock thru update of maps
+ */
+
+
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_dinode.h"
+#include "jfs_imap.h"
+#include "jfs_dmap.h"
+#include "jfs_superblock.h"
+#include "jfs_debug.h"
+
+/*
+ *      transaction management structures
+ */
+static struct {
+	int freetid;		/* index of a free tid structure */
+	int freelock;		/* index first free lock word */
+	wait_queue_head_t freewait;	/* eventlist of free tblock */
+	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
+	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
+	int tlocksInUse;	/* Number of tlocks in use */
+	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
+/*	struct tblock *sync_queue; * Transactions waiting for data sync */
+	struct list_head unlock_queue;	/* Txns waiting to be released */
+	struct list_head anon_list;	/* inodes having anonymous txns */
+	struct list_head anon_list2;	/* inodes having anonymous txns
+					   that couldn't be sync'ed */
+} TxAnchor;
+
+int jfs_tlocks_low;		/* Indicates low number of available tlocks */
+
+#ifdef CONFIG_JFS_STATISTICS
+static struct {
+	uint txBegin;
+	uint txBegin_barrier;
+	uint txBegin_lockslow;
+	uint txBegin_freetid;
+	uint txBeginAnon;
+	uint txBeginAnon_barrier;
+	uint txBeginAnon_lockslow;
+	uint txLockAlloc;
+	uint txLockAlloc_freelock;
+} TxStat;
+#endif
+
+static int nTxBlock = -1;	/* number of transaction blocks */
+module_param(nTxBlock, int, 0);
+MODULE_PARM_DESC(nTxBlock,
+		 "Number of transaction blocks (max:65536)");
+
+static int nTxLock = -1;	/* number of transaction locks */
+module_param(nTxLock, int, 0);
+MODULE_PARM_DESC(nTxLock,
+		 "Number of transaction locks (max:65536)");
+
+struct tblock *TxBlock;	        /* transaction block table */
+static int TxLockLWM;		/* Low water mark for number of txLocks used */
+static int TxLockHWM;		/* High water mark for number of txLocks used */
+static int TxLockVHWM;		/* Very High water mark */
+struct tlock *TxLock;           /* transaction lock table */
+
+
+/*
+ *      transaction management lock
+ */
+static DEFINE_SPINLOCK(jfsTxnLock);
+
+#define TXN_LOCK()              spin_lock(&jfsTxnLock)
+#define TXN_UNLOCK()            spin_unlock(&jfsTxnLock)
+
+#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
+#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
+#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
+
+DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
+DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
+static int jfs_commit_thread_waking;
+
+/*
+ * Retry logic exists outside these macros to protect against spurious wakeups.
+ */
+static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	add_wait_queue(event, &wait);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	TXN_UNLOCK();
+	schedule();
+	current->state = TASK_RUNNING;
+	remove_wait_queue(event, &wait);
+}
+
+#define TXN_SLEEP(event)\
+{\
+	TXN_SLEEP_DROP_LOCK(event);\
+	TXN_LOCK();\
+}
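+
+/*
+ * The retry loop therefore lives in each caller, e.g. (taken from
+ * txLockAlloc() below):
+ *
+ *	while (!(lid = TxAnchor.freelock))
+ *		TXN_SLEEP(&TxAnchor.freelockwait);
+ */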
+
+#define TXN_WAKEUP(event) wake_up_all(event)
+
+
+/*
+ *      statistics
+ */
+static struct {
+	tid_t maxtid;		/* 4: biggest tid ever used */
+	lid_t maxlid;		/* 4: biggest lid ever used */
+	int ntid;		/* 4: # of transactions performed */
+	int nlid;		/* 4: # of tlocks acquired */
+	int waitlock;		/* 4: # of tlock wait */
+} stattx;
+
+
+/*
+ * external references
+ */
+extern int lmGroupCommit(struct jfs_log *, struct tblock *);
+extern int jfs_commit_inode(struct inode *, int);
+extern int jfs_stop_threads;
+
+extern struct completion jfsIOwait;
+
+/*
+ * forward references
+ */
+static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+		struct tlock * tlck, struct commit * cd);
+static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+		struct tlock * tlck);
+static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+		struct tlock * tlck);
+static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+		struct tlock * tlck);
+static void txAllocPMap(struct inode *ip, struct maplock * maplock,
+		struct tblock * tblk);
+static void txForce(struct tblock * tblk);
+static int txLog(struct jfs_log * log, struct tblock * tblk,
+		struct commit * cd);
+static void txUpdateMap(struct tblock * tblk);
+static void txRelease(struct tblock * tblk);
+static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	   struct tlock * tlck);
+static void LogSyncRelease(struct metapage * mp);
+
+/*
+ *              transaction block/lock management
+ *              ---------------------------------
+ */
+
+/*
+ * Get a transaction lock from the free list.  If the number in use is
+ * greater than the high water mark, wake up the sync daemon.  This should
+ * free some anonymous transaction locks.  (TXN_LOCK must be held.)
+ */
+static lid_t txLockAlloc(void)
+{
+	lid_t lid;
+
+	INCREMENT(TxStat.txLockAlloc);
+	if (!TxAnchor.freelock) {
+		INCREMENT(TxStat.txLockAlloc_freelock);
+	}
+
+	while (!(lid = TxAnchor.freelock))
+		TXN_SLEEP(&TxAnchor.freelockwait);
+	TxAnchor.freelock = TxLock[lid].next;
+	HIGHWATERMARK(stattx.maxlid, lid);
+	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
+		jfs_info("txLockAlloc tlocks low");
+		jfs_tlocks_low = 1;
+		wake_up(&jfs_sync_thread_wait);
+	}
+
+	return lid;
+}
+
+static void txLockFree(lid_t lid)
+{
+	TxLock[lid].next = TxAnchor.freelock;
+	TxAnchor.freelock = lid;
+	TxAnchor.tlocksInUse--;
+	if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
+		jfs_info("txLockFree jfs_tlocks_low no more");
+		jfs_tlocks_low = 0;
+		TXN_WAKEUP(&TxAnchor.lowlockwait);
+	}
+	TXN_WAKEUP(&TxAnchor.freelockwait);
+}
+
+/*
+ * NAME:        txInit()
+ *
+ * FUNCTION:    initialize transaction management structures
+ *
+ * RETURN:
+ *
+ * serialization: single thread at jfs_init()
+ */
+int txInit(void)
+{
+	int k, size;
+	struct sysinfo si;
+
+	/* Set defaults for nTxLock and nTxBlock if unset */
+
+	if (nTxLock == -1) {
+		if (nTxBlock == -1) {
+			/* Base default on memory size */
+			si_meminfo(&si);
+			if (si.totalram > (256 * 1024)) /* 1 GB */
+				nTxLock = 64 * 1024;
+			else
+				nTxLock = si.totalram >> 2;
+		} else if (nTxBlock > (8 * 1024))
+			nTxLock = 64 * 1024;
+		else
+			nTxLock = nTxBlock << 3;
+	}
+	if (nTxBlock == -1)
+		nTxBlock = nTxLock >> 3;
+
+	/* Verify tunable parameters */
+	if (nTxBlock < 16)
+		nTxBlock = 16;	/* No one should set it this low */
+	if (nTxBlock > 65536)
+		nTxBlock = 65536;
+	if (nTxLock < 256)
+		nTxLock = 256;	/* No one should set it this low */
+	if (nTxLock > 65536)
+		nTxLock = 65536;
+
+	printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
+	       nTxBlock, nTxLock);
+	/*
+	 * initialize transaction block (tblock) table
+	 *
+	 * transaction id (tid) = tblock index
+	 * tid = 0 is reserved.
+	 */
+	TxLockLWM = (nTxLock * 4) / 10;
+	TxLockHWM = (nTxLock * 7) / 10;
+	TxLockVHWM = (nTxLock * 8) / 10;
+
+	size = sizeof(struct tblock) * nTxBlock;
+	TxBlock = (struct tblock *) vmalloc(size);
+	if (TxBlock == NULL)
+		return -ENOMEM;
+
+	for (k = 1; k < nTxBlock - 1; k++) {
+		TxBlock[k].next = k + 1;
+		init_waitqueue_head(&TxBlock[k].gcwait);
+		init_waitqueue_head(&TxBlock[k].waitor);
+	}
+	TxBlock[k].next = 0;
+	init_waitqueue_head(&TxBlock[k].gcwait);
+	init_waitqueue_head(&TxBlock[k].waitor);
+
+	TxAnchor.freetid = 1;
+	init_waitqueue_head(&TxAnchor.freewait);
+
+	stattx.maxtid = 1;	/* statistics */
+
+	/*
+	 * initialize transaction lock (tlock) table
+	 *
+	 * transaction lock id = tlock index
+	 * tlock id = 0 is reserved.
+	 */
+	size = sizeof(struct tlock) * nTxLock;
+	TxLock = (struct tlock *) vmalloc(size);
+	if (TxLock == NULL) {
+		vfree(TxBlock);
+		return -ENOMEM;
+	}
+
+	/* initialize tlock table */
+	for (k = 1; k < nTxLock - 1; k++)
+		TxLock[k].next = k + 1;
+	TxLock[k].next = 0;
+	init_waitqueue_head(&TxAnchor.freelockwait);
+	init_waitqueue_head(&TxAnchor.lowlockwait);
+
+	TxAnchor.freelock = 1;
+	TxAnchor.tlocksInUse = 0;
+	INIT_LIST_HEAD(&TxAnchor.anon_list);
+	INIT_LIST_HEAD(&TxAnchor.anon_list2);
+
+	LAZY_LOCK_INIT();
+	INIT_LIST_HEAD(&TxAnchor.unlock_queue);
+
+	stattx.maxlid = 1;	/* statistics */
+
+	return 0;
+}
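+
+/*
+ * Worked example of the default sizing above (illustrative numbers,
+ * assuming 4K pages): on a machine with 512 MB of RAM, si.totalram is
+ * roughly 128K pages, so nTxLock defaults to totalram >> 2 = 32768 and
+ * nTxBlock to nTxLock >> 3 = 4096.  The tlock watermarks then come out
+ * as TxLockLWM = 13107 (40%), TxLockHWM = 22937 (70%) and
+ * TxLockVHWM = 26214 (80%).
+ */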
+
+/*
+ * NAME:        txExit()
+ *
+ * FUNCTION:    clean up when module is unloaded
+ */
+void txExit(void)
+{
+	vfree(TxLock);
+	TxLock = NULL;
+	vfree(TxBlock);
+	TxBlock = NULL;
+}
+
+
+/*
+ * NAME:        txBegin()
+ *
+ * FUNCTION:    start a transaction.
+ *
+ * PARAMETER:   sb	- superblock
+ *              flag	- force for nested tx;
+ *
+ * RETURN:	tid	- transaction id
+ *
+ * note: the force flag allows starting a tx for a nested tx
+ * to prevent deadlock on the logsync barrier;
+ */
+tid_t txBegin(struct super_block *sb, int flag)
+{
+	tid_t t;
+	struct tblock *tblk;
+	struct jfs_log *log;
+
+	jfs_info("txBegin: flag = 0x%x", flag);
+	log = JFS_SBI(sb)->log;
+
+	TXN_LOCK();
+
+	INCREMENT(TxStat.txBegin);
+
+      retry:
+	if (!(flag & COMMIT_FORCE)) {
+		/*
+		 * synchronize with logsync barrier
+		 */
+		if (test_bit(log_SYNCBARRIER, &log->flag) ||
+		    test_bit(log_QUIESCE, &log->flag)) {
+			INCREMENT(TxStat.txBegin_barrier);
+			TXN_SLEEP(&log->syncwait);
+			goto retry;
+		}
+	}
+	if (flag == 0) {
+		/*
+		 * Don't begin transaction if we're getting starved for tlocks
+		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
+		 * free tlocks)
+		 */
+		if (TxAnchor.tlocksInUse > TxLockVHWM) {
+			INCREMENT(TxStat.txBegin_lockslow);
+			TXN_SLEEP(&TxAnchor.lowlockwait);
+			goto retry;
+		}
+	}
+
+	/*
+	 * allocate transaction id/block
+	 */
+	if ((t = TxAnchor.freetid) == 0) {
+		jfs_info("txBegin: waiting for free tid");
+		INCREMENT(TxStat.txBegin_freetid);
+		TXN_SLEEP(&TxAnchor.freewait);
+		goto retry;
+	}
+
+	tblk = tid_to_tblock(t);
+
+	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
+		/* Don't let a non-forced transaction take the last tblk */
+		jfs_info("txBegin: waiting for free tid");
+		INCREMENT(TxStat.txBegin_freetid);
+		TXN_SLEEP(&TxAnchor.freewait);
+		goto retry;
+	}
+
+	TxAnchor.freetid = tblk->next;
+
+	/*
+	 * initialize transaction
+	 */
+
+	/*
+	 * We can't zero the whole thing or we screw up another thread being
+	 * awakened after sleeping on tblk->waitor
+	 *
+	 * memset(tblk, 0, sizeof(struct tblock));
+	 */
+	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
+
+	tblk->sb = sb;
+	++log->logtid;
+	tblk->logtid = log->logtid;
+
+	++log->active;
+
+	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
+	INCREMENT(stattx.ntid);	/* statistics */
+
+	TXN_UNLOCK();
+
+	jfs_info("txBegin: returning tid = %d", t);
+
+	return t;
+}
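+
+/*
+ * Typical transaction lifecycle as seen by callers of this file (an
+ * illustrative sketch only; inode locking and error handling are
+ * omitted):
+ *
+ *	tid_t tid;
+ *	int rc;
+ *
+ *	tid = txBegin(ip->i_sb, 0);
+ *	... modify metadata, acquiring tlocks via txLock()/txMaplock() ...
+ *	rc = txCommit(tid, 1, &ip, 0);
+ *	txEnd(tid);
+ */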
+
+
+/*
+ * NAME:        txBeginAnon()
+ *
+ * FUNCTION:    start an anonymous transaction.
+ *		Blocks if logsync or available tlocks are low to prevent
+ *		anonymous tlocks from depleting supply.
+ *
+ * PARAMETER:   sb	- superblock
+ *
+ * RETURN:	none
+ */
+void txBeginAnon(struct super_block *sb)
+{
+	struct jfs_log *log;
+
+	log = JFS_SBI(sb)->log;
+
+	TXN_LOCK();
+	INCREMENT(TxStat.txBeginAnon);
+
+      retry:
+	/*
+	 * synchronize with logsync barrier
+	 */
+	if (test_bit(log_SYNCBARRIER, &log->flag) ||
+	    test_bit(log_QUIESCE, &log->flag)) {
+		INCREMENT(TxStat.txBeginAnon_barrier);
+		TXN_SLEEP(&log->syncwait);
+		goto retry;
+	}
+
+	/*
+	 * Don't begin transaction if we're getting starved for tlocks
+	 */
+	if (TxAnchor.tlocksInUse > TxLockVHWM) {
+		INCREMENT(TxStat.txBeginAnon_lockslow);
+		TXN_SLEEP(&TxAnchor.lowlockwait);
+		goto retry;
+	}
+	TXN_UNLOCK();
+}
+
+
+/*
+ *      txEnd()
+ *
+ * function: free specified transaction block.
+ *
+ *      logsync barrier processing:
+ *
+ * serialization:
+ */
+void txEnd(tid_t tid)
+{
+	struct tblock *tblk = tid_to_tblock(tid);
+	struct jfs_log *log;
+
+	jfs_info("txEnd: tid = %d", tid);
+	TXN_LOCK();
+
+	/*
+	 * wakeup transactions waiting on the page locked
+	 * by the current transaction
+	 */
+	TXN_WAKEUP(&tblk->waitor);
+
+	log = JFS_SBI(tblk->sb)->log;
+
+	/*
+	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
+	 * otherwise, we would be left with a transaction that may have been
+	 * reused.
+	 *
+	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
+	 * routine.
+	 */
+	if (tblk->flag & tblkGC_LAZY) {
+		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
+		TXN_UNLOCK();
+
+		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
+		tblk->flag |= tblkGC_UNLOCKED;
+		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
+		return;
+	}
+
+	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
+
+	assert(tblk->next == 0);
+
+	/*
+	 * insert tblock back on freelist
+	 */
+	tblk->next = TxAnchor.freetid;
+	TxAnchor.freetid = tid;
+
+	/*
+	 * mark the tblock not active
+	 */
+	if (--log->active == 0) {
+		clear_bit(log_FLUSH, &log->flag);
+
+		/*
+		 * synchronize with logsync barrier
+		 */
+		if (test_bit(log_SYNCBARRIER, &log->flag)) {
+			/* forward log syncpt */
+			/* lmSync(log); */
+
+			jfs_info("log barrier off: 0x%x", log->lsn);
+
+			/* enable new transactions start */
+			clear_bit(log_SYNCBARRIER, &log->flag);
+
+			/* wakeup all waitors for logsync barrier */
+			TXN_WAKEUP(&log->syncwait);
+		}
+	}
+
+	/*
+	 * wakeup all waitors for a free tblock
+	 */
+	TXN_WAKEUP(&TxAnchor.freewait);
+
+	TXN_UNLOCK();
+}
+
+
+/*
+ *      txLock()
+ *
+ * function: acquire a transaction lock on the specified <mp>
+ *
+ * parameter:
+ *
+ * return:      transaction lock id
+ *
+ * serialization:
+ */
+struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
+		     int type)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	int dir_xtree = 0;
+	lid_t lid;
+	tid_t xtid;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+	struct linelock *linelock;
+	xtpage_t *p;
+	struct tblock *tblk;
+
+	TXN_LOCK();
+
+	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
+	    !(mp->xflag & COMMIT_PAGE)) {
+		/*
+		 * Directory inode is special.  It can have both an xtree tlock
+		 * and a dtree tlock associated with it.
+		 */
+		dir_xtree = 1;
+		lid = jfs_ip->xtlid;
+	} else
+		lid = mp->lid;
+
+	/* is page not locked by a transaction ? */
+	if (lid == 0)
+		goto allocateLock;
+
+	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
+
+	/* is page locked by the requester transaction ? */
+	tlck = lid_to_tlock(lid);
+	if ((xtid = tlck->tid) == tid)
+		goto grantLock;
+
+	/*
+	 * is page locked by anonymous transaction/lock ?
+	 *
+	 * (page update without transaction (i.e., file write) is
+	 * locked under anonymous transaction tid = 0:
+	 * anonymous tlocks maintained on anonymous tlock list of
+	 * the inode of the page and available to all anonymous
+	 * transactions until txCommit() time at which point
+	 * they are transferred to the transaction tlock list of
+	 * the committing transaction of the inode)
+	 */
+	if (xtid == 0) {
+		tlck->tid = tid;
+		tblk = tid_to_tblock(tid);
+		/*
+		 * The order of the tlocks in the transaction is important
+		 * (during truncate, child xtree pages must be freed before
+		 * parent's tlocks change the working map).
+		 * Take tlock off anonymous list and add to tail of
+		 * transaction list
+		 *
+		 * Note:  We really need to get rid of the tid & lid and
+		 * use list_head's.  This code is getting UGLY!
+		 */
+		if (jfs_ip->atlhead == lid) {
+			if (jfs_ip->atltail == lid) {
+				/* only anonymous txn.
+				 * Remove from anon_list
+				 */
+				list_del_init(&jfs_ip->anon_inode_list);
+			}
+			jfs_ip->atlhead = tlck->next;
+		} else {
+			lid_t last;
+			for (last = jfs_ip->atlhead;
+			     lid_to_tlock(last)->next != lid;
+			     last = lid_to_tlock(last)->next) {
+				assert(last);
+			}
+			lid_to_tlock(last)->next = tlck->next;
+			if (jfs_ip->atltail == lid)
+				jfs_ip->atltail = last;
+		}
+
+		/* insert the tlock at tail of transaction tlock list */
+
+		if (tblk->next)
+			lid_to_tlock(tblk->last)->next = lid;
+		else
+			tblk->next = lid;
+		tlck->next = 0;
+		tblk->last = lid;
+
+		goto grantLock;
+	}
+
+	goto waitLock;
+
+	/*
+	 * allocate a tlock
+	 */
+      allocateLock:
+	lid = txLockAlloc();
+	tlck = lid_to_tlock(lid);
+
+	/*
+	 * initialize tlock
+	 */
+	tlck->tid = tid;
+
+	/* mark tlock for meta-data page */
+	if (mp->xflag & COMMIT_PAGE) {
+
+		tlck->flag = tlckPAGELOCK;
+
+		/* mark the page dirty and nohomeok */
+		mark_metapage_dirty(mp);
+		atomic_inc(&mp->nohomeok);
+
+		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
+			 mp, atomic_read(&mp->nohomeok), tid, tlck);
+
+		/* if anonymous transaction, and buffer is on the group
+		 * commit synclist, mark inode to show this.  This will
+		 * prevent the buffer from being marked nohomeok for too
+		 * long a time.
+		 */
+		if ((tid == 0) && mp->lsn)
+			set_cflag(COMMIT_Synclist, ip);
+	}
+	/* mark tlock for in-memory inode */
+	else
+		tlck->flag = tlckINODELOCK;
+
+	tlck->type = 0;
+
+	/* bind the tlock and the page */
+	tlck->ip = ip;
+	tlck->mp = mp;
+	if (dir_xtree)
+		jfs_ip->xtlid = lid;
+	else
+		mp->lid = lid;
+
+	/*
+	 * enqueue transaction lock to transaction/inode
+	 */
+	/* insert the tlock at tail of transaction tlock list */
+	if (tid) {
+		tblk = tid_to_tblock(tid);
+		if (tblk->next)
+			lid_to_tlock(tblk->last)->next = lid;
+		else
+			tblk->next = lid;
+		tlck->next = 0;
+		tblk->last = lid;
+	}
+	/* anonymous transaction:
+	 * insert the tlock at head of inode anonymous tlock list
+	 */
+	else {
+		tlck->next = jfs_ip->atlhead;
+		jfs_ip->atlhead = lid;
+		if (tlck->next == 0) {
+			/* This inode's first anonymous transaction */
+			jfs_ip->atltail = lid;
+			list_add_tail(&jfs_ip->anon_inode_list,
+				      &TxAnchor.anon_list);
+		}
+	}
+
+	/* initialize type dependent area for linelock */
+	linelock = (struct linelock *) & tlck->lock;
+	linelock->next = 0;
+	linelock->flag = tlckLINELOCK;
+	linelock->maxcnt = TLOCKSHORT;
+	linelock->index = 0;
+
+	switch (type & tlckTYPE) {
+	case tlckDTREE:
+		linelock->l2linesize = L2DTSLOTSIZE;
+		break;
+
+	case tlckXTREE:
+		linelock->l2linesize = L2XTSLOTSIZE;
+
+		xtlck = (struct xtlock *) linelock;
+		xtlck->header.offset = 0;
+		xtlck->header.length = 2;
+
+		if (type & tlckNEW) {
+			xtlck->lwm.offset = XTENTRYSTART;
+		} else {
+			if (mp->xflag & COMMIT_PAGE)
+				p = (xtpage_t *) mp->data;
+			else
+				p = &jfs_ip->i_xtroot;
+			xtlck->lwm.offset =
+			    le16_to_cpu(p->header.nextindex);
+		}
+		xtlck->lwm.length = 0;	/* ! */
+		xtlck->twm.offset = 0;
+		xtlck->hwm.offset = 0;
+
+		xtlck->index = 2;
+		break;
+
+	case tlckINODE:
+		linelock->l2linesize = L2INODESLOTSIZE;
+		break;
+
+	case tlckDATA:
+		linelock->l2linesize = L2DATASLOTSIZE;
+		break;
+
+	default:
+		jfs_err("UFO tlock:0x%p", tlck);
+	}
+
+	/*
+	 * update tlock vector
+	 */
+      grantLock:
+	tlck->type |= type;
+
+	TXN_UNLOCK();
+
+	return tlck;
+
+	/*
+	 * page is being locked by another transaction:
+	 */
+      waitLock:
+	/* Only locks on ipimap or ipaimap should reach here */
+	/* assert(jfs_ip->fileset == AGGREGATE_I); */
+	if (jfs_ip->fileset != AGGREGATE_I) {
+		jfs_err("txLock: trying to lock locked page!");
+		dump_mem("ip", ip, sizeof(struct inode));
+		dump_mem("mp", mp, sizeof(struct metapage));
+		dump_mem("Locker's tblk", tid_to_tblock(tid),
+			 sizeof(struct tblock));
+		dump_mem("Tlock", tlck, sizeof(struct tlock));
+		BUG();
+	}
+	INCREMENT(stattx.waitlock);	/* statistics */
+	release_metapage(mp);
+
+	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
+		 tid, xtid, lid);
+	TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
+	jfs_info("txLock: awakened     tid = %d, lid = %d", tid, lid);
+
+	return NULL;
+}
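+
+/*
+ * Illustrative call (placeholder arguments, not taken from a specific
+ * caller): a transaction creating a new xtree page would typically
+ * acquire its tlock with
+ *
+ *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
+ *
+ * and then fill in the xtlock/linelock area that txLock() initialized
+ * at tlck->lock.
+ */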
+
+
+/*
+ * NAME:        txRelease()
+ *
+ * FUNCTION:    Release buffers associated with transaction locks, but don't
+ *		mark homeok yet.  The allows other transactions to modify
+ *		buffers, but won't let them go to disk until commit record
+ *		actually gets written.
+ *
+ * PARAMETER:
+ *              tblk    -
+ *
+ * RETURN:      Errors from subroutines.
+ */
+static void txRelease(struct tblock * tblk)
+{
+	struct metapage *mp;
+	lid_t lid;
+	struct tlock *tlck;
+
+	TXN_LOCK();
+
+	for (lid = tblk->next; lid; lid = tlck->next) {
+		tlck = lid_to_tlock(lid);
+		if ((mp = tlck->mp) != NULL &&
+		    (tlck->type & tlckBTROOT) == 0) {
+			assert(mp->xflag & COMMIT_PAGE);
+			mp->lid = 0;
+		}
+	}
+
+	/*
+	 * wakeup transactions waiting on a page locked
+	 * by the current transaction
+	 */
+	TXN_WAKEUP(&tblk->waitor);
+
+	TXN_UNLOCK();
+}
+
+
+/*
+ * NAME:        txUnlock()
+ *
+ * FUNCTION:    Initiates pageout of pages modified by tid in journalled
+ *              objects and frees their lockwords.
+ */
+static void txUnlock(struct tblock * tblk)
+{
+	struct tlock *tlck;
+	struct linelock *linelock;
+	lid_t lid, next, llid, k;
+	struct metapage *mp;
+	struct jfs_log *log;
+	int difft, diffp;
+
+	jfs_info("txUnlock: tblk = 0x%p", tblk);
+	log = JFS_SBI(tblk->sb)->log;
+
+	/*
+	 * mark page under tlock homeok (its log has been written):
+	 */
+	for (lid = tblk->next; lid; lid = next) {
+		tlck = lid_to_tlock(lid);
+		next = tlck->next;
+
+		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
+
+		/* unbind page from tlock */
+		if ((mp = tlck->mp) != NULL &&
+		    (tlck->type & tlckBTROOT) == 0) {
+			assert(mp->xflag & COMMIT_PAGE);
+
+			/* hold buffer
+			 *
+			 * It's possible that someone else has the metapage.
+			 * The only things we're changing are nohomeok, which
+			 * is handled atomically, and clsn which is protected
+			 * by the LOGSYNC_LOCK.
+			 */
+			hold_metapage(mp, 1);
+
+			assert(atomic_read(&mp->nohomeok) > 0);
+			atomic_dec(&mp->nohomeok);
+
+			/* inherit younger/larger clsn */
+			LOGSYNC_LOCK(log);
+			if (mp->clsn) {
+				logdiff(difft, tblk->clsn, log);
+				logdiff(diffp, mp->clsn, log);
+				if (difft > diffp)
+					mp->clsn = tblk->clsn;
+			} else
+				mp->clsn = tblk->clsn;
+			LOGSYNC_UNLOCK(log);
+
+			assert(!(tlck->flag & tlckFREEPAGE));
+
+			if (tlck->flag & tlckWRITEPAGE) {
+				write_metapage(mp);
+			} else {
+				/* release page which has been forced */
+				release_metapage(mp);
+			}
+		}
+
+		/* insert tlock, and linelock(s) of the tlock if any,
+		 * at head of freelist
+		 */
+		TXN_LOCK();
+
+		llid = ((struct linelock *) & tlck->lock)->next;
+		while (llid) {
+			linelock = (struct linelock *) lid_to_tlock(llid);
+			k = linelock->next;
+			txLockFree(llid);
+			llid = k;
+		}
+		txLockFree(lid);
+
+		TXN_UNLOCK();
+	}
+	tblk->next = tblk->last = 0;
+
+	/*
+	 * remove tblock from logsynclist
+	 * (allocation map pages inherited the lsn of tblk and
+	 * have been inserted in the logsync list at txUpdateMap())
+	 */
+	if (tblk->lsn) {
+		LOGSYNC_LOCK(log);
+		log->count--;
+		list_del(&tblk->synclist);
+		LOGSYNC_UNLOCK(log);
+	}
+}
+
+
+/*
+ *      txMaplock()
+ *
+ * function: allocate a transaction lock for freed page/entry;
+ *      for freed page, maplock is used as xtlock/dtlock type;
+ */
+struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	lid_t lid;
+	struct tblock *tblk;
+	struct tlock *tlck;
+	struct maplock *maplock;
+
+	TXN_LOCK();
+
+	/*
+	 * allocate a tlock
+	 */
+	lid = txLockAlloc();
+	tlck = lid_to_tlock(lid);
+
+	/*
+	 * initialize tlock
+	 */
+	tlck->tid = tid;
+
+	/* bind the tlock and the object */
+	tlck->flag = tlckINODELOCK;
+	tlck->ip = ip;
+	tlck->mp = NULL;
+
+	tlck->type = type;
+
+	/*
+	 * enqueue transaction lock to transaction/inode
+	 */
+	/* insert the tlock at tail of transaction tlock list */
+	if (tid) {
+		tblk = tid_to_tblock(tid);
+		if (tblk->next)
+			lid_to_tlock(tblk->last)->next = lid;
+		else
+			tblk->next = lid;
+		tlck->next = 0;
+		tblk->last = lid;
+	}
+	/* anonymous transaction:
+	 * insert the tlock at head of inode anonymous tlock list
+	 */
+	else {
+		tlck->next = jfs_ip->atlhead;
+		jfs_ip->atlhead = lid;
+		if (tlck->next == 0) {
+			/* This inode's first anonymous transaction */
+			jfs_ip->atltail = lid;
+			list_add_tail(&jfs_ip->anon_inode_list,
+				      &TxAnchor.anon_list);
+		}
+	}
+
+	TXN_UNLOCK();
+
+	/* initialize type dependent area for maplock */
+	maplock = (struct maplock *) & tlck->lock;
+	maplock->next = 0;
+	maplock->maxcnt = 0;
+	maplock->index = 0;
+
+	return tlck;
+}
+
+
+/*
+ *      txLinelock()
+ *
+ * function: allocate a transaction lock for log vector list
+ */
+struct linelock *txLinelock(struct linelock * tlock)
+{
+	lid_t lid;
+	struct tlock *tlck;
+	struct linelock *linelock;
+
+	TXN_LOCK();
+
+	/* allocate a TxLock structure */
+	lid = txLockAlloc();
+	tlck = lid_to_tlock(lid);
+
+	TXN_UNLOCK();
+
+	/* initialize linelock */
+	linelock = (struct linelock *) tlck;
+	linelock->next = 0;
+	linelock->flag = tlckLINELOCK;
+	linelock->maxcnt = TLOCKLONG;
+	linelock->index = 0;
+
+	/* append linelock after tlock */
+	linelock->next = tlock->next;
+	tlock->next = lid;
+
+	return linelock;
+}
+
+
+
+/*
+ *              transaction commit management
+ *              -----------------------------
+ */
+
+/*
+ * NAME:        txCommit()
+ *
+ * FUNCTION:    commit the changes to the objects specified in
+ *              clist.  For journalled segments only the
+ *              changes of the caller are committed, ie by tid.
+ *              for non-journalled segments the data are flushed to
+ *              disk and then the change to the disk inode and indirect
+ *              blocks committed (so blocks newly allocated to the
+ *              segment will be made a part of the segment atomically).
+ *
+ *              all of the segments specified in clist must be in
+ *              one file system. no more than 6 segments are needed
+ *              to handle all unix svcs.
+ *
+ *              if the i_nlink field (i.e. disk inode link count)
+ *              is zero, and the type of inode is a regular file or
+ *              directory, or symbolic link, the inode is truncated
+ *              to zero length. the truncation is committed but the
+ *              VM resources are unaffected until it is closed (see
+ *              iput and iclose).
+ *
+ * PARAMETER:
+ *
+ * RETURN:
+ *
+ * serialization:
+ *              on entry the inode lock on each segment is assumed
+ *              to be held.
+ *
+ * i/o error:
+ */
+int txCommit(tid_t tid,		/* transaction identifier */
+	     int nip,		/* number of inodes to commit */
+	     struct inode **iplist,	/* list of inode to commit */
+	     int flag)
+{
+	int rc = 0;
+	struct commit cd;
+	struct jfs_log *log;
+	struct tblock *tblk;
+	struct lrd *lrd;
+	int lsn;
+	struct inode *ip;
+	struct jfs_inode_info *jfs_ip;
+	int k, n;
+	ino_t top;
+	struct super_block *sb;
+
+	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
+	/* is read-only file system ? */
+	if (isReadOnly(iplist[0])) {
+		rc = -EROFS;
+		goto TheEnd;
+	}
+
+	sb = cd.sb = iplist[0]->i_sb;
+	cd.tid = tid;
+
+	if (tid == 0)
+		tid = txBegin(sb, 0);
+	tblk = tid_to_tblock(tid);
+
+	/*
+	 * initialize commit structure
+	 */
+	log = JFS_SBI(sb)->log;
+	cd.log = log;
+
+	/* initialize log record descriptor in commit */
+	lrd = &cd.lrd;
+	lrd->logtid = cpu_to_le32(tblk->logtid);
+	lrd->backchain = 0;
+
+	tblk->xflag |= flag;
+
+	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
+		tblk->xflag |= COMMIT_LAZY;
+	/*
+	 *      prepare non-journaled objects for commit
+	 *
+	 * flush data pages of a non-journaled file
+	 * to prevent the file from getting uninitialized disk blocks
+	 * in case of a crash.
+	 * (new blocks - )
+	 */
+	cd.iplist = iplist;
+	cd.nip = nip;
+
+	/*
+	 *      acquire transaction lock on (on-disk) inodes
+	 *
+	 * update on-disk inode from in-memory inode
+	 * acquiring transaction locks for AFTER records
+	 * on the on-disk inode of file object
+	 *
+	 * sort the inodes array by inode number in descending order
+	 * to prevent deadlock when acquiring transaction lock
+	 * of on-disk inodes on multiple on-disk inode pages by
+	 * multiple concurrent transactions
+	 */
+	for (k = 0; k < cd.nip; k++) {
+		top = (cd.iplist[k])->i_ino;
+		for (n = k + 1; n < cd.nip; n++) {
+			ip = cd.iplist[n];
+			if (ip->i_ino > top) {
+				top = ip->i_ino;
+				cd.iplist[n] = cd.iplist[k];
+				cd.iplist[k] = ip;
+			}
+		}
+
+		ip = cd.iplist[k];
+		jfs_ip = JFS_IP(ip);
+
+		/*
+		 * BUGBUG - This code has temporarily been removed.  The
+		 * intent is to ensure that any file data is written before
+		 * the metadata is committed to the journal.  This prevents
+		 * uninitialized data from appearing in a file after the
+		 * journal has been replayed.  (The uninitialized data
+		 * could be sensitive data removed by another user.)
+		 *
+		 * The problem now is that we are holding the IWRITELOCK
+		 * on the inode, and calling filemap_fdatawrite on an
+		 * unmapped page will cause a deadlock in jfs_get_block.
+		 *
+		 * The long term solution is to pare down the use of
+		 * IWRITELOCK.  We are currently holding it too long.
+		 * We could also be smarter about which data pages need
+		 * to be written before the transaction is committed and
+		 * when we don't need to worry about it at all.
+		 *
+		 * if ((!S_ISDIR(ip->i_mode))
+		 *    && (tblk->flag & COMMIT_DELETE) == 0) {
+		 *	filemap_fdatawrite(ip->i_mapping);
+		 *	filemap_fdatawait(ip->i_mapping);
+		 * }
+		 */
+
+		/*
+		 * Mark inode as not dirty.  It will still be on the dirty
+		 * inode list, but we'll know not to commit it again unless
+		 * it gets marked dirty again
+		 */
+		clear_cflag(COMMIT_Dirty, ip);
+
+		/* inherit anonymous tlock(s) of inode */
+		if (jfs_ip->atlhead) {
+			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
+			tblk->next = jfs_ip->atlhead;
+			if (!tblk->last)
+				tblk->last = jfs_ip->atltail;
+			jfs_ip->atlhead = jfs_ip->atltail = 0;
+			TXN_LOCK();
+			list_del_init(&jfs_ip->anon_inode_list);
+			TXN_UNLOCK();
+		}
+
+		/*
+		 * acquire transaction lock on on-disk inode page
+		 * (become first tlock of the tblk's tlock list)
+		 */
+		if ((rc = diWrite(tid, ip)))
+			goto out;
+	}
+
+	/*
+	 *      write log records from transaction locks
+	 *
+	 * txUpdateMap() resets XAD_NEW in XAD.
+	 */
+	if ((rc = txLog(log, tblk, &cd)))
+		goto TheEnd;
+
+	/*
+	 * Ensure that inode isn't reused before
+	 * lazy commit thread finishes processing
+	 */
+	if (tblk->xflag & COMMIT_DELETE) {
+		atomic_inc(&tblk->u.ip->i_count);
+		/*
+		 * Avoid a rare deadlock
+		 *
+		 * If the inode is locked, we may be blocked in
+		 * jfs_commit_inode.  If so, we don't want the
+		 * lazy_commit thread doing the last iput() on the inode
+		 * since that may block on the locked inode.  Instead,
+		 * commit the transaction synchronously, so the last iput
+		 * will be done by the calling thread (or later)
+		 */
+		if (tblk->u.ip->i_state & I_LOCK)
+			tblk->xflag &= ~COMMIT_LAZY;
+	}
+
+	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
+	       ((tblk->u.ip->i_nlink == 0) &&
+		!test_cflag(COMMIT_Nolink, tblk->u.ip)));
+
+	/*
+	 *      write COMMIT log record
+	 */
+	lrd->type = cpu_to_le16(LOG_COMMIT);
+	lrd->length = 0;
+	lsn = lmLog(log, tblk, lrd, NULL);
+
+	lmGroupCommit(log, tblk);
+
+	/*
+	 *      - transaction is now committed -
+	 */
+
+	/*
+	 * force pages in careful update
+	 * (imap addressing structure update)
+	 */
+	if (flag & COMMIT_FORCE)
+		txForce(tblk);
+
+	/*
+	 *      update allocation map.
+	 *
+	 * update inode allocation map and inode:
+	 * free pager lock on memory object of inode if any.
+	 * update  block allocation map.
+	 *
+	 * txUpdateMap() resets XAD_NEW in XAD.
+	 */
+	if (tblk->xflag & COMMIT_FORCE)
+		txUpdateMap(tblk);
+
+	/*
+	 *      free transaction locks and pageout/free pages
+	 */
+	txRelease(tblk);
+
+	if ((tblk->flag & tblkGC_LAZY) == 0)
+		txUnlock(tblk);
+
+
+	/*
+	 *      reset in-memory object state
+	 */
+	for (k = 0; k < cd.nip; k++) {
+		ip = cd.iplist[k];
+		jfs_ip = JFS_IP(ip);
+
+		/*
+		 * reset in-memory inode state
+		 */
+		jfs_ip->bxflag = 0;
+		jfs_ip->blid = 0;
+	}
+
+      out:
+	if (rc != 0)
+		txAbort(tid, 1);
+
+      TheEnd:
+	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
+	return rc;
+}
+
+
+/*
+ * NAME:        txLog()
+ *
+ * FUNCTION:    Writes AFTER log records for all lines modified
+ *              by tid for segments specified by inodes in comdata.
+ *              Code assumes only WRITELOCKS are recorded in lockwords.
+ *
+ * PARAMETERS:
+ *
+ * RETURN :
+ */
+static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
+{
+	int rc = 0;
+	struct inode *ip;
+	lid_t lid;
+	struct tlock *tlck;
+	struct lrd *lrd = &cd->lrd;
+
+	/*
+	 * write log record(s) for each tlock of transaction,
+	 */
+	for (lid = tblk->next; lid; lid = tlck->next) {
+		tlck = lid_to_tlock(lid);
+
+		tlck->flag |= tlckLOG;
+
+		/* initialize lrd common */
+		ip = tlck->ip;
+		lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
+		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
+		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
+
+		/* write log record of page from the tlock */
+		switch (tlck->type & tlckTYPE) {
+		case tlckXTREE:
+			xtLog(log, tblk, lrd, tlck);
+			break;
+
+		case tlckDTREE:
+			dtLog(log, tblk, lrd, tlck);
+			break;
+
+		case tlckINODE:
+			diLog(log, tblk, lrd, tlck, cd);
+			break;
+
+		case tlckMAP:
+			mapLog(log, tblk, lrd, tlck);
+			break;
+
+		case tlckDATA:
+			dataLog(log, tblk, lrd, tlck);
+			break;
+
+		default:
+			jfs_err("UFO tlock:0x%p", tlck);
+		}
+	}
+
+	return rc;
+}
+
+
+/*
+ *      diLog()
+ *
+ * function:    log inode tlock and format maplock to update bmap;
+ */
+static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	  struct tlock * tlck, struct commit * cd)
+{
+	int rc = 0;
+	struct metapage *mp;
+	pxd_t *pxd;
+	struct pxd_lock *pxdlock;
+
+	mp = tlck->mp;
+
+	/* initialize as REDOPAGE record format */
+	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
+	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
+
+	pxd = &lrd->log.redopage.pxd;
+
+	/*
+	 *      inode after image
+	 */
+	if (tlck->type & tlckENTRY) {
+		/* log after-image for logredo(): */
+		lrd->type = cpu_to_le16(LOG_REDOPAGE);
+//              *pxd = mp->cm_pxd;
+		PXDaddress(pxd, mp->index);
+		PXDlength(pxd,
+			  mp->logical_size >> tblk->sb->s_blocksize_bits);
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+	} else if (tlck->type & tlckFREE) {
+		/*
+		 *      free inode extent
+		 *
+		 * (pages of the freed inode extent have been invalidated and
+		 * a maplock for free of the extent has been formatted at
+		 * txLock() time);
+		 *
+		 * the tlock had been acquired on the inode allocation map page
+		 * (iag) that specifies the freed extent, even though the map
+		 * page is not itself logged, to prevent pageout of the map
+		 * page before the log;
+		 */
+
+		/* log LOG_NOREDOINOEXT of the freed inode extent for
+		 * logredo() to start NoRedoPage filters, and to update
+		 * imap and bmap for free of the extent;
+		 */
+		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
+		/*
+		 * For the LOG_NOREDOINOEXT record, we need
+		 * to pass the IAG number and inode extent
+		 * index (within that IAG) from which the
+		 * extent is being released.  These have been
+		 * passed to us in iplist[1] and iplist[2].
+		 */
+		lrd->log.noredoinoext.iagnum =
+		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
+		lrd->log.noredoinoext.inoext_idx =
+		    cpu_to_le32((u32) (size_t) cd->iplist[2]);
+
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		*pxd = pxdlock->pxd;
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+
+		/* update bmap */
+		tlck->flag |= tlckUPDATEMAP;
+
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+	} else
+		jfs_err("diLog: UFO type tlck:0x%p", tlck);
+#ifdef  _JFS_WIP
+	/*
+	 *      alloc/free external EA extent
+	 *
+	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
+	 * of the extent has been formatted at txLock() time;
+	 */
+	else {
+		assert(tlck->type & tlckEA);
+
+		/* log LOG_UPDATEMAP for logredo() to update bmap for
+		 * alloc of new (and free of old) external EA extent;
+		 */
+		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		nlock = pxdlock->index;
+		for (i = 0; i < nlock; i++, pxdlock++) {
+			if (pxdlock->flag & mlckALLOCPXD)
+				lrd->log.updatemap.type =
+				    cpu_to_le16(LOG_ALLOCPXD);
+			else
+				lrd->log.updatemap.type =
+				    cpu_to_le16(LOG_FREEPXD);
+			lrd->log.updatemap.nxd = cpu_to_le16(1);
+			lrd->log.updatemap.pxd = pxdlock->pxd;
+			lrd->backchain =
+			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+		}
+
+		/* update bmap */
+		tlck->flag |= tlckUPDATEMAP;
+	}
+#endif				/* _JFS_WIP */
+
+	return rc;
+}
+
+
+/*
+ *      dataLog()
+ *
+ * function:    log data tlock
+ */
+static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	    struct tlock * tlck)
+{
+	struct metapage *mp;
+	pxd_t *pxd;
+
+	mp = tlck->mp;
+
+	/* initialize as REDOPAGE record format */
+	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
+	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
+
+	pxd = &lrd->log.redopage.pxd;
+
+	/* log after-image for logredo(): */
+	lrd->type = cpu_to_le16(LOG_REDOPAGE);
+
+	if (jfs_dirtable_inline(tlck->ip)) {
+		/*
+		 * The table has been truncated; we must have deleted
+		 * the last entry, so don't bother logging this.
+		 */
+		mp->lid = 0;
+		hold_metapage(mp, 0);
+		atomic_dec(&mp->nohomeok);
+		discard_metapage(mp);
+		tlck->mp = NULL;
+		return 0;
+	}
+
+	PXDaddress(pxd, mp->index);
+	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
+
+	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+	/* mark page as homeward bound */
+	tlck->flag |= tlckWRITEPAGE;
+
+	return 0;
+}
+
+
+/*
+ *      dtLog()
+ *
+ * function:    log dtree tlock and format maplock to update bmap;
+ */
+static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	   struct tlock * tlck)
+{
+	struct metapage *mp;
+	struct pxd_lock *pxdlock;
+	pxd_t *pxd;
+
+	mp = tlck->mp;
+
+	/* initialize as REDOPAGE/NOREDOPAGE record format */
+	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
+	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
+
+	pxd = &lrd->log.redopage.pxd;
+
+	if (tlck->type & tlckBTROOT)
+		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
+
+	/*
+	 *      page extension via relocation: entry insertion;
+	 *      page extension in-place: entry insertion;
+	 *      new right page from page split, reinitialized in-line
+	 *      root from root page split: entry insertion;
+	 */
+	if (tlck->type & (tlckNEW | tlckEXTEND)) {
+		/* log after-image of the new page for logredo():
+		 * mark log (LOG_NEW) for logredo() to initialize
+		 * freelist and update bmap for alloc of the new page;
+		 */
+		lrd->type = cpu_to_le16(LOG_REDOPAGE);
+		if (tlck->type & tlckEXTEND)
+			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
+		else
+			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
+//              *pxd = mp->cm_pxd;
+		PXDaddress(pxd, mp->index);
+		PXDlength(pxd,
+			  mp->logical_size >> tblk->sb->s_blocksize_bits);
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/* format a maplock for txUpdateMap() to update bPMAP for
+		 * alloc of the new page;
+		 */
+		if (tlck->type & tlckBTROOT)
+			return;
+		tlck->flag |= tlckUPDATEMAP;
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		pxdlock->flag = mlckALLOCPXD;
+		pxdlock->pxd = *pxd;
+
+		pxdlock->index = 1;
+
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+		return;
+	}
+
+	/*
+	 *      entry insertion/deletion,
+	 *      sibling page link update (old right page before split);
+	 */
+	if (tlck->type & (tlckENTRY | tlckRELINK)) {
+		/* log after-image for logredo(): */
+		lrd->type = cpu_to_le16(LOG_REDOPAGE);
+		PXDaddress(pxd, mp->index);
+		PXDlength(pxd,
+			  mp->logical_size >> tblk->sb->s_blocksize_bits);
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+		return;
+	}
+
+	/*
+	 *      page deletion: page has been invalidated
+	 *      page relocation: source extent
+	 *
+	 *      a maplock for free of the page has been formatted
+	 *      at txLock() time);
+	 */
+	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
+		/* log LOG_NOREDOPAGE of the deleted page for logredo()
+		 * to start NoRedoPage filter and to update bmap for free
+		 * of the deleted page
+		 */
+		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		*pxd = pxdlock->pxd;
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+
+		/* a maplock for txUpdateMap() for free of the page
+		 * has been formatted at txLock() time;
+		 */
+		tlck->flag |= tlckUPDATEMAP;
+	}
+	return;
+}
+
+
+/*
+ *      xtLog()
+ *
+ * function:    log xtree tlock and format maplock to update bmap;
+ */
+static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	   struct tlock * tlck)
+{
+	struct inode *ip;
+	struct metapage *mp;
+	xtpage_t *p;
+	struct xtlock *xtlck;
+	struct maplock *maplock;
+	struct xdlistlock *xadlock;
+	struct pxd_lock *pxdlock;
+	pxd_t *pxd;
+	int next, lwm, hwm;
+
+	ip = tlck->ip;
+	mp = tlck->mp;
+
+	/* initialize as REDOPAGE/NOREDOPAGE record format */
+	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
+	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
+
+	pxd = &lrd->log.redopage.pxd;
+
+	if (tlck->type & tlckBTROOT) {
+		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
+		p = &JFS_IP(ip)->i_xtroot;
+		if (S_ISDIR(ip->i_mode))
+			lrd->log.redopage.type |=
+			    cpu_to_le16(LOG_DIR_XTREE);
+	} else
+		p = (xtpage_t *) mp->data;
+	next = le16_to_cpu(p->header.nextindex);
+
+	xtlck = (struct xtlock *) & tlck->lock;
+
+	maplock = (struct maplock *) & tlck->lock;
+	xadlock = (struct xdlistlock *) maplock;
+
+	/*
+	 *      entry insertion/extension;
+	 *      sibling page link update (old right page before split);
+	 */
+	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
+		/* log after-image for logredo():
+		 * logredo() will update bmap for alloc of new/extended
+		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
+		 * after-image of XADlist;
+		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
+		 * applying the after-image to the meta-data page.
+		 */
+		lrd->type = cpu_to_le16(LOG_REDOPAGE);
+//              *pxd = mp->cm_pxd;
+		PXDaddress(pxd, mp->index);
+		PXDlength(pxd,
+			  mp->logical_size >> tblk->sb->s_blocksize_bits);
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/* format a maplock for txUpdateMap() to update bPMAP
+		 * for alloc of new/extended extents of XAD[lwm:next)
+		 * from the page itself;
+		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
+		 */
+		lwm = xtlck->lwm.offset;
+		if (lwm == 0)
+			lwm = XTPAGEMAXSLOT;
+
+		if (lwm == next)
+			goto out;
+		if (lwm > next) {
+			jfs_err("xtLog: lwm > next\n");
+			goto out;
+		}
+		tlck->flag |= tlckUPDATEMAP;
+		xadlock->flag = mlckALLOCXADLIST;
+		xadlock->count = next - lwm;
+		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
+			int i;
+			/*
+			 * Lazy commit may allow xtree to be modified before
+			 * txUpdateMap runs.  Copy xad into linelock to
+			 * preserve correct data.
+			 */
+			xadlock->xdlist = &xtlck->pxdlock;
+			memcpy(xadlock->xdlist, &p->xad[lwm],
+			       sizeof(xad_t) * xadlock->count);
+
+			for (i = 0; i < xadlock->count; i++)
+				p->xad[lwm + i].flag &=
+				    ~(XAD_NEW | XAD_EXTENDED);
+		} else {
+			/*
+			 * xdlist will point into the inode's xtree, so ensure
+			 * that the transaction is not committed lazily.
+			 */
+			xadlock->xdlist = &p->xad[lwm];
+			tblk->xflag &= ~COMMIT_LAZY;
+		}
+		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
+			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);
+
+		maplock->index = 1;
+
+	      out:
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+
+		return;
+	}
+
+	/*
+	 *      page deletion: file deletion/truncation (ref. xtTruncate())
+	 *
+	 * (page will be invalidated after log is written and bmap
+	 * is updated from the page);
+	 */
+	if (tlck->type & tlckFREE) {
+		/* LOG_NOREDOPAGE log for NoRedoPage filter:
+		 * if page free from file delete, NoRedoFile filter from
+		 * inode image of zero link count will subsume NoRedoPage
+		 * filters for each page;
+		 * if page free from file truncation, write NoRedoPage
+		 * filter;
+		 *
+		 * update of block allocation map for the page itself:
+		 * if page free from deletion and truncation, LOG_UPDATEMAP
+		 * log for the page itself is generated from processing
+		 * its parent page xad entries;
+		 */
+		/* if page free from file truncation, log LOG_NOREDOPAGE
+		 * of the deleted page for logredo() to start NoRedoPage
+		 * filter for the page;
+		 */
+		if (tblk->xflag & COMMIT_TRUNCATE) {
+			/* write NOREDOPAGE for the page */
+			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
+			PXDaddress(pxd, mp->index);
+			PXDlength(pxd,
+				  mp->logical_size >> tblk->sb->
+				  s_blocksize_bits);
+			lrd->backchain =
+			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+
+			if (tlck->type & tlckBTROOT) {
+				/* Empty xtree must be logged */
+				lrd->type = cpu_to_le16(LOG_REDOPAGE);
+				lrd->backchain =
+				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+			}
+		}
+
+		/* init LOG_UPDATEMAP of the freed extents
+		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
+		 * for logredo() to update bmap;
+		 */
+		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
+		xtlck = (struct xtlock *) & tlck->lock;
+		hwm = xtlck->hwm.offset;
+		lrd->log.updatemap.nxd =
+		    cpu_to_le16(hwm - XTENTRYSTART + 1);
+		/* reformat linelock for lmLog() */
+		xtlck->header.offset = XTENTRYSTART;
+		xtlck->header.length = hwm - XTENTRYSTART + 1;
+		xtlck->index = 1;
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/* format a maplock for txUpdateMap() to update bmap
+		 * to free extents of XAD[XTENTRYSTART:hwm) from the
+		 * deleted page itself;
+		 */
+		tlck->flag |= tlckUPDATEMAP;
+		xadlock->flag = mlckFREEXADLIST;
+		xadlock->count = hwm - XTENTRYSTART + 1;
+		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
+			/*
+			 * Lazy commit may allow xtree to be modified before
+			 * txUpdateMap runs.  Copy xad into linelock to
+			 * preserve correct data.
+			 */
+			xadlock->xdlist = &xtlck->pxdlock;
+			memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
+			       sizeof(xad_t) * xadlock->count);
+		} else {
+			/*
+			 * xdlist will point into the inode's xtree, so ensure
+			 * that the transaction is not committed lazily.
+			 */
+			xadlock->xdlist = &p->xad[XTENTRYSTART];
+			tblk->xflag &= ~COMMIT_LAZY;
+		}
+		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
+			 tlck->ip, mp, xadlock->count);
+
+		maplock->index = 1;
+
+		/* mark page as invalid */
+		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
+		    && !(tlck->type & tlckBTROOT))
+			tlck->flag |= tlckFREEPAGE;
+		/*
+		   else (tblk->xflag & COMMIT_PMAP)
+		   ? release the page;
+		 */
+		return;
+	}
+
+	/*
+	 *      page/entry truncation: file truncation (ref. xtTruncate())
+	 *
+	 *     |----------+------+------+---------------|
+	 *                |      |      |
+	 *                |      |     hwm - hwm before truncation
+	 *                |     next - truncation point
+	 *               lwm - lwm before truncation
+	 * header ?
+	 */
+	if (tlck->type & tlckTRUNCATE) {
+		pxd_t tpxd;	/* truncated extent of xad */
+		int twm;
+
+		/*
+		 * For truncation the entire linelock may be used, so it would
+		 * be difficult to store xad list in linelock itself.
+		 * Therefore, we'll just force transaction to be committed
+		 * synchronously, so that xtree pages won't be changed before
+		 * txUpdateMap runs.
+		 */
+		tblk->xflag &= ~COMMIT_LAZY;
+		lwm = xtlck->lwm.offset;
+		if (lwm == 0)
+			lwm = XTPAGEMAXSLOT;
+		hwm = xtlck->hwm.offset;
+		twm = xtlck->twm.offset;
+
+		/*
+		 *      write log records
+		 */
+		/* log after-image for logredo():
+		 *
+		 * logredo() will update bmap for alloc of new/extended
+		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
+		 * after-image of XADlist;
+		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
+		 * applying the after-image to the meta-data page.
+		 */
+		lrd->type = cpu_to_le16(LOG_REDOPAGE);
+		PXDaddress(pxd, mp->index);
+		PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+
+		/*
+		 * truncate entry XAD[twm == next - 1]:
+		 */
+		if (twm == next - 1) {
+			/* init LOG_UPDATEMAP for logredo() to update bmap for
+			 * free of truncated delta extent of the truncated
+			 * entry XAD[next - 1]:
+			 * (xtlck->pxdlock = truncated delta extent);
+			 */
+			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
+			/* assert(pxdlock->type & tlckTRUNCATE); */
+			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
+			lrd->log.updatemap.nxd = cpu_to_le16(1);
+			lrd->log.updatemap.pxd = pxdlock->pxd;
+			tpxd = pxdlock->pxd;	/* save to format maplock */
+			lrd->backchain =
+			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+		}
+
+		/*
+		 * free entries XAD[next:hwm]:
+		 */
+		if (hwm >= next) {
+			/* init LOG_UPDATEMAP of the freed extents
+			 * XAD[next:hwm] from the deleted page itself
+			 * for logredo() to update bmap;
+			 */
+			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+			lrd->log.updatemap.type =
+			    cpu_to_le16(LOG_FREEXADLIST);
+			xtlck = (struct xtlock *) & tlck->lock;
+			hwm = xtlck->hwm.offset;
+			lrd->log.updatemap.nxd =
+			    cpu_to_le16(hwm - next + 1);
+			/* reformat linelock for lmLog() */
+			xtlck->header.offset = next;
+			xtlck->header.length = hwm - next + 1;
+			xtlck->index = 1;
+			lrd->backchain =
+			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
+		}
+
+		/*
+		 *      format maplock(s) for txUpdateMap() to update bmap
+		 */
+		maplock->index = 0;
+
+		/*
+		 * allocate entries XAD[lwm:next):
+		 */
+		if (lwm < next) {
+			/* format a maplock for txUpdateMap() to update bPMAP
+			 * for alloc of new/extended extents of XAD[lwm:next)
+			 * from the page itself;
+			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
+			 */
+			tlck->flag |= tlckUPDATEMAP;
+			xadlock->flag = mlckALLOCXADLIST;
+			xadlock->count = next - lwm;
+			xadlock->xdlist = &p->xad[lwm];
+
+			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
+				 "lwm:%d next:%d",
+				 tlck->ip, mp, xadlock->count, lwm, next);
+			maplock->index++;
+			xadlock++;
+		}
+
+		/*
+		 * truncate entry XAD[twm == next - 1]:
+		 */
+		if (twm == next - 1) {
+			struct pxd_lock *pxdlock;
+
+			/* format a maplock for txUpdateMap() to update bmap
+			 * to free truncated delta extent of the truncated
+			 * entry XAD[next - 1];
+			 * (xtlck->pxdlock = truncated delta extent);
+			 */
+			tlck->flag |= tlckUPDATEMAP;
+			pxdlock = (struct pxd_lock *) xadlock;
+			pxdlock->flag = mlckFREEPXD;
+			pxdlock->count = 1;
+			pxdlock->pxd = tpxd;
+
+			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
+				 "hwm:%d", ip, mp, pxdlock->count, hwm);
+			maplock->index++;
+			xadlock++;
+		}
+
+		/*
+		 * free entries XAD[next:hwm]:
+		 */
+		if (hwm >= next) {
+			/* format a maplock for txUpdateMap() to update bmap
+			 * to free extents of XAD[next:hwm] from the deleted
+			 * page itself;
+			 */
+			tlck->flag |= tlckUPDATEMAP;
+			xadlock->flag = mlckFREEXADLIST;
+			xadlock->count = hwm - next + 1;
+			xadlock->xdlist = &p->xad[next];
+
+			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
+				 "next:%d hwm:%d",
+				 tlck->ip, mp, xadlock->count, next, hwm);
+			maplock->index++;
+		}
+
+		/* mark page as homeward bound */
+		tlck->flag |= tlckWRITEPAGE;
+	}
+	return;
+}
+
+
+/*
+ *      mapLog()
+ *
+ * function:    log from maplock of freed data extents;
+ */
+void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+	    struct tlock * tlck)
+{
+	struct pxd_lock *pxdlock;
+	int i, nlock;
+	pxd_t *pxd;
+
+	/*
+	 *      page relocation: free the source page extent
+	 *
+	 * a maplock for txUpdateMap() for free of the page
+	 * has been formatted at txLock() time saving the src
+	 * relocated page address;
+	 */
+	if (tlck->type & tlckRELOCATE) {
+		/* log LOG_NOREDOPAGE of the old relocated page
+		 * for logredo() to start NoRedoPage filter;
+		 */
+		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		pxd = &lrd->log.redopage.pxd;
+		*pxd = pxdlock->pxd;
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+
+		/* (N.B. currently, logredo() does NOT update bmap
+		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
+		 * if page free from relocation, LOG_UPDATEMAP log is
+		 * specifically generated now for logredo()
+		 * to update bmap for free of src relocated page;
+		 * (new flag LOG_RELOCATE may be introduced which will
+		 * inform logredo() to start NORedoPage filter and also
+		 * update block allocation map at the same time, thus
+		 * avoiding an extra log write);
+		 */
+		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
+		lrd->log.updatemap.nxd = cpu_to_le16(1);
+		lrd->log.updatemap.pxd = pxdlock->pxd;
+		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+
+		/* a maplock for txUpdateMap() for free of the page
+		 * has been formatted at txLock() time;
+		 */
+		tlck->flag |= tlckUPDATEMAP;
+		return;
+	}
+	/*
+	 * Otherwise it's not a relocate request
+	 */
+	else {
+		/* log LOG_UPDATEMAP for logredo() to update bmap for
+		 * free of truncated/relocated delta extent of the data;
+		 * e.g.: external EA extent, relocated/truncated extent
+		 * from xtTailgate();
+		 */
+		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
+		pxdlock = (struct pxd_lock *) & tlck->lock;
+		nlock = pxdlock->index;
+		for (i = 0; i < nlock; i++, pxdlock++) {
+			if (pxdlock->flag & mlckALLOCPXD)
+				lrd->log.updatemap.type =
+				    cpu_to_le16(LOG_ALLOCPXD);
+			else
+				lrd->log.updatemap.type =
+				    cpu_to_le16(LOG_FREEPXD);
+			lrd->log.updatemap.nxd = cpu_to_le16(1);
+			lrd->log.updatemap.pxd = pxdlock->pxd;
+			lrd->backchain =
+			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
+			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
+				 (ulong) addressPXD(&pxdlock->pxd),
+				 lengthPXD(&pxdlock->pxd));
+		}
+
+		/* update bmap */
+		tlck->flag |= tlckUPDATEMAP;
+	}
+}
+
+
+/*
+ *      txEA()
+ *
+ * function:    acquire maplock for EA/ACL extents or
+ *              set COMMIT_INLINE flag;
+ */
+void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
+{
+	struct tlock *tlck = NULL;
+	struct pxd_lock *maplock = NULL, *pxdlock = NULL;
+
+	/*
+	 * format maplock for alloc of new EA extent
+	 */
+	if (newea) {
+		/* Since the newea could be a completely zeroed entry we need to
+		 * check for the two flags which indicate we should actually
+		 * commit new EA data
+		 */
+		if (newea->flag & DXD_EXTENT) {
+			tlck = txMaplock(tid, ip, tlckMAP);
+			maplock = (struct pxd_lock *) & tlck->lock;
+			pxdlock = (struct pxd_lock *) maplock;
+			pxdlock->flag = mlckALLOCPXD;
+			PXDaddress(&pxdlock->pxd, addressDXD(newea));
+			PXDlength(&pxdlock->pxd, lengthDXD(newea));
+			pxdlock++;
+			maplock->index = 1;
+		} else if (newea->flag & DXD_INLINE) {
+			tlck = NULL;
+
+			set_cflag(COMMIT_Inlineea, ip);
+		}
+	}
+
+	/*
+	 * format maplock for free of old EA extent
+	 */
+	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
+		if (tlck == NULL) {
+			tlck = txMaplock(tid, ip, tlckMAP);
+			maplock = (struct pxd_lock *) & tlck->lock;
+			pxdlock = (struct pxd_lock *) maplock;
+			maplock->index = 0;
+		}
+		pxdlock->flag = mlckFREEPXD;
+		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
+		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
+		maplock->index++;
+	}
+}
+
+
+/*
+ *      txForce()
+ *
+ * function: synchronously write pages locked by transaction
+ *              after txLog() but before txUpdateMap();
+ */
+void txForce(struct tblock * tblk)
+{
+	struct tlock *tlck;
+	lid_t lid, next;
+	struct metapage *mp;
+
+	/*
+	 * reverse the order of transaction tlocks into the
+	 * careful update order of address index pages
+	 * (right to left, bottom up)
+	 */
+	tlck = lid_to_tlock(tblk->next);
+	lid = tlck->next;
+	tlck->next = 0;
+	while (lid) {
+		tlck = lid_to_tlock(lid);
+		next = tlck->next;
+		tlck->next = tblk->next;
+		tblk->next = lid;
+		lid = next;
+	}
+
+	/*
+	 * synchronously write the page, and
+	 * hold the page for txUpdateMap();
+	 */
+	for (lid = tblk->next; lid; lid = next) {
+		tlck = lid_to_tlock(lid);
+		next = tlck->next;
+
+		if ((mp = tlck->mp) != NULL &&
+		    (tlck->type & tlckBTROOT) == 0) {
+			assert(mp->xflag & COMMIT_PAGE);
+
+			if (tlck->flag & tlckWRITEPAGE) {
+				tlck->flag &= ~tlckWRITEPAGE;
+
+				/* do not release page to freelist */
+
+				/*
+				 * The "right" thing to do here is to
+				 * synchronously write the metadata.
+				 * With the current implementation this
+				 * is hard since write_metapage requires
+				 * us to kunmap & remap the page.  If we
+				 * have tlocks pointing into the metadata
+				 * pages, we don't want to do this.  I think
+				 * we can get by with synchronously writing
+				 * the pages when they are released.
+				 */
+				assert(atomic_read(&mp->nohomeok));
+				set_bit(META_dirty, &mp->flag);
+				set_bit(META_sync, &mp->flag);
+			}
+		}
+	}
+}
+
+
+/*
+ *      txUpdateMap()
+ *
+ * function:    update persistent allocation map (and working map
+ *              if appropriate);
+ *
+ * parameter:
+ */
+static void txUpdateMap(struct tblock * tblk)
+{
+	struct inode *ip;
+	struct inode *ipimap;
+	lid_t lid;
+	struct tlock *tlck;
+	struct maplock *maplock;
+	struct pxd_lock pxdlock;
+	int maptype;
+	int k, nlock;
+	struct metapage *mp = NULL;
+
+	ipimap = JFS_SBI(tblk->sb)->ipimap;
+
+	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
+
+
+	/*
+	 *      update block allocation map
+	 *
+	 * update allocation state in pmap (and wmap) and
+	 * update lsn of the pmap page;
+	 */
+	/*
+	 * scan each tlock/page of transaction for block allocation/free:
+	 *
+	 * for each tlock/page of transaction, update map.
+	 *  ? are there tlock for pmap and pwmap at the same time ?
+	 */
+	for (lid = tblk->next; lid; lid = tlck->next) {
+		tlck = lid_to_tlock(lid);
+
+		if ((tlck->flag & tlckUPDATEMAP) == 0)
+			continue;
+
+		if (tlck->flag & tlckFREEPAGE) {
+			/*
+			 * Another thread may attempt to reuse freed space
+			 * immediately, so we want to get rid of the metapage
+			 * before anyone else has a chance to get it.
+			 * Lock metapage, update maps, then invalidate
+			 * the metapage.
+			 */
+			mp = tlck->mp;
+			ASSERT(mp->xflag & COMMIT_PAGE);
+			hold_metapage(mp, 0);
+		}
+
+		/*
+		 * extent list:
+		 * . in-line PXD list:
+		 * . out-of-line XAD list:
+		 */
+		maplock = (struct maplock *) & tlck->lock;
+		nlock = maplock->index;
+
+		for (k = 0; k < nlock; k++, maplock++) {
+			/*
+			 * allocate blocks in persistent map:
+			 *
+			 * blocks have been allocated from wmap at alloc time;
+			 */
+			if (maplock->flag & mlckALLOC) {
+				txAllocPMap(ipimap, maplock, tblk);
+			}
+			/*
+			 * free blocks in persistent and working map:
+			 * blocks will be freed in pmap and then in wmap;
+			 *
+			 * ? tblock specifies the PMAP/PWMAP based upon
+			 * transaction
+			 *
+			 * free blocks in persistent map:
+			 * blocks will be freed from wmap at last reference
+			 * release of the object for regular files;
+			 *
+			 * Always free blocks from both persistent & working
+			 * maps for directories.
+			 */
+			else {	/* (maplock->flag & mlckFREE) */
+
+				if (S_ISDIR(tlck->ip->i_mode))
+					txFreeMap(ipimap, maplock,
+						  tblk, COMMIT_PWMAP);
+				else
+					txFreeMap(ipimap, maplock,
+						  tblk, maptype);
+			}
+		}
+		if (tlck->flag & tlckFREEPAGE) {
+			if (!(tblk->flag & tblkGC_LAZY)) {
+				/* This is equivalent to txRelease */
+				ASSERT(mp->lid == lid);
+				tlck->mp->lid = 0;
+			}
+			assert(atomic_read(&mp->nohomeok) == 1);
+			atomic_dec(&mp->nohomeok);
+			discard_metapage(mp);
+			tlck->mp = NULL;
+		}
+	}
+	/*
+	 *      update inode allocation map
+	 *
+	 * update allocation state in pmap and
+	 * update lsn of the pmap page;
+	 * update in-memory inode flag/state
+	 *
+	 * unlock mapper/write lock
+	 */
+	if (tblk->xflag & COMMIT_CREATE) {
+		diUpdatePMap(ipimap, tblk->ino, FALSE, tblk);
+		ipimap->i_state |= I_DIRTY;
+		/* update persistent block allocation map
+		 * for the allocation of inode extent;
+		 */
+		pxdlock.flag = mlckALLOCPXD;
+		pxdlock.pxd = tblk->u.ixpxd;
+		pxdlock.index = 1;
+		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
+	} else if (tblk->xflag & COMMIT_DELETE) {
+		ip = tblk->u.ip;
+		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
+		ipimap->i_state |= I_DIRTY;
+		iput(ip);
+	}
+}
+
+
+/*
+ *      txAllocPMap()
+ *
+ * function: allocate from persistent map;
+ *
+ * parameter:
+ *      ipbmap  -
+ *      malock -
+ *              xad list:
+ *              pxd:
+ *
+ *      maptype -
+ *              allocate from persistent map;
+ *              free from persistent map;
+ *              (e.g., tmp file - free from working map at release
+ *               of last reference);
+ *              free from persistent and working map;
+ *
+ *      lsn     - log sequence number;
+ */
+static void txAllocPMap(struct inode *ip, struct maplock * maplock,
+			struct tblock * tblk)
+{
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct xdlistlock *xadlistlock;
+	xad_t *xad;
+	s64 xaddr;
+	int xlen;
+	struct pxd_lock *pxdlock;
+	struct xdlistlock *pxdlistlock;
+	pxd_t *pxd;
+	int n;
+
+	/*
+	 * allocate from persistent map;
+	 */
+	if (maplock->flag & mlckALLOCXADLIST) {
+		xadlistlock = (struct xdlistlock *) maplock;
+		xad = xadlistlock->xdlist;
+		for (n = 0; n < xadlistlock->count; n++, xad++) {
+			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
+				xaddr = addressXAD(xad);
+				xlen = lengthXAD(xad);
+				dbUpdatePMap(ipbmap, FALSE, xaddr,
+					     (s64) xlen, tblk);
+				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
+				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
+					 (ulong) xaddr, xlen);
+			}
+		}
+	} else if (maplock->flag & mlckALLOCPXD) {
+		pxdlock = (struct pxd_lock *) maplock;
+		xaddr = addressPXD(&pxdlock->pxd);
+		xlen = lengthPXD(&pxdlock->pxd);
+		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
+		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
+	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */
+
+		pxdlistlock = (struct xdlistlock *) maplock;
+		pxd = pxdlistlock->xdlist;
+		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
+			xaddr = addressPXD(pxd);
+			xlen = lengthPXD(pxd);
+			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
+				     tblk);
+			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
+				 (ulong) xaddr, xlen);
+		}
+	}
+}
+
+
+/*
+ *      txFreeMap()
+ *
+ * function:    free from persistent and/or working map;
+ *
+ * todo: optimization
+ */
+void txFreeMap(struct inode *ip,
+	       struct maplock * maplock, struct tblock * tblk, int maptype)
+{
+	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+	struct xdlistlock *xadlistlock;
+	xad_t *xad;
+	s64 xaddr;
+	int xlen;
+	struct pxd_lock *pxdlock;
+	struct xdlistlock *pxdlistlock;
+	pxd_t *pxd;
+	int n;
+
+	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
+		 tblk, maplock, maptype);
+
+	/*
+	 * free from persistent map;
+	 */
+	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
+		if (maplock->flag & mlckFREEXADLIST) {
+			xadlistlock = (struct xdlistlock *) maplock;
+			xad = xadlistlock->xdlist;
+			for (n = 0; n < xadlistlock->count; n++, xad++) {
+				if (!(xad->flag & XAD_NEW)) {
+					xaddr = addressXAD(xad);
+					xlen = lengthXAD(xad);
+					dbUpdatePMap(ipbmap, TRUE, xaddr,
+						     (s64) xlen, tblk);
+					jfs_info("freePMap: xaddr:0x%lx "
+						 "xlen:%d",
+						 (ulong) xaddr, xlen);
+				}
+			}
+		} else if (maplock->flag & mlckFREEPXD) {
+			pxdlock = (struct pxd_lock *) maplock;
+			xaddr = addressPXD(&pxdlock->pxd);
+			xlen = lengthPXD(&pxdlock->pxd);
+			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
+				     tblk);
+			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
+				 (ulong) xaddr, xlen);
+		} else {	/* (maplock->flag & mlckALLOCPXDLIST) */
+
+			pxdlistlock = (struct xdlistlock *) maplock;
+			pxd = pxdlistlock->xdlist;
+			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
+				xaddr = addressPXD(pxd);
+				xlen = lengthPXD(pxd);
+				dbUpdatePMap(ipbmap, TRUE, xaddr,
+					     (s64) xlen, tblk);
+				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
+					 (ulong) xaddr, xlen);
+			}
+		}
+	}
+
+	/*
+	 * free from working map;
+	 */
+	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
+		if (maplock->flag & mlckFREEXADLIST) {
+			xadlistlock = (struct xdlistlock *) maplock;
+			xad = xadlistlock->xdlist;
+			for (n = 0; n < xadlistlock->count; n++, xad++) {
+				xaddr = addressXAD(xad);
+				xlen = lengthXAD(xad);
+				dbFree(ip, xaddr, (s64) xlen);
+				xad->flag = 0;
+				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
+					 (ulong) xaddr, xlen);
+			}
+		} else if (maplock->flag & mlckFREEPXD) {
+			pxdlock = (struct pxd_lock *) maplock;
+			xaddr = addressPXD(&pxdlock->pxd);
+			xlen = lengthPXD(&pxdlock->pxd);
+			dbFree(ip, xaddr, (s64) xlen);
+			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
+				 (ulong) xaddr, xlen);
+		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
+
+			pxdlistlock = (struct xdlistlock *) maplock;
+			pxd = pxdlistlock->xdlist;
+			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
+				xaddr = addressPXD(pxd);
+				xlen = lengthPXD(pxd);
+				dbFree(ip, xaddr, (s64) xlen);
+				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
+					 (ulong) xaddr, xlen);
+			}
+		}
+	}
+}
+
+
+/*
+ *      txFreelock()
+ *
+ * function:    remove tlock from inode anonymous locklist
+ */
+void txFreelock(struct inode *ip)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	struct tlock *xtlck, *tlck;
+	lid_t xlid = 0, lid;
+
+	if (!jfs_ip->atlhead)
+		return;
+
+	TXN_LOCK();
+	xtlck = (struct tlock *) &jfs_ip->atlhead;
+
+	while ((lid = xtlck->next) != 0) {
+		tlck = lid_to_tlock(lid);
+		if (tlck->flag & tlckFREELOCK) {
+			xtlck->next = tlck->next;
+			txLockFree(lid);
+		} else {
+			xtlck = tlck;
+			xlid = lid;
+		}
+	}
+
+	if (jfs_ip->atlhead)
+		jfs_ip->atltail = xlid;
+	else {
+		jfs_ip->atltail = 0;
+		/*
+		 * If inode was on anon_list, remove it
+		 */
+		list_del_init(&jfs_ip->anon_inode_list);
+	}
+	TXN_UNLOCK();
+}
+
+
+/*
+ *      txAbort()
+ *
+ * function: abort tx before commit;
+ *
+ * frees line-locks and segment locks for all
+ * segments in comdata structure.
+ * Optionally sets state of file-system to FM_DIRTY in super-block.
+ * The log age of in-memory page frames locked by the transaction
+ * is reset to 0 (to avoid logwrap).
+ */
+void txAbort(tid_t tid, int dirty)
+{
+	lid_t lid, next;
+	struct metapage *mp;
+	struct tblock *tblk = tid_to_tblock(tid);
+	struct tlock *tlck;
+
+	/*
+	 * free tlocks of the transaction
+	 */
+	for (lid = tblk->next; lid; lid = next) {
+		tlck = lid_to_tlock(lid);
+		next = tlck->next;
+		mp = tlck->mp;
+		JFS_IP(tlck->ip)->xtlid = 0;
+
+		if (mp) {
+			mp->lid = 0;
+
+			/*
+			 * reset lsn of page to avoid logwrap:
+			 *
+			 * (the page may have been previously committed by
+			 * other transactions but not yet paged out, i.e.,
+			 * it may be on logsync list even though it has not
+			 * been logged for the current tx.)
+			 */
+			if (mp->xflag & COMMIT_PAGE && mp->lsn)
+				LogSyncRelease(mp);
+		}
+		/* insert tlock at head of freelist */
+		TXN_LOCK();
+		txLockFree(lid);
+		TXN_UNLOCK();
+	}
+
+	/* caller will free the transaction block */
+
+	tblk->next = tblk->last = 0;
+
+	/*
+	 * mark filesystem dirty
+	 */
+	if (dirty)
+		jfs_error(tblk->sb, "txAbort");
+
+	return;
+}
+
+/*
+ *      txLazyCommit(void)
+ *
+ *	All transactions except those changing ipimap (COMMIT_FORCE) are
+ *	processed by this routine.  This ensures that the inode and block
+ *	allocation maps are updated in order.  For synchronous transactions,
+ *	let the user thread finish processing after txUpdateMap() is called.
+ */
+static void txLazyCommit(struct tblock * tblk)
+{
+	struct jfs_log *log;
+
+	while (((tblk->flag & tblkGC_READY) == 0) &&
+	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
+		/* We must have gotten ahead of the user thread
+		 */
+		jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
+		yield();
+	}
+
+	jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
+
+	txUpdateMap(tblk);
+
+	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
+
+	spin_lock_irq(&log->gclock);	// LOGGC_LOCK
+
+	tblk->flag |= tblkGC_COMMITTED;
+
+	if (tblk->flag & tblkGC_READY)
+		log->gcrtc--;
+
+	wake_up_all(&tblk->gcwait);	// LOGGC_WAKEUP
+
+	/*
+	 * Can't release log->gclock until we've tested tblk->flag
+	 */
+	if (tblk->flag & tblkGC_LAZY) {
+		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
+		txUnlock(tblk);
+		tblk->flag &= ~tblkGC_LAZY;
+		txEnd(tblk - TxBlock);	/* Convert back to tid */
+	} else
+		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
+
+	jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
+}
+
+/*
+ *      jfs_lazycommit(void)
+ *
+ *	To be run as a kernel daemon.  If lbmIODone is called in an interrupt
+ *	context, or where blocking is not wanted, this routine will process
+ *	committed transactions from the unlock queue.
+ */
+int jfs_lazycommit(void *arg)
+{
+	int WorkDone;
+	struct tblock *tblk;
+	unsigned long flags;
+	struct jfs_sb_info *sbi;
+
+	daemonize("jfsCommit");
+
+	complete(&jfsIOwait);
+
+	do {
+		LAZY_LOCK(flags);
+		jfs_commit_thread_waking = 0;	/* OK to wake another thread */
+		while (!list_empty(&TxAnchor.unlock_queue)) {
+			WorkDone = 0;
+			list_for_each_entry(tblk, &TxAnchor.unlock_queue,
+					    cqueue) {
+
+				sbi = JFS_SBI(tblk->sb);
+				/*
+				 * For each volume, the transactions must be
+				 * handled in order.  If another commit thread
+				 * is handling a tblk for this superblock,
+				 * skip it
+				 */
+				if (sbi->commit_state & IN_LAZYCOMMIT)
+					continue;
+
+				sbi->commit_state |= IN_LAZYCOMMIT;
+				WorkDone = 1;
+
+				/*
+				 * Remove transaction from queue
+				 */
+				list_del(&tblk->cqueue);
+
+				LAZY_UNLOCK(flags);
+				txLazyCommit(tblk);
+				LAZY_LOCK(flags);
+
+				sbi->commit_state &= ~IN_LAZYCOMMIT;
+				/*
+				 * Don't continue in the for loop.  (We can't
+				 * anyway, it's unsafe!)  We want to go back to
+				 * the beginning of the list.
+				 */
+				break;
+			}
+
+			/* If there was nothing to do, don't continue */
+			if (!WorkDone)
+				break;
+		}
+		/* In case a wakeup came while all threads were active */
+		jfs_commit_thread_waking = 0;
+
+		if (current->flags & PF_FREEZE) {
+			LAZY_UNLOCK(flags);
+			refrigerator(PF_FREEZE);
+		} else {
+			DECLARE_WAITQUEUE(wq, current);
+
+			add_wait_queue(&jfs_commit_thread_wait, &wq);
+			set_current_state(TASK_INTERRUPTIBLE);
+			LAZY_UNLOCK(flags);
+			schedule();
+			current->state = TASK_RUNNING;
+			remove_wait_queue(&jfs_commit_thread_wait, &wq);
+		}
+	} while (!jfs_stop_threads);
+
+	if (!list_empty(&TxAnchor.unlock_queue))
+		jfs_err("jfs_lazycommit being killed w/pending transactions!");
+	else
+		jfs_info("jfs_lazycommit being killed\n");
+	complete_and_exit(&jfsIOwait, 0);
+}
+
+void txLazyUnlock(struct tblock * tblk)
+{
+	unsigned long flags;
+
+	LAZY_LOCK(flags);
+
+	list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
+	/*
+	 * Don't wake up a commit thread if there is already one servicing
+	 * this superblock, or if the last one we woke up hasn't started yet.
+	 */
+	if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
+	    !jfs_commit_thread_waking) {
+		jfs_commit_thread_waking = 1;
+		wake_up(&jfs_commit_thread_wait);
+	}
+	LAZY_UNLOCK(flags);
+}
+
+static void LogSyncRelease(struct metapage * mp)
+{
+	struct jfs_log *log = mp->log;
+
+	assert(atomic_read(&mp->nohomeok));
+	assert(log);
+	atomic_dec(&mp->nohomeok);
+
+	if (atomic_read(&mp->nohomeok))
+		return;
+
+	hold_metapage(mp, 0);
+
+	LOGSYNC_LOCK(log);
+	mp->log = NULL;
+	mp->lsn = 0;
+	mp->clsn = 0;
+	log->count--;
+	list_del_init(&mp->synclist);
+	LOGSYNC_UNLOCK(log);
+
+	release_metapage(mp);
+}
+
+/*
+ *	txQuiesce
+ *
+ *	Block all new transactions and push anonymous transactions to
+ *	completion
+ *
+ *	This does almost the same thing as jfs_sync below.  We don't
+ *	worry about deadlocking when jfs_tlocks_low is set, since we would
+ *	expect jfs_sync to get us out of that jam.
+ */
+void txQuiesce(struct super_block *sb)
+{
+	struct inode *ip;
+	struct jfs_inode_info *jfs_ip;
+	struct jfs_log *log = JFS_SBI(sb)->log;
+	tid_t tid;
+
+	set_bit(log_QUIESCE, &log->flag);
+
+	TXN_LOCK();
+restart:
+	while (!list_empty(&TxAnchor.anon_list)) {
+		jfs_ip = list_entry(TxAnchor.anon_list.next,
+				    struct jfs_inode_info,
+				    anon_inode_list);
+		ip = &jfs_ip->vfs_inode;
+
+		/*
+		 * inode will be removed from anonymous list
+		 * when it is committed
+		 */
+		TXN_UNLOCK();
+		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
+		down(&jfs_ip->commit_sem);
+		txCommit(tid, 1, &ip, 0);
+		txEnd(tid);
+		up(&jfs_ip->commit_sem);
+		/*
+		 * Just to be safe.  I don't know how
+		 * long we can run without blocking
+		 */
+		cond_resched();
+		TXN_LOCK();
+	}
+
+	/*
+	 * If jfs_sync is running in parallel, there could be some inodes
+	 * on anon_list2.  Let's check.
+	 */
+	if (!list_empty(&TxAnchor.anon_list2)) {
+		list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
+		INIT_LIST_HEAD(&TxAnchor.anon_list2);
+		goto restart;
+	}
+	TXN_UNLOCK();
+
+	/*
+	 * We may need to kick off the group commit
+	 */
+	jfs_flush_journal(log, 0);
+}
+
+/*
+ * txResume()
+ *
+ * Allows transactions to start again following txQuiesce
+ */
+void txResume(struct super_block *sb)
+{
+	struct jfs_log *log = JFS_SBI(sb)->log;
+
+	clear_bit(log_QUIESCE, &log->flag);
+	TXN_WAKEUP(&log->syncwait);
+}
+
+/*
+ *      jfs_sync(void)
+ *
+ *	To be run as a kernel daemon.  This is awakened when tlocks run low.
+ *	We write any inodes that have anonymous tlocks so they will become
+ *	available.
+ */
+int jfs_sync(void *arg)
+{
+	struct inode *ip;
+	struct jfs_inode_info *jfs_ip;
+	int rc;
+	tid_t tid;
+
+	daemonize("jfsSync");
+
+	complete(&jfsIOwait);
+
+	do {
+		/*
+		 * write each inode on the anonymous inode list
+		 */
+		TXN_LOCK();
+		while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
+			jfs_ip = list_entry(TxAnchor.anon_list.next,
+					    struct jfs_inode_info,
+					    anon_inode_list);
+			ip = &jfs_ip->vfs_inode;
+
+			if (! igrab(ip)) {
+				/*
+				 * Inode is being freed
+				 */
+				list_del_init(&jfs_ip->anon_inode_list);
+			} else if (! down_trylock(&jfs_ip->commit_sem)) {
+				/*
+				 * inode will be removed from anonymous list
+				 * when it is committed
+				 */
+				TXN_UNLOCK();
+				tid = txBegin(ip->i_sb, COMMIT_INODE);
+				rc = txCommit(tid, 1, &ip, 0);
+				txEnd(tid);
+				up(&jfs_ip->commit_sem);
+
+				iput(ip);
+				/*
+				 * Just to be safe.  I don't know how
+				 * long we can run without blocking
+				 */
+				cond_resched();
+				TXN_LOCK();
+			} else {
+				/* We can't get the commit semaphore.  It may
+				 * be held by a thread waiting for tlocks,
+				 * so let's not block here.  Save it to
+				 * put back on the anon_list.
+				 */
+
+				/* Take off anon_list */
+				list_del(&jfs_ip->anon_inode_list);
+
+				/* Put on anon_list2 */
+				list_add(&jfs_ip->anon_inode_list,
+					 &TxAnchor.anon_list2);
+
+				TXN_UNLOCK();
+				iput(ip);
+				TXN_LOCK();
+			}
+		}
+		/* Add anon_list2 back to anon_list */
+		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
+
+		if (current->flags & PF_FREEZE) {
+			TXN_UNLOCK();
+			refrigerator(PF_FREEZE);
+		} else {
+			DECLARE_WAITQUEUE(wq, current);
+
+			add_wait_queue(&jfs_sync_thread_wait, &wq);
+			set_current_state(TASK_INTERRUPTIBLE);
+			TXN_UNLOCK();
+			schedule();
+			current->state = TASK_RUNNING;
+			remove_wait_queue(&jfs_sync_thread_wait, &wq);
+		}
+	} while (!jfs_stop_threads);
+
+	jfs_info("jfs_sync being killed");
+	complete_and_exit(&jfsIOwait, 0);
+}
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
+int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
+		      int *eof, void *data)
+{
+	int len = 0;
+	off_t begin;
+	char *freewait;
+	char *freelockwait;
+	char *lowlockwait;
+
+	freewait =
+	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
+	freelockwait =
+	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
+	lowlockwait =
+	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
+
+	len += sprintf(buffer,
+		       "JFS TxAnchor\n"
+		       "============\n"
+		       "freetid = %d\n"
+		       "freewait = %s\n"
+		       "freelock = %d\n"
+		       "freelockwait = %s\n"
+		       "lowlockwait = %s\n"
+		       "tlocksInUse = %d\n"
+		       "jfs_tlocks_low = %d\n"
+		       "unlock_queue is %sempty\n",
+		       TxAnchor.freetid,
+		       freewait,
+		       TxAnchor.freelock,
+		       freelockwait,
+		       lowlockwait,
+		       TxAnchor.tlocksInUse,
+		       jfs_tlocks_low,
+		       list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if (len > length)
+		len = length;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
+int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
+		     int *eof, void *data)
+{
+	int len = 0;
+	off_t begin;
+
+	len += sprintf(buffer,
+		       "JFS TxStats\n"
+		       "===========\n"
+		       "calls to txBegin = %d\n"
+		       "txBegin blocked by sync barrier = %d\n"
+		       "txBegin blocked by tlocks low = %d\n"
+		       "txBegin blocked by no free tid = %d\n"
+		       "calls to txBeginAnon = %d\n"
+		       "txBeginAnon blocked by sync barrier = %d\n"
+		       "txBeginAnon blocked by tlocks low = %d\n"
+		       "calls to txLockAlloc = %d\n"
+		       "tLockAlloc blocked by no free lock = %d\n",
+		       TxStat.txBegin,
+		       TxStat.txBegin_barrier,
+		       TxStat.txBegin_lockslow,
+		       TxStat.txBegin_freetid,
+		       TxStat.txBeginAnon,
+		       TxStat.txBeginAnon_barrier,
+		       TxStat.txBeginAnon_lockslow,
+		       TxStat.txLockAlloc,
+		       TxStat.txLockAlloc_freelock);
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if (len > length)
+		len = length;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+#endif
diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h
new file mode 100644
index 0000000..b71b82c
--- /dev/null
+++ b/fs/jfs/jfs_txnmgr.h
@@ -0,0 +1,318 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_TXNMGR
+#define _H_JFS_TXNMGR
+
+#include "jfs_logmgr.h"
+
+/*
+ * Hide implementation of TxBlock and TxLock
+ */
+#define tid_to_tblock(tid) (&TxBlock[tid])
+
+#define lid_to_tlock(lid) (&TxLock[lid])
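+
+/*
+ * Illustrative sketch (not part of the original source): a transaction's
+ * tlocks form a singly linked chain of lid_t indices anchored at
+ * tblk->next, which jfs_txnmgr.c (e.g. txLog()) walks as follows:
+ *
+ *	struct tblock *tblk = tid_to_tblock(tid);
+ *	struct tlock *tlck;
+ *	lid_t lid;
+ *
+ *	for (lid = tblk->next; lid; lid = tlck->next) {
+ *		tlck = lid_to_tlock(lid);
+ *		(process tlck here)
+ *	}
+ */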
+
+/*
+ *	transaction block
+ */
+struct tblock {
+	/*
+	 * tblock and jbuf_t common area: struct logsyncblk
+	 *
+	 * the following 5 fields are the same as struct logsyncblk
+	 * which is common to tblock and jbuf to form logsynclist
+	 */
+	u16 xflag;		/* tx commit type */
+	u16 flag;		/* tx commit state */
+	lid_t dummy;		/* Must keep structures common */
+	s32 lsn;		/* recovery lsn */
+	struct list_head synclist;	/* logsynclist link */
+
+	/* lock management */
+	struct super_block *sb;	/* super block */
+	lid_t next;		/* index of first tlock of tid */
+	lid_t last;		/* index of last tlock of tid */
+	wait_queue_head_t waitor;	/* tids waiting on this tid */
+
+	/* log management */
+	u32 logtid;		/* log transaction id */
+
+	/* commit management */
+	struct list_head cqueue;	/* commit queue list */
+	s32 clsn;		/* commit lsn */
+	struct lbuf *bp;
+	s32 pn;			/* commit record log page number */
+	s32 eor;		/* commit record eor */
+	wait_queue_head_t gcwait;	/* group commit event list:
+					 * ready transactions wait on this
+					 * event for group commit completion.
+					 */
+	union {
+		struct inode *ip; /* inode being deleted */
+		pxd_t ixpxd;	/* pxd of inode extent for created inode */
+	} u;
+	u32 ino;		/* inode number being created */
+};
+
+extern struct tblock *TxBlock;	/* transaction block table */
+
+/* commit flags: tblk->xflag */
+#define	COMMIT_SYNC	0x0001	/* synchronous commit */
+#define	COMMIT_FORCE	0x0002	/* force pageout at end of commit */
+#define	COMMIT_FLUSH	0x0004	/* init flush at end of commit */
+#define COMMIT_MAP	0x00f0
+#define	COMMIT_PMAP	0x0010	/* update pmap */
+#define	COMMIT_WMAP	0x0020	/* update wmap */
+#define	COMMIT_PWMAP	0x0040	/* update pwmap */
+#define	COMMIT_FREE	0x0f00
+#define	COMMIT_DELETE	0x0100	/* inode delete */
+#define	COMMIT_TRUNCATE	0x0200	/* file truncation */
+#define	COMMIT_CREATE	0x0400	/* inode create */
+#define	COMMIT_LAZY	0x0800	/* lazy commit */
+#define COMMIT_PAGE	0x1000	/* Identifies element as metapage */
+#define COMMIT_INODE	0x2000	/* Identifies element as inode */
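+
+/*
+ * Illustrative sketch (not part of the original source): the commit type
+ * bits in tblk->xflag select how the u union above is interpreted, as in
+ * txUpdateMap(), where u.ixpxd is the inode extent of a created inode and
+ * u.ip is the inode being deleted:
+ *
+ *	if (tblk->xflag & COMMIT_CREATE)
+ *		pxdlock.pxd = tblk->u.ixpxd;
+ *	else if (tblk->xflag & COMMIT_DELETE)
+ *		iput(tblk->u.ip);
+ */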
+
+/* group commit flags tblk->flag: see jfs_logmgr.h */
+
+/*
+ *	transaction lock
+ */
+struct tlock {
+	lid_t next;		/* 2: index next lockword on tid locklist
+				 *          next lockword on freelist
+				 */
+	tid_t tid;		/* 2: transaction id holding lock */
+
+	u16 flag;		/* 2: lock control */
+	u16 type;		/* 2: log type */
+
+	struct metapage *mp;	/* 4/8: object page buffer locked */
+	struct inode *ip;	/* 4/8: object */
+	/* (16) */
+
+	s16 lock[24];		/* 48: overlay area */
+};				/* (64) */
+
+extern struct tlock *TxLock;	/* transaction lock table */
+
+/*
+ * tlock flag
+ */
+/* txLock state */
+#define tlckPAGELOCK		0x8000
+#define tlckINODELOCK		0x4000
+#define tlckLINELOCK		0x2000
+#define tlckINLINELOCK		0x1000
+/* lmLog state */
+#define tlckLOG			0x0800
+/* updateMap state */
+#define	tlckUPDATEMAP		0x0080
+/* freeLock state */
+#define tlckFREELOCK		0x0008
+#define tlckWRITEPAGE		0x0004
+#define tlckFREEPAGE		0x0002
+
+/*
+ * tlock type
+ */
+#define	tlckTYPE		0xfe00
+#define	tlckINODE		0x8000
+#define	tlckXTREE		0x4000
+#define	tlckDTREE		0x2000
+#define	tlckMAP			0x1000
+#define	tlckEA			0x0800
+#define	tlckACL			0x0400
+#define	tlckDATA		0x0200
+#define	tlckBTROOT		0x0100
+
+#define	tlckOPERATION		0x00ff
+#define tlckGROW		0x0001	/* file grow */
+#define tlckREMOVE		0x0002	/* file delete */
+#define tlckTRUNCATE		0x0004	/* file truncate */
+#define tlckRELOCATE		0x0008	/* file/directory relocate */
+#define tlckENTRY		0x0001	/* directory insert/delete */
+#define tlckEXTEND		0x0002	/* directory extend in-line */
+#define tlckSPLIT		0x0010	/* split page */
+#define tlckNEW			0x0020	/* new page from split */
+#define tlckFREE		0x0040	/* free page */
+#define tlckRELINK		0x0080	/* update sibling pointer */
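+
+/*
+ * Illustrative sketch (not part of the original source): tlck->type packs
+ * one object-type bit (masked with tlckTYPE) plus low-order operation
+ * bits, so txLog() dispatches on the masked value and each per-type
+ * logger then tests the operation flags (tlckNEW, tlckENTRY, tlckFREE, ...):
+ *
+ *	switch (tlck->type & tlckTYPE) {
+ *	case tlckXTREE:
+ *		xtLog(log, tblk, lrd, tlck);
+ *		break;
+ *	case tlckDTREE:
+ *		dtLog(log, tblk, lrd, tlck);
+ *		break;
+ *	...
+ *	}
+ */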
+
+/*
+ *	linelock for lmLog()
+ *
+ * note: linelock and its variations are overlaid
+ * at tlock.lock: watch for alignment;
+ */
+struct lv {
+	u8 offset;		/* 1: */
+	u8 length;		/* 1: */
+};				/* (2) */
+
+#define	TLOCKSHORT	20
+#define	TLOCKLONG	28
+
+struct linelock {
+	lid_t next;		/* 2: next linelock */
+
+	s8 maxcnt;		/* 1: */
+	s8 index;		/* 1: */
+
+	u16 flag;		/* 2: */
+	u8 type;		/* 1: */
+	u8 l2linesize;		/* 1: log2 of linesize */
+	/* (8) */
+
+	struct lv lv[20];	/* 40: */
+}; 				/* (48) */
+
+#define dt_lock	linelock
+
+struct xtlock {
+	lid_t next;		/* 2: */
+
+	s8 maxcnt;		/* 1: */
+	s8 index;		/* 1: */
+
+	u16 flag;		/* 2: */
+	u8 type;		/* 1: */
+	u8 l2linesize;		/* 1: log2 of linesize */
+				/* (8) */
+
+	struct lv header;	/* 2: */
+	struct lv lwm;		/* 2: low water mark */
+	struct lv hwm;		/* 2: high water mark */
+	struct lv twm;		/* 2: */
+				/* (16) */
+
+	s32 pxdlock[8];		/* 32: */
+};				/* (48) */
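+
+/*
+ * Illustrative sketch (not part of the original source): xtLog() overlays
+ * an xtlock on tlck->lock and reformats the header lv from the hwm mark
+ * before handing the linelock to lmLog(), e.g. when logging the freed
+ * entries of a deleted page:
+ *
+ *	struct xtlock *xtlck = (struct xtlock *) &tlck->lock;
+ *	int hwm = xtlck->hwm.offset;
+ *
+ *	xtlck->header.offset = XTENTRYSTART;
+ *	xtlck->header.length = hwm - XTENTRYSTART + 1;
+ *	xtlck->index = 1;
+ */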
+
+
+/*
+ *	maplock for txUpdateMap()
+ *
+ * note: maplock and its variations are overlaid
+ * at tlock.lock/linelock: watch for alignment;
+ * N.B. next field may be set by linelock, and should not
+ * be modified by maplock;
+ * N.B. index of the first pxdlock specifies the index of the next
+ * free maplock (i.e., the number of maplocks) in the tlock;
+ */
+struct maplock {
+	lid_t next;		/* 2: */
+
+	u8 maxcnt;		/* 1: */
+	u8 index;		/* 1: next free maplock index */
+
+	u16 flag;		/* 2: */
+	u8 type;		/* 1: */
+	u8 count;		/* 1: number of pxd/xad */
+				/* (8) */
+
+	pxd_t pxd;		/* 8: */
+};				/* (16): */
+
+/* maplock flag */
+#define	mlckALLOC		0x00f0
+#define	mlckALLOCXADLIST	0x0080
+#define	mlckALLOCPXDLIST	0x0040
+#define	mlckALLOCXAD		0x0020
+#define	mlckALLOCPXD		0x0010
+#define	mlckFREE		0x000f
+#define	mlckFREEXADLIST		0x0008
+#define	mlckFREEPXDLIST		0x0004
+#define	mlckFREEXAD		0x0002
+#define	mlckFREEPXD		0x0001
+
+#define	pxd_lock	maplock
+
+struct xdlistlock {
+	lid_t next;		/* 2: */
+
+	u8 maxcnt;		/* 1: */
+	u8 index;		/* 1: */
+
+	u16 flag;		/* 2: */
+	u8 type;		/* 1: */
+	u8 count;		/* 1: number of pxd/xad */
+				/* (8) */
+
+	/*
+	 * We need xdlist to be 64 bits (8 bytes), regardless of
+	 * whether void * is 32 or 64 bits
+	 */
+	union {
+		void *_xdlist;	/* pxd/xad list */
+		s64 pad;	/* 8: Force 64-bit xdlist size */
+	} union64;
+};				/* (16): */
+
+#define xdlist union64._xdlist
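+
+/*
+ * Illustrative sketch (not part of the original source): the maplock
+ * variants overlay tlck->lock, so the logging code casts and fills in the
+ * list it wants txUpdateMap() to process; p, lwm and next below are
+ * xtLog() locals:
+ *
+ *	struct xdlistlock *xadlock = (struct xdlistlock *) &tlck->lock;
+ *
+ *	tlck->flag |= tlckUPDATEMAP;
+ *	xadlock->flag = mlckALLOCXADLIST;
+ *	xadlock->count = next - lwm;
+ *	xadlock->xdlist = &p->xad[lwm];
+ */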
+
+/*
+ *	commit
+ *
+ * parameter to the commit manager routines
+ */
+struct commit {
+	tid_t tid;		/* tid = index of tblock */
+	int flag;		/* flags */
+	struct jfs_log *log;	/* log */
+	struct super_block *sb;	/* superblock */
+
+	int nip;		/* number of entries in iplist */
+	struct inode **iplist;	/* list of pointers to inodes */
+
+	/* log record descriptor on 64-bit boundary */
+	struct lrd lrd;		/* : log record descriptor */
+};
+
+/*
+ * external declarations
+ */
+extern struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage *mp,
+			    int flag);
+
+extern struct tlock *txMaplock(tid_t tid, struct inode *ip, int flag);
+
+extern int txCommit(tid_t tid, int nip, struct inode **iplist, int flag);
+
+extern tid_t txBegin(struct super_block *sb, int flag);
+
+extern void txBeginAnon(struct super_block *sb);
+
+extern void txEnd(tid_t tid);
+
+extern void txAbort(tid_t tid, int dirty);
+
+extern struct linelock *txLinelock(struct linelock * tlock);
+
+extern void txFreeMap(struct inode *ip, struct maplock * maplock,
+		      struct tblock * tblk, int maptype);
+
+extern void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea);
+
+extern void txFreelock(struct inode *ip);
+
+extern int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+		 struct tlock * tlck);
+
+extern void txQuiesce(struct super_block *sb);
+
+extern void txResume(struct super_block *sb);
+#endif				/* _H_JFS_TXNMGR */
diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h
new file mode 100644
index 0000000..5bfad39
--- /dev/null
+++ b/fs/jfs/jfs_types.h
@@ -0,0 +1,192 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_TYPES
+#define	_H_JFS_TYPES
+
+/*
+ *	jfs_types.h:
+ *
+ * basic type/utility  definitions
+ *
+ * note: this header file must be the first include file
+ * in the JFS include list of every JFS .c file.
+ */
+
+#include <linux/types.h>
+#include <linux/nls.h>
+
+#include "endian24.h"
+
+/*
+ * transaction and lock id's
+ *
+ * Don't change these without carefully considering the impact on the
+ * size and alignment of all of the linelock variants
+ */
+typedef u16 tid_t;
+typedef u16 lid_t;
+
+/*
+ * Almost identical to Linux's timespec, but not quite
+ */
+struct timestruc_t {
+	__le32 tv_sec;
+	__le32 tv_nsec;
+};
+
+/*
+ *	handy
+ */
+
+#define LEFTMOSTONE	0x80000000
+#define	HIGHORDER	0x80000000u	/* high order bit on            */
+#define	ONES		0xffffffffu	/* all bits on                  */
+
+typedef int boolean_t;
+#define TRUE 1
+#define FALSE 0
+
+/*
+ *	logical xd (lxd)
+ */
+typedef struct {
+	unsigned len:24;
+	unsigned off1:8;
+	u32 off2;
+} lxd_t;
+
+/* lxd_t field construction */
+#define	LXDlength(lxd, length32)	( (lxd)->len = length32 )
+#define	LXDoffset(lxd, offset64)\
+{\
+	(lxd)->off1 = ((s64)offset64) >> 32;\
+	(lxd)->off2 = (offset64) & 0xffffffff;\
+}
+
+/* lxd_t field extraction */
+#define	lengthLXD(lxd)	( (lxd)->len )
+#define	offsetLXD(lxd)\
+	( ((s64)((lxd)->off1)) << 32 | (lxd)->off2 )
+
+/* lxd list */
+struct lxdlist {
+	s16 maxnlxd;
+	s16 nlxd;
+	lxd_t *lxd;
+};
+
+/*
+ *	physical xd (pxd)
+ */
+typedef struct {
+	unsigned len:24;
+	unsigned addr1:8;
+	__le32 addr2;
+} pxd_t;
+
+/* xd_t field construction */
+
+#define	PXDlength(pxd, length32)	((pxd)->len = __cpu_to_le24(length32))
+#define	PXDaddress(pxd, address64)\
+{\
+	(pxd)->addr1 = ((s64)address64) >> 32;\
+	(pxd)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+}
+
+/* xd_t field extraction */
+#define	lengthPXD(pxd)	__le24_to_cpu((pxd)->len)
+#define	addressPXD(pxd)\
+	( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2))
+
+#define MAXTREEHEIGHT 8
+/* pxd list */
+struct pxdlist {
+	s16 maxnpxd;
+	s16 npxd;
+	pxd_t pxd[MAXTREEHEIGHT];
+};
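+
+/*
+ * Illustrative sketch (not part of the JFS sources): how a 40-bit block
+ * address round-trips through the PXD construction/extraction macros above.
+ * "example_pxd_roundtrip" and the sample values are hypothetical.
+ */
+static inline s64 example_pxd_roundtrip(void)
+{
+	pxd_t pxd;
+	s64 blkno = 0x123456789aLL;	/* 40-bit block address */
+
+	PXDaddress(&pxd, blkno);	/* addr1 = 0x12, addr2 = le32(0x3456789a) */
+	PXDlength(&pxd, 8);		/* extent of 8 filesystem blocks */
+
+	return addressPXD(&pxd);	/* reassembles 0x123456789a */
+}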
+
+
+/*
+ *	data extent descriptor (dxd)
+ */
+typedef struct {
+	unsigned flag:8;	/* 1: flags */
+	unsigned rsrvd:24;
+	__le32 size;		/* 4: size in bytes */
+	unsigned len:24;	/* 3: length in unit of fsblksize */
+	unsigned addr1:8;	/* 1: address in unit of fsblksize */
+	__le32 addr2;		/* 4: address in unit of fsblksize */
+} dxd_t;			/* - 16 - */
+
+/* dxd_t flags */
+#define	DXD_INDEX	0x80	/* B+-tree index */
+#define	DXD_INLINE	0x40	/* in-line data extent */
+#define	DXD_EXTENT	0x20	/* out-of-line single extent */
+#define	DXD_FILE	0x10	/* out-of-line file (inode) */
+#define DXD_CORRUPT	0x08	/* Inconsistency detected */
+
+/* dxd_t field construction
+ *	Conveniently, the PXD macros work for DXD
+ */
+#define	DXDlength	PXDlength
+#define	DXDaddress	PXDaddress
+#define	lengthDXD	lengthPXD
+#define	addressDXD	addressPXD
+#define DXDsize(dxd, size32) ((dxd)->size = cpu_to_le32(size32))
+#define sizeDXD(dxd)	le32_to_cpu((dxd)->size)
+
+/*
+ *      directory entry argument
+ */
+struct component_name {
+	int namlen;
+	wchar_t *name;
+};
+
+
+/*
+ *	DASD limit information - stored in directory inode
+ */
+struct dasd {
+	u8 thresh;		/* Alert Threshold (in percent) */
+	u8 delta;		/* Alert Threshold delta (in percent)   */
+	u8 rsrvd1;
+	u8 limit_hi;		/* DASD limit (in logical blocks)       */
+	__le32 limit_lo;	/* DASD limit (in logical blocks)       */
+	u8 rsrvd2[3];
+	u8 used_hi;		/* DASD usage (in logical blocks)       */
+	__le32 used_lo;		/* DASD usage (in logical blocks)       */
+};
+
+#define DASDLIMIT(dasdp) \
+	(((u64)((dasdp)->limit_hi) << 32) + __le32_to_cpu((dasdp)->limit_lo))
+#define setDASDLIMIT(dasdp, limit)\
+{\
+	(dasdp)->limit_hi = ((u64)limit) >> 32;\
+	(dasdp)->limit_lo = __cpu_to_le32(limit);\
+}
+#define DASDUSED(dasdp) \
+	(((u64)((dasdp)->used_hi) << 32) + __le32_to_cpu((dasdp)->used_lo))
+#define setDASDUSED(dasdp, used)\
+{\
+	(dasdp)->used_hi = ((u64)used) >> 32;\
+	(dasdp)->used_lo = __cpu_to_le32(used);\
+}
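+
+/*
+ * Illustrative sketch (not part of the JFS sources): the DASD limit is a
+ * 40-bit block count split across limit_hi/limit_lo by the macros above.
+ * "example_dasd_limit" and the sample value are hypothetical.
+ */
+static inline u64 example_dasd_limit(struct dasd *dasdp)
+{
+	u64 limit = 0x0102030405ULL;	/* 40-bit block count */
+
+	setDASDLIMIT(dasdp, limit);	/* limit_hi = 0x01, limit_lo = le32(0x02030405) */
+
+	return DASDLIMIT(dasdp);	/* reassembles 0x0102030405 */
+}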
+
+#endif				/* !_H_JFS_TYPES */
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
new file mode 100644
index 0000000..f31a9e3
--- /dev/null
+++ b/fs/jfs/jfs_umount.c
@@ -0,0 +1,178 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ *	jfs_umount.c
+ *
+ * note: file system in transition to aggregate/fileset:
+ * (ref. jfs_mount.c)
+ *
+ * file system unmount is interpreted as unmount of the single/only
+ * fileset in the aggregate and, if unmount of the last fileset,
+ * as unmount of the aggregate;
+ */
+
+#include <linux/fs.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_imap.h"
+#include "jfs_metapage.h"
+#include "jfs_debug.h"
+
+/*
+ * NAME:	jfs_umount(sb)
+ *
+ * FUNCTION:	unmount the file system mounted on the given superblock
+ *
+ * PARAMETER:	sb	- superblock of the file system to unmount
+ *
+ * RETURN:	0 on success; otherwise the error returned by lmLogClose()
+ */
+int jfs_umount(struct super_block *sb)
+{
+	struct address_space *bdev_mapping = sb->s_bdev->bd_inode->i_mapping;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct inode *ipbmap = sbi->ipbmap;
+	struct inode *ipimap = sbi->ipimap;
+	struct inode *ipaimap = sbi->ipaimap;
+	struct inode *ipaimap2 = sbi->ipaimap2;
+	struct jfs_log *log;
+	int rc = 0;
+
+	jfs_info("UnMount JFS: sb:0x%p", sb);
+
+	/*
+	 *      update superblock and close log 
+	 *
+	 * if mounted read-write and log based recovery was enabled
+	 */
+	if ((log = sbi->log))
+		/*
+		 * Wait for outstanding transactions to be written to log: 
+		 */
+		jfs_flush_journal(log, 2);
+
+	/*
+	 * close fileset inode allocation map (aka fileset inode)
+	 */
+	diUnmount(ipimap, 0);
+
+	diFreeSpecial(ipimap);
+	sbi->ipimap = NULL;
+
+	/*
+	 * close secondary aggregate inode allocation map
+	 */
+	ipaimap2 = sbi->ipaimap2;
+	if (ipaimap2) {
+		diUnmount(ipaimap2, 0);
+		diFreeSpecial(ipaimap2);
+		sbi->ipaimap2 = NULL;
+	}
+
+	/*
+	 * close aggregate inode allocation map
+	 */
+	ipaimap = sbi->ipaimap;
+	diUnmount(ipaimap, 0);
+	diFreeSpecial(ipaimap);
+	sbi->ipaimap = NULL;
+
+	/*
+	 * close aggregate block allocation map
+	 */
+	dbUnmount(ipbmap, 0);
+
+	diFreeSpecial(ipbmap);
+	sbi->ipbmap = NULL;
+
+	/*
+	 * Make sure all metadata makes it to disk before we mark
+	 * the superblock as clean
+	 */
+	filemap_fdatawrite(bdev_mapping);
+	filemap_fdatawait(bdev_mapping);
+
+	/*
+	 * ensure all file system file pages are propagated to their
+	 * home blocks on disk (and their in-memory buffer pages are 
+	 * invalidated) BEFORE updating file system superblock state
+	 * (to signify file system is unmounted cleanly, and thus in 
+	 * consistent state) and log superblock active file system 
+	 * list (to signify skip logredo()).
+	 */
+	if (log) {		/* log = NULL if read-only mount */
+		updateSuper(sb, FM_CLEAN);
+
+		/* Restore default gfp_mask for bdev */
+		mapping_set_gfp_mask(bdev_mapping, GFP_USER);
+
+		/*
+		 * close log: 
+		 *
+		 * remove file system from log active file system list.
+		 */
+		rc = lmLogClose(sb);
+	}
+	jfs_info("UnMount JFS Complete: rc = %d", rc);
+	return rc;
+}
+
+
+int jfs_umount_rw(struct super_block *sb)
+{
+	struct address_space *bdev_mapping = sb->s_bdev->bd_inode->i_mapping;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_log *log = sbi->log;
+
+	if (!log)
+		return 0;
+
+	/*
+	 * close log: 
+	 *
+	 * remove file system from log active file system list.
+	 */
+	jfs_flush_journal(log, 2);
+
+	/*
+	 * Make sure all metadata makes it to disk
+	 */
+	dbSync(sbi->ipbmap);
+	diSync(sbi->ipimap);
+
+	/*
+	 * Note that we have to do this even if sync_blockdev() will
+	 * do exactly the same a few instructions later:  We can't
+	 * mark the superblock clean before everything is flushed to
+	 * disk.
+	 */
+	filemap_fdatawrite(bdev_mapping);
+	filemap_fdatawait(bdev_mapping);
+
+	updateSuper(sb, FM_CLEAN);
+
+	/* Restore default gfp_mask for bdev */
+	mapping_set_gfp_mask(bdev_mapping, GFP_USER);
+
+	return lmLogClose(sb);
+}
diff --git a/fs/jfs/jfs_unicode.c b/fs/jfs/jfs_unicode.c
new file mode 100644
index 0000000..b32208aa
--- /dev/null
+++ b/fs/jfs/jfs_unicode.c
@@ -0,0 +1,137 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_unicode.h"
+#include "jfs_debug.h"
+
+/*
+ * NAME:	jfs_strfromUCS_le()
+ *
+ * FUNCTION:	Convert little-endian unicode string to character string
+ *
+ */
+int jfs_strfromUCS_le(char *to, const __le16 * from,
+		      int len, struct nls_table *codepage)
+{
+	int i;
+	int outlen = 0;
+	static int warn_again = 5;	/* Only warn up to 5 times total */
+	int warn = !!warn_again;	/* once per string */
+
+	if (codepage) {
+		for (i = 0; (i < len) && from[i]; i++) {
+			int charlen;
+			charlen =
+			    codepage->uni2char(le16_to_cpu(from[i]),
+					       &to[outlen],
+					       NLS_MAX_CHARSET_SIZE);
+			if (charlen > 0)
+				outlen += charlen;
+			else
+				to[outlen++] = '?';
+		}
+	} else {
+		for (i = 0; (i < len) && from[i]; i++) {
+			if (le16_to_cpu(from[i]) & 0xff00) {
+				if (warn) {
+					warn--;
+					warn_again--;
+					printk(KERN_ERR
+			"non-latin1 character 0x%x found in JFS file name\n", 
+		       			       le16_to_cpu(from[i]));
+					printk(KERN_ERR
+				"mount with iocharset=utf8 to access\n");
+				}
+				to[i] = '?';
+			}
+			else
+				to[i] = (char) (le16_to_cpu(from[i]));
+		}
+		outlen = i;
+	}
+	to[outlen] = 0;
+	return outlen;
+}
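+
+/*
+ * Illustrative sketch (not part of the JFS sources): decoding an on-disk
+ * little-endian name for a debug message.  "example_print_name" and the
+ * 32-character cap are hypothetical; a NULL nls_tab selects the latin1
+ * fallback path above.
+ */
+static inline void example_print_name(struct super_block *sb,
+				      const __le16 * uniname, int unilen)
+{
+	char buf[32 * NLS_MAX_CHARSET_SIZE + 1];
+
+	jfs_strfromUCS_le(buf, uniname, min(unilen, 32),
+			  JFS_SBI(sb)->nls_tab);
+	jfs_info("name: %s", buf);
+}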
+
+/*
+ * NAME:	jfs_strtoUCS()
+ *
+ * FUNCTION:	Convert character string to unicode string
+ *
+ */
+static int jfs_strtoUCS(wchar_t * to, const unsigned char *from, int len,
+		struct nls_table *codepage)
+{
+	int charlen;
+	int i;
+
+	if (codepage) {
+		for (i = 0; len && *from; i++, from += charlen, len -= charlen)
+		{
+			charlen = codepage->char2uni(from, len, &to[i]);
+			if (charlen < 1) {
+				jfs_err("jfs_strtoUCS: char2uni returned %d.",
+					charlen);
+				jfs_err("charset = %s, char = 0x%x",
+					codepage->charset, *from);
+				return charlen;
+			}
+		}
+	} else {
+		for (i = 0; (i < len) && from[i]; i++)
+			to[i] = (wchar_t) from[i];
+	}
+
+	to[i] = 0;
+	return i;
+}
+
+/*
+ * NAME:	get_UCSname()
+ *
+ * FUNCTION:	Allocate and translate to unicode string
+ *
+ */
+int get_UCSname(struct component_name * uniName, struct dentry *dentry)
+{
+	struct nls_table *nls_tab = JFS_SBI(dentry->d_sb)->nls_tab;
+	int length = dentry->d_name.len;
+
+	if (length > JFS_NAME_MAX)
+		return -ENAMETOOLONG;
+
+	uniName->name =
+	    kmalloc((length + 1) * sizeof(wchar_t), GFP_NOFS);
+
+	if (uniName->name == NULL)
+		return -ENOMEM;
+
+	uniName->namlen = jfs_strtoUCS(uniName->name, dentry->d_name.name,
+				       length, nls_tab);
+
+	if (uniName->namlen < 0) {
+		kfree(uniName->name);
+		return uniName->namlen;
+	}
+
+	return 0;
+}
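+
+/*
+ * Illustrative sketch (not part of the JFS sources): the usual calling
+ * pattern for get_UCSname()/free_UCSname() in a directory operation.
+ * "example_use_name" is hypothetical.
+ */
+static inline int example_use_name(struct dentry *dentry)
+{
+	struct component_name key;
+	int rc;
+
+	rc = get_UCSname(&key, dentry);	/* allocates key.name with GFP_NOFS */
+	if (rc)
+		return rc;
+
+	/* ... key.name/key.namlen would be handed to a dtree routine ... */
+
+	free_UCSname(&key);		/* kfree(key.name) */
+	return 0;
+}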
diff --git a/fs/jfs/jfs_unicode.h b/fs/jfs/jfs_unicode.h
new file mode 100644
index 0000000..69e25eb
--- /dev/null
+++ b/fs/jfs/jfs_unicode.h
@@ -0,0 +1,155 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *   Portions Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_UNICODE
+#define _H_JFS_UNICODE
+
+#include <asm/byteorder.h>
+#include "jfs_types.h"
+
+typedef struct {
+	wchar_t start;
+	wchar_t end;
+	signed char *table;
+} UNICASERANGE;
+
+extern signed char UniUpperTable[512];
+extern UNICASERANGE UniUpperRange[];
+extern int get_UCSname(struct component_name *, struct dentry *);
+extern int jfs_strfromUCS_le(char *, const __le16 *, int, struct nls_table *);
+
+#define free_UCSname(COMP) kfree((COMP)->name)
+
+/*
+ * UniStrcpy:  Copy a string
+ */
+static inline wchar_t *UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
+{
+	wchar_t *anchor = ucs1;	/* save the start of result string */
+
+	while ((*ucs1++ = *ucs2++));
+	return anchor;
+}
+
+
+
+/*
+ * UniStrncpy:  Copy length limited string with pad
+ */
+static inline __le16 *UniStrncpy_le(__le16 * ucs1, const __le16 * ucs2,
+				  size_t n)
+{
+	__le16 *anchor = ucs1;
+
+	while (n-- && *ucs2)	/* Copy the strings */
+		*ucs1++ = *ucs2++;
+
+	n++;
+	while (n--)		/* Pad with nulls */
+		*ucs1++ = 0;
+	return anchor;
+}
+
+/*
+ * UniStrncmp_le:  Compare length limited string - native to little-endian
+ */
+static inline int UniStrncmp_le(const wchar_t * ucs1, const __le16 * ucs2,
+				size_t n)
+{
+	if (!n)
+		return 0;	/* Null strings are equal */
+	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
+		ucs1++;
+		ucs2++;
+	}
+	return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
+}
+
+/*
+ * UniStrncpy_to_le:  Copy length limited string with pad to little-endian
+ */
+static inline __le16 *UniStrncpy_to_le(__le16 * ucs1, const wchar_t * ucs2,
+				       size_t n)
+{
+	__le16 *anchor = ucs1;
+
+	while (n-- && *ucs2)	/* Copy the strings */
+		*ucs1++ = cpu_to_le16(*ucs2++);
+
+	n++;
+	while (n--)		/* Pad with nulls */
+		*ucs1++ = 0;
+	return anchor;
+}
+
+/*
+ * UniStrncpy_from_le:  Copy length limited string with pad from little-endian
+ */
+static inline wchar_t *UniStrncpy_from_le(wchar_t * ucs1, const __le16 * ucs2,
+					  size_t n)
+{
+	wchar_t *anchor = ucs1;
+
+	while (n-- && *ucs2)	/* Copy the strings */
+		*ucs1++ = __le16_to_cpu(*ucs2++);
+
+	n++;
+	while (n--)		/* Pad with nulls */
+		*ucs1++ = 0;
+	return anchor;
+}
+
+/*
+ * UniToupper:  Convert a unicode character to upper case
+ */
+static inline wchar_t UniToupper(wchar_t uc)
+{
+	UNICASERANGE *rp;
+
+	if (uc < sizeof(UniUpperTable)) {	/* Latin characters */
+		return uc + UniUpperTable[uc];	/* Use base tables */
+	} else {
+		rp = UniUpperRange;	/* Use range tables */
+		while (rp->start) {
+			if (uc < rp->start)	/* Before start of range */
+				return uc;	/* Uppercase = input */
+			if (uc <= rp->end)	/* In range */
+				return uc + rp->table[uc - rp->start];
+			rp++;	/* Try next range */
+		}
+	}
+	return uc;		/* Past last range */
+}
+
+
+/*
+ * UniStrupr:  Upper case a unicode string
+ */
+static inline wchar_t *UniStrupr(wchar_t * upin)
+{
+	wchar_t *up;
+
+	up = upin;
+	while (*up) {		/* For all characters */
+		*up = UniToupper(*up);
+		up++;
+	}
+	return upin;		/* Return input pointer */
+}
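+
+/*
+ * Illustrative sketch (not part of the JFS sources): case-insensitive
+ * comparison of two in-memory names built on UniToupper() above.
+ * "example_ci_cmp" is hypothetical.
+ */
+static inline int example_ci_cmp(const wchar_t * a, const wchar_t * b,
+				 size_t n)
+{
+	while (n--) {
+		wchar_t ua = UniToupper(*a++);
+		wchar_t ub = UniToupper(*b++);
+
+		if (ua != ub)
+			return (int) ua - (int) ub;
+		if (!ua)	/* both strings ended */
+			break;
+	}
+	return 0;
+}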
+
+#endif				/* !_H_JFS_UNICODE */
diff --git a/fs/jfs/jfs_uniupr.c b/fs/jfs/jfs_uniupr.c
new file mode 100644
index 0000000..4ab185d
--- /dev/null
+++ b/fs/jfs/jfs_uniupr.c
@@ -0,0 +1,134 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include "jfs_unicode.h"
+
+/*
+ * Latin upper case
+ */
+signed char UniUpperTable[512] = {
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 000-00f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 010-01f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 020-02f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 030-03f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 040-04f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 050-05f */
+   0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 060-06f */
+ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,  0,  0,  0,  0,  0, /* 070-07f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 080-08f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 090-09f */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 0a0-0af */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 0b0-0bf */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 0c0-0cf */
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 0d0-0df */
+ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 0e0-0ef */
+ -32,-32,-32,-32,-32,-32,-32,  0,-32,-32,-32,-32,-32,-32,-32,121, /* 0f0-0ff */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 100-10f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 110-11f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 120-12f */
+   0,  0,  0, -1,  0, -1,  0, -1,  0,  0, -1,  0, -1,  0, -1,  0, /* 130-13f */
+  -1,  0, -1,  0, -1,  0, -1,  0, -1,  0,  0, -1,  0, -1,  0, -1, /* 140-14f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 150-15f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 160-16f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0,  0, -1,  0, -1,  0, -1,  0, /* 170-17f */
+   0,  0,  0, -1,  0, -1,  0,  0, -1,  0,  0,  0, -1,  0,  0,  0, /* 180-18f */
+   0,  0, -1,  0,  0,  0,  0,  0,  0, -1,  0,  0,  0,  0,  0,  0, /* 190-19f */
+   0, -1,  0, -1,  0, -1,  0,  0, -1,  0,  0,  0,  0, -1,  0,  0, /* 1a0-1af */
+  -1,  0,  0,  0, -1,  0, -1,  0,  0, -1,  0,  0,  0, -1,  0,  0, /* 1b0-1bf */
+   0,  0,  0,  0,  0, -1, -2,  0, -1, -2,  0, -1, -2,  0, -1,  0, /* 1c0-1cf */
+  -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,-79,  0, -1, /* 1d0-1df */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e0-1ef */
+   0,  0, -1, -2,  0, -1,  0,  0,  0, -1,  0, -1,  0, -1,  0, -1, /* 1f0-1ff */
+};
+
+/* Upper case range - Greek */
+static signed char UniCaseRangeU03a0[47] = {
+   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,-38,-37,-37,-37, /* 3a0-3af */
+   0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 3b0-3bf */
+ -32,-32,-31,-32,-32,-32,-32,-32,-32,-32,-32,-32,-64,-63,-63,
+};
+
+/* Upper case range - Cyrillic */
+static signed char UniCaseRangeU0430[48] = {
+ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 430-43f */
+ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 440-44f */
+   0,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,  0,-80,-80, /* 450-45f */
+};
+
+/* Upper case range - Extended cyrillic */
+static signed char UniCaseRangeU0490[61] = {
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 490-49f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 4a0-4af */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 4b0-4bf */
+   0,  0, -1,  0, -1,  0,  0,  0, -1,  0,  0,  0, -1,
+};
+
+/* Upper case range - Extended latin and greek */
+static signed char UniCaseRangeU1e00[509] = {
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e00-1e0f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e10-1e1f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e20-1e2f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e30-1e3f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e40-1e4f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e50-1e5f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e60-1e6f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e70-1e7f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1e80-1e8f */
+   0, -1,  0, -1,  0, -1,  0,  0,  0,  0,  0,-59,  0, -1,  0, -1, /* 1e90-1e9f */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1ea0-1eaf */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1eb0-1ebf */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1ec0-1ecf */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1ed0-1edf */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0, -1, /* 1ee0-1eef */
+   0, -1,  0, -1,  0, -1,  0, -1,  0, -1,  0,  0,  0,  0,  0,  0, /* 1ef0-1eff */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f00-1f0f */
+   8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f10-1f1f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f20-1f2f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f30-1f3f */
+   8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f40-1f4f */
+   0,  8,  0,  8,  0,  8,  0,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f50-1f5f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f60-1f6f */
+  74, 74, 86, 86, 86, 86,100,100,  0,  0,112,112,126,126,  0,  0, /* 1f70-1f7f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f80-1f8f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1f90-1f9f */
+   8,  8,  8,  8,  8,  8,  8,  8,  0,  0,  0,  0,  0,  0,  0,  0, /* 1fa0-1faf */
+   8,  8,  0,  9,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1fb0-1fbf */
+   0,  0,  0,  9,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1fc0-1fcf */
+   8,  8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1fd0-1fdf */
+   8,  8,  0,  0,  0,  7,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, /* 1fe0-1fef */
+   0,  0,  0,  9,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+};
+
+/* Upper case range - Wide latin */
+static signed char UniCaseRangeUff40[27] = {
+   0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* ff40-ff4f */
+ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,
+};
+
+/*
+ * Upper Case Range
+ */
+UNICASERANGE UniUpperRange[] = {
+    { 0x03a0,  0x03ce,  UniCaseRangeU03a0 },
+    { 0x0430,  0x045f,  UniCaseRangeU0430 },
+    { 0x0490,  0x04cc,  UniCaseRangeU0490 },
+    { 0x1e00,  0x1ffc,  UniCaseRangeU1e00 },
+    { 0xff40,  0xff5a,  UniCaseRangeUff40 },
+    { 0 }
+};
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
new file mode 100644
index 0000000..a1052f3f0
--- /dev/null
+++ b/fs/jfs/jfs_xattr.h
@@ -0,0 +1,64 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef H_JFS_XATTR
+#define H_JFS_XATTR
+
+/*
+ * jfs_ea_list describes the on-disk format of the extended attributes.
+ * I know the null-terminator is redundant since namelen is stored, but
+ * I am maintaining compatibility with OS/2 where possible.
+ */
+struct jfs_ea {
+	u8 flag;	/* Unused? */
+	u8 namelen;	/* Length of name */
+	__le16 valuelen;	/* Length of value */
+	char name[0];	/* Attribute name (includes null-terminator) */
+};			/* Value immediately follows name */
+
+struct jfs_ea_list {
+	__le32 size;		/* overall size */
+	struct jfs_ea ea[0];	/* Variable length list */
+};
+
+/* Macros for defining maximum number of bytes supported for EAs */
+#define MAXEASIZE	65535
+#define MAXEALISTSIZE	MAXEASIZE
+
+/*
+ * some macros for dealing with variable length EA lists.
+ */
+#define EA_SIZE(ea) \
+	(sizeof (struct jfs_ea) + (ea)->namelen + 1 + \
+	 le16_to_cpu((ea)->valuelen))
+#define	NEXT_EA(ea) ((struct jfs_ea *) (((char *) (ea)) + (EA_SIZE (ea))))
+#define	FIRST_EA(ealist) ((ealist)->ea)
+#define	EALIST_SIZE(ealist) le32_to_cpu((ealist)->size)
+#define	END_EALIST(ealist) \
+	((struct jfs_ea *) (((char *) (ealist)) + EALIST_SIZE(ealist)))
+
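+/*
+ * Illustrative sketch (not part of the JFS sources): walking a decoded
+ * jfs_ea_list with the macros above.  "example_count_eas" is hypothetical.
+ */
+static inline int example_count_eas(struct jfs_ea_list *ealist)
+{
+	struct jfs_ea *ea;
+	int n = 0;
+
+	/* entries are packed back to back; NEXT_EA() skips name and value */
+	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
+		n++;
+
+	return n;
+}
+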
+extern int __jfs_setxattr(struct inode *, const char *, const void *, size_t,
+			  int);
+extern int jfs_setxattr(struct dentry *, const char *, const void *, size_t,
+			int);
+extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
+extern ssize_t jfs_getxattr(struct dentry *, const char *, void *, size_t);
+extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
+extern int jfs_removexattr(struct dentry *, const char *);
+
+#endif	/* H_JFS_XATTR */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
new file mode 100644
index 0000000..11c58c5
--- /dev/null
+++ b/fs/jfs/jfs_xtree.c
@@ -0,0 +1,4485 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ *      jfs_xtree.c: extent allocation descriptor B+-tree manager
+ */
+
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_dmap.h"
+#include "jfs_dinode.h"
+#include "jfs_superblock.h"
+#include "jfs_debug.h"
+
+/*
+ * xtree local flag
+ */
+#define XT_INSERT       0x00000001
+
+/*
+ *       xtree key/entry comparison: extent offset
+ *
+ * return:
+ *      -1: k < start of extent
+ *       0: start_of_extent <= k <= end_of_extent
+ *       1: k > end_of_extent
+ */
+#define XT_CMP(CMP, K, X, OFFSET64)\
+{\
+        OFFSET64 = offsetXAD(X);\
+        (CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
+              ((K) < OFFSET64) ? -1 : 0;\
+}
+
+/* write a xad entry */
+#define XT_PUTENTRY(XAD, FLAG, OFF, LEN, ADDR)\
+{\
+        (XAD)->flag = (FLAG);\
+        XADoffset((XAD), (OFF));\
+        XADlength((XAD), (LEN));\
+        XADaddress((XAD), (ADDR));\
+}
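+
+/*
+ * Illustrative sketch (not part of the JFS sources): XT_PUTENTRY() fills in
+ * an xad and XT_CMP() classifies a file offset against it.  The values and
+ * "example_xt_cmp" are hypothetical.
+ */
+static inline int example_xt_cmp(void)
+{
+	xad_t xad;
+	s64 off64;
+	int cmp;
+
+	/* extent covering file offsets [100, 116) at block address 5000 */
+	XT_PUTENTRY(&xad, XAD_NEW, 100, 16, 5000);
+
+	XT_CMP(cmp, 99, &xad, off64);	/* cmp == -1: below the extent */
+	XT_CMP(cmp, 100, &xad, off64);	/* cmp ==  0: inside the extent */
+	XT_CMP(cmp, 116, &xad, off64);	/* cmp ==  1: past the extent */
+
+	return cmp;
+}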
+
+#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
+
+/* get page buffer for specified block address */
+/* ToDo: Replace this ugly macro with a function */
+#define XT_GETPAGE(IP, BN, MP, SIZE, P, RC)\
+{\
+	BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot)\
+	if (!(RC))\
+	{\
+		if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) ||\
+		    (le16_to_cpu((P)->header.nextindex) > le16_to_cpu((P)->header.maxentry)) ||\
+		    (le16_to_cpu((P)->header.maxentry) > (((BN)==0)?XTROOTMAXSLOT:PSIZE>>L2XTSLOTSIZE)))\
+		{\
+			jfs_error((IP)->i_sb, "XT_GETPAGE: xtree page corrupt");\
+			BT_PUTPAGE(MP);\
+			MP = NULL;\
+			RC = -EIO;\
+		}\
+        }\
+}
+
+/* for consistency */
+#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
+
+#define XT_GETSEARCH(IP, LEAF, BN, MP,  P, INDEX) \
+	BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot)
+/* xtree entry parameter descriptor */
+struct xtsplit {
+	struct metapage *mp;
+	s16 index;
+	u8 flag;
+	s64 off;
+	s64 addr;
+	int len;
+	struct pxdlist *pxdlist;
+};
+
+
+/*
+ *      statistics
+ */
+#ifdef CONFIG_JFS_STATISTICS
+static struct {
+	uint search;
+	uint fastSearch;
+	uint split;
+} xtStat;
+#endif
+
+
+/*
+ * forward references
+ */
+static int xtSearch(struct inode *ip,
+		    s64 xoff, int *cmpp, struct btstack * btstack, int flag);
+
+static int xtSplitUp(tid_t tid,
+		     struct inode *ip,
+		     struct xtsplit * split, struct btstack * btstack);
+
+static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
+		       struct metapage ** rmpp, s64 * rbnp);
+
+static int xtSplitRoot(tid_t tid, struct inode *ip,
+		       struct xtsplit * split, struct metapage ** rmpp);
+
+#ifdef _STILL_TO_PORT
+static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
+		      xtpage_t * fp, struct btstack * btstack);
+
+static int xtSearchNode(struct inode *ip,
+			xad_t * xad,
+			int *cmpp, struct btstack * btstack, int flag);
+
+static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
+#endif				/*  _STILL_TO_PORT */
+
+/* External references */
+
+/*
+ *      debug control
+ */
+/*      #define _JFS_DEBUG_XTREE        1 */
+
+
+/*
+ *      xtLookup()
+ *
+ * function: map a single page into a physical extent;
+ */
+int xtLookup(struct inode *ip, s64 lstart,
+	     s64 llen, int *pflag, s64 * paddr, s32 * plen, int no_check)
+{
+	int rc = 0;
+	struct btstack btstack;
+	int cmp;
+	s64 bn;
+	struct metapage *mp;
+	xtpage_t *p;
+	int index;
+	xad_t *xad;
+	s64 size, xoff, xend;
+	int xlen;
+	s64 xaddr;
+
+	*plen = 0;
+
+	if (!no_check) {
+		/* is lookup offset beyond eof ? */
+		size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
+		    JFS_SBI(ip->i_sb)->l2bsize;
+		if (lstart >= size) {
+			jfs_err("xtLookup: lstart (0x%lx) >= size (0x%lx)",
+				(ulong) lstart, (ulong) size);
+			return 0;
+		}
+	}
+
+	/*
+	 * search for the xad entry covering the logical extent
+	 */
+//search:
+	if ((rc = xtSearch(ip, lstart, &cmp, &btstack, 0))) {
+		jfs_err("xtLookup: xtSearch returned %d", rc);
+		return rc;
+	}
+
+	/*
+	 *      compute the physical extent covering logical extent
+	 *
+	 * N.B. search may have failed (e.g., hole in sparse file),
+	 * and returned the index of the next entry.
+	 */
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	/* is xad found covering start of logical extent ?
+	 * lstart is a page start address,
+	 * i.e., lstart cannot start in a hole;
+	 */
+	if (cmp)
+		goto out;
+
+	/*
+	 * lxd covered by xad
+	 */
+	xad = &p->xad[index];
+	xoff = offsetXAD(xad);
+	xlen = lengthXAD(xad);
+	xend = xoff + xlen;
+	xaddr = addressXAD(xad);
+
+	/* initialize new pxd */
+	*pflag = xad->flag;
+	*paddr = xaddr + (lstart - xoff);
+	/* a page must be fully covered by an xad */
+	*plen = min(xend - lstart, llen);
+
+      out:
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
+
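+/*
+ * Illustrative sketch (not part of the JFS sources): how a block-mapping
+ * helper would call xtLookup() for a single block.  "example_map_block" is
+ * hypothetical.
+ */
+static inline s64 example_map_block(struct inode *ip, s64 lblock)
+{
+	int xflag;
+	s64 xaddr = 0;
+	s32 xlen = 0;
+
+	/* xlen comes back 0 when lblock falls in a hole of a sparse file */
+	if (xtLookup(ip, lblock, 1, &xflag, &xaddr, &xlen, 0))
+		return 0;
+
+	return xlen ? xaddr : 0;
+}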
+
+/*
+ *      xtLookupList()
+ *
+ * function: map a single logical extent into a list of physical extents;
+ *
+ * parameter:
+ *      struct inode    *ip,
+ *      struct lxdlist  *lxdlist,       lxd list (in)
+ *      struct xadlist  *xadlist,       xad list (in/out)
+ *      int		flag)
+ *
+ * coverage of lxd by xad under assumption of
+ * . lxd's are ordered and disjoint.
+ * . xad's are ordered and disjoint.
+ *
+ * return:
+ *      0:      success
+ *
+ * note: a page being written (even a single byte) is backed fully,
+ *      except the last page which is only backed with blocks
+ *      required to cover the last byte;
+ *      the extent backing a page is fully contained within an xad;
+ */
+int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
+		 struct xadlist * xadlist, int flag)
+{
+	int rc = 0;
+	struct btstack btstack;
+	int cmp;
+	s64 bn;
+	struct metapage *mp;
+	xtpage_t *p;
+	int index;
+	lxd_t *lxd;
+	xad_t *xad, *pxd;
+	s64 size, lstart, lend, xstart, xend, pstart;
+	s64 llen, xlen, plen;
+	s64 xaddr, paddr;
+	int nlxd, npxd, maxnpxd;
+
+	npxd = xadlist->nxad = 0;
+	maxnpxd = xadlist->maxnxad;
+	pxd = xadlist->xad;
+
+	nlxd = lxdlist->nlxd;
+	lxd = lxdlist->lxd;
+
+	lstart = offsetLXD(lxd);
+	llen = lengthLXD(lxd);
+	lend = lstart + llen;
+
+	size = (ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
+	    JFS_SBI(ip->i_sb)->l2bsize;
+
+	/*
+	 * search for the xad entry covering the logical extent
+	 */
+      search:
+	if (lstart >= size)
+		return 0;
+
+	if ((rc = xtSearch(ip, lstart, &cmp, &btstack, 0)))
+		return rc;
+
+	/*
+	 *      compute the physical extent covering logical extent
+	 *
+	 * N.B. search may have failed (e.g., hole in sparse file),
+	 * and returned the index of the next entry.
+	 */
+//map:
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	/* is xad on the next sibling page ? */
+	if (index == le16_to_cpu(p->header.nextindex)) {
+		if (p->header.flag & BT_ROOT)
+			goto mapend;
+
+		if ((bn = le64_to_cpu(p->header.next)) == 0)
+			goto mapend;
+
+		XT_PUTPAGE(mp);
+
+		/* get next sibling page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		index = XTENTRYSTART;
+	}
+
+	xad = &p->xad[index];
+
+	/*
+	 * is lxd covered by xad ?
+	 */
+      compare:
+	xstart = offsetXAD(xad);
+	xlen = lengthXAD(xad);
+	xend = xstart + xlen;
+	xaddr = addressXAD(xad);
+
+      compare1:
+	if (xstart < lstart)
+		goto compare2;
+
+	/* (lstart <= xstart) */
+
+	/* lxd is NOT covered by xad */
+	if (lend <= xstart) {
+		/*
+		 * get next lxd
+		 */
+		if (--nlxd == 0)
+			goto mapend;
+		lxd++;
+
+		lstart = offsetLXD(lxd);
+		llen = lengthLXD(lxd);
+		lend = lstart + llen;
+		if (lstart >= size)
+			goto mapend;
+
+		/* compare with the current xad  */
+		goto compare1;
+	}
+	/* lxd is covered by xad */
+	else {			/* (xstart < lend) */
+
+		/* initialize new pxd */
+		pstart = xstart;
+		plen = min(lend - xstart, xlen);
+		paddr = xaddr;
+
+		goto cover;
+	}
+
+	/* (xstart < lstart) */
+      compare2:
+	/* lxd is covered by xad */
+	if (lstart < xend) {
+		/* initialize new pxd */
+		pstart = lstart;
+		plen = min(xend - lstart, llen);
+		paddr = xaddr + (lstart - xstart);
+
+		goto cover;
+	}
+	/* lxd is NOT covered by xad */
+	else {			/* (xend <= lstart) */
+
+		/*
+		 * get next xad
+		 *
+		 * linear search next xad covering lxd on
+		 * the current xad page, and then tree search
+		 */
+		if (index == le16_to_cpu(p->header.nextindex) - 1) {
+			if (p->header.flag & BT_ROOT)
+				goto mapend;
+
+			XT_PUTPAGE(mp);
+			goto search;
+		} else {
+			index++;
+			xad++;
+
+			/* compare with new xad */
+			goto compare;
+		}
+	}
+
+	/*
+	 * lxd is covered by xad and a new pxd has been initialized
+	 * (lstart <= xstart < lend) or (xstart < lstart < xend)
+	 */
+      cover:
+	/* finalize pxd corresponding to current xad */
+	XT_PUTENTRY(pxd, xad->flag, pstart, plen, paddr);
+
+	if (++npxd >= maxnpxd)
+		goto mapend;
+	pxd++;
+
+	/*
+	 * lxd is fully covered by xad
+	 */
+	if (lend <= xend) {
+		/*
+		 * get next lxd
+		 */
+		if (--nlxd == 0)
+			goto mapend;
+		lxd++;
+
+		lstart = offsetLXD(lxd);
+		llen = lengthLXD(lxd);
+		lend = lstart + llen;
+		if (lstart >= size)
+			goto mapend;
+
+		/*
+		 * test for old xad covering new lxd
+		 * (old xstart < new lstart)
+		 */
+		goto compare2;
+	}
+	/*
+	 * lxd is partially covered by xad
+	 */
+	else {			/* (xend < lend)  */
+
+		/*
+		 * get next xad
+		 *
+		 * linear search next xad covering lxd on
+		 * the current xad page, and then next xad page search
+		 */
+		if (index == le16_to_cpu(p->header.nextindex) - 1) {
+			if (p->header.flag & BT_ROOT)
+				goto mapend;
+
+			if ((bn = le64_to_cpu(p->header.next)) == 0)
+				goto mapend;
+
+			XT_PUTPAGE(mp);
+
+			/* get next sibling page */
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return rc;
+
+			index = XTENTRYSTART;
+			xad = &p->xad[index];
+		} else {
+			index++;
+			xad++;
+		}
+
+		/*
+		 * test for new xad covering old lxd
+		 * (old lstart < new xstart)
+		 */
+		goto compare;
+	}
+
+      mapend:
+	xadlist->nxad = npxd;
+
+//out:
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
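+
+/*
+ * Illustrative sketch (not part of the JFS sources): mapping one logical
+ * extent through xtLookupList().  "example_map_range" and the list sizes
+ * are hypothetical.
+ */
+static inline int example_map_range(struct inode *ip, s64 lstart, s64 llen)
+{
+	lxd_t lxd;
+	xad_t xads[8];
+	struct lxdlist lxdlist;
+	struct xadlist xadlist;
+
+	/* one logical extent in ... */
+	LXDoffset(&lxd, lstart);
+	LXDlength(&lxd, llen);
+	lxdlist.maxnlxd = lxdlist.nlxd = 1;
+	lxdlist.lxd = &lxd;
+
+	/* ... up to 8 physical extents out */
+	xadlist.maxnxad = 8;
+	xadlist.nxad = 0;
+	xadlist.xad = xads;
+
+	/* on return, xadlist.nxad holds the number of xads filled in */
+	return xtLookupList(ip, &lxdlist, &xadlist, 0);
+}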
+
+
+/*
+ *      xtSearch()
+ *
+ * function:    search for the xad entry covering specified offset.
+ *
+ * parameters:
+ *      ip      - file object;
+ *      xoff    - extent offset;
+ *      cmpp    - comparison result:
+ *      btstack - traverse stack;
+ *      flag    - search process flag (XT_INSERT);
+ *
+ * returns:
+ *      btstack contains (bn, index) of search path traversed to the entry.
+ *      *cmpp is set to result of comparison with the entry returned.
+ *      the page containing the entry is pinned at exit.
+ */
+static int xtSearch(struct inode *ip, s64 xoff,	/* offset of extent */
+		    int *cmpp, struct btstack * btstack, int flag)
+{
+	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+	int rc = 0;
+	int cmp = 1;		/* init for empty page */
+	s64 bn;			/* block number */
+	struct metapage *mp;	/* page buffer */
+	xtpage_t *p;		/* page */
+	xad_t *xad;
+	int base, index, lim, btindex;
+	struct btframe *btsp;
+	int nsplit = 0;		/* number of pages to split */
+	s64 t64;
+
+	INCREMENT(xtStat.search);
+
+	BT_CLR(btstack);
+
+	btstack->nsplit = 0;
+
+	/*
+	 *      search down tree from root:
+	 *
+	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
+	 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
+	 *
+	 * if entry with search key K is not found
+	 * internal page search finds the entry with the largest key Ki
+	 * less than K, which points to the child page to search;
+	 * leaf page search finds the entry with the smallest key Kj
+	 * greater than K so that the returned index is the position of
+	 * the entry to be shifted right for insertion of new entry.
+	 * for empty tree, search key is greater than any key of the tree.
+	 *
+	 * by convention, root bn = 0.
+	 */
+	for (bn = 0;;) {
+		/* get/pin the page to search */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/* try sequential access heuristics with the previous
+		 * access entry in target leaf page:
+		 * once search narrowed down into the target leaf,
+		 * key must either match an entry in the leaf or
+		 * key entry does not exist in the tree;
+		 */
+//fastSearch:
+		if ((jfs_ip->btorder & BT_SEQUENTIAL) &&
+		    (p->header.flag & BT_LEAF) &&
+		    (index = jfs_ip->btindex) <
+		    le16_to_cpu(p->header.nextindex)) {
+			xad = &p->xad[index];
+			t64 = offsetXAD(xad);
+			if (xoff < t64 + lengthXAD(xad)) {
+				if (xoff >= t64) {
+					*cmpp = 0;
+					goto out;
+				}
+
+				/* stop sequential access heuristics */
+				goto binarySearch;
+			} else {	/* (t64 + lengthXAD(xad)) <= xoff */
+
+				/* try next sequential entry */
+				index++;
+				if (index <
+				    le16_to_cpu(p->header.nextindex)) {
+					xad++;
+					t64 = offsetXAD(xad);
+					if (xoff < t64 + lengthXAD(xad)) {
+						if (xoff >= t64) {
+							*cmpp = 0;
+							goto out;
+						}
+
+						/* miss: key falls between
+						 * previous and this entry
+						 */
+						*cmpp = 1;
+						goto out;
+					}
+
+					/* (xoff >= t64 + lengthXAD(xad));
+					 * matching entry may be further out:
+					 * stop heuristic search
+					 */
+					/* stop sequential access heuristics */
+					goto binarySearch;
+				}
+
+				/* (index == p->header.nextindex);
+				 * miss: key entry does not exist in
+				 * the target leaf/tree
+				 */
+				*cmpp = 1;
+				goto out;
+			}
+
+			/*
+			 * if hit, return index of the entry found, and
+			 * if miss, where new entry with search key is
+			 * to be inserted;
+			 */
+		      out:
+			/* compute number of pages to split */
+			if (flag & XT_INSERT) {
+				if (p->header.nextindex ==	/* little-endian */
+				    p->header.maxentry)
+					nsplit++;
+				else
+					nsplit = 0;
+				btstack->nsplit = nsplit;
+			}
+
+			/* save search result */
+			btsp = btstack->top;
+			btsp->bn = bn;
+			btsp->index = index;
+			btsp->mp = mp;
+
+			/* update sequential access heuristics */
+			jfs_ip->btindex = index;
+
+			INCREMENT(xtStat.fastSearch);
+			return 0;
+		}
+
+		/* well, ... full search now */
+	      binarySearch:
+		lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
+
+		/*
+		 * binary search with search key K on the current page
+		 */
+		for (base = XTENTRYSTART; lim; lim >>= 1) {
+			index = base + (lim >> 1);
+
+			XT_CMP(cmp, xoff, &p->xad[index], t64);
+			if (cmp == 0) {
+				/*
+				 *      search hit
+				 */
+				/* search hit - leaf page:
+				 * return the entry found
+				 */
+				if (p->header.flag & BT_LEAF) {
+					*cmpp = cmp;
+
+					/* compute number of pages to split */
+					if (flag & XT_INSERT) {
+						if (p->header.nextindex ==
+						    p->header.maxentry)
+							nsplit++;
+						else
+							nsplit = 0;
+						btstack->nsplit = nsplit;
+					}
+
+					/* save search result */
+					btsp = btstack->top;
+					btsp->bn = bn;
+					btsp->index = index;
+					btsp->mp = mp;
+
+					/* init sequential access heuristics */
+					btindex = jfs_ip->btindex;
+					if (index == btindex ||
+					    index == btindex + 1)
+						jfs_ip->btorder = BT_SEQUENTIAL;
+					else
+						jfs_ip->btorder = BT_RANDOM;
+					jfs_ip->btindex = index;
+
+					return 0;
+				}
+
+				/* search hit - internal page:
+				 * descend/search its child page
+				 */
+				goto next;
+			}
+
+			if (cmp > 0) {
+				base = index + 1;
+				--lim;
+			}
+		}
+
+		/*
+		 *      search miss
+		 *
+		 * base is the smallest index with key (Kj) greater than
+		 * search key (K) and may be zero or maxentry index.
+		 */
+		/*
+		 * search miss - leaf page:
+		 *
+		 * return location of entry (base) where new entry with
+		 * search key K is to be inserted.
+		 */
+		if (p->header.flag & BT_LEAF) {
+			*cmpp = cmp;
+
+			/* compute number of pages to split */
+			if (flag & XT_INSERT) {
+				if (p->header.nextindex ==
+				    p->header.maxentry)
+					nsplit++;
+				else
+					nsplit = 0;
+				btstack->nsplit = nsplit;
+			}
+
+			/* save search result */
+			btsp = btstack->top;
+			btsp->bn = bn;
+			btsp->index = base;
+			btsp->mp = mp;
+
+			/* init sequential access heuristics */
+			btindex = jfs_ip->btindex;
+			if (base == btindex || base == btindex + 1)
+				jfs_ip->btorder = BT_SEQUENTIAL;
+			else
+				jfs_ip->btorder = BT_RANDOM;
+			jfs_ip->btindex = base;
+
+			return 0;
+		}
+
+		/*
+		 * search miss - non-leaf page:
+		 *
+		 * if base is non-zero, decrement base by one to get the parent
+		 * entry of the child page to search.
+		 */
+		index = base ? base - 1 : base;
+
+		/*
+		 * go down to child page
+		 */
+	      next:
+		/* update number of pages to split */
+		if (p->header.nextindex == p->header.maxentry)
+			nsplit++;
+		else
+			nsplit = 0;
+
+		/* push (bn, index) of the parent page/entry */
+		BT_PUSH(btstack, bn, index);
+
+		/* get the child page block number */
+		bn = addressXAD(&p->xad[index]);
+
+		/* unpin the parent page */
+		XT_PUTPAGE(mp);
+	}
+}
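+
+/*
+ * Illustrative sketch (not part of the JFS sources): the usual pattern for
+ * consuming an xtSearch() result - fetch the pinned leaf page with
+ * XT_GETSEARCH() and release it with XT_PUTPAGE().  "example_probe" is
+ * hypothetical.
+ */
+static inline int example_probe(struct inode *ip, s64 xoff)
+{
+	struct btstack btstack;
+	struct metapage *mp;
+	xtpage_t *p;
+	s64 bn;
+	int cmp, index, rc;
+
+	if ((rc = xtSearch(ip, xoff, &cmp, &btstack, 0)))
+		return rc;
+
+	/* the leaf page covering (or following) xoff is pinned here */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	if (cmp == 0)		/* hit: p->xad[index] covers xoff */
+		jfs_info("example_probe: bn 0x%lx index %d xaddr 0x%lx",
+			 (ulong) bn, index, (ulong) addressXAD(&p->xad[index]));
+
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+	return cmp;
+}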
+
+/*
+ *      xtInsert()
+ *
+ * function:
+ *
+ * parameter:
+ *      tid     - transaction id;
+ *      ip      - file object;
+ *      xflag   - extent flag (XAD_NOTRECORDED):
+ *      xoff    - extent offset;
+ *      xlen    - extent length;
+ *      xaddrp  - extent address pointer (in/out):
+ *              if (*xaddrp)
+ *                      caller allocated data extent at *xaddrp;
+ *              else
+ *                      allocate data extent and return its xaddr;
+ *      flag    -
+ *
+ * return:
+ */
+int xtInsert(tid_t tid,		/* transaction id */
+	     struct inode *ip, int xflag, s64 xoff, s32 xlen, s64 * xaddrp,
+	     int flag)
+{
+	int rc = 0;
+	s64 xaddr, hint;
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* base B+-tree index page */
+	s64 bn;
+	int index, nextindex;
+	struct btstack btstack;	/* traverse stack */
+	struct xtsplit split;	/* split information */
+	xad_t *xad;
+	int cmp;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+
+	jfs_info("xtInsert: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
+
+	/*
+	 *      search for the entry location at which to insert:
+	 *
+	 * xtFastSearch() and xtSearch() both return (leaf page
+	 * pinned, index at which to insert).
+	 * n.b. xtSearch() may return index of maxentry of
+	 * the full page.
+	 */
+	if ((rc = xtSearch(ip, xoff, &cmp, &btstack, XT_INSERT)))
+		return rc;
+
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	/* This test must follow XT_GETSEARCH since mp must be valid if
+	 * we branch to out: */
+	if (cmp == 0) {
+		rc = -EEXIST;
+		goto out;
+	}
+
+	/*
+	 * allocate data extent requested
+	 *
+	 * allocation hint: last xad
+	 */
+	if ((xaddr = *xaddrp) == 0) {
+		if (index > XTENTRYSTART) {
+			xad = &p->xad[index - 1];
+			hint = addressXAD(xad) + lengthXAD(xad) - 1;
+		} else
+			hint = 0;
+		if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+			goto out;
+		if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
+			DQUOT_FREE_BLOCK(ip, xlen);
+			goto out;
+		}
+	}
+
+	/*
+	 *      insert entry for new extent
+	 */
+	xflag |= XAD_NEW;
+
+	/*
+	 *      if the leaf page is full, split the page and
+	 *      propagate up the router entry for the new page from split
+	 *
+	 * The xtSplitUp() will insert the entry and unpin the leaf page.
+	 */
+	nextindex = le16_to_cpu(p->header.nextindex);
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+		split.mp = mp;
+		split.index = index;
+		split.flag = xflag;
+		split.off = xoff;
+		split.len = xlen;
+		split.addr = xaddr;
+		split.pxdlist = NULL;
+		if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
+			/* undo data extent allocation */
+			if (*xaddrp == 0) {
+				dbFree(ip, xaddr, (s64) xlen);
+				DQUOT_FREE_BLOCK(ip, xlen);
+			}
+			return rc;
+		}
+
+		*xaddrp = xaddr;
+		return 0;
+	}
+
+	/*
+	 *      insert the new entry into the leaf page
+	 */
+	/*
+	 * acquire a transaction lock on the leaf page;
+	 *
+	 * action: xad insertion/extension;
+	 */
+	BT_MARK_DIRTY(mp, ip);
+
+	/* if insert into middle, shift right remaining entries. */
+	if (index < nextindex)
+		memmove(&p->xad[index + 1], &p->xad[index],
+			(nextindex - index) * sizeof(xad_t));
+
+	/* insert the new entry: mark the entry NEW */
+	xad = &p->xad[index];
+	XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
+
+	/* advance next available entry index */
+	p->header.nextindex =
+	    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+
+	/* Don't log it if there are no links to the file */
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+		xtlck = (struct xtlock *) & tlck->lock;
+		xtlck->lwm.offset =
+		    (xtlck->lwm.offset) ? min(index,
+					      (int)xtlck->lwm.offset) : index;
+		xtlck->lwm.length =
+		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
+	}
+
+	*xaddrp = xaddr;
+
+      out:
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
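+
+/*
+ * Illustrative sketch (not part of the JFS sources): recording an already
+ * allocated extent with xtInsert().  "example_record_extent" is
+ * hypothetical.
+ */
+static inline int example_record_extent(tid_t tid, struct inode *ip,
+					s64 xoff, s32 xlen, s64 xaddr)
+{
+	/* a non-zero xaddr tells xtInsert() the caller allocated the blocks */
+	return xtInsert(tid, ip, 0, xoff, xlen, &xaddr, 0);
+}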
+
+
+/*
+ *      xtSplitUp()
+ *
+ * function:
+ *      split full pages as propagating insertion up the tree
+ *
+ * parameter:
+ *      tid     - transaction id;
+ *      ip      - file object;
+ *      split   - entry parameter descriptor;
+ *      btstack - traverse stack from xtSearch()
+ *
+ * return:
+ */
+static int
+xtSplitUp(tid_t tid,
+	  struct inode *ip, struct xtsplit * split, struct btstack * btstack)
+{
+	int rc = 0;
+	struct metapage *smp;
+	xtpage_t *sp;		/* split page */
+	struct metapage *rmp;
+	s64 rbn;		/* new right page block number */
+	struct metapage *rcmp;
+	xtpage_t *rcp;		/* right child page */
+	s64 rcbn;		/* right child page block number */
+	int skip;		/* index of entry of insertion */
+	int nextindex;		/* next available entry index of p */
+	struct btframe *parent;	/* parent page entry on traverse stack */
+	xad_t *xad;
+	s64 xaddr;
+	int xlen;
+	int nsplit;		/* number of pages split */
+	struct pxdlist pxdlist;
+	pxd_t *pxd;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+
+	smp = split->mp;
+	sp = XT_PAGE(ip, smp);
+
+	/* is inode xtree root extension/inline EA area free ? */
+	if ((sp->header.flag & BT_ROOT) && (!S_ISDIR(ip->i_mode)) &&
+	    (le16_to_cpu(sp->header.maxentry) < XTROOTMAXSLOT) &&
+	    (JFS_IP(ip)->mode2 & INLINEEA)) {
+		sp->header.maxentry = cpu_to_le16(XTROOTMAXSLOT);
+		JFS_IP(ip)->mode2 &= ~INLINEEA;
+
+		BT_MARK_DIRTY(smp, ip);
+		/*
+		 * acquire a transaction lock on the leaf page;
+		 *
+		 * action: xad insertion/extension;
+		 */
+
+		/* if insert into middle, shift right remaining entries. */
+		skip = split->index;
+		nextindex = le16_to_cpu(sp->header.nextindex);
+		if (skip < nextindex)
+			memmove(&sp->xad[skip + 1], &sp->xad[skip],
+				(nextindex - skip) * sizeof(xad_t));
+
+		/* insert the new entry: mark the entry NEW */
+		xad = &sp->xad[skip];
+		XT_PUTENTRY(xad, split->flag, split->off, split->len,
+			    split->addr);
+
+		/* advance next available entry index */
+		sp->header.nextindex =
+		    cpu_to_le16(le16_to_cpu(sp->header.nextindex) + 1);
+
+		/* Don't log it if there are no links to the file */
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
+			xtlck = (struct xtlock *) & tlck->lock;
+			xtlck->lwm.offset = (xtlck->lwm.offset) ?
+			    min(skip, (int)xtlck->lwm.offset) : skip;
+			xtlck->lwm.length =
+			    le16_to_cpu(sp->header.nextindex) -
+			    xtlck->lwm.offset;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * allocate new index blocks to cover index page split(s)
+	 *
+	 * allocation hint: ?
+	 */
+	if (split->pxdlist == NULL) {
+		nsplit = btstack->nsplit;
+		split->pxdlist = &pxdlist;
+		pxdlist.maxnpxd = pxdlist.npxd = 0;
+		pxd = &pxdlist.pxd[0];
+		xlen = JFS_SBI(ip->i_sb)->nbperpage;
+		for (; nsplit > 0; nsplit--, pxd++) {
+			if ((rc = dbAlloc(ip, (s64) 0, (s64) xlen, &xaddr))
+			    == 0) {
+				PXDaddress(pxd, xaddr);
+				PXDlength(pxd, xlen);
+
+				pxdlist.maxnpxd++;
+
+				continue;
+			}
+
+			/* undo allocation */
+
+			XT_PUTPAGE(smp);
+			return rc;
+		}
+	}
+
+	/*
+	 * Split leaf page <sp> into <sp> and a new right page <rp>.
+	 *
+	 * The split routines insert the new entry into the leaf page,
+	 * and acquire txLock as appropriate.
+	 * return <rp> pinned and its block number <rpbn>.
+	 */
+	rc = (sp->header.flag & BT_ROOT) ?
+	    xtSplitRoot(tid, ip, split, &rmp) :
+	    xtSplitPage(tid, ip, split, &rmp, &rbn);
+
+	XT_PUTPAGE(smp);
+
+	if (rc)
+		return -EIO;
+	/*
+	 * propagate up the router entry for the leaf page just split
+	 *
+	 * insert a router entry for the new page into the parent page,
+	 * propagate the insert/split up the tree by walking back the stack
+	 * of (bn of parent page, index of child page entry in parent page)
+	 * that were traversed during the search for the page that split.
+	 *
+	 * the propagation of insert/split up the tree stops if the root
+	 * splits or the page inserted into doesn't have to split to hold
+	 * the new entry.
+	 *
+	 * the parent entry for the split page remains the same, and
+	 * a new entry is inserted at its right with the first key and
+	 * block number of the new right page.
+	 *
+	 * There are a maximum of 3 pages pinned at any time:
+	 * right child, left parent and right parent (when the parent splits)
+	 * to keep the child page pinned while working on the parent.
+	 * make sure that all pins are released at exit.
+	 */
+	while ((parent = BT_POP(btstack)) != NULL) {
+		/* parent page specified by stack frame <parent> */
+
+		/* keep current child pages <rcp> pinned */
+		rcmp = rmp;
+		rcbn = rbn;
+		rcp = XT_PAGE(ip, rcmp);
+
+		/*
+		 * insert router entry in parent for new right child page <rp>
+		 */
+		/* get/pin the parent page <sp> */
+		XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
+		if (rc) {
+			XT_PUTPAGE(rcmp);
+			return rc;
+		}
+
+		/*
+		 * The new key entry goes ONE AFTER the index of parent entry,
+		 * because the split was to the right.
+		 */
+		skip = parent->index + 1;
+
+		/*
+		 * split or shift right remaining entries of the parent page
+		 */
+		nextindex = le16_to_cpu(sp->header.nextindex);
+		/*
+		 * parent page is full - split the parent page
+		 */
+		if (nextindex == le16_to_cpu(sp->header.maxentry)) {
+			/* init for parent page split */
+			split->mp = smp;
+			split->index = skip;	/* index at insert */
+			split->flag = XAD_NEW;
+			split->off = offsetXAD(&rcp->xad[XTENTRYSTART]);
+			split->len = JFS_SBI(ip->i_sb)->nbperpage;
+			split->addr = rcbn;
+
+			/* unpin previous right child page */
+			XT_PUTPAGE(rcmp);
+
+			/* The split routines insert the new entry,
+			 * and acquire txLock as appropriate.
+			 * return <rp> pinned and its block number <rpbn>.
+			 */
+			rc = (sp->header.flag & BT_ROOT) ?
+			    xtSplitRoot(tid, ip, split, &rmp) :
+			    xtSplitPage(tid, ip, split, &rmp, &rbn);
+			if (rc) {
+				XT_PUTPAGE(smp);
+				return rc;
+			}
+
+			XT_PUTPAGE(smp);
+			/* keep new child page <rp> pinned */
+		}
+		/*
+		 * parent page is not full - insert in parent page
+		 */
+		else {
+			/*
+			 * insert router entry in parent for the right child
+			 * page from the first entry of the right child page:
+			 */
+			/*
+			 * acquire a transaction lock on the parent page;
+			 *
+			 * action: router xad insertion;
+			 */
+			BT_MARK_DIRTY(smp, ip);
+
+			/*
+			 * if insert into middle, shift right remaining entries
+			 */
+			if (skip < nextindex)
+				memmove(&sp->xad[skip + 1], &sp->xad[skip],
+					(nextindex -
+					 skip) << L2XTSLOTSIZE);
+
+			/* insert the router entry */
+			xad = &sp->xad[skip];
+			XT_PUTENTRY(xad, XAD_NEW,
+				    offsetXAD(&rcp->xad[XTENTRYSTART]),
+				    JFS_SBI(ip->i_sb)->nbperpage, rcbn);
+
+			/* advance next available entry index. */
+			sp->header.nextindex =
+			    cpu_to_le16(le16_to_cpu(sp->header.nextindex) +
+					1);
+
+			/* Don't log it if there are no links to the file */
+			if (!test_cflag(COMMIT_Nolink, ip)) {
+				tlck = txLock(tid, ip, smp,
+					      tlckXTREE | tlckGROW);
+				xtlck = (struct xtlock *) & tlck->lock;
+				xtlck->lwm.offset = (xtlck->lwm.offset) ?
+				    min(skip, (int)xtlck->lwm.offset) : skip;
+				xtlck->lwm.length =
+				    le16_to_cpu(sp->header.nextindex) -
+				    xtlck->lwm.offset;
+			}
+
+			/* unpin parent page */
+			XT_PUTPAGE(smp);
+
+			/* exit propagate up */
+			break;
+		}
+	}
+
+	/* unpin current right page */
+	XT_PUTPAGE(rmp);
+
+	return 0;
+}
+
+
+/*
+ *      xtSplitPage()
+ *
+ * function:
+ *      split a full non-root page into
+ *      original/split/left page and new right page
+ *      i.e., the original/split page remains as left page.
+ *
+ * parameter:
+ *      int		tid,
+ *      struct inode    *ip,
+ *      struct xtsplit  *split,
+ *      struct metapage	**rmpp,
+ *      u64		*rbnp,
+ *
+ * return:
+ *      0 on success, with the new right page returned pinned in <rmpp>
+ *      and its block number in <rbnp>; otherwise an errno.
+ */
+static int
+xtSplitPage(tid_t tid, struct inode *ip,
+	    struct xtsplit * split, struct metapage ** rmpp, s64 * rbnp)
+{
+	int rc = 0;
+	struct metapage *smp;
+	xtpage_t *sp;
+	struct metapage *rmp;
+	xtpage_t *rp;		/* new right page allocated */
+	s64 rbn;		/* new right page block number */
+	struct metapage *mp;
+	xtpage_t *p;
+	s64 nextbn;
+	int skip, maxentry, middle, righthalf, n;
+	xad_t *xad;
+	struct pxdlist *pxdlist;
+	pxd_t *pxd;
+	struct tlock *tlck;
+	struct xtlock *sxtlck = NULL, *rxtlck = NULL;
+	int quota_allocation = 0;
+
+	smp = split->mp;
+	sp = XT_PAGE(ip, smp);
+
+	INCREMENT(xtStat.split);
+
+	pxdlist = split->pxdlist;
+	pxd = &pxdlist->pxd[pxdlist->npxd];
+	pxdlist->npxd++;
+	rbn = addressPXD(pxd);
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+		rc = -EDQUOT;
+		goto clean_up;
+	}
+
+	quota_allocation += lengthPXD(pxd);
+
+	/*
+	 * allocate the new right page for the split
+	 */
+	rmp = get_metapage(ip, rbn, PSIZE, 1);
+	if (rmp == NULL) {
+		rc = -EIO;
+		goto clean_up;
+	}
+
+	jfs_info("xtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
+
+	BT_MARK_DIRTY(rmp, ip);
+	/*
+	 * action: new page;
+	 */
+
+	rp = (xtpage_t *) rmp->data;
+	rp->header.self = *pxd;
+	rp->header.flag = sp->header.flag & BT_TYPE;
+	rp->header.maxentry = sp->header.maxentry;	/* little-endian */
+	rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
+
+	BT_MARK_DIRTY(smp, ip);
+	/* Don't log it if there are no links to the file */
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		/*
+		 * acquire a transaction lock on the new right page;
+		 */
+		tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
+		rxtlck = (struct xtlock *) & tlck->lock;
+		rxtlck->lwm.offset = XTENTRYSTART;
+		/*
+		 * acquire a transaction lock on the split page
+		 */
+		tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
+		sxtlck = (struct xtlock *) & tlck->lock;
+	}
+
+	/*
+	 * initialize/update sibling pointers of <sp> and <rp>
+	 */
+	nextbn = le64_to_cpu(sp->header.next);
+	rp->header.next = cpu_to_le64(nextbn);
+	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
+	sp->header.next = cpu_to_le64(rbn);
+
+	skip = split->index;
+
+	/*
+	 *      sequential append at tail (after last entry of last page)
+	 *
+	 * if splitting the last page on a level because of appending
+	 * an entry to it (skip is maxentry), it's likely that the access is
+	 * sequential. adding an empty page on the side of the level is less
+	 * work and can push the fill factor much higher than normal.
+	 * if we're wrong it's no big deal - we will do the split the right
+	 * way next time.
+	 * (it may look like it's equally easy to do a similar hack for
+	 * reverse sorted data, that is, split the tree left, but it's not.
+	 * Be my guest.)
+	 */
+	if (nextbn == 0 && skip == le16_to_cpu(sp->header.maxentry)) {
+		/*
+		 * acquire a transaction lock on the new/right page;
+		 *
+		 * action: xad insertion;
+		 */
+		/* insert entry at the first entry of the new right page */
+		xad = &rp->xad[XTENTRYSTART];
+		XT_PUTENTRY(xad, split->flag, split->off, split->len,
+			    split->addr);
+
+		rp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);
+
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			/* rxtlck->lwm.offset = XTENTRYSTART; */
+			rxtlck->lwm.length = 1;
+		}
+
+		*rmpp = rmp;
+		*rbnp = rbn;
+
+		jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
+		return 0;
+	}
+
+	/*
+	 *      non-sequential insert (possibly at a middle page)
+	 */
+
+	/*
+	 * update previous pointer of old next/right page of <sp>
+	 */
+	if (nextbn != 0) {
+		XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
+		if (rc) {
+			XT_PUTPAGE(rmp);
+			goto clean_up;
+		}
+
+		BT_MARK_DIRTY(mp, ip);
+		/*
+		 * acquire a transaction lock on the next page;
+		 *
+		 * action:sibling pointer update;
+		 */
+		if (!test_cflag(COMMIT_Nolink, ip))
+			tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
+
+		p->header.prev = cpu_to_le64(rbn);
+
+		/* sibling page may have been updated previously, or
+		 * it may be updated later;
+		 */
+
+		XT_PUTPAGE(mp);
+	}
+
+	/*
+	 * split the data between the split and new/right pages
+	 */
+	maxentry = le16_to_cpu(sp->header.maxentry);
+	middle = maxentry >> 1;
+	righthalf = maxentry - middle;
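+	/*
+	 * illustrative arithmetic (not from the original source): with the
+	 * usual 4K xtpage of 256 slots, middle = 128 and righthalf = 128;
+	 * entries [middle .. maxentry-1] end up on the new right page below,
+	 * and the new entry lands on whichever side contains <skip>.
+	 */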
+
+	/*
+	 * skip index in old split/left page - insert into left page:
+	 */
+	if (skip <= middle) {
+		/* move right half of split page to the new right page */
+		memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
+			righthalf << L2XTSLOTSIZE);
+
+		/* shift right tail of left half to make room for new entry */
+		if (skip < middle)
+			memmove(&sp->xad[skip + 1], &sp->xad[skip],
+				(middle - skip) << L2XTSLOTSIZE);
+
+		/* insert new entry */
+		xad = &sp->xad[skip];
+		XT_PUTENTRY(xad, split->flag, split->off, split->len,
+			    split->addr);
+
+		/* update page header */
+		sp->header.nextindex = cpu_to_le16(middle + 1);
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			sxtlck->lwm.offset = (sxtlck->lwm.offset) ?
+			    min(skip, (int)sxtlck->lwm.offset) : skip;
+		}
+
+		rp->header.nextindex =
+		    cpu_to_le16(XTENTRYSTART + righthalf);
+	}
+	/*
+	 * skip index in new right page - insert into right page:
+	 */
+	else {
+		/* move left head of right half to right page */
+		n = skip - middle;
+		memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
+			n << L2XTSLOTSIZE);
+
+		/* insert new entry */
+		n += XTENTRYSTART;
+		xad = &rp->xad[n];
+		XT_PUTENTRY(xad, split->flag, split->off, split->len,
+			    split->addr);
+
+		/* move right tail of right half to right page */
+		if (skip < maxentry)
+			memmove(&rp->xad[n + 1], &sp->xad[skip],
+				(maxentry - skip) << L2XTSLOTSIZE);
+
+		/* update page header */
+		sp->header.nextindex = cpu_to_le16(middle);
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			sxtlck->lwm.offset = (sxtlck->lwm.offset) ?
+			    min(middle, (int)sxtlck->lwm.offset) : middle;
+		}
+
+		rp->header.nextindex = cpu_to_le16(XTENTRYSTART +
+						   righthalf + 1);
+	}
+
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		sxtlck->lwm.length = le16_to_cpu(sp->header.nextindex) -
+		    sxtlck->lwm.offset;
+
+		/* rxtlck->lwm.offset = XTENTRYSTART; */
+		rxtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
+		    XTENTRYSTART;
+	}
+
+	*rmpp = rmp;
+	*rbnp = rbn;
+
+	jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
+	return rc;
+
+      clean_up:
+
+	/* Rollback quota allocation. */
+	if (quota_allocation)
+		DQUOT_FREE_BLOCK(ip, quota_allocation);
+
+	return (rc);
+}
+
+
+/*
+ *      xtSplitRoot()
+ *
+ * function:
+ *      split the full root page into
+ *      original/root/split page and new right page
+ *      i.e., root remains fixed in tree anchor (inode) and
+ *      the root is copied to a single new right child page
+ *      since root page << non-root page, and
+ *      the split root page contains a single entry for the
+ *      new right child page.
+ *
+ * parameter:
+ *      tid_t		tid,
+ *      struct inode    *ip,
+ *      struct xtsplit  *split,
+ *      struct metapage	**rmpp)
+ *
+ * return:
+ *      0 for success or -errno on failure; on success the new right child
+ *      page is returned pinned via <rmpp>.
+ */
+static int
+xtSplitRoot(tid_t tid,
+	    struct inode *ip, struct xtsplit * split, struct metapage ** rmpp)
+{
+	xtpage_t *sp;
+	struct metapage *rmp;
+	xtpage_t *rp;
+	s64 rbn;
+	int skip, nextindex;
+	xad_t *xad;
+	pxd_t *pxd;
+	struct pxdlist *pxdlist;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+
+	sp = &JFS_IP(ip)->i_xtroot;
+
+	INCREMENT(xtStat.split);
+
+	/*
+	 *      allocate a single (right) child page
+	 */
+	pxdlist = split->pxdlist;
+	pxd = &pxdlist->pxd[pxdlist->npxd];
+	pxdlist->npxd++;
+	rbn = addressPXD(pxd);
+	rmp = get_metapage(ip, rbn, PSIZE, 1);
+	if (rmp == NULL)
+		return -EIO;
+
+	/* Allocate blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+		release_metapage(rmp);
+		return -EDQUOT;
+	}
+
+	jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
+
+	/*
+	 * acquire a transaction lock on the new right page;
+	 *
+	 * action: new page;
+	 */
+	BT_MARK_DIRTY(rmp, ip);
+
+	rp = (xtpage_t *) rmp->data;
+	rp->header.flag =
+	    (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
+	rp->header.self = *pxd;
+	rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
+	rp->header.maxentry = cpu_to_le16(PSIZE >> L2XTSLOTSIZE);
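+	/* e.g., assuming the usual 4K PSIZE and 16-byte xad slots, the child
+	 * page gets 256 slots, far more than the in-inode root whose entries
+	 * are copied below (hence "root page << non-root page" above).
+	 */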
+
+	/* initialize sibling pointers */
+	rp->header.next = 0;
+	rp->header.prev = 0;
+
+	/*
+	 * copy the in-line root page into new right page extent
+	 */
+	nextindex = le16_to_cpu(sp->header.maxentry);
+	memmove(&rp->xad[XTENTRYSTART], &sp->xad[XTENTRYSTART],
+		(nextindex - XTENTRYSTART) << L2XTSLOTSIZE);
+
+	/*
+	 * insert the new entry into the new right/child page
+	 * (skip index in the new right page will not change)
+	 */
+	skip = split->index;
+	/* if insert into middle, shift right remaining entries */
+	if (skip != nextindex)
+		memmove(&rp->xad[skip + 1], &rp->xad[skip],
+			(nextindex - skip) * sizeof(xad_t));
+
+	xad = &rp->xad[skip];
+	XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr);
+
+	/* update page header */
+	rp->header.nextindex = cpu_to_le16(nextindex + 1);
+
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
+		xtlck = (struct xtlock *) & tlck->lock;
+		xtlck->lwm.offset = XTENTRYSTART;
+		xtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
+		    XTENTRYSTART;
+	}
+
+	/*
+	 *      reset the root
+	 *
+	 * init root with the single entry for the new right page
+	 * init root with the single entry for the new right page;
+	 * set the 1st entry offset to 0, which forces the left-most key
+	 */
+	/*
+	 * acquire a transaction lock on the root page (in-memory inode);
+	 *
+	 * action: root split;
+	 */
+	BT_MARK_DIRTY(split->mp, ip);
+
+	xad = &sp->xad[XTENTRYSTART];
+	XT_PUTENTRY(xad, XAD_NEW, 0, JFS_SBI(ip->i_sb)->nbperpage, rbn);
+
+	/* update page header of root */
+	sp->header.flag &= ~BT_LEAF;
+	sp->header.flag |= BT_INTERNAL;
+
+	sp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);
+
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, split->mp, tlckXTREE | tlckGROW);
+		xtlck = (struct xtlock *) & tlck->lock;
+		xtlck->lwm.offset = XTENTRYSTART;
+		xtlck->lwm.length = 1;
+	}
+
+	*rmpp = rmp;
+
+	jfs_info("xtSplitRoot: sp:0x%p rp:0x%p", sp, rp);
+	return 0;
+}
+
+
+/*
+ *      xtExtend()
+ *
+ * function: extend in-place;
+ *
+ * note: existing extent may or may not have been committed.
+ * caller is responsible for pager buffer cache update, and
+ * working block allocation map update;
+ * update pmap: alloc whole extended extent;
+ */
+int xtExtend(tid_t tid,		/* transaction id */
+	     struct inode *ip, s64 xoff,	/* delta extent offset */
+	     s32 xlen,		/* delta extent length */
+	     int flag)
+{
+	int rc = 0;
+	int cmp;
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* base B+-tree index page */
+	s64 bn;
+	int index, nextindex, len;
+	struct btstack btstack;	/* traverse stack */
+	struct xtsplit split;	/* split information */
+	xad_t *xad;
+	s64 xaddr;
+	struct tlock *tlck;
+	struct xtlock *xtlck = NULL;
+
+	jfs_info("xtExtend: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
+
+	/* there must exist an extent to be extended */
+	if ((rc = xtSearch(ip, xoff - 1, &cmp, &btstack, XT_INSERT)))
+		return rc;
+
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	if (cmp != 0) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb, "xtExtend: xtSearch did not find extent");
+		return -EIO;
+	}
+
+	/* extension must be contiguous */
+	xad = &p->xad[index];
+	if ((offsetXAD(xad) + lengthXAD(xad)) != xoff) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb, "xtExtend: extension is not contiguous");
+		return -EIO;
+	}
+
+	/*
+	 * acquire a transaction lock on the leaf page;
+	 *
+	 * action: xad insertion/extension;
+	 */
+	BT_MARK_DIRTY(mp, ip);
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+		xtlck = (struct xtlock *) & tlck->lock;
+	}
+
+	/* will the extension overflow the extent ? */
+	xlen = lengthXAD(xad) + xlen;
+	if ((len = xlen - MAXXLEN) <= 0)
+		goto extendOld;
+
+	/*
+	 *      extent overflow: insert entry for new extent
+	 */
+//insertNew:
+	xoff = offsetXAD(xad) + MAXXLEN;
+	xaddr = addressXAD(xad) + MAXXLEN;
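+	/* worked example (illustrative only): if the old extent held
+	 * MAXXLEN - 16 blocks and the caller extends by 40, then len = 24
+	 * here; the old xad is capped at MAXXLEN at extendOld: and a new
+	 * xad of 24 blocks is inserted, starting MAXXLEN blocks past the
+	 * old offset and address.
+	 */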
+	nextindex = le16_to_cpu(p->header.nextindex);
+
+	/*
+	 *      if the leaf page is full, insert the new entry and
+	 *      propagate up the router entry for the new page from split
+	 *
+	 * The xtSplitUp() will insert the entry and unpin the leaf page.
+	 */
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+		/* xtSplitUp() unpins leaf pages */
+		split.mp = mp;
+		split.index = index + 1;
+		split.flag = XAD_NEW;
+		split.off = xoff;	/* split offset */
+		split.len = len;
+		split.addr = xaddr;
+		split.pxdlist = NULL;
+		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
+			return rc;
+
+		/* get back old page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+		/*
+		 * if leaf root has been split, original root has been
+		 * copied to new child page, i.e., original entry now
+		 * resides on the new child page;
+		 */
+		if (p->header.flag & BT_INTERNAL) {
+			ASSERT(p->header.nextindex ==
+			       cpu_to_le16(XTENTRYSTART + 1));
+			xad = &p->xad[XTENTRYSTART];
+			bn = addressXAD(xad);
+			XT_PUTPAGE(mp);
+
+			/* get new child page */
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return rc;
+
+			BT_MARK_DIRTY(mp, ip);
+			if (!test_cflag(COMMIT_Nolink, ip)) {
+				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+				xtlck = (struct xtlock *) & tlck->lock;
+			}
+		}
+	}
+	/*
+	 *      insert the new entry into the leaf page
+	 */
+	else {
+		/* insert the new entry: mark the entry NEW */
+		xad = &p->xad[index + 1];
+		XT_PUTENTRY(xad, XAD_NEW, xoff, len, xaddr);
+
+		/* advance next available entry index */
+		p->header.nextindex =
+		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	}
+
+	/* get back old entry */
+	xad = &p->xad[index];
+	xlen = MAXXLEN;
+
+	/*
+	 * extend old extent
+	 */
+      extendOld:
+	XADlength(xad, xlen);
+	if (!(xad->flag & XAD_NEW))
+		xad->flag |= XAD_EXTENDED;
+
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		xtlck->lwm.offset =
+		    (xtlck->lwm.offset) ? min(index,
+					      (int)xtlck->lwm.offset) : index;
+		xtlck->lwm.length =
+		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
+	}
+
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
+
+#ifdef _NOTYET
+/*
+ *      xtTailgate()
+ *
+ * function: split existing 'tail' extent
+ *      (split offset >= start offset of tail extent), and
+ *      relocate and extend the split tail half;
+ *
+ * note: existing extent may or may not have been committed.
+ * caller is responsible for pager buffer cache update, and
+ * working block allocation map update;
+ * update pmap: free old split tail extent, alloc new extent;
+ */
+int xtTailgate(tid_t tid,		/* transaction id */
+	       struct inode *ip, s64 xoff,	/* split/new extent offset */
+	       s32 xlen,	/* new extent length */
+	       s64 xaddr,	/* new extent address */
+	       int flag)
+{
+	int rc = 0;
+	int cmp;
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* base B+-tree index page */
+	s64 bn;
+	int index, nextindex, llen, rlen;
+	struct btstack btstack;	/* traverse stack */
+	struct xtsplit split;	/* split information */
+	xad_t *xad;
+	struct tlock *tlck;
+	struct xtlock *xtlck = NULL;
+	struct tlock *mtlck;
+	struct maplock *pxdlock;
+
+/*
+printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
+        (ulong)xoff, xlen, (ulong)xaddr);
+*/
+
+	/* there must exist an extent to be tailgated */
+	if ((rc = xtSearch(ip, xoff, &cmp, &btstack, XT_INSERT)))
+		return rc;
+
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	if (cmp != 0) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb, "xtTailgate: couldn't find extent");
+		return -EIO;
+	}
+
+	/* entry found must be last entry */
+	nextindex = le16_to_cpu(p->header.nextindex);
+	if (index != nextindex - 1) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb,
+			  "xtTailgate: the entry found is not the last entry");
+		return -EIO;
+	}
+
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire tlock of the leaf page containing original entry
+	 */
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+		xtlck = (struct xtlock *) & tlck->lock;
+	}
+
+	/* completely replace extent ? */
+	xad = &p->xad[index];
+/*
+printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
+        (ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
+*/
+	if ((llen = xoff - offsetXAD(xad)) == 0)
+		goto updateOld;
+
+	/*
+	 *      partially replace extent: insert entry for new extent
+	 */
+//insertNew:
+	/*
+	 *      if the leaf page is full, insert the new entry and
+	 *      propagate up the router entry for the new page from split
+	 *
+	 * The xtSplitUp() will insert the entry and unpin the leaf page.
+	 */
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+		/* xtSplitUp() unpins leaf pages */
+		split.mp = mp;
+		split.index = index + 1;
+		split.flag = XAD_NEW;
+		split.off = xoff;	/* split offset */
+		split.len = xlen;
+		split.addr = xaddr;
+		split.pxdlist = NULL;
+		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
+			return rc;
+
+		/* get back old page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+		/*
+		 * if leaf root has been split, original root has been
+		 * copied to new child page, i.e., original entry now
+		 * resides on the new child page;
+		 */
+		if (p->header.flag & BT_INTERNAL) {
+			ASSERT(p->header.nextindex ==
+			       cpu_to_le16(XTENTRYSTART + 1));
+			xad = &p->xad[XTENTRYSTART];
+			bn = addressXAD(xad);
+			XT_PUTPAGE(mp);
+
+			/* get new child page */
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return rc;
+
+			BT_MARK_DIRTY(mp, ip);
+			if (!test_cflag(COMMIT_Nolink, ip)) {
+				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+				xtlck = (struct xtlock *) & tlck->lock;
+			}
+		}
+	}
+	/*
+	 *      insert the new entry into the leaf page
+	 */
+	else {
+		/* insert the new entry: mark the entry NEW */
+		xad = &p->xad[index + 1];
+		XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
+
+		/* advance next available entry index */
+		p->header.nextindex =
+		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	}
+
+	/* get back old XAD */
+	xad = &p->xad[index];
+
+	/*
+	 * truncate/relocate old extent at split offset
+	 */
+      updateOld:
+	/* update dmap for old/committed/truncated extent */
+	rlen = lengthXAD(xad) - llen;
+	if (!(xad->flag & XAD_NEW)) {
+		/* free from PWMAP at commit */
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			mtlck = txMaplock(tid, ip, tlckMAP);
+			pxdlock = (struct maplock *) & mtlck->lock;
+			pxdlock->flag = mlckFREEPXD;
+			PXDaddress(&pxdlock->pxd, addressXAD(xad) + llen);
+			PXDlength(&pxdlock->pxd, rlen);
+			pxdlock->index = 1;
+		}
+	} else
+		/* free from WMAP */
+		dbFree(ip, addressXAD(xad) + llen, (s64) rlen);
+
+	if (llen)
+		/* truncate */
+		XADlength(xad, llen);
+	else
+		/* replace */
+		XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
+
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		xtlck->lwm.offset = (xtlck->lwm.offset) ?
+		    min(index, (int)xtlck->lwm.offset) : index;
+		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
+		    xtlck->lwm.offset;
+	}
+
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
+#endif /* _NOTYET */
+
+/*
+ *      xtUpdate()
+ *
+ * function: update XAD;
+ *
+ *      update extent for allocated_but_not_recorded or
+ *      compressed extent;
+ *
+ * parameter:
+ *      nxad    - new XAD;
+ *                logical extent of the specified XAD must be completely
+ *                contained by an existing XAD;
+ */
+int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
+{				/* new XAD */
+	int rc = 0;
+	int cmp;
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* base B+-tree index page */
+	s64 bn;
+	int index0, index, newindex, nextindex;
+	struct btstack btstack;	/* traverse stack */
+	struct xtsplit split;	/* split information */
+	xad_t *xad, *lxad, *rxad;
+	int xflag;
+	s64 nxoff, xoff;
+	int nxlen, xlen, lxlen, rxlen;
+	s64 nxaddr, xaddr;
+	struct tlock *tlck;
+	struct xtlock *xtlck = NULL;
+	int newpage = 0;
+
+	/* there must exist an extent to be updated */
+	nxoff = offsetXAD(nxad);
+	nxlen = lengthXAD(nxad);
+	nxaddr = addressXAD(nxad);
+
+	if ((rc = xtSearch(ip, nxoff, &cmp, &btstack, XT_INSERT)))
+		return rc;
+
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
+
+	if (cmp != 0) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb, "xtUpdate: Could not find extent");
+		return -EIO;
+	}
+
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire tlock of the leaf page containing original entry
+	 */
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+		xtlck = (struct xtlock *) & tlck->lock;
+	}
+
+	xad = &p->xad[index0];
+	xflag = xad->flag;
+	xoff = offsetXAD(xad);
+	xlen = lengthXAD(xad);
+	xaddr = addressXAD(xad);
+
+	/* nXAD must be completely contained within XAD */
+	if ((xoff > nxoff) ||
+	    (nxoff + nxlen > xoff + xlen)) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb,
+			  "xtUpdate: nXAD is not completely contained within XAD");
+		return -EIO;
+	}
+
+	index = index0;
+	newindex = index + 1;
+	nextindex = le16_to_cpu(p->header.nextindex);
+
+#ifdef  _JFS_WIP_NOCOALESCE
+	if (xoff < nxoff)
+		goto updateRight;
+
+	/*
+	 * replace XAD with nXAD
+	 */
+      replace:			/* (nxoff == xoff) */
+	if (nxlen == xlen) {
+		/* replace XAD with nXAD:recorded */
+		*xad = *nxad;
+		xad->flag = xflag & ~XAD_NOTRECORDED;
+
+		goto out;
+	} else			/* (nxlen < xlen) */
+		goto updateLeft;
+#endif				/* _JFS_WIP_NOCOALESCE */
+
+/* #ifdef _JFS_WIP_COALESCE */
+	if (xoff < nxoff)
+		goto coalesceRight;
+
+	/*
+	 * coalesce with left XAD
+	 */
+//coalesceLeft: /* (xoff == nxoff) */
+	/* is XAD first entry of page ? */
+	if (index == XTENTRYSTART)
+		goto replace;
+
+	/* is nXAD logically and physically contiguous with lXAD ? */
+	lxad = &p->xad[index - 1];
+	lxlen = lengthXAD(lxad);
+	if (!(lxad->flag & XAD_NOTRECORDED) &&
+	    (nxoff == offsetXAD(lxad) + lxlen) &&
+	    (nxaddr == addressXAD(lxad) + lxlen) &&
+	    (lxlen + nxlen < MAXXLEN)) {
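+		/* (illustrative) we get here when, say, lXAD maps 50 blocks
+		 * at offset 100/address 500 and nXAD maps 10 blocks at
+		 * offset 150/address 550: lXAD simply grows to 60 blocks,
+		 * and the overlapped head of XAD is trimmed off below or,
+		 * if nXAD covered all of XAD, the XAD entry is removed.
+		 */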
+		/* extend right lXAD */
+		index0 = index - 1;
+		XADlength(lxad, lxlen + nxlen);
+
+		/* If we just merged two extents together, need to make sure the
+		 * right extent gets logged.  If the left one is marked XAD_NEW,
+		 * then we know it will be logged.  Otherwise, mark as
+		 * XAD_EXTENDED
+		 */
+		if (!(lxad->flag & XAD_NEW))
+			lxad->flag |= XAD_EXTENDED;
+
+		if (xlen > nxlen) {
+			/* truncate XAD */
+			XADoffset(xad, xoff + nxlen);
+			XADlength(xad, xlen - nxlen);
+			XADaddress(xad, xaddr + nxlen);
+			goto out;
+		} else {	/* (xlen == nxlen) */
+
+			/* remove XAD */
+			if (index < nextindex - 1)
+				memmove(&p->xad[index], &p->xad[index + 1],
+					(nextindex - index -
+					 1) << L2XTSLOTSIZE);
+
+			p->header.nextindex =
+			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
+					1);
+
+			index = index0;
+			newindex = index + 1;
+			nextindex = le16_to_cpu(p->header.nextindex);
+			xoff = nxoff = offsetXAD(lxad);
+			xlen = nxlen = lxlen + nxlen;
+			xaddr = nxaddr = addressXAD(lxad);
+			goto coalesceRight;
+		}
+	}
+
+	/*
+	 * replace XAD with nXAD
+	 */
+      replace:			/* (nxoff == xoff) */
+	if (nxlen == xlen) {
+		/* replace XAD with nXAD:recorded */
+		*xad = *nxad;
+		xad->flag = xflag & ~XAD_NOTRECORDED;
+
+		goto coalesceRight;
+	} else			/* (nxlen < xlen) */
+		goto updateLeft;
+
+	/*
+	 * coalesce with right XAD
+	 */
+      coalesceRight:		/* (xoff <= nxoff) */
+	/* is XAD last entry of page ? */
+	if (newindex == nextindex) {
+		if (xoff == nxoff)
+			goto out;
+		goto updateRight;
+	}
+
+	/* is nXAD logically and physically contiguous with rXAD ? */
+	rxad = &p->xad[index + 1];
+	rxlen = lengthXAD(rxad);
+	if (!(rxad->flag & XAD_NOTRECORDED) &&
+	    (nxoff + nxlen == offsetXAD(rxad)) &&
+	    (nxaddr + nxlen == addressXAD(rxad)) &&
+	    (rxlen + nxlen < MAXXLEN)) {
+		/* extend left rXAD */
+		XADoffset(rxad, nxoff);
+		XADlength(rxad, rxlen + nxlen);
+		XADaddress(rxad, nxaddr);
+
+		/* If we just merged two extents together, need to make sure
+		 * the left extent gets logged.  If the right one is marked
+		 * XAD_NEW, then we know it will be logged.  Otherwise, mark as
+		 * XAD_EXTENDED
+		 */
+		if (!(rxad->flag & XAD_NEW))
+			rxad->flag |= XAD_EXTENDED;
+
+		if (xlen > nxlen)
+			/* truncate XAD */
+			XADlength(xad, xlen - nxlen);
+		else {		/* (xlen == nxlen) */
+
+			/* remove XAD */
+			memmove(&p->xad[index], &p->xad[index + 1],
+				(nextindex - index - 1) << L2XTSLOTSIZE);
+
+			p->header.nextindex =
+			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
+					1);
+		}
+
+		goto out;
+	} else if (xoff == nxoff)
+		goto out;
+
+	if (xoff >= nxoff) {
+		XT_PUTPAGE(mp);
+		jfs_error(ip->i_sb, "xtUpdate: xoff >= nxoff");
+		return -EIO;
+	}
+/* #endif _JFS_WIP_COALESCE */
+
+	/*
+	 * split XAD into (lXAD, nXAD):
+	 *
+	 *          |---nXAD--->
+	 * --|----------XAD----------|--
+	 *   |-lXAD-|
+	 */
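+	/* (illustrative) if XAD maps blocks [0..99] and nXAD maps [40..59],
+	 * the code below truncates XAD to lXAD = [0..39], inserts nXAD as
+	 * recorded, and then falls through to carve the remaining tail
+	 * [60..99] out as rXAD (not recorded).
+	 */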
+      updateRight:		/* (xoff < nxoff) */
+	/* truncate old XAD as lXAD:not_recorded */
+	xad = &p->xad[index];
+	XADlength(xad, nxoff - xoff);
+
+	/* insert nXAD:recorded */
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+
+		/* xtSplitUp() unpins leaf pages */
+		split.mp = mp;
+		split.index = newindex;
+		split.flag = xflag & ~XAD_NOTRECORDED;
+		split.off = nxoff;
+		split.len = nxlen;
+		split.addr = nxaddr;
+		split.pxdlist = NULL;
+		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
+			return rc;
+
+		/* get back old page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+		/*
+		 * if leaf root has been split, original root has been
+		 * copied to new child page, i.e., original entry now
+		 * resides on the new child page;
+		 */
+		if (p->header.flag & BT_INTERNAL) {
+			ASSERT(p->header.nextindex ==
+			       cpu_to_le16(XTENTRYSTART + 1));
+			xad = &p->xad[XTENTRYSTART];
+			bn = addressXAD(xad);
+			XT_PUTPAGE(mp);
+
+			/* get new child page */
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return rc;
+
+			BT_MARK_DIRTY(mp, ip);
+			if (!test_cflag(COMMIT_Nolink, ip)) {
+				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+				xtlck = (struct xtlock *) & tlck->lock;
+			}
+		} else {
+			/* is nXAD on new page ? */
+			if (newindex >
+			    (le16_to_cpu(p->header.maxentry) >> 1)) {
+				newindex =
+				    newindex -
+				    le16_to_cpu(p->header.nextindex) +
+				    XTENTRYSTART;
+				newpage = 1;
+			}
+		}
+	} else {
+		/* if insert into middle, shift right remaining entries */
+		if (newindex < nextindex)
+			memmove(&p->xad[newindex + 1], &p->xad[newindex],
+				(nextindex - newindex) << L2XTSLOTSIZE);
+
+		/* insert the entry */
+		xad = &p->xad[newindex];
+		*xad = *nxad;
+		xad->flag = xflag & ~XAD_NOTRECORDED;
+
+		/* advance next available entry index. */
+		p->header.nextindex =
+		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	}
+
+	/*
+	 * does nXAD force 3-way split ?
+	 *
+	 *          |---nXAD--->|
+	 * --|----------XAD-------------|--
+	 *   |-lXAD-|           |-rXAD -|
+	 */
+	if (nxoff + nxlen == xoff + xlen)
+		goto out;
+
+	/* reorient nXAD as XAD for further split XAD into (nXAD, rXAD) */
+	if (newpage) {
+		/* close out old page */
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			xtlck->lwm.offset = (xtlck->lwm.offset) ?
+			    min(index0, (int)xtlck->lwm.offset) : index0;
+			xtlck->lwm.length =
+			    le16_to_cpu(p->header.nextindex) -
+			    xtlck->lwm.offset;
+		}
+
+		bn = le64_to_cpu(p->header.next);
+		XT_PUTPAGE(mp);
+
+		/* get new right page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		BT_MARK_DIRTY(mp, ip);
+		if (!test_cflag(COMMIT_Nolink, ip)) {
+			tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+			xtlck = (struct xtlock *) & tlck->lock;
+		}
+
+		index0 = index = newindex;
+	} else
+		index++;
+
+	newindex = index + 1;
+	nextindex = le16_to_cpu(p->header.nextindex);
+	xlen = xlen - (nxoff - xoff);
+	xoff = nxoff;
+	xaddr = nxaddr;
+
+	/* recompute split pages */
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+		XT_PUTPAGE(mp);
+
+		if ((rc = xtSearch(ip, nxoff, &cmp, &btstack, XT_INSERT)))
+			return rc;
+
+		/* retrieve search result */
+		XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
+
+		if (cmp != 0) {
+			XT_PUTPAGE(mp);
+			jfs_error(ip->i_sb, "xtUpdate: xtSearch failed");
+			return -EIO;
+		}
+
+		if (index0 != index) {
+			XT_PUTPAGE(mp);
+			jfs_error(ip->i_sb,
+				  "xtUpdate: unexpected value of index");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * split XAD into (nXAD, rXAD)
+	 *
+	 *          ---nXAD---|
+	 * --|----------XAD----------|--
+	 *                    |-rXAD-|
+	 */
+      updateLeft:		/* (nxoff == xoff) && (nxlen < xlen) */
+	/* update old XAD with nXAD:recorded */
+	xad = &p->xad[index];
+	*xad = *nxad;
+	xad->flag = xflag & ~XAD_NOTRECORDED;
+
+	/* insert rXAD:not_recorded */
+	xoff = xoff + nxlen;
+	xlen = xlen - nxlen;
+	xaddr = xaddr + nxlen;
+	if (nextindex == le16_to_cpu(p->header.maxentry)) {
+/*
+printf("xtUpdate.updateLeft.split p:0x%p\n", p);
+*/
+		/* xtSplitUp() unpins leaf pages */
+		split.mp = mp;
+		split.index = newindex;
+		split.flag = xflag;
+		split.off = xoff;
+		split.len = xlen;
+		split.addr = xaddr;
+		split.pxdlist = NULL;
+		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
+			return rc;
+
+		/* get back old page */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/*
+		 * if leaf root has been split, original root has been
+		 * copied to new child page, i.e., original entry now
+		 * resides on the new child page;
+		 */
+		if (p->header.flag & BT_INTERNAL) {
+			ASSERT(p->header.nextindex ==
+			       cpu_to_le16(XTENTRYSTART + 1));
+			xad = &p->xad[XTENTRYSTART];
+			bn = addressXAD(xad);
+			XT_PUTPAGE(mp);
+
+			/* get new child page */
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return rc;
+
+			BT_MARK_DIRTY(mp, ip);
+			if (!test_cflag(COMMIT_Nolink, ip)) {
+				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+				xtlck = (struct xtlock *) & tlck->lock;
+			}
+		}
+	} else {
+		/* if insert into middle, shift right remaining entries */
+		if (newindex < nextindex)
+			memmove(&p->xad[newindex + 1], &p->xad[newindex],
+				(nextindex - newindex) << L2XTSLOTSIZE);
+
+		/* insert the entry */
+		xad = &p->xad[newindex];
+		XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
+
+		/* advance next available entry index. */
+		p->header.nextindex =
+		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+	}
+
+      out:
+	if (!test_cflag(COMMIT_Nolink, ip)) {
+		xtlck->lwm.offset = (xtlck->lwm.offset) ?
+		    min(index0, (int)xtlck->lwm.offset) : index0;
+		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
+		    xtlck->lwm.offset;
+	}
+
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
+
+
+/*
+ *      xtAppend()
+ *
+ * function: grow in append mode from the contiguous region specified;
+ *
+ * parameter:
+ *      tid             - transaction id;
+ *      ip              - file object;
+ *      xflag           - extent flag:
+ *      xoff            - extent offset;
+ *      maxblocks       - max extent length;
+ *      xlen            - extent length (in/out);
+ *      xaddrp          - extent address pointer (in/out):
+ *      flag            -
+ *
+ * return:
+ */
+int xtAppend(tid_t tid,		/* transaction id */
+	     struct inode *ip, int xflag, s64 xoff, s32 maxblocks,	
+	     s32 * xlenp,	/* (in/out) */
+	     s64 * xaddrp,	/* (in/out) */
+	     int flag)
+{
+	int rc = 0;
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* base B+-tree index page */
+	s64 bn, xaddr;
+	int index, nextindex;
+	struct btstack btstack;	/* traverse stack */
+	struct xtsplit split;	/* split information */
+	xad_t *xad;
+	int cmp;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+	int nsplit, nblocks, xlen;
+	struct pxdlist pxdlist;
+	pxd_t *pxd;
+
+	xaddr = *xaddrp;
+	xlen = *xlenp;
+	jfs_info("xtAppend: xoff:0x%lx maxblocks:%d xlen:%d xaddr:0x%lx",
+		 (ulong) xoff, maxblocks, xlen, (ulong) xaddr);
+
+	/*
+	 *      search for the entry location at which to insert:
+	 *
+	 * xtFastSearch() and xtSearch() both return (leaf page
+	 * pinned, index at which to insert).
+	 * n.b. xtSearch() may return index of maxentry of
+	 * the full page.
+	 */
+	if ((rc = xtSearch(ip, xoff, &cmp, &btstack, XT_INSERT)))
+		return rc;
+
+	/* retrieve search result */
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+	if (cmp == 0) {
+		rc = -EEXIST;
+		goto out;
+	}
+//insert:
+	/*
+	 *      insert entry for new extent
+	 */
+	xflag |= XAD_NEW;
+
+	/*
+	 *      if the leaf page is full, split the page and
+	 *      propagate up the router entry for the new page from split
+	 *
+	 * The xtSplitUp() will insert the entry and unpin the leaf page.
+	 */
+	nextindex = le16_to_cpu(p->header.nextindex);
+	if (nextindex < le16_to_cpu(p->header.maxentry))
+		goto insertLeaf;
+
+	/*
+	 * allocate new index blocks to cover index page split(s)
+	 */
+	nsplit = btstack.nsplit;
+	split.pxdlist = &pxdlist;
+	pxdlist.maxnpxd = pxdlist.npxd = 0;
+	pxd = &pxdlist.pxd[0];
+	nblocks = JFS_SBI(ip->i_sb)->nbperpage;
+	for (; nsplit > 0; nsplit--, pxd++, xaddr += nblocks, maxblocks -= nblocks) {	
+		if ((rc = dbAllocBottomUp(ip, xaddr, (s64) nblocks)) == 0) {
+			PXDaddress(pxd, xaddr);
+			PXDlength(pxd, nblocks);
+
+			pxdlist.maxnpxd++;
+
+			continue;
+		}
+
+		/* undo allocation */
+
+		goto out;
+	}
+
+	xlen = min(xlen, maxblocks);	
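+	/* (illustrative) if btstack.nsplit predicted two page splits and
+	 * nbperpage is 1 (4K block size), two single-block extents were just
+	 * carved off the front of the caller's region above, and the data
+	 * extent allocated below is capped at whatever of maxblocks remains.
+	 */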
+
+	/*
+	 * allocate data extent requested
+	 */
+	if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
+		goto out;
+
+	split.mp = mp;
+	split.index = index;
+	split.flag = xflag;
+	split.off = xoff;
+	split.len = xlen;
+	split.addr = xaddr;
+	if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
+		/* undo data extent allocation */
+		dbFree(ip, *xaddrp, (s64) * xlenp);
+
+		return rc;
+	}
+
+	*xaddrp = xaddr;
+	*xlenp = xlen;
+	return 0;
+
+	/*
+	 *      insert the new entry into the leaf page
+	 */
+      insertLeaf:
+	/*
+	 * allocate data extent requested
+	 */
+	if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
+		goto out;
+
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire a transaction lock on the leaf page;
+	 *
+	 * action: xad insertion/extension;
+	 */
+	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
+	xtlck = (struct xtlock *) & tlck->lock;
+
+	/* insert the new entry: mark the entry NEW */
+	xad = &p->xad[index];
+	XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
+
+	/* advance next available entry index */
+	p->header.nextindex =
+	    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
+
+	xtlck->lwm.offset =
+	    (xtlck->lwm.offset) ? min(index,(int) xtlck->lwm.offset) : index;
+	xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
+	    xtlck->lwm.offset;
+
+	*xaddrp = xaddr;
+	*xlenp = xlen;
+
+      out:
+	/* unpin the leaf page */
+	XT_PUTPAGE(mp);
+
+	return rc;
+}
+#ifdef _STILL_TO_PORT
+
+/* - TBD for defragmentation/reorganization -
+ *
+ *      xtDelete()
+ *
+ * function:
+ *      delete the entry with the specified key.
+ *
+ *      N.B.: whole extent of the entry is assumed to be deleted.
+ *
+ * parameter:
+ *
+ * return:
+ *       ENOENT: if the entry is not found.
+ *
+ * exception:
+ */
+int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
+{
+	int rc = 0;
+	struct btstack btstack;
+	int cmp;
+	s64 bn;
+	struct metapage *mp;
+	xtpage_t *p;
+	int index, nextindex;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+
+	/*
+	 * find the matching entry; xtSearch() pins the page
+	 */
+	if ((rc = xtSearch(ip, xoff, &cmp, &btstack, 0)))
+		return rc;
+
+	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+	if (cmp) {
+		/* unpin the leaf page */
+		XT_PUTPAGE(mp);
+		return -ENOENT;
+	}
+
+	/*
+	 * delete the entry from the leaf page
+	 */
+	nextindex = le16_to_cpu(p->header.nextindex);
+	p->header.nextindex =
+	    cpu_to_le16(le16_to_cpu(p->header.nextindex) - 1);
+
+	/*
+	 * if the leaf page becomes empty, free the page
+	 */
+	if (p->header.nextindex == cpu_to_le16(XTENTRYSTART))
+		return (xtDeleteUp(tid, ip, mp, p, &btstack));
+
+	BT_MARK_DIRTY(mp, ip);
+	/*
+	 * acquire a transaction lock on the leaf page;
+	 *
+	 * action:xad deletion;
+	 */
+	tlck = txLock(tid, ip, mp, tlckXTREE);
+	xtlck = (struct xtlock *) & tlck->lock;
+	xtlck->lwm.offset =
+	    (xtlck->lwm.offset) ? min(index, (int)xtlck->lwm.offset) : index;
+
+	/* if delete from middle, shift left/compact the remaining entries */
+	if (index < nextindex - 1)
+		memmove(&p->xad[index], &p->xad[index + 1],
+			(nextindex - index - 1) * sizeof(xad_t));
+
+	XT_PUTPAGE(mp);
+
+	return 0;
+}
+
+
+/* - TBD for defragmentation/reorganization -
+ *
+ *      xtDeleteUp()
+ *
+ * function:
+ *      free empty pages as propagating deletion up the tree
+ *
+ * parameter:
+ *
+ * return:
+ */
+static int
+xtDeleteUp(tid_t tid, struct inode *ip,
+	   struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
+{
+	int rc = 0;
+	struct metapage *mp;
+	xtpage_t *p;
+	int index, nextindex;
+	s64 xaddr;
+	int xlen;
+	struct btframe *parent;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+
+	/*
+	 * keep root leaf page which has become empty
+	 */
+	if (fp->header.flag & BT_ROOT) {
+		/* keep the root page */
+		fp->header.flag &= ~BT_INTERNAL;
+		fp->header.flag |= BT_LEAF;
+		fp->header.nextindex = cpu_to_le16(XTENTRYSTART);
+
+		/* XT_PUTPAGE(fmp); */
+
+		return 0;
+	}
+
+	/*
+	 * free non-root leaf page
+	 */
+	if ((rc = xtRelink(tid, ip, fp))) {
+		XT_PUTPAGE(fmp);
+		return rc;
+	}
+
+	xaddr = addressPXD(&fp->header.self);
+	xlen = lengthPXD(&fp->header.self);
+	/* free the page extent */
+	dbFree(ip, xaddr, (s64) xlen);
+
+	/* free the buffer page */
+	discard_metapage(fmp);
+
+	/*
+	 * propagate page deletion up the index tree
+	 *
+	 * If the delete from the parent page makes it empty,
+	 * continue all the way up the tree.
+	 * stop if the root page is reached (which is never deleted) or
+	 * if the entry deletion does not empty the page.
+	 */
+	while ((parent = BT_POP(btstack)) != NULL) {
+		/* get/pin the parent page <sp> */
+		XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		index = parent->index;
+
+		/* delete the entry for the freed child page from parent.
+		 */
+		nextindex = le16_to_cpu(p->header.nextindex);
+
+		/*
+		 * the parent has the single entry being deleted:
+		 * free the parent page which has become empty.
+		 */
+		if (nextindex == 1) {
+			if (p->header.flag & BT_ROOT) {
+				/* keep the root page */
+				p->header.flag &= ~BT_INTERNAL;
+				p->header.flag |= BT_LEAF;
+				p->header.nextindex =
+				    cpu_to_le16(XTENTRYSTART);
+
+				/* XT_PUTPAGE(mp); */
+
+				break;
+			} else {
+				/* free the parent page */
+				if ((rc = xtRelink(tid, ip, p)))
+					return rc;
+
+				xaddr = addressPXD(&p->header.self);
+				/* free the page extent */
+				dbFree(ip, xaddr,
+				       (s64) JFS_SBI(ip->i_sb)->nbperpage);
+
+				/* unpin/free the buffer page */
+				discard_metapage(mp);
+
+				/* propagate up */
+				continue;
+			}
+		}
+		/*
+		 * the parent has other entries remaining:
+		 * delete the router entry from the parent page.
+		 */
+		else {
+			BT_MARK_DIRTY(mp, ip);
+			/*
+			 * acquire a transaction lock on the leaf page;
+			 *
+			 * action:xad deletion;
+			 */
+			tlck = txLock(tid, ip, mp, tlckXTREE);
+			xtlck = (struct xtlock *) & tlck->lock;
+			xtlck->lwm.offset = (xtlck->lwm.offset) ?
+			    min(index, (int)xtlck->lwm.offset) : index;
+
+			/* if delete from middle,
+			 * shift left/compact the remaining entries in the page
+			 */
+			if (index < nextindex - 1)
+				memmove(&p->xad[index], &p->xad[index + 1],
+					(nextindex - index -
+					 1) << L2XTSLOTSIZE);
+
+			p->header.nextindex =
+			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
+					1);
+			jfs_info("xtDeleteUp(entry): 0x%lx[%d]",
+				 (ulong) parent->bn, index);
+		}
+
+		/* unpin the parent page */
+		XT_PUTPAGE(mp);
+
+		/* exit propagation up */
+		break;
+	}
+
+	return 0;
+}
+
+
+/*
+ * NAME:        xtRelocate()
+ *
+ * FUNCTION:    relocate xtpage or data extent of regular file;
+ *              This function is mainly used by defragfs utility.
+ *
+ * NOTE:        This routine does not have the logic to handle
+ *              uncommitted allocated extent. The caller should call
+ *              txCommit() to commit all the allocation before call
+ *              this routine.
+ */
+int
+xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad,	/* old XAD */
+	   s64 nxaddr,		/* new xaddr */
+	   int xtype)
+{				/* extent type: XTPAGE or DATAEXT */
+	int rc = 0;
+	struct tblock *tblk;
+	struct tlock *tlck;
+	struct xtlock *xtlck;
+	struct metapage *mp, *pmp, *lmp, *rmp;	/* meta-page buffer */
+	xtpage_t *p, *pp, *rp, *lp;	/* base B+-tree index page */
+	xad_t *xad;
+	pxd_t *pxd;
+	s64 xoff, xsize;
+	int xlen;
+	s64 oxaddr, sxaddr, dxaddr, nextbn, prevbn;
+	cbuf_t *cp;
+	s64 offset, nbytes, nbrd, pno;
+	int nb, npages, nblks;
+	s64 bn;
+	int cmp;
+	int index;
+	struct pxd_lock *pxdlock;
+	struct btstack btstack;	/* traverse stack */
+
+	xtype = xtype & EXTENT_TYPE;
+
+	xoff = offsetXAD(oxad);
+	oxaddr = addressXAD(oxad);
+	xlen = lengthXAD(oxad);
+
+	/* validate extent offset */
+	offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
+	if (offset >= ip->i_size)
+		return -ESTALE;	/* stale extent */
+
+	jfs_info("xtRelocate: xtype:%d xoff:0x%lx xlen:0x%x xaddr:0x%lx:0x%lx",
+		 xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);
+
+	/*
+	 *      1. get and validate the parent xtpage/xad entry
+	 *      covering the source extent to be relocated;
+	 */
+	if (xtype == DATAEXT) {
+		/* search in leaf entry */
+		rc = xtSearch(ip, xoff, &cmp, &btstack, 0);
+		if (rc)
+			return rc;
+
+		/* retrieve search result */
+		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
+
+		if (cmp) {
+			XT_PUTPAGE(pmp);
+			return -ESTALE;
+		}
+
+		/* validate for exact match with a single entry */
+		xad = &pp->xad[index];
+		if (addressXAD(xad) != oxaddr || lengthXAD(xad) != xlen) {
+			XT_PUTPAGE(pmp);
+			return -ESTALE;
+		}
+	} else {		/* (xtype == XTPAGE) */
+
+		/* search in internal entry */
+		rc = xtSearchNode(ip, oxad, &cmp, &btstack, 0);
+		if (rc)
+			return rc;
+
+		/* retrieve search result */
+		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
+
+		if (cmp) {
+			XT_PUTPAGE(pmp);
+			return -ESTALE;
+		}
+
+		/* xtSearchNode() validated for exact match with a single entry
+		 */
+		xad = &pp->xad[index];
+	}
+	jfs_info("xtRelocate: parent xad entry validated.");
+
+	/*
+	 *      2. relocate the extent
+	 */
+	if (xtype == DATAEXT) {
+		/* if the extent is allocated-but-not-recorded
+		 * there is no real data to be moved in this extent,
+		 */
+		if (xad->flag & XAD_NOTRECORDED)
+			goto out;
+		else
+			/* release xtpage for cmRead()/xtLookup() */
+			XT_PUTPAGE(pmp);
+
+		/*
+		 *      cmRelocate()
+		 *
+		 * copy target data pages to be relocated;
+		 *
+		 * data extent must start at page boundary and
+		 * multiple of page size (except the last data extent);
+		 * read in each page of the source data extent into cbuf,
+		 * update the cbuf extent descriptor of the page to be
+		 * homeward bound to new dst data extent
+		 * copy the data from the old extent to new extent.
+		 * copy is essential for compressed files to avoid problems
+		 * that can arise if there was a change in compression
+		 * algorithms.
+		 * it is a good strategy because it may disrupt cache
+		 * policy to keep the pages in memory afterwards.
+		 */
+		offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
+		assert((offset & CM_OFFSET) == 0);
+		nbytes = xlen << JFS_SBI(ip->i_sb)->l2bsize;
+		pno = offset >> CM_L2BSIZE;
+		npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
+/*
+                npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
+                         (offset >> CM_L2BSIZE) + 1;
+*/
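+		/* e.g., assuming CM_BSIZE is 4K, a 10000-byte extent gives
+		 * npages = 3 cache buffers to copy; the figures are
+		 * illustrative only.
+		 */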
+		sxaddr = oxaddr;
+		dxaddr = nxaddr;
+
+		/* process the request one cache buffer at a time */
+		for (nbrd = 0; nbrd < nbytes; nbrd += nb,
+		     offset += nb, pno++, npages--) {
+			/* compute page size */
+			nb = min(nbytes - nbrd, CM_BSIZE);
+
+			/* get the cache buffer of the page */
+			if ((rc = cmRead(ip, offset, npages, &cp)))
+				break;
+
+			assert(addressPXD(&cp->cm_pxd) == sxaddr);
+			assert(!cp->cm_modified);
+
+			/* bind buffer with the new extent address */
+			nblks = nb >> JFS_SBI(ip->i_sb)->l2bsize;
+			cmSetXD(ip, cp, pno, dxaddr, nblks);
+
+			/* release the cbuf, mark it as modified */
+			cmPut(cp, TRUE);
+
+			dxaddr += nblks;
+			sxaddr += nblks;
+		}
+
+		/* get back parent page */
+		if ((rc = xtSearch(ip, xoff, &cmp, &btstack, 0)))
+			return rc;
+
+		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
+		jfs_info("xtRelocate: target data extent relocated.");
+	} else {		/* (xtype  == XTPAGE) */
+
+		/*
+		 * read in the target xtpage from the source extent;
+		 */
+		XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
+		if (rc) {
+			XT_PUTPAGE(pmp);
+			return rc;
+		}
+
+		/*
+		 * read in sibling pages if any to update sibling pointers;
+		 */
+		rmp = NULL;
+		if (p->header.next) {
+			nextbn = le64_to_cpu(p->header.next);
+			XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
+			if (rc) {
+				XT_PUTPAGE(pmp);
+				XT_PUTPAGE(mp);
+				return (rc);
+			}
+		}
+
+		lmp = NULL;
+		if (p->header.prev) {
+			prevbn = le64_to_cpu(p->header.prev);
+			XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
+			if (rc) {
+				XT_PUTPAGE(pmp);
+				XT_PUTPAGE(mp);
+				if (rmp)
+					XT_PUTPAGE(rmp);
+				return (rc);
+			}
+		}
+
+		/* at this point, all xtpages to be updated are in memory */
+
+		/*
+		 * update sibling pointers of sibling xtpages if any;
+		 */
+		if (lmp) {
+			BT_MARK_DIRTY(lmp, ip);
+			tlck =
+			    txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
+			lp->header.next = cpu_to_le64(nxaddr);
+			XT_PUTPAGE(lmp);
+		}
+
+		if (rmp) {
+			BT_MARK_DIRTY(rmp, ip);
+			tlck =
+			    txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
+			rp->header.prev = cpu_to_le64(nxaddr);
+			XT_PUTPAGE(rmp);
+		}
+
+		/*
+		 * update the target xtpage to be relocated
+		 *
+		 * update the self address of the target page
+		 * and write to destination extent;
+		 * redo image covers the whole xtpage since it is new page
+		 * to the destination extent;
+		 * update of bmap for the free of source extent
+		 * of the target xtpage itself:
+		 * update of bmap for the allocation of destination extent
+		 * of the target xtpage itself:
+		 * update of bmap for the extents covered by xad entries in
+		 * the target xtpage is not necessary since they are not
+		 * updated;
+		 * if not committed before this relocation,
+		 * target page may contain XAD_NEW entries which must
+		 * be scanned for bmap update (logredo() always
+		 * scan xtpage REDOPAGE image for bmap update);
+		 * if committed before this relocation (tlckRELOCATE),
+		 * scan may be skipped by commit() and logredo();
+		 */
+		BT_MARK_DIRTY(mp, ip);
+		/* tlckNEW init  xtlck->lwm.offset = XTENTRYSTART; */
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
+		xtlck = (struct xtlock *) & tlck->lock;
+
+		/* update the self address in the xtpage header */
+		pxd = &p->header.self;
+		PXDaddress(pxd, nxaddr);
+
+		/* linelock for the after image of the whole page */
+		xtlck->lwm.length =
+		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
+
+		/* update the buffer extent descriptor of target xtpage */
+		xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
+		bmSetXD(mp, nxaddr, xsize);
+
+		/* unpin the target page to new homeward bound */
+		XT_PUTPAGE(mp);
+		jfs_info("xtRelocate: target xtpage relocated.");
+	}
+
+	/*
+	 *      3. acquire maplock for the source extent to be freed;
+	 *
+	 * acquire a maplock saving the src relocated extent address;
+	 * to free of the extent at commit time;
+	 */
+      out:
+	/* if DATAEXT relocation, write a LOG_UPDATEMAP record for
+	 * free PXD of the source data extent (logredo() will update
+	 * bmap for free of source data extent), and update bmap for
+	 * free of the source data extent;
+	 */
+	if (xtype == DATAEXT)
+		tlck = txMaplock(tid, ip, tlckMAP);
+	/* if XTPAGE relocation, write a LOG_NOREDOPAGE record
+	 * for the source xtpage (logredo() will init NoRedoPage
+	 * filter and will also update bmap for free of the source
+	 * xtpage), and update bmap for free of the source xtpage;
+	 * N.B. We use tlckMAP instead of tlckXTREE because there
+	 *      is no buffer associated with this lock since the buffer
+	 *      has been redirected to the target location.
+	 */
+	else			/* (xtype  == XTPAGE) */
+		tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
+
+	pxdlock = (struct pxd_lock *) & tlck->lock;
+	pxdlock->flag = mlckFREEPXD;
+	PXDaddress(&pxdlock->pxd, oxaddr);
+	PXDlength(&pxdlock->pxd, xlen);
+	pxdlock->index = 1;
+
+	/*
+	 *      4. update the parent xad entry for relocation;
+	 *
+	 * acquire tlck for the parent entry with XAD_NEW as entry
+	 * update which will write LOG_REDOPAGE and update bmap for
+	 * allocation of XAD_NEW destination extent;
+	 */
+	jfs_info("xtRelocate: update parent xad entry.");
+	BT_MARK_DIRTY(pmp, ip);
+	tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW);
+	xtlck = (struct xtlock *) & tlck->lock;
+
+	/* update the XAD with the new destination extent; */
+	xad = &pp->xad[index];
+	xad->flag |= XAD_NEW;
+	XADaddress(xad, nxaddr);
+
+	xtlck->lwm.offset = min(index, (int)xtlck->lwm.offset);
+	xtlck->lwm.length = le16_to_cpu(pp->header.nextindex) -
+	    xtlck->lwm.offset;
+
+	/* unpin the parent xtpage */
+	XT_PUTPAGE(pmp);
+
+	return rc;
+}
+
+
+/*
+ *      xtSearchNode()
+ *
+ * function:    search for the internal xad entry covering specified extent.
+ *              This function is mainly used by defragfs utility.
+ *
+ * parameters:
+ *      ip      - file object;
+ *      xad     - extent to find;
+ *      cmpp    - comparison result:
+ *      btstack - traverse stack;
+ *      flag    - search process flag;
+ *
+ * returns:
+ *      btstack contains (bn, index) of search path traversed to the entry.
+ *      *cmpp is set to result of comparison with the entry returned.
+ *      the page containing the entry is pinned at exit.
+ */
+static int xtSearchNode(struct inode *ip, xad_t * xad,	/* required XAD entry */
+			int *cmpp, struct btstack * btstack, int flag)
+{
+	int rc = 0;
+	s64 xoff, xaddr;
+	int xlen;
+	int cmp = 1;		/* init for empty page */
+	s64 bn;			/* block number */
+	struct metapage *mp;	/* meta-page buffer */
+	xtpage_t *p;		/* page */
+	int base, index, lim;
+	struct btframe *btsp;
+	s64 t64;
+
+	BT_CLR(btstack);
+
+	xoff = offsetXAD(xad);
+	xlen = lengthXAD(xad);
+	xaddr = addressXAD(xad);
+
+	/*
+	 *      search down tree from root:
+	 *
+	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
+	 * internal page, child page Pi contains entries with key K, Ki <= K < Kj.
+	 *
+	 * if the entry with search key K is not found, the internal page
+	 * search finds the entry with the largest key Ki less than K, which
+	 * points to the child page to search; the leaf page search finds the
+	 * entry with the smallest key Kj greater than K, so that the returned
+	 * index is the position of the entry to be shifted right for
+	 * insertion of the new entry.
+	 * for empty tree, search key is greater than any key of the tree.
+	 *
+	 * by convention, root bn = 0.
+	 */
+	for (bn = 0;;) {
+		/* get/pin the page to search */
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+		if (p->header.flag & BT_LEAF) {
+			XT_PUTPAGE(mp);
+			return -ESTALE;
+		}
+
+		lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
+
+		/*
+		 * binary search with search key K on the current page
+		 */
+		for (base = XTENTRYSTART; lim; lim >>= 1) {
+			index = base + (lim >> 1);
+
+			XT_CMP(cmp, xoff, &p->xad[index], t64);
+			if (cmp == 0) {
+				/*
+				 *      search hit
+				 *
+				 * verify for exact match;
+				 */
+				if (xaddr == addressXAD(&p->xad[index]) &&
+				    xoff == offsetXAD(&p->xad[index])) {
+					*cmpp = cmp;
+
+					/* save search result */
+					btsp = btstack->top;
+					btsp->bn = bn;
+					btsp->index = index;
+					btsp->mp = mp;
+
+					return 0;
+				}
+
+				/* descend/search its child page */
+				goto next;
+			}
+
+			if (cmp > 0) {
+				base = index + 1;
+				--lim;
+			}
+		}
+
+		/*
+		 *      search miss - non-leaf page:
+		 *
+		 * base is the smallest index with key (Kj) greater than
+		 * search key (K) and may be zero or maxentry index.
+		 * if base is non-zero, decrement base by one to get the parent
+		 * entry of the child page to search.
+		 */
+		index = base ? base - 1 : base;
+
+		/*
+		 * go down to child page
+		 */
+	      next:
+		/* get the child page block number */
+		bn = addressXAD(&p->xad[index]);
+
+		/* unpin the parent page */
+		XT_PUTPAGE(mp);
+	}
+}
+
+
+/*
+ *      xtRelink()
+ *
+ * function:
+ *      link around a freed page.
+ *
+ * Parameter:
+ *      tid_t         tid,
+ *      struct inode    *ip,
+ *      xtpage_t        *p)
+ *
+ * returns:
+ */
+static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
+{
+	int rc = 0;
+	struct metapage *mp;
+	s64 nextbn, prevbn;
+	struct tlock *tlck;
+
+	nextbn = le64_to_cpu(p->header.next);
+	prevbn = le64_to_cpu(p->header.prev);
+
+	/* update prev pointer of the next page */
+	if (nextbn != 0) {
+		XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/*
+		 * acquire a transaction lock on the page;
+		 *
+		 * action: update prev pointer;
+		 */
+		BT_MARK_DIRTY(mp, ip);
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
+
+		/* the page may already have been tlock'd */
+
+		p->header.prev = cpu_to_le64(prevbn);
+
+		XT_PUTPAGE(mp);
+	}
+
+	/* update next pointer of the previous page */
+	if (prevbn != 0) {
+		XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/*
+		 * acquire a transaction lock on the page;
+		 *
+		 * action: update next pointer;
+		 */
+		BT_MARK_DIRTY(mp, ip);
+		tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
+
+		/* the page may already have been tlock'd */
+
+		p->header.next = cpu_to_le64(nextbn);
+
+		XT_PUTPAGE(mp);
+	}
+
+	return 0;
+}
+#endif				/*  _STILL_TO_PORT */
+
+
+/*
+ *      xtInitRoot()
+ *
+ * initialize file root (inline in inode)
+ */
+void xtInitRoot(tid_t tid, struct inode *ip)
+{
+	xtpage_t *p;
+
+	/*
+	 * acquire a transaction lock on the root
+	 *
+	 * action:
+	 */
+	txLock(tid, ip, (struct metapage *) &JFS_IP(ip)->bxflag,
+		      tlckXTREE | tlckNEW);
+	p = &JFS_IP(ip)->i_xtroot;
+
+	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
+	p->header.nextindex = cpu_to_le16(XTENTRYSTART);
+
+	if (S_ISDIR(ip->i_mode))
+		p->header.maxentry = cpu_to_le16(XTROOTINITSLOT_DIR);
+	else {
+		p->header.maxentry = cpu_to_le16(XTROOTINITSLOT);
+		ip->i_size = 0;
+	}
+
+	return;
+}
+
+
+/*
+ * We can run into a deadlock truncating a file with a large number of
+ * xtree pages (large fragmented file).  A robust fix would entail a
+ * reservation system where we would reserve a number of metadata pages
+ * and tlocks which we would be guaranteed without a deadlock.  Without
+ * this, a partial fix is to limit number of metadata pages we will lock
+ * in a single transaction.  Currently we will truncate the file so that
+ * no more than 50 leaf pages will be locked.  The caller of xtTruncate
+ * will be responsible for ensuring that the current transaction gets
+ * committed, and that subsequent transactions are created to truncate
+ * the file further if needed.
+ */
+#define MAX_TRUNCATE_LEAVES 50
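+/*
+ * (illustrative) with 4K xtpages a full leaf maps up to 254 extents, so a
+ * single xtTruncate() pass over 50 leaves can release on the order of
+ * 12,000 extents before the caller has to commit and call it again.
+ */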
+
+/*
+ *      xtTruncate()
+ *
+ * function:
+ *      traverse for truncation logging backward bottom up;
+ *      terminate at the last extent entry at the current subtree
+ *      root page covering new down size.
+ *      truncation may occur within the last extent entry.
+ *
+ * parameter:
+ *      tid_t         tid,
+ *      struct inode    *ip,
+ *      s64           newsize,
+ *      int           flag)   {PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
+ *
+ * return:
+ *
+ * note:
+ *      PWMAP:
+ *       1. truncate (non-COMMIT_NOLINK file)
+ *          by jfs_truncate() or jfs_open(O_TRUNC):
+ *          xtree is updated;
+ *	 2. truncate index table of directory when last entry removed
+ *       map update via tlock at commit time;
+ *      PMAP:
+ *	 Call xtTruncate_pmap instead
+ *      WMAP:
+ *       1. remove (free zero link count) on last reference release
+ *          (pmap has been freed at commit zero link count);
+ *       2. truncate (COMMIT_NOLINK file, i.e., tmp file):
+ *          xtree is updated;
+ *       map update directly at truncation time;
+ *
+ *      if (DELETE)
+ *              no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
+ *      else if (TRUNCATE)
+ *              must write LOG_NOREDOPAGE for deleted index page;
+ *
+ * pages may already have been tlocked by anonymous transactions
+ * during file growth (i.e., write) before truncation;
+ *
+ * except for the last truncated entry, deleted entries remain as is
+ * in the page (nextindex is updated) for other uses
+ * (e.g., log/update allocation map): this avoids copying the page
+ * info but delays freeing of the pages;
+ *
+ */
+s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
+{
+	int rc = 0;
+	s64 teof;
+	struct metapage *mp;
+	xtpage_t *p;
+	s64 bn;
+	int index, nextindex;
+	xad_t *xad;
+	s64 xoff, xaddr;
+	int xlen, len, freexlen;
+	struct btstack btstack;
+	struct btframe *parent;
+	struct tblock *tblk = NULL;
+	struct tlock *tlck = NULL;
+	struct xtlock *xtlck = NULL;
+	struct xdlistlock xadlock;	/* maplock for COMMIT_WMAP */
+	struct pxd_lock *pxdlock;		/* maplock for COMMIT_WMAP */
+	s64 nfreed;
+	int freed, log;
+	int locked_leaves = 0;
+
+	/* save object truncation type */
+	if (tid) {
+		tblk = tid_to_tblock(tid);
+		tblk->xflag |= flag;
+	}
+
+	nfreed = 0;
+
+	flag &= COMMIT_MAP;
+	assert(flag != COMMIT_PMAP);
+
+	if (flag == COMMIT_PWMAP)
+		log = 1;
+	else {
+		log = 0;
+		xadlock.flag = mlckFREEXADLIST;
+		xadlock.index = 1;
+	}
+
+	/*
+	 * if the newsize is not an integral number of pages,
+	 * the file between newsize and next page boundary will
+	 * be cleared.
+	 * if truncating into a file hole, it will cause
+	 * a full block to be allocated for the logical block.
+	 */
+
+	/*
+	 * release page blocks of truncated region <teof, eof>
+	 *
+	 * free the data blocks from the leaf index blocks.
+	 * delete the parent index entries corresponding to
+	 * the freed child data/index blocks.
+	 * free the index blocks themselves which aren't needed
+	 * in new sized file.
+	 *
+	 * index blocks are updated only if the blocks are to be
+	 * retained in the new sized file.
+	 * if type is PMAP, the data and index pages are NOT
+	 * freed, and the data and index blocks are NOT freed
+	 * from  working map.
+	 * (this will allow continued access of data/index of
+	 * temporary file (zerolink count file truncated to zero-length)).
+	 */
+	teof = (newsize + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
+	    JFS_SBI(ip->i_sb)->l2bsize;
+
+	/* clear stack */
+	BT_CLR(&btstack);
+
+	/*
+	 * start with root
+	 *
+	 * root resides in the inode
+	 */
+	bn = 0;
+
+	/*
+	 * first access of each page:
+	 */
+      getPage:
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* process entries backward from last index */
+	index = le16_to_cpu(p->header.nextindex) - 1;
+
+	if (p->header.flag & BT_INTERNAL)
+		goto getChild;
+
+	/*
+	 *      leaf page
+	 */
+
+	/* Since this is the rightmost leaf, and we may have already freed
+	 * a page that was formerly to the right, let's make sure that the
+	 * next pointer is zero.
+	 */
+	if (p->header.next) {
+		if (log)
+			/*
+			 * Make sure this change to the header is logged.
+			 * If we really truncate this leaf, the flag
+			 * will be changed to tlckTRUNCATE
+			 */
+			tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
+		BT_MARK_DIRTY(mp, ip);
+		p->header.next = 0;
+	}
+
+	freed = 0;
+
+	/* does region covered by leaf page precede Teof ? */
+	xad = &p->xad[index];
+	xoff = offsetXAD(xad);
+	xlen = lengthXAD(xad);
+	if (teof >= xoff + xlen) {
+		XT_PUTPAGE(mp);
+		goto getParent;
+	}
+
+	/* (re)acquire tlock of the leaf page */
+	if (log) {
+		if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
+			/*
+			 * We need to limit the size of the transaction
+			 * to avoid exhausting pagecache & tlocks
+			 */
+			XT_PUTPAGE(mp);
+			newsize = (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
+			goto getParent;
+		}
+		tlck = txLock(tid, ip, mp, tlckXTREE);
+		tlck->type = tlckXTREE | tlckTRUNCATE;
+		xtlck = (struct xtlock *) & tlck->lock;
+		xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
+	}
+	BT_MARK_DIRTY(mp, ip);
+
+	/*
+	 * scan backward leaf page entries
+	 */
+	for (; index >= XTENTRYSTART; index--) {
+		xad = &p->xad[index];
+		xoff = offsetXAD(xad);
+		xlen = lengthXAD(xad);
+		xaddr = addressXAD(xad);
+
+		/*
+		 * The "data" for a directory is indexed by the block
+		 * device's address space.  This metadata must be invalidated
+		 * here
+		 */
+		if (S_ISDIR(ip->i_mode) && (teof == 0))
+			invalidate_xad_metapages(ip, *xad);
+		/*
+		 * entry beyond eof: continue scan of current page
+		 *          xad
+		 * ---|---=======------->
+		 *   eof
+		 */
+		if (teof < xoff) {
+			nfreed += xlen;
+			continue;
+		}
+
+		/*
+		 * (xoff <= teof): last entry to be deleted from page;
+		 * If other entries remain in page: keep and update the page.
+		 */
+
+		/*
+		 * eof == entry_start: delete the entry
+		 *           xad
+		 * -------|=======------->
+		 *       eof
+		 *
+		 */
+		if (teof == xoff) {
+			nfreed += xlen;
+
+			if (index == XTENTRYSTART)
+				break;
+
+			nextindex = index;
+		}
+		/*
+		 * eof within the entry: truncate the entry.
+		 *          xad
+		 * -------===|===------->
+		 *          eof
+		 */
+		else if (teof < xoff + xlen) {
+			/* update truncated entry */
+			len = teof - xoff;
+			freexlen = xlen - len;
+			XADlength(xad, len);
+
+			/* save pxd of truncated extent in tlck */
+			xaddr += len;
+			if (log) {	/* COMMIT_PWMAP */
+				xtlck->lwm.offset = (xtlck->lwm.offset) ?
+				    min(index, (int)xtlck->lwm.offset) : index;
+				xtlck->lwm.length = index + 1 -
+				    xtlck->lwm.offset;
+				xtlck->twm.offset = index;
+				pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
+				pxdlock->flag = mlckFREEPXD;
+				PXDaddress(&pxdlock->pxd, xaddr);
+				PXDlength(&pxdlock->pxd, freexlen);
+			}
+			/* free truncated extent */
+			else {	/* COMMIT_WMAP */
+
+				pxdlock = (struct pxd_lock *) & xadlock;
+				pxdlock->flag = mlckFREEPXD;
+				PXDaddress(&pxdlock->pxd, xaddr);
+				PXDlength(&pxdlock->pxd, freexlen);
+				txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
+
+				/* reset map lock */
+				xadlock.flag = mlckFREEXADLIST;
+			}
+
+			/* current entry is new last entry; */
+			nextindex = index + 1;
+
+			nfreed += freexlen;
+		}
+		/*
+		 * eof beyond the entry:
+		 *          xad
+		 * -------=======---|--->
+		 *                 eof
+		 */
+		else {		/* (xoff + xlen < teof) */
+
+			nextindex = index + 1;
+		}
+
+		if (nextindex < le16_to_cpu(p->header.nextindex)) {
+		if (!log) {	/* COMMIT_WMAP */
+				xadlock.xdlist = &p->xad[nextindex];
+				xadlock.count =
+				    le16_to_cpu(p->header.nextindex) -
+				    nextindex;
+				txFreeMap(ip, (struct maplock *) & xadlock,
+					  NULL, COMMIT_WMAP);
+			}
+			p->header.nextindex = cpu_to_le16(nextindex);
+		}
+
+		XT_PUTPAGE(mp);
+
+		/* assert(freed == 0); */
+		goto getParent;
+	}			/* end scan of leaf page entries */
+
+	freed = 1;
+
+	/*
+	 * leaf page becomes empty: free the page if type != PMAP
+	 */
+	if (log) {		/* COMMIT_PWMAP */
+		/* txCommit() with tlckFREE:
+		 * free data extents covered by leaf [XTENTRYSTART:hwm);
+		 * invalidate leaf if COMMIT_PWMAP;
+		 * if (TRUNCATE), will write LOG_NOREDOPAGE;
+		 */
+		tlck->type = tlckXTREE | tlckFREE;
+	} else {		/* COMMIT_WMAP */
+
+		/* free data extents covered by leaf */
+		xadlock.xdlist = &p->xad[XTENTRYSTART];
+		xadlock.count =
+		    le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
+		txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP);
+	}
+
+	if (p->header.flag & BT_ROOT) {
+		p->header.flag &= ~BT_INTERNAL;
+		p->header.flag |= BT_LEAF;
+		p->header.nextindex = cpu_to_le16(XTENTRYSTART);
+
+		XT_PUTPAGE(mp);	/* debug */
+		goto out;
+	} else {
+		if (log) {	/* COMMIT_PWMAP */
+			/* page will be invalidated at tx completion
+			 */
+			XT_PUTPAGE(mp);
+		} else {	/* COMMIT_WMAP */
+
+			if (mp->lid)
+				lid_to_tlock(mp->lid)->flag |= tlckFREELOCK;
+
+			/* invalidate empty leaf page */
+			discard_metapage(mp);
+		}
+	}
+
+	/*
+	 * the leaf page has become empty: delete the parent entry
+	 * for the leaf page if the parent page is to be kept
+	 * in the new sized file.
+	 */
+
+	/*
+	 * go back up to the parent page
+	 */
+      getParent:
+	/* pop/restore parent entry for the current child page */
+	if ((parent = BT_POP(&btstack)) == NULL)
+		/* current page must have been root */
+		goto out;
+
+	/* get back the parent page */
+	bn = parent->bn;
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	index = parent->index;
+
+	/*
+	 * child page was not empty:
+	 */
+	if (freed == 0) {
+		/* has any entry deleted from parent ? */
+		if (index < le16_to_cpu(p->header.nextindex) - 1) {
+			/* (re)acquire tlock on the parent page */
+			if (log) {	/* COMMIT_PWMAP */
+				/* txCommit() with tlckTRUNCATE:
+				 * free child extents covered by parent [);
+				 */
+				tlck = txLock(tid, ip, mp, tlckXTREE);
+				xtlck = (struct xtlock *) & tlck->lock;
+				if (!(tlck->type & tlckTRUNCATE)) {
+					xtlck->hwm.offset =
+					    le16_to_cpu(p->header.
+							nextindex) - 1;
+					tlck->type =
+					    tlckXTREE | tlckTRUNCATE;
+				}
+			} else {	/* COMMIT_WMAP */
+
+				/* free child extents covered by parent */
+				xadlock.xdlist = &p->xad[index + 1];
+				xadlock.count =
+				    le16_to_cpu(p->header.nextindex) -
+				    index - 1;
+				txFreeMap(ip, (struct maplock *) & xadlock,
+					  NULL, COMMIT_WMAP);
+			}
+			BT_MARK_DIRTY(mp, ip);
+
+			p->header.nextindex = cpu_to_le16(index + 1);
+		}
+		XT_PUTPAGE(mp);
+		goto getParent;
+	}
+
+	/*
+	 * child page was empty:
+	 */
+	nfreed += lengthXAD(&p->xad[index]);
+
+	/*
+	 * During working map update, child page's tlock must be handled
+	 * before parent's.  This is because the parent's tlock will cause
+	 * the child's disk space to be marked available in the wmap, so
+	 * it's important that the child page be released by that time.
+	 *
+	 * ToDo:  tlocks should be on doubly-linked list, so we can
+	 * quickly remove it and add it to the end.
+	 */
+
+	/*
+	 * Move parent page's tlock to the end of the tid's tlock list
+	 */
+	if (log && mp->lid && (tblk->last != mp->lid) &&
+	    lid_to_tlock(mp->lid)->tid) {
+		lid_t lid = mp->lid;
+		struct tlock *prev;
+
+		tlck = lid_to_tlock(lid);
+
+		if (tblk->next == lid)
+			tblk->next = tlck->next;
+		else {
+			for (prev = lid_to_tlock(tblk->next);
+			     prev->next != lid;
+			     prev = lid_to_tlock(prev->next)) {
+				assert(prev->next);
+			}
+			prev->next = tlck->next;
+		}
+		lid_to_tlock(tblk->last)->next = lid;
+		tlck->next = 0;
+		tblk->last = lid;
+	}
+
+	/*
+	 * parent page becomes empty: free the page
+	 */
+	if (index == XTENTRYSTART) {
+		if (log) {	/* COMMIT_PWMAP */
+			/* txCommit() with tlckFREE:
+			 * free child extents covered by parent;
+			 * invalidate parent if COMMIT_PWMAP;
+			 */
+			tlck = txLock(tid, ip, mp, tlckXTREE);
+			xtlck = (struct xtlock *) & tlck->lock;
+			xtlck->hwm.offset =
+			    le16_to_cpu(p->header.nextindex) - 1;
+			tlck->type = tlckXTREE | tlckFREE;
+		} else {	/* COMMIT_WMAP */
+
+			/* free child extents covered by parent */
+			xadlock.xdlist = &p->xad[XTENTRYSTART];
+			xadlock.count =
+			    le16_to_cpu(p->header.nextindex) -
+			    XTENTRYSTART;
+			txFreeMap(ip, (struct maplock *) & xadlock, NULL,
+				  COMMIT_WMAP);
+		}
+		BT_MARK_DIRTY(mp, ip);
+
+		if (p->header.flag & BT_ROOT) {
+			p->header.flag &= ~BT_INTERNAL;
+			p->header.flag |= BT_LEAF;
+			p->header.nextindex = cpu_to_le16(XTENTRYSTART);
+			if (le16_to_cpu(p->header.maxentry) == XTROOTMAXSLOT) {
+				/*
+				 * Shrink root down to allow inline
+				 * EA (otherwise fsck complains)
+				 */
+				p->header.maxentry =
+				    cpu_to_le16(XTROOTINITSLOT);
+				JFS_IP(ip)->mode2 |= INLINEEA;
+			}
+
+			XT_PUTPAGE(mp);	/* debug */
+			goto out;
+		} else {
+			if (log) {	/* COMMIT_PWMAP */
+				/* page will be invalidated at tx completion
+				 */
+				XT_PUTPAGE(mp);
+			} else {	/* COMMIT_WMAP */
+
+				if (mp->lid)
+					lid_to_tlock(mp->lid)->flag |=
+						tlckFREELOCK;
+
+				/* invalidate parent page */
+				discard_metapage(mp);
+			}
+
+			/* parent has become empty and freed:
+			 * go back up to its parent page
+			 */
+			/* freed = 1; */
+			goto getParent;
+		}
+	}
+	/*
+	 * parent page still has entries for front region;
+	 */
+	else {
+		/* try truncate region covered by preceding entry
+		 * (process backward)
+		 */
+		index--;
+
+		/* go back down to the child page corresponding
+		 * to the entry
+		 */
+		goto getChild;
+	}
+
+	/*
+	 *      internal page: go down to child page of current entry
+	 */
+      getChild:
+	/* save current parent entry for the child page */
+	BT_PUSH(&btstack, bn, index);
+
+	/* get child page */
+	xad = &p->xad[index];
+	bn = addressXAD(xad);
+
+	/*
+	 * first access of each internal entry:
+	 */
+	/* release parent page */
+	XT_PUTPAGE(mp);
+
+	/* process the child page */
+	goto getPage;
+
+      out:
+	/*
+	 * update file resource stat
+	 */
+	/* set size
+	 */
+	if (S_ISDIR(ip->i_mode) && !newsize)
+		ip->i_size = 1;	/* fsck hates zero-length directories */
+	else
+		ip->i_size = newsize;
+
+	/* update quota allocation to reflect freed blocks */
+	DQUOT_FREE_BLOCK(ip, nfreed);
+
+	/*
+	 * free tlock of invalidated pages
+	 */
+	if (flag == COMMIT_WMAP)
+		txFreelock(ip);
+
+	return newsize;
+}
+
+
+/*
+ *      xtTruncate_pmap()
+ *
+ * function:
+ *	Perform truncation to zero length for a deleted file, leaving
+ *	the xtree and working map untouched.  This allows the file to
+ *	be accessed via open file handles, while the delete of the file
+ *	is committed to disk.
+ *
+ * parameter:
+ *      tid_t		tid,
+ *      struct inode	*ip,
+ *      s64		committed_size)
+ *
+ * return: new committed size
+ *
+ * note:
+ *
+ *	To avoid deadlock by holding too many transaction locks, the
+ *	truncation may be broken up into multiple transactions.
+ *	The committed_size keeps track of how much of the file has been
+ *	freed from the pmaps.
+ */
+s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
+{
+	s64 bn;
+	struct btstack btstack;
+	int cmp;
+	int index;
+	int locked_leaves = 0;
+	struct metapage *mp;
+	xtpage_t *p;
+	struct btframe *parent;
+	int rc;
+	struct tblock *tblk;
+	struct tlock *tlck = NULL;
+	xad_t *xad;
+	int xlen;
+	s64 xoff;
+	struct xtlock *xtlck = NULL;
+
+	/* save object truncation type */
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_PMAP;
+
+	/* clear stack */
+	BT_CLR(&btstack);
+
+	if (committed_size) {
+		xoff = (committed_size >> JFS_SBI(ip->i_sb)->l2bsize) - 1;
+		rc = xtSearch(ip, xoff, &cmp, &btstack, 0);
+		if (rc)
+			return rc;
+
+		XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
+
+		if (cmp != 0) {
+			XT_PUTPAGE(mp);
+			jfs_error(ip->i_sb,
+				  "xtTruncate_pmap: did not find extent");
+			return -EIO;
+		}
+	} else {
+		/*
+		 * start with root
+		 *
+		 * root resides in the inode
+		 */
+		bn = 0;
+
+		/*
+		 * first access of each page:
+		 */
+      getPage:
+		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+		if (rc)
+			return rc;
+
+		/* process entries backward from last index */
+		index = le16_to_cpu(p->header.nextindex) - 1;
+
+		if (p->header.flag & BT_INTERNAL)
+			goto getChild;
+	}
+
+	/*
+	 *      leaf page
+	 */
+
+	if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
+		/*
+		 * We need to limit the size of the transaction
+		 * to avoid exhausting pagecache & tlocks
+		 */
+		xad = &p->xad[index];
+		xoff = offsetXAD(xad);
+		xlen = lengthXAD(xad);
+		XT_PUTPAGE(mp);
+		return  (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
+	}
+	tlck = txLock(tid, ip, mp, tlckXTREE);
+	tlck->type = tlckXTREE | tlckFREE;
+	xtlck = (struct xtlock *) & tlck->lock;
+	xtlck->hwm.offset = index;
+
+
+	XT_PUTPAGE(mp);
+
+	/*
+	 * go back up to the parent page
+	 */
+      getParent:
+	/* pop/restore parent entry for the current child page */
+	if ((parent = BT_POP(&btstack)) == NULL)
+		/* current page must have been root */
+		goto out;
+
+	/* get back the parent page */
+	bn = parent->bn;
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	index = parent->index;
+
+	/*
+	 * parent page becomes empty: free the page
+	 */
+	if (index == XTENTRYSTART) {
+		/* txCommit() with tlckFREE:
+		 * free child extents covered by parent;
+		 * invalidate parent if COMMIT_PWMAP;
+		 */
+		tlck = txLock(tid, ip, mp, tlckXTREE);
+		xtlck = (struct xtlock *) & tlck->lock;
+		xtlck->hwm.offset =
+		    le16_to_cpu(p->header.nextindex) - 1;
+		tlck->type = tlckXTREE | tlckFREE;
+
+		XT_PUTPAGE(mp);
+
+		if (p->header.flag & BT_ROOT) {
+
+			goto out;
+		} else {
+			goto getParent;
+		}
+	}
+	/*
+	 * parent page still has entries for front region;
+	 */
+	else
+		index--;
+	/*
+	 *      internal page: go down to child page of current entry
+	 */
+      getChild:
+	/* save current parent entry for the child page */
+	BT_PUSH(&btstack, bn, index);
+
+	/* get child page */
+	xad = &p->xad[index];
+	bn = addressXAD(xad);
+
+	/*
+	 * first access of each internal entry:
+	 */
+	/* release parent page */
+	XT_PUTPAGE(mp);
+
+	/* process the child page */
+	goto getPage;
+
+      out:
+
+	return 0;
+}
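+
+/*
+ * Caller-side sketch (illustrative only, not compiled): as noted above,
+ * truncating the pmap of a large file may have to be split across several
+ * transactions, feeding each returned size back in as committed_size.
+ * This fragment mirrors the loop used by jfs_unlink()/jfs_rename() in
+ * namei.c later in this patch; tid, rc, ip and new_size are assumed to be
+ * declared as they are there, with new_size seeded by the first call.
+ */
+#if 0
+	while (new_size && (rc == 0)) {
+		tid = txBegin(ip->i_sb, 0);
+		down(&JFS_IP(ip)->commit_sem);
+		new_size = xtTruncate_pmap(tid, ip, new_size);
+		if (new_size < 0) {
+			txAbort(tid, 1);	/* marks the filesystem dirty */
+			rc = new_size;
+		} else
+			rc = txCommit(tid, 1, &ip, COMMIT_SYNC);
+		txEnd(tid);
+		up(&JFS_IP(ip)->commit_sem);
+	}
+#endif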
+
+
+#ifdef _JFS_DEBUG_XTREE
+/*
+ *      xtDisplayTree()
+ *
+ * function: traverse forward
+ */
+int xtDisplayTree(struct inode *ip)
+{
+	int rc = 0;
+	struct metapage *mp;
+	xtpage_t *p;
+	s64 bn, pbn;
+	int index, lastindex, v, h;
+	xad_t *xad;
+	struct btstack btstack;
+	struct btframe *btsp;
+	struct btframe *parent;
+
+	printk("display B+-tree.\n");
+
+	/* clear stack */
+	btsp = btstack.stack;
+
+	/*
+	 * start with root
+	 *
+	 * root resides in the inode
+	 */
+	bn = 0;
+	v = h = 0;
+
+	/*
+	 * first access of each page:
+	 */
+      getPage:
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* process entries forward from first index */
+	index = XTENTRYSTART;
+	lastindex = le16_to_cpu(p->header.nextindex) - 1;
+
+	if (p->header.flag & BT_INTERNAL) {
+		/*
+		 * first access of each internal page
+		 */
+		goto getChild;
+	} else {		/* (p->header.flag & BT_LEAF) */
+
+		/*
+		 * first access of each leaf page
+		 */
+		printk("leaf page ");
+		xtDisplayPage(ip, bn, p);
+
+		/* unpin the leaf page */
+		XT_PUTPAGE(mp);
+	}
+
+	/*
+	 * go back up to the parent page
+	 */
+      getParent:
+	/* pop/restore parent entry for the current child page */
+	if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
+		/* current page must have been root */
+		return 0;
+
+	/*
+	 * parent page scan completed
+	 */
+	if ((index = parent->index) == (lastindex = parent->lastindex)) {
+		/* go back up to the parent page */
+		goto getParent;
+	}
+
+	/*
+	 * parent page has entries remaining
+	 */
+	/* get back the parent page */
+	bn = parent->bn;
+	/* v = parent->level; */
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* get next parent entry */
+	index++;
+
+	/*
+	 * internal page: go down to child page of current entry
+	 */
+      getChild:
+	/* push/save current parent entry for the child page */
+	btsp->bn = pbn = bn;
+	btsp->index = index;
+	btsp->lastindex = lastindex;
+	/* btsp->level = v; */
+	/* btsp->node = h; */
+	++btsp;
+
+	/* get child page */
+	xad = &p->xad[index];
+	bn = addressXAD(xad);
+
+	/*
+	 * first access of each internal entry:
+	 */
+	/* release parent page */
+	XT_PUTPAGE(mp);
+
+	printk("traverse down 0x%lx[%d]->0x%lx\n", (ulong) pbn, index,
+	       (ulong) bn);
+	v++;
+	h = index;
+
+	/* process the child page */
+	goto getPage;
+}
+
+
+/*
+ *      xtDisplayPage()
+ *
+ * function: display page
+ */
+int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p)
+{
+	int rc = 0;
+	xad_t *xad;
+	s64 xaddr, xoff;
+	int xlen, i, j;
+
+	/* display page control */
+	printk("bn:0x%lx flag:0x%x nextindex:%d\n",
+	       (ulong) bn, p->header.flag,
+	       le16_to_cpu(p->header.nextindex));
+
+	/* display entries */
+	xad = &p->xad[XTENTRYSTART];
+	for (i = XTENTRYSTART, j = 1; i < le16_to_cpu(p->header.nextindex);
+	     i++, xad++, j++) {
+		xoff = offsetXAD(xad);
+		xaddr = addressXAD(xad);
+		xlen = lengthXAD(xad);
+		printk("\t[%d] 0x%lx:0x%lx(0x%x)", i, (ulong) xoff,
+		       (ulong) xaddr, xlen);
+
+		if (j == 4) {
+			printk("\n");
+			j = 0;
+		}
+	}
+
+	printk("\n");
+
+	return rc;
+}
+#endif				/* _JFS_DEBUG_XTREE */
+
+
+#ifdef _JFS_WIP
+/*
+ *      xtGather()
+ *
+ * function:
+ *      traverse for allocation acquiring tlock at commit time
+ *      (vs at the time of update) logging backward top down
+ *
+ * note:
+ *      problem - establishing that all new allocations have been
+ *      processed both for append and random write in sparse file
+ *      at the current entry at the current subtree root page
+ *
+ */
+int xtGather(btree_t *t)
+{
+	int rc = 0;
+	xtpage_t *p;
+	u64 bn;
+	int index;
+	btentry_t *e;
+	struct btstack btstack;
+	struct btsf *parent;
+
+	/* clear stack */
+	BT_CLR(&btstack);
+
+	/*
+	 * start with root
+	 *
+	 * root resides in the inode
+	 */
+	bn = 0;
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/* new root is NOT pointed by a new entry
+	   if (p->header.flag & NEW)
+	   allocate new page lock;
+	   write a NEWPAGE log;
+	 */
+
+      dopage:
+	/*
+	 * first access of each page:
+	 */
+	/* process entries backward from last index */
+	index = le16_to_cpu(p->header.nextindex) - 1;
+
+	if (p->header.flag & BT_LEAF) {
+		/*
+		 * first access of each leaf page
+		 */
+		/* process leaf page entries backward */
+		for (; index >= XTENTRYSTART; index--) {
+			e = &p->xad[index];
+			/*
+			 * if newpage, log NEWPAGE.
+			 *
+			 if (e->flag & XAD_NEW) {
+			 nfound += entry->length;
+			 update current page lock for the entry;
+			 newpage(entry);
+			 *
+			 * if moved, log move.
+			 *
+			 } else if (e->flag & XAD_MOVED) {
+			 reset flag;
+			 update current page lock for the entry;
+			 }
+			 */
+		}
+
+		/* unpin the leaf page */
+		XT_PUTPAGE(mp);
+
+		/*
+		 * go back up to the parent page
+		 */
+	      getParent:
+		/* restore parent entry for the current child page */
+		if ((parent = BT_POP(&btstack)) == NULL)
+			/* current page must have been root */
+			return 0;
+
+		if ((index = parent->index) == XTENTRYSTART) {
+			/*
+			 * parent page scan completed
+			 */
+			/* go back up to the parent page */
+			goto getParent;
+		} else {
+			/*
+			 * parent page has entries remaining
+			 */
+			/* get back the parent page */
+			bn = parent->bn;
+			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+			if (rc)
+				return -EIO;
+
+			/* first subroot page which
+			 * covers all new allocated blocks
+			 * itself not new/modified.
+			 * (if modified from split of descendent,
+			 * go down path of split page)
+
+			 if (nfound == nnew &&
+			 !(p->header.flag & (NEW | MOD)))
+			 exit scan;
+			 */
+
+			/* process parent page entries backward */
+			index--;
+		}
+	} else {
+		/*
+		 * first access of each internal page
+		 */
+	}
+
+	/*
+	 * internal page: go down to child page of current entry
+	 */
+
+	/* save current parent entry for the child page */
+	BT_PUSH(&btstack, bn, index);
+
+	/* get current entry for the child page */
+	e = &p->xad[index];
+
+	/*
+	 * first access of each internal entry:
+	 */
+	/*
+	 * if new entry, log btree_tnewentry.
+	 *
+	 if (e->flag & XAD_NEW)
+	 update parent page lock for the entry;
+	 */
+
+	/* release parent page */
+	XT_PUTPAGE(mp);
+
+	/* get child page */
+	bn = e->bn;
+	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
+	if (rc)
+		return rc;
+
+	/*
+	 * first access of each non-root page:
+	 */
+	/*
+	 * if new, log btree_newpage.
+	 *
+	 if (p->header.flag & NEW)
+	 allocate new page lock;
+	 write a NEWPAGE log (next, prev);
+	 */
+
+	/* process the child page */
+	goto dopage;
+
+      out:
+	return 0;
+}
+#endif				/* _JFS_WIP */
+
+
+#ifdef CONFIG_JFS_STATISTICS
+int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length,
+		    int *eof, void *data)
+{
+	int len = 0;
+	off_t begin;
+
+	len += sprintf(buffer,
+		       "JFS Xtree statistics\n"
+		       "====================\n"
+		       "searches = %d\n"
+		       "fast searches = %d\n"
+		       "splits = %d\n",
+		       xtStat.search,
+		       xtStat.fastSearch,
+		       xtStat.split);
+
+	begin = offset;
+	*start = buffer + begin;
+	len -= begin;
+
+	if (len > length)
+		len = length;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+#endif
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
new file mode 100644
index 0000000..a697842
--- /dev/null
+++ b/fs/jfs/jfs_xtree.h
@@ -0,0 +1,140 @@
+/*
+ *   Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_XTREE
+#define _H_JFS_XTREE
+
+/*
+ *      jfs_xtree.h: extent allocation descriptor B+-tree manager
+ */
+
+#include "jfs_btree.h"
+
+
+/*
+ *      extent allocation descriptor (xad)
+ */
+typedef struct xad {
+	unsigned flag:8;	/* 1: flag */
+	unsigned rsvrd:16;	/* 2: reserved */
+	unsigned off1:8;	/* 1: offset in unit of fsblksize */
+	__le32 off2;		/* 4: offset in unit of fsblksize */
+	unsigned len:24;	/* 3: length in unit of fsblksize */
+	unsigned addr1:8;	/* 1: address in unit of fsblksize */
+	__le32 addr2;		/* 4: address in unit of fsblksize */
+} xad_t;			/* (16) */
+
+#define MAXXLEN         ((1 << 24) - 1)
+
+#define XTSLOTSIZE      16
+#define L2XTSLOTSIZE    4
+
+/* xad_t field construction */
+#define XADoffset(xad, offset64)\
+{\
+        (xad)->off1 = ((u64)offset64) >> 32;\
+        (xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
+}
+#define XADaddress(xad, address64)\
+{\
+        (xad)->addr1 = ((u64)address64) >> 32;\
+        (xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+}
+#define XADlength(xad, length32)        (xad)->len = __cpu_to_le24(length32)
+
+/* xad_t field extraction */
+#define offsetXAD(xad)\
+        ( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
+#define addressXAD(xad)\
+        ( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
+#define lengthXAD(xad)  __le24_to_cpu((xad)->len)
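+
+/*
+ * Usage sketch (illustrative only, not compiled): offsets and addresses
+ * are 40-bit block numbers split across an 8-bit high field and a 32-bit
+ * little-endian low field, so the construction and extraction macros above
+ * round-trip; the 24-bit length field must not exceed MAXXLEN.
+ */
+#if 0
+	xad_t xad;
+	s64 xoff = 0x123456789aLL;	/* hypothetical 40-bit offset */
+
+	XADoffset(&xad, xoff);	/* off1 = 0x12, off2 = le32(0x3456789a) */
+	XADaddress(&xad, xoff + 1);
+	XADlength(&xad, 16);
+
+	BUG_ON(offsetXAD(&xad) != xoff);
+	BUG_ON(lengthXAD(&xad) != 16);
+#endif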
+
+/* xad list */
+struct xadlist {
+	s16 maxnxad;
+	s16 nxad;
+	xad_t *xad;
+};
+
+/* xad_t flags */
+#define XAD_NEW         0x01	/* new */
+#define XAD_EXTENDED    0x02	/* extended */
+#define XAD_COMPRESSED  0x04	/* compressed with recorded length */
+#define XAD_NOTRECORDED 0x08	/* allocated but not recorded */
+#define XAD_COW         0x10	/* copy-on-write */
+
+
+/* possible values for maxentry */
+#define XTROOTINITSLOT_DIR  6
+#define XTROOTINITSLOT  10
+#define XTROOTMAXSLOT   18
+#define XTPAGEMAXSLOT   256
+#define XTENTRYSTART    2
+
+/*
+ *      xtree page:
+ */
+typedef union {
+	struct xtheader {
+		__le64 next;	/* 8: */
+		__le64 prev;	/* 8: */
+
+		u8 flag;	/* 1: */
+		u8 rsrvd1;	/* 1: */
+		__le16 nextindex;	/* 2: next index = number of entries */
+		__le16 maxentry;	/* 2: max number of entries */
+		__le16 rsrvd2;	/* 2: */
+
+		pxd_t self;	/* 8: self */
+	} header;		/* (32) */
+
+	xad_t xad[XTROOTMAXSLOT];	/* 16 * maxentry: xad array */
+} xtpage_t;
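+
+/*
+ * Layout note: the 32-byte header above occupies the first two XTSLOTSIZE
+ * (16-byte) slots, which is why entries start at index XTENTRYSTART (2).
+ * Assuming the usual 4K metapage (PSIZE) used by the xtree code, an
+ * external page holds 4096 / 16 = 256 slots (XTPAGEMAXSLOT), while the
+ * root embedded in the inode is limited to XTROOTMAXSLOT (18) slots.
+ */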
+
+/*
+ *      external declaration
+ */
+extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
+		    int *pflag, s64 * paddr, int *plen, int flag);
+extern int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
+			struct xadlist * xadlist, int flag);
+extern void xtInitRoot(tid_t tid, struct inode *ip);
+extern int xtInsert(tid_t tid, struct inode *ip,
+		    int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
+extern int xtExtend(tid_t tid, struct inode *ip, s64 xoff, int xlen,
+		    int flag);
+#ifdef _NOTYET
+extern int xtTailgate(tid_t tid, struct inode *ip,
+		      s64 xoff, int xlen, s64 xaddr, int flag);
+#endif
+extern int xtUpdate(tid_t tid, struct inode *ip, struct xad *nxad);
+extern int xtDelete(tid_t tid, struct inode *ip, s64 xoff, int xlen,
+		    int flag);
+extern s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int type);
+extern s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size);
+extern int xtRelocate(tid_t tid, struct inode *ip,
+		      xad_t * oxad, s64 nxaddr, int xtype);
+extern int xtAppend(tid_t tid,
+		    struct inode *ip, int xflag, s64 xoff, int maxblocks,
+		    int *xlenp, s64 * xaddrp, int flag);
+
+#ifdef  _JFS_DEBUG_XTREE
+extern int xtDisplayTree(struct inode *ip);
+extern int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p);
+#endif				/* _JFS_DEBUG_XTREE */
+
+#endif				/* !_H_JFS_XTREE */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
new file mode 100644
index 0000000..8413a36
--- /dev/null
+++ b/fs/jfs/namei.c
@@ -0,0 +1,1540 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/ctype.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_inode.h"
+#include "jfs_dinode.h"
+#include "jfs_dmap.h"
+#include "jfs_unicode.h"
+#include "jfs_metapage.h"
+#include "jfs_xattr.h"
+#include "jfs_acl.h"
+#include "jfs_debug.h"
+
+extern struct inode_operations jfs_file_inode_operations;
+extern struct inode_operations jfs_symlink_inode_operations;
+extern struct file_operations jfs_file_operations;
+extern struct address_space_operations jfs_aops;
+
+extern int jfs_fsync(struct file *, struct dentry *, int);
+extern void jfs_truncate_nolock(struct inode *, loff_t);
+extern int jfs_init_acl(struct inode *, struct inode *);
+
+/*
+ * forward references
+ */
+struct inode_operations jfs_dir_inode_operations;
+struct file_operations jfs_dir_operations;
+struct dentry_operations jfs_ci_dentry_operations;
+
+static s64 commitZeroLink(tid_t, struct inode *);
+
+/*
+ * NAME:	jfs_create(dip, dentry, mode)
+ *
+ * FUNCTION:	create a regular file in the parent directory <dip>
+ *		with name = <from dentry> and mode = <mode>
+ *
+ * PARAMETER:	dip 	- parent directory vnode
+ *		dentry	- dentry of new file
+ *		mode	- create mode (rwxrwxrwx).
+ *		nd	- nameidata struct
+ *
+ * RETURN:	Errors from subroutines
+ *
+ */
+static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	int rc = 0;
+	tid_t tid;		/* transaction id */
+	struct inode *ip = NULL;	/* inode of new file */
+	ino_t ino;
+	struct component_name dname;	/* name of new file */
+	struct btstack btstack;
+	struct inode *iplist[2];
+	struct tblock *tblk;
+
+	jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name);
+
+	/*
+	 * search parent directory for entry/freespace
+	 * (dtSearch() returns parent directory page pinned)
+	 */
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out1;
+
+	/*
+	 * Either iAlloc() or txBegin() may block.  Deadlock can occur if we
+	 * block there while holding dtree page, so we allocate the inode &
+	 * begin the transaction before we search the directory.
+	 */
+	ip = ialloc(dip, mode);
+	if (ip == NULL) {
+		rc = -ENOSPC;
+		goto out2;
+	}
+
+	tid = txBegin(dip->i_sb, 0);
+
+	down(&JFS_IP(dip)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) {
+		jfs_err("jfs_create: dtSearch returned %d", rc);
+		goto out3;
+	}
+
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_CREATE;
+	tblk->ino = ip->i_ino;
+	tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
+
+	iplist[0] = dip;
+	iplist[1] = ip;
+
+	/*
+	 * initialize the child XAD tree root in-line in inode
+	 */
+	xtInitRoot(tid, ip);
+
+	/*
+	 * create entry in parent directory for the new file
+	 * (dtInsert() releases parent directory page)
+	 */
+	ino = ip->i_ino;
+	if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
+		if (rc == -EIO) {
+			jfs_err("jfs_create: dtInsert returned -EIO");
+			txAbort(tid, 1);	/* Marks Filesystem dirty */
+		} else
+			txAbort(tid, 0);	/* Filesystem full */
+		goto out3;
+	}
+
+	ip->i_op = &jfs_file_inode_operations;
+	ip->i_fop = &jfs_file_operations;
+	ip->i_mapping->a_ops = &jfs_aops;
+
+	insert_inode_hash(ip);
+	mark_inode_dirty(ip);
+
+	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+
+	mark_inode_dirty(dip);
+
+	rc = txCommit(tid, 2, &iplist[0], 0);
+
+      out3:
+	txEnd(tid);
+	up(&JFS_IP(dip)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+	if (rc) {
+		ip->i_nlink = 0;
+		iput(ip);
+	} else
+		d_instantiate(dentry, ip);
+
+      out2:
+	free_UCSname(&dname);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	if (rc == 0)
+		jfs_init_acl(ip, dip);
+#endif
+
+      out1:
+
+	jfs_info("jfs_create: rc:%d", rc);
+	return rc;
+}
+
+
+/*
+ * NAME:	jfs_mkdir(dip, dentry, mode)
+ *
+ * FUNCTION:	create a child directory in the parent directory <dip>
+ *		with name = <from dentry> and mode = <mode>
+ *
+ * PARAMETER:	dip 	- parent directory vnode
+ *		dentry	- dentry of child directory
+ *		mode	- create mode (rwxrwxrwx).
+ *
+ * RETURN:	Errors from subroutines
+ *
+ * note:
+ * EACCES: user needs search+write permission on the parent directory
+ */
+static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
+{
+	int rc = 0;
+	tid_t tid;		/* transaction id */
+	struct inode *ip = NULL;	/* child directory inode */
+	ino_t ino;
+	struct component_name dname;	/* child directory name */
+	struct btstack btstack;
+	struct inode *iplist[2];
+	struct tblock *tblk;
+
+	jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name);
+
+	/* link count overflow on parent directory ? */
+	if (dip->i_nlink == JFS_LINK_MAX) {
+		rc = -EMLINK;
+		goto out1;
+	}
+
+	/*
+	 * search parent directory for entry/freespace
+	 * (dtSearch() returns parent directory page pinned)
+	 */
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out1;
+
+	/*
+	 * Either iAlloc() or txBegin() may block.  Deadlock can occur if we
+	 * block there while holding dtree page, so we allocate the inode &
+	 * begin the transaction before we search the directory.
+	 */
+	ip = ialloc(dip, S_IFDIR | mode);
+	if (ip == NULL) {
+		rc = -ENOSPC;
+		goto out2;
+	}
+
+	tid = txBegin(dip->i_sb, 0);
+
+	down(&JFS_IP(dip)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) {
+		jfs_err("jfs_mkdir: dtSearch returned %d", rc);
+		goto out3;
+	}
+
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_CREATE;
+	tblk->ino = ip->i_ino;
+	tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
+
+	iplist[0] = dip;
+	iplist[1] = ip;
+
+	/*
+	 * initialize the child directory in-line in inode
+	 */
+	dtInitRoot(tid, ip, dip->i_ino);
+
+	/*
+	 * create entry in parent directory for child directory
+	 * (dtInsert() releases parent directory page)
+	 */
+	ino = ip->i_ino;
+	if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
+		if (rc == -EIO) {
+			jfs_err("jfs_mkdir: dtInsert returned -EIO");
+			txAbort(tid, 1);	/* Marks Filesystem dirty */
+		} else
+			txAbort(tid, 0);	/* Filesystem full */
+		goto out3;
+	}
+
+	ip->i_nlink = 2;	/* for '.' */
+	ip->i_op = &jfs_dir_inode_operations;
+	ip->i_fop = &jfs_dir_operations;
+
+	insert_inode_hash(ip);
+	mark_inode_dirty(ip);
+
+	/* update parent directory inode */
+	dip->i_nlink++;		/* for '..' from child directory */
+	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	mark_inode_dirty(dip);
+
+	rc = txCommit(tid, 2, &iplist[0], 0);
+
+      out3:
+	txEnd(tid);
+	up(&JFS_IP(dip)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+	if (rc) {
+		ip->i_nlink = 0;
+		iput(ip);
+	} else
+		d_instantiate(dentry, ip);
+
+      out2:
+	free_UCSname(&dname);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	if (rc == 0)
+		jfs_init_acl(ip, dip);
+#endif
+
+      out1:
+
+	jfs_info("jfs_mkdir: rc:%d", rc);
+	return rc;
+}
+
+/*
+ * NAME:	jfs_rmdir(dip, dentry)
+ *
+ * FUNCTION:	remove a link to child directory
+ *
+ * PARAMETER:	dip 	- parent inode
+ *		dentry	- child directory dentry
+ *
+ * RETURN:	-EINVAL	- if name is . or ..
+ *		-EINVAL  - if . or .. exist but are invalid.
+ *		errors from subroutines
+ *
+ * note:
+ * if other threads have the directory open when the last link 
+ * is removed, the "." and ".." entries, if present, are removed before 
+ * rmdir() returns and no new entries may be created in the directory, 
+ * but the directory is not removed until the last reference to 
+ * the directory is released (cf. unlink() of a regular file).
+ */
+static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
+{
+	int rc;
+	tid_t tid;		/* transaction id */
+	struct inode *ip = dentry->d_inode;
+	ino_t ino;
+	struct component_name dname;
+	struct inode *iplist[2];
+	struct tblock *tblk;
+
+	jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
+
+	/* Init inode for quota operations. */
+	DQUOT_INIT(ip);
+
+	/* directory must be empty to be removed */
+	if (!dtEmpty(ip)) {
+		rc = -ENOTEMPTY;
+		goto out;
+	}
+
+	if ((rc = get_UCSname(&dname, dentry))) {
+		goto out;
+	}
+
+	tid = txBegin(dip->i_sb, 0);
+
+	down(&JFS_IP(dip)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	iplist[0] = dip;
+	iplist[1] = ip;
+
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_DELETE;
+	tblk->u.ip = ip;
+
+	/*
+	 * delete the entry of target directory from parent directory
+	 */
+	ino = ip->i_ino;
+	if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) {
+		jfs_err("jfs_rmdir: dtDelete returned %d", rc);
+		if (rc == -EIO)
+			txAbort(tid, 1);
+		txEnd(tid);
+		up(&JFS_IP(dip)->commit_sem);
+		up(&JFS_IP(ip)->commit_sem);
+
+		goto out2;
+	}
+
+	/* update parent directory's link count corresponding
+	 * to ".." entry of the target directory deleted
+	 */
+	dip->i_nlink--;
+	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	mark_inode_dirty(dip);
+
+	/*
+	 * OS/2 could have created EA and/or ACL
+	 */
+	/* free EA from both persistent and working map */
+	if (JFS_IP(ip)->ea.flag & DXD_EXTENT) {
+		/* free EA pages */
+		txEA(tid, ip, &JFS_IP(ip)->ea, NULL);
+	}
+	JFS_IP(ip)->ea.flag = 0;
+
+	/* free ACL from both persistent and working map */
+	if (JFS_IP(ip)->acl.flag & DXD_EXTENT) {
+		/* free ACL pages */
+		txEA(tid, ip, &JFS_IP(ip)->acl, NULL);
+	}
+	JFS_IP(ip)->acl.flag = 0;
+
+	/* mark the target directory as deleted */
+	ip->i_nlink = 0;
+	mark_inode_dirty(ip);
+
+	rc = txCommit(tid, 2, &iplist[0], 0);
+
+	txEnd(tid);
+
+	up(&JFS_IP(dip)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+
+	/*
+	 * Truncating the directory index table is not guaranteed.  It
+	 * may need to be done iteratively
+	 */
+	if (test_cflag(COMMIT_Stale, dip)) {
+		if (dip->i_size > 1)
+			jfs_truncate_nolock(dip, 0);
+
+		clear_cflag(COMMIT_Stale, dip);
+	}
+
+      out2:
+	free_UCSname(&dname);
+
+      out:
+	jfs_info("jfs_rmdir: rc:%d", rc);
+	return rc;
+}
+
+/*
+ * NAME:	jfs_unlink(dip, dentry)
+ *
+ * FUNCTION:	remove a link to object <vp> named by <name> 
+ *		from parent directory <dvp>
+ *
+ * PARAMETER:	dip 	- inode of parent directory
+ *		dentry 	- dentry of object to be removed
+ *
+ * RETURN:	errors from subroutines
+ *
+ * note:
+ * temporary file: if one or more processes have the file open
+ * when the last link is removed, the link will be removed before
+ * unlink() returns, but the removal of the file contents will be
+ * postponed until all references to the file are closed.
+ *
+ * JFS does NOT support unlink() on directories.
+ *
+ */
+static int jfs_unlink(struct inode *dip, struct dentry *dentry)
+{
+	int rc;
+	tid_t tid;		/* transaction id */
+	struct inode *ip = dentry->d_inode;
+	ino_t ino;
+	struct component_name dname;	/* object name */
+	struct inode *iplist[2];
+	struct tblock *tblk;
+	s64 new_size = 0;
+	int commit_flag;
+
+	jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
+
+	/* Init inode for quota operations. */
+	DQUOT_INIT(ip);
+
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out;
+
+	IWRITE_LOCK(ip);
+
+	tid = txBegin(dip->i_sb, 0);
+
+	down(&JFS_IP(dip)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	iplist[0] = dip;
+	iplist[1] = ip;
+
+	/*
+	 * delete the entry of target file from parent directory
+	 */
+	ino = ip->i_ino;
+	if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) {
+		jfs_err("jfs_unlink: dtDelete returned %d", rc);
+		if (rc == -EIO)
+			txAbort(tid, 1);	/* Marks FS Dirty */
+		txEnd(tid);
+		up(&JFS_IP(dip)->commit_sem);
+		up(&JFS_IP(ip)->commit_sem);
+		IWRITE_UNLOCK(ip);
+		goto out1;
+	}
+
+	ASSERT(ip->i_nlink);
+
+	ip->i_ctime = dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	mark_inode_dirty(dip);
+
+	/* update target's inode */
+	ip->i_nlink--;
+	mark_inode_dirty(ip);
+
+	/*
+	 *      commit zero link count object
+	 */
+	if (ip->i_nlink == 0) {
+		assert(!test_cflag(COMMIT_Nolink, ip));
+		/* free block resources */
+		if ((new_size = commitZeroLink(tid, ip)) < 0) {
+			txAbort(tid, 1);	/* Marks FS Dirty */
+			txEnd(tid);
+			up(&JFS_IP(dip)->commit_sem);
+			up(&JFS_IP(ip)->commit_sem);
+			IWRITE_UNLOCK(ip);
+			rc = new_size;
+			goto out1;
+		}
+		tblk = tid_to_tblock(tid);
+		tblk->xflag |= COMMIT_DELETE;
+		tblk->u.ip = ip;
+	}
+
+	/*
+	 * Incomplete truncate of file data can
+	 * result in timing problems unless we synchronously commit the
+	 * transaction.
+	 */
+	if (new_size)
+		commit_flag = COMMIT_SYNC;
+	else
+		commit_flag = 0;
+
+	/*
+	 * If xtTruncate was incomplete, commit synchronously to avoid
+	 * timing complications
+	 */
+	rc = txCommit(tid, 2, &iplist[0], commit_flag);
+
+	txEnd(tid);
+
+	up(&JFS_IP(dip)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+
+
+	while (new_size && (rc == 0)) {
+		tid = txBegin(dip->i_sb, 0);
+		down(&JFS_IP(ip)->commit_sem);
+		new_size = xtTruncate_pmap(tid, ip, new_size);
+		if (new_size < 0) {
+			txAbort(tid, 1);	/* Marks FS Dirty */
+			rc = new_size;
+		} else
+			rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC);
+		txEnd(tid);
+		up(&JFS_IP(ip)->commit_sem);
+	}
+
+	if (ip->i_nlink == 0)
+		set_cflag(COMMIT_Nolink, ip);
+
+	IWRITE_UNLOCK(ip);
+
+	/*
+	 * Truncating the directory index table is not guaranteed.  It
+	 * may need to be done iteratively
+	 */
+	if (test_cflag(COMMIT_Stale, dip)) {
+		if (dip->i_size > 1)
+			jfs_truncate_nolock(dip, 0);
+
+		clear_cflag(COMMIT_Stale, dip);
+	}
+
+      out1:
+	free_UCSname(&dname);
+      out:
+	jfs_info("jfs_unlink: rc:%d", rc);
+	return rc;
+}
+
+/*
+ * NAME:	commitZeroLink()
+ *
+ * FUNCTION:    for a non-directory, called by jfs_unlink() and
+ *		jfs_rename(): truncate a regular file or symbolic
+ *		link to zero length.  return 0 if the type is not
+ *		one of these.
+ *
+ *		if the file is currently associated with a VM segment
+ *		only permanent disk and inode map resources are freed,
+ *		and neither the inode nor indirect blocks are modified
+ *		so that the resources can be later freed in the work
+ *		map by ctrunc1.
+ *		if there is no VM segment on entry, the resources are
+ *		freed in both work and permanent map.
+ *		(? for temporary file - memory object is cached even 
+ *		after no reference:
+ *		reference count > 0 -   )
+ *
+ * PARAMETERS:	tid	- transaction id
+ *		ip	- inode of the file to truncate
+ *
+ * RETURN:	Errors from subroutines
+ */
+static s64 commitZeroLink(tid_t tid, struct inode *ip)
+{
+	int filetype;
+	struct tblock *tblk;
+
+	jfs_info("commitZeroLink: tid = %d, ip = 0x%p", tid, ip);
+
+	filetype = ip->i_mode & S_IFMT;
+	switch (filetype) {
+	case S_IFREG:
+		break;
+	case S_IFLNK:
+		/* fast symbolic link */
+		if (ip->i_size < IDATASIZE) {
+			ip->i_size = 0;
+			return 0;
+		}
+		break;
+	default:
+		assert(filetype != S_IFDIR);
+		return 0;
+	}
+
+	set_cflag(COMMIT_Freewmap, ip);
+
+	/* mark transaction of block map update type */
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_PMAP;
+
+	/*
+	 * free EA
+	 */
+	if (JFS_IP(ip)->ea.flag & DXD_EXTENT)
+		/* acquire maplock on EA to be freed from block map */
+		txEA(tid, ip, &JFS_IP(ip)->ea, NULL);
+
+	/*
+	 * free ACL
+	 */
+	if (JFS_IP(ip)->acl.flag & DXD_EXTENT)
+		/* acquire maplock on EA to be freed from block map */
+		txEA(tid, ip, &JFS_IP(ip)->acl, NULL);
+
+	/*
+	 * free xtree/data (truncate to zero length):
+	 * free xtree/data pages from cache if COMMIT_PWMAP, 
+	 * free xtree/data blocks from persistent block map, and
+	 * free xtree/data blocks from working block map if COMMIT_PWMAP;
+	 */
+	if (ip->i_size)
+		return xtTruncate_pmap(tid, ip, 0);
+
+	return 0;
+}
+
+
+/*
+ * NAME:	freeZeroLink()
+ *
+ * FUNCTION:    for a non-directory, called by iClose():
+ *		free resources of a file from cache and WORKING map
+ *		for a file previously committed with zero link count
+ *		while associated with a pager object.
+ *
+ * PARAMETER:	ip	- pointer to inode of file.
+ *
+ * RETURN:	0 -ok
+ */
+int freeZeroLink(struct inode *ip)
+{
+	int rc = 0;
+	int type;
+
+	jfs_info("freeZeroLink: ip = 0x%p", ip);
+
+	/* return if not reg or symbolic link or if size is
+	 * already ok.
+	 */
+	type = ip->i_mode & S_IFMT;
+
+	switch (type) {
+	case S_IFREG:
+		break;
+	case S_IFLNK:
+		/* if it's contained in the inode, nothing to do */
+		if (ip->i_size < IDATASIZE)
+			return 0;
+		break;
+	default:
+		return 0;
+	}
+
+	/*
+	 * free EA
+	 */
+	if (JFS_IP(ip)->ea.flag & DXD_EXTENT) {
+		s64 xaddr = addressDXD(&JFS_IP(ip)->ea);
+		int xlen = lengthDXD(&JFS_IP(ip)->ea);
+		struct maplock maplock;	/* maplock for COMMIT_WMAP */
+		struct pxd_lock *pxdlock;	/* maplock for COMMIT_WMAP */
+
+		/* free EA pages from cache */
+		invalidate_dxd_metapages(ip, JFS_IP(ip)->ea);
+
+		/* free EA extent from working block map */
+		maplock.index = 1;
+		pxdlock = (struct pxd_lock *) & maplock;
+		pxdlock->flag = mlckFREEPXD;
+		PXDaddress(&pxdlock->pxd, xaddr);
+		PXDlength(&pxdlock->pxd, xlen);
+		txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
+	}
+
+	/*
+	 * free ACL
+	 */
+	if (JFS_IP(ip)->acl.flag & DXD_EXTENT) {
+		s64 xaddr = addressDXD(&JFS_IP(ip)->acl);
+		int xlen = lengthDXD(&JFS_IP(ip)->acl);
+		struct maplock maplock;	/* maplock for COMMIT_WMAP */
+		struct pxd_lock *pxdlock;	/* maplock for COMMIT_WMAP */
+
+		invalidate_dxd_metapages(ip, JFS_IP(ip)->acl);
+
+		/* free ACL extent from working block map */
+		maplock.index = 1;
+		pxdlock = (struct pxd_lock *) & maplock;
+		pxdlock->flag = mlckFREEPXD;
+		PXDaddress(&pxdlock->pxd, xaddr);
+		PXDlength(&pxdlock->pxd, xlen);
+		txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
+	}
+
+	/*
+	 * free xtree/data (truncate to zero length):
+	 * free xtree/data pages from cache, and
+	 * free xtree/data blocks from working block map;
+	 */
+	if (ip->i_size)
+		rc = xtTruncate(0, ip, 0, COMMIT_WMAP);
+
+	return rc;
+}
+
+/*
+ * NAME:	jfs_link(vp, dvp, name, crp)
+ *
+ * FUNCTION:	create a link to <vp> by the name = <name>
+ *		in the parent directory <dvp>
+ *
+ * PARAMETER:	vp 	- target object
+ *		dvp	- parent directory of new link
+ *		name	- name of new link to target object
+ *		crp	- credential
+ *
+ * RETURN:	Errors from subroutines
+ *
+ * note:
+ * JFS does NOT support link() on directories (to prevent circular
+ * path in the directory hierarchy);
+ * EPERM: the target object is a directory, and either the caller
+ * does not have appropriate privileges or the implementation prohibits
+ * using link() on directories [XPG4.2].
+ *
+ * JFS does NOT support links between file systems:
+ * EXDEV: target object and new link are on different file systems and
+ * implementation does not support links between file systems [XPG4.2].
+ */
+static int jfs_link(struct dentry *old_dentry,
+	     struct inode *dir, struct dentry *dentry)
+{
+	int rc;
+	tid_t tid;
+	struct inode *ip = old_dentry->d_inode;
+	ino_t ino;
+	struct component_name dname;
+	struct btstack btstack;
+	struct inode *iplist[2];
+
+	jfs_info("jfs_link: %s %s", old_dentry->d_name.name,
+		 dentry->d_name.name);
+
+	if (ip->i_nlink == JFS_LINK_MAX)
+		return -EMLINK;
+
+	if (ip->i_nlink == 0)
+		return -ENOENT;
+
+	tid = txBegin(ip->i_sb, 0);
+
+	down(&JFS_IP(dir)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	/*
+	 * scan parent directory for entry/freespace
+	 */
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out;
+
+	if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
+		goto free_dname;
+
+	/*
+	 * create entry for new link in parent directory
+	 */
+	ino = ip->i_ino;
+	if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack)))
+		goto free_dname;
+
+	/* update object inode */
+	ip->i_nlink++;		/* for new link */
+	ip->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(dir);
+	atomic_inc(&ip->i_count);
+
+	iplist[0] = ip;
+	iplist[1] = dir;
+	rc = txCommit(tid, 2, &iplist[0], 0);
+
+	if (rc) {
+		ip->i_nlink--;
+		iput(ip);
+	} else
+		d_instantiate(dentry, ip);
+
+      free_dname:
+	free_UCSname(&dname);
+
+      out:
+	txEnd(tid);
+
+	up(&JFS_IP(dir)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+
+	jfs_info("jfs_link: rc:%d", rc);
+	return rc;
+}
+
+/*
+ * NAME:	jfs_symlink(dip, dentry, name)
+ *
+ * FUNCTION:	creates a symbolic link to <symlink> by name <name>
+ *		in directory <dip>
+ *
+ * PARAMETER:	dip	- parent directory vnode
+ *		dentry	- dentry of symbolic link
+ *		name	- the path name of the existing object
+ *			  that will be the source of the link
+ *
+ * RETURN:	errors from subroutines
+ *
+ * note:
+ * ENAMETOOLONG: pathname resolution of a symbolic link produced
+ * an intermediate result whose length exceeds PATH_MAX [XPG4.2]
+ */
+
+static int jfs_symlink(struct inode *dip, struct dentry *dentry,
+		const char *name)
+{
+	int rc;
+	tid_t tid;
+	ino_t ino = 0;
+	struct component_name dname;
+	int ssize;		/* source pathname size */
+	struct btstack btstack;
+	struct inode *ip = dentry->d_inode;
+	unchar *i_fastsymlink;
+	s64 xlen = 0;
+	int bmask = 0, xsize;
+	s64 extent = 0, xaddr;
+	struct metapage *mp;
+	struct super_block *sb;
+	struct tblock *tblk;
+
+	struct inode *iplist[2];
+
+	jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
+
+	ssize = strlen(name) + 1;
+
+	/*
+	 * search parent directory for entry/freespace
+	 * (dtSearch() returns parent directory page pinned)
+	 */
+
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out1;
+
+	/*
+	 * allocate on-disk/in-memory inode for symbolic link:
+	 * (iAlloc() returns new, locked inode)
+	 */
+	ip = ialloc(dip, S_IFLNK | 0777);
+	if (ip == NULL) {
+		rc = -ENOSPC;
+		goto out2;
+	}
+
+	tid = txBegin(dip->i_sb, 0);
+
+	down(&JFS_IP(dip)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_CREATE;
+	tblk->ino = ip->i_ino;
+	tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
+
+	/* fix symlink access permission
+	 * (dir_create() ANDs in the u.u_cmask, 
+	 * but symlinks really need to be 777 access)
+	 */
+	ip->i_mode |= 0777;
+
+	/*
+	 * write symbolic link target path name
+	 */
+	xtInitRoot(tid, ip);
+
+	/*
+	 * write source path name inline in on-disk inode (fast symbolic link)
+	 */
+
+	if (ssize <= IDATASIZE) {
+		ip->i_op = &jfs_symlink_inode_operations;
+
+		i_fastsymlink = JFS_IP(ip)->i_inline;
+		memcpy(i_fastsymlink, name, ssize);
+		ip->i_size = ssize - 1;
+
+		/*
+		 * if symlink is > 128 bytes, we don't have the space to
+		 * store inline extended attributes
+		 */
+		if (ssize > sizeof (JFS_IP(ip)->i_inline))
+			JFS_IP(ip)->mode2 &= ~INLINEEA;
+
+		jfs_info("jfs_symlink: fast symlink added  ssize:%d name:%s ",
+			 ssize, name);
+	}
+	/*
+	 * write source path name in a single extent
+	 */
+	else {
+		jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
+
+		ip->i_op = &page_symlink_inode_operations;
+		ip->i_mapping->a_ops = &jfs_aops;
+
+		/*
+		 * even though the data of symlink object (source 
+		 * path name) is treated as non-journaled user data,
+		 * it is read/written thru buffer cache for performance.
+		 */
+		sb = ip->i_sb;
+		bmask = JFS_SBI(sb)->bsize - 1;
+		xsize = (ssize + bmask) & ~bmask;
+		xaddr = 0;
+		xlen = xsize >> JFS_SBI(sb)->l2bsize;
+		if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) {
+			txAbort(tid, 0);
+			rc = -ENOSPC;
+			goto out3;
+		}
+		extent = xaddr;
+		ip->i_size = ssize - 1;
+		while (ssize) {
+			/* This is kind of silly since PATH_MAX == 4K */
+			int copy_size = min(ssize, PSIZE);
+
+			mp = get_metapage(ip, xaddr, PSIZE, 1);
+
+			if (mp == NULL) {
+				xtTruncate(tid, ip, 0, COMMIT_PWMAP);
+				rc = -EIO;
+				txAbort(tid, 0);
+				goto out3;
+			}
+			memcpy(mp->data, name, copy_size);
+			flush_metapage(mp);
+			ssize -= copy_size;
+			name += copy_size;
+			xaddr += JFS_SBI(sb)->nbperpage;
+		}
+	}
+
+	/*
+	 * create entry for symbolic link in parent directory
+	 */
+	rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE);
+	if (rc == 0) {
+		ino = ip->i_ino;
+		rc = dtInsert(tid, dip, &dname, &ino, &btstack);
+	}
+	if (rc) {
+		if (xlen)
+			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
+		txAbort(tid, 0);
+		/* discard new inode */
+		goto out3;
+	}
+
+	insert_inode_hash(ip);
+	mark_inode_dirty(ip);
+
+	/*
+	 * commit update of parent directory and link object
+	 */
+
+	iplist[0] = dip;
+	iplist[1] = ip;
+	rc = txCommit(tid, 2, &iplist[0], 0);
+
+      out3:
+	txEnd(tid);
+	up(&JFS_IP(dip)->commit_sem);
+	up(&JFS_IP(ip)->commit_sem);
+	if (rc) {
+		ip->i_nlink = 0;
+		iput(ip);
+	} else
+		d_instantiate(dentry, ip);
+
+      out2:
+	free_UCSname(&dname);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	if (rc == 0)
+		jfs_init_acl(ip, dip);
+#endif
+
+      out1:
+	jfs_info("jfs_symlink: rc:%d", rc);
+	return rc;
+}
+
+
+/*
+ * NAME:        jfs_rename
+ *
+ * FUNCTION:    rename a file or directory
+ */
+static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+	       struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct btstack btstack;
+	ino_t ino;
+	struct component_name new_dname;
+	struct inode *new_ip;
+	struct component_name old_dname;
+	struct inode *old_ip;
+	int rc;
+	tid_t tid;
+	struct tlock *tlck;
+	struct dt_lock *dtlck;
+	struct lv *lv;
+	int ipcount;
+	struct inode *iplist[4];
+	struct tblock *tblk;
+	s64 new_size = 0;
+	int commit_flag;
+
+
+	jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
+		 new_dentry->d_name.name);
+
+	old_ip = old_dentry->d_inode;
+	new_ip = new_dentry->d_inode;
+
+	if ((rc = get_UCSname(&old_dname, old_dentry)))
+		goto out1;
+
+	if ((rc = get_UCSname(&new_dname, new_dentry)))
+		goto out2;
+
+	/*
+	 * Make sure source inode number is what we think it is
+	 */
+	rc = dtSearch(old_dir, &old_dname, &ino, &btstack, JFS_LOOKUP);
+	if (rc || (ino != old_ip->i_ino)) {
+		rc = -ENOENT;
+		goto out3;
+	}
+
+	/*
+	 * Make sure dest inode number (if any) is what we think it is
+	 */
+	rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_LOOKUP);
+	if (rc == 0) {
+		if ((new_ip == 0) || (ino != new_ip->i_ino)) {
+			rc = -ESTALE;
+			goto out3;
+		}
+	} else if (rc != -ENOENT)
+		goto out3;
+	else if (new_ip) {
+		/* no entry exists, but one was expected */
+		rc = -ESTALE;
+		goto out3;
+	}
+
+	if (S_ISDIR(old_ip->i_mode)) {
+		if (new_ip) {
+			if (!dtEmpty(new_ip)) {
+				rc = -ENOTEMPTY;
+				goto out3;
+			}
+		} else if ((new_dir != old_dir) &&
+			   (new_dir->i_nlink == JFS_LINK_MAX)) {
+			rc = -EMLINK;
+			goto out3;
+		}
+	} else if (new_ip) {
+		IWRITE_LOCK(new_ip);
+		/* Init inode for quota operations. */
+		DQUOT_INIT(new_ip);
+	}
+
+	/*
+	 * The real work starts here
+	 */
+	tid = txBegin(new_dir->i_sb, 0);
+
+	down(&JFS_IP(new_dir)->commit_sem);
+	down(&JFS_IP(old_ip)->commit_sem);
+	if (old_dir != new_dir)
+		down(&JFS_IP(old_dir)->commit_sem);
+
+	if (new_ip) {
+		down(&JFS_IP(new_ip)->commit_sem);
+		/*
+		 * Change existing directory entry to new inode number
+		 */
+		ino = new_ip->i_ino;
+		rc = dtModify(tid, new_dir, &new_dname, &ino,
+			      old_ip->i_ino, JFS_RENAME);
+		if (rc)
+			goto out4;
+		new_ip->i_nlink--;
+		if (S_ISDIR(new_ip->i_mode)) {
+			new_ip->i_nlink--;
+			if (new_ip->i_nlink) {
+				up(&JFS_IP(new_dir)->commit_sem);
+				up(&JFS_IP(old_ip)->commit_sem);
+				if (old_dir != new_dir)
+					up(&JFS_IP(old_dir)->commit_sem);
+				if (!S_ISDIR(old_ip->i_mode) && new_ip)
+					IWRITE_UNLOCK(new_ip);
+				jfs_error(new_ip->i_sb,
+					  "jfs_rename: new_ip->i_nlink != 0");
+				return -EIO;
+			}
+			tblk = tid_to_tblock(tid);
+			tblk->xflag |= COMMIT_DELETE;
+			tblk->u.ip = new_ip;
+		} else if (new_ip->i_nlink == 0) {
+			assert(!test_cflag(COMMIT_Nolink, new_ip));
+			/* free block resources */
+			if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
+				txAbort(tid, 1);	/* Marks FS Dirty */
+				rc = new_size;		
+				goto out4;
+			}
+			tblk = tid_to_tblock(tid);
+			tblk->xflag |= COMMIT_DELETE;
+			tblk->u.ip = new_ip;
+		} else {
+			new_ip->i_ctime = CURRENT_TIME;
+			mark_inode_dirty(new_ip);
+		}
+	} else {
+		/*
+		 * Add new directory entry
+		 */
+		rc = dtSearch(new_dir, &new_dname, &ino, &btstack,
+			      JFS_CREATE);
+		if (rc) {
+			jfs_err("jfs_rename didn't expect dtSearch to fail "
+				"w/rc = %d", rc);
+			goto out4;
+		}
+
+		ino = old_ip->i_ino;
+		rc = dtInsert(tid, new_dir, &new_dname, &ino, &btstack);
+		if (rc) {
+			if (rc == -EIO)
+				jfs_err("jfs_rename: dtInsert returned -EIO");
+			goto out4;
+		}
+		if (S_ISDIR(old_ip->i_mode))
+			new_dir->i_nlink++;
+	}
+	/*
+	 * Remove old directory entry
+	 */
+
+	ino = old_ip->i_ino;
+	rc = dtDelete(tid, old_dir, &old_dname, &ino, JFS_REMOVE);
+	if (rc) {
+		jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
+			rc);
+		txAbort(tid, 1);	/* Marks Filesystem dirty */
+		goto out4;
+	}
+	if (S_ISDIR(old_ip->i_mode)) {
+		old_dir->i_nlink--;
+		if (old_dir != new_dir) {
+			/*
+			 * Change inode number of parent for moved directory
+			 */
+
+			JFS_IP(old_ip)->i_dtroot.header.idotdot =
+				cpu_to_le32(new_dir->i_ino);
+
+			/* Linelock header of dtree */
+			tlck = txLock(tid, old_ip,
+				    (struct metapage *) &JFS_IP(old_ip)->bxflag,
+				      tlckDTREE | tlckBTROOT | tlckRELINK);
+			dtlck = (struct dt_lock *) & tlck->lock;
+			ASSERT(dtlck->index == 0);
+			lv = & dtlck->lv[0];
+			lv->offset = 0;
+			lv->length = 1;
+			dtlck->index++;
+		}
+	}
+
+	/*
+	 * Update ctime on changed/moved inodes & mark dirty
+	 */
+	old_ip->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(old_ip);
+
+	new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb);
+	mark_inode_dirty(new_dir);
+
+	/* Build list of inodes modified by this transaction */
+	ipcount = 0;
+	iplist[ipcount++] = old_ip;
+	if (new_ip)
+		iplist[ipcount++] = new_ip;
+	iplist[ipcount++] = old_dir;
+
+	if (old_dir != new_dir) {
+		iplist[ipcount++] = new_dir;
+		old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+		mark_inode_dirty(old_dir);
+	}
+
+	/*
+	 * Incomplete truncate of file data can
+	 * result in timing problems unless we synchronously commit the
+	 * transaction.
+	 */
+	if (new_size)
+		commit_flag = COMMIT_SYNC;
+	else
+		commit_flag = 0;
+
+	rc = txCommit(tid, ipcount, iplist, commit_flag);
+
+      out4:
+	txEnd(tid);
+
+	up(&JFS_IP(new_dir)->commit_sem);
+	up(&JFS_IP(old_ip)->commit_sem);
+	if (old_dir != new_dir)
+		up(&JFS_IP(old_dir)->commit_sem);
+	if (new_ip)
+		up(&JFS_IP(new_ip)->commit_sem);
+
+	while (new_size && (rc == 0)) {
+		tid = txBegin(new_ip->i_sb, 0);
+		down(&JFS_IP(new_ip)->commit_sem);
+		new_size = xtTruncate_pmap(tid, new_ip, new_size);
+		if (new_size < 0) {
+			txAbort(tid, 1);
+			rc = new_size;		
+		} else
+			rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC);
+		txEnd(tid);
+		up(&JFS_IP(new_ip)->commit_sem);
+	}
+	if (new_ip && (new_ip->i_nlink == 0))
+		set_cflag(COMMIT_Nolink, new_ip);
+      out3:
+	free_UCSname(&new_dname);
+      out2:
+	free_UCSname(&old_dname);
+      out1:
+	if (new_ip && !S_ISDIR(new_ip->i_mode))
+		IWRITE_UNLOCK(new_ip);
+	/*
+	 * Truncating the directory index table is not guaranteed.  It
+	 * may need to be done iteratively
+	 */
+	if (test_cflag(COMMIT_Stale, old_dir)) {
+		if (old_dir->i_size > 1)
+			jfs_truncate_nolock(old_dir, 0);
+
+		clear_cflag(COMMIT_Stale, old_dir);
+	}
+
+	jfs_info("jfs_rename: returning %d", rc);
+	return rc;
+}
+
+
+/*
+ * NAME:        jfs_mknod
+ *
+ * FUNCTION:    Create a special file (device)
+ */
+static int jfs_mknod(struct inode *dir, struct dentry *dentry,
+		int mode, dev_t rdev)
+{
+	struct jfs_inode_info *jfs_ip;
+	struct btstack btstack;
+	struct component_name dname;
+	ino_t ino;
+	struct inode *ip;
+	struct inode *iplist[2];
+	int rc;
+	tid_t tid;
+	struct tblock *tblk;
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	jfs_info("jfs_mknod: %s", dentry->d_name.name);
+
+	if ((rc = get_UCSname(&dname, dentry)))
+		goto out;
+
+	ip = ialloc(dir, mode);
+	if (ip == NULL) {
+		rc = -ENOSPC;
+		goto out1;
+	}
+	jfs_ip = JFS_IP(ip);
+
+	tid = txBegin(dir->i_sb, 0);
+
+	down(&JFS_IP(dir)->commit_sem);
+	down(&JFS_IP(ip)->commit_sem);
+
+	if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
+		goto out3;
+
+	tblk = tid_to_tblock(tid);
+	tblk->xflag |= COMMIT_CREATE;
+	tblk->ino = ip->i_ino;
+	tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
+
+	ino = ip->i_ino;
+	if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack)))
+		goto out3;
+
+	ip->i_op = &jfs_file_inode_operations;
+	jfs_ip->dev = new_encode_dev(rdev);
+	init_special_inode(ip, ip->i_mode, rdev);
+
+	insert_inode_hash(ip);
+	mark_inode_dirty(ip);
+
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+
+	mark_inode_dirty(dir);
+
+	iplist[0] = dir;
+	iplist[1] = ip;
+	rc = txCommit(tid, 2, iplist, 0);
+
+      out3:
+	txEnd(tid);
+	up(&JFS_IP(ip)->commit_sem);
+	up(&JFS_IP(dir)->commit_sem);
+	if (rc) {
+		ip->i_nlink = 0;
+		iput(ip);
+	} else
+		d_instantiate(dentry, ip);
+
+      out1:
+	free_UCSname(&dname);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	if (rc == 0)
+		jfs_init_acl(ip, dir);
+#endif
+
+      out:
+	jfs_info("jfs_mknod: returning %d", rc);
+	return rc;
+}
+
+static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struct nameidata *nd)
+{
+	struct btstack btstack;
+	ino_t inum;
+	struct inode *ip;
+	struct component_name key;
+	const char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	int rc;
+
+	jfs_info("jfs_lookup: name = %s", name);
+
+
+	if ((name[0] == '.') && (len == 1))
+		inum = dip->i_ino;
+	else if (strcmp(name, "..") == 0)
+		inum = PARENT(dip);
+	else {
+		if ((rc = get_UCSname(&key, dentry)))
+			return ERR_PTR(rc);
+		rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP);
+		free_UCSname(&key);
+		if (rc == -ENOENT) {
+			d_add(dentry, NULL);
+			return ERR_PTR(0);
+		} else if (rc) {
+			jfs_err("jfs_lookup: dtSearch returned %d", rc);
+			return ERR_PTR(rc);
+		}
+	}
+
+	ip = iget(dip->i_sb, inum);
+	if (ip == NULL || is_bad_inode(ip)) {
+		jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum);
+		if (ip)
+			iput(ip);
+		return ERR_PTR(-EACCES);
+	}
+
+	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
+		dentry->d_op = &jfs_ci_dentry_operations;
+
+	dentry = d_splice_alias(ip, dentry);
+
+	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
+		dentry->d_op = &jfs_ci_dentry_operations;
+
+	return dentry;
+}
+
+struct dentry *jfs_get_parent(struct dentry *dentry)
+{
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct dentry *parent = ERR_PTR(-ENOENT);
+	struct inode *inode;
+	unsigned long parent_ino;
+
+	parent_ino =
+		le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot);
+	inode = iget(sb, parent_ino);
+	if (inode) {
+		if (is_bad_inode(inode)) {
+			iput(inode);
+			parent = ERR_PTR(-EACCES);
+		} else {
+			parent = d_alloc_anon(inode);
+			if (!parent) {
+				parent = ERR_PTR(-ENOMEM);
+				iput(inode);
+			}
+		}
+	}
+
+	return parent;
+}
+
+struct inode_operations jfs_dir_inode_operations = {
+	.create		= jfs_create,
+	.lookup		= jfs_lookup,
+	.link		= jfs_link,
+	.unlink		= jfs_unlink,
+	.symlink	= jfs_symlink,
+	.mkdir		= jfs_mkdir,
+	.rmdir		= jfs_rmdir,
+	.mknod		= jfs_mknod,
+	.rename		= jfs_rename,
+	.setxattr	= jfs_setxattr,
+	.getxattr	= jfs_getxattr,
+	.listxattr	= jfs_listxattr,
+	.removexattr	= jfs_removexattr,
+#ifdef CONFIG_JFS_POSIX_ACL
+	.setattr	= jfs_setattr,
+	.permission	= jfs_permission,
+#endif
+};
+
+struct file_operations jfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= jfs_readdir,
+	.fsync		= jfs_fsync,
+};
+
+static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
+{
+	unsigned long hash;
+	int i;
+
+	hash = init_name_hash();
+	for (i=0; i < this->len; i++)
+		hash = partial_name_hash(tolower(this->name[i]), hash);
+	this->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+static int jfs_ci_compare(struct dentry *dir, struct qstr *a, struct qstr *b)
+{
+	int i, result = 1;
+
+	if (a->len != b->len)
+		goto out;
+	for (i=0; i < a->len; i++) {
+		if (tolower(a->name[i]) != tolower(b->name[i]))
+			goto out;
+	}
+	result = 0;
+
+	/*
+	 * We want creates to preserve case.  A negative dentry, a, that
+	 * has a different case than b may cause a new entry to be created
+	 * with the wrong case.  Since we can't tell if a comes from a negative
+	 * dentry, we blindly replace it with b.  This should be harmless if
+	 * a is not a negative dentry.
+	 */
+	memcpy((unsigned char *)a->name, b->name, a->len);
+out:
+	return result;
+}
+
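+/*
+ * Worked example (editor's note, not part of the original source) of the
+ * case-preservation issue handled above: suppose a lookup of "Readme.TXT"
+ * failed and left a negative dentry with that spelling.  A later
+ * creat("readme.txt") hashes to the same bucket and compares equal
+ * ignoring case, so without the memcpy() in jfs_ci_compare() the new
+ * entry would be created with the stale "Readme.TXT" spelling rather
+ * than the name the caller asked for.
+ */
+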
+struct dentry_operations jfs_ci_dentry_operations =
+{
+	.d_hash = jfs_ci_hash,
+	.d_compare = jfs_ci_compare,
+};
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
new file mode 100644
index 0000000..2eb6869
--- /dev/null
+++ b/fs/jfs/resize.c
@@ -0,0 +1,537 @@
+/*
+ *   Copyright (C) International Business Machines  Corp., 2000-2004
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_dinode.h"
+#include "jfs_imap.h"
+#include "jfs_dmap.h"
+#include "jfs_superblock.h"
+#include "jfs_txnmgr.h"
+#include "jfs_debug.h"
+
+#define BITSPERPAGE     (PSIZE << 3)
+#define L2MEGABYTE      20
+#define MEGABYTE        (1 << L2MEGABYTE)
+#define MEGABYTE32     (MEGABYTE << 5)
+
+/* convert block number to bmap file page number */
+#define BLKTODMAPN(b)\
+        (((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
+
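+/*
+ * Editor's note (not part of the original source): each dmap page
+ * describes 2^13 = 8192 aggregate blocks; the higher-order terms in
+ * BLKTODMAPN() account for the interleaved summary pages and the fixed
+ * control pages at the front of the bmap file.  For example, block
+ * 1,000,000 lands in bmap page (1000000 >> 13) + 0 + 0 + 3 + 1 = 126.
+ */
+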
+/*
+ *      jfs_extendfs()
+ *
+ * function: extend file system;
+ *
+ *   |-------------------------------|----------|----------|
+ *   file system space               fsck       inline log
+ *                                   workspace  space
+ *
+ * input:
+ *      new LVSize: in LV blocks (required)
+ *      new LogSize: in LV blocks (optional)
+ *      new FSSize: in LV blocks (optional)
+ *
+ * new configuration:
+ * 1. set new LogSize as specified or default from new LVSize;
+ * 2. compute new FSCKSize from new LVSize;
+ * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
+ *    assert(new FSSize >= old FSSize),
+ *    i.e., the file system must not be shrunk
+ *    (a worked sketch of this computation follows the function body);
+ */
+int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
+{
+	int rc = 0;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct inode *ipbmap = sbi->ipbmap;
+	struct inode *ipbmap2;
+	struct inode *ipimap = sbi->ipimap;
+	struct jfs_log *log = sbi->log;
+	struct bmap *bmp = sbi->bmap;
+	s64 newLogAddress, newFSCKAddress;
+	int newFSCKSize;
+	s64 newMapSize = 0, mapSize;
+	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
+	s64 oldLVSize;
+	s64 newFSSize;
+	s64 VolumeSize;
+	int newNpages = 0, nPages, newPage, xlen, t32;
+	int tid;
+	int log_formatted = 0;
+	struct inode *iplist[1];
+	struct jfs_superblock *j_sb, *j_sb2;
+	uint old_agsize;
+	struct buffer_head *bh, *bh2;
+
+	/* If the volume hasn't grown, get out now */
+
+	if (sbi->mntflag & JFS_INLINELOG)
+		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
+	else
+		oldLVSize = addressPXD(&sbi->fsckpxd) +
+		    lengthPXD(&sbi->fsckpxd);
+
+	if (oldLVSize >= newLVSize) {
+		printk(KERN_WARNING
+		       "jfs_extendfs: volume hasn't grown, returning\n");
+		goto out;
+	}
+
+	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+
+	if (VolumeSize) {
+		if (newLVSize > VolumeSize) {
+			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		/* check the device */
+		bh = sb_bread(sb, newLVSize - 1);
+		if (!bh) {
+			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
+			rc = -EINVAL;
+			goto out;
+		}
+		bforget(bh);
+	}
+
+	/* Can't extend write-protected drive */
+
+	if (isReadOnly(ipbmap)) {
+		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
+		rc = -EROFS;
+		goto out;
+	}
+
+	/*
+	 *      reconfigure LV spaces
+	 *      ---------------------
+	 *
+	 * validate new size, or, if not specified, determine new size
+	 */
+
+	/*
+	 * reconfigure inline log space:
+	 */
+	if ((sbi->mntflag & JFS_INLINELOG)) {
+		if (newLogSize == 0) {
+			/*
+			 * no size specified: default to 1/256 of aggregate
+			 * size; rounded up to a megabyte boundary;
+			 */
+			newLogSize = newLVSize >> 8;
+			t32 = (1 << (20 - sbi->l2bsize)) - 1;
+			newLogSize = (newLogSize + t32) & ~t32;
+			newLogSize =
+			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
+		} else {
+			/*
+			 * convert the newLogSize to fs blocks.
+			 *
+			 * Since this is given in megabytes, it will always be
+			 * an even number of pages.
+			 */
+			newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize;
+		}
+
+	} else
+		newLogSize = 0;
+
+	newLogAddress = newLVSize - newLogSize;
+
+	/*
+	 * reconfigure fsck work space:
+	 *
+	 * configure it to the end of the logical volume regardless of
+	 * whether file system extends to the end of the aggregate;
+	 * Need enough 4k pages to cover:
+	 *  - 1 bit per block in aggregate rounded up to BPERDMAP boundary
+	 *  - 1 extra page to handle control page and intermediate level pages
+	 *  - 50 extra pages for the chkdsk service log
+	 */
+	t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP)
+	    << L2BPERDMAP;
+	t32 = ((t64 + (BITSPERPAGE - 1)) / BITSPERPAGE) + 1 + 50;
+	newFSCKSize = t32 << sbi->l2nbperpage;
+	newFSCKAddress = newLogAddress - newFSCKSize;
+
+	/*
+	 * compute new file system space;
+	 */
+	newFSSize = newLVSize - newLogSize - newFSCKSize;
+
+	/* file system cannot be shrunk */
+	if (newFSSize < bmp->db_mapsize) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * If we're expanding enough that the inline log does not overlap
+	 * the old one, we can format the new log before we quiesce the
+	 * filesystem.
+	 */
+	if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) {
+		if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
+			goto out;
+		log_formatted = 1;
+	}
+	/*
+	 *      quiesce file system
+	 *
+	 * (prepare to move the inline log and to prevent map update)
+	 *
+	 * block any new transactions and wait for completion of
+	 * all wip transactions and flush modified pages s.t.
+	 * on-disk file system is in consistent state and
+	 * log is not required for recovery.
+	 */
+	txQuiesce(sb);
+
+	if (sbi->mntflag & JFS_INLINELOG) {
+		/*
+		 * deactivate old inline log
+		 */
+		lmLogShutdown(log);
+
+		/*
+		 * mark on-disk super block for fs in transition;
+		 *
+		 * update on-disk superblock for the new space configuration
+		 * of inline log space and fsck work space descriptors:
+		 * N.B. FS descriptor is NOT updated;
+		 *
+		 * crash recovery:
+		 * logredo(): if FM_EXTENDFS, return to fsck() for cleanup;
+		 * fsck(): if FM_EXTENDFS, reformat inline log and fsck
+		 * workspace from superblock inline log descriptor and fsck
+		 * workspace descriptor;
+		 */
+
+		/* read in superblock */
+		if ((rc = readSuper(sb, &bh)))
+			goto error_out;
+		j_sb = (struct jfs_superblock *)bh->b_data;
+
+		/* mark extendfs() in progress */
+		j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);
+		j_sb->s_xsize = cpu_to_le64(newFSSize);
+		PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress);
+		PXDlength(&j_sb->s_xfsckpxd, newFSCKSize);
+		PXDaddress(&j_sb->s_xlogpxd, newLogAddress);
+		PXDlength(&j_sb->s_xlogpxd, newLogSize);
+
+		/* synchronously update superblock */
+		mark_buffer_dirty(bh);
+		sync_dirty_buffer(bh);
+		brelse(bh);
+
+		/*
+		 * format new inline log synchronously;
+		 *
+		 * crash recovery: if log move in progress,
+		 * reformat log and exit success;
+		 */
+		if (!log_formatted)
+			if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
+				goto error_out;
+
+		/*
+		 * activate new log
+		 */
+		log->base = newLogAddress;
+		log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits);
+		if ((rc = lmLogInit(log)))
+			goto error_out;
+	}
+
+	/*
+	 *      extend block allocation map
+	 *      ---------------------------
+	 *
+	 * extendfs() for new extension, retry after crash recovery;
+	 *
+	 * note: both logredo() and fsck() rebuild map from
+	 * the bitmap and configuration parameter from superblock
+	 * (disregarding all other control information in the map);
+	 *
+	 * superblock:
+	 *  s_size: aggregate size in physical blocks;
+	 */
+	/*
+	 *      compute the new block allocation map configuration
+	 *
+	 * map dinode:
+	 *  di_size: map file size in byte;
+	 *  di_nblocks: number of blocks allocated for map file;
+	 *  di_mapsize: number of blocks in aggregate (covered by map);
+	 * map control page:
+	 *  db_mapsize: number of blocks in aggregate (covered by map);
+	 */
+	newMapSize = newFSSize;
+	/* number of data pages of new bmap file:
+	 * round up new size to a full dmap page boundary and
+	 * add 1 extra dmap page for next extendfs()
+	 */
+	t64 = (newMapSize - 1) + BPERDMAP;
+	newNpages = BLKTODMAPN(t64) + 1;
+
+	/*
+	 *      extend map from current map (WITHOUT growing mapfile)
+	 *
+	 * map new extension with unmapped part of the last partial
+	 * dmap page, if applicable, and extra page(s) allocated
+	 * at end of bmap by mkfs() or previous extendfs();
+	 */
+      extendBmap:
+	/* compute number of blocks requested to extend */
+	mapSize = bmp->db_mapsize;
+	XAddress = mapSize;	/* eXtension Address */
+	XSize = newMapSize - mapSize;	/* eXtension Size */
+	old_agsize = bmp->db_agsize;	/* We need to know if this changes */
+
+	/* compute number of blocks that can be extended by current mapfile */
+	t64 = dbMapFileSizeToMapSize(ipbmap);
+	if (mapSize > t64) {
+		printk(KERN_ERR "jfs_extendfs: mapSize (0x%Lx) > t64 (0x%Lx)\n",
+		       (long long) mapSize, (long long) t64);
+		rc = -EIO;
+		goto error_out;
+	}
+	nblocks = min(t64 - mapSize, XSize);
+
+	/*
+	 * update map pages for new extension:
+	 *
+	 * update/init dmap and bubble up the control hierarchy
+	 * incrementally fold up dmaps into upper levels;
+	 * update bmap control page;
+	 */
+	if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
+		goto error_out;
+	/*
+	 * the map now has extended to cover additional nblocks:
+	 * dn_mapsize = oldMapsize + nblocks;
+	 */
+	/* ipbmap->i_mapsize += nblocks; */
+	XSize -= nblocks;
+
+	/*
+	 *      grow map file to cover remaining extension
+	 *      and/or one extra dmap page for next extendfs();
+	 *
+	 * allocate new map pages and its backing blocks, and
+	 * update map file xtree
+	 */
+	/* compute number of data pages of current bmap file */
+	nPages = ipbmap->i_size >> L2PSIZE;
+
+	/* need to grow map file ? */
+	if (nPages == newNpages)
+		goto finalizeBmap;
+
+	/*
+	 * grow bmap file for the new map pages required:
+	 *
+	 * allocate growth at the start of newly extended region;
+	 * bmap file only grows sequentially, i.e., both data pages
+	 * and possibly xtree index pages may grow in append mode,
+	 * s.t. logredo() can reconstruct pre-extension state
+	 * by washing away bmap file of pages outside s_size boundary;
+	 */
+	/*
+	 * journal map file growth as if a regular file growth:
+	 * (note: bmap is created with di_mode = IFJOURNAL|IFREG);
+	 *
+	 * journaling of bmap file growth is not strictly required since
+	 * logredo() does not (and cannot) use log records of bmap file
+	 * growth, but it provides careful write semantics, pmap update, etc.;
+	 */
+	/* synchronous write of data pages: bmap data pages are
+	 * cached in meta-data cache, and not written out
+	 * by txCommit();
+	 */
+	filemap_fdatawait(ipbmap->i_mapping);
+	filemap_fdatawrite(ipbmap->i_mapping);
+	filemap_fdatawait(ipbmap->i_mapping);
+	diWriteSpecial(ipbmap, 0);
+
+	newPage = nPages;	/* first new page number */
+	xoff = newPage << sbi->l2nbperpage;
+	xlen = (newNpages - nPages) << sbi->l2nbperpage;
+	xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);
+	xaddr = XAddress;
+
+	tid = txBegin(sb, COMMIT_FORCE);
+
+	if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {
+		txEnd(tid);
+		goto error_out;
+	}
+	/* update bmap file size */
+	ipbmap->i_size += xlen << sbi->l2bsize;
+	inode_add_bytes(ipbmap, xlen << sbi->l2bsize);
+
+	iplist[0] = ipbmap;
+	rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
+
+	txEnd(tid);
+
+	if (rc)
+		goto error_out;
+
+	/*
+	 * the map file has now grown to cover the extension further out;
+	 * di_size = new map file size;
+	 *
+	 * for a huge extension, the previous pass based on the previous
+	 * map file size may not have been sufficient to cover the whole
+	 * extension (it could have been used up for new map pages), but
+	 * the newly grown map file now covers a much larger free space
+	 * available for further extension of the map;
+	 */
+	/* any more blocks to extend ? */
+	if (XSize)
+		goto extendBmap;
+
+      finalizeBmap:
+	/* finalize bmap */
+	dbFinalizeBmap(ipbmap);
+
+	/*
+	 *      update inode allocation map
+	 *      ---------------------------
+	 *
+	 * move iag lists from old to new iag;
+	 * agstart field is not updated for logredo() to reconstruct
+	 * iag lists if system crash occurs.
+	 * (computation of ag number from agstart based on agsize
+	 * will correctly identify the new ag);
+	 */
+	/* if new AG size the same as old AG size, done! */
+	if (bmp->db_agsize != old_agsize) {
+		if ((rc = diExtendFS(ipimap, ipbmap)))
+			goto error_out;
+
+		/* finalize imap */
+		if ((rc = diSync(ipimap)))
+			goto error_out;
+	}
+
+	/*
+	 *      finalize
+	 *      --------
+	 *
+	 * extension is committed when on-disk super block is
+	 * updated with new descriptors: logredo will recover
+	 * crash before it to pre-extension state;
+	 */
+
+	/* sync log to skip log replay of bmap file growth transaction; */
+	/* lmLogSync(log, 1); */
+
+	/*
+	 * synchronous write bmap global control page;
+	 * for crash before completion of write
+	 * logredo() will recover to pre-extendfs state;
+	 * for crash after completion of write,
+	 * logredo() will recover post-extendfs state;
+	 */
+	if ((rc = dbSync(ipbmap)))
+		goto error_out;
+
+	/*
+	 * copy primary bmap inode to secondary bmap inode
+	 */
+
+	ipbmap2 = diReadSpecial(sb, BMAP_I, 1);
+	if (ipbmap2 == NULL) {
+		printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n");
+		goto error_out;
+	}
+	memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288);
+	ipbmap2->i_size = ipbmap->i_size;
+	ipbmap2->i_blocks = ipbmap->i_blocks;
+
+	diWriteSpecial(ipbmap2, 1);
+	diFreeSpecial(ipbmap2);
+
+	/*
+	 *      update superblock
+	 */
+	if ((rc = readSuper(sb, &bh)))
+		goto error_out;
+	j_sb = (struct jfs_superblock *)bh->b_data;
+
+	/* mark extendfs() completion */
+	j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);
+	j_sb->s_size = cpu_to_le64(bmp->db_mapsize <<
+				   le16_to_cpu(j_sb->s_l2bfactor));
+	j_sb->s_agsize = cpu_to_le32(bmp->db_agsize);
+
+	/* update inline log space descriptor */
+	if (sbi->mntflag & JFS_INLINELOG) {
+		PXDaddress(&(j_sb->s_logpxd), newLogAddress);
+		PXDlength(&(j_sb->s_logpxd), newLogSize);
+	}
+
+	/* record log's mount serial number */
+	j_sb->s_logserial = cpu_to_le32(log->serial);
+
+	/* update fsck work space descriptor */
+	PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress);
+	PXDlength(&(j_sb->s_fsckpxd), newFSCKSize);
+	j_sb->s_fscklog = 1;
+	/* sb->s_fsckloglen remains the same */
+
+	/* Update secondary superblock */
+	bh2 = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
+	if (bh2) {
+		j_sb2 = (struct jfs_superblock *)bh2->b_data;
+		memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
+
+		mark_buffer_dirty(bh2);
+		sync_dirty_buffer(bh2);
+		brelse(bh2);
+	}
+
+	/* write primary superblock */
+	mark_buffer_dirty(bh);
+	sync_dirty_buffer(bh);
+	brelse(bh);
+
+	goto resume;
+
+      error_out:
+	jfs_error(sb, "jfs_extendfs");
+
+      resume:
+	/*
+	 *      resume file system transactions
+	 */
+	txResume(sb);
+
+      out:
+	return rc;
+}
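+
+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * driver): the new-configuration arithmetic described in the comment at
+ * the top of jfs_extendfs(), written out for the common case of a
+ * 4K-block aggregate where one page equals one block (l2bsize == 12,
+ * l2nbperpage == 0).  The helper and its name are hypothetical; it only
+ * approximates the in-kernel computation above.
+ */
+static inline s64 example_new_fs_size(s64 newLVSize, int l2bsize)
+{
+	s64 newLogSize, newFSCKSize, t64;
+	int t32;
+
+	/* default inline log: 1/256 of the volume, rounded up to a
+	 * megabyte boundary, capped at 32 megabytes (all in fs blocks) */
+	newLogSize = newLVSize >> 8;
+	t32 = (1 << (20 - l2bsize)) - 1;
+	newLogSize = (newLogSize + t32) & ~t32;
+	if (newLogSize > (32 << (20 - l2bsize)))
+		newLogSize = 32 << (20 - l2bsize);
+
+	/* fsck workspace: one bit per block, rounded up to a BPERDMAP
+	 * (8192-block) boundary, one page holding BITSPERPAGE (32768)
+	 * bits, plus 1 control page and 50 chkdsk service-log pages */
+	t64 = ((newLVSize - newLogSize + 8191) >> 13) << 13;
+	newFSCKSize = ((t64 + 32767) >> 15) + 1 + 50;
+
+	/* whatever remains is available to the file system itself */
+	return newLVSize - newLogSize - newFSCKSize;
+}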
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
new file mode 100644
index 0000000..5856866
--- /dev/null
+++ b/fs/jfs/super.c
@@ -0,0 +1,700 @@
+/*
+ *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/completion.h>
+#include <linux/vfs.h>
+#include <linux/moduleparam.h>
+#include <asm/uaccess.h>
+
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_imap.h"
+#include "jfs_acl.h"
+#include "jfs_debug.h"
+
+MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
+MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
+MODULE_LICENSE("GPL");
+
+static kmem_cache_t * jfs_inode_cachep;
+
+static struct super_operations jfs_super_operations;
+static struct export_operations jfs_export_operations;
+static struct file_system_type jfs_fs_type;
+
+#define MAX_COMMIT_THREADS 64
+static int commit_threads = 0;
+module_param(commit_threads, int, 0);
+MODULE_PARM_DESC(commit_threads, "Number of commit threads");
+
+int jfs_stop_threads;
+static pid_t jfsIOthread;
+static pid_t jfsCommitThread[MAX_COMMIT_THREADS];
+static pid_t jfsSyncThread;
+DECLARE_COMPLETION(jfsIOwait);
+
+#ifdef CONFIG_JFS_DEBUG
+int jfsloglevel = JFS_LOGLEVEL_WARN;
+module_param(jfsloglevel, int, 0644);
+MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
+#endif
+
+/*
+ * External declarations
+ */
+extern int jfs_mount(struct super_block *);
+extern int jfs_mount_rw(struct super_block *, int);
+extern int jfs_umount(struct super_block *);
+extern int jfs_umount_rw(struct super_block *);
+
+extern int jfsIOWait(void *);
+extern int jfs_lazycommit(void *);
+extern int jfs_sync(void *);
+
+extern void jfs_read_inode(struct inode *inode);
+extern void jfs_dirty_inode(struct inode *inode);
+extern void jfs_delete_inode(struct inode *inode);
+extern int jfs_write_inode(struct inode *inode, int wait);
+
+extern struct dentry *jfs_get_parent(struct dentry *dentry);
+extern int jfs_extendfs(struct super_block *, s64, int);
+
+extern struct dentry_operations jfs_ci_dentry_operations;
+
+#ifdef PROC_FS_JFS		/* see jfs_debug.h */
+extern void jfs_proc_init(void);
+extern void jfs_proc_clean(void);
+#endif
+
+extern wait_queue_head_t jfs_IO_thread_wait;
+extern wait_queue_head_t jfs_commit_thread_wait;
+extern wait_queue_head_t jfs_sync_thread_wait;
+
+static void jfs_handle_error(struct super_block *sb)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	updateSuper(sb, FM_DIRTY);
+
+	if (sbi->flag & JFS_ERR_PANIC)
+		panic("JFS (device %s): panic forced after error\n",
+			sb->s_id);
+	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
+		jfs_err("ERROR: (device %s): remounting filesystem "
+			"as read-only\n",
+			sb->s_id);
+		sb->s_flags |= MS_RDONLY;
+	} 
+
+	/* nothing is done for continue beyond marking the superblock dirty */
+}
+
+void jfs_error(struct super_block *sb, const char * function, ...)
+{
+	static char error_buf[256];
+	va_list args;
+
+	va_start(args, function);
+	vsprintf(error_buf, function, args);
+	va_end(args);
+
+	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
+
+	jfs_handle_error(sb);
+}
+
+static struct inode *jfs_alloc_inode(struct super_block *sb)
+{
+	struct jfs_inode_info *jfs_inode;
+
+	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
+	if (!jfs_inode)
+		return NULL;
+	return &jfs_inode->vfs_inode;
+}
+
+static void jfs_destroy_inode(struct inode *inode)
+{
+	struct jfs_inode_info *ji = JFS_IP(inode);
+
+	spin_lock_irq(&ji->ag_lock);
+	if (ji->active_ag != -1) {
+		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
+		atomic_dec(&bmap->db_active[ji->active_ag]);
+		ji->active_ag = -1;
+	}
+	spin_unlock_irq(&ji->ag_lock);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	if (ji->i_acl != JFS_ACL_NOT_CACHED) {
+		posix_acl_release(ji->i_acl);
+		ji->i_acl = JFS_ACL_NOT_CACHED;
+	}
+	if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
+		posix_acl_release(ji->i_default_acl);
+		ji->i_default_acl = JFS_ACL_NOT_CACHED;
+	}
+#endif
+
+	kmem_cache_free(jfs_inode_cachep, ji);
+}
+
+static int jfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	s64 maxinodes;
+	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
+
+	jfs_info("In jfs_statfs");
+	buf->f_type = JFS_SUPER_MAGIC;
+	buf->f_bsize = sbi->bsize;
+	buf->f_blocks = sbi->bmap->db_mapsize;
+	buf->f_bfree = sbi->bmap->db_nfree;
+	buf->f_bavail = sbi->bmap->db_nfree;
+	/*
+	 * If we really return the number of allocated & free inodes, some
+	 * applications will fail because they won't see enough free inodes.
+	 * We'll try to make a reasonable guess as to how many inodes we
+	 * can really allocate
+	 *
+	 * buf->f_files = atomic_read(&imap->im_numinos);
+	 * buf->f_ffree = atomic_read(&imap->im_numfree);
+	 */
+	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
+			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
+			 << L2INOSPEREXT), (s64) 0xffffffffLL);
+	buf->f_files = maxinodes;
+	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
+				    atomic_read(&imap->im_numfree));
+
+	buf->f_namelen = JFS_NAME_MAX;
+	return 0;
+}
+
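+/*
+ * Editor's note (not part of the original source), a worked example of
+ * the f_files/f_ffree estimate above: assuming JFS's 512-byte on-disk
+ * inodes allocated 32 per extent on a 4K-block aggregate
+ * (im_l2nbperiext == 2, L2INOSPEREXT == 5), 1,000,000 free blocks allow
+ * roughly (1000000 >> 2) << 5 = 8,000,000 more inodes on top of those
+ * already allocated, capped at the 32-bit inode number limit.
+ */
+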
+static void jfs_put_super(struct super_block *sb)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	int rc;
+
+	jfs_info("In jfs_put_super");
+	rc = jfs_umount(sb);
+	if (rc)
+		jfs_err("jfs_umount failed with return code %d", rc);
+	if (sbi->nls_tab)
+		unload_nls(sbi->nls_tab);
+	sbi->nls_tab = NULL;
+
+	kfree(sbi);
+}
+
+enum {
+	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
+	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_integrity, "integrity"},
+	{Opt_nointegrity, "nointegrity"},
+	{Opt_iocharset, "iocharset=%s"},
+	{Opt_resize, "resize=%u"},
+	{Opt_resize_nosize, "resize"},
+	{Opt_errors, "errors=%s"},
+	{Opt_ignore, "noquota"},
+	{Opt_ignore, "quota"},
+	{Opt_ignore, "usrquota"},
+	{Opt_ignore, "grpquota"},
+	{Opt_err, NULL}
+};
+
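+/*
+ * For illustration (editor's note, not part of the original source), the
+ * token table above corresponds to mount invocations such as:
+ *
+ *	mount -t jfs -o errors=remount-ro /dev/sdb1 /mnt
+ *	mount -t jfs -o nointegrity,iocharset=utf8 /dev/sdb1 /mnt
+ *	mount -o remount,resize /mnt
+ *
+ * "resize" with no value grows the file system to fill the volume (and,
+ * as jfs_fill_super() notes, is honored on remount only); the quota
+ * options are accepted but silently ignored.
+ */
+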
+static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
+			 int *flag)
+{
+	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
+	char *p;
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+	*newLVSize = 0;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_integrity:
+			*flag &= ~JFS_NOINTEGRITY;
+			break;
+		case Opt_nointegrity:
+			*flag |= JFS_NOINTEGRITY;
+			break;
+		case Opt_ignore:
+			/* Silently ignore the quota options */
+			/* Don't do anything ;-) */
+			break;
+		case Opt_iocharset:
+			if (nls_map && nls_map != (void *) -1)
+				unload_nls(nls_map);
+			if (!strcmp(args[0].from, "none"))
+				nls_map = NULL;
+			else {
+				nls_map = load_nls(args[0].from);
+				if (!nls_map) {
+					printk(KERN_ERR
+					       "JFS: charset not found\n");
+					goto cleanup;
+				}
+			}
+			break;
+		case Opt_resize:
+		{
+			char *resize = args[0].from;
+			*newLVSize = simple_strtoull(resize, &resize, 0);
+			break;
+		}
+		case Opt_resize_nosize:
+		{
+			*newLVSize = sb->s_bdev->bd_inode->i_size >>
+				sb->s_blocksize_bits;
+			if (*newLVSize == 0)
+				printk(KERN_ERR
+				       "JFS: Cannot determine volume size\n");
+			break;
+		}
+		case Opt_errors:
+		{
+			char *errors = args[0].from;
+			if (!errors || !*errors)
+				goto cleanup;
+			if (!strcmp(errors, "continue")) {
+				*flag &= ~JFS_ERR_REMOUNT_RO;
+				*flag &= ~JFS_ERR_PANIC;
+				*flag |= JFS_ERR_CONTINUE;
+			} else if (!strcmp(errors, "remount-ro")) {
+				*flag &= ~JFS_ERR_CONTINUE;
+				*flag &= ~JFS_ERR_PANIC;
+				*flag |= JFS_ERR_REMOUNT_RO;
+			} else if (!strcmp(errors, "panic")) {
+				*flag &= ~JFS_ERR_CONTINUE;
+				*flag &= ~JFS_ERR_REMOUNT_RO;
+				*flag |= JFS_ERR_PANIC;
+			} else {
+				printk(KERN_ERR
+				       "JFS: %s is an invalid error handler\n",
+				       errors);
+				goto cleanup;
+			}
+			break;
+		}
+		default:
+			printk("jfs: Unrecognized mount option \"%s\" "
+					" or missing value\n", p);
+			goto cleanup;
+		}
+	}
+
+	if (nls_map != (void *) -1) {
+		/* Discard old (if remount) */
+		if (sbi->nls_tab)
+			unload_nls(sbi->nls_tab);
+		sbi->nls_tab = nls_map;
+	}
+	return 1;
+
+cleanup:
+	if (nls_map && nls_map != (void *) -1)
+		unload_nls(nls_map);
+	return 0;
+}
+
+static int jfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	s64 newLVSize = 0;
+	int rc = 0;
+	int flag = JFS_SBI(sb)->flag;
+
+	if (!parse_options(data, sb, &newLVSize, &flag)) {
+		return -EINVAL;
+	}
+	if (newLVSize) {
+		if (sb->s_flags & MS_RDONLY) {
+			printk(KERN_ERR
+		  "JFS: resize requires volume to be mounted read-write\n");
+			return -EROFS;
+		}
+		rc = jfs_extendfs(sb, newLVSize, 0);
+		if (rc)
+			return rc;
+	}
+
+	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+		JFS_SBI(sb)->flag = flag;
+		return jfs_mount_rw(sb, 1);
+	}
+	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
+		rc = jfs_umount_rw(sb);
+		JFS_SBI(sb)->flag = flag;
+		return rc;
+	}
+	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
+		if (!(sb->s_flags & MS_RDONLY)) {
+			rc = jfs_umount_rw(sb);
+			if (rc)
+				return rc;
+			JFS_SBI(sb)->flag = flag;
+			return jfs_mount_rw(sb, 1);
+		}
+	JFS_SBI(sb)->flag = flag;
+
+	return 0;
+}
+
+static int jfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct jfs_sb_info *sbi;
+	struct inode *inode;
+	int rc;
+	s64 newLVSize = 0;
+	int flag;
+
+	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
+
+	if (!new_valid_dev(sb->s_bdev->bd_dev))
+		return -EOVERFLOW;
+
+	sbi = kmalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOSPC;
+	memset(sbi, 0, sizeof (struct jfs_sb_info));
+	sb->s_fs_info = sbi;
+	sbi->sb = sb;
+
+	/* initialize the mount flag and determine the default error handler */
+	flag = JFS_ERR_REMOUNT_RO;
+
+	if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
+		kfree(sbi);
+		return -EINVAL;
+	}
+	sbi->flag = flag;
+
+#ifdef CONFIG_JFS_POSIX_ACL
+	sb->s_flags |= MS_POSIXACL;
+#endif
+
+	if (newLVSize) {
+		printk(KERN_ERR "resize option for remount only\n");
+		kfree(sbi);
+		return -EINVAL;
+	}
+
+	/*
+	 * Initialize blocksize to 4K.
+	 */
+	sb_set_blocksize(sb, PSIZE);
+
+	/*
+	 * Set method vectors.
+	 */
+	sb->s_op = &jfs_super_operations;
+	sb->s_export_op = &jfs_export_operations;
+
+	rc = jfs_mount(sb);
+	if (rc) {
+		if (!silent) {
+			jfs_err("jfs_mount failed w/return code = %d", rc);
+		}
+		goto out_kfree;
+	}
+	if (sb->s_flags & MS_RDONLY)
+		sbi->log = NULL;
+	else {
+		rc = jfs_mount_rw(sb, 0);
+		if (rc) {
+			if (!silent) {
+				jfs_err("jfs_mount_rw failed, return code = %d",
+					rc);
+			}
+			goto out_no_rw;
+		}
+	}
+
+	sb->s_magic = JFS_SUPER_MAGIC;
+
+	inode = iget(sb, ROOT_I);
+	if (!inode || is_bad_inode(inode))
+		goto out_no_root;
+	sb->s_root = d_alloc_root(inode);
+	if (!sb->s_root)
+		goto out_no_root;
+
+	if (sbi->mntflag & JFS_OS2)
+		sb->s_root->d_op = &jfs_ci_dentry_operations;
+
+	/* logical blocks are represented by 40 bits in pxd_t, etc. */
+	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
+#if BITS_PER_LONG == 32
+	/*
+	 * Page cache is indexed by long.
+	 * I would use MAX_LFS_FILESIZE, but it's only half as big
+	 */
+	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
+#endif
+	sb->s_time_gran = 1;
+	return 0;
+
+out_no_root:
+	jfs_err("jfs_read_super: get root inode failed");
+	if (inode)
+		iput(inode);
+
+out_no_rw:
+	rc = jfs_umount(sb);
+	if (rc) {
+		jfs_err("jfs_umount failed with return code %d", rc);
+	}
+out_kfree:
+	if (sbi->nls_tab)
+		unload_nls(sbi->nls_tab);
+	kfree(sbi);
+	return -EINVAL;
+}
+
+static void jfs_write_super_lockfs(struct super_block *sb)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_log *log = sbi->log;
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		txQuiesce(sb);
+		lmLogShutdown(log);
+		updateSuper(sb, FM_CLEAN);
+	}
+}
+
+static void jfs_unlockfs(struct super_block *sb)
+{
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	struct jfs_log *log = sbi->log;
+	int rc = 0;
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		updateSuper(sb, FM_MOUNT);
+		if ((rc = lmLogInit(log)))
+			jfs_err("jfs_unlock failed with return code %d", rc);
+		else
+			txResume(sb);
+	}
+}
+
+static struct super_block *jfs_get_sb(struct file_system_type *fs_type, 
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
+}
+
+static int jfs_sync_fs(struct super_block *sb, int wait)
+{
+	struct jfs_log *log = JFS_SBI(sb)->log;
+
+	/* log == NULL indicates read-only mount */
+	if (log)
+		jfs_flush_journal(log, wait);
+
+	return 0;
+}
+
+static struct super_operations jfs_super_operations = {
+	.alloc_inode	= jfs_alloc_inode,
+	.destroy_inode	= jfs_destroy_inode,
+	.read_inode	= jfs_read_inode,
+	.dirty_inode	= jfs_dirty_inode,
+	.write_inode	= jfs_write_inode,
+	.delete_inode	= jfs_delete_inode,
+	.put_super	= jfs_put_super,
+	.sync_fs	= jfs_sync_fs,
+	.write_super_lockfs = jfs_write_super_lockfs,
+	.unlockfs       = jfs_unlockfs,
+	.statfs		= jfs_statfs,
+	.remount_fs	= jfs_remount,
+};
+
+static struct export_operations jfs_export_operations = {
+	.get_parent	= jfs_get_parent,
+};
+
+static struct file_system_type jfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "jfs",
+	.get_sb		= jfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+extern int metapage_init(void);
+extern int txInit(void);
+extern void txExit(void);
+extern void metapage_exit(void);
+
+static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
+		INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
+		init_rwsem(&jfs_ip->rdwrlock);
+		init_MUTEX(&jfs_ip->commit_sem);
+		init_rwsem(&jfs_ip->xattr_sem);
+		spin_lock_init(&jfs_ip->ag_lock);
+		jfs_ip->active_ag = -1;
+#ifdef CONFIG_JFS_POSIX_ACL
+		jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
+		jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
+#endif
+		inode_init_once(&jfs_ip->vfs_inode);
+	}
+}
+
+static int __init init_jfs_fs(void)
+{
+	int i;
+	int rc;
+
+	jfs_inode_cachep =
+	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, 
+			    SLAB_RECLAIM_ACCOUNT, init_once, NULL);
+	if (jfs_inode_cachep == NULL)
+		return -ENOMEM;
+
+	/*
+	 * Metapage initialization
+	 */
+	rc = metapage_init();
+	if (rc) {
+		jfs_err("metapage_init failed w/rc = %d", rc);
+		goto free_slab;
+	}
+
+	/*
+	 * Transaction Manager initialization
+	 */
+	rc = txInit();
+	if (rc) {
+		jfs_err("txInit failed w/rc = %d", rc);
+		goto free_metapage;
+	}
+
+	/*
+	 * I/O completion thread (endio)
+	 */
+	jfsIOthread = kernel_thread(jfsIOWait, NULL, CLONE_KERNEL);
+	if (jfsIOthread < 0) {
+		jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsIOthread);
+		rc = jfsIOthread;
+		goto end_txmngr;
+	}
+	wait_for_completion(&jfsIOwait);	/* Wait until thread starts */
+
+	if (commit_threads < 1)
+		commit_threads = num_online_cpus();
+	if (commit_threads > MAX_COMMIT_THREADS)
+		commit_threads = MAX_COMMIT_THREADS;
+
+	for (i = 0; i < commit_threads; i++) {
+		jfsCommitThread[i] = kernel_thread(jfs_lazycommit, NULL,
+						   CLONE_KERNEL);
+		if (jfsCommitThread[i] < 0) {
+			jfs_err("init_jfs_fs: fork failed w/rc = %d",
+				jfsCommitThread[i]);
+			rc = jfsCommitThread[i];
+			commit_threads = i;
+			goto kill_committask;
+		}
+		/* Wait until thread starts */
+		wait_for_completion(&jfsIOwait);
+	}
+
+	jfsSyncThread = kernel_thread(jfs_sync, NULL, CLONE_KERNEL);
+	if (jfsSyncThread < 0) {
+		jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsSyncThread);
+		rc = jfsSyncThread;
+		goto kill_committask;
+	}
+	wait_for_completion(&jfsIOwait);	/* Wait until thread starts */
+
+#ifdef PROC_FS_JFS
+	jfs_proc_init();
+#endif
+
+	return register_filesystem(&jfs_fs_type);
+
+kill_committask:
+	jfs_stop_threads = 1;
+	wake_up_all(&jfs_commit_thread_wait);
+	for (i = 0; i < commit_threads; i++)
+		wait_for_completion(&jfsIOwait);
+
+	wake_up(&jfs_IO_thread_wait);
+	wait_for_completion(&jfsIOwait);	/* Wait for thread exit */
+end_txmngr:
+	txExit();
+free_metapage:
+	metapage_exit();
+free_slab:
+	kmem_cache_destroy(jfs_inode_cachep);
+	return rc;
+}
+
+static void __exit exit_jfs_fs(void)
+{
+	int i;
+
+	jfs_info("exit_jfs_fs called");
+
+	jfs_stop_threads = 1;
+	txExit();
+	metapage_exit();
+	wake_up(&jfs_IO_thread_wait);
+	wait_for_completion(&jfsIOwait);	/* Wait until IO thread exits */
+	wake_up_all(&jfs_commit_thread_wait);
+	for (i = 0; i < commit_threads; i++)
+		wait_for_completion(&jfsIOwait);
+	wake_up(&jfs_sync_thread_wait);
+	wait_for_completion(&jfsIOwait);	/* Wait until Sync thread exits */
+#ifdef PROC_FS_JFS
+	jfs_proc_clean();
+#endif
+	unregister_filesystem(&jfs_fs_type);
+	kmem_cache_destroy(jfs_inode_cachep);
+}
+
+module_init(init_jfs_fs)
+module_exit(exit_jfs_fs)
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
new file mode 100644
index 0000000..ef4c07e
--- /dev/null
+++ b/fs/jfs/symlink.c
@@ -0,0 +1,39 @@
+/*
+ *   Copyright (c) Christoph Hellwig, 2001-2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include "jfs_incore.h"
+#include "jfs_xattr.h"
+
+static int jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = JFS_IP(dentry->d_inode)->i_inline;
+	nd_set_link(nd, s);
+	return 0;
+}
+
+struct inode_operations jfs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= jfs_follow_link,
+	.setxattr	= jfs_setxattr,
+	.getxattr	= jfs_getxattr,
+	.listxattr	= jfs_listxattr,
+	.removexattr	= jfs_removexattr,
+};
+
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
new file mode 100644
index 0000000..7a9ffd5
--- /dev/null
+++ b/fs/jfs/xattr.c
@@ -0,0 +1,1127 @@
+/*
+ *   Copyright (C) International Business Machines  Corp., 2000-2004
+ *   Copyright (C) Christoph Hellwig, 2002
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or 
+ *   (at your option) any later version.
+ * 
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/quotaops.h>
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_dmap.h"
+#include "jfs_debug.h"
+#include "jfs_dinode.h"
+#include "jfs_extent.h"
+#include "jfs_metapage.h"
+#include "jfs_xattr.h"
+#include "jfs_acl.h"
+
+/*
+ *	jfs_xattr.c: extended attribute service
+ *
+ * Overall design --
+ *
+ * Format:
+ *
+ *   Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
+ *   value) and a variable (0 or more) number of extended attribute
+ *   entries.  Each extended attribute entry (jfs_ea) is a <name,value> double
+ *   where <name> is constructed from a null-terminated ascii string
+ *   (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
+ *   (1 ... 65535 bytes).  The in-memory format is
+ *
+ *   0       1        2        4                4 + namelen + 1
+ *   +-------+--------+--------+----------------+-------------------+
+ *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
+ *   |       | Length | Length |                |                   |
+ *   +-------+--------+--------+----------------+-------------------+
+ *
+ *   A jfs_ea_list then is structured as
+ *
+ *   0            4                   4 + EA_SIZE(ea1)
+ *   +------------+-------------------+--------------------+-----
+ *   | Overall EA | First FEA Element | Second FEA Element | ..... 
+ *   | List Size  |                   |                    |
+ *   +------------+-------------------+--------------------+-----
+ *
+ *   On-disk:
+ *
+ *     FEALISTs are stored on disk using blocks allocated by dbAlloc() and
+ *     written directly. An EA list may be in-lined in the inode if there is
+ *     sufficient room available.
+ */
+
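+/*
+ * Illustrative sketch (editor's addition, not the driver's definition):
+ * the on-disk entry layout pictured above expressed as a structure and a
+ * size helper.  The real definitions live in jfs_xattr.h and may differ
+ * in naming and padding details.
+ */
+struct example_jfs_ea {
+	u8 flag;		/* byte 0 */
+	u8 namelen;		/* byte 1 */
+	__le16 valuelen;	/* bytes 2-3 */
+	char name[0];		/* namelen bytes, a '\0', then the value */
+};
+
+static inline int example_ea_bytes(int namelen, int valuelen)
+{
+	/* fixed header + name + terminating NUL + value data */
+	return 4 + namelen + 1 + valuelen;
+}
+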
+struct ea_buffer {
+	int flag;		/* Indicates what storage xattr points to */
+	int max_size;		/* largest xattr that fits in current buffer */
+	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
+	struct metapage *mp;	/* metapage containing ea list */
+	struct jfs_ea_list *xattr;	/* buffer containing ea list */
+};
+
+/*
+ * ea_buffer.flag values
+ */
+#define EA_INLINE	0x0001
+#define EA_EXTENT	0x0002
+#define EA_NEW		0x0004
+#define EA_MALLOC	0x0008
+
+/* Namespaces */
+#define XATTR_SYSTEM_PREFIX "system."
+#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)
+
+#define XATTR_USER_PREFIX "user."
+#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
+
+#define XATTR_OS2_PREFIX "os2."
+#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
+
+/* XATTR_SECURITY_PREFIX is defined in include/linux/xattr.h */
+#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
+
+/*
+ * These three routines are used to recognize on-disk extended attributes
+ * that are in a recognized namespace.  If the attribute is not recognized,
+ * "os2." is prepended to the name
+ */
+static inline int is_os2_xattr(struct jfs_ea *ea)
+{
+	/*
+	 * Check for "system."
+	 */
+	if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
+	    !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+		return FALSE;
+	/*
+	 * Check for "user."
+	 */
+	if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
+	    !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+		return FALSE;
+	/*
+	 * Check for "security."
+	 */
+	if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
+	    !strncmp(ea->name, XATTR_SECURITY_PREFIX,
+		     XATTR_SECURITY_PREFIX_LEN))
+		return FALSE;
+	/*
+	 * Check for "trusted."
+	 */
+	if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
+	    !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+		return FALSE;
+	/*
+	 * Add any other valid namespace prefixes here
+	 */
+
+	/*
+	 * We assume it's OS/2's flat namespace
+	 */
+	return TRUE;
+}
+
+static inline int name_size(struct jfs_ea *ea)
+{
+	if (is_os2_xattr(ea))
+		return ea->namelen + XATTR_OS2_PREFIX_LEN;
+	else
+		return ea->namelen;
+}
+
+static inline int copy_name(char *buffer, struct jfs_ea *ea)
+{
+	int len = ea->namelen;
+
+	if (is_os2_xattr(ea)) {
+		memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
+		buffer += XATTR_OS2_PREFIX_LEN;
+		len += XATTR_OS2_PREFIX_LEN;
+	}
+	memcpy(buffer, ea->name, ea->namelen);
+	buffer[ea->namelen] = 0;
+
+	return len;
+}
+
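+/*
+ * Editor's note (not part of the original source): for example, an
+ * on-disk attribute named "COMMENTS" carries no recognized prefix, so
+ * name_size()/copy_name() above expose it to listxattr() as
+ * "os2.COMMENTS" (4 + 8 = 12 bytes plus the terminating NUL), while an
+ * attribute already named "user.comment" is passed through unchanged.
+ */
+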
+/* Forward references */
+static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
+
+/*
+ * NAME: ea_write_inline
+ *                                                                    
+ * FUNCTION: Attempt to write an EA inline if area is available
+ *                                                                    
+ * PRE CONDITIONS:
+ *	Already verified that the specified EA is small enough to fit inline
+ *
+ * PARAMETERS:
+ *	ip	- Inode pointer
+ *	ealist	- EA list pointer
+ *	size	- size of ealist in bytes
+ *	ea	- dxd_t structure to be filled in with necessary EA information
+ *		  if we successfully copy the EA inline
+ *
+ * NOTES:
+ *	Checks if the inode's inline area is available.  If so, copies EA inline
+ *	and sets <ea> fields appropriately.  Otherwise, returns failure, EA will
+ *	have to be put into an extent.
+ *
+ * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
+ */
+static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
+			   int size, dxd_t * ea)
+{
+	struct jfs_inode_info *ji = JFS_IP(ip);
+
+	/*
+	 * Make sure we have an EA -- the NULL EA list is valid, but you
+	 * can't copy it!
+	 */
+	if (ealist && size > sizeof (struct jfs_ea_list)) {
+		assert(size <= sizeof (ji->i_inline_ea));
+
+		/*
+		 * See if the space is available or if it is already being
+		 * used for an inline EA.
+		 */
+		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
+			return -EPERM;
+
+		DXDsize(ea, size);
+		DXDlength(ea, 0);
+		DXDaddress(ea, 0);
+		memcpy(ji->i_inline_ea, ealist, size);
+		ea->flag = DXD_INLINE;
+		ji->mode2 &= ~INLINEEA;
+	} else {
+		ea->flag = 0;
+		DXDsize(ea, 0);
+		DXDlength(ea, 0);
+		DXDaddress(ea, 0);
+
+		/* Free up INLINE area */
+		if (ji->ea.flag & DXD_INLINE)
+			ji->mode2 |= INLINEEA;
+	}
+
+	return 0;
+}
+
+/*
+ * NAME: ea_write
+ *                                                                    
+ * FUNCTION: Write an EA for an inode
+ *                                                                    
+ * PRE CONDITIONS: EA has been verified 
+ *
+ * PARAMETERS:
+ *	ip	- Inode pointer
+ *	ealist	- EA list pointer
+ *	size	- size of ealist in bytes
+ *	ea	- dxd_t structure to be filled in appropriately with where the
+ *		  EA was copied
+ *
+ * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
+ *	extent and synchronously writes it to those blocks.
+ *
+ * RETURNS: 0 for success; Anything else indicates failure
+ */
+static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
+		       dxd_t * ea)
+{
+	struct super_block *sb = ip->i_sb;
+	struct jfs_inode_info *ji = JFS_IP(ip);
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	int nblocks;
+	s64 blkno;
+	int rc = 0, i;
+	char *cp;
+	s32 nbytes, nb;
+	s32 bytes_to_write;
+	struct metapage *mp;
+
+	/*
+	 * Quick check to see if this is an in-linable EA.  Short EAs
+	 * and empty EAs are all in-linable, provided the space exists.
+	 */
+	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
+		if (!ea_write_inline(ip, ealist, size, ea))
+			return 0;
+	}
+
+	/* figure out how many blocks we need */
+	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
+
+	/* Allocate new blocks to quota. */
+	if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+		return -EDQUOT;
+	}
+
+	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
+	if (rc) {
+		/*Rollback quota allocation. */
+		DQUOT_FREE_BLOCK(ip, nblocks);
+		return rc;
+	}
+
+	/*
+	 * We now have nblocks worth of storage to hold the FEALIST.
+	 * Loop over the FEALIST, copying data into the buffer one page
+	 * at a time.
+	 */
+	cp = (char *) ealist;
+	nbytes = size;
+	for (i = 0; i < nblocks; i += sbi->nbperpage) {
+		/*
+		 * Determine how many bytes for this request, and round up to
+		 * the nearest aggregate block size
+		 */
+		nb = min(PSIZE, nbytes);
+		bytes_to_write =
+		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
+		    << sb->s_blocksize_bits;
+
+		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
+			rc = -EIO;
+			goto failed;
+		}
+
+		memcpy(mp->data, cp, nb);
+
+		/*
+		 * We really need a way to propagate errors for
+		 * forced writes like this one.  --hch
+		 *
+		 * (__write_metapage => release_metapage => flush_metapage)
+		 */
+#ifdef _JFS_FIXME
+		if ((rc = flush_metapage(mp))) {
+			/*
+			 * the write failed -- this means that the buffer
+			 * is still assigned and the blocks are not being
+			 * used.  this seems like the best error recovery
+			 * we can get ...
+			 */
+			goto failed;
+		}
+#else
+		flush_metapage(mp);
+#endif
+
+		cp += PSIZE;
+		nbytes -= nb;
+	}
+
+	ea->flag = DXD_EXTENT;
+	DXDsize(ea, le32_to_cpu(ealist->size));
+	DXDlength(ea, nblocks);
+	DXDaddress(ea, blkno);
+
+	/* Free up INLINE area */
+	if (ji->ea.flag & DXD_INLINE)
+		ji->mode2 |= INLINEEA;
+
+	return 0;
+
+      failed:
+	/* Rollback quota allocation. */
+	DQUOT_FREE_BLOCK(ip, nblocks);
+
+	dbFree(ip, blkno, nblocks);
+	return rc;
+}
+
+/*
+ * NAME: ea_read_inline
+ *                                                                    
+ * FUNCTION: Read an inlined EA into user's buffer
+ *                                                                    
+ * PARAMETERS:
+ *	ip	- Inode pointer
+ *	ealist	- Pointer to buffer to fill in with EA
+ *
+ * RETURNS: 0
+ */
+static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
+{
+	struct jfs_inode_info *ji = JFS_IP(ip);
+	int ea_size = sizeDXD(&ji->ea);
+
+	if (ea_size == 0) {
+		ealist->size = 0;
+		return 0;
+	}
+
+	/* Sanity Check */
+	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
+		return -EIO;
+	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
+	    != ea_size)
+		return -EIO;
+
+	memcpy(ealist, ji->i_inline_ea, ea_size);
+	return 0;
+}
+
+/*
+ * NAME: ea_read
+ *                                                                    
+ * FUNCTION: copy EA data into user's buffer
+ *                                                                    
+ * PARAMETERS:
+ *	ip	- Inode pointer
+ *	ealist	- Pointer to buffer to fill in with EA
+ *
+ * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
+ *
+ * RETURNS: 0 for success; other indicates failure
+ */
+static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
+{
+	struct super_block *sb = ip->i_sb;
+	struct jfs_inode_info *ji = JFS_IP(ip);
+	struct jfs_sb_info *sbi = JFS_SBI(sb);
+	int nblocks;
+	s64 blkno;
+	char *cp = (char *) ealist;
+	int i;
+	int nbytes, nb;
+	s32 bytes_to_read;
+	struct metapage *mp;
+
+	/* quick check for in-line EA */
+	if (ji->ea.flag & DXD_INLINE)
+		return ea_read_inline(ip, ealist);
+
+	nbytes = sizeDXD(&ji->ea);
+	if (!nbytes) {
+		jfs_error(sb, "ea_read: nbytes is 0");
+		return -EIO;
+	}
+
+	/* 
+	 * Figure out how many blocks were allocated when this EA list was
+	 * originally written to disk.
+	 */
+	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
+	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;
+
+	/*
+	 * I have found the disk blocks which were originally used to store
+	 * the FEALIST.  Now I loop over each contiguous block, copying the
+	 * data into the buffer.
+	 */
+	for (i = 0; i < nblocks; i += sbi->nbperpage) {
+		/*
+		 * Determine how many bytes for this request, and round up to
+		 * the nearest aggregate block size
+		 */
+		nb = min(PSIZE, nbytes);
+		bytes_to_read =
+		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
+		    << sb->s_blocksize_bits;
+
+		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
+			return -EIO;
+
+		memcpy(cp, mp->data, nb);
+		release_metapage(mp);
+
+		cp += PSIZE;
+		nbytes -= nb;
+	}
+
+	return 0;
+}
+
+/*
+ * NAME: ea_get
+ *                                                                    
+ * FUNCTION: Returns buffer containing existing extended attributes.
+ *	     The size of the buffer will be the larger of the existing
+ *	     attributes size, or min_size.
+ *
+ *	     The buffer, which may be inlined in the inode or in the
+ * 	     page cache, must be released by calling ea_release or ea_put
+ *                                                                    
+ * PARAMETERS:
+ *	inode	- Inode pointer
+ *	ea_buf	- Structure to be populated with ealist and its metadata
+ *	min_size- minimum size of buffer to be returned
+ *
+ * RETURNS: 0 for success; Other indicates failure
+ */
+static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+{
+	struct jfs_inode_info *ji = JFS_IP(inode);
+	struct super_block *sb = inode->i_sb;
+	int size;
+	int ea_size = sizeDXD(&ji->ea);
+	int blocks_needed, current_blocks;
+	s64 blkno;
+	int rc;
+	int quota_allocation = 0;
+
+	/* When fsck.jfs clears a bad ea, it doesn't clear the size */
+	if (ji->ea.flag == 0)
+		ea_size = 0;
+
+	if (ea_size == 0) {
+		if (min_size == 0) {
+			ea_buf->flag = 0;
+			ea_buf->max_size = 0;
+			ea_buf->xattr = NULL;
+			return 0;
+		}
+		if ((min_size <= sizeof (ji->i_inline_ea)) &&
+		    (ji->mode2 & INLINEEA)) {
+			ea_buf->flag = EA_INLINE | EA_NEW;
+			ea_buf->max_size = sizeof (ji->i_inline_ea);
+			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
+			DXDlength(&ea_buf->new_ea, 0);
+			DXDaddress(&ea_buf->new_ea, 0);
+			ea_buf->new_ea.flag = DXD_INLINE;
+			DXDsize(&ea_buf->new_ea, min_size);
+			return 0;
+		}
+		current_blocks = 0;
+	} else if (ji->ea.flag & DXD_INLINE) {
+		if (min_size <= sizeof (ji->i_inline_ea)) {
+			ea_buf->flag = EA_INLINE;
+			ea_buf->max_size = sizeof (ji->i_inline_ea);
+			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
+			goto size_check;
+		}
+		current_blocks = 0;
+	} else {
+		if (!(ji->ea.flag & DXD_EXTENT)) {
+			jfs_error(sb, "ea_get: invalid ea.flag");
+			return -EIO;
+		}
+		current_blocks = (ea_size + sb->s_blocksize - 1) >>
+		    sb->s_blocksize_bits;
+	}
+	size = max(min_size, ea_size);
+
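+	/*
+	 * Three cases from here on: anything larger than a page is copied
+	 * into a kmalloc'd buffer (EA_MALLOC); a request needing more blocks
+	 * than the current extent gets a newly allocated extent and metapage
+	 * (EA_EXTENT | EA_NEW); otherwise the existing extent's metapage is
+	 * used directly (EA_EXTENT).
+	 */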
+	if (size > PSIZE) {
+		/*
+		 * To keep the rest of the code simple.  Allocate a
+		 * contiguous buffer to work with
+		 */
+		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		if (ea_buf->xattr == NULL)
+			return -ENOMEM;
+
+		ea_buf->flag = EA_MALLOC;
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+
+		if (ea_size == 0)
+			return 0;
+
+		if ((rc = ea_read(inode, ea_buf->xattr))) {
+			kfree(ea_buf->xattr);
+			ea_buf->xattr = NULL;
+			return rc;
+		}
+		goto size_check;
+	}
+	blocks_needed = (min_size + sb->s_blocksize - 1) >>
+	    sb->s_blocksize_bits;
+
+	if (blocks_needed > current_blocks) {
+		/* Allocate new blocks to quota. */
+		if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+			return -EDQUOT;
+
+		quota_allocation = blocks_needed;
+
+		rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
+			     &blkno);
+		if (rc)
+			goto clean_up;
+
+		DXDlength(&ea_buf->new_ea, blocks_needed);
+		DXDaddress(&ea_buf->new_ea, blkno);
+		ea_buf->new_ea.flag = DXD_EXTENT;
+		DXDsize(&ea_buf->new_ea, min_size);
+
+		ea_buf->flag = EA_EXTENT | EA_NEW;
+
+		ea_buf->mp = get_metapage(inode, blkno,
+					  blocks_needed << sb->s_blocksize_bits,
+					  1);
+		if (ea_buf->mp == NULL) {
+			dbFree(inode, blkno, (s64) blocks_needed);
+			rc = -EIO;
+			goto clean_up;
+		}
+		ea_buf->xattr = ea_buf->mp->data;
+		ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+		if (ea_size == 0)
+			return 0;
+		if ((rc = ea_read(inode, ea_buf->xattr))) {
+			discard_metapage(ea_buf->mp);
+			dbFree(inode, blkno, (s64) blocks_needed);
+			goto clean_up;
+		}
+		goto size_check;
+	}
+	ea_buf->flag = EA_EXTENT;
+	ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
+				   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
+				   1);
+	if (ea_buf->mp == NULL) {
+		rc = -EIO;
+		goto clean_up;
+	}
+	ea_buf->xattr = ea_buf->mp->data;
+	ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
+	    ~(sb->s_blocksize - 1);
+
+      size_check:
+	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+		printk(KERN_ERR "ea_get: invalid extended attribute\n");
+		dump_mem("xattr", ea_buf->xattr, ea_size);
+		ea_release(inode, ea_buf);
+		rc = -EIO;
+		goto clean_up;
+	}
+
+	return ea_size;
+
+      clean_up:
+	/* Rollback quota allocation */
+	if (quota_allocation)
+		DQUOT_FREE_BLOCK(inode, quota_allocation);
+
+	return (rc);
+}
+
+static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
+{
+	if (ea_buf->flag & EA_MALLOC)
+		kfree(ea_buf->xattr);
+	else if (ea_buf->flag & EA_EXTENT) {
+		assert(ea_buf->mp);
+		release_metapage(ea_buf->mp);
+
+		if (ea_buf->flag & EA_NEW)
+			dbFree(inode, addressDXD(&ea_buf->new_ea),
+			       lengthDXD(&ea_buf->new_ea));
+	}
+}
+
+static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
+{
+	struct jfs_inode_info *ji = JFS_IP(inode);
+	unsigned long old_blocks, new_blocks;
+	int rc = 0;
+	tid_t tid;
+
+	if (new_size == 0) {
+		ea_release(inode, ea_buf);
+		ea_buf = NULL;
+	} else if (ea_buf->flag & EA_INLINE) {
+		assert(new_size <= sizeof (ji->i_inline_ea));
+		ji->mode2 &= ~INLINEEA;
+		ea_buf->new_ea.flag = DXD_INLINE;
+		DXDsize(&ea_buf->new_ea, new_size);
+		DXDaddress(&ea_buf->new_ea, 0);
+		DXDlength(&ea_buf->new_ea, 0);
+	} else if (ea_buf->flag & EA_MALLOC) {
+		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
+		kfree(ea_buf->xattr);
+	} else if (ea_buf->flag & EA_NEW) {
+		/* We have already allocated a new dxd */
+		flush_metapage(ea_buf->mp);
+	} else {
+		/* ->xattr must point to original ea's metapage */
+		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
+		discard_metapage(ea_buf->mp);
+	}
+	if (rc)
+		return rc;
+
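+	/*
+	 * Switch the inode from the old EA descriptor to the new one under
+	 * a single transaction, so the extent bookkeeping stays consistent.
+	 */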
+	tid = txBegin(inode->i_sb, 0);
+	down(&ji->commit_sem);
+
+	old_blocks = new_blocks = 0;
+
+	if (ji->ea.flag & DXD_EXTENT) {
+		invalidate_dxd_metapages(inode, ji->ea);
+		old_blocks = lengthDXD(&ji->ea);
+	}
+
+	if (ea_buf) {
+		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
+		if (ea_buf->new_ea.flag & DXD_EXTENT) {
+			new_blocks = lengthDXD(&ea_buf->new_ea);
+			if (ji->ea.flag & DXD_INLINE)
+				ji->mode2 |= INLINEEA;
+		}
+		ji->ea = ea_buf->new_ea;
+	} else {
+		txEA(tid, inode, &ji->ea, NULL);
+		if (ji->ea.flag & DXD_INLINE)
+			ji->mode2 |= INLINEEA;
+		ji->ea.flag = 0;
+		ji->ea.size = 0;
+	}
+
+	/* If old blocks exist, they must be removed from quota allocation. */
+	if (old_blocks)
+		DQUOT_FREE_BLOCK(inode, old_blocks);
+
+	inode->i_ctime = CURRENT_TIME;
+	rc = txCommit(tid, 1, &inode, 0);
+	txEnd(tid);
+	up(&ji->commit_sem);
+
+	return rc;
+}
+
+/*
+ * can_set_system_xattr
+ *
+ * This code is specific to the system.* namespace.  It contains policy
+ * which doesn't belong in the main xattr codepath.
+ */
+static int can_set_system_xattr(struct inode *inode, const char *name,
+				const void *value, size_t value_len)
+{
+#ifdef CONFIG_JFS_POSIX_ACL
+	struct posix_acl *acl;
+	int rc;
+
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		return -EPERM;
+
+	/*
+	 * XATTR_NAME_ACL_ACCESS is tied to i_mode
+	 */
+	if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) {
+		acl = posix_acl_from_xattr(value, value_len);
+		if (IS_ERR(acl)) {
+			rc = PTR_ERR(acl);
+			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
+			       rc);
+			return rc;
+		}
+		if (acl) {
+			mode_t mode = inode->i_mode;
+			rc = posix_acl_equiv_mode(acl, &mode);
+			posix_acl_release(acl);
+			if (rc < 0) {
+				printk(KERN_ERR
+				       "posix_acl_equiv_mode returned %d\n",
+				       rc);
+				return rc;
+			}
+			inode->i_mode = mode;
+			mark_inode_dirty(inode);
+		}
+		/*
+		 * We're changing the ACL.  Get rid of the cached one
+		 */
+		acl = JFS_IP(inode)->i_acl;
+		if (acl != JFS_ACL_NOT_CACHED)
+			posix_acl_release(acl);
+		JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;
+
+		return 0;
+	} else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
+		acl = posix_acl_from_xattr(value, value_len);
+		if (IS_ERR(acl)) {
+			rc = PTR_ERR(acl);
+			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
+			       rc);
+			return rc;
+		}
+		posix_acl_release(acl);
+
+		/*
+		 * We're changing the default ACL.  Get rid of the cached one
+		 */
+		acl = JFS_IP(inode)->i_default_acl;
+		if (acl && (acl != JFS_ACL_NOT_CACHED))
+			posix_acl_release(acl);
+		JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;
+
+		return 0;
+	}
+#endif			/* CONFIG_JFS_POSIX_ACL */
+	return -EOPNOTSUPP;
+}
+
+static int can_set_xattr(struct inode *inode, const char *name,
+			 const void *value, size_t value_len)
+{
+	if (IS_RDONLY(inode))
+		return -EROFS;
+
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode) || S_ISLNK(inode->i_mode))
+		return -EPERM;
+
+	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
+		/*
+		 * "system.*"
+		 */
+		return can_set_system_xattr(inode, name, value, value_len);
+
+	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+
+#ifdef CONFIG_JFS_SECURITY
+	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
+	    != 0)
+		return 0;	/* Leave it to the security module */
+#endif
+
+	if ((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
+	   (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
+		return -EOPNOTSUPP;
+
+	if (!S_ISREG(inode->i_mode) &&
+	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
+		return -EPERM;
+
+	return permission(inode, MAY_WRITE, NULL);
+}
+
+int __jfs_setxattr(struct inode *inode, const char *name, const void *value,
+		   size_t value_len, int flags)
+{
+	struct jfs_ea_list *ealist;
+	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
+	struct ea_buffer ea_buf;
+	int old_ea_size = 0;
+	int xattr_size;
+	int new_size;
+	int namelen = strlen(name);
+	char *os2name = NULL;
+	int found = 0;
+	int rc;
+	int length;
+
+	if ((rc = can_set_xattr(inode, name, value, value_len)))
+		return rc;
+
+	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
+				  GFP_KERNEL);
+		if (!os2name)
+			return -ENOMEM;
+		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
+		name = os2name;
+		namelen -= XATTR_OS2_PREFIX_LEN;
+	}
+
+	down_write(&JFS_IP(inode)->xattr_sem);
+
+	xattr_size = ea_get(inode, &ea_buf, 0);
+	if (xattr_size < 0) {
+		rc = xattr_size;
+		goto out;
+	}
+
+      again:
+	ealist = (struct jfs_ea_list *) ea_buf.xattr;
+	new_size = sizeof (struct jfs_ea_list);
+
+	if (xattr_size) {
+		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
+		     ea = NEXT_EA(ea)) {
+			if ((namelen == ea->namelen) &&
+			    (memcmp(name, ea->name, namelen) == 0)) {
+				found = 1;
+				if (flags & XATTR_CREATE) {
+					rc = -EEXIST;
+					goto release;
+				}
+				old_ea = ea;
+				old_ea_size = EA_SIZE(ea);
+				next_ea = NEXT_EA(ea);
+			} else
+				new_size += EA_SIZE(ea);
+		}
+	}
+
+	if (!found) {
+		if (flags & XATTR_REPLACE) {
+			rc = -ENODATA;
+			goto release;
+		}
+		if (value == NULL) {
+			rc = 0;
+			goto release;
+		}
+	}
+	if (value)
+		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;
+
+	if (new_size > ea_buf.max_size) {
+		/*
+		 * We need to allocate more space for the merged ea list.
+		 * We should only loop back to "again:" once.
+		 */
+		ea_release(inode, &ea_buf);
+		xattr_size = ea_get(inode, &ea_buf, new_size);
+		if (xattr_size < 0) {
+			rc = xattr_size;
+			goto out;
+		}
+		goto again;
+	}
+
+	/* Remove old ea of the same name */
+	if (found) {
+		/* number of bytes following target EA */
+		length = (char *) END_EALIST(ealist) - (char *) next_ea;
+		if (length > 0)
+			memmove(old_ea, next_ea, length);
+		xattr_size -= old_ea_size;
+	}
+
+	/* Add new entry to the end */
+	if (value) {
+		if (xattr_size == 0)
+			/* Completely new ea list */
+			xattr_size = sizeof (struct jfs_ea_list);
+
+		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
+		ea->flag = 0;
+		ea->namelen = namelen;
+		ea->valuelen = (cpu_to_le16(value_len));
+		memcpy(ea->name, name, namelen);
+		ea->name[namelen] = 0;
+		if (value_len)
+			memcpy(&ea->name[namelen + 1], value, value_len);
+		xattr_size += EA_SIZE(ea);
+	}
+
+	/* DEBUG - If we did this right, these numbers match */
+	if (xattr_size != new_size) {
+		printk(KERN_ERR
+		       "jfs_xsetattr: xattr_size = %d, new_size = %d\n",
+		       xattr_size, new_size);
+
+		rc = -EINVAL;
+		goto release;
+	}
+
+	/*
+	 * If we're left with an empty list, there's no ea
+	 */
+	if (new_size == sizeof (struct jfs_ea_list))
+		new_size = 0;
+
+	ealist->size = cpu_to_le32(new_size);
+
+	rc = ea_put(inode, &ea_buf, new_size);
+
+	goto out;
+      release:
+	ea_release(inode, &ea_buf);
+      out:
+	up_write(&JFS_IP(inode)->xattr_sem);
+
+	if (os2name)
+		kfree(os2name);
+
+	return rc;
+}
+
+int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+		 size_t value_len, int flags)
+{
+	if (value == NULL) {	/* empty EA, do not remove */
+		value = "";
+		value_len = 0;
+	}
+
+	return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
+}
+
+static int can_get_xattr(struct inode *inode, const char *name)
+{
+#ifdef CONFIG_JFS_SECURITY
+	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0)
+		return 0;
+#endif
+
+	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
+		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+
+	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
+		return 0;
+
+	return permission(inode, MAY_READ, NULL);
+}
+
+ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+		       size_t buf_size)
+{
+	struct jfs_ea_list *ealist;
+	struct jfs_ea *ea;
+	struct ea_buffer ea_buf;
+	int xattr_size;
+	ssize_t size;
+	int namelen = strlen(name);
+	char *os2name = NULL;
+	int rc;
+	char *value;
+
+	if ((rc = can_get_xattr(inode, name)))
+		return rc;
+
+	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
+				  GFP_KERNEL);
+		if (!os2name)
+			return -ENOMEM;
+		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
+		name = os2name;
+		namelen -= XATTR_OS2_PREFIX_LEN;
+	}
+
+	down_read(&JFS_IP(inode)->xattr_sem);
+
+	xattr_size = ea_get(inode, &ea_buf, 0);
+
+	if (xattr_size < 0) {
+		size = xattr_size;
+		goto out;
+	}
+
+	if (xattr_size == 0)
+		goto not_found;
+
+	ealist = (struct jfs_ea_list *) ea_buf.xattr;
+
+	/* Find the named attribute */
+	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
+		if ((namelen == ea->namelen) &&
+		    memcmp(name, ea->name, namelen) == 0) {
+			/* Found it */
+			size = le16_to_cpu(ea->valuelen);
+			if (!data)
+				goto release;
+			else if (size > buf_size) {
+				size = -ERANGE;
+				goto release;
+			}
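+			/* The value bytes follow the NUL-terminated name
+			 * within the on-disk jfs_ea entry. */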
+			value = ((char *) &ea->name) + ea->namelen + 1;
+			memcpy(data, value, size);
+			goto release;
+		}
+      not_found:
+	size = -ENODATA;
+      release:
+	ea_release(inode, &ea_buf);
+      out:
+	up_read(&JFS_IP(inode)->xattr_sem);
+
+	if (os2name)
+		kfree(os2name);
+
+	return size;
+}
+
+ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
+		     size_t buf_size)
+{
+	int err;
+
+	err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
+
+	return err;
+}
+
+/*
+ * No special permissions are needed to list attributes except for trusted.*
+ */
+static inline int can_list(struct jfs_ea *ea)
+{
+	return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
+			    XATTR_TRUSTED_PREFIX_LEN) ||
+		capable(CAP_SYS_ADMIN));
+}
+
+ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+{
+	struct inode *inode = dentry->d_inode;
+	char *buffer;
+	ssize_t size = 0;
+	int xattr_size;
+	struct jfs_ea_list *ealist;
+	struct jfs_ea *ea;
+	struct ea_buffer ea_buf;
+
+	down_read(&JFS_IP(inode)->xattr_sem);
+
+	xattr_size = ea_get(inode, &ea_buf, 0);
+	if (xattr_size < 0) {
+		size = xattr_size;
+		goto out;
+	}
+
+	if (xattr_size == 0)
+		goto release;
+
+	ealist = (struct jfs_ea_list *) ea_buf.xattr;
+
+	/* compute required size of list */
+	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+	    	if (can_list(ea))
+			size += name_size(ea) + 1;
+	}
+
+	if (!data)
+		goto release;
+
+	if (size > buf_size) {
+		size = -ERANGE;
+		goto release;
+	}
+
+	/* Copy attribute names to buffer */
+	buffer = data;
+	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+	    	if (can_list(ea)) {
+			int namelen = copy_name(buffer, ea);
+			buffer += namelen + 1;
+		}
+	}
+
+      release:
+	ea_release(inode, &ea_buf);
+      out:
+	up_read(&JFS_IP(inode)->xattr_sem);
+	return size;
+}
+
+int jfs_removexattr(struct dentry *dentry, const char *name)
+{
+	return __jfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
+}
diff --git a/fs/libfs.c b/fs/libfs.c
new file mode 100644
index 0000000..f90b295
--- /dev/null
+++ b/fs/libfs.c
@@ -0,0 +1,549 @@
+/*
+ *	fs/libfs.c
+ *	Library for filesystem writers.
+ */
+
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <asm/uaccess.h>
+
+int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
+		   struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	generic_fillattr(inode, stat);
+	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+	return 0;
+}
+
+int simple_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = sb->s_magic;
+	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_namelen = NAME_MAX;
+	return 0;
+}
+
+/*
+ * Retaining negative dentries for an in-memory filesystem just wastes
+ * memory and lookup time: arrange for them to be deleted immediately.
+ */
+static int simple_delete_dentry(struct dentry *dentry)
+{
+	return 1;
+}
+
+/*
+ * Lookup the data. This is trivial - if the dentry didn't already
+ * exist, we know it is negative.  Set d_op to delete negative dentries.
+ */
+struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	static struct dentry_operations simple_dentry_operations = {
+		.d_delete = simple_delete_dentry,
+	};
+
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+	dentry->d_op = &simple_dentry_operations;
+	d_add(dentry, NULL);
+	return NULL;
+}
+
+int simple_sync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+	return 0;
+}
+ 
+int dcache_dir_open(struct inode *inode, struct file *file)
+{
+	static struct qstr cursor_name = {.len = 1, .name = "."};
+
+	file->private_data = d_alloc(file->f_dentry, &cursor_name);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+int dcache_dir_close(struct inode *inode, struct file *file)
+{
+	dput(file->private_data);
+	return 0;
+}
+
+loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
+{
+	down(&file->f_dentry->d_inode->i_sem);
+	switch (origin) {
+		case 1:
+			offset += file->f_pos;
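+			/* fall through: SEEK_CUR has been converted to an
+			 * absolute offset and is range-checked like SEEK_SET */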
+		case 0:
+			if (offset >= 0)
+				break;
+		default:
+			up(&file->f_dentry->d_inode->i_sem);
+			return -EINVAL;
+	}
+	if (offset != file->f_pos) {
+		file->f_pos = offset;
+		if (file->f_pos >= 2) {
+			struct list_head *p;
+			struct dentry *cursor = file->private_data;
+			loff_t n = file->f_pos - 2;
+
+			spin_lock(&dcache_lock);
+			list_del(&cursor->d_child);
+			p = file->f_dentry->d_subdirs.next;
+			while (n && p != &file->f_dentry->d_subdirs) {
+				struct dentry *next;
+				next = list_entry(p, struct dentry, d_child);
+				if (!d_unhashed(next) && next->d_inode)
+					n--;
+				p = p->next;
+			}
+			list_add_tail(&cursor->d_child, p);
+			spin_unlock(&dcache_lock);
+		}
+	}
+	up(&file->f_dentry->d_inode->i_sem);
+	return offset;
+}
+
+/* Relationship between i_mode and the DT_xxx types */
+static inline unsigned char dt_type(struct inode *inode)
+{
+	return (inode->i_mode >> 12) & 15;
+}
+
+/*
+ * Directory is locked and all positive dentries in it are safe, since
+ * for ramfs-type trees they can't go away without unlink() or rmdir(),
+ * both impossible due to the lock on directory.
+ */
+
+int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct dentry *cursor = filp->private_data;
+	struct list_head *p, *q = &cursor->d_child;
+	ino_t ino;
+	int i = filp->f_pos;
+
+	switch (i) {
+		case 0:
+			ino = dentry->d_inode->i_ino;
+			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+				break;
+			filp->f_pos++;
+			i++;
+			/* fallthrough */
+		case 1:
+			ino = parent_ino(dentry);
+			if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+				break;
+			filp->f_pos++;
+			i++;
+			/* fallthrough */
+		default:
+			spin_lock(&dcache_lock);
+			if (filp->f_pos == 2) {
+				list_del(q);
+				list_add(q, &dentry->d_subdirs);
+			}
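+			/*
+			 * The cursor dentry (q) marks our position in
+			 * d_subdirs, so dcache_lock can be dropped around
+			 * filldir() and the walk resumed afterwards.
+			 */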
+			for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+				struct dentry *next;
+				next = list_entry(p, struct dentry, d_child);
+				if (d_unhashed(next) || !next->d_inode)
+					continue;
+
+				spin_unlock(&dcache_lock);
+				if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, next->d_inode->i_ino, dt_type(next->d_inode)) < 0)
+					return 0;
+				spin_lock(&dcache_lock);
+				/* next is still alive */
+				list_del(q);
+				list_add(q, p);
+				p = q;
+				filp->f_pos++;
+			}
+			spin_unlock(&dcache_lock);
+	}
+	return 0;
+}
+
+ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
+{
+	return -EISDIR;
+}
+
+struct file_operations simple_dir_operations = {
+	.open		= dcache_dir_open,
+	.release	= dcache_dir_close,
+	.llseek		= dcache_dir_lseek,
+	.read		= generic_read_dir,
+	.readdir	= dcache_readdir,
+};
+
+struct inode_operations simple_dir_inode_operations = {
+	.lookup		= simple_lookup,
+};
+
+/*
+ * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
+ * will never be mountable)
+ */
+struct super_block *
+get_sb_pseudo(struct file_system_type *fs_type, char *name,
+	struct super_operations *ops, unsigned long magic)
+{
+	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+	static struct super_operations default_ops = {.statfs = simple_statfs};
+	struct dentry *dentry;
+	struct inode *root;
+	struct qstr d_name = {.name = name, .len = strlen(name)};
+
+	if (IS_ERR(s))
+		return s;
+
+	s->s_flags = MS_NOUSER;
+	s->s_maxbytes = ~0ULL;
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = magic;
+	s->s_op = ops ? ops : &default_ops;
+	s->s_time_gran = 1;
+	root = new_inode(s);
+	if (!root)
+		goto Enomem;
+	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+	root->i_uid = root->i_gid = 0;
+	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
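+	/*
+	 * The root dentry is allocated without a parent and then made its
+	 * own parent; these superblocks are never mounted in a namespace
+	 * (MS_NOUSER), so there is no directory above the root.
+	 */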
+	dentry = d_alloc(NULL, &d_name);
+	if (!dentry) {
+		iput(root);
+		goto Enomem;
+	}
+	dentry->d_sb = s;
+	dentry->d_parent = dentry;
+	d_instantiate(dentry, root);
+	s->s_root = dentry;
+	s->s_flags |= MS_ACTIVE;
+	return s;
+
+Enomem:
+	up_write(&s->s_umount);
+	deactivate_super(s);
+	return ERR_PTR(-ENOMEM);
+}
+
+int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_nlink++;
+	atomic_inc(&inode->i_count);
+	dget(dentry);
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static inline int simple_positive(struct dentry *dentry)
+{
+	return dentry->d_inode && !d_unhashed(dentry);
+}
+
+int simple_empty(struct dentry *dentry)
+{
+	struct dentry *child;
+	int ret = 0;
+
+	spin_lock(&dcache_lock);
+	list_for_each_entry(child, &dentry->d_subdirs, d_child)
+		if (simple_positive(child))
+			goto out;
+	ret = 1;
+out:
+	spin_unlock(&dcache_lock);
+	return ret;
+}
+
+int simple_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_nlink--;
+	dput(dentry);
+	return 0;
+}
+
+int simple_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	if (!simple_empty(dentry))
+		return -ENOTEMPTY;
+
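+	/* Drop the "." self-link, the name link (via simple_unlink), and
+	 * the parent's link for "..". */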
+	dentry->d_inode->i_nlink--;
+	simple_unlink(dir, dentry);
+	dir->i_nlink--;
+	return 0;
+}
+
+int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
+
+	if (!simple_empty(new_dentry))
+		return -ENOTEMPTY;
+
+	if (new_dentry->d_inode) {
+		simple_unlink(new_dir, new_dentry);
+		if (they_are_dirs)
+			old_dir->i_nlink--;
+	} else if (they_are_dirs) {
+		old_dir->i_nlink--;
+		new_dir->i_nlink++;
+	}
+
+	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
+		new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+	return 0;
+}
+
+int simple_readpage(struct file *file, struct page *page)
+{
+	void *kaddr;
+
+	if (PageUptodate(page))
+		goto out;
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+out:
+	unlock_page(page);
+	return 0;
+}
+
+int simple_prepare_write(struct file *file, struct page *page,
+			unsigned from, unsigned to)
+{
+	if (!PageUptodate(page)) {
+		if (to - from != PAGE_CACHE_SIZE) {
+			void *kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr, 0, from);
+			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+		}
+		SetPageUptodate(page);
+	}
+	return 0;
+}
+
+int simple_commit_write(struct file *file, struct page *page,
+			unsigned offset, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+
+	/*
+	 * No need to use i_size_read() here, the i_size
+	 * cannot change under us because we hold the i_sem.
+	 */
+	if (pos > inode->i_size)
+		i_size_write(inode, pos);
+	set_page_dirty(page);
+	return 0;
+}
+
+int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
+{
+	static struct super_operations s_ops = {.statfs = simple_statfs};
+	struct inode *inode;
+	struct dentry *root;
+	struct dentry *dentry;
+	int i;
+
+	s->s_blocksize = PAGE_CACHE_SIZE;
+	s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	s->s_magic = magic;
+	s->s_op = &s_ops;
+	s->s_time_gran = 1;
+
+	inode = new_inode(s);
+	if (!inode)
+		return -ENOMEM;
+	inode->i_mode = S_IFDIR | 0755;
+	inode->i_uid = inode->i_gid = 0;
+	inode->i_blksize = PAGE_CACHE_SIZE;
+	inode->i_blocks = 0;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_op = &simple_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+	root = d_alloc_root(inode);
+	if (!root) {
+		iput(inode);
+		return -ENOMEM;
+	}
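+	/*
+	 * The table ends with an entry whose name is the empty string "";
+	 * NULL-name slots are skipped but still advance i, so i_ino keeps
+	 * matching the slot index.
+	 */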
+	for (i = 0; !files->name || files->name[0]; i++, files++) {
+		if (!files->name)
+			continue;
+		dentry = d_alloc_name(root, files->name);
+		if (!dentry)
+			goto out;
+		inode = new_inode(s);
+		if (!inode)
+			goto out;
+		inode->i_mode = S_IFREG | files->mode;
+		inode->i_uid = inode->i_gid = 0;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_fop = files->ops;
+		inode->i_ino = i;
+		d_add(dentry, inode);
+	}
+	s->s_root = root;
+	return 0;
+out:
+	d_genocide(root);
+	dput(root);
+	return -ENOMEM;
+}
+
+static DEFINE_SPINLOCK(pin_fs_lock);
+
+int simple_pin_fs(char *name, struct vfsmount **mount, int *count)
+{
+	struct vfsmount *mnt = NULL;
+	spin_lock(&pin_fs_lock);
+	if (unlikely(!*mount)) {
+		spin_unlock(&pin_fs_lock);
+		mnt = do_kern_mount(name, 0, name, NULL);
+		if (IS_ERR(mnt))
+			return PTR_ERR(mnt);
+		spin_lock(&pin_fs_lock);
+		if (!*mount)
+			*mount = mnt;
+	}
+	mntget(*mount);
+	++*count;
+	spin_unlock(&pin_fs_lock);
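+	/*
+	 * mnt is NULL (and mntput a no-op) if the cached mount already
+	 * existed.  Otherwise this drops the do_kern_mount() reference:
+	 * the pinned mount stays alive via the mntget() above, and a
+	 * mount that lost the race is freed here.
+	 */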
+	mntput(mnt);
+	return 0;
+}
+
+void simple_release_fs(struct vfsmount **mount, int *count)
+{
+	struct vfsmount *mnt;
+	spin_lock(&pin_fs_lock);
+	mnt = *mount;
+	if (!--*count)
+		*mount = NULL;
+	spin_unlock(&pin_fs_lock);
+	mntput(mnt);
+}
+
+ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
+				const void *from, size_t available)
+{
+	loff_t pos = *ppos;
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available)
+		return 0;
+	if (count > available - pos)
+		count = available - pos;
+	if (copy_to_user(to, from + pos, count))
+		return -EFAULT;
+	*ppos = pos + count;
+	return count;
+}
+
+/*
+ * Transaction-based IO.
+ * The file expects a single write which triggers the transaction, and then
+ * possibly a read which collects the result - which is stored in a
+ * file-local buffer.
+ */
+char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
+{
+	struct simple_transaction_argresp *ar;
+	static DEFINE_SPINLOCK(simple_transaction_lock);
+
+	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
+		return ERR_PTR(-EFBIG);
+
+	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
+	if (!ar)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock(&simple_transaction_lock);
+
+	/* only one write allowed per open */
+	if (file->private_data) {
+		spin_unlock(&simple_transaction_lock);
+		free_page((unsigned long)ar);
+		return ERR_PTR(-EBUSY);
+	}
+
+	file->private_data = ar;
+
+	spin_unlock(&simple_transaction_lock);
+
+	if (copy_from_user(ar->data, buf, size))
+		return ERR_PTR(-EFAULT);
+
+	return ar->data;
+}
+
+ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
+{
+	struct simple_transaction_argresp *ar = file->private_data;
+
+	if (!ar)
+		return 0;
+	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
+}
+
+int simple_transaction_release(struct inode *inode, struct file *file)
+{
+	free_page((unsigned long)file->private_data);
+	return 0;
+}
+
+EXPORT_SYMBOL(dcache_dir_close);
+EXPORT_SYMBOL(dcache_dir_lseek);
+EXPORT_SYMBOL(dcache_dir_open);
+EXPORT_SYMBOL(dcache_readdir);
+EXPORT_SYMBOL(generic_read_dir);
+EXPORT_SYMBOL(get_sb_pseudo);
+EXPORT_SYMBOL(simple_commit_write);
+EXPORT_SYMBOL(simple_dir_inode_operations);
+EXPORT_SYMBOL(simple_dir_operations);
+EXPORT_SYMBOL(simple_empty);
+EXPORT_SYMBOL(d_alloc_name);
+EXPORT_SYMBOL(simple_fill_super);
+EXPORT_SYMBOL(simple_getattr);
+EXPORT_SYMBOL(simple_link);
+EXPORT_SYMBOL(simple_lookup);
+EXPORT_SYMBOL(simple_pin_fs);
+EXPORT_SYMBOL(simple_prepare_write);
+EXPORT_SYMBOL(simple_readpage);
+EXPORT_SYMBOL(simple_release_fs);
+EXPORT_SYMBOL(simple_rename);
+EXPORT_SYMBOL(simple_rmdir);
+EXPORT_SYMBOL(simple_statfs);
+EXPORT_SYMBOL(simple_sync_file);
+EXPORT_SYMBOL(simple_unlink);
+EXPORT_SYMBOL(simple_read_from_buffer);
+EXPORT_SYMBOL(simple_transaction_get);
+EXPORT_SYMBOL(simple_transaction_read);
+EXPORT_SYMBOL(simple_transaction_release);
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
new file mode 100644
index 0000000..7725a0a
--- /dev/null
+++ b/fs/lockd/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the linux lock manager stuff
+#
+
+obj-$(CONFIG_LOCKD) += lockd.o
+
+lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
+	        svcproc.o svcsubs.o mon.o xdr.o
+lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs		      := $(lockd-objs-y)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
new file mode 100644
index 0000000..ef7103b
--- /dev/null
+++ b/fs/lockd/clntlock.c
@@ -0,0 +1,245 @@
+/*
+ * linux/fs/lockd/clntlock.c
+ *
+ * Lock handling for the client side NLM implementation
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/smp_lock.h>
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+/*
+ * Local function prototypes
+ */
+static int			reclaimer(void *ptr);
+
+/*
+ * The following functions handle blocking and granting from the
+ * client perspective.
+ */
+
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+	struct nlm_wait *	b_next;		/* linked list */
+	wait_queue_head_t	b_wait;		/* where to wait on */
+	struct nlm_host *	b_host;
+	struct file_lock *	b_lock;		/* local file lock */
+	unsigned short		b_reclaim;	/* got to reclaim lock */
+	u32			b_status;	/* grant callback status */
+};
+
+static struct nlm_wait *	nlm_blocked;
+
+/*
+ * Block on a lock
+ */
+int
+nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
+{
+	struct nlm_wait	block, **head;
+	int		err;
+	u32		pstate;
+
+	block.b_host   = host;
+	block.b_lock   = fl;
+	init_waitqueue_head(&block.b_wait);
+	block.b_status = NLM_LCK_BLOCKED;
+	block.b_next   = nlm_blocked;
+	nlm_blocked    = &block;
+
+	/* Remember pseudo nsm state */
+	pstate = host->h_state;
+
+	/* Go to sleep waiting for GRANT callback. Some servers seem
+	 * to lose callbacks, however, so we're going to poll from
+	 * time to time just to make sure.
+	 *
+	 * For now, the retry frequency is pretty high; normally 
+	 * a 1 minute timeout would do. See the comment before
+	 * nlmclnt_lock for an explanation.
+	 */
+	sleep_on_timeout(&block.b_wait, 30*HZ);
+
+	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
+		if (*head == &block) {
+			*head = block.b_next;
+			break;
+		}
+	}
+
+	if (!signalled()) {
+		*statp = block.b_status;
+		return 0;
+	}
+
+	/* Okay, we were interrupted. Cancel the pending request
+	 * unless the server has rebooted.
+	 */
+	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
+		printk(KERN_NOTICE
+			"lockd: CANCEL call failed (errno %d)\n", -err);
+
+	return -ERESTARTSYS;
+}
+
+/*
+ * The server lockd has called us back to tell us the lock was granted
+ */
+u32
+nlmclnt_grant(struct nlm_lock *lock)
+{
+	struct nlm_wait	*block;
+
+	/*
+	 * Look up blocked request based on arguments. 
+	 * Warning: must not use cookie to match it!
+	 */
+	for (block = nlm_blocked; block; block = block->b_next) {
+		if (nlm_compare_locks(block->b_lock, &lock->fl))
+			break;
+	}
+
+	/* Oops, no blocked request found. */
+	if (block == NULL)
+		return nlm_lck_denied;
+
+	/* Alright, we found the lock. Set the return status and
+	 * wake up the caller.
+	 */
+	block->b_status = NLM_LCK_GRANTED;
+	wake_up(&block->b_wait);
+
+	return nlm_granted;
+}
+
+/*
+ * The following procedures deal with the recovery of locks after a
+ * server crash.
+ */
+
+/*
+ * Mark the locks for reclaiming.
+ * FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
+ *        Maintain NLM lock reclaiming lists in the nlm_host instead.
+ */
+static
+void nlmclnt_mark_reclaim(struct nlm_host *host)
+{
+	struct file_lock *fl;
+	struct inode *inode;
+	struct list_head *tmp;
+
+	list_for_each(tmp, &file_lock_list) {
+		fl = list_entry(tmp, struct file_lock, fl_link);
+
+		inode = fl->fl_file->f_dentry->d_inode;
+		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
+			continue;
+		if (fl->fl_u.nfs_fl.owner->host != host)
+			continue;
+		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
+			continue;
+		fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
+	}
+}
+
+/*
+ * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
+ * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
+ */
+static inline
+void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
+{
+	host->h_monitored = 0;
+	host->h_nsmstate = newstate;
+	host->h_state++;
+	host->h_nextrebind = 0;
+	nlm_rebind_host(host);
+	nlmclnt_mark_reclaim(host);
+	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
+}
+
+/*
+ * Reclaim all locks on server host. We do this by spawning a separate
+ * reclaimer thread.
+ */
+void
+nlmclnt_recovery(struct nlm_host *host, u32 newstate)
+{
+	if (host->h_reclaiming++) {
+		if (host->h_nsmstate == newstate)
+			return;
+		nlmclnt_prepare_reclaim(host, newstate);
+	} else {
+		nlmclnt_prepare_reclaim(host, newstate);
+		nlm_get_host(host);
+		__module_get(THIS_MODULE);
+		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
+			module_put(THIS_MODULE);
+	}
+}
+
+static int
+reclaimer(void *ptr)
+{
+	struct nlm_host	  *host = (struct nlm_host *) ptr;
+	struct nlm_wait	  *block;
+	struct list_head *tmp;
+	struct file_lock *fl;
+	struct inode *inode;
+
+	daemonize("%s-reclaim", host->h_name);
+	allow_signal(SIGKILL);
+
+	/* This one ensures that our parent doesn't terminate while the
+	 * reclaim is in progress */
+	lock_kernel();
+	lockd_up();
+
+	/* First, reclaim all locks that have been marked. */
+restart:
+	list_for_each(tmp, &file_lock_list) {
+		fl = list_entry(tmp, struct file_lock, fl_link);
+
+		inode = fl->fl_file->f_dentry->d_inode;
+		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
+			continue;
+		if (fl->fl_u.nfs_fl.owner->host != host)
+			continue;
+		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
+			continue;
+
+		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
+		nlmclnt_reclaim(host, fl);
+		if (signalled())
+			break;
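+		/*
+		 * nlmclnt_reclaim() may have slept, and file_lock_list may
+		 * have changed under us, so restart the list walk.
+		 */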
+		goto restart;
+	}
+
+	host->h_reclaiming = 0;
+
+	/* Now, wake up all processes that sleep on a blocked lock */
+	for (block = nlm_blocked; block; block = block->b_next) {
+		if (block->b_host == host) {
+			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
+			wake_up(&block->b_wait);
+		}
+	}
+
+	/* Release host handle after use */
+	nlm_release_host(host);
+	lockd_down();
+	unlock_kernel();
+	module_put_and_exit(0);
+}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
new file mode 100644
index 0000000..a440761
--- /dev/null
+++ b/fs/lockd/clntproc.c
@@ -0,0 +1,820 @@
+/*
+ * linux/fs/lockd/clntproc.c
+ *
+ * RPC procedures for the client side NLM implementation
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/nfs_fs.h>
+#include <linux/utsname.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+#define NLMCLNT_GRACE_WAIT	(5*HZ)
+
+static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
+static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
+static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
+static void	nlmclnt_unlock_callback(struct rpc_task *);
+static void	nlmclnt_cancel_callback(struct rpc_task *);
+static int	nlm_stat_to_errno(u32 stat);
+static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
+
+/*
+ * Cookie counter for NLM requests
+ */
+static u32	nlm_cookie = 0x1234;
+
+static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
+{
+	memcpy(c->data, &nlm_cookie, 4);
+	memset(c->data+4, 0, 4);
+	c->len=4;
+	nlm_cookie++;
+}
+
+static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
+{
+	atomic_inc(&lockowner->count);
+	return lockowner;
+}
+
+static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
+{
+	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
+		return;
+	list_del(&lockowner->list);
+	spin_unlock(&lockowner->host->h_lock);
+	nlm_release_host(lockowner->host);
+	kfree(lockowner);
+}
+
+static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
+{
+	struct nlm_lockowner *lockowner;
+	list_for_each_entry(lockowner, &host->h_lockowners, list) {
+		if (lockowner->pid == pid)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
+{
+	uint32_t res;
+	do {
+		res = host->h_pidcount++;
+	} while (nlm_pidbusy(host, res) < 0);
+	return res;
+}
+
+static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
+{
+	struct nlm_lockowner *lockowner;
+	list_for_each_entry(lockowner, &host->h_lockowners, list) {
+		if (lockowner->owner != owner)
+			continue;
+		return nlm_get_lockowner(lockowner);
+	}
+	return NULL;
+}
+
+static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
+{
+	struct nlm_lockowner *res, *new = NULL;
+
+	spin_lock(&host->h_lock);
+	res = __nlm_find_lockowner(host, owner);
+	if (res == NULL) {
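+		/*
+		 * Drop the spinlock around the GFP_KERNEL allocation, then
+		 * recheck under the lock in case another task registered
+		 * the same owner in the meantime.
+		 */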
+		spin_unlock(&host->h_lock);
+		new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
+		spin_lock(&host->h_lock);
+		res = __nlm_find_lockowner(host, owner);
+		if (res == NULL && new != NULL) {
+			res = new;
+			atomic_set(&new->count, 1);
+			new->owner = owner;
+			new->pid = __nlm_alloc_pid(host);
+			new->host = nlm_get_host(host);
+			list_add(&new->list, &host->h_lockowners);
+			new = NULL;
+		}
+	}
+	spin_unlock(&host->h_lock);
+	if (new != NULL)
+		kfree(new);
+	return res;
+}
+
+/*
+ * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
+ */
+static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_lock	*lock = &argp->lock;
+
+	nlmclnt_next_cookie(&argp->cookie);
+	argp->state   = nsm_local_state;
+	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
+	lock->caller  = system_utsname.nodename;
+	lock->oh.data = req->a_owner;
+	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
+				current->pid, system_utsname.nodename);
+	locks_copy_lock(&lock->fl, fl);
+}
+
+static void nlmclnt_release_lockargs(struct nlm_rqst *req)
+{
+	struct file_lock *fl = &req->a_args.lock.fl;
+
+	if (fl->fl_ops && fl->fl_ops->fl_release_private)
+		fl->fl_ops->fl_release_private(fl);
+}
+
+/*
+ * Initialize arguments for GRANTED call. The nlm_rqst structure
+ * has been cleared already.
+ */
+int
+nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
+{
+	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
+	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
+	call->a_args.lock.caller = system_utsname.nodename;
+	call->a_args.lock.oh.len = lock->oh.len;
+
+	/* set default data area */
+	call->a_args.lock.oh.data = call->a_owner;
+
+	if (lock->oh.len > NLMCLNT_OHSIZE) {
+		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
+		if (!data) {
+			nlmclnt_freegrantargs(call);
+			return 0;
+		}
+		call->a_args.lock.oh.data = (u8 *) data;
+	}
+
+	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
+	return 1;
+}
+
+void
+nlmclnt_freegrantargs(struct nlm_rqst *call)
+{
+	struct file_lock *fl = &call->a_args.lock.fl;
+	/*
+	 * Check whether we allocated memory for the owner.
+	 */
+	if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
+		kfree(call->a_args.lock.oh.data);
+	}
+	if (fl->fl_ops && fl->fl_ops->fl_release_private)
+		fl->fl_ops->fl_release_private(fl);
+}
+
+/*
+ * This is the main entry point for the NLM client.
+ */
+int
+nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
+{
+	struct nfs_server	*nfssrv = NFS_SERVER(inode);
+	struct nlm_host		*host;
+	struct nlm_rqst		reqst, *call = &reqst;
+	sigset_t		oldset;
+	unsigned long		flags;
+	int			status, proto, vers;
+
+	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
+	if (NFS_PROTO(inode)->version > 3) {
+		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
+		return -ENOLCK;
+	}
+
+	/* Retrieve transport protocol from NFS client */
+	proto = NFS_CLIENT(inode)->cl_xprt->prot;
+
+	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
+		return -ENOLCK;
+
+	/* Create RPC client handle if not there, and copy soft
+	 * and intr flags from NFS client. */
+	if (host->h_rpcclnt == NULL) {
+		struct rpc_clnt	*clnt;
+
+		/* Bind an rpc client to this host handle (does not
+		 * perform a portmapper lookup) */
+		if (!(clnt = nlm_bind_host(host))) {
+			status = -ENOLCK;
+			goto done;
+		}
+		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
+		clnt->cl_intr     = nfssrv->client->cl_intr;
+		clnt->cl_chatty   = nfssrv->client->cl_chatty;
+	}
+
+	/* Keep the old signal mask */
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	oldset = current->blocked;
+
+	/* If we're cleaning up locks because the process is exiting,
+	 * perform the RPC call asynchronously. */
+	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
+	    && fl->fl_type == F_UNLCK
+	    && (current->flags & PF_EXITING)) {
+		sigfillset(&current->blocked);	/* Mask all signals */
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+		call = nlmclnt_alloc_call();
+		if (!call) {
+			status = -ENOMEM;
+			goto out_restore;
+		}
+		call->a_flags = RPC_TASK_ASYNC;
+	} else {
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+		memset(call, 0, sizeof(*call));
+		locks_init_lock(&call->a_args.lock.fl);
+		locks_init_lock(&call->a_res.lock.fl);
+	}
+	call->a_host = host;
+
+	nlmclnt_locks_init_private(fl, host);
+
+	/* Set up the argument struct */
+	nlmclnt_setlockargs(call, fl);
+
+	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
+		if (fl->fl_type != F_UNLCK) {
+			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
+			status = nlmclnt_lock(call, fl);
+		} else
+			status = nlmclnt_unlock(call, fl);
+	} else if (IS_GETLK(cmd))
+		status = nlmclnt_test(call, fl);
+	else
+		status = -EINVAL;
+
+ out_restore:
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	current->blocked = oldset;
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+done:
+	dprintk("lockd: clnt proc returns %d\n", status);
+	nlm_release_host(host);
+	return status;
+}
+EXPORT_SYMBOL(nlmclnt_proc);
+
+/*
+ * Allocate an NLM RPC call struct
+ */
+struct nlm_rqst *
+nlmclnt_alloc_call(void)
+{
+	struct nlm_rqst	*call;
+
+	while (!signalled()) {
+		call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
+		if (call) {
+			memset(call, 0, sizeof(*call));
+			locks_init_lock(&call->a_args.lock.fl);
+			locks_init_lock(&call->a_res.lock.fl);
+			return call;
+		}
+		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(5*HZ);
+	}
+	return NULL;
+}
+
+static int nlm_wait_on_grace(wait_queue_head_t *queue)
+{
+	DEFINE_WAIT(wait);
+	int status = -EINTR;
+
+	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
+	if (!signalled ()) {
+		schedule_timeout(NLMCLNT_GRACE_WAIT);
+		try_to_freeze(PF_FREEZE);
+		if (!signalled ())
+			status = 0;
+	}
+	finish_wait(queue, &wait);
+	return status;
+}
+
+/*
+ * Generic NLM call
+ */
+static int
+nlmclnt_call(struct nlm_rqst *req, u32 proc)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_res	*resp = &req->a_res;
+	struct rpc_message msg = {
+		.rpc_argp	= argp,
+		.rpc_resp	= resp,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s\n",
+			(int)proc, host->h_name);
+
+	do {
+		if (host->h_reclaiming && !argp->reclaim)
+			goto in_grace_period;
+
+		/* If we have no RPC client yet, create one. */
+		if ((clnt = nlm_bind_host(host)) == NULL)
+			return -ENOLCK;
+		msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+		/* Perform the RPC call. If an error occurs, try again */
+		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
+			dprintk("lockd: rpc_call returned error %d\n", -status);
+			switch (status) {
+			case -EPROTONOSUPPORT:
+				status = -EINVAL;
+				break;
+			case -ECONNREFUSED:
+			case -ETIMEDOUT:
+			case -ENOTCONN:
+				nlm_rebind_host(host);
+				status = -EAGAIN;
+				break;
+			case -ERESTARTSYS:
+				return signalled () ? -EINTR : status;
+			default:
+				break;
+			}
+			break;
+		} else
+		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
+			dprintk("lockd: server in grace period\n");
+			if (argp->reclaim) {
+				printk(KERN_WARNING
+				     "lockd: spurious grace period reject?!\n");
+				return -ENOLCK;
+			}
+		} else {
+			if (!argp->reclaim) {
+				/* We appear to be out of the grace period */
+				wake_up_all(&host->h_gracewait);
+			}
+			dprintk("lockd: server returns status %d\n", resp->status);
+			return 0;	/* Okay, call complete */
+		}
+
+in_grace_period:
+		/*
+		 * The server has rebooted and appears to be in the grace
+		 * period during which locks are only allowed to be
+		 * reclaimed.
+		 * We can only back off and try again later.
+		 */
+		status = nlm_wait_on_grace(&host->h_gracewait);
+	} while (status == 0);
+
+	return status;
+}
+
+/*
+ * Generic NLM call, async version.
+ */
+int
+nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct rpc_message msg = {
+		.rpc_argp	= &req->a_args,
+		.rpc_resp	= &req->a_res,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s (async)\n",
+			(int)proc, host->h_name);
+
+	/* If we have no RPC client yet, create one. */
+	if ((clnt = nlm_bind_host(host)) == NULL)
+		return -ENOLCK;
+	msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+        /* bootstrap and kick off the async RPC call */
+        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+
+	return status;
+}
+
+static int
+nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+{
+	struct nlm_host	*host = req->a_host;
+	struct rpc_clnt	*clnt;
+	struct nlm_args	*argp = &req->a_args;
+	struct nlm_res	*resp = &req->a_res;
+	struct rpc_message msg = {
+		.rpc_argp	= argp,
+		.rpc_resp	= resp,
+	};
+	int		status;
+
+	dprintk("lockd: call procedure %d on %s (async)\n",
+			(int)proc, host->h_name);
+
+	/* If we have no RPC client yet, create one. */
+	if ((clnt = nlm_bind_host(host)) == NULL)
+		return -ENOLCK;
+	msg.rpc_proc = &clnt->cl_procinfo[proc];
+
+	/* Increment host refcount */
+	nlm_get_host(host);
+        /* bootstrap and kick off the async RPC call */
+        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	if (status < 0)
+		nlm_release_host(host);
+	return status;
+}
+
+/*
+ * TEST for the presence of a conflicting lock
+ */
+static int
+nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
+{
+	int	status;
+
+	status = nlmclnt_call(req, NLMPROC_TEST);
+	nlmclnt_release_lockargs(req);
+	if (status < 0)
+		return status;
+
+	status = req->a_res.status;
+	if (status == NLM_LCK_GRANTED) {
+		fl->fl_type = F_UNLCK;
+	} else if (status == NLM_LCK_DENIED) {
+		/*
+		 * Report the conflicting lock back to the application.
+		 */
+		locks_copy_lock(fl, &req->a_res.lock.fl);
+		fl->fl_pid = 0;
+	} else {
+		return nlm_stat_to_errno(req->a_res.status);
+	}
+
+	return 0;
+}
+
+static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
+	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
+}
+
+static void nlmclnt_locks_release_private(struct file_lock *fl)
+{
+	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
+	fl->fl_ops = NULL;
+}
+
+static struct file_lock_operations nlmclnt_lock_ops = {
+	.fl_copy_lock = nlmclnt_locks_copy_lock,
+	.fl_release_private = nlmclnt_locks_release_private,
+};
+
+static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
+{
+	BUG_ON(fl->fl_ops != NULL);
+	fl->fl_u.nfs_fl.state = 0;
+	fl->fl_u.nfs_fl.flags = 0;
+	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
+	fl->fl_ops = &nlmclnt_lock_ops;
+}
+
+static void do_vfs_lock(struct file_lock *fl)
+{
+	int res = 0;
+	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+		case FL_POSIX:
+			res = posix_lock_file_wait(fl->fl_file, fl);
+			break;
+		case FL_FLOCK:
+			res = flock_lock_file_wait(fl->fl_file, fl);
+			break;
+		default:
+			BUG();
+	}
+	if (res < 0)
+		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
+				__FUNCTION__);
+}
+
+/*
+ * LOCK: Try to create a lock
+ *
+ *			Programmer Harassment Alert
+ *
+ * When given a blocking lock request in a sync RPC call, the HPUX lockd
+ * will faithfully return LCK_BLOCKED but never cares to notify us when
+ * the lock could be granted. This way, our local process could hang
+ * around forever waiting for the callback.
+ *
+ *  Solution A:	Implement busy-waiting
+ *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
+ *
+ * For now I am implementing solution A, because I hate the idea of
+ * re-implementing lockd for a third time in two months. The async
+ * calls shouldn't be too hard to do, however.
+ *
+ * This is one of the lovely things about standards in the NFS area:
+ * they're so soft and squishy you can't really blame HP for doing this.
+ */
+static int
+nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_host	*host = req->a_host;
+	struct nlm_res	*resp = &req->a_res;
+	int		status;
+
+	if (!host->h_monitored && nsm_monitor(host) < 0) {
+		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
+					host->h_name);
+		status = -ENOLCK;
+		goto out;
+	}
+
+	do {
+		if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
+			if (resp->status != NLM_LCK_BLOCKED)
+				break;
+			status = nlmclnt_block(host, fl, &resp->status);
+		}
+		if (status < 0)
+			goto out;
+	} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);
+
+	if (resp->status == NLM_LCK_GRANTED) {
+		fl->fl_u.nfs_fl.state = host->h_state;
+		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
+		fl->fl_flags |= FL_SLEEP;
+		do_vfs_lock(fl);
+	}
+	status = nlm_stat_to_errno(resp->status);
+out:
+	nlmclnt_release_lockargs(req);
+	return status;
+}
+
+/*
+ * RECLAIM: Try to reclaim a lock
+ */
+int
+nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
+{
+	struct nlm_rqst reqst, *req;
+	int		status;
+
+	req = &reqst;
+	memset(req, 0, sizeof(*req));
+	locks_init_lock(&req->a_args.lock.fl);
+	locks_init_lock(&req->a_res.lock.fl);
+	req->a_host  = host;
+	req->a_flags = 0;
+
+	/* Set up the argument struct */
+	nlmclnt_setlockargs(req, fl);
+	req->a_args.reclaim = 1;
+
+	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
+	 && req->a_res.status == NLM_LCK_GRANTED)
+		return 0;
+
+	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
+				"(errno %d, status %d)\n", fl->fl_pid,
+				status, req->a_res.status);
+
+	/*
+	 * FIXME: This is a serious failure. We can
+	 *
+	 *  a.	Ignore the problem
+	 *  b.	Send the owning process some signal (Linux doesn't have
+	 *	SIGLOST, though...)
+	 *  c.	Retry the operation
+	 *
+	 * Until someone comes up with a simple implementation
+	 * for b or c, I'll choose option a.
+	 */
+
+	return -ENOLCK;
+}
+
+/*
+ * UNLOCK: remove an existing lock
+ */
+static int
+nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
+{
+	struct nlm_res	*resp = &req->a_res;
+	int		status;
+
+	/* Clean the GRANTED flag now so the lock doesn't get
+	 * reclaimed while we're stuck in the unlock call. */
+	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;
+
+	if (req->a_flags & RPC_TASK_ASYNC) {
+		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
+					nlmclnt_unlock_callback);
+		/* Hrmf... Do the unlock early since locks_remove_posix()
+		 * really expects us to free the lock synchronously */
+		do_vfs_lock(fl);
+		if (status < 0) {
+			nlmclnt_release_lockargs(req);
+			kfree(req);
+		}
+		return status;
+	}
+
+	status = nlmclnt_call(req, NLMPROC_UNLOCK);
+	nlmclnt_release_lockargs(req);
+	if (status < 0)
+		return status;
+
+	do_vfs_lock(fl);
+	if (resp->status == NLM_LCK_GRANTED)
+		return 0;
+
+	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
+		printk("lockd: unexpected unlock status: %d\n", resp->status);
+
+	/* What to do now? I'm out of my depth... */
+
+	return -ENOLCK;
+}
+
+static void
+nlmclnt_unlock_callback(struct rpc_task *task)
+{
+	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+	int		status = req->a_res.status;
+
+	if (RPC_ASSASSINATED(task))
+		goto die;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
+		goto retry_rebind;
+	}
+	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
+		rpc_delay(task, NLMCLNT_GRACE_WAIT);
+		goto retry_unlock;
+	}
+	if (status != NLM_LCK_GRANTED)
+		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
+die:
+	nlm_release_host(req->a_host);
+	nlmclnt_release_lockargs(req);
+	kfree(req);
+	return;
+ retry_rebind:
+	nlm_rebind_host(req->a_host);
+ retry_unlock:
+	rpc_restart_call(task);
+}
+
+/*
+ * Cancel a blocked lock request.
+ * We always use an async RPC call for this in order not to hang a
+ * process that has been Ctrl-C'ed.
+ */
+int
+nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
+{
+	struct nlm_rqst	*req;
+	unsigned long	flags;
+	sigset_t	oldset;
+	int		status;
+
+	/* Block all signals while setting up call */
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	oldset = current->blocked;
+	sigfillset(&current->blocked);
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	req = nlmclnt_alloc_call();
+	if (!req)
+		return -ENOMEM;
+	req->a_host  = host;
+	req->a_flags = RPC_TASK_ASYNC;
+
+	nlmclnt_setlockargs(req, fl);
+
+	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
+					nlmclnt_cancel_callback);
+	if (status < 0) {
+		nlmclnt_release_lockargs(req);
+		kfree(req);
+	}
+
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	current->blocked = oldset;
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	return status;
+}
+
+static void
+nlmclnt_cancel_callback(struct rpc_task *task)
+{
+	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+
+	if (RPC_ASSASSINATED(task))
+		goto die;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: CANCEL call error %d, retrying.\n",
+					task->tk_status);
+		goto retry_cancel;
+	}
+
+	dprintk("lockd: cancel status %d (task %d)\n",
+			req->a_res.status, task->tk_pid);
+
+	switch (req->a_res.status) {
+	case NLM_LCK_GRANTED:
+	case NLM_LCK_DENIED_GRACE_PERIOD:
+		/* Everything's good */
+		break;
+	case NLM_LCK_DENIED_NOLOCKS:
+		dprintk("lockd: CANCEL failed (server has no locks)\n");
+		goto retry_cancel;
+	default:
+		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
+			req->a_res.status);
+	}
+
+die:
+	nlm_release_host(req->a_host);
+	nlmclnt_release_lockargs(req);
+	kfree(req);
+	return;
+
+retry_cancel:
+	nlm_rebind_host(req->a_host);
+	rpc_restart_call(task);
+	rpc_delay(task, 30 * HZ);
+}
+
+/*
+ * Convert an NLM status code to a generic kernel errno
+ */
+static int
+nlm_stat_to_errno(u32 status)
+{
+	switch(status) {
+	case NLM_LCK_GRANTED:
+		return 0;
+	case NLM_LCK_DENIED:
+		return -EAGAIN;
+	case NLM_LCK_DENIED_NOLOCKS:
+	case NLM_LCK_DENIED_GRACE_PERIOD:
+		return -ENOLCK;
+	case NLM_LCK_BLOCKED:
+		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
+		return -ENOLCK;
+#ifdef CONFIG_LOCKD_V4
+	case NLM_DEADLCK:
+		return -EDEADLK;
+	case NLM_ROFS:
+		return -EROFS;
+	case NLM_STALE_FH:
+		return -ESTALE;
+	case NLM_FBIG:
+		return -EOVERFLOW;
+	case NLM_FAILED:
+		return -ENOLCK;
+#endif
+	}
+	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
+	return -ENOLCK;
+}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
new file mode 100644
index 0000000..52707c5
--- /dev/null
+++ b/fs/lockd/host.c
@@ -0,0 +1,346 @@
+/*
+ * linux/fs/lockd/host.c
+ *
+ * Management for NLM peer hosts. The nlm_host struct is shared
+ * between the client and server implementations; the only reason
+ * for doing so is to reduce code bloat.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
+#define NLM_HOST_MAX		64
+#define NLM_HOST_NRHASH		32
+#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
+#define NLM_HOST_REBIND		(60 * HZ)
+#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
+#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
+#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)
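+/*
+ * NLM_ADDRHASH() picks one of NLM_HOST_NRHASH buckets from the low-order
+ * bits of the host-order IPv4 address; NLM_HOST_EXPIRE and NLM_HOST_COLLECT
+ * choose the per-host lifetime and GC interval based on how many hosts
+ * are currently cached.
+ */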
+
+static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
+static unsigned long		next_gc;
+static int			nrhosts;
+static DECLARE_MUTEX(nlm_host_sema);
+
+
+static void			nlm_gc_hosts(void);
+
+/*
+ * Find an NLM server handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
+{
+	return nlm_lookup_host(0, sin, proto, version);
+}
+
+/*
+ * Find an NLM client handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmsvc_lookup_host(struct svc_rqst *rqstp)
+{
+	return nlm_lookup_host(1, &rqstp->rq_addr,
+			       rqstp->rq_prot, rqstp->rq_vers);
+}
+
+/*
+ * Common host lookup routine for server & client
+ */
+struct nlm_host *
+nlm_lookup_host(int server, struct sockaddr_in *sin,
+					int proto, int version)
+{
+	struct nlm_host	*host, **hp;
+	u32		addr;
+	int		hash;
+
+	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
+			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
+
+	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
+
+	/* Lock hash table */
+	down(&nlm_host_sema);
+
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
+		if (host->h_proto != proto)
+			continue;
+		if (host->h_version != version)
+			continue;
+		if (host->h_server != server)
+			continue;
+
+		if (nlm_cmp_addr(&host->h_addr, sin)) {
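+			/* Move the matching host to the front of its
+			 * hash chain (simple move-to-front ordering). */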
+			if (hp != nlm_hosts + hash) {
+				*hp = host->h_next;
+				host->h_next = nlm_hosts[hash];
+				nlm_hosts[hash] = host;
+			}
+			nlm_get_host(host);
+			up(&nlm_host_sema);
+			return host;
+		}
+	}
+
+	/* Oops, no host found, create it */
+	dprintk("lockd: creating host entry\n");
+
+	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
+		goto nohost;
+	memset(host, 0, sizeof(*host));
+
+	addr = sin->sin_addr.s_addr;
+	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
+
+	host->h_addr       = *sin;
+	host->h_addr.sin_port = 0;	/* ouch! */
+	host->h_version    = version;
+	host->h_proto      = proto;
+	host->h_rpcclnt    = NULL;
+	init_MUTEX(&host->h_sema);
+	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
+	atomic_set(&host->h_count, 1);
+	init_waitqueue_head(&host->h_gracewait);
+	host->h_state      = 0;			/* pseudo NSM state */
+	host->h_nsmstate   = 0;			/* real NSM state */
+	host->h_server	   = server;
+	host->h_next       = nlm_hosts[hash];
+	nlm_hosts[hash]    = host;
+	INIT_LIST_HEAD(&host->h_lockowners);
+	spin_lock_init(&host->h_lock);
+
+	if (++nrhosts > NLM_HOST_MAX)
+		next_gc = 0;
+
+nohost:
+	up(&nlm_host_sema);
+	return host;
+}
+
+struct nlm_host *
+nlm_find_client(void)
+{
+	/* Find an nlm_host for a client with h_killed == 0
+	 * and return it.
+	 */
+	int hash;
+	down(&nlm_host_sema);
+	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
+		struct nlm_host *host, **hp;
+		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
+			if (host->h_server &&
+			    host->h_killed == 0) {
+				nlm_get_host(host);
+				up(&nlm_host_sema);
+				return host;
+			}
+		}
+	}
+	up(&nlm_host_sema);
+	return NULL;
+}
+
+
+/*
+ * Create the NLM RPC client for an NLM peer
+ */
+struct rpc_clnt *
+nlm_bind_host(struct nlm_host *host)
+{
+	struct rpc_clnt	*clnt;
+	struct rpc_xprt	*xprt;
+
+	dprintk("lockd: nlm_bind_host(%08x)\n",
+			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));
+
+	/* Lock host handle */
+	down(&host->h_sema);
+
+	/* If we've already created an RPC client, check whether
+	 * an RPC rebind is required.
+	 * Note: why keep rebinding if we're on a TCP connection?
+	 */
+	if ((clnt = host->h_rpcclnt) != NULL) {
+		xprt = clnt->cl_xprt;
+		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
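+			/* Zeroing cl_port forces a fresh portmapper lookup
+			 * on the next call; see nlm_rebind_host() below. */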
+			clnt->cl_port = 0;
+			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+			dprintk("lockd: next rebind in %ld jiffies\n",
+					host->h_nextrebind - jiffies);
+		}
+	} else {
+		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
+		if (IS_ERR(xprt))
+			goto forgetit;
+
+		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
+
+		/* Existing NLM servers accept AUTH_UNIX only */
+		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
+					host->h_version, RPC_AUTH_UNIX);
+		if (IS_ERR(clnt)) {
+			xprt_destroy(xprt);
+			goto forgetit;
+		}
+		clnt->cl_autobind = 1;	/* turn on pmap queries */
+		xprt->nocong = 1;	/* No congestion control for NLM */
+		xprt->resvport = 1;	/* NLM requires a reserved port */
+
+		host->h_rpcclnt = clnt;
+	}
+
+	up(&host->h_sema);
+	return clnt;
+
+forgetit:
+	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
+	up(&host->h_sema);
+	return NULL;
+}
+
+/*
+ * Force a portmap lookup of the remote lockd port
+ */
+void
+nlm_rebind_host(struct nlm_host *host)
+{
+	dprintk("lockd: rebind host %s\n", host->h_name);
+	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
+		host->h_rpcclnt->cl_port = 0;
+		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
+	}
+}
+
+/*
+ * Increment NLM host count
+ */
+struct nlm_host * nlm_get_host(struct nlm_host *host)
+{
+	if (host) {
+		dprintk("lockd: get host %s\n", host->h_name);
+		atomic_inc(&host->h_count);
+		host->h_expires = jiffies + NLM_HOST_EXPIRE;
+	}
+	return host;
+}
+
+/*
+ * Release NLM host after use
+ */
+void nlm_release_host(struct nlm_host *host)
+{
+	if (host != NULL) {
+		dprintk("lockd: release host %s\n", host->h_name);
+		atomic_dec(&host->h_count);
+		BUG_ON(atomic_read(&host->h_count) < 0);
+	}
+}
+
+/*
+ * Shut down the hosts module.
+ * Note that this routine is called only at server shutdown time.
+ */
+void
+nlm_shutdown_hosts(void)
+{
+	struct nlm_host	*host;
+	int		i;
+
+	dprintk("lockd: shutting down host module\n");
+	down(&nlm_host_sema);
+
+	/* First, make all hosts eligible for gc */
+	dprintk("lockd: nuking all hosts...\n");
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		for (host = nlm_hosts[i]; host; host = host->h_next)
+			host->h_expires = jiffies - 1;
+	}
+
+	/* Then, perform a garbage collection pass */
+	nlm_gc_hosts();
+	up(&nlm_host_sema);
+
+	/* complain if any hosts are left */
+	if (nrhosts) {
+		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
+		dprintk("lockd: %d hosts left:\n", nrhosts);
+		for (i = 0; i < NLM_HOST_NRHASH; i++) {
+			for (host = nlm_hosts[i]; host; host = host->h_next) {
+				dprintk("       %s (cnt %d use %d exp %ld)\n",
+					host->h_name, atomic_read(&host->h_count),
+					host->h_inuse, host->h_expires);
+			}
+		}
+	}
+}
+
+/*
+ * Garbage collect any unused NLM hosts.
+ * This GC combines reference counting for async operations with
+ * mark & sweep for resources held by remote clients.
+ */
+static void
+nlm_gc_hosts(void)
+{
+	struct nlm_host	**q, *host;
+	struct rpc_clnt	*clnt;
+	int		i;
+
+	dprintk("lockd: host garbage collection\n");
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		for (host = nlm_hosts[i]; host; host = host->h_next)
+			host->h_inuse = 0;
+	}
+
+	/* Mark all hosts that hold locks, blocks or shares */
+	nlmsvc_mark_resources();
+
+	for (i = 0; i < NLM_HOST_NRHASH; i++) {
+		q = &nlm_hosts[i];
+		while ((host = *q) != NULL) {
+			if (atomic_read(&host->h_count) || host->h_inuse
+			 || time_before(jiffies, host->h_expires)) {
+				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
+					host->h_name, atomic_read(&host->h_count),
+					host->h_inuse, host->h_expires);
+				q = &host->h_next;
+				continue;
+			}
+			dprintk("lockd: delete host %s\n", host->h_name);
+			*q = host->h_next;
+			/* Don't unmonitor hosts that have been invalidated */
+			if (host->h_monitored && !host->h_killed)
+				nsm_unmonitor(host);
+			if ((clnt = host->h_rpcclnt) != NULL) {
+				if (atomic_read(&clnt->cl_users)) {
+					printk(KERN_WARNING
+						"lockd: active RPC handle\n");
+					clnt->cl_dead = 1;
+				} else {
+					rpc_destroy_client(host->h_rpcclnt);
+				}
+			}
+			BUG_ON(!list_empty(&host->h_lockowners));
+			kfree(host);
+			nrhosts--;
+		}
+	}
+
+	next_gc = jiffies + NLM_HOST_COLLECT;
+}
+
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
new file mode 100644
index 0000000..6fc1beb
--- /dev/null
+++ b/fs/lockd/mon.c
@@ -0,0 +1,246 @@
+/*
+ * linux/fs/lockd/mon.c
+ *
+ * The kernel statd client.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/kernel.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_MONITOR
+
+static struct rpc_clnt *	nsm_create(void);
+
+static struct rpc_program	nsm_program;
+
+/*
+ * Local NSM state
+ */
+u32				nsm_local_state;
+
+/*
+ * Common procedure for SM_MON/SM_UNMON calls
+ */
+static int
+nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
+{
+	struct rpc_clnt	*clnt;
+	int		status;
+	struct nsm_args	args;
+
+	clnt = nsm_create();
+	if (IS_ERR(clnt)) {
+		status = PTR_ERR(clnt);
+		goto out;
+	}
+
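+	/* args.proto packs the transport protocol in the upper bits and the
+	 * server/client flag in bit 0; the SM_NOTIFY handlers decode it with
+	 * ">> 1" and "& 1". */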
+	args.addr = host->h_addr.sin_addr.s_addr;
+	args.proto= (host->h_proto<<1) | host->h_server;
+	args.prog = NLM_PROGRAM;
+	args.vers = host->h_version;
+	args.proc = NLMPROC_NSM_NOTIFY;
+	memset(res, 0, sizeof(*res));
+
+	status = rpc_call(clnt, proc, &args, res, 0);
+	if (status < 0)
+		printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
+			status);
+	else
+		status = 0;
+ out:
+	return status;
+}
+
+/*
+ * Set up monitoring of a remote host
+ */
+int
+nsm_monitor(struct nlm_host *host)
+{
+	struct nsm_res	res;
+	int		status;
+
+	dprintk("lockd: nsm_monitor(%s)\n", host->h_name);
+
+	status = nsm_mon_unmon(host, SM_MON, &res);
+
+	if (status < 0 || res.status != 0)
+		printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name);
+	else
+		host->h_monitored = 1;
+	return status;
+}
+
+/*
+ * Cease to monitor remote host
+ */
+int
+nsm_unmonitor(struct nlm_host *host)
+{
+	struct nsm_res	res;
+	int		status;
+
+	dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
+
+	status = nsm_mon_unmon(host, SM_UNMON, &res);
+	if (status < 0)
+		printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", host->h_name);
+	else
+		host->h_monitored = 0;
+	return status;
+}
+
+/*
+ * Create NSM client for the local host
+ */
+static struct rpc_clnt *
+nsm_create(void)
+{
+	struct rpc_xprt		*xprt;
+	struct rpc_clnt		*clnt;
+	struct sockaddr_in	sin;
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	sin.sin_port = 0;
+
+	xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL);
+	if (IS_ERR(xprt))
+		return (struct rpc_clnt *)xprt;
+
+	clnt = rpc_create_client(xprt, "localhost",
+				&nsm_program, SM_VERSION,
+				RPC_AUTH_NULL);
+	if (IS_ERR(clnt))
+		goto out_destroy;
+	clnt->cl_softrtry = 1;
+	clnt->cl_chatty   = 1;
+	clnt->cl_oneshot  = 1;
+	xprt->resvport = 1;	/* NSM requires a reserved port */
+	return clnt;
+
+out_destroy:
+	xprt_destroy(xprt);
+	return clnt;
+}
+
+/*
+ * XDR functions for NSM.
+ */
+
+static u32 *
+xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	char	buffer[20];
+
+	/*
+	 * Use the dotted-quad IP address of the remote host as
+	 * identifier. Linux statd always looks up the canonical
+	 * hostname first for whatever remote hostname it receives,
+	 * so this works alright.
+	 */
+	sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
+	if (!(p = xdr_encode_string(p, buffer))
+	 || !(p = xdr_encode_string(p, system_utsname.nodename)))
+		return ERR_PTR(-EIO);
+	*p++ = htonl(argp->prog);
+	*p++ = htonl(argp->vers);
+	*p++ = htonl(argp->proc);
+
+	return p;
+}
+
+static int
+xdr_encode_mon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	p = xdr_encode_common(rqstp, p, argp);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	*p++ = argp->addr;
+	*p++ = argp->vers;
+	*p++ = argp->proto;
+	*p++ = 0;
+	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
+	return 0;
+}
+
+static int
+xdr_encode_unmon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+	p = xdr_encode_common(rqstp, p, argp);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
+	return 0;
+}
+
+static int
+xdr_decode_stat_res(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+	resp->status = ntohl(*p++);
+	resp->state = ntohl(*p++);
+	dprintk("nsm: xdr_decode_stat_res status %d state %d\n",
+			resp->status, resp->state);
+	return 0;
+}
+
+static int
+xdr_decode_stat(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+	resp->state = ntohl(*p++);
+	return 0;
+}
+
+#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
+#define SM_my_id_sz	(3+1+SM_my_name_sz)
+#define SM_mon_id_sz	(1+XDR_QUADLEN(20)+SM_my_id_sz)
+#define SM_mon_sz	(SM_mon_id_sz+4)
+#define SM_monres_sz	2
+#define SM_unmonres_sz	1
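+
+/* All sizes above are in 32-bit XDR words; the .p_bufsiz fields below
+ * shift left by 2 to convert to bytes. */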
+
+#ifndef MAX
+# define MAX(a, b)	(((a) > (b))? (a) : (b))
+#endif
+
+static struct rpc_procinfo	nsm_procedures[] = {
+[SM_MON] = {
+		.p_proc		= SM_MON,
+		.p_encode	= (kxdrproc_t) xdr_encode_mon,
+		.p_decode	= (kxdrproc_t) xdr_decode_stat_res,
+		.p_bufsiz	= MAX(SM_mon_sz, SM_monres_sz) << 2,
+	},
+[SM_UNMON] = {
+		.p_proc		= SM_UNMON,
+		.p_encode	= (kxdrproc_t) xdr_encode_unmon,
+		.p_decode	= (kxdrproc_t) xdr_decode_stat,
+		.p_bufsiz	= MAX(SM_mon_id_sz, SM_unmonres_sz) << 2,
+	},
+};
+
+static struct rpc_version	nsm_version1 = {
+		.number		= 1, 
+		.nrprocs	= sizeof(nsm_procedures)/sizeof(nsm_procedures[0]),
+		.procs		= nsm_procedures
+};
+
+static struct rpc_version *	nsm_version[] = {
+	[1] = &nsm_version1,
+};
+
+static struct rpc_stat		nsm_stats;
+
+static struct rpc_program	nsm_program = {
+		.name		= "statd",
+		.number		= SM_PROGRAM,
+		.nrvers		= sizeof(nsm_version)/sizeof(nsm_version[0]),
+		.version	= nsm_version,
+		.stats		= &nsm_stats
+};
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
new file mode 100644
index 0000000..b82e470
--- /dev/null
+++ b/fs/lockd/svc.c
@@ -0,0 +1,519 @@
+/*
+ * linux/fs/lockd/svc.c
+ *
+ * This is the central lockd service.
+ *
+ * FIXME: Separate the lockd NFS server functionality from the lockd NFS
+ * 	  client functionality. Oh why didn't Sun create two separate
+ *	  services in the first place?
+ *
+ * Authors:	Olaf Kirch (okir@monad.swb.de)
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/lockd/lockd.h>
+#include <linux/nfs.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVC
+#define LOCKD_BUFSIZE		(1024 + NLMSVC_XDRSIZE)
+#define ALLOWED_SIGS		(sigmask(SIGKILL))
+
+static struct svc_program	nlmsvc_program;
+
+struct nlmsvc_binding *		nlmsvc_ops;
+EXPORT_SYMBOL(nlmsvc_ops);
+
+static DECLARE_MUTEX(nlmsvc_sema);
+static unsigned int		nlmsvc_users;
+static pid_t			nlmsvc_pid;
+int				nlmsvc_grace_period;
+unsigned long			nlmsvc_timeout;
+
+static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
+
+/*
+ * These can be set at insmod time (useful for NFS as root filesystem),
+ * and also changed through the sysctl interface.  -- Jamie Lokier, Aug 2003
+ */
+static unsigned long		nlm_grace_period;
+static unsigned long		nlm_timeout = LOCKD_DFLT_TIMEO;
+static int			nlm_udpport, nlm_tcpport;
+
+/*
+ * Constants needed for the sysctl interface.
+ */
+static const unsigned long	nlm_grace_period_min = 0;
+static const unsigned long	nlm_grace_period_max = 240;
+static const unsigned long	nlm_timeout_min = 3;
+static const unsigned long	nlm_timeout_max = 20;
+static const int		nlm_port_min = 0, nlm_port_max = 65535;
+
+static struct ctl_table_header * nlm_sysctl_table;
+
+static unsigned long set_grace_period(void)
+{
+	unsigned long grace_period;
+
+	/* Note: nlm_timeout should always be nonzero */
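+	/* Round the configured grace period up to a whole multiple of
+	 * nlm_timeout and convert to jiffies; if none was configured,
+	 * default to five timeout intervals. */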
+	if (nlm_grace_period)
+		grace_period = ((nlm_grace_period + nlm_timeout - 1)
+				/ nlm_timeout) * nlm_timeout * HZ;
+	else
+		grace_period = nlm_timeout * 5 * HZ;
+	nlmsvc_grace_period = 1;
+	return grace_period + jiffies;
+}
+
+static inline void clear_grace_period(void)
+{
+	nlmsvc_grace_period = 0;
+}
+
+/*
+ * This is the lockd kernel thread
+ */
+static void
+lockd(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	int		err = 0;
+	unsigned long grace_period_expire;
+
+	/* Lock module and set up kernel thread */
+	/* lockd_up is waiting for us to start up, so it will
+	 * be holding a reference to this module; it is therefore
+	 * safe to just claim another reference.
+	 */
+	__module_get(THIS_MODULE);
+	lock_kernel();
+
+	/*
+	 * Let our maker know we're running.
+	 */
+	nlmsvc_pid = current->pid;
+	up(&lockd_start);
+
+	daemonize("lockd");
+
+	/* Process request with signals blocked, but allow SIGKILL.  */
+	allow_signal(SIGKILL);
+
+	/* kick rpciod */
+	rpciod_up();
+
+	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
+
+	if (!nlm_timeout)
+		nlm_timeout = LOCKD_DFLT_TIMEO;
+	nlmsvc_timeout = nlm_timeout * HZ;
+
+	grace_period_expire = set_grace_period();
+
+	/*
+	 * The main request loop. We don't terminate until the last
+	 * NFS mount or NFS daemon has gone away, and we've been sent a
+	 * signal, or else another process has taken over our job.
+	 */
+	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
+		long timeout = MAX_SCHEDULE_TIMEOUT;
+
+		if (signalled()) {
+			flush_signals(current);
+			if (nlmsvc_ops) {
+				nlmsvc_invalidate_all();
+				grace_period_expire = set_grace_period();
+			}
+		}
+
+		/*
+		 * Retry any blocked locks that have been notified by
+		 * the VFS. Don't do this during grace period.
+		 * (Theoretically, there shouldn't even be blocked locks
+		 * during grace period).
+		 */
+		if (!nlmsvc_grace_period) {
+			timeout = nlmsvc_retry_blocked();
+		} else if (time_before(grace_period_expire, jiffies))
+			clear_grace_period();
+
+		/*
+		 * Find a socket with data available and call its
+		 * recvfrom routine.
+		 */
+		err = svc_recv(serv, rqstp, timeout);
+		if (err == -EAGAIN || err == -EINTR)
+			continue;
+		if (err < 0) {
+			printk(KERN_WARNING
+			       "lockd: terminating on error %d\n",
+			       -err);
+			break;
+		}
+
+		dprintk("lockd: request from %08x\n",
+			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));
+
+		svc_process(serv, rqstp);
+
+	}
+
+	/*
+	 * Check whether there's a new lockd process before
+	 * shutting down the hosts and clearing the slot.
+	 */
+	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
+		if (nlmsvc_ops)
+			nlmsvc_invalidate_all();
+		nlm_shutdown_hosts();
+		nlmsvc_pid = 0;
+	} else
+		printk(KERN_DEBUG
+			"lockd: new process, skipping host shutdown\n");
+	wake_up(&lockd_exit);
+		
+	/* Exit the RPC thread */
+	svc_exit_thread(rqstp);
+
+	/* release rpciod */
+	rpciod_down();
+
+	/* Release module */
+	unlock_kernel();
+	module_put_and_exit(0);
+}
+
+/*
+ * Bring up the lockd process if it's not already up.
+ */
+int
+lockd_up(void)
+{
+	static int		warned;
+	struct svc_serv *	serv;
+	int			error = 0;
+
+	down(&nlmsvc_sema);
+	/*
+	 * Unconditionally increment the user count ... this is
+	 * the number of clients who _want_ a lockd process.
+	 */
+	nlmsvc_users++; 
+	/*
+	 * Check whether we're already up and running.
+	 */
+	if (nlmsvc_pid)
+		goto out;
+
+	/*
+	 * Sanity check: if there's no pid,
+	 * we should be the first user ...
+	 */
+	if (nlmsvc_users > 1)
+		printk(KERN_WARNING
+			"lockd_up: no pid, %d users??\n", nlmsvc_users);
+
+	error = -ENOMEM;
+	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE);
+	if (!serv) {
+		printk(KERN_WARNING "lockd_up: create service failed\n");
+		goto out;
+	}
+
+	if ((error = svc_makesock(serv, IPPROTO_UDP, nlm_udpport)) < 0 
+#ifdef CONFIG_NFSD_TCP
+	 || (error = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport)) < 0
+#endif
+		) {
+		if (warned++ == 0) 
+			printk(KERN_WARNING
+				"lockd_up: makesock failed, error=%d\n", error);
+		goto destroy_and_out;
+	} 
+	warned = 0;
+
+	/*
+	 * Create the kernel thread and wait for it to start.
+	 */
+	error = svc_create_thread(lockd, serv);
+	if (error) {
+		printk(KERN_WARNING
+			"lockd_up: create thread failed, error=%d\n", error);
+		goto destroy_and_out;
+	}
+	down(&lockd_start);
+
+	/*
+	 * Note: svc_serv structures have an initial use count of 1,
+	 * so we exit through here on both success and failure.
+	 */
+destroy_and_out:
+	svc_destroy(serv);
+out:
+	up(&nlmsvc_sema);
+	return error;
+}
+EXPORT_SYMBOL(lockd_up);
+
+/*
+ * Decrement the user count and bring down lockd if we're the last.
+ */
+void
+lockd_down(void)
+{
+	static int warned;
+
+	down(&nlmsvc_sema);
+	if (nlmsvc_users) {
+		if (--nlmsvc_users)
+			goto out;
+	} else
+		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
+
+	if (!nlmsvc_pid) {
+		if (warned++ == 0)
+			printk(KERN_WARNING "lockd_down: no lockd running.\n"); 
+		goto out;
+	}
+	warned = 0;
+
+	kill_proc(nlmsvc_pid, SIGKILL, 1);
+	/*
+	 * Wait for the lockd process to exit, but since we're holding
+	 * the lockd semaphore, we can't wait around forever ...
+	 */
+	clear_thread_flag(TIF_SIGPENDING);
+	interruptible_sleep_on_timeout(&lockd_exit, HZ);
+	if (nlmsvc_pid) {
+		printk(KERN_WARNING 
+			"lockd_down: lockd failed to exit, clearing pid\n");
+		nlmsvc_pid = 0;
+	}
+	spin_lock_irq(&current->sighand->siglock);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+out:
+	up(&nlmsvc_sema);
+}
+EXPORT_SYMBOL(lockd_down);
+
+/*
+ * Sysctl parameters (same as module parameters, different interface).
+ */
+
+/* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */
+#define CTL_UNNUMBERED		-2
+
+static ctl_table nlm_sysctls[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_grace_period",
+		.data		= &nlm_grace_period,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+		.extra1		= (unsigned long *) &nlm_grace_period_min,
+		.extra2		= (unsigned long *) &nlm_grace_period_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_timeout",
+		.data		= &nlm_timeout,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+		.extra1		= (unsigned long *) &nlm_timeout_min,
+		.extra2		= (unsigned long *) &nlm_timeout_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_udpport",
+		.data		= &nlm_udpport,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (int *) &nlm_port_min,
+		.extra2		= (int *) &nlm_port_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nlm_tcpport",
+		.data		= &nlm_tcpport,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (int *) &nlm_port_min,
+		.extra2		= (int *) &nlm_port_max,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nlm_sysctl_dir[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nfs",
+		.mode		= 0555,
+		.child		= nlm_sysctls,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nlm_sysctl_root[] = {
+	{
+		.ctl_name	= CTL_FS,
+		.procname	= "fs",
+		.mode		= 0555,
+		.child		= nlm_sysctl_dir,
+	},
+	{ .ctl_name = 0 }
+};
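+
+/*
+ * The tables above expose the parameters as /proc/sys/fs/nfs/nlm_grace_period,
+ * nlm_timeout, nlm_udpport and nlm_tcpport.
+ */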
+
+/*
+ * Module (and driverfs) parameters.
+ */
+
+#define param_set_min_max(name, type, which_strtol, min, max)		\
+static int param_set_##name(const char *val, struct kernel_param *kp)	\
+{									\
+	char *endp;							\
+	__typeof__(type) num = which_strtol(val, &endp, 0);		\
+	if (endp == val || *endp || num < (min) || num > (max))		\
+		return -EINVAL;						\
+	*((int *) kp->arg) = num;					\
+	return 0;							\
+}
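+
+/*
+ * param_set_min_max() generates the param_set_port(), param_set_grace_period()
+ * and param_set_timeout() setters used with module_param_call() further down;
+ * out-of-range or non-numeric values are rejected with -EINVAL.
+ */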
+
+static inline int is_callback(u32 proc)
+{
+	return proc == NLMPROC_GRANTED
+		|| proc == NLMPROC_GRANTED_MSG
+		|| proc == NLMPROC_TEST_RES
+		|| proc == NLMPROC_LOCK_RES
+		|| proc == NLMPROC_CANCEL_RES
+		|| proc == NLMPROC_UNLOCK_RES
+		|| proc == NLMPROC_NSM_NOTIFY;
+}
+
+
+static int lockd_authenticate(struct svc_rqst *rqstp)
+{
+	rqstp->rq_client = NULL;
+	switch (rqstp->rq_authop->flavour) {
+		case RPC_AUTH_NULL:
+		case RPC_AUTH_UNIX:
+			if (rqstp->rq_proc == 0)
+				return SVC_OK;
+			if (is_callback(rqstp->rq_proc)) {
+				/* Leave it to individual procedures to
+				 * call nlmsvc_lookup_host(rqstp)
+				 */
+				return SVC_OK;
+			}
+			return svc_set_client(rqstp);
+	}
+	return SVC_DENIED;
+}
+
+
+param_set_min_max(port, int, simple_strtol, 0, 65535)
+param_set_min_max(grace_period, unsigned long, simple_strtoul,
+		  nlm_grace_period_min, nlm_grace_period_max)
+param_set_min_max(timeout, unsigned long, simple_strtoul,
+		  nlm_timeout_min, nlm_timeout_max)
+
+MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
+MODULE_LICENSE("GPL");
+
+module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
+		  &nlm_grace_period, 0644);
+module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
+		  &nlm_timeout, 0644);
+module_param_call(nlm_udpport, param_set_port, param_get_int,
+		  &nlm_udpport, 0644);
+module_param_call(nlm_tcpport, param_set_port, param_get_int,
+		  &nlm_tcpport, 0644);
+
+/*
+ * Initialising and terminating the module.
+ */
+
+static int __init init_nlm(void)
+{
+	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root, 0);
+	return nlm_sysctl_table ? 0 : -ENOMEM;
+}
+
+static void __exit exit_nlm(void)
+{
+	/* FIXME: delete all NLM clients */
+	nlm_shutdown_hosts();
+	unregister_sysctl_table(nlm_sysctl_table);
+}
+
+module_init(init_nlm);
+module_exit(exit_nlm);
+
+/*
+ * Define NLM program and procedures
+ */
+static struct svc_version	nlmsvc_version1 = {
+		.vs_vers	= 1,
+		.vs_nproc	= 17,
+		.vs_proc	= nlmsvc_procedures,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+static struct svc_version	nlmsvc_version3 = {
+		.vs_vers	= 3,
+		.vs_nproc	= 24,
+		.vs_proc	= nlmsvc_procedures,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+#ifdef CONFIG_LOCKD_V4
+static struct svc_version	nlmsvc_version4 = {
+		.vs_vers	= 4,
+		.vs_nproc	= 24,
+		.vs_proc	= nlmsvc_procedures4,
+		.vs_xdrsize	= NLMSVC_XDRSIZE,
+};
+#endif
+static struct svc_version *	nlmsvc_version[] = {
+	[1] = &nlmsvc_version1,
+	[3] = &nlmsvc_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlmsvc_version4,
+#endif
+};
+
+static struct svc_stat		nlmsvc_stats;
+
+#define NLM_NRVERS	(sizeof(nlmsvc_version)/sizeof(nlmsvc_version[0]))
+static struct svc_program	nlmsvc_program = {
+	.pg_prog		= NLM_PROGRAM,		/* program number */
+	.pg_nvers		= NLM_NRVERS,		/* number of entries in nlmsvc_version */
+	.pg_vers		= nlmsvc_version,	/* version table */
+	.pg_name		= "lockd",		/* service name */
+	.pg_class		= "nfsd",		/* share authentication with nfsd */
+	.pg_stats		= &nlmsvc_stats,	/* stats table */
+	.pg_authenticate = &lockd_authenticate	/* export authentication */
+};
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
new file mode 100644
index 0000000..489670e
--- /dev/null
+++ b/fs/lockd/svc4proc.c
@@ -0,0 +1,580 @@
+/*
+ * linux/fs/lockd/svc4proc.c
+ *
+ * Lockd server procedures. We don't implement the NLM_*_RES 
+ * procedures because we don't use the async procedures.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+static u32	nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
+static void	nlm4svc_callback_exit(struct rpc_task *);
+
+/*
+ * Obtain client and file from arguments
+ */
+static u32
+nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+			struct nlm_host **hostp, struct nlm_file **filp)
+{
+	struct nlm_host		*host = NULL;
+	struct nlm_file		*file = NULL;
+	struct nlm_lock		*lock = &argp->lock;
+	u32			error = 0;
+
+	/* nfsd callbacks must have been installed for this procedure */
+	if (!nlmsvc_ops)
+		return nlm_lck_denied_nolocks;
+
+	/* Obtain host handle */
+	if (!(host = nlmsvc_lookup_host(rqstp))
+	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
+		goto no_locks;
+	*hostp = host;
+
+	/* Obtain file pointer. Not used by FREE_ALL call. */
+	if (filp != NULL) {
+		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+			goto no_locks;
+		*filp = file;
+
+		/* Set up the missing parts of the file_lock structure */
+		lock->fl.fl_file  = file->f_file;
+		lock->fl.fl_owner = (fl_owner_t) host;
+		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+	}
+
+	return 0;
+
+no_locks:
+	if (host)
+		nlm_release_host(host);
+ 	if (error)
+		return error;	
+	return nlm_lck_denied_nolocks;
+}
+
+/*
+ * NULL: Test for presence of service
+ */
+static int
+nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	dprintk("lockd: NULL          called\n");
+	return rpc_success;
+}
+
+/*
+ * TEST: Check for conflicting lock
+ */
+static int
+nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: TEST4        called\n");
+	resp->cookie = argp->cookie;
+
+	/* Don't accept test requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now check for conflicting locks */
+	resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock);
+
+	dprintk("lockd: TEST4          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: LOCK          called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+#if 0
+	/* If supplied state doesn't match current state, we assume it's
+	 * an old request that time-warped somehow. Any error return would
+	 * do in this case because it's irrelevant anyway.
+	 *
+	 * NB: We don't retrieve the remote host's state yet.
+	 */
+	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
+		resp->status = nlm_lck_denied_nolocks;
+	} else
+#endif
+
+	/* Now try to lock the file */
+	resp->status = nlmsvc_lock(rqstp, file, &argp->lock,
+					argp->block, &argp->cookie);
+
+	dprintk("lockd: LOCK          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: CANCEL        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Try to cancel request. */
+	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
+
+	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNLOCK: release a lock
+ */
+static int
+nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNLOCK        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to remove the lock */
+	resp->status = nlmsvc_unlock(file, &argp->lock);
+
+	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * GRANTED: A server calls us to tell us that a process's lock request
+ * was granted.
+ */
+static int
+nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	resp->cookie = argp->cookie;
+
+	dprintk("lockd: GRANTED       called\n");
+	resp->status = nlmclnt_grant(&argp->lock);
+	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
+	return rpc_success;
+}
+
+/*
+ * `Async' versions of the above service routines. They aren't really
+ * asynchronous, because we send the callback before the reply proper. I hope this
+ * doesn't break any clients.
+ */
+static int
+nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: TEST_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: LOCK_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					       void	       *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: CANCEL_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                               void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: UNLOCK_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                                void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: GRANTED_MSG   called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0)
+		stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
+	return stat;
+}
+
+/*
+ * SHARE: create a DOS share or alter an existing share.
+ */
+static int
+nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+				          struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: SHARE         called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to create the share */
+	resp->status = nlmsvc_share_file(host, file, argp);
+
+	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNSHARE: Release a DOS share.
+ */
+static int
+nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNSHARE       called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to lock the file */
+	resp->status = nlmsvc_unshare_file(host, file, argp);
+
+	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * NM_LOCK: Create an unmonitored lock
+ */
+static int
+nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	dprintk("lockd: NM_LOCK       called\n");
+
+	argp->monitor = 0;		/* just clean the monitor flag */
+	return nlm4svc_proc_lock(rqstp, argp, resp);
+}
+
+/*
+ * FREE_ALL: Release all locks and shares held by client
+ */
+static int
+nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void            *resp)
+{
+	struct nlm_host	*host;
+
+	/* Obtain client */
+	if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL))
+		return rpc_success;
+
+	nlmsvc_free_host_resources(host);
+	nlm_release_host(host);
+	return rpc_success;
+}
+
+/*
+ * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+ */
+static int
+nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+					      void	        *resp)
+{
+	struct sockaddr_in	saddr = rqstp->rq_addr;
+	int			vers = argp->vers;
+	int			prot = argp->proto >> 1;
+
+	struct nlm_host		*host;
+
+	dprintk("lockd: SM_NOTIFY     called\n");
+	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
+	 || ntohs(saddr.sin_port) >= 1024) {
+		printk(KERN_WARNING
+			"lockd: rejected NSM callback from %08x:%d\n",
+			ntohl(rqstp->rq_addr.sin_addr.s_addr),
+			ntohs(rqstp->rq_addr.sin_port));
+		return rpc_system_err;
+	}
+
+	/* Obtain the host pointer for this NFS server and try to
+	 * reclaim all locks we hold on this server.
+	 */
+	saddr.sin_addr.s_addr = argp->addr;
+
+	if ((argp->proto & 1)==0) {
+		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
+			nlmclnt_recovery(host, argp->state);
+			nlm_release_host(host);
+		}
+	} else {
+		/* If we run on an NFS server, delete all locks held by the client */
+
+		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
+			nlmsvc_free_host_resources(host);
+			nlm_release_host(host);
+		}
+	}
+	return rpc_success;
+}
+
+/*
+ * client sent a GRANTED_RES, let's remove the associated block
+ */
+static int
+nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+                                                void            *resp)
+{
+        if (!nlmsvc_ops)
+                return rpc_success;
+
+        dprintk("lockd: GRANTED_RES   called\n");
+
+        nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
+        return rpc_success;
+}
+
+
+
+/*
+ * This is the generic lockd callback for async RPC calls
+ */
+static u32
+nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_rqst	*call;
+
+	if (!(call = nlmclnt_alloc_call()))
+		return rpc_system_err;
+
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (!host) {
+		kfree(call);
+		return rpc_system_err;
+	}
+
+	call->a_flags = RPC_TASK_ASYNC;
+	call->a_host  = host;
+	memcpy(&call->a_args, resp, sizeof(*resp));
+
+	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
+		goto error;
+
+	return rpc_success;
+ error:
+	kfree(call);
+	nlm_release_host(host);
+	return rpc_system_err;
+}
+
+static void
+nlm4svc_callback_exit(struct rpc_task *task)
+{
+	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: %4d callback failed (errno = %d)\n",
+					task->tk_pid, -task->tk_status);
+	}
+	nlm_release_host(call->a_host);
+	kfree(call);
+}
+
+/*
+ * NLM Server procedures.
+ */
+
+#define nlm4svc_encode_norep	nlm4svc_encode_void
+#define nlm4svc_decode_norep	nlm4svc_decode_void
+#define nlm4svc_decode_testres	nlm4svc_decode_void
+#define nlm4svc_decode_lockres	nlm4svc_decode_void
+#define nlm4svc_decode_unlockres	nlm4svc_decode_void
+#define nlm4svc_decode_cancelres	nlm4svc_decode_void
+#define nlm4svc_decode_grantedres	nlm4svc_decode_void
+
+#define nlm4svc_proc_none	nlm4svc_proc_null
+#define nlm4svc_proc_test_res	nlm4svc_proc_null
+#define nlm4svc_proc_lock_res	nlm4svc_proc_null
+#define nlm4svc_proc_cancel_res	nlm4svc_proc_null
+#define nlm4svc_proc_unlock_res	nlm4svc_proc_null
+
+struct nlm_void			{ int dummy; };
+
+#define PROC(name, xargt, xrest, argt, rest, respsize)	\
+ { .pc_func	= (svc_procfunc) nlm4svc_proc_##name,	\
+   .pc_decode	= (kxdrproc_t) nlm4svc_decode_##xargt,	\
+   .pc_encode	= (kxdrproc_t) nlm4svc_encode_##xrest,	\
+   .pc_release	= NULL,					\
+   .pc_argsize	= sizeof(struct nlm_##argt),		\
+   .pc_ressize	= sizeof(struct nlm_##rest),		\
+   .pc_xdrressize = respsize,				\
+ }
+#define	Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
+#define	No	(1+1024/4)				/* netobj */
+#define	St	1					/* status */
+#define	Rg	4					/* range (offset + length) */
+struct svc_procedure		nlmsvc_procedures4[] = {
+  PROC(null,		void,		void,		void,	void, 1),
+  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
+  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
+  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
+  PROC(granted,		testargs,	res,		args,	res, Ck+St),
+  PROC(test_msg,	testargs,	norep,		args,	void, 1),
+  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
+  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
+  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
+  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
+  PROC(test_res,	testres,	norep,		res,	void, 1),
+  PROC(lock_res,	lockres,	norep,		res,	void, 1),
+  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
+  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
+  PROC(granted_res,	res,		norep,		res,	void, 1),
+  /* statd callback */
+  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(none,		void,		void,		void,	void, 0),
+  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(free_all,	notify,		void,		args,	void, 1),
+
+};
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
new file mode 100644
index 0000000..49f9597
--- /dev/null
+++ b/fs/lockd/svclock.c
@@ -0,0 +1,686 @@
+/*
+ * linux/fs/lockd/svclock.c
+ *
+ * Handling of server-side locks, mostly of the blocked variety.
+ * This is the ugliest part of lockd because we tread on very thin ice.
+ * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
+ * IMNSHO introducing the grant callback into the NLM protocol was one
+ * of the worst ideas Sun ever had. Except maybe for the idea of doing
+ * NFS file locking at all.
+ *
+ * I'm trying hard to avoid race conditions by protecting most accesses
+ * to a file's list of blocked locks through a semaphore. The global
+ * list of blocked locks is not protected in this fashion, however.
+ * Therefore, some functions (such as the RPC callback for the async grant
+ * call) move blocked locks towards the head of the list *while some other
+ * process might be traversing it*. This should not be a problem in
+ * practice, because this will only cause functions traversing the list
+ * to visit some blocks twice.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/nlm.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVCLOCK
+
+#ifdef CONFIG_LOCKD_V4
+#define nlm_deadlock	nlm4_deadlock
+#else
+#define nlm_deadlock	nlm_lck_denied
+#endif
+
+static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
+static int	nlmsvc_remove_block(struct nlm_block *block);
+static void	nlmsvc_grant_callback(struct rpc_task *task);
+
+/*
+ * The list of blocked locks to retry
+ */
+static struct nlm_block *	nlm_blocked;
+
+/*
+ * Insert a blocked lock into the global list
+ */
+static void
+nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+{
+	struct nlm_block **bp, *b;
+
+	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
+	if (block->b_queued)
+		nlmsvc_remove_block(block);
+	bp = &nlm_blocked;
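+	/*
+	 * Convert the relative timeout to absolute jiffies (nudging it past
+	 * the NLM_NEVER sentinel if they happen to collide) and keep the list
+	 * sorted by expiry time, with NLM_NEVER entries at the tail.
+	 */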
+	if (when != NLM_NEVER) {
+		if ((when += jiffies) == NLM_NEVER)
+			when ++;
+		while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER)
+			bp = &b->b_next;
+	} else
+		while ((b = *bp) != 0)
+			bp = &b->b_next;
+
+	block->b_queued = 1;
+	block->b_when = when;
+	block->b_next = b;
+	*bp = block;
+}
+
+/*
+ * Remove a block from the global list
+ */
+static int
+nlmsvc_remove_block(struct nlm_block *block)
+{
+	struct nlm_block **bp, *b;
+
+	if (!block->b_queued)
+		return 1;
+	for (bp = &nlm_blocked; (b = *bp) != 0; bp = &b->b_next) {
+		if (b == block) {
+			*bp = block->b_next;
+			block->b_queued = 0;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a block for a given lock and optionally remove it from
+ * the list.
+ */
+static struct nlm_block *
+nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
+{
+	struct nlm_block	**head, *block;
+	struct file_lock	*fl;
+
+	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
+				file, lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end, lock->fl.fl_type);
+	for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) {
+		fl = &block->b_call.a_args.lock.fl;
+		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
+				block->b_file, fl->fl_pid,
+				(long long)fl->fl_start,
+				(long long)fl->fl_end, fl->fl_type,
+				nlmdbg_cookie2a(&block->b_call.a_args.cookie));
+		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
+			if (remove) {
+				*head = block->b_next;
+				block->b_queued = 0;
+			}
+			return block;
+		}
+	}
+
+	return NULL;
+}
+
+static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
+{
+	if(a->len != b->len)
+		return 0;
+	if(memcmp(a->data,b->data,a->len))
+		return 0;
+	return 1;
+}
+
+/*
+ * Find a block with a given NLM cookie.
+ */
+static inline struct nlm_block *
+nlmsvc_find_block(struct nlm_cookie *cookie,  struct sockaddr_in *sin)
+{
+	struct nlm_block *block;
+
+	for (block = nlm_blocked; block; block = block->b_next) {
+		dprintk("cookie: head of blocked queue %p, block %p\n", 
+			nlm_blocked, block);
+		if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie)
+				&& nlm_cmp_addr(sin, &block->b_host->h_addr))
+			break;
+	}
+
+	return block;
+}
+
+/*
+ * Create a block and initialize it.
+ *
+ * Note: we explicitly set the cookie of the grant reply to that of
+ * the blocked lock request. The spec explicitly mentions that the client
+ * should _not_ rely on the callback containing the same cookie as the
+ * request, but (as I found out later) that's because some implementations
+ * do just this. Never mind the standards committees, they support our
+ * logging industries.
+ */
+static inline struct nlm_block *
+nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
+				struct nlm_lock *lock, struct nlm_cookie *cookie)
+{
+	struct nlm_block	*block;
+	struct nlm_host		*host;
+	struct nlm_rqst		*call;
+
+	/* Create host handle for callback */
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (host == NULL)
+		return NULL;
+
+	/* Allocate memory for block, and initialize arguments */
+	if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL)))
+		goto failed;
+	memset(block, 0, sizeof(*block));
+	locks_init_lock(&block->b_call.a_args.lock.fl);
+	locks_init_lock(&block->b_call.a_res.lock.fl);
+
+	if (!nlmclnt_setgrantargs(&block->b_call, lock))
+		goto failed_free;
+
+	/* Set notifier function for VFS, and init args */
+	block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
+	block->b_call.a_args.cookie = *cookie;	/* see above */
+
+	dprintk("lockd: created block %p...\n", block);
+
+	/* Create and initialize the block */
+	block->b_daemon = rqstp->rq_server;
+	block->b_host   = host;
+	block->b_file   = file;
+
+	/* Add to file's list of blocks */
+	block->b_fnext  = file->f_blocks;
+	file->f_blocks  = block;
+
+	/* Set up RPC arguments for callback */
+	call = &block->b_call;
+	call->a_host    = host;
+	call->a_flags   = RPC_TASK_ASYNC;
+
+	return block;
+
+failed_free:
+	kfree(block);
+failed:
+	nlm_release_host(host);
+	return NULL;
+}
+
+/*
+ * Delete a block. If the lock was cancelled or the grant callback
+ * failed, unlock is set to 1.
+ * It is the caller's responsibility to check whether the file
+ * can be closed hereafter.
+ */
+static void
+nlmsvc_delete_block(struct nlm_block *block, int unlock)
+{
+	struct file_lock	*fl = &block->b_call.a_args.lock.fl;
+	struct nlm_file		*file = block->b_file;
+	struct nlm_block	**bp;
+
+	dprintk("lockd: deleting block %p...\n", block);
+
+	/* Remove block from list */
+	nlmsvc_remove_block(block);
+	if (fl->fl_next)
+		posix_unblock_lock(file->f_file, fl);
+	if (unlock) {
+		fl->fl_type = F_UNLCK;
+		posix_lock_file(file->f_file, fl);
+		block->b_granted = 0;
+	}
+
+	/* If the block is in the middle of a GRANT callback,
+	 * don't kill it yet. */
+	if (block->b_incall) {
+		nlmsvc_insert_block(block, NLM_NEVER);
+		block->b_done = 1;
+		return;
+	}
+
+	/* Remove block from file's list of blocks */
+	for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) {
+		if (*bp == block) {
+			*bp = block->b_fnext;
+			break;
+		}
+	}
+
+	if (block->b_host)
+		nlm_release_host(block->b_host);
+	nlmclnt_freegrantargs(&block->b_call);
+	kfree(block);
+}
+
+/*
+ * Loop over all blocks and perform the action specified.
+ * (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
+ */
+int
+nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct nlm_block	*block, *next;
+
+	down(&file->f_sema);
+	for (block = file->f_blocks; block; block = next) {
+		next = block->b_fnext;
+		if (action == NLM_ACT_MARK)
+			block->b_host->h_inuse = 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			if (host == NULL || host == block->b_host)
+				nlmsvc_delete_block(block, 1);
+		}
+	}
+	up(&file->f_sema);
+	return 0;
+}
+
+/*
+ * Attempt to establish a lock, and if it can't be granted, block it
+ * if required.
+ */
+u32
+nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
+{
+	struct file_lock	*conflock;
+	struct nlm_block	*block;
+	int			error;
+
+	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_type, lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end,
+				wait);
+
+
+	/* Get existing block (in case client is busy-waiting) */
+	block = nlmsvc_lookup_block(file, lock, 0);
+
+	lock->fl.fl_flags |= FL_LOCKD;
+
+again:
+	/* Lock file against concurrent access */
+	down(&file->f_sema);
+
+	if (!(conflock = posix_test_lock(file->f_file, &lock->fl))) {
+		error = posix_lock_file(file->f_file, &lock->fl);
+
+		if (block)
+			nlmsvc_delete_block(block, 0);
+		up(&file->f_sema);
+
+		dprintk("lockd: posix_lock_file returned %d\n", -error);
+		switch(-error) {
+		case 0:
+			return nlm_granted;
+		case EDEADLK:
+			return nlm_deadlock;
+		case EAGAIN:
+			return nlm_lck_denied;
+		default:			/* includes ENOLCK */
+			return nlm_lck_denied_nolocks;
+		}
+	}
+
+	if (!wait) {
+		up(&file->f_sema);
+		return nlm_lck_denied;
+	}
+
+	if (posix_locks_deadlock(&lock->fl, conflock)) {
+		up(&file->f_sema);
+		return nlm_deadlock;
+	}
+
+	/* If we don't have a block, create and initialize it. Then
+	 * retry because we may have slept in kmalloc. */
+	/* We have to release f_sema as nlmsvc_create_block may try
+	 * to claim it while doing host garbage collection. */
+	if (block == NULL) {
+		up(&file->f_sema);
+		dprintk("lockd: blocking on this lock (allocating).\n");
+		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
+			return nlm_lck_denied_nolocks;
+		goto again;
+	}
+
+	/* Append to list of blocked */
+	nlmsvc_insert_block(block, NLM_NEVER);
+
+	if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) {
+		/* Now add block to block list of the conflicting lock
+		   if we haven't done so. */
+		dprintk("lockd: blocking on this lock.\n");
+		posix_block_lock(conflock, &block->b_call.a_args.lock.fl);
+	}
+
+	up(&file->f_sema);
+	return nlm_lck_blocked;
+}
+
+/*
+ * Test for presence of a conflicting lock.
+ */
+u32
+nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
+				       struct nlm_lock *conflock)
+{
+	struct file_lock	*fl;
+
+	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_type,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	if ((fl = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
+		dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
+				fl->fl_type, (long long)fl->fl_start,
+				(long long)fl->fl_end);
+		conflock->caller = "somehost";	/* FIXME */
+		conflock->oh.len = 0;		/* don't return OH info */
+		conflock->fl = *fl;
+		return nlm_lck_denied;
+	}
+
+	return nlm_granted;
+}
+
+/*
+ * Remove a lock.
+ * This implies a CANCEL call: We send a GRANT_MSG, the client replies
+ * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
+ * afterwards. In this case the block will still be there, and hence
+ * must be removed.
+ */
+u32
+nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+{
+	int	error;
+
+	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	/* First, cancel any lock that might be there */
+	nlmsvc_cancel_blocked(file, lock);
+
+	lock->fl.fl_type = F_UNLCK;
+	error = posix_lock_file(file->f_file, &lock->fl);
+
+	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
+}
+
+/*
+ * Cancel a previously blocked request.
+ *
+ * A cancel request always overrides any grant that may currently
+ * be in progress.
+ * The calling procedure must check whether the file can be closed.
+ */
+u32
+nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+{
+	struct nlm_block	*block;
+
+	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
+				file->f_file->f_dentry->d_inode->i_sb->s_id,
+				file->f_file->f_dentry->d_inode->i_ino,
+				lock->fl.fl_pid,
+				(long long)lock->fl.fl_start,
+				(long long)lock->fl.fl_end);
+
+	down(&file->f_sema);
+	if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL)
+		nlmsvc_delete_block(block, 1);
+	up(&file->f_sema);
+	return nlm_granted;
+}
+
+/*
+ * Unblock a blocked lock request. This is a callback invoked from the
+ * VFS layer when a lock on which we blocked is removed.
+ *
+ * This function doesn't grant the blocked lock instantly, but rather moves
+ * the block to the head of nlm_blocked where it can be picked up by lockd.
+ */
+static void
+nlmsvc_notify_blocked(struct file_lock *fl)
+{
+	struct nlm_block	**bp, *block;
+
+	dprintk("lockd: VFS unblock notification for block %p\n", fl);
+	for (bp = &nlm_blocked; (block = *bp) != NULL; bp = &block->b_next) {
+		if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) {
+			nlmsvc_insert_block(block, 0);
+			svc_wake_up(block->b_daemon);
+			return;
+		}
+	}
+
+	printk(KERN_WARNING "lockd: notification for unknown block!\n");
+}
+
+static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+{
+	return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
+}
+
+struct lock_manager_operations nlmsvc_lock_operations = {
+	.fl_compare_owner = nlmsvc_same_owner,
+	.fl_notify = nlmsvc_notify_blocked,
+};
+
+/*
+ * Try to claim a lock that was previously blocked.
+ *
+ * Note that we use both the RPC_GRANTED_MSG call _and_ an async
+ * RPC thread when notifying the client. This seems like overkill...
+ * Here's why:
+ *  -	we don't want to use a synchronous RPC thread, otherwise
+ *	we might find ourselves hanging on a dead portmapper.
+ *  -	Some lockd implementations (e.g. HP) don't react to
+ *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
+ */
+static void
+nlmsvc_grant_blocked(struct nlm_block *block)
+{
+	struct nlm_file		*file = block->b_file;
+	struct nlm_lock		*lock = &block->b_call.a_args.lock;
+	struct file_lock	*conflock;
+	int			error;
+
+	dprintk("lockd: grant blocked lock %p\n", block);
+
+	/* First thing is lock the file */
+	down(&file->f_sema);
+
+	/* Unlink block request from list */
+	nlmsvc_remove_block(block);
+
+	/* If b_granted is true this means we've been here before.
+	 * Just retry the grant callback, possibly refreshing the RPC
+	 * binding */
+	if (block->b_granted) {
+		nlm_rebind_host(block->b_host);
+		goto callback;
+	}
+
+	/* Try the lock operation again */
+	if ((conflock = posix_test_lock(file->f_file, &lock->fl)) != NULL) {
+		/* Bummer, we blocked again */
+		dprintk("lockd: lock still blocked\n");
+		nlmsvc_insert_block(block, NLM_NEVER);
+		posix_block_lock(conflock, &lock->fl);
+		up(&file->f_sema);
+		return;
+	}
+
+	/* Alright, no conflicting lock. Now lock it for real. If the
+	 * following yields an error, this is most probably due to low
+	 * memory. Retry the lock in a few seconds.
+	 */
+	if ((error = posix_lock_file(file->f_file, &lock->fl)) < 0) {
+		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
+				-error, __FUNCTION__);
+		nlmsvc_insert_block(block, 10 * HZ);
+		up(&file->f_sema);
+		return;
+	}
+
+callback:
+	/* Lock was granted by VFS. */
+	dprintk("lockd: GRANTing blocked lock.\n");
+	block->b_granted = 1;
+	block->b_incall  = 1;
+
+	/* Schedule next grant callback in 30 seconds */
+	nlmsvc_insert_block(block, 30 * HZ);
+
+	/* Call the client */
+	nlm_get_host(block->b_call.a_host);
+	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
+						nlmsvc_grant_callback) < 0)
+		nlm_release_host(block->b_call.a_host);
+	up(&file->f_sema);
+}
+
+/*
+ * This is the callback from the RPC layer when the NLM_GRANTED_MSG
+ * RPC call has succeeded or timed out.
+ * Like all RPC callbacks, it is invoked by the rpciod process, so it
+ * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
+ * chain once more in order to have it removed by lockd itself (which can
+ * then sleep on the file semaphore without disrupting e.g. the nfs client).
+ */
+static void
+nlmsvc_grant_callback(struct rpc_task *task)
+{
+	struct nlm_rqst		*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_block	*block;
+	unsigned long		timeout;
+	struct sockaddr_in	*peer_addr = RPC_PEERADDR(task->tk_client);
+
+	dprintk("lockd: GRANT_MSG RPC callback\n");
+	dprintk("callback: looking for cookie %s, host (%u.%u.%u.%u)\n",
+		nlmdbg_cookie2a(&call->a_args.cookie),
+		NIPQUAD(peer_addr->sin_addr.s_addr));
+	if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) {
+		dprintk("lockd: no block for cookie %s, host (%u.%u.%u.%u)\n",
+			nlmdbg_cookie2a(&call->a_args.cookie),
+			NIPQUAD(peer_addr->sin_addr.s_addr));
+		return;
+	}
+
+	/* Technically, we should down the file semaphore here. Since we
+	 * move the block towards the head of the queue only, no harm
+	 * can be done, though. */
+	if (task->tk_status < 0) {
+		/* RPC error: Re-insert for retransmission */
+		timeout = 10 * HZ;
+	} else if (block->b_done) {
+		/* Block already removed, kill it for real */
+		timeout = 0;
+	} else {
+		/* Call was successful, now wait for client callback */
+		timeout = 60 * HZ;
+	}
+	nlmsvc_insert_block(block, timeout);
+	svc_wake_up(block->b_daemon);
+	block->b_incall = 0;
+
+	nlm_release_host(call->a_host);
+}
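The hand-off described above is worth a concrete picture: rpciod must not sleep, so nlmsvc_grant_callback() only re-queues the block and calls svc_wake_up(); the lockd thread later does the work that may block on f_sema. A minimal stand-alone sketch of the same pattern in user space (an analogy only, not part of this patch; it assumes POSIX threads) looks like this:

/*
 * Analogy for the defer-to-daemon pattern above: the "callback" never
 * sleeps, it just sets a flag and signals; the "daemon" thread wakes
 * up and performs the work that is allowed to block.
 * Build with: cc -pthread defer.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int pending;

static void *daemon_thread(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&wake, &lock);	/* "lockd" sleeps here */
	pending = 0;
	pthread_mutex_unlock(&lock);
	printf("daemon: doing the deferred (possibly blocking) work\n");
	return NULL;
}

static void callback(void)
{
	/* like nlmsvc_grant_callback(): queue, wake, never sleep */
	pthread_mutex_lock(&lock);
	pending = 1;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, daemon_thread, NULL);
	sleep(1);		/* let the daemon start waiting */
	callback();
	pthread_join(tid, NULL);
	return 0;
}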
+
+/*
+ * We received a GRANT_RES callback. Try to find the corresponding
+ * block.
+ */
+void
+nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status)
+{
+	struct nlm_block	*block;
+	struct nlm_file		*file;
+
+	dprintk("grant_reply: looking for cookie %x, host (%08x), s=%d \n", 
+		*(unsigned int *)(cookie->data), 
+		ntohl(rqstp->rq_addr.sin_addr.s_addr), status);
+	if (!(block = nlmsvc_find_block(cookie, &rqstp->rq_addr)))
+		return;
+	file = block->b_file;
+
+	file->f_count++;
+	down(&file->f_sema);
+	if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
+		if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
+			/* Try again in a couple of seconds */
+			nlmsvc_insert_block(block, 10 * HZ);
+			block = NULL;
+		} else {
+			/* Lock is now held by client, or has been rejected.
+			 * In both cases, the block should be removed. */
+			up(&file->f_sema);
+			if (status == NLM_LCK_GRANTED)
+				nlmsvc_delete_block(block, 0);
+			else
+				nlmsvc_delete_block(block, 1);
+		}
+	}
+	if (!block)
+		up(&file->f_sema);
+	nlm_release_file(file);
+}
+
+/*
+ * Retry all blocked locks that have been notified. This is where lockd
+ * picks up locks that can be granted, or grant notifications that must
+ * be retransmitted.
+ */
+unsigned long
+nlmsvc_retry_blocked(void)
+{
+	struct nlm_block	*block;
+
+	dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+			nlm_blocked,
+			nlm_blocked? nlm_blocked->b_when : 0);
+	while ((block = nlm_blocked) != 0) {
+		if (block->b_when == NLM_NEVER)
+			break;
+		if (time_after(block->b_when, jiffies))
+			break;
+		dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
+			block, block->b_when, block->b_done);
+		if (block->b_done)
+			nlmsvc_delete_block(block, 0);
+		else
+			nlmsvc_grant_blocked(block);
+	}
+
+	if ((block = nlm_blocked) && block->b_when != NLM_NEVER)
+		return (block->b_when - jiffies);
+
+	return MAX_SCHEDULE_TIMEOUT;
+}
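For orientation, everything in svclock.c above is driven by an ordinary blocking byte-range lock on the client: roughly speaking, the NFS client turns a blocking fcntl() into an NLM LOCK request with block=1, the server answers nlm_lck_blocked, and the GRANTED_MSG machinery above later tells the client the lock is now held. A minimal user-space sketch of that client side (not part of this patch; the mount path is made up):

/* Blocking write lock over the whole file via fcntl(F_SETLKW). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 = to end of file */
	};
	int fd = open("/mnt/nfs/somefile", O_RDWR);	/* hypothetical path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fcntl(fd, F_SETLKW, &fl) < 0) {	/* blocks until granted */
		perror("fcntl");
		return 1;
	}
	printf("lock granted\n");
	close(fd);
	return 0;
}

Run against a local filesystem, the same program simply goes through the kernel's POSIX lock code with no NLM traffic at all.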
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
new file mode 100644
index 0000000..757e344
--- /dev/null
+++ b/fs/lockd/svcproc.c
@@ -0,0 +1,606 @@
+/*
+ * linux/fs/lockd/svcproc.c
+ *
+ * Lockd server procedures. We don't implement the NLM_*_RES 
+ * procedures because we don't use the async procedures.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+
+#define NLMDBG_FACILITY		NLMDBG_CLIENT
+
+static u32	nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
+static void	nlmsvc_callback_exit(struct rpc_task *);
+
+#ifdef CONFIG_LOCKD_V4
+static u32
+cast_to_nlm(u32 status, u32 vers)
+{
+	/* Note: status is assumed to be in network byte order !!! */
+	if (vers != 4) {
+		switch (status) {
+		case nlm_granted:
+		case nlm_lck_denied:
+		case nlm_lck_denied_nolocks:
+		case nlm_lck_blocked:
+		case nlm_lck_denied_grace_period:
+			break;
+		case nlm4_deadlock:
+			status = nlm_lck_denied;
+			break;
+		default:
+			status = nlm_lck_denied_nolocks;
+		}
+	}
+
+	return (status);
+}
+#define	cast_status(status) (cast_to_nlm(status, rqstp->rq_vers))
+#else
+#define cast_status(status) (status)
+#endif
+
+/*
+ * Obtain client and file from arguments
+ */
+static u32
+nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+			struct nlm_host **hostp, struct nlm_file **filp)
+{
+	struct nlm_host		*host = NULL;
+	struct nlm_file		*file = NULL;
+	struct nlm_lock		*lock = &argp->lock;
+	u32			error;
+
+	/* nfsd callbacks must have been installed for this procedure */
+	if (!nlmsvc_ops)
+		return nlm_lck_denied_nolocks;
+
+	/* Obtain host handle */
+	if (!(host = nlmsvc_lookup_host(rqstp))
+	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
+		goto no_locks;
+	*hostp = host;
+
+	/* Obtain file pointer. Not used by FREE_ALL call. */
+	if (filp != NULL) {
+		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+			goto no_locks;
+		*filp = file;
+
+		/* Set up the missing parts of the file_lock structure */
+		lock->fl.fl_file  = file->f_file;
+		lock->fl.fl_owner = (fl_owner_t) host;
+		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+	}
+
+	return 0;
+
+no_locks:
+	if (host)
+		nlm_release_host(host);
+	return nlm_lck_denied_nolocks;
+}
+
+/*
+ * NULL: Test for presence of service
+ */
+static int
+nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	dprintk("lockd: NULL          called\n");
+	return rpc_success;
+}
+
+/*
+ * TEST: Check for conflicting lock
+ */
+static int
+nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: TEST          called\n");
+	resp->cookie = argp->cookie;
+
+	/* Don't accept test requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now check for conflicting locks */
+	resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock));
+
+	dprintk("lockd: TEST          status %d vers %d\n",
+		ntohl(resp->status), rqstp->rq_vers);
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				         struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: LOCK          called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+#if 0
+	/* If supplied state doesn't match current state, we assume it's
+	 * an old request that time-warped somehow. Any error return would
+	 * do in this case because it's irrelevant anyway.
+	 *
+	 * NB: We don't retrieve the remote host's state yet.
+	 */
+	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
+		resp->status = nlm_lck_denied_nolocks;
+	} else
+#endif
+
+	/* Now try to lock the file */
+	resp->status = cast_status(nlmsvc_lock(rqstp, file, &argp->lock,
+					       argp->block, &argp->cookie));
+
+	dprintk("lockd: LOCK          status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+static int
+nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: CANCEL        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Try to cancel request. */
+	resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
+
+	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNLOCK: release a lock
+ */
+static int
+nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				           struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNLOCK        called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to remove the lock */
+	resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
+
+	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * GRANTED: A server calls us to report that a process's lock request
+ * has been granted
+ */
+static int
+nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	resp->cookie = argp->cookie;
+
+	dprintk("lockd: GRANTED       called\n");
+	resp->status = nlmclnt_grant(&argp->lock);
+	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
+	return rpc_success;
+}
+
+/*
+ * `Async' versions of the above service routines. They aren't really
+ * asynchronous, because we send the callback before the reply proper.
+ * I hope this doesn't break any clients.
+ */
+static int
+nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: TEST_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void	     *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: LOCK_MSG      called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+					       void	       *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: CANCEL_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                               void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: UNLOCK_MSG    called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
+	return stat;
+}
+
+static int
+nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+                                                void            *resp)
+{
+	struct nlm_res	res;
+	u32		stat;
+
+	dprintk("lockd: GRANTED_MSG   called\n");
+	memset(&res, 0, sizeof(res));
+
+	if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0)
+		stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
+	return stat;
+}
+
+/*
+ * SHARE: create a DOS share or alter existing share.
+ */
+static int
+nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+				          struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: SHARE         called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept new lock requests during grace period */
+	if (nlmsvc_grace_period && !argp->reclaim) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to create the share */
+	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
+
+	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * UNSHARE: Release a DOS share.
+ */
+static int
+nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_file	*file;
+
+	dprintk("lockd: UNSHARE       called\n");
+
+	resp->cookie = argp->cookie;
+
+	/* Don't accept requests during grace period */
+	if (nlmsvc_grace_period) {
+		resp->status = nlm_lck_denied_grace_period;
+		return rpc_success;
+	}
+
+	/* Obtain client and file */
+	if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+		return rpc_success;
+
+	/* Now try to unshare the file */
+	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
+
+	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
+	nlm_release_host(host);
+	nlm_release_file(file);
+	return rpc_success;
+}
+
+/*
+ * NM_LOCK: Create an unmonitored lock
+ */
+static int
+nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+				            struct nlm_res  *resp)
+{
+	dprintk("lockd: NM_LOCK       called\n");
+
+	argp->monitor = 0;		/* just clear the monitor flag */
+	return nlmsvc_proc_lock(rqstp, argp, resp);
+}
+
+/*
+ * FREE_ALL: Release all locks and shares held by client
+ */
+static int
+nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+					     void            *resp)
+{
+	struct nlm_host	*host;
+
+	/* Obtain client */
+	if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL))
+		return rpc_success;
+
+	nlmsvc_free_host_resources(host);
+	nlm_release_host(host);
+	return rpc_success;
+}
+
+/*
+ * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+ */
+static int
+nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+					      void	        *resp)
+{
+	struct sockaddr_in	saddr = rqstp->rq_addr;
+	int			vers = argp->vers;
+	int			prot = argp->proto >> 1;
+	struct nlm_host		*host;
+
+	dprintk("lockd: SM_NOTIFY     called\n");
+	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
+	 || ntohs(saddr.sin_port) >= 1024) {
+		printk(KERN_WARNING
+			"lockd: rejected NSM callback from %08x:%d\n",
+			ntohl(rqstp->rq_addr.sin_addr.s_addr),
+			ntohs(rqstp->rq_addr.sin_port));
+		return rpc_system_err;
+	}
+
+	/* Obtain the host pointer for this NFS server and try to
+	 * reclaim all locks we hold on this server.
+	 */
+	saddr.sin_addr.s_addr = argp->addr;
+	if ((argp->proto & 1)==0) {
+		if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
+			nlmclnt_recovery(host, argp->state);
+			nlm_release_host(host);
+		}
+	} else {
+		/* If we run on an NFS server, delete all locks held by the client */
+		if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
+			nlmsvc_free_host_resources(host);
+			nlm_release_host(host);
+		}
+	}
+
+	return rpc_success;
+}
+
+/*
+ * client sent a GRANTED_RES, let's remove the associated block
+ */
+static int
+nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+                                                void            *resp)
+{
+	if (!nlmsvc_ops)
+		return rpc_success;
+
+	dprintk("lockd: GRANTED_RES   called\n");
+
+	nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
+	return rpc_success;
+}
+
+/*
+ * This is the generic lockd callback for async RPC calls
+ */
+static u32
+nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
+{
+	struct nlm_host	*host;
+	struct nlm_rqst	*call;
+
+	if (!(call = nlmclnt_alloc_call()))
+		return rpc_system_err;
+
+	host = nlmclnt_lookup_host(&rqstp->rq_addr,
+				rqstp->rq_prot, rqstp->rq_vers);
+	if (!host) {
+		kfree(call);
+		return rpc_system_err;
+	}
+
+	call->a_flags = RPC_TASK_ASYNC;
+	call->a_host  = host;
+	memcpy(&call->a_args, resp, sizeof(*resp));
+
+	if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
+		goto error;
+
+	return rpc_success;
+ error:
+	nlm_release_host(host);
+	kfree(call);
+	return rpc_system_err;
+}
+
+static void
+nlmsvc_callback_exit(struct rpc_task *task)
+{
+	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+
+	if (task->tk_status < 0) {
+		dprintk("lockd: %4d callback failed (errno = %d)\n",
+					task->tk_pid, -task->tk_status);
+	}
+	nlm_release_host(call->a_host);
+	kfree(call);
+}
+
+/*
+ * NLM Server procedures.
+ */
+
+#define nlmsvc_encode_norep	nlmsvc_encode_void
+#define nlmsvc_decode_norep	nlmsvc_decode_void
+#define nlmsvc_decode_testres	nlmsvc_decode_void
+#define nlmsvc_decode_lockres	nlmsvc_decode_void
+#define nlmsvc_decode_unlockres	nlmsvc_decode_void
+#define nlmsvc_decode_cancelres	nlmsvc_decode_void
+#define nlmsvc_decode_grantedres	nlmsvc_decode_void
+
+#define nlmsvc_proc_none	nlmsvc_proc_null
+#define nlmsvc_proc_test_res	nlmsvc_proc_null
+#define nlmsvc_proc_lock_res	nlmsvc_proc_null
+#define nlmsvc_proc_cancel_res	nlmsvc_proc_null
+#define nlmsvc_proc_unlock_res	nlmsvc_proc_null
+
+struct nlm_void			{ int dummy; };
+
+#define PROC(name, xargt, xrest, argt, rest, respsize)	\
+ { .pc_func	= (svc_procfunc) nlmsvc_proc_##name,	\
+   .pc_decode	= (kxdrproc_t) nlmsvc_decode_##xargt,	\
+   .pc_encode	= (kxdrproc_t) nlmsvc_encode_##xrest,	\
+   .pc_release	= NULL,					\
+   .pc_argsize	= sizeof(struct nlm_##argt),		\
+   .pc_ressize	= sizeof(struct nlm_##rest),		\
+   .pc_xdrressize = respsize,				\
+ }
+
+#define	Ck	(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* cookie */
+#define	St	1				/* status */
+#define	No	(1+1024/4)			/* Net Obj */
+#define	Rg	2				/* range - offset + size */
+
+struct svc_procedure		nlmsvc_procedures[] = {
+  PROC(null,		void,		void,		void,	void, 1),
+  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
+  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
+  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
+  PROC(granted,		testargs,	res,		args,	res, Ck+St),
+  PROC(test_msg,	testargs,	norep,		args,	void, 1),
+  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
+  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
+  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
+  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
+  PROC(test_res,	testres,	norep,		res,	void, 1),
+  PROC(lock_res,	lockres,	norep,		res,	void, 1),
+  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
+  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
+  PROC(granted_res,	res,		norep,		res,	void, 1),
+  /* statd callback */
+  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(none,		void,		void,		void,	void, 1),
+  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
+  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
+  PROC(free_all,	notify,		void,		args,	void, 0),
+};
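The pc_xdrressize figures in the table above are reply sizes in 32-bit XDR words, built from the Ck/St/No/Rg building blocks defined just before the table. A quick stand-alone check of the arithmetic (a sketch only; it assumes the usual XDR_QUADLEN rounding to 4-byte words and NLM_MAXCOOKIELEN == 32 from the lockd headers):

/* Reply sizes for the NLM procedure table, in XDR words and bytes. */
#include <stdio.h>

#define XDR_QUADLEN(l)		(((l) + 3) >> 2)	/* assumed definition */
#define NLM_MAXCOOKIELEN	32			/* assumed value */

#define Ck	(1 + XDR_QUADLEN(NLM_MAXCOOKIELEN))	/* length word + cookie */
#define St	1					/* status */
#define No	(1 + 1024/4)				/* net obj (owner handle) */
#define Rg	2					/* range: offset + size */

int main(void)
{
	printf("TEST reply: %d words (%d bytes)\n",
	       Ck + St + 2 + No + Rg, 4 * (Ck + St + 2 + No + Rg));
	printf("LOCK reply: %d words (%d bytes)\n",
	       Ck + St, 4 * (Ck + St));
	return 0;
}

So a TEST reply reserves room for up to a 1024-byte owner handle, while the plain status replies fit in a few dozen bytes.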
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
new file mode 100644
index 0000000..4943fb7
--- /dev/null
+++ b/fs/lockd/svcshare.c
@@ -0,0 +1,111 @@
+/*
+ * linux/fs/lockd/svcshare.c
+ *
+ * Management of DOS shares.
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/time.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+
+static inline int
+nlm_cmp_owner(struct nlm_share *share, struct xdr_netobj *oh)
+{
+	return share->s_owner.len == oh->len
+	    && !memcmp(share->s_owner.data, oh->data, oh->len);
+}
+
+u32
+nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
+			struct nlm_args *argp)
+{
+	struct nlm_share	*share;
+	struct xdr_netobj	*oh = &argp->lock.oh;
+	u8			*ohdata;
+
+	for (share = file->f_shares; share; share = share->s_next) {
+		if (share->s_host == host && nlm_cmp_owner(share, oh))
+			goto update;
+		if ((argp->fsm_access & share->s_mode)
+		 || (argp->fsm_mode   & share->s_access))
+			return nlm_lck_denied;
+	}
+
+	share = (struct nlm_share *) kmalloc(sizeof(*share) + oh->len,
+						GFP_KERNEL);
+	if (share == NULL)
+		return nlm_lck_denied_nolocks;
+
+	/* Copy owner handle */
+	ohdata = (u8 *) (share + 1);
+	memcpy(ohdata, oh->data, oh->len);
+
+	share->s_file	    = file;
+	share->s_host       = host;
+	share->s_owner.data = ohdata;
+	share->s_owner.len  = oh->len;
+	share->s_next       = file->f_shares;
+	file->f_shares      = share;
+
+update:
+	share->s_access = argp->fsm_access;
+	share->s_mode   = argp->fsm_mode;
+	return nlm_granted;
+}
+
+/*
+ * Delete a share.
+ */
+u32
+nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
+			struct nlm_args *argp)
+{
+	struct nlm_share	*share, **shpp;
+	struct xdr_netobj	*oh = &argp->lock.oh;
+
+	for (shpp = &file->f_shares; (share = *shpp) != NULL; shpp = &share->s_next) {
+		if (share->s_host == host && nlm_cmp_owner(share, oh)) {
+			*shpp = share->s_next;
+			kfree(share);
+			return nlm_granted;
+		}
+	}
+
+	/* X/Open spec says return success even if there was no
+	 * corresponding share. */
+	return nlm_granted;
+}
+
+/*
+ * Traverse all shares for a given file (and host).
+ * NLM_ACT_CHECK is handled by nlm_inspect_file in svcsubs.c.
+ */
+int
+nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct nlm_share	*share, **shpp;
+
+	shpp = &file->f_shares;
+	while ((share = *shpp) != NULL) {
+		if (action == NLM_ACT_MARK)
+			share->s_host->h_inuse = 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			if (host == NULL || host == share->s_host) {
+				*shpp = share->s_next;
+				kfree(share);
+				continue;
+			}
+		}
+		shpp = &share->s_next;
+	}
+
+	return 0;
+}
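Both nlmsvc_unshare_file() and nlmsvc_traverse_shares() walk f_shares through a pointer-to-pointer, which lets an entry be unlinked without special-casing the list head; the same idiom is used for f_blocks in svclock.c. A stand-alone sketch of the idiom (illustration only, not part of this patch):

/* Unlink matching entries from a singly-linked list via "struct **". */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int		key;
	struct item	*next;
};

static void remove_key(struct item **head, int key)
{
	struct item **pp, *it;

	for (pp = head; (it = *pp) != NULL; ) {
		if (it->key == key) {
			*pp = it->next;		/* head or middle, same code */
			free(it);
			continue;		/* *pp already points at the next entry */
		}
		pp = &it->next;
	}
}

int main(void)
{
	struct item *head = NULL, *it;
	int i;

	for (i = 0; i < 5; i++) {
		it = malloc(sizeof(*it));
		it->key = i % 2;		/* keys 0 and 1 only */
		it->next = head;
		head = it;
	}
	remove_key(&head, 1);
	for (it = head; it; it = it->next)
		printf("key %d\n", it->key);	/* only 0s remain */
	return 0;
}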
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
new file mode 100644
index 0000000..de75363
--- /dev/null
+++ b/fs/lockd/svcsubs.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/lockd/svcsubs.c
+ *
+ * Various support routines for the NLM server.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsfh.h>
+#include <linux/nfsd/export.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_SVCSUBS
+
+
+/*
+ * Global file hash table
+ */
+#define FILE_HASH_BITS		5
+#define FILE_NRHASH		(1<<FILE_HASH_BITS)
+static struct nlm_file *	nlm_files[FILE_NRHASH];
+static DECLARE_MUTEX(nlm_file_sema);
+
+static inline unsigned int file_hash(struct nfs_fh *f)
+{
+	unsigned int tmp = 0;
+	int i;
+
+	for (i = 0; i < NFS2_FHSIZE; i++)
+		tmp += f->data[i];
+	return tmp & (FILE_NRHASH - 1);
+}
+
+/*
+ * Lookup file info. If it doesn't exist, create a file info struct
+ * and open a (VFS) file for the given inode.
+ *
+ * FIXME:
+ * Note that we open the file O_RDONLY even when creating write locks.
+ * This is not quite right, but for now, we assume the client performs
+ * the proper R/W checking.
+ */
+u32
+nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
+					struct nfs_fh *f)
+{
+	struct nlm_file	*file;
+	unsigned int	hash;
+	u32		nfserr;
+	u32		*fhp = (u32*)f->data;
+
+	dprintk("lockd: nlm_file_lookup(%08x %08x %08x %08x %08x %08x)\n",
+		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);
+
+
+	hash = file_hash(f);
+
+	/* Lock file table */
+	down(&nlm_file_sema);
+
+	for (file = nlm_files[hash]; file; file = file->f_next)
+		if (!nfs_compare_fh(&file->f_handle, f))
+			goto found;
+
+	dprintk("lockd: creating file for (%08x %08x %08x %08x %08x %08x)\n",
+		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);
+
+	nfserr = nlm_lck_denied_nolocks;
+	file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
+	if (!file)
+		goto out_unlock;
+
+	memset(file, 0, sizeof(*file));
+	memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
+	file->f_hash = hash;
+	init_MUTEX(&file->f_sema);
+
+	/* Open the file. Note that this must not sleep for too long, else
+	 * we would lock up lockd:-) So no NFS re-exports, folks.
+	 *
+	 * We have to make sure we have the right credential to open
+	 * the file.
+	 */
+	if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
+		dprintk("lockd: open failed (nfserr %d)\n", ntohl(nfserr));
+		goto out_free;
+	}
+
+	file->f_next = nlm_files[hash];
+	nlm_files[hash] = file;
+
+found:
+	dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
+	*result = file;
+	file->f_count++;
+	nfserr = 0;
+
+out_unlock:
+	up(&nlm_file_sema);
+	return nfserr;
+
+out_free:
+	kfree(file);
+#ifdef CONFIG_LOCKD_V4
+	if (nfserr == 1)
+		nfserr = nlm4_stale_fh;
+	else
+#endif
+	nfserr = nlm_lck_denied;
+	goto out_unlock;
+}
+
+/*
+ * Delete a file after having released all locks, blocks and shares
+ */
+static inline void
+nlm_delete_file(struct nlm_file *file)
+{
+	struct inode *inode = file->f_file->f_dentry->d_inode;
+	struct nlm_file	**fp, *f;
+
+	dprintk("lockd: closing file %s/%ld\n",
+		inode->i_sb->s_id, inode->i_ino);
+	fp = nlm_files + file->f_hash;
+	while ((f = *fp) != NULL) {
+		if (f == file) {
+			*fp = file->f_next;
+			nlmsvc_ops->fclose(file->f_file);
+			kfree(file);
+			return;
+		}
+		fp = &f->f_next;
+	}
+
+	printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
+}
+
+/*
+ * Loop over all locks on the given file and perform the specified
+ * action.
+ */
+static int
+nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	struct inode	 *inode = nlmsvc_file_inode(file);
+	struct file_lock *fl;
+	struct nlm_host	 *lockhost;
+
+again:
+	file->f_locks = 0;
+	for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+		if (!(fl->fl_flags & FL_LOCKD))
+			continue;
+
+		/* update current lock count */
+		file->f_locks++;
+		lockhost = (struct nlm_host *) fl->fl_owner;
+		if (action == NLM_ACT_MARK)
+			lockhost->h_inuse = 1;
+		else if (action == NLM_ACT_CHECK)
+			return 1;
+		else if (action == NLM_ACT_UNLOCK) {
+			struct file_lock lock = *fl;
+
+			if (host && lockhost != host)
+				continue;
+
+			lock.fl_type  = F_UNLCK;
+			lock.fl_start = 0;
+			lock.fl_end   = OFFSET_MAX;
+			if (posix_lock_file(file->f_file, &lock) < 0) {
+				printk("lockd: unlock failure in %s:%d\n",
+						__FILE__, __LINE__);
+				return 1;
+			}
+			goto again;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Operate on a single file
+ */
+static inline int
+nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action)
+{
+	if (action == NLM_ACT_CHECK) {
+		/* Fast path for mark and sweep garbage collection */
+		if (file->f_count || file->f_blocks || file->f_shares)
+			return 1;
+	} else {
+		if (nlmsvc_traverse_blocks(host, file, action)
+		 || nlmsvc_traverse_shares(host, file, action))
+			return 1;
+	}
+	return nlm_traverse_locks(host, file, action);
+}
+
+/*
+ * Loop over all files in the file table.
+ */
+static int
+nlm_traverse_files(struct nlm_host *host, int action)
+{
+	struct nlm_file	*file, **fp;
+	int		i;
+
+	down(&nlm_file_sema);
+	for (i = 0; i < FILE_NRHASH; i++) {
+		fp = nlm_files + i;
+		while ((file = *fp) != NULL) {
+			/* Traverse locks, blocks and shares of this file
+			 * and update file->f_locks count */
+			if (nlm_inspect_file(host, file, action)) {
+				up(&nlm_file_sema);
+				return 1;
+			}
+
+			/* No more references to this file. Let go of it. */
+			if (!file->f_blocks && !file->f_locks
+			 && !file->f_shares && !file->f_count) {
+				*fp = file->f_next;
+				nlmsvc_ops->fclose(file->f_file);
+				kfree(file);
+			} else {
+				fp = &file->f_next;
+			}
+		}
+	}
+	up(&nlm_file_sema);
+	return 0;
+}
+
+/*
+ * Release file. If there are no more remote locks on this file,
+ * close it and free the handle.
+ *
+ * Note that we can't do proper reference counting without major
+ * contortions because the code in fs/locks.c creates, deletes and
+ * splits locks without notification. Our only way is to walk the
+ * entire lock list each time we remove a lock.
+ */
+void
+nlm_release_file(struct nlm_file *file)
+{
+	dprintk("lockd: nlm_release_file(%p, ct = %d)\n",
+				file, file->f_count);
+
+	/* Lock file table */
+	down(&nlm_file_sema);
+
+	/* If there are no more locks etc, delete the file */
+	if (--file->f_count == 0) {
+		if (!nlm_inspect_file(NULL, file, NLM_ACT_CHECK))
+			nlm_delete_file(file);
+	}
+
+	up(&nlm_file_sema);
+}
+
+/*
+ * Mark all hosts that still hold resources
+ */
+void
+nlmsvc_mark_resources(void)
+{
+	dprintk("lockd: nlmsvc_mark_resources\n");
+
+	nlm_traverse_files(NULL, NLM_ACT_MARK);
+}
+
+/*
+ * Release all resources held by the given client
+ */
+void
+nlmsvc_free_host_resources(struct nlm_host *host)
+{
+	dprintk("lockd: nlmsvc_free_host_resources\n");
+
+	if (nlm_traverse_files(host, NLM_ACT_UNLOCK))
+		printk(KERN_WARNING
+			"lockd: couldn't remove all locks held by %s",
+			host->h_name);
+}
+
+/*
+ * delete all hosts structs for clients
+ */
+void
+nlmsvc_invalidate_all(void)
+{
+	struct nlm_host *host;
+	while ((host = nlm_find_client()) != NULL) {
+		nlmsvc_free_host_resources(host);
+		host->h_expires = 0;
+		host->h_killed = 1;
+		nlm_release_host(host);
+	}
+}
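The file table at the top of this file hashes an opaque NFS file handle into one of 32 buckets by simply summing its bytes; that is crude, but the table only ever holds the files lockd is currently serving. A stand-alone sketch of the same scheme (illustration only; NFS2_FHSIZE is 32 bytes):

/* Mirror of file_hash() above: byte sum of the handle, masked to 32 buckets. */
#include <stdio.h>

#define FILE_HASH_BITS	5
#define FILE_NRHASH	(1 << FILE_HASH_BITS)
#define FHSIZE		32	/* NFS2_FHSIZE */

static unsigned int file_hash(const unsigned char *fh)
{
	unsigned int tmp = 0;
	int i;

	for (i = 0; i < FHSIZE; i++)
		tmp += fh[i];
	return tmp & (FILE_NRHASH - 1);
}

int main(void)
{
	unsigned char fh[FHSIZE] = { 0x01, 0x02, 0x03 };	/* rest zero */

	printf("bucket %u of %d\n", file_hash(fh), FILE_NRHASH);
	return 0;
}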
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
new file mode 100644
index 0000000..f01e9c0
--- /dev/null
+++ b/fs/lockd/xdr.c
@@ -0,0 +1,635 @@
+/*
+ * linux/fs/lockd/xdr.c
+ *
+ * XDR support for lockd and the lock client.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/nfs.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+
+static inline loff_t
+s32_to_loff_t(__s32 offset)
+{
+	return (loff_t)offset;
+}
+
+static inline __s32
+loff_t_to_s32(loff_t offset)
+{
+	__s32 res;
+	if (offset >= NLM_OFFSET_MAX)
+		res = NLM_OFFSET_MAX;
+	else if (offset <= -NLM_OFFSET_MAX)
+		res = -NLM_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+/*
+ * XDR functions for basic NLM types
+ */
+static inline u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	unsigned int	len;
+
+	len = ntohl(*p++);
+
+	if (len == 0) {
+		c->len = 4;
+		memset(c->data, 0, 4);	/* hockeypux brain damage */
+	} else if (len <= NLM_MAXCOOKIELEN) {
+		c->len = len;
+		memcpy(c->data, p, len);
+		p += XDR_QUADLEN(len);
+	} else {
+		printk(KERN_NOTICE
+			"lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n",
+			len, NLM_MAXCOOKIELEN);
+		return NULL;
+	}
+	return p;
+}
+
+static inline u32 *
+nlm_encode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	*p++ = htonl(c->len);
+	memcpy(p, c->data, c->len);
+	p += XDR_QUADLEN(c->len);
+	return p;
+}
+
+static inline u32 *
+nlm_decode_fh(u32 *p, struct nfs_fh *f)
+{
+	unsigned int	len;
+
+	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
+		printk(KERN_NOTICE
+			"lockd: bad fhandle size %d (should be %d)\n",
+			len, NFS2_FHSIZE);
+		return NULL;
+	}
+	f->size = NFS2_FHSIZE;
+	memset(f->data, 0, sizeof(f->data));
+	memcpy(f->data, p, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+static inline u32 *
+nlm_encode_fh(u32 *p, struct nfs_fh *f)
+{
+	*p++ = htonl(NFS2_FHSIZE);
+	memcpy(p, f->data, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+/*
+ * Encode and decode owner handle
+ */
+static inline u32 *
+nlm_decode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_decode_netobj(p, oh);
+}
+
+static inline u32 *
+nlm_encode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_encode_netobj(p, oh);
+}
+
+static inline u32 *
+nlm_decode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	s32			start, len, end;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len,
+					    NLM_MAXSTRLEN))
+	 || !(p = nlm_decode_fh(p, &lock->fh))
+	 || !(p = nlm_decode_oh(p, &lock->oh)))
+		return NULL;
+
+	locks_init_lock(fl);
+	fl->fl_owner = current->files;
+	fl->fl_pid   = ntohl(*p++);
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = F_RDLCK;		/* as good as anything else */
+	start = ntohl(*p++);
+	len = ntohl(*p++);
+	end = start + len - 1;
+
+	fl->fl_start = s32_to_loff_t(start);
+
+	if (len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = s32_to_loff_t(end);
+	return p;
+}
+
+/*
+ * Encode a lock as part of an NLM call
+ */
+static u32 *
+nlm_encode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s32			start, len;
+
+	if (!(p = xdr_encode_string(p, lock->caller))
+	 || !(p = nlm_encode_fh(p, &lock->fh))
+	 || !(p = nlm_encode_oh(p, &lock->oh)))
+		return NULL;
+
+	if (fl->fl_start > NLM_OFFSET_MAX
+	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
+		return NULL;
+
+	start = loff_t_to_s32(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		len = 0;
+	else
+		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+
+	*p++ = htonl(fl->fl_pid);
+	*p++ = htonl(start);
+	*p++ = htonl(len);
+
+	return p;
+}
+
+/*
+ * Encode result of a TEST/TEST_MSG call
+ */
+static u32 *
+nlm_encode_testres(u32 *p, struct nlm_res *resp)
+{
+	s32		start, len;
+
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return NULL;
+	*p++ = resp->status;
+
+	if (resp->status == nlm_lck_denied) {
+		struct file_lock	*fl = &resp->lock.fl;
+
+		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
+		*p++ = htonl(fl->fl_pid);
+
+		/* Encode owner handle. */
+		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
+			return NULL;
+
+		start = loff_t_to_s32(fl->fl_start);
+		if (fl->fl_end == OFFSET_MAX)
+			len = 0;
+		else
+			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+
+		*p++ = htonl(start);
+		*p++ = htonl(len);
+	}
+
+	return p;
+}
+
+
+/*
+ * First, the server side XDR functions
+ */
+int
+nlmsvc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+
+	exclusive = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_testres(p, resp)))
+		return 0;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block  = ntohl(*p++);
+	exclusive    = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	argp->reclaim = ntohl(*p++);
+	argp->state   = ntohl(*p++);
+	argp->monitor = 1;		/* monitor client by default */
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block = ntohl(*p++);
+	exclusive = ntohl(*p++);
+	if (!(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	if (!(p = nlm_decode_cookie(p, &argp->cookie))
+	 || !(p = nlm_decode_lock(p, &argp->lock)))
+		return 0;
+	argp->lock.fl.fl_type = F_UNLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(&lock->fl);
+	lock->fl.fl_pid = ~(u32) 0;
+
+	if (!(p = nlm_decode_cookie(p, &argp->cookie))
+	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm_decode_fh(p, &lock->fh))
+	 || !(p = nlm_decode_oh(p, &lock->oh)))
+		return 0;
+	argp->fsm_mode = ntohl(*p++);
+	argp->fsm_access = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	*p++ = xdr_zero;		/* sequence argument */
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
+{
+	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	/* Preserve the address in network byte order */
+	argp->addr = *p++;
+	argp->vers = *p++;
+	argp->proto = *p++;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return 0;
+	resp->status = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlmsvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+/*
+ * Now, the client side XDR functions
+ */
+#ifdef NLMCLNT_SUPPORT_SHARES
+static int
+nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
+{
+	return 0;
+}
+#endif
+
+static int
+nlmclt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	if (resp->status == NLM_LCK_DENIED) {
+		struct file_lock	*fl = &resp->lock.fl;
+		u32			excl;
+		s32			start, len, end;
+
+		memset(&resp->lock, 0, sizeof(resp->lock));
+		locks_init_lock(fl);
+		excl = ntohl(*p++);
+		fl->fl_pid = ntohl(*p++);
+		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
+			return -EIO;
+
+		fl->fl_flags = FL_POSIX;
+		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
+		start = ntohl(*p++);
+		len = ntohl(*p++);
+		end = start + len - 1;
+
+		fl->fl_start = s32_to_loff_t(start);
+		if (len == 0 || end < 0)
+			fl->fl_end = OFFSET_MAX;
+		else
+			fl->fl_end = s32_to_loff_t(end);
+	}
+	return 0;
+}
+
+
+static int
+nlmclt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	*p++ = argp->reclaim? xdr_one : xdr_zero;
+	*p++ = htonl(argp->state);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	if (!(p = nlm_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+		return -EIO;
+	*p++ = resp->status;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_encode_testres(p, resp)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	return 0;
+}
+
+/*
+ * Buffer requirements for NLM
+ */
+#define NLM_void_sz		0
+#define NLM_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
+#define NLM_caller_sz		1+XDR_QUADLEN(sizeof(system_utsname.nodename))
+#define NLM_netobj_sz		1+XDR_QUADLEN(XDR_MAX_NETOBJ)
+/* #define NLM_owner_sz		1+XDR_QUADLEN(NLM_MAXOWNER) */
+#define NLM_fhandle_sz		1+XDR_QUADLEN(NFS2_FHSIZE)
+#define NLM_lock_sz		3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz
+#define NLM_holder_sz		4+NLM_netobj_sz
+
+#define NLM_testargs_sz		NLM_cookie_sz+1+NLM_lock_sz
+#define NLM_lockargs_sz		NLM_cookie_sz+4+NLM_lock_sz
+#define NLM_cancargs_sz		NLM_cookie_sz+2+NLM_lock_sz
+#define NLM_unlockargs_sz	NLM_cookie_sz+NLM_lock_sz
+
+#define NLM_testres_sz		NLM_cookie_sz+1+NLM_holder_sz
+#define NLM_res_sz		NLM_cookie_sz+1
+#define NLM_norep_sz		0
+
+#ifndef MAX
+# define MAX(a, b)		(((a) > (b))? (a) : (b))
+#endif
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlmclt_decode_norep	NULL
+
+#define PROC(proc, argtype, restype)	\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdrproc_t) nlmclt_encode_##argtype,		\
+	.p_decode    = (kxdrproc_t) nlmclt_decode_##restype,		\
+	.p_bufsiz    = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2	\
+	}
+
+static struct rpc_procinfo	nlm_procedures[] = {
+    PROC(TEST,		testargs,	testres),
+    PROC(LOCK,		lockargs,	res),
+    PROC(CANCEL,	cancargs,	res),
+    PROC(UNLOCK,	unlockargs,	res),
+    PROC(GRANTED,	testargs,	res),
+    PROC(TEST_MSG,	testargs,	norep),
+    PROC(LOCK_MSG,	lockargs,	norep),
+    PROC(CANCEL_MSG,	cancargs,	norep),
+    PROC(UNLOCK_MSG,	unlockargs,	norep),
+    PROC(GRANTED_MSG,	testargs,	norep),
+    PROC(TEST_RES,	testres,	norep),
+    PROC(LOCK_RES,	res,		norep),
+    PROC(CANCEL_RES,	res,		norep),
+    PROC(UNLOCK_RES,	res,		norep),
+    PROC(GRANTED_RES,	res,		norep),
+#ifdef NLMCLNT_SUPPORT_SHARES
+    PROC(SHARE,		shareargs,	shareres),
+    PROC(UNSHARE,	shareargs,	shareres),
+    PROC(NM_LOCK,	lockargs,	res),
+    PROC(FREE_ALL,	notify,		void),
+#endif
+};
+
+static struct rpc_version	nlm_version1 = {
+		.number		= 1,
+		.nrprocs	= 16,
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	nlm_version3 = {
+		.number		= 3,
+		.nrprocs	= 24,
+		.procs		= nlm_procedures,
+};
+
+#ifdef 	CONFIG_LOCKD_V4
+extern struct rpc_version nlm_version4;
+#endif
+
+static struct rpc_version *	nlm_versions[] = {
+	[1] = &nlm_version1,
+	[3] = &nlm_version3,
+#ifdef 	CONFIG_LOCKD_V4
+	[4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat		nlm_stats;
+
+struct rpc_program		nlm_program = {
+		.name		= "lockd",
+		.number		= NLM_PROGRAM,
+		.nrvers		= sizeof(nlm_versions) / sizeof(nlm_versions[0]),
+		.version	= nlm_versions,
+		.stats		= &nlm_stats,
+};
+
+#ifdef RPC_DEBUG
+const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+	/*
+	 * We can get away with a static buffer because we're only
+	 * called with BKL held.
+	 */
+	static char buf[2*NLM_MAXCOOKIELEN+1];
+	int i;
+	int len = sizeof(buf);
+	char *p = buf;
+
+	len--;	/* allow for trailing \0 */
+	if (len < 3)
+		return "???";
+	for (i = 0 ; i < cookie->len ; i++) {
+		if (len < 2) {
+			strcpy(p-3, "...");
+			break;
+		}
+		sprintf(p, "%02x", cookie->data[i]);
+		p += 2;
+		len -= 2;
+	}
+	*p = '\0';
+
+	return buf;
+}
+#endif
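One convention worth spelling out: nlm_decode_lock() above converts the on-the-wire (offset, length) pair into the kernel's inclusive [fl_start, fl_end] range, where a length of 0, or an end that wraps negative, means "to end of file" (OFFSET_MAX). A stand-alone sketch of that mapping (illustration only; LLONG_MAX stands in for the kernel's OFFSET_MAX):

/* NLM (offset, length) -> inclusive [start, end] lock range. */
#include <limits.h>
#include <stdio.h>

#define OFFSET_MAX	LLONG_MAX	/* stand-in for the kernel constant */

static void to_range(int start, int len, long long *fl_start, long long *fl_end)
{
	int end = start + len - 1;

	*fl_start = start;
	if (len == 0 || end < 0)
		*fl_end = OFFSET_MAX;		/* lock to EOF */
	else
		*fl_end = end;
}

int main(void)
{
	long long s, e;

	to_range(100, 10, &s, &e);
	printf("[%lld, %lld]\n", s, e);		/* [100, 109] */
	to_range(100, 0, &s, &e);
	printf("[%lld, %lld]\n", s, e);		/* [100, OFFSET_MAX] */
	return 0;
}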
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
new file mode 100644
index 0000000..ae4d6b4
--- /dev/null
+++ b/fs/lockd/xdr4.c
@@ -0,0 +1,580 @@
+/*
+ * linux/fs/lockd/xdr4.c
+ *
+ * XDR support for lockd and the lock client.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/nfs.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/sm_inter.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+static inline loff_t
+s64_to_loff_t(__s64 offset)
+{
+	return (loff_t)offset;
+}
+
+
+static inline s64
+loff_t_to_s64(loff_t offset)
+{
+	s64 res;
+	if (offset > NLM4_OFFSET_MAX)
+		res = NLM4_OFFSET_MAX;
+	else if (offset < -NLM4_OFFSET_MAX)
+		res = -NLM4_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+/*
+ * XDR functions for basic NLM types
+ */
+static u32 *
+nlm4_decode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	unsigned int	len;
+
+	len = ntohl(*p++);
+
+	if (len == 0) {
+		c->len = 4;
+		memset(c->data, 0, 4);	/* hockeypux brain damage */
+	} else if (len <= NLM_MAXCOOKIELEN) {
+		c->len = len;
+		memcpy(c->data, p, len);
+		p += XDR_QUADLEN(len);
+	} else {
+		printk(KERN_NOTICE
+			"lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n",
+			len, NLM_MAXCOOKIELEN);
+		return NULL;
+	}
+	return p;
+}
+
+static u32 *
+nlm4_encode_cookie(u32 *p, struct nlm_cookie *c)
+{
+	*p++ = htonl(c->len);
+	memcpy(p, c->data, c->len);
+	p += XDR_QUADLEN(c->len);
+	return p;
+}
+
+static u32 *
+nlm4_decode_fh(u32 *p, struct nfs_fh *f)
+{
+	memset(f->data, 0, sizeof(f->data));
+	f->size = ntohl(*p++);
+	if (f->size > NFS_MAXFHSIZE) {
+		printk(KERN_NOTICE
+			"lockd: bad fhandle size %d (should be <=%d)\n",
+			f->size, NFS_MAXFHSIZE);
+		return NULL;
+	}
+	memcpy(f->data, p, f->size);
+	return p + XDR_QUADLEN(f->size);
+}
+
+static u32 *
+nlm4_encode_fh(u32 *p, struct nfs_fh *f)
+{
+	*p++ = htonl(f->size);
+	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
+	memcpy(p, f->data, f->size);
+	return p + XDR_QUADLEN(f->size);
+}
+
+/*
+ * Encode and decode owner handle
+ */
+static u32 *
+nlm4_decode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_decode_netobj(p, oh);
+}
+
+static u32 *
+nlm4_encode_oh(u32 *p, struct xdr_netobj *oh)
+{
+	return xdr_encode_netobj(p, oh);
+}
+
+static u32 *
+nlm4_decode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s64			len, start, end;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm4_decode_fh(p, &lock->fh))
+	 || !(p = nlm4_decode_oh(p, &lock->oh)))
+		return NULL;
+
+	locks_init_lock(fl);
+	fl->fl_owner = current->files;
+	fl->fl_pid   = ntohl(*p++);
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = F_RDLCK;		/* as good as anything else */
+	p = xdr_decode_hyper(p, &start);
+	p = xdr_decode_hyper(p, &len);
+	end = start + len - 1;
+
+	fl->fl_start = s64_to_loff_t(start);
+
+	if (len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = s64_to_loff_t(end);
+	return p;
+}
+
+/*
+ * Encode a lock as part of an NLM call
+ */
+static u32 *
+nlm4_encode_lock(u32 *p, struct nlm_lock *lock)
+{
+	struct file_lock	*fl = &lock->fl;
+	__s64			start, len;
+
+	if (!(p = xdr_encode_string(p, lock->caller))
+	 || !(p = nlm4_encode_fh(p, &lock->fh))
+	 || !(p = nlm4_encode_oh(p, &lock->oh)))
+		return NULL;
+
+	if (fl->fl_start > NLM4_OFFSET_MAX
+	 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
+		return NULL;
+
+	*p++ = htonl(fl->fl_pid);
+
+	start = loff_t_to_s64(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		len = 0;
+	else
+		len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+
+	p = xdr_encode_hyper(p, start);
+	p = xdr_encode_hyper(p, len);
+
+	return p;
+}
+
+/*
+ * Encode result of a TEST/TEST_MSG call
+ */
+static u32 *
+nlm4_encode_testres(u32 *p, struct nlm_res *resp)
+{
+	s64		start, len;
+
+	dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return NULL;
+	*p++ = resp->status;
+
+	if (resp->status == nlm_lck_denied) {
+		struct file_lock	*fl = &resp->lock.fl;
+
+		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
+		*p++ = htonl(fl->fl_pid);
+
+		/* Encode owner handle. */
+		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
+			return NULL;
+
+		start = loff_t_to_s64(fl->fl_start);
+		if (fl->fl_end == OFFSET_MAX)
+			len = 0;
+		else
+			len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+		
+		p = xdr_encode_hyper(p, start);
+		p = xdr_encode_hyper(p, len);
+		dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n",
+			resp->status, fl->fl_pid, fl->fl_type,
+			(long long)fl->fl_start,  (long long)fl->fl_end);
+	}
+
+	dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
+	return p;
+}
+
+
+/*
+ * First, the server side XDR functions
+ */
+int
+nlm4svc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+
+	exclusive = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_testres(p, resp)))
+		return 0;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block  = ntohl(*p++);
+	exclusive    = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	argp->reclaim = ntohl(*p++);
+	argp->state   = ntohl(*p++);
+	argp->monitor = 1;		/* monitor client by default */
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	u32	exclusive;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+		return 0;
+	argp->block = ntohl(*p++);
+	exclusive = ntohl(*p++);
+	if (!(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	if (exclusive)
+		argp->lock.fl.fl_type = F_WRLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+	 || !(p = nlm4_decode_lock(p, &argp->lock)))
+		return 0;
+	argp->lock.fl.fl_type = F_UNLCK;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(&lock->fl);
+	lock->fl.fl_pid = ~(u32) 0;
+
+	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN))
+	 || !(p = nlm4_decode_fh(p, &lock->fh))
+	 || !(p = nlm4_decode_oh(p, &lock->oh)))
+		return 0;
+	argp->fsm_mode = ntohl(*p++);
+	argp->fsm_access = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	*p++ = xdr_zero;		/* sequence argument */
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return 0;
+	*p++ = resp->status;
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+					    &lock->len, NLM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
+{
+	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+		return 0;
+	argp->state = ntohl(*p++);
+	/* Preserve the address in network byte order */
+	argp->addr = *p++;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return 0;
+	resp->status = ntohl(*p++);
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nlm4svc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+/*
+ * Now, the client side XDR functions
+ */
+#ifdef NLMCLNT_SUPPORT_SHARES
+static int
+nlm4clt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
+{
+	return 0;
+}
+#endif
+
+static int
+nlm4clt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	if (resp->status == NLM_LCK_DENIED) {
+		struct file_lock	*fl = &resp->lock.fl;
+		u32			excl;
+		s64			start, end, len;
+
+		memset(&resp->lock, 0, sizeof(resp->lock));
+		locks_init_lock(fl);
+		excl = ntohl(*p++);
+		fl->fl_pid = ntohl(*p++);
+		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
+			return -EIO;
+
+		fl->fl_flags = FL_POSIX;
+		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
+		p = xdr_decode_hyper(p, &start);
+		p = xdr_decode_hyper(p, &len);
+		end = start + len - 1;
+
+		fl->fl_start = s64_to_loff_t(start);
+		if (len == 0 || end < 0)
+			fl->fl_end = OFFSET_MAX;
+		else
+			fl->fl_end = s64_to_loff_t(end);
+	}
+	return 0;
+}
+
+
+static int
+nlm4clt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	*p++ = argp->reclaim? xdr_one : xdr_zero;
+	*p++ = htonl(argp->state);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	*p++ = argp->block? xdr_one : xdr_zero;
+	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
+{
+	struct nlm_lock	*lock = &argp->lock;
+
+	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
+		return -EIO;
+	if (!(p = nlm4_encode_lock(p, lock)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+		return -EIO;
+	*p++ = resp->status;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_encode_testres(p, resp)))
+		return -EIO;
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
+{
+	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+		return -EIO;
+	resp->status = ntohl(*p++);
+	return 0;
+}
+
+/*
+ * Buffer requirements for NLM
+ */
+#define NLM4_void_sz		0
+#define NLM4_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
+#define NLM4_caller_sz		1+XDR_QUADLEN(NLM_MAXSTRLEN)
+#define NLM4_netobj_sz		1+XDR_QUADLEN(XDR_MAX_NETOBJ)
+/* #define NLM4_owner_sz		1+XDR_QUADLEN(NLM4_MAXOWNER) */
+#define NLM4_fhandle_sz		1+XDR_QUADLEN(NFS3_FHSIZE)
+#define NLM4_lock_sz		5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz
+#define NLM4_holder_sz		6+NLM4_netobj_sz
+
+#define NLM4_testargs_sz	NLM4_cookie_sz+1+NLM4_lock_sz
+#define NLM4_lockargs_sz	NLM4_cookie_sz+4+NLM4_lock_sz
+#define NLM4_cancargs_sz	NLM4_cookie_sz+2+NLM4_lock_sz
+#define NLM4_unlockargs_sz	NLM4_cookie_sz+NLM4_lock_sz
+
+#define NLM4_testres_sz		NLM4_cookie_sz+1+NLM4_holder_sz
+#define NLM4_res_sz		NLM4_cookie_sz+1
+#define NLM4_norep_sz		0
+
+#ifndef MAX
+# define MAX(a,b)		(((a) > (b))? (a) : (b))
+#endif
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4clt_decode_norep	NULL
+
+#define PROC(proc, argtype, restype)					\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,		\
+	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,		\
+	.p_bufsiz    = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2	\
+	}
+
+static struct rpc_procinfo	nlm4_procedures[] = {
+    PROC(TEST,		testargs,	testres),
+    PROC(LOCK,		lockargs,	res),
+    PROC(CANCEL,	cancargs,	res),
+    PROC(UNLOCK,	unlockargs,	res),
+    PROC(GRANTED,	testargs,	res),
+    PROC(TEST_MSG,	testargs,	norep),
+    PROC(LOCK_MSG,	lockargs,	norep),
+    PROC(CANCEL_MSG,	cancargs,	norep),
+    PROC(UNLOCK_MSG,	unlockargs,	norep),
+    PROC(GRANTED_MSG,	testargs,	norep),
+    PROC(TEST_RES,	testres,	norep),
+    PROC(LOCK_RES,	res,		norep),
+    PROC(CANCEL_RES,	res,		norep),
+    PROC(UNLOCK_RES,	res,		norep),
+    PROC(GRANTED_RES,	res,		norep),
+#ifdef NLMCLNT_SUPPORT_SHARES
+    PROC(SHARE,		shareargs,	shareres),
+    PROC(UNSHARE,	shareargs,	shareres),
+    PROC(NM_LOCK,	lockargs,	res),
+    PROC(FREE_ALL,	notify,		void),
+#endif
+};
+
+struct rpc_version	nlm_version4 = {
+	.number		= 4,
+	.nrprocs	= 24,
+	.procs		= nlm4_procedures,
+};
diff --git a/fs/locks.c b/fs/locks.c
new file mode 100644
index 0000000..1792ce5
--- /dev/null
+++ b/fs/locks.c
@@ -0,0 +1,2212 @@
+/*
+ *  linux/fs/locks.c
+ *
+ *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
+ *  Doug Evans (dje@spiff.uucp), August 07, 1992
+ *
+ *  Deadlock detection added.
+ *  FIXME: one thing isn't handled yet:
+ *	- mandatory locks (requires lots of changes elsewhere)
+ *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
+ *
+ *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
+ *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
+ *  
+ *  Converted file_lock_table to a linked list from an array, which eliminates
+ *  the limits on how many active file locks are open.
+ *  Chad Page (pageone@netcom.com), November 27, 1994
+ * 
+ *  Removed dependency on file descriptors. dup()'ed file descriptors now
+ *  get the same locks as the original file descriptors, and a close() on
+ *  any file descriptor removes ALL the locks on the file for the current
+ *  process. Since locks still depend on the process id, locks are inherited
+ *  after an exec() but not after a fork(). This agrees with POSIX, and both
+ *  BSD and SVR4 practice.
+ *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
+ *
+ *  Scrapped free list which is redundant now that we allocate locks
+ *  dynamically with kmalloc()/kfree().
+ *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
+ *
+ *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
+ *
+ *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
+ *  fcntl() system call. They have the semantics described above.
+ *
+ *  FL_FLOCK locks are created with calls to flock(), through the flock()
+ *  system call, which is new. Old C libraries implement flock() via fcntl()
+ *  and will continue to use the old, broken implementation.
+ *
+ *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
+ *  with a file pointer (filp). As a result they can be shared by a parent
+ *  process and its children after a fork(). They are removed when the last
+ *  file descriptor referring to the file pointer is closed (unless explicitly
+ *  unlocked). 
+ *
+ *  FL_FLOCK locks never deadlock, an existing lock is always removed before
+ *  upgrading from shared to exclusive (or vice versa). When this happens
+ *  any processes blocked by the current lock are woken up and allowed to
+ *  run before the new lock is applied.
+ *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
+ *
+ *  Removed some race conditions in flock_lock_file(), marked other possible
+ *  races. Just grep for FIXME to see them. 
+ *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
+ *
+ *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
+ *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
+ *  once we've checked for blocking and deadlocking.
+ *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
+ *
+ *  Initial implementation of mandatory locks. SunOS turned out to be
+ *  a rotten model, so I implemented the "obvious" semantics.
+ *  See 'Documentation/mandatory.txt' for details.
+ *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
+ *
+ *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
+ *  check if a file has mandatory locks, used by mmap(), open() and creat() to
+ *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
+ *  Manual, Section 2.
+ *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
+ *
+ *  Tidied up block list handling. Added '/proc/locks' interface.
+ *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
+ *
+ *  Fixed deadlock condition for pathological code that mixes calls to
+ *  flock() and fcntl().
+ *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
+ *
+ *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
+ *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
+ *  guarantee sensible behaviour in the case where file system modules might
+ *  be compiled with different options than the kernel itself.
+ *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
+ *
+ *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
+ *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
+ *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
+ *
+ *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
+ *  locks. Changed process synchronisation to avoid dereferencing locks that
+ *  have already been freed.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
+ *
+ *  Made the block list a circular list to minimise searching in the list.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
+ *
+ *  Made mandatory locking a mount option. Default is not to allow mandatory
+ *  locking.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
+ *
+ *  Some adaptations for NFS support.
+ *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
+ *
+ *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
+ *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
+ *
+ *  Use slab allocator instead of kmalloc/kfree.
+ *  Use generic list implementation from <linux/list.h>.
+ *  Sped up posix_locks_deadlock by only considering blocked locks.
+ *  Matthew Wilcox <willy@debian.org>, March, 2000.
+ *
+ *  Leases and LOCK_MAND
+ *  Matthew Wilcox <willy@debian.org>, June, 2000.
+ *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
+ */
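
The FL_POSIX/FL_FLOCK distinction above is easiest to see from userspace. Below is a
minimal sketch (not part of the patch; the path /tmp/lockdemo is arbitrary) showing that
an fcntl() lock belongs to the process and is therefore not shared with a fork()ed child,
whereas an flock() lock would follow the duplicated descriptor:

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

	/* FL_POSIX lock on the whole file, owned by this process. */
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK");

	if (fork() == 0) {
		/* The child did not inherit the POSIX lock, so F_GETLK
		 * reports the parent's write lock as a conflict. */
		struct flock probe = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
		fcntl(fd, F_GETLK, &probe);
		printf("conflicting lock: type=%d pid=%d\n",
		       (int)probe.l_type, (int)probe.l_pid);
		/* An flock() (FL_FLOCK) lock, by contrast, would be shared
		 * here, since it is attached to the open file description. */
		_exit(0);
	}
	wait(NULL);
	close(fd);
	return 0;
}
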
+
+#include <linux/capability.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
+#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
+#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
+
+int leases_enable = 1;
+int lease_break_time = 45;
+
+#define for_each_lock(inode, lockp) \
+	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
+
+LIST_HEAD(file_lock_list);
+
+EXPORT_SYMBOL(file_lock_list);
+
+static LIST_HEAD(blocked_list);
+
+static kmem_cache_t *filelock_cache;
+
+/* Allocate an empty lock structure. */
+static struct file_lock *locks_alloc_lock(void)
+{
+	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+}
+
+/* Free a lock which is not in use. */
+static inline void locks_free_lock(struct file_lock *fl)
+{
+	if (fl == NULL) {
+		BUG();
+		return;
+	}
+	if (waitqueue_active(&fl->fl_wait))
+		panic("Attempting to free lock with active wait queue");
+
+	if (!list_empty(&fl->fl_block))
+		panic("Attempting to free lock with active block list");
+
+	if (!list_empty(&fl->fl_link))
+		panic("Attempting to free lock on active lock list");
+
+	if (fl->fl_ops) {
+		if (fl->fl_ops->fl_release_private)
+			fl->fl_ops->fl_release_private(fl);
+		fl->fl_ops = NULL;
+	}
+
+	if (fl->fl_lmops) {
+		if (fl->fl_lmops->fl_release_private)
+			fl->fl_lmops->fl_release_private(fl);
+		fl->fl_lmops = NULL;
+	}
+
+	kmem_cache_free(filelock_cache, fl);
+}
+
+void locks_init_lock(struct file_lock *fl)
+{
+	INIT_LIST_HEAD(&fl->fl_link);
+	INIT_LIST_HEAD(&fl->fl_block);
+	init_waitqueue_head(&fl->fl_wait);
+	fl->fl_next = NULL;
+	fl->fl_fasync = NULL;
+	fl->fl_owner = NULL;
+	fl->fl_pid = 0;
+	fl->fl_file = NULL;
+	fl->fl_flags = 0;
+	fl->fl_type = 0;
+	fl->fl_start = fl->fl_end = 0;
+	fl->fl_ops = NULL;
+	fl->fl_lmops = NULL;
+}
+
+EXPORT_SYMBOL(locks_init_lock);
+
+/*
+ * Initialises the fields of the file lock which are invariant for
+ * free file_locks.
+ */
+static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
+{
+	struct file_lock *lock = (struct file_lock *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
+					SLAB_CTOR_CONSTRUCTOR)
+		return;
+
+	locks_init_lock(lock);
+}
+
+/*
+ * Initialize a new lock from an existing file_lock structure.
+ */
+void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	new->fl_owner = fl->fl_owner;
+	new->fl_pid = fl->fl_pid;
+	new->fl_file = fl->fl_file;
+	new->fl_flags = fl->fl_flags;
+	new->fl_type = fl->fl_type;
+	new->fl_start = fl->fl_start;
+	new->fl_end = fl->fl_end;
+	new->fl_ops = fl->fl_ops;
+	new->fl_lmops = fl->fl_lmops;
+	if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
+		fl->fl_ops->fl_copy_lock(new, fl);
+	if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
+		fl->fl_lmops->fl_copy_lock(new, fl);
+}
+
+EXPORT_SYMBOL(locks_copy_lock);
+
+static inline int flock_translate_cmd(int cmd) {
+	if (cmd & LOCK_MAND)
+		return cmd & (LOCK_MAND | LOCK_RW);
+	switch (cmd) {
+	case LOCK_SH:
+		return F_RDLCK;
+	case LOCK_EX:
+		return F_WRLCK;
+	case LOCK_UN:
+		return F_UNLCK;
+	}
+	return -EINVAL;
+}
+
+/* Fill in a file_lock structure with an appropriate FLOCK lock. */
+static int flock_make_lock(struct file *filp, struct file_lock **lock,
+		unsigned int cmd)
+{
+	struct file_lock *fl;
+	int type = flock_translate_cmd(cmd);
+	if (type < 0)
+		return type;
+	
+	fl = locks_alloc_lock();
+	if (fl == NULL)
+		return -ENOMEM;
+
+	fl->fl_file = filp;
+	fl->fl_pid = current->tgid;
+	fl->fl_flags = FL_FLOCK;
+	fl->fl_type = type;
+	fl->fl_end = OFFSET_MAX;
+	
+	*lock = fl;
+	return 0;
+}
+
+static int assign_type(struct file_lock *fl, int type)
+{
+	switch (type) {
+	case F_RDLCK:
+	case F_WRLCK:
+	case F_UNLCK:
+		fl->fl_type = type;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
+ * style lock.
+ */
+static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
+			       struct flock *l)
+{
+	off_t start, end;
+
+	switch (l->l_whence) {
+	case 0: /*SEEK_SET*/
+		start = 0;
+		break;
+	case 1: /*SEEK_CUR*/
+		start = filp->f_pos;
+		break;
+	case 2: /*SEEK_END*/
+		start = i_size_read(filp->f_dentry->d_inode);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
+	   POSIX-2001 defines it. */
+	start += l->l_start;
+	end = start + l->l_len - 1;
+	if (l->l_len < 0) {
+		end = start - 1;
+		start += l->l_len;
+	}
+
+	if (start < 0)
+		return -EINVAL;
+	if (l->l_len > 0 && end < 0)
+		return -EOVERFLOW;
+
+	fl->fl_start = start;	/* we record the absolute position */
+	fl->fl_end = end;
+	if (l->l_len == 0)
+		fl->fl_end = OFFSET_MAX;
+	
+	fl->fl_owner = current->files;
+	fl->fl_pid = current->tgid;
+	fl->fl_file = filp;
+	fl->fl_flags = FL_POSIX;
+	fl->fl_ops = NULL;
+	fl->fl_lmops = NULL;
+
+	return assign_type(fl, l->l_type);
+}
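
A worked example of the l_whence / l_start / l_len handling above, with arbitrarily
chosen values and a current file position of 1000 for the SEEK_CUR row:

	l_whence    l_start   l_len     ->  fl_start   fl_end
	SEEK_SET    100       50            100        149
	SEEK_CUR    0         0             1000       OFFSET_MAX   (l_len == 0: lock to end of file)
	SEEK_SET    200       -100          100        199          (negative l_len, POSIX-2001)
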
+
+#if BITS_PER_LONG == 32
+static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
+				 struct flock64 *l)
+{
+	loff_t start;
+
+	switch (l->l_whence) {
+	case 0: /*SEEK_SET*/
+		start = 0;
+		break;
+	case 1: /*SEEK_CUR*/
+		start = filp->f_pos;
+		break;
+	case 2: /*SEEK_END*/
+		start = i_size_read(filp->f_dentry->d_inode);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (((start += l->l_start) < 0) || (l->l_len < 0))
+		return -EINVAL;
+	fl->fl_end = start + l->l_len - 1;
+	if (l->l_len > 0 && fl->fl_end < 0)
+		return -EOVERFLOW;
+	fl->fl_start = start;	/* we record the absolute position */
+	if (l->l_len == 0)
+		fl->fl_end = OFFSET_MAX;
+	
+	fl->fl_owner = current->files;
+	fl->fl_pid = current->tgid;
+	fl->fl_file = filp;
+	fl->fl_flags = FL_POSIX;
+	fl->fl_ops = NULL;
+	fl->fl_lmops = NULL;
+
+	switch (l->l_type) {
+	case F_RDLCK:
+	case F_WRLCK:
+	case F_UNLCK:
+		fl->fl_type = l->l_type;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return (0);
+}
+#endif
+
+/* default lease lock manager operations */
+static void lease_break_callback(struct file_lock *fl)
+{
+	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
+}
+
+static void lease_release_private_callback(struct file_lock *fl)
+{
+	if (!fl->fl_file)
+		return;
+
+	f_delown(fl->fl_file);
+	fl->fl_file->f_owner.signum = 0;
+}
+
+int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
+{
+	return fl->fl_file == try->fl_file;
+}
+
+struct lock_manager_operations lease_manager_ops = {
+	.fl_break = lease_break_callback,
+	.fl_release_private = lease_release_private_callback,
+	.fl_mylease = lease_mylease_callback,
+	.fl_change = lease_modify,
+};
+
+/*
+ * Initialize a lease, use the default lock manager operations
+ */
+static int lease_init(struct file *filp, int type, struct file_lock *fl)
+{
+	fl->fl_owner = current->files;
+	fl->fl_pid = current->tgid;
+
+	fl->fl_file = filp;
+	fl->fl_flags = FL_LEASE;
+	if (assign_type(fl, type) != 0) {
+		locks_free_lock(fl);
+		return -EINVAL;
+	}
+	fl->fl_start = 0;
+	fl->fl_end = OFFSET_MAX;
+	fl->fl_ops = NULL;
+	fl->fl_lmops = &lease_manager_ops;
+	return 0;
+}
+
+/* Allocate a file_lock initialised to this type of lease */
+static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
+{
+	struct file_lock *fl = locks_alloc_lock();
+	int error;
+
+	if (fl == NULL)
+		return -ENOMEM;
+
+	error = lease_init(filp, type, fl);
+	if (error)
+		return error;
+	*flp = fl;
+	return 0;
+}
+
+/* Check if two locks overlap each other.
+ */
+static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
+{
+	return ((fl1->fl_end >= fl2->fl_start) &&
+		(fl2->fl_end >= fl1->fl_start));
+}
+
+/*
+ * Check whether two locks have the same owner.
+ */
+static inline int
+posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+{
+	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
+		return fl2->fl_lmops == fl1->fl_lmops &&
+			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
+	return fl1->fl_owner == fl2->fl_owner;
+}
+
+/* Remove waiter from blocker's block list.
+ * When blocker ends up pointing to itself then the list is empty.
+ */
+static inline void __locks_delete_block(struct file_lock *waiter)
+{
+	list_del_init(&waiter->fl_block);
+	list_del_init(&waiter->fl_link);
+	waiter->fl_next = NULL;
+}
+
+/* Remove waiter from the block lists, taking the kernel lock around
+ * __locks_delete_block().
+ */
+static void locks_delete_block(struct file_lock *waiter)
+{
+	lock_kernel();
+	__locks_delete_block(waiter);
+	unlock_kernel();
+}
+
+/* Insert waiter into blocker's block list.
+ * We use a circular list so that processes can be easily woken up in
+ * the order they blocked. The documentation doesn't require this but
+ * it seems like the reasonable thing to do.
+ */
+static void locks_insert_block(struct file_lock *blocker, 
+			       struct file_lock *waiter)
+{
+	if (!list_empty(&waiter->fl_block)) {
+		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
+			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
+			waiter->fl_start, waiter->fl_end, waiter->fl_type);
+		__locks_delete_block(waiter);
+	}
+	list_add_tail(&waiter->fl_block, &blocker->fl_block);
+	waiter->fl_next = blocker;
+	if (IS_POSIX(blocker))
+		list_add(&waiter->fl_link, &blocked_list);
+}
+
+/* Wake up processes blocked waiting for blocker.
+ * If told to wait then schedule the processes until the block list
+ * is empty, otherwise empty the block list ourselves.
+ */
+static void locks_wake_up_blocks(struct file_lock *blocker)
+{
+	while (!list_empty(&blocker->fl_block)) {
+		struct file_lock *waiter = list_entry(blocker->fl_block.next,
+				struct file_lock, fl_block);
+		__locks_delete_block(waiter);
+		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
+			waiter->fl_lmops->fl_notify(waiter);
+		else
+			wake_up(&waiter->fl_wait);
+	}
+}
+
+/* Insert file lock fl into an inode's lock list at the position indicated
+ * by pos. At the same time add the lock to the global file lock list.
+ */
+static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+{
+	list_add(&fl->fl_link, &file_lock_list);
+
+	/* insert into file's list */
+	fl->fl_next = *pos;
+	*pos = fl;
+
+	if (fl->fl_ops && fl->fl_ops->fl_insert)
+		fl->fl_ops->fl_insert(fl);
+}
+
+/*
+ * Delete a lock and then free it.
+ * Wake up processes that are blocked waiting for this lock,
+ * notify the FS that the lock has been cleared and
+ * finally free the lock.
+ */
+static void locks_delete_lock(struct file_lock **thisfl_p)
+{
+	struct file_lock *fl = *thisfl_p;
+
+	*thisfl_p = fl->fl_next;
+	fl->fl_next = NULL;
+	list_del_init(&fl->fl_link);
+
+	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
+	if (fl->fl_fasync != NULL) {
+		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
+		fl->fl_fasync = NULL;
+	}
+
+	if (fl->fl_ops && fl->fl_ops->fl_remove)
+		fl->fl_ops->fl_remove(fl);
+
+	locks_wake_up_blocks(fl);
+	locks_free_lock(fl);
+}
+
+/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
+ * checks for shared/exclusive status of overlapping locks.
+ */
+static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+{
+	if (sys_fl->fl_type == F_WRLCK)
+		return 1;
+	if (caller_fl->fl_type == F_WRLCK)
+		return 1;
+	return 0;
+}
+
+/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
+ * checking before calling the locks_conflict().
+ */
+static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+{
+	/* POSIX locks owned by the same process do not conflict with
+	 * each other.
+	 */
+	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
+		return (0);
+
+	/* Check whether they overlap */
+	if (!locks_overlap(caller_fl, sys_fl))
+		return 0;
+
+	return (locks_conflict(caller_fl, sys_fl));
+}
+
+/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
+ * checking before calling the locks_conflict().
+ */
+static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+{
+	/* FLOCK locks referring to the same filp do not conflict with
+	 * each other.
+	 */
+	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
+		return (0);
+	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
+		return 0;
+
+	return (locks_conflict(caller_fl, sys_fl));
+}
+
+static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
+{
+	int result = 0;
+	DECLARE_WAITQUEUE(wait, current);
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(fl_wait, &wait);
+	if (timeout == 0)
+		schedule();
+	else
+		result = schedule_timeout(timeout);
+	if (signal_pending(current))
+		result = -ERESTARTSYS;
+	remove_wait_queue(fl_wait, &wait);
+	__set_current_state(TASK_RUNNING);
+	return result;
+}
+
+static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
+{
+	int result;
+	locks_insert_block(blocker, waiter);
+	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
+	__locks_delete_block(waiter);
+	return result;
+}
+
+struct file_lock *
+posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+	struct file_lock *cfl;
+
+	lock_kernel();
+	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
+		if (!IS_POSIX(cfl))
+			continue;
+		if (posix_locks_conflict(cfl, fl))
+			break;
+	}
+	unlock_kernel();
+
+	return (cfl);
+}
+
+EXPORT_SYMBOL(posix_test_lock);
+
+/* This function tests for deadlock condition before putting a process to
+ * sleep. The detection scheme is no longer recursive. Recursive was neat,
+ * but dangerous - we risked stack corruption if the lock data was bad, or
+ * if the recursion was too deep for any other reason.
+ *
+ * We rely on the fact that a task can only be on one lock's wait queue
+ * at a time. When we find blocked_task on a wait queue we can re-search
+ * with blocked_task equal to that queue's owner, until either blocked_task
+ * isn't found, or blocked_task is found on a queue owned by my_task.
+ *
+ * Note: the above assumption may not be true when handling lock requests
+ * from a broken NFS client. But broken NFS clients have a lot more to
+ * worry about than proper deadlock detection anyway... --okir
+ */
+int posix_locks_deadlock(struct file_lock *caller_fl,
+				struct file_lock *block_fl)
+{
+	struct list_head *tmp;
+
+next_task:
+	if (posix_same_owner(caller_fl, block_fl))
+		return 1;
+	list_for_each(tmp, &blocked_list) {
+		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
+		if (posix_same_owner(fl, block_fl)) {
+			fl = fl->fl_next;
+			block_fl = fl;
+			goto next_task;
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(posix_locks_deadlock);
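
A userspace sketch of the cycle this catches (not part of the patch; /tmp/deadlockdemo
and the sleep()-based ordering are only for illustration): two processes each lock half
of a file and then request the other half with F_SETLKW, and the request that would
complete the cycle fails with EDEADLK instead of sleeping forever:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static int setlk(int fd, int cmd, off_t start, off_t len)
{
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = start, .l_len = len,
	};
	return fcntl(fd, cmd, &fl);
}

int main(void)
{
	int fd = open("/tmp/deadlockdemo", O_RDWR | O_CREAT, 0600);

	if (fork() == 0) {
		setlk(fd, F_SETLK, 100, 100);	/* child locks [100,199] */
		sleep(1);			/* let the parent lock [0,99] */
		setlk(fd, F_SETLKW, 0, 100);	/* child blocks on the parent */
		_exit(0);
	}

	setlk(fd, F_SETLK, 0, 100);		/* parent locks [0,99] */
	sleep(2);				/* let the child block */

	/* This request would complete the wait cycle, so the kernel
	 * returns EDEADLK instead of putting the parent to sleep too. */
	if (setlk(fd, F_SETLKW, 100, 100) == -1)
		printf("F_SETLKW: %s\n", strerror(errno));

	close(fd);	/* drops the parent's POSIX locks, unblocking the child */
	wait(NULL);
	return 0;
}
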
+
+/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
+ * at the head of the list, but that's secret knowledge known only to
+ * flock_lock_file and posix_lock_file.
+ */
+static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
+{
+	struct file_lock **before;
+	struct inode * inode = filp->f_dentry->d_inode;
+	int error = 0;
+	int found = 0;
+
+	lock_kernel();
+	for_each_lock(inode, before) {
+		struct file_lock *fl = *before;
+		if (IS_POSIX(fl))
+			break;
+		if (IS_LEASE(fl))
+			continue;
+		if (filp != fl->fl_file)
+			continue;
+		if (new_fl->fl_type == fl->fl_type)
+			goto out;
+		found = 1;
+		locks_delete_lock(before);
+		break;
+	}
+	unlock_kernel();
+
+	if (new_fl->fl_type == F_UNLCK)
+		return 0;
+
+	/*
+	 * If a higher-priority process was blocked on the old file lock,
+	 * give it the opportunity to lock the file.
+	 */
+	if (found)
+		cond_resched();
+
+	lock_kernel();
+	for_each_lock(inode, before) {
+		struct file_lock *fl = *before;
+		if (IS_POSIX(fl))
+			break;
+		if (IS_LEASE(fl))
+			continue;
+		if (!flock_locks_conflict(new_fl, fl))
+			continue;
+		error = -EAGAIN;
+		if (new_fl->fl_flags & FL_SLEEP) {
+			locks_insert_block(fl, new_fl);
+		}
+		goto out;
+	}
+	locks_insert_lock(&inode->i_flock, new_fl);
+	error = 0;
+
+out:
+	unlock_kernel();
+	return error;
+}
+
+EXPORT_SYMBOL(posix_lock_file);
+
+static int __posix_lock_file(struct inode *inode, struct file_lock *request)
+{
+	struct file_lock *fl;
+	struct file_lock *new_fl, *new_fl2;
+	struct file_lock *left = NULL;
+	struct file_lock *right = NULL;
+	struct file_lock **before;
+	int error, added = 0;
+
+	/*
+	 * We may need two file_lock structures for this operation,
+	 * so we get them in advance to avoid races.
+	 */
+	new_fl = locks_alloc_lock();
+	new_fl2 = locks_alloc_lock();
+
+	lock_kernel();
+	if (request->fl_type != F_UNLCK) {
+		for_each_lock(inode, before) {
+			struct file_lock *fl = *before;
+			if (!IS_POSIX(fl))
+				continue;
+			if (!posix_locks_conflict(request, fl))
+				continue;
+			error = -EAGAIN;
+			if (!(request->fl_flags & FL_SLEEP))
+				goto out;
+			error = -EDEADLK;
+			if (posix_locks_deadlock(request, fl))
+				goto out;
+			error = -EAGAIN;
+			locks_insert_block(fl, request);
+			goto out;
+  		}
+  	}
+
+	/* If we're just looking for a conflict, we're done. */
+	error = 0;
+	if (request->fl_flags & FL_ACCESS)
+		goto out;
+
+	error = -ENOLCK; /* "no luck" */
+	if (!(new_fl && new_fl2))
+		goto out;
+
+	/*
+	 * We've allocated the new locks in advance, so there are no
+	 * errors possible (and no blocking operations) from here on.
+	 * 
+	 * Find the first old lock with the same owner as the new lock.
+	 */
+	
+	before = &inode->i_flock;
+
+	/* First skip locks owned by other processes.  */
+	while ((fl = *before) && (!IS_POSIX(fl) ||
+				  !posix_same_owner(request, fl))) {
+		before = &fl->fl_next;
+	}
+
+	/* Process locks with this owner.  */
+	while ((fl = *before) && posix_same_owner(request, fl)) {
+		/* Detect adjacent or overlapping regions (if same lock type)
+		 */
+		if (request->fl_type == fl->fl_type) {
+			if (fl->fl_end < request->fl_start - 1)
+				goto next_lock;
+			/* If the next lock in the list has entirely bigger
+			 * addresses than the new one, insert the lock here.
+			 */
+			if (fl->fl_start > request->fl_end + 1)
+				break;
+
+			/* If we come here, the new and old lock are of the
+			 * same type and adjacent or overlapping. Make one
+			 * lock yielding from the lower start address of both
+			 * locks to the higher end address.
+			 */
+			if (fl->fl_start > request->fl_start)
+				fl->fl_start = request->fl_start;
+			else
+				request->fl_start = fl->fl_start;
+			if (fl->fl_end < request->fl_end)
+				fl->fl_end = request->fl_end;
+			else
+				request->fl_end = fl->fl_end;
+			if (added) {
+				locks_delete_lock(before);
+				continue;
+			}
+			request = fl;
+			added = 1;
+		}
+		else {
+			/* Processing for different lock types is a bit
+			 * more complex.
+			 */
+			if (fl->fl_end < request->fl_start)
+				goto next_lock;
+			if (fl->fl_start > request->fl_end)
+				break;
+			if (request->fl_type == F_UNLCK)
+				added = 1;
+			if (fl->fl_start < request->fl_start)
+				left = fl;
+			/* If the next lock in the list has a higher end
+			 * address than the new one, insert the new one here.
+			 */
+			if (fl->fl_end > request->fl_end) {
+				right = fl;
+				break;
+			}
+			if (fl->fl_start >= request->fl_start) {
+				/* The new lock completely replaces an old
+				 * one (This may happen several times).
+				 */
+				if (added) {
+					locks_delete_lock(before);
+					continue;
+				}
+				/* Replace the old lock with the new one.
+				 * Wake up anybody waiting for the old one,
+				 * as the change in lock type might satisfy
+				 * their needs.
+				 */
+				locks_wake_up_blocks(fl);
+				fl->fl_start = request->fl_start;
+				fl->fl_end = request->fl_end;
+				fl->fl_type = request->fl_type;
+				fl->fl_u = request->fl_u;
+				request = fl;
+				added = 1;
+			}
+		}
+		/* Go on to next lock.
+		 */
+	next_lock:
+		before = &fl->fl_next;
+	}
+
+	error = 0;
+	if (!added) {
+		if (request->fl_type == F_UNLCK)
+			goto out;
+		locks_copy_lock(new_fl, request);
+		locks_insert_lock(before, new_fl);
+		new_fl = NULL;
+	}
+	if (right) {
+		if (left == right) {
+			/* The new lock breaks the old one in two pieces,
+			 * so we have to use the second new lock.
+			 */
+			left = new_fl2;
+			new_fl2 = NULL;
+			locks_copy_lock(left, right);
+			locks_insert_lock(before, left);
+		}
+		right->fl_start = request->fl_end + 1;
+		locks_wake_up_blocks(right);
+	}
+	if (left) {
+		left->fl_end = request->fl_start - 1;
+		locks_wake_up_blocks(left);
+	}
+ out:
+	unlock_kernel();
+	/*
+	 * Free any unused locks.
+	 */
+	if (new_fl)
+		locks_free_lock(new_fl);
+	if (new_fl2)
+		locks_free_lock(new_fl2);
+	return error;
+}
+
+/**
+ * posix_lock_file - Apply a POSIX-style lock to a file
+ * @filp: The file to apply the lock to
+ * @fl: The lock to be applied
+ *
+ * Add a POSIX style lock to a file.
+ * We merge adjacent & overlapping locks whenever possible.
+ * POSIX locks are sorted by owner task, then by starting address
+ */
+int posix_lock_file(struct file *filp, struct file_lock *fl)
+{
+	return __posix_lock_file(filp->f_dentry->d_inode, fl);
+}
+
+/**
+ * posix_lock_file_wait - Apply a POSIX-style lock to a file
+ * @filp: The file to apply the lock to
+ * @fl: The lock to be applied
+ *
+ * Add a POSIX style lock to a file, sleeping (if %FL_SLEEP is set) until
+ * any conflicting lock has been released.
+ * We merge adjacent & overlapping locks whenever possible.
+ * POSIX locks are sorted by owner task, then by starting address.
+ */
+int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+	int error;
+	might_sleep();
+	for (;;) {
+		error = __posix_lock_file(filp->f_dentry->d_inode, fl);
+		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
+			break;
+		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+		if (!error)
+			continue;
+
+		locks_delete_block(fl);
+		break;
+	}
+	return error;
+}
+EXPORT_SYMBOL(posix_lock_file_wait);
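
The merging described in the comment above can be observed directly: two adjacent
write locks taken by the same owner end up as a single entry. A minimal sketch
(the path /tmp/mergedemo is arbitrary):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/mergedemo", O_RDWR | O_CREAT, 0600);
	struct flock a = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
			   .l_start = 0,   .l_len = 100 };
	struct flock b = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
			   .l_start = 100, .l_len = 100 };

	fcntl(fd, F_SETLK, &a);		/* lock bytes [0,99]   */
	fcntl(fd, F_SETLK, &b);		/* lock bytes [100,199] */

	/* Same owner, same type, adjacent ranges: __posix_lock_file()
	 * coalesces them, and /proc/locks shows a single WRITE record
	 * covering 0-199 for this file. */
	pause();			/* keep the locks alive for inspection */
	return 0;
}
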
+
+/**
+ * locks_mandatory_locked - Check for an active lock
+ * @inode: the file to check
+ *
+ * Searches the inode's list of locks for any POSIX locks held by another owner.
+ * This function is called from locks_verify_locked() only.
+ */
+int locks_mandatory_locked(struct inode *inode)
+{
+	fl_owner_t owner = current->files;
+	struct file_lock *fl;
+
+	/*
+	 * Search the lock list for this inode for any POSIX locks.
+	 */
+	lock_kernel();
+	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+		if (!IS_POSIX(fl))
+			continue;
+		if (fl->fl_owner != owner)
+			break;
+	}
+	unlock_kernel();
+	return fl ? -EAGAIN : 0;
+}
+
+/**
+ * locks_mandatory_area - Check for a conflicting lock
+ * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
+ *		for shared
+ * @inode:      the file to check
+ * @filp:       how the file was opened (if it was)
+ * @offset:     start of area to check
+ * @count:      length of area to check
+ *
+ * Searches the inode's list of locks to find any POSIX locks which conflict.
+ * This function is called from rw_verify_area() and
+ * locks_verify_truncate().
+ */
+int locks_mandatory_area(int read_write, struct inode *inode,
+			 struct file *filp, loff_t offset,
+			 size_t count)
+{
+	struct file_lock fl;
+	int error;
+
+	locks_init_lock(&fl);
+	fl.fl_owner = current->files;
+	fl.fl_pid = current->tgid;
+	fl.fl_file = filp;
+	fl.fl_flags = FL_POSIX | FL_ACCESS;
+	if (filp && !(filp->f_flags & O_NONBLOCK))
+		fl.fl_flags |= FL_SLEEP;
+	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
+	fl.fl_start = offset;
+	fl.fl_end = offset + count - 1;
+
+	for (;;) {
+		error = __posix_lock_file(inode, &fl);
+		if (error != -EAGAIN)
+			break;
+		if (!(fl.fl_flags & FL_SLEEP))
+			break;
+		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
+		if (!error) {
+			/*
+			 * If we've been sleeping someone might have
+			 * changed the permissions behind our back.
+			 */
+			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+				continue;
+		}
+
+		locks_delete_block(&fl);
+		break;
+	}
+
+	return error;
+}
+
+EXPORT_SYMBOL(locks_mandatory_area);
+
+/* We already had a lease on this file; just change its type */
+int lease_modify(struct file_lock **before, int arg)
+{
+	struct file_lock *fl = *before;
+	int error = assign_type(fl, arg);
+
+	if (error)
+		return error;
+	locks_wake_up_blocks(fl);
+	if (arg == F_UNLCK)
+		locks_delete_lock(before);
+	return 0;
+}
+
+EXPORT_SYMBOL(lease_modify);
+
+static void time_out_leases(struct inode *inode)
+{
+	struct file_lock **before;
+	struct file_lock *fl;
+
+	before = &inode->i_flock;
+	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
+		if ((fl->fl_break_time == 0)
+				|| time_before(jiffies, fl->fl_break_time)) {
+			before = &fl->fl_next;
+			continue;
+		}
+		printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid);
+		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
+		if (fl == *before)	/* lease_modify may have freed fl */
+			before = &fl->fl_next;
+	}
+}
+
+/**
+ *	__break_lease	-	revoke all outstanding leases on file
+ *	@inode: the inode of the file to return
+ *	@mode: the open mode (read or write)
+ *
+ *	break_lease (inlined for speed) has checked there already
+ *	is a lease on this file.  Leases are broken on a call to open()
+ *	or truncate().  This function can sleep unless you
+ *	specified %O_NONBLOCK to your open().
+ */
+int __break_lease(struct inode *inode, unsigned int mode)
+{
+	int error = 0, future;
+	struct file_lock *new_fl, *flock;
+	struct file_lock *fl;
+	int alloc_err;
+	unsigned long break_time;
+	int i_have_this_lease = 0;
+
+	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
+			&new_fl);
+
+	lock_kernel();
+
+	time_out_leases(inode);
+
+	flock = inode->i_flock;
+	if ((flock == NULL) || !IS_LEASE(flock))
+		goto out;
+
+	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
+		if (fl->fl_owner == current->files)
+			i_have_this_lease = 1;
+
+	if (mode & FMODE_WRITE) {
+		/* If we want write access, we have to revoke any lease. */
+		future = F_UNLCK | F_INPROGRESS;
+	} else if (flock->fl_type & F_INPROGRESS) {
+		/* If the lease is already being broken, we just leave it */
+		future = flock->fl_type;
+	} else if (flock->fl_type & F_WRLCK) {
+		/* Downgrade the exclusive lease to a read-only lease. */
+		future = F_RDLCK | F_INPROGRESS;
+	} else {
+		/* the existing lease was read-only, so we can read too. */
+		goto out;
+	}
+
+	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
+		error = alloc_err;
+		goto out;
+	}
+
+	break_time = 0;
+	if (lease_break_time > 0) {
+		break_time = jiffies + lease_break_time * HZ;
+		if (break_time == 0)
+			break_time++;	/* so that 0 means no break time */
+	}
+
+	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
+		if (fl->fl_type != future) {
+			fl->fl_type = future;
+			fl->fl_break_time = break_time;
+			/* lease must have lmops break callback */
+			fl->fl_lmops->fl_break(fl);
+		}
+	}
+
+	if (i_have_this_lease || (mode & O_NONBLOCK)) {
+		error = -EWOULDBLOCK;
+		goto out;
+	}
+
+restart:
+	break_time = flock->fl_break_time;
+	if (break_time != 0) {
+		break_time -= jiffies;
+		if (break_time == 0)
+			break_time++;
+	}
+	error = locks_block_on_timeout(flock, new_fl, break_time);
+	if (error >= 0) {
+		if (error == 0)
+			time_out_leases(inode);
+		/* Wait for the next lease that has not been broken yet */
+		for (flock = inode->i_flock; flock && IS_LEASE(flock);
+				flock = flock->fl_next) {
+			if (flock->fl_type & F_INPROGRESS)
+				goto restart;
+		}
+		error = 0;
+	}
+
+out:
+	unlock_kernel();
+	if (!alloc_err)
+		locks_free_lock(new_fl);
+	return error;
+}
+
+EXPORT_SYMBOL(__break_lease);
+
+/**
+ *	lease_get_mtime
+ *	@inode: the inode
+ *      @time:  pointer to a timespec which will contain the last modified time
+ *
+ * This is to force NFS clients to flush their caches for files with
+ * exclusive leases.  The justification is that if someone has an
+ * exclusive lease, then they could be modifying it.
+ */
+void lease_get_mtime(struct inode *inode, struct timespec *time)
+{
+	struct file_lock *flock = inode->i_flock;
+	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
+		*time = current_fs_time(inode->i_sb);
+	else
+		*time = inode->i_mtime;
+}
+
+EXPORT_SYMBOL(lease_get_mtime);
+
+/**
+ *	fcntl_getlease - Enquire what lease is currently active
+ *	@filp: the file
+ *
+ *	The value returned by this function will be one of
+ *	(if no lease break is pending):
+ *
+ *	%F_RDLCK to indicate a shared lease is held.
+ *
+ *	%F_WRLCK to indicate an exclusive lease is held.
+ *
+ *	%F_UNLCK to indicate no lease is held.
+ *
+ *	(if a lease break is pending):
+ *
+ *	%F_RDLCK to indicate an exclusive lease needs to be
+ *		changed to a shared lease (or removed).
+ *
+ *	%F_UNLCK to indicate the lease needs to be removed.
+ *
+ *	XXX: sfr & willy disagree over whether F_INPROGRESS
+ *	should be returned to userspace.
+ */
+int fcntl_getlease(struct file *filp)
+{
+	struct file_lock *fl;
+	int type = F_UNLCK;
+
+	lock_kernel();
+	time_out_leases(filp->f_dentry->d_inode);
+	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
+			fl = fl->fl_next) {
+		if (fl->fl_file == filp) {
+			type = fl->fl_type & ~F_INPROGRESS;
+			break;
+		}
+	}
+	unlock_kernel();
+	return type;
+}
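
From userspace the values documented above come back directly from fcntl(F_GETLEASE);
a minimal sketch (error handling omitted, file name arbitrary):

#define _GNU_SOURCE		/* F_GETLEASE */
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/tmp/leasedemo", O_RDONLY);

	switch (fcntl(fd, F_GETLEASE)) {
	case F_RDLCK: printf("shared lease held on fd\n");    break;
	case F_WRLCK: printf("exclusive lease held on fd\n"); break;
	case F_UNLCK: printf("no lease held on fd\n");        break;
	}
	return 0;
}
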
+
+/**
+ *	__setlease	-	sets a lease on an open file
+ *	@filp: file pointer
+ *	@arg: type of lease to obtain
+ *	@flp: input - file_lock to use, output - file_lock inserted
+ *
+ *	The (input) flp->fl_lmops->fl_break function is required
+ *	by break_lease().
+ *
+ *	Called with kernel lock held.
+ */
+int __setlease(struct file *filp, long arg, struct file_lock **flp)
+{
+	struct file_lock *fl, **before, **my_before = NULL, *lease = *flp;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int error, rdlease_count = 0, wrlease_count = 0;
+
+	time_out_leases(inode);
+
+	error = -EINVAL;
+	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
+		goto out;
+
+	error = -EAGAIN;
+	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+		goto out;
+	if ((arg == F_WRLCK)
+	    && ((atomic_read(&dentry->d_count) > 1)
+		|| (atomic_read(&inode->i_count) > 1)))
+		goto out;
+
+	/*
+	 * At this point, we know that if there is an exclusive
+	 * lease on this file, then we hold it on this filp
+	 * (otherwise our open of this file would have blocked).
+	 * And if we are trying to acquire an exclusive lease,
+	 * then the file is not open by anyone (including us)
+	 * except for this filp.
+	 */
+	for (before = &inode->i_flock;
+			((fl = *before) != NULL) && IS_LEASE(fl);
+			before = &fl->fl_next) {
+		if (lease->fl_lmops->fl_mylease(fl, lease))
+			my_before = before;
+		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
+			/*
+			 * Someone is in the process of opening this
+			 * file for writing so we may not take an
+			 * exclusive lease on it.
+			 */
+			wrlease_count++;
+		else
+			rdlease_count++;
+	}
+
+	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
+	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
+		goto out;
+
+	if (my_before != NULL) {
+		error = lease->fl_lmops->fl_change(my_before, arg);
+		goto out;
+	}
+
+	error = 0;
+	if (arg == F_UNLCK)
+		goto out;
+
+	error = -EINVAL;
+	if (!leases_enable)
+		goto out;
+
+	error = lease_alloc(filp, arg, &fl);
+	if (error)
+		goto out;
+
+	locks_copy_lock(fl, lease);
+
+	locks_insert_lock(before, fl);
+
+	*flp = fl;
+out:
+	return error;
+}
+
+/**
+ *	setlease	-	sets a lease on an open file
+ *	@filp: file pointer
+ *	@arg: type of lease to obtain
+ *	@lease: file_lock to use
+ *
+ *	Call this to establish a lease on the file.
+ *	The fl_lmops fl_break function is required by break_lease
+ */
+
+int setlease(struct file *filp, long arg, struct file_lock **lease)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+		return -EACCES;
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+	error = security_file_lock(filp, arg);
+	if (error)
+		return error;
+
+	lock_kernel();
+	error = __setlease(filp, arg, lease);
+	unlock_kernel();
+
+	return error;
+}
+
+EXPORT_SYMBOL(setlease);
+
+/**
+ *	fcntl_setlease	-	sets a lease on an open file
+ *	@fd: open file descriptor
+ *	@filp: file pointer
+ *	@arg: type of lease to obtain
+ *
+ *	Call this fcntl to establish a lease on the file.
+ *	Note that you also need to call %F_SETSIG to
+ *	receive a signal when the lease is broken.
+ */
+int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+	struct file_lock fl, *flp = &fl;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+		return -EACCES;
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+	error = security_file_lock(filp, arg);
+	if (error)
+		return error;
+
+	locks_init_lock(&fl);
+	error = lease_init(filp, arg, &fl);
+	if (error)
+		return error;
+
+	lock_kernel();
+
+	error = __setlease(filp, arg, &flp);
+	if (error)
+		goto out_unlock;
+
+	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+	if (error < 0) {
+		/* remove lease just inserted by __setlease */
+		flp->fl_type = F_UNLCK | F_INPROGRESS;
+		flp->fl_break_time = jiffies - 10;
+		time_out_leases(inode);
+		goto out_unlock;
+	}
+
+	error = f_setown(filp, current->pid, 0);
+out_unlock:
+	unlock_kernel();
+	return error;
+}
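
A userspace sketch of the F_SETLEASE / F_SETSIG pairing the comment above calls for
(not part of the patch; the path and the choice of SIGRTMIN are arbitrary): the process
takes an exclusive lease and asks for a real-time signal so that, when another open()
breaks the lease, it has lease_break_time (45 seconds by default, see above) to give
it up:

#define _GNU_SOURCE		/* F_SETLEASE, F_SETSIG */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void lease_broken(int sig, siginfo_t *info, void *ctx)
{
	/* With F_SETSIG and SA_SIGINFO, info->si_fd names the descriptor
	 * whose lease is being broken. */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = lease_broken,
				.sa_flags = SA_SIGINFO };
	int fd = open("/tmp/leasedemo", O_RDWR | O_CREAT, 0600);

	sigaction(SIGRTMIN, &sa, NULL);
	fcntl(fd, F_SETSIG, SIGRTMIN);			/* how breaks are reported */
	if (fcntl(fd, F_SETLEASE, F_WRLCK) == -1)	/* take an exclusive lease */
		perror("F_SETLEASE");

	pause();					/* until someone opens the file */
	fcntl(fd, F_SETLEASE, F_UNLCK);			/* relinquish the lease */
	return 0;
}
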
+
+/**
+ * flock_lock_file_wait - Apply a FLOCK-style lock to a file
+ * @filp: The file to apply the lock to
+ * @fl: The lock to be applied
+ *
+ * Add a FLOCK style lock to a file.
+ */
+int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+	int error;
+	might_sleep();
+	for (;;) {
+		error = flock_lock_file(filp, fl);
+		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
+			break;
+		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+		if (!error)
+			continue;
+
+		locks_delete_block(fl);
+		break;
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(flock_lock_file_wait);
+
+/**
+ *	sys_flock: - flock() system call.
+ *	@fd: the file descriptor to lock.
+ *	@cmd: the type of lock to apply.
+ *
+ *	Apply a %FL_FLOCK style lock to an open file descriptor.
+ *	The @cmd can be one of
+ *
+ *	%LOCK_SH -- a shared lock.
+ *
+ *	%LOCK_EX -- an exclusive lock.
+ *
+ *	%LOCK_UN -- remove an existing lock.
+ *
+ *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
+ *
+ *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
+ *	processes read and write access respectively.
+ */
+asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
+{
+	struct file *filp;
+	struct file_lock *lock;
+	int can_sleep, unlock;
+	int error;
+
+	error = -EBADF;
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	can_sleep = !(cmd & LOCK_NB);
+	cmd &= ~LOCK_NB;
+	unlock = (cmd == LOCK_UN);
+
+	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
+		goto out_putf;
+
+	error = flock_make_lock(filp, &lock, cmd);
+	if (error)
+		goto out_putf;
+	if (can_sleep)
+		lock->fl_flags |= FL_SLEEP;
+
+	error = security_file_lock(filp, cmd);
+	if (error)
+		goto out_free;
+
+	if (filp->f_op && filp->f_op->flock)
+		error = filp->f_op->flock(filp,
+					  (can_sleep) ? F_SETLKW : F_SETLK,
+					  lock);
+	else
+		error = flock_lock_file_wait(filp, lock);
+
+ out_free:
+	if (list_empty(&lock->fl_link)) {
+		locks_free_lock(lock);
+	}
+
+ out_putf:
+	fput(filp);
+ out:
+	return error;
+}
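
For completeness, the matching userspace call (a sketch; /tmp/flockdemo is arbitrary):
LOCK_NB turns the blocking acquisition into an immediate EWOULDBLOCK failure, and the
lock disappears when the last descriptor for this open file is closed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/flockdemo", O_RDWR | O_CREAT, 0600);

	/* LOCK_EX | LOCK_NB: try for an exclusive FL_FLOCK lock, but fail
	 * immediately instead of sleeping if another open file holds one. */
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		perror("flock");
		return 1;
	}

	/* ... critical section ... */

	flock(fd, LOCK_UN);	/* or just close(fd): the lock is removed when
				 * the last descriptor for this open file goes */
	close(fd);
	return 0;
}
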
+
+/* Report the first existing lock that would conflict with l.
+ * This implements the F_GETLK command of fcntl().
+ */
+int fcntl_getlk(struct file *filp, struct flock __user *l)
+{
+	struct file_lock *fl, file_lock;
+	struct flock flock;
+	int error;
+
+	error = -EFAULT;
+	if (copy_from_user(&flock, l, sizeof(flock)))
+		goto out;
+	error = -EINVAL;
+	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
+		goto out;
+
+	error = flock_to_posix_lock(filp, &file_lock, &flock);
+	if (error)
+		goto out;
+
+	if (filp->f_op && filp->f_op->lock) {
+		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
+		if (error < 0)
+			goto out;
+		else
+			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
+	} else {
+		fl = posix_test_lock(filp, &file_lock);
+	}
+ 
+	flock.l_type = F_UNLCK;
+	if (fl != NULL) {
+		flock.l_pid = fl->fl_pid;
+#if BITS_PER_LONG == 32
+		/*
+		 * Make sure we can represent the posix lock via
+		 * legacy 32bit flock.
+		 */
+		error = -EOVERFLOW;
+		if (fl->fl_start > OFFT_OFFSET_MAX)
+			goto out;
+		if ((fl->fl_end != OFFSET_MAX)
+		    && (fl->fl_end > OFFT_OFFSET_MAX))
+			goto out;
+#endif
+		flock.l_start = fl->fl_start;
+		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+			fl->fl_end - fl->fl_start + 1;
+		flock.l_whence = 0;
+		flock.l_type = fl->fl_type;
+	}
+	error = -EFAULT;
+	if (!copy_to_user(l, &flock, sizeof(flock)))
+		error = 0;
+out:
+	return error;
+}
+
+/* Apply the lock described by l to an open file descriptor.
+ * This implements both the F_SETLK and F_SETLKW commands of fcntl().
+ */
+int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
+{
+	struct file_lock *file_lock = locks_alloc_lock();
+	struct flock flock;
+	struct inode *inode;
+	int error;
+
+	if (file_lock == NULL)
+		return -ENOLCK;
+
+	/*
+	 * This might block, so we do it before checking the inode.
+	 */
+	error = -EFAULT;
+	if (copy_from_user(&flock, l, sizeof(flock)))
+		goto out;
+
+	inode = filp->f_dentry->d_inode;
+
+	/* Don't allow mandatory locks on files that may be memory mapped
+	 * and shared.
+	 */
+	if (IS_MANDLOCK(inode) &&
+	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+	    mapping_writably_mapped(filp->f_mapping)) {
+		error = -EAGAIN;
+		goto out;
+	}
+
+	error = flock_to_posix_lock(filp, file_lock, &flock);
+	if (error)
+		goto out;
+	if (cmd == F_SETLKW) {
+		file_lock->fl_flags |= FL_SLEEP;
+	}
+	
+	error = -EBADF;
+	switch (flock.l_type) {
+	case F_RDLCK:
+		if (!(filp->f_mode & FMODE_READ))
+			goto out;
+		break;
+	case F_WRLCK:
+		if (!(filp->f_mode & FMODE_WRITE))
+			goto out;
+		break;
+	case F_UNLCK:
+		break;
+	default:
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = security_file_lock(filp, file_lock->fl_type);
+	if (error)
+		goto out;
+
+	if (filp->f_op && filp->f_op->lock != NULL) {
+		error = filp->f_op->lock(filp, cmd, file_lock);
+		goto out;
+	}
+
+	for (;;) {
+		error = __posix_lock_file(inode, file_lock);
+		if ((error != -EAGAIN) || (cmd == F_SETLK))
+			break;
+		error = wait_event_interruptible(file_lock->fl_wait,
+				!file_lock->fl_next);
+		if (!error)
+			continue;
+
+		locks_delete_block(file_lock);
+		break;
+	}
+
+ out:
+	locks_free_lock(file_lock);
+	return error;
+}
+
+#if BITS_PER_LONG == 32
+/* Report the first existing lock that would conflict with l.
+ * This implements the F_GETLK command of fcntl().
+ */
+int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
+{
+	struct file_lock *fl, file_lock;
+	struct flock64 flock;
+	int error;
+
+	error = -EFAULT;
+	if (copy_from_user(&flock, l, sizeof(flock)))
+		goto out;
+	error = -EINVAL;
+	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
+		goto out;
+
+	error = flock64_to_posix_lock(filp, &file_lock, &flock);
+	if (error)
+		goto out;
+
+	if (filp->f_op && filp->f_op->lock) {
+		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
+		if (error < 0)
+			goto out;
+		else
+			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
+	} else {
+		fl = posix_test_lock(filp, &file_lock);
+	}
+ 
+	flock.l_type = F_UNLCK;
+	if (fl != NULL) {
+		flock.l_pid = fl->fl_pid;
+		flock.l_start = fl->fl_start;
+		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+			fl->fl_end - fl->fl_start + 1;
+		flock.l_whence = 0;
+		flock.l_type = fl->fl_type;
+	}
+	error = -EFAULT;
+	if (!copy_to_user(l, &flock, sizeof(flock)))
+		error = 0;
+  
+out:
+	return error;
+}
+
+/* Apply the lock described by l to an open file descriptor.
+ * This implements both the F_SETLK and F_SETLKW commands of fcntl().
+ */
+int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
+{
+	struct file_lock *file_lock = locks_alloc_lock();
+	struct flock64 flock;
+	struct inode *inode;
+	int error;
+
+	if (file_lock == NULL)
+		return -ENOLCK;
+
+	/*
+	 * This might block, so we do it before checking the inode.
+	 */
+	error = -EFAULT;
+	if (copy_from_user(&flock, l, sizeof(flock)))
+		goto out;
+
+	inode = filp->f_dentry->d_inode;
+
+	/* Don't allow mandatory locks on files that may be memory mapped
+	 * and shared.
+	 */
+	if (IS_MANDLOCK(inode) &&
+	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+	    mapping_writably_mapped(filp->f_mapping)) {
+		error = -EAGAIN;
+		goto out;
+	}
+
+	error = flock64_to_posix_lock(filp, file_lock, &flock);
+	if (error)
+		goto out;
+	if (cmd == F_SETLKW64) {
+		file_lock->fl_flags |= FL_SLEEP;
+	}
+	
+	error = -EBADF;
+	switch (flock.l_type) {
+	case F_RDLCK:
+		if (!(filp->f_mode & FMODE_READ))
+			goto out;
+		break;
+	case F_WRLCK:
+		if (!(filp->f_mode & FMODE_WRITE))
+			goto out;
+		break;
+	case F_UNLCK:
+		break;
+	default:
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = security_file_lock(filp, file_lock->fl_type);
+	if (error)
+		goto out;
+
+	if (filp->f_op && filp->f_op->lock != NULL) {
+		error = filp->f_op->lock(filp, cmd, file_lock);
+		goto out;
+	}
+
+	for (;;) {
+		error = __posix_lock_file(inode, file_lock);
+		if ((error != -EAGAIN) || (cmd == F_SETLK64))
+			break;
+		error = wait_event_interruptible(file_lock->fl_wait,
+				!file_lock->fl_next);
+		if (!error)
+			continue;
+
+		locks_delete_block(file_lock);
+		break;
+	}
+
+out:
+	locks_free_lock(file_lock);
+	return error;
+}
+#endif /* BITS_PER_LONG == 32 */
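+
+/*
+ * For reference, a minimal userspace sketch of the interface implemented
+ * above (illustrative only, error handling omitted):
+ *
+ *	struct flock fl = {
+ *		.l_type   = F_WRLCK,
+ *		.l_whence = SEEK_SET,
+ *		.l_start  = 0,
+ *		.l_len    = 0,		// 0 means "to end of file"
+ *	};
+ *	fcntl(fd, F_SETLKW, &fl);	// blocks until the lock is granted
+ *	fl.l_type = F_UNLCK;
+ *	fcntl(fd, F_SETLK, &fl);	// release
+ *
+ * On 32-bit kernels the *64 commands handled above take a struct flock64
+ * with 64-bit l_start and l_len instead.
+ */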
+
+/*
+ * This function is called when the file is being removed
+ * from the task's fd array.  POSIX locks belonging to this task
+ * are deleted at this time.
+ */
+void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+	struct file_lock lock, **before;
+
+	/*
+	 * If there are no locks held on this file, we don't need to call
+	 * posix_lock_file().  Another process could be setting a lock on this
+	 * file at the same time, but we wouldn't remove that lock anyway.
+	 */
+	before = &filp->f_dentry->d_inode->i_flock;
+	if (*before == NULL)
+		return;
+
+	lock.fl_type = F_UNLCK;
+	lock.fl_flags = FL_POSIX;
+	lock.fl_start = 0;
+	lock.fl_end = OFFSET_MAX;
+	lock.fl_owner = owner;
+	lock.fl_pid = current->tgid;
+	lock.fl_file = filp;
+	lock.fl_ops = NULL;
+	lock.fl_lmops = NULL;
+
+	if (filp->f_op && filp->f_op->lock != NULL) {
+		filp->f_op->lock(filp, F_SETLK, &lock);
+		goto out;
+	}
+
+	/* Can't use posix_lock_file here; we need to remove it no matter
+	 * which pid we have.
+	 */
+	lock_kernel();
+	while (*before != NULL) {
+		struct file_lock *fl = *before;
+		if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
+			locks_delete_lock(before);
+			continue;
+		}
+		before = &fl->fl_next;
+	}
+	unlock_kernel();
+out:
+	if (lock.fl_ops && lock.fl_ops->fl_release_private)
+		lock.fl_ops->fl_release_private(&lock);
+}
+
+EXPORT_SYMBOL(locks_remove_posix);
+
+/*
+ * This function is called on the last close of an open file.
+ */
+void locks_remove_flock(struct file *filp)
+{
+	struct inode * inode = filp->f_dentry->d_inode; 
+	struct file_lock *fl;
+	struct file_lock **before;
+
+	if (!inode->i_flock)
+		return;
+
+	if (filp->f_op && filp->f_op->flock) {
+		struct file_lock fl = {
+			.fl_pid = current->tgid,
+			.fl_file = filp,
+			.fl_flags = FL_FLOCK,
+			.fl_type = F_UNLCK,
+			.fl_end = OFFSET_MAX,
+		};
+		filp->f_op->flock(filp, F_SETLKW, &fl);
+	}
+
+	lock_kernel();
+	before = &inode->i_flock;
+
+	while ((fl = *before) != NULL) {
+		if (fl->fl_file == filp) {
+			/*
+			 * We might have a POSIX lock that was created at the same time
+			 * the filp was closed for the last time. Just remove that too,
+			 * regardless of ownership, since nobody can own it.
+			 */
+			if (IS_FLOCK(fl) || IS_POSIX(fl)) {
+				locks_delete_lock(before);
+				continue;
+			}
+			if (IS_LEASE(fl)) {
+				lease_modify(before, F_UNLCK);
+				continue;
+			}
+			/* What? */
+			BUG();
+ 		}
+		before = &fl->fl_next;
+	}
+	unlock_kernel();
+}
+
+/**
+ *	posix_block_lock - blocks waiting for a file lock
+ *	@blocker: the lock which is blocking
+ *	@waiter: the lock which conflicts and has to wait
+ *
+ * lockd needs to block waiting for locks.
+ */
+void
+posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
+{
+	locks_insert_block(blocker, waiter);
+}
+
+EXPORT_SYMBOL(posix_block_lock);
+
+/**
+ *	posix_unblock_lock - stop waiting for a file lock
+ *	@filp: how the file was opened
+ *	@waiter: the lock which was waiting
+ *
+ *	lockd needs to be able to cancel a blocked lock request, for
+ *	example when the client gives up waiting for the lock.
+ */
+void
+posix_unblock_lock(struct file *filp, struct file_lock *waiter)
+{
+	/* 
+	 * A remote machine may cancel the lock request after it's been
+	 * granted locally.  If that happens, we need to delete the lock.
+	 */
+	lock_kernel();
+	if (waiter->fl_next) {
+		__locks_delete_block(waiter);
+		unlock_kernel();
+	} else {
+		unlock_kernel();
+		waiter->fl_type = F_UNLCK;
+		posix_lock_file(filp, waiter);
+	}
+}
+
+EXPORT_SYMBOL(posix_unblock_lock);
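+
+/*
+ * A rough sketch of the block/unblock pairing as lockd-style code might use
+ * it (hypothetical caller, details omitted):
+ *
+ *	conflict = posix_test_lock(filp, fl);
+ *	if (conflict != NULL) {
+ *		posix_block_lock(conflict, fl);
+ *		... wait for the blocker to go away, or for the client
+ *		    to cancel the request, in which case: ...
+ *		posix_unblock_lock(filp, fl);
+ *	}
+ */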
+
+static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
+{
+	struct inode *inode = NULL;
+
+	if (fl->fl_file != NULL)
+		inode = fl->fl_file->f_dentry->d_inode;
+
+	out += sprintf(out, "%d:%s ", id, pfx);
+	if (IS_POSIX(fl)) {
+		out += sprintf(out, "%6s %s ",
+			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
+			     (inode == NULL) ? "*NOINODE*" :
+			     (IS_MANDLOCK(inode) &&
+			      (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
+			     "MANDATORY" : "ADVISORY ");
+	} else if (IS_FLOCK(fl)) {
+		if (fl->fl_type & LOCK_MAND) {
+			out += sprintf(out, "FLOCK  MSNFS     ");
+		} else {
+			out += sprintf(out, "FLOCK  ADVISORY  ");
+		}
+	} else if (IS_LEASE(fl)) {
+		out += sprintf(out, "LEASE  ");
+		if (fl->fl_type & F_INPROGRESS)
+			out += sprintf(out, "BREAKING  ");
+		else if (fl->fl_file)
+			out += sprintf(out, "ACTIVE    ");
+		else
+			out += sprintf(out, "BREAKER   ");
+	} else {
+		out += sprintf(out, "UNKNOWN UNKNOWN  ");
+	}
+	if (fl->fl_type & LOCK_MAND) {
+		out += sprintf(out, "%s ",
+			       (fl->fl_type & LOCK_READ)
+			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
+			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
+	} else {
+		out += sprintf(out, "%s ",
+			       (fl->fl_type & F_INPROGRESS)
+			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
+			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
+	}
+	if (inode) {
+#ifdef WE_CAN_BREAK_LSLK_NOW
+		out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
+				inode->i_sb->s_id, inode->i_ino);
+#else
+		/* userspace relies on this representation of dev_t ;-( */
+		out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
+				MAJOR(inode->i_sb->s_dev),
+				MINOR(inode->i_sb->s_dev), inode->i_ino);
+#endif
+	} else {
+		out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
+	}
+	if (IS_POSIX(fl)) {
+		if (fl->fl_end == OFFSET_MAX)
+			out += sprintf(out, "%Ld EOF\n", fl->fl_start);
+		else
+			out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
+					fl->fl_end);
+	} else {
+		out += sprintf(out, "0 EOF\n");
+	}
+}
+
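+/*
+ * Advance the output pointer and running position past one formatted line,
+ * keeping only the portion of the line that falls inside the window the
+ * /proc read asked for (i.e. at or beyond offset).
+ */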
+static void move_lock_status(char **p, off_t* pos, off_t offset)
+{
+	int len;
+	len = strlen(*p);
+	if(*pos >= offset) {
+		/* the complete line is valid */
+		*p += len;
+		*pos += len;
+		return;
+	}
+	if(*pos+len > offset) {
+		/* use the second part of the line */
+		int i = offset-*pos;
+		memmove(*p,*p+i,len-i);
+		*p += len-i;
+		*pos += len;
+		return;
+	}
+	/* discard the complete line */
+	*pos += len;
+}
+
+/**
+ *	get_locks_status	-	reports lock usage in /proc/locks
+ *	@buffer: address in userspace to write into
+ *	@start: ?
+ *	@offset: how far we are through the buffer
+ *	@length: how much to read
+ */
+
+int get_locks_status(char *buffer, char **start, off_t offset, int length)
+{
+	struct list_head *tmp;
+	char *q = buffer;
+	off_t pos = 0;
+	int i = 0;
+
+	lock_kernel();
+	list_for_each(tmp, &file_lock_list) {
+		struct list_head *btmp;
+		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
+		lock_get_status(q, fl, ++i, "");
+		move_lock_status(&q, &pos, offset);
+
+		if(pos >= offset+length)
+			goto done;
+
+		list_for_each(btmp, &fl->fl_block) {
+			struct file_lock *bfl = list_entry(btmp,
+					struct file_lock, fl_block);
+			lock_get_status(q, bfl, i, " ->");
+			move_lock_status(&q, &pos, offset);
+
+			if(pos >= offset+length)
+				goto done;
+		}
+	}
+done:
+	unlock_kernel();
+	*start = buffer;
+	if(q-buffer < length)
+		return (q-buffer);
+	return length;
+}
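+
+/*
+ * For reference, a /proc/locks line produced by the code above looks
+ * something like (fields: id, class, mode, type, pid, maj:min:ino, range):
+ *
+ *	1: POSIX  ADVISORY  WRITE 4620 08:01:81924 0 EOF
+ *	2: FLOCK  ADVISORY  WRITE 4697 03:0a:7266 0 EOF
+ */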
+
+/**
+ *	lock_may_read - checks that the region is free of locks
+ *	@inode: the inode that is being read
+ *	@start: the first byte to read
+ *	@len: the number of bytes to read
+ *
+ *	Emulates Windows locking requirements.  Whole-file
+ *	mandatory locks (share modes) can prohibit a read and
+ *	byte-range POSIX locks can prohibit a read if they overlap.
+ *
+ *	N.B. this function is only ever called
+ *	from knfsd and ownership of locks is never checked.
+ */
+int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
+{
+	struct file_lock *fl;
+	int result = 1;
+	lock_kernel();
+	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+		if (IS_POSIX(fl)) {
+			if (fl->fl_type == F_RDLCK)
+				continue;
+			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
+				continue;
+		} else if (IS_FLOCK(fl)) {
+			if (!(fl->fl_type & LOCK_MAND))
+				continue;
+			if (fl->fl_type & LOCK_READ)
+				continue;
+		} else
+			continue;
+		result = 0;
+		break;
+	}
+	unlock_kernel();
+	return result;
+}
+
+EXPORT_SYMBOL(lock_may_read);
+
+/**
+ *	lock_may_write - checks that the region is free of locks
+ *	@inode: the inode that is being written
+ *	@start: the first byte to write
+ *	@len: the number of bytes to write
+ *
+ *	Emulates Windows locking requirements.  Whole-file
+ *	mandatory locks (share modes) can prohibit a write and
+ *	byte-range POSIX locks can prohibit a write if they overlap.
+ *
+ *	N.B. this function is only ever called
+ *	from knfsd and ownership of locks is never checked.
+ */
+int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
+{
+	struct file_lock *fl;
+	int result = 1;
+	lock_kernel();
+	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+		if (IS_POSIX(fl)) {
+			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
+				continue;
+		} else if (IS_FLOCK(fl)) {
+			if (!(fl->fl_type & LOCK_MAND))
+				continue;
+			if (fl->fl_type & LOCK_WRITE)
+				continue;
+		} else
+			continue;
+		result = 0;
+		break;
+	}
+	unlock_kernel();
+	return result;
+}
+
+EXPORT_SYMBOL(lock_may_write);
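+
+/*
+ * A sketch of how a caller such as knfsd might use these helpers
+ * (hypothetical caller, error value chosen for illustration):
+ *
+ *	if (!lock_may_write(inode, offset, count))
+ *		return -EAGAIN;		// region is locked against writing
+ */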
+
+static inline void __steal_locks(struct file *file, fl_owner_t from)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct file_lock *fl = inode->i_flock;
+
+	while (fl) {
+		if (fl->fl_file == file && fl->fl_owner == from)
+			fl->fl_owner = current->files;
+		fl = fl->fl_next;
+	}
+}
+
+/* When getting ready for executing a binary, we make sure that current
+ * has a files_struct on its own. Before dropping the old files_struct,
+ * we take over ownership of all locks for all file descriptors we own.
+ * Note that we may accidentally steal a lock for a file that a sibling
+ * has created since the unshare_files() call.
+ */
+void steal_locks(fl_owner_t from)
+{
+	struct files_struct *files = current->files;
+	int i, j;
+
+	if (from == files)
+		return;
+
+	lock_kernel();
+	j = 0;
+	for (;;) {
+		unsigned long set;
+		i = j * __NFDBITS;
+		if (i >= files->max_fdset || i >= files->max_fds)
+			break;
+		set = files->open_fds->fds_bits[j++];
+		while (set) {
+			if (set & 1) {
+				struct file *file = files->fd[i];
+				if (file)
+					__steal_locks(file, from);
+			}
+			i++;
+			set >>= 1;
+		}
+	}
+	unlock_kernel();
+}
+EXPORT_SYMBOL(steal_locks);
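+
+/*
+ * A rough sketch of the intended use from an exec-like path (illustrative
+ * only, not the exact fs/exec.c code):
+ *
+ *	struct files_struct *files = current->files;
+ *	if (unshare_files() == 0) {
+ *		steal_locks(files);
+ *		put_files_struct(files);
+ *	}
+ */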
+
+static int __init filelock_init(void)
+{
+	filelock_cache = kmem_cache_create("file_lock_cache",
+			sizeof(struct file_lock), 0, SLAB_PANIC,
+			init_once, NULL);
+	return 0;
+}
+
+core_initcall(filelock_init);
diff --git a/fs/mbcache.c b/fs/mbcache.c
new file mode 100644
index 0000000..f9e4d27
--- /dev/null
+++ b/fs/mbcache.c
@@ -0,0 +1,677 @@
+/*
+ * linux/fs/mbcache.c
+ * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+/*
+ * Filesystem Meta Information Block Cache (mbcache)
+ *
+ * The mbcache caches blocks of block devices that need to be located
+ * by their device/block number, as well as by other criteria (such
+ * as the block's contents).
+ *
+ * There can only be one cache entry in a cache per device and block number.
+ * Additional indexes need not be unique in this sense. The number of
+ * additional indexes (=other criteria) can be hardwired at compile time
+ * or specified at cache create time.
+ *
+ * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
+ * in the cache. A valid entry is in the main hash tables of the cache,
+ * and may also be in the lru list. An invalid entry is not in any hashes
+ * or lists.
+ *
+ * A valid cache entry is only in the lru list if no handles refer to it.
+ * Invalid cache entries will be freed when the last handle to the cache
+ * entry is released. Entries that cannot be freed immediately are put
+ * back on the lru list.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/hash.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/mbcache.h>
+
+
+#ifdef MB_CACHE_DEBUG
+# define mb_debug(f...) do { \
+		printk(KERN_DEBUG f); \
+		printk("\n"); \
+	} while (0)
+#define mb_assert(c) do { if (!(c)) \
+		printk(KERN_ERR "assertion " #c " failed\n"); \
+	} while(0)
+#else
+# define mb_debug(f...) do { } while(0)
+# define mb_assert(c) do { } while(0)
+#endif
+#define mb_error(f...) do { \
+		printk(KERN_ERR f); \
+		printk("\n"); \
+	} while(0)
+
+#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
+
+DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
+		
+MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
+MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(mb_cache_create);
+EXPORT_SYMBOL(mb_cache_shrink);
+EXPORT_SYMBOL(mb_cache_destroy);
+EXPORT_SYMBOL(mb_cache_entry_alloc);
+EXPORT_SYMBOL(mb_cache_entry_insert);
+EXPORT_SYMBOL(mb_cache_entry_release);
+EXPORT_SYMBOL(mb_cache_entry_free);
+EXPORT_SYMBOL(mb_cache_entry_get);
+#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
+EXPORT_SYMBOL(mb_cache_entry_find_first);
+EXPORT_SYMBOL(mb_cache_entry_find_next);
+#endif
+
+struct mb_cache {
+	struct list_head		c_cache_list;
+	const char			*c_name;
+	struct mb_cache_op		c_op;
+	atomic_t			c_entry_count;
+	int				c_bucket_bits;
+#ifndef MB_CACHE_INDEXES_COUNT
+	int				c_indexes_count;
+#endif
+	kmem_cache_t			*c_entry_cache;
+	struct list_head		*c_block_hash;
+	struct list_head		*c_indexes_hash[0];
+};
+
+
+/*
+ * Global data: list of all mbcache's, lru list, and a spinlock for
+ * accessing cache data structures on SMP machines. The lru list is
+ * global across all mbcaches.
+ */
+
+static LIST_HEAD(mb_cache_list);
+static LIST_HEAD(mb_cache_lru_list);
+static DEFINE_SPINLOCK(mb_cache_spinlock);
+static struct shrinker *mb_shrinker;
+
+static inline int
+mb_cache_indexes(struct mb_cache *cache)
+{
+#ifdef MB_CACHE_INDEXES_COUNT
+	return MB_CACHE_INDEXES_COUNT;
+#else
+	return cache->c_indexes_count;
+#endif
+}
+
+/*
+ * The shrinker callback that mbcache registers so that its entries can be
+ * reclaimed dynamically under memory pressure.
+ */
+
+static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+
+
+static inline int
+__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
+{
+	return !list_empty(&ce->e_block_list);
+}
+
+
+static inline void
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+{
+	int n;
+
+	if (__mb_cache_entry_is_hashed(ce)) {
+		list_del_init(&ce->e_block_list);
+		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
+			list_del(&ce->e_indexes[n].o_list);
+	}
+}
+
+
+static inline void
+__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+{
+	struct mb_cache *cache = ce->e_cache;
+
+	mb_assert(!(ce->e_used || ce->e_queued));
+	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
+		/* free failed -- put back on the lru list
+		   for freeing later. */
+		spin_lock(&mb_cache_spinlock);
+		list_add(&ce->e_lru_list, &mb_cache_lru_list);
+		spin_unlock(&mb_cache_spinlock);
+	} else {
+		kmem_cache_free(cache->c_entry_cache, ce);
+		atomic_dec(&cache->c_entry_count);
+	}
+}
+
+
+static inline void
+__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
+{
+	/* Wake up all processes queuing for this cache entry. */
+	if (ce->e_queued)
+		wake_up_all(&mb_cache_queue);
+	if (ce->e_used >= MB_CACHE_WRITER)
+		ce->e_used -= MB_CACHE_WRITER;
+	ce->e_used--;
+	if (!(ce->e_used || ce->e_queued)) {
+		if (!__mb_cache_entry_is_hashed(ce))
+			goto forget;
+		mb_assert(list_empty(&ce->e_lru_list));
+		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
+	}
+	spin_unlock(&mb_cache_spinlock);
+	return;
+forget:
+	spin_unlock(&mb_cache_spinlock);
+	__mb_cache_entry_forget(ce, GFP_KERNEL);
+}
+
+
+/*
+ * mb_cache_shrink_fn()  memory pressure callback
+ *
+ * This function is called by the kernel memory management when memory
+ * gets low.
+ *
+ * @nr_to_scan: Number of objects to scan
+ * @gfp_mask: (ignored)
+ *
+ * Returns the number of objects which are present in the cache.
+ */
+static int
+mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+{
+	LIST_HEAD(free_list);
+	struct list_head *l, *ltmp;
+	int count = 0;
+
+	spin_lock(&mb_cache_spinlock);
+	list_for_each(l, &mb_cache_list) {
+		struct mb_cache *cache =
+			list_entry(l, struct mb_cache, c_cache_list);
+		mb_debug("cache %s (%d)", cache->c_name,
+			  atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
+	mb_debug("trying to free %d entries", nr_to_scan);
+	if (nr_to_scan == 0) {
+		spin_unlock(&mb_cache_spinlock);
+		goto out;
+	}
+	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
+		struct mb_cache_entry *ce =
+			list_entry(mb_cache_lru_list.next,
+				   struct mb_cache_entry, e_lru_list);
+		list_move_tail(&ce->e_lru_list, &free_list);
+		__mb_cache_entry_unhash(ce);
+	}
+	spin_unlock(&mb_cache_spinlock);
+	list_for_each_safe(l, ltmp, &free_list) {
+		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+						   e_lru_list), gfp_mask);
+	}
+out:
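+	/* Scale the object count by vfs_cache_pressure (a percentage) so
+	 * the VM can weigh this cache against others when shrinking. */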
+	return (count / 100) * sysctl_vfs_cache_pressure;
+}
+
+
+/*
+ * mb_cache_create()  create a new cache
+ *
+ * All entries in one cache are of equal size. Cache entries may be from
+ * multiple devices. If this is the first mbcache created, registers
+ * the cache with kernel memory management. Returns NULL if no more
+ * memory was available.
+ *
+ * @name: name of the cache (informal)
+ * @cache_op: contains the callback called when freeing a cache entry
+ * @entry_size: The size of a cache entry, including
+ *              struct mb_cache_entry
+ * @indexes_count: number of additional indexes in the cache. Must equal
+ *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
+ *                 hardwired.
+ * @bucket_bits: log2(number of hash buckets)
+ */
+struct mb_cache *
+mb_cache_create(const char *name, struct mb_cache_op *cache_op,
+		size_t entry_size, int indexes_count, int bucket_bits)
+{
+	int m=0, n, bucket_count = 1 << bucket_bits;
+	struct mb_cache *cache = NULL;
+
+	if(entry_size < sizeof(struct mb_cache_entry) +
+	   indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
+		return NULL;
+
+	cache = kmalloc(sizeof(struct mb_cache) +
+	                indexes_count * sizeof(struct list_head), GFP_KERNEL);
+	if (!cache)
+		goto fail;
+	cache->c_name = name;
+	cache->c_op.free = NULL;
+	if (cache_op)
+		cache->c_op.free = cache_op->free;
+	atomic_set(&cache->c_entry_count, 0);
+	cache->c_bucket_bits = bucket_bits;
+#ifdef MB_CACHE_INDEXES_COUNT
+	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
+#else
+	cache->c_indexes_count = indexes_count;
+#endif
+	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
+	                              GFP_KERNEL);
+	if (!cache->c_block_hash)
+		goto fail;
+	for (n=0; n<bucket_count; n++)
+		INIT_LIST_HEAD(&cache->c_block_hash[n]);
+	for (m=0; m<indexes_count; m++) {
+		cache->c_indexes_hash[m] = kmalloc(bucket_count *
+		                                 sizeof(struct list_head),
+		                                 GFP_KERNEL);
+		if (!cache->c_indexes_hash[m])
+			goto fail;
+		for (n=0; n<bucket_count; n++)
+			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
+	}
+	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
+		SLAB_RECLAIM_ACCOUNT, NULL, NULL);
+	if (!cache->c_entry_cache)
+		goto fail;
+
+	spin_lock(&mb_cache_spinlock);
+	list_add(&cache->c_cache_list, &mb_cache_list);
+	spin_unlock(&mb_cache_spinlock);
+	return cache;
+
+fail:
+	if (cache) {
+		while (--m >= 0)
+			kfree(cache->c_indexes_hash[m]);
+		if (cache->c_block_hash)
+			kfree(cache->c_block_hash);
+		kfree(cache);
+	}
+	return NULL;
+}
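+
+/*
+ * A sketch of how a filesystem might set up an mbcache with one additional
+ * index (hypothetical caller; an xattr block cache would look much like
+ * this):
+ *
+ *	cache = mb_cache_create("foo_xattr", NULL,
+ *			sizeof(struct mb_cache_entry) +
+ *			sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
+ *			1, 6);
+ */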
+
+
+/*
+ * mb_cache_shrink()
+ *
+ * Removes all cache entries of a device from the cache. All cache entries
+ * currently in use cannot be freed, and thus remain in the cache. All others
+ * are freed.
+ *
+ * @cache: which cache to shrink
+ * @bdev: which device's cache entries to shrink
+ */
+void
+mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
+{
+	LIST_HEAD(free_list);
+	struct list_head *l, *ltmp;
+
+	spin_lock(&mb_cache_spinlock);
+	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
+		struct mb_cache_entry *ce =
+			list_entry(l, struct mb_cache_entry, e_lru_list);
+		if (ce->e_bdev == bdev) {
+			list_move_tail(&ce->e_lru_list, &free_list);
+			__mb_cache_entry_unhash(ce);
+		}
+	}
+	spin_unlock(&mb_cache_spinlock);
+	list_for_each_safe(l, ltmp, &free_list) {
+		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+						   e_lru_list), GFP_KERNEL);
+	}
+}
+
+
+/*
+ * mb_cache_destroy()
+ *
+ * Shrinks the cache to its minimum possible size (hopefully 0 entries),
+ * and then destroys it. If this was the last mbcache, un-registers the
+ * mbcache from kernel memory management.
+ */
+void
+mb_cache_destroy(struct mb_cache *cache)
+{
+	LIST_HEAD(free_list);
+	struct list_head *l, *ltmp;
+	int n;
+
+	spin_lock(&mb_cache_spinlock);
+	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
+		struct mb_cache_entry *ce =
+			list_entry(l, struct mb_cache_entry, e_lru_list);
+		if (ce->e_cache == cache) {
+			list_move_tail(&ce->e_lru_list, &free_list);
+			__mb_cache_entry_unhash(ce);
+		}
+	}
+	list_del(&cache->c_cache_list);
+	spin_unlock(&mb_cache_spinlock);
+
+	list_for_each_safe(l, ltmp, &free_list) {
+		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+						   e_lru_list), GFP_KERNEL);
+	}
+
+	if (atomic_read(&cache->c_entry_count) > 0) {
+		mb_error("cache %s: %d orphaned entries",
+			  cache->c_name,
+			  atomic_read(&cache->c_entry_count));
+	}
+
+	kmem_cache_destroy(cache->c_entry_cache);
+
+	for (n=0; n < mb_cache_indexes(cache); n++)
+		kfree(cache->c_indexes_hash[n]);
+	kfree(cache->c_block_hash);
+	kfree(cache);
+}
+
+
+/*
+ * mb_cache_entry_alloc()
+ *
+ * Allocates a new cache entry. The new entry will not be valid initially,
+ * and thus cannot be looked up yet. It should be filled with data, and
+ * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
+ * if no more memory was available.
+ */
+struct mb_cache_entry *
+mb_cache_entry_alloc(struct mb_cache *cache)
+{
+	struct mb_cache_entry *ce;
+
+	atomic_inc(&cache->c_entry_count);
+	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
+	if (ce) {
+		INIT_LIST_HEAD(&ce->e_lru_list);
+		INIT_LIST_HEAD(&ce->e_block_list);
+		ce->e_cache = cache;
+		ce->e_used = 1 + MB_CACHE_WRITER;
+		ce->e_queued = 0;
+	}
+	return ce;
+}
+
+
+/*
+ * mb_cache_entry_insert()
+ *
+ * Inserts an entry that was allocated using mb_cache_entry_alloc() into
+ * the cache. After this, the cache entry can be looked up, but is not yet
+ * in the lru list as the caller still holds a handle to it. Returns 0 on
+ * success, or -EBUSY if a cache entry for that device + block exists
+ * already (this may happen after a failed lookup, if another process
+ * has inserted the same cache entry in the meantime).
+ *
+ * @bdev: device the cache entry belongs to
+ * @block: block number
+ * @keys: array of additional keys. There must be indexes_count entries
+ *        in the array (as specified when creating the cache).
+ */
+int
+mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
+		      sector_t block, unsigned int keys[])
+{
+	struct mb_cache *cache = ce->e_cache;
+	unsigned int bucket;
+	struct list_head *l;
+	int error = -EBUSY, n;
+
+	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), 
+			   cache->c_bucket_bits);
+	spin_lock(&mb_cache_spinlock);
+	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
+		struct mb_cache_entry *ce =
+			list_entry(l, struct mb_cache_entry, e_block_list);
+		if (ce->e_bdev == bdev && ce->e_block == block)
+			goto out;
+	}
+	__mb_cache_entry_unhash(ce);
+	ce->e_bdev = bdev;
+	ce->e_block = block;
+	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+	for (n=0; n<mb_cache_indexes(cache); n++) {
+		ce->e_indexes[n].o_key = keys[n];
+		bucket = hash_long(keys[n], cache->c_bucket_bits);
+		list_add(&ce->e_indexes[n].o_list,
+			 &cache->c_indexes_hash[n][bucket]);
+	}
+	error = 0;
+out:
+	spin_unlock(&mb_cache_spinlock);
+	return error;
+}
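+
+/*
+ * Typical entry life cycle, as a sketch (hypothetical caller; "hash_of_key"
+ * stands for whatever additional index value the caller computes):
+ *
+ *	ce = mb_cache_entry_alloc(cache);
+ *	if (ce) {
+ *		unsigned int keys[1] = { hash_of_key };
+ *		if (mb_cache_entry_insert(ce, bdev, block, keys) == -EBUSY)
+ *			... someone else cached this device/block first ...
+ *		mb_cache_entry_release(ce);
+ *	}
+ */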
+
+
+/*
+ * mb_cache_entry_release()
+ *
+ * Release a handle to a cache entry. When the last handle to a cache entry
+ * is released, it is either freed (if it is invalid) or otherwise inserted
+ * into the lru list.
+ */
+void
+mb_cache_entry_release(struct mb_cache_entry *ce)
+{
+	spin_lock(&mb_cache_spinlock);
+	__mb_cache_entry_release_unlock(ce);
+}
+
+
+/*
+ * mb_cache_entry_free()
+ *
+ * This is equivalent to the sequence mb_cache_entry_takeout() --
+ * mb_cache_entry_release().
+ */
+void
+mb_cache_entry_free(struct mb_cache_entry *ce)
+{
+	spin_lock(&mb_cache_spinlock);
+	mb_assert(list_empty(&ce->e_lru_list));
+	__mb_cache_entry_unhash(ce);
+	__mb_cache_entry_release_unlock(ce);
+}
+
+
+/*
+ * mb_cache_entry_get()
+ *
+ * Get a cache entry by device / block number. (There can only be one entry
+ * in the cache per device and block.) Returns NULL if no such cache entry
+ * exists. The returned cache entry is locked for exclusive access ("single
+ * writer").
+ */
+struct mb_cache_entry *
+mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
+		   sector_t block)
+{
+	unsigned int bucket;
+	struct list_head *l;
+	struct mb_cache_entry *ce;
+
+	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
+			   cache->c_bucket_bits);
+	spin_lock(&mb_cache_spinlock);
+	list_for_each(l, &cache->c_block_hash[bucket]) {
+		ce = list_entry(l, struct mb_cache_entry, e_block_list);
+		if (ce->e_bdev == bdev && ce->e_block == block) {
+			DEFINE_WAIT(wait);
+
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
+
+			while (ce->e_used > 0) {
+				ce->e_queued++;
+				prepare_to_wait(&mb_cache_queue, &wait,
+						TASK_UNINTERRUPTIBLE);
+				spin_unlock(&mb_cache_spinlock);
+				schedule();
+				spin_lock(&mb_cache_spinlock);
+				ce->e_queued--;
+			}
+			finish_wait(&mb_cache_queue, &wait);
+			ce->e_used += 1 + MB_CACHE_WRITER;
+
+			if (!__mb_cache_entry_is_hashed(ce)) {
+				__mb_cache_entry_release_unlock(ce);
+				return NULL;
+			}
+			goto cleanup;
+		}
+	}
+	ce = NULL;
+
+cleanup:
+	spin_unlock(&mb_cache_spinlock);
+	return ce;
+}
+
+#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
+
+static struct mb_cache_entry *
+__mb_cache_entry_find(struct list_head *l, struct list_head *head,
+		      int index, struct block_device *bdev, unsigned int key)
+{
+	while (l != head) {
+		struct mb_cache_entry *ce =
+			list_entry(l, struct mb_cache_entry,
+			           e_indexes[index].o_list);
+		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+			DEFINE_WAIT(wait);
+
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
+
+			/* Incrementing before holding the lock gives readers
+			   priority over writers. */
+			ce->e_used++;
+			while (ce->e_used >= MB_CACHE_WRITER) {
+				ce->e_queued++;
+				prepare_to_wait(&mb_cache_queue, &wait,
+						TASK_UNINTERRUPTIBLE);
+				spin_unlock(&mb_cache_spinlock);
+				schedule();
+				spin_lock(&mb_cache_spinlock);
+				ce->e_queued--;
+			}
+			finish_wait(&mb_cache_queue, &wait);
+
+			if (!__mb_cache_entry_is_hashed(ce)) {
+				__mb_cache_entry_release_unlock(ce);
+				spin_lock(&mb_cache_spinlock);
+				return ERR_PTR(-EAGAIN);
+			}
+			return ce;
+		}
+		l = l->next;
+	}
+	return NULL;
+}
+
+
+/*
+ * mb_cache_entry_find_first()
+ *
+ * Find the first cache entry on a given device with a certain key in
+ * an additional index. Additional matches can be found with
+ * mb_cache_entry_find_next(). Returns NULL if no match was found. The
+ * returned cache entry is locked for shared access ("multiple readers").
+ *
+ * @cache: the cache to search
+ * @index: the number of the additional index to search (0<=index<indexes_count)
+ * @bdev: the device the cache entry should belong to
+ * @key: the key in the index
+ */
+struct mb_cache_entry *
+mb_cache_entry_find_first(struct mb_cache *cache, int index,
+			  struct block_device *bdev, unsigned int key)
+{
+	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
+	struct list_head *l;
+	struct mb_cache_entry *ce;
+
+	mb_assert(index < mb_cache_indexes(cache));
+	spin_lock(&mb_cache_spinlock);
+	l = cache->c_indexes_hash[index][bucket].next;
+	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
+	                           index, bdev, key);
+	spin_unlock(&mb_cache_spinlock);
+	return ce;
+}
+
+
+/*
+ * mb_cache_entry_find_next()
+ *
+ * Find the next cache entry on a given device with a certain key in an
+ * additional index. Returns NULL if no match could be found. The previous
+ * entry is automatically released, so that mb_cache_entry_find_next() can
+ * be called like this:
+ *
+ * entry = mb_cache_entry_find_first();
+ * while (entry) {
+ * 	...
+ *	entry = mb_cache_entry_find_next(entry, ...);
+ * }
+ *
+ * @prev: The previous match
+ * @index: the number of the additional index to search (0<=index<indexes_count)
+ * @bdev: the device the cache entry should belong to
+ * @key: the key in the index
+ */
+struct mb_cache_entry *
+mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
+			 struct block_device *bdev, unsigned int key)
+{
+	struct mb_cache *cache = prev->e_cache;
+	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
+	struct list_head *l;
+	struct mb_cache_entry *ce;
+
+	mb_assert(index < mb_cache_indexes(cache));
+	spin_lock(&mb_cache_spinlock);
+	l = prev->e_indexes[index].o_list.next;
+	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
+	                           index, bdev, key);
+	__mb_cache_entry_release_unlock(prev);
+	return ce;
+}
+
+#endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
+
+static int __init init_mbcache(void)
+{
+	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+	return 0;
+}
+
+static void __exit exit_mbcache(void)
+{
+	remove_shrinker(mb_shrinker);
+}
+
+module_init(init_mbcache)
+module_exit(exit_mbcache)
+
diff --git a/fs/minix/Makefile b/fs/minix/Makefile
new file mode 100644
index 0000000..3063015
--- /dev/null
+++ b/fs/minix/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux minix filesystem routines.
+#
+
+obj-$(CONFIG_MINIX_FS) += minix.o
+
+minix-objs := bitmap.o itree_v1.o itree_v2.o namei.o inode.o file.o dir.o
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
new file mode 100644
index 0000000..dc6a4e4
--- /dev/null
+++ b/fs/minix/bitmap.c
@@ -0,0 +1,269 @@
+/*
+ *  linux/fs/minix/bitmap.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * Modified for 680x0 by Hamish Macdonald
+ * Fixed for 680x0 by Andreas Schwab
+ */
+
+/* bitmap.c contains the code that handles the inode and block bitmaps */
+
+#include "minix.h"
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/bitops.h>
+
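+/*
+ * nibblemap[n] is the number of zero bits in the 4-bit value n, letting
+ * count_free() total the free (zero) bits of a bitmap one nibble at a time.
+ */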
+static int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
+
+static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
+{
+	unsigned i, j, sum = 0;
+	struct buffer_head *bh;
+  
+	for (i=0; i<numblocks-1; i++) {
+		if (!(bh=map[i])) 
+			return(0);
+		for (j=0; j<BLOCK_SIZE; j++)
+			sum += nibblemap[bh->b_data[j] & 0xf]
+				+ nibblemap[(bh->b_data[j]>>4) & 0xf];
+	}
+
+	if (numblocks==0 || !(bh=map[numblocks-1]))
+		return(0);
+	i = ((numbits-(numblocks-1)*BLOCK_SIZE*8)/16)*2;
+	for (j=0; j<i; j++) {
+		sum += nibblemap[bh->b_data[j] & 0xf]
+			+ nibblemap[(bh->b_data[j]>>4) & 0xf];
+	}
+
+	i = numbits%16;
+	if (i!=0) {
+		i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1);
+		sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf];
+		sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf];
+	}
+	return(sum);
+}
+
+void minix_free_block(struct inode * inode, int block)
+{
+	struct super_block * sb = inode->i_sb;
+	struct minix_sb_info * sbi = minix_sb(sb);
+	struct buffer_head * bh;
+	unsigned int bit,zone;
+
+	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
+		printk("trying to free block not in datazone\n");
+		return;
+	}
+	zone = block - sbi->s_firstdatazone + 1;
+	bit = zone & 8191;
+	zone >>= 13;
+	if (zone >= sbi->s_zmap_blocks) {
+		printk("minix_free_block: nonexistent bitmap buffer\n");
+		return;
+	}
+	bh = sbi->s_zmap[zone];
+	lock_kernel();
+	if (!minix_test_and_clear_bit(bit,bh->b_data))
+		printk("free_block (%s:%d): bit already cleared\n",
+		       sb->s_id, block);
+	unlock_kernel();
+	mark_buffer_dirty(bh);
+	return;
+}
+
+int minix_new_block(struct inode * inode)
+{
+	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
+	int i;
+
+	for (i = 0; i < sbi->s_zmap_blocks; i++) {
+		struct buffer_head *bh = sbi->s_zmap[i];
+		int j;
+
+		lock_kernel();
+		if ((j = minix_find_first_zero_bit(bh->b_data, 8192)) < 8192) {
+			minix_set_bit(j,bh->b_data);
+			unlock_kernel();
+			mark_buffer_dirty(bh);
+			j += i*8192 + sbi->s_firstdatazone-1;
+			if (j < sbi->s_firstdatazone || j >= sbi->s_nzones)
+				break;
+			return j;
+		}
+		unlock_kernel();
+	}
+	return 0;
+}
+
+unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
+{
+	return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
+		sbi->s_nzones - sbi->s_firstdatazone + 1)
+		<< sbi->s_log_zone_size);
+}
+
+struct minix_inode *
+minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
+{
+	int block;
+	struct minix_sb_info *sbi = minix_sb(sb);
+	struct minix_inode *p;
+
+	if (!ino || ino > sbi->s_ninodes) {
+		printk("Bad inode number on dev %s: %ld is out of range\n",
+		       sb->s_id, (long)ino);
+		return NULL;
+	}
+	ino--;
+	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
+		 ino / MINIX_INODES_PER_BLOCK;
+	*bh = sb_bread(sb, block);
+	if (!*bh) {
+		printk("unable to read i-node block\n");
+		return NULL;
+	}
+	p = (void *)(*bh)->b_data;
+	return p + ino % MINIX_INODES_PER_BLOCK;
+}
+
+struct minix2_inode *
+minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
+{
+	int block;
+	struct minix_sb_info *sbi = minix_sb(sb);
+	struct minix2_inode *p;
+
+	*bh = NULL;
+	if (!ino || ino > sbi->s_ninodes) {
+		printk("Bad inode number on dev %s: %ld is out of range\n",
+		       sb->s_id, (long)ino);
+		return NULL;
+	}
+	ino--;
+	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
+		 ino / MINIX2_INODES_PER_BLOCK;
+	*bh = sb_bread(sb, block);
+	if (!*bh) {
+		printk("unable to read i-node block\n");
+		return NULL;
+	}
+	p = (void *)(*bh)->b_data;
+	return p + ino % MINIX2_INODES_PER_BLOCK;
+}
+
+/* Clear the link count and mode of a deleted inode on disk. */
+
+static void minix_clear_inode(struct inode *inode)
+{
+	struct buffer_head *bh = NULL;
+
+	if (INODE_VERSION(inode) == MINIX_V1) {
+		struct minix_inode *raw_inode;
+		raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
+		if (raw_inode) {
+			raw_inode->i_nlinks = 0;
+			raw_inode->i_mode = 0;
+		}
+	} else {
+		struct minix2_inode *raw_inode;
+		raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
+		if (raw_inode) {
+			raw_inode->i_nlinks = 0;
+			raw_inode->i_mode = 0;
+		}
+	}
+	if (bh) {
+		mark_buffer_dirty(bh);
+		brelse (bh);
+	}
+}
+
+void minix_free_inode(struct inode * inode)
+{
+	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
+	struct buffer_head * bh;
+	unsigned long ino;
+
+	ino = inode->i_ino;
+	if (ino < 1 || ino > sbi->s_ninodes) {
+		printk("minix_free_inode: inode 0 or nonexistent inode\n");
+		goto out;
+	}
+	if ((ino >> 13) >= sbi->s_imap_blocks) {
+		printk("minix_free_inode: nonexistent imap in superblock\n");
+		goto out;
+	}
+
+	minix_clear_inode(inode);	/* clear on-disk copy */
+
+	bh = sbi->s_imap[ino >> 13];
+	lock_kernel();
+	if (!minix_test_and_clear_bit(ino & 8191, bh->b_data))
+		printk("minix_free_inode: bit %lu already cleared.\n", ino);
+	unlock_kernel();
+	mark_buffer_dirty(bh);
+ out:
+	clear_inode(inode);		/* clear in-memory copy */
+}
+
+struct inode * minix_new_inode(const struct inode * dir, int * error)
+{
+	struct super_block *sb = dir->i_sb;
+	struct minix_sb_info *sbi = minix_sb(sb);
+	struct inode *inode = new_inode(sb);
+	struct buffer_head * bh;
+	int i,j;
+
+	if (!inode) {
+		*error = -ENOMEM;
+		return NULL;
+	}
+	j = 8192;
+	bh = NULL;
+	*error = -ENOSPC;
+	lock_kernel();
+	for (i = 0; i < sbi->s_imap_blocks; i++) {
+		bh = sbi->s_imap[i];
+		if ((j = minix_find_first_zero_bit(bh->b_data, 8192)) < 8192)
+			break;
+	}
+	if (!bh || j >= 8192) {
+		unlock_kernel();
+		iput(inode);
+		return NULL;
+	}
+	if (minix_test_and_set_bit(j,bh->b_data)) {	/* shouldn't happen */
+		printk("new_inode: bit already set");
+		unlock_kernel();
+		iput(inode);
+		return NULL;
+	}
+	unlock_kernel();
+	mark_buffer_dirty(bh);
+	j += i*8192;
+	if (!j || j > sbi->s_ninodes) {
+		iput(inode);
+		return NULL;
+	}
+	inode->i_uid = current->fsuid;
+	inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+	inode->i_ino = j;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_blocks = inode->i_blksize = 0;
+	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+
+	*error = 0;
+	return inode;
+}
+
+unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
+{
+	return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
+}
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
new file mode 100644
index 0000000..732502a
--- /dev/null
+++ b/fs/minix/dir.c
@@ -0,0 +1,409 @@
+/*
+ *  linux/fs/minix/dir.c
+ *
+ *  Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *  minix directory handling functions
+ */
+
+#include "minix.h"
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+
+typedef struct minix_dir_entry minix_dirent;
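+
+/*
+ * An on-disk minix directory entry is a 16-bit inode number followed by a
+ * fixed-width, NUL-padded name; the entry size (s_dirsize) and maximum name
+ * length (s_namelen) depend on the superblock flavour (see minix_fill_super).
+ */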
+
+static int minix_readdir(struct file *, void *, filldir_t);
+
+struct file_operations minix_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= minix_readdir,
+	.fsync		= minix_sync_file,
+};
+
+static inline void dir_put_page(struct page *page)
+{
+	kunmap(page);
+	page_cache_release(page);
+}
+
+/*
+ * Return the offset into page `page_nr' of the last valid
+ * byte in that page, plus one.
+ */
+static unsigned
+minix_last_byte(struct inode *inode, unsigned long page_nr)
+{
+	unsigned last_byte = PAGE_CACHE_SIZE;
+
+	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
+		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	return last_byte;
+}
+
+static inline unsigned long dir_pages(struct inode *inode)
+{
+	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
+}
+
+static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
+{
+	struct inode *dir = (struct inode *)page->mapping->host;
+	int err = 0;
+	page->mapping->a_ops->commit_write(NULL, page, from, to);
+	if (IS_DIRSYNC(dir))
+		err = write_one_page(page, 1);
+	else
+		unlock_page(page);
+	return err;
+}
+
+static struct page * dir_get_page(struct inode *dir, unsigned long n)
+{
+	struct address_space *mapping = dir->i_mapping;
+	struct page *page = read_cache_page(mapping, n,
+				(filler_t*)mapping->a_ops->readpage, NULL);
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		kmap(page);
+		if (!PageUptodate(page))
+			goto fail;
+	}
+	return page;
+
+fail:
+	dir_put_page(page);
+	return ERR_PTR(-EIO);
+}
+
+static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
+{
+	return (void*)((char*)de + sbi->s_dirsize);
+}
+
+static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	unsigned long pos = filp->f_pos;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	unsigned offset = pos & ~PAGE_CACHE_MASK;
+	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned long npages = dir_pages(inode);
+	struct minix_sb_info *sbi = minix_sb(sb);
+	unsigned chunk_size = sbi->s_dirsize;
+
+	lock_kernel();
+
+	pos = (pos + chunk_size-1) & ~(chunk_size-1);
+	if (pos >= inode->i_size)
+		goto done;
+
+	for ( ; n < npages; n++, offset = 0) {
+		char *p, *kaddr, *limit;
+		struct page *page = dir_get_page(inode, n);
+
+		if (IS_ERR(page))
+			continue;
+		kaddr = (char *)page_address(page);
+		p = kaddr+offset;
+		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
+		for ( ; p <= limit ; p = minix_next_entry(p, sbi)) {
+			minix_dirent *de = (minix_dirent *)p;
+			if (de->inode) {
+				int over;
+				unsigned l = strnlen(de->name,sbi->s_namelen);
+
+				offset = p - kaddr;
+				over = filldir(dirent, de->name, l,
+						(n<<PAGE_CACHE_SHIFT) | offset,
+						de->inode, DT_UNKNOWN);
+				if (over) {
+					dir_put_page(page);
+					goto done;
+				}
+			}
+		}
+		dir_put_page(page);
+	}
+
+done:
+	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
+	unlock_kernel();
+	return 0;
+}
+
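+/*
+ * On-disk names are NUL-padded to the fixed maximum length, so two names
+ * match only if the stored name terminates right after len bytes (or len
+ * equals maxlen) and the first len bytes compare equal.
+ */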
+static inline int namecompare(int len, int maxlen,
+	const char * name, const char * buffer)
+{
+	if (len < maxlen && buffer[len])
+		return 0;
+	return !memcmp(name, buffer, len);
+}
+
+/*
+ *	minix_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the cache buffer in which the entry was found, and the entry
+ * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ */
+minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
+{
+	const char * name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	struct inode * dir = dentry->d_parent->d_inode;
+	struct super_block * sb = dir->i_sb;
+	struct minix_sb_info * sbi = minix_sb(sb);
+	unsigned long n;
+	unsigned long npages = dir_pages(dir);
+	struct page *page = NULL;
+	struct minix_dir_entry *de;
+
+	*res_page = NULL;
+
+	for (n = 0; n < npages; n++) {
+		char *kaddr;
+		page = dir_get_page(dir, n);
+		if (IS_ERR(page))
+			continue;
+
+		kaddr = (char*)page_address(page);
+		de = (struct minix_dir_entry *) kaddr;
+		kaddr += minix_last_byte(dir, n) - sbi->s_dirsize;
+		for ( ; (char *) de <= kaddr ; de = minix_next_entry(de,sbi)) {
+			if (!de->inode)
+				continue;
+			if (namecompare(namelen,sbi->s_namelen,name,de->name))
+				goto found;
+		}
+		dir_put_page(page);
+	}
+	return NULL;
+
+found:
+	*res_page = page;
+	return de;
+}
+
+int minix_add_link(struct dentry *dentry, struct inode *inode)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char * name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	struct super_block * sb = dir->i_sb;
+	struct minix_sb_info * sbi = minix_sb(sb);
+	struct page *page = NULL;
+	struct minix_dir_entry * de;
+	unsigned long npages = dir_pages(dir);
+	unsigned long n;
+	char *kaddr;
+	unsigned from, to;
+	int err;
+
+	/*
+	 * We take care of directory expansion in the same loop
+	 * This code plays outside i_size, so it locks the page
+	 * to protect that region.
+	 */
+	for (n = 0; n <= npages; n++) {
+		char *dir_end;
+
+		page = dir_get_page(dir, n);
+		err = PTR_ERR(page);
+		if (IS_ERR(page))
+			goto out;
+		lock_page(page);
+		kaddr = (char*)page_address(page);
+		dir_end = kaddr + minix_last_byte(dir, n);
+		de = (minix_dirent *)kaddr;
+		kaddr += PAGE_CACHE_SIZE - sbi->s_dirsize;
+		while ((char *)de <= kaddr) {
+			if ((char *)de == dir_end) {
+				/* We hit i_size */
+				de->inode = 0;
+				goto got_it;
+			}
+			if (!de->inode)
+				goto got_it;
+			err = -EEXIST;
+			if (namecompare(namelen,sbi->s_namelen,name,de->name))
+				goto out_unlock;
+			de = minix_next_entry(de, sbi);
+		}
+		unlock_page(page);
+		dir_put_page(page);
+	}
+	BUG();
+	return -EINVAL;
+
+got_it:
+	from = (char*)de - (char*)page_address(page);
+	to = from + sbi->s_dirsize;
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		goto out_unlock;
+	memcpy (de->name, name, namelen);
+	memset (de->name + namelen, 0, sbi->s_dirsize - namelen - 2);
+	de->inode = inode->i_ino;
+	err = dir_commit_chunk(page, from, to);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+out_put:
+	dir_put_page(page);
+out:
+	return err;
+out_unlock:
+	unlock_page(page);
+	goto out_put;
+}
+
+int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = (struct inode*)mapping->host;
+	char *kaddr = page_address(page);
+	unsigned from = (char*)de - kaddr;
+	unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
+	int err;
+
+	lock_page(page);
+	err = mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err == 0) {
+		de->inode = 0;
+		err = dir_commit_chunk(page, from, to);
+	} else {
+		unlock_page(page);
+	}
+	dir_put_page(page);
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return err;
+}
+
+int minix_make_empty(struct inode *inode, struct inode *dir)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page = grab_cache_page(mapping, 0);
+	struct minix_sb_info * sbi = minix_sb(inode->i_sb);
+	struct minix_dir_entry * de;
+	char *kaddr;
+	int err;
+
+	if (!page)
+		return -ENOMEM;
+	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
+	if (err) {
+		unlock_page(page);
+		goto fail;
+	}
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+
+	de = (struct minix_dir_entry *)kaddr;
+	de->inode = inode->i_ino;
+	strcpy(de->name,".");
+	de = minix_next_entry(de, sbi);
+	de->inode = dir->i_ino;
+	strcpy(de->name,"..");
+	kunmap_atomic(kaddr, KM_USER0);
+
+	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
+fail:
+	page_cache_release(page);
+	return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int minix_empty_dir(struct inode * inode)
+{
+	struct page *page = NULL;
+	unsigned long i, npages = dir_pages(inode);
+	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
+
+	for (i = 0; i < npages; i++) {
+		char *kaddr;
+		minix_dirent * de;
+		page = dir_get_page(inode, i);
+
+		if (IS_ERR(page))
+			continue;
+
+		kaddr = (char *)page_address(page);
+		de = (minix_dirent *)kaddr;
+		kaddr += minix_last_byte(inode, i) - sbi->s_dirsize;
+
+		while ((char *)de <= kaddr) {
+			if (de->inode != 0) {
+				/* check for . and .. */
+				if (de->name[0] != '.')
+					goto not_empty;
+				if (!de->name[1]) {
+					if (de->inode != inode->i_ino)
+						goto not_empty;
+				} else if (de->name[1] != '.')
+					goto not_empty;
+				else if (de->name[2])
+					goto not_empty;
+			}
+			de = minix_next_entry(de, sbi);
+		}
+		dir_put_page(page);
+	}
+	return 1;
+
+not_empty:
+	dir_put_page(page);
+	return 0;
+}
+
+/* Releases the page */
+void minix_set_link(struct minix_dir_entry *de, struct page *page,
+	struct inode *inode)
+{
+	struct inode *dir = (struct inode*)page->mapping->host;
+	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
+	unsigned from = (char *)de-(char*)page_address(page);
+	unsigned to = from + sbi->s_dirsize;
+	int err;
+
+	lock_page(page);
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err == 0) {
+		de->inode = inode->i_ino;
+		err = dir_commit_chunk(page, from, to);
+	} else {
+		unlock_page(page);
+	}
+	dir_put_page(page);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+}
+
+struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
+{
+	struct page *page = dir_get_page(dir, 0);
+	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
+	struct minix_dir_entry *de = NULL;
+
+	if (!IS_ERR(page)) {
+		de = minix_next_entry(page_address(page), sbi);
+		*p = page;
+	}
+	return de;
+}
+
+ino_t minix_inode_by_name(struct dentry *dentry)
+{
+	struct page *page;
+	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
+	ino_t res = 0;
+
+	if (de) {
+		res = de->inode;
+		dir_put_page(page);
+	}
+	return res;
+}
diff --git a/fs/minix/file.c b/fs/minix/file.c
new file mode 100644
index 0000000..f1d77ac
--- /dev/null
+++ b/fs/minix/file.c
@@ -0,0 +1,45 @@
+/*
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *  minix regular file handling primitives
+ */
+
+#include <linux/buffer_head.h>		/* for fsync_inode_buffers() */
+#include "minix.h"
+
+/*
+ * We have mostly NULLs here: the current defaults are OK for
+ * the minix filesystem.
+ */
+int minix_sync_file(struct file *, struct dentry *, int);
+
+struct file_operations minix_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.fsync		= minix_sync_file,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations minix_file_inode_operations = {
+	.truncate	= minix_truncate,
+	.getattr	= minix_getattr,
+};
+
+int minix_sync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	int err;
+
+	err = sync_mapping_buffers(inode->i_mapping);
+	if (!(inode->i_state & I_DIRTY))
+		return err;
+	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+		return err;
+	
+	err |= minix_sync_inode(inode);
+	return err ? -EIO : 0;
+}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
new file mode 100644
index 0000000..3f18c21
--- /dev/null
+++ b/fs/minix/inode.c
@@ -0,0 +1,598 @@
+/*
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Copyright (C) 1996  Gertjan van Wingerde    (gertjan@cs.vu.nl)
+ *	Minix V2 fs support.
+ *
+ *  Modified for 680x0 by Andreas Schwab
+ */
+
+#include <linux/module.h>
+#include "minix.h"
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/highuid.h>
+#include <linux/vfs.h>
+
+static void minix_read_inode(struct inode * inode);
+static int minix_write_inode(struct inode * inode, int wait);
+static int minix_statfs(struct super_block *sb, struct kstatfs *buf);
+static int minix_remount (struct super_block * sb, int * flags, char * data);
+
+static void minix_delete_inode(struct inode *inode)
+{
+	inode->i_size = 0;
+	minix_truncate(inode);
+	minix_free_inode(inode);
+}
+
+static void minix_put_super(struct super_block *sb)
+{
+	int i;
+	struct minix_sb_info *sbi = minix_sb(sb);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		sbi->s_ms->s_state = sbi->s_mount_state;
+		mark_buffer_dirty(sbi->s_sbh);
+	}
+	for (i = 0; i < sbi->s_imap_blocks; i++)
+		brelse(sbi->s_imap[i]);
+	for (i = 0; i < sbi->s_zmap_blocks; i++)
+		brelse(sbi->s_zmap[i]);
+	brelse (sbi->s_sbh);
+	kfree(sbi->s_imap);
+	sb->s_fs_info = NULL;
+	kfree(sbi);
+
+	return;
+}
+
+static kmem_cache_t * minix_inode_cachep;
+
+static struct inode *minix_alloc_inode(struct super_block *sb)
+{
+	struct minix_inode_info *ei;
+	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void minix_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(minix_inode_cachep, minix_i(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct minix_inode_info *ei = (struct minix_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	minix_inode_cachep = kmem_cache_create("minix_inode_cache",
+					     sizeof(struct minix_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (minix_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(minix_inode_cachep))
+		printk(KERN_INFO "minix_inode_cache: not all structures were freed\n");
+}
+
+static struct super_operations minix_sops = {
+	.alloc_inode	= minix_alloc_inode,
+	.destroy_inode	= minix_destroy_inode,
+	.read_inode	= minix_read_inode,
+	.write_inode	= minix_write_inode,
+	.delete_inode	= minix_delete_inode,
+	.put_super	= minix_put_super,
+	.statfs		= minix_statfs,
+	.remount_fs	= minix_remount,
+};
+
+static int minix_remount (struct super_block * sb, int * flags, char * data)
+{
+	struct minix_sb_info * sbi = minix_sb(sb);
+	struct minix_super_block * ms;
+
+	ms = sbi->s_ms;
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (*flags & MS_RDONLY) {
+		if (ms->s_state & MINIX_VALID_FS ||
+		    !(sbi->s_mount_state & MINIX_VALID_FS))
+			return 0;
+		/* Mounting a rw partition read-only. */
+		ms->s_state = sbi->s_mount_state;
+		mark_buffer_dirty(sbi->s_sbh);
+	} else {
+	  	/* Mount a partition which is read-only, read-write. */
+		sbi->s_mount_state = ms->s_state;
+		ms->s_state &= ~MINIX_VALID_FS;
+		mark_buffer_dirty(sbi->s_sbh);
+
+		if (!(sbi->s_mount_state & MINIX_VALID_FS))
+			printk ("MINIX-fs warning: remounting unchecked fs, "
+				"running fsck is recommended.\n");
+		else if ((sbi->s_mount_state & MINIX_ERROR_FS))
+			printk ("MINIX-fs warning: remounting fs with errors, "
+				"running fsck is recommended.\n");
+	}
+	return 0;
+}
+
+static int minix_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct buffer_head *bh;
+	struct buffer_head **map;
+	struct minix_super_block *ms;
+	int i, block;
+	struct inode *root_inode;
+	struct minix_sb_info *sbi;
+
+	sbi = kmalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	s->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct minix_sb_info));
+
+	/* N.B. These should be compile-time tests.
+	   Unfortunately that is impossible. */
+	if (32 != sizeof (struct minix_inode))
+		panic("bad V1 i-node size");
+	if (64 != sizeof(struct minix2_inode))
+		panic("bad V2 i-node size");
+
+	if (!sb_set_blocksize(s, BLOCK_SIZE))
+		goto out_bad_hblock;
+
+	if (!(bh = sb_bread(s, 1)))
+		goto out_bad_sb;
+
+	ms = (struct minix_super_block *) bh->b_data;
+	sbi->s_ms = ms;
+	sbi->s_sbh = bh;
+	sbi->s_mount_state = ms->s_state;
+	sbi->s_ninodes = ms->s_ninodes;
+	sbi->s_nzones = ms->s_nzones;
+	sbi->s_imap_blocks = ms->s_imap_blocks;
+	sbi->s_zmap_blocks = ms->s_zmap_blocks;
+	sbi->s_firstdatazone = ms->s_firstdatazone;
+	sbi->s_log_zone_size = ms->s_log_zone_size;
+	sbi->s_max_size = ms->s_max_size;
+	s->s_magic = ms->s_magic;
+	if (s->s_magic == MINIX_SUPER_MAGIC) {
+		sbi->s_version = MINIX_V1;
+		sbi->s_dirsize = 16;
+		sbi->s_namelen = 14;
+		sbi->s_link_max = MINIX_LINK_MAX;
+	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
+		sbi->s_version = MINIX_V1;
+		sbi->s_dirsize = 32;
+		sbi->s_namelen = 30;
+		sbi->s_link_max = MINIX_LINK_MAX;
+	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
+		sbi->s_version = MINIX_V2;
+		sbi->s_nzones = ms->s_zones;
+		sbi->s_dirsize = 16;
+		sbi->s_namelen = 14;
+		sbi->s_link_max = MINIX2_LINK_MAX;
+	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
+		sbi->s_version = MINIX_V2;
+		sbi->s_nzones = ms->s_zones;
+		sbi->s_dirsize = 32;
+		sbi->s_namelen = 30;
+		sbi->s_link_max = MINIX2_LINK_MAX;
+	} else
+		goto out_no_fs;
+
+	/*
+	 * Allocate the buffer map to keep the superblock small.
+	 */
+	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
+	map = kmalloc(i, GFP_KERNEL);
+	if (!map)
+		goto out_no_map;
+	memset(map, 0, i);
+	sbi->s_imap = &map[0];
+	sbi->s_zmap = &map[sbi->s_imap_blocks];
+
+	block=2;
+	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
+		if (!(sbi->s_imap[i]=sb_bread(s, block)))
+			goto out_no_bitmap;
+		block++;
+	}
+	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
+		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
+			goto out_no_bitmap;
+		block++;
+	}
+
+	minix_set_bit(0,sbi->s_imap[0]->b_data);
+	minix_set_bit(0,sbi->s_zmap[0]->b_data);
+
+	/* set up enough so that it can read an inode */
+	s->s_op = &minix_sops;
+	root_inode = iget(s, MINIX_ROOT_INO);
+	if (!root_inode || is_bad_inode(root_inode))
+		goto out_no_root;
+
+	s->s_root = d_alloc_root(root_inode);
+	if (!s->s_root)
+		goto out_iput;
+
+	if (!NO_TRUNCATE)
+		s->s_root->d_op = &minix_dentry_operations;
+
+	if (!(s->s_flags & MS_RDONLY)) {
+		ms->s_state &= ~MINIX_VALID_FS;
+		mark_buffer_dirty(bh);
+	}
+	if (!(sbi->s_mount_state & MINIX_VALID_FS))
+		printk ("MINIX-fs: mounting unchecked file system, "
+			"running fsck is recommended.\n");
+ 	else if (sbi->s_mount_state & MINIX_ERROR_FS)
+		printk ("MINIX-fs: mounting file system with errors, "
+			"running fsck is recommended.\n");
+	return 0;
+
+out_iput:
+	iput(root_inode);
+	goto out_freemap;
+
+out_no_root:
+	if (!silent)
+		printk("MINIX-fs: get root inode failed\n");
+	goto out_freemap;
+
+out_no_bitmap:
+	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
+    out_freemap:
+	for (i = 0; i < sbi->s_imap_blocks; i++)
+		brelse(sbi->s_imap[i]);
+	for (i = 0; i < sbi->s_zmap_blocks; i++)
+		brelse(sbi->s_zmap[i]);
+	kfree(sbi->s_imap);
+	goto out_release;
+
+out_no_map:
+	if (!silent)
+		printk ("MINIX-fs: can't allocate map\n");
+	goto out_release;
+
+out_no_fs:
+	if (!silent)
+		printk("VFS: Can't find a Minix or Minix V2 filesystem on device "
+		       "%s.\n", s->s_id);
+    out_release:
+	brelse(bh);
+	goto out;
+
+out_bad_hblock:
+	printk("MINIX-fs: blocksize too small for device.\n");
+	goto out;
+
+out_bad_sb:
+	printk("MINIX-fs: unable to read superblock\n");
+ out:
+	s->s_fs_info = NULL;
+	kfree(sbi);
+	return -EINVAL;
+}
+
+static int minix_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct minix_sb_info *sbi = minix_sb(sb);
+	buf->f_type = sb->s_magic;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
+	buf->f_bfree = minix_count_free_blocks(sbi);
+	buf->f_bavail = buf->f_bfree;
+	buf->f_files = sbi->s_ninodes;
+	buf->f_ffree = minix_count_free_inodes(sbi);
+	buf->f_namelen = sbi->s_namelen;
+	return 0;
+}
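+
+/*
+ * A worked example of the f_blocks arithmetic above, using made-up numbers
+ * for a small V2 image: with s_nzones = 2048, s_firstdatazone = 38 and
+ * s_log_zone_size = 0 (zone size == block size),
+ *
+ *	f_blocks = (2048 - 38) << 0 = 2010
+ *
+ * 1024-byte blocks of payload, i.e. everything except the boot block,
+ * superblock, bitmaps and inode table.
+ */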
+
+static int minix_get_block(struct inode *inode, sector_t block,
+		    struct buffer_head *bh_result, int create)
+{
+	if (INODE_VERSION(inode) == MINIX_V1)
+		return V1_minix_get_block(inode, block, bh_result, create);
+	else
+		return V2_minix_get_block(inode, block, bh_result, create);
+}
+
+static int minix_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, minix_get_block, wbc);
+}
+static int minix_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,minix_get_block);
+}
+static int minix_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page,from,to,minix_get_block);
+}
+static sector_t minix_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,minix_get_block);
+}
+static struct address_space_operations minix_aops = {
+	.readpage = minix_readpage,
+	.writepage = minix_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = minix_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = minix_bmap
+};
+
+static struct inode_operations minix_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.getattr	= minix_getattr,
+};
+
+void minix_set_inode(struct inode *inode, dev_t rdev)
+{
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &minix_file_inode_operations;
+		inode->i_fop = &minix_file_operations;
+		inode->i_mapping->a_ops = &minix_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &minix_dir_inode_operations;
+		inode->i_fop = &minix_dir_operations;
+		inode->i_mapping->a_ops = &minix_aops;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &minix_symlink_inode_operations;
+		inode->i_mapping->a_ops = &minix_aops;
+	} else
+		init_special_inode(inode, inode->i_mode, rdev);
+}
+
+/*
+ * The minix V1 function to read an inode.
+ */
+static void V1_minix_read_inode(struct inode * inode)
+{
+	struct buffer_head * bh;
+	struct minix_inode * raw_inode;
+	struct minix_inode_info *minix_inode = minix_i(inode);
+	int i;
+
+	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
+	if (!raw_inode) {
+		make_bad_inode(inode);
+		return;
+	}
+	inode->i_mode = raw_inode->i_mode;
+	inode->i_uid = (uid_t)raw_inode->i_uid;
+	inode->i_gid = (gid_t)raw_inode->i_gid;
+	inode->i_nlink = raw_inode->i_nlinks;
+	inode->i_size = raw_inode->i_size;
+	inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = inode->i_blksize = 0;
+	for (i = 0; i < 9; i++)
+		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
+	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
+	brelse(bh);
+}
+
+/*
+ * The minix V2 function to read an inode.
+ */
+static void V2_minix_read_inode(struct inode * inode)
+{
+	struct buffer_head * bh;
+	struct minix2_inode * raw_inode;
+	struct minix_inode_info *minix_inode = minix_i(inode);
+	int i;
+
+	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
+	if (!raw_inode) {
+		make_bad_inode(inode);
+		return;
+	}
+	inode->i_mode = raw_inode->i_mode;
+	inode->i_uid = (uid_t)raw_inode->i_uid;
+	inode->i_gid = (gid_t)raw_inode->i_gid;
+	inode->i_nlink = raw_inode->i_nlinks;
+	inode->i_size = raw_inode->i_size;
+	inode->i_mtime.tv_sec = raw_inode->i_mtime;
+	inode->i_atime.tv_sec = raw_inode->i_atime;
+	inode->i_ctime.tv_sec = raw_inode->i_ctime;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = inode->i_blksize = 0;
+	for (i = 0; i < 10; i++)
+		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
+	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
+	brelse(bh);
+}
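+
+/*
+ * The two readers above differ because the on-disk inodes differ (layouts
+ * per include/linux/minix_fs.h): a V1 struct minix_inode is 32 bytes with a
+ * single 32-bit timestamp and nine 16-bit zone pointers (7 direct, one
+ * single indirect, one double indirect), while a V2 struct minix2_inode is
+ * 64 bytes with separate atime/mtime/ctime and ten 32-bit zone pointers
+ * (7 direct, single, double and triple indirect).  That is why V1 copies
+ * i_time into all three VFS timestamps and only 9 zone slots, while V2
+ * copies 10.
+ */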
+
+/*
+ * The global function to read an inode.
+ */
+static void minix_read_inode(struct inode * inode)
+{
+	if (INODE_VERSION(inode) == MINIX_V1)
+		V1_minix_read_inode(inode);
+	else
+		V2_minix_read_inode(inode);
+}
+
+/*
+ * The minix V1 function to synchronize an inode.
+ */
+static struct buffer_head * V1_minix_update_inode(struct inode * inode)
+{
+	struct buffer_head * bh;
+	struct minix_inode * raw_inode;
+	struct minix_inode_info *minix_inode = minix_i(inode);
+	int i;
+
+	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
+	if (!raw_inode)
+		return NULL;
+	raw_inode->i_mode = inode->i_mode;
+	raw_inode->i_uid = fs_high2lowuid(inode->i_uid);
+	raw_inode->i_gid = fs_high2lowgid(inode->i_gid);
+	raw_inode->i_nlinks = inode->i_nlink;
+	raw_inode->i_size = inode->i_size;
+	raw_inode->i_time = inode->i_mtime.tv_sec;
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
+	else for (i = 0; i < 9; i++)
+		raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
+	mark_buffer_dirty(bh);
+	return bh;
+}
+
+/*
+ * The minix V2 function to synchronize an inode.
+ */
+static struct buffer_head * V2_minix_update_inode(struct inode * inode)
+{
+	struct buffer_head * bh;
+	struct minix2_inode * raw_inode;
+	struct minix_inode_info *minix_inode = minix_i(inode);
+	int i;
+
+	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
+	if (!raw_inode)
+		return NULL;
+	raw_inode->i_mode = inode->i_mode;
+	raw_inode->i_uid = fs_high2lowuid(inode->i_uid);
+	raw_inode->i_gid = fs_high2lowgid(inode->i_gid);
+	raw_inode->i_nlinks = inode->i_nlink;
+	raw_inode->i_size = inode->i_size;
+	raw_inode->i_mtime = inode->i_mtime.tv_sec;
+	raw_inode->i_atime = inode->i_atime.tv_sec;
+	raw_inode->i_ctime = inode->i_ctime.tv_sec;
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
+	else for (i = 0; i < 10; i++)
+		raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
+	mark_buffer_dirty(bh);
+	return bh;
+}
+
+static struct buffer_head *minix_update_inode(struct inode *inode)
+{
+	if (INODE_VERSION(inode) == MINIX_V1)
+		return V1_minix_update_inode(inode);
+	else
+		return V2_minix_update_inode(inode);
+}
+
+static int minix_write_inode(struct inode * inode, int wait)
+{
+	brelse(minix_update_inode(inode));
+	return 0;
+}
+
+int minix_sync_inode(struct inode * inode)
+{
+	int err = 0;
+	struct buffer_head *bh;
+
+	bh = minix_update_inode(inode);
+	if (bh && buffer_dirty(bh))
+	{
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh))
+		{
+			printk ("IO error syncing minix inode [%s:%08lx]\n",
+				inode->i_sb->s_id, inode->i_ino);
+			err = -1;
+		}
+	}
+	else if (!bh)
+		err = -1;
+	brelse (bh);
+	return err;
+}
+
+int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	generic_fillattr(dentry->d_inode, stat);
+	if (INODE_VERSION(dentry->d_inode) == MINIX_V1)
+		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size);
+	else
+		stat->blocks = (BLOCK_SIZE / 512) * V2_minix_blocks(stat->size);
+	stat->blksize = BLOCK_SIZE;
+	return 0;
+}
+
+/*
+ * The function that is called for file truncation.
+ */
+void minix_truncate(struct inode * inode)
+{
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+		return;
+	if (INODE_VERSION(inode) == MINIX_V1)
+		V1_minix_truncate(inode);
+	else
+		V2_minix_truncate(inode);
+}
+
+static struct super_block *minix_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, minix_fill_super);
+}
+
+static struct file_system_type minix_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "minix",
+	.get_sb		= minix_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_minix_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&minix_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_minix_fs(void)
+{
+	unregister_filesystem(&minix_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_minix_fs)
+module_exit(exit_minix_fs)
+MODULE_LICENSE("GPL");
+
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
new file mode 100644
index 0000000..429baf8
--- /dev/null
+++ b/fs/minix/itree_common.c
@@ -0,0 +1,362 @@
+/* Generic part */
+
+typedef struct {
+	block_t	*p;
+	block_t	key;
+	struct buffer_head *bh;
+} Indirect;
+
+static DEFINE_RWLOCK(pointers_lock);
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
+{
+	p->key = *(p->p = v);
+	p->bh = bh;
+}
+
+static inline int verify_chain(Indirect *from, Indirect *to)
+{
+	while (from <= to && from->key == *from->p)
+		from++;
+	return (from > to);
+}
+
+static inline block_t *block_end(struct buffer_head *bh)
+{
+	return (block_t *)((char*)bh->b_data + BLOCK_SIZE);
+}
+
+static inline Indirect *get_branch(struct inode *inode,
+					int depth,
+					int *offsets,
+					Indirect chain[DEPTH],
+					int *err)
+{
+	struct super_block *sb = inode->i_sb;
+	Indirect *p = chain;
+	struct buffer_head *bh;
+
+	*err = 0;
+	/* i_data is not going away, no lock needed */
+	add_chain (chain, NULL, i_data(inode) + *offsets);
+	if (!p->key)
+		goto no_block;
+	while (--depth) {
+		bh = sb_bread(sb, block_to_cpu(p->key));
+		if (!bh)
+			goto failure;
+		read_lock(&pointers_lock);
+		if (!verify_chain(chain, p))
+			goto changed;
+		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
+		read_unlock(&pointers_lock);
+		if (!p->key)
+			goto no_block;
+	}
+	return NULL;
+
+changed:
+	read_unlock(&pointers_lock);
+	brelse(bh);
+	*err = -EAGAIN;
+	goto no_block;
+failure:
+	*err = -EIO;
+no_block:
+	return p;
+}
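+
+/*
+ * For example, for a block under the double indirect tree block_to_path()
+ * yields offsets like {8, a, b}, and a successful get_branch() leaves
+ *
+ *	chain[0] = { p = &i_data(inode)[8], key = block# of the top
+ *	             indirect block, bh = NULL }
+ *	chain[1] = { p = slot a inside that block, key = block# of the
+ *	             second-level block, bh = buffer of the top block }
+ *	chain[2] = { p = slot b inside the second-level block, key = the
+ *	             data block number, bh = buffer of that block }
+ *
+ * and returns NULL.  A zero key part way down means a hole: the partially
+ * filled chain is returned instead, and get_block() decides whether to
+ * allocate the missing tail via alloc_branch()/splice_branch().
+ */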
+
+static int alloc_branch(struct inode *inode,
+			     int num,
+			     int *offsets,
+			     Indirect *branch)
+{
+	int n = 0;
+	int i;
+	int parent = minix_new_block(inode);
+
+	branch[0].key = cpu_to_block(parent);
+	if (parent) for (n = 1; n < num; n++) {
+		struct buffer_head *bh;
+		/* Allocate the next block */
+		int nr = minix_new_block(inode);
+		if (!nr)
+			break;
+		branch[n].key = cpu_to_block(nr);
+		bh = sb_getblk(inode->i_sb, parent);
+		lock_buffer(bh);
+		memset(bh->b_data, 0, BLOCK_SIZE);
+		branch[n].bh = bh;
+		branch[n].p = (block_t*) bh->b_data + offsets[n];
+		*branch[n].p = branch[n].key;
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		mark_buffer_dirty_inode(bh, inode);
+		parent = nr;
+	}
+	if (n == num)
+		return 0;
+
+	/* Allocation failed, free what we already allocated */
+	for (i = 1; i < n; i++)
+		bforget(branch[i].bh);
+	for (i = 0; i < n; i++)
+		minix_free_block(inode, block_to_cpu(branch[i].key));
+	return -ENOSPC;
+}
+
+static inline int splice_branch(struct inode *inode,
+				     Indirect chain[DEPTH],
+				     Indirect *where,
+				     int num)
+{
+	int i;
+
+	write_lock(&pointers_lock);
+
+	/* Verify that the place we are splicing to is still there and vacant */
+	if (!verify_chain(chain, where-1) || *where->p)
+		goto changed;
+
+	*where->p = where->key;
+
+	write_unlock(&pointers_lock);
+
+	/* We are done with atomic stuff, now do the rest of housekeeping */
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+
+	/* did we splice it onto an indirect block? */
+	if (where->bh)
+		mark_buffer_dirty_inode(where->bh, inode);
+
+	mark_inode_dirty(inode);
+	return 0;
+
+changed:
+	write_unlock(&pointers_lock);
+	for (i = 1; i < num; i++)
+		bforget(where[i].bh);
+	for (i = 0; i < num; i++)
+		minix_free_block(inode, block_to_cpu(where[i].key));
+	return -EAGAIN;
+}
+
+static inline int get_block(struct inode * inode, sector_t block,
+			struct buffer_head *bh, int create)
+{
+	int err = -EIO;
+	int offsets[DEPTH];
+	Indirect chain[DEPTH];
+	Indirect *partial;
+	int left;
+	int depth = block_to_path(inode, block, offsets);
+
+	if (depth == 0)
+		goto out;
+
+reread:
+	partial = get_branch(inode, depth, offsets, chain, &err);
+
+	/* Simplest case - block found, no allocation needed */
+	if (!partial) {
+got_it:
+		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
+		/* Clean up and exit */
+		partial = chain+depth-1; /* the whole chain */
+		goto cleanup;
+	}
+
+	/* Next simple case - plain lookup or failed read of indirect block */
+	if (!create || err == -EIO) {
+cleanup:
+		while (partial > chain) {
+			brelse(partial->bh);
+			partial--;
+		}
+out:
+		return err;
+	}
+
+	/*
+	 * Indirect block might be removed by truncate while we were
+	 * reading it. Handling of that case (forget what we've got and
+	 * reread) is taken out of the main path.
+	 */
+	if (err == -EAGAIN)
+		goto changed;
+
+	left = (chain + depth) - partial;
+	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
+	if (err)
+		goto cleanup;
+
+	if (splice_branch(inode, chain, partial, left) < 0)
+		goto changed;
+
+	set_buffer_new(bh);
+	goto got_it;
+
+changed:
+	while (partial > chain) {
+		brelse(partial->bh);
+		partial--;
+	}
+	goto reread;
+}
+
+static inline int all_zeroes(block_t *p, block_t *q)
+{
+	while (p < q)
+		if (*p++)
+			return 0;
+	return 1;
+}
+
+static Indirect *find_shared(struct inode *inode,
+				int depth,
+				int offsets[DEPTH],
+				Indirect chain[DEPTH],
+				block_t *top)
+{
+	Indirect *partial, *p;
+	int k, err;
+
+	*top = 0;
+	for (k = depth; k > 1 && !offsets[k-1]; k--)
+		;
+	partial = get_branch(inode, k, offsets, chain, &err);
+
+	write_lock(&pointers_lock);
+	if (!partial)
+		partial = chain + k-1;
+	if (!partial->key && *partial->p) {
+		write_unlock(&pointers_lock);
+		goto no_top;
+	}
+	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
+		;
+	if (p == chain + k - 1 && p > chain) {
+		p->p--;
+	} else {
+		*top = *p->p;
+		*p->p = 0;
+	}
+	write_unlock(&pointers_lock);
+
+	while(partial > p)
+	{
+		brelse(partial->bh);
+		partial--;
+	}
+no_top:
+	return partial;
+}
+
+static inline void free_data(struct inode *inode, block_t *p, block_t *q)
+{
+	unsigned long nr;
+
+	for ( ; p < q ; p++) {
+		nr = block_to_cpu(*p);
+		if (nr) {
+			*p = 0;
+			minix_free_block(inode, nr);
+		}
+	}
+}
+
+static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
+{
+	struct buffer_head * bh;
+	unsigned long nr;
+
+	if (depth--) {
+		for ( ; p < q ; p++) {
+			nr = block_to_cpu(*p);
+			if (!nr)
+				continue;
+			*p = 0;
+			bh = sb_bread(inode->i_sb, nr);
+			if (!bh)
+				continue;
+			free_branches(inode, (block_t*)bh->b_data,
+				      block_end(bh), depth);
+			bforget(bh);
+			minix_free_block(inode, nr);
+			mark_inode_dirty(inode);
+		}
+	} else
+		free_data(inode, p, q);
+}
+
+static inline void truncate (struct inode * inode)
+{
+	block_t *idata = i_data(inode);
+	int offsets[DEPTH];
+	Indirect chain[DEPTH];
+	Indirect *partial;
+	block_t nr = 0;
+	int n;
+	int first_whole;
+	long iblock;
+
+	iblock = (inode->i_size + BLOCK_SIZE-1) >> 10;
+	block_truncate_page(inode->i_mapping, inode->i_size, get_block);
+
+	n = block_to_path(inode, iblock, offsets);
+	if (!n)
+		return;
+
+	if (n == 1) {
+		free_data(inode, idata+offsets[0], idata + DIRECT);
+		first_whole = 0;
+		goto do_indirects;
+	}
+
+	first_whole = offsets[0] + 1 - DIRECT;
+	partial = find_shared(inode, n, offsets, chain, &nr);
+	if (nr) {
+		if (partial == chain)
+			mark_inode_dirty(inode);
+		else
+			mark_buffer_dirty_inode(partial->bh, inode);
+		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
+	}
+	/* Clear the ends of indirect blocks on the shared branch */
+	while (partial > chain) {
+		free_branches(inode, partial->p + 1, block_end(partial->bh),
+				(chain+n-1) - partial);
+		mark_buffer_dirty_inode(partial->bh, inode);
+		brelse (partial->bh);
+		partial--;
+	}
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	while (first_whole < DEPTH-1) {
+		nr = idata[DIRECT+first_whole];
+		if (nr) {
+			idata[DIRECT+first_whole] = 0;
+			mark_inode_dirty(inode);
+			free_branches(inode, &nr, &nr+1, first_whole+1);
+		}
+		first_whole++;
+	}
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+}
+
+static inline unsigned nblocks(loff_t size)
+{
+	unsigned blocks, res, direct = DIRECT, i = DEPTH;
+	blocks = (size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
+	res = blocks;
+	while (--i && blocks > direct) {
+		blocks -= direct;
+		blocks += BLOCK_SIZE/sizeof(block_t) - 1;
+		blocks /= BLOCK_SIZE/sizeof(block_t);
+		res += blocks;
+		direct = 1;
+	}
+	return res;
+}
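+
+/*
+ * Worked example of nblocks() for the V2 geometry (DIRECT = 7, DEPTH = 4,
+ * 1024-byte blocks, 4-byte block_t, so 256 pointers per indirect block):
+ * a 1 MiB file occupies 1024 data blocks.  First pass: 1024 - 7 = 1017
+ * blocks reached through indirection need ceil(1017/256) = 4 leaf indirect
+ * blocks (the single indirect block plus 3 second-level blocks).  Second
+ * pass: those 4, minus the 1 hanging directly off the inode, need 1 more
+ * top-level (double indirect) block.  Total: 1024 + 4 + 1 = 1029 blocks,
+ * which is what the loop above computes.
+ */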
diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
new file mode 100644
index 0000000..ba06aef
--- /dev/null
+++ b/fs/minix/itree_v1.c
@@ -0,0 +1,61 @@
+#include <linux/buffer_head.h>
+#include "minix.h"
+
+enum {DEPTH = 3, DIRECT = 7};	/* Only double indirect */
+
+typedef u16 block_t;	/* 16 bit, host order */
+
+static inline unsigned long block_to_cpu(block_t n)
+{
+	return n;
+}
+
+static inline block_t cpu_to_block(unsigned long n)
+{
+	return n;
+}
+
+static inline block_t *i_data(struct inode *inode)
+{
+	return (block_t *)minix_i(inode)->u.i1_data;
+}
+
+static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+{
+	int n = 0;
+
+	if (block < 0) {
+		printk("minix_bmap: block<0");
+	} else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+		printk("minix_bmap: block>big");
+	} else if (block < 7) {
+		offsets[n++] = block;
+	} else if ((block -= 7) < 512) {
+		offsets[n++] = 7;
+		offsets[n++] = block;
+	} else {
+		block -= 512;
+		offsets[n++] = 8;
+		offsets[n++] = block>>9;
+		offsets[n++] = block & 511;
+	}
+	return n;
+}
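+
+/*
+ * With 16-bit zone pointers a 1024-byte indirect block holds 512 entries,
+ * hence the 7 / 512 / 512*512 split above.  Two worked examples:
+ *
+ *   block 100:  100 - 7 = 93 < 512    -> offsets = { 7, 93 },    n = 2
+ *   block 600:  600 - 7 - 512 = 81    -> offsets = { 8, 0, 81 }, n = 3
+ *
+ * i.e. block 600 is entry 81 of the first second-level block under the
+ * double indirect zone.
+ */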
+
+#include "itree_common.c"
+
+int V1_minix_get_block(struct inode * inode, long block,
+			struct buffer_head *bh_result, int create)
+{
+	return get_block(inode, block, bh_result, create);
+}
+
+void V1_minix_truncate(struct inode * inode)
+{
+	truncate(inode);
+}
+
+unsigned V1_minix_blocks(loff_t size)
+{
+	return nblocks(size);
+}
diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
new file mode 100644
index 0000000..3adc767
--- /dev/null
+++ b/fs/minix/itree_v2.c
@@ -0,0 +1,66 @@
+#include <linux/buffer_head.h>
+#include "minix.h"
+
+enum {DIRECT = 7, DEPTH = 4};	/* Have triple indirect */
+
+typedef u32 block_t;	/* 32 bit, host order */
+
+static inline unsigned long block_to_cpu(block_t n)
+{
+	return n;
+}
+
+static inline block_t cpu_to_block(unsigned long n)
+{
+	return n;
+}
+
+static inline block_t *i_data(struct inode *inode)
+{
+	return (block_t *)minix_i(inode)->u.i2_data;
+}
+
+static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+{
+	int n = 0;
+
+	if (block < 0) {
+		printk("minix_bmap: block<0");
+	} else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+		printk("minix_bmap: block>big");
+	} else if (block < 7) {
+		offsets[n++] = block;
+	} else if ((block -= 7) < 256) {
+		offsets[n++] = 7;
+		offsets[n++] = block;
+	} else if ((block -= 256) < 256*256) {
+		offsets[n++] = 8;
+		offsets[n++] = block>>8;
+		offsets[n++] = block & 255;
+	} else {
+		block -= 256*256;
+		offsets[n++] = 9;
+		offsets[n++] = block>>16;
+		offsets[n++] = (block>>8) & 255;
+		offsets[n++] = block & 255;
+	}
+	return n;
+}
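+
+/*
+ * With 32-bit zone pointers an indirect block holds 256 entries, so the
+ * split above is 7 direct, 256 single, 256*256 double, the rest triple.
+ * Worked example: block 70000 -> 70000 - 7 - 256 - 65536 = 4201, giving
+ *
+ *	offsets = { 9, 4201 >> 16, (4201 >> 8) & 255, 4201 & 255 }
+ *	        = { 9, 0, 16, 105 }	and n = 4  (16*256 + 105 = 4201)
+ */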
+
+#include "itree_common.c"
+
+int V2_minix_get_block(struct inode * inode, long block,
+			struct buffer_head *bh_result, int create)
+{
+	return get_block(inode, block, bh_result, create);
+}
+
+void V2_minix_truncate(struct inode * inode)
+{
+	truncate(inode);
+}
+
+unsigned V2_minix_blocks(loff_t size)
+{
+	return nblocks(size);
+}
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
new file mode 100644
index 0000000..e42a8bb
--- /dev/null
+++ b/fs/minix/minix.h
@@ -0,0 +1,96 @@
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/minix_fs.h>
+
+/*
+ * change the define below to 0 if you want names > info->s_namelen chars to be
+ * truncated. Else they will be disallowed (ENAMETOOLONG).
+ */
+#define NO_TRUNCATE 1
+
+#define INODE_VERSION(inode)	minix_sb(inode->i_sb)->s_version
+
+#define MINIX_V1		0x0001		/* original minix fs */
+#define MINIX_V2		0x0002		/* minix V2 fs */
+
+/*
+ * minix fs inode data in memory
+ */
+struct minix_inode_info {
+	union {
+		__u16 i1_data[16];
+		__u32 i2_data[16];
+	} u;
+	struct inode vfs_inode;
+};
+
+/*
+ * minix super-block data in memory
+ */
+struct minix_sb_info {
+	unsigned long s_ninodes;
+	unsigned long s_nzones;
+	unsigned long s_imap_blocks;
+	unsigned long s_zmap_blocks;
+	unsigned long s_firstdatazone;
+	unsigned long s_log_zone_size;
+	unsigned long s_max_size;
+	int s_dirsize;
+	int s_namelen;
+	int s_link_max;
+	struct buffer_head ** s_imap;
+	struct buffer_head ** s_zmap;
+	struct buffer_head * s_sbh;
+	struct minix_super_block * s_ms;
+	unsigned short s_mount_state;
+	unsigned short s_version;
+};
+
+extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+extern struct inode * minix_new_inode(const struct inode * dir, int * error);
+extern void minix_free_inode(struct inode * inode);
+extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
+extern int minix_new_block(struct inode * inode);
+extern void minix_free_block(struct inode * inode, int block);
+extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
+
+extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+
+extern void V1_minix_truncate(struct inode *);
+extern void V2_minix_truncate(struct inode *);
+extern void minix_truncate(struct inode *);
+extern int minix_sync_inode(struct inode *);
+extern void minix_set_inode(struct inode *, dev_t);
+extern int V1_minix_get_block(struct inode *, long, struct buffer_head *, int);
+extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
+extern unsigned V1_minix_blocks(loff_t);
+extern unsigned V2_minix_blocks(loff_t);
+
+extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**);
+extern int minix_add_link(struct dentry*, struct inode*);
+extern int minix_delete_entry(struct minix_dir_entry*, struct page*);
+extern int minix_make_empty(struct inode*, struct inode*);
+extern int minix_empty_dir(struct inode*);
+extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*);
+extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
+extern ino_t minix_inode_by_name(struct dentry*);
+
+extern int minix_sync_file(struct file *, struct dentry *, int);
+
+extern struct inode_operations minix_file_inode_operations;
+extern struct inode_operations minix_dir_inode_operations;
+extern struct file_operations minix_file_operations;
+extern struct file_operations minix_dir_operations;
+extern struct dentry_operations minix_dentry_operations;
+
+static inline struct minix_sb_info *minix_sb(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline struct minix_inode_info *minix_i(struct inode *inode)
+{
+	return list_entry(inode, struct minix_inode_info, vfs_inode);
+}
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
new file mode 100644
index 0000000..b25bca5
--- /dev/null
+++ b/fs/minix/namei.c
@@ -0,0 +1,317 @@
+/*
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include "minix.h"
+
+static inline void inc_count(struct inode *inode)
+{
+	inode->i_nlink++;
+	mark_inode_dirty(inode);
+}
+
+static inline void dec_count(struct inode *inode)
+{
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+}
+
+static int add_nondir(struct dentry *dentry, struct inode *inode)
+{
+	int err = minix_add_link(dentry, inode);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	dec_count(inode);
+	iput(inode);
+	return err;
+}
+
+static int minix_hash(struct dentry *dentry, struct qstr *qstr)
+{
+	unsigned long hash;
+	int i;
+	const unsigned char *name;
+
+	i = minix_sb(dentry->d_inode->i_sb)->s_namelen;
+	if (i >= qstr->len)
+		return 0;
+	/* Truncate the name in place; this avoids having to define a
+	   separate compare function. */
+	qstr->len = i;
+	name = qstr->name;
+	hash = init_name_hash();
+	while (i--)
+		hash = partial_name_hash(*name++, hash);
+	qstr->hash = end_name_hash(hash);
+	return 0;
+}
+
+struct dentry_operations minix_dentry_operations = {
+	.d_hash		= minix_hash,
+};
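+
+/*
+ * These dentry operations are only wired up when NO_TRUNCATE in minix.h is
+ * changed to 0: minix_fill_super() then installs them on the root dentry
+ * and minix_lookup() propagates them to children.  In that case a lookup
+ * name longer than s_namelen is silently cut down to its first s_namelen
+ * characters here, so e.g. on a 14-character filesystem "averylongfilename"
+ * and "averylongfilenameX" hash and compare as the same entry.  With the
+ * default NO_TRUNCATE = 1 the d_op stays unset and such names are rejected
+ * with -ENAMETOOLONG in minix_lookup() instead.
+ */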
+
+static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode * inode = NULL;
+	ino_t ino;
+
+	dentry->d_op = dir->i_sb->s_root->d_op;
+
+	if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	ino = minix_inode_by_name(dentry);
+	if (ino) {
+		inode = iget(dir->i_sb, ino);
+ 
+		if (!inode)
+			return ERR_PTR(-EACCES);
+	}
+	d_add(dentry, inode);
+	return NULL;
+}
+
+static int minix_mknod(struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	int error;
+	struct inode *inode;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+
+	inode = minix_new_inode(dir, &error);
+
+	if (inode) {
+		inode->i_mode = mode;
+		minix_set_inode(inode, rdev);
+		mark_inode_dirty(inode);
+		error = add_nondir(dentry, inode);
+	}
+	return error;
+}
+
+static int minix_create(struct inode * dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	return minix_mknod(dir, dentry, mode, 0);
+}
+
+static int minix_symlink(struct inode * dir, struct dentry *dentry,
+	  const char * symname)
+{
+	int err = -ENAMETOOLONG;
+	int i = strlen(symname)+1;
+	struct inode * inode;
+
+	if (i > dir->i_sb->s_blocksize)
+		goto out;
+
+	inode = minix_new_inode(dir, &err);
+	if (!inode)
+		goto out;
+
+	inode->i_mode = S_IFLNK | 0777;
+	minix_set_inode(inode, 0);
+	err = page_symlink(inode, symname, i);
+	if (err)
+		goto out_fail;
+
+	err = add_nondir(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	dec_count(inode);
+	iput(inode);
+	goto out;
+}
+
+static int minix_link(struct dentry * old_dentry, struct inode * dir,
+	struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+
+	if (inode->i_nlink >= minix_sb(inode->i_sb)->s_link_max)
+		return -EMLINK;
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	inc_count(inode);
+	atomic_inc(&inode->i_count);
+	return add_nondir(dentry, inode);
+}
+
+static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+{
+	struct inode * inode;
+	int err = -EMLINK;
+
+	if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max)
+		goto out;
+
+	inc_count(dir);
+
+	inode = minix_new_inode(dir, &err);
+	if (!inode)
+		goto out_dir;
+
+	inode->i_mode = S_IFDIR | mode;
+	if (dir->i_mode & S_ISGID)
+		inode->i_mode |= S_ISGID;
+	minix_set_inode(inode, 0);
+
+	inc_count(inode);
+
+	err = minix_make_empty(inode, dir);
+	if (err)
+		goto out_fail;
+
+	err = minix_add_link(dentry, inode);
+	if (err)
+		goto out_fail;
+
+	d_instantiate(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	dec_count(inode);
+	dec_count(inode);
+	iput(inode);
+out_dir:
+	dec_count(dir);
+	goto out;
+}
+
+static int minix_unlink(struct inode * dir, struct dentry *dentry)
+{
+	int err = -ENOENT;
+	struct inode * inode = dentry->d_inode;
+	struct page * page;
+	struct minix_dir_entry * de;
+
+	de = minix_find_entry(dentry, &page);
+	if (!de)
+		goto end_unlink;
+
+	err = minix_delete_entry(de, page);
+	if (err)
+		goto end_unlink;
+
+	inode->i_ctime = dir->i_ctime;
+	dec_count(inode);
+end_unlink:
+	return err;
+}
+
+static int minix_rmdir(struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	int err = -ENOTEMPTY;
+
+	if (minix_empty_dir(inode)) {
+		err = minix_unlink(dir, dentry);
+		if (!err) {
+			dec_count(dir);
+			dec_count(inode);
+		}
+	}
+	return err;
+}
+
+static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
+			   struct inode * new_dir, struct dentry *new_dentry)
+{
+	struct minix_sb_info * info = minix_sb(old_dir->i_sb);
+	struct inode * old_inode = old_dentry->d_inode;
+	struct inode * new_inode = new_dentry->d_inode;
+	struct page * dir_page = NULL;
+	struct minix_dir_entry * dir_de = NULL;
+	struct page * old_page;
+	struct minix_dir_entry * old_de;
+	int err = -ENOENT;
+
+	old_de = minix_find_entry(old_dentry, &old_page);
+	if (!old_de)
+		goto out;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+		err = -EIO;
+		dir_de = minix_dotdot(old_inode, &dir_page);
+		if (!dir_de)
+			goto out_old;
+	}
+
+	if (new_inode) {
+		struct page * new_page;
+		struct minix_dir_entry * new_de;
+
+		err = -ENOTEMPTY;
+		if (dir_de && !minix_empty_dir(new_inode))
+			goto out_dir;
+
+		err = -ENOENT;
+		new_de = minix_find_entry(new_dentry, &new_page);
+		if (!new_de)
+			goto out_dir;
+		inc_count(old_inode);
+		minix_set_link(new_de, new_page, old_inode);
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		if (dir_de)
+			new_inode->i_nlink--;
+		dec_count(new_inode);
+	} else {
+		if (dir_de) {
+			err = -EMLINK;
+			if (new_dir->i_nlink >= info->s_link_max)
+				goto out_dir;
+		}
+		inc_count(old_inode);
+		err = minix_add_link(new_dentry, old_inode);
+		if (err) {
+			dec_count(old_inode);
+			goto out_dir;
+		}
+		if (dir_de)
+			inc_count(new_dir);
+	}
+
+	minix_delete_entry(old_de, old_page);
+	dec_count(old_inode);
+
+	if (dir_de) {
+		minix_set_link(dir_de, dir_page, new_dir);
+		dec_count(old_dir);
+	}
+	return 0;
+
+out_dir:
+	if (dir_de) {
+		kunmap(dir_page);
+		page_cache_release(dir_page);
+	}
+out_old:
+	kunmap(old_page);
+	page_cache_release(old_page);
+out:
+	return err;
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations minix_dir_inode_operations = {
+	.create		= minix_create,
+	.lookup		= minix_lookup,
+	.link		= minix_link,
+	.unlink		= minix_unlink,
+	.symlink	= minix_symlink,
+	.mkdir		= minix_mkdir,
+	.rmdir		= minix_rmdir,
+	.mknod		= minix_mknod,
+	.rename		= minix_rename,
+	.getattr	= minix_getattr,
+};
diff --git a/fs/mpage.c b/fs/mpage.c
new file mode 100644
index 0000000..e7d8d1a
--- /dev/null
+++ b/fs/mpage.c
@@ -0,0 +1,772 @@
+/*
+ * fs/mpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ *
+ * Contains functions related to preparing and submitting BIOs which contain
+ * multiple pagecache pages.
+ *
+ * 15May2002	akpm@zip.com.au
+ *		Initial version
+ * 27Jun2002	axboe@suse.de
+ *		use bio_add_page() to build bio's just the right size
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/bio.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/prefetch.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this?  If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard.  See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+	if (bio->bi_size)
+		return 1;
+
+	do {
+		struct page *page = bvec->bv_page;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
+		if (uptodate) {
+			SetPageUptodate(page);
+		} else {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+		unlock_page(page);
+	} while (bvec >= bio->bi_io_vec);
+	bio_put(bio);
+	return 0;
+}
+
+static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+	if (bio->bi_size)
+		return 1;
+
+	do {
+		struct page *page = bvec->bv_page;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
+		if (!uptodate)
+			SetPageError(page);
+		end_page_writeback(page);
+	} while (bvec >= bio->bi_io_vec);
+	bio_put(bio);
+	return 0;
+}
+
+struct bio *mpage_bio_submit(int rw, struct bio *bio)
+{
+	bio->bi_end_io = mpage_end_io_read;
+	if (rw == WRITE)
+		bio->bi_end_io = mpage_end_io_write;
+	submit_bio(rw, bio);
+	return NULL;
+}
+
+static struct bio *
+mpage_alloc(struct block_device *bdev,
+		sector_t first_sector, int nr_vecs,
+		unsigned int __nocast gfp_flags)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_flags, nr_vecs);
+
+	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+		while (!bio && (nr_vecs /= 2))
+			bio = bio_alloc(gfp_flags, nr_vecs);
+	}
+
+	if (bio) {
+		bio->bi_bdev = bdev;
+		bio->bi_sector = first_sector;
+	}
+	return bio;
+}
+
+/*
+ * support function for mpage_readpages.  The fs supplied get_block might
+ * return an up to date buffer.  This is used to map that buffer into
+ * the page, which allows readpage to avoid triggering a duplicate call
+ * to get_block.
+ *
+ * The idea is to avoid adding buffers to pages that don't already have
+ * them.  So when the buffer is up to date and the page size == block size,
+ * this marks the page up to date instead of adding new buffers.
+ */
+static void 
+map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) 
+{
+	struct inode *inode = page->mapping->host;
+	struct buffer_head *page_bh, *head;
+	int block = 0;
+
+	if (!page_has_buffers(page)) {
+		/*
+		 * don't make any buffers if there is only one buffer on
+		 * the page and the page just needs to be set up to date
+		 */
+		if (inode->i_blkbits == PAGE_CACHE_SHIFT && 
+		    buffer_uptodate(bh)) {
+			SetPageUptodate(page);    
+			return;
+		}
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+	}
+	head = page_buffers(page);
+	page_bh = head;
+	do {
+		if (block == page_block) {
+			page_bh->b_state = bh->b_state;
+			page_bh->b_bdev = bh->b_bdev;
+			page_bh->b_blocknr = bh->b_blocknr;
+			break;
+		}
+		page_bh = page_bh->b_this_page;
+		block++;
+	} while (page_bh != head);
+}
+
+/**
+ * mpage_readpages - populate an address space with some pages, and
+ *                       start reads against them.
+ *
+ * @mapping: the address_space
+ * @pages: The address of a list_head which contains the target pages.  These
+ *   pages have their ->index populated and are otherwise uninitialised.
+ *
+ *   The page at @pages->prev has the lowest file offset, and reads should be
+ *   issued in @pages->prev to @pages->next order.
+ *
+ * @nr_pages: The number of pages at *@pages
+ * @get_block: The filesystem's block mapper function.
+ *
+ * This function walks the pages and the blocks within each page, building and
+ * emitting large BIOs.
+ *
+ * If anything unusual happens, such as:
+ *
+ * - encountering a page which has buffers
+ * - encountering a page which has a non-hole after a hole
+ * - encountering a page with non-contiguous blocks
+ *
+ * then this code just gives up and calls the buffer_head-based read function.
+ * It does handle a page which has holes at the end - that is a common case:
+ * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ *
+ * BH_Boundary explanation:
+ *
+ * There is a problem.  The mpage read code assembles several pages, gets all
+ * their disk mappings, and then submits them all.  That's fine, but obtaining
+ * the disk mappings may require I/O.  Reads of indirect blocks, for example.
+ *
+ * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
+ * submitted in the following order:
+ * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
+ * because the indirect block has to be read to get the mappings of blocks
+ * 13,14,15,16.  Obviously, this impacts performance.
+ * 
+ * So what we do is allow the filesystem's get_block() function to set
+ * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
+ * after this one will require I/O against a block which is probably close to
+ * this one.  So you should push what I/O you have currently accumulated.
+ *
+ * This all causes the disk requests to be issued in the correct order.
+ */
+static struct bio *
+do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
+			sector_t *last_block_in_bio, get_block_t get_block)
+{
+	struct inode *inode = page->mapping->host;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t blocks[MAX_BUF_PER_PAGE];
+	unsigned page_block;
+	unsigned first_hole = blocks_per_page;
+	struct block_device *bdev = NULL;
+	struct buffer_head bh;
+	int length;
+	int fully_mapped = 1;
+
+	if (page_has_buffers(page))
+		goto confused;
+
+	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
+	last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
+
+	bh.b_page = page;
+	for (page_block = 0; page_block < blocks_per_page;
+				page_block++, block_in_file++) {
+		bh.b_state = 0;
+		if (block_in_file < last_block) {
+			if (get_block(inode, block_in_file, &bh, 0))
+				goto confused;
+		}
+
+		if (!buffer_mapped(&bh)) {
+			fully_mapped = 0;
+			if (first_hole == blocks_per_page)
+				first_hole = page_block;
+			continue;
+		}
+
+		/* some filesystems will copy data into the page during
+		 * the get_block call, in which case we don't want to
+		 * read it again.  map_buffer_to_page copies the data
+		 * we just collected from get_block into the page's buffers
+		 * so readpage doesn't have to repeat the get_block call
+		 */
+		if (buffer_uptodate(&bh)) {
+			map_buffer_to_page(page, &bh, page_block);
+			goto confused;
+		}
+	
+		if (first_hole != blocks_per_page)
+			goto confused;		/* hole -> non-hole */
+
+		/* Contiguous blocks? */
+		if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
+			goto confused;
+		blocks[page_block] = bh.b_blocknr;
+		bdev = bh.b_bdev;
+	}
+
+	if (first_hole != blocks_per_page) {
+		char *kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + (first_hole << blkbits), 0,
+				PAGE_CACHE_SIZE - (first_hole << blkbits));
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		if (first_hole == 0) {
+			SetPageUptodate(page);
+			unlock_page(page);
+			goto out;
+		}
+	} else if (fully_mapped) {
+		SetPageMappedToDisk(page);
+	}
+
+	/*
+	 * This page will go to BIO.  Do we need to send this BIO off first?
+	 */
+	if (bio && (*last_block_in_bio != blocks[0] - 1))
+		bio = mpage_bio_submit(READ, bio);
+
+alloc_new:
+	if (bio == NULL) {
+		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+			  	min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
+				GFP_KERNEL);
+		if (bio == NULL)
+			goto confused;
+	}
+
+	length = first_hole << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
+		bio = mpage_bio_submit(READ, bio);
+		goto alloc_new;
+	}
+
+	if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
+		bio = mpage_bio_submit(READ, bio);
+	else
+		*last_block_in_bio = blocks[blocks_per_page - 1];
+out:
+	return bio;
+
+confused:
+	if (bio)
+		bio = mpage_bio_submit(READ, bio);
+	if (!PageUptodate(page))
+	        block_read_full_page(page, get_block);
+	else
+		unlock_page(page);
+	goto out;
+}
+
+int
+mpage_readpages(struct address_space *mapping, struct list_head *pages,
+				unsigned nr_pages, get_block_t get_block)
+{
+	struct bio *bio = NULL;
+	unsigned page_idx;
+	sector_t last_block_in_bio = 0;
+	struct pagevec lru_pvec;
+
+	pagevec_init(&lru_pvec, 0);
+	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+		struct page *page = list_entry(pages->prev, struct page, lru);
+
+		prefetchw(&page->flags);
+		list_del(&page->lru);
+		if (!add_to_page_cache(page, mapping,
+					page->index, GFP_KERNEL)) {
+			bio = do_mpage_readpage(bio, page,
+					nr_pages - page_idx,
+					&last_block_in_bio, get_block);
+			if (!pagevec_add(&lru_pvec, page))
+				__pagevec_lru_add(&lru_pvec);
+		} else {
+			page_cache_release(page);
+		}
+	}
+	pagevec_lru_add(&lru_pvec);
+	BUG_ON(!list_empty(pages));
+	if (bio)
+		mpage_bio_submit(READ, bio);
+	return 0;
+}
+EXPORT_SYMBOL(mpage_readpages);
+
+/*
+ * This isn't called much at all
+ */
+int mpage_readpage(struct page *page, get_block_t get_block)
+{
+	struct bio *bio = NULL;
+	sector_t last_block_in_bio = 0;
+
+	bio = do_mpage_readpage(bio, page, 1,
+			&last_block_in_bio, get_block);
+	if (bio)
+		mpage_bio_submit(READ, bio);
+	return 0;
+}
+EXPORT_SYMBOL(mpage_readpage);
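+
+/*
+ * Typical use of these helpers: a block-based filesystem only has to supply
+ * its get_block_t and can route its address_space operations through here.
+ * A minimal sketch (the foofs_* names are hypothetical, not an existing
+ * user):
+ *
+ *	static int foofs_readpage(struct file *file, struct page *page)
+ *	{
+ *		return mpage_readpage(page, foofs_get_block);
+ *	}
+ *
+ *	static int foofs_readpages(struct file *file,
+ *			struct address_space *mapping,
+ *			struct list_head *pages, unsigned nr_pages)
+ *	{
+ *		return mpage_readpages(mapping, pages, nr_pages,
+ *					foofs_get_block);
+ *	}
+ *
+ *	static int foofs_writepages(struct address_space *mapping,
+ *			struct writeback_control *wbc)
+ *	{
+ *		return mpage_writepages(mapping, wbc, foofs_get_block);
+ *	}
+ *
+ * hooked up as .readpage, .readpages and .writepages in the filesystem's
+ * struct address_space_operations.
+ */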
+
+/*
+ * Writing is not so simple.
+ *
+ * If the page has buffers then they will be used for obtaining the disk
+ * mapping.  We only support pages which are fully mapped-and-dirty, with a
+ * special case for pages which are unmapped at the end: end-of-file.
+ *
+ * If the page has no buffers (preferred) then the page is mapped here.
+ *
+ * If all blocks are found to be contiguous then the page can go into the
+ * BIO.  Otherwise fall back to the mapping's writepage().
+ * 
+ * FIXME: This code wants an estimate of how many pages are still to be
+ * written, so it can intelligently allocate a suitably-sized BIO.  For now,
+ * just allocate full-size (16-page) BIOs.
+ */
+static struct bio *
+__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
+	sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
+	writepage_t writepage_fn)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = page->mapping->host;
+	const unsigned blkbits = inode->i_blkbits;
+	unsigned long end_index;
+	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	sector_t last_block;
+	sector_t block_in_file;
+	sector_t blocks[MAX_BUF_PER_PAGE];
+	unsigned page_block;
+	unsigned first_unmapped = blocks_per_page;
+	struct block_device *bdev = NULL;
+	int boundary = 0;
+	sector_t boundary_block = 0;
+	struct block_device *boundary_bdev = NULL;
+	int length;
+	struct buffer_head map_bh;
+	loff_t i_size = i_size_read(inode);
+
+	if (page_has_buffers(page)) {
+		struct buffer_head *head = page_buffers(page);
+		struct buffer_head *bh = head;
+
+		/* If they're all mapped and dirty, do it */
+		page_block = 0;
+		do {
+			BUG_ON(buffer_locked(bh));
+			if (!buffer_mapped(bh)) {
+				/*
+				 * unmapped dirty buffers are created by
+				 * __set_page_dirty_buffers -> mmapped data
+				 */
+				if (buffer_dirty(bh))
+					goto confused;
+				if (first_unmapped == blocks_per_page)
+					first_unmapped = page_block;
+				continue;
+			}
+
+			if (first_unmapped != blocks_per_page)
+				goto confused;	/* hole -> non-hole */
+
+			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
+				goto confused;
+			if (page_block) {
+				if (bh->b_blocknr != blocks[page_block-1] + 1)
+					goto confused;
+			}
+			blocks[page_block++] = bh->b_blocknr;
+			boundary = buffer_boundary(bh);
+			if (boundary) {
+				boundary_block = bh->b_blocknr;
+				boundary_bdev = bh->b_bdev;
+			}
+			bdev = bh->b_bdev;
+		} while ((bh = bh->b_this_page) != head);
+
+		if (first_unmapped)
+			goto page_is_mapped;
+
+		/*
+		 * Page has buffers, but they are all unmapped. The page was
+		 * created by pagein or read over a hole which was handled by
+		 * block_read_full_page().  If this address_space is also
+		 * using mpage_readpages then this can rarely happen.
+		 */
+		goto confused;
+	}
+
+	/*
+	 * The page has no buffers: map it to disk
+	 */
+	BUG_ON(!PageUptodate(page));
+	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
+	last_block = (i_size - 1) >> blkbits;
+	map_bh.b_page = page;
+	for (page_block = 0; page_block < blocks_per_page; ) {
+
+		map_bh.b_state = 0;
+		if (get_block(inode, block_in_file, &map_bh, 1))
+			goto confused;
+		if (buffer_new(&map_bh))
+			unmap_underlying_metadata(map_bh.b_bdev,
+						map_bh.b_blocknr);
+		if (buffer_boundary(&map_bh)) {
+			boundary_block = map_bh.b_blocknr;
+			boundary_bdev = map_bh.b_bdev;
+		}
+		if (page_block) {
+			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
+				goto confused;
+		}
+		blocks[page_block++] = map_bh.b_blocknr;
+		boundary = buffer_boundary(&map_bh);
+		bdev = map_bh.b_bdev;
+		if (block_in_file == last_block)
+			break;
+		block_in_file++;
+	}
+	BUG_ON(page_block == 0);
+
+	first_unmapped = page_block;
+
+page_is_mapped:
+	end_index = i_size >> PAGE_CACHE_SHIFT;
+	if (page->index >= end_index) {
+		/*
+		 * The page straddles i_size.  It must be zeroed out on each
+		 * and every writepage invocation because it may be mmapped.
+		 * "A file is mapped in multiples of the page size.  For a file
+		 * that is not a multiple of the page size, the remaining memory
+		 * is zeroed when mapped, and writes to that region are not
+		 * written out to the file."
+		 */
+		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+		char *kaddr;
+
+		if (page->index > end_index || !offset)
+			goto confused;
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+	}
+
+	/*
+	 * This page will go to BIO.  Do we need to send this BIO off first?
+	 */
+	if (bio && *last_block_in_bio != blocks[0] - 1)
+		bio = mpage_bio_submit(WRITE, bio);
+
+alloc_new:
+	if (bio == NULL) {
+		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
+		if (bio == NULL)
+			goto confused;
+	}
+
+	/*
+	 * Must try to add the page before marking the buffer clean or
+	 * the confused fail path above (OOM) will be very confused when
+	 * it finds all bh marked clean (i.e. it will not write anything)
+	 */
+	length = first_unmapped << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
+		bio = mpage_bio_submit(WRITE, bio);
+		goto alloc_new;
+	}
+
+	/*
+	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
+	 * sure to only clean buffers which we know we'll be writing.
+	 */
+	if (page_has_buffers(page)) {
+		struct buffer_head *head = page_buffers(page);
+		struct buffer_head *bh = head;
+		unsigned buffer_counter = 0;
+
+		do {
+			if (buffer_counter++ == first_unmapped)
+				break;
+			clear_buffer_dirty(bh);
+			bh = bh->b_this_page;
+		} while (bh != head);
+
+		/*
+		 * we cannot drop the bh if the page is not uptodate:
+		 * a concurrent readpage would then fail to serialize with
+		 * the bh and would read from disk before our write reaches
+		 * the platter.
+		 */
+		if (buffer_heads_over_limit && PageUptodate(page))
+			try_to_free_buffers(page);
+	}
+
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+	unlock_page(page);
+	if (boundary || (first_unmapped != blocks_per_page)) {
+		bio = mpage_bio_submit(WRITE, bio);
+		if (boundary_block) {
+			write_boundary_block(boundary_bdev,
+					boundary_block, 1 << blkbits);
+		}
+	} else {
+		*last_block_in_bio = blocks[blocks_per_page - 1];
+	}
+	goto out;
+
+confused:
+	if (bio)
+		bio = mpage_bio_submit(WRITE, bio);
+
+	if (writepage_fn) {
+		*ret = (*writepage_fn)(page, wbc);
+	} else {
+		*ret = -EAGAIN;
+		goto out;
+	}
+	/*
+	 * The caller has a ref on the inode, so *mapping is stable
+	 */
+	if (*ret) {
+		if (*ret == -ENOSPC)
+			set_bit(AS_ENOSPC, &mapping->flags);
+		else
+			set_bit(AS_EIO, &mapping->flags);
+	}
+out:
+	return bio;
+}
+
+/**
+ * mpage_writepages - walk the list of dirty pages of the given
+ * address space and writepage() all of them.
+ * 
+ * @mapping: address space structure to write
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @get_block: the filesystem's block mapper function.
+ *             If this is NULL then use a_ops->writepage.  Otherwise, go
+ *             direct-to-BIO.
+ *
+ * This is a library function, which implements the writepages()
+ * address_space_operation.
+ *
+ * If a page is already under I/O, generic_writepages() skips it, even
+ * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
+ * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
+ * and msync() need to guarantee that all the data which was dirty at the time
+ * the call was made get new I/O started against them.  If wbc->sync_mode is
+ * WB_SYNC_ALL then we were called for data integrity and we must wait for
+ * existing IO to complete.
+ */
+int
+mpage_writepages(struct address_space *mapping,
+		struct writeback_control *wbc, get_block_t get_block)
+{
+	return __mpage_writepages(mapping, wbc, get_block,
+		mapping->a_ops->writepage);
+}
+
+int
+__mpage_writepages(struct address_space *mapping,
+		struct writeback_control *wbc, get_block_t get_block,
+		writepage_t writepage_fn)
+{
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct bio *bio = NULL;
+	sector_t last_block_in_bio = 0;
+	int ret = 0;
+	int done = 0;
+	int (*writepage)(struct page *page, struct writeback_control *wbc);
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t index;
+	pgoff_t end = -1;		/* Inclusive */
+	int scanned = 0;
+	int is_range = 0;
+
+	if (wbc->nonblocking && bdi_write_congested(bdi)) {
+		wbc->encountered_congestion = 1;
+		return 0;
+	}
+
+	writepage = NULL;
+	if (get_block == NULL)
+		writepage = mapping->a_ops->writepage;
+
+	pagevec_init(&pvec, 0);
+	if (wbc->sync_mode == WB_SYNC_NONE) {
+		index = mapping->writeback_index; /* Start from prev offset */
+	} else {
+		index = 0;			  /* whole-file sweep */
+		scanned = 1;
+	}
+	if (wbc->start || wbc->end) {
+		index = wbc->start >> PAGE_CACHE_SHIFT;
+		end = wbc->end >> PAGE_CACHE_SHIFT;
+		is_range = 1;
+		scanned = 1;
+	}
+retry:
+	while (!done && (index <= end) &&
+			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+			PAGECACHE_TAG_DIRTY,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+		unsigned i;
+
+		scanned = 1;
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/*
+			 * At this point we hold neither mapping->tree_lock nor
+			 * lock on the page itself: the page may be truncated or
+			 * invalidated (changing page->mapping to NULL), or even
+			 * swizzled back from swapper_space to tmpfs file
+			 * mapping
+			 */
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (unlikely(is_range) && page->index > end) {
+				done = 1;
+				unlock_page(page);
+				continue;
+			}
+
+			if (wbc->sync_mode != WB_SYNC_NONE)
+				wait_on_page_writeback(page);
+
+			if (PageWriteback(page) ||
+					!clear_page_dirty_for_io(page)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (writepage) {
+				ret = (*writepage)(page, wbc);
+				if (ret) {
+					if (ret == -ENOSPC)
+						set_bit(AS_ENOSPC,
+							&mapping->flags);
+					else
+						set_bit(AS_EIO,
+							&mapping->flags);
+				}
+			} else {
+				bio = __mpage_writepage(bio, page, get_block,
+						&last_block_in_bio, &ret, wbc,
+						writepage_fn);
+			}
+			if (ret || (--(wbc->nr_to_write) <= 0))
+				done = 1;
+			if (wbc->nonblocking && bdi_write_congested(bdi)) {
+				wbc->encountered_congestion = 1;
+				done = 1;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	if (!scanned && !done) {
+		/*
+		 * We hit the last page and there is more work to be done: wrap
+		 * back to the start of the file
+		 */
+		scanned = 1;
+		index = 0;
+		goto retry;
+	}
+	if (!is_range)
+		mapping->writeback_index = index;
+	if (bio)
+		mpage_bio_submit(WRITE, bio);
+	return ret;
+}
+EXPORT_SYMBOL(mpage_writepages);
+EXPORT_SYMBOL(__mpage_writepages);
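+
+/*
+ * Note on the cyclic scan above: for WB_SYNC_NONE writeback the starting
+ * index is taken from mapping->writeback_index, so background writeback
+ * resumes where the previous pass stopped, and the finishing index is
+ * written back at the end.  If the tag lookup finds no dirty pages at or
+ * after that starting index, the "retry" path wraps index back to 0 once
+ * so dirty pages earlier in the file still get serviced.
+ */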
+
+int mpage_writepage(struct page *page, get_block_t get_block,
+	struct writeback_control *wbc)
+{
+	int ret = 0;
+	struct bio *bio;
+	sector_t last_block_in_bio = 0;
+
+	bio = __mpage_writepage(NULL, page, get_block,
+			&last_block_in_bio, &ret, wbc, NULL);
+	if (bio)
+		mpage_bio_submit(WRITE, bio);
+
+	return ret;
+}
+EXPORT_SYMBOL(mpage_writepage);
diff --git a/fs/msdos/Makefile b/fs/msdos/Makefile
new file mode 100644
index 0000000..ea67646
--- /dev/null
+++ b/fs/msdos/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux msdos filesystem routines.
+#
+
+obj-$(CONFIG_MSDOS_FS) += msdos.o
+
+msdos-y := namei.o
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
new file mode 100644
index 0000000..154f511
--- /dev/null
+++ b/fs/msdos/namei.c
@@ -0,0 +1,711 @@
+/*
+ *  linux/fs/msdos/namei.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *  Hidden files 1995 by Albert Cahalan <albert@ccs.neu.edu> <adc@coe.neu.edu>
+ *  Rewritten for constant inumbers 1999 by Al Viro
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/buffer_head.h>
+#include <linux/msdos_fs.h>
+#include <linux/smp_lock.h>
+
+/* MS-DOS "device special files" */
+static const unsigned char *reserved_names[] = {
+	"CON     ", "PRN     ", "NUL     ", "AUX     ",
+	"LPT1    ", "LPT2    ", "LPT3    ", "LPT4    ",
+	"COM1    ", "COM2    ", "COM3    ", "COM4    ",
+	NULL
+};
+
+/* Characters that are undesirable in an MS-DOS file name */
+static unsigned char bad_chars[] = "*?<>|\"";
+static unsigned char bad_if_strict_pc[] = "+=,; ";
+/* GEMDOS is less restrictive */
+static unsigned char bad_if_strict_atari[] = " ";
+
+#define bad_if_strict(opts) \
+	((opts)->atari ? bad_if_strict_atari : bad_if_strict_pc)
+
+/***** Formats an MS-DOS file name. Rejects invalid names. */
+static int msdos_format_name(const unsigned char *name, int len,
+			     unsigned char *res, struct fat_mount_options *opts)
+	/*
+	 * name is the proposed name, len is its length, res is
+	 * the resulting name, opts->name_check is either (r)elaxed,
+	 * (n)ormal or (s)trict, opts->dotsOK allows dots at the
+	 * beginning of name (for hidden files)
+	 */
+{
+	unsigned char *walk;
+	const unsigned char **reserved;
+	unsigned char c;
+	int space;
+
+	if (name[0] == '.') {	/* dotfile because . and .. already done */
+		if (opts->dotsOK) {
+			/* Get rid of dot - test for it elsewhere */
+			name++;
+			len--;
+		} else if (!opts->atari)
+			return -EINVAL;
+	}
+	/*
+	 * disallow names that _really_ start with a dot for MS-DOS,
+	 * GEMDOS does not care
+	 */
+	space = !opts->atari;
+	c = 0;
+	for (walk = res; len && walk - res < 8; walk++) {
+		c = *name++;
+		len--;
+		if (opts->name_check != 'r' && strchr(bad_chars, c))
+			return -EINVAL;
+		if (opts->name_check == 's' && strchr(bad_if_strict(opts), c))
+			return -EINVAL;
+		if (c >= 'A' && c <= 'Z' && opts->name_check == 's')
+			return -EINVAL;
+		if (c < ' ' || c == ':' || c == '\\')
+			return -EINVAL;
+	/*
+	 * 0xE5 is legal as a first character, but we must substitute
+	 * 0x05 because 0xE5 marks deleted files.  Yes, DOS really
+	 * does this.
+	 * It seems that Microsoft hacked DOS to support non-US
+	 * characters after the 0xE5 character was already in use to
+	 * mark deleted files.
+	 */
+		if ((res == walk) && (c == 0xE5))
+			c = 0x05;
+		if (c == '.')
+			break;
+		space = (c == ' ');
+		*walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c;
+	}
+	if (space)
+		return -EINVAL;
+	if (opts->name_check == 's' && len && c != '.') {
+		c = *name++;
+		len--;
+		if (c != '.')
+			return -EINVAL;
+	}
+	while (c != '.' && len--)
+		c = *name++;
+	if (c == '.') {
+		while (walk - res < 8)
+			*walk++ = ' ';
+		while (len > 0 && walk - res < MSDOS_NAME) {
+			c = *name++;
+			len--;
+			if (opts->name_check != 'r' && strchr(bad_chars, c))
+				return -EINVAL;
+			if (opts->name_check == 's' &&
+			    strchr(bad_if_strict(opts), c))
+				return -EINVAL;
+			if (c < ' ' || c == ':' || c == '\\')
+				return -EINVAL;
+			if (c == '.') {
+				if (opts->name_check == 's')
+					return -EINVAL;
+				break;
+			}
+			if (c >= 'A' && c <= 'Z' && opts->name_check == 's')
+				return -EINVAL;
+			space = c == ' ';
+			if (!opts->nocase && c >= 'a' && c <= 'z')
+				*walk++ = c - 32;
+			else
+				*walk++ = c;
+		}
+		if (space)
+			return -EINVAL;
+		if (opts->name_check == 's' && len)
+			return -EINVAL;
+	}
+	while (walk - res < MSDOS_NAME)
+		*walk++ = ' ';
+	if (!opts->atari)
+		/* GEMDOS is less stupid and has no reserved names */
+		for (reserved = reserved_names; *reserved; reserved++)
+			if (!strncmp(res, *reserved, 8))
+				return -EINVAL;
+	return 0;
+}
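+
+/*
+ * Illustration of the 8.3 formatting above (a sketch, assuming the
+ * default mount options: name_check 'n', nocase off, non-Atari).  The
+ * result is always MSDOS_NAME (11) bytes, space padded, with the dot
+ * dropped:
+ *
+ *	"foo.txt"	->	"FOO     TXT"
+ *	"README"	->	"README     "
+ *	"con.log"	->	-EINVAL ("CON" is a reserved device name)
+ */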
+
+/***** Locates a directory entry.  Uses unformatted name. */
+static int msdos_find(struct inode *dir, const unsigned char *name, int len,
+		      struct fat_slot_info *sinfo)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
+	unsigned char msdos_name[MSDOS_NAME];
+	int err;
+
+	err = msdos_format_name(name, len, msdos_name, &sbi->options);
+	if (err)
+		return -ENOENT;
+
+	err = fat_scan(dir, msdos_name, sinfo);
+	if (!err && sbi->options.dotsOK) {
+		if (name[0] == '.') {
+			if (!(sinfo->de->attr & ATTR_HIDDEN))
+				err = -ENOENT;
+		} else {
+			if (sinfo->de->attr & ATTR_HIDDEN)
+				err = -ENOENT;
+		}
+		if (err)
+			brelse(sinfo->bh);
+	}
+	return err;
+}
+
+/*
+ * Compute the hash for the msdos name corresponding to the dentry.
+ * Note: if the name is invalid, we leave the hash code unchanged so
+ * that the existing dentry can be used. The msdos fs routines will
+ * return ENOENT or EINVAL as appropriate.
+ */
+static int msdos_hash(struct dentry *dentry, struct qstr *qstr)
+{
+	struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
+	unsigned char msdos_name[MSDOS_NAME];
+	int error;
+
+	error = msdos_format_name(qstr->name, qstr->len, msdos_name, options);
+	if (!error)
+		qstr->hash = full_name_hash(msdos_name, MSDOS_NAME);
+	return 0;
+}
+
+/*
+ * Compare two msdos names. If either of the names is invalid,
+ * we fall back to doing the standard name comparison.
+ */
+static int msdos_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
+	unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
+	int error;
+
+	error = msdos_format_name(a->name, a->len, a_msdos_name, options);
+	if (error)
+		goto old_compare;
+	error = msdos_format_name(b->name, b->len, b_msdos_name, options);
+	if (error)
+		goto old_compare;
+	error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME);
+out:
+	return error;
+
+old_compare:
+	error = 1;
+	if (a->len == b->len)
+		error = memcmp(a->name, b->name, a->len);
+	goto out;
+}
+
+static struct dentry_operations msdos_dentry_operations = {
+	.d_hash		= msdos_hash,
+	.d_compare	= msdos_cmp,
+};
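+
+/*
+ * Net effect of the two hooks above (sketch): names that format to the
+ * same 8.3 entry hash and compare as equal in the dcache, e.g.
+ * "foo.txt", "FOO.TXT" and "Foo.Txt" all reduce to "FOO     TXT" with
+ * the default (nocase off) mount options, while names that cannot be
+ * formatted fall back to an exact byte-wise comparison.
+ */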
+
+/*
+ * AV. Wrappers for FAT sb operations. Is it wise?
+ */
+
+/***** Get inode using directory and name */
+static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
+				   struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct fat_slot_info sinfo;
+	struct inode *inode = NULL;
+	int res;
+
+	dentry->d_op = &msdos_dentry_operations;
+
+	lock_kernel();
+	res = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
+	if (res == -ENOENT)
+		goto add;
+	if (res < 0)
+		goto out;
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		res = PTR_ERR(inode);
+		goto out;
+	}
+add:
+	res = 0;
+	dentry = d_splice_alias(inode, dentry);
+	if (dentry)
+		dentry->d_op = &msdos_dentry_operations;
+out:
+	unlock_kernel();
+	if (!res)
+		return dentry;
+	return ERR_PTR(res);
+}
+
+/***** Creates a directory entry (name is already formatted). */
+static int msdos_add_entry(struct inode *dir, const unsigned char *name,
+			   int is_dir, int is_hid, int cluster,
+			   struct timespec *ts, struct fat_slot_info *sinfo)
+{
+	struct msdos_dir_entry de;
+	__le16 time, date;
+	int err;
+
+	memcpy(de.name, name, MSDOS_NAME);
+	de.attr = is_dir ? ATTR_DIR : ATTR_ARCH;
+	if (is_hid)
+		de.attr |= ATTR_HIDDEN;
+	de.lcase = 0;
+	fat_date_unix2dos(ts->tv_sec, &time, &date);
+	de.cdate = de.adate = 0;
+	de.ctime = 0;
+	de.ctime_cs = 0;
+	de.time = time;
+	de.date = date;
+	de.start = cpu_to_le16(cluster);
+	de.starthi = cpu_to_le16(cluster >> 16);
+	de.size = 0;
+
+	err = fat_add_entries(dir, &de, 1, sinfo);
+	if (err)
+		return err;
+
+	dir->i_ctime = dir->i_mtime = *ts;
+	if (IS_DIRSYNC(dir))
+		(void)fat_sync_inode(dir);
+	else
+		mark_inode_dirty(dir);
+
+	return 0;
+}
+
+/***** Create a file */
+static int msdos_create(struct inode *dir, struct dentry *dentry, int mode,
+			struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode;
+	struct fat_slot_info sinfo;
+	struct timespec ts;
+	unsigned char msdos_name[MSDOS_NAME];
+	int err, is_hid;
+
+	lock_kernel();
+
+	err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
+				msdos_name, &MSDOS_SB(sb)->options);
+	if (err)
+		goto out;
+	is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.');
+	/* Have to do it due to foo vs. .foo conflicts */
+	if (!fat_scan(dir, msdos_name, &sinfo)) {
+		brelse(sinfo.bh);
+		err = -EINVAL;
+		goto out;
+	}
+
+	ts = CURRENT_TIME_SEC;
+	err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo);
+	if (err)
+		goto out;
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out;
+	}
+	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+	d_instantiate(dentry, inode);
+out:
+	unlock_kernel();
+	return err;
+}
+
+/***** Remove a directory */
+static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct fat_slot_info sinfo;
+	int err;
+
+	lock_kernel();
+	/*
+	 * Check whether the directory is not in use, then check
+	 * whether it is empty.
+	 */
+	err = fat_dir_empty(inode);
+	if (err)
+		goto out;
+	err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
+	if (err)
+		goto out;
+
+	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
+	if (err)
+		goto out;
+	dir->i_nlink--;
+
+	inode->i_nlink = 0;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	fat_detach(inode);
+out:
+	unlock_kernel();
+
+	return err;
+}
+
+/***** Make a directory */
+static int msdos_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct super_block *sb = dir->i_sb;
+	struct fat_slot_info sinfo;
+	struct inode *inode;
+	unsigned char msdos_name[MSDOS_NAME];
+	struct timespec ts;
+	int err, is_hid, cluster;
+
+	lock_kernel();
+
+	err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
+				msdos_name, &MSDOS_SB(sb)->options);
+	if (err)
+		goto out;
+	is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.');
+	/* foo vs .foo situation */
+	if (!fat_scan(dir, msdos_name, &sinfo)) {
+		brelse(sinfo.bh);
+		err = -EINVAL;
+		goto out;
+	}
+
+	ts = CURRENT_TIME_SEC;
+	cluster = fat_alloc_new_dir(dir, &ts);
+	if (cluster < 0) {
+		err = cluster;
+		goto out;
+	}
+	err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo);
+	if (err)
+		goto out_free;
+	dir->i_nlink++;
+
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		/* the directory was completed, just return an error */
+		goto out;
+	}
+	inode->i_nlink = 2;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+	d_instantiate(dentry, inode);
+
+	unlock_kernel();
+	return 0;
+
+out_free:
+	fat_free_clusters(dir, cluster);
+out:
+	unlock_kernel();
+	return err;
+}
+
+/***** Unlink a file */
+static int msdos_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct fat_slot_info sinfo;
+	int err;
+
+	lock_kernel();
+	err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
+	if (err)
+		goto out;
+
+	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
+	if (err)
+		goto out;
+	inode->i_nlink = 0;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	fat_detach(inode);
+out:
+	unlock_kernel();
+
+	return err;
+}
+
+static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
+			   struct dentry *old_dentry,
+			   struct inode *new_dir, unsigned char *new_name,
+			   struct dentry *new_dentry, int is_hid)
+{
+	struct buffer_head *dotdot_bh;
+	struct msdos_dir_entry *dotdot_de;
+	loff_t dotdot_i_pos;
+	struct inode *old_inode, *new_inode;
+	struct fat_slot_info old_sinfo, sinfo;
+	struct timespec ts;
+	int err, old_attrs, is_dir, update_dotdot, corrupt = 0;
+
+	old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
+	old_inode = old_dentry->d_inode;
+	new_inode = new_dentry->d_inode;
+
+	err = fat_scan(old_dir, old_name, &old_sinfo);
+	if (err) {
+		err = -EIO;
+		goto out;
+	}
+
+	is_dir = S_ISDIR(old_inode->i_mode);
+	update_dotdot = (is_dir && old_dir != new_dir);
+	if (update_dotdot) {
+		if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
+					 &dotdot_i_pos) < 0) {
+			err = -EIO;
+			goto out;
+		}
+	}
+
+	old_attrs = MSDOS_I(old_inode)->i_attrs;
+	err = fat_scan(new_dir, new_name, &sinfo);
+	if (!err) {
+		if (!new_inode) {
+			/* "foo" -> ".foo" case. just change the ATTR_HIDDEN */
+			if (sinfo.de != old_sinfo.de) {
+				err = -EINVAL;
+				goto out;
+			}
+			if (is_hid)
+				MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN;
+			else
+				MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN;
+			if (IS_DIRSYNC(old_dir)) {
+				err = fat_sync_inode(old_inode);
+				if (err) {
+					MSDOS_I(old_inode)->i_attrs = old_attrs;
+					goto out;
+				}
+			} else
+				mark_inode_dirty(old_inode);
+
+			old_dir->i_version++;
+			old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+			if (IS_DIRSYNC(old_dir))
+				(void)fat_sync_inode(old_dir);
+			else
+				mark_inode_dirty(old_dir);
+			goto out;
+		}
+	}
+
+	ts = CURRENT_TIME_SEC;
+	if (new_inode) {
+		if (err)
+			goto out;
+		if (MSDOS_I(new_inode)->i_pos != sinfo.i_pos) {
+			/* WTF??? Cry and fail. */
+			printk(KERN_WARNING "msdos_rename: fs corrupted\n");
+			goto out;
+		}
+
+		if (is_dir) {
+			err = fat_dir_empty(new_inode);
+			if (err)
+				goto out;
+		}
+		fat_detach(new_inode);
+	} else {
+		err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0,
+				      &ts, &sinfo);
+		if (err)
+			goto out;
+	}
+	new_dir->i_version++;
+
+	fat_detach(old_inode);
+	fat_attach(old_inode, sinfo.i_pos);
+	if (is_hid)
+		MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN;
+	else
+		MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN;
+	if (IS_DIRSYNC(new_dir)) {
+		err = fat_sync_inode(old_inode);
+		if (err)
+			goto error_inode;
+	} else
+		mark_inode_dirty(old_inode);
+
+	if (update_dotdot) {
+		int start = MSDOS_I(new_dir)->i_logstart;
+		dotdot_de->start = cpu_to_le16(start);
+		dotdot_de->starthi = cpu_to_le16(start >> 16);
+		mark_buffer_dirty(dotdot_bh);
+		if (IS_DIRSYNC(new_dir)) {
+			err = sync_dirty_buffer(dotdot_bh);
+			if (err)
+				goto error_dotdot;
+		}
+		old_dir->i_nlink--;
+		if (!new_inode)
+			new_dir->i_nlink++;
+	}
+
+	err = fat_remove_entries(old_dir, &old_sinfo);	/* and releases bh */
+	old_sinfo.bh = NULL;
+	if (err)
+		goto error_dotdot;
+	old_dir->i_version++;
+	old_dir->i_ctime = old_dir->i_mtime = ts;
+	if (IS_DIRSYNC(old_dir))
+		(void)fat_sync_inode(old_dir);
+	else
+		mark_inode_dirty(old_dir);
+
+	if (new_inode) {
+		if (is_dir)
+			new_inode->i_nlink -= 2;
+		else
+			new_inode->i_nlink--;
+		new_inode->i_ctime = ts;
+	}
+out:
+	brelse(sinfo.bh);
+	brelse(dotdot_bh);
+	brelse(old_sinfo.bh);
+	return err;
+
+error_dotdot:
+	/* data cluster is shared, serious corruption */
+	corrupt = 1;
+
+	if (update_dotdot) {
+		int start = MSDOS_I(old_dir)->i_logstart;
+		dotdot_de->start = cpu_to_le16(start);
+		dotdot_de->starthi = cpu_to_le16(start >> 16);
+		mark_buffer_dirty(dotdot_bh);
+		corrupt |= sync_dirty_buffer(dotdot_bh);
+	}
+error_inode:
+	fat_detach(old_inode);
+	fat_attach(old_inode, old_sinfo.i_pos);
+	MSDOS_I(old_inode)->i_attrs = old_attrs;
+	if (new_inode) {
+		fat_attach(new_inode, sinfo.i_pos);
+		if (corrupt)
+			corrupt |= fat_sync_inode(new_inode);
+	} else {
+		/*
+		 * If new entry was not sharing the data cluster, it
+		 * shouldn't be serious corruption.
+		 */
+		int err2 = fat_remove_entries(new_dir, &sinfo);
+		if (corrupt)
+			corrupt |= err2;
+		sinfo.bh = NULL;
+	}
+	if (corrupt < 0) {
+		fat_fs_panic(new_dir->i_sb,
+			     "%s: Filesystem corrupted (i_pos %lld)",
+			     __FUNCTION__, sinfo.i_pos);
+	}
+	goto out;
+}
+
+/***** Rename, a wrapper around do_msdos_rename() */
+static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
+{
+	unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME];
+	int err, is_hid;
+
+	lock_kernel();
+
+	err = msdos_format_name(old_dentry->d_name.name,
+				old_dentry->d_name.len, old_msdos_name,
+				&MSDOS_SB(old_dir->i_sb)->options);
+	if (err)
+		goto out;
+	err = msdos_format_name(new_dentry->d_name.name,
+				new_dentry->d_name.len, new_msdos_name,
+				&MSDOS_SB(new_dir->i_sb)->options);
+	if (err)
+		goto out;
+
+	is_hid =
+	     (new_dentry->d_name.name[0] == '.') && (new_msdos_name[0] != '.');
+
+	err = do_msdos_rename(old_dir, old_msdos_name, old_dentry,
+			      new_dir, new_msdos_name, new_dentry, is_hid);
+out:
+	unlock_kernel();
+	return err;
+}
+
+static struct inode_operations msdos_dir_inode_operations = {
+	.create		= msdos_create,
+	.lookup		= msdos_lookup,
+	.unlink		= msdos_unlink,
+	.mkdir		= msdos_mkdir,
+	.rmdir		= msdos_rmdir,
+	.rename		= msdos_rename,
+	.setattr	= fat_notify_change,
+};
+
+static int msdos_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int res;
+
+	res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0);
+	if (res)
+		return res;
+
+	sb->s_flags |= MS_NOATIME;
+	sb->s_root->d_op = &msdos_dentry_operations;
+	return 0;
+}
+
+static struct super_block *msdos_get_sb(struct file_system_type *fs_type,
+					int flags, const char *dev_name,
+					void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
+}
+
+static struct file_system_type msdos_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "msdos",
+	.get_sb		= msdos_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_msdos_fs(void)
+{
+	return register_filesystem(&msdos_fs_type);
+}
+
+static void __exit exit_msdos_fs(void)
+{
+	unregister_filesystem(&msdos_fs_type);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Werner Almesberger");
+MODULE_DESCRIPTION("MS-DOS filesystem support");
+
+module_init(init_msdos_fs)
+module_exit(exit_msdos_fs)
diff --git a/fs/namei.c b/fs/namei.c
new file mode 100644
index 0000000..9e4aef2
--- /dev/null
+++ b/fs/namei.c
@@ -0,0 +1,2454 @@
+/*
+ *  linux/fs/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * Some corrections by tytso.
+ */
+
+/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
+ * lookup logic.
+ */
+/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+#include <linux/pagemap.h>
+#include <linux/dnotify.h>
+#include <linux/smp_lock.h>
+#include <linux/personality.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/audit.h>
+#include <asm/namei.h>
+#include <asm/uaccess.h>
+
+#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
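+
+/*
+ * Worked example of the mapping (sketch; MAY_READ is 4 and MAY_WRITE
+ * is 2 per <linux/fs.h>): ACC_MODE() indexes the string
+ * "\000\004\002\006" with the two low access bits, so
+ *
+ *	ACC_MODE(0) == 0			(no permission needed)
+ *	ACC_MODE(1) == MAY_READ
+ *	ACC_MODE(2) == MAY_WRITE
+ *	ACC_MODE(3) == MAY_READ | MAY_WRITE
+ *
+ * Note these are the remapped open_namei() bits described below, not
+ * the raw O_RDONLY/O_WRONLY/O_RDWR values from open(2).
+ */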
+
+/* [Feb-1997 T. Schoebel-Theuer]
+ * Fundamental changes in the pathname lookup mechanisms (namei)
+ * were necessary because of omirr.  The reason is that omirr needs
+ * to know the _real_ pathname, not the user-supplied one, in case
+ * of symlinks (and also when transname replacements occur).
+ *
+ * The new code replaces the old recursive symlink resolution with
+ * an iterative one (in case of non-nested symlink chains).  It does
+ * this with calls to <fs>_follow_link().
+ * As a side effect, dir_namei(), _namei() and follow_link() are now 
+ * replaced with a single function lookup_dentry() that can handle all 
+ * the special cases of the former code.
+ *
+ * With the new dcache, the pathname is stored at each inode, at least as
+ * long as the refcount of the inode is positive.  As a side effect, the
+ * size of the dcache depends on the inode cache and thus is dynamic.
+ *
+ * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
+ * resolution to correspond with current state of the code.
+ *
+ * Note that the symlink resolution is not *completely* iterative.
+ * There is still a significant amount of tail- and mid- recursion in
+ * the algorithm.  Also, note that <fs>_readlink() is not used in
+ * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
+ * may return different results than <fs>_follow_link().  Many virtual
+ * filesystems (including /proc) exhibit this behavior.
+ */
+
+/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
+ * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
+ * and the name already exists in the form of a symlink, try to create the new
+ * name indicated by the symlink. The old code always complained that the
+ * name already exists, due to not following the symlink even if its target
+ * is nonexistent.  The new semantics also affects mknod() and link() when
+ * the name is a symlink pointing to a non-existent name.
+ *
+ * I don't know which semantics is the right one, since I have no access
+ * to standards. But I found by trial that HP-UX 9.0 has the full "new"
+ * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
+ * "old" one. Personally, I think the new semantics is much more logical.
+ * Note that "ln old new" where "new" is a symlink pointing to a non-existing
+ * file does succeed in both HP-UX and SunOS, but not in Solaris or
+ * under the old Linux semantics.
+ */
+
+/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
+ * semantics.  See the comments in "open_namei" and "do_link" below.
+ *
+ * [10-Sep-98 Alan Modra] Another symlink change.
+ */
+
+/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
+ *	inside the path - always follow.
+ *	in the last component in creation/removal/renaming - never follow.
+ *	if LOOKUP_FOLLOW passed - follow.
+ *	if the pathname has trailing slashes - follow.
+ *	otherwise - don't follow.
+ * (applied in that order).
+ *
+ * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
+ * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
+ * During the 2.4 we need to fix the userland stuff depending on it -
+ * hopefully we will be able to get rid of that wart in 2.5. So far only
+ * XEmacs seems to be relying on it...
+ */
+/*
+ * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
+ * implemented.  Let's see if raised priority of ->s_vfs_rename_sem gives
+ * any extra contention...
+ */
+
+/* In order to reduce some races, while at the same time doing additional
+ * checking and hopefully speeding things up, we copy filenames to the
+ * kernel data space before using them..
+ *
+ * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
+ * PATH_MAX includes the nul terminator --RR.
+ */
+static inline int do_getname(const char __user *filename, char *page)
+{
+	int retval;
+	unsigned long len = PATH_MAX;
+
+	if (!segment_eq(get_fs(), KERNEL_DS)) {
+		if ((unsigned long) filename >= TASK_SIZE)
+			return -EFAULT;
+		if (TASK_SIZE - (unsigned long) filename < PATH_MAX)
+			len = TASK_SIZE - (unsigned long) filename;
+	}
+
+	retval = strncpy_from_user(page, filename, len);
+	if (retval > 0) {
+		if (retval < len)
+			return 0;
+		return -ENAMETOOLONG;
+	} else if (!retval)
+		retval = -ENOENT;
+	return retval;
+}
+
+char * getname(const char __user * filename)
+{
+	char *tmp, *result;
+
+	result = ERR_PTR(-ENOMEM);
+	tmp = __getname();
+	if (tmp)  {
+		int retval = do_getname(filename, tmp);
+
+		result = tmp;
+		if (retval < 0) {
+			__putname(tmp);
+			result = ERR_PTR(retval);
+		}
+	}
+	audit_getname(result);
+	return result;
+}
+
+#ifdef CONFIG_AUDITSYSCALL
+void putname(const char *name)
+{
+	if (unlikely(current->audit_context))
+		audit_putname(name);
+	else
+		__putname(name);
+}
+EXPORT_SYMBOL(putname);
+#endif
+
+
+/**
+ * generic_permission  -  check for access rights on a Posix-like filesystem
+ * @inode:	inode to check access rights for
+ * @mask:	right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ * @check_acl:	optional callback to check for Posix ACLs
+ *
+ * Used to check for read/write/execute permissions on a file.
+ * We use "fsuid" for this, letting us set arbitrary permissions
+ * for filesystem access without changing the "normal" uids which
+ * are used for other things..
+ */
+int generic_permission(struct inode *inode, int mask,
+		int (*check_acl)(struct inode *inode, int mask))
+{
+	umode_t			mode = inode->i_mode;
+
+	if (current->fsuid == inode->i_uid)
+		mode >>= 6;
+	else {
+		if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
+			int error = check_acl(inode, mask);
+			if (error == -EACCES)
+				goto check_capabilities;
+			else if (error != -EAGAIN)
+				return error;
+		}
+
+		if (in_group_p(inode->i_gid))
+			mode >>= 3;
+	}
+
+	/*
+	 * If the DACs are ok we don't need any capability check.
+	 */
+	if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
+		return 0;
+
+ check_capabilities:
+	/*
+	 * Read/write DACs are always overridable.
+	 * Executable DACs are overridable if at least one exec bit is set.
+	 */
+	if (!(mask & MAY_EXEC) ||
+	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
+		if (capable(CAP_DAC_OVERRIDE))
+			return 0;
+
+	/*
+	 * Searching includes executable on directories, else just read.
+	 */
+	if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+		if (capable(CAP_DAC_READ_SEARCH))
+			return 0;
+
+	return -EACCES;
+}
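+
+/*
+ * Sketch of how a filesystem might plug into the check_acl hook above
+ * (hypothetical examplefs; not an API used in this file):
+ *
+ *	static int examplefs_permission(struct inode *inode, int mask,
+ *					struct nameidata *nd)
+ *	{
+ *		return generic_permission(inode, mask, examplefs_check_acl);
+ *	}
+ *
+ * where examplefs_check_acl() returns -EAGAIN when no ACL applies (so
+ * the group bits are used) and a definitive result otherwise.
+ */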
+
+int permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	int retval, submask;
+
+	if (mask & MAY_WRITE) {
+		umode_t mode = inode->i_mode;
+
+		/*
+		 * Nobody gets write access to a read-only fs.
+		 */
+		if (IS_RDONLY(inode) &&
+		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
+			return -EROFS;
+
+		/*
+		 * Nobody gets write access to an immutable file.
+		 */
+		if (IS_IMMUTABLE(inode))
+			return -EACCES;
+	}
+
+
+	/* Ordinary permission routines do not understand MAY_APPEND. */
+	submask = mask & ~MAY_APPEND;
+	if (inode->i_op && inode->i_op->permission)
+		retval = inode->i_op->permission(inode, submask, nd);
+	else
+		retval = generic_permission(inode, submask, NULL);
+	if (retval)
+		return retval;
+
+	return security_inode_permission(inode, mask, nd);
+}
+
+/*
+ * get_write_access() gets write permission for a file.
+ * put_write_access() releases this write permission.
+ * This is used for regular files.
+ * We cannot support write (and maybe mmap read-write shared) accesses and
+ * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
+ * can have the following values:
+ * 0: no writers, no VM_DENYWRITE mappings
+ * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
+ * > 0: (i_writecount) users are writing to the file.
+ *
+ * Normally we operate on that counter with atomic_{inc,dec} and it's safe
+ * except for the cases where we don't hold i_writecount yet. Then we need to
+ * use {get,deny}_write_access() - these functions check the sign and refuse
+ * to do the change if sign is wrong. Exclusion between them is provided by
+ * the inode->i_lock spinlock.
+ */
+
+int get_write_access(struct inode * inode)
+{
+	spin_lock(&inode->i_lock);
+	if (atomic_read(&inode->i_writecount) < 0) {
+		spin_unlock(&inode->i_lock);
+		return -ETXTBSY;
+	}
+	atomic_inc(&inode->i_writecount);
+	spin_unlock(&inode->i_lock);
+
+	return 0;
+}
+
+int deny_write_access(struct file * file)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+
+	spin_lock(&inode->i_lock);
+	if (atomic_read(&inode->i_writecount) > 0) {
+		spin_unlock(&inode->i_lock);
+		return -ETXTBSY;
+	}
+	atomic_dec(&inode->i_writecount);
+	spin_unlock(&inode->i_lock);
+
+	return 0;
+}
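+
+/*
+ * Usage sketch for the pair above (put_write_access() and
+ * allow_write_access() are the matching release helpers from
+ * <linux/fs.h>).  A would-be writer does:
+ *
+ *	err = get_write_access(inode);
+ *	if (err)
+ *		return err;
+ *	... write ...
+ *	put_write_access(inode);
+ *
+ * and gets -ETXTBSY while VM_DENYWRITE mappings exist.  A MAP_DENYWRITE
+ * mapping instead brackets its lifetime with deny_write_access(file)
+ * and allow_write_access(file), keeping i_writecount negative.
+ */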
+
+void path_release(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+/*
+ * umount() mustn't call path_release()/mntput() as that would clear
+ * mnt_expiry_mark
+ */
+void path_release_on_umount(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	_mntput(nd->mnt);
+}
+
+/*
+ * Internal lookup() using the new generic dcache.
+ * SMP-safe
+ */
+static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
+{
+	struct dentry * dentry = __d_lookup(parent, name);
+
+	/* lockless __d_lookup may fail due to concurrent d_move()
+	 * in some unrelated directory, so try with d_lookup
+	 */
+	if (!dentry)
+		dentry = d_lookup(parent, name);
+
+	if (dentry && dentry->d_op && dentry->d_op->d_revalidate) {
+		if (!dentry->d_op->d_revalidate(dentry, nd) && !d_invalidate(dentry)) {
+			dput(dentry);
+			dentry = NULL;
+		}
+	}
+	return dentry;
+}
+
+/*
+ * Short-cut version of permission(), for calling by
+ * path_walk(), when dcache lock is held.  Combines parts
+ * of permission() and generic_permission(), and tests ONLY for
+ * MAY_EXEC permission.
+ *
+ * If appropriate, check DAC only.  If not appropriate, or
+ * short-cut DAC fails, then call permission() to do a more
+ * complete permission check.
+ */
+static inline int exec_permission_lite(struct inode *inode,
+				       struct nameidata *nd)
+{
+	umode_t	mode = inode->i_mode;
+
+	if (inode->i_op && inode->i_op->permission)
+		return -EAGAIN;
+
+	if (current->fsuid == inode->i_uid)
+		mode >>= 6;
+	else if (in_group_p(inode->i_gid))
+		mode >>= 3;
+
+	if (mode & MAY_EXEC)
+		goto ok;
+
+	if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE))
+		goto ok;
+
+	if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_OVERRIDE))
+		goto ok;
+
+	if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH))
+		goto ok;
+
+	return -EACCES;
+ok:
+	return security_inode_permission(inode, MAY_EXEC, nd);
+}
+
+/*
+ * This is called when everything else fails, and we actually have
+ * to go to the low-level filesystem to find out what we should do..
+ *
+ * We get the directory semaphore, and after getting that we also
+ * make sure that nobody added the entry to the dcache in the meantime..
+ * SMP-safe
+ */
+static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
+{
+	struct dentry * result;
+	struct inode *dir = parent->d_inode;
+
+	down(&dir->i_sem);
+	/*
+	 * First re-do the cached lookup just in case it was created
+	 * while we waited for the directory semaphore..
+	 *
+	 * FIXME! This could use version numbering or similar to
+	 * avoid unnecessary cache lookups.
+	 *
+	 * The "dcache_lock" is purely to protect the RCU list walker
+	 * from concurrent renames at this point (we mustn't get false
+	 * negatives from the RCU list walk here, unlike the optimistic
+	 * fast walk).
+	 *
+	 * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
+	 */
+	result = d_lookup(parent, name);
+	if (!result) {
+		struct dentry * dentry = d_alloc(parent, name);
+		result = ERR_PTR(-ENOMEM);
+		if (dentry) {
+			result = dir->i_op->lookup(dir, dentry, nd);
+			if (result)
+				dput(dentry);
+			else
+				result = dentry;
+		}
+		up(&dir->i_sem);
+		return result;
+	}
+
+	/*
+	 * Uhhuh! Nasty case: the cache was re-populated while
+	 * we waited on the semaphore. Need to revalidate.
+	 */
+	up(&dir->i_sem);
+	if (result->d_op && result->d_op->d_revalidate) {
+		if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
+			dput(result);
+			result = ERR_PTR(-ENOENT);
+		}
+	}
+	return result;
+}
+
+static int __emul_lookup_dentry(const char *, struct nameidata *);
+
+/* SMP-safe */
+static inline int
+walk_init_root(const char *name, struct nameidata *nd)
+{
+	read_lock(&current->fs->lock);
+	if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
+		nd->mnt = mntget(current->fs->altrootmnt);
+		nd->dentry = dget(current->fs->altroot);
+		read_unlock(&current->fs->lock);
+		if (__emul_lookup_dentry(name,nd))
+			return 0;
+		read_lock(&current->fs->lock);
+	}
+	nd->mnt = mntget(current->fs->rootmnt);
+	nd->dentry = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	return 1;
+}
+
+static inline int __vfs_follow_link(struct nameidata *nd, const char *link)
+{
+	int res = 0;
+	char *name;
+	if (IS_ERR(link))
+		goto fail;
+
+	if (*link == '/') {
+		path_release(nd);
+		if (!walk_init_root(link, nd))
+			/* weird __emul_prefix() stuff did it */
+			goto out;
+	}
+	res = link_path_walk(link, nd);
+out:
+	if (nd->depth || res || nd->last_type!=LAST_NORM)
+		return res;
+	/*
+	 * If it is an iterative symlink resolution in open_namei() we
+	 * have to copy the last component. And all that crap because of
+	 * bloody create() on broken symlinks. Furrfu...
+	 */
+	name = __getname();
+	if (unlikely(!name)) {
+		path_release(nd);
+		return -ENOMEM;
+	}
+	strcpy(name, nd->last.name);
+	nd->last.name = name;
+	return 0;
+fail:
+	path_release(nd);
+	return PTR_ERR(link);
+}
+
+static inline int __do_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int error;
+
+	touch_atime(nd->mnt, dentry);
+	nd_set_link(nd, NULL);
+	error = dentry->d_inode->i_op->follow_link(dentry, nd);
+	if (!error) {
+		char *s = nd_get_link(nd);
+		if (s)
+			error = __vfs_follow_link(nd, s);
+		if (dentry->d_inode->i_op->put_link)
+			dentry->d_inode->i_op->put_link(dentry, nd);
+	}
+
+	return error;
+}
+
+/*
+ * This limits recursive symlink follows to 8, while
+ * limiting consecutive symlinks to 40.
+ *
+ * Without that kind of total limit, nasty chains of consecutive
+ * symlinks can cause almost arbitrarily long lookups. 
+ */
+static inline int do_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int err = -ELOOP;
+	if (current->link_count >= MAX_NESTED_LINKS)
+		goto loop;
+	if (current->total_link_count >= 40)
+		goto loop;
+	BUG_ON(nd->depth >= MAX_NESTED_LINKS);
+	cond_resched();
+	err = security_inode_follow_link(dentry, nd);
+	if (err)
+		goto loop;
+	current->link_count++;
+	current->total_link_count++;
+	nd->depth++;
+	err = __do_follow_link(dentry, nd);
+	current->link_count--;
+	nd->depth--;
+	return err;
+loop:
+	path_release(nd);
+	return err;
+}
+
+int follow_up(struct vfsmount **mnt, struct dentry **dentry)
+{
+	struct vfsmount *parent;
+	struct dentry *mountpoint;
+	spin_lock(&vfsmount_lock);
+	parent=(*mnt)->mnt_parent;
+	if (parent == *mnt) {
+		spin_unlock(&vfsmount_lock);
+		return 0;
+	}
+	mntget(parent);
+	mountpoint=dget((*mnt)->mnt_mountpoint);
+	spin_unlock(&vfsmount_lock);
+	dput(*dentry);
+	*dentry = mountpoint;
+	mntput(*mnt);
+	*mnt = parent;
+	return 1;
+}
+
+/* no need for dcache_lock, as serialization is taken care of in
+ * namespace.c
+ */
+static int follow_mount(struct vfsmount **mnt, struct dentry **dentry)
+{
+	int res = 0;
+	while (d_mountpoint(*dentry)) {
+		struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
+		if (!mounted)
+			break;
+		mntput(*mnt);
+		*mnt = mounted;
+		dput(*dentry);
+		*dentry = dget(mounted->mnt_root);
+		res = 1;
+	}
+	return res;
+}
+
+/* no need for dcache_lock, as serialization is taken care of in
+ * namespace.c
+ */
+static inline int __follow_down(struct vfsmount **mnt, struct dentry **dentry)
+{
+	struct vfsmount *mounted;
+
+	mounted = lookup_mnt(*mnt, *dentry);
+	if (mounted) {
+		mntput(*mnt);
+		*mnt = mounted;
+		dput(*dentry);
+		*dentry = dget(mounted->mnt_root);
+		return 1;
+	}
+	return 0;
+}
+
+int follow_down(struct vfsmount **mnt, struct dentry **dentry)
+{
+	return __follow_down(mnt,dentry);
+}
+ 
+static inline void follow_dotdot(struct vfsmount **mnt, struct dentry **dentry)
+{
+	while(1) {
+		struct vfsmount *parent;
+		struct dentry *old = *dentry;
+
+		read_lock(&current->fs->lock);
+		if (*dentry == current->fs->root &&
+		    *mnt == current->fs->rootmnt) {
+			read_unlock(&current->fs->lock);
+			break;
+		}
+		read_unlock(&current->fs->lock);
+		spin_lock(&dcache_lock);
+		if (*dentry != (*mnt)->mnt_root) {
+			*dentry = dget((*dentry)->d_parent);
+			spin_unlock(&dcache_lock);
+			dput(old);
+			break;
+		}
+		spin_unlock(&dcache_lock);
+		spin_lock(&vfsmount_lock);
+		parent = (*mnt)->mnt_parent;
+		if (parent == *mnt) {
+			spin_unlock(&vfsmount_lock);
+			break;
+		}
+		mntget(parent);
+		*dentry = dget((*mnt)->mnt_mountpoint);
+		spin_unlock(&vfsmount_lock);
+		dput(old);
+		mntput(*mnt);
+		*mnt = parent;
+	}
+	follow_mount(mnt, dentry);
+}
+
+struct path {
+	struct vfsmount *mnt;
+	struct dentry *dentry;
+};
+
+/*
+ *  It's more convoluted than I'd like it to be, but... it's still fairly
+ *  small and for now I'd prefer to have the fast path as straight as possible.
+ *  It _is_ time-critical.
+ */
+static int do_lookup(struct nameidata *nd, struct qstr *name,
+		     struct path *path)
+{
+	struct vfsmount *mnt = nd->mnt;
+	struct dentry *dentry = __d_lookup(nd->dentry, name);
+
+	if (!dentry)
+		goto need_lookup;
+	if (dentry->d_op && dentry->d_op->d_revalidate)
+		goto need_revalidate;
+done:
+	path->mnt = mnt;
+	path->dentry = dentry;
+	return 0;
+
+need_lookup:
+	dentry = real_lookup(nd->dentry, name, nd);
+	if (IS_ERR(dentry))
+		goto fail;
+	goto done;
+
+need_revalidate:
+	if (dentry->d_op->d_revalidate(dentry, nd))
+		goto done;
+	if (d_invalidate(dentry))
+		goto done;
+	dput(dentry);
+	goto need_lookup;
+
+fail:
+	return PTR_ERR(dentry);
+}
+
+/*
+ * Name resolution.
+ *
+ * This is the basic name resolution function, turning a pathname
+ * into the final dentry.
+ *
+ * We expect 'base' to be positive and a directory.
+ */
+static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
+{
+	struct path next;
+	struct inode *inode;
+	int err;
+	unsigned int lookup_flags = nd->flags;
+	
+	while (*name=='/')
+		name++;
+	if (!*name)
+		goto return_reval;
+
+	inode = nd->dentry->d_inode;
+	if (nd->depth)
+		lookup_flags = LOOKUP_FOLLOW;
+
+	/* At this point we know we have a real path component. */
+	for(;;) {
+		unsigned long hash;
+		struct qstr this;
+		unsigned int c;
+
+		err = exec_permission_lite(inode, nd);
+		if (err == -EAGAIN) { 
+			err = permission(inode, MAY_EXEC, nd);
+		}
+ 		if (err)
+			break;
+
+		this.name = name;
+		c = *(const unsigned char *)name;
+
+		hash = init_name_hash();
+		do {
+			name++;
+			hash = partial_name_hash(c, hash);
+			c = *(const unsigned char *)name;
+		} while (c && (c != '/'));
+		this.len = name - (const char *) this.name;
+		this.hash = end_name_hash(hash);
+
+		/* remove trailing slashes? */
+		if (!c)
+			goto last_component;
+		while (*++name == '/');
+		if (!*name)
+			goto last_with_slashes;
+
+		/*
+		 * "." and ".." are special - ".." especially so because it has
+		 * to be able to know about the current root directory and
+		 * parent relationships.
+		 */
+		if (this.name[0] == '.') switch (this.len) {
+			default:
+				break;
+			case 2:	
+				if (this.name[1] != '.')
+					break;
+				follow_dotdot(&nd->mnt, &nd->dentry);
+				inode = nd->dentry->d_inode;
+				/* fallthrough */
+			case 1:
+				continue;
+		}
+		/*
+		 * See if the low-level filesystem might want
+		 * to use its own hash..
+		 */
+		if (nd->dentry->d_op && nd->dentry->d_op->d_hash) {
+			err = nd->dentry->d_op->d_hash(nd->dentry, &this);
+			if (err < 0)
+				break;
+		}
+		nd->flags |= LOOKUP_CONTINUE;
+		/* This does the actual lookups.. */
+		err = do_lookup(nd, &this, &next);
+		if (err)
+			break;
+		/* Check mountpoints.. */
+		follow_mount(&next.mnt, &next.dentry);
+
+		err = -ENOENT;
+		inode = next.dentry->d_inode;
+		if (!inode)
+			goto out_dput;
+		err = -ENOTDIR; 
+		if (!inode->i_op)
+			goto out_dput;
+
+		if (inode->i_op->follow_link) {
+			mntget(next.mnt);
+			err = do_follow_link(next.dentry, nd);
+			dput(next.dentry);
+			mntput(next.mnt);
+			if (err)
+				goto return_err;
+			err = -ENOENT;
+			inode = nd->dentry->d_inode;
+			if (!inode)
+				break;
+			err = -ENOTDIR; 
+			if (!inode->i_op)
+				break;
+		} else {
+			dput(nd->dentry);
+			nd->mnt = next.mnt;
+			nd->dentry = next.dentry;
+		}
+		err = -ENOTDIR; 
+		if (!inode->i_op->lookup)
+			break;
+		continue;
+		/* here ends the main loop */
+
+last_with_slashes:
+		lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+last_component:
+		nd->flags &= ~LOOKUP_CONTINUE;
+		if (lookup_flags & LOOKUP_PARENT)
+			goto lookup_parent;
+		if (this.name[0] == '.') switch (this.len) {
+			default:
+				break;
+			case 2:	
+				if (this.name[1] != '.')
+					break;
+				follow_dotdot(&nd->mnt, &nd->dentry);
+				inode = nd->dentry->d_inode;
+				/* fallthrough */
+			case 1:
+				goto return_reval;
+		}
+		if (nd->dentry->d_op && nd->dentry->d_op->d_hash) {
+			err = nd->dentry->d_op->d_hash(nd->dentry, &this);
+			if (err < 0)
+				break;
+		}
+		err = do_lookup(nd, &this, &next);
+		if (err)
+			break;
+		follow_mount(&next.mnt, &next.dentry);
+		inode = next.dentry->d_inode;
+		if ((lookup_flags & LOOKUP_FOLLOW)
+		    && inode && inode->i_op && inode->i_op->follow_link) {
+			mntget(next.mnt);
+			err = do_follow_link(next.dentry, nd);
+			dput(next.dentry);
+			mntput(next.mnt);
+			if (err)
+				goto return_err;
+			inode = nd->dentry->d_inode;
+		} else {
+			dput(nd->dentry);
+			nd->mnt = next.mnt;
+			nd->dentry = next.dentry;
+		}
+		err = -ENOENT;
+		if (!inode)
+			break;
+		if (lookup_flags & LOOKUP_DIRECTORY) {
+			err = -ENOTDIR; 
+			if (!inode->i_op || !inode->i_op->lookup)
+				break;
+		}
+		goto return_base;
+lookup_parent:
+		nd->last = this;
+		nd->last_type = LAST_NORM;
+		if (this.name[0] != '.')
+			goto return_base;
+		if (this.len == 1)
+			nd->last_type = LAST_DOT;
+		else if (this.len == 2 && this.name[1] == '.')
+			nd->last_type = LAST_DOTDOT;
+		else
+			goto return_base;
+return_reval:
+		/*
+		 * We bypassed the ordinary revalidation routines.
+		 * We may need to check the cached dentry for staleness.
+		 */
+		if (nd->dentry && nd->dentry->d_sb &&
+		    (nd->dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
+			err = -ESTALE;
+			/* Note: we do not d_invalidate() */
+			if (!nd->dentry->d_op->d_revalidate(nd->dentry, nd))
+				break;
+		}
+return_base:
+		return 0;
+out_dput:
+		dput(next.dentry);
+		break;
+	}
+	path_release(nd);
+return_err:
+	return err;
+}
+
+/*
+ * Wrapper to retry pathname resolution whenever the underlying
+ * file system returns an ESTALE.
+ *
+ * Retry the whole path once, forcing real lookup requests
+ * instead of relying on the dcache.
+ */
+int fastcall link_path_walk(const char *name, struct nameidata *nd)
+{
+	struct nameidata save = *nd;
+	int result;
+
+	/* make sure the stuff we saved doesn't go away */
+	dget(save.dentry);
+	mntget(save.mnt);
+
+	result = __link_path_walk(name, nd);
+	if (result == -ESTALE) {
+		*nd = save;
+		dget(nd->dentry);
+		mntget(nd->mnt);
+		nd->flags |= LOOKUP_REVAL;
+		result = __link_path_walk(name, nd);
+	}
+
+	dput(save.dentry);
+	mntput(save.mnt);
+
+	return result;
+}
+
+int fastcall path_walk(const char * name, struct nameidata *nd)
+{
+	current->total_link_count = 0;
+	return link_path_walk(name, nd);
+}
+
+/* SMP-safe */
+/* returns 1 if everything is done */
+static int __emul_lookup_dentry(const char *name, struct nameidata *nd)
+{
+	if (path_walk(name, nd))
+		return 0;		/* something went wrong... */
+
+	if (!nd->dentry->d_inode || S_ISDIR(nd->dentry->d_inode->i_mode)) {
+		struct dentry *old_dentry = nd->dentry;
+		struct vfsmount *old_mnt = nd->mnt;
+		struct qstr last = nd->last;
+		int last_type = nd->last_type;
+		/*
+		 * NAME was not found in the alternate root, or it's a directory.  Try to find
+		 * it in the normal root:
+		 */
+		nd->last_type = LAST_ROOT;
+		read_lock(&current->fs->lock);
+		nd->mnt = mntget(current->fs->rootmnt);
+		nd->dentry = dget(current->fs->root);
+		read_unlock(&current->fs->lock);
+		if (path_walk(name, nd) == 0) {
+			if (nd->dentry->d_inode) {
+				dput(old_dentry);
+				mntput(old_mnt);
+				return 1;
+			}
+			path_release(nd);
+		}
+		nd->dentry = old_dentry;
+		nd->mnt = old_mnt;
+		nd->last = last;
+		nd->last_type = last_type;
+	}
+	return 1;
+}
+
+void set_fs_altroot(void)
+{
+	char *emul = __emul_prefix();
+	struct nameidata nd;
+	struct vfsmount *mnt = NULL, *oldmnt;
+	struct dentry *dentry = NULL, *olddentry;
+	int err;
+
+	if (!emul)
+		goto set_it;
+	err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd);
+	if (!err) {
+		mnt = nd.mnt;
+		dentry = nd.dentry;
+	}
+set_it:
+	write_lock(&current->fs->lock);
+	oldmnt = current->fs->altrootmnt;
+	olddentry = current->fs->altroot;
+	current->fs->altrootmnt = mnt;
+	current->fs->altroot = dentry;
+	write_unlock(&current->fs->lock);
+	if (olddentry) {
+		dput(olddentry);
+		mntput(oldmnt);
+	}
+}
+
+int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+{
+	int retval;
+
+	nd->last_type = LAST_ROOT; /* if there are only slashes... */
+	nd->flags = flags;
+	nd->depth = 0;
+
+	read_lock(&current->fs->lock);
+	if (*name=='/') {
+		if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
+			nd->mnt = mntget(current->fs->altrootmnt);
+			nd->dentry = dget(current->fs->altroot);
+			read_unlock(&current->fs->lock);
+			if (__emul_lookup_dentry(name,nd))
+				return 0;
+			read_lock(&current->fs->lock);
+		}
+		nd->mnt = mntget(current->fs->rootmnt);
+		nd->dentry = dget(current->fs->root);
+	} else {
+		nd->mnt = mntget(current->fs->pwdmnt);
+		nd->dentry = dget(current->fs->pwd);
+	}
+	read_unlock(&current->fs->lock);
+	current->total_link_count = 0;
+	retval = link_path_walk(name, nd);
+	if (unlikely(current->audit_context
+		     && nd && nd->dentry && nd->dentry->d_inode))
+		audit_inode(name, nd->dentry->d_inode);
+	return retval;
+}
+
+/*
+ * Restricted form of lookup. Doesn't follow links, single-component only,
+ * needs parent already locked. Doesn't follow mounts.
+ * SMP-safe.
+ */
+static struct dentry * __lookup_hash(struct qstr *name, struct dentry * base, struct nameidata *nd)
+{
+	struct dentry * dentry;
+	struct inode *inode;
+	int err;
+
+	inode = base->d_inode;
+	err = permission(inode, MAY_EXEC, nd);
+	dentry = ERR_PTR(err);
+	if (err)
+		goto out;
+
+	/*
+	 * See if the low-level filesystem might want
+	 * to use its own hash..
+	 */
+	if (base->d_op && base->d_op->d_hash) {
+		err = base->d_op->d_hash(base, name);
+		dentry = ERR_PTR(err);
+		if (err < 0)
+			goto out;
+	}
+
+	dentry = cached_lookup(base, name, nd);
+	if (!dentry) {
+		struct dentry *new = d_alloc(base, name);
+		dentry = ERR_PTR(-ENOMEM);
+		if (!new)
+			goto out;
+		dentry = inode->i_op->lookup(inode, new, nd);
+		if (!dentry)
+			dentry = new;
+		else
+			dput(new);
+	}
+out:
+	return dentry;
+}
+
+struct dentry * lookup_hash(struct qstr *name, struct dentry * base)
+{
+	return __lookup_hash(name, base, NULL);
+}
+
+/* SMP-safe */
+struct dentry * lookup_one_len(const char * name, struct dentry * base, int len)
+{
+	unsigned long hash;
+	struct qstr this;
+	unsigned int c;
+
+	this.name = name;
+	this.len = len;
+	if (!len)
+		goto access;
+
+	hash = init_name_hash();
+	while (len--) {
+		c = *(const unsigned char *)name++;
+		if (c == '/' || c == '\0')
+			goto access;
+		hash = partial_name_hash(c, hash);
+	}
+	this.hash = end_name_hash(hash);
+
+	return lookup_hash(&this, base);
+access:
+	return ERR_PTR(-EACCES);
+}
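+
+/*
+ * Usage sketch (hypothetical caller): lookup_one_len() is intended for
+ * in-kernel lookups of a single, pre-validated component under a parent
+ * whose i_sem the caller already holds:
+ *
+ *	down(&dir->d_inode->i_sem);
+ *	child = lookup_one_len("state", dir, strlen("state"));
+ *	if (!IS_ERR(child)) {
+ *		... child->d_inode may be NULL (negative dentry) ...
+ *		dput(child);
+ *	}
+ *	up(&dir->d_inode->i_sem);
+ *
+ * where "dir" is the parent dentry and "state" is just an example name.
+ */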
+
+/*
+ *	namei()
+ *
+ * is used by most simple commands to get the inode of a specified name.
+ * Open, link etc use their own routines, but this is enough for things
+ * like 'chmod' etc.
+ *
+ * namei exists in two versions: namei/lnamei. The only difference is
+ * that namei follows links, while lnamei does not.
+ * SMP-safe
+ */
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+{
+	char *tmp = getname(name);
+	int err = PTR_ERR(tmp);
+
+	if (!IS_ERR(tmp)) {
+		err = path_lookup(tmp, flags, nd);
+		putname(tmp);
+	}
+	return err;
+}
+
+/*
+ * It's inline, so penalty for filesystems that don't use sticky bit is
+ * minimal.
+ */
+static inline int check_sticky(struct inode *dir, struct inode *inode)
+{
+	if (!(dir->i_mode & S_ISVTX))
+		return 0;
+	if (inode->i_uid == current->fsuid)
+		return 0;
+	if (dir->i_uid == current->fsuid)
+		return 0;
+	return !capable(CAP_FOWNER);
+}
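+
+/*
+ * Concrete reading of the check above (sketch): in a mode 01777
+ * directory such as /tmp, removing or renaming somebody else's file is
+ * refused unless the caller owns the file, owns the directory, or has
+ * CAP_FOWNER; without the sticky bit, write+exec permission on the
+ * directory alone is enough.
+ */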
+
+/*
+ *	Check whether we can remove a link victim from directory dir, check
+ *  whether the type of victim is right.
+ *  1. We can't do it if dir is read-only (done in permission())
+ *  2. We should have write and exec permissions on dir
+ *  3. We can't remove anything from append-only dir
+ *  4. We can't do anything with immutable dir (done in permission())
+ *  5. If the sticky bit on dir is set we should either
+ *	a. be owner of dir, or
+ *	b. be owner of victim, or
+ *	c. have CAP_FOWNER capability
+ *  6. If the victim is append-only or immutable we can't do anything with
+ *     links pointing to it.
+ *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
+ *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
+ *  9. We can't remove a root or mountpoint.
+ * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
+ *     nfs_async_unlink().
+ */
+static inline int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+{
+	int error;
+
+	if (!victim->d_inode)
+		return -ENOENT;
+
+	BUG_ON(victim->d_parent->d_inode != dir);
+
+	error = permission(dir,MAY_WRITE | MAY_EXEC, NULL);
+	if (error)
+		return error;
+	if (IS_APPEND(dir))
+		return -EPERM;
+	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
+	    IS_IMMUTABLE(victim->d_inode))
+		return -EPERM;
+	if (isdir) {
+		if (!S_ISDIR(victim->d_inode->i_mode))
+			return -ENOTDIR;
+		if (IS_ROOT(victim))
+			return -EBUSY;
+	} else if (S_ISDIR(victim->d_inode->i_mode))
+		return -EISDIR;
+	if (IS_DEADDIR(dir))
+		return -ENOENT;
+	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
+		return -EBUSY;
+	return 0;
+}
+
+/*	Check whether we can create an object with dentry child in directory
+ *  dir.
+ *  1. We can't do it if child already exists (open has special treatment for
+ *     this case, but since we are inlined it's OK)
+ *  2. We can't do it if dir is read-only (done in permission())
+ *  3. We should have write and exec permissions on dir
+ *  4. We can't do it if dir is immutable (done in permission())
+ */
+static inline int may_create(struct inode *dir, struct dentry *child,
+			     struct nameidata *nd)
+{
+	if (child->d_inode)
+		return -EEXIST;
+	if (IS_DEADDIR(dir))
+		return -ENOENT;
+	return permission(dir,MAY_WRITE | MAY_EXEC, nd);
+}
+
+/* 
+ * Special case: O_CREAT|O_EXCL implies O_NOFOLLOW for security
+ * reasons.
+ *
+ * O_DIRECTORY translates into forcing a directory lookup.
+ */
+static inline int lookup_flags(unsigned int f)
+{
+	unsigned long retval = LOOKUP_FOLLOW;
+
+	if (f & O_NOFOLLOW)
+		retval &= ~LOOKUP_FOLLOW;
+	
+	if ((f & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+		retval &= ~LOOKUP_FOLLOW;
+	
+	if (f & O_DIRECTORY)
+		retval |= LOOKUP_DIRECTORY;
+
+	return retval;
+}
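+
+/*
+ * Worked examples of the mapping above (sketch):
+ *
+ *	lookup_flags(O_RDONLY)			== LOOKUP_FOLLOW
+ *	lookup_flags(O_RDONLY | O_NOFOLLOW)	== 0
+ *	lookup_flags(O_CREAT | O_EXCL)		== 0
+ *	lookup_flags(O_RDONLY | O_DIRECTORY)	== LOOKUP_FOLLOW | LOOKUP_DIRECTORY
+ */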
+
+/*
+ * p1 and p2 should be directories on the same fs.
+ */
+struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
+{
+	struct dentry *p;
+
+	if (p1 == p2) {
+		down(&p1->d_inode->i_sem);
+		return NULL;
+	}
+
+	down(&p1->d_inode->i_sb->s_vfs_rename_sem);
+
+	for (p = p1; p->d_parent != p; p = p->d_parent) {
+		if (p->d_parent == p2) {
+			down(&p2->d_inode->i_sem);
+			down(&p1->d_inode->i_sem);
+			return p;
+		}
+	}
+
+	for (p = p2; p->d_parent != p; p = p->d_parent) {
+		if (p->d_parent == p1) {
+			down(&p1->d_inode->i_sem);
+			down(&p2->d_inode->i_sem);
+			return p;
+		}
+	}
+
+	down(&p1->d_inode->i_sem);
+	down(&p2->d_inode->i_sem);
+	return NULL;
+}
+
+void unlock_rename(struct dentry *p1, struct dentry *p2)
+{
+	up(&p1->d_inode->i_sem);
+	if (p1 != p2) {
+		up(&p2->d_inode->i_sem);
+		up(&p1->d_inode->i_sb->s_vfs_rename_sem);
+	}
+}
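+
+/*
+ * Typical calling sequence (sketch; "old_parent", "new_parent" and
+ * do_the_rename() are placeholders):
+ *
+ *	trap = lock_rename(new_parent, old_parent);
+ *	... fail if the source or target dentry equals "trap" ...
+ *	err = do_the_rename();
+ *	unlock_rename(new_parent, old_parent);
+ *
+ * A non-NULL "trap" is returned when one directory is an ancestor of
+ * the other; it marks a dentry that must be neither the source nor the
+ * target of the rename.
+ */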
+
+int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	int error = may_create(dir, dentry, nd);
+
+	if (error)
+		return error;
+
+	if (!dir->i_op || !dir->i_op->create)
+		return -EACCES;	/* shouldn't it be ENOSYS? */
+	mode &= S_IALLUGO;
+	mode |= S_IFREG;
+	error = security_inode_create(dir, dentry, mode);
+	if (error)
+		return error;
+	DQUOT_INIT(dir);
+	error = dir->i_op->create(dir, dentry, mode, nd);
+	if (!error) {
+		inode_dir_notify(dir, DN_CREATE);
+		security_inode_post_create(dir, dentry, mode);
+	}
+	return error;
+}
+
+int may_open(struct nameidata *nd, int acc_mode, int flag)
+{
+	struct dentry *dentry = nd->dentry;
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	if (!inode)
+		return -ENOENT;
+
+	if (S_ISLNK(inode->i_mode))
+		return -ELOOP;
+	
+	if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
+		return -EISDIR;
+
+	error = permission(inode, acc_mode, nd);
+	if (error)
+		return error;
+
+	/*
+	 * FIFOs, sockets and device files are special: they don't
+	 * actually live on the filesystem itself, and as such you
+	 * can write to them even if the filesystem is read-only.
+	 */
+	if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+	    	flag &= ~O_TRUNC;
+	} else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+		if (nd->mnt->mnt_flags & MNT_NODEV)
+			return -EACCES;
+
+		flag &= ~O_TRUNC;
+	} else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
+		return -EROFS;
+	/*
+	 * An append-only file must be opened in append mode for writing.
+	 */
+	if (IS_APPEND(inode)) {
+		if  ((flag & FMODE_WRITE) && !(flag & O_APPEND))
+			return -EPERM;
+		if (flag & O_TRUNC)
+			return -EPERM;
+	}
+
+	/* O_NOATIME can only be set by the owner or superuser */
+	if (flag & O_NOATIME)
+		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+			return -EPERM;
+
+	/*
+	 * Ensure there are no outstanding leases on the file.
+	 */
+	error = break_lease(inode, flag);
+	if (error)
+		return error;
+
+	if (flag & O_TRUNC) {
+		error = get_write_access(inode);
+		if (error)
+			return error;
+
+		/*
+		 * Refuse to truncate files with mandatory locks held on them.
+		 */
+		error = locks_verify_locked(inode);
+		if (!error) {
+			DQUOT_INIT(inode);
+			
+			error = do_truncate(dentry, 0);
+		}
+		put_write_access(inode);
+		if (error)
+			return error;
+	} else
+		if (flag & FMODE_WRITE)
+			DQUOT_INIT(inode);
+
+	return 0;
+}
+
+/*
+ *	open_namei()
+ *
+ * namei for open - this is in fact almost the whole open-routine.
+ *
+ * Note that the low bits of "flag" aren't the same as in the open
+ * system call - they are 00 - no permissions needed
+ *			  01 - read permission needed
+ *			  10 - write permission needed
+ *			  11 - read/write permissions needed
+ * which is a lot more logical, and also allows the "no perm" needed
+ * for symlinks (where the permissions are checked later).
+ * SMP-safe
+ */
+int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
+{
+	int acc_mode, error = 0;
+	struct dentry *dentry;
+	struct dentry *dir;
+	int count = 0;
+
+	acc_mode = ACC_MODE(flag);
+
+	/* Allow the LSM permission hook to distinguish append 
+	   access from general write access. */
+	if (flag & O_APPEND)
+		acc_mode |= MAY_APPEND;
+
+	/* Fill in the open() intent data */
+	nd->intent.open.flags = flag;
+	nd->intent.open.create_mode = mode;
+
+	/*
+	 * The simplest case - just a plain lookup.
+	 */
+	if (!(flag & O_CREAT)) {
+		error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+		if (error)
+			return error;
+		goto ok;
+	}
+
+	/*
+	 * Create - we need to know the parent.
+	 */
+	error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd);
+	if (error)
+		return error;
+
+	/*
+	 * We have the parent and last component. First of all, check
+	 * that we are not asked to creat(2) an obvious directory - that
+	 * will not do.
+	 */
+	error = -EISDIR;
+	if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len])
+		goto exit;
+
+	dir = nd->dentry;
+	nd->flags &= ~LOOKUP_PARENT;
+	down(&dir->d_inode->i_sem);
+	dentry = __lookup_hash(&nd->last, nd->dentry, nd);
+
+do_last:
+	error = PTR_ERR(dentry);
+	if (IS_ERR(dentry)) {
+		up(&dir->d_inode->i_sem);
+		goto exit;
+	}
+
+	/* Negative dentry, just create the file */
+	if (!dentry->d_inode) {
+		if (!IS_POSIXACL(dir->d_inode))
+			mode &= ~current->fs->umask;
+		error = vfs_create(dir->d_inode, dentry, mode, nd);
+		up(&dir->d_inode->i_sem);
+		dput(nd->dentry);
+		nd->dentry = dentry;
+		if (error)
+			goto exit;
+		/* Don't check for write permission, don't truncate */
+		acc_mode = 0;
+		flag &= ~O_TRUNC;
+		goto ok;
+	}
+
+	/*
+	 * It already exists.
+	 */
+	up(&dir->d_inode->i_sem);
+
+	error = -EEXIST;
+	if (flag & O_EXCL)
+		goto exit_dput;
+
+	if (d_mountpoint(dentry)) {
+		error = -ELOOP;
+		if (flag & O_NOFOLLOW)
+			goto exit_dput;
+		while (__follow_down(&nd->mnt,&dentry) && d_mountpoint(dentry));
+	}
+	error = -ENOENT;
+	if (!dentry->d_inode)
+		goto exit_dput;
+	if (dentry->d_inode->i_op && dentry->d_inode->i_op->follow_link)
+		goto do_link;
+
+	dput(nd->dentry);
+	nd->dentry = dentry;
+	error = -EISDIR;
+	if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))
+		goto exit;
+ok:
+	error = may_open(nd, acc_mode, flag);
+	if (error)
+		goto exit;
+	return 0;
+
+exit_dput:
+	dput(dentry);
+exit:
+	path_release(nd);
+	return error;
+
+do_link:
+	error = -ELOOP;
+	if (flag & O_NOFOLLOW)
+		goto exit_dput;
+	/*
+	 * This is subtle. Instead of calling do_follow_link() we do the
+	 * thing by hand. The reason is that this way we have zero link_count
+	 * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT.
+	 * After that we have the parent and last component, i.e.
+	 * we are in the same situation as after the first path_walk().
+	 * Well, almost - if the last component is normal we get its copy
+	 * stored in nd->last.name and we will have to putname() it when we
+	 * are done. Procfs-like symlinks just set LAST_BIND.
+	 */
+	nd->flags |= LOOKUP_PARENT;
+	error = security_inode_follow_link(dentry, nd);
+	if (error)
+		goto exit_dput;
+	error = __do_follow_link(dentry, nd);
+	dput(dentry);
+	if (error)
+		return error;
+	nd->flags &= ~LOOKUP_PARENT;
+	if (nd->last_type == LAST_BIND) {
+		dentry = nd->dentry;
+		goto ok;
+	}
+	error = -EISDIR;
+	if (nd->last_type != LAST_NORM)
+		goto exit;
+	if (nd->last.name[nd->last.len]) {
+		putname(nd->last.name);
+		goto exit;
+	}
+	error = -ELOOP;
+	if (count++==32) {
+		putname(nd->last.name);
+		goto exit;
+	}
+	dir = nd->dentry;
+	down(&dir->d_inode->i_sem);
+	dentry = __lookup_hash(&nd->last, nd->dentry, nd);
+	putname(nd->last.name);
+	goto do_last;
+}
+
+/**
+ * lookup_create - lookup a dentry, creating it if it doesn't exist
+ * @nd: nameidata info
+ * @is_dir: directory flag
+ *
+ * Simple function to lookup and return a dentry for an object that is
+ * about to be created; it takes the parent's i_sem but does not create
+ * the object itself.  Is SMP-safe.
+ */
+struct dentry *lookup_create(struct nameidata *nd, int is_dir)
+{
+	struct dentry *dentry;
+
+	down(&nd->dentry->d_inode->i_sem);
+	dentry = ERR_PTR(-EEXIST);
+	if (nd->last_type != LAST_NORM)
+		goto fail;
+	nd->flags &= ~LOOKUP_PARENT;
+	dentry = lookup_hash(&nd->last, nd->dentry);
+	if (IS_ERR(dentry))
+		goto fail;
+	if (!is_dir && nd->last.name[nd->last.len] && !dentry->d_inode)
+		goto enoent;
+	return dentry;
+enoent:
+	dput(dentry);
+	dentry = ERR_PTR(-ENOENT);
+fail:
+	return dentry;
+}
+
+int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+	int error = may_create(dir, dentry, NULL);
+
+	if (error)
+		return error;
+
+	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
+		return -EPERM;
+
+	if (!dir->i_op || !dir->i_op->mknod)
+		return -EPERM;
+
+	error = security_inode_mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+
+	DQUOT_INIT(dir);
+	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (!error) {
+		inode_dir_notify(dir, DN_CREATE);
+		security_inode_post_mknod(dir, dentry, mode, dev);
+	}
+	return error;
+}
+
+asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
+{
+	int error = 0;
+	char * tmp;
+	struct dentry * dentry;
+	struct nameidata nd;
+
+	if (S_ISDIR(mode))
+		return -EPERM;
+	tmp = getname(filename);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+
+	error = path_lookup(tmp, LOOKUP_PARENT, &nd);
+	if (error)
+		goto out;
+	dentry = lookup_create(&nd, 0);
+	error = PTR_ERR(dentry);
+
+	if (!IS_POSIXACL(nd.dentry->d_inode))
+		mode &= ~current->fs->umask;
+	if (!IS_ERR(dentry)) {
+		switch (mode & S_IFMT) {
+		case 0: case S_IFREG:
+			error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd);
+			break;
+		case S_IFCHR: case S_IFBLK:
+			error = vfs_mknod(nd.dentry->d_inode,dentry,mode,
+					new_decode_dev(dev));
+			break;
+		case S_IFIFO: case S_IFSOCK:
+			error = vfs_mknod(nd.dentry->d_inode,dentry,mode,0);
+			break;
+		case S_IFDIR:
+			error = -EPERM;
+			break;
+		default:
+			error = -EINVAL;
+		}
+		dput(dentry);
+	}
+	up(&nd.dentry->d_inode->i_sem);
+	path_release(&nd);
+out:
+	putname(tmp);
+
+	return error;
+}
+
+int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	int error = may_create(dir, dentry, NULL);
+
+	if (error)
+		return error;
+
+	if (!dir->i_op || !dir->i_op->mkdir)
+		return -EPERM;
+
+	mode &= (S_IRWXUGO|S_ISVTX);
+	error = security_inode_mkdir(dir, dentry, mode);
+	if (error)
+		return error;
+
+	DQUOT_INIT(dir);
+	error = dir->i_op->mkdir(dir, dentry, mode);
+	if (!error) {
+		inode_dir_notify(dir, DN_CREATE);
+		security_inode_post_mkdir(dir,dentry, mode);
+	}
+	return error;
+}
+
+asmlinkage long sys_mkdir(const char __user * pathname, int mode)
+{
+	int error = 0;
+	char * tmp;
+
+	tmp = getname(pathname);
+	error = PTR_ERR(tmp);
+	if (!IS_ERR(tmp)) {
+		struct dentry *dentry;
+		struct nameidata nd;
+
+		error = path_lookup(tmp, LOOKUP_PARENT, &nd);
+		if (error)
+			goto out;
+		dentry = lookup_create(&nd, 1);
+		error = PTR_ERR(dentry);
+		if (!IS_ERR(dentry)) {
+			if (!IS_POSIXACL(nd.dentry->d_inode))
+				mode &= ~current->fs->umask;
+			error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
+			dput(dentry);
+		}
+		up(&nd.dentry->d_inode->i_sem);
+		path_release(&nd);
+out:
+		putname(tmp);
+	}
+
+	return error;
+}
+
+/*
+ * We try to drop the dentry early: we should have
+ * a usage count of 2 if we're the only user of this
+ * dentry, and if that is true (possibly after pruning
+ * the dcache), then we drop the dentry now.
+ *
+ * A low-level filesystem can, if it chooses, legally
+ * do a
+ *
+ *	if (!d_unhashed(dentry))
+ *		return -EBUSY;
+ *
+ * if it cannot handle the case of removing a directory
+ * that is still in use by something else..
+ */
+void dentry_unhash(struct dentry *dentry)
+{
+	dget(dentry);
+	if (atomic_read(&dentry->d_count))
+		shrink_dcache_parent(dentry);
+	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count) == 2)
+		__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+}
+
+int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int error = may_delete(dir, dentry, 1);
+
+	if (error)
+		return error;
+
+	if (!dir->i_op || !dir->i_op->rmdir)
+		return -EPERM;
+
+	DQUOT_INIT(dir);
+
+	down(&dentry->d_inode->i_sem);
+	dentry_unhash(dentry);
+	if (d_mountpoint(dentry))
+		error = -EBUSY;
+	else {
+		error = security_inode_rmdir(dir, dentry);
+		if (!error) {
+			error = dir->i_op->rmdir(dir, dentry);
+			if (!error)
+				dentry->d_inode->i_flags |= S_DEAD;
+		}
+	}
+	up(&dentry->d_inode->i_sem);
+	if (!error) {
+		inode_dir_notify(dir, DN_DELETE);
+		d_delete(dentry);
+	}
+	dput(dentry);
+
+	return error;
+}
+
+asmlinkage long sys_rmdir(const char __user * pathname)
+{
+	int error = 0;
+	char * name;
+	struct dentry *dentry;
+	struct nameidata nd;
+
+	name = getname(pathname);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	error = path_lookup(name, LOOKUP_PARENT, &nd);
+	if (error)
+		goto exit;
+
+	switch(nd.last_type) {
+		case LAST_DOTDOT:
+			error = -ENOTEMPTY;
+			goto exit1;
+		case LAST_DOT:
+			error = -EINVAL;
+			goto exit1;
+		case LAST_ROOT:
+			error = -EBUSY;
+			goto exit1;
+	}
+	down(&nd.dentry->d_inode->i_sem);
+	dentry = lookup_hash(&nd.last, nd.dentry);
+	error = PTR_ERR(dentry);
+	if (!IS_ERR(dentry)) {
+		error = vfs_rmdir(nd.dentry->d_inode, dentry);
+		dput(dentry);
+	}
+	up(&nd.dentry->d_inode->i_sem);
+exit1:
+	path_release(&nd);
+exit:
+	putname(name);
+	return error;
+}
+
+int vfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int error = may_delete(dir, dentry, 0);
+
+	if (error)
+		return error;
+
+	if (!dir->i_op || !dir->i_op->unlink)
+		return -EPERM;
+
+	DQUOT_INIT(dir);
+
+	down(&dentry->d_inode->i_sem);
+	if (d_mountpoint(dentry))
+		error = -EBUSY;
+	else {
+		error = security_inode_unlink(dir, dentry);
+		if (!error)
+			error = dir->i_op->unlink(dir, dentry);
+	}
+	up(&dentry->d_inode->i_sem);
+
+	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
+	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
+		d_delete(dentry);
+		inode_dir_notify(dir, DN_DELETE);
+	}
+	return error;
+}
+
+/*
+ * Make sure that the actual truncation of the file will occur outside its
+ * directory's i_sem.  Truncate can take a long time if there is a lot of
+ * writeout happening, and we don't want to prevent access to the directory
+ * while waiting on the I/O.
+ */
+asmlinkage long sys_unlink(const char __user * pathname)
+{
+	int error = 0;
+	char * name;
+	struct dentry *dentry;
+	struct nameidata nd;
+	struct inode *inode = NULL;
+
+	name = getname(pathname);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	error = path_lookup(name, LOOKUP_PARENT, &nd);
+	if (error)
+		goto exit;
+	error = -EISDIR;
+	if (nd.last_type != LAST_NORM)
+		goto exit1;
+	down(&nd.dentry->d_inode->i_sem);
+	dentry = lookup_hash(&nd.last, nd.dentry);
+	error = PTR_ERR(dentry);
+	if (!IS_ERR(dentry)) {
+		/* Why not before? Because we want the correct error value */
+		if (nd.last.name[nd.last.len])
+			goto slashes;
+		inode = dentry->d_inode;
+		if (inode)
+			atomic_inc(&inode->i_count);
+		error = vfs_unlink(nd.dentry->d_inode, dentry);
+	exit2:
+		dput(dentry);
+	}
+	up(&nd.dentry->d_inode->i_sem);
+	if (inode)
+		iput(inode);	/* truncate the inode here */
+exit1:
+	path_release(&nd);
+exit:
+	putname(name);
+	return error;
+
+slashes:
+	error = !dentry->d_inode ? -ENOENT :
+		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
+	goto exit2;
+}
+
+int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode)
+{
+	int error = may_create(dir, dentry, NULL);
+
+	if (error)
+		return error;
+
+	if (!dir->i_op || !dir->i_op->symlink)
+		return -EPERM;
+
+	error = security_inode_symlink(dir, dentry, oldname);
+	if (error)
+		return error;
+
+	DQUOT_INIT(dir);
+	error = dir->i_op->symlink(dir, dentry, oldname);
+	if (!error) {
+		inode_dir_notify(dir, DN_CREATE);
+		security_inode_post_symlink(dir, dentry, oldname);
+	}
+	return error;
+}
+
+asmlinkage long sys_symlink(const char __user * oldname, const char __user * newname)
+{
+	int error = 0;
+	char * from;
+	char * to;
+
+	from = getname(oldname);
+	if (IS_ERR(from))
+		return PTR_ERR(from);
+	to = getname(newname);
+	error = PTR_ERR(to);
+	if (!IS_ERR(to)) {
+		struct dentry *dentry;
+		struct nameidata nd;
+
+		error = path_lookup(to, LOOKUP_PARENT, &nd);
+		if (error)
+			goto out;
+		dentry = lookup_create(&nd, 0);
+		error = PTR_ERR(dentry);
+		if (!IS_ERR(dentry)) {
+			error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
+			dput(dentry);
+		}
+		up(&nd.dentry->d_inode->i_sem);
+		path_release(&nd);
+out:
+		putname(to);
+	}
+	putname(from);
+	return error;
+}
+
+int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	int error;
+
+	if (!inode)
+		return -ENOENT;
+
+	error = may_create(dir, new_dentry, NULL);
+	if (error)
+		return error;
+
+	if (dir->i_sb != inode->i_sb)
+		return -EXDEV;
+
+	/*
+	 * A link to an append-only or immutable file cannot be created.
+	 */
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return -EPERM;
+	if (!dir->i_op || !dir->i_op->link)
+		return -EPERM;
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		return -EPERM;
+
+	error = security_inode_link(old_dentry, dir, new_dentry);
+	if (error)
+		return error;
+
+	down(&old_dentry->d_inode->i_sem);
+	DQUOT_INIT(dir);
+	error = dir->i_op->link(old_dentry, dir, new_dentry);
+	up(&old_dentry->d_inode->i_sem);
+	if (!error) {
+		inode_dir_notify(dir, DN_CREATE);
+		security_inode_post_link(old_dentry, dir, new_dentry);
+	}
+	return error;
+}
+
+/*
+ * Hardlinks are often used in delicate situations.  We avoid
+ * security-related surprises by not following symlinks on the
+ * newname.  --KAB
+ *
+ * We don't follow them on the oldname either to be compatible
+ * with Linux 2.0, and to avoid hard-linking to directories
+ * and other special files.  --ADM
+ */
+asmlinkage long sys_link(const char __user * oldname, const char __user * newname)
+{
+	struct dentry *new_dentry;
+	struct nameidata nd, old_nd;
+	int error;
+	char * to;
+
+	to = getname(newname);
+	if (IS_ERR(to))
+		return PTR_ERR(to);
+
+	error = __user_walk(oldname, 0, &old_nd);
+	if (error)
+		goto exit;
+	error = path_lookup(to, LOOKUP_PARENT, &nd);
+	if (error)
+		goto out;
+	error = -EXDEV;
+	if (old_nd.mnt != nd.mnt)
+		goto out_release;
+	new_dentry = lookup_create(&nd, 0);
+	error = PTR_ERR(new_dentry);
+	if (!IS_ERR(new_dentry)) {
+		error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
+		dput(new_dentry);
+	}
+	up(&nd.dentry->d_inode->i_sem);
+out_release:
+	path_release(&nd);
+out:
+	path_release(&old_nd);
+exit:
+	putname(to);
+
+	return error;
+}
+
+/*
+ * The worst of all namespace operations - renaming directory. "Perverted"
+ * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
+ * Problems:
+ *	a) we can get into loop creation. Check is done in is_subdir().
+ *	b) race potential - two innocent renames can create a loop together.
+ *	   That's where 4.4 screws up. Current fix: serialization on
+ *	   sb->s_vfs_rename_sem. We might be more accurate, but that's another
+ *	   story.
+ *	c) we have to lock _three_ objects - parents and victim (if it exists).
+ *	   And that - after we got ->i_sem on parents (until then we don't know
+ *	   whether the target exists).  Solution: try to be smart with locking
+ *	   order for inodes.  We rely on the fact that tree topology may change
+ *	   only under ->s_vfs_rename_sem _and_ that parent of the object we
+ *	   move will be locked.  Thus we can rank directories by the tree
+ *	   (ancestors first) and rank all non-directories after them.
+ *	   That works since everybody except rename does "lock parent, lookup,
+ *	   lock child" and rename is under ->s_vfs_rename_sem.
+ *	   HOWEVER, it relies on the assumption that any object with ->lookup()
+ *	   has no more than 1 dentry.  If "hybrid" objects ever appear,
+ *	   we'd better make sure that there's no link(2) for them.
+ *	d) some filesystems don't support opened-but-unlinked directories,
+ *	   either because of layout or because they are not ready to deal with
+ *	   all cases correctly. The latter will be fixed (taking this sort of
+ *	   stuff into VFS), but the former is not going away. Solution: the same
+ *	   trick as in rmdir().
+ *	e) conversion from fhandle to dentry may come at the wrong moment - when
+ *	   we are removing the target. Solution: we will have to grab ->i_sem
+ *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
+ *	   ->i_sem on parents, which works but leads to some truly excessive
+ *	   locking].
+ */
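+
+/*
+ * Illustration of race (b) above (editor's sketch, not from the original
+ * text): two unrelated renames run concurrently,
+ *
+ *	mv /a/x /b/y/x		and		mv /b/y /a/x/y
+ *
+ * Each one, checked in isolation, is legal - neither target directory is a
+ * descendant of its source at the time of the check.  But if both checks
+ * pass before either rename commits, the result is x inside y inside x: a
+ * cycle no longer reachable from the root.  Serializing directory renames
+ * on sb->s_vfs_rename_sem (see lock_rename()) makes the check and the
+ * commit atomic with respect to other renames, which rules this out.
+ */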
+int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
+	       struct inode *new_dir, struct dentry *new_dentry)
+{
+	int error = 0;
+	struct inode *target;
+
+	/*
+	 * If we are going to change the parent - check write permissions,
+	 * we'll need to flip '..'.
+	 */
+	if (new_dir != old_dir) {
+		error = permission(old_dentry->d_inode, MAY_WRITE, NULL);
+		if (error)
+			return error;
+	}
+
+	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
+	if (error)
+		return error;
+
+	target = new_dentry->d_inode;
+	if (target) {
+		down(&target->i_sem);
+		dentry_unhash(new_dentry);
+	}
+	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
+		error = -EBUSY;
+	else 
+		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+	if (target) {
+		if (!error)
+			target->i_flags |= S_DEAD;
+		up(&target->i_sem);
+		if (d_unhashed(new_dentry))
+			d_rehash(new_dentry);
+		dput(new_dentry);
+	}
+	if (!error) {
+		d_move(old_dentry,new_dentry);
+		security_inode_post_rename(old_dir, old_dentry,
+					   new_dir, new_dentry);
+	}
+	return error;
+}
+
+int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
+	       struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct inode *target;
+	int error;
+
+	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
+	if (error)
+		return error;
+
+	dget(new_dentry);
+	target = new_dentry->d_inode;
+	if (target)
+		down(&target->i_sem);
+	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
+		error = -EBUSY;
+	else
+		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+	if (!error) {
+		/* The following d_move() should become unconditional */
+		if (!(old_dir->i_sb->s_type->fs_flags & FS_ODD_RENAME))
+			d_move(old_dentry, new_dentry);
+		security_inode_post_rename(old_dir, old_dentry, new_dir, new_dentry);
+	}
+	if (target)
+		up(&target->i_sem);
+	dput(new_dentry);
+	return error;
+}
+
+int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+	       struct inode *new_dir, struct dentry *new_dentry)
+{
+	int error;
+	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+
+	if (old_dentry->d_inode == new_dentry->d_inode)
+		return 0;
+
+	error = may_delete(old_dir, old_dentry, is_dir);
+	if (error)
+		return error;
+
+	if (!new_dentry->d_inode)
+		error = may_create(new_dir, new_dentry, NULL);
+	else
+		error = may_delete(new_dir, new_dentry, is_dir);
+	if (error)
+		return error;
+
+	if (!old_dir->i_op || !old_dir->i_op->rename)
+		return -EPERM;
+
+	DQUOT_INIT(old_dir);
+	DQUOT_INIT(new_dir);
+
+	if (is_dir)
+		error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
+	else
+		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
+	if (!error) {
+		if (old_dir == new_dir)
+			inode_dir_notify(old_dir, DN_RENAME);
+		else {
+			inode_dir_notify(old_dir, DN_DELETE);
+			inode_dir_notify(new_dir, DN_CREATE);
+		}
+	}
+	return error;
+}
+
+static inline int do_rename(const char * oldname, const char * newname)
+{
+	int error = 0;
+	struct dentry * old_dir, * new_dir;
+	struct dentry * old_dentry, *new_dentry;
+	struct dentry * trap;
+	struct nameidata oldnd, newnd;
+
+	error = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
+	if (error)
+		goto exit;
+
+	error = path_lookup(newname, LOOKUP_PARENT, &newnd);
+	if (error)
+		goto exit1;
+
+	error = -EXDEV;
+	if (oldnd.mnt != newnd.mnt)
+		goto exit2;
+
+	old_dir = oldnd.dentry;
+	error = -EBUSY;
+	if (oldnd.last_type != LAST_NORM)
+		goto exit2;
+
+	new_dir = newnd.dentry;
+	if (newnd.last_type != LAST_NORM)
+		goto exit2;
+
+	trap = lock_rename(new_dir, old_dir);
+
+	old_dentry = lookup_hash(&oldnd.last, old_dir);
+	error = PTR_ERR(old_dentry);
+	if (IS_ERR(old_dentry))
+		goto exit3;
+	/* source must exist */
+	error = -ENOENT;
+	if (!old_dentry->d_inode)
+		goto exit4;
+	/* unless the source is a directory trailing slashes give -ENOTDIR */
+	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
+		error = -ENOTDIR;
+		if (oldnd.last.name[oldnd.last.len])
+			goto exit4;
+		if (newnd.last.name[newnd.last.len])
+			goto exit4;
+	}
+	/* source should not be ancestor of target */
+	error = -EINVAL;
+	if (old_dentry == trap)
+		goto exit4;
+	new_dentry = lookup_hash(&newnd.last, new_dir);
+	error = PTR_ERR(new_dentry);
+	if (IS_ERR(new_dentry))
+		goto exit4;
+	/* target should not be an ancestor of source */
+	error = -ENOTEMPTY;
+	if (new_dentry == trap)
+		goto exit5;
+
+	error = vfs_rename(old_dir->d_inode, old_dentry,
+				   new_dir->d_inode, new_dentry);
+exit5:
+	dput(new_dentry);
+exit4:
+	dput(old_dentry);
+exit3:
+	unlock_rename(new_dir, old_dir);
+exit2:
+	path_release(&newnd);
+exit1:
+	path_release(&oldnd);
+exit:
+	return error;
+}
+
+asmlinkage long sys_rename(const char __user * oldname, const char __user * newname)
+{
+	int error;
+	char * from;
+	char * to;
+
+	from = getname(oldname);
+	if (IS_ERR(from))
+		return PTR_ERR(from);
+	to = getname(newname);
+	error = PTR_ERR(to);
+	if (!IS_ERR(to)) {
+		error = do_rename(from,to);
+		putname(to);
+	}
+	putname(from);
+	return error;
+}
+
+int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+{
+	int len;
+
+	len = PTR_ERR(link);
+	if (IS_ERR(link))
+		goto out;
+
+	len = strlen(link);
+	if (len > (unsigned) buflen)
+		len = buflen;
+	if (copy_to_user(buffer, link, len))
+		len = -EFAULT;
+out:
+	return len;
+}
+
+/*
+ * A helper for ->readlink().  This should be used *ONLY* for symlinks that
+ * have ->follow_link() touching nd only in nd_set_link().  Using (or not
+ * using) it for any given inode is up to the filesystem.
+ */
+int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+	struct nameidata nd;
+	int res;
+	nd.depth = 0;
+	res = dentry->d_inode->i_op->follow_link(dentry, &nd);
+	if (!res) {
+		res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
+		if (dentry->d_inode->i_op->put_link)
+			dentry->d_inode->i_op->put_link(dentry, &nd);
+	}
+	return res;
+}
+
+int vfs_follow_link(struct nameidata *nd, const char *link)
+{
+	return __vfs_follow_link(nd, link);
+}
+
+/* get the link contents into pagecache */
+static char *page_getlink(struct dentry * dentry, struct page **ppage)
+{
+	struct page * page;
+	struct address_space *mapping = dentry->d_inode->i_mapping;
+	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
+				NULL);
+	if (IS_ERR(page))
+		goto sync_fail;
+	wait_on_page_locked(page);
+	if (!PageUptodate(page))
+		goto async_fail;
+	*ppage = page;
+	return kmap(page);
+
+async_fail:
+	page_cache_release(page);
+	return ERR_PTR(-EIO);
+
+sync_fail:
+	return (char*)page;
+}
+
+int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+	struct page *page = NULL;
+	char *s = page_getlink(dentry, &page);
+	int res = vfs_readlink(dentry,buffer,buflen,s);
+	if (page) {
+		kunmap(page);
+		page_cache_release(page);
+	}
+	return res;
+}
+
+int page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
+{
+	struct page *page;
+	nd_set_link(nd, page_getlink(dentry, &page));
+	return 0;
+}
+
+void page_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	if (!IS_ERR(nd_get_link(nd))) {
+		struct page *page;
+		page = find_get_page(dentry->d_inode->i_mapping, 0);
+		if (!page)
+			BUG();
+		kunmap(page);
+		page_cache_release(page);
+		page_cache_release(page);
+	}
+}
+
+int page_symlink(struct inode *inode, const char *symname, int len)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page = grab_cache_page(mapping, 0);
+	int err = -ENOMEM;
+	char *kaddr;
+
+	if (!page)
+		goto fail;
+	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
+	if (err)
+		goto fail_map;
+	kaddr = kmap_atomic(page, KM_USER0);
+	memcpy(kaddr, symname, len-1);
+	kunmap_atomic(kaddr, KM_USER0);
+	mapping->a_ops->commit_write(NULL, page, 0, len-1);
+	/*
+	 * Notice that we are _not_ going to block here - end of page is
+	 * unmapped, so this will only try to map the rest of the page, see
+	 * that it is unmapped (typically it will not even look into the inode -
+	 * ->i_size will be enough for everything) and zero it out.
+	 * OTOH it's obviously correct and should make the page up-to-date.
+	 */
+	if (!PageUptodate(page)) {
+		err = mapping->a_ops->readpage(NULL, page);
+		wait_on_page_locked(page);
+	} else {
+		unlock_page(page);
+	}
+	page_cache_release(page);
+	if (err < 0)
+		goto fail;
+	mark_inode_dirty(inode);
+	return 0;
+fail_map:
+	unlock_page(page);
+	page_cache_release(page);
+fail:
+	return err;
+}
+
+struct inode_operations page_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+};
+
+EXPORT_SYMBOL(__user_walk);
+EXPORT_SYMBOL(follow_down);
+EXPORT_SYMBOL(follow_up);
+EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
+EXPORT_SYMBOL(getname);
+EXPORT_SYMBOL(lock_rename);
+EXPORT_SYMBOL(lookup_hash);
+EXPORT_SYMBOL(lookup_one_len);
+EXPORT_SYMBOL(page_follow_link_light);
+EXPORT_SYMBOL(page_put_link);
+EXPORT_SYMBOL(page_readlink);
+EXPORT_SYMBOL(page_symlink);
+EXPORT_SYMBOL(page_symlink_inode_operations);
+EXPORT_SYMBOL(path_lookup);
+EXPORT_SYMBOL(path_release);
+EXPORT_SYMBOL(path_walk);
+EXPORT_SYMBOL(permission);
+EXPORT_SYMBOL(unlock_rename);
+EXPORT_SYMBOL(vfs_create);
+EXPORT_SYMBOL(vfs_follow_link);
+EXPORT_SYMBOL(vfs_link);
+EXPORT_SYMBOL(vfs_mkdir);
+EXPORT_SYMBOL(vfs_mknod);
+EXPORT_SYMBOL(generic_permission);
+EXPORT_SYMBOL(vfs_readlink);
+EXPORT_SYMBOL(vfs_rename);
+EXPORT_SYMBOL(vfs_rmdir);
+EXPORT_SYMBOL(vfs_symlink);
+EXPORT_SYMBOL(vfs_unlink);
+EXPORT_SYMBOL(dentry_unhash);
+EXPORT_SYMBOL(generic_readlink);
diff --git a/fs/namespace.c b/fs/namespace.c
new file mode 100644
index 0000000..3b93e5d
--- /dev/null
+++ b/fs/namespace.c
@@ -0,0 +1,1465 @@
+/*
+ *  linux/fs/namespace.c
+ *
+ * (C) Copyright Al Viro 2000, 2001
+ *	Released under GPL v2.
+ *
+ * Based on code from fs/super.c, copyright Linus Torvalds and others.
+ * Heavily rewritten.
+ */
+
+#include <linux/config.h>
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/quotaops.h>
+#include <linux/acct.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/namespace.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/mount.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+extern int __init init_rootfs(void);
+
+#ifdef CONFIG_SYSFS
+extern int __init sysfs_init(void);
+#else
+static inline int sysfs_init(void)
+{
+	return 0;
+}
+#endif
+
+/* spinlock for vfsmount related operations, in place of dcache_lock */
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+
+static struct list_head *mount_hashtable;
+static int hash_mask, hash_bits;
+static kmem_cache_t *mnt_cache; 
+
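+/*
+ * Hash a (parent vfsmount, mountpoint dentry) pair into mount_hashtable.
+ * Dividing both pointers by L1_CACHE_BYTES discards low-order bits that are
+ * (nearly) always zero for cache-aligned objects, the shift folds the upper
+ * bits back in, and the mask trims the result to the table size.
+ */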
+static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+{
+	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
+	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
+	tmp = tmp + (tmp >> hash_bits);
+	return tmp & hash_mask;
+}
+
+struct vfsmount *alloc_vfsmnt(const char *name)
+{
+	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); 
+	if (mnt) {
+		memset(mnt, 0, sizeof(struct vfsmount));
+		atomic_set(&mnt->mnt_count,1);
+		INIT_LIST_HEAD(&mnt->mnt_hash);
+		INIT_LIST_HEAD(&mnt->mnt_child);
+		INIT_LIST_HEAD(&mnt->mnt_mounts);
+		INIT_LIST_HEAD(&mnt->mnt_list);
+		INIT_LIST_HEAD(&mnt->mnt_fslink);
+		if (name) {
+			int size = strlen(name)+1;
+			char *newname = kmalloc(size, GFP_KERNEL);
+			if (newname) {
+				memcpy(newname, name, size);
+				mnt->mnt_devname = newname;
+			}
+		}
+	}
+	return mnt;
+}
+
+void free_vfsmnt(struct vfsmount *mnt)
+{
+	kfree(mnt->mnt_devname);
+	kmem_cache_free(mnt_cache, mnt);
+}
+
+/*
+ * Now, lookup_mnt increments the ref count before returning
+ * the vfsmount struct.
+ */
+struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct list_head * head = mount_hashtable + hash(mnt, dentry);
+	struct list_head * tmp = head;
+	struct vfsmount *p, *found = NULL;
+
+	spin_lock(&vfsmount_lock);
+	for (;;) {
+		tmp = tmp->next;
+		p = NULL;
+		if (tmp == head)
+			break;
+		p = list_entry(tmp, struct vfsmount, mnt_hash);
+		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
+			found = mntget(p);
+			break;
+		}
+	}
+	spin_unlock(&vfsmount_lock);
+	return found;
+}
+
+static inline int check_mnt(struct vfsmount *mnt)
+{
+	return mnt->mnt_namespace == current->namespace;
+}
+
+static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
+{
+	old_nd->dentry = mnt->mnt_mountpoint;
+	old_nd->mnt = mnt->mnt_parent;
+	mnt->mnt_parent = mnt;
+	mnt->mnt_mountpoint = mnt->mnt_root;
+	list_del_init(&mnt->mnt_child);
+	list_del_init(&mnt->mnt_hash);
+	old_nd->dentry->d_mounted--;
+}
+
+static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
+{
+	mnt->mnt_parent = mntget(nd->mnt);
+	mnt->mnt_mountpoint = dget(nd->dentry);
+	list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
+	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
+	nd->dentry->d_mounted++;
+}
+
+static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
+{
+	struct list_head *next = p->mnt_mounts.next;
+	if (next == &p->mnt_mounts) {
+		while (1) {
+			if (p == root)
+				return NULL;
+			next = p->mnt_child.next;
+			if (next != &p->mnt_parent->mnt_mounts)
+				break;
+			p = p->mnt_parent;
+		}
+	}
+	return list_entry(next, struct vfsmount, mnt_child);
+}
+
+static struct vfsmount *
+clone_mnt(struct vfsmount *old, struct dentry *root)
+{
+	struct super_block *sb = old->mnt_sb;
+	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
+
+	if (mnt) {
+		mnt->mnt_flags = old->mnt_flags;
+		atomic_inc(&sb->s_active);
+		mnt->mnt_sb = sb;
+		mnt->mnt_root = dget(root);
+		mnt->mnt_mountpoint = mnt->mnt_root;
+		mnt->mnt_parent = mnt;
+		mnt->mnt_namespace = old->mnt_namespace;
+
+		/* stick the duplicate mount on the same expiry list
+		 * as the original if that was on one */
+		spin_lock(&vfsmount_lock);
+		if (!list_empty(&old->mnt_fslink))
+			list_add(&mnt->mnt_fslink, &old->mnt_fslink);
+		spin_unlock(&vfsmount_lock);
+	}
+	return mnt;
+}
+
+void __mntput(struct vfsmount *mnt)
+{
+	struct super_block *sb = mnt->mnt_sb;
+	dput(mnt->mnt_root);
+	free_vfsmnt(mnt);
+	deactivate_super(sb);
+}
+
+EXPORT_SYMBOL(__mntput);
+
+/* iterator */
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+	struct namespace *n = m->private;
+	struct list_head *p;
+	loff_t l = *pos;
+
+	down_read(&n->sem);
+	list_for_each(p, &n->list)
+		if (!l--)
+			return list_entry(p, struct vfsmount, mnt_list);
+	return NULL;
+}
+
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct namespace *n = m->private;
+	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
+	(*pos)++;
+	return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+}
+
+static void m_stop(struct seq_file *m, void *v)
+{
+	struct namespace *n = m->private;
+	up_read(&n->sem);
+}
+
+static inline void mangle(struct seq_file *m, const char *s)
+{
+	seq_escape(m, s, " \t\n\\");
+}
+
+static int show_vfsmnt(struct seq_file *m, void *v)
+{
+	struct vfsmount *mnt = v;
+	int err = 0;
+	static struct proc_fs_info {
+		int flag;
+		char *str;
+	} fs_info[] = {
+		{ MS_SYNCHRONOUS, ",sync" },
+		{ MS_DIRSYNC, ",dirsync" },
+		{ MS_MANDLOCK, ",mand" },
+		{ MS_NOATIME, ",noatime" },
+		{ MS_NODIRATIME, ",nodiratime" },
+		{ 0, NULL }
+	};
+	static struct proc_fs_info mnt_info[] = {
+		{ MNT_NOSUID, ",nosuid" },
+		{ MNT_NODEV, ",nodev" },
+		{ MNT_NOEXEC, ",noexec" },
+		{ 0, NULL }
+	};
+	struct proc_fs_info *fs_infop;
+
+	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+	seq_putc(m, ' ');
+	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+	seq_putc(m, ' ');
+	mangle(m, mnt->mnt_sb->s_type->name);
+	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
+	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+		if (mnt->mnt_sb->s_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+		if (mnt->mnt_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+	if (mnt->mnt_sb->s_op->show_options)
+		err = mnt->mnt_sb->s_op->show_options(m, mnt);
+	seq_puts(m, " 0 0\n");
+	return err;
+}
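+
+/*
+ * Each show_vfsmnt() call emits one fstab-style line of /proc/mounts:
+ * device, mount point, filesystem type, options, and a constant " 0 0"
+ * standing in for fstab's dump and fsck-pass fields, which have no meaning
+ * here.  For example (device name purely illustrative):
+ *
+ *	/dev/hda1 / ext3 rw,noatime 0 0
+ */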
+
+struct seq_operations mounts_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_vfsmnt
+};
+
+/**
+ * may_umount_tree - check if a mount tree is busy
+ * @mnt: root of mount tree
+ *
+ * This is called to check if a tree of mounts has any
+ * open files, pwds, chroots or sub mounts that are
+ * busy.
+ */
+int may_umount_tree(struct vfsmount *mnt)
+{
+	struct list_head *next;
+	struct vfsmount *this_parent = mnt;
+	int actual_refs;
+	int minimum_refs;
+
+	spin_lock(&vfsmount_lock);
+	actual_refs = atomic_read(&mnt->mnt_count);
+	minimum_refs = 2;
+repeat:
+	next = this_parent->mnt_mounts.next;
+resume:
+	while (next != &this_parent->mnt_mounts) {
+		struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);
+
+		next = next->next;
+
+		actual_refs += atomic_read(&p->mnt_count);
+		minimum_refs += 2;
+
+		if (!list_empty(&p->mnt_mounts)) {
+			this_parent = p;
+			goto repeat;
+		}
+	}
+
+	if (this_parent != mnt) {
+		next = this_parent->mnt_child.next;
+		this_parent = this_parent->mnt_parent;
+		goto resume;
+	}
+	spin_unlock(&vfsmount_lock);
+
+	if (actual_refs > minimum_refs)
+		return -EBUSY;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(may_umount_tree);
+
+/**
+ * may_umount - check if a mount point is busy
+ * @mnt: root of mount
+ *
+ * This is called to check if a mount point has any
+ * open files, pwds, chroots or sub mounts. If the
+ * mount has sub mounts this will return busy
+ * regardless of whether the sub mounts are busy.
+ *
+ * Doesn't take quota and stuff into account. IOW, in some cases it will
+ * give false negatives. The main reason why it's here is that we need
+ * a non-destructive way to look for easily umountable filesystems.
+ */
+int may_umount(struct vfsmount *mnt)
+{
+	if (atomic_read(&mnt->mnt_count) > 2)
+		return -EBUSY;
+	return 0;
+}
+
+EXPORT_SYMBOL(may_umount);
+
+void umount_tree(struct vfsmount *mnt)
+{
+	struct vfsmount *p;
+	LIST_HEAD(kill);
+
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		list_del(&p->mnt_list);
+		list_add(&p->mnt_list, &kill);
+	}
+
+	while (!list_empty(&kill)) {
+		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
+		list_del_init(&mnt->mnt_list);
+		list_del_init(&mnt->mnt_fslink);
+		if (mnt->mnt_parent == mnt) {
+			spin_unlock(&vfsmount_lock);
+		} else {
+			struct nameidata old_nd;
+			detach_mnt(mnt, &old_nd);
+			spin_unlock(&vfsmount_lock);
+			path_release(&old_nd);
+		}
+		mntput(mnt);
+		spin_lock(&vfsmount_lock);
+	}
+}
+
+static int do_umount(struct vfsmount *mnt, int flags)
+{
+	struct super_block * sb = mnt->mnt_sb;
+	int retval;
+
+	retval = security_sb_umount(mnt, flags);
+	if (retval)
+		return retval;
+
+	/*
+	 * Allow userspace to request a mountpoint be expired rather than
+	 * unmounting unconditionally. Unmount only happens if:
+	 *  (1) the mark is already set (the mark is cleared by mntput())
+	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
+	 */
+	if (flags & MNT_EXPIRE) {
+		if (mnt == current->fs->rootmnt ||
+		    flags & (MNT_FORCE | MNT_DETACH))
+			return -EINVAL;
+
+		if (atomic_read(&mnt->mnt_count) != 2)
+			return -EBUSY;
+
+		if (!xchg(&mnt->mnt_expiry_mark, 1))
+			return -EAGAIN;
+	}
+
+	/*
+	 * If we may have to abort operations to get out of this
+	 * mount, and they will themselves hold resources, we must
+	 * allow the fs to do things. In the Unix tradition of
+	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
+	 * might fail to complete on the first run through as other tasks
+	 * must return, and the like. That's for the mount program to worry
+	 * about for the moment.
+	 */
+
+	lock_kernel();
+	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
+		sb->s_op->umount_begin(sb);
+	unlock_kernel();
+
+	/*
+	 * No sense in grabbing the lock for this test, but the test itself looks
+	 * somewhat bogus. Suggestions for better replacement?
+	 * Ho-hum... In principle, we might treat that as umount + switch
+	 * to rootfs. GC would eventually take care of the old vfsmount.
+	 * Actually it makes sense, especially if rootfs would contain a
+	 * /reboot - static binary that would close all descriptors and
+	 * call reboot(2). Then init(8) could umount root and exec /reboot.
+	 */
+	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
+		/*
+		 * Special case for "unmounting" root ...
+		 * we just try to remount it readonly.
+		 */
+		down_write(&sb->s_umount);
+		if (!(sb->s_flags & MS_RDONLY)) {
+			lock_kernel();
+			DQUOT_OFF(sb);
+			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+			unlock_kernel();
+		}
+		up_write(&sb->s_umount);
+		return retval;
+	}
+
+	down_write(&current->namespace->sem);
+	spin_lock(&vfsmount_lock);
+
+	if (atomic_read(&sb->s_active) == 1) {
+		/* last instance - try to be smart */
+		spin_unlock(&vfsmount_lock);
+		lock_kernel();
+		DQUOT_OFF(sb);
+		acct_auto_close(sb);
+		unlock_kernel();
+		security_sb_umount_close(mnt);
+		spin_lock(&vfsmount_lock);
+	}
+	retval = -EBUSY;
+	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
+		if (!list_empty(&mnt->mnt_list))
+			umount_tree(mnt);
+		retval = 0;
+	}
+	spin_unlock(&vfsmount_lock);
+	if (retval)
+		security_sb_umount_busy(mnt);
+	up_write(&current->namespace->sem);
+	return retval;
+}
+
+/*
+ * Now umount can handle mount points as well as block devices.
+ * This is important for filesystems which use unnamed block devices.
+ *
+ * We now support a flag for forced unmount like the other 'big iron'
+ * unixes.  Our API is identical to OSF/1 to avoid making a mess of AMD
+ * (the automount daemon).
+ */
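+
+/*
+ * Editor's sketch (not part of the original source) of how userspace is
+ * expected to drive the MNT_EXPIRE logic in do_umount() above.  An
+ * automounter calls umount2() twice: the first call merely sets the expiry
+ * mark and fails with EAGAIN; if nothing touches the mount in between (any
+ * mntput() clears the mark), the second call really unmounts it.
+ *
+ *	#include <sys/mount.h>
+ *	#include <errno.h>
+ *
+ *	#ifndef MNT_EXPIRE
+ *	#define MNT_EXPIRE 4		// may be missing from older libc headers
+ *	#endif
+ *
+ *	// Returns 0 once the mount is finally gone, 1 if it has only been
+ *	// marked (try again later), -1 on any other error (e.g. EBUSY).
+ *	static int try_expire(const char *mountpoint)
+ *	{
+ *		if (umount2(mountpoint, MNT_EXPIRE) == 0)
+ *			return 0;
+ *		return errno == EAGAIN ? 1 : -1;
+ *	}
+ */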
+
+asmlinkage long sys_umount(char __user * name, int flags)
+{
+	struct nameidata nd;
+	int retval;
+
+	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
+	if (retval)
+		goto out;
+	retval = -EINVAL;
+	if (nd.dentry != nd.mnt->mnt_root)
+		goto dput_and_out;
+	if (!check_mnt(nd.mnt))
+		goto dput_and_out;
+
+	retval = -EPERM;
+	if (!capable(CAP_SYS_ADMIN))
+		goto dput_and_out;
+
+	retval = do_umount(nd.mnt, flags);
+dput_and_out:
+	path_release_on_umount(&nd);
+out:
+	return retval;
+}
+
+#ifdef __ARCH_WANT_SYS_OLDUMOUNT
+
+/*
+ *	The 2.0 compatible umount. No flags. 
+ */
+ 
+asmlinkage long sys_oldumount(char __user * name)
+{
+	return sys_umount(name,0);
+}
+
+#endif
+
+static int mount_is_safe(struct nameidata *nd)
+{
+	if (capable(CAP_SYS_ADMIN))
+		return 0;
+	return -EPERM;
+#ifdef notyet
+	if (S_ISLNK(nd->dentry->d_inode->i_mode))
+		return -EPERM;
+	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
+		if (current->uid != nd->dentry->d_inode->i_uid)
+			return -EPERM;
+	}
+	if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
+		return -EPERM;
+	return 0;
+#endif
+}
+
+static int
+lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
+{
+	while (1) {
+		if (d == dentry)
+			return 1;
+		if (d == NULL || d == d->d_parent)
+			return 0;
+		d = d->d_parent;
+	}
+}
+
+static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct vfsmount *res, *p, *q, *r, *s;
+	struct list_head *h;
+	struct nameidata nd;
+
+	res = q = clone_mnt(mnt, dentry);
+	if (!q)
+		goto Enomem;
+	q->mnt_mountpoint = mnt->mnt_mountpoint;
+
+	p = mnt;
+	for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
+		r = list_entry(h, struct vfsmount, mnt_child);
+		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
+			continue;
+
+		for (s = r; s; s = next_mnt(s, r)) {
+			while (p != s->mnt_parent) {
+				p = p->mnt_parent;
+				q = q->mnt_parent;
+			}
+			p = s;
+			nd.mnt = q;
+			nd.dentry = p->mnt_mountpoint;
+			q = clone_mnt(p, p->mnt_root);
+			if (!q)
+				goto Enomem;
+			spin_lock(&vfsmount_lock);
+			list_add_tail(&q->mnt_list, &res->mnt_list);
+			attach_mnt(q, &nd);
+			spin_unlock(&vfsmount_lock);
+		}
+	}
+	return res;
+ Enomem:
+	if (res) {
+		spin_lock(&vfsmount_lock);
+		umount_tree(res);
+		spin_unlock(&vfsmount_lock);
+	}
+	return NULL;
+}
+
+static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
+{
+	int err;
+	if (mnt->mnt_sb->s_flags & MS_NOUSER)
+		return -EINVAL;
+
+	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
+		return -ENOTDIR;
+
+	err = -ENOENT;
+	down(&nd->dentry->d_inode->i_sem);
+	if (IS_DEADDIR(nd->dentry->d_inode))
+		goto out_unlock;
+
+	err = security_sb_check_sb(mnt, nd);
+	if (err)
+		goto out_unlock;
+
+	err = -ENOENT;
+	spin_lock(&vfsmount_lock);
+	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
+		struct list_head head;
+
+		attach_mnt(mnt, nd);
+		list_add_tail(&head, &mnt->mnt_list);
+		list_splice(&head, current->namespace->list.prev);
+		mntget(mnt);
+		err = 0;
+	}
+	spin_unlock(&vfsmount_lock);
+out_unlock:
+	up(&nd->dentry->d_inode->i_sem);
+	if (!err)
+		security_sb_post_addmount(mnt, nd);
+	return err;
+}
+
+/*
+ * do loopback mount.
+ */
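+
+/*
+ * From userspace this corresponds to mount(2) with MS_BIND (plus MS_REC
+ * for the recursive case); paths below are purely illustrative:
+ *
+ *	mount("/srv/data", "/mnt/data", NULL, MS_BIND, NULL);
+ *	mount("/", "/mnt/whole-tree", NULL, MS_BIND | MS_REC, NULL);
+ */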
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
+{
+	struct nameidata old_nd;
+	struct vfsmount *mnt = NULL;
+	int err = mount_is_safe(nd);
+	if (err)
+		return err;
+	if (!old_name || !*old_name)
+		return -EINVAL;
+	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
+	if (err)
+		return err;
+
+	down_write(&current->namespace->sem);
+	err = -EINVAL;
+	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
+		err = -ENOMEM;
+		if (recurse)
+			mnt = copy_tree(old_nd.mnt, old_nd.dentry);
+		else
+			mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
+	}
+
+	if (mnt) {
+		/* stop bind mounts from expiring */
+		spin_lock(&vfsmount_lock);
+		list_del_init(&mnt->mnt_fslink);
+		spin_unlock(&vfsmount_lock);
+
+		err = graft_tree(mnt, nd);
+		if (err) {
+			spin_lock(&vfsmount_lock);
+			umount_tree(mnt);
+			spin_unlock(&vfsmount_lock);
+		} else
+			mntput(mnt);
+	}
+
+	up_write(&current->namespace->sem);
+	path_release(&old_nd);
+	return err;
+}
+
+/*
+ * change filesystem flags. dir should be the physical root of the filesystem.
+ * If you've mounted a non-root directory somewhere and want to do remount
+ * on it - tough luck.
+ */
+
+static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
+		      void *data)
+{
+	int err;
+	struct super_block * sb = nd->mnt->mnt_sb;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!check_mnt(nd->mnt))
+		return -EINVAL;
+
+	if (nd->dentry != nd->mnt->mnt_root)
+		return -EINVAL;
+
+	down_write(&sb->s_umount);
+	err = do_remount_sb(sb, flags, data, 0);
+	if (!err)
+		nd->mnt->mnt_flags=mnt_flags;
+	up_write(&sb->s_umount);
+	if (!err)
+		security_sb_post_remount(nd->mnt, flags, data);
+	return err;
+}
+
+static int do_move_mount(struct nameidata *nd, char *old_name)
+{
+	struct nameidata old_nd, parent_nd;
+	struct vfsmount *p;
+	int err = 0;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (!old_name || !*old_name)
+		return -EINVAL;
+	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
+	if (err)
+		return err;
+
+	down_write(&current->namespace->sem);
+	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+		;
+	err = -EINVAL;
+	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+		goto out;
+
+	err = -ENOENT;
+	down(&nd->dentry->d_inode->i_sem);
+	if (IS_DEADDIR(nd->dentry->d_inode))
+		goto out1;
+
+	spin_lock(&vfsmount_lock);
+	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
+		goto out2;
+
+	err = -EINVAL;
+	if (old_nd.dentry != old_nd.mnt->mnt_root)
+		goto out2;
+
+	if (old_nd.mnt == old_nd.mnt->mnt_parent)
+		goto out2;
+
+	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
+		goto out2;
+
+	err = -ELOOP;
+	for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent)
+		if (p == old_nd.mnt)
+			goto out2;
+	err = 0;
+
+	detach_mnt(old_nd.mnt, &parent_nd);
+	attach_mnt(old_nd.mnt, nd);
+
+	/* if the mount is moved, it should no longer expire
+	 * automatically */
+	list_del_init(&old_nd.mnt->mnt_fslink);
+out2:
+	spin_unlock(&vfsmount_lock);
+out1:
+	up(&nd->dentry->d_inode->i_sem);
+out:
+	up_write(&current->namespace->sem);
+	if (!err)
+		path_release(&parent_nd);
+	path_release(&old_nd);
+	return err;
+}
+
+/*
+ * create a new mount for userspace and request it to be added into the
+ * namespace's tree
+ */
+static int do_new_mount(struct nameidata *nd, char *type, int flags,
+			int mnt_flags, char *name, void *data)
+{
+	struct vfsmount *mnt;
+
+	if (!type || !memchr(type, 0, PAGE_SIZE))
+		return -EINVAL;
+
+	/* we need capabilities... */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	mnt = do_kern_mount(type, flags, name, data);
+	if (IS_ERR(mnt))
+		return PTR_ERR(mnt);
+
+	return do_add_mount(mnt, nd, mnt_flags, NULL);
+}
+
+/*
+ * add a mount into a namespace's mount tree
+ * - provide the option of adding the new mount to an expiration list
+ */
+int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
+		 int mnt_flags, struct list_head *fslist)
+{
+	int err;
+
+	down_write(&current->namespace->sem);
+	/* Something was mounted here while we slept */
+	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+		;
+	err = -EINVAL;
+	if (!check_mnt(nd->mnt))
+		goto unlock;
+
+	/* Refuse the same filesystem on the same mount point */
+	err = -EBUSY;
+	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
+	    nd->mnt->mnt_root == nd->dentry)
+		goto unlock;
+
+	err = -EINVAL;
+	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
+		goto unlock;
+
+	newmnt->mnt_flags = mnt_flags;
+	err = graft_tree(newmnt, nd);
+
+	if (err == 0 && fslist) {
+		/* add to the specified expiration list */
+		spin_lock(&vfsmount_lock);
+		list_add_tail(&newmnt->mnt_fslink, fslist);
+		spin_unlock(&vfsmount_lock);
+	}
+
+unlock:
+	up_write(&current->namespace->sem);
+	mntput(newmnt);
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(do_add_mount);
+
+/*
+ * process a list of expirable mountpoints with the intent of discarding any
+ * mountpoints that aren't in use and haven't been touched since last we came
+ * here
+ */
+void mark_mounts_for_expiry(struct list_head *mounts)
+{
+	struct namespace *namespace;
+	struct vfsmount *mnt, *next;
+	LIST_HEAD(graveyard);
+
+	if (list_empty(mounts))
+		return;
+
+	spin_lock(&vfsmount_lock);
+
+	/* extract from the expiration list every vfsmount that matches the
+	 * following criteria:
+	 * - only referenced by its parent vfsmount
+	 * - still marked for expiry (marked on the last call here; marks are
+	 *   cleared by mntput())
+	 */
+	list_for_each_entry_safe(mnt, next, mounts, mnt_fslink) {
+		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
+		    atomic_read(&mnt->mnt_count) != 1)
+			continue;
+
+		mntget(mnt);
+		list_move(&mnt->mnt_fslink, &graveyard);
+	}
+
+	/*
+	 * go through the vfsmounts we've just consigned to the graveyard to
+	 * - check that they're still dead
+	 * - delete the vfsmount from the appropriate namespace under lock
+	 * - dispose of the corpse
+	 */
+	while (!list_empty(&graveyard)) {
+		mnt = list_entry(graveyard.next, struct vfsmount, mnt_fslink);
+		list_del_init(&mnt->mnt_fslink);
+
+		/* don't do anything if the namespace is dead - all the
+		 * vfsmounts from it are going away anyway */
+		namespace = mnt->mnt_namespace;
+		if (!namespace || atomic_read(&namespace->count) <= 0)
+			continue;
+		get_namespace(namespace);
+
+		spin_unlock(&vfsmount_lock);
+		down_write(&namespace->sem);
+		spin_lock(&vfsmount_lock);
+
+		/* check that it is still dead: the count should now be 2 - as
+		 * contributed by the vfsmount parent and the mntget above */
+		if (atomic_read(&mnt->mnt_count) == 2) {
+			struct vfsmount *xdmnt;
+			struct dentry *xdentry;
+
+			/* delete from the namespace */
+			list_del_init(&mnt->mnt_list);
+			list_del_init(&mnt->mnt_child);
+			list_del_init(&mnt->mnt_hash);
+			mnt->mnt_mountpoint->d_mounted--;
+
+			xdentry = mnt->mnt_mountpoint;
+			mnt->mnt_mountpoint = mnt->mnt_root;
+			xdmnt = mnt->mnt_parent;
+			mnt->mnt_parent = mnt;
+
+			spin_unlock(&vfsmount_lock);
+
+			mntput(xdmnt);
+			dput(xdentry);
+
+			/* now lay it to rest if this was the last ref on the
+			 * superblock */
+			if (atomic_read(&mnt->mnt_sb->s_active) == 1) {
+				/* last instance - try to be smart */
+				lock_kernel();
+				DQUOT_OFF(mnt->mnt_sb);
+				acct_auto_close(mnt->mnt_sb);
+				unlock_kernel();
+			}
+
+			mntput(mnt);
+		} else {
+			/* someone brought it back to life whilst we didn't
+			 * have any locks held so return it to the expiration
+			 * list */
+			list_add_tail(&mnt->mnt_fslink, mounts);
+			spin_unlock(&vfsmount_lock);
+		}
+
+		up_write(&namespace->sem);
+
+		mntput(mnt);
+		put_namespace(namespace);
+
+		spin_lock(&vfsmount_lock);
+	}
+
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
+
+/*
+ * Some copy_from_user() implementations do not return the exact number of
+ * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
+ * Note that this function differs from copy_from_user() in that it will oops
+ * on bad values of `to', rather than returning a short copy.
+ */
+static long
+exact_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	char *t = to;
+	const char __user *f = from;
+	char c;
+
+	if (!access_ok(VERIFY_READ, from, n))
+		return n;
+
+	while (n) {
+		if (__get_user(c, f)) {
+			memset(t, 0, n);
+			break;
+		}
+		*t++ = c;
+		f++;
+		n--;
+	}
+	return n;
+}
+
+int copy_mount_options(const void __user *data, unsigned long *where)
+{
+	int i;
+	unsigned long page;
+	unsigned long size;
+	
+	*where = 0;
+	if (!data)
+		return 0;
+
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* We only care that *some* data at the address the user
+	 * gave us is valid.  Just in case, we'll zero
+	 * the remainder of the page.
+	 */
+	/* copy_from_user cannot cross TASK_SIZE ! */
+	size = TASK_SIZE - (unsigned long)data;
+	if (size > PAGE_SIZE)
+		size = PAGE_SIZE;
+
+	i = size - exact_copy_from_user((void *)page, data, size);
+	if (!i) {
+		free_page(page); 
+		return -EFAULT;
+	}
+	if (i != PAGE_SIZE)
+		memset((char *)page + i, 0, PAGE_SIZE - i);
+	*where = page;
+	return 0;
+}
+
+/*
+ * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
+ * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
+ *
+ * data is a (void *) that can point to any structure up to
+ * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
+ * information (or be NULL).
+ *
+ * Pre-0.97 versions of mount() didn't have a flags word.
+ * When the flags word was introduced its top half was required
+ * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
+ * Therefore, if this magic number is present, it carries no information
+ * and must be discarded.
+ */
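+
+/*
+ * Editor's note: a pre-2.4.0-test9 style caller would therefore pass e.g.
+ * (device and options purely illustrative)
+ *
+ *	mount("/dev/hda1", "/mnt", "ext3",
+ *	      MS_MGC_VAL | MS_RDONLY, "errors=remount-ro");
+ *
+ * and the MS_MGC_VAL magic in the top 16 bits is simply stripped below,
+ * before the remaining flags select between remount (MS_REMOUNT), bind
+ * (MS_BIND), move (MS_MOVE) and a plain new mount.
+ */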
+long do_mount(char * dev_name, char * dir_name, char *type_page,
+		  unsigned long flags, void *data_page)
+{
+	struct nameidata nd;
+	int retval = 0;
+	int mnt_flags = 0;
+
+	/* Discard magic */
+	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+		flags &= ~MS_MGC_MSK;
+
+	/* Basic sanity checks */
+
+	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
+		return -EINVAL;
+	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
+		return -EINVAL;
+
+	if (data_page)
+		((char *)data_page)[PAGE_SIZE - 1] = 0;
+
+	/* Separate the per-mountpoint flags */
+	if (flags & MS_NOSUID)
+		mnt_flags |= MNT_NOSUID;
+	if (flags & MS_NODEV)
+		mnt_flags |= MNT_NODEV;
+	if (flags & MS_NOEXEC)
+		mnt_flags |= MNT_NOEXEC;
+	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
+
+	/* ... and get the mountpoint */
+	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
+	if (retval)
+		return retval;
+
+	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
+	if (retval)
+		goto dput_out;
+
+	if (flags & MS_REMOUNT)
+		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
+				    data_page);
+	else if (flags & MS_BIND)
+		retval = do_loopback(&nd, dev_name, flags & MS_REC);
+	else if (flags & MS_MOVE)
+		retval = do_move_mount(&nd, dev_name);
+	else
+		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
+				      dev_name, data_page);
+dput_out:
+	path_release(&nd);
+	return retval;
+}
+
+int copy_namespace(int flags, struct task_struct *tsk)
+{
+	struct namespace *namespace = tsk->namespace;
+	struct namespace *new_ns;
+	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
+	struct fs_struct *fs = tsk->fs;
+	struct vfsmount *p, *q;
+
+	if (!namespace)
+		return 0;
+
+	get_namespace(namespace);
+
+	if (!(flags & CLONE_NEWNS))
+		return 0;
+
+	if (!capable(CAP_SYS_ADMIN)) {
+		put_namespace(namespace);
+		return -EPERM;
+	}
+
+	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
+	if (!new_ns)
+		goto out;
+
+	atomic_set(&new_ns->count, 1);
+	init_rwsem(&new_ns->sem);
+	INIT_LIST_HEAD(&new_ns->list);
+
+	down_write(&tsk->namespace->sem);
+	/* First pass: copy the tree topology */
+	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
+	if (!new_ns->root) {
+		up_write(&tsk->namespace->sem);
+		kfree(new_ns);
+		goto out;
+	}
+	spin_lock(&vfsmount_lock);
+	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
+	spin_unlock(&vfsmount_lock);
+
+	/*
+	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
+	 * as belonging to new namespace.  We have already acquired a private
+	 * fs_struct, so tsk->fs->lock is not needed.
+	 */
+	p = namespace->root;
+	q = new_ns->root;
+	while (p) {
+		q->mnt_namespace = new_ns;
+		if (fs) {
+			if (p == fs->rootmnt) {
+				rootmnt = p;
+				fs->rootmnt = mntget(q);
+			}
+			if (p == fs->pwdmnt) {
+				pwdmnt = p;
+				fs->pwdmnt = mntget(q);
+			}
+			if (p == fs->altrootmnt) {
+				altrootmnt = p;
+				fs->altrootmnt = mntget(q);
+			}
+		}
+		p = next_mnt(p, namespace->root);
+		q = next_mnt(q, new_ns->root);
+	}
+	up_write(&tsk->namespace->sem);
+
+	tsk->namespace = new_ns;
+
+	if (rootmnt)
+		mntput(rootmnt);
+	if (pwdmnt)
+		mntput(pwdmnt);
+	if (altrootmnt)
+		mntput(altrootmnt);
+
+	put_namespace(namespace);
+	return 0;
+
+out:
+	put_namespace(namespace);
+	return -ENOMEM;
+}
+
+asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
+			  char __user * type, unsigned long flags,
+			  void __user * data)
+{
+	int retval;
+	unsigned long data_page;
+	unsigned long type_page;
+	unsigned long dev_page;
+	char *dir_page;
+
+	retval = copy_mount_options (type, &type_page);
+	if (retval < 0)
+		return retval;
+
+	dir_page = getname(dir_name);
+	retval = PTR_ERR(dir_page);
+	if (IS_ERR(dir_page))
+		goto out1;
+
+	retval = copy_mount_options (dev_name, &dev_page);
+	if (retval < 0)
+		goto out2;
+
+	retval = copy_mount_options (data, &data_page);
+	if (retval < 0)
+		goto out3;
+
+	lock_kernel();
+	retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
+			  flags, (void*)data_page);
+	unlock_kernel();
+	free_page(data_page);
+
+out3:
+	free_page(dev_page);
+out2:
+	putname(dir_page);
+out1:
+	free_page(type_page);
+	return retval;
+}
+
+/*
+ * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
+ * It can block. Requires the big lock held.
+ */
+void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
+		 struct dentry *dentry)
+{
+	struct dentry *old_root;
+	struct vfsmount *old_rootmnt;
+	write_lock(&fs->lock);
+	old_root = fs->root;
+	old_rootmnt = fs->rootmnt;
+	fs->rootmnt = mntget(mnt);
+	fs->root = dget(dentry);
+	write_unlock(&fs->lock);
+	if (old_root) {
+		dput(old_root);
+		mntput(old_rootmnt);
+	}
+}
+
+/*
+ * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
+ * It can block. Requires the big lock held.
+ */
+void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+		struct dentry *dentry)
+{
+	struct dentry *old_pwd;
+	struct vfsmount *old_pwdmnt;
+
+	write_lock(&fs->lock);
+	old_pwd = fs->pwd;
+	old_pwdmnt = fs->pwdmnt;
+	fs->pwdmnt = mntget(mnt);
+	fs->pwd = dget(dentry);
+	write_unlock(&fs->lock);
+
+	if (old_pwd) {
+		dput(old_pwd);
+		mntput(old_pwdmnt);
+	}
+}
+
+static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
+{
+	struct task_struct *g, *p;
+	struct fs_struct *fs;
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		task_lock(p);
+		fs = p->fs;
+		if (fs) {
+			atomic_inc(&fs->count);
+			task_unlock(p);
+			if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt)
+				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
+			if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt)
+				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
+			put_fs_struct(fs);
+		} else
+			task_unlock(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * pivot_root Semantics:
+ * Moves the root file system of the current process to the directory put_old,
+ * makes new_root as the new root file system of the current process, and sets
+ * root/cwd of all processes which had them on the current root to new_root.
+ *
+ * Restrictions:
+ * The new_root and put_old must be directories, and must not be on the
+ * same file system as the current process root. The put_old must be
+ * underneath new_root, i.e. adding a non-zero number of /.. to the string
+ * pointed to by put_old must yield the same directory as new_root. No other
+ * file system may be mounted on put_old. After all, new_root is a mountpoint.
+ *
+ * Notes:
+ *  - we don't move root/cwd if they are not at the root (reason: if something
+ *    cared enough to change them, it's probably wrong to force them elsewhere)
+ *  - it's okay to pick a root that isn't the root of a file system, e.g.
+ *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
+ *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
+ *    first.
+ */
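+
+/*
+ * Editor's sketch (not from the original source) of the usual calling
+ * sequence, e.g. from an initrd's init; device and paths are illustrative
+ * and error handling is omitted:
+ *
+ *	mount("/dev/hda1", "/new_root", "ext3", 0, NULL);
+ *	chdir("/new_root");
+ *	syscall(__NR_pivot_root, ".", "old_root");	// old_root exists under /new_root
+ *	chroot(".");
+ *	chdir("/");
+ *	umount2("/old_root", MNT_DETACH);
+ *	// ...then exec the real init from the new root
+ */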
+
+asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
+{
+	struct vfsmount *tmp;
+	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
+	int error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	lock_kernel();
+
+	error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
+	if (error)
+		goto out0;
+	error = -EINVAL;
+	if (!check_mnt(new_nd.mnt))
+		goto out1;
+
+	error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
+	if (error)
+		goto out1;
+
+	error = security_sb_pivotroot(&old_nd, &new_nd);
+	if (error) {
+		path_release(&old_nd);
+		goto out1;
+	}
+
+	read_lock(&current->fs->lock);
+	user_nd.mnt = mntget(current->fs->rootmnt);
+	user_nd.dentry = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	down_write(&current->namespace->sem);
+	down(&old_nd.dentry->d_inode->i_sem);
+	error = -EINVAL;
+	if (!check_mnt(user_nd.mnt))
+		goto out2;
+	error = -ENOENT;
+	if (IS_DEADDIR(new_nd.dentry->d_inode))
+		goto out2;
+	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
+		goto out2;
+	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
+		goto out2;
+	error = -EBUSY;
+	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
+		goto out2; /* loop, on the same file system  */
+	error = -EINVAL;
+	if (user_nd.mnt->mnt_root != user_nd.dentry)
+		goto out2; /* not a mountpoint */
+	if (new_nd.mnt->mnt_root != new_nd.dentry)
+		goto out2; /* not a mountpoint */
+	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+	spin_lock(&vfsmount_lock);
+	if (tmp != new_nd.mnt) {
+		for (;;) {
+			if (tmp->mnt_parent == tmp)
+				goto out3; /* already mounted on put_old */
+			if (tmp->mnt_parent == new_nd.mnt)
+				break;
+			tmp = tmp->mnt_parent;
+		}
+		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
+			goto out3;
+	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+		goto out3;
+	detach_mnt(new_nd.mnt, &parent_nd);
+	detach_mnt(user_nd.mnt, &root_parent);
+	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
+	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+	spin_unlock(&vfsmount_lock);
+	chroot_fs_refs(&user_nd, &new_nd);
+	security_sb_post_pivotroot(&user_nd, &new_nd);
+	error = 0;
+	path_release(&root_parent);
+	path_release(&parent_nd);
+out2:
+	up(&old_nd.dentry->d_inode->i_sem);
+	up_write(&current->namespace->sem);
+	path_release(&user_nd);
+	path_release(&old_nd);
+out1:
+	path_release(&new_nd);
+out0:
+	unlock_kernel();
+	return error;
+out3:
+	spin_unlock(&vfsmount_lock);
+	goto out2;
+}
+
+static void __init init_mount_tree(void)
+{
+	struct vfsmount *mnt;
+	struct namespace *namespace;
+	struct task_struct *g, *p;
+
+	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
+	if (IS_ERR(mnt))
+		panic("Can't create rootfs");
+	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
+	if (!namespace)
+		panic("Can't allocate initial namespace");
+	atomic_set(&namespace->count, 1);
+	INIT_LIST_HEAD(&namespace->list);
+	init_rwsem(&namespace->sem);
+	list_add(&mnt->mnt_list, &namespace->list);
+	namespace->root = mnt;
+	mnt->mnt_namespace = namespace;
+
+	init_task.namespace = namespace;
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		get_namespace(namespace);
+		p->namespace = namespace;
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
+	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
+}
+
+void __init mnt_init(unsigned long mempages)
+{
+	struct list_head *d;
+	unsigned int nr_hash;
+	int i;
+
+	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+
+	mount_hashtable = (struct list_head *)
+		__get_free_page(GFP_ATOMIC);
+
+	if (!mount_hashtable)
+		panic("Failed to allocate mount hash table\n");
+
+	/*
+	 * Find the power-of-two list-heads that can fit into the allocation..
+	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
+	 * a power-of-two.
+	 */
+	nr_hash = PAGE_SIZE / sizeof(struct list_head);
+	hash_bits = 0;
+	do {
+		hash_bits++;
+	} while ((nr_hash >> hash_bits) != 0);
+	hash_bits--;
+
+	/*
+	 * Re-calculate the actual number of entries and the mask
+	 * from the number of bits we can fit.
+	 */
+	nr_hash = 1UL << hash_bits;
+	hash_mask = nr_hash-1;
+
+	printk("Mount-cache hash table entries: %d\n", nr_hash);
+
+	/* And initialize the newly allocated array */
+	d = mount_hashtable;
+	i = nr_hash;
+	do {
+		INIT_LIST_HEAD(d);
+		d++;
+		i--;
+	} while (i);
+	sysfs_init();
+	init_rootfs();
+	init_mount_tree();
+}
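
mnt_init() sizes the mount hash table to the largest power-of-two number of list heads that fits in a single page. The same calculation as a standalone sketch; the PAGE_SIZE of 4096 is assumed purely for illustration:

/* Sketch of the hash-table sizing loop in mnt_init() above. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

int main(void)
{
	const unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned int nr_hash = page_size / sizeof(struct list_head);
	unsigned int hash_bits = 0;

	/* Find the highest power of two not exceeding nr_hash. */
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	nr_hash = 1U << hash_bits;
	printf("Mount-cache hash table entries: %u (mask 0x%x)\n",
	       nr_hash, nr_hash - 1);
	return 0;
}

On a machine with 16-byte list heads this prints 256 entries with mask 0xff.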
+
+void __put_namespace(struct namespace *namespace)
+{
+	struct vfsmount *mnt;
+
+	down_write(&namespace->sem);
+	spin_lock(&vfsmount_lock);
+
+	list_for_each_entry(mnt, &namespace->list, mnt_list) {
+		mnt->mnt_namespace = NULL;
+	}
+
+	umount_tree(namespace->root);
+	spin_unlock(&vfsmount_lock);
+	up_write(&namespace->sem);
+	kfree(namespace);
+}
diff --git a/fs/ncpfs/Kconfig b/fs/ncpfs/Kconfig
new file mode 100644
index 0000000..1428084
--- /dev/null
+++ b/fs/ncpfs/Kconfig
@@ -0,0 +1,87 @@
+#
+# NCP Filesystem configuration
+#
+config NCPFS_PACKET_SIGNING
+	bool "Packet signatures"
+	depends on NCP_FS
+	help
+	  NCP allows packets to be signed for stronger security. If you want
+	  security, say Y.  Normal users can leave it off.  To be able to use
+	  packet signing you must use ncpfs > 2.0.12.
+
+config NCPFS_IOCTL_LOCKING
+	bool "Proprietary file locking"
+	depends on NCP_FS
+	help
+	  Allows locking of records on remote volumes.  Say N unless you have
+	  special applications which are able to utilize this locking scheme.
+
+config NCPFS_STRONG
+	bool "Clear remove/delete inhibit when needed"
+	depends on NCP_FS
+	help
+	  Allows manipulation of files flagged as Delete or Rename Inhibit.
+	  To use this feature you must mount volumes with the ncpmount
+	  parameter "-s" (ncpfs-2.0.12 and newer).  Say Y unless you are not
+	  mounting volumes with -f 444.
+
+config NCPFS_NFS_NS
+	bool "Use NFS namespace if available"
+	depends on NCP_FS
+	help
+	  Allows you to utilize the NFS namespace on NetWare servers.  It
+	  gives you case-sensitive filenames.  Say Y.  You can disable it at
+	  mount time with the `-N nfs' parameter of ncpmount.
+
+config NCPFS_OS2_NS
+	bool "Use LONG (OS/2) namespace if available"
+	depends on NCP_FS
+	help
+	  Allows you to utilize the OS2/LONG namespace on NetWare servers.
+	  Filenames in this namespace are limited to 255 characters, are
+	  case insensitive, and have their case preserved.  Say Y.  You can
+	  disable it at mount time with the -N os2 parameter of ncpmount.
+
+config NCPFS_SMALLDOS
+	bool "Lowercase DOS filenames"
+	depends on NCP_FS
+	---help---
+	  If you say Y here, every filename on a NetWare server volume using
+	  the OS2/LONG namespace and created under DOS or on a volume using
+	  DOS namespace will be converted to lowercase characters.
+	  Saying N here will give you these filenames in uppercase.
+
+	  This is only a cosmetic option since the OS2/LONG namespace is case
+	  insensitive. The only major reason for this option is backward
+	  compatibility when moving from DOS to OS2/LONG namespace support.
+	  Long filenames (created by Win95) will not be affected.
+
+	  This option does not solve the problem that filenames appear
+	  differently under Linux and under Windows, since Windows does
+	  additional conversions on the client side.  You can achieve similar
+	  effects by saying Y to "Use Native Language Support" below.
+
+config NCPFS_NLS
+	bool "Use Native Language Support"
+	depends on NCP_FS
+	select NLS
+	help
+	  Allows you to use codepages and I/O charsets for file name
+	  translation between the server file system and input/output. This
+	  may be useful if you want to access the server with other operating
+	  systems, e.g. Windows 95.  See also NLS for more information.
+
+	  To select codepages and I/O charsets use ncpfs-2.2.0.13 or newer.
+
+config NCPFS_EXTRAS
+	bool "Enable symbolic links and execute flags"
+	depends on NCP_FS
+	help
+	  This enables the use of symbolic links and an execute permission
+	  bit on NCPFS. The file server need not have long name space or NFS
+	  name space loaded for these to work.
+
+	  To use the new attributes, it is recommended to use the flags
+	  '-f 600 -d 755' on the ncpmount command line.
+
diff --git a/fs/ncpfs/Makefile b/fs/ncpfs/Makefile
new file mode 100644
index 0000000..68ea095
--- /dev/null
+++ b/fs/ncpfs/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the linux ncp filesystem routines.
+#
+
+obj-$(CONFIG_NCP_FS) += ncpfs.o
+
+ncpfs-y      := dir.o file.o inode.o ioctl.o mmap.o ncplib_kernel.o sock.o \
+		ncpsign_kernel.o getopt.o
+
+ncpfs-$(CONFIG_NCPFS_EXTRAS)   += symlink.o
+ncpfs-$(CONFIG_NCPFS_NFS_NS)   += symlink.o
+
+# If you want debugging output, please uncomment the following line
+# EXTRA_CFLAGS += -DDEBUG_NCP=1
+
+CFLAGS_ncplib_kernel.o := -finline-functions
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
new file mode 100644
index 0000000..2dc2d86
--- /dev/null
+++ b/fs/ncpfs/dir.c
@@ -0,0 +1,1260 @@
+/*
+ *  dir.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified for big endian by J.F. Chadima and David S. Miller
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *  Modified 1998, 1999 Wolfram Pienkoss for NLS
+ *  Modified 1999 Wolfram Pienkoss for directory caching
+ *  Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/smp_lock.h>
+
+#include <linux/ncp_fs.h>
+
+#include "ncplib_kernel.h"
+
+static void ncp_read_volume_list(struct file *, void *, filldir_t,
+				struct ncp_cache_control *);
+static void ncp_do_readdir(struct file *, void *, filldir_t,
+				struct ncp_cache_control *);
+
+static int ncp_readdir(struct file *, void *, filldir_t);
+
+static int ncp_create(struct inode *, struct dentry *, int, struct nameidata *);
+static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int ncp_unlink(struct inode *, struct dentry *);
+static int ncp_mkdir(struct inode *, struct dentry *, int);
+static int ncp_rmdir(struct inode *, struct dentry *);
+static int ncp_rename(struct inode *, struct dentry *,
+	  	      struct inode *, struct dentry *);
+static int ncp_mknod(struct inode * dir, struct dentry *dentry,
+		     int mode, dev_t rdev);
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern int ncp_symlink(struct inode *, struct dentry *, const char *);
+#else
+#define ncp_symlink NULL
+#endif
+		      
+struct file_operations ncp_dir_operations =
+{
+	.read		= generic_read_dir,
+	.readdir	= ncp_readdir,
+	.ioctl		= ncp_ioctl,
+};
+
+struct inode_operations ncp_dir_inode_operations =
+{
+	.create		= ncp_create,
+	.lookup		= ncp_lookup,
+	.unlink		= ncp_unlink,
+	.symlink	= ncp_symlink,
+	.mkdir		= ncp_mkdir,
+	.rmdir		= ncp_rmdir,
+	.mknod		= ncp_mknod,
+	.rename		= ncp_rename,
+	.setattr	= ncp_notify_change,
+};
+
+/*
+ * Dentry operations routines
+ */
+static int ncp_lookup_validate(struct dentry *, struct nameidata *);
+static int ncp_hash_dentry(struct dentry *, struct qstr *);
+static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
+static int ncp_delete_dentry(struct dentry *);
+
+static struct dentry_operations ncp_dentry_operations =
+{
+	.d_revalidate	= ncp_lookup_validate,
+	.d_hash		= ncp_hash_dentry,
+	.d_compare	= ncp_compare_dentry,
+	.d_delete	= ncp_delete_dentry,
+};
+
+struct dentry_operations ncp_root_dentry_operations =
+{
+	.d_hash		= ncp_hash_dentry,
+	.d_compare	= ncp_compare_dentry,
+	.d_delete	= ncp_delete_dentry,
+};
+
+
+/*
+ * Note: leave the hash unchanged if the directory
+ * is case-sensitive.
+ */
+static int 
+ncp_hash_dentry(struct dentry *dentry, struct qstr *this)
+{
+	struct nls_table *t;
+	unsigned long hash;
+	int i;
+
+	t = NCP_IO_TABLE(dentry);
+
+	if (!ncp_case_sensitive(dentry->d_inode)) {
+		hash = init_name_hash();
+		for (i=0; i<this->len ; i++)
+			hash = partial_name_hash(ncp_tolower(t, this->name[i]),
+									hash);
+		this->hash = end_name_hash(hash);
+	}
+	return 0;
+}
+
+static int
+ncp_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	if (a->len != b->len)
+		return 1;
+
+	if (ncp_case_sensitive(dentry->d_inode))
+		return strncmp(a->name, b->name, a->len);
+
+	return ncp_strnicmp(NCP_IO_TABLE(dentry), a->name, b->name, a->len);
+}
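
With these two hooks, names that differ only in case hash to the same bucket and compare equal whenever the directory is case-insensitive; the mount's NLS table supplies the actual case folding. A userspace sketch of the idea, substituting plain ASCII tolower()/strncasecmp() for ncp_tolower()/ncp_strnicmp() (the hash mixing constant below is only illustrative):

/* Sketch of case-insensitive dentry hashing and comparison. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

static unsigned long name_hash(const char *name, size_t len, int case_sensitive)
{
	unsigned long hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		unsigned char c = name[i];

		if (!case_sensitive)
			c = tolower(c);
		hash = (hash + (c << 4) + (c >> 4)) * 11;	/* simple mix */
	}
	return hash;
}

static int name_cmp(const char *a, const char *b, size_t len, int case_sensitive)
{
	return case_sensitive ? strncmp(a, b, len) : strncasecmp(a, b, len);
}

int main(void)
{
	printf("same hash: %d, compare: %d\n",
	       name_hash("Readme", 6, 0) == name_hash("README", 6, 0),
	       name_cmp("Readme", "README", 6, 0));
	return 0;
}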
+
+/*
+ * This is the callback from dput() when d_count is going to 0.
+ * We use this to unhash dentries with bad inodes.
+ * Closing files can be safely postponed until iput() - it's done there anyway.
+ */
+static int
+ncp_delete_dentry(struct dentry * dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (inode) {
+		if (is_bad_inode(inode))
+			return 1;
+	} else
+	{
+	/* N.B. Unhash negative dentries? */
+	}
+	return 0;
+}
+
+static inline int
+ncp_single_volume(struct ncp_server *server)
+{
+	return (server->m.mounted_vol[0] != '\0');
+}
+
+static inline int ncp_is_server_root(struct inode *inode)
+{
+	return (!ncp_single_volume(NCP_SERVER(inode)) &&
+		inode == inode->i_sb->s_root->d_inode);
+}
+
+
+/*
+ * This is the callback when the dcache has a lookup hit.
+ */
+
+
+#ifdef CONFIG_NCPFS_STRONG
+/* try to delete a readonly file (NW R bit set) */
+
+static int
+ncp_force_unlink(struct inode *dir, struct dentry* dentry)
+{
+	int res = 0x9c, res2;
+	struct nw_modify_dos_info info;
+	__le32 old_nwattr;
+	struct inode *inode;
+
+	memset(&info, 0, sizeof(info));
+	
+	/* remove the Read-Only flag on the NW server */
+	inode = dentry->d_inode;
+
+	old_nwattr = NCP_FINFO(inode)->nwattr;
+	info.attributes = old_nwattr & ~(aRONLY|aDELETEINHIBIT|aRENAMEINHIBIT);
+	res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
+	if (res2)
+		goto leave_me;
+
+	/* now try the delete operation again */
+	res = ncp_del_file_or_subdir2(NCP_SERVER(dir), dentry);
+
+	if (res) {	/* delete failed, set the R bit again */
+		info.attributes = old_nwattr;
+		res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
+		if (res2)
+			goto leave_me;
+	}
+leave_me:
+	return res;
+}
+#endif	/* CONFIG_NCPFS_STRONG */
+
+#ifdef CONFIG_NCPFS_STRONG
+static int
+ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_name,
+                 struct inode *new_dir, struct dentry* new_dentry, char *_new_name)
+{
+	struct nw_modify_dos_info info;
+	int res = 0x90, res2;
+	struct inode *old_inode = old_dentry->d_inode;
+	__le32 old_nwattr = NCP_FINFO(old_inode)->nwattr;
+	__le32 new_nwattr = 0; /* shut compiler warning */
+	int old_nwattr_changed = 0;
+	int new_nwattr_changed = 0;
+
+	memset(&info, 0, sizeof(info));
+	
+	/* remove the Read-Only flag on the NW server */
+
+	info.attributes = old_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
+	res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
+	if (!res2)
+		old_nwattr_changed = 1;
+	if (new_dentry && new_dentry->d_inode) {
+		new_nwattr = NCP_FINFO(new_dentry->d_inode)->nwattr;
+		info.attributes = new_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
+		res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
+		if (!res2)
+			new_nwattr_changed = 1;
+	}
+	/* now try the rename operation again,
+	   but only if something really happened */
+	if (new_nwattr_changed || old_nwattr_changed) {
+		res = ncp_ren_or_mov_file_or_subdir(NCP_SERVER(old_dir),
+						    old_dir, _old_name,
+						    new_dir, _new_name);
+	}
+	if (res)
+		goto leave_me;
+	/* The file was successfully renamed, so:
+	   do not set attributes on the old file - it no longer exists;
+	   copy attributes from the old file to the new one instead. */
+	new_nwattr_changed = old_nwattr_changed;
+	new_nwattr = old_nwattr;
+	old_nwattr_changed = 0;
+	
+leave_me:;
+	if (old_nwattr_changed) {
+		info.attributes = old_nwattr;
+		res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
+		/* ignore errors */
+	}
+	if (new_nwattr_changed)	{
+		info.attributes = new_nwattr;
+		res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
+		/* ignore errors */
+	}
+	return res;
+}
+#endif	/* CONFIG_NCPFS_STRONG */
+
+
+static int
+__ncp_lookup_validate(struct dentry * dentry, struct nameidata *nd)
+{
+	struct ncp_server *server;
+	struct dentry *parent;
+	struct inode *dir;
+	struct ncp_entry_info finfo;
+	int res, val = 0, len;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+
+	if (!dentry->d_inode)
+		goto finished;
+
+	server = NCP_SERVER(dir);
+
+	if (!ncp_conn_valid(server))
+		goto finished;
+
+	/*
+	 * Inspired by smbfs:
+	 * The default validation is based on dentry age:
+	 * We set the max age at mount time.  (But each
+	 * successful server lookup renews the timestamp.)
+	 */
+	val = NCP_TEST_AGE(server, dentry);
+	if (val)
+		goto finished;
+
+	DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		NCP_GET_AGE(dentry));
+
+	len = sizeof(__name);
+	if (ncp_is_server_root(dir)) {
+		res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+				 dentry->d_name.len, 1);
+		if (!res)
+			res = ncp_lookup_volume(server, __name, &(finfo.i));
+	} else {
+		res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+				 dentry->d_name.len, !ncp_preserve_case(dir));
+		if (!res)
+			res = ncp_obtain_info(server, dir, __name, &(finfo.i));
+	}
+	finfo.volume = finfo.i.volNumber;
+	DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
+		dentry->d_parent->d_name.name, __name, res);
+	/*
+	 * If we didn't find it, or if it has a different dirEntNum to
+	 * what we remember, it's not valid any more.
+	 */
+	if (!res) {
+		if (finfo.i.dirEntNum == NCP_FINFO(dentry->d_inode)->dirEntNum) {
+			ncp_new_dentry(dentry);
+			val=1;
+		} else
+			DDPRINTK("ncp_lookup_validate: found, but dirEntNum changed\n");
+
+		ncp_update_inode2(dentry->d_inode, &finfo);
+	}
+
+finished:
+	DDPRINTK("ncp_lookup_validate: result=%d\n", val);
+	dput(parent);
+	return val;
+}
+
+static int
+ncp_lookup_validate(struct dentry * dentry, struct nameidata *nd)
+{
+	int res;
+	lock_kernel();
+	res = __ncp_lookup_validate(dentry, nd);
+	unlock_kernel();
+	return res;
+}
+
+static struct dentry *
+ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
+{
+	struct dentry *dent = dentry;
+	struct list_head *next;
+
+	if (d_validate(dent, parent)) {
+		if (dent->d_name.len <= NCP_MAXPATHLEN &&
+		    (unsigned long)dent->d_fsdata == fpos) {
+			if (!dent->d_inode) {
+				dput(dent);
+				dent = NULL;
+			}
+			return dent;
+		}
+		dput(dent);
+	}
+
+	/* If the cached dentry pointer is not valid, search the parent's
+	   list of subdirectories for one with a matching f_pos. */
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dent = list_entry(next, struct dentry, d_child);
+		if ((unsigned long)dent->d_fsdata == fpos) {
+			if (dent->d_inode)
+				dget_locked(dent);
+			else
+				dent = NULL;
+			spin_unlock(&dcache_lock);
+			goto out;
+		}
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
+	return NULL;
+
+out:
+	return dent;
+}
+
+static time_t ncp_obtain_mtime(struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct ncp_server *server = NCP_SERVER(inode);
+	struct nw_info_struct i;
+
+	if (!ncp_conn_valid(server) || ncp_is_server_root(inode))
+		return 0;
+
+	if (ncp_obtain_info(server, inode, NULL, &i))
+		return 0;
+
+	return ncp_date_dos2unix(i.modifyTime, i.modifyDate);
+}
+
+static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct page *page = NULL;
+	struct ncp_server *server = NCP_SERVER(inode);
+	union  ncp_dir_cache *cache = NULL;
+	struct ncp_cache_control ctl;
+	int result, mtime_valid = 0;
+	time_t mtime = 0;
+
+	lock_kernel();
+
+	ctl.page  = NULL;
+	ctl.cache = NULL;
+
+	DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		(int) filp->f_pos);
+
+	result = -EIO;
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	result = 0;
+	if (filp->f_pos == 0) {
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+			goto out;
+		filp->f_pos = 1;
+	}
+	if (filp->f_pos == 1) {
+		if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR))
+			goto out;
+		filp->f_pos = 2;
+	}
+
+	page = grab_cache_page(&inode->i_data, 0);
+	if (!page)
+		goto read_really;
+
+	ctl.cache = cache = kmap(page);
+	ctl.head  = cache->head;
+
+	if (!PageUptodate(page) || !ctl.head.eof)
+		goto init_cache;
+
+	if (filp->f_pos == 2) {
+		if (jiffies - ctl.head.time >= NCP_MAX_AGE(server))
+			goto init_cache;
+
+		mtime = ncp_obtain_mtime(dentry);
+		mtime_valid = 1;
+		if ((!mtime) || (mtime != ctl.head.mtime))
+			goto init_cache;
+	}
+
+	if (filp->f_pos > ctl.head.end)
+		goto finished;
+
+	ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2);
+	ctl.ofs  = ctl.fpos / NCP_DIRCACHE_SIZE;
+	ctl.idx  = ctl.fpos % NCP_DIRCACHE_SIZE;
+
+	for (;;) {
+		if (ctl.ofs != 0) {
+			ctl.page = find_lock_page(&inode->i_data, ctl.ofs);
+			if (!ctl.page)
+				goto invalid_cache;
+			ctl.cache = kmap(ctl.page);
+			if (!PageUptodate(ctl.page))
+				goto invalid_cache;
+		}
+		while (ctl.idx < NCP_DIRCACHE_SIZE) {
+			struct dentry *dent;
+			int res;
+
+			dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx],
+						dentry, filp->f_pos);
+			if (!dent)
+				goto invalid_cache;
+			res = filldir(dirent, dent->d_name.name,
+					dent->d_name.len, filp->f_pos,
+					dent->d_inode->i_ino, DT_UNKNOWN);
+			dput(dent);
+			if (res)
+				goto finished;
+			filp->f_pos += 1;
+			ctl.idx += 1;
+			if (filp->f_pos > ctl.head.end)
+				goto finished;
+		}
+		if (ctl.page) {
+			kunmap(ctl.page);
+			SetPageUptodate(ctl.page);
+			unlock_page(ctl.page);
+			page_cache_release(ctl.page);
+			ctl.page = NULL;
+		}
+		ctl.idx  = 0;
+		ctl.ofs += 1;
+	}
+invalid_cache:
+	if (ctl.page) {
+		kunmap(ctl.page);
+		unlock_page(ctl.page);
+		page_cache_release(ctl.page);
+		ctl.page = NULL;
+	}
+	ctl.cache = cache;
+init_cache:
+	ncp_invalidate_dircache_entries(dentry);
+	if (!mtime_valid) {
+		mtime = ncp_obtain_mtime(dentry);
+		mtime_valid = 1;
+	}
+	ctl.head.mtime = mtime;
+	ctl.head.time = jiffies;
+	ctl.head.eof = 0;
+	ctl.fpos = 2;
+	ctl.ofs = 0;
+	ctl.idx = NCP_DIRCACHE_START;
+	ctl.filled = 0;
+	ctl.valid  = 1;
+read_really:
+	if (ncp_is_server_root(inode)) {
+		ncp_read_volume_list(filp, dirent, filldir, &ctl);
+	} else {
+		ncp_do_readdir(filp, dirent, filldir, &ctl);
+	}
+	ctl.head.end = ctl.fpos - 1;
+	ctl.head.eof = ctl.valid;
+finished:
+	if (page) {
+		cache->head = ctl.head;
+		kunmap(page);
+		SetPageUptodate(page);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	if (ctl.page) {
+		kunmap(ctl.page);
+		SetPageUptodate(ctl.page);
+		unlock_page(ctl.page);
+		page_cache_release(ctl.page);
+	}
+out:
+	unlock_kernel();
+	return result;
+}
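
ncp_readdir() keeps its directory cache in the inode's own page cache: page 0 carries the cache header plus the first dentry slots, and every following page carries NCP_DIRCACHE_SIZE slots, so the f_pos seen by readdir maps to a (page, slot) pair through the fpos/ofs/idx arithmetic above. A small sketch of that mapping with made-up values for the two constants (in the kernel they derive from PAGE_SIZE and the cache layout):

/* Sketch of the f_pos -> (cache page, slot) mapping in ncp_readdir(). */
#include <stdio.h>

#define DIRCACHE_START	15	/* slots available in page 0 (assumed) */
#define DIRCACHE_SIZE	510	/* slots per cache page (assumed) */

int main(void)
{
	unsigned long f_pos;

	/* positions 0 and 1 are "." and ".."; cached entries start at 2 */
	for (f_pos = 2; f_pos < 7; f_pos++) {
		unsigned long fpos = f_pos + (DIRCACHE_START - 2);

		printf("f_pos %lu -> page %lu, slot %lu\n",
		       f_pos, fpos / DIRCACHE_SIZE, fpos % DIRCACHE_SIZE);
	}
	return 0;
}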
+
+static int
+ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+		struct ncp_cache_control *ctrl, struct ncp_entry_info *entry)
+{
+	struct dentry *newdent, *dentry = filp->f_dentry;
+	struct inode *newino, *inode = dentry->d_inode;
+	struct ncp_cache_control ctl = *ctrl;
+	struct qstr qname;
+	int valid = 0;
+	int hashed = 0;
+	ino_t ino = 0;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+
+	qname.len = sizeof(__name);
+	if (ncp_vol2io(NCP_SERVER(inode), __name, &qname.len,
+			entry->i.entryName, entry->i.nameLen,
+			!ncp_preserve_entry_case(inode, entry->i.NSCreator)))
+		return 1; /* I'm not sure */
+
+	qname.name = __name;
+	qname.hash = full_name_hash(qname.name, qname.len);
+
+	if (dentry->d_op && dentry->d_op->d_hash)
+		if (dentry->d_op->d_hash(dentry, &qname) != 0)
+			goto end_advance;
+
+	newdent = d_lookup(dentry, &qname);
+
+	if (!newdent) {
+		newdent = d_alloc(dentry, &qname);
+		if (!newdent)
+			goto end_advance;
+	} else {
+		hashed = 1;
+		memcpy((char *) newdent->d_name.name, qname.name,
+							newdent->d_name.len);
+	}
+
+	if (!newdent->d_inode) {
+		entry->opened = 0;
+		entry->ino = iunique(inode->i_sb, 2);
+		newino = ncp_iget(inode->i_sb, entry);
+		if (newino) {
+			newdent->d_op = &ncp_dentry_operations;
+			d_instantiate(newdent, newino);
+			if (!hashed)
+				d_rehash(newdent);
+		}
+	} else
+		ncp_update_inode2(newdent->d_inode, entry);
+
+	if (newdent->d_inode) {
+		ino = newdent->d_inode->i_ino;
+		newdent->d_fsdata = (void *) ctl.fpos;
+		ncp_new_dentry(newdent);
+	}
+
+	if (ctl.idx >= NCP_DIRCACHE_SIZE) {
+		if (ctl.page) {
+			kunmap(ctl.page);
+			SetPageUptodate(ctl.page);
+			unlock_page(ctl.page);
+			page_cache_release(ctl.page);
+		}
+		ctl.cache = NULL;
+		ctl.idx  -= NCP_DIRCACHE_SIZE;
+		ctl.ofs  += 1;
+		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
+		if (ctl.page)
+			ctl.cache = kmap(ctl.page);
+	}
+	if (ctl.cache) {
+		ctl.cache->dentry[ctl.idx] = newdent;
+		valid = 1;
+	}
+	dput(newdent);
+end_advance:
+	if (!valid)
+		ctl.valid = 0;
+	if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
+		if (!ino)
+			ino = find_inode_number(dentry, &qname);
+		if (!ino)
+			ino = iunique(inode->i_sb, 2);
+		ctl.filled = filldir(dirent, qname.name, qname.len,
+				     filp->f_pos, ino, DT_UNKNOWN);
+		if (!ctl.filled)
+			filp->f_pos += 1;
+	}
+	ctl.fpos += 1;
+	ctl.idx  += 1;
+	*ctrl = ctl;
+	return (ctl.valid || !ctl.filled);
+}
+
+static void
+ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
+			struct ncp_cache_control *ctl)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct ncp_server *server = NCP_SERVER(inode);
+	struct ncp_volume_info info;
+	struct ncp_entry_info entry;
+	int i;
+
+	DPRINTK("ncp_read_volume_list: pos=%ld\n",
+			(unsigned long) filp->f_pos);
+
+	for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
+
+		if (ncp_get_volume_info_with_number(server, i, &info) != 0)
+			return;
+		if (!strlen(info.volume_name))
+			continue;
+
+		DPRINTK("ncp_read_volume_list: found vol: %s\n",
+			info.volume_name);
+
+		if (ncp_lookup_volume(server, info.volume_name,
+					&entry.i)) {
+			DPRINTK("ncpfs: could not lookup vol %s\n",
+				info.volume_name);
+			continue;
+		}
+		entry.volume = entry.i.volNumber;
+		if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry))
+			return;
+	}
+}
+
+static void
+ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
+						struct ncp_cache_control *ctl)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *dir = dentry->d_inode;
+	struct ncp_server *server = NCP_SERVER(dir);
+	struct nw_search_sequence seq;
+	struct ncp_entry_info entry;
+	int err;
+	void* buf;
+	int more;
+	size_t bufsize;
+
+	DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		(unsigned long) filp->f_pos);
+	PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
+		dentry->d_name.name, NCP_FINFO(dir)->volNumber,
+		NCP_FINFO(dir)->dirEntNum);
+
+	err = ncp_initialize_search(server, dir, &seq);
+	if (err) {
+		DPRINTK("ncp_do_readdir: init failed, err=%d\n", err);
+		return;
+	}
+#ifdef USE_OLD_SLOW_DIRECTORY_LISTING
+	for (;;) {
+		err = ncp_search_for_file_or_subdir(server, &seq, &entry.i);
+		if (err) {
+			DPRINTK("ncp_do_readdir: search failed, err=%d\n", err);
+			break;
+		}
+		entry.volume = entry.i.volNumber;
+		if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry))
+			break;
+	}
+#else
+	/* We MUST NOT use the server->buffer_size negotiated with the server
+	   if we are using UDP: over UDP the server uses a maximum buffer size
+	   determined by the MTU, and over TCP it uses a hardwired value of
+	   65KB (== 66560 bytes).  So we use 128KB, just to be sure, as there
+	   is no way to know this value in advance. */
+	bufsize = 131072;
+	buf = vmalloc(bufsize);
+	if (!buf)
+		return;
+	do {
+		int cnt;
+		char* rpl;
+		size_t rpls;
+
+		err = ncp_search_for_fileset(server, &seq, &more, &cnt, buf, bufsize, &rpl, &rpls);
+		if (err)		/* Error */
+			break;
+		if (!cnt)		/* prevent endless loop */
+			break;
+		while (cnt--) {
+			size_t onerpl;
+			
+			if (rpls < offsetof(struct nw_info_struct, entryName))
+				break;	/* short packet */
+			ncp_extract_file_info(rpl, &entry.i);
+			onerpl = offsetof(struct nw_info_struct, entryName) + entry.i.nameLen;
+			if (rpls < onerpl)
+				break;	/* short packet */
+			(void)ncp_obtain_nfs_info(server, &entry.i);
+			rpl += onerpl;
+			rpls -= onerpl;
+			entry.volume = entry.i.volNumber;
+			if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry))
+				break;
+		}
+	} while (more);
+	vfree(buf);
+#endif
+	return;
+}
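
The non-legacy path above fetches a whole set of entries per request and then walks the reply as a sequence of variable-length records: a fixed part up to entryName followed by nameLen bytes of name, bailing out if the buffer ends mid-record. A standalone sketch of that parsing pattern, with a deliberately simplified, hypothetical record header in place of struct nw_info_struct:

/* Sketch: walk back-to-back variable-length records in a reply buffer. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {
	uint8_t volume;			/* hypothetical fields */
	uint8_t name_len;		/* bytes of name following the header */
};

static void parse_replies(const char *rpl, size_t rpls)
{
	while (rpls >= sizeof(struct rec_hdr)) {
		struct rec_hdr h;
		size_t onerpl;

		memcpy(&h, rpl, sizeof(h));		/* avoid unaligned reads */
		onerpl = sizeof(h) + h.name_len;
		if (rpls < onerpl)
			break;				/* short packet: stop */
		printf("vol %d name %.*s\n", h.volume, h.name_len, rpl + sizeof(h));
		rpl += onerpl;
		rpls -= onerpl;
	}
}

int main(void)
{
	const char buf[] = { 1, 1, 'A', 2, 2, 'B', 'C' };

	parse_replies(buf, sizeof(buf));	/* prints the two packed records */
	return 0;
}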
+
+int ncp_conn_logged_in(struct super_block *sb)
+{
+	struct ncp_server* server = NCP_SBP(sb);
+	int result;
+
+	if (ncp_single_volume(server)) {
+		int len;
+		struct dentry* dent;
+		__u32 volNumber;
+		__le32 dirEntNum;
+		__le32 DosDirNum;
+		__u8 __name[NCP_MAXPATHLEN + 1];
+
+		len = sizeof(__name);
+		result = ncp_io2vol(server, __name, &len, server->m.mounted_vol,
+				    strlen(server->m.mounted_vol), 1);
+		if (result)
+			goto out;
+		result = -ENOENT;
+		if (ncp_get_volume_root(server, __name, &volNumber, &dirEntNum, &DosDirNum)) {
+			PPRINTK("ncp_conn_logged_in: %s not found\n",
+				server->m.mounted_vol);
+			goto out;
+		}
+		dent = sb->s_root;
+		if (dent) {
+			struct inode* ino = dent->d_inode;
+			if (ino) {
+				NCP_FINFO(ino)->volNumber = volNumber;
+				NCP_FINFO(ino)->dirEntNum = dirEntNum;
+				NCP_FINFO(ino)->DosDirNum = DosDirNum;
+			} else {
+				DPRINTK("ncpfs: sb->s_root->d_inode == NULL!\n");
+			}
+		} else {
+			DPRINTK("ncpfs: sb->s_root == NULL!\n");
+		}
+	}
+	result = 0;
+
+out:
+	return result;
+}
+
+static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct ncp_server *server = NCP_SERVER(dir);
+	struct inode *inode = NULL;
+	struct ncp_entry_info finfo;
+	int error, res, len;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+
+	lock_kernel();
+	error = -EIO;
+	if (!ncp_conn_valid(server))
+		goto finished;
+
+	PPRINTK("ncp_lookup: server lookup for %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	len = sizeof(__name);
+	if (ncp_is_server_root(dir)) {
+		res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+				 dentry->d_name.len, 1);
+		if (!res)
+			res = ncp_lookup_volume(server, __name, &(finfo.i));
+	} else {
+		res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+				 dentry->d_name.len, !ncp_preserve_case(dir));
+		if (!res)
+			res = ncp_obtain_info(server, dir, __name, &(finfo.i));
+	}
+	PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
+		dentry->d_parent->d_name.name, __name, res);
+	/*
+	 * If we didn't find an entry, make a negative dentry.
+	 */
+	if (res)
+		goto add_entry;
+
+	/*
+	 * Create an inode for the entry.
+	 */
+	finfo.opened = 0;
+	finfo.ino = iunique(dir->i_sb, 2);
+	finfo.volume = finfo.i.volNumber;
+	error = -EACCES;
+	inode = ncp_iget(dir->i_sb, &finfo);
+
+	if (inode) {
+		ncp_new_dentry(dentry);
+add_entry:
+		dentry->d_op = &ncp_dentry_operations;
+		d_add(dentry, inode);
+		error = 0;
+	}
+
+finished:
+	PPRINTK("ncp_lookup: result=%d\n", error);
+	unlock_kernel();
+	return ERR_PTR(error);
+}
+
+/*
+ * This code is common to create, mkdir, and mknod.
+ */
+static int ncp_instantiate(struct inode *dir, struct dentry *dentry,
+			struct ncp_entry_info *finfo)
+{
+	struct inode *inode;
+	int error = -EINVAL;
+
+	finfo->ino = iunique(dir->i_sb, 2);
+	inode = ncp_iget(dir->i_sb, finfo);
+	if (!inode)
+		goto out_close;
+	d_instantiate(dentry,inode);
+	error = 0;
+out:
+	return error;
+
+out_close:
+	PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+	ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
+	goto out;
+}
+
+int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
+		   dev_t rdev, __le32 attributes)
+{
+	struct ncp_server *server = NCP_SERVER(dir);
+	struct ncp_entry_info finfo;
+	int error, result, len;
+	int opmode;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+	
+	PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+
+	error = -EIO;
+	lock_kernel();
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	ncp_age_dentry(server, dentry);
+	len = sizeof(__name);
+	error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+			   dentry->d_name.len, !ncp_preserve_case(dir));
+	if (error)
+		goto out;
+
+	error = -EACCES;
+	
+	if (S_ISREG(mode) && 
+	    (server->m.flags & NCP_MOUNT_EXTRAS) && 
+	    (mode & S_IXUGO))
+		attributes |= aSYSTEM | aSHARED;
+	
+	result = ncp_open_create_file_or_subdir(server, dir, __name,
+				OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
+				attributes, AR_READ | AR_WRITE, &finfo);
+	opmode = O_RDWR;
+	if (result) {
+		result = ncp_open_create_file_or_subdir(server, dir, __name,
+				OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
+				attributes, AR_WRITE, &finfo);
+		if (result) {
+			if (result == 0x87)
+				error = -ENAMETOOLONG;
+			DPRINTK("ncp_create: %s/%s failed\n",
+				dentry->d_parent->d_name.name, dentry->d_name.name);
+			goto out;
+		}
+		opmode = O_WRONLY;
+	}
+	finfo.access = opmode;
+	if (ncp_is_nfs_extras(server, finfo.volume)) {
+		finfo.i.nfs.mode = mode;
+		finfo.i.nfs.rdev = new_encode_dev(rdev);
+		if (ncp_modify_nfs_info(server, finfo.volume,
+					finfo.i.dirEntNum,
+					mode, new_encode_dev(rdev)) != 0)
+			goto out;
+	}
+
+	error = ncp_instantiate(dir, dentry, &finfo);
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int ncp_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	return ncp_create_new(dir, dentry, mode, 0, 0);
+}
+
+static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct ncp_entry_info finfo;
+	struct ncp_server *server = NCP_SERVER(dir);
+	int error, len;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+
+	DPRINTK("ncp_mkdir: making %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	error = -EIO;
+	lock_kernel();
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	ncp_age_dentry(server, dentry);
+	len = sizeof(__name);
+	error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+			   dentry->d_name.len, !ncp_preserve_case(dir));
+	if (error)
+		goto out;
+
+	error = -EACCES;
+	if (ncp_open_create_file_or_subdir(server, dir, __name,
+					   OC_MODE_CREATE, aDIR,
+					   cpu_to_le16(0xffff),
+					   &finfo) == 0)
+	{
+		if (ncp_is_nfs_extras(server, finfo.volume)) {
+			mode |= S_IFDIR;
+			finfo.i.nfs.mode = mode;
+			if (ncp_modify_nfs_info(server,
+						finfo.volume,
+						finfo.i.dirEntNum,
+						mode, 0) != 0)
+				goto out;
+		}
+		error = ncp_instantiate(dir, dentry, &finfo);
+	}
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct ncp_server *server = NCP_SERVER(dir);
+	int error, result, len;
+	__u8 __name[NCP_MAXPATHLEN + 1];
+
+	DPRINTK("ncp_rmdir: removing %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	error = -EIO;
+	lock_kernel();
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	error = -EBUSY;
+	if (!d_unhashed(dentry))
+		goto out;
+
+	len = sizeof(__name);
+	error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+			   dentry->d_name.len, !ncp_preserve_case(dir));
+	if (error)
+		goto out;
+
+	result = ncp_del_file_or_subdir(server, dir, __name);
+	switch (result) {
+		case 0x00:
+			error = 0;
+			break;
+		case 0x85:	/* unauthorized to delete file */
+		case 0x8A:	/* unauthorized to delete file */
+			error = -EACCES;
+			break;
+		case 0x8F:
+		case 0x90:	/* read only */
+			error = -EPERM;
+			break;
+		case 0x9F:	/* in use by another client */
+			error = -EBUSY;
+			break;
+		case 0xA0:	/* directory not empty */
+			error = -ENOTEMPTY;
+			break;
+		case 0xFF:	/* someone deleted file */
+			error = -ENOENT;
+			break;
+		default:
+			error = -EACCES;
+			break;
+	}
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int ncp_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct ncp_server *server;
+	int error;
+
+	lock_kernel();
+	server = NCP_SERVER(dir);
+	DPRINTK("ncp_unlink: unlinking %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+	
+	error = -EIO;
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	/*
+	 * Check whether to close the file ...
+	 */
+	if (inode) {
+		PPRINTK("ncp_unlink: closing file\n");
+		ncp_make_closed(inode);
+	}
+
+	error = ncp_del_file_or_subdir2(server, dentry);
+#ifdef CONFIG_NCPFS_STRONG
+	/* 0x9C is "invalid path".  It should be 0x8F or 0x90 (read only),
+	   but it is not :-( */
+	if ((error == 0x9C || error == 0x90) && server->m.flags & NCP_MOUNT_STRONG) { /* R/O */
+		error = ncp_force_unlink(dir, dentry);
+	}
+#endif
+	switch (error) {
+		case 0x00:
+			DPRINTK("ncp: removed %s/%s\n",
+				dentry->d_parent->d_name.name, dentry->d_name.name);
+			break;
+		case 0x85:
+		case 0x8A:
+			error = -EACCES;
+			break;
+		case 0x8D:	/* some files in use */
+		case 0x8E:	/* all files in use */
+			error = -EBUSY;
+			break;
+		case 0x8F:	/* some read only */
+		case 0x90:	/* all read only */
+		case 0x9C:	/* !!! returned when in-use or read-only by NW4 */
+			error = -EPERM;
+			break;
+		case 0xFF:
+			error = -ENOENT;
+			break;
+		default:
+			error = -EACCES;
+			break;
+	}
+		
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
+		      struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct ncp_server *server = NCP_SERVER(old_dir);
+	int error;
+	int old_len, new_len;
+	__u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
+
+	DPRINTK("ncp_rename: %s/%s to %s/%s\n",
+		old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+		new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+
+	error = -EIO;
+	lock_kernel();
+	if (!ncp_conn_valid(server))
+		goto out;
+
+	ncp_age_dentry(server, old_dentry);
+	ncp_age_dentry(server, new_dentry);
+
+	old_len = sizeof(__old_name);
+	error = ncp_io2vol(server, __old_name, &old_len,
+			   old_dentry->d_name.name, old_dentry->d_name.len,
+			   !ncp_preserve_case(old_dir));
+	if (error)
+		goto out;
+
+	new_len = sizeof(__new_name);
+	error = ncp_io2vol(server, __new_name, &new_len,
+			   new_dentry->d_name.name, new_dentry->d_name.len,
+			   !ncp_preserve_case(new_dir));
+	if (error)
+		goto out;
+
+	error = ncp_ren_or_mov_file_or_subdir(server, old_dir, __old_name,
+						      new_dir, __new_name);
+#ifdef CONFIG_NCPFS_STRONG
+	if ((error == 0x90 || error == 0x8B || error == -EACCES) &&
+			server->m.flags & NCP_MOUNT_STRONG) {	/* RO */
+		error = ncp_force_rename(old_dir, old_dentry, __old_name,
+					 new_dir, new_dentry, __new_name);
+	}
+#endif
+	switch (error) {
+		case 0x00:
+			DPRINTK("ncp renamed %s -> %s.\n",
+				old_dentry->d_name.name, new_dentry->d_name.name);
+			break;
+		case 0x9E:
+			error = -ENAMETOOLONG;
+			break;
+		case 0xFF:
+			error = -ENOENT;
+			break;
+		default:
+			error = -EACCES;
+			break;
+	}
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int ncp_mknod(struct inode * dir, struct dentry *dentry,
+		     int mode, dev_t rdev)
+{
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+	if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
+		DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%o\n", mode);
+		return ncp_create_new(dir, dentry, mode, rdev, 0);
+	}
+	return -EPERM; /* Strange, but true */
+}
+
+/* The following routines are taken directly from msdos-fs */
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+
+static int day_n[] =
+{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0};
+/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
+
+
+extern struct timezone sys_tz;
+
+static int utc2local(int time)
+{
+	return time - sys_tz.tz_minuteswest * 60;
+}
+
+static int local2utc(int time)
+{
+	return time + sys_tz.tz_minuteswest * 60;
+}
+
+/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
+int
+ncp_date_dos2unix(__le16 t, __le16 d)
+{
+	unsigned short time = le16_to_cpu(t), date = le16_to_cpu(d);
+	int month, year, secs;
+
+	/* first subtract and mask after that... Otherwise, if
+	   date == 0, bad things happen */
+	month = ((date >> 5) - 1) & 15;
+	year = date >> 9;
+	secs = (time & 31) * 2 + 60 * ((time >> 5) & 63) + (time >> 11) * 3600 +
+		86400 * ((date & 31) - 1 + day_n[month] + (year / 4) + 
+		year * 365 - ((year & 3) == 0 && month < 2 ? 1 : 0) + 3653);
+	/* days since 1.1.70 plus 80's leap day */
+	return local2utc(secs);
+}
+
+
+/* Convert linear UNIX date to a MS-DOS time/date pair. */
+void
+ncp_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
+{
+	int day, year, nl_day, month;
+
+	unix_date = utc2local(unix_date);
+	*time = cpu_to_le16(
+		(unix_date % 60) / 2 + (((unix_date / 60) % 60) << 5) +
+		(((unix_date / 3600) % 24) << 11));
+	day = unix_date / 86400 - 3652;
+	year = day / 365;
+	if ((year + 3) / 4 + 365 * year > day)
+		year--;
+	day -= (year + 3) / 4 + 365 * year;
+	if (day == 59 && !(year & 3)) {
+		nl_day = day;
+		month = 2;
+	} else {
+		nl_day = (year & 3) || day <= 59 ? day : day - 1;
+		for (month = 0; month < 12; month++)
+			if (day_n[month] > nl_day)
+				break;
+	}
+	*date = cpu_to_le16(nl_day - day_n[month - 1] + 1 + (month << 5) + (year << 9));
+}
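
The two helpers above work on the classic DOS representation: the 16-bit time word holds hours, minutes and seconds/2, the 16-bit date word holds the year since 1980, month and day. A quick worked decode of the same bit fields (the timezone correction is left out, and the sample values are only illustrative):

/* Sketch: interpret a DOS time/date pair the way the arithmetic in
 * ncp_date_dos2unix() does, printed directly instead of converted to
 * seconds since the epoch. */
#include <stdio.h>

int main(void)
{
	unsigned short time = (12 << 11) | (34 << 5) | (56 / 2);	/* 12:34:56 */
	unsigned short date = ((2005 - 1980) << 9) | (4 << 5) | 17;	/* 2005-04-17 */

	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       1980 + (date >> 9), (date >> 5) & 15, date & 31,
	       time >> 11, (time >> 5) & 63, (time & 31) * 2);
	return 0;
}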
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
new file mode 100644
index 0000000..4947d9b
--- /dev/null
+++ b/fs/ncpfs/file.c
@@ -0,0 +1,300 @@
+/*
+ *  file.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+
+#include <linux/ncp_fs.h>
+#include "ncplib_kernel.h"
+
+static int ncp_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	return 0;
+}
+
+/*
+ * Open a file with the specified read/write mode.
+ */
+int ncp_make_open(struct inode *inode, int right)
+{
+	int error;
+	int access;
+
+	error = -EINVAL;
+	if (!inode) {
+		printk(KERN_ERR "ncp_make_open: got NULL inode\n");
+		goto out;
+	}
+
+	DPRINTK("ncp_make_open: opened=%d, volume # %u, dir entry # %u\n",
+		atomic_read(&NCP_FINFO(inode)->opened), 
+		NCP_FINFO(inode)->volNumber, 
+		NCP_FINFO(inode)->dirEntNum);
+	error = -EACCES;
+	down(&NCP_FINFO(inode)->open_sem);
+	if (!atomic_read(&NCP_FINFO(inode)->opened)) {
+		struct ncp_entry_info finfo;
+		int result;
+
+		/* tries max. rights */
+		finfo.access = O_RDWR;
+		result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
+					inode, NULL, OC_MODE_OPEN,
+					0, AR_READ | AR_WRITE, &finfo);
+		if (!result)
+			goto update;
+		/* RDWR did not succeed, try read-only or write-only as requested */
+		switch (right) {
+			case O_RDONLY:
+				finfo.access = O_RDONLY;
+				result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
+					inode, NULL, OC_MODE_OPEN,
+					0, AR_READ, &finfo);
+				break;
+			case O_WRONLY:
+				finfo.access = O_WRONLY;
+				result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
+					inode, NULL, OC_MODE_OPEN,
+					0, AR_WRITE, &finfo);
+				break;
+		}
+		if (result) {
+			PPRINTK("ncp_make_open: failed, result=%d\n", result);
+			goto out_unlock;
+		}
+		/*
+		 * Update the inode information.
+		 */
+	update:
+		ncp_update_inode(inode, &finfo);
+		atomic_set(&NCP_FINFO(inode)->opened, 1);
+	}
+
+	access = NCP_FINFO(inode)->access;
+	PPRINTK("ncp_make_open: file open, access=%x\n", access);
+	if (access == right || access == O_RDWR) {
+		atomic_inc(&NCP_FINFO(inode)->opened);
+		error = 0;
+	}
+
+out_unlock:
+	up(&NCP_FINFO(inode)->open_sem);
+out:
+	return error;
+}
+
+static ssize_t
+ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	size_t already_read = 0;
+	off_t pos;
+	size_t bufsize;
+	int error;
+	void* freepage;
+	size_t freelen;
+
+	DPRINTK("ncp_file_read: enter %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	if (!ncp_conn_valid(NCP_SERVER(inode)))
+		return -EIO;
+
+	pos = *ppos;
+
+	if ((ssize_t) count < 0) {
+		return -EINVAL;
+	}
+	if (!count)
+		return 0;
+	if (pos > inode->i_sb->s_maxbytes)
+		return 0;
+	if (pos + count > inode->i_sb->s_maxbytes) {
+		count = inode->i_sb->s_maxbytes - pos;
+	}
+
+	error = ncp_make_open(inode, O_RDONLY);
+	if (error) {
+		DPRINTK(KERN_ERR "ncp_file_read: open failed, error=%d\n", error);
+		return error;
+	}
+
+	bufsize = NCP_SERVER(inode)->buffer_size;
+
+	error = -EIO;
+	freelen = ncp_read_bounce_size(bufsize);
+	freepage = vmalloc(freelen);
+	if (!freepage)
+		goto outrel;
+	error = 0;
+	/* First read in as much as possible for each bufsize. */
+	while (already_read < count) {
+		int read_this_time;
+		size_t to_read = min_t(unsigned int,
+				     bufsize - (pos % bufsize),
+				     count - already_read);
+
+		error = ncp_read_bounce(NCP_SERVER(inode),
+			 	NCP_FINFO(inode)->file_handle,
+				pos, to_read, buf, &read_this_time, 
+				freepage, freelen);
+		if (error) {
+			error = -EIO;	/* NW errno -> Linux errno */
+			break;
+		}
+		pos += read_this_time;
+		buf += read_this_time;
+		already_read += read_this_time;
+
+		if (read_this_time != to_read) {
+			break;
+		}
+	}
+	vfree(freepage);
+
+	*ppos = pos;
+
+	file_accessed(file);
+
+	DPRINTK("ncp_file_read: exit %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+outrel:
+	ncp_inode_close(inode);		
+	return already_read ? already_read : error;
+}
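
The read loop above never asks the server for more than bufsize bytes at a time and keeps each request aligned to a bufsize boundary via pos % bufsize; the write loop below does the same through a bounce buffer. A small sketch of just that chunking arithmetic (the 1024-byte buffer size is only illustrative):

/* Sketch of the request chunking used by ncp_file_read()/ncp_file_write(). */
#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	const size_t bufsize = 1024;		/* assumed server buffer size */
	size_t pos = 1000, count = 3000, done = 0;

	while (done < count) {
		size_t chunk = min_sz(bufsize - (pos % bufsize), count - done);

		printf("request %zu bytes at offset %zu\n", chunk, pos);
		pos += chunk;	/* a short transfer ends the real loop early */
		done += chunk;
	}
	return 0;
}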
+
+static ssize_t
+ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	size_t already_written = 0;
+	off_t pos;
+	size_t bufsize;
+	int errno;
+	void* bouncebuffer;
+
+	DPRINTK("ncp_file_write: enter %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+	if (!ncp_conn_valid(NCP_SERVER(inode)))
+		return -EIO;
+	if ((ssize_t) count < 0)
+		return -EINVAL;
+	pos = *ppos;
+	if (file->f_flags & O_APPEND) {
+		pos = inode->i_size;
+	}
+
+	if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
+		if (pos >= MAX_NON_LFS) {
+			send_sig(SIGXFSZ, current, 0);
+			return -EFBIG;
+		}
+		if (count > MAX_NON_LFS - (u32)pos) {
+			count = MAX_NON_LFS - (u32)pos;
+		}
+	}
+	if (pos >= inode->i_sb->s_maxbytes) {
+		if (count || pos > inode->i_sb->s_maxbytes) {
+			send_sig(SIGXFSZ, current, 0);
+			return -EFBIG;
+		}
+	}
+	if (pos + count > inode->i_sb->s_maxbytes) {
+		count = inode->i_sb->s_maxbytes - pos;
+	}
+	
+	if (!count)
+		return 0;
+	errno = ncp_make_open(inode, O_WRONLY);
+	if (errno) {
+		DPRINTK(KERN_ERR "ncp_file_write: open failed, error=%d\n", errno);
+		return errno;
+	}
+	bufsize = NCP_SERVER(inode)->buffer_size;
+
+	already_written = 0;
+
+	bouncebuffer = vmalloc(bufsize);
+	if (!bouncebuffer) {
+		errno = -EIO;	/* -ENOMEM */
+		goto outrel;
+	}
+	while (already_written < count) {
+		int written_this_time;
+		size_t to_write = min_t(unsigned int,
+				      bufsize - (pos % bufsize),
+				      count - already_written);
+
+		if (copy_from_user(bouncebuffer, buf, to_write)) {
+			errno = -EFAULT;
+			break;
+		}
+		if (ncp_write_kernel(NCP_SERVER(inode), 
+		    NCP_FINFO(inode)->file_handle,
+		    pos, to_write, bouncebuffer, &written_this_time) != 0) {
+			errno = -EIO;
+			break;
+		}
+		pos += written_this_time;
+		buf += written_this_time;
+		already_written += written_this_time;
+
+		if (written_this_time != to_write) {
+			break;
+		}
+	}
+	vfree(bouncebuffer);
+
+	inode_update_time(inode, 1);
+
+	*ppos = pos;
+
+	if (pos > inode->i_size) {
+		inode->i_size = pos;
+	}
+	DPRINTK("ncp_file_write: exit %s/%s\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+outrel:
+	ncp_inode_close(inode);		
+	return already_written ? already_written : errno;
+}
+
+static int ncp_release(struct inode *inode, struct file *file) {
+	if (ncp_make_closed(inode)) {
+		DPRINTK("ncp_release: failed to close\n");
+	}
+	return 0;
+}
+
+struct file_operations ncp_file_operations =
+{
+	.llseek		= remote_llseek,
+	.read		= ncp_file_read,
+	.write		= ncp_file_write,
+	.ioctl		= ncp_ioctl,
+	.mmap		= ncp_mmap,
+	.release	= ncp_release,
+	.fsync		= ncp_fsync,
+};
+
+struct inode_operations ncp_file_inode_operations =
+{
+	.setattr	= ncp_notify_change,
+};
diff --git a/fs/ncpfs/getopt.c b/fs/ncpfs/getopt.c
new file mode 100644
index 0000000..335b003
--- /dev/null
+++ b/fs/ncpfs/getopt.c
@@ -0,0 +1,75 @@
+/*
+ * getopt.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/errno.h>
+
+#include "getopt.h"
+
+/**
+ *	ncp_getopt - option parser
+ *	@caller: name of the caller, for error messages
+ *	@options: the options string
+ *	@opts: an array of &struct option entries controlling parser operations
+ *	@optopt: output; will contain the current option
+ *	@optarg: output; will contain the value (if one exists)
+ *	@value: output; may be NULL; will be overwritten with the integer
+ *		value of the current argument
+ *
+ *	Helper to parse options in the format used by mount ("a=b,c=d,e,f").
+ *	Returns opts->val if a matching entry in the 'opts' array is found,
+ *	0 when no more tokens are found, or a negative error code if an
+ *	error is encountered.
+ */
+int ncp_getopt(const char *caller, char **options, const struct ncp_option *opts,
+	       char **optopt, char **optarg, unsigned long *value)
+{
+	char *token;
+	char *val;
+
+	do {
+		if ((token = strsep(options, ",")) == NULL)
+			return 0;
+	} while (*token == '\0');
+	if (optopt)
+		*optopt = token;
+
+	if ((val = strchr (token, '=')) != NULL) {
+		*val++ = 0;
+	}
+	*optarg = val;
+	for (; opts->name; opts++) {
+		if (!strcmp(opts->name, token)) {
+			if (!val) {
+				if (opts->has_arg & OPT_NOPARAM) {
+					return opts->val;
+				}
+				printk(KERN_INFO "%s: the %s option requires an argument\n",
+				       caller, token);
+				return -EINVAL;
+			}
+			if (opts->has_arg & OPT_INT) {
+				char* v;
+
+				*value = simple_strtoul(val, &v, 0);
+				if (!*v) {
+					return opts->val;
+				}
+				printk(KERN_INFO "%s: invalid numeric value in %s=%s\n",
+					caller, token, val);
+				return -EDOM;
+			}
+			if (opts->has_arg & OPT_STRING) {
+				return opts->val;
+			}
+			printk(KERN_INFO "%s: unexpected argument %s to the %s option\n",
+				caller, val, token);
+			return -EINVAL;
+		}
+	}
+	printk(KERN_INFO "%s: Unrecognized mount option %s\n", caller, token);
+	return -EOPNOTSUPP;
+}
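
ncp_getopt() consumes a comma-separated option string one "name" or "name=value" token per call. A userspace sketch of the same strsep()/strchr() tokenization (strsep() here is the libc counterpart of the kernel helper; the option names and values are only illustrative):

/* Sketch of the "a=b,c=d,e" tokenization performed by ncp_getopt(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char options[] = "uid=500,gid=100,version=5";
	char *opts = options;
	char *token;

	while ((token = strsep(&opts, ",")) != NULL) {
		char *val;

		if (*token == '\0')
			continue;		/* skip empty tokens, as ncp_getopt() does */
		val = strchr(token, '=');
		if (val)
			*val++ = '\0';
		printf("option '%s' value %lu\n", token,
		       val ? strtoul(val, NULL, 0) : 0UL);
	}
	return 0;
}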
diff --git a/fs/ncpfs/getopt.h b/fs/ncpfs/getopt.h
new file mode 100644
index 0000000..cccc007
--- /dev/null
+++ b/fs/ncpfs/getopt.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_GETOPT_H
+#define _LINUX_GETOPT_H
+
+#define OPT_NOPARAM	1
+#define OPT_INT		2
+#define OPT_STRING	4
+struct ncp_option {
+	const char *name;
+	unsigned int has_arg;
+	int val;
+};
+
+extern int ncp_getopt(const char *caller, char **options, const struct ncp_option *opts,
+		      char **optopt, char **optarg, unsigned long *value);
+
+#endif /* _LINUX_GETOPT_H */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
new file mode 100644
index 0000000..44795d2
--- /dev/null
+++ b/fs/ncpfs/inode.c
@@ -0,0 +1,1012 @@
+/*
+ *  inode.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified for big endian by J.F. Chadima and David S. Miller
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *  Modified 1998 Wolfram Pienkoss for NLS
+ *  Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/vfs.h>
+
+#include <linux/ncp_fs.h>
+
+#include <net/sock.h>
+
+#include "ncplib_kernel.h"
+#include "getopt.h"
+
+static void ncp_delete_inode(struct inode *);
+static void ncp_put_super(struct super_block *);
+static int  ncp_statfs(struct super_block *, struct kstatfs *);
+
+static kmem_cache_t * ncp_inode_cachep;
+
+static struct inode *ncp_alloc_inode(struct super_block *sb)
+{
+	struct ncp_inode_info *ei;
+	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void ncp_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		init_MUTEX(&ei->open_sem);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+ 
+static int init_inodecache(void)
+{
+	ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
+					     sizeof(struct ncp_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (ncp_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(ncp_inode_cachep))
+		printk(KERN_INFO "ncp_inode_cache: not all structures were freed\n");
+}
+
+static int ncp_remount(struct super_block *sb, int *flags, char* data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+static struct super_operations ncp_sops =
+{
+	.alloc_inode	= ncp_alloc_inode,
+	.destroy_inode	= ncp_destroy_inode,
+	.drop_inode	= generic_delete_inode,
+	.delete_inode	= ncp_delete_inode,
+	.put_super	= ncp_put_super,
+	.statfs		= ncp_statfs,
+	.remount_fs	= ncp_remount,
+};
+
+extern struct dentry_operations ncp_root_dentry_operations;
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern struct address_space_operations ncp_symlink_aops;
+extern int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
+/*
+ * Fill in the ncpfs-specific information in the inode.
+ */
+static void ncp_update_dirent(struct inode *inode, struct ncp_entry_info *nwinfo)
+{
+	NCP_FINFO(inode)->DosDirNum = nwinfo->i.DosDirNum;
+	NCP_FINFO(inode)->dirEntNum = nwinfo->i.dirEntNum;
+	NCP_FINFO(inode)->volNumber = nwinfo->volume;
+}
+
+void ncp_update_inode(struct inode *inode, struct ncp_entry_info *nwinfo)
+{
+	ncp_update_dirent(inode, nwinfo);
+	NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
+	NCP_FINFO(inode)->access = nwinfo->access;
+	memcpy(NCP_FINFO(inode)->file_handle, nwinfo->file_handle,
+			sizeof(nwinfo->file_handle));
+	DPRINTK("ncp_update_inode: updated %s, volnum=%d, dirent=%u\n",
+		nwinfo->i.entryName, NCP_FINFO(inode)->volNumber,
+		NCP_FINFO(inode)->dirEntNum);
+}
+
+static void ncp_update_dates(struct inode *inode, struct nw_info_struct *nwi)
+{
+	/* NFS namespace mode overrides others if it's set. */
+	DPRINTK(KERN_DEBUG "ncp_update_dates_and_mode: (%s) nfs.mode=0%o\n",
+		nwi->entryName, nwi->nfs.mode);
+	if (nwi->nfs.mode) {
+		/* XXX Security? */
+		inode->i_mode = nwi->nfs.mode;
+	}
+
+	inode->i_blocks = (inode->i_size + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT;
+
+	inode->i_mtime.tv_sec = ncp_date_dos2unix(nwi->modifyTime, nwi->modifyDate);
+	inode->i_ctime.tv_sec = ncp_date_dos2unix(nwi->creationTime, nwi->creationDate);
+	inode->i_atime.tv_sec = ncp_date_dos2unix(0, nwi->lastAccessDate);
+	inode->i_atime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+}
+
+static void ncp_update_attrs(struct inode *inode, struct ncp_entry_info *nwinfo)
+{
+	struct nw_info_struct *nwi = &nwinfo->i;
+	struct ncp_server *server = NCP_SERVER(inode);
+
+	if (nwi->attributes & aDIR) {
+		inode->i_mode = server->m.dir_mode;
+		/* for directories dataStreamSize seems to be some
+		   Object ID ??? */
+		inode->i_size = NCP_BLOCK_SIZE;
+	} else {
+		inode->i_mode = server->m.file_mode;
+		inode->i_size = le32_to_cpu(nwi->dataStreamSize);
+#ifdef CONFIG_NCPFS_EXTRAS
+		if ((server->m.flags & (NCP_MOUNT_EXTRAS|NCP_MOUNT_SYMLINKS)) 
+		 && (nwi->attributes & aSHARED)) {
+			switch (nwi->attributes & (aHIDDEN|aSYSTEM)) {
+				case aHIDDEN:
+					if (server->m.flags & NCP_MOUNT_SYMLINKS) {
+						if (/* (inode->i_size >= NCP_MIN_SYMLINK_SIZE)
+						 && */ (inode->i_size <= NCP_MAX_SYMLINK_SIZE)) {
+							inode->i_mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
+							NCP_FINFO(inode)->flags |= NCPI_KLUDGE_SYMLINK;
+							break;
+						}
+					}
+					/* FALLTHROUGH */
+				case 0:
+					if (server->m.flags & NCP_MOUNT_EXTRAS)
+						inode->i_mode |= S_IRUGO;
+					break;
+				case aSYSTEM:
+					if (server->m.flags & NCP_MOUNT_EXTRAS)
+						inode->i_mode |= (inode->i_mode >> 2) & S_IXUGO;
+					break;
+				/* case aSYSTEM|aHIDDEN: */
+				default:
+					/* reserved combination */
+					break;
+			}
+		}
+#endif
+	}
+	if (nwi->attributes & aRONLY) inode->i_mode &= ~S_IWUGO;
+}
+
+void ncp_update_inode2(struct inode* inode, struct ncp_entry_info *nwinfo)
+{
+	NCP_FINFO(inode)->flags = 0;
+	if (!atomic_read(&NCP_FINFO(inode)->opened)) {
+		NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
+		ncp_update_attrs(inode, nwinfo);
+	}
+
+	ncp_update_dates(inode, &nwinfo->i);
+	ncp_update_dirent(inode, nwinfo);
+}
+
+/*
+ * Fill in the inode based on the ncp_entry_info structure.
+ */
+static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
+{
+	struct ncp_server *server = NCP_SERVER(inode);
+
+	NCP_FINFO(inode)->flags = 0;
+	
+	ncp_update_attrs(inode, nwinfo);
+
+	DDPRINTK("ncp_read_inode: inode->i_mode = %u\n", inode->i_mode);
+
+	inode->i_nlink = 1;
+	inode->i_uid = server->m.uid;
+	inode->i_gid = server->m.gid;
+	inode->i_blksize = NCP_BLOCK_SIZE;
+
+	ncp_update_dates(inode, &nwinfo->i);
+	ncp_update_inode(inode, nwinfo);
+}
+
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+static struct inode_operations ncp_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.setattr	= ncp_notify_change,
+};
+#endif
+
+/*
+ * Get a new inode.
+ */
+struct inode * 
+ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
+{
+	struct inode *inode;
+
+	if (info == NULL) {
+		printk(KERN_ERR "ncp_iget: info is NULL\n");
+		return NULL;
+	}
+
+	inode = new_inode(sb);
+	if (inode) {
+		atomic_set(&NCP_FINFO(inode)->opened, info->opened);
+
+		inode->i_ino = info->ino;
+		ncp_set_attr(inode, info);
+		if (S_ISREG(inode->i_mode)) {
+			inode->i_op = &ncp_file_inode_operations;
+			inode->i_fop = &ncp_file_operations;
+		} else if (S_ISDIR(inode->i_mode)) {
+			inode->i_op = &ncp_dir_inode_operations;
+			inode->i_fop = &ncp_dir_operations;
+#ifdef CONFIG_NCPFS_NFS_NS
+		} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+			init_special_inode(inode, inode->i_mode,
+				new_decode_dev(info->i.nfs.rdev));
+#endif
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+		} else if (S_ISLNK(inode->i_mode)) {
+			inode->i_op = &ncp_symlink_inode_operations;
+			inode->i_data.a_ops = &ncp_symlink_aops;
+#endif
+		} else {
+			make_bad_inode(inode);
+		}
+		insert_inode_hash(inode);
+	} else
+		printk(KERN_ERR "ncp_iget: iget failed!\n");
+	return inode;
+}
+
+static void
+ncp_delete_inode(struct inode *inode)
+{
+	if (S_ISDIR(inode->i_mode)) {
+		DDPRINTK("ncp_delete_inode: put directory %ld\n", inode->i_ino);
+	}
+
+	if (ncp_make_closed(inode) != 0) {
+		/* We can't do anything but complain. */
+		printk(KERN_ERR "ncp_delete_inode: could not close\n");
+	}
+	clear_inode(inode);
+}
+
+static void ncp_stop_tasks(struct ncp_server *server) {
+	struct sock* sk = server->ncp_sock->sk;
+		
+	sk->sk_error_report = server->error_report;
+	sk->sk_data_ready   = server->data_ready;
+	sk->sk_write_space  = server->write_space;
+	del_timer_sync(&server->timeout_tm);
+	flush_scheduled_work();
+}
+
+static const struct ncp_option ncp_opts[] = {
+	{ "uid",	OPT_INT,	'u' },
+	{ "gid",	OPT_INT,	'g' },
+	{ "owner",	OPT_INT,	'o' },
+	{ "mode",	OPT_INT,	'm' },
+	{ "dirmode",	OPT_INT,	'd' },
+	{ "timeout",	OPT_INT,	't' },
+	{ "retry",	OPT_INT,	'r' },
+	{ "flags",	OPT_INT,	'f' },
+	{ "wdogpid",	OPT_INT,	'w' },
+	{ "ncpfd",	OPT_INT,	'n' },
+	{ "infofd",	OPT_INT,	'i' },	/* v5 */
+	{ "version",	OPT_INT,	'v' },
+	{ NULL,		0,		0 } };
+
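+/* Illustrative only (values are made up): a text-form option string using
+ * the keywords above might look like
+ *   "version=5,uid=500,gid=100,mode=0600,dirmode=0700,ncpfd=4"
+ * ncp_fill_super() falls back to ncp_parse_options() whenever the mount
+ * data is not a known binary structure but starts with the text "vers".
+ */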
+static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) {
+	int optval;
+	char *optarg;
+	unsigned long optint;
+	int version = 0;
+
+	data->flags = 0;
+	data->int_flags = 0;
+	data->mounted_uid = 0;
+	data->wdog_pid = -1;
+	data->ncp_fd = ~0;
+	data->time_out = 10;
+	data->retry_count = 20;
+	data->uid = 0;
+	data->gid = 0;
+	data->file_mode = 0600;
+	data->dir_mode = 0700;
+	data->info_fd = -1;
+	data->mounted_vol[0] = 0;
+	
+	while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
+		if (optval < 0)
+			return optval;
+		switch (optval) {
+			case 'u':
+				data->uid = optint;
+				break;
+			case 'g':
+				data->gid = optint;
+				break;
+			case 'o':
+				data->mounted_uid = optint;
+				break;
+			case 'm':
+				data->file_mode = optint;
+				break;
+			case 'd':
+				data->dir_mode = optint;
+				break;
+			case 't':
+				data->time_out = optint;
+				break;
+			case 'r':
+				data->retry_count = optint;
+				break;
+			case 'f':
+				data->flags = optint;
+				break;
+			case 'w':
+				data->wdog_pid = optint;
+				break;
+			case 'n':
+				data->ncp_fd = optint;
+				break;
+			case 'i':
+				data->info_fd = optint;
+				break;
+			case 'v':
+				if (optint < NCP_MOUNT_VERSION_V4) {
+					return -ECHRNG;
+				}
+				if (optint > NCP_MOUNT_VERSION_V5) {
+					return -ECHRNG;
+				}
+				version = optint;
+				break;
+			
+		}
+	}
+	return 0;
+}
+
+static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
+{
+	struct ncp_mount_data_kernel data;
+	struct ncp_server *server;
+	struct file *ncp_filp;
+	struct inode *root_inode;
+	struct inode *sock_inode;
+	struct socket *sock;
+	int error;
+	int default_bufsize;
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+	int options;
+#endif
+	struct ncp_entry_info finfo;
+
+	server = kmalloc(sizeof(struct ncp_server), GFP_KERNEL);
+	if (!server)
+		return -ENOMEM;
+	sb->s_fs_info = server;
+	memset(server, 0, sizeof(struct ncp_server));
+
+	error = -EFAULT;
+	if (raw_data == NULL)
+		goto out;
+	switch (*(int*)raw_data) {
+		case NCP_MOUNT_VERSION:
+			{
+				struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data;
+
+				data.flags = md->flags;
+				data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
+				data.mounted_uid = md->mounted_uid;
+				data.wdog_pid = md->wdog_pid;
+				data.ncp_fd = md->ncp_fd;
+				data.time_out = md->time_out;
+				data.retry_count = md->retry_count;
+				data.uid = md->uid;
+				data.gid = md->gid;
+				data.file_mode = md->file_mode;
+				data.dir_mode = md->dir_mode;
+				data.info_fd = -1;
+				memcpy(data.mounted_vol, md->mounted_vol,
+					NCP_VOLNAME_LEN+1);
+			}
+			break;
+		case NCP_MOUNT_VERSION_V4:
+			{
+				struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
+
+				data.flags = md->flags;
+				data.int_flags = 0;
+				data.mounted_uid = md->mounted_uid;
+				data.wdog_pid = md->wdog_pid;
+				data.ncp_fd = md->ncp_fd;
+				data.time_out = md->time_out;
+				data.retry_count = md->retry_count;
+				data.uid = md->uid;
+				data.gid = md->gid;
+				data.file_mode = md->file_mode;
+				data.dir_mode = md->dir_mode;
+				data.info_fd = -1;
+				data.mounted_vol[0] = 0;
+			}
+			break;
+		default:
+			error = -ECHRNG;
+			if (memcmp(raw_data, "vers", 4) == 0) {
+				error = ncp_parse_options(&data, raw_data);
+			}
+			if (error)
+				goto out;
+			break;
+	}
+	error = -EBADF;
+	ncp_filp = fget(data.ncp_fd);
+	if (!ncp_filp)
+		goto out;
+	error = -ENOTSOCK;
+	sock_inode = ncp_filp->f_dentry->d_inode;
+	if (!S_ISSOCK(sock_inode->i_mode))
+		goto out_fput;
+	sock = SOCKET_I(sock_inode);
+	if (!sock)
+		goto out_fput;
+		
+	if (sock->type == SOCK_STREAM)
+		default_bufsize = 0xF000;
+	else
+		default_bufsize = 1024;
+
+	sb->s_flags |= MS_NODIRATIME;	/* probably even noatime */
+	sb->s_maxbytes = 0xFFFFFFFFU;
+	sb->s_blocksize = 1024;	/* Eh...  Is this correct? */
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = NCP_SUPER_MAGIC;
+	sb->s_op = &ncp_sops;
+
+	server = NCP_SBP(sb);
+	memset(server, 0, sizeof(*server));
+
+	server->ncp_filp = ncp_filp;
+	server->ncp_sock = sock;
+	
+	if (data.info_fd != -1) {
+		struct socket *info_sock;
+
+		error = -EBADF;
+		server->info_filp = fget(data.info_fd);
+		if (!server->info_filp)
+			goto out_fput;
+		error = -ENOTSOCK;
+		sock_inode = server->info_filp->f_dentry->d_inode;
+		if (!S_ISSOCK(sock_inode->i_mode))
+			goto out_fput2;
+		info_sock = SOCKET_I(sock_inode);
+		if (!info_sock)
+			goto out_fput2;
+		error = -EBADFD;
+		if (info_sock->type != SOCK_STREAM)
+			goto out_fput2;
+		server->info_sock = info_sock;
+	}
+
+/*	server->lock = 0;	*/
+	init_MUTEX(&server->sem);
+	server->packet = NULL;
+/*	server->buffer_size = 0;	*/
+/*	server->conn_status = 0;	*/
+/*	server->root_dentry = NULL;	*/
+/*	server->root_setuped = 0;	*/
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+/*	server->sign_wanted = 0;	*/
+/*	server->sign_active = 0;	*/
+#endif
+	server->auth.auth_type = NCP_AUTH_NONE;
+/*	server->auth.object_name_len = 0;	*/
+/*	server->auth.object_name = NULL;	*/
+/*	server->auth.object_type = 0;		*/
+/*	server->priv.len = 0;			*/
+/*	server->priv.data = NULL;		*/
+
+	server->m = data;
+	/* Although anything producing this is buggy, it happens
+	   now because of PATH_MAX changes. */
+	if (server->m.time_out < 1) {
+		server->m.time_out = 10;
+		printk(KERN_INFO "You need to recompile your ncpfs utils..\n");
+	}
+	server->m.time_out = server->m.time_out * HZ / 100;
+	server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG;
+	server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR;
+
+#ifdef CONFIG_NCPFS_NLS
+	/* load the default NLS charsets */
+	server->nls_vol = load_nls_default();
+	server->nls_io = load_nls_default();
+#endif /* CONFIG_NCPFS_NLS */
+
+	server->dentry_ttl = 0;	/* no caching */
+
+	INIT_LIST_HEAD(&server->tx.requests);
+	init_MUTEX(&server->rcv.creq_sem);
+	server->tx.creq		= NULL;
+	server->rcv.creq	= NULL;
+	server->data_ready	= sock->sk->sk_data_ready;
+	server->write_space	= sock->sk->sk_write_space;
+	server->error_report	= sock->sk->sk_error_report;
+	sock->sk->sk_user_data	= server;
+
+	init_timer(&server->timeout_tm);
+#undef NCP_PACKET_SIZE
+#define NCP_PACKET_SIZE 131072
+	error = -ENOMEM;
+	server->packet_size = NCP_PACKET_SIZE;
+	server->packet = vmalloc(NCP_PACKET_SIZE);
+	if (server->packet == NULL)
+		goto out_nls;
+
+	sock->sk->sk_data_ready	  = ncp_tcp_data_ready;
+	sock->sk->sk_error_report = ncp_tcp_error_report;
+	if (sock->type == SOCK_STREAM) {
+		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
+		server->rcv.len = 10;
+		server->rcv.state = 0;
+		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
+		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+		sock->sk->sk_write_space = ncp_tcp_write_space;
+	} else {
+		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
+		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+		server->timeout_tm.data = (unsigned long)server;
+		server->timeout_tm.function = ncpdgram_timeout_call;
+	}
+
+	ncp_lock_server(server);
+	error = ncp_connect(server);
+	ncp_unlock_server(server);
+	if (error < 0)
+		goto out_packet;
+	DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));
+
+	error = -EMSGSIZE;	/* -EREMOTESIDEINCOMPATIBLE */
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+	if (ncp_negotiate_size_and_options(server, default_bufsize,
+		NCP_DEFAULT_OPTIONS, &(server->buffer_size), &options) == 0)
+	{
+		if (options != NCP_DEFAULT_OPTIONS)
+		{
+			if (ncp_negotiate_size_and_options(server, 
+				default_bufsize,
+				options & 2, 
+				&(server->buffer_size), &options) != 0)
+				
+			{
+				goto out_disconnect;
+			}
+		}
+		if (options & 2)
+			server->sign_wanted = 1;
+	}
+	else 
+#endif	/* CONFIG_NCPFS_PACKET_SIGNING */
+	if (ncp_negotiate_buffersize(server, default_bufsize,
+  				     &(server->buffer_size)) != 0)
+		goto out_disconnect;
+	DPRINTK("ncpfs: bufsize = %d\n", server->buffer_size);
+
+	memset(&finfo, 0, sizeof(finfo));
+	finfo.i.attributes	= aDIR;
+	finfo.i.dataStreamSize	= 0;	/* ignored */
+	finfo.i.dirEntNum	= 0;
+	finfo.i.DosDirNum	= 0;
+#ifdef CONFIG_NCPFS_SMALLDOS
+	finfo.i.NSCreator	= NW_NS_DOS;
+#endif
+	finfo.volume		= NCP_NUMBER_OF_VOLUMES;
+	/* set dates of mountpoint to Jan 1, 1986; 00:00 */
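+	/* DOS date format: year-1980 in bits 15-9, month in bits 8-5, day in
+	 * bits 4-0, so 0x0C21 == (6 << 9) | (1 << 5) | 1 == Jan 1, 1986;
+	 * 0x0000 in the companion time format is midnight. */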
+	finfo.i.creationTime	= finfo.i.modifyTime
+				= cpu_to_le16(0x0000);
+	finfo.i.creationDate	= finfo.i.modifyDate
+				= finfo.i.lastAccessDate
+				= cpu_to_le16(0x0C21);
+	finfo.i.nameLen		= 0;
+	finfo.i.entryName[0]	= '\0';
+
+	finfo.opened		= 0;
+	finfo.ino		= 2;	/* tradition */
+
+	server->name_space[finfo.volume] = NW_NS_DOS;
+
+	error = -ENOMEM;
+	root_inode = ncp_iget(sb, &finfo);
+	if (!root_inode)
+		goto out_disconnect;
+	DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root)
+		goto out_no_root;
+	sb->s_root->d_op = &ncp_root_dentry_operations;
+	return 0;
+
+out_no_root:
+	iput(root_inode);
+out_disconnect:
+	ncp_lock_server(server);
+	ncp_disconnect(server);
+	ncp_unlock_server(server);
+out_packet:
+	ncp_stop_tasks(server);
+	vfree(server->packet);
+out_nls:
+#ifdef CONFIG_NCPFS_NLS
+	unload_nls(server->nls_io);
+	unload_nls(server->nls_vol);
+#endif
+out_fput2:
+	if (server->info_filp)
+		fput(server->info_filp);
+out_fput:
+	/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
+	 * 
+	 * The previously used put_filp(ncp_filp); was bogus, since
+	 * it doesn't do proper unlocking.
+	 */
+	fput(ncp_filp);
+out:
+	sb->s_fs_info = NULL;
+	kfree(server);
+	return error;
+}
+
+static void ncp_put_super(struct super_block *sb)
+{
+	struct ncp_server *server = NCP_SBP(sb);
+
+	ncp_lock_server(server);
+	ncp_disconnect(server);
+	ncp_unlock_server(server);
+
+	ncp_stop_tasks(server);
+
+#ifdef CONFIG_NCPFS_NLS
+	/* unload the NLS charsets */
+	if (server->nls_vol)
+	{
+		unload_nls(server->nls_vol);
+		server->nls_vol = NULL;
+	}
+	if (server->nls_io)
+	{
+		unload_nls(server->nls_io);
+		server->nls_io = NULL;
+	}
+#endif /* CONFIG_NCPFS_NLS */
+
+	if (server->info_filp)
+		fput(server->info_filp);
+	fput(server->ncp_filp);
+	kill_proc(server->m.wdog_pid, SIGTERM, 1);
+
+	if (server->priv.data) 
+		ncp_kfree_s(server->priv.data, server->priv.len);
+	if (server->auth.object_name)
+		ncp_kfree_s(server->auth.object_name, server->auth.object_name_len);
+	vfree(server->packet);
+	sb->s_fs_info = NULL;
+	kfree(server);
+}
+
+static int ncp_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct dentry* d;
+	struct inode* i;
+	struct ncp_inode_info* ni;
+	struct ncp_server* s;
+	struct ncp_volume_info vi;
+	int err;
+	__u8 dh;
+	
+	d = sb->s_root;
+	if (!d) {
+		goto dflt;
+	}
+	i = d->d_inode;
+	if (!i) {
+		goto dflt;
+	}
+	ni = NCP_FINFO(i);
+	if (!ni) {
+		goto dflt;
+	}
+	s = NCP_SBP(sb);
+	if (!s) {
+		goto dflt;
+	}
+	if (!s->m.mounted_vol[0]) {
+		goto dflt;
+	}
+
+	err = ncp_dirhandle_alloc(s, ni->volNumber, ni->DosDirNum, &dh);
+	if (err) {
+		goto dflt;
+	}
+	err = ncp_get_directory_info(s, dh, &vi);
+	ncp_dirhandle_free(s, dh);
+	if (err) {
+		goto dflt;
+	}
+	buf->f_type = NCP_SUPER_MAGIC;
+	buf->f_bsize = vi.sectors_per_block * 512;
+	buf->f_blocks = vi.total_blocks;
+	buf->f_bfree = vi.free_blocks;
+	buf->f_bavail = vi.free_blocks;
+	buf->f_files = vi.total_dir_entries;
+	buf->f_ffree = vi.available_dir_entries;
+	buf->f_namelen = 12;
+	return 0;
+
+	/* We cannot say how much disk space is left on a mounted
+	   NetWare Server, because free space is distributed over
+	   volumes, and the current user might have disk quotas. So
+	   free space is not that simple to determine. Our decision
+	   here is to err conservatively. */
+
+dflt:;
+	buf->f_type = NCP_SUPER_MAGIC;
+	buf->f_bsize = NCP_BLOCK_SIZE;
+	buf->f_blocks = 0;
+	buf->f_bfree = 0;
+	buf->f_bavail = 0;
+	buf->f_namelen = 12;
+	return 0;
+}
+
+int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int result = 0;
+	__le32 info_mask;
+	struct nw_modify_dos_info info;
+	struct ncp_server *server;
+
+	result = -EIO;
+
+	lock_kernel();	
+
+	server = NCP_SERVER(inode);
+	if ((!server) || !ncp_conn_valid(server))
+		goto out;
+
+	/* ageing the dentry to force validation */
+	ncp_age_dentry(server, dentry);
+
+	result = inode_change_ok(inode, attr);
+	if (result < 0)
+		goto out;
+
+	result = -EPERM;
+	if (((attr->ia_valid & ATTR_UID) &&
+	     (attr->ia_uid != server->m.uid)))
+		goto out;
+
+	if (((attr->ia_valid & ATTR_GID) &&
+	     (attr->ia_gid != server->m.gid)))
+		goto out;
+
+	if (((attr->ia_valid & ATTR_MODE) &&
+	     (attr->ia_mode &
+	      ~(S_IFREG | S_IFDIR | S_IRWXUGO))))
+		goto out;
+
+	info_mask = 0;
+	memset(&info, 0, sizeof(info));
+
+#if 1 
+        if ((attr->ia_valid & ATTR_MODE) != 0)
+        {
+		umode_t newmode = attr->ia_mode;
+
+		info_mask |= DM_ATTRIBUTES;
+
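+		/* Map the requested Unix mode onto NetWare DOS attributes: with
+		 * the "extras" mount flag, execute or read bits beyond the
+		 * default file_mode add aSHARED (and aSYSTEM for execute), and
+		 * a mode without any write bit sets the read-only/rename-
+		 * inhibit/delete-inhibit attributes (cleared otherwise). */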
+                if (S_ISDIR(inode->i_mode)) {
+                	newmode &= server->m.dir_mode;
+		} else {
+#ifdef CONFIG_NCPFS_EXTRAS			
+			if (server->m.flags & NCP_MOUNT_EXTRAS) {
+				/* any non-default execute bit set */
+				if (newmode & ~server->m.file_mode & S_IXUGO)
+					info.attributes |= aSHARED | aSYSTEM;
+				/* read for group/world and not in default file_mode */
+				else if (newmode & ~server->m.file_mode & S_IRUGO)
+					info.attributes |= aSHARED;
+			} else
+#endif
+				newmode &= server->m.file_mode;			
+                }
+                if (newmode & S_IWUGO)
+                	info.attributes &= ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
+                else
+			info.attributes |=  (aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
+
+#ifdef CONFIG_NCPFS_NFS_NS
+		if (ncp_is_nfs_extras(server, NCP_FINFO(inode)->volNumber)) {
+			result = ncp_modify_nfs_info(server,
+						     NCP_FINFO(inode)->volNumber,
+						     NCP_FINFO(inode)->dirEntNum,
+						     attr->ia_mode, 0);
+			if (result != 0)
+				goto out;
+			info.attributes &= ~(aSHARED | aSYSTEM);
+			{
+				/* mark partial success */
+				struct iattr tmpattr;
+				
+				tmpattr.ia_valid = ATTR_MODE;
+				tmpattr.ia_mode = attr->ia_mode;
+
+				result = inode_setattr(inode, &tmpattr);
+				if (result)
+					goto out;
+			}
+		}
+#endif
+        }
+#endif
+
+	/* Do SIZE before attributes, otherwise mtime together with size does not work...
+	 */
+	if ((attr->ia_valid & ATTR_SIZE) != 0) {
+		int written;
+
+		DPRINTK("ncpfs: trying to change size to %ld\n",
+			attr->ia_size);
+
+		if ((result = ncp_make_open(inode, O_WRONLY)) < 0) {
+			result = -EACCES;
+			goto out;
+		}
+		ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
+			  attr->ia_size, 0, "", &written);
+
+		/* According to ndir, the changes only take effect after
+		   closing the file */
+		ncp_inode_close(inode);
+		result = ncp_make_closed(inode);
+		if (result)
+			goto out;
+		{
+			struct iattr tmpattr;
+			
+			tmpattr.ia_valid = ATTR_SIZE;
+			tmpattr.ia_size = attr->ia_size;
+			
+			result = inode_setattr(inode, &tmpattr);
+			if (result)
+				goto out;
+		}
+	}
+	if ((attr->ia_valid & ATTR_CTIME) != 0) {
+		info_mask |= (DM_CREATE_TIME | DM_CREATE_DATE);
+		ncp_date_unix2dos(attr->ia_ctime.tv_sec,
+			     &info.creationTime, &info.creationDate);
+	}
+	if ((attr->ia_valid & ATTR_MTIME) != 0) {
+		info_mask |= (DM_MODIFY_TIME | DM_MODIFY_DATE);
+		ncp_date_unix2dos(attr->ia_mtime.tv_sec,
+				  &info.modifyTime, &info.modifyDate);
+	}
+	if ((attr->ia_valid & ATTR_ATIME) != 0) {
+		__le16 dummy;
+		info_mask |= (DM_LAST_ACCESS_DATE);
+		ncp_date_unix2dos(attr->ia_atime.tv_sec,
+				  &dummy, &info.lastAccessDate);
+	}
+	if (info_mask != 0) {
+		result = ncp_modify_file_or_subdir_dos_info(NCP_SERVER(inode),
+				      inode, info_mask, &info);
+		if (result != 0) {
+			result = -EACCES;
+
+			if (info_mask == (DM_CREATE_TIME | DM_CREATE_DATE)) {
+				/* NetWare seems not to allow this. I
+				   do not know why. So, just tell the
+				   user everything went fine. This is
+				   a terrible hack, but I do not know
+				   how to do this correctly. */
+				result = 0;
+			} else
+				goto out;
+		}
+#ifdef CONFIG_NCPFS_STRONG		
+		if ((!result) && (info_mask & DM_ATTRIBUTES))
+			NCP_FINFO(inode)->nwattr = info.attributes;
+#endif
+	}
+	if (!result)
+		result = inode_setattr(inode, attr);
+out:
+	unlock_kernel();
+	return result;
+}
+
+#ifdef DEBUG_NCP_MALLOC
+int ncp_malloced;
+int ncp_current_malloced;
+#endif
+
+static struct super_block *ncp_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, ncp_fill_super);
+}
+
+static struct file_system_type ncp_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ncpfs",
+	.get_sb		= ncp_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static int __init init_ncp_fs(void)
+{
+	int err;
+	DPRINTK("ncpfs: init_module called\n");
+
+#ifdef DEBUG_NCP_MALLOC
+	ncp_malloced = 0;
+	ncp_current_malloced = 0;
+#endif
+	err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&ncp_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_ncp_fs(void)
+{
+	DPRINTK("ncpfs: cleanup_module called\n");
+	unregister_filesystem(&ncp_fs_type);
+	destroy_inodecache();
+#ifdef DEBUG_NCP_MALLOC
+	PRINTK("ncp_malloced: %d\n", ncp_malloced);
+	PRINTK("ncp_current_malloced: %d\n", ncp_current_malloced);
+#endif
+}
+
+module_init(init_ncp_fs)
+module_exit(exit_ncp_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
new file mode 100644
index 0000000..88df793
--- /dev/null
+++ b/fs/ncpfs/ioctl.c
@@ -0,0 +1,649 @@
+/*
+ *  ioctl.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *  Modified 1998, 1999 Wolfram Pienkoss for NLS
+ *
+ */
+
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/highuid.h>
+#include <linux/vmalloc.h>
+
+#include <linux/ncp_fs.h>
+
+#include "ncplib_kernel.h"
+
+/* maximum limit for ncp_objectname_ioctl */
+#define NCP_OBJECT_NAME_MAX_LEN	4096
+/* maximum limit for ncp_privatedata_ioctl */
+#define NCP_PRIVATE_DATA_MAX_LEN 8192
+/* maximum negotiable packet size */
+#define NCP_PACKET_SIZE_INTERNAL 65536
+
+static int
+ncp_get_fs_info(struct ncp_server* server, struct inode* inode, struct ncp_fs_info __user *arg)
+{
+	struct ncp_fs_info info;
+
+	if ((permission(inode, MAY_WRITE, NULL) != 0)
+	    && (current->uid != server->m.mounted_uid)) {
+		return -EACCES;
+	}
+	if (copy_from_user(&info, arg, sizeof(info)))
+		return -EFAULT;
+
+	if (info.version != NCP_GET_FS_INFO_VERSION) {
+		DPRINTK("info.version invalid: %d\n", info.version);
+		return -EINVAL;
+	}
+	/* TODO: info.addr = server->m.serv_addr; */
+	SET_UID(info.mounted_uid, server->m.mounted_uid);
+	info.connection		= server->connection;
+	info.buffer_size	= server->buffer_size;
+	info.volume_number	= NCP_FINFO(inode)->volNumber;
+	info.directory_id	= NCP_FINFO(inode)->DosDirNum;
+
+	if (copy_to_user(arg, &info, sizeof(info)))
+		return -EFAULT;
+	return 0;
+}
+
+static int
+ncp_get_fs_info_v2(struct ncp_server* server, struct inode* inode, struct ncp_fs_info_v2 __user * arg)
+{
+	struct ncp_fs_info_v2 info2;
+
+	if ((permission(inode, MAY_WRITE, NULL) != 0)
+	    && (current->uid != server->m.mounted_uid)) {
+		return -EACCES;
+	}
+	if (copy_from_user(&info2, arg, sizeof(info2)))
+		return -EFAULT;
+
+	if (info2.version != NCP_GET_FS_INFO_VERSION_V2) {
+		DPRINTK("info.version invalid: %d\n", info2.version);
+		return -EINVAL;
+	}
+	info2.mounted_uid   = server->m.mounted_uid;
+	info2.connection    = server->connection;
+	info2.buffer_size   = server->buffer_size;
+	info2.volume_number = NCP_FINFO(inode)->volNumber;
+	info2.directory_id  = NCP_FINFO(inode)->DosDirNum;
+	info2.dummy1 = info2.dummy2 = info2.dummy3 = 0;
+
+	if (copy_to_user(arg, &info2, sizeof(info2)))
+		return -EFAULT;
+	return 0;
+}
+
+#ifdef CONFIG_NCPFS_NLS
+/* Here we select the iocharset and the codepage for NLS.
+ * Thanks to Petr Vandrovec for the idea and many hints.
+ */
+static int
+ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
+{
+	struct ncp_nls_ioctl user;
+	struct nls_table *codepage;
+	struct nls_table *iocharset;
+	struct nls_table *oldset_io;
+	struct nls_table *oldset_cp;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (server->root_setuped)
+		return -EBUSY;
+
+	if (copy_from_user(&user, arg, sizeof(user)))
+		return -EFAULT;
+
+	codepage = NULL;
+	user.codepage[NCP_IOCSNAME_LEN] = 0;
+	if (!user.codepage[0] || !strcmp(user.codepage, "default"))
+		codepage = load_nls_default();
+	else {
+		codepage = load_nls(user.codepage);
+		if (!codepage) {
+			return -EBADRQC;
+		}
+	}
+
+	iocharset = NULL;
+	user.iocharset[NCP_IOCSNAME_LEN] = 0;
+	if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) {
+		iocharset = load_nls_default();
+		NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
+	} else if (!strcmp(user.iocharset, "utf8")) {
+		iocharset = load_nls_default();
+		NCP_SET_FLAG(server, NCP_FLAG_UTF8);
+	} else {
+		iocharset = load_nls(user.iocharset);
+		if (!iocharset) {
+			unload_nls(codepage);
+			return -EBADRQC;
+		}
+		NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
+	}
+
+	oldset_cp = server->nls_vol;
+	server->nls_vol = codepage;
+	oldset_io = server->nls_io;
+	server->nls_io = iocharset;
+
+	if (oldset_cp)
+		unload_nls(oldset_cp);
+	if (oldset_io)
+		unload_nls(oldset_io);
+
+	return 0;
+}
+
+static int
+ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
+{
+	struct ncp_nls_ioctl user;
+	int len;
+
+	memset(&user, 0, sizeof(user));
+	if (server->nls_vol && server->nls_vol->charset) {
+		len = strlen(server->nls_vol->charset);
+		if (len > NCP_IOCSNAME_LEN)
+			len = NCP_IOCSNAME_LEN;
+		strncpy(user.codepage, server->nls_vol->charset, len);
+		user.codepage[len] = 0;
+	}
+
+	if (NCP_IS_FLAG(server, NCP_FLAG_UTF8))
+		strcpy(user.iocharset, "utf8");
+	else if (server->nls_io && server->nls_io->charset) {
+		len = strlen(server->nls_io->charset);
+		if (len > NCP_IOCSNAME_LEN)
+			len = NCP_IOCSNAME_LEN;
+		strncpy(user.iocharset,	server->nls_io->charset, len);
+		user.iocharset[len] = 0;
+	}
+
+	if (copy_to_user(arg, &user, sizeof(user)))
+		return -EFAULT;
+	return 0;
+}
+#endif /* CONFIG_NCPFS_NLS */
+
+int ncp_ioctl(struct inode *inode, struct file *filp,
+	      unsigned int cmd, unsigned long arg)
+{
+	struct ncp_server *server = NCP_SERVER(inode);
+	int result;
+	struct ncp_ioctl_request request;
+	char* bouncebuffer;
+	void __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	case NCP_IOC_NCPREQUEST:
+
+		if ((permission(inode, MAY_WRITE, NULL) != 0)
+		    && (current->uid != server->m.mounted_uid)) {
+			return -EACCES;
+		}
+		if (copy_from_user(&request, argp, sizeof(request)))
+			return -EFAULT;
+
+		if ((request.function > 255)
+		    || (request.size >
+		  NCP_PACKET_SIZE - sizeof(struct ncp_request_header))) {
+			return -EINVAL;
+		}
+		bouncebuffer = vmalloc(NCP_PACKET_SIZE_INTERNAL);
+		if (!bouncebuffer)
+			return -ENOMEM;
+		if (copy_from_user(bouncebuffer, request.data, request.size)) {
+			vfree(bouncebuffer);
+			return -EFAULT;
+		}
+		ncp_lock_server(server);
+
+		/* FIXME: We hack around in the server's structures
+		   here to be able to use ncp_request */
+
+		server->has_subfunction = 0;
+		server->current_size = request.size;
+		memcpy(server->packet, bouncebuffer, request.size);
+
+		result = ncp_request2(server, request.function, 
+			bouncebuffer, NCP_PACKET_SIZE_INTERNAL);
+		if (result < 0)
+			result = -EIO;
+		else
+			result = server->reply_size;
+		ncp_unlock_server(server);
+		DPRINTK("ncp_ioctl: copy %d bytes\n",
+			result);
+		if (result >= 0)
+			if (copy_to_user(request.data, bouncebuffer, result))
+				result = -EFAULT;
+		vfree(bouncebuffer);
+		return result;
+
+	case NCP_IOC_CONN_LOGGED_IN:
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE))
+			return -EINVAL;
+		if (server->root_setuped)
+			return -EBUSY;
+		server->root_setuped = 1;
+		return ncp_conn_logged_in(inode->i_sb);
+
+	case NCP_IOC_GET_FS_INFO:
+		return ncp_get_fs_info(server, inode, argp);
+
+	case NCP_IOC_GET_FS_INFO_V2:
+		return ncp_get_fs_info_v2(server, inode, argp);
+
+	case NCP_IOC_GETMOUNTUID2:
+		{
+			unsigned long tmp = server->m.mounted_uid;
+
+			if (   (permission(inode, MAY_READ, NULL) != 0)
+			    && (current->uid != server->m.mounted_uid))
+			{
+				return -EACCES;
+			}
+			if (put_user(tmp, (unsigned long __user *)argp)) 
+				return -EFAULT;
+			return 0;
+		}
+
+	case NCP_IOC_GETROOT:
+		{
+			struct ncp_setroot_ioctl sr;
+
+			if (   (permission(inode, MAY_READ, NULL) != 0)
+			    && (current->uid != server->m.mounted_uid))
+			{
+				return -EACCES;
+			}
+			if (server->m.mounted_vol[0]) {
+				struct dentry* dentry = inode->i_sb->s_root;
+
+				if (dentry) {
+					struct inode* inode = dentry->d_inode;
+				
+					if (inode) {
+						sr.volNumber = NCP_FINFO(inode)->volNumber;
+						sr.dirEntNum = NCP_FINFO(inode)->dirEntNum;
+						sr.namespace = server->name_space[sr.volNumber];
+					} else
+						DPRINTK("ncpfs: s_root->d_inode==NULL\n");
+				} else
+					DPRINTK("ncpfs: s_root==NULL\n");
+			} else {
+				sr.volNumber = -1;
+				sr.namespace = 0;
+				sr.dirEntNum = 0;
+			}
+			if (copy_to_user(argp, &sr, sizeof(sr)))
+				return -EFAULT;
+			return 0;
+		}
+	case NCP_IOC_SETROOT:
+		{
+			struct ncp_setroot_ioctl sr;
+			__u32 vnum;
+			__le32 de;
+			__le32 dosde;
+			struct dentry* dentry;
+
+			if (!capable(CAP_SYS_ADMIN))
+			{
+				return -EACCES;
+			}
+			if (server->root_setuped) return -EBUSY;
+			if (copy_from_user(&sr, argp, sizeof(sr)))
+				return -EFAULT;
+			if (sr.volNumber < 0) {
+				server->m.mounted_vol[0] = 0;
+				vnum = NCP_NUMBER_OF_VOLUMES;
+				de = 0;
+				dosde = 0;
+			} else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) {
+				return -EINVAL;
+			} else if (ncp_mount_subdir(server, sr.volNumber,
+						sr.namespace, sr.dirEntNum,
+						&vnum, &de, &dosde)) {
+				return -ENOENT;
+			}
+			
+			dentry = inode->i_sb->s_root;
+			server->root_setuped = 1;
+			if (dentry) {
+				struct inode* inode = dentry->d_inode;
+				
+				if (inode) {
+					NCP_FINFO(inode)->volNumber = vnum;
+					NCP_FINFO(inode)->dirEntNum = de;
+					NCP_FINFO(inode)->DosDirNum = dosde;
+				} else
+					DPRINTK("ncpfs: s_root->d_inode==NULL\n");
+			} else
+				DPRINTK("ncpfs: s_root==NULL\n");
+
+			return 0;
+		}
+
+#ifdef CONFIG_NCPFS_PACKET_SIGNING	
+	case NCP_IOC_SIGN_INIT:
+		if ((permission(inode, MAY_WRITE, NULL) != 0)
+		    && (current->uid != server->m.mounted_uid))
+		{
+			return -EACCES;
+		}
+		if (argp) {
+			if (server->sign_wanted)
+			{
+				struct ncp_sign_init sign;
+
+				if (copy_from_user(&sign, argp, sizeof(sign)))
+					return -EFAULT;
+				memcpy(server->sign_root,sign.sign_root,8);
+				memcpy(server->sign_last,sign.sign_last,16);
+				server->sign_active = 1;
+			}
+			/* ignore when signatures not wanted */
+		} else {
+			server->sign_active = 0;
+		}
+		return 0;		
+		
+	case NCP_IOC_SIGN_WANTED:
+		if (   (permission(inode, MAY_READ, NULL) != 0)
+		    && (current->uid != server->m.mounted_uid))
+		{
+			return -EACCES;
+		}
+		
+		if (put_user(server->sign_wanted, (int __user *)argp))
+			return -EFAULT;
+		return 0;
+	case NCP_IOC_SET_SIGN_WANTED:
+		{
+			int newstate;
+
+			if (   (permission(inode, MAY_WRITE, NULL) != 0)
+			    && (current->uid != server->m.mounted_uid))
+			{
+				return -EACCES;
+			}
+			/* get only low 8 bits... */
+			if (get_user(newstate, (unsigned char __user *)argp))
+				return -EFAULT;
+			if (server->sign_active) {
+				/* cannot turn signatures OFF when active */
+				if (!newstate) return -EINVAL;
+			} else {
+				server->sign_wanted = newstate != 0;
+			}
+			return 0;
+		}
+
+#endif /* CONFIG_NCPFS_PACKET_SIGNING */
+
+#ifdef CONFIG_NCPFS_IOCTL_LOCKING
+	case NCP_IOC_LOCKUNLOCK:
+		if (   (permission(inode, MAY_WRITE, NULL) != 0)
+		    && (current->uid != server->m.mounted_uid))
+		{
+			return -EACCES;
+		}
+		{
+			struct ncp_lock_ioctl	 rqdata;
+			int result;
+
+			if (copy_from_user(&rqdata, argp, sizeof(rqdata)))
+				return -EFAULT;
+			if (rqdata.origin != 0)
+				return -EINVAL;
+			/* check for cmd */
+			switch (rqdata.cmd) {
+				case NCP_LOCK_EX:
+				case NCP_LOCK_SH:
+						if (rqdata.timeout == 0)
+							rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
+						else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
+							rqdata.timeout = NCP_LOCK_MAX_TIMEOUT;
+						break;
+				case NCP_LOCK_LOG:
+						rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;	/* has no effect */
+				case NCP_LOCK_CLEAR:
+						break;
+				default:
+						return -EINVAL;
+			}
+			/* locking needs both read and write access */
+			if ((result = ncp_make_open(inode, O_RDWR)) != 0)
+			{
+				return result;
+			}
+			result = -EIO;
+			if (!ncp_conn_valid(server))
+				goto outrel;
+			result = -EISDIR;
+			if (!S_ISREG(inode->i_mode))
+				goto outrel;
+			if (rqdata.cmd == NCP_LOCK_CLEAR)
+			{
+				result = ncp_ClearPhysicalRecord(NCP_SERVER(inode),
+							NCP_FINFO(inode)->file_handle, 
+							rqdata.offset,
+							rqdata.length);
+				if (result > 0) result = 0;	/* no such lock */
+			}
+			else
+			{
+				int lockcmd;
+
+				switch (rqdata.cmd)
+				{
+					case NCP_LOCK_EX:  lockcmd=1; break;
+					case NCP_LOCK_SH:  lockcmd=3; break;
+					default:	   lockcmd=0; break;
+				}
+				result = ncp_LogPhysicalRecord(NCP_SERVER(inode),
+							NCP_FINFO(inode)->file_handle,
+							lockcmd,
+							rqdata.offset,
+							rqdata.length,
+							rqdata.timeout);
+				if (result > 0) result = -EAGAIN;
+			}
+outrel:			
+			ncp_inode_close(inode);
+			return result;
+		}
+#endif	/* CONFIG_NCPFS_IOCTL_LOCKING */
+
+	case NCP_IOC_GETOBJECTNAME:
+		if (current->uid != server->m.mounted_uid) {
+			return -EACCES;
+		}
+		{
+			struct ncp_objectname_ioctl user;
+			size_t outl;
+
+			if (copy_from_user(&user, argp, sizeof(user)))
+				return -EFAULT;
+			user.auth_type = server->auth.auth_type;
+			outl = user.object_name_len;
+			user.object_name_len = server->auth.object_name_len;
+			if (outl > user.object_name_len)
+				outl = user.object_name_len;
+			if (outl) {
+				if (copy_to_user(user.object_name,
+						 server->auth.object_name,
+						 outl)) return -EFAULT;
+			}
+			if (copy_to_user(argp, &user, sizeof(user)))
+				return -EFAULT;
+			return 0;
+		}
+	case NCP_IOC_SETOBJECTNAME:
+		if (current->uid != server->m.mounted_uid) {
+			return -EACCES;
+		}
+		{
+			struct ncp_objectname_ioctl user;
+			void* newname;
+			void* oldname;
+			size_t oldnamelen;
+			void* oldprivate;
+			size_t oldprivatelen;
+
+			if (copy_from_user(&user, argp, sizeof(user)))
+				return -EFAULT;
+			if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN)
+				return -ENOMEM;
+			if (user.object_name_len) {
+				newname = ncp_kmalloc(user.object_name_len, GFP_USER);
+				if (!newname) return -ENOMEM;
+				if (copy_from_user(newname, user.object_name, user.object_name_len)) {
+					ncp_kfree_s(newname, user.object_name_len);
+					return -EFAULT;
+				}
+			} else {
+				newname = NULL;
+			}
+			/* enter critical section */
+			/* kfree may sleep, so swap the pointers first and free afterwards */
+			/* it is at least more SMP friendly (in future...) */
+			oldname = server->auth.object_name;
+			oldnamelen = server->auth.object_name_len;
+			oldprivate = server->priv.data;
+			oldprivatelen = server->priv.len;
+			server->auth.auth_type = user.auth_type;
+			server->auth.object_name_len = user.object_name_len;
+			server->auth.object_name = newname;
+			server->priv.len = 0;
+			server->priv.data = NULL;
+			/* leave critical section */
+			if (oldprivate) ncp_kfree_s(oldprivate, oldprivatelen);
+			if (oldname) ncp_kfree_s(oldname, oldnamelen);
+			return 0;
+		}
+	case NCP_IOC_GETPRIVATEDATA:
+		if (current->uid != server->m.mounted_uid) {
+			return -EACCES;
+		}
+		{
+			struct ncp_privatedata_ioctl user;
+			size_t outl;
+
+			if (copy_from_user(&user, argp, sizeof(user)))
+				return -EFAULT;
+			outl = user.len;
+			user.len = server->priv.len;
+			if (outl > user.len) outl = user.len;
+			if (outl) {
+				if (copy_to_user(user.data,
+						 server->priv.data,
+						 outl)) return -EFAULT;
+			}
+			if (copy_to_user(argp, &user, sizeof(user)))
+				return -EFAULT;
+			return 0;
+		}
+	case NCP_IOC_SETPRIVATEDATA:
+		if (current->uid != server->m.mounted_uid) {
+			return -EACCES;
+		}
+		{
+			struct ncp_privatedata_ioctl user;
+			void* new;
+			void* old;
+			size_t oldlen;
+
+			if (copy_from_user(&user, argp, sizeof(user)))
+				return -EFAULT;
+			if (user.len > NCP_PRIVATE_DATA_MAX_LEN)
+				return -ENOMEM;
+			if (user.len) {
+				new = ncp_kmalloc(user.len, GFP_USER);
+				if (!new) return -ENOMEM;
+				if (copy_from_user(new, user.data, user.len)) {
+					ncp_kfree_s(new, user.len);
+					return -EFAULT;
+				}
+			} else {
+				new = NULL;
+			}
+			/* enter critical section */
+			old = server->priv.data;
+			oldlen = server->priv.len;
+			server->priv.len = user.len;
+			server->priv.data = new;
+			/* leave critical section */
+			if (old) ncp_kfree_s(old, oldlen);
+			return 0;
+		}
+
+#ifdef CONFIG_NCPFS_NLS
+	case NCP_IOC_SETCHARSETS:
+		return ncp_set_charsets(server, argp);
+		
+	case NCP_IOC_GETCHARSETS:
+		return ncp_get_charsets(server, argp);
+
+#endif /* CONFIG_NCPFS_NLS */
+
+	case NCP_IOC_SETDENTRYTTL:
+		if ((permission(inode, MAY_WRITE, NULL) != 0) &&
+				 (current->uid != server->m.mounted_uid))
+			return -EACCES;
+		{
+			u_int32_t user;
+
+			if (copy_from_user(&user, argp, sizeof(user)))
+				return -EFAULT;
+			/* 20 secs at most... */
+			if (user > 20000)
+				return -EINVAL;
+			user = (user * HZ) / 1000;
+			server->dentry_ttl = user;
+			return 0;
+		}
+		
+	case NCP_IOC_GETDENTRYTTL:
+		{
+			u_int32_t user = (server->dentry_ttl * 1000) / HZ;
+			if (copy_to_user(argp, &user, sizeof(user)))
+				return -EFAULT;
+			return 0;
+		}
+
+	}
+/* #ifdef CONFIG_UID16 */
+	/* NCP_IOC_GETMOUNTUID may be the same as NCP_IOC_GETMOUNTUID2,
+	   so we handle it outside of the switch */
+	if (cmd == NCP_IOC_GETMOUNTUID) {
+		__kernel_uid_t uid = 0;
+		if ((permission(inode, MAY_READ, NULL) != 0)
+		    && (current->uid != server->m.mounted_uid)) {
+			return -EACCES;
+		}
+		SET_UID(uid, server->m.mounted_uid);
+		if (put_user(uid, (__kernel_uid_t __user *)argp))
+			return -EFAULT;
+		return 0;
+	}
+/* #endif */
+	return -EINVAL;
+}
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
new file mode 100644
index 0000000..52d60c3
--- /dev/null
+++ b/fs/ncpfs/mmap.c
@@ -0,0 +1,128 @@
+/*
+ *  mmap.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *
+ */
+
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/shm.h>
+#include <linux/errno.h>
+#include <linux/mman.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/ncp_fs.h>
+
+#include "ncplib_kernel.h"
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+/*
+ * Fill in the supplied page for mmap
+ */
+static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
+				     unsigned long address, int *type)
+{
+	struct file *file = area->vm_file;
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct page* page;
+	char *pg_addr;
+	unsigned int already_read;
+	unsigned int count;
+	int bufsize;
+	int pos;
+
+	page = alloc_page(GFP_HIGHUSER); /* ncpfs has nothing against high pages
+	           as long as recvmsg and memset work on it */
+	if (!page)
+		return page;
+	pg_addr = kmap(page);
+	address &= PAGE_MASK;
+	pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
+
+	count = PAGE_SIZE;
+	if (address + PAGE_SIZE > area->vm_end) {
+		count = area->vm_end - address;
+	}
+	/* what we can read in one go */
+	bufsize = NCP_SERVER(inode)->buffer_size;
+
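+	/* Read the page in chunks of at most one negotiated buffer, never
+	 * crossing a bufsize boundary; a short read ends the loop and the
+	 * remainder of the page is zero-filled below. */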
+	already_read = 0;
+	if (ncp_make_open(inode, O_RDONLY) >= 0) {
+		while (already_read < count) {
+			int read_this_time;
+			int to_read;
+
+			to_read = bufsize - (pos % bufsize);
+
+			to_read = min_t(unsigned int, to_read, count - already_read);
+
+			if (ncp_read_kernel(NCP_SERVER(inode),
+				     NCP_FINFO(inode)->file_handle,
+				     pos, to_read,
+				     pg_addr + already_read,
+				     &read_this_time) != 0) {
+				read_this_time = 0;
+			}
+			pos += read_this_time;
+			already_read += read_this_time;
+
+			if (read_this_time < to_read) {
+				break;
+			}
+		}
+		ncp_inode_close(inode);
+
+	}
+
+	if (already_read < PAGE_SIZE)
+		memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
+	flush_dcache_page(page);
+	kunmap(page);
+
+	/*
+	 * If I understand ncp_read_kernel() properly, the above always
+	 * fetches from the network, here the analogue of disk.
+	 * -- wli
+	 */
+	if (type)
+		*type = VM_FAULT_MAJOR;
+	inc_page_state(pgmajfault);
+	return page;
+}
+
+static struct vm_operations_struct ncp_file_mmap =
+{
+	.nopage	= ncp_file_mmap_nopage,
+};
+
+
+/* This is used for a general mmap of a ncp file */
+int ncp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	
+	DPRINTK("ncp_mmap: called\n");
+
+	if (!ncp_conn_valid(NCP_SERVER(inode)))
+		return -EIO;
+
+	/* only PAGE_COW or read-only supported now */
+	if (vma->vm_flags & VM_SHARED)
+		return -EINVAL;
+	/* we do not support files bigger than 4GB... We eventually
+	   support just 4GB... */
+	if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff 
+	   > (1U << (32 - PAGE_SHIFT)))
+		return -EFBIG;
+
+	vma->vm_ops = &ncp_file_mmap;
+	file_accessed(file);
+	return 0;
+}
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
new file mode 100644
index 0000000..e4eb5ed
--- /dev/null
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -0,0 +1,1355 @@
+/*
+ *  ncplib_kernel.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified for big endian by J.F. Chadima and David S. Miller
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *  Modified 1999 Wolfram Pienkoss for NLS
+ *  Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
+ *
+ */
+
+
+#include <linux/config.h>
+
+#include "ncplib_kernel.h"
+
+static inline void assert_server_locked(struct ncp_server *server)
+{
+	if (server->lock == 0) {
+		DPRINTK("ncpfs: server not locked!\n");
+	}
+}
+
+static void ncp_add_byte(struct ncp_server *server, __u8 x)
+{
+	assert_server_locked(server);
+	*(__u8 *) (&(server->packet[server->current_size])) = x;
+	server->current_size += 1;
+	return;
+}
+
+static void ncp_add_word(struct ncp_server *server, __le16 x)
+{
+	assert_server_locked(server);
+	put_unaligned(x, (__le16 *) (&(server->packet[server->current_size])));
+	server->current_size += 2;
+	return;
+}
+
+static void ncp_add_be16(struct ncp_server *server, __u16 x)
+{
+	assert_server_locked(server);
+	put_unaligned(cpu_to_be16(x), (__be16 *) (&(server->packet[server->current_size])));
+	server->current_size += 2;
+}
+
+static void ncp_add_dword(struct ncp_server *server, __le32 x)
+{
+	assert_server_locked(server);
+	put_unaligned(x, (__le32 *) (&(server->packet[server->current_size])));
+	server->current_size += 4;
+	return;
+}
+
+static void ncp_add_be32(struct ncp_server *server, __u32 x)
+{
+	assert_server_locked(server);
+	put_unaligned(cpu_to_be32(x), (__be32 *)(&(server->packet[server->current_size])));
+	server->current_size += 4;
+}
+
+static inline void ncp_add_dword_lh(struct ncp_server *server, __u32 x) {
+	ncp_add_dword(server, cpu_to_le32(x));
+}
+
+static void ncp_add_mem(struct ncp_server *server, const void *source, int size)
+{
+	assert_server_locked(server);
+	memcpy(&(server->packet[server->current_size]), source, size);
+	server->current_size += size;
+	return;
+}
+
+static void ncp_add_pstring(struct ncp_server *server, const char *s)
+{
+	int len = strlen(s);
+	assert_server_locked(server);
+	if (len > 255) {
+		DPRINTK("ncpfs: string too long: %s\n", s);
+		len = 255;
+	}
+	ncp_add_byte(server, len);
+	ncp_add_mem(server, s, len);
+	return;
+}
+
+static inline void ncp_init_request(struct ncp_server *server)
+{
+	ncp_lock_server(server);
+
+	server->current_size = sizeof(struct ncp_request_header);
+	server->has_subfunction = 0;
+}
+
+static inline void ncp_init_request_s(struct ncp_server *server, int subfunction)
+{
+	ncp_lock_server(server);
+	
+	server->current_size = sizeof(struct ncp_request_header) + 2;
+	ncp_add_byte(server, subfunction);
+
+	server->has_subfunction = 1;
+}
+
+static inline char *
+ ncp_reply_data(struct ncp_server *server, int offset)
+{
+	return &(server->packet[sizeof(struct ncp_reply_header) + offset]);
+}
+
+static inline __u8 BVAL(void* data)
+{
+	return get_unaligned((__u8*)data);
+}
+
+static __u8
+ ncp_reply_byte(struct ncp_server *server, int offset)
+{
+	return get_unaligned((__u8 *) ncp_reply_data(server, offset));
+}
+
+static inline __u16 WVAL_LH(void* data)
+{
+	return le16_to_cpu(get_unaligned((__le16*)data));
+}
+
+static __u16
+ ncp_reply_le16(struct ncp_server *server, int offset)
+{
+	return le16_to_cpu(get_unaligned((__le16 *) ncp_reply_data(server, offset)));
+}
+
+static __u16
+ ncp_reply_be16(struct ncp_server *server, int offset)
+{
+	return be16_to_cpu(get_unaligned((__be16 *) ncp_reply_data(server, offset)));
+}
+
+static inline __u32 DVAL_LH(void* data)
+{
+	return le32_to_cpu(get_unaligned((__le32*)data));
+}
+
+static __le32
+ ncp_reply_dword(struct ncp_server *server, int offset)
+{
+	return get_unaligned((__le32 *) ncp_reply_data(server, offset));
+}
+
+static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) {
+	return le32_to_cpu(ncp_reply_dword(server, offset));
+}
+
+int
+ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_be16(server, size);
+
+	if ((result = ncp_request(server, 33)) != 0) {
+		ncp_unlock_server(server);
+		return result;
+	}
+	*target = min_t(unsigned int, ncp_reply_be16(server, 0), size);
+
+	ncp_unlock_server(server);
+	return 0;
+}
+
+
+/* options: 
+ *	bit 0	ipx checksum
+ *	bit 1	packet signing
+ */
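+/* ncp_fill_super() checks (options & 2), i.e. bit 1 above, to decide whether
+ * packet signing is wanted on this connection. */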
+int
+ncp_negotiate_size_and_options(struct ncp_server *server, 
+	int size, int options, int *ret_size, int *ret_options) {
+	int result;
+
+	/* there is a minimum */
+	if (size < NCP_BLOCK_SIZE) size = NCP_BLOCK_SIZE;
+
+	ncp_init_request(server);
+	ncp_add_be16(server, size);
+	ncp_add_byte(server, options);
+	
+	if ((result = ncp_request(server, 0x61)) != 0)
+	{
+		ncp_unlock_server(server);
+		return result;
+	}
+
+	/* NCP over UDP returns 0 (!!!) */
+	result = ncp_reply_be16(server, 0);
+	if (result >= NCP_BLOCK_SIZE)
+		size = min(result, size);
+	*ret_size = size;
+	*ret_options = ncp_reply_byte(server, 4);
+
+	ncp_unlock_server(server);
+	return 0;
+}
+
+int ncp_get_volume_info_with_number(struct ncp_server* server,
+			     int n, struct ncp_volume_info* target) {
+	int result;
+	int len;
+
+	ncp_init_request_s(server, 44);
+	ncp_add_byte(server, n);
+
+	if ((result = ncp_request(server, 22)) != 0) {
+		goto out;
+	}
+	target->total_blocks = ncp_reply_dword_lh(server, 0);
+	target->free_blocks = ncp_reply_dword_lh(server, 4);
+	target->purgeable_blocks = ncp_reply_dword_lh(server, 8);
+	target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12);
+	target->total_dir_entries = ncp_reply_dword_lh(server, 16);
+	target->available_dir_entries = ncp_reply_dword_lh(server, 20);
+	target->sectors_per_block = ncp_reply_byte(server, 28);
+
+	memset(&(target->volume_name), 0, sizeof(target->volume_name));
+
+	result = -EIO;
+	len = ncp_reply_byte(server, 29);
+	if (len > NCP_VOLNAME_LEN) {
+		DPRINTK("ncpfs: volume name too long: %d\n", len);
+		goto out;
+	}
+	memcpy(&(target->volume_name), ncp_reply_data(server, 30), len);
+	result = 0;
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+int ncp_get_directory_info(struct ncp_server* server, __u8 n, 
+			   struct ncp_volume_info* target) {
+	int result;
+	int len;
+
+	ncp_init_request_s(server, 45);
+	ncp_add_byte(server, n);
+
+	if ((result = ncp_request(server, 22)) != 0) {
+		goto out;
+	}
+	target->total_blocks = ncp_reply_dword_lh(server, 0);
+	target->free_blocks = ncp_reply_dword_lh(server, 4);
+	target->purgeable_blocks = 0;
+	target->not_yet_purgeable_blocks = 0;
+	target->total_dir_entries = ncp_reply_dword_lh(server, 8);
+	target->available_dir_entries = ncp_reply_dword_lh(server, 12);
+	target->sectors_per_block = ncp_reply_byte(server, 20);
+
+	memset(&(target->volume_name), 0, sizeof(target->volume_name));
+
+	result = -EIO;
+	len = ncp_reply_byte(server, 21);
+	if (len > NCP_VOLNAME_LEN) {
+		DPRINTK("ncpfs: volume name too long: %d\n", len);
+		goto out;
+	}
+	memcpy(&(target->volume_name), ncp_reply_data(server, 22), len);
+	result = 0;
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+int
+ncp_close_file(struct ncp_server *server, const char *file_id)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 0);
+	ncp_add_mem(server, file_id, 6);
+
+	result = ncp_request(server, 66);
+	ncp_unlock_server(server);
+	return result;
+}
+
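+/* Close the NetWare file handle backing this inode if it is the only
+ * remaining open instance; open_sem serialises against concurrent opens. */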
+int
+ncp_make_closed(struct inode *inode)
+{
+	int err;
+
+	err = 0;
+	down(&NCP_FINFO(inode)->open_sem);	
+	if (atomic_read(&NCP_FINFO(inode)->opened) == 1) {
+		atomic_set(&NCP_FINFO(inode)->opened, 0);
+		err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle);
+
+		if (!err)
+			PPRINTK("ncp_make_closed: volnum=%d, dirent=%u, error=%d\n",
+				NCP_FINFO(inode)->volNumber,
+				NCP_FINFO(inode)->dirEntNum, err);
+	}
+	up(&NCP_FINFO(inode)->open_sem);
+	return err;
+}
+
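+/* Append a volume/directory reference in the form used by the NCP 87
+ * subfunctions: volume number, 32-bit directory base, a flag byte
+ * (1 = use the dir_base above, 0xff = no handle), and then either a
+ * single length-prefixed path component or a zero component count. */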
+static void ncp_add_handle_path(struct ncp_server *server, __u8 vol_num,
+				__le32 dir_base, int have_dir_base, 
+				const char *path)
+{
+	ncp_add_byte(server, vol_num);
+	ncp_add_dword(server, dir_base);
+	if (have_dir_base != 0) {
+		ncp_add_byte(server, 1);	/* dir_base */
+	} else {
+		ncp_add_byte(server, 0xff);	/* no handle */
+	}
+	if (path != NULL) {
+		ncp_add_byte(server, 1);	/* 1 component */
+		ncp_add_pstring(server, path);
+	} else {
+		ncp_add_byte(server, 0);
+	}
+}
+
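+/* Allocate a short NetWare directory handle for the given volume/directory
+ * (NCP 87 subfunction 12); the handle is returned in *dirhandle and released
+ * again with ncp_dirhandle_free(). */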
+int ncp_dirhandle_alloc(struct ncp_server* server, __u8 volnum, __le32 dirent,
+			__u8* dirhandle) {
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 12);		/* subfunction */
+	ncp_add_byte(server, NW_NS_DOS);
+	ncp_add_byte(server, 0);
+	ncp_add_word(server, 0);
+	ncp_add_handle_path(server, volnum, dirent, 1, NULL);
+	if ((result = ncp_request(server, 87)) == 0) {
+		*dirhandle = ncp_reply_byte(server, 0);
+	}
+	ncp_unlock_server(server);
+	return result;
+}
+
+int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) {
+	int result;
+	
+	ncp_init_request_s(server, 20);
+	ncp_add_byte(server, dirhandle);
+	result = ncp_request(server, 22);
+	ncp_unlock_server(server);
+	return result;
+}
+
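+/* Unpack an entry-information reply into a nw_info_struct: copy the fixed
+ * part up to nameLen, append the length-prefixed entry name (NUL-terminated
+ * for kernel use) and convert volNumber to host byte order. */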
+void ncp_extract_file_info(void *structure, struct nw_info_struct *target)
+{
+	__u8 *name_len;
+	const int info_struct_size = offsetof(struct nw_info_struct, nameLen);
+
+	memcpy(target, structure, info_struct_size);
+	name_len = structure + info_struct_size;
+	target->nameLen = *name_len;
+	memcpy(target->entryName, name_len + 1, *name_len);
+	target->entryName[*name_len] = '\0';
+	target->volNumber = le32_to_cpu(target->volNumber);
+	return;
+}
+
+#ifdef CONFIG_NCPFS_NFS_NS
+static inline void ncp_extract_nfs_info(unsigned char *structure,
+				 struct nw_nfs_info *target)
+{
+	target->mode = DVAL_LH(structure);
+	target->rdev = DVAL_LH(structure + 8);
+}
+#endif
+
+int ncp_obtain_nfs_info(struct ncp_server *server,
+		        struct nw_info_struct *target)
+
+{
+	int result = 0;
+#ifdef CONFIG_NCPFS_NFS_NS
+	__u32 volnum = target->volNumber;
+
+	if (ncp_is_nfs_extras(server, volnum)) {
+		ncp_init_request(server);
+		ncp_add_byte(server, 19);	/* subfunction */
+		ncp_add_byte(server, server->name_space[volnum]);
+		ncp_add_byte(server, NW_NS_NFS);
+		ncp_add_byte(server, 0);
+		ncp_add_byte(server, volnum);
+		ncp_add_dword(server, target->dirEntNum);
+		/* We must retrieve both nlinks and rdev, otherwise some server versions
+		   report zeroes instead of valid data */
+		ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV);
+
+		if ((result = ncp_request(server, 87)) == 0) {
+			ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs);
+			DPRINTK(KERN_DEBUG
+				"ncp_obtain_nfs_info: (%s) mode=0%o, rdev=0x%x\n",
+				target->entryName, target->nfs.mode,
+				target->nfs.rdev);
+		} else {
+			target->nfs.mode = 0;
+			target->nfs.rdev = 0;
+		}
+	        ncp_unlock_server(server);
+
+	} else
+#endif
+	{
+		target->nfs.mode = 0;
+		target->nfs.rdev = 0;
+	}
+	return result;
+}
+
+/*
+ * Returns information for a (one-component) name relative to
+ * the specified directory.
+ */
+int ncp_obtain_info(struct ncp_server *server, struct inode *dir, char *path,
+			struct nw_info_struct *target)
+{
+	__u8  volnum = NCP_FINFO(dir)->volNumber;
+	__le32 dirent = NCP_FINFO(dir)->dirEntNum;
+	int result;
+
+	if (target == NULL) {
+		printk(KERN_ERR "ncp_obtain_info: invalid call\n");
+		return -EINVAL;
+	}
+	ncp_init_request(server);
+	ncp_add_byte(server, 6);	/* subfunction */
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_byte(server, server->name_space[volnum]); /* N.B. twice ?? */
+	ncp_add_word(server, cpu_to_le16(0x8006));	/* get all */
+	ncp_add_dword(server, RIM_ALL);
+	ncp_add_handle_path(server, volnum, dirent, 1, path);
+
+	if ((result = ncp_request(server, 87)) != 0)
+		goto out;
+	ncp_extract_file_info(ncp_reply_data(server, 0), target);
+	ncp_unlock_server(server);
+	
+	result = ncp_obtain_nfs_info(server, target);
+	return result;
+
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+#ifdef CONFIG_NCPFS_NFS_NS
+static int
+ncp_obtain_DOS_dir_base(struct ncp_server *server,
+		__u8 volnum, __le32 dirent,
+		char *path, /* At most 1 component */
+		__le32 *DOS_dir_base)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 6); /* subfunction */
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
+	ncp_add_dword(server, RIM_DIRECTORY);
+	ncp_add_handle_path(server, volnum, dirent, 1, path);
+
+	if ((result = ncp_request(server, 87)) == 0)
+	{
+	   	if (DOS_dir_base) *DOS_dir_base=ncp_reply_dword(server, 0x34);
+	}
+	ncp_unlock_server(server);
+	return result;
+}
+#endif /* CONFIG_NCPFS_NFS_NS */
+
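+/* Choose the preferred name space available on a volume: NFS if loaded and
+ * not disabled by the mount flags, then OS/2, falling back to DOS. */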
+static inline int
+ncp_get_known_namespace(struct ncp_server *server, __u8 volume)
+{
+#if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS)
+	int result;
+	__u8 *namespace;
+	__u16 no_namespaces;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 24);	/* Subfunction: Get Name Spaces Loaded */
+	ncp_add_word(server, 0);
+	ncp_add_byte(server, volume);
+
+	if ((result = ncp_request(server, 87)) != 0) {
+		ncp_unlock_server(server);
+		return NW_NS_DOS; /* not result ?? */
+	}
+
+	result = NW_NS_DOS;
+	no_namespaces = ncp_reply_le16(server, 0);
+	namespace = ncp_reply_data(server, 2);
+
+	while (no_namespaces > 0) {
+		DPRINTK("get_namespaces: found %d on %d\n", *namespace, volume);
+
+#ifdef CONFIG_NCPFS_NFS_NS
+		if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) 
+		{
+			result = NW_NS_NFS;
+			break;
+		}
+#endif	/* CONFIG_NCPFS_NFS_NS */
+#ifdef CONFIG_NCPFS_OS2_NS
+		if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2))
+		{
+			result = NW_NS_OS2;
+		}
+#endif	/* CONFIG_NCPFS_OS2_NS */
+		namespace += 1;
+		no_namespaces -= 1;
+	}
+	ncp_unlock_server(server);
+	return result;
+#else	/* neither OS2 nor NFS - only DOS */
+	return NW_NS_DOS;
+#endif	/* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */
+}
+
+static int
+ncp_ObtainSpecificDirBase(struct ncp_server *server,
+		__u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base,
+		char *path, /* At most 1 component */
+		__le32 *dirEntNum, __le32 *DosDirNum)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 6); /* subfunction */
+	ncp_add_byte(server, nsSrc);
+	ncp_add_byte(server, nsDst);
+	ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
+	ncp_add_dword(server, RIM_ALL);
+	ncp_add_handle_path(server, vol_num, dir_base, 1, path);
+
+	if ((result = ncp_request(server, 87)) != 0)
+	{
+		ncp_unlock_server(server);
+		return result;
+	}
+
+	if (dirEntNum)
+		*dirEntNum = ncp_reply_dword(server, 0x30);
+	if (DosDirNum)
+		*DosDirNum = ncp_reply_dword(server, 0x34);
+	ncp_unlock_server(server);
+	return 0;
+}
+
+int
+ncp_mount_subdir(struct ncp_server *server,
+		 __u8 volNumber, __u8 srcNS, __le32 dirEntNum,
+		 __u32* volume, __le32* newDirEnt, __le32* newDosEnt)
+{
+	int dstNS;
+	int result;
+	
+	dstNS = ncp_get_known_namespace(server, volNumber);
+	if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber, 
+				      dirEntNum, NULL, newDirEnt, newDosEnt)) != 0)
+	{
+		return result;
+	}
+	server->name_space[volNumber] = dstNS;
+	*volume = volNumber;
+	server->m.mounted_vol[1] = 0;
+	server->m.mounted_vol[0] = 'X';
+	return 0;
+}
+
+int 
+ncp_get_volume_root(struct ncp_server *server, const char *volname,
+		    __u32* volume, __le32* dirent, __le32* dosdirent)
+{
+	int result;
+	__u8 volnum;
+
+	DPRINTK("ncp_get_volume_root: looking up vol %s\n", volname);
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 22);	/* Subfunction: Generate dir handle */
+	ncp_add_byte(server, 0);	/* DOS namespace */
+	ncp_add_byte(server, 0);	/* reserved */
+	ncp_add_byte(server, 0);	/* reserved */
+	ncp_add_byte(server, 0);	/* reserved */
+
+	ncp_add_byte(server, 0);	/* faked volume number */
+	ncp_add_dword(server, 0);	/* faked dir_base */
+	ncp_add_byte(server, 0xff);	/* Don't have a dir_base */
+	ncp_add_byte(server, 1);	/* 1 path component */
+	ncp_add_pstring(server, volname);
+
+	if ((result = ncp_request(server, 87)) != 0) {
+		ncp_unlock_server(server);
+		return result;
+	}
+	*dirent = *dosdirent = ncp_reply_dword(server, 4);
+	volnum = ncp_reply_byte(server, 8);
+	ncp_unlock_server(server);
+	*volume = volnum;
+
+	server->name_space[volnum] = ncp_get_known_namespace(server, volnum);
+
+	DPRINTK("lookup_vol: namespace[%d] = %d\n",
+		volnum, server->name_space[volnum]);
+
+	return 0;
+}
+
+int
+ncp_lookup_volume(struct ncp_server *server, const char *volname,
+		  struct nw_info_struct *target)
+{
+	int result;
+
+	memset(target, 0, sizeof(*target));
+	result = ncp_get_volume_root(server, volname,
+			&target->volNumber, &target->dirEntNum, &target->DosDirNum);
+	if (result) {
+		return result;
+	}
+	target->nameLen = strlen(volname);
+	memcpy(target->entryName, volname, target->nameLen+1);
+	target->attributes = aDIR;
+	/* set dates to Jan 1, 1986  00:00 */
+	target->creationTime = target->modifyTime = cpu_to_le16(0x0000);
+	target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21);
+	target->nfs.mode = 0;
+	return 0;
+}
+
+int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *server,
+					    struct inode *dir,
+					    const char *path,
+					    __le32 info_mask,
+					    const struct nw_modify_dos_info *info)
+{
+	__u8  volnum = NCP_FINFO(dir)->volNumber;
+	__le32 dirent = NCP_FINFO(dir)->dirEntNum;
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 7);	/* subfunction */
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_byte(server, 0);	/* reserved */
+	ncp_add_word(server, cpu_to_le16(0x8006));	/* search attribs: all */
+
+	ncp_add_dword(server, info_mask);
+	ncp_add_mem(server, info, sizeof(*info));
+	ncp_add_handle_path(server, volnum, dirent, 1, path);
+
+	result = ncp_request(server, 87);
+	ncp_unlock_server(server);
+	return result;
+}
+
+int ncp_modify_file_or_subdir_dos_info(struct ncp_server *server,
+				       struct inode *dir,
+				       __le32 info_mask,
+				       const struct nw_modify_dos_info *info)
+{
+	return ncp_modify_file_or_subdir_dos_info_path(server, dir, NULL,
+		info_mask, info);
+}
+
+#ifdef CONFIG_NCPFS_NFS_NS
+int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent,
+			       __u32 mode, __u32 rdev)
+
+{
+	int result = 0;
+
+	if (server->name_space[volnum] == NW_NS_NFS) {
+		ncp_init_request(server);
+		ncp_add_byte(server, 25);	/* subfunction */
+		ncp_add_byte(server, server->name_space[volnum]);
+		ncp_add_byte(server, NW_NS_NFS);
+		ncp_add_byte(server, volnum);
+		ncp_add_dword(server, dirent);
+		/* we must always operate on both nlinks and rdev, otherwise
+		   rdev is not set */
+		ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV);
+		ncp_add_dword_lh(server, mode);
+		ncp_add_dword_lh(server, 1);	/* nlinks */
+		ncp_add_dword_lh(server, rdev);
+		result = ncp_request(server, 87);
+		ncp_unlock_server(server);
+	}
+	return result;
+}
+#endif
+
+
+static int
+ncp_DeleteNSEntry(struct ncp_server *server,
+		  __u8 have_dir_base, __u8 volnum, __le32 dirent,
+		  char* name, __u8 ns, __le16 attr)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 8);	/* subfunction */
+	ncp_add_byte(server, ns);
+	ncp_add_byte(server, 0);	/* reserved */
+	ncp_add_word(server, attr);	/* search attribs: all */
+	ncp_add_handle_path(server, volnum, dirent, have_dir_base, name);
+
+	result = ncp_request(server, 87);
+	ncp_unlock_server(server);
+	return result;
+}
+
+int
+ncp_del_file_or_subdir2(struct ncp_server *server,
+			struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	__u8  volnum;
+	__le32 dirent;
+
+	if (!inode) {
+#ifdef CONFIG_NCPFS_DEBUGDENTRY
+		PRINTK("ncpfs: ncpdel2: dentry->d_inode == NULL\n");
+#endif
+		return 0xFF;	/* Any error */
+	}
+	volnum = NCP_FINFO(inode)->volNumber;
+	dirent = NCP_FINFO(inode)->DosDirNum;
+	return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006));
+}
+
+int
+ncp_del_file_or_subdir(struct ncp_server *server,
+		       struct inode *dir, char *name)
+{
+	__u8  volnum = NCP_FINFO(dir)->volNumber;
+	__le32 dirent = NCP_FINFO(dir)->dirEntNum;
+
+#ifdef CONFIG_NCPFS_NFS_NS
+	if (server->name_space[volnum]==NW_NS_NFS)
+ 	{
+ 		int result;
+ 
+ 		result=ncp_obtain_DOS_dir_base(server, volnum, dirent, name, &dirent);
+ 		if (result) return result;
+ 		return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006));
+ 	}
+ 	else
+#endif	/* CONFIG_NCPFS_NFS_NS */
+ 		return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, server->name_space[volnum], cpu_to_le16(0x8006));
+}
+
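+/* Pack the two 16-bit words returned by the server into the 6-byte NetWare
+ * file handle used by the rest of ncpfs. */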
+static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6])
+{
+	__le16 *dest = (__le16 *) ret;
+	dest[1] = cpu_to_le16(v0);
+	dest[2] = cpu_to_le16(v1);
+	dest[0] = cpu_to_le16(v0 + 1);
+	return;
+}
+
+/* If both dir and name are NULL, then target already contains a looked-up
+   entry that should be opened. */
+int ncp_open_create_file_or_subdir(struct ncp_server *server,
+				   struct inode *dir, char *name,
+				   int open_create_mode,
+				   __le32 create_attributes,
+				   __le16 desired_acc_rights,
+				   struct ncp_entry_info *target)
+{
+	__le16 search_attribs = cpu_to_le16(0x0006);
+	__u8  volnum;
+	__le32 dirent;
+	int result;
+
+	volnum = NCP_FINFO(dir)->volNumber;
+	dirent = NCP_FINFO(dir)->dirEntNum;
+
+	if ((create_attributes & aDIR) != 0) {
+		search_attribs |= cpu_to_le16(0x8000);
+	}
+	ncp_init_request(server);
+	ncp_add_byte(server, 1);	/* subfunction */
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_byte(server, open_create_mode);
+	ncp_add_word(server, search_attribs);
+	ncp_add_dword(server, RIM_ALL);
+	ncp_add_dword(server, create_attributes);
+	/* The desired acc rights seem to be the inherited rights mask
+	   for directories */
+	ncp_add_word(server, desired_acc_rights);
+	ncp_add_handle_path(server, volnum, dirent, 1, name);
+
+	if ((result = ncp_request(server, 87)) != 0)
+		goto out;
+	if (!(create_attributes & aDIR))
+		target->opened = 1;
+
+	/* in target there's a new finfo to fill */
+	ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i));
+	target->volume = target->i.volNumber;
+	ConvertToNWfromDWORD(ncp_reply_le16(server, 0),
+			     ncp_reply_le16(server, 2),
+			     target->file_handle);
+	
+	ncp_unlock_server(server);
+
+	(void)ncp_obtain_nfs_info(server, &(target->i));
+	return 0;
+
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+int
+ncp_initialize_search(struct ncp_server *server, struct inode *dir,
+			struct nw_search_sequence *target)
+{
+	__u8  volnum = NCP_FINFO(dir)->volNumber;
+	__le32 dirent = NCP_FINFO(dir)->dirEntNum;
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 2);	/* subfunction */
+	ncp_add_byte(server, server->name_space[volnum]);
+	ncp_add_byte(server, 0);	/* reserved */
+	ncp_add_handle_path(server, volnum, dirent, 1, NULL);
+
+	result = ncp_request(server, 87);
+	if (result)
+		goto out;
+	memcpy(target, ncp_reply_data(server, 0), sizeof(*target));
+
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+/* Search for everything */
+int ncp_search_for_file_or_subdir(struct ncp_server *server,
+				  struct nw_search_sequence *seq,
+				  struct nw_info_struct *target)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 3);	/* subfunction */
+	ncp_add_byte(server, server->name_space[seq->volNumber]);
+	ncp_add_byte(server, 0);	/* data stream (???) */
+	ncp_add_word(server, cpu_to_le16(0x8006));	/* Search attribs */
+	ncp_add_dword(server, RIM_ALL);		/* return info mask */
+	ncp_add_mem(server, seq, 9);
+#ifdef CONFIG_NCPFS_NFS_NS
+	if (server->name_space[seq->volNumber] == NW_NS_NFS) {
+		ncp_add_byte(server, 0);	/* 0 byte pattern */
+	} else 
+#endif
+	{
+		ncp_add_byte(server, 2);	/* 2 byte pattern */
+		ncp_add_byte(server, 0xff);	/* following is a wildcard */
+		ncp_add_byte(server, '*');
+	}
+	
+	if ((result = ncp_request(server, 87)) != 0)
+		goto out;
+	memcpy(seq, ncp_reply_data(server, 0), sizeof(*seq));
+	ncp_extract_file_info(ncp_reply_data(server, 10), target);
+
+	ncp_unlock_server(server);
+	
+	result = ncp_obtain_nfs_info(server, target);
+	return result;
+
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+int ncp_search_for_fileset(struct ncp_server *server,
+			   struct nw_search_sequence *seq,
+			   int* more,
+			   int* cnt,
+			   char* buffer,
+			   size_t bufsize,
+			   char** rbuf,
+			   size_t* rsize)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 20);
+	ncp_add_byte(server, server->name_space[seq->volNumber]);
+	ncp_add_byte(server, 0);		/* datastream */
+	ncp_add_word(server, cpu_to_le16(0x8006));
+	ncp_add_dword(server, RIM_ALL);
+	ncp_add_word(server, cpu_to_le16(32767));	/* max returned items */
+	ncp_add_mem(server, seq, 9);
+#ifdef CONFIG_NCPFS_NFS_NS
+	if (server->name_space[seq->volNumber] == NW_NS_NFS) {
+		ncp_add_byte(server, 0);	/* 0 byte pattern */
+	} else 
+#endif
+	{
+		ncp_add_byte(server, 2);	/* 2 byte pattern */
+		ncp_add_byte(server, 0xff);	/* following is a wildcard */
+		ncp_add_byte(server, '*');
+	}
+	result = ncp_request2(server, 87, buffer, bufsize);
+	if (result) {
+		ncp_unlock_server(server);
+		return result;
+	}
+	if (server->ncp_reply_size < 12) {
+		ncp_unlock_server(server);
+		return 0xFF;
+	}
+	*rsize = server->ncp_reply_size - 12;
+	ncp_unlock_server(server);
+	buffer = buffer + sizeof(struct ncp_reply_header);
+	*rbuf = buffer + 12;
+	*cnt = WVAL_LH(buffer + 10);
+	*more = BVAL(buffer + 9);
+	memcpy(seq, buffer, 9);
+	return 0;
+}
+
+static int
+ncp_RenameNSEntry(struct ncp_server *server,
+		  struct inode *old_dir, char *old_name, __le16 old_type,
+		  struct inode *new_dir, char *new_name)
+{
+	int result = -EINVAL;
+
+	if ((old_dir == NULL) || (old_name == NULL) ||
+	    (new_dir == NULL) || (new_name == NULL))
+		goto out;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 4);	/* subfunction */
+	ncp_add_byte(server, server->name_space[NCP_FINFO(old_dir)->volNumber]);
+	ncp_add_byte(server, 1);	/* rename flag */
+	ncp_add_word(server, old_type);	/* search attributes */
+
+	/* source Handle Path */
+	ncp_add_byte(server, NCP_FINFO(old_dir)->volNumber);
+	ncp_add_dword(server, NCP_FINFO(old_dir)->dirEntNum);
+	ncp_add_byte(server, 1);
+	ncp_add_byte(server, 1);	/* 1 source component */
+
+	/* dest Handle Path */
+	ncp_add_byte(server, NCP_FINFO(new_dir)->volNumber);
+	ncp_add_dword(server, NCP_FINFO(new_dir)->dirEntNum);
+	ncp_add_byte(server, 1);
+	ncp_add_byte(server, 1);	/* 1 destination component */
+
+	/* source path string */
+	ncp_add_pstring(server, old_name);
+	/* dest path string */
+	ncp_add_pstring(server, new_name);
+
+	result = ncp_request(server, 87);
+	ncp_unlock_server(server);
+out:
+	return result;
+}
+
+int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
+				struct inode *old_dir, char *old_name,
+				struct inode *new_dir, char *new_name)
+{
+        int result;
+        __le16 old_type = cpu_to_le16(0x06);
+
+/* If somebody can do it atomically, call me... vandrove@vc.cvut.cz */
+	result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
+	                                   new_dir, new_name);
+        if (result == 0xFF)	/* File Not Found, try directory */
+	{
+		old_type = cpu_to_le16(0x16);
+		result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
+						   new_dir, new_name);
+	}
+	if (result != 0x92) return result;	/* All except NO_FILES_RENAMED */
+	result = ncp_del_file_or_subdir(server, new_dir, new_name);
+	if (result != 0) return -EACCES;
+	result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
+					   new_dir, new_name);
+	return result;
+}
+	
+
+/* We have to transfer to/from user space */
+int
+ncp_read_kernel(struct ncp_server *server, const char *file_id,
+	     __u32 offset, __u16 to_read, char *target, int *bytes_read)
+{
+	char *source;
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 0);
+	ncp_add_mem(server, file_id, 6);
+	ncp_add_be32(server, offset);
+	ncp_add_be16(server, to_read);
+
+	if ((result = ncp_request(server, 72)) != 0) {
+		goto out;
+	}
+	*bytes_read = ncp_reply_be16(server, 0);
+	source = ncp_reply_data(server, 2 + (offset & 1));
+
+	memcpy(target, source, *bytes_read);
+out:
+	ncp_unlock_server(server);
+	return result;
+}
+
+/* There is a problem... egrep and some other silly tools do:
+	x = mmap(NULL, MAP_PRIVATE, PROT_READ|PROT_WRITE, <ncpfs fd>, 32768);
+	read(<ncpfs fd>, x, 32768);
+   Copying the read result with copy_to_user then causes a page fault, which
+   cannot be handled because the server is locked for the duration of the read.
+   So we have to use a temporary buffer: ncp_unlock_server must be done before
+   copy_to_user (and for write, copy_from_user must be done before
+   ncp_init_request... the same applies to the send-raw-packet ioctl). Because
+   the file is normally read in bigger chunks, the caller provides a kmalloced
+   (vmalloced) chunk of memory with size >= to_read...
+ */
+int
+ncp_read_bounce(struct ncp_server *server, const char *file_id,
+	 __u32 offset, __u16 to_read, char __user *target, int *bytes_read,
+	 void* bounce, __u32 bufsize)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 0);
+	ncp_add_mem(server, file_id, 6);
+	ncp_add_be32(server, offset);
+	ncp_add_be16(server, to_read);
+	result = ncp_request2(server, 72, bounce, bufsize);
+	ncp_unlock_server(server);
+	if (!result) {
+		int len = be16_to_cpu(get_unaligned((__be16*)((char*)bounce + 
+			  sizeof(struct ncp_reply_header))));
+		result = -EIO;
+		if (len <= to_read) {
+			char* source;
+
+			source = (char*)bounce + 
+			         sizeof(struct ncp_reply_header) + 2 + 
+			         (offset & 1);
+			*bytes_read = len;
+			result = 0;
+			if (copy_to_user(target, source, len))
+				result = -EFAULT;
+		}
+	}
+	return result;
+}
+
+int
+ncp_write_kernel(struct ncp_server *server, const char *file_id,
+		 __u32 offset, __u16 to_write,
+		 const char *source, int *bytes_written)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 0);
+	ncp_add_mem(server, file_id, 6);
+	ncp_add_be32(server, offset);
+	ncp_add_be16(server, to_write);
+	ncp_add_mem(server, source, to_write);
+	
+	if ((result = ncp_request(server, 73)) == 0)
+		*bytes_written = to_write;
+	ncp_unlock_server(server);
+	return result;
+}
+
+#ifdef CONFIG_NCPFS_IOCTL_LOCKING
+int
+ncp_LogPhysicalRecord(struct ncp_server *server, const char *file_id,
+	  __u8 locktype, __u32 offset, __u32 length, __u16 timeout)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, locktype);
+	ncp_add_mem(server, file_id, 6);
+	ncp_add_be32(server, offset);
+	ncp_add_be32(server, length);
+	ncp_add_be16(server, timeout);
+
+	if ((result = ncp_request(server, 0x1A)) != 0)
+	{
+		ncp_unlock_server(server);
+		return result;
+	}
+	ncp_unlock_server(server);
+	return 0;
+}
+
+int
+ncp_ClearPhysicalRecord(struct ncp_server *server, const char *file_id,
+	  __u32 offset, __u32 length)
+{
+	int result;
+
+	ncp_init_request(server);
+	ncp_add_byte(server, 0);	/* who knows... lanalyzer says that */
+	ncp_add_mem(server, file_id, 6);
+	ncp_add_be32(server, offset);
+	ncp_add_be32(server, length);
+
+	if ((result = ncp_request(server, 0x1E)) != 0)
+	{
+		ncp_unlock_server(server);
+		return result;
+	}
+	ncp_unlock_server(server);
+	return 0;
+}
+#endif	/* CONFIG_NCPFS_IOCTL_LOCKING */
+
+#ifdef CONFIG_NCPFS_NLS
+/* These are the NLS conversion routines, with inspiration and code parts
+ * taken from the vfat file system and hints from Petr Vandrovec.
+ */
+
+int
+ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen,
+		const unsigned char *iname, unsigned int ilen, int cc)
+{
+	struct nls_table *in = server->nls_io;
+	struct nls_table *out = server->nls_vol;
+	unsigned char *vname_start;
+	unsigned char *vname_end;
+	const unsigned char *iname_end;
+
+	iname_end = iname + ilen;
+	vname_start = vname;
+	vname_end = vname + *vlen - 1;
+
+	while (iname < iname_end) {
+		int chl;
+		wchar_t ec;
+
+		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
+			int k;
+
+			k = utf8_mbtowc(&ec, iname, iname_end - iname);
+			if (k < 0)
+				return -EINVAL;
+			iname += k;
+		} else {
+			if (*iname == NCP_ESC) {
+				int k;
+
+				if (iname_end - iname < 5)
+					goto nospec;
+
+				ec = 0;
+				for (k = 1; k < 5; k++) {
+					unsigned char nc;
+
+					nc = iname[k] - '0';
+					if (nc >= 10) {
+						nc -= 'A' - '0' - 10;
+						if ((nc < 10) || (nc > 15)) {
+							goto nospec;
+						}
+					}
+					ec = (ec << 4) | nc;
+				}
+				iname += 5;
+			} else {
+nospec:;			
+				if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0)
+					return chl;
+				iname += chl;
+			}
+		}
+
+		/* unitoupper should be here! */
+
+		chl = out->uni2char(ec, vname, vname_end - vname);
+		if (chl < 0)
+			return chl;
+
+		/* this is wrong... */
+		if (cc) {
+			int chi;
+
+			for (chi = 0; chi < chl; chi++){
+				vname[chi] = ncp_toupper(out, vname[chi]);
+			}
+		}
+		vname += chl;
+	}
+
+	*vname = 0;
+	*vlen = vname - vname_start;
+	return 0;
+}
+
+int
+ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen,
+		const unsigned char *vname, unsigned int vlen, int cc)
+{
+	struct nls_table *in = server->nls_vol;
+	struct nls_table *out = server->nls_io;
+	const unsigned char *vname_end;
+	unsigned char *iname_start;
+	unsigned char *iname_end;
+	unsigned char *vname_cc;
+	int err;
+
+	vname_cc = NULL;
+
+	if (cc) {
+		int i;
+
+		/* this is wrong! */
+		vname_cc = kmalloc(vlen, GFP_KERNEL);
+		if (!vname_cc)
+			return -ENOMEM;
+		for (i = 0; i < vlen; i++)
+			vname_cc[i] = ncp_tolower(in, vname[i]);
+		vname = vname_cc;
+	}
+
+	iname_start = iname;
+	iname_end = iname + *ilen - 1;
+	vname_end = vname + vlen;
+
+	while (vname < vname_end) {
+		wchar_t ec;
+		int chl;
+
+		if ( (chl = in->char2uni(vname, vname_end - vname, &ec)) < 0) {
+			err = chl;
+			goto quit;
+		}
+		vname += chl;
+
+		/* unitolower should be here! */
+
+		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
+			int k;
+
+			k = utf8_wctomb(iname, ec, iname_end - iname);
+			if (k < 0) {
+				err = -ENAMETOOLONG;
+				goto quit;
+			}
+			iname += k;
+		} else {
+			if ( (chl = out->uni2char(ec, iname, iname_end - iname)) >= 0) {
+				iname += chl;
+			} else {
+				int k;
+
+				if (iname_end - iname < 5) {
+					err = -ENAMETOOLONG;
+					goto quit;
+				}
+				*iname = NCP_ESC;
+				for (k = 4; k > 0; k--) {
+					unsigned char v;
+					
+					v = (ec & 0xF) + '0';
+					if (v > '9') {
+						v += 'A' - '9' - 1;
+					}
+					iname[k] = v;
+					ec >>= 4;
+				}
+				iname += 5;
+			}
+		}
+	}
+
+	*iname = 0;
+	*ilen = iname - iname_start;
+	err = 0;
+quit:;
+	if (cc)
+		kfree(vname_cc);
+	return err;
+}
+
+#else
+
+int
+ncp__io2vol(unsigned char *vname, unsigned int *vlen,
+		const unsigned char *iname, unsigned int ilen, int cc)
+{
+	int i;
+
+	if (*vlen <= ilen)
+		return -ENAMETOOLONG;
+
+	if (cc)
+		for (i = 0; i < ilen; i++) {
+			*vname = toupper(*iname);
+			vname++;
+			iname++;
+		}
+	else {
+		memmove(vname, iname, ilen);
+		vname += ilen;
+	}
+
+	*vlen = ilen;
+	*vname = 0;
+	return 0;
+}
+
+int
+ncp__vol2io(unsigned char *iname, unsigned int *ilen,
+		const unsigned char *vname, unsigned int vlen, int cc)
+{
+	int i;
+
+	if (*ilen <= vlen)
+		return -ENAMETOOLONG;
+
+	if (cc)
+		for (i = 0; i < vlen; i++) {
+			*iname = tolower(*vname);
+			iname++;
+			vname++;
+		}
+	else {
+		memmove(iname, vname, vlen);
+		iname += vlen;
+	}
+
+	*ilen = vlen;
+	*iname = 0;
+	return 0;
+}
+
+#endif
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
new file mode 100644
index 0000000..05ec2e9
--- /dev/null
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -0,0 +1,259 @@
+/*
+ *  ncplib_kernel.h
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Modified for big endian by J.F. Chadima and David S. Miller
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *  Modified 1998, 1999 Wolfram Pienkoss for NLS
+ *  Modified 1999 Wolfram Pienkoss for directory caching
+ *
+ */
+
+#ifndef _NCPLIB_H
+#define _NCPLIB_H
+
+#include <linux/config.h>
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/pagemap.h>
+
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <asm/string.h>
+
+#ifdef CONFIG_NCPFS_NLS
+#include <linux/nls.h>
+#else
+#include <linux/ctype.h>
+#endif /* CONFIG_NCPFS_NLS */
+
+#include <linux/ncp_fs.h>
+
+#define NCP_MIN_SYMLINK_SIZE	8
+#define NCP_MAX_SYMLINK_SIZE	512
+
+#define NCP_BLOCK_SHIFT		9
+#define NCP_BLOCK_SIZE		(1 << (NCP_BLOCK_SHIFT))
+
+int ncp_negotiate_buffersize(struct ncp_server *, int, int *);
+int ncp_negotiate_size_and_options(struct ncp_server *server, int size,
+  			  int options, int *ret_size, int *ret_options);
+
+int ncp_get_volume_info_with_number(struct ncp_server* server, int n,
+				    struct ncp_volume_info *target);
+
+int ncp_get_directory_info(struct ncp_server* server, __u8 dirhandle,
+			   struct ncp_volume_info* target);
+
+int ncp_close_file(struct ncp_server *, const char *);
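+/* Size of the bounce buffer ncp_read_bounce needs for a read of 'size'
+ * bytes: the NCP reply header plus some slack around the data. */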
+static inline int ncp_read_bounce_size(__u32 size) {
+	return sizeof(struct ncp_reply_header) + 2 + 2 + size + 8;
+};
+int ncp_read_bounce(struct ncp_server *, const char *, __u32, __u16, 
+		char __user *, int *, void* bounce, __u32 bouncelen);
+int ncp_read_kernel(struct ncp_server *, const char *, __u32, __u16, 
+		char *, int *);
+int ncp_write_kernel(struct ncp_server *, const char *, __u32, __u16,
+		const char *, int *);
+
+static inline void ncp_inode_close(struct inode *inode) {
+	atomic_dec(&NCP_FINFO(inode)->opened);
+}
+
+void ncp_extract_file_info(void* src, struct nw_info_struct* target);
+int ncp_obtain_info(struct ncp_server *server, struct inode *, char *,
+		struct nw_info_struct *target);
+int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target);
+int ncp_get_volume_root(struct ncp_server *server, const char *volname,
+			__u32 *volume, __le32 *dirent, __le32 *dosdirent);
+int ncp_lookup_volume(struct ncp_server *, const char *, struct nw_info_struct *);
+int ncp_modify_file_or_subdir_dos_info(struct ncp_server *, struct inode *,
+	 __le32, const struct nw_modify_dos_info *info);
+int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *, struct inode *,
+	 const char* path, __le32, const struct nw_modify_dos_info *info);
+int ncp_modify_nfs_info(struct ncp_server *, __u8 volnum, __le32 dirent,
+			__u32 mode, __u32 rdev);
+
+int ncp_del_file_or_subdir2(struct ncp_server *, struct dentry*);
+int ncp_del_file_or_subdir(struct ncp_server *, struct inode *, char *);
+int ncp_open_create_file_or_subdir(struct ncp_server *, struct inode *, char *,
+				int, __le32, __le16, struct ncp_entry_info *);
+
+int ncp_initialize_search(struct ncp_server *, struct inode *,
+		      struct nw_search_sequence *target);
+int ncp_search_for_file_or_subdir(struct ncp_server *server,
+			      struct nw_search_sequence *seq,
+			      struct nw_info_struct *target);
+int ncp_search_for_fileset(struct ncp_server *server,
+			   struct nw_search_sequence *seq,
+			   int* more, int* cnt,
+			   char* buffer, size_t bufsize,
+			   char** rbuf, size_t* rsize);
+
+int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
+			      struct inode *, char *, struct inode *, char *);
+
+
+int
+ncp_LogPhysicalRecord(struct ncp_server *server,
+		      const char *file_id, __u8 locktype,
+		      __u32 offset, __u32 length, __u16 timeout);
+
+#ifdef CONFIG_NCPFS_IOCTL_LOCKING
+int
+ncp_ClearPhysicalRecord(struct ncp_server *server,
+			const char *file_id,
+			__u32 offset, __u32 length);
+#endif	/* CONFIG_NCPFS_IOCTL_LOCKING */
+
+int
+ncp_mount_subdir(struct ncp_server *, __u8, __u8, __le32,
+		 __u32* volume, __le32* dirent, __le32* dosdirent);
+int ncp_dirhandle_alloc(struct ncp_server *, __u8 vol, __le32 dirent, __u8 *dirhandle);
+int ncp_dirhandle_free(struct ncp_server *, __u8 dirhandle);
+
+int ncp_create_new(struct inode *dir, struct dentry *dentry,
+                          int mode, dev_t rdev, __le32 attributes);
+
+static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int volnum) {
+#ifdef CONFIG_NCPFS_NFS_NS
+	return (server->m.flags & NCP_MOUNT_NFS_EXTRAS) &&
+	       (server->name_space[volnum] == NW_NS_NFS);
+#else
+	return 0;
+#endif
+}
+
+#ifdef CONFIG_NCPFS_NLS
+
+int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
+				const unsigned char *, unsigned int, int);
+int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
+				const unsigned char *, unsigned int, int);
+
+#define NCP_ESC			':'
+#define NCP_IO_TABLE(dentry)	(NCP_SERVER((dentry)->d_inode)->nls_io)
+#define ncp_tolower(t, c)	nls_tolower(t, c)
+#define ncp_toupper(t, c)	nls_toupper(t, c)
+#define ncp_strnicmp(t, s1, s2, len) \
+	nls_strnicmp(t, s1, s2, len)
+#define ncp_io2vol(S,m,i,n,k,U)	ncp__io2vol(S,m,i,n,k,U)
+#define ncp_vol2io(S,m,i,n,k,U)	ncp__vol2io(S,m,i,n,k,U)
+
+#else
+
+int ncp__io2vol(unsigned char *, unsigned int *,
+				const unsigned char *, unsigned int, int);
+int ncp__vol2io(unsigned char *, unsigned int *,
+				const unsigned char *, unsigned int, int);
+
+#define NCP_IO_TABLE(dentry)	NULL
+#define ncp_tolower(t, c)	tolower(c)
+#define ncp_toupper(t, c)	toupper(c)
+#define ncp_io2vol(S,m,i,n,k,U)	ncp__io2vol(m,i,n,k,U)
+#define ncp_vol2io(S,m,i,n,k,U)	ncp__vol2io(m,i,n,k,U)
+
+
+static inline int ncp_strnicmp(struct nls_table *t, const unsigned char *s1,
+		const unsigned char *s2, int len)
+{
+	while (len--) {
+		if (tolower(*s1++) != tolower(*s2++))
+			return 1;
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_NCPFS_NLS */
+
+#define NCP_GET_AGE(dentry)	(jiffies - (dentry)->d_time)
+#define NCP_MAX_AGE(server)	((server)->dentry_ttl)
+#define NCP_TEST_AGE(server,dentry)	(NCP_GET_AGE(dentry) < NCP_MAX_AGE(server))
+
+static inline void
+ncp_age_dentry(struct ncp_server* server, struct dentry* dentry)
+{
+	dentry->d_time = jiffies - server->dentry_ttl;
+}
+
+static inline void
+ncp_new_dentry(struct dentry* dentry)
+{
+	dentry->d_time = jiffies;
+}
+
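+/* Refresh the timestamps of cached dentries below 'parent': entries whose
+ * d_fsdata is set are marked fresh, the rest are aged so that they get
+ * revalidated. */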
+static inline void
+ncp_renew_dentries(struct dentry *parent)
+{
+	struct ncp_server *server = NCP_SERVER(parent->d_inode);
+	struct list_head *next;
+	struct dentry *dentry;
+
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dentry = list_entry(next, struct dentry, d_child);
+
+		if (dentry->d_fsdata == NULL)
+			ncp_age_dentry(server, dentry);
+		else
+			ncp_new_dentry(dentry);
+
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+static inline void
+ncp_invalidate_dircache_entries(struct dentry *parent)
+{
+	struct ncp_server *server = NCP_SERVER(parent->d_inode);
+	struct list_head *next;
+	struct dentry *dentry;
+
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dentry = list_entry(next, struct dentry, d_child);
+		dentry->d_fsdata = NULL;
+		ncp_age_dentry(server, dentry);
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+struct ncp_cache_head {
+	time_t		mtime;
+	unsigned long	time;	/* cache age */
+	unsigned long	end;	/* last valid fpos in cache */
+	int		eof;
+};
+
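+/* The directory cache occupies a single page: the ncp_cache_head overlays
+ * the start of the page and dentry pointers fill the remaining slots,
+ * beginning at NCP_DIRCACHE_START. */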
+#define NCP_DIRCACHE_SIZE	((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+union ncp_dir_cache {
+	struct ncp_cache_head	head;
+	struct dentry		*dentry[NCP_DIRCACHE_SIZE];
+};
+
+#define NCP_FIRSTCACHE_SIZE	((int)((NCP_DIRCACHE_SIZE * \
+	sizeof(struct dentry *) - sizeof(struct ncp_cache_head)) / \
+	sizeof(struct dentry *)))
+
+#define NCP_DIRCACHE_START	(NCP_DIRCACHE_SIZE - NCP_FIRSTCACHE_SIZE)
+
+struct ncp_cache_control {
+	struct	ncp_cache_head		head;
+	struct	page			*page;
+	union	ncp_dir_cache		*cache;
+	unsigned long			fpos, ofs;
+	int				filled, valid, idx;
+};
+
+#endif /* _NCPLIB_H */
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
new file mode 100644
index 0000000..a6ec90c
--- /dev/null
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -0,0 +1,127 @@
+/*
+ *  ncpsign_kernel.c
+ *
+ *  Arne de Bruijn (arne@knoware.nl), 1997
+ *
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+
+#include <linux/string.h>
+#include <linux/ncp.h>
+#include <linux/bitops.h>
+#include "ncpsign_kernel.h"
+
+/* i386: 32-bit, little endian, handles mis-alignment */
+#ifdef __i386__
+#define GET_LE32(p) (*(int *)(p))
+#define PUT_LE32(p,v) { *(int *)(p)=v; }
+#else
+/* from include/ncplib.h */
+#define BVAL(buf,pos) (((__u8 *)(buf))[pos])
+#define PVAL(buf,pos) ((unsigned)BVAL(buf,pos))
+#define BSET(buf,pos,val) (BVAL(buf,pos) = (val))
+
+static inline __u16
+WVAL_LH(__u8 * buf, int pos)
+{
+	return PVAL(buf, pos) | PVAL(buf, pos + 1) << 8;
+}
+static inline __u32
+DVAL_LH(__u8 * buf, int pos)
+{
+	return WVAL_LH(buf, pos) | WVAL_LH(buf, pos + 2) << 16;
+}
+static inline void
+WSET_LH(__u8 * buf, int pos, __u16 val)
+{
+	BSET(buf, pos, val & 0xff);
+	BSET(buf, pos + 1, val >> 8);
+}
+static inline void
+DSET_LH(__u8 * buf, int pos, __u32 val)
+{
+	WSET_LH(buf, pos, val & 0xffff);
+	WSET_LH(buf, pos + 2, val >> 16);
+}
+
+#define GET_LE32(p) DVAL_LH(p,0)
+#define PUT_LE32(p,v) DSET_LH(p,0,v)
+#endif
+
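+/* MD4-style mixing function: combines the 16-byte state in r_data1 with the
+ * 64-byte block in r_data2 and stores the 16-byte result in outdata. */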
+static void nwsign(char *r_data1, char *r_data2, char *outdata) {
+ int i;
+ unsigned int w0,w1,w2,w3;
+ static int rbit[4]={0, 2, 1, 3};
+#ifdef __i386__
+ unsigned int *data2=(int *)r_data2;
+#else
+ unsigned int data2[16];
+ for (i=0;i<16;i++)
+  data2[i]=GET_LE32(r_data2+(i<<2));
+#endif 
+ w0=GET_LE32(r_data1);
+ w1=GET_LE32(r_data1+4);
+ w2=GET_LE32(r_data1+8);
+ w3=GET_LE32(r_data1+12);
+ for (i=0;i<16;i+=4) {
+  w0=rol32(w0 + ((w1 & w2) | ((~w1) & w3)) + data2[i+0],3);
+  w3=rol32(w3 + ((w0 & w1) | ((~w0) & w2)) + data2[i+1],7);
+  w2=rol32(w2 + ((w3 & w0) | ((~w3) & w1)) + data2[i+2],11);
+  w1=rol32(w1 + ((w2 & w3) | ((~w2) & w0)) + data2[i+3],19);
+ }
+ for (i=0;i<4;i++) {
+  w0=rol32(w0 + (((w2 | w3) & w1) | (w2 & w3)) + 0x5a827999 + data2[i+0],3);
+  w3=rol32(w3 + (((w1 | w2) & w0) | (w1 & w2)) + 0x5a827999 + data2[i+4],5);
+  w2=rol32(w2 + (((w0 | w1) & w3) | (w0 & w1)) + 0x5a827999 + data2[i+8],9);
+  w1=rol32(w1 + (((w3 | w0) & w2) | (w3 & w0)) + 0x5a827999 + data2[i+12],13);
+ }
+ for (i=0;i<4;i++) {
+  w0=rol32(w0 + ((w1 ^ w2) ^ w3) + 0x6ed9eba1 + data2[rbit[i]+0],3);
+  w3=rol32(w3 + ((w0 ^ w1) ^ w2) + 0x6ed9eba1 + data2[rbit[i]+8],9);
+  w2=rol32(w2 + ((w3 ^ w0) ^ w1) + 0x6ed9eba1 + data2[rbit[i]+4],11);
+  w1=rol32(w1 + ((w2 ^ w3) ^ w0) + 0x6ed9eba1 + data2[rbit[i]+12],15);
+ }
+ PUT_LE32(outdata,(w0+GET_LE32(r_data1)) & 0xffffffff);
+ PUT_LE32(outdata+4,(w1+GET_LE32(r_data1+4)) & 0xffffffff);
+ PUT_LE32(outdata+8,(w2+GET_LE32(r_data1+8)) & 0xffffffff);
+ PUT_LE32(outdata+12,(w3+GET_LE32(r_data1+12)) & 0xffffffff);
+}
+
+/* Make a signature for the current packet and add it at the end of
+ * the packet. */
+void __sign_packet(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, void *sign_buff) {
+	unsigned char data[64];
+
+	memcpy(data, server->sign_root, 8);
+	*(__u32*)(data + 8) = totalsize;
+	if (size < 52) {
+		memcpy(data + 12, packet, size);
+		memset(data + 12 + size, 0, 52 - size);
+	} else {
+		memcpy(data + 12, packet, 52);
+	}
+	nwsign(server->sign_last, data, server->sign_last);
+	memcpy(sign_buff, server->sign_last, 8);
+}
+
+int sign_verify_reply(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, const void *sign_buff) {
+	unsigned char data[64];
+	unsigned char hash[16];
+
+	memcpy(data, server->sign_root, 8);
+	*(__u32*)(data + 8) = totalsize;
+	if (size < 52) {
+		memcpy(data + 12, packet, size);
+		memset(data + 12 + size, 0, 52 - size);
+	} else {
+		memcpy(data + 12, packet, 52);
+	}
+	nwsign(server->sign_last, data, hash);
+	return memcmp(sign_buff, hash, 8);
+}
+
+#endif	/* CONFIG_NCPFS_PACKET_SIGNING */
+
diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h
new file mode 100644
index 0000000..6451a68
--- /dev/null
+++ b/fs/ncpfs/ncpsign_kernel.h
@@ -0,0 +1,28 @@
+/*
+ *  ncpsign_kernel.h
+ *
+ *  Arne de Bruijn (arne@knoware.nl), 1997
+ *
+ */
+ 
+#ifndef _NCPSIGN_KERNEL_H
+#define _NCPSIGN_KERNEL_H
+
+#include <linux/ncp_fs.h>
+
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff);
+int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff);
+#endif
+
+static inline size_t sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff) {
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+	if (server->sign_active) {
+		__sign_packet(server, data, size, totalsize, sign_buff);
+		return 8;
+	}
+#endif
+	return 0;
+}
+
+#endif
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
new file mode 100644
index 0000000..6593a5c
--- /dev/null
+++ b/fs/ncpfs/sock.c
@@ -0,0 +1,850 @@
+/*
+ *  linux/fs/ncpfs/sock.c
+ *
+ *  Copyright (C) 1992, 1993  Rick Sladkey
+ *
+ *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
+ *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <asm/uaccess.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <net/scm.h>
+#include <net/sock.h>
+#include <linux/ipx.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+
+#include <linux/ncp_fs.h>
+
+#include "ncpsign_kernel.h"
+
+static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
+{
+	struct msghdr msg = {NULL, };
+	struct kvec iov = {buf, size};
+	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
+
+static inline int do_send(struct socket *sock, struct kvec *vec, int count,
+			  int len, unsigned flags)
+{
+	struct msghdr msg = { .msg_flags = flags };
+	return kernel_sendmsg(sock, &msg, vec, count, len);
+}
+
+static int _send(struct socket *sock, const void *buff, int len)
+{
+	struct kvec vec;
+	vec.iov_base = (void *) buff;
+	vec.iov_len = len;
+	return do_send(sock, &vec, 1, len, 0);
+}
+
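+/* One in-flight NCP request: the transmit iovecs handed to the socket and
+ * the reply buffer that the receive path fills in. */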
+struct ncp_request_reply {
+	struct list_head req;
+	wait_queue_head_t wq;
+	struct ncp_reply_header* reply_buf;
+	size_t datalen;
+	int result;
+	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
+	struct kvec* tx_ciov;
+	size_t tx_totallen;
+	size_t tx_iovlen;
+	struct kvec tx_iov[3];
+	u_int16_t tx_type;
+	u_int32_t sign[6];
+};
+
+void ncp_tcp_data_ready(struct sock *sk, int len)
+{
+	struct ncp_server *server = sk->sk_user_data;
+
+	server->data_ready(sk, len);
+	schedule_work(&server->rcv.tq);
+}
+
+void ncp_tcp_error_report(struct sock *sk)
+{
+	struct ncp_server *server = sk->sk_user_data;
+	
+	server->error_report(sk);
+	schedule_work(&server->rcv.tq);
+}
+
+void ncp_tcp_write_space(struct sock *sk)
+{
+	struct ncp_server *server = sk->sk_user_data;
+	
+	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
+	   not vice versa... */
+	server->write_space(sk);
+	if (server->tx.creq)
+		schedule_work(&server->tx.tq);
+}
+
+void ncpdgram_timeout_call(unsigned long v)
+{
+	struct ncp_server *server = (void*)v;
+	
+	schedule_work(&server->timeout_tq);
+}
+
+static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
+{
+	req->result = result;
+	req->status = RQ_DONE;
+	wake_up_all(&req->wq);
+}
+
+static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
+{
+	struct ncp_request_reply *req;
+
+	ncp_invalidate_conn(server);
+	del_timer(&server->timeout_tm);
+	while (!list_empty(&server->tx.requests)) {
+		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
+		
+		list_del_init(&req->req);
+		if (req == aborted) {
+			ncp_finish_request(req, err);
+		} else {
+			ncp_finish_request(req, -EIO);
+		}
+	}
+	req = server->rcv.creq;
+	if (req) {
+		server->rcv.creq = NULL;
+		if (req == aborted) {
+			ncp_finish_request(req, err);
+		} else {
+			ncp_finish_request(req, -EIO);
+		}
+		server->rcv.ptr = NULL;
+		server->rcv.state = 0;
+	}
+	req = server->tx.creq;
+	if (req) {
+		server->tx.creq = NULL;
+		if (req == aborted) {
+			ncp_finish_request(req, err);
+		} else {
+			ncp_finish_request(req, -EIO);
+		}
+	}
+}
+
+static inline int get_conn_number(struct ncp_reply_header *rp)
+{
+	return rp->conn_low | (rp->conn_high << 8);
+}
+
+static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+{
+	/* If req is done, we got a signal, but we also received the answer... */
+	switch (req->status) {
+		case RQ_IDLE:
+		case RQ_DONE:
+			break;
+		case RQ_QUEUED:
+			list_del_init(&req->req);
+			ncp_finish_request(req, err);
+			break;
+		case RQ_INPROGRESS:
+			__abort_ncp_connection(server, req, err);
+			break;
+	}
+}
+
+static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+{
+	down(&server->rcv.creq_sem);
+	__ncp_abort_request(server, req, err);
+	up(&server->rcv.creq_sem);
+}
+
+static inline void __ncptcp_abort(struct ncp_server *server)
+{
+	__abort_ncp_connection(server, NULL, 0);
+}
+
+static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
+{
+	struct kvec vec[3];
+	/* sock_sendmsg updates iov pointers for us :-( */
+	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
+	return do_send(sock, vec, req->tx_iovlen,
+		       req->tx_totallen, MSG_DONTWAIT);
+}
+
+static void __ncptcp_try_send(struct ncp_server *server)
+{
+	struct ncp_request_reply *rq;
+	struct kvec *iov;
+	struct kvec iovc[3];
+	int result;
+
+	rq = server->tx.creq;
+	if (!rq)
+		return;
+
+	/* sock_sendmsg updates iov pointers for us :-( */
+	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
+	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
+			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
+
+	if (result == -EAGAIN)
+		return;
+
+	if (result < 0) {
+		printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
+		__ncp_abort_request(server, rq, result);
+		return;
+	}
+	if (result >= rq->tx_totallen) {
+		server->rcv.creq = rq;
+		server->tx.creq = NULL;
+		return;
+	}
+	rq->tx_totallen -= result;
+	iov = rq->tx_ciov;
+	while (iov->iov_len <= result) {
+		result -= iov->iov_len;
+		iov++;
+		rq->tx_iovlen--;
+	}
+	iov->iov_base += result;
+	iov->iov_len -= result;
+	rq->tx_ciov = iov;
+}
+
+static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
+{
+	req->status = RQ_INPROGRESS;
+	h->conn_low = server->connection;
+	h->conn_high = server->connection >> 8;
+	h->sequence = ++server->sequence;
+}
+	
+static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
+	size_t signlen;
+	struct ncp_request_header* h;
+	
+	req->tx_ciov = req->tx_iov + 1;
+
+	h = req->tx_iov[1].iov_base;
+	ncp_init_header(server, req, h);
+	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, 
+			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
+			cpu_to_le32(req->tx_totallen), req->sign);
+	if (signlen) {
+		req->tx_ciov[1].iov_base = req->sign;
+		req->tx_ciov[1].iov_len = signlen;
+		req->tx_iovlen += 1;
+		req->tx_totallen += signlen;
+	}
+	server->rcv.creq = req;
+	server->timeout_last = server->m.time_out;
+	server->timeout_retries = server->m.retry_count;
+	ncpdgram_send(server->ncp_sock, req);
+	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
+}
+
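+/* Magic values framing NCP requests and replies on a TCP transport. */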
+#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
+#define NCP_TCP_XMIT_VERSION	(1)
+#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
+
+static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
+	size_t signlen;
+	struct ncp_request_header* h;
+
+	req->tx_ciov = req->tx_iov;
+	h = req->tx_iov[1].iov_base;
+	ncp_init_header(server, req, h);
+	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
+			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
+			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;
+
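+	/* Build the 16-byte NCP-over-TCP record header: magic, total record
+	 * length, protocol version and the advertised maximum reply size. */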
+	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
+	req->sign[1] = htonl(req->tx_totallen + signlen);
+	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
+	req->sign[3] = htonl(req->datalen + 8);
+	req->tx_iov[0].iov_base = req->sign;
+	req->tx_iov[0].iov_len = signlen;
+	req->tx_iovlen += 1;
+	req->tx_totallen += signlen;
+
+	server->tx.creq = req;
+	__ncptcp_try_send(server);
+}
+
+static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
+	if (server->ncp_sock->type == SOCK_STREAM)
+		ncptcp_start_request(server, req);
+	else
+		ncpdgram_start_request(server, req);
+}
+
+static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
+	down(&server->rcv.creq_sem);
+	if (!ncp_conn_valid(server)) {
+		up(&server->rcv.creq_sem);
+		printk(KERN_ERR "ncpfs: tcp: Server died\n");
+		return -EIO;
+	}
+	if (server->tx.creq || server->rcv.creq) {
+		req->status = RQ_QUEUED;
+		list_add_tail(&req->req, &server->tx.requests);
+		up(&server->rcv.creq_sem);
+		return 0;
+	}
+	__ncp_start_request(server, req);
+	up(&server->rcv.creq_sem);
+	return 0;
+}
+
+static void __ncp_next_request(struct ncp_server *server)
+{
+	struct ncp_request_reply *req;
+
+	server->rcv.creq = NULL;
+	if (list_empty(&server->tx.requests)) {
+		return;
+	}
+	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
+	list_del_init(&req->req);
+	__ncp_start_request(server, req);
+}
+
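+/* Pass a packet to the userspace info socket, if one is attached, prefixed
+ * with its total length and an id word. */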
+static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
+{
+	if (server->info_sock) {
+		struct kvec iov[2];
+		__be32 hdr[2];
+	
+		hdr[0] = cpu_to_be32(len + 8);
+		hdr[1] = cpu_to_be32(id);
+	
+		iov[0].iov_base = hdr;
+		iov[0].iov_len = 8;
+		iov[1].iov_base = (void *) data;
+		iov[1].iov_len = len;
+
+		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
+	}
+}
+
+void ncpdgram_rcv_proc(void *s)
+{
+	struct ncp_server *server = s;
+	struct socket* sock;
+	
+	sock = server->ncp_sock;
+	
+	while (1) {
+		struct ncp_reply_header reply;
+		int result;
+
+		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
+		if (result < 0) {
+			break;
+		}
+		if (result >= sizeof(reply)) {
+			struct ncp_request_reply *req;
+	
+			if (reply.type == NCP_WATCHDOG) {
+				unsigned char buf[10];
+
+				if (server->connection != get_conn_number(&reply)) {
+					goto drop;
+				}
+				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
+				if (result < 0) {
+					DPRINTK("recv failed with %d\n", result);
+					continue;
+				}
+				if (result < 10) {
+					DPRINTK("too short (%u) watchdog packet\n", result);
+					continue;
+				}
+				if (buf[9] != '?') {
+					DPRINTK("bad signature (%02X) in watchdog packet\n", buf[9]);
+					continue;
+				}
+				buf[9] = 'Y';
+				_send(sock, buf, sizeof(buf));
+				continue;
+			}
+			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
+				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
+				if (result < 0) {
+					continue;
+				}
+				info_server(server, 0, server->unexpected_packet.data, result);
+				continue;
+			}
+			down(&server->rcv.creq_sem);		
+			req = server->rcv.creq;
+			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && 
+					server->connection == get_conn_number(&reply)))) {
+				if (reply.type == NCP_POSITIVE_ACK) {
+					server->timeout_retries = server->m.retry_count;
+					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
+					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
+				} else if (reply.type == NCP_REPLY) {
+					result = _recv(sock, (void*)req->reply_buf, req->datalen, MSG_DONTWAIT);
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
+					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
+						if (result < 8 + 8) {
+							result = -EIO;
+						} else {
+							unsigned int hdrl;
+							
+							result -= 8;
+							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
+							if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) {
+								printk(KERN_INFO "ncpfs: Signature violation\n");
+								result = -EIO;
+							}
+						}
+					}
+#endif
+					del_timer(&server->timeout_tm);
+				     	server->rcv.creq = NULL;
+					ncp_finish_request(req, result);
+					__ncp_next_request(server);
+					up(&server->rcv.creq_sem);
+					continue;
+				}
+			}
+			up(&server->rcv.creq_sem);
+		}
+drop:;		
+		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
+	}
+}
+
+static void __ncpdgram_timeout_proc(struct ncp_server *server)
+{
+	/* If timer is pending, we are processing another request... */
+	if (!timer_pending(&server->timeout_tm)) {
+		struct ncp_request_reply* req;
+		
+		req = server->rcv.creq;
+		if (req) {
+			int timeout;
+			
+			if (server->m.flags & NCP_MOUNT_SOFT) {
+				if (server->timeout_retries-- == 0) {
+					__ncp_abort_request(server, req, -ETIMEDOUT);
+					return;
+				}
+			}
+			/* Ignore errors */
+			ncpdgram_send(server->ncp_sock, req);
+			timeout = server->timeout_last << 1;
+			if (timeout > NCP_MAX_RPC_TIMEOUT) {
+				timeout = NCP_MAX_RPC_TIMEOUT;
+			}
+			server->timeout_last = timeout;
+			mod_timer(&server->timeout_tm, jiffies + timeout);
+		}
+	}
+}
+
+void ncpdgram_timeout_proc(void *s)
+{
+	struct ncp_server *server = s;
+	down(&server->rcv.creq_sem);
+	__ncpdgram_timeout_proc(server);
+	up(&server->rcv.creq_sem);
+}
+
+static inline void ncp_init_req(struct ncp_request_reply* req)
+{
+	init_waitqueue_head(&req->wq);
+	req->status = RQ_IDLE;
+}
+
+static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
+{
+	int result;
+	
+	if (buffer) {
+		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
+	} else {
+		static unsigned char dummy[1024];
+			
+		if (len > sizeof(dummy)) {
+			len = sizeof(dummy);
+		}
+		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
+	}
+	if (result < 0) {
+		return result;
+	}
+	if (result > len) {
+		printk(KERN_ERR "ncpfs: tcp: bug in recvmsg (%u > %Zu)\n", result, len);
+		return -EIO;			
+	}
+	return result;
+}	
+
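+/* Receive state machine for NCP over TCP:
+ *  0 - waiting for the next record header
+ *  1 - copying reply data into the current request's buffer
+ *  2 - discarding data we are not interested in
+ *  3 - discarding an oversized reply, then failing the request
+ *  4 - reading the additional header bytes used with packet signing
+ *  5 - reading an unexpected packet to pass to the info socket
+ */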
+static int __ncptcp_rcv_proc(struct ncp_server *server)
+{
+	/* We have to check the result, so store the complete header */
+	while (1) {
+		int result;
+		struct ncp_request_reply *req;
+		int datalen;
+		int type;
+
+		while (server->rcv.len) {
+			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
+			if (result == -EAGAIN) {
+				return 0;
+			}
+			if (result <= 0) {
+				req = server->rcv.creq;
+				if (req) {
+					__ncp_abort_request(server, req, -EIO);
+				} else {
+					__ncptcp_abort(server);
+				}
+				if (result < 0) {
+					printk(KERN_ERR "ncpfs: tcp: error in recvmsg: %d\n", result);
+				} else {
+					DPRINTK(KERN_ERR "ncpfs: tcp: EOF\n");
+				}
+				return -EIO;
+			}
+			if (server->rcv.ptr) {
+				server->rcv.ptr += result;
+			}
+			server->rcv.len -= result;
+		}
+		switch (server->rcv.state) {
+			case 0:
+				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
+					printk(KERN_ERR "ncpfs: tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
+					__ncptcp_abort(server);
+					return -EIO;
+				}
+				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
+				if (datalen < 10) {
+					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
+					__ncptcp_abort(server);
+					return -EIO;
+				}
+#ifdef CONFIG_NCPFS_PACKET_SIGNING				
+				if (server->sign_active) {
+					if (datalen < 18) {
+						printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
+						__ncptcp_abort(server);
+						return -EIO;
+					}
+					server->rcv.buf.len = datalen - 8;
+					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
+					server->rcv.len = 8;
+					server->rcv.state = 4;
+					break;
+				}
+#endif				
+				type = ntohs(server->rcv.buf.type);
+#ifdef CONFIG_NCPFS_PACKET_SIGNING				
+cont:;				
+#endif
+				if (type != NCP_REPLY) {
+					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
+						*(__u16*)(server->unexpected_packet.data) = htons(type);
+						server->unexpected_packet.len = datalen - 8;
+
+						server->rcv.state = 5;
+						server->rcv.ptr = server->unexpected_packet.data + 2;
+						server->rcv.len = datalen - 10;
+						break;
+					}					
+					DPRINTK("ncpfs: tcp: Unexpected NCP type %02X\n", type);
+skipdata2:;
+					server->rcv.state = 2;
+skipdata:;
+					server->rcv.ptr = NULL;
+					server->rcv.len = datalen - 10;
+					break;
+				}
+				req = server->rcv.creq;
+				if (!req) {
+					DPRINTK(KERN_ERR "ncpfs: Reply without appropriate request\n");
+					goto skipdata2;
+				}
+				if (datalen > req->datalen + 8) {
+					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
+					server->rcv.state = 3;
+					goto skipdata;
+				}
+				req->datalen = datalen - 8;
+				req->reply_buf->type = NCP_REPLY;
+				server->rcv.ptr = (unsigned char*)(req->reply_buf) + 2;
+				server->rcv.len = datalen - 10;
+				server->rcv.state = 1;
+				break;
+#ifdef CONFIG_NCPFS_PACKET_SIGNING				
+			case 4:
+				datalen = server->rcv.buf.len;
+				type = ntohs(server->rcv.buf.type2);
+				goto cont;
+#endif
+			case 1:
+				req = server->rcv.creq;
+				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
+					if (req->reply_buf->sequence != server->sequence) {
+						printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
+						__ncp_abort_request(server, req, -EIO);
+						return -EIO;
+					}
+					if ((req->reply_buf->conn_low | (req->reply_buf->conn_high << 8)) != server->connection) {
+						printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
+						__ncp_abort_request(server, req, -EIO);
+						return -EIO;
+					}
+				}
+#ifdef CONFIG_NCPFS_PACKET_SIGNING				
+				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
+					if (sign_verify_reply(server, (unsigned char*)(req->reply_buf) + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
+						printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
+						__ncp_abort_request(server, req, -EIO);
+						return -EIO;
+					}
+				}
+#endif				
+				ncp_finish_request(req, req->datalen);
+			nextreq:;
+				__ncp_next_request(server);
+			case 2:
+			next:;
+				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
+				server->rcv.len = 10;
+				server->rcv.state = 0;
+				break;
+			case 3:
+				ncp_finish_request(server->rcv.creq, -EIO);
+				goto nextreq;
+			case 5:
+				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
+				goto next;
+		}
+	}
+}
+
+void ncp_tcp_rcv_proc(void *s)
+{
+	struct ncp_server *server = s;
+
+	down(&server->rcv.creq_sem);
+	__ncptcp_rcv_proc(server);
+	up(&server->rcv.creq_sem);
+}
+
+void ncp_tcp_tx_proc(void *s)
+{
+	struct ncp_server *server = s;
+	
+	down(&server->rcv.creq_sem);
+	__ncptcp_try_send(server);
+	up(&server->rcv.creq_sem);
+}
+
+static int do_ncp_rpc_call(struct ncp_server *server, int size,
+		struct ncp_reply_header* reply_buf, int max_reply_size)
+{
+	int result;
+	struct ncp_request_reply req;
+
+	ncp_init_req(&req);
+	req.reply_buf = reply_buf;
+	req.datalen = max_reply_size;
+	req.tx_iov[1].iov_base = server->packet;
+	req.tx_iov[1].iov_len = size;
+	req.tx_iovlen = 1;
+	req.tx_totallen = size;
+	req.tx_type = *(u_int16_t*)server->packet;
+
+	result = ncp_add_request(server, &req);
+	if (result < 0) {
+		return result;
+	}
+	if (wait_event_interruptible(req.wq, req.status == RQ_DONE)) {
+		ncp_abort_request(server, &req, -EIO);
+	}
+	return req.result;
+}
+
+/*
+ * We need the server to be locked here, so check!
+ */
+
+static int ncp_do_request(struct ncp_server *server, int size,
+		void* reply, int max_reply_size)
+{
+	int result;
+
+	if (server->lock == 0) {
+		printk(KERN_ERR "ncpfs: Server not locked!\n");
+		return -EIO;
+	}
+	if (!ncp_conn_valid(server)) {
+		printk(KERN_ERR "ncpfs: Connection invalid!\n");
+		return -EIO;
+	}
+	{
+		sigset_t old_set;
+		unsigned long mask, flags;
+
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		old_set = current->blocked;
+		if (current->flags & PF_EXITING)
+			mask = 0;
+		else
+			mask = sigmask(SIGKILL);
+		if (server->m.flags & NCP_MOUNT_INTR) {
+			/* FIXME: This doesn't seem right at all.  So, like,
+			   we can't handle SIGINT and get whatever to stop?
+			   What if we've blocked it ourselves?  What about
+			   alarms?  Why, in fact, are we mucking with the
+			   sigmask at all? -- r~ */
+			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
+				mask |= sigmask(SIGINT);
+			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
+				mask |= sigmask(SIGQUIT);
+		}
+		siginitsetinv(&current->blocked, mask);
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+		
+		result = do_ncp_rpc_call(server, size, reply, max_reply_size);
+
+		spin_lock_irqsave(&current->sighand->siglock, flags);
+		current->blocked = old_set;
+		recalc_sigpending();
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+	}
+
+	DDPRINTK("do_ncp_rpc_call returned %d\n", result);
+
+	if (result < 0) {
+		/* There was a problem with I/O, so the connection is
+		 * no longer usable. */
+		ncp_invalidate_conn(server);
+	}
+	return result;
+}
+
+/* ncp_do_request ensures that at least a complete reply header is
+ * received. It assumes that server->current_size contains the ncp
+ * request size.
+ */
+int ncp_request2(struct ncp_server *server, int function, 
+		void* rpl, int size)
+{
+	struct ncp_request_header *h;
+	struct ncp_reply_header* reply = rpl;
+	int result;
+
+	h = (struct ncp_request_header *) (server->packet);
+	if (server->has_subfunction != 0) {
+		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
+	}
+	h->type = NCP_REQUEST;
+	/*
+	 * The server shouldn't know or care what task is making a
+	 * request, so we always use the same task number.
+	 */
+	h->task = 2; /* (current->pid) & 0xff; */
+	h->function = function;
+
+	result = ncp_do_request(server, server->current_size, reply, size);
+	if (result < 0) {
+		DPRINTK("ncp_request_error: %d\n", result);
+		goto out;
+	}
+	server->completion = reply->completion_code;
+	server->conn_status = reply->connection_state;
+	server->reply_size = result;
+	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);
+
+	result = reply->completion_code;
+
+	if (result != 0)
+		PPRINTK("ncp_request: completion code=%x\n", result);
+out:
+	return result;
+}
+
+int ncp_connect(struct ncp_server *server)
+{
+	struct ncp_request_header *h;
+	int result;
+
+	server->connection = 0xFFFF;
+	server->sequence = 255;
+
+	h = (struct ncp_request_header *) (server->packet);
+	h->type = NCP_ALLOC_SLOT_REQUEST;
+	h->task		= 2; /* see above */
+	h->function	= 0;
+
+	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
+	if (result < 0)
+		goto out;
+	server->connection = h->conn_low + (h->conn_high * 256);
+	result = 0;
+out:
+	return result;
+}
+
+int ncp_disconnect(struct ncp_server *server)
+{
+	struct ncp_request_header *h;
+
+	h = (struct ncp_request_header *) (server->packet);
+	h->type = NCP_DEALLOC_SLOT_REQUEST;
+	h->task		= 2; /* see above */
+	h->function	= 0;
+
+	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
+}
+
+void ncp_lock_server(struct ncp_server *server)
+{
+	down(&server->sem);
+	if (server->lock)
+		printk(KERN_WARNING "ncp_lock_server: was locked!\n");
+	server->lock = 1;
+}
+
+void ncp_unlock_server(struct ncp_server *server)
+{
+	if (!server->lock) {
+		printk(KERN_WARNING "ncp_unlock_server: was not locked!\n");
+		return;
+	}
+	server->lock = 0;
+	up(&server->sem);
+}
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
new file mode 100644
index 0000000..e935f1b
--- /dev/null
+++ b/fs/ncpfs/symlink.c
@@ -0,0 +1,183 @@
+/*
+ *  linux/fs/ncpfs/symlink.c
+ *
+ *  Code for allowing symbolic links on NCPFS (i.e. NetWare)
+ *  Symbolic links are not supported on native NetWare, so we use an
+ *  infrequently-used flag (Sh) and store a two-word magic header in
+ *  the file to make sure we don't accidentally use a non-link file
+ *  as a link.
+ *
+ *  When using the NFS namespace, we set the mode to indicate a symlink and
+ *  don't bother with the magic numbers.
+ *
+ *  from linux/fs/ext2/symlink.c
+ *
+ *  Copyright (C) 1998-99, Frank A. Vorstenbosch
+ *
+ *  ncpfs symlink handling code
+ *  NLS support (c) 1999 Petr Vandrovec
+ *  Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
+ *
+ */
+
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ncp_fs.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#include "ncplib_kernel.h"
+
+
+/* these magic numbers must appear in the symlink file -- this makes it a bit
+   more resilient against the magic attributes being set on random files. */
+
+#define NCP_SYMLINK_MAGIC0	cpu_to_le32(0x6c6d7973)     /* "symlnk->" */
+#define NCP_SYMLINK_MAGIC1	cpu_to_le32(0x3e2d6b6e)
+
+/* ----- read a symbolic link ------------------------------------------ */
+
+static int ncp_symlink_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	int error, length, len;
+	char *link, *rawlink;
+	char *buf = kmap(page);
+
+	error = -ENOMEM;
+	rawlink=(char *)kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+	if (!rawlink)
+		goto fail;
+
+	if (ncp_make_open(inode,O_RDONLY))
+		goto failEIO;
+
+	error=ncp_read_kernel(NCP_SERVER(inode),NCP_FINFO(inode)->file_handle,
+                         0,NCP_MAX_SYMLINK_SIZE,rawlink,&length);
+
+	ncp_inode_close(inode);
+	/* Close file handle if no other users... */
+	ncp_make_closed(inode);
+	if (error)
+		goto failEIO;
+
+	if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
+		if (length<NCP_MIN_SYMLINK_SIZE || 
+		    ((__le32 *)rawlink)[0]!=NCP_SYMLINK_MAGIC0 ||
+		    ((__le32 *)rawlink)[1]!=NCP_SYMLINK_MAGIC1)
+		    	goto failEIO;
+		link = rawlink + 8;
+		length -= 8;
+	} else {
+		link = rawlink;
+	}
+
+	len = NCP_MAX_SYMLINK_SIZE;
+	error = ncp_vol2io(NCP_SERVER(inode), buf, &len, link, length, 0);
+	kfree(rawlink);
+	if (error)
+		goto fail;
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+
+failEIO:
+	error = -EIO;
+	kfree(rawlink);
+fail:
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return error;
+}
+
+/*
+ * symlinks can't do much...
+ */
+struct address_space_operations ncp_symlink_aops = {
+	.readpage	= ncp_symlink_readpage,
+};
+	
+/* ----- create a new symbolic link -------------------------------------- */
+ 
+int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct inode *inode;
+	char *rawlink;
+	int length, err, i, outlen;
+	int kludge;
+	int mode;
+	__le32 attr;
+	unsigned int hdr;
+
+	DPRINTK("ncp_symlink(dir=%p,dentry=%p,symname=%s)\n",dir,dentry,symname);
+
+	if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber))
+		kludge = 0;
+	else
+#ifdef CONFIG_NCPFS_EXTRAS
+	if (NCP_SERVER(dir)->m.flags & NCP_MOUNT_SYMLINKS)
+		kludge = 1;
+	else
+#endif
+	/* EPERM is returned by VFS if symlink procedure does not exist */
+		return -EPERM;
+  
+	rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+	if (!rawlink)
+		return -ENOMEM;
+
+	if (kludge) {
+		mode = 0;
+		attr = aSHARED | aHIDDEN;
+		((__le32 *)rawlink)[0]=NCP_SYMLINK_MAGIC0;
+		((__le32 *)rawlink)[1]=NCP_SYMLINK_MAGIC1;
+		hdr = 8;
+	} else {
+		mode = S_IFLNK | S_IRWXUGO;
+		attr = 0;
+		hdr = 0;
+	}			
+
+	length = strlen(symname);
+	/* map to/from the server charset; do not touch upper/lower case, as
+	   the symlink may point outside the ncp filesystem */
+	outlen = NCP_MAX_SYMLINK_SIZE - hdr;
+	err = ncp_io2vol(NCP_SERVER(dir), rawlink + hdr, &outlen, symname, length, 0);
+	if (err)
+		goto failfree;
+
+	outlen += hdr;
+
+	err = -EIO;
+	if (ncp_create_new(dir,dentry,mode,0,attr)) {
+		goto failfree;
+	}
+
+	inode=dentry->d_inode;
+
+	if (ncp_make_open(inode, O_WRONLY))
+		goto failfree;
+
+	if (ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, 
+			     0, outlen, rawlink, &i) || i!=outlen) {
+		goto fail;
+	}
+
+	ncp_inode_close(inode);
+	ncp_make_closed(inode);
+	kfree(rawlink);
+	return 0;
+fail:
+	ncp_inode_close(inode);
+	ncp_make_closed(inode);
+failfree:
+	kfree(rawlink);
+	return err;
+}
+
+/* ----- EOF ----- */
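For reference, the on-disk "kludge" format handled above is simply an 8-byte magic header followed by the link target in the volume character set. Below is a minimal user-space sketch of packing that layout; only the header bytes mirror NCP_SYMLINK_MAGIC0/1 (stored little-endian they spell "symlnk->"), everything else (helper name, buffer sizes) is invented for illustration, and the charset translation done by ncp_io2vol() is skipped.

#include <stdio.h>
#include <string.h>

/* "symlnk->" is exactly the little-endian encoding of the two magic words
 * NCP_SYMLINK_MAGIC0/1 that ncp_symlink_readpage() checks above. */
#define DEMO_SYMLINK_HDR	"symlnk->"
#define DEMO_SYMLINK_HDRLEN	8

/* Pack a link target into buf; returns bytes used, or -1 if it will not fit.
 * Purely illustrative -- no charset translation is attempted here. */
static int demo_pack_symlink(char *buf, size_t buflen, const char *target)
{
	size_t len = strlen(target);

	if (DEMO_SYMLINK_HDRLEN + len > buflen)
		return -1;
	memcpy(buf, DEMO_SYMLINK_HDR, DEMO_SYMLINK_HDRLEN);	/* magic header */
	memcpy(buf + DEMO_SYMLINK_HDRLEN, target, len);		/* link target */
	return (int)(DEMO_SYMLINK_HDRLEN + len);
}

int main(void)
{
	char buf[256];
	int n = demo_pack_symlink(buf, sizeof(buf), "/some/target");

	if (n >= 0)
		printf("packed %d bytes, header \"%.8s\"\n", n, buf);
	return 0;
}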
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
new file mode 100644
index 0000000..b4baa03
--- /dev/null
+++ b/fs/nfs/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the Linux nfs filesystem routines.
+#
+
+obj-$(CONFIG_NFS_FS) += nfs.o
+
+nfs-y 			:= dir.o file.o inode.o nfs2xdr.o pagelist.o \
+			   proc.o read.o symlink.o unlink.o write.o
+nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o mount_clnt.o      
+nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+nfs-$(CONFIG_NFS_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
+			   delegation.o idmap.o \
+			   callback.o callback_xdr.o callback_proc.o
+nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
+nfs-objs		:= $(nfs-y)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
new file mode 100644
index 0000000..560d617
--- /dev/null
+++ b/fs/nfs/callback.c
@@ -0,0 +1,187 @@
+/*
+ * linux/fs/nfs/callback.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback handling
+ */
+
+#include <linux/config.h>
+#include <linux/completion.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/nfs_fs.h>
+#include "callback.h"
+
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+
+struct nfs_callback_data {
+	unsigned int users;
+	struct svc_serv *serv;
+	pid_t pid;
+	struct completion started;
+	struct completion stopped;
+};
+
+static struct nfs_callback_data nfs_callback_info;
+static DECLARE_MUTEX(nfs_callback_sema);
+static struct svc_program nfs4_callback_program;
+
+unsigned short nfs_callback_tcpport;
+
+/*
+ * This is the callback kernel thread.
+ */
+static void nfs_callback_svc(struct svc_rqst *rqstp)
+{
+	struct svc_serv *serv = rqstp->rq_server;
+	int err;
+
+	__module_get(THIS_MODULE);
+	lock_kernel();
+
+	nfs_callback_info.pid = current->pid;
+	daemonize("nfsv4-svc");
+	/* Process request with signals blocked, but allow SIGKILL.  */
+	allow_signal(SIGKILL);
+
+	complete(&nfs_callback_info.started);
+
+	while (nfs_callback_info.users != 0 || !signalled()) {
+		/*
+		 * Listen for a request on the socket
+		 */
+		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
+		if (err == -EAGAIN || err == -EINTR)
+			continue;
+		if (err < 0) {
+			printk(KERN_WARNING
+					"%s: terminating on error %d\n",
+					__FUNCTION__, -err);
+			break;
+		}
+		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
+				NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
+		svc_process(serv, rqstp);
+	}
+
+	nfs_callback_info.pid = 0;
+	complete(&nfs_callback_info.stopped);
+	unlock_kernel();
+	module_put_and_exit(0);
+}
+
+/*
+ * Bring up the server process if it is not already up.
+ */
+int nfs_callback_up(void)
+{
+	struct svc_serv *serv;
+	struct svc_sock *svsk;
+	int ret = 0;
+
+	lock_kernel();
+	down(&nfs_callback_sema);
+	if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
+		goto out;
+	init_completion(&nfs_callback_info.started);
+	init_completion(&nfs_callback_info.stopped);
+	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE);
+	ret = -ENOMEM;
+	if (!serv)
+		goto out_err;
+	/* FIXME: We don't want to register this socket with the portmapper */
+	ret = svc_makesock(serv, IPPROTO_TCP, 0);
+	if (ret < 0)
+		goto out_destroy;
+	if (!list_empty(&serv->sv_permsocks)) {
+		svsk = list_entry(serv->sv_permsocks.next,
+				struct svc_sock, sk_list);
+		nfs_callback_tcpport = ntohs(inet_sk(svsk->sk_sk)->sport);
+		dprintk ("Callback port = 0x%x\n", nfs_callback_tcpport);
+	} else
+		BUG();
+	ret = svc_create_thread(nfs_callback_svc, serv);
+	if (ret < 0)
+		goto out_destroy;
+	nfs_callback_info.serv = serv;
+	wait_for_completion(&nfs_callback_info.started);
+out:
+	up(&nfs_callback_sema);
+	unlock_kernel();
+	return ret;
+out_destroy:
+	svc_destroy(serv);
+out_err:
+	nfs_callback_info.users--;
+	goto out;
+}
+
+/*
+ * Kill the server process if it is no longer in use.
+ */
+int nfs_callback_down(void)
+{
+	int ret = 0;
+
+	lock_kernel();
+	down(&nfs_callback_sema);
+	if (--nfs_callback_info.users || nfs_callback_info.pid == 0)
+		goto out;
+	kill_proc(nfs_callback_info.pid, SIGKILL, 1);
+	wait_for_completion(&nfs_callback_info.stopped);
+out:
+	up(&nfs_callback_sema);
+	unlock_kernel();
+	return ret;
+}
+
+static int nfs_callback_authenticate(struct svc_rqst *rqstp)
+{
+	struct in_addr *addr = &rqstp->rq_addr.sin_addr;
+	struct nfs4_client *clp;
+
+	/* Don't talk to strangers */
+	clp = nfs4_find_client(addr);
+	if (clp == NULL)
+		return SVC_DROP;
+	dprintk("%s: %u.%u.%u.%u NFSv4 callback!\n", __FUNCTION__, NIPQUAD(addr));
+	nfs4_put_client(clp);
+	switch (rqstp->rq_authop->flavour) {
+		case RPC_AUTH_NULL:
+			if (rqstp->rq_proc != CB_NULL)
+				return SVC_DENIED;
+			break;
+		case RPC_AUTH_UNIX:
+			break;
+		case RPC_AUTH_GSS:
+			/* FIXME: RPCSEC_GSS handling? */
+		default:
+			return SVC_DENIED;
+	}
+	return SVC_OK;
+}
+
+/*
+ * Define NFS4 callback program
+ */
+extern struct svc_version nfs4_callback_version1;
+
+static struct svc_version *nfs4_callback_version[] = {
+	[1] = &nfs4_callback_version1,
+};
+
+static struct svc_stat nfs4_callback_stats;
+
+static struct svc_program nfs4_callback_program = {
+	.pg_prog = NFS4_CALLBACK,			/* RPC service number */
+	.pg_nvers = ARRAY_SIZE(nfs4_callback_version),	/* Number of entries */
+	.pg_vers = nfs4_callback_version,		/* version table */
+	.pg_name = "NFSv4 callback",			/* service name */
+	.pg_class = "nfs",				/* authentication class */
+	.pg_stats = &nfs4_callback_stats,
+	.pg_authenticate = nfs_callback_authenticate,
+};
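The bring-up/tear-down logic above follows a common pattern: a use count protected by a lock, where the first caller of nfs_callback_up() creates the service thread and the last caller of nfs_callback_down() tells it to stop and waits for it to exit. A rough user-space sketch of that pattern with pthreads follows; all names are invented and this is an analogy to the shape of the code, not the svc_* API.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t svc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int svc_users;		/* how many mounts need the service */
static bool svc_stop;			/* set by the last user going away */
static pthread_t svc_thread;

static void *svc_main(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&svc_lock);
		bool stop = svc_stop;
		pthread_mutex_unlock(&svc_lock);
		if (stop)
			break;
		/* ... listen for and process one request here ... */
		usleep(1000);
	}
	return NULL;
}

static int svc_up(void)
{
	int err = 0;

	pthread_mutex_lock(&svc_lock);
	if (svc_users++ == 0) {		/* first user brings the thread up */
		svc_stop = false;
		err = pthread_create(&svc_thread, NULL, svc_main, NULL);
		if (err)
			svc_users--;	/* undo on failure, as nfs_callback_up() does */
	}
	pthread_mutex_unlock(&svc_lock);
	return err;
}

static void svc_down(void)
{
	bool last;

	pthread_mutex_lock(&svc_lock);
	last = (--svc_users == 0);	/* last user tears the thread down */
	if (last)
		svc_stop = true;
	pthread_mutex_unlock(&svc_lock);
	if (last)
		pthread_join(svc_thread, NULL);	/* wait until it has stopped */
}

int main(void)
{
	if (svc_up() == 0) {
		usleep(10000);
		svc_down();
	}
	return 0;
}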
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
new file mode 100644
index 0000000..a0db2d4f9
--- /dev/null
+++ b/fs/nfs/callback.h
@@ -0,0 +1,70 @@
+/*
+ * linux/fs/nfs/callback.h
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback definitions
+ */
+#ifndef __LINUX_FS_NFS_CALLBACK_H
+#define __LINUX_FS_NFS_CALLBACK_H
+
+#define NFS4_CALLBACK 0x40000000
+#define NFS4_CALLBACK_XDRSIZE 2048
+#define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE)
+
+enum nfs4_callback_procnum {
+	CB_NULL = 0,
+	CB_COMPOUND = 1,
+};
+
+enum nfs4_callback_opnum {
+	OP_CB_GETATTR = 3,
+	OP_CB_RECALL  = 4,
+	OP_CB_ILLEGAL = 10044,
+};
+
+struct cb_compound_hdr_arg {
+	int taglen;
+	const char *tag;
+	unsigned int callback_ident;
+	unsigned nops;
+};
+
+struct cb_compound_hdr_res {
+	uint32_t *status;
+	int taglen;
+	const char *tag;
+	uint32_t *nops;
+};
+
+struct cb_getattrargs {
+	struct sockaddr_in *addr;
+	struct nfs_fh fh;
+	uint32_t bitmap[2];
+};
+
+struct cb_getattrres {
+	uint32_t status;
+	uint32_t bitmap[2];
+	uint64_t size;
+	uint64_t change_attr;
+	struct timespec ctime;
+	struct timespec mtime;
+};
+
+struct cb_recallargs {
+	struct sockaddr_in *addr;
+	struct nfs_fh fh;
+	nfs4_stateid stateid;
+	uint32_t truncate;
+};
+
+extern unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
+extern unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
+
+extern int nfs_callback_up(void);
+extern int nfs_callback_down(void);
+
+extern unsigned short nfs_callback_tcpport;
+
+#endif /* __LINUX_FS_NFS_CALLBACK_H */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
new file mode 100644
index 0000000..ece27e4
--- /dev/null
+++ b/fs/nfs/callback_proc.c
@@ -0,0 +1,85 @@
+/*
+ * linux/fs/nfs/callback_proc.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback procedures
+ */
+#include <linux/config.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include "callback.h"
+#include "delegation.h"
+
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+ 
+unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
+{
+	struct nfs4_client *clp;
+	struct nfs_delegation *delegation;
+	struct nfs_inode *nfsi;
+	struct inode *inode;
+	
+	res->bitmap[0] = res->bitmap[1] = 0;
+	res->status = htonl(NFS4ERR_BADHANDLE);
+	clp = nfs4_find_client(&args->addr->sin_addr);
+	if (clp == NULL)
+		goto out;
+	inode = nfs_delegation_find_inode(clp, &args->fh);
+	if (inode == NULL)
+		goto out_putclient;
+	nfsi = NFS_I(inode);
+	down_read(&nfsi->rwsem);
+	delegation = nfsi->delegation;
+	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
+		goto out_iput;
+	res->size = i_size_read(inode);
+	res->change_attr = NFS_CHANGE_ATTR(inode);
+	res->ctime = inode->i_ctime;
+	res->mtime = inode->i_mtime;
+	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
+		args->bitmap[0];
+	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
+		args->bitmap[1];
+	res->status = 0;
+out_iput:
+	up_read(&nfsi->rwsem);
+	iput(inode);
+out_putclient:
+	nfs4_put_client(clp);
+out:
+	dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res->status));
+	return res->status;
+}
+
+unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
+{
+	struct nfs4_client *clp;
+	struct inode *inode;
+	unsigned res;
+	
+	res = htonl(NFS4ERR_BADHANDLE);
+	clp = nfs4_find_client(&args->addr->sin_addr);
+	if (clp == NULL)
+		goto out;
+	inode = nfs_delegation_find_inode(clp, &args->fh);
+	if (inode == NULL)
+		goto out_putclient;
+	/* Set up a helper thread to actually return the delegation */
+	switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
+		case 0:
+			res = 0;
+			break;
+		case -ENOENT:
+			res = htonl(NFS4ERR_BAD_STATEID);
+			break;
+		default:
+			res = htonl(NFS4ERR_RESOURCE);
+	}
+	iput(inode);
+out_putclient:
+	nfs4_put_client(clp);
+out:
+	dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res));
+	return res;
+}
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
new file mode 100644
index 0000000..d271df9
--- /dev/null
+++ b/fs/nfs/callback_xdr.c
@@ -0,0 +1,481 @@
+/*
+ * linux/fs/nfs/callback_xdr.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback encode/decode procedures
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include "callback.h"
+
+#define CB_OP_TAGLEN_MAXSZ	(512)
+#define CB_OP_HDR_RES_MAXSZ	(2 + CB_OP_TAGLEN_MAXSZ)
+#define CB_OP_GETATTR_BITMAP_MAXSZ	(4)
+#define CB_OP_GETATTR_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
+				CB_OP_GETATTR_BITMAP_MAXSZ + \
+				2 + 2 + 3 + 3)
+#define CB_OP_RECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
+
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+
+typedef unsigned (*callback_process_op_t)(void *, void *);
+typedef unsigned (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
+typedef unsigned (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
+
+
+struct callback_op {
+	callback_process_op_t process_op;
+	callback_decode_arg_t decode_args;
+	callback_encode_res_t encode_res;
+	long res_maxsize;
+};
+
+static struct callback_op callback_ops[];
+
+static int nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	return htonl(NFS4_OK);
+}
+
+static int nfs4_decode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+static int nfs4_encode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+static uint32_t *read_buf(struct xdr_stream *xdr, int nbytes)
+{
+	uint32_t *p;
+
+	p = xdr_inline_decode(xdr, nbytes);
+	if (unlikely(p == NULL))
+		printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n");
+	return p;
+}
+
+static unsigned decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str)
+{
+	uint32_t *p;
+
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	*len = ntohl(*p);
+
+	if (*len != 0) {
+		p = read_buf(xdr, *len);
+		if (unlikely(p == NULL))
+			return htonl(NFS4ERR_RESOURCE);
+		*str = (const char *)p;
+	} else
+		*str = NULL;
+
+	return 0;
+}
+
+static unsigned decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	uint32_t *p;
+
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	fh->size = ntohl(*p);
+	if (fh->size > NFS4_FHSIZE)
+		return htonl(NFS4ERR_BADHANDLE);
+	p = read_buf(xdr, fh->size);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	memcpy(&fh->data[0], p, fh->size);
+	memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size);
+	return 0;
+}
+
+static unsigned decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+	uint32_t *p;
+	unsigned int attrlen;
+
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	attrlen = ntohl(*p);
+	p = read_buf(xdr, attrlen << 2);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	if (likely(attrlen > 0))
+		bitmap[0] = ntohl(*p++);
+	if (attrlen > 1)
+		bitmap[1] = ntohl(*p);
+	return 0;
+}
+
+static unsigned decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	uint32_t *p;
+
+	p = read_buf(xdr, 16);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	memcpy(stateid->data, p, 16);
+	return 0;
+}
+
+static unsigned decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
+{
+	uint32_t *p;
+	unsigned int minor_version;
+	unsigned status;
+
+	status = decode_string(xdr, &hdr->taglen, &hdr->tag);
+	if (unlikely(status != 0))
+		return status;
+	/* We do not like overly long tags! */
+	if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) {
+		printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
+				__FUNCTION__, hdr->taglen);
+		return htonl(NFS4ERR_RESOURCE);
+	}
+	p = read_buf(xdr, 12);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	minor_version = ntohl(*p++);
+	/* Check minor version is zero. */
+	if (minor_version != 0) {
+		printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
+				__FUNCTION__, minor_version);
+		return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+	}
+	hdr->callback_ident = ntohl(*p++);
+	hdr->nops = ntohl(*p);
+	return 0;
+}
+
+static unsigned decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
+{
+	uint32_t *p;
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	*op = ntohl(*p);
+	return 0;
+}
+
+static unsigned decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args)
+{
+	unsigned status;
+
+	status = decode_fh(xdr, &args->fh);
+	if (unlikely(status != 0))
+		goto out;
+	args->addr = &rqstp->rq_addr;
+	status = decode_bitmap(xdr, args->bitmap);
+out:
+	dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+	return status;
+}
+
+static unsigned decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args)
+{
+	uint32_t *p;
+	unsigned status;
+
+	args->addr = &rqstp->rq_addr;
+	status = decode_stateid(xdr, &args->stateid);
+	if (unlikely(status != 0))
+		goto out;
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL)) {
+		status = htonl(NFS4ERR_RESOURCE);
+		goto out;
+	}
+	args->truncate = ntohl(*p);
+	status = decode_fh(xdr, &args->fh);
+out:
+	dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+	return status;
+}
+
+static unsigned encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
+{
+	uint32_t *p;
+
+	p = xdr_reserve_space(xdr, 4 + len);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	xdr_encode_opaque(p, str, len);
+	return 0;
+}
+
+#define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE)
+#define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY)
+static unsigned encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, uint32_t **savep)
+{
+	uint32_t bm[2];
+	uint32_t *p;
+
+	bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0);
+	bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1);
+	if (bm[1] != 0) {
+		p = xdr_reserve_space(xdr, 16);
+		if (unlikely(p == NULL))
+			return htonl(NFS4ERR_RESOURCE);
+		*p++ = htonl(2);
+		*p++ = bm[0];
+		*p++ = bm[1];
+	} else if (bm[0] != 0) {
+		p = xdr_reserve_space(xdr, 12);
+		if (unlikely(p == NULL))
+			return htonl(NFS4ERR_RESOURCE);
+		*p++ = htonl(1);
+		*p++ = bm[0];
+	} else {
+		p = xdr_reserve_space(xdr, 8);
+		if (unlikely(p == NULL))
+			return htonl(NFS4ERR_RESOURCE);
+		*p++ = htonl(0);
+	}
+	*savep = p;
+	return 0;
+}
+
+static unsigned encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change)
+{
+	uint32_t *p;
+
+	if (!(bitmap[0] & FATTR4_WORD0_CHANGE))
+		return 0;
+	p = xdr_reserve_space(xdr, 8);
+	if (unlikely(p == 0))
+		return htonl(NFS4ERR_RESOURCE);
+	p = xdr_encode_hyper(p, change);
+	return 0;
+}
+
+static unsigned encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size)
+{
+	uint32_t *p;
+
+	if (!(bitmap[0] & FATTR4_WORD0_SIZE))
+		return 0;
+	p = xdr_reserve_space(xdr, 8);
+	if (unlikely(p == 0))
+		return htonl(NFS4ERR_RESOURCE);
+	p = xdr_encode_hyper(p, size);
+	return 0;
+}
+
+static unsigned encode_attr_time(struct xdr_stream *xdr, const struct timespec *time)
+{
+	uint32_t *p;
+
+	p = xdr_reserve_space(xdr, 12);
+	if (unlikely(p == 0))
+		return htonl(NFS4ERR_RESOURCE);
+	p = xdr_encode_hyper(p, time->tv_sec);
+	*p = htonl(time->tv_nsec);
+	return 0;
+}
+
+static unsigned encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
+{
+	if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
+		return 0;
+	return encode_attr_time(xdr,time);
+}
+
+static unsigned encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
+{
+	if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY))
+		return 0;
+	return encode_attr_time(xdr,time);
+}
+
+static unsigned encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
+{
+	unsigned status;
+
+	hdr->status = xdr_reserve_space(xdr, 4);
+	if (unlikely(hdr->status == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	status = encode_string(xdr, hdr->taglen, hdr->tag);
+	if (unlikely(status != 0))
+		return status;
+	hdr->nops = xdr_reserve_space(xdr, 4);
+	if (unlikely(hdr->nops == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	return 0;
+}
+
+static unsigned encode_op_hdr(struct xdr_stream *xdr, uint32_t op, uint32_t res)
+{
+	uint32_t *p;
+	
+	p = xdr_reserve_space(xdr, 8);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_RESOURCE);
+	*p++ = htonl(op);
+	*p = res;
+	return 0;
+}
+
+static unsigned encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
+{
+	uint32_t *savep;
+	unsigned status = res->status;
+	
+	if (unlikely(status != 0))
+		goto out;
+	status = encode_attr_bitmap(xdr, res->bitmap, &savep);
+	if (unlikely(status != 0))
+		goto out;
+	status = encode_attr_change(xdr, res->bitmap, res->change_attr);
+	if (unlikely(status != 0))
+		goto out;
+	status = encode_attr_size(xdr, res->bitmap, res->size);
+	if (unlikely(status != 0))
+		goto out;
+	status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
+	if (unlikely(status != 0))
+		goto out;
+	status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
+	*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
+out:
+	dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+	return status;
+}
+
+static unsigned process_op(struct svc_rqst *rqstp,
+		struct xdr_stream *xdr_in, void *argp,
+		struct xdr_stream *xdr_out, void *resp)
+{
+	struct callback_op *op;
+	unsigned int op_nr;
+	unsigned int status = 0;
+	long maxlen;
+	unsigned res;
+
+	dprintk("%s: start\n", __FUNCTION__);
+	status = decode_op_hdr(xdr_in, &op_nr);
+	if (unlikely(status != 0)) {
+		op_nr = OP_CB_ILLEGAL;
+		op = &callback_ops[0];
+	} else if (unlikely(op_nr != OP_CB_GETATTR && op_nr != OP_CB_RECALL)) {
+		op_nr = OP_CB_ILLEGAL;
+		op = &callback_ops[0];
+		status = htonl(NFS4ERR_OP_ILLEGAL);
+	} else
+		op = &callback_ops[op_nr];
+
+	maxlen = xdr_out->end - xdr_out->p;
+	if (maxlen > 0 && maxlen < PAGE_SIZE) {
+		if (likely(status == 0 && op->decode_args != NULL))
+			status = op->decode_args(rqstp, xdr_in, argp);
+		if (likely(status == 0 && op->process_op != NULL))
+			status = op->process_op(argp, resp);
+	} else
+		status = htonl(NFS4ERR_RESOURCE);
+
+	res = encode_op_hdr(xdr_out, op_nr, status);
+	if (status == 0)
+		status = res;
+	if (op->encode_res != NULL && status == 0)
+		status = op->encode_res(rqstp, xdr_out, resp);
+	dprintk("%s: done, status = %d\n", __FUNCTION__, status);
+	return status;
+}
+
+/*
+ * Decode, process and encode a COMPOUND
+ */
+static int nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	struct cb_compound_hdr_arg hdr_arg;
+	struct cb_compound_hdr_res hdr_res;
+	struct xdr_stream xdr_in, xdr_out;
+	uint32_t *p;
+	unsigned int status;
+	unsigned int nops = 1;
+
+	dprintk("%s: start\n", __FUNCTION__);
+
+	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
+
+	p = (uint32_t*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
+	rqstp->rq_res.head[0].iov_len = PAGE_SIZE;
+	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
+
+	decode_compound_hdr_arg(&xdr_in, &hdr_arg);
+	hdr_res.taglen = hdr_arg.taglen;
+	hdr_res.tag = hdr_arg.tag;
+	encode_compound_hdr_res(&xdr_out, &hdr_res);
+
+	for (;;) {
+		status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
+		if (status != 0)
+			break;
+		if (nops == hdr_arg.nops)
+			break;
+		nops++;
+	}
+	*hdr_res.status = status;
+	*hdr_res.nops = htonl(nops);
+	dprintk("%s: done, status = %u\n", __FUNCTION__, status);
+	return rpc_success;
+}
+
+/*
+ * Define NFS4 callback COMPOUND ops.
+ */
+static struct callback_op callback_ops[] = {
+	[0] = {
+		.res_maxsize = CB_OP_HDR_RES_MAXSZ,
+	},
+	[OP_CB_GETATTR] = {
+		.process_op = (callback_process_op_t)nfs4_callback_getattr,
+		.decode_args = (callback_decode_arg_t)decode_getattr_args,
+		.encode_res = (callback_encode_res_t)encode_getattr_res,
+		.res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
+	},
+	[OP_CB_RECALL] = {
+		.process_op = (callback_process_op_t)nfs4_callback_recall,
+		.decode_args = (callback_decode_arg_t)decode_recall_args,
+		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
+	}
+};
+
+/*
+ * Define NFS4 callback procedures
+ */
+static struct svc_procedure nfs4_callback_procedures1[] = {
+	[CB_NULL] = {
+		.pc_func = nfs4_callback_null,
+		.pc_decode = (kxdrproc_t)nfs4_decode_void,
+		.pc_encode = (kxdrproc_t)nfs4_encode_void,
+		.pc_xdrressize = 1,
+	},
+	[CB_COMPOUND] = {
+		.pc_func = nfs4_callback_compound,
+		.pc_encode = (kxdrproc_t)nfs4_encode_void,
+		.pc_argsize = 256,
+		.pc_ressize = 256,
+		.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
+	}
+};
+
+struct svc_version nfs4_callback_version1 = {
+	.vs_vers = 1,
+	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
+	.vs_proc = nfs4_callback_procedures1,
+	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+	.vs_dispatch = NULL,
+};
+
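process_op() above is a small table-driven dispatcher: it decodes an op number, rejects anything outside callback_ops[], and then runs the op's decode_args/process_op/encode_res hooks in order. The sketch below shows the same shape in self-contained user-space C; the op numbers, the argument layout, and both demo ops are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_stream {			/* stand-in for struct xdr_stream */
	const uint32_t *p;
	const uint32_t *end;
};

typedef int (*demo_op_fn)(struct demo_stream *in, char *out, size_t outlen);

static int demo_op_hello(struct demo_stream *in, char *out, size_t outlen)
{
	(void)in;
	snprintf(out, outlen, "hello");
	return 0;
}

static int demo_op_echo(struct demo_stream *in, char *out, size_t outlen)
{
	if (in->p >= in->end)
		return -1;		/* ran out of arguments */
	snprintf(out, outlen, "echo %u", (unsigned)*in->p++);
	return 0;
}

static const demo_op_fn demo_ops[] = {	/* index == op number */
	[1] = demo_op_hello,
	[2] = demo_op_echo,
};

static int demo_process_op(struct demo_stream *in, char *out, size_t outlen)
{
	uint32_t op;

	if (in->p >= in->end)
		return -1;		/* no op header left to decode */
	op = *in->p++;
	if (op >= sizeof(demo_ops) / sizeof(demo_ops[0]) || demo_ops[op] == NULL)
		return -1;		/* unknown op: reject, like OP_CB_ILLEGAL */
	return demo_ops[op](in, out, outlen);
}

int main(void)
{
	uint32_t wire[] = { 2, 42 };	/* one "echo" op with argument 42 */
	struct demo_stream in = { wire, wire + 2 };
	char res[32];

	if (demo_process_op(&in, res, sizeof(res)) == 0)
		printf("%s\n", res);
	return 0;
}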
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
new file mode 100644
index 0000000..5b9c60f
--- /dev/null
+++ b/fs/nfs/delegation.c
@@ -0,0 +1,342 @@
+/*
+ * linux/fs/nfs/delegation.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFS file delegation management
+ *
+ */
+#include <linux/config.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "delegation.h"
+
+static struct nfs_delegation *nfs_alloc_delegation(void)
+{
+	return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+	if (delegation->cred)
+		put_rpccred(delegation->cred);
+	kfree(delegation);
+}
+
+static void nfs_delegation_claim_opens(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_open_context *ctx;
+	struct nfs4_state *state;
+
+again:
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(ctx, &nfsi->open_files, list) {
+		state = ctx->state;
+		if (state == NULL)
+			continue;
+		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+			continue;
+		get_nfs_open_context(ctx);
+		spin_unlock(&inode->i_lock);
+		if (nfs4_open_delegation_recall(ctx->dentry, state) < 0)
+			return;
+		put_nfs_open_context(ctx);
+		goto again;
+	}
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Reclaim an existing delegation on an inode
+ */
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+{
+	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
+
+	if (delegation == NULL)
+		return;
+	memcpy(delegation->stateid.data, res->delegation.data,
+			sizeof(delegation->stateid.data));
+	delegation->type = res->delegation_type;
+	delegation->maxsize = res->maxsize;
+	put_rpccred(cred);
+	delegation->cred = get_rpccred(cred);
+	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
+	NFS_I(inode)->delegation_state = delegation->type;
+	smp_wmb();
+}
+
+/*
+ * Set up a delegation on an inode
+ */
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+{
+	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_delegation *delegation;
+	int status = 0;
+
+	delegation = nfs_alloc_delegation();
+	if (delegation == NULL)
+		return -ENOMEM;
+	memcpy(delegation->stateid.data, res->delegation.data,
+			sizeof(delegation->stateid.data));
+	delegation->type = res->delegation_type;
+	delegation->maxsize = res->maxsize;
+	delegation->cred = get_rpccred(cred);
+	delegation->inode = inode;
+
+	spin_lock(&clp->cl_lock);
+	if (nfsi->delegation == NULL) {
+		list_add(&delegation->super_list, &clp->cl_delegations);
+		nfsi->delegation = delegation;
+		nfsi->delegation_state = delegation->type;
+		delegation = NULL;
+	} else {
+		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
+					sizeof(delegation->stateid)) != 0 ||
+				delegation->type != nfsi->delegation->type) {
+			printk("%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
+					__FUNCTION__, NIPQUAD(clp->cl_addr));
+			status = -EIO;
+		}
+	}
+	spin_unlock(&clp->cl_lock);
+	if (delegation != NULL)
+		kfree(delegation);
+	return status;
+}
+
+static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
+{
+	int res = 0;
+
+	__nfs_revalidate_inode(NFS_SERVER(inode), inode);
+
+	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
+	nfs_free_delegation(delegation);
+	return res;
+}
+
+/* Sync all data to disk upon delegation return */
+static void nfs_msync_inode(struct inode *inode)
+{
+	filemap_fdatawrite(inode->i_mapping);
+	nfs_wb_all(inode);
+	filemap_fdatawait(inode->i_mapping);
+}
+
+/*
+ * Basic procedure for returning a delegation to the server
+ */
+int nfs_inode_return_delegation(struct inode *inode)
+{
+	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_delegation *delegation;
+	int res = 0;
+
+	nfs_msync_inode(inode);
+	down_read(&clp->cl_sem);
+	/* Guard against new delegated open calls */
+	down_write(&nfsi->rwsem);
+	spin_lock(&clp->cl_lock);
+	delegation = nfsi->delegation;
+	if (delegation != NULL) {
+		list_del_init(&delegation->super_list);
+		nfsi->delegation = NULL;
+		nfsi->delegation_state = 0;
+	}
+	spin_unlock(&clp->cl_lock);
+	nfs_delegation_claim_opens(inode);
+	up_write(&nfsi->rwsem);
+	up_read(&clp->cl_sem);
+	nfs_msync_inode(inode);
+
+	if (delegation != NULL)
+		res = nfs_do_return_delegation(inode, delegation);
+	return res;
+}
+
+/*
+ * Return all delegations associated to a super block
+ */
+void nfs_return_all_delegations(struct super_block *sb)
+{
+	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
+	struct nfs_delegation *delegation;
+	struct inode *inode;
+
+	if (clp == NULL)
+		return;
+restart:
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+		if (delegation->inode->i_sb != sb)
+			continue;
+		inode = igrab(delegation->inode);
+		if (inode == NULL)
+			continue;
+		spin_unlock(&clp->cl_lock);
+		nfs_inode_return_delegation(inode);
+		iput(inode);
+		goto restart;
+	}
+	spin_unlock(&clp->cl_lock);
+}
+
+/*
+ * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+ */
+void nfs_handle_cb_pathdown(struct nfs4_client *clp)
+{
+	struct nfs_delegation *delegation;
+	struct inode *inode;
+
+	if (clp == NULL)
+		return;
+restart:
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+		inode = igrab(delegation->inode);
+		if (inode == NULL)
+			continue;
+		spin_unlock(&clp->cl_lock);
+		nfs_inode_return_delegation(inode);
+		iput(inode);
+		goto restart;
+	}
+	spin_unlock(&clp->cl_lock);
+}
+
+struct recall_threadargs {
+	struct inode *inode;
+	struct nfs4_client *clp;
+	const nfs4_stateid *stateid;
+
+	struct completion started;
+	int result;
+};
+
+static int recall_thread(void *data)
+{
+	struct recall_threadargs *args = (struct recall_threadargs *)data;
+	struct inode *inode = igrab(args->inode);
+	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_delegation *delegation;
+
+	daemonize("nfsv4-delegreturn");
+
+	nfs_msync_inode(inode);
+	down_read(&clp->cl_sem);
+	down_write(&nfsi->rwsem);
+	spin_lock(&clp->cl_lock);
+	delegation = nfsi->delegation;
+	if (delegation != NULL && memcmp(delegation->stateid.data,
+				args->stateid->data,
+				sizeof(delegation->stateid.data)) == 0) {
+		list_del_init(&delegation->super_list);
+		nfsi->delegation = NULL;
+		nfsi->delegation_state = 0;
+		args->result = 0;
+	} else {
+		delegation = NULL;
+		args->result = -ENOENT;
+	}
+	spin_unlock(&clp->cl_lock);
+	complete(&args->started);
+	nfs_delegation_claim_opens(inode);
+	up_write(&nfsi->rwsem);
+	up_read(&clp->cl_sem);
+	nfs_msync_inode(inode);
+
+	if (delegation != NULL)
+		nfs_do_return_delegation(inode, delegation);
+	iput(inode);
+	module_put_and_exit(0);
+}
+
+/*
+ * Asynchronous delegation recall!
+ */
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+{
+	struct recall_threadargs data = {
+		.inode = inode,
+		.stateid = stateid,
+	};
+	int status;
+
+	init_completion(&data.started);
+	__module_get(THIS_MODULE);
+	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
+	if (status < 0)
+		goto out_module_put;
+	wait_for_completion(&data.started);
+	return data.result;
+out_module_put:
+	module_put(THIS_MODULE);
+	return status;
+}
+
+/*
+ * Retrieve the inode associated with a delegation
+ */
+struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
+{
+	struct nfs_delegation *delegation;
+	struct inode *res = NULL;
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
+			res = igrab(delegation->inode);
+			break;
+		}
+	}
+	spin_unlock(&clp->cl_lock);
+	return res;
+}
+
+/*
+ * Mark all delegations as needing to be reclaimed
+ */
+void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
+{
+	struct nfs_delegation *delegation;
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
+		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
+	spin_unlock(&clp->cl_lock);
+}
+
+/*
+ * Reap all unclaimed delegations after reboot recovery is done
+ */
+void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
+{
+	struct nfs_delegation *delegation, *n;
+	LIST_HEAD(head);
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
+		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
+			continue;
+		list_move(&delegation->super_list, &head);
+		NFS_I(delegation->inode)->delegation = NULL;
+		NFS_I(delegation->inode)->delegation_state = 0;
+	}
+	spin_unlock(&clp->cl_lock);
+	while(!list_empty(&head)) {
+		delegation = list_entry(head.next, struct nfs_delegation, super_list);
+		list_del(&delegation->super_list);
+		nfs_free_delegation(delegation);
+	}
+}
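nfs_delegation_reap_unclaimed() above uses a common locking idiom: matching entries are unlinked and parked on a private list while cl_lock is held, and the actual freeing only happens after the lock has been dropped. A small user-space sketch of that idiom follows; the node and list types are invented for the demo.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int needs_reclaim;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *active;		/* protected by list_lock */

static void reap_unclaimed(void)
{
	struct node *doomed = NULL;	/* private list, no locking needed */
	struct node **pp, *n;

	pthread_mutex_lock(&list_lock);
	pp = &active;
	while ((n = *pp) != NULL) {
		if (!n->needs_reclaim) {
			pp = &n->next;		/* keep this one */
			continue;
		}
		*pp = n->next;			/* unlink under the lock */
		n->next = doomed;		/* park it on the private list */
		doomed = n;
	}
	pthread_mutex_unlock(&list_lock);

	while (doomed != NULL) {		/* free outside the lock */
		n = doomed;
		doomed = n->next;
		free(n);
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->needs_reclaim = 1;
	pthread_mutex_lock(&list_lock);
	n->next = active;
	active = n;
	pthread_mutex_unlock(&list_lock);
	reap_unclaimed();
	return 0;
}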
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
new file mode 100644
index 0000000..3f6c45a
--- /dev/null
+++ b/fs/nfs/delegation.h
@@ -0,0 +1,57 @@
+/*
+ * linux/fs/nfs/delegation.h
+ *
+ * Copyright (c) Trond Myklebust
+ *
+ * Definitions pertaining to NFS delegated files
+ */
+#ifndef FS_NFS_DELEGATION_H
+#define FS_NFS_DELEGATION_H
+
+#if defined(CONFIG_NFS_V4)
+/*
+ * NFSv4 delegation
+ */
+struct nfs_delegation {
+	struct list_head super_list;
+	struct rpc_cred *cred;
+	struct inode *inode;
+	nfs4_stateid stateid;
+	int type;
+#define NFS_DELEGATION_NEED_RECLAIM 1
+	long flags;
+	loff_t maxsize;
+};
+
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+int nfs_inode_return_delegation(struct inode *inode);
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+
+struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
+void nfs_return_all_delegations(struct super_block *sb);
+void nfs_handle_cb_pathdown(struct nfs4_client *clp);
+
+void nfs_delegation_mark_reclaim(struct nfs4_client *clp);
+void nfs_delegation_reap_unclaimed(struct nfs4_client *clp);
+
+/* NFSv4 delegation-related procedures */
+int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
+int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state);
+
+static inline int nfs_have_delegation(struct inode *inode, int flags)
+{
+	flags &= FMODE_READ|FMODE_WRITE;
+	smp_rmb();
+	if ((NFS_I(inode)->delegation_state & flags) == flags)
+		return 1;
+	return 0;
+}
+#else
+static inline int nfs_have_delegation(struct inode *inode, int flags)
+{
+	return 0;
+}
+#endif
+
+#endif
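nfs_have_delegation() above checks delegation_state without taking a lock, relying only on the smp_wmb()/smp_rmb() pairing in the delegation set-up and check paths for ordering. As a loose user-space analogy (not kernel code, and all names here are invented), the same publish/consume idea in C11 is a release store paired with an acquire load:

#include <stdatomic.h>
#include <stdio.h>

struct demo_delegation {
	int type;
	long maxsize;
};

static struct demo_delegation payload;		/* plain data, written first */
static _Atomic int delegation_state;		/* published last */

static void publish_delegation(int type, long maxsize)
{
	payload.type = type;			/* fill in the fields first */
	payload.maxsize = maxsize;
	/* release: the writes above become visible before the state word */
	atomic_store_explicit(&delegation_state, type, memory_order_release);
}

static int have_delegation(int wanted)
{
	/* acquire: pairs with the release store in publish_delegation() */
	int state = atomic_load_explicit(&delegation_state, memory_order_acquire);

	return (state & wanted) == wanted;
}

int main(void)
{
	publish_delegation(0x1, 4096);		/* 0x1 stands in for a mode bit */
	if (have_delegation(0x1))
		printf("delegated, maxsize=%ld\n", payload.maxsize);
	return 0;
}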
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
new file mode 100644
index 0000000..73f96ac
--- /dev/null
+++ b/fs/nfs/dir.c
@@ -0,0 +1,1562 @@
+/*
+ *  linux/fs/nfs/dir.c
+ *
+ *  Copyright (C) 1992  Rick Sladkey
+ *
+ *  nfs directory handling functions
+ *
+ * 10 Apr 1996	Added silly rename for unlink	--okir
+ * 28 Sep 1996	Improved directory cache --okir
+ * 23 Aug 1997  Claus Heine claus@momo.math.rwth-aachen.de 
+ *              Re-implemented silly rename for unlink, newly implemented
+ *              silly rename for nfs_rename() following the suggestions
+ *              of Olaf Kirch (okir) found in this file.
+ *              Following Linus comments on my original hack, this version
+ *              depends only on the dcache stuff and doesn't touch the inode
+ *              layer (iput() and friends).
+ *  6 Jun 1999	Cache readdir lookups in the page cache. -DaveM
+ */
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/namei.h>
+
+#include "delegation.h"
+
+#define NFS_PARANOIA 1
+/* #define NFS_DEBUG_VERBOSE 1 */
+
+static int nfs_opendir(struct inode *, struct file *);
+static int nfs_readdir(struct file *, void *, filldir_t);
+static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+static int nfs_mkdir(struct inode *, struct dentry *, int);
+static int nfs_rmdir(struct inode *, struct dentry *);
+static int nfs_unlink(struct inode *, struct dentry *);
+static int nfs_symlink(struct inode *, struct dentry *, const char *);
+static int nfs_link(struct dentry *, struct inode *, struct dentry *);
+static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
+static int nfs_rename(struct inode *, struct dentry *,
+		      struct inode *, struct dentry *);
+static int nfs_fsync_dir(struct file *, struct dentry *, int);
+
+struct file_operations nfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= nfs_readdir,
+	.open		= nfs_opendir,
+	.release	= nfs_release,
+	.fsync		= nfs_fsync_dir,
+};
+
+struct inode_operations nfs_dir_inode_operations = {
+	.create		= nfs_create,
+	.lookup		= nfs_lookup,
+	.link		= nfs_link,
+	.unlink		= nfs_unlink,
+	.symlink	= nfs_symlink,
+	.mkdir		= nfs_mkdir,
+	.rmdir		= nfs_rmdir,
+	.mknod		= nfs_mknod,
+	.rename		= nfs_rename,
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+};
+
+#ifdef CONFIG_NFS_V4
+
+static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+struct inode_operations nfs4_dir_inode_operations = {
+	.create		= nfs_create,
+	.lookup		= nfs_atomic_lookup,
+	.link		= nfs_link,
+	.unlink		= nfs_unlink,
+	.symlink	= nfs_symlink,
+	.mkdir		= nfs_mkdir,
+	.rmdir		= nfs_rmdir,
+	.mknod		= nfs_mknod,
+	.rename		= nfs_rename,
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+};
+
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * Open file
+ */
+static int
+nfs_opendir(struct inode *inode, struct file *filp)
+{
+	int res = 0;
+
+	lock_kernel();
+	/* Call generic open code in order to cache credentials */
+	if (!res)
+		res = nfs_open(inode, filp);
+	unlock_kernel();
+	return res;
+}
+
+typedef u32 * (*decode_dirent_t)(u32 *, struct nfs_entry *, int);
+typedef struct {
+	struct file	*file;
+	struct page	*page;
+	unsigned long	page_index;
+	u32		*ptr;
+	u64		target;
+	struct nfs_entry *entry;
+	decode_dirent_t	decode;
+	int		plus;
+	int		error;
+} nfs_readdir_descriptor_t;
+
+/* Now we cache directories properly, by stuffing the dirent
+ * data directly in the page cache.
+ *
+ * Inode invalidation due to refresh etc. takes care of
+ * _everything_, no sloppy entry flushing logic, no extraneous
+ * copying, network direct to page cache, the way it was meant
+ * to be.
+ *
+ * NOTE: Dirent information verification is always done by the
+ *	 page-in of the RPC reply, nowhere else; this simplifies
+ *	 things substantially.
+ */
+static
+int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+{
+	struct file	*file = desc->file;
+	struct inode	*inode = file->f_dentry->d_inode;
+	struct rpc_cred	*cred = nfs_file_cred(file);
+	unsigned long	timestamp;
+	int		error;
+
+	dfprintk(VFS, "NFS: nfs_readdir_filler() reading cookie %Lu into page %lu.\n", (long long)desc->entry->cookie, page->index);
+
+ again:
+	timestamp = jiffies;
+	error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->entry->cookie, page,
+					  NFS_SERVER(inode)->dtsize, desc->plus);
+	if (error < 0) {
+		/* We requested READDIRPLUS, but the server doesn't grok it */
+		if (error == -ENOTSUPP && desc->plus) {
+			NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
+			NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
+			desc->plus = 0;
+			goto again;
+		}
+		goto error;
+	}
+	SetPageUptodate(page);
+	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+	/* Ensure consistent page alignment of the data.
+	 * Note: assumes we have exclusive access to this mapping either
+	 *	 through inode->i_sem or some other mechanism.
+	 */
+	if (page->index == 0) {
+		invalidate_inode_pages(inode->i_mapping);
+		NFS_I(inode)->readdir_timestamp = timestamp;
+	}
+	unlock_page(page);
+	return 0;
+ error:
+	SetPageError(page);
+	unlock_page(page);
+	nfs_zap_caches(inode);
+	desc->error = error;
+	return -EIO;
+}
+
+static inline
+int dir_decode(nfs_readdir_descriptor_t *desc)
+{
+	u32	*p = desc->ptr;
+	p = desc->decode(p, desc->entry, desc->plus);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	desc->ptr = p;
+	return 0;
+}
+
+static inline
+void dir_page_release(nfs_readdir_descriptor_t *desc)
+{
+	kunmap(desc->page);
+	page_cache_release(desc->page);
+	desc->page = NULL;
+	desc->ptr = NULL;
+}
+
+/*
+ * Given a pointer to a buffer that has already been filled by a call
+ * to readdir, find the next entry.
+ *
+ * If the end of the buffer has been reached, return -EAGAIN, if not,
+ * return the offset within the buffer of the next entry to be
+ * read.
+ */
+static inline
+int find_dirent(nfs_readdir_descriptor_t *desc, struct page *page)
+{
+	struct nfs_entry *entry = desc->entry;
+	int		loop_count = 0,
+			status;
+
+	while((status = dir_decode(desc)) == 0) {
+		dfprintk(VFS, "NFS: found cookie %Lu\n", (long long)entry->cookie);
+		if (entry->prev_cookie == desc->target)
+			break;
+		if (loop_count++ > 200) {
+			loop_count = 0;
+			schedule();
+		}
+	}
+	dfprintk(VFS, "NFS: find_dirent() returns %d\n", status);
+	return status;
+}
+
+/*
+ * Find the given page, and call find_dirent() in order to try to
+ * return the next entry.
+ */
+static inline
+int find_dirent_page(nfs_readdir_descriptor_t *desc)
+{
+	struct inode	*inode = desc->file->f_dentry->d_inode;
+	struct page	*page;
+	int		status;
+
+	dfprintk(VFS, "NFS: find_dirent_page() searching directory page %ld\n", desc->page_index);
+
+	page = read_cache_page(inode->i_mapping, desc->page_index,
+			       (filler_t *)nfs_readdir_filler, desc);
+	if (IS_ERR(page)) {
+		status = PTR_ERR(page);
+		goto out;
+	}
+	if (!PageUptodate(page))
+		goto read_error;
+
+	/* NOTE: Someone else may have changed the READDIRPLUS flag */
+	desc->page = page;
+	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
+	status = find_dirent(desc, page);
+	if (status < 0)
+		dir_page_release(desc);
+ out:
+	dfprintk(VFS, "NFS: find_dirent_page() returns %d\n", status);
+	return status;
+ read_error:
+	page_cache_release(page);
+	return -EIO;
+}
+
+/*
+ * Recurse through the page cache pages, and return a
+ * filled nfs_entry structure of the next directory entry if possible.
+ *
+ * The target for the search is 'desc->target'.
+ */
+static inline
+int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
+{
+	int		loop_count = 0;
+	int		res;
+
+	dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (long long)desc->target);
+	for (;;) {
+		res = find_dirent_page(desc);
+		if (res != -EAGAIN)
+			break;
+		/* Align to beginning of next page */
+		desc->page_index ++;
+		if (loop_count++ > 200) {
+			loop_count = 0;
+			schedule();
+		}
+	}
+	dfprintk(VFS, "NFS: readdir_search_pagecache() returned %d\n", res);
+	return res;
+}
+
+static inline unsigned int dt_type(struct inode *inode)
+{
+	return (inode->i_mode >> 12) & 15;
+}
+
+static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc);
+
+/*
+ * Once we've found the start of the dirent within a page: fill 'er up...
+ */
+static 
+int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
+		   filldir_t filldir)
+{
+	struct file	*file = desc->file;
+	struct nfs_entry *entry = desc->entry;
+	struct dentry	*dentry = NULL;
+	unsigned long	fileid;
+	int		loop_count = 0,
+			res;
+
+	dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target);
+
+	for(;;) {
+		unsigned d_type = DT_UNKNOWN;
+		/* Note: entry->prev_cookie contains the cookie for
+		 *	 retrieving the current dirent on the server */
+		fileid = nfs_fileid_to_ino_t(entry->ino);
+
+		/* Get a dentry if we have one */
+		if (dentry != NULL)
+			dput(dentry);
+		dentry = nfs_readdir_lookup(desc);
+
+		/* Use readdirplus info */
+		if (dentry != NULL && dentry->d_inode != NULL) {
+			d_type = dt_type(dentry->d_inode);
+			fileid = dentry->d_inode->i_ino;
+		}
+
+		res = filldir(dirent, entry->name, entry->len, 
+			      entry->prev_cookie, fileid, d_type);
+		if (res < 0)
+			break;
+		file->f_pos = desc->target = entry->cookie;
+		if (dir_decode(desc) != 0) {
+			desc->page_index ++;
+			break;
+		}
+		if (loop_count++ > 200) {
+			loop_count = 0;
+			schedule();
+		}
+	}
+	dir_page_release(desc);
+	if (dentry != NULL)
+		dput(dentry);
+	dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (long long)desc->target, res);
+	return res;
+}
+
+/*
+ * If we cannot find a cookie in our cache, we suspect that this is
+ * because it points to a deleted file, so we ask the server to return
+ * whatever it thinks is the next entry. We then feed this to filldir.
+ * If all goes well, we should then be able to find our way round the
+ * cache on the next call to readdir_search_pagecache();
+ *
+ * NOTE: we cannot add the anonymous page to the pagecache because
+ *	 the data it contains might not be page aligned. Besides,
+ *	 we should already have a complete representation of the
+ *	 directory in the page cache by the time we get here.
+ */
+static inline
+int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+		     filldir_t filldir)
+{
+	struct file	*file = desc->file;
+	struct inode	*inode = file->f_dentry->d_inode;
+	struct rpc_cred	*cred = nfs_file_cred(file);
+	struct page	*page = NULL;
+	int		status;
+
+	dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (long long)desc->target);
+
+	page = alloc_page(GFP_HIGHUSER);
+	if (!page) {
+		status = -ENOMEM;
+		goto out;
+	}
+	desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->target,
+						page,
+						NFS_SERVER(inode)->dtsize,
+						desc->plus);
+	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+	desc->page = page;
+	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
+	if (desc->error >= 0) {
+		if ((status = dir_decode(desc)) == 0)
+			desc->entry->prev_cookie = desc->target;
+	} else
+		status = -EIO;
+	if (status < 0)
+		goto out_release;
+
+	status = nfs_do_filldir(desc, dirent, filldir);
+
+	/* Reset read descriptor so it searches the page cache from
+	 * the start upon the next call to readdir_search_pagecache() */
+	desc->page_index = 0;
+	desc->entry->cookie = desc->entry->prev_cookie = 0;
+	desc->entry->eof = 0;
+ out:
+	dfprintk(VFS, "NFS: uncached_readdir() returns %d\n", status);
+	return status;
+ out_release:
+	dir_page_release(desc);
+	goto out;
+}
+
+/* The file offset position is now represented as a true offset into the
+ * page cache as is the case in most of the other filesystems.
+ */
+static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct dentry	*dentry = filp->f_dentry;
+	struct inode	*inode = dentry->d_inode;
+	nfs_readdir_descriptor_t my_desc,
+			*desc = &my_desc;
+	struct nfs_entry my_entry;
+	struct nfs_fh	 fh;
+	struct nfs_fattr fattr;
+	long		res;
+
+	lock_kernel();
+
+	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (res < 0) {
+		unlock_kernel();
+		return res;
+	}
+
+	/*
+	 * filp->f_pos points to the file offset in the page cache.
+	 * but if the cache has meanwhile been zapped, we need to
+	 * read from the last dirent to revalidate f_pos
+	 * itself.
+	 */
+	memset(desc, 0, sizeof(*desc));
+
+	desc->file = filp;
+	desc->target = filp->f_pos;
+	desc->decode = NFS_PROTO(inode)->decode_dirent;
+	desc->plus = NFS_USE_READDIRPLUS(inode);
+
+	my_entry.cookie = my_entry.prev_cookie = 0;
+	my_entry.eof = 0;
+	my_entry.fh = &fh;
+	my_entry.fattr = &fattr;
+	desc->entry = &my_entry;
+
+	while(!desc->entry->eof) {
+		res = readdir_search_pagecache(desc);
+		if (res == -EBADCOOKIE) {
+			/* This means either end of directory */
+			if (desc->entry->cookie != desc->target) {
+				/* Or that the server has 'lost' a cookie */
+				res = uncached_readdir(desc, dirent, filldir);
+				if (res >= 0)
+					continue;
+			}
+			res = 0;
+			break;
+		}
+		if (res == -ETOOSMALL && desc->plus) {
+			NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
+			nfs_zap_caches(inode);
+			desc->plus = 0;
+			desc->entry->eof = 0;
+			continue;
+		}
+		if (res < 0)
+			break;
+
+		res = nfs_do_filldir(desc, dirent, filldir);
+		if (res < 0) {
+			res = 0;
+			break;
+		}
+	}
+	unlock_kernel();
+	if (desc->error < 0)
+		return desc->error;
+	if (res < 0)
+		return res;
+	return 0;
+}
+
+/*
+ * All directory operations under NFS are synchronous, so fsync()
+ * is a dummy operation.
+ */
+int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
+{
+	return 0;
+}
+
+/*
+ * A check for whether or not the parent directory has changed.
+ * If it has, we assume that the dentries are untrustworthy
+ * and may need to be looked up again.
+ */
+static inline int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
+{
+	if (IS_ROOT(dentry))
+		return 1;
+	if ((NFS_FLAGS(dir) & NFS_INO_INVALID_ATTR) != 0
+			|| nfs_attribute_timeout(dir))
+		return 0;
+	return nfs_verify_change_attribute(dir, (unsigned long)dentry->d_fsdata);
+}
+
+static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
+{
+	dentry->d_fsdata = (void *)verf;
+}
+
+/*
+ * Whenever an NFS operation succeeds, we know that the dentry
+ * is valid, so we update the revalidation timestamp.
+ */
+static inline void nfs_renew_times(struct dentry * dentry)
+{
+	dentry->d_time = jiffies;
+}
+
+static inline
+int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+
+	if (nd != NULL) {
+		int ndflags = nd->flags;
+		/* VFS wants an on-the-wire revalidation */
+		if (ndflags & LOOKUP_REVAL)
+			goto out_force;
+		/* This is an open(2) */
+		if ((ndflags & LOOKUP_OPEN) &&
+				!(ndflags & LOOKUP_CONTINUE) &&
+				!(server->flags & NFS_MOUNT_NOCTO))
+			goto out_force;
+	}
+	return nfs_revalidate_inode(server, inode);
+out_force:
+	return __nfs_revalidate_inode(server, inode);
+}
+
+/*
+ * We judge how long we want to trust negative
+ * dentries by looking at the parent inode mtime.
+ *
+ * If parent mtime has changed, we revalidate, else we wait for a
+ * period corresponding to the parent's attribute cache timeout value.
+ */
+static inline
+int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
+		       struct nameidata *nd)
+{
+	int ndflags = 0;
+
+	if (nd)
+		ndflags = nd->flags;
+	/* Don't revalidate a negative dentry if we're creating a new file */
+	if ((ndflags & LOOKUP_CREATE) && !(ndflags & LOOKUP_CONTINUE))
+		return 0;
+	return !nfs_check_verifier(dir, dentry);
+}
+
+/*
+ * This is called every time the dcache has a lookup hit,
+ * and we should check whether we can really trust that
+ * lookup.
+ *
+ * NOTE! The hit can be a negative hit too, don't assume
+ * we have an inode!
+ *
+ * If the parent directory is seen to have changed, we throw out the
+ * cached dentry and do a new lookup.
+ */
+static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
+{
+	struct inode *dir;
+	struct inode *inode;
+	struct dentry *parent;
+	int error;
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr;
+	unsigned long verifier;
+
+	parent = dget_parent(dentry);
+	lock_kernel();
+	dir = parent->d_inode;
+	inode = dentry->d_inode;
+
+	if (!inode) {
+		if (nfs_neg_need_reval(dir, dentry, nd))
+			goto out_bad;
+		goto out_valid;
+	}
+
+	if (is_bad_inode(inode)) {
+		dfprintk(VFS, "nfs_lookup_revalidate: %s/%s has dud inode\n",
+			dentry->d_parent->d_name.name, dentry->d_name.name);
+		goto out_bad;
+	}
+
+	/* Revalidate parent directory attribute cache */
+	if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
+		goto out_zap_parent;
+
+	/* Force a full look up iff the parent directory has changed */
+	if (nfs_check_verifier(dir, dentry)) {
+		if (nfs_lookup_verify_inode(inode, nd))
+			goto out_zap_parent;
+		goto out_valid;
+	}
+
+	if (NFS_STALE(inode))
+		goto out_bad;
+
+	verifier = nfs_save_change_attribute(dir);
+	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+	if (error)
+		goto out_bad;
+	if (nfs_compare_fh(NFS_FH(inode), &fhandle))
+		goto out_bad;
+	if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
+		goto out_bad;
+
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, verifier);
+ out_valid:
+	unlock_kernel();
+	dput(parent);
+	return 1;
+out_zap_parent:
+	nfs_zap_caches(dir);
+ out_bad:
+	NFS_CACHEINV(dir);
+	if (inode && S_ISDIR(inode->i_mode)) {
+		/* Purge readdir caches. */
+		nfs_zap_caches(inode);
+		/* If we have submounts, don't unhash ! */
+		if (have_submounts(dentry))
+			goto out_valid;
+		shrink_dcache_parent(dentry);
+	}
+	d_drop(dentry);
+	unlock_kernel();
+	dput(parent);
+	return 0;
+}
+
+/*
+ * This is called from dput() when d_count is going to 0.
+ */
+static int nfs_dentry_delete(struct dentry *dentry)
+{
+	dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		dentry->d_flags);
+
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+		/* Unhash it, so that ->d_iput() would be called */
+		return 1;
+	}
+	if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+		/* Unhash it, so that ancestors of killed async unlink
+		 * files will be cleaned up during umount */
+		return 1;
+	}
+	return 0;
+
+}
+
+/*
+ * Called when the dentry loses inode.
+ * We use it to clean up silly-renamed files.
+ */
+static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
+{
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+		lock_kernel();
+		inode->i_nlink--;
+		nfs_complete_unlink(dentry);
+		unlock_kernel();
+	}
+	/* When creating a negative dentry, we want to renew d_time */
+	nfs_renew_times(dentry);
+	iput(inode);
+}
+
+struct dentry_operations nfs_dentry_operations = {
+	.d_revalidate	= nfs_lookup_revalidate,
+	.d_delete	= nfs_dentry_delete,
+	.d_iput		= nfs_dentry_iput,
+};
+
+static inline
+int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
+{
+	if (NFS_PROTO(dir)->version == 2)
+		return 0;
+	if (!nd || (nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_CREATE))
+		return 0;
+	return (nd->intent.open.flags & O_EXCL) != 0;
+}
+
+static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct dentry *res;
+	struct inode *inode = NULL;
+	int error;
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr;
+
+	dfprintk(VFS, "NFS: lookup(%s/%s)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	res = ERR_PTR(-ENAMETOOLONG);
+	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+		goto out;
+
+	res = ERR_PTR(-ENOMEM);
+	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
+
+	lock_kernel();
+	/* Revalidate parent directory attribute cache */
+	error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
+	if (error < 0) {
+		res = ERR_PTR(error);
+		goto out_unlock;
+	}
+
+	/* If we're doing an exclusive create, optimize away the lookup */
+	if (nfs_is_exclusive_create(dir, nd))
+		goto no_entry;
+
+	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+	if (error == -ENOENT)
+		goto no_entry;
+	if (error < 0) {
+		res = ERR_PTR(error);
+		goto out_unlock;
+	}
+	res = ERR_PTR(-EACCES);
+	inode = nfs_fhget(dentry->d_sb, &fhandle, &fattr);
+	if (!inode)
+		goto out_unlock;
+no_entry:
+	res = d_add_unique(dentry, inode);
+	if (res != NULL)
+		dentry = res;
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+out_unlock:
+	unlock_kernel();
+out:
+	return res;
+}
+
+#ifdef CONFIG_NFS_V4
+static int nfs_open_revalidate(struct dentry *, struct nameidata *);
+
+struct dentry_operations nfs4_dentry_operations = {
+	.d_revalidate	= nfs_open_revalidate,
+	.d_delete	= nfs_dentry_delete,
+	.d_iput		= nfs_dentry_iput,
+};
+
+static int is_atomic_open(struct inode *dir, struct nameidata *nd)
+{
+	if (!nd)
+		return 0;
+	/* Check that we are indeed trying to open this file */
+	if ((nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_OPEN))
+		return 0;
+	/* NFS does not (yet) have a stateful open for directories */
+	if (nd->flags & LOOKUP_DIRECTORY)
+		return 0;
+	/* Are we trying to write to a read only partition? */
+	if (IS_RDONLY(dir) && (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+		return 0;
+	return 1;
+}
+
+static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct dentry *res = NULL;
+	struct inode *inode = NULL;
+	int error;
+
+	/* Check that we are indeed trying to open this file */
+	if (!is_atomic_open(dir, nd))
+		goto no_open;
+
+	if (dentry->d_name.len > NFS_SERVER(dir)->namelen) {
+		res = ERR_PTR(-ENAMETOOLONG);
+		goto out;
+	}
+	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
+
+	/* Let vfs_create() deal with O_EXCL */
+	if (nd->intent.open.flags & O_EXCL)
+		goto no_entry;
+
+	/* Open the file on the server */
+	lock_kernel();
+	/* Revalidate parent directory attribute cache */
+	error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
+	if (error < 0) {
+		res = ERR_PTR(error);
+		unlock_kernel();
+		goto out;
+	}
+
+	if (nd->intent.open.flags & O_CREAT) {
+		nfs_begin_data_update(dir);
+		inode = nfs4_atomic_open(dir, dentry, nd);
+		nfs_end_data_update(dir);
+	} else
+		inode = nfs4_atomic_open(dir, dentry, nd);
+	unlock_kernel();
+	if (IS_ERR(inode)) {
+		error = PTR_ERR(inode);
+		switch (error) {
+			/* Make a negative dentry */
+			case -ENOENT:
+				inode = NULL;
+				break;
+			/* This turned out not to be a regular file */
+			case -ELOOP:
+				if (!(nd->intent.open.flags & O_NOFOLLOW))
+					goto no_open;
+			/* case -EISDIR: */
+			/* case -EINVAL: */
+			default:
+				res = ERR_PTR(error);
+				goto out;
+		}
+	}
+no_entry:
+	res = d_add_unique(dentry, inode);
+	if (res != NULL)
+		dentry = res;
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+out:
+	return res;
+no_open:
+	return nfs_lookup(dir, dentry, nd);
+}
+
+static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct dentry *parent = NULL;
+	struct inode *inode = dentry->d_inode;
+	struct inode *dir;
+	unsigned long verifier;
+	int openflags, ret = 0;
+
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+	if (!is_atomic_open(dir, nd))
+		goto no_open;
+	/* We can't create new files in nfs_open_revalidate(), so we
+	 * optimize away revalidation of negative dentries.
+	 */
+	if (inode == NULL)
+		goto out;
+	/* NFS only supports OPEN on regular files */
+	if (!S_ISREG(inode->i_mode))
+		goto no_open;
+	openflags = nd->intent.open.flags;
+	/* We cannot do exclusive creation on a positive dentry */
+	if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+		goto no_open;
+	/* We can't create new files, or truncate existing ones here */
+	openflags &= ~(O_CREAT|O_TRUNC);
+
+	/*
+	 * Note: we're not holding inode->i_sem and so may be racing with
+	 * operations that change the directory. We therefore save the
+	 * change attribute *before* we do the RPC call.
+	 */
+	lock_kernel();
+	verifier = nfs_save_change_attribute(dir);
+	ret = nfs4_open_revalidate(dir, dentry, openflags);
+	if (!ret)
+		nfs_set_verifier(dentry, verifier);
+	unlock_kernel();
+out:
+	dput(parent);
+	if (!ret)
+		d_drop(dentry);
+	return ret;
+no_open:
+	dput(parent);
+	if (inode != NULL && nfs_have_delegation(inode, FMODE_READ))
+		return 1;
+	return nfs_lookup_revalidate(dentry, nd);
+}
+#endif /* CONFIG_NFS_V4 */
+
+static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
+{
+	struct dentry *parent = desc->file->f_dentry;
+	struct inode *dir = parent->d_inode;
+	struct nfs_entry *entry = desc->entry;
+	struct dentry *dentry, *alias;
+	struct qstr name = {
+		.name = entry->name,
+		.len = entry->len,
+	};
+	struct inode *inode;
+
+	switch (name.len) {
+		case 2:
+			if (name.name[0] == '.' && name.name[1] == '.')
+				return dget_parent(parent);
+			break;
+		case 1:
+			if (name.name[0] == '.')
+				return dget(parent);
+	}
+	name.hash = full_name_hash(name.name, name.len);
+	dentry = d_lookup(parent, &name);
+	if (dentry != NULL)
+		return dentry;
+	if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
+		return NULL;
+	/* Note: caller is already holding the dir->i_sem! */
+	dentry = d_alloc(parent, &name);
+	if (dentry == NULL)
+		return NULL;
+	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
+	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+	alias = d_add_unique(dentry, inode);
+	if (alias != NULL) {
+		dput(dentry);
+		dentry = alias;
+	}
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	return dentry;
+}
+
+/*
+ * Code common to create, mkdir, and mknod.
+ */
+int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+				struct nfs_fattr *fattr)
+{
+	struct inode *inode;
+	int error = -EACCES;
+
+	/* We may have been initialized further down */
+	if (dentry->d_inode)
+		return 0;
+	if (fhandle->size == 0) {
+		struct inode *dir = dentry->d_parent->d_inode;
+		error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+		if (error)
+			goto out_err;
+	}
+	if (!(fattr->valid & NFS_ATTR_FATTR)) {
+		struct nfs_server *server = NFS_SB(dentry->d_sb);
+		error = server->rpc_ops->getattr(server, fhandle, fattr);
+		if (error < 0)
+			goto out_err;
+	}
+	error = -ENOMEM;
+	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
+	if (inode == NULL)
+		goto out_err;
+	d_instantiate(dentry, inode);
+	return 0;
+out_err:
+	d_drop(dentry);
+	return error;
+}
+
+/*
+ * Following a failed create operation, we drop the dentry rather
+ * than retain a negative dentry. This avoids a problem in the event
+ * that the operation succeeded on the server, but an error in the
+ * reply path made it appear to have failed.
+ */
+static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	struct iattr attr;
+	int error;
+	int open_flags = 0;
+
+	dfprintk(VFS, "NFS: create(%s/%ld, %s\n", dir->i_sb->s_id, 
+		dir->i_ino, dentry->d_name.name);
+
+	attr.ia_mode = mode;
+	attr.ia_valid = ATTR_MODE;
+
+	if (nd && (nd->flags & LOOKUP_CREATE))
+		open_flags = nd->intent.open.flags;
+
+	lock_kernel();
+	nfs_begin_data_update(dir);
+	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
+	nfs_end_data_update(dir);
+	if (error != 0)
+		goto out_err;
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	unlock_kernel();
+	return 0;
+out_err:
+	unlock_kernel();
+	d_drop(dentry);
+	return error;
+}
+
+/*
+ * See comments for nfs_proc_create regarding failed operations.
+ */
+static int
+nfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	struct iattr attr;
+	int status;
+
+	dfprintk(VFS, "NFS: mknod(%s/%ld, %s\n", dir->i_sb->s_id,
+		dir->i_ino, dentry->d_name.name);
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	attr.ia_mode = mode;
+	attr.ia_valid = ATTR_MODE;
+
+	lock_kernel();
+	nfs_begin_data_update(dir);
+	status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
+	nfs_end_data_update(dir);
+	if (status != 0)
+		goto out_err;
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	unlock_kernel();
+	return 0;
+out_err:
+	unlock_kernel();
+	d_drop(dentry);
+	return status;
+}
+
+/*
+ * See comments for nfs_proc_create regarding failed operations.
+ */
+static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct iattr attr;
+	int error;
+
+	dfprintk(VFS, "NFS: mkdir(%s/%ld, %s\n", dir->i_sb->s_id,
+		dir->i_ino, dentry->d_name.name);
+
+	attr.ia_valid = ATTR_MODE;
+	attr.ia_mode = mode | S_IFDIR;
+
+	lock_kernel();
+	nfs_begin_data_update(dir);
+	error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
+	nfs_end_data_update(dir);
+	if (error != 0)
+		goto out_err;
+	nfs_renew_times(dentry);
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	unlock_kernel();
+	return 0;
+out_err:
+	d_drop(dentry);
+	unlock_kernel();
+	return error;
+}
+
+static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int error;
+
+	dfprintk(VFS, "NFS: rmdir(%s/%ld, %s\n", dir->i_sb->s_id,
+		dir->i_ino, dentry->d_name.name);
+
+	lock_kernel();
+	nfs_begin_data_update(dir);
+	error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+	/* Ensure the VFS deletes this inode */
+	if (error == 0 && dentry->d_inode != NULL)
+		dentry->d_inode->i_nlink = 0;
+	nfs_end_data_update(dir);
+	unlock_kernel();
+
+	return error;
+}
+
+static int nfs_sillyrename(struct inode *dir, struct dentry *dentry)
+{
+	static unsigned int sillycounter;
+	const int      i_inosize  = sizeof(dir->i_ino)*2;
+	const int      countersize = sizeof(sillycounter)*2;
+	const int      slen       = sizeof(".nfs") + i_inosize + countersize - 1;
+	char           silly[slen+1];
+	struct qstr    qsilly;
+	struct dentry *sdentry;
+	int            error = -EIO;
+
+	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name, 
+		atomic_read(&dentry->d_count));
+
+#ifdef NFS_PARANOIA
+	if (!dentry->d_inode)
+		printk("NFS: silly-renaming %s/%s, negative dentry??\n",
+		       dentry->d_parent->d_name.name, dentry->d_name.name);
+#endif
+	/*
+	 * We don't allow a dentry to be silly-renamed twice.
+	 */
+	error = -EBUSY;
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+		goto out;
+
+	sprintf(silly, ".nfs%*.*lx",
+		i_inosize, i_inosize, dentry->d_inode->i_ino);
+
+	sdentry = NULL;
+	do {
+		char *suffix = silly + slen - countersize;
+
+		dput(sdentry);
+		sillycounter++;
+		sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);
+
+		dfprintk(VFS, "trying to rename %s to %s\n",
+			 dentry->d_name.name, silly);
+		
+		sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+		/*
+		 * N.B. Better to return EBUSY here ... it could be
+		 * dangerous to delete the file while it's in use.
+		 */
+		if (IS_ERR(sdentry))
+			goto out;
+	} while(sdentry->d_inode != NULL); /* need negative lookup */
+
+	qsilly.name = silly;
+	qsilly.len  = strlen(silly);
+	nfs_begin_data_update(dir);
+	if (dentry->d_inode) {
+		nfs_begin_data_update(dentry->d_inode);
+		error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
+				dir, &qsilly);
+		nfs_end_data_update(dentry->d_inode);
+	} else
+		error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
+				dir, &qsilly);
+	nfs_end_data_update(dir);
+	if (!error) {
+		nfs_renew_times(dentry);
+		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+		d_move(dentry, sdentry);
+		error = nfs_async_unlink(dentry);
+ 		/* If we return 0 we don't unlink */
+	}
+	dput(sdentry);
+out:
+	return error;
+}
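+
+/*
+ * Worked example: assuming sizeof(dir->i_ino) == 8 and
+ * sizeof(sillycounter) == 4, the two sprintf() calls above build a
+ * 28-character name such as
+ *
+ *	.nfs000000000000123400000001
+ *
+ * for i_ino == 0x1234 and sillycounter == 1: ".nfs", then the inode
+ * number as 16 zero-padded hex digits, then the counter as 8.
+ */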
+
+/*
+ * Remove a file after making sure there are no pending writes,
+ * and after checking that the file has only one user. 
+ *
+ * We invalidate the attribute cache and free the inode prior to the operation
+ * to avoid possible races if the server reuses the inode.
+ */
+static int nfs_safe_remove(struct dentry *dentry)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	struct inode *inode = dentry->d_inode;
+	int error = -EBUSY;
+		
+	dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	/* If the dentry was sillyrenamed, we simply call d_delete() */
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+		error = 0;
+		goto out;
+	}
+
+	nfs_begin_data_update(dir);
+	if (inode != NULL) {
+		nfs_begin_data_update(inode);
+		error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+		/* The VFS may want to delete this inode */
+		if (error == 0)
+			inode->i_nlink--;
+		nfs_end_data_update(inode);
+	} else
+		error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+	nfs_end_data_update(dir);
+out:
+	return error;
+}
+
+/*  We do silly rename. In case sillyrename() returns -EBUSY, the inode
+ *  belongs to an active ".nfs..." file and we return -EBUSY.
+ *
+ *  If sillyrename() returns 0, we do nothing, otherwise we unlink.
+ */
+static int nfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int error;
+	int need_rehash = 0;
+
+	dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
+		dir->i_ino, dentry->d_name.name);
+
+	lock_kernel();
+	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count) > 1) {
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&dcache_lock);
+		error = nfs_sillyrename(dir, dentry);
+		unlock_kernel();
+		return error;
+	}
+	if (!d_unhashed(dentry)) {
+		__d_drop(dentry);
+		need_rehash = 1;
+	}
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&dcache_lock);
+	error = nfs_safe_remove(dentry);
+	if (!error) {
+		nfs_renew_times(dentry);
+		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+	} else if (need_rehash)
+		d_rehash(dentry);
+	unlock_kernel();
+	return error;
+}
+
+static int
+nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct iattr attr;
+	struct nfs_fattr sym_attr;
+	struct nfs_fh sym_fh;
+	struct qstr qsymname;
+	int error;
+
+	dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
+		dir->i_ino, dentry->d_name.name, symname);
+
+#ifdef NFS_PARANOIA
+	if (dentry->d_inode)
+		printk("nfs_proc_symlink: %s/%s not negative!\n",
+		       dentry->d_parent->d_name.name, dentry->d_name.name);
+#endif
+	/*
+	 * Fill in the sattr for the call.
+ 	 * Note: SunOS 4.1.2 crashes if the mode isn't initialized!
+	 */
+	attr.ia_valid = ATTR_MODE;
+	attr.ia_mode = S_IFLNK | S_IRWXUGO;
+
+	qsymname.name = symname;
+	qsymname.len  = strlen(symname);
+
+	lock_kernel();
+	nfs_begin_data_update(dir);
+	error = NFS_PROTO(dir)->symlink(dir, &dentry->d_name, &qsymname,
+					  &attr, &sym_fh, &sym_attr);
+	nfs_end_data_update(dir);
+	if (!error) {
+		error = nfs_instantiate(dentry, &sym_fh, &sym_attr);
+	} else {
+		if (error == -EEXIST)
+			printk("nfs_proc_symlink: %s/%s already exists??\n",
+			       dentry->d_parent->d_name.name, dentry->d_name.name);
+		d_drop(dentry);
+	}
+	unlock_kernel();
+	return error;
+}
+
+static int 
+nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	int error;
+
+	dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
+		old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	/*
+	 * Drop the dentry in advance to force a new lookup.
+	 * Since nfs_proc_link doesn't return a file handle,
+	 * we can't use the existing dentry.
+	 */
+	lock_kernel();
+	d_drop(dentry);
+
+	nfs_begin_data_update(dir);
+	nfs_begin_data_update(inode);
+	error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+	nfs_end_data_update(inode);
+	nfs_end_data_update(dir);
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * RENAME
+ * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
+ * different file handle for the same inode after a rename (e.g. when
+ * moving to a different directory). A fail-safe method to do so would
+ * be to look up old_dir/old_name, create a link to new_dir/new_name and
+ * rename the old file using the sillyrename stuff. This way, the original
+ * file in old_dir will go away when the last process iput()s the inode.
+ *
+ * FIXED.
+ * 
+ * It actually works quite well. One needs to allow for at least one
+ * ".nfs..." file in each directory the file ever gets moved or linked
+ * to, which happens automatically with the new implementation that
+ * depends only on the dcache instead of the inode layer.
+ *
+ * Unfortunately, things are a little more complicated than indicated
+ * above. For a cross-directory move, we want to make sure we can get
+ * rid of the old inode after the operation.  This means there must be
+ * no pending writes (if it's a file), and the use count must be 1.
+ * If these conditions are met, we can drop the dentries before doing
+ * the rename.
+ */
+static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+		      struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	struct dentry *dentry = NULL, *rehash = NULL;
+	int error = -EBUSY;
+
+	/*
+	 * To prevent any new references to the target during the rename,
+	 * we unhash the dentry and free the inode in advance.
+	 */
+	lock_kernel();
+	if (!d_unhashed(new_dentry)) {
+		d_drop(new_dentry);
+		rehash = new_dentry;
+	}
+
+	dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
+		 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+		 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+		 atomic_read(&new_dentry->d_count));
+
+	/*
+	 * First check whether the target is busy ... we can't
+	 * safely do _any_ rename if the target is in use.
+	 *
+	 * For files, make a copy of the dentry and then do a 
+	 * silly-rename. If the silly-rename succeeds, the
+	 * copied dentry is hashed and becomes the new target.
+	 */
+	if (!new_inode)
+		goto go_ahead;
+	if (S_ISDIR(new_inode->i_mode))
+		goto out;
+	else if (atomic_read(&new_dentry->d_count) > 2) {
+		int err;
+		/* copy the target dentry's name */
+		dentry = d_alloc(new_dentry->d_parent,
+				 &new_dentry->d_name);
+		if (!dentry)
+			goto out;
+
+		/* silly-rename the existing target ... */
+		err = nfs_sillyrename(new_dir, new_dentry);
+		if (!err) {
+			new_dentry = rehash = dentry;
+			new_inode = NULL;
+			/* instantiate the replacement target */
+			d_instantiate(new_dentry, NULL);
+		} else if (atomic_read(&new_dentry->d_count) > 1) {
+			/* dentry still busy? */
+#ifdef NFS_PARANOIA
+			printk("nfs_rename: target %s/%s busy, d_count=%d\n",
+			       new_dentry->d_parent->d_name.name,
+			       new_dentry->d_name.name,
+			       atomic_read(&new_dentry->d_count));
+#endif
+			goto out;
+		}
+	}
+
+go_ahead:
+	/*
+	 * ... prune child dentries and writebacks if needed.
+	 */
+	if (atomic_read(&old_dentry->d_count) > 1) {
+		nfs_wb_all(old_inode);
+		shrink_dcache_parent(old_dentry);
+	}
+
+	if (new_inode)
+		d_delete(new_dentry);
+
+	nfs_begin_data_update(old_dir);
+	nfs_begin_data_update(new_dir);
+	nfs_begin_data_update(old_inode);
+	error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
+					   new_dir, &new_dentry->d_name);
+	nfs_end_data_update(old_inode);
+	nfs_end_data_update(new_dir);
+	nfs_end_data_update(old_dir);
+out:
+	if (rehash)
+		d_rehash(rehash);
+	if (!error) {
+		if (!S_ISDIR(old_inode->i_mode))
+			d_move(old_dentry, new_dentry);
+		nfs_renew_times(new_dentry);
+		nfs_set_verifier(new_dentry, nfs_save_change_attribute(new_dir));
+	}
+
+	/* new dentry created? */
+	if (dentry)
+		dput(dentry);
+	unlock_kernel();
+	return error;
+}
+
+int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
+{
+	struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
+
+	if (cache->cred != cred
+			|| time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))
+			|| (NFS_FLAGS(inode) & NFS_INO_INVALID_ACCESS))
+		return -ENOENT;
+	memcpy(res, cache, sizeof(*res));
+	return 0;
+}
+
+void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
+{
+	struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
+
+	if (cache->cred != set->cred) {
+		if (cache->cred)
+			put_rpccred(cache->cred);
+		cache->cred = get_rpccred(set->cred);
+	}
+	NFS_FLAGS(inode) &= ~NFS_INO_INVALID_ACCESS;
+	cache->jiffies = set->jiffies;
+	cache->mask = set->mask;
+}
+
+static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
+{
+	struct nfs_access_entry cache;
+	int status;
+
+	status = nfs_access_get_cached(inode, cred, &cache);
+	if (status == 0)
+		goto out;
+
+	/* Be clever: ask server to check for all possible rights */
+	cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
+	cache.cred = cred;
+	cache.jiffies = jiffies;
+	status = NFS_PROTO(inode)->access(inode, &cache);
+	if (status != 0)
+		return status;
+	nfs_access_add_cache(inode, &cache);
+out:
+	if ((cache.mask & mask) == mask)
+		return 0;
+	return -EACCES;
+}
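+
+/*
+ * Example of the single-slot, per-inode cache above (illustrative
+ * values): if the server's ACCESS reply grants MAY_READ|MAY_WRITE
+ * only, a later call with mask == MAY_READ is satisfied from the
+ * cache because (cache.mask & mask) == mask, while mask == MAY_EXEC
+ * returns -EACCES without another ACCESS RPC, for as long as the
+ * entry is valid for this credential and has not passed
+ * NFS_ATTRTIMEO(inode).
+ */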
+
+int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	struct rpc_cred *cred;
+	int res = 0;
+
+	if (mask == 0)
+		goto out;
+	/* Is this sys_access() ? */
+	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
+		goto force_lookup;
+
+	switch (inode->i_mode & S_IFMT) {
+		case S_IFLNK:
+			goto out;
+		case S_IFREG:
+			/* NFSv4 has atomic_open... */
+			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+					&& nd != NULL
+					&& (nd->flags & LOOKUP_OPEN))
+				goto out;
+			break;
+		case S_IFDIR:
+			/*
+			 * Optimize away all write operations, since the server
+			 * will check permissions when we perform the op.
+			 */
+			if ((mask & MAY_WRITE) && !(mask & MAY_READ))
+				goto out;
+	}
+
+force_lookup:
+	lock_kernel();
+
+	if (!NFS_PROTO(inode)->access)
+		goto out_notsup;
+
+	cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
+	if (!IS_ERR(cred)) {
+		res = nfs_do_access(inode, cred, mask);
+		put_rpccred(cred);
+	} else
+		res = PTR_ERR(cred);
+	unlock_kernel();
+out:
+	return res;
+out_notsup:
+	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (res == 0)
+		res = generic_permission(inode, mask, NULL);
+	unlock_kernel();
+	return res;
+}
+
+/*
+ * Local variables:
+ *  version-control: t
+ *  kept-new-versions: 5
+ * End:
+ */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
new file mode 100644
index 0000000..68df803
--- /dev/null
+++ b/fs/nfs/direct.c
@@ -0,0 +1,808 @@
+/*
+ * linux/fs/nfs/direct.c
+ *
+ * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
+ *
+ * High-performance uncached I/O for the Linux NFS client
+ *
+ * There are important applications whose performance or correctness
+ * depends on uncached access to file data.  Database clusters
+ * (multiple copies of the same instance running on separate hosts) 
+ * implement their own cache coherency protocol that subsumes file
+ * system cache protocols.  Applications that process datasets 
+ * considerably larger than the client's memory do not always benefit 
+ * from a local cache.  A streaming video server, for instance, has no 
+ * need to cache the contents of a file.
+ *
+ * When an application requests uncached I/O, all read and write requests
+ * are made directly to the server; data stored or fetched via these
+ * requests is not cached in the Linux page cache.  The client does not
+ * correct unaligned requests from applications.  All requested bytes are
+ * held on permanent storage before a direct write system call returns to
+ * an application.
+ *
+ * Solaris implements an uncached I/O facility called directio() that
+ * is used for backups and sequential I/O to very large files.  Solaris
+ * also supports uncaching whole NFS partitions with "-o forcedirectio,"
+ * an undocumented mount option.
+ *
+ * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
+ * help from Andrew Morton.
+ *
+ * 18 Dec 2001	Initial implementation for 2.4  --cel
+ * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
+ * 08 Jun 2003	Port to 2.5 APIs  --cel
+ * 31 Mar 2004	Handle direct I/O without VFS support  --cel
+ * 15 Sep 2004	Parallel async reads  --cel
+ *
+ */
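+
+/*
+ * A minimal userspace sketch of how this code is reached (the mount
+ * point /mnt/nfs below is only an example); per the note above,
+ * unaligned requests are passed to the server as-is:
+ *
+ *	#define _GNU_SOURCE
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
+ *	char buf[8192];
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ *	close(fd);
+ *
+ * The read() above is serviced by nfs_direct_IO() below rather than
+ * by the page cache.
+ */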
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/kref.h>
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#define NFSDBG_FACILITY		NFSDBG_VFS
+#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
+
+static kmem_cache_t *nfs_direct_cachep;
+
+/*
+ * This represents a set of asynchronous requests that we're waiting on
+ */
+struct nfs_direct_req {
+	struct kref		kref;		/* release manager */
+	struct list_head	list;		/* nfs_read_data structs */
+	wait_queue_head_t	wait;		/* wait for i/o completion */
+	struct page **		pages;		/* pages in our buffer */
+	unsigned int		npages;		/* count of pages */
+	atomic_t		complete,	/* i/os we're waiting for */
+				count,		/* bytes actually processed */
+				error;		/* any reported error */
+};
+
+/**
+ * nfs_get_user_pages - find and set up pages underlying user's buffer
+ * @rw: direction (read or write)
+ * @user_addr: starting address of this segment of user's buffer
+ * @size: size of this segment
+ * @pages: returned array of page struct pointers underlying user's buffer
+ */
+static inline int
+nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
+		struct page ***pages)
+{
+	int result = -ENOMEM;
+	unsigned long page_count;
+	size_t array_size;
+
+	/* set an arbitrary limit to prevent type overflow */
+	/* XXX: this can probably be as large as INT_MAX */
+	if (size > MAX_DIRECTIO_SIZE) {
+		*pages = NULL;
+		return -EFBIG;
+	}
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+
+	array_size = (page_count * sizeof(struct page *));
+	*pages = kmalloc(array_size, GFP_KERNEL);
+	if (*pages) {
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					page_count, (rw == READ), 0,
+					*pages, NULL);
+		up_read(&current->mm->mmap_sem);
+	}
+	return result;
+}
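+
+/*
+ * Worked example of the page arithmetic above: with PAGE_SIZE == 4096
+ * (PAGE_SHIFT == 12), a buffer at user_addr == 0x1003 with
+ * size == 8192 gives
+ *
+ *	page_count = ((0x1003 + 8192 + 4095) >> 12) - (0x1003 >> 12)
+ *		   = 4 - 1 = 3
+ *
+ * i.e. three pages are pinned even though the transfer is only two
+ * pages long, because it starts part-way into the first page.
+ */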
+
+/**
+ * nfs_free_user_pages - tear down page struct array
+ * @pages: array of page struct pointers underlying target buffer
+ * @npages: number of pages in the array
+ * @do_dirty: dirty the pages as we release them
+ */
+static void
+nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+{
+	int i;
+	for (i = 0; i < npages; i++) {
+		if (do_dirty)
+			set_page_dirty_lock(pages[i]);
+		page_cache_release(pages[i]);
+	}
+	kfree(pages);
+}
+
+/**
+ * nfs_direct_req_release - release nfs_direct_req structure for direct read
+ * @kref: kref object embedded in an nfs_direct_req structure
+ *
+ */
+static void nfs_direct_req_release(struct kref *kref)
+{
+	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
+	kmem_cache_free(nfs_direct_cachep, dreq);
+}
+
+/**
+ * nfs_direct_read_alloc - allocate nfs_read_data structures for direct read
+ * @nbytes: count of bytes for the read request
+ * @rsize: local rsize setting
+ *
+ * Note we also set the number of requests we have in the dreq when we are
+ * done.  This prevents races with I/O completion so we will always wait
+ * until all requests have been dispatched and completed.
+ */
+static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int rsize)
+{
+	struct list_head *list;
+	struct nfs_direct_req *dreq;
+	unsigned int reads = 0;
+
+	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	if (!dreq)
+		return NULL;
+
+	kref_init(&dreq->kref);
+	init_waitqueue_head(&dreq->wait);
+	INIT_LIST_HEAD(&dreq->list);
+	atomic_set(&dreq->count, 0);
+	atomic_set(&dreq->error, 0);
+
+	list = &dreq->list;
+	for(;;) {
+		struct nfs_read_data *data = nfs_readdata_alloc();
+
+		if (unlikely(!data)) {
+			while (!list_empty(list)) {
+				data = list_entry(list->next,
+						  struct nfs_read_data, pages);
+				list_del(&data->pages);
+				nfs_readdata_free(data);
+			}
+			kref_put(&dreq->kref, nfs_direct_req_release);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&data->pages);
+		list_add(&data->pages, list);
+
+		data->req = (struct nfs_page *) dreq;
+		reads++;
+		if (nbytes <= rsize)
+			break;
+		nbytes -= rsize;
+	}
+	kref_get(&dreq->kref);
+	atomic_set(&dreq->complete, reads);
+	return dreq;
+}
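+
+/*
+ * Worked example: with nbytes == 100000 and, say, rsize == 32768, the
+ * loop above allocates four nfs_read_data structures (covering 32768,
+ * 32768, 32768 and 1696 bytes respectively), and dreq->complete is
+ * set to 4 so that nfs_direct_read_result() only wakes the waiter
+ * once all four READs have completed.
+ */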
+
+/**
+ * nfs_direct_read_result - handle a read reply for a direct read request
+ * @data: address of NFS READ operation control block
+ * @status: status of this NFS READ operation
+ *
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ */
+static void nfs_direct_read_result(struct nfs_read_data *data, int status)
+{
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+
+	if (likely(status >= 0))
+		atomic_add(data->res.count, &dreq->count);
+	else
+		atomic_set(&dreq->error, status);
+
+	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
+		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
+		wake_up(&dreq->wait);
+		kref_put(&dreq->kref, nfs_direct_req_release);
+	}
+}
+
+/**
+ * nfs_direct_read_schedule - dispatch NFS READ operations for a direct read
+ * @dreq: address of nfs_direct_req struct for this request
+ * @inode: target inode
+ * @ctx: target file open context
+ * @user_addr: starting address of this segment of user's buffer
+ * @count: size of this segment
+ * @file_offset: offset in file to begin the operation
+ *
+ * For each nfs_read_data struct that was allocated on the list, dispatch
+ * an NFS READ operation
+ */
+static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
+		struct inode *inode, struct nfs_open_context *ctx,
+		unsigned long user_addr, size_t count, loff_t file_offset)
+{
+	struct list_head *list = &dreq->list;
+	struct page **pages = dreq->pages;
+	unsigned int curpage, pgbase;
+	unsigned int rsize = NFS_SERVER(inode)->rsize;
+
+	curpage = 0;
+	pgbase = user_addr & ~PAGE_MASK;
+	do {
+		struct nfs_read_data *data;
+		unsigned int bytes;
+
+		bytes = rsize;
+		if (count < rsize)
+			bytes = count;
+
+		data = list_entry(list->next, struct nfs_read_data, pages);
+		list_del_init(&data->pages);
+
+		data->inode = inode;
+		data->cred = ctx->cred;
+		data->args.fh = NFS_FH(inode);
+		data->args.context = ctx;
+		data->args.offset = file_offset;
+		data->args.pgbase = pgbase;
+		data->args.pages = &pages[curpage];
+		data->args.count = bytes;
+		data->res.fattr = &data->fattr;
+		data->res.eof = 0;
+		data->res.count = bytes;
+
+		NFS_PROTO(inode)->read_setup(data);
+
+		data->task.tk_cookie = (unsigned long) inode;
+		data->task.tk_calldata = data;
+		data->task.tk_release = nfs_readdata_release;
+		data->complete = nfs_direct_read_result;
+
+		lock_kernel();
+		rpc_execute(&data->task);
+		unlock_kernel();
+
+		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				bytes,
+				(unsigned long long)data->args.offset);
+
+		file_offset += bytes;
+		pgbase += bytes;
+		curpage += pgbase >> PAGE_SHIFT;
+		pgbase &= ~PAGE_MASK;
+
+		count -= bytes;
+	} while (count != 0);
+}
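+
+/*
+ * Worked example of the scheduling loop (illustrative numbers): with
+ * PAGE_SIZE == 4096, rsize == 8192, a user buffer starting 512 bytes
+ * into its first page (pgbase == 512) and count == 10000:
+ *
+ *	RPC 1: count = 8192, pgbase = 512, pages = &pages[0]
+ *	       (spans pages 0..2, since 512 + 8192 > 2 * 4096)
+ *	RPC 2: count = 1808, pgbase = 512, pages = &pages[2]
+ *
+ * After RPC 1 the code advances curpage by (512 + 8192) >> PAGE_SHIFT
+ * == 2 and reduces pgbase back to 512.
+ */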
+
+/**
+ * nfs_direct_read_wait - wait for I/O completion for direct reads
+ * @dreq: request on which we are to wait
+ * @intr: whether or not this wait can be interrupted
+ *
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
+{
+	int result = 0;
+
+	if (intr) {
+		result = wait_event_interruptible(dreq->wait,
+					(atomic_read(&dreq->complete) == 0));
+	} else {
+		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
+	}
+
+	if (!result)
+		result = atomic_read(&dreq->error);
+	if (!result)
+		result = atomic_read(&dreq->count);
+
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
+/**
+ * nfs_direct_read_seg - Read in one iov segment.  Generate separate
+ *                        read RPCs for each "rsize" bytes.
+ * @inode: target inode
+ * @ctx: target file open context
+ * @user_addr: starting address of this segment of user's buffer
+ * @count: size of this segment
+ * @file_offset: offset in file to begin the operation
+ * @pages: array of addresses of page structs defining user's buffer
+ * @nr_pages: number of pages in the array
+ *
+ */
+static ssize_t nfs_direct_read_seg(struct inode *inode,
+		struct nfs_open_context *ctx, unsigned long user_addr,
+		size_t count, loff_t file_offset, struct page **pages,
+		unsigned int nr_pages)
+{
+	ssize_t result;
+	sigset_t oldset;
+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
+	struct nfs_direct_req *dreq;
+
+	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
+	if (!dreq)
+		return -ENOMEM;
+
+	dreq->pages = pages;
+	dreq->npages = nr_pages;
+
+	rpc_clnt_sigmask(clnt, &oldset);
+	nfs_direct_read_schedule(dreq, inode, ctx, user_addr, count,
+				 file_offset);
+	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	rpc_clnt_sigunmask(clnt, &oldset);
+
+	return result;
+}
+
+/**
+ * nfs_direct_read - For each iov segment, map the user's buffer
+ *                   then generate read RPCs.
+ * @inode: target inode
+ * @ctx: target file open context
+ * @iov: array of vectors that define I/O buffer
+ * @file_offset: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ * We've already pushed out any non-direct writes so that this read
+ * will see them when we read from the server.
+ */
+static ssize_t
+nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
+		const struct iovec *iov, loff_t file_offset,
+		unsigned long nr_segs)
+{
+	ssize_t tot_bytes = 0;
+	unsigned long seg = 0;
+
+	while ((seg < nr_segs) && (tot_bytes >= 0)) {
+		ssize_t result;
+		int page_count;
+		struct page **pages;
+		const struct iovec *vec = &iov[seg++];
+		unsigned long user_addr = (unsigned long) vec->iov_base;
+		size_t size = vec->iov_len;
+
+		page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
+		if (page_count < 0) {
+			nfs_free_user_pages(pages, 0, 0);
+			if (tot_bytes > 0)
+				break;
+			return page_count;
+		}
+
+		result = nfs_direct_read_seg(inode, ctx, user_addr, size,
+				file_offset, pages, page_count);
+
+		if (result <= 0) {
+			if (tot_bytes > 0)
+				break;
+			return result;
+		}
+		tot_bytes += result;
+		file_offset += result;
+		if (result < size)
+			break;
+	}
+
+	return tot_bytes;
+}
+
+/**
+ * nfs_direct_write_seg - Write out one iov segment.  Generate separate
+ *                        write RPCs for each "wsize" bytes, then commit.
+ * @inode: target inode
+ * @ctx: target file open context
+ * @user_addr: starting address of this segment of user's buffer
+ * @count: size of this segment
+ * @file_offset: offset in file to begin the operation
+ * @pages: array of addresses of page structs defining user's buffer
+ * @nr_pages: size of pages array
+ */
+static ssize_t nfs_direct_write_seg(struct inode *inode,
+		struct nfs_open_context *ctx, unsigned long user_addr,
+		size_t count, loff_t file_offset, struct page **pages,
+		int nr_pages)
+{
+	const unsigned int wsize = NFS_SERVER(inode)->wsize;
+	size_t request;
+	int curpage, need_commit;
+	ssize_t result, tot_bytes;
+	struct nfs_writeverf first_verf;
+	struct nfs_write_data *wdata;
+
+	wdata = nfs_writedata_alloc();
+	if (!wdata)
+		return -ENOMEM;
+
+	wdata->inode = inode;
+	wdata->cred = ctx->cred;
+	wdata->args.fh = NFS_FH(inode);
+	wdata->args.context = ctx;
+	wdata->args.stable = NFS_UNSTABLE;
+	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
+		wdata->args.stable = NFS_FILE_SYNC;
+	wdata->res.fattr = &wdata->fattr;
+	wdata->res.verf = &wdata->verf;
+
+	nfs_begin_data_update(inode);
+retry:
+	need_commit = 0;
+	tot_bytes = 0;
+	curpage = 0;
+	request = count;
+	wdata->args.pgbase = user_addr & ~PAGE_MASK;
+	wdata->args.offset = file_offset;
+	do {
+		wdata->args.count = request;
+		if (wdata->args.count > wsize)
+			wdata->args.count = wsize;
+		wdata->args.pages = &pages[curpage];
+
+		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
+			wdata->args.count, (long long) wdata->args.offset,
+			user_addr + tot_bytes, wdata->args.pgbase, curpage);
+
+		lock_kernel();
+		result = NFS_PROTO(inode)->write(wdata);
+		unlock_kernel();
+
+		if (result <= 0) {
+			if (tot_bytes > 0)
+				break;
+			goto out;
+		}
+
+		if (tot_bytes == 0)
+			memcpy(&first_verf.verifier, &wdata->verf.verifier,
+						sizeof(first_verf.verifier));
+		if (wdata->verf.committed != NFS_FILE_SYNC) {
+			need_commit = 1;
+			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
+					sizeof(first_verf.verifier)) != 0)
+				goto sync_retry;
+		}
+
+		tot_bytes += result;
+
+		/* in case of a short write: stop now, let the app recover */
+		if (result < wdata->args.count)
+			break;
+
+		wdata->args.offset += result;
+		wdata->args.pgbase += result;
+		curpage += wdata->args.pgbase >> PAGE_SHIFT;
+		wdata->args.pgbase &= ~PAGE_MASK;
+		request -= result;
+	} while (request != 0);
+
+	/*
+	 * Commit data written so far, even in the event of an error
+	 */
+	if (need_commit) {
+		wdata->args.count = tot_bytes;
+		wdata->args.offset = file_offset;
+
+		lock_kernel();
+		result = NFS_PROTO(inode)->commit(wdata);
+		unlock_kernel();
+
+		if (result < 0 || memcmp(&first_verf.verifier,
+					 &wdata->verf.verifier,
+					 sizeof(first_verf.verifier)) != 0)
+			goto sync_retry;
+	}
+	result = tot_bytes;
+
+out:
+	nfs_end_data_update_defer(inode);
+	nfs_writedata_free(wdata);
+	return result;
+
+sync_retry:
+	wdata->args.stable = NFS_FILE_SYNC;
+	goto retry;
+}
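+
+/*
+ * Note on the retry logic above: UNSTABLE writes are only durable
+ * once a COMMIT returns the same write verifier that the WRITE
+ * replies carried.  If any reply (or the COMMIT itself) comes back
+ * with a different verifier, for example because the server rebooted
+ * mid-transfer, the whole segment is re-sent with
+ * stable == NFS_FILE_SYNC via sync_retry.
+ */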
+
+/**
+ * nfs_direct_write - For each iov segment, map the user's buffer
+ *                    then generate write and commit RPCs.
+ * @inode: target inode
+ * @ctx: target file open context
+ * @iov: array of vectors that define I/O buffer
+ * @file_offset: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ * Upon return, generic_file_direct_IO invalidates any cached pages
+ * that non-direct readers might access, so they will pick up these
+ * writes immediately.
+ */
+static ssize_t nfs_direct_write(struct inode *inode,
+		struct nfs_open_context *ctx, const struct iovec *iov,
+		loff_t file_offset, unsigned long nr_segs)
+{
+	ssize_t tot_bytes = 0;
+	unsigned long seg = 0;
+
+	while ((seg < nr_segs) && (tot_bytes >= 0)) {
+		ssize_t result;
+		int page_count;
+		struct page **pages;
+		const struct iovec *vec = &iov[seg++];
+		unsigned long user_addr = (unsigned long) vec->iov_base;
+		size_t size = vec->iov_len;
+
+		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
+		if (page_count < 0) {
+			nfs_free_user_pages(pages, 0, 0);
+			if (tot_bytes > 0)
+				break;
+			return page_count;
+		}
+
+		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
+				file_offset, pages, page_count);
+		nfs_free_user_pages(pages, page_count, 0);
+
+		if (result <= 0) {
+			if (tot_bytes > 0)
+				break;
+			return result;
+		}
+		tot_bytes += result;
+		file_offset += result;
+		if (result < size)
+			break;
+	}
+	return tot_bytes;
+}
+
+/**
+ * nfs_direct_IO - NFS address space operation for direct I/O
+ * @rw: direction (read or write)
+ * @iocb: target I/O control block
+ * @iov: array of vectors that define I/O buffer
+ * @file_offset: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ */
+ssize_t
+nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+		loff_t file_offset, unsigned long nr_segs)
+{
+	ssize_t result = -EINVAL;
+	struct file *file = iocb->ki_filp;
+	struct nfs_open_context *ctx;
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+
+	/*
+	 * No support for async yet
+	 */
+	if (!is_sync_kiocb(iocb))
+		return result;
+
+	ctx = (struct nfs_open_context *)file->private_data;
+	switch (rw) {
+	case READ:
+		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
+				dentry->d_name.name, file_offset, nr_segs);
+
+		result = nfs_direct_read(inode, ctx, iov,
+						file_offset, nr_segs);
+		break;
+	case WRITE:
+		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
+				dentry->d_name.name, file_offset, nr_segs);
+
+		result = nfs_direct_write(inode, ctx, iov,
+						file_offset, nr_segs);
+		break;
+	default:
+		break;
+	}
+	return result;
+}
+
+/**
+ * nfs_file_direct_read - file direct read operation for NFS files
+ * @iocb: target I/O control block
+ * @buf: user's buffer into which to read data
+ * @count: number of bytes to read
+ * @pos: byte offset in file where reading starts
+ *
+ * We use this function for direct reads instead of calling
+ * generic_file_aio_read() in order to avoid its check to see if
+ * the request starts before the end of the file.  For that check
+ * to work, we must generate a GETATTR before each direct read, and
+ * even then there is a window between the GETATTR and the subsequent
+ * READ where the file size could change.  So our preference is simply
+ * to do all reads the application wants, and the server will take
+ * care of managing the end of file boundary.
+ * 
+ * This function also eliminates unnecessarily updating the file's
+ * atime locally, as the NFS server sets the file's atime, and this
+ * client must read the updated atime from the server back into its
+ * cache.
+ */
+ssize_t
+nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
+{
+	ssize_t retval = -EINVAL;
+	loff_t *ppos = &iocb->ki_pos;
+	struct file *file = iocb->ki_filp;
+	struct nfs_open_context *ctx =
+			(struct nfs_open_context *) file->private_data;
+	struct dentry *dentry = file->f_dentry;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = count,
+	};
+
+	dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		(unsigned long) count, (unsigned long) pos);
+
+	if (!is_sync_kiocb(iocb))
+		goto out;
+	if (count < 0)
+		goto out;
+	retval = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
+		goto out;
+	retval = 0;
+	if (!count)
+		goto out;
+
+	if (mapping->nrpages) {
+		retval = filemap_fdatawrite(mapping);
+		if (retval == 0)
+			retval = nfs_wb_all(inode);
+		if (retval == 0)
+			retval = filemap_fdatawait(mapping);
+		if (retval)
+			goto out;
+	}
+
+	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
+	if (retval > 0)
+		*ppos = pos + retval;
+
+out:
+	return retval;
+}
+
+/**
+ * nfs_file_direct_write - file direct write operation for NFS files
+ * @iocb: target I/O control block
+ * @buf: user's buffer from which to write data
+ * @count: number of bytes to write
+ * @pos: byte offset in file where writing starts
+ *
+ * We use this function for direct writes instead of calling
+ * generic_file_aio_write() in order to avoid taking the inode
+ * semaphore and updating the i_size.  The NFS server will set
+ * the new i_size and this client must read the updated size
+ * back into its cache.  We let the server do generic write
+ * parameter checking and report problems.
+ *
+ * We also avoid an unnecessary invocation of generic_osync_inode(),
+ * as it is fairly meaningless to sync the metadata of an NFS file.
+ *
+ * We eliminate local atime updates, see direct read above.
+ *
+ * We avoid unnecessary page cache invalidations for normal cached
+ * readers of this file.
+ *
+ * Note that O_APPEND is not supported for NFS direct writes, as there
+ * is no atomic O_APPEND write facility in the NFS protocol.
+ */
+ssize_t
+nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+{
+	ssize_t retval = -EINVAL;
+	loff_t *ppos = &iocb->ki_pos;
+	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+	struct file *file = iocb->ki_filp;
+	struct nfs_open_context *ctx =
+			(struct nfs_open_context *) file->private_data;
+	struct dentry *dentry = file->f_dentry;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	struct iovec iov = {
+		.iov_base = (char __user *)buf,
+		.iov_len = count,
+	};
+
+	dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		inode->i_ino, (unsigned long) count, (unsigned long) pos);
+
+	if (!is_sync_kiocb(iocb))
+		goto out;
+	if (count < 0)
+		goto out;
+	if (pos < 0)
+		goto out;
+	retval = -EFAULT;
+	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
+		goto out;
+	if (file->f_error) {
+		retval = file->f_error;
+		file->f_error = 0;
+		goto out;
+	}
+	retval = -EFBIG;
+	if (limit != RLIM_INFINITY) {
+		if (pos >= limit) {
+			send_sig(SIGXFSZ, current, 0);
+			goto out;
+		}
+		if (count > limit - (unsigned long) pos)
+			count = limit - (unsigned long) pos;
+	}
+	retval = 0;
+	if (!count)
+		goto out;
+
+	if (mapping->nrpages) {
+		retval = filemap_fdatawrite(mapping);
+		if (retval == 0)
+			retval = nfs_wb_all(inode);
+		if (retval == 0)
+			retval = filemap_fdatawait(mapping);
+		if (retval)
+			goto out;
+	}
+
+	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
+	if (mapping->nrpages)
+		invalidate_inode_pages2(mapping);
+	if (retval > 0)
+		*ppos = pos + retval;
+
+out:
+	return retval;
+}
+
+int nfs_init_directcache(void)
+{
+	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
+						sizeof(struct nfs_direct_req),
+						0, SLAB_RECLAIM_ACCOUNT,
+						NULL, NULL);
+	if (nfs_direct_cachep == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void nfs_destroy_directcache(void)
+{
+	if (kmem_cache_destroy(nfs_direct_cachep))
+		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
+}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
new file mode 100644
index 0000000..f06eee6
--- /dev/null
+++ b/fs/nfs/file.c
@@ -0,0 +1,484 @@
+/*
+ *  linux/fs/nfs/file.c
+ *
+ *  Copyright (C) 1992  Rick Sladkey
+ *
+ *  Changes Copyright (C) 1994 by Florian La Roche
+ *   - Do not copy data too often around in the kernel.
+ *   - In nfs_file_read the return value of kmalloc wasn't checked.
+ *   - Put in a better version of read look-ahead buffering. Original idea
+ *     and implementation by Wai S Kok elekokws@ee.nus.sg.
+ *
+ *  Expire cache on write to a file by Wai S Kok (Oct 1994).
+ *
+ *  Total rewrite of read side for new NFS buffer cache.. Linus.
+ *
+ *  nfs regular file handling functions
+ */
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include "delegation.h"
+
+#define NFSDBG_FACILITY		NFSDBG_FILE
+
+static int nfs_file_open(struct inode *, struct file *);
+static int nfs_file_release(struct inode *, struct file *);
+static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
+static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
+static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
+static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
+static int  nfs_file_flush(struct file *);
+static int  nfs_fsync(struct file *, struct dentry *dentry, int datasync);
+static int nfs_check_flags(int flags);
+static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
+static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+
+struct file_operations nfs_file_operations = {
+	.llseek		= remote_llseek,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
+	.aio_read	= nfs_file_read,
+	.aio_write	= nfs_file_write,
+	.mmap		= nfs_file_mmap,
+	.open		= nfs_file_open,
+	.flush		= nfs_file_flush,
+	.release	= nfs_file_release,
+	.fsync		= nfs_fsync,
+	.lock		= nfs_lock,
+	.flock		= nfs_flock,
+	.sendfile	= nfs_file_sendfile,
+	.check_flags	= nfs_check_flags,
+};
+
+struct inode_operations nfs_file_inode_operations = {
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+};
+
+/* Hack for future NFS swap support */
+#ifndef IS_SWAPFILE
+# define IS_SWAPFILE(inode)	(0)
+#endif
+
+static int nfs_check_flags(int flags)
+{
+	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Open file
+ */
+static int
+nfs_file_open(struct inode *inode, struct file *filp)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	int (*open)(struct inode *, struct file *);
+	int res;
+
+	res = nfs_check_flags(filp->f_flags);
+	if (res)
+		return res;
+
+	lock_kernel();
+	/* Do NFSv4 open() call */
+	if ((open = server->rpc_ops->file_open) != NULL)
+		res = open(inode, filp);
+	unlock_kernel();
+	return res;
+}
+
+static int
+nfs_file_release(struct inode *inode, struct file *filp)
+{
+	/* Ensure that dirty pages are flushed out with the right creds */
+	if (filp->f_mode & FMODE_WRITE)
+		filemap_fdatawrite(filp->f_mapping);
+	return NFS_PROTO(inode)->file_release(inode, filp);
+}
+
+/*
+ * Flush all dirty pages, and check for write errors.
+ *
+ */
+static int
+nfs_file_flush(struct file *file)
+{
+	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct inode	*inode = file->f_dentry->d_inode;
+	int		status;
+
+	dfprintk(VFS, "nfs: flush(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
+
+	if ((file->f_mode & FMODE_WRITE) == 0)
+		return 0;
+	lock_kernel();
+	/* Ensure that data+attribute caches are up to date after close() */
+	status = nfs_wb_all(inode);
+	if (!status) {
+		status = ctx->error;
+		ctx->error = 0;
+		if (!status && !nfs_have_delegation(inode, FMODE_READ))
+			__nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	}
+	unlock_kernel();
+	return status;
+}
+
+static ssize_t
+nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
+{
+	struct dentry * dentry = iocb->ki_filp->f_dentry;
+	struct inode * inode = dentry->d_inode;
+	ssize_t result;
+
+#ifdef CONFIG_NFS_DIRECTIO
+	if (iocb->ki_filp->f_flags & O_DIRECT)
+		return nfs_file_direct_read(iocb, buf, count, pos);
+#endif
+
+	dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		(unsigned long) count, (unsigned long) pos);
+
+	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!result)
+		result = generic_file_aio_read(iocb, buf, count, pos);
+	return result;
+}
+
+static ssize_t
+nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
+		read_actor_t actor, void *target)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	ssize_t res;
+
+	dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		(unsigned long) count, (unsigned long long) *ppos);
+
+	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!res)
+		res = generic_file_sendfile(filp, ppos, count, actor, target);
+	return res;
+}
+
+static int
+nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int	status;
+
+	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+
+	status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!status)
+		status = generic_file_mmap(file, vma);
+	return status;
+}
+
+/*
+ * Flush any dirty pages for this process, and check for write errors.
+ * The return status from this call provides a reliable indication of
+ * whether any write errors occurred for this process.
+ */
+static int
+nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct inode *inode = dentry->d_inode;
+	int status;
+
+	dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
+
+	lock_kernel();
+	status = nfs_wb_all(inode);
+	if (!status) {
+		status = ctx->error;
+		ctx->error = 0;
+	}
+	unlock_kernel();
+	return status;
+}
+
+/*
+ * This does the "real" work of the write. The generic routine has
+ * allocated the page, locked it, and done all the page alignment
+ * calculations. Now we should just copy the data from user space
+ * and write it back to the real medium.
+ *
+ * If the writer ends up delaying the write, the writer needs to
+ * increment the page use counts until it is done with the page.
+ */
+static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+	return nfs_flush_incompatible(file, page);
+}
+
+static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+	long status;
+
+	lock_kernel();
+	status = nfs_updatepage(file, page, offset, to-offset);
+	unlock_kernel();
+	return status;
+}
+
+struct address_space_operations nfs_file_aops = {
+	.readpage = nfs_readpage,
+	.readpages = nfs_readpages,
+	.set_page_dirty = __set_page_dirty_nobuffers,
+	.writepage = nfs_writepage,
+	.writepages = nfs_writepages,
+	.prepare_write = nfs_prepare_write,
+	.commit_write = nfs_commit_write,
+#ifdef CONFIG_NFS_DIRECTIO
+	.direct_IO = nfs_direct_IO,
+#endif
+};
+
+/* 
+ * Write to a file (through the page cache).
+ */
+static ssize_t
+nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+{
+	struct dentry * dentry = iocb->ki_filp->f_dentry;
+	struct inode * inode = dentry->d_inode;
+	ssize_t result;
+
+#ifdef CONFIG_NFS_DIRECTIO
+	if (iocb->ki_filp->f_flags & O_DIRECT)
+		return nfs_file_direct_write(iocb, buf, count, pos);
+#endif
+
+	dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		inode->i_ino, (unsigned long) count, (unsigned long) pos);
+
+	result = -EBUSY;
+	if (IS_SWAPFILE(inode))
+		goto out_swapfile;
+	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (result)
+		goto out;
+
+	result = count;
+	if (!count)
+		goto out;
+
+	result = generic_file_aio_write(iocb, buf, count, pos);
+out:
+	return result;
+
+out_swapfile:
+	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
+	goto out;
+}
+
+static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct inode *inode = filp->f_mapping->host;
+	int status = 0;
+
+	lock_kernel();
+	/* Use local locking if mounted with "-onolock" */
+	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
+		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+	else {
+		struct file_lock *cfl = posix_test_lock(filp, fl);
+
+		fl->fl_type = F_UNLCK;
+		if (cfl != NULL)
+			memcpy(fl, cfl, sizeof(*fl));
+	}
+	unlock_kernel();
+	return status;
+}
+
+static int do_vfs_lock(struct file *file, struct file_lock *fl)
+{
+	int res = 0;
+	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+		case FL_POSIX:
+			res = posix_lock_file_wait(file, fl);
+			break;
+		case FL_FLOCK:
+			res = flock_lock_file_wait(file, fl);
+			break;
+		default:
+			BUG();
+	}
+	if (res < 0)
+		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
+				__FUNCTION__);
+	return res;
+}
+
+static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct inode *inode = filp->f_mapping->host;
+	sigset_t oldset;
+	int status;
+
+	rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
+	/*
+	 * Flush all pending writes before doing anything
+	 * with locks..
+	 */
+	filemap_fdatawrite(filp->f_mapping);
+	down(&inode->i_sem);
+	nfs_wb_all(inode);
+	up(&inode->i_sem);
+	filemap_fdatawait(filp->f_mapping);
+
+	/* NOTE: special case
+	 * 	If we're signalled while cleaning up locks on process exit, we
+	 * 	still need to complete the unlock.
+	 */
+	lock_kernel();
+	/* Use local locking if mounted with "-onolock" */
+	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
+		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+	else
+		status = do_vfs_lock(filp, fl);
+	unlock_kernel();
+	rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
+	return status;
+}
+
+static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct inode *inode = filp->f_mapping->host;
+	sigset_t oldset;
+	int status;
+
+	rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
+	/*
+	 * Flush all pending writes before doing anything
+	 * with locks..
+	 */
+	status = filemap_fdatawrite(filp->f_mapping);
+	if (status == 0) {
+		down(&inode->i_sem);
+		status = nfs_wb_all(inode);
+		up(&inode->i_sem);
+		if (status == 0)
+			status = filemap_fdatawait(filp->f_mapping);
+	}
+	if (status < 0)
+		goto out;
+
+	lock_kernel();
+	/* Use local locking if mounted with "-onolock" */
+	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
+		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+		/* If we were signalled we still need to ensure that
+		 * we clean up any state on the server. We therefore
+		 * record the lock call as having succeeded in order to
+		 * ensure that locks_remove_posix() cleans it out when
+		 * the process exits.
+		 */
+		if (status == -EINTR || status == -ERESTARTSYS)
+			do_vfs_lock(filp, fl);
+	} else
+		status = do_vfs_lock(filp, fl);
+	unlock_kernel();
+	if (status < 0)
+		goto out;
+	/*
+	 * Make sure we clear the cache whenever we try to get the lock.
+	 * This makes locking act as a cache coherency point.
+	 */
+	filemap_fdatawrite(filp->f_mapping);
+	down(&inode->i_sem);
+	nfs_wb_all(inode);	/* we may have slept */
+	up(&inode->i_sem);
+	filemap_fdatawait(filp->f_mapping);
+	nfs_zap_caches(inode);
+out:
+	rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
+	return status;
+}
+
+/*
+ * Lock a (portion of) a file
+ */
+static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct inode *inode = filp->f_mapping->host;
+
+	if (!inode)
+		return -EINVAL;
+
+	dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
+			inode->i_sb->s_id, inode->i_ino,
+			fl->fl_type, fl->fl_flags,
+			(long long)fl->fl_start, (long long)fl->fl_end);
+
+	/* No mandatory locks over NFS */
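+	/* (The traditional Unix marker for mandatory locking is the setgid
+	 *  bit set with group-execute permission clear, which is what the
+	 *  test below checks for.) */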
+	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+		return -ENOLCK;
+
+	if (IS_GETLK(cmd))
+		return do_getlk(filp, cmd, fl);
+	if (fl->fl_type == F_UNLCK)
+		return do_unlk(filp, cmd, fl);
+	return do_setlk(filp, cmd, fl);
+}
+
+/*
+ * Apply a BSD flock()-style lock to a file (emulated with whole-file POSIX locks)
+ */
+static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct inode *inode = filp->f_mapping->host;
+
+	if (!inode)
+		return -EINVAL;
+
+	dprintk("NFS: nfs_flock(f=%s/%ld, t=%x, fl=%x)\n",
+			inode->i_sb->s_id, inode->i_ino,
+			fl->fl_type, fl->fl_flags);
+
+	/*
+	 * No BSD flocks over NFS allowed.
+	 * Note: we could try to fake a POSIX lock request here by
+	 * using ((u32) filp | 0x80000000) or some such as the pid.
+	 * Not sure whether that would be unique, though, or whether
+	 * that would break in other places.
+	 */
+	if (!(fl->fl_flags & FL_FLOCK))
+		return -ENOLCK;
+
+	/* We're simulating flock() locks using posix locks on the server */
+	fl->fl_owner = (fl_owner_t)filp;
+	fl->fl_start = 0;
+	fl->fl_end = OFFSET_MAX;
+
+	if (fl->fl_type == F_UNLCK)
+		return do_unlk(filp, cmd, fl);
+	return do_setlk(filp, cmd, fl);
+}
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
new file mode 100644
index 0000000..b74c4e3
--- /dev/null
+++ b/fs/nfs/idmap.c
@@ -0,0 +1,498 @@
+/*
+ * fs/nfs/idmap.c
+ *
+ *  UID and GID to name mapping for clients.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Marius Aamodt Eriksen <marius@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/sched.h>
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/workqueue.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include <linux/nfs_fs_sb.h>
+#include <linux/nfs_fs.h>
+
+#include <linux/nfs_idmap.h>
+
+#define IDMAP_HASH_SZ          128
+
+struct idmap_hashent {
+	__u32 ih_id;
+	int ih_namelen;
+	char ih_name[IDMAP_NAMESZ];
+};
+
+struct idmap_hashtable {
+	__u8 h_type;
+	struct idmap_hashent h_entries[IDMAP_HASH_SZ];
+};
+
+struct idmap {
+	char                  idmap_path[48];
+	struct dentry        *idmap_dentry;
+	wait_queue_head_t     idmap_wq;
+	struct idmap_msg      idmap_im;
+	struct semaphore      idmap_lock;    /* Serializes upcalls */
+	struct semaphore      idmap_im_lock; /* Protects the hashtable */
+	struct idmap_hashtable idmap_user_hash;
+	struct idmap_hashtable idmap_group_hash;
+};
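+
+/*
+ * Descriptive note on how the pieces above fit together: each nfs4_client
+ * owns one struct idmap.  A lookup that misses the small per-client hash
+ * cache is packed into an idmap_msg and queued on the rpc_pipefs pipe
+ * created at "<rpc client dir>/idmap"; a user-space daemon is expected to
+ * read the request (upcall), resolve the mapping, and write the reply back
+ * (downcall), which fills in idmap_im and wakes the waiter on idmap_wq.
+ * idmap_lock serializes upcalls so only one request is in flight at a
+ * time, while idmap_im_lock protects idmap_im and the hash tables.
+ */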
+
+static ssize_t   idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
+		     char __user *, size_t);
+static ssize_t   idmap_pipe_downcall(struct file *, const char __user *,
+		     size_t);
+void             idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
+
+static unsigned int fnvhash32(const void *, size_t);
+
+static struct rpc_pipe_ops idmap_upcall_ops = {
+	.upcall		= idmap_pipe_upcall,
+	.downcall	= idmap_pipe_downcall,
+	.destroy_msg	= idmap_pipe_destroy_msg,
+};
+
+void
+nfs_idmap_new(struct nfs4_client *clp)
+{
+	struct idmap *idmap;
+
+	if (clp->cl_idmap != NULL)
+		return;
+	if ((idmap = kmalloc(sizeof(*idmap), GFP_KERNEL)) == NULL)
+		return;
+
+	memset(idmap, 0, sizeof(*idmap));
+
+	snprintf(idmap->idmap_path, sizeof(idmap->idmap_path),
+	    "%s/idmap", clp->cl_rpcclient->cl_pathname);
+
+	idmap->idmap_dentry = rpc_mkpipe(idmap->idmap_path,
+	    idmap, &idmap_upcall_ops, 0);
+	if (IS_ERR(idmap->idmap_dentry)) {
+		kfree(idmap);
+		return;
+	}
+
+	init_MUTEX(&idmap->idmap_lock);
+	init_MUTEX(&idmap->idmap_im_lock);
+	init_waitqueue_head(&idmap->idmap_wq);
+	idmap->idmap_user_hash.h_type = IDMAP_TYPE_USER;
+	idmap->idmap_group_hash.h_type = IDMAP_TYPE_GROUP;
+
+	clp->cl_idmap = idmap;
+}
+
+void
+nfs_idmap_delete(struct nfs4_client *clp)
+{
+	struct idmap *idmap = clp->cl_idmap;
+
+	if (!idmap)
+		return;
+	rpc_unlink(idmap->idmap_path);
+	clp->cl_idmap = NULL;
+	kfree(idmap);
+}
+
+/*
+ * Helper routines for manipulating the hashtable
+ */
+static inline struct idmap_hashent *
+idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
+{
+	return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
+}
+
+static struct idmap_hashent *
+idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+{
+	struct idmap_hashent *he = idmap_name_hash(h, name, len);
+
+	if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
+		return NULL;
+	return he;
+}
+
+static inline struct idmap_hashent *
+idmap_id_hash(struct idmap_hashtable* h, __u32 id)
+{
+	return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
+}
+
+static struct idmap_hashent *
+idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+{
+	struct idmap_hashent *he = idmap_id_hash(h, id);
+	if (he->ih_id != id || he->ih_namelen == 0)
+		return NULL;
+	return he;
+}
+
+/*
+ * Routines for allocating new entries in the hashtable.
+ * For now, we just have 1 entry per bucket, so it's all
+ * pretty trivial.
+ */
+static inline struct idmap_hashent *
+idmap_alloc_name(struct idmap_hashtable *h, char *name, unsigned len)
+{
+	return idmap_name_hash(h, name, len);
+}
+
+static inline struct idmap_hashent *
+idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
+{
+	return idmap_id_hash(h, id);
+}
+
+static void
+idmap_update_entry(struct idmap_hashent *he, const char *name,
+		size_t namelen, __u32 id)
+{
+	he->ih_id = id;
+	memcpy(he->ih_name, name, namelen);
+	he->ih_name[namelen] = '\0';
+	he->ih_namelen = namelen;
+}
+
+/*
+ * Name -> ID
+ */
+static int
+nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
+		const char *name, size_t namelen, __u32 *id)
+{
+	struct rpc_pipe_msg msg;
+	struct idmap_msg *im;
+	struct idmap_hashent *he;
+	DECLARE_WAITQUEUE(wq, current);
+	int ret = -EIO;
+
+	im = &idmap->idmap_im;
+
+	/*
+	 * String sanity checks
+	 * Note that the userland daemon expects NUL terminated strings
+	 */
+	for (;;) {
+		if (namelen == 0)
+			return -EINVAL;
+		if (name[namelen-1] != '\0')
+			break;
+		namelen--;
+	}
+	if (namelen >= IDMAP_NAMESZ)
+		return -EINVAL;
+
+	down(&idmap->idmap_lock);
+	down(&idmap->idmap_im_lock);
+
+	he = idmap_lookup_name(h, name, namelen);
+	if (he != NULL) {
+		*id = he->ih_id;
+		ret = 0;
+		goto out;
+	}
+
+	memset(im, 0, sizeof(*im));
+	memcpy(im->im_name, name, namelen);
+
+	im->im_type = h->h_type;
+	im->im_conv = IDMAP_CONV_NAMETOID;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.data = im;
+	msg.len = sizeof(*im);
+
+	add_wait_queue(&idmap->idmap_wq, &wq);
+	if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) {
+		remove_wait_queue(&idmap->idmap_wq, &wq);
+		goto out;
+	}
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	up(&idmap->idmap_im_lock);
+	schedule();
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&idmap->idmap_wq, &wq);
+	down(&idmap->idmap_im_lock);
+
+	if (im->im_status & IDMAP_STATUS_SUCCESS) {
+		*id = im->im_id;
+		ret = 0;
+	}
+
+ out:
+	memset(im, 0, sizeof(*im));
+	up(&idmap->idmap_im_lock);
+	up(&idmap->idmap_lock);
+	return ret;
+}
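+
+/*
+ * Locking note for the routine above: idmap_lock is held across the whole
+ * upcall, so idmap_im only ever carries one pending request, while
+ * idmap_im_lock is dropped around schedule() so that the downcall handler
+ * can fill in the reply and wake us.  The sleep is TASK_UNINTERRUPTIBLE;
+ * a request therefore completes either through a downcall or through
+ * idmap_pipe_destroy_msg() flagging IDMAP_STATUS_LOOKUPFAIL.
+ */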
+
+/*
+ * ID -> Name
+ */
+static int
+nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
+		__u32 id, char *name)
+{
+	struct rpc_pipe_msg msg;
+	struct idmap_msg *im;
+	struct idmap_hashent *he;
+	DECLARE_WAITQUEUE(wq, current);
+	int ret = -EIO;
+	unsigned int len;
+
+	im = &idmap->idmap_im;
+
+	down(&idmap->idmap_lock);
+	down(&idmap->idmap_im_lock);
+
+	he = idmap_lookup_id(h, id);
+	if (he != NULL) {
+		memcpy(name, he->ih_name, he->ih_namelen);
+		ret = he->ih_namelen;
+		goto out;
+	}
+
+	memset(im, 0, sizeof(*im));
+	im->im_type = h->h_type;
+	im->im_conv = IDMAP_CONV_IDTONAME;
+	im->im_id = id;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.data = im;
+	msg.len = sizeof(*im);
+
+	add_wait_queue(&idmap->idmap_wq, &wq);
+
+	if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) {
+		remove_wait_queue(&idmap->idmap_wq, &wq);
+		goto out;
+	}
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	up(&idmap->idmap_im_lock);
+	schedule();
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&idmap->idmap_wq, &wq);
+	down(&idmap->idmap_im_lock);
+
+	if (im->im_status & IDMAP_STATUS_SUCCESS) {
+		if ((len = strnlen(im->im_name, IDMAP_NAMESZ)) == 0)
+			goto out;
+		memcpy(name, im->im_name, len);
+		ret = len;
+	}
+
+ out:
+	memset(im, 0, sizeof(*im));
+	up(&idmap->idmap_im_lock);
+	up(&idmap->idmap_lock);
+	return ret;
+}
+
+/* RPC pipefs upcall/downcall routines */
+static ssize_t
+idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+    char __user *dst, size_t buflen)
+{
+	char *data = (char *)msg->data + msg->copied;
+	ssize_t mlen = msg->len - msg->copied;
+	ssize_t left;
+
+	if (mlen > buflen)
+		mlen = buflen;
+
+	/* Note: copy_to_user() returns the number of bytes left uncopied,
+	 * never a negative value */
+	left = copy_to_user(dst, data, mlen);
+	if (left == mlen) {
+		msg->errno = -EFAULT;
+		return -EFAULT;
+	}
+	mlen -= left;
+	msg->copied += mlen;
+	msg->errno = 0;
+	return mlen;
+}
+
+static ssize_t
+idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+{
+	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+	struct idmap *idmap = (struct idmap *)rpci->private;
+	struct idmap_msg im_in, *im = &idmap->idmap_im;
+	struct idmap_hashtable *h;
+	struct idmap_hashent *he = NULL;
+	int namelen_in;
+	int ret;
+
+	if (mlen != sizeof(im_in))
+		return -ENOSPC;
+
+	if (copy_from_user(&im_in, src, mlen) != 0)
+		return -EFAULT;
+
+	down(&idmap->idmap_im_lock);
+
+	ret = mlen;
+	im->im_status = im_in.im_status;
+	/* If we got an error, terminate now, and wake up pending upcalls */
+	if (!(im_in.im_status & IDMAP_STATUS_SUCCESS)) {
+		wake_up(&idmap->idmap_wq);
+		goto out;
+	}
+
+	/* Sanity checking of strings */
+	ret = -EINVAL;
+	namelen_in = strnlen(im_in.im_name, IDMAP_NAMESZ);
+	if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ)
+		goto out;
+
+	switch (im_in.im_type) {
+		case IDMAP_TYPE_USER:
+			h = &idmap->idmap_user_hash;
+			break;
+		case IDMAP_TYPE_GROUP:
+			h = &idmap->idmap_group_hash;
+			break;
+		default:
+			goto out;
+	}
+
+	switch (im_in.im_conv) {
+	case IDMAP_CONV_IDTONAME:
+		/* Did we match the current upcall? */
+		if (im->im_conv == IDMAP_CONV_IDTONAME
+				&& im->im_type == im_in.im_type
+				&& im->im_id == im_in.im_id) {
+			/* Yes: copy string, including the terminating '\0'  */
+			memcpy(im->im_name, im_in.im_name, namelen_in);
+			im->im_name[namelen_in] = '\0';
+			wake_up(&idmap->idmap_wq);
+		}
+		he = idmap_alloc_id(h, im_in.im_id);
+		break;
+	case IDMAP_CONV_NAMETOID:
+		/* Did we match the current upcall? */
+		if (im->im_conv == IDMAP_CONV_NAMETOID
+				&& im->im_type == im_in.im_type
+				&& strnlen(im->im_name, IDMAP_NAMESZ) == namelen_in
+				&& memcmp(im->im_name, im_in.im_name, namelen_in) == 0) {
+			im->im_id = im_in.im_id;
+			wake_up(&idmap->idmap_wq);
+		}
+		he = idmap_alloc_name(h, im_in.im_name, namelen_in);
+		break;
+	default:
+		goto out;
+	}
+
+	/* If the entry is valid, also copy it to the cache */
+	if (he != NULL)
+		idmap_update_entry(he, im_in.im_name, namelen_in, im_in.im_id);
+	ret = mlen;
+out:
+	up(&idmap->idmap_im_lock);
+	return ret;
+}
+
+void
+idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+	struct idmap_msg *im = msg->data;
+	struct idmap *idmap = container_of(im, struct idmap, idmap_im); 
+
+	if (msg->errno >= 0)
+		return;
+	down(&idmap->idmap_im_lock);
+	im->im_status = IDMAP_STATUS_LOOKUPFAIL;
+	wake_up(&idmap->idmap_wq);
+	up(&idmap->idmap_im_lock);
+}
+
+/* 
+ * Fowler/Noll/Vo hash
+ *    http://www.isthe.com/chongo/tech/comp/fnv/
+ */
+
+#define FNV_P_32 ((unsigned int)0x01000193) /* 16777619 */
+#define FNV_1_32 ((unsigned int)0x811c9dc5) /* 2166136261 */
+
+static unsigned int fnvhash32(const void *buf, size_t buflen)
+{
+	const unsigned char *p, *end = (const unsigned char *)buf + buflen;
+	unsigned int hash = FNV_1_32;
+
+	for (p = buf; p < end; p++) {
+		hash *= FNV_P_32;
+		hash ^= (unsigned int)*p;
+	}
+
+	return hash;
+}
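+
+/*
+ * Note: the loop above is FNV-1 (multiply by the prime first, then XOR in
+ * the next byte), not FNV-1a, and the constants are the standard 32-bit
+ * FNV prime and offset basis.
+ */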
+
+int nfs_map_name_to_uid(struct nfs4_client *clp, const char *name, size_t namelen, __u32 *uid)
+{
+	struct idmap *idmap = clp->cl_idmap;
+
+	return nfs_idmap_id(idmap, &idmap->idmap_user_hash, name, namelen, uid);
+}
+
+int nfs_map_group_to_gid(struct nfs4_client *clp, const char *name, size_t namelen, __u32 *uid)
+{
+	struct idmap *idmap = clp->cl_idmap;
+
+	return nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, uid);
+}
+
+int nfs_map_uid_to_name(struct nfs4_client *clp, __u32 uid, char *buf)
+{
+	struct idmap *idmap = clp->cl_idmap;
+
+	return nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf);
+}
+int nfs_map_gid_to_group(struct nfs4_client *clp, __u32 uid, char *buf)
+{
+	struct idmap *idmap = clp->cl_idmap;
+
+	return nfs_idmap_name(idmap, &idmap->idmap_group_hash, uid, buf);
+}
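+
+/*
+ * The four wrappers above are the public entry points of this file,
+ * presumably used by the NFSv4 XDR code to translate between on-the-wire
+ * owner/group strings and local uids/gids.
+ */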
+
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
new file mode 100644
index 0000000..6345f26
--- /dev/null
+++ b/fs/nfs/inode.c
@@ -0,0 +1,2003 @@
+/*
+ *  linux/fs/nfs/inode.c
+ *
+ *  Copyright (C) 1992  Rick Sladkey
+ *
+ *  nfs inode and superblock handling functions
+ *
+ *  Modularised by Alan Cox <Alan.Cox@linux.org>, while hacking some
+ *  experimental NFS changes. Modularisation taken straight from SYS5 fs.
+ *
+ *  Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
+ *  J.S.Peatfield@damtp.cam.ac.uk
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/lockd/bind.h>
+#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/nfs_idmap.h>
+#include <linux/vfs.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "delegation.h"
+
+#define NFSDBG_FACILITY		NFSDBG_VFS
+#define NFS_PARANOIA 1
+
+/* Maximum number of readahead requests
+ * FIXME: this should really be a sysctl so that users may tune it to suit
+ *        their needs. People that do NFS over a slow network, might for
+ *        instance want to reduce it to something closer to 1 for improved
+ *        interactive response.
+ */
+#define NFS_MAX_READAHEAD	(RPC_DEF_SLOT_TABLE - 1)
+
+static void nfs_invalidate_inode(struct inode *);
+static int nfs_update_inode(struct inode *, struct nfs_fattr *, unsigned long);
+
+static struct inode *nfs_alloc_inode(struct super_block *sb);
+static void nfs_destroy_inode(struct inode *);
+static int nfs_write_inode(struct inode *,int);
+static void nfs_delete_inode(struct inode *);
+static void nfs_clear_inode(struct inode *);
+static void nfs_umount_begin(struct super_block *);
+static int  nfs_statfs(struct super_block *, struct kstatfs *);
+static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+
+static struct rpc_program	nfs_program;
+
+static struct super_operations nfs_sops = { 
+	.alloc_inode	= nfs_alloc_inode,
+	.destroy_inode	= nfs_destroy_inode,
+	.write_inode	= nfs_write_inode,
+	.delete_inode	= nfs_delete_inode,
+	.statfs		= nfs_statfs,
+	.clear_inode	= nfs_clear_inode,
+	.umount_begin	= nfs_umount_begin,
+	.show_options	= nfs_show_options,
+};
+
+/*
+ * RPC cruft for NFS
+ */
+static struct rpc_stat		nfs_rpcstat = {
+	.program		= &nfs_program
+};
+static struct rpc_version *	nfs_version[] = {
+	NULL,
+	NULL,
+	&nfs_version2,
+#if defined(CONFIG_NFS_V3)
+	&nfs_version3,
+#elif defined(CONFIG_NFS_V4)
+	NULL,
+#endif
+#if defined(CONFIG_NFS_V4)
+	&nfs_version4,
+#endif
+};
+
+static struct rpc_program	nfs_program = {
+	.name			= "nfs",
+	.number			= NFS_PROGRAM,
+	.nrvers			= sizeof(nfs_version) / sizeof(nfs_version[0]),
+	.version		= nfs_version,
+	.stats			= &nfs_rpcstat,
+	.pipe_dir_name		= "/nfs",
+};
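+
+/*
+ * Note: nfs_version[] is indexed by RPC version number, so slots 0 and 1
+ * are always NULL, slot 2 is NFSv2, and slots 3 and 4 are only filled in
+ * when CONFIG_NFS_V3/CONFIG_NFS_V4 are enabled; nrvers is simply the
+ * array length.
+ */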
+
+static inline unsigned long
+nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
+{
+	return nfs_fileid_to_ino_t(fattr->fileid);
+}
+
+static int
+nfs_write_inode(struct inode *inode, int sync)
+{
+	int flags = sync ? FLUSH_WAIT : 0;
+	int ret;
+
+	ret = nfs_commit_inode(inode, 0, 0, flags);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static void
+nfs_delete_inode(struct inode * inode)
+{
+	dprintk("NFS: delete_inode(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
+
+	nfs_wb_all(inode);
+	/*
+	 * The following should never happen...
+	 */
+	if (nfs_have_writebacks(inode)) {
+		printk(KERN_ERR "nfs_delete_inode: inode %ld has pending RPC requests\n", inode->i_ino);
+	}
+
+	clear_inode(inode);
+}
+
+/*
+ * For the moment, the only task for the NFS clear_inode method is to
+ * release the mmap credential
+ */
+static void
+nfs_clear_inode(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct rpc_cred *cred;
+
+	nfs_wb_all(inode);
+	BUG_ON(!list_empty(&nfsi->open_files));
+	cred = nfsi->cache_access.cred;
+	if (cred)
+		put_rpccred(cred);
+	BUG_ON(atomic_read(&nfsi->data_updates) != 0);
+}
+
+void
+nfs_umount_begin(struct super_block *sb)
+{
+	struct nfs_server *server = NFS_SB(sb);
+	struct rpc_clnt	*rpc;
+
+	/* -EIO all pending I/O */
+	if ((rpc = server->client) != NULL)
+		rpc_killall_tasks(rpc);
+}
+
+
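+/*
+ * Round the given block size down to a power of two and, when nrbitsp is
+ * non-NULL, also report log2 of the result.
+ */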
+static inline unsigned long
+nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
+{
+	/* make sure blocksize is a power of two */
+	if ((bsize & (bsize - 1)) || nrbitsp) {
+		unsigned char	nrbits;
+
+		for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
+			;
+		bsize = 1 << nrbits;
+		if (nrbitsp)
+			*nrbitsp = nrbits;
+	}
+
+	return bsize;
+}
+
+/*
+ * Calculate the number of 512byte blocks used.
+ */
+static inline unsigned long
+nfs_calc_block_size(u64 tsize)
+{
+	loff_t used = (tsize + 511) >> 9;
+	return (used > ULONG_MAX) ? ULONG_MAX : used;
+}
+
+/*
+ * Compute and set NFS server blocksize
+ */
+static inline unsigned long
+nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
+{
+	if (bsize < 1024)
+		bsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
+	else if (bsize >= NFS_MAX_FILE_IO_BUFFER_SIZE)
+		bsize = NFS_MAX_FILE_IO_BUFFER_SIZE;
+
+	return nfs_block_bits(bsize, nrbitsp);
+}
+
+/*
+ * Obtain the root inode of the file system.
+ */
+static struct inode *
+nfs_get_root(struct super_block *sb, struct nfs_fh *rootfh, struct nfs_fsinfo *fsinfo)
+{
+	struct nfs_server	*server = NFS_SB(sb);
+	struct inode *rooti;
+	int			error;
+
+	error = server->rpc_ops->getroot(server, rootfh, fsinfo);
+	if (error < 0) {
+		dprintk("nfs_get_root: getattr error = %d\n", -error);
+		return ERR_PTR(error);
+	}
+
+	rooti = nfs_fhget(sb, rootfh, fsinfo->fattr);
+	if (!rooti)
+		return ERR_PTR(-ENOMEM);
+	return rooti;
+}
+
+/*
+ * Do NFS version-independent mount processing, and sanity checking
+ */
+static int
+nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
+{
+	struct nfs_server	*server;
+	struct inode		*root_inode;
+	struct nfs_fattr	fattr;
+	struct nfs_fsinfo	fsinfo = {
+					.fattr = &fattr,
+				};
+	struct nfs_pathconf pathinfo = {
+			.fattr = &fattr,
+	};
+	int no_root_error = 0;
+	unsigned long max_rpc_payload;
+
+	/* We probably want something more informative here */
+	snprintf(sb->s_id, sizeof(sb->s_id), "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev));
+
+	server = NFS_SB(sb);
+
+	sb->s_magic      = NFS_SUPER_MAGIC;
+
+	root_inode = nfs_get_root(sb, &server->fh, &fsinfo);
+	/* Did getting the root inode fail? */
+	if (IS_ERR(root_inode)) {
+		no_root_error = PTR_ERR(root_inode);
+		goto out_no_root;
+	}
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root) {
+		no_root_error = -ENOMEM;
+		goto out_no_root;
+	}
+	sb->s_root->d_op = server->rpc_ops->dentry_ops;
+
+	/* Get some general file system info */
+	if (server->namelen == 0 &&
+	    server->rpc_ops->pathconf(server, &server->fh, &pathinfo) >= 0)
+		server->namelen = pathinfo.max_namelen;
+	/* Work out a lot of parameters */
+	if (server->rsize == 0)
+		server->rsize = nfs_block_size(fsinfo.rtpref, NULL);
+	if (server->wsize == 0)
+		server->wsize = nfs_block_size(fsinfo.wtpref, NULL);
+
+	if (fsinfo.rtmax >= 512 && server->rsize > fsinfo.rtmax)
+		server->rsize = nfs_block_size(fsinfo.rtmax, NULL);
+	if (fsinfo.wtmax >= 512 && server->wsize > fsinfo.wtmax)
+		server->wsize = nfs_block_size(fsinfo.wtmax, NULL);
+
+	max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
+	if (server->rsize > max_rpc_payload)
+		server->rsize = max_rpc_payload;
+	if (server->wsize > max_rpc_payload)
+		server->wsize = max_rpc_payload;
+
+	server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (server->rpages > NFS_READ_MAXIOV) {
+		server->rpages = NFS_READ_MAXIOV;
+		server->rsize = server->rpages << PAGE_CACHE_SHIFT;
+	}
+
+	server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (server->wpages > NFS_WRITE_MAXIOV) {
+		server->wpages = NFS_WRITE_MAXIOV;
+		server->wsize = server->wpages << PAGE_CACHE_SHIFT;
+	}
+
+	if (sb->s_blocksize == 0)
+		sb->s_blocksize = nfs_block_bits(server->wsize,
+							 &sb->s_blocksize_bits);
+	server->wtmult = nfs_block_bits(fsinfo.wtmult, NULL);
+
+	server->dtsize = nfs_block_size(fsinfo.dtpref, NULL);
+	if (server->dtsize > PAGE_CACHE_SIZE)
+		server->dtsize = PAGE_CACHE_SIZE;
+	if (server->dtsize > server->rsize)
+		server->dtsize = server->rsize;
+
+	if (server->flags & NFS_MOUNT_NOAC) {
+		server->acregmin = server->acregmax = 0;
+		server->acdirmin = server->acdirmax = 0;
+		sb->s_flags |= MS_SYNCHRONOUS;
+	}
+	server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
+
+	sb->s_maxbytes = fsinfo.maxfilesize;
+	if (sb->s_maxbytes > MAX_LFS_FILESIZE) 
+		sb->s_maxbytes = MAX_LFS_FILESIZE; 
+
+	server->client->cl_intr = (server->flags & NFS_MOUNT_INTR) ? 1 : 0;
+	server->client->cl_softrtry = (server->flags & NFS_MOUNT_SOFT) ? 1 : 0;
+
+	/* We're airborne. Set the socket buffer sizes. */
+	rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
+	return 0;
+	/* Yargs. It didn't work out. */
+out_no_root:
+	dprintk("nfs_sb_init: get root inode failed: errno %d\n", -no_root_error);
+	if (!IS_ERR(root_inode))
+		iput(root_inode);
+	return no_root_error;
+}
+
+/*
+ * Create an RPC client handle.
+ */
+static struct rpc_clnt *
+nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data)
+{
+	struct rpc_timeout	timeparms;
+	struct rpc_xprt		*xprt = NULL;
+	struct rpc_clnt		*clnt = NULL;
+	int			tcp   = (data->flags & NFS_MOUNT_TCP);
+
+	/* Initialize timeout values */
+	timeparms.to_initval = data->timeo * HZ / 10;
+	timeparms.to_retries = data->retrans;
+	timeparms.to_maxval  = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
+	timeparms.to_exponential = 1;
+
+	if (!timeparms.to_initval)
+		timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10;
+	if (!timeparms.to_retries)
+		timeparms.to_retries = 5;
+
+	/* create transport and client */
+	xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP,
+				 &server->addr, &timeparms);
+	if (IS_ERR(xprt)) {
+		printk(KERN_WARNING "NFS: cannot create RPC transport.\n");
+		return (struct rpc_clnt *)xprt;
+	}
+	clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
+				 server->rpc_ops->version, data->pseudoflavor);
+	if (IS_ERR(clnt)) {
+		printk(KERN_WARNING "NFS: cannot create RPC client.\n");
+		goto out_fail;
+	}
+
+	clnt->cl_intr     = 1;
+	clnt->cl_softrtry = 1;
+	clnt->cl_chatty   = 1;
+
+	return clnt;
+
+out_fail:
+	xprt_destroy(xprt);
+	return clnt;
+}
+
+/*
+ * The way this works is that the mount process passes a structure
+ * in the data argument which contains the server's IP address
+ * and the root file handle obtained from the server's mount
+ * daemon. We stash these away in the private superblock fields.
+ */
+static int
+nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int silent)
+{
+	struct nfs_server	*server;
+	rpc_authflavor_t	authflavor;
+
+	server           = NFS_SB(sb);
+	sb->s_blocksize_bits = 0;
+	sb->s_blocksize = 0;
+	if (data->bsize)
+		sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
+	if (data->rsize)
+		server->rsize = nfs_block_size(data->rsize, NULL);
+	if (data->wsize)
+		server->wsize = nfs_block_size(data->wsize, NULL);
+	server->flags    = data->flags & NFS_MOUNT_FLAGMASK;
+
+	server->acregmin = data->acregmin*HZ;
+	server->acregmax = data->acregmax*HZ;
+	server->acdirmin = data->acdirmin*HZ;
+	server->acdirmax = data->acdirmax*HZ;
+
+	/* Start lockd here, before we might error out */
+	if (!(server->flags & NFS_MOUNT_NONLM))
+		lockd_up();
+
+	server->namelen  = data->namlen;
+	server->hostname = kmalloc(strlen(data->hostname) + 1, GFP_KERNEL);
+	if (!server->hostname)
+		return -ENOMEM;
+	strcpy(server->hostname, data->hostname);
+
+	/* Check NFS protocol revision and initialize RPC op vector
+	 * and file handle pool. */
+	if (server->flags & NFS_MOUNT_VER3) {
+#ifdef CONFIG_NFS_V3
+		server->rpc_ops = &nfs_v3_clientops;
+		server->caps |= NFS_CAP_READDIRPLUS;
+		if (data->version < 4) {
+			printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n");
+			return -EIO;
+		}
+#else
+		printk(KERN_NOTICE "NFS: NFSv3 not supported.\n");
+		return -EIO;
+#endif
+	} else {
+		server->rpc_ops = &nfs_v2_clientops;
+	}
+
+	/* Fill in pseudoflavor for mount version < 5 */
+	if (!(data->flags & NFS_MOUNT_SECFLAVOUR))
+		data->pseudoflavor = RPC_AUTH_UNIX;
+	authflavor = data->pseudoflavor;	/* save for sb_init() */
+	/* XXX maybe we want to add a server->pseudoflavor field */
+
+	/* Create RPC client handles */
+	server->client = nfs_create_client(server, data);
+	if (IS_ERR(server->client))
+		return PTR_ERR(server->client);
+	/* RFC 2623, sec 2.3.2 */
+	if (authflavor != RPC_AUTH_UNIX) {
+		server->client_sys = rpc_clone_client(server->client);
+		if (IS_ERR(server->client_sys))
+			return PTR_ERR(server->client_sys);
+		if (!rpcauth_create(RPC_AUTH_UNIX, server->client_sys))
+			return -ENOMEM;
+	} else {
+		atomic_inc(&server->client->cl_count);
+		server->client_sys = server->client;
+	}
+
+	if (server->flags & NFS_MOUNT_VER3) {
+		if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+			server->namelen = NFS3_MAXNAMLEN;
+		sb->s_time_gran = 1;
+	} else {
+		if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+			server->namelen = NFS2_MAXNAMLEN;
+	}
+
+	sb->s_op = &nfs_sops;
+	return nfs_sb_init(sb, authflavor);
+}
+
+static int
+nfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct nfs_server *server = NFS_SB(sb);
+	unsigned char blockbits;
+	unsigned long blockres;
+	struct nfs_fh *rootfh = NFS_FH(sb->s_root->d_inode);
+	struct nfs_fattr fattr;
+	struct nfs_fsstat res = {
+			.fattr = &fattr,
+	};
+	int error;
+
+	lock_kernel();
+
+	error = server->rpc_ops->statfs(server, rootfh, &res);
+	buf->f_type = NFS_SUPER_MAGIC;
+	if (error < 0)
+		goto out_err;
+
+	/*
+	 * Current versions of glibc do not correctly handle the
+	 * case where f_frsize != f_bsize.  Eventually we want to
+	 * report the value of wtmult in this field.
+	 */
+	buf->f_frsize = sb->s_blocksize;
+
+	/*
+	 * On most *nix systems, f_blocks, f_bfree, and f_bavail
+	 * are reported in units of f_frsize.  Linux hasn't had
+	 * an f_frsize field in its statfs struct until recently,
+	 * thus historically Linux's sys_statfs reports these
+	 * fields in units of f_bsize.
+	 */
+	buf->f_bsize = sb->s_blocksize;
+	blockbits = sb->s_blocksize_bits;
+	blockres = (1 << blockbits) - 1;
+	buf->f_blocks = (res.tbytes + blockres) >> blockbits;
+	buf->f_bfree = (res.fbytes + blockres) >> blockbits;
+	buf->f_bavail = (res.abytes + blockres) >> blockbits;
+
+	buf->f_files = res.tfiles;
+	buf->f_ffree = res.afiles;
+
+	buf->f_namelen = server->namelen;
+ out:
+	unlock_kernel();
+
+	return 0;
+
+ out_err:
+	printk(KERN_WARNING "nfs_statfs: statfs error = %d\n", -error);
+	buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1;
+	goto out;
+
+}
+
+static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+	static struct proc_nfs_info {
+		int flag;
+		char *str;
+		char *nostr;
+	} nfs_info[] = {
+		{ NFS_MOUNT_SOFT, ",soft", ",hard" },
+		{ NFS_MOUNT_INTR, ",intr", "" },
+		{ NFS_MOUNT_POSIX, ",posix", "" },
+		{ NFS_MOUNT_TCP, ",tcp", ",udp" },
+		{ NFS_MOUNT_NOCTO, ",nocto", "" },
+		{ NFS_MOUNT_NOAC, ",noac", "" },
+		{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
+		{ 0, NULL, NULL }
+	};
+	struct proc_nfs_info *nfs_infop;
+	struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+
+	seq_printf(m, ",v%d", nfss->rpc_ops->version);
+	seq_printf(m, ",rsize=%d", nfss->rsize);
+	seq_printf(m, ",wsize=%d", nfss->wsize);
+	if (nfss->acregmin != 3*HZ)
+		seq_printf(m, ",acregmin=%d", nfss->acregmin/HZ);
+	if (nfss->acregmax != 60*HZ)
+		seq_printf(m, ",acregmax=%d", nfss->acregmax/HZ);
+	if (nfss->acdirmin != 30*HZ)
+		seq_printf(m, ",acdirmin=%d", nfss->acdirmin/HZ);
+	if (nfss->acdirmax != 60*HZ)
+		seq_printf(m, ",acdirmax=%d", nfss->acdirmax/HZ);
+	for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
+		if (nfss->flags & nfs_infop->flag)
+			seq_puts(m, nfs_infop->str);
+		else
+			seq_puts(m, nfs_infop->nostr);
+	}
+	seq_puts(m, ",addr=");
+	seq_escape(m, nfss->hostname, " \t\n\\");
+	return 0;
+}
+
+/*
+ * Invalidate the local caches
+ */
+void
+nfs_zap_caches(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int mode = inode->i_mode;
+
+	NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
+	NFS_ATTRTIMEO_UPDATE(inode) = jiffies;
+
+	memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+		nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS;
+	else
+		nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+}
+
+/*
+ * Invalidate, but do not unhash, the inode
+ */
+static void
+nfs_invalidate_inode(struct inode *inode)
+{
+	umode_t save_mode = inode->i_mode;
+
+	make_bad_inode(inode);
+	inode->i_mode = save_mode;
+	nfs_zap_caches(inode);
+}
+
+struct nfs_find_desc {
+	struct nfs_fh		*fh;
+	struct nfs_fattr	*fattr;
+};
+
+/*
+ * In NFSv3 we can have 64bit inode numbers. In order to support
+ * this, and re-exported directories (also seen in NFSv2)
+ * we are forced to allow 2 different inodes to have the same
+ * i_ino.
+ */
+static int
+nfs_find_actor(struct inode *inode, void *opaque)
+{
+	struct nfs_find_desc	*desc = (struct nfs_find_desc *)opaque;
+	struct nfs_fh		*fh = desc->fh;
+	struct nfs_fattr	*fattr = desc->fattr;
+
+	if (NFS_FILEID(inode) != fattr->fileid)
+		return 0;
+	if (nfs_compare_fh(NFS_FH(inode), fh))
+		return 0;
+	if (is_bad_inode(inode) || NFS_STALE(inode))
+		return 0;
+	return 1;
+}
+
+static int
+nfs_init_locked(struct inode *inode, void *opaque)
+{
+	struct nfs_find_desc	*desc = (struct nfs_find_desc *)opaque;
+	struct nfs_fattr	*fattr = desc->fattr;
+
+	NFS_FILEID(inode) = fattr->fileid;
+	nfs_copy_fh(NFS_FH(inode), desc->fh);
+	return 0;
+}
+
+/* Don't use READDIRPLUS on directories that we believe are too large */
+#define NFS_LIMIT_READDIRPLUS (8*PAGE_SIZE)
+
+/*
+ * This is our front-end to iget that looks up inodes by file handle
+ * instead of inode number.
+ */
+struct inode *
+nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+	struct nfs_find_desc desc = {
+		.fh	= fh,
+		.fattr	= fattr
+	};
+	struct inode *inode = NULL;
+	unsigned long hash;
+
+	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+		goto out_no_inode;
+
+	if (!fattr->nlink) {
+		printk("NFS: Buggy server - nlink == 0!\n");
+		goto out_no_inode;
+	}
+
+	hash = nfs_fattr_to_ino_t(fattr);
+
+	if (!(inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc)))
+		goto out_no_inode;
+
+	if (inode->i_state & I_NEW) {
+		struct nfs_inode *nfsi = NFS_I(inode);
+
+		/* We set i_ino for the few things that still rely on it,
+		 * such as stat(2) */
+		inode->i_ino = hash;
+
+		/* We can't support update_atime(), since the server will reset it */
+		inode->i_flags |= S_NOATIME|S_NOCMTIME;
+		inode->i_mode = fattr->mode;
+		/* Why so? Because we want to revalidate devices and FIFOs, and
+		 * that's precisely what we have in nfs_file_inode_operations.
+		 */
+		inode->i_op = &nfs_file_inode_operations;
+		if (S_ISREG(inode->i_mode)) {
+			inode->i_fop = &nfs_file_operations;
+			inode->i_data.a_ops = &nfs_file_aops;
+			inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
+		} else if (S_ISDIR(inode->i_mode)) {
+			inode->i_op = NFS_SB(sb)->rpc_ops->dir_inode_ops;
+			inode->i_fop = &nfs_dir_operations;
+			if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
+			    && fattr->size <= NFS_LIMIT_READDIRPLUS)
+				NFS_FLAGS(inode) |= NFS_INO_ADVISE_RDPLUS;
+		} else if (S_ISLNK(inode->i_mode))
+			inode->i_op = &nfs_symlink_inode_operations;
+		else
+			init_special_inode(inode, inode->i_mode, fattr->rdev);
+
+		nfsi->read_cache_jiffies = fattr->timestamp;
+		inode->i_atime = fattr->atime;
+		inode->i_mtime = fattr->mtime;
+		inode->i_ctime = fattr->ctime;
+		if (fattr->valid & NFS_ATTR_FATTR_V4)
+			nfsi->change_attr = fattr->change_attr;
+		inode->i_size = nfs_size_to_loff_t(fattr->size);
+		inode->i_nlink = fattr->nlink;
+		inode->i_uid = fattr->uid;
+		inode->i_gid = fattr->gid;
+		if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
+			/*
+			 * report the blocks in 512byte units
+			 */
+			inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+			inode->i_blksize = inode->i_sb->s_blocksize;
+		} else {
+			inode->i_blocks = fattr->du.nfs2.blocks;
+			inode->i_blksize = fattr->du.nfs2.blocksize;
+		}
+		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+		nfsi->attrtimeo_timestamp = jiffies;
+		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+		nfsi->cache_access.cred = NULL;
+
+		unlock_new_inode(inode);
+	} else
+		nfs_refresh_inode(inode, fattr);
+	dprintk("NFS: nfs_fhget(%s/%Ld ct=%d)\n",
+		inode->i_sb->s_id,
+		(long long)NFS_FILEID(inode),
+		atomic_read(&inode->i_count));
+
+out:
+	return inode;
+
+out_no_inode:
+	printk("nfs_fhget: iget failed\n");
+	goto out;
+}
+
+#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET)
+
+int
+nfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct nfs_fattr fattr;
+	int error;
+
+	if (attr->ia_valid & ATTR_SIZE) {
+		if (!S_ISREG(inode->i_mode) || attr->ia_size == i_size_read(inode))
+			attr->ia_valid &= ~ATTR_SIZE;
+	}
+
+	/* Optimization: if the end result is no change, don't RPC */
+	attr->ia_valid &= NFS_VALID_ATTRS;
+	if (attr->ia_valid == 0)
+		return 0;
+
+	lock_kernel();
+	nfs_begin_data_update(inode);
+	/* Write all dirty data if we're changing file permissions or size */
+	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE)) != 0) {
+		if (filemap_fdatawrite(inode->i_mapping) == 0)
+			filemap_fdatawait(inode->i_mapping);
+		nfs_wb_all(inode);
+	}
+	error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
+	if (error == 0) {
+		nfs_refresh_inode(inode, &fattr);
+		if ((attr->ia_valid & ATTR_MODE) != 0) {
+			int mode;
+			mode = inode->i_mode & ~S_IALLUGO;
+			mode |= attr->ia_mode & S_IALLUGO;
+			inode->i_mode = mode;
+		}
+		if ((attr->ia_valid & ATTR_UID) != 0)
+			inode->i_uid = attr->ia_uid;
+		if ((attr->ia_valid & ATTR_GID) != 0)
+			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_SIZE) != 0) {
+			inode->i_size = attr->ia_size;
+			vmtruncate(inode, attr->ia_size);
+		}
+	}
+	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+		NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS;
+	nfs_end_data_update(inode);
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * Wait for the inode to get unlocked.
+ * (Used for NFS_INO_LOCKED and NFS_INO_REVALIDATING).
+ */
+static int
+nfs_wait_on_inode(struct inode *inode, int flag)
+{
+	struct rpc_clnt	*clnt = NFS_CLIENT(inode);
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	int error;
+	if (!(NFS_FLAGS(inode) & flag))
+		return 0;
+	atomic_inc(&inode->i_count);
+	error = nfs_wait_event(clnt, nfsi->nfs_i_wait,
+				!(NFS_FLAGS(inode) & flag));
+	iput(inode);
+	return error;
+}
+
+int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int need_atime = nfsi->flags & NFS_INO_INVALID_ATIME;
+	int err;
+
+	if (__IS_FLG(inode, MS_NOATIME))
+		need_atime = 0;
+	else if (__IS_FLG(inode, MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+		need_atime = 0;
+	/* We may force a getattr if the user cares about atime */
+	if (need_atime)
+		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	else
+		err = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!err)
+		generic_fillattr(inode, stat);
+	return err;
+}
+
+struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred)
+{
+	struct nfs_open_context *ctx;
+
+	ctx = (struct nfs_open_context *)kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx != NULL) {
+		atomic_set(&ctx->count, 1);
+		ctx->dentry = dget(dentry);
+		ctx->cred = get_rpccred(cred);
+		ctx->state = NULL;
+		ctx->lockowner = current->files;
+		ctx->error = 0;
+		init_waitqueue_head(&ctx->waitq);
+	}
+	return ctx;
+}
+
+struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
+{
+	if (ctx != NULL)
+		atomic_inc(&ctx->count);
+	return ctx;
+}
+
+void put_nfs_open_context(struct nfs_open_context *ctx)
+{
+	if (atomic_dec_and_test(&ctx->count)) {
+		if (!list_empty(&ctx->list)) {
+			struct inode *inode = ctx->dentry->d_inode;
+			spin_lock(&inode->i_lock);
+			list_del(&ctx->list);
+			spin_unlock(&inode->i_lock);
+		}
+		if (ctx->state != NULL)
+			nfs4_close_state(ctx->state, ctx->mode);
+		if (ctx->cred != NULL)
+			put_rpccred(ctx->cred);
+		dput(ctx->dentry);
+		kfree(ctx);
+	}
+}
+
+/*
+ * Ensure that mmap has a recent RPC credential for use when writing out
+ * shared pages
+ */
+void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	filp->private_data = get_nfs_open_context(ctx);
+	spin_lock(&inode->i_lock);
+	list_add(&ctx->list, &nfsi->open_files);
+	spin_unlock(&inode->i_lock);
+}
+
+struct nfs_open_context *nfs_find_open_context(struct inode *inode, int mode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_open_context *pos, *ctx = NULL;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(pos, &nfsi->open_files, list) {
+		if ((pos->mode & mode) == mode) {
+			ctx = get_nfs_open_context(pos);
+			break;
+		}
+	}
+	spin_unlock(&inode->i_lock);
+	return ctx;
+}
+
+void nfs_file_clear_open_context(struct file *filp)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct nfs_open_context *ctx = (struct nfs_open_context *)filp->private_data;
+
+	if (ctx) {
+		filp->private_data = NULL;
+		spin_lock(&inode->i_lock);
+		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
+		spin_unlock(&inode->i_lock);
+		put_nfs_open_context(ctx);
+	}
+}
+
+/*
+ * These allocate and release file read/write context information.
+ */
+int nfs_open(struct inode *inode, struct file *filp)
+{
+	struct nfs_open_context *ctx;
+	struct rpc_cred *cred;
+
+	cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
+	if (IS_ERR(cred))
+		return PTR_ERR(cred);
+	ctx = alloc_nfs_open_context(filp->f_dentry, cred);
+	put_rpccred(cred);
+	if (ctx == NULL)
+		return -ENOMEM;
+	ctx->mode = filp->f_mode;
+	nfs_file_set_open_context(filp, ctx);
+	put_nfs_open_context(ctx);
+	if ((filp->f_mode & FMODE_WRITE) != 0)
+		nfs_begin_data_update(inode);
+	return 0;
+}
+
+int nfs_release(struct inode *inode, struct file *filp)
+{
+	if ((filp->f_mode & FMODE_WRITE) != 0)
+		nfs_end_data_update(inode);
+	nfs_file_clear_open_context(filp);
+	return 0;
+}
+
+/*
+ * This function is called whenever some part of NFS notices that
+ * the cached attributes have to be refreshed.
+ */
+int
+__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
+{
+	int		 status = -ESTALE;
+	struct nfs_fattr fattr;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long verifier;
+	unsigned int flags;
+
+	dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
+		inode->i_sb->s_id, (long long)NFS_FILEID(inode));
+
+	lock_kernel();
+	if (!inode || is_bad_inode(inode))
+ 		goto out_nowait;
+	if (NFS_STALE(inode))
+ 		goto out_nowait;
+
+	while (NFS_REVALIDATING(inode)) {
+		status = nfs_wait_on_inode(inode, NFS_INO_REVALIDATING);
+		if (status < 0)
+			goto out_nowait;
+		if (NFS_ATTRTIMEO(inode) == 0)
+			continue;
+		if (NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ATIME))
+			continue;
+		status = NFS_STALE(inode) ? -ESTALE : 0;
+		goto out_nowait;
+	}
+	NFS_FLAGS(inode) |= NFS_INO_REVALIDATING;
+
+	/* Protect against RPC races by saving the change attribute */
+	verifier = nfs_save_change_attribute(inode);
+	status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
+	if (status != 0) {
+		dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
+			 inode->i_sb->s_id,
+			 (long long)NFS_FILEID(inode), status);
+		if (status == -ESTALE) {
+			nfs_zap_caches(inode);
+			if (!S_ISDIR(inode->i_mode))
+				NFS_FLAGS(inode) |= NFS_INO_STALE;
+		}
+		goto out;
+	}
+
+	status = nfs_update_inode(inode, &fattr, verifier);
+	if (status) {
+		dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
+			 inode->i_sb->s_id,
+			 (long long)NFS_FILEID(inode), status);
+		goto out;
+	}
+	flags = nfsi->flags;
+	/*
+	 * We may need to keep the attributes marked as invalid if
+	 * we raced with nfs_end_attr_update().
+	 */
+	if (verifier == nfsi->cache_change_attribute)
+		nfsi->flags &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
+	/* Do the page cache invalidation */
+	if (flags & NFS_INO_INVALID_DATA) {
+		if (S_ISREG(inode->i_mode)) {
+			if (filemap_fdatawrite(inode->i_mapping) == 0)
+				filemap_fdatawait(inode->i_mapping);
+			nfs_wb_all(inode);
+		}
+		nfsi->flags &= ~NFS_INO_INVALID_DATA;
+		invalidate_inode_pages2(inode->i_mapping);
+		memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+		dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode));
+		/* This ensures we revalidate dentries */
+		nfsi->cache_change_attribute++;
+	}
+	dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n",
+		inode->i_sb->s_id,
+		(long long)NFS_FILEID(inode));
+
+out:
+	NFS_FLAGS(inode) &= ~NFS_INO_REVALIDATING;
+	wake_up(&nfsi->nfs_i_wait);
+ out_nowait:
+	unlock_kernel();
+	return status;
+}
+
+int nfs_attribute_timeout(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	if (nfs_have_delegation(inode, FMODE_READ))
+		return 0;
+	return time_after(jiffies, nfsi->read_cache_jiffies+nfsi->attrtimeo);
+}
+
+/**
+ * nfs_revalidate_inode - Revalidate the inode attributes
+ * @server - pointer to nfs_server struct
+ * @inode - pointer to inode struct
+ *
+ * Updates inode attribute information by retrieving the data from the server.
+ */
+int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
+{
+	if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
+			&& !nfs_attribute_timeout(inode))
+		return NFS_STALE(inode) ? -ESTALE : 0;
+	return __nfs_revalidate_inode(server, inode);
+}
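+
+/*
+ * In other words: cached attributes are trusted until either something has
+ * explicitly flagged them invalid (NFS_INO_INVALID_ATTR/NFS_INO_INVALID_DATA)
+ * or the attrtimeo interval has elapsed since read_cache_jiffies, and
+ * holding a read delegation suppresses the timeout check entirely.
+ */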
+
+/**
+ * nfs_begin_data_update
+ * @inode - pointer to inode
+ * Declare that a set of operations will update file data on the server
+ */
+void nfs_begin_data_update(struct inode *inode)
+{
+	atomic_inc(&NFS_I(inode)->data_updates);
+}
+
+/**
+ * nfs_end_data_update
+ * @inode - pointer to inode
+ * Declare end of the operations that will update file data
+ * This will mark the inode as immediately needing revalidation
+ * of its attribute cache.
+ */
+void nfs_end_data_update(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	if (!nfs_have_delegation(inode, FMODE_READ)) {
+		/* Mark the attribute cache for revalidation */
+		nfsi->flags |= NFS_INO_INVALID_ATTR;
+		/* Directories and symlinks: invalidate page cache too */
+		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+			nfsi->flags |= NFS_INO_INVALID_DATA;
+	}
+	nfsi->cache_change_attribute++;
+	atomic_dec(&nfsi->data_updates);
+}
+
+/**
+ * nfs_end_data_update_defer
+ * @inode - pointer to inode
+ * Declare end of the operations that will update file data
+ * This will defer marking the inode as needing revalidation
+ * unless there are no other pending updates.
+ */
+void nfs_end_data_update_defer(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	if (atomic_dec_and_test(&nfsi->data_updates)) {
+		/* Mark the attribute cache for revalidation */
+		nfsi->flags |= NFS_INO_INVALID_ATTR;
+		/* Directories and symlinks: invalidate page cache too */
+		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+			nfsi->flags |= NFS_INO_INVALID_DATA;
+		nfsi->cache_change_attribute++;
+	}
+}
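+
+/*
+ * Summary of the bookkeeping above: data_updates counts operations that are
+ * currently modifying data on the server, while cache_change_attribute acts
+ * as a generation counter; callers snapshot it with
+ * nfs_save_change_attribute() before issuing an RPC and compare it again
+ * with nfs_verify_change_attribute() afterwards to detect races with
+ * concurrent updates (see __nfs_revalidate_inode() and nfs_update_inode()).
+ */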
+
+/**
+ * nfs_refresh_inode - verify consistency of the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * Verifies the attribute cache. If we have just changed the attributes,
+ * so that fattr carries weak cache consistency data, then it may
+ * also update the ctime/mtime/change_attribute.
+ */
+int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	loff_t cur_size, new_isize;
+	int data_unstable;
+
+	/* Do we hold a delegation? */
+	if (nfs_have_delegation(inode, FMODE_READ))
+		return 0;
+
+	/* Are we in the process of updating data on the server? */
+	data_unstable = nfs_caches_unstable(inode);
+
+	if (fattr->valid & NFS_ATTR_FATTR_V4) {
+		if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0
+				&& nfsi->change_attr == fattr->pre_change_attr)
+			nfsi->change_attr = fattr->change_attr;
+		if (!data_unstable && nfsi->change_attr != fattr->change_attr)
+			nfsi->flags |= NFS_INO_INVALID_ATTR;
+	}
+
+	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+		return 0;
+
+	/* Has the inode gone and changed behind our back? */
+	if (nfsi->fileid != fattr->fileid
+			|| (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+		return -EIO;
+
+	cur_size = i_size_read(inode);
+ 	new_isize = nfs_size_to_loff_t(fattr->size);
+
+	/* If we have atomic WCC data, we may update some attributes */
+	if ((fattr->valid & NFS_ATTR_WCC) != 0) {
+		if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
+			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+		if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime))
+			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+	}
+
+	/* Verify a few of the more important attributes */
+	if (!data_unstable) {
+		if (!timespec_equal(&inode->i_mtime, &fattr->mtime)
+				|| cur_size != new_isize)
+			nfsi->flags |= NFS_INO_INVALID_ATTR;
+	} else if (S_ISREG(inode->i_mode) && new_isize > cur_size)
+			nfsi->flags |= NFS_INO_INVALID_ATTR;
+
+	/* Have any file permissions changed? */
+	if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
+			|| inode->i_uid != fattr->uid
+			|| inode->i_gid != fattr->gid)
+		nfsi->flags |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+
+	/* Has the link count changed? */
+	if (inode->i_nlink != fattr->nlink)
+		nfsi->flags |= NFS_INO_INVALID_ATTR;
+
+	if (!timespec_equal(&inode->i_atime, &fattr->atime))
+		nfsi->flags |= NFS_INO_INVALID_ATIME;
+
+	nfsi->read_cache_jiffies = fattr->timestamp;
+	return 0;
+}
+
+/*
+ * Many nfs protocol calls return the new file attributes after
+ * an operation.  Here we update the inode to reflect the state
+ * of the server's inode.
+ *
+ * This is a bit tricky because we have to make sure all dirty pages
+ * have been sent off to the server before calling invalidate_inode_pages.
+ * To make sure no other process adds more write requests while we try
+ * our best to flush them, we make them sleep during the attribute refresh.
+ *
+ * A very similar scenario holds for the dir cache.
+ */
+static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsigned long verifier)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	__u64		new_size;
+	loff_t		new_isize;
+	unsigned int	invalid = 0;
+	loff_t		cur_isize;
+	int data_unstable;
+
+	dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
+			__FUNCTION__, inode->i_sb->s_id, inode->i_ino,
+			atomic_read(&inode->i_count), fattr->valid);
+
+	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+		return 0;
+
+	if (nfsi->fileid != fattr->fileid) {
+		printk(KERN_ERR "%s: inode number mismatch\n"
+		       "expected (%s/0x%Lx), got (%s/0x%Lx)\n",
+		       __FUNCTION__,
+		       inode->i_sb->s_id, (long long)nfsi->fileid,
+		       inode->i_sb->s_id, (long long)fattr->fileid);
+		goto out_err;
+	}
+
+	/*
+	 * Make sure the inode's type hasn't changed.
+	 */
+	if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+		goto out_changed;
+
+	/*
+	 * Update the read time so we don't revalidate too often.
+	 */
+	nfsi->read_cache_jiffies = fattr->timestamp;
+
+	/* Are we racing with known updates of the metadata on the server? */
+	data_unstable = !nfs_verify_change_attribute(inode, verifier);
+
+	/* Check if the file size agrees */
+	new_size = fattr->size;
+ 	new_isize = nfs_size_to_loff_t(fattr->size);
+	cur_isize = i_size_read(inode);
+	if (cur_isize != new_size) {
+#ifdef NFS_DEBUG_VERBOSE
+		printk(KERN_DEBUG "NFS: isize change on %s/%ld\n", inode->i_sb->s_id, inode->i_ino);
+#endif
+		/*
+		 * If we have pending writebacks, things can get
+		 * messy.
+		 */
+		if (S_ISREG(inode->i_mode) && data_unstable) {
+			if (new_isize > cur_isize) {
+				inode->i_size = new_isize;
+				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+			}
+		} else {
+			inode->i_size = new_isize;
+			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+		}
+	}
+
+	/*
+	 * Note: we don't check inode->i_mtime since pipes etc.
+	 *       can change this value in VFS without requiring a
+	 *	 cache revalidation.
+	 */
+	if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
+		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+#ifdef NFS_DEBUG_VERBOSE
+		printk(KERN_DEBUG "NFS: mtime change on %s/%ld\n", inode->i_sb->s_id, inode->i_ino);
+#endif
+		if (!data_unstable)
+			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+	}
+
+	if ((fattr->valid & NFS_ATTR_FATTR_V4)
+	    && nfsi->change_attr != fattr->change_attr) {
+#ifdef NFS_DEBUG_VERBOSE
+		printk(KERN_DEBUG "NFS: change_attr change on %s/%ld\n",
+		       inode->i_sb->s_id, inode->i_ino);
+#endif
+		nfsi->change_attr = fattr->change_attr;
+		if (!data_unstable)
+			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS;
+	}
+
+	memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+	memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+
+	if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) ||
+	    inode->i_uid != fattr->uid ||
+	    inode->i_gid != fattr->gid)
+		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+
+	inode->i_mode = fattr->mode;
+	inode->i_nlink = fattr->nlink;
+	inode->i_uid = fattr->uid;
+	inode->i_gid = fattr->gid;
+
+	if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
+		/*
+		 * report the blocks in 512byte units
+		 */
+		inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+		inode->i_blksize = inode->i_sb->s_blocksize;
+ 	} else {
+ 		inode->i_blocks = fattr->du.nfs2.blocks;
+ 		inode->i_blksize = fattr->du.nfs2.blocksize;
+ 	}
+
+	/* Update attrtimeo value if we're out of the unstable period */
+	if (invalid & NFS_INO_INVALID_ATTR) {
+		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+		nfsi->attrtimeo_timestamp = jiffies;
+	} else if (time_after(jiffies, nfsi->attrtimeo_timestamp+nfsi->attrtimeo)) {
+		if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
+			nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+		nfsi->attrtimeo_timestamp = jiffies;
+	}
+	/* Don't invalidate the data if we were to blame */
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+				|| S_ISLNK(inode->i_mode)))
+		invalid &= ~NFS_INO_INVALID_DATA;
+	if (!nfs_have_delegation(inode, FMODE_READ))
+		nfsi->flags |= invalid;
+
+	return 0;
+ out_changed:
+	/*
+	 * Big trouble! The inode has become a different object.
+	 */
+#ifdef NFS_PARANOIA
+	printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n",
+			__FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode);
+#endif
+	/*
+	 * No need to worry about unhashing the dentry, as the
+	 * lookup validation will know that the inode is bad.
+	 * (But we fall through to invalidate the caches.)
+	 */
+	nfs_invalidate_inode(inode);
+ out_err:
+	NFS_FLAGS(inode) |= NFS_INO_STALE;
+	return -ESTALE;
+}
+
+/*
+ * File system information
+ */
+
+static int nfs_set_super(struct super_block *s, void *data)
+{
+	s->s_fs_info = data;
+	return set_anon_super(s, data);
+}
+ 
+static int nfs_compare_super(struct super_block *sb, void *data)
+{
+	struct nfs_server *server = data;
+	struct nfs_server *old = NFS_SB(sb);
+
+	if (old->addr.sin_addr.s_addr != server->addr.sin_addr.s_addr)
+		return 0;
+	if (old->addr.sin_port != server->addr.sin_port)
+		return 0;
+	return !nfs_compare_fh(&old->fh, &server->fh);
+}
+
+static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *raw_data)
+{
+	int error;
+	struct nfs_server *server;
+	struct super_block *s;
+	struct nfs_fh *root;
+	struct nfs_mount_data *data = raw_data;
+
+	if (!data) {
+		printk("nfs_read_super: missing data argument\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL);
+	if (!server)
+		return ERR_PTR(-ENOMEM);
+	memset(server, 0, sizeof(struct nfs_server));
+	/* Zero out the NFS state stuff */
+	init_nfsv4_state(server);
+
+	if (data->version != NFS_MOUNT_VERSION) {
+		printk("nfs warning: mount version %s than kernel\n",
+			data->version < NFS_MOUNT_VERSION ? "older" : "newer");
+		if (data->version < 2)
+			data->namlen = 0;
+		if (data->version < 3)
+			data->bsize  = 0;
+		if (data->version < 4) {
+			data->flags &= ~NFS_MOUNT_VER3;
+			data->root.size = NFS2_FHSIZE;
+			memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
+		}
+		if (data->version < 5)
+			data->flags &= ~NFS_MOUNT_SECFLAVOUR;
+	}
+
+	root = &server->fh;
+	if (data->flags & NFS_MOUNT_VER3)
+		root->size = data->root.size;
+	else
+		root->size = NFS2_FHSIZE;
+	if (root->size > sizeof(root->data)) {
+		printk("nfs_get_sb: invalid root filehandle\n");
+		kfree(server);
+		return ERR_PTR(-EINVAL);
+	}
+	memcpy(root->data, data->root.data, root->size);
+
+	/* We now require that the mount process passes the remote address */
+	memcpy(&server->addr, &data->addr, sizeof(server->addr));
+	if (server->addr.sin_addr.s_addr == INADDR_ANY) {
+		printk("NFS: mount program didn't pass remote address!\n");
+		kfree(server);
+		return ERR_PTR(-EINVAL);
+	}
+
+	s = sget(fs_type, nfs_compare_super, nfs_set_super, server);
+
+	if (IS_ERR(s) || s->s_root) {
+		kfree(server);
+		return s;
+	}
+
+	s->s_flags = flags;
+
+	/* Fire up rpciod if not yet running */
+	if (rpciod_up() != 0) {
+		printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
+		kfree(server);
+		return ERR_PTR(-EIO);
+	}
+
+	error = nfs_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+	if (error) {
+		up_write(&s->s_umount);
+		deactivate_super(s);
+		return ERR_PTR(error);
+	}
+	s->s_flags |= MS_ACTIVE;
+	return s;
+}
+
+static void nfs_kill_super(struct super_block *s)
+{
+	struct nfs_server *server = NFS_SB(s);
+
+	kill_anon_super(s);
+
+	if (server->client != NULL && !IS_ERR(server->client))
+		rpc_shutdown_client(server->client);
+	if (server->client_sys != NULL && !IS_ERR(server->client_sys))
+		rpc_shutdown_client(server->client_sys);
+
+	if (!(server->flags & NFS_MOUNT_NONLM))
+		lockd_down();	/* release rpc.lockd */
+
+	rpciod_down();		/* release rpciod */
+
+	if (server->hostname != NULL)
+		kfree(server->hostname);
+	kfree(server);
+}
+
+static struct file_system_type nfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "nfs",
+	.get_sb		= nfs_get_sb,
+	.kill_sb	= nfs_kill_super,
+	.fs_flags	= FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+#ifdef CONFIG_NFS_V4
+
+static void nfs4_clear_inode(struct inode *);
+
+
+static struct super_operations nfs4_sops = { 
+	.alloc_inode	= nfs_alloc_inode,
+	.destroy_inode	= nfs_destroy_inode,
+	.write_inode	= nfs_write_inode,
+	.delete_inode	= nfs_delete_inode,
+	.statfs		= nfs_statfs,
+	.clear_inode	= nfs4_clear_inode,
+	.umount_begin	= nfs_umount_begin,
+	.show_options	= nfs_show_options,
+};
+
+/*
+ * Clean out any remaining NFSv4 state that might be left over due
+ * to open() calls that passed nfs_atomic_lookup, but failed to call
+ * nfs_open().
+ */
+static void nfs4_clear_inode(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	/* If we are holding a delegation, return it! */
+	if (nfsi->delegation != NULL)
+		nfs_inode_return_delegation(inode);
+	/* First call standard NFS clear_inode() code */
+	nfs_clear_inode(inode);
+	/* Now clear out any remaining state */
+	while (!list_empty(&nfsi->open_states)) {
+		struct nfs4_state *state;
+		
+		state = list_entry(nfsi->open_states.next,
+				struct nfs4_state,
+				inode_states);
+		dprintk("%s(%s/%Ld): found unclaimed NFSv4 state %p\n",
+				__FUNCTION__,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				state);
+		BUG_ON(atomic_read(&state->count) != 1);
+		nfs4_close_state(state, state->state);
+	}
+}
+
+
+static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, int silent)
+{
+	struct nfs_server *server;
+	struct nfs4_client *clp = NULL;
+	struct rpc_xprt *xprt = NULL;
+	struct rpc_clnt *clnt = NULL;
+	struct rpc_timeout timeparms;
+	rpc_authflavor_t authflavour;
+	int proto, err = -EIO;
+
+	sb->s_blocksize_bits = 0;
+	sb->s_blocksize = 0;
+	server = NFS_SB(sb);
+	if (data->rsize != 0)
+		server->rsize = nfs_block_size(data->rsize, NULL);
+	if (data->wsize != 0)
+		server->wsize = nfs_block_size(data->wsize, NULL);
+	server->flags = data->flags & NFS_MOUNT_FLAGMASK;
+	server->caps = NFS_CAP_ATOMIC_OPEN;
+
+	server->acregmin = data->acregmin*HZ;
+	server->acregmax = data->acregmax*HZ;
+	server->acdirmin = data->acdirmin*HZ;
+	server->acdirmax = data->acdirmax*HZ;
+
+	server->rpc_ops = &nfs_v4_clientops;
+	/* Initialize timeout values */
+
+	timeparms.to_initval = data->timeo * HZ / 10;
+	timeparms.to_retries = data->retrans;
+	timeparms.to_exponential = 1;
+	if (!timeparms.to_retries)
+		timeparms.to_retries = 5;
+
+	proto = data->proto;
+	/* Which IP protocol do we use? */
+	switch (proto) {
+	case IPPROTO_TCP:
+		timeparms.to_maxval  = RPC_MAX_TCP_TIMEOUT;
+		if (!timeparms.to_initval)
+			timeparms.to_initval = 600 * HZ / 10;
+		break;
+	case IPPROTO_UDP:
+		timeparms.to_maxval  = RPC_MAX_UDP_TIMEOUT;
+		if (!timeparms.to_initval)
+			timeparms.to_initval = 11 * HZ / 10;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clp = nfs4_get_client(&server->addr.sin_addr);
+	if (!clp) {
+		printk(KERN_WARNING "NFS: failed to create NFS4 client.\n");
+		return -EIO;
+	}
+
+	/* Now create transport and client */
+	authflavour = RPC_AUTH_UNIX;
+	if (data->auth_flavourlen != 0) {
+		if (data->auth_flavourlen > 1)
+			printk(KERN_INFO "NFS: cannot yet deal with multiple auth flavours.\n");
+		if (copy_from_user(&authflavour, data->auth_flavours, sizeof(authflavour))) {
+			err = -EFAULT;
+			goto out_fail;
+		}
+	}
+
+	down_write(&clp->cl_sem);
+	if (clp->cl_rpcclient == NULL) {
+		xprt = xprt_create_proto(proto, &server->addr, &timeparms);
+		if (IS_ERR(xprt)) {
+			up_write(&clp->cl_sem);
+			printk(KERN_WARNING "NFS: cannot create RPC transport.\n");
+			err = PTR_ERR(xprt);
+			goto out_fail;
+		}
+		clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
+				server->rpc_ops->version, authflavour);
+		if (IS_ERR(clnt)) {
+			up_write(&clp->cl_sem);
+			printk(KERN_WARNING "NFS: cannot create RPC client.\n");
+			xprt_destroy(xprt);
+			err = PTR_ERR(clnt);
+			goto out_fail;
+		}
+		clnt->cl_intr     = 1;
+		clnt->cl_softrtry = 1;
+		clnt->cl_chatty   = 1;
+		clp->cl_rpcclient = clnt;
+		clp->cl_cred = rpcauth_lookupcred(clnt->cl_auth, 0);
+		if (IS_ERR(clp->cl_cred)) {
+			up_write(&clp->cl_sem);
+			err = PTR_ERR(clp->cl_cred);
+			clp->cl_cred = NULL;
+			goto out_fail;
+		}
+		memcpy(clp->cl_ipaddr, server->ip_addr, sizeof(clp->cl_ipaddr));
+		nfs_idmap_new(clp);
+	}
+	if (list_empty(&clp->cl_superblocks)) {
+		err = nfs4_init_client(clp);
+		if (err != 0) {
+			up_write(&clp->cl_sem);
+			goto out_fail;
+		}
+	}
+	list_add_tail(&server->nfs4_siblings, &clp->cl_superblocks);
+	clnt = rpc_clone_client(clp->cl_rpcclient);
+	if (!IS_ERR(clnt))
+		server->nfs4_state = clp;
+	up_write(&clp->cl_sem);
+	clp = NULL;
+
+	if (IS_ERR(clnt)) {
+		printk(KERN_WARNING "NFS: cannot create RPC client.\n");
+		return PTR_ERR(clnt);
+	}
+
+	server->client    = clnt;
+
+	if (server->nfs4_state->cl_idmap == NULL) {
+		printk(KERN_WARNING "NFS: failed to create idmapper.\n");
+		return -ENOMEM;
+	}
+
+	if (clnt->cl_auth->au_flavor != authflavour) {
+		if (rpcauth_create(authflavour, clnt) == NULL) {
+			printk(KERN_WARNING "NFS: couldn't create credcache!\n");
+			return -ENOMEM;
+		}
+	}
+
+	sb->s_time_gran = 1;
+
+	sb->s_op = &nfs4_sops;
+	err = nfs_sb_init(sb, authflavour);
+	if (err == 0)
+		return 0;
+out_fail:
+	if (clp)
+		nfs4_put_client(clp);
+	return err;
+}
+
+static int nfs4_compare_super(struct super_block *sb, void *data)
+{
+	struct nfs_server *server = data;
+	struct nfs_server *old = NFS_SB(sb);
+
+	if (strcmp(server->hostname, old->hostname) != 0)
+		return 0;
+	if (strcmp(server->mnt_path, old->mnt_path) != 0)
+		return 0;
+	return 1;
+}
+
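+/*
+ * Copy a string from user space, truncating it to at most maxlen bytes
+ * and NUL-terminating the result; a buffer is allocated when dst is NULL.
+ */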
+static void *
+nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen)
+{
+	void *p = NULL;
+
+	if (!src->len)
+		return ERR_PTR(-EINVAL);
+	if (src->len < maxlen)
+		maxlen = src->len;
+	if (dst == NULL) {
+		p = dst = kmalloc(maxlen + 1, GFP_KERNEL);
+		if (p == NULL)
+			return ERR_PTR(-ENOMEM);
+	}
+	if (copy_from_user(dst, src->data, maxlen)) {
+		if (p != NULL)
+			kfree(p);
+		return ERR_PTR(-EFAULT);
+	}
+	dst[maxlen] = '\0';
+	return dst;
+}
+
+static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *raw_data)
+{
+	int error;
+	struct nfs_server *server;
+	struct super_block *s;
+	struct nfs4_mount_data *data = raw_data;
+	void *p;
+
+	if (!data) {
+		printk("nfs4_get_sb: missing data argument\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL);
+	if (!server)
+		return ERR_PTR(-ENOMEM);
+	memset(server, 0, sizeof(struct nfs_server));
+	/* Zero out the NFS state stuff */
+	init_nfsv4_state(server);
+
+	if (data->version != NFS4_MOUNT_VERSION) {
+		printk("nfs warning: mount version %s than kernel\n",
+			data->version < NFS4_MOUNT_VERSION ? "older" : "newer");
+	}
+
+	p = nfs_copy_user_string(NULL, &data->hostname, 256);
+	if (IS_ERR(p))
+		goto out_err;
+	server->hostname = p;
+
+	p = nfs_copy_user_string(NULL, &data->mnt_path, 1024);
+	if (IS_ERR(p))
+		goto out_err;
+	server->mnt_path = p;
+
+	p = nfs_copy_user_string(server->ip_addr, &data->client_addr,
+			sizeof(server->ip_addr) - 1);
+	if (IS_ERR(p))
+		goto out_err;
+
+	/* We now require that the mount process passes the remote address */
+	if (data->host_addrlen != sizeof(server->addr)) {
+		s = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
+	if (copy_from_user(&server->addr, data->host_addr, sizeof(server->addr))) {
+		s = ERR_PTR(-EFAULT);
+		goto out_free;
+	}
+	if (server->addr.sin_family != AF_INET ||
+	    server->addr.sin_addr.s_addr == INADDR_ANY) {
+		printk("NFS: mount program didn't pass remote IP address!\n");
+		s = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
+
+	s = sget(fs_type, nfs4_compare_super, nfs_set_super, server);
+
+	if (IS_ERR(s) || s->s_root)
+		goto out_free;
+
+	s->s_flags = flags;
+
+	/* Fire up rpciod if not yet running */
+	if (rpciod_up() != 0) {
+		printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
+		s = ERR_PTR(-EIO);
+		goto out_free;
+	}
+
+	error = nfs4_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+	if (error) {
+		up_write(&s->s_umount);
+		deactivate_super(s);
+		return ERR_PTR(error);
+	}
+	s->s_flags |= MS_ACTIVE;
+	return s;
+out_err:
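+	/* p holds an ERR_PTR error value here; return it as the superblock pointer. */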
+	s = (struct super_block *)p;
+out_free:
+	if (server->mnt_path)
+		kfree(server->mnt_path);
+	if (server->hostname)
+		kfree(server->hostname);
+	kfree(server);
+	return s;
+}
+
+static void nfs4_kill_super(struct super_block *sb)
+{
+	struct nfs_server *server = NFS_SB(sb);
+
+	nfs_return_all_delegations(sb);
+	kill_anon_super(sb);
+
+	nfs4_renewd_prepare_shutdown(server);
+
+	if (server->client != NULL && !IS_ERR(server->client))
+		rpc_shutdown_client(server->client);
+	rpciod_down();		/* release rpciod */
+
+	destroy_nfsv4_state(server);
+
+	if (server->hostname != NULL)
+		kfree(server->hostname);
+	kfree(server);
+}
+
+static struct file_system_type nfs4_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "nfs4",
+	.get_sb		= nfs4_get_sb,
+	.kill_sb	= nfs4_kill_super,
+	.fs_flags	= FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+#define nfs4_init_once(nfsi) \
+	do { \
+		INIT_LIST_HEAD(&(nfsi)->open_states); \
+		nfsi->delegation = NULL; \
+		nfsi->delegation_state = 0; \
+		init_rwsem(&nfsi->rwsem); \
+	} while(0)
+#define register_nfs4fs() register_filesystem(&nfs4_fs_type)
+#define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type)
+#else
+#define nfs4_init_once(nfsi) \
+	do { } while (0)
+#define register_nfs4fs() (0)
+#define unregister_nfs4fs()
+#endif
+
+extern int nfs_init_nfspagecache(void);
+extern void nfs_destroy_nfspagecache(void);
+extern int nfs_init_readpagecache(void);
+extern void nfs_destroy_readpagecache(void);
+extern int nfs_init_writepagecache(void);
+extern void nfs_destroy_writepagecache(void);
+#ifdef CONFIG_NFS_DIRECTIO
+extern int nfs_init_directcache(void);
+extern void nfs_destroy_directcache(void);
+#endif
+
+static kmem_cache_t * nfs_inode_cachep;
+
+static struct inode *nfs_alloc_inode(struct super_block *sb)
+{
+	struct nfs_inode *nfsi;
+	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
+	if (!nfsi)
+		return NULL;
+	nfsi->flags = 0;
+	return &nfsi->vfs_inode;
+}
+
+static void nfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
+}
+
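+/*
+ * Slab constructor: called once per object when a new slab is allocated,
+ * so only state that persists across free/reallocation is set up here.
+ */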
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		inode_init_once(&nfsi->vfs_inode);
+		spin_lock_init(&nfsi->req_lock);
+		INIT_LIST_HEAD(&nfsi->dirty);
+		INIT_LIST_HEAD(&nfsi->commit);
+		INIT_LIST_HEAD(&nfsi->open_files);
+		INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
+		atomic_set(&nfsi->data_updates, 0);
+		nfsi->ndirty = 0;
+		nfsi->ncommit = 0;
+		nfsi->npages = 0;
+		init_waitqueue_head(&nfsi->nfs_i_wait);
+		nfs4_init_once(nfsi);
+	}
+}
+ 
+int nfs_init_inodecache(void)
+{
+	nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
+					     sizeof(struct nfs_inode),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (nfs_inode_cachep == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void nfs_destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(nfs_inode_cachep))
+		printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
+}
+
+/*
+ * Initialize NFS
+ */
+static int __init init_nfs_fs(void)
+{
+	int err;
+
+	err = nfs_init_nfspagecache();
+	if (err)
+		goto out4;
+
+	err = nfs_init_inodecache();
+	if (err)
+		goto out3;
+
+	err = nfs_init_readpagecache();
+	if (err)
+		goto out2;
+
+	err = nfs_init_writepagecache();
+	if (err)
+		goto out1;
+
+#ifdef CONFIG_NFS_DIRECTIO
+	err = nfs_init_directcache();
+	if (err)
+		goto out0;
+#endif
+
+#ifdef CONFIG_PROC_FS
+	rpc_proc_register(&nfs_rpcstat);
+#endif
+	err = register_filesystem(&nfs_fs_type);
+	if (err)
+		goto out;
+	if ((err = register_nfs4fs()) != 0)
+		goto out;
+	return 0;
+out:
+#ifdef CONFIG_PROC_FS
+	rpc_proc_unregister("nfs");
+#endif
+	nfs_destroy_writepagecache();
+#ifdef CONFIG_NFS_DIRECTIO
+out0:
+	nfs_destroy_directcache();
+#endif
+out1:
+	nfs_destroy_readpagecache();
+out2:
+	nfs_destroy_inodecache();
+out3:
+	nfs_destroy_nfspagecache();
+out4:
+	return err;
+}
+
+static void __exit exit_nfs_fs(void)
+{
+#ifdef CONFIG_NFS_DIRECTIO
+	nfs_destroy_directcache();
+#endif
+	nfs_destroy_writepagecache();
+	nfs_destroy_readpagecache();
+	nfs_destroy_inodecache();
+	nfs_destroy_nfspagecache();
+#ifdef CONFIG_PROC_FS
+	rpc_proc_unregister("nfs");
+#endif
+	unregister_filesystem(&nfs_fs_type);
+	unregister_nfs4fs();
+}
+
+/* Not quite true; I just maintain it */
+MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_LICENSE("GPL");
+
+module_init(init_nfs_fs)
+module_exit(exit_nfs_fs)
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
new file mode 100644
index 0000000..9d3ddad
--- /dev/null
+++ b/fs/nfs/mount_clnt.c
@@ -0,0 +1,183 @@
+/*
+ * linux/fs/nfs/mount_clnt.c
+ *
+ * MOUNT client to support NFSroot.
+ *
+ * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/uio.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/nfs_fs.h>
+
+#ifdef RPC_DEBUG
+# define NFSDBG_FACILITY	NFSDBG_ROOT
+#endif
+
+/*
+#define MOUNT_PROGRAM		100005
+#define MOUNT_VERSION		1
+#define MOUNT_MNT		1
+#define MOUNT_UMNT		3
+ */
+
+static struct rpc_clnt *	mnt_create(char *, struct sockaddr_in *,
+								int, int);
+static struct rpc_program	mnt_program;
+
+struct mnt_fhstatus {
+	unsigned int		status;
+	struct nfs_fh *		fh;
+};
+
+/*
+ * Obtain an NFS file handle for the given host and path
+ */
+int
+nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
+		int version, int protocol)
+{
+	struct rpc_clnt		*mnt_clnt;
+	struct mnt_fhstatus	result = {
+		.fh		= fh
+	};
+	char			hostname[32];
+	int			status;
+	int			call;
+
+	dprintk("NFS:      nfs_mount(%08x:%s)\n",
+			(unsigned)ntohl(addr->sin_addr.s_addr), path);
+
+	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr->sin_addr.s_addr));
+	mnt_clnt = mnt_create(hostname, addr, version, protocol);
+	if (IS_ERR(mnt_clnt))
+		return PTR_ERR(mnt_clnt);
+
+	call = (version == NFS_MNT3_VERSION) ? MOUNTPROC3_MNT : MNTPROC_MNT;
+	status = rpc_call(mnt_clnt, call, path, &result, 0);
+	return status < 0? status : (result.status? -EACCES : 0);
+}
+
+static struct rpc_clnt *
+mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version,
+		int protocol)
+{
+	struct rpc_xprt	*xprt;
+	struct rpc_clnt	*clnt;
+
+	xprt = xprt_create_proto(protocol, srvaddr, NULL);
+	if (IS_ERR(xprt))
+		return (struct rpc_clnt *)xprt;
+
+	clnt = rpc_create_client(xprt, hostname,
+				&mnt_program, version,
+				RPC_AUTH_UNIX);
+	if (IS_ERR(clnt)) {
+		xprt_destroy(xprt);
+	} else {
+		clnt->cl_softrtry = 1;
+		clnt->cl_chatty   = 1;
+		clnt->cl_oneshot  = 1;
+		clnt->cl_intr = 1;
+	}
+	return clnt;
+}
+
+/*
+ * XDR encode/decode functions for MOUNT
+ */
+static int
+xdr_encode_dirpath(struct rpc_rqst *req, u32 *p, const char *path)
+{
+	p = xdr_encode_string(p, path);
+
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+static int
+xdr_decode_fhstatus(struct rpc_rqst *req, u32 *p, struct mnt_fhstatus *res)
+{
+	struct nfs_fh *fh = res->fh;
+
+	if ((res->status = ntohl(*p++)) == 0) {
+		fh->size = NFS2_FHSIZE;
+		memcpy(fh->data, p, NFS2_FHSIZE);
+	}
+	return 0;
+}
+
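+/*
+ * MOUNTv3 replies carry a variable-length file handle (at most
+ * NFS3_FHSIZE bytes), preceded by its length.
+ */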
+static int
+xdr_decode_fhstatus3(struct rpc_rqst *req, u32 *p, struct mnt_fhstatus *res)
+{
+	struct nfs_fh *fh = res->fh;
+
+	if ((res->status = ntohl(*p++)) == 0) {
+		int size = ntohl(*p++);
+		if (size <= NFS3_FHSIZE) {
+			fh->size = size;
+			memcpy(fh->data, p, size);
+		} else
+			res->status = -EBADHANDLE;
+	}
+	return 0;
+}
+
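+/* XDR buffer size estimates, in units of 32-bit XDR words */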
+#define MNT_dirpath_sz		(1 + 256)
+#define MNT_fhstatus_sz		(1 + 8)
+
+static struct rpc_procinfo	mnt_procedures[] = {
+[MNTPROC_MNT] = {
+	  .p_proc		= MNTPROC_MNT,
+	  .p_encode		= (kxdrproc_t) xdr_encode_dirpath,	
+	  .p_decode		= (kxdrproc_t) xdr_decode_fhstatus,
+	  .p_bufsiz		= MNT_dirpath_sz << 2,
+	},
+};
+
+static struct rpc_procinfo mnt3_procedures[] = {
+[MOUNTPROC3_MNT] = {
+	  .p_proc		= MOUNTPROC3_MNT,
+	  .p_encode		= (kxdrproc_t) xdr_encode_dirpath,
+	  .p_decode		= (kxdrproc_t) xdr_decode_fhstatus3,
+	  .p_bufsiz		= MNT_dirpath_sz << 2,
+	},
+};
+
+
+static struct rpc_version	mnt_version1 = {
+		.number		= 1,
+		.nrprocs 	= 2,
+		.procs 		= mnt_procedures
+};
+
+static struct rpc_version       mnt_version3 = {
+		.number		= 3,
+		.nrprocs	= 2,
+		.procs		= mnt3_procedures
+};
+
+static struct rpc_version *	mnt_version[] = {
+	NULL,
+	&mnt_version1,
+	NULL,
+	&mnt_version3,
+};
+
+static struct rpc_stat		mnt_stats;
+
+static struct rpc_program	mnt_program = {
+	.name		= "mount",
+	.number		= NFS_MNT_PROGRAM,
+	.nrvers		= sizeof(mnt_version)/sizeof(mnt_version[0]),
+	.version	= mnt_version,
+	.stats		= &mnt_stats,
+};
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
new file mode 100644
index 0000000..d91b690
--- /dev/null
+++ b/fs/nfs/nfs2xdr.c
@@ -0,0 +1,711 @@
+/*
+ * linux/fs/nfs/nfs2xdr.c
+ *
+ * XDR functions to encode/decode NFS RPC arguments and results.
+ *
+ * Copyright (C) 1992, 1993, 1994  Rick Sladkey
+ * Copyright (C) 1996 Olaf Kirch
+ * 04 Aug 1998  Ion Badulescu <ionut@cs.columbia.edu>
+ * 		FIFOs need special handling in NFSv2
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+
+#define NFSDBG_FACILITY		NFSDBG_XDR
+/* #define NFS_PARANOIA 1 */
+
+extern int			nfs_stat_to_errno(int stat);
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO		EIO
+
+/*
+ * Declare the space requirements for NFS arguments and replies as
+ * number of 32bit-words
+ */
+#define NFS_fhandle_sz		(8)
+#define NFS_sattr_sz		(8)
+#define NFS_filename_sz		(1+(NFS2_MAXNAMLEN>>2))
+#define NFS_path_sz		(1+(NFS2_MAXPATHLEN>>2))
+#define NFS_fattr_sz		(17)
+#define NFS_info_sz		(5)
+#define NFS_entry_sz		(NFS_filename_sz+3)
+
+#define NFS_diropargs_sz	(NFS_fhandle_sz+NFS_filename_sz)
+#define NFS_sattrargs_sz	(NFS_fhandle_sz+NFS_sattr_sz)
+#define NFS_readlinkargs_sz	(NFS_fhandle_sz)
+#define NFS_readargs_sz		(NFS_fhandle_sz+3)
+#define NFS_writeargs_sz	(NFS_fhandle_sz+4)
+#define NFS_createargs_sz	(NFS_diropargs_sz+NFS_sattr_sz)
+#define NFS_renameargs_sz	(NFS_diropargs_sz+NFS_diropargs_sz)
+#define NFS_linkargs_sz		(NFS_fhandle_sz+NFS_diropargs_sz)
+#define NFS_symlinkargs_sz	(NFS_diropargs_sz+NFS_path_sz+NFS_sattr_sz)
+#define NFS_readdirargs_sz	(NFS_fhandle_sz+2)
+
+#define NFS_attrstat_sz		(1+NFS_fattr_sz)
+#define NFS_diropres_sz		(1+NFS_fhandle_sz+NFS_fattr_sz)
+#define NFS_readlinkres_sz	(2)
+#define NFS_readres_sz		(1+NFS_fattr_sz+1)
+#define NFS_writeres_sz         (NFS_attrstat_sz)
+#define NFS_stat_sz		(1)
+#define NFS_readdirres_sz	(1)
+#define NFS_statfsres_sz	(1+NFS_info_sz)
+
+/*
+ * Common NFS XDR functions as inlines
+ */
+static inline u32 *
+xdr_encode_fhandle(u32 *p, struct nfs_fh *fhandle)
+{
+	memcpy(p, fhandle->data, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+static inline u32 *
+xdr_decode_fhandle(u32 *p, struct nfs_fh *fhandle)
+{
+	/* NFSv2 handles have a fixed length */
+	fhandle->size = NFS2_FHSIZE;
+	memcpy(fhandle->data, p, NFS2_FHSIZE);
+	return p + XDR_QUADLEN(NFS2_FHSIZE);
+}
+
+static inline u32*
+xdr_encode_time(u32 *p, struct timespec *timep)
+{
+	*p++ = htonl(timep->tv_sec);
+	/* Convert nanoseconds into microseconds */
+	*p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
+	return p;
+}
+
+static inline u32*
+xdr_encode_current_server_time(u32 *p, struct timespec *timep)
+{
+	/*
+	 * Passing the invalid value useconds=1000000 is a
+	 * Sun convention for "set to current server time".
+	 * It's needed to make permissions checks for the
+	 * "touch" program across v2 mounts to Solaris and
+	 * Irix boxes work correctly. See description of
+	 * sattr in section 6.1 of "NFS Illustrated" by
+	 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
+	 */
+	*p++ = htonl(timep->tv_sec);
+	*p++ = htonl(1000000);
+	return p;
+}
+
+static inline u32*
+xdr_decode_time(u32 *p, struct timespec *timep)
+{
+	timep->tv_sec = ntohl(*p++);
+	/* Convert microseconds into nanoseconds */
+	timep->tv_nsec = ntohl(*p++) * 1000;
+	return p;
+}
+
+static u32 *
+xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
+{
+	u32 rdev;
+	fattr->type = (enum nfs_ftype) ntohl(*p++);
+	fattr->mode = ntohl(*p++);
+	fattr->nlink = ntohl(*p++);
+	fattr->uid = ntohl(*p++);
+	fattr->gid = ntohl(*p++);
+	fattr->size = ntohl(*p++);
+	fattr->du.nfs2.blocksize = ntohl(*p++);
+	rdev = ntohl(*p++);
+	fattr->du.nfs2.blocks = ntohl(*p++);
+	fattr->fsid_u.nfs3 = ntohl(*p++);
+	fattr->fileid = ntohl(*p++);
+	p = xdr_decode_time(p, &fattr->atime);
+	p = xdr_decode_time(p, &fattr->mtime);
+	p = xdr_decode_time(p, &fattr->ctime);
+	fattr->valid |= NFS_ATTR_FATTR;
+	fattr->rdev = new_decode_dev(rdev);
+	if (fattr->type == NFCHR && rdev == NFS2_FIFO_DEV) {
+		fattr->type = NFFIFO;
+		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
+		fattr->rdev = 0;
+	}
+	fattr->timestamp = jiffies;
+	return p;
+}
+
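+/*
+ * Encode one sattr field, or ~0 ("do not change") when the corresponding
+ * attribute is not being set.
+ */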
+#define SATTR(p, attr, flag, field) \
+        *p++ = (attr->ia_valid & flag) ? htonl(attr->field) : ~(u32) 0
+static inline u32 *
+xdr_encode_sattr(u32 *p, struct iattr *attr)
+{
+	SATTR(p, attr, ATTR_MODE, ia_mode);
+	SATTR(p, attr, ATTR_UID, ia_uid);
+	SATTR(p, attr, ATTR_GID, ia_gid);
+	SATTR(p, attr, ATTR_SIZE, ia_size);
+
+	if (attr->ia_valid & ATTR_ATIME_SET) {
+		p = xdr_encode_time(p, &attr->ia_atime);
+	} else if (attr->ia_valid & ATTR_ATIME) {
+		p = xdr_encode_current_server_time(p, &attr->ia_atime);
+	} else {
+		*p++ = ~(u32) 0;
+		*p++ = ~(u32) 0;
+	}
+
+	if (attr->ia_valid & ATTR_MTIME_SET) {
+		p = xdr_encode_time(p, &attr->ia_mtime);
+	} else if (attr->ia_valid & ATTR_MTIME) {
+		p = xdr_encode_current_server_time(p, &attr->ia_mtime);
+	} else {
+		*p++ = ~(u32) 0;	
+		*p++ = ~(u32) 0;
+	}
+	return p;
+}
+#undef SATTR
+
+/*
+ * NFS encode functions
+ */
+/*
+ * Encode file handle argument
+ * GETATTR, READLINK, STATFS
+ */
+static int
+nfs_xdr_fhandle(struct rpc_rqst *req, u32 *p, struct nfs_fh *fh)
+{
+	p = xdr_encode_fhandle(p, fh);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode SETATTR arguments
+ */
+static int
+nfs_xdr_sattrargs(struct rpc_rqst *req, u32 *p, struct nfs_sattrargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_sattr(p, args->sattr);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode directory ops argument
+ * LOOKUP, REMOVE, RMDIR
+ */
+static int
+nfs_xdr_diropargs(struct rpc_rqst *req, u32 *p, struct nfs_diropargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Arguments to a READ call. Since we read data directly into the page
+ * cache, we also set up the reply iovec here so that iov[1] points
+ * exactly to the page we want to fetch.
+ */
+static int
+nfs_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
+{
+	struct rpc_auth	*auth = req->rq_task->tk_auth;
+	unsigned int replen;
+	u32 offset = (u32)args->offset;
+	u32 count = args->count;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	*p++ = htonl(offset);
+	*p++ = htonl(count);
+	*p++ = htonl(count);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen,
+			 args->pages, args->pgbase, count);
+	return 0;
+}
+
+/*
+ * Decode READ reply
+ */
+static int
+nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
+{
+	struct kvec *iov = req->rq_rcv_buf.head;
+	int	status, count, recvd, hdrlen;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+	p = xdr_decode_fattr(p, res->fattr);
+
+	count = ntohl(*p++);
+	res->eof = 0;
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READ reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READ header is short. iovec will be shifted.\n");
+		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
+	}
+
+	recvd = req->rq_rcv_buf.len - hdrlen;
+	if (count > recvd) {
+		printk(KERN_WARNING "NFS: server cheating in read reply: "
+			"count %d > recvd %d\n", count, recvd);
+		count = recvd;
+	}
+
+	dprintk("RPC:      readres OK count %d\n", count);
+	if (count < res->count)
+		res->count = count;
+
+	return count;
+}
+
+
+/*
+ * Write arguments. Splice the buffer to be written into the iovec.
+ */
+static int
+nfs_xdr_writeargs(struct rpc_rqst *req, u32 *p, struct nfs_writeargs *args)
+{
+	struct xdr_buf *sndbuf = &req->rq_snd_buf;
+	u32 offset = (u32)args->offset;
+	u32 count = args->count;
+
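+	/*
+	 * NFSv2 WRITE carries the obsolete beginoffset and totalcount fields;
+	 * they are filled with the offset and count respectively.
+	 */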
+	p = xdr_encode_fhandle(p, args->fh);
+	*p++ = htonl(offset);
+	*p++ = htonl(offset);
+	*p++ = htonl(count);
+	*p++ = htonl(count);
+	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
+
+	/* Copy the page array */
+	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
+	return 0;
+}
+
+/*
+ * Encode create arguments
+ * CREATE, MKDIR
+ */
+static int
+nfs_xdr_createargs(struct rpc_rqst *req, u32 *p, struct nfs_createargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+	p = xdr_encode_sattr(p, args->sattr);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode RENAME arguments
+ */
+static int
+nfs_xdr_renameargs(struct rpc_rqst *req, u32 *p, struct nfs_renameargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_array(p, args->fromname, args->fromlen);
+	p = xdr_encode_fhandle(p, args->tofh);
+	p = xdr_encode_array(p, args->toname, args->tolen);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode LINK arguments
+ */
+static int
+nfs_xdr_linkargs(struct rpc_rqst *req, u32 *p, struct nfs_linkargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_fhandle(p, args->tofh);
+	p = xdr_encode_array(p, args->toname, args->tolen);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode SYMLINK arguments
+ */
+static int
+nfs_xdr_symlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_symlinkargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_array(p, args->fromname, args->fromlen);
+	p = xdr_encode_array(p, args->topath, args->tolen);
+	p = xdr_encode_sattr(p, args->sattr);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode arguments to readdir call
+ */
+static int
+nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
+{
+	struct rpc_task	*task = req->rq_task;
+	struct rpc_auth	*auth = task->tk_auth;
+	unsigned int replen;
+	u32 count = args->count;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	*p++ = htonl(args->cookie);
+	*p++ = htonl(count);	/* maximum size of the directory listing reply */
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
+	return 0;
+}
+
+/*
+ * Decode the result of a readdir call.
+ * We're not really decoding anymore; we just leave the buffer untouched
+ * and only check that it is syntactically correct.
+ * The real decoding happens in nfs_decode_entry below, called directly
+ * from nfs_readdir for each entry.
+ */
+static int
+nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
+{
+	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+	struct kvec *iov = rcvbuf->head;
+	struct page **page;
+	int hdrlen, recvd;
+	int status, nr;
+	unsigned int len, pglen;
+	u32 *end, *entry, *kaddr;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READDIR reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
+		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
+	}
+
+	pglen = rcvbuf->page_len;
+	recvd = rcvbuf->len - hdrlen;
+	if (pglen > recvd)
+		pglen = recvd;
+	page = rcvbuf->pages;
+	kaddr = p = (u32 *)kmap_atomic(*page, KM_USER0);
+	end = (u32 *)((char *)p + pglen);
+	entry = p;
+	for (nr = 0; *p++; nr++) {
+		if (p + 2 > end)
+			goto short_pkt;
+		p++; /* fileid */
+		len = ntohl(*p++);
+		p += XDR_QUADLEN(len) + 1;	/* name plus cookie */
+		if (len > NFS2_MAXNAMLEN) {
+			printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)!\n",
+						len);
+			goto err_unmap;
+		}
+		if (p + 2 > end)
+			goto short_pkt;
+		entry = p;
+	}
+	if (!nr && (entry[0] != 0 || entry[1] == 0))
+		goto short_pkt;
+ out:
+	kunmap_atomic(kaddr, KM_USER0);
+	return nr;
+ short_pkt:
+	entry[0] = entry[1] = 0;
+	/* truncate listing ? */
+	if (!nr) {
+		printk(KERN_NOTICE "NFS: readdir reply truncated!\n");
+		entry[1] = 1;
+	}
+	goto out;
+err_unmap:
+	nr = -errno_NFSERR_IO;
+	goto out;
+}
+
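+/*
+ * Decode a single readdir entry in place.  Returns -EAGAIN when the
+ * current buffer is exhausted, or -EBADCOOKIE once the end-of-directory
+ * marker has been seen.
+ */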
+u32 *
+nfs_decode_dirent(u32 *p, struct nfs_entry *entry, int plus)
+{
+	if (!*p++) {
+		if (!*p)
+			return ERR_PTR(-EAGAIN);
+		entry->eof = 1;
+		return ERR_PTR(-EBADCOOKIE);
+	}
+
+	entry->ino	  = ntohl(*p++);
+	entry->len	  = ntohl(*p++);
+	entry->name	  = (const char *) p;
+	p		 += XDR_QUADLEN(entry->len);
+	entry->prev_cookie	  = entry->cookie;
+	entry->cookie	  = ntohl(*p++);
+	entry->eof	  = !p[0] && p[1];
+
+	return p;
+}
+
+/*
+ * NFS XDR decode functions
+ */
+/*
+ * Decode simple status reply
+ */
+static int
+nfs_xdr_stat(struct rpc_rqst *req, u32 *p, void *dummy)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)) != 0)
+		status = -nfs_stat_to_errno(status);
+	return status;
+}
+
+/*
+ * Decode attrstat reply
+ * GETATTR, SETATTR, WRITE
+ */
+static int
+nfs_xdr_attrstat(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+	xdr_decode_fattr(p, fattr);
+	return 0;
+}
+
+/*
+ * Decode diropres reply
+ * LOOKUP, CREATE, MKDIR
+ */
+static int
+nfs_xdr_diropres(struct rpc_rqst *req, u32 *p, struct nfs_diropok *res)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+	p = xdr_decode_fhandle(p, res->fh);
+	xdr_decode_fattr(p, res->fattr);
+	return 0;
+}
+
+/*
+ * Encode READLINK args
+ */
+static int
+nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args)
+{
+	struct rpc_auth *auth = req->rq_task->tk_auth;
+	unsigned int replen;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
+	return 0;
+}
+
+/*
+ * Decode READLINK reply
+ */
+static int
+nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
+{
+	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+	struct kvec *iov = rcvbuf->head;
+	int hdrlen, len, recvd;
+	char	*kaddr;
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+	/* Convert length of symlink */
+	len = ntohl(*p++);
+	if (len >= rcvbuf->page_len || len <= 0) {
+		dprintk(KERN_WARNING "nfs: server returned giant symlink!\n");
+		return -ENAMETOOLONG;
+	}
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READLINK reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
+		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
+	}
+	recvd = req->rq_rcv_buf.len - hdrlen;
+	if (recvd < len) {
+		printk(KERN_WARNING "NFS: server cheating in readlink reply: "
+				"count %u > recvd %u\n", len, recvd);
+		return -EIO;
+	}
+
+	/* NULL terminate the string we got */
+	kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);
+	kaddr[len+rcvbuf->page_base] = '\0';
+	kunmap_atomic(kaddr, KM_USER0);
+	return 0;
+}
+
+/*
+ * Decode WRITE reply
+ */
+static int
+nfs_xdr_writeres(struct rpc_rqst *req, u32 *p, struct nfs_writeres *res)
+{
+	res->verf->committed = NFS_FILE_SYNC;
+	return nfs_xdr_attrstat(req, p, res->fattr);
+}
+
+/*
+ * Decode STATFS reply
+ */
+static int
+nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs2_fsstat *res)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+
+	res->tsize  = ntohl(*p++);
+	res->bsize  = ntohl(*p++);
+	res->blocks = ntohl(*p++);
+	res->bfree  = ntohl(*p++);
+	res->bavail = ntohl(*p++);
+	return 0;
+}
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static struct {
+	int stat;
+	int errno;
+} nfs_errtbl[] = {
+	{ NFS_OK,		0		},
+	{ NFSERR_PERM,		EPERM		},
+	{ NFSERR_NOENT,		ENOENT		},
+	{ NFSERR_IO,		errno_NFSERR_IO	},
+	{ NFSERR_NXIO,		ENXIO		},
+/*	{ NFSERR_EAGAIN,	EAGAIN		}, */
+	{ NFSERR_ACCES,		EACCES		},
+	{ NFSERR_EXIST,		EEXIST		},
+	{ NFSERR_XDEV,		EXDEV		},
+	{ NFSERR_NODEV,		ENODEV		},
+	{ NFSERR_NOTDIR,	ENOTDIR		},
+	{ NFSERR_ISDIR,		EISDIR		},
+	{ NFSERR_INVAL,		EINVAL		},
+	{ NFSERR_FBIG,		EFBIG		},
+	{ NFSERR_NOSPC,		ENOSPC		},
+	{ NFSERR_ROFS,		EROFS		},
+	{ NFSERR_MLINK,		EMLINK		},
+	{ NFSERR_NAMETOOLONG,	ENAMETOOLONG	},
+	{ NFSERR_NOTEMPTY,	ENOTEMPTY	},
+	{ NFSERR_DQUOT,		EDQUOT		},
+	{ NFSERR_STALE,		ESTALE		},
+	{ NFSERR_REMOTE,	EREMOTE		},
+#ifdef EWFLUSH
+	{ NFSERR_WFLUSH,	EWFLUSH		},
+#endif
+	{ NFSERR_BADHANDLE,	EBADHANDLE	},
+	{ NFSERR_NOT_SYNC,	ENOTSYNC	},
+	{ NFSERR_BAD_COOKIE,	EBADCOOKIE	},
+	{ NFSERR_NOTSUPP,	ENOTSUPP	},
+	{ NFSERR_TOOSMALL,	ETOOSMALL	},
+	{ NFSERR_SERVERFAULT,	ESERVERFAULT	},
+	{ NFSERR_BADTYPE,	EBADTYPE	},
+	{ NFSERR_JUKEBOX,	EJUKEBOX	},
+	{ -1,			EIO		}
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used jointly by NFSv2 and NFSv3.
+ */
+int
+nfs_stat_to_errno(int stat)
+{
+	int i;
+
+	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+		if (nfs_errtbl[i].stat == stat)
+			return nfs_errtbl[i].errno;
+	}
+	printk(KERN_ERR "nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
+	return nfs_errtbl[i].errno;
+}
+
+#ifndef MAX
+# define MAX(a, b)	(((a) > (b))? (a) : (b))
+#endif
+
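+/*
+ * The final PROC() argument selects the RPC round-trip timer class used
+ * for this procedure.
+ */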
+#define PROC(proc, argtype, restype, timer)				\
+[NFSPROC_##proc] = {							\
+	.p_proc	    =  NFSPROC_##proc,					\
+	.p_encode   =  (kxdrproc_t) nfs_xdr_##argtype,			\
+	.p_decode   =  (kxdrproc_t) nfs_xdr_##restype,			\
+	.p_bufsiz   =  MAX(NFS_##argtype##_sz,NFS_##restype##_sz) << 2,	\
+	.p_timer    =  timer						\
+	}
+struct rpc_procinfo	nfs_procedures[] = {
+    PROC(GETATTR,	fhandle,	attrstat, 1),
+    PROC(SETATTR,	sattrargs,	attrstat, 0),
+    PROC(LOOKUP,	diropargs,	diropres, 2),
+    PROC(READLINK,	readlinkargs,	readlinkres, 3),
+    PROC(READ,		readargs,	readres, 3),
+    PROC(WRITE,		writeargs,	writeres, 4),
+    PROC(CREATE,	createargs,	diropres, 0),
+    PROC(REMOVE,	diropargs,	stat, 0),
+    PROC(RENAME,	renameargs,	stat, 0),
+    PROC(LINK,		linkargs,	stat, 0),
+    PROC(SYMLINK,	symlinkargs,	stat, 0),
+    PROC(MKDIR,		createargs,	diropres, 0),
+    PROC(RMDIR,		diropargs,	stat, 0),
+    PROC(READDIR,	readdirargs,	readdirres, 3),
+    PROC(STATFS,	fhandle,	statfsres, 0),
+};
+
+struct rpc_version		nfs_version2 = {
+	.number			= 2,
+	.nrprocs		= sizeof(nfs_procedures)/sizeof(nfs_procedures[0]),
+	.procs			= nfs_procedures
+};
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
new file mode 100644
index 0000000..3878494d
--- /dev/null
+++ b/fs/nfs/nfs3proc.c
@@ -0,0 +1,859 @@
+/*
+ *  linux/fs/nfs/nfs3proc.c
+ *
+ *  Client-side NFSv3 procedure stubs.
+ *
+ *  Copyright (C) 1997, Olaf Kirch
+ */
+
+#include <linux/mm.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/lockd/bind.h>
+#include <linux/smp_lock.h>
+
+#define NFSDBG_FACILITY		NFSDBG_PROC
+
+extern struct rpc_procinfo nfs3_procedures[];
+
+/* A wrapper to handle the EJUKEBOX error message */
+static int
+nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+{
+	sigset_t oldset;
+	int res;
+	rpc_clnt_sigmask(clnt, &oldset);
+	do {
+		res = rpc_call_sync(clnt, msg, flags);
+		if (res != -EJUKEBOX)
+			break;
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
+		res = -ERESTARTSYS;
+	} while (!signalled());
+	rpc_clnt_sigunmask(clnt, &oldset);
+	return res;
+}
+
+static inline int
+nfs3_rpc_call_wrapper(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
+{
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs3_procedures[proc],
+		.rpc_argp	= argp,
+		.rpc_resp	= resp,
+	};
+	return nfs3_rpc_wrapper(clnt, &msg, flags);
+}
+
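+/*
+ * Route the synchronous calls in this file through the jukebox-aware
+ * wrapper above.
+ */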
+#define rpc_call(clnt, proc, argp, resp, flags) \
+		nfs3_rpc_call_wrapper(clnt, proc, argp, resp, flags)
+#define rpc_call_sync(clnt, msg, flags) \
+		nfs3_rpc_wrapper(clnt, msg, flags)
+
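+/*
+ * Retry an asynchronous call that failed with EJUKEBOX, after giving the
+ * server some time to bring the file back online.
+ */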
+static int
+nfs3_async_handle_jukebox(struct rpc_task *task)
+{
+	if (task->tk_status != -EJUKEBOX)
+		return 0;
+	task->tk_status = 0;
+	rpc_restart_call(task);
+	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+	return 1;
+}
+
+/*
+ * Bare-bones access to getattr: this is for nfs_read_super.
+ */
+static int
+nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		   struct nfs_fsinfo *info)
+{
+	int	status;
+
+	dprintk("%s: call  fsinfo\n", __FUNCTION__);
+	info->fattr->valid = 0;
+	status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
+	dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
+	if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+		status = rpc_call(server->client_sys, NFS3PROC_GETATTR, fhandle, info->fattr, 0);
+		dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
+	}
+	return status;
+}
+
+/*
+ * One function for each procedure in the NFS protocol.
+ */
+static int
+nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fattr *fattr)
+{
+	int	status;
+
+	dprintk("NFS call  getattr\n");
+	fattr->valid = 0;
+	status = rpc_call(server->client, NFS3PROC_GETATTR,
+			  fhandle, fattr, 0);
+	dprintk("NFS reply getattr: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+			struct iattr *sattr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct nfs3_sattrargs	arg = {
+		.fh		= NFS_FH(inode),
+		.sattr		= sattr,
+	};
+	int	status;
+
+	dprintk("NFS call  setattr\n");
+	fattr->valid = 0;
+	status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0);
+	dprintk("NFS reply setattr: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_lookup(struct inode *dir, struct qstr *name,
+		 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs_fattr	dir_attr;
+	struct nfs3_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	struct nfs3_diropres	res = {
+		.dir_attr	= &dir_attr,
+		.fh		= fhandle,
+		.fattr		= fattr
+	};
+	int			status;
+
+	dprintk("NFS call  lookup %s\n", name->name);
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_LOOKUP, &arg, &res, 0);
+	if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR))
+		status = rpc_call(NFS_CLIENT(dir), NFS3PROC_GETATTR,
+			 fhandle, fattr, 0);
+	dprintk("NFS reply lookup: %d\n", status);
+	if (status >= 0)
+		status = nfs_refresh_inode(dir, &dir_attr);
+	return status;
+}
+
+static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+	struct nfs_fattr	fattr;
+	struct nfs3_accessargs	arg = {
+		.fh		= NFS_FH(inode),
+	};
+	struct nfs3_accessres	res = {
+		.fattr		= &fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_ACCESS],
+		.rpc_argp	= &arg,
+		.rpc_resp	= &res,
+		.rpc_cred	= entry->cred
+	};
+	int mode = entry->mask;
+	int status;
+
+	dprintk("NFS call  access\n");
+	fattr.valid = 0;
+
+	if (mode & MAY_READ)
+		arg.access |= NFS3_ACCESS_READ;
+	if (S_ISDIR(inode->i_mode)) {
+		if (mode & MAY_WRITE)
+			arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE;
+		if (mode & MAY_EXEC)
+			arg.access |= NFS3_ACCESS_LOOKUP;
+	} else {
+		if (mode & MAY_WRITE)
+			arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND;
+		if (mode & MAY_EXEC)
+			arg.access |= NFS3_ACCESS_EXECUTE;
+	}
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	nfs_refresh_inode(inode, &fattr);
+	if (status == 0) {
+		entry->mask = 0;
+		if (res.access & NFS3_ACCESS_READ)
+			entry->mask |= MAY_READ;
+		if (res.access & (NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE))
+			entry->mask |= MAY_WRITE;
+		if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
+			entry->mask |= MAY_EXEC;
+	}
+	dprintk("NFS reply access: %d\n", status);
+	return status;
+}
+
+static int nfs3_proc_readlink(struct inode *inode, struct page *page,
+		unsigned int pgbase, unsigned int pglen)
+{
+	struct nfs_fattr	fattr;
+	struct nfs3_readlinkargs args = {
+		.fh		= NFS_FH(inode),
+		.pgbase		= pgbase,
+		.pglen		= pglen,
+		.pages		= &page
+	};
+	int			status;
+
+	dprintk("NFS call  readlink\n");
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(inode), NFS3PROC_READLINK,
+			  &args, &fattr, 0);
+	nfs_refresh_inode(inode, &fattr);
+	dprintk("NFS reply readlink: %d\n", status);
+	return status;
+}
+
+static int nfs3_proc_read(struct nfs_read_data *rdata)
+{
+	int			flags = rdata->flags;
+	struct inode *		inode = rdata->inode;
+	struct nfs_fattr *	fattr = rdata->res.fattr;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_READ],
+		.rpc_argp	= &rdata->args,
+		.rpc_resp	= &rdata->res,
+		.rpc_cred	= rdata->cred,
+	};
+	int			status;
+
+	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
+			(long long) rdata->args.offset);
+	fattr->valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
+	if (status >= 0)
+		nfs_refresh_inode(inode, fattr);
+	dprintk("NFS reply read: %d\n", status);
+	return status;
+}
+
+static int nfs3_proc_write(struct nfs_write_data *wdata)
+{
+	int			rpcflags = wdata->flags;
+	struct inode *		inode = wdata->inode;
+	struct nfs_fattr *	fattr = wdata->res.fattr;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_WRITE],
+		.rpc_argp	= &wdata->args,
+		.rpc_resp	= &wdata->res,
+		.rpc_cred	= wdata->cred,
+	};
+	int			status;
+
+	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
+			(long long) wdata->args.offset);
+	fattr->valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
+	if (status >= 0)
+		nfs_refresh_inode(inode, fattr);
+	dprintk("NFS reply write: %d\n", status);
+	return status < 0? status : wdata->res.count;
+}
+
+static int nfs3_proc_commit(struct nfs_write_data *cdata)
+{
+	struct inode *		inode = cdata->inode;
+	struct nfs_fattr *	fattr = cdata->res.fattr;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_COMMIT],
+		.rpc_argp	= &cdata->args,
+		.rpc_resp	= &cdata->res,
+		.rpc_cred	= cdata->cred,
+	};
+	int			status;
+
+	dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
+			(long long) cdata->args.offset);
+	fattr->valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	if (status >= 0)
+		nfs_refresh_inode(inode, fattr);
+	dprintk("NFS reply commit: %d\n", status);
+	return status;
+}
+
+/*
+ * Create a regular file.
+ * For now, we don't implement O_EXCL.
+ */
+static int
+nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+		 int flags)
+{
+	struct nfs_fh		fhandle;
+	struct nfs_fattr	fattr;
+	struct nfs_fattr	dir_attr;
+	struct nfs3_createargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr,
+	};
+	struct nfs3_diropres	res = {
+		.dir_attr	= &dir_attr,
+		.fh		= &fhandle,
+		.fattr		= &fattr
+	};
+	int			status;
+
+	dprintk("NFS call  create %s\n", dentry->d_name.name);
+	arg.createmode = NFS3_CREATE_UNCHECKED;
+	if (flags & O_EXCL) {
+		arg.createmode  = NFS3_CREATE_EXCLUSIVE;
+		arg.verifier[0] = jiffies;
+		arg.verifier[1] = current->pid;
+	}
+
+again:
+	dir_attr.valid = 0;
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_CREATE, &arg, &res, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+
+	/* If the server doesn't support the exclusive creation semantics,
+	 * try again with simple 'guarded' mode. */
+	if (status == NFSERR_NOTSUPP) {
+		switch (arg.createmode) {
+			case NFS3_CREATE_EXCLUSIVE:
+				arg.createmode = NFS3_CREATE_GUARDED;
+				break;
+
+			case NFS3_CREATE_GUARDED:
+				arg.createmode = NFS3_CREATE_UNCHECKED;
+				break;
+
+			case NFS3_CREATE_UNCHECKED:
+				goto out;
+		}
+		goto again;
+	}
+
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	if (status != 0)
+		goto out;
+
+	/* When we created the file with exclusive semantics, make
+	 * sure we set the attributes afterwards. */
+	if (arg.createmode == NFS3_CREATE_EXCLUSIVE) {
+		dprintk("NFS call  setattr (post-create)\n");
+
+		if (!(sattr->ia_valid & ATTR_ATIME_SET))
+			sattr->ia_valid |= ATTR_ATIME;
+		if (!(sattr->ia_valid & ATTR_MTIME_SET))
+			sattr->ia_valid |= ATTR_MTIME;
+
+		/* Note: we could use a guarded setattr here, but I'm
+		 * not sure this buys us anything (and I'd have
+		 * to revamp the NFSv3 XDR code) */
+		status = nfs3_proc_setattr(dentry, &fattr, sattr);
+		nfs_refresh_inode(dentry->d_inode, &fattr);
+		dprintk("NFS reply setattr (post-create): %d\n", status);
+	}
+out:
+	dprintk("NFS reply create: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_remove(struct inode *dir, struct qstr *name)
+{
+	struct nfs_fattr	dir_attr;
+	struct nfs3_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_REMOVE],
+		.rpc_argp	= &arg,
+		.rpc_resp	= &dir_attr,
+	};
+	int			status;
+
+	dprintk("NFS call  remove %s\n", name->name);
+	dir_attr.valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	dprintk("NFS reply remove: %d\n", status);
+	return status;
+}
+
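+/*
+ * Set up an asynchronous REMOVE call for the sillyrename path; the
+ * combined argument/result buffer is freed in nfs3_proc_unlink_done().
+ */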
+static int
+nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+{
+	struct unlinkxdr {
+		struct nfs3_diropargs arg;
+		struct nfs_fattr res;
+	} *ptr;
+
+	ptr = (struct unlinkxdr *)kmalloc(sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	ptr->arg.fh = NFS_FH(dir->d_inode);
+	ptr->arg.name = name->name;
+	ptr->arg.len = name->len;
+	ptr->res.valid = 0;
+	msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
+	msg->rpc_argp = &ptr->arg;
+	msg->rpc_resp = &ptr->res;
+	return 0;
+}
+
+static int
+nfs3_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+{
+	struct rpc_message *msg = &task->tk_msg;
+	struct nfs_fattr	*dir_attr;
+
+	if (nfs3_async_handle_jukebox(task))
+		return 1;
+	if (msg->rpc_argp) {
+		dir_attr = (struct nfs_fattr*)msg->rpc_resp;
+		nfs_refresh_inode(dir->d_inode, dir_attr);
+		kfree(msg->rpc_argp);
+	}
+	return 0;
+}
+
+static int
+nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
+		 struct inode *new_dir, struct qstr *new_name)
+{
+	struct nfs_fattr	old_dir_attr, new_dir_attr;
+	struct nfs3_renameargs	arg = {
+		.fromfh		= NFS_FH(old_dir),
+		.fromname	= old_name->name,
+		.fromlen	= old_name->len,
+		.tofh		= NFS_FH(new_dir),
+		.toname		= new_name->name,
+		.tolen		= new_name->len
+	};
+	struct nfs3_renameres	res = {
+		.fromattr	= &old_dir_attr,
+		.toattr		= &new_dir_attr
+	};
+	int			status;
+
+	dprintk("NFS call  rename %s -> %s\n", old_name->name, new_name->name);
+	old_dir_attr.valid = 0;
+	new_dir_attr.valid = 0;
+	status = rpc_call(NFS_CLIENT(old_dir), NFS3PROC_RENAME, &arg, &res, 0);
+	nfs_refresh_inode(old_dir, &old_dir_attr);
+	nfs_refresh_inode(new_dir, &new_dir_attr);
+	dprintk("NFS reply rename: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+	struct nfs_fattr	dir_attr, fattr;
+	struct nfs3_linkargs	arg = {
+		.fromfh		= NFS_FH(inode),
+		.tofh		= NFS_FH(dir),
+		.toname		= name->name,
+		.tolen		= name->len
+	};
+	struct nfs3_linkres	res = {
+		.dir_attr	= &dir_attr,
+		.fattr		= &fattr
+	};
+	int			status;
+
+	dprintk("NFS call  link %s\n", name->name);
+	dir_attr.valid = 0;
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(inode), NFS3PROC_LINK, &arg, &res, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	nfs_refresh_inode(inode, &fattr);
+	dprintk("NFS reply link: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
+		  struct iattr *sattr, struct nfs_fh *fhandle,
+		  struct nfs_fattr *fattr)
+{
+	struct nfs_fattr	dir_attr;
+	struct nfs3_symlinkargs	arg = {
+		.fromfh		= NFS_FH(dir),
+		.fromname	= name->name,
+		.fromlen	= name->len,
+		.topath		= path->name,
+		.tolen		= path->len,
+		.sattr		= sattr
+	};
+	struct nfs3_diropres	res = {
+		.dir_attr	= &dir_attr,
+		.fh		= fhandle,
+		.fattr		= fattr
+	};
+	int			status;
+
+	if (path->len > NFS3_MAXPATHLEN)
+		return -ENAMETOOLONG;
+	dprintk("NFS call  symlink %s -> %s\n", name->name, path->name);
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_SYMLINK, &arg, &res, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	dprintk("NFS reply symlink: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
+{
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr, dir_attr;
+	struct nfs3_mkdirargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr
+	};
+	struct nfs3_diropres	res = {
+		.dir_attr	= &dir_attr,
+		.fh		= &fhandle,
+		.fattr		= &fattr
+	};
+	int			status;
+
+	dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+	dir_attr.valid = 0;
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	dprintk("NFS reply mkdir: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
+{
+	struct nfs_fattr	dir_attr;
+	struct nfs3_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	int			status;
+
+	dprintk("NFS call  rmdir %s\n", name->name);
+	dir_attr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_RMDIR, &arg, &dir_attr, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	dprintk("NFS reply rmdir: %d\n", status);
+	return status;
+}
+
+/*
+ * The READDIR implementation is somewhat hackish - we pass the user buffer
+ * to the encode function, which installs it in the receive iovec.
+ * The decode function itself doesn't perform any decoding, it just makes
+ * sure the reply is syntactically correct.
+ *
+ * Also note that this implementation handles both plain readdir and
+ * readdirplus.
+ */
+static int
+nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+		  u64 cookie, struct page *page, unsigned int count, int plus)
+{
+	struct inode		*dir = dentry->d_inode;
+	struct nfs_fattr	dir_attr;
+	u32			*verf = NFS_COOKIEVERF(dir);
+	struct nfs3_readdirargs	arg = {
+		.fh		= NFS_FH(dir),
+		.cookie		= cookie,
+		.verf		= {verf[0], verf[1]},
+		.plus		= plus,
+		.count		= count,
+		.pages		= &page
+	};
+	struct nfs3_readdirres	res = {
+		.dir_attr	= &dir_attr,
+		.verf		= verf,
+		.plus		= plus
+	};
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_READDIR],
+		.rpc_argp	= &arg,
+		.rpc_resp	= &res,
+		.rpc_cred	= cred
+	};
+	int			status;
+
+	lock_kernel();
+
+	if (plus)
+		msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
+
+	dprintk("NFS call  readdir%s %d\n",
+			plus? "plus" : "", (unsigned int) cookie);
+
+	dir_attr.valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	dprintk("NFS reply readdir: %d\n", status);
+	unlock_kernel();
+	return status;
+}
+
+static int
+nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+		dev_t rdev)
+{
+	struct nfs_fh fh;
+	struct nfs_fattr fattr, dir_attr;
+	struct nfs3_mknodargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr,
+		.rdev		= rdev
+	};
+	struct nfs3_diropres	res = {
+		.dir_attr	= &dir_attr,
+		.fh		= &fh,
+		.fattr		= &fattr
+	};
+	int status;
+
+	switch (sattr->ia_mode & S_IFMT) {
+	case S_IFBLK:	arg.type = NF3BLK;  break;
+	case S_IFCHR:	arg.type = NF3CHR;  break;
+	case S_IFIFO:	arg.type = NF3FIFO; break;
+	case S_IFSOCK:	arg.type = NF3SOCK; break;
+	default:	return -EINVAL;
+	}
+
+	dprintk("NFS call  mknod %s %u:%u\n", dentry->d_name.name,
+			MAJOR(rdev), MINOR(rdev));
+	dir_attr.valid = 0;
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0);
+	nfs_refresh_inode(dir, &dir_attr);
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fh, &fattr);
+	dprintk("NFS reply mknod: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+		 struct nfs_fsstat *stat)
+{
+	int	status;
+
+	dprintk("NFS call  fsstat\n");
+	stat->fattr->valid = 0;
+	status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0);
+	dprintk("NFS reply statfs: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+		 struct nfs_fsinfo *info)
+{
+	int	status;
+
+	dprintk("NFS call  fsinfo\n");
+	info->fattr->valid = 0;
+	status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
+	dprintk("NFS reply fsinfo: %d\n", status);
+	return status;
+}
+
+static int
+nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		   struct nfs_pathconf *info)
+{
+	int	status;
+
+	dprintk("NFS call  pathconf\n");
+	info->fattr->valid = 0;
+	status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0);
+	dprintk("NFS reply pathconf: %d\n", status);
+	return status;
+}
+
+extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
+
+static void
+nfs3_read_done(struct rpc_task *task)
+{
+	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+
+	if (nfs3_async_handle_jukebox(task))
+		return;
+	/* Call back common NFS readpage processing */
+	if (task->tk_status >= 0)
+		nfs_refresh_inode(data->inode, &data->fattr);
+	nfs_readpage_result(task);
+}
+
+static void
+nfs3_proc_read_setup(struct nfs_read_data *data)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode = data->inode;
+	int			flags;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_READ],
+		.rpc_argp	= &data->args,
+		.rpc_resp	= &data->res,
+		.rpc_cred	= data->cred,
+	};
+
+	/* N.B. Do we need to test? Never called for swapfile inode */
+	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs3_write_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data;
+
+	if (nfs3_async_handle_jukebox(task))
+		return;
+	data = (struct nfs_write_data *)task->tk_calldata;
+	if (task->tk_status >= 0)
+		nfs_refresh_inode(data->inode, data->res.fattr);
+	nfs_writeback_done(task);
+}
+
+static void
+nfs3_proc_write_setup(struct nfs_write_data *data, int how)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode = data->inode;
+	int			stable;
+	int			flags;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_WRITE],
+		.rpc_argp	= &data->args,
+		.rpc_resp	= &data->res,
+		.rpc_cred	= data->cred,
+	};
+
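+	/*
+	 * Pick the NFSv3 stable_how level: a stable flush uses FILE_SYNC
+	 * when no unstable writes are awaiting commit, and DATA_SYNC when
+	 * a COMMIT for earlier writes is pending anyway; everything else
+	 * is sent UNSTABLE.
+	 */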
+	if (how & FLUSH_STABLE) {
+		if (!NFS_I(inode)->ncommit)
+			stable = NFS_FILE_SYNC;
+		else
+			stable = NFS_DATA_SYNC;
+	} else
+		stable = NFS_UNSTABLE;
+	data->args.stable = stable;
+
+	/* Set the initial flags for the task.  */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs3_commit_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data;
+
+	if (nfs3_async_handle_jukebox(task))
+		return;
+	data = (struct nfs_write_data *)task->tk_calldata;
+	if (task->tk_status >= 0)
+		nfs_refresh_inode(data->inode, data->res.fattr);
+	nfs_commit_done(task);
+}
+
+static void
+nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode = data->inode;
+	int			flags;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs3_procedures[NFS3PROC_COMMIT],
+		.rpc_argp	= &data->args,
+		.rpc_resp	= &data->res,
+		.rpc_cred	= data->cred,
+	};
+
+	/* Set the initial flags for the task.  */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static int
+nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	return nlmclnt_proc(filp->f_dentry->d_inode, cmd, fl);
+}
+
+struct nfs_rpc_ops	nfs_v3_clientops = {
+	.version	= 3,			/* protocol version */
+	.dentry_ops	= &nfs_dentry_operations,
+	.dir_inode_ops	= &nfs_dir_inode_operations,
+	.getroot	= nfs3_proc_get_root,
+	.getattr	= nfs3_proc_getattr,
+	.setattr	= nfs3_proc_setattr,
+	.lookup		= nfs3_proc_lookup,
+	.access		= nfs3_proc_access,
+	.readlink	= nfs3_proc_readlink,
+	.read		= nfs3_proc_read,
+	.write		= nfs3_proc_write,
+	.commit		= nfs3_proc_commit,
+	.create		= nfs3_proc_create,
+	.remove		= nfs3_proc_remove,
+	.unlink_setup	= nfs3_proc_unlink_setup,
+	.unlink_done	= nfs3_proc_unlink_done,
+	.rename		= nfs3_proc_rename,
+	.link		= nfs3_proc_link,
+	.symlink	= nfs3_proc_symlink,
+	.mkdir		= nfs3_proc_mkdir,
+	.rmdir		= nfs3_proc_rmdir,
+	.readdir	= nfs3_proc_readdir,
+	.mknod		= nfs3_proc_mknod,
+	.statfs		= nfs3_proc_statfs,
+	.fsinfo		= nfs3_proc_fsinfo,
+	.pathconf	= nfs3_proc_pathconf,
+	.decode_dirent	= nfs3_decode_dirent,
+	.read_setup	= nfs3_proc_read_setup,
+	.write_setup	= nfs3_proc_write_setup,
+	.commit_setup	= nfs3_proc_commit_setup,
+	.file_open	= nfs_open,
+	.file_release	= nfs_release,
+	.lock		= nfs3_proc_lock,
+};
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
new file mode 100644
index 0000000..a3593d4
--- /dev/null
+++ b/fs/nfs/nfs3xdr.c
@@ -0,0 +1,1023 @@
+/*
+ * linux/fs/nfs/nfs3xdr.c
+ *
+ * XDR functions to encode/decode NFSv3 RPC arguments and results.
+ *
+ * Copyright (C) 1996, 1997 Olaf Kirch
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/kdev_t.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs_fs.h>
+
+#define NFSDBG_FACILITY		NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO		EIO
+
+extern int			nfs_stat_to_errno(int);
+
+/*
+ * Declare the space requirements for NFS arguments and replies as
+ * number of 32bit-words
+ */
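+/*
+ * For example, NFS3_fhandle_sz is one length word plus sixteen data words
+ * for the 64-byte maximum file handle (NFS3_FHSIZE).
+ */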
+#define NFS3_fhandle_sz		(1+16)
+#define NFS3_fh_sz		(NFS3_fhandle_sz)	/* shorthand */
+#define NFS3_sattr_sz		(15)
+#define NFS3_filename_sz	(1+(NFS3_MAXNAMLEN>>2))
+#define NFS3_path_sz		(1+(NFS3_MAXPATHLEN>>2))
+#define NFS3_fattr_sz		(21)
+#define NFS3_wcc_attr_sz		(6)
+#define NFS3_pre_op_attr_sz	(1+NFS3_wcc_attr_sz)
+#define NFS3_post_op_attr_sz	(1+NFS3_fattr_sz)
+#define NFS3_wcc_data_sz		(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
+#define NFS3_fsstat_sz		
+#define NFS3_fsinfo_sz		
+#define NFS3_pathconf_sz		
+#define NFS3_entry_sz		(NFS3_filename_sz+3)
+
+#define NFS3_sattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_diropargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
+#define NFS3_accessargs_sz	(NFS3_fh_sz+1)
+#define NFS3_readlinkargs_sz	(NFS3_fh_sz)
+#define NFS3_readargs_sz	(NFS3_fh_sz+3)
+#define NFS3_writeargs_sz	(NFS3_fh_sz+5)
+#define NFS3_createargs_sz	(NFS3_diropargs_sz+NFS3_sattr_sz)
+#define NFS3_mkdirargs_sz	(NFS3_diropargs_sz+NFS3_sattr_sz)
+#define NFS3_symlinkargs_sz	(NFS3_diropargs_sz+NFS3_path_sz+NFS3_sattr_sz)
+#define NFS3_mknodargs_sz	(NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_renameargs_sz	(NFS3_diropargs_sz+NFS3_diropargs_sz)
+#define NFS3_linkargs_sz		(NFS3_fh_sz+NFS3_diropargs_sz)
+#define NFS3_readdirargs_sz	(NFS3_fh_sz+2)
+#define NFS3_commitargs_sz	(NFS3_fh_sz+3)
+
+#define NFS3_attrstat_sz	(1+NFS3_fattr_sz)
+#define NFS3_wccstat_sz		(1+NFS3_wcc_data_sz)
+#define NFS3_lookupres_sz	(1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
+#define NFS3_accessres_sz	(1+NFS3_post_op_attr_sz+1)
+#define NFS3_readlinkres_sz	(1+NFS3_post_op_attr_sz+1)
+#define NFS3_readres_sz		(1+NFS3_post_op_attr_sz+3)
+#define NFS3_writeres_sz	(1+NFS3_wcc_data_sz+4)
+#define NFS3_createres_sz	(1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_renameres_sz	(1+(2 * NFS3_wcc_data_sz))
+#define NFS3_linkres_sz		(1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_readdirres_sz	(1+NFS3_post_op_attr_sz+2)
+#define NFS3_fsstatres_sz	(1+NFS3_post_op_attr_sz+13)
+#define NFS3_fsinfores_sz	(1+NFS3_post_op_attr_sz+12)
+#define NFS3_pathconfres_sz	(1+NFS3_post_op_attr_sz+6)
+#define NFS3_commitres_sz	(1+NFS3_wcc_data_sz+2)
+
+/*
+ * Map file type to S_IFMT bits
+ */
+static struct {
+	unsigned int	mode;
+	unsigned int	nfs2type;
+} nfs_type2fmt[] = {
+      { 0,		NFNON	},
+      { S_IFREG,	NFREG	},
+      { S_IFDIR,	NFDIR	},
+      { S_IFBLK,	NFBLK	},
+      { S_IFCHR,	NFCHR	},
+      { S_IFLNK,	NFLNK	},
+      { S_IFSOCK,	NFSOCK	},
+      { S_IFIFO,	NFFIFO	},
+      { 0,		NFBAD	}
+};
+
+/*
+ * Common NFS XDR functions as inlines
+ */
+static inline u32 *
+xdr_encode_fhandle(u32 *p, struct nfs_fh *fh)
+{
+	return xdr_encode_array(p, fh->data, fh->size);
+}
+
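+/*
+ * Decode a file handle; returns NULL when the advertised length exceeds
+ * NFS3_FHSIZE, so callers can treat the reply as truncated.
+ */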
+static inline u32 *
+xdr_decode_fhandle(u32 *p, struct nfs_fh *fh)
+{
+	if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) {
+		memcpy(fh->data, p, fh->size);
+		return p + XDR_QUADLEN(fh->size);
+	}
+	return NULL;
+}
+
+/*
+ * Encode/decode time.
+ */
+static inline u32 *
+xdr_encode_time3(u32 *p, struct timespec *timep)
+{
+	*p++ = htonl(timep->tv_sec);
+	*p++ = htonl(timep->tv_nsec);
+	return p;
+}
+
+static inline u32 *
+xdr_decode_time3(u32 *p, struct timespec *timep)
+{
+	timep->tv_sec = ntohl(*p++);
+	timep->tv_nsec = ntohl(*p++);
+	return p;
+}
+
+static u32 *
+xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
+{
+	unsigned int	type, major, minor;
+	int		fmode;
+
+	type = ntohl(*p++);
+	if (type >= NF3BAD)
+		type = NF3BAD;
+	fmode = nfs_type2fmt[type].mode;
+	fattr->type = nfs_type2fmt[type].nfs2type;
+	fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode;
+	fattr->nlink = ntohl(*p++);
+	fattr->uid = ntohl(*p++);
+	fattr->gid = ntohl(*p++);
+	p = xdr_decode_hyper(p, &fattr->size);
+	p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
+
+	/* Turn remote device info into Linux-specific dev_t */
+	major = ntohl(*p++);
+	minor = ntohl(*p++);
+	fattr->rdev = MKDEV(major, minor);
+	if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
+		fattr->rdev = 0;
+
+	p = xdr_decode_hyper(p, &fattr->fsid_u.nfs3);
+	p = xdr_decode_hyper(p, &fattr->fileid);
+	p = xdr_decode_time3(p, &fattr->atime);
+	p = xdr_decode_time3(p, &fattr->mtime);
+	p = xdr_decode_time3(p, &fattr->ctime);
+
+	/* Update the mode bits */
+	fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3);
+	fattr->timestamp = jiffies;
+	return p;
+}
+
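+/*
+ * Encode an sattr3: each attribute is preceded by a set_it discriminant.
+ * For atime/mtime, xdr_two selects SET_TO_CLIENT_TIME with an explicit
+ * value, while xdr_one selects SET_TO_SERVER_TIME.
+ */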
+static inline u32 *
+xdr_encode_sattr(u32 *p, struct iattr *attr)
+{
+	if (attr->ia_valid & ATTR_MODE) {
+		*p++ = xdr_one;
+		*p++ = htonl(attr->ia_mode);
+	} else {
+		*p++ = xdr_zero;
+	}
+	if (attr->ia_valid & ATTR_UID) {
+		*p++ = xdr_one;
+		*p++ = htonl(attr->ia_uid);
+	} else {
+		*p++ = xdr_zero;
+	}
+	if (attr->ia_valid & ATTR_GID) {
+		*p++ = xdr_one;
+		*p++ = htonl(attr->ia_gid);
+	} else {
+		*p++ = xdr_zero;
+	}
+	if (attr->ia_valid & ATTR_SIZE) {
+		*p++ = xdr_one;
+		p = xdr_encode_hyper(p, (__u64) attr->ia_size);
+	} else {
+		*p++ = xdr_zero;
+	}
+	if (attr->ia_valid & ATTR_ATIME_SET) {
+		*p++ = xdr_two;
+		p = xdr_encode_time3(p, &attr->ia_atime);
+	} else if (attr->ia_valid & ATTR_ATIME) {
+		*p++ = xdr_one;
+	} else {
+		*p++ = xdr_zero;
+	}
+	if (attr->ia_valid & ATTR_MTIME_SET) {
+		*p++ = xdr_two;
+		p = xdr_encode_time3(p, &attr->ia_mtime);
+	} else if (attr->ia_valid & ATTR_MTIME) {
+		*p++ = xdr_one;
+	} else {
+		*p++ = xdr_zero;
+	}
+	return p;
+}
+
+static inline u32 *
+xdr_decode_wcc_attr(u32 *p, struct nfs_fattr *fattr)
+{
+	p = xdr_decode_hyper(p, &fattr->pre_size);
+	p = xdr_decode_time3(p, &fattr->pre_mtime);
+	p = xdr_decode_time3(p, &fattr->pre_ctime);
+	fattr->valid |= NFS_ATTR_WCC;
+	return p;
+}
+
+static inline u32 *
+xdr_decode_post_op_attr(u32 *p, struct nfs_fattr *fattr)
+{
+	if (*p++)
+		p = xdr_decode_fattr(p, fattr);
+	return p;
+}
+
+static inline u32 *
+xdr_decode_pre_op_attr(u32 *p, struct nfs_fattr *fattr)
+{
+	if (*p++)
+		return xdr_decode_wcc_attr(p, fattr);
+	return p;
+}
+
+
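+/*
+ * wcc_data pairs the pre-operation size/mtime/ctime with the post-operation
+ * attributes, letting the client judge whether its cached data for the
+ * object is still valid.
+ */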
+static inline u32 *
+xdr_decode_wcc_data(u32 *p, struct nfs_fattr *fattr)
+{
+	p = xdr_decode_pre_op_attr(p, fattr);
+	return xdr_decode_post_op_attr(p, fattr);
+}
+
+/*
+ * NFS encode functions
+ */
+
+/*
+ * Encode file handle argument
+ */
+static int
+nfs3_xdr_fhandle(struct rpc_rqst *req, u32 *p, struct nfs_fh *fh)
+{
+	p = xdr_encode_fhandle(p, fh);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode SETATTR arguments
+ */
+static int
+nfs3_xdr_sattrargs(struct rpc_rqst *req, u32 *p, struct nfs3_sattrargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_sattr(p, args->sattr);
+	*p++ = htonl(args->guard);
+	if (args->guard)
+		p = xdr_encode_time3(p, &args->guardtime);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode directory ops argument
+ */
+static int
+nfs3_xdr_diropargs(struct rpc_rqst *req, u32 *p, struct nfs3_diropargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode access() argument
+ */
+static int
+nfs3_xdr_accessargs(struct rpc_rqst *req, u32 *p, struct nfs3_accessargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	*p++ = htonl(args->access);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Arguments to a READ call. Since we read data directly into the page
+ * cache, we also set up the reply iovec here so that iov[1] points
+ * exactly to the page we want to fetch.
+ */
+static int
+nfs3_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
+{
+	struct rpc_auth	*auth = req->rq_task->tk_auth;
+	unsigned int replen;
+	u32 count = args->count;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = htonl(count);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen,
+			 args->pages, args->pgbase, count);
+	return 0;
+}
+
+/*
+ * Write arguments. Splice the buffer to be written into the iovec.
+ */
+static int
+nfs3_xdr_writeargs(struct rpc_rqst *req, u32 *p, struct nfs_writeargs *args)
+{
+	struct xdr_buf *sndbuf = &req->rq_snd_buf;
+	u32 count = args->count;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = htonl(count);
+	*p++ = htonl(args->stable);
+	*p++ = htonl(count);
+	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
+
+	/* Copy the page array */
+	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
+	return 0;
+}
+
+/*
+ * Encode CREATE arguments
+ */
+static int
+nfs3_xdr_createargs(struct rpc_rqst *req, u32 *p, struct nfs3_createargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+
+	*p++ = htonl(args->createmode);
+	if (args->createmode == NFS3_CREATE_EXCLUSIVE) {
+		*p++ = args->verifier[0];
+		*p++ = args->verifier[1];
+	} else
+		p = xdr_encode_sattr(p, args->sattr);
+
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode MKDIR arguments
+ */
+static int
+nfs3_xdr_mkdirargs(struct rpc_rqst *req, u32 *p, struct nfs3_mkdirargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+	p = xdr_encode_sattr(p, args->sattr);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode SYMLINK arguments
+ */
+static int
+nfs3_xdr_symlinkargs(struct rpc_rqst *req, u32 *p, struct nfs3_symlinkargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_array(p, args->fromname, args->fromlen);
+	p = xdr_encode_sattr(p, args->sattr);
+	p = xdr_encode_array(p, args->topath, args->tolen);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode MKNOD arguments
+ */
+static int
+nfs3_xdr_mknodargs(struct rpc_rqst *req, u32 *p, struct nfs3_mknodargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_array(p, args->name, args->len);
+	*p++ = htonl(args->type);
+	p = xdr_encode_sattr(p, args->sattr);
+	if (args->type == NF3CHR || args->type == NF3BLK) {
+		*p++ = htonl(MAJOR(args->rdev));
+		*p++ = htonl(MINOR(args->rdev));
+	}
+
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode RENAME arguments
+ */
+static int
+nfs3_xdr_renameargs(struct rpc_rqst *req, u32 *p, struct nfs3_renameargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_array(p, args->fromname, args->fromlen);
+	p = xdr_encode_fhandle(p, args->tofh);
+	p = xdr_encode_array(p, args->toname, args->tolen);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode LINK arguments
+ */
+static int
+nfs3_xdr_linkargs(struct rpc_rqst *req, u32 *p, struct nfs3_linkargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fromfh);
+	p = xdr_encode_fhandle(p, args->tofh);
+	p = xdr_encode_array(p, args->toname, args->tolen);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * Encode arguments to readdir call
+ */
+static int
+nfs3_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs3_readdirargs *args)
+{
+	struct rpc_auth	*auth = req->rq_task->tk_auth;
+	unsigned int replen;
+	u32 count = args->count;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_hyper(p, args->cookie);
+	*p++ = args->verf[0];
+	*p++ = args->verf[1];
+	if (args->plus) {
+		/* readdirplus: need dircount + buffer size.
+		 * We just make dircount large enough. */
+		*p++ = htonl(count >> 3);
+	}
+	*p++ = htonl(count);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
+	return 0;
+}
+
+/*
+ * Decode the result of a readdir call.
+ * We just check for syntactical correctness.
+ */
+static int
+nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
+{
+	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+	struct kvec *iov = rcvbuf->head;
+	struct page **page;
+	int hdrlen, recvd;
+	int status, nr;
+	unsigned int len, pglen;
+	u32 *entry, *end, *kaddr;
+
+	status = ntohl(*p++);
+	/* Decode post_op_attrs */
+	p = xdr_decode_post_op_attr(p, res->dir_attr);
+	if (status)
+		return -nfs_stat_to_errno(status);
+	/* Decode verifier cookie */
+	if (res->verf) {
+		res->verf[0] = *p++;
+		res->verf[1] = *p++;
+	} else {
+		p += 2;
+	}
+
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READDIR reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
+		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
+	}
+
+	pglen = rcvbuf->page_len;
+	recvd = rcvbuf->len - hdrlen;
+	if (pglen > recvd)
+		pglen = recvd;
+	page = rcvbuf->pages;
+	kaddr = p = (u32 *)kmap_atomic(*page, KM_USER0);
+	end = (u32 *)((char *)p + pglen);
+	entry = p;
+	for (nr = 0; *p++; nr++) {
+		if (p + 3 > end)
+			goto short_pkt;
+		p += 2;				/* inode # */
+		len = ntohl(*p++);		/* string length */
+		p += XDR_QUADLEN(len) + 2;	/* name + cookie */
+		if (len > NFS3_MAXNAMLEN) {
+			printk(KERN_WARNING "NFS: giant filename in readdir (len %x)!\n",
+						len);
+			goto err_unmap;
+		}
+
+		if (res->plus) {
+			/* post_op_attr */
+			if (p + 2 > end)
+				goto short_pkt;
+			if (*p++) {
+				p += 21;
+				if (p + 1 > end)
+					goto short_pkt;
+			}
+			/* post_op_fh3 */
+			if (*p++) {
+				if (p + 1 > end)
+					goto short_pkt;
+				len = ntohl(*p++);
+				if (len > NFS3_FHSIZE) {
+					printk(KERN_WARNING "NFS: giant filehandle in "
+						"readdir (len %x)!\n", len);
+					goto err_unmap;
+				}
+				p += XDR_QUADLEN(len);
+			}
+		}
+
+		if (p + 2 > end)
+			goto short_pkt;
+		entry = p;
+	}
+	if (!nr && (entry[0] != 0 || entry[1] == 0))
+		goto short_pkt;
+ out:
+	kunmap_atomic(kaddr, KM_USER0);
+	return nr;
+ short_pkt:
+	entry[0] = entry[1] = 0;
+	/* truncate listing ? */
+	if (!nr) {
+		printk(KERN_NOTICE "NFS: readdir reply truncated!\n");
+		entry[1] = 1;
+	}
+	goto out;
+err_unmap:
+	nr = -errno_NFSERR_IO;
+	goto out;
+}
+
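+/*
+ * Decode a single readdir(plus) entry in place.  Returns a pointer just
+ * past the entry, ERR_PTR(-EAGAIN) if the entry is incomplete, or
+ * ERR_PTR(-EBADCOOKIE) once the end-of-directory flag has been seen.
+ */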
+u32 *
+nfs3_decode_dirent(u32 *p, struct nfs_entry *entry, int plus)
+{
+	struct nfs_entry old = *entry;
+
+	if (!*p++) {
+		if (!*p)
+			return ERR_PTR(-EAGAIN);
+		entry->eof = 1;
+		return ERR_PTR(-EBADCOOKIE);
+	}
+
+	p = xdr_decode_hyper(p, &entry->ino);
+	entry->len  = ntohl(*p++);
+	entry->name = (const char *) p;
+	p += XDR_QUADLEN(entry->len);
+	entry->prev_cookie = entry->cookie;
+	p = xdr_decode_hyper(p, &entry->cookie);
+
+	if (plus) {
+		entry->fattr->valid = 0;
+		p = xdr_decode_post_op_attr(p, entry->fattr);
+		/* In fact, a post_op_fh3: */
+		if (*p++) {
+			p = xdr_decode_fhandle(p, entry->fh);
+			/* Ugh -- server reply was truncated */
+			if (p == NULL) {
+				dprintk("NFS: FH truncated\n");
+				*entry = old;
+				return ERR_PTR(-EAGAIN);
+			}
+		} else
+			memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
+	}
+
+	entry->eof = !p[0] && p[1];
+	return p;
+}
+
+/*
+ * Encode COMMIT arguments
+ */
+static int
+nfs3_xdr_commitargs(struct rpc_rqst *req, u32 *p, struct nfs_writeargs *args)
+{
+	p = xdr_encode_fhandle(p, args->fh);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = htonl(args->count);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	return 0;
+}
+
+/*
+ * NFS XDR decode functions
+ */
+
+/*
+ * Decode attrstat reply.
+ */
+static int
+nfs3_xdr_attrstat(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		return -nfs_stat_to_errno(status);
+	xdr_decode_fattr(p, fattr);
+	return 0;
+}
+
+/*
+ * Decode status+wcc_data reply
+ * SATTR, REMOVE, RMDIR
+ */
+static int
+nfs3_xdr_wccstat(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)))
+		status = -nfs_stat_to_errno(status);
+	xdr_decode_wcc_data(p, fattr);
+	return status;
+}
+
+/*
+ * Decode LOOKUP reply
+ */
+static int
+nfs3_xdr_lookupres(struct rpc_rqst *req, u32 *p, struct nfs3_diropres *res)
+{
+	int	status;
+
+	if ((status = ntohl(*p++))) {
+		status = -nfs_stat_to_errno(status);
+	} else {
+		if (!(p = xdr_decode_fhandle(p, res->fh)))
+			return -errno_NFSERR_IO;
+		p = xdr_decode_post_op_attr(p, res->fattr);
+	}
+	xdr_decode_post_op_attr(p, res->dir_attr);
+	return status;
+}
+
+/*
+ * Decode ACCESS reply
+ */
+static int
+nfs3_xdr_accessres(struct rpc_rqst *req, u32 *p, struct nfs3_accessres *res)
+{
+	int	status = ntohl(*p++);
+
+	p = xdr_decode_post_op_attr(p, res->fattr);
+	if (status)
+		return -nfs_stat_to_errno(status);
+	res->access = ntohl(*p++);
+	return 0;
+}
+
+static int
+nfs3_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs3_readlinkargs *args)
+{
+	struct rpc_auth *auth = req->rq_task->tk_auth;
+	unsigned int replen;
+
+	p = xdr_encode_fhandle(p, args->fh);
+	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+
+	/* Inline the page array */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
+	return 0;
+}
+
+/*
+ * Decode READLINK reply
+ */
+static int
+nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
+{
+	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+	struct kvec *iov = rcvbuf->head;
+	int hdrlen, len, recvd;
+	char	*kaddr;
+	int	status;
+
+	status = ntohl(*p++);
+	p = xdr_decode_post_op_attr(p, fattr);
+
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	/* Convert length of symlink */
+	len = ntohl(*p++);
+	if (len >= rcvbuf->page_len || len <= 0) {
+		dprintk(KERN_WARNING "nfs: server returned giant symlink!\n");
+		return -ENAMETOOLONG;
+	}
+
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READLINK reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
+		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
+	}
+	recvd = req->rq_rcv_buf.len - hdrlen;
+	if (recvd < len) {
+		printk(KERN_WARNING "NFS: server cheating in readlink reply: "
+				"count %u > recvd %u\n", len, recvd);
+		return -EIO;
+	}
+
+	/* NULL terminate the string we got */
+	kaddr = (char*)kmap_atomic(rcvbuf->pages[0], KM_USER0);
+	kaddr[len+rcvbuf->page_base] = '\0';
+	kunmap_atomic(kaddr, KM_USER0);
+	return 0;
+}
+
+/*
+ * Decode READ reply
+ */
+static int
+nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
+{
+	struct kvec *iov = req->rq_rcv_buf.head;
+	int	status, count, ocount, recvd, hdrlen;
+
+	status = ntohl(*p++);
+	p = xdr_decode_post_op_attr(p, res->fattr);
+
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
+	 * in that it puts the count both in the res struct and in the
+	 * opaque data count. */
+	count    = ntohl(*p++);
+	res->eof = ntohl(*p++);
+	ocount   = ntohl(*p++);
+
+	if (ocount != count) {
+		printk(KERN_WARNING "NFS: READ count doesn't match RPC opaque count.\n");
+		return -errno_NFSERR_IO;
+	}
+
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	if (iov->iov_len < hdrlen) {
+		printk(KERN_WARNING "NFS: READ reply header overflowed: "
+				"length %d > %Zu\n", hdrlen, iov->iov_len);
+		return -errno_NFSERR_IO;
+	} else if (iov->iov_len != hdrlen) {
+		dprintk("NFS: READ header is short. iovec will be shifted.\n");
+		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
+	}
+
+	recvd = req->rq_rcv_buf.len - hdrlen;
+	if (count > recvd) {
+		printk(KERN_WARNING "NFS: server cheating in read reply: "
+			"count %d > recvd %d\n", count, recvd);
+		count = recvd;
+		res->eof = 0;
+	}
+
+	if (count < res->count)
+		res->count = count;
+
+	return count;
+}
+
+/*
+ * Decode WRITE response
+ */
+static int
+nfs3_xdr_writeres(struct rpc_rqst *req, u32 *p, struct nfs_writeres *res)
+{
+	int	status;
+
+	status = ntohl(*p++);
+	p = xdr_decode_wcc_data(p, res->fattr);
+
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	res->count = ntohl(*p++);
+	res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
+	res->verf->verifier[0] = *p++;
+	res->verf->verifier[1] = *p++;
+
+	return res->count;
+}
+
+/*
+ * Decode a CREATE response
+ */
+static int
+nfs3_xdr_createres(struct rpc_rqst *req, u32 *p, struct nfs3_diropres *res)
+{
+	int	status;
+
+	status = ntohl(*p++);
+	if (status == 0) {
+		if (*p++) {
+			if (!(p = xdr_decode_fhandle(p, res->fh)))
+				return -errno_NFSERR_IO;
+			p = xdr_decode_post_op_attr(p, res->fattr);
+		} else {
+			memset(res->fh, 0, sizeof(*res->fh));
+			/* Decode post_op_attr to advance p, then mark the fattr invalid */
+			p = xdr_decode_post_op_attr(p, res->fattr);
+			res->fattr->valid = 0;
+		}
+	} else {
+		status = -nfs_stat_to_errno(status);
+	}
+	p = xdr_decode_wcc_data(p, res->dir_attr);
+	return status;
+}
+
+/*
+ * Decode RENAME reply
+ */
+static int
+nfs3_xdr_renameres(struct rpc_rqst *req, u32 *p, struct nfs3_renameres *res)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)) != 0)
+		status = -nfs_stat_to_errno(status);
+	p = xdr_decode_wcc_data(p, res->fromattr);
+	p = xdr_decode_wcc_data(p, res->toattr);
+	return status;
+}
+
+/*
+ * Decode LINK reply
+ */
+static int
+nfs3_xdr_linkres(struct rpc_rqst *req, u32 *p, struct nfs3_linkres *res)
+{
+	int	status;
+
+	if ((status = ntohl(*p++)) != 0)
+		status = -nfs_stat_to_errno(status);
+	p = xdr_decode_post_op_attr(p, res->fattr);
+	p = xdr_decode_wcc_data(p, res->dir_attr);
+	return status;
+}
+
+/*
+ * Decode FSSTAT reply
+ */
+static int
+nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsstat *res)
+{
+	int		status;
+
+	status = ntohl(*p++);
+
+	p = xdr_decode_post_op_attr(p, res->fattr);
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	p = xdr_decode_hyper(p, &res->tbytes);
+	p = xdr_decode_hyper(p, &res->fbytes);
+	p = xdr_decode_hyper(p, &res->abytes);
+	p = xdr_decode_hyper(p, &res->tfiles);
+	p = xdr_decode_hyper(p, &res->ffiles);
+	p = xdr_decode_hyper(p, &res->afiles);
+
+	/* ignore invarsec */
+	return 0;
+}
+
+/*
+ * Decode FSINFO reply
+ */
+static int
+nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
+{
+	int		status;
+
+	status = ntohl(*p++);
+
+	p = xdr_decode_post_op_attr(p, res->fattr);
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	res->rtmax  = ntohl(*p++);
+	res->rtpref = ntohl(*p++);
+	res->rtmult = ntohl(*p++);
+	res->wtmax  = ntohl(*p++);
+	res->wtpref = ntohl(*p++);
+	res->wtmult = ntohl(*p++);
+	res->dtpref = ntohl(*p++);
+	p = xdr_decode_hyper(p, &res->maxfilesize);
+
+	/* ignore time_delta and properties */
+	res->lease_time = 0;
+	return 0;
+}
+
+/*
+ * Decode PATHCONF reply
+ */
+static int
+nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_pathconf *res)
+{
+	int		status;
+
+	status = ntohl(*p++);
+
+	p = xdr_decode_post_op_attr(p, res->fattr);
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+	res->max_link = ntohl(*p++);
+	res->max_namelen = ntohl(*p++);
+
+	/* ignore remaining fields */
+	return 0;
+}
+
+/*
+ * Decode COMMIT reply
+ */
+static int
+nfs3_xdr_commitres(struct rpc_rqst *req, u32 *p, struct nfs_writeres *res)
+{
+	int		status;
+
+	status = ntohl(*p++);
+	p = xdr_decode_wcc_data(p, res->fattr);
+	if (status != 0)
+		return -nfs_stat_to_errno(status);
+
+	res->verf->verifier[0] = *p++;
+	res->verf->verifier[1] = *p++;
+	return 0;
+}
+
+#ifndef MAX
+# define MAX(a, b)	(((a) > (b))? (a) : (b))
+#endif
+
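+/*
+ * Fill in one rpc_procinfo slot: the encode/decode routines defined above,
+ * a buffer size in bytes (the larger of the argument and result word
+ * counts, shifted to bytes), and the timer class used for RPC round-trip
+ * estimates.
+ */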
+#define PROC(proc, argtype, restype, timer)				\
+[NFS3PROC_##proc] = {							\
+	.p_proc      = NFS3PROC_##proc,					\
+	.p_encode    = (kxdrproc_t) nfs3_xdr_##argtype,			\
+	.p_decode    = (kxdrproc_t) nfs3_xdr_##restype,			\
+	.p_bufsiz    = MAX(NFS3_##argtype##_sz,NFS3_##restype##_sz) << 2,	\
+	.p_timer     = timer						\
+	}
+
+struct rpc_procinfo	nfs3_procedures[] = {
+  PROC(GETATTR,		fhandle,	attrstat, 1),
+  PROC(SETATTR, 	sattrargs,	wccstat, 0),
+  PROC(LOOKUP,		diropargs,	lookupres, 2),
+  PROC(ACCESS,		accessargs,	accessres, 1),
+  PROC(READLINK,	readlinkargs,	readlinkres, 3),
+  PROC(READ,		readargs,	readres, 3),
+  PROC(WRITE,		writeargs,	writeres, 4),
+  PROC(CREATE,		createargs,	createres, 0),
+  PROC(MKDIR,		mkdirargs,	createres, 0),
+  PROC(SYMLINK,		symlinkargs,	createres, 0),
+  PROC(MKNOD,		mknodargs,	createres, 0),
+  PROC(REMOVE,		diropargs,	wccstat, 0),
+  PROC(RMDIR,		diropargs,	wccstat, 0),
+  PROC(RENAME,		renameargs,	renameres, 0),
+  PROC(LINK,		linkargs,	linkres, 0),
+  PROC(READDIR,		readdirargs,	readdirres, 3),
+  PROC(READDIRPLUS,	readdirargs,	readdirres, 3),
+  PROC(FSSTAT,		fhandle,	fsstatres, 0),
+  PROC(FSINFO,  	fhandle,	fsinfores, 0),
+  PROC(PATHCONF,	fhandle,	pathconfres, 0),
+  PROC(COMMIT,		commitargs,	commitres, 5),
+};
+
+struct rpc_version		nfs_version3 = {
+	.number			= 3,
+	.nrprocs		= sizeof(nfs3_procedures)/sizeof(nfs3_procedures[0]),
+	.procs			= nfs3_procedures
+};
+
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
new file mode 100644
index 0000000..1d5cb3e
--- /dev/null
+++ b/fs/nfs/nfs4proc.c
@@ -0,0 +1,2786 @@
+/*
+ *  fs/nfs/nfs4proc.c
+ *
+ *  Client-side procedure declarations for NFSv4.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *  Andy Adamson   <andros@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/mm.h>
+#include <linux/utsname.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/smp_lock.h>
+#include <linux/namei.h>
+
+#include "delegation.h"
+
+#define NFSDBG_FACILITY		NFSDBG_PROC
+
+#define NFS4_POLL_RETRY_MIN	(1*HZ)
+#define NFS4_POLL_RETRY_MAX	(15*HZ)
+
+static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
+static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
+static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
+extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
+extern struct rpc_procinfo nfs4_procedures[];
+
+extern nfs4_stateid zero_stateid;
+
+/* Prevent leaks of NFSv4 errors into userland */
+int nfs4_map_errors(int err)
+{
+	if (err < -1000) {
+		dprintk("%s could not handle NFSv4 error %d\n",
+				__FUNCTION__, -err);
+		return -EIO;
+	}
+	return err;
+}
+
+/*
+ * This is our standard bitmap for GETATTR requests.
+ */
+const u32 nfs4_fattr_bitmap[2] = {
+	FATTR4_WORD0_TYPE
+	| FATTR4_WORD0_CHANGE
+	| FATTR4_WORD0_SIZE
+	| FATTR4_WORD0_FSID
+	| FATTR4_WORD0_FILEID,
+	FATTR4_WORD1_MODE
+	| FATTR4_WORD1_NUMLINKS
+	| FATTR4_WORD1_OWNER
+	| FATTR4_WORD1_OWNER_GROUP
+	| FATTR4_WORD1_RAWDEV
+	| FATTR4_WORD1_SPACE_USED
+	| FATTR4_WORD1_TIME_ACCESS
+	| FATTR4_WORD1_TIME_METADATA
+	| FATTR4_WORD1_TIME_MODIFY
+};
+
+const u32 nfs4_statfs_bitmap[2] = {
+	FATTR4_WORD0_FILES_AVAIL
+	| FATTR4_WORD0_FILES_FREE
+	| FATTR4_WORD0_FILES_TOTAL,
+	FATTR4_WORD1_SPACE_AVAIL
+	| FATTR4_WORD1_SPACE_FREE
+	| FATTR4_WORD1_SPACE_TOTAL
+};
+
+u32 nfs4_pathconf_bitmap[2] = {
+	FATTR4_WORD0_MAXLINK
+	| FATTR4_WORD0_MAXNAME,
+	0
+};
+
+const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
+			| FATTR4_WORD0_MAXREAD
+			| FATTR4_WORD0_MAXWRITE
+			| FATTR4_WORD0_LEASE_TIME,
+			0
+};
+
+static void nfs4_setup_readdir(u64 cookie, u32 *verifier, struct dentry *dentry,
+		struct nfs4_readdir_arg *readdir)
+{
+	u32 *start, *p;
+
+	BUG_ON(readdir->count < 80);
+	if (cookie > 2) {
+		readdir->cookie = cookie;
+		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
+		return;
+	}
+
+	readdir->cookie = 0;
+	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
+	if (cookie == 2)
+		return;
+	
+	/*
+	 * NFSv4 servers do not return entries for '.' and '..'
+	 * Therefore, we fake these entries here.  We let '.'
+	 * have cookie 0 and '..' have cookie 1.  Note that
+	 * when talking to the server, we always send cookie 0
+	 * instead of 1 or 2.
+	 */
+	start = p = (u32 *)kmap_atomic(*readdir->pages, KM_USER0);
+	
+	if (cookie == 0) {
+		*p++ = xdr_one;                                  /* next */
+		*p++ = xdr_zero;                   /* cookie, first word */
+		*p++ = xdr_one;                   /* cookie, second word */
+		*p++ = xdr_one;                             /* entry len */
+		memcpy(p, ".\0\0\0", 4);                        /* entry */
+		p++;
+		*p++ = xdr_one;                         /* bitmap length */
+		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
+		*p++ = htonl(8);              /* attribute buffer length */
+		p = xdr_encode_hyper(p, dentry->d_inode->i_ino);
+	}
+	
+	*p++ = xdr_one;                                  /* next */
+	*p++ = xdr_zero;                   /* cookie, first word */
+	*p++ = xdr_two;                   /* cookie, second word */
+	*p++ = xdr_two;                             /* entry len */
+	memcpy(p, "..\0\0", 4);                         /* entry */
+	p++;
+	*p++ = xdr_one;                         /* bitmap length */
+	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
+	*p++ = htonl(8);              /* attribute buffer length */
+	p = xdr_encode_hyper(p, dentry->d_parent->d_inode->i_ino);
+
+	readdir->pgbase = (char *)p - (char *)start;
+	readdir->count -= readdir->pgbase;
+	kunmap_atomic(start, KM_USER0);
+}
+
+static void
+renew_lease(struct nfs_server *server, unsigned long timestamp)
+{
+	struct nfs4_client *clp = server->nfs4_state;
+	spin_lock(&clp->cl_lock);
+	if (time_before(clp->cl_last_renewal,timestamp))
+		clp->cl_last_renewal = timestamp;
+	spin_unlock(&clp->cl_lock);
+}
+
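+/* Update the cached change attribute only when the server reports the
+ * change as atomic and our cached value matches its "before" value. */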
+static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinfo)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	if (cinfo->before == nfsi->change_attr && cinfo->atomic)
+		nfsi->change_attr = cinfo->after;
+}
+
+static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
+{
+	struct inode *inode = state->inode;
+
+	open_flags &= (FMODE_READ|FMODE_WRITE);
+	/* Protect against nfs4_find_state() */
+	spin_lock(&inode->i_lock);
+	state->state |= open_flags;
+	/* NB! List reordering - see the reclaim code for why.  */
+	if ((open_flags & FMODE_WRITE) && 0 == state->nwriters++)
+		list_move(&state->open_states, &state->owner->so_states);
+	if (open_flags & FMODE_READ)
+		state->nreaders++;
+	memcpy(&state->stateid, stateid, sizeof(state->stateid));
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * OPEN_RECLAIM:
+ * 	reclaim state on the server after a reboot.
+ * 	Assumes caller is holding the sp->so_sema
+ */
+static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
+	struct nfs_openargs o_arg = {
+		.fh = NFS_FH(inode),
+		.seqid = sp->so_seqid,
+		.id = sp->so_id,
+		.open_flags = state->state,
+		.clientid = server->nfs4_state->cl_clientid,
+		.claim = NFS4_OPEN_CLAIM_PREVIOUS,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs_openres o_res = {
+		.server = server,	/* Grrr */
+	};
+	struct rpc_message msg = {
+		.rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
+		.rpc_argp       = &o_arg,
+		.rpc_resp	= &o_res,
+		.rpc_cred	= sp->so_cred,
+	};
+	int status;
+
+	if (delegation != NULL) {
+		if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) {
+			memcpy(&state->stateid, &delegation->stateid,
+					sizeof(state->stateid));
+			set_bit(NFS_DELEGATED_STATE, &state->flags);
+			return 0;
+		}
+		o_arg.u.delegation_type = delegation->type;
+	}
+	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_seqid(status, sp);
+	if (status == 0) {
+		memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
+		if (o_res.delegation_type != 0) {
+			nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
+			/* Did the server issue an immediate delegation recall? */
+			if (o_res.do_recall)
+				nfs_async_inode_return_delegation(inode, &o_res.stateid);
+		}
+	}
+	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	/* Ensure we update the inode attributes */
+	NFS_CACHEINV(inode);
+	return status;
+}
+
+static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = _nfs4_open_reclaim(sp, state);
+		switch (err) {
+			case 0:
+			case -NFS4ERR_STALE_CLIENTID:
+			case -NFS4ERR_STALE_STATEID:
+			case -NFS4ERR_EXPIRED:
+				return err;
+		}
+		err = nfs4_handle_exception(server, err, &exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
+{
+	struct nfs4_state_owner  *sp  = state->owner;
+	struct inode *inode = dentry->d_inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct dentry *parent = dget_parent(dentry);
+	struct nfs_openargs arg = {
+		.fh = NFS_FH(parent->d_inode),
+		.clientid = server->nfs4_state->cl_clientid,
+		.name = &dentry->d_name,
+		.id = sp->so_id,
+		.server = server,
+		.bitmask = server->attr_bitmask,
+		.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR,
+	};
+	struct nfs_openres res = {
+		.server = server,
+	};
+	struct 	rpc_message msg = {
+		.rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
+		.rpc_argp       = &arg,
+		.rpc_resp       = &res,
+		.rpc_cred	= sp->so_cred,
+	};
+	int status = 0;
+
+	down(&sp->so_sema);
+	if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+		goto out;
+	if (state->state == 0)
+		goto out;
+	arg.seqid = sp->so_seqid;
+	arg.open_flags = state->state;
+	memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
+	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_seqid(status, sp);
+	if (status >= 0) {
+		memcpy(state->stateid.data, res.stateid.data,
+				sizeof(state->stateid.data));
+		clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	}
+out:
+	up(&sp->so_sema);
+	dput(parent);
+	return status;
+}
+
+int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
+{
+	struct nfs4_exception exception = { };
+	struct nfs_server *server = NFS_SERVER(dentry->d_inode);
+	int err;
+	do {
+		err = _nfs4_open_delegation_recall(dentry, state);
+		switch (err) {
+			case 0:
+				return err;
+			case -NFS4ERR_STALE_CLIENTID:
+			case -NFS4ERR_STALE_STATEID:
+			case -NFS4ERR_EXPIRED:
+				/* Don't recall a delegation if it was lost */
+				nfs4_schedule_state_recovery(server->nfs4_state);
+				return err;
+		}
+		err = nfs4_handle_exception(server, err, &exception);
+	} while (exception.retry);
+	return err;
+}
+
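+/*
+ * Send an OPEN_CONFIRM for an open whose reply had NFS4_OPEN_RESULT_CONFIRM
+ * set, and update the stateid with the confirmed one on success.
+ */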
+static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid)
+{
+	struct nfs_open_confirmargs arg = {
+		.fh             = fh,
+		.seqid          = sp->so_seqid,
+		.stateid	= *stateid,
+	};
+	struct nfs_open_confirmres res;
+	struct 	rpc_message msg = {
+		.rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
+		.rpc_argp       = &arg,
+		.rpc_resp       = &res,
+		.rpc_cred	= sp->so_cred,
+	};
+	int status;
+
+	status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_seqid(status, sp);
+	if (status >= 0)
+		memcpy(stateid, &res.stateid, sizeof(*stateid));
+	return status;
+}
+
+static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner  *sp, struct nfs_openargs *o_arg, struct nfs_openres *o_res)
+{
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
+		.rpc_argp = o_arg,
+		.rpc_resp = o_res,
+		.rpc_cred = sp->so_cred,
+	};
+	int status;
+
+	/* Update sequence id. The caller must serialize! */
+	o_arg->seqid = sp->so_seqid;
+	o_arg->id = sp->so_id;
+	o_arg->clientid = sp->so_client->cl_clientid;
+
+	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_seqid(status, sp);
+	if (status != 0)
+		goto out;
+	update_changeattr(dir, &o_res->cinfo);
+	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+		status = _nfs4_proc_open_confirm(server->client, &o_res->fh,
+				sp, &o_res->stateid);
+		if (status != 0)
+			goto out;
+	}
+	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
+		status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
+out:
+	return status;
+}
+
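+/*
+ * Check the requested open mode against the access cache; on a miss, issue
+ * an ACCESS call that probes read, write and execute so the full result
+ * can be cached for later checks.
+ */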
+static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags)
+{
+	struct nfs_access_entry cache;
+	int mask = 0;
+	int status;
+
+	if (openflags & FMODE_READ)
+		mask |= MAY_READ;
+	if (openflags & FMODE_WRITE)
+		mask |= MAY_WRITE;
+	status = nfs_access_get_cached(inode, cred, &cache);
+	if (status == 0)
+		goto out;
+
+	/* Be clever: ask server to check for all possible rights */
+	cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
+	cache.cred = cred;
+	cache.jiffies = jiffies;
+	status = _nfs4_proc_access(inode, &cache);
+	if (status != 0)
+		return status;
+	nfs_access_add_cache(inode, &cache);
+out:
+	if ((cache.mask & mask) == mask)
+		return 0;
+	return -EACCES;
+}
+
+/*
+ * OPEN_EXPIRED:
+ * 	reclaim state on the server after a network partition.
+ * 	Assumes caller holds the appropriate lock
+ */
+static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
+{
+	struct dentry *parent = dget_parent(dentry);
+	struct inode *dir = parent->d_inode;
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
+	struct nfs_fattr        f_attr = {
+		.valid = 0,
+	};
+	struct nfs_openargs o_arg = {
+		.fh = NFS_FH(dir),
+		.open_flags = state->state,
+		.name = &dentry->d_name,
+		.bitmask = server->attr_bitmask,
+		.claim = NFS4_OPEN_CLAIM_NULL,
+	};
+	struct nfs_openres o_res = {
+		.f_attr = &f_attr,
+		.server = server,
+	};
+	int status = 0;
+
+	if (delegation != NULL && !(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) {
+		status = _nfs4_do_access(inode, sp->so_cred, state->state);
+		if (status < 0)
+			goto out;
+		memcpy(&state->stateid, &delegation->stateid, sizeof(state->stateid));
+		set_bit(NFS_DELEGATED_STATE, &state->flags);
+		goto out;
+	}
+	status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
+	if (status != 0)
+		goto out_nodeleg;
+	/* Check if files differ */
+	if ((f_attr.mode & S_IFMT) != (inode->i_mode & S_IFMT))
+		goto out_stale;
+	/* Has the file handle changed? */
+	if (nfs_compare_fh(&o_res.fh, NFS_FH(inode)) != 0) {
+		/* Verify if the change attributes are the same */
+		if (f_attr.change_attr != NFS_I(inode)->change_attr)
+			goto out_stale;
+		if (nfs_size_to_loff_t(f_attr.size) != inode->i_size)
+			goto out_stale;
+		/* Let's just pretend that this is the same file */
+		nfs_copy_fh(NFS_FH(inode), &o_res.fh);
+		NFS_I(inode)->fileid = f_attr.fileid;
+	}
+	memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
+	if (o_res.delegation_type != 0) {
+		if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM))
+			nfs_inode_set_delegation(inode, sp->so_cred, &o_res);
+		else
+			nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
+	}
+out_nodeleg:
+	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+out:
+	dput(parent);
+	return status;
+out_stale:
+	status = -ESTALE;
+	/* Invalidate the state owner so we don't ever use it again */
+	nfs4_drop_state_owner(sp);
+	d_drop(dentry);
+	/* Should we be trying to close that stateid? */
+	goto out_nodeleg;
+}
+
+static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+	struct nfs_inode *nfsi = NFS_I(state->inode);
+	struct nfs_open_context *ctx;
+	int status;
+
+	spin_lock(&state->inode->i_lock);
+	list_for_each_entry(ctx, &nfsi->open_files, list) {
+		if (ctx->state != state)
+			continue;
+		get_nfs_open_context(ctx);
+		spin_unlock(&state->inode->i_lock);
+		status = _nfs4_open_expired(sp, state, ctx->dentry);
+		put_nfs_open_context(ctx);
+		return status;
+	}
+	spin_unlock(&state->inode->i_lock);
+	return -ENOENT;
+}
+
+/*
+ * Returns an nfs4_state + an extra reference to the inode
+ */
+static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res)
+{
+	struct nfs_delegation *delegation;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_client *clp = server->nfs4_state;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs4_state_owner *sp = NULL;
+	struct nfs4_state *state = NULL;
+	int open_flags = flags & (FMODE_READ|FMODE_WRITE);
+	int err;
+
+	/* Protect against reboot recovery - NOTE ORDER! */
+	down_read(&clp->cl_sem);
+	/* Protect against delegation recall */
+	down_read(&nfsi->rwsem);
+	delegation = NFS_I(inode)->delegation;
+	err = -ENOENT;
+	if (delegation == NULL || (delegation->type & open_flags) != open_flags)
+		goto out_err;
+	err = -ENOMEM;
+	if (!(sp = nfs4_get_state_owner(server, cred))) {
+		dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
+		goto out_err;
+	}
+	down(&sp->so_sema);
+	state = nfs4_get_open_state(inode, sp);
+	if (state == NULL)
+		goto out_err;
+
+	err = -ENOENT;
+	if ((state->state & open_flags) == open_flags) {
+		spin_lock(&inode->i_lock);
+		if (open_flags & FMODE_READ)
+			state->nreaders++;
+		if (open_flags & FMODE_WRITE)
+			state->nwriters++;
+		spin_unlock(&inode->i_lock);
+		goto out_ok;
+	} else if (state->state != 0)
+		goto out_err;
+
+	lock_kernel();
+	err = _nfs4_do_access(inode, cred, open_flags);
+	unlock_kernel();
+	if (err != 0)
+		goto out_err;
+	set_bit(NFS_DELEGATED_STATE, &state->flags);
+	update_open_stateid(state, &delegation->stateid, open_flags);
+out_ok:
+	up(&sp->so_sema);
+	nfs4_put_state_owner(sp);
+	up_read(&nfsi->rwsem);
+	up_read(&clp->cl_sem);
+	igrab(inode);
+	*res = state;
+	return 0; 
+out_err:
+	if (sp != NULL) {
+		if (state != NULL)
+			nfs4_put_open_state(state);
+		up(&sp->so_sema);
+		nfs4_put_state_owner(sp);
+	}
+	up_read(&nfsi->rwsem);
+	up_read(&clp->cl_sem);
+	return err;
+}
+
+static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred)
+{
+	struct nfs4_exception exception = { };
+	struct nfs4_state *res;
+	int err;
+
+	do {
+		err = _nfs4_open_delegated(inode, flags, cred, &res);
+		if (err == 0)
+			break;
+		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(inode),
+					err, &exception));
+	} while (exception.retry);
+	return res;
+}
+
+/*
+ * Returns an nfs4_state + a referenced inode
+ */
+static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+{
+	struct nfs4_state_owner  *sp;
+	struct nfs4_state     *state = NULL;
+	struct nfs_server       *server = NFS_SERVER(dir);
+	struct nfs4_client *clp = server->nfs4_state;
+	struct inode *inode = NULL;
+	int                     status;
+	struct nfs_fattr        f_attr = {
+		.valid          = 0,
+	};
+	struct nfs_openargs o_arg = {
+		.fh             = NFS_FH(dir),
+		.open_flags	= flags,
+		.name           = &dentry->d_name,
+		.server         = server,
+		.bitmask = server->attr_bitmask,
+		.claim = NFS4_OPEN_CLAIM_NULL,
+	};
+	struct nfs_openres o_res = {
+		.f_attr         = &f_attr,
+		.server         = server,
+	};
+
+	/* Protect against reboot recovery conflicts */
+	down_read(&clp->cl_sem);
+	status = -ENOMEM;
+	if (!(sp = nfs4_get_state_owner(server, cred))) {
+		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
+		goto out_err;
+	}
+	if (flags & O_EXCL) {
+		u32 *p = (u32 *) o_arg.u.verifier.data;
+		p[0] = jiffies;
+		p[1] = current->pid;
+	} else
+		o_arg.u.attrs = sattr;
+	/* Serialization for the sequence id */
+	down(&sp->so_sema);
+
+	status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
+	if (status != 0)
+		goto out_err;
+
+	status = -ENOMEM;
+	inode = nfs_fhget(dir->i_sb, &o_res.fh, &f_attr);
+	if (!inode)
+		goto out_err;
+	state = nfs4_get_open_state(inode, sp);
+	if (!state)
+		goto out_err;
+	update_open_stateid(state, &o_res.stateid, flags);
+	if (o_res.delegation_type != 0)
+		nfs_inode_set_delegation(inode, cred, &o_res);
+	up(&sp->so_sema);
+	nfs4_put_state_owner(sp);
+	up_read(&clp->cl_sem);
+	*res = state;
+	return 0;
+out_err:
+	if (sp != NULL) {
+		if (state != NULL)
+			nfs4_put_open_state(state);
+		up(&sp->so_sema);
+		nfs4_put_state_owner(sp);
+	}
+	/* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
+	up_read(&clp->cl_sem);
+	if (inode != NULL)
+		iput(inode);
+	*res = NULL;
+	return status;
+}
+
+
+static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred)
+{
+	struct nfs4_exception exception = { };
+	struct nfs4_state *res;
+	int status;
+
+	do {
+		status = _nfs4_do_open(dir, dentry, flags, sattr, cred, &res);
+		if (status == 0)
+			break;
+		/* NOTE: BAD_SEQID means the server and client disagree about the
+		 * book-keeping w.r.t. state-changing operations
+		 * (OPEN/CLOSE/LOCK/LOCKU...)
+		 * It is actually a sign of a bug on the client or on the server.
+		 *
+		 * If we receive a BAD_SEQID error in the particular case of
+		 * doing an OPEN, we assume that nfs4_increment_seqid() will
+		 * have unhashed the old state_owner for us, and that we can
+		 * therefore safely retry using a new one. We should still warn
+		 * the user though...
+		 */
+		if (status == -NFS4ERR_BAD_SEQID) {
+			printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n");
+			exception.retry = 1;
+			continue;
+		}
+		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
+					status, &exception));
+	} while (exception.retry);
+	return res;
+}
+
+static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
+                struct nfs_fh *fhandle, struct iattr *sattr,
+                struct nfs4_state *state)
+{
+	struct nfs_setattrargs	arg = {
+		.fh		= fhandle,
+		.iap		= sattr,
+		.server		= server,
+		.bitmask	= server->attr_bitmask,
+	};
+	struct nfs_setattrres	res = {
+		.fattr		= fattr,
+		.server		= server,
+	};
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
+		.rpc_argp	= &arg,
+		.rpc_resp	= &res,
+	};
+
+	fattr->valid = 0;
+
+	if (state != NULL)
+		msg.rpc_cred = state->owner->so_cred;
+	if (sattr->ia_valid & ATTR_SIZE)
+		nfs4_copy_stateid(&arg.stateid, state, NULL);
+	else
+		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
+
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
+                struct nfs_fh *fhandle, struct iattr *sattr,
+                struct nfs4_state *state)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_do_setattr(server, fattr, fhandle, sattr,
+					state),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+struct nfs4_closedata {
+	struct inode *inode;
+	struct nfs4_state *state;
+	struct nfs_closeargs arg;
+	struct nfs_closeres res;
+};
+
+static void nfs4_close_done(struct rpc_task *task)
+{
+	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+	struct nfs4_state *state = calldata->state;
+	struct nfs4_state_owner *sp = state->owner;
+	struct nfs_server *server = NFS_SERVER(calldata->inode);
+
+	/* We are done with the inode, and in the process of freeing
+	 * the state_owner.  We keep this around to process errors.
+	 */
+	nfs4_increment_seqid(task->tk_status, sp);
+	switch (task->tk_status) {
+		case 0:
+			memcpy(&state->stateid, &calldata->res.stateid,
+					sizeof(state->stateid));
+			break;
+		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_EXPIRED:
+			state->state = calldata->arg.open_flags;
+			nfs4_schedule_state_recovery(server->nfs4_state);
+			break;
+		default:
+			if (nfs4_async_handle_error(task, server) == -EAGAIN) {
+				rpc_restart_call(task);
+				return;
+			}
+	}
+	state->state = calldata->arg.open_flags;
+	nfs4_put_open_state(state);
+	up(&sp->so_sema);
+	nfs4_put_state_owner(sp);
+	up_read(&server->nfs4_state->cl_sem);
+	kfree(calldata);
+}
+
+static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata)
+{
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
+		.rpc_argp = &calldata->arg,
+		.rpc_resp = &calldata->res,
+		.rpc_cred = calldata->state->owner->so_cred,
+	};
+	if (calldata->arg.open_flags != 0)
+		msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+	return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata);
+}
+
+/* 
+ * It is possible for data to be read/written from a mem-mapped file 
+ * after the sys_close call (which hits the vfs layer as a flush).
+ * This means that we can't safely call nfsv4 close on a file until 
+ * the inode is cleared. This in turn means that we are not good
+ * NFSv4 citizens - we do not indicate to the server to update the file's 
+ * share state even when we are done with one of the three share 
+ * stateid's in the inode.
+ *
+ * NOTE: Caller must be holding the sp->so_owner semaphore!
+ */
+int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode) 
+{
+	struct nfs4_closedata *calldata;
+	int status;
+
+	/* Tell caller we're done */
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+		state->state = mode;
+		return 0;
+	}
+	calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+	if (calldata == NULL)
+		return -ENOMEM;
+	calldata->inode = inode;
+	calldata->state = state;
+	calldata->arg.fh = NFS_FH(inode);
+	/* Serialization for the sequence id */
+	calldata->arg.seqid = state->owner->so_seqid;
+	calldata->arg.open_flags = mode;
+	memcpy(&calldata->arg.stateid, &state->stateid,
+			sizeof(calldata->arg.stateid));
+	status = nfs4_close_call(NFS_SERVER(inode)->client, calldata);
+	/*
+	 * Return -EINPROGRESS on success in order to indicate to the
+	 * caller that an asynchronous RPC call has been launched, and
+	 * that it will release the semaphores on completion.
+	 */
+	return (status == 0) ? -EINPROGRESS : status;
+}
+
+struct inode *
+nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct iattr attr;
+	struct rpc_cred *cred;
+	struct nfs4_state *state;
+
+	if (nd->flags & LOOKUP_CREATE) {
+		attr.ia_mode = nd->intent.open.create_mode;
+		attr.ia_valid = ATTR_MODE;
+		if (!IS_POSIXACL(dir))
+			attr.ia_mode &= ~current->fs->umask;
+	} else {
+		attr.ia_valid = 0;
+		BUG_ON(nd->intent.open.flags & O_CREAT);
+	}
+
+	cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
+	if (IS_ERR(cred))
+		return (struct inode *)cred;
+	state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred);
+	put_rpccred(cred);
+	if (IS_ERR(state))
+		return (struct inode *)state;
+	return state->inode;
+}
+
+int
+nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
+{
+	struct rpc_cred *cred;
+	struct nfs4_state *state;
+	struct inode *inode;
+
+	cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
+	if (IS_ERR(cred))
+		return PTR_ERR(cred);
+	state = nfs4_open_delegated(dentry->d_inode, openflags, cred);
+	if (IS_ERR(state))
+		state = nfs4_do_open(dir, dentry, openflags, NULL, cred);
+	put_rpccred(cred);
+	if (state == ERR_PTR(-ENOENT) && dentry->d_inode == NULL)
+		return 1;
+	if (IS_ERR(state))
+		return 0;
+	inode = state->inode;
+	if (inode == dentry->d_inode) {
+		iput(inode);
+		return 1;
+	}
+	d_drop(dentry);
+	nfs4_close_state(state, openflags);
+	iput(inode);
+	return 0;
+}
+
+
+static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+{
+	struct nfs4_server_caps_res res = {};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
+		.rpc_argp = fhandle,
+		.rpc_resp = &res,
+	};
+	int status;
+
+	status = rpc_call_sync(server->client, &msg, 0);
+	if (status == 0) {
+		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
+			server->caps |= NFS_CAP_ACLS;
+		if (res.has_links != 0)
+			server->caps |= NFS_CAP_HARDLINKS;
+		if (res.has_symlinks != 0)
+			server->caps |= NFS_CAP_SYMLINKS;
+		server->acl_bitmask = res.acl_bitmask;
+	}
+	return status;
+}
+
+static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_server_capabilities(server, fhandle),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *info)
+{
+	struct nfs_fattr *	fattr = info->fattr;
+	struct nfs4_lookup_root_arg args = {
+		.bitmask = nfs4_fattr_bitmap,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	fattr->valid = 0;
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *info)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_lookup_root(server, fhandle, info),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *info)
+{
+	struct nfs_fattr *	fattr = info->fattr;
+	unsigned char *		p;
+	struct qstr		q;
+	struct nfs4_lookup_arg args = {
+		.dir_fh = fhandle,
+		.name = &q,
+		.bitmask = nfs4_fattr_bitmap,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	int status;
+
+	/*
+	 * Now we do a separate LOOKUP for each component of the mount path.
+	 * The LOOKUPs are done separately so that we can conveniently
+	 * catch an ERR_WRONGSEC if it occurs along the way...
+	 */
+	status = nfs4_lookup_root(server, fhandle, info);
+	if (status)
+		goto out;
+
+	p = server->mnt_path;
+	for (;;) {
+		struct nfs4_exception exception = { };
+
+		while (*p == '/')
+			p++;
+		if (!*p)
+			break;
+		q.name = p;
+		while (*p && (*p != '/'))
+			p++;
+		q.len = p - q.name;
+
+		do {
+			fattr->valid = 0;
+			status = nfs4_handle_exception(server,
+					rpc_call_sync(server->client, &msg, 0),
+					&exception);
+		} while (exception.retry);
+		if (status == 0)
+			continue;
+		if (status == -ENOENT) {
+			printk(KERN_NOTICE "NFS: mount path %s does not exist!\n", server->mnt_path);
+			printk(KERN_NOTICE "NFS: suggestion: try mounting '/' instead.\n");
+		}
+		break;
+	}
+	if (status == 0)
+		status = nfs4_server_capabilities(server, fhandle);
+	if (status == 0)
+		status = nfs4_do_fsinfo(server, fhandle, info);
+out:
+	return status;
+}
+
+static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs4_getattr_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_getattr_res res = {
+		.fattr = fattr,
+		.server = server,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	
+	fattr->valid = 0;
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_proc_getattr(server, fhandle, fattr),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+/*
+ * The file is not closed if it is opened due to a request to change
+ * the size of the file.  The open call will not be needed once the
+ * VFS layer lookup-intents are implemented.
+ *
+ * Close is called when the inode is destroyed.
+ * If we haven't opened the file for O_WRONLY, we
+ * need to do so in the size_change case to obtain a stateid.
+ *
+ * Got race?
+ * Because OPEN is always done by name in nfsv4, it is
+ * possible that we opened a different file by the same
+ * name.  We can recognize this race condition, but we
+ * can't do anything about it besides returning an error.
+ *
+ * This will be fixed with VFS changes (lookup-intent).
+ */
+static int
+nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+		  struct iattr *sattr)
+{
+	struct inode *		inode = dentry->d_inode;
+	int			size_change = sattr->ia_valid & ATTR_SIZE;
+	struct nfs4_state	*state = NULL;
+	int need_iput = 0;
+	int status;
+
+	fattr->valid = 0;
+	
+	if (size_change) {
+		struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
+		if (IS_ERR(cred))
+			return PTR_ERR(cred);
+		state = nfs4_find_state(inode, cred, FMODE_WRITE);
+		if (state == NULL) {
+			state = nfs4_open_delegated(dentry->d_inode,
+					FMODE_WRITE, cred);
+			if (IS_ERR(state))
+				state = nfs4_do_open(dentry->d_parent->d_inode,
+						dentry, FMODE_WRITE,
+						NULL, cred);
+			need_iput = 1;
+		}
+		put_rpccred(cred);
+		if (IS_ERR(state))
+			return PTR_ERR(state);
+
+		if (state->inode != inode) {
+			printk(KERN_WARNING "nfs: raced in setattr (%p != %p), returning -EIO\n", inode, state->inode);
+			status = -EIO;
+			goto out;
+		}
+	}
+	status = nfs4_do_setattr(NFS_SERVER(inode), fattr,
+			NFS_FH(inode), sattr, state);
+out:
+	if (state) {
+		inode = state->inode;
+		nfs4_close_state(state, FMODE_WRITE);
+		if (need_iput)
+			iput(inode);
+	}
+	return status;
+}
+
+static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	int		       status;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_lookup_arg args = {
+		.bitmask = server->attr_bitmask,
+		.dir_fh = NFS_FH(dir),
+		.name = name,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	
+	fattr->valid = 0;
+	
+	dprintk("NFS call  lookup %s\n", name->name);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	dprintk("NFS reply lookup: %d\n", status);
+	return status;
+}
+
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_lookup(dir, name, fhandle, fattr),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+	struct nfs4_accessargs args = {
+		.fh = NFS_FH(inode),
+	};
+	struct nfs4_accessres res = { 0 };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+		.rpc_cred = entry->cred,
+	};
+	int mode = entry->mask;
+	int status;
+
+	/*
+	 * Determine which access bits we want to ask for...
+	 */
+	if (mode & MAY_READ)
+		args.access |= NFS4_ACCESS_READ;
+	if (S_ISDIR(inode->i_mode)) {
+		if (mode & MAY_WRITE)
+			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
+		if (mode & MAY_EXEC)
+			args.access |= NFS4_ACCESS_LOOKUP;
+	} else {
+		if (mode & MAY_WRITE)
+			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
+		if (mode & MAY_EXEC)
+			args.access |= NFS4_ACCESS_EXECUTE;
+	}
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	if (!status) {
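+		/* Map the access bits granted by the server back to MAY_* flags. */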
+		entry->mask = 0;
+		if (res.access & NFS4_ACCESS_READ)
+			entry->mask |= MAY_READ;
+		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
+			entry->mask |= MAY_WRITE;
+		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
+			entry->mask |= MAY_EXEC;
+	}
+	return status;
+}
+
+static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(inode),
+				_nfs4_proc_access(inode, entry),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+/*
+ * TODO: For the time being, we don't try to get any attributes
+ * along with any of the zero-copy operations READ, READDIR,
+ * READLINK, WRITE.
+ *
+ * In the case of the first three, we want to put the GETATTR
+ * after the read-type operation -- this is because it is hard
+ * to predict the length of a GETATTR response in v4, and thus
+ * align the READ data correctly.  This means that the GETATTR
+ * may end up partially falling into the page cache, and we should
+ * shift it into the 'tail' of the xdr_buf before processing.
+ * To do this efficiently, we need to know the total length
+ * of data received, which doesn't seem to be available outside
+ * of the RPC layer.
+ *
+ * In the case of WRITE, we also want to put the GETATTR after
+ * the operation -- in this case because we want to make sure
+ * we get the post-operation mtime and size.  This means that
+ * we can't use xdr_encode_pages() as written: we need a variant
+ * of it which would leave room in the 'tail' iovec.
+ *
+ * Both of these changes to the XDR layer would in fact be quite
+ * minor, but I decided to leave them for a subsequent patch.
+ */
+static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
+		unsigned int pgbase, unsigned int pglen)
+{
+	struct nfs4_readlink args = {
+		.fh       = NFS_FH(inode),
+		.pgbase	  = pgbase,
+		.pglen    = pglen,
+		.pages    = &page,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
+		.rpc_argp = &args,
+		.rpc_resp = NULL,
+	};
+
+	return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+}
+
+static int nfs4_proc_readlink(struct inode *inode, struct page *page,
+		unsigned int pgbase, unsigned int pglen)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(inode),
+				_nfs4_proc_readlink(inode, page, pgbase, pglen),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_read(struct nfs_read_data *rdata)
+{
+	int flags = rdata->flags;
+	struct inode *inode = rdata->inode;
+	struct nfs_fattr *fattr = rdata->res.fattr;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_READ],
+		.rpc_argp	= &rdata->args,
+		.rpc_resp	= &rdata->res,
+		.rpc_cred	= rdata->cred,
+	};
+	unsigned long timestamp = jiffies;
+	int status;
+
+	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
+			(long long) rdata->args.offset);
+
+	fattr->valid = 0;
+	status = rpc_call_sync(server->client, &msg, flags);
+	if (!status)
+		renew_lease(server, timestamp);
+	dprintk("NFS reply read: %d\n", status);
+	return status;
+}
+
+static int nfs4_proc_read(struct nfs_read_data *rdata)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(rdata->inode),
+				_nfs4_proc_read(rdata),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_write(struct nfs_write_data *wdata)
+{
+	int rpcflags = wdata->flags;
+	struct inode *inode = wdata->inode;
+	struct nfs_fattr *fattr = wdata->res.fattr;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_WRITE],
+		.rpc_argp	= &wdata->args,
+		.rpc_resp	= &wdata->res,
+		.rpc_cred	= wdata->cred,
+	};
+	int status;
+
+	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
+			(long long) wdata->args.offset);
+
+	fattr->valid = 0;
+	status = rpc_call_sync(server->client, &msg, rpcflags);
+	dprintk("NFS reply write: %d\n", status);
+	return status;
+}
+
+static int nfs4_proc_write(struct nfs_write_data *wdata)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(wdata->inode),
+				_nfs4_proc_write(wdata),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_commit(struct nfs_write_data *cdata)
+{
+	struct inode *inode = cdata->inode;
+	struct nfs_fattr *fattr = cdata->res.fattr;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
+		.rpc_argp	= &cdata->args,
+		.rpc_resp	= &cdata->res,
+		.rpc_cred	= cdata->cred,
+	};
+	int status;
+
+	dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
+			(long long) cdata->args.offset);
+
+	fattr->valid = 0;
+	status = rpc_call_sync(server->client, &msg, 0);
+	dprintk("NFS reply commit: %d\n", status);
+	return status;
+}
+
+static int nfs4_proc_commit(struct nfs_write_data *cdata)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(cdata->inode),
+				_nfs4_proc_commit(cdata),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+/*
+ * Got race?
+ * We will need to arrange for the VFS layer to provide an atomic open.
+ * Until then, this create/open method is prone to inefficiency and race
+ * conditions due to the lookup, create, and open VFS calls from sys_open()
+ * placed on the wire.
+ *
+ * Given the above sorry state of affairs, I'm simply sending an OPEN.
+ * The file will be opened again in the subsequent VFS open call
+ * (nfs4_proc_file_open).
+ *
+ * The open for read will just hang around to be used by any process that
+ * opens the file O_RDONLY. This will all be resolved with the VFS changes.
+ */
+
+static int
+nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+                 int flags)
+{
+	struct nfs4_state *state;
+	struct rpc_cred *cred;
+	int status = 0;
+
+	cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
+	if (IS_ERR(cred)) {
+		status = PTR_ERR(cred);
+		goto out;
+	}
+	state = nfs4_do_open(dir, dentry, flags, sattr, cred);
+	put_rpccred(cred);
+	if (IS_ERR(state)) {
+		status = PTR_ERR(state);
+		goto out;
+	}
+	d_instantiate(dentry, state->inode);
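+	/*
+	 * An exclusive create does not set the requested attributes on the
+	 * server, so apply them now with a separate SETATTR.
+	 */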
+	if (flags & O_EXCL) {
+		struct nfs_fattr fattr;
+		status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
+		                     NFS_FH(state->inode), sattr, state);
+		if (status == 0)
+			goto out;
+	} else if (flags != 0)
+		goto out;
+	nfs4_close_state(state, flags);
+out:
+	return status;
+}
+
+static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
+{
+	struct nfs4_remove_arg args = {
+		.fh = NFS_FH(dir),
+		.name = name,
+	};
+	struct nfs4_change_info	res;
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
+		.rpc_argp	= &args,
+		.rpc_resp	= &res,
+	};
+	int			status;
+
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (status == 0)
+		update_changeattr(dir, &res);
+	return status;
+}
+
+static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_remove(dir, name),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+struct unlink_desc {
+	struct nfs4_remove_arg	args;
+	struct nfs4_change_info	res;
+};
+
+static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
+		struct qstr *name)
+{
+	struct unlink_desc *up;
+
+	up = kmalloc(sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return -ENOMEM;
+
+	up->args.fh = NFS_FH(dir->d_inode);
+	up->args.name = name;
+
+	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
+	msg->rpc_argp = &up->args;
+	msg->rpc_resp = &up->res;
+	return 0;
+}
+
+static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+{
+	struct rpc_message *msg = &task->tk_msg;
+	struct unlink_desc *up;
+	
+	if (msg->rpc_resp != NULL) {
+		up = container_of(msg->rpc_resp, struct unlink_desc, res);
+		update_changeattr(dir->d_inode, &up->res);
+		kfree(up);
+		msg->rpc_resp = NULL;
+		msg->rpc_argp = NULL;
+	}
+	return 0;
+}
+
+static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+		struct inode *new_dir, struct qstr *new_name)
+{
+	struct nfs4_rename_arg arg = {
+		.old_dir = NFS_FH(old_dir),
+		.new_dir = NFS_FH(new_dir),
+		.old_name = old_name,
+		.new_name = new_name,
+	};
+	struct nfs4_rename_res res = { };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
+	int			status;
+	
+	status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
+
+	if (!status) {
+		update_changeattr(old_dir, &res.old_cinfo);
+		update_changeattr(new_dir, &res.new_cinfo);
+	}
+	return status;
+}
+
+static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+		struct inode *new_dir, struct qstr *new_name)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(old_dir),
+				_nfs4_proc_rename(old_dir, old_name,
+					new_dir, new_name),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+	struct nfs4_link_arg arg = {
+		.fh     = NFS_FH(inode),
+		.dir_fh = NFS_FH(dir),
+		.name   = name,
+	};
+	struct nfs4_change_info	cinfo = { };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
+		.rpc_argp = &arg,
+		.rpc_resp = &cinfo,
+	};
+	int			status;
+
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &cinfo);
+
+	return status;
+}
+
+static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(inode),
+				_nfs4_proc_link(inode, dir, name),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
+		struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle,
+		struct nfs_fattr *fattr)
+{
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = name,
+		.attrs = sattr,
+		.ftype = NF4LNK,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = fhandle,
+		.fattr = fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
+	int			status;
+
+	if (path->len > NFS4_MAXPATHLEN)
+		return -ENAMETOOLONG;
+	arg.u.symlink = path;
+	fattr->valid = 0;
+	
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &res.dir_cinfo);
+	return status;
+}
+
+static int nfs4_proc_symlink(struct inode *dir, struct qstr *name,
+		struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle,
+		struct nfs_fattr *fattr)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_symlink(dir, name, path, sattr,
+					fhandle, fattr),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+		struct iattr *sattr)
+{
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr;
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = &dentry->d_name,
+		.attrs = sattr,
+		.ftype = NF4DIR,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = &fhandle,
+		.fattr = &fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
+	int			status;
+
+	fattr.valid = 0;
+	
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (!status) {
+		update_changeattr(dir, &res.dir_cinfo);
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	}
+	return status;
+}
+
+static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+		struct iattr *sattr)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_mkdir(dir, dentry, sattr),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+                  u64 cookie, struct page *page, unsigned int count, int plus)
+{
+	struct inode		*dir = dentry->d_inode;
+	struct nfs4_readdir_arg args = {
+		.fh = NFS_FH(dir),
+		.pages = &page,
+		.pgbase = 0,
+		.count = count,
+		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
+	};
+	struct nfs4_readdir_res res;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+		.rpc_cred = cred,
+	};
+	int			status;
+
+	lock_kernel();
+	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+	res.pgbase = args.pgbase;
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
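+	/* Remember the cookie verifier so that later READDIR calls can resume. */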
+	if (status == 0)
+		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+	unlock_kernel();
+	return status;
+}
+
+static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+                  u64 cookie, struct page *page, unsigned int count, int plus)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
+				_nfs4_proc_readdir(dentry, cred, cookie,
+					page, count, plus),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+		struct iattr *sattr, dev_t rdev)
+{
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs_fh fh;
+	struct nfs_fattr fattr;
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = &dentry->d_name,
+		.attrs = sattr,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = &fh,
+		.fattr = &fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
+	int			status;
+	int                     mode = sattr->ia_mode;
+
+	fattr.valid = 0;
+
+	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
+	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
+	if (S_ISFIFO(mode))
+		arg.ftype = NF4FIFO;
+	else if (S_ISBLK(mode)) {
+		arg.ftype = NF4BLK;
+		arg.u.device.specdata1 = MAJOR(rdev);
+		arg.u.device.specdata2 = MINOR(rdev);
+	} else if (S_ISCHR(mode)) {
+		arg.ftype = NF4CHR;
+		arg.u.device.specdata1 = MAJOR(rdev);
+		arg.u.device.specdata2 = MINOR(rdev);
+	} else
+		arg.ftype = NF4SOCK;
+
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (status == 0) {
+		update_changeattr(dir, &res.dir_cinfo);
+		status = nfs_instantiate(dentry, &fh, &fattr);
+	}
+	return status;
+}
+
+static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+		struct iattr *sattr, dev_t rdev)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+		 struct nfs_fsstat *fsstat)
+{
+	struct nfs4_statfs_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
+		.rpc_argp = &args,
+		.rpc_resp = fsstat,
+	};
+
+	fsstat->fattr->valid = 0;
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_proc_statfs(server, fhandle, fsstat),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *fsinfo)
+{
+	struct nfs4_fsinfo_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
+		.rpc_argp = &args,
+		.rpc_resp = fsinfo,
+	};
+
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
+{
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_do_fsinfo(server, fhandle, fsinfo),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
+{
+	fsinfo->fattr->valid = 0;
+	return nfs4_do_fsinfo(server, fhandle, fsinfo);
+}
+
+static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_pathconf *pathconf)
+{
+	struct nfs4_pathconf_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
+		.rpc_argp = &args,
+		.rpc_resp = pathconf,
+	};
+
+	/* None of the pathconf attributes are mandatory to implement */
+	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
+		memset(pathconf, 0, sizeof(*pathconf));
+		return 0;
+	}
+
+	pathconf->fattr->valid = 0;
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_pathconf *pathconf)
+{
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = nfs4_handle_exception(server,
+				_nfs4_proc_pathconf(server, fhandle, pathconf),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static void
+nfs4_read_done(struct rpc_task *task)
+{
+	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct inode *inode = data->inode;
+
+	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
+		rpc_restart_call(task);
+		return;
+	}
+	if (task->tk_status > 0)
+		renew_lease(NFS_SERVER(inode), data->timestamp);
+	/* Call back common NFS readpage processing */
+	nfs_readpage_result(task);
+}
+
+static void
+nfs4_proc_read_setup(struct nfs_read_data *data)
+{
+	struct rpc_task	*task = &data->task;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ],
+		.rpc_argp = &data->args,
+		.rpc_resp = &data->res,
+		.rpc_cred = data->cred,
+	};
+	struct inode *inode = data->inode;
+	int flags;
+
+	data->timestamp   = jiffies;
+
+	/* N.B. Do we need to test? Never called for swapfile inode */
+	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs4_write_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct inode *inode = data->inode;
+	
+	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
+		rpc_restart_call(task);
+		return;
+	}
+	if (task->tk_status >= 0)
+		renew_lease(NFS_SERVER(inode), data->timestamp);
+	/* Call back common NFS writeback processing */
+	nfs_writeback_done(task);
+}
+
+static void
+nfs4_proc_write_setup(struct nfs_write_data *data, int how)
+{
+	struct rpc_task	*task = &data->task;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE],
+		.rpc_argp = &data->args,
+		.rpc_resp = &data->res,
+		.rpc_cred = data->cred,
+	};
+	struct inode *inode = data->inode;
+	int stable;
+	int flags;
+	
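+	/*
+	 * Pick the stable_how for the WRITE: FILE_SYNC if the caller wants a
+	 * stable write and nothing is awaiting a COMMIT, DATA_SYNC if commits
+	 * are pending, UNSTABLE otherwise.
+	 */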
+	if (how & FLUSH_STABLE) {
+		if (!NFS_I(inode)->ncommit)
+			stable = NFS_FILE_SYNC;
+		else
+			stable = NFS_DATA_SYNC;
+	} else
+		stable = NFS_UNSTABLE;
+	data->args.stable = stable;
+
+	data->timestamp   = jiffies;
+
+	/* Set the initial flags for the task.  */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_write_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs4_commit_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct inode *inode = data->inode;
+	
+	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
+		rpc_restart_call(task);
+		return;
+	}
+	/* Call back common NFS writeback processing */
+	nfs_commit_done(task);
+}
+
+static void
+nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
+{
+	struct rpc_task	*task = &data->task;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
+		.rpc_argp = &data->args,
+		.rpc_resp = &data->res,
+		.rpc_cred = data->cred,
+	};	
+	struct inode *inode = data->inode;
+	int flags;
+	
+	/* Set the initial flags for the task.  */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
+	rpc_call_setup(task, &msg, 0);	
+}
+
+/*
+ * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
+ * standalone procedure for queueing an asynchronous RENEW.
+ */
+static void
+renew_done(struct rpc_task *task)
+{
+	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
+	unsigned long timestamp = (unsigned long)task->tk_calldata;
+
+	if (task->tk_status < 0) {
+		switch (task->tk_status) {
+			case -NFS4ERR_STALE_CLIENTID:
+			case -NFS4ERR_EXPIRED:
+			case -NFS4ERR_CB_PATH_DOWN:
+				nfs4_schedule_state_recovery(clp);
+		}
+		return;
+	}
+	spin_lock(&clp->cl_lock);
+	if (time_before(clp->cl_last_renewal,timestamp))
+		clp->cl_last_renewal = timestamp;
+	spin_unlock(&clp->cl_lock);
+}
+
+int
+nfs4_proc_async_renew(struct nfs4_client *clp)
+{
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
+		.rpc_argp	= clp,
+		.rpc_cred	= clp->cl_cred,
+	};
+
+	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+			renew_done, (void *)jiffies);
+}
+
+int
+nfs4_proc_renew(struct nfs4_client *clp)
+{
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
+		.rpc_argp	= clp,
+		.rpc_cred	= clp->cl_cred,
+	};
+	unsigned long now = jiffies;
+	int status;
+
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+	if (status < 0)
+		return status;
+	spin_lock(&clp->cl_lock);
+	if (time_before(clp->cl_last_renewal,now))
+		clp->cl_last_renewal = now;
+	spin_unlock(&clp->cl_lock);
+	return 0;
+}
+
+/*
+ * We will need to arrange for the VFS layer to provide an atomic open.
+ * Until then, this open method is prone to inefficiency and race conditions
+ * due to the lookup, potential create, and open VFS calls from sys_open()
+ * placed on the wire.
+ */
+static int
+nfs4_proc_file_open(struct inode *inode, struct file *filp)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct nfs_open_context *ctx;
+	struct nfs4_state *state = NULL;
+	struct rpc_cred *cred;
+	int status = -ENOMEM;
+
+	dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
+	                       (int)dentry->d_parent->d_name.len,
+	                       dentry->d_parent->d_name.name,
+	                       (int)dentry->d_name.len, dentry->d_name.name);
+
+
+	/* Find our open stateid */
+	cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
+	if (IS_ERR(cred))
+		return PTR_ERR(cred);
+	ctx = alloc_nfs_open_context(dentry, cred);
+	put_rpccred(cred);
+	if (unlikely(ctx == NULL))
+		return -ENOMEM;
+	status = -EIO; /* ERACE actually */
+	state = nfs4_find_state(inode, cred, filp->f_mode);
+	if (unlikely(state == NULL))
+		goto no_state;
+	ctx->state = state;
+	nfs4_close_state(state, filp->f_mode);
+	ctx->mode = filp->f_mode;
+	nfs_file_set_open_context(filp, ctx);
+	put_nfs_open_context(ctx);
+	if (filp->f_mode & FMODE_WRITE)
+		nfs_begin_data_update(inode);
+	return 0;
+no_state:
+	printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
+	put_nfs_open_context(ctx);
+	return status;
+}
+
+/*
+ * Release our state
+ */
+static int
+nfs4_proc_file_release(struct inode *inode, struct file *filp)
+{
+	if (filp->f_mode & FMODE_WRITE)
+		nfs_end_data_update(inode);
+	nfs_file_clear_open_context(filp);
+	return 0;
+}
+
+static int
+nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
+{
+	struct nfs4_client *clp = server->nfs4_state;
+
+	if (!clp || task->tk_status >= 0)
+		return 0;
+	switch(task->tk_status) {
+		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_EXPIRED:
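+			/*
+			 * Park the task on the recovery waitqueue, kick off
+			 * state recovery, and retry once recovery completes.
+			 */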
+			rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
+			nfs4_schedule_state_recovery(clp);
+			if (test_bit(NFS4CLNT_OK, &clp->cl_state))
+				rpc_wake_up_task(task);
+			task->tk_status = 0;
+			return -EAGAIN;
+		case -NFS4ERR_GRACE:
+		case -NFS4ERR_DELAY:
+			rpc_delay(task, NFS4_POLL_RETRY_MAX);
+			task->tk_status = 0;
+			return -EAGAIN;
+		case -NFS4ERR_OLD_STATEID:
+			task->tk_status = 0;
+			return -EAGAIN;
+	}
+	task->tk_status = nfs4_map_errors(task->tk_status);
+	return 0;
+}
+
+static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp)
+{
+	DEFINE_WAIT(wait);
+	sigset_t oldset;
+	int interruptible, res = 0;
+
+	might_sleep();
+
+	rpc_clnt_sigmask(clnt, &oldset);
+	interruptible = TASK_UNINTERRUPTIBLE;
+	if (clnt->cl_intr)
+		interruptible = TASK_INTERRUPTIBLE;
+	prepare_to_wait(&clp->cl_waitq, &wait, interruptible);
+	nfs4_schedule_state_recovery(clp);
+	if (clnt->cl_intr && signalled())
+		res = -ERESTARTSYS;
+	else if (!test_bit(NFS4CLNT_OK, &clp->cl_state))
+		schedule();
+	finish_wait(&clp->cl_waitq, &wait);
+	rpc_clnt_sigunmask(clnt, &oldset);
+	return res;
+}
+
+static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
+{
+	sigset_t oldset;
+	int res = 0;
+
+	might_sleep();
+
+	if (*timeout <= 0)
+		*timeout = NFS4_POLL_RETRY_MIN;
+	if (*timeout > NFS4_POLL_RETRY_MAX)
+		*timeout = NFS4_POLL_RETRY_MAX;
+	rpc_clnt_sigmask(clnt, &oldset);
+	if (clnt->cl_intr) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(*timeout);
+		if (signalled())
+			res = -ERESTARTSYS;
+	} else {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(*timeout);
+	}
+	rpc_clnt_sigunmask(clnt, &oldset);
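+	/* Double the delay for the next retry (exponential backoff). */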
+	*timeout <<= 1;
+	return res;
+}
+
+/* This is the error handling routine for processes that are allowed
+ * to sleep.
+ */
+int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+{
+	struct nfs4_client *clp = server->nfs4_state;
+	int ret = errorcode;
+
+	exception->retry = 0;
+	switch(errorcode) {
+		case 0:
+			return 0;
+		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_EXPIRED:
+			ret = nfs4_wait_clnt_recover(server->client, clp);
+			if (ret == 0)
+				exception->retry = 1;
+			break;
+		case -NFS4ERR_GRACE:
+		case -NFS4ERR_DELAY:
+			ret = nfs4_delay(server->client, &exception->timeout);
+			if (ret == 0)
+				exception->retry = 1;
+			break;
+		case -NFS4ERR_OLD_STATEID:
+			exception->retry = 1;
+	}
+	/* We failed to handle the error */
+	return nfs4_map_errors(ret);
+}
+
+int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short port)
+{
+	nfs4_verifier sc_verifier;
+	struct nfs4_setclientid setclientid = {
+		.sc_verifier = &sc_verifier,
+		.sc_prog = program,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
+		.rpc_argp = &setclientid,
+		.rpc_resp = clp,
+		.rpc_cred = clp->cl_cred,
+	};
+	u32 *p;
+	int loop = 0;
+	int status;
+
+	p = (u32*)sc_verifier.data;
+	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
+	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
+
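+	/*
+	 * If the server reports NFS4ERR_CLID_INUSE, alternately bump the
+	 * client id uniquifier and wait out a lease period before retrying.
+	 */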
+	for(;;) {
+		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
+				sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u %s %u",
+				clp->cl_ipaddr, NIPQUAD(clp->cl_addr.s_addr),
+				clp->cl_cred->cr_ops->cr_name,
+				clp->cl_id_uniquifier);
+		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
+				sizeof(setclientid.sc_netid), "tcp");
+		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
+				sizeof(setclientid.sc_uaddr), "%s.%d.%d",
+				clp->cl_ipaddr, port >> 8, port & 255);
+
+		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+		if (status != -NFS4ERR_CLID_INUSE)
+			break;
+		if (signalled())
+			break;
+		if (loop++ & 1)
+			ssleep(clp->cl_lease_time + 1);
+		else
+			if (++clp->cl_id_uniquifier == 0)
+				break;
+	}
+	return status;
+}
+
+int
+nfs4_proc_setclientid_confirm(struct nfs4_client *clp)
+{
+	struct nfs_fsinfo fsinfo;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
+		.rpc_argp = clp,
+		.rpc_resp = &fsinfo,
+		.rpc_cred = clp->cl_cred,
+	};
+	unsigned long now;
+	int status;
+
+	now = jiffies;
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+	if (status == 0) {
+		spin_lock(&clp->cl_lock);
+		clp->cl_lease_time = fsinfo.lease_time * HZ;
+		clp->cl_last_renewal = now;
+		spin_unlock(&clp->cl_lock);
+	}
+	return status;
+}
+
+static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
+{
+	struct nfs4_delegreturnargs args = {
+		.fhandle = NFS_FH(inode),
+		.stateid = stateid,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
+		.rpc_argp = &args,
+		.rpc_cred = cred,
+	};
+
+	return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+}
+
+int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = _nfs4_proc_delegreturn(inode, cred, stateid);
+		switch (err) {
+			case -NFS4ERR_STALE_STATEID:
+			case -NFS4ERR_EXPIRED:
+				nfs4_schedule_state_recovery(server->nfs4_state);
+			case 0:
+				return 0;
+		}
+		err = nfs4_handle_exception(server, err, &exception);
+	} while (exception.retry);
+	return err;
+}
+
+#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
+#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
+
+/* 
+ * sleep, with exponential backoff, and retry the LOCK operation. 
+ */
+static unsigned long
+nfs4_set_lock_task_retry(unsigned long timeout)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule_timeout(timeout);
+	timeout <<= 1;
+	if (timeout > NFS4_LOCK_MAXTIMEOUT)
+		return NFS4_LOCK_MAXTIMEOUT;
+	return timeout;
+}
+
+static inline int
+nfs4_lck_type(int cmd, struct file_lock *request)
+{
+	/* set lock type */
+	switch (request->fl_type) {
+		case F_RDLCK:
+			return IS_SETLKW(cmd) ? NFS4_READW_LT : NFS4_READ_LT;
+		case F_WRLCK:
+			return IS_SETLKW(cmd) ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
+		case F_UNLCK:
+			return NFS4_WRITE_LT; 
+	}
+	BUG();
+	return 0;
+}
+
+static inline uint64_t
+nfs4_lck_length(struct file_lock *request)
+{
+	if (request->fl_end == OFFSET_MAX)
+		return ~(uint64_t)0;
+	return request->fl_end - request->fl_start + 1;
+}
+
+static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_client *clp = server->nfs4_state;
+	struct nfs_lockargs arg = {
+		.fh = NFS_FH(inode),
+		.type = nfs4_lck_type(cmd, request),
+		.offset = request->fl_start,
+		.length = nfs4_lck_length(request),
+	};
+	struct nfs_lockres res = {
+		.server = server,
+	};
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
+		.rpc_argp       = &arg,
+		.rpc_resp       = &res,
+		.rpc_cred	= state->owner->so_cred,
+	};
+	struct nfs_lowner nlo;
+	struct nfs4_lock_state *lsp;
+	int status;
+
+	down_read(&clp->cl_sem);
+	nlo.clientid = clp->cl_clientid;
+	down(&state->lock_sema);
+	lsp = nfs4_find_lock_state(state, request->fl_owner);
+	if (lsp)
+		nlo.id = lsp->ls_id; 
+	else {
+		spin_lock(&clp->cl_lock);
+		nlo.id = nfs4_alloc_lockowner_id(clp);
+		spin_unlock(&clp->cl_lock);
+	}
+	arg.u.lockt = &nlo;
+	status = rpc_call_sync(server->client, &msg, 0);
+	if (!status) {
+		request->fl_type = F_UNLCK;
+	} else if (status == -NFS4ERR_DENIED) {
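+		/*
+		 * Someone else holds a conflicting lock: report its range and
+		 * type back to the caller via 'request'.
+		 */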
+		int64_t len, start, end;
+		start = res.u.denied.offset;
+		len = res.u.denied.length;
+		end = start + len - 1;
+		if (end < 0 || len == 0)
+			request->fl_end = OFFSET_MAX;
+		else
+			request->fl_end = (loff_t)end;
+		request->fl_start = (loff_t)start;
+		request->fl_type = F_WRLCK;
+		if (res.u.denied.type & 1)
+			request->fl_type = F_RDLCK;
+		request->fl_pid = 0;
+		status = 0;
+	}
+	if (lsp)
+		nfs4_put_lock_state(lsp);
+	up(&state->lock_sema);
+	up_read(&clp->cl_sem);
+	return status;
+}
+
+static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(state->inode),
+				_nfs4_proc_getlk(state, cmd, request),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int do_vfs_lock(struct file *file, struct file_lock *fl)
+{
+	int res = 0;
+	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+		case FL_POSIX:
+			res = posix_lock_file_wait(file, fl);
+			break;
+		case FL_FLOCK:
+			res = flock_lock_file_wait(file, fl);
+			break;
+		default:
+			BUG();
+	}
+	if (res < 0)
+		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
+				__FUNCTION__);
+	return res;
+}
+
+static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_client *clp = server->nfs4_state;
+	struct nfs_lockargs arg = {
+		.fh = NFS_FH(inode),
+		.type = nfs4_lck_type(cmd, request),
+		.offset = request->fl_start,
+		.length = nfs4_lck_length(request),
+	};
+	struct nfs_lockres res = {
+		.server = server,
+	};
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
+		.rpc_argp       = &arg,
+		.rpc_resp       = &res,
+		.rpc_cred	= state->owner->so_cred,
+	};
+	struct nfs4_lock_state *lsp;
+	struct nfs_locku_opargs luargs;
+	int status = 0;
+
+	down_read(&clp->cl_sem);
+	down(&state->lock_sema);
+	lsp = nfs4_find_lock_state(state, request->fl_owner);
+	if (!lsp)
+		goto out;
+	/* We might have lost the locks! */
+	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
+		luargs.seqid = lsp->ls_seqid;
+		memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
+		arg.u.locku = &luargs;
+		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+		nfs4_increment_lock_seqid(status, lsp);
+	}
+
+	if (status == 0) {
+		memcpy(&lsp->ls_stateid,  &res.u.stateid, 
+				sizeof(lsp->ls_stateid));
+		nfs4_notify_unlck(state, request, lsp);
+	}
+	nfs4_put_lock_state(lsp);
+out:
+	up(&state->lock_sema);
+	if (status == 0)
+		do_vfs_lock(request->fl_file, request);
+	up_read(&clp->cl_sem);
+	return status;
+}
+
+static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(state->inode),
+				_nfs4_proc_unlck(state, cmd, request),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
+{
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_lock_state *lsp;
+	struct nfs_lockargs arg = {
+		.fh = NFS_FH(inode),
+		.type = nfs4_lck_type(cmd, request),
+		.offset = request->fl_start,
+		.length = nfs4_lck_length(request),
+	};
+	struct nfs_lockres res = {
+		.server = server,
+	};
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCK],
+		.rpc_argp       = &arg,
+		.rpc_resp       = &res,
+		.rpc_cred	= state->owner->so_cred,
+	};
+	struct nfs_lock_opargs largs = {
+		.reclaim = reclaim,
+		.new_lock_owner = 0,
+	};
+	int status;
+
+	lsp = nfs4_get_lock_state(state, request->fl_owner);
+	if (lsp == NULL)
+		return -ENOMEM;
+	if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
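+		/*
+		 * First LOCK request by this lock owner: the server expects an
+		 * open_to_lock_owner with the open stateid and open seqid.
+		 */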
+		struct nfs4_state_owner *owner = state->owner;
+		struct nfs_open_to_lock otl = {
+			.lock_owner = {
+				.clientid = server->nfs4_state->cl_clientid,
+			},
+		};
+
+		otl.lock_seqid = lsp->ls_seqid;
+		otl.lock_owner.id = lsp->ls_id;
+		memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid));
+		largs.u.open_lock = &otl;
+		largs.new_lock_owner = 1;
+		arg.u.lock = &largs;
+		down(&owner->so_sema);
+		otl.open_seqid = owner->so_seqid;
+		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+		/* Increment the open_owner seqid on success and on
+		 * seqid-mutating errors. */
+		nfs4_increment_seqid(status, owner);
+		up(&owner->so_sema);
+	} else {
+		struct nfs_exist_lock el = {
+			.seqid = lsp->ls_seqid,
+		};
+		memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
+		largs.u.exist_lock = &el;
+		largs.new_lock_owner = 0;
+		arg.u.lock = &largs;
+		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	}
+	/* Increment the lock seqid on success and on seqid-mutating errors. */
+	nfs4_increment_lock_seqid(status, lsp);
+	/* save the returned stateid. */
+	if (status == 0) {
+		memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
+		lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+		if (!reclaim)
+			nfs4_notify_setlk(state, request, lsp);
+	} else if (status == -NFS4ERR_DENIED)
+		status = -EAGAIN;
+	nfs4_put_lock_state(lsp);
+	return status;
+}
+
+static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
+{
+	return _nfs4_do_setlk(state, F_SETLK, request, 1);
+}
+
+static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
+{
+	return _nfs4_do_setlk(state, F_SETLK, request, 0);
+}
+
+static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct nfs4_client *clp = state->owner->so_client;
+	int status;
+
+	down_read(&clp->cl_sem);
+	down(&state->lock_sema);
+	status = _nfs4_do_setlk(state, cmd, request, 0);
+	up(&state->lock_sema);
+	if (status == 0) {
+		/* Note: we always want to sleep here! */
+		request->fl_flags |= FL_SLEEP;
+		if (do_vfs_lock(request->fl_file, request) < 0)
+			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
+	}
+	up_read(&clp->cl_sem);
+	return status;
+}
+
+static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(state->inode),
+				_nfs4_proc_setlk(state, cmd, request),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
+static int
+nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
+{
+	struct nfs_open_context *ctx;
+	struct nfs4_state *state;
+	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
+	int status;
+
+	/* verify open state */
+	ctx = (struct nfs_open_context *)filp->private_data;
+	state = ctx->state;
+
+	if (request->fl_start < 0 || request->fl_end < 0)
+		return -EINVAL;
+
+	if (IS_GETLK(cmd))
+		return nfs4_proc_getlk(state, F_GETLK, request);
+
+	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
+		return -EINVAL;
+
+	if (request->fl_type == F_UNLCK)
+		return nfs4_proc_unlck(state, cmd, request);
+
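+	/*
+	 * For blocking locks (SETLKW), poll with exponential backoff until
+	 * the lock is granted or the process is signalled.
+	 */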
+	do {
+		status = nfs4_proc_setlk(state, cmd, request);
+		if ((status != -EAGAIN) || IS_SETLK(cmd))
+			break;
+		timeout = nfs4_set_lock_task_retry(timeout);
+		status = -ERESTARTSYS;
+		if (signalled())
+			break;
+	} while(status < 0);
+
+	return status;
+}
+
+struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
+	.recover_open	= nfs4_open_reclaim,
+	.recover_lock	= nfs4_lock_reclaim,
+};
+
+struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
+	.recover_open	= nfs4_open_expired,
+	.recover_lock	= nfs4_lock_expired,
+};
+
+struct nfs_rpc_ops	nfs_v4_clientops = {
+	.version	= 4,			/* protocol version */
+	.dentry_ops	= &nfs4_dentry_operations,
+	.dir_inode_ops	= &nfs4_dir_inode_operations,
+	.getroot	= nfs4_proc_get_root,
+	.getattr	= nfs4_proc_getattr,
+	.setattr	= nfs4_proc_setattr,
+	.lookup		= nfs4_proc_lookup,
+	.access		= nfs4_proc_access,
+	.readlink	= nfs4_proc_readlink,
+	.read		= nfs4_proc_read,
+	.write		= nfs4_proc_write,
+	.commit		= nfs4_proc_commit,
+	.create		= nfs4_proc_create,
+	.remove		= nfs4_proc_remove,
+	.unlink_setup	= nfs4_proc_unlink_setup,
+	.unlink_done	= nfs4_proc_unlink_done,
+	.rename		= nfs4_proc_rename,
+	.link		= nfs4_proc_link,
+	.symlink	= nfs4_proc_symlink,
+	.mkdir		= nfs4_proc_mkdir,
+	.rmdir		= nfs4_proc_remove,
+	.readdir	= nfs4_proc_readdir,
+	.mknod		= nfs4_proc_mknod,
+	.statfs		= nfs4_proc_statfs,
+	.fsinfo		= nfs4_proc_fsinfo,
+	.pathconf	= nfs4_proc_pathconf,
+	.decode_dirent	= nfs4_decode_dirent,
+	.read_setup	= nfs4_proc_read_setup,
+	.write_setup	= nfs4_proc_write_setup,
+	.commit_setup	= nfs4_proc_commit_setup,
+	.file_open      = nfs4_proc_file_open,
+	.file_release   = nfs4_proc_file_release,
+	.lock		= nfs4_proc_lock,
+};
+
+/*
+ * Local variables:
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
new file mode 100644
index 0000000..667e06f
--- /dev/null
+++ b/fs/nfs/nfs4renewd.c
@@ -0,0 +1,148 @@
+/*
+ *  fs/nfs/nfs4renewd.c
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 "renew daemon", which wakes up periodically to
+ * send a RENEW, to keep state alive on the server.  The daemon is implemented
+ * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
+ * context.  There is one renewd per nfs4_client.
+ *
+ * TODO: If the send queue gets backlogged (e.g., if the server goes down),
+ * we will keep filling the queue with periodic RENEW requests.  We need a
+ * mechanism for ensuring that if renewd successfully sends off a request,
+ * then it only wakes up when the request is finished.  Maybe use the
+ * child task framework of the RPC layer?
+ */
+
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+
+#define NFSDBG_FACILITY	NFSDBG_PROC
+
+void
+nfs4_renew_state(void *data)
+{
+	struct nfs4_client *clp = (struct nfs4_client *)data;
+	long lease, timeout;
+	unsigned long last, now;
+
+	down_read(&clp->cl_sem);
+	dprintk("%s: start\n", __FUNCTION__);
+	/* Are there any active superblocks? */
+	if (list_empty(&clp->cl_superblocks))
+		goto out; 
+	spin_lock(&clp->cl_lock);
+	lease = clp->cl_lease_time;
+	last = clp->cl_last_renewal;
+	now = jiffies;
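+	/*
+	 * Renew if more than a third of the lease has elapsed since the last
+	 * renewal; otherwise requeue ourselves to run when two thirds of the
+	 * lease period will have passed.
+	 */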
+	timeout = (2 * lease) / 3 + (long)last - (long)now;
+	/* Are we close to a lease timeout? */
+	if (time_after(now, last + lease/3)) {
+		spin_unlock(&clp->cl_lock);
+		/* Queue an asynchronous RENEW. */
+		nfs4_proc_async_renew(clp);
+		timeout = (2 * lease) / 3;
+		spin_lock(&clp->cl_lock);
+	} else
+		dprintk("%s: not calling renewd: lease not close to expiring\n",
+				__FUNCTION__);
+	if (timeout < 5 * HZ)    /* safeguard */
+		timeout = 5 * HZ;
+	dprintk("%s: requeueing work, renewd will run again in %ld seconds\n",
+			__FUNCTION__, (timeout + HZ - 1) / HZ);
+	cancel_delayed_work(&clp->cl_renewd);
+	schedule_delayed_work(&clp->cl_renewd, timeout);
+	spin_unlock(&clp->cl_lock);
+out:
+	up_read(&clp->cl_sem);
+	dprintk("%s: done\n", __FUNCTION__);
+}
+
+/* Must be called with clp->cl_sem locked for writes */
+void
+nfs4_schedule_state_renewal(struct nfs4_client *clp)
+{
+	long timeout;
+
+	spin_lock(&clp->cl_lock);
+	timeout = (2 * clp->cl_lease_time) / 3 + (long)clp->cl_last_renewal
+		- (long)jiffies;
+	if (timeout < 5 * HZ)
+		timeout = 5 * HZ;
+	dprintk("%s: requeueing work, renewd will run again in %ld seconds\n",
+			__FUNCTION__, (timeout + HZ - 1) / HZ);
+	cancel_delayed_work(&clp->cl_renewd);
+	schedule_delayed_work(&clp->cl_renewd, timeout);
+	spin_unlock(&clp->cl_lock);
+}
+
+void
+nfs4_renewd_prepare_shutdown(struct nfs_server *server)
+{
+	struct nfs4_client *clp = server->nfs4_state;
+
+	if (!clp)
+		return;
+	flush_scheduled_work();
+	down_write(&clp->cl_sem);
+	if (!list_empty(&server->nfs4_siblings))
+		list_del_init(&server->nfs4_siblings);
+	up_write(&clp->cl_sem);
+}
+
+/* Must be called with clp->cl_sem locked for writes */
+void
+nfs4_kill_renewd(struct nfs4_client *clp)
+{
+	down_read(&clp->cl_sem);
+	if (!list_empty(&clp->cl_superblocks)) {
+		up_read(&clp->cl_sem);
+		return;
+	}
+	cancel_delayed_work(&clp->cl_renewd);
+	up_read(&clp->cl_sem);
+	flush_scheduled_work();
+}
+
+/*
+ * Local variables:
+ *   c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
new file mode 100644
index 0000000..231cebc
--- /dev/null
+++ b/fs/nfs/nfs4state.c
@@ -0,0 +1,932 @@
+/*
+ *  fs/nfs/nfs4state.c
+ *
+ *  Client-side NFSv4 state management.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 state model.  For the time being,
+ * this is minimal, but will be made much more complex in a
+ * subsequent patch.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_idmap.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+
+#include "callback.h"
+#include "delegation.h"
+
+#define OPENOWNER_POOL_SIZE	8
+
+static DEFINE_SPINLOCK(state_spinlock);
+
+nfs4_stateid zero_stateid;
+
+#if 0
+nfs4_stateid one_stateid =
+	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+#endif
+
+static LIST_HEAD(nfs4_clientid_list);
+
+static void nfs4_recover_state(void *);
+extern void nfs4_renew_state(void *);
+
+void
+init_nfsv4_state(struct nfs_server *server)
+{
+	server->nfs4_state = NULL;
+	INIT_LIST_HEAD(&server->nfs4_siblings);
+}
+
+void
+destroy_nfsv4_state(struct nfs_server *server)
+{
+	if (server->mnt_path) {
+		kfree(server->mnt_path);
+		server->mnt_path = NULL;
+	}
+	if (server->nfs4_state) {
+		nfs4_put_client(server->nfs4_state);
+		server->nfs4_state = NULL;
+	}
+}
+
+/*
+ * nfs4_get_client(): returns an empty client structure
+ * nfs4_put_client(): drops reference to client structure
+ *
+ * Since these are allocated/deallocated very rarely, we don't
+ * bother putting them in a slab cache...
+ */
+static struct nfs4_client *
+nfs4_alloc_client(struct in_addr *addr)
+{
+	struct nfs4_client *clp;
+
+	if (nfs_callback_up() < 0)
+		return NULL;
+	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
+		nfs_callback_down();
+		return NULL;
+	}
+	memset(clp, 0, sizeof(*clp));
+	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
+	init_rwsem(&clp->cl_sem);
+	INIT_LIST_HEAD(&clp->cl_delegations);
+	INIT_LIST_HEAD(&clp->cl_state_owners);
+	INIT_LIST_HEAD(&clp->cl_unused);
+	spin_lock_init(&clp->cl_lock);
+	atomic_set(&clp->cl_count, 1);
+	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
+	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_LIST_HEAD(&clp->cl_superblocks);
+	init_waitqueue_head(&clp->cl_waitq);
+	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
+	clp->cl_boot_time = CURRENT_TIME;
+	clp->cl_state = 1 << NFS4CLNT_OK;
+	return clp;
+}
+
+static void
+nfs4_free_client(struct nfs4_client *clp)
+{
+	struct nfs4_state_owner *sp;
+
+	while (!list_empty(&clp->cl_unused)) {
+		sp = list_entry(clp->cl_unused.next,
+				struct nfs4_state_owner,
+				so_list);
+		list_del(&sp->so_list);
+		kfree(sp);
+	}
+	BUG_ON(!list_empty(&clp->cl_state_owners));
+	if (clp->cl_cred)
+		put_rpccred(clp->cl_cred);
+	nfs_idmap_delete(clp);
+	if (clp->cl_rpcclient)
+		rpc_shutdown_client(clp->cl_rpcclient);
+	kfree(clp);
+	nfs_callback_down();
+}
+
+static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
+{
+	struct nfs4_client *clp;
+	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
+		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
+			atomic_inc(&clp->cl_count);
+			return clp;
+		}
+	}
+	return NULL;
+}
+
+struct nfs4_client *nfs4_find_client(struct in_addr *addr)
+{
+	struct nfs4_client *clp;
+	spin_lock(&state_spinlock);
+	clp = __nfs4_find_client(addr);
+	spin_unlock(&state_spinlock);
+	return clp;
+}
+
+struct nfs4_client *
+nfs4_get_client(struct in_addr *addr)
+{
+	struct nfs4_client *clp, *new = NULL;
+
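+	/*
+	 * kmalloc may sleep, so the allocation is done with the spinlock
+	 * dropped; afterwards we re-check for a client added by a
+	 * concurrent caller and discard our copy if one is found.
+	 */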
+	spin_lock(&state_spinlock);
+	for (;;) {
+		clp = __nfs4_find_client(addr);
+		if (clp != NULL)
+			break;
+		clp = new;
+		if (clp != NULL) {
+			list_add(&clp->cl_servers, &nfs4_clientid_list);
+			new = NULL;
+			break;
+		}
+		spin_unlock(&state_spinlock);
+		new = nfs4_alloc_client(addr);
+		spin_lock(&state_spinlock);
+		if (new == NULL)
+			break;
+	}
+	spin_unlock(&state_spinlock);
+	if (new)
+		nfs4_free_client(new);
+	return clp;
+}
+
+void
+nfs4_put_client(struct nfs4_client *clp)
+{
+	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
+		return;
+	list_del(&clp->cl_servers);
+	spin_unlock(&state_spinlock);
+	BUG_ON(!list_empty(&clp->cl_superblocks));
+	wake_up_all(&clp->cl_waitq);
+	rpc_wake_up(&clp->cl_rpcwaitq);
+	nfs4_kill_renewd(clp);
+	nfs4_free_client(clp);
+}
+
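+/*
+ * Establish our clientid on the server (SETCLIENTID followed by
+ * SETCLIENTID_CONFIRM) and, on success, schedule lease renewal.
+ */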
+static int __nfs4_init_client(struct nfs4_client *clp)
+{
+	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
+	if (status == 0)
+		status = nfs4_proc_setclientid_confirm(clp);
+	if (status == 0)
+		nfs4_schedule_state_renewal(clp);
+	return status;
+}
+
+int nfs4_init_client(struct nfs4_client *clp)
+{
+	return nfs4_map_errors(__nfs4_init_client(clp));
+}
+
+u32
+nfs4_alloc_lockowner_id(struct nfs4_client *clp)
+{
+	return clp->cl_lockowner_id ++;
+}
+
+static struct nfs4_state_owner *
+nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
+{
+	struct nfs4_state_owner *sp = NULL;
+
+	if (!list_empty(&clp->cl_unused)) {
+		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
+		atomic_inc(&sp->so_count);
+		sp->so_cred = cred;
+		list_move(&sp->so_list, &clp->cl_state_owners);
+		clp->cl_nunused--;
+	}
+	return sp;
+}
+
+static struct nfs4_state_owner *
+nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
+{
+	struct nfs4_state_owner *sp, *res = NULL;
+
+	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+		if (sp->so_cred != cred)
+			continue;
+		atomic_inc(&sp->so_count);
+		/* Move to the head of the list */
+		list_move(&sp->so_list, &clp->cl_state_owners);
+		res = sp;
+		break;
+	}
+	return res;
+}
+
+/*
+ * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
+ * create a new state_owner.
+ */
+static struct nfs4_state_owner *
+nfs4_alloc_state_owner(void)
+{
+	struct nfs4_state_owner *sp;
+
+	sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+	if (!sp)
+		return NULL;
+	init_MUTEX(&sp->so_sema);
+	sp->so_seqid = 0;                 /* arbitrary */
+	INIT_LIST_HEAD(&sp->so_states);
+	INIT_LIST_HEAD(&sp->so_delegations);
+	atomic_set(&sp->so_count, 1);
+	return sp;
+}
+
+void
+nfs4_drop_state_owner(struct nfs4_state_owner *sp)
+{
+	struct nfs4_client *clp = sp->so_client;
+	spin_lock(&clp->cl_lock);
+	list_del_init(&sp->so_list);
+	spin_unlock(&clp->cl_lock);
+}
+
+/*
+ * Note: must be called with clp->cl_sem held in order to prevent races
+ *       with reboot recovery!
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+{
+	struct nfs4_client *clp = server->nfs4_state;
+	struct nfs4_state_owner *sp, *new;
+
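+	/*
+	 * Allocate a candidate state_owner before taking cl_lock (the
+	 * allocation may sleep); it is freed below if an existing or
+	 * cached owner can be reused instead.
+	 */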
+	get_rpccred(cred);
+	new = nfs4_alloc_state_owner();
+	spin_lock(&clp->cl_lock);
+	sp = nfs4_find_state_owner(clp, cred);
+	if (sp == NULL)
+		sp = nfs4_client_grab_unused(clp, cred);
+	if (sp == NULL && new != NULL) {
+		list_add(&new->so_list, &clp->cl_state_owners);
+		new->so_client = clp;
+		new->so_id = nfs4_alloc_lockowner_id(clp);
+		new->so_cred = cred;
+		sp = new;
+		new = NULL;
+	}
+	spin_unlock(&clp->cl_lock);
+	if (new)
+		kfree(new);
+	if (sp != NULL)
+		return sp;
+	put_rpccred(cred);
+	return NULL;
+}
+
+/*
+ * Must be called with clp->cl_sem held in order to avoid races
+ * with state recovery...
+ */
+void nfs4_put_state_owner(struct nfs4_state_owner *sp)
+{
+	struct nfs4_client *clp = sp->so_client;
+	struct rpc_cred *cred = sp->so_cred;
+
+	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
+		return;
+	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
+		goto out_free;
+	if (list_empty(&sp->so_list))
+		goto out_free;
+	list_move(&sp->so_list, &clp->cl_unused);
+	clp->cl_nunused++;
+	spin_unlock(&clp->cl_lock);
+	put_rpccred(cred);
+	cred = NULL;
+	return;
+out_free:
+	list_del(&sp->so_list);
+	spin_unlock(&clp->cl_lock);
+	put_rpccred(cred);
+	kfree(sp);
+}
+
+static struct nfs4_state *
+nfs4_alloc_open_state(void)
+{
+	struct nfs4_state *state;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+	state->state = 0;
+	state->nreaders = 0;
+	state->nwriters = 0;
+	state->flags = 0;
+	memset(state->stateid.data, 0, sizeof(state->stateid.data));
+	atomic_set(&state->count, 1);
+	INIT_LIST_HEAD(&state->lock_states);
+	init_MUTEX(&state->lock_sema);
+	rwlock_init(&state->state_lock);
+	return state;
+}
+
+static struct nfs4_state *
+__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs4_state *state;
+
+	mode &= (FMODE_READ|FMODE_WRITE);
+	list_for_each_entry(state, &nfsi->open_states, inode_states) {
+		if (state->owner->so_cred != cred)
+			continue;
+		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
+			continue;
+		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
+			continue;
+		if ((state->state & mode) != mode)
+			continue;
+		atomic_inc(&state->count);
+		if (mode & FMODE_READ)
+			state->nreaders++;
+		if (mode & FMODE_WRITE)
+			state->nwriters++;
+		return state;
+	}
+	return NULL;
+}
+
+static struct nfs4_state *
+__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs4_state *state;
+
+	list_for_each_entry(state, &nfsi->open_states, inode_states) {
+		/* Is this in the process of being freed? */
+		if (state->nreaders == 0 && state->nwriters == 0)
+			continue;
+		if (state->owner == owner) {
+			atomic_inc(&state->count);
+			return state;
+		}
+	}
+	return NULL;
+}
+
+struct nfs4_state *
+nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
+{
+	struct nfs4_state *state;
+
+	spin_lock(&inode->i_lock);
+	state = __nfs4_find_state(inode, cred, mode);
+	spin_unlock(&inode->i_lock);
+	return state;
+}
+
+static void
+nfs4_free_open_state(struct nfs4_state *state)
+{
+	kfree(state);
+}
+
+struct nfs4_state *
+nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
+{
+	struct nfs4_state *state, *new;
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	spin_lock(&inode->i_lock);
+	state = __nfs4_find_state_byowner(inode, owner);
+	spin_unlock(&inode->i_lock);
+	if (state)
+		goto out;
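+	/* Allocate outside the spinlock, then re-check for a race. */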
+	new = nfs4_alloc_open_state();
+	spin_lock(&inode->i_lock);
+	state = __nfs4_find_state_byowner(inode, owner);
+	if (state == NULL && new != NULL) {
+		state = new;
+		/* Caller *must* be holding owner->so_sem */
+		/* Note: The reclaim code dictates that we add stateless
+		 * and read-only stateids to the end of the list */
+		list_add_tail(&state->open_states, &owner->so_states);
+		state->owner = owner;
+		atomic_inc(&owner->so_count);
+		list_add(&state->inode_states, &nfsi->open_states);
+		state->inode = igrab(inode);
+		spin_unlock(&inode->i_lock);
+	} else {
+		spin_unlock(&inode->i_lock);
+		if (new)
+			nfs4_free_open_state(new);
+	}
+out:
+	return state;
+}
+
+/*
+ * Beware! Caller must be holding exactly one
+ * reference to clp->cl_sem and owner->so_sema!
+ */
+void nfs4_put_open_state(struct nfs4_state *state)
+{
+	struct inode *inode = state->inode;
+	struct nfs4_state_owner *owner = state->owner;
+
+	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
+		return;
+	if (!list_empty(&state->inode_states))
+		list_del(&state->inode_states);
+	spin_unlock(&inode->i_lock);
+	list_del(&state->open_states);
+	iput(inode);
+	BUG_ON (state->state != 0);
+	nfs4_free_open_state(state);
+	nfs4_put_state_owner(owner);
+}
+
+/*
+ * Beware! The caller must not be holding any references to clp->cl_sem
+ * or owner->so_sema!
+ */
+void nfs4_close_state(struct nfs4_state *state, mode_t mode)
+{
+	struct inode *inode = state->inode;
+	struct nfs4_state_owner *owner = state->owner;
+	struct nfs4_client *clp = owner->so_client;
+	int newstate;
+
+	atomic_inc(&owner->so_count);
+	down_read(&clp->cl_sem);
+	down(&owner->so_sema);
+	/* Protect against nfs4_find_state() */
+	spin_lock(&inode->i_lock);
+	if (mode & FMODE_READ)
+		state->nreaders--;
+	if (mode & FMODE_WRITE)
+		state->nwriters--;
+	if (state->nwriters == 0) {
+		if (state->nreaders == 0)
+			list_del_init(&state->inode_states);
+		/* See reclaim code */
+		list_move_tail(&state->open_states, &owner->so_states);
+	}
+	spin_unlock(&inode->i_lock);
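+	/*
+	 * Work out which share modes remain in use; if that differs from
+	 * the state held on the server, close or downgrade the open.
+	 */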
+	newstate = 0;
+	if (state->state != 0) {
+		if (state->nreaders)
+			newstate |= FMODE_READ;
+		if (state->nwriters)
+			newstate |= FMODE_WRITE;
+		if (state->state == newstate)
+			goto out;
+		if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
+			return;
+	}
+out:
+	nfs4_put_open_state(state);
+	up(&owner->so_sema);
+	nfs4_put_state_owner(owner);
+	up_read(&clp->cl_sem);
+}
+
+/*
+ * Search the state->lock_states for an existing lock_owner
+ * that is compatible with current->files
+ */
+static struct nfs4_lock_state *
+__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+{
+	struct nfs4_lock_state *pos;
+	list_for_each_entry(pos, &state->lock_states, ls_locks) {
+		if (pos->ls_owner != fl_owner)
+			continue;
+		atomic_inc(&pos->ls_count);
+		return pos;
+	}
+	return NULL;
+}
+
+struct nfs4_lock_state *
+nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+{
+	struct nfs4_lock_state *lsp;
+	read_lock(&state->state_lock);
+	lsp = __nfs4_find_lock_state(state, fl_owner);
+	read_unlock(&state->state_lock);
+	return lsp;
+}
+
+/*
+ * Return a compatible lock_state. If no initialized lock_state structure
+ * exists, return an uninitialized one.
+ *
+ * The caller must be holding state->lock_sema
+ */
+static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+{
+	struct nfs4_lock_state *lsp;
+	struct nfs4_client *clp = state->owner->so_client;
+
+	lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
+	if (lsp == NULL)
+		return NULL;
+	lsp->ls_flags = 0;
+	lsp->ls_seqid = 0;	/* arbitrary */
+	lsp->ls_id = -1; 
+	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
+	atomic_set(&lsp->ls_count, 1);
+	lsp->ls_owner = fl_owner;
+	INIT_LIST_HEAD(&lsp->ls_locks);
+	spin_lock(&clp->cl_lock);
+	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
+	spin_unlock(&clp->cl_lock);
+	return lsp;
+}
+
+/*
+ * Return a compatible lock_state. If no initialized lock_state structure
+ * exists, return an uninitialized one.
+ *
+ * The caller must be holding state->lock_sema and clp->cl_sem
+ */
+struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+{
+	struct nfs4_lock_state * lsp;
+	
+	lsp = nfs4_find_lock_state(state, owner);
+	if (lsp == NULL)
+		lsp = nfs4_alloc_lock_state(state, owner);
+	return lsp;
+}
+
+/*
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
+ */
+void
+nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+{
+	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
+		struct nfs4_lock_state *lsp;
+
+		lsp = nfs4_find_lock_state(state, fl_owner);
+		if (lsp) {
+			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
+			nfs4_put_lock_state(lsp);
+			return;
+		}
+	}
+	memcpy(dst, &state->stateid, sizeof(*dst));
+}
+
+/*
+ * Called with state->lock_sema and clp->cl_sem held.
+ */
+void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+{
+	if (status == NFS_OK || seqid_mutating_err(-status))
+		lsp->ls_seqid++;
+}
+
+/*
+ * Check whether the unlock request (an F_UNLCK request) consumes the fl lock.
+ *
+ * fl and request must have the same POSIX owner.
+ *
+ * Returns:
+ * 0 -> fl is not affected by the request
+ * 1 -> fl is consumed by the request
+ */
+
+static int
+nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
+{
+	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
+		return 1;
+	return 0;
+}
+
+/*
+ * Post an initialized lock_state on the state->lock_states list.
+ */
+void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+{
+	if (!list_empty(&lsp->ls_locks))
+		return;
+	atomic_inc(&lsp->ls_count);
+	write_lock(&state->state_lock);
+	list_add(&lsp->ls_locks, &state->lock_states);
+	set_bit(LK_STATE_IN_USE, &state->flags);
+	write_unlock(&state->state_lock);
+}
+
+/*
+ * To decide whether to 'reap' the lock state:
+ * 1) search i_flock for POSIX locks belonging to this lock owner;
+ * 2) determine whether the unlock request consumes every such lock.
+ *    If so, reap the lock state; otherwise leave it alone.
+ */
+void
+nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+{
+	struct inode *inode = state->inode;
+	struct file_lock *fl;
+
+	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+		if (!(fl->fl_flags & FL_POSIX))
+			continue;
+		if (fl->fl_owner != lsp->ls_owner)
+			continue;
+		/* Exit if we find at least one lock which is not consumed */
+		if (nfs4_check_unlock(fl,request) == 0)
+			return;
+	}
+
+	write_lock(&state->state_lock);
+	list_del_init(&lsp->ls_locks);
+	if (list_empty(&state->lock_states))
+		clear_bit(LK_STATE_IN_USE, &state->flags);
+	write_unlock(&state->state_lock);
+	nfs4_put_lock_state(lsp);
+}
+
+/*
+ * Release reference to lock_state, and free it if we see that
+ * it is no longer in use
+ */
+void
+nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+{
+	if (!atomic_dec_and_test(&lsp->ls_count))
+		return;
+	BUG_ON (!list_empty(&lsp->ls_locks));
+	kfree(lsp);
+}
+
+/*
+ * Called with sp->so_sema and clp->cl_sem held.
+ *
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid-incrementing error; see the comments around
+ * seqid_mutating_err() in nfs_fs.h.
+ */
+void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
+{
+	if (status == NFS_OK || seqid_mutating_err(-status))
+		sp->so_seqid++;
+	/* If the server returns BAD_SEQID, unhash state_owner here */
+	if (status == -NFS4ERR_BAD_SEQID)
+		nfs4_drop_state_owner(sp);
+}
+
+static int reclaimer(void *);
+struct reclaimer_args {
+	struct nfs4_client *clp;
+	struct completion complete;
+};
+
+/*
+ * State recovery routine
+ */
+void
+nfs4_recover_state(void *data)
+{
+	struct nfs4_client *clp = (struct nfs4_client *)data;
+	struct reclaimer_args args = {
+		.clp = clp,
+	};
+	might_sleep();
+
+	init_completion(&args.complete);
+
+	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
+		goto out_failed_clear;
+	wait_for_completion(&args.complete);
+	return;
+out_failed_clear:
+	set_bit(NFS4CLNT_OK, &clp->cl_state);
+	wake_up_all(&clp->cl_waitq);
+	rpc_wake_up(&clp->cl_rpcwaitq);
+}
+
+/*
+ * Schedule a state recovery attempt
+ */
+void
+nfs4_schedule_state_recovery(struct nfs4_client *clp)
+{
+	if (!clp)
+		return;
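+	/* Only the caller that clears NFS4CLNT_OK gets to schedule recoverd. */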
+	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
+		schedule_work(&clp->cl_recoverd);
+}
+
+static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
+{
+	struct inode *inode = state->inode;
+	struct file_lock *fl;
+	int status = 0;
+
+	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+		if (!(fl->fl_flags & FL_POSIX))
+			continue;
+		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
+			continue;
+		status = ops->recover_lock(state, fl);
+		if (status >= 0)
+			continue;
+		switch (status) {
+			default:
+				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
+						__FUNCTION__, status);
+			case -NFS4ERR_EXPIRED:
+			case -NFS4ERR_NO_GRACE:
+			case -NFS4ERR_RECLAIM_BAD:
+			case -NFS4ERR_RECLAIM_CONFLICT:
+				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
+				break;
+			case -NFS4ERR_STALE_CLIENTID:
+				goto out_err;
+		}
+	}
+	return 0;
+out_err:
+	return status;
+}
+
+static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
+{
+	struct nfs4_state *state;
+	struct nfs4_lock_state *lock;
+	int status = 0;
+
+	/* Note: we rely on the sp->so_states list being ordered 
+	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
+	 * states first.
+	 * This is needed to ensure that the server won't give us any
+	 * read delegations that we have to return if, say, we are
+	 * recovering after a network partition or a reboot from a
+	 * server that doesn't support a grace period.
+	 */
+	list_for_each_entry(state, &sp->so_states, open_states) {
+		if (state->state == 0)
+			continue;
+		status = ops->recover_open(sp, state);
+		list_for_each_entry(lock, &state->lock_states, ls_locks)
+			lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+		if (status >= 0) {
+			status = nfs4_reclaim_locks(ops, state);
+			if (status < 0)
+				goto out_err;
+			list_for_each_entry(lock, &state->lock_states, ls_locks) {
+				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+					printk("%s: Lock reclaim failed!\n",
+							__FUNCTION__);
+			}
+			continue;
+		}
+		switch (status) {
+			default:
+				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
+						__FUNCTION__, status);
+			case -ENOENT:
+			case -NFS4ERR_RECLAIM_BAD:
+			case -NFS4ERR_RECLAIM_CONFLICT:
+				/*
+				 * Open state on this file cannot be recovered
+				 * All we can do is revert to using the zero stateid.
+				 */
+				memset(state->stateid.data, 0,
+					sizeof(state->stateid.data));
+				/* Mark the file as being 'closed' */
+				state->state = 0;
+				break;
+			case -NFS4ERR_EXPIRED:
+			case -NFS4ERR_NO_GRACE:
+			case -NFS4ERR_STALE_CLIENTID:
+				goto out_err;
+		}
+	}
+	return 0;
+out_err:
+	return status;
+}
+
+static int reclaimer(void *ptr)
+{
+	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
+	struct nfs4_client *clp = args->clp;
+	struct nfs4_state_owner *sp;
+	struct nfs4_state_recovery_ops *ops;
+	int status = 0;
+
+	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
+	allow_signal(SIGKILL);
+
+	atomic_inc(&clp->cl_count);
+	complete(&args->complete);
+
+	/* Ensure exclusive access to NFSv4 state */
+	lock_kernel();
+	down_write(&clp->cl_sem);
+	/* Are there any NFS mounts out there? */
+	if (list_empty(&clp->cl_superblocks))
+		goto out;
+restart_loop:
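+	/*
+	 * A successful RENEW means the lease is still valid and no state
+	 * recovery is needed; otherwise choose between reboot recovery and
+	 * network partition recovery based on the error returned.
+	 */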
+	status = nfs4_proc_renew(clp);
+	switch (status) {
+		case 0:
+		case -NFS4ERR_CB_PATH_DOWN:
+			goto out;
+		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_LEASE_MOVED:
+			ops = &nfs4_reboot_recovery_ops;
+			break;
+		default:
+			ops = &nfs4_network_partition_recovery_ops;
+	}
+	status = __nfs4_init_client(clp);
+	if (status)
+		goto out_error;
+	/* Mark all delegations for reclaim */
+	nfs_delegation_mark_reclaim(clp);
+	/* Note: list is protected by exclusive lock on cl->cl_sem */
+	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+		status = nfs4_reclaim_open_state(ops, sp);
+		if (status < 0) {
+			if (status == -NFS4ERR_NO_GRACE) {
+				ops = &nfs4_network_partition_recovery_ops;
+				status = nfs4_reclaim_open_state(ops, sp);
+			}
+			if (status == -NFS4ERR_STALE_CLIENTID)
+				goto restart_loop;
+			if (status == -NFS4ERR_EXPIRED)
+				goto restart_loop;
+		}
+	}
+	nfs_delegation_reap_unclaimed(clp);
+out:
+	set_bit(NFS4CLNT_OK, &clp->cl_state);
+	up_write(&clp->cl_sem);
+	unlock_kernel();
+	wake_up_all(&clp->cl_waitq);
+	rpc_wake_up(&clp->cl_rpcwaitq);
+	if (status == -NFS4ERR_CB_PATH_DOWN)
+		nfs_handle_cb_pathdown(clp);
+	nfs4_put_client(clp);
+	return 0;
+out_error:
+	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
+				NIPQUAD(clp->cl_addr.s_addr), -status);
+	goto out;
+}
+
+/*
+ * Local variables:
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
new file mode 100644
index 0000000..5f4de05
--- /dev/null
+++ b/fs/nfs/nfs4xdr.c
@@ -0,0 +1,4034 @@
+/*
+ *  fs/nfs/nfs4xdr.c
+ *
+ *  Client-side XDR for NFSv4.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *  Andy Adamson   <andros@umich.edu>
+ * 
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/kdev_t.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_idmap.h>
+
+#define NFSDBG_FACILITY		NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO		EIO
+
+static int nfs_stat_to_errno(int);
+
+/* NFSv4 COMPOUND tags are only wanted for debugging purposes */
+#ifdef DEBUG
+#define NFS4_MAXTAGLEN		20
+#else
+#define NFS4_MAXTAGLEN		0
+#endif
+
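+/*
+ * The *_maxsz macros below are upper bounds, in 32-bit XDR words, on the
+ * encoded size of each operation; they are used when sizing RPC buffers.
+ */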
+/* lock, open owner id:
+ * we currently use size 1 (u32) out of (NFS4_OPAQUE_LIMIT >> 2)
+ */
+#define owner_id_maxsz          (1 + 1)
+#define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
+#define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
+#define op_encode_hdr_maxsz	(1)
+#define op_decode_hdr_maxsz	(2)
+#define encode_putfh_maxsz	(op_encode_hdr_maxsz + 1 + \
+				(NFS4_FHSIZE >> 2))
+#define decode_putfh_maxsz	(op_decode_hdr_maxsz)
+#define encode_putrootfh_maxsz	(op_encode_hdr_maxsz)
+#define decode_putrootfh_maxsz	(op_decode_hdr_maxsz)
+#define encode_getfh_maxsz      (op_encode_hdr_maxsz)
+#define decode_getfh_maxsz      (op_decode_hdr_maxsz + 1 + \
+				((3+NFS4_FHSIZE) >> 2))
+#define encode_getattr_maxsz    (op_encode_hdr_maxsz + 3)
+#define nfs4_name_maxsz		(1 + ((3 + NFS4_MAXNAMLEN) >> 2))
+#define nfs4_path_maxsz		(1 + ((3 + NFS4_MAXPATHLEN) >> 2))
+#define nfs4_fattr_bitmap_maxsz (36 + 2 * nfs4_name_maxsz)
+#define decode_getattr_maxsz    (op_decode_hdr_maxsz + 3 + \
+                                nfs4_fattr_bitmap_maxsz)
+#define encode_savefh_maxsz     (op_encode_hdr_maxsz)
+#define decode_savefh_maxsz     (op_decode_hdr_maxsz)
+#define encode_fsinfo_maxsz	(op_encode_hdr_maxsz + 2)
+#define decode_fsinfo_maxsz	(op_decode_hdr_maxsz + 11)
+#define encode_renew_maxsz	(op_encode_hdr_maxsz + 3)
+#define decode_renew_maxsz	(op_decode_hdr_maxsz)
+#define encode_setclientid_maxsz \
+				(op_encode_hdr_maxsz + \
+				4 /*server->ip_addr*/ + \
+				1 /*Netid*/ + \
+				6 /*uaddr*/ + \
+				6 + (NFS4_VERIFIER_SIZE >> 2))
+#define decode_setclientid_maxsz \
+				(op_decode_hdr_maxsz + \
+				2 + \
+				1024) /* large value for CLID_INUSE */
+#define encode_setclientid_confirm_maxsz \
+				(op_encode_hdr_maxsz + \
+				3 + (NFS4_VERIFIER_SIZE >> 2))
+#define decode_setclientid_confirm_maxsz \
+				(op_decode_hdr_maxsz)
+#define encode_lookup_maxsz	(op_encode_hdr_maxsz + \
+				1 + ((3 + NFS4_FHSIZE) >> 2))
+#define encode_remove_maxsz	(op_encode_hdr_maxsz + \
+				nfs4_name_maxsz)
+#define encode_rename_maxsz	(op_encode_hdr_maxsz + \
+				2 * nfs4_name_maxsz)
+#define decode_rename_maxsz	(op_decode_hdr_maxsz + 5 + 5)
+#define encode_link_maxsz	(op_encode_hdr_maxsz + \
+				nfs4_name_maxsz)
+#define decode_link_maxsz	(op_decode_hdr_maxsz + 5)
+#define encode_symlink_maxsz	(op_encode_hdr_maxsz + \
+				1 + nfs4_name_maxsz + \
+				nfs4_path_maxsz + \
+				nfs4_fattr_bitmap_maxsz)
+#define decode_symlink_maxsz	(op_decode_hdr_maxsz + 8)
+#define encode_create_maxsz	(op_encode_hdr_maxsz + \
+				2 + nfs4_name_maxsz + \
+				nfs4_fattr_bitmap_maxsz)
+#define decode_create_maxsz	(op_decode_hdr_maxsz + 8)
+#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
+#define NFS4_enc_compound_sz	(1024)  /* XXX: large enough? */
+#define NFS4_dec_compound_sz	(1024)  /* XXX: large enough? */
+#define NFS4_enc_read_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz + 7)
+#define NFS4_dec_read_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 2)
+#define NFS4_enc_readlink_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz)
+#define NFS4_dec_readlink_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz)
+#define NFS4_enc_readdir_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz + 9)
+#define NFS4_dec_readdir_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 2)
+#define NFS4_enc_write_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz + 8)
+#define NFS4_dec_write_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 4)
+#define NFS4_enc_commit_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz + 3)
+#define NFS4_dec_commit_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 2)
+#define NFS4_enc_open_sz        (compound_encode_hdr_maxsz + \
+                                encode_putfh_maxsz + \
+                                op_encode_hdr_maxsz + \
+                                13 + 3 + 2 + 64 + \
+                                encode_getattr_maxsz + \
+                                encode_getfh_maxsz)
+#define NFS4_dec_open_sz        (compound_decode_hdr_maxsz + \
+                                decode_putfh_maxsz + \
+                                op_decode_hdr_maxsz + 4 + 5 + 2 + 3 + \
+                                decode_getattr_maxsz + \
+                                decode_getfh_maxsz)
+#define NFS4_enc_open_confirm_sz      \
+                                (compound_encode_hdr_maxsz + \
+                                encode_putfh_maxsz + \
+                                op_encode_hdr_maxsz + 5)
+#define NFS4_dec_open_confirm_sz        (compound_decode_hdr_maxsz + \
+                                        decode_putfh_maxsz + \
+                                        op_decode_hdr_maxsz + 4)
+#define NFS4_enc_open_noattr_sz	(compound_encode_hdr_maxsz + \
+					encode_putfh_maxsz + \
+					op_encode_hdr_maxsz + \
+					11)
+#define NFS4_dec_open_noattr_sz	(compound_decode_hdr_maxsz + \
+					decode_putfh_maxsz + \
+					op_decode_hdr_maxsz + \
+					4 + 5 + 2 + 3)
+#define NFS4_enc_open_downgrade_sz \
+				(compound_encode_hdr_maxsz + \
+                                encode_putfh_maxsz + \
+                                op_encode_hdr_maxsz + 7)
+#define NFS4_dec_open_downgrade_sz \
+				(compound_decode_hdr_maxsz + \
+                                decode_putfh_maxsz + \
+                                op_decode_hdr_maxsz + 4)
+#define NFS4_enc_close_sz       (compound_encode_hdr_maxsz + \
+                                encode_putfh_maxsz + \
+                                op_encode_hdr_maxsz + 5)
+#define NFS4_dec_close_sz       (compound_decode_hdr_maxsz + \
+                                decode_putfh_maxsz + \
+                                op_decode_hdr_maxsz + 4)
+#define NFS4_enc_setattr_sz     (compound_encode_hdr_maxsz + \
+                                encode_putfh_maxsz + \
+                                op_encode_hdr_maxsz + 4 + \
+                                nfs4_fattr_bitmap_maxsz + \
+                                encode_getattr_maxsz)
+#define NFS4_dec_setattr_sz     (compound_decode_hdr_maxsz + \
+                                decode_putfh_maxsz + \
+                                op_decode_hdr_maxsz + 3)
+#define NFS4_enc_fsinfo_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_fsinfo_maxsz)
+#define NFS4_dec_fsinfo_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_fsinfo_maxsz)
+#define NFS4_enc_renew_sz	(compound_encode_hdr_maxsz + \
+				encode_renew_maxsz)
+#define NFS4_dec_renew_sz	(compound_decode_hdr_maxsz + \
+				decode_renew_maxsz)
+#define NFS4_enc_setclientid_sz	(compound_encode_hdr_maxsz + \
+				encode_setclientid_maxsz)
+#define NFS4_dec_setclientid_sz	(compound_decode_hdr_maxsz + \
+				decode_setclientid_maxsz)
+#define NFS4_enc_setclientid_confirm_sz \
+				(compound_encode_hdr_maxsz + \
+				encode_setclientid_confirm_maxsz + \
+				encode_putrootfh_maxsz + \
+				encode_fsinfo_maxsz)
+#define NFS4_dec_setclientid_confirm_sz \
+				(compound_decode_hdr_maxsz + \
+				decode_setclientid_confirm_maxsz + \
+				decode_putrootfh_maxsz + \
+				decode_fsinfo_maxsz)
+#define NFS4_enc_lock_sz        (compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz + \
+				op_encode_hdr_maxsz + \
+				1 + 1 + 2 + 2 + \
+				1 + 4 + 1 + 2 + \
+				owner_id_maxsz)
+#define NFS4_dec_lock_sz        (compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_getattr_maxsz + \
+				op_decode_hdr_maxsz + \
+				2 + 2 + 1 + 2 + \
+				owner_id_maxsz)
+#define NFS4_enc_lockt_sz       (compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz + \
+				op_encode_hdr_maxsz + \
+				1 + 2 + 2 + 2 + \
+				owner_id_maxsz)
+#define NFS4_dec_lockt_sz       (NFS4_dec_lock_sz)
+#define NFS4_enc_locku_sz       (compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz + \
+				op_encode_hdr_maxsz + \
+				1 + 1 + 4 + 2 + 2)
+#define NFS4_dec_locku_sz       (compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_getattr_maxsz + \
+				op_decode_hdr_maxsz + 4)
+#define NFS4_enc_access_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				op_encode_hdr_maxsz + 1)
+#define NFS4_dec_access_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 2)
+#define NFS4_enc_getattr_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz)
+#define NFS4_dec_getattr_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_getattr_maxsz)
+#define NFS4_enc_lookup_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_lookup_maxsz + \
+				encode_getattr_maxsz + \
+				encode_getfh_maxsz)
+#define NFS4_dec_lookup_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + \
+				decode_getattr_maxsz + \
+				decode_getfh_maxsz)
+#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
+				encode_putrootfh_maxsz + \
+				encode_getattr_maxsz + \
+				encode_getfh_maxsz)
+#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
+				decode_putrootfh_maxsz + \
+				decode_getattr_maxsz + \
+				decode_getfh_maxsz)
+#define NFS4_enc_remove_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_remove_maxsz)
+#define NFS4_dec_remove_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 5)
+#define NFS4_enc_rename_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_savefh_maxsz + \
+				encode_putfh_maxsz + \
+				encode_rename_maxsz)
+#define NFS4_dec_rename_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_savefh_maxsz + \
+				decode_putfh_maxsz + \
+				decode_rename_maxsz)
+#define NFS4_enc_link_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_savefh_maxsz + \
+				encode_putfh_maxsz + \
+				encode_link_maxsz)
+#define NFS4_dec_link_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_savefh_maxsz + \
+				decode_putfh_maxsz + \
+				decode_link_maxsz)
+#define NFS4_enc_symlink_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_symlink_maxsz + \
+				encode_getattr_maxsz + \
+				encode_getfh_maxsz)
+#define NFS4_dec_symlink_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_symlink_maxsz + \
+				decode_getattr_maxsz + \
+				decode_getfh_maxsz)
+#define NFS4_enc_create_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_create_maxsz + \
+				encode_getattr_maxsz + \
+				encode_getfh_maxsz)
+#define NFS4_dec_create_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_create_maxsz + \
+				decode_getattr_maxsz + \
+				decode_getfh_maxsz)
+#define NFS4_enc_pathconf_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz)
+#define NFS4_dec_pathconf_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				decode_getattr_maxsz)
+#define NFS4_enc_statfs_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_getattr_maxsz)
+#define NFS4_dec_statfs_sz	(compound_decode_hdr_maxsz + \
+				decode_putfh_maxsz + \
+				op_decode_hdr_maxsz + 12)
+#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+				encode_getattr_maxsz)
+#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+				decode_getattr_maxsz)
+#define NFS4_enc_delegreturn_sz	(compound_encode_hdr_maxsz + \
+				encode_putfh_maxsz + \
+				encode_delegreturn_maxsz)
+#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
+				decode_delegreturn_maxsz)
+
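+/* Map from the NFSv4 file type (NF4REG, NF4DIR, ...) to S_IF* mode bits
+ * and the corresponding NFSv2 wire type. */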
+static struct {
+	unsigned int	mode;
+	unsigned int	nfs2type;
+} nfs_type2fmt[] = {
+	{ 0,		NFNON	     },
+	{ S_IFREG,	NFREG	     },
+	{ S_IFDIR,	NFDIR	     },
+	{ S_IFBLK,	NFBLK	     },
+	{ S_IFCHR,	NFCHR	     },
+	{ S_IFLNK,	NFLNK	     },
+	{ S_IFSOCK,	NFSOCK	     },
+	{ S_IFIFO,	NFFIFO	     },
+	{ 0,		NFNON	     },
+	{ 0,		NFNON	     },
+};
+
+struct compound_hdr {
+	int32_t		status;
+	uint32_t	nops;
+	uint32_t	taglen;
+	char *		tag;
+};
+
+/*
+ * START OF "GENERIC" ENCODE ROUTINES.
+ *   These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
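+/*
+ * Note: the macros below assume a local 'uint32_t *p' (and, for
+ * RESERVE_SPACE, a 'struct xdr_stream *xdr') in the calling function.
+ */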
+#define WRITE32(n)               *p++ = htonl(n)
+#define WRITE64(n)               do {				\
+	*p++ = htonl((uint32_t)((n) >> 32));				\
+	*p++ = htonl((uint32_t)(n));					\
+} while (0)
+#define WRITEMEM(ptr,nbytes)     do {				\
+	p = xdr_encode_opaque_fixed(p, ptr, nbytes);		\
+} while (0)
+
+#define RESERVE_SPACE(nbytes)	do {				\
+	p = xdr_reserve_space(xdr, nbytes);			\
+	if (!p) printk("RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __FUNCTION__); \
+	BUG_ON(!p);						\
+} while (0)
+
+static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
+{
+	uint32_t *p;
+
+	p = xdr_reserve_space(xdr, 4 + len);
+	BUG_ON(p == NULL);
+	xdr_encode_opaque(p, str, len);
+}
+
+static int encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+	uint32_t *p;
+
+	dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
+	BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
+	RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
+	WRITE32(hdr->taglen);
+	WRITEMEM(hdr->tag, hdr->taglen);
+	WRITE32(NFS4_MINOR_VERSION);
+	WRITE32(hdr->nops);
+	return 0;
+}
+
+static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
+{
+	uint32_t *p;
+
+	p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
+	BUG_ON(p == NULL);
+	xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE);
+}
+
+static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server)
+{
+	char owner_name[IDMAP_NAMESZ];
+	char owner_group[IDMAP_NAMESZ];
+	int owner_namelen = 0;
+	int owner_grouplen = 0;
+	uint32_t *p;
+	uint32_t *q;
+	int len;
+	uint32_t bmval0 = 0;
+	uint32_t bmval1 = 0;
+	int status;
+
+	/*
+	 * We reserve enough space to write the entire attribute buffer at once.
+	 * In the worst-case, this would be
+	 *   12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
+	 *          = 36 bytes, plus any contribution from variable-length fields
+	 *            such as owner/group/acl's.
+	 */
+	len = 16;
+
+	/* Sigh */
+	if (iap->ia_valid & ATTR_SIZE)
+		len += 8;
+	if (iap->ia_valid & ATTR_MODE)
+		len += 4;
+	if (iap->ia_valid & ATTR_UID) {
+		owner_namelen = nfs_map_uid_to_name(server->nfs4_state, iap->ia_uid, owner_name);
+		if (owner_namelen < 0) {
+			printk(KERN_WARNING "nfs: couldn't resolve uid %d to string\n",
+			       iap->ia_uid);
+			/* XXX */
+			strcpy(owner_name, "nobody");
+			owner_namelen = sizeof("nobody") - 1;
+			/* goto out; */
+		}
+		len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
+	}
+	if (iap->ia_valid & ATTR_GID) {
+		owner_grouplen = nfs_map_gid_to_group(server->nfs4_state, iap->ia_gid, owner_group);
+		if (owner_grouplen < 0) {
+			printk(KERN_WARNING "nfs4: couldn't resolve gid %d to string\n",
+			       iap->ia_gid);
+			strcpy(owner_group, "nobody");
+			owner_grouplen = sizeof("nobody") - 1;
+			/* goto out; */
+		}
+		len += 4 + (XDR_QUADLEN(owner_grouplen) << 2);
+	}
+	if (iap->ia_valid & ATTR_ATIME_SET)
+		len += 16;
+	else if (iap->ia_valid & ATTR_ATIME)
+		len += 4;
+	if (iap->ia_valid & ATTR_MTIME_SET)
+		len += 16;
+	else if (iap->ia_valid & ATTR_MTIME)
+		len += 4;
+	RESERVE_SPACE(len);
+
+	/*
+	 * We write the bitmap length now, but leave the bitmap and the attribute
+	 * buffer length to be backfilled at the end of this routine.
+	 */
+	WRITE32(2);
+	q = p;
+	p += 3;
+
+	if (iap->ia_valid & ATTR_SIZE) {
+		bmval0 |= FATTR4_WORD0_SIZE;
+		WRITE64(iap->ia_size);
+	}
+	if (iap->ia_valid & ATTR_MODE) {
+		bmval1 |= FATTR4_WORD1_MODE;
+		WRITE32(iap->ia_mode);
+	}
+	if (iap->ia_valid & ATTR_UID) {
+		bmval1 |= FATTR4_WORD1_OWNER;
+		WRITE32(owner_namelen);
+		WRITEMEM(owner_name, owner_namelen);
+	}
+	if (iap->ia_valid & ATTR_GID) {
+		bmval1 |= FATTR4_WORD1_OWNER_GROUP;
+		WRITE32(owner_grouplen);
+		WRITEMEM(owner_group, owner_grouplen);
+	}
+	if (iap->ia_valid & ATTR_ATIME_SET) {
+		bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+		WRITE32(NFS4_SET_TO_CLIENT_TIME);
+		WRITE32(0);
+		WRITE32(iap->ia_mtime.tv_sec);
+		WRITE32(iap->ia_mtime.tv_nsec);
+	}
+	else if (iap->ia_valid & ATTR_ATIME) {
+		bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+		WRITE32(NFS4_SET_TO_SERVER_TIME);
+	}
+	if (iap->ia_valid & ATTR_MTIME_SET) {
+		bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+		WRITE32(NFS4_SET_TO_CLIENT_TIME);
+		WRITE32(0);
+		WRITE32(iap->ia_mtime.tv_sec);
+		WRITE32(iap->ia_mtime.tv_nsec);
+	}
+	else if (iap->ia_valid & ATTR_MTIME) {
+		bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+		WRITE32(NFS4_SET_TO_SERVER_TIME);
+	}
+	
+	/*
+	 * Now we backfill the bitmap and the attribute buffer length.
+	 */
+	if (len != ((char *)p - (char *)q) + 4) {
+		printk ("encode_attr: Attr length calculation error! %u != %Zu\n",
+				len, ((char *)p - (char *)q) + 4);
+		BUG();
+	}
+	len = (char *)p - (char *)q - 12;
+	*q++ = htonl(bmval0);
+	*q++ = htonl(bmval1);
+	*q++ = htonl(len);
+
+	status = 0;
+/* out: */
+	return status;
+}
+
+static int encode_access(struct xdr_stream *xdr, u32 access)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8);
+	WRITE32(OP_ACCESS);
+	WRITE32(access);
+	
+	return 0;
+}
+
+static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8+sizeof(arg->stateid.data));
+	WRITE32(OP_CLOSE);
+	WRITE32(arg->seqid);
+	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+	
+	return 0;
+}
+
+static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args)
+{
+	uint32_t *p;
+        
+        RESERVE_SPACE(16);
+        WRITE32(OP_COMMIT);
+        WRITE64(args->offset);
+        WRITE32(args->count);
+
+        return 0;
+}
+
+static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create)
+{
+	uint32_t *p;
+	
+	RESERVE_SPACE(8);
+	WRITE32(OP_CREATE);
+	WRITE32(create->ftype);
+
+	switch (create->ftype) {
+	case NF4LNK:
+		RESERVE_SPACE(4 + create->u.symlink->len);
+		WRITE32(create->u.symlink->len);
+		WRITEMEM(create->u.symlink->name, create->u.symlink->len);
+		break;
+
+	case NF4BLK: case NF4CHR:
+		RESERVE_SPACE(8);
+		WRITE32(create->u.device.specdata1);
+		WRITE32(create->u.device.specdata2);
+		break;
+
+	default:
+		break;
+	}
+
+	RESERVE_SPACE(4 + create->name->len);
+	WRITE32(create->name->len);
+	WRITEMEM(create->name->name, create->name->len);
+
+	return encode_attrs(xdr, create->attrs, create->server);
+}
+
+static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap)
+{
+        uint32_t *p;
+
+        RESERVE_SPACE(12);
+        WRITE32(OP_GETATTR);
+        WRITE32(1);
+        WRITE32(bitmap);
+        return 0;
+}
+
+static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1)
+{
+        uint32_t *p;
+
+        RESERVE_SPACE(16);
+        WRITE32(OP_GETATTR);
+        WRITE32(2);
+        WRITE32(bm0);
+        WRITE32(bm1);
+        return 0;
+}
+
+static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask)
+{
+	extern u32 nfs4_fattr_bitmap[];
+
+	return encode_getattr_two(xdr,
+			bitmask[0] & nfs4_fattr_bitmap[0],
+			bitmask[1] & nfs4_fattr_bitmap[1]);
+}
+
+static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask)
+{
+	extern u32 nfs4_fsinfo_bitmap[];
+
+	return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0],
+			bitmask[1] & nfs4_fsinfo_bitmap[1]);
+}
+
+static int encode_getfh(struct xdr_stream *xdr)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(OP_GETFH);
+
+	return 0;
+}
+
+static int encode_link(struct xdr_stream *xdr, const struct qstr *name)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8 + name->len);
+	WRITE32(OP_LINK);
+	WRITE32(name->len);
+	WRITEMEM(name->name, name->len);
+	
+	return 0;
+}
+
+/*
+ * opcode,type,reclaim,offset,length,new_lock_owner = 32
+ * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
+ */
+static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+{
+	uint32_t *p;
+	struct nfs_lock_opargs *opargs = arg->u.lock;
+
+	RESERVE_SPACE(32);
+	WRITE32(OP_LOCK);
+	WRITE32(arg->type); 
+	WRITE32(opargs->reclaim);
+	WRITE64(arg->offset);
+	WRITE64(arg->length);
+	WRITE32(opargs->new_lock_owner);
+	if (opargs->new_lock_owner){
+		struct nfs_open_to_lock *ol = opargs->u.open_lock;
+
+		RESERVE_SPACE(40);
+		WRITE32(ol->open_seqid);
+		WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid));
+		WRITE32(ol->lock_seqid);
+		WRITE64(ol->lock_owner.clientid);
+		WRITE32(4);
+		WRITE32(ol->lock_owner.id);
+	}
+	else {
+		struct nfs_exist_lock *el = opargs->u.exist_lock;
+
+		RESERVE_SPACE(20);
+		WRITEMEM(&el->stateid, sizeof(el->stateid));
+		WRITE32(el->seqid);
+	}
+
+	return 0;
+}
+
+static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+{
+	uint32_t *p;
+	struct nfs_lowner *opargs = arg->u.lockt;
+
+	RESERVE_SPACE(40);
+	WRITE32(OP_LOCKT);
+	WRITE32(arg->type);
+	WRITE64(arg->offset);
+	WRITE64(arg->length);
+	WRITE64(opargs->clientid);
+	WRITE32(4);
+	WRITE32(opargs->id);
+
+	return 0;
+}
+
+static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+{
+	uint32_t *p;
+	struct nfs_locku_opargs *opargs = arg->u.locku;
+
+	RESERVE_SPACE(44);
+	WRITE32(OP_LOCKU);
+	WRITE32(arg->type);
+	WRITE32(opargs->seqid);
+	WRITEMEM(&opargs->stateid, sizeof(opargs->stateid));
+	WRITE64(arg->offset);
+	WRITE64(arg->length);
+
+	return 0;
+}
+
+static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name)
+{
+	int len = name->len;
+	uint32_t *p;
+
+	RESERVE_SPACE(8 + len);
+	WRITE32(OP_LOOKUP);
+	WRITE32(len);
+	WRITEMEM(name->name, len);
+
+	return 0;
+}
+
+static void encode_share_access(struct xdr_stream *xdr, int open_flags)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8);
+	switch (open_flags & (FMODE_READ|FMODE_WRITE)) {
+		case FMODE_READ:
+			WRITE32(NFS4_SHARE_ACCESS_READ);
+			break;
+		case FMODE_WRITE:
+			WRITE32(NFS4_SHARE_ACCESS_WRITE);
+			break;
+		case FMODE_READ|FMODE_WRITE:
+			WRITE32(NFS4_SHARE_ACCESS_BOTH);
+			break;
+		default:
+			BUG();
+	}
+	WRITE32(0);		/* for linux, share_deny = 0 always */
+}
+
+static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+	uint32_t *p;
+	/*
+	 * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8,
+	 * ownerlen 4, owner 4 = 32
+	 */
+	RESERVE_SPACE(8);
+	WRITE32(OP_OPEN);
+	WRITE32(arg->seqid);
+	encode_share_access(xdr, arg->open_flags);
+	RESERVE_SPACE(16);
+	WRITE64(arg->clientid);
+	WRITE32(4);
+	WRITE32(arg->id);
+}
+
+static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	switch(arg->open_flags & O_EXCL) {
+		case 0:
+			WRITE32(NFS4_CREATE_UNCHECKED);
+			encode_attrs(xdr, arg->u.attrs, arg->server);
+			break;
+		default:
+			WRITE32(NFS4_CREATE_EXCLUSIVE);
+			encode_nfs4_verifier(xdr, &arg->u.verifier);
+	}
+}
+
+static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	switch (arg->open_flags & O_CREAT) {
+		case 0:
+			WRITE32(NFS4_OPEN_NOCREATE);
+			break;
+		default:
+			BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
+			WRITE32(NFS4_OPEN_CREATE);
+			encode_createmode(xdr, arg);
+	}
+}
+
+static inline void encode_delegation_type(struct xdr_stream *xdr, int delegation_type)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	switch (delegation_type) {
+		case 0:
+			WRITE32(NFS4_OPEN_DELEGATE_NONE);
+			break;
+		case FMODE_READ:
+			WRITE32(NFS4_OPEN_DELEGATE_READ);
+			break;
+		case FMODE_WRITE|FMODE_READ:
+			WRITE32(NFS4_OPEN_DELEGATE_WRITE);
+			break;
+		default:
+			BUG();
+	}
+}
+
+static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(NFS4_OPEN_CLAIM_NULL);
+	encode_string(xdr, name->len, name->name);
+}
+
+static inline void encode_claim_previous(struct xdr_stream *xdr, int type)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(NFS4_OPEN_CLAIM_PREVIOUS);
+	encode_delegation_type(xdr, type);
+}
+
+static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4+sizeof(stateid->data));
+	WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
+	WRITEMEM(stateid->data, sizeof(stateid->data));
+	encode_string(xdr, name->len, name->name);
+}
+
+static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+	encode_openhdr(xdr, arg);
+	encode_opentype(xdr, arg);
+	switch (arg->claim) {
+		case NFS4_OPEN_CLAIM_NULL:
+			encode_claim_null(xdr, arg->name);
+			break;
+		case NFS4_OPEN_CLAIM_PREVIOUS:
+			encode_claim_previous(xdr, arg->u.delegation_type);
+			break;
+		case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+			encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation);
+			break;
+		default:
+			BUG();
+	}
+	return 0;
+}
+
+static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8+sizeof(arg->stateid.data));
+	WRITE32(OP_OPEN_CONFIRM);
+	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+	WRITE32(arg->seqid);
+
+	return 0;
+}
+
+static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8+sizeof(arg->stateid.data));
+	WRITE32(OP_OPEN_DOWNGRADE);
+	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+	WRITE32(arg->seqid);
+	encode_share_access(xdr, arg->open_flags);
+	return 0;
+}
+
+static int
+encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	int len = fh->size;
+	uint32_t *p;
+
+	RESERVE_SPACE(8 + len);
+	WRITE32(OP_PUTFH);
+	WRITE32(len);
+	WRITEMEM(fh->data, len);
+
+	return 0;
+}
+
+static int encode_putrootfh(struct xdr_stream *xdr)
+{
+        uint32_t *p;
+        
+        RESERVE_SPACE(4);
+        WRITE32(OP_PUTROOTFH);
+
+        return 0;
+}
+
+static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
+{
+	extern nfs4_stateid zero_stateid;
+	nfs4_stateid stateid;
+	uint32_t *p;
+
+	RESERVE_SPACE(16);
+	if (ctx->state != NULL) {
+		nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
+		WRITEMEM(stateid.data, sizeof(stateid.data));
+	} else
+		WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
+}
+
+static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(OP_READ);
+
+	encode_stateid(xdr, args->context);
+
+	RESERVE_SPACE(12);
+	WRITE64(args->offset);
+	WRITE32(args->count);
+
+	return 0;
+}
+
+static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req)
+{
+	struct rpc_auth *auth = req->rq_task->tk_auth;
+	int replen;
+	uint32_t *p;
+
+	RESERVE_SPACE(32+sizeof(nfs4_verifier));
+	WRITE32(OP_READDIR);
+	WRITE64(readdir->cookie);
+	WRITEMEM(readdir->verifier.data, sizeof(readdir->verifier.data));
+	WRITE32(readdir->count >> 1);  /* We're not doing readdirplus */
+	WRITE32(readdir->count);
+	WRITE32(2);
+	if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+		WRITE32(0);
+		WRITE32(FATTR4_WORD1_MOUNTED_ON_FILEID);
+	} else {
+		WRITE32(FATTR4_WORD0_FILEID);
+		WRITE32(0);
+	}
+
+	/* set up reply kvec
+	 *    toplevel_status + taglen + rescount + OP_PUTFH + status
+	 *      + OP_READDIR + status + verifier(2) = 9
+	 */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + 9) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->pages,
+			 readdir->pgbase, readdir->count);
+
+	return 0;
+}
+
+static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req)
+{
+	struct rpc_auth *auth = req->rq_task->tk_auth;
+	unsigned int replen;
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(OP_READLINK);
+
+	/* set up reply kvec
+	 *    toplevel_status + taglen + rescount + OP_PUTFH + status
+	 *      + OP_READLINK + status + string length = 8
+	 */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + 8) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen, readlink->pages,
+			readlink->pgbase, readlink->pglen);
+	
+	return 0;
+}
+
+static int encode_remove(struct xdr_stream *xdr, const struct qstr *name)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8 + name->len);
+	WRITE32(OP_REMOVE);
+	WRITE32(name->len);
+	WRITEMEM(name->name, name->len);
+
+	return 0;
+}
+
+static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(8 + oldname->len);
+	WRITE32(OP_RENAME);
+	WRITE32(oldname->len);
+	WRITEMEM(oldname->name, oldname->len);
+	
+	RESERVE_SPACE(4 + newname->len);
+	WRITE32(newname->len);
+	WRITEMEM(newname->name, newname->len);
+
+	return 0;
+}
+
+static int encode_renew(struct xdr_stream *xdr, const struct nfs4_client *client_stateid)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(12);
+	WRITE32(OP_RENEW);
+	WRITE64(client_stateid->cl_clientid);
+
+	return 0;
+}
+
+static int
+encode_savefh(struct xdr_stream *xdr)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(OP_SAVEFH);
+
+	return 0;
+}
+
+static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server)
+{
+	int status;
+	uint32_t *p;
+
+	RESERVE_SPACE(4+sizeof(arg->stateid.data));
+	WRITE32(OP_SETATTR);
+	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+
+	if ((status = encode_attrs(xdr, arg->iap, server)))
+		return status;
+
+	return 0;
+}
+
+static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data));
+	WRITE32(OP_SETCLIENTID);
+	WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data));
+
+	encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
+	RESERVE_SPACE(4);
+	WRITE32(setclientid->sc_prog);
+	encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
+	encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
+	RESERVE_SPACE(4);
+	WRITE32(setclientid->sc_cb_ident);
+
+	return 0;
+}
+
+static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_client *client_state)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(12 + sizeof(client_state->cl_confirm.data));
+	WRITE32(OP_SETCLIENTID_CONFIRM);
+	WRITE64(client_state->cl_clientid);
+	WRITEMEM(client_state->cl_confirm.data, sizeof(client_state->cl_confirm.data));
+
+	return 0;
+}
+
+static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(4);
+	WRITE32(OP_WRITE);
+
+	encode_stateid(xdr, args->context);
+
+	RESERVE_SPACE(16);
+	WRITE64(args->offset);
+	WRITE32(args->stable);
+	WRITE32(args->count);
+
+	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+
+	return 0;
+}
+
+static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid)
+{
+	uint32_t *p;
+
+	RESERVE_SPACE(20);
+
+	WRITE32(OP_DELEGRETURN);
+	WRITEMEM(stateid->data, sizeof(stateid->data));
+	return 0;
+
+}
+/*
+ * END OF "GENERIC" ENCODE ROUTINES.
+ */
+
+/*
+ * Encode an ACCESS request
+ */
+static int nfs4_xdr_enc_access(struct rpc_rqst *req, uint32_t *p, const struct nfs4_accessargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->fh)) == 0)
+		status = encode_access(&xdr, args->access);
+	return status;
+}
+
+/*
+ * Encode LOOKUP request
+ */
+static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, uint32_t *p, const struct nfs4_lookup_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 4,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
+		goto out;
+	if ((status = encode_lookup(&xdr, args->name)) != 0)
+		goto out;
+	if ((status = encode_getfh(&xdr)) != 0)
+		goto out;
+	status = encode_getfattr(&xdr, args->bitmask);
+out:
+	return status;
+}
+
+/*
+ * Encode LOOKUP_ROOT request
+ */
+static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, uint32_t *p, const struct nfs4_lookup_root_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 3,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putrootfh(&xdr)) != 0)
+		goto out;
+	if ((status = encode_getfh(&xdr)) == 0)
+		status = encode_getfattr(&xdr, args->bitmask);
+out:
+	return status;
+}
+
+/*
+ * Encode REMOVE request
+ */
+static int nfs4_xdr_enc_remove(struct rpc_rqst *req, uint32_t *p, const struct nfs4_remove_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->fh)) == 0)
+		status = encode_remove(&xdr, args->name);
+	return status;
+}
+
+/*
+ * Encode RENAME request
+ */
+static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct nfs4_rename_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 4,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->old_dir)) != 0)
+		goto out;
+	if ((status = encode_savefh(&xdr)) != 0)
+		goto out;
+	if ((status = encode_putfh(&xdr, args->new_dir)) != 0)
+		goto out;
+	status = encode_rename(&xdr, args->old_name, args->new_name);
+out:
+	return status;
+}
+
+/*
+ * Encode LINK request
+ */
+static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs4_link_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 4,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->fh)) != 0)
+		goto out;
+	if ((status = encode_savefh(&xdr)) != 0)
+		goto out;
+	if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
+		goto out;
+	status = encode_link(&xdr, args->name);
+out:
+	return status;
+}
+
+/*
+ * Encode CREATE request
+ */
+static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct nfs4_create_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 4,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
+		goto out;
+	if ((status = encode_create(&xdr, args)) != 0)
+		goto out;
+	if ((status = encode_getfh(&xdr)) != 0)
+		goto out;
+	status = encode_getfattr(&xdr, args->bitmask);
+out:
+	return status;
+}
+
+/*
+ * Encode SYMLINK request
+ */
+static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, uint32_t *p, const struct nfs4_create_arg *args)
+{
+	return nfs4_xdr_enc_create(req, p, args);
+}
+
+/*
+ * Encode GETATTR request
+ */
+static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, uint32_t *p, const struct nfs4_getattr_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->fh)) == 0)
+		status = encode_getfattr(&xdr, args->bitmask);
+	return status;
+}
+
+/*
+ * Encode a CLOSE request
+ */
+static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_closeargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_close(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode an OPEN request
+ */
+static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_openargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 4,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_open(&xdr, args);
+	if (status)
+		goto out;
+	status = encode_getfh(&xdr);
+	if (status)
+		goto out;
+	status = encode_getfattr(&xdr, args->bitmask);
+out:
+	return status;
+}
+
+/*
+ * Encode an OPEN_CONFIRM request
+ */
+static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct nfs_open_confirmargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_open_confirm(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode an OPEN request with no attributes.
+ */
+static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nfs_openargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_open(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode an OPEN_DOWNGRADE request
+ */
+static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct nfs_closeargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops	= 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_open_downgrade(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode a LOCK request
+ */
+static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_lock(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode a LOCKT request
+ */
+static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_lockt(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode a LOCKU request
+ */
+static int nfs4_xdr_enc_locku(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_locku(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode a READLINK request
+ */
+static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, uint32_t *p, const struct nfs4_readlink *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_readlink(&xdr, args, req);
+out:
+	return status;
+}
+
+/*
+ * Encode a READDIR request
+ */
+static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, uint32_t *p, const struct nfs4_readdir_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_readdir(&xdr, args, req);
+out:
+	return status;
+}
+
+/*
+ * Encode a READ request
+ */
+static int nfs4_xdr_enc_read(struct rpc_rqst *req, uint32_t *p, struct nfs_readargs *args)
+{
+	struct rpc_auth	*auth = req->rq_task->tk_auth;
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int replen, status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_read(&xdr, args);
+	if (status)
+		goto out;
+
+	/* set up reply kvec
+	 *    toplevel status + taglen=0 + rescount + OP_PUTFH + status
+	 *       + OP_READ + status + eof + datalen = 9
+	 */
+	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_read_sz) << 2;
+	xdr_inline_pages(&req->rq_rcv_buf, replen,
+			 args->pages, args->pgbase, args->count);
+out:
+	return status;
+}
+
+/*
+ * Encode a SETATTR request
+ */
+static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, uint32_t *p, struct nfs_setattrargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops   = 3,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_setattr(&xdr, args, args->server);
+	if (status)
+		goto out;
+	status = encode_getfattr(&xdr, args->bitmask);
+out:
+	return status;
+}
+
+/*
+ * Encode a WRITE request
+ */
+static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writeargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_write(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * Encode a COMMIT request
+ */
+static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_writeargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status)
+		goto out;
+	status = encode_commit(&xdr, args);
+out:
+	return status;
+}
+
+/*
+ * FSINFO request
+ */
+static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, uint32_t *p, struct nfs4_fsinfo_arg *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops	= 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (!status)
+		status = encode_fsinfo(&xdr, args->bitmask);
+	return status;
+}
+
+/*
+ * a PATHCONF request
+ */
+static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, uint32_t *p, const struct nfs4_pathconf_arg *args)
+{
+	extern u32 nfs4_pathconf_bitmap[2];
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (!status)
+		status = encode_getattr_one(&xdr,
+				args->bitmask[0] & nfs4_pathconf_bitmap[0]);
+	return status;
+}
+
+/*
+ * a STATFS request
+ */
+static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, uint32_t *p, const struct nfs4_statfs_arg *args)
+{
+	extern u32 nfs4_statfs_bitmap[];
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, args->fh);
+	if (status == 0)
+		status = encode_getattr_two(&xdr,
+				args->bitmask[0] & nfs4_statfs_bitmap[0],
+				args->bitmask[1] & nfs4_statfs_bitmap[1]);
+	return status;
+}
+
+/*
+ * GETATTR_BITMAP request
+ */
+static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, uint32_t *p, const struct nfs_fh *fhandle)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	status = encode_putfh(&xdr, fhandle);
+	if (status == 0)
+		status = encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+				FATTR4_WORD0_LINK_SUPPORT|
+				FATTR4_WORD0_SYMLINK_SUPPORT|
+				FATTR4_WORD0_ACLSUPPORT);
+	return status;
+}
+
+/*
+ * a RENEW request
+ */
+static int nfs4_xdr_enc_renew(struct rpc_rqst *req, uint32_t *p, struct nfs4_client *clp)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops	= 1,
+	};
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	return encode_renew(&xdr, clp);
+}
+
+/*
+ * a SETCLIENTID request
+ */
+static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, uint32_t *p, struct nfs4_setclientid *sc)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops	= 1,
+	};
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	return encode_setclientid(&xdr, sc);
+}
+
+/*
+ * a SETCLIENTID_CONFIRM request
+ */
+static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, uint32_t *p, struct nfs4_client *clp)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops	= 3,
+	};
+	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
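+	/* besides confirming the clientid, the compound carries PUTROOTFH +
+	 * a lease_time GETATTR so the server's lease period comes back too */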
+	status = encode_setclientid_confirm(&xdr, clp);
+	if (!status)
+		status = encode_putrootfh(&xdr);
+	if (!status)
+		status = encode_fsinfo(&xdr, lease_bitmap);
+	return status;
+}
+
+/*
+ * DELEGRETURN request
+ */
+static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, uint32_t *p, const struct nfs4_delegreturnargs *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.nops = 2,
+	};
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, &hdr);
+	if ((status = encode_putfh(&xdr, args->fhandle)) == 0)
+		status = encode_delegreturn(&xdr, args->stateid);
+	return status;
+}
+
+/*
+ * START OF "GENERIC" DECODE ROUTINES.
+ *   These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
+#define READ32(x)         (x) = ntohl(*p++)
+#define READ64(x)         do {			\
+	(x) = (u64)ntohl(*p++) << 32;		\
+	(x) |= ntohl(*p++);			\
+} while (0)
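+/* note that READTIME discards the upper 32 bits of the 64-bit seconds
+ * field of an nfstime4 */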
+#define READTIME(x)       do {			\
+	p++;					\
+	(x.tv_sec) = ntohl(*p++);		\
+	(x.tv_nsec) = ntohl(*p++);		\
+} while (0)
+#define COPYMEM(x,nbytes) do {			\
+	memcpy((x), p, nbytes);			\
+	p += XDR_QUADLEN(nbytes);		\
+} while (0)
+
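+/*
+ * READ_BUF() makes sure the next 'nbytes' of the reply are available as one
+ * contiguous chunk and leaves 'p' pointing at it; the macros above then
+ * consume that chunk one big-endian 32-bit word at a time.
+ */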
+#define READ_BUF(nbytes)  do { \
+	p = xdr_inline_decode(xdr, nbytes); \
+	if (!p) { \
+		printk(KERN_WARNING "%s: reply buffer overflowed in line %d.\n", \
+			       	__FUNCTION__, __LINE__); \
+		return -EIO; \
+	} \
+} while (0)
+
+static int decode_opaque_inline(struct xdr_stream *xdr, uint32_t *len, char **string)
+{
+	uint32_t *p;
+
+	READ_BUF(4);
+	READ32(*len);
+	READ_BUF(*len);
+	*string = (char *)p;
+	return 0;
+}
+
+static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+	uint32_t *p;
+
+	READ_BUF(8);
+	READ32(hdr->status);
+	READ32(hdr->taglen);
+	
+	READ_BUF(hdr->taglen + 4);
+	hdr->tag = (char *)p;
+	p += XDR_QUADLEN(hdr->taglen);
+	READ32(hdr->nops);
+	return 0;
+}
+
+static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+	uint32_t *p;
+	uint32_t opnum;
+	int32_t nfserr;
+
+	READ_BUF(8);
+	READ32(opnum);
+	if (opnum != expected) {
+		printk(KERN_NOTICE
+				"nfs4_decode_op_hdr: Server returned operation"
+			       	" %d but we issued a request for %d\n",
+				opnum, expected);
+		return -EIO;
+	}
+	READ32(nfserr);
+	if (nfserr != NFS_OK)
+		return -nfs_stat_to_errno(nfserr);
+	return 0;
+}
+
+/* Dummy routine */
+static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs4_client *clp)
+{
+	uint32_t *p;
+	uint32_t strlen;
+	char *str;
+
+	READ_BUF(12);
+	return decode_opaque_inline(xdr, &strlen, &str);
+}
+
+static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+	uint32_t bmlen, *p;
+
+	READ_BUF(4);
+	READ32(bmlen);
+
+	bitmap[0] = bitmap[1] = 0;
+	READ_BUF((bmlen << 2));
+	if (bmlen > 0) {
+		READ32(bitmap[0]);
+		if (bmlen > 1)
+			READ32(bitmap[1]);
+	}
+	return 0;
+}
+
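+/*
+ * Remember where the attribute data starts in *savep so that
+ * verify_attr_len() can check afterwards that we consumed exactly the
+ * 'attrlen' bytes the server advertised.
+ */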
+static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, uint32_t **savep)
+{
+	uint32_t *p;
+
+	READ_BUF(4);
+	READ32(*attrlen);
+	*savep = xdr->p;
+	return 0;
+}
+
+static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
+{
+	if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) {
+		decode_attr_bitmap(xdr, bitmask);
+		bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
+	} else
+		bitmask[0] = bitmask[1] = 0;
+	dprintk("%s: bitmask=0x%x%x\n", __FUNCTION__, bitmask[0], bitmask[1]);
+	return 0;
+}
+
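+/*
+ * The decode_attr_*() helpers below all follow the same pattern: attributes
+ * are encoded in bitmap order, so if any lower-numbered bit is still set in
+ * bitmap[] we are being called out of order and must bail out with -EIO;
+ * otherwise, if our own bit is set, decode the value and clear the bit.
+ */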
+static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *type)
+{
+	uint32_t *p;
+
+	*type = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
+		READ_BUF(4);
+		READ32(*type);
+		if (*type < NF4REG || *type > NF4NAMEDATTR) {
+			dprintk("%s: bad type %d\n", __FUNCTION__, *type);
+			return -EIO;
+		}
+		bitmap[0] &= ~FATTR4_WORD0_TYPE;
+	}
+	dprintk("%s: type=0%o\n", __FUNCTION__, nfs_type2fmt[*type].nfs2type);
+	return 0;
+}
+
+static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
+{
+	uint32_t *p;
+
+	*change = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
+		READ_BUF(8);
+		READ64(*change);
+		bitmap[0] &= ~FATTR4_WORD0_CHANGE;
+	}
+	dprintk("%s: change attribute=%Lu\n", __FUNCTION__,
+			(unsigned long long)*change);
+	return 0;
+}
+
+static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
+{
+	uint32_t *p;
+
+	*size = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
+		READ_BUF(8);
+		READ64(*size);
+		bitmap[0] &= ~FATTR4_WORD0_SIZE;
+	}
+	dprintk("%s: file size=%Lu\n", __FUNCTION__, (unsigned long long)*size);
+	return 0;
+}
+
+static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
+		READ_BUF(4);
+		READ32(*res);
+		bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
+	}
+	dprintk("%s: link support=%s\n", __FUNCTION__, *res == 0 ? "false" : "true");
+	return 0;
+}
+
+static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
+		READ_BUF(4);
+		READ32(*res);
+		bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
+	}
+	dprintk("%s: symlink support=%s\n", __FUNCTION__, *res == 0 ? "false" : "true");
+	return 0;
+}
+
+static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fsid *fsid)
+{
+	uint32_t *p;
+
+	fsid->major = 0;
+	fsid->minor = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
+		READ_BUF(16);
+		READ64(fsid->major);
+		READ64(fsid->minor);
+		bitmap[0] &= ~FATTR4_WORD0_FSID;
+	}
+	dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __FUNCTION__,
+			(unsigned long long)fsid->major,
+			(unsigned long long)fsid->minor);
+	return 0;
+}
+
+static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+
+	*res = 60;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
+		READ_BUF(4);
+		READ32(*res);
+		bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
+	}
+	dprintk("%s: lease time=%u\n", __FUNCTION__, (unsigned int)*res);
+	return 0;
+}
+
+static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+
+	*res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
+		READ_BUF(4);
+		READ32(*res);
+		bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
+	}
+	dprintk("%s: ACLs supported=%u\n", __FUNCTION__, (unsigned int)*res);
+	return 0;
+}
+
+static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
+{
+	uint32_t *p;
+
+	*fileid = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
+		READ_BUF(8);
+		READ64(*fileid);
+		bitmap[0] &= ~FATTR4_WORD0_FILEID;
+	}
+	dprintk("%s: fileid=%Lu\n", __FUNCTION__, (unsigned long long)*fileid);
+	return 0;
+}
+
+static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
+	}
+	dprintk("%s: files avail=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
+	}
+	dprintk("%s: files free=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
+	}
+	dprintk("%s: files total=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
+	}
+	dprintk("%s: maxfilesize=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*maxlink = 1;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
+		READ_BUF(4);
+		READ32(*maxlink);
+		bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
+	}
+	dprintk("%s: maxlink=%u\n", __FUNCTION__, *maxlink);
+	return status;
+}
+
+static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*maxname = 1024;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
+		READ_BUF(4);
+		READ32(*maxname);
+		bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
+	}
+	dprintk("%s: maxname=%u\n", __FUNCTION__, *maxname);
+	return status;
+}
+
+static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 1024;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXREAD - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
+		uint64_t maxread;
+		READ_BUF(8);
+		READ64(maxread);
+		if (maxread > 0x7FFFFFFF)
+			maxread = 0x7FFFFFFF;
+		*res = (uint32_t)maxread;
+		bitmap[0] &= ~FATTR4_WORD0_MAXREAD;
+	}
+	dprintk("%s: maxread=%lu\n", __FUNCTION__, (unsigned long)*res);
+	return status;
+}
+
+static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 1024;
+	if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXWRITE - 1U)))
+		return -EIO;
+	if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
+		uint64_t maxwrite;
+		READ_BUF(8);
+		READ64(maxwrite);
+		if (maxwrite > 0x7FFFFFFF)
+			maxwrite = 0x7FFFFFFF;
+		*res = (uint32_t)maxwrite;
+		bitmap[0] &= ~FATTR4_WORD0_MAXWRITE;
+	}
+	dprintk("%s: maxwrite=%lu\n", __FUNCTION__, (unsigned long)*res);
+	return status;
+}
+
+static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *mode)
+{
+	uint32_t *p;
+
+	*mode = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
+		READ_BUF(4);
+		READ32(*mode);
+		*mode &= ~S_IFMT;
+		bitmap[1] &= ~FATTR4_WORD1_MODE;
+	}
+	dprintk("%s: file mode=0%o\n", __FUNCTION__, (unsigned int)*mode);
+	return 0;
+}
+
+static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
+{
+	uint32_t *p;
+
+	*nlink = 1;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
+		READ_BUF(4);
+		READ32(*nlink);
+		bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
+	}
+	dprintk("%s: nlink=%u\n", __FUNCTION__, (unsigned int)*nlink);
+	return 0;
+}
+
+static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_client *clp, int32_t *uid)
+{
+	uint32_t len, *p;
+
+	*uid = -2;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
+		READ_BUF(4);
+		READ32(len);
+		READ_BUF(len);
+		if (len < XDR_MAX_NETOBJ) {
+			if (nfs_map_name_to_uid(clp, (char *)p, len, uid) != 0)
+				dprintk("%s: nfs_map_name_to_uid failed!\n",
+						__FUNCTION__);
+		} else
+			printk(KERN_WARNING "%s: name too long (%u)!\n",
+					__FUNCTION__, len);
+		bitmap[1] &= ~FATTR4_WORD1_OWNER;
+	}
+	dprintk("%s: uid=%d\n", __FUNCTION__, (int)*uid);
+	return 0;
+}
+
+static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_client *clp, int32_t *gid)
+{
+	uint32_t len, *p;
+
+	*gid = -2;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
+		READ_BUF(4);
+		READ32(len);
+		READ_BUF(len);
+		if (len < XDR_MAX_NETOBJ) {
+			if (nfs_map_group_to_gid(clp, (char *)p, len, gid) != 0)
+				dprintk("%s: nfs_map_group_to_gid failed!\n",
+						__FUNCTION__);
+		} else
+			printk(KERN_WARNING "%s: name too long (%u)!\n",
+					__FUNCTION__, len);
+		bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
+	}
+	dprintk("%s: gid=%d\n", __FUNCTION__, (int)*gid);
+	return 0;
+}
+
+static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
+{
+	uint32_t major = 0, minor = 0, *p;
+
+	*rdev = MKDEV(0,0);
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_RAWDEV - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
+		dev_t tmp;
+
+		READ_BUF(8);
+		READ32(major);
+		READ32(minor);
+		tmp = MKDEV(major, minor);
+		if (MAJOR(tmp) == major && MINOR(tmp) == minor)
+			*rdev = tmp;
+		bitmap[1] &= ~ FATTR4_WORD1_RAWDEV;
+	}
+	dprintk("%s: rdev=(0x%x:0x%x)\n", __FUNCTION__, major, minor);
+	return 0;
+}
+
+static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
+	}
+	dprintk("%s: space avail=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
+	}
+	dprintk("%s: space free=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+	uint32_t *p;
+	int status = 0;
+
+	*res = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
+		READ_BUF(8);
+		READ64(*res);
+		bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
+	}
+	dprintk("%s: space total=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+	return status;
+}
+
+static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
+{
+	uint32_t *p;
+
+	*used = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
+		READ_BUF(8);
+		READ64(*used);
+		bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
+	}
+	dprintk("%s: space used=%Lu\n", __FUNCTION__,
+			(unsigned long long)*used);
+	return 0;
+}
+
+static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
+{
+	uint32_t *p;
+	uint64_t sec;
+	uint32_t nsec;
+
+	READ_BUF(12);
+	READ64(sec);
+	READ32(nsec);
+	time->tv_sec = (time_t)sec;
+	time->tv_nsec = (long)nsec;
+	return 0;
+}
+
+static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+	int status = 0;
+
+	time->tv_sec = 0;
+	time->tv_nsec = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_ACCESS - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_TIME_ACCESS)) {
+		status = decode_attr_time(xdr, time);
+		bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
+	}
+	dprintk("%s: atime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+	return status;
+}
+
+static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+	int status = 0;
+
+	time->tv_sec = 0;
+	time->tv_nsec = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_METADATA - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) {
+		status = decode_attr_time(xdr, time);
+		bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
+	}
+	dprintk("%s: ctime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+	return status;
+}
+
+static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+	int status = 0;
+
+	time->tv_sec = 0;
+	time->tv_nsec = 0;
+	if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_MODIFY - 1U)))
+		return -EIO;
+	if (likely(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) {
+		status = decode_attr_time(xdr, time);
+		bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
+	}
+	dprintk("%s: mtime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+	return status;
+}
+
+static int verify_attr_len(struct xdr_stream *xdr, uint32_t *savep, uint32_t attrlen)
+{
+	unsigned int attrwords = XDR_QUADLEN(attrlen);
+	unsigned int nwords = xdr->p - savep;
+
+	if (unlikely(attrwords != nwords)) {
+		printk(KERN_WARNING "%s: server returned incorrect attribute length: %u %c %u\n",
+				__FUNCTION__,
+				attrwords << 2,
+				(attrwords < nwords) ? '<' : '>',
+				nwords << 2);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+	uint32_t *p;
+
+	READ_BUF(20);
+	READ32(cinfo->atomic);
+	READ64(cinfo->before);
+	READ64(cinfo->after);
+	return 0;
+}
+
+static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
+{
+	uint32_t *p;
+	uint32_t supp, acc;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_ACCESS);
+	if (status)
+		return status;
+	READ_BUF(8);
+	READ32(supp);
+	READ32(acc);
+	access->supported = supp;
+	access->access = acc;
+	return 0;
+}
+
+static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_CLOSE);
+	if (status)
+		return status;
+	READ_BUF(sizeof(res->stateid.data));
+	COPYMEM(res->stateid.data, sizeof(res->stateid.data));
+	return 0;
+}
+
+static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_COMMIT);
+	if (status)
+		return status;
+	READ_BUF(8);
+	COPYMEM(res->verf->verifier, 8);
+	return 0;
+}
+
+static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+	uint32_t *p;
+	uint32_t bmlen;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_CREATE);
+	if (status)
+		return status;
+	if ((status = decode_change_info(xdr, cinfo)))
+		return status;
+	READ_BUF(4);
+	READ32(bmlen);
+	READ_BUF(bmlen << 2);
+	return 0;
+}
+
+static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
+{
+	uint32_t *savep;
+	uint32_t attrlen, 
+		 bitmap[2] = {0};
+	int status;
+
+	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
+		goto xdr_error;
+	status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+	if (status != 0)
+		printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+	return status;
+}
+
+static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
+{
+	uint32_t *savep;
+	uint32_t attrlen, 
+		 bitmap[2] = {0};
+	int status;
+	
+	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+		goto xdr_error;
+
+	if ((status = decode_attr_files_avail(xdr, bitmap, &fsstat->afiles)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0)
+		goto xdr_error;
+
+	status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+	if (status != 0)
+		printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+	return status;
+}
+
+static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf)
+{
+	uint32_t *savep;
+	uint32_t attrlen, 
+		 bitmap[2] = {0};
+	int status;
+	
+	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+		goto xdr_error;
+
+	if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0)
+		goto xdr_error;
+
+	status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+	if (status != 0)
+		printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+	return status;
+}
+
+static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server)
+{
+	uint32_t *savep;
+	uint32_t attrlen,
+		 bitmap[2] = {0},
+		 type;
+	int status, fmode = 0;
+
+	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+		goto xdr_error;
+
+	fattr->bitmap[0] = bitmap[0];
+	fattr->bitmap[1] = bitmap[1];
+
+	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+		goto xdr_error;
+
+
+	if ((status = decode_attr_type(xdr, bitmap, &type)) != 0)
+		goto xdr_error;
+	fattr->type = nfs_type2fmt[type].nfs2type;
+	fmode = nfs_type2fmt[type].mode;
+
+	if ((status = decode_attr_change(xdr, bitmap, &fattr->change_attr)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_size(xdr, bitmap, &fattr->size)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_fsid(xdr, bitmap, &fattr->fsid_u.nfs4)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_fileid(xdr, bitmap, &fattr->fileid)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_mode(xdr, bitmap, &fattr->mode)) != 0)
+		goto xdr_error;
+	fattr->mode |= fmode;
+	if ((status = decode_attr_nlink(xdr, bitmap, &fattr->nlink)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_owner(xdr, bitmap, server->nfs4_state, &fattr->uid)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_group(xdr, bitmap, server->nfs4_state, &fattr->gid)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_rdev(xdr, bitmap, &fattr->rdev)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_space_used(xdr, bitmap, &fattr->du.nfs3.used)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_time_access(xdr, bitmap, &fattr->atime)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime)) != 0)
+		goto xdr_error;
+	if ((status = verify_attr_len(xdr, savep, attrlen)) == 0) {
+		fattr->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4;
+		fattr->timestamp = jiffies;
+	}
+xdr_error:
+	if (status != 0)
+		printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+	return status;
+}
+
+
+static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
+{
+	uint32_t *savep;
+	uint32_t attrlen, bitmap[2];
+	int status;
+
+	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+		goto xdr_error;
+
+	fsinfo->rtmult = fsinfo->wtmult = 512;	/* ??? */
+
+	if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0)
+		goto xdr_error;
+	if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0)
+		goto xdr_error;
+	fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax;
+	if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0)
+		goto xdr_error;
+	fsinfo->wtpref = fsinfo->wtmax;
+
+	status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+	if (status != 0)
+		printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+	return status;
+}
+
+static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	uint32_t *p;
+	uint32_t len;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_GETFH);
+	if (status)
+		return status;
+	/* Zero handle first to allow comparisons */
+	memset(fh, 0, sizeof(*fh));
+
+	READ_BUF(4);
+	READ32(len);
+	if (len > NFS4_FHSIZE)
+		return -EIO;
+	fh->size = len;
+	READ_BUF(len);
+	COPYMEM(fh->data, len);
+	return 0;
+}
+
+static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+	int status;
+	
+	status = decode_op_hdr(xdr, OP_LINK);
+	if (status)
+		return status;
+	return decode_change_info(xdr, cinfo);
+}
+
+/*
+ * We create the owner, so we know a proper owner.id length is 4.
+ */
+static int decode_lock_denied (struct xdr_stream *xdr, struct nfs_lock_denied *denied)
+{
+	uint32_t *p;
+	uint32_t namelen;
+
+	READ_BUF(32);
+	READ64(denied->offset);
+	READ64(denied->length);
+	READ32(denied->type);
+	READ64(denied->owner.clientid);
+	READ32(namelen);
+	READ_BUF(namelen);
+	if (namelen == 4)
+		READ32(denied->owner.id);
+	return -NFS4ERR_DENIED;
+}
+
+static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_LOCK);
+	if (status == 0) {
+		READ_BUF(sizeof(nfs4_stateid));
+		COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+	} else if (status == -NFS4ERR_DENIED)
+		return decode_lock_denied(xdr, &res->u.denied);
+	return status;
+}
+
+static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockres *res)
+{
+	int status;
+	status = decode_op_hdr(xdr, OP_LOCKT);
+	if (status == -NFS4ERR_DENIED)
+		return decode_lock_denied(xdr, &res->u.denied);
+	return status;
+}
+
+static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_LOCKU);
+	if (status == 0) {
+		READ_BUF(sizeof(nfs4_stateid));
+		COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+	}
+	return status;
+}
+
+static int decode_lookup(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_LOOKUP);
+}
+
+/* This is too sick! */
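+/*
+ * A write delegation's space limit is given either as an absolute file size
+ * (limit type 1) or as a block count times a block size (limit type 2).
+ */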
+static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
+{
+	uint32_t *p;
+	uint32_t limit_type, nblocks, blocksize;
+
+	READ_BUF(12);
+	READ32(limit_type);
+	switch (limit_type) {
+		case 1:
+			READ64(*maxsize);
+			break;
+		case 2:
+			READ32(nblocks);
+			READ32(blocksize);
+			*maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
+	}
+	return 0;
+}
+
+static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+	uint32_t *p;
+	uint32_t delegation_type;
+
+	READ_BUF(4);
+	READ32(delegation_type);
+	if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
+		res->delegation_type = 0;
+		return 0;
+	}
+	READ_BUF(20);
+	COPYMEM(res->delegation.data, sizeof(res->delegation.data));
+	READ32(res->do_recall);
+	switch (delegation_type) {
+		case NFS4_OPEN_DELEGATE_READ:
+			res->delegation_type = FMODE_READ;
+			break;
+		case NFS4_OPEN_DELEGATE_WRITE:
+			res->delegation_type = FMODE_WRITE|FMODE_READ;
+			if (decode_space_limit(xdr, &res->maxsize) < 0)
+				return -EIO;
+	}
+	return decode_ace(xdr, NULL, res->server->nfs4_state);
+}
+
+static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+	uint32_t *p;
+	uint32_t bmlen;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_OPEN);
+	if (status)
+		return status;
+	READ_BUF(sizeof(res->stateid.data));
+	COPYMEM(res->stateid.data, sizeof(res->stateid.data));
+
+	decode_change_info(xdr, &res->cinfo);
+
+	READ_BUF(8);
+	READ32(res->rflags);
+	READ32(bmlen);
+	if (bmlen > 10)
+		goto xdr_error;
+
+	READ_BUF(bmlen << 2);
+	p += bmlen;
+	return decode_delegation(xdr, res);
+xdr_error:
+	printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__);
+	return -EIO;
+}
+
+static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
+	if (status)
+		return status;
+	READ_BUF(sizeof(res->stateid.data));
+	COPYMEM(res->stateid.data, sizeof(res->stateid.data));
+	return 0;
+}
+
+static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
+	if (status)
+		return status;
+	READ_BUF(sizeof(res->stateid.data));
+	COPYMEM(res->stateid.data, sizeof(res->stateid.data));
+	return 0;
+}
+
+static int decode_putfh(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_PUTFH);
+}
+
+static int decode_putrootfh(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_PUTROOTFH);
+}
+
+static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
+{
+	struct kvec *iov = req->rq_rcv_buf.head;
+	uint32_t *p;
+	uint32_t count, eof, recvd, hdrlen;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_READ);
+	if (status)
+		return status;
+	READ_BUF(8);
+	READ32(eof);
+	READ32(count);
+	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	recvd = req->rq_rcv_buf.len - hdrlen;
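+	/* never trust the server's count beyond the data that actually
+	 * arrived, or we would run off the end of the receive buffer */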
+	if (count > recvd) {
+		printk(KERN_WARNING "NFS: server cheating in read reply: "
+				"count %u > recvd %u\n", count, recvd);
+		count = recvd;
+		eof = 0;
+	}
+	xdr_read_pages(xdr, count);
+	res->eof = eof;
+	res->count = count;
+	return 0;
+}
+
+static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
+{
+	struct xdr_buf	*rcvbuf = &req->rq_rcv_buf;
+	struct page	*page = *rcvbuf->pages;
+	struct kvec	*iov = rcvbuf->head;
+	unsigned int	nr, pglen = rcvbuf->page_len;
+	uint32_t	*end, *entry, *p, *kaddr;
+	uint32_t	len, attrlen;
+	int 		hdrlen, recvd, status;
+
+	status = decode_op_hdr(xdr, OP_READDIR);
+	if (status)
+		return status;
+	READ_BUF(8);
+	COPYMEM(readdir->verifier.data, 8);
+
+	hdrlen = (char *) p - (char *) iov->iov_base;
+	recvd = rcvbuf->len - hdrlen;
+	if (pglen > recvd)
+		pglen = recvd;
+	xdr_read_pages(xdr, pglen);
+
+	BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
+	kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
+	end = (uint32_t *) ((char *)p + pglen + readdir->pgbase);
+	entry = p;
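+	/* walk the entry4 list we just read into the page: each entry is a
+	 * "value follows" word, a 64-bit cookie, the name, the attr bitmap
+	 * and the attrs; running past 'end' means the reply was truncated */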
+	for (nr = 0; *p++; nr++) {
+		if (p + 3 > end)
+			goto short_pkt;
+		p += 2;			/* cookie */
+		len = ntohl(*p++);	/* filename length */
+		if (len > NFS4_MAXNAMLEN) {
+			printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
+			goto err_unmap;
+		}
+		p += XDR_QUADLEN(len);
+		if (p + 1 > end)
+			goto short_pkt;
+		len = ntohl(*p++);	/* bitmap length */
+		p += len;
+		if (p + 1 > end)
+			goto short_pkt;
+		attrlen = XDR_QUADLEN(ntohl(*p++));
+		p += attrlen;		/* attributes */
+		if (p + 2 > end)
+			goto short_pkt;
+		entry = p;
+	}
+	if (!nr && (entry[0] != 0 || entry[1] == 0))
+		goto short_pkt;
+out:	
+	kunmap_atomic(kaddr, KM_USER0);
+	return 0;
+short_pkt:
+	entry[0] = entry[1] = 0;
+	/* truncate listing ? */
+	if (!nr) {
+		printk(KERN_NOTICE "NFS: readdir reply truncated!\n");
+		entry[1] = 1;
+	}
+	goto out;
+err_unmap:
+	kunmap_atomic(kaddr, KM_USER0);
+	return -errno_NFSERR_IO;
+}
+
+static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
+{
+	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+	struct kvec *iov = rcvbuf->head;
+	int hdrlen, len, recvd;
+	uint32_t *p;
+	char *kaddr;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_READLINK);
+	if (status)
+		return status;
+
+	/* Convert length of symlink */
+	READ_BUF(4);
+	READ32(len);
+	if (len >= rcvbuf->page_len || len <= 0) {
+		dprintk(KERN_WARNING "nfs: server returned giant symlink!\n");
+		return -ENAMETOOLONG;
+	}
+	hdrlen = (char *) xdr->p - (char *) iov->iov_base;
+	recvd = req->rq_rcv_buf.len - hdrlen;
+	if (recvd < len) {
+		printk(KERN_WARNING "NFS: server cheating in readlink reply: "
+				"count %u > recvd %u\n", len, recvd);
+		return -EIO;
+	}
+	xdr_read_pages(xdr, len);
+	/*
+	 * The XDR encode routine has set things up so that
+	 * the link text will be copied directly into the
+	 * buffer.  We just have to do overflow-checking,
+	 * and null-terminate the text (the VFS expects
+	 * null-termination).
+	 */
+	kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);
+	kaddr[len+rcvbuf->page_base] = '\0';
+	kunmap_atomic(kaddr, KM_USER0);
+	return 0;
+}
+
+static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+	int status;
+
+	status = decode_op_hdr(xdr, OP_REMOVE);
+	if (status)
+		goto out;
+	status = decode_change_info(xdr, cinfo);
+out:
+	return status;
+}
+
+static int decode_rename(struct xdr_stream *xdr, struct nfs4_change_info *old_cinfo,
+	      struct nfs4_change_info *new_cinfo)
+{
+	int status;
+
+	status = decode_op_hdr(xdr, OP_RENAME);
+	if (status)
+		goto out;
+	if ((status = decode_change_info(xdr, old_cinfo)))
+		goto out;
+	status = decode_change_info(xdr, new_cinfo);
+out:
+	return status;
+}
+
+static int decode_renew(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_RENEW);
+}
+
+static int
+decode_savefh(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_SAVEFH);
+}
+
+static int decode_setattr(struct xdr_stream *xdr, struct nfs_setattrres *res)
+{
+	uint32_t *p;
+	uint32_t bmlen;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_SETATTR);
+	if (status)
+		return status;
+	READ_BUF(4);
+	READ32(bmlen);
+	READ_BUF(bmlen << 2);
+	return 0;
+}
+
+static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_client *clp)
+{
+	uint32_t *p;
+	uint32_t opnum;
+	int32_t nfserr;
+
+	READ_BUF(8);
+	READ32(opnum);
+	if (opnum != OP_SETCLIENTID) {
+		printk(KERN_NOTICE
+				"nfs4_decode_setclientid: Server returned operation"
+			       	" %d\n", opnum);
+		return -EIO;
+	}
+	READ32(nfserr);
+	if (nfserr == NFS_OK) {
+		READ_BUF(8 + sizeof(clp->cl_confirm.data));
+		READ64(clp->cl_clientid);
+		COPYMEM(clp->cl_confirm.data, sizeof(clp->cl_confirm.data));
+	} else if (nfserr == NFSERR_CLID_INUSE) {
+		uint32_t len;
+
+		/* skip netid string */
+		READ_BUF(4);
+		READ32(len);
+		READ_BUF(len);
+
+		/* skip uaddr string */
+		READ_BUF(4);
+		READ32(len);
+		READ_BUF(len);
+		return -NFSERR_CLID_INUSE;
+	} else
+		return -nfs_stat_to_errno(nfserr);
+
+	return 0;
+}
+
+static int decode_setclientid_confirm(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM);
+}
+
+static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
+{
+	uint32_t *p;
+	int status;
+
+	status = decode_op_hdr(xdr, OP_WRITE);
+	if (status)
+		return status;
+
+	READ_BUF(16);
+	READ32(res->count);
+	READ32(res->verf->committed);
+	COPYMEM(res->verf->verifier, 8);
+	return 0;
+}
+
+static int decode_delegreturn(struct xdr_stream *xdr)
+{
+	return decode_op_hdr(xdr, OP_DELEGRETURN);
+}
+
+/*
+ * Decode OPEN_DOWNGRADE response
+ */
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_closeres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_open_downgrade(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * END OF "GENERIC" DECODE ROUTINES.
+ */
+
+/*
+ * Decode ACCESS response
+ */
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_accessres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) == 0)
+		status = decode_access(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode LOOKUP response
+ */
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_lookup_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_lookup(&xdr)) != 0)
+		goto out;
+	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+		goto out;
+	status = decode_getfattr(&xdr, res->fattr, res->server);
+out:
+	return status;
+}
+
+/*
+ * Decode LOOKUP_ROOT response
+ */
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_lookup_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putrootfh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_getfh(&xdr, res->fh)) == 0)
+		status = decode_getfattr(&xdr, res->fattr, res->server);
+out:
+	return status;
+}
+
+/*
+ * Decode REMOVE response
+ */
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) == 0)
+		status = decode_remove(&xdr, cinfo);
+out:
+	return status;
+}
+
+/*
+ * Decode RENAME response
+ */
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_rename_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_savefh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo);
+out:
+	return status;
+}
+
+/*
+ * Decode LINK response
+ */
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_savefh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	status = decode_link(&xdr, cinfo);
+out:
+	return status;
+}
+
+/*
+ * Decode CREATE response
+ */
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_create_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
+		goto out;
+	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+		goto out;
+	status = decode_getfattr(&xdr, res->fattr, res->server);
+	if (status == NFS4ERR_DELAY)
+		status = 0;
+out:
+	return status;
+}
+
+/*
+ * Decode SYMLINK response
+ */
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_create_res *res)
+{
+	return nfs4_xdr_dec_create(rqstp, p, res);
+}
+
+/*
+ * Decode GETATTR response
+ */
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_getattr_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+	
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_getfattr(&xdr, res->fattr, res->server);
+out:
+	return status;
+
+}
+
+
+/*
+ * Decode CLOSE response
+ */
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_closeres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_close(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode OPEN response
+ */
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_open(&xdr, res);
+	if (status)
+		goto out;
+	status = decode_getfh(&xdr, &res->fh);
+	if (status)
+		goto out;
+	status = decode_getfattr(&xdr, res->f_attr, res->server);
+	if (status == NFS4ERR_DELAY)
+		status = 0;
+out:
+	return status;
+}
+
+/*
+ * Decode OPEN_CONFIRM response
+ */
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_open_confirmres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_open_confirm(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode OPEN response (no attributes requested)
+ */
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_open(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode SETATTR response
+ */
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_setattrres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_setattr(&xdr, res);
+	if (status)
+		goto out;
+	status = decode_getfattr(&xdr, res->fattr, res->server);
+	if (status == NFS4ERR_DELAY)
+		status = 0;
+out:
+	return status;
+}
+
+/*
+ * Decode LOCK response
+ */
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_lock(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode LOCKT response
+ */
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_lockt(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode LOCKU response
+ */
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_locku(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode READLINK response
+ */
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, uint32_t *p, void *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_readlink(&xdr, rqstp);
+out:
+	return status;
+}
+
+/*
+ * Decode READDIR response
+ */
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_readdir_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_readdir(&xdr, rqstp, res);
+out:
+	return status;
+}
+
+/*
+ * Decode READ response
+ */
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_readres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_read(&xdr, rqstp, res);
+	if (!status)
+		status = res->count;
+out:
+	return status;
+}
+
+/*
+ * Decode WRITE response
+ */
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_writeres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_write(&xdr, res);
+	if (!status)
+		status = res->count;
+out:
+	return status;
+}
+
+/*
+ * Decode COMMIT response
+ */
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_writeres *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_putfh(&xdr);
+	if (status)
+		goto out;
+	status = decode_commit(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode FSINFO response
+ */
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, uint32_t *p, struct nfs_fsinfo *fsinfo)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_putfh(&xdr);
+	if (!status)
+		status = decode_fsinfo(&xdr, fsinfo);
+	if (!status)
+		status = -nfs_stat_to_errno(hdr.status);
+	return status;
+}
+
+/*
+ * Decode PATHCONF response
+ */
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, uint32_t *p, struct nfs_pathconf *pathconf)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_putfh(&xdr);
+	if (!status)
+		status = decode_pathconf(&xdr, pathconf);
+	return status;
+}
+
+/*
+ * Decode STATFS response
+ */
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, uint32_t *p, struct nfs_fsstat *fsstat)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_putfh(&xdr);
+	if (!status)
+		status = decode_statfs(&xdr, fsstat);
+	return status;
+}
+
+/*
+ * Decode GETATTR_BITMAP (server capabilities) response
+ */
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, uint32_t *p, struct nfs4_server_caps_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+		goto out;
+	if ((status = decode_putfh(&xdr)) != 0)
+		goto out;
+	status = decode_server_caps(&xdr, res);
+out:
+	return status;
+}
+
+/*
+ * Decode RENEW response
+ */
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, uint32_t *p, void *dummy)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_renew(&xdr);
+	return status;
+}
+
+/*
+ * Decode SETCLIENTID response
+ */
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, uint32_t *p,
+		struct nfs4_client *clp)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_setclientid(&xdr, clp);
+	if (!status)
+		status = -nfs_stat_to_errno(hdr.status);
+	return status;
+}
+
+/*
+ * Decode SETCLIENTID_CONFIRM response
+ */
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, uint32_t *p, struct nfs_fsinfo *fsinfo)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_setclientid_confirm(&xdr);
+	if (!status)
+		status = decode_putrootfh(&xdr);
+	if (!status)
+		status = decode_fsinfo(&xdr, fsinfo);
+	if (!status)
+		status = -nfs_stat_to_errno(hdr.status);
+	return status;
+}
+
+/*
+ * Decode DELEGRETURN response
+ */
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, void *dummy)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (status == 0) {
+		status = decode_putfh(&xdr);
+		if (status == 0)
+			status = decode_delegreturn(&xdr);
+	}
+	return status;
+}
+
+uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus)
+{
+	uint32_t bitmap[2] = {0};
+	uint32_t len;
+
+	if (!*p++) {
+		if (!*p)
+			return ERR_PTR(-EAGAIN);
+		entry->eof = 1;
+		return ERR_PTR(-EBADCOOKIE);
+	}
+
+	entry->prev_cookie = entry->cookie;
+	p = xdr_decode_hyper(p, &entry->cookie);
+	entry->len = ntohl(*p++);
+	entry->name = (const char *) p;
+	p += XDR_QUADLEN(entry->len);
+
+	/*
+	 * In case the server doesn't return an inode number,
+	 * we fake one here.  (We don't use inode number 0,
+	 * since glibc seems to choke on it...)
+	 */
+	entry->ino = 1;
+
+	len = ntohl(*p++);		/* bitmap length */
+	if (len-- > 0) {
+		bitmap[0] = ntohl(*p++);
+		if (len-- > 0) {
+			bitmap[1] = ntohl(*p++);
+			p += len;
+		}
+	}
+	len = XDR_QUADLEN(ntohl(*p++));	/* attribute buffer length */
+	if (len > 0) {
+		if (bitmap[0] == 0 && bitmap[1] == FATTR4_WORD1_MOUNTED_ON_FILEID)
+			xdr_decode_hyper(p, &entry->ino);
+		else if (bitmap[0] == FATTR4_WORD0_FILEID)
+			xdr_decode_hyper(p, &entry->ino);
+		p += len;
+	}
+
+	entry->eof = !p[0] && p[1];
+	return p;
+}
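+
+/*
+ * For reference, the XDR layout consumed above for each READDIR entry is
+ * (all lengths in 32-bit big-endian words, names padded to a word boundary):
+ *
+ *	value_follows | cookie (2 words) | name length | name ...
+ *	| bitmap length + bitmap words | attr length + opaque attributes
+ *
+ * A zero value_follows word terminates the list and is followed by the
+ * directory eof flag, which is what the final test above looks at.
+ */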
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static struct {
+	int stat;
+	int errno;
+} nfs_errtbl[] = {
+	{ NFS4_OK,		0		},
+	{ NFS4ERR_PERM,		EPERM		},
+	{ NFS4ERR_NOENT,	ENOENT		},
+	{ NFS4ERR_IO,		errno_NFSERR_IO	},
+	{ NFS4ERR_NXIO,		ENXIO		},
+	{ NFS4ERR_ACCESS,	EACCES		},
+	{ NFS4ERR_EXIST,	EEXIST		},
+	{ NFS4ERR_XDEV,		EXDEV		},
+	{ NFS4ERR_NOTDIR,	ENOTDIR		},
+	{ NFS4ERR_ISDIR,	EISDIR		},
+	{ NFS4ERR_INVAL,	EINVAL		},
+	{ NFS4ERR_FBIG,		EFBIG		},
+	{ NFS4ERR_NOSPC,	ENOSPC		},
+	{ NFS4ERR_ROFS,		EROFS		},
+	{ NFS4ERR_MLINK,	EMLINK		},
+	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG	},
+	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY	},
+	{ NFS4ERR_DQUOT,	EDQUOT		},
+	{ NFS4ERR_STALE,	ESTALE		},
+	{ NFS4ERR_BADHANDLE,	EBADHANDLE	},
+	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE	},
+	{ NFS4ERR_NOTSUPP,	ENOTSUPP	},
+	{ NFS4ERR_TOOSMALL,	ETOOSMALL	},
+	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT	},
+	{ NFS4ERR_BADTYPE,	EBADTYPE	},
+	{ NFS4ERR_LOCKED,	EAGAIN		},
+	{ NFS4ERR_RESOURCE,	EREMOTEIO	},
+	{ NFS4ERR_SYMLINK,	ELOOP		},
+	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP	},
+	{ NFS4ERR_DEADLOCK,	EDEADLK		},
+	{ NFS4ERR_WRONGSEC,	EPERM		}, /* FIXME: this needs
+						    * to be handled by a
+						    * middle-layer.
+						    */
+	{ -1,			EIO		}
+};
+
+/*
+ * Convert an NFSv4 status code to a local errno value.
+ */
+static int
+nfs_stat_to_errno(int stat)
+{
+	int i;
+	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+		if (nfs_errtbl[i].stat == stat)
+			return nfs_errtbl[i].errno;
+	}
+	if (stat <= 10000 || stat > 10100) {
+		/* The server is looney tunes. */
+		return ESERVERFAULT;
+	}
+	/* If we cannot translate the error, the recovery routines should
+	 * handle it.
+	 * Note: remaining NFSv4 error codes have values > 10000, so should
+	 * not conflict with native Linux error codes.
+	 */
+	return stat;
+}
+
+#ifndef MAX
+# define MAX(a, b)	(((a) > (b))? (a) : (b))
+#endif
+
+#define PROC(proc, argtype, restype)				\
+[NFSPROC4_CLNT_##proc] = {					\
+	.p_proc   = NFSPROC4_COMPOUND,				\
+	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,		\
+	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,		\
+	.p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2,	\
+    }
+
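+/*
+ * For illustration, PROC(READ, enc_read, dec_read) expands to roughly
+ *
+ *	[NFSPROC4_CLNT_READ] = {
+ *		.p_proc   = NFSPROC4_COMPOUND,
+ *		.p_encode = (kxdrproc_t) nfs4_xdr_enc_read,
+ *		.p_decode = (kxdrproc_t) nfs4_xdr_dec_read,
+ *		.p_bufsiz = MAX(NFS4_enc_read_sz, NFS4_dec_read_sz) << 2,
+ *	},
+ *
+ * i.e. every procedure goes out as a COMPOUND, with a buffer sized to the
+ * larger of the encode/decode sizes, converted from 32-bit XDR words to
+ * bytes by the << 2.
+ */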
+struct rpc_procinfo	nfs4_procedures[] = {
+  PROC(READ,		enc_read,	dec_read),
+  PROC(WRITE,		enc_write,	dec_write),
+  PROC(COMMIT,		enc_commit,	dec_commit),
+  PROC(OPEN,		enc_open,	dec_open),
+  PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
+  PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
+  PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
+  PROC(CLOSE,		enc_close,	dec_close),
+  PROC(SETATTR,		enc_setattr,	dec_setattr),
+  PROC(FSINFO,		enc_fsinfo,	dec_fsinfo),
+  PROC(RENEW,		enc_renew,	dec_renew),
+  PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
+  PROC(SETCLIENTID_CONFIRM,	enc_setclientid_confirm,	dec_setclientid_confirm),
+  PROC(LOCK,            enc_lock,       dec_lock),
+  PROC(LOCKT,           enc_lockt,      dec_lockt),
+  PROC(LOCKU,           enc_locku,      dec_locku),
+  PROC(ACCESS,		enc_access,	dec_access),
+  PROC(GETATTR,		enc_getattr,	dec_getattr),
+  PROC(LOOKUP,		enc_lookup,	dec_lookup),
+  PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
+  PROC(REMOVE,		enc_remove,	dec_remove),
+  PROC(RENAME,		enc_rename,	dec_rename),
+  PROC(LINK,		enc_link,	dec_link),
+  PROC(SYMLINK,		enc_symlink,	dec_symlink),
+  PROC(CREATE,		enc_create,	dec_create),
+  PROC(PATHCONF,	enc_pathconf,	dec_pathconf),
+  PROC(STATFS,		enc_statfs,	dec_statfs),
+  PROC(READLINK,	enc_readlink,	dec_readlink),
+  PROC(READDIR,		enc_readdir,	dec_readdir),
+  PROC(SERVER_CAPS,	enc_server_caps, dec_server_caps),
+  PROC(DELEGRETURN,	enc_delegreturn, dec_delegreturn),
+};
+
+struct rpc_version		nfs_version4 = {
+	.number			= 4,
+	.nrprocs		= sizeof(nfs4_procedures)/sizeof(nfs4_procedures[0]),
+	.procs			= nfs4_procedures
+};
+
+/*
+ * Local variables:
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
new file mode 100644
index 0000000..fd5bc59
--- /dev/null
+++ b/fs/nfs/nfsroot.c
@@ -0,0 +1,513 @@
+/*
+ *  $Id: nfsroot.c,v 1.45 1998/03/07 10:44:46 mj Exp $
+ *
+ *  Copyright (C) 1995, 1996  Gero Kuhlmann <gero@gkminix.han.de>
+ *
+ *  Allow an NFS filesystem to be mounted as root. The way this works is:
+ *     (1) Use the IP autoconfig mechanism to set local IP addresses and routes.
+ *     (2) Handle RPC negotiation with the system which replied to RARP or
+ *         was reported as a boot server by BOOTP or manually.
+ *     (3) The actual mounting is done later, when init() is running.
+ *
+ *
+ *	Changes:
+ *
+ *	Alan Cox	:	Removed get_address name clash with FPU.
+ *	Alan Cox	:	Reformatted a bit.
+ *	Gero Kuhlmann	:	Code cleanup
+ *	Michael Rausch  :	Fixed recognition of an incoming RARP answer.
+ *	Martin Mares	: (2.0)	Auto-configuration via BOOTP supported.
+ *	Martin Mares	:	Manual selection of interface & BOOTP/RARP.
+ *	Martin Mares	:	Using network routes instead of host routes,
+ *				allowing the default configuration to be used
+ *				for normal operation of the host.
+ *	Martin Mares	:	Randomized timer with exponential backoff
+ *				installed to minimize network congestion.
+ *	Martin Mares	:	Code cleanup.
+ *	Martin Mares	: (2.1)	BOOTP and RARP made configuration options.
+ *	Martin Mares	:	Server hostname generation fixed.
+ *	Gerd Knorr	:	Fixed weird inode handling
+ *	Martin Mares	: (2.2)	"0.0.0.0" addresses from command line ignored.
+ *	Martin Mares	:	RARP replies not tested for server address.
+ *	Gero Kuhlmann	: (2.3) Some bug fixes and code cleanup again (please
+ *				send me your new patches _before_ bothering
+ *				Linus so that I don' always have to cleanup
+ *				Linus so that I don't always have to clean up
+ *	Gero Kuhlmann	:	Last changes of Martin Mares undone.
+ *	Gero Kuhlmann	: 	RARP replies are tested for specified server
+ *				again. However, it's now possible to have
+ *				different RARP and NFS servers.
+ *	Gero Kuhlmann	:	"0.0.0.0" addresses from command line are
+ *				now mapped to INADDR_NONE.
+ *	Gero Kuhlmann	:	Fixed a bug which prevented BOOTP path name
+ *				from being used (thanks to Leo Spiekman)
+ *	Andy Walker	:	Allow to specify the NFS server in nfs_root
+ *				without giving a path name
+ *	Swen Thümmler	:	Allow to specify the NFS options in nfs_root
+ *				without giving a path name. Fix BOOTP request
+ *				for domainname (domainname is NIS domain, not
+ *				DNS domain!). Skip dummy devices for BOOTP.
+ *	Jacek Zapala	:	Fixed a bug which prevented server-ip address
+ *				from nfsroot parameter from being used.
+ *	Olaf Kirch	:	Adapted to new NFS code.
+ *	Jakub Jelinek	:	Free used code segment.
+ *	Marko Kohtala	:	Fixed some bugs.
+ *	Martin Mares	:	Debug message cleanup
+ *	Martin Mares	:	Changed to use the new generic IP layer autoconfig
+ *				code. BOOTP and RARP moved there.
+ *	Martin Mares	:	Default path now contains host name instead of
+ *				host IP address (but host name defaults to IP
+ *				address anyway).
+ *	Martin Mares	:	Use root_server_addr appropriately during setup.
+ *	Martin Mares	:	Rewrote parameter parsing, now hopefully giving
+ *				correct overriding.
+ *	Trond Myklebust :	Add in preliminary support for NFSv3 and TCP.
+ *				Fix bug in root_nfs_addr(). nfs_data.namlen
+ *				is NOT for the length of the hostname.
+ *	Hua Qin		:	Support for mounting root file system via
+ *				NFS over TCP.
+ *	Fabian Frederick:	Option parser rebuilt (using parser lib)
+*/
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/in.h>
+#include <linux/major.h>
+#include <linux/utsname.h>
+#include <linux/inet.h>
+#include <linux/root_dev.h>
+#include <net/ipconfig.h>
+#include <linux/parser.h>
+
+/* Define this to allow debugging output */
+#undef NFSROOT_DEBUG
+#define NFSDBG_FACILITY NFSDBG_ROOT
+
+/* Default path we try to mount. "%s" gets replaced by our IP address */
+#define NFS_ROOT		"/tftpboot/%s"
+
+/* Parameters passed from the kernel command line */
+static char nfs_root_name[256] __initdata = "";
+
+/* Address of NFS server */
+static __u32 servaddr __initdata = 0;
+
+/* Name of directory to mount */
+static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, };
+
+/* NFS-related data */
+static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
+static int nfs_port __initdata = 0;		/* Port to connect to for NFS */
+static int mount_port __initdata = 0;		/* Mount daemon port number */
+
+
+/***************************************************************************
+
+			     Parsing of options
+
+ ***************************************************************************/
+
+enum {
+	/* Options that take integer arguments */
+	Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin,
+	Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
+	/* Options that take no arguments */
+	Opt_soft, Opt_hard, Opt_intr,
+	Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, 
+	Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
+	/* Error token */
+	Opt_err
+};
+
+static match_table_t __initdata tokens = {
+	{Opt_port, "port=%u"},
+	{Opt_rsize, "rsize=%u"},
+	{Opt_wsize, "wsize=%u"},
+	{Opt_timeo, "timeo=%u"},
+	{Opt_retrans, "retrans=%u"},
+	{Opt_acregmin, "acregmin=%u"},
+	{Opt_acregmax, "acregmax=%u"},
+	{Opt_acdirmin, "acdirmin=%u"},
+	{Opt_acdirmax, "acdirmax=%u"},
+	{Opt_soft, "soft"},
+	{Opt_hard, "hard"},
+	{Opt_intr, "intr"},
+	{Opt_nointr, "nointr"},
+	{Opt_posix, "posix"},
+	{Opt_noposix, "noposix"},
+	{Opt_cto, "cto"},
+	{Opt_nocto, "nocto"},
+	{Opt_ac, "ac"},
+	{Opt_noac, "noac"},
+	{Opt_lock, "lock"},
+	{Opt_nolock, "nolock"},
+	{Opt_v2, "nfsvers=2"},
+	{Opt_v2, "v2"},
+	{Opt_v3, "nfsvers=3"},
+	{Opt_v3, "v3"},
+	{Opt_udp, "proto=udp"},
+	{Opt_udp, "udp"},
+	{Opt_tcp, "proto=tcp"},
+	{Opt_tcp, "tcp"},
+	{Opt_err, NULL}
+};
+
+/*
+ *  Parse option string.
+ */
+
+static int __init root_nfs_parse(char *name, char *buf)
+{
+
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+
+	if (!name)
+		return 1;
+
+	/* Set the NFS remote path */
+	p = strsep(&name, ",");
+	if (p[0] != '\0' && strcmp(p, "default") != 0)
+		strlcpy(buf, p, NFS_MAXPATHLEN);
+
+	while ((p = strsep (&name, ",")) != NULL) {
+		int token; 
+		if (!*p)
+			continue;
+		token = match_token(p, tokens, args);
+
+		/* %u tokens only. Beware if you add new tokens! */
+		if (token < Opt_soft && match_int(&args[0], &option))
+			return 0;
+		switch (token) {
+			case Opt_port:
+				nfs_port = option;
+				break;
+			case Opt_rsize:
+				nfs_data.rsize = option;
+				break;
+			case Opt_wsize:
+				nfs_data.wsize = option;
+				break;
+			case Opt_timeo:
+				nfs_data.timeo = option;
+				break;
+			case Opt_retrans:
+				nfs_data.retrans = option;
+				break;
+			case Opt_acregmin:
+				nfs_data.acregmin = option;
+				break;
+			case Opt_acregmax:
+				nfs_data.acregmax = option;
+				break;
+			case Opt_acdirmin:
+				nfs_data.acdirmin = option;
+				break;
+			case Opt_acdirmax:
+				nfs_data.acdirmax = option;
+				break;
+			case Opt_soft:
+				nfs_data.flags |= NFS_MOUNT_SOFT;
+				break;
+			case Opt_hard:
+				nfs_data.flags &= ~NFS_MOUNT_SOFT;
+				break;
+			case Opt_intr:
+				nfs_data.flags |= NFS_MOUNT_INTR;
+				break;
+			case Opt_nointr:
+				nfs_data.flags &= ~NFS_MOUNT_INTR;
+				break;
+			case Opt_posix:
+				nfs_data.flags |= NFS_MOUNT_POSIX;
+				break;
+			case Opt_noposix:
+				nfs_data.flags &= ~NFS_MOUNT_POSIX;
+				break;
+			case Opt_cto:
+				nfs_data.flags &= ~NFS_MOUNT_NOCTO;
+				break;
+			case Opt_nocto:
+				nfs_data.flags |= NFS_MOUNT_NOCTO;
+				break;
+			case Opt_ac:
+				nfs_data.flags &= ~NFS_MOUNT_NOAC;
+				break;
+			case Opt_noac:
+				nfs_data.flags |= NFS_MOUNT_NOAC;
+				break;
+			case Opt_lock:
+				nfs_data.flags &= ~NFS_MOUNT_NONLM;
+				break;
+			case Opt_nolock:
+				nfs_data.flags |= NFS_MOUNT_NONLM;
+				break;
+			case Opt_v2:
+				nfs_data.flags &= ~NFS_MOUNT_VER3;
+				break;
+			case Opt_v3:
+				nfs_data.flags |= NFS_MOUNT_VER3;
+				break;
+			case Opt_udp:
+				nfs_data.flags &= ~NFS_MOUNT_TCP;
+				break;
+			case Opt_tcp:
+				nfs_data.flags |= NFS_MOUNT_TCP;
+				break;
+			default:
+				return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ *  Prepare the NFS data structure and parse all options.
+ */
+static int __init root_nfs_name(char *name)
+{
+	static char buf[NFS_MAXPATHLEN] __initdata;
+	char *cp;
+
+	/* Set some default values */
+	memset(&nfs_data, 0, sizeof(nfs_data));
+	nfs_port          = -1;
+	nfs_data.version  = NFS_MOUNT_VERSION;
+	nfs_data.flags    = NFS_MOUNT_NONLM;	/* No lockd in nfs root yet */
+	nfs_data.rsize    = NFS_DEF_FILE_IO_BUFFER_SIZE;
+	nfs_data.wsize    = NFS_DEF_FILE_IO_BUFFER_SIZE;
+	nfs_data.acregmin = 3;
+	nfs_data.acregmax = 60;
+	nfs_data.acdirmin = 30;
+	nfs_data.acdirmax = 60;
+	strcpy(buf, NFS_ROOT);
+
+	/* Process options received from the remote server */
+	root_nfs_parse(root_server_path, buf);
+
+	/* Override them by options set on kernel command-line */
+	root_nfs_parse(name, buf);
+
+	cp = system_utsname.nodename;
+	if (strlen(buf) + strlen(cp) > NFS_MAXPATHLEN) {
+		printk(KERN_ERR "Root-NFS: Pathname for remote directory too long.\n");
+		return -1;
+	}
+	sprintf(nfs_path, buf, cp);
+
+	return 1;
+}
+
+
+/*
+ *  Get NFS server address.
+ */
+static int __init root_nfs_addr(void)
+{
+	if ((servaddr = root_server_addr) == INADDR_NONE) {
+		printk(KERN_ERR "Root-NFS: No NFS server available, giving up.\n");
+		return -1;
+	}
+
+	snprintf(nfs_data.hostname, sizeof(nfs_data.hostname),
+		 "%u.%u.%u.%u", NIPQUAD(servaddr));
+	return 0;
+}
+
+/*
+ *  Tell the user what's going on.
+ */
+#ifdef NFSROOT_DEBUG
+static void __init root_nfs_print(void)
+{
+	printk(KERN_NOTICE "Root-NFS: Mounting %s on server %s as root\n",
+		nfs_path, nfs_data.hostname);
+	printk(KERN_NOTICE "Root-NFS:     rsize = %d, wsize = %d, timeo = %d, retrans = %d\n",
+		nfs_data.rsize, nfs_data.wsize, nfs_data.timeo, nfs_data.retrans);
+	printk(KERN_NOTICE "Root-NFS:     acreg (min,max) = (%d,%d), acdir (min,max) = (%d,%d)\n",
+		nfs_data.acregmin, nfs_data.acregmax,
+		nfs_data.acdirmin, nfs_data.acdirmax);
+	printk(KERN_NOTICE "Root-NFS:     nfsd port = %d, mountd port = %d, flags = %08x\n",
+		nfs_port, mount_port, nfs_data.flags);
+}
+#endif
+
+
+static int __init root_nfs_init(void)
+{
+#ifdef NFSROOT_DEBUG
+	nfs_debug |= NFSDBG_ROOT;
+#endif
+
+	/*
+	 * Decode the root directory path name and NFS options from
+	 * the kernel command line. This has to go here in order to
+	 * be able to use the client IP address for the remote root
+	 * directory (necessary for pure RARP booting).
+	 */
+	if (root_nfs_name(nfs_root_name) < 0 ||
+	    root_nfs_addr() < 0)
+		return -1;
+
+#ifdef NFSROOT_DEBUG
+	root_nfs_print();
+#endif
+
+	return 0;
+}
+
+
+/*
+ *  Parse NFS server and directory information passed on the kernel
+ *  command line.
+ */
+static int __init nfs_root_setup(char *line)
+{
+	ROOT_DEV = Root_NFS;
+	if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
+		strlcpy(nfs_root_name, line, sizeof(nfs_root_name));
+	} else {
+		int n = strlen(line) + sizeof(NFS_ROOT) - 1;
+		if (n >= sizeof(nfs_root_name))
+			line[sizeof(nfs_root_name) - sizeof(NFS_ROOT) - 2] = '\0';
+		sprintf(nfs_root_name, NFS_ROOT, line);
+	}
+	root_server_addr = root_nfs_parse_addr(nfs_root_name);
+	return 1;
+}
+
+__setup("nfsroot=", nfs_root_setup);
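+
+/*
+ * Illustrative example of a command line handled by the parser above
+ * (server address and path are placeholders):
+ *
+ *	nfsroot=<server-ip>:/export/rootfs,rsize=4096,wsize=4096,v3,tcp
+ *
+ * An empty path or the literal "default" keeps the NFS_ROOT template
+ * ("/tftpboot/%s"); the "%s" is later replaced with the client's node name
+ * in root_nfs_name().
+ */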
+
+/***************************************************************************
+
+	       Routines to actually mount the root directory
+
+ ***************************************************************************/
+
+/*
+ *  Construct sockaddr_in from address and port number (both in network byte order).
+ */
+static inline void
+set_sockaddr(struct sockaddr_in *sin, __u32 addr, __u16 port)
+{
+	sin->sin_family = AF_INET;
+	sin->sin_addr.s_addr = addr;
+	sin->sin_port = port;
+}
+
+/*
+ *  Query server portmapper for the port of a daemon program.
+ */
+static int __init root_nfs_getport(int program, int version, int proto)
+{
+	struct sockaddr_in sin;
+
+	printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n",
+		program, version, NIPQUAD(servaddr));
+	set_sockaddr(&sin, servaddr, 0);
+	return rpc_getport_external(&sin, program, version, proto);
+}
+
+
+/*
+ *  Use portmapper to find mountd and nfsd port numbers if not overridden
+ *  by the user. Use defaults if portmapper is not available.
+ *  XXX: Is there any nfs server with no portmapper?
+ */
+static int __init root_nfs_ports(void)
+{
+	int port;
+	int nfsd_ver, mountd_ver;
+	int nfsd_port, mountd_port;
+	int proto;
+
+	if (nfs_data.flags & NFS_MOUNT_VER3) {
+		nfsd_ver = NFS3_VERSION;
+		mountd_ver = NFS_MNT3_VERSION;
+		nfsd_port = NFS_PORT;
+		mountd_port = NFS_MNT_PORT;
+	} else {
+		nfsd_ver = NFS2_VERSION;
+		mountd_ver = NFS_MNT_VERSION;
+		nfsd_port = NFS_PORT;
+		mountd_port = NFS_MNT_PORT;
+	}
+
+	proto = (nfs_data.flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
+
+	if (nfs_port < 0) {
+		if ((port = root_nfs_getport(NFS_PROGRAM, nfsd_ver, proto)) < 0) {
+			printk(KERN_ERR "Root-NFS: Unable to get nfsd port "
+					"number from server, using default\n");
+			port = nfsd_port;
+		}
+		nfs_port = htons(port);
+		dprintk("Root-NFS: Portmapper on server returned %d "
+			"as nfsd port\n", port);
+	}
+
+	if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) {
+		printk(KERN_ERR "Root-NFS: Unable to get mountd port "
+				"number from server, using default\n");
+		port = mountd_port;
+	}
+	mount_port = htons(port);
+	dprintk("Root-NFS: mountd port is %d\n", port);
+
+	return 0;
+}
+
+
+/*
+ *  Get a file handle from the server for the directory which is to be
+ *  mounted.
+ */
+static int __init root_nfs_get_handle(void)
+{
+	struct nfs_fh fh;
+	struct sockaddr_in sin;
+	int status;
+	int protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
+					IPPROTO_TCP : IPPROTO_UDP;
+	int version = (nfs_data.flags & NFS_MOUNT_VER3) ?
+					NFS_MNT3_VERSION : NFS_MNT_VERSION;
+
+	set_sockaddr(&sin, servaddr, mount_port);
+	status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol);
+	if (status < 0)
+		printk(KERN_ERR "Root-NFS: Server returned error %d "
+				"while mounting %s\n", status, nfs_path);
+	else {
+		nfs_data.root.size = fh.size;
+		memcpy(nfs_data.root.data, fh.data, fh.size);
+	}
+
+	return status;
+}
+
+/*
+ *  Get the NFS port numbers and file handle, and return the prepared 'data'
+ *  argument for mount() if everything went OK. Return NULL otherwise.
+ */
+void * __init nfs_root_data(void)
+{
+	if (root_nfs_init() < 0
+	 || root_nfs_ports() < 0
+	 || root_nfs_get_handle() < 0)
+		return NULL;
+	set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, nfs_port);
+	return (void*)&nfs_data;
+}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
new file mode 100644
index 0000000..4f1ba72
--- /dev/null
+++ b/fs/nfs/pagelist.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/nfs/pagelist.c
+ *
+ * A set of helper functions for managing NFS read and write requests.
+ * The main purpose of these routines is to provide support for the
+ * coalescing of several requests into a single RPC call.
+ *
+ * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_page.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+
+#define NFS_PARANOIA 1
+
+static kmem_cache_t *nfs_page_cachep;
+
+static inline struct nfs_page *
+nfs_page_alloc(void)
+{
+	struct nfs_page	*p;
+	p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
+	if (p) {
+		memset(p, 0, sizeof(*p));
+		INIT_LIST_HEAD(&p->wb_list);
+	}
+	return p;
+}
+
+static inline void
+nfs_page_free(struct nfs_page *p)
+{
+	kmem_cache_free(nfs_page_cachep, p);
+}
+
+/**
+ * nfs_create_request - Create an NFS read/write request.
+ * @ctx: open context to use
+ * @inode: inode to which the request is attached
+ * @page: page to write
+ * @offset: starting offset within the page for the write
+ * @count: number of bytes to read/write
+ *
+ * The page must be locked by the caller. This makes sure we never
+ * create two different requests for the same page, and avoids
+ * a possible deadlock when we reach the hard limit on the number
+ * of dirty pages.
+ * The caller must ensure it is safe to sleep in this function.
+ */
+struct nfs_page *
+nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
+		   struct page *page,
+		   unsigned int offset, unsigned int count)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_page		*req;
+
+	/* Deal with hard limits.  */
+	for (;;) {
+		/* try to allocate the request struct */
+		req = nfs_page_alloc();
+		if (req != NULL)
+			break;
+
+		/* Try to free up at least one request in order to stay
+		 * below the hard limit
+		 */
+		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+			return ERR_PTR(-ERESTARTSYS);
+		yield();
+	}
+
+	/* Initialize the request struct. Initially, we assume a
+	 * long write-back delay. This will be adjusted in
+	 * update_nfs_request below if the region is not locked. */
+	req->wb_page    = page;
+	atomic_set(&req->wb_complete, 0);
+	req->wb_index	= page->index;
+	page_cache_get(page);
+	req->wb_offset  = offset;
+	req->wb_pgbase	= offset;
+	req->wb_bytes   = count;
+	atomic_set(&req->wb_count, 1);
+	req->wb_context = get_nfs_open_context(ctx);
+
+	return req;
+}
+
+/**
+ * nfs_unlock_request - Unlock request and wake up sleepers.
+ * @req: request to unlock
+ */
+void nfs_unlock_request(struct nfs_page *req)
+{
+	if (!NFS_WBACK_BUSY(req)) {
+		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
+		BUG();
+	}
+	smp_mb__before_clear_bit();
+	clear_bit(PG_BUSY, &req->wb_flags);
+	smp_mb__after_clear_bit();
+	wake_up_all(&req->wb_context->waitq);
+	nfs_release_request(req);
+}
+
+/**
+ * nfs_clear_request - Free up all resources allocated to the request
+ * @req: request to clear
+ *
+ * Release page resources associated with a write request after it
+ * has completed.
+ */
+void nfs_clear_request(struct nfs_page *req)
+{
+	if (req->wb_page) {
+		page_cache_release(req->wb_page);
+		req->wb_page = NULL;
+	}
+}
+
+
+/**
+ * nfs_release_request - Release the count on an NFS read/write request
+ * @req: request to release
+ *
+ * Note: Should never be called with the spinlock held!
+ */
+void
+nfs_release_request(struct nfs_page *req)
+{
+	if (!atomic_dec_and_test(&req->wb_count))
+		return;
+
+#ifdef NFS_PARANOIA
+	BUG_ON (!list_empty(&req->wb_list));
+	BUG_ON (NFS_WBACK_BUSY(req));
+#endif
+
+	/* Release struct file or cached credential */
+	nfs_clear_request(req);
+	put_nfs_open_context(req->wb_context);
+	nfs_page_free(req);
+}
+
+/**
+ * nfs_list_add_request - Insert a request into a sorted list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ *
+ * Note that the wb_list is sorted by page index in order to facilitate
+ * coalescing of requests.
+ * We use an insertion sort that is optimized for the case of appended
+ * writes.
+ */
+void
+nfs_list_add_request(struct nfs_page *req, struct list_head *head)
+{
+	struct list_head *pos;
+
+#ifdef NFS_PARANOIA
+	if (!list_empty(&req->wb_list)) {
+		printk(KERN_ERR "NFS: Add to list failed!\n");
+		BUG();
+	}
+#endif
+	list_for_each_prev(pos, head) {
+		struct nfs_page	*p = nfs_list_entry(pos);
+		if (p->wb_index < req->wb_index)
+			break;
+	}
+	list_add(&req->wb_list, pos);
+	req->wb_list_head = head;
+}
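+
+/*
+ * Illustration: because the scan above runs backwards from the tail of the
+ * list, the common case of appending requests in ascending page order
+ * (e.g. indexes 10, 11, 12 added in that order) stops after a single
+ * comparison.
+ */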
+
+/**
+ * nfs_wait_on_request - Wait for a request to complete.
+ * @req: request to wait upon.
+ *
+ * Interruptible by signals only if mounted with intr flag.
+ * The user is responsible for holding a count on the request.
+ */
+int
+nfs_wait_on_request(struct nfs_page *req)
+{
+	struct inode	*inode = req->wb_context->dentry->d_inode;
+	struct rpc_clnt	*clnt = NFS_CLIENT(inode);
+
+	if (!NFS_WBACK_BUSY(req))
+		return 0;
+	return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
+}
+
+/**
+ * nfs_coalesce_requests - Split coalesced requests out from a list.
+ * @head: source list
+ * @dst: destination list
+ * @nmax: maximum number of requests to coalesce
+ *
+ * Moves a maximum of 'nmax' elements from one list to another.
+ * The elements are checked to ensure that they form a contiguous set
+ * of pages, and that the RPC credentials are the same.
+ */
+int
+nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
+		      unsigned int nmax)
+{
+	struct nfs_page		*req = NULL;
+	unsigned int		npages = 0;
+
+	while (!list_empty(head)) {
+		struct nfs_page	*prev = req;
+
+		req = nfs_list_entry(head->next);
+		if (prev) {
+			if (req->wb_context->cred != prev->wb_context->cred)
+				break;
+			if (req->wb_context->lockowner != prev->wb_context->lockowner)
+				break;
+			if (req->wb_context->state != prev->wb_context->state)
+				break;
+			if (req->wb_index != (prev->wb_index + 1))
+				break;
+
+			if (req->wb_pgbase != 0)
+				break;
+		}
+		nfs_list_remove_request(req);
+		nfs_list_add_request(req, dst);
+		npages++;
+		if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE)
+			break;
+		if (npages >= nmax)
+			break;
+	}
+	return npages;
+}
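+
+/*
+ * Illustration: with nmax = 8 and whole-page requests for page indexes
+ * 5, 6, 7 and 9 on @head, all sharing the same credentials, lock owner and
+ * state, the loop above moves 5, 6 and 7 to @dst and returns 3; request 9
+ * stays on @head because it is not contiguous with 7.
+ */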
+
+/**
+ * nfs_scan_list - Scan a list for matching requests
+ * @head: One of the NFS inode request lists
+ * @dst: Destination list
+ * @idx_start: lower bound of page->index to scan
+ * @npages: idx_start + npages sets the upper bound to scan.
+ *
+ * Moves elements from one of the inode request lists.
+ * If npages is 0, the entire address_space, starting at index idx_start,
+ * is scanned.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ * You must be holding the inode's req_lock when calling this function.
+ */
+int
+nfs_scan_list(struct list_head *head, struct list_head *dst,
+	      unsigned long idx_start, unsigned int npages)
+{
+	struct list_head	*pos, *tmp;
+	struct nfs_page		*req;
+	unsigned long		idx_end;
+	int			res;
+
+	res = 0;
+	if (npages == 0)
+		idx_end = ~0;
+	else
+		idx_end = idx_start + npages - 1;
+
+	list_for_each_safe(pos, tmp, head) {
+
+		req = nfs_list_entry(pos);
+
+		if (req->wb_index < idx_start)
+			continue;
+		if (req->wb_index > idx_end)
+			break;
+
+		if (!nfs_lock_request(req))
+			continue;
+		nfs_list_remove_request(req);
+		nfs_list_add_request(req, dst);
+		res++;
+	}
+	return res;
+}
+
+int nfs_init_nfspagecache(void)
+{
+	nfs_page_cachep = kmem_cache_create("nfs_page",
+					    sizeof(struct nfs_page),
+					    0, SLAB_HWCACHE_ALIGN,
+					    NULL, NULL);
+	if (nfs_page_cachep == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void nfs_destroy_nfspagecache(void)
+{
+	if (kmem_cache_destroy(nfs_page_cachep))
+		printk(KERN_INFO "nfs_page: not all structures were freed\n");
+}
+
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
new file mode 100644
index 0000000..d31b4d6
--- /dev/null
+++ b/fs/nfs/proc.c
@@ -0,0 +1,655 @@
+/*
+ *  linux/fs/nfs/proc.c
+ *
+ *  Copyright (C) 1992, 1993, 1994  Rick Sladkey
+ *
+ *  OS-independent nfs remote procedure call functions
+ *
+ *  Tuned by Alan Cox <A.Cox@swansea.ac.uk> for >3K buffers
+ *  so at last we can have decent(ish) throughput off a 
+ *  Sun server.
+ *
+ *  Coding optimized and cleaned up by Florian La Roche.
+ *  Note: Error returns are optimized for NFS_OK, which isn't translated via
+ *  nfs_stat_to_errno(), but happens to be already the right return code.
+ *
+ *  Also, the code currently doesn't check the size of the packet, when
+ *  it decodes the packet.
+ *
+ *  Feel free to fix it and mail me the diffs if it worries you.
+ *
+ *  Completely rewritten to support the new RPC call interface;
+ *  rewrote and moved the entire XDR stuff to xdr.c
+ *  --Olaf Kirch June 1996
+ *
+ *  The code below initializes all auto variables explicitly, otherwise
+ *  it will fail to work as a module (gcc generates a memset call for an
+ *  incomplete struct).
+ */
+
+#include <linux/types.h>
+#include <linux/param.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/lockd/bind.h>
+#include <linux/smp_lock.h>
+
+#define NFSDBG_FACILITY		NFSDBG_PROC
+
+extern struct rpc_procinfo nfs_procedures[];
+
+/*
+ * Bare-bones access to getattr: this is for nfs_read_super.
+ */
+static int
+nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		  struct nfs_fsinfo *info)
+{
+	struct nfs_fattr *fattr = info->fattr;
+	struct nfs2_fsstat fsinfo;
+	int status;
+
+	dprintk("%s: call getattr\n", __FUNCTION__);
+	fattr->valid = 0;
+	status = rpc_call(server->client_sys, NFSPROC_GETATTR, fhandle, fattr, 0);
+	dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
+	if (status)
+		return status;
+	dprintk("%s: call statfs\n", __FUNCTION__);
+	status = rpc_call(server->client_sys, NFSPROC_STATFS, fhandle, &fsinfo, 0);
+	dprintk("%s: reply statfs: %d\n", __FUNCTION__, status);
+	if (status)
+		return status;
+	info->rtmax  = NFS_MAXDATA;
+	info->rtpref = fsinfo.tsize;
+	info->rtmult = fsinfo.bsize;
+	info->wtmax  = NFS_MAXDATA;
+	info->wtpref = fsinfo.tsize;
+	info->wtmult = fsinfo.bsize;
+	info->dtpref = fsinfo.tsize;
+	info->maxfilesize = 0x7FFFFFFF;
+	info->lease_time = 0;
+	return 0;
+}
+
+/*
+ * One function for each procedure in the NFS protocol.
+ */
+static int
+nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fattr *fattr)
+{
+	int	status;
+
+	dprintk("NFS call  getattr\n");
+	fattr->valid = 0;
+	status = rpc_call(server->client, NFSPROC_GETATTR,
+				fhandle, fattr, 0);
+	dprintk("NFS reply getattr: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+		 struct iattr *sattr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct nfs_sattrargs	arg = { 
+		.fh	= NFS_FH(inode),
+		.sattr	= sattr
+	};
+	int	status;
+
+	dprintk("NFS call  setattr\n");
+	fattr->valid = 0;
+	status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
+	dprintk("NFS reply setattr: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_lookup(struct inode *dir, struct qstr *name,
+		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	struct nfs_diropok	res = {
+		.fh		= fhandle,
+		.fattr		= fattr
+	};
+	int			status;
+
+	dprintk("NFS call  lookup %s\n", name->name);
+	fattr->valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_LOOKUP, &arg, &res, 0);
+	dprintk("NFS reply lookup: %d\n", status);
+	return status;
+}
+
+static int nfs_proc_readlink(struct inode *inode, struct page *page,
+		unsigned int pgbase, unsigned int pglen)
+{
+	struct nfs_readlinkargs	args = {
+		.fh		= NFS_FH(inode),
+		.pgbase		= pgbase,
+		.pglen		= pglen,
+		.pages		= &page
+	};
+	int			status;
+
+	dprintk("NFS call  readlink\n");
+	status = rpc_call(NFS_CLIENT(inode), NFSPROC_READLINK, &args, NULL, 0);
+	dprintk("NFS reply readlink: %d\n", status);
+	return status;
+}
+
+static int nfs_proc_read(struct nfs_read_data *rdata)
+{
+	int			flags = rdata->flags;
+	struct inode *		inode = rdata->inode;
+	struct nfs_fattr *	fattr = rdata->res.fattr;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
+		.rpc_argp	= &rdata->args,
+		.rpc_resp	= &rdata->res,
+		.rpc_cred	= rdata->cred,
+	};
+	int			status;
+
+	dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
+			(long long) rdata->args.offset);
+	fattr->valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
+	if (status >= 0) {
+		nfs_refresh_inode(inode, fattr);
+		/* Emulate the eof flag, which isn't normally needed in NFSv2
+		 * as it is guaranteed to always return the file attributes
+		 */
+		if (rdata->args.offset + rdata->args.count >= fattr->size)
+			rdata->res.eof = 1;
+	}
+	dprintk("NFS reply read: %d\n", status);
+	return status;
+}
+
+static int nfs_proc_write(struct nfs_write_data *wdata)
+{
+	int			flags = wdata->flags;
+	struct inode *		inode = wdata->inode;
+	struct nfs_fattr *	fattr = wdata->res.fattr;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
+		.rpc_argp	= &wdata->args,
+		.rpc_resp	= &wdata->res,
+		.rpc_cred	= wdata->cred,
+	};
+	int			status;
+
+	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
+			(long long) wdata->args.offset);
+	fattr->valid = 0;
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
+	if (status >= 0) {
+		nfs_refresh_inode(inode, fattr);
+		wdata->res.count = wdata->args.count;
+		wdata->verf.committed = NFS_FILE_SYNC;
+	}
+	dprintk("NFS reply write: %d\n", status);
+	return status < 0? status : wdata->res.count;
+}
+
+static int
+nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+		int flags)
+{
+	struct nfs_fh		fhandle;
+	struct nfs_fattr	fattr;
+	struct nfs_createargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr
+	};
+	struct nfs_diropok	res = {
+		.fh		= &fhandle,
+		.fattr		= &fattr
+	};
+	int			status;
+
+	fattr.valid = 0;
+	dprintk("NFS call  create %s\n", dentry->d_name.name);
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	dprintk("NFS reply create: %d\n", status);
+	return status;
+}
+
+/*
+ * In NFSv2, mknod is grafted onto the create call.
+ */
+static int
+nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+	       dev_t rdev)
+{
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr;
+	struct nfs_createargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr
+	};
+	struct nfs_diropok	res = {
+		.fh		= &fhandle,
+		.fattr		= &fattr
+	};
+	int status, mode;
+
+	dprintk("NFS call  mknod %s\n", dentry->d_name.name);
+
+	mode = sattr->ia_mode;
+	if (S_ISFIFO(mode)) {
+		sattr->ia_mode = (mode & ~S_IFMT) | S_IFCHR;
+		sattr->ia_valid &= ~ATTR_SIZE;
+	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
+		sattr->ia_valid |= ATTR_SIZE;
+		sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
+	}
+
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
+
+	if (status == -EINVAL && S_ISFIFO(mode)) {
+		sattr->ia_mode = mode;
+		fattr.valid = 0;
+		status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
+	}
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	dprintk("NFS reply mknod: %d\n", status);
+	return status;
+}
+  
+static int
+nfs_proc_remove(struct inode *dir, struct qstr *name)
+{
+	struct nfs_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	struct rpc_message	msg = { 
+		.rpc_proc	= &nfs_procedures[NFSPROC_REMOVE],
+		.rpc_argp	= &arg,
+		.rpc_resp	= NULL,
+		.rpc_cred	= NULL
+	};
+	int			status;
+
+	dprintk("NFS call  remove %s\n", name->name);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+
+	dprintk("NFS reply remove: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+{
+	struct nfs_diropargs	*arg;
+
+	arg = (struct nfs_diropargs *)kmalloc(sizeof(*arg), GFP_KERNEL);
+	if (!arg)
+		return -ENOMEM;
+	arg->fh = NFS_FH(dir->d_inode);
+	arg->name = name->name;
+	arg->len = name->len;
+	msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
+	msg->rpc_argp = arg;
+	return 0;
+}
+
+static int
+nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+{
+	struct rpc_message *msg = &task->tk_msg;
+	
+	if (msg->rpc_argp)
+		kfree(msg->rpc_argp);
+	return 0;
+}
+
+static int
+nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
+		struct inode *new_dir, struct qstr *new_name)
+{
+	struct nfs_renameargs	arg = {
+		.fromfh		= NFS_FH(old_dir),
+		.fromname	= old_name->name,
+		.fromlen	= old_name->len,
+		.tofh		= NFS_FH(new_dir),
+		.toname		= new_name->name,
+		.tolen		= new_name->len
+	};
+	int			status;
+
+	dprintk("NFS call  rename %s -> %s\n", old_name->name, new_name->name);
+	status = rpc_call(NFS_CLIENT(old_dir), NFSPROC_RENAME, &arg, NULL, 0);
+	dprintk("NFS reply rename: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+	struct nfs_linkargs	arg = {
+		.fromfh		= NFS_FH(inode),
+		.tofh		= NFS_FH(dir),
+		.toname		= name->name,
+		.tolen		= name->len
+	};
+	int			status;
+
+	dprintk("NFS call  link %s\n", name->name);
+	status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0);
+	dprintk("NFS reply link: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
+		 struct iattr *sattr, struct nfs_fh *fhandle,
+		 struct nfs_fattr *fattr)
+{
+	struct nfs_symlinkargs	arg = {
+		.fromfh		= NFS_FH(dir),
+		.fromname	= name->name,
+		.fromlen	= name->len,
+		.topath		= path->name,
+		.tolen		= path->len,
+		.sattr		= sattr
+	};
+	int			status;
+
+	if (path->len > NFS2_MAXPATHLEN)
+		return -ENAMETOOLONG;
+	dprintk("NFS call  symlink %s -> %s\n", name->name, path->name);
+	fattr->valid = 0;
+	fhandle->size = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0);
+	dprintk("NFS reply symlink: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
+{
+	struct nfs_fh fhandle;
+	struct nfs_fattr fattr;
+	struct nfs_createargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len,
+		.sattr		= sattr
+	};
+	struct nfs_diropok	res = {
+		.fh		= &fhandle,
+		.fattr		= &fattr
+	};
+	int			status;
+
+	dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+	fattr.valid = 0;
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_MKDIR, &arg, &res, 0);
+	if (status == 0)
+		status = nfs_instantiate(dentry, &fhandle, &fattr);
+	dprintk("NFS reply mkdir: %d\n", status);
+	return status;
+}
+
+static int
+nfs_proc_rmdir(struct inode *dir, struct qstr *name)
+{
+	struct nfs_diropargs	arg = {
+		.fh		= NFS_FH(dir),
+		.name		= name->name,
+		.len		= name->len
+	};
+	int			status;
+
+	dprintk("NFS call  rmdir %s\n", name->name);
+	status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0);
+	dprintk("NFS reply rmdir: %d\n", status);
+	return status;
+}
+
+/*
+ * The READDIR implementation is somewhat hackish - we pass a temporary
+ * buffer to the encode function, which installs it in the receive
+ * iovec. The decode function just parses the reply to make sure it is
+ * syntactically correct; the entries themselves are decoded
+ * from nfs_readdir by calling the decode_entry function directly.
+ */
+static int
+nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+		 u64 cookie, struct page *page, unsigned int count, int plus)
+{
+	struct inode		*dir = dentry->d_inode;
+	struct nfs_readdirargs	arg = {
+		.fh		= NFS_FH(dir),
+		.cookie		= cookie,
+		.count		= count,
+		.pages		= &page
+	};
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs_procedures[NFSPROC_READDIR],
+		.rpc_argp	= &arg,
+		.rpc_resp	= NULL,
+		.rpc_cred	= cred
+	};
+	int			status;
+
+	lock_kernel();
+
+	dprintk("NFS call  readdir %d\n", (unsigned int)cookie);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+
+	dprintk("NFS reply readdir: %d\n", status);
+	unlock_kernel();
+	return status;
+}
+
+static int
+nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+			struct nfs_fsstat *stat)
+{
+	struct nfs2_fsstat fsinfo;
+	int	status;
+
+	dprintk("NFS call  statfs\n");
+	stat->fattr->valid = 0;
+	status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
+	dprintk("NFS reply statfs: %d\n", status);
+	if (status)
+		goto out;
+	stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize;
+	stat->fbytes = (u64)fsinfo.bfree  * fsinfo.bsize;
+	stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize;
+	stat->tfiles = 0;
+	stat->ffiles = 0;
+	stat->afiles = 0;
+out:
+	return status;
+}
+
+static int
+nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+			struct nfs_fsinfo *info)
+{
+	struct nfs2_fsstat fsinfo;
+	int	status;
+
+	dprintk("NFS call  fsinfo\n");
+	info->fattr->valid = 0;
+	status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
+	dprintk("NFS reply fsinfo: %d\n", status);
+	if (status)
+		goto out;
+	info->rtmax  = NFS_MAXDATA;
+	info->rtpref = fsinfo.tsize;
+	info->rtmult = fsinfo.bsize;
+	info->wtmax  = NFS_MAXDATA;
+	info->wtpref = fsinfo.tsize;
+	info->wtmult = fsinfo.bsize;
+	info->dtpref = fsinfo.tsize;
+	info->maxfilesize = 0x7FFFFFFF;
+	info->lease_time = 0;
+out:
+	return status;
+}
+
+static int
+nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		  struct nfs_pathconf *info)
+{
+	info->max_link = 0;
+	info->max_namelen = NFS2_MAXNAMLEN;
+	return 0;
+}
+
+extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);
+
+static void
+nfs_read_done(struct rpc_task *task)
+{
+	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+
+	if (task->tk_status >= 0) {
+		nfs_refresh_inode(data->inode, data->res.fattr);
+		/* Emulate the eof flag, which isn't normally needed in NFSv2
+		 * as it is guaranteed to always return the file attributes
+		 */
+		if (data->args.offset + data->args.count >= data->res.fattr->size)
+			data->res.eof = 1;
+	}
+	nfs_readpage_result(task);
+}
+
+static void
+nfs_proc_read_setup(struct nfs_read_data *data)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode = data->inode;
+	int			flags;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
+		.rpc_argp	= &data->args,
+		.rpc_resp	= &data->res,
+		.rpc_cred	= data->cred,
+	};
+
+	/* N.B. Do we need to test? Never called for swapfile inode */
+	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs_write_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+
+	if (task->tk_status >= 0)
+		nfs_refresh_inode(data->inode, data->res.fattr);
+	nfs_writeback_done(task);
+}
+
+static void
+nfs_proc_write_setup(struct nfs_write_data *data, int how)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode = data->inode;
+	int			flags;
+	struct rpc_message	msg = {
+		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
+		.rpc_argp	= &data->args,
+		.rpc_resp	= &data->res,
+		.rpc_cred	= data->cred,
+	};
+
+	/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
+	data->args.stable = NFS_FILE_SYNC;
+
+	/* Set the initial flags for the task.  */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
+	rpc_call_setup(task, &msg, 0);
+}
+
+static void
+nfs_proc_commit_setup(struct nfs_write_data *data, int how)
+{
+	BUG();
+}
+
+static int
+nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	return nlmclnt_proc(filp->f_dentry->d_inode, cmd, fl);
+}
+
+
+struct nfs_rpc_ops	nfs_v2_clientops = {
+	.version	= 2,		       /* protocol version */
+	.dentry_ops	= &nfs_dentry_operations,
+	.dir_inode_ops	= &nfs_dir_inode_operations,
+	.getroot	= nfs_proc_get_root,
+	.getattr	= nfs_proc_getattr,
+	.setattr	= nfs_proc_setattr,
+	.lookup		= nfs_proc_lookup,
+	.access		= NULL,		       /* access */
+	.readlink	= nfs_proc_readlink,
+	.read		= nfs_proc_read,
+	.write		= nfs_proc_write,
+	.commit		= NULL,		       /* commit */
+	.create		= nfs_proc_create,
+	.remove		= nfs_proc_remove,
+	.unlink_setup	= nfs_proc_unlink_setup,
+	.unlink_done	= nfs_proc_unlink_done,
+	.rename		= nfs_proc_rename,
+	.link		= nfs_proc_link,
+	.symlink	= nfs_proc_symlink,
+	.mkdir		= nfs_proc_mkdir,
+	.rmdir		= nfs_proc_rmdir,
+	.readdir	= nfs_proc_readdir,
+	.mknod		= nfs_proc_mknod,
+	.statfs		= nfs_proc_statfs,
+	.fsinfo		= nfs_proc_fsinfo,
+	.pathconf	= nfs_proc_pathconf,
+	.decode_dirent	= nfs_decode_dirent,
+	.read_setup	= nfs_proc_read_setup,
+	.write_setup	= nfs_proc_write_setup,
+	.commit_setup	= nfs_proc_commit_setup,
+	.file_open	= nfs_open,
+	.file_release	= nfs_release,
+	.lock		= nfs_proc_lock,
+};
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
new file mode 100644
index 0000000..a0042fb
--- /dev/null
+++ b/fs/nfs/read.c
@@ -0,0 +1,618 @@
+/*
+ * linux/fs/nfs/read.c
+ *
+ * Block I/O for NFS
+ *
+ * Partial copy of Linus' read cache modifications to fs/nfs/file.c
+ * modified for async RPC by okir@monad.swb.de
+ *
+ * We do an ugly hack here in order to return proper error codes to the
+ * user program when a read request fails: since generic_file_read
+ * only checks the return value of inode->i_op->readpage(), which is always 0
+ * for async RPC, we set the page's error bit when an error occurs and make
+ * nfs_readpage transmit requests synchronously when it encounters that bit.
+ * This is only a small problem, though, since we now retry all operations
+ * within the RPC code when root squashing is suspected.
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/smp_lock.h>
+
+#include <asm/system.h>
+
+#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
+
+static int nfs_pagein_one(struct list_head *, struct inode *);
+static void nfs_readpage_result_partial(struct nfs_read_data *, int);
+static void nfs_readpage_result_full(struct nfs_read_data *, int);
+
+static kmem_cache_t *nfs_rdata_cachep;
+mempool_t *nfs_rdata_mempool;
+
+#define MIN_POOL_READ	(32)
+
+void nfs_readdata_release(struct rpc_task *task)
+{
+	struct nfs_read_data	*data = (struct nfs_read_data *)task->tk_calldata;
+	nfs_readdata_free(data);
+}
+
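+/*
+ * Return the number of bytes of @page that lie below i_size: zero if the
+ * page is entirely beyond EOF, a full PAGE_CACHE_SIZE for interior pages,
+ * and the remaining byte count for the final partial page.
+ */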
+static
+unsigned int nfs_page_length(struct inode *inode, struct page *page)
+{
+	loff_t i_size = i_size_read(inode);
+	unsigned long idx;
+
+	if (i_size <= 0)
+		return 0;
+	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	if (page->index > idx)
+		return 0;
+	if (page->index != idx)
+		return PAGE_CACHE_SIZE;
+	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
+}
+
+static
+int nfs_return_empty_page(struct page *page)
+{
+	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
+	SetPageUptodate(page);
+	unlock_page(page);
+	return 0;
+}
+
+/*
+ * Read a page synchronously.
+ */
+static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
+		struct page *page)
+{
+	unsigned int	rsize = NFS_SERVER(inode)->rsize;
+	unsigned int	count = PAGE_CACHE_SIZE;
+	int		result;
+	struct nfs_read_data *rdata;
+
+	rdata = nfs_readdata_alloc();
+	if (!rdata)
+		return -ENOMEM;
+
+	memset(rdata, 0, sizeof(*rdata));
+	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
+	rdata->cred = ctx->cred;
+	rdata->inode = inode;
+	INIT_LIST_HEAD(&rdata->pages);
+	rdata->args.fh = NFS_FH(inode);
+	rdata->args.context = ctx;
+	rdata->args.pages = &page;
+	rdata->args.pgbase = 0UL;
+	rdata->args.count = rsize;
+	rdata->res.fattr = &rdata->fattr;
+
+	dprintk("NFS: nfs_readpage_sync(%p)\n", page);
+
+	/*
+	 * This works now because the socket layer never tries to DMA
+	 * into this buffer directly.
+	 */
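+	/*
+	 * Issue reads of at most rsize bytes at a time until the page is
+	 * filled, the server reports EOF, or a zero-byte read is returned.
+	 */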
+	do {
+		if (count < rsize)
+			rdata->args.count = count;
+		rdata->res.count = rdata->args.count;
+		rdata->args.offset = page_offset(page) + rdata->args.pgbase;
+
+		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
+			NFS_SERVER(inode)->hostname,
+			inode->i_sb->s_id,
+			(long long)NFS_FILEID(inode),
+			(unsigned long long)rdata->args.pgbase,
+			rdata->args.count);
+
+		lock_kernel();
+		result = NFS_PROTO(inode)->read(rdata);
+		unlock_kernel();
+
+		/*
+		 * Even if we had a partial success we can't mark the page
+		 * cache valid.
+		 */
+		if (result < 0) {
+			if (result == -EISDIR)
+				result = -EINVAL;
+			goto io_error;
+		}
+		count -= result;
+		rdata->args.pgbase += result;
+		/* Note: result == 0 should only happen if we're caching
+		 * a write that extends the file and punches a hole.
+		 */
+		if (rdata->res.eof != 0 || result == 0)
+			break;
+	} while (count);
+	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
+
+	if (count)
+		memclear_highpage_flush(page, rdata->args.pgbase, count);
+	SetPageUptodate(page);
+	if (PageError(page))
+		ClearPageError(page);
+	result = 0;
+
+io_error:
+	unlock_page(page);
+	nfs_readdata_free(rdata);
+	return result;
+}
+
+static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
+		struct page *page)
+{
+	LIST_HEAD(one_request);
+	struct nfs_page	*new;
+	unsigned int len;
+
+	len = nfs_page_length(inode, page);
+	if (len == 0)
+		return nfs_return_empty_page(page);
+	new = nfs_create_request(ctx, inode, page, 0, len);
+	if (IS_ERR(new)) {
+		unlock_page(page);
+		return PTR_ERR(new);
+	}
+	if (len < PAGE_CACHE_SIZE)
+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
+
+	nfs_lock_request(new);
+	nfs_list_add_request(new, &one_request);
+	nfs_pagein_one(&one_request, inode);
+	return 0;
+}
+
+static void nfs_readpage_release(struct nfs_page *req)
+{
+	unlock_page(req->wb_page);
+
+	nfs_clear_request(req);
+	nfs_release_request(req);
+	nfs_unlock_request(req);
+
+	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
+			req->wb_context->dentry->d_inode->i_sb->s_id,
+			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			req->wb_bytes,
+			(long long)req_offset(req));
+}
+
+/*
+ * Set up the NFS read request struct
+ */
+static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+		unsigned int count, unsigned int offset)
+{
+	struct inode		*inode;
+
+	data->req	  = req;
+	data->inode	  = inode = req->wb_context->dentry->d_inode;
+	data->cred	  = req->wb_context->cred;
+
+	data->args.fh     = NFS_FH(inode);
+	data->args.offset = req_offset(req) + offset;
+	data->args.pgbase = req->wb_pgbase + offset;
+	data->args.pages  = data->pagevec;
+	data->args.count  = count;
+	data->args.context = req->wb_context;
+
+	data->res.fattr   = &data->fattr;
+	data->res.count   = count;
+	data->res.eof     = 0;
+
+	NFS_PROTO(inode)->read_setup(data);
+
+	data->task.tk_cookie = (unsigned long)inode;
+	data->task.tk_calldata = data;
+	/* Release requests */
+	data->task.tk_release = nfs_readdata_release;
+
+	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+			data->task.tk_pid,
+			inode->i_sb->s_id,
+			(long long)NFS_FILEID(inode),
+			count,
+			(unsigned long long)data->args.offset);
+}
+
+static void
+nfs_async_read_error(struct list_head *head)
+{
+	struct nfs_page	*req;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		SetPageError(req->wb_page);
+		nfs_readpage_release(req);
+	}
+}
+
+/*
+ * Start an async read operation
+ */
+static void nfs_execute_read(struct nfs_read_data *data)
+{
+	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
+	sigset_t oldset;
+
+	rpc_clnt_sigmask(clnt, &oldset);
+	lock_kernel();
+	rpc_execute(&data->task);
+	unlock_kernel();
+	rpc_clnt_sigunmask(clnt, &oldset);
+}
+
+/*
+ * Generate multiple requests to fill a single page.
+ *
+ * We optimize to reduce the number of read operations on the wire.  If we
+ * detect that we're reading a page, or an area of a page, that is past the
+ * end of file, we do not generate NFS read operations but just clear the
+ * parts of the page that would have come back zero from the server anyway.
+ *
+ * We rely on the cached value of i_size to make this determination; another
+ * client can fill pages on the server past our cached end-of-file, but we
+ * won't see the new data until our attribute cache is updated.  This is more
+ * or less conventional NFS client behavior.
+ */
+static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
+{
+	struct nfs_page *req = nfs_list_entry(head->next);
+	struct page *page = req->wb_page;
+	struct nfs_read_data *data;
+	unsigned int rsize = NFS_SERVER(inode)->rsize;
+	unsigned int nbytes, offset;
+	int requests = 0;
+	LIST_HEAD(list);
+
+	nfs_list_remove_request(req);
+
+	nbytes = req->wb_bytes;
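+	/* Allocate one nfs_read_data for each rsize-sized chunk of the request. */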
+	for (;;) {
+		data = nfs_readdata_alloc();
+		if (!data)
+			goto out_bad;
+		INIT_LIST_HEAD(&data->pages);
+		list_add(&data->pages, &list);
+		requests++;
+		if (nbytes <= rsize)
+			break;
+		nbytes -= rsize;
+	}
+	atomic_set(&req->wb_complete, requests);
+
+	ClearPageError(page);
+	offset = 0;
+	nbytes = req->wb_bytes;
+	do {
+		data = list_entry(list.next, struct nfs_read_data, pages);
+		list_del_init(&data->pages);
+
+		data->pagevec[0] = page;
+		data->complete = nfs_readpage_result_partial;
+
+		if (nbytes > rsize) {
+			nfs_read_rpcsetup(req, data, rsize, offset);
+			offset += rsize;
+			nbytes -= rsize;
+		} else {
+			nfs_read_rpcsetup(req, data, nbytes, offset);
+			nbytes = 0;
+		}
+		nfs_execute_read(data);
+	} while (nbytes != 0);
+
+	return 0;
+
+out_bad:
+	while (!list_empty(&list)) {
+		data = list_entry(list.next, struct nfs_read_data, pages);
+		list_del(&data->pages);
+		nfs_readdata_free(data);
+	}
+	SetPageError(page);
+	nfs_readpage_release(req);
+	return -ENOMEM;
+}
+
+static int nfs_pagein_one(struct list_head *head, struct inode *inode)
+{
+	struct nfs_page		*req;
+	struct page		**pages;
+	struct nfs_read_data	*data;
+	unsigned int		count;
+
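+	/*
+	 * If the server's rsize is smaller than a page, each page has to be
+	 * split into several read requests.
+	 */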
+	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
+		return nfs_pagein_multi(head, inode);
+
+	data = nfs_readdata_alloc();
+	if (!data)
+		goto out_bad;
+
+	INIT_LIST_HEAD(&data->pages);
+	pages = data->pagevec;
+	count = 0;
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_list_add_request(req, &data->pages);
+		ClearPageError(req->wb_page);
+		*pages++ = req->wb_page;
+		count += req->wb_bytes;
+	}
+	req = nfs_list_entry(data->pages.next);
+
+	data->complete = nfs_readpage_result_full;
+	nfs_read_rpcsetup(req, data, count, 0);
+
+	nfs_execute_read(data);
+	return 0;
+out_bad:
+	nfs_async_read_error(head);
+	return -ENOMEM;
+}
+
+static int
+nfs_pagein_list(struct list_head *head, int rpages)
+{
+	LIST_HEAD(one_request);
+	struct nfs_page		*req;
+	int			error = 0;
+	unsigned int		pages = 0;
+
+	while (!list_empty(head)) {
+		pages += nfs_coalesce_requests(head, &one_request, rpages);
+		req = nfs_list_entry(one_request.next);
+		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
+		if (error < 0)
+			break;
+	}
+	if (error >= 0)
+		return pages;
+
+	nfs_async_read_error(head);
+	return error;
+}
+
+/*
+ * Handle a read reply that fills part of a page.
+ */
+static void nfs_readpage_result_partial(struct nfs_read_data *data, int status)
+{
+	struct nfs_page *req = data->req;
+	struct page *page = req->wb_page;
+
+	if (status >= 0) {
+		unsigned int request = data->args.count;
+		unsigned int result = data->res.count;
+
+		if (result < request) {
+			memclear_highpage_flush(page,
+						data->args.pgbase + result,
+						request - result);
+		}
+	} else
+		SetPageError(page);
+
+	if (atomic_dec_and_test(&req->wb_complete)) {
+		if (!PageError(page))
+			SetPageUptodate(page);
+		nfs_readpage_release(req);
+	}
+}
+
+/*
+ * Handle a read reply that fills an entire page (or a set of
+ * coalesced pages).
+ */
+static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
+{
+	unsigned int count = data->res.count;
+
+	while (!list_empty(&data->pages)) {
+		struct nfs_page *req = nfs_list_entry(data->pages.next);
+		struct page *page = req->wb_page;
+		nfs_list_remove_request(req);
+
+		if (status >= 0) {
+			if (count < PAGE_CACHE_SIZE) {
+				if (count < req->wb_bytes)
+					memclear_highpage_flush(page,
+							req->wb_pgbase + count,
+							req->wb_bytes - count);
+				count = 0;
+			} else
+				count -= PAGE_CACHE_SIZE;
+			SetPageUptodate(page);
+		} else
+			SetPageError(page);
+		nfs_readpage_release(req);
+	}
+}
+
+/*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+void nfs_readpage_result(struct rpc_task *task)
+{
+	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
+	struct nfs_readargs *argp = &data->args;
+	struct nfs_readres *resp = &data->res;
+	int status = task->tk_status;
+
+	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
+		task->tk_pid, status);
+
+	/* Is this a short read? */
+	if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
+		/* Has the server at least made some progress? */
+		if (resp->count != 0) {
+			/* Yes, so retry the read at the end of the data */
+			argp->offset += resp->count;
+			argp->pgbase += resp->count;
+			argp->count -= resp->count;
+			rpc_restart_call(task);
+			return;
+		}
+		task->tk_status = -EIO;
+	}
+	NFS_FLAGS(data->inode) |= NFS_INO_INVALID_ATIME;
+	data->complete(data, status);
+}
+
+/*
+ * Read a page over NFS.
+ * We read the page synchronously when the inode is marked for synchronous
+ * I/O; otherwise the read is performed asynchronously.
+ */
+int nfs_readpage(struct file *file, struct page *page)
+{
+	struct nfs_open_context *ctx;
+	struct inode *inode = page->mapping->host;
+	int		error;
+
+	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+		page, PAGE_CACHE_SIZE, page->index);
+	/*
+	 * Try to flush any pending writes to the file..
+	 *
+	 * NOTE! Because we own the page lock, there cannot
+	 * be any new pending writes generated at this point
+	 * for this page (other pages can be written to).
+	 */
+	error = nfs_wb_page(inode, page);
+	if (error)
+		goto out_error;
+
+	if (file == NULL) {
+		ctx = nfs_find_open_context(inode, FMODE_READ);
+		if (ctx == NULL)
+			return -EBADF;
+	} else
+		ctx = get_nfs_open_context((struct nfs_open_context *)
+				file->private_data);
+	if (!IS_SYNC(inode)) {
+		error = nfs_readpage_async(ctx, inode, page);
+		goto out;
+	}
+
+	error = nfs_readpage_sync(ctx, inode, page);
+	if (error < 0 && IS_SWAPFILE(inode))
+		printk("Aiee.. nfs swap-in of page failed!\n");
+out:
+	put_nfs_open_context(ctx);
+	return error;
+
+out_error:
+	unlock_page(page);
+	return error;
+}
+
+struct nfs_readdesc {
+	struct list_head *head;
+	struct nfs_open_context *ctx;
+};
+
+static int
+readpage_async_filler(void *data, struct page *page)
+{
+	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
+	struct inode *inode = page->mapping->host;
+	struct nfs_page *new;
+	unsigned int len;
+
+	nfs_wb_page(inode, page);
+	len = nfs_page_length(inode, page);
+	if (len == 0)
+		return nfs_return_empty_page(page);
+	new = nfs_create_request(desc->ctx, inode, page, 0, len);
+	if (IS_ERR(new)) {
+		SetPageError(page);
+		unlock_page(page);
+		return PTR_ERR(new);
+	}
+	if (len < PAGE_CACHE_SIZE)
+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
+	nfs_lock_request(new);
+	nfs_list_add_request(new, desc->head);
+	return 0;
+}
+
+int nfs_readpages(struct file *filp, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+	LIST_HEAD(head);
+	struct nfs_readdesc desc = {
+		.head		= &head,
+	};
+	struct inode *inode = mapping->host;
+	struct nfs_server *server = NFS_SERVER(inode);
+	int ret;
+
+	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
+			inode->i_sb->s_id,
+			(long long)NFS_FILEID(inode),
+			nr_pages);
+
+	if (filp == NULL) {
+		desc.ctx = nfs_find_open_context(inode, FMODE_READ);
+		if (desc.ctx == NULL)
+			return -EBADF;
+	} else
+		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
+				filp->private_data);
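+	/*
+	 * read_cache_pages() queues each page onto &head via
+	 * readpage_async_filler(); anything collected there is then sent
+	 * out in coalesced batches by nfs_pagein_list().
+	 */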
+	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
+	if (!list_empty(&head)) {
+		int err = nfs_pagein_list(&head, server->rpages);
+		if (!ret)
+			ret = err;
+	}
+	put_nfs_open_context(desc.ctx);
+	return ret;
+}
+
+int nfs_init_readpagecache(void)
+{
+	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
+					     sizeof(struct nfs_read_data),
+					     0, SLAB_HWCACHE_ALIGN,
+					     NULL, NULL);
+	if (nfs_rdata_cachep == NULL)
+		return -ENOMEM;
+
+	nfs_rdata_mempool = mempool_create(MIN_POOL_READ,
+					   mempool_alloc_slab,
+					   mempool_free_slab,
+					   nfs_rdata_cachep);
+	if (nfs_rdata_mempool == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void nfs_destroy_readpagecache(void)
+{
+	mempool_destroy(nfs_rdata_mempool);
+	if (kmem_cache_destroy(nfs_rdata_cachep))
+		printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
+}
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
new file mode 100644
index 0000000..35f1065
--- /dev/null
+++ b/fs/nfs/symlink.c
@@ -0,0 +1,117 @@
+/*
+ *  linux/fs/nfs/symlink.c
+ *
+ *  Copyright (C) 1992  Rick Sladkey
+ *
+ *  Optimization changes Copyright (C) 1994 Florian La Roche
+ *
+ *  Jun 7 1999, cache symlink lookups in the page cache.  -DaveM
+ *
+ *  nfs symlink handling code
+ */
+
+#define NFS_NEED_XDR_TYPES
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/namei.h>
+
+/* Symlink caching in the page cache is even simpler and more
+ * straightforward than readdir caching.
+ *
+ * At the beginning of the page we store a pointer to the struct page in
+ * question, which simplifies nfs_put_link(): if the inode has been
+ * invalidated we cannot find the page to be freed via a pagecache lookup.
+ * The NUL-terminated symlink string follows immediately thereafter.
+ */
+
+struct nfs_symlink {
+	struct page *page;
+	char body[0];
+};
+
+static int nfs_symlink_filler(struct inode *inode, struct page *page)
+{
+	const unsigned int pgbase = offsetof(struct nfs_symlink, body);
+	const unsigned int pglen = PAGE_SIZE - pgbase;
+	int error;
+
+	lock_kernel();
+	error = NFS_PROTO(inode)->readlink(inode, page, pgbase, pglen);
+	unlock_kernel();
+	if (error < 0)
+		goto error;
+	SetPageUptodate(page);
+	unlock_page(page);
+	return 0;
+
+error:
+	SetPageError(page);
+	unlock_page(page);
+	return -EIO;
+}
+
+static int nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	struct page *page;
+	struct nfs_symlink *p;
+	void *err = ERR_PTR(nfs_revalidate_inode(NFS_SERVER(inode), inode));
+	if (err)
+		goto read_failed;
+	page = read_cache_page(&inode->i_data, 0,
+				(filler_t *)nfs_symlink_filler, inode);
+	if (IS_ERR(page)) {
+		err = page;
+		goto read_failed;
+	}
+	if (!PageUptodate(page)) {
+		err = ERR_PTR(-EIO);
+		goto getlink_read_error;
+	}
+	p = kmap(page);
+	p->page = page;
+	nd_set_link(nd, p->body);
+	return 0;
+
+getlink_read_error:
+	page_cache_release(page);
+read_failed:
+	nd_set_link(nd, err);
+	return 0;
+}
+
+static void nfs_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s)) {
+		struct nfs_symlink *p;
+		struct page *page;
+
+		p = container_of(s, struct nfs_symlink, body[0]);
+		page = p->page;
+
+		kunmap(page);
+		page_cache_release(page);
+	}
+}
+
+/*
+ * symlinks can't do much...
+ */
+struct inode_operations nfs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= nfs_follow_link,
+	.put_link	= nfs_put_link,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+};
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
new file mode 100644
index 0000000..f732541
--- /dev/null
+++ b/fs/nfs/unlink.c
@@ -0,0 +1,227 @@
+/*
+ *  linux/fs/nfs/unlink.c
+ *
+ * nfs sillydelete handling
+ *
+ * NOTE: we rely on holding the BKL for list manipulation protection.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dcache.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+
+
+struct nfs_unlinkdata {
+	struct nfs_unlinkdata	*next;
+	struct dentry	*dir, *dentry;
+	struct qstr	name;
+	struct rpc_task	task;
+	struct rpc_cred	*cred;
+	unsigned int	count;
+};
+
+static struct nfs_unlinkdata	*nfs_deletes;
+static RPC_WAITQ(nfs_delete_queue, "nfs_delete_queue");
+
+/**
+ * nfs_detach_unlinkdata - Remove asynchronous unlink from global list
+ * @data: pointer to descriptor
+ */
+static inline void
+nfs_detach_unlinkdata(struct nfs_unlinkdata *data)
+{
+	struct nfs_unlinkdata	**q;
+
+	for (q = &nfs_deletes; *q != NULL; q = &((*q)->next)) {
+		if (*q == data) {
+			*q = data->next;
+			break;
+		}
+	}
+}
+
+/**
+ * nfs_put_unlinkdata - release data from a sillydelete operation.
+ * @data: pointer to unlink structure.
+ */
+static void
+nfs_put_unlinkdata(struct nfs_unlinkdata *data)
+{
+	if (--data->count == 0) {
+		nfs_detach_unlinkdata(data);
+		if (data->name.name != NULL)
+			kfree(data->name.name);
+		kfree(data);
+	}
+}
+
+#define NAME_ALLOC_LEN(len)	((len+16) & ~15)
+/**
+ * nfs_copy_dname - copy dentry name to data structure
+ * @dentry: pointer to dentry
+ * @data: nfs_unlinkdata
+ */
+static inline void
+nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
+{
+	char		*str;
+	int		len = dentry->d_name.len;
+
+	str = kmalloc(NAME_ALLOC_LEN(len), GFP_KERNEL);
+	if (!str)
+		return;
+	memcpy(str, dentry->d_name.name, len);
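+	/* Only the first successful copy is kept; later callers free theirs. */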
+	if (!data->name.len) {
+		data->name.len = len;
+		data->name.name = str;
+	} else
+		kfree(str);
+}
+
+/**
+ * nfs_async_unlink_init - Initialize the RPC info
+ * @task: rpc_task of the sillydelete
+ *
+ * We delay initializing RPC info until after the call to dentry_iput()
+ * in order to minimize races against rename().
+ */
+static void
+nfs_async_unlink_init(struct rpc_task *task)
+{
+	struct nfs_unlinkdata	*data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct dentry		*dir = data->dir;
+	struct rpc_message	msg = {
+		.rpc_cred	= data->cred,
+	};
+	int			status = -ENOENT;
+
+	if (!data->name.len)
+		goto out_err;
+
+	status = NFS_PROTO(dir->d_inode)->unlink_setup(&msg, dir, &data->name);
+	if (status < 0)
+		goto out_err;
+	nfs_begin_data_update(dir->d_inode);
+	rpc_call_setup(task, &msg, 0);
+	return;
+ out_err:
+	rpc_exit(task, status);
+}
+
+/**
+ * nfs_async_unlink_done - Sillydelete post-processing
+ * @task: rpc_task of the sillydelete
+ *
+ * Do the directory attribute update.
+ */
+static void
+nfs_async_unlink_done(struct rpc_task *task)
+{
+	struct nfs_unlinkdata	*data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct dentry		*dir = data->dir;
+	struct inode		*dir_i;
+
+	if (!dir)
+		return;
+	dir_i = dir->d_inode;
+	nfs_end_data_update(dir_i);
+	if (NFS_PROTO(dir_i)->unlink_done(dir, task))
+		return;
+	put_rpccred(data->cred);
+	data->cred = NULL;
+	dput(dir);
+}
+
+/**
+ * nfs_async_unlink_release - Release the sillydelete data.
+ * @task: rpc_task of the sillydelete
+ *
+ * We need to call nfs_put_unlinkdata as a 'tk_release' callback, since
+ * freeing the nfs_unlinkdata also frees the embedded rpc_task.
+ */
+static void
+nfs_async_unlink_release(struct rpc_task *task)
+{
+	struct nfs_unlinkdata	*data = (struct nfs_unlinkdata *)task->tk_calldata;
+	nfs_put_unlinkdata(data);
+}
+
+/**
+ * nfs_async_unlink - asynchronous unlinking of a file
+ * @dentry: dentry to unlink
+ */
+int
+nfs_async_unlink(struct dentry *dentry)
+{
+	struct dentry	*dir = dentry->d_parent;
+	struct nfs_unlinkdata	*data;
+	struct rpc_task	*task;
+	struct rpc_clnt	*clnt = NFS_CLIENT(dir->d_inode);
+	int		status = -ENOMEM;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out;
+	memset(data, 0, sizeof(*data));
+
+	data->cred = rpcauth_lookupcred(clnt->cl_auth, 0);
+	if (IS_ERR(data->cred)) {
+		status = PTR_ERR(data->cred);
+		goto out_free;
+	}
+	data->dir = dget(dir);
+	data->dentry = dentry;
+
+	data->next = nfs_deletes;
+	nfs_deletes = data;
+	data->count = 1;
+
+	task = &data->task;
+	rpc_init_task(task, clnt, nfs_async_unlink_done, RPC_TASK_ASYNC);
+	task->tk_calldata = data;
+	task->tk_action = nfs_async_unlink_init;
+	task->tk_release = nfs_async_unlink_release;
+
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+	spin_unlock(&dentry->d_lock);
+
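+	/*
+	 * The task sleeps on nfs_delete_queue until nfs_complete_unlink()
+	 * has copied the dentry name and wakes it up.
+	 */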
+	rpc_sleep_on(&nfs_delete_queue, task, NULL, NULL);
+	status = 0;
+ out:
+	return status;
+out_free:
+	kfree(data);
+	return status;
+}
+
+/**
+ * nfs_complete_unlink - Initialize completion of the sillydelete
+ * @dentry: dentry to delete
+ *
+ * Since we're most likely to be called by dentry_iput(), we
+ * only use the dentry to find the sillydelete. We then copy the name
+ * into the qstr.
+ */
+void
+nfs_complete_unlink(struct dentry *dentry)
+{
+	struct nfs_unlinkdata	*data;
+
+	for (data = nfs_deletes; data != NULL; data = data->next) {
+		if (dentry == data->dentry)
+			break;
+	}
+	if (!data)
+		return;
+	data->count++;
+	nfs_copy_dname(dentry, data);
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+	spin_unlock(&dentry->d_lock);
+	rpc_wake_up_task(&data->task);
+	nfs_put_unlinkdata(data);
+}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
new file mode 100644
index 0000000..6f7a4af
--- /dev/null
+++ b/fs/nfs/write.c
@@ -0,0 +1,1431 @@
+/*
+ * linux/fs/nfs/write.c
+ *
+ * Writing file data over NFS.
+ *
+ * We do it like this: When a (user) process wishes to write data to an
+ * NFS file, a write request is allocated that contains the RPC task data
+ * plus some info on the page to be written, and added to the inode's
+ * write chain. If the process writes past the end of the page, an async
+ * RPC call to write the page is scheduled immediately; otherwise, the call
+ * is delayed for a few seconds.
+ *
+ * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
+ *
+ * Write requests are kept on the inode's writeback list. Each entry in
+ * that list references the page (portion) to be written. When the
+ * cache timeout has expired, the RPC task is woken up, and tries to
+ * lock the page. As soon as it manages to do so, the request is moved
+ * from the writeback list to the writelock list.
+ *
+ * Note: we must make sure never to confuse the inode passed in the
+ * write_page request with the one in page->inode. As far as I understand
+ * it, these are different when doing a swap-out.
+ *
+ * To understand everything that goes on here and in the NFS read code,
+ * one should be aware that a page is locked in exactly one of the following
+ * cases:
+ *
+ *  -	A write request is in progress.
+ *  -	A user process is in generic_file_write/nfs_update_page
+ *  -	A user process is in generic_file_read
+ *
+ * Also note that because of the way pages are invalidated in
+ * nfs_revalidate_inode, the following assertions hold:
+ *
+ *  -	If a page is dirty, there will be no read requests (a page will
+ *	not be re-read unless invalidated by nfs_revalidate_inode).
+ *  -	If the page is not uptodate, there will be no pending write
+ *	requests, and no process will be in nfs_update_page.
+ *
+ * FIXME: Interaction with the vmscan routines is not optimal yet.
+ * Either vmscan must be made nfs-savvy, or we need a different page
+ * reclaim concept that supports something like FS-independent
+ * buffer_heads with a b_ops-> field.
+ *
+ * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs_page.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+
+#include "delegation.h"
+
+#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
+
+#define MIN_POOL_WRITE		(32)
+#define MIN_POOL_COMMIT		(4)
+
+/*
+ * Local function declarations
+ */
+static struct nfs_page * nfs_update_request(struct nfs_open_context*,
+					    struct inode *,
+					    struct page *,
+					    unsigned int, unsigned int);
+static void nfs_writeback_done_partial(struct nfs_write_data *, int);
+static void nfs_writeback_done_full(struct nfs_write_data *, int);
+static int nfs_wait_on_write_congestion(struct address_space *, int);
+static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
+static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
+			   unsigned int npages, int how);
+
+static kmem_cache_t *nfs_wdata_cachep;
+mempool_t *nfs_wdata_mempool;
+static mempool_t *nfs_commit_mempool;
+
+static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
+
+static inline struct nfs_write_data *nfs_commit_alloc(void)
+{
+	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
+	if (p) {
+		memset(p, 0, sizeof(*p));
+		INIT_LIST_HEAD(&p->pages);
+	}
+	return p;
+}
+
+static inline void nfs_commit_free(struct nfs_write_data *p)
+{
+	mempool_free(p, nfs_commit_mempool);
+}
+
+static void nfs_writedata_release(struct rpc_task *task)
+{
+	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
+	nfs_writedata_free(wdata);
+}
+
+/* Adjust the file length if we're writing beyond the end */
+static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+{
+	struct inode *inode = page->mapping->host;
+	loff_t end, i_size = i_size_read(inode);
+	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+
+	if (i_size > 0 && page->index < end_index)
+		return;
+	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+	if (i_size >= end)
+		return;
+	i_size_write(inode, end);
+}
+
+/* We can set the PG_uptodate flag if we see that a write request
+ * covers the full page.
+ */
+static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
+{
+	loff_t end_offs;
+
+	if (PageUptodate(page))
+		return;
+	if (base != 0)
+		return;
+	if (count == PAGE_CACHE_SIZE) {
+		SetPageUptodate(page);
+		return;
+	}
+
+	end_offs = i_size_read(page->mapping->host) - 1;
+	if (end_offs < 0)
+		return;
+	/* Is this the last page? */
+	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
+		return;
+	/* This is the last page: set PG_uptodate if we cover the entire
+	 * extent of the data, then zero the rest of the page.
+	 */
+	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
+		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
+		SetPageUptodate(page);
+	}
+}
+
+/*
+ * Write a page synchronously.
+ * Offset is the data offset within the page.
+ */
+static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
+		struct page *page, unsigned int offset, unsigned int count,
+		int how)
+{
+	unsigned int	wsize = NFS_SERVER(inode)->wsize;
+	int		result, written = 0;
+	struct nfs_write_data *wdata;
+
+	wdata = nfs_writedata_alloc();
+	if (!wdata)
+		return -ENOMEM;
+
+	wdata->flags = how;
+	wdata->cred = ctx->cred;
+	wdata->inode = inode;
+	wdata->args.fh = NFS_FH(inode);
+	wdata->args.context = ctx;
+	wdata->args.pages = &page;
+	wdata->args.stable = NFS_FILE_SYNC;
+	wdata->args.pgbase = offset;
+	wdata->args.count = wsize;
+	wdata->res.fattr = &wdata->fattr;
+	wdata->res.verf = &wdata->verf;
+
+	dprintk("NFS:      nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
+		inode->i_sb->s_id,
+		(long long)NFS_FILEID(inode),
+		count, (long long)(page_offset(page) + offset));
+
+	nfs_begin_data_update(inode);
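+	/*
+	 * Issue synchronous writes of at most wsize bytes at a time until
+	 * the requested range has been written out.
+	 */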
+	do {
+		if (count < wsize)
+			wdata->args.count = count;
+		wdata->args.offset = page_offset(page) + wdata->args.pgbase;
+
+		result = NFS_PROTO(inode)->write(wdata);
+
+		if (result < 0) {
+			/* Must mark the page invalid after I/O error */
+			ClearPageUptodate(page);
+			goto io_error;
+		}
+		if (result < wdata->args.count)
+			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
+					wdata->args.count, result);
+
+		wdata->args.offset += result;
+		wdata->args.pgbase += result;
+		written += result;
+		count -= result;
+	} while (count);
+	/* Update file length */
+	nfs_grow_file(page, offset, written);
+	/* Set the PG_uptodate flag? */
+	nfs_mark_uptodate(page, offset, written);
+
+	if (PageError(page))
+		ClearPageError(page);
+
+io_error:
+	nfs_end_data_update_defer(inode);
+	nfs_writedata_free(wdata);
+	return written ? written : result;
+}
+
+static int nfs_writepage_async(struct nfs_open_context *ctx,
+		struct inode *inode, struct page *page,
+		unsigned int offset, unsigned int count)
+{
+	struct nfs_page	*req;
+	int		status;
+
+	req = nfs_update_request(ctx, inode, page, offset, count);
+	status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
+	if (status < 0)
+		goto out;
+	/* Update file length */
+	nfs_grow_file(page, offset, count);
+	/* Set the PG_uptodate flag? */
+	nfs_mark_uptodate(page, offset, count);
+	nfs_unlock_request(req);
+ out:
+	return status;
+}
+
+static int wb_priority(struct writeback_control *wbc)
+{
+	if (wbc->for_reclaim)
+		return FLUSH_HIGHPRI;
+	if (wbc->for_kupdate)
+		return FLUSH_LOWPRI;
+	return 0;
+}
+
+/*
+ * Write an mmapped page to the server.
+ */
+int nfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct nfs_open_context *ctx;
+	struct inode *inode = page->mapping->host;
+	unsigned long end_index;
+	unsigned offset = PAGE_CACHE_SIZE;
+	loff_t i_size = i_size_read(inode);
+	int inode_referenced = 0;
+	int priority = wb_priority(wbc);
+	int err;
+
+	/*
+	 * Note: We need to ensure that we have a reference to the inode
+	 *       if we are to do asynchronous writes. If not, waiting
+	 *       in nfs_wait_on_request() may deadlock with clear_inode().
+	 *
+	 *       If igrab() fails here, then it is in any case safe to
+	 *       call nfs_wb_page(), since there will be no pending writes.
+	 */
+	if (igrab(inode) != 0)
+		inode_referenced = 1;
+	end_index = i_size >> PAGE_CACHE_SHIFT;
+
+	/* Ensure we've flushed out any previous writes */
+	nfs_wb_page_priority(inode, page, priority);
+
+	/* easy case */
+	if (page->index < end_index)
+		goto do_it;
+	/* things got complicated... */
+	offset = i_size & (PAGE_CACHE_SIZE-1);
+
+	/* OK, are we completely out? */
+	err = 0; /* potential race with truncate - ignore */
+	if (page->index >= end_index+1 || !offset)
+		goto out;
+do_it:
+	ctx = nfs_find_open_context(inode, FMODE_WRITE);
+	if (ctx == NULL) {
+		err = -EBADF;
+		goto out;
+	}
+	lock_kernel();
+	if (!IS_SYNC(inode) && inode_referenced) {
+		err = nfs_writepage_async(ctx, inode, page, 0, offset);
+		if (err >= 0) {
+			err = 0;
+			if (wbc->for_reclaim)
+				nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
+		}
+	} else {
+		err = nfs_writepage_sync(ctx, inode, page, 0,
+						offset, priority);
+		if (err >= 0) {
+			if (err != offset)
+				redirty_page_for_writepage(wbc, page);
+			err = 0;
+		}
+	}
+	unlock_kernel();
+	put_nfs_open_context(ctx);
+out:
+	unlock_page(page);
+	if (inode_referenced)
+		iput(inode);
+	return err;
+}
+
+/*
+ * Note: causes nfs_update_request() to block on the assumption
+ * 	 that the writeback is generated due to memory pressure.
+ */
+int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct inode *inode = mapping->host;
+	int err;
+
+	err = generic_writepages(mapping, wbc);
+	if (err)
+		return err;
+	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
+		if (wbc->nonblocking)
+			return 0;
+		nfs_wait_on_write_congestion(mapping, 0);
+	}
+	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
+	if (err < 0)
+		goto out;
+	wbc->nr_to_write -= err;
+	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
+		err = nfs_wait_on_requests(inode, 0, 0);
+		if (err < 0)
+			goto out;
+	}
+	err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc));
+	if (err > 0) {
+		wbc->nr_to_write -= err;
+		err = 0;
+	}
+out:
+	clear_bit(BDI_write_congested, &bdi->state);
+	wake_up_all(&nfs_write_congestion);
+	return err;
+}
+
+/*
+ * Insert a write request into an inode
+ */
+static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int error;
+
+	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
+	BUG_ON(error == -EEXIST);
+	if (error)
+		return error;
+	if (!nfsi->npages) {
+		igrab(inode);
+		nfs_begin_data_update(inode);
+		if (nfs_have_delegation(inode, FMODE_WRITE))
+			nfsi->change_attr++;
+	}
+	nfsi->npages++;
+	atomic_inc(&req->wb_count);
+	return 0;
+}
+
+/*
+ * Remove a write request from an inode
+ */
+static void nfs_inode_remove_request(struct nfs_page *req)
+{
+	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	BUG_ON(!NFS_WBACK_BUSY(req));
+
+	spin_lock(&nfsi->req_lock);
+	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+	nfsi->npages--;
+	if (!nfsi->npages) {
+		spin_unlock(&nfsi->req_lock);
+		nfs_end_data_update_defer(inode);
+		iput(inode);
+	} else
+		spin_unlock(&nfsi->req_lock);
+	nfs_clear_request(req);
+	nfs_release_request(req);
+}
+
+/*
+ * Find a request
+ */
+static inline struct nfs_page *
+_nfs_find_request(struct inode *inode, unsigned long index)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_page *req;
+
+	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
+	if (req)
+		atomic_inc(&req->wb_count);
+	return req;
+}
+
+static struct nfs_page *
+nfs_find_request(struct inode *inode, unsigned long index)
+{
+	struct nfs_page		*req;
+	struct nfs_inode	*nfsi = NFS_I(inode);
+
+	spin_lock(&nfsi->req_lock);
+	req = _nfs_find_request(inode, index);
+	spin_unlock(&nfsi->req_lock);
+	return req;
+}
+
+/*
+ * Add a request to the inode's dirty list.
+ */
+static void
+nfs_mark_request_dirty(struct nfs_page *req)
+{
+	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	spin_lock(&nfsi->req_lock);
+	nfs_list_add_request(req, &nfsi->dirty);
+	nfsi->ndirty++;
+	spin_unlock(&nfsi->req_lock);
+	inc_page_state(nr_dirty);
+	mark_inode_dirty(inode);
+}
+
+/*
+ * Check if a request is dirty
+ */
+static inline int
+nfs_dirty_request(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+/*
+ * Add a request to the inode's commit list.
+ */
+static void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	spin_lock(&nfsi->req_lock);
+	nfs_list_add_request(req, &nfsi->commit);
+	nfsi->ncommit++;
+	spin_unlock(&nfsi->req_lock);
+	inc_page_state(nr_unstable);
+	mark_inode_dirty(inode);
+}
+#endif
+
+/*
+ * Wait for a request to complete.
+ *
+ * Interruptible by signals only if mounted with intr flag.
+ */
+static int
+nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_page *req;
+	unsigned long		idx_end, next;
+	unsigned int		res = 0;
+	int			error;
+
+	if (npages == 0)
+		idx_end = ~0;
+	else
+		idx_end = idx_start + npages - 1;
+
+	spin_lock(&nfsi->req_lock);
+	next = idx_start;
+	while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+		if (req->wb_index > idx_end)
+			break;
+
+		next = req->wb_index + 1;
+		if (!NFS_WBACK_BUSY(req))
+			continue;
+
+		atomic_inc(&req->wb_count);
+		spin_unlock(&nfsi->req_lock);
+		error = nfs_wait_on_request(req);
+		nfs_release_request(req);
+		if (error < 0)
+			return error;
+		spin_lock(&nfsi->req_lock);
+		res++;
+	}
+	spin_unlock(&nfsi->req_lock);
+	return res;
+}
+
+/*
+ * nfs_scan_dirty - Scan an inode for dirty requests
+ * @inode: NFS inode to scan
+ * @dst: destination list
+ * @idx_start: lower bound of page->index to scan.
+ * @npages: idx_start + npages sets the upper bound to scan.
+ *
+ * Moves requests from the inode's dirty page list.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ */
+static int
+nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int	res;
+	res = nfs_scan_list(&nfsi->dirty, dst, idx_start, npages);
+	nfsi->ndirty -= res;
+	sub_page_state(nr_dirty,res);
+	if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
+		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
+	return res;
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+/*
+ * nfs_scan_commit - Scan an inode for commit requests
+ * @inode: NFS inode to scan
+ * @dst: destination list
+ * @idx_start: lower bound of page->index to scan.
+ * @npages: idx_start + npages sets the upper bound to scan.
+ *
+ * Moves requests from the inode's 'commit' request list.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ */
+static int
+nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int	res;
+	res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
+	nfsi->ncommit -= res;
+	if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
+		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
+	return res;
+}
+#endif
+
+static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
+{
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
+	might_sleep();
+
+	if (!bdi_write_congested(bdi))
+		return 0;
+	if (intr) {
+		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
+		sigset_t oldset;
+
+		rpc_clnt_sigmask(clnt, &oldset);
+		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
+		if (bdi_write_congested(bdi)) {
+			if (signalled())
+				ret = -ERESTARTSYS;
+			else
+				schedule();
+		}
+		rpc_clnt_sigunmask(clnt, &oldset);
+	} else {
+		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
+		if (bdi_write_congested(bdi))
+			schedule();
+	}
+	finish_wait(&nfs_write_congestion, &wait);
+	return ret;
+}
+
+
+/*
+ * Try to update any existing write request, or create one if there is none.
+ * In order to match, the request's credentials must match those of
+ * the calling process.
+ *
+ * Note: Should always be called with the Page Lock held!
+ */
+static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
+		struct inode *inode, struct page *page,
+		unsigned int offset, unsigned int bytes)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_page		*req, *new = NULL;
+	unsigned long		rqend, end;
+
+	end = offset + bytes;
+
+	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
+		return ERR_PTR(-ERESTARTSYS);
+	for (;;) {
+		/* Loop over all inode entries and see if we find
+		 * a request for the page we wish to update.
+		 */
+		spin_lock(&nfsi->req_lock);
+		req = _nfs_find_request(inode, page->index);
+		if (req) {
+			if (!nfs_lock_request_dontget(req)) {
+				int error;
+				spin_unlock(&nfsi->req_lock);
+				error = nfs_wait_on_request(req);
+				nfs_release_request(req);
+				if (error < 0)
+					return ERR_PTR(error);
+				continue;
+			}
+			spin_unlock(&nfsi->req_lock);
+			if (new)
+				nfs_release_request(new);
+			break;
+		}
+
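+		/*
+		 * No request was found for this page.  On the second pass
+		 * through the loop, insert the request we allocated at the
+		 * bottom of the previous iteration.
+		 */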
+		if (new) {
+			int error;
+			nfs_lock_request_dontget(new);
+			error = nfs_inode_add_request(inode, new);
+			if (error) {
+				spin_unlock(&nfsi->req_lock);
+				nfs_unlock_request(new);
+				return ERR_PTR(error);
+			}
+			spin_unlock(&nfsi->req_lock);
+			nfs_mark_request_dirty(new);
+			return new;
+		}
+		spin_unlock(&nfsi->req_lock);
+
+		new = nfs_create_request(ctx, inode, page, offset, bytes);
+		if (IS_ERR(new))
+			return new;
+	}
+
+	/* We have a request for our page.
+	 * If the creds don't match, or the
+	 * page addresses don't match,
+	 * tell the caller to wait on the conflicting
+	 * request.
+	 */
+	rqend = req->wb_offset + req->wb_bytes;
+	if (req->wb_context != ctx
+	    || req->wb_page != page
+	    || !nfs_dirty_request(req)
+	    || offset > rqend || end < req->wb_offset) {
+		nfs_unlock_request(req);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* Okay, the request matches. Update the region */
+	if (offset < req->wb_offset) {
+		req->wb_offset = offset;
+		req->wb_pgbase = offset;
+		req->wb_bytes = rqend - req->wb_offset;
+	}
+
+	if (end > rqend)
+		req->wb_bytes = end - req->wb_offset;
+
+	return req;
+}
+
+int nfs_flush_incompatible(struct file *file, struct page *page)
+{
+	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct inode	*inode = page->mapping->host;
+	struct nfs_page	*req;
+	int		status = 0;
+	/*
+	 * Look for a request corresponding to this page. If there
+	 * is one, and it belongs to another file, we flush it out
+	 * before we try to copy anything into the page. Do this
+	 * due to the lack of an ACCESS-type call in NFSv2.
+	 * Also do the same if we find a request from an existing
+	 * dropped page.
+	 */
+	req = nfs_find_request(inode, page->index);
+	if (req) {
+		if (req->wb_page != page || ctx != req->wb_context)
+			status = nfs_wb_page(inode, page);
+		nfs_release_request(req);
+	}
+	return (status < 0) ? status : 0;
+}
+
+/*
+ * Update and possibly write a cached page of an NFS file.
+ *
+ * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
+ * things with a page scheduled for an RPC call (e.g. invalidate it).
+ */
+int nfs_updatepage(struct file *file, struct page *page,
+		unsigned int offset, unsigned int count)
+{
+	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct dentry	*dentry = file->f_dentry;
+	struct inode	*inode = page->mapping->host;
+	struct nfs_page	*req;
+	int		status = 0;
+
+	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name,
+		count, (long long)(page_offset(page) + offset));
+
+	if (IS_SYNC(inode)) {
+		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
+		if (status > 0) {
+			if (offset == 0 && status == PAGE_CACHE_SIZE)
+				SetPageUptodate(page);
+			return 0;
+		}
+		return status;
+	}
+
+	/* If we're not using byte range locks, and we know the page
+	 * is entirely in cache, it may be more efficient to avoid
+	 * fragmenting write requests.
+	 */
+	if (PageUptodate(page) && inode->i_flock == NULL) {
+		loff_t end_offs = i_size_read(inode) - 1;
+		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
+
+		count += offset;
+		offset = 0;
+		if (unlikely(end_offs < 0)) {
+			/* Do nothing */
+		} else if (page->index == end_index) {
+			unsigned int pglen;
+			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
+			if (count < pglen)
+				count = pglen;
+		} else if (page->index < end_index)
+			count = PAGE_CACHE_SIZE;
+	}
+
+	/*
+	 * Try to find an NFS request corresponding to this page
+	 * and update it.
+	 * If the existing request cannot be updated, we must flush
+	 * it out now.
+	 */
+	do {
+		req = nfs_update_request(ctx, inode, page, offset, count);
+		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
+		if (status != -EBUSY)
+			break;
+		/* Request could not be updated. Flush it out and try again */
+		status = nfs_wb_page(inode, page);
+	} while (status >= 0);
+	if (status < 0)
+		goto done;
+
+	status = 0;
+
+	/* Update file length */
+	nfs_grow_file(page, offset, count);
+	/* Set the PG_uptodate flag? */
+	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+	nfs_unlock_request(req);
+done:
+	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
+			status, (long long)i_size_read(inode));
+	if (status < 0)
+		ClearPageUptodate(page);
+	return status;
+}
+
+static void nfs_writepage_release(struct nfs_page *req)
+{
+	end_page_writeback(req->wb_page);
+
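+	/*
+	 * For NFSv3/v4, an unstable write may still need a COMMIT, or a
+	 * resend if the write verifier changed; otherwise the request is
+	 * finished.
+	 */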
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+	if (!PageError(req->wb_page)) {
+		if (NFS_NEED_RESCHED(req)) {
+			nfs_mark_request_dirty(req);
+			goto out;
+		} else if (NFS_NEED_COMMIT(req)) {
+			nfs_mark_request_commit(req);
+			goto out;
+		}
+	}
+	nfs_inode_remove_request(req);
+
+out:
+	nfs_clear_commit(req);
+	nfs_clear_reschedule(req);
+#else
+	nfs_inode_remove_request(req);
+#endif
+	nfs_unlock_request(req);
+}
+
+static inline int flush_task_priority(int how)
+{
+	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
+		case FLUSH_HIGHPRI:
+			return RPC_PRIORITY_HIGH;
+		case FLUSH_LOWPRI:
+			return RPC_PRIORITY_LOW;
+	}
+	return RPC_PRIORITY_NORMAL;
+}
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+static void nfs_write_rpcsetup(struct nfs_page *req,
+		struct nfs_write_data *data,
+		unsigned int count, unsigned int offset,
+		int how)
+{
+	struct rpc_task		*task = &data->task;
+	struct inode		*inode;
+
+	/* Set up the RPC argument and reply structs
+	 * NB: take care not to mess about with data->commit et al. */
+
+	data->req = req;
+	data->inode = inode = req->wb_context->dentry->d_inode;
+	data->cred = req->wb_context->cred;
+
+	data->args.fh     = NFS_FH(inode);
+	data->args.offset = req_offset(req) + offset;
+	data->args.pgbase = req->wb_pgbase + offset;
+	data->args.pages  = data->pagevec;
+	data->args.count  = count;
+	data->args.context = req->wb_context;
+
+	data->res.fattr   = &data->fattr;
+	data->res.count   = count;
+	data->res.verf    = &data->verf;
+
+	NFS_PROTO(inode)->write_setup(data, how);
+
+	data->task.tk_priority = flush_task_priority(how);
+	data->task.tk_cookie = (unsigned long)inode;
+	data->task.tk_calldata = data;
+	/* Release requests */
+	data->task.tk_release = nfs_writedata_release;
+
+	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+		task->tk_pid,
+		inode->i_sb->s_id,
+		(long long)NFS_FILEID(inode),
+		count,
+		(unsigned long long)data->args.offset);
+}
+
+static void nfs_execute_write(struct nfs_write_data *data)
+{
+	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
+	sigset_t oldset;
+
+	rpc_clnt_sigmask(clnt, &oldset);
+	lock_kernel();
+	rpc_execute(&data->task);
+	unlock_kernel();
+	rpc_clnt_sigunmask(clnt, &oldset);
+}
+
+/*
+ * Generate multiple small requests to write out a single
+ * contiguous dirty area on one page.
+ */
+static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
+{
+	struct nfs_page *req = nfs_list_entry(head->next);
+	struct page *page = req->wb_page;
+	struct nfs_write_data *data;
+	unsigned int wsize = NFS_SERVER(inode)->wsize;
+	unsigned int nbytes, offset;
+	int requests = 0;
+	LIST_HEAD(list);
+
+	nfs_list_remove_request(req);
+
+	nbytes = req->wb_bytes;
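+	/* Allocate one nfs_write_data for each wsize-sized chunk of the request. */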
+	for (;;) {
+		data = nfs_writedata_alloc();
+		if (!data)
+			goto out_bad;
+		list_add(&data->pages, &list);
+		requests++;
+		if (nbytes <= wsize)
+			break;
+		nbytes -= wsize;
+	}
+	atomic_set(&req->wb_complete, requests);
+
+	ClearPageError(page);
+	SetPageWriteback(page);
+	offset = 0;
+	nbytes = req->wb_bytes;
+	do {
+		data = list_entry(list.next, struct nfs_write_data, pages);
+		list_del_init(&data->pages);
+
+		data->pagevec[0] = page;
+		data->complete = nfs_writeback_done_partial;
+
+		if (nbytes > wsize) {
+			nfs_write_rpcsetup(req, data, wsize, offset, how);
+			offset += wsize;
+			nbytes -= wsize;
+		} else {
+			nfs_write_rpcsetup(req, data, nbytes, offset, how);
+			nbytes = 0;
+		}
+		nfs_execute_write(data);
+	} while (nbytes != 0);
+
+	return 0;
+
+out_bad:
+	while (!list_empty(&list)) {
+		data = list_entry(list.next, struct nfs_write_data, pages);
+		list_del(&data->pages);
+		nfs_writedata_free(data);
+	}
+	nfs_mark_request_dirty(req);
+	nfs_unlock_request(req);
+	return -ENOMEM;
+}
+
+/*
+ * Create an RPC task for the given write request and kick it.
+ * The page must have been locked by the caller.
+ *
+ * It may happen that the page we're passed is not marked dirty.
+ * This is the case if nfs_updatepage detects a conflicting request
+ * that has been written but not committed.
+ */
+static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
+{
+	struct nfs_page		*req;
+	struct page		**pages;
+	struct nfs_write_data	*data;
+	unsigned int		count;
+
+	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
+		return nfs_flush_multi(head, inode, how);
+
+	data = nfs_writedata_alloc();
+	if (!data)
+		goto out_bad;
+
+	pages = data->pagevec;
+	count = 0;
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_list_add_request(req, &data->pages);
+		ClearPageError(req->wb_page);
+		SetPageWriteback(req->wb_page);
+		*pages++ = req->wb_page;
+		count += req->wb_bytes;
+	}
+	req = nfs_list_entry(data->pages.next);
+
+	data->complete = nfs_writeback_done_full;
+	/* Set up the argument struct */
+	nfs_write_rpcsetup(req, data, count, 0, how);
+
+	nfs_execute_write(data);
+	return 0;
+ out_bad:
+	while (!list_empty(head)) {
+		struct nfs_page *req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_mark_request_dirty(req);
+		nfs_unlock_request(req);
+	}
+	return -ENOMEM;
+}
+
+static int
+nfs_flush_list(struct list_head *head, int wpages, int how)
+{
+	LIST_HEAD(one_request);
+	struct nfs_page		*req;
+	int			error = 0;
+	unsigned int		pages = 0;
+
+	while (!list_empty(head)) {
+		pages += nfs_coalesce_requests(head, &one_request, wpages);
+		req = nfs_list_entry(one_request.next);
+		error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
+		if (error < 0)
+			break;
+	}
+	if (error >= 0)
+		return pages;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_mark_request_dirty(req);
+		nfs_unlock_request(req);
+	}
+	return error;
+}
+
+/*
+ * Handle a write reply that flushed part of a page.
+ */
+static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
+{
+	struct nfs_page		*req = data->req;
+	struct page		*page = req->wb_page;
+
+	dprintk("NFS: write (%s/%Ld %d@%Ld)",
+		req->wb_context->dentry->d_inode->i_sb->s_id,
+		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+		req->wb_bytes,
+		(long long)req_offset(req));
+
+	if (status < 0) {
+		ClearPageUptodate(page);
+		SetPageError(page);
+		req->wb_context->error = status;
+		dprintk(", error = %d\n", status);
+	} else {
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+		if (data->verf.committed < NFS_FILE_SYNC) {
+			if (!NFS_NEED_COMMIT(req)) {
+				nfs_defer_commit(req);
+				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+				dprintk(" defer commit\n");
+			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+				nfs_defer_reschedule(req);
+				dprintk(" server reboot detected\n");
+			}
+		} else
+#endif
+			dprintk(" OK\n");
+	}
+
+	if (atomic_dec_and_test(&req->wb_complete))
+		nfs_writepage_release(req);
+}
+
+/*
+ * Handle a write reply that flushes a whole page.
+ *
+ * FIXME: There is an inherent race with invalidate_inode_pages and
+ *	  writebacks since the page->count is kept > 1 for as long
+ *	  as the page has a write request pending.
+ */
+static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
+{
+	struct nfs_page		*req;
+	struct page		*page;
+
+	/* Update attributes as result of writeback. */
+	while (!list_empty(&data->pages)) {
+		req = nfs_list_entry(data->pages.next);
+		nfs_list_remove_request(req);
+		page = req->wb_page;
+
+		dprintk("NFS: write (%s/%Ld %d@%Ld)",
+			req->wb_context->dentry->d_inode->i_sb->s_id,
+			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			req->wb_bytes,
+			(long long)req_offset(req));
+
+		if (status < 0) {
+			ClearPageUptodate(page);
+			SetPageError(page);
+			req->wb_context->error = status;
+			end_page_writeback(page);
+			nfs_inode_remove_request(req);
+			dprintk(", error = %d\n", status);
+			goto next;
+		}
+		end_page_writeback(page);
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
+			nfs_inode_remove_request(req);
+			dprintk(" OK\n");
+			goto next;
+		}
+		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+		nfs_mark_request_commit(req);
+		dprintk(" marked for commit\n");
+#else
+		nfs_inode_remove_request(req);
+#endif
+	next:
+		nfs_unlock_request(req);
+	}
+}
+
+/*
+ * This function is called when the WRITE call is complete.
+ */
+void nfs_writeback_done(struct rpc_task *task)
+{
+	struct nfs_write_data	*data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_writeargs	*argp = &data->args;
+	struct nfs_writeres	*resp = &data->res;
+
+	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
+		task->tk_pid, task->tk_status);
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
+		/* We tried a write call, but the server did not
+		 * commit data to stable storage even though we
+		 * requested it.
+		 * Note: There is a known bug in Tru64 < 5.0 in which
+		 *	 the server reports NFS_DATA_SYNC, but performs
+		 *	 NFS_FILE_SYNC. We therefore implement this checking
+		 *	 as a dprintk() in order to avoid filling syslog.
+		 */
+		static unsigned long    complain;
+
+		if (time_before(complain, jiffies)) {
+			dprintk("NFS: faulty NFS server %s:"
+				" (committed = %d) != (stable = %d)\n",
+				NFS_SERVER(data->inode)->hostname,
+				resp->verf->committed, argp->stable);
+			complain = jiffies + 300 * HZ;
+		}
+	}
+#endif
+	/* Is this a short write? */
+	if (task->tk_status >= 0 && resp->count < argp->count) {
+		static unsigned long    complain;
+
+		/* Has the server at least made some progress? */
+		if (resp->count != 0) {
+			/* Was this an NFSv2 write or an NFSv3 stable write? */
+			if (resp->verf->committed != NFS_UNSTABLE) {
+				/* Resend from where the server left off */
+				argp->offset += resp->count;
+				argp->pgbase += resp->count;
+				argp->count -= resp->count;
+			} else {
+				/* Resend as a stable write in order to avoid
+				 * headaches in the case of a server crash.
+				 */
+				argp->stable = NFS_FILE_SYNC;
+			}
+			rpc_restart_call(task);
+			return;
+		}
+		if (time_before(complain, jiffies)) {
+			printk(KERN_WARNING
+			       "NFS: Server wrote zero bytes, expected %u.\n",
+					argp->count);
+			complain = jiffies + 300 * HZ;
+		}
+		/* Can't do anything about it except throw an error. */
+		task->tk_status = -EIO;
+	}
+
+	/*
+	 * Process the nfs_page list
+	 */
+	data->complete(data, task->tk_status);
+}
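
The short-write branch above advances the pending argument struct so that the restarted RPC resumes exactly where the server stopped. A minimal standalone sketch of that adjustment (the struct here is illustrative; the real fields live in struct nfs_writeargs):

	/* Illustrative mirror of the resend adjustment performed above. */
	struct short_write_args {
		unsigned long long	offset;	/* file offset of the write */
		unsigned int		pgbase;	/* offset into the first page */
		unsigned int		count;	/* bytes still to send */
	};

	static void resume_after_short_write(struct short_write_args *args,
					     unsigned int written)
	{
		args->offset += written;	/* skip what the server accepted */
		args->pgbase += written;
		args->count  -= written;
	}
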
+
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static void nfs_commit_release(struct rpc_task *task)
+{
+	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
+	nfs_commit_free(wdata);
+}
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+static void nfs_commit_rpcsetup(struct list_head *head,
+		struct nfs_write_data *data, int how)
+{
+	struct rpc_task		*task = &data->task;
+	struct nfs_page		*first, *last;
+	struct inode		*inode;
+	loff_t			start, end, len;
+
+	/* Set up the RPC argument and reply structs
+	 * NB: take care not to mess about with data->commit et al. */
+
+	list_splice_init(head, &data->pages);
+	first = nfs_list_entry(data->pages.next);
+	last = nfs_list_entry(data->pages.prev);
+	inode = first->wb_context->dentry->d_inode;
+
+	/*
+	 * Determine the offset range of requests in the COMMIT call.
+	 * We rely on the fact that data->pages is an ordered list...
+	 */
+	start = req_offset(first);
+	end = req_offset(last) + last->wb_bytes;
+	len = end - start;
+	/* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
+	if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1))
+		len = 0;
+
+	data->inode	  = inode;
+	data->cred	  = first->wb_context->cred;
+
+	data->args.fh     = NFS_FH(data->inode);
+	data->args.offset = start;
+	data->args.count  = len;
+	data->res.count   = len;
+	data->res.fattr   = &data->fattr;
+	data->res.verf    = &data->verf;
+
+	NFS_PROTO(inode)->commit_setup(data, how);
+
+	data->task.tk_priority = flush_task_priority(how);
+	data->task.tk_cookie = (unsigned long)inode;
+	data->task.tk_calldata = data;
+	/* Release requests */
+	data->task.tk_release = nfs_commit_release;
+
+	dprintk("NFS: %4d initiated commit call\n", task->tk_pid);
+}
+
+/*
+ * Commit dirty pages
+ */
+static int
+nfs_commit_list(struct list_head *head, int how)
+{
+	struct nfs_write_data	*data;
+	struct nfs_page         *req;
+
+	data = nfs_commit_alloc();
+
+	if (!data)
+		goto out_bad;
+
+	/* Set up the argument struct */
+	nfs_commit_rpcsetup(head, data, how);
+
+	nfs_execute_write(data);
+	return 0;
+ out_bad:
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_mark_request_commit(req);
+		nfs_unlock_request(req);
+	}
+	return -ENOMEM;
+}
+
+/*
+ * COMMIT call returned
+ */
+void
+nfs_commit_done(struct rpc_task *task)
+{
+	struct nfs_write_data	*data = (struct nfs_write_data *)task->tk_calldata;
+	struct nfs_page		*req;
+	int res = 0;
+
+	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
+		task->tk_pid, task->tk_status);
+
+	while (!list_empty(&data->pages)) {
+		req = nfs_list_entry(data->pages.next);
+		nfs_list_remove_request(req);
+
+		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
+			req->wb_context->dentry->d_inode->i_sb->s_id,
+			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			req->wb_bytes,
+			(long long)req_offset(req));
+		if (task->tk_status < 0) {
+			req->wb_context->error = task->tk_status;
+			nfs_inode_remove_request(req);
+			dprintk(", error = %d\n", task->tk_status);
+			goto next;
+		}
+
+		/* Okay, COMMIT succeeded, apparently. Check the verifier
+		 * returned by the server against all stored verfs. */
+		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
+			/* We have a match */
+			nfs_inode_remove_request(req);
+			dprintk(" OK\n");
+			goto next;
+		}
+		/* We have a mismatch. Write the page again */
+		dprintk(" mismatch\n");
+		nfs_mark_request_dirty(req);
+	next:
+		nfs_unlock_request(req);
+		res++;
+	}
+	sub_page_state(nr_unstable, res);
+}
+#endif
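
The verifier handling in nfs_commit_done() above follows the usual NFSv3 rule: the verf returned by an UNSTABLE write is remembered with each request, and a COMMIT reply carrying a different verf means the server rebooted and may have lost the data, so the page is marked dirty and written again. A self-contained sketch of that decision (names here are illustrative):

	/* 8 bytes matches the NFSv3 write verifier size. */
	struct verf_sketch { char data[8]; };

	/* Returns 1 if the request can be retired, 0 if it must be rewritten. */
	static int commit_settles_request(const struct verf_sketch *saved_on_write,
					  const struct verf_sketch *from_commit)
	{
		return memcmp(saved_on_write->data, from_commit->data,
			      sizeof(saved_on_write->data)) == 0;
	}
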
+
+static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
+			   unsigned int npages, int how)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	LIST_HEAD(head);
+	int			res,
+				error = 0;
+
+	spin_lock(&nfsi->req_lock);
+	res = nfs_scan_dirty(inode, &head, idx_start, npages);
+	spin_unlock(&nfsi->req_lock);
+	if (res)
+		error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
+	if (error < 0)
+		return error;
+	return res;
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
+		    unsigned int npages, int how)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	LIST_HEAD(head);
+	int			res,
+				error = 0;
+
+	spin_lock(&nfsi->req_lock);
+	res = nfs_scan_commit(inode, &head, idx_start, npages);
+	if (res) {
+		res += nfs_scan_commit(inode, &head, 0, 0);
+		spin_unlock(&nfsi->req_lock);
+		error = nfs_commit_list(&head, how);
+	} else
+		spin_unlock(&nfsi->req_lock);
+	if (error < 0)
+		return error;
+	return res;
+}
+#endif
+
+int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
+		  unsigned int npages, int how)
+{
+	int	error,
+		wait;
+
+	wait = how & FLUSH_WAIT;
+	how &= ~FLUSH_WAIT;
+
+	do {
+		error = 0;
+		if (wait)
+			error = nfs_wait_on_requests(inode, idx_start, npages);
+		if (error == 0)
+			error = nfs_flush_inode(inode, idx_start, npages, how);
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+		if (error == 0)
+			error = nfs_commit_inode(inode, idx_start, npages, how);
+#endif
+	} while (error > 0);
+	return error;
+}
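
A hedged usage sketch for nfs_sync_inode() above: flush and commit every dirty page of an inode and wait for the requests to complete. FLUSH_WAIT is the flag tested above; passing 0 for both idx_start and npages is assumed to mean "the whole file", since the scan helpers treat npages == 0 as an open-ended range.

	static int sync_whole_file_sketch(struct inode *inode)
	{
		/* Wait for outstanding requests, flush, then commit. */
		return nfs_sync_inode(inode, 0, 0, FLUSH_WAIT);
	}
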
+
+int nfs_init_writepagecache(void)
+{
+	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
+					     sizeof(struct nfs_write_data),
+					     0, SLAB_HWCACHE_ALIGN,
+					     NULL, NULL);
+	if (nfs_wdata_cachep == NULL)
+		return -ENOMEM;
+
+	nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
+					   mempool_alloc_slab,
+					   mempool_free_slab,
+					   nfs_wdata_cachep);
+	if (nfs_wdata_mempool == NULL)
+		return -ENOMEM;
+
+	nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
+					   mempool_alloc_slab,
+					   mempool_free_slab,
+					   nfs_wdata_cachep);
+	if (nfs_commit_mempool == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
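
The write-data allocators used earlier in this file are expected to reduce to mempool calls of roughly this shape; a hedged sketch (GFP_NOFS avoids recursing into the filesystem during writeback):

	static struct nfs_write_data *writedata_alloc_sketch(void)
	{
		struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

		if (p)
			memset(p, 0, sizeof(*p));	/* start from a clean slate */
		return p;
	}

	static void writedata_free_sketch(struct nfs_write_data *p)
	{
		mempool_free(p, nfs_wdata_mempool);
	}
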
+
+void nfs_destroy_writepagecache(void)
+{
+	mempool_destroy(nfs_commit_mempool);
+	mempool_destroy(nfs_wdata_mempool);
+	if (kmem_cache_destroy(nfs_wdata_cachep))
+		printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
+}
+
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
new file mode 100644
index 0000000..0b14938
--- /dev/null
+++ b/fs/nfsctl.c
@@ -0,0 +1,118 @@
+/*
+ *	fs/nfsctl.c
+ *
+ *	This should eventually move to userland.
+ *
+ */
+#include <linux/config.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/linkage.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+
+/*
+ * open a file on nfsd fs
+ */
+
+static struct file *do_open(char *name, int flags)
+{
+	struct nameidata nd;
+	int error;
+
+	nd.mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
+
+	if (IS_ERR(nd.mnt))
+		return (struct file *)nd.mnt;
+
+	nd.dentry = dget(nd.mnt->mnt_root);
+	nd.last_type = LAST_ROOT;
+	nd.flags = 0;
+	nd.depth = 0;
+
+	error = path_walk(name, &nd);
+	if (error)
+		return ERR_PTR(error);
+
+	if (flags == O_RDWR)
+		error = may_open(&nd,MAY_READ|MAY_WRITE,FMODE_READ|FMODE_WRITE);
+	else
+		error = may_open(&nd, MAY_WRITE, FMODE_WRITE);
+
+	if (!error)
+		return dentry_open(nd.dentry, nd.mnt, flags);
+
+	path_release(&nd);
+	return ERR_PTR(error);
+}
+
+static struct {
+	char *name; int wsize; int rsize;
+} map[] = {
+	[NFSCTL_SVC] = {
+		.name	= ".svc",
+		.wsize	= sizeof(struct nfsctl_svc)
+	},
+	[NFSCTL_ADDCLIENT] = {
+		.name	= ".add",
+		.wsize	= sizeof(struct nfsctl_client)
+	},
+	[NFSCTL_DELCLIENT] = {
+		.name	= ".del",
+		.wsize	= sizeof(struct nfsctl_client)
+	},
+	[NFSCTL_EXPORT] = {
+		.name	= ".export",
+		.wsize	= sizeof(struct nfsctl_export)
+	},
+	[NFSCTL_UNEXPORT] = {
+		.name	= ".unexport",
+		.wsize	= sizeof(struct nfsctl_export)
+	},
+	[NFSCTL_GETFD] = {
+		.name	= ".getfd",
+		.wsize	= sizeof(struct nfsctl_fdparm),
+		.rsize	= NFS_FHSIZE
+	},
+	[NFSCTL_GETFS] = {
+		.name	= ".getfs",
+		.wsize	= sizeof(struct nfsctl_fsparm),
+		.rsize	= sizeof(struct knfsd_fh)
+	},
+};
+
+long
+asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *res)
+{
+	struct file *file;
+	void __user *p = &arg->u;
+	int version;
+	int err;
+
+	if (copy_from_user(&version, &arg->ca_version, sizeof(int)))
+		return -EFAULT;
+
+	if (version != NFSCTL_VERSION) {
+		printk(KERN_WARNING "nfsd: incompatible version in syscall.\n");
+		return -EINVAL;
+	}
+
+	if (cmd < 0 || cmd >= sizeof(map)/sizeof(map[0]) || !map[cmd].name)
+		return -EINVAL;
+
+	file = do_open(map[cmd].name, map[cmd].rsize ? O_RDWR : O_WRONLY);	
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+	err = file->f_op->write(file, p, map[cmd].wsize, &file->f_pos);
+	if (err >= 0 && map[cmd].rsize)
+		err = file->f_op->read(file, res, map[cmd].rsize, &file->f_pos);
+	if (err >= 0)
+		err = 0;
+	fput(file);
+	return err;
+}
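
For context, a hedged userspace sketch of how this syscall is driven, e.g. to start server threads. It assumes the field and macro names from <linux/nfsd/syscall.h> (ca_version, ca_svc, svc_port, svc_nthreads) and invokes the raw syscall number rather than guessing at a libc wrapper prototype:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/nfsd/syscall.h>

	static int start_nfsd_sketch(int nthreads)
	{
		struct nfsctl_arg arg;

		arg.ca_version = NFSCTL_VERSION;	/* checked by the kernel above */
		arg.ca_svc.svc_port = 2049;		/* standard NFS port */
		arg.ca_svc.svc_nthreads = nthreads;
		/* NFSCTL_SVC has no result buffer, so the third pointer is NULL. */
		return syscall(__NR_nfsservctl, NFSCTL_SVC, &arg, NULL);
	}
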
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
new file mode 100644
index 0000000..b8680a2
--- /dev/null
+++ b/fs/nfsd/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Linux nfs server
+#
+
+obj-$(CONFIG_NFSD)	+= nfsd.o
+
+nfsd-y 			:= nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
+			   export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
+nfsd-$(CONFIG_NFSD_V3)	+= nfs3proc.o nfs3xdr.o
+nfsd-$(CONFIG_NFSD_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
+			   nfs4acl.o nfs4callback.o
+nfsd-objs		:= $(nfsd-y)
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
new file mode 100644
index 0000000..cfe9ce8
--- /dev/null
+++ b/fs/nfsd/auth.c
@@ -0,0 +1,63 @@
+/*
+ * linux/fs/nfsd/auth.c
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfsd/nfsd.h>
+
+#define	CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
+
+int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+{
+	struct svc_cred	*cred = &rqstp->rq_cred;
+	int i;
+	int ret;
+
+	if (exp->ex_flags & NFSEXP_ALLSQUASH) {
+		cred->cr_uid = exp->ex_anon_uid;
+		cred->cr_gid = exp->ex_anon_gid;
+		put_group_info(cred->cr_group_info);
+		cred->cr_group_info = groups_alloc(0);
+	} else if (exp->ex_flags & NFSEXP_ROOTSQUASH) {
+		struct group_info *gi;
+		if (!cred->cr_uid)
+			cred->cr_uid = exp->ex_anon_uid;
+		if (!cred->cr_gid)
+			cred->cr_gid = exp->ex_anon_gid;
+		gi = groups_alloc(cred->cr_group_info->ngroups);
+		if (gi)
+			for (i = 0; i < cred->cr_group_info->ngroups; i++) {
+				if (!GROUP_AT(cred->cr_group_info, i))
+					GROUP_AT(gi, i) = exp->ex_anon_gid;
+				else
+					GROUP_AT(gi, i) = GROUP_AT(cred->cr_group_info, i);
+			}
+		put_group_info(cred->cr_group_info);
+		cred->cr_group_info = gi;
+	}
+
+	if (cred->cr_uid != (uid_t) -1)
+		current->fsuid = cred->cr_uid;
+	else
+		current->fsuid = exp->ex_anon_uid;
+	if (cred->cr_gid != (gid_t) -1)
+		current->fsgid = cred->cr_gid;
+	else
+		current->fsgid = exp->ex_anon_gid;
+
+	if (!cred->cr_group_info)
+		return -ENOMEM;
+	ret = set_current_groups(cred->cr_group_info);
+	if ((cred->cr_uid)) {
+		cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
+	} else {
+		cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
+						  current->cap_permitted);
+	}
+	return ret;
+}
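
A minimal sketch of the squashing policy nfsd_setuser() applies above, reduced to the uid decision alone (pure illustration, not kernel code):

	static unsigned int squash_uid(unsigned int uid, int all_squash,
				       int root_squash, unsigned int anon_uid)
	{
		if (all_squash)
			return anon_uid;	/* everyone becomes anonymous */
		if (root_squash && uid == 0)
			return anon_uid;	/* only root is squashed */
		return uid;
	}
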
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
new file mode 100644
index 0000000..9a11aa3
--- /dev/null
+++ b/fs/nfsd/export.c
@@ -0,0 +1,1200 @@
+#define MSNFS	/* HACK HACK */
+/*
+ * linux/fs/nfsd/export.c
+ *
+ * NFS exporting and validation.
+ *
+ * We maintain a list of clients, each of which has a list of
+ * exports. To export an fs to a given client, you first have
+ * to create the client entry with NFSCTL_ADDCLIENT, which
+ * creates a client control block and adds it to the hash
+ * table. Then, you call NFSCTL_EXPORT for each fs.
+ *
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de>
+ */
+
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/in.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/rwsem.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/hash.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/nfsfh.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/lockd/bind.h>
+
+#define NFSDDBG_FACILITY	NFSDDBG_EXPORT
+#define NFSD_PARANOIA 1
+
+typedef struct auth_domain	svc_client;
+typedef struct svc_export	svc_export;
+
+static void		exp_do_unexport(svc_export *unexp);
+static int		exp_verify_string(char *cp, int max);
+
+/*
+ * We have two caches.
+ * One maps client+vfsmnt+dentry to export options - the export map
+ * The other maps client+filehandle-fragment to export options. - the expkey map
+ *
+ * The export options are actually stored in the first map, and the
+ * second map contains a reference to the entry in the first map.
+ */
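
A hedged sketch of the two-step lookup this comment describes: a filehandle fragment is first resolved to an expkey, and the expkey's ek_export pointer then leads to the export entry holding the actual options (exp_find_key() is defined further down in this file):

	static struct svc_export *resolve_export_sketch(struct auth_domain *clp,
							int fsid_type, u32 *fsidv,
							struct cache_req *reqp)
	{
		struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);

		if (!ek || IS_ERR(ek))
			return NULL;	/* no key, or the lookup was deferred */
		return ek->ek_export;
	}
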
+
+#define	EXPKEY_HASHBITS		8
+#define	EXPKEY_HASHMAX		(1 << EXPKEY_HASHBITS)
+#define	EXPKEY_HASHMASK		(EXPKEY_HASHMAX -1)
+static struct cache_head *expkey_table[EXPKEY_HASHMAX];
+
+static inline int svc_expkey_hash(struct svc_expkey *item)
+{
+	int hash = item->ek_fsidtype;
+	char * cp = (char*)item->ek_fsid;
+	int len = key_len(item->ek_fsidtype);
+
+	hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
+	hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
+	return hash & EXPKEY_HASHMASK;
+}
+
+void expkey_put(struct cache_head *item, struct cache_detail *cd)
+{
+	if (cache_put(item, cd)) {
+		struct svc_expkey *key = container_of(item, struct svc_expkey, h);
+		if (test_bit(CACHE_VALID, &item->flags) &&
+		    !test_bit(CACHE_NEGATIVE, &item->flags))
+			exp_put(key->ek_export);
+		auth_domain_put(key->ek_client);
+		kfree(key);
+	}
+}
+
+static void expkey_request(struct cache_detail *cd,
+			   struct cache_head *h,
+			   char **bpp, int *blen)
+{
+	/* client fsidtype \xfsid */
+	struct svc_expkey *ek = container_of(h, struct svc_expkey, h);
+	char type[5];
+
+	qword_add(bpp, blen, ek->ek_client->name);
+	snprintf(type, 5, "%d", ek->ek_fsidtype);
+	qword_add(bpp, blen, type);
+	qword_addhex(bpp, blen, (char*)ek->ek_fsid, key_len(ek->ek_fsidtype));
+	(*bpp)[-1] = '\n';
+}
+
+static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *, int);
+static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+	/* client fsidtype fsid [path] */
+	char *buf;
+	int len;
+	struct auth_domain *dom = NULL;
+	int err;
+	int fsidtype;
+	char *ep;
+	struct svc_expkey key;
+
+	if (mesg[mlen-1] != '\n')
+		return -EINVAL;
+	mesg[mlen-1] = 0;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!buf) goto out;
+
+	err = -EINVAL;
+	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+		goto out;
+
+	err = -ENOENT;
+	dom = auth_domain_find(buf);
+	if (!dom)
+		goto out;
+	dprintk("found domain %s\n", buf);
+
+	err = -EINVAL;
+	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+		goto out;
+	fsidtype = simple_strtoul(buf, &ep, 10);
+	if (*ep)
+		goto out;
+	dprintk("found fsidtype %d\n", fsidtype);
+	if (fsidtype > 2)
+		goto out;
+	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+		goto out;
+	dprintk("found fsid length %d\n", len);
+	if (len != key_len(fsidtype))
+		goto out;
+
+	/* OK, we seem to have a valid key */
+	key.h.flags = 0;
+	key.h.expiry_time = get_expiry(&mesg);
+	if (key.h.expiry_time == 0)
+		goto out;
+
+	key.ek_client = dom;	
+	key.ek_fsidtype = fsidtype;
+	memcpy(key.ek_fsid, buf, len);
+
+	/* now we want a pathname, or empty meaning NEGATIVE  */
+	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0)
+		goto out;
+	dprintk("Path seems to be <%s>\n", buf);
+	err = 0;
+	if (len == 0) {
+		struct svc_expkey *ek;
+		set_bit(CACHE_NEGATIVE, &key.h.flags);
+		ek = svc_expkey_lookup(&key, 1);
+		if (ek)
+			expkey_put(&ek->h, &svc_expkey_cache);
+	} else {
+		struct nameidata nd;
+		struct svc_expkey *ek;
+		struct svc_export *exp;
+		err = path_lookup(buf, 0, &nd);
+		if (err)
+			goto out;
+
+		dprintk("Found the path %s\n", buf);
+		exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
+
+		err = -ENOENT;
+		if (!exp)
+			goto out_nd;
+		key.ek_export = exp;
+		dprintk("And found export\n");
+		
+		ek = svc_expkey_lookup(&key, 1);
+		if (ek)
+			expkey_put(&ek->h, &svc_expkey_cache);
+		exp_put(exp);
+		err = 0;
+	out_nd:
+		path_release(&nd);
+	}
+	cache_flush();
+ out:
+	if (dom)
+		auth_domain_put(dom);
+	if (buf)
+		kfree(buf);
+	return err;
+}
+
+static int expkey_show(struct seq_file *m,
+		       struct cache_detail *cd,
+		       struct cache_head *h)
+{
+	struct svc_expkey *ek;
+
+	if (h == NULL) {
+		seq_puts(m, "#domain fsidtype fsid [path]\n");
+		return 0;
+	}
+	ek = container_of(h, struct svc_expkey, h);
+	seq_printf(m, "%s %d 0x%08x", ek->ek_client->name,
+		   ek->ek_fsidtype, ek->ek_fsid[0]);
+	if (ek->ek_fsidtype != 1)
+		seq_printf(m, "%08x", ek->ek_fsid[1]);
+	if (ek->ek_fsidtype == 2)
+		seq_printf(m, "%08x", ek->ek_fsid[2]);
+	if (test_bit(CACHE_VALID, &h->flags) && 
+	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
+		seq_printf(m, " ");
+		seq_path(m, ek->ek_export->ex_mnt, ek->ek_export->ex_dentry, "\\ \t\n");
+	}
+	seq_printf(m, "\n");
+	return 0;
+}
+	
+struct cache_detail svc_expkey_cache = {
+	.hash_size	= EXPKEY_HASHMAX,
+	.hash_table	= expkey_table,
+	.name		= "nfsd.fh",
+	.cache_put	= expkey_put,
+	.cache_request	= expkey_request,
+	.cache_parse	= expkey_parse,
+	.cache_show	= expkey_show,
+};
+
+static inline int svc_expkey_match (struct svc_expkey *a, struct svc_expkey *b)
+{
+	if (a->ek_fsidtype != b->ek_fsidtype ||
+	    a->ek_client != b->ek_client ||
+	    memcmp(a->ek_fsid, b->ek_fsid, key_len(a->ek_fsidtype)) != 0)
+		return 0;
+	return 1;
+}
+
+static inline void svc_expkey_init(struct svc_expkey *new, struct svc_expkey *item)
+{
+	cache_get(&item->ek_client->h);
+	new->ek_client = item->ek_client;
+	new->ek_fsidtype = item->ek_fsidtype;
+	new->ek_fsid[0] = item->ek_fsid[0];
+	new->ek_fsid[1] = item->ek_fsid[1];
+	new->ek_fsid[2] = item->ek_fsid[2];
+}
+
+static inline void svc_expkey_update(struct svc_expkey *new, struct svc_expkey *item)
+{
+	cache_get(&item->ek_export->h);
+	new->ek_export = item->ek_export;
+}
+
+static DefineSimpleCacheLookup(svc_expkey,0) /* no inplace updates */
+
+#define	EXPORT_HASHBITS		8
+#define	EXPORT_HASHMAX		(1<< EXPORT_HASHBITS)
+#define	EXPORT_HASHMASK		(EXPORT_HASHMAX -1)
+
+static struct cache_head *export_table[EXPORT_HASHMAX];
+
+static inline int svc_export_hash(struct svc_export *item)
+{
+	int rv;
+
+	rv = hash_ptr(item->ex_client, EXPORT_HASHBITS);
+	rv ^= hash_ptr(item->ex_dentry, EXPORT_HASHBITS);
+	rv ^= hash_ptr(item->ex_mnt, EXPORT_HASHBITS);
+	return rv;
+}
+
+void svc_export_put(struct cache_head *item, struct cache_detail *cd)
+{
+	if (cache_put(item, cd)) {
+		struct svc_export *exp = container_of(item, struct svc_export, h);
+		dput(exp->ex_dentry);
+		mntput(exp->ex_mnt);
+		auth_domain_put(exp->ex_client);
+		kfree(exp);
+	}
+}
+
+static void svc_export_request(struct cache_detail *cd,
+			       struct cache_head *h,
+			       char **bpp, int *blen)
+{
+	/*  client path */
+	struct svc_export *exp = container_of(h, struct svc_export, h);
+	char *pth;
+
+	qword_add(bpp, blen, exp->ex_client->name);
+	pth = d_path(exp->ex_dentry, exp->ex_mnt, *bpp, *blen);
+	if (IS_ERR(pth)) {
+		/* is this correct? */
+		(*bpp)[0] = '\n';
+		return;
+	}
+	qword_add(bpp, blen, pth);
+	(*bpp)[-1] = '\n';
+}
+
+static struct svc_export *svc_export_lookup(struct svc_export *, int);
+
+static int check_export(struct inode *inode, int flags)
+{
+
+	/* We currently export only dirs and regular files.
+	 * This is what mountd does.
+	 */
+	if (!S_ISDIR(inode->i_mode) &&
+	    !S_ISREG(inode->i_mode))
+		return -ENOTDIR;
+
+	/* There are two requirements on a filesystem to be exportable.
+	 * 1:  We must be able to identify the filesystem from a number.
+	 *       either a device number (so FS_REQUIRES_DEV needed)
+	 *       or an FSID number (so NFSEXP_FSID needed).
+	 * 2:  We must be able to find an inode from a filehandle.
+	 *       This means that s_export_op must be set.
+	 */
+	if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) &&
+	    !(flags & NFSEXP_FSID)) {
+		dprintk("exp_export: export of non-dev fs without fsid\n");
+		return -EINVAL;
+	}
+	if (!inode->i_sb->s_export_op) {
+		dprintk("exp_export: export of invalid fs type.\n");
+		return -EINVAL;
+	}
+
+	/* Ok, we can export it */
+	if (!inode->i_sb->s_export_op->find_exported_dentry)
+		inode->i_sb->s_export_op->find_exported_dentry =
+			find_exported_dentry;
+	return 0;
+
+}
+
+static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+	/* client path expiry [flags anonuid anongid fsid] */
+	char *buf;
+	int len;
+	int err;
+	struct auth_domain *dom = NULL;
+	struct nameidata nd;
+	struct svc_export exp, *expp;
+	int an_int;
+
+	nd.dentry = NULL;
+
+	if (mesg[mlen-1] != '\n')
+		return -EINVAL;
+	mesg[mlen-1] = 0;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!buf) goto out;
+
+	/* client */
+	len = qword_get(&mesg, buf, PAGE_SIZE);
+	err = -EINVAL;
+	if (len <= 0) goto out;
+
+	err = -ENOENT;
+	dom = auth_domain_find(buf);
+	if (!dom)
+		goto out;
+
+	/* path */
+	err = -EINVAL;
+	if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+		goto out;
+	err = path_lookup(buf, 0, &nd);
+	if (err) goto out;
+
+	exp.h.flags = 0;
+	exp.ex_client = dom;
+	exp.ex_mnt = nd.mnt;
+	exp.ex_dentry = nd.dentry;
+
+	/* expiry */
+	err = -EINVAL;
+	exp.h.expiry_time = get_expiry(&mesg);
+	if (exp.h.expiry_time == 0)
+		goto out;
+
+	/* flags */
+	err = get_int(&mesg, &an_int);
+	if (err == -ENOENT)
+		set_bit(CACHE_NEGATIVE, &exp.h.flags);
+	else {
+		if (err || an_int < 0) goto out;	
+		exp.ex_flags= an_int;
+	
+		/* anon uid */
+		err = get_int(&mesg, &an_int);
+		if (err) goto out;
+		exp.ex_anon_uid= an_int;
+
+		/* anon gid */
+		err = get_int(&mesg, &an_int);
+		if (err) goto out;
+		exp.ex_anon_gid= an_int;
+
+		/* fsid */
+		err = get_int(&mesg, &an_int);
+		if (err) goto out;
+		exp.ex_fsid = an_int;
+
+		err = check_export(nd.dentry->d_inode, exp.ex_flags);
+		if (err) goto out;
+	}
+
+	expp = svc_export_lookup(&exp, 1);
+	if (expp)
+		exp_put(expp);
+	err = 0;
+	cache_flush();
+ out:
+	if (nd.dentry)
+		path_release(&nd);
+	if (dom)
+		auth_domain_put(dom);
+	if (buf)
+		kfree(buf);
+	return err;
+}
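
For reference, a line accepted by the parser above has the shape "client path expiry [flags anonuid anongid fsid]"; the values below are purely illustrative of what mountd-style user space would write into the export cache's channel file:

	clientdomain /export 2147483647 1 65534 65534 0
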
+
+static void exp_flags(struct seq_file *m, int flag, int fsid, uid_t anonu, uid_t anong);
+
+static int svc_export_show(struct seq_file *m,
+			   struct cache_detail *cd,
+			   struct cache_head *h)
+{
+	struct svc_export *exp;
+
+	if (h == NULL) {
+		seq_puts(m, "#path domain(flags)\n");
+		return 0;
+	}
+	exp = container_of(h, struct svc_export, h);
+	seq_path(m, exp->ex_mnt, exp->ex_dentry, " \t\n\\");
+	seq_putc(m, '\t');
+	seq_escape(m, exp->ex_client->name, " \t\n\\");
+	seq_putc(m, '(');
+	if (test_bit(CACHE_VALID, &h->flags) && 
+	    !test_bit(CACHE_NEGATIVE, &h->flags))
+		exp_flags(m, exp->ex_flags, exp->ex_fsid, 
+			  exp->ex_anon_uid, exp->ex_anon_gid);
+	seq_puts(m, ")\n");
+	return 0;
+}
+struct cache_detail svc_export_cache = {
+	.hash_size	= EXPORT_HASHMAX,
+	.hash_table	= export_table,
+	.name		= "nfsd.export",
+	.cache_put	= svc_export_put,
+	.cache_request	= svc_export_request,
+	.cache_parse	= svc_export_parse,
+	.cache_show	= svc_export_show,
+};
+
+static inline int svc_export_match(struct svc_export *a, struct svc_export *b)
+{
+	return a->ex_client == b->ex_client &&
+		a->ex_dentry == b->ex_dentry &&
+		a->ex_mnt == b->ex_mnt;
+}
+static inline void svc_export_init(struct svc_export *new, struct svc_export *item)
+{
+	cache_get(&item->ex_client->h);
+	new->ex_client = item->ex_client;
+	new->ex_dentry = dget(item->ex_dentry);
+	new->ex_mnt = mntget(item->ex_mnt);
+}
+
+static inline void svc_export_update(struct svc_export *new, struct svc_export *item)
+{
+	new->ex_flags = item->ex_flags;
+	new->ex_anon_uid = item->ex_anon_uid;
+	new->ex_anon_gid = item->ex_anon_gid;
+	new->ex_fsid = item->ex_fsid;
+}
+
+static DefineSimpleCacheLookup(svc_export,1) /* allow inplace updates */
+
+
+struct svc_expkey *
+exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
+{
+	struct svc_expkey key, *ek;
+	int err;
+	
+	if (!clp)
+		return NULL;
+
+	key.ek_client = clp;
+	key.ek_fsidtype = fsid_type;
+	memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
+
+	ek = svc_expkey_lookup(&key, 0);
+	if (ek != NULL)
+		if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp)))
+			ek = ERR_PTR(err);
+	return ek;
+}
+
+static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
+		       struct svc_export *exp)
+{
+	struct svc_expkey key, *ek;
+
+	key.ek_client = clp;
+	key.ek_fsidtype = fsid_type;
+	memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
+	key.ek_export = exp;
+	key.h.expiry_time = NEVER;
+	key.h.flags = 0;
+
+	ek = svc_expkey_lookup(&key, 1);
+	if (ek) {
+		expkey_put(&ek->h, &svc_expkey_cache);
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+/*
+ * Find the client's export entry matching xdev/xino.
+ */
+static inline struct svc_expkey *
+exp_get_key(svc_client *clp, dev_t dev, ino_t ino)
+{
+	u32 fsidv[3];
+	
+	if (old_valid_dev(dev)) {
+		mk_fsid_v0(fsidv, dev, ino);
+		return exp_find_key(clp, 0, fsidv, NULL);
+	}
+	mk_fsid_v3(fsidv, dev, ino);
+	return exp_find_key(clp, 3, fsidv, NULL);
+}
+
+/*
+ * Find the client's export entry matching fsid
+ */
+static inline struct svc_expkey *
+exp_get_fsid_key(svc_client *clp, int fsid)
+{
+	u32 fsidv[2];
+
+	mk_fsid_v1(fsidv, fsid);
+
+	return exp_find_key(clp, 1, fsidv, NULL);
+}
+
+svc_export *
+exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
+		struct cache_req *reqp)
+{
+	struct svc_export *exp, key;
+	
+	if (!clp)
+		return NULL;
+
+	key.ex_client = clp;
+	key.ex_mnt = mnt;
+	key.ex_dentry = dentry;
+
+	exp = svc_export_lookup(&key, 0);
+	if (exp != NULL) 
+		switch (cache_check(&svc_export_cache, &exp->h, reqp)) {
+		case 0: break;
+		case -EAGAIN:
+			exp = ERR_PTR(-EAGAIN);
+			break;
+		default:
+			exp = NULL;
+		}
+
+	return exp;
+}
+
+/*
+ * Find the export entry for a given dentry.
+ */
+struct svc_export *
+exp_parent(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
+	   struct cache_req *reqp)
+{
+	svc_export *exp;
+
+	dget(dentry);
+	exp = exp_get_by_name(clp, mnt, dentry, reqp);
+
+	while (exp == NULL && !IS_ROOT(dentry)) {
+		struct dentry *parent;
+
+		parent = dget_parent(dentry);
+		dput(dentry);
+		dentry = parent;
+		exp = exp_get_by_name(clp, mnt, dentry, reqp);
+	}
+	dput(dentry);
+	return exp;
+}
+
+/*
+ * Hashtable locking. Write locks are placed only by user processes
+ * wanting to modify export information.
+ * Write locking only done in this file.  Read locking
+ * needed externally.
+ */
+
+static DECLARE_RWSEM(hash_sem);
+
+void
+exp_readlock(void)
+{
+	down_read(&hash_sem);
+}
+
+static inline void
+exp_writelock(void)
+{
+	down_write(&hash_sem);
+}
+
+void
+exp_readunlock(void)
+{
+	up_read(&hash_sem);
+}
+
+static inline void
+exp_writeunlock(void)
+{
+	up_write(&hash_sem);
+}
+
+static void exp_fsid_unhash(struct svc_export *exp)
+{
+	struct svc_expkey *ek;
+
+	if ((exp->ex_flags & NFSEXP_FSID) == 0)
+		return;
+
+	ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
+	if (ek && !IS_ERR(ek)) {
+		ek->h.expiry_time = get_seconds()-1;
+		expkey_put(&ek->h, &svc_expkey_cache);
+	}
+	svc_expkey_cache.nextcheck = get_seconds();
+}
+
+static int exp_fsid_hash(svc_client *clp, struct svc_export *exp)
+{
+	u32 fsid[2];
+ 
+	if ((exp->ex_flags & NFSEXP_FSID) == 0)
+		return 0;
+
+	mk_fsid_v1(fsid, exp->ex_fsid);
+	return exp_set_key(clp, 1, fsid, exp);
+}
+
+static int exp_hash(struct auth_domain *clp, struct svc_export *exp)
+{
+	u32 fsid[2];
+	struct inode *inode = exp->ex_dentry->d_inode;
+	dev_t dev = inode->i_sb->s_dev;
+
+	if (old_valid_dev(dev)) {
+		mk_fsid_v0(fsid, dev, inode->i_ino);
+		return exp_set_key(clp, 0, fsid, exp);
+	}
+	mk_fsid_v3(fsid, dev, inode->i_ino);
+	return exp_set_key(clp, 3, fsid, exp);
+}
+
+static void exp_unhash(struct svc_export *exp)
+{
+	struct svc_expkey *ek;
+	struct inode *inode = exp->ex_dentry->d_inode;
+
+	ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
+	if (ek && !IS_ERR(ek)) {
+		ek->h.expiry_time = get_seconds()-1;
+		expkey_put(&ek->h, &svc_expkey_cache);
+	}
+	svc_expkey_cache.nextcheck = get_seconds();
+}
+	
+/*
+ * Export a file system.
+ */
+int
+exp_export(struct nfsctl_export *nxp)
+{
+	svc_client	*clp;
+	struct svc_export	*exp = NULL;
+	struct svc_export	new;
+	struct svc_expkey	*fsid_key = NULL;
+	struct nameidata nd;
+	int		err;
+
+	/* Consistency check */
+	err = -EINVAL;
+	if (!exp_verify_string(nxp->ex_path, NFS_MAXPATHLEN) ||
+	    !exp_verify_string(nxp->ex_client, NFSCLNT_IDMAX))
+		goto out;
+
+	dprintk("exp_export called for %s:%s (%x/%ld fl %x).\n",
+			nxp->ex_client, nxp->ex_path,
+			(unsigned)nxp->ex_dev, (long)nxp->ex_ino,
+			nxp->ex_flags);
+
+	/* Try to lock the export table for update */
+	exp_writelock();
+
+	/* Look up client info */
+	if (!(clp = auth_domain_find(nxp->ex_client)))
+		goto out_unlock;
+
+	/* Look up the dentry */
+	err = path_lookup(nxp->ex_path, 0, &nd);
+	if (err)
+		goto out_unlock;
+	err = -EINVAL;
+
+	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
+
+	/* must make sure there won't be an ex_fsid clash */
+	if ((nxp->ex_flags & NFSEXP_FSID) &&
+	    (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
+	    !IS_ERR(fsid_key) &&
+	    fsid_key->ek_export &&
+	    fsid_key->ek_export != exp)
+		goto finish;
+
+	if (exp) {
+		/* just a flags/id/fsid update */
+
+		exp_fsid_unhash(exp);
+		exp->ex_flags    = nxp->ex_flags;
+		exp->ex_anon_uid = nxp->ex_anon_uid;
+		exp->ex_anon_gid = nxp->ex_anon_gid;
+		exp->ex_fsid     = nxp->ex_dev;
+
+		err = exp_fsid_hash(clp, exp);
+		goto finish;
+	}
+
+	err = check_export(nd.dentry->d_inode, nxp->ex_flags);
+	if (err) goto finish;
+
+	err = -ENOMEM;
+
+	dprintk("nfsd: creating export entry %p for client %p\n", exp, clp);
+
+	new.h.expiry_time = NEVER;
+	new.h.flags = 0;
+	new.ex_client = clp;
+	new.ex_mnt = nd.mnt;
+	new.ex_dentry = nd.dentry;
+	new.ex_flags = nxp->ex_flags;
+	new.ex_anon_uid = nxp->ex_anon_uid;
+	new.ex_anon_gid = nxp->ex_anon_gid;
+	new.ex_fsid = nxp->ex_dev;
+
+	exp = svc_export_lookup(&new, 1);
+
+	if (exp == NULL)
+		goto finish;
+
+	err = 0;
+
+	if (exp_hash(clp, exp) ||
+	    exp_fsid_hash(clp, exp)) {
+		/* failed to create at least one index */
+		exp_do_unexport(exp);
+		cache_flush();
+		err = -ENOMEM;
+	}
+
+finish:
+	if (exp)
+		exp_put(exp);
+	if (fsid_key && !IS_ERR(fsid_key))
+		expkey_put(&fsid_key->h, &svc_expkey_cache);
+	if (clp)
+		auth_domain_put(clp);
+	path_release(&nd);
+out_unlock:
+	exp_writeunlock();
+out:
+	return err;
+}
+
+/*
+ * Unexport a file system. The export entry has already
+ * been removed from the client's list of exported fs's.
+ */
+static void
+exp_do_unexport(svc_export *unexp)
+{
+	unexp->h.expiry_time = get_seconds()-1;
+	svc_export_cache.nextcheck = get_seconds();
+	exp_unhash(unexp);
+	exp_fsid_unhash(unexp);
+}
+
+
+/*
+ * unexport syscall.
+ */
+int
+exp_unexport(struct nfsctl_export *nxp)
+{
+	struct auth_domain *dom;
+	svc_export *exp;
+	struct nameidata nd;
+	int		err;
+
+	/* Consistency check */
+	if (!exp_verify_string(nxp->ex_path, NFS_MAXPATHLEN) ||
+	    !exp_verify_string(nxp->ex_client, NFSCLNT_IDMAX))
+		return -EINVAL;
+
+	exp_writelock();
+
+	err = -EINVAL;
+	dom = auth_domain_find(nxp->ex_client);
+	if (!dom) {
+		dprintk("nfsd: unexport couldn't find %s\n", nxp->ex_client);
+		goto out_unlock;
+	}
+
+	err = path_lookup(nxp->ex_path, 0, &nd);
+	if (err)
+		goto out_domain;
+
+	err = -EINVAL;
+	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
+	path_release(&nd);
+	if (!exp)
+		goto out_domain;
+
+	exp_do_unexport(exp);
+	exp_put(exp);
+	err = 0;
+
+out_domain:
+	auth_domain_put(dom);
+	cache_flush();
+out_unlock:
+	exp_writeunlock();
+	return err;
+}
+
+/*
+ * Obtain the root fh on behalf of a client.
+ * This could be done in user space, but I feel that it adds some safety
+ * since it's harder to fool a kernel module than a user space program.
+ */
+int
+exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+{
+	struct svc_export	*exp;
+	struct nameidata	nd;
+	struct inode		*inode;
+	struct svc_fh		fh;
+	int			err;
+
+	err = -EPERM;
+	/* NB: we probably ought to check that it's NUL-terminated */
+	if (path_lookup(path, 0, &nd)) {
+		printk("nfsd: exp_rootfh path not found %s\n", path);
+		return err;
+	}
+	inode = nd.dentry->d_inode;
+
+	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
+		 path, nd.dentry, clp->name,
+		 inode->i_sb->s_id, inode->i_ino);
+	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+	if (!exp) {
+		dprintk("nfsd: exp_rootfh export not found.\n");
+		goto out;
+	}
+
+	/*
+	 * fh must be initialized before calling fh_compose
+	 */
+	fh_init(&fh, maxsize);
+	if (fh_compose(&fh, exp, nd.dentry, NULL))
+		err = -EINVAL;
+	else
+		err = 0;
+	memcpy(f, &fh.fh_handle, sizeof(struct knfsd_fh));
+	fh_put(&fh);
+	exp_put(exp);
+out:
+	path_release(&nd);
+	return err;
+}
+
+/*
+ * Called when we need the filehandle for the root of the pseudofs,
+ * for a given NFSv4 client.   The root is defined to be the
+ * export point with fsid==0
+ */
+int
+exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
+	       struct cache_req *creq)
+{
+	struct svc_expkey *fsid_key;
+	int rv;
+	u32 fsidv[2];
+
+	mk_fsid_v1(fsidv, 0);
+
+	fsid_key = exp_find_key(clp, 1, fsidv, creq);
+	if (IS_ERR(fsid_key) && PTR_ERR(fsid_key) == -EAGAIN)
+		return nfserr_dropit;
+	if (!fsid_key || IS_ERR(fsid_key))
+		return nfserr_perm;
+
+	rv = fh_compose(fhp, fsid_key->ek_export, 
+			  fsid_key->ek_export->ex_dentry, NULL);
+	expkey_put(&fsid_key->h, &svc_expkey_cache);
+	return rv;
+}
+
+/* Iterator */
+
+static void *e_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+	unsigned hash, export;
+	struct cache_head *ch;
+	
+	exp_readlock();
+	read_lock(&svc_export_cache.hash_lock);
+	if (!n--)
+		return (void *)1;
+	hash = n >> 32;
+	export = n & ((1LL<<32) - 1);
+
+	
+	for (ch=export_table[hash]; ch; ch=ch->next)
+		if (!export--)
+			return ch;
+	n &= ~((1LL<<32) - 1);
+	do {
+		hash++;
+		n += 1LL<<32;
+	} while(hash < EXPORT_HASHMAX && export_table[hash]==NULL);
+	if (hash >= EXPORT_HASHMAX)
+		return NULL;
+	*pos = n+1;
+	return export_table[hash];
+}
+
+static void *e_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct cache_head *ch = p;
+	int hash = (*pos >> 32);
+
+	if (p == (void *)1)
+		hash = 0;
+	else if (ch->next == NULL) {
+		hash++;
+		*pos += 1LL<<32;
+	} else {
+		++*pos;
+		return ch->next;
+	}
+	*pos &= ~((1LL<<32) - 1);
+	while (hash < EXPORT_HASHMAX && export_table[hash] == NULL) {
+		hash++;
+		*pos += 1LL<<32;
+	}
+	if (hash >= EXPORT_HASHMAX)
+		return NULL;
+	++*pos;
+	return export_table[hash];
+}
+
+static void e_stop(struct seq_file *m, void *p)
+{
+	read_unlock(&svc_export_cache.hash_lock);
+	exp_readunlock();
+}
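
The iterator above packs its position into the 64-bit *pos managed by the seq_file core: the upper 32 bits select the hash chain and the lower 32 bits count entries within that chain. A small sketch of the encoding (ignoring the special header slot at position 0):

	static loff_t pos_encode_sketch(unsigned hash, unsigned index)
	{
		return ((loff_t)hash << 32) | index;
	}

	static void pos_decode_sketch(loff_t pos, unsigned *hash, unsigned *index)
	{
		*hash  = pos >> 32;
		*index = pos & ((1LL << 32) - 1);
	}
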
+
+static struct flags {
+	int flag;
+	char *name[2];
+} expflags[] = {
+	{ NFSEXP_READONLY, {"ro", "rw"}},
+	{ NFSEXP_INSECURE_PORT, {"insecure", ""}},
+	{ NFSEXP_ROOTSQUASH, {"root_squash", "no_root_squash"}},
+	{ NFSEXP_ALLSQUASH, {"all_squash", ""}},
+	{ NFSEXP_ASYNC, {"async", "sync"}},
+	{ NFSEXP_GATHERED_WRITES, {"wdelay", "no_wdelay"}},
+	{ NFSEXP_NOHIDE, {"nohide", ""}},
+	{ NFSEXP_CROSSMOUNT, {"crossmnt", ""}},
+	{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
+	{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
+#ifdef MSNFS
+	{ NFSEXP_MSNFS, {"msnfs", ""}},
+#endif
+	{ 0, {"", ""}}
+};
+
+static void exp_flags(struct seq_file *m, int flag, int fsid, uid_t anonu, uid_t anong)
+{
+	int first = 0;
+	struct flags *flg;
+
+	for (flg = expflags; flg->flag; flg++) {
+		int state = (flg->flag & flag)?0:1;
+		if (*flg->name[state])
+			seq_printf(m, "%s%s", first++?",":"", flg->name[state]);
+	}
+	if (flag & NFSEXP_FSID)
+		seq_printf(m, "%sfsid=%d", first++?",":"", fsid);
+	if (anonu != (uid_t)-2 && anonu != (0x10000-2))
+		seq_printf(m, "%sanonuid=%d", first++?",":"", anonu);
+	if (anong != (gid_t)-2 && anong != (0x10000-2))
+		seq_printf(m, "%sanongid=%d", first++?",":"", anong);
+}
+
+static int e_show(struct seq_file *m, void *p)
+{
+	struct cache_head *cp = p;
+	struct svc_export *exp = container_of(cp, struct svc_export, h);
+	svc_client *clp;
+
+	if (p == (void *)1) {
+		seq_puts(m, "# Version 1.1\n");
+		seq_puts(m, "# Path Client(Flags) # IPs\n");
+		return 0;
+	}
+
+	clp = exp->ex_client;
+	cache_get(&exp->h);
+	if (cache_check(&svc_export_cache, &exp->h, NULL))
+		return 0;
+	if (cache_put(&exp->h, &svc_export_cache)) BUG();
+	return svc_export_show(m, &svc_export_cache, cp);
+}
+
+struct seq_operations nfs_exports_op = {
+	.start	= e_start,
+	.next	= e_next,
+	.stop	= e_stop,
+	.show	= e_show,
+};
+
+/*
+ * Add or modify a client.
+ * Change requests may involve the list of host addresses. The list of
+ * exports and possibly existing uid maps are left untouched.
+ */
+int
+exp_addclient(struct nfsctl_client *ncp)
+{
+	struct auth_domain	*dom;
+	int			i, err;
+
+	/* First, consistency check. */
+	err = -EINVAL;
+	if (! exp_verify_string(ncp->cl_ident, NFSCLNT_IDMAX))
+		goto out;
+	if (ncp->cl_naddr > NFSCLNT_ADDRMAX)
+		goto out;
+
+	/* Lock the hashtable */
+	exp_writelock();
+
+	dom = unix_domain_find(ncp->cl_ident);
+
+	err = -ENOMEM;
+	if (!dom)
+		goto out_unlock;
+
+	/* Insert client into hashtable. */
+	for (i = 0; i < ncp->cl_naddr; i++)
+		auth_unix_add_addr(ncp->cl_addrlist[i], dom);
+
+	auth_unix_forget_old(dom);
+	auth_domain_put(dom);
+
+	err = 0;
+
+out_unlock:
+	exp_writeunlock();
+out:
+	return err;
+}
+
+/*
+ * Delete a client given an identifier.
+ */
+int
+exp_delclient(struct nfsctl_client *ncp)
+{
+	int		err;
+	struct auth_domain *dom;
+
+	err = -EINVAL;
+	if (!exp_verify_string(ncp->cl_ident, NFSCLNT_IDMAX))
+		goto out;
+
+	/* Lock the hashtable */
+	exp_writelock();
+
+	dom = auth_domain_find(ncp->cl_ident);
+	/* just make sure that no addresses work 
+	 * and that it will expire soon 
+	 */
+	if (dom) {
+		err = auth_unix_forget_old(dom);
+		dom->h.expiry_time = get_seconds();
+		auth_domain_put(dom);
+	}
+
+	exp_writeunlock();
+out:
+	return err;
+}
+
+/*
+ * Verify that string is non-empty and does not exceed max length.
+ */
+static int
+exp_verify_string(char *cp, int max)
+{
+	int	i;
+
+	for (i = 0; i < max; i++)
+		if (!cp[i])
+			return i;
+	cp[i] = 0;
+	printk(KERN_NOTICE "nfsd: couldn't validate string %s\n", cp);
+	return 0;
+}
+
+/*
+ * Initialize the exports module.
+ */
+void
+nfsd_export_init(void)
+{
+	dprintk("nfsd: initializing export module.\n");
+
+	cache_register(&svc_export_cache);
+	cache_register(&svc_expkey_cache);
+
+}
+
+/*
+ * Flush exports table - called when last nfsd thread is killed
+ */
+void
+nfsd_export_flush(void)
+{
+	exp_writelock();
+	cache_purge(&svc_expkey_cache);
+	cache_purge(&svc_export_cache);
+	exp_writeunlock();
+}
+
+/*
+ * Shutdown the exports module.
+ */
+void
+nfsd_export_shutdown(void)
+{
+
+	dprintk("nfsd: shutting down export module.\n");
+
+	exp_writelock();
+
+	if (cache_unregister(&svc_expkey_cache))
+		printk(KERN_ERR "nfsd: failed to unregister expkey cache\n");
+	if (cache_unregister(&svc_export_cache))
+		printk(KERN_ERR "nfsd: failed to unregister export cache\n");
+	svcauth_unix_purge();
+
+	exp_writeunlock();
+	dprintk("nfsd: export shutdown complete.\n");
+}
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
new file mode 100644
index 0000000..7b889ff
--- /dev/null
+++ b/fs/nfsd/lockd.c
@@ -0,0 +1,79 @@
+/*
+ * linux/fs/nfsd/lockd.c
+ *
+ * This file contains all the stubs needed when communicating with lockd.
+ * This level of indirection is necessary so we can run nfsd+lockd without
+ * requiring the nfs client to be compiled in/loaded, and vice versa.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/bind.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_LOCKD
+
+/*
+ * Note: we hold the dentry use count while the file is open.
+ */
+static u32
+nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
+{
+	u32		nfserr;
+	struct svc_fh	fh;
+
+	/* must initialize before using! but maxsize doesn't matter */
+	fh_init(&fh,0);
+	fh.fh_handle.fh_size = f->size;
+	memcpy((char*)&fh.fh_handle.fh_base, f->data, f->size);
+	fh.fh_export = NULL;
+
+	exp_readlock();
+	nfserr = nfsd_open(rqstp, &fh, S_IFREG, MAY_LOCK, filp);
+	fh_put(&fh);
+	rqstp->rq_client = NULL;
+	exp_readunlock();
+ 	/* nlm and nfsd don't share error codes.
+	 * we invent: 0 = no error
+	 *            1 = stale file handle
+	 *	      2 = other error
+	 */
+	switch (nfserr) {
+	case nfs_ok:
+		return 0;
+	case nfserr_stale:
+		return 1;
+	default:
+		return 2;
+	}
+}
+
+static void
+nlm_fclose(struct file *filp)
+{
+	fput(filp);
+}
+
+static struct nlmsvc_binding	nfsd_nlm_ops = {
+	.fopen		= nlm_fopen,		/* open file for locking */
+	.fclose		= nlm_fclose,		/* close file */
+};
+
+void
+nfsd_lockd_init(void)
+{
+	dprintk("nfsd: initializing lockd\n");
+	nlmsvc_ops = &nfsd_nlm_ops;
+}
+
+void
+nfsd_lockd_shutdown(void)
+{
+	nlmsvc_ops = NULL;
+}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
new file mode 100644
index 0000000..041380f
--- /dev/null
+++ b/fs/nfsd/nfs3proc.c
@@ -0,0 +1,702 @@
+/*
+ * linux/fs/nfsd/nfs3proc.c
+ *
+ * Process version 3 NFS requests.
+ *
+ * Copyright (C) 1996, 1997, 1998 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/linkage.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ext2_fs.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr3.h>
+#include <linux/nfs3.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_PROC
+
+#define RETURN_STATUS(st)	{ resp->status = (st); return (st); }
+
+static int	nfs3_ftypes[] = {
+	0,			/* NF3NON */
+	S_IFREG,		/* NF3REG */
+	S_IFDIR,		/* NF3DIR */
+	S_IFBLK,		/* NF3BLK */
+	S_IFCHR,		/* NF3CHR */
+	S_IFLNK,		/* NF3LNK */
+	S_IFSOCK,		/* NF3SOCK */
+	S_IFIFO,		/* NF3FIFO */
+};
+
+/*
+ * NULL call.
+ */
+static int
+nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	return nfs_ok;
+}
+
+/*
+ * Get a file's attributes
+ */
+static int
+nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+					   struct nfsd3_attrstat *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: GETATTR(3)  %s\n",
+				SVCFH_fmt(&argp->fh));
+
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Set a file's attributes
+ */
+static int
+nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp,
+					   struct nfsd3_attrstat  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: SETATTR(3)  %s\n",
+				SVCFH_fmt(&argp->fh));
+
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,
+			      argp->check_guard, argp->guardtime);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Look up a path name component
+ */
+static int
+nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+					  struct nfsd3_diropres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: LOOKUP(3)   %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	fh_copy(&resp->dirfh, &argp->fh);
+	fh_init(&resp->fh, NFS3_FHSIZE);
+
+	nfserr = nfsd_lookup(rqstp, &resp->dirfh,
+				    argp->name,
+				    argp->len,
+				    &resp->fh);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Check file access
+ */
+static int
+nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
+					  struct nfsd3_accessres *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: ACCESS(3)   %s 0x%x\n",
+				SVCFH_fmt(&argp->fh),
+				argp->access);
+
+	fh_copy(&resp->fh, &argp->fh);
+	resp->access = argp->access;
+	nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Read a symlink.
+ */
+static int
+nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp,
+					   struct nfsd3_readlinkres *resp)
+{
+	int nfserr;
+
+	dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh));
+
+	/* Read the symlink. */
+	fh_copy(&resp->fh, &argp->fh);
+	resp->len = NFS3_MAXPATHLEN;
+	nfserr = nfsd_readlink(rqstp, &resp->fh, argp->buffer, &resp->len);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Read a portion of a file.
+ */
+static int
+nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
+				        struct nfsd3_readres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
+				SVCFH_fmt(&argp->fh),
+				(unsigned long) argp->count,
+				(unsigned long) argp->offset);
+
+	/* Obtain buffer pointer for payload.
+	 * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
+	 * + 1 (xdr opaque byte count) = 26
+	 */
+
+	resp->count = argp->count;
+	if (NFSSVC_MAXBLKSIZE < resp->count)
+		resp->count = NFSSVC_MAXBLKSIZE;
+
+	svc_reserve(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
+
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = nfsd_read(rqstp, &resp->fh, NULL,
+				  argp->offset,
+			   	  argp->vec, argp->vlen,
+				  &resp->count);
+	if (nfserr == 0) {
+		struct inode	*inode = resp->fh.fh_dentry->d_inode;
+
+		resp->eof = (argp->offset + resp->count) >= inode->i_size;
+	}
+
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Write data to a file
+ */
+static int
+nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
+					 struct nfsd3_writeres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: WRITE(3)    %s %d bytes at %ld%s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				(unsigned long) argp->offset,
+				argp->stable? " stable" : "");
+
+	fh_copy(&resp->fh, &argp->fh);
+	resp->committed = argp->stable;
+	nfserr = nfsd_write(rqstp, &resp->fh, NULL,
+				   argp->offset,
+				   argp->vec, argp->vlen,
+				   argp->len,
+				   &resp->committed);
+	resp->count = argp->count;
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * With NFSv3, CREATE processing is a lot easier than with NFSv2.
+ * At least in theory; we'll see how it fares in practice when the
+ * first reports about SunOS compatibility problems start to pour in...
+ */
+static int
+nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+					  struct nfsd3_diropres   *resp)
+{
+	svc_fh		*dirfhp, *newfhp = NULL;
+	struct iattr	*attr;
+	u32		nfserr;
+
+	dprintk("nfsd: CREATE(3)   %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	dirfhp = fh_copy(&resp->dirfh, &argp->fh);
+	newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
+	attr   = &argp->attrs;
+
+	/* Get the directory inode */
+	nfserr = fh_verify(rqstp, dirfhp, S_IFDIR, MAY_CREATE);
+	if (nfserr)
+		RETURN_STATUS(nfserr);
+
+	/* Unfudge the mode bits */
+	attr->ia_mode &= ~S_IFMT;
+	if (!(attr->ia_valid & ATTR_MODE)) { 
+		attr->ia_valid |= ATTR_MODE;
+		attr->ia_mode = S_IFREG;
+	} else {
+		attr->ia_mode = (attr->ia_mode & ~S_IFMT) | S_IFREG;
+	}
+
+	/* Now create the file and set attributes */
+	nfserr = nfsd_create_v3(rqstp, dirfhp, argp->name, argp->len,
+				attr, newfhp,
+				argp->createmode, argp->verf, NULL);
+
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Make directory. This operation is not idempotent.
+ */
+static int
+nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+					 struct nfsd3_diropres   *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: MKDIR(3)    %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	argp->attrs.ia_valid &= ~ATTR_SIZE;
+	fh_copy(&resp->dirfh, &argp->fh);
+	fh_init(&resp->fh, NFS3_FHSIZE);
+	nfserr = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
+				    &argp->attrs, S_IFDIR, 0, &resp->fh);
+
+	RETURN_STATUS(nfserr);
+}
+
+static int
+nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp,
+					   struct nfsd3_diropres    *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: SYMLINK(3)  %s %.*s -> %.*s\n",
+				SVCFH_fmt(&argp->ffh),
+				argp->flen, argp->fname,
+				argp->tlen, argp->tname);
+
+	fh_copy(&resp->dirfh, &argp->ffh);
+	fh_init(&resp->fh, NFS3_FHSIZE);
+	nfserr = nfsd_symlink(rqstp, &resp->dirfh, argp->fname, argp->flen,
+						   argp->tname, argp->tlen,
+						   &resp->fh, &argp->attrs);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Make socket/fifo/device.
+ */
+static int
+nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
+					 struct nfsd3_diropres  *resp)
+{
+	int	nfserr, type;
+	dev_t	rdev = 0;
+
+	dprintk("nfsd: MKNOD(3)    %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	fh_copy(&resp->dirfh, &argp->fh);
+	fh_init(&resp->fh, NFS3_FHSIZE);
+
+	if (argp->ftype == 0 || argp->ftype >= NF3BAD)
+		RETURN_STATUS(nfserr_inval);
+	if (argp->ftype == NF3CHR || argp->ftype == NF3BLK) {
+		rdev = MKDEV(argp->major, argp->minor);
+		if (MAJOR(rdev) != argp->major ||
+		    MINOR(rdev) != argp->minor)
+			RETURN_STATUS(nfserr_inval);
+	} else
+		if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO)
+			RETURN_STATUS(nfserr_inval);
+
+	type = nfs3_ftypes[argp->ftype];
+	nfserr = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
+				    &argp->attrs, type, rdev, &resp->fh);
+
+	RETURN_STATUS(nfserr);
+}
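
The device-number check above relies on a round trip through MKDEV()/MAJOR()/MINOR(): if the client's major/minor pair does not survive the conversion, it cannot be represented in a dev_t and the MKNOD is rejected. A minimal sketch of that test:

	static int mknod_dev_representable(unsigned int maj, unsigned int mnr)
	{
		dev_t rdev = MKDEV(maj, mnr);

		return MAJOR(rdev) == maj && MINOR(rdev) == mnr;
	}
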
+
+/*
+ * Remove file/fifo/socket etc.
+ */
+static int
+nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+					  struct nfsd3_attrstat  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: REMOVE(3)   %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	/* Unlink. -S_IFDIR means file must not be a directory */
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR, argp->name, argp->len);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Remove a directory
+ */
+static int
+nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+					 struct nfsd3_attrstat  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: RMDIR(3)    %s %.*s\n",
+				SVCFH_fmt(&argp->fh),
+				argp->len,
+				argp->name);
+
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = nfsd_unlink(rqstp, &resp->fh, S_IFDIR, argp->name, argp->len);
+	RETURN_STATUS(nfserr);
+}
+
+static int
+nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp,
+					  struct nfsd3_renameres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: RENAME(3)   %s %.*s ->\n",
+				SVCFH_fmt(&argp->ffh),
+				argp->flen,
+				argp->fname);
+	dprintk("nfsd: -> %s %.*s\n",
+				SVCFH_fmt(&argp->tfh),
+				argp->tlen,
+				argp->tname);
+
+	fh_copy(&resp->ffh, &argp->ffh);
+	fh_copy(&resp->tfh, &argp->tfh);
+	nfserr = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
+				    &resp->tfh, argp->tname, argp->tlen);
+	RETURN_STATUS(nfserr);
+}
+
+static int
+nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp,
+					struct nfsd3_linkres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: LINK(3)     %s ->\n",
+				SVCFH_fmt(&argp->ffh));
+	dprintk("nfsd:   -> %s %.*s\n",
+				SVCFH_fmt(&argp->tfh),
+				argp->tlen,
+				argp->tname);
+
+	fh_copy(&resp->fh,  &argp->ffh);
+	fh_copy(&resp->tfh, &argp->tfh);
+	nfserr = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
+				  &resp->fh);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Read a portion of a directory.
+ */
+static int
+nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+					   struct nfsd3_readdirres  *resp)
+{
+	int		nfserr, count;
+
+	dprintk("nfsd: READDIR(3)  %s %d bytes at %d\n",
+				SVCFH_fmt(&argp->fh),
+				argp->count, (u32) argp->cookie);
+
+	/* Make sure we've room for the NULL ptr & eof flag, and shrink to
+	 * client read size */
+	count = (argp->count >> 2) - 2;
+
+	/* Read directory and encode entries on the fly */
+	fh_copy(&resp->fh, &argp->fh);
+
+	resp->buflen = count;
+	resp->common.err = nfs_ok;
+	resp->buffer = argp->buffer;
+	resp->rqstp = rqstp;
+	nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, 
+					&resp->common, nfs3svc_encode_entry);
+	memcpy(resp->verf, argp->verf, 8);
+	resp->count = resp->buffer - argp->buffer;
+	if (resp->offset)
+		xdr_encode_hyper(resp->offset, argp->cookie);
+
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Read a portion of a directory, including file handles and attrs.
+ * For now, we choose to ignore the dircount parameter.
+ */
+static int
+nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+					       struct nfsd3_readdirres  *resp)
+{
+	int	nfserr, count = 0;
+	loff_t	offset;
+	int	i;
+	caddr_t	page_addr = NULL;
+
+	dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n",
+				SVCFH_fmt(&argp->fh),
+				argp->count, (u32) argp->cookie);
+
+	/* Convert byte count to number of words (i.e. >> 2),
+	 * and reserve room for the NULL ptr & eof flag (-2 words) */
+	resp->count = (argp->count >> 2) - 2;
+
+	/* Read directory and encode entries on the fly */
+	fh_copy(&resp->fh, &argp->fh);
+
+	resp->common.err = nfs_ok;
+	resp->buffer = argp->buffer;
+	resp->buflen = resp->count;
+	resp->rqstp = rqstp;
+	offset = argp->cookie;
+	nfserr = nfsd_readdir(rqstp, &resp->fh,
+				     &offset,
+				     &resp->common,
+				     nfs3svc_encode_entry_plus);
+	memcpy(resp->verf, argp->verf, 8);
+	for (i=1; i<rqstp->rq_resused ; i++) {
+		page_addr = page_address(rqstp->rq_respages[i]);
+
+		if (((caddr_t)resp->buffer >= page_addr) &&
+		    ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) {
+			count += (caddr_t)resp->buffer - page_addr;
+			break;
+		}
+		count += PAGE_SIZE;
+	}
+	resp->count = count >> 2;
+	if (resp->offset) {
+		if (unlikely(resp->offset1)) {
+			/* we ended up with offset on a page boundary */
+			*resp->offset = htonl(offset >> 32);
+			*resp->offset1 = htonl(offset & 0xffffffff);
+			resp->offset1 = NULL;
+		} else {
+			xdr_encode_hyper(resp->offset, offset);
+		}
+	}
+
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Get file system stats
+ */
+static int
+nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+					   struct nfsd3_fsstatres *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: FSSTAT(3)   %s\n",
+				SVCFH_fmt(&argp->fh));
+
+	nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats);
+	fh_put(&argp->fh);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Get file system info
+ */
+static int
+nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+					   struct nfsd3_fsinfores *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: FSINFO(3)   %s\n",
+				SVCFH_fmt(&argp->fh));
+
+	resp->f_rtmax  = NFSSVC_MAXBLKSIZE;
+	resp->f_rtpref = NFSSVC_MAXBLKSIZE;
+	resp->f_rtmult = PAGE_SIZE;
+	resp->f_wtmax  = NFSSVC_MAXBLKSIZE;
+	resp->f_wtpref = NFSSVC_MAXBLKSIZE;
+	resp->f_wtmult = PAGE_SIZE;
+	resp->f_dtpref = PAGE_SIZE;
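+	/* default max file size is 4GB-1; refined from the superblock below
+	 * once the file handle has been verified */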
+	resp->f_maxfilesize = ~(u32) 0;
+	resp->f_properties = NFS3_FSF_DEFAULT;
+
+	nfserr = fh_verify(rqstp, &argp->fh, 0, MAY_NOP);
+
+	/* Check special features of the file system. May request
+	 * different read/write sizes for file systems known to have
+	 * problems with large blocks */
+	if (nfserr == 0) {
+		struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb;
+
+		/* Note that we don't care about remote filesystems here */
+		if (sb->s_magic == 0x4d44 /* MSDOS_SUPER_MAGIC */) {
+			resp->f_properties = NFS3_FSF_BILLYBOY;
+		}
+		resp->f_maxfilesize = sb->s_maxbytes;
+	}
+
+	fh_put(&argp->fh);
+	RETURN_STATUS(nfserr);
+}
+
+/*
+ * Get pathconf info for the specified file
+ */
+static int
+nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle      *argp,
+					     struct nfsd3_pathconfres *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: PATHCONF(3) %s\n",
+				SVCFH_fmt(&argp->fh));
+
+	/* Set default pathconf */
+	resp->p_link_max = 255;		/* at least */
+	resp->p_name_max = 255;		/* at least */
+	resp->p_no_trunc = 0;
+	resp->p_chown_restricted = 1;
+	resp->p_case_insensitive = 0;
+	resp->p_case_preserving = 1;
+
+	nfserr = fh_verify(rqstp, &argp->fh, 0, MAY_NOP);
+
+	if (nfserr == 0) {
+		struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb;
+
+		/* Note that we don't care about remote filesystems here */
+		switch (sb->s_magic) {
+		case EXT2_SUPER_MAGIC:
+			resp->p_link_max = EXT2_LINK_MAX;
+			resp->p_name_max = EXT2_NAME_LEN;
+			break;
+		case 0x4d44:	/* MSDOS_SUPER_MAGIC */
+			resp->p_case_insensitive = 1;
+			resp->p_case_preserving  = 0;
+			break;
+		}
+	}
+
+	fh_put(&argp->fh);
+	RETURN_STATUS(nfserr);
+}
+
+
+/*
+ * Commit a file (range) to stable storage.
+ */
+static int
+nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp,
+					   struct nfsd3_commitres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: COMMIT(3)   %s %u@%Lu\n",
+				SVCFH_fmt(&argp->fh),
+				argp->count,
+				(unsigned long long) argp->offset);
+
+	if (argp->offset > NFS_OFFSET_MAX)
+		RETURN_STATUS(nfserr_inval);
+
+	fh_copy(&resp->fh, &argp->fh);
+	nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count);
+
+	RETURN_STATUS(nfserr);
+}
+
+
+/*
+ * NFSv3 Server procedures.
+ * Only the results of non-idempotent operations are cached.
+ */
+#define nfs3svc_decode_voidargs		NULL
+#define nfs3svc_release_void		NULL
+#define nfs3svc_decode_fhandleargs	nfs3svc_decode_fhandle
+#define nfs3svc_encode_attrstatres	nfs3svc_encode_attrstat
+#define nfs3svc_encode_wccstatres	nfs3svc_encode_wccstat
+#define nfsd3_mkdirargs			nfsd3_createargs
+#define nfsd3_readdirplusargs		nfsd3_readdirargs
+#define nfsd3_fhandleargs		nfsd_fhandle
+#define nfsd3_fhandleres		nfsd3_attrstat
+#define nfsd3_attrstatres		nfsd3_attrstat
+#define nfsd3_wccstatres		nfsd3_attrstat
+#define nfsd3_createres			nfsd3_diropres
+#define nfsd3_voidres			nfsd3_voidargs
+struct nfsd3_voidargs { int dummy; };
+
+#define PROC(name, argt, rest, relt, cache, respsize)	\
+ { (svc_procfunc) nfsd3_proc_##name,		\
+   (kxdrproc_t) nfs3svc_decode_##argt##args,	\
+   (kxdrproc_t) nfs3svc_encode_##rest##res,	\
+   (kxdrproc_t) nfs3svc_release_##relt,		\
+   sizeof(struct nfsd3_##argt##args),		\
+   sizeof(struct nfsd3_##rest##res),		\
+   0,						\
+   cache,					\
+   respsize,					\
+ }
+
+#define ST 1		/* status*/
+#define FH 17		/* filehandle with length */
+#define AT 21		/* attributes */
+#define pAT (1+AT)	/* post attributes - conditional */
+#define WC (7+pAT)	/* WCC attributes */
+
+static struct svc_procedure		nfsd_procedures3[22] = {
+  PROC(null,	 void,		void,		void,	  RC_NOCACHE, ST),
+  PROC(getattr,	 fhandle,	attrstat,	fhandle,  RC_NOCACHE, ST+AT),
+  PROC(setattr,  sattr,		wccstat,	fhandle,  RC_REPLBUFF, ST+WC),
+  PROC(lookup,	 dirop,		dirop,		fhandle2, RC_NOCACHE, ST+FH+pAT+pAT),
+  PROC(access,	 access,	access,		fhandle,  RC_NOCACHE, ST+pAT+1),
+  PROC(readlink, readlink,	readlink,	fhandle,  RC_NOCACHE, ST+pAT+1+NFS3_MAXPATHLEN/4),
+  PROC(read,	 read,		read,		fhandle,  RC_NOCACHE, ST+pAT+4+NFSSVC_MAXBLKSIZE),
+  PROC(write,	 write,		write,		fhandle,  RC_REPLBUFF, ST+WC+4),
+  PROC(create,	 create,	create,		fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
+  PROC(mkdir,	 mkdir,		create,		fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
+  PROC(symlink,	 symlink,	create,		fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
+  PROC(mknod,	 mknod,		create,		fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
+  PROC(remove,	 dirop,		wccstat,	fhandle,  RC_REPLBUFF, ST+WC),
+  PROC(rmdir,	 dirop,		wccstat,	fhandle,  RC_REPLBUFF, ST+WC),
+  PROC(rename,	 rename,	rename,		fhandle2, RC_REPLBUFF, ST+WC+WC),
+  PROC(link,	 link,		link,		fhandle2, RC_REPLBUFF, ST+pAT+WC),
+  PROC(readdir,	 readdir,	readdir,	fhandle,  RC_NOCACHE, 0),
+  PROC(readdirplus,readdirplus,	readdir,	fhandle,  RC_NOCACHE, 0),
+  PROC(fsstat,	 fhandle,	fsstat,		void,     RC_NOCACHE, ST+pAT+2*6+1),
+  PROC(fsinfo,   fhandle,	fsinfo,		void,     RC_NOCACHE, ST+pAT+12),
+  PROC(pathconf, fhandle,	pathconf,	void,     RC_NOCACHE, ST+pAT+6),
+  PROC(commit,	 commit,	commit,		fhandle,  RC_NOCACHE, ST+WC+2),
+};
+
+struct svc_version	nfsd_version3 = {
+		.vs_vers	= 3,
+		.vs_nproc	= 22,
+		.vs_proc	= nfsd_procedures3,
+		.vs_dispatch	= nfsd_dispatch,
+		.vs_xdrsize	= NFS3_SVC_XDRSIZE,
+};
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
new file mode 100644
index 0000000..11f8068
--- /dev/null
+++ b/fs/nfsd/nfs3xdr.c
@@ -0,0 +1,1092 @@
+/*
+ * linux/fs/nfsd/nfs3xdr.c
+ *
+ * XDR support for nfsd/protocol version 3.
+ *
+ * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
+ *
+ * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()!
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/nfs3.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/mm.h>
+#include <linux/vfs.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/xdr3.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_XDR
+
+#ifdef NFSD_OPTIMIZE_SPACE
+# define inline
+#endif
+
+
+/*
+ * Mapping of S_IF* types to NFS file types
+ */
+static u32	nfs3_ftypes[] = {
+	NF3NON,  NF3FIFO, NF3CHR, NF3BAD,
+	NF3DIR,  NF3BAD,  NF3BLK, NF3BAD,
+	NF3REG,  NF3BAD,  NF3LNK, NF3BAD,
+	NF3SOCK, NF3BAD,  NF3LNK, NF3BAD,
+};
+
+/*
+ * XDR functions for basic NFS types
+ */
+static inline u32 *
+encode_time3(u32 *p, struct timespec *time)
+{
+	*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
+	return p;
+}
+
+static inline u32 *
+decode_time3(u32 *p, struct timespec *time)
+{
+	time->tv_sec = ntohl(*p++);
+	time->tv_nsec = ntohl(*p++);
+	return p;
+}
+
+static inline u32 *
+decode_fh(u32 *p, struct svc_fh *fhp)
+{
+	unsigned int size;
+	fh_init(fhp, NFS3_FHSIZE);
+	size = ntohl(*p++);
+	if (size > NFS3_FHSIZE)
+		return NULL;
+
+	memcpy(&fhp->fh_handle.fh_base, p, size);
+	fhp->fh_handle.fh_size = size;
+	return p + XDR_QUADLEN(size);
+}
+
+static inline u32 *
+encode_fh(u32 *p, struct svc_fh *fhp)
+{
+	unsigned int size = fhp->fh_handle.fh_size;
+	*p++ = htonl(size);
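+	/* clear the last word so the XDR padding bytes beyond 'size' are zero */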
+	if (size) p[XDR_QUADLEN(size)-1]=0;
+	memcpy(p, &fhp->fh_handle.fh_base, size);
+	return p + XDR_QUADLEN(size);
+}
+
+/*
+ * Decode a file name and make sure that the path contains
+ * no slashes or null bytes.
+ */
+static inline u32 *
+decode_filename(u32 *p, char **namp, int *lenp)
+{
+	char		*name;
+	int		i;
+
+	if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) {
+		for (i = 0, name = *namp; i < *lenp; i++, name++) {
+			if (*name == '\0' || *name == '/')
+				return NULL;
+		}
+	}
+
+	return p;
+}
+
+static inline u32 *
+decode_sattr3(u32 *p, struct iattr *iap)
+{
+	u32	tmp;
+
+	iap->ia_valid = 0;
+
+	if (*p++) {
+		iap->ia_valid |= ATTR_MODE;
+		iap->ia_mode = ntohl(*p++);
+	}
+	if (*p++) {
+		iap->ia_valid |= ATTR_UID;
+		iap->ia_uid = ntohl(*p++);
+	}
+	if (*p++) {
+		iap->ia_valid |= ATTR_GID;
+		iap->ia_gid = ntohl(*p++);
+	}
+	if (*p++) {
+		u64	newsize;
+
+		iap->ia_valid |= ATTR_SIZE;
+		p = xdr_decode_hyper(p, &newsize);
+		if (newsize <= NFS_OFFSET_MAX)
+			iap->ia_size = newsize;
+		else
+			iap->ia_size = NFS_OFFSET_MAX;
+	}
+	if ((tmp = ntohl(*p++)) == 1) {	/* set to server time */
+		iap->ia_valid |= ATTR_ATIME;
+	} else if (tmp == 2) {		/* set to client time */
+		iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
+		iap->ia_atime.tv_sec = ntohl(*p++);
+		iap->ia_atime.tv_nsec = ntohl(*p++);
+	}
+	if ((tmp = ntohl(*p++)) == 1) {	/* set to server time */
+		iap->ia_valid |= ATTR_MTIME;
+	} else if (tmp == 2) {		/* set to client time */
+		iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
+		iap->ia_mtime.tv_sec = ntohl(*p++);
+		iap->ia_mtime.tv_nsec = ntohl(*p++);
+	}
+	return p;
+}
+
+static inline u32 *
+encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+{
+	struct vfsmount *mnt = fhp->fh_export->ex_mnt;
+	struct dentry	*dentry = fhp->fh_dentry;
+	struct kstat stat;
+	struct timespec time;
+
+	vfs_getattr(mnt, dentry, &stat);
+
+	*p++ = htonl(nfs3_ftypes[(stat.mode & S_IFMT) >> 12]);
+	*p++ = htonl((u32) stat.mode);
+	*p++ = htonl((u32) stat.nlink);
+	*p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
+	*p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
+	if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) {
+		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
+	} else {
+		p = xdr_encode_hyper(p, (u64) stat.size);
+	}
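+	/* stat.blocks counts 512-byte sectors; report space used in bytes */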
+	p = xdr_encode_hyper(p, ((u64)stat.blocks) << 9);
+	*p++ = htonl((u32) MAJOR(stat.rdev));
+	*p++ = htonl((u32) MINOR(stat.rdev));
+	if (is_fsid(fhp, rqstp->rq_reffh))
+		p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
+	else
+		p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat.dev));
+	p = xdr_encode_hyper(p, (u64) stat.ino);
+	p = encode_time3(p, &stat.atime);
+	lease_get_mtime(dentry->d_inode, &time); 
+	p = encode_time3(p, &time);
+	p = encode_time3(p, &stat.ctime);
+
+	return p;
+}
+
+static inline u32 *
+encode_saved_post_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+{
+	struct inode	*inode = fhp->fh_dentry->d_inode;
+
+	/* Attributes to follow */
+	*p++ = xdr_one;
+
+	*p++ = htonl(nfs3_ftypes[(fhp->fh_post_mode & S_IFMT) >> 12]);
+	*p++ = htonl((u32) fhp->fh_post_mode);
+	*p++ = htonl((u32) fhp->fh_post_nlink);
+	*p++ = htonl((u32) nfsd_ruid(rqstp, fhp->fh_post_uid));
+	*p++ = htonl((u32) nfsd_rgid(rqstp, fhp->fh_post_gid));
+	if (S_ISLNK(fhp->fh_post_mode) && fhp->fh_post_size > NFS3_MAXPATHLEN) {
+		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
+	} else {
+		p = xdr_encode_hyper(p, (u64) fhp->fh_post_size);
+	}
+	p = xdr_encode_hyper(p, ((u64)fhp->fh_post_blocks) << 9);
+	*p++ = fhp->fh_post_rdev[0];
+	*p++ = fhp->fh_post_rdev[1];
+	if (is_fsid(fhp, rqstp->rq_reffh))
+		p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
+	else
+		p = xdr_encode_hyper(p, (u64)huge_encode_dev(inode->i_sb->s_dev));
+	p = xdr_encode_hyper(p, (u64) inode->i_ino);
+	p = encode_time3(p, &fhp->fh_post_atime);
+	p = encode_time3(p, &fhp->fh_post_mtime);
+	p = encode_time3(p, &fhp->fh_post_ctime);
+
+	return p;
+}
+
+/*
+ * Encode post-operation attributes.
+ * The inode may be NULL if the call failed because of a stale file
+ * handle. In this case, no attributes are returned.
+ */
+static u32 *
+encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+{
+	struct dentry *dentry = fhp->fh_dentry;
+	if (dentry && dentry->d_inode != NULL) {
+		*p++ = xdr_one;		/* attributes follow */
+		return encode_fattr3(rqstp, p, fhp);
+	}
+	*p++ = xdr_zero;
+	return p;
+}
+
+/*
+ * Encode weak cache consistency data
+ */
+static u32 *
+encode_wcc_data(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+{
+	struct dentry	*dentry = fhp->fh_dentry;
+
+	if (dentry && dentry->d_inode && fhp->fh_post_saved) {
+		if (fhp->fh_pre_saved) {
+			*p++ = xdr_one;
+			p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size);
+			p = encode_time3(p, &fhp->fh_pre_mtime);
+			p = encode_time3(p, &fhp->fh_pre_ctime);
+		} else {
+			*p++ = xdr_zero;
+		}
+		return encode_saved_post_attr(rqstp, p, fhp);
+	}
+	/* no pre- or post-attrs */
+	*p++ = xdr_zero;
+	return encode_post_op_attr(rqstp, p, fhp);
+}
+
+
+/*
+ * XDR decode functions
+ */
+int
+nfs3svc_decode_fhandle(struct svc_rqst *rqstp, u32 *p, struct nfsd_fhandle *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_sattrargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_sattr3(p, &args->attrs)))
+		return 0;
+
+	if ((args->check_guard = ntohl(*p++)) != 0) { 
+		struct timespec time; 
+		p = decode_time3(p, &time);
+		args->guardtime = time.tv_sec;
+	}
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_diropargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_diropargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_accessargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_accessargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	args->access = ntohl(*p++);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readargs *args)
+{
+	unsigned int len;
+	int v,pn;
+
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = xdr_decode_hyper(p, &args->offset)))
+		return 0;
+
+	len = args->count = ntohl(*p++);
+
+	if (len > NFSSVC_MAXBLKSIZE)
+		len = NFSSVC_MAXBLKSIZE;
+
+	/* set up the kvec */
+	v=0;
+	while (len > 0) {
+		pn = rqstp->rq_resused;
+		svc_take_page(rqstp);
+		args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
+		args->vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE;
+		len -= args->vec[v].iov_len;
+		v++;
+	}
+	args->vlen = v;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_writeargs *args)
+{
+	unsigned int len, v, hdr;
+
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = xdr_decode_hyper(p, &args->offset)))
+		return 0;
+
+	args->count = ntohl(*p++);
+	args->stable = ntohl(*p++);
+	len = args->len = ntohl(*p++);
+
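+	/* 'hdr' is how far into the first receive buffer the decode has gone;
+	 * the request must contain at least that many header bytes plus the
+	 * claimed 'len' bytes of write data. */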
+	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+	if (rqstp->rq_arg.len < len + hdr)
+		return 0;
+
+	args->vec[0].iov_base = (void*)p;
+	args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+
+	if (len > NFSSVC_MAXBLKSIZE)
+		len = NFSSVC_MAXBLKSIZE;
+	v=  0;
+	while (len > args->vec[v].iov_len) {
+		len -= args->vec[v].iov_len;
+		v++;
+		args->vec[v].iov_base = page_address(rqstp->rq_argpages[v]);
+		args->vec[v].iov_len = PAGE_SIZE;
+	}
+	args->vec[v].iov_len = len;
+	args->vlen = v+1;
+
+	return args->count == args->len && args->vec[0].iov_len > 0;
+}
+
+int
+nfs3svc_decode_createargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_createargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len)))
+		return 0;
+
+	switch (args->createmode = ntohl(*p++)) {
+	case NFS3_CREATE_UNCHECKED:
+	case NFS3_CREATE_GUARDED:
+		if (!(p = decode_sattr3(p, &args->attrs)))
+			return 0;
+		break;
+	case NFS3_CREATE_EXCLUSIVE:
+		args->verf = p;
+		p += 2;
+		break;
+	default:
+		return 0;
+	}
+
+	return xdr_argsize_check(rqstp, p);
+}
+int
+nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_createargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len))
+	 || !(p = decode_sattr3(p, &args->attrs)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_symlinkargs *args)
+{
+	unsigned int len;
+	int avail;
+	char *old, *new;
+	struct kvec *vec;
+
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_filename(p, &args->fname, &args->flen))
+	 || !(p = decode_sattr3(p, &args->attrs))
+		)
+		return 0;
+	/* Now decode the pathname, which might be larger than the first page.
+	 * As we have to check for NULs anyway, we copy it into a new page.
+	 * This page appears in the rq_res.pages list but, as page_len is
+	 * always 0, it won't get in the way.
+	 */
+	svc_take_page(rqstp);
+	len = ntohl(*p++);
+	if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
+		return 0;
+	args->tname = new = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
+	args->tlen = len;
+	/* first copy and check from the first page */
+	old = (char*)p;
+	vec = &rqstp->rq_arg.head[0];
+	avail = vec->iov_len - (old - (char*)vec->iov_base);
+	while (len && avail && *old) {
+		*new++ = *old++;
+		len--;
+		avail--;
+	}
+	/* now copy next page if there is one */
+	if (len && !avail && rqstp->rq_arg.page_len) {
+		avail = rqstp->rq_arg.page_len;
+		if (avail > PAGE_SIZE) avail = PAGE_SIZE;
+		old = page_address(rqstp->rq_arg.pages[0]);
+	}
+	while (len && avail && *old) {
+		*new++ = *old++;
+		len--;
+		avail--;
+	}
+	*new = '\0';
+	if (len)
+		return 0;
+
+	return 1;
+}
+
+int
+nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_mknodargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len)))
+		return 0;
+
+	args->ftype = ntohl(*p++);
+
+	if (args->ftype == NF3BLK  || args->ftype == NF3CHR
+	 || args->ftype == NF3SOCK || args->ftype == NF3FIFO) {
+		if (!(p = decode_sattr3(p, &args->attrs)))
+			return 0;
+	}
+
+	if (args->ftype == NF3BLK || args->ftype == NF3CHR) {
+		args->major = ntohl(*p++);
+		args->minor = ntohl(*p++);
+	}
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_renameargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_renameargs *args)
+{
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_filename(p, &args->fname, &args->flen))
+	 || !(p = decode_fh(p, &args->tfh))
+	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readlinkargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	svc_take_page(rqstp);
+	args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_linkargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_linkargs *args)
+{
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_fh(p, &args->tfh))
+	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readdirargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	p = xdr_decode_hyper(p, &args->cookie);
+	args->verf   = p; p += 2;
+	args->dircount = ~0;
+	args->count  = ntohl(*p++);
+
+	if (args->count > PAGE_SIZE)
+		args->count = PAGE_SIZE;
+
+	svc_take_page(rqstp);
+	args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readdirargs *args)
+{
+	int len, pn;
+
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	p = xdr_decode_hyper(p, &args->cookie);
+	args->verf     = p; p += 2;
+	args->dircount = ntohl(*p++);
+	args->count    = ntohl(*p++);
+
+	len = (args->count > NFSSVC_MAXBLKSIZE) ? NFSSVC_MAXBLKSIZE :
+						  args->count;
+	args->count = len;
+
+	while (len > 0) {
+		pn = rqstp->rq_resused;
+		svc_take_page(rqstp);
+		if (!args->buffer)
+			args->buffer = page_address(rqstp->rq_respages[pn]);
+		len -= PAGE_SIZE;
+	}
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfs3svc_decode_commitargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_commitargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	p = xdr_decode_hyper(p, &args->offset);
+	args->count = ntohl(*p++);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+/*
+ * XDR encode functions
+ */
+/*
+ * There must be an encoding function for void results so svc_process
+ * will work properly.
+ */
+int
+nfs3svc_encode_voidres(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* GETATTR */
+int
+nfs3svc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_attrstat *resp)
+{
+	if (resp->status == 0)
+		p = encode_fattr3(rqstp, p, &resp->fh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* SETATTR, REMOVE, RMDIR */
+int
+nfs3svc_encode_wccstat(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_attrstat *resp)
+{
+	p = encode_wcc_data(rqstp, p, &resp->fh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* LOOKUP */
+int
+nfs3svc_encode_diropres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_diropres *resp)
+{
+	if (resp->status == 0) {
+		p = encode_fh(p, &resp->fh);
+		p = encode_post_op_attr(rqstp, p, &resp->fh);
+	}
+	p = encode_post_op_attr(rqstp, p, &resp->dirfh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* ACCESS */
+int
+nfs3svc_encode_accessres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_accessres *resp)
+{
+	p = encode_post_op_attr(rqstp, p, &resp->fh);
+	if (resp->status == 0)
+		*p++ = htonl(resp->access);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* READLINK */
+int
+nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readlinkres *resp)
+{
+	p = encode_post_op_attr(rqstp, p, &resp->fh);
+	if (resp->status == 0) {
+		*p++ = htonl(resp->len);
+		xdr_ressize_check(rqstp, p);
+		rqstp->rq_res.page_len = resp->len;
+		if (resp->len & 3) {
+			/* need to pad the tail */
+			rqstp->rq_restailpage = 0;
+			rqstp->rq_res.tail[0].iov_base = p;
+			*p = 0;
+			rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
+		}
+		return 1;
+	} else
+		return xdr_ressize_check(rqstp, p);
+}
+
+/* READ */
+int
+nfs3svc_encode_readres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readres *resp)
+{
+	p = encode_post_op_attr(rqstp, p, &resp->fh);
+	if (resp->status == 0) {
+		*p++ = htonl(resp->count);
+		*p++ = htonl(resp->eof);
+		*p++ = htonl(resp->count);	/* xdr opaque count */
+		xdr_ressize_check(rqstp, p);
+		/* now update rqstp->rq_res to reflect the data as well */
+		rqstp->rq_res.page_len = resp->count;
+		if (resp->count & 3) {
+			/* need to pad the tail */
+			rqstp->rq_restailpage = 0;
+			rqstp->rq_res.tail[0].iov_base = p;
+			*p = 0;
+			rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3);
+		}
+		return 1;
+	} else
+		return xdr_ressize_check(rqstp, p);
+}
+
+/* WRITE */
+int
+nfs3svc_encode_writeres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_writeres *resp)
+{
+	p = encode_wcc_data(rqstp, p, &resp->fh);
+	if (resp->status == 0) {
+		*p++ = htonl(resp->count);
+		*p++ = htonl(resp->committed);
+		*p++ = htonl(nfssvc_boot.tv_sec);
+		*p++ = htonl(nfssvc_boot.tv_usec);
+	}
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* CREATE, MKDIR, SYMLINK, MKNOD */
+int
+nfs3svc_encode_createres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_diropres *resp)
+{
+	if (resp->status == 0) {
+		*p++ = xdr_one;
+		p = encode_fh(p, &resp->fh);
+		p = encode_post_op_attr(rqstp, p, &resp->fh);
+	}
+	p = encode_wcc_data(rqstp, p, &resp->dirfh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* RENAME */
+int
+nfs3svc_encode_renameres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_renameres *resp)
+{
+	p = encode_wcc_data(rqstp, p, &resp->ffh);
+	p = encode_wcc_data(rqstp, p, &resp->tfh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* LINK */
+int
+nfs3svc_encode_linkres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_linkres *resp)
+{
+	p = encode_post_op_attr(rqstp, p, &resp->fh);
+	p = encode_wcc_data(rqstp, p, &resp->tfh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* READDIR */
+int
+nfs3svc_encode_readdirres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_readdirres *resp)
+{
+	p = encode_post_op_attr(rqstp, p, &resp->fh);
+
+	if (resp->status == 0) {
+		/* stupid readdir cookie */
+		memcpy(p, resp->verf, 8); p += 2;
+		xdr_ressize_check(rqstp, p);
+		if (rqstp->rq_res.head[0].iov_len + (2<<2) > PAGE_SIZE)
+			return 1; /* No room for trailer */
+		rqstp->rq_res.page_len = (resp->count) << 2;
+
+		/* add the 'tail' to the end of the 'head' page - page 0. */
+		rqstp->rq_restailpage = 0;
+		rqstp->rq_res.tail[0].iov_base = p;
+		*p++ = 0;		/* no more entries */
+		*p++ = htonl(resp->common.err == nfserr_eof);
+		rqstp->rq_res.tail[0].iov_len = 2<<2;
+		return 1;
+	} else
+		return xdr_ressize_check(rqstp, p);
+}
+
+static inline u32 *
+encode_entry_baggage(struct nfsd3_readdirres *cd, u32 *p, const char *name,
+	     int namlen, ino_t ino)
+{
+	*p++ = xdr_one;				 /* mark entry present */
+	p    = xdr_encode_hyper(p, ino);	 /* file id */
+	p    = xdr_encode_array(p, name, namlen);/* name length & name */
+
+	cd->offset = p;				/* remember pointer */
+	p = xdr_encode_hyper(p, NFS_OFFSET_MAX);/* offset of next entry */
+
+	return p;
+}
+
+static inline u32 *
+encode_entryplus_baggage(struct nfsd3_readdirres *cd, u32 *p,
+		struct svc_fh *fhp)
+{
+		p = encode_post_op_attr(cd->rqstp, p, fhp);
+		*p++ = xdr_one;			/* yes, a file handle follows */
+		p = encode_fh(p, fhp);
+		fh_put(fhp);
+		return p;
+}
+
+static int
+compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
+		const char *name, int namlen)
+{
+	struct svc_export	*exp;
+	struct dentry		*dparent, *dchild;
+	int rv = 0;
+
+	dparent = cd->fh.fh_dentry;
+	exp  = cd->fh.fh_export;
+
+	fh_init(fhp, NFS3_FHSIZE);
+	if (isdotent(name, namlen)) {
+		if (namlen == 2) {
+			dchild = dget_parent(dparent);
+			if (dchild == dparent) {
+				/* filesystem root - cannot return filehandle for ".." */
+				dput(dchild);
+				return 1;
+			}
+		} else
+			dchild = dget(dparent);
+	} else
+		dchild = lookup_one_len(name, dparent, namlen);
+	if (IS_ERR(dchild))
+		return 1;
+	if (d_mountpoint(dchild) ||
+	    fh_compose(fhp, exp, dchild, &cd->fh) != 0 ||
+	    !dchild->d_inode)
+		rv = 1;
+	dput(dchild);
+	return rv;
+}
+
+/*
+ * Encode a directory entry. This one works for both normal readdir
+ * and readdirplus.
+ * The normal readdir reply requires 2 (fileid) + 1 (stringlen)
+ * + string + 2 (cookie) + 1 (next) words, i.e. 6 + strlen.
+ * 
+ * The readdirplus baggage is 1+21 words for post_op_attr, plus the
+ * file handle.
+ */
+
+#define NFS3_ENTRY_BAGGAGE	(2 + 1 + 2 + 1)
+#define NFS3_ENTRYPLUS_BAGGAGE	(1 + 21 + 1 + (NFS3_FHSIZE >> 2))
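+/* For example, a 12-character name occupies XDR_QUADLEN(12) = 3 words, so a
+ * plain readdir entry for it is budgeted at 3 + 6 = 9 words (36 bytes). */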
+static int
+encode_entry(struct readdir_cd *ccd, const char *name,
+	     int namlen, off_t offset, ino_t ino, unsigned int d_type, int plus)
+{
+	struct nfsd3_readdirres *cd = container_of(ccd, struct nfsd3_readdirres,
+		       					common);
+	u32		*p = cd->buffer;
+	caddr_t		curr_page_addr = NULL;
+	int		pn;		/* current page number */
+	int		slen;		/* string (name) length */
+	int		elen;		/* estimated entry length in words */
+	int		num_entry_words = 0;	/* actual number of words */
+
+	if (cd->offset) {
+		u64 offset64 = offset;
+
+		if (unlikely(cd->offset1)) {
+			/* we ended up with offset on a page boundary */
+			*cd->offset = htonl(offset64 >> 32);
+			*cd->offset1 = htonl(offset64 & 0xffffffff);
+			cd->offset1 = NULL;
+		} else {
+			xdr_encode_hyper(cd->offset, (u64) offset);
+		}
+	}
+
+	/*
+	dprintk("encode_entry(%.*s @%ld%s)\n",
+		namlen, name, (long) offset, plus? " plus" : "");
+	 */
+
+	/* truncate filename if too long */
+	if (namlen > NFS3_MAXNAMLEN)
+		namlen = NFS3_MAXNAMLEN;
+
+	slen = XDR_QUADLEN(namlen);
+	elen = slen + NFS3_ENTRY_BAGGAGE
+		+ (plus? NFS3_ENTRYPLUS_BAGGAGE : 0);
+
+	if (cd->buflen < elen) {
+		cd->common.err = nfserr_toosmall;
+		return -EINVAL;
+	}
+
+	/* determine which page in rq_respages[] we are currently filling */
+	for (pn=1; pn < cd->rqstp->rq_resused; pn++) {
+		curr_page_addr = page_address(cd->rqstp->rq_respages[pn]);
+
+		if (((caddr_t)cd->buffer >= curr_page_addr) &&
+		    ((caddr_t)cd->buffer <  curr_page_addr + PAGE_SIZE))
+			break;
+	}
+
+	if ((caddr_t)(cd->buffer + elen) < (curr_page_addr + PAGE_SIZE)) {
+		/* encode entry in current page */
+
+		p = encode_entry_baggage(cd, p, name, namlen, ino);
+
+		/* throw in readdirplus baggage */
+		if (plus) {
+			struct svc_fh	fh;
+
+			if (compose_entry_fh(cd, &fh, name, namlen) > 0) {
+				*p++ = 0;
+				*p++ = 0;
+			} else
+				p = encode_entryplus_baggage(cd, p, &fh);
+		}
+		num_entry_words = p - cd->buffer;
+	} else if (cd->rqstp->rq_respages[pn+1] != NULL) {
+		/* temporarily encode entry into next page, then move back to
+		 * current and next page in rq_respages[] */
+		u32 *p1, *tmp;
+		int len1, len2;
+
+		/* grab next page for temporary storage of entry */
+		p1 = tmp = page_address(cd->rqstp->rq_respages[pn+1]);
+
+		p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
+
+		/* throw in readdirplus baggage */
+		if (plus) {
+			struct svc_fh	fh;
+
+			if (compose_entry_fh(cd, &fh, name, namlen) > 0) {
+				/* zero out the filehandle */
+				*p1++ = 0;
+				*p1++ = 0;
+			} else
+				p1 = encode_entryplus_baggage(cd, p1, &fh);
+		}
+
+		/* determine entry word length and lengths to go in pages */
+		num_entry_words = p1 - tmp;
+		len1 = curr_page_addr + PAGE_SIZE - (caddr_t)cd->buffer;
+		if ((num_entry_words << 2) < len1) {
+			/* the actual number of words in the entry is less
+			 * than elen and can still fit in the current page
+			 */
+			memmove(p, tmp, num_entry_words << 2);
+			p += num_entry_words;
+
+			/* update offset */
+			cd->offset = cd->buffer + (cd->offset - tmp);
+		} else {
+			unsigned int offset_r = (cd->offset - tmp) << 2;
+
+			/* update pointer to offset location.
+			 * This is a 64bit quantity, so we need to
+			 * deal with 3 cases:
+			 *  -	entirely in first page
+			 *  -	entirely in second page
+			 *  -	4 bytes in each page
+			 */
+			if (offset_r + 8 <= len1) {
+				cd->offset = p + (cd->offset - tmp);
+			} else if (offset_r >= len1) {
+				cd->offset -= len1 >> 2;
+			} else {
+				/* sitting on the fence */
+				BUG_ON(offset_r != len1 - 4);
+				cd->offset = p + (cd->offset - tmp);
+				cd->offset1 = tmp;
+			}
+
+			len2 = (num_entry_words << 2) - len1;
+
+			/* move from temp page to current and next pages */
+			memmove(p, tmp, len1);
+			memmove(tmp, (caddr_t)tmp+len1, len2);
+
+			p = tmp + (len2 >> 2);
+		}
+	}
+	else {
+		cd->common.err = nfserr_toosmall;
+		return -EINVAL;
+	}
+
+	cd->buflen -= num_entry_words;
+	cd->buffer = p;
+	cd->common.err = nfs_ok;
+	return 0;
+
+}
+
+int
+nfs3svc_encode_entry(struct readdir_cd *cd, const char *name,
+		     int namlen, loff_t offset, ino_t ino, unsigned int d_type)
+{
+	return encode_entry(cd, name, namlen, offset, ino, d_type, 0);
+}
+
+int
+nfs3svc_encode_entry_plus(struct readdir_cd *cd, const char *name,
+			  int namlen, loff_t offset, ino_t ino, unsigned int d_type)
+{
+	return encode_entry(cd, name, namlen, offset, ino, d_type, 1);
+}
+
+/* FSSTAT */
+int
+nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_fsstatres *resp)
+{
+	struct kstatfs	*s = &resp->stats;
+	u64		bs = s->f_bsize;
+
+	*p++ = xdr_zero;	/* no post_op_attr */
+
+	if (resp->status == 0) {
+		p = xdr_encode_hyper(p, bs * s->f_blocks);	/* total bytes */
+		p = xdr_encode_hyper(p, bs * s->f_bfree);	/* free bytes */
+		p = xdr_encode_hyper(p, bs * s->f_bavail);	/* user available bytes */
+		p = xdr_encode_hyper(p, s->f_files);	/* total inodes */
+		p = xdr_encode_hyper(p, s->f_ffree);	/* free inodes */
+		p = xdr_encode_hyper(p, s->f_ffree);	/* user available inodes */
+		*p++ = htonl(resp->invarsec);	/* mean unchanged time */
+	}
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* FSINFO */
+int
+nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_fsinfores *resp)
+{
+	*p++ = xdr_zero;	/* no post_op_attr */
+
+	if (resp->status == 0) {
+		*p++ = htonl(resp->f_rtmax);
+		*p++ = htonl(resp->f_rtpref);
+		*p++ = htonl(resp->f_rtmult);
+		*p++ = htonl(resp->f_wtmax);
+		*p++ = htonl(resp->f_wtpref);
+		*p++ = htonl(resp->f_wtmult);
+		*p++ = htonl(resp->f_dtpref);
+		p = xdr_encode_hyper(p, resp->f_maxfilesize);
+		*p++ = xdr_one;
+		*p++ = xdr_zero;
+		*p++ = htonl(resp->f_properties);
+	}
+
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* PATHCONF */
+int
+nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_pathconfres *resp)
+{
+	*p++ = xdr_zero;	/* no post_op_attr */
+
+	if (resp->status == 0) {
+		*p++ = htonl(resp->p_link_max);
+		*p++ = htonl(resp->p_name_max);
+		*p++ = htonl(resp->p_no_trunc);
+		*p++ = htonl(resp->p_chown_restricted);
+		*p++ = htonl(resp->p_case_insensitive);
+		*p++ = htonl(resp->p_case_preserving);
+	}
+
+	return xdr_ressize_check(rqstp, p);
+}
+
+/* COMMIT */
+int
+nfs3svc_encode_commitres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_commitres *resp)
+{
+	p = encode_wcc_data(rqstp, p, &resp->fh);
+	/* Write verifier */
+	if (resp->status == 0) {
+		*p++ = htonl(nfssvc_boot.tv_sec);
+		*p++ = htonl(nfssvc_boot.tv_usec);
+	}
+	return xdr_ressize_check(rqstp, p);
+}
+
+/*
+ * XDR release functions
+ */
+int
+nfs3svc_release_fhandle(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_attrstat *resp)
+{
+	fh_put(&resp->fh);
+	return 1;
+}
+
+int
+nfs3svc_release_fhandle2(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd3_fhandle_pair *resp)
+{
+	fh_put(&resp->fh1);
+	fh_put(&resp->fh2);
+	return 1;
+}
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
new file mode 100644
index 0000000..11ebf6c
--- /dev/null
+++ b/fs/nfsd/nfs4acl.c
@@ -0,0 +1,954 @@
+/*
+ *  fs/nfs4acl/acl.c
+ *
+ *  Common NFSv4 ACL handling code.
+ *
+ *  Copyright (c) 2002, 2003 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Marius Aamodt Eriksen <marius@umich.edu>
+ *  Jeff Sedlak <jsedlak@umich.edu>
+ *  J. Bruce Fields <bfields@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/nfs_fs.h>
+#include <linux/posix_acl.h>
+#include <linux/nfs4.h>
+#include <linux/nfs4_acl.h>
+
+
+/* mode bit translations: */
+#define NFS4_READ_MODE (NFS4_ACE_READ_DATA)
+#define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA)
+#define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE
+#define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE)
+#define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)
+
+/* We don't support these bits; insist they be neither allowed nor denied */
+#define NFS4_MASK_UNSUPP (NFS4_ACE_DELETE | NFS4_ACE_WRITE_OWNER \
+		| NFS4_ACE_READ_NAMED_ATTRS | NFS4_ACE_WRITE_NAMED_ATTRS)
+
+/* flags used to simulate posix default ACLs */
+#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
+		| NFS4_ACE_DIRECTORY_INHERIT_ACE | NFS4_ACE_INHERIT_ONLY_ACE)
+
+#define MASK_EQUAL(mask1, mask2) \
+	( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) )
+
+static u32
+mask_from_posix(unsigned short perm, unsigned int flags)
+{
+	int mask = NFS4_ANYONE_MODE;
+
+	if (flags & NFS4_ACL_OWNER)
+		mask |= NFS4_OWNER_MODE;
+	if (perm & ACL_READ)
+		mask |= NFS4_READ_MODE;
+	if (perm & ACL_WRITE)
+		mask |= NFS4_WRITE_MODE;
+	if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
+		mask |= NFS4_ACE_DELETE_CHILD;
+	if (perm & ACL_EXECUTE)
+		mask |= NFS4_EXECUTE_MODE;
+	return mask;
+}
+
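+/* The deny mask is the complement of the allow mask, minus the bits we do
+ * not support and, for non-directories, minus DELETE_CHILD. */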
+static u32
+deny_mask(u32 allow_mask, unsigned int flags)
+{
+	u32 ret = ~allow_mask & ~NFS4_MASK_UNSUPP;
+	if (!(flags & NFS4_ACL_DIR))
+		ret &= ~NFS4_ACE_DELETE_CHILD;
+	return ret;
+}
+
+/* XXX: modify functions to return NFS errors; they're only ever
+ * used by nfs code, after all.... */
+
+static int
+mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
+{
+	u32 ignore = 0;
+
+	if (!(flags & NFS4_ACL_DIR))
+		ignore |= NFS4_ACE_DELETE_CHILD; /* ignore it */
+	perm |= ignore;
+	*mode = 0;
+	if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
+		*mode |= ACL_READ;
+	if ((perm & NFS4_WRITE_MODE) == NFS4_WRITE_MODE)
+		*mode |= ACL_WRITE;
+	if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
+		*mode |= ACL_EXECUTE;
+	if (!MASK_EQUAL(perm, ignore|mask_from_posix(*mode, flags)))
+		return -EINVAL;
+	return 0;
+}
+
+struct ace_container {
+	struct nfs4_ace  *ace;
+	struct list_head  ace_l;
+};
+
+static short ace2type(struct nfs4_ace *);
+static int _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int);
+static struct posix_acl *_nfsv4_to_posix_one(struct nfs4_acl *, unsigned int);
+int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
+int nfs4_acl_split(struct nfs4_acl *, struct nfs4_acl *);
+
+struct nfs4_acl *
+nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl,
+			unsigned int flags)
+{
+	struct nfs4_acl *acl;
+	int error = -EINVAL;
+
+	if ((pacl != NULL &&
+		(posix_acl_valid(pacl) < 0 || pacl->a_count == 0)) ||
+	    (dpacl != NULL &&
+		(posix_acl_valid(dpacl) < 0 || dpacl->a_count == 0)))
+		goto out_err;
+
+	acl = nfs4_acl_new();
+	if (acl == NULL) {
+		error = -ENOMEM;
+		goto out_err;
+	}
+
+	if (pacl != NULL) {
+		error = _posix_to_nfsv4_one(pacl, acl,
+						flags & ~NFS4_ACL_TYPE_DEFAULT);
+		if (error < 0)
+			goto out_acl;
+	}
+
+	if (dpacl != NULL) {
+		error = _posix_to_nfsv4_one(dpacl, acl,
+						flags | NFS4_ACL_TYPE_DEFAULT);
+		if (error < 0)
+			goto out_acl;
+	}
+
+	return acl;
+
+out_acl:
+	nfs4_acl_free(acl);
+out_err:
+	acl = ERR_PTR(error);
+
+	return acl;
+}
+
+static int
+nfs4_acl_add_pair(struct nfs4_acl *acl, int eflag, u32 mask, int whotype,
+		uid_t owner, unsigned int flags)
+{
+	int error;
+
+	error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
+				 eflag, mask, whotype, owner);
+	if (error < 0)
+		return error;
+	error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+				eflag, deny_mask(mask, flags), whotype, owner);
+	return error;
+}
+
+/* We assume the acl has been verified with posix_acl_valid. */
+static int
+_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
+						unsigned int flags)
+{
+	struct posix_acl_entry *pa, *pe, *group_owner_entry;
+	int error = -EINVAL;
+	u32 mask, mask_mask;
+	int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ?
+					NFS4_INHERITANCE_FLAGS : 0);
+
+	BUG_ON(pacl->a_count < 3);
+	pe = pacl->a_entries + pacl->a_count;
+	pa = pe - 2; /* if mask entry exists, it's second from the last. */
+	if (pa->e_tag == ACL_MASK)
+		mask_mask = deny_mask(mask_from_posix(pa->e_perm, flags), flags);
+	else
+		mask_mask = 0;
+
+	pa = pacl->a_entries;
+	BUG_ON(pa->e_tag != ACL_USER_OBJ);
+	mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
+	error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_OWNER, 0, flags);
+	if (error < 0)
+		goto out;
+	pa++;
+
+	while (pa->e_tag == ACL_USER) {
+		mask = mask_from_posix(pa->e_perm, flags);
+		error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+				eflag,  mask_mask, NFS4_ACL_WHO_NAMED, pa->e_id);
+		if (error < 0)
+			goto out;
+
+
+		error = nfs4_acl_add_pair(acl, eflag, mask,
+				NFS4_ACL_WHO_NAMED, pa->e_id, flags);
+		if (error < 0)
+			goto out;
+		pa++;
+	}
+
+	/* In the case of groups, we apply allow ACEs first, then deny ACEs,
+	 * since a user can be in more than one group.  */
+
+	/* allow ACEs */
+
+	if (pacl->a_count > 3) {
+		BUG_ON(pa->e_tag != ACL_GROUP_OBJ);
+		error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+				NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask,
+				NFS4_ACL_WHO_GROUP, 0);
+		if (error < 0)
+			goto out;
+	}
+	group_owner_entry = pa;
+	mask = mask_from_posix(pa->e_perm, flags);
+	error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
+			NFS4_ACE_IDENTIFIER_GROUP | eflag, mask,
+			NFS4_ACL_WHO_GROUP, 0);
+	if (error < 0)
+		goto out;
+	pa++;
+
+	while (pa->e_tag == ACL_GROUP) {
+		mask = mask_from_posix(pa->e_perm, flags);
+		error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+				NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask,
+				NFS4_ACL_WHO_NAMED, pa->e_id);
+		if (error < 0)
+			goto out;
+
+		error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
+		    		NFS4_ACE_IDENTIFIER_GROUP | eflag, mask,
+		    		NFS4_ACL_WHO_NAMED, pa->e_id);
+		if (error < 0)
+			goto out;
+		pa++;
+	}
+
+	/* deny ACEs */
+
+	pa = group_owner_entry;
+	mask = mask_from_posix(pa->e_perm, flags);
+	error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+			NFS4_ACE_IDENTIFIER_GROUP | eflag,
+			deny_mask(mask, flags), NFS4_ACL_WHO_GROUP, 0);
+	if (error < 0)
+		goto out;
+	pa++;
+	while (pa->e_tag == ACL_GROUP) {
+		mask = mask_from_posix(pa->e_perm, flags);
+		error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
+		    		NFS4_ACE_IDENTIFIER_GROUP | eflag,
+		    		deny_mask(mask, flags), NFS4_ACL_WHO_NAMED, pa->e_id);
+		if (error < 0)
+			goto out;
+		pa++;
+	}
+
+	if (pa->e_tag == ACL_MASK)
+		pa++;
+	BUG_ON(pa->e_tag != ACL_OTHER);
+	mask = mask_from_posix(pa->e_perm, flags);
+	error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_EVERYONE, 0, flags);
+
+out:
+	return error;
+}
+
+static void
+sort_pacl_range(struct posix_acl *pacl, int start, int end) {
+	int sorted = 0, i;
+	struct posix_acl_entry tmp;
+
+	/* We just do a bubble sort; easy to do in place, and we're not
+	 * expecting ACLs to be long enough to justify anything more. */
+	while (!sorted) {
+		sorted = 1;
+		for (i = start; i < end; i++) {
+			if (pacl->a_entries[i].e_id
+					> pacl->a_entries[i+1].e_id) {
+				sorted = 0;
+				tmp = pacl->a_entries[i];
+				pacl->a_entries[i] = pacl->a_entries[i+1];
+				pacl->a_entries[i+1] = tmp;
+			}
+		}
+	}
+}
+
+static void
+sort_pacl(struct posix_acl *pacl)
+{
+	/* posix_acl_valid requires that users and groups be in order
+	 * by uid/gid. */
+	int i, j;
+
+	if (pacl->a_count <= 4)
+		return; /* no users or groups */
+	i = 1;
+	while (pacl->a_entries[i].e_tag == ACL_USER)
+		i++;
+	sort_pacl_range(pacl, 1, i-1);
+
+	BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
+	j = i++;
+	while (pacl->a_entries[j].e_tag == ACL_GROUP)
+		j++;
+	sort_pacl_range(pacl, i, j-1);
+	return;
+}
+
+static int
+write_pace(struct nfs4_ace *ace, struct posix_acl *pacl,
+		struct posix_acl_entry **pace, short tag, unsigned int flags)
+{
+	struct posix_acl_entry *this = *pace;
+
+	if (*pace == pacl->a_entries + pacl->a_count)
+		return -EINVAL; /* fell off the end */
+	(*pace)++;
+	this->e_tag = tag;
+	if (tag == ACL_USER_OBJ)
+		flags |= NFS4_ACL_OWNER;
+	if (mode_from_nfs4(ace->access_mask, &this->e_perm, flags))
+		return -EINVAL;
+	this->e_id = (tag == ACL_USER || tag == ACL_GROUP ?
+			ace->who : ACL_UNDEFINED_ID);
+	return 0;
+}
+
+static struct nfs4_ace *
+get_next_v4_ace(struct list_head **p, struct list_head *head)
+{
+	struct nfs4_ace *ace;
+
+	*p = (*p)->next;
+	if (*p == head)
+		return NULL;
+	ace = list_entry(*p, struct nfs4_ace, l_ace);
+
+	return ace;
+}
+
+int
+nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
+		struct posix_acl **dpacl, unsigned int flags)
+{
+	struct nfs4_acl *dacl;
+	int error = -ENOMEM;
+
+	*pacl = NULL;
+	*dpacl = NULL;
+
+	dacl = nfs4_acl_new();
+	if (dacl == NULL)
+		goto out;
+
+	error = nfs4_acl_split(acl, dacl);
+	if (error < 0)
+		goto out_acl;
+
+	if (pacl != NULL) {
+		if (acl->naces == 0) {
+			error = -ENODATA;
+			goto try_dpacl;
+		}
+
+		*pacl = _nfsv4_to_posix_one(acl, flags);
+		if (IS_ERR(*pacl)) {
+			error = PTR_ERR(*pacl);
+			*pacl = NULL;
+			goto out_acl;
+		}
+	}
+
+try_dpacl:
+	if (dpacl != NULL) {
+		if (dacl->naces == 0) {
+			if (pacl == NULL || *pacl == NULL)
+				error = -ENODATA;
+			goto out_acl;
+		}
+
+		error = 0;
+		*dpacl = _nfsv4_to_posix_one(dacl, flags);
+		if (IS_ERR(*dpacl)) {
+			error = PTR_ERR(*dpacl);
+			*dpacl = NULL;
+			goto out_acl;
+		}
+	}
+
+out_acl:
+	if (error && pacl) {
+		posix_acl_release(*pacl);
+		*pacl = NULL;
+	}
+	nfs4_acl_free(dacl);
+out:
+	return error;
+}
+
+static int
+same_who(struct nfs4_ace *a, struct nfs4_ace *b)
+{
+	return a->whotype == b->whotype &&
+		(a->whotype != NFS4_ACL_WHO_NAMED || a->who == b->who);
+}
+
+static int
+complementary_ace_pair(struct nfs4_ace *allow, struct nfs4_ace *deny,
+		unsigned int flags)
+{
+	int ignore = 0;
+	if (!(flags & NFS4_ACL_DIR))
+		ignore |= NFS4_ACE_DELETE_CHILD;
+	return MASK_EQUAL(ignore|deny_mask(allow->access_mask, flags),
+			  ignore|deny->access_mask) &&
+		allow->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
+		deny->type == NFS4_ACE_ACCESS_DENIED_ACE_TYPE &&
+		allow->flag == deny->flag &&
+		same_who(allow, deny);
+}
+
+static inline int
+user_obj_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
+		struct posix_acl *pacl, struct posix_acl_entry **pace,
+		unsigned int flags)
+{
+	int error = -EINVAL;
+	struct nfs4_ace *ace, *ace2;
+
+	ace = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace == NULL)
+		goto out;
+	if (ace2type(ace) != ACL_USER_OBJ)
+		goto out;
+	error = write_pace(ace, pacl, pace, ACL_USER_OBJ, flags);
+	if (error < 0)
+		goto out;
+	error = -EINVAL;
+	ace2 = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace2 == NULL)
+		goto out;
+	if (!complementary_ace_pair(ace, ace2, flags))
+		goto out;
+	error = 0;
+out:
+	return error;
+}
+
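+/* Consume the (mask-deny, allow, deny) ACE triple that _posix_to_nfsv4_one
+ * emits for each named user, writing one ACL_USER entry per triple. */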
+static inline int
+users_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
+		struct nfs4_ace **mask_ace,
+		struct posix_acl *pacl, struct posix_acl_entry **pace,
+		unsigned int flags)
+{
+	int error = -EINVAL;
+	struct nfs4_ace *ace, *ace2;
+
+	ace = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace == NULL)
+		goto out;
+	while (ace2type(ace) == ACL_USER) {
+		if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
+			goto out;
+		if (*mask_ace &&
+			!MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
+			goto out;
+		*mask_ace = ace;
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+		if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
+			goto out;
+		error = write_pace(ace, pacl, pace, ACL_USER, flags);
+		if (error < 0)
+			goto out;
+		error = -EINVAL;
+		ace2 = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace2 == NULL)
+			goto out;
+		if (!complementary_ace_pair(ace, ace2, flags))
+			goto out;
+		if ((*mask_ace)->flag != ace2->flag ||
+				!same_who(*mask_ace, ace2))
+			goto out;
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+	}
+	error = 0;
+out:
+	return error;
+}
+
+static inline int
+group_obj_and_groups_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
+		struct nfs4_ace **mask_ace,
+		struct posix_acl *pacl, struct posix_acl_entry **pace,
+		unsigned int flags)
+{
+	int error = -EINVAL;
+	struct nfs4_ace *ace, *ace2;
+	struct ace_container *ac;
+	struct list_head group_l;
+
+	INIT_LIST_HEAD(&group_l);
+	ace = list_entry(*p, struct nfs4_ace, l_ace);
+
+	/* group owner (mask and allow aces) */
+
+	if (pacl->a_count != 3) {
+		/* then the group owner should be preceded by mask */
+		if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
+			goto out;
+		if (*mask_ace &&
+			!MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
+			goto out;
+		*mask_ace = ace;
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+
+		if ((*mask_ace)->flag != ace->flag || !same_who(*mask_ace, ace))
+			goto out;
+	}
+
+	if (ace2type(ace) != ACL_GROUP_OBJ)
+		goto out;
+
+	ac = kmalloc(sizeof(*ac), GFP_KERNEL);
+	error = -ENOMEM;
+	if (ac == NULL)
+		goto out;
+	ac->ace = ace;
+	list_add_tail(&ac->ace_l, &group_l);
+
+	error = -EINVAL;
+	if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
+		goto out;
+
+	error = write_pace(ace, pacl, pace, ACL_GROUP_OBJ, flags);
+	if (error < 0)
+		goto out;
+
+	error = -EINVAL;
+	ace = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace == NULL)
+		goto out;
+
+	/* groups (mask and allow aces) */
+
+	while (ace2type(ace) == ACL_GROUP) {
+		if (*mask_ace == NULL)
+			goto out;
+
+		if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE ||
+			!MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
+			goto out;
+		*mask_ace = ace;
+
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+		ac = kmalloc(sizeof(*ac), GFP_KERNEL);
+		error = -ENOMEM;
+		if (ac == NULL)
+			goto out;
+		error = -EINVAL;
+		if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE ||
+				!same_who(ace, *mask_ace))
+			goto out;
+
+		ac->ace = ace;
+		list_add_tail(&ac->ace_l, &group_l);
+
+		error = write_pace(ace, pacl, pace, ACL_GROUP, flags);
+		if (error < 0)
+			goto out;
+		error = -EINVAL;
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+	}
+
+	/* group owner (deny ace) */
+
+	if (ace2type(ace) != ACL_GROUP_OBJ)
+		goto out;
+	ac = list_entry(group_l.next, struct ace_container, ace_l);
+	ace2 = ac->ace;
+	if (!complementary_ace_pair(ace2, ace, flags))
+		goto out;
+	list_del(group_l.next);
+	kfree(ac);
+
+	/* groups (deny aces) */
+
+	while (!list_empty(&group_l)) {
+		ace = get_next_v4_ace(p, &n4acl->ace_head);
+		if (ace == NULL)
+			goto out;
+		if (ace2type(ace) != ACL_GROUP)
+			goto out;
+		ac = list_entry(group_l.next, struct ace_container, ace_l);
+		ace2 = ac->ace;
+		if (!complementary_ace_pair(ace2, ace, flags))
+			goto out;
+		list_del(group_l.next);
+		kfree(ac);
+	}
+
+	ace = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace == NULL)
+		goto out;
+	if (ace2type(ace) != ACL_OTHER)
+		goto out;
+	error = 0;
+out:
+	while (!list_empty(&group_l)) {
+		ac = list_entry(group_l.next, struct ace_container, ace_l);
+		list_del(group_l.next);
+		kfree(ac);
+	}
+	return error;
+}
+
+static inline int
+mask_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
+		struct nfs4_ace **mask_ace,
+		struct posix_acl *pacl, struct posix_acl_entry **pace,
+		unsigned int flags)
+{
+	int error = -EINVAL;
+	struct nfs4_ace *ace;
+
+	ace = list_entry(*p, struct nfs4_ace, l_ace);
+	if (pacl->a_count != 3) {
+		if (*mask_ace == NULL)
+			goto out;
+		(*mask_ace)->access_mask = deny_mask((*mask_ace)->access_mask, flags);
+		write_pace(*mask_ace, pacl, pace, ACL_MASK, flags);
+	}
+	error = 0;
+out:
+	return error;
+}
+
+static inline int
+other_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
+		struct posix_acl *pacl, struct posix_acl_entry **pace,
+		unsigned int flags)
+{
+	int error = -EINVAL;
+	struct nfs4_ace *ace, *ace2;
+
+	ace = list_entry(*p, struct nfs4_ace, l_ace);
+	if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
+		goto out;
+	error = write_pace(ace, pacl, pace, ACL_OTHER, flags);
+	if (error < 0)
+		goto out;
+	error = -EINVAL;
+	ace2 = get_next_v4_ace(p, &n4acl->ace_head);
+	if (ace2 == NULL)
+		goto out;
+	if (!complementary_ace_pair(ace, ace2, flags))
+		goto out;
+	error = 0;
+out:
+	return error;
+}
+
+static int
+calculate_posix_ace_count(struct nfs4_acl *n4acl)
+{
+	if (n4acl->naces == 6) /* owner, owner group, and other only */
+		return 3;
+	else { /* Otherwise there must be a mask entry. */
+		/* Also, the remaining entries are for named users and
+		 * groups, and come in threes (mask, allow, deny): */
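+		/* e.g. 10 v4 ACEs map to 4 + (10 - 7)/3 = 5 posix entries */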
+		if (n4acl->naces < 7)
+			return -1;
+		if ((n4acl->naces - 7) % 3)
+			return -1;
+		return 4 + (n4acl->naces - 7)/3;
+	}
+}
+
+
+static struct posix_acl *
+_nfsv4_to_posix_one(struct nfs4_acl *n4acl, unsigned int flags)
+{
+	struct posix_acl *pacl;
+	int error = -EINVAL, nace = 0;
+	struct list_head *p;
+	struct nfs4_ace *mask_ace = NULL;
+	struct posix_acl_entry *pace;
+
+	nace = calculate_posix_ace_count(n4acl);
+	if (nace < 0)
+		goto out_err;
+
+	pacl = posix_acl_alloc(nace, GFP_KERNEL);
+	error = -ENOMEM;
+	if (pacl == NULL)
+		goto out_err;
+
+	pace = &pacl->a_entries[0];
+	p = &n4acl->ace_head;
+
+	error = user_obj_from_v4(n4acl, &p, pacl, &pace, flags);
+	if (error)
+		goto out_acl;
+
+	error = users_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags);
+	if (error)
+		goto out_acl;
+
+	error = group_obj_and_groups_from_v4(n4acl, &p, &mask_ace, pacl, &pace,
+						flags);
+	if (error)
+		goto out_acl;
+
+	error = mask_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags);
+	if (error)
+		goto out_acl;
+	error = other_from_v4(n4acl, &p, pacl, &pace, flags);
+	if (error)
+		goto out_acl;
+
+	error = -EINVAL;
+	if (p->next != &n4acl->ace_head)
+		goto out_acl;
+	if (pace != pacl->a_entries + pacl->a_count)
+		goto out_acl;
+
+	sort_pacl(pacl);
+
+	return pacl;
+out_acl:
+	posix_acl_release(pacl);
+out_err:
+	pacl = ERR_PTR(error);
+	return pacl;
+}
+
+int
+nfs4_acl_split(struct nfs4_acl *acl, struct nfs4_acl *dacl)
+{
+	struct list_head *h, *n;
+	struct nfs4_ace *ace;
+	int error = 0;
+
+	list_for_each_safe(h, n, &acl->ace_head) {
+		ace = list_entry(h, struct nfs4_ace, l_ace);
+
+		if ((ace->flag & NFS4_INHERITANCE_FLAGS)
+				!= NFS4_INHERITANCE_FLAGS)
+			continue;
+
+		error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
+				ace->access_mask, ace->whotype, ace->who);
+		if (error < 0)
+			goto out;
+
+		list_del(h);
+		kfree(ace);
+		acl->naces--;
+	}
+
+out:
+	return error;
+}
+
+static short
+ace2type(struct nfs4_ace *ace)
+{
+	switch (ace->whotype) {
+		case NFS4_ACL_WHO_NAMED:
+			return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ?
+					ACL_GROUP : ACL_USER);
+		case NFS4_ACL_WHO_OWNER:
+			return ACL_USER_OBJ;
+		case NFS4_ACL_WHO_GROUP:
+			return ACL_GROUP_OBJ;
+		case NFS4_ACL_WHO_EVERYONE:
+			return ACL_OTHER;
+	}
+	BUG();
+	return -1;
+}
+
+EXPORT_SYMBOL(nfs4_acl_posix_to_nfsv4);
+EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix);
+
+struct nfs4_acl *
+nfs4_acl_new(void)
+{
+	struct nfs4_acl *acl;
+
+	if ((acl = kmalloc(sizeof(*acl), GFP_KERNEL)) == NULL)
+		return NULL;
+
+	acl->naces = 0;
+	INIT_LIST_HEAD(&acl->ace_head);
+
+	return acl;
+}
+
+void
+nfs4_acl_free(struct nfs4_acl *acl)
+{
+	struct list_head *h;
+	struct nfs4_ace *ace;
+
+	if (!acl)
+		return;
+
+	while (!list_empty(&acl->ace_head)) {
+		h = acl->ace_head.next;
+		list_del(h);
+		ace = list_entry(h, struct nfs4_ace, l_ace);
+		kfree(ace);
+	}
+
+	kfree(acl);
+
+	return;
+}
+
+int
+nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask,
+		int whotype, uid_t who)
+{
+	struct nfs4_ace *ace;
+
+	if ((ace = kmalloc(sizeof(*ace), GFP_KERNEL)) == NULL)
+		return -1;
+
+	ace->type = type;
+	ace->flag = flag;
+	ace->access_mask = access_mask;
+	ace->whotype = whotype;
+	ace->who = who;
+
+	list_add_tail(&ace->l_ace, &acl->ace_head);
+	acl->naces++;
+
+	return 0;
+}
+
+static struct {
+	char *string;
+	int   stringlen;
+	int type;
+} s2t_map[] = {
+	{
+		.string    = "OWNER@",
+		.stringlen = sizeof("OWNER@") - 1,
+		.type      = NFS4_ACL_WHO_OWNER,
+	},
+	{
+		.string    = "GROUP@",
+		.stringlen = sizeof("GROUP@") - 1,
+		.type      = NFS4_ACL_WHO_GROUP,
+	},
+	{
+		.string    = "EVERYONE@",
+		.stringlen = sizeof("EVERYONE@") - 1,
+		.type      = NFS4_ACL_WHO_EVERYONE,
+	},
+};
+
+int
+nfs4_acl_get_whotype(char *p, u32 len)
+{
+	int i;
+
+	for (i=0; i < sizeof(s2t_map) / sizeof(*s2t_map); i++) {
+		if (s2t_map[i].stringlen == len &&
+				0 == memcmp(s2t_map[i].string, p, len))
+			return s2t_map[i].type;
+	}
+	return NFS4_ACL_WHO_NAMED;
+}
+
+int
+nfs4_acl_write_who(int who, char *p)
+{
+	int i;
+
+	for (i=0; i < sizeof(s2t_map) / sizeof(*s2t_map); i++) {
+		if (s2t_map[i].type == who) {
+			memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
+			return s2t_map[i].stringlen;
+		}
+	}
+	BUG();
+	return -1;
+}
+
+static inline int
+match_who(struct nfs4_ace *ace, uid_t owner, gid_t group, uid_t who)
+{
+	switch (ace->whotype) {
+		case NFS4_ACL_WHO_NAMED:
+			return who == ace->who;
+		case NFS4_ACL_WHO_OWNER:
+			return who == owner;
+		case NFS4_ACL_WHO_GROUP:
+			return who == group;
+		case NFS4_ACL_WHO_EVERYONE:
+			return 1;
+		default:
+			return 0;
+	}
+}
+
+EXPORT_SYMBOL(nfs4_acl_new);
+EXPORT_SYMBOL(nfs4_acl_free);
+EXPORT_SYMBOL(nfs4_acl_add_ace);
+EXPORT_SYMBOL(nfs4_acl_get_whotype);
+EXPORT_SYMBOL(nfs4_acl_write_who);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
new file mode 100644
index 0000000..c70de9c
--- /dev/null
+++ b/fs/nfsd/nfs4callback.c
@@ -0,0 +1,547 @@
+/*
+ *  linux/fs/nfsd/nfs4callback.c
+ *
+ *  Copyright (c) 2001 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *  Andy Adamson <andros@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/inet.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/state.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/nfs4.h>
+
+#define NFSDDBG_FACILITY                NFSDDBG_PROC
+
+#define NFSPROC4_CB_NULL 0
+#define NFSPROC4_CB_COMPOUND 1
+
+/* declarations */
+static void nfs4_cb_null(struct rpc_task *task);
+extern spinlock_t recall_lock;
+
+/* Index of predefined Linux callback client operations */
+
+enum {
+	NFSPROC4_CLNT_CB_NULL = 0,
+	NFSPROC4_CLNT_CB_RECALL,
+};
+
+enum nfs_cb_opnum4 {
+	OP_CB_RECALL            = 4,
+};
+
+#define NFS4_MAXTAGLEN		20
+
+#define NFS4_enc_cb_null_sz		0
+#define NFS4_dec_cb_null_sz		0
+#define cb_compound_enc_hdr_sz		4
+#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
+#define op_enc_sz			1
+#define op_dec_sz			2
+#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
+#define enc_stateid_sz			16
+#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +       \
+					1 + enc_stateid_sz +            \
+					enc_nfs4_fh_sz)
+
+#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz  +      \
+					op_dec_sz)
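+
+/*
+ * The *_sz values above are sizes in 32-bit XDR words, not bytes; for
+ * instance NFS4_enc_cb_recall_sz works out to 4 + 1 + 16 + 1 + NFS4_FHSIZE/4
+ * words.  The PROC() macro further down converts the larger of the
+ * encode/decode size to bytes with "<< 2" when sizing the RPC buffer.
+ */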
+
+/*
+ * Generic encode routines from fs/nfs/nfs4xdr.c
+ */
+static inline u32 *
+xdr_writemem(u32 *p, const void *ptr, int nbytes)
+{
+	int tmp = XDR_QUADLEN(nbytes);
+	if (!tmp)
+		return p;
+	p[tmp-1] = 0;
+	memcpy(p, ptr, nbytes);
+	return p + tmp;
+}
+
+#define WRITE32(n)               *p++ = htonl(n)
+#define WRITEMEM(ptr,nbytes)     do {                           \
+	p = xdr_writemem(p, ptr, nbytes);                       \
+} while (0)
+#define RESERVE_SPACE(nbytes)   do {                            \
+	p = xdr_reserve_space(xdr, nbytes);                     \
+	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __FUNCTION__); \
+	BUG_ON(!p);                                             \
+} while (0)
+
+/*
+ * Generic decode routines from fs/nfs/nfs4xdr.c
+ */
+#define DECODE_TAIL                             \
+	status = 0;                             \
+out:                                            \
+	return status;                          \
+xdr_error:                                      \
+	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
+	status = -EIO;                          \
+	goto out
+
+#define READ32(x)         (x) = ntohl(*p++)
+#define READ64(x)         do {                  \
+	(x) = (u64)ntohl(*p++) << 32;           \
+	(x) |= ntohl(*p++);                     \
+} while (0)
+#define READTIME(x)       do {                  \
+	p++;                                    \
+	(x.tv_sec) = ntohl(*p++);               \
+	(x.tv_nsec) = ntohl(*p++);              \
+} while (0)
+#define READ_BUF(nbytes)  do { \
+	p = xdr_inline_decode(xdr, nbytes); \
+	if (!p) { \
+		dprintk("NFSD: %s: reply buffer overflowed in line %d.", \
+			__FUNCTION__, __LINE__); \
+		return -EIO; \
+	} \
+} while (0)
+
+struct nfs4_cb_compound_hdr {
+	int		status;
+	u32		ident;
+	u32		nops;
+	u32		taglen;
+	char *		tag;
+};
+
+static struct {
+	int	stat;
+	int	errno;
+} nfs_cb_errtbl[] = {
+	{ NFS4_OK,		0               },
+	{ NFS4ERR_PERM,		EPERM           },
+	{ NFS4ERR_NOENT,	ENOENT          },
+	{ NFS4ERR_IO,		EIO             },
+	{ NFS4ERR_NXIO,		ENXIO           },
+	{ NFS4ERR_ACCESS,	EACCES          },
+	{ NFS4ERR_EXIST,	EEXIST          },
+	{ NFS4ERR_XDEV,		EXDEV           },
+	{ NFS4ERR_NOTDIR,	ENOTDIR         },
+	{ NFS4ERR_ISDIR,	EISDIR          },
+	{ NFS4ERR_INVAL,	EINVAL          },
+	{ NFS4ERR_FBIG,		EFBIG           },
+	{ NFS4ERR_NOSPC,	ENOSPC          },
+	{ NFS4ERR_ROFS,		EROFS           },
+	{ NFS4ERR_MLINK,	EMLINK          },
+	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG    },
+	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY       },
+	{ NFS4ERR_DQUOT,	EDQUOT          },
+	{ NFS4ERR_STALE,	ESTALE          },
+	{ NFS4ERR_BADHANDLE,	EBADHANDLE      },
+	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE      },
+	{ NFS4ERR_NOTSUPP,	ENOTSUPP        },
+	{ NFS4ERR_TOOSMALL,	ETOOSMALL       },
+	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT    },
+	{ NFS4ERR_BADTYPE,	EBADTYPE        },
+	{ NFS4ERR_LOCKED,	EAGAIN          },
+	{ NFS4ERR_RESOURCE,	EREMOTEIO       },
+	{ NFS4ERR_SYMLINK,	ELOOP           },
+	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP      },
+	{ NFS4ERR_DEADLOCK,	EDEADLK         },
+	{ -1,                   EIO             }
+};
+
+static int
+nfs_cb_stat_to_errno(int stat)
+{
+	int i;
+	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
+		if (nfs_cb_errtbl[i].stat == stat)
+			return nfs_cb_errtbl[i].errno;
+	}
+	/* If we cannot translate the error, the recovery routines should
+	 * handle it.
+	 * Note: remaining NFSv4 error codes have values > 10000, so should
+	 * not conflict with native Linux error codes.
+	 */
+	return stat;
+}
+
+/*
+ * XDR encode
+ */
+
+static int
+encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+{
+	u32 * p;
+
+	RESERVE_SPACE(16);
+	WRITE32(0);            /* tag length is always 0 */
+	WRITE32(NFS4_MINOR_VERSION);
+	WRITE32(hdr->ident);
+	WRITE32(hdr->nops);
+	return 0;
+}
+
+static int
+encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
+{
+	u32 *p;
+	int len = cb_rec->cbr_fhlen;
+
+	RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len);
+	WRITE32(OP_CB_RECALL);
+	WRITEMEM(&cb_rec->cbr_stateid, sizeof(stateid_t));
+	WRITE32(cb_rec->cbr_trunc);
+	WRITE32(len);
+	WRITEMEM(cb_rec->cbr_fhval, len);
+	return 0;
+}
+
+static int
+nfs4_xdr_enc_cb_null(struct rpc_rqst *req, u32 *p)
+{
+	struct xdr_stream xdrs, *xdr = &xdrs;
+
+	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
+	RESERVE_SPACE(0);
+	return 0;
+}
+
+static int
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, u32 *p, struct nfs4_cb_recall *args)
+{
+	struct xdr_stream xdr;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = args->cbr_ident,
+		.nops   = 1,
+	};
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_cb_compound_hdr(&xdr, &hdr);
+	return (encode_cb_recall(&xdr, args));
+}
+
+
+static int
+decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+{
+	u32 *p;
+
+	READ_BUF(8);
+	READ32(hdr->status);
+	READ32(hdr->taglen);
+	READ_BUF(hdr->taglen + 4);
+	hdr->tag = (char *)p;
+	p += XDR_QUADLEN(hdr->taglen);
+	READ32(hdr->nops);
+	return 0;
+}
+
+static int
+decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+	u32 *p;
+	u32 op;
+	int32_t nfserr;
+
+	READ_BUF(8);
+	READ32(op);
+	if (op != expected) {
+		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
+			"operation %d but we issued a request for %d\n",
+			op, expected);
+		return -EIO;
+	}
+	READ32(nfserr);
+	if (nfserr != NFS_OK)
+		return -nfs_cb_stat_to_errno(nfserr);
+	return 0;
+}
+
+static int
+nfs4_xdr_dec_cb_null(struct rpc_rqst *req, u32 *p)
+{
+	return 0;
+}
+
+static int
+nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, u32 *p)
+{
+	struct xdr_stream xdr;
+	struct nfs4_cb_compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_cb_compound_hdr(&xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
+out:
+	return status;
+}
+
+/*
+ * RPC procedure tables
+ */
+#ifndef MAX
+# define MAX(a, b)      (((a) > (b))? (a) : (b))
+#endif
+
+#define PROC(proc, call, argtype, restype)                              \
+[NFSPROC4_CLNT_##proc] = {                                      	\
+        .p_proc   = NFSPROC4_CB_##call,					\
+        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
+        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
+        .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2,  \
+}
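+
+/*
+ * For example, PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall)
+ * expands to the [NFSPROC4_CLNT_CB_RECALL] entry below with
+ * .p_proc = NFSPROC4_CB_COMPOUND, .p_encode = nfs4_xdr_enc_cb_recall,
+ * .p_decode = nfs4_xdr_dec_cb_recall and a buffer sized from
+ * MAX(NFS4_enc_cb_recall_sz, NFS4_dec_cb_recall_sz) << 2.
+ */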
+
+struct rpc_procinfo     nfs4_cb_procedures[] = {
+	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
+	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
+};
+
+struct rpc_version              nfs_cb_version4 = {
+        .number                 = 1,
+        .nrprocs                = sizeof(nfs4_cb_procedures)/sizeof(nfs4_cb_procedures[0]),
+        .procs                  = nfs4_cb_procedures
+};
+
+static struct rpc_version *	nfs_cb_version[] = {
+	NULL,
+	&nfs_cb_version4,
+};
+
+/*
+ * Use the SETCLIENTID credential
+ */
+struct rpc_cred *
+nfsd4_lookupcred(struct nfs4_client *clp, int taskflags)
+{
+	struct auth_cred acred;
+	struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+	struct rpc_cred *ret;
+
+	get_group_info(clp->cl_cred.cr_group_info);
+	acred.uid = clp->cl_cred.cr_uid;
+	acred.gid = clp->cl_cred.cr_gid;
+	acred.group_info = clp->cl_cred.cr_group_info;
+
+	dprintk("NFSD:     looking up %s cred\n",
+		clnt->cl_auth->au_ops->au_name);
+	ret = rpcauth_lookup_credcache(clnt->cl_auth, &acred, taskflags);
+	put_group_info(clp->cl_cred.cr_group_info);
+	return ret;
+}
+
+/*
+ * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
+ */
+void
+nfsd4_probe_callback(struct nfs4_client *clp)
+{
+	struct sockaddr_in	addr;
+	struct nfs4_callback    *cb = &clp->cl_callback;
+	struct rpc_timeout	timeparms;
+	struct rpc_xprt *	xprt;
+	struct rpc_program *	program = &cb->cb_program;
+	struct rpc_stat *	stat = &cb->cb_stat;
+	struct rpc_clnt *	clnt;
+	struct rpc_message msg = {
+		.rpc_proc       = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
+		.rpc_argp       = clp,
+	};
+	char                    hostname[32];
+	int status;
+
+	dprintk("NFSD: probe_callback. cb_parsed %d cb_set %d\n",
+			cb->cb_parsed, atomic_read(&cb->cb_set));
+	if (!cb->cb_parsed || atomic_read(&cb->cb_set))
+		return;
+
+	/* Initialize address */
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = AF_INET;
+	addr.sin_port = htons(cb->cb_port);
+	addr.sin_addr.s_addr = htonl(cb->cb_addr);
+
+	/* Initialize timeout */
+	timeparms.to_initval = (NFSD_LEASE_TIME/4) * HZ;
+	timeparms.to_retries = 0;
+	timeparms.to_maxval = (NFSD_LEASE_TIME/2) * HZ;
+	timeparms.to_exponential = 1;
+
+	/* Create RPC transport */
+	if (!(xprt = xprt_create_proto(IPPROTO_TCP, &addr, &timeparms))) {
+		dprintk("NFSD: couldn't create callback transport!\n");
+		goto out_err;
+	}
+
+	/* Initialize rpc_program */
+	program->name = "nfs4_cb";
+	program->number = cb->cb_prog;
+	program->nrvers = sizeof(nfs_cb_version)/sizeof(nfs_cb_version[0]);
+	program->version = nfs_cb_version;
+	program->stats = stat;
+
+	/* Initialize rpc_stat */
+	memset(stat, 0, sizeof(struct rpc_stat));
+	stat->program = program;
+
+	/* Create RPC client
+ 	 *
+	 * XXX AUTH_UNIX only - need AUTH_GSS....
+	 */
+	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr.sin_addr.s_addr));
+	if (!(clnt = rpc_create_client(xprt, hostname, program, 1, RPC_AUTH_UNIX))) {
+		dprintk("NFSD: couldn't create callback client\n");
+		goto out_xprt;
+	}
+	clnt->cl_intr = 0;
+	clnt->cl_softrtry = 1;
+	clnt->cl_chatty = 1;
+
+	/* Kick rpciod, put the call on the wire. */
+
+	if (rpciod_up() != 0) {
+		dprintk("nfsd: couldn't start rpciod for callbacks!\n");
+		goto out_clnt;
+	}
+
+	/* the task holds a reference to the nfs4_client struct */
+	cb->cb_client = clnt;
+	atomic_inc(&clp->cl_count);
+
+	msg.rpc_cred = nfsd4_lookupcred(clp,0);
+	if (IS_ERR(msg.rpc_cred))
+		goto out_rpciod;
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, NULL);
+	put_rpccred(msg.rpc_cred);
+
+	if (status != 0) {
+		dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n");
+		goto out_rpciod;
+	}
+	return;
+
+out_rpciod:
+	atomic_dec(&clp->cl_count);
+	rpciod_down();
+out_clnt:
+	rpc_shutdown_client(clnt);
+	goto out_err;
+out_xprt:
+	xprt_destroy(xprt);
+out_err:
+	dprintk("NFSD: warning: no callback path to client %.*s\n",
+		(int)clp->cl_name.len, clp->cl_name.data);
+	cb->cb_client = NULL;
+}
+
+static void
+nfs4_cb_null(struct rpc_task *task)
+{
+	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
+	struct nfs4_callback *cb = &clp->cl_callback;
+	u32 addr = htonl(cb->cb_addr);
+
+	dprintk("NFSD: nfs4_cb_null task->tk_status %d\n", task->tk_status);
+
+	if (task->tk_status < 0) {
+		dprintk("NFSD: callback establishment to client %.*s failed\n",
+			(int)clp->cl_name.len, clp->cl_name.data);
+		goto out;
+	}
+	atomic_set(&cb->cb_set, 1);
+	dprintk("NFSD: callback set to client %u.%u.%u.%u\n", NIPQUAD(addr));
+out:
+	put_nfs4_client(clp);
+}
+
+/*
+ * called with dp->dl_count inc'ed.
+ * nfs4_lock_state() may or may not have been called.
+ */
+void
+nfsd4_cb_recall(struct nfs4_delegation *dp)
+{
+	struct nfs4_client *clp = dp->dl_client;
+	struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+	struct nfs4_cb_recall *cbr = &dp->dl_recall;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
+		.rpc_argp = cbr,
+	};
+	int retries = 1;
+	int status = 0;
+
+	if ((!atomic_read(&clp->cl_callback.cb_set)) || !clnt)
+		return;
+
+	msg.rpc_cred = nfsd4_lookupcred(clp, 0);
+	if (IS_ERR(msg.rpc_cred))
+		goto out;
+
+	cbr->cbr_trunc = 0; /* XXX need to implement truncate optimization */
+	cbr->cbr_dp = dp;
+
+	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
+	while (retries--) {
+		switch (status) {
+			case -EIO:
+				/* Network partition? */
+			case -EBADHANDLE:
+			case -NFS4ERR_BAD_STATEID:
+				/* Race: client probably got cb_recall
+				 * before open reply granting delegation */
+				break;
+			default:
+				goto out_put_cred;
+		}
+		ssleep(2);
+		status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
+	}
+out_put_cred:
+	put_rpccred(msg.rpc_cred);
+out:
+	if (status == -EIO)
+		atomic_set(&clp->cl_callback.cb_set, 0);
+	/* Success or failure, now we're either waiting for lease expiration
+	 * or deleg_return. */
+	dprintk("NFSD: nfs4_cb_recall: dp %p dl_flock %p dl_count %d\n",
+		dp, dp->dl_flock, atomic_read(&dp->dl_count));
+	nfs4_put_delegation(dp);
+	return;
+}
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
new file mode 100644
index 0000000..4ba5408
--- /dev/null
+++ b/fs/nfsd/nfs4idmap.c
@@ -0,0 +1,588 @@
+/*
+ *  fs/nfsd/nfs4idmap.c
+ *
+ *  Mapping of UID/GIDs to name and vice versa.
+ *
+ *  Copyright (c) 2002, 2003 The Regents of the University of
+ *  Michigan.  All rights reserved.
+ *
+ *  Marius Aamodt Eriksen <marius@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/smp_lock.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/nfsd_idmap.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+#include <linux/sunrpc/svcauth.h>
+
+/*
+ * Cache entry
+ */
+
+/*
+ * XXX we know that IDMAP_NAMESZ < PAGE_SIZE, but it's ugly to rely on
+ * that.
+ */
+
+#define IDMAP_TYPE_USER  0
+#define IDMAP_TYPE_GROUP 1
+
+struct ent {
+	struct cache_head h;
+	int               type;		       /* User / Group */
+	uid_t             id;
+	char              name[IDMAP_NAMESZ];
+	char              authname[IDMAP_NAMESZ];
+};
+
+#define DefineSimpleCacheLookupMap(STRUCT, FUNC)			\
+        DefineCacheLookup(struct STRUCT, h, FUNC##_lookup,		\
+        (struct STRUCT *item, int set), /*no setup */,			\
+	& FUNC##_cache, FUNC##_hash(item), FUNC##_match(item, tmp),	\
+	STRUCT##_init(new, item), STRUCT##_update(tmp, item), 0)
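+
+/*
+ * Instantiated further down as DefineSimpleCacheLookupMap(ent, idtoname)
+ * and DefineSimpleCacheLookupMap(ent, nametoid), this generates the lookup
+ * functions
+ *
+ *	static struct ent *idtoname_lookup(struct ent *item, int set);
+ *	static struct ent *nametoid_lookup(struct ent *item, int set);
+ *
+ * which search the corresponding cache using the *_hash and *_match
+ * helpers and create or update entries via ent_init()/ent_update().
+ */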
+
+/* Common entry handling */
+
+#define ENT_HASHBITS          8
+#define ENT_HASHMAX           (1 << ENT_HASHBITS)
+#define ENT_HASHMASK          (ENT_HASHMAX - 1)
+
+static inline void
+ent_init(struct ent *new, struct ent *itm)
+{
+	new->id = itm->id;
+	new->type = itm->type;
+
+	strlcpy(new->name, itm->name, sizeof(new->name));
+	strlcpy(new->authname, itm->authname, sizeof(new->authname));
+}
+
+static inline void
+ent_update(struct ent *new, struct ent *itm)
+{
+	ent_init(new, itm);
+}
+
+void
+ent_put(struct cache_head *ch, struct cache_detail *cd)
+{
+	if (cache_put(ch, cd)) {
+		struct ent *map = container_of(ch, struct ent, h);
+		kfree(map);
+	}
+}
+
+/*
+ * ID -> Name cache
+ */
+
+static struct cache_head *idtoname_table[ENT_HASHMAX];
+
+static uint32_t
+idtoname_hash(struct ent *ent)
+{
+	uint32_t hash;
+
+	hash = hash_str(ent->authname, ENT_HASHBITS);
+	hash = hash_long(hash ^ ent->id, ENT_HASHBITS);
+
+	/* Flip LSB for user/group */
+	if (ent->type == IDMAP_TYPE_GROUP)
+		hash ^= 1;
+
+	return hash;
+}
+
+static void
+idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
+    int *blen)
+{
+ 	struct ent *ent = container_of(ch, struct ent, h);
+	char idstr[11];
+
+	qword_add(bpp, blen, ent->authname);
+	snprintf(idstr, sizeof(idstr), "%d", ent->id);
+	qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
+	qword_add(bpp, blen, idstr);
+
+	(*bpp)[-1] = '\n';
+}
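+
+/*
+ * The resulting upcall line has the form "<domain> <type> <id>", e.g.
+ * "localdomain user 1000" (domain name purely illustrative).  idmapd is
+ * expected to reply with the same three fields followed by an expiry time
+ * and, when the mapping exists, the name, which idtoname_parse() below
+ * consumes.
+ */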
+
+static inline int
+idtoname_match(struct ent *a, struct ent *b)
+{
+	return (a->id == b->id && a->type == b->type &&
+	    strcmp(a->authname, b->authname) == 0);
+}
+
+static int
+idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
+{
+	struct ent *ent;
+
+	if (h == NULL) {
+		seq_puts(m, "#domain type id [name]\n");
+		return 0;
+	}
+	ent = container_of(h, struct ent, h);
+	seq_printf(m, "%s %s %d", ent->authname,
+			ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
+			ent->id);
+	if (test_bit(CACHE_VALID, &h->flags))
+		seq_printf(m, " %s", ent->name);
+	seq_printf(m, "\n");
+	return 0;
+}
+
+static void
+warn_no_idmapd(struct cache_detail *detail)
+{
+	printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n",
+			detail->last_close? "died" : "not been started");
+}
+
+
+static int         idtoname_parse(struct cache_detail *, char *, int);
+static struct ent *idtoname_lookup(struct ent *, int);
+
+struct cache_detail idtoname_cache = {
+	.hash_size	= ENT_HASHMAX,
+	.hash_table	= idtoname_table,
+	.name		= "nfs4.idtoname",
+	.cache_put	= ent_put,
+	.cache_request	= idtoname_request,
+	.cache_parse	= idtoname_parse,
+	.cache_show	= idtoname_show,
+	.warn_no_listener = warn_no_idmapd,
+};
+
+int
+idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
+{
+	struct ent ent, *res;
+	char *buf1, *bp;
+	int error = -EINVAL;
+
+	if (buf[buflen - 1] != '\n')
+		return (-EINVAL);
+	buf[buflen - 1]= '\0';
+
+	buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf1 == NULL)
+		return (-ENOMEM);
+
+	memset(&ent, 0, sizeof(ent));
+
+	/* Authentication name */
+	if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
+		goto out;
+	memcpy(ent.authname, buf1, sizeof(ent.authname));
+
+	/* Type */
+	if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
+		goto out;
+	ent.type = strcmp(buf1, "user") == 0 ?
+		IDMAP_TYPE_USER : IDMAP_TYPE_GROUP;
+
+	/* ID */
+	if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
+		goto out;
+	ent.id = simple_strtoul(buf1, &bp, 10);
+	if (bp == buf1)
+		goto out;
+
+	/* expiry */
+	ent.h.expiry_time = get_expiry(&buf);
+	if (ent.h.expiry_time == 0)
+		goto out;
+
+	/* Name */
+	error = qword_get(&buf, buf1, PAGE_SIZE);
+	if (error == -EINVAL)
+		goto out;
+	if (error == -ENOENT)
+		set_bit(CACHE_NEGATIVE, &ent.h.flags);
+	else {
+		if (error >= IDMAP_NAMESZ) {
+			error = -EINVAL;
+			goto out;
+		}
+		memcpy(ent.name, buf1, sizeof(ent.name));
+	}
+	error = -ENOMEM;
+	if ((res = idtoname_lookup(&ent, 1)) == NULL)
+		goto out;
+
+	ent_put(&res->h, &idtoname_cache);
+
+	error = 0;
+out:
+	kfree(buf1);
+
+	return error;
+}
+
+static DefineSimpleCacheLookupMap(ent, idtoname);
+
+/*
+ * Name -> ID cache
+ */
+
+static struct cache_head *nametoid_table[ENT_HASHMAX];
+
+static inline int
+nametoid_hash(struct ent *ent)
+{
+	return hash_str(ent->name, ENT_HASHBITS);
+}
+
+void
+nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
+    int *blen)
+{
+ 	struct ent *ent = container_of(ch, struct ent, h);
+
+	qword_add(bpp, blen, ent->authname);
+	qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
+	qword_add(bpp, blen, ent->name);
+
+	(*bpp)[-1] = '\n';
+}
+
+static inline int
+nametoid_match(struct ent *a, struct ent *b)
+{
+	return (a->type == b->type && strcmp(a->name, b->name) == 0 &&
+	    strcmp(a->authname, b->authname) == 0);
+}
+
+static int
+nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
+{
+	struct ent *ent;
+
+	if (h == NULL) {
+		seq_puts(m, "#domain type name [id]\n");
+		return 0;
+	}
+	ent = container_of(h, struct ent, h);
+	seq_printf(m, "%s %s %s", ent->authname,
+			ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
+			ent->name);
+	if (test_bit(CACHE_VALID, &h->flags))
+		seq_printf(m, " %d", ent->id);
+	seq_printf(m, "\n");
+	return 0;
+}
+
+static struct ent *nametoid_lookup(struct ent *, int);
+int                nametoid_parse(struct cache_detail *, char *, int);
+
+struct cache_detail nametoid_cache = {
+	.hash_size	= ENT_HASHMAX,
+	.hash_table	= nametoid_table,
+	.name		= "nfs4.nametoid",
+	.cache_put	= ent_put,
+	.cache_request	= nametoid_request,
+	.cache_parse	= nametoid_parse,
+	.cache_show	= nametoid_show,
+	.warn_no_listener = warn_no_idmapd,
+};
+
+int
+nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
+{
+	struct ent ent, *res;
+	char *buf1;
+	int error = -EINVAL;
+
+	if (buf[buflen - 1] != '\n')
+		return (-EINVAL);
+	buf[buflen - 1]= '\0';
+
+	buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf1 == NULL)
+		return (-ENOMEM);
+
+	memset(&ent, 0, sizeof(ent));
+
+	/* Authentication name */
+	if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
+		goto out;
+	memcpy(ent.authname, buf1, sizeof(ent.authname));
+
+	/* Type */
+	if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
+		goto out;
+	ent.type = strcmp(buf1, "user") == 0 ?
+		IDMAP_TYPE_USER : IDMAP_TYPE_GROUP;
+
+	/* Name */
+	error = qword_get(&buf, buf1, PAGE_SIZE);
+	if (error <= 0 || error >= IDMAP_NAMESZ)
+		goto out;
+	memcpy(ent.name, buf1, sizeof(ent.name));
+
+	/* expiry */
+	ent.h.expiry_time = get_expiry(&buf);
+	if (ent.h.expiry_time == 0)
+		goto out;
+
+	/* ID */
+	error = get_int(&buf, &ent.id);
+	if (error == -EINVAL)
+		goto out;
+	if (error == -ENOENT)
+		set_bit(CACHE_NEGATIVE, &ent.h.flags);
+
+	error = -ENOMEM;
+	if ((res = nametoid_lookup(&ent, 1)) == NULL)
+		goto out;
+
+	ent_put(&res->h, &nametoid_cache);
+	error = 0;
+out:
+	kfree(buf1);
+
+	return (error);
+}
+
+static DefineSimpleCacheLookupMap(ent, nametoid);
+
+/*
+ * Exported API
+ */
+
+void
+nfsd_idmap_init(void)
+{
+	cache_register(&idtoname_cache);
+	cache_register(&nametoid_cache);
+}
+
+void
+nfsd_idmap_shutdown(void)
+{
+	cache_unregister(&idtoname_cache);
+	cache_unregister(&nametoid_cache);
+}
+
+/*
+ * Deferred request handling
+ */
+
+struct idmap_defer_req {
+	struct cache_req		req;
+	struct cache_deferred_req	deferred_req;
+	wait_queue_head_t		waitq;
+	atomic_t			count;
+};
+
+static inline void
+put_mdr(struct idmap_defer_req *mdr)
+{
+	if (atomic_dec_and_test(&mdr->count))
+		kfree(mdr);
+}
+
+static inline void
+get_mdr(struct idmap_defer_req *mdr)
+{
+	atomic_inc(&mdr->count);
+}
+
+static void
+idmap_revisit(struct cache_deferred_req *dreq, int toomany)
+{
+	struct idmap_defer_req *mdr =
+		container_of(dreq, struct idmap_defer_req, deferred_req);
+
+	wake_up(&mdr->waitq);
+	put_mdr(mdr);
+}
+
+static struct cache_deferred_req *
+idmap_defer(struct cache_req *req)
+{
+	struct idmap_defer_req *mdr =
+		container_of(req, struct idmap_defer_req, req);
+
+	mdr->deferred_req.revisit = idmap_revisit;
+	get_mdr(mdr);
+	return (&mdr->deferred_req);
+}
+
+static inline int
+do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *, int), struct ent *key,
+		struct cache_detail *detail, struct ent **item,
+		struct idmap_defer_req *mdr)
+{
+	*item = lookup_fn(key, 0);
+	if (!*item)
+		return -ENOMEM;
+	return cache_check(detail, &(*item)->h, &mdr->req);
+}
+
+static inline int
+do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *, int),
+			struct ent *key, struct cache_detail *detail,
+			struct ent **item)
+{
+	int ret = -ENOMEM;
+
+	*item = lookup_fn(key, 0);
+	if (!*item)
+		goto out_err;
+	ret = -ETIMEDOUT;
+	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
+			|| (*item)->h.expiry_time < get_seconds()
+			|| detail->flush_time > (*item)->h.last_refresh)
+		goto out_put;
+	ret = -ENOENT;
+	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
+		goto out_put;
+	return 0;
+out_put:
+	ent_put(&(*item)->h, detail);
+out_err:
+	*item = NULL;
+	return ret;
+}
+
+static int
+idmap_lookup(struct svc_rqst *rqstp,
+		struct ent *(*lookup_fn)(struct ent *, int), struct ent *key,
+		struct cache_detail *detail, struct ent **item)
+{
+	struct idmap_defer_req *mdr;
+	int ret;
+
+	mdr = kmalloc(sizeof(*mdr), GFP_KERNEL);
+	if (!mdr)
+		return -ENOMEM;
+	memset(mdr, 0, sizeof(*mdr));
+	atomic_set(&mdr->count, 1);
+	init_waitqueue_head(&mdr->waitq);
+	mdr->req.defer = idmap_defer;
+	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
+	if (ret == -EAGAIN) {
+		wait_event_interruptible_timeout(mdr->waitq,
+			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
+		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+	}
+	put_mdr(mdr);
+	return ret;
+}
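+
+/*
+ * Lookup flow: do_idmap_lookup() calls cache_check(), which returns -EAGAIN
+ * and queues an upcall if the entry is not yet valid; idmap_defer()
+ * registers idmap_revisit() so the waitqueue is woken once idmapd answers.
+ * idmap_lookup() then waits up to one second and retries with
+ * do_idmap_lookup_nowait(), which reports -ETIMEDOUT if the answer still
+ * has not arrived and -ENOENT for a negative entry.
+ */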
+
+static int
+idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
+		uid_t *id)
+{
+	struct ent *item, key = {
+		.type = type,
+	};
+	int ret;
+
+	if (namelen + 1 > sizeof(key.name))
+		return -EINVAL;
+	memcpy(key.name, name, namelen);
+	key.name[namelen] = '\0';
+	strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
+	if (ret == -ENOENT)
+		ret = -ESRCH; /* nfserr_badname */
+	if (ret)
+		return ret;
+	*id = item->id;
+	ent_put(&item->h, &nametoid_cache);
+	return 0;
+}
+
+static int
+idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
+{
+	struct ent *item, key = {
+		.id = id,
+		.type = type,
+	};
+	int ret;
+
+	strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+	ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
+	if (ret == -ENOENT)
+		return sprintf(name, "%u", id);
+	if (ret)
+		return ret;
+	ret = strlen(item->name);
+	BUG_ON(ret > IDMAP_NAMESZ);
+	memcpy(name, item->name, ret);
+	ent_put(&item->h, &idtoname_cache);
+	return ret;
+}
+
+int
+nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
+		__u32 *id)
+{
+	return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
+}
+
+int
+nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
+		__u32 *id)
+{
+	return idmap_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
+}
+
+int
+nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
+{
+	return idmap_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
+}
+
+int
+nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
+{
+	return idmap_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
+}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
new file mode 100644
index 0000000..e815874
--- /dev/null
+++ b/fs/nfsd/nfs4proc.c
@@ -0,0 +1,984 @@
+/*
+ *  fs/nfsd/nfs4proc.c
+ *
+ *  Server-side procedures for NFSv4.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *  Andy Adamson   <andros@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Note: some routines in this file are just trivial wrappers
+ * (e.g. nfsd4_lookup()) defined solely for the sake of consistent
+ * naming.  Since all such routines have been declared "inline",
+ * there shouldn't be any associated overhead.  At some point in
+ * the future, I might inline these "by hand" to clean up a
+ * little.
+ */
+
+#include <linux/param.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfs4.h>
+#include <linux/nfsd/state.h>
+#include <linux/nfsd/xdr4.h>
+#include <linux/nfs4_acl.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_PROC
+
+static inline void
+fh_dup2(struct svc_fh *dst, struct svc_fh *src)
+{
+	fh_put(dst);
+	dget(src->fh_dentry);
+	if (src->fh_export)
+		cache_get(&src->fh_export->h);
+	*dst = *src;
+}
+
+static int
+do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+{
+	int accmode, status;
+
+	if (open->op_truncate &&
+		!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
+		return nfserr_inval;
+
+	accmode = MAY_NOP;
+	if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
+		accmode = MAY_READ;
+	if (open->op_share_deny & NFS4_SHARE_ACCESS_WRITE)
+		accmode |= (MAY_WRITE | MAY_TRUNC);
+	accmode |= MAY_OWNER_OVERRIDE;
+
+	status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
+
+	return status;
+}
+
+static int
+do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+{
+	struct svc_fh resfh;
+	int status;
+
+	fh_init(&resfh, NFS4_FHSIZE);
+	open->op_truncate = 0;
+
+	if (open->op_create) {
+		/*
+		 * Note: create modes (UNCHECKED,GUARDED...) are the same
+		 * in NFSv4 as in v3.
+		 */
+		status = nfsd_create_v3(rqstp, current_fh, open->op_fname.data,
+					open->op_fname.len, &open->op_iattr,
+					&resfh, open->op_createmode,
+					(u32 *)open->op_verf.data, &open->op_truncate);
+	}
+	else {
+		status = nfsd_lookup(rqstp, current_fh,
+				     open->op_fname.data, open->op_fname.len, &resfh);
+		fh_unlock(current_fh);
+	}
+
+	if (!status) {
+		set_change_info(&open->op_cinfo, current_fh);
+
+		/* set reply cache */
+		fh_dup2(current_fh, &resfh);
+		open->op_stateowner->so_replay.rp_openfh_len =
+			resfh.fh_handle.fh_size;
+		memcpy(open->op_stateowner->so_replay.rp_openfh,
+				&resfh.fh_handle.fh_base,
+				resfh.fh_handle.fh_size);
+
+		status = do_open_permission(rqstp, current_fh, open);
+	}
+
+	fh_put(&resfh);
+	return status;
+}
+
+static int
+do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+{
+	int status;
+
+	/* Only reclaims from previously confirmed clients are valid */
+	if ((status = nfs4_check_open_reclaim(&open->op_clientid)))
+		return status;
+
+	/* We don't know the target directory, and therefore cannot
+	 * set the change info.
+	 */
+
+	memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
+
+	/* set replay cache */
+	open->op_stateowner->so_replay.rp_openfh_len = current_fh->fh_handle.fh_size;
+	memcpy(open->op_stateowner->so_replay.rp_openfh,
+		&current_fh->fh_handle.fh_base,
+		current_fh->fh_handle.fh_size);
+
+	open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
+		(open->op_iattr.ia_size == 0);
+
+	status = do_open_permission(rqstp, current_fh, open);
+
+	return status;
+}
+
+
+static inline int
+nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+{
+	int status;
+	dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
+		(int)open->op_fname.len, open->op_fname.data,
+		open->op_stateowner);
+
+	if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+		return nfserr_grace;
+
+	if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+		return nfserr_no_grace;
+
+	/* This check required by spec. */
+	if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
+		return nfserr_inval;
+
+	nfs4_lock_state();
+
+	/* check seqid for replay. set nfs4_owner */
+	status = nfsd4_process_open1(open);
+	if (status == NFSERR_REPLAY_ME) {
+		struct nfs4_replay *rp = &open->op_stateowner->so_replay;
+		fh_put(current_fh);
+		current_fh->fh_handle.fh_size = rp->rp_openfh_len;
+		memcpy(&current_fh->fh_handle.fh_base, rp->rp_openfh,
+				rp->rp_openfh_len);
+		status = fh_verify(rqstp, current_fh, 0, MAY_NOP);
+		if (status)
+			dprintk("nfsd4_open: replay failed"
+				" restoring previous filehandle\n");
+		else
+			status = NFSERR_REPLAY_ME;
+	}
+	if (status)
+		goto out;
+	switch (open->op_claim_type) {
+		case NFS4_OPEN_CLAIM_NULL:
+			/*
+			 * (1) set CURRENT_FH to the file being opened,
+			 * creating it if necessary, (2) set open->op_cinfo,
+			 * (3) set open->op_truncate if the file is to be
+			 * truncated after opening, (4) do permission checking.
+			 */
+			status = do_open_lookup(rqstp, current_fh, open);
+			if (status)
+				goto out;
+			break;
+		case NFS4_OPEN_CLAIM_PREVIOUS:
+			/*
+			 * The CURRENT_FH is already set to the file being
+			 * opened.  (1) set open->op_cinfo, (2) set
+			 * open->op_truncate if the file is to be truncated
+			 * after opening, (3) do permission checking.
+			 */
+			status = do_open_fhandle(rqstp, current_fh, open);
+			if (status)
+				goto out;
+			break;
+		case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+		case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+			printk("NFSD: unsupported OPEN claim type %d\n",
+				open->op_claim_type);
+			status = nfserr_notsupp;
+			goto out;
+		default:
+			printk("NFSD: Invalid OPEN claim type %d\n",
+				open->op_claim_type);
+			status = nfserr_inval;
+			goto out;
+	}
+	/*
+	 * nfsd4_process_open2() does the actual opening of the file.  If
+	 * successful, it (1) truncates the file if open->op_truncate was
+	 * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
+	 */
+	status = nfsd4_process_open2(rqstp, current_fh, open);
+out:
+	if (open->op_stateowner)
+		nfs4_get_stateowner(open->op_stateowner);
+	nfs4_unlock_state();
+	return status;
+}
+
+/*
+ * filehandle-manipulating ops.
+ */
+static inline int
+nfsd4_getfh(struct svc_fh *current_fh, struct svc_fh **getfh)
+{
+	if (!current_fh->fh_dentry)
+		return nfserr_nofilehandle;
+
+	*getfh = current_fh;
+	return nfs_ok;
+}
+
+static inline int
+nfsd4_putfh(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_putfh *putfh)
+{
+	fh_put(current_fh);
+	current_fh->fh_handle.fh_size = putfh->pf_fhlen;
+	memcpy(&current_fh->fh_handle.fh_base, putfh->pf_fhval, putfh->pf_fhlen);
+	return fh_verify(rqstp, current_fh, 0, MAY_NOP);
+}
+
+static inline int
+nfsd4_putrootfh(struct svc_rqst *rqstp, struct svc_fh *current_fh)
+{
+	int status;
+
+	fh_put(current_fh);
+	status = exp_pseudoroot(rqstp->rq_client, current_fh,
+			      &rqstp->rq_chandle);
+	if (!status)
+		status = nfserrno(nfsd_setuser(rqstp, current_fh->fh_export));
+	return status;
+}
+
+static inline int
+nfsd4_restorefh(struct svc_fh *current_fh, struct svc_fh *save_fh)
+{
+	if (!save_fh->fh_dentry)
+		return nfserr_restorefh;
+
+	fh_dup2(current_fh, save_fh);
+	return nfs_ok;
+}
+
+static inline int
+nfsd4_savefh(struct svc_fh *current_fh, struct svc_fh *save_fh)
+{
+	if (!current_fh->fh_dentry)
+		return nfserr_nofilehandle;
+
+	fh_dup2(save_fh, current_fh);
+	return nfs_ok;
+}
+
+/*
+ * misc nfsv4 ops
+ */
+static inline int
+nfsd4_access(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_access *access)
+{
+	if (access->ac_req_access & ~NFS3_ACCESS_FULL)
+		return nfserr_inval;
+
+	access->ac_resp_access = access->ac_req_access;
+	return nfsd_access(rqstp, current_fh, &access->ac_resp_access, &access->ac_supported);
+}
+
+static inline int
+nfsd4_commit(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_commit *commit)
+{
+	int status;
+
+	u32 *p = (u32 *)commit->co_verf.data;
+	*p++ = nfssvc_boot.tv_sec;
+	*p++ = nfssvc_boot.tv_usec;
+
+	status = nfsd_commit(rqstp, current_fh, commit->co_offset, commit->co_count);
+	if (status == nfserr_symlink)
+		status = nfserr_inval;
+	return status;
+}
+
+static int
+nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_create *create)
+{
+	struct svc_fh resfh;
+	int status;
+	dev_t rdev;
+
+	fh_init(&resfh, NFS4_FHSIZE);
+
+	status = fh_verify(rqstp, current_fh, S_IFDIR, MAY_CREATE);
+	if (status == nfserr_symlink)
+		status = nfserr_notdir;
+	if (status)
+		return status;
+
+	switch (create->cr_type) {
+	case NF4LNK:
+		/* ugh! we have to null-terminate the linktext, or
+		 * vfs_symlink() will choke.  it is always safe to
+		 * null-terminate by brute force, since at worst we
+		 * will overwrite the first byte of the create namelen
+		 * in the XDR buffer, which has already been extracted
+		 * during XDR decode.
+		 */
+		create->cr_linkname[create->cr_linklen] = 0;
+
+		status = nfsd_symlink(rqstp, current_fh, create->cr_name,
+				      create->cr_namelen, create->cr_linkname,
+				      create->cr_linklen, &resfh, &create->cr_iattr);
+		break;
+
+	case NF4BLK:
+		rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+		if (MAJOR(rdev) != create->cr_specdata1 ||
+		    MINOR(rdev) != create->cr_specdata2)
+			return nfserr_inval;
+		status = nfsd_create(rqstp, current_fh, create->cr_name,
+				     create->cr_namelen, &create->cr_iattr,
+				     S_IFBLK, rdev, &resfh);
+		break;
+
+	case NF4CHR:
+		rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+		if (MAJOR(rdev) != create->cr_specdata1 ||
+		    MINOR(rdev) != create->cr_specdata2)
+			return nfserr_inval;
+		status = nfsd_create(rqstp, current_fh, create->cr_name,
+				     create->cr_namelen, &create->cr_iattr,
+				     S_IFCHR, rdev, &resfh);
+		break;
+
+	case NF4SOCK:
+		status = nfsd_create(rqstp, current_fh, create->cr_name,
+				     create->cr_namelen, &create->cr_iattr,
+				     S_IFSOCK, 0, &resfh);
+		break;
+
+	case NF4FIFO:
+		status = nfsd_create(rqstp, current_fh, create->cr_name,
+				     create->cr_namelen, &create->cr_iattr,
+				     S_IFIFO, 0, &resfh);
+		break;
+
+	case NF4DIR:
+		create->cr_iattr.ia_valid &= ~ATTR_SIZE;
+		status = nfsd_create(rqstp, current_fh, create->cr_name,
+				     create->cr_namelen, &create->cr_iattr,
+				     S_IFDIR, 0, &resfh);
+		break;
+
+	default:
+		status = nfserr_badtype;
+	}
+
+	if (!status) {
+		fh_unlock(current_fh);
+		set_change_info(&create->cr_cinfo, current_fh);
+		fh_dup2(current_fh, &resfh);
+	}
+
+	fh_put(&resfh);
+	return status;
+}
+
+static inline int
+nfsd4_getattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_getattr *getattr)
+{
+	int status;
+
+	status = fh_verify(rqstp, current_fh, 0, MAY_NOP);
+	if (status)
+		return status;
+
+	if (getattr->ga_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
+		return nfserr_inval;
+
+	getattr->ga_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0;
+	getattr->ga_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1;
+
+	getattr->ga_fhp = current_fh;
+	return nfs_ok;
+}
+
+static inline int
+nfsd4_link(struct svc_rqst *rqstp, struct svc_fh *current_fh,
+	   struct svc_fh *save_fh, struct nfsd4_link *link)
+{
+	int status = nfserr_nofilehandle;
+
+	if (!save_fh->fh_dentry)
+		return status;
+	status = nfsd_link(rqstp, current_fh, link->li_name, link->li_namelen, save_fh);
+	if (!status)
+		set_change_info(&link->li_cinfo, current_fh);
+	return status;
+}
+
+static int
+nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh)
+{
+	struct svc_fh tmp_fh;
+	int ret;
+
+	fh_init(&tmp_fh, NFS4_FHSIZE);
+	if ((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh,
+			      &rqstp->rq_chandle)) != 0)
+		return ret;
+	if (tmp_fh.fh_dentry == current_fh->fh_dentry) {
+		fh_put(&tmp_fh);
+		return nfserr_noent;
+	}
+	fh_put(&tmp_fh);
+	return nfsd_lookup(rqstp, current_fh, "..", 2, current_fh);
+}
+
+static inline int
+nfsd4_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lookup *lookup)
+{
+	return nfsd_lookup(rqstp, current_fh, lookup->lo_name, lookup->lo_len, current_fh);
+}
+
+static inline int
+nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read *read)
+{
+	int status;
+	struct file *filp = NULL;
+
+	/* no need to check permission - this will be done in nfsd_read() */
+
+	if (read->rd_offset >= OFFSET_MAX)
+		return nfserr_inval;
+
+	nfs4_lock_state();
+	/* check stateid */
+	if ((status = nfs4_preprocess_stateid_op(current_fh, &read->rd_stateid,
+					CHECK_FH | RD_STATE, &filp))) {
+		dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
+		goto out;
+	}
+	status = nfs_ok;
+out:
+	nfs4_unlock_state();
+	read->rd_rqstp = rqstp;
+	read->rd_fhp = current_fh;
+	read->rd_filp = filp;
+	return status;
+}
+
+static inline int
+nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readdir *readdir)
+{
+	u64 cookie = readdir->rd_cookie;
+	static const nfs4_verifier zeroverf;
+
+	/* no need to check permission - this will be done in nfsd_readdir() */
+
+	if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
+		return nfserr_inval;
+
+	readdir->rd_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0;
+	readdir->rd_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1;
+
+	if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
+	    (cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
+		return nfserr_bad_cookie;
+
+	readdir->rd_rqstp = rqstp;
+	readdir->rd_fhp = current_fh;
+	return nfs_ok;
+}
+
+static inline int
+nfsd4_readlink(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readlink *readlink)
+{
+	readlink->rl_rqstp = rqstp;
+	readlink->rl_fhp = current_fh;
+	return nfs_ok;
+}
+
+static inline int
+nfsd4_remove(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_remove *remove)
+{
+	int status;
+
+	status = nfsd_unlink(rqstp, current_fh, 0, remove->rm_name, remove->rm_namelen);
+	if (status == nfserr_symlink)
+		return nfserr_notdir;
+	if (!status) {
+		fh_unlock(current_fh);
+		set_change_info(&remove->rm_cinfo, current_fh);
+	}
+	return status;
+}
+
+static inline int
+nfsd4_rename(struct svc_rqst *rqstp, struct svc_fh *current_fh,
+	     struct svc_fh *save_fh, struct nfsd4_rename *rename)
+{
+	int status = nfserr_nofilehandle;
+
+	if (!save_fh->fh_dentry)
+		return status;
+	status = nfsd_rename(rqstp, save_fh, rename->rn_sname,
+			     rename->rn_snamelen, current_fh,
+			     rename->rn_tname, rename->rn_tnamelen);
+
+	/* The underlying filesystem returns different errors than required
+	 * by NFSv4; both save_fh and current_fh have been verified. */
+	if (status == nfserr_isdir)
+		status = nfserr_exist;
+	else if ((status == nfserr_notdir) &&
+                  (S_ISDIR(save_fh->fh_dentry->d_inode->i_mode) &&
+                   S_ISDIR(current_fh->fh_dentry->d_inode->i_mode)))
+		status = nfserr_exist;
+	else if (status == nfserr_symlink)
+		status = nfserr_notdir;
+
+	if (!status) {
+		set_change_info(&rename->rn_sinfo, current_fh);
+		set_change_info(&rename->rn_tinfo, save_fh);
+	}
+	return status;
+}
+
+static inline int
+nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_setattr *setattr)
+{
+	int status = nfs_ok;
+
+	if (!current_fh->fh_dentry)
+		return nfserr_nofilehandle;
+
+	status = nfs_ok;
+	if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+		nfs4_lock_state();
+		if ((status = nfs4_preprocess_stateid_op(current_fh,
+						&setattr->sa_stateid,
+						CHECK_FH | WR_STATE, NULL))) {
+			dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
+			goto out_unlock;
+		}
+		nfs4_unlock_state();
+	}
+	status = nfs_ok;
+	if (setattr->sa_acl != NULL)
+		status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl);
+	if (status)
+		goto out;
+	status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr,
+				0, (time_t)0);
+out:
+	return status;
+out_unlock:
+	nfs4_unlock_state();
+	return status;
+}
+
+static inline int
+nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_write *write)
+{
+	stateid_t *stateid = &write->wr_stateid;
+	struct file *filp = NULL;
+	u32 *p;
+	int status = nfs_ok;
+
+	/* no need to check permission - this will be done in nfsd_write() */
+
+	if (write->wr_offset >= OFFSET_MAX)
+		return nfserr_inval;
+
+	nfs4_lock_state();
+	if ((status = nfs4_preprocess_stateid_op(current_fh, stateid,
+					CHECK_FH | WR_STATE, &filp))) {
+		dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+		goto out;
+	}
+	nfs4_unlock_state();
+
+	write->wr_bytes_written = write->wr_buflen;
+	write->wr_how_written = write->wr_stable_how;
+	p = (u32 *)write->wr_verifier.data;
+	*p++ = nfssvc_boot.tv_sec;
+	*p++ = nfssvc_boot.tv_usec;
+
+	status =  nfsd_write(rqstp, current_fh, filp, write->wr_offset,
+			write->wr_vec, write->wr_vlen, write->wr_buflen,
+			&write->wr_how_written);
+
+	if (status == nfserr_symlink)
+		status = nfserr_inval;
+	return status;
+out:
+	nfs4_unlock_state();
+	return status;
+}
+
+/* This routine never returns NFS_OK!  If there are no other errors, it
+ * will return NFSERR_SAME or NFSERR_NOT_SAME depending on whether the
+ * attributes matched.  VERIFY is implemented by mapping NFSERR_SAME
+ * to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
+ */
+static int
+nfsd4_verify(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_verify *verify)
+{
+	u32 *buf, *p;
+	int count;
+	int status;
+
+	status = fh_verify(rqstp, current_fh, 0, MAY_NOP);
+	if (status)
+		return status;
+
+	if ((verify->ve_bmval[0] & ~NFSD_SUPPORTED_ATTRS_WORD0)
+	    || (verify->ve_bmval[1] & ~NFSD_SUPPORTED_ATTRS_WORD1))
+		return nfserr_attrnotsupp;
+	if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
+	    || (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
+		return nfserr_inval;
+	if (verify->ve_attrlen & 3)
+		return nfserr_inval;
+
+	/* count in words:
+	 *   bitmap_len(1) + bitmap(2) + attr_len(1) = 4
+	 */
+	count = 4 + (verify->ve_attrlen >> 2);
+	buf = kmalloc(count << 2, GFP_KERNEL);
+	if (!buf)
+		return nfserr_resource;
+
+	status = nfsd4_encode_fattr(current_fh, current_fh->fh_export,
+				    current_fh->fh_dentry, buf,
+				    &count, verify->ve_bmval,
+				    rqstp);
+
+	/* this means that nfsd4_encode_fattr() ran out of space */
+	if (status == nfserr_resource && count == 0)
+		status = nfserr_not_same;
+	if (status)
+		goto out_kfree;
+
+	p = buf + 3;
+	status = nfserr_not_same;
+	if (ntohl(*p++) != verify->ve_attrlen)
+		goto out_kfree;
+	if (!memcmp(p, verify->ve_attrval, verify->ve_attrlen))
+		status = nfserr_same;
+
+out_kfree:
+	kfree(buf);
+	return status;
+}
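+
+/*
+ * The buffer compared above is assumed to be laid out as, in 32-bit words:
+ *
+ *	buf[0]		bitmap word count
+ *	buf[1..2]	the two bitmap words
+ *	buf[3]		attribute data length in bytes
+ *	buf[4..]	attribute data
+ *
+ * hence p = buf + 3 points at the encoded length, which is checked against
+ * ve_attrlen before the attribute data itself is memcmp()'d.
+ */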
+
+/*
+ * NULL call.
+ */
+static int
+nfsd4_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	return nfs_ok;
+}
+
+
+/*
+ * COMPOUND call.
+ */
+static int
+nfsd4_proc_compound(struct svc_rqst *rqstp,
+		    struct nfsd4_compoundargs *args,
+		    struct nfsd4_compoundres *resp)
+{
+	struct nfsd4_op	*op;
+	struct svc_fh	*current_fh = NULL;
+	struct svc_fh	*save_fh = NULL;
+	struct nfs4_stateowner *replay_owner = NULL;
+	int		slack_space;    /* in words, not bytes! */
+	int		status;
+
+	status = nfserr_resource;
+	current_fh = kmalloc(sizeof(*current_fh), GFP_KERNEL);
+	if (current_fh == NULL)
+		goto out;
+	fh_init(current_fh, NFS4_FHSIZE);
+	save_fh = kmalloc(sizeof(*save_fh), GFP_KERNEL);
+	if (save_fh == NULL)
+		goto out;
+	fh_init(save_fh, NFS4_FHSIZE);
+
+	resp->xbuf = &rqstp->rq_res;
+	resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len;
+	resp->tagp = resp->p;
+	/* reserve space for: taglen, tag, and opcnt */
+	resp->p += 2 + XDR_QUADLEN(args->taglen);
+	resp->end = rqstp->rq_res.head[0].iov_base + PAGE_SIZE;
+	resp->taglen = args->taglen;
+	resp->tag = args->tag;
+	resp->opcnt = 0;
+	resp->rqstp = rqstp;
+
+	/*
+	 * According to RFC3010, this takes precedence over all other errors.
+	 */
+	status = nfserr_minor_vers_mismatch;
+	if (args->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+		goto out;
+
+	status = nfs_ok;
+	while (!status && resp->opcnt < args->opcnt) {
+		op = &args->ops[resp->opcnt++];
+
+		/*
+		 * The XDR decode routines may have pre-set op->status;
+		 * for example, if there is a miscellaneous XDR error
+		 * it will be set to nfserr_bad_xdr.
+		 */
+		if (op->status)
+			goto encode_op;
+
+		/* We must be able to encode a successful response to
+		 * this operation, with enough room left over to encode a
+		 * failed response to the next operation.  If we don't
+		 * have enough room, fail with ERR_RESOURCE.
+		 */
+/* FIXME - is slack_space *really* words, or bytes??? - neilb */
+		slack_space = (char *)resp->end - (char *)resp->p;
+		if (slack_space < COMPOUND_SLACK_SPACE + COMPOUND_ERR_SLACK_SPACE) {
+			BUG_ON(slack_space < COMPOUND_ERR_SLACK_SPACE);
+			op->status = nfserr_resource;
+			goto encode_op;
+		}
+
+		/* All operations except RENEW, SETCLIENTID, RESTOREFH,
+		 * SETCLIENTID_CONFIRM, PUTFH, PUTROOTFH and RELEASE_LOCKOWNER
+		 * require a valid current filehandle.
+		 *
+		 * The SETATTR NOFILEHANDLE error is handled in nfsd4_setattr
+		 * due to the required returned bitmap argument.
+		 */
+		if ((!current_fh->fh_dentry) &&
+		   !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) ||
+		   (op->opnum == OP_SETCLIENTID) ||
+		   (op->opnum == OP_SETCLIENTID_CONFIRM) ||
+		   (op->opnum == OP_RENEW) || (op->opnum == OP_RESTOREFH) ||
+		   (op->opnum == OP_RELEASE_LOCKOWNER) ||
+		   (op->opnum == OP_SETATTR))) {
+			op->status = nfserr_nofilehandle;
+			goto encode_op;
+		}
+		switch (op->opnum) {
+		case OP_ACCESS:
+			op->status = nfsd4_access(rqstp, current_fh, &op->u.access);
+			break;
+		case OP_CLOSE:
+			op->status = nfsd4_close(rqstp, current_fh, &op->u.close);
+			replay_owner = op->u.close.cl_stateowner;
+			break;
+		case OP_COMMIT:
+			op->status = nfsd4_commit(rqstp, current_fh, &op->u.commit);
+			break;
+		case OP_CREATE:
+			op->status = nfsd4_create(rqstp, current_fh, &op->u.create);
+			break;
+		case OP_DELEGRETURN:
+			op->status = nfsd4_delegreturn(rqstp, current_fh, &op->u.delegreturn);
+			break;
+		case OP_GETATTR:
+			op->status = nfsd4_getattr(rqstp, current_fh, &op->u.getattr);
+			break;
+		case OP_GETFH:
+			op->status = nfsd4_getfh(current_fh, &op->u.getfh);
+			break;
+		case OP_LINK:
+			op->status = nfsd4_link(rqstp, current_fh, save_fh, &op->u.link);
+			break;
+		case OP_LOCK:
+			op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock);
+			replay_owner = op->u.lock.lk_stateowner;
+			break;
+		case OP_LOCKT:
+			op->status = nfsd4_lockt(rqstp, current_fh, &op->u.lockt);
+			break;
+		case OP_LOCKU:
+			op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku);
+			replay_owner = op->u.locku.lu_stateowner;
+			break;
+		case OP_LOOKUP:
+			op->status = nfsd4_lookup(rqstp, current_fh, &op->u.lookup);
+			break;
+		case OP_LOOKUPP:
+			op->status = nfsd4_lookupp(rqstp, current_fh);
+			break;
+		case OP_NVERIFY:
+			op->status = nfsd4_verify(rqstp, current_fh, &op->u.nverify);
+			if (op->status == nfserr_not_same)
+				op->status = nfs_ok;
+			break;
+		case OP_OPEN:
+			op->status = nfsd4_open(rqstp, current_fh, &op->u.open);
+			replay_owner = op->u.open.op_stateowner;
+			break;
+		case OP_OPEN_CONFIRM:
+			op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm);
+			replay_owner = op->u.open_confirm.oc_stateowner;
+			break;
+		case OP_OPEN_DOWNGRADE:
+			op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade);
+			replay_owner = op->u.open_downgrade.od_stateowner;
+			break;
+		case OP_PUTFH:
+			op->status = nfsd4_putfh(rqstp, current_fh, &op->u.putfh);
+			break;
+		case OP_PUTROOTFH:
+			op->status = nfsd4_putrootfh(rqstp, current_fh);
+			break;
+		case OP_READ:
+			op->status = nfsd4_read(rqstp, current_fh, &op->u.read);
+			break;
+		case OP_READDIR:
+			op->status = nfsd4_readdir(rqstp, current_fh, &op->u.readdir);
+			break;
+		case OP_READLINK:
+			op->status = nfsd4_readlink(rqstp, current_fh, &op->u.readlink);
+			break;
+		case OP_REMOVE:
+			op->status = nfsd4_remove(rqstp, current_fh, &op->u.remove);
+			break;
+		case OP_RENAME:
+			op->status = nfsd4_rename(rqstp, current_fh, save_fh, &op->u.rename);
+			break;
+		case OP_RENEW:
+			op->status = nfsd4_renew(&op->u.renew);
+			break;
+		case OP_RESTOREFH:
+			op->status = nfsd4_restorefh(current_fh, save_fh);
+			break;
+		case OP_SAVEFH:
+			op->status = nfsd4_savefh(current_fh, save_fh);
+			break;
+		case OP_SETATTR:
+			op->status = nfsd4_setattr(rqstp, current_fh, &op->u.setattr);
+			break;
+		case OP_SETCLIENTID:
+			op->status = nfsd4_setclientid(rqstp, &op->u.setclientid);
+			break;
+		case OP_SETCLIENTID_CONFIRM:
+			op->status = nfsd4_setclientid_confirm(rqstp, &op->u.setclientid_confirm);
+			break;
+		case OP_VERIFY:
+			op->status = nfsd4_verify(rqstp, current_fh, &op->u.verify);
+			if (op->status == nfserr_same)
+				op->status = nfs_ok;
+			break;
+		case OP_WRITE:
+			op->status = nfsd4_write(rqstp, current_fh, &op->u.write);
+			break;
+		case OP_RELEASE_LOCKOWNER:
+			op->status = nfsd4_release_lockowner(rqstp, &op->u.release_lockowner);
+			break;
+		default:
+			BUG_ON(op->status == nfs_ok);
+			break;
+		}
+
+encode_op:
+		if (op->status == NFSERR_REPLAY_ME) {
+			op->replay = &replay_owner->so_replay;
+			nfsd4_encode_replay(resp, op);
+			status = op->status = op->replay->rp_status;
+		} else {
+			nfsd4_encode_operation(resp, op);
+			status = op->status;
+		}
+		if (replay_owner && (replay_owner != (void *)(-1))) {
+			nfs4_put_stateowner(replay_owner);
+			replay_owner = NULL;
+		}
+	}
+
+out:
+	nfsd4_release_compoundargs(args);
+	if (current_fh)
+		fh_put(current_fh);
+	kfree(current_fh);
+	if (save_fh)
+		fh_put(save_fh);
+	kfree(save_fh);
+	return status;
+}
+
+#define nfs4svc_decode_voidargs		NULL
+#define nfs4svc_release_void		NULL
+#define nfsd4_voidres			nfsd4_voidargs
+#define nfs4svc_release_compound	NULL
+struct nfsd4_voidargs { int dummy; };
+
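+/*
+ * Each PROC() entry below builds one svc_procedure: the dispatch function,
+ * the XDR decode/encode and release routines, the sizes of the argument
+ * and result structures, the reply-cache policy, and an estimate of the
+ * reply size handed to the sunrpc layer.
+ */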
+#define PROC(name, argt, rest, relt, cache, respsize)	\
+ { (svc_procfunc) nfsd4_proc_##name,		\
+   (kxdrproc_t) nfs4svc_decode_##argt##args,	\
+   (kxdrproc_t) nfs4svc_encode_##rest##res,	\
+   (kxdrproc_t) nfs4svc_release_##relt,		\
+   sizeof(struct nfsd4_##argt##args),		\
+   sizeof(struct nfsd4_##rest##res),		\
+   0,						\
+   cache,					\
+   respsize,					\
+ }
+
+/*
+ * TODO: At the present time, the NFSv4 server does not do XID caching
+ * of requests.  Implementing XID caching would not be a serious problem,
+ * although it would require a mild change in interfaces since one
+ * doesn't know whether an NFSv4 request is idempotent until after the
+ * XDR decode.  However, XID caching totally confuses pynfs (Peter
+ * Astrand's regression testsuite for NFSv4 servers), which reuses
+ * XIDs liberally, so I've left it unimplemented until pynfs generates
+ * better XIDs.
+ */
+static struct svc_procedure		nfsd_procedures4[2] = {
+  PROC(null,	 void,		void,		void,	  RC_NOCACHE, 1),
+  PROC(compound, compound,	compound,	compound, RC_NOCACHE, NFSD_BUFSIZE)
+};
+
+struct svc_version	nfsd_version4 = {
+		.vs_vers	= 4,
+		.vs_nproc	= 2,
+		.vs_proc	= nfsd_procedures4,
+		.vs_dispatch	= nfsd_dispatch,
+		.vs_xdrsize	= NFS4_SVC_XDRSIZE,
+};
+
+/*
+ * Local variables:
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
new file mode 100644
index 0000000..579f7fe
--- /dev/null
+++ b/fs/nfsd/nfs4state.c
@@ -0,0 +1,3320 @@
+/*
+*  linux/fs/nfsd/nfs4state.c
+*
+*  Copyright (c) 2001 The Regents of the University of Michigan.
+*  All rights reserved.
+*
+*  Kendrick Smith <kmsmith@umich.edu>
+*  Andy Adamson <kandros@umich.edu>
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions
+*  are met:
+*
+*  1. Redistributions of source code must retain the above copyright
+*     notice, this list of conditions and the following disclaimer.
+*  2. Redistributions in binary form must reproduce the above copyright
+*     notice, this list of conditions and the following disclaimer in the
+*     documentation and/or other materials provided with the distribution.
+*  3. Neither the name of the University nor the names of its
+*     contributors may be used to endorse or promote products derived
+*     from this software without specific prior written permission.
+*
+*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include <linux/param.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/mount.h>
+#include <linux/workqueue.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/nfs4.h>
+#include <linux/nfsd/state.h>
+#include <linux/nfsd/xdr4.h>
+
+#define NFSDDBG_FACILITY                NFSDDBG_PROC
+
+/* Globals */
+static time_t lease_time = 90;     /* default lease time */
+static time_t old_lease_time = 90; /* past incarnation lease time */
+static u32 nfs4_reclaim_init = 0;
+time_t boot_time;
+static time_t grace_end = 0;
+static u32 current_clientid = 1;
+static u32 current_ownerid = 1;
+static u32 current_fileid = 1;
+static u32 current_delegid = 1;
+static u32 nfs4_init;
+stateid_t zerostateid;             /* bits all 0 */
+stateid_t onestateid;              /* bits all 1 */
+
+/* debug counters */
+u32 list_add_perfile = 0; 
+u32 list_del_perfile = 0;
+u32 add_perclient = 0;
+u32 del_perclient = 0;
+u32 alloc_file = 0;
+u32 free_file = 0;
+u32 vfsopen = 0;
+u32 vfsclose = 0;
+u32 alloc_delegation= 0;
+u32 free_delegation= 0;
+
+/* forward declarations */
+struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
+static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
+static void release_stateid_lockowners(struct nfs4_stateid *open_stp);
+
+/* Locking:
+ *
+ * client_sema:
+ * 	protects conf_id_hashtbl[], conf_str_hashtbl[],
+ * 	unconf_str_hashtbl[], unconf_id_hashtbl[].
+ */
+static DECLARE_MUTEX(client_sema);
+
+void
+nfs4_lock_state(void)
+{
+	down(&client_sema);
+}
+
+void
+nfs4_unlock_state(void)
+{
+	up(&client_sema);
+}
+
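+/*
+ * Byte-wise hash (x = x * 37 + byte) over an opaque buffer; used by the
+ * client-name and owner-name hash macros below to pick a bucket.
+ */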
+static inline u32
+opaque_hashval(const void *ptr, int nbytes)
+{
+	unsigned char *cptr = (unsigned char *) ptr;
+
+	u32 x = 0;
+	while (nbytes--) {
+		x *= 37;
+		x += *cptr++;
+	}
+	return x;
+}
+
+/* forward declarations */
+static void release_stateowner(struct nfs4_stateowner *sop);
+static void release_stateid(struct nfs4_stateid *stp, int flags);
+static void release_file(struct nfs4_file *fp);
+
+/*
+ * Delegation state
+ */
+
+/* recall_lock protects the del_recall_lru */
+spinlock_t recall_lock;
+static struct list_head del_recall_lru;
+
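+/*
+ * Allocate and initialise a delegation for an open stateid: take a
+ * reference on the open's struct file, stamp a fresh delegation stateid,
+ * record the filehandle for later recalls, and link the delegation onto
+ * the per-file and per-client lists.
+ */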
+static struct nfs4_delegation *
+alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
+{
+	struct nfs4_delegation *dp;
+	struct nfs4_file *fp = stp->st_file;
+	struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
+
+	dprintk("NFSD alloc_init_deleg\n");
+	if ((dp = kmalloc(sizeof(struct nfs4_delegation),
+		GFP_KERNEL)) == NULL)
+		return dp;
+	INIT_LIST_HEAD(&dp->dl_del_perfile);
+	INIT_LIST_HEAD(&dp->dl_del_perclnt);
+	INIT_LIST_HEAD(&dp->dl_recall_lru);
+	dp->dl_client = clp;
+	dp->dl_file = fp;
+	dp->dl_flock = NULL;
+	get_file(stp->st_vfs_file);
+	dp->dl_vfs_file = stp->st_vfs_file;
+	dp->dl_type = type;
+	dp->dl_recall.cbr_dp = NULL;
+	dp->dl_recall.cbr_ident = cb->cb_ident;
+	dp->dl_recall.cbr_trunc = 0;
+	dp->dl_stateid.si_boot = boot_time;
+	dp->dl_stateid.si_stateownerid = current_delegid++;
+	dp->dl_stateid.si_fileid = 0;
+	dp->dl_stateid.si_generation = 0;
+	dp->dl_fhlen = current_fh->fh_handle.fh_size;
+	memcpy(dp->dl_fhval, &current_fh->fh_handle.fh_base,
+		        current_fh->fh_handle.fh_size);
+	dp->dl_time = 0;
+	atomic_set(&dp->dl_count, 1);
+	list_add(&dp->dl_del_perfile, &fp->fi_del_perfile);
+	list_add(&dp->dl_del_perclnt, &clp->cl_del_perclnt);
+	alloc_delegation++;
+	return dp;
+}
+
+void
+nfs4_put_delegation(struct nfs4_delegation *dp)
+{
+	if (atomic_dec_and_test(&dp->dl_count)) {
+		dprintk("NFSD: freeing dp %p\n",dp);
+		kfree(dp);
+		free_delegation++;
+	}
+}
+
+/* Remove the associated file_lock first, then remove the delegation.
+ * lease_modify() is called to remove the FS_LEASE file_lock from
+ * the i_flock list, eventually calling nfsd's lock_manager
+ * fl_release_callback.
+ */
+static void
+nfs4_close_delegation(struct nfs4_delegation *dp)
+{
+	struct file *filp = dp->dl_vfs_file;
+
+	dprintk("NFSD: close_delegation dp %p\n",dp);
+	dp->dl_vfs_file = NULL;
+	/* The following nfsd_close may not actually close the file,
+	 * but we want to remove the lease in any case. */
+	setlease(filp, F_UNLCK, &dp->dl_flock);
+	nfsd_close(filp);
+	vfsclose++;
+}
+
+/* Called under the state lock. */
+static void
+unhash_delegation(struct nfs4_delegation *dp)
+{
+	list_del_init(&dp->dl_del_perfile);
+	list_del_init(&dp->dl_del_perclnt);
+	spin_lock(&recall_lock);
+	list_del_init(&dp->dl_recall_lru);
+	spin_unlock(&recall_lock);
+	nfs4_close_delegation(dp);
+	nfs4_put_delegation(dp);
+}
+
+/* 
+ * SETCLIENTID state 
+ */
+
+/* Hash tables for nfs4_clientid state */
+#define CLIENT_HASH_BITS                 4
+#define CLIENT_HASH_SIZE                (1 << CLIENT_HASH_BITS)
+#define CLIENT_HASH_MASK                (CLIENT_HASH_SIZE - 1)
+
+#define clientid_hashval(id) \
+	((id) & CLIENT_HASH_MASK)
+#define clientstr_hashval(name, namelen) \
+	(opaque_hashval((name), (namelen)) & CLIENT_HASH_MASK)
+/*
+ * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
+ * used in reboot/reset lease grace period processing
+ *
+ * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
+ * setclientid_confirmed info. 
+ *
+ * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
+ * setclientid info.
+ *
+ * client_lru holds client queue ordered by nfs4_client.cl_time
+ * for lease renewal.
+ *
+ * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
+ * for last close replay.
+ */
+static struct list_head	reclaim_str_hashtbl[CLIENT_HASH_SIZE];
+static int reclaim_str_hashtbl_size = 0;
+static struct list_head	conf_id_hashtbl[CLIENT_HASH_SIZE];
+static struct list_head	conf_str_hashtbl[CLIENT_HASH_SIZE];
+static struct list_head	unconf_str_hashtbl[CLIENT_HASH_SIZE];
+static struct list_head	unconf_id_hashtbl[CLIENT_HASH_SIZE];
+static struct list_head client_lru;
+static struct list_head close_lru;
+
+static inline void
+renew_client(struct nfs4_client *clp)
+{
+	/*
+	 * Move the client to the end of the LRU list.
+	 */
+	dprintk("renewing client (clientid %08x/%08x)\n", 
+			clp->cl_clientid.cl_boot, 
+			clp->cl_clientid.cl_id);
+	list_move_tail(&clp->cl_lru, &client_lru);
+	clp->cl_time = get_seconds();
+}
+
+/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
+static int
+STALE_CLIENTID(clientid_t *clid)
+{
+	if (clid->cl_boot == boot_time)
+		return 0;
+	dprintk("NFSD stale clientid (%08x/%08x)\n", 
+			clid->cl_boot, clid->cl_id);
+	return 1;
+}
+
+/* 
+ * XXX Should we use a slab cache ?
+ * This type of memory management is somewhat inefficient, but we use it
+ * anyway since SETCLIENTID is not a common operation.
+ */
+static inline struct nfs4_client *
+alloc_client(struct xdr_netobj name)
+{
+	struct nfs4_client *clp;
+
+	if ((clp = kmalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) {
+		memset(clp, 0, sizeof(*clp));
+		if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) {
+			memcpy(clp->cl_name.data, name.data, name.len);
+			clp->cl_name.len = name.len;
+		}
+		else {
+			kfree(clp);
+			clp = NULL;
+		}
+	}
+	return clp;
+}
+
+static inline void
+free_client(struct nfs4_client *clp)
+{
+	if (clp->cl_cred.cr_group_info)
+		put_group_info(clp->cl_cred.cr_group_info);
+	kfree(clp->cl_name.data);
+	kfree(clp);
+}
+
+void
+put_nfs4_client(struct nfs4_client *clp)
+{
+	if (atomic_dec_and_test(&clp->cl_count))
+		free_client(clp);
+}
+
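+/*
+ * Tear down all state held by a client: shut down its callback RPC
+ * client, reap its delegations (collected under recall_lock, then
+ * unhashed), unhash the client from the id/str/lru lists, release every
+ * stateowner, and drop the remaining reference.
+ */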
+static void
+expire_client(struct nfs4_client *clp)
+{
+	struct nfs4_stateowner *sop;
+	struct nfs4_delegation *dp;
+	struct nfs4_callback *cb = &clp->cl_callback;
+	struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+	struct list_head reaplist;
+
+	dprintk("NFSD: expire_client cl_count %d\n",
+	                    atomic_read(&clp->cl_count));
+
+	/* shutdown rpc client, ending any outstanding recall rpcs */
+	if (atomic_read(&cb->cb_set) == 1 && clnt) {
+		rpc_shutdown_client(clnt);
+		clnt = clp->cl_callback.cb_client = NULL;
+	}
+
+	INIT_LIST_HEAD(&reaplist);
+	spin_lock(&recall_lock);
+	while (!list_empty(&clp->cl_del_perclnt)) {
+		dp = list_entry(clp->cl_del_perclnt.next, struct nfs4_delegation, dl_del_perclnt);
+		dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
+				dp->dl_flock);
+		list_del_init(&dp->dl_del_perclnt);
+		list_move(&dp->dl_recall_lru, &reaplist);
+	}
+	spin_unlock(&recall_lock);
+	while (!list_empty(&reaplist)) {
+		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+		list_del_init(&dp->dl_recall_lru);
+		unhash_delegation(dp);
+	}
+	list_del(&clp->cl_idhash);
+	list_del(&clp->cl_strhash);
+	list_del(&clp->cl_lru);
+	while (!list_empty(&clp->cl_perclient)) {
+		sop = list_entry(clp->cl_perclient.next, struct nfs4_stateowner, so_perclient);
+		release_stateowner(sop);
+	}
+	put_nfs4_client(clp);
+}
+
+static struct nfs4_client *
+create_client(struct xdr_netobj name) {
+	struct nfs4_client *clp;
+
+	if (!(clp = alloc_client(name)))
+		goto out;
+	atomic_set(&clp->cl_count, 1);
+	atomic_set(&clp->cl_callback.cb_set, 0);
+	clp->cl_callback.cb_parsed = 0;
+	INIT_LIST_HEAD(&clp->cl_idhash);
+	INIT_LIST_HEAD(&clp->cl_strhash);
+	INIT_LIST_HEAD(&clp->cl_perclient);
+	INIT_LIST_HEAD(&clp->cl_del_perclnt);
+	INIT_LIST_HEAD(&clp->cl_lru);
+out:
+	return clp;
+}
+
+static void
+copy_verf(struct nfs4_client *target, nfs4_verifier *source) {
+	memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data));
+}
+
+static void
+copy_clid(struct nfs4_client *target, struct nfs4_client *source) {
+	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
+	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
+}
+
+static void
+copy_cred(struct svc_cred *target, struct svc_cred *source) {
+
+	target->cr_uid = source->cr_uid;
+	target->cr_gid = source->cr_gid;
+	target->cr_group_info = source->cr_group_info;
+	get_group_info(target->cr_group_info);
+}
+
+static int
+cmp_name(struct xdr_netobj *n1, struct xdr_netobj *n2) {
+	if (!n1 || !n2)
+		return 0;
+	return((n1->len == n2->len) && !memcmp(n1->data, n2->data, n2->len));
+}
+
+static int
+cmp_verf(nfs4_verifier *v1, nfs4_verifier *v2) {
+	return(!memcmp(v1->data,v2->data,sizeof(v1->data)));
+}
+
+static int
+cmp_clid(clientid_t * cl1, clientid_t * cl2) {
+	return((cl1->cl_boot == cl2->cl_boot) &&
+	   	(cl1->cl_id == cl2->cl_id));
+}
+
+/* XXX what about NGROUP */
+static int
+cmp_creds(struct svc_cred *cr1, struct svc_cred *cr2) {
+	return(cr1->cr_uid == cr2->cr_uid);
+}
+
+static void
+gen_clid(struct nfs4_client *clp) {
+	clp->cl_clientid.cl_boot = boot_time;
+	clp->cl_clientid.cl_id = current_clientid++; 
+}
+
+static void
+gen_confirm(struct nfs4_client *clp) {
+	struct timespec 	tv;
+	u32 *			p;
+
+	tv = CURRENT_TIME;
+	p = (u32 *)clp->cl_confirm.data;
+	*p++ = tv.tv_sec;
+	*p++ = tv.tv_nsec;
+}
+
+static int
+check_name(struct xdr_netobj name) {
+
+	if (name.len == 0) 
+		return 0;
+	if (name.len > NFS4_OPAQUE_LIMIT) {
+		printk("NFSD: check_name: name too long(%d)!\n", name.len);
+		return 0;
+	}
+	return 1;
+}
+
+void
+add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
+{
+	unsigned int idhashval;
+
+	list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
+	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
+	list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
+	list_add_tail(&clp->cl_lru, &client_lru);
+	clp->cl_time = get_seconds();
+}
+
+void
+move_to_confirmed(struct nfs4_client *clp)
+{
+	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
+	unsigned int strhashval;
+
+	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
+	list_del_init(&clp->cl_strhash);
+	list_del_init(&clp->cl_idhash);
+	list_add(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
+	strhashval = clientstr_hashval(clp->cl_name.data, 
+			clp->cl_name.len);
+	list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
+	renew_client(clp);
+}
+
+static struct nfs4_client *
+find_confirmed_client(clientid_t *clid)
+{
+	struct nfs4_client *clp;
+	unsigned int idhashval = clientid_hashval(clid->cl_id);
+
+	list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
+		if (cmp_clid(&clp->cl_clientid, clid))
+			return clp;
+	}
+	return NULL;
+}
+
+static struct nfs4_client *
+find_unconfirmed_client(clientid_t *clid)
+{
+	struct nfs4_client *clp;
+	unsigned int idhashval = clientid_hashval(clid->cl_id);
+
+	list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
+		if (cmp_clid(&clp->cl_clientid, clid))
+			return clp;
+	}
+	return NULL;
+}
+
+/* a helper function for parse_callback */
+static int
+parse_octet(unsigned int *lenp, char **addrp)
+{
+	unsigned int len = *lenp;
+	char *p = *addrp;
+	int n = -1;
+	char c;
+
+	for (;;) {
+		if (!len)
+			break;
+		len--;
+		c = *p++;
+		if (c == '.')
+			break;
+		if ((c < '0') || (c > '9')) {
+			n = -1;
+			break;
+		}
+		if (n < 0)
+			n = 0;
+		n = (n * 10) + (c - '0');
+		if (n > 255) {
+			n = -1;
+			break;
+		}
+	}
+	*lenp = len;
+	*addrp = p;
+	return n;
+}
+
+/* parse and set the setclientid ipv4 callback address */
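+/*
+ * The address arrives in the NFSv4 universal-address form
+ * "h1.h2.h3.h4.p1.p2": four dotted-decimal octets of IPv4 address
+ * followed by two octets encoding the port as p1 * 256 + p2.
+ */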
+int
+parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp)
+{
+	int temp = 0;
+	u32 cbaddr = 0;
+	u16 cbport = 0;
+	u32 addrlen = addr_len;
+	char *addr = addr_val;
+	int i, shift;
+
+	/* ipaddress */
+	shift = 24;
+	for(i = 4; i > 0  ; i--) {
+		if ((temp = parse_octet(&addrlen, &addr)) < 0) {
+			return 0;
+		}
+		cbaddr |= (temp << shift);
+		if (shift > 0)
+			shift -= 8;
+	}
+	*cbaddrp = cbaddr;
+
+	/* port */
+	shift = 8;
+	for(i = 2; i > 0  ; i--) {
+		if ((temp = parse_octet(&addrlen, &addr)) < 0) {
+			return 0;
+		}
+		cbport |= (temp << shift);
+		if (shift > 0)
+			shift -= 8;
+	}
+	*cbportp = cbport;
+	return 1;
+}
+
+void
+gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
+{
+	struct nfs4_callback *cb = &clp->cl_callback;
+
+	/* Currently, we only support tcp for the callback channel */
+	if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
+		goto out_err;
+
+	if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
+	                 &cb->cb_addr, &cb->cb_port)))
+		goto out_err;
+	cb->cb_prog = se->se_callback_prog;
+	cb->cb_ident = se->se_callback_ident;
+	cb->cb_parsed = 1;
+	return;
+out_err:
+	printk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
+		"will not receive delegations\n",
+		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+
+	cb->cb_parsed = 0;
+	return;
+}
+
+/*
+ * RFC 3010 has a complex implementation description of processing a
+ * SETCLIENTID request consisting of 5 bullets, labeled as 
+ * CASE0 - CASE4 below.
+ *
+ * NOTES:
+ * 	callback information will be processed in a future patch
+ *
+ *	an unconfirmed record is added when:
+ *      NORMAL (part of CASE 4): there is no confirmed nor unconfirmed record.
+ *	CASE 1: confirmed record found with matching name, principal,
+ *		verifier, and clientid.
+ *	CASE 2: confirmed record found with matching name, principal,
+ *		and there is no unconfirmed record with matching
+ *		name and principal
+ *
+ *      an unconfirmed record is replaced when:
+ *	CASE 3: confirmed record found with matching name, principal,
+ *		and an unconfirmed record is found with matching 
+ *		name, principal, and with clientid and
+ *		confirm that does not match the confirmed record.
+ *	CASE 4: there is no confirmed record with matching name and 
+ *		principal. there is an unconfirmed record with 
+ *		matching name, principal.
+ *
+ *	an unconfirmed record is deleted when:
+ *	CASE 1: an unconfirmed record that matches input name, verifier,
+ *		and confirmed clientid.
+ *	CASE 4: any unconfirmed records with matching name and principal
+ *		that exist after an unconfirmed record has been replaced
+ *		as described above.
+ *
+ */
+int
+nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_setclientid *setclid)
+{
+	u32 			ip_addr = rqstp->rq_addr.sin_addr.s_addr;
+	struct xdr_netobj 	clname = { 
+		.len = setclid->se_namelen,
+		.data = setclid->se_name,
+	};
+	nfs4_verifier		clverifier = setclid->se_verf;
+	unsigned int 		strhashval;
+	struct nfs4_client *	conf, * unconf, * new, * clp;
+	int 			status;
+	
+	status = nfserr_inval;
+	if (!check_name(clname))
+		goto out;
+
+	/* 
+	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
+	 * We get here on a DRC miss.
+	 */
+
+	strhashval = clientstr_hashval(clname.data, clname.len);
+
+	conf = NULL;
+	nfs4_lock_state();
+	list_for_each_entry(clp, &conf_str_hashtbl[strhashval], cl_strhash) {
+		if (!cmp_name(&clp->cl_name, &clname))
+			continue;
+		/* 
+		 * CASE 0:
+		 * clname match, confirmed, different principal
+		 * or different ip_address
+		 */
+		status = nfserr_clid_inuse;
+		if (!cmp_creds(&clp->cl_cred,&rqstp->rq_cred)) {
+			printk("NFSD: setclientid: string in use by client "
+			"(clientid %08x/%08x)\n",
+			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+			goto out;
+		}
+		if (clp->cl_addr != ip_addr) { 
+			printk("NFSD: setclientid: string in use by client "
+			"(clientid %08x/%08x)\n",
+			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+			goto out;
+		}
+
+		/* 
+	 	 * cl_name match from a previous SETCLIENTID operation
+	 	 * XXX check for additional matches?
+		 */
+		conf = clp;
+		break;
+	}
+	unconf = NULL;
+	list_for_each_entry(clp, &unconf_str_hashtbl[strhashval], cl_strhash) {
+		if (!cmp_name(&clp->cl_name, &clname))
+			continue;
+		/* cl_name match from a previous SETCLIENTID operation */
+		unconf = clp;
+		break;
+	}
+	status = nfserr_resource;
+	if (!conf) {
+		/* 
+		 * CASE 4:
+		 * placed first, because it is the normal case.
+		 */
+		if (unconf)
+			expire_client(unconf);
+		if (!(new = create_client(clname)))
+			goto out;
+		copy_verf(new, &clverifier);
+		new->cl_addr = ip_addr;
+		copy_cred(&new->cl_cred,&rqstp->rq_cred);
+		gen_clid(new);
+		gen_confirm(new);
+		gen_callback(new, setclid);
+		add_to_unconfirmed(new, strhashval);
+	} else if (cmp_verf(&conf->cl_verifier, &clverifier)) {
+		/*
+		 * CASE 1:
+		 * cl_name match, confirmed, principal match
+		 * verifier match: probable callback update
+		 *
+		 * remove any unconfirmed nfs4_client with 
+		 * matching cl_name, cl_verifier, and cl_clientid
+		 *
+		 * create and insert an unconfirmed nfs4_client with same 
+		 * cl_name, cl_verifier, and cl_clientid as existing 
+		 * nfs4_client,  but with the new callback info and a 
+		 * new cl_confirm
+		 */
+		if ((unconf) && 
+		    cmp_verf(&unconf->cl_verifier, &conf->cl_verifier) &&
+		     cmp_clid(&unconf->cl_clientid, &conf->cl_clientid)) {
+				expire_client(unconf);
+		}
+		if (!(new = create_client(clname)))
+			goto out;
+		copy_verf(new,&conf->cl_verifier);
+		new->cl_addr = ip_addr;
+		copy_cred(&new->cl_cred,&rqstp->rq_cred);
+		copy_clid(new, conf);
+		gen_confirm(new);
+		gen_callback(new, setclid);
+		add_to_unconfirmed(new,strhashval);
+	} else if (!unconf) {
+		/*
+		 * CASE 2:
+		 * clname match, confirmed, principal match
+		 * verifier does not match
+		 * no unconfirmed. create a new unconfirmed nfs4_client
+		 * using input clverifier, clname, and callback info
+		 * and generate a new cl_clientid and cl_confirm.
+		 */
+		if (!(new = create_client(clname)))
+			goto out;
+		copy_verf(new,&clverifier);
+		new->cl_addr = ip_addr;
+		copy_cred(&new->cl_cred,&rqstp->rq_cred);
+		gen_clid(new);
+		gen_confirm(new);
+		gen_callback(new, setclid);
+		add_to_unconfirmed(new, strhashval);
+	} else if (!cmp_verf(&conf->cl_confirm, &unconf->cl_confirm)) {
+		/*	
+		 * CASE3:
+		 * confirmed found (name, principal match)
+		 * confirmed verifier does not match input clverifier
+		 *
+		 * unconfirmed found (name match)
+		 * confirmed->cl_confirm != unconfirmed->cl_confirm
+		 *
+		 * remove unconfirmed.
+		 *
+		 * create an unconfirmed nfs4_client 
+		 * with same cl_name as existing confirmed nfs4_client, 
+		 * but with new callback info, new cl_clientid,
+		 * new cl_verifier and a new cl_confirm
+		 */
+		expire_client(unconf);
+		if (!(new = create_client(clname)))
+			goto out;
+		copy_verf(new,&clverifier);
+		new->cl_addr = ip_addr;
+		copy_cred(&new->cl_cred,&rqstp->rq_cred);
+		gen_clid(new);
+		gen_confirm(new);
+		gen_callback(new, setclid);
+		add_to_unconfirmed(new, strhashval);
+	} else {
+		/* No cases hit !!! */
+		status = nfserr_inval;
+		goto out;
+
+	}
+	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
+	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
+	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
+	status = nfs_ok;
+out:
+	nfs4_unlock_state();
+	return status;
+}
+
+
+/*
+ * RFC 3010 has a complex implementation description of processing a
+ * SETCLIENTID_CONFIRM request consisting of 4 bullets describing
+ * processing on a DRC miss, labeled as CASE1 - CASE4 below.
+ *
+ * NOTE: callback information will be processed here in a future patch
+ */
+int
+nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_setclientid_confirm *setclientid_confirm)
+{
+	u32 ip_addr = rqstp->rq_addr.sin_addr.s_addr;
+	struct nfs4_client *clp, *conf = NULL, *unconf = NULL;
+	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
+	clientid_t * clid = &setclientid_confirm->sc_clientid;
+	int status;
+
+	if (STALE_CLIENTID(clid))
+		return nfserr_stale_clientid;
+	/* 
+	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
+	 * We get here on a DRC miss.
+	 */
+
+	nfs4_lock_state();
+	clp = find_confirmed_client(clid);
+	if (clp) {
+		status = nfserr_inval;
+		/* 
+		 * Found a record for this clientid. If the IP addresses
+		 * don't match, return ERR_INVAL just as if the record had
+		 * not been found.
+		 */
+		if (clp->cl_addr != ip_addr) { 
+			printk("NFSD: setclientid: string in use by client "
+			"(clientid %08x/%08x)\n",
+			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+			goto out;
+		}
+		conf = clp;
+	}
+	clp = find_unconfirmed_client(clid);
+	if (clp) {
+		status = nfserr_inval;
+		if (clp->cl_addr != ip_addr) { 
+			printk("NFSD: setclientid: string in use by client "
+			"(clientid %08x/%08x)\n",
+			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+			goto out;
+		}
+		unconf = clp;
+	}
+	/* CASE 1: 
+	* unconf record that matches input clientid and input confirm.
+	* conf record that matches input clientid.
+	* conf  and unconf records match names, verifiers 
+	*/
+	if ((conf && unconf) && 
+	    (cmp_verf(&unconf->cl_confirm, &confirm)) &&
+	    (cmp_verf(&conf->cl_verifier, &unconf->cl_verifier)) &&
+	    (cmp_name(&conf->cl_name,&unconf->cl_name))  &&
+	    (!cmp_verf(&conf->cl_confirm, &unconf->cl_confirm))) {
+		if (!cmp_creds(&conf->cl_cred, &unconf->cl_cred)) 
+			status = nfserr_clid_inuse;
+		else {
+			expire_client(conf);
+			clp = unconf;
+			move_to_confirmed(unconf);
+			status = nfs_ok;
+		}
+		goto out;
+	} 
+	/* CASE 2:
+	 * conf record that matches input clientid.
+	 * if unconf record that matches input clientid, then unconf->cl_name
+	 * or unconf->cl_verifier don't match the conf record.
+	 */
+	if ((conf && !unconf) || 
+	    ((conf && unconf) && 
+	     (!cmp_verf(&conf->cl_verifier, &unconf->cl_verifier) ||
+	      !cmp_name(&conf->cl_name, &unconf->cl_name)))) {
+		if (!cmp_creds(&conf->cl_cred,&rqstp->rq_cred)) {
+			status = nfserr_clid_inuse;
+		} else {
+			clp = conf;
+			status = nfs_ok;
+		}
+		goto out;
+	}
+	/* CASE 3:
+	 * conf record not found.
+	 * unconf record found. 
+	 * unconf->cl_confirm matches input confirm
+	 */ 
+	if (!conf && unconf && cmp_verf(&unconf->cl_confirm, &confirm)) {
+		if (!cmp_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
+			status = nfserr_clid_inuse;
+		} else {
+			status = nfs_ok;
+			clp = unconf;
+			move_to_confirmed(unconf);
+		}
+		goto out;
+	}
+	/* CASE 4:
+	 * conf record not found, or if conf, then conf->cl_confirm does not
+	 * match input confirm.
+	 * unconf record not found, or if unconf, then unconf->cl_confirm 
+	 * does not match input confirm.
+	 */
+	if ((!conf || !cmp_verf(&conf->cl_confirm, &confirm)) &&
+	    (!unconf || !cmp_verf(&unconf->cl_confirm, &confirm))) {
+		status = nfserr_stale_clientid;
+		goto out;
+	}
+	/* check that we have hit one of the cases...*/
+	status = nfserr_inval;
+	goto out;
+out:
+	if (!status)
+		nfsd4_probe_callback(clp);
+	nfs4_unlock_state();
+	return status;
+}
+
+/* 
+ * Open owner state (share locks)
+ */
+
+/* hash tables for nfs4_stateowner */
+#define OWNER_HASH_BITS              8
+#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
+#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
+
+#define ownerid_hashval(id) \
+        ((id) & OWNER_HASH_MASK)
+#define ownerstr_hashval(clientid, ownername) \
+        (((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)
+
+static struct list_head	ownerid_hashtbl[OWNER_HASH_SIZE];
+static struct list_head	ownerstr_hashtbl[OWNER_HASH_SIZE];
+
+/* hash table for nfs4_file */
+#define FILE_HASH_BITS                   8
+#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
+#define FILE_HASH_MASK                  (FILE_HASH_SIZE - 1)
+/* hash table for (open)nfs4_stateid */
+#define STATEID_HASH_BITS              10
+#define STATEID_HASH_SIZE              (1 << STATEID_HASH_BITS)
+#define STATEID_HASH_MASK              (STATEID_HASH_SIZE - 1)
+
+#define file_hashval(x) \
+        hash_ptr(x, FILE_HASH_BITS)
+#define stateid_hashval(owner_id, file_id)  \
+        (((owner_id) + (file_id)) & STATEID_HASH_MASK)
+
+static struct list_head file_hashtbl[FILE_HASH_SIZE];
+static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
+
+/* OPEN Share state helper functions */
+static inline struct nfs4_file *
+alloc_init_file(struct inode *ino)
+{
+	struct nfs4_file *fp;
+	unsigned int hashval = file_hashval(ino);
+
+	if ((fp = kmalloc(sizeof(struct nfs4_file),GFP_KERNEL))) {
+		INIT_LIST_HEAD(&fp->fi_hash);
+		INIT_LIST_HEAD(&fp->fi_perfile);
+		INIT_LIST_HEAD(&fp->fi_del_perfile);
+		list_add(&fp->fi_hash, &file_hashtbl[hashval]);
+		fp->fi_inode = igrab(ino);
+		fp->fi_id = current_fileid++;
+		alloc_file++;
+		return fp;
+	}
+	return NULL;
+}
+
+static void
+release_all_files(void)
+{
+	int i;
+	struct nfs4_file *fp;
+
+	for (i = 0; i < FILE_HASH_SIZE; i++) {
+		while (!list_empty(&file_hashtbl[i])) {
+			fp = list_entry(file_hashtbl[i].next, struct nfs4_file, fi_hash);
+			/* this should never happen; a file left open here creates dangling state */
+			if (!list_empty(&fp->fi_perfile) || !list_empty(&fp->fi_del_perfile)) {
+				printk("ERROR: release_all_files: file %p is open, creating dangling state!!!\n", fp);
+			}
+			}
+			release_file(fp);
+		}
+	}
+}
+
+kmem_cache_t *stateowner_slab = NULL;
+
+static int
+nfsd4_init_slabs(void)
+{
+	stateowner_slab = kmem_cache_create("nfsd4_stateowners",
+			sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL);
+	if (stateowner_slab == NULL) {
+		dprintk("nfsd4: out of memory while initializing nfsv4\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void
+nfsd4_free_slabs(void)
+{
+	int status = 0;
+
+	if (stateowner_slab)
+		status = kmem_cache_destroy(stateowner_slab);
+	stateowner_slab = NULL;
+	BUG_ON(status);
+}
+
+void
+nfs4_free_stateowner(struct kref *kref)
+{
+	struct nfs4_stateowner *sop =
+		container_of(kref, struct nfs4_stateowner, so_ref);
+	kfree(sop->so_owner.data);
+	kmem_cache_free(stateowner_slab, sop);
+}
+
+static inline struct nfs4_stateowner *
+alloc_stateowner(struct xdr_netobj *owner)
+{
+	struct nfs4_stateowner *sop;
+
+	if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) {
+		if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) {
+			memcpy(sop->so_owner.data, owner->data, owner->len);
+			sop->so_owner.len = owner->len;
+			kref_init(&sop->so_ref);
+			return sop;
+		} 
+		kmem_cache_free(stateowner_slab, sop);
+	}
+	return NULL;
+}
+
+static struct nfs4_stateowner *
+alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
+	struct nfs4_stateowner *sop;
+	struct nfs4_replay *rp;
+	unsigned int idhashval;
+
+	if (!(sop = alloc_stateowner(&open->op_owner)))
+		return NULL;
+	idhashval = ownerid_hashval(current_ownerid);
+	INIT_LIST_HEAD(&sop->so_idhash);
+	INIT_LIST_HEAD(&sop->so_strhash);
+	INIT_LIST_HEAD(&sop->so_perclient);
+	INIT_LIST_HEAD(&sop->so_perfilestate);
+	INIT_LIST_HEAD(&sop->so_perlockowner);  /* not used */
+	INIT_LIST_HEAD(&sop->so_close_lru);
+	sop->so_time = 0;
+	list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]);
+	list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]);
+	list_add(&sop->so_perclient, &clp->cl_perclient);
+	add_perclient++;
+	sop->so_is_open_owner = 1;
+	sop->so_id = current_ownerid++;
+	sop->so_client = clp;
+	sop->so_seqid = open->op_seqid;
+	sop->so_confirmed = 0;
+	rp = &sop->so_replay;
+	rp->rp_status = NFSERR_SERVERFAULT;
+	rp->rp_buflen = 0;
+	rp->rp_buf = rp->rp_ibuf;
+	return sop;
+}
+
+static void
+release_stateid_lockowners(struct nfs4_stateid *open_stp)
+{
+	struct nfs4_stateowner *lock_sop;
+
+	while (!list_empty(&open_stp->st_perlockowner)) {
+		lock_sop = list_entry(open_stp->st_perlockowner.next,
+				struct nfs4_stateowner, so_perlockowner);
+		/* list_del(&open_stp->st_perlockowner);  */
+		BUG_ON(lock_sop->so_is_open_owner);
+		release_stateowner(lock_sop);
+	}
+}
+
+static void
+unhash_stateowner(struct nfs4_stateowner *sop)
+{
+	struct nfs4_stateid *stp;
+
+	list_del(&sop->so_idhash);
+	list_del(&sop->so_strhash);
+	if (sop->so_is_open_owner) {
+		list_del(&sop->so_perclient);
+		del_perclient++;
+	}
+	list_del(&sop->so_perlockowner);
+	while (!list_empty(&sop->so_perfilestate)) {
+		stp = list_entry(sop->so_perfilestate.next, 
+			struct nfs4_stateid, st_perfilestate);
+		if (sop->so_is_open_owner)
+			release_stateid(stp, OPEN_STATE);
+		else
+			release_stateid(stp, LOCK_STATE);
+	}
+}
+
+static void
+release_stateowner(struct nfs4_stateowner *sop)
+{
+	unhash_stateowner(sop);
+	list_del(&sop->so_close_lru);
+	nfs4_put_stateowner(sop);
+}
+
+static inline void
+init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
+	struct nfs4_stateowner *sop = open->op_stateowner;
+	unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
+
+	INIT_LIST_HEAD(&stp->st_hash);
+	INIT_LIST_HEAD(&stp->st_perfilestate);
+	INIT_LIST_HEAD(&stp->st_perlockowner);
+	INIT_LIST_HEAD(&stp->st_perfile);
+	list_add(&stp->st_hash, &stateid_hashtbl[hashval]);
+	list_add(&stp->st_perfilestate, &sop->so_perfilestate);
+	list_add_perfile++;
+	list_add(&stp->st_perfile, &fp->fi_perfile);
+	stp->st_stateowner = sop;
+	stp->st_file = fp;
+	stp->st_stateid.si_boot = boot_time;
+	stp->st_stateid.si_stateownerid = sop->so_id;
+	stp->st_stateid.si_fileid = fp->fi_id;
+	stp->st_stateid.si_generation = 0;
+	stp->st_access_bmap = 0;
+	stp->st_deny_bmap = 0;
+	__set_bit(open->op_share_access, &stp->st_access_bmap);
+	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
+}
+
+static void
+release_stateid(struct nfs4_stateid *stp, int flags)
+{
+	struct file *filp = stp->st_vfs_file;
+
+	list_del(&stp->st_hash);
+	list_del_perfile++;
+	list_del(&stp->st_perfile);
+	list_del(&stp->st_perfilestate);
+	if (flags & OPEN_STATE) {
+		release_stateid_lockowners(stp);
+		stp->st_vfs_file = NULL;
+		nfsd_close(filp);
+		vfsclose++;
+	} else if (flags & LOCK_STATE)
+		locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner);
+	kfree(stp);
+	stp = NULL;
+}
+
+static void
+release_file(struct nfs4_file *fp)
+{
+	free_file++;
+	list_del(&fp->fi_hash);
+	iput(fp->fi_inode);
+	kfree(fp);
+}	
+
+void
+move_to_close_lru(struct nfs4_stateowner *sop)
+{
+	dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop);
+
+	unhash_stateowner(sop);
+	list_add_tail(&sop->so_close_lru, &close_lru);
+	sop->so_time = get_seconds();
+}
+
+void
+release_state_owner(struct nfs4_stateid *stp, int flag)
+{
+	struct nfs4_stateowner *sop = stp->st_stateowner;
+	struct nfs4_file *fp = stp->st_file;
+
+	dprintk("NFSD: release_state_owner\n");
+	release_stateid(stp, flag);
+
+	/* place unused nfs4_stateowners on so_close_lru list to be
+	 * released by the laundromat service after the lease period
+	 * to enable us to handle CLOSE replay
+	 */
+	if (sop->so_confirmed && list_empty(&sop->so_perfilestate))
+		move_to_close_lru(sop);
+	/* unused nfs4_files are released. XXX slab cache? */
+	if (list_empty(&fp->fi_perfile) && list_empty(&fp->fi_del_perfile)) {
+		release_file(fp);
+	}
+}
+
+static int
+cmp_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) {
+	return ((sop->so_owner.len == owner->len) && 
+	 !memcmp(sop->so_owner.data, owner->data, owner->len) && 
+	  (sop->so_client->cl_clientid.cl_id == clid->cl_id));
+}
+
+static struct nfs4_stateowner *
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
+{
+	struct nfs4_stateowner *so = NULL;
+
+	list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
+		if (cmp_owner_str(so, &open->op_owner, &open->op_clientid))
+			return so;
+	}
+	return NULL;
+}
+
+/* search file_hashtbl[] for file */
+static struct nfs4_file *
+find_file(struct inode *ino)
+{
+	unsigned int hashval = file_hashval(ino);
+	struct nfs4_file *fp;
+
+	list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
+		if (fp->fi_inode == ino)
+			return fp;
+	}
+	return NULL;
+}
+
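+/*
+ * Sanity checks on the share_access/share_deny values decoded from the
+ * wire: share_access must lie in the NFS4_SHARE_ACCESS_READ..BOTH range,
+ * share_deny in the NFS4_SHARE_DENY_NONE..BOTH range.
+ */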
+#define TEST_ACCESS(x) (((x) > 0 && (x) < 4) ? 1 : 0)
+#define TEST_DENY(x) (((x) >= 0 && (x) < 5) ? 1 : 0)
+
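+/*
+ * Reconstruct the share_access/share_deny value from the bitmaps kept in
+ * the stateid; the bitmaps are indexed by the NFSv4 share constants
+ * themselves, so OR-ing the set bit numbers together recovers the value.
+ */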
+void
+set_access(unsigned int *access, unsigned long bmap) {
+	int i;
+
+	*access = 0;
+	for (i = 1; i < 4; i++) {
+		if (test_bit(i, &bmap))
+			*access |= i;
+	}
+}
+
+void
+set_deny(unsigned int *deny, unsigned long bmap) {
+	int i;
+
+	*deny = 0;
+	for (i = 0; i < 4; i++) {
+		if (test_bit(i, &bmap))
+			*deny |= i ;
+	}
+}
+
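+/*
+ * Returns 0 (conflict) if the existing stateid's access modes intersect
+ * the new open's deny modes, or its deny modes intersect the new open's
+ * access modes; returns 1 if the shares are compatible.
+ */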
+static int
+test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
+	unsigned int access, deny;
+
+	set_access(&access, stp->st_access_bmap);
+	set_deny(&deny, stp->st_deny_bmap);
+	if ((access & open->op_share_deny) || (deny & open->op_share_access))
+		return 0;
+	return 1;
+}
+
+/*
+ * Called to check deny when READ with all zero stateid or
+ * WRITE with all zero or all one stateid
+ */
+int
+nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
+{
+	struct inode *ino = current_fh->fh_dentry->d_inode;
+	struct nfs4_file *fp;
+	struct nfs4_stateid *stp;
+
+	dprintk("NFSD: nfs4_share_conflict\n");
+
+	fp = find_file(ino);
+	if (fp) {
+	/* Search for conflicting share reservations */
+		list_for_each_entry(stp, &fp->fi_perfile, st_perfile) {
+			if (test_bit(deny_type, &stp->st_deny_bmap) ||
+			    test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
+				return nfserr_share_denied;
+		}
+	}
+	return nfs_ok;
+}
+
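+/*
+ * When the WRITE share access is being dropped, give back the inode's
+ * write access and flip the struct file's f_mode to read-only.
+ */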
+static inline void
+nfs4_file_downgrade(struct file *filp, unsigned int share_access)
+{
+	if (share_access & NFS4_SHARE_ACCESS_WRITE) {
+		put_write_access(filp->f_dentry->d_inode);
+		filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
+	}
+}
+
+/*
+ * Recall a delegation
+ */
+static int
+do_recall(void *__dp)
+{
+	struct nfs4_delegation *dp = __dp;
+
+	daemonize("nfsv4-recall");
+
+	nfsd4_cb_recall(dp);
+	return 0;
+}
+
+/*
+ * Spawn a thread to perform a recall on the delegation represented
+ * by the lease (file_lock)
+ *
+ * Called from break_lease() with lock_kernel() held.
+ * Note: we assume break_lease will only call this *once* for any given
+ * lease.
+ */
+static
+void nfsd_break_deleg_cb(struct file_lock *fl)
+{
+	struct nfs4_delegation *dp=  (struct nfs4_delegation *)fl->fl_owner;
+	struct task_struct *t;
+
+	dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
+	if (!dp)
+		return;
+
+	/* We're assuming the state code never drops its reference
+	 * without first removing the lease.  Since we're in this lease
+	 * callback (and since the lease code is serialized by the kernel
+	 * lock) we know the server hasn't removed the lease yet, we know
+	 * it's safe to take a reference: */
+	atomic_inc(&dp->dl_count);
+
+	spin_lock(&recall_lock);
+	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
+	spin_unlock(&recall_lock);
+
+	/* Only place dl_time is set; protected by lock_kernel(). */
+	dp->dl_time = get_seconds();
+
+	/* XXX need to merge NFSD_LEASE_TIME with fs/locks.c:lease_break_time */
+	fl->fl_break_time = jiffies + NFSD_LEASE_TIME * HZ;
+
+	t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
+	if (IS_ERR(t)) {
+		struct nfs4_client *clp = dp->dl_client;
+
+		printk(KERN_INFO "NFSD: Callback thread failed "
+			"for client (clientid %08x/%08x)\n",
+			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
+		nfs4_put_delegation(dp);
+	}
+}
+
+/*
+ * The file_lock is being reaped.
+ *
+ * Called by locks_free_lock() with lock_kernel() held.
+ */
+static
+void nfsd_release_deleg_cb(struct file_lock *fl)
+{
+	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
+
+	dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
+
+	if (!(fl->fl_flags & FL_LEASE) || !dp)
+		return;
+	dp->dl_flock = NULL;
+}
+
+/*
+ * Set the delegation file_lock back pointer.
+ *
+ * Called from __setlease() with lock_kernel() held.
+ */
+static
+void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl)
+{
+	struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner;
+
+	dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp);
+	if (!dp)
+		return;
+	dp->dl_flock = new;
+}
+
+/*
+ * Called from __setlease() with lock_kernel() held
+ */
+static
+int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
+{
+	struct nfs4_delegation *onlistd =
+		(struct nfs4_delegation *)onlist->fl_owner;
+	struct nfs4_delegation *tryd =
+		(struct nfs4_delegation *)try->fl_owner;
+
+	if (onlist->fl_lmops != try->fl_lmops)
+		return 0;
+
+	return onlistd->dl_client == tryd->dl_client;
+}
+
+
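+/*
+ * Called by the lease code when something tries to modify the lease
+ * backing a delegation: only F_UNLCK (removing the lease) is allowed;
+ * anything else is refused with -EAGAIN.
+ */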
+static
+int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
+{
+	if (arg & F_UNLCK)
+		return lease_modify(onlist, arg);
+	else
+		return -EAGAIN;
+}
+
+struct lock_manager_operations nfsd_lease_mng_ops = {
+	.fl_break = nfsd_break_deleg_cb,
+	.fl_release_private = nfsd_release_deleg_cb,
+	.fl_copy_lock = nfsd_copy_lock_deleg_cb,
+	.fl_mylease = nfsd_same_client_deleg_cb,
+	.fl_change = nfsd_change_deleg_cb,
+};
+
+
+/*
+ * nfsd4_process_open1()
+ * 	lookup stateowner.
+ * 		found:
+ * 			check confirmed 
+ * 				confirmed:
+ * 					check seqid
+ * 				not confirmed:
+ * 					delete owner
+ * 					create new owner
+ * 		notfound:
+ * 			verify clientid
+ * 			create new owner
+ *
+ * called with nfs4_lock_state() held.
+ */
+int
+nfsd4_process_open1(struct nfsd4_open *open)
+{
+	int status;
+	clientid_t *clientid = &open->op_clientid;
+	struct nfs4_client *clp = NULL;
+	unsigned int strhashval;
+	struct nfs4_stateowner *sop = NULL;
+
+	status = nfserr_inval;
+	if (!check_name(open->op_owner))
+		goto out;
+
+	if (STALE_CLIENTID(&open->op_clientid))
+		return nfserr_stale_clientid;
+
+	strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
+	sop = find_openstateowner_str(strhashval, open);
+	if (sop) {
+		open->op_stateowner = sop;
+		/* check for replay */
+		if (open->op_seqid == sop->so_seqid){
+			if (sop->so_replay.rp_buflen)
+				return NFSERR_REPLAY_ME;
+			else {
+				/* The original OPEN failed so spectacularly
+				 * that we don't even have replay data saved!
+				 * Therefore, we have no choice but to continue
+				 * processing this OPEN; presumably, we'll
+				 * fail again for the same reason.
+				 */
+				dprintk("nfsd4_process_open1:"
+					" replay with no replay cache\n");
+				goto renew;
+			}
+		} else if (sop->so_confirmed) {
+			if (open->op_seqid == sop->so_seqid + 1)
+				goto renew;
+			status = nfserr_bad_seqid;
+			goto out;
+		} else {
+			/* If we get here, we received an OPEN for an
+			 * unconfirmed nfs4_stateowner. Since the seqid's are
+			 * different, purge the existing nfs4_stateowner, and
+			 * instantiate a new one.
+			 */
+			clp = sop->so_client;
+			release_stateowner(sop);
+		}
+	} else {
+		/* nfs4_stateowner not found.
+		 * Verify clientid and instantiate new nfs4_stateowner.
+		 * If verify fails this is presumably the result of the
+		 * client's lease expiring.
+		 */
+		status = nfserr_expired;
+		clp = find_confirmed_client(clientid);
+		if (clp == NULL)
+			goto out;
+	}
+	status = nfserr_resource;
+	sop = alloc_init_open_stateowner(strhashval, clp, open);
+	if (sop == NULL)
+		goto out;
+	open->op_stateowner = sop;
+renew:
+	status = nfs_ok;
+	renew_client(sop->so_client);
+out:
+	if (status && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+		status = nfserr_reclaim_bad;
+	return status;
+}
+
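+/*
+ * Scan the file's open stateids: remember any stateid already held by
+ * this open owner (so the caller treats the OPEN as an upgrade) and
+ * return nfserr_share_denied on any conflicting share reservation.
+ */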
+static int
+nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp)
+{
+	struct nfs4_stateid *local;
+	int status = nfserr_share_denied;
+	struct nfs4_stateowner *sop = open->op_stateowner;
+
+	list_for_each_entry(local, &fp->fi_perfile, st_perfile) {
+		/* ignore lock owners */
+		if (local->st_stateowner->so_is_open_owner == 0)
+			continue;
+		/* remember if we have seen this open owner */
+		if (local->st_stateowner == sop)
+			*stpp = local;
+		/* check for conflicting share reservations */
+		if (!test_share(local, open))
+			goto out;
+	}
+	status = 0;
+out:
+	return status;
+}
+
+static int
+nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
+		struct svc_fh *cur_fh, int flags)
+{
+	struct nfs4_stateid *stp;
+	int status;
+
+	stp = kmalloc(sizeof(struct nfs4_stateid), GFP_KERNEL);
+	if (stp == NULL)
+		return nfserr_resource;
+
+	status = nfsd_open(rqstp, cur_fh, S_IFREG, flags, &stp->st_vfs_file);
+	if (status) {
+		if (status == nfserr_dropit)
+			status = nfserr_jukebox;
+		kfree(stp);
+		return status;
+	}
+	vfsopen++;
+	*stpp = stp;
+	return 0;
+}
+
+static inline int
+nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
+		struct nfsd4_open *open)
+{
+	struct iattr iattr = {
+		.ia_valid = ATTR_SIZE,
+		.ia_size = 0,
+	};
+	if (!open->op_truncate)
+		return 0;
+	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
+		return -EINVAL;
+	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
+}
+
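+/*
+ * Fold newly requested access modes into an existing open stateid: take
+ * the inode's write access if WRITE is newly granted, honour any
+ * requested truncate, then record the new access and deny bits.
+ */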
+static int
+nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+{
+	struct file *filp = stp->st_vfs_file;
+	struct inode *inode = filp->f_dentry->d_inode;
+	unsigned int share_access;
+	int status;
+
+	set_access(&share_access, stp->st_access_bmap);
+	share_access = ~share_access;
+	share_access &= open->op_share_access;
+
+	if (!(share_access & NFS4_SHARE_ACCESS_WRITE))
+		return nfsd4_truncate(rqstp, cur_fh, open);
+
+	status = get_write_access(inode);
+	if (status)
+		return nfserrno(status);
+	status = nfsd4_truncate(rqstp, cur_fh, open);
+	if (status) {
+		put_write_access(inode);
+		return status;
+	}
+	/* remember the open */
+	filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ;
+	set_bit(open->op_share_access, &stp->st_access_bmap);
+	set_bit(open->op_share_deny, &stp->st_deny_bmap);
+
+	return nfs_ok;
+}
+
+
+/* decrement seqid on successful reclaim, it will be bumped in encode_open */
+static void
+nfs4_set_claim_prev(struct nfsd4_open *open, int *status)
+{
+	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) {
+		if (*status)
+			*status = nfserr_reclaim_bad;
+		else {
+			open->op_stateowner->so_confirmed = 1;
+			open->op_stateowner->so_seqid--;
+		}
+	}
+}
+
+/*
+ * Attempt to hand out a delegation.
+ */
+static void
+nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp)
+{
+	struct nfs4_delegation *dp;
+	struct nfs4_stateowner *sop = stp->st_stateowner;
+	struct nfs4_callback *cb = &sop->so_client->cl_callback;
+	struct file_lock fl, *flp = &fl;
+	int status, flag = 0;
+
+	flag = NFS4_OPEN_DELEGATE_NONE;
+	if (open->op_claim_type != NFS4_OPEN_CLAIM_NULL
+	     || !atomic_read(&cb->cb_set) || !sop->so_confirmed)
+		goto out;
+
+	if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
+		flag = NFS4_OPEN_DELEGATE_WRITE;
+	else
+		flag = NFS4_OPEN_DELEGATE_READ;
+
+	dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
+	if (dp == NULL) {
+		flag = NFS4_OPEN_DELEGATE_NONE;
+		goto out;
+	}
+	locks_init_lock(&fl);
+	fl.fl_lmops = &nfsd_lease_mng_ops;
+	fl.fl_flags = FL_LEASE;
+	fl.fl_end = OFFSET_MAX;
+	fl.fl_owner =  (fl_owner_t)dp;
+	fl.fl_file = stp->st_vfs_file;
+	fl.fl_pid = current->tgid;
+
+	/* setlease checks to see if delegation should be handed out.
+	 * the lock_manager callbacks fl_mylease and fl_change are used
+	 */
+	if ((status = setlease(stp->st_vfs_file,
+		flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK, &flp))) {
+		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
+		list_del(&dp->dl_del_perfile);
+		list_del(&dp->dl_del_perclnt);
+		nfs4_put_delegation(dp);
+		flag = NFS4_OPEN_DELEGATE_NONE;
+		goto out;
+	}
+
+	memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
+
+	dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n",
+	             dp->dl_stateid.si_boot,
+	             dp->dl_stateid.si_stateownerid,
+	             dp->dl_stateid.si_fileid,
+	             dp->dl_stateid.si_generation);
+out:
+	open->op_delegate_type = flag;
+}
+
+/*
+ * called with nfs4_lock_state() held.
+ */
+int
+nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+{
+	struct nfs4_file *fp = NULL;
+	struct inode *ino = current_fh->fh_dentry->d_inode;
+	struct nfs4_stateid *stp = NULL;
+	int status;
+
+	status = nfserr_inval;
+	if (!TEST_ACCESS(open->op_share_access) || !TEST_DENY(open->op_share_deny))
+		goto out;
+	/*
+	 * Lookup file; if found, lookup stateid and check open request,
+	 * and check for delegations in the process of being recalled.
+	 * If not found, create the nfs4_file struct
+	 */
+	fp = find_file(ino);
+	if (fp) {
+		if ((status = nfs4_check_open(fp, open, &stp)))
+			goto out;
+	} else {
+		status = nfserr_resource;
+		fp = alloc_init_file(ino);
+		if (fp == NULL)
+			goto out;
+	}
+
+	/*
+	 * OPEN the file, or upgrade an existing OPEN.
+	 * If truncate fails, the OPEN fails.
+	 */
+	if (stp) {
+		/* Stateid was found, this is an OPEN upgrade */
+		status = nfs4_upgrade_open(rqstp, current_fh, stp, open);
+		if (status)
+			goto out;
+	} else {
+		/* Stateid was not found, this is a new OPEN */
+		int flags = 0;
+		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
+			flags = MAY_WRITE;
+		else
+			flags = MAY_READ;
+		if ((status = nfs4_new_open(rqstp, &stp, current_fh, flags)))
+			goto out;
+		init_stateid(stp, fp, open);
+		status = nfsd4_truncate(rqstp, current_fh, open);
+		if (status) {
+			release_stateid(stp, OPEN_STATE);
+			goto out;
+		}
+	}
+	memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t));
+
+	/*
+	 * Attempt to hand out a delegation. No error return, because the
+	 * OPEN succeeds even if we fail.
+	 */
+	nfs4_open_delegation(current_fh, open, stp);
+
+	status = nfs_ok;
+
+	dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n",
+	            stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid,
+	            stp->st_stateid.si_fileid, stp->st_stateid.si_generation);
+out:
+	/* take the opportunity to clean up unused state */
+	if (fp && list_empty(&fp->fi_perfile) && list_empty(&fp->fi_del_perfile))
+		release_file(fp);
+
+	/* CLAIM_PREVIOUS has different error returns */
+	nfs4_set_claim_prev(open, &status);
+	/*
+	 * To finish the open response, we just need to set the rflags.
+	 */
+	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
+	if (!open->op_stateowner->so_confirmed)
+		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
+
+	return status;
+}
+
+static struct work_struct laundromat_work;
+static void laundromat_main(void *);
+static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+
+int 
+nfsd4_renew(clientid_t *clid)
+{
+	struct nfs4_client *clp;
+	int status;
+
+	nfs4_lock_state();
+	dprintk("process_renew(%08x/%08x): starting\n", 
+			clid->cl_boot, clid->cl_id);
+	status = nfserr_stale_clientid;
+	if (STALE_CLIENTID(clid))
+		goto out;
+	clp = find_confirmed_client(clid);
+	status = nfserr_expired;
+	if (clp == NULL) {
+		/* We assume the client took too long to RENEW. */
+		dprintk("nfsd4_renew: clientid not found!\n");
+		goto out;
+	}
+	renew_client(clp);
+	status = nfserr_cb_path_down;
+	if (!list_empty(&clp->cl_del_perclnt)
+			&& !atomic_read(&clp->cl_callback.cb_set))
+		goto out;
+	status = nfs_ok;
+out:
+	nfs4_unlock_state();
+	return status;
+}
+
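+/*
+ * Walk the time-ordered LRU lists of clients, recallable delegations and
+ * closed open owners, expiring anything older than one lease period.
+ * Returns the number of seconds (at least NFSD_LAUNDROMAT_MINTIMEOUT)
+ * until the next client is due to expire, which laundromat_main() uses
+ * to reschedule the work.
+ */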
+time_t
+nfs4_laundromat(void)
+{
+	struct nfs4_client *clp;
+	struct nfs4_stateowner *sop;
+	struct nfs4_delegation *dp;
+	struct list_head *pos, *next, reaplist;
+	time_t cutoff = get_seconds() - NFSD_LEASE_TIME;
+	time_t t, clientid_val = NFSD_LEASE_TIME;
+	time_t u, test_val = NFSD_LEASE_TIME;
+
+	nfs4_lock_state();
+
+	dprintk("NFSD: laundromat service - starting\n");
+	list_for_each_safe(pos, next, &client_lru) {
+		clp = list_entry(pos, struct nfs4_client, cl_lru);
+		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
+			t = clp->cl_time - cutoff;
+			if (clientid_val > t)
+				clientid_val = t;
+			break;
+		}
+		dprintk("NFSD: purging unused client (clientid %08x)\n",
+			clp->cl_clientid.cl_id);
+		expire_client(clp);
+	}
+	INIT_LIST_HEAD(&reaplist);
+	spin_lock(&recall_lock);
+	list_for_each_safe(pos, next, &del_recall_lru) {
+		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
+			u = dp->dl_time - cutoff;
+			if (test_val > u)
+				test_val = u;
+			break;
+		}
+		dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
+			            dp, dp->dl_flock);
+		list_move(&dp->dl_recall_lru, &reaplist);
+	}
+	spin_unlock(&recall_lock);
+	list_for_each_safe(pos, next, &reaplist) {
+		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+		list_del_init(&dp->dl_recall_lru);
+		unhash_delegation(dp);
+	}
+	test_val = NFSD_LEASE_TIME;
+	list_for_each_safe(pos, next, &close_lru) {
+		sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
+		if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
+			u = sop->so_time - cutoff;
+			if (test_val > u)
+				test_val = u;
+			break;
+		}
+		dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
+			sop->so_id);
+		list_del(&sop->so_close_lru);
+		nfs4_put_stateowner(sop);
+	}
+	if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
+		clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
+	nfs4_unlock_state();
+	return clientid_val;
+}
+
+static void
+laundromat_main(void *not_used)
+{
+	time_t t;
+
+	t = nfs4_laundromat();
+	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
+	schedule_delayed_work(&laundromat_work, t*HZ);
+}
+
+/* search ownerid_hashtbl[] and close_lru for stateid owner
+ * (stateid->si_stateownerid)
+ */
+struct nfs4_stateowner *
+find_openstateowner_id(u32 st_id, int flags)
+{
+	struct nfs4_stateowner *local = NULL;
+
+	dprintk("NFSD: find_openstateowner_id %d\n", st_id);
+	if (flags & CLOSE_STATE) {
+		list_for_each_entry(local, &close_lru, so_close_lru) {
+			if (local->so_id == st_id)
+				return local;
+		}
+	}
+	return NULL;
+}
+
+static inline int
+nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
+{
+	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
+}
+
+static int
+STALE_STATEID(stateid_t *stateid)
+{
+	if (stateid->si_boot == boot_time)
+		return 0;
+	printk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
+		stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
+		stateid->si_generation);
+	return 1;
+}
+
+static inline int
+access_permit_read(unsigned long access_bmap)
+{
+	return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
+		test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
+		test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
+}
+
+static inline int
+access_permit_write(unsigned long access_bmap)
+{
+	return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
+		test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
+}
+
+static int
+nfs4_check_openmode(struct nfs4_stateid *stp, int flags)
+{
+	int status = nfserr_openmode;
+
+	if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
+		goto out;
+	if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
+		goto out;
+	status = nfs_ok;
+out:
+	return status;
+}
+
+static inline int
+nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
+{
+	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
+		return nfserr_openmode;
+	else
+		return nfs_ok;
+}
+
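+/*
+ * Handling of the two reserved "special" stateids (all zeros and all ones).
+ * For example, a READ presented with the all-ones stateid is always allowed
+ * here, while a READ or WRITE presented with the all-zeros stateid outside
+ * the grace period is checked for conflicting share deny state via
+ * nfs4_share_conflict().
+ */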
+static inline int
+check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
+{
+	/* Trying to call delegreturn with a special stateid? Yuch: */
+	if (!(flags & (RD_STATE | WR_STATE)))
+		return nfserr_bad_stateid;
+	else if (ONE_STATEID(stateid) && (flags & RD_STATE))
+		return nfs_ok;
+	else if (nfs4_in_grace()) {
+		/* Answer in remaining cases depends on existence of
+		 * conflicting state; so we must wait out the grace period. */
+		return nfserr_grace;
+	} else if (flags & WR_STATE)
+		return nfs4_share_conflict(current_fh,
+				NFS4_SHARE_DENY_WRITE);
+	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
+		return nfs4_share_conflict(current_fh,
+				NFS4_SHARE_DENY_READ);
+}
+
+/*
+ * Allow READ/WRITE during grace period on recovered state only for files
+ * that are not able to provide mandatory locking.
+ */
+static inline int
+io_during_grace_disallowed(struct inode *inode, int flags)
+{
+	return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE))
+		&& MANDATORY_LOCK(inode);
+}
+
+/*
+ * Checks for stateid operations.
+ */
+int
+nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int flags, struct file **filpp)
+{
+	struct nfs4_stateid *stp = NULL;
+	struct nfs4_delegation *dp = NULL;
+	stateid_t *stidp;
+	struct inode *ino = current_fh->fh_dentry->d_inode;
+	int status;
+
+	dprintk("NFSD: preprocess_stateid_op: stateid = (%08x/%08x/%08x/%08x)\n",
+		stateid->si_boot, stateid->si_stateownerid, 
+		stateid->si_fileid, stateid->si_generation); 
+	if (filpp)
+		*filpp = NULL;
+
+	if (io_during_grace_disallowed(ino, flags))
+		return nfserr_grace;
+
+	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+		return check_special_stateids(current_fh, stateid, flags);
+
+	/* STALE STATEID */
+	status = nfserr_stale_stateid;
+	if (STALE_STATEID(stateid)) 
+		goto out;
+
+	/* BAD STATEID */
+	status = nfserr_bad_stateid;
+	if (!stateid->si_fileid) { /* delegation stateid */
+		if (!(dp = find_delegation_stateid(ino, stateid))) {
+			dprintk("NFSD: delegation stateid not found\n");
+			if (nfs4_in_grace())
+				status = nfserr_grace;
+			goto out;
+		}
+		stidp = &dp->dl_stateid;
+	} else { /* open or lock stateid */
+		if (!(stp = find_stateid(stateid, flags))) {
+			dprintk("NFSD: open or lock stateid not found\n");
+			if (nfs4_in_grace())
+				status = nfserr_grace;
+			goto out;
+		}
+		if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp))
+			goto out;
+		if (!stp->st_stateowner->so_confirmed)
+			goto out;
+		stidp = &stp->st_stateid;
+	}
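+	/*
+	 * Generation check, by example: if the server's copy of the stateid
+	 * is at generation 5, presenting generation 7 ("from the future")
+	 * yields nfserr_bad_stateid below, while presenting generation 3
+	 * yields nfserr_old_stateid.
+	 */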
+	if (stateid->si_generation > stidp->si_generation)
+		goto out;
+
+	/* OLD STATEID */
+	status = nfserr_old_stateid;
+	if (stateid->si_generation < stidp->si_generation)
+		goto out;
+	if (stp) {
+		if ((status = nfs4_check_openmode(stp,flags)))
+			goto out;
+		renew_client(stp->st_stateowner->so_client);
+		if (filpp)
+			*filpp = stp->st_vfs_file;
+	} else if (dp) {
+		if ((status = nfs4_check_delegmode(dp, flags)))
+			goto out;
+		renew_client(dp->dl_client);
+		if (flags & DELEG_RET)
+			unhash_delegation(dp);
+		if (filpp)
+			*filpp = dp->dl_vfs_file;
+	}
+	status = nfs_ok;
+out:
+	return status;
+}
+
+
+/* 
+ * Checks for sequence id mutating operations. 
+ */
+int
+nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *stateid, int flags, struct nfs4_stateowner **sopp, struct nfs4_stateid **stpp, clientid_t *lockclid)
+{
+	int status;
+	struct nfs4_stateid *stp;
+	struct nfs4_stateowner *sop;
+
+	dprintk("NFSD: preprocess_seqid_op: seqid=%d " 
+			"stateid = (%08x/%08x/%08x/%08x)\n", seqid,
+		stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
+		stateid->si_generation);
+			        
+	*stpp = NULL;
+	*sopp = NULL;
+
+	status = nfserr_bad_stateid;
+	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
+		printk("NFSD: preprocess_seqid_op: magic stateid!\n");
+		goto out;
+	}
+
+	status = nfserr_stale_stateid;
+	if (STALE_STATEID(stateid))
+		goto out;
+	/*
+	 * We return BAD_STATEID if the filehandle doesn't match the stateid,
+	 * the confirmed flag is incorrectly set, or the generation
+	 * number is incorrect.
+	 * If there is no entry in the openfile table for this id,
+	 * we can't always return BAD_STATEID;
+	 * this might be a retransmitted CLOSE which has arrived after
+	 * the openfile has been released.
+	 */
+	if (!(stp = find_stateid(stateid, flags)))
+		goto no_nfs4_stateid;
+
+	status = nfserr_bad_stateid;
+
+	/* for new lock stateowners:
+	 * check that the lock->v.new.open_stateid
+	 * refers to an open stateowner
+	 *
+	 * check that the lockclid (nfs4_lock->v.new.clientid) is the same
+	 * as the open_stateid->st_stateowner->so_client->clientid
+	 */
+	if (lockclid) {
+		struct nfs4_stateowner *sop = stp->st_stateowner;
+		struct nfs4_client *clp = sop->so_client;
+
+		if (!sop->so_is_open_owner)
+			goto out;
+		if (!cmp_clid(&clp->cl_clientid, lockclid))
+			goto out;
+	}
+
+	if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) {
+		printk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n");
+		goto out;
+	}
+
+	*stpp = stp;
+	*sopp = sop = stp->st_stateowner;
+
+	/*
+	 * We now validate the seqid and stateid generation numbers.
+	 * For the moment, we ignore the possibility of
+	 * generation number wraparound.
+	 */
+	if (seqid != sop->so_seqid + 1)
+		goto check_replay;
+
+	if (sop->so_confirmed) {
+		if (flags & CONFIRM) {
+			printk("NFSD: preprocess_seqid_op: expected unconfirmed stateowner!\n");
+			goto out;
+		}
+	}
+	else {
+		if (!(flags & CONFIRM)) {
+			printk("NFSD: preprocess_seqid_op: stateowner not confirmed yet!\n");
+			goto out;
+		}
+	}
+	if (stateid->si_generation > stp->st_stateid.si_generation) {
+		printk("NFSD: preprocess_seqid_op: future stateid?!\n");
+		goto out;
+	}
+
+	status = nfserr_old_stateid;
+	if (stateid->si_generation < stp->st_stateid.si_generation) {
+		printk("NFSD: preprocess_seqid_op: old stateid!\n");
+		goto out;
+	}
+	/* XXX renew the client lease here */
+	status = nfs_ok;
+
+out:
+	return status;
+
+no_nfs4_stateid:
+
+	/*
+	 * We determine whether this is a bad stateid or a replay,
+	 * starting by trying to look up the stateowner.
+	 * If the stateowner is not found, the stateid is bad.
+	 */
+	if (!(sop = find_openstateowner_id(stateid->si_stateownerid, flags))) {
+		printk("NFSD: preprocess_seqid_op: no stateowner or nfs4_stateid!\n");
+		status = nfserr_bad_stateid;
+		goto out;
+	}
+	*sopp = sop;
+
+check_replay:
+	if (seqid == sop->so_seqid) {
+		printk("NFSD: preprocess_seqid_op: retransmission?\n");
+		/* indicate replay to calling function */
+		status = NFSERR_REPLAY_ME;
+	} else  {
+		printk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n", sop->so_seqid + 1, seqid);
+
+		*sopp = NULL;
+		status = nfserr_bad_seqid;
+	}
+	goto out;
+}
+
+int
+nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_confirm *oc)
+{
+	int status;
+	struct nfs4_stateowner *sop;
+	struct nfs4_stateid *stp;
+
+	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
+			(int)current_fh->fh_dentry->d_name.len,
+			current_fh->fh_dentry->d_name.name);
+
+	if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0)))
+		goto out;
+
+	nfs4_lock_state();
+
+	if ((status = nfs4_preprocess_seqid_op(current_fh, oc->oc_seqid,
+					&oc->oc_req_stateid,
+					CHECK_FH | CONFIRM | OPEN_STATE,
+					&oc->oc_stateowner, &stp, NULL)))
+		goto out; 
+
+	sop = oc->oc_stateowner;
+	sop->so_confirmed = 1;
+	update_stateid(&stp->st_stateid);
+	memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
+	dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d " 
+		"stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid,
+		         stp->st_stateid.si_boot,
+		         stp->st_stateid.si_stateownerid,
+		         stp->st_stateid.si_fileid,
+		         stp->st_stateid.si_generation);
+out:
+	if (oc->oc_stateowner)
+		nfs4_get_stateowner(oc->oc_stateowner);
+	nfs4_unlock_state();
+	return status;
+}
+
+
+/*
+ * unset all bits in union bitmap (bmap) that
+ * do not exist in share (from successful OPEN_DOWNGRADE)
+ */
+static void
+reset_union_bmap_access(unsigned long access, unsigned long *bmap)
+{
+	int i;
+	for (i = 1; i < 4; i++) {
+		if ((i & access) != i)
+			__clear_bit(i, bmap);
+	}
+}
+
+static void
+reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
+{
+	int i;
+	for (i = 0; i < 4; i++) {
+		if ((i & deny) != i)
+			__clear_bit(i, bmap);
+	}
+}
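+
+/*
+ * Worked example for the two helpers above: if st_access_bmap currently has
+ * bits 1 (READ), 2 (WRITE) and 3 (BOTH) set and the downgrade requests READ
+ * only (access == 1), bits 2 and 3 are cleared and only bit 1 remains.
+ */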
+
+int
+nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_downgrade *od)
+{
+	int status;
+	struct nfs4_stateid *stp;
+	unsigned int share_access;
+
+	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 
+			(int)current_fh->fh_dentry->d_name.len,
+			current_fh->fh_dentry->d_name.name);
+
+	if (!TEST_ACCESS(od->od_share_access) || !TEST_DENY(od->od_share_deny))
+		return nfserr_inval;
+
+	nfs4_lock_state();
+	if ((status = nfs4_preprocess_seqid_op(current_fh, od->od_seqid, 
+					&od->od_stateid, 
+					CHECK_FH | OPEN_STATE, 
+					&od->od_stateowner, &stp, NULL)))
+		goto out; 
+
+	status = nfserr_inval;
+	if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
+		dprintk("NFSD: access not a subset of current bitmap: 0x%lx, input access=%08x\n",
+			stp->st_access_bmap, od->od_share_access);
+		goto out;
+	}
+	if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
+		dprintk("NFSD: deny not a subset of current bitmap: 0x%lx, input deny=%08x\n",
+			stp->st_deny_bmap, od->od_share_deny);
+		goto out;
+	}
+	set_access(&share_access, stp->st_access_bmap);
+	nfs4_file_downgrade(stp->st_vfs_file,
+	                    share_access & ~od->od_share_access);
+
+	reset_union_bmap_access(od->od_share_access, &stp->st_access_bmap);
+	reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
+
+	update_stateid(&stp->st_stateid);
+	memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t));
+	status = nfs_ok;
+out:
+	if (od->od_stateowner)
+		nfs4_get_stateowner(od->od_stateowner);
+	nfs4_unlock_state();
+	return status;
+}
+
+/*
+ * nfs4_unlock_state() called after encode
+ */
+int
+nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_close *close)
+{
+	int status;
+	struct nfs4_stateid *stp;
+
+	dprintk("NFSD: nfsd4_close on file %.*s\n", 
+			(int)current_fh->fh_dentry->d_name.len,
+			current_fh->fh_dentry->d_name.name);
+
+	nfs4_lock_state();
+	/* check close_lru for replay */
+	if ((status = nfs4_preprocess_seqid_op(current_fh, close->cl_seqid, 
+					&close->cl_stateid, 
+					CHECK_FH | OPEN_STATE | CLOSE_STATE,
+					&close->cl_stateowner, &stp, NULL)))
+		goto out; 
+	/*
+	 * Return success, but first update the stateid.
+	 */
+	status = nfs_ok;
+	update_stateid(&stp->st_stateid);
+	memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
+
+	/* release_state_owner() calls nfsd_close() if needed */
+	release_state_owner(stp, OPEN_STATE);
+out:
+	if (close->cl_stateowner)
+		nfs4_get_stateowner(close->cl_stateowner);
+	nfs4_unlock_state();
+	return status;
+}
+
+int
+nfsd4_delegreturn(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_delegreturn *dr)
+{
+	int status;
+
+	if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0)))
+		goto out;
+
+	nfs4_lock_state();
+	status = nfs4_preprocess_stateid_op(current_fh, &dr->dr_stateid, DELEG_RET, NULL);
+	nfs4_unlock_state();
+out:
+	return status;
+}
+
+
+/* 
+ * Lock owner state (byte-range locks)
+ */
+#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
+#define LOCK_HASH_BITS              8
+#define LOCK_HASH_SIZE             (1 << LOCK_HASH_BITS)
+#define LOCK_HASH_MASK             (LOCK_HASH_SIZE - 1)
+
+#define lockownerid_hashval(id) \
+        ((id) & LOCK_HASH_MASK)
+
+static inline unsigned int
+lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
+		struct xdr_netobj *ownername)
+{
+	return (file_hashval(inode) + cl_id
+			+ opaque_hashval(ownername->data, ownername->len))
+		& LOCK_HASH_MASK;
+}
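+
+/*
+ * Roughly: lock owners are hashed two ways - by numeric id into
+ * lock_ownerid_hashtbl[] and by (file, client, owner-name) string into
+ * lock_ownerstr_hashtbl[] - while the lock stateids themselves live in
+ * lockstateid_hashtbl[].  With LOCK_HASH_BITS == 8 each owner table has
+ * 256 buckets.
+ */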
+
+static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
+static struct list_head	lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
+static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE];
+
+struct nfs4_stateid *
+find_stateid(stateid_t *stid, int flags)
+{
+	struct nfs4_stateid *local = NULL;
+	u32 st_id = stid->si_stateownerid;
+	u32 f_id = stid->si_fileid;
+	unsigned int hashval;
+
+	dprintk("NFSD: find_stateid flags 0x%x\n",flags);
+	if ((flags & LOCK_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
+		hashval = stateid_hashval(st_id, f_id);
+		list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
+			if ((local->st_stateid.si_stateownerid == st_id) &&
+			    (local->st_stateid.si_fileid == f_id))
+				return local;
+		}
+	} 
+	if ((flags & OPEN_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
+		hashval = stateid_hashval(st_id, f_id);
+		list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
+			if ((local->st_stateid.si_stateownerid == st_id) &&
+			    (local->st_stateid.si_fileid == f_id))
+				return local;
+		}
+	} else
+		printk("NFSD: find_stateid: ERROR: no state flag\n");
+	return NULL;
+}
+
+static struct nfs4_delegation *
+find_delegation_stateid(struct inode *ino, stateid_t *stid)
+{
+	struct nfs4_delegation *dp = NULL;
+	struct nfs4_file *fp = NULL;
+	u32 st_id;
+
+	dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n",
+                    stid->si_boot, stid->si_stateownerid,
+                    stid->si_fileid, stid->si_generation);
+
+	st_id = stid->si_stateownerid;
+	fp = find_file(ino);
+	if (fp) {
+		list_for_each_entry(dp, &fp->fi_del_perfile, dl_del_perfile) {
+			if (dp->dl_stateid.si_stateownerid == st_id) {
+				dprintk("NFSD: find_delegation dp %p\n",dp);
+				return dp;
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+ * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
+ * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
+ * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
+ * locking, this prevents us from being completely protocol-compliant.  The
+ * real solution to this problem is to start using unsigned file offsets in
+ * the VFS, but this is a very deep change!
+ */
+static inline void
+nfs4_transform_lock_offset(struct file_lock *lock)
+{
+	if (lock->fl_start < 0)
+		lock->fl_start = OFFSET_MAX;
+	if (lock->fl_end < 0)
+		lock->fl_end = OFFSET_MAX;
+}
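+
+/*
+ * Example of the transform above: nfsd4_lock() below sets fl_end to ~(u64)0
+ * for a "lock to EOF" request; interpreted as a signed loff_t that value is
+ * negative, so it is clamped to OFFSET_MAX here.
+ */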
+
+int
+nfs4_verify_lock_stateowner(struct nfs4_stateowner *sop, unsigned int hashval)
+{
+	struct nfs4_stateowner *local = NULL;
+	int status = 0;
+			        
+	if (hashval >= LOCK_HASH_SIZE)
+		goto out;
+	list_for_each_entry(local, &lock_ownerid_hashtbl[hashval], so_idhash) {
+		if (local == sop) {
+			status = 1;
+			goto out;
+		}
+	}
+out:
+	return status;
+}
+
+
+static inline void
+nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
+{
+	struct nfs4_stateowner *sop = (struct nfs4_stateowner *) fl->fl_owner;
+	unsigned int hval = lockownerid_hashval(sop->so_id);
+
+	deny->ld_sop = NULL;
+	if (nfs4_verify_lock_stateowner(sop, hval)) {
+		kref_get(&sop->so_ref);
+		deny->ld_sop = sop;
+		deny->ld_clientid = sop->so_client->cl_clientid;
+	}
+	deny->ld_start = fl->fl_start;
+	deny->ld_length = ~(u64)0;
+	if (fl->fl_end != ~(u64)0)
+		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
+	deny->ld_type = NFS4_READ_LT;
+	if (fl->fl_type != F_RDLCK)
+		deny->ld_type = NFS4_WRITE_LT;
+}
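+
+/*
+ * For example, a conflicting POSIX write lock covering bytes 100-199 is
+ * reported back as ld_start 100, ld_length 100, ld_type NFS4_WRITE_LT;
+ * ld_length is left at ~(u64)0 only when fl_end is itself ~(u64)0.
+ */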
+
+static struct nfs4_stateowner *
+find_lockstateowner(struct xdr_netobj *owner, clientid_t *clid)
+{
+	struct nfs4_stateowner *local = NULL;
+	int i;
+
+	for (i = 0; i < LOCK_HASH_SIZE; i++) {
+		list_for_each_entry(local, &lock_ownerid_hashtbl[i], so_idhash) {
+			if (!cmp_owner_str(local, owner, clid))
+				continue;
+			return local;
+		}
+	}
+	return NULL;
+}
+
+static struct nfs4_stateowner *
+find_lockstateowner_str(struct inode *inode, clientid_t *clid,
+		struct xdr_netobj *owner)
+{
+	unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
+	struct nfs4_stateowner *op;
+
+	list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
+		if (cmp_owner_str(op, owner, clid))
+			return op;
+	}
+	return NULL;
+}
+
+/*
+ * Alloc a lock owner structure.
+ * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
+ * occurred.
+ *
+ * strhashval = lock_ownerstr_hashval 
+ * so_seqid = lock->lk_new_lock_seqid - 1: it gets bumped in encode 
+ */
+
+static struct nfs4_stateowner *
+alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock)
+{
+	struct nfs4_stateowner *sop;
+	struct nfs4_replay *rp;
+	unsigned int idhashval;
+
+	if (!(sop = alloc_stateowner(&lock->lk_new_owner)))
+		return NULL;
+	idhashval = lockownerid_hashval(current_ownerid);
+	INIT_LIST_HEAD(&sop->so_idhash);
+	INIT_LIST_HEAD(&sop->so_strhash);
+	INIT_LIST_HEAD(&sop->so_perclient);
+	INIT_LIST_HEAD(&sop->so_perfilestate);
+	INIT_LIST_HEAD(&sop->so_perlockowner);
+	INIT_LIST_HEAD(&sop->so_close_lru); /* not used */
+	sop->so_time = 0;
+	list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]);
+	list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]);
+	list_add(&sop->so_perlockowner, &open_stp->st_perlockowner);
+	sop->so_is_open_owner = 0;
+	sop->so_id = current_ownerid++;
+	sop->so_client = clp;
+	sop->so_seqid = lock->lk_new_lock_seqid - 1;
+	sop->so_confirmed = 1;
+	rp = &sop->so_replay;
+	rp->rp_status = NFSERR_SERVERFAULT;
+	rp->rp_buflen = 0;
+	rp->rp_buf = rp->rp_ibuf;
+	return sop;
+}
+
+struct nfs4_stateid *
+alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp)
+{
+	struct nfs4_stateid *stp;
+	unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
+
+	if ((stp = kmalloc(sizeof(struct nfs4_stateid), 
+					GFP_KERNEL)) == NULL)
+		goto out;
+	INIT_LIST_HEAD(&stp->st_hash);
+	INIT_LIST_HEAD(&stp->st_perfile);
+	INIT_LIST_HEAD(&stp->st_perfilestate);
+	INIT_LIST_HEAD(&stp->st_perlockowner); /* not used */
+	list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]);
+	list_add(&stp->st_perfile, &fp->fi_perfile);
+	list_add_perfile++;
+	list_add(&stp->st_perfilestate, &sop->so_perfilestate);
+	stp->st_stateowner = sop;
+	stp->st_file = fp;
+	stp->st_stateid.si_boot = boot_time;
+	stp->st_stateid.si_stateownerid = sop->so_id;
+	stp->st_stateid.si_fileid = fp->fi_id;
+	stp->st_stateid.si_generation = 0;
+	stp->st_vfs_file = open_stp->st_vfs_file; /* FIXME refcount?? */
+	stp->st_access_bmap = open_stp->st_access_bmap;
+	stp->st_deny_bmap = open_stp->st_deny_bmap;
+
+out:
+	return stp;
+}
+
+int
+check_lock_length(u64 offset, u64 length)
+{
+	return ((length == 0)  || ((length != ~(u64)0) &&
+	     LOFF_OVERFLOW(offset, length)));
+}
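+
+/*
+ * check_lock_length() returns nonzero (i.e. reject with NFS4ERR_INVAL) for a
+ * zero-length lock, or when offset + length would wrap past 2^64.  E.g.
+ * offset = ~(u64)0 - 10 with length = 20 is rejected, while length = ~(u64)0
+ * always passes since it means "lock to the end of the file".
+ */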
+
+/*
+ *  LOCK operation 
+ */
+int
+nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock *lock)
+{
+	struct nfs4_stateowner *lock_sop = NULL, *open_sop = NULL;
+	struct nfs4_stateid *lock_stp;
+	struct file *filp;
+	struct file_lock file_lock;
+	struct file_lock *conflock;
+	int status = 0;
+	unsigned int strhashval;
+
+	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
+		(long long) lock->lk_offset,
+		(long long) lock->lk_length);
+
+	if (nfs4_in_grace() && !lock->lk_reclaim)
+		return nfserr_grace;
+	if (!nfs4_in_grace() && lock->lk_reclaim)
+		return nfserr_no_grace;
+
+	if (check_lock_length(lock->lk_offset, lock->lk_length))
+		 return nfserr_inval;
+
+	nfs4_lock_state();
+
+	if (lock->lk_is_new) {
+		/*
+		 * Client indicates that this is a new lockowner.
+		 * Use open owner and open stateid to create lock owner and
+		 * lock stateid.
+		 */
+		struct nfs4_stateid *open_stp = NULL;
+		struct nfs4_file *fp;
+		
+		status = nfserr_stale_clientid;
+		if (STALE_CLIENTID(&lock->lk_new_clientid)) {
+			printk("NFSD: nfsd4_lock: clientid is stale!\n");
+			goto out;
+		}
+
+		/* is the new lock seqid presented by the client zero? */
+		status = nfserr_bad_seqid;
+		if (lock->v.new.lock_seqid != 0)
+			goto out;
+
+		/* validate and update open stateid and open seqid */
+		status = nfs4_preprocess_seqid_op(current_fh, 
+				        lock->lk_new_open_seqid,
+		                        &lock->lk_new_open_stateid,
+		                        CHECK_FH | OPEN_STATE,
+		                        &open_sop, &open_stp,
+					&lock->v.new.clientid);
+		if (status) {
+			if (lock->lk_reclaim)
+				status = nfserr_reclaim_bad;
+			goto out;
+		}
+		/* create lockowner and lock stateid */
+		fp = open_stp->st_file;
+		strhashval = lock_ownerstr_hashval(fp->fi_inode, 
+				open_sop->so_client->cl_clientid.cl_id, 
+				&lock->v.new.owner);
+		/*
+		 * If we already have this lock owner, the client is in
+		 * error (or our bookkeeping is wrong!)
+		 * for asking for a 'new lock'.
+		 */
+		status = nfserr_bad_stateid;
+		lock_sop = find_lockstateowner(&lock->v.new.owner,
+						&lock->v.new.clientid);
+		if (lock_sop)
+			goto out;
+		status = nfserr_resource;
+		if (!(lock->lk_stateowner = alloc_init_lock_stateowner(strhashval, open_sop->so_client, open_stp, lock)))
+			goto out;
+		if ((lock_stp = alloc_init_lock_stateid(lock->lk_stateowner, 
+						fp, open_stp)) == NULL) {
+			release_stateowner(lock->lk_stateowner);
+			lock->lk_stateowner = NULL;
+			goto out;
+		}
+		/* bump the open seqid used to create the lock */
+		open_sop->so_seqid++;
+	} else {
+		/* lock (lock owner + lock stateid) already exists */
+		status = nfs4_preprocess_seqid_op(current_fh,
+				       lock->lk_old_lock_seqid, 
+				       &lock->lk_old_lock_stateid, 
+				       CHECK_FH | LOCK_STATE, 
+				       &lock->lk_stateowner, &lock_stp, NULL);
+		if (status)
+			goto out;
+	}
+	/* lock->lk_stateowner and lock_stp have been created or found */
+	filp = lock_stp->st_vfs_file;
+
+	if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) {
+		printk("NFSD: nfsd4_lock: permission denied!\n");
+		goto out;
+	}
+
+	locks_init_lock(&file_lock);
+	switch (lock->lk_type) {
+		case NFS4_READ_LT:
+		case NFS4_READW_LT:
+			file_lock.fl_type = F_RDLCK;
+		break;
+		case NFS4_WRITE_LT:
+		case NFS4_WRITEW_LT:
+			file_lock.fl_type = F_WRLCK;
+		break;
+		default:
+			status = nfserr_inval;
+		goto out;
+	}
+	file_lock.fl_owner = (fl_owner_t) lock->lk_stateowner;
+	file_lock.fl_pid = current->tgid;
+	file_lock.fl_file = filp;
+	file_lock.fl_flags = FL_POSIX;
+
+	file_lock.fl_start = lock->lk_offset;
+	if ((lock->lk_length == ~(u64)0) || 
+			LOFF_OVERFLOW(lock->lk_offset, lock->lk_length))
+		file_lock.fl_end = ~(u64)0;
+	else
+		file_lock.fl_end = lock->lk_offset + lock->lk_length - 1;
+	nfs4_transform_lock_offset(&file_lock);
+
+	/*
+	 * Try to lock the file in the VFS.
+	 * Note: locks.c uses the BKL to protect the inode's lock list.
+	 */
+
+	status = posix_lock_file(filp, &file_lock);
+	if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
+		file_lock.fl_ops->fl_release_private(&file_lock);
+	dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status);
+	switch (-status) {
+	case 0: /* success! */
+		update_stateid(&lock_stp->st_stateid);
+		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, 
+				sizeof(stateid_t));
+		goto out;
+	case (EAGAIN):
+		goto conflicting_lock;
+	case (EDEADLK):
+		status = nfserr_deadlock;
+	default:        
+		dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
+		goto out_destroy_new_stateid;
+	}
+
+conflicting_lock:
+	dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
+	status = nfserr_denied;
+	/* XXX There is a race here. Future patch needed to provide 
+	 * an atomic posix_lock_and_test_file
+	 */
+	if (!(conflock = posix_test_lock(filp, &file_lock))) {
+		status = nfserr_serverfault;
+		goto out;
+	}
+	nfs4_set_lock_denied(conflock, &lock->lk_denied);
+
+out_destroy_new_stateid:
+	if (lock->lk_is_new) {
+		dprintk("NFSD: nfsd4_lock: destroy new stateid!\n");
+		/*
+		 * An error encountered after instantiation of the new
+		 * stateid has forced us to destroy it.
+		 */
+		if (!seqid_mutating_err(status))
+			open_sop->so_seqid--;
+
+		release_state_owner(lock_stp, LOCK_STATE);
+	}
+out:
+	if (lock->lk_stateowner)
+		nfs4_get_stateowner(lock->lk_stateowner);
+	nfs4_unlock_state();
+	return status;
+}
+
+/*
+ * LOCKT operation
+ */
+int
+nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lockt *lockt)
+{
+	struct inode *inode;
+	struct file file;
+	struct file_lock file_lock;
+	struct file_lock *conflicting_lock;
+	int status;
+
+	if (nfs4_in_grace())
+		return nfserr_grace;
+
+	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
+		 return nfserr_inval;
+
+	lockt->lt_stateowner = NULL;
+	nfs4_lock_state();
+
+	status = nfserr_stale_clientid;
+	if (STALE_CLIENTID(&lockt->lt_clientid)) {
+		printk("NFSD: nfsd4_lockt: clientid is stale!\n");
+		goto out;
+	}
+
+	if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) {
+		printk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
+		if (status == nfserr_symlink)
+			status = nfserr_inval;
+		goto out;
+	}
+
+	inode = current_fh->fh_dentry->d_inode;
+	locks_init_lock(&file_lock);
+	switch (lockt->lt_type) {
+		case NFS4_READ_LT:
+		case NFS4_READW_LT:
+			file_lock.fl_type = F_RDLCK;
+		break;
+		case NFS4_WRITE_LT:
+		case NFS4_WRITEW_LT:
+			file_lock.fl_type = F_WRLCK;
+		break;
+		default:
+			printk("NFSD: nfs4_lockt: bad lock type!\n");
+			status = nfserr_inval;
+		goto out;
+	}
+
+	lockt->lt_stateowner = find_lockstateowner_str(inode,
+			&lockt->lt_clientid, &lockt->lt_owner);
+	if (lockt->lt_stateowner)
+		file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
+	file_lock.fl_pid = current->tgid;
+	file_lock.fl_flags = FL_POSIX;
+
+	file_lock.fl_start = lockt->lt_offset;
+	if ((lockt->lt_length == ~(u64)0) || LOFF_OVERFLOW(lockt->lt_offset, lockt->lt_length))
+		file_lock.fl_end = ~(u64)0;
+	else
+		file_lock.fl_end = lockt->lt_offset + lockt->lt_length - 1;
+
+	nfs4_transform_lock_offset(&file_lock);
+
+	/* posix_test_lock uses the struct file _only_ to resolve the inode.
+	 * Since LOCKT doesn't require an OPEN, a struct file may not exist,
+	 * so pass posix_test_lock a struct file with only the dentry:inode
+	 * set.
+	 */
+	memset(&file, 0, sizeof (struct file));
+	file.f_dentry = current_fh->fh_dentry;
+
+	status = nfs_ok;
+	conflicting_lock = posix_test_lock(&file, &file_lock);
+	if (conflicting_lock) {
+		status = nfserr_denied;
+		nfs4_set_lock_denied(conflicting_lock, &lockt->lt_denied);
+	}
+out:
+	nfs4_unlock_state();
+	return status;
+}
+
+int
+nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_locku *locku)
+{
+	struct nfs4_stateid *stp;
+	struct file *filp = NULL;
+	struct file_lock file_lock;
+	int status;
+						        
+	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
+		(long long) locku->lu_offset,
+		(long long) locku->lu_length);
+
+	if (check_lock_length(locku->lu_offset, locku->lu_length))
+		 return nfserr_inval;
+
+	nfs4_lock_state();
+									        
+	if ((status = nfs4_preprocess_seqid_op(current_fh, 
+					locku->lu_seqid, 
+					&locku->lu_stateid, 
+					CHECK_FH | LOCK_STATE, 
+					&locku->lu_stateowner, &stp, NULL)))
+		goto out;
+
+	filp = stp->st_vfs_file;
+	BUG_ON(!filp);
+	locks_init_lock(&file_lock);
+	file_lock.fl_type = F_UNLCK;
+	file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner;
+	file_lock.fl_pid = current->tgid;
+	file_lock.fl_file = filp;
+	file_lock.fl_flags = FL_POSIX; 
+	file_lock.fl_start = locku->lu_offset;
+
+	if ((locku->lu_length == ~(u64)0) || LOFF_OVERFLOW(locku->lu_offset, locku->lu_length))
+		file_lock.fl_end = ~(u64)0;
+	else
+		file_lock.fl_end = locku->lu_offset + locku->lu_length - 1;
+	nfs4_transform_lock_offset(&file_lock);
+
+	/*
+	 * Try to unlock the file in the VFS.
+	 */
+	status = posix_lock_file(filp, &file_lock); 
+	if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
+		file_lock.fl_ops->fl_release_private(&file_lock);
+	if (status) {
+		printk("NFSD: nfs4_locku: posix_lock_file failed!\n");
+		goto out_nfserr;
+	}
+	/*
+	 * OK, unlock succeeded; the only thing left to do is update
+	 * the stateid.
+	 */
+	update_stateid(&stp->st_stateid);
+	memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t));
+
+out:
+	if (locku->lu_stateowner)
+		nfs4_get_stateowner(locku->lu_stateowner);
+	nfs4_unlock_state();
+	return status;
+
+out_nfserr:
+	status = nfserrno(status);
+	goto out;
+}
+
+/*
+ * returns
+ * 	1: locks held by lockowner
+ * 	0: no locks held by lockowner
+ */
+static int
+check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
+{
+	struct file_lock **flpp;
+	struct inode *inode = filp->f_dentry->d_inode;
+	int status = 0;
+
+	lock_kernel();
+	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
+		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
+			status = 1;
+			goto out;
+		}
+	}
+out:
+	unlock_kernel();
+	return status;
+}
+
+int
+nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_release_lockowner *rlockowner)
+{
+	clientid_t *clid = &rlockowner->rl_clientid;
+	struct nfs4_stateowner *local = NULL;
+	struct xdr_netobj *owner = &rlockowner->rl_owner;
+	int status;
+
+	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
+		clid->cl_boot, clid->cl_id);
+
+	/* XXX check for lease expiration */
+
+	status = nfserr_stale_clientid;
+	if (STALE_CLIENTID(clid)) {
+		printk("NFSD: nfsd4_release_lockowner: clientid is stale!\n");
+		return status;
+	}
+
+	nfs4_lock_state();
+
+	status = nfs_ok;
+	local = find_lockstateowner(owner, clid);
+	if (local) {
+		struct nfs4_stateid *stp;
+
+		/* check for any locks held by any stateid
+		 * associated with the (lock) stateowner */
+		status = nfserr_locks_held;
+		list_for_each_entry(stp, &local->so_perfilestate,
+				st_perfilestate) {
+			if (check_for_locks(stp->st_vfs_file, local))
+				goto out;
+		}
+		/* no locks held by (lock) stateowner */
+		status = nfs_ok;
+		release_stateowner(local);
+	}
+out:
+	nfs4_unlock_state();
+	return status;
+}
+
+static inline struct nfs4_client_reclaim *
+alloc_reclaim(int namelen)
+{
+	struct nfs4_client_reclaim *crp = NULL;
+
+	crp = kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
+	if (!crp)
+		return NULL;
+	crp->cr_name.data = kmalloc(namelen, GFP_KERNEL);
+	if (!crp->cr_name.data) {
+		kfree(crp);
+		return NULL;
+	}
+	return crp;
+}
+
+/*
+ * failure => all reset bets are off, nfserr_no_grace...
+ */
+static int
+nfs4_client_to_reclaim(char *name, int namlen)
+{
+	unsigned int strhashval;
+	struct nfs4_client_reclaim *crp = NULL;
+
+	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", namlen, name);
+	crp = alloc_reclaim(namlen);
+	if (!crp)
+		return 0;
+	strhashval = clientstr_hashval(name, namlen);
+	INIT_LIST_HEAD(&crp->cr_strhash);
+	list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
+	memcpy(crp->cr_name.data, name, namlen);
+	crp->cr_name.len = namlen;
+	reclaim_str_hashtbl_size++;
+	return 1;
+}
+
+static void
+nfs4_release_reclaim(void)
+{
+	struct nfs4_client_reclaim *crp = NULL;
+	int i;
+
+	BUG_ON(!nfs4_reclaim_init);
+	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+		while (!list_empty(&reclaim_str_hashtbl[i])) {
+			crp = list_entry(reclaim_str_hashtbl[i].next,
+			                struct nfs4_client_reclaim, cr_strhash);
+			list_del(&crp->cr_strhash);
+			kfree(crp->cr_name.data);
+			kfree(crp);
+			reclaim_str_hashtbl_size--;
+		}
+	}
+	BUG_ON(reclaim_str_hashtbl_size);
+}
+
+/*
+ * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
+ */
+struct nfs4_client_reclaim *
+nfs4_find_reclaim_client(clientid_t *clid)
+{
+	unsigned int strhashval;
+	struct nfs4_client *clp;
+	struct nfs4_client_reclaim *crp = NULL;
+
+
+	/* find clientid in conf_id_hashtbl */
+	clp = find_confirmed_client(clid);
+	if (clp == NULL)
+		return NULL;
+
+	dprintk("NFSD: nfs4_find_reclaim_client for %.*s\n",
+		            clp->cl_name.len, clp->cl_name.data);
+
+	/* find clp->cl_name in reclaim_str_hashtbl */
+	strhashval = clientstr_hashval(clp->cl_name.data, clp->cl_name.len);
+	list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
+		if (cmp_name(&crp->cr_name, &clp->cl_name)) {
+			return crp;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Called from OPEN. Look for clientid in reclaim list.
+ */
+int
+nfs4_check_open_reclaim(clientid_t *clid)
+{
+	struct nfs4_client_reclaim *crp;
+
+	if ((crp = nfs4_find_reclaim_client(clid)) == NULL)
+		return nfserr_reclaim_bad;
+	return nfs_ok;
+}
+
+
+/* 
+ * Start and stop routines
+ */
+
+static void
+__nfs4_state_init(void)
+{
+	int i;
+	time_t grace_time;
+
+	if (!nfs4_reclaim_init) {
+		for (i = 0; i < CLIENT_HASH_SIZE; i++)
+			INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
+		reclaim_str_hashtbl_size = 0;
+		nfs4_reclaim_init = 1;
+	}
+	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&conf_id_hashtbl[i]);
+		INIT_LIST_HEAD(&conf_str_hashtbl[i]);
+		INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
+		INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
+	}
+	for (i = 0; i < FILE_HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&file_hashtbl[i]);
+	}
+	for (i = 0; i < OWNER_HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
+		INIT_LIST_HEAD(&ownerid_hashtbl[i]);
+	}
+	for (i = 0; i < STATEID_HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&stateid_hashtbl[i]);
+		INIT_LIST_HEAD(&lockstateid_hashtbl[i]);
+	}
+	for (i = 0; i < LOCK_HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
+		INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
+	}
+	memset(&zerostateid, 0, sizeof(stateid_t));
+	memset(&onestateid, ~0, sizeof(stateid_t));
+
+	INIT_LIST_HEAD(&close_lru);
+	INIT_LIST_HEAD(&client_lru);
+	INIT_LIST_HEAD(&del_recall_lru);
+	spin_lock_init(&recall_lock);
+	boot_time = get_seconds();
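+	/*
+	 * Grace period sizing, roughly: reclaims must be allowed for as long
+	 * as the lease clients were promised before the restart, so we use
+	 * max(old_lease_time, lease_time) - e.g. max(90, 60) = 90 seconds -
+	 * and skip the grace period entirely when there is nothing to
+	 * reclaim.
+	 */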
+	grace_time = max(old_lease_time, lease_time);
+	if (reclaim_str_hashtbl_size == 0)
+		grace_time = 0;
+	if (grace_time)
+		printk("NFSD: starting %ld-second grace period\n", grace_time);
+	grace_end = boot_time + grace_time;
+	INIT_WORK(&laundromat_work, laundromat_main, NULL);
+	schedule_delayed_work(&laundromat_work, NFSD_LEASE_TIME*HZ);
+}
+
+int
+nfs4_state_init(void)
+{
+	int status;
+
+	if (nfs4_init)
+		return 0;
+	status = nfsd4_init_slabs();
+	if (status)
+		return status;
+	__nfs4_state_init();
+	nfs4_init = 1;
+	return 0;
+}
+
+int
+nfs4_in_grace(void)
+{
+	return get_seconds() < grace_end;
+}
+
+void
+set_no_grace(void)
+{
+	printk("NFSD: ERROR in reboot recovery.  State reclaims will fail.\n");
+	grace_end = get_seconds();
+}
+
+time_t
+nfs4_lease_time(void)
+{
+	return lease_time;
+}
+
+static void
+__nfs4_state_shutdown(void)
+{
+	int i;
+	struct nfs4_client *clp = NULL;
+	struct nfs4_delegation *dp = NULL;
+	struct nfs4_stateowner *sop = NULL;
+	struct list_head *pos, *next, reaplist;
+
+	list_for_each_safe(pos, next, &close_lru) {
+		sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
+		list_del(&sop->so_close_lru);
+		nfs4_put_stateowner(sop);
+	}
+
+	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+		while (!list_empty(&conf_id_hashtbl[i])) {
+			clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
+			expire_client(clp);
+		}
+		while (!list_empty(&unconf_str_hashtbl[i])) {
+			clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
+			expire_client(clp);
+		}
+	}
+	INIT_LIST_HEAD(&reaplist);
+	spin_lock(&recall_lock);
+	list_for_each_safe(pos, next, &del_recall_lru) {
+		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+		list_move(&dp->dl_recall_lru, &reaplist);
+	}
+	spin_unlock(&recall_lock);
+	list_for_each_safe(pos, next, &reaplist) {
+		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+		list_del_init(&dp->dl_recall_lru);
+		unhash_delegation(dp);
+	}
+
+	release_all_files();
+	cancel_delayed_work(&laundromat_work);
+	flush_scheduled_work();
+	nfs4_init = 0;
+	dprintk("NFSD: list_add_perfile %d list_del_perfile %d\n",
+			list_add_perfile, list_del_perfile);
+	dprintk("NFSD: add_perclient %d del_perclient %d\n",
+			add_perclient, del_perclient);
+	dprintk("NFSD: alloc_file %d free_file %d\n",
+			alloc_file, free_file);
+	dprintk("NFSD: vfsopen %d vfsclose %d\n",
+			vfsopen, vfsclose);
+	dprintk("NFSD: alloc_delegation %d free_delegation %d\n",
+			alloc_delegation, free_delegation);
+
+}
+
+void
+nfs4_state_shutdown(void)
+{
+	nfs4_lock_state();
+	nfs4_release_reclaim();
+	__nfs4_state_shutdown();
+	nfsd4_free_slabs();
+	nfs4_unlock_state();
+}
+
+/*
+ * Called when leasetime is changed.
+ *
+ * if nfsd is not started, simply set the global lease.
+ *
+ * if nfsd(s) are running, lease change requires nfsv4 state to be reset.
+ * e.g.: boot_time is reset, existing nfs4_client structs are
+ * used to fill reclaim_str_hashtbl, then all state (except for the
+ * reclaim_str_hashtbl) is re-initialized.
+ *
+ * if the old lease time is greater than the new lease time, the grace
+ * period needs to be set to the old lease time to allow clients to reclaim
+ * their state. XXX - we may want to set the grace period == lease time
+ * after an initial grace period == old lease time
+ *
+ * if an error occurs in this process, the new lease is set, but the server
+ * will not honor OPEN or LOCK reclaims, and will return nfserr_no_grace
+ * which means OPEN/LOCK/READ/WRITE will fail during grace period.
+ *
+ * clients will attempt to reset all state with SETCLIENTID/CONFIRM, and
+ * OPEN and LOCK reclaims.
+ */
+void
+nfs4_reset_lease(time_t leasetime)
+{
+	struct nfs4_client *clp;
+	int i;
+
+	printk("NFSD: New leasetime %ld\n",leasetime);
+	if (!nfs4_init)
+		return;
+	nfs4_lock_state();
+	old_lease_time = lease_time;
+	lease_time = leasetime;
+
+	nfs4_release_reclaim();
+
+	/* populate reclaim_str_hashtbl with current confirmed nfs4_clientid */
+	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+		list_for_each_entry(clp, &conf_id_hashtbl[i], cl_idhash) {
+			if (!nfs4_client_to_reclaim(clp->cl_name.data,
+						clp->cl_name.len)) {
+				nfs4_release_reclaim();
+				goto init_state;
+			}
+		}
+	}
+init_state:
+	__nfs4_state_shutdown();
+	__nfs4_state_init();
+	nfs4_unlock_state();
+}
+
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
new file mode 100644
index 0000000..36a058a
--- /dev/null
+++ b/fs/nfsd/nfs4xdr.c
@@ -0,0 +1,2536 @@
+/*
+ *  fs/nfs/nfs4xdr.c
+ *
+ *  Server-side XDR for NFSv4
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Kendrick Smith <kmsmith@umich.edu>
+ *  Andy Adamson   <andros@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * TODO: Neil Brown made the following observation:  We currently
+ * initially reserve NFSD_BUFSIZE space on the transmit queue and
+ * never release any of that until the request is complete.
+ * It would be good to calculate a new maximum response size while
+ * decoding the COMPOUND, and call svc_reserve with this number
+ * at the end of nfs4svc_decode_compoundargs.
+ */
+
+#include <linux/param.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/state.h>
+#include <linux/nfsd/xdr4.h>
+#include <linux/nfsd_idmap.h>
+#include <linux/nfs4.h>
+#include <linux/nfs4_acl.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_XDR
+
+static int
+check_filename(char *str, int len, int err)
+{
+	int i;
+
+	if (len == 0)
+		return nfserr_inval;
+	if (isdotent(str, len))
+		return err;
+	for (i = 0; i < len; i++)
+		if (str[i] == '/')
+			return err;
+	return 0;
+}
+
+/*
+ * START OF "GENERIC" DECODE ROUTINES.
+ *   These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
+#define DECODE_HEAD				\
+	u32 *p;					\
+	int status
+#define DECODE_TAIL				\
+	status = 0;				\
+out:						\
+	return status;				\
+xdr_error:					\
+	printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__);	\
+	status = nfserr_bad_xdr;		\
+	goto out
+
+#define READ32(x)         (x) = ntohl(*p++)
+#define READ64(x)         do {			\
+	(x) = (u64)ntohl(*p++) << 32;		\
+	(x) |= ntohl(*p++);			\
+} while (0)
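+/*
+ * READ64 assembles a 64-bit value from two consecutive big-endian 32-bit
+ * words: e.g. on-the-wire words 0x00000001 0x00000002 decode to the host
+ * value 0x0000000100000002.
+ */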
+#define READTIME(x)       do {			\
+	p++;					\
+	(x) = ntohl(*p++);			\
+	p++;					\
+} while (0)
+#define READMEM(x,nbytes) do {			\
+	x = (char *)p;				\
+	p += XDR_QUADLEN(nbytes);		\
+} while (0)
+#define SAVEMEM(x,nbytes) do {			\
+	if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
+ 		savemem(argp, p, nbytes) :	\
+ 		(char *)p)) {			\
+		printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__); \
+		goto xdr_error;			\
+		}				\
+	p += XDR_QUADLEN(nbytes);		\
+} while (0)
+#define COPYMEM(x,nbytes) do {			\
+	memcpy((x), p, nbytes);			\
+	p += XDR_QUADLEN(nbytes);		\
+} while (0)
+
+/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
+#define READ_BUF(nbytes)  do {			\
+	if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) {	\
+		p = argp->p;			\
+		argp->p += XDR_QUADLEN(nbytes);	\
+	} else if (!(p = read_buf(argp, nbytes))) { \
+		printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__); \
+		goto xdr_error;			\
+	}					\
+} while (0)
+
+u32 *read_buf(struct nfsd4_compoundargs *argp, int nbytes)
+{
+	/* We want more bytes than seem to be available.
+	 * Maybe we need a new page, maybe we have just run out
+	 */
+	int avail = (char*)argp->end - (char*)argp->p;
+	u32 *p;
+	if (avail + argp->pagelen < nbytes)
+		return NULL;
+	if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
+		return NULL;
+	/* ok, we can do it with the current plus the next page */
+	if (nbytes <= sizeof(argp->tmp))
+		p = argp->tmp;
+	else {
+		if (argp->tmpp)
+			kfree(argp->tmpp);
+		p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
+		if (!p)
+			return NULL;
+		
+	}
+	memcpy(p, argp->p, avail);
+	/* step to next page */
+	argp->p = page_address(argp->pagelist[0]);
+	argp->pagelist++;
+	if (argp->pagelen < PAGE_SIZE) {
+		argp->end = p + (argp->pagelen>>2);
+		argp->pagelen = 0;
+	} else {
+		argp->end = p + (PAGE_SIZE>>2);
+		argp->pagelen -= PAGE_SIZE;
+	}
+	memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
+	argp->p += XDR_QUADLEN(nbytes - avail);
+	return p;
+}
+
+static int
+defer_free(struct nfsd4_compoundargs *argp,
+		void (*release)(const void *), void *p)
+{
+	struct tmpbuf *tb;
+
+	tb = kmalloc(sizeof(*tb), GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+	tb->buf = p;
+	tb->release = release;
+	tb->next = argp->to_free;
+	argp->to_free = tb;
+	return 0;
+}
+
+char *savemem(struct nfsd4_compoundargs *argp, u32 *p, int nbytes)
+{
+	void *new = NULL;
+	if (p == argp->tmp) {
+		new = kmalloc(nbytes, GFP_KERNEL);
+		if (!new) return NULL;
+		p = new;
+		memcpy(p, argp->tmp, nbytes);
+	} else {
+		if (p != argp->tmpp)
+			BUG();
+		argp->tmpp = NULL;
+	}
+	if (defer_free(argp, kfree, p)) {
+		kfree(new);
+		return NULL;
+	} else
+		return (char *)p;
+}
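+
+/*
+ * Note on SAVEMEM/savemem(): a decoded item only needs special handling when
+ * it landed in one of the scratch buffers (argp->tmp or argp->tmpp) that
+ * read_buf() uses to splice data across a page boundary.  Data in argp->tmp
+ * is copied to a fresh allocation, while a kmalloc'ed argp->tmpp buffer is
+ * simply taken over; both are registered with defer_free() so they are
+ * released when the compound completes.  Otherwise the in-place pointer into
+ * the request buffer is used directly.
+ */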
+
+
+static int
+nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
+{
+	u32 bmlen;
+	DECODE_HEAD;
+
+	bmval[0] = 0;
+	bmval[1] = 0;
+
+	READ_BUF(4);
+	READ32(bmlen);
+	if (bmlen > 1000)
+		goto xdr_error;
+
+	READ_BUF(bmlen << 2);
+	if (bmlen > 0)
+		READ32(bmval[0]);
+	if (bmlen > 1)
+		READ32(bmval[1]);
+
+	DECODE_TAIL;
+}
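+
+/*
+ * Bitmap wire format, by example: a request carrying bitmap length 3 and
+ * words w0, w1, w2 decodes w0 and w1 into bmval[0]/bmval[1]; READ_BUF has
+ * already advanced past all three words, so w2 is silently ignored.
+ */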
+
+static int
+nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *iattr,
+    struct nfs4_acl **acl)
+{
+	int expected_len, len = 0;
+	u32 dummy32;
+	char *buf;
+
+	DECODE_HEAD;
+	iattr->ia_valid = 0;
+	if ((status = nfsd4_decode_bitmap(argp, bmval)))
+		return status;
+
+	/*
+	 * According to spec, unsupported attributes return ERR_NOTSUPP;
+	 * read-only attributes return ERR_INVAL.
+	 */
+	if ((bmval[0] & ~NFSD_SUPPORTED_ATTRS_WORD0) || (bmval[1] & ~NFSD_SUPPORTED_ATTRS_WORD1))
+		return nfserr_attrnotsupp;
+	if ((bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0) || (bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1))
+		return nfserr_inval;
+
+	READ_BUF(4);
+	READ32(expected_len);
+
+	if (bmval[0] & FATTR4_WORD0_SIZE) {
+		READ_BUF(8);
+		len += 8;
+		READ64(iattr->ia_size);
+		iattr->ia_valid |= ATTR_SIZE;
+	}
+	if (bmval[0] & FATTR4_WORD0_ACL) {
+		int nace, i;
+		struct nfs4_ace ace;
+
+		READ_BUF(4); len += 4;
+		READ32(nace);
+
+		*acl = nfs4_acl_new();
+		if (*acl == NULL) {
+			status = -ENOMEM;
+			goto out_nfserr;
+		}
+		defer_free(argp, (void (*)(const void *))nfs4_acl_free, *acl);
+
+		for (i = 0; i < nace; i++) {
+			READ_BUF(16); len += 16;
+			READ32(ace.type);
+			READ32(ace.flag);
+			READ32(ace.access_mask);
+			READ32(dummy32);
+			READ_BUF(dummy32);
+			len += XDR_QUADLEN(dummy32) << 2;
+			READMEM(buf, dummy32);
+			ace.whotype = nfs4_acl_get_whotype(buf, dummy32);
+			status = 0;
+			if (ace.whotype != NFS4_ACL_WHO_NAMED)
+				ace.who = 0;
+			else if (ace.flag & NFS4_ACE_IDENTIFIER_GROUP)
+				status = nfsd_map_name_to_gid(argp->rqstp,
+						buf, dummy32, &ace.who);
+			else
+				status = nfsd_map_name_to_uid(argp->rqstp,
+						buf, dummy32, &ace.who);
+			if (status)
+				goto out_nfserr;
+			if (nfs4_acl_add_ace(*acl, ace.type, ace.flag,
+				 ace.access_mask, ace.whotype, ace.who) != 0) {
+				status = -ENOMEM;
+				goto out_nfserr;
+			}
+		}
+	} else
+		*acl = NULL;
+	if (bmval[1] & FATTR4_WORD1_MODE) {
+		READ_BUF(4);
+		len += 4;
+		READ32(iattr->ia_mode);
+		iattr->ia_mode &= (S_IFMT | S_IALLUGO);
+		iattr->ia_valid |= ATTR_MODE;
+	}
+	if (bmval[1] & FATTR4_WORD1_OWNER) {
+		READ_BUF(4);
+		len += 4;
+		READ32(dummy32);
+		READ_BUF(dummy32);
+		len += (XDR_QUADLEN(dummy32) << 2);
+		READMEM(buf, dummy32);
+		if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+			goto out_nfserr;
+		iattr->ia_valid |= ATTR_UID;
+	}
+	if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
+		READ_BUF(4);
+		len += 4;
+		READ32(dummy32);
+		READ_BUF(dummy32);
+		len += (XDR_QUADLEN(dummy32) << 2);
+		READMEM(buf, dummy32);
+		if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+			goto out_nfserr;
+		iattr->ia_valid |= ATTR_GID;
+	}
+	if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
+		READ_BUF(4);
+		len += 4;
+		READ32(dummy32);
+		switch (dummy32) {
+		case NFS4_SET_TO_CLIENT_TIME:
+			/* We require the high 32 bits of 'seconds' to be 0, and we ignore
+			   all 32 bits of 'nseconds'. */
+			READ_BUF(12);
+			len += 12;
+			READ32(dummy32);
+			if (dummy32)
+				return nfserr_inval;
+			READ32(iattr->ia_atime.tv_sec);
+			READ32(iattr->ia_atime.tv_nsec);
+			if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
+				return nfserr_inval;
+			iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+			break;
+		case NFS4_SET_TO_SERVER_TIME:
+			iattr->ia_valid |= ATTR_ATIME;
+			break;
+		default:
+			goto xdr_error;
+		}
+	}
+	if (bmval[1] & FATTR4_WORD1_TIME_METADATA) {
+		/* We require the high 32 bits of 'seconds' to be 0, and we ignore
+		   all 32 bits of 'nseconds'. */
+		READ_BUF(12);
+		len += 12;
+		READ32(dummy32);
+		if (dummy32)
+			return nfserr_inval;
+		READ32(iattr->ia_ctime.tv_sec);
+		READ32(iattr->ia_ctime.tv_nsec);
+		if (iattr->ia_ctime.tv_nsec >= (u32)1000000000)
+			return nfserr_inval;
+		iattr->ia_valid |= ATTR_CTIME;
+	}
+	if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
+		READ_BUF(4);
+		len += 4;
+		READ32(dummy32);
+		switch (dummy32) {
+		case NFS4_SET_TO_CLIENT_TIME:
+			/* We require the high 32 bits of 'seconds' to be 0, and we ignore
+			   all 32 bits of 'nseconds'. */
+			READ_BUF(12);
+			len += 12;
+			READ32(dummy32);
+			if (dummy32)
+				return nfserr_inval;
+			READ32(iattr->ia_mtime.tv_sec);
+			READ32(iattr->ia_mtime.tv_nsec);
+			if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
+				return nfserr_inval;
+			iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+			break;
+		case NFS4_SET_TO_SERVER_TIME:
+			iattr->ia_valid |= ATTR_MTIME;
+			break;
+		default:
+			goto xdr_error;
+		}
+	}
+	if (len != expected_len)
+		goto xdr_error;
+
+	DECODE_TAIL;
+
+out_nfserr:
+	status = nfserrno(status);
+	goto out;
+}
+
+static int
+nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(access->ac_req_access);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
+{
+	DECODE_HEAD;
+
+	close->cl_stateowner = NULL;
+	READ_BUF(4 + sizeof(stateid_t));
+	READ32(close->cl_seqid);
+	READ32(close->cl_stateid.si_generation);
+	COPYMEM(&close->cl_stateid.si_opaque, sizeof(stateid_opaque_t));
+
+	DECODE_TAIL;
+}
+
+
+static int
+nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
+{
+	DECODE_HEAD;
+
+	READ_BUF(12);
+	READ64(commit->co_offset);
+	READ32(commit->co_count);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(create->cr_type);
+	switch (create->cr_type) {
+	case NF4LNK:
+		READ_BUF(4);
+		READ32(create->cr_linklen);
+		READ_BUF(create->cr_linklen);
+		SAVEMEM(create->cr_linkname, create->cr_linklen);
+		break;
+	case NF4BLK:
+	case NF4CHR:
+		READ_BUF(8);
+		READ32(create->cr_specdata1);
+		READ32(create->cr_specdata2);
+		break;
+	case NF4SOCK:
+	case NF4FIFO:
+	case NF4DIR:
+	default:
+		break;
+	}
+
+	READ_BUF(4);
+	READ32(create->cr_namelen);
+	READ_BUF(create->cr_namelen);
+	SAVEMEM(create->cr_name, create->cr_namelen);
+	if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
+		return status;
+
+	if ((status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr, &create->cr_acl)))
+		goto out;
+
+	DECODE_TAIL;
+}
+
+static inline int
+nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
+{
+	DECODE_HEAD;
+
+	READ_BUF(sizeof(stateid_t));
+	READ32(dr->dr_stateid.si_generation);
+	COPYMEM(&dr->dr_stateid.si_opaque, sizeof(stateid_opaque_t));
+
+	DECODE_TAIL;
+}
+
+static inline int
+nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
+{
+	return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
+}
+
+static int
+nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(link->li_namelen);
+	READ_BUF(link->li_namelen);
+	SAVEMEM(link->li_name, link->li_namelen);
+	if ((status = check_filename(link->li_name, link->li_namelen, nfserr_inval)))
+		return status;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
+{
+	DECODE_HEAD;
+
+	lock->lk_stateowner = NULL;
+	/*
+	 * type, reclaim(boolean), offset, length, new_lock_owner(boolean)
+	 */
+	READ_BUF(28);
+	READ32(lock->lk_type);
+	if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
+		goto xdr_error;
+	READ32(lock->lk_reclaim);
+	READ64(lock->lk_offset);
+	READ64(lock->lk_length);
+	READ32(lock->lk_is_new);
+
+	if (lock->lk_is_new) {
+		READ_BUF(36);
+		READ32(lock->lk_new_open_seqid);
+		READ32(lock->lk_new_open_stateid.si_generation);
+
+		COPYMEM(&lock->lk_new_open_stateid.si_opaque, sizeof(stateid_opaque_t));
+		READ32(lock->lk_new_lock_seqid);
+		COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t));
+		READ32(lock->lk_new_owner.len);
+		READ_BUF(lock->lk_new_owner.len);
+		READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len);
+	} else {
+		READ_BUF(20);
+		READ32(lock->lk_old_lock_stateid.si_generation);
+		COPYMEM(&lock->lk_old_lock_stateid.si_opaque, sizeof(stateid_opaque_t));
+		READ32(lock->lk_old_lock_seqid);
+	}
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
+{
+	DECODE_HEAD;
+		        
+	READ_BUF(32);
+	READ32(lockt->lt_type);
+	if ((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
+		goto xdr_error;
+	READ64(lockt->lt_offset);
+	READ64(lockt->lt_length);
+	COPYMEM(&lockt->lt_clientid, 8);
+	READ32(lockt->lt_owner.len);
+	READ_BUF(lockt->lt_owner.len);
+	READMEM(lockt->lt_owner.data, lockt->lt_owner.len);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
+{
+	DECODE_HEAD;
+
+	locku->lu_stateowner = NULL;
+	READ_BUF(24 + sizeof(stateid_t));
+	READ32(locku->lu_type);
+	if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
+		goto xdr_error;
+	READ32(locku->lu_seqid);
+	READ32(locku->lu_stateid.si_generation);
+	COPYMEM(&locku->lu_stateid.si_opaque, sizeof(stateid_opaque_t));
+	READ64(locku->lu_offset);
+	READ64(locku->lu_length);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(lookup->lo_len);
+	READ_BUF(lookup->lo_len);
+	SAVEMEM(lookup->lo_name, lookup->lo_len);
+	if ((status = check_filename(lookup->lo_name, lookup->lo_len, nfserr_noent)))
+		return status;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+{
+	DECODE_HEAD;
+
+	memset(open->op_bmval, 0, sizeof(open->op_bmval));
+	open->op_iattr.ia_valid = 0;
+	open->op_stateowner = NULL;
+
+	/* seqid, share_access, share_deny, clientid, ownerlen */
+	READ_BUF(16 + sizeof(clientid_t));
+	READ32(open->op_seqid);
+	READ32(open->op_share_access);
+	READ32(open->op_share_deny);
+	COPYMEM(&open->op_clientid, sizeof(clientid_t));
+	READ32(open->op_owner.len);
+
+	/* owner, open_flag */
+	READ_BUF(open->op_owner.len + 4);
+	SAVEMEM(open->op_owner.data, open->op_owner.len);
+	READ32(open->op_create);
+	switch (open->op_create) {
+	case NFS4_OPEN_NOCREATE:
+		break;
+	case NFS4_OPEN_CREATE:
+		READ_BUF(4);
+		READ32(open->op_createmode);
+		switch (open->op_createmode) {
+		case NFS4_CREATE_UNCHECKED:
+		case NFS4_CREATE_GUARDED:
+			if ((status = nfsd4_decode_fattr(argp, open->op_bmval, &open->op_iattr, &open->op_acl)))
+				goto out;
+			break;
+		case NFS4_CREATE_EXCLUSIVE:
+			READ_BUF(8);
+			COPYMEM(open->op_verf.data, 8);
+			break;
+		default:
+			goto xdr_error;
+		}
+		break;
+	default:
+		goto xdr_error;
+	}
+
+	/* open_claim */
+	READ_BUF(4);
+	READ32(open->op_claim_type);
+	switch (open->op_claim_type) {
+	case NFS4_OPEN_CLAIM_NULL:
+	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+		READ_BUF(4);
+		READ32(open->op_fname.len);
+		READ_BUF(open->op_fname.len);
+		SAVEMEM(open->op_fname.data, open->op_fname.len);
+		if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
+			return status;
+		break;
+	case NFS4_OPEN_CLAIM_PREVIOUS:
+		READ_BUF(4);
+		READ32(open->op_delegate_type);
+		break;
+	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+		READ_BUF(sizeof(stateid_t) + 4);
+		COPYMEM(&open->op_delegate_stateid, sizeof(stateid_t));
+		READ32(open->op_fname.len);
+		READ_BUF(open->op_fname.len);
+		SAVEMEM(open->op_fname.data, open->op_fname.len);
+		if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
+			return status;
+		break;
+	default:
+		goto xdr_error;
+	}
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
+{
+	DECODE_HEAD;
+
+	open_conf->oc_stateowner = NULL;
+	READ_BUF(4 + sizeof(stateid_t));
+	READ32(open_conf->oc_req_stateid.si_generation);
+	COPYMEM(&open_conf->oc_req_stateid.si_opaque, sizeof(stateid_opaque_t));
+	READ32(open_conf->oc_seqid);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
+{
+	DECODE_HEAD;
+
+	open_down->od_stateowner = NULL;
+	READ_BUF(12 + sizeof(stateid_t));
+	READ32(open_down->od_stateid.si_generation);
+	COPYMEM(&open_down->od_stateid.si_opaque, sizeof(stateid_opaque_t));
+	READ32(open_down->od_seqid);
+	READ32(open_down->od_share_access);
+	READ32(open_down->od_share_deny);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(putfh->pf_fhlen);
+	if (putfh->pf_fhlen > NFS4_FHSIZE)
+		goto xdr_error;
+	READ_BUF(putfh->pf_fhlen);
+	SAVEMEM(putfh->pf_fhval, putfh->pf_fhlen);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
+{
+	DECODE_HEAD;
+
+	READ_BUF(sizeof(stateid_t) + 12);
+	READ32(read->rd_stateid.si_generation);
+	COPYMEM(&read->rd_stateid.si_opaque, sizeof(stateid_opaque_t));
+	READ64(read->rd_offset);
+	READ32(read->rd_length);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
+{
+	DECODE_HEAD;
+
+	READ_BUF(24);
+	READ64(readdir->rd_cookie);
+	COPYMEM(readdir->rd_verf.data, sizeof(readdir->rd_verf.data));
+	READ32(readdir->rd_dircount);    /* just in case you needed a useless field... */
+	READ32(readdir->rd_maxcount);
+	if ((status = nfsd4_decode_bitmap(argp, readdir->rd_bmval)))
+		goto out;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(remove->rm_namelen);
+	READ_BUF(remove->rm_namelen);
+	SAVEMEM(remove->rm_name, remove->rm_namelen);
+	if ((status = check_filename(remove->rm_name, remove->rm_namelen, nfserr_noent)))
+		return status;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(rename->rn_snamelen);
+	READ_BUF(rename->rn_snamelen + 4);
+	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
+	READ32(rename->rn_tnamelen);
+	READ_BUF(rename->rn_tnamelen);
+	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
+	if ((status = check_filename(rename->rn_sname, rename->rn_snamelen, nfserr_noent)))
+		return status;
+	if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen, nfserr_inval)))
+		return status;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
+{
+	DECODE_HEAD;
+
+	READ_BUF(sizeof(clientid_t));
+	COPYMEM(clientid, sizeof(clientid_t));
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
+{
+	DECODE_HEAD;
+
+	READ_BUF(sizeof(stateid_t));
+	READ32(setattr->sa_stateid.si_generation);
+	COPYMEM(&setattr->sa_stateid.si_opaque, sizeof(stateid_opaque_t));
+	if ((status = nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr, &setattr->sa_acl)))
+		goto out;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
+{
+	DECODE_HEAD;
+
+	READ_BUF(12);
+	COPYMEM(setclientid->se_verf.data, 8);
+	READ32(setclientid->se_namelen);
+
+	READ_BUF(setclientid->se_namelen + 8);
+	SAVEMEM(setclientid->se_name, setclientid->se_namelen);
+	READ32(setclientid->se_callback_prog);
+	READ32(setclientid->se_callback_netid_len);
+
+	READ_BUF(setclientid->se_callback_netid_len + 4);
+	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
+	READ32(setclientid->se_callback_addr_len);
+
+	READ_BUF(setclientid->se_callback_addr_len + 4);
+	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
+	READ32(setclientid->se_callback_ident);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
+{
+	DECODE_HEAD;
+
+	READ_BUF(8 + sizeof(nfs4_verifier));
+	COPYMEM(&scd_c->sc_clientid, 8);
+	COPYMEM(&scd_c->sc_confirm, sizeof(nfs4_verifier));
+
+	DECODE_TAIL;
+}
+
+/* Also used for NVERIFY */
+static int
+nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
+{
+#if 0
+	struct nfsd4_compoundargs save = {
+		.p = argp->p,
+		.end = argp->end,
+		.rqstp = argp->rqstp,
+	};
+	u32             ve_bmval[2];
+	struct iattr    ve_iattr;           /* request */
+	struct nfs4_acl *ve_acl;            /* request */
+#endif
+	DECODE_HEAD;
+
+	if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
+		goto out;
+
+	/* For convenience's sake, we compare raw xdr'd attributes in
+	 * nfsd4_proc_verify; however we still decode here just to return
+	 * the correct error in case of bad xdr. */
+#if 0
+	status = nfsd4_decode_fattr(ve_bmval, &ve_iattr, &ve_acl);
+	if (status == nfserr_inval) {
+		status = nfserrno(status);
+		goto out;
+	}
+#endif
+	READ_BUF(4);
+	READ32(verify->ve_attrlen);
+	READ_BUF(verify->ve_attrlen);
+	SAVEMEM(verify->ve_attrval, verify->ve_attrlen);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+{
+	int avail;
+	int v;
+	int len;
+	DECODE_HEAD;
+
+	READ_BUF(sizeof(stateid_opaque_t) + 20);
+	READ32(write->wr_stateid.si_generation);
+	COPYMEM(&write->wr_stateid.si_opaque, sizeof(stateid_opaque_t));
+	READ64(write->wr_offset);
+	READ32(write->wr_stable_how);
+	if (write->wr_stable_how > 2)
+		goto xdr_error;
+	READ32(write->wr_buflen);
+
+	/*
+	 * Sorry, no magic macros for this; the obvious
+	 *	READ_BUF(write->wr_buflen);
+	 *	SAVEMEM(write->wr_buf, write->wr_buflen);
+	 * will not do, because the write data may span the head buffer and
+	 * several pages, so the iovec array is assembled by hand below.
+	 */
+	avail = (char*)argp->end - (char*)argp->p;
+	if (avail + argp->pagelen < write->wr_buflen) {
+		printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__);
+		goto xdr_error;
+	}
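+	/*
+	 * Build wr_vec by hand: element 0 covers whatever is left in the head
+	 * buffer, and each following element maps one page from the page list
+	 * until wr_buflen bytes are covered; argp->p/end are then repositioned
+	 * just past the write data.
+	 */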
+	write->wr_vec[0].iov_base = p;
+	write->wr_vec[0].iov_len = avail;
+	v = 0;
+	len = write->wr_buflen;
+	while (len > write->wr_vec[v].iov_len) {
+		len -= write->wr_vec[v].iov_len;
+		v++;
+		write->wr_vec[v].iov_base = page_address(argp->pagelist[0]);
+		argp->pagelist++;
+		if (argp->pagelen >= PAGE_SIZE) {
+			write->wr_vec[v].iov_len = PAGE_SIZE;
+			argp->pagelen -= PAGE_SIZE;
+		} else {
+			write->wr_vec[v].iov_len = argp->pagelen;
+			argp->pagelen -= len;
+		}
+	}
+	argp->end = (u32*) (write->wr_vec[v].iov_base + write->wr_vec[v].iov_len);
+	argp->p = (u32*)  (write->wr_vec[v].iov_base + (XDR_QUADLEN(len) << 2));
+	write->wr_vec[v].iov_len = len;
+	write->wr_vlen = v+1;
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
+{
+	DECODE_HEAD;
+
+	READ_BUF(12);
+	COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
+	READ32(rlockowner->rl_owner.len);
+	READ_BUF(rlockowner->rl_owner.len);
+	READMEM(rlockowner->rl_owner.data, rlockowner->rl_owner.len);
+
+	DECODE_TAIL;
+}
+
+static int
+nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+{
+	DECODE_HEAD;
+	struct nfsd4_op *op;
+	int i;
+
+	/*
+	 * XXX: According to spec, we should check the tag
+	 * for UTF-8 compliance.  I'm postponing this for
+	 * now because it seems that some clients do use
+	 * binary tags.
+	 */
+	READ_BUF(4);
+	READ32(argp->taglen);
+	READ_BUF(argp->taglen + 8);
+	SAVEMEM(argp->tag, argp->taglen);
+	READ32(argp->minorversion);
+	READ32(argp->opcnt);
+
+	if (argp->taglen > NFSD4_MAX_TAGLEN)
+		goto xdr_error;
+	if (argp->opcnt > 100)
+		goto xdr_error;
+
+	if (argp->opcnt > sizeof(argp->iops)/sizeof(argp->iops[0])) {
+		argp->ops = kmalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
+		if (!argp->ops) {
+			argp->ops = argp->iops;
+			printk(KERN_INFO "nfsd: couldn't allocate room for COMPOUND\n");
+			goto xdr_error;
+		}
+	}
+
+	for (i = 0; i < argp->opcnt; i++) {
+		op = &argp->ops[i];
+		op->replay = NULL;
+
+		/*
+		 * We can't use READ_BUF() here because we need to handle
+		 * a missing opcode as an OP_WRITE + 1. So we need to check
+		 * to see if we're truly at the end of our buffer or if there
+		 * is another page we need to flip to.
+		 */
+
+		if (argp->p == argp->end) {
+			if (argp->pagelen < 4) {
+				/* There isn't an opcode still on the wire */
+				op->opnum = OP_WRITE + 1;
+				op->status = nfserr_bad_xdr;
+				argp->opcnt = i+1;
+				break;
+			}
+
+			/*
+			 * False alarm. We just hit a page boundary, but there
+			 * is still data available.  Move pointer across page
+			 * boundary.  *snip from READ_BUF*
+			 */
+			argp->p = page_address(argp->pagelist[0]);
+			argp->pagelist++;
+			if (argp->pagelen < PAGE_SIZE) {
+				argp->end = p + (argp->pagelen>>2);
+				argp->pagelen = 0;
+			} else {
+				argp->end = p + (PAGE_SIZE>>2);
+				argp->pagelen -= PAGE_SIZE;
+			}
+		}
+		op->opnum = ntohl(*argp->p++);
+
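+		/* Decode this op's arguments into op->u; a failure sets op->status and truncates the compound below. */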
+		switch (op->opnum) {
+		case 2: /* Reserved operation */
+			op->opnum = OP_ILLEGAL;
+			if (argp->minorversion == 0)
+				op->status = nfserr_op_illegal;
+			else
+				op->status = nfserr_minor_vers_mismatch;
+			break;
+		case OP_ACCESS:
+			op->status = nfsd4_decode_access(argp, &op->u.access);
+			break;
+		case OP_CLOSE:
+			op->status = nfsd4_decode_close(argp, &op->u.close);
+			break;
+		case OP_COMMIT:
+			op->status = nfsd4_decode_commit(argp, &op->u.commit);
+			break;
+		case OP_CREATE:
+			op->status = nfsd4_decode_create(argp, &op->u.create);
+			break;
+		case OP_DELEGRETURN:
+			op->status = nfsd4_decode_delegreturn(argp, &op->u.delegreturn);
+			break;
+		case OP_GETATTR:
+			op->status = nfsd4_decode_getattr(argp, &op->u.getattr);
+			break;
+		case OP_GETFH:
+			op->status = nfs_ok;
+			break;
+		case OP_LINK:
+			op->status = nfsd4_decode_link(argp, &op->u.link);
+			break;
+		case OP_LOCK:
+			op->status = nfsd4_decode_lock(argp, &op->u.lock);
+			break;
+		case OP_LOCKT:
+			op->status = nfsd4_decode_lockt(argp, &op->u.lockt);
+			break;
+		case OP_LOCKU:
+			op->status = nfsd4_decode_locku(argp, &op->u.locku);
+			break;
+		case OP_LOOKUP:
+			op->status = nfsd4_decode_lookup(argp, &op->u.lookup);
+			break;
+		case OP_LOOKUPP:
+			op->status = nfs_ok;
+			break;
+		case OP_NVERIFY:
+			op->status = nfsd4_decode_verify(argp, &op->u.nverify);
+			break;
+		case OP_OPEN:
+			op->status = nfsd4_decode_open(argp, &op->u.open);
+			break;
+		case OP_OPEN_CONFIRM:
+			op->status = nfsd4_decode_open_confirm(argp, &op->u.open_confirm);
+			break;
+		case OP_OPEN_DOWNGRADE:
+			op->status = nfsd4_decode_open_downgrade(argp, &op->u.open_downgrade);
+			break;
+		case OP_PUTFH:
+			op->status = nfsd4_decode_putfh(argp, &op->u.putfh);
+			break;
+		case OP_PUTROOTFH:
+			op->status = nfs_ok;
+			break;
+		case OP_READ:
+			op->status = nfsd4_decode_read(argp, &op->u.read);
+			break;
+		case OP_READDIR:
+			op->status = nfsd4_decode_readdir(argp, &op->u.readdir);
+			break;
+		case OP_READLINK:
+			op->status = nfs_ok;
+			break;
+		case OP_REMOVE:
+			op->status = nfsd4_decode_remove(argp, &op->u.remove);
+			break;
+		case OP_RENAME:
+			op->status = nfsd4_decode_rename(argp, &op->u.rename);
+			break;
+		case OP_RESTOREFH:
+			op->status = nfs_ok;
+			break;
+		case OP_RENEW:
+			op->status = nfsd4_decode_renew(argp, &op->u.renew);
+			break;
+		case OP_SAVEFH:
+			op->status = nfs_ok;
+			break;
+		case OP_SETATTR:
+			op->status = nfsd4_decode_setattr(argp, &op->u.setattr);
+			break;
+		case OP_SETCLIENTID:
+			op->status = nfsd4_decode_setclientid(argp, &op->u.setclientid);
+			break;
+		case OP_SETCLIENTID_CONFIRM:
+			op->status = nfsd4_decode_setclientid_confirm(argp, &op->u.setclientid_confirm);
+			break;
+		case OP_VERIFY:
+			op->status = nfsd4_decode_verify(argp, &op->u.verify);
+			break;
+		case OP_WRITE:
+			op->status = nfsd4_decode_write(argp, &op->u.write);
+			break;
+		case OP_RELEASE_LOCKOWNER:
+			op->status = nfsd4_decode_release_lockowner(argp, &op->u.release_lockowner);
+			break;
+		default:
+			op->opnum = OP_ILLEGAL;
+			op->status = nfserr_op_illegal;
+			break;
+		}
+
+		if (op->status) {
+			argp->opcnt = i+1;
+			break;
+		}
+	}
+
+	DECODE_TAIL;
+}
+/*
+ * END OF "GENERIC" DECODE ROUTINES.
+ */
+
+/*
+ * START OF "GENERIC" ENCODE ROUTINES.
+ *   These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
+#define ENCODE_HEAD              u32 *p
+
+#define WRITE32(n)               *p++ = htonl(n)
+#define WRITE64(n)               do {				\
+	*p++ = htonl((u32)((n) >> 32));				\
+	*p++ = htonl((u32)(n));					\
+} while (0)
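+/* Zero the final quad before copying so the XDR pad bytes beyond nbytes are always zero. */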
+#define WRITEMEM(ptr,nbytes)     do {				\
+	*(p + XDR_QUADLEN(nbytes) -1) = 0;                      \
+	memcpy(p, ptr, nbytes);					\
+	p += XDR_QUADLEN(nbytes);				\
+} while (0)
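+/* change_info4: the atomic flag plus the before/after change attributes (derived from ctime), 5 words (20 bytes) on the wire. */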
+#define WRITECINFO(c)		do {				\
+	*p++ = htonl(c.atomic);					\
+	*p++ = htonl(c.before_ctime_sec);				\
+	*p++ = htonl(c.before_ctime_nsec);				\
+	*p++ = htonl(c.after_ctime_sec);				\
+	*p++ = htonl(c.after_ctime_nsec);				\
+} while (0)
+
+#define RESERVE_SPACE(nbytes)	do {				\
+	p = resp->p;						\
+	BUG_ON(p + XDR_QUADLEN(nbytes) > resp->end);		\
+} while (0)
+#define ADJUST_ARGS()		resp->p = p
+
+/*
+ * Header routine to setup seqid operation replay cache
+ */
+#define ENCODE_SEQID_OP_HEAD					\
+	u32 *p;							\
+	u32 *save;						\
+								\
+	save = resp->p;
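+/* 'save' marks the start of this op's encoded result so that ENCODE_SEQID_OP_TAIL can copy it into the replay cache. */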
+
+/*
+ * Routine for encoding the result of a
+ * "seqid-mutating" NFSv4 operation.  This is
+ * where seqids are incremented, and the
+ * replay cache is filled.
+ */
+
+#define ENCODE_SEQID_OP_TAIL(stateowner) do {			\
+	if (seqid_mutating_err(nfserr) && stateowner) { 	\
+		if (stateowner->so_confirmed)			\
+			stateowner->so_seqid++;			\
+		stateowner->so_replay.rp_status = nfserr;   	\
+		stateowner->so_replay.rp_buflen = 		\
+			  (((char *)(resp)->p - (char *)save)); \
+		memcpy(stateowner->so_replay.rp_buf, save,      \
+ 			stateowner->so_replay.rp_buflen); 	\
+	} } while (0);
+
+
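+/* Map (st_mode & S_IFMT) >> 12 to an NFSv4 file type; NF4BAD entries are reported as a server fault. */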
+static u32 nfs4_ftypes[16] = {
+	NF4BAD,  NF4FIFO, NF4CHR, NF4BAD,
+	NF4DIR,  NF4BAD,  NF4BLK, NF4BAD,
+	NF4REG,  NF4BAD,  NF4LNK, NF4BAD,
+	NF4SOCK, NF4BAD,  NF4LNK, NF4BAD,
+};
+
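+/*
+ * Encode an owner/group/ACL "who" string: special whotypes are written
+ * directly, otherwise the id is mapped to a name through the idmapper and
+ * the result is encoded as an XDR opaque.  *buflen is reduced by the
+ * space consumed.
+ */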
+static int
+nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
+			u32 **p, int *buflen)
+{
+	int status;
+
+	if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
+		return nfserr_resource;
+	if (whotype != NFS4_ACL_WHO_NAMED)
+		status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
+	else if (group)
+		status = nfsd_map_gid_to_name(rqstp, id, (u8 *)(*p + 1));
+	else
+		status = nfsd_map_uid_to_name(rqstp, id, (u8 *)(*p + 1));
+	if (status < 0)
+		return nfserrno(status);
+	*p = xdr_encode_opaque(*p, NULL, status);
+	*buflen -= (XDR_QUADLEN(status) << 2) + 4;
+	BUG_ON(*buflen < 0);
+	return 0;
+}
+
+static inline int
+nfsd4_encode_user(struct svc_rqst *rqstp, uid_t uid, u32 **p, int *buflen)
+{
+	return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, uid, 0, p, buflen);
+}
+
+static inline int
+nfsd4_encode_group(struct svc_rqst *rqstp, uid_t gid, u32 **p, int *buflen)
+{
+	return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, gid, 1, p, buflen);
+}
+
+static inline int
+nfsd4_encode_aclname(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
+		u32 **p, int *buflen)
+{
+	return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen);
+}
+
+
+/*
+ * Note: @fhp can be NULL; in this case, we might have to compose the filehandle
+ * ourselves.
+ *
+ * @countp is the buffer size in _words_; on successful return it is replaced
+ * with the number of words actually written.  Each attribute checks that enough
+ * buffer space remains before writing; if not, the call fails with
+ * nfserr_resource.
+ */
+int
+nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
+		struct dentry *dentry, u32 *buffer, int *countp, u32 *bmval,
+		struct svc_rqst *rqstp)
+{
+	u32 bmval0 = bmval[0];
+	u32 bmval1 = bmval[1];
+	struct kstat stat;
+	struct svc_fh tempfh;
+	struct kstatfs statfs;
+	int buflen = *countp << 2;
+	u32 *attrlenp;
+	u32 dummy;
+	u64 dummy64;
+	u32 *p = buffer;
+	int status;
+	int aclsupport = 0;
+	struct nfs4_acl *acl = NULL;
+
+	BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
+	BUG_ON(bmval0 & ~NFSD_SUPPORTED_ATTRS_WORD0);
+	BUG_ON(bmval1 & ~NFSD_SUPPORTED_ATTRS_WORD1);
+
+	status = vfs_getattr(exp->ex_mnt, dentry, &stat);
+	if (status)
+		goto out_nfserr;
+	if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL)) ||
+	    (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+		       FATTR4_WORD1_SPACE_TOTAL))) {
+		status = vfs_statfs(dentry->d_inode->i_sb, &statfs);
+		if (status)
+			goto out_nfserr;
+	}
+	if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
+		fh_init(&tempfh, NFS4_FHSIZE);
+		status = fh_compose(&tempfh, exp, dentry, NULL);
+		if (status)
+			goto out;
+		fhp = &tempfh;
+	}
+	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
+			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
+		status = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
+		aclsupport = (status == 0);
+		if (bmval0 & FATTR4_WORD0_ACL) {
+			if (status == -EOPNOTSUPP)
+				bmval0 &= ~FATTR4_WORD0_ACL;
+			else if (status == -EINVAL) {
+				status = nfserr_attrnotsupp;
+				goto out;
+			} else if (status != 0)
+				goto out_nfserr;
+		}
+	}
+	if ((buflen -= 16) < 0)
+		goto out_resource;
+
+	WRITE32(2);
+	WRITE32(bmval0);
+	WRITE32(bmval1);
+	attrlenp = p++;                /* to be backfilled later */
+
+	if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
+		if ((buflen -= 12) < 0)
+			goto out_resource;
+		WRITE32(2);
+		WRITE32(aclsupport ?
+			NFSD_SUPPORTED_ATTRS_WORD0 :
+			NFSD_SUPPORTED_ATTRS_WORD0 & ~FATTR4_WORD0_ACL);
+		WRITE32(NFSD_SUPPORTED_ATTRS_WORD1);
+	}
+	if (bmval0 & FATTR4_WORD0_TYPE) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		dummy = nfs4_ftypes[(stat.mode & S_IFMT) >> 12];
+		if (dummy == NF4BAD)
+			goto out_serverfault;
+		WRITE32(dummy);
+	}
+	if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32( NFS4_FH_NOEXPIRE_WITH_OPEN | NFS4_FH_VOL_RENAME );
+	}
+	if (bmval0 & FATTR4_WORD0_CHANGE) {
+		/*
+		 * Note: This _must_ be consistent with the scheme for writing
+		 * change_info, so any changes made here must be reflected there
+		 * as well.  (See xdr4.h:set_change_info() and the WRITECINFO()
+		 * macro above.)
+		 */
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE32(stat.ctime.tv_sec);
+		WRITE32(stat.ctime.tv_nsec);
+	}
+	if (bmval0 & FATTR4_WORD0_SIZE) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64(stat.size);
+	}
+	if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(0);
+	}
+	if (bmval0 & FATTR4_WORD0_FSID) {
+		if ((buflen -= 16) < 0)
+			goto out_resource;
+		if (is_fsid(fhp, rqstp->rq_reffh)) {
+			WRITE64((u64)exp->ex_fsid);
+			WRITE64((u64)0);
+		} else {
+			WRITE32(0);
+			WRITE32(MAJOR(stat.dev));
+			WRITE32(0);
+			WRITE32(MINOR(stat.dev));
+		}
+	}
+	if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(0);
+	}
+	if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(NFSD_LEASE_TIME);
+	}
+	if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(0);
+	}
+	if (bmval0 & FATTR4_WORD0_ACL) {
+		struct nfs4_ace *ace;
+		struct list_head *h;
+
+		if (acl == NULL) {
+			if ((buflen -= 4) < 0)
+				goto out_resource;
+
+			WRITE32(0);
+			goto out_acl;
+		}
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(acl->naces);
+
+		list_for_each(h, &acl->ace_head) {
+			ace = list_entry(h, struct nfs4_ace, l_ace);
+
+			if ((buflen -= 4*3) < 0)
+				goto out_resource;
+			WRITE32(ace->type);
+			WRITE32(ace->flag);
+			WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
+			status = nfsd4_encode_aclname(rqstp, ace->whotype,
+				ace->who, ace->flag & NFS4_ACE_IDENTIFIER_GROUP,
+				&p, &buflen);
+			if (status == nfserr_resource)
+				goto out_resource;
+			if (status)
+				goto out;
+		}
+	}
+out_acl:
+	if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(aclsupport ?
+			ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
+	}
+	if (bmval0 & FATTR4_WORD0_CANSETTIME) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
+		buflen -= (XDR_QUADLEN(fhp->fh_handle.fh_size) << 2) + 4;
+		if (buflen < 0)
+			goto out_resource;
+		WRITE32(fhp->fh_handle.fh_size);
+		WRITEMEM(&fhp->fh_handle.fh_base, fhp->fh_handle.fh_size);
+	}
+	if (bmval0 & FATTR4_WORD0_FILEID) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) stat.ino);
+	}
+	if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) statfs.f_ffree);
+	}
+	if (bmval0 & FATTR4_WORD0_FILES_FREE) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) statfs.f_ffree);
+	}
+	if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) statfs.f_files);
+	}
+	if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64(~(u64)0);
+	}
+	if (bmval0 & FATTR4_WORD0_MAXLINK) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(255);
+	}
+	if (bmval0 & FATTR4_WORD0_MAXNAME) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(~(u32) 0);
+	}
+	if (bmval0 & FATTR4_WORD0_MAXREAD) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) NFSSVC_MAXBLKSIZE);
+	}
+	if (bmval0 & FATTR4_WORD0_MAXWRITE) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE64((u64) NFSSVC_MAXBLKSIZE);
+	}
+	if (bmval1 & FATTR4_WORD1_MODE) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(stat.mode & S_IALLUGO);
+	}
+	if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(1);
+	}
+	if (bmval1 & FATTR4_WORD1_NUMLINKS) {
+		if ((buflen -= 4) < 0)
+			goto out_resource;
+		WRITE32(stat.nlink);
+	}
+	if (bmval1 & FATTR4_WORD1_OWNER) {
+		status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
+		if (status == nfserr_resource)
+			goto out_resource;
+		if (status)
+			goto out;
+	}
+	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
+		status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
+		if (status == nfserr_resource)
+			goto out_resource;
+		if (status)
+			goto out;
+	}
+	if (bmval1 & FATTR4_WORD1_RAWDEV) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		WRITE32((u32) MAJOR(stat.rdev));
+		WRITE32((u32) MINOR(stat.rdev));
+	}
+	if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
+		WRITE64(dummy64);
+	}
+	if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
+		WRITE64(dummy64);
+	}
+	if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
+		WRITE64(dummy64);
+	}
+	if (bmval1 & FATTR4_WORD1_SPACE_USED) {
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		dummy64 = (u64)stat.blocks << 9;
+		WRITE64(dummy64);
+	}
+	if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
+		if ((buflen -= 12) < 0)
+			goto out_resource;
+		WRITE32(0);
+		WRITE32(stat.atime.tv_sec);
+		WRITE32(stat.atime.tv_nsec);
+	}
+	if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
+		if ((buflen -= 12) < 0)
+			goto out_resource;
+		WRITE32(0);
+		WRITE32(1);
+		WRITE32(0);
+	}
+	if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
+		if ((buflen -= 12) < 0)
+			goto out_resource;
+		WRITE32(0);
+		WRITE32(stat.ctime.tv_sec);
+		WRITE32(stat.ctime.tv_nsec);
+	}
+	if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
+		if ((buflen -= 12) < 0)
+			goto out_resource;
+		WRITE32(0);
+		WRITE32(stat.mtime.tv_sec);
+		WRITE32(stat.mtime.tv_nsec);
+	}
+	if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+		struct dentry *mnt_pnt, *mnt_root;
+
+		if ((buflen -= 8) < 0)
+			goto out_resource;
+		mnt_root = exp->ex_mnt->mnt_root;
+		if (mnt_root->d_inode == dentry->d_inode) {
+			mnt_pnt = exp->ex_mnt->mnt_mountpoint;
+			WRITE64((u64) mnt_pnt->d_inode->i_ino);
+		} else
+			WRITE64((u64) stat.ino);
+	}
+	*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
+	*countp = p - buffer;
+	status = nfs_ok;
+
+out:
+	nfs4_acl_free(acl);
+	if (fhp == &tempfh)
+		fh_put(&tempfh);
+	return status;
+out_nfserr:
+	status = nfserrno(status);
+	goto out;
+out_resource:
+	*countp = 0;
+	status = nfserr_resource;
+	goto out;
+out_serverfault:
+	status = nfserr_serverfault;
+	goto out;
+}
+
+static int
+nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
+		const char *name, int namlen, u32 *p, int *buflen)
+{
+	struct svc_export *exp = cd->rd_fhp->fh_export;
+	struct dentry *dentry;
+	int nfserr;
+
+	dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen);
+	if (IS_ERR(dentry))
+		return nfserrno(PTR_ERR(dentry));
+
+	exp_get(exp);
+	if (d_mountpoint(dentry)) {
+		if (nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp)) {
+		/*
+		 * -EAGAIN is the only error returned from
+		 * nfsd_cross_mnt() and it indicates that an
+		 * up-call has  been initiated to fill in the export
+		 * options on exp.  When the answer comes back,
+		 * this call will be retried.
+		 */
+			nfserr = nfserr_dropit;
+			goto out_put;
+		}
+
+	}
+	nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
+					cd->rd_rqstp);
+out_put:
+	dput(dentry);
+	exp_put(exp);
+	return nfserr;
+}
+
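+/*
+ * Encode a minimal fattr4 carrying only the RDATTR_ERROR attribute, so a
+ * failure on a single directory entry can be reported without failing the
+ * whole READDIR reply.
+ */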
+static u32 *
+nfsd4_encode_rdattr_error(u32 *p, int buflen, int nfserr)
+{
+	u32 *attrlenp;
+
+	if (buflen < 6)
+		return NULL;
+	*p++ = htonl(2);
+	*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
+	*p++ = htonl(0);			 /* bmval1 */
+
+	attrlenp = p++;
+	*p++ = nfserr;       /* no htonl */
+	*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
+	return p;
+}
+
+static int
+nfsd4_encode_dirent(struct readdir_cd *ccd, const char *name, int namlen,
+		    loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common);
+	int buflen;
+	u32 *p = cd->buffer;
+	int nfserr = nfserr_toosmall;
+
+	/* In NFSv4, "." and ".." never make it onto the wire. */
+	if (name && isdotent(name, namlen)) {
+		cd->common.err = nfs_ok;
+		return 0;
+	}
+
+	if (cd->offset)
+		xdr_encode_hyper(cd->offset, (u64) offset);
+
+	buflen = cd->buflen - 4 - XDR_QUADLEN(namlen);
+	if (buflen < 0)
+		goto fail;
+
+	*p++ = xdr_one;                             /* mark entry present */
+	cd->offset = p;                             /* remember pointer */
+	p = xdr_encode_hyper(p, NFS_OFFSET_MAX);    /* offset of next entry */
+	p = xdr_encode_array(p, name, namlen);      /* name length & name */
+
+	nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, p, &buflen);
+	switch (nfserr) {
+	case nfs_ok:
+		p += buflen;
+		break;
+	case nfserr_resource:
+		nfserr = nfserr_toosmall;
+		goto fail;
+	case nfserr_dropit:
+		goto fail;
+	default:
+		/*
+		 * If the client requested the RDATTR_ERROR attribute,
+		 * we stuff the error code into this attribute
+		 * and continue.  If this attribute was not requested,
+		 * then in accordance with the spec, we fail the
+		 * entire READDIR operation(!)
+		 */
+		if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
+			goto fail;
+		nfserr = nfserr_toosmall;
+		p = nfsd4_encode_rdattr_error(p, buflen, nfserr);
+		if (p == NULL)
+			goto fail;
+	}
+	cd->buflen -= (p - cd->buffer);
+	cd->buffer = p;
+	cd->common.err = nfs_ok;
+	return 0;
+fail:
+	cd->common.err = nfserr;
+	return -EINVAL;
+}
+
+static void
+nfsd4_encode_access(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_access *access)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(8);
+		WRITE32(access->ac_supported);
+		WRITE32(access->ac_resp_access);
+		ADJUST_ARGS();
+	}
+}
+
+static void
+nfsd4_encode_close(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_close *close)
+{
+	ENCODE_SEQID_OP_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(sizeof(stateid_t));
+		WRITE32(close->cl_stateid.si_generation);
+		WRITEMEM(&close->cl_stateid.si_opaque, sizeof(stateid_opaque_t));
+		ADJUST_ARGS();
+	}
+	ENCODE_SEQID_OP_TAIL(close->cl_stateowner);
+}
+
+
+static void
+nfsd4_encode_commit(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_commit *commit)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(8);
+		WRITEMEM(commit->co_verf.data, 8);
+		ADJUST_ARGS();
+	}
+}
+
+static void
+nfsd4_encode_create(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_create *create)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(32);
+		WRITECINFO(create->cr_cinfo);
+		WRITE32(2);
+		WRITE32(create->cr_bmval[0]);
+		WRITE32(create->cr_bmval[1]);
+		ADJUST_ARGS();
+	}
+}
+
+static int
+nfsd4_encode_getattr(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_getattr *getattr)
+{
+	struct svc_fh *fhp = getattr->ga_fhp;
+	int buflen;
+
+	if (nfserr)
+		return nfserr;
+
+	buflen = resp->end - resp->p - (COMPOUND_ERR_SLACK_SPACE >> 2);
+	nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry,
+				    resp->p, &buflen, getattr->ga_bmval,
+				    resp->rqstp);
+
+	if (!nfserr)
+		resp->p += buflen;
+	return nfserr;
+}
+
+static void
+nfsd4_encode_getfh(struct nfsd4_compoundres *resp, int nfserr, struct svc_fh *fhp)
+{
+	unsigned int len;
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		len = fhp->fh_handle.fh_size;
+		RESERVE_SPACE(len + 4);
+		WRITE32(len);
+		WRITEMEM(&fhp->fh_handle.fh_base, len);
+		ADJUST_ARGS();
+	}
+}
+
+/*
+ * Including all fields other than the name, a LOCK4denied structure requires
+ *   8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
+ */
+static void
+nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
+{
+	ENCODE_HEAD;
+
+	RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
+	WRITE64(ld->ld_start);
+	WRITE64(ld->ld_length);
+	WRITE32(ld->ld_type);
+	if (ld->ld_sop) {
+		WRITEMEM(&ld->ld_clientid, 8);
+		WRITE32(ld->ld_sop->so_owner.len);
+		WRITEMEM(ld->ld_sop->so_owner.data, ld->ld_sop->so_owner.len);
+		kref_put(&ld->ld_sop->so_ref, nfs4_free_stateowner);
+	}  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
+		WRITE64((u64)0); /* clientid */
+		WRITE32(0); /* length of owner name */
+	}
+	ADJUST_ARGS();
+}
+
+static void
+nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock *lock)
+{
+
+	ENCODE_SEQID_OP_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(4 + sizeof(stateid_t));
+		WRITE32(lock->lk_resp_stateid.si_generation);
+		WRITEMEM(&lock->lk_resp_stateid.si_opaque, sizeof(stateid_opaque_t));
+		ADJUST_ARGS();
+	} else if (nfserr == nfserr_denied)
+		nfsd4_encode_lock_denied(resp, &lock->lk_denied);
+
+	ENCODE_SEQID_OP_TAIL(lock->lk_stateowner);
+}
+
+static void
+nfsd4_encode_lockt(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lockt *lockt)
+{
+	if (nfserr == nfserr_denied)
+		nfsd4_encode_lock_denied(resp, &lockt->lt_denied);
+}
+
+static void
+nfsd4_encode_locku(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_locku *locku)
+{
+	ENCODE_SEQID_OP_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(sizeof(stateid_t));
+		WRITE32(locku->lu_stateid.si_generation);
+		WRITEMEM(&locku->lu_stateid.si_opaque, sizeof(stateid_opaque_t));
+		ADJUST_ARGS();
+	}
+
+	ENCODE_SEQID_OP_TAIL(locku->lu_stateowner);
+}
+
+
+static void
+nfsd4_encode_link(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_link *link)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(20);
+		WRITECINFO(link->li_cinfo);
+		ADJUST_ARGS();
+	}
+}
+
+
+static void
+nfsd4_encode_open(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_open *open)
+{
+	ENCODE_SEQID_OP_HEAD;
+
+	if (nfserr)
+		goto out;
+
+	RESERVE_SPACE(36 + sizeof(stateid_t));
+	WRITE32(open->op_stateid.si_generation);
+	WRITEMEM(&open->op_stateid.si_opaque, sizeof(stateid_opaque_t));
+	WRITECINFO(open->op_cinfo);
+	WRITE32(open->op_rflags);
+	WRITE32(2);
+	WRITE32(open->op_bmval[0]);
+	WRITE32(open->op_bmval[1]);
+	WRITE32(open->op_delegate_type);
+	ADJUST_ARGS();
+
+	switch (open->op_delegate_type) {
+	case NFS4_OPEN_DELEGATE_NONE:
+		break;
+	case NFS4_OPEN_DELEGATE_READ:
+		RESERVE_SPACE(20 + sizeof(stateid_t));
+		WRITEMEM(&open->op_delegate_stateid, sizeof(stateid_t));
+		WRITE32(0);
+
+		/*
+		 * TODO: ACE's in delegations
+		 */
+		WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
+		WRITE32(0);
+		WRITE32(0);
+		WRITE32(0);   /* XXX: is NULL principal ok? */
+		ADJUST_ARGS();
+		break;
+	case NFS4_OPEN_DELEGATE_WRITE:
+		RESERVE_SPACE(32 + sizeof(stateid_t));
+		WRITEMEM(&open->op_delegate_stateid, sizeof(stateid_t));
+		WRITE32(0);
+
+		/*
+		 * TODO: space_limit's in delegations
+		 */
+		WRITE32(NFS4_LIMIT_SIZE);
+		WRITE32(~(u32)0);
+		WRITE32(~(u32)0);
+
+		/*
+		 * TODO: ACE's in delegations
+		 */
+		WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
+		WRITE32(0);
+		WRITE32(0);
+		WRITE32(0);   /* XXX: is NULL principal ok? */
+		ADJUST_ARGS();
+		break;
+	default:
+		BUG();
+	}
+	/* XXX save filehandle here */
+out:
+	ENCODE_SEQID_OP_TAIL(open->op_stateowner);
+}
+
+static void
+nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_open_confirm *oc)
+{
+	ENCODE_SEQID_OP_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(sizeof(stateid_t));
+		WRITE32(oc->oc_resp_stateid.si_generation);
+		WRITEMEM(&oc->oc_resp_stateid.si_opaque, sizeof(stateid_opaque_t));
+		ADJUST_ARGS();
+	}
+
+	ENCODE_SEQID_OP_TAIL(oc->oc_stateowner);
+}
+
+static void
+nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_open_downgrade *od)
+{
+	ENCODE_SEQID_OP_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(sizeof(stateid_t));
+		WRITE32(od->od_stateid.si_generation);
+		WRITEMEM(&od->od_stateid.si_opaque, sizeof(stateid_opaque_t));
+		ADJUST_ARGS();
+	}
+
+	ENCODE_SEQID_OP_TAIL(od->od_stateowner);
+}
+
+static int
+nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read *read)
+{
+	u32 eof;
+	int v, pn;
+	unsigned long maxcount; 
+	long len;
+	ENCODE_HEAD;
+
+	if (nfserr)
+		return nfserr;
+	if (resp->xbuf->page_len)
+		return nfserr_resource;
+
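+	/*
+	 * The read data goes straight into response pages rather than being
+	 * copied: the head iovec is closed off after the eof/count words, the
+	 * pages filled by nfsd_read() become the reply's page data, and a tail
+	 * page is set up for XDR padding and any remaining ops in the compound.
+	 */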
+	RESERVE_SPACE(8); /* eof flag and byte count */
+
+	maxcount = NFSSVC_MAXBLKSIZE;
+	if (maxcount > read->rd_length)
+		maxcount = read->rd_length;
+
+	len = maxcount;
+	v = 0;
+	while (len > 0) {
+		pn = resp->rqstp->rq_resused;
+		svc_take_page(resp->rqstp);
+		read->rd_iov[v].iov_base = page_address(resp->rqstp->rq_respages[pn]);
+		read->rd_iov[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE;
+		v++;
+		len -= PAGE_SIZE;
+	}
+	read->rd_vlen = v;
+
+	nfserr = nfsd_read(read->rd_rqstp, read->rd_fhp, read->rd_filp,
+			read->rd_offset, read->rd_iov, read->rd_vlen,
+			&maxcount);
+
+	if (nfserr == nfserr_symlink)
+		nfserr = nfserr_inval;
+	if (nfserr)
+		return nfserr;
+	eof = (read->rd_offset + maxcount >= read->rd_fhp->fh_dentry->d_inode->i_size);
+
+	WRITE32(eof);
+	WRITE32(maxcount);
+	ADJUST_ARGS();
+	resp->xbuf->head[0].iov_len = ((char*)resp->p) - (char*)resp->xbuf->head[0].iov_base;
+
+	resp->xbuf->page_len = maxcount;
+
+	/* read zero bytes -> don't set up tail */
+	if (!maxcount)
+		return 0;
+
+	/* set up page for remaining responses */
+	svc_take_page(resp->rqstp);
+	resp->xbuf->tail[0].iov_base = 
+		page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+	resp->rqstp->rq_restailpage = resp->rqstp->rq_resused-1;
+	resp->xbuf->tail[0].iov_len = 0;
+	resp->p = resp->xbuf->tail[0].iov_base;
+	resp->end = resp->p + PAGE_SIZE/4;
+
+	if (maxcount&3) {
+		*(resp->p)++ = 0;
+		resp->xbuf->tail[0].iov_base += maxcount&3;
+		resp->xbuf->tail[0].iov_len = 4 - (maxcount&3);
+	}
+	return 0;
+}
+
+static int
+nfsd4_encode_readlink(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_readlink *readlink)
+{
+	int maxcount;
+	char *page;
+	ENCODE_HEAD;
+
+	if (nfserr)
+		return nfserr;
+	if (resp->xbuf->page_len)
+		return nfserr_resource;
+
+	svc_take_page(resp->rqstp);
+	page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+
+	maxcount = PAGE_SIZE;
+	RESERVE_SPACE(4);
+
+	/*
+	 * XXX: By default, the ->readlink() VFS op will truncate symlinks
+	 * if they would overflow the buffer.  Is this kosher in NFSv4?  If
+	 * not, one easy fix is: if ->readlink() precisely fills the buffer,
+	 * assume that truncation occurred, and return NFS4ERR_RESOURCE.
+	 */
+	nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp, page, &maxcount);
+	if (nfserr == nfserr_isdir)
+		return nfserr_inval;
+	if (nfserr)
+		return nfserr;
+
+	WRITE32(maxcount);
+	ADJUST_ARGS();
+	resp->xbuf->head[0].iov_len = ((char*)resp->p) - (char*)resp->xbuf->head[0].iov_base;
+
+	svc_take_page(resp->rqstp);
+	resp->xbuf->tail[0].iov_base = 
+		page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+	resp->rqstp->rq_restailpage = resp->rqstp->rq_resused-1;
+	resp->xbuf->tail[0].iov_len = 0;
+	resp->p = resp->xbuf->tail[0].iov_base;
+	resp->end = resp->p + PAGE_SIZE/4;
+
+	resp->xbuf->page_len = maxcount;
+	if (maxcount&3) {
+		*(resp->p)++ = 0;
+		resp->xbuf->tail[0].iov_base += maxcount&3;
+		resp->xbuf->tail[0].iov_len = 4 - (maxcount&3);
+	}
+	return 0;
+}
+
+static int
+nfsd4_encode_readdir(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_readdir *readdir)
+{
+	int maxcount;
+	loff_t offset;
+	u32 *page, *savep;
+	ENCODE_HEAD;
+
+	if (nfserr)
+		return nfserr;
+	if (resp->xbuf->page_len)
+		return nfserr_resource;
+
+	RESERVE_SPACE(8);  /* verifier */
+	savep = p;
+
+	/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
+	WRITE32(0);
+	WRITE32(0);
+	ADJUST_ARGS();
+	resp->xbuf->head[0].iov_len = ((char*)resp->p) - (char*)resp->xbuf->head[0].iov_base;
+
+	maxcount = PAGE_SIZE;
+	if (maxcount > readdir->rd_maxcount)
+		maxcount = readdir->rd_maxcount;
+
+	/*
+	 * Convert from bytes to words, account for the two words already
+	 * written, make sure to leave two words at the end for the next
+	 * pointer and eof field.
+	 */
+	maxcount = (maxcount >> 2) - 4;
+	if (maxcount < 0) {
+		nfserr =  nfserr_toosmall;
+		goto err_no_verf;
+	}
+
+	svc_take_page(resp->rqstp);
+	page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+	readdir->common.err = 0;
+	readdir->buflen = maxcount;
+	readdir->buffer = page;
+	readdir->offset = NULL;
+
+	offset = readdir->rd_cookie;
+	nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
+			      &offset,
+			      &readdir->common, nfsd4_encode_dirent);
+	if (nfserr == nfs_ok &&
+	    readdir->common.err == nfserr_toosmall &&
+	    readdir->buffer == page) 
+		nfserr = nfserr_toosmall;
+	if (nfserr == nfserr_symlink)
+		nfserr = nfserr_notdir;
+	if (nfserr)
+		goto err_no_verf;
+
+	if (readdir->offset)
+		xdr_encode_hyper(readdir->offset, offset);
+
+	p = readdir->buffer;
+	*p++ = 0;	/* no more entries */
+	*p++ = htonl(readdir->common.err == nfserr_eof);
+	resp->xbuf->page_len = ((char*)p) - (char*)page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+
+	/* allocate a page for the tail */
+	svc_take_page(resp->rqstp);
+	resp->xbuf->tail[0].iov_base = 
+		page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+	resp->rqstp->rq_restailpage = resp->rqstp->rq_resused-1;
+	resp->xbuf->tail[0].iov_len = 0;
+	resp->p = resp->xbuf->tail[0].iov_base;
+	resp->end = resp->p + PAGE_SIZE/4;
+
+	return 0;
+err_no_verf:
+	p = savep;
+	ADJUST_ARGS();
+	return nfserr;
+}
+
+static void
+nfsd4_encode_remove(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_remove *remove)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(20);
+		WRITECINFO(remove->rm_cinfo);
+		ADJUST_ARGS();
+	}
+}
+
+static void
+nfsd4_encode_rename(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_rename *rename)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(40);
+		WRITECINFO(rename->rn_sinfo);
+		WRITECINFO(rename->rn_tinfo);
+		ADJUST_ARGS();
+	}
+}
+
+/*
+ * The SETATTR encode routine is special -- it always encodes a bitmap,
+ * regardless of the error status.
+ */
+static void
+nfsd4_encode_setattr(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_setattr *setattr)
+{
+	ENCODE_HEAD;
+
+	RESERVE_SPACE(12);
+	if (nfserr) {
+		WRITE32(2);
+		WRITE32(0);
+		WRITE32(0);
+	}
+	else {
+		WRITE32(2);
+		WRITE32(setattr->sa_bmval[0]);
+		WRITE32(setattr->sa_bmval[1]);
+	}
+	ADJUST_ARGS();
+}
+
+static void
+nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_setclientid *scd)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(8 + sizeof(nfs4_verifier));
+		WRITEMEM(&scd->se_clientid, 8);
+		WRITEMEM(&scd->se_confirm, sizeof(nfs4_verifier));
+		ADJUST_ARGS();
+	}
+	else if (nfserr == nfserr_clid_inuse) {
+		RESERVE_SPACE(8);
+		WRITE32(0);
+		WRITE32(0);
+		ADJUST_ARGS();
+	}
+}
+
+static void
+nfsd4_encode_write(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_write *write)
+{
+	ENCODE_HEAD;
+
+	if (!nfserr) {
+		RESERVE_SPACE(16);
+		WRITE32(write->wr_bytes_written);
+		WRITE32(write->wr_how_written);
+		WRITEMEM(write->wr_verifier.data, 8);
+		ADJUST_ARGS();
+	}
+}
+
+void
+nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+{
+	u32 *statp;
+	ENCODE_HEAD;
+
+	RESERVE_SPACE(8);
+	WRITE32(op->opnum);
+	statp = p++;	/* to be backfilled at the end */
+	ADJUST_ARGS();
+
+	switch (op->opnum) {
+	case OP_ACCESS:
+		nfsd4_encode_access(resp, op->status, &op->u.access);
+		break;
+	case OP_CLOSE:
+		nfsd4_encode_close(resp, op->status, &op->u.close);
+		break;
+	case OP_COMMIT:
+		nfsd4_encode_commit(resp, op->status, &op->u.commit);
+		break;
+	case OP_CREATE:
+		nfsd4_encode_create(resp, op->status, &op->u.create);
+		break;
+	case OP_DELEGRETURN:
+		break;
+	case OP_GETATTR:
+		op->status = nfsd4_encode_getattr(resp, op->status, &op->u.getattr);
+		break;
+	case OP_GETFH:
+		nfsd4_encode_getfh(resp, op->status, op->u.getfh);
+		break;
+	case OP_LINK:
+		nfsd4_encode_link(resp, op->status, &op->u.link);
+		break;
+	case OP_LOCK:
+		nfsd4_encode_lock(resp, op->status, &op->u.lock);
+		break;
+	case OP_LOCKT:
+		nfsd4_encode_lockt(resp, op->status, &op->u.lockt);
+		break;
+	case OP_LOCKU:
+		nfsd4_encode_locku(resp, op->status, &op->u.locku);
+		break;
+	case OP_LOOKUP:
+		break;
+	case OP_LOOKUPP:
+		break;
+	case OP_NVERIFY:
+		break;
+	case OP_OPEN:
+		nfsd4_encode_open(resp, op->status, &op->u.open);
+		break;
+	case OP_OPEN_CONFIRM:
+		nfsd4_encode_open_confirm(resp, op->status, &op->u.open_confirm);
+		break;
+	case OP_OPEN_DOWNGRADE:
+		nfsd4_encode_open_downgrade(resp, op->status, &op->u.open_downgrade);
+		break;
+	case OP_PUTFH:
+		break;
+	case OP_PUTROOTFH:
+		break;
+	case OP_READ:
+		op->status = nfsd4_encode_read(resp, op->status, &op->u.read);
+		break;
+	case OP_READDIR:
+		op->status = nfsd4_encode_readdir(resp, op->status, &op->u.readdir);
+		break;
+	case OP_READLINK:
+		op->status = nfsd4_encode_readlink(resp, op->status, &op->u.readlink);
+		break;
+	case OP_REMOVE:
+		nfsd4_encode_remove(resp, op->status, &op->u.remove);
+		break;
+	case OP_RENAME:
+		nfsd4_encode_rename(resp, op->status, &op->u.rename);
+		break;
+	case OP_RENEW:
+		break;
+	case OP_RESTOREFH:
+		break;
+	case OP_SAVEFH:
+		break;
+	case OP_SETATTR:
+		nfsd4_encode_setattr(resp, op->status, &op->u.setattr);
+		break;
+	case OP_SETCLIENTID:
+		nfsd4_encode_setclientid(resp, op->status, &op->u.setclientid);
+		break;
+	case OP_SETCLIENTID_CONFIRM:
+		break;
+	case OP_VERIFY:
+		break;
+	case OP_WRITE:
+		nfsd4_encode_write(resp, op->status, &op->u.write);
+		break;
+	case OP_RELEASE_LOCKOWNER:
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Note: We write the status directly, instead of using WRITE32(),
+	 * since it is already in network byte order.
+	 */
+	*statp = op->status;
+}
+
+/* 
+ * Encode the reply stored in the stateowner reply cache 
+ * 
+ * XDR note: do not encode rp->rp_buflen: the buffer contains the
+ * previously sent already encoded operation.
+ *
+ * called with nfs4_lock_state() held
+ */
+void
+nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+{
+	ENCODE_HEAD;
+	struct nfs4_replay *rp = op->replay;
+
+	BUG_ON(!rp);
+
+	RESERVE_SPACE(8);
+	WRITE32(op->opnum);
+	*p++ = rp->rp_status;  /* already xdr'ed */
+	ADJUST_ARGS();
+
+	RESERVE_SPACE(rp->rp_buflen);
+	WRITEMEM(rp->rp_buf, rp->rp_buflen);
+	ADJUST_ARGS();
+}
+
+/*
+ * END OF "GENERIC" ENCODE ROUTINES.
+ */
+
+int
+nfs4svc_encode_voidres(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args)
+{
+	if (args->ops != args->iops) {
+		kfree(args->ops);
+		args->ops = args->iops;
+	}
+	if (args->tmpp) {
+		kfree(args->tmpp);
+		args->tmpp = NULL;
+	}
+	while (args->to_free) {
+		struct tmpbuf *tb = args->to_free;
+		args->to_free = tb->next;
+		tb->release(tb->buf);
+		kfree(tb);
+	}
+}
+
+int
+nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, u32 *p, struct nfsd4_compoundargs *args)
+{
+	int status;
+
+	args->p = p;
+	args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
+	args->pagelist = rqstp->rq_arg.pages;
+	args->pagelen = rqstp->rq_arg.page_len;
+	args->tmpp = NULL;
+	args->to_free = NULL;
+	args->ops = args->iops;
+	args->rqstp = rqstp;
+
+	status = nfsd4_decode_compound(args);
+	if (status) {
+		nfsd4_release_compoundargs(args);
+	}
+	return !status;
+}
+
+int
+nfs4svc_encode_compoundres(struct svc_rqst *rqstp, u32 *p, struct nfsd4_compoundres *resp)
+{
+	/*
+	 * All that remains is to write the tag and operation count...
+	 */
+	struct kvec *iov;
+	p = resp->tagp;
+	*p++ = htonl(resp->taglen);
+	memcpy(p, resp->tag, resp->taglen);
+	p += XDR_QUADLEN(resp->taglen);
+	*p++ = htonl(resp->opcnt);
+
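+	/* If page data was generated (READ, READLINK, READDIR), resp->p points into the tail page; otherwise it is still in the head. */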
+	if (rqstp->rq_res.page_len) 
+		iov = &rqstp->rq_res.tail[0];
+	else
+		iov = &rqstp->rq_res.head[0];
+	iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
+	BUG_ON(iov->iov_len > PAGE_SIZE);
+	return 1;
+}
+
+/*
+ * Local variables:
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
new file mode 100644
index 0000000..119e4d4
--- /dev/null
+++ b/fs/nfsd/nfscache.c
@@ -0,0 +1,328 @@
+/*
+ * linux/fs/nfsd/nfscache.c
+ *
+ * Request reply cache. This is currently a global cache, but it may
+ * become a per-client cache in the future.
+ *
+ * This code is heavily inspired by the 44BSD implementation, although
+ * it does things a bit differently.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+
+/* Size of reply cache. Common values are:
+ * 4.3BSD:	128
+ * 4.4BSD:	256
+ * Solaris2:	1024
+ * DEC Unix:	512-4096
+ */
+#define CACHESIZE		1024
+#define HASHSIZE		64
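+/* Hash on the XID, folding the top byte into the low bits; HASHSIZE must remain a power of two for the mask to work. */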
+#define REQHASH(xid)		((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
+
+static struct hlist_head *	hash_list;
+static struct list_head 	lru_head;
+static int			cache_disabled = 1;
+
+static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+
+/*
+ * Locking for the reply cache:
+ * A cache entry is "single use" if c_state == RC_INPROG.
+ * Otherwise, the lock must be held when accessing _prev or _next.
+ */
+static DEFINE_SPINLOCK(cache_lock);
+
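+/*
+ * Pre-allocate CACHESIZE entries on the LRU list and a HASHSIZE-bucket hash
+ * table.  Entries that cannot be allocated are simply dropped (with a
+ * warning); if the hash table allocation fails, the cache stays disabled.
+ */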
+void
+nfsd_cache_init(void)
+{
+	struct svc_cacherep	*rp;
+	int			i;
+
+	INIT_LIST_HEAD(&lru_head);
+	i = CACHESIZE;
+	while (i) {
+		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
+		if (!rp)
+			break;
+		list_add(&rp->c_lru, &lru_head);
+		rp->c_state = RC_UNUSED;
+		rp->c_type = RC_NOCACHE;
+		INIT_HLIST_NODE(&rp->c_hash);
+		i--;
+	}
+
+	if (i)
+		printk(KERN_ERR "nfsd: cannot allocate all %d cache entries, only got %d\n",
+			CACHESIZE, CACHESIZE-i);
+
+	hash_list = kmalloc(HASHSIZE * sizeof(struct hlist_head), GFP_KERNEL);
+	if (!hash_list) {
+		nfsd_cache_shutdown();
+		printk(KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n",
+			HASHSIZE * sizeof(struct hlist_head));
+		return;
+	}
+	memset(hash_list, 0, HASHSIZE * sizeof(struct hlist_head));
+
+	cache_disabled = 0;
+}
+
+void
+nfsd_cache_shutdown(void)
+{
+	struct svc_cacherep	*rp;
+
+	while (!list_empty(&lru_head)) {
+		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
+		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
+			kfree(rp->c_replvec.iov_base);
+		list_del(&rp->c_lru);
+		kfree(rp);
+	}
+
+	cache_disabled = 1;
+
+	if (hash_list)
+		kfree(hash_list);
+	hash_list = NULL;
+}
+
+/*
+ * Move cache entry to end of LRU list
+ */
+static void
+lru_put_end(struct svc_cacherep *rp)
+{
+	list_del(&rp->c_lru);
+	list_add_tail(&rp->c_lru, &lru_head);
+}
+
+/*
+ * Move a cache entry from one hash list to another
+ */
+static void
+hash_refile(struct svc_cacherep *rp)
+{
+	hlist_del_init(&rp->c_hash);
+	hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+}
+
+/*
+ * Try to find an entry matching the current call in the cache. When none
+ * is found, we grab the oldest unlocked entry off the LRU list.
+ * Note that no operation within the loop may sleep.
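+ *
+ * Returns RC_DOIT to process the call normally, RC_REPLY when a cached
+ * reply has been set up in rqstp->rq_res, or RC_DROPIT to drop a request
+ * that is still in progress or retransmitted too quickly.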
+ */
+int
+nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
+{
+	struct hlist_node	*hn;
+	struct hlist_head 	*rh;
+	struct svc_cacherep	*rp;
+	u32			xid = rqstp->rq_xid,
+				proto =  rqstp->rq_prot,
+				vers = rqstp->rq_vers,
+				proc = rqstp->rq_proc;
+	unsigned long		age;
+	int rtn;
+
+	rqstp->rq_cacherep = NULL;
+	if (cache_disabled || type == RC_NOCACHE) {
+		nfsdstats.rcnocache++;
+		return RC_DOIT;
+	}
+
+	spin_lock(&cache_lock);
+	rtn = RC_DOIT;
+
+	rh = &hash_list[REQHASH(xid)];
+	hlist_for_each_entry(rp, hn, rh, c_hash) {
+		if (rp->c_state != RC_UNUSED &&
+		    xid == rp->c_xid && proc == rp->c_proc &&
+		    proto == rp->c_prot && vers == rp->c_vers &&
+		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
+		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
+			nfsdstats.rchits++;
+			goto found_entry;
+		}
+	}
+	nfsdstats.rcmisses++;
+
+	/* This loop shouldn't take more than a few iterations normally */
+	{
+		int	safe = 0;
+
+		list_for_each_entry(rp, &lru_head, c_lru) {
+			if (rp->c_state != RC_INPROG)
+				break;
+			if (safe++ > CACHESIZE) {
+				printk("nfsd: loop in repcache LRU list\n");
+				cache_disabled = 1;
+				goto out;
+			}
+		}
+	}
+
+	/* This should not happen */
+	if (rp == NULL) {
+		static int	complaints;
+
+		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
+		if (++complaints > 5) {
+			printk(KERN_WARNING "nfsd: disabling repcache.\n");
+			cache_disabled = 1;
+		}
+		goto out;
+	}
+
+	rqstp->rq_cacherep = rp;
+	rp->c_state = RC_INPROG;
+	rp->c_xid = xid;
+	rp->c_proc = proc;
+	rp->c_addr = rqstp->rq_addr;
+	rp->c_prot = proto;
+	rp->c_vers = vers;
+	rp->c_timestamp = jiffies;
+
+	hash_refile(rp);
+
+	/* release any buffer */
+	if (rp->c_type == RC_REPLBUFF) {
+		kfree(rp->c_replvec.iov_base);
+		rp->c_replvec.iov_base = NULL;
+	}
+	rp->c_type = RC_NOCACHE;
+ out:
+	spin_unlock(&cache_lock);
+	return rtn;
+
+found_entry:
+	/* We found a matching entry which is either in progress or done. */
+	age = jiffies - rp->c_timestamp;
+	rp->c_timestamp = jiffies;
+	lru_put_end(rp);
+
+	rtn = RC_DROPIT;
+	/* Request being processed or excessive rexmits */
+	if (rp->c_state == RC_INPROG || age < RC_DELAY)
+		goto out;
+
+	/* From the hall of fame of impractical attacks:
+	 * Is this a user who tries to snoop on the cache? */
+	rtn = RC_DOIT;
+	if (!rqstp->rq_secure && rp->c_secure)
+		goto out;
+
+	/* Compose RPC reply header */
+	switch (rp->c_type) {
+	case RC_NOCACHE:
+		break;
+	case RC_REPLSTAT:
+		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
+		rtn = RC_REPLY;
+		break;
+	case RC_REPLBUFF:
+		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
+			goto out;	/* should not happen */
+		rtn = RC_REPLY;
+		break;
+	default:
+		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
+		rp->c_state = RC_UNUSED;
+	}
+
+	goto out;
+}
+
+/*
+ * Update a cache entry. This is called from nfsd_dispatch when
+ * the procedure has been executed and the complete reply is in
+ * rqstp->rq_res.
+ *
+ * We're copying around data here rather than swapping buffers because
+ * the toplevel loop requires max-sized buffers, which would be a waste
+ * of memory for a cache with a max reply size of 100 bytes (diropokres).
+ *
+ * If we should start to use different types of cache entries tailored
+ * specifically for attrstat and fh's, we may save even more space.
+ *
+ * Also note that a cachetype of RC_NOCACHE can legally be passed when
+ * nfsd failed to encode a reply that otherwise would have been cached.
+ * In this case, nfsd_cache_update is called with statp == NULL.
+ */
+void
+nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
+{
+	struct svc_cacherep *rp;
+	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
+	int		len;
+
+	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
+		return;
+
+	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
+	len >>= 2;
+
+	/* Don't cache excessive amounts of data and XDR failures */
+	if (!statp || len > (256 >> 2)) {
+		rp->c_state = RC_UNUSED;
+		return;
+	}
+
+	switch (cachetype) {
+	case RC_REPLSTAT:
+		if (len != 1)
+			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
+		rp->c_replstat = *statp;
+		break;
+	case RC_REPLBUFF:
+		cachv = &rp->c_replvec;
+		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
+		if (!cachv->iov_base) {
+			spin_lock(&cache_lock);
+			rp->c_state = RC_UNUSED;
+			spin_unlock(&cache_lock);
+			return;
+		}
+		cachv->iov_len = len << 2;
+		memcpy(cachv->iov_base, statp, len << 2);
+		break;
+	}
+	spin_lock(&cache_lock);
+	lru_put_end(rp);
+	rp->c_secure = rqstp->rq_secure;
+	rp->c_type = cachetype;
+	rp->c_state = RC_DONE;
+	rp->c_timestamp = jiffies;
+	spin_unlock(&cache_lock);
+	return;
+}
+
+/*
+ * Copy cached reply to current reply buffer. Should always fit.
+ * FIXME as reply is in a page, we should just attach the page, and
+ * keep a refcount....
+ */
+static int
+nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+{
+	struct kvec	*vec = &rqstp->rq_res.head[0];
+
+	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
+		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
+				data->iov_len);
+		return 0;
+	}
+	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
+	vec->iov_len += data->iov_len;
+	return 1;
+}
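
The reply-cache entry points above are driven from nfsd_dispatch() in
fs/nfsd/nfssvc.c later in this patch.  As a rough caller-side sketch only
(not part of the patch itself), the protocol is:

	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
	case RC_DROPIT:		/* in progress, or a very recent rexmit */
		return 0;
	case RC_REPLY:		/* cached reply already copied into rq_res */
		return 1;
	case RC_DOIT:		/* run the procedure normally */
		break;
	}
	/* ... execute the handler, then publish the reply; pass RC_NOCACHE
	 * and statp == NULL if encoding failed ... */
	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
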
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
new file mode 100644
index 0000000..161afdc
--- /dev/null
+++ b/fs/nfsd/nfsctl.c
@@ -0,0 +1,438 @@
+/*
+ * linux/fs/nfsd/nfsctl.c
+ *
+ * Syscall interface to knfsd.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/linkage.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+
+#include <linux/nfs.h>
+#include <linux/nfsd_idmap.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/nfsd/interface.h>
+
+#include <asm/uaccess.h>
+
+/*
+ *	We have a single directory with 11 nodes in it.
+ */
+enum {
+	NFSD_Root = 1,
+	NFSD_Svc,
+	NFSD_Add,
+	NFSD_Del,
+	NFSD_Export,
+	NFSD_Unexport,
+	NFSD_Getfd,
+	NFSD_Getfs,
+	NFSD_List,
+	NFSD_Fh,
+	NFSD_Threads,
+	NFSD_Leasetime,
+};
+
+/*
+ * write() for these nodes.
+ */
+static ssize_t write_svc(struct file *file, char *buf, size_t size);
+static ssize_t write_add(struct file *file, char *buf, size_t size);
+static ssize_t write_del(struct file *file, char *buf, size_t size);
+static ssize_t write_export(struct file *file, char *buf, size_t size);
+static ssize_t write_unexport(struct file *file, char *buf, size_t size);
+static ssize_t write_getfd(struct file *file, char *buf, size_t size);
+static ssize_t write_getfs(struct file *file, char *buf, size_t size);
+static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
+static ssize_t write_threads(struct file *file, char *buf, size_t size);
+static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
+
+static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+	[NFSD_Svc] = write_svc,
+	[NFSD_Add] = write_add,
+	[NFSD_Del] = write_del,
+	[NFSD_Export] = write_export,
+	[NFSD_Unexport] = write_unexport,
+	[NFSD_Getfd] = write_getfd,
+	[NFSD_Getfs] = write_getfs,
+	[NFSD_Fh] = write_filehandle,
+	[NFSD_Threads] = write_threads,
+	[NFSD_Leasetime] = write_leasetime,
+};
+
+static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+{
+	ino_t ino =  file->f_dentry->d_inode->i_ino;
+	char *data;
+	ssize_t rv;
+
+	if (ino >= sizeof(write_op)/sizeof(write_op[0]) || !write_op[ino])
+		return -EINVAL;
+
+	data = simple_transaction_get(file, buf, size);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	rv =  write_op[ino](file, data, size);
+	if (rv>0) {
+		simple_transaction_set(file, rv);
+		rv = size;
+	}
+	return rv;
+}
+
+static struct file_operations transaction_ops = {
+	.write		= nfsctl_transaction_write,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+};
+
+extern struct seq_operations nfs_exports_op;
+static int exports_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &nfs_exports_op);
+}
+
+static struct file_operations exports_operations = {
+	.open		= exports_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * payload - write methods
+ * If the method has a response, the response should be put in buf,
+ * and the length returned.  Otherwise return 0 or a negative error.
+ */
+
+static ssize_t write_svc(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_svc *data;
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_svc*) buf;
+	return nfsd_svc(data->svc_port, data->svc_nthreads);
+}
+
+static ssize_t write_add(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_client *data;
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_client *)buf;
+	return exp_addclient(data);
+}
+
+static ssize_t write_del(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_client *data;
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_client *)buf;
+	return exp_delclient(data);
+}
+
+static ssize_t write_export(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_export *data;
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_export*)buf;
+	return exp_export(data);
+}
+
+static ssize_t write_unexport(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_export *data;
+
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_export*)buf;
+	return exp_unexport(data);
+}
+
+static ssize_t write_getfs(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_fsparm *data;
+	struct sockaddr_in *sin;
+	struct auth_domain *clp;
+	int err = 0;
+	struct knfsd_fh *res;
+
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_fsparm*)buf;
+	err = -EPROTONOSUPPORT;
+	if (data->gd_addr.sa_family != AF_INET)
+		goto out;
+	sin = (struct sockaddr_in *)&data->gd_addr;
+	if (data->gd_maxlen > NFS3_FHSIZE)
+		data->gd_maxlen = NFS3_FHSIZE;
+
+	res = (struct knfsd_fh*)buf;
+
+	exp_readlock();
+	if (!(clp = auth_unix_lookup(sin->sin_addr)))
+		err = -EPERM;
+	else {
+		err = exp_rootfh(clp, data->gd_path, res, data->gd_maxlen);
+		auth_domain_put(clp);
+	}
+	exp_readunlock();
+	if (err == 0)
+		err = res->fh_size + (int)&((struct knfsd_fh*)0)->fh_base;
+ out:
+	return err;
+}
+
+static ssize_t write_getfd(struct file *file, char *buf, size_t size)
+{
+	struct nfsctl_fdparm *data;
+	struct sockaddr_in *sin;
+	struct auth_domain *clp;
+	int err = 0;
+	struct knfsd_fh fh;
+	char *res;
+
+	if (size < sizeof(*data))
+		return -EINVAL;
+	data = (struct nfsctl_fdparm*)buf;
+	err = -EPROTONOSUPPORT;
+	if (data->gd_addr.sa_family != AF_INET)
+		goto out;
+	err = -EINVAL;
+	if (data->gd_version < 2 || data->gd_version > NFSSVC_MAXVERS)
+		goto out;
+
+	res = buf;
+	sin = (struct sockaddr_in *)&data->gd_addr;
+	exp_readlock();
+	if (!(clp = auth_unix_lookup(sin->sin_addr)))
+		err = -EPERM;
+	else {
+		err = exp_rootfh(clp, data->gd_path, &fh, NFS_FHSIZE);
+		auth_domain_put(clp);
+	}
+	exp_readunlock();
+
+	if (err == 0) {
+		memset(res,0, NFS_FHSIZE);
+		memcpy(res, &fh.fh_base, fh.fh_size);
+		err = NFS_FHSIZE;
+	}
+ out:
+	return err;
+}
+
+static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
+{
+	/* request is:
+	 *   domain path maxsize
+	 * response is
+	 *   filehandle
+	 *
+	 * qword quoting is used, so filehandle will be \x....
+	 */
+	char *dname, *path;
+	int maxsize;
+	char *mesg = buf;
+	int len;
+	struct auth_domain *dom;
+	struct knfsd_fh fh;
+
+	if (buf[size-1] != '\n')
+		return -EINVAL;
+	buf[size-1] = 0;
+
+	dname = mesg;
+	len = qword_get(&mesg, dname, size);
+	if (len <= 0) return -EINVAL;
+	
+	path = dname+len+1;
+	len = qword_get(&mesg, path, size);
+	if (len <= 0) return -EINVAL;
+
+	len = get_int(&mesg, &maxsize);
+	if (len)
+		return len;
+
+	if (maxsize < NFS_FHSIZE)
+		return -EINVAL;
+	if (maxsize > NFS3_FHSIZE)
+		maxsize = NFS3_FHSIZE;
+
+	if (qword_get(&mesg, mesg, size)>0)
+		return -EINVAL;
+
+	/* we have all the words, they are in buf.. */
+	dom = unix_domain_find(dname);
+	if (!dom)
+		return -ENOMEM;
+
+	len = exp_rootfh(dom, path, &fh,  maxsize);
+	auth_domain_put(dom);
+	if (len)
+		return len;
+	
+	mesg = buf; len = SIMPLE_TRANSACTION_LIMIT;
+	qword_addhex(&mesg, &len, (char*)&fh.fh_base, fh.fh_size);
+	mesg[-1] = '\n';
+	return mesg - buf;	
+}
+
+extern int nfsd_nrthreads(void);
+
+static ssize_t write_threads(struct file *file, char *buf, size_t size)
+{
+	/* if size > 0, parse the requested number of threads and call
+	 * nfsd_svc(); then write out the current number of threads as the reply
+	 */
+	char *mesg = buf;
+	int rv;
+	if (size > 0) {
+		int newthreads;
+		rv = get_int(&mesg, &newthreads);
+		if (rv)
+			return rv;
+		if (newthreads <0)
+			return -EINVAL;
+		rv = nfsd_svc(2049, newthreads);
+		if (rv)
+			return rv;
+	}
+	sprintf(buf, "%d\n", nfsd_nrthreads());
+	return strlen(buf);
+}
+
+extern time_t nfs4_lease_time(void);
+
+static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
+{
+	/* if size > 0, parse a lease time in seconds (10..3600, else -EINVAL),
+	 * call nfs4_reset_lease(), then write out the current lease time
+	 * (seconds) as the reply */
+	char *mesg = buf;
+	int rv;
+
+	if (size > 0) {
+		int lease;
+		rv = get_int(&mesg, &lease);
+		if (rv)
+			return rv;
+		if (lease < 10 || lease > 3600)
+			return -EINVAL;
+		nfs4_reset_lease(lease);
+	}
+	sprintf(buf, "%ld\n", nfs4_lease_time());
+	return strlen(buf);
+}
+
+/*----------------------------------------------------------------------------*/
+/*
+ *	populating the filesystem.
+ */
+
+static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
+{
+	static struct tree_descr nfsd_files[] = {
+		[NFSD_Svc] = {".svc", &transaction_ops, S_IWUSR},
+		[NFSD_Add] = {".add", &transaction_ops, S_IWUSR},
+		[NFSD_Del] = {".del", &transaction_ops, S_IWUSR},
+		[NFSD_Export] = {".export", &transaction_ops, S_IWUSR},
+		[NFSD_Unexport] = {".unexport", &transaction_ops, S_IWUSR},
+		[NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
+		[NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
+		[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+		[NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
+		[NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
+#ifdef CONFIG_NFSD_V4
+		[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
+		/* last one */ {""}
+	};
+	return simple_fill_super(sb, 0x6e667364, nfsd_files);
+}
+
+static struct super_block *nfsd_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, nfsd_fill_super);
+}
+
+static struct file_system_type nfsd_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "nfsd",
+	.get_sb		= nfsd_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+static int __init init_nfsd(void)
+{
+	int retval;
+	printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+	nfsd_stat_init();	/* Statistics */
+	nfsd_cache_init();	/* RPC reply cache */
+	nfsd_export_init();	/* Exports table */
+	nfsd_lockd_init();	/* lockd->nfsd callbacks */
+#ifdef CONFIG_NFSD_V4
+	nfsd_idmap_init();      /* Name to ID mapping */
+#endif /* CONFIG_NFSD_V4 */
+	if (proc_mkdir("fs/nfs", NULL)) {
+		struct proc_dir_entry *entry;
+		entry = create_proc_entry("fs/nfs/exports", 0, NULL);
+		if (entry)
+			entry->proc_fops =  &exports_operations;
+	}
+	retval = register_filesystem(&nfsd_fs_type);
+	if (retval) {
+		nfsd_export_shutdown();
+		nfsd_cache_shutdown();
+		remove_proc_entry("fs/nfs/exports", NULL);
+		remove_proc_entry("fs/nfs", NULL);
+		nfsd_stat_shutdown();
+		nfsd_lockd_shutdown();
+	}
+	return retval;
+}
+
+static void __exit exit_nfsd(void)
+{
+	nfsd_export_shutdown();
+	nfsd_cache_shutdown();
+	remove_proc_entry("fs/nfs/exports", NULL);
+	remove_proc_entry("fs/nfs", NULL);
+	nfsd_stat_shutdown();
+	nfsd_lockd_shutdown();
+#ifdef CONFIG_NFSD_V4
+	nfsd_idmap_shutdown();
+#endif /* CONFIG_NFSD_V4 */
+	unregister_filesystem(&nfsd_fs_type);
+}
+
+MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_LICENSE("GPL");
+module_init(init_nfsd)
+module_exit(exit_nfsd)
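
The nodes created by nfsd_fill_super() above use the simple_transaction
helpers: userspace writes a request into a node and reads the reply back
from the same file descriptor.  A minimal userspace sketch for the
"threads" node follows; it is illustrative only and assumes the nfsd
filesystem is mounted at /proc/fs/nfsd, which this patch does not set up:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int set_nfsd_threads(int n)
	{
		char buf[32];
		int fd = open("/proc/fs/nfsd/threads", O_RDWR);

		if (fd < 0)
			return -1;
		snprintf(buf, sizeof(buf), "%d\n", n);
		/* ends up in write_threads(), which calls nfsd_svc(2049, n) */
		if (write(fd, buf, strlen(buf)) < 0) {
			close(fd);
			return -1;
		}
		/* the transaction reply is the current thread count, e.g. "8\n" */
		lseek(fd, 0, SEEK_SET);
		if (read(fd, buf, sizeof(buf) - 1) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}
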
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
new file mode 100644
index 0000000..7a3e397
--- /dev/null
+++ b/fs/nfsd/nfsfh.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nfsd/nfsfh.c
+ *
+ * NFS server file handle treatment.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ * Portions Copyright (C) 1999 G. Allen Morris III <gam3@acm.org>
+ * Extensive rewrite by Neil Brown <neilb@cse.unsw.edu.au> Southern-Spring 1999
+ * ... and again Southern-Winter 2001 to support export_operations
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <asm/pgtable.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_FH
+#define NFSD_PARANOIA 1
+/* #define NFSD_DEBUG_VERBOSE 1 */
+
+
+static int nfsd_nr_verified;
+static int nfsd_nr_put;
+
+extern struct export_operations export_op_default;
+
+#define	CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
+
+/*
+ * our acceptability function.
+ * if NOSUBTREECHECK, accept anything
+ * if not, require that we can walk up to exp->ex_dentry
+ * doing some checks on the 'x' bits
+ */
+static int nfsd_acceptable(void *expv, struct dentry *dentry)
+{
+	struct svc_export *exp = expv;
+	int rv;
+	struct dentry *tdentry;
+	struct dentry *parent;
+
+	if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
+		return 1;
+
+	tdentry = dget(dentry);
+	while (tdentry != exp->ex_dentry && ! IS_ROOT(tdentry)) {
+		/* make sure parents give x permission to user */
+		int err;
+		parent = dget_parent(tdentry);
+		err = permission(parent->d_inode, MAY_EXEC, NULL);
+		if (err < 0) {
+			dput(parent);
+			break;
+		}
+		dput(tdentry);
+		tdentry = parent;
+	}
+	if (tdentry != exp->ex_dentry)
+		dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
+	rv = (tdentry == exp->ex_dentry);
+	dput(tdentry);
+	return rv;
+}
+
+/* Type check. The correct error return for type mismatches does not seem to be
+ * generally agreed upon. SunOS seems to use EISDIR if file isn't S_IFREG; a
+ * comment in the NFSv3 spec says this is incorrect (implementation notes for
+ * the write call).
+ */
+static inline int
+nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type)
+{
+	/* Type can be negative when creating hardlinks - not to a dir */
+	if (type > 0 && (mode & S_IFMT) != type) {
+		if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
+			return nfserr_symlink;
+		else if (type == S_IFDIR)
+			return nfserr_notdir;
+		else if ((mode & S_IFMT) == S_IFDIR)
+			return nfserr_isdir;
+		else
+			return nfserr_inval;
+	}
+	if (type < 0 && (mode & S_IFMT) == -type) {
+		if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
+			return nfserr_symlink;
+		else if (type == -S_IFDIR)
+			return nfserr_isdir;
+		else
+			return nfserr_notdir;
+	}
+	return 0;
+}
+
+/*
+ * Perform sanity checks on the dentry in a client's file handle.
+ *
+ * Note that the file handle dentry may need to be freed even after
+ * an error return.
+ *
+ * This is only called at the start of an nfsproc call, so fhp points to
+ * a svc_fh which is all 0 except for the over-the-wire file handle.
+ */
+u32
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
+{
+	struct knfsd_fh	*fh = &fhp->fh_handle;
+	struct svc_export *exp = NULL;
+	struct dentry	*dentry;
+	u32		error = 0;
+
+	dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));
+
+	/* keep this filehandle for possible reference  when encoding attributes */
+	rqstp->rq_reffh = fh;
+
+	if (!fhp->fh_dentry) {
+		__u32 *datap=NULL;
+		__u32 tfh[3];		/* filehandle fragment for oldstyle filehandles */
+		int fileid_type;
+		int data_left = fh->fh_size/4;
+
+		error = nfserr_stale;
+		if (rqstp->rq_client == NULL)
+			goto out;
+		if (rqstp->rq_vers > 2)
+			error = nfserr_badhandle;
+		if (rqstp->rq_vers == 4 && fh->fh_size == 0)
+			return nfserr_nofilehandle;
+
+		if (fh->fh_version == 1) {
+			int len;
+			datap = fh->fh_auth;
+			if (--data_left<0) goto out;
+			switch (fh->fh_auth_type) {
+			case 0: break;
+			default: goto out;
+			}
+			len = key_len(fh->fh_fsid_type) / 4;
+			if (len == 0) goto out;
+			if  (fh->fh_fsid_type == 2) {
+				/* deprecated, convert to type 3 */
+				len = 3;
+				fh->fh_fsid_type = 3;
+				fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]), ntohl(fh->fh_fsid[1])));
+				fh->fh_fsid[1] = fh->fh_fsid[2];
+			}
+			if ((data_left -= len)<0) goto out;
+			exp = exp_find(rqstp->rq_client, fh->fh_fsid_type, datap, &rqstp->rq_chandle);
+			datap += len;
+		} else {
+			dev_t xdev;
+			ino_t xino;
+			if (fh->fh_size != NFS_FHSIZE)
+				goto out;
+			/* assume old filehandle format */
+			xdev = old_decode_dev(fh->ofh_xdev);
+			xino = u32_to_ino_t(fh->ofh_xino);
+			mk_fsid_v0(tfh, xdev, xino);
+			exp = exp_find(rqstp->rq_client, 0, tfh, &rqstp->rq_chandle);
+		}
+
+		error = nfserr_dropit;
+		if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN)
+			goto out;
+
+		error = nfserr_stale; 
+		if (!exp || IS_ERR(exp))
+			goto out;
+
+		/* Check if the request originated from a secure port. */
+		error = nfserr_perm;
+		if (!rqstp->rq_secure && EX_SECURE(exp)) {
+			printk(KERN_WARNING
+			       "nfsd: request from insecure port (%u.%u.%u.%u:%d)!\n",
+			       NIPQUAD(rqstp->rq_addr.sin_addr.s_addr),
+			       ntohs(rqstp->rq_addr.sin_port));
+			goto out;
+		}
+
+		/* Set user creds for this exportpoint */
+		error = nfsd_setuser(rqstp, exp);
+		if (error) {
+			error = nfserrno(error);
+			goto out;
+		}
+
+		/*
+		 * Look up the dentry using the NFS file handle.
+		 */
+		error = nfserr_stale;
+		if (rqstp->rq_vers > 2)
+			error = nfserr_badhandle;
+
+		if (fh->fh_version != 1) {
+			tfh[0] = fh->ofh_ino;
+			tfh[1] = fh->ofh_generation;
+			tfh[2] = fh->ofh_dirino;
+			datap = tfh;
+			data_left = 3;
+			if (fh->ofh_dirino == 0)
+				fileid_type = 1;
+			else
+				fileid_type = 2;
+		} else
+			fileid_type = fh->fh_fileid_type;
+		
+		if (fileid_type == 0)
+			dentry = dget(exp->ex_dentry);
+		else {
+			struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
+			dentry = CALL(nop,decode_fh)(exp->ex_mnt->mnt_sb,
+						     datap, data_left,
+						     fileid_type,
+						     nfsd_acceptable, exp);
+		}
+		if (dentry == NULL)
+			goto out;
+		if (IS_ERR(dentry)) {
+			if (PTR_ERR(dentry) != -EINVAL)
+				error = nfserrno(PTR_ERR(dentry));
+			goto out;
+		}
+#ifdef NFSD_PARANOIA
+		if (S_ISDIR(dentry->d_inode->i_mode) &&
+		    (dentry->d_flags & DCACHE_DISCONNECTED)) {
+			printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
+			       dentry->d_parent->d_name.name, dentry->d_name.name);
+		}
+#endif
+
+		fhp->fh_dentry = dentry;
+		fhp->fh_export = exp;
+		nfsd_nr_verified++;
+	} else {
+		/* just rechecking permissions
+		 * (e.g. nfsproc_create calls fh_verify, then nfsd_create does as well)
+		 */
+		dprintk("nfsd: fh_verify - just checking\n");
+		dentry = fhp->fh_dentry;
+		exp = fhp->fh_export;
+	}
+	cache_get(&exp->h);
+
+	error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
+	if (error)
+		goto out;
+
+	/* Finally, check access permissions. */
+	error = nfsd_permission(exp, dentry, access);
+
+#ifdef NFSD_PARANOIA_EXTREME
+	if (error) {
+		printk("fh_verify: %s/%s permission failure, acc=%x, error=%d\n",
+		       dentry->d_parent->d_name.name, dentry->d_name.name, access, (error >> 24));
+	}
+#endif
+out:
+	if (exp && !IS_ERR(exp))
+		exp_put(exp);
+	if (error == nfserr_stale)
+		nfsdstats.fh_stale++;
+	return error;
+}
+
+
+/*
+ * Compose a file handle for an NFS reply.
+ *
+ * Note that when first composed, the dentry may not yet have
+ * an inode.  In this case a call to fh_update should be made
+ * before the fh goes out on the wire ...
+ */
+static inline int _fh_update(struct dentry *dentry, struct svc_export *exp,
+			     __u32 *datap, int *maxsize)
+{
+	struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
+	
+	if (dentry == exp->ex_dentry) {
+		*maxsize = 0;
+		return 0;
+	}
+
+	return CALL(nop,encode_fh)(dentry, datap, maxsize,
+			  !(exp->ex_flags&NFSEXP_NOSUBTREECHECK));
+}
+
+/*
+ * for composing old style file handles
+ */
+static inline void _fh_update_old(struct dentry *dentry,
+				  struct svc_export *exp,
+				  struct knfsd_fh *fh)
+{
+	fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
+	fh->ofh_generation = dentry->d_inode->i_generation;
+	if (S_ISDIR(dentry->d_inode->i_mode) ||
+	    (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
+		fh->ofh_dirino = 0;
+}
+
+int
+fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, struct svc_fh *ref_fh)
+{
+	/* ref_fh is a reference file handle.
+	 * if it is non-null, then we should compose a filehandle which is
+	 * of the same version, where possible.
+	 * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca,
+	 * we create a 32-byte filehandle using nfs_fhbase_old.
+	 *
+	 */
+
+	u8 ref_fh_version = 0;
+	u8 ref_fh_fsid_type = 0;
+	struct inode * inode = dentry->d_inode;
+	struct dentry *parent = dentry->d_parent;
+	__u32 *datap;
+	dev_t ex_dev = exp->ex_dentry->d_inode->i_sb->s_dev;
+
+	dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+		MAJOR(ex_dev), MINOR(ex_dev),
+		(long) exp->ex_dentry->d_inode->i_ino,
+		parent->d_name.name, dentry->d_name.name,
+		(inode ? inode->i_ino : 0));
+
+	if (ref_fh) {
+		ref_fh_version = ref_fh->fh_handle.fh_version;
+		if (ref_fh_version == 0xca)
+			ref_fh_fsid_type = 0;
+		else
+			ref_fh_fsid_type = ref_fh->fh_handle.fh_fsid_type;
+		if (ref_fh_fsid_type > 3)
+			ref_fh_fsid_type = 0;
+
+		/* make sure ref_fh type works for given export */
+		if (ref_fh_fsid_type == 1 &&
+		    !(exp->ex_flags & NFSEXP_FSID)) {
+			/* if we don't have an fsid, we cannot provide one... */
+			ref_fh_fsid_type = 0;
+		}
+	} else if (exp->ex_flags & NFSEXP_FSID)
+		ref_fh_fsid_type = 1;
+
+	if (!old_valid_dev(ex_dev) && ref_fh_fsid_type == 0) {
+		/* for newer device numbers, we must use a newer fsid format */
+		ref_fh_version = 1;
+		ref_fh_fsid_type = 3;
+	}
+	if (old_valid_dev(ex_dev) &&
+	    (ref_fh_fsid_type == 2 || ref_fh_fsid_type == 3))
+		/* must use type1 for smaller device numbers */
+		ref_fh_fsid_type = 0;
+
+	if (ref_fh == fhp)
+		fh_put(ref_fh);
+
+	if (fhp->fh_locked || fhp->fh_dentry) {
+		printk(KERN_ERR "fh_compose: fh %s/%s not initialized!\n",
+			parent->d_name.name, dentry->d_name.name);
+	}
+	if (fhp->fh_maxsize < NFS_FHSIZE)
+		printk(KERN_ERR "fh_compose: called with maxsize %d! %s/%s\n",
+		       fhp->fh_maxsize, parent->d_name.name, dentry->d_name.name);
+
+	fhp->fh_dentry = dget(dentry); /* our internal copy */
+	fhp->fh_export = exp;
+	cache_get(&exp->h);
+
+	if (ref_fh_version == 0xca) {
+		/* old style filehandle please */
+		memset(&fhp->fh_handle.fh_base, 0, NFS_FHSIZE);
+		fhp->fh_handle.fh_size = NFS_FHSIZE;
+		fhp->fh_handle.ofh_dcookie = 0xfeebbaca;
+		fhp->fh_handle.ofh_dev =  old_encode_dev(ex_dev);
+		fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev;
+		fhp->fh_handle.ofh_xino = ino_t_to_u32(exp->ex_dentry->d_inode->i_ino);
+		fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry));
+		if (inode)
+			_fh_update_old(dentry, exp, &fhp->fh_handle);
+	} else {
+		int len;
+		fhp->fh_handle.fh_version = 1;
+		fhp->fh_handle.fh_auth_type = 0;
+		datap = fhp->fh_handle.fh_auth+0;
+		fhp->fh_handle.fh_fsid_type = ref_fh_fsid_type;
+		switch (ref_fh_fsid_type) {
+			case 0:
+				/*
+				 * fsid_type 0:
+				 * 2byte major, 2byte minor, 4byte inode
+				 */
+				mk_fsid_v0(datap, ex_dev,
+					exp->ex_dentry->d_inode->i_ino);
+				break;
+			case 1:
+				/* fsid_type 1 == 4 bytes filesystem id */
+				mk_fsid_v1(datap, exp->ex_fsid);
+				break;
+			case 2:
+				/*
+				 * fsid_type 2:
+				 * 4byte major, 4byte minor, 4byte inode
+				 */
+				mk_fsid_v2(datap, ex_dev,
+					exp->ex_dentry->d_inode->i_ino);
+				break;
+			case 3:
+				/*
+				 * fsid_type 3:
+				 * 4byte devicenumber, 4byte inode
+				 */
+				mk_fsid_v3(datap, ex_dev,
+					exp->ex_dentry->d_inode->i_ino);
+				break;
+		}
+		len = key_len(ref_fh_fsid_type);
+		datap += len/4;
+		fhp->fh_handle.fh_size = 4 + len;
+
+		if (inode) {
+			int size = (fhp->fh_maxsize-len-4)/4;
+			fhp->fh_handle.fh_fileid_type =
+				_fh_update(dentry, exp, datap, &size);
+			fhp->fh_handle.fh_size += size*4;
+		}
+		if (fhp->fh_handle.fh_fileid_type == 255)
+			return nfserr_opnotsupp;
+	}
+
+	nfsd_nr_verified++;
+	return 0;
+}
+
+/*
+ * Update file handle information after changing a dentry.
+ * This is only called by nfsd_create, nfsd_create_v3 and nfsd_proc_create
+ */
+int
+fh_update(struct svc_fh *fhp)
+{
+	struct dentry *dentry;
+	__u32 *datap;
+	
+	if (!fhp->fh_dentry)
+		goto out_bad;
+
+	dentry = fhp->fh_dentry;
+	if (!dentry->d_inode)
+		goto out_negative;
+	if (fhp->fh_handle.fh_version != 1) {
+		_fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
+	} else {
+		int size;
+		if (fhp->fh_handle.fh_fileid_type != 0)
+			goto out_uptodate;
+		datap = fhp->fh_handle.fh_auth+
+			fhp->fh_handle.fh_size/4 -1;
+		size = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
+		fhp->fh_handle.fh_fileid_type =
+			_fh_update(dentry, fhp->fh_export, datap, &size);
+		fhp->fh_handle.fh_size += size*4;
+		if (fhp->fh_handle.fh_fileid_type == 255)
+			return nfserr_opnotsupp;
+	}
+out:
+	return 0;
+
+out_bad:
+	printk(KERN_ERR "fh_update: fh not verified!\n");
+	goto out;
+out_negative:
+	printk(KERN_ERR "fh_update: %s/%s still negative!\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+	goto out;
+out_uptodate:
+	printk(KERN_ERR "fh_update: %s/%s already up-to-date!\n",
+		dentry->d_parent->d_name.name, dentry->d_name.name);
+	goto out;
+}
+
+/*
+ * Release a file handle.
+ */
+void
+fh_put(struct svc_fh *fhp)
+{
+	struct dentry * dentry = fhp->fh_dentry;
+	struct svc_export * exp = fhp->fh_export;
+	if (dentry) {
+		fh_unlock(fhp);
+		fhp->fh_dentry = NULL;
+		dput(dentry);
+#ifdef CONFIG_NFSD_V3
+		fhp->fh_pre_saved = 0;
+		fhp->fh_post_saved = 0;
+#endif
+		nfsd_nr_put++;
+	}
+	if (exp) {
+		svc_export_put(&exp->h, &svc_export_cache);
+		fhp->fh_export = NULL;
+	}
+	return;
+}
+
+/*
+ * Shorthand for dprintk()'s
+ */
+char * SVCFH_fmt(struct svc_fh *fhp)
+{
+	struct knfsd_fh *fh = &fhp->fh_handle;
+
+	static char buf[80];
+	sprintf(buf, "%d: %08x %08x %08x %08x %08x %08x",
+		fh->fh_size,
+		fh->fh_base.fh_pad[0],
+		fh->fh_base.fh_pad[1],
+		fh->fh_base.fh_pad[2],
+		fh->fh_base.fh_pad[3],
+		fh->fh_base.fh_pad[4],
+		fh->fh_base.fh_pad[5]);
+	return buf;
+}
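
For reference, the version-1 filehandle composed by fh_compose() above has
the following layout (a sketch inferred from the code; fsid_type 0 shown,
with key_len(0) assumed to be 8 bytes):

	byte 0		fh_version	(1)
	byte 1		fh_auth_type	(0)
	byte 2		fh_fsid_type	(0..3, chosen from ref_fh and export flags)
	byte 3		fh_fileid_type	(returned by the filesystem's encode_fh)
	bytes 4..11	fsid		(mk_fsid_v0: 2-byte major, 2-byte minor,
					 4-byte export inode)
	then		fileid words	(filled in by _fh_update())

so fh_size is 4 + key_len(fsid_type) before the fileid words are added,
which matches "fhp->fh_handle.fh_size = 4 + len" in the code above.
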
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
new file mode 100644
index 0000000..757f9d2
--- /dev/null
+++ b/fs/nfsd/nfsproc.c
@@ -0,0 +1,605 @@
+/*
+ * linux/fs/nfsd/nfsproc.c
+ *
+ * Process version 2 NFS requests.
+ *
+ * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/linkage.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/namei.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+
+typedef struct svc_rqst	svc_rqst;
+typedef struct svc_buf	svc_buf;
+
+#define NFSDDBG_FACILITY		NFSDDBG_PROC
+
+
+static int
+nfsd_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+	return nfs_ok;
+}
+
+/*
+ * Get a file's attributes
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+					  struct nfsd_attrstat *resp)
+{
+	dprintk("nfsd: GETATTR  %s\n", SVCFH_fmt(&argp->fh));
+
+	fh_copy(&resp->fh, &argp->fh);
+	return fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
+}
+
+/*
+ * Set a file's attributes
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
+					  struct nfsd_attrstat  *resp)
+{
+	dprintk("nfsd: SETATTR  %s, valid=%x, size=%ld\n",
+		SVCFH_fmt(&argp->fh),
+		argp->attrs.ia_valid, (long) argp->attrs.ia_size);
+
+	fh_copy(&resp->fh, &argp->fh);
+	return nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
+}
+
+/*
+ * Look up a path name component
+ * Note: the dentry in the resp->fh may be negative if the file
+ * doesn't exist yet.
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+					 struct nfsd_diropres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: LOOKUP   %s %.*s\n",
+		SVCFH_fmt(&argp->fh), argp->len, argp->name);
+
+	fh_init(&resp->fh, NFS_FHSIZE);
+	nfserr = nfsd_lookup(rqstp, &argp->fh, argp->name, argp->len,
+				 &resp->fh);
+
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * Read a symlink.
+ */
+static int
+nfsd_proc_readlink(struct svc_rqst *rqstp, struct nfsd_readlinkargs *argp,
+					   struct nfsd_readlinkres *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
+
+	/* Read the symlink. */
+	resp->len = NFS_MAXPATHLEN;
+	nfserr = nfsd_readlink(rqstp, &argp->fh, argp->buffer, &resp->len);
+
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * Read a portion of a file.
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
+				       struct nfsd_readres  *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: READ    %s %d bytes at %d\n",
+		SVCFH_fmt(&argp->fh),
+		argp->count, argp->offset);
+
+	/* Reserve reply space below: 19 words of header, i.e. 1 word for
+	 * status, 17 words for fattr, and 1 word for the byte count.
+	 */
+
+	if (NFSSVC_MAXBLKSIZE < argp->count) {
+		printk(KERN_NOTICE
+			"oversized read request from %u.%u.%u.%u:%d (%d bytes)\n",
+				NIPQUAD(rqstp->rq_addr.sin_addr.s_addr),
+				ntohs(rqstp->rq_addr.sin_port),
+				argp->count);
+		argp->count = NFSSVC_MAXBLKSIZE;
+	}
+	svc_reserve(rqstp, (19<<2) + argp->count + 4);
+
+	resp->count = argp->count;
+	nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh), NULL,
+				  argp->offset,
+			   	  argp->vec, argp->vlen,
+				  &resp->count);
+
+	return nfserr;
+}
+
+/*
+ * Write data to a file
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
+					struct nfsd_attrstat  *resp)
+{
+	int	nfserr;
+	int	stable = 1;
+
+	dprintk("nfsd: WRITE    %s %d bytes at %d\n",
+		SVCFH_fmt(&argp->fh),
+		argp->len, argp->offset);
+
+	nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), NULL,
+				   argp->offset,
+				   argp->vec, argp->vlen,
+				   argp->len,
+				   &stable);
+	return nfserr;
+}
+
+/*
+ * CREATE processing is complicated. The keyword here is `overloaded.'
+ * The parent directory is kept locked between the check for existence
+ * and the actual create() call in compliance with VFS protocols.
+ * N.B. After this call _both_ argp->fh and resp->fh need an fh_put
+ */
+static int
+nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
+					 struct nfsd_diropres   *resp)
+{
+	svc_fh		*dirfhp = &argp->fh;
+	svc_fh		*newfhp = &resp->fh;
+	struct iattr	*attr = &argp->attrs;
+	struct inode	*inode;
+	struct dentry	*dchild;
+	int		nfserr, type, mode;
+	dev_t		rdev = 0, wanted = new_decode_dev(attr->ia_size);
+
+	dprintk("nfsd: CREATE   %s %.*s\n",
+		SVCFH_fmt(dirfhp), argp->len, argp->name);
+
+	/* First verify the parent file handle */
+	nfserr = fh_verify(rqstp, dirfhp, S_IFDIR, MAY_EXEC);
+	if (nfserr)
+		goto done; /* must fh_put dirfhp even on error */
+
+	/* Check for MAY_WRITE in nfsd_create if necessary */
+
+	nfserr = nfserr_acces;
+	if (!argp->len)
+		goto done;
+	nfserr = nfserr_exist;
+	if (isdotent(argp->name, argp->len))
+		goto done;
+	fh_lock(dirfhp);
+	dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
+	if (IS_ERR(dchild)) {
+		nfserr = nfserrno(PTR_ERR(dchild));
+		goto out_unlock;
+	}
+	fh_init(newfhp, NFS_FHSIZE);
+	nfserr = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp);
+	if (!nfserr && !dchild->d_inode)
+		nfserr = nfserr_noent;
+	dput(dchild);
+	if (nfserr) {
+		if (nfserr != nfserr_noent)
+			goto out_unlock;
+		/*
+		 * If the new file handle wasn't verified, we can't tell
+		 * whether the file exists or not. Time to bail ...
+		 */
+		nfserr = nfserr_acces;
+		if (!newfhp->fh_dentry) {
+			printk(KERN_WARNING 
+				"nfsd_proc_create: file handle not verified\n");
+			goto out_unlock;
+		}
+	}
+
+	inode = newfhp->fh_dentry->d_inode;
+
+	/* Unfudge the mode bits */
+	if (attr->ia_valid & ATTR_MODE) {
+		type = attr->ia_mode & S_IFMT;
+		mode = attr->ia_mode & ~S_IFMT;
+		if (!type) {
+			/* no type, so if target exists, assume same as that,
+			 * else assume a file */
+			if (inode) {
+				type = inode->i_mode & S_IFMT;
+				switch(type) {
+				case S_IFCHR:
+				case S_IFBLK:
+					/* reserve rdev for later checking */
+					rdev = inode->i_rdev;
+					attr->ia_valid |= ATTR_SIZE;
+
+					/* FALLTHROUGH */
+				case S_IFIFO:
+					/* this is probably a permission check..
+					 * at least IRIX implements perm checking on
+					 *   echo thing > device-special-file-or-pipe
+					 * by doing a CREATE with type==0
+					 */
+					nfserr = nfsd_permission(newfhp->fh_export,
+								 newfhp->fh_dentry,
+								 MAY_WRITE|MAY_LOCAL_ACCESS);
+					if (nfserr && nfserr != nfserr_rofs)
+						goto out_unlock;
+				}
+			} else
+				type = S_IFREG;
+		}
+	} else if (inode) {
+		type = inode->i_mode & S_IFMT;
+		mode = inode->i_mode & ~S_IFMT;
+	} else {
+		type = S_IFREG;
+		mode = 0;	/* ??? */
+	}
+
+	attr->ia_valid |= ATTR_MODE;
+	attr->ia_mode = mode;
+
+	/* Special treatment for non-regular files according to the
+	 * gospel of sun micro
+	 */
+	if (type != S_IFREG) {
+		int	is_borc = 0;
+		if (type != S_IFBLK && type != S_IFCHR) {
+			rdev = 0;
+		} else if (type == S_IFCHR && !(attr->ia_valid & ATTR_SIZE)) {
+			/* If you think you've seen the worst, grok this. */
+			type = S_IFIFO;
+		} else {
+			/* Okay, char or block special */
+			is_borc = 1;
+			if (!rdev)
+				rdev = wanted;
+		}
+
+		/* we've used the SIZE information, so discard it */
+		attr->ia_valid &= ~ATTR_SIZE;
+
+		/* Make sure the type and device matches */
+		nfserr = nfserr_exist;
+		if (inode && type != (inode->i_mode & S_IFMT))
+			goto out_unlock;
+	}
+
+	nfserr = 0;
+	if (!inode) {
+		/* File doesn't exist. Create it and set attrs */
+		nfserr = nfsd_create(rqstp, dirfhp, argp->name, argp->len,
+					attr, type, rdev, newfhp);
+	} else if (type == S_IFREG) {
+		dprintk("nfsd:   existing %s, valid=%x, size=%ld\n",
+			argp->name, attr->ia_valid, (long) attr->ia_size);
+		/* File already exists. We ignore all attributes except
+		 * size, so that creat() behaves exactly like
+		 * open(..., O_CREAT|O_TRUNC|O_WRONLY).
+		 */
+		attr->ia_valid &= ATTR_SIZE;
+		if (attr->ia_valid)
+			nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time_t)0);
+	}
+
+out_unlock:
+	/* We don't really need to unlock, as fh_put does it. */
+	fh_unlock(dirfhp);
+
+done:
+	fh_put(dirfhp);
+	return nfserr;
+}
+
+static int
+nfsd_proc_remove(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+					 void		       *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: REMOVE   %s %.*s\n", SVCFH_fmt(&argp->fh),
+		argp->len, argp->name);
+
+	/* Unlink. -S_IFDIR means file must not be a directory */
+	nfserr = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR, argp->name, argp->len);
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+static int
+nfsd_proc_rename(struct svc_rqst *rqstp, struct nfsd_renameargs *argp,
+				  	 void		        *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: RENAME   %s %.*s -> \n",
+		SVCFH_fmt(&argp->ffh), argp->flen, argp->fname);
+	dprintk("nfsd:        ->  %s %.*s\n",
+		SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname);
+
+	nfserr = nfsd_rename(rqstp, &argp->ffh, argp->fname, argp->flen,
+				    &argp->tfh, argp->tname, argp->tlen);
+	fh_put(&argp->ffh);
+	fh_put(&argp->tfh);
+	return nfserr;
+}
+
+static int
+nfsd_proc_link(struct svc_rqst *rqstp, struct nfsd_linkargs *argp,
+				void			    *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: LINK     %s ->\n",
+		SVCFH_fmt(&argp->ffh));
+	dprintk("nfsd:    %s %.*s\n",
+		SVCFH_fmt(&argp->tfh),
+		argp->tlen,
+		argp->tname);
+
+	nfserr = nfsd_link(rqstp, &argp->tfh, argp->tname, argp->tlen,
+				  &argp->ffh);
+	fh_put(&argp->ffh);
+	fh_put(&argp->tfh);
+	return nfserr;
+}
+
+static int
+nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp,
+				          void			  *resp)
+{
+	struct svc_fh	newfh;
+	int		nfserr;
+
+	dprintk("nfsd: SYMLINK  %s %.*s -> %.*s\n",
+		SVCFH_fmt(&argp->ffh), argp->flen, argp->fname,
+		argp->tlen, argp->tname);
+
+	fh_init(&newfh, NFS_FHSIZE);
+	/*
+	 * Create the link, look up new file and set attrs.
+	 */
+	nfserr = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
+						 argp->tname, argp->tlen,
+				 		 &newfh, &argp->attrs);
+
+
+	fh_put(&argp->ffh);
+	fh_put(&newfh);
+	return nfserr;
+}
+
+/*
+ * Make directory. This operation is not idempotent.
+ * N.B. After this call resp->fh needs an fh_put
+ */
+static int
+nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
+					struct nfsd_diropres   *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: MKDIR    %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
+
+	if (resp->fh.fh_dentry) {
+		printk(KERN_WARNING
+			"nfsd_proc_mkdir: response already verified??\n");
+	}
+
+	argp->attrs.ia_valid &= ~ATTR_SIZE;
+	fh_init(&resp->fh, NFS_FHSIZE);
+	nfserr = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
+				    &argp->attrs, S_IFDIR, 0, &resp->fh);
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * Remove a directory
+ */
+static int
+nfsd_proc_rmdir(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+				 	void		      *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: RMDIR    %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
+
+	nfserr = nfsd_unlink(rqstp, &argp->fh, S_IFDIR, argp->name, argp->len);
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * Read a portion of a directory.
+ */
+static int
+nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
+					  struct nfsd_readdirres  *resp)
+{
+	int		nfserr, count;
+	loff_t		offset;
+
+	dprintk("nfsd: READDIR  %s %d bytes at %d\n",
+		SVCFH_fmt(&argp->fh),		
+		argp->count, argp->cookie);
+
+	/* Shrink to the client read size */
+	count = (argp->count >> 2) - 2;
+
+	/* Make sure we've room for the NULL ptr & eof flag */
+	count -= 2;
+	if (count < 0)
+		count = 0;
+
+	resp->buffer = argp->buffer;
+	resp->offset = NULL;
+	resp->buflen = count;
+	resp->common.err = nfs_ok;
+	/* Read directory and encode entries on the fly */
+	offset = argp->cookie;
+	nfserr = nfsd_readdir(rqstp, &argp->fh, &offset, 
+			      &resp->common, nfssvc_encode_entry);
+
+	resp->count = resp->buffer - argp->buffer;
+	if (resp->offset)
+		*resp->offset = htonl(offset);
+
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * Get file system info
+ */
+static int
+nfsd_proc_statfs(struct svc_rqst * rqstp, struct nfsd_fhandle   *argp,
+					  struct nfsd_statfsres *resp)
+{
+	int	nfserr;
+
+	dprintk("nfsd: STATFS   %s\n", SVCFH_fmt(&argp->fh));
+
+	nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats);
+	fh_put(&argp->fh);
+	return nfserr;
+}
+
+/*
+ * NFSv2 Server procedures.
+ * Only the results of non-idempotent operations are cached.
+ */
+#define nfsd_proc_none		NULL
+#define nfssvc_release_none	NULL
+struct nfsd_void { int dummy; };
+
+#define PROC(name, argt, rest, relt, cache, respsize)	\
+ { (svc_procfunc) nfsd_proc_##name,		\
+   (kxdrproc_t) nfssvc_decode_##argt,		\
+   (kxdrproc_t) nfssvc_encode_##rest,		\
+   (kxdrproc_t) nfssvc_release_##relt,		\
+   sizeof(struct nfsd_##argt),			\
+   sizeof(struct nfsd_##rest),			\
+   0,						\
+   cache,					\
+   respsize,				       	\
+ }
+
+#define ST 1		/* status */
+#define FH 8		/* filehandle */
+#define	AT 18		/* attributes */
+
+static struct svc_procedure		nfsd_procedures2[18] = {
+  PROC(null,	 void,		void,		none,		RC_NOCACHE, ST),
+  PROC(getattr,	 fhandle,	attrstat,	fhandle,	RC_NOCACHE, ST+AT),
+  PROC(setattr,  sattrargs,	attrstat,	fhandle,	RC_REPLBUFF, ST+AT),
+  PROC(none,	 void,		void,		none,		RC_NOCACHE, ST),
+  PROC(lookup,	 diropargs,	diropres,	fhandle,	RC_NOCACHE, ST+FH+AT),
+  PROC(readlink, readlinkargs,	readlinkres,	none,		RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4),
+  PROC(read,	 readargs,	readres,	fhandle,	RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE),
+  PROC(none,	 void,		void,		none,		RC_NOCACHE, ST),
+  PROC(write,	 writeargs,	attrstat,	fhandle,	RC_REPLBUFF, ST+AT),
+  PROC(create,	 createargs,	diropres,	fhandle,	RC_REPLBUFF, ST+FH+AT),
+  PROC(remove,	 diropargs,	void,		none,		RC_REPLSTAT, ST),
+  PROC(rename,	 renameargs,	void,		none,		RC_REPLSTAT, ST),
+  PROC(link,	 linkargs,	void,		none,		RC_REPLSTAT, ST),
+  PROC(symlink,	 symlinkargs,	void,		none,		RC_REPLSTAT, ST),
+  PROC(mkdir,	 createargs,	diropres,	fhandle,	RC_REPLBUFF, ST+FH+AT),
+  PROC(rmdir,	 diropargs,	void,		none,		RC_REPLSTAT, ST),
+  PROC(readdir,	 readdirargs,	readdirres,	none,		RC_NOCACHE, 0),
+  PROC(statfs,	 fhandle,	statfsres,	none,		RC_NOCACHE, ST+5),
+};
+
+
+struct svc_version	nfsd_version2 = {
+		.vs_vers	= 2,
+		.vs_nproc	= 18,
+		.vs_proc	= nfsd_procedures2,
+		.vs_dispatch	= nfsd_dispatch,
+		.vs_xdrsize	= NFS2_SVC_XDRSIZE,
+};
+
+/*
+ * Map errnos to NFS errnos.
+ */
+int
+nfserrno (int errno)
+{
+	static struct {
+		int	nfserr;
+		int	syserr;
+	} nfs_errtbl[] = {
+		{ nfs_ok, 0 },
+		{ nfserr_perm, -EPERM },
+		{ nfserr_noent, -ENOENT },
+		{ nfserr_io, -EIO },
+		{ nfserr_nxio, -ENXIO },
+		{ nfserr_acces, -EACCES },
+		{ nfserr_exist, -EEXIST },
+		{ nfserr_xdev, -EXDEV },
+		{ nfserr_mlink, -EMLINK },
+		{ nfserr_nodev, -ENODEV },
+		{ nfserr_notdir, -ENOTDIR },
+		{ nfserr_isdir, -EISDIR },
+		{ nfserr_inval, -EINVAL },
+		{ nfserr_fbig, -EFBIG },
+		{ nfserr_nospc, -ENOSPC },
+		{ nfserr_rofs, -EROFS },
+		{ nfserr_mlink, -EMLINK },
+		{ nfserr_nametoolong, -ENAMETOOLONG },
+		{ nfserr_notempty, -ENOTEMPTY },
+#ifdef EDQUOT
+		{ nfserr_dquot, -EDQUOT },
+#endif
+		{ nfserr_stale, -ESTALE },
+		{ nfserr_jukebox, -ETIMEDOUT },
+		{ nfserr_dropit, -EAGAIN },
+		{ nfserr_dropit, -ENOMEM },
+		{ nfserr_badname, -ESRCH },
+		{ nfserr_io, -ETXTBSY },
+		{ -1, -EIO }
+	};
+	int	i;
+
+	for (i = 0; nfs_errtbl[i].nfserr != -1; i++) {
+		if (nfs_errtbl[i].syserr == errno)
+			return nfs_errtbl[i].nfserr;
+	}
+	printk (KERN_INFO "nfsd: non-standard errno: %d\n", errno);
+	return nfserr_io;
+}
+
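
For reference, the PROC() macro above expands each table entry in place;
for example PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT)
becomes, roughly (an illustrative expansion, not part of the patch):

	{ (svc_procfunc) nfsd_proc_getattr,
	  (kxdrproc_t) nfssvc_decode_fhandle,
	  (kxdrproc_t) nfssvc_encode_attrstat,
	  (kxdrproc_t) nfssvc_release_fhandle,
	  sizeof(struct nfsd_fhandle),
	  sizeof(struct nfsd_attrstat),
	  0,
	  RC_NOCACHE,
	  ST+AT,
	}
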
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
new file mode 100644
index 0000000..3955165
--- /dev/null
+++ b/fs/nfsd/nfssvc.c
@@ -0,0 +1,385 @@
+/*
+ * linux/fs/nfsd/nfssvc.c
+ *
+ * Central processing for nfsd.
+ *
+ * Authors:	Olaf Kirch (okir@monad.swb.de)
+ *
+ * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/nfs.h>
+#include <linux/in.h>
+#include <linux/uio.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/fs_struct.h>
+
+#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/stats.h>
+#include <linux/nfsd/cache.h>
+#include <linux/lockd/bind.h>
+
+#define NFSDDBG_FACILITY	NFSDDBG_SVC
+
+/* these signals will be delivered to an nfsd thread 
+ * when handling a request
+ */
+#define ALLOWED_SIGS	(sigmask(SIGKILL))
+/* these signals will be delivered to an nfsd thread
+ * when not handling a request. i.e. when waiting
+ */
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
+/* if the last thread dies with SIGHUP, then the exports table is
+ * left unchanged ( like 2.4-{0-9} ).  Any other signal will clear
+ * the exports table (like 2.2).
+ */
+#define	SIG_NOCLEAN	SIGHUP
+
+extern struct svc_program	nfsd_program;
+static void			nfsd(struct svc_rqst *rqstp);
+struct timeval			nfssvc_boot;
+static struct svc_serv 		*nfsd_serv;
+static atomic_t			nfsd_busy;
+static unsigned long		nfsd_last_call;
+static DEFINE_SPINLOCK(nfsd_call_lock);
+
+struct nfsd_list {
+	struct list_head 	list;
+	struct task_struct	*task;
+};
+static struct list_head nfsd_list = LIST_HEAD_INIT(nfsd_list);
+
+/*
+ * Maximum number of nfsd processes
+ */
+#define	NFSD_MAXSERVS		8192
+
+int nfsd_nrthreads(void)
+{
+	if (nfsd_serv == NULL)
+		return 0;
+	else
+		return nfsd_serv->sv_nrthreads;
+}
+
+int
+nfsd_svc(unsigned short port, int nrservs)
+{
+	int	error;
+	int	none_left;	
+	struct list_head *victim;
+	
+	lock_kernel();
+	dprintk("nfsd: creating service\n");
+	error = -EINVAL;
+	if (nrservs <= 0)
+		nrservs = 0;
+	if (nrservs > NFSD_MAXSERVS)
+		nrservs = NFSD_MAXSERVS;
+	
+	/* Readahead param cache - will no-op if it already exists */
+	error =	nfsd_racache_init(2*nrservs);
+	if (error<0)
+		goto out;
+	error = nfs4_state_init();
+	if (error<0)
+		goto out;
+	if (!nfsd_serv) {
+		atomic_set(&nfsd_busy, 0);
+		error = -ENOMEM;
+		nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE);
+		if (nfsd_serv == NULL)
+			goto out;
+		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
+		if (error < 0)
+			goto failure;
+
+#ifdef CONFIG_NFSD_TCP
+		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
+		if (error < 0)
+			goto failure;
+#endif
+		do_gettimeofday(&nfssvc_boot);		/* record boot time */
+	} else
+		nfsd_serv->sv_nrthreads++;
+	nrservs -= (nfsd_serv->sv_nrthreads-1);
+	while (nrservs > 0) {
+		nrservs--;
+		__module_get(THIS_MODULE);
+		error = svc_create_thread(nfsd, nfsd_serv);
+		if (error < 0) {
+			module_put(THIS_MODULE);
+			break;
+		}
+	}
+	victim = nfsd_list.next;
+	while (nrservs < 0 && victim != &nfsd_list) {
+		struct nfsd_list *nl =
+			list_entry(victim,struct nfsd_list, list);
+		victim = victim->next;
+		send_sig(SIG_NOCLEAN, nl->task, 1);
+		nrservs++;
+	}
+ failure:
+	none_left = (nfsd_serv->sv_nrthreads == 1);
+	svc_destroy(nfsd_serv);		/* Release server */
+	if (none_left) {
+		nfsd_serv = NULL;
+		nfsd_racache_shutdown();
+		nfs4_state_shutdown();
+	}
+ out:
+	unlock_kernel();
+	return error;
+}
+
+static inline void
+update_thread_usage(int busy_threads)
+{
+	unsigned long prev_call;
+	unsigned long diff;
+	int decile;
+
+	spin_lock(&nfsd_call_lock);
+	prev_call = nfsd_last_call;
+	nfsd_last_call = jiffies;
+	decile = busy_threads*10/nfsdstats.th_cnt;
+	if (decile>0 && decile <= 10) {
+		diff = nfsd_last_call - prev_call;
+		if ( (nfsdstats.th_usage[decile-1] += diff) >= NFSD_USAGE_WRAP)
+			nfsdstats.th_usage[decile-1] -= NFSD_USAGE_WRAP;
+		if (decile == 10)
+			nfsdstats.th_fullcnt++;
+	}
+	spin_unlock(&nfsd_call_lock);
+}
+
+/*
+ * This is the NFS server kernel thread
+ */
+static void
+nfsd(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	struct fs_struct *fsp;
+	int		err;
+	struct nfsd_list me;
+	sigset_t shutdown_mask, allowed_mask;
+
+	/* Lock module and set up kernel thread */
+	lock_kernel();
+	daemonize("nfsd");
+
+	/* After daemonize() this kernel thread shares current->fs
+	 * with the init process. We need to create files with a
+	 * umask of 0 instead of init's umask. */
+	fsp = copy_fs_struct(current->fs);
+	if (!fsp) {
+		printk("Unable to start nfsd thread: out of memory\n");
+		goto out;
+	}
+	exit_fs(current);
+	current->fs = fsp;
+	current->fs->umask = 0;
+
+	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
+	siginitsetinv(&allowed_mask, ALLOWED_SIGS);
+
+	nfsdstats.th_cnt++;
+
+	lockd_up();				/* start lockd */
+
+	me.task = current;
+	list_add(&me.list, &nfsd_list);
+
+	unlock_kernel();
+
+	/*
+	 * We want less throttling in balance_dirty_pages() so that nfs to
+	 * localhost doesn't cause nfsd to lock up due to all the client's
+	 * dirty pages.
+	 */
+	current->flags |= PF_LESS_THROTTLE;
+
+	/*
+	 * The main request loop
+	 */
+	for (;;) {
+		/* Block all but the shutdown signals */
+		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);
+
+		/*
+		 * Find a socket with data available and call its
+		 * recvfrom routine.
+		 */
+		while ((err = svc_recv(serv, rqstp,
+				       60*60*HZ)) == -EAGAIN)
+			;
+		if (err < 0)
+			break;
+		update_thread_usage(atomic_read(&nfsd_busy));
+		atomic_inc(&nfsd_busy);
+
+		/* Lock the export hash tables for reading. */
+		exp_readlock();
+
+		/* Process request with signals blocked.  */
+		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);
+
+		svc_process(serv, rqstp);
+
+		/* Unlock export hash tables */
+		exp_readunlock();
+		update_thread_usage(atomic_read(&nfsd_busy));
+		atomic_dec(&nfsd_busy);
+	}
+
+	if (err != -EINTR) {
+		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
+	} else {
+		unsigned int	signo;
+
+		for (signo = 1; signo <= _NSIG; signo++)
+			if (sigismember(&current->pending.signal, signo) &&
+			    !sigismember(&current->blocked, signo))
+				break;
+		err = signo;
+	}
+
+	lock_kernel();
+
+	/* Release lockd */
+	lockd_down();
+
+	/* Check if this is last thread */
+	if (serv->sv_nrthreads==1) {
+		
+		printk(KERN_WARNING "nfsd: last server has exited\n");
+		if (err != SIG_NOCLEAN) {
+			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
+			nfsd_export_flush();
+		}
+		nfsd_serv = NULL;
+	        nfsd_racache_shutdown();	/* release read-ahead cache */
+		nfs4_state_shutdown();
+	}
+	list_del(&me.list);
+	nfsdstats.th_cnt --;
+
+out:
+	/* Release the thread */
+	svc_exit_thread(rqstp);
+
+	/* Release module */
+	module_put_and_exit(0);
+}
+
+int
+nfsd_dispatch(struct svc_rqst *rqstp, u32 *statp)
+{
+	struct svc_procedure	*proc;
+	kxdrproc_t		xdr;
+	u32			nfserr;
+	u32			*nfserrp;
+
+	dprintk("nfsd_dispatch: vers %d proc %d\n",
+				rqstp->rq_vers, rqstp->rq_proc);
+	proc = rqstp->rq_procinfo;
+
+	/* Check whether we have this call in the cache. */
+	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
+	case RC_INTR:
+	case RC_DROPIT:
+		return 0;
+	case RC_REPLY:
+		return 1;
+	case RC_DOIT:;
+		/* do it */
+	}
+
+	/* Decode arguments */
+	xdr = proc->pc_decode;
+	if (xdr && !xdr(rqstp, (u32*)rqstp->rq_arg.head[0].iov_base,
+			rqstp->rq_argp)) {
+		dprintk("nfsd: failed to decode arguments!\n");
+		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+		*statp = rpc_garbage_args;
+		return 1;
+	}
+
+	/* need to grab the location to store the status, as
+	 * nfsv4 does some encoding while processing 
+	 */
+	nfserrp = rqstp->rq_res.head[0].iov_base
+		+ rqstp->rq_res.head[0].iov_len;
+	rqstp->rq_res.head[0].iov_len += sizeof(u32);
+
+	/* Now call the procedure handler, and encode NFS status. */
+	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+	if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
+		nfserr = nfserr_dropit;
+	if (nfserr == nfserr_dropit) {
+		dprintk("nfsd: Dropping request due to malloc failure!\n");
+		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+		return 0;
+	}
+
+	if (rqstp->rq_proc != 0)
+		*nfserrp++ = nfserr;
+
+	/* Encode result.
+	 * For NFSv2, additional info is never returned in case of an error.
+	 */
+	if (!(nfserr && rqstp->rq_vers == 2)) {
+		xdr = proc->pc_encode;
+		if (xdr && !xdr(rqstp, nfserrp,
+				rqstp->rq_resp)) {
+			/* Failed to encode result. Release cache entry */
+			dprintk("nfsd: failed to encode result!\n");
+			nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+			*statp = rpc_system_err;
+			return 1;
+		}
+	}
+
+	/* Store reply in cache. */
+	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+	return 1;
+}
+
+extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
+
+static struct svc_version *	nfsd_version[] = {
+	[2] = &nfsd_version2,
+#if defined(CONFIG_NFSD_V3)
+	[3] = &nfsd_version3,
+#endif
+#if defined(CONFIG_NFSD_V4)
+	[4] = &nfsd_version4,
+#endif
+};
+
+#define NFSD_NRVERS		(sizeof(nfsd_version)/sizeof(nfsd_version[0]))
+struct svc_program		nfsd_program = {
+	.pg_prog		= NFS_PROGRAM,		/* program number */
+	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
+	.pg_vers		= nfsd_version,		/* version table */
+	.pg_name		= "nfsd",		/* program name */
+	.pg_class		= "nfsd",		/* authentication class */
+	.pg_stats		= &nfsd_svcstats,	/* server statistics */
+	.pg_authenticate	= &svc_set_client,	/* export authentication */
+
+};
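
A note on the thread accounting in nfsd_svc() above: sv_nrthreads counts
the caller's temporary reference plus one per running nfsd thread, so after
"nrservs -= (nfsd_serv->sv_nrthreads-1)" the value of nrservs is the delta
from the current thread count.  For example (assuming four nfsd threads are
already running), nfsd_svc(2049, 6) leaves nrservs == 2 and spawns two more
threads via svc_create_thread(), while nfsd_svc(2049, 2) leaves
nrservs == -2 and sends SIG_NOCLEAN (SIGHUP) to two entries on nfsd_list;
SIG_NOCLEAN is used so that even a last thread exiting this way leaves the
export table untouched, per the comment at the top of the file.
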
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
new file mode 100644
index 0000000..948b082
--- /dev/null
+++ b/fs/nfsd/nfsxdr.c
@@ -0,0 +1,511 @@
+/*
+ * linux/fs/nfsd/nfsxdr.c
+ *
+ * XDR support for nfsd
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/nfs.h>
+#include <linux/vfs.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/mm.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_XDR
+
+
+#ifdef NFSD_OPTIMIZE_SPACE
+# define inline
+#endif
+
+/*
+ * Mapping of S_IF* types to NFS file types
+ */
+static u32	nfs_ftypes[] = {
+	NFNON,  NFCHR,  NFCHR, NFBAD,
+	NFDIR,  NFBAD,  NFBLK, NFBAD,
+	NFREG,  NFBAD,  NFLNK, NFBAD,
+	NFSOCK, NFBAD,  NFLNK, NFBAD,
+};
+
+
+/*
+ * XDR functions for basic NFS types
+ */
+static inline u32 *
+decode_fh(u32 *p, struct svc_fh *fhp)
+{
+	fh_init(fhp, NFS_FHSIZE);
+	memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE);
+	fhp->fh_handle.fh_size = NFS_FHSIZE;
+
+	/* FIXME: Look up export pointer here and verify
+	 * Sun Secure RPC if requested */
+	return p + (NFS_FHSIZE >> 2);
+}
+
+static inline u32 *
+encode_fh(u32 *p, struct svc_fh *fhp)
+{
+	memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
+	return p + (NFS_FHSIZE>> 2);
+}
+
+/*
+ * Decode a file name and make sure that the path contains
+ * no slashes or null bytes.
+ */
+static inline u32 *
+decode_filename(u32 *p, char **namp, int *lenp)
+{
+	char		*name;
+	int		i;
+
+	if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) {
+		for (i = 0, name = *namp; i < *lenp; i++, name++) {
+			if (*name == '\0' || *name == '/')
+				return NULL;
+		}
+	}
+
+	return p;
+}
+
+static inline u32 *
+decode_pathname(u32 *p, char **namp, int *lenp)
+{
+	char		*name;
+	int		i;
+
+	if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXPATHLEN)) != NULL) {
+		for (i = 0, name = *namp; i < *lenp; i++, name++) {
+			if (*name == '\0')
+				return NULL;
+		}
+	}
+
+	return p;
+}
+
+static inline u32 *
+decode_sattr(u32 *p, struct iattr *iap)
+{
+	u32	tmp, tmp1;
+
+	iap->ia_valid = 0;
+
+	/* Sun client bug compatibility check: some sun clients seem to
+	 * put 0xffff in the mode field when they mean 0xffffffff.
+	 * Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah.
+	 */
+	if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) {
+		iap->ia_valid |= ATTR_MODE;
+		iap->ia_mode = tmp;
+	}
+	if ((tmp = ntohl(*p++)) != (u32)-1) {
+		iap->ia_valid |= ATTR_UID;
+		iap->ia_uid = tmp;
+	}
+	if ((tmp = ntohl(*p++)) != (u32)-1) {
+		iap->ia_valid |= ATTR_GID;
+		iap->ia_gid = tmp;
+	}
+	if ((tmp = ntohl(*p++)) != (u32)-1) {
+		iap->ia_valid |= ATTR_SIZE;
+		iap->ia_size = tmp;
+	}
+	tmp  = ntohl(*p++); tmp1 = ntohl(*p++);
+	if (tmp != (u32)-1 && tmp1 != (u32)-1) {
+		iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
+		iap->ia_atime.tv_sec = tmp;
+		iap->ia_atime.tv_nsec = tmp1 * 1000; 
+	}
+	tmp  = ntohl(*p++); tmp1 = ntohl(*p++);
+	if (tmp != (u32)-1 && tmp1 != (u32)-1) {
+		iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
+		iap->ia_mtime.tv_sec = tmp;
+		iap->ia_mtime.tv_nsec = tmp1 * 1000; 
+		/*
+		 * Passing the invalid value useconds=1000000 for mtime
+		 * is a Sun convention for "set both mtime and atime to
+		 * current server time".  It's needed to make permissions
+		 * checks for the "touch" program across v2 mounts to
+		 * Solaris and Irix boxes work correctly. See description of
+		 * sattr in section 6.1 of "NFS Illustrated" by
+		 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
+		 */
+		if (tmp1 == 1000000)
+			iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET);
+	}
+	return p;
+}
+
+static inline u32 *
+encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+{
+	struct vfsmount *mnt = fhp->fh_export->ex_mnt;
+	struct dentry	*dentry = fhp->fh_dentry;
+	struct kstat stat;
+	int type;
+	struct timespec time;
+
+	vfs_getattr(mnt, dentry, &stat);
+	type = (stat.mode & S_IFMT);
+
+	*p++ = htonl(nfs_ftypes[type >> 12]);
+	*p++ = htonl((u32) stat.mode);
+	*p++ = htonl((u32) stat.nlink);
+	*p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
+	*p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
+
+	if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) {
+		*p++ = htonl(NFS_MAXPATHLEN);
+	} else {
+		*p++ = htonl((u32) stat.size);
+	}
+	*p++ = htonl((u32) stat.blksize);
+	if (S_ISCHR(type) || S_ISBLK(type))
+		*p++ = htonl(new_encode_dev(stat.rdev));
+	else
+		*p++ = htonl(0xffffffff);
+	*p++ = htonl((u32) stat.blocks);
+	if (is_fsid(fhp, rqstp->rq_reffh))
+		*p++ = htonl((u32) fhp->fh_export->ex_fsid);
+	else
+		*p++ = htonl(new_encode_dev(stat.dev));
+	*p++ = htonl((u32) stat.ino);
+	*p++ = htonl((u32) stat.atime.tv_sec);
+	*p++ = htonl(stat.atime.tv_nsec ? stat.atime.tv_nsec / 1000 : 0);
+	lease_get_mtime(dentry->d_inode, &time); 
+	*p++ = htonl((u32) time.tv_sec);
+	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); 
+	*p++ = htonl((u32) stat.ctime.tv_sec);
+	*p++ = htonl(stat.ctime.tv_nsec ? stat.ctime.tv_nsec / 1000 : 0);
+
+	return p;
+}
+
+
+/*
+ * XDR decode functions
+ */
+int
+nfssvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_fhandle(struct svc_rqst *rqstp, u32 *p, struct nfsd_fhandle *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_sattrargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_sattrargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_sattr(p, &args->attrs)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_diropargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_diropargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_readargs *args)
+{
+	unsigned int len;
+	int v,pn;
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+
+	args->offset    = ntohl(*p++);
+	len = args->count     = ntohl(*p++);
+	p++; /* totalcount - unused */
+
+	if (len > NFSSVC_MAXBLKSIZE)
+		len = NFSSVC_MAXBLKSIZE;
+
+	/* set up somewhere to store response.
+	 * We take pages, put them on reslist and include in iovec
+	 */
+	v=0;
+	while (len > 0) {
+		pn=rqstp->rq_resused;
+		svc_take_page(rqstp);
+		args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
+		args->vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE;
+		len -= args->vec[v].iov_len;
+		v++;
+	}
+	args->vlen = v;
+	return xdr_argsize_check(rqstp, p);
+}
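+
+/*
+ * For example, a READ asking for count = 5000 bytes (4096-byte pages)
+ * reserves two response pages above: vec[0] covers 4096 bytes and
+ * vec[1] the remaining 904, giving args->vlen = 2.  The length is
+ * clamped to NFSSVC_MAXBLKSIZE first, so an oversized request never
+ * pins extra pages.
+ */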
+
+int
+nfssvc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_writeargs *args)
+{
+	unsigned int len;
+	int v;
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+
+	p++;				/* beginoffset */
+	args->offset = ntohl(*p++);	/* offset */
+	p++;				/* totalcount */
+	len = args->len = ntohl(*p++);
+	args->vec[0].iov_base = (void*)p;
+	args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
+				(((void*)p) - rqstp->rq_arg.head[0].iov_base);
+	if (len > NFSSVC_MAXBLKSIZE)
+		len = NFSSVC_MAXBLKSIZE;
+	v = 0;
+	while (len > args->vec[v].iov_len) {
+		len -= args->vec[v].iov_len;
+		v++;
+		args->vec[v].iov_base = page_address(rqstp->rq_argpages[v]);
+		args->vec[v].iov_len = PAGE_SIZE;
+	}
+	args->vec[v].iov_len = len;
+	args->vlen = v+1;
+	return args->vec[0].iov_len > 0;
+}
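+
+/*
+ * For example, an 8192-byte write whose RPC header leaves 600 bytes of
+ * payload in head[0] (figures illustrative, 4096-byte pages) ends up
+ * with vec[0] = 600 bytes, vec[1] = 4096 bytes from the next argument
+ * page, and vec[2] trimmed to the remaining 3496 bytes, so vlen = 3.
+ */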
+
+int
+nfssvc_decode_createargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_createargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh))
+	 || !(p = decode_filename(p, &args->name, &args->len))
+	 || !(p = decode_sattr(p, &args->attrs)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_renameargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_renameargs *args)
+{
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_filename(p, &args->fname, &args->flen))
+	 || !(p = decode_fh(p, &args->tfh))
+	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, u32 *p, struct nfsd_readlinkargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	svc_take_page(rqstp);
+	args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_linkargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_linkargs *args)
+{
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_fh(p, &args->tfh))
+	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_symlinkargs *args)
+{
+	if (!(p = decode_fh(p, &args->ffh))
+	 || !(p = decode_filename(p, &args->fname, &args->flen))
+	 || !(p = decode_pathname(p, &args->tname, &args->tlen))
+	 || !(p = decode_sattr(p, &args->attrs)))
+		return 0;
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+int
+nfssvc_decode_readdirargs(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_readdirargs *args)
+{
+	if (!(p = decode_fh(p, &args->fh)))
+		return 0;
+	args->cookie = ntohl(*p++);
+	args->count  = ntohl(*p++);
+	if (args->count > PAGE_SIZE)
+		args->count = PAGE_SIZE;
+
+	svc_take_page(rqstp);
+	args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
+
+	return xdr_argsize_check(rqstp, p);
+}
+
+/*
+ * XDR encode functions
+ */
+int
+nfssvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nfssvc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_attrstat *resp)
+{
+	p = encode_fattr(rqstp, p, &resp->fh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nfssvc_encode_diropres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_diropres *resp)
+{
+	p = encode_fh(p, &resp->fh);
+	p = encode_fattr(rqstp, p, &resp->fh);
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nfssvc_encode_readlinkres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_readlinkres *resp)
+{
+	*p++ = htonl(resp->len);
+	xdr_ressize_check(rqstp, p);
+	rqstp->rq_res.page_len = resp->len;
+	if (resp->len & 3) {
+		/* need to pad the tail */
+		rqstp->rq_restailpage = 0;
+		rqstp->rq_res.tail[0].iov_base = p;
+		*p = 0;
+		rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
+	}
+	return 1;
+}
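+
+/*
+ * The tail above keeps the reply quad-aligned: for a 5-byte link
+ * target, resp->len & 3 == 1, so a 3-byte zeroed tail is appended and
+ * the opaque data occupies 8 bytes on the wire.  nfssvc_encode_readres()
+ * below pads the read data the same way.
+ */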
+
+int
+nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_readres *resp)
+{
+	p = encode_fattr(rqstp, p, &resp->fh);
+	*p++ = htonl(resp->count);
+	xdr_ressize_check(rqstp, p);
+
+	/* now update rqstp->rq_res to reflect data as well */
+	rqstp->rq_res.page_len = resp->count;
+	if (resp->count & 3) {
+		/* need to pad the tail */
+		rqstp->rq_restailpage = 0;
+		rqstp->rq_res.tail[0].iov_base = p;
+		*p = 0;
+		rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3);
+	}
+	return 1;
+}
+
+int
+nfssvc_encode_readdirres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_readdirres *resp)
+{
+	xdr_ressize_check(rqstp, p);
+	p = resp->buffer;
+	*p++ = 0;			/* no more entries */
+	*p++ = htonl((resp->common.err == nfserr_eof));
+	rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1;
+
+	return 1;
+}
+
+int
+nfssvc_encode_statfsres(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_statfsres *resp)
+{
+	struct kstatfs	*stat = &resp->stats;
+
+	*p++ = htonl(NFSSVC_MAXBLKSIZE);	/* max transfer size */
+	*p++ = htonl(stat->f_bsize);
+	*p++ = htonl(stat->f_blocks);
+	*p++ = htonl(stat->f_bfree);
+	*p++ = htonl(stat->f_bavail);
+	return xdr_ressize_check(rqstp, p);
+}
+
+int
+nfssvc_encode_entry(struct readdir_cd *ccd, const char *name,
+		    int namlen, loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct nfsd_readdirres *cd = container_of(ccd, struct nfsd_readdirres, common);
+	u32	*p = cd->buffer;
+	int	buflen, slen;
+
+	/*
+	dprintk("nfsd: entry(%.*s off %ld ino %ld)\n",
+			namlen, name, offset, ino);
+	 */
+
+	if (offset > ~((u32) 0)) {
+		cd->common.err = nfserr_fbig;
+		return -EINVAL;
+	}
+	if (cd->offset)
+		*cd->offset = htonl(offset);
+	if (namlen > NFS2_MAXNAMLEN)
+		namlen = NFS2_MAXNAMLEN;/* truncate filename */
+
+	slen = XDR_QUADLEN(namlen);
+	if ((buflen = cd->buflen - slen - 4) < 0) {
+		cd->common.err = nfserr_toosmall;
+		return -EINVAL;
+	}
+	*p++ = xdr_one;				/* mark entry present */
+	*p++ = htonl((u32) ino);		/* file id */
+	p    = xdr_encode_array(p, name, namlen);/* name length & name */
+	cd->offset = p;			/* remember pointer */
+	*p++ = ~(u32) 0;		/* offset of next entry */
+
+	cd->buflen = buflen;
+	cd->buffer = p;
+	cd->common.err = nfs_ok;
+	return 0;
+}
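+
+/*
+ * Buffer accounting example: for a name like "foo" (namlen 3),
+ * XDR_QUADLEN(3) is 1, so the entry consumes slen + 4 = 5 words: the
+ * "present" marker, the file id, the name length, one quad of padded
+ * name bytes and the next-entry offset.  cd->buflen, kept in 32-bit
+ * words, shrinks by the same five.
+ */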
+
+/*
+ * XDR release functions
+ */
+int
+nfssvc_release_fhandle(struct svc_rqst *rqstp, u32 *p,
+					struct nfsd_fhandle *resp)
+{
+	fh_put(&resp->fh);
+	return 1;
+}
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
new file mode 100644
index 0000000..1cf955b
--- /dev/null
+++ b/fs/nfsd/stats.c
@@ -0,0 +1,101 @@
+/*
+ * linux/fs/nfsd/stats.c
+ *
+ * procfs-based user access to knfsd statistics
+ *
+ * /proc/net/rpc/nfsd
+ *
+ * Format:
+ *	rc <hits> <misses> <nocache>
+ *			statistics for the reply cache
+ *	fh <stale> <total-lookups> <anonlookups> <dir-not-in-dcache> <nondir-not-in-dcache>
+ *			statistics for filehandle lookup
+ *	io <bytes-read> <bytes-written>
+ *			statistics for IO throughput
+ *	th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%> 
+ *			time (seconds) when nfsd thread usage was above thresholds
+ *			and number of times that all threads were in use
+ *	ra cache-size  <10%  <20%  <30% ... <100% not-found
+ *			number of times that a read-ahead entry was found that deep in
+ *			the cache.
+ *	plus generic RPC stats (see net/sunrpc/stats.c)
+ *
+ * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
+ */
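+
+/*
+ * A /proc/net/rpc/nfsd dump therefore looks roughly like this (the
+ * figures are illustrative only):
+ *
+ *	rc 0 112 2086
+ *	fh 0 2198 0 0 0
+ *	io 8156032 524288
+ *	th 8 0 0.000 0.120 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ *	ra 32 114 5 2 0 0 0 0 0 0 0 9
+ *	...followed by the generic net/rpc/proc lines from svc_seq_show()
+ */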
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/stats.h>
+
+struct nfsd_stats	nfsdstats;
+struct svc_stat		nfsd_svcstats = {
+	.program	= &nfsd_program,
+};
+
+static int nfsd_proc_show(struct seq_file *seq, void *v)
+{
+	int i;
+
+	seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n",
+		      nfsdstats.rchits,
+		      nfsdstats.rcmisses,
+		      nfsdstats.rcnocache,
+		      nfsdstats.fh_stale,
+		      nfsdstats.fh_lookup,
+		      nfsdstats.fh_anon,
+		      nfsdstats.fh_nocache_dir,
+		      nfsdstats.fh_nocache_nondir,
+		      nfsdstats.io_read,
+		      nfsdstats.io_write);
+	/* thread usage: */
+	seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt);
+	for (i=0; i<10; i++) {
+		unsigned int jifs = nfsdstats.th_usage[i];
+		unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ;
+		seq_printf(seq, " %u.%03u", sec, msec);
+	}
+
+	/* newline and ra-cache */
+	seq_printf(seq, "\nra %u", nfsdstats.ra_size);
+	for (i=0; i<11; i++)
+		seq_printf(seq, " %u", nfsdstats.ra_depth[i]);
+	seq_putc(seq, '\n');
+	
+	/* show my rpc info */
+	svc_seq_show(seq, &nfsd_svcstats);
+
+	return 0;
+}
+
+static int nfsd_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nfsd_proc_show, NULL);
+}
+
+static struct file_operations nfsd_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = nfsd_proc_open,
+	.read  = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void
+nfsd_stat_init(void)
+{
+	svc_proc_register(&nfsd_svcstats, &nfsd_proc_fops);
+}
+
+void
+nfsd_stat_shutdown(void)
+{
+	svc_proc_unregister("nfsd");
+}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
new file mode 100644
index 0000000..e3e9d21
--- /dev/null
+++ b/fs/nfsd/vfs.c
@@ -0,0 +1,1859 @@
+#define MSNFS	/* HACK HACK */
+/*
+ * linux/fs/nfsd/vfs.c
+ *
+ * File operations used by nfsd. Some of these have been ripped from
+ * other parts of the kernel because they weren't exported, others
+ * are partial duplicates with added or changed functionality.
+ *
+ * Note that several functions dget() the dentry upon which they want
+ * to act, most notably those that create directory entries. Response
+ * dentries are dput()'d if necessary in the release callback.
+ * So if you notice code paths that apparently fail to dput() the
+ * dentry, don't worry--they have been taken care of.
+ *
+ * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
+ * Zero-copy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/major.h>
+#include <linux/ext2_fs.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/net.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/delay.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#ifdef CONFIG_NFSD_V3
+#include <linux/nfs3.h>
+#include <linux/nfsd/xdr3.h>
+#endif /* CONFIG_NFSD_V3 */
+#include <linux/nfsd/nfsfh.h>
+#include <linux/quotaops.h>
+#include <linux/dnotify.h>
+#ifdef CONFIG_NFSD_V4
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr_acl.h>
+#include <linux/xattr.h>
+#include <linux/nfs4.h>
+#include <linux/nfs4_acl.h>
+#include <linux/nfsd_idmap.h>
+#include <linux/security.h>
+#endif /* CONFIG_NFSD_V4 */
+
+#include <asm/uaccess.h>
+
+#define NFSDDBG_FACILITY		NFSDDBG_FILEOP
+#define NFSD_PARANOIA
+
+
+/* We must ignore files (but only files) which might have mandatory
+ * locks on them because there is no way to know if the accessor has
+ * the lock.
+ */
+#define IS_ISMNDLK(i)	(S_ISREG((i)->i_mode) && MANDATORY_LOCK(i))
+
+/*
+ * This is a cache of readahead params that help us choose the proper
+ * readahead strategy. Initially, we set all readahead parameters to 0
+ * and let the VFS handle things.
+ * If you increase the number of cached files very much, you'll need to
+ * add a hash table here.
+ */
+struct raparms {
+	struct raparms		*p_next;
+	unsigned int		p_count;
+	ino_t			p_ino;
+	dev_t			p_dev;
+	int			p_set;
+	struct file_ra_state	p_ra;
+};
+
+static struct raparms *		raparml;
+static struct raparms *		raparm_cache;
+
+/* 
+ * Called from nfsd_lookup and encode_dirent. Check if we have crossed 
+ * a mount point.
+ * Returns -EAGAIN leaving *dpp and *expp unchanged, 
+ *  or nfs_ok having possibly changed *dpp and *expp
+ */
+int
+nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, 
+		        struct svc_export **expp)
+{
+	struct svc_export *exp = *expp, *exp2 = NULL;
+	struct dentry *dentry = *dpp;
+	struct vfsmount *mnt = mntget(exp->ex_mnt);
+	struct dentry *mounts = dget(dentry);
+	int err = nfs_ok;
+
+	while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts));
+
+	exp2 = exp_get_by_name(exp->ex_client, mnt, mounts, &rqstp->rq_chandle);
+	if (IS_ERR(exp2)) {
+		err = PTR_ERR(exp2);
+		dput(mounts);
+		mntput(mnt);
+		goto out;
+	}
+	if (exp2 && ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2))) {
+		/* successfully crossed mount point */
+		exp_put(exp);
+		*expp = exp2;
+		dput(dentry);
+		*dpp = mounts;
+	} else {
+		if (exp2) exp_put(exp2);
+		dput(mounts);
+	}
+	mntput(mnt);
+out:
+	return err;
+}
+
+/*
+ * Look up one component of a pathname.
+ * N.B. After this call _both_ fhp and resfh need an fh_put
+ *
+ * If the lookup would cross a mountpoint, and the mounted filesystem
+ * is exported to the client with NFSEXP_NOHIDE, then the lookup is
+ * accepted as it stands and the mounted directory is
+ * returned. Otherwise the covered directory is returned.
+ * NOTE: this mountpoint crossing is not supported properly by all
+ *   clients and is explicitly disallowed for NFSv3
+ *      NeilBrown <neilb@cse.unsw.edu.au>
+ */
+int
+nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
+					int len, struct svc_fh *resfh)
+{
+	struct svc_export	*exp;
+	struct dentry		*dparent;
+	struct dentry		*dentry;
+	int			err;
+
+	dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
+
+	/* Obtain dentry and export. */
+	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_EXEC);
+	if (err)
+		return err;
+
+	dparent = fhp->fh_dentry;
+	exp  = fhp->fh_export;
+	exp_get(exp);
+
+	err = nfserr_acces;
+
+	/* Lookup the name, but don't follow links */
+	if (isdotent(name, len)) {
+		if (len==1)
+			dentry = dget(dparent);
+		else if (dparent != exp->ex_dentry) {
+			dentry = dget_parent(dparent);
+		} else  if (!EX_NOHIDE(exp))
+			dentry = dget(dparent); /* .. == . just like at / */
+		else {
+			/* checking mountpoint crossing is very different when stepping up */
+			struct svc_export *exp2 = NULL;
+			struct dentry *dp;
+			struct vfsmount *mnt = mntget(exp->ex_mnt);
+			dentry = dget(dparent);
+			while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry))
+				;
+			dp = dget_parent(dentry);
+			dput(dentry);
+			dentry = dp;
+
+			exp2 = exp_parent(exp->ex_client, mnt, dentry,
+					  &rqstp->rq_chandle);
+			if (IS_ERR(exp2)) {
+				err = PTR_ERR(exp2);
+				dput(dentry);
+				mntput(mnt);
+				goto out_nfserr;
+			}
+			if (!exp2) {
+				dput(dentry);
+				dentry = dget(dparent);
+			} else {
+				exp_put(exp);
+				exp = exp2;
+			}
+			mntput(mnt);
+		}
+	} else {
+		fh_lock(fhp);
+		dentry = lookup_one_len(name, dparent, len);
+		err = PTR_ERR(dentry);
+		if (IS_ERR(dentry))
+			goto out_nfserr;
+		/*
+		 * check if we have crossed a mount point ...
+		 */
+		if (d_mountpoint(dentry)) {
+			if ((err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
+				dput(dentry);
+				goto out_nfserr;
+			}
+		}
+	}
+	/*
+	 * Note: we compose the file handle now, but as the
+	 * dentry may be negative, it may need to be updated.
+	 */
+	err = fh_compose(resfh, exp, dentry, fhp);
+	if (!err && !dentry->d_inode)
+		err = nfserr_noent;
+	dput(dentry);
+out:
+	exp_put(exp);
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+/*
+ * Set various file attributes.
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+	     int check_guard, time_t guardtime)
+{
+	struct dentry	*dentry;
+	struct inode	*inode;
+	int		accmode = MAY_SATTR;
+	int		ftype = 0;
+	int		imode;
+	int		err;
+	int		size_change = 0;
+
+	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+		accmode |= MAY_WRITE|MAY_OWNER_OVERRIDE;
+	if (iap->ia_valid & ATTR_SIZE)
+		ftype = S_IFREG;
+
+	/* Get inode */
+	err = fh_verify(rqstp, fhp, ftype, accmode);
+	if (err || !iap->ia_valid)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	inode = dentry->d_inode;
+
+	/* NFSv2 does not differentiate between "set-[ac]time-to-now"
+	 * which only requires access, and "set-[ac]time-to-X" which
+	 * requires ownership.
+	 * So if it looks like it might be "set both to the same time which
+	 * is close to now", and if inode_change_ok fails, then we
+	 * convert to "set to now" instead of "set to explicit time"
+	 *
+	 * We only call inode_change_ok as the last test as technically
+	 * it is not an interface that we should be using.  It is only
+ * valid if the filesystem does not define its own i_op->setattr.
+	 */
+#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+#define	MAX_TOUCH_TIME_ERROR (30*60)
+	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET
+	    && iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec
+	    ) {
+	    /* Looks probable.  Now just make sure time is in the right ballpark.
+	     * Solaris, at least, doesn't seem to care what the time request is.
+	     * We require it be within 30 minutes of now.
+	     */
+	    time_t delta = iap->ia_atime.tv_sec - get_seconds();
+	    if (delta<0) delta = -delta;
+	    if (delta < MAX_TOUCH_TIME_ERROR &&
+		inode_change_ok(inode, iap) != 0) {
+		/* turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME
+		 * this will cause notify_change to set these times to "now"
+		 */
+		iap->ia_valid &= ~BOTH_TIME_SET;
+	    }
+	}
+	    
+	/* The size case is special. It changes the file as well as the attributes.  */
+	if (iap->ia_valid & ATTR_SIZE) {
+		if (iap->ia_size < inode->i_size) {
+			err = nfsd_permission(fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
+			if (err)
+				goto out;
+		}
+
+		/*
+		 * If we are changing the size of the file, then
+		 * we need to break all leases.
+		 */
+		err = break_lease(inode, FMODE_WRITE | O_NONBLOCK);
+		if (err == -EWOULDBLOCK)
+			err = -ETIMEDOUT;
+		if (err) /* ENOMEM or EWOULDBLOCK */
+			goto out_nfserr;
+
+		err = get_write_access(inode);
+		if (err)
+			goto out_nfserr;
+
+		size_change = 1;
+		err = locks_verify_truncate(inode, NULL, iap->ia_size);
+		if (err) {
+			put_write_access(inode);
+			goto out_nfserr;
+		}
+		DQUOT_INIT(inode);
+	}
+
+	imode = inode->i_mode;
+	if (iap->ia_valid & ATTR_MODE) {
+		iap->ia_mode &= S_IALLUGO;
+		imode = iap->ia_mode |= (imode & ~S_IALLUGO);
+	}
+
+	/* Revoke setuid/setgid bit on chown/chgrp */
+	if ((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid)
+		iap->ia_valid |= ATTR_KILL_SUID;
+	if ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)
+		iap->ia_valid |= ATTR_KILL_SGID;
+
+	/* Change the attributes. */
+
+	iap->ia_valid |= ATTR_CTIME;
+
+	err = nfserr_notsync;
+	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+		fh_lock(fhp);
+		err = notify_change(dentry, iap);
+		err = nfserrno(err);
+		fh_unlock(fhp);
+	}
+	if (size_change)
+		put_write_access(inode);
+	if (!err)
+		if (EX_ISSYNC(fhp->fh_export))
+			write_inode_now(inode, 1);
+out:
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+#if defined(CONFIG_NFSD_V4)
+
+static int
+set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+{
+	int len;
+	size_t buflen;
+	char *buf = NULL;
+	int error = 0;
+	struct inode *inode = dentry->d_inode;
+
+	buflen = posix_acl_xattr_size(pacl->a_count);
+	buf = kmalloc(buflen, GFP_KERNEL);
+	error = -ENOMEM;
+	if (buf == NULL)
+		goto out;
+
+	len = posix_acl_to_xattr(pacl, buf, buflen);
+	if (len < 0) {
+		error = len;
+		goto out;
+	}
+
+	error = -EOPNOTSUPP;
+	if (inode->i_op && inode->i_op->setxattr) {
+		down(&inode->i_sem);
+		security_inode_setxattr(dentry, key, buf, len, 0);
+		error = inode->i_op->setxattr(dentry, key, buf, len, 0);
+		if (!error)
+			security_inode_post_setxattr(dentry, key, buf, len, 0);
+		up(&inode->i_sem);
+	}
+out:
+	kfree(buf);
+	return error;
+}
+
+int
+nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+    struct nfs4_acl *acl)
+{
+	int error;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct posix_acl *pacl = NULL, *dpacl = NULL;
+	unsigned int flags = 0;
+
+	/* Get inode */
+	error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR);
+	if (error)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	inode = dentry->d_inode;
+	if (S_ISDIR(inode->i_mode))
+		flags = NFS4_ACL_DIR;
+
+	error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
+	if (error == -EINVAL) {
+		error = nfserr_attrnotsupp;
+		goto out;
+	} else if (error < 0)
+		goto out_nfserr;
+
+	if (pacl) {
+		error = set_nfsv4_acl_one(dentry, pacl, XATTR_NAME_ACL_ACCESS);
+		if (error < 0)
+			goto out_nfserr;
+	}
+
+	if (dpacl) {
+		error = set_nfsv4_acl_one(dentry, dpacl, XATTR_NAME_ACL_DEFAULT);
+		if (error < 0)
+			goto out_nfserr;
+	}
+
+	error = nfs_ok;
+
+out:
+	posix_acl_release(pacl);
+	posix_acl_release(dpacl);
+	return (error);
+out_nfserr:
+	error = nfserrno(error);
+	goto out;
+}
+
+static struct posix_acl *
+_get_posix_acl(struct dentry *dentry, char *key)
+{
+	struct inode *inode = dentry->d_inode;
+	char *buf = NULL;
+	int buflen, error = 0;
+	struct posix_acl *pacl = NULL;
+
+	error = -EOPNOTSUPP;
+	if (inode->i_op == NULL)
+		goto out_err;
+	if (inode->i_op->getxattr == NULL)
+		goto out_err;
+
+	error = security_inode_getxattr(dentry, key);
+	if (error)
+		goto out_err;
+
+	buflen = inode->i_op->getxattr(dentry, key, NULL, 0);
+	if (buflen <= 0) {
+		error = buflen < 0 ? buflen : -ENODATA;
+		goto out_err;
+	}
+
+	buf = kmalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		error = -ENOMEM;
+		goto out_err;
+	}
+
+	error = inode->i_op->getxattr(dentry, key, buf, buflen);
+	if (error < 0)
+		goto out_err;
+
+	pacl = posix_acl_from_xattr(buf, buflen);
+ out:
+	kfree(buf);
+	return pacl;
+ out_err:
+	pacl = ERR_PTR(error);
+	goto out;
+}
+
+int
+nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct posix_acl *pacl = NULL, *dpacl = NULL;
+	unsigned int flags = 0;
+
+	pacl = _get_posix_acl(dentry, XATTR_NAME_ACL_ACCESS);
+	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
+		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+	if (IS_ERR(pacl)) {
+		error = PTR_ERR(pacl);
+		pacl = NULL;
+		goto out;
+	}
+
+	if (S_ISDIR(inode->i_mode)) {
+		dpacl = _get_posix_acl(dentry, XATTR_NAME_ACL_DEFAULT);
+		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
+			dpacl = NULL;
+		else if (IS_ERR(dpacl)) {
+			error = PTR_ERR(dpacl);
+			dpacl = NULL;
+			goto out;
+		}
+		flags = NFS4_ACL_DIR;
+	}
+
+	*acl = nfs4_acl_posix_to_nfsv4(pacl, dpacl, flags);
+	if (IS_ERR(*acl)) {
+		error = PTR_ERR(*acl);
+		*acl = NULL;
+	}
+ out:
+	posix_acl_release(pacl);
+	posix_acl_release(dpacl);
+	return error;
+}
+
+#endif /* defined(CONFIG_NFSD_V4) */
+
+#ifdef CONFIG_NFSD_V3
+/*
+ * Check server access rights to a file system object
+ */
+struct accessmap {
+	u32		access;
+	int		how;
+};
+static struct accessmap	nfs3_regaccess[] = {
+    {	NFS3_ACCESS_READ,	MAY_READ			},
+    {	NFS3_ACCESS_EXECUTE,	MAY_EXEC			},
+    {	NFS3_ACCESS_MODIFY,	MAY_WRITE|MAY_TRUNC		},
+    {	NFS3_ACCESS_EXTEND,	MAY_WRITE			},
+
+    {	0,			0				}
+};
+
+static struct accessmap	nfs3_diraccess[] = {
+    {	NFS3_ACCESS_READ,	MAY_READ			},
+    {	NFS3_ACCESS_LOOKUP,	MAY_EXEC			},
+    {	NFS3_ACCESS_MODIFY,	MAY_EXEC|MAY_WRITE|MAY_TRUNC	},
+    {	NFS3_ACCESS_EXTEND,	MAY_EXEC|MAY_WRITE		},
+    {	NFS3_ACCESS_DELETE,	MAY_REMOVE			},
+
+    {	0,			0				}
+};
+
+static struct accessmap	nfs3_anyaccess[] = {
+	/* Some clients - Solaris 2.6 at least - make an access call
+	 * to the server to check for access to things like /dev/null
+	 * (which, really, the server doesn't care about).  So we
+	 * provide simple access checking for them, looking mainly
+	 * at the mode bits, and we make sure to ignore the read-only
+	 * filesystem checks.
+	 */
+    {	NFS3_ACCESS_READ,	MAY_READ			},
+    {	NFS3_ACCESS_EXECUTE,	MAY_EXEC			},
+    {	NFS3_ACCESS_MODIFY,	MAY_WRITE|MAY_LOCAL_ACCESS	},
+    {	NFS3_ACCESS_EXTEND,	MAY_WRITE|MAY_LOCAL_ACCESS	},
+
+    {	0,			0				}
+};
+
+int
+nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
+{
+	struct accessmap	*map;
+	struct svc_export	*export;
+	struct dentry		*dentry;
+	u32			query, result = 0, sresult = 0;
+	unsigned int		error;
+
+	error = fh_verify(rqstp, fhp, 0, MAY_NOP);
+	if (error)
+		goto out;
+
+	export = fhp->fh_export;
+	dentry = fhp->fh_dentry;
+
+	if (S_ISREG(dentry->d_inode->i_mode))
+		map = nfs3_regaccess;
+	else if (S_ISDIR(dentry->d_inode->i_mode))
+		map = nfs3_diraccess;
+	else
+		map = nfs3_anyaccess;
+
+
+	query = *access;
+	for  (; map->access; map++) {
+		if (map->access & query) {
+			unsigned int err2;
+
+			sresult |= map->access;
+
+			err2 = nfsd_permission(export, dentry, map->how);
+			switch (err2) {
+			case nfs_ok:
+				result |= map->access;
+				break;
+				
+			/* the following error codes just mean the access was not allowed,
+			 * rather than an error occurred */
+			case nfserr_rofs:
+			case nfserr_acces:
+			case nfserr_perm:
+				/* simply don't "or" in the access bit. */
+				break;
+			default:
+				error = err2;
+				goto out;
+			}
+		}
+	}
+	*access = result;
+	if (supported)
+		*supported = sresult;
+
+ out:
+	return error;
+}
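+
+/*
+ * Example: an ACCESS request for NFS3_ACCESS_READ|NFS3_ACCESS_MODIFY on
+ * an (otherwise readable) regular file in a read-only export reports
+ * both bits in *supported, but the MODIFY probe gets nfserr_rofs from
+ * nfsd_permission(), so only NFS3_ACCESS_READ is set in *access; the
+ * call itself still succeeds because rofs/acces/perm are treated as
+ * "not allowed" rather than as errors.
+ */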
+#endif /* CONFIG_NFSD_V3 */
+
+
+
+/*
+ * Open an existing file or directory.
+ * The access argument indicates the type of open (read/write/lock)
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+			int access, struct file **filp)
+{
+	struct dentry	*dentry;
+	struct inode	*inode;
+	int		flags = O_RDONLY|O_LARGEFILE, err;
+
+	/*
+	 * If we get here, then the client has already done an "open",
+	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+	 * in case a chmod has now revoked permission.
+	 */
+	err = fh_verify(rqstp, fhp, type, access | MAY_OWNER_OVERRIDE);
+	if (err)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	inode = dentry->d_inode;
+
+	/* Disallow write access to files with the append-only bit set
+	 * or any access when mandatory locking is enabled.
+	 */
+	err = nfserr_perm;
+	if (IS_APPEND(inode) && (access & MAY_WRITE))
+		goto out;
+	if (IS_ISMNDLK(inode))
+		goto out;
+
+	if (!inode->i_fop)
+		goto out;
+
+	/*
+	 * Check to see if there are any leases on this file.
+	 * This may block while leases are broken.
+	 */
+	err = break_lease(inode, O_NONBLOCK | ((access & MAY_WRITE) ? FMODE_WRITE : 0));
+	if (err == -EWOULDBLOCK)
+		err = -ETIMEDOUT;
+	if (err) /* NOMEM or WOULDBLOCK */
+		goto out_nfserr;
+
+	if (access & MAY_WRITE) {
+		flags = O_WRONLY|O_LARGEFILE;
+
+		DQUOT_INIT(inode);
+	}
+	*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_mnt), flags);
+	if (IS_ERR(*filp))
+		err = PTR_ERR(*filp);
+out_nfserr:
+	if (err)
+		err = nfserrno(err);
+out:
+	return err;
+}
+
+/*
+ * Close a file.
+ */
+void
+nfsd_close(struct file *filp)
+{
+	fput(filp);
+}
+
+/*
+ * Sync a file
+ * As this calls fsync (not fdatasync) there is no need for a write_inode
+ * after it.
+ */
+static inline void nfsd_dosync(struct file *filp, struct dentry *dp,
+			       struct file_operations *fop)
+{
+	struct inode *inode = dp->d_inode;
+	int (*fsync) (struct file *, struct dentry *, int);
+
+	filemap_fdatawrite(inode->i_mapping);
+	if (fop && (fsync = fop->fsync))
+		fsync(filp, dp, 0);
+	filemap_fdatawait(inode->i_mapping);
+}
+	
+
+static void
+nfsd_sync(struct file *filp)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
+	down(&inode->i_sem);
+	nfsd_dosync(filp, filp->f_dentry, filp->f_op);
+	up(&inode->i_sem);
+}
+
+static void
+nfsd_sync_dir(struct dentry *dp)
+{
+	nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
+}
+
+/*
+ * Obtain the readahead parameters for the file
+ * specified by (dev, ino).
+ */
+static DEFINE_SPINLOCK(ra_lock);
+
+static inline struct raparms *
+nfsd_get_raparms(dev_t dev, ino_t ino)
+{
+	struct raparms	*ra, **rap, **frap = NULL;
+	int depth = 0;
+
+	spin_lock(&ra_lock);
+	for (rap = &raparm_cache; (ra = *rap); rap = &ra->p_next) {
+		if (ra->p_ino == ino && ra->p_dev == dev)
+			goto found;
+		depth++;
+		if (ra->p_count == 0)
+			frap = rap;
+	}
+	depth = nfsdstats.ra_size*11/10;
+	if (!frap) {	
+		spin_unlock(&ra_lock);
+		return NULL;
+	}
+	rap = frap;
+	ra = *frap;
+	ra->p_dev = dev;
+	ra->p_ino = ino;
+	ra->p_set = 0;
+found:
+	if (rap != &raparm_cache) {
+		*rap = ra->p_next;
+		ra->p_next   = raparm_cache;
+		raparm_cache = ra;
+	}
+	ra->p_count++;
+	nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
+	spin_unlock(&ra_lock);
+	return ra;
+}
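+
+/*
+ * The scan above prefers an exact (dev, ino) hit; failing that it
+ * recycles the first entry whose p_count has dropped to zero, or gives
+ * up (returning NULL) when every entry is busy.  The ra_depth histogram
+ * records how far down the list a hit was found; recycled entries are
+ * counted in the overflow bucket.
+ */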
+
+/*
+ * Grab and keep cached pages associated with a file in the svc_rqst
+ * so that they can be passed to the network sendmsg/sendpage routines
+ * directly. They will be released after the sending has completed.
+ */
+static int
+nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset , unsigned long size)
+{
+	unsigned long count = desc->count;
+	struct svc_rqst *rqstp = desc->arg.data;
+
+	if (size > count)
+		size = count;
+
+	if (rqstp->rq_res.page_len == 0) {
+		get_page(page);
+		rqstp->rq_respages[rqstp->rq_resused++] = page;
+		rqstp->rq_res.page_base = offset;
+		rqstp->rq_res.page_len = size;
+	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
+		get_page(page);
+		rqstp->rq_respages[rqstp->rq_resused++] = page;
+		rqstp->rq_res.page_len += size;
+	} else {
+		rqstp->rq_res.page_len += size;
+	}
+
+	desc->count = count - size;
+	desc->written += size;
+	return size;
+}
+
+static inline int
+nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+              loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
+{
+	struct inode *inode;
+	struct raparms	*ra;
+	mm_segment_t	oldfs;
+	int		err;
+
+	err = nfserr_perm;
+	inode = file->f_dentry->d_inode;
+#ifdef MSNFS
+	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+		(!lock_may_read(inode, offset, *count)))
+		goto out;
+#endif
+
+	/* Get readahead parameters */
+	ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino);
+
+	if (ra && ra->p_set)
+		file->f_ra = ra->p_ra;
+
+	if (file->f_op->sendfile) {
+		svc_pushback_unused_pages(rqstp);
+		err = file->f_op->sendfile(file, &offset, *count,
+						 nfsd_read_actor, rqstp);
+	} else {
+		oldfs = get_fs();
+		set_fs(KERNEL_DS);
+		err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
+		set_fs(oldfs);
+	}
+
+	/* Write back readahead params */
+	if (ra) {
+		spin_lock(&ra_lock);
+		ra->p_ra = file->f_ra;
+		ra->p_set = 1;
+		ra->p_count--;
+		spin_unlock(&ra_lock);
+	}
+
+	if (err >= 0) {
+		nfsdstats.io_read += err;
+		*count = err;
+		err = 0;
+		dnotify_parent(file->f_dentry, DN_ACCESS);
+	} else 
+		err = nfserrno(err);
+out:
+	return err;
+}
+
+static inline int
+nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+				loff_t offset, struct kvec *vec, int vlen,
+	   			unsigned long cnt, int *stablep)
+{
+	struct svc_export	*exp;
+	struct dentry		*dentry;
+	struct inode		*inode;
+	mm_segment_t		oldfs;
+	int			err = 0;
+	int			stable = *stablep;
+
+	err = nfserr_perm;
+
+#ifdef MSNFS
+	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+		(!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
+		goto out;
+#endif
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	exp   = fhp->fh_export;
+
+	/*
+	 * Request sync writes if
+	 *  -	the sync export option has been set, or
+	 *  -	the client requested O_SYNC behavior (NFSv3 feature), or
+	 *  -	the file system doesn't support fsync().
+	 * When gathered writes have been configured for this volume,
+	 * flushing the data to disk is handled separately below.
+	 */
+
+	if (file->f_op->fsync == 0) {/* COMMIT3 cannot work */
+	       stable = 2;
+	       *stablep = 2; /* FILE_SYNC */
+	}
+
+	if (!EX_ISSYNC(exp))
+		stable = 0;
+	if (stable && !EX_WGATHER(exp))
+		file->f_flags |= O_SYNC;
+
+	/* Write the data. */
+	oldfs = get_fs(); set_fs(KERNEL_DS);
+	err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
+	set_fs(oldfs);
+	if (err >= 0) {
+		nfsdstats.io_write += cnt;
+		dnotify_parent(file->f_dentry, DN_MODIFY);
+	}
+
+	/* clear setuid/setgid flag after write */
+	if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID))) {
+		struct iattr	ia;
+		ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
+
+		down(&inode->i_sem);
+		notify_change(dentry, &ia);
+		up(&inode->i_sem);
+	}
+
+	if (err >= 0 && stable) {
+		static ino_t	last_ino;
+		static dev_t	last_dev;
+
+		/*
+		 * Gathered writes: If another process is currently
+		 * writing to the file, there's a high chance
+		 * this is another nfsd (triggered by a bulk write
+		 * from a client's biod). Rather than syncing the
+		 * file with each write request, we sleep for 10 msec.
+		 *
+		 * I don't know if this roughly approximates
+		 * C. Juszak's idea of gathered writes, but it's a
+		 * nice and simple solution (IMHO), and it seems to
+		 * work:-)
+		 */
+		if (EX_WGATHER(exp)) {
+			if (atomic_read(&inode->i_writecount) > 1
+			    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+				dprintk("nfsd: write defer %d\n", current->pid);
+				msleep(10);
+				dprintk("nfsd: write resume %d\n", current->pid);
+			}
+
+			if (inode->i_state & I_DIRTY) {
+				dprintk("nfsd: write sync %d\n", current->pid);
+				nfsd_sync(file);
+			}
+#if 0
+			wake_up(&inode->i_wait);
+#endif
+		}
+		last_ino = inode->i_ino;
+		last_dev = inode->i_sb->s_dev;
+	}
+
+	dprintk("nfsd: write complete err=%d\n", err);
+	if (err >= 0)
+		err = 0;
+	else 
+		err = nfserrno(err);
+out:
+	return err;
+}
+
+/*
+ * Read data from a file. count must contain the requested read count
+ * on entry. On return, *count contains the number of bytes actually read.
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+		loff_t offset, struct kvec *vec, int vlen,
+		unsigned long *count)
+{
+	int		err;
+
+	if (file) {
+		err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+				MAY_READ|MAY_OWNER_OVERRIDE);
+		if (err)
+			goto out;
+		err = nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);
+	} else {
+		err = nfsd_open(rqstp, fhp, S_IFREG, MAY_READ, &file);
+		if (err)
+			goto out;
+		err = nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);
+		nfsd_close(file);
+	}
+out:
+	return err;
+}
+
+/*
+ * Write data to a file.
+ * The stable flag requests synchronous writes.
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+		loff_t offset, struct kvec *vec, int vlen, unsigned long cnt,
+		int *stablep)
+{
+	int			err = 0;
+
+	if (file) {
+		err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+				MAY_WRITE|MAY_OWNER_OVERRIDE);
+		if (err)
+			goto out;
+		err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt,
+				stablep);
+	} else {
+		err = nfsd_open(rqstp, fhp, S_IFREG, MAY_WRITE, &file);
+		if (err)
+			goto out;
+
+		if (cnt)
+			err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen,
+					     cnt, stablep);
+		nfsd_close(file);
+	}
+out:
+	return err;
+}
+
+#ifdef CONFIG_NFSD_V3
+/*
+ * Commit all pending writes to stable storage.
+ * Strictly speaking, we could sync just the indicated file region here,
+ * but there's currently no way we can ask the VFS to do so.
+ *
+ * Unfortunately we cannot lock the file to make sure we return full WCC
+ * data to the client, as locking happens lower down in the filesystem.
+ */
+int
+nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
+               loff_t offset, unsigned long count)
+{
+	struct file	*file;
+	int		err;
+
+	if ((u64)count > ~(u64)offset)
+		return nfserr_inval;
+
+	if ((err = nfsd_open(rqstp, fhp, S_IFREG, MAY_WRITE, &file)) != 0)
+		return err;
+	if (EX_ISSYNC(fhp->fh_export)) {
+		if (file->f_op && file->f_op->fsync) {
+			nfsd_sync(file);
+		} else {
+			err = nfserr_notsupp;
+		}
+	}
+
+	nfsd_close(file);
+	return err;
+}
+#endif /* CONFIG_NFSD_V3 */
+
+/*
+ * Create a file (regular, directory, device, fifo); UNIX sockets 
+ * not yet implemented.
+ * If the response fh has been verified, the parent directory should
+ * already be locked. Note that the parent directory is left locked.
+ *
+ * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
+ */
+int
+nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+		char *fname, int flen, struct iattr *iap,
+		int type, dev_t rdev, struct svc_fh *resfhp)
+{
+	struct dentry	*dentry, *dchild = NULL;
+	struct inode	*dirp;
+	int		err;
+
+	err = nfserr_perm;
+	if (!flen)
+		goto out;
+	err = nfserr_exist;
+	if (isdotent(fname, flen))
+		goto out;
+
+	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_CREATE);
+	if (err)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	dirp = dentry->d_inode;
+
+	err = nfserr_notdir;
+	if(!dirp->i_op || !dirp->i_op->lookup)
+		goto out;
+	/*
+	 * Check whether the response file handle has been verified yet.
+	 * If it has, the parent directory should already be locked.
+	 */
+	if (!resfhp->fh_dentry) {
+		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
+		fh_lock(fhp);
+		dchild = lookup_one_len(fname, dentry, flen);
+		err = PTR_ERR(dchild);
+		if (IS_ERR(dchild))
+			goto out_nfserr;
+		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
+		if (err)
+			goto out;
+	} else {
+		/* called from nfsd_proc_create */
+		dchild = dget(resfhp->fh_dentry);
+		if (!fhp->fh_locked) {
+			/* not actually possible */
+			printk(KERN_ERR
+				"nfsd_create: parent %s/%s not locked!\n",
+				dentry->d_parent->d_name.name,
+				dentry->d_name.name);
+			err = -EIO;
+			goto out;
+		}
+	}
+	/*
+	 * Make sure the child dentry is still negative ...
+	 */
+	err = nfserr_exist;
+	if (dchild->d_inode) {
+		dprintk("nfsd_create: dentry %s/%s not negative!\n",
+			dentry->d_name.name, dchild->d_name.name);
+		goto out; 
+	}
+
+	if (!(iap->ia_valid & ATTR_MODE))
+		iap->ia_mode = 0;
+	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
+
+	/*
+	 * Get the dir op function pointer.
+	 */
+	err = nfserr_perm;
+	switch (type) {
+	case S_IFREG:
+		err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+		break;
+	case S_IFDIR:
+		err = vfs_mkdir(dirp, dchild, iap->ia_mode);
+		break;
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
+		err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
+		break;
+	default:
+	        printk("nfsd: bad file type %o in nfsd_create\n", type);
+		err = -EINVAL;
+	}
+	if (err < 0)
+		goto out_nfserr;
+
+	if (EX_ISSYNC(fhp->fh_export)) {
+		nfsd_sync_dir(dentry);
+		write_inode_now(dchild->d_inode, 1);
+	}
+
+
+	/* Set file attributes. Mode has already been set and
+	 * setting uid/gid works only for root. Irix appears to
+	 * send along the gid when it tries to implement setgid
+	 * directories via NFS.
+	 */
+	err = 0;
+	if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID|ATTR_MODE)) != 0)
+		err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+	/*
+	 * Update the file handle to get the new inode info.
+	 */
+	if (!err)
+		err = fh_update(resfhp);
+out:
+	if (dchild && !IS_ERR(dchild))
+		dput(dchild);
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+#ifdef CONFIG_NFSD_V3
+/*
+ * NFSv3 version of nfsd_create
+ */
+int
+nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
+		char *fname, int flen, struct iattr *iap,
+		struct svc_fh *resfhp, int createmode, u32 *verifier,
+	        int *truncp)
+{
+	struct dentry	*dentry, *dchild = NULL;
+	struct inode	*dirp;
+	int		err;
+	__u32		v_mtime=0, v_atime=0;
+	int		v_mode=0;
+
+	err = nfserr_perm;
+	if (!flen)
+		goto out;
+	err = nfserr_exist;
+	if (isdotent(fname, flen))
+		goto out;
+	if (!(iap->ia_valid & ATTR_MODE))
+		iap->ia_mode = 0;
+	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_CREATE);
+	if (err)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	dirp = dentry->d_inode;
+
+	/* Get all the sanity checks out of the way before
+	 * we lock the parent. */
+	err = nfserr_notdir;
+	if(!dirp->i_op || !dirp->i_op->lookup)
+		goto out;
+	fh_lock(fhp);
+
+	/*
+	 * Compose the response file handle.
+	 */
+	dchild = lookup_one_len(fname, dentry, flen);
+	err = PTR_ERR(dchild);
+	if (IS_ERR(dchild))
+		goto out_nfserr;
+
+	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
+	if (err)
+		goto out;
+
+	if (createmode == NFS3_CREATE_EXCLUSIVE) {
+		/* while the verifier would fit in mtime+atime,
+		 * solaris7 gets confused (bugid 4218508) if these have
+		 * the high bit set, so we use the mode as well
+		 */
+		v_mtime = verifier[0]&0x7fffffff;
+		v_atime = verifier[1]&0x7fffffff;
+		v_mode  = S_IFREG
+			| ((verifier[0]&0x80000000) >> (32-7)) /* u+x */
+			| ((verifier[1]&0x80000000) >> (32-9)) /* u+r */
+			;
+	}
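+	/*
+	 * For instance, a verifier of { 0xc0000001, 0x00000002 } gives
+	 * v_mtime = 0x40000001, v_atime = 0x00000002 and
+	 * v_mode = S_IFREG | S_IXUSR (only verifier[0] had its high bit
+	 * set) -- the same values that the EXCLUSIVE check and the
+	 * attribute cram-down further down recreate and compare.
+	 */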
+	
+	if (dchild->d_inode) {
+		err = 0;
+
+		switch (createmode) {
+		case NFS3_CREATE_UNCHECKED:
+			if (! S_ISREG(dchild->d_inode->i_mode))
+				err = nfserr_exist;
+			else if (truncp) {
+				/* in nfsv4, we need to treat this case a little
+				 * differently.  we don't want to truncate the
+				 * file now; this would be wrong if the OPEN
+				 * fails for some other reason.  furthermore,
+				 * if the size is nonzero, we should ignore it
+				 * according to spec!
+				 */
+				*truncp = (iap->ia_valid & ATTR_SIZE) && !iap->ia_size;
+			}
+			else {
+				iap->ia_valid &= ATTR_SIZE;
+				goto set_attr;
+			}
+			break;
+		case NFS3_CREATE_EXCLUSIVE:
+			if (   dchild->d_inode->i_mtime.tv_sec == v_mtime
+			    && dchild->d_inode->i_atime.tv_sec == v_atime
+			    && dchild->d_inode->i_mode  == v_mode
+			    && dchild->d_inode->i_size  == 0 )
+				break;
+			 /* fallthru */
+		case NFS3_CREATE_GUARDED:
+			err = nfserr_exist;
+		}
+		goto out;
+	}
+
+	err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+	if (err < 0)
+		goto out_nfserr;
+
+	if (EX_ISSYNC(fhp->fh_export)) {
+		nfsd_sync_dir(dentry);
+		/* setattr will sync the child (or not) */
+	}
+
+	/*
+	 * Update the filehandle to get the new inode info.
+	 */
+	err = fh_update(resfhp);
+	if (err)
+		goto out;
+
+	if (createmode == NFS3_CREATE_EXCLUSIVE) {
+		/* Cram the verifier into atime/mtime/mode */
+		iap->ia_valid = ATTR_MTIME|ATTR_ATIME
+			| ATTR_MTIME_SET|ATTR_ATIME_SET
+			| ATTR_MODE;
+		/* XXX someone who knows this better please fix it for nsec */ 
+		iap->ia_mtime.tv_sec = v_mtime;
+		iap->ia_atime.tv_sec = v_atime;
+		iap->ia_mtime.tv_nsec = 0;
+		iap->ia_atime.tv_nsec = 0;
+		iap->ia_mode  = v_mode;
+	}
+
+	/* Set file attributes.
+	 * Mode has already been set but we might need to reset it
+	 * for CREATE_EXCLUSIVE
+	 * Irix appears to send along the gid when it tries to
+	 * implement setgid directories via NFS. Clear out all that cruft.
+	 */
+ set_attr:
+	if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID)) != 0)
+ 		err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+
+ out:
+	fh_unlock(fhp);
+	if (dchild && !IS_ERR(dchild))
+		dput(dchild);
+ 	return err;
+ 
+ out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+#endif /* CONFIG_NFSD_V3 */
+
+/*
+ * Read a symlink. On entry, *lenp must contain the maximum path length that
+ * fits into the buffer. On return, it contains the true length.
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+{
+	struct dentry	*dentry;
+	struct inode	*inode;
+	mm_segment_t	oldfs;
+	int		err;
+
+	err = fh_verify(rqstp, fhp, S_IFLNK, MAY_NOP);
+	if (err)
+		goto out;
+
+	dentry = fhp->fh_dentry;
+	inode = dentry->d_inode;
+
+	err = nfserr_inval;
+	if (!inode->i_op || !inode->i_op->readlink)
+		goto out;
+
+	touch_atime(fhp->fh_export->ex_mnt, dentry);
+	/* N.B. Why does this call need a get_fs()??
+	 * Remove the set_fs and watch the fireworks:-) --okir
+	 */
+
+	oldfs = get_fs(); set_fs(KERNEL_DS);
+	err = inode->i_op->readlink(dentry, buf, *lenp);
+	set_fs(oldfs);
+
+	if (err < 0)
+		goto out_nfserr;
+	*lenp = err;
+	err = 0;
+out:
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+/*
+ * Create a symlink and look up its inode
+ * N.B. After this call _both_ fhp and resfhp need an fh_put
+ */
+int
+nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+				char *fname, int flen,
+				char *path,  int plen,
+				struct svc_fh *resfhp,
+				struct iattr *iap)
+{
+	struct dentry	*dentry, *dnew;
+	int		err, cerr;
+	umode_t		mode;
+
+	err = nfserr_noent;
+	if (!flen || !plen)
+		goto out;
+	err = nfserr_exist;
+	if (isdotent(fname, flen))
+		goto out;
+
+	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_CREATE);
+	if (err)
+		goto out;
+	fh_lock(fhp);
+	dentry = fhp->fh_dentry;
+	dnew = lookup_one_len(fname, dentry, flen);
+	err = PTR_ERR(dnew);
+	if (IS_ERR(dnew))
+		goto out_nfserr;
+
+	mode = S_IALLUGO;
+	/* Only the MODE ATTRibute is even vaguely meaningful */
+	if (iap && (iap->ia_valid & ATTR_MODE))
+		mode = iap->ia_mode & S_IALLUGO;
+
+	if (unlikely(path[plen] != 0)) {
+		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
+		if (path_alloced == NULL)
+			err = -ENOMEM;
+		else {
+			strncpy(path_alloced, path, plen);
+			path_alloced[plen] = 0;
+			err = vfs_symlink(dentry->d_inode, dnew, path_alloced, mode);
+			kfree(path_alloced);
+		}
+	} else
+		err = vfs_symlink(dentry->d_inode, dnew, path, mode);
+
+	if (!err) {
+		if (EX_ISSYNC(fhp->fh_export))
+			nfsd_sync_dir(dentry);
+	} else
+		err = nfserrno(err);
+	fh_unlock(fhp);
+
+	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
+	dput(dnew);
+	if (err==0) err = cerr;
+out:
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+/*
+ * Create a hardlink
+ * N.B. After this call _both_ ffhp and tfhp need an fh_put
+ */
+int
+nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
+				char *name, int len, struct svc_fh *tfhp)
+{
+	struct dentry	*ddir, *dnew, *dold;
+	struct inode	*dirp, *dest;
+	int		err;
+
+	err = fh_verify(rqstp, ffhp, S_IFDIR, MAY_CREATE);
+	if (err)
+		goto out;
+	err = fh_verify(rqstp, tfhp, -S_IFDIR, MAY_NOP);
+	if (err)
+		goto out;
+
+	err = nfserr_perm;
+	if (!len)
+		goto out;
+	err = nfserr_exist;
+	if (isdotent(name, len))
+		goto out;
+
+	fh_lock(ffhp);
+	ddir = ffhp->fh_dentry;
+	dirp = ddir->d_inode;
+
+	dnew = lookup_one_len(name, ddir, len);
+	err = PTR_ERR(dnew);
+	if (IS_ERR(dnew))
+		goto out_nfserr;
+
+	dold = tfhp->fh_dentry;
+	dest = dold->d_inode;
+
+	err = vfs_link(dold, dirp, dnew);
+	if (!err) {
+		if (EX_ISSYNC(ffhp->fh_export)) {
+			nfsd_sync_dir(ddir);
+			write_inode_now(dest, 1);
+		}
+	} else {
+		if (err == -EXDEV && rqstp->rq_vers == 2)
+			err = nfserr_acces;
+		else
+			err = nfserrno(err);
+	}
+
+	fh_unlock(ffhp);
+	dput(dnew);
+out:
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+/*
+ * Rename a file
+ * N.B. After this call _both_ ffhp and tfhp need an fh_put
+ */
+int
+nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+			    struct svc_fh *tfhp, char *tname, int tlen)
+{
+	struct dentry	*fdentry, *tdentry, *odentry, *ndentry, *trap;
+	struct inode	*fdir, *tdir;
+	int		err;
+
+	err = fh_verify(rqstp, ffhp, S_IFDIR, MAY_REMOVE);
+	if (err)
+		goto out;
+	err = fh_verify(rqstp, tfhp, S_IFDIR, MAY_CREATE);
+	if (err)
+		goto out;
+
+	fdentry = ffhp->fh_dentry;
+	fdir = fdentry->d_inode;
+
+	tdentry = tfhp->fh_dentry;
+	tdir = tdentry->d_inode;
+
+	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+	if (fdir->i_sb != tdir->i_sb)
+		goto out;
+
+	err = nfserr_perm;
+	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
+		goto out;
+
+	/* cannot use fh_lock as we need deadlock protective ordering
+	 * so do it by hand */
+	trap = lock_rename(tdentry, fdentry);
+	ffhp->fh_locked = tfhp->fh_locked = 1;
+	fill_pre_wcc(ffhp);
+	fill_pre_wcc(tfhp);
+
+	odentry = lookup_one_len(fname, fdentry, flen);
+	err = PTR_ERR(odentry);
+	if (IS_ERR(odentry))
+		goto out_nfserr;
+
+	err = -ENOENT;
+	if (!odentry->d_inode)
+		goto out_dput_old;
+	err = -EINVAL;
+	if (odentry == trap)
+		goto out_dput_old;
+
+	ndentry = lookup_one_len(tname, tdentry, tlen);
+	err = PTR_ERR(ndentry);
+	if (IS_ERR(ndentry))
+		goto out_dput_old;
+	err = -ENOTEMPTY;
+	if (ndentry == trap)
+		goto out_dput_new;
+
+#ifdef MSNFS
+	if ((ffhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+		((atomic_read(&odentry->d_count) > 1)
+		 || (atomic_read(&ndentry->d_count) > 1))) {
+			err = nfserr_perm;
+	} else
+#endif
+	err = vfs_rename(fdir, odentry, tdir, ndentry);
+	if (!err && EX_ISSYNC(tfhp->fh_export)) {
+		nfsd_sync_dir(tdentry);
+		nfsd_sync_dir(fdentry);
+	}
+
+ out_dput_new:
+	dput(ndentry);
+ out_dput_old:
+	dput(odentry);
+ out_nfserr:
+	if (err)
+		err = nfserrno(err);
+
+	/* we cannot rely on fh_unlock on the two filehandles,
+	 * as that would do the wrong thing if the two directories
+	 * were the same, so again we do it by hand
+	 */
+	fill_post_wcc(ffhp);
+	fill_post_wcc(tfhp);
+	unlock_rename(tdentry, fdentry);
+	ffhp->fh_locked = tfhp->fh_locked = 0;
+
+out:
+	return err;
+}
+
+/*
+ * Unlink a file or directory
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+				char *fname, int flen)
+{
+	struct dentry	*dentry, *rdentry;
+	struct inode	*dirp;
+	int		err;
+
+	err = nfserr_acces;
+	if (!flen || isdotent(fname, flen))
+		goto out;
+	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_REMOVE);
+	if (err)
+		goto out;
+
+	fh_lock(fhp);
+	dentry = fhp->fh_dentry;
+	dirp = dentry->d_inode;
+
+	rdentry = lookup_one_len(fname, dentry, flen);
+	err = PTR_ERR(rdentry);
+	if (IS_ERR(rdentry))
+		goto out_nfserr;
+
+	if (!rdentry->d_inode) {
+		dput(rdentry);
+		err = nfserr_noent;
+		goto out;
+	}
+
+	if (!type)
+		type = rdentry->d_inode->i_mode & S_IFMT;
+
+	if (type != S_IFDIR) { /* It's UNLINK */
+#ifdef MSNFS
+		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+			(atomic_read(&rdentry->d_count) > 1)) {
+			err = nfserr_perm;
+		} else
+#endif
+		err = vfs_unlink(dirp, rdentry);
+	} else { /* It's RMDIR */
+		err = vfs_rmdir(dirp, rdentry);
+	}
+
+	dput(rdentry);
+
+	if (err)
+		goto out_nfserr;
+	if (EX_ISSYNC(fhp->fh_export)) 
+		nfsd_sync_dir(dentry);
+
+out:
+	return err;
+
+out_nfserr:
+	err = nfserrno(err);
+	goto out;
+}
+
+/*
+ * Read entries from a directory.
+ * We ignore the NFSv3/4 verifier for now.
+ */
+int
+nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, 
+	     struct readdir_cd *cdp, encode_dent_fn func)
+{
+	int		err;
+	struct file	*file;
+	loff_t		offset = *offsetp;
+
+	err = nfsd_open(rqstp, fhp, S_IFDIR, MAY_READ, &file);
+	if (err)
+		goto out;
+
+	offset = vfs_llseek(file, offset, 0);
+	if (offset < 0) {
+		err = nfserrno((int)offset);
+		goto out_close;
+	}
+
+	/*
+	 * Read the directory entries. This silly loop is necessary because
+	 * readdir() is not guaranteed to fill up the entire buffer, but
+	 * may choose to do less.
+	 */
+
+	do {
+		cdp->err = nfserr_eof; /* will be cleared on successful read */
+		err = vfs_readdir(file, (filldir_t) func, cdp);
+	} while (err >=0 && cdp->err == nfs_ok);
+	if (err)
+		err = nfserrno(err);
+	else
+		err = cdp->err;
+	*offsetp = vfs_llseek(file, 0, 1);
+
+	if (err == nfserr_eof || err == nfserr_toosmall)
+		err = nfs_ok; /* can still be found in ->err */
+out_close:
+	nfsd_close(file);
+out:
+	return err;
+}
+
+/*
+ * Get file system stats
+ * N.B. After this call fhp needs an fh_put
+ */
+int
+nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
+{
+	int err = fh_verify(rqstp, fhp, 0, MAY_NOP);
+	if (!err && vfs_statfs(fhp->fh_dentry->d_inode->i_sb,stat))
+		err = nfserr_io;
+	return err;
+}
+
+/*
+ * Check for a user's access permissions to this inode.
+ */
+int
+nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
+{
+	struct inode	*inode = dentry->d_inode;
+	int		err;
+
+	if (acc == MAY_NOP)
+		return 0;
+#if 0
+	dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
+		acc,
+		(acc & MAY_READ)?	" read"  : "",
+		(acc & MAY_WRITE)?	" write" : "",
+		(acc & MAY_EXEC)?	" exec"  : "",
+		(acc & MAY_SATTR)?	" sattr" : "",
+		(acc & MAY_TRUNC)?	" trunc" : "",
+		(acc & MAY_LOCK)?	" lock"  : "",
+		(acc & MAY_OWNER_OVERRIDE)? " owneroverride" : "",
+		inode->i_mode,
+		IS_IMMUTABLE(inode)?	" immut" : "",
+		IS_APPEND(inode)?	" append" : "",
+		IS_RDONLY(inode)?	" ro" : "");
+	dprintk("      owner %d/%d user %d/%d\n",
+		inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
+#endif
+
+	/* Normally we reject any write/sattr etc access on a read-only file
+	 * system.  But if it is IRIX doing a check on write access for a
+	 * device special file, we ignore rofs.
+	 */
+	if (!(acc & MAY_LOCAL_ACCESS))
+		if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
+			if (EX_RDONLY(exp) || IS_RDONLY(inode))
+				return nfserr_rofs;
+			if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
+				return nfserr_perm;
+		}
+	if ((acc & MAY_TRUNC) && IS_APPEND(inode))
+		return nfserr_perm;
+
+	if (acc & MAY_LOCK) {
+		/* If we cannot rely on authentication in NLM requests,
+		 * just allow locks, otherwise require read permission, or
+		 * ownership
+		 */
+		if (exp->ex_flags & NFSEXP_NOAUTHNLM)
+			return 0;
+		else
+			acc = MAY_READ | MAY_OWNER_OVERRIDE;
+	}
+	/*
+	 * The file owner always gets access permission for accesses that
+	 * would normally be checked at open time. This is to make
+	 * file access work even when the client has done a fchmod(fd, 0).
+	 *
+	 * However, `cp foo bar' should fail nevertheless when bar is
+	 * readonly. A sensible way to do this might be to reject all
+	 * attempts to truncate a read-only file, because a creat() call
+	 * always implies file truncation.
+	 * ... but this isn't really fair.  A process may reasonably call
+	 * ftruncate on an open file descriptor on a file with perm 000.
+	 * We must trust the client to do permission checking - using "ACCESS"
+	 * with NFSv3.
+	 */
+	if ((acc & MAY_OWNER_OVERRIDE) &&
+	    inode->i_uid == current->fsuid)
+		return 0;
+
+	err = permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC), NULL);
+
+	/* Allow read access to binaries even when mode 111 */
+	if (err == -EACCES && S_ISREG(inode->i_mode) &&
+	    acc == (MAY_READ | MAY_OWNER_OVERRIDE))
+		err = permission(inode, MAY_EXEC, NULL);
+
+	return err? nfserrno(err) : 0;
+}
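+
+/*
+ * Editor's illustrative note (not part of the original source): the
+ * MAY_OWNER_OVERRIDE handling above means a client that opens a file it
+ * owns and then does fchmod(fd, 0) can still have later READ/WRITE
+ * requests for that descriptor succeed here, because every NFS request
+ * is re-checked and the server has no notion of an already-open file.
+ */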
+
+void
+nfsd_racache_shutdown(void)
+{
+	if (!raparm_cache)
+		return;
+	dprintk("nfsd: freeing readahead buffers.\n");
+	kfree(raparml);
+	raparm_cache = raparml = NULL;
+}
+/*
+ * Initialize readahead param cache
+ */
+int
+nfsd_racache_init(int cache_size)
+{
+	int	i;
+
+	if (raparm_cache)
+		return 0;
+	raparml = kmalloc(sizeof(struct raparms) * cache_size, GFP_KERNEL);
+
+	if (raparml != NULL) {
+		dprintk("nfsd: allocating %d readahead buffers.\n",
+			cache_size);
+		memset(raparml, 0, sizeof(struct raparms) * cache_size);
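+		/*
+		 * Chain the entries into a singly linked free list; the
+		 * last entry keeps the NULL p_next left by the memset()
+		 * above.
+		 */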
+		for (i = 0; i < cache_size - 1; i++) {
+			raparml[i].p_next = raparml + i + 1;
+		}
+		raparm_cache = raparml;
+	} else {
+		printk(KERN_WARNING
+		       "nfsd: Could not allocate memory for read-ahead cache.\n");
+		return -ENOMEM;
+	}
+	nfsdstats.ra_size = cache_size;
+	return 0;
+}
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
new file mode 100644
index 0000000..0ab8f00
--- /dev/null
+++ b/fs/nls/Kconfig
@@ -0,0 +1,504 @@
+#
+# Native language support configuration
+#
+
+menu "Native Language Support"
+
+config NLS
+	tristate "Base native language support"
+	---help---
+	  The base Native Language Support. A number of filesystems
+	  depend on it (e.g. FAT, JOLIET, NT, BEOS filesystems), as does
+	  the ability of some filesystems (NCP, SMB) to use native
+	  languages.
+
+	  If unsure, say Y.
+
+	  To compile this code as a module, choose M here: the module
+	  will be called nls_base.
+
+config NLS_DEFAULT
+	string "Default NLS Option"
+	depends on NLS
+	default "iso8859-1"
+	---help---
+	  The default NLS used when mounting file systems. Note that this is
+	  the NLS used by your console, not the NLS used by a specific file
+	  system (if different) to store data (filenames) on a disk.
+	  Currently, the valid values are:
+	  big5, cp437, cp737, cp775, cp850, cp852, cp855, cp857, cp860, cp861,
+	  cp862, cp863, cp864, cp865, cp866, cp869, cp874, cp932, cp936,
+	  cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
+	  iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
+	  iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
+	  koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8.
+	  If you specify an invalid value, the built-in NLS (compatible
+	  with iso8859-1) will be used.
+
+	  If unsure, specify it as "iso8859-1".
+
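+# Editor's illustrative note (not from the original source): this default is
+# only a fallback; filesystems that support NLS usually let each mount
+# override it, e.g. a vfat mount might pass something like
+# "-o iocharset=iso8859-1,codepage=850".
+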
+config NLS_CODEPAGE_437
+	tristate "Codepage 437 (United States, Canada)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored
+	  in so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage that is used in
+	  the United States and parts of Canada. This is recommended.
+
+config NLS_CODEPAGE_737
+	tristate "Codepage 737 (Greek)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored
+	  in so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage that is used for
+	  Greek. If unsure, say N.
+
+config NLS_CODEPAGE_775
+	tristate "Codepage 775 (Baltic Rim)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored
+	  in so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage that is used
+	  for the Baltic Rim Languages (Latvian and Lithuanian). If unsure,
+	  say N.
+
+config NLS_CODEPAGE_850
+	tristate "Codepage 850 (Europe)"
+	depends on NLS
+	---help---
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage that is used for
+	  much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
+	  more countries here]. It has some characters useful to many European
+	  languages that are not part of the US codepage 437.
+
+	  If unsure, say Y.
+
+config NLS_CODEPAGE_852
+	tristate "Codepage 852 (Central/Eastern Europe)"
+	depends on NLS
+	---help---
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the Latin 2 codepage used by DOS
+	  for much of Central and Eastern Europe. It has all the required
+	  characters for these languages: Albanian, Croatian, Czech, English,
+	  Finnish, Hungarian, Irish, German, Polish, Romanian, Serbian (Latin
+	  transcription), Slovak, Slovenian, and Sorbian.
+
+config NLS_CODEPAGE_855
+	tristate "Codepage 855 (Cyrillic)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Cyrillic.
+
+config NLS_CODEPAGE_857
+	tristate "Codepage 857 (Turkish)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Turkish.
+
+config NLS_CODEPAGE_860
+	tristate "Codepage 860 (Portuguese)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Portuguese.
+
+config NLS_CODEPAGE_861
+	tristate "Codepage 861 (Icelandic)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Icelandic.
+
+config NLS_CODEPAGE_862
+	tristate "Codepage 862 (Hebrew)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Hebrew.
+
+config NLS_CODEPAGE_863
+	tristate "Codepage 863 (Canadian French)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Canadian
+	  French.
+
+config NLS_CODEPAGE_864
+	tristate "Codepage 864 (Arabic)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Arabic.
+
+config NLS_CODEPAGE_865
+	tristate "Codepage 865 (Norwegian, Danish)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for the Nordic
+	  European countries.
+
+config NLS_CODEPAGE_866
+	tristate "Codepage 866 (Cyrillic/Russian)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for
+	  Cyrillic/Russian.
+
+config NLS_CODEPAGE_869
+	tristate "Codepage 869 (Greek)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Greek.
+
+config NLS_CODEPAGE_936
+	tristate "Simplified Chinese charset (CP936, GB2312)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Simplified
+	  Chinese (GBK).
+
+config NLS_CODEPAGE_950
+	tristate "Traditional Chinese charset (Big5)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Traditional
+	  Chinese (Big5).
+
+config NLS_CODEPAGE_932
+	tristate "Japanese charsets (Shift-JIS, EUC-JP)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Shift-JIS
+	  or EUC-JP. To use EUC-JP, specify 'euc-jp' as the mount option or
+	  as the NLS Default value during kernel configuration, instead of
+	  'cp932'.
+
+config NLS_CODEPAGE_949
+	tristate "Korean charset (CP949, EUC-KR)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for UHC.
+
+config NLS_CODEPAGE_874
+	tristate "Thai charset (CP874, TIS-620)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Thai.
+
+config NLS_ISO8859_8
+	tristate "Hebrew charsets (ISO-8859-8, CP1255)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for ISO8859-8, the Hebrew
+	  character set.
+
+config NLS_CODEPAGE_1250
+	tristate "Windows CP1250 (Slavic/Central European Languages)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CDROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Windows CP-1250
+	  character set, which works for most Latin-written Slavic and Central
+	  European languages: Czech, German, Hungarian, Polish, Romanian, Croatian,
+	  Slovak, Slovene.
+
+config NLS_CODEPAGE_1251
+	tristate "Windows CP1251 (Bulgarian, Belarusian)"
+	depends on NLS
+	help
+	  The Microsoft FAT file system family can deal with filenames in
+	  native language character sets. These character sets are stored in
+	  so-called DOS codepages. You need to include the appropriate
+	  codepage if you want to be able to read/write these filenames on
+	  DOS/Windows partitions correctly. This does apply to the filenames
+	  only, not to the file contents. You can include several codepages;
+	  say Y here if you want to include the DOS codepage for Russian,
+	  Bulgarian, and Belarusian.
+
+config NLS_ASCII
+	tristate "ASCII (United States)"
+	depends on NLS
+	help
+	  An ASCII NLS module is needed if you want to override the
+	  DEFAULT NLS with this very basic charset and don't want any
+	  non-ASCII characters to be translated.
+
+config NLS_ISO8859_1
+	tristate "NLS ISO 8859-1  (Latin 1; Western European Languages)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 1 character
+	  set, which covers most West European languages such as Albanian,
+	  Catalan, Danish, Dutch, English, Faeroese, Finnish, French, German,
+	  Galician, Irish, Icelandic, Italian, Norwegian, Portuguese, Spanish,
+	  and Swedish. It is also the default for the US. If unsure, say Y.
+
+config NLS_ISO8859_2
+	tristate "NLS ISO 8859-2  (Latin 2; Slavic/Central European Languages)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 2 character
+	  set, which works for most Latin-written Slavic and Central European
+	  languages: Czech, German, Hungarian, Polish, Romanian, Croatian,
+	  Slovak, Slovene.
+
+config NLS_ISO8859_3
+	tristate "NLS ISO 8859-3  (Latin 3; Esperanto, Galician, Maltese, Turkish)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 3 character
+	  set, which is popular with authors of Esperanto, Galician, Maltese,
+	  and Turkish.
+
+config NLS_ISO8859_4
+	tristate "NLS ISO 8859-4  (Latin 4; old Baltic charset)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 4 character
+	  set, which introduces letters for Estonian, Latvian, and
+	  Lithuanian. It is an incomplete predecessor of Latin 7.
+
+config NLS_ISO8859_5
+	tristate "NLS ISO 8859-5  (Cyrillic)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for ISO8859-5, a Cyrillic
+	  character set with which you can type Bulgarian, Belarusian,
+	  Macedonian, Russian, Serbian, and Ukrainian. Note that the charset
+	  KOI8-R is preferred in Russia.
+
+config NLS_ISO8859_6
+	tristate "NLS ISO 8859-6  (Arabic)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for ISO8859-6, the Arabic
+	  character set.
+
+config NLS_ISO8859_7
+	tristate "NLS ISO 8859-7  (Modern Greek)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for ISO8859-7, the Modern
+	  Greek character set.
+
+config NLS_ISO8859_9
+	tristate "NLS ISO 8859-9  (Latin 5; Turkish)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 5 character
+	  set, which replaces the rarely needed Icelandic letters in Latin 1
+	  with Turkish ones. It is useful in Turkey.
+
+config NLS_ISO8859_13
+	tristate "NLS ISO 8859-13 (Latin 7; Baltic)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 7 character
+	  set, which supports modern Baltic languages including Latvian
+	  and Lithuanian.
+
+config NLS_ISO8859_14
+	tristate "NLS ISO 8859-14 (Latin 8; Celtic)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 8 character
+	  set, which adds the last accented vowels for Welsh (aka Cymraeg)
+	  and Manx Gaelic that were missing in Latin 1.
+	  <http://linux.speech.cymru.org/> has further information.
+
+config NLS_ISO8859_15
+	tristate "NLS ISO 8859-15 (Latin 9; Western European Languages with Euro)"
+	depends on NLS
+	---help---
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the Latin 9 character
+	  set, which covers most West European languages such as Albanian,
+	  Catalan, Danish, Dutch, English, Estonian, Faeroese, Finnish,
+	  French, German, Galician, Irish, Icelandic, Italian, Norwegian,
+	  Portuguese, Spanish, and Swedish. Latin 9 is an update to
+	  Latin 1 (ISO 8859-1) that removes a handful of rarely used
+	  characters and instead adds support for Estonian, corrects the
+	  support for French and Finnish, and adds the new Euro character.
+	  If unsure, say Y.
+
+config NLS_KOI8_R
+	tristate "NLS KOI8-R (Russian)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the preferred Russian
+	  character set.
+
+config NLS_KOI8_U
+	tristate "NLS KOI8-U/RU (Ukrainian, Belarusian)"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the preferred Ukrainian
+	  (koi8-u) and Belarusian (koi8-ru) character sets.
+
+config NLS_UTF8
+	tristate "NLS UTF8"
+	depends on NLS
+	help
+	  If you want to display filenames with native language characters
+	  from the Microsoft FAT file system family or from JOLIET CD-ROMs
+	  correctly on the screen, you need to include the appropriate
+	  input/output character sets. Say Y here for the UTF-8 encoding of
+	  the Unicode/ISO 10646 universal character set.
+
+endmenu
+
diff --git a/fs/nls/Makefile b/fs/nls/Makefile
new file mode 100644
index 0000000..a7ade13
--- /dev/null
+++ b/fs/nls/Makefile
@@ -0,0 +1,46 @@
+#
+# Makefile for native language support
+#
+
+obj-$(CONFIG_NLS)		+= nls_base.o
+
+obj-$(CONFIG_NLS_CODEPAGE_437)	+= nls_cp437.o
+obj-$(CONFIG_NLS_CODEPAGE_737)	+= nls_cp737.o
+obj-$(CONFIG_NLS_CODEPAGE_775)	+= nls_cp775.o
+obj-$(CONFIG_NLS_CODEPAGE_850)	+= nls_cp850.o
+obj-$(CONFIG_NLS_CODEPAGE_852)	+= nls_cp852.o
+obj-$(CONFIG_NLS_CODEPAGE_855)	+= nls_cp855.o
+obj-$(CONFIG_NLS_CODEPAGE_857)	+= nls_cp857.o
+obj-$(CONFIG_NLS_CODEPAGE_860)	+= nls_cp860.o
+obj-$(CONFIG_NLS_CODEPAGE_861)	+= nls_cp861.o
+obj-$(CONFIG_NLS_CODEPAGE_862)	+= nls_cp862.o
+obj-$(CONFIG_NLS_CODEPAGE_863)	+= nls_cp863.o
+obj-$(CONFIG_NLS_CODEPAGE_864)	+= nls_cp864.o
+obj-$(CONFIG_NLS_CODEPAGE_865)	+= nls_cp865.o
+obj-$(CONFIG_NLS_CODEPAGE_866)	+= nls_cp866.o
+obj-$(CONFIG_NLS_CODEPAGE_869)	+= nls_cp869.o
+obj-$(CONFIG_NLS_CODEPAGE_874)	+= nls_cp874.o
+obj-$(CONFIG_NLS_CODEPAGE_932)	+= nls_cp932.o nls_euc-jp.o
+obj-$(CONFIG_NLS_CODEPAGE_936)	+= nls_cp936.o
+obj-$(CONFIG_NLS_CODEPAGE_949)	+= nls_cp949.o
+obj-$(CONFIG_NLS_CODEPAGE_950)	+= nls_cp950.o
+obj-$(CONFIG_NLS_CODEPAGE_1250) += nls_cp1250.o
+obj-$(CONFIG_NLS_CODEPAGE_1251)	+= nls_cp1251.o
+obj-$(CONFIG_NLS_ASCII)		+= nls_ascii.o
+obj-$(CONFIG_NLS_ISO8859_1)	+= nls_iso8859-1.o
+obj-$(CONFIG_NLS_ISO8859_2)	+= nls_iso8859-2.o
+obj-$(CONFIG_NLS_ISO8859_3)	+= nls_iso8859-3.o
+obj-$(CONFIG_NLS_ISO8859_4)	+= nls_iso8859-4.o
+obj-$(CONFIG_NLS_ISO8859_5)	+= nls_iso8859-5.o
+obj-$(CONFIG_NLS_ISO8859_6)	+= nls_iso8859-6.o
+obj-$(CONFIG_NLS_ISO8859_7)	+= nls_iso8859-7.o
+obj-$(CONFIG_NLS_ISO8859_8)	+= nls_cp1255.o
+obj-$(CONFIG_NLS_ISO8859_9)	+= nls_iso8859-9.o
+obj-$(CONFIG_NLS_ISO8859_10)	+= nls_iso8859-10.o
+obj-$(CONFIG_NLS_ISO8859_13)	+= nls_iso8859-13.o
+obj-$(CONFIG_NLS_ISO8859_14)	+= nls_iso8859-14.o
+obj-$(CONFIG_NLS_ISO8859_15)	+= nls_iso8859-15.o
+obj-$(CONFIG_NLS_KOI8_R)	+= nls_koi8-r.o
+obj-$(CONFIG_NLS_KOI8_U)	+= nls_koi8-u.o nls_koi8-ru.o
+obj-$(CONFIG_NLS_ABC)		+= nls_abc.o
+obj-$(CONFIG_NLS_UTF8)		+= nls_utf8.o
diff --git a/fs/nls/nls_ascii.c b/fs/nls/nls_ascii.c
new file mode 100644
index 0000000..b83381c
--- /dev/null
+++ b/fs/nls/nls_ascii.c
@@ -0,0 +1,167 @@
+/*
+ * linux/fs/nls_ascii.c
+ *
+ * Charset ascii translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00,
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "ascii",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
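+
+/*
+ * Editor's note (illustrative, not in the original): the .charset string
+ * above is the name that load_nls("ascii") matches against; when built as
+ * a module, the request_module("nls_%s", ...) fallback in nls_base.c
+ * expects the object to be called nls_ascii, as set up in fs/nls/Makefile.
+ */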
+
+static int __init init_nls_ascii(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_ascii(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_ascii)
+module_exit(exit_nls_ascii)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
new file mode 100644
index 0000000..8975127
--- /dev/null
+++ b/fs/nls/nls_base.c
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls_base.c
+ *
+ * Native language support--charsets and unicode translations.
+ * By Gordon Chaffee 1996, 1997
+ *
+ * Unicode based case conversion 1999 by Wolfram Pienkoss
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+#include <linux/spinlock.h>
+
+static struct nls_table default_table;
+static struct nls_table *tables = &default_table;
+static DEFINE_SPINLOCK(nls_lock);
+
+/*
+ * Sample implementation from Unicode home page.
+ * http://www.stonehand.com/unicode/standard/fss-utf.html
+ */
+struct utf8_table {
+	int     cmask;
+	int     cval;
+	int     shift;
+	long    lmask;
+	long    lval;
+};
+
+static struct utf8_table utf8_table[] =
+{
+    {0x80,  0x00,   0*6,    0x7F,           0,         /* 1 byte sequence */},
+    {0xE0,  0xC0,   1*6,    0x7FF,          0x80,      /* 2 byte sequence */},
+    {0xF0,  0xE0,   2*6,    0xFFFF,         0x800,     /* 3 byte sequence */},
+    {0xF8,  0xF0,   3*6,    0x1FFFFF,       0x10000,   /* 4 byte sequence */},
+    {0xFC,  0xF8,   4*6,    0x3FFFFFF,      0x200000,  /* 5 byte sequence */},
+    {0xFE,  0xFC,   5*6,    0x7FFFFFFF,     0x4000000, /* 6 byte sequence */},
+    {0,						       /* end of table    */}
+};
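+
+/*
+ * Worked example (editor's note, not part of the original): with the table
+ * above, the two-byte sequence 0xC3 0xA9 matches the second row (cmask
+ * 0xE0, cval 0xC0), so utf8_mbtowc() below stores the wide character
+ * 0x00E9 and returns 2; a lone continuation byte such as 0xA9 matches no
+ * row and yields -1.
+ */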
+
+int
+utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
+{
+	long l;
+	int c0, c, nc;
+	struct utf8_table *t;
+  
+	nc = 0;
+	c0 = *s;
+	l = c0;
+	for (t = utf8_table; t->cmask; t++) {
+		nc++;
+		if ((c0 & t->cmask) == t->cval) {
+			l &= t->lmask;
+			if (l < t->lval)
+				return -1;
+			*p = l;
+			return nc;
+		}
+		if (n <= nc)
+			return -1;
+		s++;
+		c = (*s ^ 0x80) & 0xFF;
+		if (c & 0xC0)
+			return -1;
+		l = (l << 6) | c;
+	}
+	return -1;
+}
+
+int
+utf8_mbstowcs(wchar_t *pwcs, const __u8 *s, int n)
+{
+	__u16 *op;
+	const __u8 *ip;
+	int size;
+
+	op = pwcs;
+	ip = s;
+	while (*ip && n > 0) {
+		if (*ip & 0x80) {
+			size = utf8_mbtowc(op, ip, n);
+			if (size == -1) {
+				/* Ignore character and move on */
+				ip++;
+				n--;
+			} else {
+				op++;
+				ip += size;
+				n -= size;
+			}
+		} else {
+			*op++ = *ip++;
+			n--;
+		}
+	}
+	return (op - pwcs);
+}
+
+int
+utf8_wctomb(__u8 *s, wchar_t wc, int maxlen)
+{
+	long l;
+	int c, nc;
+	struct utf8_table *t;
+  
+	if (s == 0)
+		return 0;
+  
+	l = wc;
+	nc = 0;
+	for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
+		nc++;
+		if (l <= t->lmask) {
+			c = t->shift;
+			*s = t->cval | (l >> c);
+			while (c > 0) {
+				c -= 6;
+				s++;
+				*s = 0x80 | ((l >> c) & 0x3F);
+			}
+			return nc;
+		}
+	}
+	return -1;
+}
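+
+/*
+ * Worked example (editor's note, not part of the original): encoding the
+ * Euro sign, utf8_wctomb(buf, 0x20ac, 3) falls through to the three-byte
+ * row (lmask 0xFFFF), writes 0xE2 0x82 0xAC into buf and returns 3; with
+ * maxlen < 3 the loop runs out and -1 is returned instead.
+ */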
+
+int
+utf8_wcstombs(__u8 *s, const wchar_t *pwcs, int maxlen)
+{
+	const __u16 *ip;
+	__u8 *op;
+	int size;
+
+	op = s;
+	ip = pwcs;
+	while (*ip && maxlen > 0) {
+		if (*ip > 0x7f) {
+			size = utf8_wctomb(op, *ip, maxlen);
+			if (size == -1) {
+				/* Ignore character and move on */
+				maxlen--;
+			} else {
+				op += size;
+				maxlen -= size;
+			}
+		} else {
+			*op++ = (__u8) *ip;
+		}
+		ip++;
+	}
+	return (op - s);
+}
+
+int register_nls(struct nls_table * nls)
+{
+	struct nls_table ** tmp = &tables;
+
+	if (!nls)
+		return -EINVAL;
+	if (nls->next)
+		return -EBUSY;
+
+	spin_lock(&nls_lock);
+	while (*tmp) {
+		if (nls == *tmp) {
+			spin_unlock(&nls_lock);
+			return -EBUSY;
+		}
+		tmp = &(*tmp)->next;
+	}
+	nls->next = tables;
+	tables = nls;
+	spin_unlock(&nls_lock);
+	return 0;	
+}
+
+int unregister_nls(struct nls_table * nls)
+{
+	struct nls_table ** tmp = &tables;
+
+	spin_lock(&nls_lock);
+	while (*tmp) {
+		if (nls == *tmp) {
+			*tmp = nls->next;
+			spin_unlock(&nls_lock);
+			return 0;
+		}
+		tmp = &(*tmp)->next;
+	}
+	spin_unlock(&nls_lock);
+	return -EINVAL;
+}
+
+static struct nls_table *find_nls(char *charset)
+{
+	struct nls_table *nls;
+	spin_lock(&nls_lock);
+	for (nls = tables; nls; nls = nls->next) {
+		if (!strcmp(nls->charset, charset))
+			break;
+		if (nls->alias && !strcmp(nls->alias, charset))
+			break;
+	}
+	if (nls && !try_module_get(nls->owner))
+		nls = NULL;
+	spin_unlock(&nls_lock);
+	return nls;
+}
+
+struct nls_table *load_nls(char *charset)
+{
+	struct nls_table *nls;
+#ifdef CONFIG_KMOD
+	int ret;
+#endif
+
+	nls = find_nls(charset);
+	if (nls)
+		return nls;
+
+#ifdef CONFIG_KMOD
+	ret = request_module("nls_%s", charset);
+	if (ret != 0) {
+		printk("Unable to load NLS charset %s\n", charset);
+		return NULL;
+	}
+	nls = find_nls(charset);
+#endif
+	return nls;
+}
+
+void unload_nls(struct nls_table *nls)
+{
+	module_put(nls->owner);
+}
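+
+/*
+ * Illustrative sketch (editor's note, not in the original): a filesystem
+ * typically uses this API roughly as follows, with "iocharset" standing in
+ * for whatever mount option it parses:
+ *
+ *	struct nls_table *t = load_nls(iocharset);
+ *	if (!t)
+ *		t = load_nls_default();
+ *	...
+ *	n = t->char2uni(name, len, &wc);	(and uni2char the other way)
+ *	...
+ *	unload_nls(t);
+ */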
+
+wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x00a1, 0x00a2, 0x00a3,
+	0x00a4, 0x00a5, 0x00a6, 0x00a7,
+	0x00a8, 0x00a9, 0x00aa, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x00af,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x00b4, 0x00b5, 0x00b6, 0x00b7,
+	0x00b8, 0x00b9, 0x00ba, 0x00bb,
+	0x00bc, 0x00bd, 0x00be, 0x00bf,
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x00c3,
+	0x00c4, 0x00c5, 0x00c6, 0x00c7,
+	0x00c8, 0x00c9, 0x00ca, 0x00cb,
+	0x00cc, 0x00cd, 0x00ce, 0x00cf,
+	/* 0xd0*/
+	0x00d0, 0x00d1, 0x00d2, 0x00d3,
+	0x00d4, 0x00d5, 0x00d6, 0x00d7,
+	0x00d8, 0x00d9, 0x00da, 0x00db,
+	0x00dc, 0x00dd, 0x00de, 0x00df,
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x00e3,
+	0x00e4, 0x00e5, 0x00e6, 0x00e7,
+	0x00e8, 0x00e9, 0x00ea, 0x00eb,
+	0x00ec, 0x00ed, 0x00ee, 0x00ef,
+	/* 0xf0*/
+	0x00f0, 0x00f1, 0x00f2, 0x00f3,
+	0x00f4, 0x00f5, 0x00f6, 0x00f7,
+	0x00f8, 0x00f9, 0x00fa, 0x00fb,
+	0x00fc, 0x00fd, 0x00fe, 0x00ff,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table default_table = {
+	.charset	= "default",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+};
+
+/* Returns a simple default translation table */
+struct nls_table *load_nls_default(void)
+{
+	struct nls_table *default_nls;
+	
+	default_nls = load_nls(CONFIG_NLS_DEFAULT);
+	if (default_nls != NULL)
+		return default_nls;
+	else
+		return &default_table;
+}
+
+EXPORT_SYMBOL(register_nls);
+EXPORT_SYMBOL(unregister_nls);
+EXPORT_SYMBOL(unload_nls);
+EXPORT_SYMBOL(load_nls);
+EXPORT_SYMBOL(load_nls_default);
+EXPORT_SYMBOL(utf8_mbtowc);
+EXPORT_SYMBOL(utf8_mbstowcs);
+EXPORT_SYMBOL(utf8_wctomb);
+EXPORT_SYMBOL(utf8_wcstombs);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1250.c b/fs/nls/nls_cp1250.c
new file mode 100644
index 0000000..32e78cf
--- /dev/null
+++ b/fs/nls/nls_cp1250.c
@@ -0,0 +1,347 @@
+/*
+ * linux/fs/nls_cp1250.c
+ *
+ * Charset cp1250 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003, 
+	0x0004, 0x0005, 0x0006, 0x0007, 
+	0x0008, 0x0009, 0x000a, 0x000b, 
+	0x000c, 0x000d, 0x000e, 0x000f, 
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013, 
+	0x0014, 0x0015, 0x0016, 0x0017, 
+	0x0018, 0x0019, 0x001a, 0x001b, 
+	0x001c, 0x001d, 0x001e, 0x001f, 
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023, 
+	0x0024, 0x0025, 0x0026, 0x0027, 
+	0x0028, 0x0029, 0x002a, 0x002b, 
+	0x002c, 0x002d, 0x002e, 0x002f, 
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033, 
+	0x0034, 0x0035, 0x0036, 0x0037, 
+	0x0038, 0x0039, 0x003a, 0x003b, 
+	0x003c, 0x003d, 0x003e, 0x003f, 
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043, 
+	0x0044, 0x0045, 0x0046, 0x0047, 
+	0x0048, 0x0049, 0x004a, 0x004b, 
+	0x004c, 0x004d, 0x004e, 0x004f, 
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053, 
+	0x0054, 0x0055, 0x0056, 0x0057, 
+	0x0058, 0x0059, 0x005a, 0x005b, 
+	0x005c, 0x005d, 0x005e, 0x005f, 
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063, 
+	0x0064, 0x0065, 0x0066, 0x0067, 
+	0x0068, 0x0069, 0x006a, 0x006b, 
+	0x006c, 0x006d, 0x006e, 0x006f, 
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073, 
+	0x0074, 0x0075, 0x0076, 0x0077, 
+	0x0078, 0x0079, 0x007a, 0x007b, 
+	0x007c, 0x007d, 0x007e, 0x007f, 
+	/* 0x80*/
+	0x20ac, 0x0000, 0x201a, 0x0000, 
+	0x201e, 0x2026, 0x2020, 0x2021, 
+	0x0000, 0x2030, 0x0160, 0x2039, 
+	0x015a, 0x0164, 0x017d, 0x0179, 
+	/* 0x90*/
+	0x0000, 0x2018, 0x2019, 0x201c, 
+	0x201d, 0x2022, 0x2013, 0x2014, 
+	0x0000, 0x2122, 0x0161, 0x203a, 
+	0x015b, 0x0165, 0x017e, 0x017a, 
+	/* 0xa0*/
+	0x00a0, 0x02c7, 0x02d8, 0x0141, 
+	0x00a4, 0x0104, 0x00a6, 0x00a7, 
+	0x00a8, 0x00a9, 0x015e, 0x00ab, 
+	0x00ac, 0x00ad, 0x00ae, 0x017b, 
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x02db, 0x0142, 
+	0x00b4, 0x00b5, 0x00b6, 0x00b7, 
+	0x00b8, 0x0105, 0x015f, 0x00bb, 
+	0x013d, 0x02dd, 0x013e, 0x017c, 
+	/* 0xc0*/
+	0x0154, 0x00c1, 0x00c2, 0x0102, 
+	0x00c4, 0x0139, 0x0106, 0x00c7, 
+	0x010c, 0x00c9, 0x0118, 0x00cb, 
+	0x011a, 0x00cd, 0x00ce, 0x010e, 
+	/* 0xd0*/
+	0x0110, 0x0143, 0x0147, 0x00d3, 
+	0x00d4, 0x0150, 0x00d6, 0x00d7, 
+	0x0158, 0x016e, 0x00da, 0x0170, 
+	0x00dc, 0x00dd, 0x0162, 0x00df, 
+	/* 0xe0*/
+	0x0155, 0x00e1, 0x00e2, 0x0103, 
+	0x00e4, 0x013a, 0x0107, 0x00e7, 
+	0x010d, 0x00e9, 0x0119, 0x00eb, 
+	0x011b, 0x00ed, 0x00ee, 0x010f, 
+	/* 0xf0*/
+	0x0111, 0x0144, 0x0148, 0x00f3, 
+	0x00f4, 0x0151, 0x00f6, 0x00f7, 
+	0x0159, 0x016f, 0x00fa, 0x0171, 
+	0x00fc, 0x00fd, 0x0163, 0x02d9, 
+	};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
+	0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
+	0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */
+	};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */
+	0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xc5, 0xe5, 0x00, 0x00, 0xbc, 0xbe, 0x00, /* 0x38-0x3f */
+	0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */
+	0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */
+	0xd8, 0xf8, 0x8c, 0x9c, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */
+	0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */
+	0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e, 0x00, /* 0x78-0x7f */
+
+	};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */
+	};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */
+	0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	};
+
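+/*
+ * Unicode to cp1250 lookup is two-level: the high byte of the code
+ * point selects a page table above, the low byte indexes into it.
+ * A NULL page or a 0x00 entry means there is no mapping.
+ */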
+static unsigned char *page_uni2charset[256] = {
+	page00,	page01,	page02,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	page20,	page21,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	};
+
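+/*
+ * charset2lower/charset2upper fold case directly on cp1250 bytes
+ * (used e.g. for case-insensitive filename comparisons); a 0x00
+ * entry means the caller keeps the original byte.
+ */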
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x00, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */
+	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xb9, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbe, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+	};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x00, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x00, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xa5, 0xaa, 0xbb, 0xbc, 0xbd, 0xbc, 0xaf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0x00, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
+	};
+
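+/*
+ * uni2char(): convert one Unicode character to cp1250.  For example
+ * U+20ac (the euro sign) goes through page20[0xac] and comes out as
+ * 0x80.  Returns the number of bytes written (always 1 here),
+ * -EINVAL if the character has no cp1250 equivalent, or
+ * -ENAMETOOLONG if there is no room left in the output buffer.
+ */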
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
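+/*
+ * char2uni(): the reverse direction, a single charset2uni[] lookup.
+ * Bytes that map to 0x0000 are undefined in cp1250 and are rejected
+ * with -EINVAL.
+ */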
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
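+/*
+ * The nls_table ties the conversion and case tables to the name
+ * "cp1250", which filesystems pass to load_nls(), e.g. through the
+ * iocharset= mount option; register_nls() below puts it on the
+ * global list of known charsets.
+ */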
+static struct nls_table table = {
+	.charset	= "cp1250",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp1250(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp1250(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp1250)
+module_exit(exit_nls_cp1250)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1251.c b/fs/nls/nls_cp1251.c
new file mode 100644
index 0000000..cb41c8a
--- /dev/null
+++ b/fs/nls/nls_cp1251.c
@@ -0,0 +1,302 @@
+/*
+ * linux/fs/nls/nls_cp1251.c
+ *
+ * Charset cp1251 translation tables.
+ * Generated automatically from the Unicode and charset mapping
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003, 
+	0x0004, 0x0005, 0x0006, 0x0007, 
+	0x0008, 0x0009, 0x000a, 0x000b, 
+	0x000c, 0x000d, 0x000e, 0x000f, 
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013, 
+	0x0014, 0x0015, 0x0016, 0x0017, 
+	0x0018, 0x0019, 0x001a, 0x001b, 
+	0x001c, 0x001d, 0x001e, 0x001f, 
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023, 
+	0x0024, 0x0025, 0x0026, 0x0027, 
+	0x0028, 0x0029, 0x002a, 0x002b, 
+	0x002c, 0x002d, 0x002e, 0x002f, 
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033, 
+	0x0034, 0x0035, 0x0036, 0x0037, 
+	0x0038, 0x0039, 0x003a, 0x003b, 
+	0x003c, 0x003d, 0x003e, 0x003f, 
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043, 
+	0x0044, 0x0045, 0x0046, 0x0047, 
+	0x0048, 0x0049, 0x004a, 0x004b, 
+	0x004c, 0x004d, 0x004e, 0x004f, 
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053, 
+	0x0054, 0x0055, 0x0056, 0x0057, 
+	0x0058, 0x0059, 0x005a, 0x005b, 
+	0x005c, 0x005d, 0x005e, 0x005f, 
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063, 
+	0x0064, 0x0065, 0x0066, 0x0067, 
+	0x0068, 0x0069, 0x006a, 0x006b, 
+	0x006c, 0x006d, 0x006e, 0x006f, 
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073, 
+	0x0074, 0x0075, 0x0076, 0x0077, 
+	0x0078, 0x0079, 0x007a, 0x007b, 
+	0x007c, 0x007d, 0x007e, 0x007f, 
+	/* 0x80*/
+	0x0402, 0x0403, 0x201a, 0x0453, 
+	0x201e, 0x2026, 0x2020, 0x2021, 
+	0x20ac, 0x2030, 0x0409, 0x2039, 
+	0x040a, 0x040c, 0x040b, 0x040f, 
+	/* 0x90*/
+	0x0452, 0x2018, 0x2019, 0x201c, 
+	0x201d, 0x2022, 0x2013, 0x2014, 
+	0x0000, 0x2122, 0x0459, 0x203a, 
+	0x045a, 0x045c, 0x045b, 0x045f, 
+	/* 0xa0*/
+	0x00a0, 0x040e, 0x045e, 0x0408, 
+	0x00a4, 0x0490, 0x00a6, 0x00a7, 
+	0x0401, 0x00a9, 0x0404, 0x00ab, 
+	0x00ac, 0x00ad, 0x00ae, 0x0407, 
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x0406, 0x0456, 
+	0x0491, 0x00b5, 0x00b6, 0x00b7, 
+	0x0451, 0x2116, 0x0454, 0x00bb, 
+	0x0458, 0x0405, 0x0455, 0x0457, 
+	/* 0xc0*/
+	0x0410, 0x0411, 0x0412, 0x0413, 
+	0x0414, 0x0415, 0x0416, 0x0417, 
+	0x0418, 0x0419, 0x041a, 0x041b, 
+	0x041c, 0x041d, 0x041e, 0x041f, 
+	/* 0xd0*/
+	0x0420, 0x0421, 0x0422, 0x0423, 
+	0x0424, 0x0425, 0x0426, 0x0427, 
+	0x0428, 0x0429, 0x042a, 0x042b, 
+	0x042c, 0x042d, 0x042e, 0x042f, 
+	/* 0xe0*/
+	0x0430, 0x0431, 0x0432, 0x0433, 
+	0x0434, 0x0435, 0x0436, 0x0437, 
+	0x0438, 0x0439, 0x043a, 0x043b, 
+	0x043c, 0x043d, 0x043e, 0x043f, 
+	/* 0xf0*/
+	0x0440, 0x0441, 0x0442, 0x0443, 
+	0x0444, 0x0445, 0x0446, 0x0447, 
+	0x0448, 0x0449, 0x044a, 0x044b, 
+	0x044c, 0x044d, 0x044e, 0x044f, 
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
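+/*
+ * page04 covers the Unicode Cyrillic block (U+04xx).  The basic
+ * alphabet U+0410-U+044f maps directly onto cp1251 0xc0-0xff; the
+ * additional national letters land in the 0x80-0xbf range.
+ */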
+static unsigned char page04[256] = {
+	0x00, 0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, /* 0x00-0x07 */
+	0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x00, 0xa1, 0x8f, /* 0x08-0x0f */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x10-0x17 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x18-0x1f */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x20-0x27 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x28-0x2f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0x48-0x4f */
+	0x00, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, /* 0x50-0x57 */
+	0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x00, 0xa2, 0x9f, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xa5, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */
+	0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, page21, NULL,	NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+	0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp1251",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp1251(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp1251(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp1251)
+module_exit(exit_nls_cp1251)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1255.c b/fs/nls/nls_cp1255.c
new file mode 100644
index 0000000..efdeefe
--- /dev/null
+++ b/fs/nls/nls_cp1255.c
@@ -0,0 +1,385 @@
+/*
+ * linux/fs/nls/nls_cp1255.c
+ *
+ * Charset cp1255 translation tables.
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x20ac, 0x0000, 0x201a, 0x0192,
+	0x201e, 0x2026, 0x2020, 0x2021,
+	0x02c6, 0x2030, 0x0000, 0x2039,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	/* 0x90*/
+	0x0000, 0x2018, 0x2019, 0x201c,
+	0x201d, 0x2022, 0x2013, 0x2014,
+	0x02dc, 0x2122, 0x0000, 0x203a,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	/* 0xa0*/
+	0x00a0, 0x00a1, 0x00a2, 0x00a3,
+	0x20aa, 0x00a5, 0x00a6, 0x00a7,
+	0x00a8, 0x00a9, 0x00d7, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x203e,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x00b4, 0x00b5, 0x00b6, 0x00b7,
+	0x00b8, 0x00b9, 0x00f7, 0x00bb,
+	0x00bc, 0x00bd, 0x00be, 0x00bf,
+	/* 0xc0*/
+	0x05b0, 0x05b1, 0x05b2, 0x05b3,
+	0x05b4, 0x05b5, 0x05b6, 0x05b7,
+	0x05b8, 0x05b9, 0x0000, 0x05bb,
+	0x05bc, 0x05bd, 0x05be, 0x05bf,
+	/* 0xd0*/
+	0x05c0, 0x05c1, 0x05c2, 0x05c3,
+	0x05f0, 0x05f1, 0x05f2, 0x05f3,
+	0x05f4, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x2017,
+	/* 0xe0*/
+	0x05d0, 0x05d1, 0x05d2, 0x05d3,
+	0x05d4, 0x05d5, 0x05d6, 0x05d7,
+	0x05d8, 0x05d9, 0x05da, 0x05db,
+	0x05dc, 0x05dd, 0x05de, 0x05df,
+	/* 0xf0*/
+	0x05e0, 0x05e1, 0x05e2, 0x05e3,
+	0x05e4, 0x05e5, 0x05e6, 0x05e7,
+	0x05e8, 0x05e9, 0x05ea, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0xa2, 0xa3, 0x00, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0x00, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, /* 0xf0-0xf7 */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+};
+
+static unsigned char page05[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xb0-0xb7 */
+	0xc8, 0xc9, 0x00, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xb8-0xbf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xd0-0xd7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xd8-0xdf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xe0-0xe7 */
+	0xf8, 0xf9, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0xfe, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */
+	0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0xa4, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   page05, NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0x00, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
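+/*
+ * cp1255 is close enough to ISO 8859-8 (Hebrew) to be registered
+ * under that alias as well; MODULE_ALIAS_NLS() at the end of the
+ * file lets a request for "iso8859-8" load this module.
+ */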
+static struct nls_table table = {
+	.charset	= "cp1255",
+	.alias		= "iso8859-8",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp1255(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp1255(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp1255)
+module_exit(exit_nls_cp1255)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(iso8859-8);
diff --git a/fs/nls/nls_cp437.c b/fs/nls/nls_cp437.c
new file mode 100644
index 0000000..5c4a1cd
--- /dev/null
+++ b/fs/nls/nls_cp437.c
@@ -0,0 +1,388 @@
+/*
+ * linux/fs/nls/nls_cp437.c
+ *
+ * Charset cp437 translation tables.
+ * Generated automatically from the Unicode and charset mapping
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
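+/*
+ * cp437 is the original IBM PC character set: 0x80-0xaf hold
+ * accented Latin letters and punctuation, 0xb0-0xdf the box-drawing
+ * and shading characters, 0xe0-0xff Greek letters and mathematical
+ * symbols.
+ */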
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x00e0, 0x00e5, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00ef,
+	0x00ee, 0x00ec, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x00f4,
+	0x00f6, 0x00f2, 0x00fb, 0x00f9,
+	0x00ff, 0x00d6, 0x00dc, 0x00a2,
+	0x00a3, 0x00a5, 0x20a7, 0x0192,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x00aa, 0x00ba,
+	0x00bf, 0x2310, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
+	0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
+	0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x00, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp437",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp437(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp437(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp437)
+module_exit(exit_nls_cp437)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp737.c b/fs/nls/nls_cp737.c
new file mode 100644
index 0000000..e8b3ca8
--- /dev/null
+++ b/fs/nls/nls_cp737.c
@@ -0,0 +1,351 @@
+/*
+ * linux/fs/nls/nls_cp737.c
+ *
+ * Charset cp737 translation tables.
+ * Generated automatically from the Unicode and charset mapping
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
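+/*
+ * cp737 is the DOS Greek code page: the Greek alphabet occupies
+ * 0x80-0xaf and 0xe0-0xf0, with the usual DOS box-drawing set at
+ * 0xb0-0xdf.
+ */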
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0391, 0x0392, 0x0393, 0x0394,
+	0x0395, 0x0396, 0x0397, 0x0398,
+	0x0399, 0x039a, 0x039b, 0x039c,
+	0x039d, 0x039e, 0x039f, 0x03a0,
+	/* 0x90*/
+	0x03a1, 0x03a3, 0x03a4, 0x03a5,
+	0x03a6, 0x03a7, 0x03a8, 0x03a9,
+	0x03b1, 0x03b2, 0x03b3, 0x03b4,
+	0x03b5, 0x03b6, 0x03b7, 0x03b8,
+	/* 0xa0*/
+	0x03b9, 0x03ba, 0x03bb, 0x03bc,
+	0x03bd, 0x03be, 0x03bf, 0x03c0,
+	0x03c1, 0x03c3, 0x03c2, 0x03c4,
+	0x03c5, 0x03c6, 0x03c7, 0x03c8,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03c9, 0x03ac, 0x03ad, 0x03ae,
+	0x03ca, 0x03af, 0x03cc, 0x03cd,
+	0x03cb, 0x03ce, 0x0386, 0x0388,
+	0x0389, 0x038a, 0x038c, 0x038e,
+	/* 0xf0*/
+	0x038f, 0x00b1, 0x2265, 0x2264,
+	0x03aa, 0x03ab, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x00, /* 0x80-0x87 */
+	0xeb, 0xec, 0xed, 0x00, 0xee, 0x00, 0xef, 0xf0, /* 0x88-0x8f */
+	0x00, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, /* 0x90-0x97 */
+	0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, /* 0x98-0x9f */
+	0x8f, 0x90, 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, /* 0xa0-0xa7 */
+	0x96, 0x97, 0xf4, 0xf5, 0xe1, 0xe2, 0xe3, 0xe5, /* 0xa8-0xaf */
+	0x00, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, /* 0xb0-0xb7 */
+	0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, /* 0xb8-0xbf */
+	0xa7, 0xa8, 0xaa, 0xa9, 0xab, 0xac, 0xad, 0xae, /* 0xc0-0xc7 */
+	0xaf, 0xe0, 0xe4, 0xe8, 0xe6, 0xe7, 0xe9, 0x00, /* 0xc8-0xcf */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x80-0x87 */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x88-0x8f */
+	0xa8, 0xa9, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xe0, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xe1, 0xe2, 0xe3, 0xe5, 0xe6, 0xe7, /* 0xe8-0xef */
+	0xe9, 0xf1, 0xf2, 0xf3, 0xe4, 0xe8, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x98-0x9f */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa0-0xa7 */
+	0x90, 0x91, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x97, 0xea, 0xeb, 0xec, 0xf4, 0xed, 0xee, 0xef, /* 0xe0-0xe7 */
+	0xf5, 0xf0, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp737",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp737(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp737(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp737)
+module_exit(exit_nls_cp737)
+
+MODULE_LICENSE("Dual BSD/GPL");
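The cp737 tables above (and the identical machinery in the other nls_cp*.c files) are consumed through a two-level lookup: the high byte of the Unicode code point selects a page in page_uni2charset[], the low byte indexes into that 256-entry page, and a zero entry means there is no exact mapping, so uni2char() returns -EINVAL. A minimal user-space sketch of that lookup, assuming only the single U+0391 -> 0x80 mapping taken from page03 above (the demo_* names are illustrative and not part of this patch):

	#include <stdio.h>

	/* trimmed-down stand-ins mirroring the kernel table layout */
	static unsigned char demo_page03[256] = { [0x91] = 0x80 };	/* U+0391 -> 0x80 in cp737 */
	static unsigned char *demo_pages[256] = { [0x03] = demo_page03 };

	static int demo_uni2char(unsigned short uni, unsigned char *out)
	{
		unsigned char cl = uni & 0x00ff;		/* low byte: offset within the page */
		unsigned char ch = (uni & 0xff00) >> 8;		/* high byte: selects the page */
		unsigned char *page = demo_pages[ch];

		if (!page || !page[cl])
			return -1;				/* the kernel code returns -EINVAL here */
		*out = page[cl];
		return 1;
	}

	int main(void)
	{
		unsigned char c;

		if (demo_uni2char(0x0391, &c) == 1)
			printf("U+0391 -> 0x%02x\n", c);	/* prints "U+0391 -> 0x80" */
		return 0;
	}

The reverse direction needs no paging at all: char2uni() is a single 256-entry charset2uni[] lookup, with 0x0000 again standing for "unmapped".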
diff --git a/fs/nls/nls_cp775.c b/fs/nls/nls_cp775.c
new file mode 100644
index 0000000..bdb290e
--- /dev/null
+++ b/fs/nls/nls_cp775.c
@@ -0,0 +1,320 @@
+/*
+ * linux/fs/nls/nls_cp775.c
+ *
+ * Charset cp775 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0106, 0x00fc, 0x00e9, 0x0101,
+	0x00e4, 0x0123, 0x00e5, 0x0107,
+	0x0142, 0x0113, 0x0156, 0x0157,
+	0x012b, 0x0179, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x014d,
+	0x00f6, 0x0122, 0x00a2, 0x015a,
+	0x015b, 0x00d6, 0x00dc, 0x00f8,
+	0x00a3, 0x00d8, 0x00d7, 0x00a4,
+	/* 0xa0*/
+	0x0100, 0x012a, 0x00f3, 0x017b,
+	0x017c, 0x017a, 0x201d, 0x00a6,
+	0x00a9, 0x00ae, 0x00ac, 0x00bd,
+	0x00bc, 0x0141, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x0104, 0x010c, 0x0118,
+	0x0116, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x012e, 0x0160, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x0172, 0x016a,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x017d,
+	/* 0xd0*/
+	0x0105, 0x010d, 0x0119, 0x0117,
+	0x012f, 0x0161, 0x0173, 0x016b,
+	0x017e, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x00d3, 0x00df, 0x014c, 0x0143,
+	0x00f5, 0x00d5, 0x00b5, 0x0144,
+	0x0136, 0x0137, 0x013b, 0x013c,
+	0x0146, 0x0112, 0x0145, 0x2019,
+	/* 0xf0*/
+	0x00ad, 0x00b1, 0x201c, 0x00be,
+	0x00b6, 0x00a7, 0x00f7, 0x201e,
+	0x00b0, 0x2219, 0x00b7, 0x00b9,
+	0x00b3, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x96, 0x9c, 0x9f, 0x00, 0xa7, 0xf5, /* 0xa0-0xa7 */
+	0x00, 0xa8, 0x00, 0xae, 0xaa, 0xf0, 0xa9, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0xfc, 0x00, 0xe6, 0xf4, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0xfb, 0x00, 0xaf, 0xac, 0xab, 0xf3, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xe0, 0x00, 0xe5, 0x99, 0x9e, /* 0xd0-0xd7 */
+	0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x84, 0x86, 0x91, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xa2, 0x00, 0xe4, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x9b, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0xa0, 0x83, 0x00, 0x00, 0xb5, 0xd0, 0x80, 0x87, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xb6, 0xd1, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0xed, 0x89, 0x00, 0x00, 0xb8, 0xd3, /* 0x10-0x17 */
+	0xb7, 0xd2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x95, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0xa1, 0x8c, 0x00, 0x00, 0xbd, 0xd4, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xe9, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0xea, 0xeb, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0xad, 0x88, 0xe3, 0xe7, 0xee, 0xec, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x8b, /* 0x50-0x57 */
+	0x00, 0x00, 0x97, 0x98, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xbe, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xc7, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x8d, 0xa5, 0xa3, 0xa4, 0xcf, 0xd8, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xef, 0x00, 0x00, 0xf2, 0xa6, 0xf7, 0x00, /* 0x18-0x1f */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8b, 0x8b, 0x8c, 0xa5, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x85, 0x96, 0x98, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */
+	0x83, 0x8c, 0xa2, 0xa4, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x88, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xd0, 0xd1, 0xd2, /* 0xb0-0xb7 */
+	0xd3, 0xb9, 0xba, 0xbb, 0xbc, 0xd4, 0xd5, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xd6, 0xd7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xd8, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xa2, 0xe1, 0x93, 0xe7, 0xe4, 0xe4, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe9, 0xe9, 0xeb, 0xeb, 0xec, 0x89, 0xec, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0xa0, 0x8e, 0x95, 0x8f, 0x80, /* 0x80-0x87 */
+	0xad, 0xed, 0x8a, 0x8a, 0xa1, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0xe2, 0x99, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x97, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xe0, 0xa3, 0xa3, 0x8d, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xb5, 0xb6, 0xb7, 0xb8, 0xbd, 0xbe, 0xc6, 0xc7, /* 0xd0-0xd7 */
+	0xcf, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe5, 0xe5, 0x00, 0xe3, /* 0xe0-0xe7 */
+	0xe8, 0xe8, 0xea, 0xea, 0xee, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp775",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp775(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp775(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp775)
+module_exit(exit_nls_cp775)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp850.c b/fs/nls/nls_cp850.c
new file mode 100644
index 0000000..25deaa4
--- /dev/null
+++ b/fs/nls/nls_cp850.c
@@ -0,0 +1,316 @@
+/*
+ * linux/fs/nls/nls_cp850.c
+ *
+ * Charset cp850 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x00e0, 0x00e5, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00ef,
+	0x00ee, 0x00ec, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x00f4,
+	0x00f6, 0x00f2, 0x00fb, 0x00f9,
+	0x00ff, 0x00d6, 0x00dc, 0x00f8,
+	0x00a3, 0x00d8, 0x00d7, 0x0192,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x00aa, 0x00ba,
+	0x00bf, 0x00ae, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x00c1, 0x00c2, 0x00c0,
+	0x00a9, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x00a2, 0x00a5, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x00e3, 0x00c3,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x00a4,
+	/* 0xd0*/
+	0x00f0, 0x00d0, 0x00ca, 0x00cb,
+	0x00c8, 0x0131, 0x00cd, 0x00ce,
+	0x00cf, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x00a6, 0x00cc, 0x2580,
+	/* 0xe0*/
+	0x00d3, 0x00df, 0x00d4, 0x00d2,
+	0x00f5, 0x00d5, 0x00b5, 0x00fe,
+	0x00de, 0x00da, 0x00db, 0x00d9,
+	0x00fd, 0x00dd, 0x00af, 0x00b4,
+	/* 0xf0*/
+	0x00ad, 0x00b1, 0x2017, 0x00be,
+	0x00b6, 0x00a7, 0x00f7, 0x00b8,
+	0x00b0, 0x00a8, 0x00b7, 0x00b9,
+	0x00b3, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, /* 0xa0-0xa7 */
+	0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, /* 0xb0-0xb7 */
+	0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, /* 0xb8-0xbf */
+	0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
+	0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, /* 0xc8-0xcf */
+	0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, /* 0xd0-0xd7 */
+	0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
+	0xd0, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x9b, 0x97, 0xa3, 0x96, 0x81, 0xec, 0xe7, 0x98, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, /* 0x10-0x17 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   NULL,   NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0x85, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd0, 0x88, 0x89, 0x8a, 0xd5, 0xa1, 0x8c, /* 0xd0-0xd7 */
+	0x8b, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0x8d, 0xdf, /* 0xd8-0xdf */
+	0xa2, 0xe1, 0x93, 0x95, 0xe4, 0xe4, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe7, 0xa3, 0x96, 0x97, 0xec, 0xec, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xb7, 0x8f, 0x80, /* 0x80-0x87 */
+	0xd2, 0xd3, 0xd4, 0xd8, 0xd7, 0xde, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0xe2, 0x99, 0xe3, 0xea, 0xeb, /* 0x90-0x97 */
+	0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0xb5, 0xd6, 0xe0, 0xe9, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd1, 0xd1, 0xd2, 0xd3, 0xd4, 0x49, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe5, 0xe5, 0x00, 0xe8, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xed, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp850",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp850(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp850(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp850)
+module_exit(exit_nls_cp850)
+
+MODULE_LICENSE("Dual BSD/GPL");
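Besides the Unicode conversion tables, each codepage also carries charset2lower[] and charset2upper[]: plain 256-byte case maps over the codepage's own bytes, which filesystems that do case-insensitive name matching (FAT, for one) rely on. In the cp850 tables above, for instance, charset2upper[0x87] is 0x80, folding 'ç' (byte 0x87, U+00E7) to 'Ç' (byte 0x80, U+00C7), while the few bytes with no uppercase counterpart in cp850 (0x98 'ÿ', 0x9f 'ƒ', 0xe6 'µ') map to 0x00. A tiny sketch of that folding, assuming just the identity default plus the one entry quoted above (the demo_* names are illustrative, not part of this patch):

	#include <stdio.h>

	static unsigned char demo_upper[256];	/* stand-in for the cp850 charset2upper[] table */

	int main(void)
	{
		int i;
		unsigned char c = 0x87;			/* cp850 byte for 'ç' */

		for (i = 0; i < 256; i++)
			demo_upper[i] = i;		/* identity by default */
		demo_upper[0x87] = 0x80;		/* from charset2upper above: ç -> Ç */

		printf("0x%02x folds to 0x%02x\n", c, demo_upper[c]);	/* 0x87 folds to 0x80 */
		return 0;
	}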
diff --git a/fs/nls/nls_cp852.c b/fs/nls/nls_cp852.c
new file mode 100644
index 0000000..b822a7b
--- /dev/null
+++ b/fs/nls/nls_cp852.c
@@ -0,0 +1,338 @@
+/*
+ * linux/fs/nls/nls_cp852.c
+ *
+ * Charset cp852 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x016f, 0x0107, 0x00e7,
+	0x0142, 0x00eb, 0x0150, 0x0151,
+	0x00ee, 0x0179, 0x00c4, 0x0106,
+	/* 0x90*/
+	0x00c9, 0x0139, 0x013a, 0x00f4,
+	0x00f6, 0x013d, 0x013e, 0x015a,
+	0x015b, 0x00d6, 0x00dc, 0x0164,
+	0x0165, 0x0141, 0x00d7, 0x010d,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x0104, 0x0105, 0x017d, 0x017e,
+	0x0118, 0x0119, 0x00ac, 0x017a,
+	0x010c, 0x015f, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x00c1, 0x00c2, 0x011a,
+	0x015e, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x017b, 0x017c, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x0102, 0x0103,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x00a4,
+	/* 0xd0*/
+	0x0111, 0x0110, 0x010e, 0x00cb,
+	0x010f, 0x0147, 0x00cd, 0x00ce,
+	0x011b, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x0162, 0x016e, 0x2580,
+	/* 0xe0*/
+	0x00d3, 0x00df, 0x00d4, 0x0143,
+	0x0144, 0x0148, 0x0160, 0x0161,
+	0x0154, 0x00da, 0x0155, 0x0170,
+	0x00fd, 0x00dd, 0x0163, 0x00b4,
+	/* 0xf0*/
+	0x00ad, 0x02dd, 0x02db, 0x02c7,
+	0x02d8, 0x00a7, 0x00f7, 0x00b8,
+	0x00b0, 0x00a8, 0x02d9, 0x0171,
+	0x0158, 0x0159, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0xf5, /* 0xa0-0xa7 */
+	0xf9, 0x00, 0x00, 0xae, 0xaa, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0x00, 0x00, 0x00, 0xef, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0xf7, 0x00, 0x00, 0xaf, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0xb5, 0xb6, 0x00, 0x8e, 0x00, 0x00, 0x80, /* 0xc0-0xc7 */
+	0x00, 0x90, 0x00, 0xd3, 0x00, 0xd6, 0xd7, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xe0, 0xe2, 0x00, 0x99, 0x9e, /* 0xd0-0xd7 */
+	0x00, 0x00, 0xe9, 0x00, 0x9a, 0xed, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x00, 0xa0, 0x83, 0x00, 0x84, 0x00, 0x00, 0x87, /* 0xe0-0xe7 */
+	0x00, 0x82, 0x00, 0x89, 0x00, 0xa1, 0x8c, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x00, 0x00, 0xa3, 0x00, 0x81, 0xec, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0xc6, 0xc7, 0xa4, 0xa5, 0x8f, 0x86, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xac, 0x9f, 0xd2, 0xd4, /* 0x08-0x0f */
+	0xd1, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xa8, 0xa9, 0xb7, 0xd8, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x91, 0x92, 0x00, 0x00, 0x95, 0x96, 0x00, /* 0x38-0x3f */
+	0x00, 0x9d, 0x88, 0xe3, 0xe4, 0x00, 0x00, 0xd5, /* 0x40-0x47 */
+	0xe5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x8a, 0x8b, 0x00, 0x00, 0xe8, 0xea, 0x00, 0x00, /* 0x50-0x57 */
+	0xfc, 0xfd, 0x97, 0x98, 0x00, 0x00, 0xb8, 0xad, /* 0x58-0x5f */
+	0xe6, 0xe7, 0xdd, 0xee, 0x9b, 0x9c, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x85, /* 0x68-0x6f */
+	0xeb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x8d, 0xab, 0xbd, 0xbe, 0xa6, 0xa7, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xf4, 0xfa, 0x00, 0xf2, 0x00, 0xf1, 0x00, 0x00, /* 0xd8-0xdf */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8b, 0x8b, 0x8c, 0xab, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x92, 0x92, 0x93, 0x94, 0x96, 0x96, 0x98, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9c, 0x9c, 0x88, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa5, 0xa5, 0xa7, 0xa7, /* 0xa0-0xa7 */
+	0xa9, 0xa9, 0xaa, 0xab, 0x9f, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0xd8, /* 0xb0-0xb7 */
+	0xad, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd0, 0xd4, 0x89, 0xd4, 0xe5, 0xa1, 0x8c, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xee, 0x85, 0xdf, /* 0xd8-0xdf */
+	0xa2, 0xe1, 0x93, 0xe4, 0xe4, 0xe5, 0xe7, 0xe7, /* 0xe0-0xe7 */
+	0xea, 0xa3, 0xea, 0xfb, 0xec, 0xec, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfd, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xde, 0x8f, 0x80, /* 0x80-0x87 */
+	0x9d, 0xd3, 0x8a, 0x8a, 0xd7, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x91, 0xe2, 0x99, 0x95, 0x95, 0x97, /* 0x90-0x97 */
+	0x97, 0x99, 0x9a, 0x9b, 0x9b, 0x9d, 0x9e, 0xac, /* 0x98-0x9f */
+	0xb5, 0xd6, 0xe0, 0xe9, 0xa4, 0xa4, 0xa6, 0xa6, /* 0xa0-0xa7 */
+	0xa8, 0xa8, 0xaa, 0x8d, 0xac, 0xb8, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd1, 0xd1, 0xd2, 0xd3, 0xd2, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xb7, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe3, 0xd5, 0xe6, 0xe6, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xe8, 0xeb, 0xed, 0xed, 0xdd, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xeb, 0xfc, 0xfc, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp852",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp852(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp852(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp852)
+module_exit(exit_nls_cp852)
+
+MODULE_LICENSE("Dual BSD/GPL");
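For reference, the uni2char() helper above performs a two-level lookup: the high byte of the Unicode code point selects one of the page_uni2charset sub-tables, the low byte indexes into it, and a 0x00 entry doubles as "no mapping". The user-space sketch below only illustrates that scheme; demo_uni2char, demo_pages and the ASCII-only table contents are hypothetical stand-ins, not cp852 data and not the kernel NLS API.

#include <stdio.h>

/*
 * Illustrative two-level lookup in the style of uni2char() above.
 * Only Unicode page 0x00 is populated, and only for a few ASCII
 * bytes, which every codepage maps to themselves.
 */
static unsigned char demo_page00[256] = {
	['A'] = 'A', ['B'] = 'B', ['z'] = 'z',
};

static unsigned char *demo_pages[256] = {
	[0x00] = demo_page00,		/* all other pages stay NULL (unmapped) */
};

static int demo_uni2char(unsigned int uni, unsigned char *out)
{
	unsigned char cl = uni & 0x00ff;	/* offset inside the page */
	unsigned char ch = (uni & 0xff00) >> 8;	/* Unicode page number */
	unsigned char *page = demo_pages[ch];

	if (page && page[cl]) {		/* 0x00 entry means "no mapping" */
		*out = page[cl];
		return 1;
	}
	return -1;			/* stand-in for -EINVAL */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x0041, &c) == 1)	/* U+0041 LATIN CAPITAL LETTER A */
		printf("U+0041 -> 0x%02x\n", c);
	if (demo_uni2char(0x20ac, &c) != 1)	/* U+20AC EURO SIGN: not mapped here */
		printf("U+20AC has no mapping in this sketch\n");
	return 0;
}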
diff --git a/fs/nls/nls_cp855.c b/fs/nls/nls_cp855.c
new file mode 100644
index 0000000..e8641b7
--- /dev/null
+++ b/fs/nls/nls_cp855.c
@@ -0,0 +1,300 @@
+/*
+ * linux/fs/nls/nls_cp855.c
+ *
+ * Charset cp855 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0452, 0x0402, 0x0453, 0x0403,
+	0x0451, 0x0401, 0x0454, 0x0404,
+	0x0455, 0x0405, 0x0456, 0x0406,
+	0x0457, 0x0407, 0x0458, 0x0408,
+	/* 0x90*/
+	0x0459, 0x0409, 0x045a, 0x040a,
+	0x045b, 0x040b, 0x045c, 0x040c,
+	0x045e, 0x040e, 0x045f, 0x040f,
+	0x044e, 0x042e, 0x044a, 0x042a,
+	/* 0xa0*/
+	0x0430, 0x0410, 0x0431, 0x0411,
+	0x0446, 0x0426, 0x0434, 0x0414,
+	0x0435, 0x0415, 0x0444, 0x0424,
+	0x0433, 0x0413, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x0445, 0x0425, 0x0438,
+	0x0418, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x0439, 0x0419, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x043a, 0x041a,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x00a4,
+	/* 0xd0*/
+	0x043b, 0x041b, 0x043c, 0x041c,
+	0x043d, 0x041d, 0x043e, 0x041e,
+	0x043f, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x041f, 0x044f, 0x2580,
+	/* 0xe0*/
+	0x042f, 0x0440, 0x0420, 0x0441,
+	0x0421, 0x0442, 0x0422, 0x0443,
+	0x0423, 0x0436, 0x0416, 0x0432,
+	0x0412, 0x044c, 0x042c, 0x2116,
+	/* 0xf0*/
+	0x00ad, 0x044b, 0x042b, 0x0437,
+	0x0417, 0x0448, 0x0428, 0x044d,
+	0x042d, 0x0449, 0x0429, 0x0447,
+	0x0427, 0x00a7, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0xfd, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0xae, 0x00, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+};
+
+static unsigned char page04[256] = {
+	0x00, 0x85, 0x81, 0x83, 0x87, 0x89, 0x8b, 0x8d, /* 0x00-0x07 */
+	0x8f, 0x91, 0x93, 0x95, 0x97, 0x00, 0x99, 0x9b, /* 0x08-0x0f */
+	0xa1, 0xa3, 0xec, 0xad, 0xa7, 0xa9, 0xea, 0xf4, /* 0x10-0x17 */
+	0xb8, 0xbe, 0xc7, 0xd1, 0xd3, 0xd5, 0xd7, 0xdd, /* 0x18-0x1f */
+	0xe2, 0xe4, 0xe6, 0xe8, 0xab, 0xb6, 0xa5, 0xfc, /* 0x20-0x27 */
+	0xf6, 0xfa, 0x9f, 0xf2, 0xee, 0xf8, 0x9d, 0xe0, /* 0x28-0x2f */
+	0xa0, 0xa2, 0xeb, 0xac, 0xa6, 0xa8, 0xe9, 0xf3, /* 0x30-0x37 */
+	0xb7, 0xbd, 0xc6, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, /* 0x38-0x3f */
+	0xe1, 0xe3, 0xe5, 0xe7, 0xaa, 0xb5, 0xa4, 0xfb, /* 0x40-0x47 */
+	0xf5, 0xf9, 0x9e, 0xf1, 0xed, 0xf7, 0x9c, 0xde, /* 0x48-0x4f */
+	0x00, 0x84, 0x80, 0x82, 0x86, 0x88, 0x8a, 0x8c, /* 0x50-0x57 */
+	0x8e, 0x90, 0x92, 0x94, 0x96, 0x00, 0x98, 0x9a, /* 0x58-0x5f */
+};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, /* 0x10-0x17 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   page21, NULL,   NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x80, 0x82, 0x82, 0x84, 0x84, 0x86, 0x86, /* 0x80-0x87 */
+	0x88, 0x88, 0x8a, 0x8a, 0x8c, 0x8c, 0x8e, 0x8e, /* 0x88-0x8f */
+	0x90, 0x90, 0x92, 0x92, 0x94, 0x94, 0x96, 0x96, /* 0x90-0x97 */
+	0x98, 0x98, 0x9a, 0x9a, 0x9c, 0x9c, 0x9e, 0x9e, /* 0x98-0x9f */
+	0xa0, 0xa0, 0xa2, 0xa2, 0xa4, 0xa4, 0xa6, 0xa6, /* 0xa0-0xa7 */
+	0xa8, 0xa8, 0xaa, 0xaa, 0xac, 0xac, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb5, 0xb7, /* 0xb0-0xb7 */
+	0xb7, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd0, 0xd2, 0xd2, 0xd4, 0xd4, 0xd6, 0xd6, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xd8, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xde, 0xe1, 0xe1, 0xe3, 0xe3, 0xe5, 0xe5, 0xe7, /* 0xe0-0xe7 */
+	0xe7, 0xe9, 0xe9, 0xeb, 0xeb, 0xed, 0xed, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, /* 0xf0-0xf7 */
+	0xf7, 0xf9, 0xf9, 0xfb, 0xfb, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x81, 0x81, 0x83, 0x83, 0x85, 0x85, 0x87, 0x87, /* 0x80-0x87 */
+	0x89, 0x89, 0x8b, 0x8b, 0x8d, 0x8d, 0x8f, 0x8f, /* 0x88-0x8f */
+	0x91, 0x91, 0x93, 0x93, 0x95, 0x95, 0x97, 0x97, /* 0x90-0x97 */
+	0x99, 0x99, 0x9b, 0x9b, 0x9d, 0x9d, 0x9f, 0x9f, /* 0x98-0x9f */
+	0xa1, 0xa1, 0xa3, 0xa3, 0xa5, 0xa5, 0xa7, 0xa7, /* 0xa0-0xa7 */
+	0xa9, 0xa9, 0xab, 0xab, 0xad, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb6, 0xb6, 0xb8, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd1, 0xd1, 0xd3, 0xd3, 0xd5, 0xd5, 0xd7, 0xd7, /* 0xd0-0xd7 */
+	0xdd, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xe0, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe2, 0xe2, 0xe4, 0xe4, 0xe6, 0xe6, 0xe8, /* 0xe0-0xe7 */
+	0xe8, 0xea, 0xea, 0xec, 0xec, 0xee, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, /* 0xf0-0xf7 */
+	0xf8, 0xfa, 0xfa, 0xfc, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp855",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp855(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp855(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp855)
+module_exit(exit_nls_cp855)
+
+MODULE_LICENSE("Dual BSD/GPL");
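The charset2lower[] and charset2upper[] tables are meant for filesystems such as FAT that compare 8-bit names case-insensitively: one table lookup per byte, no multi-byte handling. The sketch below shows that usage pattern in user space; demo_lower and demo_strnicmp are hypothetical placeholders filled with plain ASCII folding rather than the cp855 data above.

#include <stdio.h>

/*
 * Case-insensitive comparison driven by a 256-entry folding table,
 * the way charset2lower[] is typically consumed.  The table here is
 * a placeholder (ASCII folding only) so the example stays
 * self-contained.
 */
static unsigned char demo_lower[256];

static int demo_strnicmp(const unsigned char *a, const unsigned char *b, size_t n)
{
	while (n--) {
		unsigned char ca = demo_lower[*a++];
		unsigned char cb = demo_lower[*b++];

		if (ca != cb)
			return ca - cb;
		if (!ca)		/* both strings ended */
			break;
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 256; i++)
		demo_lower[i] = (i >= 'A' && i <= 'Z') ? i + 0x20 : i;

	printf("%d\n", demo_strnicmp((const unsigned char *)"README.TXT",
				     (const unsigned char *)"readme.txt", 11));
	return 0;
}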
diff --git a/fs/nls/nls_cp857.c b/fs/nls/nls_cp857.c
new file mode 100644
index 0000000..7ba589e
--- /dev/null
+++ b/fs/nls/nls_cp857.c
@@ -0,0 +1,302 @@
+/*
+ * linux/fs/nls/nls_cp857.c
+ *
+ * Charset cp857 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x00e0, 0x00e5, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00ef,
+	0x00ee, 0x0131, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x00f4,
+	0x00f6, 0x00f2, 0x00fb, 0x00f9,
+	0x0130, 0x00d6, 0x00dc, 0x00f8,
+	0x00a3, 0x00d8, 0x015e, 0x015f,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x011e, 0x011f,
+	0x00bf, 0x00ae, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x00c1, 0x00c2, 0x00c0,
+	0x00a9, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x00a2, 0x00a5, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x00e3, 0x00c3,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x00a4,
+	/* 0xd0*/
+	0x00ba, 0x00aa, 0x00ca, 0x00cb,
+	0x00c8, 0x0000, 0x00cd, 0x00ce,
+	0x00cf, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x00a6, 0x00cc, 0x2580,
+	/* 0xe0*/
+	0x00d3, 0x00df, 0x00d4, 0x00d2,
+	0x00f5, 0x00d5, 0x00b5, 0x0000,
+	0x00d7, 0x00da, 0x00db, 0x00d9,
+	0x00ec, 0x00ff, 0x00af, 0x00b4,
+	/* 0xf0*/
+	0x00ad, 0x00b1, 0x0000, 0x00be,
+	0x00b6, 0x00a7, 0x00f7, 0x00b8,
+	0x00b0, 0x00a8, 0x00b7, 0x00b9,
+	0x00b3, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, /* 0xa0-0xa7 */
+	0xf9, 0xb8, 0xd1, 0xae, 0xaa, 0xf0, 0xa9, 0xee, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, /* 0xb0-0xb7 */
+	0xf7, 0xfb, 0xd0, 0xaf, 0xac, 0xab, 0xf3, 0xa8, /* 0xb8-0xbf */
+	0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
+	0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, /* 0xc8-0xcf */
+	0x00, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0xe8, /* 0xd0-0xd7 */
+	0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0xec, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
+	0x00, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0xed, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0xa7, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x98, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x9f, /* 0x58-0x5f */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x69, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9f, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa7, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0x85, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0x88, 0x89, 0x8a, 0x00, 0xa1, 0x8c, /* 0xd0-0xd7 */
+	0x8b, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xec, 0xdf, /* 0xd8-0xdf */
+	0xa2, 0xe1, 0x93, 0x95, 0xe4, 0xe4, 0xe6, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xa3, 0x96, 0x97, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xb7, 0x8f, 0x80, /* 0x80-0x87 */
+	0xd2, 0xd3, 0xd4, 0xd8, 0xd7, 0x49, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0xe2, 0x99, 0xe3, 0xea, 0xeb, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x9e, /* 0x98-0x9f */
+	0xb5, 0xd6, 0xe0, 0xe9, 0xa5, 0xa5, 0xa6, 0xa6, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe5, 0xe5, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xde, 0x00, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp857",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp857(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp857(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp857)
+module_exit(exit_nls_cp857)
+
+MODULE_LICENSE("Dual BSD/GPL");
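A few cp857 bytes above (0xd5, 0xe7 and 0xf2) map to 0x0000, i.e. they have no exact Unicode equivalent, and char2uni() rejects them with -EINVAL. The user-space check below illustrates the invariant the generated tables are expected to satisfy: every byte that does have a mapping should round-trip through the reverse page tables. demo_charset2uni and demo_page00 are tiny hypothetical stand-ins, not the real cp857 data.

#include <stdio.h>

/*
 * Round-trip check: for every byte with a Unicode mapping, the
 * reverse (page) table should translate the code point back to the
 * same byte.  The tables below cover only three ASCII values so the
 * example is self-contained.
 */
static unsigned short demo_charset2uni[256] = {
	['0'] = 0x0030, ['1'] = 0x0031, ['A'] = 0x0041,
};

static unsigned char demo_page00[256] = {
	[0x30] = '0', [0x31] = '1', [0x41] = 'A',
};

int main(void)
{
	int b, bad = 0;

	for (b = 1; b < 256; b++) {
		unsigned short uni = demo_charset2uni[b];

		if (!uni)
			continue;	/* byte has no Unicode equivalent */
		if ((uni >> 8) != 0x00 || demo_page00[uni & 0xff] != b) {
			printf("byte 0x%02x does not round-trip\n", b);
			bad = 1;
		}
	}
	printf(bad ? "tables inconsistent\n" : "tables consistent\n");
	return 0;
}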
diff --git a/fs/nls/nls_cp860.c b/fs/nls/nls_cp860.c
new file mode 100644
index 0000000..3b9e49c
--- /dev/null
+++ b/fs/nls/nls_cp860.c
@@ -0,0 +1,365 @@
+/*
+ * linux/fs/nls/nls_cp860.c
+ *
+ * Charset cp860 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e3, 0x00e0, 0x00c1, 0x00e7,
+	0x00ea, 0x00ca, 0x00e8, 0x00cd,
+	0x00d4, 0x00ec, 0x00c3, 0x00c2,
+	/* 0x90*/
+	0x00c9, 0x00c0, 0x00c8, 0x00f4,
+	0x00f5, 0x00f2, 0x00da, 0x00f9,
+	0x00cc, 0x00d5, 0x00dc, 0x00a2,
+	0x00a3, 0x00d9, 0x20a7, 0x00d3,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x00aa, 0x00ba,
+	0x00bf, 0x00d2, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0x9b, 0x9c, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
+	0x91, 0x86, 0x8f, 0x8e, 0x00, 0x00, 0x00, 0x80, /* 0xc0-0xc7 */
+	0x92, 0x90, 0x89, 0x00, 0x98, 0x8b, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0xa5, 0xa9, 0x9f, 0x8c, 0x99, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x9d, 0x96, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0x84, 0x00, 0x00, 0x00, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x00, 0x8d, 0xa1, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0xa4, 0x95, 0xa2, 0x93, 0x94, 0x00, 0xf6, /* 0xf0-0xf7 */
+	0x00, 0x97, 0xa3, 0x00, 0x81, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0xa0, 0x87, /* 0x80-0x87 */
+	0x88, 0x88, 0x8a, 0xa1, 0x93, 0x8d, 0x84, 0x83, /* 0x88-0x8f */
+	0x82, 0x85, 0x8a, 0x93, 0x94, 0x95, 0xa3, 0x97, /* 0x90-0x97 */
+	0x8d, 0x94, 0x81, 0x9b, 0x9c, 0x97, 0x9e, 0xa2, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0x95, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0x8f, 0x8e, 0x91, 0x86, 0x80, /* 0x80-0x87 */
+	0x89, 0x89, 0x92, 0x8b, 0x8c, 0x98, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x8c, 0x99, 0xa9, 0x96, 0x9d, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0x86, 0x8b, 0x9f, 0x96, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp860",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp860(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp860(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp860)
+module_exit(exit_nls_cp860)
+
+MODULE_LICENSE("Dual BSD/GPL");
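Callers convert one character at a time, passing the space left in the output buffer as boundlen and stopping on a negative return: -ENAMETOOLONG when the buffer is full, -EINVAL for an unmappable character. The sketch below mirrors that calling convention in user space; demo_uni2char is a hypothetical ASCII-only converter, not the kernel interface, and its error values are plain stand-ins.

#include <stdio.h>

/*
 * Convert a "Unicode" name to 8-bit bytes using the same calling
 * convention as uni2char() above: at most boundlen bytes are written
 * and a negative value reports an error.
 */
static int demo_uni2char(unsigned int uni, unsigned char *out, int boundlen)
{
	if (boundlen <= 0)
		return -36;		/* stand-in for -ENAMETOOLONG */
	if (uni == 0 || uni > 0x7f)
		return -22;		/* stand-in for -EINVAL */
	out[0] = (unsigned char)uni;
	return 1;
}

int main(void)
{
	unsigned int name[] = { 'h', 'i', '.', 't', 'x', 't', 0 };
	unsigned char buf[4];		/* deliberately too small */
	int i, n = 0;

	for (i = 0; name[i]; i++) {
		int len = demo_uni2char(name[i], buf + n, (int)sizeof(buf) - n);

		if (len < 0) {		/* buffer full or unmappable character */
			printf("stopped at index %d (error %d)\n", i, len);
			break;
		}
		n += len;
	}
	printf("converted %d bytes\n", n);
	return 0;
}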
diff --git a/fs/nls/nls_cp861.c b/fs/nls/nls_cp861.c
new file mode 100644
index 0000000..959ff64e
--- /dev/null
+++ b/fs/nls/nls_cp861.c
@@ -0,0 +1,388 @@
+/*
+ * linux/fs/nls/nls_cp861.c
+ *
+ * Charset cp861 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x00e0, 0x00e5, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00d0,
+	0x00f0, 0x00de, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x00f4,
+	0x00f6, 0x00fe, 0x00fb, 0x00dd,
+	0x00fd, 0x00d6, 0x00dc, 0x00f8,
+	0x00a3, 0x00d8, 0x20a7, 0x0192,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00c1, 0x00cd, 0x00d3, 0x00da,
+	0x00bf, 0x2310, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
+	0x00, 0xa4, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
+	0x00, 0x90, 0x00, 0x00, 0x00, 0xa5, 0x00, 0x00, /* 0xc8-0xcf */
+	0x8b, 0x00, 0x00, 0xa6, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */
+	0x9d, 0x00, 0xa7, 0x00, 0x9a, 0x97, 0x8d, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */
+	0x8c, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x9b, 0x00, 0xa3, 0x96, 0x81, 0x98, 0x95, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8c, 0x8c, 0x95, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x98, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa0, 0xa1, 0xa2, 0xa3, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x8b, 0x8b, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0x00, 0x99, 0x8d, 0x00, 0x97, /* 0x90-0x97 */
+	0x97, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0xa4, 0xa5, 0xa6, 0xa7, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp861",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp861(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp861(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp861)
+module_exit(exit_nls_cp861)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp862.c b/fs/nls/nls_cp862.c
new file mode 100644
index 0000000..b96928f
--- /dev/null
+++ b/fs/nls/nls_cp862.c
@@ -0,0 +1,422 @@
+/*
+ * linux/fs/nls/nls_cp862.c
+ *
+ * Charset cp862 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x05d0, 0x05d1, 0x05d2, 0x05d3,
+	0x05d4, 0x05d5, 0x05d6, 0x05d7,
+	0x05d8, 0x05d9, 0x05da, 0x05db,
+	0x05dc, 0x05dd, 0x05de, 0x05df,
+	/* 0x90*/
+	0x05e0, 0x05e1, 0x05e2, 0x05e3,
+	0x05e4, 0x05e5, 0x05e6, 0x05e7,
+	0x05e8, 0x05e9, 0x05ea, 0x00a2,
+	0x00a3, 0x00a5, 0x20a7, 0x0192,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x00aa, 0x00ba,
+	0x00bf, 0x2310, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0xa4, 0x00, 0xa2, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */
+	0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page05[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xd0-0xd7 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xd8-0xdf */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */
+	0x98, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   page05, NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp862",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp862(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp862(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp862)
+module_exit(exit_nls_cp862)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp863.c b/fs/nls/nls_cp863.c
new file mode 100644
index 0000000..baa6e0e
--- /dev/null
+++ b/fs/nls/nls_cp863.c
@@ -0,0 +1,382 @@
+/*
+ * linux/fs/nls/nls_cp863.c
+ *
+ * Charset cp863 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00c2, 0x00e0, 0x00b6, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00ef,
+	0x00ee, 0x2017, 0x00c0, 0x00a7,
+	/* 0x90*/
+	0x00c9, 0x00c8, 0x00ca, 0x00f4,
+	0x00cb, 0x00cf, 0x00fb, 0x00f9,
+	0x00a4, 0x00d4, 0x00dc, 0x00a2,
+	0x00a3, 0x00d9, 0x00db, 0x0192,
+	/* 0xa0*/
+	0x00a6, 0x00b4, 0x00f3, 0x00fa,
+	0x00a8, 0x00b8, 0x00b3, 0x00af,
+	0x00ce, 0x2310, 0x00ac, 0x00bd,
+	0x00bc, 0x00be, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x9b, 0x9c, 0x98, 0x00, 0xa0, 0x8f, /* 0xa0-0xa7 */
+	0xa4, 0x00, 0x00, 0xae, 0xaa, 0x00, 0x00, 0xa7, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0xa6, 0xa1, 0xe6, 0x86, 0xfa, /* 0xb0-0xb7 */
+	0xa5, 0x00, 0x00, 0xaf, 0xac, 0xab, 0xad, 0x00, /* 0xb8-0xbf */
+	0x8e, 0x00, 0x84, 0x00, 0x00, 0x00, 0x00, 0x80, /* 0xc0-0xc7 */
+	0x91, 0x90, 0x92, 0x94, 0x00, 0x00, 0xa8, 0x95, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x9d, 0x00, 0x9e, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x85, 0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0x00, 0x00, 0x8c, 0x8b, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */
+	0x00, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x83, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x85, 0x8f, /* 0x88-0x8f */
+	0x82, 0x8a, 0x88, 0x93, 0x89, 0x8b, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x93, 0x81, 0x9b, 0x9c, 0x97, 0x96, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0x8c, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0x84, 0x84, 0x8e, 0x86, 0x80, /* 0x80-0x87 */
+	0x92, 0x94, 0x91, 0x95, 0xa8, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x99, 0x94, 0x95, 0x9e, 0x9d, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0xa0, 0xa1, 0x00, 0x00, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp863",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp863(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp863(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp863)
+module_exit(exit_nls_cp863)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp864.c b/fs/nls/nls_cp864.c
new file mode 100644
index 0000000..f4dabb0
--- /dev/null
+++ b/fs/nls/nls_cp864.c
@@ -0,0 +1,408 @@
+/*
+ * linux/fs/nls/nls_cp864.c
+ *
+ * Charset cp864 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables published by the Unicode Consortium (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x066a, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00b0, 0x00b7, 0x2219, 0x221a,
+	0x2592, 0x2500, 0x2502, 0x253c,
+	0x2524, 0x252c, 0x251c, 0x2534,
+	0x2510, 0x250c, 0x2514, 0x2518,
+	/* 0x90*/
+	0x03b2, 0x221e, 0x03c6, 0x00b1,
+	0x00bd, 0x00bc, 0x2248, 0x00ab,
+	0x00bb, 0xfef7, 0xfef8, 0x0000,
+	0x0000, 0xfefb, 0xfefc, 0x0000,
+	/* 0xa0*/
+	0x00a0, 0x00ad, 0xfe82, 0x00a3,
+	0x00a4, 0xfe84, 0x0000, 0x0000,
+	0xfe8e, 0xfe8f, 0xfe95, 0xfe99,
+	0x060c, 0xfe9d, 0xfea1, 0xfea5,
+	/* 0xb0*/
+	0x0660, 0x0661, 0x0662, 0x0663,
+	0x0664, 0x0665, 0x0666, 0x0667,
+	0x0668, 0x0669, 0xfed1, 0x061b,
+	0xfeb1, 0xfeb5, 0xfeb9, 0x061f,
+	/* 0xc0*/
+	0x00a2, 0xfe80, 0xfe81, 0xfe83,
+	0xfe85, 0xfeca, 0xfe8b, 0xfe8d,
+	0xfe91, 0xfe93, 0xfe97, 0xfe9b,
+	0xfe9f, 0xfea3, 0xfea7, 0xfea9,
+	/* 0xd0*/
+	0xfeab, 0xfead, 0xfeaf, 0xfeb3,
+	0xfeb7, 0xfebb, 0xfebf, 0xfec1,
+	0xfec5, 0xfecb, 0xfecf, 0x00a6,
+	0x00ac, 0x00f7, 0x00d7, 0xfec9,
+	/* 0xe0*/
+	0x0640, 0xfed3, 0xfed7, 0xfedb,
+	0xfedf, 0xfee3, 0xfee7, 0xfeeb,
+	0xfeed, 0xfeef, 0xfef3, 0xfebd,
+	0xfecc, 0xfece, 0xfecd, 0xfee1,
+	/* 0xf0*/
+	0xfe7d, 0x0651, 0xfee5, 0xfee9,
+	0xfeec, 0xfef0, 0xfef2, 0xfed0,
+	0xfed5, 0xfef5, 0xfef6, 0xfedd,
+	0xfed9, 0xfef1, 0x25a0, 0x0000,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x00, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0xc0, 0xa3, 0xa4, 0x00, 0xdb, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x97, 0xdc, 0xa1, 0x00, 0x00, /* 0xa8-0xaf */
+	0x80, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x98, 0x95, 0x94, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, /* 0xf0-0xf7 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page06[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0xf1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x60-0x67 */
+	0xb8, 0xb9, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x82, 0x83, 0x00, 0x00, 0x00, 0x91, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+};
+
+static unsigned char page25[256] = {
+	0x85, 0x00, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x8c, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x8f, 0x00, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char pagefe[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xc1, 0xc2, 0xa2, 0xc3, 0xa5, 0xc4, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0xc6, 0x00, 0xc7, 0xa8, 0xa9, /* 0x88-0x8f */
+	0x00, 0xc8, 0x00, 0xc9, 0x00, 0xaa, 0x00, 0xca, /* 0x90-0x97 */
+	0x00, 0xab, 0x00, 0xcb, 0x00, 0xad, 0x00, 0xcc, /* 0x98-0x9f */
+	0x00, 0xae, 0x00, 0xcd, 0x00, 0xaf, 0x00, 0xce, /* 0xa0-0xa7 */
+	0x00, 0xcf, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0xd2, /* 0xa8-0xaf */
+	0x00, 0xbc, 0x00, 0xd3, 0x00, 0xbd, 0x00, 0xd4, /* 0xb0-0xb7 */
+	0x00, 0xbe, 0x00, 0xd5, 0x00, 0xeb, 0x00, 0xd6, /* 0xb8-0xbf */
+	0x00, 0xd7, 0x00, 0x00, 0x00, 0xd8, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0xdf, 0xc5, 0xd9, 0xec, 0xee, 0xed, 0xda, /* 0xc8-0xcf */
+	0xf7, 0xba, 0x00, 0xe1, 0x00, 0xf8, 0x00, 0xe2, /* 0xd0-0xd7 */
+	0x00, 0xfc, 0x00, 0xe3, 0x00, 0xfb, 0x00, 0xe4, /* 0xd8-0xdf */
+	0x00, 0xef, 0x00, 0xe5, 0x00, 0xf2, 0x00, 0xe6, /* 0xe0-0xe7 */
+	0x00, 0xf3, 0x00, 0xe7, 0xf4, 0xe8, 0x00, 0xe9, /* 0xe8-0xef */
+	0xf5, 0xfd, 0xf6, 0xea, 0x00, 0xf9, 0xfa, 0x99, /* 0xf0-0xf7 */
+	0x9a, 0x00, 0x00, 0x9d, 0x9e, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   page03, NULL,   NULL,   page06, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   page22, NULL,   NULL,   page25, NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   pagefe, NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x00, 0x00, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x00, 0x91, 0x00, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x00, 0x00, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp864",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp864(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp864(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp864)
+module_exit(exit_nls_cp864)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp865.c b/fs/nls/nls_cp865.c
new file mode 100644
index 0000000..4caeafa
--- /dev/null
+++ b/fs/nls/nls_cp865.c
@@ -0,0 +1,388 @@
+/*
+ * linux/fs/nls/nls_cp865.c
+ *
+ * Charset cp865 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x00c7, 0x00fc, 0x00e9, 0x00e2,
+	0x00e4, 0x00e0, 0x00e5, 0x00e7,
+	0x00ea, 0x00eb, 0x00e8, 0x00ef,
+	0x00ee, 0x00ec, 0x00c4, 0x00c5,
+	/* 0x90*/
+	0x00c9, 0x00e6, 0x00c6, 0x00f4,
+	0x00f6, 0x00f2, 0x00fb, 0x00f9,
+	0x00ff, 0x00d6, 0x00dc, 0x00f8,
+	0x00a3, 0x00d8, 0x20a7, 0x0192,
+	/* 0xa0*/
+	0x00e1, 0x00ed, 0x00f3, 0x00fa,
+	0x00f1, 0x00d1, 0x00aa, 0x00ba,
+	0x00bf, 0x2310, 0x00ac, 0x00bd,
+	0x00bc, 0x00a1, 0x00ab, 0x00a4,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x03b1, 0x00df, 0x0393, 0x03c0,
+	0x03a3, 0x03c3, 0x00b5, 0x03c4,
+	0x03a6, 0x0398, 0x03a9, 0x03b4,
+	0x221e, 0x03c6, 0x03b5, 0x2229,
+	/* 0xf0*/
+	0x2261, 0x00b1, 0x2265, 0x2264,
+	0x2320, 0x2321, 0x00f7, 0x2248,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x207f, 0x00b2, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0xad, 0x00, 0x9c, 0xaf, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
+	0x00, 0x00, 0xa7, 0x00, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
+	0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */
+	0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
+	0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
+	0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
+	0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
+	0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
+	0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp865",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp865(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp865(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp865)
+module_exit(exit_nls_cp865)
+
+MODULE_LICENSE("Dual BSD/GPL");
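Every codepage module in this series follows the same two-level scheme: charset2uni[] maps a codepage byte directly to a Unicode code point, while the reverse direction first selects a 256-entry page by the high byte of the code point and then indexes it with the low byte, exactly as uni2char() does above. The following userspace sketch is illustrative only and not part of this patch; its tiny demo tables stand in for the generated ones and cover just the cp865 mapping U+00A0 <-> 0xff.

	#include <stdio.h>

	/* Tiny stand-ins for the generated tables: only the Latin-1 page
	 * is present, and only U+00A0 (no-break space) is populated,
	 * which cp865 encodes as 0xff. */
	static unsigned char demo_page00[256] = { [0xa0] = 0xff };
	static unsigned char *demo_pages[256] = { demo_page00 };

	/* Same lookup as uni2char(): the high byte picks the page, the
	 * low byte indexes into it; a zero entry means "no exact mapping". */
	static int uni_to_byte(unsigned short uni)
	{
		unsigned char *page = demo_pages[uni >> 8];

		return (page && page[uni & 0xff]) ? page[uni & 0xff] : -1;
	}

	int main(void)
	{
		printf("U+00A0 -> 0x%02x\n", uni_to_byte(0x00a0));	/* 0xff */
		printf("U+4E00 -> %d\n", uni_to_byte(0x4e00));		/* -1, unmapped */
		return 0;
	}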
diff --git a/fs/nls/nls_cp866.c b/fs/nls/nls_cp866.c
new file mode 100644
index 0000000..f2b4a9a
--- /dev/null
+++ b/fs/nls/nls_cp866.c
@@ -0,0 +1,306 @@
+/*
+ * linux/fs/nls/nls_cp866.c
+ *
+ * Charset cp866 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0410, 0x0411, 0x0412, 0x0413,
+	0x0414, 0x0415, 0x0416, 0x0417,
+	0x0418, 0x0419, 0x041a, 0x041b,
+	0x041c, 0x041d, 0x041e, 0x041f,
+	/* 0x90*/
+	0x0420, 0x0421, 0x0422, 0x0423,
+	0x0424, 0x0425, 0x0426, 0x0427,
+	0x0428, 0x0429, 0x042a, 0x042b,
+	0x042c, 0x042d, 0x042e, 0x042f,
+	/* 0xa0*/
+	0x0430, 0x0431, 0x0432, 0x0433,
+	0x0434, 0x0435, 0x0436, 0x0437,
+	0x0438, 0x0439, 0x043a, 0x043b,
+	0x043c, 0x043d, 0x043e, 0x043f,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x2561, 0x2562, 0x2556,
+	0x2555, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x255c, 0x255b, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x255e, 0x255f,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x2567,
+	/* 0xd0*/
+	0x2568, 0x2564, 0x2565, 0x2559,
+	0x2558, 0x2552, 0x2553, 0x256b,
+	0x256a, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x258c, 0x2590, 0x2580,
+	/* 0xe0*/
+	0x0440, 0x0441, 0x0442, 0x0443,
+	0x0444, 0x0445, 0x0446, 0x0447,
+	0x0448, 0x0449, 0x044a, 0x044b,
+	0x044c, 0x044d, 0x044e, 0x044f,
+	/* 0xf0*/
+	0x0401, 0x0451, 0x0404, 0x0454,
+	0x0407, 0x0457, 0x040e, 0x045e,
+	0x00b0, 0x2219, 0x00b7, 0x221a,
+	0x2116, 0x00a4, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */
+};
+
+static unsigned char page04[256] = {
+	0x00, 0xf0, 0x00, 0x00, 0xf2, 0x00, 0x00, 0xf4, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, /* 0x08-0x0f */
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x30-0x37 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x38-0x3f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
+	0x00, 0xf1, 0x00, 0x00, 0xf3, 0x00, 0x00, 0xf5, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x00, /* 0x58-0x5f */
+};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, /* 0x10-0x17 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
+	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
+	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
+	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   page21, page22, NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x80-0x87 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x88-0x8f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x90-0x97 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xa0-0xa7 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xe8-0xef */
+	0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp866",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp866(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp866(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp866)
+module_exit(exit_nls_cp866)
+
+MODULE_LICENSE("Dual BSD/GPL");
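The charset2lower[] and charset2upper[] arrays give each codepage a single-byte case fold that case-insensitive filesystems (FAT and friends) can apply without going through Unicode; in cp866 that is what makes 0x80 (Cyrillic capital A) and 0xa0 (Cyrillic small a) compare equal. The sketch below is a hedged illustration, not part of the patch: its table is a tiny hypothetical excerpt rather than the full 256-entry array above.

	#include <stdio.h>

	/* Hypothetical excerpt of a cp866-style lower-case table:
	 * ASCII A-Z fold to a-z and 0x80 folds to 0xa0; every other
	 * byte folds to itself. */
	static unsigned char to_lower[256];

	static void init_table(void)
	{
		int i;

		for (i = 0; i < 256; i++)
			to_lower[i] = i;
		for (i = 'A'; i <= 'Z'; i++)
			to_lower[i] = i - 'A' + 'a';
		to_lower[0x80] = 0xa0;	/* cp866 Cyrillic A -> a */
	}

	int main(void)
	{
		init_table();
		printf("0x80 folds to 0x%02x\n", to_lower[0x80]);	/* 0xa0 */
		return 0;
	}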
diff --git a/fs/nls/nls_cp869.c b/fs/nls/nls_cp869.c
new file mode 100644
index 0000000..12b436f
--- /dev/null
+++ b/fs/nls/nls_cp869.c
@@ -0,0 +1,316 @@
+/*
+ * linux/fs/nls/nls_cp869.c
+ *
+ * Charset cp869 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0386, 0x0000,
+	0x00b7, 0x00ac, 0x00a6, 0x2018,
+	0x2019, 0x0388, 0x2015, 0x0389,
+	/* 0x90*/
+	0x038a, 0x03aa, 0x038c, 0x0000,
+	0x0000, 0x038e, 0x03ab, 0x00a9,
+	0x038f, 0x00b2, 0x00b3, 0x03ac,
+	0x00a3, 0x03ad, 0x03ae, 0x03af,
+	/* 0xa0*/
+	0x03ca, 0x0390, 0x03cc, 0x03cd,
+	0x0391, 0x0392, 0x0393, 0x0394,
+	0x0395, 0x0396, 0x0397, 0x00bd,
+	0x0398, 0x0399, 0x00ab, 0x00bb,
+	/* 0xb0*/
+	0x2591, 0x2592, 0x2593, 0x2502,
+	0x2524, 0x039a, 0x039b, 0x039c,
+	0x039d, 0x2563, 0x2551, 0x2557,
+	0x255d, 0x039e, 0x039f, 0x2510,
+	/* 0xc0*/
+	0x2514, 0x2534, 0x252c, 0x251c,
+	0x2500, 0x253c, 0x03a0, 0x03a1,
+	0x255a, 0x2554, 0x2569, 0x2566,
+	0x2560, 0x2550, 0x256c, 0x03a3,
+	/* 0xd0*/
+	0x03a4, 0x03a5, 0x03a6, 0x03a7,
+	0x03a8, 0x03a9, 0x03b1, 0x03b2,
+	0x03b3, 0x2518, 0x250c, 0x2588,
+	0x2584, 0x03b4, 0x03b5, 0x2580,
+	/* 0xe0*/
+	0x03b6, 0x03b7, 0x03b8, 0x03b9,
+	0x03ba, 0x03bb, 0x03bc, 0x03bd,
+	0x03be, 0x03bf, 0x03c0, 0x03c1,
+	0x03c3, 0x03c2, 0x03c4, 0x0384,
+	/* 0xf0*/
+	0x00ad, 0x00b1, 0x03c5, 0x03c6,
+	0x03c7, 0x00a7, 0x03c8, 0x0385,
+	0x00b0, 0x00a8, 0x03c9, 0x03cb,
+	0x03b0, 0x03ce, 0x25a0, 0x00a0,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xff, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x8a, 0xf5, /* 0xa0-0xa7 */
+	0xf9, 0x97, 0x00, 0xae, 0x89, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */
+	0xf8, 0xf1, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x88, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xaf, 0x00, 0xab, 0x00, 0x00, /* 0xb8-0xbf */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0xef, 0xf7, 0x86, 0x00, /* 0x80-0x87 */
+	0x8d, 0x8f, 0x90, 0x00, 0x92, 0x00, 0x95, 0x98, /* 0x88-0x8f */
+	0xa1, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, /* 0x90-0x97 */
+	0xac, 0xad, 0xb5, 0xb6, 0xb7, 0xb8, 0xbd, 0xbe, /* 0x98-0x9f */
+	0xc6, 0xc7, 0x00, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, /* 0xa0-0xa7 */
+	0xd4, 0xd5, 0x91, 0x96, 0x9b, 0x9d, 0x9e, 0x9f, /* 0xa8-0xaf */
+	0xfc, 0xd6, 0xd7, 0xd8, 0xdd, 0xde, 0xe0, 0xe1, /* 0xb0-0xb7 */
+	0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, /* 0xb8-0xbf */
+	0xea, 0xeb, 0xed, 0xec, 0xee, 0xf2, 0xf3, 0xf4, /* 0xc0-0xc7 */
+	0xf6, 0xfa, 0xa0, 0xfb, 0xa2, 0xa3, 0xfd, 0x00, /* 0xc8-0xcf */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, /* 0x10-0x17 */
+	0x8b, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+};
+
+static unsigned char page25[256] = {
+	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */
+	0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */
+	0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */
+	0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   NULL,   NULL,   NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x00, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x9d, 0x8e, 0x9e, /* 0x88-0x8f */
+	0x9f, 0xa0, 0xa2, 0x00, 0x00, 0xa3, 0xfb, 0x97, /* 0x90-0x97 */
+	0xfd, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xd6, 0xd7, 0xd8, 0xdd, /* 0xa0-0xa7 */
+	0xde, 0xe0, 0xe1, 0xab, 0xe2, 0xe3, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xe4, 0xe5, 0xe6, /* 0xb0-0xb7 */
+	0xe7, 0xb9, 0xba, 0xbb, 0xbc, 0xe8, 0xe9, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xea, 0xeb, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xec, /* 0xc8-0xcf */
+	0xee, 0xf2, 0xf3, 0xf4, 0xf6, 0xfa, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x00, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x00, 0x00, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x86, 0x9c, 0x8d, 0x8f, 0x90, /* 0x98-0x9f */
+	0x91, 0xa1, 0x92, 0x95, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xa4, 0xa5, /* 0xd0-0xd7 */
+	0xa6, 0xd9, 0xda, 0xdb, 0xdc, 0xa7, 0xa8, 0xdf, /* 0xd8-0xdf */
+	0xa9, 0xaa, 0xac, 0xad, 0xb5, 0xb6, 0xb7, 0xb8, /* 0xe0-0xe7 */
+	0xbd, 0xbe, 0xc6, 0xc7, 0xcf, 0xcf, 0xd0, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xd1, 0xd2, 0xd3, 0xf5, 0xd4, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xd5, 0x96, 0xfc, 0x98, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp869",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp869(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp869(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp869)
+module_exit(exit_nls_cp869)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp874.c b/fs/nls/nls_cp874.c
new file mode 100644
index 0000000..b5766a0
--- /dev/null
+++ b/fs/nls/nls_cp874.c
@@ -0,0 +1,276 @@
+/*
+ * linux/fs/nls/nls_cp874.c
+ *
+ * Charset cp874 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x2026, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	/* 0x90*/
+	0x0000, 0x2018, 0x2019, 0x201c,
+	0x201d, 0x2022, 0x2013, 0x2014,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	/* 0xa0*/
+	0x00a0, 0x0e01, 0x0e02, 0x0e03,
+	0x0e04, 0x0e05, 0x0e06, 0x0e07,
+	0x0e08, 0x0e09, 0x0e0a, 0x0e0b,
+	0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f,
+	/* 0xb0*/
+	0x0e10, 0x0e11, 0x0e12, 0x0e13,
+	0x0e14, 0x0e15, 0x0e16, 0x0e17,
+	0x0e18, 0x0e19, 0x0e1a, 0x0e1b,
+	0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f,
+	/* 0xc0*/
+	0x0e20, 0x0e21, 0x0e22, 0x0e23,
+	0x0e24, 0x0e25, 0x0e26, 0x0e27,
+	0x0e28, 0x0e29, 0x0e2a, 0x0e2b,
+	0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f,
+	/* 0xd0*/
+	0x0e30, 0x0e31, 0x0e32, 0x0e33,
+	0x0e34, 0x0e35, 0x0e36, 0x0e37,
+	0x0e38, 0x0e39, 0x0e3a, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0e3f,
+	/* 0xe0*/
+	0x0e40, 0x0e41, 0x0e42, 0x0e43,
+	0x0e44, 0x0e45, 0x0e46, 0x0e47,
+	0x0e48, 0x0e49, 0x0e4a, 0x0e4b,
+	0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f,
+	/* 0xf0*/
+	0x0e50, 0x0e51, 0x0e52, 0x0e53,
+	0x0e54, 0x0e55, 0x0e56, 0x0e57,
+	0x0e58, 0x0e59, 0x0e5a, 0x0e5b,
+	0x0000, 0x0000, 0x0000, 0x0000,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char page0e[256] = {
+	0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page0e, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
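+/*
+ * Convert one Unicode character to cp874: the high byte of the wchar_t
+ * selects a page table in page_uni2charset[] and the low byte indexes
+ * into it; a zero entry (or a missing page) means the character has no
+ * cp874 equivalent and -EINVAL is returned.
+ */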
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "cp874",
+	.alias		= "tis-620",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp874(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp874(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp874)
+module_exit(exit_nls_cp874)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(tis-620);
diff --git a/fs/nls/nls_cp932.c b/fs/nls/nls_cp932.c
new file mode 100644
index 0000000..2c1a17c
--- /dev/null
+++ b/fs/nls/nls_cp932.c
@@ -0,0 +1,7934 @@
+/*
+ * linux/fs/nls/nls_cp932.c
+ *
+ * Charset cp932 translation tables.
+ * This translation table was generated automatically; the
+ * original table can be downloaded from the Microsoft website.
+ * (http://www.microsoft.com/typography/unicode/unicodecp.htm)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
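+/*
+ * cp932 (Shift-JIS) double-byte sequences: each c2u_XX table below maps
+ * the second (trail) byte of a sequence whose lead byte is 0xXX to the
+ * corresponding Unicode code point; a 0x0000 entry marks an unassigned
+ * combination.
+ */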
+static wchar_t c2u_81[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x3000,0x3001,0x3002,0xFF0C,0xFF0E,0x30FB,0xFF1A,0xFF1B,/* 0x40-0x47 */
+	0xFF1F,0xFF01,0x309B,0x309C,0x00B4,0xFF40,0x00A8,0xFF3E,/* 0x48-0x4F */
+	0xFFE3,0xFF3F,0x30FD,0x30FE,0x309D,0x309E,0x3003,0x4EDD,/* 0x50-0x57 */
+	0x3005,0x3006,0x3007,0x30FC,0x2015,0x2010,0xFF0F,0xFF3C,/* 0x58-0x5F */
+	0xFF5E,0x2225,0xFF5C,0x2026,0x2025,0x2018,0x2019,0x201C,/* 0x60-0x67 */
+	0x201D,0xFF08,0xFF09,0x3014,0x3015,0xFF3B,0xFF3D,0xFF5B,/* 0x68-0x6F */
+	0xFF5D,0x3008,0x3009,0x300A,0x300B,0x300C,0x300D,0x300E,/* 0x70-0x77 */
+	0x300F,0x3010,0x3011,0xFF0B,0xFF0D,0x00B1,0x00D7,0x0000,/* 0x78-0x7F */
+
+	0x00F7,0xFF1D,0x2260,0xFF1C,0xFF1E,0x2266,0x2267,0x221E,/* 0x80-0x87 */
+	0x2234,0x2642,0x2640,0x00B0,0x2032,0x2033,0x2103,0xFFE5,/* 0x88-0x8F */
+	0xFF04,0xFFE0,0xFFE1,0xFF05,0xFF03,0xFF06,0xFF0A,0xFF20,/* 0x90-0x97 */
+	0x00A7,0x2606,0x2605,0x25CB,0x25CF,0x25CE,0x25C7,0x25C6,/* 0x98-0x9F */
+	0x25A1,0x25A0,0x25B3,0x25B2,0x25BD,0x25BC,0x203B,0x3012,/* 0xA0-0xA7 */
+	0x2192,0x2190,0x2191,0x2193,0x3013,0x0000,0x0000,0x0000,/* 0xA8-0xAF */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xB0-0xB7 */
+	0x2208,0x220B,0x2286,0x2287,0x2282,0x2283,0x222A,0x2229,/* 0xB8-0xBF */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC0-0xC7 */
+	0x2227,0x2228,0xFFE2,0x21D2,0x21D4,0x2200,0x2203,0x0000,/* 0xC8-0xCF */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xD0-0xD7 */
+	0x0000,0x0000,0x2220,0x22A5,0x2312,0x2202,0x2207,0x2261,/* 0xD8-0xDF */
+	0x2252,0x226A,0x226B,0x221A,0x223D,0x221D,0x2235,0x222B,/* 0xE0-0xE7 */
+	0x222C,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xE8-0xEF */
+	0x212B,0x2030,0x266F,0x266D,0x266A,0x2020,0x2021,0x00B6,/* 0xF0-0xF7 */
+	0x0000,0x0000,0x0000,0x0000,0x25EF,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_82[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0xFF10,/* 0x48-0x4F */
+	0xFF11,0xFF12,0xFF13,0xFF14,0xFF15,0xFF16,0xFF17,0xFF18,/* 0x50-0x57 */
+	0xFF19,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0xFF21,0xFF22,0xFF23,0xFF24,0xFF25,0xFF26,0xFF27,0xFF28,/* 0x60-0x67 */
+	0xFF29,0xFF2A,0xFF2B,0xFF2C,0xFF2D,0xFF2E,0xFF2F,0xFF30,/* 0x68-0x6F */
+	0xFF31,0xFF32,0xFF33,0xFF34,0xFF35,0xFF36,0xFF37,0xFF38,/* 0x70-0x77 */
+	0xFF39,0xFF3A,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xFF41,0xFF42,0xFF43,0xFF44,0xFF45,0xFF46,0xFF47,/* 0x80-0x87 */
+	0xFF48,0xFF49,0xFF4A,0xFF4B,0xFF4C,0xFF4D,0xFF4E,0xFF4F,/* 0x88-0x8F */
+	0xFF50,0xFF51,0xFF52,0xFF53,0xFF54,0xFF55,0xFF56,0xFF57,/* 0x90-0x97 */
+	0xFF58,0xFF59,0xFF5A,0x0000,0x0000,0x0000,0x0000,0x3041,/* 0x98-0x9F */
+	0x3042,0x3043,0x3044,0x3045,0x3046,0x3047,0x3048,0x3049,/* 0xA0-0xA7 */
+	0x304A,0x304B,0x304C,0x304D,0x304E,0x304F,0x3050,0x3051,/* 0xA8-0xAF */
+	0x3052,0x3053,0x3054,0x3055,0x3056,0x3057,0x3058,0x3059,/* 0xB0-0xB7 */
+	0x305A,0x305B,0x305C,0x305D,0x305E,0x305F,0x3060,0x3061,/* 0xB8-0xBF */
+	0x3062,0x3063,0x3064,0x3065,0x3066,0x3067,0x3068,0x3069,/* 0xC0-0xC7 */
+	0x306A,0x306B,0x306C,0x306D,0x306E,0x306F,0x3070,0x3071,/* 0xC8-0xCF */
+	0x3072,0x3073,0x3074,0x3075,0x3076,0x3077,0x3078,0x3079,/* 0xD0-0xD7 */
+	0x307A,0x307B,0x307C,0x307D,0x307E,0x307F,0x3080,0x3081,/* 0xD8-0xDF */
+	0x3082,0x3083,0x3084,0x3085,0x3086,0x3087,0x3088,0x3089,/* 0xE0-0xE7 */
+	0x308A,0x308B,0x308C,0x308D,0x308E,0x308F,0x3090,0x3091,/* 0xE8-0xEF */
+	0x3092,0x3093,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_83[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x30A1,0x30A2,0x30A3,0x30A4,0x30A5,0x30A6,0x30A7,0x30A8,/* 0x40-0x47 */
+	0x30A9,0x30AA,0x30AB,0x30AC,0x30AD,0x30AE,0x30AF,0x30B0,/* 0x48-0x4F */
+	0x30B1,0x30B2,0x30B3,0x30B4,0x30B5,0x30B6,0x30B7,0x30B8,/* 0x50-0x57 */
+	0x30B9,0x30BA,0x30BB,0x30BC,0x30BD,0x30BE,0x30BF,0x30C0,/* 0x58-0x5F */
+	0x30C1,0x30C2,0x30C3,0x30C4,0x30C5,0x30C6,0x30C7,0x30C8,/* 0x60-0x67 */
+	0x30C9,0x30CA,0x30CB,0x30CC,0x30CD,0x30CE,0x30CF,0x30D0,/* 0x68-0x6F */
+	0x30D1,0x30D2,0x30D3,0x30D4,0x30D5,0x30D6,0x30D7,0x30D8,/* 0x70-0x77 */
+	0x30D9,0x30DA,0x30DB,0x30DC,0x30DD,0x30DE,0x30DF,0x0000,/* 0x78-0x7F */
+
+	0x30E0,0x30E1,0x30E2,0x30E3,0x30E4,0x30E5,0x30E6,0x30E7,/* 0x80-0x87 */
+	0x30E8,0x30E9,0x30EA,0x30EB,0x30EC,0x30ED,0x30EE,0x30EF,/* 0x88-0x8F */
+	0x30F0,0x30F1,0x30F2,0x30F3,0x30F4,0x30F5,0x30F6,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0391,/* 0x98-0x9F */
+	0x0392,0x0393,0x0394,0x0395,0x0396,0x0397,0x0398,0x0399,/* 0xA0-0xA7 */
+	0x039A,0x039B,0x039C,0x039D,0x039E,0x039F,0x03A0,0x03A1,/* 0xA8-0xAF */
+	0x03A3,0x03A4,0x03A5,0x03A6,0x03A7,0x03A8,0x03A9,0x0000,/* 0xB0-0xB7 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x03B1,/* 0xB8-0xBF */
+	0x03B2,0x03B3,0x03B4,0x03B5,0x03B6,0x03B7,0x03B8,0x03B9,/* 0xC0-0xC7 */
+	0x03BA,0x03BB,0x03BC,0x03BD,0x03BE,0x03BF,0x03C0,0x03C1,/* 0xC8-0xCF */
+	0x03C3,0x03C4,0x03C5,0x03C6,0x03C7,0x03C8,0x03C9,0x0000,/* 0xD0-0xD7 */
+};
+
+static wchar_t c2u_84[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0410,0x0411,0x0412,0x0413,0x0414,0x0415,0x0401,0x0416,/* 0x40-0x47 */
+	0x0417,0x0418,0x0419,0x041A,0x041B,0x041C,0x041D,0x041E,/* 0x48-0x4F */
+	0x041F,0x0420,0x0421,0x0422,0x0423,0x0424,0x0425,0x0426,/* 0x50-0x57 */
+	0x0427,0x0428,0x0429,0x042A,0x042B,0x042C,0x042D,0x042E,/* 0x58-0x5F */
+	0x042F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0430,0x0431,0x0432,0x0433,0x0434,0x0435,0x0451,0x0436,/* 0x70-0x77 */
+	0x0437,0x0438,0x0439,0x043A,0x043B,0x043C,0x043D,0x0000,/* 0x78-0x7F */
+
+	0x043E,0x043F,0x0440,0x0441,0x0442,0x0443,0x0444,0x0445,/* 0x80-0x87 */
+	0x0446,0x0447,0x0448,0x0449,0x044A,0x044B,0x044C,0x044D,/* 0x88-0x8F */
+	0x044E,0x044F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x2500,/* 0x98-0x9F */
+	0x2502,0x250C,0x2510,0x2518,0x2514,0x251C,0x252C,0x2524,/* 0xA0-0xA7 */
+	0x2534,0x253C,0x2501,0x2503,0x250F,0x2513,0x251B,0x2517,/* 0xA8-0xAF */
+	0x2523,0x2533,0x252B,0x253B,0x254B,0x2520,0x252F,0x2528,/* 0xB0-0xB7 */
+	0x2537,0x253F,0x251D,0x2530,0x2525,0x2538,0x2542,0x0000,/* 0xB8-0xBF */
+};
+
+static wchar_t c2u_87[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x2460,0x2461,0x2462,0x2463,0x2464,0x2465,0x2466,0x2467,/* 0x40-0x47 */
+	0x2468,0x2469,0x246A,0x246B,0x246C,0x246D,0x246E,0x246F,/* 0x48-0x4F */
+	0x2470,0x2471,0x2472,0x2473,0x2160,0x2161,0x2162,0x2163,/* 0x50-0x57 */
+	0x2164,0x2165,0x2166,0x2167,0x2168,0x2169,0x0000,0x3349,/* 0x58-0x5F */
+	0x3314,0x3322,0x334D,0x3318,0x3327,0x3303,0x3336,0x3351,/* 0x60-0x67 */
+	0x3357,0x330D,0x3326,0x3323,0x332B,0x334A,0x333B,0x339C,/* 0x68-0x6F */
+	0x339D,0x339E,0x338E,0x338F,0x33C4,0x33A1,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x337B,0x0000,/* 0x78-0x7F */
+
+	0x301D,0x301F,0x2116,0x33CD,0x2121,0x32A4,0x32A5,0x32A6,/* 0x80-0x87 */
+	0x32A7,0x32A8,0x3231,0x3232,0x3239,0x337E,0x337D,0x337C,/* 0x88-0x8F */
+	0x2252,0x2261,0x222B,0x222E,0x2211,0x221A,0x22A5,0x2220,/* 0x90-0x97 */
+	0x221F,0x22BF,0x2235,0x2229,0x222A,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+};
+
+static wchar_t c2u_88[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x4E9C,/* 0x98-0x9F */
+	0x5516,0x5A03,0x963F,0x54C0,0x611B,0x6328,0x59F6,0x9022,/* 0xA0-0xA7 */
+	0x8475,0x831C,0x7A50,0x60AA,0x63E1,0x6E25,0x65ED,0x8466,/* 0xA8-0xAF */
+	0x82A6,0x9BF5,0x6893,0x5727,0x65A1,0x6271,0x5B9B,0x59D0,/* 0xB0-0xB7 */
+	0x867B,0x98F4,0x7D62,0x7DBE,0x9B8E,0x6216,0x7C9F,0x88B7,/* 0xB8-0xBF */
+	0x5B89,0x5EB5,0x6309,0x6697,0x6848,0x95C7,0x978D,0x674F,/* 0xC0-0xC7 */
+	0x4EE5,0x4F0A,0x4F4D,0x4F9D,0x5049,0x56F2,0x5937,0x59D4,/* 0xC8-0xCF */
+	0x5A01,0x5C09,0x60DF,0x610F,0x6170,0x6613,0x6905,0x70BA,/* 0xD0-0xD7 */
+	0x754F,0x7570,0x79FB,0x7DAD,0x7DEF,0x80C3,0x840E,0x8863,/* 0xD8-0xDF */
+	0x8B02,0x9055,0x907A,0x533B,0x4E95,0x4EA5,0x57DF,0x80B2,/* 0xE0-0xE7 */
+	0x90C1,0x78EF,0x4E00,0x58F1,0x6EA2,0x9038,0x7A32,0x8328,/* 0xE8-0xEF */
+	0x828B,0x9C2F,0x5141,0x5370,0x54BD,0x54E1,0x56E0,0x59FB,/* 0xF0-0xF7 */
+	0x5F15,0x98F2,0x6DEB,0x80E4,0x852D,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_89[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9662,0x9670,0x96A0,0x97FB,0x540B,0x53F3,0x5B87,0x70CF,/* 0x40-0x47 */
+	0x7FBD,0x8FC2,0x96E8,0x536F,0x9D5C,0x7ABA,0x4E11,0x7893,/* 0x48-0x4F */
+	0x81FC,0x6E26,0x5618,0x5504,0x6B1D,0x851A,0x9C3B,0x59E5,/* 0x50-0x57 */
+	0x53A9,0x6D66,0x74DC,0x958F,0x5642,0x4E91,0x904B,0x96F2,/* 0x58-0x5F */
+	0x834F,0x990C,0x53E1,0x55B6,0x5B30,0x5F71,0x6620,0x66F3,/* 0x60-0x67 */
+	0x6804,0x6C38,0x6CF3,0x6D29,0x745B,0x76C8,0x7A4E,0x9834,/* 0x68-0x6F */
+	0x82F1,0x885B,0x8A60,0x92ED,0x6DB2,0x75AB,0x76CA,0x99C5,/* 0x70-0x77 */
+	0x60A6,0x8B01,0x8D8A,0x95B2,0x698E,0x53AD,0x5186,0x0000,/* 0x78-0x7F */
+
+	0x5712,0x5830,0x5944,0x5BB4,0x5EF6,0x6028,0x63A9,0x63F4,/* 0x80-0x87 */
+	0x6CBF,0x6F14,0x708E,0x7114,0x7159,0x71D5,0x733F,0x7E01,/* 0x88-0x8F */
+	0x8276,0x82D1,0x8597,0x9060,0x925B,0x9D1B,0x5869,0x65BC,/* 0x90-0x97 */
+	0x6C5A,0x7525,0x51F9,0x592E,0x5965,0x5F80,0x5FDC,0x62BC,/* 0x98-0x9F */
+	0x65FA,0x6A2A,0x6B27,0x6BB4,0x738B,0x7FC1,0x8956,0x9D2C,/* 0xA0-0xA7 */
+	0x9D0E,0x9EC4,0x5CA1,0x6C96,0x837B,0x5104,0x5C4B,0x61B6,/* 0xA8-0xAF */
+	0x81C6,0x6876,0x7261,0x4E59,0x4FFA,0x5378,0x6069,0x6E29,/* 0xB0-0xB7 */
+	0x7A4F,0x97F3,0x4E0B,0x5316,0x4EEE,0x4F55,0x4F3D,0x4FA1,/* 0xB8-0xBF */
+	0x4F73,0x52A0,0x53EF,0x5609,0x590F,0x5AC1,0x5BB6,0x5BE1,/* 0xC0-0xC7 */
+	0x79D1,0x6687,0x679C,0x67B6,0x6B4C,0x6CB3,0x706B,0x73C2,/* 0xC8-0xCF */
+	0x798D,0x79BE,0x7A3C,0x7B87,0x82B1,0x82DB,0x8304,0x8377,/* 0xD0-0xD7 */
+	0x83EF,0x83D3,0x8766,0x8AB2,0x5629,0x8CA8,0x8FE6,0x904E,/* 0xD8-0xDF */
+	0x971E,0x868A,0x4FC4,0x5CE8,0x6211,0x7259,0x753B,0x81E5,/* 0xE0-0xE7 */
+	0x82BD,0x86FE,0x8CC0,0x96C5,0x9913,0x99D5,0x4ECB,0x4F1A,/* 0xE8-0xEF */
+	0x89E3,0x56DE,0x584A,0x58CA,0x5EFB,0x5FEB,0x602A,0x6094,/* 0xF0-0xF7 */
+	0x6062,0x61D0,0x6212,0x62D0,0x6539,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9B41,0x6666,0x68B0,0x6D77,0x7070,0x754C,0x7686,0x7D75,/* 0x40-0x47 */
+	0x82A5,0x87F9,0x958B,0x968E,0x8C9D,0x51F1,0x52BE,0x5916,/* 0x48-0x4F */
+	0x54B3,0x5BB3,0x5D16,0x6168,0x6982,0x6DAF,0x788D,0x84CB,/* 0x50-0x57 */
+	0x8857,0x8A72,0x93A7,0x9AB8,0x6D6C,0x99A8,0x86D9,0x57A3,/* 0x58-0x5F */
+	0x67FF,0x86CE,0x920E,0x5283,0x5687,0x5404,0x5ED3,0x62E1,/* 0x60-0x67 */
+	0x64B9,0x683C,0x6838,0x6BBB,0x7372,0x78BA,0x7A6B,0x899A,/* 0x68-0x6F */
+	0x89D2,0x8D6B,0x8F03,0x90ED,0x95A3,0x9694,0x9769,0x5B66,/* 0x70-0x77 */
+	0x5CB3,0x697D,0x984D,0x984E,0x639B,0x7B20,0x6A2B,0x0000,/* 0x78-0x7F */
+
+	0x6A7F,0x68B6,0x9C0D,0x6F5F,0x5272,0x559D,0x6070,0x62EC,/* 0x80-0x87 */
+	0x6D3B,0x6E07,0x6ED1,0x845B,0x8910,0x8F44,0x4E14,0x9C39,/* 0x88-0x8F */
+	0x53F6,0x691B,0x6A3A,0x9784,0x682A,0x515C,0x7AC3,0x84B2,/* 0x90-0x97 */
+	0x91DC,0x938C,0x565B,0x9D28,0x6822,0x8305,0x8431,0x7CA5,/* 0x98-0x9F */
+	0x5208,0x82C5,0x74E6,0x4E7E,0x4F83,0x51A0,0x5BD2,0x520A,/* 0xA0-0xA7 */
+	0x52D8,0x52E7,0x5DFB,0x559A,0x582A,0x59E6,0x5B8C,0x5B98,/* 0xA8-0xAF */
+	0x5BDB,0x5E72,0x5E79,0x60A3,0x611F,0x6163,0x61BE,0x63DB,/* 0xB0-0xB7 */
+	0x6562,0x67D1,0x6853,0x68FA,0x6B3E,0x6B53,0x6C57,0x6F22,/* 0xB8-0xBF */
+	0x6F97,0x6F45,0x74B0,0x7518,0x76E3,0x770B,0x7AFF,0x7BA1,/* 0xC0-0xC7 */
+	0x7C21,0x7DE9,0x7F36,0x7FF0,0x809D,0x8266,0x839E,0x89B3,/* 0xC8-0xCF */
+	0x8ACC,0x8CAB,0x9084,0x9451,0x9593,0x9591,0x95A2,0x9665,/* 0xD0-0xD7 */
+	0x97D3,0x9928,0x8218,0x4E38,0x542B,0x5CB8,0x5DCC,0x73A9,/* 0xD8-0xDF */
+	0x764C,0x773C,0x5CA9,0x7FEB,0x8D0B,0x96C1,0x9811,0x9854,/* 0xE0-0xE7 */
+	0x9858,0x4F01,0x4F0E,0x5371,0x559C,0x5668,0x57FA,0x5947,/* 0xE8-0xEF */
+	0x5B09,0x5BC4,0x5C90,0x5E0C,0x5E7E,0x5FCC,0x63EE,0x673A,/* 0xF0-0xF7 */
+	0x65D7,0x65E2,0x671F,0x68CB,0x68C4,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6A5F,0x5E30,0x6BC5,0x6C17,0x6C7D,0x757F,0x7948,0x5B63,/* 0x40-0x47 */
+	0x7A00,0x7D00,0x5FBD,0x898F,0x8A18,0x8CB4,0x8D77,0x8ECC,/* 0x48-0x4F */
+	0x8F1D,0x98E2,0x9A0E,0x9B3C,0x4E80,0x507D,0x5100,0x5993,/* 0x50-0x57 */
+	0x5B9C,0x622F,0x6280,0x64EC,0x6B3A,0x72A0,0x7591,0x7947,/* 0x58-0x5F */
+	0x7FA9,0x87FB,0x8ABC,0x8B70,0x63AC,0x83CA,0x97A0,0x5409,/* 0x60-0x67 */
+	0x5403,0x55AB,0x6854,0x6A58,0x8A70,0x7827,0x6775,0x9ECD,/* 0x68-0x6F */
+	0x5374,0x5BA2,0x811A,0x8650,0x9006,0x4E18,0x4E45,0x4EC7,/* 0x70-0x77 */
+	0x4F11,0x53CA,0x5438,0x5BAE,0x5F13,0x6025,0x6551,0x0000,/* 0x78-0x7F */
+
+	0x673D,0x6C42,0x6C72,0x6CE3,0x7078,0x7403,0x7A76,0x7AAE,/* 0x80-0x87 */
+	0x7B08,0x7D1A,0x7CFE,0x7D66,0x65E7,0x725B,0x53BB,0x5C45,/* 0x88-0x8F */
+	0x5DE8,0x62D2,0x62E0,0x6319,0x6E20,0x865A,0x8A31,0x8DDD,/* 0x90-0x97 */
+	0x92F8,0x6F01,0x79A6,0x9B5A,0x4EA8,0x4EAB,0x4EAC,0x4F9B,/* 0x98-0x9F */
+	0x4FA0,0x50D1,0x5147,0x7AF6,0x5171,0x51F6,0x5354,0x5321,/* 0xA0-0xA7 */
+	0x537F,0x53EB,0x55AC,0x5883,0x5CE1,0x5F37,0x5F4A,0x602F,/* 0xA8-0xAF */
+	0x6050,0x606D,0x631F,0x6559,0x6A4B,0x6CC1,0x72C2,0x72ED,/* 0xB0-0xB7 */
+	0x77EF,0x80F8,0x8105,0x8208,0x854E,0x90F7,0x93E1,0x97FF,/* 0xB8-0xBF */
+	0x9957,0x9A5A,0x4EF0,0x51DD,0x5C2D,0x6681,0x696D,0x5C40,/* 0xC0-0xC7 */
+	0x66F2,0x6975,0x7389,0x6850,0x7C81,0x50C5,0x52E4,0x5747,/* 0xC8-0xCF */
+	0x5DFE,0x9326,0x65A4,0x6B23,0x6B3D,0x7434,0x7981,0x79BD,/* 0xD0-0xD7 */
+	0x7B4B,0x7DCA,0x82B9,0x83CC,0x887F,0x895F,0x8B39,0x8FD1,/* 0xD8-0xDF */
+	0x91D1,0x541F,0x9280,0x4E5D,0x5036,0x53E5,0x533A,0x72D7,/* 0xE0-0xE7 */
+	0x7396,0x77E9,0x82E6,0x8EAF,0x99C6,0x99C8,0x99D2,0x5177,/* 0xE8-0xEF */
+	0x611A,0x865E,0x55B0,0x7A7A,0x5076,0x5BD3,0x9047,0x9685,/* 0xF0-0xF7 */
+	0x4E32,0x6ADB,0x91E7,0x5C51,0x5C48,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6398,0x7A9F,0x6C93,0x9774,0x8F61,0x7AAA,0x718A,0x9688,/* 0x40-0x47 */
+	0x7C82,0x6817,0x7E70,0x6851,0x936C,0x52F2,0x541B,0x85AB,/* 0x48-0x4F */
+	0x8A13,0x7FA4,0x8ECD,0x90E1,0x5366,0x8888,0x7941,0x4FC2,/* 0x50-0x57 */
+	0x50BE,0x5211,0x5144,0x5553,0x572D,0x73EA,0x578B,0x5951,/* 0x58-0x5F */
+	0x5F62,0x5F84,0x6075,0x6176,0x6167,0x61A9,0x63B2,0x643A,/* 0x60-0x67 */
+	0x656C,0x666F,0x6842,0x6E13,0x7566,0x7A3D,0x7CFB,0x7D4C,/* 0x68-0x6F */
+	0x7D99,0x7E4B,0x7F6B,0x830E,0x834A,0x86CD,0x8A08,0x8A63,/* 0x70-0x77 */
+	0x8B66,0x8EFD,0x981A,0x9D8F,0x82B8,0x8FCE,0x9BE8,0x0000,/* 0x78-0x7F */
+
+	0x5287,0x621F,0x6483,0x6FC0,0x9699,0x6841,0x5091,0x6B20,/* 0x80-0x87 */
+	0x6C7A,0x6F54,0x7A74,0x7D50,0x8840,0x8A23,0x6708,0x4EF6,/* 0x88-0x8F */
+	0x5039,0x5026,0x5065,0x517C,0x5238,0x5263,0x55A7,0x570F,/* 0x90-0x97 */
+	0x5805,0x5ACC,0x5EFA,0x61B2,0x61F8,0x62F3,0x6372,0x691C,/* 0x98-0x9F */
+	0x6A29,0x727D,0x72AC,0x732E,0x7814,0x786F,0x7D79,0x770C,/* 0xA0-0xA7 */
+	0x80A9,0x898B,0x8B19,0x8CE2,0x8ED2,0x9063,0x9375,0x967A,/* 0xA8-0xAF */
+	0x9855,0x9A13,0x9E78,0x5143,0x539F,0x53B3,0x5E7B,0x5F26,/* 0xB0-0xB7 */
+	0x6E1B,0x6E90,0x7384,0x73FE,0x7D43,0x8237,0x8A00,0x8AFA,/* 0xB8-0xBF */
+	0x9650,0x4E4E,0x500B,0x53E4,0x547C,0x56FA,0x59D1,0x5B64,/* 0xC0-0xC7 */
+	0x5DF1,0x5EAB,0x5F27,0x6238,0x6545,0x67AF,0x6E56,0x72D0,/* 0xC8-0xCF */
+	0x7CCA,0x88B4,0x80A1,0x80E1,0x83F0,0x864E,0x8A87,0x8DE8,/* 0xD0-0xD7 */
+	0x9237,0x96C7,0x9867,0x9F13,0x4E94,0x4E92,0x4F0D,0x5348,/* 0xD8-0xDF */
+	0x5449,0x543E,0x5A2F,0x5F8C,0x5FA1,0x609F,0x68A7,0x6A8E,/* 0xE0-0xE7 */
+	0x745A,0x7881,0x8A9E,0x8AA4,0x8B77,0x9190,0x4E5E,0x9BC9,/* 0xE8-0xEF */
+	0x4EA4,0x4F7C,0x4FAF,0x5019,0x5016,0x5149,0x516C,0x529F,/* 0xF0-0xF7 */
+	0x52B9,0x52FE,0x539A,0x53E3,0x5411,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x540E,0x5589,0x5751,0x57A2,0x597D,0x5B54,0x5B5D,0x5B8F,/* 0x40-0x47 */
+	0x5DE5,0x5DE7,0x5DF7,0x5E78,0x5E83,0x5E9A,0x5EB7,0x5F18,/* 0x48-0x4F */
+	0x6052,0x614C,0x6297,0x62D8,0x63A7,0x653B,0x6602,0x6643,/* 0x50-0x57 */
+	0x66F4,0x676D,0x6821,0x6897,0x69CB,0x6C5F,0x6D2A,0x6D69,/* 0x58-0x5F */
+	0x6E2F,0x6E9D,0x7532,0x7687,0x786C,0x7A3F,0x7CE0,0x7D05,/* 0x60-0x67 */
+	0x7D18,0x7D5E,0x7DB1,0x8015,0x8003,0x80AF,0x80B1,0x8154,/* 0x68-0x6F */
+	0x818F,0x822A,0x8352,0x884C,0x8861,0x8B1B,0x8CA2,0x8CFC,/* 0x70-0x77 */
+	0x90CA,0x9175,0x9271,0x783F,0x92FC,0x95A4,0x964D,0x0000,/* 0x78-0x7F */
+
+	0x9805,0x9999,0x9AD8,0x9D3B,0x525B,0x52AB,0x53F7,0x5408,/* 0x80-0x87 */
+	0x58D5,0x62F7,0x6FE0,0x8C6A,0x8F5F,0x9EB9,0x514B,0x523B,/* 0x88-0x8F */
+	0x544A,0x56FD,0x7A40,0x9177,0x9D60,0x9ED2,0x7344,0x6F09,/* 0x90-0x97 */
+	0x8170,0x7511,0x5FFD,0x60DA,0x9AA8,0x72DB,0x8FBC,0x6B64,/* 0x98-0x9F */
+	0x9803,0x4ECA,0x56F0,0x5764,0x58BE,0x5A5A,0x6068,0x61C7,/* 0xA0-0xA7 */
+	0x660F,0x6606,0x6839,0x68B1,0x6DF7,0x75D5,0x7D3A,0x826E,/* 0xA8-0xAF */
+	0x9B42,0x4E9B,0x4F50,0x53C9,0x5506,0x5D6F,0x5DE6,0x5DEE,/* 0xB0-0xB7 */
+	0x67FB,0x6C99,0x7473,0x7802,0x8A50,0x9396,0x88DF,0x5750,/* 0xB8-0xBF */
+	0x5EA7,0x632B,0x50B5,0x50AC,0x518D,0x6700,0x54C9,0x585E,/* 0xC0-0xC7 */
+	0x59BB,0x5BB0,0x5F69,0x624D,0x63A1,0x683D,0x6B73,0x6E08,/* 0xC8-0xCF */
+	0x707D,0x91C7,0x7280,0x7815,0x7826,0x796D,0x658E,0x7D30,/* 0xD0-0xD7 */
+	0x83DC,0x88C1,0x8F09,0x969B,0x5264,0x5728,0x6750,0x7F6A,/* 0xD8-0xDF */
+	0x8CA1,0x51B4,0x5742,0x962A,0x583A,0x698A,0x80B4,0x54B2,/* 0xE0-0xE7 */
+	0x5D0E,0x57FC,0x7895,0x9DFA,0x4F5C,0x524A,0x548B,0x643E,/* 0xE8-0xEF */
+	0x6628,0x6714,0x67F5,0x7A84,0x7B56,0x7D22,0x932F,0x685C,/* 0xF0-0xF7 */
+	0x9BAD,0x7B39,0x5319,0x518A,0x5237,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5BDF,0x62F6,0x64AE,0x64E6,0x672D,0x6BBA,0x85A9,0x96D1,/* 0x40-0x47 */
+	0x7690,0x9BD6,0x634C,0x9306,0x9BAB,0x76BF,0x6652,0x4E09,/* 0x48-0x4F */
+	0x5098,0x53C2,0x5C71,0x60E8,0x6492,0x6563,0x685F,0x71E6,/* 0x50-0x57 */
+	0x73CA,0x7523,0x7B97,0x7E82,0x8695,0x8B83,0x8CDB,0x9178,/* 0x58-0x5F */
+	0x9910,0x65AC,0x66AB,0x6B8B,0x4ED5,0x4ED4,0x4F3A,0x4F7F,/* 0x60-0x67 */
+	0x523A,0x53F8,0x53F2,0x55E3,0x56DB,0x58EB,0x59CB,0x59C9,/* 0x68-0x6F */
+	0x59FF,0x5B50,0x5C4D,0x5E02,0x5E2B,0x5FD7,0x601D,0x6307,/* 0x70-0x77 */
+	0x652F,0x5B5C,0x65AF,0x65BD,0x65E8,0x679D,0x6B62,0x0000,/* 0x78-0x7F */
+
+	0x6B7B,0x6C0F,0x7345,0x7949,0x79C1,0x7CF8,0x7D19,0x7D2B,/* 0x80-0x87 */
+	0x80A2,0x8102,0x81F3,0x8996,0x8A5E,0x8A69,0x8A66,0x8A8C,/* 0x88-0x8F */
+	0x8AEE,0x8CC7,0x8CDC,0x96CC,0x98FC,0x6B6F,0x4E8B,0x4F3C,/* 0x90-0x97 */
+	0x4F8D,0x5150,0x5B57,0x5BFA,0x6148,0x6301,0x6642,0x6B21,/* 0x98-0x9F */
+	0x6ECB,0x6CBB,0x723E,0x74BD,0x75D4,0x78C1,0x793A,0x800C,/* 0xA0-0xA7 */
+	0x8033,0x81EA,0x8494,0x8F9E,0x6C50,0x9E7F,0x5F0F,0x8B58,/* 0xA8-0xAF */
+	0x9D2B,0x7AFA,0x8EF8,0x5B8D,0x96EB,0x4E03,0x53F1,0x57F7,/* 0xB0-0xB7 */
+	0x5931,0x5AC9,0x5BA4,0x6089,0x6E7F,0x6F06,0x75BE,0x8CEA,/* 0xB8-0xBF */
+	0x5B9F,0x8500,0x7BE0,0x5072,0x67F4,0x829D,0x5C61,0x854A,/* 0xC0-0xC7 */
+	0x7E1E,0x820E,0x5199,0x5C04,0x6368,0x8D66,0x659C,0x716E,/* 0xC8-0xCF */
+	0x793E,0x7D17,0x8005,0x8B1D,0x8ECA,0x906E,0x86C7,0x90AA,/* 0xD0-0xD7 */
+	0x501F,0x52FA,0x5C3A,0x6753,0x707C,0x7235,0x914C,0x91C8,/* 0xD8-0xDF */
+	0x932B,0x82E5,0x5BC2,0x5F31,0x60F9,0x4E3B,0x53D6,0x5B88,/* 0xE0-0xE7 */
+	0x624B,0x6731,0x6B8A,0x72E9,0x73E0,0x7A2E,0x816B,0x8DA3,/* 0xE8-0xEF */
+	0x9152,0x9996,0x5112,0x53D7,0x546A,0x5BFF,0x6388,0x6A39,/* 0xF0-0xF7 */
+	0x7DAC,0x9700,0x56DA,0x53CE,0x5468,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5B97,0x5C31,0x5DDE,0x4FEE,0x6101,0x62FE,0x6D32,0x79C0,/* 0x40-0x47 */
+	0x79CB,0x7D42,0x7E4D,0x7FD2,0x81ED,0x821F,0x8490,0x8846,/* 0x48-0x4F */
+	0x8972,0x8B90,0x8E74,0x8F2F,0x9031,0x914B,0x916C,0x96C6,/* 0x50-0x57 */
+	0x919C,0x4EC0,0x4F4F,0x5145,0x5341,0x5F93,0x620E,0x67D4,/* 0x58-0x5F */
+	0x6C41,0x6E0B,0x7363,0x7E26,0x91CD,0x9283,0x53D4,0x5919,/* 0x60-0x67 */
+	0x5BBF,0x6DD1,0x795D,0x7E2E,0x7C9B,0x587E,0x719F,0x51FA,/* 0x68-0x6F */
+	0x8853,0x8FF0,0x4FCA,0x5CFB,0x6625,0x77AC,0x7AE3,0x821C,/* 0x70-0x77 */
+	0x99FF,0x51C6,0x5FAA,0x65EC,0x696F,0x6B89,0x6DF3,0x0000,/* 0x78-0x7F */
+
+	0x6E96,0x6F64,0x76FE,0x7D14,0x5DE1,0x9075,0x9187,0x9806,/* 0x80-0x87 */
+	0x51E6,0x521D,0x6240,0x6691,0x66D9,0x6E1A,0x5EB6,0x7DD2,/* 0x88-0x8F */
+	0x7F72,0x66F8,0x85AF,0x85F7,0x8AF8,0x52A9,0x53D9,0x5973,/* 0x90-0x97 */
+	0x5E8F,0x5F90,0x6055,0x92E4,0x9664,0x50B7,0x511F,0x52DD,/* 0x98-0x9F */
+	0x5320,0x5347,0x53EC,0x54E8,0x5546,0x5531,0x5617,0x5968,/* 0xA0-0xA7 */
+	0x59BE,0x5A3C,0x5BB5,0x5C06,0x5C0F,0x5C11,0x5C1A,0x5E84,/* 0xA8-0xAF */
+	0x5E8A,0x5EE0,0x5F70,0x627F,0x6284,0x62DB,0x638C,0x6377,/* 0xB0-0xB7 */
+	0x6607,0x660C,0x662D,0x6676,0x677E,0x68A2,0x6A1F,0x6A35,/* 0xB8-0xBF */
+	0x6CBC,0x6D88,0x6E09,0x6E58,0x713C,0x7126,0x7167,0x75C7,/* 0xC0-0xC7 */
+	0x7701,0x785D,0x7901,0x7965,0x79F0,0x7AE0,0x7B11,0x7CA7,/* 0xC8-0xCF */
+	0x7D39,0x8096,0x83D6,0x848B,0x8549,0x885D,0x88F3,0x8A1F,/* 0xD0-0xD7 */
+	0x8A3C,0x8A54,0x8A73,0x8C61,0x8CDE,0x91A4,0x9266,0x937E,/* 0xD8-0xDF */
+	0x9418,0x969C,0x9798,0x4E0A,0x4E08,0x4E1E,0x4E57,0x5197,/* 0xE0-0xE7 */
+	0x5270,0x57CE,0x5834,0x58CC,0x5B22,0x5E38,0x60C5,0x64FE,/* 0xE8-0xEF */
+	0x6761,0x6756,0x6D44,0x72B6,0x7573,0x7A63,0x84B8,0x8B72,/* 0xF0-0xF7 */
+	0x91B8,0x9320,0x5631,0x57F4,0x98FE,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_90[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x62ED,0x690D,0x6B96,0x71ED,0x7E54,0x8077,0x8272,0x89E6,/* 0x40-0x47 */
+	0x98DF,0x8755,0x8FB1,0x5C3B,0x4F38,0x4FE1,0x4FB5,0x5507,/* 0x48-0x4F */
+	0x5A20,0x5BDD,0x5BE9,0x5FC3,0x614E,0x632F,0x65B0,0x664B,/* 0x50-0x57 */
+	0x68EE,0x699B,0x6D78,0x6DF1,0x7533,0x75B9,0x771F,0x795E,/* 0x58-0x5F */
+	0x79E6,0x7D33,0x81E3,0x82AF,0x85AA,0x89AA,0x8A3A,0x8EAB,/* 0x60-0x67 */
+	0x8F9B,0x9032,0x91DD,0x9707,0x4EBA,0x4EC1,0x5203,0x5875,/* 0x68-0x6F */
+	0x58EC,0x5C0B,0x751A,0x5C3D,0x814E,0x8A0A,0x8FC5,0x9663,/* 0x70-0x77 */
+	0x976D,0x7B25,0x8ACF,0x9808,0x9162,0x56F3,0x53A8,0x0000,/* 0x78-0x7F */
+
+	0x9017,0x5439,0x5782,0x5E25,0x63A8,0x6C34,0x708A,0x7761,/* 0x80-0x87 */
+	0x7C8B,0x7FE0,0x8870,0x9042,0x9154,0x9310,0x9318,0x968F,/* 0x88-0x8F */
+	0x745E,0x9AC4,0x5D07,0x5D69,0x6570,0x67A2,0x8DA8,0x96DB,/* 0x90-0x97 */
+	0x636E,0x6749,0x6919,0x83C5,0x9817,0x96C0,0x88FE,0x6F84,/* 0x98-0x9F */
+	0x647A,0x5BF8,0x4E16,0x702C,0x755D,0x662F,0x51C4,0x5236,/* 0xA0-0xA7 */
+	0x52E2,0x59D3,0x5F81,0x6027,0x6210,0x653F,0x6574,0x661F,/* 0xA8-0xAF */
+	0x6674,0x68F2,0x6816,0x6B63,0x6E05,0x7272,0x751F,0x76DB,/* 0xB0-0xB7 */
+	0x7CBE,0x8056,0x58F0,0x88FD,0x897F,0x8AA0,0x8A93,0x8ACB,/* 0xB8-0xBF */
+	0x901D,0x9192,0x9752,0x9759,0x6589,0x7A0E,0x8106,0x96BB,/* 0xC0-0xC7 */
+	0x5E2D,0x60DC,0x621A,0x65A5,0x6614,0x6790,0x77F3,0x7A4D,/* 0xC8-0xCF */
+	0x7C4D,0x7E3E,0x810A,0x8CAC,0x8D64,0x8DE1,0x8E5F,0x78A9,/* 0xD0-0xD7 */
+	0x5207,0x62D9,0x63A5,0x6442,0x6298,0x8A2D,0x7A83,0x7BC0,/* 0xD8-0xDF */
+	0x8AAC,0x96EA,0x7D76,0x820C,0x8749,0x4ED9,0x5148,0x5343,/* 0xE0-0xE7 */
+	0x5360,0x5BA3,0x5C02,0x5C16,0x5DDD,0x6226,0x6247,0x64B0,/* 0xE8-0xEF */
+	0x6813,0x6834,0x6CC9,0x6D45,0x6D17,0x67D3,0x6F5C,0x714E,/* 0xF0-0xF7 */
+	0x717D,0x65CB,0x7A7F,0x7BAD,0x7DDA,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_91[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7E4A,0x7FA8,0x817A,0x821B,0x8239,0x85A6,0x8A6E,0x8CCE,/* 0x40-0x47 */
+	0x8DF5,0x9078,0x9077,0x92AD,0x9291,0x9583,0x9BAE,0x524D,/* 0x48-0x4F */
+	0x5584,0x6F38,0x7136,0x5168,0x7985,0x7E55,0x81B3,0x7CCE,/* 0x50-0x57 */
+	0x564C,0x5851,0x5CA8,0x63AA,0x66FE,0x66FD,0x695A,0x72D9,/* 0x58-0x5F */
+	0x758F,0x758E,0x790E,0x7956,0x79DF,0x7C97,0x7D20,0x7D44,/* 0x60-0x67 */
+	0x8607,0x8A34,0x963B,0x9061,0x9F20,0x50E7,0x5275,0x53CC,/* 0x68-0x6F */
+	0x53E2,0x5009,0x55AA,0x58EE,0x594F,0x723D,0x5B8B,0x5C64,/* 0x70-0x77 */
+	0x531D,0x60E3,0x60F3,0x635C,0x6383,0x633F,0x63BB,0x0000,/* 0x78-0x7F */
+
+	0x64CD,0x65E9,0x66F9,0x5DE3,0x69CD,0x69FD,0x6F15,0x71E5,/* 0x80-0x87 */
+	0x4E89,0x75E9,0x76F8,0x7A93,0x7CDF,0x7DCF,0x7D9C,0x8061,/* 0x88-0x8F */
+	0x8349,0x8358,0x846C,0x84BC,0x85FB,0x88C5,0x8D70,0x9001,/* 0x90-0x97 */
+	0x906D,0x9397,0x971C,0x9A12,0x50CF,0x5897,0x618E,0x81D3,/* 0x98-0x9F */
+	0x8535,0x8D08,0x9020,0x4FC3,0x5074,0x5247,0x5373,0x606F,/* 0xA0-0xA7 */
+	0x6349,0x675F,0x6E2C,0x8DB3,0x901F,0x4FD7,0x5C5E,0x8CCA,/* 0xA8-0xAF */
+	0x65CF,0x7D9A,0x5352,0x8896,0x5176,0x63C3,0x5B58,0x5B6B,/* 0xB0-0xB7 */
+	0x5C0A,0x640D,0x6751,0x905C,0x4ED6,0x591A,0x592A,0x6C70,/* 0xB8-0xBF */
+	0x8A51,0x553E,0x5815,0x59A5,0x60F0,0x6253,0x67C1,0x8235,/* 0xC0-0xC7 */
+	0x6955,0x9640,0x99C4,0x9A28,0x4F53,0x5806,0x5BFE,0x8010,/* 0xC8-0xCF */
+	0x5CB1,0x5E2F,0x5F85,0x6020,0x614B,0x6234,0x66FF,0x6CF0,/* 0xD0-0xD7 */
+	0x6EDE,0x80CE,0x817F,0x82D4,0x888B,0x8CB8,0x9000,0x902E,/* 0xD8-0xDF */
+	0x968A,0x9EDB,0x9BDB,0x4EE3,0x53F0,0x5927,0x7B2C,0x918D,/* 0xE0-0xE7 */
+	0x984C,0x9DF9,0x6EDD,0x7027,0x5353,0x5544,0x5B85,0x6258,/* 0xE8-0xEF */
+	0x629E,0x62D3,0x6CA2,0x6FEF,0x7422,0x8A17,0x9438,0x6FC1,/* 0xF0-0xF7 */
+	0x8AFE,0x8338,0x51E7,0x86F8,0x53EA,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_92[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x53E9,0x4F46,0x9054,0x8FB0,0x596A,0x8131,0x5DFD,0x7AEA,/* 0x40-0x47 */
+	0x8FBF,0x68DA,0x8C37,0x72F8,0x9C48,0x6A3D,0x8AB0,0x4E39,/* 0x48-0x4F */
+	0x5358,0x5606,0x5766,0x62C5,0x63A2,0x65E6,0x6B4E,0x6DE1,/* 0x50-0x57 */
+	0x6E5B,0x70AD,0x77ED,0x7AEF,0x7BAA,0x7DBB,0x803D,0x80C6,/* 0x58-0x5F */
+	0x86CB,0x8A95,0x935B,0x56E3,0x58C7,0x5F3E,0x65AD,0x6696,/* 0x60-0x67 */
+	0x6A80,0x6BB5,0x7537,0x8AC7,0x5024,0x77E5,0x5730,0x5F1B,/* 0x68-0x6F */
+	0x6065,0x667A,0x6C60,0x75F4,0x7A1A,0x7F6E,0x81F4,0x8718,/* 0x70-0x77 */
+	0x9045,0x99B3,0x7BC9,0x755C,0x7AF9,0x7B51,0x84C4,0x0000,/* 0x78-0x7F */
+
+	0x9010,0x79E9,0x7A92,0x8336,0x5AE1,0x7740,0x4E2D,0x4EF2,/* 0x80-0x87 */
+	0x5B99,0x5FE0,0x62BD,0x663C,0x67F1,0x6CE8,0x866B,0x8877,/* 0x88-0x8F */
+	0x8A3B,0x914E,0x92F3,0x99D0,0x6A17,0x7026,0x732A,0x82E7,/* 0x90-0x97 */
+	0x8457,0x8CAF,0x4E01,0x5146,0x51CB,0x558B,0x5BF5,0x5E16,/* 0x98-0x9F */
+	0x5E33,0x5E81,0x5F14,0x5F35,0x5F6B,0x5FB4,0x61F2,0x6311,/* 0xA0-0xA7 */
+	0x66A2,0x671D,0x6F6E,0x7252,0x753A,0x773A,0x8074,0x8139,/* 0xA8-0xAF */
+	0x8178,0x8776,0x8ABF,0x8ADC,0x8D85,0x8DF3,0x929A,0x9577,/* 0xB0-0xB7 */
+	0x9802,0x9CE5,0x52C5,0x6357,0x76F4,0x6715,0x6C88,0x73CD,/* 0xB8-0xBF */
+	0x8CC3,0x93AE,0x9673,0x6D25,0x589C,0x690E,0x69CC,0x8FFD,/* 0xC0-0xC7 */
+	0x939A,0x75DB,0x901A,0x585A,0x6802,0x63B4,0x69FB,0x4F43,/* 0xC8-0xCF */
+	0x6F2C,0x67D8,0x8FBB,0x8526,0x7DB4,0x9354,0x693F,0x6F70,/* 0xD0-0xD7 */
+	0x576A,0x58F7,0x5B2C,0x7D2C,0x722A,0x540A,0x91E3,0x9DB4,/* 0xD8-0xDF */
+	0x4EAD,0x4F4E,0x505C,0x5075,0x5243,0x8C9E,0x5448,0x5824,/* 0xE0-0xE7 */
+	0x5B9A,0x5E1D,0x5E95,0x5EAD,0x5EF7,0x5F1F,0x608C,0x62B5,/* 0xE8-0xEF */
+	0x633A,0x63D0,0x68AF,0x6C40,0x7887,0x798E,0x7A0B,0x7DE0,/* 0xF0-0xF7 */
+	0x8247,0x8A02,0x8AE6,0x8E44,0x9013,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_93[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x90B8,0x912D,0x91D8,0x9F0E,0x6CE5,0x6458,0x64E2,0x6575,/* 0x40-0x47 */
+	0x6EF4,0x7684,0x7B1B,0x9069,0x93D1,0x6EBA,0x54F2,0x5FB9,/* 0x48-0x4F */
+	0x64A4,0x8F4D,0x8FED,0x9244,0x5178,0x586B,0x5929,0x5C55,/* 0x50-0x57 */
+	0x5E97,0x6DFB,0x7E8F,0x751C,0x8CBC,0x8EE2,0x985B,0x70B9,/* 0x58-0x5F */
+	0x4F1D,0x6BBF,0x6FB1,0x7530,0x96FB,0x514E,0x5410,0x5835,/* 0x60-0x67 */
+	0x5857,0x59AC,0x5C60,0x5F92,0x6597,0x675C,0x6E21,0x767B,/* 0x68-0x6F */
+	0x83DF,0x8CED,0x9014,0x90FD,0x934D,0x7825,0x783A,0x52AA,/* 0x70-0x77 */
+	0x5EA6,0x571F,0x5974,0x6012,0x5012,0x515A,0x51AC,0x0000,/* 0x78-0x7F */
+
+	0x51CD,0x5200,0x5510,0x5854,0x5858,0x5957,0x5B95,0x5CF6,/* 0x80-0x87 */
+	0x5D8B,0x60BC,0x6295,0x642D,0x6771,0x6843,0x68BC,0x68DF,/* 0x88-0x8F */
+	0x76D7,0x6DD8,0x6E6F,0x6D9B,0x706F,0x71C8,0x5F53,0x75D8,/* 0x90-0x97 */
+	0x7977,0x7B49,0x7B54,0x7B52,0x7CD6,0x7D71,0x5230,0x8463,/* 0x98-0x9F */
+	0x8569,0x85E4,0x8A0E,0x8B04,0x8C46,0x8E0F,0x9003,0x900F,/* 0xA0-0xA7 */
+	0x9419,0x9676,0x982D,0x9A30,0x95D8,0x50CD,0x52D5,0x540C,/* 0xA8-0xAF */
+	0x5802,0x5C0E,0x61A7,0x649E,0x6D1E,0x77B3,0x7AE5,0x80F4,/* 0xB0-0xB7 */
+	0x8404,0x9053,0x9285,0x5CE0,0x9D07,0x533F,0x5F97,0x5FB3,/* 0xB8-0xBF */
+	0x6D9C,0x7279,0x7763,0x79BF,0x7BE4,0x6BD2,0x72EC,0x8AAD,/* 0xC0-0xC7 */
+	0x6803,0x6A61,0x51F8,0x7A81,0x6934,0x5C4A,0x9CF6,0x82EB,/* 0xC8-0xCF */
+	0x5BC5,0x9149,0x701E,0x5678,0x5C6F,0x60C7,0x6566,0x6C8C,/* 0xD0-0xD7 */
+	0x8C5A,0x9041,0x9813,0x5451,0x66C7,0x920D,0x5948,0x90A3,/* 0xD8-0xDF */
+	0x5185,0x4E4D,0x51EA,0x8599,0x8B0E,0x7058,0x637A,0x934B,/* 0xE0-0xE7 */
+	0x6962,0x99B4,0x7E04,0x7577,0x5357,0x6960,0x8EDF,0x96E3,/* 0xE8-0xEF */
+	0x6C5D,0x4E8C,0x5C3C,0x5F10,0x8FE9,0x5302,0x8CD1,0x8089,/* 0xF0-0xF7 */
+	0x8679,0x5EFF,0x65E5,0x4E73,0x5165,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_94[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5982,0x5C3F,0x97EE,0x4EFB,0x598A,0x5FCD,0x8A8D,0x6FE1,/* 0x40-0x47 */
+	0x79B0,0x7962,0x5BE7,0x8471,0x732B,0x71B1,0x5E74,0x5FF5,/* 0x48-0x4F */
+	0x637B,0x649A,0x71C3,0x7C98,0x4E43,0x5EFC,0x4E4B,0x57DC,/* 0x50-0x57 */
+	0x56A2,0x60A9,0x6FC3,0x7D0D,0x80FD,0x8133,0x81BF,0x8FB2,/* 0x58-0x5F */
+	0x8997,0x86A4,0x5DF4,0x628A,0x64AD,0x8987,0x6777,0x6CE2,/* 0x60-0x67 */
+	0x6D3E,0x7436,0x7834,0x5A46,0x7F75,0x82AD,0x99AC,0x4FF3,/* 0x68-0x6F */
+	0x5EC3,0x62DD,0x6392,0x6557,0x676F,0x76C3,0x724C,0x80CC,/* 0x70-0x77 */
+	0x80BA,0x8F29,0x914D,0x500D,0x57F9,0x5A92,0x6885,0x0000,/* 0x78-0x7F */
+
+	0x6973,0x7164,0x72FD,0x8CB7,0x58F2,0x8CE0,0x966A,0x9019,/* 0x80-0x87 */
+	0x877F,0x79E4,0x77E7,0x8429,0x4F2F,0x5265,0x535A,0x62CD,/* 0x88-0x8F */
+	0x67CF,0x6CCA,0x767D,0x7B94,0x7C95,0x8236,0x8584,0x8FEB,/* 0x90-0x97 */
+	0x66DD,0x6F20,0x7206,0x7E1B,0x83AB,0x99C1,0x9EA6,0x51FD,/* 0x98-0x9F */
+	0x7BB1,0x7872,0x7BB8,0x8087,0x7B48,0x6AE8,0x5E61,0x808C,/* 0xA0-0xA7 */
+	0x7551,0x7560,0x516B,0x9262,0x6E8C,0x767A,0x9197,0x9AEA,/* 0xA8-0xAF */
+	0x4F10,0x7F70,0x629C,0x7B4F,0x95A5,0x9CE9,0x567A,0x5859,/* 0xB0-0xB7 */
+	0x86E4,0x96BC,0x4F34,0x5224,0x534A,0x53CD,0x53DB,0x5E06,/* 0xB8-0xBF */
+	0x642C,0x6591,0x677F,0x6C3E,0x6C4E,0x7248,0x72AF,0x73ED,/* 0xC0-0xC7 */
+	0x7554,0x7E41,0x822C,0x85E9,0x8CA9,0x7BC4,0x91C6,0x7169,/* 0xC8-0xCF */
+	0x9812,0x98EF,0x633D,0x6669,0x756A,0x76E4,0x78D0,0x8543,/* 0xD0-0xD7 */
+	0x86EE,0x532A,0x5351,0x5426,0x5983,0x5E87,0x5F7C,0x60B2,/* 0xD8-0xDF */
+	0x6249,0x6279,0x62AB,0x6590,0x6BD4,0x6CCC,0x75B2,0x76AE,/* 0xE0-0xE7 */
+	0x7891,0x79D8,0x7DCB,0x7F77,0x80A5,0x88AB,0x8AB9,0x8CBB,/* 0xE8-0xEF */
+	0x907F,0x975E,0x98DB,0x6A0B,0x7C38,0x5099,0x5C3E,0x5FAE,/* 0xF0-0xF7 */
+	0x6787,0x6BD8,0x7435,0x7709,0x7F8E,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_95[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9F3B,0x67CA,0x7A17,0x5339,0x758B,0x9AED,0x5F66,0x819D,/* 0x40-0x47 */
+	0x83F1,0x8098,0x5F3C,0x5FC5,0x7562,0x7B46,0x903C,0x6867,/* 0x48-0x4F */
+	0x59EB,0x5A9B,0x7D10,0x767E,0x8B2C,0x4FF5,0x5F6A,0x6A19,/* 0x50-0x57 */
+	0x6C37,0x6F02,0x74E2,0x7968,0x8868,0x8A55,0x8C79,0x5EDF,/* 0x58-0x5F */
+	0x63CF,0x75C5,0x79D2,0x82D7,0x9328,0x92F2,0x849C,0x86ED,/* 0x60-0x67 */
+	0x9C2D,0x54C1,0x5F6C,0x658C,0x6D5C,0x7015,0x8CA7,0x8CD3,/* 0x68-0x6F */
+	0x983B,0x654F,0x74F6,0x4E0D,0x4ED8,0x57E0,0x592B,0x5A66,/* 0x70-0x77 */
+	0x5BCC,0x51A8,0x5E03,0x5E9C,0x6016,0x6276,0x6577,0x0000,/* 0x78-0x7F */
+
+	0x65A7,0x666E,0x6D6E,0x7236,0x7B26,0x8150,0x819A,0x8299,/* 0x80-0x87 */
+	0x8B5C,0x8CA0,0x8CE6,0x8D74,0x961C,0x9644,0x4FAE,0x64AB,/* 0x88-0x8F */
+	0x6B66,0x821E,0x8461,0x856A,0x90E8,0x5C01,0x6953,0x98A8,/* 0x90-0x97 */
+	0x847A,0x8557,0x4F0F,0x526F,0x5FA9,0x5E45,0x670D,0x798F,/* 0x98-0x9F */
+	0x8179,0x8907,0x8986,0x6DF5,0x5F17,0x6255,0x6CB8,0x4ECF,/* 0xA0-0xA7 */
+	0x7269,0x9B92,0x5206,0x543B,0x5674,0x58B3,0x61A4,0x626E,/* 0xA8-0xAF */
+	0x711A,0x596E,0x7C89,0x7CDE,0x7D1B,0x96F0,0x6587,0x805E,/* 0xB0-0xB7 */
+	0x4E19,0x4F75,0x5175,0x5840,0x5E63,0x5E73,0x5F0A,0x67C4,/* 0xB8-0xBF */
+	0x4E26,0x853D,0x9589,0x965B,0x7C73,0x9801,0x50FB,0x58C1,/* 0xC0-0xC7 */
+	0x7656,0x78A7,0x5225,0x77A5,0x8511,0x7B86,0x504F,0x5909,/* 0xC8-0xCF */
+	0x7247,0x7BC7,0x7DE8,0x8FBA,0x8FD4,0x904D,0x4FBF,0x52C9,/* 0xD0-0xD7 */
+	0x5A29,0x5F01,0x97AD,0x4FDD,0x8217,0x92EA,0x5703,0x6355,/* 0xD8-0xDF */
+	0x6B69,0x752B,0x88DC,0x8F14,0x7A42,0x52DF,0x5893,0x6155,/* 0xE0-0xE7 */
+	0x620A,0x66AE,0x6BCD,0x7C3F,0x83E9,0x5023,0x4FF8,0x5305,/* 0xE8-0xEF */
+	0x5446,0x5831,0x5949,0x5B9D,0x5CF0,0x5CEF,0x5D29,0x5E96,/* 0xF0-0xF7 */
+	0x62B1,0x6367,0x653E,0x65B9,0x670B,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_96[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6CD5,0x6CE1,0x70F9,0x7832,0x7E2B,0x80DE,0x82B3,0x840C,/* 0x40-0x47 */
+	0x84EC,0x8702,0x8912,0x8A2A,0x8C4A,0x90A6,0x92D2,0x98FD,/* 0x48-0x4F */
+	0x9CF3,0x9D6C,0x4E4F,0x4EA1,0x508D,0x5256,0x574A,0x59A8,/* 0x50-0x57 */
+	0x5E3D,0x5FD8,0x5FD9,0x623F,0x66B4,0x671B,0x67D0,0x68D2,/* 0x58-0x5F */
+	0x5192,0x7D21,0x80AA,0x81A8,0x8B00,0x8C8C,0x8CBF,0x927E,/* 0x60-0x67 */
+	0x9632,0x5420,0x982C,0x5317,0x50D5,0x535C,0x58A8,0x64B2,/* 0x68-0x6F */
+	0x6734,0x7267,0x7766,0x7A46,0x91E6,0x52C3,0x6CA1,0x6B86,/* 0x70-0x77 */
+	0x5800,0x5E4C,0x5954,0x672C,0x7FFB,0x51E1,0x76C6,0x0000,/* 0x78-0x7F */
+
+	0x6469,0x78E8,0x9B54,0x9EBB,0x57CB,0x59B9,0x6627,0x679A,/* 0x80-0x87 */
+	0x6BCE,0x54E9,0x69D9,0x5E55,0x819C,0x6795,0x9BAA,0x67FE,/* 0x88-0x8F */
+	0x9C52,0x685D,0x4EA6,0x4FE3,0x53C8,0x62B9,0x672B,0x6CAB,/* 0x90-0x97 */
+	0x8FC4,0x4FAD,0x7E6D,0x9EBF,0x4E07,0x6162,0x6E80,0x6F2B,/* 0x98-0x9F */
+	0x8513,0x5473,0x672A,0x9B45,0x5DF3,0x7B95,0x5CAC,0x5BC6,/* 0xA0-0xA7 */
+	0x871C,0x6E4A,0x84D1,0x7A14,0x8108,0x5999,0x7C8D,0x6C11,/* 0xA8-0xAF */
+	0x7720,0x52D9,0x5922,0x7121,0x725F,0x77DB,0x9727,0x9D61,/* 0xB0-0xB7 */
+	0x690B,0x5A7F,0x5A18,0x51A5,0x540D,0x547D,0x660E,0x76DF,/* 0xB8-0xBF */
+	0x8FF7,0x9298,0x9CF4,0x59EA,0x725D,0x6EC5,0x514D,0x68C9,/* 0xC0-0xC7 */
+	0x7DBF,0x7DEC,0x9762,0x9EBA,0x6478,0x6A21,0x8302,0x5984,/* 0xC8-0xCF */
+	0x5B5F,0x6BDB,0x731B,0x76F2,0x7DB2,0x8017,0x8499,0x5132,/* 0xD0-0xD7 */
+	0x6728,0x9ED9,0x76EE,0x6762,0x52FF,0x9905,0x5C24,0x623B,/* 0xD8-0xDF */
+	0x7C7E,0x8CB0,0x554F,0x60B6,0x7D0B,0x9580,0x5301,0x4E5F,/* 0xE0-0xE7 */
+	0x51B6,0x591C,0x723A,0x8036,0x91CE,0x5F25,0x77E2,0x5384,/* 0xE8-0xEF */
+	0x5F79,0x7D04,0x85AC,0x8A33,0x8E8D,0x9756,0x67F3,0x85AE,/* 0xF0-0xF7 */
+	0x9453,0x6109,0x6108,0x6CB9,0x7652,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_97[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8AED,0x8F38,0x552F,0x4F51,0x512A,0x52C7,0x53CB,0x5BA5,/* 0x40-0x47 */
+	0x5E7D,0x60A0,0x6182,0x63D6,0x6709,0x67DA,0x6E67,0x6D8C,/* 0x48-0x4F */
+	0x7336,0x7337,0x7531,0x7950,0x88D5,0x8A98,0x904A,0x9091,/* 0x50-0x57 */
+	0x90F5,0x96C4,0x878D,0x5915,0x4E88,0x4F59,0x4E0E,0x8A89,/* 0x58-0x5F */
+	0x8F3F,0x9810,0x50AD,0x5E7C,0x5996,0x5BB9,0x5EB8,0x63DA,/* 0x60-0x67 */
+	0x63FA,0x64C1,0x66DC,0x694A,0x69D8,0x6D0B,0x6EB6,0x7194,/* 0x68-0x6F */
+	0x7528,0x7AAF,0x7F8A,0x8000,0x8449,0x84C9,0x8981,0x8B21,/* 0x70-0x77 */
+	0x8E0A,0x9065,0x967D,0x990A,0x617E,0x6291,0x6B32,0x0000,/* 0x78-0x7F */
+
+	0x6C83,0x6D74,0x7FCC,0x7FFC,0x6DC0,0x7F85,0x87BA,0x88F8,/* 0x80-0x87 */
+	0x6765,0x83B1,0x983C,0x96F7,0x6D1B,0x7D61,0x843D,0x916A,/* 0x88-0x8F */
+	0x4E71,0x5375,0x5D50,0x6B04,0x6FEB,0x85CD,0x862D,0x89A7,/* 0x90-0x97 */
+	0x5229,0x540F,0x5C65,0x674E,0x68A8,0x7406,0x7483,0x75E2,/* 0x98-0x9F */
+	0x88CF,0x88E1,0x91CC,0x96E2,0x9678,0x5F8B,0x7387,0x7ACB,/* 0xA0-0xA7 */
+	0x844E,0x63A0,0x7565,0x5289,0x6D41,0x6E9C,0x7409,0x7559,/* 0xA8-0xAF */
+	0x786B,0x7C92,0x9686,0x7ADC,0x9F8D,0x4FB6,0x616E,0x65C5,/* 0xB0-0xB7 */
+	0x865C,0x4E86,0x4EAE,0x50DA,0x4E21,0x51CC,0x5BEE,0x6599,/* 0xB8-0xBF */
+	0x6881,0x6DBC,0x731F,0x7642,0x77AD,0x7A1C,0x7CE7,0x826F,/* 0xC0-0xC7 */
+	0x8AD2,0x907C,0x91CF,0x9675,0x9818,0x529B,0x7DD1,0x502B,/* 0xC8-0xCF */
+	0x5398,0x6797,0x6DCB,0x71D0,0x7433,0x81E8,0x8F2A,0x96A3,/* 0xD0-0xD7 */
+	0x9C57,0x9E9F,0x7460,0x5841,0x6D99,0x7D2F,0x985E,0x4EE4,/* 0xD8-0xDF */
+	0x4F36,0x4F8B,0x51B7,0x52B1,0x5DBA,0x601C,0x73B2,0x793C,/* 0xE0-0xE7 */
+	0x82D3,0x9234,0x96B7,0x96F6,0x970A,0x9E97,0x9F62,0x66A6,/* 0xE8-0xEF */
+	0x6B74,0x5217,0x52A3,0x70C8,0x88C2,0x5EC9,0x604B,0x6190,/* 0xF0-0xF7 */
+	0x6F23,0x7149,0x7C3E,0x7DF4,0x806F,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_98[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x84EE,0x9023,0x932C,0x5442,0x9B6F,0x6AD3,0x7089,0x8CC2,/* 0x40-0x47 */
+	0x8DEF,0x9732,0x52B4,0x5A41,0x5ECA,0x5F04,0x6717,0x697C,/* 0x48-0x4F */
+	0x6994,0x6D6A,0x6F0F,0x7262,0x72FC,0x7BED,0x8001,0x807E,/* 0x50-0x57 */
+	0x874B,0x90CE,0x516D,0x9E93,0x7984,0x808B,0x9332,0x8AD6,/* 0x58-0x5F */
+	0x502D,0x548C,0x8A71,0x6B6A,0x8CC4,0x8107,0x60D1,0x67A0,/* 0x60-0x67 */
+	0x9DF2,0x4E99,0x4E98,0x9C10,0x8A6B,0x85C1,0x8568,0x6900,/* 0x68-0x6F */
+	0x6E7E,0x7897,0x8155,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x5F0C,/* 0x98-0x9F */
+	0x4E10,0x4E15,0x4E2A,0x4E31,0x4E36,0x4E3C,0x4E3F,0x4E42,/* 0xA0-0xA7 */
+	0x4E56,0x4E58,0x4E82,0x4E85,0x8C6B,0x4E8A,0x8212,0x5F0D,/* 0xA8-0xAF */
+	0x4E8E,0x4E9E,0x4E9F,0x4EA0,0x4EA2,0x4EB0,0x4EB3,0x4EB6,/* 0xB0-0xB7 */
+	0x4ECE,0x4ECD,0x4EC4,0x4EC6,0x4EC2,0x4ED7,0x4EDE,0x4EED,/* 0xB8-0xBF */
+	0x4EDF,0x4EF7,0x4F09,0x4F5A,0x4F30,0x4F5B,0x4F5D,0x4F57,/* 0xC0-0xC7 */
+	0x4F47,0x4F76,0x4F88,0x4F8F,0x4F98,0x4F7B,0x4F69,0x4F70,/* 0xC8-0xCF */
+	0x4F91,0x4F6F,0x4F86,0x4F96,0x5118,0x4FD4,0x4FDF,0x4FCE,/* 0xD0-0xD7 */
+	0x4FD8,0x4FDB,0x4FD1,0x4FDA,0x4FD0,0x4FE4,0x4FE5,0x501A,/* 0xD8-0xDF */
+	0x5028,0x5014,0x502A,0x5025,0x5005,0x4F1C,0x4FF6,0x5021,/* 0xE0-0xE7 */
+	0x5029,0x502C,0x4FFE,0x4FEF,0x5011,0x5006,0x5043,0x5047,/* 0xE8-0xEF */
+	0x6703,0x5055,0x5050,0x5048,0x505A,0x5056,0x506C,0x5078,/* 0xF0-0xF7 */
+	0x5080,0x509A,0x5085,0x50B4,0x50B2,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_99[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x50C9,0x50CA,0x50B3,0x50C2,0x50D6,0x50DE,0x50E5,0x50ED,/* 0x40-0x47 */
+	0x50E3,0x50EE,0x50F9,0x50F5,0x5109,0x5101,0x5102,0x5116,/* 0x48-0x4F */
+	0x5115,0x5114,0x511A,0x5121,0x513A,0x5137,0x513C,0x513B,/* 0x50-0x57 */
+	0x513F,0x5140,0x5152,0x514C,0x5154,0x5162,0x7AF8,0x5169,/* 0x58-0x5F */
+	0x516A,0x516E,0x5180,0x5182,0x56D8,0x518C,0x5189,0x518F,/* 0x60-0x67 */
+	0x5191,0x5193,0x5195,0x5196,0x51A4,0x51A6,0x51A2,0x51A9,/* 0x68-0x6F */
+	0x51AA,0x51AB,0x51B3,0x51B1,0x51B2,0x51B0,0x51B5,0x51BD,/* 0x70-0x77 */
+	0x51C5,0x51C9,0x51DB,0x51E0,0x8655,0x51E9,0x51ED,0x0000,/* 0x78-0x7F */
+
+	0x51F0,0x51F5,0x51FE,0x5204,0x520B,0x5214,0x520E,0x5227,/* 0x80-0x87 */
+	0x522A,0x522E,0x5233,0x5239,0x524F,0x5244,0x524B,0x524C,/* 0x88-0x8F */
+	0x525E,0x5254,0x526A,0x5274,0x5269,0x5273,0x527F,0x527D,/* 0x90-0x97 */
+	0x528D,0x5294,0x5292,0x5271,0x5288,0x5291,0x8FA8,0x8FA7,/* 0x98-0x9F */
+	0x52AC,0x52AD,0x52BC,0x52B5,0x52C1,0x52CD,0x52D7,0x52DE,/* 0xA0-0xA7 */
+	0x52E3,0x52E6,0x98ED,0x52E0,0x52F3,0x52F5,0x52F8,0x52F9,/* 0xA8-0xAF */
+	0x5306,0x5308,0x7538,0x530D,0x5310,0x530F,0x5315,0x531A,/* 0xB0-0xB7 */
+	0x5323,0x532F,0x5331,0x5333,0x5338,0x5340,0x5346,0x5345,/* 0xB8-0xBF */
+	0x4E17,0x5349,0x534D,0x51D6,0x535E,0x5369,0x536E,0x5918,/* 0xC0-0xC7 */
+	0x537B,0x5377,0x5382,0x5396,0x53A0,0x53A6,0x53A5,0x53AE,/* 0xC8-0xCF */
+	0x53B0,0x53B6,0x53C3,0x7C12,0x96D9,0x53DF,0x66FC,0x71EE,/* 0xD0-0xD7 */
+	0x53EE,0x53E8,0x53ED,0x53FA,0x5401,0x543D,0x5440,0x542C,/* 0xD8-0xDF */
+	0x542D,0x543C,0x542E,0x5436,0x5429,0x541D,0x544E,0x548F,/* 0xE0-0xE7 */
+	0x5475,0x548E,0x545F,0x5471,0x5477,0x5470,0x5492,0x547B,/* 0xE8-0xEF */
+	0x5480,0x5476,0x5484,0x5490,0x5486,0x54C7,0x54A2,0x54B8,/* 0xF0-0xF7 */
+	0x54A5,0x54AC,0x54C4,0x54C8,0x54A8,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x54AB,0x54C2,0x54A4,0x54BE,0x54BC,0x54D8,0x54E5,0x54E6,/* 0x40-0x47 */
+	0x550F,0x5514,0x54FD,0x54EE,0x54ED,0x54FA,0x54E2,0x5539,/* 0x48-0x4F */
+	0x5540,0x5563,0x554C,0x552E,0x555C,0x5545,0x5556,0x5557,/* 0x50-0x57 */
+	0x5538,0x5533,0x555D,0x5599,0x5580,0x54AF,0x558A,0x559F,/* 0x58-0x5F */
+	0x557B,0x557E,0x5598,0x559E,0x55AE,0x557C,0x5583,0x55A9,/* 0x60-0x67 */
+	0x5587,0x55A8,0x55DA,0x55C5,0x55DF,0x55C4,0x55DC,0x55E4,/* 0x68-0x6F */
+	0x55D4,0x5614,0x55F7,0x5616,0x55FE,0x55FD,0x561B,0x55F9,/* 0x70-0x77 */
+	0x564E,0x5650,0x71DF,0x5634,0x5636,0x5632,0x5638,0x0000,/* 0x78-0x7F */
+
+	0x566B,0x5664,0x562F,0x566C,0x566A,0x5686,0x5680,0x568A,/* 0x80-0x87 */
+	0x56A0,0x5694,0x568F,0x56A5,0x56AE,0x56B6,0x56B4,0x56C2,/* 0x88-0x8F */
+	0x56BC,0x56C1,0x56C3,0x56C0,0x56C8,0x56CE,0x56D1,0x56D3,/* 0x90-0x97 */
+	0x56D7,0x56EE,0x56F9,0x5700,0x56FF,0x5704,0x5709,0x5708,/* 0x98-0x9F */
+	0x570B,0x570D,0x5713,0x5718,0x5716,0x55C7,0x571C,0x5726,/* 0xA0-0xA7 */
+	0x5737,0x5738,0x574E,0x573B,0x5740,0x574F,0x5769,0x57C0,/* 0xA8-0xAF */
+	0x5788,0x5761,0x577F,0x5789,0x5793,0x57A0,0x57B3,0x57A4,/* 0xB0-0xB7 */
+	0x57AA,0x57B0,0x57C3,0x57C6,0x57D4,0x57D2,0x57D3,0x580A,/* 0xB8-0xBF */
+	0x57D6,0x57E3,0x580B,0x5819,0x581D,0x5872,0x5821,0x5862,/* 0xC0-0xC7 */
+	0x584B,0x5870,0x6BC0,0x5852,0x583D,0x5879,0x5885,0x58B9,/* 0xC8-0xCF */
+	0x589F,0x58AB,0x58BA,0x58DE,0x58BB,0x58B8,0x58AE,0x58C5,/* 0xD0-0xD7 */
+	0x58D3,0x58D1,0x58D7,0x58D9,0x58D8,0x58E5,0x58DC,0x58E4,/* 0xD8-0xDF */
+	0x58DF,0x58EF,0x58FA,0x58F9,0x58FB,0x58FC,0x58FD,0x5902,/* 0xE0-0xE7 */
+	0x590A,0x5910,0x591B,0x68A6,0x5925,0x592C,0x592D,0x5932,/* 0xE8-0xEF */
+	0x5938,0x593E,0x7AD2,0x5955,0x5950,0x594E,0x595A,0x5958,/* 0xF0-0xF7 */
+	0x5962,0x5960,0x5967,0x596C,0x5969,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5978,0x5981,0x599D,0x4F5E,0x4FAB,0x59A3,0x59B2,0x59C6,/* 0x40-0x47 */
+	0x59E8,0x59DC,0x598D,0x59D9,0x59DA,0x5A25,0x5A1F,0x5A11,/* 0x48-0x4F */
+	0x5A1C,0x5A09,0x5A1A,0x5A40,0x5A6C,0x5A49,0x5A35,0x5A36,/* 0x50-0x57 */
+	0x5A62,0x5A6A,0x5A9A,0x5ABC,0x5ABE,0x5ACB,0x5AC2,0x5ABD,/* 0x58-0x5F */
+	0x5AE3,0x5AD7,0x5AE6,0x5AE9,0x5AD6,0x5AFA,0x5AFB,0x5B0C,/* 0x60-0x67 */
+	0x5B0B,0x5B16,0x5B32,0x5AD0,0x5B2A,0x5B36,0x5B3E,0x5B43,/* 0x68-0x6F */
+	0x5B45,0x5B40,0x5B51,0x5B55,0x5B5A,0x5B5B,0x5B65,0x5B69,/* 0x70-0x77 */
+	0x5B70,0x5B73,0x5B75,0x5B78,0x6588,0x5B7A,0x5B80,0x0000,/* 0x78-0x7F */
+
+	0x5B83,0x5BA6,0x5BB8,0x5BC3,0x5BC7,0x5BC9,0x5BD4,0x5BD0,/* 0x80-0x87 */
+	0x5BE4,0x5BE6,0x5BE2,0x5BDE,0x5BE5,0x5BEB,0x5BF0,0x5BF6,/* 0x88-0x8F */
+	0x5BF3,0x5C05,0x5C07,0x5C08,0x5C0D,0x5C13,0x5C20,0x5C22,/* 0x90-0x97 */
+	0x5C28,0x5C38,0x5C39,0x5C41,0x5C46,0x5C4E,0x5C53,0x5C50,/* 0x98-0x9F */
+	0x5C4F,0x5B71,0x5C6C,0x5C6E,0x4E62,0x5C76,0x5C79,0x5C8C,/* 0xA0-0xA7 */
+	0x5C91,0x5C94,0x599B,0x5CAB,0x5CBB,0x5CB6,0x5CBC,0x5CB7,/* 0xA8-0xAF */
+	0x5CC5,0x5CBE,0x5CC7,0x5CD9,0x5CE9,0x5CFD,0x5CFA,0x5CED,/* 0xB0-0xB7 */
+	0x5D8C,0x5CEA,0x5D0B,0x5D15,0x5D17,0x5D5C,0x5D1F,0x5D1B,/* 0xB8-0xBF */
+	0x5D11,0x5D14,0x5D22,0x5D1A,0x5D19,0x5D18,0x5D4C,0x5D52,/* 0xC0-0xC7 */
+	0x5D4E,0x5D4B,0x5D6C,0x5D73,0x5D76,0x5D87,0x5D84,0x5D82,/* 0xC8-0xCF */
+	0x5DA2,0x5D9D,0x5DAC,0x5DAE,0x5DBD,0x5D90,0x5DB7,0x5DBC,/* 0xD0-0xD7 */
+	0x5DC9,0x5DCD,0x5DD3,0x5DD2,0x5DD6,0x5DDB,0x5DEB,0x5DF2,/* 0xD8-0xDF */
+	0x5DF5,0x5E0B,0x5E1A,0x5E19,0x5E11,0x5E1B,0x5E36,0x5E37,/* 0xE0-0xE7 */
+	0x5E44,0x5E43,0x5E40,0x5E4E,0x5E57,0x5E54,0x5E5F,0x5E62,/* 0xE8-0xEF */
+	0x5E64,0x5E47,0x5E75,0x5E76,0x5E7A,0x9EBC,0x5E7F,0x5EA0,/* 0xF0-0xF7 */
+	0x5EC1,0x5EC2,0x5EC8,0x5ED0,0x5ECF,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5ED6,0x5EE3,0x5EDD,0x5EDA,0x5EDB,0x5EE2,0x5EE1,0x5EE8,/* 0x40-0x47 */
+	0x5EE9,0x5EEC,0x5EF1,0x5EF3,0x5EF0,0x5EF4,0x5EF8,0x5EFE,/* 0x48-0x4F */
+	0x5F03,0x5F09,0x5F5D,0x5F5C,0x5F0B,0x5F11,0x5F16,0x5F29,/* 0x50-0x57 */
+	0x5F2D,0x5F38,0x5F41,0x5F48,0x5F4C,0x5F4E,0x5F2F,0x5F51,/* 0x58-0x5F */
+	0x5F56,0x5F57,0x5F59,0x5F61,0x5F6D,0x5F73,0x5F77,0x5F83,/* 0x60-0x67 */
+	0x5F82,0x5F7F,0x5F8A,0x5F88,0x5F91,0x5F87,0x5F9E,0x5F99,/* 0x68-0x6F */
+	0x5F98,0x5FA0,0x5FA8,0x5FAD,0x5FBC,0x5FD6,0x5FFB,0x5FE4,/* 0x70-0x77 */
+	0x5FF8,0x5FF1,0x5FDD,0x60B3,0x5FFF,0x6021,0x6060,0x0000,/* 0x78-0x7F */
+
+	0x6019,0x6010,0x6029,0x600E,0x6031,0x601B,0x6015,0x602B,/* 0x80-0x87 */
+	0x6026,0x600F,0x603A,0x605A,0x6041,0x606A,0x6077,0x605F,/* 0x88-0x8F */
+	0x604A,0x6046,0x604D,0x6063,0x6043,0x6064,0x6042,0x606C,/* 0x90-0x97 */
+	0x606B,0x6059,0x6081,0x608D,0x60E7,0x6083,0x609A,0x6084,/* 0x98-0x9F */
+	0x609B,0x6096,0x6097,0x6092,0x60A7,0x608B,0x60E1,0x60B8,/* 0xA0-0xA7 */
+	0x60E0,0x60D3,0x60B4,0x5FF0,0x60BD,0x60C6,0x60B5,0x60D8,/* 0xA8-0xAF */
+	0x614D,0x6115,0x6106,0x60F6,0x60F7,0x6100,0x60F4,0x60FA,/* 0xB0-0xB7 */
+	0x6103,0x6121,0x60FB,0x60F1,0x610D,0x610E,0x6147,0x613E,/* 0xB8-0xBF */
+	0x6128,0x6127,0x614A,0x613F,0x613C,0x612C,0x6134,0x613D,/* 0xC0-0xC7 */
+	0x6142,0x6144,0x6173,0x6177,0x6158,0x6159,0x615A,0x616B,/* 0xC8-0xCF */
+	0x6174,0x616F,0x6165,0x6171,0x615F,0x615D,0x6153,0x6175,/* 0xD0-0xD7 */
+	0x6199,0x6196,0x6187,0x61AC,0x6194,0x619A,0x618A,0x6191,/* 0xD8-0xDF */
+	0x61AB,0x61AE,0x61CC,0x61CA,0x61C9,0x61F7,0x61C8,0x61C3,/* 0xE0-0xE7 */
+	0x61C6,0x61BA,0x61CB,0x7F79,0x61CD,0x61E6,0x61E3,0x61F6,/* 0xE8-0xEF */
+	0x61FA,0x61F4,0x61FF,0x61FD,0x61FC,0x61FE,0x6200,0x6208,/* 0xF0-0xF7 */
+	0x6209,0x620D,0x620C,0x6214,0x621B,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x621E,0x6221,0x622A,0x622E,0x6230,0x6232,0x6233,0x6241,/* 0x40-0x47 */
+	0x624E,0x625E,0x6263,0x625B,0x6260,0x6268,0x627C,0x6282,/* 0x48-0x4F */
+	0x6289,0x627E,0x6292,0x6293,0x6296,0x62D4,0x6283,0x6294,/* 0x50-0x57 */
+	0x62D7,0x62D1,0x62BB,0x62CF,0x62FF,0x62C6,0x64D4,0x62C8,/* 0x58-0x5F */
+	0x62DC,0x62CC,0x62CA,0x62C2,0x62C7,0x629B,0x62C9,0x630C,/* 0x60-0x67 */
+	0x62EE,0x62F1,0x6327,0x6302,0x6308,0x62EF,0x62F5,0x6350,/* 0x68-0x6F */
+	0x633E,0x634D,0x641C,0x634F,0x6396,0x638E,0x6380,0x63AB,/* 0x70-0x77 */
+	0x6376,0x63A3,0x638F,0x6389,0x639F,0x63B5,0x636B,0x0000,/* 0x78-0x7F */
+
+	0x6369,0x63BE,0x63E9,0x63C0,0x63C6,0x63E3,0x63C9,0x63D2,/* 0x80-0x87 */
+	0x63F6,0x63C4,0x6416,0x6434,0x6406,0x6413,0x6426,0x6436,/* 0x88-0x8F */
+	0x651D,0x6417,0x6428,0x640F,0x6467,0x646F,0x6476,0x644E,/* 0x90-0x97 */
+	0x652A,0x6495,0x6493,0x64A5,0x64A9,0x6488,0x64BC,0x64DA,/* 0x98-0x9F */
+	0x64D2,0x64C5,0x64C7,0x64BB,0x64D8,0x64C2,0x64F1,0x64E7,/* 0xA0-0xA7 */
+	0x8209,0x64E0,0x64E1,0x62AC,0x64E3,0x64EF,0x652C,0x64F6,/* 0xA8-0xAF */
+	0x64F4,0x64F2,0x64FA,0x6500,0x64FD,0x6518,0x651C,0x6505,/* 0xB0-0xB7 */
+	0x6524,0x6523,0x652B,0x6534,0x6535,0x6537,0x6536,0x6538,/* 0xB8-0xBF */
+	0x754B,0x6548,0x6556,0x6555,0x654D,0x6558,0x655E,0x655D,/* 0xC0-0xC7 */
+	0x6572,0x6578,0x6582,0x6583,0x8B8A,0x659B,0x659F,0x65AB,/* 0xC8-0xCF */
+	0x65B7,0x65C3,0x65C6,0x65C1,0x65C4,0x65CC,0x65D2,0x65DB,/* 0xD0-0xD7 */
+	0x65D9,0x65E0,0x65E1,0x65F1,0x6772,0x660A,0x6603,0x65FB,/* 0xD8-0xDF */
+	0x6773,0x6635,0x6636,0x6634,0x661C,0x664F,0x6644,0x6649,/* 0xE0-0xE7 */
+	0x6641,0x665E,0x665D,0x6664,0x6667,0x6668,0x665F,0x6662,/* 0xE8-0xEF */
+	0x6670,0x6683,0x6688,0x668E,0x6689,0x6684,0x6698,0x669D,/* 0xF0-0xF7 */
+	0x66C1,0x66B9,0x66C9,0x66BE,0x66BC,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x66C4,0x66B8,0x66D6,0x66DA,0x66E0,0x663F,0x66E6,0x66E9,/* 0x40-0x47 */
+	0x66F0,0x66F5,0x66F7,0x670F,0x6716,0x671E,0x6726,0x6727,/* 0x48-0x4F */
+	0x9738,0x672E,0x673F,0x6736,0x6741,0x6738,0x6737,0x6746,/* 0x50-0x57 */
+	0x675E,0x6760,0x6759,0x6763,0x6764,0x6789,0x6770,0x67A9,/* 0x58-0x5F */
+	0x677C,0x676A,0x678C,0x678B,0x67A6,0x67A1,0x6785,0x67B7,/* 0x60-0x67 */
+	0x67EF,0x67B4,0x67EC,0x67B3,0x67E9,0x67B8,0x67E4,0x67DE,/* 0x68-0x6F */
+	0x67DD,0x67E2,0x67EE,0x67B9,0x67CE,0x67C6,0x67E7,0x6A9C,/* 0x70-0x77 */
+	0x681E,0x6846,0x6829,0x6840,0x684D,0x6832,0x684E,0x0000,/* 0x78-0x7F */
+
+	0x68B3,0x682B,0x6859,0x6863,0x6877,0x687F,0x689F,0x688F,/* 0x80-0x87 */
+	0x68AD,0x6894,0x689D,0x689B,0x6883,0x6AAE,0x68B9,0x6874,/* 0x88-0x8F */
+	0x68B5,0x68A0,0x68BA,0x690F,0x688D,0x687E,0x6901,0x68CA,/* 0x90-0x97 */
+	0x6908,0x68D8,0x6922,0x6926,0x68E1,0x690C,0x68CD,0x68D4,/* 0x98-0x9F */
+	0x68E7,0x68D5,0x6936,0x6912,0x6904,0x68D7,0x68E3,0x6925,/* 0xA0-0xA7 */
+	0x68F9,0x68E0,0x68EF,0x6928,0x692A,0x691A,0x6923,0x6921,/* 0xA8-0xAF */
+	0x68C6,0x6979,0x6977,0x695C,0x6978,0x696B,0x6954,0x697E,/* 0xB0-0xB7 */
+	0x696E,0x6939,0x6974,0x693D,0x6959,0x6930,0x6961,0x695E,/* 0xB8-0xBF */
+	0x695D,0x6981,0x696A,0x69B2,0x69AE,0x69D0,0x69BF,0x69C1,/* 0xC0-0xC7 */
+	0x69D3,0x69BE,0x69CE,0x5BE8,0x69CA,0x69DD,0x69BB,0x69C3,/* 0xC8-0xCF */
+	0x69A7,0x6A2E,0x6991,0x69A0,0x699C,0x6995,0x69B4,0x69DE,/* 0xD0-0xD7 */
+	0x69E8,0x6A02,0x6A1B,0x69FF,0x6B0A,0x69F9,0x69F2,0x69E7,/* 0xD8-0xDF */
+	0x6A05,0x69B1,0x6A1E,0x69ED,0x6A14,0x69EB,0x6A0A,0x6A12,/* 0xE0-0xE7 */
+	0x6AC1,0x6A23,0x6A13,0x6A44,0x6A0C,0x6A72,0x6A36,0x6A78,/* 0xE8-0xEF */
+	0x6A47,0x6A62,0x6A59,0x6A66,0x6A48,0x6A38,0x6A22,0x6A90,/* 0xF0-0xF7 */
+	0x6A8D,0x6AA0,0x6A84,0x6AA2,0x6AA3,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6A97,0x8617,0x6ABB,0x6AC3,0x6AC2,0x6AB8,0x6AB3,0x6AAC,/* 0x40-0x47 */
+	0x6ADE,0x6AD1,0x6ADF,0x6AAA,0x6ADA,0x6AEA,0x6AFB,0x6B05,/* 0x48-0x4F */
+	0x8616,0x6AFA,0x6B12,0x6B16,0x9B31,0x6B1F,0x6B38,0x6B37,/* 0x50-0x57 */
+	0x76DC,0x6B39,0x98EE,0x6B47,0x6B43,0x6B49,0x6B50,0x6B59,/* 0x58-0x5F */
+	0x6B54,0x6B5B,0x6B5F,0x6B61,0x6B78,0x6B79,0x6B7F,0x6B80,/* 0x60-0x67 */
+	0x6B84,0x6B83,0x6B8D,0x6B98,0x6B95,0x6B9E,0x6BA4,0x6BAA,/* 0x68-0x6F */
+	0x6BAB,0x6BAF,0x6BB2,0x6BB1,0x6BB3,0x6BB7,0x6BBC,0x6BC6,/* 0x70-0x77 */
+	0x6BCB,0x6BD3,0x6BDF,0x6BEC,0x6BEB,0x6BF3,0x6BEF,0x0000,/* 0x78-0x7F */
+
+	0x9EBE,0x6C08,0x6C13,0x6C14,0x6C1B,0x6C24,0x6C23,0x6C5E,/* 0x80-0x87 */
+	0x6C55,0x6C62,0x6C6A,0x6C82,0x6C8D,0x6C9A,0x6C81,0x6C9B,/* 0x88-0x8F */
+	0x6C7E,0x6C68,0x6C73,0x6C92,0x6C90,0x6CC4,0x6CF1,0x6CD3,/* 0x90-0x97 */
+	0x6CBD,0x6CD7,0x6CC5,0x6CDD,0x6CAE,0x6CB1,0x6CBE,0x6CBA,/* 0x98-0x9F */
+	0x6CDB,0x6CEF,0x6CD9,0x6CEA,0x6D1F,0x884D,0x6D36,0x6D2B,/* 0xA0-0xA7 */
+	0x6D3D,0x6D38,0x6D19,0x6D35,0x6D33,0x6D12,0x6D0C,0x6D63,/* 0xA8-0xAF */
+	0x6D93,0x6D64,0x6D5A,0x6D79,0x6D59,0x6D8E,0x6D95,0x6FE4,/* 0xB0-0xB7 */
+	0x6D85,0x6DF9,0x6E15,0x6E0A,0x6DB5,0x6DC7,0x6DE6,0x6DB8,/* 0xB8-0xBF */
+	0x6DC6,0x6DEC,0x6DDE,0x6DCC,0x6DE8,0x6DD2,0x6DC5,0x6DFA,/* 0xC0-0xC7 */
+	0x6DD9,0x6DE4,0x6DD5,0x6DEA,0x6DEE,0x6E2D,0x6E6E,0x6E2E,/* 0xC8-0xCF */
+	0x6E19,0x6E72,0x6E5F,0x6E3E,0x6E23,0x6E6B,0x6E2B,0x6E76,/* 0xD0-0xD7 */
+	0x6E4D,0x6E1F,0x6E43,0x6E3A,0x6E4E,0x6E24,0x6EFF,0x6E1D,/* 0xD8-0xDF */
+	0x6E38,0x6E82,0x6EAA,0x6E98,0x6EC9,0x6EB7,0x6ED3,0x6EBD,/* 0xE0-0xE7 */
+	0x6EAF,0x6EC4,0x6EB2,0x6ED4,0x6ED5,0x6E8F,0x6EA5,0x6EC2,/* 0xE8-0xEF */
+	0x6E9F,0x6F41,0x6F11,0x704C,0x6EEC,0x6EF8,0x6EFE,0x6F3F,/* 0xF0-0xF7 */
+	0x6EF2,0x6F31,0x6EEF,0x6F32,0x6ECC,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6F3E,0x6F13,0x6EF7,0x6F86,0x6F7A,0x6F78,0x6F81,0x6F80,/* 0x40-0x47 */
+	0x6F6F,0x6F5B,0x6FF3,0x6F6D,0x6F82,0x6F7C,0x6F58,0x6F8E,/* 0x48-0x4F */
+	0x6F91,0x6FC2,0x6F66,0x6FB3,0x6FA3,0x6FA1,0x6FA4,0x6FB9,/* 0x50-0x57 */
+	0x6FC6,0x6FAA,0x6FDF,0x6FD5,0x6FEC,0x6FD4,0x6FD8,0x6FF1,/* 0x58-0x5F */
+	0x6FEE,0x6FDB,0x7009,0x700B,0x6FFA,0x7011,0x7001,0x700F,/* 0x60-0x67 */
+	0x6FFE,0x701B,0x701A,0x6F74,0x701D,0x7018,0x701F,0x7030,/* 0x68-0x6F */
+	0x703E,0x7032,0x7051,0x7063,0x7099,0x7092,0x70AF,0x70F1,/* 0x70-0x77 */
+	0x70AC,0x70B8,0x70B3,0x70AE,0x70DF,0x70CB,0x70DD,0x0000,/* 0x78-0x7F */
+
+	0x70D9,0x7109,0x70FD,0x711C,0x7119,0x7165,0x7155,0x7188,/* 0x80-0x87 */
+	0x7166,0x7162,0x714C,0x7156,0x716C,0x718F,0x71FB,0x7184,/* 0x88-0x8F */
+	0x7195,0x71A8,0x71AC,0x71D7,0x71B9,0x71BE,0x71D2,0x71C9,/* 0x90-0x97 */
+	0x71D4,0x71CE,0x71E0,0x71EC,0x71E7,0x71F5,0x71FC,0x71F9,/* 0x98-0x9F */
+	0x71FF,0x720D,0x7210,0x721B,0x7228,0x722D,0x722C,0x7230,/* 0xA0-0xA7 */
+	0x7232,0x723B,0x723C,0x723F,0x7240,0x7246,0x724B,0x7258,/* 0xA8-0xAF */
+	0x7274,0x727E,0x7282,0x7281,0x7287,0x7292,0x7296,0x72A2,/* 0xB0-0xB7 */
+	0x72A7,0x72B9,0x72B2,0x72C3,0x72C6,0x72C4,0x72CE,0x72D2,/* 0xB8-0xBF */
+	0x72E2,0x72E0,0x72E1,0x72F9,0x72F7,0x500F,0x7317,0x730A,/* 0xC0-0xC7 */
+	0x731C,0x7316,0x731D,0x7334,0x732F,0x7329,0x7325,0x733E,/* 0xC8-0xCF */
+	0x734E,0x734F,0x9ED8,0x7357,0x736A,0x7368,0x7370,0x7378,/* 0xD0-0xD7 */
+	0x7375,0x737B,0x737A,0x73C8,0x73B3,0x73CE,0x73BB,0x73C0,/* 0xD8-0xDF */
+	0x73E5,0x73EE,0x73DE,0x74A2,0x7405,0x746F,0x7425,0x73F8,/* 0xE0-0xE7 */
+	0x7432,0x743A,0x7455,0x743F,0x745F,0x7459,0x7441,0x745C,/* 0xE8-0xEF */
+	0x7469,0x7470,0x7463,0x746A,0x7476,0x747E,0x748B,0x749E,/* 0xF0-0xF7 */
+	0x74A7,0x74CA,0x74CF,0x74D4,0x73F1,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x74E0,0x74E3,0x74E7,0x74E9,0x74EE,0x74F2,0x74F0,0x74F1,/* 0x40-0x47 */
+	0x74F8,0x74F7,0x7504,0x7503,0x7505,0x750C,0x750E,0x750D,/* 0x48-0x4F */
+	0x7515,0x7513,0x751E,0x7526,0x752C,0x753C,0x7544,0x754D,/* 0x50-0x57 */
+	0x754A,0x7549,0x755B,0x7546,0x755A,0x7569,0x7564,0x7567,/* 0x58-0x5F */
+	0x756B,0x756D,0x7578,0x7576,0x7586,0x7587,0x7574,0x758A,/* 0x60-0x67 */
+	0x7589,0x7582,0x7594,0x759A,0x759D,0x75A5,0x75A3,0x75C2,/* 0x68-0x6F */
+	0x75B3,0x75C3,0x75B5,0x75BD,0x75B8,0x75BC,0x75B1,0x75CD,/* 0x70-0x77 */
+	0x75CA,0x75D2,0x75D9,0x75E3,0x75DE,0x75FE,0x75FF,0x0000,/* 0x78-0x7F */
+
+	0x75FC,0x7601,0x75F0,0x75FA,0x75F2,0x75F3,0x760B,0x760D,/* 0x80-0x87 */
+	0x7609,0x761F,0x7627,0x7620,0x7621,0x7622,0x7624,0x7634,/* 0x88-0x8F */
+	0x7630,0x763B,0x7647,0x7648,0x7646,0x765C,0x7658,0x7661,/* 0x90-0x97 */
+	0x7662,0x7668,0x7669,0x766A,0x7667,0x766C,0x7670,0x7672,/* 0x98-0x9F */
+	0x7676,0x7678,0x767C,0x7680,0x7683,0x7688,0x768B,0x768E,/* 0xA0-0xA7 */
+	0x7696,0x7693,0x7699,0x769A,0x76B0,0x76B4,0x76B8,0x76B9,/* 0xA8-0xAF */
+	0x76BA,0x76C2,0x76CD,0x76D6,0x76D2,0x76DE,0x76E1,0x76E5,/* 0xB0-0xB7 */
+	0x76E7,0x76EA,0x862F,0x76FB,0x7708,0x7707,0x7704,0x7729,/* 0xB8-0xBF */
+	0x7724,0x771E,0x7725,0x7726,0x771B,0x7737,0x7738,0x7747,/* 0xC0-0xC7 */
+	0x775A,0x7768,0x776B,0x775B,0x7765,0x777F,0x777E,0x7779,/* 0xC8-0xCF */
+	0x778E,0x778B,0x7791,0x77A0,0x779E,0x77B0,0x77B6,0x77B9,/* 0xD0-0xD7 */
+	0x77BF,0x77BC,0x77BD,0x77BB,0x77C7,0x77CD,0x77D7,0x77DA,/* 0xD8-0xDF */
+	0x77DC,0x77E3,0x77EE,0x77FC,0x780C,0x7812,0x7926,0x7820,/* 0xE0-0xE7 */
+	0x792A,0x7845,0x788E,0x7874,0x7886,0x787C,0x789A,0x788C,/* 0xE8-0xEF */
+	0x78A3,0x78B5,0x78AA,0x78AF,0x78D1,0x78C6,0x78CB,0x78D4,/* 0xF0-0xF7 */
+	0x78BE,0x78BC,0x78C5,0x78CA,0x78EC,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x78E7,0x78DA,0x78FD,0x78F4,0x7907,0x7912,0x7911,0x7919,/* 0x40-0x47 */
+	0x792C,0x792B,0x7940,0x7960,0x7957,0x795F,0x795A,0x7955,/* 0x48-0x4F */
+	0x7953,0x797A,0x797F,0x798A,0x799D,0x79A7,0x9F4B,0x79AA,/* 0x50-0x57 */
+	0x79AE,0x79B3,0x79B9,0x79BA,0x79C9,0x79D5,0x79E7,0x79EC,/* 0x58-0x5F */
+	0x79E1,0x79E3,0x7A08,0x7A0D,0x7A18,0x7A19,0x7A20,0x7A1F,/* 0x60-0x67 */
+	0x7980,0x7A31,0x7A3B,0x7A3E,0x7A37,0x7A43,0x7A57,0x7A49,/* 0x68-0x6F */
+	0x7A61,0x7A62,0x7A69,0x9F9D,0x7A70,0x7A79,0x7A7D,0x7A88,/* 0x70-0x77 */
+	0x7A97,0x7A95,0x7A98,0x7A96,0x7AA9,0x7AC8,0x7AB0,0x0000,/* 0x78-0x7F */
+
+	0x7AB6,0x7AC5,0x7AC4,0x7ABF,0x9083,0x7AC7,0x7ACA,0x7ACD,/* 0x80-0x87 */
+	0x7ACF,0x7AD5,0x7AD3,0x7AD9,0x7ADA,0x7ADD,0x7AE1,0x7AE2,/* 0x88-0x8F */
+	0x7AE6,0x7AED,0x7AF0,0x7B02,0x7B0F,0x7B0A,0x7B06,0x7B33,/* 0x90-0x97 */
+	0x7B18,0x7B19,0x7B1E,0x7B35,0x7B28,0x7B36,0x7B50,0x7B7A,/* 0x98-0x9F */
+	0x7B04,0x7B4D,0x7B0B,0x7B4C,0x7B45,0x7B75,0x7B65,0x7B74,/* 0xA0-0xA7 */
+	0x7B67,0x7B70,0x7B71,0x7B6C,0x7B6E,0x7B9D,0x7B98,0x7B9F,/* 0xA8-0xAF */
+	0x7B8D,0x7B9C,0x7B9A,0x7B8B,0x7B92,0x7B8F,0x7B5D,0x7B99,/* 0xB0-0xB7 */
+	0x7BCB,0x7BC1,0x7BCC,0x7BCF,0x7BB4,0x7BC6,0x7BDD,0x7BE9,/* 0xB8-0xBF */
+	0x7C11,0x7C14,0x7BE6,0x7BE5,0x7C60,0x7C00,0x7C07,0x7C13,/* 0xC0-0xC7 */
+	0x7BF3,0x7BF7,0x7C17,0x7C0D,0x7BF6,0x7C23,0x7C27,0x7C2A,/* 0xC8-0xCF */
+	0x7C1F,0x7C37,0x7C2B,0x7C3D,0x7C4C,0x7C43,0x7C54,0x7C4F,/* 0xD0-0xD7 */
+	0x7C40,0x7C50,0x7C58,0x7C5F,0x7C64,0x7C56,0x7C65,0x7C6C,/* 0xD8-0xDF */
+	0x7C75,0x7C83,0x7C90,0x7CA4,0x7CAD,0x7CA2,0x7CAB,0x7CA1,/* 0xE0-0xE7 */
+	0x7CA8,0x7CB3,0x7CB2,0x7CB1,0x7CAE,0x7CB9,0x7CBD,0x7CC0,/* 0xE8-0xEF */
+	0x7CC5,0x7CC2,0x7CD8,0x7CD2,0x7CDC,0x7CE2,0x9B3B,0x7CEF,/* 0xF0-0xF7 */
+	0x7CF2,0x7CF4,0x7CF6,0x7CFA,0x7D06,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7D02,0x7D1C,0x7D15,0x7D0A,0x7D45,0x7D4B,0x7D2E,0x7D32,/* 0x40-0x47 */
+	0x7D3F,0x7D35,0x7D46,0x7D73,0x7D56,0x7D4E,0x7D72,0x7D68,/* 0x48-0x4F */
+	0x7D6E,0x7D4F,0x7D63,0x7D93,0x7D89,0x7D5B,0x7D8F,0x7D7D,/* 0x50-0x57 */
+	0x7D9B,0x7DBA,0x7DAE,0x7DA3,0x7DB5,0x7DC7,0x7DBD,0x7DAB,/* 0x58-0x5F */
+	0x7E3D,0x7DA2,0x7DAF,0x7DDC,0x7DB8,0x7D9F,0x7DB0,0x7DD8,/* 0x60-0x67 */
+	0x7DDD,0x7DE4,0x7DDE,0x7DFB,0x7DF2,0x7DE1,0x7E05,0x7E0A,/* 0x68-0x6F */
+	0x7E23,0x7E21,0x7E12,0x7E31,0x7E1F,0x7E09,0x7E0B,0x7E22,/* 0x70-0x77 */
+	0x7E46,0x7E66,0x7E3B,0x7E35,0x7E39,0x7E43,0x7E37,0x0000,/* 0x78-0x7F */
+
+	0x7E32,0x7E3A,0x7E67,0x7E5D,0x7E56,0x7E5E,0x7E59,0x7E5A,/* 0x80-0x87 */
+	0x7E79,0x7E6A,0x7E69,0x7E7C,0x7E7B,0x7E83,0x7DD5,0x7E7D,/* 0x88-0x8F */
+	0x8FAE,0x7E7F,0x7E88,0x7E89,0x7E8C,0x7E92,0x7E90,0x7E93,/* 0x90-0x97 */
+	0x7E94,0x7E96,0x7E8E,0x7E9B,0x7E9C,0x7F38,0x7F3A,0x7F45,/* 0x98-0x9F */
+	0x7F4C,0x7F4D,0x7F4E,0x7F50,0x7F51,0x7F55,0x7F54,0x7F58,/* 0xA0-0xA7 */
+	0x7F5F,0x7F60,0x7F68,0x7F69,0x7F67,0x7F78,0x7F82,0x7F86,/* 0xA8-0xAF */
+	0x7F83,0x7F88,0x7F87,0x7F8C,0x7F94,0x7F9E,0x7F9D,0x7F9A,/* 0xB0-0xB7 */
+	0x7FA3,0x7FAF,0x7FB2,0x7FB9,0x7FAE,0x7FB6,0x7FB8,0x8B71,/* 0xB8-0xBF */
+	0x7FC5,0x7FC6,0x7FCA,0x7FD5,0x7FD4,0x7FE1,0x7FE6,0x7FE9,/* 0xC0-0xC7 */
+	0x7FF3,0x7FF9,0x98DC,0x8006,0x8004,0x800B,0x8012,0x8018,/* 0xC8-0xCF */
+	0x8019,0x801C,0x8021,0x8028,0x803F,0x803B,0x804A,0x8046,/* 0xD0-0xD7 */
+	0x8052,0x8058,0x805A,0x805F,0x8062,0x8068,0x8073,0x8072,/* 0xD8-0xDF */
+	0x8070,0x8076,0x8079,0x807D,0x807F,0x8084,0x8086,0x8085,/* 0xE0-0xE7 */
+	0x809B,0x8093,0x809A,0x80AD,0x5190,0x80AC,0x80DB,0x80E5,/* 0xE8-0xEF */
+	0x80D9,0x80DD,0x80C4,0x80DA,0x80D6,0x8109,0x80EF,0x80F1,/* 0xF0-0xF7 */
+	0x811B,0x8129,0x8123,0x812F,0x814B,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x968B,0x8146,0x813E,0x8153,0x8151,0x80FC,0x8171,0x816E,/* 0x40-0x47 */
+	0x8165,0x8166,0x8174,0x8183,0x8188,0x818A,0x8180,0x8182,/* 0x48-0x4F */
+	0x81A0,0x8195,0x81A4,0x81A3,0x815F,0x8193,0x81A9,0x81B0,/* 0x50-0x57 */
+	0x81B5,0x81BE,0x81B8,0x81BD,0x81C0,0x81C2,0x81BA,0x81C9,/* 0x58-0x5F */
+	0x81CD,0x81D1,0x81D9,0x81D8,0x81C8,0x81DA,0x81DF,0x81E0,/* 0x60-0x67 */
+	0x81E7,0x81FA,0x81FB,0x81FE,0x8201,0x8202,0x8205,0x8207,/* 0x68-0x6F */
+	0x820A,0x820D,0x8210,0x8216,0x8229,0x822B,0x8238,0x8233,/* 0x70-0x77 */
+	0x8240,0x8259,0x8258,0x825D,0x825A,0x825F,0x8264,0x0000,/* 0x78-0x7F */
+
+	0x8262,0x8268,0x826A,0x826B,0x822E,0x8271,0x8277,0x8278,/* 0x80-0x87 */
+	0x827E,0x828D,0x8292,0x82AB,0x829F,0x82BB,0x82AC,0x82E1,/* 0x88-0x8F */
+	0x82E3,0x82DF,0x82D2,0x82F4,0x82F3,0x82FA,0x8393,0x8303,/* 0x90-0x97 */
+	0x82FB,0x82F9,0x82DE,0x8306,0x82DC,0x8309,0x82D9,0x8335,/* 0x98-0x9F */
+	0x8334,0x8316,0x8332,0x8331,0x8340,0x8339,0x8350,0x8345,/* 0xA0-0xA7 */
+	0x832F,0x832B,0x8317,0x8318,0x8385,0x839A,0x83AA,0x839F,/* 0xA8-0xAF */
+	0x83A2,0x8396,0x8323,0x838E,0x8387,0x838A,0x837C,0x83B5,/* 0xB0-0xB7 */
+	0x8373,0x8375,0x83A0,0x8389,0x83A8,0x83F4,0x8413,0x83EB,/* 0xB8-0xBF */
+	0x83CE,0x83FD,0x8403,0x83D8,0x840B,0x83C1,0x83F7,0x8407,/* 0xC0-0xC7 */
+	0x83E0,0x83F2,0x840D,0x8422,0x8420,0x83BD,0x8438,0x8506,/* 0xC8-0xCF */
+	0x83FB,0x846D,0x842A,0x843C,0x855A,0x8484,0x8477,0x846B,/* 0xD0-0xD7 */
+	0x84AD,0x846E,0x8482,0x8469,0x8446,0x842C,0x846F,0x8479,/* 0xD8-0xDF */
+	0x8435,0x84CA,0x8462,0x84B9,0x84BF,0x849F,0x84D9,0x84CD,/* 0xE0-0xE7 */
+	0x84BB,0x84DA,0x84D0,0x84C1,0x84C6,0x84D6,0x84A1,0x8521,/* 0xE8-0xEF */
+	0x84FF,0x84F4,0x8517,0x8518,0x852C,0x851F,0x8515,0x8514,/* 0xF0-0xF7 */
+	0x84FC,0x8540,0x8563,0x8558,0x8548,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8541,0x8602,0x854B,0x8555,0x8580,0x85A4,0x8588,0x8591,/* 0x40-0x47 */
+	0x858A,0x85A8,0x856D,0x8594,0x859B,0x85EA,0x8587,0x859C,/* 0x48-0x4F */
+	0x8577,0x857E,0x8590,0x85C9,0x85BA,0x85CF,0x85B9,0x85D0,/* 0x50-0x57 */
+	0x85D5,0x85DD,0x85E5,0x85DC,0x85F9,0x860A,0x8613,0x860B,/* 0x58-0x5F */
+	0x85FE,0x85FA,0x8606,0x8622,0x861A,0x8630,0x863F,0x864D,/* 0x60-0x67 */
+	0x4E55,0x8654,0x865F,0x8667,0x8671,0x8693,0x86A3,0x86A9,/* 0x68-0x6F */
+	0x86AA,0x868B,0x868C,0x86B6,0x86AF,0x86C4,0x86C6,0x86B0,/* 0x70-0x77 */
+	0x86C9,0x8823,0x86AB,0x86D4,0x86DE,0x86E9,0x86EC,0x0000,/* 0x78-0x7F */
+
+	0x86DF,0x86DB,0x86EF,0x8712,0x8706,0x8708,0x8700,0x8703,/* 0x80-0x87 */
+	0x86FB,0x8711,0x8709,0x870D,0x86F9,0x870A,0x8734,0x873F,/* 0x88-0x8F */
+	0x8737,0x873B,0x8725,0x8729,0x871A,0x8760,0x875F,0x8778,/* 0x90-0x97 */
+	0x874C,0x874E,0x8774,0x8757,0x8768,0x876E,0x8759,0x8753,/* 0x98-0x9F */
+	0x8763,0x876A,0x8805,0x87A2,0x879F,0x8782,0x87AF,0x87CB,/* 0xA0-0xA7 */
+	0x87BD,0x87C0,0x87D0,0x96D6,0x87AB,0x87C4,0x87B3,0x87C7,/* 0xA8-0xAF */
+	0x87C6,0x87BB,0x87EF,0x87F2,0x87E0,0x880F,0x880D,0x87FE,/* 0xB0-0xB7 */
+	0x87F6,0x87F7,0x880E,0x87D2,0x8811,0x8816,0x8815,0x8822,/* 0xB8-0xBF */
+	0x8821,0x8831,0x8836,0x8839,0x8827,0x883B,0x8844,0x8842,/* 0xC0-0xC7 */
+	0x8852,0x8859,0x885E,0x8862,0x886B,0x8881,0x887E,0x889E,/* 0xC8-0xCF */
+	0x8875,0x887D,0x88B5,0x8872,0x8882,0x8897,0x8892,0x88AE,/* 0xD0-0xD7 */
+	0x8899,0x88A2,0x888D,0x88A4,0x88B0,0x88BF,0x88B1,0x88C3,/* 0xD8-0xDF */
+	0x88C4,0x88D4,0x88D8,0x88D9,0x88DD,0x88F9,0x8902,0x88FC,/* 0xE0-0xE7 */
+	0x88F4,0x88E8,0x88F2,0x8904,0x890C,0x890A,0x8913,0x8943,/* 0xE8-0xEF */
+	0x891E,0x8925,0x892A,0x892B,0x8941,0x8944,0x893B,0x8936,/* 0xF0-0xF7 */
+	0x8938,0x894C,0x891D,0x8960,0x895E,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8966,0x8964,0x896D,0x896A,0x896F,0x8974,0x8977,0x897E,/* 0x40-0x47 */
+	0x8983,0x8988,0x898A,0x8993,0x8998,0x89A1,0x89A9,0x89A6,/* 0x48-0x4F */
+	0x89AC,0x89AF,0x89B2,0x89BA,0x89BD,0x89BF,0x89C0,0x89DA,/* 0x50-0x57 */
+	0x89DC,0x89DD,0x89E7,0x89F4,0x89F8,0x8A03,0x8A16,0x8A10,/* 0x58-0x5F */
+	0x8A0C,0x8A1B,0x8A1D,0x8A25,0x8A36,0x8A41,0x8A5B,0x8A52,/* 0x60-0x67 */
+	0x8A46,0x8A48,0x8A7C,0x8A6D,0x8A6C,0x8A62,0x8A85,0x8A82,/* 0x68-0x6F */
+	0x8A84,0x8AA8,0x8AA1,0x8A91,0x8AA5,0x8AA6,0x8A9A,0x8AA3,/* 0x70-0x77 */
+	0x8AC4,0x8ACD,0x8AC2,0x8ADA,0x8AEB,0x8AF3,0x8AE7,0x0000,/* 0x78-0x7F */
+
+	0x8AE4,0x8AF1,0x8B14,0x8AE0,0x8AE2,0x8AF7,0x8ADE,0x8ADB,/* 0x80-0x87 */
+	0x8B0C,0x8B07,0x8B1A,0x8AE1,0x8B16,0x8B10,0x8B17,0x8B20,/* 0x88-0x8F */
+	0x8B33,0x97AB,0x8B26,0x8B2B,0x8B3E,0x8B28,0x8B41,0x8B4C,/* 0x90-0x97 */
+	0x8B4F,0x8B4E,0x8B49,0x8B56,0x8B5B,0x8B5A,0x8B6B,0x8B5F,/* 0x98-0x9F */
+	0x8B6C,0x8B6F,0x8B74,0x8B7D,0x8B80,0x8B8C,0x8B8E,0x8B92,/* 0xA0-0xA7 */
+	0x8B93,0x8B96,0x8B99,0x8B9A,0x8C3A,0x8C41,0x8C3F,0x8C48,/* 0xA8-0xAF */
+	0x8C4C,0x8C4E,0x8C50,0x8C55,0x8C62,0x8C6C,0x8C78,0x8C7A,/* 0xB0-0xB7 */
+	0x8C82,0x8C89,0x8C85,0x8C8A,0x8C8D,0x8C8E,0x8C94,0x8C7C,/* 0xB8-0xBF */
+	0x8C98,0x621D,0x8CAD,0x8CAA,0x8CBD,0x8CB2,0x8CB3,0x8CAE,/* 0xC0-0xC7 */
+	0x8CB6,0x8CC8,0x8CC1,0x8CE4,0x8CE3,0x8CDA,0x8CFD,0x8CFA,/* 0xC8-0xCF */
+	0x8CFB,0x8D04,0x8D05,0x8D0A,0x8D07,0x8D0F,0x8D0D,0x8D10,/* 0xD0-0xD7 */
+	0x9F4E,0x8D13,0x8CCD,0x8D14,0x8D16,0x8D67,0x8D6D,0x8D71,/* 0xD8-0xDF */
+	0x8D73,0x8D81,0x8D99,0x8DC2,0x8DBE,0x8DBA,0x8DCF,0x8DDA,/* 0xE0-0xE7 */
+	0x8DD6,0x8DCC,0x8DDB,0x8DCB,0x8DEA,0x8DEB,0x8DDF,0x8DE3,/* 0xE8-0xEF */
+	0x8DFC,0x8E08,0x8E09,0x8DFF,0x8E1D,0x8E1E,0x8E10,0x8E1F,/* 0xF0-0xF7 */
+	0x8E42,0x8E35,0x8E30,0x8E34,0x8E4A,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8E47,0x8E49,0x8E4C,0x8E50,0x8E48,0x8E59,0x8E64,0x8E60,/* 0x40-0x47 */
+	0x8E2A,0x8E63,0x8E55,0x8E76,0x8E72,0x8E7C,0x8E81,0x8E87,/* 0x48-0x4F */
+	0x8E85,0x8E84,0x8E8B,0x8E8A,0x8E93,0x8E91,0x8E94,0x8E99,/* 0x50-0x57 */
+	0x8EAA,0x8EA1,0x8EAC,0x8EB0,0x8EC6,0x8EB1,0x8EBE,0x8EC5,/* 0x58-0x5F */
+	0x8EC8,0x8ECB,0x8EDB,0x8EE3,0x8EFC,0x8EFB,0x8EEB,0x8EFE,/* 0x60-0x67 */
+	0x8F0A,0x8F05,0x8F15,0x8F12,0x8F19,0x8F13,0x8F1C,0x8F1F,/* 0x68-0x6F */
+	0x8F1B,0x8F0C,0x8F26,0x8F33,0x8F3B,0x8F39,0x8F45,0x8F42,/* 0x70-0x77 */
+	0x8F3E,0x8F4C,0x8F49,0x8F46,0x8F4E,0x8F57,0x8F5C,0x0000,/* 0x78-0x7F */
+
+	0x8F62,0x8F63,0x8F64,0x8F9C,0x8F9F,0x8FA3,0x8FAD,0x8FAF,/* 0x80-0x87 */
+	0x8FB7,0x8FDA,0x8FE5,0x8FE2,0x8FEA,0x8FEF,0x9087,0x8FF4,/* 0x88-0x8F */
+	0x9005,0x8FF9,0x8FFA,0x9011,0x9015,0x9021,0x900D,0x901E,/* 0x90-0x97 */
+	0x9016,0x900B,0x9027,0x9036,0x9035,0x9039,0x8FF8,0x904F,/* 0x98-0x9F */
+	0x9050,0x9051,0x9052,0x900E,0x9049,0x903E,0x9056,0x9058,/* 0xA0-0xA7 */
+	0x905E,0x9068,0x906F,0x9076,0x96A8,0x9072,0x9082,0x907D,/* 0xA8-0xAF */
+	0x9081,0x9080,0x908A,0x9089,0x908F,0x90A8,0x90AF,0x90B1,/* 0xB0-0xB7 */
+	0x90B5,0x90E2,0x90E4,0x6248,0x90DB,0x9102,0x9112,0x9119,/* 0xB8-0xBF */
+	0x9132,0x9130,0x914A,0x9156,0x9158,0x9163,0x9165,0x9169,/* 0xC0-0xC7 */
+	0x9173,0x9172,0x918B,0x9189,0x9182,0x91A2,0x91AB,0x91AF,/* 0xC8-0xCF */
+	0x91AA,0x91B5,0x91B4,0x91BA,0x91C0,0x91C1,0x91C9,0x91CB,/* 0xD0-0xD7 */
+	0x91D0,0x91D6,0x91DF,0x91E1,0x91DB,0x91FC,0x91F5,0x91F6,/* 0xD8-0xDF */
+	0x921E,0x91FF,0x9214,0x922C,0x9215,0x9211,0x925E,0x9257,/* 0xE0-0xE7 */
+	0x9245,0x9249,0x9264,0x9248,0x9295,0x923F,0x924B,0x9250,/* 0xE8-0xEF */
+	0x929C,0x9296,0x9293,0x929B,0x925A,0x92CF,0x92B9,0x92B7,/* 0xF0-0xF7 */
+	0x92E9,0x930F,0x92FA,0x9344,0x932E,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9319,0x9322,0x931A,0x9323,0x933A,0x9335,0x933B,0x935C,/* 0x40-0x47 */
+	0x9360,0x937C,0x936E,0x9356,0x93B0,0x93AC,0x93AD,0x9394,/* 0x48-0x4F */
+	0x93B9,0x93D6,0x93D7,0x93E8,0x93E5,0x93D8,0x93C3,0x93DD,/* 0x50-0x57 */
+	0x93D0,0x93C8,0x93E4,0x941A,0x9414,0x9413,0x9403,0x9407,/* 0x58-0x5F */
+	0x9410,0x9436,0x942B,0x9435,0x9421,0x943A,0x9441,0x9452,/* 0x60-0x67 */
+	0x9444,0x945B,0x9460,0x9462,0x945E,0x946A,0x9229,0x9470,/* 0x68-0x6F */
+	0x9475,0x9477,0x947D,0x945A,0x947C,0x947E,0x9481,0x947F,/* 0x70-0x77 */
+	0x9582,0x9587,0x958A,0x9594,0x9596,0x9598,0x9599,0x0000,/* 0x78-0x7F */
+
+	0x95A0,0x95A8,0x95A7,0x95AD,0x95BC,0x95BB,0x95B9,0x95BE,/* 0x80-0x87 */
+	0x95CA,0x6FF6,0x95C3,0x95CD,0x95CC,0x95D5,0x95D4,0x95D6,/* 0x88-0x8F */
+	0x95DC,0x95E1,0x95E5,0x95E2,0x9621,0x9628,0x962E,0x962F,/* 0x90-0x97 */
+	0x9642,0x964C,0x964F,0x964B,0x9677,0x965C,0x965E,0x965D,/* 0x98-0x9F */
+	0x965F,0x9666,0x9672,0x966C,0x968D,0x9698,0x9695,0x9697,/* 0xA0-0xA7 */
+	0x96AA,0x96A7,0x96B1,0x96B2,0x96B0,0x96B4,0x96B6,0x96B8,/* 0xA8-0xAF */
+	0x96B9,0x96CE,0x96CB,0x96C9,0x96CD,0x894D,0x96DC,0x970D,/* 0xB0-0xB7 */
+	0x96D5,0x96F9,0x9704,0x9706,0x9708,0x9713,0x970E,0x9711,/* 0xB8-0xBF */
+	0x970F,0x9716,0x9719,0x9724,0x972A,0x9730,0x9739,0x973D,/* 0xC0-0xC7 */
+	0x973E,0x9744,0x9746,0x9748,0x9742,0x9749,0x975C,0x9760,/* 0xC8-0xCF */
+	0x9764,0x9766,0x9768,0x52D2,0x976B,0x9771,0x9779,0x9785,/* 0xD0-0xD7 */
+	0x977C,0x9781,0x977A,0x9786,0x978B,0x978F,0x9790,0x979C,/* 0xD8-0xDF */
+	0x97A8,0x97A6,0x97A3,0x97B3,0x97B4,0x97C3,0x97C6,0x97C8,/* 0xE0-0xE7 */
+	0x97CB,0x97DC,0x97ED,0x9F4F,0x97F2,0x7ADF,0x97F6,0x97F5,/* 0xE8-0xEF */
+	0x980F,0x980C,0x9838,0x9824,0x9821,0x9837,0x983D,0x9846,/* 0xF0-0xF7 */
+	0x984F,0x984B,0x986B,0x986F,0x9870,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9871,0x9874,0x9873,0x98AA,0x98AF,0x98B1,0x98B6,0x98C4,/* 0x40-0x47 */
+	0x98C3,0x98C6,0x98E9,0x98EB,0x9903,0x9909,0x9912,0x9914,/* 0x48-0x4F */
+	0x9918,0x9921,0x991D,0x991E,0x9924,0x9920,0x992C,0x992E,/* 0x50-0x57 */
+	0x993D,0x993E,0x9942,0x9949,0x9945,0x9950,0x994B,0x9951,/* 0x58-0x5F */
+	0x9952,0x994C,0x9955,0x9997,0x9998,0x99A5,0x99AD,0x99AE,/* 0x60-0x67 */
+	0x99BC,0x99DF,0x99DB,0x99DD,0x99D8,0x99D1,0x99ED,0x99EE,/* 0x68-0x6F */
+	0x99F1,0x99F2,0x99FB,0x99F8,0x9A01,0x9A0F,0x9A05,0x99E2,/* 0x70-0x77 */
+	0x9A19,0x9A2B,0x9A37,0x9A45,0x9A42,0x9A40,0x9A43,0x0000,/* 0x78-0x7F */
+
+	0x9A3E,0x9A55,0x9A4D,0x9A5B,0x9A57,0x9A5F,0x9A62,0x9A65,/* 0x80-0x87 */
+	0x9A64,0x9A69,0x9A6B,0x9A6A,0x9AAD,0x9AB0,0x9ABC,0x9AC0,/* 0x88-0x8F */
+	0x9ACF,0x9AD1,0x9AD3,0x9AD4,0x9ADE,0x9ADF,0x9AE2,0x9AE3,/* 0x90-0x97 */
+	0x9AE6,0x9AEF,0x9AEB,0x9AEE,0x9AF4,0x9AF1,0x9AF7,0x9AFB,/* 0x98-0x9F */
+	0x9B06,0x9B18,0x9B1A,0x9B1F,0x9B22,0x9B23,0x9B25,0x9B27,/* 0xA0-0xA7 */
+	0x9B28,0x9B29,0x9B2A,0x9B2E,0x9B2F,0x9B32,0x9B44,0x9B43,/* 0xA8-0xAF */
+	0x9B4F,0x9B4D,0x9B4E,0x9B51,0x9B58,0x9B74,0x9B93,0x9B83,/* 0xB0-0xB7 */
+	0x9B91,0x9B96,0x9B97,0x9B9F,0x9BA0,0x9BA8,0x9BB4,0x9BC0,/* 0xB8-0xBF */
+	0x9BCA,0x9BB9,0x9BC6,0x9BCF,0x9BD1,0x9BD2,0x9BE3,0x9BE2,/* 0xC0-0xC7 */
+	0x9BE4,0x9BD4,0x9BE1,0x9C3A,0x9BF2,0x9BF1,0x9BF0,0x9C15,/* 0xC8-0xCF */
+	0x9C14,0x9C09,0x9C13,0x9C0C,0x9C06,0x9C08,0x9C12,0x9C0A,/* 0xD0-0xD7 */
+	0x9C04,0x9C2E,0x9C1B,0x9C25,0x9C24,0x9C21,0x9C30,0x9C47,/* 0xD8-0xDF */
+	0x9C32,0x9C46,0x9C3E,0x9C5A,0x9C60,0x9C67,0x9C76,0x9C78,/* 0xE0-0xE7 */
+	0x9CE7,0x9CEC,0x9CF0,0x9D09,0x9D08,0x9CEB,0x9D03,0x9D06,/* 0xE8-0xEF */
+	0x9D2A,0x9D26,0x9DAF,0x9D23,0x9D1F,0x9D44,0x9D15,0x9D12,/* 0xF0-0xF7 */
+	0x9D41,0x9D3F,0x9D3E,0x9D46,0x9D48,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9D5D,0x9D5E,0x9D64,0x9D51,0x9D50,0x9D59,0x9D72,0x9D89,/* 0x40-0x47 */
+	0x9D87,0x9DAB,0x9D6F,0x9D7A,0x9D9A,0x9DA4,0x9DA9,0x9DB2,/* 0x48-0x4F */
+	0x9DC4,0x9DC1,0x9DBB,0x9DB8,0x9DBA,0x9DC6,0x9DCF,0x9DC2,/* 0x50-0x57 */
+	0x9DD9,0x9DD3,0x9DF8,0x9DE6,0x9DED,0x9DEF,0x9DFD,0x9E1A,/* 0x58-0x5F */
+	0x9E1B,0x9E1E,0x9E75,0x9E79,0x9E7D,0x9E81,0x9E88,0x9E8B,/* 0x60-0x67 */
+	0x9E8C,0x9E92,0x9E95,0x9E91,0x9E9D,0x9EA5,0x9EA9,0x9EB8,/* 0x68-0x6F */
+	0x9EAA,0x9EAD,0x9761,0x9ECC,0x9ECE,0x9ECF,0x9ED0,0x9ED4,/* 0x70-0x77 */
+	0x9EDC,0x9EDE,0x9EDD,0x9EE0,0x9EE5,0x9EE8,0x9EEF,0x0000,/* 0x78-0x7F */
+
+	0x9EF4,0x9EF6,0x9EF7,0x9EF9,0x9EFB,0x9EFC,0x9EFD,0x9F07,/* 0x80-0x87 */
+	0x9F08,0x76B7,0x9F15,0x9F21,0x9F2C,0x9F3E,0x9F4A,0x9F52,/* 0x88-0x8F */
+	0x9F54,0x9F63,0x9F5F,0x9F60,0x9F61,0x9F66,0x9F67,0x9F6C,/* 0x90-0x97 */
+	0x9F6A,0x9F77,0x9F72,0x9F76,0x9F95,0x9F9C,0x9FA0,0x582F,/* 0x98-0x9F */
+	0x69C7,0x9059,0x7464,0x51DC,0x7199,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_ED[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7E8A,0x891C,0x9348,0x9288,0x84DC,0x4FC9,0x70BB,0x6631,/* 0x40-0x47 */
+	0x68C8,0x92F9,0x66FB,0x5F45,0x4E28,0x4EE1,0x4EFC,0x4F00,/* 0x48-0x4F */
+	0x4F03,0x4F39,0x4F56,0x4F92,0x4F8A,0x4F9A,0x4F94,0x4FCD,/* 0x50-0x57 */
+	0x5040,0x5022,0x4FFF,0x501E,0x5046,0x5070,0x5042,0x5094,/* 0x58-0x5F */
+	0x50F4,0x50D8,0x514A,0x5164,0x519D,0x51BE,0x51EC,0x5215,/* 0x60-0x67 */
+	0x529C,0x52A6,0x52C0,0x52DB,0x5300,0x5307,0x5324,0x5372,/* 0x68-0x6F */
+	0x5393,0x53B2,0x53DD,0xFA0E,0x549C,0x548A,0x54A9,0x54FF,/* 0x70-0x77 */
+	0x5586,0x5759,0x5765,0x57AC,0x57C8,0x57C7,0xFA0F,0x0000,/* 0x78-0x7F */
+
+	0xFA10,0x589E,0x58B2,0x590B,0x5953,0x595B,0x595D,0x5963,/* 0x80-0x87 */
+	0x59A4,0x59BA,0x5B56,0x5BC0,0x752F,0x5BD8,0x5BEC,0x5C1E,/* 0x88-0x8F */
+	0x5CA6,0x5CBA,0x5CF5,0x5D27,0x5D53,0xFA11,0x5D42,0x5D6D,/* 0x90-0x97 */
+	0x5DB8,0x5DB9,0x5DD0,0x5F21,0x5F34,0x5F67,0x5FB7,0x5FDE,/* 0x98-0x9F */
+	0x605D,0x6085,0x608A,0x60DE,0x60D5,0x6120,0x60F2,0x6111,/* 0xA0-0xA7 */
+	0x6137,0x6130,0x6198,0x6213,0x62A6,0x63F5,0x6460,0x649D,/* 0xA8-0xAF */
+	0x64CE,0x654E,0x6600,0x6615,0x663B,0x6609,0x662E,0x661E,/* 0xB0-0xB7 */
+	0x6624,0x6665,0x6657,0x6659,0xFA12,0x6673,0x6699,0x66A0,/* 0xB8-0xBF */
+	0x66B2,0x66BF,0x66FA,0x670E,0xF929,0x6766,0x67BB,0x6852,/* 0xC0-0xC7 */
+	0x67C0,0x6801,0x6844,0x68CF,0xFA13,0x6968,0xFA14,0x6998,/* 0xC8-0xCF */
+	0x69E2,0x6A30,0x6A6B,0x6A46,0x6A73,0x6A7E,0x6AE2,0x6AE4,/* 0xD0-0xD7 */
+	0x6BD6,0x6C3F,0x6C5C,0x6C86,0x6C6F,0x6CDA,0x6D04,0x6D87,/* 0xD8-0xDF */
+	0x6D6F,0x6D96,0x6DAC,0x6DCF,0x6DF8,0x6DF2,0x6DFC,0x6E39,/* 0xE0-0xE7 */
+	0x6E5C,0x6E27,0x6E3C,0x6EBF,0x6F88,0x6FB5,0x6FF5,0x7005,/* 0xE8-0xEF */
+	0x7007,0x7028,0x7085,0x70AB,0x710F,0x7104,0x715C,0x7146,/* 0xF0-0xF7 */
+	0x7147,0xFA15,0x71C1,0x71FE,0x72B1,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x72BE,0x7324,0xFA16,0x7377,0x73BD,0x73C9,0x73D6,0x73E3,/* 0x40-0x47 */
+	0x73D2,0x7407,0x73F5,0x7426,0x742A,0x7429,0x742E,0x7462,/* 0x48-0x4F */
+	0x7489,0x749F,0x7501,0x756F,0x7682,0x769C,0x769E,0x769B,/* 0x50-0x57 */
+	0x76A6,0xFA17,0x7746,0x52AF,0x7821,0x784E,0x7864,0x787A,/* 0x58-0x5F */
+	0x7930,0xFA18,0xFA19,0xFA1A,0x7994,0xFA1B,0x799B,0x7AD1,/* 0x60-0x67 */
+	0x7AE7,0xFA1C,0x7AEB,0x7B9E,0xFA1D,0x7D48,0x7D5C,0x7DB7,/* 0x68-0x6F */
+	0x7DA0,0x7DD6,0x7E52,0x7F47,0x7FA1,0xFA1E,0x8301,0x8362,/* 0x70-0x77 */
+	0x837F,0x83C7,0x83F6,0x8448,0x84B4,0x8553,0x8559,0x0000,/* 0x78-0x7F */
+
+	0x856B,0xFA1F,0x85B0,0xFA20,0xFA21,0x8807,0x88F5,0x8A12,/* 0x80-0x87 */
+	0x8A37,0x8A79,0x8AA7,0x8ABE,0x8ADF,0xFA22,0x8AF6,0x8B53,/* 0x88-0x8F */
+	0x8B7F,0x8CF0,0x8CF4,0x8D12,0x8D76,0xFA23,0x8ECF,0xFA24,/* 0x90-0x97 */
+	0xFA25,0x9067,0x90DE,0xFA26,0x9115,0x9127,0x91DA,0x91D7,/* 0x98-0x9F */
+	0x91DE,0x91ED,0x91EE,0x91E4,0x91E5,0x9206,0x9210,0x920A,/* 0xA0-0xA7 */
+	0x923A,0x9240,0x923C,0x924E,0x9259,0x9251,0x9239,0x9267,/* 0xA8-0xAF */
+	0x92A7,0x9277,0x9278,0x92E7,0x92D7,0x92D9,0x92D0,0xFA27,/* 0xB0-0xB7 */
+	0x92D5,0x92E0,0x92D3,0x9325,0x9321,0x92FB,0xFA28,0x931E,/* 0xB8-0xBF */
+	0x92FF,0x931D,0x9302,0x9370,0x9357,0x93A4,0x93C6,0x93DE,/* 0xC0-0xC7 */
+	0x93F8,0x9431,0x9445,0x9448,0x9592,0xF9DC,0xFA29,0x969D,/* 0xC8-0xCF */
+	0x96AF,0x9733,0x973B,0x9743,0x974D,0x974F,0x9751,0x9755,/* 0xD0-0xD7 */
+	0x9857,0x9865,0xFA2A,0xFA2B,0x9927,0xFA2C,0x999E,0x9A4E,/* 0xD8-0xDF */
+	0x9AD9,0x9ADC,0x9B75,0x9B72,0x9B8F,0x9BB1,0x9BBB,0x9C00,/* 0xE0-0xE7 */
+	0x9D70,0x9D6B,0xFA2D,0x9E19,0x9ED1,0x0000,0x0000,0x2170,/* 0xE8-0xEF */
+	0x2171,0x2172,0x2173,0x2174,0x2175,0x2176,0x2177,0x2178,/* 0xF0-0xF7 */
+	0x2179,0xFFE2,0xFFE4,0xFF07,0xFF02,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x2170,0x2171,0x2172,0x2173,0x2174,0x2175,0x2176,0x2177,/* 0x40-0x47 */
+	0x2178,0x2179,0x2160,0x2161,0x2162,0x2163,0x2164,0x2165,/* 0x48-0x4F */
+	0x2166,0x2167,0x2168,0x2169,0xFFE2,0xFFE4,0xFF07,0xFF02,/* 0x50-0x57 */
+	0x3231,0x2116,0x2121,0x2235,0x7E8A,0x891C,0x9348,0x9288,/* 0x58-0x5F */
+	0x84DC,0x4FC9,0x70BB,0x6631,0x68C8,0x92F9,0x66FB,0x5F45,/* 0x60-0x67 */
+	0x4E28,0x4EE1,0x4EFC,0x4F00,0x4F03,0x4F39,0x4F56,0x4F92,/* 0x68-0x6F */
+	0x4F8A,0x4F9A,0x4F94,0x4FCD,0x5040,0x5022,0x4FFF,0x501E,/* 0x70-0x77 */
+	0x5046,0x5070,0x5042,0x5094,0x50F4,0x50D8,0x514A,0x0000,/* 0x78-0x7F */
+
+	0x5164,0x519D,0x51BE,0x51EC,0x5215,0x529C,0x52A6,0x52C0,/* 0x80-0x87 */
+	0x52DB,0x5300,0x5307,0x5324,0x5372,0x5393,0x53B2,0x53DD,/* 0x88-0x8F */
+	0xFA0E,0x549C,0x548A,0x54A9,0x54FF,0x5586,0x5759,0x5765,/* 0x90-0x97 */
+	0x57AC,0x57C8,0x57C7,0xFA0F,0xFA10,0x589E,0x58B2,0x590B,/* 0x98-0x9F */
+	0x5953,0x595B,0x595D,0x5963,0x59A4,0x59BA,0x5B56,0x5BC0,/* 0xA0-0xA7 */
+	0x752F,0x5BD8,0x5BEC,0x5C1E,0x5CA6,0x5CBA,0x5CF5,0x5D27,/* 0xA8-0xAF */
+	0x5D53,0xFA11,0x5D42,0x5D6D,0x5DB8,0x5DB9,0x5DD0,0x5F21,/* 0xB0-0xB7 */
+	0x5F34,0x5F67,0x5FB7,0x5FDE,0x605D,0x6085,0x608A,0x60DE,/* 0xB8-0xBF */
+	0x60D5,0x6120,0x60F2,0x6111,0x6137,0x6130,0x6198,0x6213,/* 0xC0-0xC7 */
+	0x62A6,0x63F5,0x6460,0x649D,0x64CE,0x654E,0x6600,0x6615,/* 0xC8-0xCF */
+	0x663B,0x6609,0x662E,0x661E,0x6624,0x6665,0x6657,0x6659,/* 0xD0-0xD7 */
+	0xFA12,0x6673,0x6699,0x66A0,0x66B2,0x66BF,0x66FA,0x670E,/* 0xD8-0xDF */
+	0xF929,0x6766,0x67BB,0x6852,0x67C0,0x6801,0x6844,0x68CF,/* 0xE0-0xE7 */
+	0xFA13,0x6968,0xFA14,0x6998,0x69E2,0x6A30,0x6A6B,0x6A46,/* 0xE8-0xEF */
+	0x6A73,0x6A7E,0x6AE2,0x6AE4,0x6BD6,0x6C3F,0x6C5C,0x6C86,/* 0xF0-0xF7 */
+	0x6C6F,0x6CDA,0x6D04,0x6D87,0x6D6F,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6D96,0x6DAC,0x6DCF,0x6DF8,0x6DF2,0x6DFC,0x6E39,0x6E5C,/* 0x40-0x47 */
+	0x6E27,0x6E3C,0x6EBF,0x6F88,0x6FB5,0x6FF5,0x7005,0x7007,/* 0x48-0x4F */
+	0x7028,0x7085,0x70AB,0x710F,0x7104,0x715C,0x7146,0x7147,/* 0x50-0x57 */
+	0xFA15,0x71C1,0x71FE,0x72B1,0x72BE,0x7324,0xFA16,0x7377,/* 0x58-0x5F */
+	0x73BD,0x73C9,0x73D6,0x73E3,0x73D2,0x7407,0x73F5,0x7426,/* 0x60-0x67 */
+	0x742A,0x7429,0x742E,0x7462,0x7489,0x749F,0x7501,0x756F,/* 0x68-0x6F */
+	0x7682,0x769C,0x769E,0x769B,0x76A6,0xFA17,0x7746,0x52AF,/* 0x70-0x77 */
+	0x7821,0x784E,0x7864,0x787A,0x7930,0xFA18,0xFA19,0x0000,/* 0x78-0x7F */
+
+	0xFA1A,0x7994,0xFA1B,0x799B,0x7AD1,0x7AE7,0xFA1C,0x7AEB,/* 0x80-0x87 */
+	0x7B9E,0xFA1D,0x7D48,0x7D5C,0x7DB7,0x7DA0,0x7DD6,0x7E52,/* 0x88-0x8F */
+	0x7F47,0x7FA1,0xFA1E,0x8301,0x8362,0x837F,0x83C7,0x83F6,/* 0x90-0x97 */
+	0x8448,0x84B4,0x8553,0x8559,0x856B,0xFA1F,0x85B0,0xFA20,/* 0x98-0x9F */
+	0xFA21,0x8807,0x88F5,0x8A12,0x8A37,0x8A79,0x8AA7,0x8ABE,/* 0xA0-0xA7 */
+	0x8ADF,0xFA22,0x8AF6,0x8B53,0x8B7F,0x8CF0,0x8CF4,0x8D12,/* 0xA8-0xAF */
+	0x8D76,0xFA23,0x8ECF,0xFA24,0xFA25,0x9067,0x90DE,0xFA26,/* 0xB0-0xB7 */
+	0x9115,0x9127,0x91DA,0x91D7,0x91DE,0x91ED,0x91EE,0x91E4,/* 0xB8-0xBF */
+	0x91E5,0x9206,0x9210,0x920A,0x923A,0x9240,0x923C,0x924E,/* 0xC0-0xC7 */
+	0x9259,0x9251,0x9239,0x9267,0x92A7,0x9277,0x9278,0x92E7,/* 0xC8-0xCF */
+	0x92D7,0x92D9,0x92D0,0xFA27,0x92D5,0x92E0,0x92D3,0x9325,/* 0xD0-0xD7 */
+	0x9321,0x92FB,0xFA28,0x931E,0x92FF,0x931D,0x9302,0x9370,/* 0xD8-0xDF */
+	0x9357,0x93A4,0x93C6,0x93DE,0x93F8,0x9431,0x9445,0x9448,/* 0xE0-0xE7 */
+	0x9592,0xF9DC,0xFA29,0x969D,0x96AF,0x9733,0x973B,0x9743,/* 0xE8-0xEF */
+	0x974D,0x974F,0x9751,0x9755,0x9857,0x9865,0xFA2A,0xFA2B,/* 0xF0-0xF7 */
+	0x9927,0xFA2C,0x999E,0x9A4E,0x9AD9,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9ADC,0x9B75,0x9B72,0x9B8F,0x9BB1,0x9BBB,0x9C00,0x9D70,/* 0x40-0x47 */
+	0x9D6B,0xFA2D,0x9E19,0x9ED1,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+};
+
+static wchar_t *page_charset2uni[256] = {
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   c2u_81, c2u_82, c2u_83, c2u_84, NULL,   NULL,   c2u_87, 
+	c2u_88, c2u_89, c2u_8A, c2u_8B, c2u_8C, c2u_8D, c2u_8E, c2u_8F, 
+	c2u_90, c2u_91, c2u_92, c2u_93, c2u_94, c2u_95, c2u_96, c2u_97, 
+	c2u_98, c2u_99, c2u_9A, c2u_9B, c2u_9C, c2u_9D, c2u_9E, c2u_9F, 
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	c2u_E0, c2u_E1, c2u_E2, c2u_E3, c2u_E4, c2u_E5, c2u_E6, c2u_E7, 
+	c2u_E8, c2u_E9, c2u_EA, NULL,   NULL,   c2u_ED, c2u_EE, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   c2u_FA, c2u_FB, c2u_FC, NULL,   NULL,   NULL,   
+};
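+
+/*
+ * The page_charset2uni[] array above forms a two-level lookup: the lead
+ * (high) byte of a double-byte character selects one of the c2u_XX
+ * pages, and the trail (low) byte indexes into that page to yield the
+ * Unicode value; a 0x0000 entry is a hole with no mapping.  A minimal
+ * sketch of a consumer is shown below; the helper name and error codes
+ * are illustrative assumptions, not the conversion routine defined
+ * elsewhere in this file:
+ *
+ *	static int dbcs_char2uni(const unsigned char *s, int boundlen,
+ *				 wchar_t *uni)
+ *	{
+ *		wchar_t *page;
+ *
+ *		if (boundlen < 2)
+ *			return -ENAMETOOLONG;
+ *		page = page_charset2uni[s[0]];	lead byte selects the page
+ *		if (!page)
+ *			return -EINVAL;
+ *		*uni = page[s[1]];		trail byte indexes into it
+ *		if (*uni == 0x0000)
+ *			return -EINVAL;		unmapped code
+ *		return 2;			two bytes consumed
+ *	}
+ */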
+
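+/*
+ * u2c_00hi[] below covers the upper half of Unicode page 0x00: it is
+ * indexed by (code point - 0xA0), and each entry holds the two charset
+ * bytes for that character, high byte first.  An all-zero entry means
+ * the character has no mapping.
+ */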
+static unsigned char u2c_00hi[256 - 0xA0][2] = {
+	{0x00, 0x00}, {0x00, 0x00}, {0x81, 0x91}, {0x81, 0x92},/* 0xA0-0xA3 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x81, 0x98},/* 0xA4-0xA7 */
+	{0x81, 0x4E}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xA8-0xAB */
+	{0x81, 0xCA}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xAC-0xAF */
+	{0x81, 0x8B}, {0x81, 0x7D}, {0x00, 0x00}, {0x00, 0x00},/* 0xB0-0xB3 */
+	{0x81, 0x4C}, {0x00, 0x00}, {0x81, 0xF7}, {0x00, 0x00},/* 0xB4-0xB7 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xB8-0xBB */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xBC-0xBF */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xC0-0xC3 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xC4-0xC7 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xC8-0xCB */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xCC-0xCF */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xD0-0xD3 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x81, 0x7E},/* 0xD4-0xD7 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xD8-0xDB */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xDC-0xDF */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xE0-0xE3 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xE4-0xE7 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xE8-0xEB */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xEC-0xEF */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xF0-0xF3 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x81, 0x80},/* 0xF4-0xF7 */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xF8-0xFB */
+	{0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00},/* 0xFC-0xFF */
+};
+
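+/*
+ * The u2c_XX pages that follow are full 256-entry tables indexed by the
+ * low byte of the Unicode code point; each entry is two bytes (high
+ * byte first) giving the charset code, so every page occupies 512
+ * bytes, with zero pairs marking unmapped code points.
+ */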
+static unsigned char u2c_03[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x83, 0x9F, 0x83, 0xA0, 0x83, 0xA1, /* 0x90-0x93 */
+	0x83, 0xA2, 0x83, 0xA3, 0x83, 0xA4, 0x83, 0xA5, /* 0x94-0x97 */
+	0x83, 0xA6, 0x83, 0xA7, 0x83, 0xA8, 0x83, 0xA9, /* 0x98-0x9B */
+	0x83, 0xAA, 0x83, 0xAB, 0x83, 0xAC, 0x83, 0xAD, /* 0x9C-0x9F */
+	0x83, 0xAE, 0x83, 0xAF, 0x00, 0x00, 0x83, 0xB0, /* 0xA0-0xA3 */
+	0x83, 0xB1, 0x83, 0xB2, 0x83, 0xB3, 0x83, 0xB4, /* 0xA4-0xA7 */
+	0x83, 0xB5, 0x83, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x83, 0xBF, 0x83, 0xC0, 0x83, 0xC1, /* 0xB0-0xB3 */
+	0x83, 0xC2, 0x83, 0xC3, 0x83, 0xC4, 0x83, 0xC5, /* 0xB4-0xB7 */
+	0x83, 0xC6, 0x83, 0xC7, 0x83, 0xC8, 0x83, 0xC9, /* 0xB8-0xBB */
+	0x83, 0xCA, 0x83, 0xCB, 0x83, 0xCC, 0x83, 0xCD, /* 0xBC-0xBF */
+	0x83, 0xCE, 0x83, 0xCF, 0x00, 0x00, 0x83, 0xD0, /* 0xC0-0xC3 */
+	0x83, 0xD1, 0x83, 0xD2, 0x83, 0xD3, 0x83, 0xD4, /* 0xC4-0xC7 */
+	0x83, 0xD5, 0x83, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+};
+
+static unsigned char u2c_04[512] = {
+	0x00, 0x00, 0x84, 0x46, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x84, 0x40, 0x84, 0x41, 0x84, 0x42, 0x84, 0x43, /* 0x10-0x13 */
+	0x84, 0x44, 0x84, 0x45, 0x84, 0x47, 0x84, 0x48, /* 0x14-0x17 */
+	0x84, 0x49, 0x84, 0x4A, 0x84, 0x4B, 0x84, 0x4C, /* 0x18-0x1B */
+	0x84, 0x4D, 0x84, 0x4E, 0x84, 0x4F, 0x84, 0x50, /* 0x1C-0x1F */
+	0x84, 0x51, 0x84, 0x52, 0x84, 0x53, 0x84, 0x54, /* 0x20-0x23 */
+	0x84, 0x55, 0x84, 0x56, 0x84, 0x57, 0x84, 0x58, /* 0x24-0x27 */
+	0x84, 0x59, 0x84, 0x5A, 0x84, 0x5B, 0x84, 0x5C, /* 0x28-0x2B */
+	0x84, 0x5D, 0x84, 0x5E, 0x84, 0x5F, 0x84, 0x60, /* 0x2C-0x2F */
+	0x84, 0x70, 0x84, 0x71, 0x84, 0x72, 0x84, 0x73, /* 0x30-0x33 */
+	0x84, 0x74, 0x84, 0x75, 0x84, 0x77, 0x84, 0x78, /* 0x34-0x37 */
+	0x84, 0x79, 0x84, 0x7A, 0x84, 0x7B, 0x84, 0x7C, /* 0x38-0x3B */
+	0x84, 0x7D, 0x84, 0x7E, 0x84, 0x80, 0x84, 0x81, /* 0x3C-0x3F */
+	0x84, 0x82, 0x84, 0x83, 0x84, 0x84, 0x84, 0x85, /* 0x40-0x43 */
+	0x84, 0x86, 0x84, 0x87, 0x84, 0x88, 0x84, 0x89, /* 0x44-0x47 */
+	0x84, 0x8A, 0x84, 0x8B, 0x84, 0x8C, 0x84, 0x8D, /* 0x48-0x4B */
+	0x84, 0x8E, 0x84, 0x8F, 0x84, 0x90, 0x84, 0x91, /* 0x4C-0x4F */
+	0x00, 0x00, 0x84, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+};
+
+static unsigned char u2c_20[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x81, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x81, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x81, 0x65, 0x81, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x81, 0x67, 0x81, 0x68, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x81, 0xF5, 0x81, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x81, 0x64, 0x81, 0x63, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x81, 0xF1, 0x00, 0x00, 0x81, 0x8C, 0x81, 0x8D, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xA6, /* 0x38-0x3B */
+};
+
+static unsigned char u2c_21[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x8E, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0x59, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xFA, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xF0, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xFA, 0x4A, 0xFA, 0x4B, 0xFA, 0x4C, 0xFA, 0x4D, /* 0x60-0x63 */
+	0xFA, 0x4E, 0xFA, 0x4F, 0xFA, 0x50, 0xFA, 0x51, /* 0x64-0x67 */
+	0xFA, 0x52, 0xFA, 0x53, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xEE, 0xEF, 0xEE, 0xF0, 0xEE, 0xF1, 0xEE, 0xF2, /* 0x70-0x73 */
+	0xEE, 0xF3, 0xEE, 0xF4, 0xEE, 0xF5, 0xEE, 0xF6, /* 0x74-0x77 */
+	0xEE, 0xF7, 0xEE, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x81, 0xA9, 0x81, 0xAA, 0x81, 0xA8, 0x81, 0xAB, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xCB, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x81, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+};
+
+static unsigned char u2c_22[512] = {
+	0x81, 0xCD, 0x00, 0x00, 0x81, 0xDD, 0x81, 0xCE, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xDE, /* 0x04-0x07 */
+	0x81, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x81, 0xB9, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x87, 0x94, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x95, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x81, 0xE5, 0x81, 0x87, 0x87, 0x98, /* 0x1C-0x1F */
+	0x87, 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x81, 0x61, 0x00, 0x00, 0x81, 0xC8, /* 0x24-0x27 */
+	0x81, 0xC9, 0x87, 0x9B, 0x87, 0x9C, 0x87, 0x92, /* 0x28-0x2B */
+	0x81, 0xE8, 0x00, 0x00, 0x87, 0x93, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x81, 0x88, 0xFA, 0x5B, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x81, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x90, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x81, 0x82, 0x87, 0x91, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0x85, 0x81, 0x86, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xE1, 0x81, 0xE2, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xBC, 0x81, 0xBD, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xBA, 0x81, 0xBB, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x87, 0x96, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x99, /* 0xBC-0xBF */
+};
+
+static unsigned char u2c_23[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xDC, 0x00, 0x00, /* 0x10-0x13 */
+};
+
+static unsigned char u2c_24[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x87, 0x40, 0x87, 0x41, 0x87, 0x42, 0x87, 0x43, /* 0x60-0x63 */
+	0x87, 0x44, 0x87, 0x45, 0x87, 0x46, 0x87, 0x47, /* 0x64-0x67 */
+	0x87, 0x48, 0x87, 0x49, 0x87, 0x4A, 0x87, 0x4B, /* 0x68-0x6B */
+	0x87, 0x4C, 0x87, 0x4D, 0x87, 0x4E, 0x87, 0x4F, /* 0x6C-0x6F */
+	0x87, 0x50, 0x87, 0x51, 0x87, 0x52, 0x87, 0x53, /* 0x70-0x73 */
+};
+
+static unsigned char u2c_25[512] = {
+	0x84, 0x9F, 0x84, 0xAA, 0x84, 0xA0, 0x84, 0xAB, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x84, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x84, 0xAC, /* 0x0C-0x0F */
+	0x84, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x84, 0xAD, /* 0x10-0x13 */
+	0x84, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x84, 0xAF, /* 0x14-0x17 */
+	0x84, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x84, 0xAE, /* 0x18-0x1B */
+	0x84, 0xA5, 0x84, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x84, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB0, /* 0x20-0x23 */
+	0x84, 0xA7, 0x84, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x84, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB2, /* 0x28-0x2B */
+	0x84, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB6, /* 0x2C-0x2F */
+	0x84, 0xBB, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB1, /* 0x30-0x33 */
+	0x84, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB8, /* 0x34-0x37 */
+	0x84, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB3, /* 0x38-0x3B */
+	0x84, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB9, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x84, 0xBE, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xB4, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x81, 0xA1, 0x81, 0xA0, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xA3, 0x81, 0xA2, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x81, 0xA5, 0x81, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0x9F, 0x81, 0x9E, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x9B, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0x9D, 0x81, 0x9C, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xFC, /* 0xEC-0xEF */
+};
+
+static unsigned char u2c_26[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x81, 0x9A, 0x81, 0x99, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x81, 0x8A, 0x00, 0x00, 0x81, 0x89, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x81, 0xF4, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x81, 0xF3, 0x00, 0x00, 0x81, 0xF2, /* 0x6C-0x6F */
+};
+
+static unsigned char u2c_30[512] = {
+	0x81, 0x40, 0x81, 0x41, 0x81, 0x42, 0x81, 0x56, /* 0x00-0x03 */
+	0x00, 0x00, 0x81, 0x58, 0x81, 0x59, 0x81, 0x5A, /* 0x04-0x07 */
+	0x81, 0x71, 0x81, 0x72, 0x81, 0x73, 0x81, 0x74, /* 0x08-0x0B */
+	0x81, 0x75, 0x81, 0x76, 0x81, 0x77, 0x81, 0x78, /* 0x0C-0x0F */
+	0x81, 0x79, 0x81, 0x7A, 0x81, 0xA7, 0x81, 0xAC, /* 0x10-0x13 */
+	0x81, 0x6B, 0x81, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x87, 0x80, 0x00, 0x00, 0x87, 0x81, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x82, 0x9F, 0x82, 0xA0, 0x82, 0xA1, /* 0x40-0x43 */
+	0x82, 0xA2, 0x82, 0xA3, 0x82, 0xA4, 0x82, 0xA5, /* 0x44-0x47 */
+	0x82, 0xA6, 0x82, 0xA7, 0x82, 0xA8, 0x82, 0xA9, /* 0x48-0x4B */
+	0x82, 0xAA, 0x82, 0xAB, 0x82, 0xAC, 0x82, 0xAD, /* 0x4C-0x4F */
+	0x82, 0xAE, 0x82, 0xAF, 0x82, 0xB0, 0x82, 0xB1, /* 0x50-0x53 */
+	0x82, 0xB2, 0x82, 0xB3, 0x82, 0xB4, 0x82, 0xB5, /* 0x54-0x57 */
+	0x82, 0xB6, 0x82, 0xB7, 0x82, 0xB8, 0x82, 0xB9, /* 0x58-0x5B */
+	0x82, 0xBA, 0x82, 0xBB, 0x82, 0xBC, 0x82, 0xBD, /* 0x5C-0x5F */
+	0x82, 0xBE, 0x82, 0xBF, 0x82, 0xC0, 0x82, 0xC1, /* 0x60-0x63 */
+	0x82, 0xC2, 0x82, 0xC3, 0x82, 0xC4, 0x82, 0xC5, /* 0x64-0x67 */
+	0x82, 0xC6, 0x82, 0xC7, 0x82, 0xC8, 0x82, 0xC9, /* 0x68-0x6B */
+	0x82, 0xCA, 0x82, 0xCB, 0x82, 0xCC, 0x82, 0xCD, /* 0x6C-0x6F */
+	0x82, 0xCE, 0x82, 0xCF, 0x82, 0xD0, 0x82, 0xD1, /* 0x70-0x73 */
+	0x82, 0xD2, 0x82, 0xD3, 0x82, 0xD4, 0x82, 0xD5, /* 0x74-0x77 */
+	0x82, 0xD6, 0x82, 0xD7, 0x82, 0xD8, 0x82, 0xD9, /* 0x78-0x7B */
+	0x82, 0xDA, 0x82, 0xDB, 0x82, 0xDC, 0x82, 0xDD, /* 0x7C-0x7F */
+	
+	0x82, 0xDE, 0x82, 0xDF, 0x82, 0xE0, 0x82, 0xE1, /* 0x80-0x83 */
+	0x82, 0xE2, 0x82, 0xE3, 0x82, 0xE4, 0x82, 0xE5, /* 0x84-0x87 */
+	0x82, 0xE6, 0x82, 0xE7, 0x82, 0xE8, 0x82, 0xE9, /* 0x88-0x8B */
+	0x82, 0xEA, 0x82, 0xEB, 0x82, 0xEC, 0x82, 0xED, /* 0x8C-0x8F */
+	0x82, 0xEE, 0x82, 0xEF, 0x82, 0xF0, 0x82, 0xF1, /* 0x90-0x93 */
+	0x83, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x4A, /* 0x98-0x9B */
+	0x81, 0x4B, 0x81, 0x54, 0x81, 0x55, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x83, 0x40, 0x83, 0x41, 0x83, 0x42, /* 0xA0-0xA3 */
+	0x83, 0x43, 0x83, 0x44, 0x83, 0x45, 0x83, 0x46, /* 0xA4-0xA7 */
+	0x83, 0x47, 0x83, 0x48, 0x83, 0x49, 0x83, 0x4A, /* 0xA8-0xAB */
+	0x83, 0x4B, 0x83, 0x4C, 0x83, 0x4D, 0x83, 0x4E, /* 0xAC-0xAF */
+	0x83, 0x4F, 0x83, 0x50, 0x83, 0x51, 0x83, 0x52, /* 0xB0-0xB3 */
+	0x83, 0x53, 0x83, 0x54, 0x83, 0x55, 0x83, 0x56, /* 0xB4-0xB7 */
+	0x83, 0x57, 0x83, 0x58, 0x83, 0x59, 0x83, 0x5A, /* 0xB8-0xBB */
+	0x83, 0x5B, 0x83, 0x5C, 0x83, 0x5D, 0x83, 0x5E, /* 0xBC-0xBF */
+	0x83, 0x5F, 0x83, 0x60, 0x83, 0x61, 0x83, 0x62, /* 0xC0-0xC3 */
+	0x83, 0x63, 0x83, 0x64, 0x83, 0x65, 0x83, 0x66, /* 0xC4-0xC7 */
+	0x83, 0x67, 0x83, 0x68, 0x83, 0x69, 0x83, 0x6A, /* 0xC8-0xCB */
+	0x83, 0x6B, 0x83, 0x6C, 0x83, 0x6D, 0x83, 0x6E, /* 0xCC-0xCF */
+	0x83, 0x6F, 0x83, 0x70, 0x83, 0x71, 0x83, 0x72, /* 0xD0-0xD3 */
+	0x83, 0x73, 0x83, 0x74, 0x83, 0x75, 0x83, 0x76, /* 0xD4-0xD7 */
+	0x83, 0x77, 0x83, 0x78, 0x83, 0x79, 0x83, 0x7A, /* 0xD8-0xDB */
+	0x83, 0x7B, 0x83, 0x7C, 0x83, 0x7D, 0x83, 0x7E, /* 0xDC-0xDF */
+	0x83, 0x80, 0x83, 0x81, 0x83, 0x82, 0x83, 0x83, /* 0xE0-0xE3 */
+	0x83, 0x84, 0x83, 0x85, 0x83, 0x86, 0x83, 0x87, /* 0xE4-0xE7 */
+	0x83, 0x88, 0x83, 0x89, 0x83, 0x8A, 0x83, 0x8B, /* 0xE8-0xEB */
+	0x83, 0x8C, 0x83, 0x8D, 0x83, 0x8E, 0x83, 0x8F, /* 0xEC-0xEF */
+	0x83, 0x90, 0x83, 0x91, 0x83, 0x92, 0x83, 0x93, /* 0xF0-0xF3 */
+	0x83, 0x94, 0x83, 0x95, 0x83, 0x96, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x45, /* 0xF8-0xFB */
+	0x81, 0x5B, 0x81, 0x52, 0x81, 0x53, 0x00, 0x00, /* 0xFC-0xFF */
+};
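(Editorial aside, not part of the patch: the u2c_XX arrays above are per-page Unicode-to-Shift-JIS lookup tables. Each array covers one 256-code-point block, u2c_30 for instance spans U+3000-U+30FF, each code point occupies two consecutive output bytes, so the low byte of the code point times two indexes the page, and an all-zero pair marks an unmapped character. A minimal sketch of how such a page is typically consulted is shown below; the demo_page_tables dispatch array and the demo_uni2sjis helper are illustrative names only, not the conversion routine this patch actually installs.)

	/*
	 * Illustrative sketch only: dispatch on the high byte of the
	 * code point to pick a page table, then use the low byte * 2
	 * as the index into that page.
	 */
	static const unsigned char *demo_page_tables[256] = {
		[0x30] = u2c_30,	/* U+3000-U+30FF: punctuation and kana */
	};

	/* Convert one BMP code point to Shift-JIS; returns the number of
	 * output bytes written, or -1 when the code point is unmapped. */
	static int demo_uni2sjis(unsigned short uni, unsigned char out[2])
	{
		const unsigned char *page = demo_page_tables[(uni >> 8) & 0xFF];
		unsigned char lo = uni & 0xFF;

		if (!page)
			return -1;		/* no table for this block */
		out[0] = page[lo * 2];
		out[1] = page[lo * 2 + 1];
		if (out[0] == 0x00 && out[1] == 0x00)
			return -1;		/* 0x00,0x00 entry: unmapped */
		return 2;
	}

(For example, U+3000 maps to 0x81 0x40, the ideographic space, which is the first entry of u2c_30 above.)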
+
+static unsigned char u2c_32[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xFA, 0x58, 0x87, 0x8B, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x87, 0x8C, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x87, 0x85, 0x87, 0x86, 0x87, 0x87, 0x87, 0x88, /* 0xA4-0xA7 */
+	0x87, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+};
+
+static unsigned char u2c_33[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x65, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x87, 0x69, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x87, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x87, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x61, 0x87, 0x6B, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x6A, 0x87, 0x64, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x6C, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x66, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x6E, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x87, 0x5F, 0x87, 0x6D, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x87, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x87, 0x67, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x68, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x7E, /* 0x78-0x7B */
+	0x87, 0x8F, 0x87, 0x8E, 0x87, 0x8D, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x72, 0x87, 0x73, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x87, 0x6F, 0x87, 0x70, 0x87, 0x71, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x87, 0x75, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x87, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x87, 0x83, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+};
+
+static unsigned char u2c_4E[512] = {
+	0x88, 0xEA, 0x92, 0x9A, 0x00, 0x00, 0x8E, 0xB5, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x9C, /* 0x04-0x07 */
+	0x8F, 0xE4, 0x8E, 0x4F, 0x8F, 0xE3, 0x89, 0xBA, /* 0x08-0x0B */
+	0x00, 0x00, 0x95, 0x73, 0x97, 0x5E, 0x00, 0x00, /* 0x0C-0x0F */
+	0x98, 0xA0, 0x89, 0x4E, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x8A, 0x8E, 0x98, 0xA1, 0x90, 0xA2, 0x99, 0xC0, /* 0x14-0x17 */
+	0x8B, 0x75, 0x95, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xE5, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x97, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xC0, 0x00, 0x00, /* 0x24-0x27 */
+	0xED, 0x4C, 0x00, 0x00, 0x98, 0xA2, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x92, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x98, 0xA3, 0x8B, 0xF8, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xA4, 0x00, 0x00, /* 0x34-0x37 */
+	0x8A, 0xDB, 0x92, 0x4F, 0x00, 0x00, 0x8E, 0xE5, /* 0x38-0x3B */
+	0x98, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x98, 0xA6, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xA7, 0x94, 0x54, /* 0x40-0x43 */
+	0x00, 0x00, 0x8B, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x56, /* 0x48-0x4B */
+	0x00, 0x00, 0x93, 0xE1, 0x8C, 0xC1, 0x96, 0x52, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE5, 0x68, 0x98, 0xA8, 0x8F, 0xE6, /* 0x54-0x57 */
+	0x98, 0xA9, 0x89, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x8B, 0xE3, 0x8C, 0xEE, 0x96, 0xE7, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xA4, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x97, 0x90, 0x00, 0x00, 0x93, 0xFB, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xA3, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x8B, 0x54, 0x00, 0x00, 0x98, 0xAA, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x98, 0xAB, 0x97, 0xB9, 0x00, 0x00, /* 0x84-0x87 */
+	0x97, 0x5C, 0x91, 0x88, 0x98, 0xAD, 0x8E, 0x96, /* 0x88-0x8B */
+	0x93, 0xF1, 0x00, 0x00, 0x98, 0xB0, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x89, 0x5D, 0x8C, 0xDD, 0x00, 0x00, /* 0x90-0x93 */
+	0x8C, 0xDC, 0x88, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x98, 0x6A, 0x98, 0x69, 0x00, 0x00, 0x8D, 0xB1, /* 0x98-0x9B */
+	0x88, 0x9F, 0x00, 0x00, 0x98, 0xB1, 0x98, 0xB2, /* 0x9C-0x9F */
+	0x98, 0xB3, 0x96, 0x53, 0x98, 0xB4, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x8C, 0xF0, 0x88, 0xE5, 0x96, 0x92, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x8B, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x9D, /* 0xA8-0xAB */
+	0x8B, 0x9E, 0x92, 0xE0, 0x97, 0xBA, 0x00, 0x00, /* 0xAC-0xAF */
+	0x98, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x98, 0xB6, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xB7, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x6C, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x8F, 0x59, 0x90, 0x6D, 0x98, 0xBC, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x98, 0xBA, 0x00, 0x00, 0x98, 0xBB, 0x8B, 0x77, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xA1, 0x89, 0xEE, /* 0xC8-0xCB */
+	0x00, 0x00, 0x98, 0xB9, 0x98, 0xB8, 0x95, 0xA7, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x8E, 0x65, 0x8E, 0x64, 0x91, 0xBC, 0x98, 0xBD, /* 0xD4-0xD7 */
+	0x95, 0x74, 0x90, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x81, 0x57, 0x98, 0xBE, 0x98, 0xC0, /* 0xDC-0xDF */
+	0x00, 0x00, 0xED, 0x4D, 0x00, 0x00, 0x91, 0xE3, /* 0xE0-0xE3 */
+	0x97, 0xDF, 0x88, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x98, 0xBF, 0x89, 0xBC, 0x00, 0x00, /* 0xEC-0xEF */
+	0x8B, 0xC2, 0x00, 0x00, 0x92, 0x87, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x8F, 0x98, 0xC1, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x43, /* 0xF8-0xFB */
+	0xED, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_4F[512] = {
+	0xED, 0x4F, 0x8A, 0xE9, 0x00, 0x00, 0xED, 0x50, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x98, 0xC2, 0x88, 0xC9, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x8C, 0xDE, 0x8A, 0xEA, 0x95, 0x9A, /* 0x0C-0x0F */
+	0x94, 0xB0, 0x8B, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xEF, 0x00, 0x00, /* 0x18-0x1B */
+	0x98, 0xE5, 0x93, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x8C, /* 0x2C-0x2F */
+	0x98, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x94, 0xBA, 0x00, 0x00, 0x97, 0xE0, 0x00, 0x00, /* 0x34-0x37 */
+	0x90, 0x4C, 0xED, 0x51, 0x8E, 0x66, 0x00, 0x00, /* 0x38-0x3B */
+	0x8E, 0x97, 0x89, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xCF, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x41, 0x98, 0xC8, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x88, 0xCA, 0x92, 0xE1, 0x8F, 0x5A, /* 0x4C-0x4F */
+	0x8D, 0xB2, 0x97, 0x43, 0x00, 0x00, 0x91, 0xCC, /* 0x50-0x53 */
+	0x00, 0x00, 0x89, 0xBD, 0xED, 0x52, 0x98, 0xC7, /* 0x54-0x57 */
+	0x00, 0x00, 0x97, 0x5D, 0x98, 0xC3, 0x98, 0xC5, /* 0x58-0x5B */
+	0x8D, 0xEC, 0x98, 0xC6, 0x9B, 0x43, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x98, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xD1, /* 0x6C-0x6F */
+	0x98, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x89, 0xC0, /* 0x70-0x73 */
+	0x00, 0x00, 0x95, 0xB9, 0x98, 0xC9, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xCD, /* 0x78-0x7B */
+	0x8C, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x67, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xA4, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xD2, 0x00, 0x00, /* 0x84-0x87 */
+	0x98, 0xCA, 0x00, 0x00, 0xED, 0x54, 0x97, 0xE1, /* 0x88-0x8B */
+	0x00, 0x00, 0x8E, 0x98, 0x00, 0x00, 0x98, 0xCB, /* 0x8C-0x8F */
+	0x00, 0x00, 0x98, 0xD0, 0xED, 0x53, 0x00, 0x00, /* 0x90-0x93 */
+	0xED, 0x56, 0x00, 0x00, 0x98, 0xD3, 0x00, 0x00, /* 0x94-0x97 */
+	0x98, 0xCC, 0x00, 0x00, 0xED, 0x55, 0x8B, 0x9F, /* 0x98-0x9B */
+	0x00, 0x00, 0x88, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x8B, 0xA0, 0x89, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x44, /* 0xA8-0xAB */
+	0x00, 0x00, 0x96, 0x99, 0x95, 0x8E, 0x8C, 0xF2, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x90, 0x4E, 0x97, 0xB5, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xD6, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x57, 0x91, 0xA3, /* 0xC0-0xC3 */
+	0x89, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xED, 0x45, 0x8F, 0x72, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xED, 0x57, 0x98, 0xD7, 0x00, 0x00, /* 0xCC-0xCF */
+	0x98, 0xDC, 0x98, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x98, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x91, 0xAD, /* 0xD4-0xD7 */
+	0x98, 0xD8, 0x00, 0x00, 0x98, 0xDB, 0x98, 0xD9, /* 0xD8-0xDB */
+	0x00, 0x00, 0x95, 0xDB, 0x00, 0x00, 0x98, 0xD6, /* 0xDC-0xDF */
+	0x00, 0x00, 0x90, 0x4D, 0x00, 0x00, 0x96, 0x93, /* 0xE0-0xE3 */
+	0x98, 0xDD, 0x98, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x43, 0x98, 0xEB, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x6F, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x95, 0x55, 0x98, 0xE6, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x95, 0xEE, 0x00, 0x00, 0x89, 0xB4, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xEA, 0xED, 0x5A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_50[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x98, 0xE4, 0x98, 0xED, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x91, 0x71, 0x00, 0x00, 0x8C, 0xC2, /* 0x08-0x0B */
+	0x00, 0x00, 0x94, 0x7B, 0x00, 0x00, 0xE0, 0xC5, /* 0x0C-0x0F */
+	0x00, 0x00, 0x98, 0xEC, 0x93, 0x7C, 0x00, 0x00, /* 0x10-0x13 */
+	0x98, 0xE1, 0x00, 0x00, 0x8C, 0xF4, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x8C, 0xF3, 0x98, 0xDF, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x5B, 0x8E, 0xD8, /* 0x1C-0x1F */
+	0x00, 0x00, 0x98, 0xE7, 0xED, 0x59, 0x95, 0xED, /* 0x20-0x23 */
+	0x92, 0x6C, 0x98, 0xE3, 0x8C, 0x91, 0x00, 0x00, /* 0x24-0x27 */
+	0x98, 0xE0, 0x98, 0xE8, 0x98, 0xE2, 0x97, 0xCF, /* 0x28-0x2B */
+	0x98, 0xE9, 0x98, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xE4, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x8C, 0x90, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xED, 0x58, 0x00, 0x00, 0xED, 0x5E, 0x98, 0xEE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x5C, 0x98, 0xEF, /* 0x44-0x47 */
+	0x98, 0xF3, 0x88, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xCE, /* 0x4C-0x4F */
+	0x98, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x98, 0xF1, 0x98, 0xF5, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xF4, 0x00, 0x00, /* 0x58-0x5B */
+	0x92, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x8C, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x98, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xED, 0x5D, 0x00, 0x00, 0x8E, 0xC3, 0x00, 0x00, /* 0x70-0x73 */
+	0x91, 0xA4, 0x92, 0xE3, 0x8B, 0xF4, 0x00, 0x00, /* 0x74-0x77 */
+	0x98, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x8B, 0x55, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x98, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x98, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x96, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x8C, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xED, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x8E, 0x50, 0x94, 0xF5, 0x98, 0xF9, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x8D, 0xC3, 0x97, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0xFC, 0x99, 0x42, /* 0xB0-0xB3 */
+	0x98, 0xFB, 0x8D, 0xC2, 0x00, 0x00, 0x8F, 0x9D, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x58, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x43, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x8B, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x99, 0x40, 0x99, 0x41, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x93, 0xAD, 0x00, 0x00, 0x91, 0x9C, /* 0xCC-0xCF */
+	0x00, 0x00, 0x8B, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x96, 0x6C, 0x99, 0x44, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xED, 0x61, 0x00, 0x00, 0x97, 0xBB, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x45, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x48, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x99, 0x46, 0x00, 0x00, 0x91, 0x6D, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x99, 0x47, 0x99, 0x49, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xED, 0x60, 0x99, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x99, 0x4A, 0x00, 0x00, 0x95, 0xC6, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_51[512] = {
+	0x8B, 0x56, 0x99, 0x4D, 0x99, 0x4E, 0x00, 0x00, /* 0x00-0x03 */
+	0x89, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x99, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xF2, 0x00, 0x00, /* 0x10-0x13 */
+	0x99, 0x51, 0x99, 0x50, 0x99, 0x4F, 0x00, 0x00, /* 0x14-0x17 */
+	0x98, 0xD4, 0x00, 0x00, 0x99, 0x52, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x9E, /* 0x1C-0x1F */
+	0x00, 0x00, 0x99, 0x53, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x44, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xD7, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x55, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x54, 0x99, 0x57, /* 0x38-0x3B */
+	0x99, 0x56, 0x00, 0x00, 0x00, 0x00, 0x99, 0x58, /* 0x3C-0x3F */
+	0x99, 0x59, 0x88, 0xF2, 0x00, 0x00, 0x8C, 0xB3, /* 0x40-0x43 */
+	0x8C, 0x5A, 0x8F, 0x5B, 0x92, 0x9B, 0x8B, 0xA2, /* 0x44-0x47 */
+	0x90, 0xE6, 0x8C, 0xF5, 0xED, 0x62, 0x8D, 0x8E, /* 0x48-0x4B */
+	0x99, 0x5B, 0x96, 0xC6, 0x93, 0x65, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8E, 0x99, 0x00, 0x00, 0x99, 0x5A, 0x00, 0x00, /* 0x50-0x53 */
+	0x99, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x7D, 0x00, 0x00, /* 0x58-0x5B */
+	0x8A, 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x5D, 0x00, 0x00, /* 0x60-0x63 */
+	0xED, 0x63, 0x93, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x91, 0x53, 0x99, 0x5F, 0x99, 0x60, 0x94, 0xAA, /* 0x68-0x6B */
+	0x8C, 0xF6, 0x98, 0x5A, 0x99, 0x61, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x8B, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x95, 0xBA, 0x91, 0xB4, 0x8B, 0xEF, /* 0x74-0x77 */
+	0x93, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x8C, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x99, 0x62, 0x00, 0x00, 0x99, 0x63, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x93, 0xE0, 0x89, 0x7E, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x99, 0x66, 0x8D, 0xFB, 0x00, 0x00, /* 0x88-0x8B */
+	0x99, 0x65, 0x8D, 0xC4, 0x00, 0x00, 0x99, 0x67, /* 0x8C-0x8F */
+	0xE3, 0xEC, 0x99, 0x68, 0x96, 0x60, 0x99, 0x69, /* 0x90-0x93 */
+	0x00, 0x00, 0x99, 0x6A, 0x99, 0x6B, 0x8F, 0xE7, /* 0x94-0x97 */
+	0x00, 0x00, 0x8E, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xED, 0x64, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x8A, 0xA5, 0x00, 0x00, 0x99, 0x6E, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x99, 0x6C, 0x96, 0xBB, 0x99, 0x6D, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x95, 0x79, 0x99, 0x6F, 0x99, 0x70, 0x99, 0x71, /* 0xA8-0xAB */
+	0x93, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x99, 0x75, 0x99, 0x73, 0x99, 0x74, 0x99, 0x72, /* 0xB0-0xB3 */
+	0x8D, 0xE1, 0x99, 0x76, 0x96, 0xE8, 0x97, 0xE2, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x99, 0x77, 0xED, 0x65, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x90, 0xA6, 0x99, 0x78, 0x8F, 0x79, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x99, 0x79, 0x00, 0x00, 0x92, 0x9C, /* 0xC8-0xCB */
+	0x97, 0xBD, 0x93, 0x80, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xC3, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x7A, /* 0xD8-0xDB */
+	0xEA, 0xA3, 0x8B, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x99, 0x7B, 0x96, 0x7D, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x88, 0x91, 0xFA, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x99, 0x7D, 0x93, 0xE2, 0x00, 0x00, /* 0xE8-0xEB */
+	0xED, 0x66, 0x99, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x99, 0x80, 0x8A, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x99, 0x81, 0x8B, 0xA5, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x93, 0xCA, 0x89, 0x9A, 0x8F, 0x6F, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x94, 0x9F, 0x99, 0x82, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_52[512] = {
+	0x93, 0x81, 0x00, 0x00, 0x00, 0x00, 0x90, 0x6E, /* 0x00-0x03 */
+	0x99, 0x83, 0x00, 0x00, 0x95, 0xAA, 0x90, 0xD8, /* 0x04-0x07 */
+	0x8A, 0xA0, 0x00, 0x00, 0x8A, 0xA7, 0x99, 0x84, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x86, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x8C, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x99, 0x85, 0xED, 0x67, 0x00, 0x00, 0x97, 0xF1, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x8F, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x94, 0xBB, 0x95, 0xCA, 0x00, 0x00, 0x99, 0x87, /* 0x24-0x27 */
+	0x00, 0x00, 0x97, 0x98, 0x99, 0x88, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x89, 0x00, 0x00, /* 0x2C-0x2F */
+	0x93, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x99, 0x8A, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xA7, 0x8D, 0xFC, /* 0x34-0x37 */
+	0x8C, 0x94, 0x99, 0x8B, 0x8E, 0x68, 0x8D, 0x8F, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xE4, /* 0x40-0x43 */
+	0x99, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x91, 0xA5, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xED, 0x99, 0x8E, /* 0x48-0x4B */
+	0x99, 0x8F, 0x91, 0x4F, 0x00, 0x00, 0x99, 0x8C, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x99, 0x91, 0x00, 0x00, 0x96, 0x55, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x84, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0x90, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x95, /* 0x60-0x63 */
+	0x8D, 0xDC, 0x94, 0x8D, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x99, 0x94, 0x99, 0x92, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x9B, /* 0x6C-0x6F */
+	0x8F, 0xE8, 0x99, 0x9B, 0x8A, 0x84, 0x99, 0x95, /* 0x70-0x73 */
+	0x99, 0x93, 0x91, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x99, 0x97, 0x00, 0x00, 0x99, 0x96, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x63, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x80, /* 0x84-0x87 */
+	0x99, 0x9C, 0x97, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x99, 0x98, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x99, 0x9D, 0x99, 0x9A, 0x00, 0x00, /* 0x90-0x93 */
+	0x99, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xCD, /* 0x98-0x9B */
+	0xED, 0x68, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xF7, /* 0x9C-0x9F */
+	0x89, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x97, 0xF2, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x69, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x8F, 0x95, 0x93, 0x77, 0x8D, 0x85, /* 0xA8-0xAB */
+	0x99, 0xA0, 0x99, 0xA1, 0x00, 0x00, 0xEE, 0x5B, /* 0xAC-0xAF */
+	0x00, 0x00, 0x97, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x98, 0x4A, 0x99, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x8C, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x99, 0xA2, 0x00, 0x00, 0x8A, 0x4E, 0x00, 0x00, /* 0xBC-0xBF */
+	0xED, 0x6A, 0x99, 0xA4, 0x00, 0x00, 0x96, 0x75, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x92, 0xBA, 0x00, 0x00, 0x97, 0x45, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x95, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x99, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xD3, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x93, 0xAE, 0x00, 0x00, 0x99, 0xA6, /* 0xD4-0xD7 */
+	0x8A, 0xA8, 0x96, 0xB1, 0x00, 0x00, 0xED, 0x6B, /* 0xD8-0xDB */
+	0x00, 0x00, 0x8F, 0x9F, 0x99, 0xA7, 0x95, 0xE5, /* 0xDC-0xDF */
+	0x99, 0xAB, 0x00, 0x00, 0x90, 0xA8, 0x99, 0xA8, /* 0xE0-0xE3 */
+	0x8B, 0xCE, 0x00, 0x00, 0x99, 0xA9, 0x8A, 0xA9, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x4D, 0x99, 0xAC, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x99, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x99, 0xAE, 0x99, 0xAF, 0x8E, 0xD9, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xF9, 0x96, 0xDC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_53[512] = {
+	0xED, 0x6C, 0x96, 0xE6, 0x93, 0xF5, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x95, 0xEF, 0x99, 0xB0, 0xED, 0x6D, /* 0x04-0x07 */
+	0x99, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x99, 0xB3, 0x00, 0x00, 0x99, 0xB5, /* 0x0C-0x0F */
+	0x99, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x99, 0xB6, 0x89, 0xBB, 0x96, 0x6B, /* 0x14-0x17 */
+	0x00, 0x00, 0x8D, 0xFA, 0x99, 0xB7, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x91, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x8F, 0xA0, 0x8B, 0xA7, 0x00, 0x00, 0x99, 0xB8, /* 0x20-0x23 */
+	0xED, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xD9, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0xB9, /* 0x2C-0x2F */
+	0x00, 0x00, 0x99, 0xBA, 0x00, 0x00, 0x99, 0xBB, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x99, 0xBC, 0x95, 0x43, 0x8B, 0xE6, 0x88, 0xE3, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xBD, /* 0x3C-0x3F */
+	0x99, 0xBD, 0x8F, 0x5C, 0x00, 0x00, 0x90, 0xE7, /* 0x40-0x43 */
+	0x00, 0x00, 0x99, 0xBF, 0x99, 0xBE, 0x8F, 0xA1, /* 0x44-0x47 */
+	0x8C, 0xDF, 0x99, 0xC1, 0x94, 0xBC, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x99, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x94, 0xDA, 0x91, 0xB2, 0x91, 0xEC, /* 0x50-0x53 */
+	0x8B, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x93, 0xEC, /* 0x54-0x57 */
+	0x92, 0x50, 0x00, 0x00, 0x94, 0x8E, 0x00, 0x00, /* 0x58-0x5B */
+	0x96, 0x6D, 0x00, 0x00, 0x99, 0xC4, 0x00, 0x00, /* 0x5C-0x5F */
+	0x90, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x54, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x99, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xC6, 0x89, 0x4B, /* 0x6C-0x6F */
+	0x88, 0xF3, 0x8A, 0xEB, 0xED, 0x6F, 0x91, 0xA6, /* 0x70-0x73 */
+	0x8B, 0x70, 0x97, 0x91, 0x00, 0x00, 0x99, 0xC9, /* 0x74-0x77 */
+	0x89, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x99, 0xC8, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xA8, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xCA, 0x00, 0x00, /* 0x80-0x83 */
+	0x96, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0x70, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xCB, 0x00, 0x00, /* 0x94-0x97 */
+	0x97, 0xD0, 0x00, 0x00, 0x8C, 0xFA, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xB4, /* 0x9C-0x9F */
+	0x99, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x99, 0xCE, 0x99, 0xCD, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x90, 0x7E, 0x89, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x89, 0x7D, 0x99, 0xCF, 0x00, 0x00, /* 0xAC-0xAF */
+	0x99, 0xD0, 0x00, 0x00, 0xED, 0x71, 0x8C, 0xB5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xD1, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x8E, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x51, 0x99, 0xD2, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x96, 0x94, 0x8D, 0xB3, 0x8B, 0x79, 0x97, 0x46, /* 0xC8-0xCB */
+	0x91, 0x6F, 0x94, 0xBD, 0x8E, 0xFB, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x8F, 0x66, 0x00, 0x00, 0x8E, 0xE6, 0x8E, 0xF3, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x8F, 0x96, 0x00, 0x00, 0x94, 0xBE, /* 0xD8-0xDB */
+	0x00, 0x00, 0xED, 0x72, 0x00, 0x00, 0x99, 0xD5, /* 0xDC-0xDF */
+	0x00, 0x00, 0x89, 0x62, 0x91, 0x70, 0x8C, 0xFB, /* 0xE0-0xE3 */
+	0x8C, 0xC3, 0x8B, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x99, 0xD9, 0x92, 0x40, 0x91, 0xFC, 0x8B, 0xA9, /* 0xE8-0xEB */
+	0x8F, 0xA2, 0x99, 0xDA, 0x99, 0xD8, 0x89, 0xC2, /* 0xEC-0xEF */
+	0x91, 0xE4, 0x8E, 0xB6, 0x8E, 0x6A, 0x89, 0x45, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0x90, 0x8D, 0x86, /* 0xF4-0xF7 */
+	0x8E, 0x69, 0x00, 0x00, 0x99, 0xDB, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_54[512] = {
+	0x00, 0x00, 0x99, 0xDC, 0x00, 0x00, 0x8B, 0x68, /* 0x00-0x03 */
+	0x8A, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x8D, 0x87, 0x8B, 0x67, 0x92, 0xDD, 0x89, 0x44, /* 0x08-0x0B */
+	0x93, 0xAF, 0x96, 0xBC, 0x8D, 0x40, 0x97, 0x99, /* 0x0C-0x0F */
+	0x93, 0x66, 0x8C, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x4E, /* 0x18-0x1B */
+	0x00, 0x00, 0x99, 0xE5, 0x00, 0x00, 0x8B, 0xE1, /* 0x1C-0x1F */
+	0x96, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xDB, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x99, 0xE4, 0x00, 0x00, 0x8A, 0xDC, /* 0x28-0x2B */
+	0x99, 0xDF, 0x99, 0xE0, 0x99, 0xE2, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xE3, 0x00, 0x00, /* 0x34-0x37 */
+	0x8B, 0x7A, 0x90, 0x81, 0x00, 0x00, 0x95, 0xAB, /* 0x38-0x3B */
+	0x99, 0xE1, 0x99, 0xDD, 0x8C, 0xE1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x99, 0xDE, 0x00, 0x00, 0x98, 0x43, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xF0, 0x00, 0x00, /* 0x44-0x47 */
+	0x92, 0xE6, 0x8C, 0xE0, 0x8D, 0x90, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xE6, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x93, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0xEA, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x8E, 0xFC, 0x00, 0x00, 0x8E, 0xF4, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x99, 0xED, 0x99, 0xEB, 0x00, 0x00, 0x96, 0xA1, /* 0x70-0x73 */
+	0x00, 0x00, 0x99, 0xE8, 0x99, 0xF1, 0x99, 0xEC, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0xEF, /* 0x78-0x7B */
+	0x8C, 0xC4, 0x96, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x99, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x99, 0xF2, 0x00, 0x00, 0x99, 0xF4, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x75, 0x8D, 0xEE, /* 0x88-0x8B */
+	0x98, 0x61, 0x00, 0x00, 0x99, 0xE9, 0x99, 0xE7, /* 0x8C-0x8F */
+	0x99, 0xF3, 0x00, 0x00, 0x99, 0xEE, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xED, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x99, 0xF6, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x9A, 0x42, 0x99, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x99, 0xFC, 0xED, 0x76, 0x00, 0x00, 0x9A, 0x40, /* 0xA8-0xAB */
+	0x99, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x5D, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xE7, 0x8A, 0x50, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x99, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9A, 0x44, 0x88, 0xF4, 0x9A, 0x43, 0x00, 0x00, /* 0xBC-0xBF */
+	0x88, 0xA3, 0x95, 0x69, 0x9A, 0x41, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x99, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x99, 0xF5, /* 0xC4-0xC7 */
+	0x99, 0xFB, 0x8D, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x9A, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x88, 0xF5, 0x9A, 0x4E, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x9A, 0x46, 0x9A, 0x47, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x8F, 0xA3, 0x96, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x9A, 0x4C, 0x9A, 0x4B, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x4E, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x4D, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x9A, 0x4A, 0x00, 0x00, 0xED, 0x77, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_55[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x89, 0x53, 0x00, 0x00, 0x8D, 0xB4, 0x90, 0x4F, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x48, /* 0x0C-0x0F */
+	0x93, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x9A, 0x49, 0x00, 0x00, 0x88, 0xA0, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x53, 0x97, 0x42, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8F, 0xA5, 0x00, 0x00, 0x9A, 0x59, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x9A, 0x58, 0x9A, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xC1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9A, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x91, 0xED, 0x9A, 0x55, 0x8F, 0xA4, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x9A, 0x52, 0x00, 0x00, 0x00, 0x00, 0x96, 0xE2, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x5B, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x56, 0x9A, 0x57, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x9A, 0x54, 0x9A, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x51, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x60, /* 0x78-0x7B */
+	0x9A, 0x65, 0x00, 0x00, 0x9A, 0x61, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x9A, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x66, /* 0x80-0x83 */
+	0x91, 0x50, 0x00, 0x00, 0xED, 0x78, 0x9A, 0x68, /* 0x84-0x87 */
+	0x00, 0x00, 0x8D, 0x41, 0x9A, 0x5E, 0x92, 0x9D, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x9A, 0x62, 0x9A, 0x5B, 0x8A, 0xAB, 0x00, 0x00, /* 0x98-0x9B */
+	0x8A, 0xEC, 0x8A, 0x85, 0x9A, 0x63, 0x9A, 0x5F, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x96, /* 0xA4-0xA7 */
+	0x9A, 0x69, 0x9A, 0x67, 0x91, 0x72, 0x8B, 0x69, /* 0xA8-0xAB */
+	0x8B, 0xAA, 0x00, 0x00, 0x9A, 0x64, 0x00, 0x00, /* 0xAC-0xAF */
+	0x8B, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x63, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x9A, 0x6D, 0x9A, 0x6B, 0x00, 0x00, 0x9A, 0xA5, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x9A, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x6A, 0x00, 0x00, /* 0xD8-0xDB */
+	0x9A, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x6C, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x6B, /* 0xE0-0xE3 */
+	0x9A, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x72, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x9A, 0x77, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x9A, 0x75, 0x9A, 0x74, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_56[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x51, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x89, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x9A, 0x71, 0x00, 0x00, 0x9A, 0x73, 0x8F, 0xA6, /* 0x14-0x17 */
+	0x89, 0x52, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x76, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x89, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x82, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8F, 0xFA, 0x9A, 0x7D, 0x00, 0x00, /* 0x30-0x33 */
+	0x9A, 0x7B, 0x00, 0x00, 0x9A, 0x7C, 0x00, 0x00, /* 0x34-0x37 */
+	0x9A, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x5C, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x91, 0x58, 0x00, 0x00, 0x9A, 0x78, 0x00, 0x00, /* 0x4C-0x4F */
+	0x9A, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x9A, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x9A, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x8A, 0xED, 0x00, 0x00, 0x9A, 0x84, 0x9A, 0x80, /* 0x68-0x6B */
+	0x9A, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x95, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x93, 0xD3, 0x00, 0x00, 0x94, 0xB6, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x9A, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x85, 0x8A, 0x64, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x87, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x8A, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x9A, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9A, 0x88, 0x00, 0x00, 0x94, 0x58, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x9A, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x8C, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x9A, 0x8E, 0x00, 0x00, 0x9A, 0x8D, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9A, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x9A, 0x93, 0x9A, 0x91, 0x9A, 0x8F, 0x9A, 0x92, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x9A, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x95, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x9A, 0x96, 0x00, 0x00, 0x9A, 0x97, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x98, /* 0xD4-0xD7 */
+	0x99, 0x64, 0x00, 0x00, 0x8E, 0xFA, 0x8E, 0x6C, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xF1, 0x00, 0x00, /* 0xDC-0xDF */
+	0x88, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x92, 0x63, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0x99, 0x00, 0x00, /* 0xEC-0xEF */
+	0x8D, 0xA2, 0x00, 0x00, 0x88, 0xCD, 0x90, 0x7D, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x9A, 0x9A, 0x8C, 0xC5, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x8D, 0x91, 0x00, 0x00, 0x9A, 0x9C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_57[512] = {
+	0x9A, 0x9B, 0x00, 0x00, 0x00, 0x00, 0x95, 0xDE, /* 0x00-0x03 */
+	0x9A, 0x9D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x9A, 0x9F, 0x9A, 0x9E, 0x00, 0x00, 0x9A, 0xA0, /* 0x08-0x0B */
+	0x00, 0x00, 0x9A, 0xA1, 0x00, 0x00, 0x8C, 0x97, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x80, 0x9A, 0xA2, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xA4, 0x00, 0x00, /* 0x14-0x17 */
+	0x9A, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x9A, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x93, 0x79, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xA7, 0x88, 0xB3, /* 0x24-0x27 */
+	0x8D, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x8C, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x92, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xA8, /* 0x34-0x37 */
+	0x9A, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xAB, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9A, 0xAC, 0x00, 0x00, 0x8D, 0xE2, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xCF, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x56, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xAA, 0x9A, 0xAD, /* 0x4C-0x4F */
+	0x8D, 0xBF, 0x8D, 0x42, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xED, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x9A, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x8D, 0xA3, 0xED, 0x7A, 0x92, 0x52, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x9A, 0xAE, 0x92, 0xD8, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xB2, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x82, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x9A, 0xB0, 0x9A, 0xB3, 0x00, 0x00, 0x8C, 0x5E, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xB4, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9A, 0xB5, 0x00, 0x00, 0x8D, 0x43, 0x8A, 0x5F, /* 0xA0-0xA3 */
+	0x9A, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xB8, 0x00, 0x00, /* 0xA8-0xAB */
+	0xED, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x9A, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xB6, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x9A, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xBA, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xBB, 0xED, 0x7D, /* 0xC4-0xC7 */
+	0xED, 0x7C, 0x00, 0x00, 0x00, 0x00, 0x96, 0x84, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xE9, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xBD, 0x9A, 0xBE, /* 0xD0-0xD3 */
+	0x9A, 0xBC, 0x00, 0x00, 0x9A, 0xC0, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x94, 0x57, 0x00, 0x00, 0x00, 0x00, 0x88, 0xE6, /* 0xDC-0xDF */
+	0x95, 0x75, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xC1, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x8F, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xB7, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x94, 0x7C, 0x8A, 0xEE, 0x00, 0x00, /* 0xF8-0xFB */
+	0x8D, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_58[512] = {
+	0x96, 0x78, 0x00, 0x00, 0x93, 0xB0, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x8C, 0x98, 0x91, 0xCD, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xBF, 0x9A, 0xC2, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x91, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x9A, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x9A, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x9A, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x92, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xAC, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x9F, /* 0x2C-0x2F */
+	0x89, 0x81, 0x95, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x8F, 0xEA, 0x93, 0x67, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xE4, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x9A, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x95, 0xBB, 0x97, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xF2, 0x9A, 0xC8, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x91, 0x59, 0x9A, 0xCB, 0x00, 0x00, /* 0x50-0x53 */
+	0x93, 0x83, 0x00, 0x00, 0x00, 0x00, 0x93, 0x68, /* 0x54-0x57 */
+	0x93, 0x84, 0x94, 0xB7, 0x92, 0xCB, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xC7, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xC7, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x89, 0x96, 0x00, 0x00, 0x93, 0x55, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x9A, 0xC9, 0x00, 0x00, 0x9A, 0xC5, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x90, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x9A, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x6D, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xAB, /* 0x80-0x83 */
+	0x00, 0x00, 0x9A, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xE6, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x9D, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x92, 0xC4, 0x00, 0x00, 0xED, 0x81, 0x9A, 0xD0, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x96, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x9A, 0xD1, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xD6, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x82, 0x95, 0xAD, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9A, 0xD5, 0x9A, 0xCF, 0x9A, 0xD2, 0x9A, 0xD4, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xA4, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x95, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x9A, 0xD7, 0x00, 0x00, 0x92, 0x64, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xF3, 0x00, 0x00, /* 0xC8-0xCB */
+	0x8F, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x9A, 0xD9, 0x00, 0x00, 0x9A, 0xD8, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x8D, 0x88, 0x00, 0x00, 0x9A, 0xDA, /* 0xD4-0xD7 */
+	0x9A, 0xDC, 0x9A, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x9A, 0xDE, 0x00, 0x00, 0x9A, 0xD3, 0x9A, 0xE0, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9A, 0xDF, 0x9A, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x6D, /* 0xE8-0xEB */
+	0x90, 0x70, 0x00, 0x00, 0x91, 0x73, 0x9A, 0xE1, /* 0xEC-0xEF */
+	0x90, 0xBA, 0x88, 0xEB, 0x94, 0x84, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xD9, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x9A, 0xE3, 0x9A, 0xE2, 0x9A, 0xE4, /* 0xF8-0xFB */
+	0x9A, 0xE5, 0x9A, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_59[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xE7, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x95, 0xCF, 0x9A, 0xE8, 0xED, 0x83, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xC4, /* 0x0C-0x0F */
+	0x9A, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x97, 0x5B, 0x8A, 0x4F, 0x00, 0x00, /* 0x14-0x17 */
+	0x99, 0xC7, 0x8F, 0x67, 0x91, 0xBD, 0x9A, 0xEA, /* 0x18-0x1B */
+	0x96, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xB2, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x9A, 0xEC, 0x00, 0x00, 0x91, 0xE5, /* 0x24-0x27 */
+	0x00, 0x00, 0x93, 0x56, 0x91, 0xBE, 0x95, 0x76, /* 0x28-0x2B */
+	0x9A, 0xED, 0x9A, 0xEE, 0x89, 0x9B, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8E, 0xB8, 0x9A, 0xEF, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xCE, /* 0x34-0x37 */
+	0x9A, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xF1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x89, 0x82, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xEF, /* 0x44-0x47 */
+	0x93, 0xDE, 0x95, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xF5, 0x91, 0x74, /* 0x4C-0x4F */
+	0x9A, 0xF4, 0x8C, 0x5F, 0x00, 0x00, 0xED, 0x84, /* 0x50-0x53 */
+	0x96, 0x7A, 0x9A, 0xF3, 0x00, 0x00, 0x93, 0x85, /* 0x54-0x57 */
+	0x9A, 0xF7, 0x00, 0x00, 0x9A, 0xF6, 0xED, 0x85, /* 0x58-0x5B */
+	0x00, 0x00, 0xED, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x9A, 0xF9, 0x00, 0x00, 0x9A, 0xF8, 0xED, 0x87, /* 0x60-0x63 */
+	0x00, 0x00, 0x89, 0x9C, 0x00, 0x00, 0x9A, 0xFA, /* 0x64-0x67 */
+	0x8F, 0xA7, 0x9A, 0xFC, 0x92, 0x44, 0x00, 0x00, /* 0x68-0x6B */
+	0x9A, 0xFB, 0x00, 0x00, 0x95, 0xB1, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x97, /* 0x70-0x73 */
+	0x93, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x9B, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x8D, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x9B, 0x41, 0x94, 0x40, 0x94, 0xDC, /* 0x80-0x83 */
+	0x96, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x44, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x9B, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x57, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x64, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x96, 0xAD, 0x00, 0x00, 0x9B, 0xAA, /* 0x98-0x9B */
+	0x00, 0x00, 0x9B, 0x42, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x45, /* 0xA0-0xA3 */
+	0xED, 0x88, 0x91, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x96, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x93, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x46, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x96, 0x85, 0xED, 0x89, 0x8D, 0xC8, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xA8, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x47, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x8E, 0x6F, 0x00, 0x00, 0x8E, 0x6E, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x88, 0xB7, 0x8C, 0xC6, 0x00, 0x00, 0x90, 0xA9, /* 0xD0-0xD3 */
+	0x88, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x9B, 0x4B, 0x9B, 0x4C, 0x00, 0x00, /* 0xD8-0xDB */
+	0x9B, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x89, 0x57, 0x8A, 0xAD, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x9B, 0x48, 0x00, 0x00, 0x96, 0xC3, 0x95, 0x50, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0xA6, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xF7, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x70, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5A[512] = {
+	0x00, 0x00, 0x88, 0xD0, 0x00, 0x00, 0x88, 0xA1, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x9B, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x9B, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x96, 0xBA, 0x00, 0x00, 0x9B, 0x52, 0x00, 0x00, /* 0x18-0x1B */
+	0x9B, 0x50, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x4E, /* 0x1C-0x1F */
+	0x90, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x9B, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x95, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xE2, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x9B, 0x56, 0x9B, 0x57, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x8F, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9B, 0x53, 0x98, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x6B, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x9B, 0x55, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xA5, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x58, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x77, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x59, 0x00, 0x00, /* 0x68-0x6B */
+	0x9B, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0xB9, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x7D, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x5A, 0x95, 0x51, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9B, 0x5B, 0x9B, 0x5F, 0x9B, 0x5C, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x89, 0xC5, 0x9B, 0x5E, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x8E, 0xB9, 0x00, 0x00, 0x9B, 0x5D, /* 0xC8-0xCB */
+	0x8C, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x9B, 0x6B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x64, 0x9B, 0x61, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x92, 0x84, 0x00, 0x00, 0x9B, 0x60, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x62, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x9B, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x65, 0x9B, 0x66, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_5B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x8A, 0xF0, 0x00, 0x00, 0x9B, 0x68, /* 0x08-0x0B */
+	0x9B, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x69, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xEC, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x6C, 0x00, 0x00, /* 0x28-0x2B */
+	0x92, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x89, 0x64, 0x00, 0x00, 0x9B, 0x6A, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x6D, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0x6E, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9B, 0x71, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x6F, /* 0x40-0x43 */
+	0x00, 0x00, 0x9B, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8E, 0x71, 0x9B, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x8D, 0x45, 0x9B, 0x73, 0xED, 0x8A, 0x8E, 0x9A, /* 0x54-0x57 */
+	0x91, 0xB6, 0x00, 0x00, 0x9B, 0x74, 0x9B, 0x75, /* 0x58-0x5B */
+	0x8E, 0x79, 0x8D, 0x46, 0x00, 0x00, 0x96, 0xD0, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x47, /* 0x60-0x63 */
+	0x8C, 0xC7, 0x9B, 0x76, 0x8A, 0x77, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x9B, 0x77, 0x00, 0x00, 0x91, 0xB7, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x9B, 0x78, 0x9B, 0xA1, 0x00, 0x00, 0x9B, 0x79, /* 0x70-0x73 */
+	0x00, 0x00, 0x9B, 0x7A, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x9B, 0x7B, 0x00, 0x00, 0x9B, 0x7D, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x9B, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x80, /* 0x80-0x83 */
+	0x00, 0x00, 0x91, 0xEE, 0x00, 0x00, 0x89, 0x46, /* 0x84-0x87 */
+	0x8E, 0xE7, 0x88, 0xC0, 0x00, 0x00, 0x91, 0x76, /* 0x88-0x8B */
+	0x8A, 0xAE, 0x8E, 0xB3, 0x00, 0x00, 0x8D, 0x47, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x93, 0x86, 0x00, 0x00, 0x8F, 0x40, /* 0x94-0x97 */
+	0x8A, 0xAF, 0x92, 0x88, 0x92, 0xE8, 0x88, 0xB6, /* 0x98-0x9B */
+	0x8B, 0x58, 0x95, 0xF3, 0x00, 0x00, 0x8E, 0xC0, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x71, 0x90, 0xE9, /* 0xA0-0xA3 */
+	0x8E, 0xBA, 0x97, 0x47, 0x9B, 0x81, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x7B, 0x00, 0x00, /* 0xAC-0xAF */
+	0x8D, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x51, /* 0xB0-0xB3 */
+	0x89, 0x83, 0x8F, 0xAA, 0x89, 0xC6, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9B, 0x82, 0x97, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x68, /* 0xBC-0xBF */
+	0xED, 0x8B, 0x00, 0x00, 0x8E, 0xE2, 0x9B, 0x83, /* 0xC0-0xC3 */
+	0x8A, 0xF1, 0x93, 0xD0, 0x96, 0xA7, 0x9B, 0x84, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x9B, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x95, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x9B, 0x87, 0x00, 0x00, 0x8A, 0xA6, 0x8B, 0xF5, /* 0xD0-0xD3 */
+	0x9B, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xED, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB0, /* 0xD8-0xDB */
+	0x00, 0x00, 0x90, 0x51, 0x9B, 0x8B, 0x8E, 0x40, /* 0xDC-0xDF */
+	0x00, 0x00, 0x89, 0xC7, 0x9B, 0x8A, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9B, 0x88, 0x9B, 0x8C, 0x9B, 0x89, 0x94, 0x4A, /* 0xE4-0xE7 */
+	0x9E, 0xCB, 0x90, 0x52, 0x00, 0x00, 0x9B, 0x8D, /* 0xE8-0xEB */
+	0xED, 0x8E, 0x00, 0x00, 0x97, 0xBE, 0x00, 0x00, /* 0xEC-0xEF */
+	0x9B, 0x8E, 0x00, 0x00, 0x00, 0x00, 0x9B, 0x90, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x92, 0x9E, 0x9B, 0x8F, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x90, 0xA1, 0x00, 0x00, 0x8E, 0x9B, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xCE, 0x8E, 0xF5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5C[512] = {
+	0x00, 0x00, 0x95, 0x95, 0x90, 0xEA, 0x00, 0x00, /* 0x00-0x03 */
+	0x8E, 0xCB, 0x9B, 0x91, 0x8F, 0xAB, 0x9B, 0x92, /* 0x04-0x07 */
+	0x9B, 0x93, 0x88, 0xD1, 0x91, 0xB8, 0x90, 0x71, /* 0x08-0x0B */
+	0x00, 0x00, 0x9B, 0x94, 0x93, 0xB1, 0x8F, 0xAC, /* 0x0C-0x0F */
+	0x00, 0x00, 0x8F, 0xAD, 0x00, 0x00, 0x9B, 0x95, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xEB, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xAE, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x8F, 0x00, 0x00, /* 0x1C-0x1F */
+	0x9B, 0x96, 0x00, 0x00, 0x9B, 0x97, 0x00, 0x00, /* 0x20-0x23 */
+	0x96, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x9B, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x8B, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8F, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x9B, 0x99, 0x9B, 0x9A, 0x8E, 0xDA, 0x90, 0x4B, /* 0x38-0x3B */
+	0x93, 0xF2, 0x90, 0x73, 0x94, 0xF6, 0x94, 0x41, /* 0x3C-0x3F */
+	0x8B, 0xC7, 0x9B, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x8B, 0x8F, 0x9B, 0x9C, 0x00, 0x00, /* 0x44-0x47 */
+	0x8B, 0xFC, 0x00, 0x00, 0x93, 0xCD, 0x89, 0xAE, /* 0x48-0x4B */
+	0x00, 0x00, 0x8E, 0x72, 0x9B, 0x9D, 0x9B, 0xA0, /* 0x4C-0x4F */
+	0x9B, 0x9F, 0x8B, 0xFB, 0x00, 0x00, 0x9B, 0x9E, /* 0x50-0x53 */
+	0x00, 0x00, 0x93, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xAE, 0x00, 0x00, /* 0x5C-0x5F */
+	0x93, 0x6A, 0x8E, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x91, 0x77, 0x97, 0x9A, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x9B, 0xA2, 0x00, 0x00, 0x9B, 0xA3, 0x93, 0xD4, /* 0x6C-0x6F */
+	0x00, 0x00, 0x8E, 0x52, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xA5, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x9B, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x9B, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8A, 0xF2, 0x9B, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x9B, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x89, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x90, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x91, 0x5A, 0x8A, 0xE2, 0x00, 0x00, 0x9B, 0xAB, /* 0xA8-0xAB */
+	0x96, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x91, 0xD0, 0x00, 0x00, 0x8A, 0x78, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xAD, 0x9B, 0xAF, /* 0xB4-0xB7 */
+	0x8A, 0xDD, 0x00, 0x00, 0xED, 0x91, 0x9B, 0xAC, /* 0xB8-0xBB */
+	0x9B, 0xAE, 0x00, 0x00, 0x9B, 0xB1, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x9B, 0xB0, 0x00, 0x00, 0x9B, 0xB2, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x9B, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x93, 0xBB, 0x8B, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x89, 0xE3, 0x9B, 0xB4, 0x9B, 0xB9, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x9B, 0xB7, 0x00, 0x00, 0x95, 0xF5, /* 0xEC-0xEF */
+	0x95, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xED, 0x92, 0x93, 0x87, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xB6, 0x8F, 0x73, /* 0xF8-0xFB */
+	0x00, 0x00, 0x9B, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x92, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xBA, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xE8, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x9B, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x9B, 0xC1, 0x9B, 0xBB, 0x8A, 0x52, 0x9B, 0xBC, /* 0x14-0x17 */
+	0x9B, 0xC5, 0x9B, 0xC4, 0x9B, 0xC3, 0x9B, 0xBF, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xBE, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xC2, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0x93, /* 0x24-0x27 */
+	0x00, 0x00, 0x95, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x96, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xC9, /* 0x48-0x4B */
+	0x9B, 0xC6, 0x00, 0x00, 0x9B, 0xC8, 0x00, 0x00, /* 0x4C-0x4F */
+	0x97, 0x92, 0x00, 0x00, 0x9B, 0xC7, 0xED, 0x94, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x9B, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x90, 0x93, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x9B, 0xCA, 0xED, 0x97, 0x00, 0x00, 0x8D, 0xB5, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xCB, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xCC, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xCF, 0x00, 0x00, /* 0x80-0x83 */
+	0x9B, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xCD, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x88, /* 0x88-0x8B */
+	0x9B, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x9B, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x9B, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xD0, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x9B, 0xD2, 0x00, 0x00, 0x9B, 0xD3, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xD6, /* 0xB4-0xB7 */
+	0xED, 0x98, 0xED, 0x99, 0x97, 0xE4, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9B, 0xD7, 0x9B, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x9B, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x8A, 0xDE, 0x9B, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xED, 0x9A, 0x00, 0x00, 0x9B, 0xDB, 0x9B, 0xDA, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xDC, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xDD, /* 0xD8-0xDB */
+	0x00, 0x00, 0x90, 0xEC, 0x8F, 0x42, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x8F, 0x84, 0x00, 0x00, 0x91, 0x83, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x8D, 0x48, 0x8D, 0xB6, 0x8D, 0x49, /* 0xE4-0xE7 */
+	0x8B, 0x90, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xDE, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xB7, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x8C, 0xC8, 0x9B, 0xDF, 0x96, 0xA4, /* 0xF0-0xF3 */
+	0x94, 0x62, 0x9B, 0xE0, 0x00, 0x00, 0x8D, 0x4A, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xAA, /* 0xF8-0xFB */
+	0x00, 0x00, 0x92, 0x46, 0x8B, 0xD0, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x73, 0x95, 0x7A, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xBF, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xE1, /* 0x08-0x0B */
+	0x8A, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x9B, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x9F, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x9B, 0xE3, 0x9B, 0xE2, 0x9B, 0xE5, /* 0x18-0x1B */
+	0x00, 0x00, 0x92, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x90, 0x83, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x74, /* 0x28-0x2B */
+	0x00, 0x00, 0x90, 0xC8, 0x00, 0x00, 0x91, 0xD1, /* 0x2C-0x2F */
+	0x8B, 0x41, 0x00, 0x00, 0x00, 0x00, 0x92, 0xA0, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x9B, 0xE6, 0x9B, 0xE7, /* 0x34-0x37 */
+	0x8F, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x96, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9B, 0xEA, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xE9, /* 0x40-0x43 */
+	0x9B, 0xE8, 0x95, 0x9D, 0x00, 0x00, 0x9B, 0xF1, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x96, 0x79, 0x00, 0x00, 0x9B, 0xEB, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x9B, 0xED, 0x96, 0x8B, 0x00, 0x00, 0x9B, 0xEC, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xEE, /* 0x5C-0x5F */
+	0x00, 0x00, 0x94, 0xA6, 0x9B, 0xEF, 0x95, 0xBC, /* 0x60-0x63 */
+	0x9B, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xB1, 0x95, 0xBD, /* 0x70-0x73 */
+	0x94, 0x4E, 0x9B, 0xF2, 0x9B, 0xF3, 0x00, 0x00, /* 0x74-0x77 */
+	0x8D, 0x4B, 0x8A, 0xB2, 0x9B, 0xF4, 0x8C, 0xB6, /* 0x78-0x7B */
+	0x97, 0x63, 0x97, 0x48, 0x8A, 0xF4, 0x9B, 0xF6, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x92, 0xA1, 0x00, 0x00, 0x8D, 0x4C, /* 0x80-0x83 */
+	0x8F, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x94, 0xDD, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xB0, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x98, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x92, 0xEA, 0x95, 0xF7, 0x93, 0x58, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0x4D, 0x00, 0x00, /* 0x98-0x9B */
+	0x95, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9B, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x78, 0x8D, 0xC0, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xC9, /* 0xA8-0xAB */
+	0x00, 0x00, 0x92, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x88, 0xC1, 0x8F, 0x8E, 0x8D, 0x4E, /* 0xB4-0xB7 */
+	0x97, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x9B, 0xF8, 0x9B, 0xF9, 0x94, 0x70, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x9B, 0xFA, 0x97, 0xF5, 0x98, 0x4C, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9B, 0xFC, /* 0xCC-0xCF */
+	0x9B, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x66, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x40, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x43, 0x9C, 0x44, /* 0xD8-0xDB */
+	0x00, 0x00, 0x9C, 0x42, 0x00, 0x00, 0x95, 0x5F, /* 0xDC-0xDF */
+	0x8F, 0xB1, 0x9C, 0x46, 0x9C, 0x45, 0x9C, 0x41, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x9C, 0x47, 0x9C, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x9C, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x9C, 0x4C, 0x9C, 0x4A, 0x00, 0x00, 0x9C, 0x4B, /* 0xF0-0xF3 */
+	0x9C, 0x4D, 0x00, 0x00, 0x89, 0x84, 0x92, 0xEC, /* 0xF4-0xF7 */
+	0x9C, 0x4E, 0x00, 0x00, 0x8C, 0x9A, 0x89, 0xF4, /* 0xF8-0xFB */
+	0x94, 0x55, 0x00, 0x00, 0x9C, 0x4F, 0x93, 0xF9, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5F[512] = {
+	0x00, 0x00, 0x95, 0xD9, 0x00, 0x00, 0x9C, 0x50, /* 0x00-0x03 */
+	0x98, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x9C, 0x51, 0x95, 0xBE, 0x9C, 0x54, /* 0x08-0x0B */
+	0x98, 0x9F, 0x98, 0xAF, 0x00, 0x00, 0x8E, 0xAE, /* 0x0C-0x0F */
+	0x93, 0xF3, 0x9C, 0x55, 0x00, 0x00, 0x8B, 0x7C, /* 0x10-0x13 */
+	0x92, 0xA2, 0x88, 0xF8, 0x9C, 0x56, 0x95, 0xA4, /* 0x14-0x17 */
+	0x8D, 0x4F, 0x00, 0x00, 0x00, 0x00, 0x92, 0x6F, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xED, /* 0x1C-0x1F */
+	0x00, 0x00, 0xED, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x96, 0xED, 0x8C, 0xB7, 0x8C, 0xCA, /* 0x24-0x27 */
+	0x00, 0x00, 0x9C, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x9C, 0x58, 0x00, 0x00, 0x9C, 0x5E, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8E, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xED, 0x9C, 0x92, 0xA3, 0x00, 0x00, 0x8B, 0xAD, /* 0x34-0x37 */
+	0x9C, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x95, 0x4A, 0x00, 0x00, 0x92, 0x65, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x9C, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xED, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x9C, 0x5B, 0x00, 0x00, 0x8B, 0xAE, 0x00, 0x00, /* 0x48-0x4B */
+	0x9C, 0x5C, 0x00, 0x00, 0x9C, 0x5D, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x9C, 0x5F, 0x00, 0x00, 0x93, 0x96, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x60, 0x9C, 0x61, /* 0x54-0x57 */
+	0x00, 0x00, 0x9C, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x9C, 0x53, 0x9C, 0x52, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x9C, 0x63, 0x8C, 0x60, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x46, 0xED, 0x9D, /* 0x64-0x67 */
+	0x00, 0x00, 0x8D, 0xCA, 0x95, 0x56, 0x92, 0xA4, /* 0x68-0x6B */
+	0x95, 0x6A, 0x9C, 0x64, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x8F, 0xB2, 0x89, 0x65, 0x00, 0x00, 0x9C, 0x65, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0x66, /* 0x74-0x77 */
+	0x00, 0x00, 0x96, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x94, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x9C, 0x69, /* 0x7C-0x7F */
+	
+	0x89, 0x9D, 0x90, 0xAA, 0x9C, 0x68, 0x9C, 0x67, /* 0x80-0x83 */
+	0x8C, 0x61, 0x91, 0xD2, 0x00, 0x00, 0x9C, 0x6D, /* 0x84-0x87 */
+	0x9C, 0x6B, 0x00, 0x00, 0x9C, 0x6A, 0x97, 0xA5, /* 0x88-0x8B */
+	0x8C, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8F, 0x99, 0x9C, 0x6C, 0x93, 0x6B, 0x8F, 0x5D, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xBE, /* 0x94-0x97 */
+	0x9C, 0x70, 0x9C, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x6E, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9C, 0x71, 0x8C, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x9C, 0x72, 0x95, 0x9C, 0x8F, 0x7A, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x9C, 0x73, 0x94, 0xF7, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xBF, /* 0xB0-0xB3 */
+	0x92, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xED, 0x9E, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x93, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9C, 0x74, 0x8B, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x53, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x95, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x8A, 0xF5, 0x94, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x75, 0x8E, 0x75, /* 0xD4-0xD7 */
+	0x96, 0x59, 0x96, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x89, 0x9E, 0x9C, 0x7A, 0xED, 0x9F, 0x00, 0x00, /* 0xDC-0xDF */
+	0x92, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9C, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xF5, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x9C, 0xAB, 0x9C, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x94, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x9C, 0x78, 0x00, 0x00, 0x00, 0x00, 0x9C, 0x76, /* 0xF8-0xFB */
+	0x00, 0x00, 0x8D, 0x9A, 0x00, 0x00, 0x9C, 0x7C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_60[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x83, 0x9C, 0x89, /* 0x0C-0x0F */
+	0x9C, 0x81, 0x00, 0x00, 0x93, 0x7B, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x9C, 0x86, 0x95, 0x7C, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x9C, 0x80, 0x00, 0x00, 0x9C, 0x85, /* 0x18-0x1B */
+	0x97, 0xE5, 0x8E, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x91, 0xD3, 0x9C, 0x7D, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x8B, 0x7D, 0x9C, 0x88, 0x90, 0xAB, /* 0x24-0x27 */
+	0x89, 0x85, 0x9C, 0x82, 0x89, 0xF6, 0x9C, 0x87, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xAF, /* 0x2C-0x2F */
+	0x00, 0x00, 0x9C, 0x84, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x8A, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x9C, 0x8C, 0x9C, 0x96, 0x9C, 0x94, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x91, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x90, 0x97, 0xF6, /* 0x48-0x4B */
+	0x00, 0x00, 0x9C, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8B, 0xB0, 0x00, 0x00, 0x8D, 0x50, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x8F, 0x9A, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9C, 0x99, 0x9C, 0x8B, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xED, 0xA0, 0x00, 0x00, 0x9C, 0x8F, /* 0x5C-0x5F */
+	0x9C, 0x7E, 0x00, 0x00, 0x89, 0xF8, 0x9C, 0x93, /* 0x60-0x63 */
+	0x9C, 0x95, 0x92, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x8D, 0xA6, 0x89, 0xB6, 0x9C, 0x8D, 0x9C, 0x98, /* 0x68-0x6B */
+	0x9C, 0x97, 0x8B, 0xB1, 0x00, 0x00, 0x91, 0xA7, /* 0x6C-0x6F */
+	0x8A, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x8C, 0x62, 0x00, 0x00, 0x9C, 0x8E, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x9C, 0x9A, 0x00, 0x00, 0x9C, 0x9D, /* 0x80-0x83 */
+	0x9C, 0x9F, 0xED, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x8E, 0xBB, 0xED, 0xA2, 0x9C, 0xA5, /* 0x88-0x8B */
+	0x92, 0xEE, 0x9C, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xA3, 0x00, 0x00, /* 0x90-0x93 */
+	0x89, 0xF7, 0x00, 0x00, 0x9C, 0xA1, 0x9C, 0xA2, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0x9E, 0x9C, 0xA0, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xE5, /* 0x9C-0x9F */
+	0x97, 0x49, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB3, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x78, 0x9C, 0xA4, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x94, 0x59, 0x88, 0xAB, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xDF, 0x9C, 0x7B, /* 0xB0-0xB3 */
+	0x9C, 0xAA, 0x9C, 0xAE, 0x96, 0xE3, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9C, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x93, 0x89, 0x9C, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x8F, 0xEE, 0x9C, 0xAD, 0x93, 0xD5, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x98, 0x66, 0x00, 0x00, 0x9C, 0xA9, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xED, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x9C, 0xAF, 0x00, 0x00, 0x8D, 0x9B, 0x00, 0x00, /* 0xD8-0xDB */
+	0x90, 0xC9, 0x00, 0x00, 0xED, 0xA3, 0x88, 0xD2, /* 0xDC-0xDF */
+	0x9C, 0xA8, 0x9C, 0xA6, 0x00, 0x00, 0x91, 0x79, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0x9C, /* 0xE4-0xE7 */
+	0x8E, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x91, 0xC4, 0x9C, 0xBB, 0xED, 0xA6, 0x91, 0x7A, /* 0xF0-0xF3 */
+	0x9C, 0xB6, 0x00, 0x00, 0x9C, 0xB3, 0x9C, 0xB4, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x8E, 0xE4, 0x9C, 0xB7, 0x9C, 0xBA, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_61[512] = {
+	0x9C, 0xB5, 0x8F, 0x44, 0x00, 0x00, 0x9C, 0xB8, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xB2, 0x00, 0x00, /* 0x04-0x07 */
+	0x96, 0xFA, 0x96, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x9C, 0xBC, 0x9C, 0xBD, 0x88, 0xD3, /* 0x0C-0x0F */
+	0x00, 0x00, 0xED, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x9C, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xF0, 0x88, 0xA4, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB4, /* 0x1C-0x1F */
+	0xED, 0xA5, 0x9C, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xC1, /* 0x24-0x27 */
+	0x9C, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x9C, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xED, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x9C, 0xC6, 0x00, 0x00, 0x00, 0x00, 0xED, 0xA8, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x9C, 0xC4, 0x9C, 0xC7, 0x9C, 0xBF, 0x9C, 0xC3, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xC8, 0x00, 0x00, /* 0x40-0x43 */
+	0x9C, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xBE, /* 0x44-0x47 */
+	0x8E, 0x9C, 0x00, 0x00, 0x9C, 0xC2, 0x91, 0xD4, /* 0x48-0x4B */
+	0x8D, 0x51, 0x9C, 0xB0, 0x90, 0x54, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xD6, /* 0x50-0x53 */
+	0x00, 0x00, 0x95, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x9C, 0xCC, 0x9C, 0xCD, 0x9C, 0xCE, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x9C, 0xD5, 0x00, 0x00, 0x9C, 0xD4, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x9D, 0x8A, 0xB5, /* 0x60-0x63 */
+	0x00, 0x00, 0x9C, 0xD2, 0x00, 0x00, 0x8C, 0x64, /* 0x64-0x67 */
+	0x8A, 0x53, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xCF, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xB6, 0x9C, 0xD1, /* 0x6C-0x6F */
+	0x88, 0xD4, 0x9C, 0xD3, 0x00, 0x00, 0x9C, 0xCA, /* 0x70-0x73 */
+	0x9C, 0xD0, 0x9C, 0xD7, 0x8C, 0x63, 0x9C, 0xCB, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x7C, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x4A, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xDA, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xDE, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x9E, 0x00, 0x00, /* 0x8C-0x8F */
+	0x97, 0xF7, 0x9C, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x9C, 0xDC, 0x00, 0x00, 0x9C, 0xD9, 0x00, 0x00, /* 0x94-0x97 */
+	0xED, 0xAA, 0x9C, 0xD8, 0x9C, 0xDD, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x95, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x93, 0xB2, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x8C, 0x65, 0x00, 0x00, 0x9C, 0xE0, /* 0xA8-0xAB */
+	0x9C, 0xDB, 0x00, 0x00, 0x9C, 0xE1, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x9B, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xAF, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xE9, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xB6, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xE7, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xE8, 0x8D, 0xA7, /* 0xC4-0xC7 */
+	0x9C, 0xE6, 0x9C, 0xE4, 0x9C, 0xE3, 0x9C, 0xEA, /* 0xC8-0xCB */
+	0x9C, 0xE2, 0x9C, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x89, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C, 0xEE, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x9C, 0xED, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0xA6, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x9C, 0xF1, 0x00, 0x00, 0x9C, 0xEF, 0x9C, 0xE5, /* 0xF4-0xF7 */
+	0x8C, 0x9C, 0x00, 0x00, 0x9C, 0xF0, 0x00, 0x00, /* 0xF8-0xFB */
+	0x9C, 0xF4, 0x9C, 0xF3, 0x9C, 0xF5, 0x9C, 0xF2, /* 0xFC-0xFF */
+};
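(Editor's sketch, not part of the patch.) Each u2c_XX array in this file follows one layout: a 512-byte page for the Unicode range U+XX00..U+XXFF, where bytes 2*n and 2*n+1 hold the double-byte output for code point U+XXnn and a 0x00, 0x00 pair marks an unmapped code point; the lead/trail byte ranges suggest a Shift-JIS/CP932-style target charset. A minimal lookup sketch in C, assuming the page arrays above are in scope; the helper name u2c_page_lookup is ours, not the driver's:

	/* sketch: fetch the double-byte output for the low byte `lo` of a
	 * code point within one page (e.g. u2c_61 for U+61xx); returns 2 on
	 * success or -1 when the entry is 0x00, 0x00 (no mapping) */
	static int u2c_page_lookup(const unsigned char *page, unsigned char lo,
				   unsigned char out[2])
	{
		out[0] = page[lo * 2];
		out[1] = page[lo * 2 + 1];
		return (out[0] || out[1]) ? 2 : -1;
	}

	/* example: u2c_page_lookup(u2c_61, 0x00, out) fills out with
	 * 0x9C 0xB5, the first entry of u2c_61 above */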
+
+static unsigned char u2c_62[512] = {
+	0x9C, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x9C, 0xF7, 0x9C, 0xF8, 0x95, 0xE8, 0x00, 0x00, /* 0x08-0x0B */
+	0x9C, 0xFA, 0x9C, 0xF9, 0x8F, 0x5E, 0x00, 0x00, /* 0x0C-0x0F */
+	0x90, 0xAC, 0x89, 0xE4, 0x89, 0xFA, 0xED, 0xAB, /* 0x10-0x13 */
+	0x9C, 0xFB, 0x00, 0x00, 0x88, 0xBD, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xCA, 0x9C, 0xFC, /* 0x18-0x1B */
+	0x00, 0x00, 0xE6, 0xC1, 0x9D, 0x40, 0x8C, 0x81, /* 0x1C-0x1F */
+	0x00, 0x00, 0x9D, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xED, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x42, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x43, 0x8B, 0x59, /* 0x2C-0x2F */
+	0x9D, 0x44, 0x00, 0x00, 0x9D, 0x45, 0x9D, 0x46, /* 0x30-0x33 */
+	0x91, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x8C, 0xCB, 0x00, 0x00, 0x00, 0x00, 0x96, 0xDF, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x5B, /* 0x3C-0x3F */
+	0x8F, 0x8A, 0x9D, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xEE, /* 0x44-0x47 */
+	0xE7, 0xBB, 0x94, 0xE0, 0x00, 0x00, 0x8E, 0xE8, /* 0x48-0x4B */
+	0x00, 0x00, 0x8D, 0xCB, 0x9D, 0x48, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xC5, /* 0x50-0x53 */
+	0x00, 0x00, 0x95, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x91, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x4B, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x49, 0x00, 0x00, /* 0x5C-0x5F */
+	0x9D, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x4A, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x9D, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xAF, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x88, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x7D, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x94, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x9D, 0x4E, 0x00, 0x00, 0x9D, 0x51, 0x8F, 0xB3, /* 0x7C-0x7F */
+	
+	0x8B, 0x5A, 0x00, 0x00, 0x9D, 0x4F, 0x9D, 0x56, /* 0x80-0x83 */
+	0x8F, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x9D, 0x50, 0x94, 0x63, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x97, 0x7D, 0x9D, 0x52, 0x9D, 0x53, /* 0x90-0x93 */
+	0x9D, 0x57, 0x93, 0x8A, 0x9D, 0x54, 0x8D, 0x52, /* 0x94-0x97 */
+	0x90, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x65, /* 0x98-0x9B */
+	0x94, 0xB2, 0x00, 0x00, 0x91, 0xF0, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xAC, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xE2, /* 0xA8-0xAB */
+	0x9D, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x95, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x92, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x96, 0x95, 0x00, 0x00, 0x9D, 0x5A, /* 0xB8-0xBB */
+	0x89, 0x9F, 0x92, 0x8A, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x63, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x92, 0x53, 0x9D, 0x5D, 0x9D, 0x64, /* 0xC4-0xC7 */
+	0x9D, 0x5F, 0x9D, 0x66, 0x9D, 0x62, 0x00, 0x00, /* 0xC8-0xCB */
+	0x9D, 0x61, 0x94, 0x8F, 0x00, 0x00, 0x9D, 0x5B, /* 0xCC-0xCF */
+	0x89, 0xFB, 0x9D, 0x59, 0x8B, 0x91, 0x91, 0xF1, /* 0xD0-0xD3 */
+	0x9D, 0x55, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x58, /* 0xD4-0xD7 */
+	0x8D, 0x53, 0x90, 0xD9, 0x00, 0x00, 0x8F, 0xB5, /* 0xD8-0xDB */
+	0x9D, 0x60, 0x94, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x8B, 0x92, 0x8A, 0x67, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x8A, 0x87, 0x90, 0x40, 0x9D, 0x68, 0x9D, 0x6D, /* 0xEC-0xEF */
+	0x00, 0x00, 0x9D, 0x69, 0x00, 0x00, 0x8C, 0x9D, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x9D, 0x6E, 0x8E, 0x41, 0x8D, 0x89, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x45, 0x9D, 0x5C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_63[512] = {
+	0x00, 0x00, 0x8E, 0x9D, 0x9D, 0x6B, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x77, /* 0x04-0x07 */
+	0x9D, 0x6C, 0x88, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x9D, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x92, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x8B, 0x93, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xB2, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x6A, /* 0x24-0x27 */
+	0x88, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x8D, 0xC1, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x55, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0xF0, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x94, 0xD2, 0x9D, 0x70, 0x91, 0x7D, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x91, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x8E, 0x4A, 0x9D, 0x71, 0x00, 0x00, 0x9D, 0x73, /* 0x4C-0x4F */
+	0x9D, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x95, 0xDF, 0x00, 0x00, 0x92, 0xBB, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x91, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xF9, /* 0x64-0x67 */
+	0x8E, 0xCC, 0x9D, 0x80, 0x00, 0x00, 0x9D, 0x7E, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x98, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x9E, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x78, 0x8F, 0xB7, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xE6, 0x94, 0x50, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x9D, 0x76, 0x00, 0x00, 0x00, 0x00, 0x91, 0x7C, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x8E, 0xF6, 0x9D, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x8F, 0xB6, 0x00, 0x00, 0x9D, 0x75, 0x9D, 0x7A, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x72, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x74, 0x00, 0x00, /* 0x94-0x97 */
+	0x8C, 0x40, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x7C, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x7C, /* 0x9C-0x9F */
+	0x97, 0xA9, 0x8D, 0xCC, 0x92, 0x54, 0x9D, 0x79, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x90, 0xDA, 0x00, 0x00, 0x8D, 0x54, /* 0xA4-0xA7 */
+	0x90, 0x84, 0x89, 0x86, 0x91, 0x5B, 0x9D, 0x77, /* 0xA8-0xAB */
+	0x8B, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x66, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x92, 0xCD, 0x9D, 0x7D, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x7E, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x81, 0x00, 0x00, /* 0xBC-0xBF */
+	0x9D, 0x83, 0x00, 0x00, 0x00, 0x00, 0x91, 0xB5, /* 0xC0-0xC3 */
+	0x9D, 0x89, 0x00, 0x00, 0x9D, 0x84, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x9D, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x60, /* 0xCC-0xCF */
+	0x92, 0xF1, 0x00, 0x00, 0x9D, 0x87, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x4B, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x67, 0x8A, 0xB7, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x88, 0xAC, 0x00, 0x00, 0x9D, 0x85, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x9D, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xF6, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x89, 0x87, 0xED, 0xAD, 0x9D, 0x88, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x68, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_64[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x8C, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x91, 0xB9, 0x00, 0x00, 0x9D, 0x93, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x8D, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x8A, 0x9D, 0x91, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x9D, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x8E, 0x00, 0x00, /* 0x24-0x27 */
+	0x9D, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x94, 0xC0, 0x93, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x9D, 0x8B, 0x00, 0x00, 0x9D, 0x8F, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x67, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xEF, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xDB, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x97, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x93, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xED, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x94, /* 0x64-0x67 */
+	0x00, 0x00, 0x96, 0x80, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0x95, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x96, 0x00, 0x00, /* 0x74-0x77 */
+	0x96, 0xCC, 0x00, 0x00, 0x90, 0xA0, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x82, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x9D, 0x9D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x54, 0x9D, 0x9A, /* 0x90-0x93 */
+	0x00, 0x00, 0x9D, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x51, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xED, 0xAF, 0x93, 0xB3, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x93, 0x50, 0x9D, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x9D, 0x9C, 0x00, 0x00, 0x95, 0x8F, /* 0xA8-0xAB */
+	0x00, 0x00, 0x94, 0x64, 0x8E, 0x42, 0x00, 0x00, /* 0xAC-0xAF */
+	0x90, 0xEF, 0x00, 0x00, 0x96, 0x6F, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x8A, 0x68, 0x00, 0x00, 0x9D, 0xA3, /* 0xB8-0xBB */
+	0x9D, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x97, 0x69, 0x9D, 0xA5, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x9D, 0xA1, 0x00, 0x00, 0x9D, 0xA2, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x91, 0x80, 0xED, 0xB0, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xA0, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x9D, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x9D, 0xA4, 0x00, 0x00, 0x9D, 0x9F, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x9D, 0xA9, 0x9D, 0xAA, 0x93, 0x46, 0x9D, 0xAC, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x43, 0x9D, 0xA7, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x8B, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xAD, /* 0xEC-0xEF */
+	0x00, 0x00, 0x9D, 0xA6, 0x9D, 0xB1, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x9D, 0xB0, 0x00, 0x00, 0x9D, 0xAF, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x9D, 0xB4, 0x8F, 0xEF, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_65[512] = {
+	0x9D, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x9D, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x9D, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x9D, 0xB6, 0x9D, 0x90, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xB9, /* 0x20-0x23 */
+	0x9D, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0x98, 0x9D, 0xBA, /* 0x28-0x2B */
+	0x9D, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x78, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x9D, 0xBB, 0x9D, 0xBC, 0x9D, 0xBE, 0x9D, 0xBD, /* 0x34-0x37 */
+	0x9D, 0xBF, 0x89, 0xFC, 0x00, 0x00, 0x8D, 0x55, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xFA, 0x90, 0xAD, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x8C, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x9D, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x9D, 0xC4, 0xED, 0xB1, 0x95, 0x71, /* 0x4C-0x4F */
+	0x00, 0x00, 0x8B, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x9D, 0xC3, 0x9D, 0xC2, 0x94, 0x73, /* 0x54-0x57 */
+	0x9D, 0xC5, 0x8B, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x9D, 0xC7, 0x9D, 0xC6, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xB8, 0x8E, 0x55, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xD6, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x8C, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x90, 0x94, 0x00, 0x00, 0x9D, 0xC8, 0x00, 0x00, /* 0x70-0x73 */
+	0x90, 0xAE, 0x93, 0x47, 0x00, 0x00, 0x95, 0x7E, /* 0x74-0x77 */
+	0x9D, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xCA, 0x9D, 0xCB, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xB6, /* 0x84-0x87 */
+	0x9B, 0x7C, 0x90, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x95, 0x6B, 0x00, 0x00, 0x8D, 0xD6, 0x00, 0x00, /* 0x8C-0x8F */
+	0x94, 0xE3, 0x94, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x6C, /* 0x94-0x97 */
+	0x00, 0x00, 0x97, 0xBF, 0x00, 0x00, 0x9D, 0xCD, /* 0x98-0x9B */
+	0x8E, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xCE, /* 0x9C-0x9F */
+	0x00, 0x00, 0x88, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x8B, 0xD2, 0x90, 0xCB, 0x00, 0x00, 0x95, 0x80, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xCF, /* 0xA8-0xAB */
+	0x8E, 0x61, 0x92, 0x66, 0x00, 0x00, 0x8E, 0x7A, /* 0xAC-0xAF */
+	0x90, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xD0, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x95, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x89, 0x97, 0x8E, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x9D, 0xD3, 0x00, 0x00, 0x9D, 0xD1, /* 0xC0-0xC3 */
+	0x9D, 0xD4, 0x97, 0xB7, 0x9D, 0xD2, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xF9, /* 0xC8-0xCB */
+	0x9D, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x91, 0xB0, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xD6, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xF8, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x9D, 0xD8, 0x00, 0x00, 0x9D, 0xD7, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x9D, 0xD9, 0x9D, 0xDA, 0x8A, 0xF9, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x93, 0xFA, 0x92, 0x55, 0x8B, 0x8C, /* 0xE4-0xE7 */
+	0x8E, 0x7C, 0x91, 0x81, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x8F, 0x7B, 0x88, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x9D, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xA0, 0x9D, 0xDF, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_66[512] = {
+	0xED, 0xB2, 0x00, 0x00, 0x8D, 0x56, 0x9D, 0xDE, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xA9, 0x8F, 0xB8, /* 0x04-0x07 */
+	0x00, 0x00, 0xED, 0xB5, 0x9D, 0xDD, 0x00, 0x00, /* 0x08-0x0B */
+	0x8F, 0xB9, 0x00, 0x00, 0x96, 0xBE, 0x8D, 0xA8, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xD5, /* 0x10-0x13 */
+	0x90, 0xCC, 0xED, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x9D, 0xE4, 0x00, 0x00, 0xED, 0xB7, 0x90, 0xAF, /* 0x1C-0x1F */
+	0x89, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xED, 0xB8, 0x8F, 0x74, 0x00, 0x00, 0x96, 0x86, /* 0x24-0x27 */
+	0x8D, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x8F, 0xBA, 0xED, 0xB6, 0x90, 0xA5, /* 0x2C-0x2F */
+	0x00, 0x00, 0xED, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x9D, 0xE3, 0x9D, 0xE1, 0x9D, 0xE2, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xB4, /* 0x38-0x3B */
+	0x92, 0x8B, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x45, /* 0x3C-0x3F */
+	0x00, 0x00, 0x9D, 0xE8, 0x8E, 0x9E, 0x8D, 0x57, /* 0x40-0x43 */
+	0x9D, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x9D, 0xE7, 0x00, 0x00, 0x90, 0x57, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9D, 0xE5, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x4E, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xBA, /* 0x54-0x57 */
+	0x00, 0x00, 0xED, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x9D, 0xEA, 0x9D, 0xE9, 0x9D, 0xEE, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xEF, 0x00, 0x00, /* 0x60-0x63 */
+	0x9D, 0xEB, 0xED, 0xB9, 0x8A, 0x41, 0x9D, 0xEC, /* 0x64-0x67 */
+	0x9D, 0xED, 0x94, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x81, 0x8C, 0x69, /* 0x6C-0x6F */
+	0x9D, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xED, 0xBD, /* 0x70-0x73 */
+	0x90, 0xB0, 0x00, 0x00, 0x8F, 0xBB, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x71, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x8B, 0xC5, 0x00, 0x00, 0x9D, 0xF1, /* 0x80-0x83 */
+	0x9D, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x89, 0xC9, /* 0x84-0x87 */
+	0x9D, 0xF2, 0x9D, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xF3, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x8F, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x67, 0x88, 0xC3, /* 0x94-0x97 */
+	0x9D, 0xF6, 0xED, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x9D, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xED, 0xBF, 0x00, 0x00, 0x92, 0xA8, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xEF, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x62, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xE9, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xC0, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x96, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9E, 0x41, 0x9D, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x9D, 0xFC, 0x00, 0x00, 0x9D, 0xFB, 0xED, 0xC1, /* 0xBC-0xBF */
+	0x00, 0x00, 0x9D, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x9E, 0x40, 0x00, 0x00, 0x00, 0x00, 0x93, 0xDC, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x9D, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x42, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x8F, 0x8C, 0x9E, 0x43, 0x00, 0x00, /* 0xD8-0xDB */
+	0x97, 0x6A, 0x94, 0x98, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x9E, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x46, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x9E, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x9E, 0x48, 0x00, 0x00, 0x8B, 0xC8, 0x89, 0x67, /* 0xF0-0xF3 */
+	0x8D, 0x58, 0x9E, 0x49, 0x00, 0x00, 0x9E, 0x4A, /* 0xF4-0xF7 */
+	0x8F, 0x91, 0x91, 0x82, 0xED, 0xC2, 0xED, 0x4A, /* 0xF8-0xFB */
+	0x99, 0xD6, 0x91, 0x5D, 0x91, 0x5C, 0x91, 0xD6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_67[512] = {
+	0x8D, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x98, 0xF0, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x8C, 0x8E, 0x97, 0x4C, 0x00, 0x00, 0x95, 0xFC, /* 0x08-0x0B */
+	0x00, 0x00, 0x95, 0x9E, 0xED, 0xC3, 0x9E, 0x4B, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x8D, 0xF1, 0x92, 0xBD, 0x9E, 0x4C, 0x98, 0x4E, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x5D, /* 0x18-0x1B */
+	0x00, 0x00, 0x92, 0xA9, 0x9E, 0x4D, 0x8A, 0xFA, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x4E, 0x9E, 0x4F, /* 0x24-0x27 */
+	0x96, 0xD8, 0x00, 0x00, 0x96, 0xA2, 0x96, 0x96, /* 0x28-0x2B */
+	0x96, 0x7B, 0x8E, 0x44, 0x9E, 0x51, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8E, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x96, 0x70, 0x00, 0x00, 0x9E, 0x53, 0x9E, 0x56, /* 0x34-0x37 */
+	0x9E, 0x55, 0x00, 0x00, 0x8A, 0xF7, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x8B, 0x80, 0x00, 0x00, 0x9E, 0x52, /* 0x3C-0x3F */
+	0x00, 0x00, 0x9E, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x57, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x90, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x9B, 0x88, 0xC7, /* 0x4C-0x4F */
+	0x8D, 0xDE, 0x91, 0xBA, 0x00, 0x00, 0x8E, 0xDB, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xF1, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9E, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x93, 0x6D, 0x00, 0x00, 0x9E, 0x58, 0x91, 0xA9, /* 0x5C-0x5F */
+	0x9E, 0x59, 0x8F, 0xF0, 0x96, 0xDB, 0x9E, 0x5B, /* 0x60-0x63 */
+	0x9E, 0x5C, 0x97, 0x88, 0xED, 0xC5, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x61, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x8D, 0x59, 0x00, 0x00, 0x94, 0x74, /* 0x6C-0x6F */
+	0x9E, 0x5E, 0x93, 0x8C, 0x9D, 0xDC, 0x9D, 0xE0, /* 0x70-0x73 */
+	0x00, 0x00, 0x8B, 0x6E, 0x00, 0x00, 0x94, 0x66, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x9E, 0x60, 0x00, 0x00, 0x8F, 0xBC, 0x94, 0xC2, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x9E, 0x66, 0x00, 0x00, 0x94, 0xF8, /* 0x84-0x87 */
+	0x00, 0x00, 0x9E, 0x5D, 0x00, 0x00, 0x9E, 0x63, /* 0x88-0x8B */
+	0x9E, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x90, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x96, 0x8D, 0x00, 0x00, 0x97, 0xD1, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x87, 0x00, 0x00, /* 0x98-0x9B */
+	0x89, 0xCA, 0x8E, 0x7D, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x98, 0x67, 0x9E, 0x65, 0x90, 0x95, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x64, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x9E, 0x5F, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xCD, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x6B, /* 0xB0-0xB3 */
+	0x9E, 0x69, 0x00, 0x00, 0x89, 0xCB, 0x9E, 0x67, /* 0xB4-0xB7 */
+	0x9E, 0x6D, 0x9E, 0x73, 0x00, 0x00, 0xED, 0xC6, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xED, 0xC8, 0x91, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x95, 0xBF, 0x00, 0x00, 0x9E, 0x75, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x41, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x74, 0x94, 0x90, /* 0xCC-0xCF */
+	0x96, 0x5E, 0x8A, 0xB9, 0x00, 0x00, 0x90, 0xF5, /* 0xD0-0xD3 */
+	0x8F, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x92, 0xD1, 0x00, 0x00, 0x97, 0x4D, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x9E, 0x70, 0x9E, 0x6F, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x71, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9E, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x76, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x9E, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x9E, 0x6A, 0x00, 0x00, 0x9E, 0x72, 0x9E, 0x68, /* 0xEC-0xEF */
+	0x00, 0x00, 0x92, 0x8C, 0x00, 0x00, 0x96, 0xF6, /* 0xF0-0xF3 */
+	0x8E, 0xC4, 0x8D, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0xB8, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x8F, 0x8A, 0x60, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_68[512] = {
+	0x00, 0x00, 0xED, 0xC9, 0x92, 0xCC, 0x93, 0xC8, /* 0x00-0x03 */
+	0x89, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xF0, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xB2, 0x8C, 0x49, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x78, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x8D, 0x5A, 0x8A, 0x9C, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x9E, 0x7A, 0x8A, 0x94, 0x9E, 0x81, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x7D, 0x00, 0x00, /* 0x30-0x33 */
+	0x90, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x8A, 0x6A, 0x8D, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x8A, 0x69, 0x8D, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x9E, 0x7B, 0x8C, 0x85, 0x8C, 0x6A, 0x93, 0x8D, /* 0x40-0x43 */
+	0xED, 0xCA, 0x00, 0x00, 0x9E, 0x79, 0x00, 0x00, /* 0x44-0x47 */
+	0x88, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x9E, 0x7C, 0x9E, 0x7E, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8B, 0xCB, 0x8C, 0x4B, 0xED, 0xC7, 0x8A, 0xBA, /* 0x50-0x53 */
+	0x8B, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9E, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x8D, 0xF7, 0x96, 0x91, 0x00, 0x00, 0x8E, 0x56, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x83, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x4F, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x9E, 0x8F, 0x00, 0x00, 0x89, 0xB1, 0x9E, 0x84, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0x95, 0x9E, 0x85, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x97, 0xC0, 0x00, 0x00, 0x9E, 0x8C, /* 0x80-0x83 */
+	0x00, 0x00, 0x94, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x9E, 0x94, 0x00, 0x00, 0x9E, 0x87, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xB2, /* 0x90-0x93 */
+	0x9E, 0x89, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x5B, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x8B, /* 0x98-0x9B */
+	0x00, 0x00, 0x9E, 0x8A, 0x00, 0x00, 0x9E, 0x86, /* 0x9C-0x9F */
+	0x9E, 0x91, 0x00, 0x00, 0x8F, 0xBD, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x9A, 0xEB, 0x8C, 0xE6, /* 0xA4-0xA7 */
+	0x97, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x9E, 0x88, 0x00, 0x00, 0x92, 0xF2, /* 0xAC-0xAF */
+	0x8A, 0x42, 0x8D, 0xAB, 0x00, 0x00, 0x9E, 0x80, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x9E, 0x90, 0x8A, 0x81, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x9E, 0x8E, 0x9E, 0x92, 0x00, 0x00, /* 0xB8-0xBB */
+	0x93, 0x8E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x8A, 0xFC, 0x00, 0x00, 0x9E, 0xB0, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xED, 0x48, 0x96, 0xC7, 0x9E, 0x97, 0x8A, 0xFB, /* 0xC8-0xCB */
+	0x00, 0x00, 0x9E, 0x9E, 0x00, 0x00, 0xED, 0xCB, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x5F, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x9E, 0x9F, 0x9E, 0xA1, 0x00, 0x00, 0x9E, 0xA5, /* 0xD4-0xD7 */
+	0x9E, 0x99, 0x00, 0x00, 0x92, 0x49, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x8F, /* 0xDC-0xDF */
+	0x9E, 0xA9, 0x9E, 0x9C, 0x00, 0x00, 0x9E, 0xA6, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xA0, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x58, 0x9E, 0xAA, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xB1, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x9E, 0xA8, 0x8A, 0xBB, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_69[512] = {
+	0x98, 0x6F, 0x9E, 0x96, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x9E, 0xA4, 0x88, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x9E, 0x98, 0x00, 0x00, 0x00, 0x00, 0x96, 0xB8, /* 0x08-0x0B */
+	0x9E, 0x9D, 0x90, 0x41, 0x92, 0xC5, 0x9E, 0x93, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xA3, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x90, 0x9A, 0x9E, 0xAD, 0x8A, 0x91, /* 0x18-0x1B */
+	0x8C, 0x9F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x9E, 0xAF, 0x9E, 0x9A, 0x9E, 0xAE, /* 0x20-0x23 */
+	0x00, 0x00, 0x9E, 0xA7, 0x9E, 0x9B, 0x00, 0x00, /* 0x24-0x27 */
+	0x9E, 0xAB, 0x00, 0x00, 0x9E, 0xAC, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x9E, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x93, 0xCC, 0x00, 0x00, 0x9E, 0xA2, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x9E, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x9E, 0xBB, 0x00, 0x00, 0x92, 0xD6, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x6B, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x96, /* 0x50-0x53 */
+	0x9E, 0xB6, 0x91, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9E, 0xBC, 0x91, 0x5E, 0x00, 0x00, /* 0x58-0x5B */
+	0x9E, 0xB3, 0x9E, 0xC0, 0x9E, 0xBF, 0x00, 0x00, /* 0x5C-0x5F */
+	0x93, 0xED, 0x9E, 0xBE, 0x93, 0xE8, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xED, 0xCD, 0x00, 0x00, 0x9E, 0xC2, 0x9E, 0xB5, /* 0x68-0x6B */
+	0x00, 0x00, 0x8B, 0xC6, 0x9E, 0xB8, 0x8F, 0x7C, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x80, /* 0x70-0x73 */
+	0x9E, 0xBA, 0x8B, 0xC9, 0x00, 0x00, 0x9E, 0xB2, /* 0x74-0x77 */
+	0x9E, 0xB4, 0x9E, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x98, 0x4F, 0x8A, 0x79, 0x9E, 0xB7, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x9E, 0xC1, 0x8A, 0x54, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xE5, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x7C, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x9E, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x98, 0x50, 0x9E, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xED, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x90, 0x59, /* 0x98-0x9B */
+	0x9E, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9E, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xD0, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xC4, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x9E, 0xE1, 0x9E, 0xC3, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x9E, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xCE, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xC9, 0x9E, 0xC6, /* 0xBC-0xBF */
+	0x00, 0x00, 0x9E, 0xC7, 0x00, 0x00, 0x9E, 0xCF, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xA0, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xCC, 0x8D, 0x5C, /* 0xC8-0xCB */
+	0x92, 0xC6, 0x91, 0x84, 0x9E, 0xCA, 0x00, 0x00, /* 0xCC-0xCF */
+	0x9E, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xC8, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x97, 0x6C, 0x96, 0x8A, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x9E, 0xCD, 0x9E, 0xD7, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD0, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xDF, /* 0xE4-0xE7 */
+	0x9E, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x9E, 0xE5, /* 0xE8-0xEB */
+	0x00, 0x00, 0x9E, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xDE, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x9E, 0xDD, 0x00, 0x00, 0x92, 0xCE, /* 0xF8-0xFB */
+	0x00, 0x00, 0x91, 0x85, 0x00, 0x00, 0x9E, 0xDB, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6A[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xD9, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x9E, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xE6, 0x94, 0xF3, /* 0x08-0x0B */
+	0x9E, 0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xE7, 0x9E, 0xEA, /* 0x10-0x13 */
+	0x9E, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x92, 0x94, /* 0x14-0x17 */
+	0x00, 0x00, 0x95, 0x57, 0x00, 0x00, 0x9E, 0xDA, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xE2, 0x8F, 0xBE, /* 0x1C-0x1F */
+	0x00, 0x00, 0x96, 0xCD, 0x9E, 0xF6, 0x9E, 0xE9, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x8C, 0xA0, 0x89, 0xA1, 0x8A, 0x7E, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xD1, 0x00, 0x00, /* 0x2C-0x2F */
+	0xED, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x8F, 0xBF, 0x9E, 0xEE, 0x00, 0x00, /* 0x34-0x37 */
+	0x9E, 0xF5, 0x8E, 0xF7, 0x8A, 0x92, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x92, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x9E, 0xEB, 0x00, 0x00, 0xED, 0xD3, 0x9E, 0xF0, /* 0x44-0x47 */
+	0x9E, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xB4, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x8B, 0x6B, 0x9E, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x40, /* 0x5C-0x5F */
+	0x00, 0x00, 0x93, 0xC9, 0x9E, 0xF1, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xF3, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xD2, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xED, 0xED, 0xD4, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x9E, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD5, 0x8A, 0x80, /* 0x7C-0x7F */
+	
+	0x92, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x9E, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x9E, 0xF8, 0x8C, 0xE7, 0x00, 0x00, /* 0x8C-0x8F */
+	0x9E, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x40, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x9E, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x9E, 0xF9, 0x00, 0x00, 0x9E, 0xFB, 0x9E, 0xFC, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x4B, 0x00, 0x00, /* 0xA8-0xAB */
+	0x9F, 0x47, 0x00, 0x00, 0x9E, 0x8D, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x46, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9F, 0x45, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x42, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x9E, 0xE8, 0x9F, 0x44, 0x9F, 0x43, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x9F, 0x49, 0x00, 0x00, 0x98, 0x45, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x4C, 0x8B, 0xF9, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x48, 0x9F, 0x4A, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD6, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xED, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x94, 0xA5, 0x00, 0x00, 0x9F, 0x4D, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x51, 0x9F, 0x4E, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_6B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x97, 0x93, 0x9F, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x9E, 0xDC, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x52, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x53, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x89, 0x54, 0x00, 0x00, 0x9F, 0x55, /* 0x1C-0x1F */
+	0x8C, 0x87, 0x8E, 0x9F, 0x00, 0x00, 0x8B, 0xD3, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xA2, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x7E, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x57, /* 0x34-0x37 */
+	0x9F, 0x56, 0x9F, 0x59, 0x8B, 0x5C, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x8B, 0xD4, 0x8A, 0xBC, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x5C, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x5B, /* 0x44-0x47 */
+	0x00, 0x00, 0x9F, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x89, 0xCC, 0x00, 0x00, 0x92, 0x56, 0x00, 0x00, /* 0x4C-0x4F */
+	0x9F, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xBD, /* 0x50-0x53 */
+	0x9F, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9F, 0x5F, 0x00, 0x00, 0x9F, 0x61, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x62, /* 0x5C-0x5F */
+	0x00, 0x00, 0x9F, 0x63, 0x8E, 0x7E, 0x90, 0xB3, /* 0x60-0x63 */
+	0x8D, 0x9F, 0x00, 0x00, 0x95, 0x90, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x95, 0xE0, 0x98, 0x63, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x95, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0xCE, /* 0x70-0x73 */
+	0x97, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x9F, 0x64, 0x9F, 0x65, 0x00, 0x00, 0x8E, 0x80, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x66, /* 0x7C-0x7F */
+	
+	0x9F, 0x67, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x69, /* 0x80-0x83 */
+	0x9F, 0x68, 0x00, 0x00, 0x96, 0x77, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x8F, 0x7D, 0x8E, 0xEA, 0x8E, 0x63, /* 0x88-0x8B */
+	0x00, 0x00, 0x9F, 0x6A, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x9F, 0x6C, 0x90, 0x42, 0x00, 0x00, /* 0x94-0x97 */
+	0x9F, 0x6B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x6D, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x9F, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x6F, 0x9F, 0x70, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x71, /* 0xAC-0xAF */
+	0x00, 0x00, 0x9F, 0x73, 0x9F, 0x72, 0x9F, 0x74, /* 0xB0-0xB3 */
+	0x89, 0xA3, 0x92, 0x69, 0x00, 0x00, 0x9F, 0x75, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x45, 0x8A, 0x6B, /* 0xB8-0xBB */
+	0x9F, 0x76, 0x00, 0x00, 0x00, 0x00, 0x93, 0x61, /* 0xBC-0xBF */
+	0x9A, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x8B, 0x42, 0x9F, 0x77, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x78, /* 0xC8-0xCB */
+	0x00, 0x00, 0x95, 0xEA, 0x96, 0x88, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xC5, 0x9F, 0x79, /* 0xD0-0xD3 */
+	0x94, 0xE4, 0x00, 0x00, 0xED, 0xD8, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x94, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x96, 0xD1, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x7A, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x7C, /* 0xE8-0xEB */
+	0x9F, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x7E, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x7D, /* 0xF0-0xF3 */
+};
+
+static unsigned char u2c_6C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x9F, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x81, /* 0x0C-0x0F */
+	0x00, 0x00, 0x96, 0xAF, 0x00, 0x00, 0x9F, 0x82, /* 0x10-0x13 */
+	0x9F, 0x83, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x43, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x84, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x86, /* 0x20-0x23 */
+	0x9F, 0x85, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x90, 0x85, 0x00, 0x00, 0x00, 0x00, 0x95, 0x58, /* 0x34-0x37 */
+	0x89, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xC3, 0xED, 0xD9, /* 0x3C-0x3F */
+	0x92, 0xF3, 0x8F, 0x60, 0x8B, 0x81, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xC4, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8E, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x9F, 0x88, 0x00, 0x00, 0x8A, 0xBE, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x98, 0x00, 0x00, /* 0x58-0x5B */
+	0xED, 0xDA, 0x93, 0xF0, 0x9F, 0x87, 0x8D, 0x5D, /* 0x5C-0x5F */
+	0x92, 0x72, 0x00, 0x00, 0x9F, 0x89, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x9F, 0x91, 0x00, 0x00, 0x9F, 0x8A, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xDC, /* 0x6C-0x6F */
+	0x91, 0xBF, 0x00, 0x00, 0x8B, 0x82, 0x9F, 0x92, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x88, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x8B, 0x44, 0x9F, 0x90, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x9F, 0x8E, 0x9F, 0x8B, 0x97, 0x80, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xDB, 0x00, 0x00, /* 0x84-0x87 */
+	0x92, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x93, 0xD7, 0x9F, 0x8C, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x9F, 0x94, 0x00, 0x00, 0x9F, 0x93, 0x8C, 0x42, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xAB, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x8D, 0xB9, 0x9F, 0x8D, 0x9F, 0x8F, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x96, 0x76, 0x91, 0xF2, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x97, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x9C, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x9F, 0x9D, 0x00, 0x00, 0x89, 0xCD, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x95, 0xA6, 0x96, 0xFB, 0x9F, 0x9F, 0x8E, 0xA1, /* 0xB8-0xBB */
+	0x8F, 0xC0, 0x9F, 0x98, 0x9F, 0x9E, 0x89, 0x88, /* 0xBC-0xBF */
+	0x00, 0x00, 0x8B, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x9F, 0x95, 0x9F, 0x9A, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x90, 0xF2, 0x94, 0x91, 0x00, 0x00, /* 0xC8-0xCB */
+	0x94, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0x97, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x96, 0x40, 0x00, 0x00, 0x9F, 0x99, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x9F, 0xA2, 0xED, 0xDD, 0x9F, 0xA0, /* 0xD8-0xDB */
+	0x00, 0x00, 0x9F, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x96, 0x41, 0x94, 0x67, 0x8B, 0x83, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x93, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x92, 0x8D, 0x00, 0x00, 0x9F, 0xA3, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xA1, /* 0xEC-0xEF */
+	0x91, 0xD7, 0x9F, 0x96, 0x00, 0x00, 0x89, 0x6A, /* 0xF0-0xF3 */
+};
+
+static unsigned char u2c_6D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xED, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x6D, /* 0x08-0x0B */
+	0x9F, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xAD, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xF4, /* 0x14-0x17 */
+	0x00, 0x00, 0x9F, 0xAA, 0x00, 0x00, 0x97, 0x8C, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xB4, 0x9F, 0xA4, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x92, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x89, 0x6B, 0x8D, 0x5E, 0x9F, 0xA7, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x46, 0x9F, 0xAC, /* 0x30-0x33 */
+	0x00, 0x00, 0x9F, 0xAB, 0x9F, 0xA6, 0x00, 0x00, /* 0x34-0x37 */
+	0x9F, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x88, /* 0x38-0x3B */
+	0x00, 0x00, 0x9F, 0xA8, 0x94, 0x68, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x97, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x8F, 0xF2, 0x90, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x9F, 0xB4, 0x9F, 0xB2, 0x00, 0x00, /* 0x58-0x5B */
+	0x95, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xAF, /* 0x60-0x63 */
+	0x9F, 0xB1, 0x00, 0x00, 0x89, 0x59, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x8D, 0x5F, 0x98, 0x51, 0x00, 0x00, /* 0x68-0x6B */
+	0x8A, 0x5C, 0x00, 0x00, 0x95, 0x82, 0xED, 0xE0, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x97, 0x81, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x43, /* 0x74-0x77 */
+	0x90, 0x5A, 0x9F, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x9F, 0xB8, 0x00, 0x00, 0xED, 0xDF, /* 0x84-0x87 */
+	0x8F, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x97, 0x4F, 0x00, 0x00, 0x9F, 0xB5, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xB0, /* 0x90-0x93 */
+	0x00, 0x00, 0x9F, 0xB6, 0xED, 0xE1, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x97, 0xDC, 0x00, 0x00, 0x93, 0x93, /* 0x98-0x9B */
+	0x93, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xED, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x55, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x74, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x9F, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x9F, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x97, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x97, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x9F, 0xC6, 0x9F, 0xC0, 0x9F, 0xBD, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xD2, /* 0xC8-0xCB */
+	0x9F, 0xC3, 0x00, 0x00, 0x00, 0x00, 0xED, 0xE3, /* 0xCC-0xCF */
+	0x00, 0x00, 0x8F, 0x69, 0x9F, 0xC5, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x9F, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x93, 0x91, 0x9F, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xC2, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x92, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9F, 0xC9, 0x00, 0x00, 0x9F, 0xBE, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x9F, 0xC4, 0x00, 0x00, 0x9F, 0xCB, 0x88, 0xFA, /* 0xE8-0xEB */
+	0x9F, 0xC1, 0x00, 0x00, 0x9F, 0xCC, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x90, 0x5B, 0xED, 0xE5, 0x8F, 0x7E, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x95, 0xA3, 0x00, 0x00, 0x8D, 0xAC, /* 0xF4-0xF7 */
+	0xED, 0xE4, 0x9F, 0xB9, 0x9F, 0xC7, 0x93, 0x59, /* 0xF8-0xFB */
+	0xED, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x90, 0xB4, 0x00, 0x00, 0x8A, 0x89, /* 0x04-0x07 */
+	0x8D, 0xCF, 0x8F, 0xC2, 0x9F, 0xBB, 0x8F, 0x61, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x6B, /* 0x10-0x13 */
+	0x00, 0x00, 0x9F, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x9F, 0xD0, 0x8F, 0x8D, 0x8C, 0xB8, /* 0x18-0x1B */
+	0x00, 0x00, 0x9F, 0xDF, 0x00, 0x00, 0x9F, 0xD9, /* 0x1C-0x1F */
+	0x8B, 0x94, 0x93, 0x6E, 0x00, 0x00, 0x9F, 0xD4, /* 0x20-0x23 */
+	0x9F, 0xDD, 0x88, 0xAD, 0x89, 0x51, 0xED, 0xE9, /* 0x24-0x27 */
+	0x00, 0x00, 0x89, 0xB7, 0x00, 0x00, 0x9F, 0xD6, /* 0x28-0x2B */
+	0x91, 0xAA, 0x9F, 0xCD, 0x9F, 0xCF, 0x8D, 0x60, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x9F, 0xE0, 0xED, 0xE7, 0x9F, 0xDB, 0x00, 0x00, /* 0x38-0x3B */
+	0xED, 0xEA, 0x00, 0x00, 0x9F, 0xD3, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xDA, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xA9, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x9F, 0xD8, 0x9F, 0xDC, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xCE, 0x00, 0x00, /* 0x54-0x57 */
+	0x8F, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x92, 0x58, /* 0x58-0x5B */
+	0xED, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xD2, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x4E, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xD5, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xCE, 0x93, 0x92, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xD1, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xD7, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x70, 0x8E, 0xBC, /* 0x7C-0x7F */
+	
+	0x96, 0x9E, 0x00, 0x00, 0x9F, 0xE1, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x94, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xED, /* 0x8C-0x8F */
+	0x8C, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x80, 0x00, 0x00, /* 0x94-0x97 */
+	0x9F, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x97, 0xAD, 0x8D, 0x61, 0x00, 0x00, 0x9F, 0xF0, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0xEC, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x9F, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xE2, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xE8, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xEA, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x6E, 0x9F, 0xE5, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x4D, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x9F, 0xE7, 0x00, 0x00, 0xED, 0xEB, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xEF, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x9F, 0xE9, 0x96, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x9F, 0xE4, 0x00, 0x00, 0x8E, 0xA0, /* 0xC8-0xCB */
+	0x9F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x8A, 0x8A, 0x00, 0x00, 0x9F, 0xE6, /* 0xD0-0xD3 */
+	0x9F, 0xEB, 0x9F, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x91, 0xEA, 0x91, 0xD8, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x9F, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x9F, 0xFA, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xF8, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x93, 0x48, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x42, /* 0xF4-0xF7 */
+	0x9F, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0xF6, 0x9F, 0xDE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6F[512] = {
+	0x00, 0x00, 0x8B, 0x99, 0x95, 0x59, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xBD, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x8D, 0x97, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x52, /* 0x0C-0x0F */
+	0x00, 0x00, 0x9F, 0xF2, 0x00, 0x00, 0xE0, 0x41, /* 0x10-0x13 */
+	0x89, 0x89, 0x91, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x94, 0x99, 0x00, 0x00, 0x8A, 0xBF, 0x97, 0xF8, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x9F, /* 0x28-0x2B */
+	0x92, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x9F, 0xF9, 0x9F, 0xFB, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x91, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x40, 0x9F, 0xF7, /* 0x3C-0x3F */
+	0x00, 0x00, 0x9F, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x8A, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x8C, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE0, 0x4E, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x49, /* 0x58-0x5B */
+	0x90, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x83, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x8F, 0x81, 0x00, 0x00, 0xE0, 0x52, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xE0, 0x4B, 0x92, 0xAA, 0xE0, 0x48, /* 0x6C-0x6F */
+	0x92, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE0, 0x6B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xE0, 0x45, 0x00, 0x00, 0xE0, 0x44, 0x00, 0x00, /* 0x78-0x7B */
+	0xE0, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xE0, 0x47, 0xE0, 0x46, 0xE0, 0x4C, 0x00, 0x00, /* 0x80-0x83 */
+	0x90, 0x9F, 0x00, 0x00, 0xE0, 0x43, 0x00, 0x00, /* 0x84-0x87 */
+	0xED, 0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x4F, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE0, 0x50, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xC0, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE0, 0x55, 0x00, 0x00, 0xE0, 0x54, /* 0xA0-0xA3 */
+	0xE0, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x59, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x93, 0x62, 0x00, 0x00, 0xE0, 0x53, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xED, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE0, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x8C, 0x83, 0x91, 0xF7, 0xE0, 0x51, 0x94, 0x5A, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x58, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE0, 0x5D, 0xE0, 0x5B, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE0, 0x5E, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x61, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x5A, /* 0xDC-0xDF */
+	0x8D, 0x8A, 0x94, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x9F, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x94, /* 0xE8-0xEB */
+	0xE0, 0x5C, 0x00, 0x00, 0xE0, 0x60, 0x91, 0xF3, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE0, 0x5F, 0x00, 0x00, 0xE0, 0x4A, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xED, 0xEE, 0xE8, 0x89, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x64, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x68, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_70[512] = {
+	0x00, 0x00, 0xE0, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xED, 0xEF, 0x00, 0x00, 0xED, 0xF0, /* 0x04-0x07 */
+	0x00, 0x00, 0xE0, 0x62, 0x00, 0x00, 0xE0, 0x63, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x67, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE0, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x95, 0x6D, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE0, 0x6D, 0x00, 0x00, 0xE0, 0x6A, 0xE0, 0x69, /* 0x18-0x1B */
+	0x00, 0x00, 0xE0, 0x6C, 0x93, 0xD2, 0xE0, 0x6E, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x95, 0x91, 0xEB, /* 0x24-0x27 */
+	0xED, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x90, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE0, 0x6F, 0x00, 0x00, 0xE0, 0x71, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x70, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x9F, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xE0, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x93, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x73, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xCE, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x94, /* 0x6C-0x6F */
+	0x8A, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x8B, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x8E, 0xDC, 0x8D, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xED, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x98, 0x46, 0x90, 0x86, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x8A, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x75, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xE0, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xF3, /* 0xA8-0xAB */
+	0xE0, 0x78, 0x92, 0x59, 0xE0, 0x7B, 0xE0, 0x76, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x7A, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE0, 0x79, 0x93, 0x5F, 0x88, 0xD7, 0xED, 0x46, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x97, 0xF3, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x7D, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x47, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE0, 0x80, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE0, 0x7E, 0x00, 0x00, 0xE0, 0x7C, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE0, 0x77, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x96, 0x42, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE0, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_71[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xED, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE0, 0x81, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xF4, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x89, 0x8B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE0, 0x84, 0x95, 0xB0, 0x00, 0x00, /* 0x18-0x1B */
+	0xE0, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x96, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xC5, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x52, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x8F, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xF7, 0xED, 0xF8, /* 0x44-0x47 */
+	0x00, 0x00, 0x97, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE0, 0x8A, 0x00, 0x00, 0x90, 0xF7, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE0, 0x86, 0xE0, 0x8B, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x89, 0x8C, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xED, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x89, 0x00, 0x00, /* 0x60-0x63 */
+	0x94, 0x81, 0xE0, 0x85, 0xE0, 0x88, 0x8F, 0xC6, /* 0x64-0x67 */
+	0x00, 0x00, 0x94, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE0, 0x8C, 0x00, 0x00, 0x8E, 0xCF, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x90, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE0, 0x8F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE0, 0x87, 0x00, 0x00, 0x8C, 0x46, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x8D, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x97, 0x6F, 0xE0, 0x90, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xEA, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x6E, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE0, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE0, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x94, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE0, 0x94, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x95, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xED, 0xFA, 0x00, 0x00, 0x94, 0x52, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x93, 0x95, 0xE0, 0x97, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0x99, 0x00, 0x00, /* 0xCC-0xCF */
+	0x97, 0xD3, 0x00, 0x00, 0xE0, 0x96, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE0, 0x98, 0x89, 0x8D, 0x00, 0x00, 0xE0, 0x93, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x7A, /* 0xDC-0xDF */
+	0xE0, 0x9A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x91, 0x87, 0x8E, 0x57, 0xE0, 0x9C, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE0, 0x9B, 0x90, 0x43, 0x99, 0xD7, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE0, 0x9D, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE0, 0x9F, 0x00, 0x00, 0xE0, 0x8E, /* 0xF8-0xFB */
+	0xE0, 0x9E, 0x00, 0x00, 0xED, 0xFB, 0xE0, 0xA0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_72[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x9A, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE0, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE0, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xA3, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xE0, 0xA4, 0x00, 0x00, 0x92, 0xDC, 0x00, 0x00, /* 0x28-0x2B */
+	0xE0, 0xA6, 0xE0, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE0, 0xA7, 0x00, 0x00, 0xE0, 0xA8, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x8E, 0xDD, 0x95, 0x83, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xEA, 0xE0, 0xA9, /* 0x38-0x3B */
+	0xE0, 0xAA, 0x91, 0x75, 0x8E, 0xA2, 0xE0, 0xAB, /* 0x3C-0x3F */
+	0xE0, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xAD, 0x95, 0xD0, /* 0x44-0x47 */
+	0x94, 0xC5, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xAE, /* 0x48-0x4B */
+	0x94, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0xAB, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE0, 0xAF, 0x89, 0xE5, 0x00, 0x00, 0x8B, 0x8D, /* 0x58-0x5B */
+	0x00, 0x00, 0x96, 0xC4, 0x00, 0x00, 0x96, 0xB4, /* 0x5C-0x5F */
+	0x00, 0x00, 0x89, 0xB2, 0x98, 0x53, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x71, /* 0x64-0x67 */
+	0x00, 0x00, 0x95, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xB5, 0x00, 0x00, /* 0x70-0x73 */
+	0xE0, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x93, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x8C, 0xA1, 0xE0, 0xB1, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x8D, 0xD2, 0xE0, 0xB3, 0xE0, 0xB2, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xB4, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xB5, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xB6, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x8B, 0x5D, 0x00, 0x00, 0xE0, 0xB7, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xB8, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x8C, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x94, 0xC6, /* 0xAC-0xAF */
+	0x00, 0x00, 0xED, 0xFC, 0xE0, 0xBA, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xF3, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE0, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x40, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xB6, 0xE0, 0xBB, /* 0xC0-0xC3 */
+	0xE0, 0xBD, 0x00, 0x00, 0xE0, 0xBC, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xBE, 0x00, 0x00, /* 0xCC-0xCF */
+	0x8C, 0xCF, 0x00, 0x00, 0xE0, 0xBF, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xE7, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x91, 0x5F, 0x00, 0x00, 0x8D, 0x9D, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xE0, 0xC1, 0xE0, 0xC2, 0xE0, 0xC0, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x8E, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x93, 0xC6, 0x8B, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xC4, /* 0xF4-0xF7 */
+	0x92, 0x4B, 0xE0, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x98, 0x54, 0x94, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_73[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xC7, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xC9, 0xE0, 0xC6, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0xD2, /* 0x18-0x1B */
+	0xE0, 0xC8, 0xE0, 0xCA, 0x00, 0x00, 0x97, 0xC2, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xEE, 0x41, 0xE0, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE0, 0xCD, 0x92, 0x96, 0x94, 0x4C, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xA3, 0xE0, 0xCC, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE0, 0xCB, 0x00, 0x00, 0x97, 0x50, 0x97, 0x51, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xCF, 0x89, 0x8E, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x8D, 0x96, 0x8E, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xD0, 0xE0, 0xD1, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xD3, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x62, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xE0, 0xD5, 0x00, 0x00, 0xE0, 0xD4, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE0, 0xD6, 0x00, 0x00, 0x8A, 0x6C, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xE0, 0xD8, 0x00, 0x00, 0xEE, 0x43, /* 0x74-0x77 */
+	0xE0, 0xD7, 0x00, 0x00, 0xE0, 0xDA, 0xE0, 0xD9, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x8C, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x97, 0xA6, /* 0x84-0x87 */
+	0x00, 0x00, 0x8B, 0xCA, 0x00, 0x00, 0x89, 0xA4, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xE8, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x8A, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xE6, 0xE0, 0xDC, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xDE, /* 0xB8-0xBB */
+	0x00, 0x00, 0xEE, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE0, 0xDF, 0x00, 0x00, 0x89, 0xCF, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE0, 0xDB, 0xEE, 0x45, 0x8E, 0x58, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x92, 0xBF, 0xE0, 0xDD, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x48, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x46, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE2, 0x00, 0x00, /* 0xDC-0xDF */
+	0x8E, 0xEC, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x47, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE0, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x5D, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x94, 0xC7, 0xE0, 0xE1, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE0, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xEE, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xE0, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xBB, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_74[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x85, /* 0x00-0x03 */
+	0x00, 0x00, 0xE0, 0xE4, 0x97, 0x9D, 0xEE, 0x49, /* 0x04-0x07 */
+	0x00, 0x00, 0x97, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xF4, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE0, 0xE6, 0xEE, 0x4B, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xEE, 0x4D, 0xEE, 0x4C, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x4E, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE8, 0x97, 0xD4, /* 0x30-0x33 */
+	0x8B, 0xD5, 0x94, 0xFA, 0x94, 0x69, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE9, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xEB, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE0, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE0, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE0, 0xED, 0x8C, 0xE8, 0x89, 0x6C, /* 0x58-0x5B */
+	0xE0, 0xEF, 0x00, 0x00, 0x90, 0x90, 0xE0, 0xEC, /* 0x5C-0x5F */
+	0x97, 0xDA, 0x00, 0x00, 0xEE, 0x4F, 0xE0, 0xF2, /* 0x60-0x63 */
+	0xEA, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE0, 0xF0, 0xE0, 0xF3, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE5, /* 0x6C-0x6F */
+	0xE0, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x8D, 0xBA, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xF4, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xF5, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x9E, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xEE, 0x50, 0x00, 0x00, 0xE0, 0xF6, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xF7, 0xEE, 0x51, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE3, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xF8, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x8A, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x8E, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xF9, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xFA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE0, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x89, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xE1, 0x40, 0x00, 0x00, 0x95, 0x5A, 0xE1, 0x41, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xA2, 0xE1, 0x42, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE1, 0x43, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x44, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE1, 0x46, 0xE1, 0x47, 0xE1, 0x45, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x72, 0xE1, 0x49, /* 0xF4-0xF7 */
+	0xE1, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_75[512] = {
+	0x00, 0x00, 0xEE, 0x52, 0x00, 0x00, 0xE1, 0x4B, /* 0x00-0x03 */
+	0xE1, 0x4A, 0xE1, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xE1, 0x4D, 0xE1, 0x4F, 0xE1, 0x4E, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x8D, 0x99, 0x00, 0x00, 0xE1, 0x51, /* 0x10-0x13 */
+	0x00, 0x00, 0xE1, 0x50, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x8A, 0xC3, 0x00, 0x00, 0x90, 0x72, 0x00, 0x00, /* 0x18-0x1B */
+	0x93, 0x5B, 0x00, 0x00, 0xE1, 0x52, 0x90, 0xB6, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x59, /* 0x20-0x23 */
+	0x00, 0x00, 0x89, 0x99, 0xE1, 0x53, 0x00, 0x00, /* 0x24-0x27 */
+	0x97, 0x70, 0x00, 0x00, 0x00, 0x00, 0x95, 0xE1, /* 0x28-0x2B */
+	0xE1, 0x54, 0x00, 0x00, 0x00, 0x00, 0xED, 0x8C, /* 0x2C-0x2F */
+	0x93, 0x63, 0x97, 0x52, 0x8D, 0x62, 0x90, 0x5C, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x6A, /* 0x34-0x37 */
+	0x99, 0xB2, 0x00, 0x00, 0x92, 0xAC, 0x89, 0xE6, /* 0x38-0x3B */
+	0xE1, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE1, 0x56, 0x00, 0x00, 0xE1, 0x5B, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE1, 0x59, 0xE1, 0x58, 0x9D, 0xC0, /* 0x48-0x4B */
+	0x8A, 0x45, 0xE1, 0x57, 0x00, 0x00, 0x88, 0xD8, /* 0x4C-0x4F */
+	0x00, 0x00, 0x94, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x94, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x97, 0xAF, 0xE1, 0x5C, 0xE1, 0x5A, /* 0x58-0x5B */
+	0x92, 0x7B, 0x90, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x94, 0xA9, 0x00, 0x00, 0x95, 0x4C, 0x00, 0x00, /* 0x60-0x63 */
+	0xE1, 0x5E, 0x97, 0xAA, 0x8C, 0x6C, 0xE1, 0x5F, /* 0x64-0x67 */
+	0x00, 0x00, 0xE1, 0x5D, 0x94, 0xD4, 0xE1, 0x60, /* 0x68-0x6B */
+	0x00, 0x00, 0xE1, 0x61, 0x00, 0x00, 0xEE, 0x53, /* 0x6C-0x6F */
+	0x88, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x8F, 0xF4, /* 0x70-0x73 */
+	0xE1, 0x66, 0x00, 0x00, 0xE1, 0x63, 0x93, 0xEB, /* 0x74-0x77 */
+	0xE1, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x45, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x69, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x64, 0xE1, 0x65, /* 0x84-0x87 */
+	0x00, 0x00, 0xE1, 0x68, 0xE1, 0x67, 0x95, 0x44, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x61, 0x91, 0x60, /* 0x8C-0x8F */
+	0x00, 0x00, 0x8B, 0x5E, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE1, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x6B, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xE1, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x6E, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xE1, 0x6D, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x75, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE1, 0x76, 0x94, 0xE6, 0xE1, 0x70, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE1, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE1, 0x74, 0x90, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE1, 0x75, 0xE1, 0x73, 0x8E, 0xBE, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x6F, 0xE1, 0x71, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x95, 0x61, 0x00, 0x00, 0x8F, 0xC7, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x78, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE1, 0x77, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x79, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x8E, 0xA4, 0x8D, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x93, 0x97, 0xE1, 0x7A, 0x00, 0x00, 0x92, 0xC9, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x7C, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x9F, 0xE1, 0x7B, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x91, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE1, 0x82, 0x00, 0x00, 0xE1, 0x84, 0xE1, 0x85, /* 0xF0-0xF3 */
+	0x92, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x83, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE1, 0x80, 0x00, 0x00, 0xE1, 0x7D, 0xE1, 0x7E, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_76[512] = {
+	0x00, 0x00, 0xE1, 0x81, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE1, 0x88, 0x00, 0x00, 0xE1, 0x86, /* 0x08-0x0B */
+	0x00, 0x00, 0xE1, 0x87, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x89, /* 0x1C-0x1F */
+	0xE1, 0x8B, 0xE1, 0x8C, 0xE1, 0x8D, 0x00, 0x00, /* 0x20-0x23 */
+	0xE1, 0x8E, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x8A, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE1, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE1, 0x8F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x91, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xC3, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x94, 0xE1, 0x92, /* 0x44-0x47 */
+	0xE1, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x8A, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xFC, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xC8, 0x00, 0x00, /* 0x54-0x57 */
+	0xE1, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xE1, 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE1, 0x97, 0xE1, 0x98, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x9C, /* 0x64-0x67 */
+	0xE1, 0x99, 0xE1, 0x9A, 0xE1, 0x9B, 0x00, 0x00, /* 0x68-0x6B */
+	0xE1, 0x9D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE1, 0x9E, 0x00, 0x00, 0xE1, 0x9F, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA0, 0x00, 0x00, /* 0x74-0x77 */
+	0xE1, 0xA1, 0x00, 0x00, 0x94, 0xAD, 0x93, 0x6F, /* 0x78-0x7B */
+	0xE1, 0xA2, 0x94, 0x92, 0x95, 0x53, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xE1, 0xA3, 0x00, 0x00, 0xEE, 0x54, 0xE1, 0xA4, /* 0x80-0x83 */
+	0x93, 0x49, 0x00, 0x00, 0x8A, 0x46, 0x8D, 0x63, /* 0x84-0x87 */
+	0xE1, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xA6, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA7, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8E, 0x48, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xA9, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA8, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xE1, 0xAA, 0xE1, 0xAB, 0xEE, 0x57, /* 0x98-0x9B */
+	0xEE, 0x55, 0x00, 0x00, 0xEE, 0x56, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x58, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xE7, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE1, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE1, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x89, /* 0xB4-0xB7 */
+	0xE1, 0xAE, 0xE1, 0xAF, 0xE1, 0xB0, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x4D, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xB1, 0x94, 0x75, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x7E, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x89, 0x6D, 0x00, 0x00, 0x89, 0x76, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE1, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xB4, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xB3, 0x93, 0x90, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xB7, /* 0xD8-0xDB */
+	0x9F, 0x58, 0x00, 0x00, 0xE1, 0xB5, 0x96, 0xBF, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE1, 0xB6, 0x00, 0x00, 0x8A, 0xC4, /* 0xE0-0xE3 */
+	0x94, 0xD5, 0xE1, 0xB7, 0x00, 0x00, 0xE1, 0xB8, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xB9, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xDA, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xD3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x92, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x91, 0x8A, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xBB, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x82, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_77[512] = {
+	0x00, 0x00, 0x8F, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE1, 0xBE, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xBD, /* 0x04-0x07 */
+	0xE1, 0xBC, 0x94, 0xFB, 0x00, 0x00, 0x8A, 0xC5, /* 0x08-0x0B */
+	0x8C, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xC4, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xC1, 0x90, 0x5E, /* 0x1C-0x1F */
+	0x96, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE1, 0xC0, 0xE1, 0xC2, 0xE1, 0xC3, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE1, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xC5, /* 0x34-0x37 */
+	0xE1, 0xC6, 0x00, 0x00, 0x92, 0xAD, 0x00, 0x00, /* 0x38-0x3B */
+	0x8A, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x92, 0x85, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x5A, 0xE1, 0xC7, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xC8, 0xE1, 0xCB, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x90, 0x87, 0x00, 0x00, 0x93, 0xC2, /* 0x60-0x63 */
+	0x00, 0x00, 0xE1, 0xCC, 0x96, 0x72, 0x00, 0x00, /* 0x64-0x67 */
+	0xE1, 0xC9, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xCA, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE1, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xCE, 0xE1, 0xCD, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xD1, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xD0, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE1, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xD4, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE1, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x95, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x8F, 0x75, 0x97, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE1, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x93, 0xB5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xD6, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE1, 0xD7, 0x00, 0x00, 0xE1, 0xDB, /* 0xB8-0xBB */
+	0xE1, 0xD9, 0xE1, 0xDA, 0x00, 0x00, 0xE1, 0xD8, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xDC, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE1, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xDE, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xDF, 0x96, 0xB5, /* 0xD8-0xDB */
+	0xE1, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xEE, 0xE1, 0xE1, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x92, 0x6D, 0x00, 0x00, 0x94, 0x8A, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x8B, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x92, 0x5A, 0xE1, 0xE2, 0x8B, 0xB8, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xCE, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE1, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_78[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xBB, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xE1, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE5, 0x00, 0x00, /* 0x10-0x13 */
+	0x8C, 0xA4, 0x8D, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE1, 0xE7, 0xEE, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x93, 0x75, 0x8D, 0xD4, 0x8B, 0x6D, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x43, 0x00, 0x00, /* 0x30-0x33 */
+	0x94, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x76, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x7B, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE1, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x5D, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x8F, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xEE, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xB0, /* 0x68-0x6B */
+	0x8D, 0x64, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xA5, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xA1, 0x00, 0x00, /* 0x70-0x73 */
+	0xE1, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x5F, 0x00, 0x00, /* 0x78-0x7B */
+	0xE1, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x8C, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xEC, 0x92, 0xF4, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xE1, 0xEF, 0x8A, 0x56, 0xE1, 0xEA, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x94, 0xE8, 0x00, 0x00, 0x89, 0x4F, /* 0x90-0x93 */
+	0x00, 0x00, 0x8D, 0xEA, 0x00, 0x00, 0x98, 0x71, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xEE, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xF0, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xC9, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x90, 0xD7, 0xE1, 0xF2, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xF3, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE1, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0x6D, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE1, 0xF9, 0x00, 0x00, 0xE1, 0xF8, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x8E, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE1, 0xFA, 0xE1, 0xF5, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xFB, 0xE1, 0xF6, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x94, 0xD6, 0xE1, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE1, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x41, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x40, /* 0xE4-0xE7 */
+	0x96, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE1, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x88, 0xE9, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE2, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE2, 0x42, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_79[512] = {
+	0x00, 0x00, 0x8F, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x44, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x62, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE2, 0x46, 0xE2, 0x45, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE2, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE6, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE8, 0xE2, 0x49, /* 0x28-0x2B */
+	0xE2, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEE, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xA6, 0x00, 0x00, /* 0x38-0x3B */
+	0x97, 0xE7, 0x00, 0x00, 0x8E, 0xD0, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE2, 0x4A, 0x8C, 0x56, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x5F, /* 0x44-0x47 */
+	0x8B, 0x46, 0x8E, 0x83, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x97, 0x53, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x50, /* 0x50-0x53 */
+	0x00, 0x00, 0xE2, 0x4F, 0x91, 0x63, 0xE2, 0x4C, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x4E, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x8F, 0x6A, 0x90, 0x5F, 0xE2, 0x4D, /* 0x5C-0x5F */
+	0xE2, 0x4B, 0x00, 0x00, 0x94, 0x49, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x8F, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x95, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x8D, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x98, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x51, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x52, /* 0x7C-0x7F */
+	
+	0xE2, 0x68, 0x8B, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x98, 0x5C, 0x91, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x53, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x89, 0xD0, 0x92, 0xF5, 0x95, 0x9F, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xEE, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x66, /* 0x98-0x9B */
+	0x00, 0x00, 0xE2, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x9A, 0xE2, 0x55, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x57, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x58, 0x00, 0x00, /* 0xAC-0xAF */
+	0x94, 0x48, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x59, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0x5A, 0xE2, 0x5B, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x8B, 0xD7, 0x89, 0xD1, 0x93, 0xC3, /* 0xBC-0xBF */
+	0x8F, 0x47, 0x8E, 0x84, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE2, 0x5C, 0x00, 0x00, 0x8F, 0x48, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x89, 0xC8, 0x95, 0x62, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE2, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x94, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x64, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE2, 0x60, 0x00, 0x00, 0xE2, 0x61, /* 0xE0-0xE3 */
+	0x94, 0x89, 0x00, 0x00, 0x90, 0x60, 0xE2, 0x5E, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x92, 0x81, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE2, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x8F, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xDA, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_7A[512] = {
+	0x8B, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xE2, 0x62, 0x00, 0x00, 0x00, 0x00, 0x92, 0xF6, /* 0x08-0x0B */
+	0x00, 0x00, 0xE2, 0x63, 0x90, 0xC5, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x96, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x95, 0x42, /* 0x14-0x17 */
+	0xE2, 0x64, 0xE2, 0x65, 0x92, 0x74, 0x00, 0x00, /* 0x18-0x1B */
+	0x97, 0xC5, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x67, /* 0x1C-0x1F */
+	0xE2, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xED, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE2, 0x69, 0x88, 0xEE, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x6C, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x6A, /* 0x38-0x3B */
+	0x89, 0xD2, 0x8C, 0x6D, 0xE2, 0x6B, 0x8D, 0x65, /* 0x3C-0x3F */
+	0x8D, 0x92, 0x00, 0x00, 0x95, 0xE4, 0xE2, 0x6D, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x73, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE2, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x90, 0xCF, 0x89, 0x6E, 0x89, 0xB8, /* 0x4C-0x4F */
+	0x88, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x6E, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE2, 0x70, 0xE2, 0x71, 0x8F, 0xF5, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE2, 0x72, 0x00, 0x00, 0x8A, 0x6E, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE2, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x8C, 0x8A, 0x00, 0x00, 0x8B, 0x86, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE2, 0x75, 0x8B, 0xF3, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE2, 0x76, 0x00, 0x00, 0x90, 0xFA, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x93, 0xCB, 0x00, 0x00, 0x90, 0xDE, /* 0x80-0x83 */
+	0x8D, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE2, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x82, 0x91, 0x8B, /* 0x90-0x93 */
+	0x00, 0x00, 0xE2, 0x79, 0xE2, 0x7B, 0xE2, 0x78, /* 0x94-0x97 */
+	0xE2, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x41, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE2, 0x7C, 0x8C, 0x45, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x87, 0x97, 0x71, /* 0xAC-0xAF */
+	0xE2, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x80, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x4D, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x83, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x96, /* 0xC0-0xC3 */
+	0xE2, 0x82, 0xE2, 0x81, 0x00, 0x00, 0xE2, 0x85, /* 0xC4-0xC7 */
+	0xE2, 0x7D, 0x00, 0x00, 0xE2, 0x86, 0x97, 0xA7, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE2, 0x87, 0x00, 0x00, 0xE2, 0x88, /* 0xCC-0xCF */
+	0x00, 0x00, 0xEE, 0x67, 0x9A, 0xF2, 0xE2, 0x8A, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE2, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE2, 0x8B, 0xE2, 0x8C, 0x00, 0x00, /* 0xD8-0xDB */
+	0x97, 0xB3, 0xE2, 0x8D, 0x00, 0x00, 0xE8, 0xED, /* 0xDC-0xDF */
+	0x8F, 0xCD, 0xE2, 0x8E, 0xE2, 0x8F, 0x8F, 0x76, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x93, 0xB6, 0xE2, 0x90, 0xEE, 0x68, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x47, 0xEE, 0x6A, /* 0xE8-0xEB */
+	0x00, 0x00, 0xE2, 0x91, 0x00, 0x00, 0x92, 0x5B, /* 0xEC-0xEF */
+	0xE2, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xA3, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x99, 0x5E, 0x92, 0x7C, 0x8E, 0xB1, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xC6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x93, 0x00, 0x00, /* 0x00-0x03 */
+	0xE2, 0xA0, 0x00, 0x00, 0xE2, 0x96, 0x00, 0x00, /* 0x04-0x07 */
+	0x8B, 0x88, 0x00, 0x00, 0xE2, 0x95, 0xE2, 0xA2, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x94, /* 0x0C-0x0F */
+	0x00, 0x00, 0x8F, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE2, 0x98, 0xE2, 0x99, 0x00, 0x00, 0x93, 0x4A, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x9A, 0x00, 0x00, /* 0x1C-0x1F */
+	0x8A, 0x7D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x90, 0x79, 0x95, 0x84, 0x00, 0x00, /* 0x24-0x27 */
+	0xE2, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x91, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x97, /* 0x30-0x33 */
+	0x00, 0x00, 0xE2, 0x9B, 0xE2, 0x9D, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x8D, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE2, 0xA4, 0x95, 0x4D, 0x00, 0x00, /* 0x44-0x47 */
+	0x94, 0xA4, 0x93, 0x99, 0x00, 0x00, 0x8B, 0xD8, /* 0x48-0x4B */
+	0xE2, 0xA3, 0xE2, 0xA1, 0x00, 0x00, 0x94, 0xB3, /* 0x4C-0x4F */
+	0xE2, 0x9E, 0x92, 0x7D, 0x93, 0x9B, 0x00, 0x00, /* 0x50-0x53 */
+	0x93, 0x9A, 0x00, 0x00, 0x8D, 0xF4, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE2, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xE2, 0xA6, 0x00, 0x00, 0xE2, 0xA8, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE2, 0xAB, 0x00, 0x00, 0xE2, 0xAC, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE2, 0xA9, 0xE2, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE2, 0xA7, 0xE2, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x9F, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xCD, 0x89, 0xD3, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xB3, /* 0x88-0x8B */
+	0x00, 0x00, 0xE2, 0xB0, 0x00, 0x00, 0xE2, 0xB5, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xB4, 0x00, 0x00, /* 0x90-0x93 */
+	0x94, 0x93, 0x96, 0xA5, 0x00, 0x00, 0x8E, 0x5A, /* 0x94-0x97 */
+	0xE2, 0xAE, 0xE2, 0xB7, 0xE2, 0xB2, 0x00, 0x00, /* 0x98-0x9B */
+	0xE2, 0xB1, 0xE2, 0xAD, 0xEE, 0x6B, 0xE2, 0xAF, /* 0x9C-0x9F */
+	0x00, 0x00, 0x8A, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x5C, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x90, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x94, 0xA0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE2, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x94, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x90, 0xDF, 0xE2, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x94, 0xCD, 0x00, 0x00, 0xE2, 0xBD, 0x95, 0xD1, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x92, 0x7A, 0x00, 0x00, 0xE2, 0xB8, /* 0xC8-0xCB */
+	0xE2, 0xBA, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xBB, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE2, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x8E, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x93, 0xC4, 0xE2, 0xC3, 0xE2, 0xC2, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE2, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x98, 0x55, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xC8, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xCC, 0xE2, 0xC9, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_7C[512] = {
+	0xE2, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xC6, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE2, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE2, 0xC0, 0x99, 0xD3, 0xE2, 0xC7, /* 0x10-0x13 */
+	0xE2, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xCA, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD0, /* 0x1C-0x1F */
+	0x00, 0x00, 0x8A, 0xC8, 0x00, 0x00, 0xE2, 0xCD, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xCE, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xCF, 0xE2, 0xD2, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD1, /* 0x34-0x37 */
+	0x94, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xE2, 0xD3, 0x97, 0xFA, 0x95, 0xEB, /* 0x3C-0x3F */
+	0xE2, 0xD8, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD5, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE2, 0xD4, 0x90, 0xD0, 0x00, 0x00, 0xE2, 0xD7, /* 0x4C-0x4F */
+	0xE2, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE2, 0xD6, 0x00, 0x00, 0xE2, 0xDD, 0x00, 0x00, /* 0x54-0x57 */
+	0xE2, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xDB, /* 0x5C-0x5F */
+	0xE2, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xE2, 0xDC, 0xE2, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE2, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xC4, /* 0x70-0x73 */
+	0x00, 0x00, 0xE2, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xE0, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x8B, 0xCC, 0x8C, 0x48, 0xE2, 0xE1, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x95, 0xB2, 0x00, 0x00, 0x90, 0x88, /* 0x88-0x8B */
+	0x00, 0x00, 0x96, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE2, 0xE2, 0x00, 0x00, 0x97, 0xB1, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x94, 0x94, 0x00, 0x00, 0x91, 0x65, /* 0x94-0x97 */
+	0x94, 0x53, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x6C, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xBE, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE2, 0xE7, 0xE2, 0xE5, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE2, 0xE3, 0x8A, 0x9F, 0x00, 0x00, 0x8F, 0xCF, /* 0xA4-0xA7 */
+	0xE2, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xE6, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE2, 0xE4, 0xE2, 0xEC, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE2, 0xEB, 0xE2, 0xEA, 0xE2, 0xE9, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE2, 0xEE, 0x90, 0xB8, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE2, 0xEF, 0x00, 0x00, 0xE2, 0xF1, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE2, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xD0, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x57, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xF3, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x9C, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE2, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE2, 0xF4, 0x00, 0x00, 0x95, 0xB3, 0x91, 0x8C, /* 0xDC-0xDF */
+	0x8D, 0x66, 0x00, 0x00, 0xE2, 0xF5, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xC6, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xF7, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xF8, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE2, 0xF9, 0x00, 0x00, 0xE2, 0xFA, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x8E, 0x85, 0x00, 0x00, 0xE2, 0xFB, 0x8C, 0x6E, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x8A, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7D[512] = {
+	0x8B, 0x49, 0x00, 0x00, 0xE3, 0x40, 0x00, 0x00, /* 0x00-0x03 */
+	0x96, 0xF1, 0x8D, 0x67, 0xE2, 0xFC, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x43, 0x96, 0xE4, /* 0x08-0x0B */
+	0x00, 0x00, 0x94, 0x5B, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x95, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x8F, 0x83, 0xE3, 0x42, 0x00, 0x00, 0x8E, 0xD1, /* 0x14-0x17 */
+	0x8D, 0x68, 0x8E, 0x86, 0x8B, 0x89, 0x95, 0xB4, /* 0x18-0x1B */
+	0xE3, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x91, 0x66, 0x96, 0x61, 0x8D, 0xF5, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x87, /* 0x28-0x2B */
+	0x92, 0xDB, 0x00, 0x00, 0xE3, 0x46, 0x97, 0xDD, /* 0x2C-0x2F */
+	0x8D, 0xD7, 0x00, 0x00, 0xE3, 0x47, 0x90, 0x61, /* 0x30-0x33 */
+	0x00, 0x00, 0xE3, 0x49, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x8F, 0xD0, 0x8D, 0xAE, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x48, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x49, 0x8C, 0xBC, /* 0x40-0x43 */
+	0x91, 0x67, 0xE3, 0x44, 0xE3, 0x4A, 0x00, 0x00, /* 0x44-0x47 */
+	0xEE, 0x6D, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x45, /* 0x48-0x4B */
+	0x8C, 0x6F, 0x00, 0x00, 0xE3, 0x4D, 0xE3, 0x51, /* 0x4C-0x4F */
+	0x8C, 0x8B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x4C, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x55, /* 0x58-0x5B */
+	0xEE, 0x6E, 0x00, 0x00, 0x8D, 0x69, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x97, 0x8D, 0x88, 0xBA, 0xE3, 0x52, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x8B, 0x00, 0x00, /* 0x64-0x67 */
+	0xE3, 0x4F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x50, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x93, 0x9D, 0xE3, 0x4E, 0xE3, 0x4B, /* 0x70-0x73 */
+	0x00, 0x00, 0x8A, 0x47, 0x90, 0xE2, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x8C, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE3, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE3, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x56, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x53, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x8C, 0x70, 0x91, 0xB1, 0xE3, 0x58, /* 0x98-0x9B */
+	0x91, 0x8E, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x65, /* 0x9C-0x9F */
+	0xEE, 0x70, 0x00, 0x00, 0xE3, 0x61, 0xE3, 0x5B, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x5F, /* 0xA8-0xAB */
+	0x8E, 0xF8, 0x88, 0xDB, 0xE3, 0x5A, 0xE3, 0x62, /* 0xAC-0xAF */
+	0xE3, 0x66, 0x8D, 0x6A, 0x96, 0xD4, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x92, 0xD4, 0xE3, 0x5C, 0x00, 0x00, 0xEE, 0x6F, /* 0xB4-0xB7 */
+	0xE3, 0x64, 0x00, 0x00, 0xE3, 0x59, 0x92, 0x5D, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE3, 0x5E, 0x88, 0xBB, 0x96, 0xC8, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x5D, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xD9, 0x94, 0xEA, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x8D, /* 0xCC-0xCF */
+	0x00, 0x00, 0x97, 0xCE, 0x8F, 0x8F, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE3, 0x8E, 0xEE, 0x71, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE3, 0x67, 0x00, 0x00, 0x90, 0xFC, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE3, 0x63, 0xE3, 0x68, 0xE3, 0x6A, 0x00, 0x00, /* 0xDC-0xDF */
+	0x92, 0xF7, 0xE3, 0x6D, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE3, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x95, 0xD2, 0x8A, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x96, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x88, 0xDC, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x6C, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x97, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x6B, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_7E[512] = {
+	0x00, 0x00, 0x89, 0x8F, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x93, 0xEA, 0xE3, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE3, 0x75, 0xE3, 0x6F, 0xE3, 0x76, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x72, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x9B, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xC8, 0xE3, 0x74, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE3, 0x71, 0xE3, 0x77, 0xE3, 0x70, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x63, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x44, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x6B, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE3, 0x73, 0xE3, 0x80, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE3, 0x7B, 0x00, 0x00, 0xE3, 0x7E, /* 0x34-0x37 */
+	0x00, 0x00, 0xE3, 0x7C, 0xE3, 0x81, 0xE3, 0x7A, /* 0x38-0x3B */
+	0x00, 0x00, 0xE3, 0x60, 0x90, 0xD1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x94, 0xC9, 0x00, 0x00, 0xE3, 0x7D, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x78, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x40, 0x8C, 0x71, /* 0x48-0x4B */
+	0x00, 0x00, 0x8F, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x72, 0x00, 0x00, /* 0x50-0x53 */
+	0x90, 0x44, 0x91, 0x55, 0xE3, 0x84, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE3, 0x86, 0xE3, 0x87, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE3, 0x83, 0xE3, 0x85, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x79, 0xE3, 0x82, /* 0x64-0x67 */
+	0x00, 0x00, 0xE3, 0x8A, 0xE3, 0x89, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x96, 0x9A, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x8C, 0x4A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE3, 0x88, 0x00, 0x00, 0xE3, 0x8C, /* 0x78-0x7B */
+	0xE3, 0x8B, 0xE3, 0x8F, 0x00, 0x00, 0xE3, 0x91, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x5B, 0xE3, 0x8D, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE3, 0x92, 0xE3, 0x93, 0xED, 0x40, 0x00, 0x00, /* 0x88-0x8B */
+	0xE3, 0x94, 0x00, 0x00, 0xE3, 0x9A, 0x93, 0x5A, /* 0x8C-0x8F */
+	0xE3, 0x96, 0x00, 0x00, 0xE3, 0x95, 0xE3, 0x97, /* 0x90-0x93 */
+	0xE3, 0x98, 0x00, 0x00, 0xE3, 0x99, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x9B, /* 0x98-0x9B */
+	0xE3, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_7F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xCA, 0x00, 0x00, /* 0x34-0x37 */
+	0xE3, 0x9D, 0x00, 0x00, 0xE3, 0x9E, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE3, 0x9F, 0x00, 0x00, 0xEE, 0x73, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE3, 0xA0, 0xE3, 0xA1, 0xE3, 0xA2, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE3, 0xA3, 0xE3, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE3, 0xA6, 0xE3, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE3, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xA8, /* 0x5C-0x5F */
+	0xE3, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xAC, /* 0x64-0x67 */
+	0xE3, 0xAA, 0xE3, 0xAB, 0x8D, 0xDF, 0x8C, 0x72, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0x75, 0x00, 0x00, /* 0x6C-0x6F */
+	0x94, 0xB1, 0x00, 0x00, 0x8F, 0x90, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x94, 0x6C, 0x00, 0x00, 0x94, 0xEB, /* 0x74-0x77 */
+	0xE3, 0xAD, 0x9C, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xAE, 0xE3, 0xB0, /* 0x80-0x83 */
+	0x00, 0x00, 0x97, 0x85, 0xE3, 0xAF, 0xE3, 0xB2, /* 0x84-0x87 */
+	0xE3, 0xB1, 0x00, 0x00, 0x97, 0x72, 0x00, 0x00, /* 0x88-0x8B */
+	0xE3, 0xB3, 0x00, 0x00, 0x94, 0xFC, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE3, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xB7, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xE3, 0xB6, 0xE3, 0xB5, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xEE, 0x74, 0x00, 0x00, 0xE3, 0xB8, /* 0xA0-0xA3 */
+	0x8C, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x91, 0x41, 0x8B, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xBC, 0xE3, 0xB9, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xBA, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xBD, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE3, 0xBE, 0xE3, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x89, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x89, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE3, 0xC0, 0xE3, 0xC1, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xC2, 0x00, 0x00, /* 0xC8-0xCB */
+	0x97, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x4B, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE3, 0xC4, 0xE3, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x90, 0x89, 0xE3, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xC6, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE3, 0xC7, 0x00, 0x00, 0x8A, 0xE3, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x8A, 0xCB, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xC8, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE3, 0xC9, 0x00, 0x00, 0x96, 0x7C, /* 0xF8-0xFB */
+	0x97, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_80[512] = {
+	0x97, 0x73, 0x98, 0x56, 0x00, 0x00, 0x8D, 0x6C, /* 0x00-0x03 */
+	0xE3, 0xCC, 0x8E, 0xD2, 0xE3, 0xCB, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xCD, /* 0x08-0x0B */
+	0x8E, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x91, 0xCF, 0x00, 0x00, 0xE3, 0xCE, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x8D, 0x6B, 0x00, 0x00, 0x96, 0xD5, /* 0x14-0x17 */
+	0xE3, 0xCF, 0xE3, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xE3, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE3, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xE3, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xA8, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0xEB, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xD5, /* 0x38-0x3B */
+	0x00, 0x00, 0x92, 0x5E, 0x00, 0x00, 0xE3, 0xD4, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xD7, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xD6, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xD8, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0xB9, 0x00, 0x00, /* 0x54-0x57 */
+	0xE3, 0xD9, 0x00, 0x00, 0xE3, 0xDA, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xB7, 0xE3, 0xDB, /* 0x5C-0x5F */
+	0x00, 0x00, 0x91, 0x8F, 0xE3, 0xDC, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xE3, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xFC, /* 0x6C-0x6F */
+	0xE3, 0xE0, 0x00, 0x00, 0xE3, 0xDF, 0xE3, 0xDE, /* 0x70-0x73 */
+	0x92, 0xAE, 0x00, 0x00, 0xE3, 0xE1, 0x90, 0x45, /* 0x74-0x77 */
+	0x00, 0x00, 0xE3, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE3, 0xE3, 0x98, 0x57, 0xE3, 0xE4, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE3, 0xE5, 0xE3, 0xE7, 0xE3, 0xE6, 0x94, 0xA3, /* 0x84-0x87 */
+	0x00, 0x00, 0x93, 0xF7, 0x00, 0x00, 0x98, 0x5D, /* 0x88-0x8B */
+	0x94, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xE9, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xD1, 0x00, 0x00, /* 0x94-0x97 */
+	0x95, 0x49, 0x00, 0x00, 0xE3, 0xEA, 0xE3, 0xE8, /* 0x98-0x9B */
+	0x00, 0x00, 0x8A, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x8C, 0xD2, 0x8E, 0x88, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x94, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x8C, 0xA8, 0x96, 0x62, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE3, 0xED, 0xE3, 0xEB, 0x00, 0x00, 0x8D, 0x6D, /* 0xAC-0xAF */
+	0x00, 0x00, 0x8D, 0x6E, 0x88, 0xE7, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x8D, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x78, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xDD, /* 0xC0-0xC3 */
+	0xE3, 0xF2, 0x00, 0x00, 0x92, 0x5F, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x94, 0x77, 0x00, 0x00, 0x91, 0xD9, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xF4, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE3, 0xF0, 0xE3, 0xF3, 0xE3, 0xEE, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE3, 0xF1, 0x96, 0x45, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x8C, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x88, 0xFB, 0xE3, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xF6, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE3, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x93, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x8B, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE4, 0x45, 0x94, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_81[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x89, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x8B, 0xBA, 0x90, 0xC6, 0x98, 0x65, /* 0x04-0x07 */
+	0x96, 0xAC, 0xE3, 0xF5, 0x90, 0xD2, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x72, 0xE3, 0xF8, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xFA, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE3, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xFB, /* 0x2C-0x2F */
+	0x00, 0x00, 0x92, 0x45, 0x00, 0x00, 0x94, 0x5D, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x92, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x42, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x41, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xFC, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x74, 0x00, 0x00, /* 0x4C-0x4F */
+	0x95, 0x85, 0xE4, 0x44, 0x00, 0x00, 0xE4, 0x43, /* 0x50-0x53 */
+	0x8D, 0x6F, 0x98, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x54, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xE4, 0x48, 0xE4, 0x49, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xEE, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x47, 0x00, 0x00, /* 0x6C-0x6F */
+	0x8D, 0x98, 0xE4, 0x46, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE4, 0x4A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x92, 0xB0, 0x95, 0xA0, 0x91, 0x42, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xDA, /* 0x7C-0x7F */
+
+	0xE4, 0x4E, 0x00, 0x00, 0xE4, 0x4F, 0xE4, 0x4B, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE4, 0x4C, 0x00, 0x00, 0xE4, 0x4D, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x70, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x55, /* 0x90-0x93 */
+	0x00, 0x00, 0xE4, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x86, 0x00, 0x00, /* 0x98-0x9B */
+	0x96, 0x8C, 0x95, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE4, 0x50, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x53, /* 0xA0-0xA3 */
+	0xE4, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x96, 0x63, 0xE4, 0x56, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE4, 0x57, 0x00, 0x00, 0x00, 0x00, 0x91, 0x56, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE4, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE4, 0x5A, 0x00, 0x00, 0xE4, 0x5E, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE4, 0x5B, 0xE4, 0x59, 0x94, 0x5E, /* 0xBC-0xBF */
+	0xE4, 0x5C, 0x00, 0x00, 0xE4, 0x5D, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xB0, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE4, 0x64, 0xE4, 0x5F, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE4, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE4, 0x61, 0x00, 0x00, 0x91, 0x9F, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE4, 0x63, 0xE4, 0x62, 0xE4, 0x65, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x66, /* 0xDC-0xDF */
+	0xE4, 0x67, 0x00, 0x00, 0x00, 0x00, 0x90, 0x62, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x89, 0xE7, 0x00, 0x00, 0xE4, 0x68, /* 0xE4-0xE7 */
+	0x97, 0xD5, 0x00, 0x00, 0x8E, 0xA9, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x8F, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x8A, /* 0xF0-0xF3 */
+	0x92, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x69, 0xE4, 0x6A, /* 0xF8-0xFB */
+	0x89, 0x50, 0x00, 0x00, 0xE4, 0x6B, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_82[512] = {
+	0x00, 0x00, 0xE4, 0x6C, 0xE4, 0x6D, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE4, 0x6E, 0x00, 0x00, 0xE4, 0x6F, /* 0x04-0x07 */
+	0x8B, 0xBB, 0x9D, 0xA8, 0xE4, 0x70, 0x00, 0x00, /* 0x08-0x0B */
+	0x90, 0xE3, 0xE4, 0x71, 0x8E, 0xC9, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE4, 0x72, 0x00, 0x00, 0x98, 0xAE, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x73, 0x95, 0xDC, /* 0x14-0x17 */
+	0x8A, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x91, 0x43, /* 0x18-0x1B */
+	0x8F, 0x77, 0x00, 0x00, 0x95, 0x91, 0x8F, 0x4D, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE4, 0x74, 0x8D, 0x71, 0xE4, 0x75, /* 0x28-0x2B */
+	0x94, 0xCA, 0x00, 0x00, 0xE4, 0x84, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x77, /* 0x30-0x33 */
+	0x00, 0x00, 0x91, 0xC7, 0x94, 0x95, 0x8C, 0xBD, /* 0x34-0x37 */
+	0xE4, 0x76, 0x91, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE4, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xF8, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE4, 0x7A, 0xE4, 0x79, 0xE4, 0x7C, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE4, 0x7B, 0x00, 0x00, 0xE4, 0x7D, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x80, 0x00, 0x00, /* 0x60-0x63 */
+	0xE4, 0x7E, 0x00, 0x00, 0x8A, 0xCD, 0x00, 0x00, /* 0x64-0x67 */
+	0xE4, 0x81, 0x00, 0x00, 0xE4, 0x82, 0xE4, 0x83, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0xAF, 0x97, 0xC7, /* 0x6C-0x6F */
+	0x00, 0x00, 0xE4, 0x85, 0x90, 0x46, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x90, 0xE4, 0x86, /* 0x74-0x77 */
+	0xE4, 0x87, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x88, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xF0, /* 0x88-0x8B */
+	0x00, 0x00, 0xE4, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x8A, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x95, 0x87, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x8E, 0xC5, 0x00, 0x00, 0xE4, 0x8C, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x8A, 0x48, 0x88, 0xB0, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x8B, /* 0xA8-0xAB */
+	0xE4, 0x8E, 0x94, 0x6D, 0x00, 0x00, 0x90, 0x63, /* 0xAC-0xAF */
+	0x00, 0x00, 0x89, 0xD4, 0x00, 0x00, 0x96, 0x46, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x8C, 0x7C, 0x8B, 0xDA, 0x00, 0x00, 0xE4, 0x8D, /* 0xB8-0xBB */
+	0x00, 0x00, 0x89, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x8A, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x89, 0x91, 0xE4, 0x92, 0x97, 0xE8, /* 0xD0-0xD3 */
+	0x91, 0xDB, 0x00, 0x00, 0x00, 0x00, 0x95, 0x63, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE4, 0x9E, 0x00, 0x00, 0x89, 0xD5, /* 0xD8-0xDB */
+	0xE4, 0x9C, 0x00, 0x00, 0xE4, 0x9A, 0xE4, 0x91, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE4, 0x8F, 0x00, 0x00, 0xE4, 0x90, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x8E, 0xE1, 0x8B, 0xEA, 0x92, 0x97, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xCF, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x89, 0x70, 0x00, 0x00, 0xE4, 0x94, /* 0xF0-0xF3 */
+	0xE4, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE4, 0x99, 0xE4, 0x95, 0xE4, 0x98, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_83[512] = {
+	0x00, 0x00, 0xEE, 0x76, 0x96, 0xCE, 0xE4, 0x97, /* 0x00-0x03 */
+	0x89, 0xD6, 0x8A, 0x9D, 0xE4, 0x9B, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE4, 0x9D, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x73, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xA1, 0xE4, 0xAA, /* 0x14-0x17 */
+	0xE4, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x88, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xB2, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x88, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xA9, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xA8, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE4, 0xA3, 0xE4, 0xA2, 0x00, 0x00, /* 0x30-0x33 */
+	0xE4, 0xA0, 0xE4, 0x9F, 0x92, 0x83, 0x00, 0x00, /* 0x34-0x37 */
+	0x91, 0xF9, 0xE4, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE4, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE4, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x91, 0x90, 0x8C, 0x74, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x60, /* 0x4C-0x4F */
+	0xE4, 0xA6, 0x00, 0x00, 0x8D, 0x72, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x91, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x77, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xB8, /* 0x70-0x73 */
+	0x00, 0x00, 0xE4, 0xB9, 0x00, 0x00, 0x89, 0xD7, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xAC, /* 0x78-0x7B */
+	0xE4, 0xB6, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x78, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE4, 0xAC, 0x00, 0x00, 0xE4, 0xB4, /* 0x84-0x87 */
+	0x00, 0x00, 0xE4, 0xBB, 0xE4, 0xB5, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xB3, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x96, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xB1, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xAD, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0xCE, 0xE4, 0xAF, /* 0x9C-0x9F */
+	0xE4, 0xBA, 0x00, 0x00, 0xE4, 0xB0, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE4, 0xBC, 0x00, 0x00, 0xE4, 0xAE, 0x94, 0x9C, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x97, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE4, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE4, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE4, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x90, 0x9B, 0x00, 0x00, 0xEE, 0x79, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x65, 0x00, 0x00, /* 0xC8-0xCB */
+	0x8B, 0xDB, 0x00, 0x00, 0xE4, 0xC0, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xD9, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0xD2, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE4, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x8D, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x93, 0x70, /* 0xDC-0xDF */
+	0xE4, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x95, 0xEC, 0x00, 0x00, 0xE4, 0xBF, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xD8, /* 0xEC-0xEF */
+	0x8C, 0xD4, 0x95, 0x48, 0xE4, 0xC9, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE4, 0xBD, 0x00, 0x00, 0xEE, 0x7A, 0xE4, 0xC6, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xD0, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE4, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_84[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xC2, /* 0x00-0x03 */
+	0x93, 0xB8, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xC7, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xC4, /* 0x08-0x0B */
+	0x96, 0x47, 0xE4, 0xCA, 0x88, 0xDE, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xBE, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE4, 0xCC, 0x00, 0x00, 0xE4, 0xCB, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x94, 0x8B, 0xE4, 0xD2, 0x00, 0x00, /* 0x28-0x2B */
+	0xE4, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8A, 0x9E, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE4, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xE4, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xE4, 0xD3, 0x97, 0x8E, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xDC, 0x00, 0x00, /* 0x44-0x47 */
+	0xEE, 0x7B, 0x97, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xA8, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x98, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x8B, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x95, 0x92, 0xE4, 0xE2, 0x93, 0x9F, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0xAF, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE4, 0xDB, 0x00, 0x00, 0xE4, 0xD7, /* 0x68-0x6B */
+	0x91, 0x92, 0xE4, 0xD1, 0xE4, 0xD9, 0xE4, 0xDE, /* 0x6C-0x6F */
+	0x00, 0x00, 0x94, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x88, 0xA8, 0x00, 0x00, 0xE4, 0xD6, /* 0x74-0x77 */
+	0x00, 0x00, 0xE4, 0xDF, 0x95, 0x98, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xDA, 0x00, 0x00, /* 0x80-0x83 */
+	0xE4, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0xD3, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8F, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x8E, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x96, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x95, 0x66, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xE5, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE4, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE4, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0x97, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xEE, 0x7C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x8F, 0xF6, 0xE4, 0xE3, 0x00, 0x00, 0xE4, 0xE8, /* 0xB8-0xBB */
+	0x91, 0x93, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xE4, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE4, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x92, 0x7E, 0x00, 0x00, 0xE4, 0xEC, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x97, 0x75, 0xE4, 0xE1, 0x8A, 0x57, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE4, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xE4, 0xEA, 0x96, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xED, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE4, 0xE6, 0xE4, 0xE9, 0x00, 0x00, /* 0xD8-0xDB */
+	0xED, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x96, 0x48, 0x00, 0x00, 0x98, 0x40, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE4, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE4, 0xF8, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xF0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_85[512] = {
+	0x8E, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xCF, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x95, 0xCC, 0x00, 0x00, 0x96, 0xA0, /* 0x10-0x13 */
+	0xE4, 0xF7, 0xE4, 0xF6, 0x00, 0x00, 0xE4, 0xF2, /* 0x14-0x17 */
+	0xE4, 0xF3, 0x00, 0x00, 0x89, 0x55, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xF5, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE4, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0xD3, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xE4, 0xF4, 0x88, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x91, 0xA0, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x95, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE4, 0xF9, 0xE5, 0x40, 0x00, 0x00, 0x94, 0xD7, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xE4, 0xFC, 0x8F, 0xD4, 0x8E, 0xC7, 0xE5, 0x42, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xBC, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x7D, /* 0x50-0x53 */
+	0x00, 0x00, 0xE5, 0x43, 0x00, 0x00, 0x95, 0x99, /* 0x54-0x57 */
+	0xE4, 0xFB, 0xEE, 0x7E, 0xE4, 0xD4, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xFA, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x98, 0x6E, 0x93, 0xA0, 0x95, 0x93, 0xEE, 0x80, /* 0x68-0x6B */
+	0x00, 0x00, 0xE5, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x50, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x51, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xE5, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x94, 0x96, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x4E, /* 0x84-0x87 */
+	0xE5, 0x46, 0x00, 0x00, 0xE5, 0x48, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE5, 0x52, 0xE5, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE5, 0x4B, 0x00, 0x00, 0x00, 0x00, 0x89, 0x92, /* 0x94-0x97 */
+	0x00, 0x00, 0x93, 0xE3, 0x00, 0x00, 0xE5, 0x4C, /* 0x98-0x9B */
+	0xE5, 0x4F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE5, 0x45, 0x00, 0x00, 0x91, 0x45, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE5, 0x49, 0x8E, 0x46, 0x90, 0x64, 0x8C, 0x4F, /* 0xA8-0xAB */
+	0x96, 0xF2, 0x00, 0x00, 0x96, 0xF7, 0x8F, 0x92, /* 0xAC-0xAF */
+	0xEE, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE5, 0x56, 0xE5, 0x54, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x98, 0x6D, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE5, 0x53, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x97, 0x95, 0x00, 0x00, 0xE5, 0x55, /* 0xCC-0xCF */
+	0xE5, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE5, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE5, 0x5B, 0xE5, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x93, 0xA1, 0xE5, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x94, 0xCB, 0xE5, 0x4D, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x93, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE5, 0x5C, 0xE5, 0x61, 0x91, 0x94, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x60, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_86[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x41, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x62, 0x91, 0x68, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x5D, 0xE5, 0x5F, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x5E, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x9F, 0x50, 0x9F, 0x41, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x64, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x63, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x97, 0x96, 0x00, 0x00, 0xE1, 0xBA, /* 0x2C-0x2F */
+	0xE5, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x66, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xE5, 0x67, 0x8C, 0xD5, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8B, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE5, 0x69, 0x99, 0x7C, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x95, 0x00, 0x00, /* 0x58-0x5B */
+	0x97, 0xB8, 0x00, 0x00, 0x8B, 0xF1, 0xE5, 0x6A, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x6B, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x8E, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xE5, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x93, 0xF8, 0x00, 0x00, 0x88, 0xB8, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xE1, 0xE5, 0x71, /* 0x88-0x8B */
+	0xE5, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x6D, /* 0x90-0x93 */
+	0x00, 0x00, 0x8E, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x6E, /* 0xA0-0xA3 */
+	0x94, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE5, 0x6F, 0xE5, 0x70, 0xE5, 0x7A, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x74, /* 0xAC-0xAF */
+	0xE5, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x73, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE5, 0x75, 0x00, 0x00, 0xE5, 0x76, 0x8E, 0xD6, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE5, 0x78, 0x00, 0x00, 0x92, 0x60, /* 0xC8-0xCB */
+	0x00, 0x00, 0x8C, 0x75, 0x8A, 0x61, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE5, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x8A, 0x5E, 0x00, 0x00, 0xE5, 0x81, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x7C, 0xE5, 0x80, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x94, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE5, 0x7D, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE5, 0x7E, 0x95, 0x67, 0x94, 0xD8, 0xE5, 0x82, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x91, 0xFB, 0xE5, 0x8C, 0x00, 0x00, 0xE5, 0x88, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xE9, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_87[512] = {
+	0xE5, 0x86, 0x00, 0x00, 0x96, 0x49, 0xE5, 0x87, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x84, 0x00, 0x00, /* 0x04-0x07 */
+	0xE5, 0x85, 0xE5, 0x8A, 0xE5, 0x8D, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE5, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE5, 0x89, 0xE5, 0x83, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x92, 0x77, 0x00, 0x00, 0xE5, 0x94, 0x00, 0x00, /* 0x18-0x1B */
+	0x96, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE5, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE5, 0x93, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE5, 0x8E, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x90, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x91, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x8F, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x90, 0xE4, 0x00, 0x00, 0x98, 0x58, /* 0x48-0x4B */
+	0xE5, 0x98, 0x00, 0x00, 0xE5, 0x99, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x9F, /* 0x50-0x53 */
+	0x00, 0x00, 0x90, 0x49, 0x00, 0x00, 0xE5, 0x9B, /* 0x54-0x57 */
+	0x00, 0x00, 0xE5, 0x9E, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x96, /* 0x5C-0x5F */
+	0xE5, 0x95, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA0, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xDA, 0x00, 0x00, /* 0x64-0x67 */
+	0xE5, 0x9C, 0x00, 0x00, 0xE5, 0xA1, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x9D, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE5, 0x9A, 0x00, 0x00, 0x92, 0xB1, 0x00, 0x00, /* 0x74-0x77 */
+	0xE5, 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x88, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xA5, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x97, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA4, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xA3, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xAC, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA6, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xAE, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0x86, 0xE5, 0xB1, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE5, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE5, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE5, 0xAD, 0x00, 0x00, 0xE5, 0xB0, 0xE5, 0xAF, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA7, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xE5, 0xAA, 0x00, 0x00, 0xE5, 0xBB, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xE5, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xB2, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xB3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xB8, 0xE5, 0xB9, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x8A, 0x49, 0x00, 0x00, 0x8B, 0x61, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xB7, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_88[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE5, 0xA2, 0x00, 0x00, 0xEE, 0x85, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE5, 0xB6, 0xE5, 0xBA, 0xE5, 0xB5, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE5, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE5, 0xBE, 0xE5, 0xBD, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE5, 0xC0, 0xE5, 0xBF, 0xE5, 0x79, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xC4, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE5, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xC2, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xE5, 0xC3, 0x00, 0x00, 0xE5, 0xC5, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x8C, 0x8C, 0x00, 0x00, 0xE5, 0xC7, 0x00, 0x00, /* 0x40-0x43 */
+	0xE5, 0xC6, 0x00, 0x00, 0x8F, 0x4F, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x8D, 0x73, 0x9F, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xC8, 0x8F, 0x70, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x58, /* 0x54-0x57 */
+	0x00, 0x00, 0xE5, 0xC9, 0x00, 0x00, 0x89, 0x71, /* 0x58-0x5B */
+	0x00, 0x00, 0x8F, 0xD5, 0xE5, 0xCA, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x8D, 0x74, 0xE5, 0xCB, 0x88, 0xDF, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x95, 0x5C, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xCC, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x90, 0x8A, 0x00, 0x00, 0xE5, 0xD3, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xE5, 0xD0, 0x00, 0x00, 0x92, 0x8F, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE5, 0xD1, 0xE5, 0xCE, 0x8B, 0xDC, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE5, 0xCD, 0xE5, 0xD4, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x8C, 0x55, 0x00, 0x00, 0x00, 0x00, 0x91, 0xDC, /* 0x88-0x8B */
+	0x00, 0x00, 0xE5, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xD6, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xB3, 0xE5, 0xD5, /* 0x94-0x97 */
+	0x00, 0x00, 0xE5, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xCF, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xD9, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE5, 0xDB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xED, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xD7, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE5, 0xDC, 0xE5, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x8C, 0xD1, 0xE5, 0xD2, 0x00, 0x00, 0x88, 0xBF, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xDD, /* 0xBC-0xBF */
+	0x00, 0x00, 0x8D, 0xD9, 0x97, 0xF4, 0xE5, 0xDF, /* 0xC0-0xC3 */
+	0xE5, 0xE0, 0x91, 0x95, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xA0, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE5, 0xE1, 0x97, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE5, 0xE2, 0xE5, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x95, 0xE2, 0xE5, 0xE4, 0x00, 0x00, 0x8D, 0xBE, /* 0xDC-0xDF */
+	0x00, 0x00, 0x97, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE5, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xEA, 0x8F, 0xD6, /* 0xF0-0xF3 */
+	0xE5, 0xE8, 0xEE, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x97, 0x87, 0xE5, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE5, 0xE7, 0x90, 0xBB, 0x90, 0x9E, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_89[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xE6, 0x00, 0x00, /* 0x00-0x03 */
+	0xE5, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x95, 0xA1, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xED, 0x00, 0x00, /* 0x08-0x0B */
+	0xE5, 0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x8A, 0x8C, 0x00, 0x00, 0x96, 0x4A, 0xE5, 0xEE, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xED, 0x41, 0xE5, 0xFA, 0xE5, 0xF0, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE5, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xF2, 0xE5, 0xF3, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xF7, 0x00, 0x00, /* 0x34-0x37 */
+	0xE5, 0xF8, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xF6, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE5, 0xF4, 0x00, 0x00, 0xE5, 0xEF, /* 0x40-0x43 */
+	0xE5, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE5, 0xF9, 0xE8, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xA6, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xFC, 0x8B, 0xDD, /* 0x5C-0x5F */
+	0xE5, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xE6, 0x41, 0x00, 0x00, 0xE6, 0x40, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x43, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xE6, 0x42, 0x00, 0x00, 0xE6, 0x44, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x8F, 0x50, 0x00, 0x00, /* 0x70-0x73 */
+	0xE6, 0x45, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x46, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x47, 0x90, 0xBC, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x97, 0x76, 0x00, 0x00, 0xE6, 0x48, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xA2, 0x94, 0x65, /* 0x84-0x87 */
+	0xE6, 0x49, 0x00, 0x00, 0xE6, 0x4A, 0x8C, 0xA9, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x4B, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x4B, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x8B, 0x94, 0x60, /* 0x94-0x97 */
+	0xE6, 0x4C, 0x00, 0x00, 0x8A, 0x6F, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE6, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x4F, 0x97, 0x97, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE6, 0x4E, 0x90, 0x65, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE6, 0x50, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x51, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x52, 0x8A, 0xCF, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x53, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE6, 0x54, 0x00, 0x00, 0xE6, 0x55, /* 0xBC-0xBF */
+	0xE6, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x8A, 0x70, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x57, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE6, 0x58, 0xE6, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xF0, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x47, 0xE6, 0x5A, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE6, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xE6, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_8A[512] = {
+	0x8C, 0xBE, 0x00, 0x00, 0x92, 0xF9, 0xE6, 0x5D, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x8C, 0x76, 0x00, 0x00, 0x90, 0x75, 0x00, 0x00, /* 0x08-0x0B */
+	0xE6, 0x60, 0x00, 0x00, 0x93, 0xA2, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE6, 0x5F, 0x00, 0x00, 0xEE, 0x87, 0x8C, 0x50, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x5E, 0x91, 0xF5, /* 0x14-0x17 */
+	0x8B, 0x4C, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x61, /* 0x18-0x1B */
+	0x00, 0x00, 0xE6, 0x62, 0x00, 0x00, 0x8F, 0xD7, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x8D, /* 0x20-0x23 */
+	0x00, 0x00, 0xE6, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x4B, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x90, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8B, 0x96, 0x00, 0x00, 0x96, 0xF3, /* 0x30-0x33 */
+	0x91, 0x69, 0x00, 0x00, 0xE6, 0x64, 0xEE, 0x88, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x66, 0x92, 0x90, /* 0x38-0x3B */
+	0x8F, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE6, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x68, 0x00, 0x00, /* 0x44-0x47 */
+	0xE6, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x8D, 0xBC, 0x91, 0xC0, 0xE6, 0x67, 0x00, 0x00, /* 0x50-0x53 */
+	0x8F, 0xD9, 0x95, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x66, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x8C, 0x00, 0x00, /* 0x5C-0x5F */
+	0x89, 0x72, 0x00, 0x00, 0xE6, 0x6D, 0x8C, 0x77, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x8E, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x8E, 0x8D, 0x00, 0x00, 0x98, 0x6C, /* 0x68-0x6B */
+	0xE6, 0x6C, 0xE6, 0x6B, 0x91, 0x46, 0x00, 0x00, /* 0x6C-0x6F */
+	0x8B, 0x6C, 0x98, 0x62, 0x8A, 0x59, 0x8F, 0xDA, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xEE, 0x89, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE6, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x6F, 0x00, 0x00, /* 0x80-0x83 */
+	0xE6, 0x70, 0xE6, 0x6E, 0x00, 0x00, 0x8C, 0xD6, /* 0x84-0x87 */
+	0x00, 0x00, 0x97, 0x5F, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x8E, 0x8F, 0x94, 0x46, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE6, 0x73, 0x00, 0x00, 0x90, 0xBE, /* 0x90-0x93 */
+	0x00, 0x00, 0x92, 0x61, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x97, 0x55, 0x00, 0x00, 0xE6, 0x76, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xEA, 0x00, 0x00, /* 0x9C-0x9F */
+	0x90, 0xBD, 0xE6, 0x72, 0x00, 0x00, 0xE6, 0x77, /* 0xA0-0xA3 */
+	0x8C, 0xEB, 0xE6, 0x74, 0xE6, 0x75, 0xEE, 0x8A, /* 0xA4-0xA7 */
+	0xE6, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x90, 0xE0, 0x93, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x92, 0x4E, 0x00, 0x00, 0x89, 0xDB, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x94, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x8B, 0x62, 0x00, 0x00, 0xEE, 0x8B, 0x92, 0xB2, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x7A, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE6, 0x78, 0x00, 0x00, 0x00, 0x00, 0x92, 0x6B, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xBF, /* 0xC8-0xCB */
+	0x8A, 0xD0, 0xE6, 0x79, 0x00, 0x00, 0x90, 0x7A, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xC8, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x5F, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x7B, 0xE6, 0x87, /* 0xD8-0xDB */
+	0x92, 0xB3, 0x00, 0x00, 0xE6, 0x86, 0xEE, 0x8C, /* 0xDC-0xDF */
+	0xE6, 0x83, 0xE6, 0x8B, 0xE6, 0x84, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE6, 0x80, 0x00, 0x00, 0x92, 0xFA, 0xE6, 0x7E, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x7C, /* 0xE8-0xEB */
+	0x00, 0x00, 0x97, 0x40, 0x8E, 0x90, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE6, 0x81, 0x00, 0x00, 0xE6, 0x7D, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x8E, 0xE6, 0x85, /* 0xF4-0xF7 */
+	0x8F, 0x94, 0x00, 0x00, 0x8C, 0xBF, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xF8, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8B[512] = {
+	0x96, 0x64, 0x89, 0x79, 0x88, 0xE0, 0x00, 0x00, /* 0x00-0x03 */
+	0x93, 0xA3, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x89, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xE6, 0x88, 0x00, 0x00, 0x93, 0xE4, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE6, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xE6, 0x82, 0x00, 0x00, 0xE6, 0x8C, 0xE6, 0x8E, /* 0x14-0x17 */
+	0x00, 0x00, 0x8C, 0xAA, 0xE6, 0x8A, 0x8D, 0x75, /* 0x18-0x1B */
+	0x00, 0x00, 0x8E, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE6, 0x8F, 0x97, 0x77, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x92, 0x00, 0x00, /* 0x24-0x27 */
+	0xE6, 0x95, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x93, /* 0x28-0x2B */
+	0x95, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x90, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x8B, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x94, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE6, 0x96, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE6, 0x9A, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE6, 0x97, 0x00, 0x00, 0xE6, 0x99, 0xE6, 0x98, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x8F, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x9B, 0x00, 0x00, /* 0x54-0x57 */
+	0x8E, 0xAF, 0x00, 0x00, 0xE6, 0x9D, 0xE6, 0x9C, /* 0x58-0x5B */
+	0x95, 0x88, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x9F, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x78, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x9E, /* 0x68-0x6B */
+	0xE6, 0xA0, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xA1, /* 0x6C-0x6F */
+	0x8B, 0x63, 0xE3, 0xBF, 0x8F, 0xF7, 0x00, 0x00, /* 0x70-0x73 */
+	0xE6, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xEC, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE6, 0xA3, 0x00, 0x00, 0xEE, 0x90, /* 0x7C-0x7F */
+
+	0xE6, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x5D, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x9D, 0xCC, 0x00, 0x00, /* 0x88-0x8B */
+	0xE6, 0xA5, 0x00, 0x00, 0xE6, 0xA6, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8F, 0x51, 0x00, 0x00, 0xE6, 0xA7, 0xE6, 0xA8, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xA9, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xE6, 0xAA, 0xE6, 0xAB, 0x00, 0x00, /* 0x98-0x9B */
+};
+
+static unsigned char u2c_8C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x4A, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xAC, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xAE, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE6, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xA4, 0x00, 0x00, /* 0x44-0x47 */
+	0xE6, 0xAF, 0x00, 0x00, 0x96, 0x4C, 0x00, 0x00, /* 0x48-0x4B */
+	0xE6, 0xB0, 0x00, 0x00, 0xE6, 0xB1, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE6, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE6, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0xD8, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x8F, 0xDB, 0xE6, 0xB4, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0x8B, 0x98, 0xAC, /* 0x68-0x6B */
+	0xE6, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xE6, 0xB6, 0x95, 0x5E, 0xE6, 0xB7, 0x00, 0x00, /* 0x78-0x7B */
+	0xE6, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xB8, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE6, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE6, 0xB9, 0xE6, 0xBB, 0x00, 0x00, /* 0x88-0x8B */
+	0x96, 0x65, 0xE6, 0xBC, 0xE6, 0xBD, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE6, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xE6, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x8A, 0x4C, 0x92, 0xE5, 0x00, 0x00, /* 0x9C-0x9F */
+	0x95, 0x89, 0x8D, 0xE0, 0x8D, 0x76, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x6E, /* 0xA4-0xA7 */
+	0x89, 0xDD, 0x94, 0xCC, 0xE6, 0xC3, 0x8A, 0xD1, /* 0xA8-0xAB */
+	0x90, 0xD3, 0xE6, 0xC2, 0xE6, 0xC7, 0x92, 0x99, /* 0xAC-0xAF */
+	0x96, 0xE1, 0x00, 0x00, 0xE6, 0xC5, 0xE6, 0xC6, /* 0xB0-0xB3 */
+	0x8B, 0x4D, 0x00, 0x00, 0xE6, 0xC8, 0x94, 0x83, /* 0xB4-0xB7 */
+	0x91, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x94, 0xEF, /* 0xB8-0xBB */
+	0x93, 0x5C, 0xE6, 0xC4, 0x00, 0x00, 0x96, 0x66, /* 0xBC-0xBF */
+	0x89, 0xEA, 0xE6, 0xCA, 0x98, 0x47, 0x92, 0xC0, /* 0xC0-0xC3 */
+	0x98, 0x64, 0x00, 0x00, 0x00, 0x00, 0x8E, 0x91, /* 0xC4-0xC7 */
+	0xE6, 0xC9, 0x00, 0x00, 0x91, 0xAF, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE6, 0xDA, 0x91, 0x47, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x93, 0xF6, 0x00, 0x00, 0x95, 0x6F, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xCD, 0x8E, 0x5E, /* 0xD8-0xDB */
+	0x8E, 0x92, 0x00, 0x00, 0x8F, 0xDC, 0x00, 0x00, /* 0xDC-0xDF */
+	0x94, 0x85, 0x00, 0x00, 0x8C, 0xAB, 0xE6, 0xCC, /* 0xE0-0xE3 */
+	0xE6, 0xCB, 0x00, 0x00, 0x95, 0x8A, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xBF, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x93, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xEE, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEE, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xCF, 0xE6, 0xD0, /* 0xF8-0xFB */
+	0x8D, 0x77, 0xE6, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE6, 0xD1, 0xE6, 0xD2, 0x00, 0x00, 0xE6, 0xD4, /* 0x04-0x07 */
+	0x91, 0xA1, 0x00, 0x00, 0xE6, 0xD3, 0x8A, 0xE4, /* 0x08-0x0B */
+	0x00, 0x00, 0xE6, 0xD6, 0x00, 0x00, 0xE6, 0xD5, /* 0x0C-0x0F */
+	0xE6, 0xD7, 0x00, 0x00, 0xEE, 0x93, 0xE6, 0xD9, /* 0x10-0x13 */
+	0xE6, 0xDB, 0x00, 0x00, 0xE6, 0xDC, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x90, 0xD4, 0x00, 0x00, 0x8E, 0xCD, 0xE6, 0xDD, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x71, /* 0x68-0x6B */
+	0x00, 0x00, 0xE6, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x91, 0x96, 0xE6, 0xDF, 0x00, 0x00, 0xE6, 0xE0, /* 0x70-0x73 */
+	0x95, 0x8B, 0x00, 0x00, 0xEE, 0x94, 0x8B, 0x4E, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE6, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x92, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x7A, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xE6, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xEF, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x90, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xAB, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xE5, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xE4, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xE3, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xEB, /* 0xC8-0xCB */
+	0xE6, 0xE9, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE6, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xE8, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xE7, 0xE6, 0xEA, /* 0xD8-0xDB */
+	0x00, 0x00, 0x8B, 0x97, 0x00, 0x00, 0xE6, 0xEE, /* 0xDC-0xDF */
+	0x00, 0x00, 0x90, 0xD5, 0x00, 0x00, 0xE6, 0xEF, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x8C, 0xD7, 0x00, 0x00, 0xE6, 0xEC, 0xE6, 0xED, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x48, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xB5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x91, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE6, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xF3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xE6, 0xF1, 0xE6, 0xF2, 0x97, 0x78, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xA5, /* 0x0C-0x0F */
+	0xE6, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xE6, 0xF4, 0xE6, 0xF5, 0xE6, 0xF7, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x48, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE6, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE6, 0xFB, 0xE6, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xF8, 0x00, 0x00, /* 0x40-0x43 */
+	0x92, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x40, /* 0x44-0x47 */
+	0xE7, 0x44, 0xE7, 0x41, 0xE6, 0xFC, 0x00, 0x00, /* 0x48-0x4B */
+	0xE7, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE7, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE7, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE7, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xD6, /* 0x5C-0x5F */
+	0xE7, 0x47, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x49, /* 0x60-0x63 */
+	0xE7, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x4C, 0x00, 0x00, /* 0x70-0x73 */
+	0x8F, 0x52, 0x00, 0x00, 0xE7, 0x4B, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE7, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE7, 0x4E, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE7, 0x51, 0xE7, 0x50, 0x00, 0x00, 0xE7, 0x4F, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x53, 0xE7, 0x52, /* 0x88-0x8B */
+	0x00, 0x00, 0x96, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE7, 0x55, 0x00, 0x00, 0xE7, 0x54, /* 0x90-0x93 */
+	0xE7, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xE7, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE7, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x58, 0x90, 0x67, /* 0xA8-0xAB */
+	0xE7, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xEB, /* 0xAC-0xAF */
+	0xE7, 0x5B, 0xE7, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x5E, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE7, 0x5F, 0xE7, 0x5C, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE7, 0x60, 0x00, 0x00, 0x8E, 0xD4, 0xE7, 0x61, /* 0xC8-0xCB */
+	0x8B, 0x4F, 0x8C, 0x52, 0x00, 0x00, 0xEE, 0x96, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0xAC, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x62, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xEE, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x5D, 0xE7, 0x63, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x66, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x8E, 0xB2, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x65, /* 0xF8-0xFB */
+	0xE7, 0x64, 0x8C, 0x79, 0xE7, 0x67, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x72, /* 0x00-0x03 */
+	0x00, 0x00, 0xE7, 0x69, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x8D, 0xDA, 0xE7, 0x68, 0x00, 0x00, /* 0x08-0x0B */
+	0xE7, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x6B, 0xE7, 0x6D, /* 0x10-0x13 */
+	0x95, 0xE3, 0xE7, 0x6A, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE7, 0x6C, 0x00, 0x00, 0xE7, 0x70, /* 0x18-0x1B */
+	0xE7, 0x6E, 0x8B, 0x50, 0x00, 0x00, 0xE7, 0x6F, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x72, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x94, 0x79, 0x97, 0xD6, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x53, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x73, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x97, 0x41, 0xE7, 0x75, 0x00, 0x00, 0xE7, 0x74, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x78, 0x97, 0x60, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x77, 0x00, 0x00, /* 0x40-0x43 */
+	0x8A, 0x8D, 0xE7, 0x76, 0xE7, 0x7B, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE7, 0x7A, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE7, 0x79, 0x93, 0x51, 0xE7, 0x7C, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x7D, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xE7, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x8C, /* 0x5C-0x5F */
+	0x00, 0x00, 0x8C, 0x44, 0xE7, 0x80, 0xE7, 0x81, /* 0x60-0x63 */
+	0xE7, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x68, /* 0x98-0x9B */
+	0xE7, 0x83, 0x00, 0x00, 0x8E, 0xAB, 0xE7, 0x84, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x85, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x9F, /* 0xA4-0xA7 */
+	0x99, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE7, 0x86, 0xE3, 0x90, 0xE7, 0x87, /* 0xAC-0xAF */
+	0x92, 0x43, 0x90, 0x4A, 0x94, 0x5F, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x88, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0xD3, 0x92, 0xD2, /* 0xB8-0xBB */
+	0x8D, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x92, 0x48, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x49, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x96, 0x98, 0x90, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x8C, 0x7D, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x8B, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x95, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x89, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x8B, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE7, 0x8A, 0x89, 0xDE, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x93, 0xF4, 0xE7, 0x8C, 0x94, 0x97, /* 0xE8-0xEB */
+	0x00, 0x00, 0x93, 0x52, 0x00, 0x00, 0xE7, 0x8D, /* 0xEC-0xEF */
+	0x8F, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE7, 0x8F, 0x00, 0x00, 0x00, 0x00, 0x96, 0xC0, /* 0xF4-0xF7 */
+	0xE7, 0x9E, 0xE7, 0x91, 0xE7, 0x92, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x92, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_90[512] = {
+	0x91, 0xDE, 0x91, 0x97, 0x00, 0x00, 0x93, 0xA6, /* 0x00-0x03 */
+	0x00, 0x00, 0xE7, 0x90, 0x8B, 0x74, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x99, /* 0x08-0x0B */
+	0x00, 0x00, 0xE7, 0x96, 0xE7, 0xA3, 0x93, 0xA7, /* 0x0C-0x0F */
+	0x92, 0x80, 0xE7, 0x93, 0x00, 0x00, 0x92, 0xFC, /* 0x10-0x13 */
+	0x93, 0x72, 0xE7, 0x94, 0xE7, 0x98, 0x90, 0x80, /* 0x14-0x17 */
+	0x00, 0x00, 0x94, 0x87, 0x92, 0xCA, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x90, 0xC0, 0xE7, 0x97, 0x91, 0xAC, /* 0x1C-0x1F */
+	0x91, 0xA2, 0xE7, 0x95, 0x88, 0xA7, 0x98, 0x41, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x9A, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0xDF, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x8F, 0x54, 0x90, 0x69, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE7, 0x9C, 0xE7, 0x9B, 0x00, 0x00, /* 0x34-0x37 */
+	0x88, 0xED, 0xE7, 0x9D, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x95, 0x4E, 0x00, 0x00, 0xE7, 0xA5, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x93, 0xD9, 0x90, 0x8B, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x92, 0x78, 0x00, 0x00, 0x8B, 0xF6, /* 0x44-0x47 */
+	0x00, 0x00, 0xE7, 0xA4, 0x97, 0x56, 0x89, 0x5E, /* 0x48-0x4B */
+	0x00, 0x00, 0x95, 0xD5, 0x89, 0xDF, 0xE7, 0x9F, /* 0x4C-0x4F */
+	0xE7, 0xA0, 0xE7, 0xA1, 0xE7, 0xA2, 0x93, 0xB9, /* 0x50-0x53 */
+	0x92, 0x42, 0x88, 0xE1, 0xE7, 0xA6, 0x00, 0x00, /* 0x54-0x57 */
+	0xE7, 0xA7, 0xEA, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x91, 0xBB, 0x00, 0x00, 0xE7, 0xA8, 0x00, 0x00, /* 0x5C-0x5F */
+	0x89, 0x93, 0x91, 0x6B, 0x00, 0x00, 0x8C, 0xAD, /* 0x60-0x63 */
+	0x00, 0x00, 0x97, 0x79, 0x00, 0x00, 0xEE, 0x99, /* 0x64-0x67 */
+	0xE7, 0xA9, 0x93, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x91, 0x98, 0x8E, 0xD5, 0xE7, 0xAA, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xAD, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x8F, 0x85, 0xE7, 0xAB, 0x91, 0x4A, /* 0x74-0x77 */
+	0x91, 0x49, 0x00, 0x00, 0x88, 0xE2, 0x00, 0x00, /* 0x78-0x7B */
+	0x97, 0xC9, 0xE7, 0xAF, 0x00, 0x00, 0x94, 0xF0, /* 0x7C-0x7F */
+
+	0xE7, 0xB1, 0xE7, 0xB0, 0xE7, 0xAE, 0xE2, 0x84, /* 0x80-0x83 */
+	0x8A, 0xD2, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x8E, /* 0x84-0x87 */
+	0x00, 0x00, 0xE7, 0xB3, 0xE7, 0xB2, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xB4, /* 0x8C-0x8F */
+	0x00, 0x00, 0x97, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0xDF, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x4D, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE7, 0xB5, 0x00, 0x00, 0x8E, 0xD7, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xB6, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE7, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE7, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x93, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x88, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x8D, 0x78, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x59, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xBC, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x9A, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x8C, 0x53, 0xE7, 0xB9, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE7, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x95, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x8A, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x97, 0x58, 0x00, 0x00, 0x8B, 0xBD, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x93, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_91[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xBD, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xBE, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xEE, 0x9C, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE7, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x9D, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x93, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE7, 0xC1, 0x00, 0x00, 0xE7, 0xC0, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x93, 0xD1, 0xE7, 0xC2, 0x8F, 0x55, /* 0x48-0x4B */
+	0x8E, 0xDE, 0x94, 0x7A, 0x92, 0x91, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xF0, 0x00, 0x00, /* 0x50-0x53 */
+	0x90, 0x8C, 0x00, 0x00, 0xE7, 0xC3, 0x00, 0x00, /* 0x54-0x57 */
+	0xE7, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x90, 0x7C, 0xE7, 0xC5, /* 0x60-0x63 */
+	0x00, 0x00, 0xE7, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE7, 0xC7, 0x97, 0x8F, 0x00, 0x00, /* 0x68-0x6B */
+	0x8F, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xC9, 0xE7, 0xC8, /* 0x70-0x73 */
+	0x00, 0x00, 0x8D, 0x79, 0x00, 0x00, 0x8D, 0x93, /* 0x74-0x77 */
+	0x8E, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xCC, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x86, /* 0x84-0x87 */
+	0x00, 0x00, 0xE7, 0xCB, 0x00, 0x00, 0xE7, 0xCA, /* 0x88-0x8B */
+	0x00, 0x00, 0x91, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x8C, 0xED, 0x00, 0x00, 0x90, 0xC1, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xAE, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x8F, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xCD, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x8F, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xD0, 0xE7, 0xCE, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xCF, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE7, 0xD2, 0xE7, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x8F, 0xF8, 0x00, 0x00, 0xE7, 0xD3, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE7, 0xD4, 0xE7, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xCE, 0x8D, 0xD1, /* 0xC4-0xC7 */
+	0x8E, 0xDF, 0xE7, 0xD6, 0x00, 0x00, 0xE7, 0xD7, /* 0xC8-0xCB */
+	0x97, 0xA2, 0x8F, 0x64, 0x96, 0xEC, 0x97, 0xCA, /* 0xCC-0xCF */
+	0xE7, 0xD8, 0x8B, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xD9, 0xEE, 0x9F, /* 0xD4-0xD7 */
+	0x93, 0x42, 0x00, 0x00, 0xEE, 0x9E, 0xE7, 0xDC, /* 0xD8-0xDB */
+	0x8A, 0x98, 0x90, 0x6A, 0xEE, 0xA0, 0xE7, 0xDA, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE7, 0xDB, 0x00, 0x00, 0x92, 0xDE, /* 0xE0-0xE3 */
+	0xEE, 0xA3, 0xEE, 0xA4, 0x96, 0x74, 0x8B, 0xFA, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xEE, 0xA1, 0xEE, 0xA2, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE7, 0xDE, 0xE7, 0xDF, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE7, 0xDD, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xE1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_92[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xA5, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xA7, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x93, 0xDD, 0x8A, 0x62, 0x00, 0x00, /* 0x0C-0x0F */
+	0xEE, 0xA6, 0xE7, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xE7, 0xE2, 0xE7, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xE0, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE8, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xE7, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x97, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xD8, /* 0x34-0x37 */
+	0x00, 0x00, 0xEE, 0xAE, 0xEE, 0xA8, 0x00, 0x00, /* 0x38-0x3B */
+	0xEE, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xED, /* 0x3C-0x3F */
+	0xEE, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x93, 0x53, 0xE7, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xE7, 0xEB, 0xE7, 0xE9, 0x00, 0x00, 0xE7, 0xEE, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xAB, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE7, 0xEF, 0xEE, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xE7, /* 0x54-0x57 */
+	0x00, 0x00, 0xEE, 0xAC, 0xE7, 0xF4, 0x89, 0x94, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xE6, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xAB, 0x00, 0x00, /* 0x60-0x63 */
+	0xE7, 0xEA, 0x00, 0x00, 0x8F, 0xDE, 0xEE, 0xAF, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x8D, 0x7A, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xB1, /* 0x74-0x77 */
+	0xEE, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x67, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x8B, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x65, /* 0x80-0x83 */
+	0x00, 0x00, 0x93, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xED, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x91, 0x4C, 0x00, 0x00, 0xE7, 0xF2, /* 0x90-0x93 */
+	0x00, 0x00, 0xE7, 0xEC, 0xE7, 0xF1, 0x00, 0x00, /* 0x94-0x97 */
+	0x96, 0xC1, 0x00, 0x00, 0x92, 0xB6, 0xE7, 0xF3, /* 0x98-0x9B */
+	0xE7, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xB0, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x91, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF7, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE7, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF5, /* 0xCC-0xCF */
+	0xEE, 0xB6, 0x00, 0x00, 0x96, 0x4E, 0xEE, 0xBA, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xEE, 0xB8, 0x00, 0x00, 0xEE, 0xB4, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xEE, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xEE, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x8F, 0x9B, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xB3, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE7, 0xF8, 0x95, 0xDD, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x89, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x95, 0x65, 0x92, 0x92, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x8B, 0x98, 0xED, 0x49, 0xE7, 0xFA, 0xEE, 0xBD, /* 0xF8-0xFB */
+	0x8D, 0x7C, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xC0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_93[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xC2, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0x4B, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF9, /* 0x0C-0x0F */
+	0x90, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x90, 0x8E, 0xE8, 0x40, 0xE8, 0x42, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xEE, 0xC1, 0xEE, 0xBF, 0x00, 0x00, /* 0x1C-0x1F */
+	0x8F, 0xF9, 0xEE, 0xBC, 0xE8, 0x41, 0xE8, 0x43, /* 0x20-0x23 */
+	0x00, 0x00, 0xEE, 0xBB, 0x8B, 0xD1, 0x00, 0x00, /* 0x24-0x27 */
+	0x95, 0x64, 0x00, 0x00, 0x00, 0x00, 0x8E, 0xE0, /* 0x28-0x2B */
+	0x98, 0x42, 0x00, 0x00, 0xE7, 0xFC, 0x8D, 0xF6, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x5E, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE8, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x44, 0xE8, 0x46, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE7, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xED, 0x42, 0x00, 0x00, 0x00, 0x00, 0x93, 0xE7, /* 0x48-0x4B */
+	0x00, 0x00, 0x93, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x92, 0xD5, 0x00, 0x00, 0xE8, 0x4B, 0xEE, 0xC4, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x62, /* 0x58-0x5B */
+	0xE8, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE8, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x8C, 0x4C, 0x00, 0x00, 0xE8, 0x4A, 0x00, 0x00, /* 0x6C-0x6F */
+	0xEE, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x8C, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE8, 0x49, 0x00, 0x00, 0x8F, 0xDF, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x8A, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE8, 0x4F, 0x00, 0x00, 0x8D, 0xBD, 0x91, 0x99, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x92, 0xC8, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xEE, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x8A, 0x5A, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE8, 0x4D, 0xE8, 0x4E, 0x92, 0xC1, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE8, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE8, 0x50, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x56, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xC6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE8, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xE8, 0x58, 0x93, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x51, 0xE8, 0x52, /* 0xD4-0xD7 */
+	0xE8, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE8, 0x57, 0xEE, 0xC7, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x8B, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE8, 0x5A, 0xE8, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE8, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xEE, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_94[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x5E, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x5F, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE8, 0x60, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x5D, /* 0x10-0x13 */
+	0xE8, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x8F, 0xE0, 0x93, 0xA8, 0xE8, 0x5B, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE8, 0x64, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x62, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xEE, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE8, 0x63, 0xE8, 0x61, 0x00, 0x00, /* 0x34-0x37 */
+	0x91, 0xF6, 0x00, 0x00, 0xE8, 0x65, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE8, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE8, 0x68, 0xEE, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xEE, 0xCB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x8A, 0xD3, 0xE8, 0x67, 0x96, 0xF8, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x73, 0xE8, 0x69, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x6C, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE8, 0x6A, 0x00, 0x00, 0xE8, 0x6B, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x6D, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE8, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xE8, 0x70, 0x00, 0x00, 0xE8, 0x71, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE8, 0x74, 0xE8, 0x72, 0xE8, 0x75, 0xE8, 0x77, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE8, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+};
+
+static unsigned char u2c_95[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0xB7, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x96, 0xE5, 0x00, 0x00, 0xE8, 0x78, 0x91, 0x4D, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x79, /* 0x84-0x87 */
+	0x00, 0x00, 0x95, 0xC2, 0xE8, 0x7A, 0x8A, 0x4A, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x5B, /* 0x8C-0x8F */
+	0x00, 0x00, 0x8A, 0xD5, 0xEE, 0xCC, 0x8A, 0xD4, /* 0x90-0x93 */
+	0xE8, 0x7B, 0x00, 0x00, 0xE8, 0x7C, 0x00, 0x00, /* 0x94-0x97 */
+	0xE8, 0x7D, 0xE8, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE8, 0x80, 0x00, 0x00, 0x8A, 0xD6, 0x8A, 0x74, /* 0xA0-0xA3 */
+	0x8D, 0x7D, 0x94, 0xB4, 0x00, 0x00, 0xE8, 0x82, /* 0xA4-0xA7 */
+	0xE8, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE8, 0x83, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x7B, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE8, 0x86, 0x00, 0x00, 0xE8, 0x85, /* 0xB8-0xBB */
+	0xE8, 0x84, 0x00, 0x00, 0xE8, 0x87, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x8A, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xC5, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x88, 0x00, 0x00, /* 0xC8-0xCB */
+	0xE8, 0x8C, 0xE8, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE8, 0x8E, 0xE8, 0x8D, 0xE8, 0x8F, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x93, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE8, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE8, 0x91, 0xE8, 0x93, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE8, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
+static unsigned char u2c_96[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x95, 0x8C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE8, 0x94, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xE8, 0x95, 0x00, 0x00, 0x8D, 0xE3, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0x96, 0xE8, 0x97, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x68, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x6A, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xA2, /* 0x3C-0x3F */
+	0x91, 0xC9, 0x00, 0x00, 0xE8, 0x98, 0x00, 0x00, /* 0x40-0x43 */
+	0x95, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x9B, /* 0x48-0x4B */
+	0xE8, 0x99, 0x8D, 0x7E, 0x00, 0x00, 0xE8, 0x9A, /* 0x4C-0x4F */
+	0x8C, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xC3, /* 0x58-0x5B */
+	0xE8, 0x9D, 0xE8, 0x9F, 0xE8, 0x9E, 0xE8, 0xA0, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x40, 0x90, 0x77, /* 0x60-0x63 */
+	0x8F, 0x9C, 0x8A, 0xD7, 0xE8, 0xA1, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x86, 0x00, 0x00, /* 0x68-0x6B */
+	0xE8, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x89, 0x41, 0x00, 0x00, 0xE8, 0xA2, 0x92, 0xC2, /* 0x70-0x73 */
+	0x00, 0x00, 0x97, 0xCB, 0x93, 0xA9, 0xE8, 0x9C, /* 0x74-0x77 */
+	0x97, 0xA4, 0x00, 0x00, 0x8C, 0xAF, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x97, 0x7A, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x8B, 0xF7, 0x97, 0xB2, 0x00, 0x00, /* 0x84-0x87 */
+	0x8C, 0x47, 0x00, 0x00, 0x91, 0xE0, 0xE4, 0x40, /* 0x88-0x8B */
+	0x00, 0x00, 0xE8, 0xA4, 0x8A, 0x4B, 0x90, 0x8F, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x8A, 0x75, 0xE8, 0xA6, 0x00, 0x00, 0xE8, 0xA7, /* 0x94-0x97 */
+	0xE8, 0xA5, 0x8C, 0x84, 0x00, 0x00, 0x8D, 0xDB, /* 0x98-0x9B */
+	0x8F, 0xE1, 0xEE, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x89, 0x42, 0x00, 0x00, 0x00, 0x00, 0x97, 0xD7, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xA9, /* 0xA4-0xA7 */
+	0xE7, 0xAC, 0x00, 0x00, 0xE8, 0xA8, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xD0, /* 0xAC-0xAF */
+	0xE8, 0xAC, 0xE8, 0xAA, 0xE8, 0xAB, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE8, 0xAD, 0x00, 0x00, 0xE8, 0xAE, 0x97, 0xEA, /* 0xB4-0xB7 */
+	0xE8, 0xAF, 0xE8, 0xB0, 0x00, 0x00, 0x90, 0xC7, /* 0xB8-0xBB */
+	0x94, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x90, 0x9D, 0x8A, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x97, 0x59, 0x89, 0xEB, 0x8F, 0x57, 0x8C, 0xD9, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE8, 0xB3, 0x00, 0x00, 0xE8, 0xB2, /* 0xC8-0xCB */
+	0x8E, 0x93, 0xE8, 0xB4, 0xE8, 0xB1, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x8E, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE8, 0xB8, 0xE5, 0xAB, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x99, 0xD4, 0x00, 0x00, 0x90, 0x97, /* 0xD8-0xDB */
+	0xE8, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xA3, 0x93, 0xEF, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x89, 0x4A, 0x00, 0x00, 0x90, 0xE1, 0x8E, 0xB4, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x95, 0xB5, 0x00, 0x00, 0x89, 0x5F, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xEB, 0x97, 0x8B, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE8, 0xB9, 0x00, 0x00, 0x93, 0x64, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_97[512] = {
+	0x8E, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE8, 0xBA, 0x00, 0x00, 0xE8, 0xBB, 0x90, 0x6B, /* 0x04-0x07 */
+	0xE8, 0xBC, 0x00, 0x00, 0x97, 0xEC, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE8, 0xB7, 0xE8, 0xBE, 0xE8, 0xC0, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE8, 0xBF, 0x00, 0x00, 0xE8, 0xBD, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xC1, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE8, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x91, 0x9A, 0x00, 0x00, 0x89, 0xE0, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE8, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x96, 0xB6, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xC4, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE8, 0xC5, 0x00, 0x00, 0x98, 0x49, 0xEE, 0xD1, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x9E, 0x50, 0xE8, 0xC6, 0x00, 0x00, 0xEE, 0xD2, /* 0x38-0x3B */
+	0x00, 0x00, 0xE8, 0xC7, 0xE8, 0xC8, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xCC, 0xEE, 0xD3, /* 0x40-0x43 */
+	0xE8, 0xC9, 0x00, 0x00, 0xE8, 0xCA, 0x00, 0x00, /* 0x44-0x47 */
+	0xE8, 0xCB, 0xE8, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xEE, 0xD4, 0x00, 0x00, 0xEE, 0xD5, /* 0x4C-0x4F */
+	0x00, 0x00, 0xEE, 0xD6, 0x90, 0xC2, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xEE, 0xD7, 0x96, 0xF5, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x90, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xE8, 0xCE, 0x00, 0x00, 0x94, 0xF1, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE8, 0xCF, 0xEA, 0x72, 0x96, 0xCA, 0x00, 0x00, /* 0x60-0x63 */
+	0xE8, 0xD0, 0x00, 0x00, 0xE8, 0xD1, 0x00, 0x00, /* 0x64-0x67 */
+	0xE8, 0xD2, 0x8A, 0x76, 0x00, 0x00, 0xE8, 0xD4, /* 0x68-0x6B */
+	0x00, 0x00, 0x90, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xE8, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x8C, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE8, 0xD6, 0xE8, 0xDA, 0x00, 0x00, /* 0x78-0x7B */
+	0xE8, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE8, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x8A, 0x93, 0xE8, 0xD7, 0xE8, 0xDB, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xDC, /* 0x88-0x8B */
+	0x00, 0x00, 0x88, 0xC6, 0x00, 0x00, 0xE8, 0xDD, /* 0x8C-0x8F */
+	0xE8, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x8F, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xE8, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x8B, 0x66, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xE2, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xE1, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE8, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x91, /* 0xA8-0xAB */
+	0x00, 0x00, 0x95, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xE3, /* 0xB0-0xB3 */
+	0xE8, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xE5, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xE6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE8, 0xE7, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xE8, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xD8, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE8, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xE8, 0xEA, 0x94, 0x42, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xEC, 0x89, 0xB9, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE8, 0xEF, 0xE8, 0xEE, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x43, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0xBF, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_98[512] = {
+	0x00, 0x00, 0x95, 0xC5, 0x92, 0xB8, 0x8D, 0xA0, /* 0x00-0x03 */
+	0x00, 0x00, 0x8D, 0x80, 0x8F, 0x87, 0x00, 0x00, /* 0x04-0x07 */
+	0x90, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xE8, 0xF1, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xF0, /* 0x0C-0x0F */
+	0x97, 0x61, 0x8A, 0xE6, 0x94, 0xD0, 0x93, 0xDA, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x9C, /* 0x14-0x17 */
+	0x97, 0xCC, 0x00, 0x00, 0x8C, 0x7A, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE8, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE8, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x96, 0x6A, 0x93, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x89, 0x6F, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xF5, /* 0x34-0x37 */
+	0xE8, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x95, 0x70, /* 0x38-0x3B */
+	0x97, 0x8A, 0xE8, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xF7, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xF9, /* 0x48-0x4B */
+	0x91, 0xE8, 0x8A, 0x7A, 0x8A, 0x7B, 0xE8, 0xF8, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x8A, 0xE7, 0x8C, 0xB0, 0x00, 0x00, 0xEE, 0xD8, /* 0x54-0x57 */
+	0x8A, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x93, 0x5E, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x97, 0xDE, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xEE, 0xD9, 0x00, 0x00, 0x8C, 0xDA, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xFA, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xFB, /* 0x6C-0x6F */
+	0xE8, 0xFC, 0xE9, 0x40, 0x00, 0x00, 0xE9, 0x42, /* 0x70-0x73 */
+	0xE9, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x95, 0x97, 0x00, 0x00, 0xE9, 0x43, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x44, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE9, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x46, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x48, /* 0xC0-0xC3 */
+	0xE9, 0x47, 0x00, 0x00, 0xE9, 0x49, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xF2, /* 0xD8-0xDB */
+	0xE3, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x90, 0x48, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x51, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE9, 0x4A, 0x00, 0x00, 0xE9, 0x4B, /* 0xE8-0xEB */
+	0x00, 0x00, 0x99, 0xAA, 0x9F, 0x5A, 0x94, 0xD1, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0xF9, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x88, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x8E, 0x94, 0x96, 0x4F, 0x8F, 0xFC, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_99[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x4C, /* 0x00-0x03 */
+	0x00, 0x00, 0x96, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE9, 0x4D, 0x97, 0x7B, 0x00, 0x00, /* 0x08-0x0B */
+	0x89, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x8E, 0x60, 0x00, 0x00, 0xE9, 0x4E, 0x89, 0xEC, /* 0x10-0x13 */
+	0xE9, 0x4F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE9, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xE9, 0x52, 0xE9, 0x53, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE9, 0x55, 0xE9, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE9, 0x54, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xDC, /* 0x24-0x27 */
+	0x8A, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xE9, 0x56, 0x00, 0x00, 0xE9, 0x57, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xE9, 0x58, 0xE9, 0x59, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x5A, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE9, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE9, 0x5B, 0x00, 0x00, 0xE9, 0x5E, /* 0x48-0x4B */
+	0xE9, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE9, 0x5D, 0xE9, 0x5F, 0xE9, 0x60, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE9, 0x62, 0x00, 0x00, 0x8B, 0xC0, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x8E, 0xF1, 0xE9, 0x63, /* 0x94-0x97 */
+	0xE9, 0x64, 0x8D, 0x81, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xDE, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xE9, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x8A, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x94, 0x6E, 0xE9, 0x66, 0xE9, 0x67, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x79, /* 0xB0-0xB3 */
+	0x93, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE9, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x94, 0x9D, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x91, 0xCA, 0x89, 0x77, 0x8B, 0xEC, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x8B, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x92, 0x93, 0xE9, 0x6D, 0x8B, 0xEE, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x89, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE9, 0x6C, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x6A, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE9, 0x6B, 0x00, 0x00, 0xE9, 0x69, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x77, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xE9, 0x6E, 0xE9, 0x6F, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE9, 0x70, 0xE9, 0x71, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xE9, 0x73, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x72, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8F, 0x78, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9A[512] = {
+	0x00, 0x00, 0xE9, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE9, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0x52, 0xE9, 0x75, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x91, 0x9B, 0x8C, 0xB1, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE9, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x91, 0xCB, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x79, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x93, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x7A, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x80, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE9, 0x7D, 0x00, 0x00, 0xE9, 0x7C, 0xE9, 0x7E, /* 0x40-0x43 */
+	0x00, 0x00, 0xE9, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xE9, 0x82, 0xEE, 0xDF, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE9, 0x81, 0x00, 0x00, 0xE9, 0x84, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x8B, 0xC1, 0xE9, 0x83, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x85, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x86, 0x00, 0x00, /* 0x60-0x63 */
+	0xE9, 0x88, 0xE9, 0x87, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE9, 0x89, 0xE9, 0x8B, 0xE9, 0x8A, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x8D, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE9, 0x8C, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE9, 0x8D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x8A, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE9, 0x8E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE9, 0x8F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x90, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x90, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE9, 0x91, 0x00, 0x00, 0xE9, 0x92, /* 0xD0-0xD3 */
+	0xE9, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x8D, 0x82, 0xEE, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xEE, 0xE1, 0x00, 0x00, 0xE9, 0x94, 0xE9, 0x95, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x96, 0xE9, 0x97, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x98, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x94, 0xAF, 0xE9, 0x9A, /* 0xE8-0xEB */
+	0x00, 0x00, 0x95, 0x45, 0xE9, 0x9B, 0xE9, 0x99, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE9, 0x9D, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE9, 0x9C, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x9E, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x9F, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_9B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xA0, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE9, 0xA1, 0x00, 0x00, 0xE9, 0xA2, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xA3, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xA4, 0xE9, 0xA5, /* 0x20-0x23 */
+	0x00, 0x00, 0xE9, 0xA6, 0x00, 0x00, 0xE9, 0xA7, /* 0x24-0x27 */
+	0xE9, 0xA8, 0xE9, 0xA9, 0xE9, 0xAA, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xAB, 0xE9, 0xAC, /* 0x2C-0x2F */
+	0x00, 0x00, 0x9F, 0x54, 0xE9, 0xAD, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xF6, /* 0x38-0x3B */
+	0x8B, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x8A, 0x40, 0x8D, 0xB0, 0xE9, 0xAF, /* 0x40-0x43 */
+	0xE9, 0xAE, 0x96, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xE9, 0xB1, 0xE9, 0xB2, 0xE9, 0xB0, /* 0x4C-0x4F */
+	0x00, 0x00, 0xE9, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x96, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE9, 0xB4, 0x00, 0x00, 0x8B, 0x9B, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x44, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xE3, 0x00, 0x00, /* 0x70-0x73 */
+	0xE9, 0xB5, 0xEE, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xB7, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0xBC, 0xEE, 0xE4, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE9, 0xB8, 0x95, 0xA9, 0xE9, 0xB6, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xB9, 0xE9, 0xBA, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xBB, /* 0x9C-0x9F */
+	0xE9, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE9, 0xBD, 0x00, 0x00, 0x96, 0x8E, 0x8E, 0x4C, /* 0xA8-0xAB */
+	0x00, 0x00, 0x8D, 0xF8, 0x91, 0x4E, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xEE, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE9, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE9, 0xC1, 0x00, 0x00, 0xEE, 0xE6, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE9, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xC2, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x8C, 0xEF, 0xE9, 0xC0, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xC3, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE9, 0xC4, 0xE9, 0xC5, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE9, 0xC9, 0x00, 0x00, 0x8E, 0x49, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xE2, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE9, 0xCA, 0xE9, 0xC7, 0xE9, 0xC6, /* 0xE0-0xE3 */
+	0xE9, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x8C, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE9, 0xCE, 0xE9, 0xCD, 0xE9, 0xCC, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x88, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_9C[512] = {
+	0xEE, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE9, 0xD8, 0x00, 0x00, 0xE9, 0xD4, 0x00, 0x00, /* 0x04-0x07 */
+	0xE9, 0xD5, 0xE9, 0xD1, 0xE9, 0xD7, 0x00, 0x00, /* 0x08-0x0B */
+	0xE9, 0xD3, 0x8A, 0x82, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x98, 0x6B, 0x00, 0x00, 0xE9, 0xD6, 0xE9, 0xD2, /* 0x10-0x13 */
+	0xE9, 0xD0, 0xE9, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xDA, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xE9, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE9, 0xDC, 0xE9, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x95, 0x68, 0xE9, 0xD9, 0x88, 0xF1, /* 0x2C-0x2F */
+	0xE9, 0xDE, 0x00, 0x00, 0xE9, 0xE0, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x8A, 0x8F, 0xE9, 0xCB, 0x89, 0x56, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE2, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE1, 0xE9, 0xDF, /* 0x44-0x47 */
+	0x92, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x96, 0x90, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xD8, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE3, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE9, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xE5, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE6, 0x00, 0x00, /* 0x74-0x77 */
+	0xE9, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x92, 0xB9, 0x00, 0x00, 0xE9, 0xE8, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x94, 0xB5, 0x00, 0x00, 0xE9, 0xED, /* 0xE8-0xEB */
+	0xE9, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE9, 0xEA, 0x00, 0x00, 0x00, 0x00, 0x96, 0x50, /* 0xF0-0xF3 */
+	0x96, 0xC2, 0x00, 0x00, 0x93, 0xCE, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_9D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xEE, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xEF, 0x93, 0xBC, /* 0x04-0x07 */
+	0xE9, 0xEC, 0xE9, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0xA8, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF7, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE9, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x95, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xF4, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xF3, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF1, 0x00, 0x00, /* 0x24-0x27 */
+	0x8A, 0x9B, 0x00, 0x00, 0xE9, 0xF0, 0x8E, 0xB0, /* 0x28-0x2B */
+	0x89, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8D, 0x83, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xFA, 0xE9, 0xF9, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE9, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE9, 0xF5, 0x00, 0x00, 0xE9, 0xFB, 0x00, 0x00, /* 0x44-0x47 */
+	0xE9, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xEA, 0x44, 0xEA, 0x43, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xEA, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x89, 0x4C, 0xEA, 0x40, 0xEA, 0x41, 0x00, 0x00, /* 0x5C-0x5F */
+	0x8D, 0x94, 0x96, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xEA, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xE9, /* 0x68-0x6B */
+	0x96, 0x51, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x4A, /* 0x6C-0x6F */
+	0xEE, 0xE8, 0x00, 0x00, 0xEA, 0x46, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x4B, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x48, /* 0x84-0x87 */
+	0x00, 0x00, 0xEA, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x7B, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x4C, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xEA, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xEA, 0x4E, 0x00, 0x00, 0xEA, 0x49, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xF2, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x4F, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x92, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xEA, 0x53, 0x00, 0x00, 0xEA, 0x54, 0xEA, 0x52, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xEA, 0x51, 0xEA, 0x57, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xEA, 0x50, 0x00, 0x00, 0xEA, 0x55, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x56, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x59, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xEA, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x5B, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xEA, 0x5C, 0x00, 0x00, 0xEA, 0x5D, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x68, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xEA, 0x5A, 0x91, 0xE9, 0x8D, 0xEB, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xEA, 0x5E, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xEE, 0xEB, 0xEA, 0x5F, 0xEA, 0x60, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x61, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xEA, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x8C, 0xB2, 0xEA, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xEA, 0x64, 0x00, 0x00, 0x8E, 0xAD, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xEA, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xEA, 0x66, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x67, /* 0x88-0x8B */
+	0xEA, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xEA, 0x6B, 0xEA, 0x69, 0x98, 0x5B, /* 0x90-0x93 */
+	0x00, 0x00, 0xEA, 0x6A, 0x00, 0x00, 0x97, 0xED, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xEA, 0x6C, 0x00, 0x00, 0x97, 0xD9, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xEA, 0x6D, 0x94, 0x9E, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xEA, 0x6E, 0xEA, 0x70, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xEA, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xEA, 0x6F, 0x8D, 0x8D, 0x96, 0xCB, 0x96, 0x83, /* 0xB8-0xBB */
+	0x9B, 0xF5, 0x00, 0x00, 0x9F, 0x80, 0x96, 0x9B, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x89, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xEA, 0x73, 0x8B, 0x6F, 0xEA, 0x74, 0xEA, 0x75, /* 0xCC-0xCF */
+	0xEA, 0x76, 0xEE, 0xEC, 0x8D, 0x95, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xEA, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xE0, 0xD2, 0x96, 0xD9, 0x00, 0x00, 0x91, 0xE1, /* 0xD8-0xDB */
+	0xEA, 0x78, 0xEA, 0x7A, 0xEA, 0x79, 0x00, 0x00, /* 0xDC-0xDF */
+	0xEA, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xEA, 0x7C, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xEA, 0x7D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x7E, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEA, 0x80, 0x00, 0x00, 0xEA, 0x81, 0xEA, 0x82, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xEA, 0x83, 0x00, 0x00, 0xEA, 0x84, /* 0xF8-0xFB */
+	0xEA, 0x85, 0xEA, 0x86, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x87, /* 0x04-0x07 */
+	0xEA, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x93, 0x43, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8C, 0xDB, /* 0x10-0x13 */
+	0x00, 0x00, 0xEA, 0x8A, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x91, 0x6C, 0xEA, 0x8B, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xEA, 0x8C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x40, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x8D, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x8E, 0xE2, 0x56, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xD8, 0xE8, 0xEB, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x8F, 0x00, 0x00, /* 0x50-0x53 */
+	0xEA, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x92, /* 0x5C-0x5F */
+	0xEA, 0x93, 0xEA, 0x94, 0x97, 0xEE, 0xEA, 0x91, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x95, 0xEA, 0x96, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x98, 0x00, 0x00, /* 0x68-0x6B */
+	0xEA, 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x9A, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0x9B, 0xEA, 0x99, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x97, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xEA, 0x9C, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xEA, 0x9D, 0xE2, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xEA, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+};
+
+static unsigned char u2c_DC[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+};
+
+static unsigned char u2c_F9[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xED, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xEE, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+};
+
+static unsigned char u2c_FA[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x73, 0xED, 0x7E, /* 0x0C-0x0F */
+	0xED, 0x80, 0xED, 0x95, 0xED, 0xBC, 0xED, 0xCC, /* 0x10-0x13 */
+	0xED, 0xCE, 0xED, 0xF9, 0xEE, 0x42, 0xEE, 0x59, /* 0x14-0x17 */
+	0xEE, 0x61, 0xEE, 0x62, 0xEE, 0x63, 0xEE, 0x65, /* 0x18-0x1B */
+	0xEE, 0x69, 0xEE, 0x6C, 0xEE, 0x75, 0xEE, 0x81, /* 0x1C-0x1F */
+	0xEE, 0x83, 0xEE, 0x84, 0xEE, 0x8D, 0xEE, 0x95, /* 0x20-0x23 */
+	0xEE, 0x97, 0xEE, 0x98, 0xEE, 0x9B, 0xEE, 0xB7, /* 0x24-0x27 */
+	0xEE, 0xBE, 0xEE, 0xCE, 0xEE, 0xDA, 0xEE, 0xDB, /* 0x28-0x2B */
+	0xEE, 0xDD, 0xEE, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+};
+
+static unsigned char u2c_FF[512] = {
+	0x00, 0x00, 0x81, 0x49, 0xEE, 0xFC, 0x81, 0x94, /* 0x00-0x03 */
+	0x81, 0x90, 0x81, 0x93, 0x81, 0x95, 0xEE, 0xFB, /* 0x04-0x07 */
+	0x81, 0x69, 0x81, 0x6A, 0x81, 0x96, 0x81, 0x7B, /* 0x08-0x0B */
+	0x81, 0x43, 0x81, 0x7C, 0x81, 0x44, 0x81, 0x5E, /* 0x0C-0x0F */
+	0x82, 0x4F, 0x82, 0x50, 0x82, 0x51, 0x82, 0x52, /* 0x10-0x13 */
+	0x82, 0x53, 0x82, 0x54, 0x82, 0x55, 0x82, 0x56, /* 0x14-0x17 */
+	0x82, 0x57, 0x82, 0x58, 0x81, 0x46, 0x81, 0x47, /* 0x18-0x1B */
+	0x81, 0x83, 0x81, 0x81, 0x81, 0x84, 0x81, 0x48, /* 0x1C-0x1F */
+	0x81, 0x97, 0x82, 0x60, 0x82, 0x61, 0x82, 0x62, /* 0x20-0x23 */
+	0x82, 0x63, 0x82, 0x64, 0x82, 0x65, 0x82, 0x66, /* 0x24-0x27 */
+	0x82, 0x67, 0x82, 0x68, 0x82, 0x69, 0x82, 0x6A, /* 0x28-0x2B */
+	0x82, 0x6B, 0x82, 0x6C, 0x82, 0x6D, 0x82, 0x6E, /* 0x2C-0x2F */
+	0x82, 0x6F, 0x82, 0x70, 0x82, 0x71, 0x82, 0x72, /* 0x30-0x33 */
+	0x82, 0x73, 0x82, 0x74, 0x82, 0x75, 0x82, 0x76, /* 0x34-0x37 */
+	0x82, 0x77, 0x82, 0x78, 0x82, 0x79, 0x81, 0x6D, /* 0x38-0x3B */
+	0x81, 0x5F, 0x81, 0x6E, 0x81, 0x4F, 0x81, 0x51, /* 0x3C-0x3F */
+	0x81, 0x4D, 0x82, 0x81, 0x82, 0x82, 0x82, 0x83, /* 0x40-0x43 */
+	0x82, 0x84, 0x82, 0x85, 0x82, 0x86, 0x82, 0x87, /* 0x44-0x47 */
+	0x82, 0x88, 0x82, 0x89, 0x82, 0x8A, 0x82, 0x8B, /* 0x48-0x4B */
+	0x82, 0x8C, 0x82, 0x8D, 0x82, 0x8E, 0x82, 0x8F, /* 0x4C-0x4F */
+	0x82, 0x90, 0x82, 0x91, 0x82, 0x92, 0x82, 0x93, /* 0x50-0x53 */
+	0x82, 0x94, 0x82, 0x95, 0x82, 0x96, 0x82, 0x97, /* 0x54-0x57 */
+	0x82, 0x98, 0x82, 0x99, 0x82, 0x9A, 0x81, 0x6F, /* 0x58-0x5B */
+	0x81, 0x62, 0x81, 0x70, 0x81, 0x60, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, /* 0x60-0x63 */
+	0x00, 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, /* 0x64-0x67 */
+	0x00, 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, /* 0x68-0x6B */
+	0x00, 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, /* 0x6C-0x6F */
+	0x00, 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, /* 0x70-0x73 */
+	0x00, 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, /* 0x74-0x77 */
+	0x00, 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, /* 0x78-0x7B */
+	0x00, 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, /* 0x7C-0x7F */
+	
+	0x00, 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, /* 0x80-0x83 */
+	0x00, 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, /* 0x84-0x87 */
+	0x00, 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, /* 0x88-0x8B */
+	0x00, 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, /* 0x8C-0x8F */
+	0x00, 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, /* 0x90-0x93 */
+	0x00, 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, /* 0x94-0x97 */
+	0x00, 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, /* 0x98-0x9B */
+	0x00, 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x81, 0x91, 0x81, 0x92, 0x81, 0xCA, 0x81, 0x50, /* 0xE0-0xE3 */
+	0xEE, 0xFA, 0x81, 0x8F, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
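+/* Unicode -> CP932 page dispatch, indexed by the Unicode high byte. */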
+static unsigned char *page_uni2charset[256] = {
+	NULL,   NULL,   NULL,   u2c_03, u2c_04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_20, u2c_21, u2c_22, u2c_23, u2c_24, u2c_25, u2c_26, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_30, NULL,   u2c_32, u2c_33, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   u2c_4E, u2c_4F, 
+	u2c_50, u2c_51, u2c_52, u2c_53, u2c_54, u2c_55, u2c_56, u2c_57, 
+	u2c_58, u2c_59, u2c_5A, u2c_5B, u2c_5C, u2c_5D, u2c_5E, u2c_5F, 
+	u2c_60, u2c_61, u2c_62, u2c_63, u2c_64, u2c_65, u2c_66, u2c_67, 
+	u2c_68, u2c_69, u2c_6A, u2c_6B, u2c_6C, u2c_6D, u2c_6E, u2c_6F, 
+	u2c_70, u2c_71, u2c_72, u2c_73, u2c_74, u2c_75, u2c_76, u2c_77, 
+	u2c_78, u2c_79, u2c_7A, u2c_7B, u2c_7C, u2c_7D, u2c_7E, u2c_7F, 
+	u2c_80, u2c_81, u2c_82, u2c_83, u2c_84, u2c_85, u2c_86, u2c_87, 
+	u2c_88, u2c_89, u2c_8A, u2c_8B, u2c_8C, u2c_8D, u2c_8E, u2c_8F, 
+	u2c_90, u2c_91, u2c_92, u2c_93, u2c_94, u2c_95, u2c_96, u2c_97, 
+	u2c_98, u2c_99, u2c_9A, u2c_9B, u2c_9C, u2c_9D, u2c_9E, u2c_9F, 
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   u2c_DC, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   u2c_F9, u2c_FA, NULL,   NULL,   NULL,   NULL,   u2c_FF,
+};
+
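+/* Case tables: only ASCII letters fold; every other byte maps to itself. */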
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(const wchar_t uni,
+		    unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni&0xFF;
+	unsigned char ch = (uni>>8)&0xFF;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
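+	/* U+FF61..U+FF9F: halfwidth katakana maps to single bytes 0xA1..0xDF */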
+	if (ch == 0xFF && 0x61 <= cl && cl <= 0x9F) {
+		out[0] = cl + 0x40;
+		return 1;
+	}
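+	/* Otherwise the high byte selects a page of (lead, trail) byte pairs */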
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset) {
+		if (boundlen < 2)
+			return -ENAMETOOLONG;
+
+		out[0] = uni2charset[cl*2];
+		out[1] = uni2charset[cl*2+1];
+		if (out[0] == 0x00 && out[1] == 0x00)
+			return -EINVAL;
+		return 2;
+	} else if (ch == 0) {
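+		/* Page 0x00: ASCII maps to itself, U+00A0.. uses the u2c_00hi table */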
+		if (cl <= 0x7F) {
+			out[0] = cl;
+			return 1;
+		} else if (0xA0 <= cl) {
+			if (boundlen < 2)
+				return -ENAMETOOLONG;
+			out[0] = u2c_00hi[cl - 0xA0][0];
+			out[1] = u2c_00hi[cl - 0xA0][1];
+			if (out[0] && out[1])
+				return 2;
+		}
+		return -EINVAL;
+	}
+	else
+		return -EINVAL;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen,
+		    wchar_t *uni)
+{
+	unsigned char ch, cl;
+	wchar_t *charset2uni;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
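+	/* Single-byte forms: ASCII, then halfwidth katakana at 0xA1..0xDF */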
+	if (rawstring[0] <= 0x7F) {
+		*uni = rawstring[0];
+		return 1;
+	}
+	if (0xA1 <= rawstring[0] && rawstring[0] <= 0xDF) {
+		*uni = 0xFF00 | (rawstring[0] - 0x40);
+		return 1;
+	}
+
+	if (boundlen < 2)
+		return -ENAMETOOLONG;
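+	/* Double-byte form: lead byte selects a page, trail byte gives the Unicode value */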
+	ch = rawstring[0];
+	cl = rawstring[1];
+	charset2uni = page_charset2uni[ch];
+	if (charset2uni && cl) {
+		*uni = charset2uni[cl];
+		if (*uni == 0x0000)
+			return -EINVAL;
+		return 2;
+	}
+	else
+		return -EINVAL;
+}
+
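+/* Registered under "cp932" with alias "sjis", e.g. for a filesystem's iocharset option */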
+static struct nls_table table = {
+	.charset	= "cp932",
+	.alias		= "sjis",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp932(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp932(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp932)
+module_exit(exit_nls_cp932)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(sjis);
diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
new file mode 100644
index 0000000..ef4cef4
--- /dev/null
+++ b/fs/nls/nls_cp936.c
@@ -0,0 +1,11019 @@
+/*
+ * linux/fs/nls_cp936.c
+ *
+ * Charset cp936 translation tables.
+ * This translation table was generated automatically; the
+ * original table can be downloaded from the Microsoft website.
+ * (http://www.microsoft.com/typography/unicode/unicodecp.htm)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t c2u_81[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4E02,0x4E04,0x4E05,0x4E06,0x4E0F,0x4E12,0x4E17,0x4E1F,/* 0x40-0x47 */
+	0x4E20,0x4E21,0x4E23,0x4E26,0x4E29,0x4E2E,0x4E2F,0x4E31,/* 0x48-0x4F */
+	0x4E33,0x4E35,0x4E37,0x4E3C,0x4E40,0x4E41,0x4E42,0x4E44,/* 0x50-0x57 */
+	0x4E46,0x4E4A,0x4E51,0x4E55,0x4E57,0x4E5A,0x4E5B,0x4E62,/* 0x58-0x5F */
+	0x4E63,0x4E64,0x4E65,0x4E67,0x4E68,0x4E6A,0x4E6B,0x4E6C,/* 0x60-0x67 */
+	0x4E6D,0x4E6E,0x4E6F,0x4E72,0x4E74,0x4E75,0x4E76,0x4E77,/* 0x68-0x6F */
+	0x4E78,0x4E79,0x4E7A,0x4E7B,0x4E7C,0x4E7D,0x4E7F,0x4E80,/* 0x70-0x77 */
+	0x4E81,0x4E82,0x4E83,0x4E84,0x4E85,0x4E87,0x4E8A,0x0000,/* 0x78-0x7F */
+
+	0x4E90,0x4E96,0x4E97,0x4E99,0x4E9C,0x4E9D,0x4E9E,0x4EA3,/* 0x80-0x87 */
+	0x4EAA,0x4EAF,0x4EB0,0x4EB1,0x4EB4,0x4EB6,0x4EB7,0x4EB8,/* 0x88-0x8F */
+	0x4EB9,0x4EBC,0x4EBD,0x4EBE,0x4EC8,0x4ECC,0x4ECF,0x4ED0,/* 0x90-0x97 */
+	0x4ED2,0x4EDA,0x4EDB,0x4EDC,0x4EE0,0x4EE2,0x4EE6,0x4EE7,/* 0x98-0x9F */
+	0x4EE9,0x4EED,0x4EEE,0x4EEF,0x4EF1,0x4EF4,0x4EF8,0x4EF9,/* 0xA0-0xA7 */
+	0x4EFA,0x4EFC,0x4EFE,0x4F00,0x4F02,0x4F03,0x4F04,0x4F05,/* 0xA8-0xAF */
+	0x4F06,0x4F07,0x4F08,0x4F0B,0x4F0C,0x4F12,0x4F13,0x4F14,/* 0xB0-0xB7 */
+	0x4F15,0x4F16,0x4F1C,0x4F1D,0x4F21,0x4F23,0x4F28,0x4F29,/* 0xB8-0xBF */
+	0x4F2C,0x4F2D,0x4F2E,0x4F31,0x4F33,0x4F35,0x4F37,0x4F39,/* 0xC0-0xC7 */
+	0x4F3B,0x4F3E,0x4F3F,0x4F40,0x4F41,0x4F42,0x4F44,0x4F45,/* 0xC8-0xCF */
+	0x4F47,0x4F48,0x4F49,0x4F4A,0x4F4B,0x4F4C,0x4F52,0x4F54,/* 0xD0-0xD7 */
+	0x4F56,0x4F61,0x4F62,0x4F66,0x4F68,0x4F6A,0x4F6B,0x4F6D,/* 0xD8-0xDF */
+	0x4F6E,0x4F71,0x4F72,0x4F75,0x4F77,0x4F78,0x4F79,0x4F7A,/* 0xE0-0xE7 */
+	0x4F7D,0x4F80,0x4F81,0x4F82,0x4F85,0x4F86,0x4F87,0x4F8A,/* 0xE8-0xEF */
+	0x4F8C,0x4F8E,0x4F90,0x4F92,0x4F93,0x4F95,0x4F96,0x4F98,/* 0xF0-0xF7 */
+	0x4F99,0x4F9A,0x4F9C,0x4F9E,0x4F9F,0x4FA1,0x4FA2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_82[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4FA4,0x4FAB,0x4FAD,0x4FB0,0x4FB1,0x4FB2,0x4FB3,0x4FB4,/* 0x40-0x47 */
+	0x4FB6,0x4FB7,0x4FB8,0x4FB9,0x4FBA,0x4FBB,0x4FBC,0x4FBD,/* 0x48-0x4F */
+	0x4FBE,0x4FC0,0x4FC1,0x4FC2,0x4FC6,0x4FC7,0x4FC8,0x4FC9,/* 0x50-0x57 */
+	0x4FCB,0x4FCC,0x4FCD,0x4FD2,0x4FD3,0x4FD4,0x4FD5,0x4FD6,/* 0x58-0x5F */
+	0x4FD9,0x4FDB,0x4FE0,0x4FE2,0x4FE4,0x4FE5,0x4FE7,0x4FEB,/* 0x60-0x67 */
+	0x4FEC,0x4FF0,0x4FF2,0x4FF4,0x4FF5,0x4FF6,0x4FF7,0x4FF9,/* 0x68-0x6F */
+	0x4FFB,0x4FFC,0x4FFD,0x4FFF,0x5000,0x5001,0x5002,0x5003,/* 0x70-0x77 */
+	0x5004,0x5005,0x5006,0x5007,0x5008,0x5009,0x500A,0x0000,/* 0x78-0x7F */
+
+	0x500B,0x500E,0x5010,0x5011,0x5013,0x5015,0x5016,0x5017,/* 0x80-0x87 */
+	0x501B,0x501D,0x501E,0x5020,0x5022,0x5023,0x5024,0x5027,/* 0x88-0x8F */
+	0x502B,0x502F,0x5030,0x5031,0x5032,0x5033,0x5034,0x5035,/* 0x90-0x97 */
+	0x5036,0x5037,0x5038,0x5039,0x503B,0x503D,0x503F,0x5040,/* 0x98-0x9F */
+	0x5041,0x5042,0x5044,0x5045,0x5046,0x5049,0x504A,0x504B,/* 0xA0-0xA7 */
+	0x504D,0x5050,0x5051,0x5052,0x5053,0x5054,0x5056,0x5057,/* 0xA8-0xAF */
+	0x5058,0x5059,0x505B,0x505D,0x505E,0x505F,0x5060,0x5061,/* 0xB0-0xB7 */
+	0x5062,0x5063,0x5064,0x5066,0x5067,0x5068,0x5069,0x506A,/* 0xB8-0xBF */
+	0x506B,0x506D,0x506E,0x506F,0x5070,0x5071,0x5072,0x5073,/* 0xC0-0xC7 */
+	0x5074,0x5075,0x5078,0x5079,0x507A,0x507C,0x507D,0x5081,/* 0xC8-0xCF */
+	0x5082,0x5083,0x5084,0x5086,0x5087,0x5089,0x508A,0x508B,/* 0xD0-0xD7 */
+	0x508C,0x508E,0x508F,0x5090,0x5091,0x5092,0x5093,0x5094,/* 0xD8-0xDF */
+	0x5095,0x5096,0x5097,0x5098,0x5099,0x509A,0x509B,0x509C,/* 0xE0-0xE7 */
+	0x509D,0x509E,0x509F,0x50A0,0x50A1,0x50A2,0x50A4,0x50A6,/* 0xE8-0xEF */
+	0x50AA,0x50AB,0x50AD,0x50AE,0x50AF,0x50B0,0x50B1,0x50B3,/* 0xF0-0xF7 */
+	0x50B4,0x50B5,0x50B6,0x50B7,0x50B8,0x50B9,0x50BC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_83[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x50BD,0x50BE,0x50BF,0x50C0,0x50C1,0x50C2,0x50C3,0x50C4,/* 0x40-0x47 */
+	0x50C5,0x50C6,0x50C7,0x50C8,0x50C9,0x50CA,0x50CB,0x50CC,/* 0x48-0x4F */
+	0x50CD,0x50CE,0x50D0,0x50D1,0x50D2,0x50D3,0x50D4,0x50D5,/* 0x50-0x57 */
+	0x50D7,0x50D8,0x50D9,0x50DB,0x50DC,0x50DD,0x50DE,0x50DF,/* 0x58-0x5F */
+	0x50E0,0x50E1,0x50E2,0x50E3,0x50E4,0x50E5,0x50E8,0x50E9,/* 0x60-0x67 */
+	0x50EA,0x50EB,0x50EF,0x50F0,0x50F1,0x50F2,0x50F4,0x50F6,/* 0x68-0x6F */
+	0x50F7,0x50F8,0x50F9,0x50FA,0x50FC,0x50FD,0x50FE,0x50FF,/* 0x70-0x77 */
+	0x5100,0x5101,0x5102,0x5103,0x5104,0x5105,0x5108,0x0000,/* 0x78-0x7F */
+
+	0x5109,0x510A,0x510C,0x510D,0x510E,0x510F,0x5110,0x5111,/* 0x80-0x87 */
+	0x5113,0x5114,0x5115,0x5116,0x5117,0x5118,0x5119,0x511A,/* 0x88-0x8F */
+	0x511B,0x511C,0x511D,0x511E,0x511F,0x5120,0x5122,0x5123,/* 0x90-0x97 */
+	0x5124,0x5125,0x5126,0x5127,0x5128,0x5129,0x512A,0x512B,/* 0x98-0x9F */
+	0x512C,0x512D,0x512E,0x512F,0x5130,0x5131,0x5132,0x5133,/* 0xA0-0xA7 */
+	0x5134,0x5135,0x5136,0x5137,0x5138,0x5139,0x513A,0x513B,/* 0xA8-0xAF */
+	0x513C,0x513D,0x513E,0x5142,0x5147,0x514A,0x514C,0x514E,/* 0xB0-0xB7 */
+	0x514F,0x5150,0x5152,0x5153,0x5157,0x5158,0x5159,0x515B,/* 0xB8-0xBF */
+	0x515D,0x515E,0x515F,0x5160,0x5161,0x5163,0x5164,0x5166,/* 0xC0-0xC7 */
+	0x5167,0x5169,0x516A,0x516F,0x5172,0x517A,0x517E,0x517F,/* 0xC8-0xCF */
+	0x5183,0x5184,0x5186,0x5187,0x518A,0x518B,0x518E,0x518F,/* 0xD0-0xD7 */
+	0x5190,0x5191,0x5193,0x5194,0x5198,0x519A,0x519D,0x519E,/* 0xD8-0xDF */
+	0x519F,0x51A1,0x51A3,0x51A6,0x51A7,0x51A8,0x51A9,0x51AA,/* 0xE0-0xE7 */
+	0x51AD,0x51AE,0x51B4,0x51B8,0x51B9,0x51BA,0x51BE,0x51BF,/* 0xE8-0xEF */
+	0x51C1,0x51C2,0x51C3,0x51C5,0x51C8,0x51CA,0x51CD,0x51CE,/* 0xF0-0xF7 */
+	0x51D0,0x51D2,0x51D3,0x51D4,0x51D5,0x51D6,0x51D7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_84[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x51D8,0x51D9,0x51DA,0x51DC,0x51DE,0x51DF,0x51E2,0x51E3,/* 0x40-0x47 */
+	0x51E5,0x51E6,0x51E7,0x51E8,0x51E9,0x51EA,0x51EC,0x51EE,/* 0x48-0x4F */
+	0x51F1,0x51F2,0x51F4,0x51F7,0x51FE,0x5204,0x5205,0x5209,/* 0x50-0x57 */
+	0x520B,0x520C,0x520F,0x5210,0x5213,0x5214,0x5215,0x521C,/* 0x58-0x5F */
+	0x521E,0x521F,0x5221,0x5222,0x5223,0x5225,0x5226,0x5227,/* 0x60-0x67 */
+	0x522A,0x522C,0x522F,0x5231,0x5232,0x5234,0x5235,0x523C,/* 0x68-0x6F */
+	0x523E,0x5244,0x5245,0x5246,0x5247,0x5248,0x5249,0x524B,/* 0x70-0x77 */
+	0x524E,0x524F,0x5252,0x5253,0x5255,0x5257,0x5258,0x0000,/* 0x78-0x7F */
+
+	0x5259,0x525A,0x525B,0x525D,0x525F,0x5260,0x5262,0x5263,/* 0x80-0x87 */
+	0x5264,0x5266,0x5268,0x526B,0x526C,0x526D,0x526E,0x5270,/* 0x88-0x8F */
+	0x5271,0x5273,0x5274,0x5275,0x5276,0x5277,0x5278,0x5279,/* 0x90-0x97 */
+	0x527A,0x527B,0x527C,0x527E,0x5280,0x5283,0x5284,0x5285,/* 0x98-0x9F */
+	0x5286,0x5287,0x5289,0x528A,0x528B,0x528C,0x528D,0x528E,/* 0xA0-0xA7 */
+	0x528F,0x5291,0x5292,0x5294,0x5295,0x5296,0x5297,0x5298,/* 0xA8-0xAF */
+	0x5299,0x529A,0x529C,0x52A4,0x52A5,0x52A6,0x52A7,0x52AE,/* 0xB0-0xB7 */
+	0x52AF,0x52B0,0x52B4,0x52B5,0x52B6,0x52B7,0x52B8,0x52B9,/* 0xB8-0xBF */
+	0x52BA,0x52BB,0x52BC,0x52BD,0x52C0,0x52C1,0x52C2,0x52C4,/* 0xC0-0xC7 */
+	0x52C5,0x52C6,0x52C8,0x52CA,0x52CC,0x52CD,0x52CE,0x52CF,/* 0xC8-0xCF */
+	0x52D1,0x52D3,0x52D4,0x52D5,0x52D7,0x52D9,0x52DA,0x52DB,/* 0xD0-0xD7 */
+	0x52DC,0x52DD,0x52DE,0x52E0,0x52E1,0x52E2,0x52E3,0x52E5,/* 0xD8-0xDF */
+	0x52E6,0x52E7,0x52E8,0x52E9,0x52EA,0x52EB,0x52EC,0x52ED,/* 0xE0-0xE7 */
+	0x52EE,0x52EF,0x52F1,0x52F2,0x52F3,0x52F4,0x52F5,0x52F6,/* 0xE8-0xEF */
+	0x52F7,0x52F8,0x52FB,0x52FC,0x52FD,0x5301,0x5302,0x5303,/* 0xF0-0xF7 */
+	0x5304,0x5307,0x5309,0x530A,0x530B,0x530C,0x530E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_85[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5311,0x5312,0x5313,0x5314,0x5318,0x531B,0x531C,0x531E,/* 0x40-0x47 */
+	0x531F,0x5322,0x5324,0x5325,0x5327,0x5328,0x5329,0x532B,/* 0x48-0x4F */
+	0x532C,0x532D,0x532F,0x5330,0x5331,0x5332,0x5333,0x5334,/* 0x50-0x57 */
+	0x5335,0x5336,0x5337,0x5338,0x533C,0x533D,0x5340,0x5342,/* 0x58-0x5F */
+	0x5344,0x5346,0x534B,0x534C,0x534D,0x5350,0x5354,0x5358,/* 0x60-0x67 */
+	0x5359,0x535B,0x535D,0x5365,0x5368,0x536A,0x536C,0x536D,/* 0x68-0x6F */
+	0x5372,0x5376,0x5379,0x537B,0x537C,0x537D,0x537E,0x5380,/* 0x70-0x77 */
+	0x5381,0x5383,0x5387,0x5388,0x538A,0x538E,0x538F,0x0000,/* 0x78-0x7F */
+
+	0x5390,0x5391,0x5392,0x5393,0x5394,0x5396,0x5397,0x5399,/* 0x80-0x87 */
+	0x539B,0x539C,0x539E,0x53A0,0x53A1,0x53A4,0x53A7,0x53AA,/* 0x88-0x8F */
+	0x53AB,0x53AC,0x53AD,0x53AF,0x53B0,0x53B1,0x53B2,0x53B3,/* 0x90-0x97 */
+	0x53B4,0x53B5,0x53B7,0x53B8,0x53B9,0x53BA,0x53BC,0x53BD,/* 0x98-0x9F */
+	0x53BE,0x53C0,0x53C3,0x53C4,0x53C5,0x53C6,0x53C7,0x53CE,/* 0xA0-0xA7 */
+	0x53CF,0x53D0,0x53D2,0x53D3,0x53D5,0x53DA,0x53DC,0x53DD,/* 0xA8-0xAF */
+	0x53DE,0x53E1,0x53E2,0x53E7,0x53F4,0x53FA,0x53FE,0x53FF,/* 0xB0-0xB7 */
+	0x5400,0x5402,0x5405,0x5407,0x540B,0x5414,0x5418,0x5419,/* 0xB8-0xBF */
+	0x541A,0x541C,0x5422,0x5424,0x5425,0x542A,0x5430,0x5433,/* 0xC0-0xC7 */
+	0x5436,0x5437,0x543A,0x543D,0x543F,0x5441,0x5442,0x5444,/* 0xC8-0xCF */
+	0x5445,0x5447,0x5449,0x544C,0x544D,0x544E,0x544F,0x5451,/* 0xD0-0xD7 */
+	0x545A,0x545D,0x545E,0x545F,0x5460,0x5461,0x5463,0x5465,/* 0xD8-0xDF */
+	0x5467,0x5469,0x546A,0x546B,0x546C,0x546D,0x546E,0x546F,/* 0xE0-0xE7 */
+	0x5470,0x5474,0x5479,0x547A,0x547E,0x547F,0x5481,0x5483,/* 0xE8-0xEF */
+	0x5485,0x5487,0x5488,0x5489,0x548A,0x548D,0x5491,0x5493,/* 0xF0-0xF7 */
+	0x5497,0x5498,0x549C,0x549E,0x549F,0x54A0,0x54A1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_86[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x54A2,0x54A5,0x54AE,0x54B0,0x54B2,0x54B5,0x54B6,0x54B7,/* 0x40-0x47 */
+	0x54B9,0x54BA,0x54BC,0x54BE,0x54C3,0x54C5,0x54CA,0x54CB,/* 0x48-0x4F */
+	0x54D6,0x54D8,0x54DB,0x54E0,0x54E1,0x54E2,0x54E3,0x54E4,/* 0x50-0x57 */
+	0x54EB,0x54EC,0x54EF,0x54F0,0x54F1,0x54F4,0x54F5,0x54F6,/* 0x58-0x5F */
+	0x54F7,0x54F8,0x54F9,0x54FB,0x54FE,0x5500,0x5502,0x5503,/* 0x60-0x67 */
+	0x5504,0x5505,0x5508,0x550A,0x550B,0x550C,0x550D,0x550E,/* 0x68-0x6F */
+	0x5512,0x5513,0x5515,0x5516,0x5517,0x5518,0x5519,0x551A,/* 0x70-0x77 */
+	0x551C,0x551D,0x551E,0x551F,0x5521,0x5525,0x5526,0x0000,/* 0x78-0x7F */
+
+	0x5528,0x5529,0x552B,0x552D,0x5532,0x5534,0x5535,0x5536,/* 0x80-0x87 */
+	0x5538,0x5539,0x553A,0x553B,0x553D,0x5540,0x5542,0x5545,/* 0x88-0x8F */
+	0x5547,0x5548,0x554B,0x554C,0x554D,0x554E,0x554F,0x5551,/* 0x90-0x97 */
+	0x5552,0x5553,0x5554,0x5557,0x5558,0x5559,0x555A,0x555B,/* 0x98-0x9F */
+	0x555D,0x555E,0x555F,0x5560,0x5562,0x5563,0x5568,0x5569,/* 0xA0-0xA7 */
+	0x556B,0x556F,0x5570,0x5571,0x5572,0x5573,0x5574,0x5579,/* 0xA8-0xAF */
+	0x557A,0x557D,0x557F,0x5585,0x5586,0x558C,0x558D,0x558E,/* 0xB0-0xB7 */
+	0x5590,0x5592,0x5593,0x5595,0x5596,0x5597,0x559A,0x559B,/* 0xB8-0xBF */
+	0x559E,0x55A0,0x55A1,0x55A2,0x55A3,0x55A4,0x55A5,0x55A6,/* 0xC0-0xC7 */
+	0x55A8,0x55A9,0x55AA,0x55AB,0x55AC,0x55AD,0x55AE,0x55AF,/* 0xC8-0xCF */
+	0x55B0,0x55B2,0x55B4,0x55B6,0x55B8,0x55BA,0x55BC,0x55BF,/* 0xD0-0xD7 */
+	0x55C0,0x55C1,0x55C2,0x55C3,0x55C6,0x55C7,0x55C8,0x55CA,/* 0xD8-0xDF */
+	0x55CB,0x55CE,0x55CF,0x55D0,0x55D5,0x55D7,0x55D8,0x55D9,/* 0xE0-0xE7 */
+	0x55DA,0x55DB,0x55DE,0x55E0,0x55E2,0x55E7,0x55E9,0x55ED,/* 0xE8-0xEF */
+	0x55EE,0x55F0,0x55F1,0x55F4,0x55F6,0x55F8,0x55F9,0x55FA,/* 0xF0-0xF7 */
+	0x55FB,0x55FC,0x55FF,0x5602,0x5603,0x5604,0x5605,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_87[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5606,0x5607,0x560A,0x560B,0x560D,0x5610,0x5611,0x5612,/* 0x40-0x47 */
+	0x5613,0x5614,0x5615,0x5616,0x5617,0x5619,0x561A,0x561C,/* 0x48-0x4F */
+	0x561D,0x5620,0x5621,0x5622,0x5625,0x5626,0x5628,0x5629,/* 0x50-0x57 */
+	0x562A,0x562B,0x562E,0x562F,0x5630,0x5633,0x5635,0x5637,/* 0x58-0x5F */
+	0x5638,0x563A,0x563C,0x563D,0x563E,0x5640,0x5641,0x5642,/* 0x60-0x67 */
+	0x5643,0x5644,0x5645,0x5646,0x5647,0x5648,0x5649,0x564A,/* 0x68-0x6F */
+	0x564B,0x564F,0x5650,0x5651,0x5652,0x5653,0x5655,0x5656,/* 0x70-0x77 */
+	0x565A,0x565B,0x565D,0x565E,0x565F,0x5660,0x5661,0x0000,/* 0x78-0x7F */
+
+	0x5663,0x5665,0x5666,0x5667,0x566D,0x566E,0x566F,0x5670,/* 0x80-0x87 */
+	0x5672,0x5673,0x5674,0x5675,0x5677,0x5678,0x5679,0x567A,/* 0x88-0x8F */
+	0x567D,0x567E,0x567F,0x5680,0x5681,0x5682,0x5683,0x5684,/* 0x90-0x97 */
+	0x5687,0x5688,0x5689,0x568A,0x568B,0x568C,0x568D,0x5690,/* 0x98-0x9F */
+	0x5691,0x5692,0x5694,0x5695,0x5696,0x5697,0x5698,0x5699,/* 0xA0-0xA7 */
+	0x569A,0x569B,0x569C,0x569D,0x569E,0x569F,0x56A0,0x56A1,/* 0xA8-0xAF */
+	0x56A2,0x56A4,0x56A5,0x56A6,0x56A7,0x56A8,0x56A9,0x56AA,/* 0xB0-0xB7 */
+	0x56AB,0x56AC,0x56AD,0x56AE,0x56B0,0x56B1,0x56B2,0x56B3,/* 0xB8-0xBF */
+	0x56B4,0x56B5,0x56B6,0x56B8,0x56B9,0x56BA,0x56BB,0x56BD,/* 0xC0-0xC7 */
+	0x56BE,0x56BF,0x56C0,0x56C1,0x56C2,0x56C3,0x56C4,0x56C5,/* 0xC8-0xCF */
+	0x56C6,0x56C7,0x56C8,0x56C9,0x56CB,0x56CC,0x56CD,0x56CE,/* 0xD0-0xD7 */
+	0x56CF,0x56D0,0x56D1,0x56D2,0x56D3,0x56D5,0x56D6,0x56D8,/* 0xD8-0xDF */
+	0x56D9,0x56DC,0x56E3,0x56E5,0x56E6,0x56E7,0x56E8,0x56E9,/* 0xE0-0xE7 */
+	0x56EA,0x56EC,0x56EE,0x56EF,0x56F2,0x56F3,0x56F6,0x56F7,/* 0xE8-0xEF */
+	0x56F8,0x56FB,0x56FC,0x5700,0x5701,0x5702,0x5705,0x5707,/* 0xF0-0xF7 */
+	0x570B,0x570C,0x570D,0x570E,0x570F,0x5710,0x5711,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_88[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5712,0x5713,0x5714,0x5715,0x5716,0x5717,0x5718,0x5719,/* 0x40-0x47 */
+	0x571A,0x571B,0x571D,0x571E,0x5720,0x5721,0x5722,0x5724,/* 0x48-0x4F */
+	0x5725,0x5726,0x5727,0x572B,0x5731,0x5732,0x5734,0x5735,/* 0x50-0x57 */
+	0x5736,0x5737,0x5738,0x573C,0x573D,0x573F,0x5741,0x5743,/* 0x58-0x5F */
+	0x5744,0x5745,0x5746,0x5748,0x5749,0x574B,0x5752,0x5753,/* 0x60-0x67 */
+	0x5754,0x5755,0x5756,0x5758,0x5759,0x5762,0x5763,0x5765,/* 0x68-0x6F */
+	0x5767,0x576C,0x576E,0x5770,0x5771,0x5772,0x5774,0x5775,/* 0x70-0x77 */
+	0x5778,0x5779,0x577A,0x577D,0x577E,0x577F,0x5780,0x0000,/* 0x78-0x7F */
+
+	0x5781,0x5787,0x5788,0x5789,0x578A,0x578D,0x578E,0x578F,/* 0x80-0x87 */
+	0x5790,0x5791,0x5794,0x5795,0x5796,0x5797,0x5798,0x5799,/* 0x88-0x8F */
+	0x579A,0x579C,0x579D,0x579E,0x579F,0x57A5,0x57A8,0x57AA,/* 0x90-0x97 */
+	0x57AC,0x57AF,0x57B0,0x57B1,0x57B3,0x57B5,0x57B6,0x57B7,/* 0x98-0x9F */
+	0x57B9,0x57BA,0x57BB,0x57BC,0x57BD,0x57BE,0x57BF,0x57C0,/* 0xA0-0xA7 */
+	0x57C1,0x57C4,0x57C5,0x57C6,0x57C7,0x57C8,0x57C9,0x57CA,/* 0xA8-0xAF */
+	0x57CC,0x57CD,0x57D0,0x57D1,0x57D3,0x57D6,0x57D7,0x57DB,/* 0xB0-0xB7 */
+	0x57DC,0x57DE,0x57E1,0x57E2,0x57E3,0x57E5,0x57E6,0x57E7,/* 0xB8-0xBF */
+	0x57E8,0x57E9,0x57EA,0x57EB,0x57EC,0x57EE,0x57F0,0x57F1,/* 0xC0-0xC7 */
+	0x57F2,0x57F3,0x57F5,0x57F6,0x57F7,0x57FB,0x57FC,0x57FE,/* 0xC8-0xCF */
+	0x57FF,0x5801,0x5803,0x5804,0x5805,0x5808,0x5809,0x580A,/* 0xD0-0xD7 */
+	0x580C,0x580E,0x580F,0x5810,0x5812,0x5813,0x5814,0x5816,/* 0xD8-0xDF */
+	0x5817,0x5818,0x581A,0x581B,0x581C,0x581D,0x581F,0x5822,/* 0xE0-0xE7 */
+	0x5823,0x5825,0x5826,0x5827,0x5828,0x5829,0x582B,0x582C,/* 0xE8-0xEF */
+	0x582D,0x582E,0x582F,0x5831,0x5832,0x5833,0x5834,0x5836,/* 0xF0-0xF7 */
+	0x5837,0x5838,0x5839,0x583A,0x583B,0x583C,0x583D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_89[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x583E,0x583F,0x5840,0x5841,0x5842,0x5843,0x5845,0x5846,/* 0x40-0x47 */
+	0x5847,0x5848,0x5849,0x584A,0x584B,0x584E,0x584F,0x5850,/* 0x48-0x4F */
+	0x5852,0x5853,0x5855,0x5856,0x5857,0x5859,0x585A,0x585B,/* 0x50-0x57 */
+	0x585C,0x585D,0x585F,0x5860,0x5861,0x5862,0x5863,0x5864,/* 0x58-0x5F */
+	0x5866,0x5867,0x5868,0x5869,0x586A,0x586D,0x586E,0x586F,/* 0x60-0x67 */
+	0x5870,0x5871,0x5872,0x5873,0x5874,0x5875,0x5876,0x5877,/* 0x68-0x6F */
+	0x5878,0x5879,0x587A,0x587B,0x587C,0x587D,0x587F,0x5882,/* 0x70-0x77 */
+	0x5884,0x5886,0x5887,0x5888,0x588A,0x588B,0x588C,0x0000,/* 0x78-0x7F */
+
+	0x588D,0x588E,0x588F,0x5890,0x5891,0x5894,0x5895,0x5896,/* 0x80-0x87 */
+	0x5897,0x5898,0x589B,0x589C,0x589D,0x58A0,0x58A1,0x58A2,/* 0x88-0x8F */
+	0x58A3,0x58A4,0x58A5,0x58A6,0x58A7,0x58AA,0x58AB,0x58AC,/* 0x90-0x97 */
+	0x58AD,0x58AE,0x58AF,0x58B0,0x58B1,0x58B2,0x58B3,0x58B4,/* 0x98-0x9F */
+	0x58B5,0x58B6,0x58B7,0x58B8,0x58B9,0x58BA,0x58BB,0x58BD,/* 0xA0-0xA7 */
+	0x58BE,0x58BF,0x58C0,0x58C2,0x58C3,0x58C4,0x58C6,0x58C7,/* 0xA8-0xAF */
+	0x58C8,0x58C9,0x58CA,0x58CB,0x58CC,0x58CD,0x58CE,0x58CF,/* 0xB0-0xB7 */
+	0x58D0,0x58D2,0x58D3,0x58D4,0x58D6,0x58D7,0x58D8,0x58D9,/* 0xB8-0xBF */
+	0x58DA,0x58DB,0x58DC,0x58DD,0x58DE,0x58DF,0x58E0,0x58E1,/* 0xC0-0xC7 */
+	0x58E2,0x58E3,0x58E5,0x58E6,0x58E7,0x58E8,0x58E9,0x58EA,/* 0xC8-0xCF */
+	0x58ED,0x58EF,0x58F1,0x58F2,0x58F4,0x58F5,0x58F7,0x58F8,/* 0xD0-0xD7 */
+	0x58FA,0x58FB,0x58FC,0x58FD,0x58FE,0x58FF,0x5900,0x5901,/* 0xD8-0xDF */
+	0x5903,0x5905,0x5906,0x5908,0x5909,0x590A,0x590B,0x590C,/* 0xE0-0xE7 */
+	0x590E,0x5910,0x5911,0x5912,0x5913,0x5917,0x5918,0x591B,/* 0xE8-0xEF */
+	0x591D,0x591E,0x5920,0x5921,0x5922,0x5923,0x5926,0x5928,/* 0xF0-0xF7 */
+	0x592C,0x5930,0x5932,0x5933,0x5935,0x5936,0x593B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x593D,0x593E,0x593F,0x5940,0x5943,0x5945,0x5946,0x594A,/* 0x40-0x47 */
+	0x594C,0x594D,0x5950,0x5952,0x5953,0x5959,0x595B,0x595C,/* 0x48-0x4F */
+	0x595D,0x595E,0x595F,0x5961,0x5963,0x5964,0x5966,0x5967,/* 0x50-0x57 */
+	0x5968,0x5969,0x596A,0x596B,0x596C,0x596D,0x596E,0x596F,/* 0x58-0x5F */
+	0x5970,0x5971,0x5972,0x5975,0x5977,0x597A,0x597B,0x597C,/* 0x60-0x67 */
+	0x597E,0x597F,0x5980,0x5985,0x5989,0x598B,0x598C,0x598E,/* 0x68-0x6F */
+	0x598F,0x5990,0x5991,0x5994,0x5995,0x5998,0x599A,0x599B,/* 0x70-0x77 */
+	0x599C,0x599D,0x599F,0x59A0,0x59A1,0x59A2,0x59A6,0x0000,/* 0x78-0x7F */
+
+	0x59A7,0x59AC,0x59AD,0x59B0,0x59B1,0x59B3,0x59B4,0x59B5,/* 0x80-0x87 */
+	0x59B6,0x59B7,0x59B8,0x59BA,0x59BC,0x59BD,0x59BF,0x59C0,/* 0x88-0x8F */
+	0x59C1,0x59C2,0x59C3,0x59C4,0x59C5,0x59C7,0x59C8,0x59C9,/* 0x90-0x97 */
+	0x59CC,0x59CD,0x59CE,0x59CF,0x59D5,0x59D6,0x59D9,0x59DB,/* 0x98-0x9F */
+	0x59DE,0x59DF,0x59E0,0x59E1,0x59E2,0x59E4,0x59E6,0x59E7,/* 0xA0-0xA7 */
+	0x59E9,0x59EA,0x59EB,0x59ED,0x59EE,0x59EF,0x59F0,0x59F1,/* 0xA8-0xAF */
+	0x59F2,0x59F3,0x59F4,0x59F5,0x59F6,0x59F7,0x59F8,0x59FA,/* 0xB0-0xB7 */
+	0x59FC,0x59FD,0x59FE,0x5A00,0x5A02,0x5A0A,0x5A0B,0x5A0D,/* 0xB8-0xBF */
+	0x5A0E,0x5A0F,0x5A10,0x5A12,0x5A14,0x5A15,0x5A16,0x5A17,/* 0xC0-0xC7 */
+	0x5A19,0x5A1A,0x5A1B,0x5A1D,0x5A1E,0x5A21,0x5A22,0x5A24,/* 0xC8-0xCF */
+	0x5A26,0x5A27,0x5A28,0x5A2A,0x5A2B,0x5A2C,0x5A2D,0x5A2E,/* 0xD0-0xD7 */
+	0x5A2F,0x5A30,0x5A33,0x5A35,0x5A37,0x5A38,0x5A39,0x5A3A,/* 0xD8-0xDF */
+	0x5A3B,0x5A3D,0x5A3E,0x5A3F,0x5A41,0x5A42,0x5A43,0x5A44,/* 0xE0-0xE7 */
+	0x5A45,0x5A47,0x5A48,0x5A4B,0x5A4C,0x5A4D,0x5A4E,0x5A4F,/* 0xE8-0xEF */
+	0x5A50,0x5A51,0x5A52,0x5A53,0x5A54,0x5A56,0x5A57,0x5A58,/* 0xF0-0xF7 */
+	0x5A59,0x5A5B,0x5A5C,0x5A5D,0x5A5E,0x5A5F,0x5A60,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5A61,0x5A63,0x5A64,0x5A65,0x5A66,0x5A68,0x5A69,0x5A6B,/* 0x40-0x47 */
+	0x5A6C,0x5A6D,0x5A6E,0x5A6F,0x5A70,0x5A71,0x5A72,0x5A73,/* 0x48-0x4F */
+	0x5A78,0x5A79,0x5A7B,0x5A7C,0x5A7D,0x5A7E,0x5A80,0x5A81,/* 0x50-0x57 */
+	0x5A82,0x5A83,0x5A84,0x5A85,0x5A86,0x5A87,0x5A88,0x5A89,/* 0x58-0x5F */
+	0x5A8A,0x5A8B,0x5A8C,0x5A8D,0x5A8E,0x5A8F,0x5A90,0x5A91,/* 0x60-0x67 */
+	0x5A93,0x5A94,0x5A95,0x5A96,0x5A97,0x5A98,0x5A99,0x5A9C,/* 0x68-0x6F */
+	0x5A9D,0x5A9E,0x5A9F,0x5AA0,0x5AA1,0x5AA2,0x5AA3,0x5AA4,/* 0x70-0x77 */
+	0x5AA5,0x5AA6,0x5AA7,0x5AA8,0x5AA9,0x5AAB,0x5AAC,0x0000,/* 0x78-0x7F */
+
+	0x5AAD,0x5AAE,0x5AAF,0x5AB0,0x5AB1,0x5AB4,0x5AB6,0x5AB7,/* 0x80-0x87 */
+	0x5AB9,0x5ABA,0x5ABB,0x5ABC,0x5ABD,0x5ABF,0x5AC0,0x5AC3,/* 0x88-0x8F */
+	0x5AC4,0x5AC5,0x5AC6,0x5AC7,0x5AC8,0x5ACA,0x5ACB,0x5ACD,/* 0x90-0x97 */
+	0x5ACE,0x5ACF,0x5AD0,0x5AD1,0x5AD3,0x5AD5,0x5AD7,0x5AD9,/* 0x98-0x9F */
+	0x5ADA,0x5ADB,0x5ADD,0x5ADE,0x5ADF,0x5AE2,0x5AE4,0x5AE5,/* 0xA0-0xA7 */
+	0x5AE7,0x5AE8,0x5AEA,0x5AEC,0x5AED,0x5AEE,0x5AEF,0x5AF0,/* 0xA8-0xAF */
+	0x5AF2,0x5AF3,0x5AF4,0x5AF5,0x5AF6,0x5AF7,0x5AF8,0x5AF9,/* 0xB0-0xB7 */
+	0x5AFA,0x5AFB,0x5AFC,0x5AFD,0x5AFE,0x5AFF,0x5B00,0x5B01,/* 0xB8-0xBF */
+	0x5B02,0x5B03,0x5B04,0x5B05,0x5B06,0x5B07,0x5B08,0x5B0A,/* 0xC0-0xC7 */
+	0x5B0B,0x5B0C,0x5B0D,0x5B0E,0x5B0F,0x5B10,0x5B11,0x5B12,/* 0xC8-0xCF */
+	0x5B13,0x5B14,0x5B15,0x5B18,0x5B19,0x5B1A,0x5B1B,0x5B1C,/* 0xD0-0xD7 */
+	0x5B1D,0x5B1E,0x5B1F,0x5B20,0x5B21,0x5B22,0x5B23,0x5B24,/* 0xD8-0xDF */
+	0x5B25,0x5B26,0x5B27,0x5B28,0x5B29,0x5B2A,0x5B2B,0x5B2C,/* 0xE0-0xE7 */
+	0x5B2D,0x5B2E,0x5B2F,0x5B30,0x5B31,0x5B33,0x5B35,0x5B36,/* 0xE8-0xEF */
+	0x5B38,0x5B39,0x5B3A,0x5B3B,0x5B3C,0x5B3D,0x5B3E,0x5B3F,/* 0xF0-0xF7 */
+	0x5B41,0x5B42,0x5B43,0x5B44,0x5B45,0x5B46,0x5B47,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5B48,0x5B49,0x5B4A,0x5B4B,0x5B4C,0x5B4D,0x5B4E,0x5B4F,/* 0x40-0x47 */
+	0x5B52,0x5B56,0x5B5E,0x5B60,0x5B61,0x5B67,0x5B68,0x5B6B,/* 0x48-0x4F */
+	0x5B6D,0x5B6E,0x5B6F,0x5B72,0x5B74,0x5B76,0x5B77,0x5B78,/* 0x50-0x57 */
+	0x5B79,0x5B7B,0x5B7C,0x5B7E,0x5B7F,0x5B82,0x5B86,0x5B8A,/* 0x58-0x5F */
+	0x5B8D,0x5B8E,0x5B90,0x5B91,0x5B92,0x5B94,0x5B96,0x5B9F,/* 0x60-0x67 */
+	0x5BA7,0x5BA8,0x5BA9,0x5BAC,0x5BAD,0x5BAE,0x5BAF,0x5BB1,/* 0x68-0x6F */
+	0x5BB2,0x5BB7,0x5BBA,0x5BBB,0x5BBC,0x5BC0,0x5BC1,0x5BC3,/* 0x70-0x77 */
+	0x5BC8,0x5BC9,0x5BCA,0x5BCB,0x5BCD,0x5BCE,0x5BCF,0x0000,/* 0x78-0x7F */
+
+	0x5BD1,0x5BD4,0x5BD5,0x5BD6,0x5BD7,0x5BD8,0x5BD9,0x5BDA,/* 0x80-0x87 */
+	0x5BDB,0x5BDC,0x5BE0,0x5BE2,0x5BE3,0x5BE6,0x5BE7,0x5BE9,/* 0x88-0x8F */
+	0x5BEA,0x5BEB,0x5BEC,0x5BED,0x5BEF,0x5BF1,0x5BF2,0x5BF3,/* 0x90-0x97 */
+	0x5BF4,0x5BF5,0x5BF6,0x5BF7,0x5BFD,0x5BFE,0x5C00,0x5C02,/* 0x98-0x9F */
+	0x5C03,0x5C05,0x5C07,0x5C08,0x5C0B,0x5C0C,0x5C0D,0x5C0E,/* 0xA0-0xA7 */
+	0x5C10,0x5C12,0x5C13,0x5C17,0x5C19,0x5C1B,0x5C1E,0x5C1F,/* 0xA8-0xAF */
+	0x5C20,0x5C21,0x5C23,0x5C26,0x5C28,0x5C29,0x5C2A,0x5C2B,/* 0xB0-0xB7 */
+	0x5C2D,0x5C2E,0x5C2F,0x5C30,0x5C32,0x5C33,0x5C35,0x5C36,/* 0xB8-0xBF */
+	0x5C37,0x5C43,0x5C44,0x5C46,0x5C47,0x5C4C,0x5C4D,0x5C52,/* 0xC0-0xC7 */
+	0x5C53,0x5C54,0x5C56,0x5C57,0x5C58,0x5C5A,0x5C5B,0x5C5C,/* 0xC8-0xCF */
+	0x5C5D,0x5C5F,0x5C62,0x5C64,0x5C67,0x5C68,0x5C69,0x5C6A,/* 0xD0-0xD7 */
+	0x5C6B,0x5C6C,0x5C6D,0x5C70,0x5C72,0x5C73,0x5C74,0x5C75,/* 0xD8-0xDF */
+	0x5C76,0x5C77,0x5C78,0x5C7B,0x5C7C,0x5C7D,0x5C7E,0x5C80,/* 0xE0-0xE7 */
+	0x5C83,0x5C84,0x5C85,0x5C86,0x5C87,0x5C89,0x5C8A,0x5C8B,/* 0xE8-0xEF */
+	0x5C8E,0x5C8F,0x5C92,0x5C93,0x5C95,0x5C9D,0x5C9E,0x5C9F,/* 0xF0-0xF7 */
+	0x5CA0,0x5CA1,0x5CA4,0x5CA5,0x5CA6,0x5CA7,0x5CA8,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5CAA,0x5CAE,0x5CAF,0x5CB0,0x5CB2,0x5CB4,0x5CB6,0x5CB9,/* 0x40-0x47 */
+	0x5CBA,0x5CBB,0x5CBC,0x5CBE,0x5CC0,0x5CC2,0x5CC3,0x5CC5,/* 0x48-0x4F */
+	0x5CC6,0x5CC7,0x5CC8,0x5CC9,0x5CCA,0x5CCC,0x5CCD,0x5CCE,/* 0x50-0x57 */
+	0x5CCF,0x5CD0,0x5CD1,0x5CD3,0x5CD4,0x5CD5,0x5CD6,0x5CD7,/* 0x58-0x5F */
+	0x5CD8,0x5CDA,0x5CDB,0x5CDC,0x5CDD,0x5CDE,0x5CDF,0x5CE0,/* 0x60-0x67 */
+	0x5CE2,0x5CE3,0x5CE7,0x5CE9,0x5CEB,0x5CEC,0x5CEE,0x5CEF,/* 0x68-0x6F */
+	0x5CF1,0x5CF2,0x5CF3,0x5CF4,0x5CF5,0x5CF6,0x5CF7,0x5CF8,/* 0x70-0x77 */
+	0x5CF9,0x5CFA,0x5CFC,0x5CFD,0x5CFE,0x5CFF,0x5D00,0x0000,/* 0x78-0x7F */
+
+	0x5D01,0x5D04,0x5D05,0x5D08,0x5D09,0x5D0A,0x5D0B,0x5D0C,/* 0x80-0x87 */
+	0x5D0D,0x5D0F,0x5D10,0x5D11,0x5D12,0x5D13,0x5D15,0x5D17,/* 0x88-0x8F */
+	0x5D18,0x5D19,0x5D1A,0x5D1C,0x5D1D,0x5D1F,0x5D20,0x5D21,/* 0x90-0x97 */
+	0x5D22,0x5D23,0x5D25,0x5D28,0x5D2A,0x5D2B,0x5D2C,0x5D2F,/* 0x98-0x9F */
+	0x5D30,0x5D31,0x5D32,0x5D33,0x5D35,0x5D36,0x5D37,0x5D38,/* 0xA0-0xA7 */
+	0x5D39,0x5D3A,0x5D3B,0x5D3C,0x5D3F,0x5D40,0x5D41,0x5D42,/* 0xA8-0xAF */
+	0x5D43,0x5D44,0x5D45,0x5D46,0x5D48,0x5D49,0x5D4D,0x5D4E,/* 0xB0-0xB7 */
+	0x5D4F,0x5D50,0x5D51,0x5D52,0x5D53,0x5D54,0x5D55,0x5D56,/* 0xB8-0xBF */
+	0x5D57,0x5D59,0x5D5A,0x5D5C,0x5D5E,0x5D5F,0x5D60,0x5D61,/* 0xC0-0xC7 */
+	0x5D62,0x5D63,0x5D64,0x5D65,0x5D66,0x5D67,0x5D68,0x5D6A,/* 0xC8-0xCF */
+	0x5D6D,0x5D6E,0x5D70,0x5D71,0x5D72,0x5D73,0x5D75,0x5D76,/* 0xD0-0xD7 */
+	0x5D77,0x5D78,0x5D79,0x5D7A,0x5D7B,0x5D7C,0x5D7D,0x5D7E,/* 0xD8-0xDF */
+	0x5D7F,0x5D80,0x5D81,0x5D83,0x5D84,0x5D85,0x5D86,0x5D87,/* 0xE0-0xE7 */
+	0x5D88,0x5D89,0x5D8A,0x5D8B,0x5D8C,0x5D8D,0x5D8E,0x5D8F,/* 0xE8-0xEF */
+	0x5D90,0x5D91,0x5D92,0x5D93,0x5D94,0x5D95,0x5D96,0x5D97,/* 0xF0-0xF7 */
+	0x5D98,0x5D9A,0x5D9B,0x5D9C,0x5D9E,0x5D9F,0x5DA0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5DA1,0x5DA2,0x5DA3,0x5DA4,0x5DA5,0x5DA6,0x5DA7,0x5DA8,/* 0x40-0x47 */
+	0x5DA9,0x5DAA,0x5DAB,0x5DAC,0x5DAD,0x5DAE,0x5DAF,0x5DB0,/* 0x48-0x4F */
+	0x5DB1,0x5DB2,0x5DB3,0x5DB4,0x5DB5,0x5DB6,0x5DB8,0x5DB9,/* 0x50-0x57 */
+	0x5DBA,0x5DBB,0x5DBC,0x5DBD,0x5DBE,0x5DBF,0x5DC0,0x5DC1,/* 0x58-0x5F */
+	0x5DC2,0x5DC3,0x5DC4,0x5DC6,0x5DC7,0x5DC8,0x5DC9,0x5DCA,/* 0x60-0x67 */
+	0x5DCB,0x5DCC,0x5DCE,0x5DCF,0x5DD0,0x5DD1,0x5DD2,0x5DD3,/* 0x68-0x6F */
+	0x5DD4,0x5DD5,0x5DD6,0x5DD7,0x5DD8,0x5DD9,0x5DDA,0x5DDC,/* 0x70-0x77 */
+	0x5DDF,0x5DE0,0x5DE3,0x5DE4,0x5DEA,0x5DEC,0x5DED,0x0000,/* 0x78-0x7F */
+
+	0x5DF0,0x5DF5,0x5DF6,0x5DF8,0x5DF9,0x5DFA,0x5DFB,0x5DFC,/* 0x80-0x87 */
+	0x5DFF,0x5E00,0x5E04,0x5E07,0x5E09,0x5E0A,0x5E0B,0x5E0D,/* 0x88-0x8F */
+	0x5E0E,0x5E12,0x5E13,0x5E17,0x5E1E,0x5E1F,0x5E20,0x5E21,/* 0x90-0x97 */
+	0x5E22,0x5E23,0x5E24,0x5E25,0x5E28,0x5E29,0x5E2A,0x5E2B,/* 0x98-0x9F */
+	0x5E2C,0x5E2F,0x5E30,0x5E32,0x5E33,0x5E34,0x5E35,0x5E36,/* 0xA0-0xA7 */
+	0x5E39,0x5E3A,0x5E3E,0x5E3F,0x5E40,0x5E41,0x5E43,0x5E46,/* 0xA8-0xAF */
+	0x5E47,0x5E48,0x5E49,0x5E4A,0x5E4B,0x5E4D,0x5E4E,0x5E4F,/* 0xB0-0xB7 */
+	0x5E50,0x5E51,0x5E52,0x5E53,0x5E56,0x5E57,0x5E58,0x5E59,/* 0xB8-0xBF */
+	0x5E5A,0x5E5C,0x5E5D,0x5E5F,0x5E60,0x5E63,0x5E64,0x5E65,/* 0xC0-0xC7 */
+	0x5E66,0x5E67,0x5E68,0x5E69,0x5E6A,0x5E6B,0x5E6C,0x5E6D,/* 0xC8-0xCF */
+	0x5E6E,0x5E6F,0x5E70,0x5E71,0x5E75,0x5E77,0x5E79,0x5E7E,/* 0xD0-0xD7 */
+	0x5E81,0x5E82,0x5E83,0x5E85,0x5E88,0x5E89,0x5E8C,0x5E8D,/* 0xD8-0xDF */
+	0x5E8E,0x5E92,0x5E98,0x5E9B,0x5E9D,0x5EA1,0x5EA2,0x5EA3,/* 0xE0-0xE7 */
+	0x5EA4,0x5EA8,0x5EA9,0x5EAA,0x5EAB,0x5EAC,0x5EAE,0x5EAF,/* 0xE8-0xEF */
+	0x5EB0,0x5EB1,0x5EB2,0x5EB4,0x5EBA,0x5EBB,0x5EBC,0x5EBD,/* 0xF0-0xF7 */
+	0x5EBF,0x5EC0,0x5EC1,0x5EC2,0x5EC3,0x5EC4,0x5EC5,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5EC6,0x5EC7,0x5EC8,0x5ECB,0x5ECC,0x5ECD,0x5ECE,0x5ECF,/* 0x40-0x47 */
+	0x5ED0,0x5ED4,0x5ED5,0x5ED7,0x5ED8,0x5ED9,0x5EDA,0x5EDC,/* 0x48-0x4F */
+	0x5EDD,0x5EDE,0x5EDF,0x5EE0,0x5EE1,0x5EE2,0x5EE3,0x5EE4,/* 0x50-0x57 */
+	0x5EE5,0x5EE6,0x5EE7,0x5EE9,0x5EEB,0x5EEC,0x5EED,0x5EEE,/* 0x58-0x5F */
+	0x5EEF,0x5EF0,0x5EF1,0x5EF2,0x5EF3,0x5EF5,0x5EF8,0x5EF9,/* 0x60-0x67 */
+	0x5EFB,0x5EFC,0x5EFD,0x5F05,0x5F06,0x5F07,0x5F09,0x5F0C,/* 0x68-0x6F */
+	0x5F0D,0x5F0E,0x5F10,0x5F12,0x5F14,0x5F16,0x5F19,0x5F1A,/* 0x70-0x77 */
+	0x5F1C,0x5F1D,0x5F1E,0x5F21,0x5F22,0x5F23,0x5F24,0x0000,/* 0x78-0x7F */
+
+	0x5F28,0x5F2B,0x5F2C,0x5F2E,0x5F30,0x5F32,0x5F33,0x5F34,/* 0x80-0x87 */
+	0x5F35,0x5F36,0x5F37,0x5F38,0x5F3B,0x5F3D,0x5F3E,0x5F3F,/* 0x88-0x8F */
+	0x5F41,0x5F42,0x5F43,0x5F44,0x5F45,0x5F46,0x5F47,0x5F48,/* 0x90-0x97 */
+	0x5F49,0x5F4A,0x5F4B,0x5F4C,0x5F4D,0x5F4E,0x5F4F,0x5F51,/* 0x98-0x9F */
+	0x5F54,0x5F59,0x5F5A,0x5F5B,0x5F5C,0x5F5E,0x5F5F,0x5F60,/* 0xA0-0xA7 */
+	0x5F63,0x5F65,0x5F67,0x5F68,0x5F6B,0x5F6E,0x5F6F,0x5F72,/* 0xA8-0xAF */
+	0x5F74,0x5F75,0x5F76,0x5F78,0x5F7A,0x5F7D,0x5F7E,0x5F7F,/* 0xB0-0xB7 */
+	0x5F83,0x5F86,0x5F8D,0x5F8E,0x5F8F,0x5F91,0x5F93,0x5F94,/* 0xB8-0xBF */
+	0x5F96,0x5F9A,0x5F9B,0x5F9D,0x5F9E,0x5F9F,0x5FA0,0x5FA2,/* 0xC0-0xC7 */
+	0x5FA3,0x5FA4,0x5FA5,0x5FA6,0x5FA7,0x5FA9,0x5FAB,0x5FAC,/* 0xC8-0xCF */
+	0x5FAF,0x5FB0,0x5FB1,0x5FB2,0x5FB3,0x5FB4,0x5FB6,0x5FB8,/* 0xD0-0xD7 */
+	0x5FB9,0x5FBA,0x5FBB,0x5FBE,0x5FBF,0x5FC0,0x5FC1,0x5FC2,/* 0xD8-0xDF */
+	0x5FC7,0x5FC8,0x5FCA,0x5FCB,0x5FCE,0x5FD3,0x5FD4,0x5FD5,/* 0xE0-0xE7 */
+	0x5FDA,0x5FDB,0x5FDC,0x5FDE,0x5FDF,0x5FE2,0x5FE3,0x5FE5,/* 0xE8-0xEF */
+	0x5FE6,0x5FE8,0x5FE9,0x5FEC,0x5FEF,0x5FF0,0x5FF2,0x5FF3,/* 0xF0-0xF7 */
+	0x5FF4,0x5FF6,0x5FF7,0x5FF9,0x5FFA,0x5FFC,0x6007,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_90[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6008,0x6009,0x600B,0x600C,0x6010,0x6011,0x6013,0x6017,/* 0x40-0x47 */
+	0x6018,0x601A,0x601E,0x601F,0x6022,0x6023,0x6024,0x602C,/* 0x48-0x4F */
+	0x602D,0x602E,0x6030,0x6031,0x6032,0x6033,0x6034,0x6036,/* 0x50-0x57 */
+	0x6037,0x6038,0x6039,0x603A,0x603D,0x603E,0x6040,0x6044,/* 0x58-0x5F */
+	0x6045,0x6046,0x6047,0x6048,0x6049,0x604A,0x604C,0x604E,/* 0x60-0x67 */
+	0x604F,0x6051,0x6053,0x6054,0x6056,0x6057,0x6058,0x605B,/* 0x68-0x6F */
+	0x605C,0x605E,0x605F,0x6060,0x6061,0x6065,0x6066,0x606E,/* 0x70-0x77 */
+	0x6071,0x6072,0x6074,0x6075,0x6077,0x607E,0x6080,0x0000,/* 0x78-0x7F */
+
+	0x6081,0x6082,0x6085,0x6086,0x6087,0x6088,0x608A,0x608B,/* 0x80-0x87 */
+	0x608E,0x608F,0x6090,0x6091,0x6093,0x6095,0x6097,0x6098,/* 0x88-0x8F */
+	0x6099,0x609C,0x609E,0x60A1,0x60A2,0x60A4,0x60A5,0x60A7,/* 0x90-0x97 */
+	0x60A9,0x60AA,0x60AE,0x60B0,0x60B3,0x60B5,0x60B6,0x60B7,/* 0x98-0x9F */
+	0x60B9,0x60BA,0x60BD,0x60BE,0x60BF,0x60C0,0x60C1,0x60C2,/* 0xA0-0xA7 */
+	0x60C3,0x60C4,0x60C7,0x60C8,0x60C9,0x60CC,0x60CD,0x60CE,/* 0xA8-0xAF */
+	0x60CF,0x60D0,0x60D2,0x60D3,0x60D4,0x60D6,0x60D7,0x60D9,/* 0xB0-0xB7 */
+	0x60DB,0x60DE,0x60E1,0x60E2,0x60E3,0x60E4,0x60E5,0x60EA,/* 0xB8-0xBF */
+	0x60F1,0x60F2,0x60F5,0x60F7,0x60F8,0x60FB,0x60FC,0x60FD,/* 0xC0-0xC7 */
+	0x60FE,0x60FF,0x6102,0x6103,0x6104,0x6105,0x6107,0x610A,/* 0xC8-0xCF */
+	0x610B,0x610C,0x6110,0x6111,0x6112,0x6113,0x6114,0x6116,/* 0xD0-0xD7 */
+	0x6117,0x6118,0x6119,0x611B,0x611C,0x611D,0x611E,0x6121,/* 0xD8-0xDF */
+	0x6122,0x6125,0x6128,0x6129,0x612A,0x612C,0x612D,0x612E,/* 0xE0-0xE7 */
+	0x612F,0x6130,0x6131,0x6132,0x6133,0x6134,0x6135,0x6136,/* 0xE8-0xEF */
+	0x6137,0x6138,0x6139,0x613A,0x613B,0x613C,0x613D,0x613E,/* 0xF0-0xF7 */
+	0x6140,0x6141,0x6142,0x6143,0x6144,0x6145,0x6146,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_91[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6147,0x6149,0x614B,0x614D,0x614F,0x6150,0x6152,0x6153,/* 0x40-0x47 */
+	0x6154,0x6156,0x6157,0x6158,0x6159,0x615A,0x615B,0x615C,/* 0x48-0x4F */
+	0x615E,0x615F,0x6160,0x6161,0x6163,0x6164,0x6165,0x6166,/* 0x50-0x57 */
+	0x6169,0x616A,0x616B,0x616C,0x616D,0x616E,0x616F,0x6171,/* 0x58-0x5F */
+	0x6172,0x6173,0x6174,0x6176,0x6178,0x6179,0x617A,0x617B,/* 0x60-0x67 */
+	0x617C,0x617D,0x617E,0x617F,0x6180,0x6181,0x6182,0x6183,/* 0x68-0x6F */
+	0x6184,0x6185,0x6186,0x6187,0x6188,0x6189,0x618A,0x618C,/* 0x70-0x77 */
+	0x618D,0x618F,0x6190,0x6191,0x6192,0x6193,0x6195,0x0000,/* 0x78-0x7F */
+
+	0x6196,0x6197,0x6198,0x6199,0x619A,0x619B,0x619C,0x619E,/* 0x80-0x87 */
+	0x619F,0x61A0,0x61A1,0x61A2,0x61A3,0x61A4,0x61A5,0x61A6,/* 0x88-0x8F */
+	0x61AA,0x61AB,0x61AD,0x61AE,0x61AF,0x61B0,0x61B1,0x61B2,/* 0x90-0x97 */
+	0x61B3,0x61B4,0x61B5,0x61B6,0x61B8,0x61B9,0x61BA,0x61BB,/* 0x98-0x9F */
+	0x61BC,0x61BD,0x61BF,0x61C0,0x61C1,0x61C3,0x61C4,0x61C5,/* 0xA0-0xA7 */
+	0x61C6,0x61C7,0x61C9,0x61CC,0x61CD,0x61CE,0x61CF,0x61D0,/* 0xA8-0xAF */
+	0x61D3,0x61D5,0x61D6,0x61D7,0x61D8,0x61D9,0x61DA,0x61DB,/* 0xB0-0xB7 */
+	0x61DC,0x61DD,0x61DE,0x61DF,0x61E0,0x61E1,0x61E2,0x61E3,/* 0xB8-0xBF */
+	0x61E4,0x61E5,0x61E7,0x61E8,0x61E9,0x61EA,0x61EB,0x61EC,/* 0xC0-0xC7 */
+	0x61ED,0x61EE,0x61EF,0x61F0,0x61F1,0x61F2,0x61F3,0x61F4,/* 0xC8-0xCF */
+	0x61F6,0x61F7,0x61F8,0x61F9,0x61FA,0x61FB,0x61FC,0x61FD,/* 0xD0-0xD7 */
+	0x61FE,0x6200,0x6201,0x6202,0x6203,0x6204,0x6205,0x6207,/* 0xD8-0xDF */
+	0x6209,0x6213,0x6214,0x6219,0x621C,0x621D,0x621E,0x6220,/* 0xE0-0xE7 */
+	0x6223,0x6226,0x6227,0x6228,0x6229,0x622B,0x622D,0x622F,/* 0xE8-0xEF */
+	0x6230,0x6231,0x6232,0x6235,0x6236,0x6238,0x6239,0x623A,/* 0xF0-0xF7 */
+	0x623B,0x623C,0x6242,0x6244,0x6245,0x6246,0x624A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_92[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x624F,0x6250,0x6255,0x6256,0x6257,0x6259,0x625A,0x625C,/* 0x40-0x47 */
+	0x625D,0x625E,0x625F,0x6260,0x6261,0x6262,0x6264,0x6265,/* 0x48-0x4F */
+	0x6268,0x6271,0x6272,0x6274,0x6275,0x6277,0x6278,0x627A,/* 0x50-0x57 */
+	0x627B,0x627D,0x6281,0x6282,0x6283,0x6285,0x6286,0x6287,/* 0x58-0x5F */
+	0x6288,0x628B,0x628C,0x628D,0x628E,0x628F,0x6290,0x6294,/* 0x60-0x67 */
+	0x6299,0x629C,0x629D,0x629E,0x62A3,0x62A6,0x62A7,0x62A9,/* 0x68-0x6F */
+	0x62AA,0x62AD,0x62AE,0x62AF,0x62B0,0x62B2,0x62B3,0x62B4,/* 0x70-0x77 */
+	0x62B6,0x62B7,0x62B8,0x62BA,0x62BE,0x62C0,0x62C1,0x0000,/* 0x78-0x7F */
+
+	0x62C3,0x62CB,0x62CF,0x62D1,0x62D5,0x62DD,0x62DE,0x62E0,/* 0x80-0x87 */
+	0x62E1,0x62E4,0x62EA,0x62EB,0x62F0,0x62F2,0x62F5,0x62F8,/* 0x88-0x8F */
+	0x62F9,0x62FA,0x62FB,0x6300,0x6303,0x6304,0x6305,0x6306,/* 0x90-0x97 */
+	0x630A,0x630B,0x630C,0x630D,0x630F,0x6310,0x6312,0x6313,/* 0x98-0x9F */
+	0x6314,0x6315,0x6317,0x6318,0x6319,0x631C,0x6326,0x6327,/* 0xA0-0xA7 */
+	0x6329,0x632C,0x632D,0x632E,0x6330,0x6331,0x6333,0x6334,/* 0xA8-0xAF */
+	0x6335,0x6336,0x6337,0x6338,0x633B,0x633C,0x633E,0x633F,/* 0xB0-0xB7 */
+	0x6340,0x6341,0x6344,0x6347,0x6348,0x634A,0x6351,0x6352,/* 0xB8-0xBF */
+	0x6353,0x6354,0x6356,0x6357,0x6358,0x6359,0x635A,0x635B,/* 0xC0-0xC7 */
+	0x635C,0x635D,0x6360,0x6364,0x6365,0x6366,0x6368,0x636A,/* 0xC8-0xCF */
+	0x636B,0x636C,0x636F,0x6370,0x6372,0x6373,0x6374,0x6375,/* 0xD0-0xD7 */
+	0x6378,0x6379,0x637C,0x637D,0x637E,0x637F,0x6381,0x6383,/* 0xD8-0xDF */
+	0x6384,0x6385,0x6386,0x638B,0x638D,0x6391,0x6393,0x6394,/* 0xE0-0xE7 */
+	0x6395,0x6397,0x6399,0x639A,0x639B,0x639C,0x639D,0x639E,/* 0xE8-0xEF */
+	0x639F,0x63A1,0x63A4,0x63A6,0x63AB,0x63AF,0x63B1,0x63B2,/* 0xF0-0xF7 */
+	0x63B5,0x63B6,0x63B9,0x63BB,0x63BD,0x63BF,0x63C0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_93[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x63C1,0x63C2,0x63C3,0x63C5,0x63C7,0x63C8,0x63CA,0x63CB,/* 0x40-0x47 */
+	0x63CC,0x63D1,0x63D3,0x63D4,0x63D5,0x63D7,0x63D8,0x63D9,/* 0x48-0x4F */
+	0x63DA,0x63DB,0x63DC,0x63DD,0x63DF,0x63E2,0x63E4,0x63E5,/* 0x50-0x57 */
+	0x63E6,0x63E7,0x63E8,0x63EB,0x63EC,0x63EE,0x63EF,0x63F0,/* 0x58-0x5F */
+	0x63F1,0x63F3,0x63F5,0x63F7,0x63F9,0x63FA,0x63FB,0x63FC,/* 0x60-0x67 */
+	0x63FE,0x6403,0x6404,0x6406,0x6407,0x6408,0x6409,0x640A,/* 0x68-0x6F */
+	0x640D,0x640E,0x6411,0x6412,0x6415,0x6416,0x6417,0x6418,/* 0x70-0x77 */
+	0x6419,0x641A,0x641D,0x641F,0x6422,0x6423,0x6424,0x0000,/* 0x78-0x7F */
+
+	0x6425,0x6427,0x6428,0x6429,0x642B,0x642E,0x642F,0x6430,/* 0x80-0x87 */
+	0x6431,0x6432,0x6433,0x6435,0x6436,0x6437,0x6438,0x6439,/* 0x88-0x8F */
+	0x643B,0x643C,0x643E,0x6440,0x6442,0x6443,0x6449,0x644B,/* 0x90-0x97 */
+	0x644C,0x644D,0x644E,0x644F,0x6450,0x6451,0x6453,0x6455,/* 0x98-0x9F */
+	0x6456,0x6457,0x6459,0x645A,0x645B,0x645C,0x645D,0x645F,/* 0xA0-0xA7 */
+	0x6460,0x6461,0x6462,0x6463,0x6464,0x6465,0x6466,0x6468,/* 0xA8-0xAF */
+	0x646A,0x646B,0x646C,0x646E,0x646F,0x6470,0x6471,0x6472,/* 0xB0-0xB7 */
+	0x6473,0x6474,0x6475,0x6476,0x6477,0x647B,0x647C,0x647D,/* 0xB8-0xBF */
+	0x647E,0x647F,0x6480,0x6481,0x6483,0x6486,0x6488,0x6489,/* 0xC0-0xC7 */
+	0x648A,0x648B,0x648C,0x648D,0x648E,0x648F,0x6490,0x6493,/* 0xC8-0xCF */
+	0x6494,0x6497,0x6498,0x649A,0x649B,0x649C,0x649D,0x649F,/* 0xD0-0xD7 */
+	0x64A0,0x64A1,0x64A2,0x64A3,0x64A5,0x64A6,0x64A7,0x64A8,/* 0xD8-0xDF */
+	0x64AA,0x64AB,0x64AF,0x64B1,0x64B2,0x64B3,0x64B4,0x64B6,/* 0xE0-0xE7 */
+	0x64B9,0x64BB,0x64BD,0x64BE,0x64BF,0x64C1,0x64C3,0x64C4,/* 0xE8-0xEF */
+	0x64C6,0x64C7,0x64C8,0x64C9,0x64CA,0x64CB,0x64CC,0x64CF,/* 0xF0-0xF7 */
+	0x64D1,0x64D3,0x64D4,0x64D5,0x64D6,0x64D9,0x64DA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_94[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x64DB,0x64DC,0x64DD,0x64DF,0x64E0,0x64E1,0x64E3,0x64E5,/* 0x40-0x47 */
+	0x64E7,0x64E8,0x64E9,0x64EA,0x64EB,0x64EC,0x64ED,0x64EE,/* 0x48-0x4F */
+	0x64EF,0x64F0,0x64F1,0x64F2,0x64F3,0x64F4,0x64F5,0x64F6,/* 0x50-0x57 */
+	0x64F7,0x64F8,0x64F9,0x64FA,0x64FB,0x64FC,0x64FD,0x64FE,/* 0x58-0x5F */
+	0x64FF,0x6501,0x6502,0x6503,0x6504,0x6505,0x6506,0x6507,/* 0x60-0x67 */
+	0x6508,0x650A,0x650B,0x650C,0x650D,0x650E,0x650F,0x6510,/* 0x68-0x6F */
+	0x6511,0x6513,0x6514,0x6515,0x6516,0x6517,0x6519,0x651A,/* 0x70-0x77 */
+	0x651B,0x651C,0x651D,0x651E,0x651F,0x6520,0x6521,0x0000,/* 0x78-0x7F */
+
+	0x6522,0x6523,0x6524,0x6526,0x6527,0x6528,0x6529,0x652A,/* 0x80-0x87 */
+	0x652C,0x652D,0x6530,0x6531,0x6532,0x6533,0x6537,0x653A,/* 0x88-0x8F */
+	0x653C,0x653D,0x6540,0x6541,0x6542,0x6543,0x6544,0x6546,/* 0x90-0x97 */
+	0x6547,0x654A,0x654B,0x654D,0x654E,0x6550,0x6552,0x6553,/* 0x98-0x9F */
+	0x6554,0x6557,0x6558,0x655A,0x655C,0x655F,0x6560,0x6561,/* 0xA0-0xA7 */
+	0x6564,0x6565,0x6567,0x6568,0x6569,0x656A,0x656D,0x656E,/* 0xA8-0xAF */
+	0x656F,0x6571,0x6573,0x6575,0x6576,0x6578,0x6579,0x657A,/* 0xB0-0xB7 */
+	0x657B,0x657C,0x657D,0x657E,0x657F,0x6580,0x6581,0x6582,/* 0xB8-0xBF */
+	0x6583,0x6584,0x6585,0x6586,0x6588,0x6589,0x658A,0x658D,/* 0xC0-0xC7 */
+	0x658E,0x658F,0x6592,0x6594,0x6595,0x6596,0x6598,0x659A,/* 0xC8-0xCF */
+	0x659D,0x659E,0x65A0,0x65A2,0x65A3,0x65A6,0x65A8,0x65AA,/* 0xD0-0xD7 */
+	0x65AC,0x65AE,0x65B1,0x65B2,0x65B3,0x65B4,0x65B5,0x65B6,/* 0xD8-0xDF */
+	0x65B7,0x65B8,0x65BA,0x65BB,0x65BE,0x65BF,0x65C0,0x65C2,/* 0xE0-0xE7 */
+	0x65C7,0x65C8,0x65C9,0x65CA,0x65CD,0x65D0,0x65D1,0x65D3,/* 0xE8-0xEF */
+	0x65D4,0x65D5,0x65D8,0x65D9,0x65DA,0x65DB,0x65DC,0x65DD,/* 0xF0-0xF7 */
+	0x65DE,0x65DF,0x65E1,0x65E3,0x65E4,0x65EA,0x65EB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_95[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x65F2,0x65F3,0x65F4,0x65F5,0x65F8,0x65F9,0x65FB,0x65FC,/* 0x40-0x47 */
+	0x65FD,0x65FE,0x65FF,0x6601,0x6604,0x6605,0x6607,0x6608,/* 0x48-0x4F */
+	0x6609,0x660B,0x660D,0x6610,0x6611,0x6612,0x6616,0x6617,/* 0x50-0x57 */
+	0x6618,0x661A,0x661B,0x661C,0x661E,0x6621,0x6622,0x6623,/* 0x58-0x5F */
+	0x6624,0x6626,0x6629,0x662A,0x662B,0x662C,0x662E,0x6630,/* 0x60-0x67 */
+	0x6632,0x6633,0x6637,0x6638,0x6639,0x663A,0x663B,0x663D,/* 0x68-0x6F */
+	0x663F,0x6640,0x6642,0x6644,0x6645,0x6646,0x6647,0x6648,/* 0x70-0x77 */
+	0x6649,0x664A,0x664D,0x664E,0x6650,0x6651,0x6658,0x0000,/* 0x78-0x7F */
+
+	0x6659,0x665B,0x665C,0x665D,0x665E,0x6660,0x6662,0x6663,/* 0x80-0x87 */
+	0x6665,0x6667,0x6669,0x666A,0x666B,0x666C,0x666D,0x6671,/* 0x88-0x8F */
+	0x6672,0x6673,0x6675,0x6678,0x6679,0x667B,0x667C,0x667D,/* 0x90-0x97 */
+	0x667F,0x6680,0x6681,0x6683,0x6685,0x6686,0x6688,0x6689,/* 0x98-0x9F */
+	0x668A,0x668B,0x668D,0x668E,0x668F,0x6690,0x6692,0x6693,/* 0xA0-0xA7 */
+	0x6694,0x6695,0x6698,0x6699,0x669A,0x669B,0x669C,0x669E,/* 0xA8-0xAF */
+	0x669F,0x66A0,0x66A1,0x66A2,0x66A3,0x66A4,0x66A5,0x66A6,/* 0xB0-0xB7 */
+	0x66A9,0x66AA,0x66AB,0x66AC,0x66AD,0x66AF,0x66B0,0x66B1,/* 0xB8-0xBF */
+	0x66B2,0x66B3,0x66B5,0x66B6,0x66B7,0x66B8,0x66BA,0x66BB,/* 0xC0-0xC7 */
+	0x66BC,0x66BD,0x66BF,0x66C0,0x66C1,0x66C2,0x66C3,0x66C4,/* 0xC8-0xCF */
+	0x66C5,0x66C6,0x66C7,0x66C8,0x66C9,0x66CA,0x66CB,0x66CC,/* 0xD0-0xD7 */
+	0x66CD,0x66CE,0x66CF,0x66D0,0x66D1,0x66D2,0x66D3,0x66D4,/* 0xD8-0xDF */
+	0x66D5,0x66D6,0x66D7,0x66D8,0x66DA,0x66DE,0x66DF,0x66E0,/* 0xE0-0xE7 */
+	0x66E1,0x66E2,0x66E3,0x66E4,0x66E5,0x66E7,0x66E8,0x66EA,/* 0xE8-0xEF */
+	0x66EB,0x66EC,0x66ED,0x66EE,0x66EF,0x66F1,0x66F5,0x66F6,/* 0xF0-0xF7 */
+	0x66F8,0x66FA,0x66FB,0x66FD,0x6701,0x6702,0x6703,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_96[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6704,0x6705,0x6706,0x6707,0x670C,0x670E,0x670F,0x6711,/* 0x40-0x47 */
+	0x6712,0x6713,0x6716,0x6718,0x6719,0x671A,0x671C,0x671E,/* 0x48-0x4F */
+	0x6720,0x6721,0x6722,0x6723,0x6724,0x6725,0x6727,0x6729,/* 0x50-0x57 */
+	0x672E,0x6730,0x6732,0x6733,0x6736,0x6737,0x6738,0x6739,/* 0x58-0x5F */
+	0x673B,0x673C,0x673E,0x673F,0x6741,0x6744,0x6745,0x6747,/* 0x60-0x67 */
+	0x674A,0x674B,0x674D,0x6752,0x6754,0x6755,0x6757,0x6758,/* 0x68-0x6F */
+	0x6759,0x675A,0x675B,0x675D,0x6762,0x6763,0x6764,0x6766,/* 0x70-0x77 */
+	0x6767,0x676B,0x676C,0x676E,0x6771,0x6774,0x6776,0x0000,/* 0x78-0x7F */
+
+	0x6778,0x6779,0x677A,0x677B,0x677D,0x6780,0x6782,0x6783,/* 0x80-0x87 */
+	0x6785,0x6786,0x6788,0x678A,0x678C,0x678D,0x678E,0x678F,/* 0x88-0x8F */
+	0x6791,0x6792,0x6793,0x6794,0x6796,0x6799,0x679B,0x679F,/* 0x90-0x97 */
+	0x67A0,0x67A1,0x67A4,0x67A6,0x67A9,0x67AC,0x67AE,0x67B1,/* 0x98-0x9F */
+	0x67B2,0x67B4,0x67B9,0x67BA,0x67BB,0x67BC,0x67BD,0x67BE,/* 0xA0-0xA7 */
+	0x67BF,0x67C0,0x67C2,0x67C5,0x67C6,0x67C7,0x67C8,0x67C9,/* 0xA8-0xAF */
+	0x67CA,0x67CB,0x67CC,0x67CD,0x67CE,0x67D5,0x67D6,0x67D7,/* 0xB0-0xB7 */
+	0x67DB,0x67DF,0x67E1,0x67E3,0x67E4,0x67E6,0x67E7,0x67E8,/* 0xB8-0xBF */
+	0x67EA,0x67EB,0x67ED,0x67EE,0x67F2,0x67F5,0x67F6,0x67F7,/* 0xC0-0xC7 */
+	0x67F8,0x67F9,0x67FA,0x67FB,0x67FC,0x67FE,0x6801,0x6802,/* 0xC8-0xCF */
+	0x6803,0x6804,0x6806,0x680D,0x6810,0x6812,0x6814,0x6815,/* 0xD0-0xD7 */
+	0x6818,0x6819,0x681A,0x681B,0x681C,0x681E,0x681F,0x6820,/* 0xD8-0xDF */
+	0x6822,0x6823,0x6824,0x6825,0x6826,0x6827,0x6828,0x682B,/* 0xE0-0xE7 */
+	0x682C,0x682D,0x682E,0x682F,0x6830,0x6831,0x6834,0x6835,/* 0xE8-0xEF */
+	0x6836,0x683A,0x683B,0x683F,0x6847,0x684B,0x684D,0x684F,/* 0xF0-0xF7 */
+	0x6852,0x6856,0x6857,0x6858,0x6859,0x685A,0x685B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_97[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x685C,0x685D,0x685E,0x685F,0x686A,0x686C,0x686D,0x686E,/* 0x40-0x47 */
+	0x686F,0x6870,0x6871,0x6872,0x6873,0x6875,0x6878,0x6879,/* 0x48-0x4F */
+	0x687A,0x687B,0x687C,0x687D,0x687E,0x687F,0x6880,0x6882,/* 0x50-0x57 */
+	0x6884,0x6887,0x6888,0x6889,0x688A,0x688B,0x688C,0x688D,/* 0x58-0x5F */
+	0x688E,0x6890,0x6891,0x6892,0x6894,0x6895,0x6896,0x6898,/* 0x60-0x67 */
+	0x6899,0x689A,0x689B,0x689C,0x689D,0x689E,0x689F,0x68A0,/* 0x68-0x6F */
+	0x68A1,0x68A3,0x68A4,0x68A5,0x68A9,0x68AA,0x68AB,0x68AC,/* 0x70-0x77 */
+	0x68AE,0x68B1,0x68B2,0x68B4,0x68B6,0x68B7,0x68B8,0x0000,/* 0x78-0x7F */
+
+	0x68B9,0x68BA,0x68BB,0x68BC,0x68BD,0x68BE,0x68BF,0x68C1,/* 0x80-0x87 */
+	0x68C3,0x68C4,0x68C5,0x68C6,0x68C7,0x68C8,0x68CA,0x68CC,/* 0x88-0x8F */
+	0x68CE,0x68CF,0x68D0,0x68D1,0x68D3,0x68D4,0x68D6,0x68D7,/* 0x90-0x97 */
+	0x68D9,0x68DB,0x68DC,0x68DD,0x68DE,0x68DF,0x68E1,0x68E2,/* 0x98-0x9F */
+	0x68E4,0x68E5,0x68E6,0x68E7,0x68E8,0x68E9,0x68EA,0x68EB,/* 0xA0-0xA7 */
+	0x68EC,0x68ED,0x68EF,0x68F2,0x68F3,0x68F4,0x68F6,0x68F7,/* 0xA8-0xAF */
+	0x68F8,0x68FB,0x68FD,0x68FE,0x68FF,0x6900,0x6902,0x6903,/* 0xB0-0xB7 */
+	0x6904,0x6906,0x6907,0x6908,0x6909,0x690A,0x690C,0x690F,/* 0xB8-0xBF */
+	0x6911,0x6913,0x6914,0x6915,0x6916,0x6917,0x6918,0x6919,/* 0xC0-0xC7 */
+	0x691A,0x691B,0x691C,0x691D,0x691E,0x6921,0x6922,0x6923,/* 0xC8-0xCF */
+	0x6925,0x6926,0x6927,0x6928,0x6929,0x692A,0x692B,0x692C,/* 0xD0-0xD7 */
+	0x692E,0x692F,0x6931,0x6932,0x6933,0x6935,0x6936,0x6937,/* 0xD8-0xDF */
+	0x6938,0x693A,0x693B,0x693C,0x693E,0x6940,0x6941,0x6943,/* 0xE0-0xE7 */
+	0x6944,0x6945,0x6946,0x6947,0x6948,0x6949,0x694A,0x694B,/* 0xE8-0xEF */
+	0x694C,0x694D,0x694E,0x694F,0x6950,0x6951,0x6952,0x6953,/* 0xF0-0xF7 */
+	0x6955,0x6956,0x6958,0x6959,0x695B,0x695C,0x695F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_98[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6961,0x6962,0x6964,0x6965,0x6967,0x6968,0x6969,0x696A,/* 0x40-0x47 */
+	0x696C,0x696D,0x696F,0x6970,0x6972,0x6973,0x6974,0x6975,/* 0x48-0x4F */
+	0x6976,0x697A,0x697B,0x697D,0x697E,0x697F,0x6981,0x6983,/* 0x50-0x57 */
+	0x6985,0x698A,0x698B,0x698C,0x698E,0x698F,0x6990,0x6991,/* 0x58-0x5F */
+	0x6992,0x6993,0x6996,0x6997,0x6999,0x699A,0x699D,0x699E,/* 0x60-0x67 */
+	0x699F,0x69A0,0x69A1,0x69A2,0x69A3,0x69A4,0x69A5,0x69A6,/* 0x68-0x6F */
+	0x69A9,0x69AA,0x69AC,0x69AE,0x69AF,0x69B0,0x69B2,0x69B3,/* 0x70-0x77 */
+	0x69B5,0x69B6,0x69B8,0x69B9,0x69BA,0x69BC,0x69BD,0x0000,/* 0x78-0x7F */
+
+	0x69BE,0x69BF,0x69C0,0x69C2,0x69C3,0x69C4,0x69C5,0x69C6,/* 0x80-0x87 */
+	0x69C7,0x69C8,0x69C9,0x69CB,0x69CD,0x69CF,0x69D1,0x69D2,/* 0x88-0x8F */
+	0x69D3,0x69D5,0x69D6,0x69D7,0x69D8,0x69D9,0x69DA,0x69DC,/* 0x90-0x97 */
+	0x69DD,0x69DE,0x69E1,0x69E2,0x69E3,0x69E4,0x69E5,0x69E6,/* 0x98-0x9F */
+	0x69E7,0x69E8,0x69E9,0x69EA,0x69EB,0x69EC,0x69EE,0x69EF,/* 0xA0-0xA7 */
+	0x69F0,0x69F1,0x69F3,0x69F4,0x69F5,0x69F6,0x69F7,0x69F8,/* 0xA8-0xAF */
+	0x69F9,0x69FA,0x69FB,0x69FC,0x69FE,0x6A00,0x6A01,0x6A02,/* 0xB0-0xB7 */
+	0x6A03,0x6A04,0x6A05,0x6A06,0x6A07,0x6A08,0x6A09,0x6A0B,/* 0xB8-0xBF */
+	0x6A0C,0x6A0D,0x6A0E,0x6A0F,0x6A10,0x6A11,0x6A12,0x6A13,/* 0xC0-0xC7 */
+	0x6A14,0x6A15,0x6A16,0x6A19,0x6A1A,0x6A1B,0x6A1C,0x6A1D,/* 0xC8-0xCF */
+	0x6A1E,0x6A20,0x6A22,0x6A23,0x6A24,0x6A25,0x6A26,0x6A27,/* 0xD0-0xD7 */
+	0x6A29,0x6A2B,0x6A2C,0x6A2D,0x6A2E,0x6A30,0x6A32,0x6A33,/* 0xD8-0xDF */
+	0x6A34,0x6A36,0x6A37,0x6A38,0x6A39,0x6A3A,0x6A3B,0x6A3C,/* 0xE0-0xE7 */
+	0x6A3F,0x6A40,0x6A41,0x6A42,0x6A43,0x6A45,0x6A46,0x6A48,/* 0xE8-0xEF */
+	0x6A49,0x6A4A,0x6A4B,0x6A4C,0x6A4D,0x6A4E,0x6A4F,0x6A51,/* 0xF0-0xF7 */
+	0x6A52,0x6A53,0x6A54,0x6A55,0x6A56,0x6A57,0x6A5A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_99[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6A5C,0x6A5D,0x6A5E,0x6A5F,0x6A60,0x6A62,0x6A63,0x6A64,/* 0x40-0x47 */
+	0x6A66,0x6A67,0x6A68,0x6A69,0x6A6A,0x6A6B,0x6A6C,0x6A6D,/* 0x48-0x4F */
+	0x6A6E,0x6A6F,0x6A70,0x6A72,0x6A73,0x6A74,0x6A75,0x6A76,/* 0x50-0x57 */
+	0x6A77,0x6A78,0x6A7A,0x6A7B,0x6A7D,0x6A7E,0x6A7F,0x6A81,/* 0x58-0x5F */
+	0x6A82,0x6A83,0x6A85,0x6A86,0x6A87,0x6A88,0x6A89,0x6A8A,/* 0x60-0x67 */
+	0x6A8B,0x6A8C,0x6A8D,0x6A8F,0x6A92,0x6A93,0x6A94,0x6A95,/* 0x68-0x6F */
+	0x6A96,0x6A98,0x6A99,0x6A9A,0x6A9B,0x6A9C,0x6A9D,0x6A9E,/* 0x70-0x77 */
+	0x6A9F,0x6AA1,0x6AA2,0x6AA3,0x6AA4,0x6AA5,0x6AA6,0x0000,/* 0x78-0x7F */
+
+	0x6AA7,0x6AA8,0x6AAA,0x6AAD,0x6AAE,0x6AAF,0x6AB0,0x6AB1,/* 0x80-0x87 */
+	0x6AB2,0x6AB3,0x6AB4,0x6AB5,0x6AB6,0x6AB7,0x6AB8,0x6AB9,/* 0x88-0x8F */
+	0x6ABA,0x6ABB,0x6ABC,0x6ABD,0x6ABE,0x6ABF,0x6AC0,0x6AC1,/* 0x90-0x97 */
+	0x6AC2,0x6AC3,0x6AC4,0x6AC5,0x6AC6,0x6AC7,0x6AC8,0x6AC9,/* 0x98-0x9F */
+	0x6ACA,0x6ACB,0x6ACC,0x6ACD,0x6ACE,0x6ACF,0x6AD0,0x6AD1,/* 0xA0-0xA7 */
+	0x6AD2,0x6AD3,0x6AD4,0x6AD5,0x6AD6,0x6AD7,0x6AD8,0x6AD9,/* 0xA8-0xAF */
+	0x6ADA,0x6ADB,0x6ADC,0x6ADD,0x6ADE,0x6ADF,0x6AE0,0x6AE1,/* 0xB0-0xB7 */
+	0x6AE2,0x6AE3,0x6AE4,0x6AE5,0x6AE6,0x6AE7,0x6AE8,0x6AE9,/* 0xB8-0xBF */
+	0x6AEA,0x6AEB,0x6AEC,0x6AED,0x6AEE,0x6AEF,0x6AF0,0x6AF1,/* 0xC0-0xC7 */
+	0x6AF2,0x6AF3,0x6AF4,0x6AF5,0x6AF6,0x6AF7,0x6AF8,0x6AF9,/* 0xC8-0xCF */
+	0x6AFA,0x6AFB,0x6AFC,0x6AFD,0x6AFE,0x6AFF,0x6B00,0x6B01,/* 0xD0-0xD7 */
+	0x6B02,0x6B03,0x6B04,0x6B05,0x6B06,0x6B07,0x6B08,0x6B09,/* 0xD8-0xDF */
+	0x6B0A,0x6B0B,0x6B0C,0x6B0D,0x6B0E,0x6B0F,0x6B10,0x6B11,/* 0xE0-0xE7 */
+	0x6B12,0x6B13,0x6B14,0x6B15,0x6B16,0x6B17,0x6B18,0x6B19,/* 0xE8-0xEF */
+	0x6B1A,0x6B1B,0x6B1C,0x6B1D,0x6B1E,0x6B1F,0x6B25,0x6B26,/* 0xF0-0xF7 */
+	0x6B28,0x6B29,0x6B2A,0x6B2B,0x6B2C,0x6B2D,0x6B2E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6B2F,0x6B30,0x6B31,0x6B33,0x6B34,0x6B35,0x6B36,0x6B38,/* 0x40-0x47 */
+	0x6B3B,0x6B3C,0x6B3D,0x6B3F,0x6B40,0x6B41,0x6B42,0x6B44,/* 0x48-0x4F */
+	0x6B45,0x6B48,0x6B4A,0x6B4B,0x6B4D,0x6B4E,0x6B4F,0x6B50,/* 0x50-0x57 */
+	0x6B51,0x6B52,0x6B53,0x6B54,0x6B55,0x6B56,0x6B57,0x6B58,/* 0x58-0x5F */
+	0x6B5A,0x6B5B,0x6B5C,0x6B5D,0x6B5E,0x6B5F,0x6B60,0x6B61,/* 0x60-0x67 */
+	0x6B68,0x6B69,0x6B6B,0x6B6C,0x6B6D,0x6B6E,0x6B6F,0x6B70,/* 0x68-0x6F */
+	0x6B71,0x6B72,0x6B73,0x6B74,0x6B75,0x6B76,0x6B77,0x6B78,/* 0x70-0x77 */
+	0x6B7A,0x6B7D,0x6B7E,0x6B7F,0x6B80,0x6B85,0x6B88,0x0000,/* 0x78-0x7F */
+
+	0x6B8C,0x6B8E,0x6B8F,0x6B90,0x6B91,0x6B94,0x6B95,0x6B97,/* 0x80-0x87 */
+	0x6B98,0x6B99,0x6B9C,0x6B9D,0x6B9E,0x6B9F,0x6BA0,0x6BA2,/* 0x88-0x8F */
+	0x6BA3,0x6BA4,0x6BA5,0x6BA6,0x6BA7,0x6BA8,0x6BA9,0x6BAB,/* 0x90-0x97 */
+	0x6BAC,0x6BAD,0x6BAE,0x6BAF,0x6BB0,0x6BB1,0x6BB2,0x6BB6,/* 0x98-0x9F */
+	0x6BB8,0x6BB9,0x6BBA,0x6BBB,0x6BBC,0x6BBD,0x6BBE,0x6BC0,/* 0xA0-0xA7 */
+	0x6BC3,0x6BC4,0x6BC6,0x6BC7,0x6BC8,0x6BC9,0x6BCA,0x6BCC,/* 0xA8-0xAF */
+	0x6BCE,0x6BD0,0x6BD1,0x6BD8,0x6BDA,0x6BDC,0x6BDD,0x6BDE,/* 0xB0-0xB7 */
+	0x6BDF,0x6BE0,0x6BE2,0x6BE3,0x6BE4,0x6BE5,0x6BE6,0x6BE7,/* 0xB8-0xBF */
+	0x6BE8,0x6BE9,0x6BEC,0x6BED,0x6BEE,0x6BF0,0x6BF1,0x6BF2,/* 0xC0-0xC7 */
+	0x6BF4,0x6BF6,0x6BF7,0x6BF8,0x6BFA,0x6BFB,0x6BFC,0x6BFE,/* 0xC8-0xCF */
+	0x6BFF,0x6C00,0x6C01,0x6C02,0x6C03,0x6C04,0x6C08,0x6C09,/* 0xD0-0xD7 */
+	0x6C0A,0x6C0B,0x6C0C,0x6C0E,0x6C12,0x6C17,0x6C1C,0x6C1D,/* 0xD8-0xDF */
+	0x6C1E,0x6C20,0x6C23,0x6C25,0x6C2B,0x6C2C,0x6C2D,0x6C31,/* 0xE0-0xE7 */
+	0x6C33,0x6C36,0x6C37,0x6C39,0x6C3A,0x6C3B,0x6C3C,0x6C3E,/* 0xE8-0xEF */
+	0x6C3F,0x6C43,0x6C44,0x6C45,0x6C48,0x6C4B,0x6C4C,0x6C4D,/* 0xF0-0xF7 */
+	0x6C4E,0x6C4F,0x6C51,0x6C52,0x6C53,0x6C56,0x6C58,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6C59,0x6C5A,0x6C62,0x6C63,0x6C65,0x6C66,0x6C67,0x6C6B,/* 0x40-0x47 */
+	0x6C6C,0x6C6D,0x6C6E,0x6C6F,0x6C71,0x6C73,0x6C75,0x6C77,/* 0x48-0x4F */
+	0x6C78,0x6C7A,0x6C7B,0x6C7C,0x6C7F,0x6C80,0x6C84,0x6C87,/* 0x50-0x57 */
+	0x6C8A,0x6C8B,0x6C8D,0x6C8E,0x6C91,0x6C92,0x6C95,0x6C96,/* 0x58-0x5F */
+	0x6C97,0x6C98,0x6C9A,0x6C9C,0x6C9D,0x6C9E,0x6CA0,0x6CA2,/* 0x60-0x67 */
+	0x6CA8,0x6CAC,0x6CAF,0x6CB0,0x6CB4,0x6CB5,0x6CB6,0x6CB7,/* 0x68-0x6F */
+	0x6CBA,0x6CC0,0x6CC1,0x6CC2,0x6CC3,0x6CC6,0x6CC7,0x6CC8,/* 0x70-0x77 */
+	0x6CCB,0x6CCD,0x6CCE,0x6CCF,0x6CD1,0x6CD2,0x6CD8,0x0000,/* 0x78-0x7F */
+
+	0x6CD9,0x6CDA,0x6CDC,0x6CDD,0x6CDF,0x6CE4,0x6CE6,0x6CE7,/* 0x80-0x87 */
+	0x6CE9,0x6CEC,0x6CED,0x6CF2,0x6CF4,0x6CF9,0x6CFF,0x6D00,/* 0x88-0x8F */
+	0x6D02,0x6D03,0x6D05,0x6D06,0x6D08,0x6D09,0x6D0A,0x6D0D,/* 0x90-0x97 */
+	0x6D0F,0x6D10,0x6D11,0x6D13,0x6D14,0x6D15,0x6D16,0x6D18,/* 0x98-0x9F */
+	0x6D1C,0x6D1D,0x6D1F,0x6D20,0x6D21,0x6D22,0x6D23,0x6D24,/* 0xA0-0xA7 */
+	0x6D26,0x6D28,0x6D29,0x6D2C,0x6D2D,0x6D2F,0x6D30,0x6D34,/* 0xA8-0xAF */
+	0x6D36,0x6D37,0x6D38,0x6D3A,0x6D3F,0x6D40,0x6D42,0x6D44,/* 0xB0-0xB7 */
+	0x6D49,0x6D4C,0x6D50,0x6D55,0x6D56,0x6D57,0x6D58,0x6D5B,/* 0xB8-0xBF */
+	0x6D5D,0x6D5F,0x6D61,0x6D62,0x6D64,0x6D65,0x6D67,0x6D68,/* 0xC0-0xC7 */
+	0x6D6B,0x6D6C,0x6D6D,0x6D70,0x6D71,0x6D72,0x6D73,0x6D75,/* 0xC8-0xCF */
+	0x6D76,0x6D79,0x6D7A,0x6D7B,0x6D7D,0x6D7E,0x6D7F,0x6D80,/* 0xD0-0xD7 */
+	0x6D81,0x6D83,0x6D84,0x6D86,0x6D87,0x6D8A,0x6D8B,0x6D8D,/* 0xD8-0xDF */
+	0x6D8F,0x6D90,0x6D92,0x6D96,0x6D97,0x6D98,0x6D99,0x6D9A,/* 0xE0-0xE7 */
+	0x6D9C,0x6DA2,0x6DA5,0x6DAC,0x6DAD,0x6DB0,0x6DB1,0x6DB3,/* 0xE8-0xEF */
+	0x6DB4,0x6DB6,0x6DB7,0x6DB9,0x6DBA,0x6DBB,0x6DBC,0x6DBD,/* 0xF0-0xF7 */
+	0x6DBE,0x6DC1,0x6DC2,0x6DC3,0x6DC8,0x6DC9,0x6DCA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6DCD,0x6DCE,0x6DCF,0x6DD0,0x6DD2,0x6DD3,0x6DD4,0x6DD5,/* 0x40-0x47 */
+	0x6DD7,0x6DDA,0x6DDB,0x6DDC,0x6DDF,0x6DE2,0x6DE3,0x6DE5,/* 0x48-0x4F */
+	0x6DE7,0x6DE8,0x6DE9,0x6DEA,0x6DED,0x6DEF,0x6DF0,0x6DF2,/* 0x50-0x57 */
+	0x6DF4,0x6DF5,0x6DF6,0x6DF8,0x6DFA,0x6DFD,0x6DFE,0x6DFF,/* 0x58-0x5F */
+	0x6E00,0x6E01,0x6E02,0x6E03,0x6E04,0x6E06,0x6E07,0x6E08,/* 0x60-0x67 */
+	0x6E09,0x6E0B,0x6E0F,0x6E12,0x6E13,0x6E15,0x6E18,0x6E19,/* 0x68-0x6F */
+	0x6E1B,0x6E1C,0x6E1E,0x6E1F,0x6E22,0x6E26,0x6E27,0x6E28,/* 0x70-0x77 */
+	0x6E2A,0x6E2C,0x6E2E,0x6E30,0x6E31,0x6E33,0x6E35,0x0000,/* 0x78-0x7F */
+
+	0x6E36,0x6E37,0x6E39,0x6E3B,0x6E3C,0x6E3D,0x6E3E,0x6E3F,/* 0x80-0x87 */
+	0x6E40,0x6E41,0x6E42,0x6E45,0x6E46,0x6E47,0x6E48,0x6E49,/* 0x88-0x8F */
+	0x6E4A,0x6E4B,0x6E4C,0x6E4F,0x6E50,0x6E51,0x6E52,0x6E55,/* 0x90-0x97 */
+	0x6E57,0x6E59,0x6E5A,0x6E5C,0x6E5D,0x6E5E,0x6E60,0x6E61,/* 0x98-0x9F */
+	0x6E62,0x6E63,0x6E64,0x6E65,0x6E66,0x6E67,0x6E68,0x6E69,/* 0xA0-0xA7 */
+	0x6E6A,0x6E6C,0x6E6D,0x6E6F,0x6E70,0x6E71,0x6E72,0x6E73,/* 0xA8-0xAF */
+	0x6E74,0x6E75,0x6E76,0x6E77,0x6E78,0x6E79,0x6E7A,0x6E7B,/* 0xB0-0xB7 */
+	0x6E7C,0x6E7D,0x6E80,0x6E81,0x6E82,0x6E84,0x6E87,0x6E88,/* 0xB8-0xBF */
+	0x6E8A,0x6E8B,0x6E8C,0x6E8D,0x6E8E,0x6E91,0x6E92,0x6E93,/* 0xC0-0xC7 */
+	0x6E94,0x6E95,0x6E96,0x6E97,0x6E99,0x6E9A,0x6E9B,0x6E9D,/* 0xC8-0xCF */
+	0x6E9E,0x6EA0,0x6EA1,0x6EA3,0x6EA4,0x6EA6,0x6EA8,0x6EA9,/* 0xD0-0xD7 */
+	0x6EAB,0x6EAC,0x6EAD,0x6EAE,0x6EB0,0x6EB3,0x6EB5,0x6EB8,/* 0xD8-0xDF */
+	0x6EB9,0x6EBC,0x6EBE,0x6EBF,0x6EC0,0x6EC3,0x6EC4,0x6EC5,/* 0xE0-0xE7 */
+	0x6EC6,0x6EC8,0x6EC9,0x6ECA,0x6ECC,0x6ECD,0x6ECE,0x6ED0,/* 0xE8-0xEF */
+	0x6ED2,0x6ED6,0x6ED8,0x6ED9,0x6EDB,0x6EDC,0x6EDD,0x6EE3,/* 0xF0-0xF7 */
+	0x6EE7,0x6EEA,0x6EEB,0x6EEC,0x6EED,0x6EEE,0x6EEF,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6EF0,0x6EF1,0x6EF2,0x6EF3,0x6EF5,0x6EF6,0x6EF7,0x6EF8,/* 0x40-0x47 */
+	0x6EFA,0x6EFB,0x6EFC,0x6EFD,0x6EFE,0x6EFF,0x6F00,0x6F01,/* 0x48-0x4F */
+	0x6F03,0x6F04,0x6F05,0x6F07,0x6F08,0x6F0A,0x6F0B,0x6F0C,/* 0x50-0x57 */
+	0x6F0D,0x6F0E,0x6F10,0x6F11,0x6F12,0x6F16,0x6F17,0x6F18,/* 0x58-0x5F */
+	0x6F19,0x6F1A,0x6F1B,0x6F1C,0x6F1D,0x6F1E,0x6F1F,0x6F21,/* 0x60-0x67 */
+	0x6F22,0x6F23,0x6F25,0x6F26,0x6F27,0x6F28,0x6F2C,0x6F2E,/* 0x68-0x6F */
+	0x6F30,0x6F32,0x6F34,0x6F35,0x6F37,0x6F38,0x6F39,0x6F3A,/* 0x70-0x77 */
+	0x6F3B,0x6F3C,0x6F3D,0x6F3F,0x6F40,0x6F41,0x6F42,0x0000,/* 0x78-0x7F */
+
+	0x6F43,0x6F44,0x6F45,0x6F48,0x6F49,0x6F4A,0x6F4C,0x6F4E,/* 0x80-0x87 */
+	0x6F4F,0x6F50,0x6F51,0x6F52,0x6F53,0x6F54,0x6F55,0x6F56,/* 0x88-0x8F */
+	0x6F57,0x6F59,0x6F5A,0x6F5B,0x6F5D,0x6F5F,0x6F60,0x6F61,/* 0x90-0x97 */
+	0x6F63,0x6F64,0x6F65,0x6F67,0x6F68,0x6F69,0x6F6A,0x6F6B,/* 0x98-0x9F */
+	0x6F6C,0x6F6F,0x6F70,0x6F71,0x6F73,0x6F75,0x6F76,0x6F77,/* 0xA0-0xA7 */
+	0x6F79,0x6F7B,0x6F7D,0x6F7E,0x6F7F,0x6F80,0x6F81,0x6F82,/* 0xA8-0xAF */
+	0x6F83,0x6F85,0x6F86,0x6F87,0x6F8A,0x6F8B,0x6F8F,0x6F90,/* 0xB0-0xB7 */
+	0x6F91,0x6F92,0x6F93,0x6F94,0x6F95,0x6F96,0x6F97,0x6F98,/* 0xB8-0xBF */
+	0x6F99,0x6F9A,0x6F9B,0x6F9D,0x6F9E,0x6F9F,0x6FA0,0x6FA2,/* 0xC0-0xC7 */
+	0x6FA3,0x6FA4,0x6FA5,0x6FA6,0x6FA8,0x6FA9,0x6FAA,0x6FAB,/* 0xC8-0xCF */
+	0x6FAC,0x6FAD,0x6FAE,0x6FAF,0x6FB0,0x6FB1,0x6FB2,0x6FB4,/* 0xD0-0xD7 */
+	0x6FB5,0x6FB7,0x6FB8,0x6FBA,0x6FBB,0x6FBC,0x6FBD,0x6FBE,/* 0xD8-0xDF */
+	0x6FBF,0x6FC1,0x6FC3,0x6FC4,0x6FC5,0x6FC6,0x6FC7,0x6FC8,/* 0xE0-0xE7 */
+	0x6FCA,0x6FCB,0x6FCC,0x6FCD,0x6FCE,0x6FCF,0x6FD0,0x6FD3,/* 0xE8-0xEF */
+	0x6FD4,0x6FD5,0x6FD6,0x6FD7,0x6FD8,0x6FD9,0x6FDA,0x6FDB,/* 0xF0-0xF7 */
+	0x6FDC,0x6FDD,0x6FDF,0x6FE2,0x6FE3,0x6FE4,0x6FE5,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6FE6,0x6FE7,0x6FE8,0x6FE9,0x6FEA,0x6FEB,0x6FEC,0x6FED,/* 0x40-0x47 */
+	0x6FF0,0x6FF1,0x6FF2,0x6FF3,0x6FF4,0x6FF5,0x6FF6,0x6FF7,/* 0x48-0x4F */
+	0x6FF8,0x6FF9,0x6FFA,0x6FFB,0x6FFC,0x6FFD,0x6FFE,0x6FFF,/* 0x50-0x57 */
+	0x7000,0x7001,0x7002,0x7003,0x7004,0x7005,0x7006,0x7007,/* 0x58-0x5F */
+	0x7008,0x7009,0x700A,0x700B,0x700C,0x700D,0x700E,0x700F,/* 0x60-0x67 */
+	0x7010,0x7012,0x7013,0x7014,0x7015,0x7016,0x7017,0x7018,/* 0x68-0x6F */
+	0x7019,0x701C,0x701D,0x701E,0x701F,0x7020,0x7021,0x7022,/* 0x70-0x77 */
+	0x7024,0x7025,0x7026,0x7027,0x7028,0x7029,0x702A,0x0000,/* 0x78-0x7F */
+
+	0x702B,0x702C,0x702D,0x702E,0x702F,0x7030,0x7031,0x7032,/* 0x80-0x87 */
+	0x7033,0x7034,0x7036,0x7037,0x7038,0x703A,0x703B,0x703C,/* 0x88-0x8F */
+	0x703D,0x703E,0x703F,0x7040,0x7041,0x7042,0x7043,0x7044,/* 0x90-0x97 */
+	0x7045,0x7046,0x7047,0x7048,0x7049,0x704A,0x704B,0x704D,/* 0x98-0x9F */
+	0x704E,0x7050,0x7051,0x7052,0x7053,0x7054,0x7055,0x7056,/* 0xA0-0xA7 */
+	0x7057,0x7058,0x7059,0x705A,0x705B,0x705C,0x705D,0x705F,/* 0xA8-0xAF */
+	0x7060,0x7061,0x7062,0x7063,0x7064,0x7065,0x7066,0x7067,/* 0xB0-0xB7 */
+	0x7068,0x7069,0x706A,0x706E,0x7071,0x7072,0x7073,0x7074,/* 0xB8-0xBF */
+	0x7077,0x7079,0x707A,0x707B,0x707D,0x7081,0x7082,0x7083,/* 0xC0-0xC7 */
+	0x7084,0x7086,0x7087,0x7088,0x708B,0x708C,0x708D,0x708F,/* 0xC8-0xCF */
+	0x7090,0x7091,0x7093,0x7097,0x7098,0x709A,0x709B,0x709E,/* 0xD0-0xD7 */
+	0x709F,0x70A0,0x70A1,0x70A2,0x70A3,0x70A4,0x70A5,0x70A6,/* 0xD8-0xDF */
+	0x70A7,0x70A8,0x70A9,0x70AA,0x70B0,0x70B2,0x70B4,0x70B5,/* 0xE0-0xE7 */
+	0x70B6,0x70BA,0x70BE,0x70BF,0x70C4,0x70C5,0x70C6,0x70C7,/* 0xE8-0xEF */
+	0x70C9,0x70CB,0x70CC,0x70CD,0x70CE,0x70CF,0x70D0,0x70D1,/* 0xF0-0xF7 */
+	0x70D2,0x70D3,0x70D4,0x70D5,0x70D6,0x70D7,0x70DA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x70DC,0x70DD,0x70DE,0x70E0,0x70E1,0x70E2,0x70E3,0x70E5,/* 0x40-0x47 */
+	0x70EA,0x70EE,0x70F0,0x70F1,0x70F2,0x70F3,0x70F4,0x70F5,/* 0x48-0x4F */
+	0x70F6,0x70F8,0x70FA,0x70FB,0x70FC,0x70FE,0x70FF,0x7100,/* 0x50-0x57 */
+	0x7101,0x7102,0x7103,0x7104,0x7105,0x7106,0x7107,0x7108,/* 0x58-0x5F */
+	0x710B,0x710C,0x710D,0x710E,0x710F,0x7111,0x7112,0x7114,/* 0x60-0x67 */
+	0x7117,0x711B,0x711C,0x711D,0x711E,0x711F,0x7120,0x7121,/* 0x68-0x6F */
+	0x7122,0x7123,0x7124,0x7125,0x7127,0x7128,0x7129,0x712A,/* 0x70-0x77 */
+	0x712B,0x712C,0x712D,0x712E,0x7132,0x7133,0x7134,0x0000,/* 0x78-0x7F */
+
+	0x7135,0x7137,0x7138,0x7139,0x713A,0x713B,0x713C,0x713D,/* 0x80-0x87 */
+	0x713E,0x713F,0x7140,0x7141,0x7142,0x7143,0x7144,0x7146,/* 0x88-0x8F */
+	0x7147,0x7148,0x7149,0x714B,0x714D,0x714F,0x7150,0x7151,/* 0x90-0x97 */
+	0x7152,0x7153,0x7154,0x7155,0x7156,0x7157,0x7158,0x7159,/* 0x98-0x9F */
+	0x715A,0x715B,0x715D,0x715F,0x7160,0x7161,0x7162,0x7163,/* 0xA0-0xA7 */
+	0x7165,0x7169,0x716A,0x716B,0x716C,0x716D,0x716F,0x7170,/* 0xA8-0xAF */
+	0x7171,0x7174,0x7175,0x7176,0x7177,0x7179,0x717B,0x717C,/* 0xB0-0xB7 */
+	0x717E,0x717F,0x7180,0x7181,0x7182,0x7183,0x7185,0x7186,/* 0xB8-0xBF */
+	0x7187,0x7188,0x7189,0x718B,0x718C,0x718D,0x718E,0x7190,/* 0xC0-0xC7 */
+	0x7191,0x7192,0x7193,0x7195,0x7196,0x7197,0x719A,0x719B,/* 0xC8-0xCF */
+	0x719C,0x719D,0x719E,0x71A1,0x71A2,0x71A3,0x71A4,0x71A5,/* 0xD0-0xD7 */
+	0x71A6,0x71A7,0x71A9,0x71AA,0x71AB,0x71AD,0x71AE,0x71AF,/* 0xD8-0xDF */
+	0x71B0,0x71B1,0x71B2,0x71B4,0x71B6,0x71B7,0x71B8,0x71BA,/* 0xE0-0xE7 */
+	0x71BB,0x71BC,0x71BD,0x71BE,0x71BF,0x71C0,0x71C1,0x71C2,/* 0xE8-0xEF */
+	0x71C4,0x71C5,0x71C6,0x71C7,0x71C8,0x71C9,0x71CA,0x71CB,/* 0xF0-0xF7 */
+	0x71CC,0x71CD,0x71CF,0x71D0,0x71D1,0x71D2,0x71D3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x71D6,0x71D7,0x71D8,0x71D9,0x71DA,0x71DB,0x71DC,0x71DD,/* 0x40-0x47 */
+	0x71DE,0x71DF,0x71E1,0x71E2,0x71E3,0x71E4,0x71E6,0x71E8,/* 0x48-0x4F */
+	0x71E9,0x71EA,0x71EB,0x71EC,0x71ED,0x71EF,0x71F0,0x71F1,/* 0x50-0x57 */
+	0x71F2,0x71F3,0x71F4,0x71F5,0x71F6,0x71F7,0x71F8,0x71FA,/* 0x58-0x5F */
+	0x71FB,0x71FC,0x71FD,0x71FE,0x71FF,0x7200,0x7201,0x7202,/* 0x60-0x67 */
+	0x7203,0x7204,0x7205,0x7207,0x7208,0x7209,0x720A,0x720B,/* 0x68-0x6F */
+	0x720C,0x720D,0x720E,0x720F,0x7210,0x7211,0x7212,0x7213,/* 0x70-0x77 */
+	0x7214,0x7215,0x7216,0x7217,0x7218,0x7219,0x721A,0x0000,/* 0x78-0x7F */
+
+	0x721B,0x721C,0x721E,0x721F,0x7220,0x7221,0x7222,0x7223,/* 0x80-0x87 */
+	0x7224,0x7225,0x7226,0x7227,0x7229,0x722B,0x722D,0x722E,/* 0x88-0x8F */
+	0x722F,0x7232,0x7233,0x7234,0x723A,0x723C,0x723E,0x7240,/* 0x90-0x97 */
+	0x7241,0x7242,0x7243,0x7244,0x7245,0x7246,0x7249,0x724A,/* 0x98-0x9F */
+	0x724B,0x724E,0x724F,0x7250,0x7251,0x7253,0x7254,0x7255,/* 0xA0-0xA7 */
+	0x7257,0x7258,0x725A,0x725C,0x725E,0x7260,0x7263,0x7264,/* 0xA8-0xAF */
+	0x7265,0x7268,0x726A,0x726B,0x726C,0x726D,0x7270,0x7271,/* 0xB0-0xB7 */
+	0x7273,0x7274,0x7276,0x7277,0x7278,0x727B,0x727C,0x727D,/* 0xB8-0xBF */
+	0x7282,0x7283,0x7285,0x7286,0x7287,0x7288,0x7289,0x728C,/* 0xC0-0xC7 */
+	0x728E,0x7290,0x7291,0x7293,0x7294,0x7295,0x7296,0x7297,/* 0xC8-0xCF */
+	0x7298,0x7299,0x729A,0x729B,0x729C,0x729D,0x729E,0x72A0,/* 0xD0-0xD7 */
+	0x72A1,0x72A2,0x72A3,0x72A4,0x72A5,0x72A6,0x72A7,0x72A8,/* 0xD8-0xDF */
+	0x72A9,0x72AA,0x72AB,0x72AE,0x72B1,0x72B2,0x72B3,0x72B5,/* 0xE0-0xE7 */
+	0x72BA,0x72BB,0x72BC,0x72BD,0x72BE,0x72BF,0x72C0,0x72C5,/* 0xE8-0xEF */
+	0x72C6,0x72C7,0x72C9,0x72CA,0x72CB,0x72CC,0x72CF,0x72D1,/* 0xF0-0xF7 */
+	0x72D3,0x72D4,0x72D5,0x72D6,0x72D8,0x72DA,0x72DB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x3000,0x3001,0x3002,0x00B7,0x02C9,0x02C7,0x00A8,/* 0xA0-0xA7 */
+	0x3003,0x3005,0x2014,0xFF5E,0x2016,0x2026,0x2018,0x2019,/* 0xA8-0xAF */
+	0x201C,0x201D,0x3014,0x3015,0x3008,0x3009,0x300A,0x300B,/* 0xB0-0xB7 */
+	0x300C,0x300D,0x300E,0x300F,0x3016,0x3017,0x3010,0x3011,/* 0xB8-0xBF */
+	0x00B1,0x00D7,0x00F7,0x2236,0x2227,0x2228,0x2211,0x220F,/* 0xC0-0xC7 */
+	0x222A,0x2229,0x2208,0x2237,0x221A,0x22A5,0x2225,0x2220,/* 0xC8-0xCF */
+	0x2312,0x2299,0x222B,0x222E,0x2261,0x224C,0x2248,0x223D,/* 0xD0-0xD7 */
+	0x221D,0x2260,0x226E,0x226F,0x2264,0x2265,0x221E,0x2235,/* 0xD8-0xDF */
+	0x2234,0x2642,0x2640,0x00B0,0x2032,0x2033,0x2103,0xFF04,/* 0xE0-0xE7 */
+	0x00A4,0xFFE0,0xFFE1,0x2030,0x00A7,0x2116,0x2606,0x2605,/* 0xE8-0xEF */
+	0x25CB,0x25CF,0x25CE,0x25C7,0x25C6,0x25A1,0x25A0,0x25B3,/* 0xF0-0xF7 */
+	0x25B2,0x203B,0x2192,0x2190,0x2191,0x2193,0x3013,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x2170,0x2171,0x2172,0x2173,0x2174,0x2175,0x2176,/* 0xA0-0xA7 */
+	0x2177,0x2178,0x2179,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA8-0xAF */
+	0x0000,0x2488,0x2489,0x248A,0x248B,0x248C,0x248D,0x248E,/* 0xB0-0xB7 */
+	0x248F,0x2490,0x2491,0x2492,0x2493,0x2494,0x2495,0x2496,/* 0xB8-0xBF */
+	0x2497,0x2498,0x2499,0x249A,0x249B,0x2474,0x2475,0x2476,/* 0xC0-0xC7 */
+	0x2477,0x2478,0x2479,0x247A,0x247B,0x247C,0x247D,0x247E,/* 0xC8-0xCF */
+	0x247F,0x2480,0x2481,0x2482,0x2483,0x2484,0x2485,0x2486,/* 0xD0-0xD7 */
+	0x2487,0x2460,0x2461,0x2462,0x2463,0x2464,0x2465,0x2466,/* 0xD8-0xDF */
+	0x2467,0x2468,0x2469,0x0000,0x0000,0x3220,0x3221,0x3222,/* 0xE0-0xE7 */
+	0x3223,0x3224,0x3225,0x3226,0x3227,0x3228,0x3229,0x0000,/* 0xE8-0xEF */
+	0x0000,0x2160,0x2161,0x2162,0x2163,0x2164,0x2165,0x2166,/* 0xF0-0xF7 */
+	0x2167,0x2168,0x2169,0x216A,0x216B,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xFF01,0xFF02,0xFF03,0xFFE5,0xFF05,0xFF06,0xFF07,/* 0xA0-0xA7 */
+	0xFF08,0xFF09,0xFF0A,0xFF0B,0xFF0C,0xFF0D,0xFF0E,0xFF0F,/* 0xA8-0xAF */
+	0xFF10,0xFF11,0xFF12,0xFF13,0xFF14,0xFF15,0xFF16,0xFF17,/* 0xB0-0xB7 */
+	0xFF18,0xFF19,0xFF1A,0xFF1B,0xFF1C,0xFF1D,0xFF1E,0xFF1F,/* 0xB8-0xBF */
+	0xFF20,0xFF21,0xFF22,0xFF23,0xFF24,0xFF25,0xFF26,0xFF27,/* 0xC0-0xC7 */
+	0xFF28,0xFF29,0xFF2A,0xFF2B,0xFF2C,0xFF2D,0xFF2E,0xFF2F,/* 0xC8-0xCF */
+	0xFF30,0xFF31,0xFF32,0xFF33,0xFF34,0xFF35,0xFF36,0xFF37,/* 0xD0-0xD7 */
+	0xFF38,0xFF39,0xFF3A,0xFF3B,0xFF3C,0xFF3D,0xFF3E,0xFF3F,/* 0xD8-0xDF */
+	0xFF40,0xFF41,0xFF42,0xFF43,0xFF44,0xFF45,0xFF46,0xFF47,/* 0xE0-0xE7 */
+	0xFF48,0xFF49,0xFF4A,0xFF4B,0xFF4C,0xFF4D,0xFF4E,0xFF4F,/* 0xE8-0xEF */
+	0xFF50,0xFF51,0xFF52,0xFF53,0xFF54,0xFF55,0xFF56,0xFF57,/* 0xF0-0xF7 */
+	0xFF58,0xFF59,0xFF5A,0xFF5B,0xFF5C,0xFF5D,0xFFE3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x3041,0x3042,0x3043,0x3044,0x3045,0x3046,0x3047,/* 0xA0-0xA7 */
+	0x3048,0x3049,0x304A,0x304B,0x304C,0x304D,0x304E,0x304F,/* 0xA8-0xAF */
+	0x3050,0x3051,0x3052,0x3053,0x3054,0x3055,0x3056,0x3057,/* 0xB0-0xB7 */
+	0x3058,0x3059,0x305A,0x305B,0x305C,0x305D,0x305E,0x305F,/* 0xB8-0xBF */
+	0x3060,0x3061,0x3062,0x3063,0x3064,0x3065,0x3066,0x3067,/* 0xC0-0xC7 */
+	0x3068,0x3069,0x306A,0x306B,0x306C,0x306D,0x306E,0x306F,/* 0xC8-0xCF */
+	0x3070,0x3071,0x3072,0x3073,0x3074,0x3075,0x3076,0x3077,/* 0xD0-0xD7 */
+	0x3078,0x3079,0x307A,0x307B,0x307C,0x307D,0x307E,0x307F,/* 0xD8-0xDF */
+	0x3080,0x3081,0x3082,0x3083,0x3084,0x3085,0x3086,0x3087,/* 0xE0-0xE7 */
+	0x3088,0x3089,0x308A,0x308B,0x308C,0x308D,0x308E,0x308F,/* 0xE8-0xEF */
+	0x3090,0x3091,0x3092,0x3093,0x0000,0x0000,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_A5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x30A1,0x30A2,0x30A3,0x30A4,0x30A5,0x30A6,0x30A7,/* 0xA0-0xA7 */
+	0x30A8,0x30A9,0x30AA,0x30AB,0x30AC,0x30AD,0x30AE,0x30AF,/* 0xA8-0xAF */
+	0x30B0,0x30B1,0x30B2,0x30B3,0x30B4,0x30B5,0x30B6,0x30B7,/* 0xB0-0xB7 */
+	0x30B8,0x30B9,0x30BA,0x30BB,0x30BC,0x30BD,0x30BE,0x30BF,/* 0xB8-0xBF */
+	0x30C0,0x30C1,0x30C2,0x30C3,0x30C4,0x30C5,0x30C6,0x30C7,/* 0xC0-0xC7 */
+	0x30C8,0x30C9,0x30CA,0x30CB,0x30CC,0x30CD,0x30CE,0x30CF,/* 0xC8-0xCF */
+	0x30D0,0x30D1,0x30D2,0x30D3,0x30D4,0x30D5,0x30D6,0x30D7,/* 0xD0-0xD7 */
+	0x30D8,0x30D9,0x30DA,0x30DB,0x30DC,0x30DD,0x30DE,0x30DF,/* 0xD8-0xDF */
+	0x30E0,0x30E1,0x30E2,0x30E3,0x30E4,0x30E5,0x30E6,0x30E7,/* 0xE0-0xE7 */
+	0x30E8,0x30E9,0x30EA,0x30EB,0x30EC,0x30ED,0x30EE,0x30EF,/* 0xE8-0xEF */
+	0x30F0,0x30F1,0x30F2,0x30F3,0x30F4,0x30F5,0x30F6,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_A6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x0391,0x0392,0x0393,0x0394,0x0395,0x0396,0x0397,/* 0xA0-0xA7 */
+	0x0398,0x0399,0x039A,0x039B,0x039C,0x039D,0x039E,0x039F,/* 0xA8-0xAF */
+	0x03A0,0x03A1,0x03A3,0x03A4,0x03A5,0x03A6,0x03A7,0x03A8,/* 0xB0-0xB7 */
+	0x03A9,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xB8-0xBF */
+	0x0000,0x03B1,0x03B2,0x03B3,0x03B4,0x03B5,0x03B6,0x03B7,/* 0xC0-0xC7 */
+	0x03B8,0x03B9,0x03BA,0x03BB,0x03BC,0x03BD,0x03BE,0x03BF,/* 0xC8-0xCF */
+	0x03C0,0x03C1,0x03C3,0x03C4,0x03C5,0x03C6,0x03C7,0x03C8,/* 0xD0-0xD7 */
+	0x03C9,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xD8-0xDF */
+	0xFE35,0xFE36,0xFE39,0xFE3A,0xFE3F,0xFE40,0xFE3D,0xFE3E,/* 0xE0-0xE7 */
+	0xFE41,0xFE42,0xFE43,0xFE44,0x0000,0x0000,0xFE3B,0xFE3C,/* 0xE8-0xEF */
+	0xFE37,0xFE38,0xFE31,0x0000,0xFE33,0xFE34,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_A7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x0410,0x0411,0x0412,0x0413,0x0414,0x0415,0x0401,/* 0xA0-0xA7 */
+	0x0416,0x0417,0x0418,0x0419,0x041A,0x041B,0x041C,0x041D,/* 0xA8-0xAF */
+	0x041E,0x041F,0x0420,0x0421,0x0422,0x0423,0x0424,0x0425,/* 0xB0-0xB7 */
+	0x0426,0x0427,0x0428,0x0429,0x042A,0x042B,0x042C,0x042D,/* 0xB8-0xBF */
+	0x042E,0x042F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC0-0xC7 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC8-0xCF */
+	0x0000,0x0430,0x0431,0x0432,0x0433,0x0434,0x0435,0x0451,/* 0xD0-0xD7 */
+	0x0436,0x0437,0x0438,0x0439,0x043A,0x043B,0x043C,0x043D,/* 0xD8-0xDF */
+	0x043E,0x043F,0x0440,0x0441,0x0442,0x0443,0x0444,0x0445,/* 0xE0-0xE7 */
+	0x0446,0x0447,0x0448,0x0449,0x044A,0x044B,0x044C,0x044D,/* 0xE8-0xEF */
+	0x044E,0x044F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_A8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x02CA,0x02CB,0x02D9,0x2013,0x2015,0x2025,0x2035,0x2105,/* 0x40-0x47 */
+	0x2109,0x2196,0x2197,0x2198,0x2199,0x2215,0x221F,0x2223,/* 0x48-0x4F */
+	0x2252,0x2266,0x2267,0x22BF,0x2550,0x2551,0x2552,0x2553,/* 0x50-0x57 */
+	0x2554,0x2555,0x2556,0x2557,0x2558,0x2559,0x255A,0x255B,/* 0x58-0x5F */
+	0x255C,0x255D,0x255E,0x255F,0x2560,0x2561,0x2562,0x2563,/* 0x60-0x67 */
+	0x2564,0x2565,0x2566,0x2567,0x2568,0x2569,0x256A,0x256B,/* 0x68-0x6F */
+	0x256C,0x256D,0x256E,0x256F,0x2570,0x2571,0x2572,0x2573,/* 0x70-0x77 */
+	0x2581,0x2582,0x2583,0x2584,0x2585,0x2586,0x2587,0x0000,/* 0x78-0x7F */
+
+	0x2588,0x2589,0x258A,0x258B,0x258C,0x258D,0x258E,0x258F,/* 0x80-0x87 */
+	0x2593,0x2594,0x2595,0x25BC,0x25BD,0x25E2,0x25E3,0x25E4,/* 0x88-0x8F */
+	0x25E5,0x2609,0x2295,0x3012,0x301D,0x301E,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x0101,0x00E1,0x01CE,0x00E0,0x0113,0x00E9,0x011B,/* 0xA0-0xA7 */
+	0x00E8,0x012B,0x00ED,0x01D0,0x00EC,0x014D,0x00F3,0x01D2,/* 0xA8-0xAF */
+	0x00F2,0x016B,0x00FA,0x01D4,0x00F9,0x01D6,0x01D8,0x01DA,/* 0xB0-0xB7 */
+	0x01DC,0x00FC,0x00EA,0x0251,0x0000,0x0144,0x0148,0x0000,/* 0xB8-0xBF */
+	0x0261,0x0000,0x0000,0x0000,0x0000,0x3105,0x3106,0x3107,/* 0xC0-0xC7 */
+	0x3108,0x3109,0x310A,0x310B,0x310C,0x310D,0x310E,0x310F,/* 0xC8-0xCF */
+	0x3110,0x3111,0x3112,0x3113,0x3114,0x3115,0x3116,0x3117,/* 0xD0-0xD7 */
+	0x3118,0x3119,0x311A,0x311B,0x311C,0x311D,0x311E,0x311F,/* 0xD8-0xDF */
+	0x3120,0x3121,0x3122,0x3123,0x3124,0x3125,0x3126,0x3127,/* 0xE0-0xE7 */
+	0x3128,0x3129,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xE8-0xEF */
+};
+
+static wchar_t c2u_A9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x3021,0x3022,0x3023,0x3024,0x3025,0x3026,0x3027,0x3028,/* 0x40-0x47 */
+	0x3029,0x32A3,0x338E,0x338F,0x339C,0x339D,0x339E,0x33A1,/* 0x48-0x4F */
+	0x33C4,0x33CE,0x33D1,0x33D2,0x33D5,0xFE30,0xFFE2,0xFFE4,/* 0x50-0x57 */
+	0x0000,0x2121,0x3231,0x0000,0x2010,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x30FC,0x309B,0x309C,0x30FD,0x30FE,0x3006,0x309D,0x309E,/* 0x60-0x67 */
+	0xFE49,0xFE4A,0xFE4B,0xFE4C,0xFE4D,0xFE4E,0xFE4F,0xFE50,/* 0x68-0x6F */
+	0xFE51,0xFE52,0xFE54,0xFE55,0xFE56,0xFE57,0xFE59,0xFE5A,/* 0x70-0x77 */
+	0xFE5B,0xFE5C,0xFE5D,0xFE5E,0xFE5F,0xFE60,0xFE61,0x0000,/* 0x78-0x7F */
+
+	0xFE62,0xFE63,0xFE64,0xFE65,0xFE66,0xFE68,0xFE69,0xFE6A,/* 0x80-0x87 */
+	0xFE6B,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x3007,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x0000,0x0000,0x0000,0x2500,0x2501,0x2502,0x2503,/* 0xA0-0xA7 */
+	0x2504,0x2505,0x2506,0x2507,0x2508,0x2509,0x250A,0x250B,/* 0xA8-0xAF */
+	0x250C,0x250D,0x250E,0x250F,0x2510,0x2511,0x2512,0x2513,/* 0xB0-0xB7 */
+	0x2514,0x2515,0x2516,0x2517,0x2518,0x2519,0x251A,0x251B,/* 0xB8-0xBF */
+	0x251C,0x251D,0x251E,0x251F,0x2520,0x2521,0x2522,0x2523,/* 0xC0-0xC7 */
+	0x2524,0x2525,0x2526,0x2527,0x2528,0x2529,0x252A,0x252B,/* 0xC8-0xCF */
+	0x252C,0x252D,0x252E,0x252F,0x2530,0x2531,0x2532,0x2533,/* 0xD0-0xD7 */
+	0x2534,0x2535,0x2536,0x2537,0x2538,0x2539,0x253A,0x253B,/* 0xD8-0xDF */
+	0x253C,0x253D,0x253E,0x253F,0x2540,0x2541,0x2542,0x2543,/* 0xE0-0xE7 */
+	0x2544,0x2545,0x2546,0x2547,0x2548,0x2549,0x254A,0x254B,/* 0xE8-0xEF */
+};
+
+static wchar_t c2u_AA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x72DC,0x72DD,0x72DF,0x72E2,0x72E3,0x72E4,0x72E5,0x72E6,/* 0x40-0x47 */
+	0x72E7,0x72EA,0x72EB,0x72F5,0x72F6,0x72F9,0x72FD,0x72FE,/* 0x48-0x4F */
+	0x72FF,0x7300,0x7302,0x7304,0x7305,0x7306,0x7307,0x7308,/* 0x50-0x57 */
+	0x7309,0x730B,0x730C,0x730D,0x730F,0x7310,0x7311,0x7312,/* 0x58-0x5F */
+	0x7314,0x7318,0x7319,0x731A,0x731F,0x7320,0x7323,0x7324,/* 0x60-0x67 */
+	0x7326,0x7327,0x7328,0x732D,0x732F,0x7330,0x7332,0x7333,/* 0x68-0x6F */
+	0x7335,0x7336,0x733A,0x733B,0x733C,0x733D,0x7340,0x7341,/* 0x70-0x77 */
+	0x7342,0x7343,0x7344,0x7345,0x7346,0x7347,0x7348,0x0000,/* 0x78-0x7F */
+
+	0x7349,0x734A,0x734B,0x734C,0x734E,0x734F,0x7351,0x7353,/* 0x80-0x87 */
+	0x7354,0x7355,0x7356,0x7358,0x7359,0x735A,0x735B,0x735C,/* 0x88-0x8F */
+	0x735D,0x735E,0x735F,0x7361,0x7362,0x7363,0x7364,0x7365,/* 0x90-0x97 */
+	0x7366,0x7367,0x7368,0x7369,0x736A,0x736B,0x736E,0x7370,/* 0x98-0x9F */
+	0x7371,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7372,0x7373,0x7374,0x7375,0x7376,0x7377,0x7378,0x7379,/* 0x40-0x47 */
+	0x737A,0x737B,0x737C,0x737D,0x737F,0x7380,0x7381,0x7382,/* 0x48-0x4F */
+	0x7383,0x7385,0x7386,0x7388,0x738A,0x738C,0x738D,0x738F,/* 0x50-0x57 */
+	0x7390,0x7392,0x7393,0x7394,0x7395,0x7397,0x7398,0x7399,/* 0x58-0x5F */
+	0x739A,0x739C,0x739D,0x739E,0x73A0,0x73A1,0x73A3,0x73A4,/* 0x60-0x67 */
+	0x73A5,0x73A6,0x73A7,0x73A8,0x73AA,0x73AC,0x73AD,0x73B1,/* 0x68-0x6F */
+	0x73B4,0x73B5,0x73B6,0x73B8,0x73B9,0x73BC,0x73BD,0x73BE,/* 0x70-0x77 */
+	0x73BF,0x73C1,0x73C3,0x73C4,0x73C5,0x73C6,0x73C7,0x0000,/* 0x78-0x7F */
+
+	0x73CB,0x73CC,0x73CE,0x73D2,0x73D3,0x73D4,0x73D5,0x73D6,/* 0x80-0x87 */
+	0x73D7,0x73D8,0x73DA,0x73DB,0x73DC,0x73DD,0x73DF,0x73E1,/* 0x88-0x8F */
+	0x73E2,0x73E3,0x73E4,0x73E6,0x73E8,0x73EA,0x73EB,0x73EC,/* 0x90-0x97 */
+	0x73EE,0x73EF,0x73F0,0x73F1,0x73F3,0x73F4,0x73F5,0x73F6,/* 0x98-0x9F */
+	0x73F7,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x73F8,0x73F9,0x73FA,0x73FB,0x73FC,0x73FD,0x73FE,0x73FF,/* 0x40-0x47 */
+	0x7400,0x7401,0x7402,0x7404,0x7407,0x7408,0x740B,0x740C,/* 0x48-0x4F */
+	0x740D,0x740E,0x7411,0x7412,0x7413,0x7414,0x7415,0x7416,/* 0x50-0x57 */
+	0x7417,0x7418,0x7419,0x741C,0x741D,0x741E,0x741F,0x7420,/* 0x58-0x5F */
+	0x7421,0x7423,0x7424,0x7427,0x7429,0x742B,0x742D,0x742F,/* 0x60-0x67 */
+	0x7431,0x7432,0x7437,0x7438,0x7439,0x743A,0x743B,0x743D,/* 0x68-0x6F */
+	0x743E,0x743F,0x7440,0x7442,0x7443,0x7444,0x7445,0x7446,/* 0x70-0x77 */
+	0x7447,0x7448,0x7449,0x744A,0x744B,0x744C,0x744D,0x0000,/* 0x78-0x7F */
+
+	0x744E,0x744F,0x7450,0x7451,0x7452,0x7453,0x7454,0x7456,/* 0x80-0x87 */
+	0x7458,0x745D,0x7460,0x7461,0x7462,0x7463,0x7464,0x7465,/* 0x88-0x8F */
+	0x7466,0x7467,0x7468,0x7469,0x746A,0x746B,0x746C,0x746E,/* 0x90-0x97 */
+	0x746F,0x7471,0x7472,0x7473,0x7474,0x7475,0x7478,0x7479,/* 0x98-0x9F */
+	0x747A,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x747B,0x747C,0x747D,0x747F,0x7482,0x7484,0x7485,0x7486,/* 0x40-0x47 */
+	0x7488,0x7489,0x748A,0x748C,0x748D,0x748F,0x7491,0x7492,/* 0x48-0x4F */
+	0x7493,0x7494,0x7495,0x7496,0x7497,0x7498,0x7499,0x749A,/* 0x50-0x57 */
+	0x749B,0x749D,0x749F,0x74A0,0x74A1,0x74A2,0x74A3,0x74A4,/* 0x58-0x5F */
+	0x74A5,0x74A6,0x74AA,0x74AB,0x74AC,0x74AD,0x74AE,0x74AF,/* 0x60-0x67 */
+	0x74B0,0x74B1,0x74B2,0x74B3,0x74B4,0x74B5,0x74B6,0x74B7,/* 0x68-0x6F */
+	0x74B8,0x74B9,0x74BB,0x74BC,0x74BD,0x74BE,0x74BF,0x74C0,/* 0x70-0x77 */
+	0x74C1,0x74C2,0x74C3,0x74C4,0x74C5,0x74C6,0x74C7,0x0000,/* 0x78-0x7F */
+
+	0x74C8,0x74C9,0x74CA,0x74CB,0x74CC,0x74CD,0x74CE,0x74CF,/* 0x80-0x87 */
+	0x74D0,0x74D1,0x74D3,0x74D4,0x74D5,0x74D6,0x74D7,0x74D8,/* 0x88-0x8F */
+	0x74D9,0x74DA,0x74DB,0x74DD,0x74DF,0x74E1,0x74E5,0x74E7,/* 0x90-0x97 */
+	0x74E8,0x74E9,0x74EA,0x74EB,0x74EC,0x74ED,0x74F0,0x74F1,/* 0x98-0x9F */
+	0x74F2,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x74F3,0x74F5,0x74F8,0x74F9,0x74FA,0x74FB,0x74FC,0x74FD,/* 0x40-0x47 */
+	0x74FE,0x7500,0x7501,0x7502,0x7503,0x7505,0x7506,0x7507,/* 0x48-0x4F */
+	0x7508,0x7509,0x750A,0x750B,0x750C,0x750E,0x7510,0x7512,/* 0x50-0x57 */
+	0x7514,0x7515,0x7516,0x7517,0x751B,0x751D,0x751E,0x7520,/* 0x58-0x5F */
+	0x7521,0x7522,0x7523,0x7524,0x7526,0x7527,0x752A,0x752E,/* 0x60-0x67 */
+	0x7534,0x7536,0x7539,0x753C,0x753D,0x753F,0x7541,0x7542,/* 0x68-0x6F */
+	0x7543,0x7544,0x7546,0x7547,0x7549,0x754A,0x754D,0x7550,/* 0x70-0x77 */
+	0x7551,0x7552,0x7553,0x7555,0x7556,0x7557,0x7558,0x0000,/* 0x78-0x7F */
+
+	0x755D,0x755E,0x755F,0x7560,0x7561,0x7562,0x7563,0x7564,/* 0x80-0x87 */
+	0x7567,0x7568,0x7569,0x756B,0x756C,0x756D,0x756E,0x756F,/* 0x88-0x8F */
+	0x7570,0x7571,0x7573,0x7575,0x7576,0x7577,0x757A,0x757B,/* 0x90-0x97 */
+	0x757C,0x757D,0x757E,0x7580,0x7581,0x7582,0x7584,0x7585,/* 0x98-0x9F */
+	0x7587,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7588,0x7589,0x758A,0x758C,0x758D,0x758E,0x7590,0x7593,/* 0x40-0x47 */
+	0x7595,0x7598,0x759B,0x759C,0x759E,0x75A2,0x75A6,0x75A7,/* 0x48-0x4F */
+	0x75A8,0x75A9,0x75AA,0x75AD,0x75B6,0x75B7,0x75BA,0x75BB,/* 0x50-0x57 */
+	0x75BF,0x75C0,0x75C1,0x75C6,0x75CB,0x75CC,0x75CE,0x75CF,/* 0x58-0x5F */
+	0x75D0,0x75D1,0x75D3,0x75D7,0x75D9,0x75DA,0x75DC,0x75DD,/* 0x60-0x67 */
+	0x75DF,0x75E0,0x75E1,0x75E5,0x75E9,0x75EC,0x75ED,0x75EE,/* 0x68-0x6F */
+	0x75EF,0x75F2,0x75F3,0x75F5,0x75F6,0x75F7,0x75F8,0x75FA,/* 0x70-0x77 */
+	0x75FB,0x75FD,0x75FE,0x7602,0x7604,0x7606,0x7607,0x0000,/* 0x78-0x7F */
+
+	0x7608,0x7609,0x760B,0x760D,0x760E,0x760F,0x7611,0x7612,/* 0x80-0x87 */
+	0x7613,0x7614,0x7616,0x761A,0x761C,0x761D,0x761E,0x7621,/* 0x88-0x8F */
+	0x7623,0x7627,0x7628,0x762C,0x762E,0x762F,0x7631,0x7632,/* 0x90-0x97 */
+	0x7636,0x7637,0x7639,0x763A,0x763B,0x763D,0x7641,0x7642,/* 0x98-0x9F */
+	0x7644,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_B0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7645,0x7646,0x7647,0x7648,0x7649,0x764A,0x764B,0x764E,/* 0x40-0x47 */
+	0x764F,0x7650,0x7651,0x7652,0x7653,0x7655,0x7657,0x7658,/* 0x48-0x4F */
+	0x7659,0x765A,0x765B,0x765D,0x765F,0x7660,0x7661,0x7662,/* 0x50-0x57 */
+	0x7664,0x7665,0x7666,0x7667,0x7668,0x7669,0x766A,0x766C,/* 0x58-0x5F */
+	0x766D,0x766E,0x7670,0x7671,0x7672,0x7673,0x7674,0x7675,/* 0x60-0x67 */
+	0x7676,0x7677,0x7679,0x767A,0x767C,0x767F,0x7680,0x7681,/* 0x68-0x6F */
+	0x7683,0x7685,0x7689,0x768A,0x768C,0x768D,0x768F,0x7690,/* 0x70-0x77 */
+	0x7692,0x7694,0x7695,0x7697,0x7698,0x769A,0x769B,0x0000,/* 0x78-0x7F */
+
+	0x769C,0x769D,0x769E,0x769F,0x76A0,0x76A1,0x76A2,0x76A3,/* 0x80-0x87 */
+	0x76A5,0x76A6,0x76A7,0x76A8,0x76A9,0x76AA,0x76AB,0x76AC,/* 0x88-0x8F */
+	0x76AD,0x76AF,0x76B0,0x76B3,0x76B5,0x76B6,0x76B7,0x76B8,/* 0x90-0x97 */
+	0x76B9,0x76BA,0x76BB,0x76BC,0x76BD,0x76BE,0x76C0,0x76C1,/* 0x98-0x9F */
+	0x76C3,0x554A,0x963F,0x57C3,0x6328,0x54CE,0x5509,0x54C0,/* 0xA0-0xA7 */
+	0x7691,0x764C,0x853C,0x77EE,0x827E,0x788D,0x7231,0x9698,/* 0xA8-0xAF */
+	0x978D,0x6C28,0x5B89,0x4FFA,0x6309,0x6697,0x5CB8,0x80FA,/* 0xB0-0xB7 */
+	0x6848,0x80AE,0x6602,0x76CE,0x51F9,0x6556,0x71AC,0x7FF1,/* 0xB8-0xBF */
+	0x8884,0x50B2,0x5965,0x61CA,0x6FB3,0x82AD,0x634C,0x6252,/* 0xC0-0xC7 */
+	0x53ED,0x5427,0x7B06,0x516B,0x75A4,0x5DF4,0x62D4,0x8DCB,/* 0xC8-0xCF */
+	0x9776,0x628A,0x8019,0x575D,0x9738,0x7F62,0x7238,0x767D,/* 0xD0-0xD7 */
+	0x67CF,0x767E,0x6446,0x4F70,0x8D25,0x62DC,0x7A17,0x6591,/* 0xD8-0xDF */
+	0x73ED,0x642C,0x6273,0x822C,0x9881,0x677F,0x7248,0x626E,/* 0xE0-0xE7 */
+	0x62CC,0x4F34,0x74E3,0x534A,0x529E,0x7ECA,0x90A6,0x5E2E,/* 0xE8-0xEF */
+	0x6886,0x699C,0x8180,0x7ED1,0x68D2,0x78C5,0x868C,0x9551,/* 0xF0-0xF7 */
+	0x508D,0x8C24,0x82DE,0x80DE,0x5305,0x8912,0x5265,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x76C4,0x76C7,0x76C9,0x76CB,0x76CC,0x76D3,0x76D5,0x76D9,/* 0x40-0x47 */
+	0x76DA,0x76DC,0x76DD,0x76DE,0x76E0,0x76E1,0x76E2,0x76E3,/* 0x48-0x4F */
+	0x76E4,0x76E6,0x76E7,0x76E8,0x76E9,0x76EA,0x76EB,0x76EC,/* 0x50-0x57 */
+	0x76ED,0x76F0,0x76F3,0x76F5,0x76F6,0x76F7,0x76FA,0x76FB,/* 0x58-0x5F */
+	0x76FD,0x76FF,0x7700,0x7702,0x7703,0x7705,0x7706,0x770A,/* 0x60-0x67 */
+	0x770C,0x770E,0x770F,0x7710,0x7711,0x7712,0x7713,0x7714,/* 0x68-0x6F */
+	0x7715,0x7716,0x7717,0x7718,0x771B,0x771C,0x771D,0x771E,/* 0x70-0x77 */
+	0x7721,0x7723,0x7724,0x7725,0x7727,0x772A,0x772B,0x0000,/* 0x78-0x7F */
+
+	0x772C,0x772E,0x7730,0x7731,0x7732,0x7733,0x7734,0x7739,/* 0x80-0x87 */
+	0x773B,0x773D,0x773E,0x773F,0x7742,0x7744,0x7745,0x7746,/* 0x88-0x8F */
+	0x7748,0x7749,0x774A,0x774B,0x774C,0x774D,0x774E,0x774F,/* 0x90-0x97 */
+	0x7752,0x7753,0x7754,0x7755,0x7756,0x7757,0x7758,0x7759,/* 0x98-0x9F */
+	0x775C,0x8584,0x96F9,0x4FDD,0x5821,0x9971,0x5B9D,0x62B1,/* 0xA0-0xA7 */
+	0x62A5,0x66B4,0x8C79,0x9C8D,0x7206,0x676F,0x7891,0x60B2,/* 0xA8-0xAF */
+	0x5351,0x5317,0x8F88,0x80CC,0x8D1D,0x94A1,0x500D,0x72C8,/* 0xB0-0xB7 */
+	0x5907,0x60EB,0x7119,0x88AB,0x5954,0x82EF,0x672C,0x7B28,/* 0xB8-0xBF */
+	0x5D29,0x7EF7,0x752D,0x6CF5,0x8E66,0x8FF8,0x903C,0x9F3B,/* 0xC0-0xC7 */
+	0x6BD4,0x9119,0x7B14,0x5F7C,0x78A7,0x84D6,0x853D,0x6BD5,/* 0xC8-0xCF */
+	0x6BD9,0x6BD6,0x5E01,0x5E87,0x75F9,0x95ED,0x655D,0x5F0A,/* 0xD0-0xD7 */
+	0x5FC5,0x8F9F,0x58C1,0x81C2,0x907F,0x965B,0x97AD,0x8FB9,/* 0xD8-0xDF */
+	0x7F16,0x8D2C,0x6241,0x4FBF,0x53D8,0x535E,0x8FA8,0x8FA9,/* 0xE0-0xE7 */
+	0x8FAB,0x904D,0x6807,0x5F6A,0x8198,0x8868,0x9CD6,0x618B,/* 0xE8-0xEF */
+	0x522B,0x762A,0x5F6C,0x658C,0x6FD2,0x6EE8,0x5BBE,0x6448,/* 0xF0-0xF7 */
+	0x5175,0x51B0,0x67C4,0x4E19,0x79C9,0x997C,0x70B3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x775D,0x775E,0x775F,0x7760,0x7764,0x7767,0x7769,0x776A,/* 0x40-0x47 */
+	0x776D,0x776E,0x776F,0x7770,0x7771,0x7772,0x7773,0x7774,/* 0x48-0x4F */
+	0x7775,0x7776,0x7777,0x7778,0x777A,0x777B,0x777C,0x7781,/* 0x50-0x57 */
+	0x7782,0x7783,0x7786,0x7787,0x7788,0x7789,0x778A,0x778B,/* 0x58-0x5F */
+	0x778F,0x7790,0x7793,0x7794,0x7795,0x7796,0x7797,0x7798,/* 0x60-0x67 */
+	0x7799,0x779A,0x779B,0x779C,0x779D,0x779E,0x77A1,0x77A3,/* 0x68-0x6F */
+	0x77A4,0x77A6,0x77A8,0x77AB,0x77AD,0x77AE,0x77AF,0x77B1,/* 0x70-0x77 */
+	0x77B2,0x77B4,0x77B6,0x77B7,0x77B8,0x77B9,0x77BA,0x0000,/* 0x78-0x7F */
+
+	0x77BC,0x77BE,0x77C0,0x77C1,0x77C2,0x77C3,0x77C4,0x77C5,/* 0x80-0x87 */
+	0x77C6,0x77C7,0x77C8,0x77C9,0x77CA,0x77CB,0x77CC,0x77CE,/* 0x88-0x8F */
+	0x77CF,0x77D0,0x77D1,0x77D2,0x77D3,0x77D4,0x77D5,0x77D6,/* 0x90-0x97 */
+	0x77D8,0x77D9,0x77DA,0x77DD,0x77DE,0x77DF,0x77E0,0x77E1,/* 0x98-0x9F */
+	0x77E4,0x75C5,0x5E76,0x73BB,0x83E0,0x64AD,0x62E8,0x94B5,/* 0xA0-0xA7 */
+	0x6CE2,0x535A,0x52C3,0x640F,0x94C2,0x7B94,0x4F2F,0x5E1B,/* 0xA8-0xAF */
+	0x8236,0x8116,0x818A,0x6E24,0x6CCA,0x9A73,0x6355,0x535C,/* 0xB0-0xB7 */
+	0x54FA,0x8865,0x57E0,0x4E0D,0x5E03,0x6B65,0x7C3F,0x90E8,/* 0xB8-0xBF */
+	0x6016,0x64E6,0x731C,0x88C1,0x6750,0x624D,0x8D22,0x776C,/* 0xC0-0xC7 */
+	0x8E29,0x91C7,0x5F69,0x83DC,0x8521,0x9910,0x53C2,0x8695,/* 0xC8-0xCF */
+	0x6B8B,0x60ED,0x60E8,0x707F,0x82CD,0x8231,0x4ED3,0x6CA7,/* 0xD0-0xD7 */
+	0x85CF,0x64CD,0x7CD9,0x69FD,0x66F9,0x8349,0x5395,0x7B56,/* 0xD8-0xDF */
+	0x4FA7,0x518C,0x6D4B,0x5C42,0x8E6D,0x63D2,0x53C9,0x832C,/* 0xE0-0xE7 */
+	0x8336,0x67E5,0x78B4,0x643D,0x5BDF,0x5C94,0x5DEE,0x8BE7,/* 0xE8-0xEF */
+	0x62C6,0x67F4,0x8C7A,0x6400,0x63BA,0x8749,0x998B,0x8C17,/* 0xF0-0xF7 */
+	0x7F20,0x94F2,0x4EA7,0x9610,0x98A4,0x660C,0x7316,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x77E6,0x77E8,0x77EA,0x77EF,0x77F0,0x77F1,0x77F2,0x77F4,/* 0x40-0x47 */
+	0x77F5,0x77F7,0x77F9,0x77FA,0x77FB,0x77FC,0x7803,0x7804,/* 0x48-0x4F */
+	0x7805,0x7806,0x7807,0x7808,0x780A,0x780B,0x780E,0x780F,/* 0x50-0x57 */
+	0x7810,0x7813,0x7815,0x7819,0x781B,0x781E,0x7820,0x7821,/* 0x58-0x5F */
+	0x7822,0x7824,0x7828,0x782A,0x782B,0x782E,0x782F,0x7831,/* 0x60-0x67 */
+	0x7832,0x7833,0x7835,0x7836,0x783D,0x783F,0x7841,0x7842,/* 0x68-0x6F */
+	0x7843,0x7844,0x7846,0x7848,0x7849,0x784A,0x784B,0x784D,/* 0x70-0x77 */
+	0x784F,0x7851,0x7853,0x7854,0x7858,0x7859,0x785A,0x0000,/* 0x78-0x7F */
+
+	0x785B,0x785C,0x785E,0x785F,0x7860,0x7861,0x7862,0x7863,/* 0x80-0x87 */
+	0x7864,0x7865,0x7866,0x7867,0x7868,0x7869,0x786F,0x7870,/* 0x88-0x8F */
+	0x7871,0x7872,0x7873,0x7874,0x7875,0x7876,0x7878,0x7879,/* 0x90-0x97 */
+	0x787A,0x787B,0x787D,0x787E,0x787F,0x7880,0x7881,0x7882,/* 0x98-0x9F */
+	0x7883,0x573A,0x5C1D,0x5E38,0x957F,0x507F,0x80A0,0x5382,/* 0xA0-0xA7 */
+	0x655E,0x7545,0x5531,0x5021,0x8D85,0x6284,0x949E,0x671D,/* 0xA8-0xAF */
+	0x5632,0x6F6E,0x5DE2,0x5435,0x7092,0x8F66,0x626F,0x64A4,/* 0xB0-0xB7 */
+	0x63A3,0x5F7B,0x6F88,0x90F4,0x81E3,0x8FB0,0x5C18,0x6668,/* 0xB8-0xBF */
+	0x5FF1,0x6C89,0x9648,0x8D81,0x886C,0x6491,0x79F0,0x57CE,/* 0xC0-0xC7 */
+	0x6A59,0x6210,0x5448,0x4E58,0x7A0B,0x60E9,0x6F84,0x8BDA,/* 0xC8-0xCF */
+	0x627F,0x901E,0x9A8B,0x79E4,0x5403,0x75F4,0x6301,0x5319,/* 0xD0-0xD7 */
+	0x6C60,0x8FDF,0x5F1B,0x9A70,0x803B,0x9F7F,0x4F88,0x5C3A,/* 0xD8-0xDF */
+	0x8D64,0x7FC5,0x65A5,0x70BD,0x5145,0x51B2,0x866B,0x5D07,/* 0xE0-0xE7 */
+	0x5BA0,0x62BD,0x916C,0x7574,0x8E0C,0x7A20,0x6101,0x7B79,/* 0xE8-0xEF */
+	0x4EC7,0x7EF8,0x7785,0x4E11,0x81ED,0x521D,0x51FA,0x6A71,/* 0xF0-0xF7 */
+	0x53A8,0x8E87,0x9504,0x96CF,0x6EC1,0x9664,0x695A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7884,0x7885,0x7886,0x7888,0x788A,0x788B,0x788F,0x7890,/* 0x40-0x47 */
+	0x7892,0x7894,0x7895,0x7896,0x7899,0x789D,0x789E,0x78A0,/* 0x48-0x4F */
+	0x78A2,0x78A4,0x78A6,0x78A8,0x78A9,0x78AA,0x78AB,0x78AC,/* 0x50-0x57 */
+	0x78AD,0x78AE,0x78AF,0x78B5,0x78B6,0x78B7,0x78B8,0x78BA,/* 0x58-0x5F */
+	0x78BB,0x78BC,0x78BD,0x78BF,0x78C0,0x78C2,0x78C3,0x78C4,/* 0x60-0x67 */
+	0x78C6,0x78C7,0x78C8,0x78CC,0x78CD,0x78CE,0x78CF,0x78D1,/* 0x68-0x6F */
+	0x78D2,0x78D3,0x78D6,0x78D7,0x78D8,0x78DA,0x78DB,0x78DC,/* 0x70-0x77 */
+	0x78DD,0x78DE,0x78DF,0x78E0,0x78E1,0x78E2,0x78E3,0x0000,/* 0x78-0x7F */
+
+	0x78E4,0x78E5,0x78E6,0x78E7,0x78E9,0x78EA,0x78EB,0x78ED,/* 0x80-0x87 */
+	0x78EE,0x78EF,0x78F0,0x78F1,0x78F3,0x78F5,0x78F6,0x78F8,/* 0x88-0x8F */
+	0x78F9,0x78FB,0x78FC,0x78FD,0x78FE,0x78FF,0x7900,0x7902,/* 0x90-0x97 */
+	0x7903,0x7904,0x7906,0x7907,0x7908,0x7909,0x790A,0x790B,/* 0x98-0x9F */
+	0x790C,0x7840,0x50A8,0x77D7,0x6410,0x89E6,0x5904,0x63E3,/* 0xA0-0xA7 */
+	0x5DDD,0x7A7F,0x693D,0x4F20,0x8239,0x5598,0x4E32,0x75AE,/* 0xA8-0xAF */
+	0x7A97,0x5E62,0x5E8A,0x95EF,0x521B,0x5439,0x708A,0x6376,/* 0xB0-0xB7 */
+	0x9524,0x5782,0x6625,0x693F,0x9187,0x5507,0x6DF3,0x7EAF,/* 0xB8-0xBF */
+	0x8822,0x6233,0x7EF0,0x75B5,0x8328,0x78C1,0x96CC,0x8F9E,/* 0xC0-0xC7 */
+	0x6148,0x74F7,0x8BCD,0x6B64,0x523A,0x8D50,0x6B21,0x806A,/* 0xC8-0xCF */
+	0x8471,0x56F1,0x5306,0x4ECE,0x4E1B,0x51D1,0x7C97,0x918B,/* 0xD0-0xD7 */
+	0x7C07,0x4FC3,0x8E7F,0x7BE1,0x7A9C,0x6467,0x5D14,0x50AC,/* 0xD8-0xDF */
+	0x8106,0x7601,0x7CB9,0x6DEC,0x7FE0,0x6751,0x5B58,0x5BF8,/* 0xE0-0xE7 */
+	0x78CB,0x64AE,0x6413,0x63AA,0x632B,0x9519,0x642D,0x8FBE,/* 0xE8-0xEF */
+	0x7B54,0x7629,0x6253,0x5927,0x5446,0x6B79,0x50A3,0x6234,/* 0xF0-0xF7 */
+	0x5E26,0x6B86,0x4EE3,0x8D37,0x888B,0x5F85,0x902E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x790D,0x790E,0x790F,0x7910,0x7911,0x7912,0x7914,0x7915,/* 0x40-0x47 */
+	0x7916,0x7917,0x7918,0x7919,0x791A,0x791B,0x791C,0x791D,/* 0x48-0x4F */
+	0x791F,0x7920,0x7921,0x7922,0x7923,0x7925,0x7926,0x7927,/* 0x50-0x57 */
+	0x7928,0x7929,0x792A,0x792B,0x792C,0x792D,0x792E,0x792F,/* 0x58-0x5F */
+	0x7930,0x7931,0x7932,0x7933,0x7935,0x7936,0x7937,0x7938,/* 0x60-0x67 */
+	0x7939,0x793D,0x793F,0x7942,0x7943,0x7944,0x7945,0x7947,/* 0x68-0x6F */
+	0x794A,0x794B,0x794C,0x794D,0x794E,0x794F,0x7950,0x7951,/* 0x70-0x77 */
+	0x7952,0x7954,0x7955,0x7958,0x7959,0x7961,0x7963,0x0000,/* 0x78-0x7F */
+
+	0x7964,0x7966,0x7969,0x796A,0x796B,0x796C,0x796E,0x7970,/* 0x80-0x87 */
+	0x7971,0x7972,0x7973,0x7974,0x7975,0x7976,0x7979,0x797B,/* 0x88-0x8F */
+	0x797C,0x797D,0x797E,0x797F,0x7982,0x7983,0x7986,0x7987,/* 0x90-0x97 */
+	0x7988,0x7989,0x798B,0x798C,0x798D,0x798E,0x7990,0x7991,/* 0x98-0x9F */
+	0x7992,0x6020,0x803D,0x62C5,0x4E39,0x5355,0x90F8,0x63B8,/* 0xA0-0xA7 */
+	0x80C6,0x65E6,0x6C2E,0x4F46,0x60EE,0x6DE1,0x8BDE,0x5F39,/* 0xA8-0xAF */
+	0x86CB,0x5F53,0x6321,0x515A,0x8361,0x6863,0x5200,0x6363,/* 0xB0-0xB7 */
+	0x8E48,0x5012,0x5C9B,0x7977,0x5BFC,0x5230,0x7A3B,0x60BC,/* 0xB8-0xBF */
+	0x9053,0x76D7,0x5FB7,0x5F97,0x7684,0x8E6C,0x706F,0x767B,/* 0xC0-0xC7 */
+	0x7B49,0x77AA,0x51F3,0x9093,0x5824,0x4F4E,0x6EF4,0x8FEA,/* 0xC8-0xCF */
+	0x654C,0x7B1B,0x72C4,0x6DA4,0x7FDF,0x5AE1,0x62B5,0x5E95,/* 0xD0-0xD7 */
+	0x5730,0x8482,0x7B2C,0x5E1D,0x5F1F,0x9012,0x7F14,0x98A0,/* 0xD8-0xDF */
+	0x6382,0x6EC7,0x7898,0x70B9,0x5178,0x975B,0x57AB,0x7535,/* 0xE0-0xE7 */
+	0x4F43,0x7538,0x5E97,0x60E6,0x5960,0x6DC0,0x6BBF,0x7889,/* 0xE8-0xEF */
+	0x53FC,0x96D5,0x51CB,0x5201,0x6389,0x540A,0x9493,0x8C03,/* 0xF0-0xF7 */
+	0x8DCC,0x7239,0x789F,0x8776,0x8FED,0x8C0D,0x53E0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7993,0x7994,0x7995,0x7996,0x7997,0x7998,0x7999,0x799B,/* 0x40-0x47 */
+	0x799C,0x799D,0x799E,0x799F,0x79A0,0x79A1,0x79A2,0x79A3,/* 0x48-0x4F */
+	0x79A4,0x79A5,0x79A6,0x79A8,0x79A9,0x79AA,0x79AB,0x79AC,/* 0x50-0x57 */
+	0x79AD,0x79AE,0x79AF,0x79B0,0x79B1,0x79B2,0x79B4,0x79B5,/* 0x58-0x5F */
+	0x79B6,0x79B7,0x79B8,0x79BC,0x79BF,0x79C2,0x79C4,0x79C5,/* 0x60-0x67 */
+	0x79C7,0x79C8,0x79CA,0x79CC,0x79CE,0x79CF,0x79D0,0x79D3,/* 0x68-0x6F */
+	0x79D4,0x79D6,0x79D7,0x79D9,0x79DA,0x79DB,0x79DC,0x79DD,/* 0x70-0x77 */
+	0x79DE,0x79E0,0x79E1,0x79E2,0x79E5,0x79E8,0x79EA,0x0000,/* 0x78-0x7F */
+
+	0x79EC,0x79EE,0x79F1,0x79F2,0x79F3,0x79F4,0x79F5,0x79F6,/* 0x80-0x87 */
+	0x79F7,0x79F9,0x79FA,0x79FC,0x79FE,0x79FF,0x7A01,0x7A04,/* 0x88-0x8F */
+	0x7A05,0x7A07,0x7A08,0x7A09,0x7A0A,0x7A0C,0x7A0F,0x7A10,/* 0x90-0x97 */
+	0x7A11,0x7A12,0x7A13,0x7A15,0x7A16,0x7A18,0x7A19,0x7A1B,/* 0x98-0x9F */
+	0x7A1C,0x4E01,0x76EF,0x53EE,0x9489,0x9876,0x9F0E,0x952D,/* 0xA0-0xA7 */
+	0x5B9A,0x8BA2,0x4E22,0x4E1C,0x51AC,0x8463,0x61C2,0x52A8,/* 0xA8-0xAF */
+	0x680B,0x4F97,0x606B,0x51BB,0x6D1E,0x515C,0x6296,0x6597,/* 0xB0-0xB7 */
+	0x9661,0x8C46,0x9017,0x75D8,0x90FD,0x7763,0x6BD2,0x728A,/* 0xB8-0xBF */
+	0x72EC,0x8BFB,0x5835,0x7779,0x8D4C,0x675C,0x9540,0x809A,/* 0xC0-0xC7 */
+	0x5EA6,0x6E21,0x5992,0x7AEF,0x77ED,0x953B,0x6BB5,0x65AD,/* 0xC8-0xCF */
+	0x7F0E,0x5806,0x5151,0x961F,0x5BF9,0x58A9,0x5428,0x8E72,/* 0xD0-0xD7 */
+	0x6566,0x987F,0x56E4,0x949D,0x76FE,0x9041,0x6387,0x54C6,/* 0xD8-0xDF */
+	0x591A,0x593A,0x579B,0x8EB2,0x6735,0x8DFA,0x8235,0x5241,/* 0xE0-0xE7 */
+	0x60F0,0x5815,0x86FE,0x5CE8,0x9E45,0x4FC4,0x989D,0x8BB9,/* 0xE8-0xEF */
+	0x5A25,0x6076,0x5384,0x627C,0x904F,0x9102,0x997F,0x6069,/* 0xF0-0xF7 */
+	0x800C,0x513F,0x8033,0x5C14,0x9975,0x6D31,0x4E8C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7A1D,0x7A1F,0x7A21,0x7A22,0x7A24,0x7A25,0x7A26,0x7A27,/* 0x40-0x47 */
+	0x7A28,0x7A29,0x7A2A,0x7A2B,0x7A2C,0x7A2D,0x7A2E,0x7A2F,/* 0x48-0x4F */
+	0x7A30,0x7A31,0x7A32,0x7A34,0x7A35,0x7A36,0x7A38,0x7A3A,/* 0x50-0x57 */
+	0x7A3E,0x7A40,0x7A41,0x7A42,0x7A43,0x7A44,0x7A45,0x7A47,/* 0x58-0x5F */
+	0x7A48,0x7A49,0x7A4A,0x7A4B,0x7A4C,0x7A4D,0x7A4E,0x7A4F,/* 0x60-0x67 */
+	0x7A50,0x7A52,0x7A53,0x7A54,0x7A55,0x7A56,0x7A58,0x7A59,/* 0x68-0x6F */
+	0x7A5A,0x7A5B,0x7A5C,0x7A5D,0x7A5E,0x7A5F,0x7A60,0x7A61,/* 0x70-0x77 */
+	0x7A62,0x7A63,0x7A64,0x7A65,0x7A66,0x7A67,0x7A68,0x0000,/* 0x78-0x7F */
+
+	0x7A69,0x7A6A,0x7A6B,0x7A6C,0x7A6D,0x7A6E,0x7A6F,0x7A71,/* 0x80-0x87 */
+	0x7A72,0x7A73,0x7A75,0x7A7B,0x7A7C,0x7A7D,0x7A7E,0x7A82,/* 0x88-0x8F */
+	0x7A85,0x7A87,0x7A89,0x7A8A,0x7A8B,0x7A8C,0x7A8E,0x7A8F,/* 0x90-0x97 */
+	0x7A90,0x7A93,0x7A94,0x7A99,0x7A9A,0x7A9B,0x7A9E,0x7AA1,/* 0x98-0x9F */
+	0x7AA2,0x8D30,0x53D1,0x7F5A,0x7B4F,0x4F10,0x4E4F,0x9600,/* 0xA0-0xA7 */
+	0x6CD5,0x73D0,0x85E9,0x5E06,0x756A,0x7FFB,0x6A0A,0x77FE,/* 0xA8-0xAF */
+	0x9492,0x7E41,0x51E1,0x70E6,0x53CD,0x8FD4,0x8303,0x8D29,/* 0xB0-0xB7 */
+	0x72AF,0x996D,0x6CDB,0x574A,0x82B3,0x65B9,0x80AA,0x623F,/* 0xB8-0xBF */
+	0x9632,0x59A8,0x4EFF,0x8BBF,0x7EBA,0x653E,0x83F2,0x975E,/* 0xC0-0xC7 */
+	0x5561,0x98DE,0x80A5,0x532A,0x8BFD,0x5420,0x80BA,0x5E9F,/* 0xC8-0xCF */
+	0x6CB8,0x8D39,0x82AC,0x915A,0x5429,0x6C1B,0x5206,0x7EB7,/* 0xD0-0xD7 */
+	0x575F,0x711A,0x6C7E,0x7C89,0x594B,0x4EFD,0x5FFF,0x6124,/* 0xD8-0xDF */
+	0x7CAA,0x4E30,0x5C01,0x67AB,0x8702,0x5CF0,0x950B,0x98CE,/* 0xE0-0xE7 */
+	0x75AF,0x70FD,0x9022,0x51AF,0x7F1D,0x8BBD,0x5949,0x51E4,/* 0xE8-0xEF */
+	0x4F5B,0x5426,0x592B,0x6577,0x80A4,0x5B75,0x6276,0x62C2,/* 0xF0-0xF7 */
+	0x8F90,0x5E45,0x6C1F,0x7B26,0x4F0F,0x4FD8,0x670D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7AA3,0x7AA4,0x7AA7,0x7AA9,0x7AAA,0x7AAB,0x7AAE,0x7AAF,/* 0x40-0x47 */
+	0x7AB0,0x7AB1,0x7AB2,0x7AB4,0x7AB5,0x7AB6,0x7AB7,0x7AB8,/* 0x48-0x4F */
+	0x7AB9,0x7ABA,0x7ABB,0x7ABC,0x7ABD,0x7ABE,0x7AC0,0x7AC1,/* 0x50-0x57 */
+	0x7AC2,0x7AC3,0x7AC4,0x7AC5,0x7AC6,0x7AC7,0x7AC8,0x7AC9,/* 0x58-0x5F */
+	0x7ACA,0x7ACC,0x7ACD,0x7ACE,0x7ACF,0x7AD0,0x7AD1,0x7AD2,/* 0x60-0x67 */
+	0x7AD3,0x7AD4,0x7AD5,0x7AD7,0x7AD8,0x7ADA,0x7ADB,0x7ADC,/* 0x68-0x6F */
+	0x7ADD,0x7AE1,0x7AE2,0x7AE4,0x7AE7,0x7AE8,0x7AE9,0x7AEA,/* 0x70-0x77 */
+	0x7AEB,0x7AEC,0x7AEE,0x7AF0,0x7AF1,0x7AF2,0x7AF3,0x0000,/* 0x78-0x7F */
+
+	0x7AF4,0x7AF5,0x7AF6,0x7AF7,0x7AF8,0x7AFB,0x7AFC,0x7AFE,/* 0x80-0x87 */
+	0x7B00,0x7B01,0x7B02,0x7B05,0x7B07,0x7B09,0x7B0C,0x7B0D,/* 0x88-0x8F */
+	0x7B0E,0x7B10,0x7B12,0x7B13,0x7B16,0x7B17,0x7B18,0x7B1A,/* 0x90-0x97 */
+	0x7B1C,0x7B1D,0x7B1F,0x7B21,0x7B22,0x7B23,0x7B27,0x7B29,/* 0x98-0x9F */
+	0x7B2D,0x6D6E,0x6DAA,0x798F,0x88B1,0x5F17,0x752B,0x629A,/* 0xA0-0xA7 */
+	0x8F85,0x4FEF,0x91DC,0x65A7,0x812F,0x8151,0x5E9C,0x8150,/* 0xA8-0xAF */
+	0x8D74,0x526F,0x8986,0x8D4B,0x590D,0x5085,0x4ED8,0x961C,/* 0xB0-0xB7 */
+	0x7236,0x8179,0x8D1F,0x5BCC,0x8BA3,0x9644,0x5987,0x7F1A,/* 0xB8-0xBF */
+	0x5490,0x5676,0x560E,0x8BE5,0x6539,0x6982,0x9499,0x76D6,/* 0xC0-0xC7 */
+	0x6E89,0x5E72,0x7518,0x6746,0x67D1,0x7AFF,0x809D,0x8D76,/* 0xC8-0xCF */
+	0x611F,0x79C6,0x6562,0x8D63,0x5188,0x521A,0x94A2,0x7F38,/* 0xD0-0xD7 */
+	0x809B,0x7EB2,0x5C97,0x6E2F,0x6760,0x7BD9,0x768B,0x9AD8,/* 0xD8-0xDF */
+	0x818F,0x7F94,0x7CD5,0x641E,0x9550,0x7A3F,0x544A,0x54E5,/* 0xE0-0xE7 */
+	0x6B4C,0x6401,0x6208,0x9E3D,0x80F3,0x7599,0x5272,0x9769,/* 0xE8-0xEF */
+	0x845B,0x683C,0x86E4,0x9601,0x9694,0x94EC,0x4E2A,0x5404,/* 0xF0-0xF7 */
+	0x7ED9,0x6839,0x8DDF,0x8015,0x66F4,0x5E9A,0x7FB9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7B2F,0x7B30,0x7B32,0x7B34,0x7B35,0x7B36,0x7B37,0x7B39,/* 0x40-0x47 */
+	0x7B3B,0x7B3D,0x7B3F,0x7B40,0x7B41,0x7B42,0x7B43,0x7B44,/* 0x48-0x4F */
+	0x7B46,0x7B48,0x7B4A,0x7B4D,0x7B4E,0x7B53,0x7B55,0x7B57,/* 0x50-0x57 */
+	0x7B59,0x7B5C,0x7B5E,0x7B5F,0x7B61,0x7B63,0x7B64,0x7B65,/* 0x58-0x5F */
+	0x7B66,0x7B67,0x7B68,0x7B69,0x7B6A,0x7B6B,0x7B6C,0x7B6D,/* 0x60-0x67 */
+	0x7B6F,0x7B70,0x7B73,0x7B74,0x7B76,0x7B78,0x7B7A,0x7B7C,/* 0x68-0x6F */
+	0x7B7D,0x7B7F,0x7B81,0x7B82,0x7B83,0x7B84,0x7B86,0x7B87,/* 0x70-0x77 */
+	0x7B88,0x7B89,0x7B8A,0x7B8B,0x7B8C,0x7B8E,0x7B8F,0x0000,/* 0x78-0x7F */
+
+	0x7B91,0x7B92,0x7B93,0x7B96,0x7B98,0x7B99,0x7B9A,0x7B9B,/* 0x80-0x87 */
+	0x7B9E,0x7B9F,0x7BA0,0x7BA3,0x7BA4,0x7BA5,0x7BAE,0x7BAF,/* 0x88-0x8F */
+	0x7BB0,0x7BB2,0x7BB3,0x7BB5,0x7BB6,0x7BB7,0x7BB9,0x7BBA,/* 0x90-0x97 */
+	0x7BBB,0x7BBC,0x7BBD,0x7BBE,0x7BBF,0x7BC0,0x7BC2,0x7BC3,/* 0x98-0x9F */
+	0x7BC4,0x57C2,0x803F,0x6897,0x5DE5,0x653B,0x529F,0x606D,/* 0xA0-0xA7 */
+	0x9F9A,0x4F9B,0x8EAC,0x516C,0x5BAB,0x5F13,0x5DE9,0x6C5E,/* 0xA8-0xAF */
+	0x62F1,0x8D21,0x5171,0x94A9,0x52FE,0x6C9F,0x82DF,0x72D7,/* 0xB0-0xB7 */
+	0x57A2,0x6784,0x8D2D,0x591F,0x8F9C,0x83C7,0x5495,0x7B8D,/* 0xB8-0xBF */
+	0x4F30,0x6CBD,0x5B64,0x59D1,0x9F13,0x53E4,0x86CA,0x9AA8,/* 0xC0-0xC7 */
+	0x8C37,0x80A1,0x6545,0x987E,0x56FA,0x96C7,0x522E,0x74DC,/* 0xC8-0xCF */
+	0x5250,0x5BE1,0x6302,0x8902,0x4E56,0x62D0,0x602A,0x68FA,/* 0xD0-0xD7 */
+	0x5173,0x5B98,0x51A0,0x89C2,0x7BA1,0x9986,0x7F50,0x60EF,/* 0xD8-0xDF */
+	0x704C,0x8D2F,0x5149,0x5E7F,0x901B,0x7470,0x89C4,0x572D,/* 0xE0-0xE7 */
+	0x7845,0x5F52,0x9F9F,0x95FA,0x8F68,0x9B3C,0x8BE1,0x7678,/* 0xE8-0xEF */
+	0x6842,0x67DC,0x8DEA,0x8D35,0x523D,0x8F8A,0x6EDA,0x68CD,/* 0xF0-0xF7 */
+	0x9505,0x90ED,0x56FD,0x679C,0x88F9,0x8FC7,0x54C8,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7BC5,0x7BC8,0x7BC9,0x7BCA,0x7BCB,0x7BCD,0x7BCE,0x7BCF,/* 0x40-0x47 */
+	0x7BD0,0x7BD2,0x7BD4,0x7BD5,0x7BD6,0x7BD7,0x7BD8,0x7BDB,/* 0x48-0x4F */
+	0x7BDC,0x7BDE,0x7BDF,0x7BE0,0x7BE2,0x7BE3,0x7BE4,0x7BE7,/* 0x50-0x57 */
+	0x7BE8,0x7BE9,0x7BEB,0x7BEC,0x7BED,0x7BEF,0x7BF0,0x7BF2,/* 0x58-0x5F */
+	0x7BF3,0x7BF4,0x7BF5,0x7BF6,0x7BF8,0x7BF9,0x7BFA,0x7BFB,/* 0x60-0x67 */
+	0x7BFD,0x7BFF,0x7C00,0x7C01,0x7C02,0x7C03,0x7C04,0x7C05,/* 0x68-0x6F */
+	0x7C06,0x7C08,0x7C09,0x7C0A,0x7C0D,0x7C0E,0x7C10,0x7C11,/* 0x70-0x77 */
+	0x7C12,0x7C13,0x7C14,0x7C15,0x7C17,0x7C18,0x7C19,0x0000,/* 0x78-0x7F */
+
+	0x7C1A,0x7C1B,0x7C1C,0x7C1D,0x7C1E,0x7C20,0x7C21,0x7C22,/* 0x80-0x87 */
+	0x7C23,0x7C24,0x7C25,0x7C28,0x7C29,0x7C2B,0x7C2C,0x7C2D,/* 0x88-0x8F */
+	0x7C2E,0x7C2F,0x7C30,0x7C31,0x7C32,0x7C33,0x7C34,0x7C35,/* 0x90-0x97 */
+	0x7C36,0x7C37,0x7C39,0x7C3A,0x7C3B,0x7C3C,0x7C3D,0x7C3E,/* 0x98-0x9F */
+	0x7C42,0x9AB8,0x5B69,0x6D77,0x6C26,0x4EA5,0x5BB3,0x9A87,/* 0xA0-0xA7 */
+	0x9163,0x61A8,0x90AF,0x97E9,0x542B,0x6DB5,0x5BD2,0x51FD,/* 0xA8-0xAF */
+	0x558A,0x7F55,0x7FF0,0x64BC,0x634D,0x65F1,0x61BE,0x608D,/* 0xB0-0xB7 */
+	0x710A,0x6C57,0x6C49,0x592F,0x676D,0x822A,0x58D5,0x568E,/* 0xB8-0xBF */
+	0x8C6A,0x6BEB,0x90DD,0x597D,0x8017,0x53F7,0x6D69,0x5475,/* 0xC0-0xC7 */
+	0x559D,0x8377,0x83CF,0x6838,0x79BE,0x548C,0x4F55,0x5408,/* 0xC8-0xCF */
+	0x76D2,0x8C89,0x9602,0x6CB3,0x6DB8,0x8D6B,0x8910,0x9E64,/* 0xD0-0xD7 */
+	0x8D3A,0x563F,0x9ED1,0x75D5,0x5F88,0x72E0,0x6068,0x54FC,/* 0xD8-0xDF */
+	0x4EA8,0x6A2A,0x8861,0x6052,0x8F70,0x54C4,0x70D8,0x8679,/* 0xE0-0xE7 */
+	0x9E3F,0x6D2A,0x5B8F,0x5F18,0x7EA2,0x5589,0x4FAF,0x7334,/* 0xE8-0xEF */
+	0x543C,0x539A,0x5019,0x540E,0x547C,0x4E4E,0x5FFD,0x745A,/* 0xF0-0xF7 */
+	0x58F6,0x846B,0x80E1,0x8774,0x72D0,0x7CCA,0x6E56,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7C43,0x7C44,0x7C45,0x7C46,0x7C47,0x7C48,0x7C49,0x7C4A,/* 0x40-0x47 */
+	0x7C4B,0x7C4C,0x7C4E,0x7C4F,0x7C50,0x7C51,0x7C52,0x7C53,/* 0x48-0x4F */
+	0x7C54,0x7C55,0x7C56,0x7C57,0x7C58,0x7C59,0x7C5A,0x7C5B,/* 0x50-0x57 */
+	0x7C5C,0x7C5D,0x7C5E,0x7C5F,0x7C60,0x7C61,0x7C62,0x7C63,/* 0x58-0x5F */
+	0x7C64,0x7C65,0x7C66,0x7C67,0x7C68,0x7C69,0x7C6A,0x7C6B,/* 0x60-0x67 */
+	0x7C6C,0x7C6D,0x7C6E,0x7C6F,0x7C70,0x7C71,0x7C72,0x7C75,/* 0x68-0x6F */
+	0x7C76,0x7C77,0x7C78,0x7C79,0x7C7A,0x7C7E,0x7C7F,0x7C80,/* 0x70-0x77 */
+	0x7C81,0x7C82,0x7C83,0x7C84,0x7C85,0x7C86,0x7C87,0x0000,/* 0x78-0x7F */
+
+	0x7C88,0x7C8A,0x7C8B,0x7C8C,0x7C8D,0x7C8E,0x7C8F,0x7C90,/* 0x80-0x87 */
+	0x7C93,0x7C94,0x7C96,0x7C99,0x7C9A,0x7C9B,0x7CA0,0x7CA1,/* 0x88-0x8F */
+	0x7CA3,0x7CA6,0x7CA7,0x7CA8,0x7CA9,0x7CAB,0x7CAC,0x7CAD,/* 0x90-0x97 */
+	0x7CAF,0x7CB0,0x7CB4,0x7CB5,0x7CB6,0x7CB7,0x7CB8,0x7CBA,/* 0x98-0x9F */
+	0x7CBB,0x5F27,0x864E,0x552C,0x62A4,0x4E92,0x6CAA,0x6237,/* 0xA0-0xA7 */
+	0x82B1,0x54D7,0x534E,0x733E,0x6ED1,0x753B,0x5212,0x5316,/* 0xA8-0xAF */
+	0x8BDD,0x69D0,0x5F8A,0x6000,0x6DEE,0x574F,0x6B22,0x73AF,/* 0xB0-0xB7 */
+	0x6853,0x8FD8,0x7F13,0x6362,0x60A3,0x5524,0x75EA,0x8C62,/* 0xB8-0xBF */
+	0x7115,0x6DA3,0x5BA6,0x5E7B,0x8352,0x614C,0x9EC4,0x78FA,/* 0xC0-0xC7 */
+	0x8757,0x7C27,0x7687,0x51F0,0x60F6,0x714C,0x6643,0x5E4C,/* 0xC8-0xCF */
+	0x604D,0x8C0E,0x7070,0x6325,0x8F89,0x5FBD,0x6062,0x86D4,/* 0xD0-0xD7 */
+	0x56DE,0x6BC1,0x6094,0x6167,0x5349,0x60E0,0x6666,0x8D3F,/* 0xD8-0xDF */
+	0x79FD,0x4F1A,0x70E9,0x6C47,0x8BB3,0x8BF2,0x7ED8,0x8364,/* 0xE0-0xE7 */
+	0x660F,0x5A5A,0x9B42,0x6D51,0x6DF7,0x8C41,0x6D3B,0x4F19,/* 0xE8-0xEF */
+	0x706B,0x83B7,0x6216,0x60D1,0x970D,0x8D27,0x7978,0x51FB,/* 0xF0-0xF7 */
+	0x573E,0x57FA,0x673A,0x7578,0x7A3D,0x79EF,0x7B95,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7CBF,0x7CC0,0x7CC2,0x7CC3,0x7CC4,0x7CC6,0x7CC9,0x7CCB,/* 0x40-0x47 */
+	0x7CCE,0x7CCF,0x7CD0,0x7CD1,0x7CD2,0x7CD3,0x7CD4,0x7CD8,/* 0x48-0x4F */
+	0x7CDA,0x7CDB,0x7CDD,0x7CDE,0x7CE1,0x7CE2,0x7CE3,0x7CE4,/* 0x50-0x57 */
+	0x7CE5,0x7CE6,0x7CE7,0x7CE9,0x7CEA,0x7CEB,0x7CEC,0x7CED,/* 0x58-0x5F */
+	0x7CEE,0x7CF0,0x7CF1,0x7CF2,0x7CF3,0x7CF4,0x7CF5,0x7CF6,/* 0x60-0x67 */
+	0x7CF7,0x7CF9,0x7CFA,0x7CFC,0x7CFD,0x7CFE,0x7CFF,0x7D00,/* 0x68-0x6F */
+	0x7D01,0x7D02,0x7D03,0x7D04,0x7D05,0x7D06,0x7D07,0x7D08,/* 0x70-0x77 */
+	0x7D09,0x7D0B,0x7D0C,0x7D0D,0x7D0E,0x7D0F,0x7D10,0x0000,/* 0x78-0x7F */
+
+	0x7D11,0x7D12,0x7D13,0x7D14,0x7D15,0x7D16,0x7D17,0x7D18,/* 0x80-0x87 */
+	0x7D19,0x7D1A,0x7D1B,0x7D1C,0x7D1D,0x7D1E,0x7D1F,0x7D21,/* 0x88-0x8F */
+	0x7D23,0x7D24,0x7D25,0x7D26,0x7D28,0x7D29,0x7D2A,0x7D2C,/* 0x90-0x97 */
+	0x7D2D,0x7D2E,0x7D30,0x7D31,0x7D32,0x7D33,0x7D34,0x7D35,/* 0x98-0x9F */
+	0x7D36,0x808C,0x9965,0x8FF9,0x6FC0,0x8BA5,0x9E21,0x59EC,/* 0xA0-0xA7 */
+	0x7EE9,0x7F09,0x5409,0x6781,0x68D8,0x8F91,0x7C4D,0x96C6,/* 0xA8-0xAF */
+	0x53CA,0x6025,0x75BE,0x6C72,0x5373,0x5AC9,0x7EA7,0x6324,/* 0xB0-0xB7 */
+	0x51E0,0x810A,0x5DF1,0x84DF,0x6280,0x5180,0x5B63,0x4F0E,/* 0xB8-0xBF */
+	0x796D,0x5242,0x60B8,0x6D4E,0x5BC4,0x5BC2,0x8BA1,0x8BB0,/* 0xC0-0xC7 */
+	0x65E2,0x5FCC,0x9645,0x5993,0x7EE7,0x7EAA,0x5609,0x67B7,/* 0xC8-0xCF */
+	0x5939,0x4F73,0x5BB6,0x52A0,0x835A,0x988A,0x8D3E,0x7532,/* 0xD0-0xD7 */
+	0x94BE,0x5047,0x7A3C,0x4EF7,0x67B6,0x9A7E,0x5AC1,0x6B7C,/* 0xD8-0xDF */
+	0x76D1,0x575A,0x5C16,0x7B3A,0x95F4,0x714E,0x517C,0x80A9,/* 0xE0-0xE7 */
+	0x8270,0x5978,0x7F04,0x8327,0x68C0,0x67EC,0x78B1,0x7877,/* 0xE8-0xEF */
+	0x62E3,0x6361,0x7B80,0x4FED,0x526A,0x51CF,0x8350,0x69DB,/* 0xF0-0xF7 */
+	0x9274,0x8DF5,0x8D31,0x89C1,0x952E,0x7BAD,0x4EF6,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7D37,0x7D38,0x7D39,0x7D3A,0x7D3B,0x7D3C,0x7D3D,0x7D3E,/* 0x40-0x47 */
+	0x7D3F,0x7D40,0x7D41,0x7D42,0x7D43,0x7D44,0x7D45,0x7D46,/* 0x48-0x4F */
+	0x7D47,0x7D48,0x7D49,0x7D4A,0x7D4B,0x7D4C,0x7D4D,0x7D4E,/* 0x50-0x57 */
+	0x7D4F,0x7D50,0x7D51,0x7D52,0x7D53,0x7D54,0x7D55,0x7D56,/* 0x58-0x5F */
+	0x7D57,0x7D58,0x7D59,0x7D5A,0x7D5B,0x7D5C,0x7D5D,0x7D5E,/* 0x60-0x67 */
+	0x7D5F,0x7D60,0x7D61,0x7D62,0x7D63,0x7D64,0x7D65,0x7D66,/* 0x68-0x6F */
+	0x7D67,0x7D68,0x7D69,0x7D6A,0x7D6B,0x7D6C,0x7D6D,0x7D6F,/* 0x70-0x77 */
+	0x7D70,0x7D71,0x7D72,0x7D73,0x7D74,0x7D75,0x7D76,0x0000,/* 0x78-0x7F */
+
+	0x7D78,0x7D79,0x7D7A,0x7D7B,0x7D7C,0x7D7D,0x7D7E,0x7D7F,/* 0x80-0x87 */
+	0x7D80,0x7D81,0x7D82,0x7D83,0x7D84,0x7D85,0x7D86,0x7D87,/* 0x88-0x8F */
+	0x7D88,0x7D89,0x7D8A,0x7D8B,0x7D8C,0x7D8D,0x7D8E,0x7D8F,/* 0x90-0x97 */
+	0x7D90,0x7D91,0x7D92,0x7D93,0x7D94,0x7D95,0x7D96,0x7D97,/* 0x98-0x9F */
+	0x7D98,0x5065,0x8230,0x5251,0x996F,0x6E10,0x6E85,0x6DA7,/* 0xA0-0xA7 */
+	0x5EFA,0x50F5,0x59DC,0x5C06,0x6D46,0x6C5F,0x7586,0x848B,/* 0xA8-0xAF */
+	0x6868,0x5956,0x8BB2,0x5320,0x9171,0x964D,0x8549,0x6912,/* 0xB0-0xB7 */
+	0x7901,0x7126,0x80F6,0x4EA4,0x90CA,0x6D47,0x9A84,0x5A07,/* 0xB8-0xBF */
+	0x56BC,0x6405,0x94F0,0x77EB,0x4FA5,0x811A,0x72E1,0x89D2,/* 0xC0-0xC7 */
+	0x997A,0x7F34,0x7EDE,0x527F,0x6559,0x9175,0x8F7F,0x8F83,/* 0xC8-0xCF */
+	0x53EB,0x7A96,0x63ED,0x63A5,0x7686,0x79F8,0x8857,0x9636,/* 0xD0-0xD7 */
+	0x622A,0x52AB,0x8282,0x6854,0x6770,0x6377,0x776B,0x7AED,/* 0xD8-0xDF */
+	0x6D01,0x7ED3,0x89E3,0x59D0,0x6212,0x85C9,0x82A5,0x754C,/* 0xE0-0xE7 */
+	0x501F,0x4ECB,0x75A5,0x8BEB,0x5C4A,0x5DFE,0x7B4B,0x65A4,/* 0xE8-0xEF */
+	0x91D1,0x4ECA,0x6D25,0x895F,0x7D27,0x9526,0x4EC5,0x8C28,/* 0xF0-0xF7 */
+	0x8FDB,0x9773,0x664B,0x7981,0x8FD1,0x70EC,0x6D78,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7D99,0x7D9A,0x7D9B,0x7D9C,0x7D9D,0x7D9E,0x7D9F,0x7DA0,/* 0x40-0x47 */
+	0x7DA1,0x7DA2,0x7DA3,0x7DA4,0x7DA5,0x7DA7,0x7DA8,0x7DA9,/* 0x48-0x4F */
+	0x7DAA,0x7DAB,0x7DAC,0x7DAD,0x7DAF,0x7DB0,0x7DB1,0x7DB2,/* 0x50-0x57 */
+	0x7DB3,0x7DB4,0x7DB5,0x7DB6,0x7DB7,0x7DB8,0x7DB9,0x7DBA,/* 0x58-0x5F */
+	0x7DBB,0x7DBC,0x7DBD,0x7DBE,0x7DBF,0x7DC0,0x7DC1,0x7DC2,/* 0x60-0x67 */
+	0x7DC3,0x7DC4,0x7DC5,0x7DC6,0x7DC7,0x7DC8,0x7DC9,0x7DCA,/* 0x68-0x6F */
+	0x7DCB,0x7DCC,0x7DCD,0x7DCE,0x7DCF,0x7DD0,0x7DD1,0x7DD2,/* 0x70-0x77 */
+	0x7DD3,0x7DD4,0x7DD5,0x7DD6,0x7DD7,0x7DD8,0x7DD9,0x0000,/* 0x78-0x7F */
+
+	0x7DDA,0x7DDB,0x7DDC,0x7DDD,0x7DDE,0x7DDF,0x7DE0,0x7DE1,/* 0x80-0x87 */
+	0x7DE2,0x7DE3,0x7DE4,0x7DE5,0x7DE6,0x7DE7,0x7DE8,0x7DE9,/* 0x88-0x8F */
+	0x7DEA,0x7DEB,0x7DEC,0x7DED,0x7DEE,0x7DEF,0x7DF0,0x7DF1,/* 0x90-0x97 */
+	0x7DF2,0x7DF3,0x7DF4,0x7DF5,0x7DF6,0x7DF7,0x7DF8,0x7DF9,/* 0x98-0x9F */
+	0x7DFA,0x5C3D,0x52B2,0x8346,0x5162,0x830E,0x775B,0x6676,/* 0xA0-0xA7 */
+	0x9CB8,0x4EAC,0x60CA,0x7CBE,0x7CB3,0x7ECF,0x4E95,0x8B66,/* 0xA8-0xAF */
+	0x666F,0x9888,0x9759,0x5883,0x656C,0x955C,0x5F84,0x75C9,/* 0xB0-0xB7 */
+	0x9756,0x7ADF,0x7ADE,0x51C0,0x70AF,0x7A98,0x63EA,0x7A76,/* 0xB8-0xBF */
+	0x7EA0,0x7396,0x97ED,0x4E45,0x7078,0x4E5D,0x9152,0x53A9,/* 0xC0-0xC7 */
+	0x6551,0x65E7,0x81FC,0x8205,0x548E,0x5C31,0x759A,0x97A0,/* 0xC8-0xCF */
+	0x62D8,0x72D9,0x75BD,0x5C45,0x9A79,0x83CA,0x5C40,0x5480,/* 0xD0-0xD7 */
+	0x77E9,0x4E3E,0x6CAE,0x805A,0x62D2,0x636E,0x5DE8,0x5177,/* 0xD8-0xDF */
+	0x8DDD,0x8E1E,0x952F,0x4FF1,0x53E5,0x60E7,0x70AC,0x5267,/* 0xE0-0xE7 */
+	0x6350,0x9E43,0x5A1F,0x5026,0x7737,0x5377,0x7EE2,0x6485,/* 0xE8-0xEF */
+	0x652B,0x6289,0x6398,0x5014,0x7235,0x89C9,0x51B3,0x8BC0,/* 0xF0-0xF7 */
+	0x7EDD,0x5747,0x83CC,0x94A7,0x519B,0x541B,0x5CFB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7DFB,0x7DFC,0x7DFD,0x7DFE,0x7DFF,0x7E00,0x7E01,0x7E02,/* 0x40-0x47 */
+	0x7E03,0x7E04,0x7E05,0x7E06,0x7E07,0x7E08,0x7E09,0x7E0A,/* 0x48-0x4F */
+	0x7E0B,0x7E0C,0x7E0D,0x7E0E,0x7E0F,0x7E10,0x7E11,0x7E12,/* 0x50-0x57 */
+	0x7E13,0x7E14,0x7E15,0x7E16,0x7E17,0x7E18,0x7E19,0x7E1A,/* 0x58-0x5F */
+	0x7E1B,0x7E1C,0x7E1D,0x7E1E,0x7E1F,0x7E20,0x7E21,0x7E22,/* 0x60-0x67 */
+	0x7E23,0x7E24,0x7E25,0x7E26,0x7E27,0x7E28,0x7E29,0x7E2A,/* 0x68-0x6F */
+	0x7E2B,0x7E2C,0x7E2D,0x7E2E,0x7E2F,0x7E30,0x7E31,0x7E32,/* 0x70-0x77 */
+	0x7E33,0x7E34,0x7E35,0x7E36,0x7E37,0x7E38,0x7E39,0x0000,/* 0x78-0x7F */
+
+	0x7E3A,0x7E3C,0x7E3D,0x7E3E,0x7E3F,0x7E40,0x7E42,0x7E43,/* 0x80-0x87 */
+	0x7E44,0x7E45,0x7E46,0x7E48,0x7E49,0x7E4A,0x7E4B,0x7E4C,/* 0x88-0x8F */
+	0x7E4D,0x7E4E,0x7E4F,0x7E50,0x7E51,0x7E52,0x7E53,0x7E54,/* 0x90-0x97 */
+	0x7E55,0x7E56,0x7E57,0x7E58,0x7E59,0x7E5A,0x7E5B,0x7E5C,/* 0x98-0x9F */
+	0x7E5D,0x4FCA,0x7AE3,0x6D5A,0x90E1,0x9A8F,0x5580,0x5496,/* 0xA0-0xA7 */
+	0x5361,0x54AF,0x5F00,0x63E9,0x6977,0x51EF,0x6168,0x520A,/* 0xA8-0xAF */
+	0x582A,0x52D8,0x574E,0x780D,0x770B,0x5EB7,0x6177,0x7CE0,/* 0xB0-0xB7 */
+	0x625B,0x6297,0x4EA2,0x7095,0x8003,0x62F7,0x70E4,0x9760,/* 0xB8-0xBF */
+	0x5777,0x82DB,0x67EF,0x68F5,0x78D5,0x9897,0x79D1,0x58F3,/* 0xC0-0xC7 */
+	0x54B3,0x53EF,0x6E34,0x514B,0x523B,0x5BA2,0x8BFE,0x80AF,/* 0xC8-0xCF */
+	0x5543,0x57A6,0x6073,0x5751,0x542D,0x7A7A,0x6050,0x5B54,/* 0xD0-0xD7 */
+	0x63A7,0x62A0,0x53E3,0x6263,0x5BC7,0x67AF,0x54ED,0x7A9F,/* 0xD8-0xDF */
+	0x82E6,0x9177,0x5E93,0x88E4,0x5938,0x57AE,0x630E,0x8DE8,/* 0xE0-0xE7 */
+	0x80EF,0x5757,0x7B77,0x4FA9,0x5FEB,0x5BBD,0x6B3E,0x5321,/* 0xE8-0xEF */
+	0x7B50,0x72C2,0x6846,0x77FF,0x7736,0x65F7,0x51B5,0x4E8F,/* 0xF0-0xF7 */
+	0x76D4,0x5CBF,0x7AA5,0x8475,0x594E,0x9B41,0x5080,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7E5E,0x7E5F,0x7E60,0x7E61,0x7E62,0x7E63,0x7E64,0x7E65,/* 0x40-0x47 */
+	0x7E66,0x7E67,0x7E68,0x7E69,0x7E6A,0x7E6B,0x7E6C,0x7E6D,/* 0x48-0x4F */
+	0x7E6E,0x7E6F,0x7E70,0x7E71,0x7E72,0x7E73,0x7E74,0x7E75,/* 0x50-0x57 */
+	0x7E76,0x7E77,0x7E78,0x7E79,0x7E7A,0x7E7B,0x7E7C,0x7E7D,/* 0x58-0x5F */
+	0x7E7E,0x7E7F,0x7E80,0x7E81,0x7E83,0x7E84,0x7E85,0x7E86,/* 0x60-0x67 */
+	0x7E87,0x7E88,0x7E89,0x7E8A,0x7E8B,0x7E8C,0x7E8D,0x7E8E,/* 0x68-0x6F */
+	0x7E8F,0x7E90,0x7E91,0x7E92,0x7E93,0x7E94,0x7E95,0x7E96,/* 0x70-0x77 */
+	0x7E97,0x7E98,0x7E99,0x7E9A,0x7E9C,0x7E9D,0x7E9E,0x0000,/* 0x78-0x7F */
+
+	0x7EAE,0x7EB4,0x7EBB,0x7EBC,0x7ED6,0x7EE4,0x7EEC,0x7EF9,/* 0x80-0x87 */
+	0x7F0A,0x7F10,0x7F1E,0x7F37,0x7F39,0x7F3B,0x7F3C,0x7F3D,/* 0x88-0x8F */
+	0x7F3E,0x7F3F,0x7F40,0x7F41,0x7F43,0x7F46,0x7F47,0x7F48,/* 0x90-0x97 */
+	0x7F49,0x7F4A,0x7F4B,0x7F4C,0x7F4D,0x7F4E,0x7F4F,0x7F52,/* 0x98-0x9F */
+	0x7F53,0x9988,0x6127,0x6E83,0x5764,0x6606,0x6346,0x56F0,/* 0xA0-0xA7 */
+	0x62EC,0x6269,0x5ED3,0x9614,0x5783,0x62C9,0x5587,0x8721,/* 0xA8-0xAF */
+	0x814A,0x8FA3,0x5566,0x83B1,0x6765,0x8D56,0x84DD,0x5A6A,/* 0xB0-0xB7 */
+	0x680F,0x62E6,0x7BEE,0x9611,0x5170,0x6F9C,0x8C30,0x63FD,/* 0xB8-0xBF */
+	0x89C8,0x61D2,0x7F06,0x70C2,0x6EE5,0x7405,0x6994,0x72FC,/* 0xC0-0xC7 */
+	0x5ECA,0x90CE,0x6717,0x6D6A,0x635E,0x52B3,0x7262,0x8001,/* 0xC8-0xCF */
+	0x4F6C,0x59E5,0x916A,0x70D9,0x6D9D,0x52D2,0x4E50,0x96F7,/* 0xD0-0xD7 */
+	0x956D,0x857E,0x78CA,0x7D2F,0x5121,0x5792,0x64C2,0x808B,/* 0xD8-0xDF */
+	0x7C7B,0x6CEA,0x68F1,0x695E,0x51B7,0x5398,0x68A8,0x7281,/* 0xE0-0xE7 */
+	0x9ECE,0x7BF1,0x72F8,0x79BB,0x6F13,0x7406,0x674E,0x91CC,/* 0xE8-0xEF */
+	0x9CA4,0x793C,0x8389,0x8354,0x540F,0x6817,0x4E3D,0x5389,/* 0xF0-0xF7 */
+	0x52B1,0x783E,0x5386,0x5229,0x5088,0x4F8B,0x4FD0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7F56,0x7F59,0x7F5B,0x7F5C,0x7F5D,0x7F5E,0x7F60,0x7F63,/* 0x40-0x47 */
+	0x7F64,0x7F65,0x7F66,0x7F67,0x7F6B,0x7F6C,0x7F6D,0x7F6F,/* 0x48-0x4F */
+	0x7F70,0x7F73,0x7F75,0x7F76,0x7F77,0x7F78,0x7F7A,0x7F7B,/* 0x50-0x57 */
+	0x7F7C,0x7F7D,0x7F7F,0x7F80,0x7F82,0x7F83,0x7F84,0x7F85,/* 0x58-0x5F */
+	0x7F86,0x7F87,0x7F88,0x7F89,0x7F8B,0x7F8D,0x7F8F,0x7F90,/* 0x60-0x67 */
+	0x7F91,0x7F92,0x7F93,0x7F95,0x7F96,0x7F97,0x7F98,0x7F99,/* 0x68-0x6F */
+	0x7F9B,0x7F9C,0x7FA0,0x7FA2,0x7FA3,0x7FA5,0x7FA6,0x7FA8,/* 0x70-0x77 */
+	0x7FA9,0x7FAA,0x7FAB,0x7FAC,0x7FAD,0x7FAE,0x7FB1,0x0000,/* 0x78-0x7F */
+
+	0x7FB3,0x7FB4,0x7FB5,0x7FB6,0x7FB7,0x7FBA,0x7FBB,0x7FBE,/* 0x80-0x87 */
+	0x7FC0,0x7FC2,0x7FC3,0x7FC4,0x7FC6,0x7FC7,0x7FC8,0x7FC9,/* 0x88-0x8F */
+	0x7FCB,0x7FCD,0x7FCF,0x7FD0,0x7FD1,0x7FD2,0x7FD3,0x7FD6,/* 0x90-0x97 */
+	0x7FD7,0x7FD9,0x7FDA,0x7FDB,0x7FDC,0x7FDD,0x7FDE,0x7FE2,/* 0x98-0x9F */
+	0x7FE3,0x75E2,0x7ACB,0x7C92,0x6CA5,0x96B6,0x529B,0x7483,/* 0xA0-0xA7 */
+	0x54E9,0x4FE9,0x8054,0x83B2,0x8FDE,0x9570,0x5EC9,0x601C,/* 0xA8-0xAF */
+	0x6D9F,0x5E18,0x655B,0x8138,0x94FE,0x604B,0x70BC,0x7EC3,/* 0xB0-0xB7 */
+	0x7CAE,0x51C9,0x6881,0x7CB1,0x826F,0x4E24,0x8F86,0x91CF,/* 0xB8-0xBF */
+	0x667E,0x4EAE,0x8C05,0x64A9,0x804A,0x50DA,0x7597,0x71CE,/* 0xC0-0xC7 */
+	0x5BE5,0x8FBD,0x6F66,0x4E86,0x6482,0x9563,0x5ED6,0x6599,/* 0xC8-0xCF */
+	0x5217,0x88C2,0x70C8,0x52A3,0x730E,0x7433,0x6797,0x78F7,/* 0xD0-0xD7 */
+	0x9716,0x4E34,0x90BB,0x9CDE,0x6DCB,0x51DB,0x8D41,0x541D,/* 0xD8-0xDF */
+	0x62CE,0x73B2,0x83F1,0x96F6,0x9F84,0x94C3,0x4F36,0x7F9A,/* 0xE0-0xE7 */
+	0x51CC,0x7075,0x9675,0x5CAD,0x9886,0x53E6,0x4EE4,0x6E9C,/* 0xE8-0xEF */
+	0x7409,0x69B4,0x786B,0x998F,0x7559,0x5218,0x7624,0x6D41,/* 0xF0-0xF7 */
+	0x67F3,0x516D,0x9F99,0x804B,0x5499,0x7B3C,0x7ABF,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7FE4,0x7FE7,0x7FE8,0x7FEA,0x7FEB,0x7FEC,0x7FED,0x7FEF,/* 0x40-0x47 */
+	0x7FF2,0x7FF4,0x7FF5,0x7FF6,0x7FF7,0x7FF8,0x7FF9,0x7FFA,/* 0x48-0x4F */
+	0x7FFD,0x7FFE,0x7FFF,0x8002,0x8007,0x8008,0x8009,0x800A,/* 0x50-0x57 */
+	0x800E,0x800F,0x8011,0x8013,0x801A,0x801B,0x801D,0x801E,/* 0x58-0x5F */
+	0x801F,0x8021,0x8023,0x8024,0x802B,0x802C,0x802D,0x802E,/* 0x60-0x67 */
+	0x802F,0x8030,0x8032,0x8034,0x8039,0x803A,0x803C,0x803E,/* 0x68-0x6F */
+	0x8040,0x8041,0x8044,0x8045,0x8047,0x8048,0x8049,0x804E,/* 0x70-0x77 */
+	0x804F,0x8050,0x8051,0x8053,0x8055,0x8056,0x8057,0x0000,/* 0x78-0x7F */
+
+	0x8059,0x805B,0x805C,0x805D,0x805E,0x805F,0x8060,0x8061,/* 0x80-0x87 */
+	0x8062,0x8063,0x8064,0x8065,0x8066,0x8067,0x8068,0x806B,/* 0x88-0x8F */
+	0x806C,0x806D,0x806E,0x806F,0x8070,0x8072,0x8073,0x8074,/* 0x90-0x97 */
+	0x8075,0x8076,0x8077,0x8078,0x8079,0x807A,0x807B,0x807C,/* 0x98-0x9F */
+	0x807D,0x9686,0x5784,0x62E2,0x9647,0x697C,0x5A04,0x6402,/* 0xA0-0xA7 */
+	0x7BD3,0x6F0F,0x964B,0x82A6,0x5362,0x9885,0x5E90,0x7089,/* 0xA8-0xAF */
+	0x63B3,0x5364,0x864F,0x9C81,0x9E93,0x788C,0x9732,0x8DEF,/* 0xB0-0xB7 */
+	0x8D42,0x9E7F,0x6F5E,0x7984,0x5F55,0x9646,0x622E,0x9A74,/* 0xB8-0xBF */
+	0x5415,0x94DD,0x4FA3,0x65C5,0x5C65,0x5C61,0x7F15,0x8651,/* 0xC0-0xC7 */
+	0x6C2F,0x5F8B,0x7387,0x6EE4,0x7EFF,0x5CE6,0x631B,0x5B6A,/* 0xC8-0xCF */
+	0x6EE6,0x5375,0x4E71,0x63A0,0x7565,0x62A1,0x8F6E,0x4F26,/* 0xD0-0xD7 */
+	0x4ED1,0x6CA6,0x7EB6,0x8BBA,0x841D,0x87BA,0x7F57,0x903B,/* 0xD8-0xDF */
+	0x9523,0x7BA9,0x9AA1,0x88F8,0x843D,0x6D1B,0x9A86,0x7EDC,/* 0xE0-0xE7 */
+	0x5988,0x9EBB,0x739B,0x7801,0x8682,0x9A6C,0x9A82,0x561B,/* 0xE8-0xEF */
+	0x5417,0x57CB,0x4E70,0x9EA6,0x5356,0x8FC8,0x8109,0x7792,/* 0xF0-0xF7 */
+	0x9992,0x86EE,0x6EE1,0x8513,0x66FC,0x6162,0x6F2B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x807E,0x8081,0x8082,0x8085,0x8088,0x808A,0x808D,0x808E,/* 0x40-0x47 */
+	0x808F,0x8090,0x8091,0x8092,0x8094,0x8095,0x8097,0x8099,/* 0x48-0x4F */
+	0x809E,0x80A3,0x80A6,0x80A7,0x80A8,0x80AC,0x80B0,0x80B3,/* 0x50-0x57 */
+	0x80B5,0x80B6,0x80B8,0x80B9,0x80BB,0x80C5,0x80C7,0x80C8,/* 0x58-0x5F */
+	0x80C9,0x80CA,0x80CB,0x80CF,0x80D0,0x80D1,0x80D2,0x80D3,/* 0x60-0x67 */
+	0x80D4,0x80D5,0x80D8,0x80DF,0x80E0,0x80E2,0x80E3,0x80E6,/* 0x68-0x6F */
+	0x80EE,0x80F5,0x80F7,0x80F9,0x80FB,0x80FE,0x80FF,0x8100,/* 0x70-0x77 */
+	0x8101,0x8103,0x8104,0x8105,0x8107,0x8108,0x810B,0x0000,/* 0x78-0x7F */
+
+	0x810C,0x8115,0x8117,0x8119,0x811B,0x811C,0x811D,0x811F,/* 0x80-0x87 */
+	0x8120,0x8121,0x8122,0x8123,0x8124,0x8125,0x8126,0x8127,/* 0x88-0x8F */
+	0x8128,0x8129,0x812A,0x812B,0x812D,0x812E,0x8130,0x8133,/* 0x90-0x97 */
+	0x8134,0x8135,0x8137,0x8139,0x813A,0x813B,0x813C,0x813D,/* 0x98-0x9F */
+	0x813F,0x8C29,0x8292,0x832B,0x76F2,0x6C13,0x5FD9,0x83BD,/* 0xA0-0xA7 */
+	0x732B,0x8305,0x951A,0x6BDB,0x77DB,0x94C6,0x536F,0x8302,/* 0xA8-0xAF */
+	0x5192,0x5E3D,0x8C8C,0x8D38,0x4E48,0x73AB,0x679A,0x6885,/* 0xB0-0xB7 */
+	0x9176,0x9709,0x7164,0x6CA1,0x7709,0x5A92,0x9541,0x6BCF,/* 0xB8-0xBF */
+	0x7F8E,0x6627,0x5BD0,0x59B9,0x5A9A,0x95E8,0x95F7,0x4EEC,/* 0xC0-0xC7 */
+	0x840C,0x8499,0x6AAC,0x76DF,0x9530,0x731B,0x68A6,0x5B5F,/* 0xC8-0xCF */
+	0x772F,0x919A,0x9761,0x7CDC,0x8FF7,0x8C1C,0x5F25,0x7C73,/* 0xD0-0xD7 */
+	0x79D8,0x89C5,0x6CCC,0x871C,0x5BC6,0x5E42,0x68C9,0x7720,/* 0xD8-0xDF */
+	0x7EF5,0x5195,0x514D,0x52C9,0x5A29,0x7F05,0x9762,0x82D7,/* 0xE0-0xE7 */
+	0x63CF,0x7784,0x85D0,0x79D2,0x6E3A,0x5E99,0x5999,0x8511,/* 0xE8-0xEF */
+	0x706D,0x6C11,0x62BF,0x76BF,0x654F,0x60AF,0x95FD,0x660E,/* 0xF0-0xF7 */
+	0x879F,0x9E23,0x94ED,0x540D,0x547D,0x8C2C,0x6478,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8140,0x8141,0x8142,0x8143,0x8144,0x8145,0x8147,0x8149,/* 0x40-0x47 */
+	0x814D,0x814E,0x814F,0x8152,0x8156,0x8157,0x8158,0x815B,/* 0x48-0x4F */
+	0x815C,0x815D,0x815E,0x815F,0x8161,0x8162,0x8163,0x8164,/* 0x50-0x57 */
+	0x8166,0x8168,0x816A,0x816B,0x816C,0x816F,0x8172,0x8173,/* 0x58-0x5F */
+	0x8175,0x8176,0x8177,0x8178,0x8181,0x8183,0x8184,0x8185,/* 0x60-0x67 */
+	0x8186,0x8187,0x8189,0x818B,0x818C,0x818D,0x818E,0x8190,/* 0x68-0x6F */
+	0x8192,0x8193,0x8194,0x8195,0x8196,0x8197,0x8199,0x819A,/* 0x70-0x77 */
+	0x819E,0x819F,0x81A0,0x81A1,0x81A2,0x81A4,0x81A5,0x0000,/* 0x78-0x7F */
+
+	0x81A7,0x81A9,0x81AB,0x81AC,0x81AD,0x81AE,0x81AF,0x81B0,/* 0x80-0x87 */
+	0x81B1,0x81B2,0x81B4,0x81B5,0x81B6,0x81B7,0x81B8,0x81B9,/* 0x88-0x8F */
+	0x81BC,0x81BD,0x81BE,0x81BF,0x81C4,0x81C5,0x81C7,0x81C8,/* 0x90-0x97 */
+	0x81C9,0x81CB,0x81CD,0x81CE,0x81CF,0x81D0,0x81D1,0x81D2,/* 0x98-0x9F */
+	0x81D3,0x6479,0x8611,0x6A21,0x819C,0x78E8,0x6469,0x9B54,/* 0xA0-0xA7 */
+	0x62B9,0x672B,0x83AB,0x58A8,0x9ED8,0x6CAB,0x6F20,0x5BDE,/* 0xA8-0xAF */
+	0x964C,0x8C0B,0x725F,0x67D0,0x62C7,0x7261,0x4EA9,0x59C6,/* 0xB0-0xB7 */
+	0x6BCD,0x5893,0x66AE,0x5E55,0x52DF,0x6155,0x6728,0x76EE,/* 0xB8-0xBF */
+	0x7766,0x7267,0x7A46,0x62FF,0x54EA,0x5450,0x94A0,0x90A3,/* 0xC0-0xC7 */
+	0x5A1C,0x7EB3,0x6C16,0x4E43,0x5976,0x8010,0x5948,0x5357,/* 0xC8-0xCF */
+	0x7537,0x96BE,0x56CA,0x6320,0x8111,0x607C,0x95F9,0x6DD6,/* 0xD0-0xD7 */
+	0x5462,0x9981,0x5185,0x5AE9,0x80FD,0x59AE,0x9713,0x502A,/* 0xD8-0xDF */
+	0x6CE5,0x5C3C,0x62DF,0x4F60,0x533F,0x817B,0x9006,0x6EBA,/* 0xE0-0xE7 */
+	0x852B,0x62C8,0x5E74,0x78BE,0x64B5,0x637B,0x5FF5,0x5A18,/* 0xE8-0xEF */
+	0x917F,0x9E1F,0x5C3F,0x634F,0x8042,0x5B7D,0x556E,0x954A,/* 0xF0-0xF7 */
+	0x954D,0x6D85,0x60A8,0x67E0,0x72DE,0x51DD,0x5B81,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x81D4,0x81D5,0x81D6,0x81D7,0x81D8,0x81D9,0x81DA,0x81DB,/* 0x40-0x47 */
+	0x81DC,0x81DD,0x81DE,0x81DF,0x81E0,0x81E1,0x81E2,0x81E4,/* 0x48-0x4F */
+	0x81E5,0x81E6,0x81E8,0x81E9,0x81EB,0x81EE,0x81EF,0x81F0,/* 0x50-0x57 */
+	0x81F1,0x81F2,0x81F5,0x81F6,0x81F7,0x81F8,0x81F9,0x81FA,/* 0x58-0x5F */
+	0x81FD,0x81FF,0x8203,0x8207,0x8208,0x8209,0x820A,0x820B,/* 0x60-0x67 */
+	0x820E,0x820F,0x8211,0x8213,0x8215,0x8216,0x8217,0x8218,/* 0x68-0x6F */
+	0x8219,0x821A,0x821D,0x8220,0x8224,0x8225,0x8226,0x8227,/* 0x70-0x77 */
+	0x8229,0x822E,0x8232,0x823A,0x823C,0x823D,0x823F,0x0000,/* 0x78-0x7F */
+
+	0x8240,0x8241,0x8242,0x8243,0x8245,0x8246,0x8248,0x824A,/* 0x80-0x87 */
+	0x824C,0x824D,0x824E,0x8250,0x8251,0x8252,0x8253,0x8254,/* 0x88-0x8F */
+	0x8255,0x8256,0x8257,0x8259,0x825B,0x825C,0x825D,0x825E,/* 0x90-0x97 */
+	0x8260,0x8261,0x8262,0x8263,0x8264,0x8265,0x8266,0x8267,/* 0x98-0x9F */
+	0x8269,0x62E7,0x6CDE,0x725B,0x626D,0x94AE,0x7EBD,0x8113,/* 0xA0-0xA7 */
+	0x6D53,0x519C,0x5F04,0x5974,0x52AA,0x6012,0x5973,0x6696,/* 0xA8-0xAF */
+	0x8650,0x759F,0x632A,0x61E6,0x7CEF,0x8BFA,0x54E6,0x6B27,/* 0xB0-0xB7 */
+	0x9E25,0x6BB4,0x85D5,0x5455,0x5076,0x6CA4,0x556A,0x8DB4,/* 0xB8-0xBF */
+	0x722C,0x5E15,0x6015,0x7436,0x62CD,0x6392,0x724C,0x5F98,/* 0xC0-0xC7 */
+	0x6E43,0x6D3E,0x6500,0x6F58,0x76D8,0x78D0,0x76FC,0x7554,/* 0xC8-0xCF */
+	0x5224,0x53DB,0x4E53,0x5E9E,0x65C1,0x802A,0x80D6,0x629B,/* 0xD0-0xD7 */
+	0x5486,0x5228,0x70AE,0x888D,0x8DD1,0x6CE1,0x5478,0x80DA,/* 0xD8-0xDF */
+	0x57F9,0x88F4,0x8D54,0x966A,0x914D,0x4F69,0x6C9B,0x55B7,/* 0xE0-0xE7 */
+	0x76C6,0x7830,0x62A8,0x70F9,0x6F8E,0x5F6D,0x84EC,0x68DA,/* 0xE8-0xEF */
+	0x787C,0x7BF7,0x81A8,0x670B,0x9E4F,0x6367,0x78B0,0x576F,/* 0xF0-0xF7 */
+	0x7812,0x9739,0x6279,0x62AB,0x5288,0x7435,0x6BD7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x826A,0x826B,0x826C,0x826D,0x8271,0x8275,0x8276,0x8277,/* 0x40-0x47 */
+	0x8278,0x827B,0x827C,0x8280,0x8281,0x8283,0x8285,0x8286,/* 0x48-0x4F */
+	0x8287,0x8289,0x828C,0x8290,0x8293,0x8294,0x8295,0x8296,/* 0x50-0x57 */
+	0x829A,0x829B,0x829E,0x82A0,0x82A2,0x82A3,0x82A7,0x82B2,/* 0x58-0x5F */
+	0x82B5,0x82B6,0x82BA,0x82BB,0x82BC,0x82BF,0x82C0,0x82C2,/* 0x60-0x67 */
+	0x82C3,0x82C5,0x82C6,0x82C9,0x82D0,0x82D6,0x82D9,0x82DA,/* 0x68-0x6F */
+	0x82DD,0x82E2,0x82E7,0x82E8,0x82E9,0x82EA,0x82EC,0x82ED,/* 0x70-0x77 */
+	0x82EE,0x82F0,0x82F2,0x82F3,0x82F5,0x82F6,0x82F8,0x0000,/* 0x78-0x7F */
+
+	0x82FA,0x82FC,0x82FD,0x82FE,0x82FF,0x8300,0x830A,0x830B,/* 0x80-0x87 */
+	0x830D,0x8310,0x8312,0x8313,0x8316,0x8318,0x8319,0x831D,/* 0x88-0x8F */
+	0x831E,0x831F,0x8320,0x8321,0x8322,0x8323,0x8324,0x8325,/* 0x90-0x97 */
+	0x8326,0x8329,0x832A,0x832E,0x8330,0x8332,0x8337,0x833B,/* 0x98-0x9F */
+	0x833D,0x5564,0x813E,0x75B2,0x76AE,0x5339,0x75DE,0x50FB,/* 0xA0-0xA7 */
+	0x5C41,0x8B6C,0x7BC7,0x504F,0x7247,0x9A97,0x98D8,0x6F02,/* 0xA8-0xAF */
+	0x74E2,0x7968,0x6487,0x77A5,0x62FC,0x9891,0x8D2B,0x54C1,/* 0xB0-0xB7 */
+	0x8058,0x4E52,0x576A,0x82F9,0x840D,0x5E73,0x51ED,0x74F6,/* 0xB8-0xBF */
+	0x8BC4,0x5C4F,0x5761,0x6CFC,0x9887,0x5A46,0x7834,0x9B44,/* 0xC0-0xC7 */
+	0x8FEB,0x7C95,0x5256,0x6251,0x94FA,0x4EC6,0x8386,0x8461,/* 0xC8-0xCF */
+	0x83E9,0x84B2,0x57D4,0x6734,0x5703,0x666E,0x6D66,0x8C31,/* 0xD0-0xD7 */
+	0x66DD,0x7011,0x671F,0x6B3A,0x6816,0x621A,0x59BB,0x4E03,/* 0xD8-0xDF */
+	0x51C4,0x6F06,0x67D2,0x6C8F,0x5176,0x68CB,0x5947,0x6B67,/* 0xE0-0xE7 */
+	0x7566,0x5D0E,0x8110,0x9F50,0x65D7,0x7948,0x7941,0x9A91,/* 0xE8-0xEF */
+	0x8D77,0x5C82,0x4E5E,0x4F01,0x542F,0x5951,0x780C,0x5668,/* 0xF0-0xF7 */
+	0x6C14,0x8FC4,0x5F03,0x6C7D,0x6CE3,0x8BAB,0x6390,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x833E,0x833F,0x8341,0x8342,0x8344,0x8345,0x8348,0x834A,/* 0x40-0x47 */
+	0x834B,0x834C,0x834D,0x834E,0x8353,0x8355,0x8356,0x8357,/* 0x48-0x4F */
+	0x8358,0x8359,0x835D,0x8362,0x8370,0x8371,0x8372,0x8373,/* 0x50-0x57 */
+	0x8374,0x8375,0x8376,0x8379,0x837A,0x837E,0x837F,0x8380,/* 0x58-0x5F */
+	0x8381,0x8382,0x8383,0x8384,0x8387,0x8388,0x838A,0x838B,/* 0x60-0x67 */
+	0x838C,0x838D,0x838F,0x8390,0x8391,0x8394,0x8395,0x8396,/* 0x68-0x6F */
+	0x8397,0x8399,0x839A,0x839D,0x839F,0x83A1,0x83A2,0x83A3,/* 0x70-0x77 */
+	0x83A4,0x83A5,0x83A6,0x83A7,0x83AC,0x83AD,0x83AE,0x0000,/* 0x78-0x7F */
+
+	0x83AF,0x83B5,0x83BB,0x83BE,0x83BF,0x83C2,0x83C3,0x83C4,/* 0x80-0x87 */
+	0x83C6,0x83C8,0x83C9,0x83CB,0x83CD,0x83CE,0x83D0,0x83D1,/* 0x88-0x8F */
+	0x83D2,0x83D3,0x83D5,0x83D7,0x83D9,0x83DA,0x83DB,0x83DE,/* 0x90-0x97 */
+	0x83E2,0x83E3,0x83E4,0x83E6,0x83E7,0x83E8,0x83EB,0x83EC,/* 0x98-0x9F */
+	0x83ED,0x6070,0x6D3D,0x7275,0x6266,0x948E,0x94C5,0x5343,/* 0xA0-0xA7 */
+	0x8FC1,0x7B7E,0x4EDF,0x8C26,0x4E7E,0x9ED4,0x94B1,0x94B3,/* 0xA8-0xAF */
+	0x524D,0x6F5C,0x9063,0x6D45,0x8C34,0x5811,0x5D4C,0x6B20,/* 0xB0-0xB7 */
+	0x6B49,0x67AA,0x545B,0x8154,0x7F8C,0x5899,0x8537,0x5F3A,/* 0xB8-0xBF */
+	0x62A2,0x6A47,0x9539,0x6572,0x6084,0x6865,0x77A7,0x4E54,/* 0xC0-0xC7 */
+	0x4FA8,0x5DE7,0x9798,0x64AC,0x7FD8,0x5CED,0x4FCF,0x7A8D,/* 0xC8-0xCF */
+	0x5207,0x8304,0x4E14,0x602F,0x7A83,0x94A6,0x4FB5,0x4EB2,/* 0xD0-0xD7 */
+	0x79E6,0x7434,0x52E4,0x82B9,0x64D2,0x79BD,0x5BDD,0x6C81,/* 0xD8-0xDF */
+	0x9752,0x8F7B,0x6C22,0x503E,0x537F,0x6E05,0x64CE,0x6674,/* 0xE0-0xE7 */
+	0x6C30,0x60C5,0x9877,0x8BF7,0x5E86,0x743C,0x7A77,0x79CB,/* 0xE8-0xEF */
+	0x4E18,0x90B1,0x7403,0x6C42,0x56DA,0x914B,0x6CC5,0x8D8B,/* 0xF0-0xF7 */
+	0x533A,0x86C6,0x66F2,0x8EAF,0x5C48,0x9A71,0x6E20,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x83EE,0x83EF,0x83F3,0x83F4,0x83F5,0x83F6,0x83F7,0x83FA,/* 0x40-0x47 */
+	0x83FB,0x83FC,0x83FE,0x83FF,0x8400,0x8402,0x8405,0x8407,/* 0x48-0x4F */
+	0x8408,0x8409,0x840A,0x8410,0x8412,0x8413,0x8414,0x8415,/* 0x50-0x57 */
+	0x8416,0x8417,0x8419,0x841A,0x841B,0x841E,0x841F,0x8420,/* 0x58-0x5F */
+	0x8421,0x8422,0x8423,0x8429,0x842A,0x842B,0x842C,0x842D,/* 0x60-0x67 */
+	0x842E,0x842F,0x8430,0x8432,0x8433,0x8434,0x8435,0x8436,/* 0x68-0x6F */
+	0x8437,0x8439,0x843A,0x843B,0x843E,0x843F,0x8440,0x8441,/* 0x70-0x77 */
+	0x8442,0x8443,0x8444,0x8445,0x8447,0x8448,0x8449,0x0000,/* 0x78-0x7F */
+
+	0x844A,0x844B,0x844C,0x844D,0x844E,0x844F,0x8450,0x8452,/* 0x80-0x87 */
+	0x8453,0x8454,0x8455,0x8456,0x8458,0x845D,0x845E,0x845F,/* 0x88-0x8F */
+	0x8460,0x8462,0x8464,0x8465,0x8466,0x8467,0x8468,0x846A,/* 0x90-0x97 */
+	0x846E,0x846F,0x8470,0x8472,0x8474,0x8477,0x8479,0x847B,/* 0x98-0x9F */
+	0x847C,0x53D6,0x5A36,0x9F8B,0x8DA3,0x53BB,0x5708,0x98A7,/* 0xA0-0xA7 */
+	0x6743,0x919B,0x6CC9,0x5168,0x75CA,0x62F3,0x72AC,0x5238,/* 0xA8-0xAF */
+	0x529D,0x7F3A,0x7094,0x7638,0x5374,0x9E4A,0x69B7,0x786E,/* 0xB0-0xB7 */
+	0x96C0,0x88D9,0x7FA4,0x7136,0x71C3,0x5189,0x67D3,0x74E4,/* 0xB8-0xBF */
+	0x58E4,0x6518,0x56B7,0x8BA9,0x9976,0x6270,0x7ED5,0x60F9,/* 0xC0-0xC7 */
+	0x70ED,0x58EC,0x4EC1,0x4EBA,0x5FCD,0x97E7,0x4EFB,0x8BA4,/* 0xC8-0xCF */
+	0x5203,0x598A,0x7EAB,0x6254,0x4ECD,0x65E5,0x620E,0x8338,/* 0xD0-0xD7 */
+	0x84C9,0x8363,0x878D,0x7194,0x6EB6,0x5BB9,0x7ED2,0x5197,/* 0xD8-0xDF */
+	0x63C9,0x67D4,0x8089,0x8339,0x8815,0x5112,0x5B7A,0x5982,/* 0xE0-0xE7 */
+	0x8FB1,0x4E73,0x6C5D,0x5165,0x8925,0x8F6F,0x962E,0x854A,/* 0xE8-0xEF */
+	0x745E,0x9510,0x95F0,0x6DA6,0x82E5,0x5F31,0x6492,0x6D12,/* 0xF0-0xF7 */
+	0x8428,0x816E,0x9CC3,0x585E,0x8D5B,0x4E09,0x53C1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x847D,0x847E,0x847F,0x8480,0x8481,0x8483,0x8484,0x8485,/* 0x40-0x47 */
+	0x8486,0x848A,0x848D,0x848F,0x8490,0x8491,0x8492,0x8493,/* 0x48-0x4F */
+	0x8494,0x8495,0x8496,0x8498,0x849A,0x849B,0x849D,0x849E,/* 0x50-0x57 */
+	0x849F,0x84A0,0x84A2,0x84A3,0x84A4,0x84A5,0x84A6,0x84A7,/* 0x58-0x5F */
+	0x84A8,0x84A9,0x84AA,0x84AB,0x84AC,0x84AD,0x84AE,0x84B0,/* 0x60-0x67 */
+	0x84B1,0x84B3,0x84B5,0x84B6,0x84B7,0x84BB,0x84BC,0x84BE,/* 0x68-0x6F */
+	0x84C0,0x84C2,0x84C3,0x84C5,0x84C6,0x84C7,0x84C8,0x84CB,/* 0x70-0x77 */
+	0x84CC,0x84CE,0x84CF,0x84D2,0x84D4,0x84D5,0x84D7,0x0000,/* 0x78-0x7F */
+
+	0x84D8,0x84D9,0x84DA,0x84DB,0x84DC,0x84DE,0x84E1,0x84E2,/* 0x80-0x87 */
+	0x84E4,0x84E7,0x84E8,0x84E9,0x84EA,0x84EB,0x84ED,0x84EE,/* 0x88-0x8F */
+	0x84EF,0x84F1,0x84F2,0x84F3,0x84F4,0x84F5,0x84F6,0x84F7,/* 0x90-0x97 */
+	0x84F8,0x84F9,0x84FA,0x84FB,0x84FD,0x84FE,0x8500,0x8501,/* 0x98-0x9F */
+	0x8502,0x4F1E,0x6563,0x6851,0x55D3,0x4E27,0x6414,0x9A9A,/* 0xA0-0xA7 */
+	0x626B,0x5AC2,0x745F,0x8272,0x6DA9,0x68EE,0x50E7,0x838E,/* 0xA8-0xAF */
+	0x7802,0x6740,0x5239,0x6C99,0x7EB1,0x50BB,0x5565,0x715E,/* 0xB0-0xB7 */
+	0x7B5B,0x6652,0x73CA,0x82EB,0x6749,0x5C71,0x5220,0x717D,/* 0xB8-0xBF */
+	0x886B,0x95EA,0x9655,0x64C5,0x8D61,0x81B3,0x5584,0x6C55,/* 0xC0-0xC7 */
+	0x6247,0x7F2E,0x5892,0x4F24,0x5546,0x8D4F,0x664C,0x4E0A,/* 0xC8-0xCF */
+	0x5C1A,0x88F3,0x68A2,0x634E,0x7A0D,0x70E7,0x828D,0x52FA,/* 0xD0-0xD7 */
+	0x97F6,0x5C11,0x54E8,0x90B5,0x7ECD,0x5962,0x8D4A,0x86C7,/* 0xD8-0xDF */
+	0x820C,0x820D,0x8D66,0x6444,0x5C04,0x6151,0x6D89,0x793E,/* 0xE0-0xE7 */
+	0x8BBE,0x7837,0x7533,0x547B,0x4F38,0x8EAB,0x6DF1,0x5A20,/* 0xE8-0xEF */
+	0x7EC5,0x795E,0x6C88,0x5BA1,0x5A76,0x751A,0x80BE,0x614E,/* 0xF0-0xF7 */
+	0x6E17,0x58F0,0x751F,0x7525,0x7272,0x5347,0x7EF3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8503,0x8504,0x8505,0x8506,0x8507,0x8508,0x8509,0x850A,/* 0x40-0x47 */
+	0x850B,0x850D,0x850E,0x850F,0x8510,0x8512,0x8514,0x8515,/* 0x48-0x4F */
+	0x8516,0x8518,0x8519,0x851B,0x851C,0x851D,0x851E,0x8520,/* 0x50-0x57 */
+	0x8522,0x8523,0x8524,0x8525,0x8526,0x8527,0x8528,0x8529,/* 0x58-0x5F */
+	0x852A,0x852D,0x852E,0x852F,0x8530,0x8531,0x8532,0x8533,/* 0x60-0x67 */
+	0x8534,0x8535,0x8536,0x853E,0x853F,0x8540,0x8541,0x8542,/* 0x68-0x6F */
+	0x8544,0x8545,0x8546,0x8547,0x854B,0x854C,0x854D,0x854E,/* 0x70-0x77 */
+	0x854F,0x8550,0x8551,0x8552,0x8553,0x8554,0x8555,0x0000,/* 0x78-0x7F */
+
+	0x8557,0x8558,0x855A,0x855B,0x855C,0x855D,0x855F,0x8560,/* 0x80-0x87 */
+	0x8561,0x8562,0x8563,0x8565,0x8566,0x8567,0x8569,0x856A,/* 0x88-0x8F */
+	0x856B,0x856C,0x856D,0x856E,0x856F,0x8570,0x8571,0x8573,/* 0x90-0x97 */
+	0x8575,0x8576,0x8577,0x8578,0x857C,0x857D,0x857F,0x8580,/* 0x98-0x9F */
+	0x8581,0x7701,0x76DB,0x5269,0x80DC,0x5723,0x5E08,0x5931,/* 0xA0-0xA7 */
+	0x72EE,0x65BD,0x6E7F,0x8BD7,0x5C38,0x8671,0x5341,0x77F3,/* 0xA8-0xAF */
+	0x62FE,0x65F6,0x4EC0,0x98DF,0x8680,0x5B9E,0x8BC6,0x53F2,/* 0xB0-0xB7 */
+	0x77E2,0x4F7F,0x5C4E,0x9A76,0x59CB,0x5F0F,0x793A,0x58EB,/* 0xB8-0xBF */
+	0x4E16,0x67FF,0x4E8B,0x62ED,0x8A93,0x901D,0x52BF,0x662F,/* 0xC0-0xC7 */
+	0x55DC,0x566C,0x9002,0x4ED5,0x4F8D,0x91CA,0x9970,0x6C0F,/* 0xC8-0xCF */
+	0x5E02,0x6043,0x5BA4,0x89C6,0x8BD5,0x6536,0x624B,0x9996,/* 0xD0-0xD7 */
+	0x5B88,0x5BFF,0x6388,0x552E,0x53D7,0x7626,0x517D,0x852C,/* 0xD8-0xDF */
+	0x67A2,0x68B3,0x6B8A,0x6292,0x8F93,0x53D4,0x8212,0x6DD1,/* 0xE0-0xE7 */
+	0x758F,0x4E66,0x8D4E,0x5B70,0x719F,0x85AF,0x6691,0x66D9,/* 0xE8-0xEF */
+	0x7F72,0x8700,0x9ECD,0x9F20,0x5C5E,0x672F,0x8FF0,0x6811,/* 0xF0-0xF7 */
+	0x675F,0x620D,0x7AD6,0x5885,0x5EB6,0x6570,0x6F31,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8582,0x8583,0x8586,0x8588,0x8589,0x858A,0x858B,0x858C,/* 0x40-0x47 */
+	0x858D,0x858E,0x8590,0x8591,0x8592,0x8593,0x8594,0x8595,/* 0x48-0x4F */
+	0x8596,0x8597,0x8598,0x8599,0x859A,0x859D,0x859E,0x859F,/* 0x50-0x57 */
+	0x85A0,0x85A1,0x85A2,0x85A3,0x85A5,0x85A6,0x85A7,0x85A9,/* 0x58-0x5F */
+	0x85AB,0x85AC,0x85AD,0x85B1,0x85B2,0x85B3,0x85B4,0x85B5,/* 0x60-0x67 */
+	0x85B6,0x85B8,0x85BA,0x85BB,0x85BC,0x85BD,0x85BE,0x85BF,/* 0x68-0x6F */
+	0x85C0,0x85C2,0x85C3,0x85C4,0x85C5,0x85C6,0x85C7,0x85C8,/* 0x70-0x77 */
+	0x85CA,0x85CB,0x85CC,0x85CD,0x85CE,0x85D1,0x85D2,0x0000,/* 0x78-0x7F */
+
+	0x85D4,0x85D6,0x85D7,0x85D8,0x85D9,0x85DA,0x85DB,0x85DD,/* 0x80-0x87 */
+	0x85DE,0x85DF,0x85E0,0x85E1,0x85E2,0x85E3,0x85E5,0x85E6,/* 0x88-0x8F */
+	0x85E7,0x85E8,0x85EA,0x85EB,0x85EC,0x85ED,0x85EE,0x85EF,/* 0x90-0x97 */
+	0x85F0,0x85F1,0x85F2,0x85F3,0x85F4,0x85F5,0x85F6,0x85F7,/* 0x98-0x9F */
+	0x85F8,0x6055,0x5237,0x800D,0x6454,0x8870,0x7529,0x5E05,/* 0xA0-0xA7 */
+	0x6813,0x62F4,0x971C,0x53CC,0x723D,0x8C01,0x6C34,0x7761,/* 0xA8-0xAF */
+	0x7A0E,0x542E,0x77AC,0x987A,0x821C,0x8BF4,0x7855,0x6714,/* 0xB0-0xB7 */
+	0x70C1,0x65AF,0x6495,0x5636,0x601D,0x79C1,0x53F8,0x4E1D,/* 0xB8-0xBF */
+	0x6B7B,0x8086,0x5BFA,0x55E3,0x56DB,0x4F3A,0x4F3C,0x9972,/* 0xC0-0xC7 */
+	0x5DF3,0x677E,0x8038,0x6002,0x9882,0x9001,0x5B8B,0x8BBC,/* 0xC8-0xCF */
+	0x8BF5,0x641C,0x8258,0x64DE,0x55FD,0x82CF,0x9165,0x4FD7,/* 0xD0-0xD7 */
+	0x7D20,0x901F,0x7C9F,0x50F3,0x5851,0x6EAF,0x5BBF,0x8BC9,/* 0xD8-0xDF */
+	0x8083,0x9178,0x849C,0x7B97,0x867D,0x968B,0x968F,0x7EE5,/* 0xE0-0xE7 */
+	0x9AD3,0x788E,0x5C81,0x7A57,0x9042,0x96A7,0x795F,0x5B59,/* 0xE8-0xEF */
+	0x635F,0x7B0B,0x84D1,0x68AD,0x5506,0x7F29,0x7410,0x7D22,/* 0xF0-0xF7 */
+	0x9501,0x6240,0x584C,0x4ED6,0x5B83,0x5979,0x5854,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x85F9,0x85FA,0x85FC,0x85FD,0x85FE,0x8600,0x8601,0x8602,/* 0x40-0x47 */
+	0x8603,0x8604,0x8606,0x8607,0x8608,0x8609,0x860A,0x860B,/* 0x48-0x4F */
+	0x860C,0x860D,0x860E,0x860F,0x8610,0x8612,0x8613,0x8614,/* 0x50-0x57 */
+	0x8615,0x8617,0x8618,0x8619,0x861A,0x861B,0x861C,0x861D,/* 0x58-0x5F */
+	0x861E,0x861F,0x8620,0x8621,0x8622,0x8623,0x8624,0x8625,/* 0x60-0x67 */
+	0x8626,0x8628,0x862A,0x862B,0x862C,0x862D,0x862E,0x862F,/* 0x68-0x6F */
+	0x8630,0x8631,0x8632,0x8633,0x8634,0x8635,0x8636,0x8637,/* 0x70-0x77 */
+	0x8639,0x863A,0x863B,0x863D,0x863E,0x863F,0x8640,0x0000,/* 0x78-0x7F */
+
+	0x8641,0x8642,0x8643,0x8644,0x8645,0x8646,0x8647,0x8648,/* 0x80-0x87 */
+	0x8649,0x864A,0x864B,0x864C,0x8652,0x8653,0x8655,0x8656,/* 0x88-0x8F */
+	0x8657,0x8658,0x8659,0x865B,0x865C,0x865D,0x865F,0x8660,/* 0x90-0x97 */
+	0x8661,0x8663,0x8664,0x8665,0x8666,0x8667,0x8668,0x8669,/* 0x98-0x9F */
+	0x866A,0x736D,0x631E,0x8E4B,0x8E0F,0x80CE,0x82D4,0x62AC,/* 0xA0-0xA7 */
+	0x53F0,0x6CF0,0x915E,0x592A,0x6001,0x6C70,0x574D,0x644A,/* 0xA8-0xAF */
+	0x8D2A,0x762B,0x6EE9,0x575B,0x6A80,0x75F0,0x6F6D,0x8C2D,/* 0xB0-0xB7 */
+	0x8C08,0x5766,0x6BEF,0x8892,0x78B3,0x63A2,0x53F9,0x70AD,/* 0xB8-0xBF */
+	0x6C64,0x5858,0x642A,0x5802,0x68E0,0x819B,0x5510,0x7CD6,/* 0xC0-0xC7 */
+	0x5018,0x8EBA,0x6DCC,0x8D9F,0x70EB,0x638F,0x6D9B,0x6ED4,/* 0xC8-0xCF */
+	0x7EE6,0x8404,0x6843,0x9003,0x6DD8,0x9676,0x8BA8,0x5957,/* 0xD0-0xD7 */
+	0x7279,0x85E4,0x817E,0x75BC,0x8A8A,0x68AF,0x5254,0x8E22,/* 0xD8-0xDF */
+	0x9511,0x63D0,0x9898,0x8E44,0x557C,0x4F53,0x66FF,0x568F,/* 0xE0-0xE7 */
+	0x60D5,0x6D95,0x5243,0x5C49,0x5929,0x6DFB,0x586B,0x7530,/* 0xE8-0xEF */
+	0x751C,0x606C,0x8214,0x8146,0x6311,0x6761,0x8FE2,0x773A,/* 0xF0-0xF7 */
+	0x8DF3,0x8D34,0x94C1,0x5E16,0x5385,0x542C,0x70C3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x866D,0x866F,0x8670,0x8672,0x8673,0x8674,0x8675,0x8676,/* 0x40-0x47 */
+	0x8677,0x8678,0x8683,0x8684,0x8685,0x8686,0x8687,0x8688,/* 0x48-0x4F */
+	0x8689,0x868E,0x868F,0x8690,0x8691,0x8692,0x8694,0x8696,/* 0x50-0x57 */
+	0x8697,0x8698,0x8699,0x869A,0x869B,0x869E,0x869F,0x86A0,/* 0x58-0x5F */
+	0x86A1,0x86A2,0x86A5,0x86A6,0x86AB,0x86AD,0x86AE,0x86B2,/* 0x60-0x67 */
+	0x86B3,0x86B7,0x86B8,0x86B9,0x86BB,0x86BC,0x86BD,0x86BE,/* 0x68-0x6F */
+	0x86BF,0x86C1,0x86C2,0x86C3,0x86C5,0x86C8,0x86CC,0x86CD,/* 0x70-0x77 */
+	0x86D2,0x86D3,0x86D5,0x86D6,0x86D7,0x86DA,0x86DC,0x0000,/* 0x78-0x7F */
+
+	0x86DD,0x86E0,0x86E1,0x86E2,0x86E3,0x86E5,0x86E6,0x86E7,/* 0x80-0x87 */
+	0x86E8,0x86EA,0x86EB,0x86EC,0x86EF,0x86F5,0x86F6,0x86F7,/* 0x88-0x8F */
+	0x86FA,0x86FB,0x86FC,0x86FD,0x86FF,0x8701,0x8704,0x8705,/* 0x90-0x97 */
+	0x8706,0x870B,0x870C,0x870E,0x870F,0x8710,0x8711,0x8714,/* 0x98-0x9F */
+	0x8716,0x6C40,0x5EF7,0x505C,0x4EAD,0x5EAD,0x633A,0x8247,/* 0xA0-0xA7 */
+	0x901A,0x6850,0x916E,0x77B3,0x540C,0x94DC,0x5F64,0x7AE5,/* 0xA8-0xAF */
+	0x6876,0x6345,0x7B52,0x7EDF,0x75DB,0x5077,0x6295,0x5934,/* 0xB0-0xB7 */
+	0x900F,0x51F8,0x79C3,0x7A81,0x56FE,0x5F92,0x9014,0x6D82,/* 0xB8-0xBF */
+	0x5C60,0x571F,0x5410,0x5154,0x6E4D,0x56E2,0x63A8,0x9893,/* 0xC0-0xC7 */
+	0x817F,0x8715,0x892A,0x9000,0x541E,0x5C6F,0x81C0,0x62D6,/* 0xC8-0xCF */
+	0x6258,0x8131,0x9E35,0x9640,0x9A6E,0x9A7C,0x692D,0x59A5,/* 0xD0-0xD7 */
+	0x62D3,0x553E,0x6316,0x54C7,0x86D9,0x6D3C,0x5A03,0x74E6,/* 0xD8-0xDF */
+	0x889C,0x6B6A,0x5916,0x8C4C,0x5F2F,0x6E7E,0x73A9,0x987D,/* 0xE0-0xE7 */
+	0x4E38,0x70F7,0x5B8C,0x7897,0x633D,0x665A,0x7696,0x60CB,/* 0xE8-0xEF */
+	0x5B9B,0x5A49,0x4E07,0x8155,0x6C6A,0x738B,0x4EA1,0x6789,/* 0xF0-0xF7 */
+	0x7F51,0x5F80,0x65FA,0x671B,0x5FD8,0x5984,0x5A01,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8719,0x871B,0x871D,0x871F,0x8720,0x8724,0x8726,0x8727,/* 0x40-0x47 */
+	0x8728,0x872A,0x872B,0x872C,0x872D,0x872F,0x8730,0x8732,/* 0x48-0x4F */
+	0x8733,0x8735,0x8736,0x8738,0x8739,0x873A,0x873C,0x873D,/* 0x50-0x57 */
+	0x8740,0x8741,0x8742,0x8743,0x8744,0x8745,0x8746,0x874A,/* 0x58-0x5F */
+	0x874B,0x874D,0x874F,0x8750,0x8751,0x8752,0x8754,0x8755,/* 0x60-0x67 */
+	0x8756,0x8758,0x875A,0x875B,0x875C,0x875D,0x875E,0x875F,/* 0x68-0x6F */
+	0x8761,0x8762,0x8766,0x8767,0x8768,0x8769,0x876A,0x876B,/* 0x70-0x77 */
+	0x876C,0x876D,0x876F,0x8771,0x8772,0x8773,0x8775,0x0000,/* 0x78-0x7F */
+
+	0x8777,0x8778,0x8779,0x877A,0x877F,0x8780,0x8781,0x8784,/* 0x80-0x87 */
+	0x8786,0x8787,0x8789,0x878A,0x878C,0x878E,0x878F,0x8790,/* 0x88-0x8F */
+	0x8791,0x8792,0x8794,0x8795,0x8796,0x8798,0x8799,0x879A,/* 0x90-0x97 */
+	0x879B,0x879C,0x879D,0x879E,0x87A0,0x87A1,0x87A2,0x87A3,/* 0x98-0x9F */
+	0x87A4,0x5DCD,0x5FAE,0x5371,0x97E6,0x8FDD,0x6845,0x56F4,/* 0xA0-0xA7 */
+	0x552F,0x60DF,0x4E3A,0x6F4D,0x7EF4,0x82C7,0x840E,0x59D4,/* 0xA8-0xAF */
+	0x4F1F,0x4F2A,0x5C3E,0x7EAC,0x672A,0x851A,0x5473,0x754F,/* 0xB0-0xB7 */
+	0x80C3,0x5582,0x9B4F,0x4F4D,0x6E2D,0x8C13,0x5C09,0x6170,/* 0xB8-0xBF */
+	0x536B,0x761F,0x6E29,0x868A,0x6587,0x95FB,0x7EB9,0x543B,/* 0xC0-0xC7 */
+	0x7A33,0x7D0A,0x95EE,0x55E1,0x7FC1,0x74EE,0x631D,0x8717,/* 0xC8-0xCF */
+	0x6DA1,0x7A9D,0x6211,0x65A1,0x5367,0x63E1,0x6C83,0x5DEB,/* 0xD0-0xD7 */
+	0x545C,0x94A8,0x4E4C,0x6C61,0x8BEC,0x5C4B,0x65E0,0x829C,/* 0xD8-0xDF */
+	0x68A7,0x543E,0x5434,0x6BCB,0x6B66,0x4E94,0x6342,0x5348,/* 0xE0-0xE7 */
+	0x821E,0x4F0D,0x4FAE,0x575E,0x620A,0x96FE,0x6664,0x7269,/* 0xE8-0xEF */
+	0x52FF,0x52A1,0x609F,0x8BEF,0x6614,0x7199,0x6790,0x897F,/* 0xF0-0xF7 */
+	0x7852,0x77FD,0x6670,0x563B,0x5438,0x9521,0x727A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x87A5,0x87A6,0x87A7,0x87A9,0x87AA,0x87AE,0x87B0,0x87B1,/* 0x40-0x47 */
+	0x87B2,0x87B4,0x87B6,0x87B7,0x87B8,0x87B9,0x87BB,0x87BC,/* 0x48-0x4F */
+	0x87BE,0x87BF,0x87C1,0x87C2,0x87C3,0x87C4,0x87C5,0x87C7,/* 0x50-0x57 */
+	0x87C8,0x87C9,0x87CC,0x87CD,0x87CE,0x87CF,0x87D0,0x87D4,/* 0x58-0x5F */
+	0x87D5,0x87D6,0x87D7,0x87D8,0x87D9,0x87DA,0x87DC,0x87DD,/* 0x60-0x67 */
+	0x87DE,0x87DF,0x87E1,0x87E2,0x87E3,0x87E4,0x87E6,0x87E7,/* 0x68-0x6F */
+	0x87E8,0x87E9,0x87EB,0x87EC,0x87ED,0x87EF,0x87F0,0x87F1,/* 0x70-0x77 */
+	0x87F2,0x87F3,0x87F4,0x87F5,0x87F6,0x87F7,0x87F8,0x0000,/* 0x78-0x7F */
+
+	0x87FA,0x87FB,0x87FC,0x87FD,0x87FF,0x8800,0x8801,0x8802,/* 0x80-0x87 */
+	0x8804,0x8805,0x8806,0x8807,0x8808,0x8809,0x880B,0x880C,/* 0x88-0x8F */
+	0x880D,0x880E,0x880F,0x8810,0x8811,0x8812,0x8814,0x8817,/* 0x90-0x97 */
+	0x8818,0x8819,0x881A,0x881C,0x881D,0x881E,0x881F,0x8820,/* 0x98-0x9F */
+	0x8823,0x7A00,0x606F,0x5E0C,0x6089,0x819D,0x5915,0x60DC,/* 0xA0-0xA7 */
+	0x7184,0x70EF,0x6EAA,0x6C50,0x7280,0x6A84,0x88AD,0x5E2D,/* 0xA8-0xAF */
+	0x4E60,0x5AB3,0x559C,0x94E3,0x6D17,0x7CFB,0x9699,0x620F,/* 0xB0-0xB7 */
+	0x7EC6,0x778E,0x867E,0x5323,0x971E,0x8F96,0x6687,0x5CE1,/* 0xB8-0xBF */
+	0x4FA0,0x72ED,0x4E0B,0x53A6,0x590F,0x5413,0x6380,0x9528,/* 0xC0-0xC7 */
+	0x5148,0x4ED9,0x9C9C,0x7EA4,0x54B8,0x8D24,0x8854,0x8237,/* 0xC8-0xCF */
+	0x95F2,0x6D8E,0x5F26,0x5ACC,0x663E,0x9669,0x73B0,0x732E,/* 0xD0-0xD7 */
+	0x53BF,0x817A,0x9985,0x7FA1,0x5BAA,0x9677,0x9650,0x7EBF,/* 0xD8-0xDF */
+	0x76F8,0x53A2,0x9576,0x9999,0x7BB1,0x8944,0x6E58,0x4E61,/* 0xE0-0xE7 */
+	0x7FD4,0x7965,0x8BE6,0x60F3,0x54CD,0x4EAB,0x9879,0x5DF7,/* 0xE8-0xEF */
+	0x6A61,0x50CF,0x5411,0x8C61,0x8427,0x785D,0x9704,0x524A,/* 0xF0-0xF7 */
+	0x54EE,0x56A3,0x9500,0x6D88,0x5BB5,0x6DC6,0x6653,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8824,0x8825,0x8826,0x8827,0x8828,0x8829,0x882A,0x882B,/* 0x40-0x47 */
+	0x882C,0x882D,0x882E,0x882F,0x8830,0x8831,0x8833,0x8834,/* 0x48-0x4F */
+	0x8835,0x8836,0x8837,0x8838,0x883A,0x883B,0x883D,0x883E,/* 0x50-0x57 */
+	0x883F,0x8841,0x8842,0x8843,0x8846,0x8847,0x8848,0x8849,/* 0x58-0x5F */
+	0x884A,0x884B,0x884E,0x884F,0x8850,0x8851,0x8852,0x8853,/* 0x60-0x67 */
+	0x8855,0x8856,0x8858,0x885A,0x885B,0x885C,0x885D,0x885E,/* 0x68-0x6F */
+	0x885F,0x8860,0x8866,0x8867,0x886A,0x886D,0x886F,0x8871,/* 0x70-0x77 */
+	0x8873,0x8874,0x8875,0x8876,0x8878,0x8879,0x887A,0x0000,/* 0x78-0x7F */
+
+	0x887B,0x887C,0x8880,0x8883,0x8886,0x8887,0x8889,0x888A,/* 0x80-0x87 */
+	0x888C,0x888E,0x888F,0x8890,0x8891,0x8893,0x8894,0x8895,/* 0x88-0x8F */
+	0x8897,0x8898,0x8899,0x889A,0x889B,0x889D,0x889E,0x889F,/* 0x90-0x97 */
+	0x88A0,0x88A1,0x88A3,0x88A5,0x88A6,0x88A7,0x88A8,0x88A9,/* 0x98-0x9F */
+	0x88AA,0x5C0F,0x5B5D,0x6821,0x8096,0x5578,0x7B11,0x6548,/* 0xA0-0xA7 */
+	0x6954,0x4E9B,0x6B47,0x874E,0x978B,0x534F,0x631F,0x643A,/* 0xA8-0xAF */
+	0x90AA,0x659C,0x80C1,0x8C10,0x5199,0x68B0,0x5378,0x87F9,/* 0xB0-0xB7 */
+	0x61C8,0x6CC4,0x6CFB,0x8C22,0x5C51,0x85AA,0x82AF,0x950C,/* 0xB8-0xBF */
+	0x6B23,0x8F9B,0x65B0,0x5FFB,0x5FC3,0x4FE1,0x8845,0x661F,/* 0xC0-0xC7 */
+	0x8165,0x7329,0x60FA,0x5174,0x5211,0x578B,0x5F62,0x90A2,/* 0xC8-0xCF */
+	0x884C,0x9192,0x5E78,0x674F,0x6027,0x59D3,0x5144,0x51F6,/* 0xD0-0xD7 */
+	0x80F8,0x5308,0x6C79,0x96C4,0x718A,0x4F11,0x4FEE,0x7F9E,/* 0xD8-0xDF */
+	0x673D,0x55C5,0x9508,0x79C0,0x8896,0x7EE3,0x589F,0x620C,/* 0xE0-0xE7 */
+	0x9700,0x865A,0x5618,0x987B,0x5F90,0x8BB8,0x84C4,0x9157,/* 0xE8-0xEF */
+	0x53D9,0x65ED,0x5E8F,0x755C,0x6064,0x7D6E,0x5A7F,0x7EEA,/* 0xF0-0xF7 */
+	0x7EED,0x8F69,0x55A7,0x5BA3,0x60AC,0x65CB,0x7384,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x88AC,0x88AE,0x88AF,0x88B0,0x88B2,0x88B3,0x88B4,0x88B5,/* 0x40-0x47 */
+	0x88B6,0x88B8,0x88B9,0x88BA,0x88BB,0x88BD,0x88BE,0x88BF,/* 0x48-0x4F */
+	0x88C0,0x88C3,0x88C4,0x88C7,0x88C8,0x88CA,0x88CB,0x88CC,/* 0x50-0x57 */
+	0x88CD,0x88CF,0x88D0,0x88D1,0x88D3,0x88D6,0x88D7,0x88DA,/* 0x58-0x5F */
+	0x88DB,0x88DC,0x88DD,0x88DE,0x88E0,0x88E1,0x88E6,0x88E7,/* 0x60-0x67 */
+	0x88E9,0x88EA,0x88EB,0x88EC,0x88ED,0x88EE,0x88EF,0x88F2,/* 0x68-0x6F */
+	0x88F5,0x88F6,0x88F7,0x88FA,0x88FB,0x88FD,0x88FF,0x8900,/* 0x70-0x77 */
+	0x8901,0x8903,0x8904,0x8905,0x8906,0x8907,0x8908,0x0000,/* 0x78-0x7F */
+
+	0x8909,0x890B,0x890C,0x890D,0x890E,0x890F,0x8911,0x8914,/* 0x80-0x87 */
+	0x8915,0x8916,0x8917,0x8918,0x891C,0x891D,0x891E,0x891F,/* 0x88-0x8F */
+	0x8920,0x8922,0x8923,0x8924,0x8926,0x8927,0x8928,0x8929,/* 0x90-0x97 */
+	0x892C,0x892D,0x892E,0x892F,0x8931,0x8932,0x8933,0x8935,/* 0x98-0x9F */
+	0x8937,0x9009,0x7663,0x7729,0x7EDA,0x9774,0x859B,0x5B66,/* 0xA0-0xA7 */
+	0x7A74,0x96EA,0x8840,0x52CB,0x718F,0x5FAA,0x65EC,0x8BE2,/* 0xA8-0xAF */
+	0x5BFB,0x9A6F,0x5DE1,0x6B89,0x6C5B,0x8BAD,0x8BAF,0x900A,/* 0xB0-0xB7 */
+	0x8FC5,0x538B,0x62BC,0x9E26,0x9E2D,0x5440,0x4E2B,0x82BD,/* 0xB8-0xBF */
+	0x7259,0x869C,0x5D16,0x8859,0x6DAF,0x96C5,0x54D1,0x4E9A,/* 0xC0-0xC7 */
+	0x8BB6,0x7109,0x54BD,0x9609,0x70DF,0x6DF9,0x76D0,0x4E25,/* 0xC8-0xCF */
+	0x7814,0x8712,0x5CA9,0x5EF6,0x8A00,0x989C,0x960E,0x708E,/* 0xD0-0xD7 */
+	0x6CBF,0x5944,0x63A9,0x773C,0x884D,0x6F14,0x8273,0x5830,/* 0xD8-0xDF */
+	0x71D5,0x538C,0x781A,0x96C1,0x5501,0x5F66,0x7130,0x5BB4,/* 0xE0-0xE7 */
+	0x8C1A,0x9A8C,0x6B83,0x592E,0x9E2F,0x79E7,0x6768,0x626C,/* 0xE8-0xEF */
+	0x4F6F,0x75A1,0x7F8A,0x6D0B,0x9633,0x6C27,0x4EF0,0x75D2,/* 0xF0-0xF7 */
+	0x517B,0x6837,0x6F3E,0x9080,0x8170,0x5996,0x7476,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8938,0x8939,0x893A,0x893B,0x893C,0x893D,0x893E,0x893F,/* 0x40-0x47 */
+	0x8940,0x8942,0x8943,0x8945,0x8946,0x8947,0x8948,0x8949,/* 0x48-0x4F */
+	0x894A,0x894B,0x894C,0x894D,0x894E,0x894F,0x8950,0x8951,/* 0x50-0x57 */
+	0x8952,0x8953,0x8954,0x8955,0x8956,0x8957,0x8958,0x8959,/* 0x58-0x5F */
+	0x895A,0x895B,0x895C,0x895D,0x8960,0x8961,0x8962,0x8963,/* 0x60-0x67 */
+	0x8964,0x8965,0x8967,0x8968,0x8969,0x896A,0x896B,0x896C,/* 0x68-0x6F */
+	0x896D,0x896E,0x896F,0x8970,0x8971,0x8972,0x8973,0x8974,/* 0x70-0x77 */
+	0x8975,0x8976,0x8977,0x8978,0x8979,0x897A,0x897C,0x0000,/* 0x78-0x7F */
+
+	0x897D,0x897E,0x8980,0x8982,0x8984,0x8985,0x8987,0x8988,/* 0x80-0x87 */
+	0x8989,0x898A,0x898B,0x898C,0x898D,0x898E,0x898F,0x8990,/* 0x88-0x8F */
+	0x8991,0x8992,0x8993,0x8994,0x8995,0x8996,0x8997,0x8998,/* 0x90-0x97 */
+	0x8999,0x899A,0x899B,0x899C,0x899D,0x899E,0x899F,0x89A0,/* 0x98-0x9F */
+	0x89A1,0x6447,0x5C27,0x9065,0x7A91,0x8C23,0x59DA,0x54AC,/* 0xA0-0xA7 */
+	0x8200,0x836F,0x8981,0x8000,0x6930,0x564E,0x8036,0x7237,/* 0xA8-0xAF */
+	0x91CE,0x51B6,0x4E5F,0x9875,0x6396,0x4E1A,0x53F6,0x66F3,/* 0xB0-0xB7 */
+	0x814B,0x591C,0x6DB2,0x4E00,0x58F9,0x533B,0x63D6,0x94F1,/* 0xB8-0xBF */
+	0x4F9D,0x4F0A,0x8863,0x9890,0x5937,0x9057,0x79FB,0x4EEA,/* 0xC0-0xC7 */
+	0x80F0,0x7591,0x6C82,0x5B9C,0x59E8,0x5F5D,0x6905,0x8681,/* 0xC8-0xCF */
+	0x501A,0x5DF2,0x4E59,0x77E3,0x4EE5,0x827A,0x6291,0x6613,/* 0xD0-0xD7 */
+	0x9091,0x5C79,0x4EBF,0x5F79,0x81C6,0x9038,0x8084,0x75AB,/* 0xD8-0xDF */
+	0x4EA6,0x88D4,0x610F,0x6BC5,0x5FC6,0x4E49,0x76CA,0x6EA2,/* 0xE0-0xE7 */
+	0x8BE3,0x8BAE,0x8C0A,0x8BD1,0x5F02,0x7FFC,0x7FCC,0x7ECE,/* 0xE8-0xEF */
+	0x8335,0x836B,0x56E0,0x6BB7,0x97F3,0x9634,0x59FB,0x541F,/* 0xF0-0xF7 */
+	0x94F6,0x6DEB,0x5BC5,0x996E,0x5C39,0x5F15,0x9690,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x89A2,0x89A3,0x89A4,0x89A5,0x89A6,0x89A7,0x89A8,0x89A9,/* 0x40-0x47 */
+	0x89AA,0x89AB,0x89AC,0x89AD,0x89AE,0x89AF,0x89B0,0x89B1,/* 0x48-0x4F */
+	0x89B2,0x89B3,0x89B4,0x89B5,0x89B6,0x89B7,0x89B8,0x89B9,/* 0x50-0x57 */
+	0x89BA,0x89BB,0x89BC,0x89BD,0x89BE,0x89BF,0x89C0,0x89C3,/* 0x58-0x5F */
+	0x89CD,0x89D3,0x89D4,0x89D5,0x89D7,0x89D8,0x89D9,0x89DB,/* 0x60-0x67 */
+	0x89DD,0x89DF,0x89E0,0x89E1,0x89E2,0x89E4,0x89E7,0x89E8,/* 0x68-0x6F */
+	0x89E9,0x89EA,0x89EC,0x89ED,0x89EE,0x89F0,0x89F1,0x89F2,/* 0x70-0x77 */
+	0x89F4,0x89F5,0x89F6,0x89F7,0x89F8,0x89F9,0x89FA,0x0000,/* 0x78-0x7F */
+
+	0x89FB,0x89FC,0x89FD,0x89FE,0x89FF,0x8A01,0x8A02,0x8A03,/* 0x80-0x87 */
+	0x8A04,0x8A05,0x8A06,0x8A08,0x8A09,0x8A0A,0x8A0B,0x8A0C,/* 0x88-0x8F */
+	0x8A0D,0x8A0E,0x8A0F,0x8A10,0x8A11,0x8A12,0x8A13,0x8A14,/* 0x90-0x97 */
+	0x8A15,0x8A16,0x8A17,0x8A18,0x8A19,0x8A1A,0x8A1B,0x8A1C,/* 0x98-0x9F */
+	0x8A1D,0x5370,0x82F1,0x6A31,0x5A74,0x9E70,0x5E94,0x7F28,/* 0xA0-0xA7 */
+	0x83B9,0x8424,0x8425,0x8367,0x8747,0x8FCE,0x8D62,0x76C8,/* 0xA8-0xAF */
+	0x5F71,0x9896,0x786C,0x6620,0x54DF,0x62E5,0x4F63,0x81C3,/* 0xB0-0xB7 */
+	0x75C8,0x5EB8,0x96CD,0x8E0A,0x86F9,0x548F,0x6CF3,0x6D8C,/* 0xB8-0xBF */
+	0x6C38,0x607F,0x52C7,0x7528,0x5E7D,0x4F18,0x60A0,0x5FE7,/* 0xC0-0xC7 */
+	0x5C24,0x7531,0x90AE,0x94C0,0x72B9,0x6CB9,0x6E38,0x9149,/* 0xC8-0xCF */
+	0x6709,0x53CB,0x53F3,0x4F51,0x91C9,0x8BF1,0x53C8,0x5E7C,/* 0xD0-0xD7 */
+	0x8FC2,0x6DE4,0x4E8E,0x76C2,0x6986,0x865E,0x611A,0x8206,/* 0xD8-0xDF */
+	0x4F59,0x4FDE,0x903E,0x9C7C,0x6109,0x6E1D,0x6E14,0x9685,/* 0xE0-0xE7 */
+	0x4E88,0x5A31,0x96E8,0x4E0E,0x5C7F,0x79B9,0x5B87,0x8BED,/* 0xE8-0xEF */
+	0x7FBD,0x7389,0x57DF,0x828B,0x90C1,0x5401,0x9047,0x55BB,/* 0xF0-0xF7 */
+	0x5CEA,0x5FA1,0x6108,0x6B32,0x72F1,0x80B2,0x8A89,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8A1E,0x8A1F,0x8A20,0x8A21,0x8A22,0x8A23,0x8A24,0x8A25,/* 0x40-0x47 */
+	0x8A26,0x8A27,0x8A28,0x8A29,0x8A2A,0x8A2B,0x8A2C,0x8A2D,/* 0x48-0x4F */
+	0x8A2E,0x8A2F,0x8A30,0x8A31,0x8A32,0x8A33,0x8A34,0x8A35,/* 0x50-0x57 */
+	0x8A36,0x8A37,0x8A38,0x8A39,0x8A3A,0x8A3B,0x8A3C,0x8A3D,/* 0x58-0x5F */
+	0x8A3F,0x8A40,0x8A41,0x8A42,0x8A43,0x8A44,0x8A45,0x8A46,/* 0x60-0x67 */
+	0x8A47,0x8A49,0x8A4A,0x8A4B,0x8A4C,0x8A4D,0x8A4E,0x8A4F,/* 0x68-0x6F */
+	0x8A50,0x8A51,0x8A52,0x8A53,0x8A54,0x8A55,0x8A56,0x8A57,/* 0x70-0x77 */
+	0x8A58,0x8A59,0x8A5A,0x8A5B,0x8A5C,0x8A5D,0x8A5E,0x0000,/* 0x78-0x7F */
+
+	0x8A5F,0x8A60,0x8A61,0x8A62,0x8A63,0x8A64,0x8A65,0x8A66,/* 0x80-0x87 */
+	0x8A67,0x8A68,0x8A69,0x8A6A,0x8A6B,0x8A6C,0x8A6D,0x8A6E,/* 0x88-0x8F */
+	0x8A6F,0x8A70,0x8A71,0x8A72,0x8A73,0x8A74,0x8A75,0x8A76,/* 0x90-0x97 */
+	0x8A77,0x8A78,0x8A7A,0x8A7B,0x8A7C,0x8A7D,0x8A7E,0x8A7F,/* 0x98-0x9F */
+	0x8A80,0x6D74,0x5BD3,0x88D5,0x9884,0x8C6B,0x9A6D,0x9E33,/* 0xA0-0xA7 */
+	0x6E0A,0x51A4,0x5143,0x57A3,0x8881,0x539F,0x63F4,0x8F95,/* 0xA8-0xAF */
+	0x56ED,0x5458,0x5706,0x733F,0x6E90,0x7F18,0x8FDC,0x82D1,/* 0xB0-0xB7 */
+	0x613F,0x6028,0x9662,0x66F0,0x7EA6,0x8D8A,0x8DC3,0x94A5,/* 0xB8-0xBF */
+	0x5CB3,0x7CA4,0x6708,0x60A6,0x9605,0x8018,0x4E91,0x90E7,/* 0xC0-0xC7 */
+	0x5300,0x9668,0x5141,0x8FD0,0x8574,0x915D,0x6655,0x97F5,/* 0xC8-0xCF */
+	0x5B55,0x531D,0x7838,0x6742,0x683D,0x54C9,0x707E,0x5BB0,/* 0xD0-0xD7 */
+	0x8F7D,0x518D,0x5728,0x54B1,0x6512,0x6682,0x8D5E,0x8D43,/* 0xD8-0xDF */
+	0x810F,0x846C,0x906D,0x7CDF,0x51FF,0x85FB,0x67A3,0x65E9,/* 0xE0-0xE7 */
+	0x6FA1,0x86A4,0x8E81,0x566A,0x9020,0x7682,0x7076,0x71E5,/* 0xE8-0xEF */
+	0x8D23,0x62E9,0x5219,0x6CFD,0x8D3C,0x600E,0x589E,0x618E,/* 0xF0-0xF7 */
+	0x66FE,0x8D60,0x624E,0x55B3,0x6E23,0x672D,0x8F67,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8A81,0x8A82,0x8A83,0x8A84,0x8A85,0x8A86,0x8A87,0x8A88,/* 0x40-0x47 */
+	0x8A8B,0x8A8C,0x8A8D,0x8A8E,0x8A8F,0x8A90,0x8A91,0x8A92,/* 0x48-0x4F */
+	0x8A94,0x8A95,0x8A96,0x8A97,0x8A98,0x8A99,0x8A9A,0x8A9B,/* 0x50-0x57 */
+	0x8A9C,0x8A9D,0x8A9E,0x8A9F,0x8AA0,0x8AA1,0x8AA2,0x8AA3,/* 0x58-0x5F */
+	0x8AA4,0x8AA5,0x8AA6,0x8AA7,0x8AA8,0x8AA9,0x8AAA,0x8AAB,/* 0x60-0x67 */
+	0x8AAC,0x8AAD,0x8AAE,0x8AAF,0x8AB0,0x8AB1,0x8AB2,0x8AB3,/* 0x68-0x6F */
+	0x8AB4,0x8AB5,0x8AB6,0x8AB7,0x8AB8,0x8AB9,0x8ABA,0x8ABB,/* 0x70-0x77 */
+	0x8ABC,0x8ABD,0x8ABE,0x8ABF,0x8AC0,0x8AC1,0x8AC2,0x0000,/* 0x78-0x7F */
+
+	0x8AC3,0x8AC4,0x8AC5,0x8AC6,0x8AC7,0x8AC8,0x8AC9,0x8ACA,/* 0x80-0x87 */
+	0x8ACB,0x8ACC,0x8ACD,0x8ACE,0x8ACF,0x8AD0,0x8AD1,0x8AD2,/* 0x88-0x8F */
+	0x8AD3,0x8AD4,0x8AD5,0x8AD6,0x8AD7,0x8AD8,0x8AD9,0x8ADA,/* 0x90-0x97 */
+	0x8ADB,0x8ADC,0x8ADD,0x8ADE,0x8ADF,0x8AE0,0x8AE1,0x8AE2,/* 0x98-0x9F */
+	0x8AE3,0x94E1,0x95F8,0x7728,0x6805,0x69A8,0x548B,0x4E4D,/* 0xA0-0xA7 */
+	0x70B8,0x8BC8,0x6458,0x658B,0x5B85,0x7A84,0x503A,0x5BE8,/* 0xA8-0xAF */
+	0x77BB,0x6BE1,0x8A79,0x7C98,0x6CBE,0x76CF,0x65A9,0x8F97,/* 0xB0-0xB7 */
+	0x5D2D,0x5C55,0x8638,0x6808,0x5360,0x6218,0x7AD9,0x6E5B,/* 0xB8-0xBF */
+	0x7EFD,0x6A1F,0x7AE0,0x5F70,0x6F33,0x5F20,0x638C,0x6DA8,/* 0xC0-0xC7 */
+	0x6756,0x4E08,0x5E10,0x8D26,0x4ED7,0x80C0,0x7634,0x969C,/* 0xC8-0xCF */
+	0x62DB,0x662D,0x627E,0x6CBC,0x8D75,0x7167,0x7F69,0x5146,/* 0xD0-0xD7 */
+	0x8087,0x53EC,0x906E,0x6298,0x54F2,0x86F0,0x8F99,0x8005,/* 0xD8-0xDF */
+	0x9517,0x8517,0x8FD9,0x6D59,0x73CD,0x659F,0x771F,0x7504,/* 0xE0-0xE7 */
+	0x7827,0x81FB,0x8D1E,0x9488,0x4FA6,0x6795,0x75B9,0x8BCA,/* 0xE8-0xEF */
+	0x9707,0x632F,0x9547,0x9635,0x84B8,0x6323,0x7741,0x5F81,/* 0xF0-0xF7 */
+	0x72F0,0x4E89,0x6014,0x6574,0x62EF,0x6B63,0x653F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8AE4,0x8AE5,0x8AE6,0x8AE7,0x8AE8,0x8AE9,0x8AEA,0x8AEB,/* 0x40-0x47 */
+	0x8AEC,0x8AED,0x8AEE,0x8AEF,0x8AF0,0x8AF1,0x8AF2,0x8AF3,/* 0x48-0x4F */
+	0x8AF4,0x8AF5,0x8AF6,0x8AF7,0x8AF8,0x8AF9,0x8AFA,0x8AFB,/* 0x50-0x57 */
+	0x8AFC,0x8AFD,0x8AFE,0x8AFF,0x8B00,0x8B01,0x8B02,0x8B03,/* 0x58-0x5F */
+	0x8B04,0x8B05,0x8B06,0x8B08,0x8B09,0x8B0A,0x8B0B,0x8B0C,/* 0x60-0x67 */
+	0x8B0D,0x8B0E,0x8B0F,0x8B10,0x8B11,0x8B12,0x8B13,0x8B14,/* 0x68-0x6F */
+	0x8B15,0x8B16,0x8B17,0x8B18,0x8B19,0x8B1A,0x8B1B,0x8B1C,/* 0x70-0x77 */
+	0x8B1D,0x8B1E,0x8B1F,0x8B20,0x8B21,0x8B22,0x8B23,0x0000,/* 0x78-0x7F */
+
+	0x8B24,0x8B25,0x8B27,0x8B28,0x8B29,0x8B2A,0x8B2B,0x8B2C,/* 0x80-0x87 */
+	0x8B2D,0x8B2E,0x8B2F,0x8B30,0x8B31,0x8B32,0x8B33,0x8B34,/* 0x88-0x8F */
+	0x8B35,0x8B36,0x8B37,0x8B38,0x8B39,0x8B3A,0x8B3B,0x8B3C,/* 0x90-0x97 */
+	0x8B3D,0x8B3E,0x8B3F,0x8B40,0x8B41,0x8B42,0x8B43,0x8B44,/* 0x98-0x9F */
+	0x8B45,0x5E27,0x75C7,0x90D1,0x8BC1,0x829D,0x679D,0x652F,/* 0xA0-0xA7 */
+	0x5431,0x8718,0x77E5,0x80A2,0x8102,0x6C41,0x4E4B,0x7EC7,/* 0xA8-0xAF */
+	0x804C,0x76F4,0x690D,0x6B96,0x6267,0x503C,0x4F84,0x5740,/* 0xB0-0xB7 */
+	0x6307,0x6B62,0x8DBE,0x53EA,0x65E8,0x7EB8,0x5FD7,0x631A,/* 0xB8-0xBF */
+	0x63B7,0x81F3,0x81F4,0x7F6E,0x5E1C,0x5CD9,0x5236,0x667A,/* 0xC0-0xC7 */
+	0x79E9,0x7A1A,0x8D28,0x7099,0x75D4,0x6EDE,0x6CBB,0x7A92,/* 0xC8-0xCF */
+	0x4E2D,0x76C5,0x5FE0,0x949F,0x8877,0x7EC8,0x79CD,0x80BF,/* 0xD0-0xD7 */
+	0x91CD,0x4EF2,0x4F17,0x821F,0x5468,0x5DDE,0x6D32,0x8BCC,/* 0xD8-0xDF */
+	0x7CA5,0x8F74,0x8098,0x5E1A,0x5492,0x76B1,0x5B99,0x663C,/* 0xE0-0xE7 */
+	0x9AA4,0x73E0,0x682A,0x86DB,0x6731,0x732A,0x8BF8,0x8BDB,/* 0xE8-0xEF */
+	0x9010,0x7AF9,0x70DB,0x716E,0x62C4,0x77A9,0x5631,0x4E3B,/* 0xF0-0xF7 */
+	0x8457,0x67F1,0x52A9,0x86C0,0x8D2E,0x94F8,0x7B51,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8B46,0x8B47,0x8B48,0x8B49,0x8B4A,0x8B4B,0x8B4C,0x8B4D,/* 0x40-0x47 */
+	0x8B4E,0x8B4F,0x8B50,0x8B51,0x8B52,0x8B53,0x8B54,0x8B55,/* 0x48-0x4F */
+	0x8B56,0x8B57,0x8B58,0x8B59,0x8B5A,0x8B5B,0x8B5C,0x8B5D,/* 0x50-0x57 */
+	0x8B5E,0x8B5F,0x8B60,0x8B61,0x8B62,0x8B63,0x8B64,0x8B65,/* 0x58-0x5F */
+	0x8B67,0x8B68,0x8B69,0x8B6A,0x8B6B,0x8B6D,0x8B6E,0x8B6F,/* 0x60-0x67 */
+	0x8B70,0x8B71,0x8B72,0x8B73,0x8B74,0x8B75,0x8B76,0x8B77,/* 0x68-0x6F */
+	0x8B78,0x8B79,0x8B7A,0x8B7B,0x8B7C,0x8B7D,0x8B7E,0x8B7F,/* 0x70-0x77 */
+	0x8B80,0x8B81,0x8B82,0x8B83,0x8B84,0x8B85,0x8B86,0x0000,/* 0x78-0x7F */
+
+	0x8B87,0x8B88,0x8B89,0x8B8A,0x8B8B,0x8B8C,0x8B8D,0x8B8E,/* 0x80-0x87 */
+	0x8B8F,0x8B90,0x8B91,0x8B92,0x8B93,0x8B94,0x8B95,0x8B96,/* 0x88-0x8F */
+	0x8B97,0x8B98,0x8B99,0x8B9A,0x8B9B,0x8B9C,0x8B9D,0x8B9E,/* 0x90-0x97 */
+	0x8B9F,0x8BAC,0x8BB1,0x8BBB,0x8BC7,0x8BD0,0x8BEA,0x8C09,/* 0x98-0x9F */
+	0x8C1E,0x4F4F,0x6CE8,0x795D,0x9A7B,0x6293,0x722A,0x62FD,/* 0xA0-0xA7 */
+	0x4E13,0x7816,0x8F6C,0x64B0,0x8D5A,0x7BC6,0x6869,0x5E84,/* 0xA8-0xAF */
+	0x88C5,0x5986,0x649E,0x58EE,0x72B6,0x690E,0x9525,0x8FFD,/* 0xB0-0xB7 */
+	0x8D58,0x5760,0x7F00,0x8C06,0x51C6,0x6349,0x62D9,0x5353,/* 0xB8-0xBF */
+	0x684C,0x7422,0x8301,0x914C,0x5544,0x7740,0x707C,0x6D4A,/* 0xC0-0xC7 */
+	0x5179,0x54A8,0x8D44,0x59FF,0x6ECB,0x6DC4,0x5B5C,0x7D2B,/* 0xC8-0xCF */
+	0x4ED4,0x7C7D,0x6ED3,0x5B50,0x81EA,0x6E0D,0x5B57,0x9B03,/* 0xD0-0xD7 */
+	0x68D5,0x8E2A,0x5B97,0x7EFC,0x603B,0x7EB5,0x90B9,0x8D70,/* 0xD8-0xDF */
+	0x594F,0x63CD,0x79DF,0x8DB3,0x5352,0x65CF,0x7956,0x8BC5,/* 0xE0-0xE7 */
+	0x963B,0x7EC4,0x94BB,0x7E82,0x5634,0x9189,0x6700,0x7F6A,/* 0xE8-0xEF */
+	0x5C0A,0x9075,0x6628,0x5DE6,0x4F50,0x67DE,0x505A,0x4F5C,/* 0xF0-0xF7 */
+	0x5750,0x5EA7,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8C38,0x8C39,0x8C3A,0x8C3B,0x8C3C,0x8C3D,0x8C3E,0x8C3F,/* 0x40-0x47 */
+	0x8C40,0x8C42,0x8C43,0x8C44,0x8C45,0x8C48,0x8C4A,0x8C4B,/* 0x48-0x4F */
+	0x8C4D,0x8C4E,0x8C4F,0x8C50,0x8C51,0x8C52,0x8C53,0x8C54,/* 0x50-0x57 */
+	0x8C56,0x8C57,0x8C58,0x8C59,0x8C5B,0x8C5C,0x8C5D,0x8C5E,/* 0x58-0x5F */
+	0x8C5F,0x8C60,0x8C63,0x8C64,0x8C65,0x8C66,0x8C67,0x8C68,/* 0x60-0x67 */
+	0x8C69,0x8C6C,0x8C6D,0x8C6E,0x8C6F,0x8C70,0x8C71,0x8C72,/* 0x68-0x6F */
+	0x8C74,0x8C75,0x8C76,0x8C77,0x8C7B,0x8C7C,0x8C7D,0x8C7E,/* 0x70-0x77 */
+	0x8C7F,0x8C80,0x8C81,0x8C83,0x8C84,0x8C86,0x8C87,0x0000,/* 0x78-0x7F */
+
+	0x8C88,0x8C8B,0x8C8D,0x8C8E,0x8C8F,0x8C90,0x8C91,0x8C92,/* 0x80-0x87 */
+	0x8C93,0x8C95,0x8C96,0x8C97,0x8C99,0x8C9A,0x8C9B,0x8C9C,/* 0x88-0x8F */
+	0x8C9D,0x8C9E,0x8C9F,0x8CA0,0x8CA1,0x8CA2,0x8CA3,0x8CA4,/* 0x90-0x97 */
+	0x8CA5,0x8CA6,0x8CA7,0x8CA8,0x8CA9,0x8CAA,0x8CAB,0x8CAC,/* 0x98-0x9F */
+	0x8CAD,0x4E8D,0x4E0C,0x5140,0x4E10,0x5EFF,0x5345,0x4E15,/* 0xA0-0xA7 */
+	0x4E98,0x4E1E,0x9B32,0x5B6C,0x5669,0x4E28,0x79BA,0x4E3F,/* 0xA8-0xAF */
+	0x5315,0x4E47,0x592D,0x723B,0x536E,0x6C10,0x56DF,0x80E4,/* 0xB0-0xB7 */
+	0x9997,0x6BD3,0x777E,0x9F17,0x4E36,0x4E9F,0x9F10,0x4E5C,/* 0xB8-0xBF */
+	0x4E69,0x4E93,0x8288,0x5B5B,0x556C,0x560F,0x4EC4,0x538D,/* 0xC0-0xC7 */
+	0x539D,0x53A3,0x53A5,0x53AE,0x9765,0x8D5D,0x531A,0x53F5,/* 0xC8-0xCF */
+	0x5326,0x532E,0x533E,0x8D5C,0x5366,0x5363,0x5202,0x5208,/* 0xD0-0xD7 */
+	0x520E,0x522D,0x5233,0x523F,0x5240,0x524C,0x525E,0x5261,/* 0xD8-0xDF */
+	0x525C,0x84AF,0x527D,0x5282,0x5281,0x5290,0x5293,0x5182,/* 0xE0-0xE7 */
+	0x7F54,0x4EBB,0x4EC3,0x4EC9,0x4EC2,0x4EE8,0x4EE1,0x4EEB,/* 0xE8-0xEF */
+	0x4EDE,0x4F1B,0x4EF3,0x4F22,0x4F64,0x4EF5,0x4F25,0x4F27,/* 0xF0-0xF7 */
+	0x4F09,0x4F2B,0x4F5E,0x4F67,0x6538,0x4F5A,0x4F5D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8CAE,0x8CAF,0x8CB0,0x8CB1,0x8CB2,0x8CB3,0x8CB4,0x8CB5,/* 0x40-0x47 */
+	0x8CB6,0x8CB7,0x8CB8,0x8CB9,0x8CBA,0x8CBB,0x8CBC,0x8CBD,/* 0x48-0x4F */
+	0x8CBE,0x8CBF,0x8CC0,0x8CC1,0x8CC2,0x8CC3,0x8CC4,0x8CC5,/* 0x50-0x57 */
+	0x8CC6,0x8CC7,0x8CC8,0x8CC9,0x8CCA,0x8CCB,0x8CCC,0x8CCD,/* 0x58-0x5F */
+	0x8CCE,0x8CCF,0x8CD0,0x8CD1,0x8CD2,0x8CD3,0x8CD4,0x8CD5,/* 0x60-0x67 */
+	0x8CD6,0x8CD7,0x8CD8,0x8CD9,0x8CDA,0x8CDB,0x8CDC,0x8CDD,/* 0x68-0x6F */
+	0x8CDE,0x8CDF,0x8CE0,0x8CE1,0x8CE2,0x8CE3,0x8CE4,0x8CE5,/* 0x70-0x77 */
+	0x8CE6,0x8CE7,0x8CE8,0x8CE9,0x8CEA,0x8CEB,0x8CEC,0x0000,/* 0x78-0x7F */
+
+	0x8CED,0x8CEE,0x8CEF,0x8CF0,0x8CF1,0x8CF2,0x8CF3,0x8CF4,/* 0x80-0x87 */
+	0x8CF5,0x8CF6,0x8CF7,0x8CF8,0x8CF9,0x8CFA,0x8CFB,0x8CFC,/* 0x88-0x8F */
+	0x8CFD,0x8CFE,0x8CFF,0x8D00,0x8D01,0x8D02,0x8D03,0x8D04,/* 0x90-0x97 */
+	0x8D05,0x8D06,0x8D07,0x8D08,0x8D09,0x8D0A,0x8D0B,0x8D0C,/* 0x98-0x9F */
+	0x8D0D,0x4F5F,0x4F57,0x4F32,0x4F3D,0x4F76,0x4F74,0x4F91,/* 0xA0-0xA7 */
+	0x4F89,0x4F83,0x4F8F,0x4F7E,0x4F7B,0x4FAA,0x4F7C,0x4FAC,/* 0xA8-0xAF */
+	0x4F94,0x4FE6,0x4FE8,0x4FEA,0x4FC5,0x4FDA,0x4FE3,0x4FDC,/* 0xB0-0xB7 */
+	0x4FD1,0x4FDF,0x4FF8,0x5029,0x504C,0x4FF3,0x502C,0x500F,/* 0xB8-0xBF */
+	0x502E,0x502D,0x4FFE,0x501C,0x500C,0x5025,0x5028,0x507E,/* 0xC0-0xC7 */
+	0x5043,0x5055,0x5048,0x504E,0x506C,0x507B,0x50A5,0x50A7,/* 0xC8-0xCF */
+	0x50A9,0x50BA,0x50D6,0x5106,0x50ED,0x50EC,0x50E6,0x50EE,/* 0xD0-0xD7 */
+	0x5107,0x510B,0x4EDD,0x6C3D,0x4F58,0x4F65,0x4FCE,0x9FA0,/* 0xD8-0xDF */
+	0x6C46,0x7C74,0x516E,0x5DFD,0x9EC9,0x9998,0x5181,0x5914,/* 0xE0-0xE7 */
+	0x52F9,0x530D,0x8A07,0x5310,0x51EB,0x5919,0x5155,0x4EA0,/* 0xE8-0xEF */
+	0x5156,0x4EB3,0x886E,0x88A4,0x4EB5,0x8114,0x88D2,0x7980,/* 0xF0-0xF7 */
+	0x5B34,0x8803,0x7FB8,0x51AB,0x51B1,0x51BD,0x51BC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8D0E,0x8D0F,0x8D10,0x8D11,0x8D12,0x8D13,0x8D14,0x8D15,/* 0x40-0x47 */
+	0x8D16,0x8D17,0x8D18,0x8D19,0x8D1A,0x8D1B,0x8D1C,0x8D20,/* 0x48-0x4F */
+	0x8D51,0x8D52,0x8D57,0x8D5F,0x8D65,0x8D68,0x8D69,0x8D6A,/* 0x50-0x57 */
+	0x8D6C,0x8D6E,0x8D6F,0x8D71,0x8D72,0x8D78,0x8D79,0x8D7A,/* 0x58-0x5F */
+	0x8D7B,0x8D7C,0x8D7D,0x8D7E,0x8D7F,0x8D80,0x8D82,0x8D83,/* 0x60-0x67 */
+	0x8D86,0x8D87,0x8D88,0x8D89,0x8D8C,0x8D8D,0x8D8E,0x8D8F,/* 0x68-0x6F */
+	0x8D90,0x8D92,0x8D93,0x8D95,0x8D96,0x8D97,0x8D98,0x8D99,/* 0x70-0x77 */
+	0x8D9A,0x8D9B,0x8D9C,0x8D9D,0x8D9E,0x8DA0,0x8DA1,0x0000,/* 0x78-0x7F */
+
+	0x8DA2,0x8DA4,0x8DA5,0x8DA6,0x8DA7,0x8DA8,0x8DA9,0x8DAA,/* 0x80-0x87 */
+	0x8DAB,0x8DAC,0x8DAD,0x8DAE,0x8DAF,0x8DB0,0x8DB2,0x8DB6,/* 0x88-0x8F */
+	0x8DB7,0x8DB9,0x8DBB,0x8DBD,0x8DC0,0x8DC1,0x8DC2,0x8DC5,/* 0x90-0x97 */
+	0x8DC7,0x8DC8,0x8DC9,0x8DCA,0x8DCD,0x8DD0,0x8DD2,0x8DD3,/* 0x98-0x9F */
+	0x8DD4,0x51C7,0x5196,0x51A2,0x51A5,0x8BA0,0x8BA6,0x8BA7,/* 0xA0-0xA7 */
+	0x8BAA,0x8BB4,0x8BB5,0x8BB7,0x8BC2,0x8BC3,0x8BCB,0x8BCF,/* 0xA8-0xAF */
+	0x8BCE,0x8BD2,0x8BD3,0x8BD4,0x8BD6,0x8BD8,0x8BD9,0x8BDC,/* 0xB0-0xB7 */
+	0x8BDF,0x8BE0,0x8BE4,0x8BE8,0x8BE9,0x8BEE,0x8BF0,0x8BF3,/* 0xB8-0xBF */
+	0x8BF6,0x8BF9,0x8BFC,0x8BFF,0x8C00,0x8C02,0x8C04,0x8C07,/* 0xC0-0xC7 */
+	0x8C0C,0x8C0F,0x8C11,0x8C12,0x8C14,0x8C15,0x8C16,0x8C19,/* 0xC8-0xCF */
+	0x8C1B,0x8C18,0x8C1D,0x8C1F,0x8C20,0x8C21,0x8C25,0x8C27,/* 0xD0-0xD7 */
+	0x8C2A,0x8C2B,0x8C2E,0x8C2F,0x8C32,0x8C33,0x8C35,0x8C36,/* 0xD8-0xDF */
+	0x5369,0x537A,0x961D,0x9622,0x9621,0x9631,0x962A,0x963D,/* 0xE0-0xE7 */
+	0x963C,0x9642,0x9649,0x9654,0x965F,0x9667,0x966C,0x9672,/* 0xE8-0xEF */
+	0x9674,0x9688,0x968D,0x9697,0x96B0,0x9097,0x909B,0x909D,/* 0xF0-0xF7 */
+	0x9099,0x90AC,0x90A1,0x90B4,0x90B3,0x90B6,0x90BA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8DD5,0x8DD8,0x8DD9,0x8DDC,0x8DE0,0x8DE1,0x8DE2,0x8DE5,/* 0x40-0x47 */
+	0x8DE6,0x8DE7,0x8DE9,0x8DED,0x8DEE,0x8DF0,0x8DF1,0x8DF2,/* 0x48-0x4F */
+	0x8DF4,0x8DF6,0x8DFC,0x8DFE,0x8DFF,0x8E00,0x8E01,0x8E02,/* 0x50-0x57 */
+	0x8E03,0x8E04,0x8E06,0x8E07,0x8E08,0x8E0B,0x8E0D,0x8E0E,/* 0x58-0x5F */
+	0x8E10,0x8E11,0x8E12,0x8E13,0x8E15,0x8E16,0x8E17,0x8E18,/* 0x60-0x67 */
+	0x8E19,0x8E1A,0x8E1B,0x8E1C,0x8E20,0x8E21,0x8E24,0x8E25,/* 0x68-0x6F */
+	0x8E26,0x8E27,0x8E28,0x8E2B,0x8E2D,0x8E30,0x8E32,0x8E33,/* 0x70-0x77 */
+	0x8E34,0x8E36,0x8E37,0x8E38,0x8E3B,0x8E3C,0x8E3E,0x0000,/* 0x78-0x7F */
+
+	0x8E3F,0x8E43,0x8E45,0x8E46,0x8E4C,0x8E4D,0x8E4E,0x8E4F,/* 0x80-0x87 */
+	0x8E50,0x8E53,0x8E54,0x8E55,0x8E56,0x8E57,0x8E58,0x8E5A,/* 0x88-0x8F */
+	0x8E5B,0x8E5C,0x8E5D,0x8E5E,0x8E5F,0x8E60,0x8E61,0x8E62,/* 0x90-0x97 */
+	0x8E63,0x8E64,0x8E65,0x8E67,0x8E68,0x8E6A,0x8E6B,0x8E6E,/* 0x98-0x9F */
+	0x8E71,0x90B8,0x90B0,0x90CF,0x90C5,0x90BE,0x90D0,0x90C4,/* 0xA0-0xA7 */
+	0x90C7,0x90D3,0x90E6,0x90E2,0x90DC,0x90D7,0x90DB,0x90EB,/* 0xA8-0xAF */
+	0x90EF,0x90FE,0x9104,0x9122,0x911E,0x9123,0x9131,0x912F,/* 0xB0-0xB7 */
+	0x9139,0x9143,0x9146,0x520D,0x5942,0x52A2,0x52AC,0x52AD,/* 0xB8-0xBF */
+	0x52BE,0x54FF,0x52D0,0x52D6,0x52F0,0x53DF,0x71EE,0x77CD,/* 0xC0-0xC7 */
+	0x5EF4,0x51F5,0x51FC,0x9B2F,0x53B6,0x5F01,0x755A,0x5DEF,/* 0xC8-0xCF */
+	0x574C,0x57A9,0x57A1,0x587E,0x58BC,0x58C5,0x58D1,0x5729,/* 0xD0-0xD7 */
+	0x572C,0x572A,0x5733,0x5739,0x572E,0x572F,0x575C,0x573B,/* 0xD8-0xDF */
+	0x5742,0x5769,0x5785,0x576B,0x5786,0x577C,0x577B,0x5768,/* 0xE0-0xE7 */
+	0x576D,0x5776,0x5773,0x57AD,0x57A4,0x578C,0x57B2,0x57CF,/* 0xE8-0xEF */
+	0x57A7,0x57B4,0x5793,0x57A0,0x57D5,0x57D8,0x57DA,0x57D9,/* 0xF0-0xF7 */
+	0x57D2,0x57B8,0x57F4,0x57EF,0x57F8,0x57E4,0x57DD,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8E73,0x8E75,0x8E77,0x8E78,0x8E79,0x8E7A,0x8E7B,0x8E7D,/* 0x40-0x47 */
+	0x8E7E,0x8E80,0x8E82,0x8E83,0x8E84,0x8E86,0x8E88,0x8E89,/* 0x48-0x4F */
+	0x8E8A,0x8E8B,0x8E8C,0x8E8D,0x8E8E,0x8E91,0x8E92,0x8E93,/* 0x50-0x57 */
+	0x8E95,0x8E96,0x8E97,0x8E98,0x8E99,0x8E9A,0x8E9B,0x8E9D,/* 0x58-0x5F */
+	0x8E9F,0x8EA0,0x8EA1,0x8EA2,0x8EA3,0x8EA4,0x8EA5,0x8EA6,/* 0x60-0x67 */
+	0x8EA7,0x8EA8,0x8EA9,0x8EAA,0x8EAD,0x8EAE,0x8EB0,0x8EB1,/* 0x68-0x6F */
+	0x8EB3,0x8EB4,0x8EB5,0x8EB6,0x8EB7,0x8EB8,0x8EB9,0x8EBB,/* 0x70-0x77 */
+	0x8EBC,0x8EBD,0x8EBE,0x8EBF,0x8EC0,0x8EC1,0x8EC2,0x0000,/* 0x78-0x7F */
+
+	0x8EC3,0x8EC4,0x8EC5,0x8EC6,0x8EC7,0x8EC8,0x8EC9,0x8ECA,/* 0x80-0x87 */
+	0x8ECB,0x8ECC,0x8ECD,0x8ECF,0x8ED0,0x8ED1,0x8ED2,0x8ED3,/* 0x88-0x8F */
+	0x8ED4,0x8ED5,0x8ED6,0x8ED7,0x8ED8,0x8ED9,0x8EDA,0x8EDB,/* 0x90-0x97 */
+	0x8EDC,0x8EDD,0x8EDE,0x8EDF,0x8EE0,0x8EE1,0x8EE2,0x8EE3,/* 0x98-0x9F */
+	0x8EE4,0x580B,0x580D,0x57FD,0x57ED,0x5800,0x581E,0x5819,/* 0xA0-0xA7 */
+	0x5844,0x5820,0x5865,0x586C,0x5881,0x5889,0x589A,0x5880,/* 0xA8-0xAF */
+	0x99A8,0x9F19,0x61FF,0x8279,0x827D,0x827F,0x828F,0x828A,/* 0xB0-0xB7 */
+	0x82A8,0x8284,0x828E,0x8291,0x8297,0x8299,0x82AB,0x82B8,/* 0xB8-0xBF */
+	0x82BE,0x82B0,0x82C8,0x82CA,0x82E3,0x8298,0x82B7,0x82AE,/* 0xC0-0xC7 */
+	0x82CB,0x82CC,0x82C1,0x82A9,0x82B4,0x82A1,0x82AA,0x829F,/* 0xC8-0xCF */
+	0x82C4,0x82CE,0x82A4,0x82E1,0x8309,0x82F7,0x82E4,0x830F,/* 0xD0-0xD7 */
+	0x8307,0x82DC,0x82F4,0x82D2,0x82D8,0x830C,0x82FB,0x82D3,/* 0xD8-0xDF */
+	0x8311,0x831A,0x8306,0x8314,0x8315,0x82E0,0x82D5,0x831C,/* 0xE0-0xE7 */
+	0x8351,0x835B,0x835C,0x8308,0x8392,0x833C,0x8334,0x8331,/* 0xE8-0xEF */
+	0x839B,0x835E,0x832F,0x834F,0x8347,0x8343,0x835F,0x8340,/* 0xF0-0xF7 */
+	0x8317,0x8360,0x832D,0x833A,0x8333,0x8366,0x8365,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8EE5,0x8EE6,0x8EE7,0x8EE8,0x8EE9,0x8EEA,0x8EEB,0x8EEC,/* 0x40-0x47 */
+	0x8EED,0x8EEE,0x8EEF,0x8EF0,0x8EF1,0x8EF2,0x8EF3,0x8EF4,/* 0x48-0x4F */
+	0x8EF5,0x8EF6,0x8EF7,0x8EF8,0x8EF9,0x8EFA,0x8EFB,0x8EFC,/* 0x50-0x57 */
+	0x8EFD,0x8EFE,0x8EFF,0x8F00,0x8F01,0x8F02,0x8F03,0x8F04,/* 0x58-0x5F */
+	0x8F05,0x8F06,0x8F07,0x8F08,0x8F09,0x8F0A,0x8F0B,0x8F0C,/* 0x60-0x67 */
+	0x8F0D,0x8F0E,0x8F0F,0x8F10,0x8F11,0x8F12,0x8F13,0x8F14,/* 0x68-0x6F */
+	0x8F15,0x8F16,0x8F17,0x8F18,0x8F19,0x8F1A,0x8F1B,0x8F1C,/* 0x70-0x77 */
+	0x8F1D,0x8F1E,0x8F1F,0x8F20,0x8F21,0x8F22,0x8F23,0x0000,/* 0x78-0x7F */
+
+	0x8F24,0x8F25,0x8F26,0x8F27,0x8F28,0x8F29,0x8F2A,0x8F2B,/* 0x80-0x87 */
+	0x8F2C,0x8F2D,0x8F2E,0x8F2F,0x8F30,0x8F31,0x8F32,0x8F33,/* 0x88-0x8F */
+	0x8F34,0x8F35,0x8F36,0x8F37,0x8F38,0x8F39,0x8F3A,0x8F3B,/* 0x90-0x97 */
+	0x8F3C,0x8F3D,0x8F3E,0x8F3F,0x8F40,0x8F41,0x8F42,0x8F43,/* 0x98-0x9F */
+	0x8F44,0x8368,0x831B,0x8369,0x836C,0x836A,0x836D,0x836E,/* 0xA0-0xA7 */
+	0x83B0,0x8378,0x83B3,0x83B4,0x83A0,0x83AA,0x8393,0x839C,/* 0xA8-0xAF */
+	0x8385,0x837C,0x83B6,0x83A9,0x837D,0x83B8,0x837B,0x8398,/* 0xB0-0xB7 */
+	0x839E,0x83A8,0x83BA,0x83BC,0x83C1,0x8401,0x83E5,0x83D8,/* 0xB8-0xBF */
+	0x5807,0x8418,0x840B,0x83DD,0x83FD,0x83D6,0x841C,0x8438,/* 0xC0-0xC7 */
+	0x8411,0x8406,0x83D4,0x83DF,0x840F,0x8403,0x83F8,0x83F9,/* 0xC8-0xCF */
+	0x83EA,0x83C5,0x83C0,0x8426,0x83F0,0x83E1,0x845C,0x8451,/* 0xD0-0xD7 */
+	0x845A,0x8459,0x8473,0x8487,0x8488,0x847A,0x8489,0x8478,/* 0xD8-0xDF */
+	0x843C,0x8446,0x8469,0x8476,0x848C,0x848E,0x8431,0x846D,/* 0xE0-0xE7 */
+	0x84C1,0x84CD,0x84D0,0x84E6,0x84BD,0x84D3,0x84CA,0x84BF,/* 0xE8-0xEF */
+	0x84BA,0x84E0,0x84A1,0x84B9,0x84B4,0x8497,0x84E5,0x84E3,/* 0xF0-0xF7 */
+	0x850C,0x750D,0x8538,0x84F0,0x8539,0x851F,0x853A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8F45,0x8F46,0x8F47,0x8F48,0x8F49,0x8F4A,0x8F4B,0x8F4C,/* 0x40-0x47 */
+	0x8F4D,0x8F4E,0x8F4F,0x8F50,0x8F51,0x8F52,0x8F53,0x8F54,/* 0x48-0x4F */
+	0x8F55,0x8F56,0x8F57,0x8F58,0x8F59,0x8F5A,0x8F5B,0x8F5C,/* 0x50-0x57 */
+	0x8F5D,0x8F5E,0x8F5F,0x8F60,0x8F61,0x8F62,0x8F63,0x8F64,/* 0x58-0x5F */
+	0x8F65,0x8F6A,0x8F80,0x8F8C,0x8F92,0x8F9D,0x8FA0,0x8FA1,/* 0x60-0x67 */
+	0x8FA2,0x8FA4,0x8FA5,0x8FA6,0x8FA7,0x8FAA,0x8FAC,0x8FAD,/* 0x68-0x6F */
+	0x8FAE,0x8FAF,0x8FB2,0x8FB3,0x8FB4,0x8FB5,0x8FB7,0x8FB8,/* 0x70-0x77 */
+	0x8FBA,0x8FBB,0x8FBC,0x8FBF,0x8FC0,0x8FC3,0x8FC6,0x0000,/* 0x78-0x7F */
+
+	0x8FC9,0x8FCA,0x8FCB,0x8FCC,0x8FCD,0x8FCF,0x8FD2,0x8FD6,/* 0x80-0x87 */
+	0x8FD7,0x8FDA,0x8FE0,0x8FE1,0x8FE3,0x8FE7,0x8FEC,0x8FEF,/* 0x88-0x8F */
+	0x8FF1,0x8FF2,0x8FF4,0x8FF5,0x8FF6,0x8FFA,0x8FFB,0x8FFC,/* 0x90-0x97 */
+	0x8FFE,0x8FFF,0x9007,0x9008,0x900C,0x900E,0x9013,0x9015,/* 0x98-0x9F */
+	0x9018,0x8556,0x853B,0x84FF,0x84FC,0x8559,0x8548,0x8568,/* 0xA0-0xA7 */
+	0x8564,0x855E,0x857A,0x77A2,0x8543,0x8572,0x857B,0x85A4,/* 0xA8-0xAF */
+	0x85A8,0x8587,0x858F,0x8579,0x85AE,0x859C,0x8585,0x85B9,/* 0xB0-0xB7 */
+	0x85B7,0x85B0,0x85D3,0x85C1,0x85DC,0x85FF,0x8627,0x8605,/* 0xB8-0xBF */
+	0x8629,0x8616,0x863C,0x5EFE,0x5F08,0x593C,0x5941,0x8037,/* 0xC0-0xC7 */
+	0x5955,0x595A,0x5958,0x530F,0x5C22,0x5C25,0x5C2C,0x5C34,/* 0xC8-0xCF */
+	0x624C,0x626A,0x629F,0x62BB,0x62CA,0x62DA,0x62D7,0x62EE,/* 0xD0-0xD7 */
+	0x6322,0x62F6,0x6339,0x634B,0x6343,0x63AD,0x63F6,0x6371,/* 0xD8-0xDF */
+	0x637A,0x638E,0x63B4,0x636D,0x63AC,0x638A,0x6369,0x63AE,/* 0xE0-0xE7 */
+	0x63BC,0x63F2,0x63F8,0x63E0,0x63FF,0x63C4,0x63DE,0x63CE,/* 0xE8-0xEF */
+	0x6452,0x63C6,0x63BE,0x6445,0x6441,0x640B,0x641B,0x6420,/* 0xF0-0xF7 */
+	0x640C,0x6426,0x6421,0x645E,0x6484,0x646D,0x6496,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9019,0x901C,0x9023,0x9024,0x9025,0x9027,0x9028,0x9029,/* 0x40-0x47 */
+	0x902A,0x902B,0x902C,0x9030,0x9031,0x9032,0x9033,0x9034,/* 0x48-0x4F */
+	0x9037,0x9039,0x903A,0x903D,0x903F,0x9040,0x9043,0x9045,/* 0x50-0x57 */
+	0x9046,0x9048,0x9049,0x904A,0x904B,0x904C,0x904E,0x9054,/* 0x58-0x5F */
+	0x9055,0x9056,0x9059,0x905A,0x905C,0x905D,0x905E,0x905F,/* 0x60-0x67 */
+	0x9060,0x9061,0x9064,0x9066,0x9067,0x9069,0x906A,0x906B,/* 0x68-0x6F */
+	0x906C,0x906F,0x9070,0x9071,0x9072,0x9073,0x9076,0x9077,/* 0x70-0x77 */
+	0x9078,0x9079,0x907A,0x907B,0x907C,0x907E,0x9081,0x0000,/* 0x78-0x7F */
+
+	0x9084,0x9085,0x9086,0x9087,0x9089,0x908A,0x908C,0x908D,/* 0x80-0x87 */
+	0x908E,0x908F,0x9090,0x9092,0x9094,0x9096,0x9098,0x909A,/* 0x88-0x8F */
+	0x909C,0x909E,0x909F,0x90A0,0x90A4,0x90A5,0x90A7,0x90A8,/* 0x90-0x97 */
+	0x90A9,0x90AB,0x90AD,0x90B2,0x90B7,0x90BC,0x90BD,0x90BF,/* 0x98-0x9F */
+	0x90C0,0x647A,0x64B7,0x64B8,0x6499,0x64BA,0x64C0,0x64D0,/* 0xA0-0xA7 */
+	0x64D7,0x64E4,0x64E2,0x6509,0x6525,0x652E,0x5F0B,0x5FD2,/* 0xA8-0xAF */
+	0x7519,0x5F11,0x535F,0x53F1,0x53FD,0x53E9,0x53E8,0x53FB,/* 0xB0-0xB7 */
+	0x5412,0x5416,0x5406,0x544B,0x5452,0x5453,0x5454,0x5456,/* 0xB8-0xBF */
+	0x5443,0x5421,0x5457,0x5459,0x5423,0x5432,0x5482,0x5494,/* 0xC0-0xC7 */
+	0x5477,0x5471,0x5464,0x549A,0x549B,0x5484,0x5476,0x5466,/* 0xC8-0xCF */
+	0x549D,0x54D0,0x54AD,0x54C2,0x54B4,0x54D2,0x54A7,0x54A6,/* 0xD0-0xD7 */
+	0x54D3,0x54D4,0x5472,0x54A3,0x54D5,0x54BB,0x54BF,0x54CC,/* 0xD8-0xDF */
+	0x54D9,0x54DA,0x54DC,0x54A9,0x54AA,0x54A4,0x54DD,0x54CF,/* 0xE0-0xE7 */
+	0x54DE,0x551B,0x54E7,0x5520,0x54FD,0x5514,0x54F3,0x5522,/* 0xE8-0xEF */
+	0x5523,0x550F,0x5511,0x5527,0x552A,0x5567,0x558F,0x55B5,/* 0xF0-0xF7 */
+	0x5549,0x556D,0x5541,0x5555,0x553F,0x5550,0x553C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x90C2,0x90C3,0x90C6,0x90C8,0x90C9,0x90CB,0x90CC,0x90CD,/* 0x40-0x47 */
+	0x90D2,0x90D4,0x90D5,0x90D6,0x90D8,0x90D9,0x90DA,0x90DE,/* 0x48-0x4F */
+	0x90DF,0x90E0,0x90E3,0x90E4,0x90E5,0x90E9,0x90EA,0x90EC,/* 0x50-0x57 */
+	0x90EE,0x90F0,0x90F1,0x90F2,0x90F3,0x90F5,0x90F6,0x90F7,/* 0x58-0x5F */
+	0x90F9,0x90FA,0x90FB,0x90FC,0x90FF,0x9100,0x9101,0x9103,/* 0x60-0x67 */
+	0x9105,0x9106,0x9107,0x9108,0x9109,0x910A,0x910B,0x910C,/* 0x68-0x6F */
+	0x910D,0x910E,0x910F,0x9110,0x9111,0x9112,0x9113,0x9114,/* 0x70-0x77 */
+	0x9115,0x9116,0x9117,0x9118,0x911A,0x911B,0x911C,0x0000,/* 0x78-0x7F */
+
+	0x911D,0x911F,0x9120,0x9121,0x9124,0x9125,0x9126,0x9127,/* 0x80-0x87 */
+	0x9128,0x9129,0x912A,0x912B,0x912C,0x912D,0x912E,0x9130,/* 0x88-0x8F */
+	0x9132,0x9133,0x9134,0x9135,0x9136,0x9137,0x9138,0x913A,/* 0x90-0x97 */
+	0x913B,0x913C,0x913D,0x913E,0x913F,0x9140,0x9141,0x9142,/* 0x98-0x9F */
+	0x9144,0x5537,0x5556,0x5575,0x5576,0x5577,0x5533,0x5530,/* 0xA0-0xA7 */
+	0x555C,0x558B,0x55D2,0x5583,0x55B1,0x55B9,0x5588,0x5581,/* 0xA8-0xAF */
+	0x559F,0x557E,0x55D6,0x5591,0x557B,0x55DF,0x55BD,0x55BE,/* 0xB0-0xB7 */
+	0x5594,0x5599,0x55EA,0x55F7,0x55C9,0x561F,0x55D1,0x55EB,/* 0xB8-0xBF */
+	0x55EC,0x55D4,0x55E6,0x55DD,0x55C4,0x55EF,0x55E5,0x55F2,/* 0xC0-0xC7 */
+	0x55F3,0x55CC,0x55CD,0x55E8,0x55F5,0x55E4,0x8F94,0x561E,/* 0xC8-0xCF */
+	0x5608,0x560C,0x5601,0x5624,0x5623,0x55FE,0x5600,0x5627,/* 0xD0-0xD7 */
+	0x562D,0x5658,0x5639,0x5657,0x562C,0x564D,0x5662,0x5659,/* 0xD8-0xDF */
+	0x565C,0x564C,0x5654,0x5686,0x5664,0x5671,0x566B,0x567B,/* 0xE0-0xE7 */
+	0x567C,0x5685,0x5693,0x56AF,0x56D4,0x56D7,0x56DD,0x56E1,/* 0xE8-0xEF */
+	0x56F5,0x56EB,0x56F9,0x56FF,0x5704,0x570A,0x5709,0x571C,/* 0xF0-0xF7 */
+	0x5E0F,0x5E19,0x5E14,0x5E11,0x5E31,0x5E3B,0x5E3C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9145,0x9147,0x9148,0x9151,0x9153,0x9154,0x9155,0x9156,/* 0x40-0x47 */
+	0x9158,0x9159,0x915B,0x915C,0x915F,0x9160,0x9166,0x9167,/* 0x48-0x4F */
+	0x9168,0x916B,0x916D,0x9173,0x917A,0x917B,0x917C,0x9180,/* 0x50-0x57 */
+	0x9181,0x9182,0x9183,0x9184,0x9186,0x9188,0x918A,0x918E,/* 0x58-0x5F */
+	0x918F,0x9193,0x9194,0x9195,0x9196,0x9197,0x9198,0x9199,/* 0x60-0x67 */
+	0x919C,0x919D,0x919E,0x919F,0x91A0,0x91A1,0x91A4,0x91A5,/* 0x68-0x6F */
+	0x91A6,0x91A7,0x91A8,0x91A9,0x91AB,0x91AC,0x91B0,0x91B1,/* 0x70-0x77 */
+	0x91B2,0x91B3,0x91B6,0x91B7,0x91B8,0x91B9,0x91BB,0x0000,/* 0x78-0x7F */
+
+	0x91BC,0x91BD,0x91BE,0x91BF,0x91C0,0x91C1,0x91C2,0x91C3,/* 0x80-0x87 */
+	0x91C4,0x91C5,0x91C6,0x91C8,0x91CB,0x91D0,0x91D2,0x91D3,/* 0x88-0x8F */
+	0x91D4,0x91D5,0x91D6,0x91D7,0x91D8,0x91D9,0x91DA,0x91DB,/* 0x90-0x97 */
+	0x91DD,0x91DE,0x91DF,0x91E0,0x91E1,0x91E2,0x91E3,0x91E4,/* 0x98-0x9F */
+	0x91E5,0x5E37,0x5E44,0x5E54,0x5E5B,0x5E5E,0x5E61,0x5C8C,/* 0xA0-0xA7 */
+	0x5C7A,0x5C8D,0x5C90,0x5C96,0x5C88,0x5C98,0x5C99,0x5C91,/* 0xA8-0xAF */
+	0x5C9A,0x5C9C,0x5CB5,0x5CA2,0x5CBD,0x5CAC,0x5CAB,0x5CB1,/* 0xB0-0xB7 */
+	0x5CA3,0x5CC1,0x5CB7,0x5CC4,0x5CD2,0x5CE4,0x5CCB,0x5CE5,/* 0xB8-0xBF */
+	0x5D02,0x5D03,0x5D27,0x5D26,0x5D2E,0x5D24,0x5D1E,0x5D06,/* 0xC0-0xC7 */
+	0x5D1B,0x5D58,0x5D3E,0x5D34,0x5D3D,0x5D6C,0x5D5B,0x5D6F,/* 0xC8-0xCF */
+	0x5D5D,0x5D6B,0x5D4B,0x5D4A,0x5D69,0x5D74,0x5D82,0x5D99,/* 0xD0-0xD7 */
+	0x5D9D,0x8C73,0x5DB7,0x5DC5,0x5F73,0x5F77,0x5F82,0x5F87,/* 0xD8-0xDF */
+	0x5F89,0x5F8C,0x5F95,0x5F99,0x5F9C,0x5FA8,0x5FAD,0x5FB5,/* 0xE0-0xE7 */
+	0x5FBC,0x8862,0x5F61,0x72AD,0x72B0,0x72B4,0x72B7,0x72B8,/* 0xE8-0xEF */
+	0x72C3,0x72C1,0x72CE,0x72CD,0x72D2,0x72E8,0x72EF,0x72E9,/* 0xF0-0xF7 */
+	0x72F2,0x72F4,0x72F7,0x7301,0x72F3,0x7303,0x72FA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x91E6,0x91E7,0x91E8,0x91E9,0x91EA,0x91EB,0x91EC,0x91ED,/* 0x40-0x47 */
+	0x91EE,0x91EF,0x91F0,0x91F1,0x91F2,0x91F3,0x91F4,0x91F5,/* 0x48-0x4F */
+	0x91F6,0x91F7,0x91F8,0x91F9,0x91FA,0x91FB,0x91FC,0x91FD,/* 0x50-0x57 */
+	0x91FE,0x91FF,0x9200,0x9201,0x9202,0x9203,0x9204,0x9205,/* 0x58-0x5F */
+	0x9206,0x9207,0x9208,0x9209,0x920A,0x920B,0x920C,0x920D,/* 0x60-0x67 */
+	0x920E,0x920F,0x9210,0x9211,0x9212,0x9213,0x9214,0x9215,/* 0x68-0x6F */
+	0x9216,0x9217,0x9218,0x9219,0x921A,0x921B,0x921C,0x921D,/* 0x70-0x77 */
+	0x921E,0x921F,0x9220,0x9221,0x9222,0x9223,0x9224,0x0000,/* 0x78-0x7F */
+
+	0x9225,0x9226,0x9227,0x9228,0x9229,0x922A,0x922B,0x922C,/* 0x80-0x87 */
+	0x922D,0x922E,0x922F,0x9230,0x9231,0x9232,0x9233,0x9234,/* 0x88-0x8F */
+	0x9235,0x9236,0x9237,0x9238,0x9239,0x923A,0x923B,0x923C,/* 0x90-0x97 */
+	0x923D,0x923E,0x923F,0x9240,0x9241,0x9242,0x9243,0x9244,/* 0x98-0x9F */
+	0x9245,0x72FB,0x7317,0x7313,0x7321,0x730A,0x731E,0x731D,/* 0xA0-0xA7 */
+	0x7315,0x7322,0x7339,0x7325,0x732C,0x7338,0x7331,0x7350,/* 0xA8-0xAF */
+	0x734D,0x7357,0x7360,0x736C,0x736F,0x737E,0x821B,0x5925,/* 0xB0-0xB7 */
+	0x98E7,0x5924,0x5902,0x9963,0x9967,0x9968,0x9969,0x996A,/* 0xB8-0xBF */
+	0x996B,0x996C,0x9974,0x9977,0x997D,0x9980,0x9984,0x9987,/* 0xC0-0xC7 */
+	0x998A,0x998D,0x9990,0x9991,0x9993,0x9994,0x9995,0x5E80,/* 0xC8-0xCF */
+	0x5E91,0x5E8B,0x5E96,0x5EA5,0x5EA0,0x5EB9,0x5EB5,0x5EBE,/* 0xD0-0xD7 */
+	0x5EB3,0x8D53,0x5ED2,0x5ED1,0x5EDB,0x5EE8,0x5EEA,0x81BA,/* 0xD8-0xDF */
+	0x5FC4,0x5FC9,0x5FD6,0x5FCF,0x6003,0x5FEE,0x6004,0x5FE1,/* 0xE0-0xE7 */
+	0x5FE4,0x5FFE,0x6005,0x6006,0x5FEA,0x5FED,0x5FF8,0x6019,/* 0xE8-0xEF */
+	0x6035,0x6026,0x601B,0x600F,0x600D,0x6029,0x602B,0x600A,/* 0xF0-0xF7 */
+	0x603F,0x6021,0x6078,0x6079,0x607B,0x607A,0x6042,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9246,0x9247,0x9248,0x9249,0x924A,0x924B,0x924C,0x924D,/* 0x40-0x47 */
+	0x924E,0x924F,0x9250,0x9251,0x9252,0x9253,0x9254,0x9255,/* 0x48-0x4F */
+	0x9256,0x9257,0x9258,0x9259,0x925A,0x925B,0x925C,0x925D,/* 0x50-0x57 */
+	0x925E,0x925F,0x9260,0x9261,0x9262,0x9263,0x9264,0x9265,/* 0x58-0x5F */
+	0x9266,0x9267,0x9268,0x9269,0x926A,0x926B,0x926C,0x926D,/* 0x60-0x67 */
+	0x926E,0x926F,0x9270,0x9271,0x9272,0x9273,0x9275,0x9276,/* 0x68-0x6F */
+	0x9277,0x9278,0x9279,0x927A,0x927B,0x927C,0x927D,0x927E,/* 0x70-0x77 */
+	0x927F,0x9280,0x9281,0x9282,0x9283,0x9284,0x9285,0x0000,/* 0x78-0x7F */
+
+	0x9286,0x9287,0x9288,0x9289,0x928A,0x928B,0x928C,0x928D,/* 0x80-0x87 */
+	0x928F,0x9290,0x9291,0x9292,0x9293,0x9294,0x9295,0x9296,/* 0x88-0x8F */
+	0x9297,0x9298,0x9299,0x929A,0x929B,0x929C,0x929D,0x929E,/* 0x90-0x97 */
+	0x929F,0x92A0,0x92A1,0x92A2,0x92A3,0x92A4,0x92A5,0x92A6,/* 0x98-0x9F */
+	0x92A7,0x606A,0x607D,0x6096,0x609A,0x60AD,0x609D,0x6083,/* 0xA0-0xA7 */
+	0x6092,0x608C,0x609B,0x60EC,0x60BB,0x60B1,0x60DD,0x60D8,/* 0xA8-0xAF */
+	0x60C6,0x60DA,0x60B4,0x6120,0x6126,0x6115,0x6123,0x60F4,/* 0xB0-0xB7 */
+	0x6100,0x610E,0x612B,0x614A,0x6175,0x61AC,0x6194,0x61A7,/* 0xB8-0xBF */
+	0x61B7,0x61D4,0x61F5,0x5FDD,0x96B3,0x95E9,0x95EB,0x95F1,/* 0xC0-0xC7 */
+	0x95F3,0x95F5,0x95F6,0x95FC,0x95FE,0x9603,0x9604,0x9606,/* 0xC8-0xCF */
+	0x9608,0x960A,0x960B,0x960C,0x960D,0x960F,0x9612,0x9615,/* 0xD0-0xD7 */
+	0x9616,0x9617,0x9619,0x961A,0x4E2C,0x723F,0x6215,0x6C35,/* 0xD8-0xDF */
+	0x6C54,0x6C5C,0x6C4A,0x6CA3,0x6C85,0x6C90,0x6C94,0x6C8C,/* 0xE0-0xE7 */
+	0x6C68,0x6C69,0x6C74,0x6C76,0x6C86,0x6CA9,0x6CD0,0x6CD4,/* 0xE8-0xEF */
+	0x6CAD,0x6CF7,0x6CF8,0x6CF1,0x6CD7,0x6CB2,0x6CE0,0x6CD6,/* 0xF0-0xF7 */
+	0x6CFA,0x6CEB,0x6CEE,0x6CB1,0x6CD3,0x6CEF,0x6CFE,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x92A8,0x92A9,0x92AA,0x92AB,0x92AC,0x92AD,0x92AF,0x92B0,/* 0x40-0x47 */
+	0x92B1,0x92B2,0x92B3,0x92B4,0x92B5,0x92B6,0x92B7,0x92B8,/* 0x48-0x4F */
+	0x92B9,0x92BA,0x92BB,0x92BC,0x92BD,0x92BE,0x92BF,0x92C0,/* 0x50-0x57 */
+	0x92C1,0x92C2,0x92C3,0x92C4,0x92C5,0x92C6,0x92C7,0x92C9,/* 0x58-0x5F */
+	0x92CA,0x92CB,0x92CC,0x92CD,0x92CE,0x92CF,0x92D0,0x92D1,/* 0x60-0x67 */
+	0x92D2,0x92D3,0x92D4,0x92D5,0x92D6,0x92D7,0x92D8,0x92D9,/* 0x68-0x6F */
+	0x92DA,0x92DB,0x92DC,0x92DD,0x92DE,0x92DF,0x92E0,0x92E1,/* 0x70-0x77 */
+	0x92E2,0x92E3,0x92E4,0x92E5,0x92E6,0x92E7,0x92E8,0x0000,/* 0x78-0x7F */
+
+	0x92E9,0x92EA,0x92EB,0x92EC,0x92ED,0x92EE,0x92EF,0x92F0,/* 0x80-0x87 */
+	0x92F1,0x92F2,0x92F3,0x92F4,0x92F5,0x92F6,0x92F7,0x92F8,/* 0x88-0x8F */
+	0x92F9,0x92FA,0x92FB,0x92FC,0x92FD,0x92FE,0x92FF,0x9300,/* 0x90-0x97 */
+	0x9301,0x9302,0x9303,0x9304,0x9305,0x9306,0x9307,0x9308,/* 0x98-0x9F */
+	0x9309,0x6D39,0x6D27,0x6D0C,0x6D43,0x6D48,0x6D07,0x6D04,/* 0xA0-0xA7 */
+	0x6D19,0x6D0E,0x6D2B,0x6D4D,0x6D2E,0x6D35,0x6D1A,0x6D4F,/* 0xA8-0xAF */
+	0x6D52,0x6D54,0x6D33,0x6D91,0x6D6F,0x6D9E,0x6DA0,0x6D5E,/* 0xB0-0xB7 */
+	0x6D93,0x6D94,0x6D5C,0x6D60,0x6D7C,0x6D63,0x6E1A,0x6DC7,/* 0xB8-0xBF */
+	0x6DC5,0x6DDE,0x6E0E,0x6DBF,0x6DE0,0x6E11,0x6DE6,0x6DDD,/* 0xC0-0xC7 */
+	0x6DD9,0x6E16,0x6DAB,0x6E0C,0x6DAE,0x6E2B,0x6E6E,0x6E4E,/* 0xC8-0xCF */
+	0x6E6B,0x6EB2,0x6E5F,0x6E86,0x6E53,0x6E54,0x6E32,0x6E25,/* 0xD0-0xD7 */
+	0x6E44,0x6EDF,0x6EB1,0x6E98,0x6EE0,0x6F2D,0x6EE2,0x6EA5,/* 0xD8-0xDF */
+	0x6EA7,0x6EBD,0x6EBB,0x6EB7,0x6ED7,0x6EB4,0x6ECF,0x6E8F,/* 0xE0-0xE7 */
+	0x6EC2,0x6E9F,0x6F62,0x6F46,0x6F47,0x6F24,0x6F15,0x6EF9,/* 0xE8-0xEF */
+	0x6F2F,0x6F36,0x6F4B,0x6F74,0x6F2A,0x6F09,0x6F29,0x6F89,/* 0xF0-0xF7 */
+	0x6F8D,0x6F8C,0x6F78,0x6F72,0x6F7C,0x6F7A,0x6FD1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x930A,0x930B,0x930C,0x930D,0x930E,0x930F,0x9310,0x9311,/* 0x40-0x47 */
+	0x9312,0x9313,0x9314,0x9315,0x9316,0x9317,0x9318,0x9319,/* 0x48-0x4F */
+	0x931A,0x931B,0x931C,0x931D,0x931E,0x931F,0x9320,0x9321,/* 0x50-0x57 */
+	0x9322,0x9323,0x9324,0x9325,0x9326,0x9327,0x9328,0x9329,/* 0x58-0x5F */
+	0x932A,0x932B,0x932C,0x932D,0x932E,0x932F,0x9330,0x9331,/* 0x60-0x67 */
+	0x9332,0x9333,0x9334,0x9335,0x9336,0x9337,0x9338,0x9339,/* 0x68-0x6F */
+	0x933A,0x933B,0x933C,0x933D,0x933F,0x9340,0x9341,0x9342,/* 0x70-0x77 */
+	0x9343,0x9344,0x9345,0x9346,0x9347,0x9348,0x9349,0x0000,/* 0x78-0x7F */
+
+	0x934A,0x934B,0x934C,0x934D,0x934E,0x934F,0x9350,0x9351,/* 0x80-0x87 */
+	0x9352,0x9353,0x9354,0x9355,0x9356,0x9357,0x9358,0x9359,/* 0x88-0x8F */
+	0x935A,0x935B,0x935C,0x935D,0x935E,0x935F,0x9360,0x9361,/* 0x90-0x97 */
+	0x9362,0x9363,0x9364,0x9365,0x9366,0x9367,0x9368,0x9369,/* 0x98-0x9F */
+	0x936B,0x6FC9,0x6FA7,0x6FB9,0x6FB6,0x6FC2,0x6FE1,0x6FEE,/* 0xA0-0xA7 */
+	0x6FDE,0x6FE0,0x6FEF,0x701A,0x7023,0x701B,0x7039,0x7035,/* 0xA8-0xAF */
+	0x704F,0x705E,0x5B80,0x5B84,0x5B95,0x5B93,0x5BA5,0x5BB8,/* 0xB0-0xB7 */
+	0x752F,0x9A9E,0x6434,0x5BE4,0x5BEE,0x8930,0x5BF0,0x8E47,/* 0xB8-0xBF */
+	0x8B07,0x8FB6,0x8FD3,0x8FD5,0x8FE5,0x8FEE,0x8FE4,0x8FE9,/* 0xC0-0xC7 */
+	0x8FE6,0x8FF3,0x8FE8,0x9005,0x9004,0x900B,0x9026,0x9011,/* 0xC8-0xCF */
+	0x900D,0x9016,0x9021,0x9035,0x9036,0x902D,0x902F,0x9044,/* 0xD0-0xD7 */
+	0x9051,0x9052,0x9050,0x9068,0x9058,0x9062,0x905B,0x66B9,/* 0xD8-0xDF */
+	0x9074,0x907D,0x9082,0x9088,0x9083,0x908B,0x5F50,0x5F57,/* 0xE0-0xE7 */
+	0x5F56,0x5F58,0x5C3B,0x54AB,0x5C50,0x5C59,0x5B71,0x5C63,/* 0xE8-0xEF */
+	0x5C66,0x7FBC,0x5F2A,0x5F29,0x5F2D,0x8274,0x5F3C,0x9B3B,/* 0xF0-0xF7 */
+	0x5C6E,0x5981,0x5983,0x598D,0x59A9,0x59AA,0x59A3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x936C,0x936D,0x936E,0x936F,0x9370,0x9371,0x9372,0x9373,/* 0x40-0x47 */
+	0x9374,0x9375,0x9376,0x9377,0x9378,0x9379,0x937A,0x937B,/* 0x48-0x4F */
+	0x937C,0x937D,0x937E,0x937F,0x9380,0x9381,0x9382,0x9383,/* 0x50-0x57 */
+	0x9384,0x9385,0x9386,0x9387,0x9388,0x9389,0x938A,0x938B,/* 0x58-0x5F */
+	0x938C,0x938D,0x938E,0x9390,0x9391,0x9392,0x9393,0x9394,/* 0x60-0x67 */
+	0x9395,0x9396,0x9397,0x9398,0x9399,0x939A,0x939B,0x939C,/* 0x68-0x6F */
+	0x939D,0x939E,0x939F,0x93A0,0x93A1,0x93A2,0x93A3,0x93A4,/* 0x70-0x77 */
+	0x93A5,0x93A6,0x93A7,0x93A8,0x93A9,0x93AA,0x93AB,0x0000,/* 0x78-0x7F */
+
+	0x93AC,0x93AD,0x93AE,0x93AF,0x93B0,0x93B1,0x93B2,0x93B3,/* 0x80-0x87 */
+	0x93B4,0x93B5,0x93B6,0x93B7,0x93B8,0x93B9,0x93BA,0x93BB,/* 0x88-0x8F */
+	0x93BC,0x93BD,0x93BE,0x93BF,0x93C0,0x93C1,0x93C2,0x93C3,/* 0x90-0x97 */
+	0x93C4,0x93C5,0x93C6,0x93C7,0x93C8,0x93C9,0x93CB,0x93CC,/* 0x98-0x9F */
+	0x93CD,0x5997,0x59CA,0x59AB,0x599E,0x59A4,0x59D2,0x59B2,/* 0xA0-0xA7 */
+	0x59AF,0x59D7,0x59BE,0x5A05,0x5A06,0x59DD,0x5A08,0x59E3,/* 0xA8-0xAF */
+	0x59D8,0x59F9,0x5A0C,0x5A09,0x5A32,0x5A34,0x5A11,0x5A23,/* 0xB0-0xB7 */
+	0x5A13,0x5A40,0x5A67,0x5A4A,0x5A55,0x5A3C,0x5A62,0x5A75,/* 0xB8-0xBF */
+	0x80EC,0x5AAA,0x5A9B,0x5A77,0x5A7A,0x5ABE,0x5AEB,0x5AB2,/* 0xC0-0xC7 */
+	0x5AD2,0x5AD4,0x5AB8,0x5AE0,0x5AE3,0x5AF1,0x5AD6,0x5AE6,/* 0xC8-0xCF */
+	0x5AD8,0x5ADC,0x5B09,0x5B17,0x5B16,0x5B32,0x5B37,0x5B40,/* 0xD0-0xD7 */
+	0x5C15,0x5C1C,0x5B5A,0x5B65,0x5B73,0x5B51,0x5B53,0x5B62,/* 0xD8-0xDF */
+	0x9A75,0x9A77,0x9A78,0x9A7A,0x9A7F,0x9A7D,0x9A80,0x9A81,/* 0xE0-0xE7 */
+	0x9A85,0x9A88,0x9A8A,0x9A90,0x9A92,0x9A93,0x9A96,0x9A98,/* 0xE8-0xEF */
+	0x9A9B,0x9A9C,0x9A9D,0x9A9F,0x9AA0,0x9AA2,0x9AA3,0x9AA5,/* 0xF0-0xF7 */
+	0x9AA7,0x7E9F,0x7EA1,0x7EA3,0x7EA5,0x7EA8,0x7EA9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x93CE,0x93CF,0x93D0,0x93D1,0x93D2,0x93D3,0x93D4,0x93D5,/* 0x40-0x47 */
+	0x93D7,0x93D8,0x93D9,0x93DA,0x93DB,0x93DC,0x93DD,0x93DE,/* 0x48-0x4F */
+	0x93DF,0x93E0,0x93E1,0x93E2,0x93E3,0x93E4,0x93E5,0x93E6,/* 0x50-0x57 */
+	0x93E7,0x93E8,0x93E9,0x93EA,0x93EB,0x93EC,0x93ED,0x93EE,/* 0x58-0x5F */
+	0x93EF,0x93F0,0x93F1,0x93F2,0x93F3,0x93F4,0x93F5,0x93F6,/* 0x60-0x67 */
+	0x93F7,0x93F8,0x93F9,0x93FA,0x93FB,0x93FC,0x93FD,0x93FE,/* 0x68-0x6F */
+	0x93FF,0x9400,0x9401,0x9402,0x9403,0x9404,0x9405,0x9406,/* 0x70-0x77 */
+	0x9407,0x9408,0x9409,0x940A,0x940B,0x940C,0x940D,0x0000,/* 0x78-0x7F */
+
+	0x940E,0x940F,0x9410,0x9411,0x9412,0x9413,0x9414,0x9415,/* 0x80-0x87 */
+	0x9416,0x9417,0x9418,0x9419,0x941A,0x941B,0x941C,0x941D,/* 0x88-0x8F */
+	0x941E,0x941F,0x9420,0x9421,0x9422,0x9423,0x9424,0x9425,/* 0x90-0x97 */
+	0x9426,0x9427,0x9428,0x9429,0x942A,0x942B,0x942C,0x942D,/* 0x98-0x9F */
+	0x942E,0x7EAD,0x7EB0,0x7EBE,0x7EC0,0x7EC1,0x7EC2,0x7EC9,/* 0xA0-0xA7 */
+	0x7ECB,0x7ECC,0x7ED0,0x7ED4,0x7ED7,0x7EDB,0x7EE0,0x7EE1,/* 0xA8-0xAF */
+	0x7EE8,0x7EEB,0x7EEE,0x7EEF,0x7EF1,0x7EF2,0x7F0D,0x7EF6,/* 0xB0-0xB7 */
+	0x7EFA,0x7EFB,0x7EFE,0x7F01,0x7F02,0x7F03,0x7F07,0x7F08,/* 0xB8-0xBF */
+	0x7F0B,0x7F0C,0x7F0F,0x7F11,0x7F12,0x7F17,0x7F19,0x7F1C,/* 0xC0-0xC7 */
+	0x7F1B,0x7F1F,0x7F21,0x7F22,0x7F23,0x7F24,0x7F25,0x7F26,/* 0xC8-0xCF */
+	0x7F27,0x7F2A,0x7F2B,0x7F2C,0x7F2D,0x7F2F,0x7F30,0x7F31,/* 0xD0-0xD7 */
+	0x7F32,0x7F33,0x7F35,0x5E7A,0x757F,0x5DDB,0x753E,0x9095,/* 0xD8-0xDF */
+	0x738E,0x7391,0x73AE,0x73A2,0x739F,0x73CF,0x73C2,0x73D1,/* 0xE0-0xE7 */
+	0x73B7,0x73B3,0x73C0,0x73C9,0x73C8,0x73E5,0x73D9,0x987C,/* 0xE8-0xEF */
+	0x740A,0x73E9,0x73E7,0x73DE,0x73BA,0x73F2,0x740F,0x742A,/* 0xF0-0xF7 */
+	0x745B,0x7426,0x7425,0x7428,0x7430,0x742E,0x742C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x942F,0x9430,0x9431,0x9432,0x9433,0x9434,0x9435,0x9436,/* 0x40-0x47 */
+	0x9437,0x9438,0x9439,0x943A,0x943B,0x943C,0x943D,0x943F,/* 0x48-0x4F */
+	0x9440,0x9441,0x9442,0x9443,0x9444,0x9445,0x9446,0x9447,/* 0x50-0x57 */
+	0x9448,0x9449,0x944A,0x944B,0x944C,0x944D,0x944E,0x944F,/* 0x58-0x5F */
+	0x9450,0x9451,0x9452,0x9453,0x9454,0x9455,0x9456,0x9457,/* 0x60-0x67 */
+	0x9458,0x9459,0x945A,0x945B,0x945C,0x945D,0x945E,0x945F,/* 0x68-0x6F */
+	0x9460,0x9461,0x9462,0x9463,0x9464,0x9465,0x9466,0x9467,/* 0x70-0x77 */
+	0x9468,0x9469,0x946A,0x946C,0x946D,0x946E,0x946F,0x0000,/* 0x78-0x7F */
+
+	0x9470,0x9471,0x9472,0x9473,0x9474,0x9475,0x9476,0x9477,/* 0x80-0x87 */
+	0x9478,0x9479,0x947A,0x947B,0x947C,0x947D,0x947E,0x947F,/* 0x88-0x8F */
+	0x9480,0x9481,0x9482,0x9483,0x9484,0x9491,0x9496,0x9498,/* 0x90-0x97 */
+	0x94C7,0x94CF,0x94D3,0x94D4,0x94DA,0x94E6,0x94FB,0x951C,/* 0x98-0x9F */
+	0x9520,0x741B,0x741A,0x7441,0x745C,0x7457,0x7455,0x7459,/* 0xA0-0xA7 */
+	0x7477,0x746D,0x747E,0x749C,0x748E,0x7480,0x7481,0x7487,/* 0xA8-0xAF */
+	0x748B,0x749E,0x74A8,0x74A9,0x7490,0x74A7,0x74D2,0x74BA,/* 0xB0-0xB7 */
+	0x97EA,0x97EB,0x97EC,0x674C,0x6753,0x675E,0x6748,0x6769,/* 0xB8-0xBF */
+	0x67A5,0x6787,0x676A,0x6773,0x6798,0x67A7,0x6775,0x67A8,/* 0xC0-0xC7 */
+	0x679E,0x67AD,0x678B,0x6777,0x677C,0x67F0,0x6809,0x67D8,/* 0xC8-0xCF */
+	0x680A,0x67E9,0x67B0,0x680C,0x67D9,0x67B5,0x67DA,0x67B3,/* 0xD0-0xD7 */
+	0x67DD,0x6800,0x67C3,0x67B8,0x67E2,0x680E,0x67C1,0x67FD,/* 0xD8-0xDF */
+	0x6832,0x6833,0x6860,0x6861,0x684E,0x6862,0x6844,0x6864,/* 0xE0-0xE7 */
+	0x6883,0x681D,0x6855,0x6866,0x6841,0x6867,0x6840,0x683E,/* 0xE8-0xEF */
+	0x684A,0x6849,0x6829,0x68B5,0x688F,0x6874,0x6877,0x6893,/* 0xF0-0xF7 */
+	0x686B,0x68C2,0x696E,0x68FC,0x691F,0x6920,0x68F9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9527,0x9533,0x953D,0x9543,0x9548,0x954B,0x9555,0x955A,/* 0x40-0x47 */
+	0x9560,0x956E,0x9574,0x9575,0x9577,0x9578,0x9579,0x957A,/* 0x48-0x4F */
+	0x957B,0x957C,0x957D,0x957E,0x9580,0x9581,0x9582,0x9583,/* 0x50-0x57 */
+	0x9584,0x9585,0x9586,0x9587,0x9588,0x9589,0x958A,0x958B,/* 0x58-0x5F */
+	0x958C,0x958D,0x958E,0x958F,0x9590,0x9591,0x9592,0x9593,/* 0x60-0x67 */
+	0x9594,0x9595,0x9596,0x9597,0x9598,0x9599,0x959A,0x959B,/* 0x68-0x6F */
+	0x959C,0x959D,0x959E,0x959F,0x95A0,0x95A1,0x95A2,0x95A3,/* 0x70-0x77 */
+	0x95A4,0x95A5,0x95A6,0x95A7,0x95A8,0x95A9,0x95AA,0x0000,/* 0x78-0x7F */
+
+	0x95AB,0x95AC,0x95AD,0x95AE,0x95AF,0x95B0,0x95B1,0x95B2,/* 0x80-0x87 */
+	0x95B3,0x95B4,0x95B5,0x95B6,0x95B7,0x95B8,0x95B9,0x95BA,/* 0x88-0x8F */
+	0x95BB,0x95BC,0x95BD,0x95BE,0x95BF,0x95C0,0x95C1,0x95C2,/* 0x90-0x97 */
+	0x95C3,0x95C4,0x95C5,0x95C6,0x95C7,0x95C8,0x95C9,0x95CA,/* 0x98-0x9F */
+	0x95CB,0x6924,0x68F0,0x690B,0x6901,0x6957,0x68E3,0x6910,/* 0xA0-0xA7 */
+	0x6971,0x6939,0x6960,0x6942,0x695D,0x6984,0x696B,0x6980,/* 0xA8-0xAF */
+	0x6998,0x6978,0x6934,0x69CC,0x6987,0x6988,0x69CE,0x6989,/* 0xB0-0xB7 */
+	0x6966,0x6963,0x6979,0x699B,0x69A7,0x69BB,0x69AB,0x69AD,/* 0xB8-0xBF */
+	0x69D4,0x69B1,0x69C1,0x69CA,0x69DF,0x6995,0x69E0,0x698D,/* 0xC0-0xC7 */
+	0x69FF,0x6A2F,0x69ED,0x6A17,0x6A18,0x6A65,0x69F2,0x6A44,/* 0xC8-0xCF */
+	0x6A3E,0x6AA0,0x6A50,0x6A5B,0x6A35,0x6A8E,0x6A79,0x6A3D,/* 0xD0-0xD7 */
+	0x6A28,0x6A58,0x6A7C,0x6A91,0x6A90,0x6AA9,0x6A97,0x6AAB,/* 0xD8-0xDF */
+	0x7337,0x7352,0x6B81,0x6B82,0x6B87,0x6B84,0x6B92,0x6B93,/* 0xE0-0xE7 */
+	0x6B8D,0x6B9A,0x6B9B,0x6BA1,0x6BAA,0x8F6B,0x8F6D,0x8F71,/* 0xE8-0xEF */
+	0x8F72,0x8F73,0x8F75,0x8F76,0x8F78,0x8F77,0x8F79,0x8F7A,/* 0xF0-0xF7 */
+	0x8F7C,0x8F7E,0x8F81,0x8F82,0x8F84,0x8F87,0x8F8B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x95CC,0x95CD,0x95CE,0x95CF,0x95D0,0x95D1,0x95D2,0x95D3,/* 0x40-0x47 */
+	0x95D4,0x95D5,0x95D6,0x95D7,0x95D8,0x95D9,0x95DA,0x95DB,/* 0x48-0x4F */
+	0x95DC,0x95DD,0x95DE,0x95DF,0x95E0,0x95E1,0x95E2,0x95E3,/* 0x50-0x57 */
+	0x95E4,0x95E5,0x95E6,0x95E7,0x95EC,0x95FF,0x9607,0x9613,/* 0x58-0x5F */
+	0x9618,0x961B,0x961E,0x9620,0x9623,0x9624,0x9625,0x9626,/* 0x60-0x67 */
+	0x9627,0x9628,0x9629,0x962B,0x962C,0x962D,0x962F,0x9630,/* 0x68-0x6F */
+	0x9637,0x9638,0x9639,0x963A,0x963E,0x9641,0x9643,0x964A,/* 0x70-0x77 */
+	0x964E,0x964F,0x9651,0x9652,0x9653,0x9656,0x9657,0x0000,/* 0x78-0x7F */
+
+	0x9658,0x9659,0x965A,0x965C,0x965D,0x965E,0x9660,0x9663,/* 0x80-0x87 */
+	0x9665,0x9666,0x966B,0x966D,0x966E,0x966F,0x9670,0x9671,/* 0x88-0x8F */
+	0x9673,0x9678,0x9679,0x967A,0x967B,0x967C,0x967D,0x967E,/* 0x90-0x97 */
+	0x967F,0x9680,0x9681,0x9682,0x9683,0x9684,0x9687,0x9689,/* 0x98-0x9F */
+	0x968A,0x8F8D,0x8F8E,0x8F8F,0x8F98,0x8F9A,0x8ECE,0x620B,/* 0xA0-0xA7 */
+	0x6217,0x621B,0x621F,0x6222,0x6221,0x6225,0x6224,0x622C,/* 0xA8-0xAF */
+	0x81E7,0x74EF,0x74F4,0x74FF,0x750F,0x7511,0x7513,0x6534,/* 0xB0-0xB7 */
+	0x65EE,0x65EF,0x65F0,0x660A,0x6619,0x6772,0x6603,0x6615,/* 0xB8-0xBF */
+	0x6600,0x7085,0x66F7,0x661D,0x6634,0x6631,0x6636,0x6635,/* 0xC0-0xC7 */
+	0x8006,0x665F,0x6654,0x6641,0x664F,0x6656,0x6661,0x6657,/* 0xC8-0xCF */
+	0x6677,0x6684,0x668C,0x66A7,0x669D,0x66BE,0x66DB,0x66DC,/* 0xD0-0xD7 */
+	0x66E6,0x66E9,0x8D32,0x8D33,0x8D36,0x8D3B,0x8D3D,0x8D40,/* 0xD8-0xDF */
+	0x8D45,0x8D46,0x8D48,0x8D49,0x8D47,0x8D4D,0x8D55,0x8D59,/* 0xE0-0xE7 */
+	0x89C7,0x89CA,0x89CB,0x89CC,0x89CE,0x89CF,0x89D0,0x89D1,/* 0xE8-0xEF */
+	0x726E,0x729F,0x725D,0x7266,0x726F,0x727E,0x727F,0x7284,/* 0xF0-0xF7 */
+	0x728B,0x728D,0x728F,0x7292,0x6308,0x6332,0x63B0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x968C,0x968E,0x9691,0x9692,0x9693,0x9695,0x9696,0x969A,/* 0x40-0x47 */
+	0x969B,0x969D,0x969E,0x969F,0x96A0,0x96A1,0x96A2,0x96A3,/* 0x48-0x4F */
+	0x96A4,0x96A5,0x96A6,0x96A8,0x96A9,0x96AA,0x96AB,0x96AC,/* 0x50-0x57 */
+	0x96AD,0x96AE,0x96AF,0x96B1,0x96B2,0x96B4,0x96B5,0x96B7,/* 0x58-0x5F */
+	0x96B8,0x96BA,0x96BB,0x96BF,0x96C2,0x96C3,0x96C8,0x96CA,/* 0x60-0x67 */
+	0x96CB,0x96D0,0x96D1,0x96D3,0x96D4,0x96D6,0x96D7,0x96D8,/* 0x68-0x6F */
+	0x96D9,0x96DA,0x96DB,0x96DC,0x96DD,0x96DE,0x96DF,0x96E1,/* 0x70-0x77 */
+	0x96E2,0x96E3,0x96E4,0x96E5,0x96E6,0x96E7,0x96EB,0x0000,/* 0x78-0x7F */
+
+	0x96EC,0x96ED,0x96EE,0x96F0,0x96F1,0x96F2,0x96F4,0x96F5,/* 0x80-0x87 */
+	0x96F8,0x96FA,0x96FB,0x96FC,0x96FD,0x96FF,0x9702,0x9703,/* 0x88-0x8F */
+	0x9705,0x970A,0x970B,0x970C,0x9710,0x9711,0x9712,0x9714,/* 0x90-0x97 */
+	0x9715,0x9717,0x9718,0x9719,0x971A,0x971B,0x971D,0x971F,/* 0x98-0x9F */
+	0x9720,0x643F,0x64D8,0x8004,0x6BEA,0x6BF3,0x6BFD,0x6BF5,/* 0xA0-0xA7 */
+	0x6BF9,0x6C05,0x6C07,0x6C06,0x6C0D,0x6C15,0x6C18,0x6C19,/* 0xA8-0xAF */
+	0x6C1A,0x6C21,0x6C29,0x6C24,0x6C2A,0x6C32,0x6535,0x6555,/* 0xB0-0xB7 */
+	0x656B,0x724D,0x7252,0x7256,0x7230,0x8662,0x5216,0x809F,/* 0xB8-0xBF */
+	0x809C,0x8093,0x80BC,0x670A,0x80BD,0x80B1,0x80AB,0x80AD,/* 0xC0-0xC7 */
+	0x80B4,0x80B7,0x80E7,0x80E8,0x80E9,0x80EA,0x80DB,0x80C2,/* 0xC8-0xCF */
+	0x80C4,0x80D9,0x80CD,0x80D7,0x6710,0x80DD,0x80EB,0x80F1,/* 0xD0-0xD7 */
+	0x80F4,0x80ED,0x810D,0x810E,0x80F2,0x80FC,0x6715,0x8112,/* 0xD8-0xDF */
+	0x8C5A,0x8136,0x811E,0x812C,0x8118,0x8132,0x8148,0x814C,/* 0xE0-0xE7 */
+	0x8153,0x8174,0x8159,0x815A,0x8171,0x8160,0x8169,0x817C,/* 0xE8-0xEF */
+	0x817D,0x816D,0x8167,0x584D,0x5AB5,0x8188,0x8182,0x8191,/* 0xF0-0xF7 */
+	0x6ED5,0x81A3,0x81AA,0x81CC,0x6726,0x81CA,0x81BB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9721,0x9722,0x9723,0x9724,0x9725,0x9726,0x9727,0x9728,/* 0x40-0x47 */
+	0x9729,0x972B,0x972C,0x972E,0x972F,0x9731,0x9733,0x9734,/* 0x48-0x4F */
+	0x9735,0x9736,0x9737,0x973A,0x973B,0x973C,0x973D,0x973F,/* 0x50-0x57 */
+	0x9740,0x9741,0x9742,0x9743,0x9744,0x9745,0x9746,0x9747,/* 0x58-0x5F */
+	0x9748,0x9749,0x974A,0x974B,0x974C,0x974D,0x974E,0x974F,/* 0x60-0x67 */
+	0x9750,0x9751,0x9754,0x9755,0x9757,0x9758,0x975A,0x975C,/* 0x68-0x6F */
+	0x975D,0x975F,0x9763,0x9764,0x9766,0x9767,0x9768,0x976A,/* 0x70-0x77 */
+	0x976B,0x976C,0x976D,0x976E,0x976F,0x9770,0x9771,0x0000,/* 0x78-0x7F */
+
+	0x9772,0x9775,0x9777,0x9778,0x9779,0x977A,0x977B,0x977D,/* 0x80-0x87 */
+	0x977E,0x977F,0x9780,0x9781,0x9782,0x9783,0x9784,0x9786,/* 0x88-0x8F */
+	0x9787,0x9788,0x9789,0x978A,0x978C,0x978E,0x978F,0x9790,/* 0x90-0x97 */
+	0x9793,0x9795,0x9796,0x9797,0x9799,0x979A,0x979B,0x979C,/* 0x98-0x9F */
+	0x979D,0x81C1,0x81A6,0x6B24,0x6B37,0x6B39,0x6B43,0x6B46,/* 0xA0-0xA7 */
+	0x6B59,0x98D1,0x98D2,0x98D3,0x98D5,0x98D9,0x98DA,0x6BB3,/* 0xA8-0xAF */
+	0x5F40,0x6BC2,0x89F3,0x6590,0x9F51,0x6593,0x65BC,0x65C6,/* 0xB0-0xB7 */
+	0x65C4,0x65C3,0x65CC,0x65CE,0x65D2,0x65D6,0x7080,0x709C,/* 0xB8-0xBF */
+	0x7096,0x709D,0x70BB,0x70C0,0x70B7,0x70AB,0x70B1,0x70E8,/* 0xC0-0xC7 */
+	0x70CA,0x7110,0x7113,0x7116,0x712F,0x7131,0x7173,0x715C,/* 0xC8-0xCF */
+	0x7168,0x7145,0x7172,0x714A,0x7178,0x717A,0x7198,0x71B3,/* 0xD0-0xD7 */
+	0x71B5,0x71A8,0x71A0,0x71E0,0x71D4,0x71E7,0x71F9,0x721D,/* 0xD8-0xDF */
+	0x7228,0x706C,0x7118,0x7166,0x71B9,0x623E,0x623D,0x6243,/* 0xE0-0xE7 */
+	0x6248,0x6249,0x793B,0x7940,0x7946,0x7949,0x795B,0x795C,/* 0xE8-0xEF */
+	0x7953,0x795A,0x7962,0x7957,0x7960,0x796F,0x7967,0x797A,/* 0xF0-0xF7 */
+	0x7985,0x798A,0x799A,0x79A7,0x79B3,0x5FD1,0x5FD0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_ED[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x979E,0x979F,0x97A1,0x97A2,0x97A4,0x97A5,0x97A6,0x97A7,/* 0x40-0x47 */
+	0x97A8,0x97A9,0x97AA,0x97AC,0x97AE,0x97B0,0x97B1,0x97B3,/* 0x48-0x4F */
+	0x97B5,0x97B6,0x97B7,0x97B8,0x97B9,0x97BA,0x97BB,0x97BC,/* 0x50-0x57 */
+	0x97BD,0x97BE,0x97BF,0x97C0,0x97C1,0x97C2,0x97C3,0x97C4,/* 0x58-0x5F */
+	0x97C5,0x97C6,0x97C7,0x97C8,0x97C9,0x97CA,0x97CB,0x97CC,/* 0x60-0x67 */
+	0x97CD,0x97CE,0x97CF,0x97D0,0x97D1,0x97D2,0x97D3,0x97D4,/* 0x68-0x6F */
+	0x97D5,0x97D6,0x97D7,0x97D8,0x97D9,0x97DA,0x97DB,0x97DC,/* 0x70-0x77 */
+	0x97DD,0x97DE,0x97DF,0x97E0,0x97E1,0x97E2,0x97E3,0x0000,/* 0x78-0x7F */
+
+	0x97E4,0x97E5,0x97E8,0x97EE,0x97EF,0x97F0,0x97F1,0x97F2,/* 0x80-0x87 */
+	0x97F4,0x97F7,0x97F8,0x97F9,0x97FA,0x97FB,0x97FC,0x97FD,/* 0x88-0x8F */
+	0x97FE,0x97FF,0x9800,0x9801,0x9802,0x9803,0x9804,0x9805,/* 0x90-0x97 */
+	0x9806,0x9807,0x9808,0x9809,0x980A,0x980B,0x980C,0x980D,/* 0x98-0x9F */
+	0x980E,0x603C,0x605D,0x605A,0x6067,0x6041,0x6059,0x6063,/* 0xA0-0xA7 */
+	0x60AB,0x6106,0x610D,0x615D,0x61A9,0x619D,0x61CB,0x61D1,/* 0xA8-0xAF */
+	0x6206,0x8080,0x807F,0x6C93,0x6CF6,0x6DFC,0x77F6,0x77F8,/* 0xB0-0xB7 */
+	0x7800,0x7809,0x7817,0x7818,0x7811,0x65AB,0x782D,0x781C,/* 0xB8-0xBF */
+	0x781D,0x7839,0x783A,0x783B,0x781F,0x783C,0x7825,0x782C,/* 0xC0-0xC7 */
+	0x7823,0x7829,0x784E,0x786D,0x7856,0x7857,0x7826,0x7850,/* 0xC8-0xCF */
+	0x7847,0x784C,0x786A,0x789B,0x7893,0x789A,0x7887,0x789C,/* 0xD0-0xD7 */
+	0x78A1,0x78A3,0x78B2,0x78B9,0x78A5,0x78D4,0x78D9,0x78C9,/* 0xD8-0xDF */
+	0x78EC,0x78F2,0x7905,0x78F4,0x7913,0x7924,0x791E,0x7934,/* 0xE0-0xE7 */
+	0x9F9B,0x9EF9,0x9EFB,0x9EFC,0x76F1,0x7704,0x770D,0x76F9,/* 0xE8-0xEF */
+	0x7707,0x7708,0x771A,0x7722,0x7719,0x772D,0x7726,0x7735,/* 0xF0-0xF7 */
+	0x7738,0x7750,0x7751,0x7747,0x7743,0x775A,0x7768,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x980F,0x9810,0x9811,0x9812,0x9813,0x9814,0x9815,0x9816,/* 0x40-0x47 */
+	0x9817,0x9818,0x9819,0x981A,0x981B,0x981C,0x981D,0x981E,/* 0x48-0x4F */
+	0x981F,0x9820,0x9821,0x9822,0x9823,0x9824,0x9825,0x9826,/* 0x50-0x57 */
+	0x9827,0x9828,0x9829,0x982A,0x982B,0x982C,0x982D,0x982E,/* 0x58-0x5F */
+	0x982F,0x9830,0x9831,0x9832,0x9833,0x9834,0x9835,0x9836,/* 0x60-0x67 */
+	0x9837,0x9838,0x9839,0x983A,0x983B,0x983C,0x983D,0x983E,/* 0x68-0x6F */
+	0x983F,0x9840,0x9841,0x9842,0x9843,0x9844,0x9845,0x9846,/* 0x70-0x77 */
+	0x9847,0x9848,0x9849,0x984A,0x984B,0x984C,0x984D,0x0000,/* 0x78-0x7F */
+
+	0x984E,0x984F,0x9850,0x9851,0x9852,0x9853,0x9854,0x9855,/* 0x80-0x87 */
+	0x9856,0x9857,0x9858,0x9859,0x985A,0x985B,0x985C,0x985D,/* 0x88-0x8F */
+	0x985E,0x985F,0x9860,0x9861,0x9862,0x9863,0x9864,0x9865,/* 0x90-0x97 */
+	0x9866,0x9867,0x9868,0x9869,0x986A,0x986B,0x986C,0x986D,/* 0x98-0x9F */
+	0x986E,0x7762,0x7765,0x777F,0x778D,0x777D,0x7780,0x778C,/* 0xA0-0xA7 */
+	0x7791,0x779F,0x77A0,0x77B0,0x77B5,0x77BD,0x753A,0x7540,/* 0xA8-0xAF */
+	0x754E,0x754B,0x7548,0x755B,0x7572,0x7579,0x7583,0x7F58,/* 0xB0-0xB7 */
+	0x7F61,0x7F5F,0x8A48,0x7F68,0x7F74,0x7F71,0x7F79,0x7F81,/* 0xB8-0xBF */
+	0x7F7E,0x76CD,0x76E5,0x8832,0x9485,0x9486,0x9487,0x948B,/* 0xC0-0xC7 */
+	0x948A,0x948C,0x948D,0x948F,0x9490,0x9494,0x9497,0x9495,/* 0xC8-0xCF */
+	0x949A,0x949B,0x949C,0x94A3,0x94A4,0x94AB,0x94AA,0x94AD,/* 0xD0-0xD7 */
+	0x94AC,0x94AF,0x94B0,0x94B2,0x94B4,0x94B6,0x94B7,0x94B8,/* 0xD8-0xDF */
+	0x94B9,0x94BA,0x94BC,0x94BD,0x94BF,0x94C4,0x94C8,0x94C9,/* 0xE0-0xE7 */
+	0x94CA,0x94CB,0x94CC,0x94CD,0x94CE,0x94D0,0x94D1,0x94D2,/* 0xE8-0xEF */
+	0x94D5,0x94D6,0x94D7,0x94D9,0x94D8,0x94DB,0x94DE,0x94DF,/* 0xF0-0xF7 */
+	0x94E0,0x94E2,0x94E4,0x94E5,0x94E7,0x94E8,0x94EA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x986F,0x9870,0x9871,0x9872,0x9873,0x9874,0x988B,0x988E,/* 0x40-0x47 */
+	0x9892,0x9895,0x9899,0x98A3,0x98A8,0x98A9,0x98AA,0x98AB,/* 0x48-0x4F */
+	0x98AC,0x98AD,0x98AE,0x98AF,0x98B0,0x98B1,0x98B2,0x98B3,/* 0x50-0x57 */
+	0x98B4,0x98B5,0x98B6,0x98B7,0x98B8,0x98B9,0x98BA,0x98BB,/* 0x58-0x5F */
+	0x98BC,0x98BD,0x98BE,0x98BF,0x98C0,0x98C1,0x98C2,0x98C3,/* 0x60-0x67 */
+	0x98C4,0x98C5,0x98C6,0x98C7,0x98C8,0x98C9,0x98CA,0x98CB,/* 0x68-0x6F */
+	0x98CC,0x98CD,0x98CF,0x98D0,0x98D4,0x98D6,0x98D7,0x98DB,/* 0x70-0x77 */
+	0x98DC,0x98DD,0x98E0,0x98E1,0x98E2,0x98E3,0x98E4,0x0000,/* 0x78-0x7F */
+
+	0x98E5,0x98E6,0x98E9,0x98EA,0x98EB,0x98EC,0x98ED,0x98EE,/* 0x80-0x87 */
+	0x98EF,0x98F0,0x98F1,0x98F2,0x98F3,0x98F4,0x98F5,0x98F6,/* 0x88-0x8F */
+	0x98F7,0x98F8,0x98F9,0x98FA,0x98FB,0x98FC,0x98FD,0x98FE,/* 0x90-0x97 */
+	0x98FF,0x9900,0x9901,0x9902,0x9903,0x9904,0x9905,0x9906,/* 0x98-0x9F */
+	0x9907,0x94E9,0x94EB,0x94EE,0x94EF,0x94F3,0x94F4,0x94F5,/* 0xA0-0xA7 */
+	0x94F7,0x94F9,0x94FC,0x94FD,0x94FF,0x9503,0x9502,0x9506,/* 0xA8-0xAF */
+	0x9507,0x9509,0x950A,0x950D,0x950E,0x950F,0x9512,0x9513,/* 0xB0-0xB7 */
+	0x9514,0x9515,0x9516,0x9518,0x951B,0x951D,0x951E,0x951F,/* 0xB8-0xBF */
+	0x9522,0x952A,0x952B,0x9529,0x952C,0x9531,0x9532,0x9534,/* 0xC0-0xC7 */
+	0x9536,0x9537,0x9538,0x953C,0x953E,0x953F,0x9542,0x9535,/* 0xC8-0xCF */
+	0x9544,0x9545,0x9546,0x9549,0x954C,0x954E,0x954F,0x9552,/* 0xD0-0xD7 */
+	0x9553,0x9554,0x9556,0x9557,0x9558,0x9559,0x955B,0x955E,/* 0xD8-0xDF */
+	0x955F,0x955D,0x9561,0x9562,0x9564,0x9565,0x9566,0x9567,/* 0xE0-0xE7 */
+	0x9568,0x9569,0x956A,0x956B,0x956C,0x956F,0x9571,0x9572,/* 0xE8-0xEF */
+	0x9573,0x953A,0x77E7,0x77EC,0x96C9,0x79D5,0x79ED,0x79E3,/* 0xF0-0xF7 */
+	0x79EB,0x7A06,0x5D47,0x7A03,0x7A02,0x7A1E,0x7A14,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9908,0x9909,0x990A,0x990B,0x990C,0x990E,0x990F,0x9911,/* 0x40-0x47 */
+	0x9912,0x9913,0x9914,0x9915,0x9916,0x9917,0x9918,0x9919,/* 0x48-0x4F */
+	0x991A,0x991B,0x991C,0x991D,0x991E,0x991F,0x9920,0x9921,/* 0x50-0x57 */
+	0x9922,0x9923,0x9924,0x9925,0x9926,0x9927,0x9928,0x9929,/* 0x58-0x5F */
+	0x992A,0x992B,0x992C,0x992D,0x992F,0x9930,0x9931,0x9932,/* 0x60-0x67 */
+	0x9933,0x9934,0x9935,0x9936,0x9937,0x9938,0x9939,0x993A,/* 0x68-0x6F */
+	0x993B,0x993C,0x993D,0x993E,0x993F,0x9940,0x9941,0x9942,/* 0x70-0x77 */
+	0x9943,0x9944,0x9945,0x9946,0x9947,0x9948,0x9949,0x0000,/* 0x78-0x7F */
+
+	0x994A,0x994B,0x994C,0x994D,0x994E,0x994F,0x9950,0x9951,/* 0x80-0x87 */
+	0x9952,0x9953,0x9956,0x9957,0x9958,0x9959,0x995A,0x995B,/* 0x88-0x8F */
+	0x995C,0x995D,0x995E,0x995F,0x9960,0x9961,0x9962,0x9964,/* 0x90-0x97 */
+	0x9966,0x9973,0x9978,0x9979,0x997B,0x997E,0x9982,0x9983,/* 0x98-0x9F */
+	0x9989,0x7A39,0x7A37,0x7A51,0x9ECF,0x99A5,0x7A70,0x7688,/* 0xA0-0xA7 */
+	0x768E,0x7693,0x7699,0x76A4,0x74DE,0x74E0,0x752C,0x9E20,/* 0xA8-0xAF */
+	0x9E22,0x9E28,0x9E29,0x9E2A,0x9E2B,0x9E2C,0x9E32,0x9E31,/* 0xB0-0xB7 */
+	0x9E36,0x9E38,0x9E37,0x9E39,0x9E3A,0x9E3E,0x9E41,0x9E42,/* 0xB8-0xBF */
+	0x9E44,0x9E46,0x9E47,0x9E48,0x9E49,0x9E4B,0x9E4C,0x9E4E,/* 0xC0-0xC7 */
+	0x9E51,0x9E55,0x9E57,0x9E5A,0x9E5B,0x9E5C,0x9E5E,0x9E63,/* 0xC8-0xCF */
+	0x9E66,0x9E67,0x9E68,0x9E69,0x9E6A,0x9E6B,0x9E6C,0x9E71,/* 0xD0-0xD7 */
+	0x9E6D,0x9E73,0x7592,0x7594,0x7596,0x75A0,0x759D,0x75AC,/* 0xD8-0xDF */
+	0x75A3,0x75B3,0x75B4,0x75B8,0x75C4,0x75B1,0x75B0,0x75C3,/* 0xE0-0xE7 */
+	0x75C2,0x75D6,0x75CD,0x75E3,0x75E8,0x75E6,0x75E4,0x75EB,/* 0xE8-0xEF */
+	0x75E7,0x7603,0x75F1,0x75FC,0x75FF,0x7610,0x7600,0x7605,/* 0xF0-0xF7 */
+	0x760C,0x7617,0x760A,0x7625,0x7618,0x7615,0x7619,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x998C,0x998E,0x999A,0x999B,0x999C,0x999D,0x999E,0x999F,/* 0x40-0x47 */
+	0x99A0,0x99A1,0x99A2,0x99A3,0x99A4,0x99A6,0x99A7,0x99A9,/* 0x48-0x4F */
+	0x99AA,0x99AB,0x99AC,0x99AD,0x99AE,0x99AF,0x99B0,0x99B1,/* 0x50-0x57 */
+	0x99B2,0x99B3,0x99B4,0x99B5,0x99B6,0x99B7,0x99B8,0x99B9,/* 0x58-0x5F */
+	0x99BA,0x99BB,0x99BC,0x99BD,0x99BE,0x99BF,0x99C0,0x99C1,/* 0x60-0x67 */
+	0x99C2,0x99C3,0x99C4,0x99C5,0x99C6,0x99C7,0x99C8,0x99C9,/* 0x68-0x6F */
+	0x99CA,0x99CB,0x99CC,0x99CD,0x99CE,0x99CF,0x99D0,0x99D1,/* 0x70-0x77 */
+	0x99D2,0x99D3,0x99D4,0x99D5,0x99D6,0x99D7,0x99D8,0x0000,/* 0x78-0x7F */
+
+	0x99D9,0x99DA,0x99DB,0x99DC,0x99DD,0x99DE,0x99DF,0x99E0,/* 0x80-0x87 */
+	0x99E1,0x99E2,0x99E3,0x99E4,0x99E5,0x99E6,0x99E7,0x99E8,/* 0x88-0x8F */
+	0x99E9,0x99EA,0x99EB,0x99EC,0x99ED,0x99EE,0x99EF,0x99F0,/* 0x90-0x97 */
+	0x99F1,0x99F2,0x99F3,0x99F4,0x99F5,0x99F6,0x99F7,0x99F8,/* 0x98-0x9F */
+	0x99F9,0x761B,0x763C,0x7622,0x7620,0x7640,0x762D,0x7630,/* 0xA0-0xA7 */
+	0x763F,0x7635,0x7643,0x763E,0x7633,0x764D,0x765E,0x7654,/* 0xA8-0xAF */
+	0x765C,0x7656,0x766B,0x766F,0x7FCA,0x7AE6,0x7A78,0x7A79,/* 0xB0-0xB7 */
+	0x7A80,0x7A86,0x7A88,0x7A95,0x7AA6,0x7AA0,0x7AAC,0x7AA8,/* 0xB8-0xBF */
+	0x7AAD,0x7AB3,0x8864,0x8869,0x8872,0x887D,0x887F,0x8882,/* 0xC0-0xC7 */
+	0x88A2,0x88C6,0x88B7,0x88BC,0x88C9,0x88E2,0x88CE,0x88E3,/* 0xC8-0xCF */
+	0x88E5,0x88F1,0x891A,0x88FC,0x88E8,0x88FE,0x88F0,0x8921,/* 0xD0-0xD7 */
+	0x8919,0x8913,0x891B,0x890A,0x8934,0x892B,0x8936,0x8941,/* 0xD8-0xDF */
+	0x8966,0x897B,0x758B,0x80E5,0x76B2,0x76B4,0x77DC,0x8012,/* 0xE0-0xE7 */
+	0x8014,0x8016,0x801C,0x8020,0x8022,0x8025,0x8026,0x8027,/* 0xE8-0xEF */
+	0x8029,0x8028,0x8031,0x800B,0x8035,0x8043,0x8046,0x804D,/* 0xF0-0xF7 */
+	0x8052,0x8069,0x8071,0x8983,0x9878,0x9880,0x9883,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x99FA,0x99FB,0x99FC,0x99FD,0x99FE,0x99FF,0x9A00,0x9A01,/* 0x40-0x47 */
+	0x9A02,0x9A03,0x9A04,0x9A05,0x9A06,0x9A07,0x9A08,0x9A09,/* 0x48-0x4F */
+	0x9A0A,0x9A0B,0x9A0C,0x9A0D,0x9A0E,0x9A0F,0x9A10,0x9A11,/* 0x50-0x57 */
+	0x9A12,0x9A13,0x9A14,0x9A15,0x9A16,0x9A17,0x9A18,0x9A19,/* 0x58-0x5F */
+	0x9A1A,0x9A1B,0x9A1C,0x9A1D,0x9A1E,0x9A1F,0x9A20,0x9A21,/* 0x60-0x67 */
+	0x9A22,0x9A23,0x9A24,0x9A25,0x9A26,0x9A27,0x9A28,0x9A29,/* 0x68-0x6F */
+	0x9A2A,0x9A2B,0x9A2C,0x9A2D,0x9A2E,0x9A2F,0x9A30,0x9A31,/* 0x70-0x77 */
+	0x9A32,0x9A33,0x9A34,0x9A35,0x9A36,0x9A37,0x9A38,0x0000,/* 0x78-0x7F */
+
+	0x9A39,0x9A3A,0x9A3B,0x9A3C,0x9A3D,0x9A3E,0x9A3F,0x9A40,/* 0x80-0x87 */
+	0x9A41,0x9A42,0x9A43,0x9A44,0x9A45,0x9A46,0x9A47,0x9A48,/* 0x88-0x8F */
+	0x9A49,0x9A4A,0x9A4B,0x9A4C,0x9A4D,0x9A4E,0x9A4F,0x9A50,/* 0x90-0x97 */
+	0x9A51,0x9A52,0x9A53,0x9A54,0x9A55,0x9A56,0x9A57,0x9A58,/* 0x98-0x9F */
+	0x9A59,0x9889,0x988C,0x988D,0x988F,0x9894,0x989A,0x989B,/* 0xA0-0xA7 */
+	0x989E,0x989F,0x98A1,0x98A2,0x98A5,0x98A6,0x864D,0x8654,/* 0xA8-0xAF */
+	0x866C,0x866E,0x867F,0x867A,0x867C,0x867B,0x86A8,0x868D,/* 0xB0-0xB7 */
+	0x868B,0x86AC,0x869D,0x86A7,0x86A3,0x86AA,0x8693,0x86A9,/* 0xB8-0xBF */
+	0x86B6,0x86C4,0x86B5,0x86CE,0x86B0,0x86BA,0x86B1,0x86AF,/* 0xC0-0xC7 */
+	0x86C9,0x86CF,0x86B4,0x86E9,0x86F1,0x86F2,0x86ED,0x86F3,/* 0xC8-0xCF */
+	0x86D0,0x8713,0x86DE,0x86F4,0x86DF,0x86D8,0x86D1,0x8703,/* 0xD0-0xD7 */
+	0x8707,0x86F8,0x8708,0x870A,0x870D,0x8709,0x8723,0x873B,/* 0xD8-0xDF */
+	0x871E,0x8725,0x872E,0x871A,0x873E,0x8748,0x8734,0x8731,/* 0xE0-0xE7 */
+	0x8729,0x8737,0x873F,0x8782,0x8722,0x877D,0x877E,0x877B,/* 0xE8-0xEF */
+	0x8760,0x8770,0x874C,0x876E,0x878B,0x8753,0x8763,0x877C,/* 0xF0-0xF7 */
+	0x8764,0x8759,0x8765,0x8793,0x87AF,0x87A8,0x87D2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9A5A,0x9A5B,0x9A5C,0x9A5D,0x9A5E,0x9A5F,0x9A60,0x9A61,/* 0x40-0x47 */
+	0x9A62,0x9A63,0x9A64,0x9A65,0x9A66,0x9A67,0x9A68,0x9A69,/* 0x48-0x4F */
+	0x9A6A,0x9A6B,0x9A72,0x9A83,0x9A89,0x9A8D,0x9A8E,0x9A94,/* 0x50-0x57 */
+	0x9A95,0x9A99,0x9AA6,0x9AA9,0x9AAA,0x9AAB,0x9AAC,0x9AAD,/* 0x58-0x5F */
+	0x9AAE,0x9AAF,0x9AB2,0x9AB3,0x9AB4,0x9AB5,0x9AB9,0x9ABB,/* 0x60-0x67 */
+	0x9ABD,0x9ABE,0x9ABF,0x9AC3,0x9AC4,0x9AC6,0x9AC7,0x9AC8,/* 0x68-0x6F */
+	0x9AC9,0x9ACA,0x9ACD,0x9ACE,0x9ACF,0x9AD0,0x9AD2,0x9AD4,/* 0x70-0x77 */
+	0x9AD5,0x9AD6,0x9AD7,0x9AD9,0x9ADA,0x9ADB,0x9ADC,0x0000,/* 0x78-0x7F */
+
+	0x9ADD,0x9ADE,0x9AE0,0x9AE2,0x9AE3,0x9AE4,0x9AE5,0x9AE7,/* 0x80-0x87 */
+	0x9AE8,0x9AE9,0x9AEA,0x9AEC,0x9AEE,0x9AF0,0x9AF1,0x9AF2,/* 0x88-0x8F */
+	0x9AF3,0x9AF4,0x9AF5,0x9AF6,0x9AF7,0x9AF8,0x9AFA,0x9AFC,/* 0x90-0x97 */
+	0x9AFD,0x9AFE,0x9AFF,0x9B00,0x9B01,0x9B02,0x9B04,0x9B05,/* 0x98-0x9F */
+	0x9B06,0x87C6,0x8788,0x8785,0x87AD,0x8797,0x8783,0x87AB,/* 0xA0-0xA7 */
+	0x87E5,0x87AC,0x87B5,0x87B3,0x87CB,0x87D3,0x87BD,0x87D1,/* 0xA8-0xAF */
+	0x87C0,0x87CA,0x87DB,0x87EA,0x87E0,0x87EE,0x8816,0x8813,/* 0xB0-0xB7 */
+	0x87FE,0x880A,0x881B,0x8821,0x8839,0x883C,0x7F36,0x7F42,/* 0xB8-0xBF */
+	0x7F44,0x7F45,0x8210,0x7AFA,0x7AFD,0x7B08,0x7B03,0x7B04,/* 0xC0-0xC7 */
+	0x7B15,0x7B0A,0x7B2B,0x7B0F,0x7B47,0x7B38,0x7B2A,0x7B19,/* 0xC8-0xCF */
+	0x7B2E,0x7B31,0x7B20,0x7B25,0x7B24,0x7B33,0x7B3E,0x7B1E,/* 0xD0-0xD7 */
+	0x7B58,0x7B5A,0x7B45,0x7B75,0x7B4C,0x7B5D,0x7B60,0x7B6E,/* 0xD8-0xDF */
+	0x7B7B,0x7B62,0x7B72,0x7B71,0x7B90,0x7BA6,0x7BA7,0x7BB8,/* 0xE0-0xE7 */
+	0x7BAC,0x7B9D,0x7BA8,0x7B85,0x7BAA,0x7B9C,0x7BA2,0x7BAB,/* 0xE8-0xEF */
+	0x7BB4,0x7BD1,0x7BC1,0x7BCC,0x7BDD,0x7BDA,0x7BE5,0x7BE6,/* 0xF0-0xF7 */
+	0x7BEA,0x7C0C,0x7BFE,0x7BFC,0x7C0F,0x7C16,0x7C0B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9B07,0x9B09,0x9B0A,0x9B0B,0x9B0C,0x9B0D,0x9B0E,0x9B10,/* 0x40-0x47 */
+	0x9B11,0x9B12,0x9B14,0x9B15,0x9B16,0x9B17,0x9B18,0x9B19,/* 0x48-0x4F */
+	0x9B1A,0x9B1B,0x9B1C,0x9B1D,0x9B1E,0x9B20,0x9B21,0x9B22,/* 0x50-0x57 */
+	0x9B24,0x9B25,0x9B26,0x9B27,0x9B28,0x9B29,0x9B2A,0x9B2B,/* 0x58-0x5F */
+	0x9B2C,0x9B2D,0x9B2E,0x9B30,0x9B31,0x9B33,0x9B34,0x9B35,/* 0x60-0x67 */
+	0x9B36,0x9B37,0x9B38,0x9B39,0x9B3A,0x9B3D,0x9B3E,0x9B3F,/* 0x68-0x6F */
+	0x9B40,0x9B46,0x9B4A,0x9B4B,0x9B4C,0x9B4E,0x9B50,0x9B52,/* 0x70-0x77 */
+	0x9B53,0x9B55,0x9B56,0x9B57,0x9B58,0x9B59,0x9B5A,0x0000,/* 0x78-0x7F */
+
+	0x9B5B,0x9B5C,0x9B5D,0x9B5E,0x9B5F,0x9B60,0x9B61,0x9B62,/* 0x80-0x87 */
+	0x9B63,0x9B64,0x9B65,0x9B66,0x9B67,0x9B68,0x9B69,0x9B6A,/* 0x88-0x8F */
+	0x9B6B,0x9B6C,0x9B6D,0x9B6E,0x9B6F,0x9B70,0x9B71,0x9B72,/* 0x90-0x97 */
+	0x9B73,0x9B74,0x9B75,0x9B76,0x9B77,0x9B78,0x9B79,0x9B7A,/* 0x98-0x9F */
+	0x9B7B,0x7C1F,0x7C2A,0x7C26,0x7C38,0x7C41,0x7C40,0x81FE,/* 0xA0-0xA7 */
+	0x8201,0x8202,0x8204,0x81EC,0x8844,0x8221,0x8222,0x8223,/* 0xA8-0xAF */
+	0x822D,0x822F,0x8228,0x822B,0x8238,0x823B,0x8233,0x8234,/* 0xB0-0xB7 */
+	0x823E,0x8244,0x8249,0x824B,0x824F,0x825A,0x825F,0x8268,/* 0xB8-0xBF */
+	0x887E,0x8885,0x8888,0x88D8,0x88DF,0x895E,0x7F9D,0x7F9F,/* 0xC0-0xC7 */
+	0x7FA7,0x7FAF,0x7FB0,0x7FB2,0x7C7C,0x6549,0x7C91,0x7C9D,/* 0xC8-0xCF */
+	0x7C9C,0x7C9E,0x7CA2,0x7CB2,0x7CBC,0x7CBD,0x7CC1,0x7CC7,/* 0xD0-0xD7 */
+	0x7CCC,0x7CCD,0x7CC8,0x7CC5,0x7CD7,0x7CE8,0x826E,0x66A8,/* 0xD8-0xDF */
+	0x7FBF,0x7FCE,0x7FD5,0x7FE5,0x7FE1,0x7FE6,0x7FE9,0x7FEE,/* 0xE0-0xE7 */
+	0x7FF3,0x7CF8,0x7D77,0x7DA6,0x7DAE,0x7E47,0x7E9B,0x9EB8,/* 0xE8-0xEF */
+	0x9EB4,0x8D73,0x8D84,0x8D94,0x8D91,0x8DB1,0x8D67,0x8D6D,/* 0xF0-0xF7 */
+	0x8C47,0x8C49,0x914A,0x9150,0x914E,0x914F,0x9164,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9B7C,0x9B7D,0x9B7E,0x9B7F,0x9B80,0x9B81,0x9B82,0x9B83,/* 0x40-0x47 */
+	0x9B84,0x9B85,0x9B86,0x9B87,0x9B88,0x9B89,0x9B8A,0x9B8B,/* 0x48-0x4F */
+	0x9B8C,0x9B8D,0x9B8E,0x9B8F,0x9B90,0x9B91,0x9B92,0x9B93,/* 0x50-0x57 */
+	0x9B94,0x9B95,0x9B96,0x9B97,0x9B98,0x9B99,0x9B9A,0x9B9B,/* 0x58-0x5F */
+	0x9B9C,0x9B9D,0x9B9E,0x9B9F,0x9BA0,0x9BA1,0x9BA2,0x9BA3,/* 0x60-0x67 */
+	0x9BA4,0x9BA5,0x9BA6,0x9BA7,0x9BA8,0x9BA9,0x9BAA,0x9BAB,/* 0x68-0x6F */
+	0x9BAC,0x9BAD,0x9BAE,0x9BAF,0x9BB0,0x9BB1,0x9BB2,0x9BB3,/* 0x70-0x77 */
+	0x9BB4,0x9BB5,0x9BB6,0x9BB7,0x9BB8,0x9BB9,0x9BBA,0x0000,/* 0x78-0x7F */
+
+	0x9BBB,0x9BBC,0x9BBD,0x9BBE,0x9BBF,0x9BC0,0x9BC1,0x9BC2,/* 0x80-0x87 */
+	0x9BC3,0x9BC4,0x9BC5,0x9BC6,0x9BC7,0x9BC8,0x9BC9,0x9BCA,/* 0x88-0x8F */
+	0x9BCB,0x9BCC,0x9BCD,0x9BCE,0x9BCF,0x9BD0,0x9BD1,0x9BD2,/* 0x90-0x97 */
+	0x9BD3,0x9BD4,0x9BD5,0x9BD6,0x9BD7,0x9BD8,0x9BD9,0x9BDA,/* 0x98-0x9F */
+	0x9BDB,0x9162,0x9161,0x9170,0x9169,0x916F,0x917D,0x917E,/* 0xA0-0xA7 */
+	0x9172,0x9174,0x9179,0x918C,0x9185,0x9190,0x918D,0x9191,/* 0xA8-0xAF */
+	0x91A2,0x91A3,0x91AA,0x91AD,0x91AE,0x91AF,0x91B5,0x91B4,/* 0xB0-0xB7 */
+	0x91BA,0x8C55,0x9E7E,0x8DB8,0x8DEB,0x8E05,0x8E59,0x8E69,/* 0xB8-0xBF */
+	0x8DB5,0x8DBF,0x8DBC,0x8DBA,0x8DC4,0x8DD6,0x8DD7,0x8DDA,/* 0xC0-0xC7 */
+	0x8DDE,0x8DCE,0x8DCF,0x8DDB,0x8DC6,0x8DEC,0x8DF7,0x8DF8,/* 0xC8-0xCF */
+	0x8DE3,0x8DF9,0x8DFB,0x8DE4,0x8E09,0x8DFD,0x8E14,0x8E1D,/* 0xD0-0xD7 */
+	0x8E1F,0x8E2C,0x8E2E,0x8E23,0x8E2F,0x8E3A,0x8E40,0x8E39,/* 0xD8-0xDF */
+	0x8E35,0x8E3D,0x8E31,0x8E49,0x8E41,0x8E42,0x8E51,0x8E52,/* 0xE0-0xE7 */
+	0x8E4A,0x8E70,0x8E76,0x8E7C,0x8E6F,0x8E74,0x8E85,0x8E8F,/* 0xE8-0xEF */
+	0x8E94,0x8E90,0x8E9C,0x8E9E,0x8C78,0x8C82,0x8C8A,0x8C85,/* 0xF0-0xF7 */
+	0x8C98,0x8C94,0x659B,0x89D6,0x89DE,0x89DA,0x89DC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9BDC,0x9BDD,0x9BDE,0x9BDF,0x9BE0,0x9BE1,0x9BE2,0x9BE3,/* 0x40-0x47 */
+	0x9BE4,0x9BE5,0x9BE6,0x9BE7,0x9BE8,0x9BE9,0x9BEA,0x9BEB,/* 0x48-0x4F */
+	0x9BEC,0x9BED,0x9BEE,0x9BEF,0x9BF0,0x9BF1,0x9BF2,0x9BF3,/* 0x50-0x57 */
+	0x9BF4,0x9BF5,0x9BF6,0x9BF7,0x9BF8,0x9BF9,0x9BFA,0x9BFB,/* 0x58-0x5F */
+	0x9BFC,0x9BFD,0x9BFE,0x9BFF,0x9C00,0x9C01,0x9C02,0x9C03,/* 0x60-0x67 */
+	0x9C04,0x9C05,0x9C06,0x9C07,0x9C08,0x9C09,0x9C0A,0x9C0B,/* 0x68-0x6F */
+	0x9C0C,0x9C0D,0x9C0E,0x9C0F,0x9C10,0x9C11,0x9C12,0x9C13,/* 0x70-0x77 */
+	0x9C14,0x9C15,0x9C16,0x9C17,0x9C18,0x9C19,0x9C1A,0x0000,/* 0x78-0x7F */
+
+	0x9C1B,0x9C1C,0x9C1D,0x9C1E,0x9C1F,0x9C20,0x9C21,0x9C22,/* 0x80-0x87 */
+	0x9C23,0x9C24,0x9C25,0x9C26,0x9C27,0x9C28,0x9C29,0x9C2A,/* 0x88-0x8F */
+	0x9C2B,0x9C2C,0x9C2D,0x9C2E,0x9C2F,0x9C30,0x9C31,0x9C32,/* 0x90-0x97 */
+	0x9C33,0x9C34,0x9C35,0x9C36,0x9C37,0x9C38,0x9C39,0x9C3A,/* 0x98-0x9F */
+	0x9C3B,0x89E5,0x89EB,0x89EF,0x8A3E,0x8B26,0x9753,0x96E9,/* 0xA0-0xA7 */
+	0x96F3,0x96EF,0x9706,0x9701,0x9708,0x970F,0x970E,0x972A,/* 0xA8-0xAF */
+	0x972D,0x9730,0x973E,0x9F80,0x9F83,0x9F85,0x9F86,0x9F87,/* 0xB0-0xB7 */
+	0x9F88,0x9F89,0x9F8A,0x9F8C,0x9EFE,0x9F0B,0x9F0D,0x96B9,/* 0xB8-0xBF */
+	0x96BC,0x96BD,0x96CE,0x96D2,0x77BF,0x96E0,0x928E,0x92AE,/* 0xC0-0xC7 */
+	0x92C8,0x933E,0x936A,0x93CA,0x938F,0x943E,0x946B,0x9C7F,/* 0xC8-0xCF */
+	0x9C82,0x9C85,0x9C86,0x9C87,0x9C88,0x7A23,0x9C8B,0x9C8E,/* 0xD0-0xD7 */
+	0x9C90,0x9C91,0x9C92,0x9C94,0x9C95,0x9C9A,0x9C9B,0x9C9E,/* 0xD8-0xDF */
+	0x9C9F,0x9CA0,0x9CA1,0x9CA2,0x9CA3,0x9CA5,0x9CA6,0x9CA7,/* 0xE0-0xE7 */
+	0x9CA8,0x9CA9,0x9CAB,0x9CAD,0x9CAE,0x9CB0,0x9CB1,0x9CB2,/* 0xE8-0xEF */
+	0x9CB3,0x9CB4,0x9CB5,0x9CB6,0x9CB7,0x9CBA,0x9CBB,0x9CBC,/* 0xF0-0xF7 */
+	0x9CBD,0x9CC4,0x9CC5,0x9CC6,0x9CC7,0x9CCA,0x9CCB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9C3C,0x9C3D,0x9C3E,0x9C3F,0x9C40,0x9C41,0x9C42,0x9C43,/* 0x40-0x47 */
+	0x9C44,0x9C45,0x9C46,0x9C47,0x9C48,0x9C49,0x9C4A,0x9C4B,/* 0x48-0x4F */
+	0x9C4C,0x9C4D,0x9C4E,0x9C4F,0x9C50,0x9C51,0x9C52,0x9C53,/* 0x50-0x57 */
+	0x9C54,0x9C55,0x9C56,0x9C57,0x9C58,0x9C59,0x9C5A,0x9C5B,/* 0x58-0x5F */
+	0x9C5C,0x9C5D,0x9C5E,0x9C5F,0x9C60,0x9C61,0x9C62,0x9C63,/* 0x60-0x67 */
+	0x9C64,0x9C65,0x9C66,0x9C67,0x9C68,0x9C69,0x9C6A,0x9C6B,/* 0x68-0x6F */
+	0x9C6C,0x9C6D,0x9C6E,0x9C6F,0x9C70,0x9C71,0x9C72,0x9C73,/* 0x70-0x77 */
+	0x9C74,0x9C75,0x9C76,0x9C77,0x9C78,0x9C79,0x9C7A,0x0000,/* 0x78-0x7F */
+
+	0x9C7B,0x9C7D,0x9C7E,0x9C80,0x9C83,0x9C84,0x9C89,0x9C8A,/* 0x80-0x87 */
+	0x9C8C,0x9C8F,0x9C93,0x9C96,0x9C97,0x9C98,0x9C99,0x9C9D,/* 0x88-0x8F */
+	0x9CAA,0x9CAC,0x9CAF,0x9CB9,0x9CBE,0x9CBF,0x9CC0,0x9CC1,/* 0x90-0x97 */
+	0x9CC2,0x9CC8,0x9CC9,0x9CD1,0x9CD2,0x9CDA,0x9CDB,0x9CE0,/* 0x98-0x9F */
+	0x9CE1,0x9CCC,0x9CCD,0x9CCE,0x9CCF,0x9CD0,0x9CD3,0x9CD4,/* 0xA0-0xA7 */
+	0x9CD5,0x9CD7,0x9CD8,0x9CD9,0x9CDC,0x9CDD,0x9CDF,0x9CE2,/* 0xA8-0xAF */
+	0x977C,0x9785,0x9791,0x9792,0x9794,0x97AF,0x97AB,0x97A3,/* 0xB0-0xB7 */
+	0x97B2,0x97B4,0x9AB1,0x9AB0,0x9AB7,0x9E58,0x9AB6,0x9ABA,/* 0xB8-0xBF */
+	0x9ABC,0x9AC1,0x9AC0,0x9AC5,0x9AC2,0x9ACB,0x9ACC,0x9AD1,/* 0xC0-0xC7 */
+	0x9B45,0x9B43,0x9B47,0x9B49,0x9B48,0x9B4D,0x9B51,0x98E8,/* 0xC8-0xCF */
+	0x990D,0x992E,0x9955,0x9954,0x9ADF,0x9AE1,0x9AE6,0x9AEF,/* 0xD0-0xD7 */
+	0x9AEB,0x9AFB,0x9AED,0x9AF9,0x9B08,0x9B0F,0x9B13,0x9B1F,/* 0xD8-0xDF */
+	0x9B23,0x9EBD,0x9EBE,0x7E3B,0x9E82,0x9E87,0x9E88,0x9E8B,/* 0xE0-0xE7 */
+	0x9E92,0x93D6,0x9E9D,0x9E9F,0x9EDB,0x9EDC,0x9EDD,0x9EE0,/* 0xE8-0xEF */
+	0x9EDF,0x9EE2,0x9EE9,0x9EE7,0x9EE5,0x9EEA,0x9EEF,0x9F22,/* 0xF0-0xF7 */
+	0x9F2C,0x9F2F,0x9F39,0x9F37,0x9F3D,0x9F3E,0x9F44,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9CE3,0x9CE4,0x9CE5,0x9CE6,0x9CE7,0x9CE8,0x9CE9,0x9CEA,/* 0x40-0x47 */
+	0x9CEB,0x9CEC,0x9CED,0x9CEE,0x9CEF,0x9CF0,0x9CF1,0x9CF2,/* 0x48-0x4F */
+	0x9CF3,0x9CF4,0x9CF5,0x9CF6,0x9CF7,0x9CF8,0x9CF9,0x9CFA,/* 0x50-0x57 */
+	0x9CFB,0x9CFC,0x9CFD,0x9CFE,0x9CFF,0x9D00,0x9D01,0x9D02,/* 0x58-0x5F */
+	0x9D03,0x9D04,0x9D05,0x9D06,0x9D07,0x9D08,0x9D09,0x9D0A,/* 0x60-0x67 */
+	0x9D0B,0x9D0C,0x9D0D,0x9D0E,0x9D0F,0x9D10,0x9D11,0x9D12,/* 0x68-0x6F */
+	0x9D13,0x9D14,0x9D15,0x9D16,0x9D17,0x9D18,0x9D19,0x9D1A,/* 0x70-0x77 */
+	0x9D1B,0x9D1C,0x9D1D,0x9D1E,0x9D1F,0x9D20,0x9D21,0x0000,/* 0x78-0x7F */
+
+	0x9D22,0x9D23,0x9D24,0x9D25,0x9D26,0x9D27,0x9D28,0x9D29,/* 0x80-0x87 */
+	0x9D2A,0x9D2B,0x9D2C,0x9D2D,0x9D2E,0x9D2F,0x9D30,0x9D31,/* 0x88-0x8F */
+	0x9D32,0x9D33,0x9D34,0x9D35,0x9D36,0x9D37,0x9D38,0x9D39,/* 0x90-0x97 */
+	0x9D3A,0x9D3B,0x9D3C,0x9D3D,0x9D3E,0x9D3F,0x9D40,0x9D41,/* 0x98-0x9F */
+	0x9D42,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_F9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9D43,0x9D44,0x9D45,0x9D46,0x9D47,0x9D48,0x9D49,0x9D4A,/* 0x40-0x47 */
+	0x9D4B,0x9D4C,0x9D4D,0x9D4E,0x9D4F,0x9D50,0x9D51,0x9D52,/* 0x48-0x4F */
+	0x9D53,0x9D54,0x9D55,0x9D56,0x9D57,0x9D58,0x9D59,0x9D5A,/* 0x50-0x57 */
+	0x9D5B,0x9D5C,0x9D5D,0x9D5E,0x9D5F,0x9D60,0x9D61,0x9D62,/* 0x58-0x5F */
+	0x9D63,0x9D64,0x9D65,0x9D66,0x9D67,0x9D68,0x9D69,0x9D6A,/* 0x60-0x67 */
+	0x9D6B,0x9D6C,0x9D6D,0x9D6E,0x9D6F,0x9D70,0x9D71,0x9D72,/* 0x68-0x6F */
+	0x9D73,0x9D74,0x9D75,0x9D76,0x9D77,0x9D78,0x9D79,0x9D7A,/* 0x70-0x77 */
+	0x9D7B,0x9D7C,0x9D7D,0x9D7E,0x9D7F,0x9D80,0x9D81,0x0000,/* 0x78-0x7F */
+
+	0x9D82,0x9D83,0x9D84,0x9D85,0x9D86,0x9D87,0x9D88,0x9D89,/* 0x80-0x87 */
+	0x9D8A,0x9D8B,0x9D8C,0x9D8D,0x9D8E,0x9D8F,0x9D90,0x9D91,/* 0x88-0x8F */
+	0x9D92,0x9D93,0x9D94,0x9D95,0x9D96,0x9D97,0x9D98,0x9D99,/* 0x90-0x97 */
+	0x9D9A,0x9D9B,0x9D9C,0x9D9D,0x9D9E,0x9D9F,0x9DA0,0x9DA1,/* 0x98-0x9F */
+	0x9DA2,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_FA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9DA3,0x9DA4,0x9DA5,0x9DA6,0x9DA7,0x9DA8,0x9DA9,0x9DAA,/* 0x40-0x47 */
+	0x9DAB,0x9DAC,0x9DAD,0x9DAE,0x9DAF,0x9DB0,0x9DB1,0x9DB2,/* 0x48-0x4F */
+	0x9DB3,0x9DB4,0x9DB5,0x9DB6,0x9DB7,0x9DB8,0x9DB9,0x9DBA,/* 0x50-0x57 */
+	0x9DBB,0x9DBC,0x9DBD,0x9DBE,0x9DBF,0x9DC0,0x9DC1,0x9DC2,/* 0x58-0x5F */
+	0x9DC3,0x9DC4,0x9DC5,0x9DC6,0x9DC7,0x9DC8,0x9DC9,0x9DCA,/* 0x60-0x67 */
+	0x9DCB,0x9DCC,0x9DCD,0x9DCE,0x9DCF,0x9DD0,0x9DD1,0x9DD2,/* 0x68-0x6F */
+	0x9DD3,0x9DD4,0x9DD5,0x9DD6,0x9DD7,0x9DD8,0x9DD9,0x9DDA,/* 0x70-0x77 */
+	0x9DDB,0x9DDC,0x9DDD,0x9DDE,0x9DDF,0x9DE0,0x9DE1,0x0000,/* 0x78-0x7F */
+
+	0x9DE2,0x9DE3,0x9DE4,0x9DE5,0x9DE6,0x9DE7,0x9DE8,0x9DE9,/* 0x80-0x87 */
+	0x9DEA,0x9DEB,0x9DEC,0x9DED,0x9DEE,0x9DEF,0x9DF0,0x9DF1,/* 0x88-0x8F */
+	0x9DF2,0x9DF3,0x9DF4,0x9DF5,0x9DF6,0x9DF7,0x9DF8,0x9DF9,/* 0x90-0x97 */
+	0x9DFA,0x9DFB,0x9DFC,0x9DFD,0x9DFE,0x9DFF,0x9E00,0x9E01,/* 0x98-0x9F */
+	0x9E02,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_FB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9E03,0x9E04,0x9E05,0x9E06,0x9E07,0x9E08,0x9E09,0x9E0A,/* 0x40-0x47 */
+	0x9E0B,0x9E0C,0x9E0D,0x9E0E,0x9E0F,0x9E10,0x9E11,0x9E12,/* 0x48-0x4F */
+	0x9E13,0x9E14,0x9E15,0x9E16,0x9E17,0x9E18,0x9E19,0x9E1A,/* 0x50-0x57 */
+	0x9E1B,0x9E1C,0x9E1D,0x9E1E,0x9E24,0x9E27,0x9E2E,0x9E30,/* 0x58-0x5F */
+	0x9E34,0x9E3B,0x9E3C,0x9E40,0x9E4D,0x9E50,0x9E52,0x9E53,/* 0x60-0x67 */
+	0x9E54,0x9E56,0x9E59,0x9E5D,0x9E5F,0x9E60,0x9E61,0x9E62,/* 0x68-0x6F */
+	0x9E65,0x9E6E,0x9E6F,0x9E72,0x9E74,0x9E75,0x9E76,0x9E77,/* 0x70-0x77 */
+	0x9E78,0x9E79,0x9E7A,0x9E7B,0x9E7C,0x9E7D,0x9E80,0x0000,/* 0x78-0x7F */
+
+	0x9E81,0x9E83,0x9E84,0x9E85,0x9E86,0x9E89,0x9E8A,0x9E8C,/* 0x80-0x87 */
+	0x9E8D,0x9E8E,0x9E8F,0x9E90,0x9E91,0x9E94,0x9E95,0x9E96,/* 0x88-0x8F */
+	0x9E97,0x9E98,0x9E99,0x9E9A,0x9E9B,0x9E9C,0x9E9E,0x9EA0,/* 0x90-0x97 */
+	0x9EA1,0x9EA2,0x9EA3,0x9EA4,0x9EA5,0x9EA7,0x9EA8,0x9EA9,/* 0x98-0x9F */
+	0x9EAA,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_FC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9EAB,0x9EAC,0x9EAD,0x9EAE,0x9EAF,0x9EB0,0x9EB1,0x9EB2,/* 0x40-0x47 */
+	0x9EB3,0x9EB5,0x9EB6,0x9EB7,0x9EB9,0x9EBA,0x9EBC,0x9EBF,/* 0x48-0x4F */
+	0x9EC0,0x9EC1,0x9EC2,0x9EC3,0x9EC5,0x9EC6,0x9EC7,0x9EC8,/* 0x50-0x57 */
+	0x9ECA,0x9ECB,0x9ECC,0x9ED0,0x9ED2,0x9ED3,0x9ED5,0x9ED6,/* 0x58-0x5F */
+	0x9ED7,0x9ED9,0x9EDA,0x9EDE,0x9EE1,0x9EE3,0x9EE4,0x9EE6,/* 0x60-0x67 */
+	0x9EE8,0x9EEB,0x9EEC,0x9EED,0x9EEE,0x9EF0,0x9EF1,0x9EF2,/* 0x68-0x6F */
+	0x9EF3,0x9EF4,0x9EF5,0x9EF6,0x9EF7,0x9EF8,0x9EFA,0x9EFD,/* 0x70-0x77 */
+	0x9EFF,0x9F00,0x9F01,0x9F02,0x9F03,0x9F04,0x9F05,0x0000,/* 0x78-0x7F */
+
+	0x9F06,0x9F07,0x9F08,0x9F09,0x9F0A,0x9F0C,0x9F0F,0x9F11,/* 0x80-0x87 */
+	0x9F12,0x9F14,0x9F15,0x9F16,0x9F18,0x9F1A,0x9F1B,0x9F1C,/* 0x88-0x8F */
+	0x9F1D,0x9F1E,0x9F1F,0x9F21,0x9F23,0x9F24,0x9F25,0x9F26,/* 0x90-0x97 */
+	0x9F27,0x9F28,0x9F29,0x9F2A,0x9F2B,0x9F2D,0x9F2E,0x9F30,/* 0x98-0x9F */
+	0x9F31,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_FD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9F32,0x9F33,0x9F34,0x9F35,0x9F36,0x9F38,0x9F3A,0x9F3C,/* 0x40-0x47 */
+	0x9F3F,0x9F40,0x9F41,0x9F42,0x9F43,0x9F45,0x9F46,0x9F47,/* 0x48-0x4F */
+	0x9F48,0x9F49,0x9F4A,0x9F4B,0x9F4C,0x9F4D,0x9F4E,0x9F4F,/* 0x50-0x57 */
+	0x9F52,0x9F53,0x9F54,0x9F55,0x9F56,0x9F57,0x9F58,0x9F59,/* 0x58-0x5F */
+	0x9F5A,0x9F5B,0x9F5C,0x9F5D,0x9F5E,0x9F5F,0x9F60,0x9F61,/* 0x60-0x67 */
+	0x9F62,0x9F63,0x9F64,0x9F65,0x9F66,0x9F67,0x9F68,0x9F69,/* 0x68-0x6F */
+	0x9F6A,0x9F6B,0x9F6C,0x9F6D,0x9F6E,0x9F6F,0x9F70,0x9F71,/* 0x70-0x77 */
+	0x9F72,0x9F73,0x9F74,0x9F75,0x9F76,0x9F77,0x9F78,0x0000,/* 0x78-0x7F */
+
+	0x9F79,0x9F7A,0x9F7B,0x9F7C,0x9F7D,0x9F7E,0x9F81,0x9F82,/* 0x80-0x87 */
+	0x9F8D,0x9F8E,0x9F8F,0x9F90,0x9F91,0x9F92,0x9F93,0x9F94,/* 0x88-0x8F */
+	0x9F95,0x9F96,0x9F97,0x9F98,0x9F9C,0x9F9D,0x9F9E,0x9FA1,/* 0x90-0x97 */
+	0x9FA2,0x9FA3,0x9FA4,0x9FA5,0xF92C,0xF979,0xF995,0xF9E7,/* 0x98-0x9F */
+	0xF9F1,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_FE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0xFA0C,0xFA0D,0xFA0E,0xFA0F,0xFA11,0xFA13,0xFA14,0xFA18,/* 0x40-0x47 */
+	0xFA1F,0xFA20,0xFA21,0xFA23,0xFA24,0xFA27,0xFA28,0xFA29,/* 0x48-0x4F */
+};
+
+static wchar_t *page_charset2uni[256] = {
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   c2u_81, c2u_82, c2u_83, c2u_84, c2u_85, c2u_86, c2u_87, 
+	c2u_88, c2u_89, c2u_8A, c2u_8B, c2u_8C, c2u_8D, c2u_8E, c2u_8F, 
+	c2u_90, c2u_91, c2u_92, c2u_93, c2u_94, c2u_95, c2u_96, c2u_97, 
+	c2u_98, c2u_99, c2u_9A, c2u_9B, c2u_9C, c2u_9D, c2u_9E, c2u_9F, 
+	c2u_A0, c2u_A1, c2u_A2, c2u_A3, c2u_A4, c2u_A5, c2u_A6, c2u_A7, 
+	c2u_A8, c2u_A9, c2u_AA, c2u_AB, c2u_AC, c2u_AD, c2u_AE, c2u_AF, 
+	c2u_B0, c2u_B1, c2u_B2, c2u_B3, c2u_B4, c2u_B5, c2u_B6, c2u_B7, 
+	c2u_B8, c2u_B9, c2u_BA, c2u_BB, c2u_BC, c2u_BD, c2u_BE, c2u_BF, 
+	c2u_C0, c2u_C1, c2u_C2, c2u_C3, c2u_C4, c2u_C5, c2u_C6, c2u_C7, 
+	c2u_C8, c2u_C9, c2u_CA, c2u_CB, c2u_CC, c2u_CD, c2u_CE, c2u_CF, 
+	c2u_D0, c2u_D1, c2u_D2, c2u_D3, c2u_D4, c2u_D5, c2u_D6, c2u_D7, 
+	c2u_D8, c2u_D9, c2u_DA, c2u_DB, c2u_DC, c2u_DD, c2u_DE, c2u_DF, 
+	c2u_E0, c2u_E1, c2u_E2, c2u_E3, c2u_E4, c2u_E5, c2u_E6, c2u_E7, 
+	c2u_E8, c2u_E9, c2u_EA, c2u_EB, c2u_EC, c2u_ED, c2u_EE, c2u_EF, 
+	c2u_F0, c2u_F1, c2u_F2, c2u_F3, c2u_F4, c2u_F5, c2u_F6, c2u_F7, 
+	c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL,   
+};
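
The table just above is the forward (charset-to-Unicode) index: the lead byte of a double-byte code selects one of the c2u_XX pages, and the trail byte indexes into that page, with 0x0000 marking codes that have no mapping. A minimal sketch of that lookup, assuming this two-level layout; the helper name and signature below are illustrative only, not the driver's actual NLS entry point:

	/* Illustrative helper only: resolve one double-byte code through the
	 * tables above.  'hi' is the lead byte, 'lo' the trail byte; a result
	 * of 0x0000 means the code has no Unicode mapping. */
	static wchar_t dbcs_char2uni(unsigned char hi, unsigned char lo)
	{
		wchar_t *page = page_charset2uni[hi];	/* NULL for invalid lead bytes */

		return page ? page[lo] : 0;
	}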
+
+static unsigned char u2c_01[512] = {
+	0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xA5, 0xA8, 0xA5, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xA7, 0xA8, 0xA7, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xA9, 0xA8, 0xA9, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xA8, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xA8, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xA8, 0xAD, 0xA8, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xB1, 0xA8, 0xB1, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xA1, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xA8, 0xA3, 0xA8, 0xA3, 0xA8, 0xAB, /* 0xCC-0xCF */
+	0xA8, 0xAB, 0xA8, 0xAF, 0xA8, 0xAF, 0xA8, 0xB3, /* 0xD0-0xD3 */
+	0xA8, 0xB3, 0xA8, 0xB5, 0xA8, 0xB5, 0xA8, 0xB6, /* 0xD4-0xD7 */
+	0xA8, 0xB6, 0xA8, 0xB7, 0xA8, 0xB7, 0xA8, 0xB8, /* 0xD8-0xDB */
+	0xA8, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+};
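
The u2c_XX pages beginning here run in the opposite direction: u2c_01 covers Unicode code points 0x0100-0x01FF, storing two output bytes per code point (512 bytes per page), with the pair 0x00,0x00 marking code points that have no mapping in this charset. A sketch of that reverse lookup is shown below; the page_uni2charset index array and the helper name are assumptions for illustration (an index analogous to page_charset2uni[] above), not code quoted from this patch.

/* Assumed index: the high byte of the code point selects a u2c_XX page, e.g. entry 0x01 = u2c_01. */
static const unsigned char *page_uni2charset[256];

static int uni2char_sketch(wchar_t uni, unsigned char *out, int boundlen)
{
	const unsigned char *page = page_uni2charset[(uni >> 8) & 0xFF];
	unsigned int lo = uni & 0xFF;

	if (!page)
		return -1;			/* no page for this 256-code-point block */
	if (page[2 * lo] == 0x00 && page[2 * lo + 1] == 0x00)
		return -1;			/* 0x00,0x00 entry: unmapped code point */
	if (boundlen < 2)
		return -1;			/* output buffer too small for two bytes */
	out[0] = page[2 * lo];			/* first byte of the charset sequence */
	out[1] = page[2 * lo + 1];		/* second byte of the charset sequence */
	return 2;				/* wrote a double-byte sequence */
}
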
+
+static unsigned char u2c_02[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xA8, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xA8, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA6, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xA1, 0xA5, 0xA8, 0x40, 0xA8, 0x41, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xA8, 0x42, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+};
+
+static unsigned char u2c_03[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA6, 0xA1, 0xA6, 0xA2, 0xA6, 0xA3, /* 0x90-0x93 */
+	0xA6, 0xA4, 0xA6, 0xA5, 0xA6, 0xA6, 0xA6, 0xA7, /* 0x94-0x97 */
+	0xA6, 0xA8, 0xA6, 0xA9, 0xA6, 0xAA, 0xA6, 0xAB, /* 0x98-0x9B */
+	0xA6, 0xAC, 0xA6, 0xAD, 0xA6, 0xAE, 0xA6, 0xAF, /* 0x9C-0x9F */
+	0xA6, 0xB0, 0xA6, 0xB1, 0x00, 0x00, 0xA6, 0xB2, /* 0xA0-0xA3 */
+	0xA6, 0xB3, 0xA6, 0xB4, 0xA6, 0xB5, 0xA6, 0xB6, /* 0xA4-0xA7 */
+	0xA6, 0xB7, 0xA6, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xA6, 0xC1, 0xA6, 0xC2, 0xA6, 0xC3, /* 0xB0-0xB3 */
+	0xA6, 0xC4, 0xA6, 0xC5, 0xA6, 0xC6, 0xA6, 0xC7, /* 0xB4-0xB7 */
+	0xA6, 0xC8, 0xA6, 0xC9, 0xA6, 0xCA, 0xA6, 0xCB, /* 0xB8-0xBB */
+	0xA6, 0xCC, 0xA6, 0xCD, 0xA6, 0xCE, 0xA6, 0xCF, /* 0xBC-0xBF */
+	0xA6, 0xD0, 0xA6, 0xD1, 0x00, 0x00, 0xA6, 0xD2, /* 0xC0-0xC3 */
+	0xA6, 0xD3, 0xA6, 0xD4, 0xA6, 0xD5, 0xA6, 0xD6, /* 0xC4-0xC7 */
+	0xA6, 0xD7, 0xA6, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+};
+
+static unsigned char u2c_04[512] = {
+	0x00, 0x00, 0xA7, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xA7, 0xA1, 0xA7, 0xA2, 0xA7, 0xA3, 0xA7, 0xA4, /* 0x10-0x13 */
+	0xA7, 0xA5, 0xA7, 0xA6, 0xA7, 0xA8, 0xA7, 0xA9, /* 0x14-0x17 */
+	0xA7, 0xAA, 0xA7, 0xAB, 0xA7, 0xAC, 0xA7, 0xAD, /* 0x18-0x1B */
+	0xA7, 0xAE, 0xA7, 0xAF, 0xA7, 0xB0, 0xA7, 0xB1, /* 0x1C-0x1F */
+	0xA7, 0xB2, 0xA7, 0xB3, 0xA7, 0xB4, 0xA7, 0xB5, /* 0x20-0x23 */
+	0xA7, 0xB6, 0xA7, 0xB7, 0xA7, 0xB8, 0xA7, 0xB9, /* 0x24-0x27 */
+	0xA7, 0xBA, 0xA7, 0xBB, 0xA7, 0xBC, 0xA7, 0xBD, /* 0x28-0x2B */
+	0xA7, 0xBE, 0xA7, 0xBF, 0xA7, 0xC0, 0xA7, 0xC1, /* 0x2C-0x2F */
+	0xA7, 0xD1, 0xA7, 0xD2, 0xA7, 0xD3, 0xA7, 0xD4, /* 0x30-0x33 */
+	0xA7, 0xD5, 0xA7, 0xD6, 0xA7, 0xD8, 0xA7, 0xD9, /* 0x34-0x37 */
+	0xA7, 0xDA, 0xA7, 0xDB, 0xA7, 0xDC, 0xA7, 0xDD, /* 0x38-0x3B */
+	0xA7, 0xDE, 0xA7, 0xDF, 0xA7, 0xE0, 0xA7, 0xE1, /* 0x3C-0x3F */
+	0xA7, 0xE2, 0xA7, 0xE3, 0xA7, 0xE4, 0xA7, 0xE5, /* 0x40-0x43 */
+	0xA7, 0xE6, 0xA7, 0xE7, 0xA7, 0xE8, 0xA7, 0xE9, /* 0x44-0x47 */
+	0xA7, 0xEA, 0xA7, 0xEB, 0xA7, 0xEC, 0xA7, 0xED, /* 0x48-0x4B */
+	0xA7, 0xEE, 0xA7, 0xEF, 0xA7, 0xF0, 0xA7, 0xF1, /* 0x4C-0x4F */
+	0x00, 0x00, 0xA7, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+};
+
+static unsigned char u2c_20[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xA9, 0x5C, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x43, /* 0x10-0x13 */
+	0xA1, 0xAA, 0xA8, 0x44, 0xA1, 0xAC, 0x00, 0x00, /* 0x14-0x17 */
+	0xA1, 0xAE, 0xA1, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA1, 0xB0, 0xA1, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xA8, 0x45, 0xA1, 0xAD, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA1, 0xEB, 0x00, 0x00, 0xA1, 0xE4, 0xA1, 0xE5, /* 0x30-0x33 */
+	0x00, 0x00, 0xA8, 0x46, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xF9, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xA3, 0xFE, 0x00, 0x00, /* 0x3C-0x3F */
+};
+
+static unsigned char u2c_21[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xE6, /* 0x00-0x03 */
+	0x00, 0x00, 0xA8, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xA8, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xED, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xA9, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA2, 0xF1, 0xA2, 0xF2, 0xA2, 0xF3, 0xA2, 0xF4, /* 0x60-0x63 */
+	0xA2, 0xF5, 0xA2, 0xF6, 0xA2, 0xF7, 0xA2, 0xF8, /* 0x64-0x67 */
+	0xA2, 0xF9, 0xA2, 0xFA, 0xA2, 0xFB, 0xA2, 0xFC, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xA2, 0xA1, 0xA2, 0xA2, 0xA2, 0xA3, 0xA2, 0xA4, /* 0x70-0x73 */
+	0xA2, 0xA5, 0xA2, 0xA6, 0xA2, 0xA7, 0xA2, 0xA8, /* 0x74-0x77 */
+	0xA2, 0xA9, 0xA2, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xA1, 0xFB, 0xA1, 0xFC, 0xA1, 0xFA, 0xA1, 0xFD, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0x49, 0xA8, 0x4A, /* 0x94-0x97 */
+	0xA8, 0x4B, 0xA8, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+};
+
+static unsigned char u2c_22[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xA1, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC7, /* 0x0C-0x0F */
+	0x00, 0x00, 0xA1, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xA8, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xA1, 0xE3, 0x00, 0x00, 0xA1, 0xCC, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xA1, 0xD8, 0xA1, 0xDE, 0xA8, 0x4E, /* 0x1C-0x1F */
+	0xA1, 0xCF, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x4F, /* 0x20-0x23 */
+	0x00, 0x00, 0xA1, 0xCE, 0x00, 0x00, 0xA1, 0xC4, /* 0x24-0x27 */
+	0xA1, 0xC5, 0xA1, 0xC9, 0xA1, 0xC8, 0xA1, 0xD2, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xD3, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xA1, 0xE0, 0xA1, 0xDF, 0xA1, 0xC3, 0xA1, 0xCB, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xA1, 0xAB, 0xA1, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xA1, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xA1, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0x50, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA1, 0xD9, 0xA1, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xA1, 0xDC, 0xA1, 0xDD, 0xA8, 0x51, 0xA8, 0x52, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xDA, 0xA1, 0xDB, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xA8, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xA1, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xA1, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x53, /* 0xBC-0xBF */
+};
+
+static unsigned char u2c_23[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xD0, 0x00, 0x00, /* 0x10-0x13 */
+};
+
+static unsigned char u2c_24[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA2, 0xD9, 0xA2, 0xDA, 0xA2, 0xDB, 0xA2, 0xDC, /* 0x60-0x63 */
+	0xA2, 0xDD, 0xA2, 0xDE, 0xA2, 0xDF, 0xA2, 0xE0, /* 0x64-0x67 */
+	0xA2, 0xE1, 0xA2, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xA2, 0xC5, 0xA2, 0xC6, 0xA2, 0xC7, 0xA2, 0xC8, /* 0x74-0x77 */
+	0xA2, 0xC9, 0xA2, 0xCA, 0xA2, 0xCB, 0xA2, 0xCC, /* 0x78-0x7B */
+	0xA2, 0xCD, 0xA2, 0xCE, 0xA2, 0xCF, 0xA2, 0xD0, /* 0x7C-0x7F */
+	
+	0xA2, 0xD1, 0xA2, 0xD2, 0xA2, 0xD3, 0xA2, 0xD4, /* 0x80-0x83 */
+	0xA2, 0xD5, 0xA2, 0xD6, 0xA2, 0xD7, 0xA2, 0xD8, /* 0x84-0x87 */
+	0xA2, 0xB1, 0xA2, 0xB2, 0xA2, 0xB3, 0xA2, 0xB4, /* 0x88-0x8B */
+	0xA2, 0xB5, 0xA2, 0xB6, 0xA2, 0xB7, 0xA2, 0xB8, /* 0x8C-0x8F */
+	0xA2, 0xB9, 0xA2, 0xBA, 0xA2, 0xBB, 0xA2, 0xBC, /* 0x90-0x93 */
+	0xA2, 0xBD, 0xA2, 0xBE, 0xA2, 0xBF, 0xA2, 0xC0, /* 0x94-0x97 */
+	0xA2, 0xC1, 0xA2, 0xC2, 0xA2, 0xC3, 0xA2, 0xC4, /* 0x98-0x9B */
+};
+
+static unsigned char u2c_25[512] = {
+	0xA9, 0xA4, 0xA9, 0xA5, 0xA9, 0xA6, 0xA9, 0xA7, /* 0x00-0x03 */
+	0xA9, 0xA8, 0xA9, 0xA9, 0xA9, 0xAA, 0xA9, 0xAB, /* 0x04-0x07 */
+	0xA9, 0xAC, 0xA9, 0xAD, 0xA9, 0xAE, 0xA9, 0xAF, /* 0x08-0x0B */
+	0xA9, 0xB0, 0xA9, 0xB1, 0xA9, 0xB2, 0xA9, 0xB3, /* 0x0C-0x0F */
+	0xA9, 0xB4, 0xA9, 0xB5, 0xA9, 0xB6, 0xA9, 0xB7, /* 0x10-0x13 */
+	0xA9, 0xB8, 0xA9, 0xB9, 0xA9, 0xBA, 0xA9, 0xBB, /* 0x14-0x17 */
+	0xA9, 0xBC, 0xA9, 0xBD, 0xA9, 0xBE, 0xA9, 0xBF, /* 0x18-0x1B */
+	0xA9, 0xC0, 0xA9, 0xC1, 0xA9, 0xC2, 0xA9, 0xC3, /* 0x1C-0x1F */
+	0xA9, 0xC4, 0xA9, 0xC5, 0xA9, 0xC6, 0xA9, 0xC7, /* 0x20-0x23 */
+	0xA9, 0xC8, 0xA9, 0xC9, 0xA9, 0xCA, 0xA9, 0xCB, /* 0x24-0x27 */
+	0xA9, 0xCC, 0xA9, 0xCD, 0xA9, 0xCE, 0xA9, 0xCF, /* 0x28-0x2B */
+	0xA9, 0xD0, 0xA9, 0xD1, 0xA9, 0xD2, 0xA9, 0xD3, /* 0x2C-0x2F */
+	0xA9, 0xD4, 0xA9, 0xD5, 0xA9, 0xD6, 0xA9, 0xD7, /* 0x30-0x33 */
+	0xA9, 0xD8, 0xA9, 0xD9, 0xA9, 0xDA, 0xA9, 0xDB, /* 0x34-0x37 */
+	0xA9, 0xDC, 0xA9, 0xDD, 0xA9, 0xDE, 0xA9, 0xDF, /* 0x38-0x3B */
+	0xA9, 0xE0, 0xA9, 0xE1, 0xA9, 0xE2, 0xA9, 0xE3, /* 0x3C-0x3F */
+	0xA9, 0xE4, 0xA9, 0xE5, 0xA9, 0xE6, 0xA9, 0xE7, /* 0x40-0x43 */
+	0xA9, 0xE8, 0xA9, 0xE9, 0xA9, 0xEA, 0xA9, 0xEB, /* 0x44-0x47 */
+	0xA9, 0xEC, 0xA9, 0xED, 0xA9, 0xEE, 0xA9, 0xEF, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xA8, 0x54, 0xA8, 0x55, 0xA8, 0x56, 0xA8, 0x57, /* 0x50-0x53 */
+	0xA8, 0x58, 0xA8, 0x59, 0xA8, 0x5A, 0xA8, 0x5B, /* 0x54-0x57 */
+	0xA8, 0x5C, 0xA8, 0x5D, 0xA8, 0x5E, 0xA8, 0x5F, /* 0x58-0x5B */
+	0xA8, 0x60, 0xA8, 0x61, 0xA8, 0x62, 0xA8, 0x63, /* 0x5C-0x5F */
+	0xA8, 0x64, 0xA8, 0x65, 0xA8, 0x66, 0xA8, 0x67, /* 0x60-0x63 */
+	0xA8, 0x68, 0xA8, 0x69, 0xA8, 0x6A, 0xA8, 0x6B, /* 0x64-0x67 */
+	0xA8, 0x6C, 0xA8, 0x6D, 0xA8, 0x6E, 0xA8, 0x6F, /* 0x68-0x6B */
+	0xA8, 0x70, 0xA8, 0x71, 0xA8, 0x72, 0xA8, 0x73, /* 0x6C-0x6F */
+	0xA8, 0x74, 0xA8, 0x75, 0xA8, 0x76, 0xA8, 0x77, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xA8, 0x78, 0xA8, 0x79, 0xA8, 0x7A, /* 0x80-0x83 */
+	0xA8, 0x7B, 0xA8, 0x7C, 0xA8, 0x7D, 0xA8, 0x7E, /* 0x84-0x87 */
+	0xA8, 0x80, 0xA8, 0x81, 0xA8, 0x82, 0xA8, 0x83, /* 0x88-0x8B */
+	0xA8, 0x84, 0xA8, 0x85, 0xA8, 0x86, 0xA8, 0x87, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x88, /* 0x90-0x93 */
+	0xA8, 0x89, 0xA8, 0x8A, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xA1, 0xF6, 0xA1, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF8, 0xA1, 0xF7, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xA8, 0x8B, 0xA8, 0x8C, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF4, 0xA1, 0xF3, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xF0, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF2, 0xA1, 0xF1, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0x8D, 0xA8, 0x8E, /* 0xE0-0xE3 */
+	0xA8, 0x8F, 0xA8, 0x90, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_26[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xEF, 0xA1, 0xEE, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xA8, 0x91, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xA1, 0xE2, 0x00, 0x00, 0xA1, 0xE1, 0x00, 0x00, /* 0x40-0x43 */
+};
+
+static unsigned char u2c_30[512] = {
+	0xA1, 0xA1, 0xA1, 0xA2, 0xA1, 0xA3, 0xA1, 0xA8, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xA9, 0xA9, 0x65, 0xA9, 0x96, /* 0x04-0x07 */
+	0xA1, 0xB4, 0xA1, 0xB5, 0xA1, 0xB6, 0xA1, 0xB7, /* 0x08-0x0B */
+	0xA1, 0xB8, 0xA1, 0xB9, 0xA1, 0xBA, 0xA1, 0xBB, /* 0x0C-0x0F */
+	0xA1, 0xBE, 0xA1, 0xBF, 0xA8, 0x93, 0xA1, 0xFE, /* 0x10-0x13 */
+	0xA1, 0xB2, 0xA1, 0xB3, 0xA1, 0xBC, 0xA1, 0xBD, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xA8, 0x94, 0xA8, 0x95, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xA9, 0x40, 0xA9, 0x41, 0xA9, 0x42, /* 0x20-0x23 */
+	0xA9, 0x43, 0xA9, 0x44, 0xA9, 0x45, 0xA9, 0x46, /* 0x24-0x27 */
+	0xA9, 0x47, 0xA9, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xA4, 0xA1, 0xA4, 0xA2, 0xA4, 0xA3, /* 0x40-0x43 */
+	0xA4, 0xA4, 0xA4, 0xA5, 0xA4, 0xA6, 0xA4, 0xA7, /* 0x44-0x47 */
+	0xA4, 0xA8, 0xA4, 0xA9, 0xA4, 0xAA, 0xA4, 0xAB, /* 0x48-0x4B */
+	0xA4, 0xAC, 0xA4, 0xAD, 0xA4, 0xAE, 0xA4, 0xAF, /* 0x4C-0x4F */
+	0xA4, 0xB0, 0xA4, 0xB1, 0xA4, 0xB2, 0xA4, 0xB3, /* 0x50-0x53 */
+	0xA4, 0xB4, 0xA4, 0xB5, 0xA4, 0xB6, 0xA4, 0xB7, /* 0x54-0x57 */
+	0xA4, 0xB8, 0xA4, 0xB9, 0xA4, 0xBA, 0xA4, 0xBB, /* 0x58-0x5B */
+	0xA4, 0xBC, 0xA4, 0xBD, 0xA4, 0xBE, 0xA4, 0xBF, /* 0x5C-0x5F */
+	0xA4, 0xC0, 0xA4, 0xC1, 0xA4, 0xC2, 0xA4, 0xC3, /* 0x60-0x63 */
+	0xA4, 0xC4, 0xA4, 0xC5, 0xA4, 0xC6, 0xA4, 0xC7, /* 0x64-0x67 */
+	0xA4, 0xC8, 0xA4, 0xC9, 0xA4, 0xCA, 0xA4, 0xCB, /* 0x68-0x6B */
+	0xA4, 0xCC, 0xA4, 0xCD, 0xA4, 0xCE, 0xA4, 0xCF, /* 0x6C-0x6F */
+	0xA4, 0xD0, 0xA4, 0xD1, 0xA4, 0xD2, 0xA4, 0xD3, /* 0x70-0x73 */
+	0xA4, 0xD4, 0xA4, 0xD5, 0xA4, 0xD6, 0xA4, 0xD7, /* 0x74-0x77 */
+	0xA4, 0xD8, 0xA4, 0xD9, 0xA4, 0xDA, 0xA4, 0xDB, /* 0x78-0x7B */
+	0xA4, 0xDC, 0xA4, 0xDD, 0xA4, 0xDE, 0xA4, 0xDF, /* 0x7C-0x7F */
+	
+	0xA4, 0xE0, 0xA4, 0xE1, 0xA4, 0xE2, 0xA4, 0xE3, /* 0x80-0x83 */
+	0xA4, 0xE4, 0xA4, 0xE5, 0xA4, 0xE6, 0xA4, 0xE7, /* 0x84-0x87 */
+	0xA4, 0xE8, 0xA4, 0xE9, 0xA4, 0xEA, 0xA4, 0xEB, /* 0x88-0x8B */
+	0xA4, 0xEC, 0xA4, 0xED, 0xA4, 0xEE, 0xA4, 0xEF, /* 0x8C-0x8F */
+	0xA4, 0xF0, 0xA4, 0xF1, 0xA4, 0xF2, 0xA4, 0xF3, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA9, 0x61, /* 0x98-0x9B */
+	0xA9, 0x62, 0xA9, 0x66, 0xA9, 0x67, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xA5, 0xA1, 0xA5, 0xA2, 0xA5, 0xA3, /* 0xA0-0xA3 */
+	0xA5, 0xA4, 0xA5, 0xA5, 0xA5, 0xA6, 0xA5, 0xA7, /* 0xA4-0xA7 */
+	0xA5, 0xA8, 0xA5, 0xA9, 0xA5, 0xAA, 0xA5, 0xAB, /* 0xA8-0xAB */
+	0xA5, 0xAC, 0xA5, 0xAD, 0xA5, 0xAE, 0xA5, 0xAF, /* 0xAC-0xAF */
+	0xA5, 0xB0, 0xA5, 0xB1, 0xA5, 0xB2, 0xA5, 0xB3, /* 0xB0-0xB3 */
+	0xA5, 0xB4, 0xA5, 0xB5, 0xA5, 0xB6, 0xA5, 0xB7, /* 0xB4-0xB7 */
+	0xA5, 0xB8, 0xA5, 0xB9, 0xA5, 0xBA, 0xA5, 0xBB, /* 0xB8-0xBB */
+	0xA5, 0xBC, 0xA5, 0xBD, 0xA5, 0xBE, 0xA5, 0xBF, /* 0xBC-0xBF */
+	0xA5, 0xC0, 0xA5, 0xC1, 0xA5, 0xC2, 0xA5, 0xC3, /* 0xC0-0xC3 */
+	0xA5, 0xC4, 0xA5, 0xC5, 0xA5, 0xC6, 0xA5, 0xC7, /* 0xC4-0xC7 */
+	0xA5, 0xC8, 0xA5, 0xC9, 0xA5, 0xCA, 0xA5, 0xCB, /* 0xC8-0xCB */
+	0xA5, 0xCC, 0xA5, 0xCD, 0xA5, 0xCE, 0xA5, 0xCF, /* 0xCC-0xCF */
+	0xA5, 0xD0, 0xA5, 0xD1, 0xA5, 0xD2, 0xA5, 0xD3, /* 0xD0-0xD3 */
+	0xA5, 0xD4, 0xA5, 0xD5, 0xA5, 0xD6, 0xA5, 0xD7, /* 0xD4-0xD7 */
+	0xA5, 0xD8, 0xA5, 0xD9, 0xA5, 0xDA, 0xA5, 0xDB, /* 0xD8-0xDB */
+	0xA5, 0xDC, 0xA5, 0xDD, 0xA5, 0xDE, 0xA5, 0xDF, /* 0xDC-0xDF */
+	0xA5, 0xE0, 0xA5, 0xE1, 0xA5, 0xE2, 0xA5, 0xE3, /* 0xE0-0xE3 */
+	0xA5, 0xE4, 0xA5, 0xE5, 0xA5, 0xE6, 0xA5, 0xE7, /* 0xE4-0xE7 */
+	0xA5, 0xE8, 0xA5, 0xE9, 0xA5, 0xEA, 0xA5, 0xEB, /* 0xE8-0xEB */
+	0xA5, 0xEC, 0xA5, 0xED, 0xA5, 0xEE, 0xA5, 0xEF, /* 0xEC-0xEF */
+	0xA5, 0xF0, 0xA5, 0xF1, 0xA5, 0xF2, 0xA5, 0xF3, /* 0xF0-0xF3 */
+	0xA5, 0xF4, 0xA5, 0xF5, 0xA5, 0xF6, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xA9, 0x60, 0xA9, 0x63, 0xA9, 0x64, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_31[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA8, 0xC5, 0xA8, 0xC6, 0xA8, 0xC7, /* 0x04-0x07 */
+	0xA8, 0xC8, 0xA8, 0xC9, 0xA8, 0xCA, 0xA8, 0xCB, /* 0x08-0x0B */
+	0xA8, 0xCC, 0xA8, 0xCD, 0xA8, 0xCE, 0xA8, 0xCF, /* 0x0C-0x0F */
+	0xA8, 0xD0, 0xA8, 0xD1, 0xA8, 0xD2, 0xA8, 0xD3, /* 0x10-0x13 */
+	0xA8, 0xD4, 0xA8, 0xD5, 0xA8, 0xD6, 0xA8, 0xD7, /* 0x14-0x17 */
+	0xA8, 0xD8, 0xA8, 0xD9, 0xA8, 0xDA, 0xA8, 0xDB, /* 0x18-0x1B */
+	0xA8, 0xDC, 0xA8, 0xDD, 0xA8, 0xDE, 0xA8, 0xDF, /* 0x1C-0x1F */
+	0xA8, 0xE0, 0xA8, 0xE1, 0xA8, 0xE2, 0xA8, 0xE3, /* 0x20-0x23 */
+	0xA8, 0xE4, 0xA8, 0xE5, 0xA8, 0xE6, 0xA8, 0xE7, /* 0x24-0x27 */
+	0xA8, 0xE8, 0xA8, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xBB, 0xB6, 0xFE, /* 0x90-0x93 */
+	0xC8, 0xFD, 0xCB, 0xC4, 0xC9, 0xCF, 0xD6, 0xD0, /* 0x94-0x97 */
+	0xCF, 0xC2, 0xBC, 0xD7, 0xD2, 0xD2, 0xB1, 0xFB, /* 0x98-0x9B */
+	0xB6, 0xA1, 0xCC, 0xEC, 0xB5, 0xD8, 0xC8, 0xCB, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_32[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA2, 0xE5, 0xA2, 0xE6, 0xA2, 0xE7, 0xA2, 0xE8, /* 0x20-0x23 */
+	0xA2, 0xE9, 0xA2, 0xEA, 0xA2, 0xEB, 0xA2, 0xEC, /* 0x24-0x27 */
+	0xA2, 0xED, 0xA2, 0xEE, 0xD4, 0xC2, 0xBB, 0xF0, /* 0x28-0x2B */
+	0xCB, 0xAE, 0xC4, 0xBE, 0xBD, 0xF0, 0xCD, 0xC1, /* 0x2C-0x2F */
+	0xC8, 0xD5, 0xA9, 0x5A, 0xD3, 0xD0, 0xC9, 0xE7, /* 0x30-0x33 */
+	0xC3, 0xFB, 0xCC, 0xD8, 0xB2, 0xC6, 0xD7, 0xA3, /* 0x34-0x37 */
+	0xC0, 0xCD, 0xB4, 0xFA, 0xBA, 0xF4, 0xD1, 0xA7, /* 0x38-0x3B */
+	0xBC, 0xE0, 0xC6, 0xF3, 0xD7, 0xCA, 0xD0, 0xAD, /* 0x3C-0x3F */
+	0xBC, 0xC0, 0xD0, 0xDD, 0xD7, 0xD4, 0xD6, 0xC1, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xD2, 0xBB, 0xB6, 0xFE, 0xC8, 0xFD, 0xCB, 0xC4, /* 0x80-0x83 */
+	0xCE, 0xE5, 0xC1, 0xF9, 0xC6, 0xDF, 0xB0, 0xCB, /* 0x84-0x87 */
+	0xBE, 0xC5, 0xCA, 0xAE, 0xD4, 0xC2, 0xBB, 0xF0, /* 0x88-0x8B */
+	0xCB, 0xAE, 0xC4, 0xBE, 0xBD, 0xF0, 0xCD, 0xC1, /* 0x8C-0x8F */
+	0xC8, 0xD5, 0xD6, 0xEA, 0xD3, 0xD0, 0xC9, 0xE7, /* 0x90-0x93 */
+	0xC3, 0xFB, 0xCC, 0xD8, 0xB2, 0xC6, 0xD7, 0xA3, /* 0x94-0x97 */
+	0xC0, 0xCD, 0xC3, 0xD8, 0xC4, 0xD0, 0xC5, 0xAE, /* 0x98-0x9B */
+	0xCA, 0xCA, 0xD3, 0xC5, 0x00, 0x00, 0xD7, 0xA2, /* 0x9C-0x9F */
+	0xCF, 0xEE, 0xD0, 0xDD, 0xD0, 0xB4, 0xA9, 0x49, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xD2, 0xBD, 0xD7, 0xDA, 0xD1, 0xA7, /* 0xA8-0xAB */
+	0xBC, 0xE0, 0xC6, 0xF3, 0xD7, 0xCA, 0xD0, 0xAD, /* 0xAC-0xAF */
+	0xD2, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+};
+
+static unsigned char u2c_33[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0x4A, 0xA9, 0x4B, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xA9, 0x4C, 0xA9, 0x4D, 0xA9, 0x4E, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xA9, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xA9, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0x51, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xA9, 0x52, 0xA9, 0x53, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xA9, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+};
+
+static unsigned char u2c_4E[512] = {
+	0xD2, 0xBB, 0xB6, 0xA1, 0x81, 0x40, 0xC6, 0xDF, /* 0x00-0x03 */
+	0x81, 0x41, 0x81, 0x42, 0x81, 0x43, 0xCD, 0xF2, /* 0x04-0x07 */
+	0xD5, 0xC9, 0xC8, 0xFD, 0xC9, 0xCF, 0xCF, 0xC2, /* 0x08-0x0B */
+	0xD8, 0xA2, 0xB2, 0xBB, 0xD3, 0xEB, 0x81, 0x44, /* 0x0C-0x0F */
+	0xD8, 0xA4, 0xB3, 0xF3, 0x81, 0x45, 0xD7, 0xA8, /* 0x10-0x13 */
+	0xC7, 0xD2, 0xD8, 0xA7, 0xCA, 0xC0, 0x81, 0x46, /* 0x14-0x17 */
+	0xC7, 0xF0, 0xB1, 0xFB, 0xD2, 0xB5, 0xB4, 0xD4, /* 0x18-0x1B */
+	0xB6, 0xAB, 0xCB, 0xBF, 0xD8, 0xA9, 0x81, 0x47, /* 0x1C-0x1F */
+	0x81, 0x48, 0x81, 0x49, 0xB6, 0xAA, 0x81, 0x4A, /* 0x20-0x23 */
+	0xC1, 0xBD, 0xD1, 0xCF, 0x81, 0x4B, 0xC9, 0xA5, /* 0x24-0x27 */
+	0xD8, 0xAD, 0x81, 0x4C, 0xB8, 0xF6, 0xD1, 0xBE, /* 0x28-0x2B */
+	0xE3, 0xDC, 0xD6, 0xD0, 0x81, 0x4D, 0x81, 0x4E, /* 0x2C-0x2F */
+	0xB7, 0xE1, 0x81, 0x4F, 0xB4, 0xAE, 0x81, 0x50, /* 0x30-0x33 */
+	0xC1, 0xD9, 0x81, 0x51, 0xD8, 0xBC, 0x81, 0x52, /* 0x34-0x37 */
+	0xCD, 0xE8, 0xB5, 0xA4, 0xCE, 0xAA, 0xD6, 0xF7, /* 0x38-0x3B */
+	0x81, 0x53, 0xC0, 0xF6, 0xBE, 0xD9, 0xD8, 0xAF, /* 0x3C-0x3F */
+	0x81, 0x54, 0x81, 0x55, 0x81, 0x56, 0xC4, 0xCB, /* 0x40-0x43 */
+	0x81, 0x57, 0xBE, 0xC3, 0x81, 0x58, 0xD8, 0xB1, /* 0x44-0x47 */
+	0xC3, 0xB4, 0xD2, 0xE5, 0x81, 0x59, 0xD6, 0xAE, /* 0x48-0x4B */
+	0xCE, 0xDA, 0xD5, 0xA7, 0xBA, 0xF5, 0xB7, 0xA6, /* 0x4C-0x4F */
+	0xC0, 0xD6, 0x81, 0x5A, 0xC6, 0xB9, 0xC5, 0xD2, /* 0x50-0x53 */
+	0xC7, 0xC7, 0x81, 0x5B, 0xB9, 0xD4, 0x81, 0x5C, /* 0x54-0x57 */
+	0xB3, 0xCB, 0xD2, 0xD2, 0x81, 0x5D, 0x81, 0x5E, /* 0x58-0x5B */
+	0xD8, 0xBF, 0xBE, 0xC5, 0xC6, 0xF2, 0xD2, 0xB2, /* 0x5C-0x5F */
+	0xCF, 0xB0, 0xCF, 0xE7, 0x81, 0x5F, 0x81, 0x60, /* 0x60-0x63 */
+	0x81, 0x61, 0x81, 0x62, 0xCA, 0xE9, 0x81, 0x63, /* 0x64-0x67 */
+	0x81, 0x64, 0xD8, 0xC0, 0x81, 0x65, 0x81, 0x66, /* 0x68-0x6B */
+	0x81, 0x67, 0x81, 0x68, 0x81, 0x69, 0x81, 0x6A, /* 0x6C-0x6F */
+	0xC2, 0xF2, 0xC2, 0xD2, 0x81, 0x6B, 0xC8, 0xE9, /* 0x70-0x73 */
+	0x81, 0x6C, 0x81, 0x6D, 0x81, 0x6E, 0x81, 0x6F, /* 0x74-0x77 */
+	0x81, 0x70, 0x81, 0x71, 0x81, 0x72, 0x81, 0x73, /* 0x78-0x7B */
+	0x81, 0x74, 0x81, 0x75, 0xC7, 0xAC, 0x81, 0x76, /* 0x7C-0x7F */
+	
+	0x81, 0x77, 0x81, 0x78, 0x81, 0x79, 0x81, 0x7A, /* 0x80-0x83 */
+	0x81, 0x7B, 0x81, 0x7C, 0xC1, 0xCB, 0x81, 0x7D, /* 0x84-0x87 */
+	0xD3, 0xE8, 0xD5, 0xF9, 0x81, 0x7E, 0xCA, 0xC2, /* 0x88-0x8B */
+	0xB6, 0xFE, 0xD8, 0xA1, 0xD3, 0xDA, 0xBF, 0xF7, /* 0x8C-0x8F */
+	0x81, 0x80, 0xD4, 0xC6, 0xBB, 0xA5, 0xD8, 0xC1, /* 0x90-0x93 */
+	0xCE, 0xE5, 0xBE, 0xAE, 0x81, 0x81, 0x81, 0x82, /* 0x94-0x97 */
+	0xD8, 0xA8, 0x81, 0x83, 0xD1, 0xC7, 0xD0, 0xA9, /* 0x98-0x9B */
+	0x81, 0x84, 0x81, 0x85, 0x81, 0x86, 0xD8, 0xBD, /* 0x9C-0x9F */
+	0xD9, 0xEF, 0xCD, 0xF6, 0xBF, 0xBA, 0x81, 0x87, /* 0xA0-0xA3 */
+	0xBD, 0xBB, 0xBA, 0xA5, 0xD2, 0xE0, 0xB2, 0xFA, /* 0xA4-0xA7 */
+	0xBA, 0xE0, 0xC4, 0xB6, 0x81, 0x88, 0xCF, 0xED, /* 0xA8-0xAB */
+	0xBE, 0xA9, 0xCD, 0xA4, 0xC1, 0xC1, 0x81, 0x89, /* 0xAC-0xAF */
+	0x81, 0x8A, 0x81, 0x8B, 0xC7, 0xD7, 0xD9, 0xF1, /* 0xB0-0xB3 */
+	0x81, 0x8C, 0xD9, 0xF4, 0x81, 0x8D, 0x81, 0x8E, /* 0xB4-0xB7 */
+	0x81, 0x8F, 0x81, 0x90, 0xC8, 0xCB, 0xD8, 0xE9, /* 0xB8-0xBB */
+	0x81, 0x91, 0x81, 0x92, 0x81, 0x93, 0xD2, 0xDA, /* 0xBC-0xBF */
+	0xCA, 0xB2, 0xC8, 0xCA, 0xD8, 0xEC, 0xD8, 0xEA, /* 0xC0-0xC3 */
+	0xD8, 0xC6, 0xBD, 0xF6, 0xC6, 0xCD, 0xB3, 0xF0, /* 0xC4-0xC7 */
+	0x81, 0x94, 0xD8, 0xEB, 0xBD, 0xF1, 0xBD, 0xE9, /* 0xC8-0xCB */
+	0x81, 0x95, 0xC8, 0xD4, 0xB4, 0xD3, 0x81, 0x96, /* 0xCC-0xCF */
+	0x81, 0x97, 0xC2, 0xD8, 0x81, 0x98, 0xB2, 0xD6, /* 0xD0-0xD3 */
+	0xD7, 0xD0, 0xCA, 0xCB, 0xCB, 0xFB, 0xD5, 0xCC, /* 0xD4-0xD7 */
+	0xB8, 0xB6, 0xCF, 0xC9, 0x81, 0x99, 0x81, 0x9A, /* 0xD8-0xDB */
+	0x81, 0x9B, 0xD9, 0xDA, 0xD8, 0xF0, 0xC7, 0xAA, /* 0xDC-0xDF */
+	0x81, 0x9C, 0xD8, 0xEE, 0x81, 0x9D, 0xB4, 0xFA, /* 0xE0-0xE3 */
+	0xC1, 0xEE, 0xD2, 0xD4, 0x81, 0x9E, 0x81, 0x9F, /* 0xE4-0xE7 */
+	0xD8, 0xED, 0x81, 0xA0, 0xD2, 0xC7, 0xD8, 0xEF, /* 0xE8-0xEB */
+	0xC3, 0xC7, 0x81, 0xA1, 0x81, 0xA2, 0x81, 0xA3, /* 0xEC-0xEF */
+	0xD1, 0xF6, 0x81, 0xA4, 0xD6, 0xD9, 0xD8, 0xF2, /* 0xF0-0xF3 */
+	0x81, 0xA5, 0xD8, 0xF5, 0xBC, 0xFE, 0xBC, 0xDB, /* 0xF4-0xF7 */
+	0x81, 0xA6, 0x81, 0xA7, 0x81, 0xA8, 0xC8, 0xCE, /* 0xF8-0xFB */
+	0x81, 0xA9, 0xB7, 0xDD, 0x81, 0xAA, 0xB7, 0xC2, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_4F[512] = {
+	0x81, 0xAB, 0xC6, 0xF3, 0x81, 0xAC, 0x81, 0xAD, /* 0x00-0x03 */
+	0x81, 0xAE, 0x81, 0xAF, 0x81, 0xB0, 0x81, 0xB1, /* 0x04-0x07 */
+	0x81, 0xB2, 0xD8, 0xF8, 0xD2, 0xC1, 0x81, 0xB3, /* 0x08-0x0B */
+	0x81, 0xB4, 0xCE, 0xE9, 0xBC, 0xBF, 0xB7, 0xFC, /* 0x0C-0x0F */
+	0xB7, 0xA5, 0xD0, 0xDD, 0x81, 0xB5, 0x81, 0xB6, /* 0x10-0x13 */
+	0x81, 0xB7, 0x81, 0xB8, 0x81, 0xB9, 0xD6, 0xDA, /* 0x14-0x17 */
+	0xD3, 0xC5, 0xBB, 0xEF, 0xBB, 0xE1, 0xD8, 0xF1, /* 0x18-0x1B */
+	0x81, 0xBA, 0x81, 0xBB, 0xC9, 0xA1, 0xCE, 0xB0, /* 0x1C-0x1F */
+	0xB4, 0xAB, 0x81, 0xBC, 0xD8, 0xF3, 0x81, 0xBD, /* 0x20-0x23 */
+	0xC9, 0xCB, 0xD8, 0xF6, 0xC2, 0xD7, 0xD8, 0xF7, /* 0x24-0x27 */
+	0x81, 0xBE, 0x81, 0xBF, 0xCE, 0xB1, 0xD8, 0xF9, /* 0x28-0x2B */
+	0x81, 0xC0, 0x81, 0xC1, 0x81, 0xC2, 0xB2, 0xAE, /* 0x2C-0x2F */
+	0xB9, 0xC0, 0x81, 0xC3, 0xD9, 0xA3, 0x81, 0xC4, /* 0x30-0x33 */
+	0xB0, 0xE9, 0x81, 0xC5, 0xC1, 0xE6, 0x81, 0xC6, /* 0x34-0x37 */
+	0xC9, 0xEC, 0x81, 0xC7, 0xCB, 0xC5, 0x81, 0xC8, /* 0x38-0x3B */
+	0xCB, 0xC6, 0xD9, 0xA4, 0x81, 0xC9, 0x81, 0xCA, /* 0x3C-0x3F */
+	0x81, 0xCB, 0x81, 0xCC, 0x81, 0xCD, 0xB5, 0xE8, /* 0x40-0x43 */
+	0x81, 0xCE, 0x81, 0xCF, 0xB5, 0xAB, 0x81, 0xD0, /* 0x44-0x47 */
+	0x81, 0xD1, 0x81, 0xD2, 0x81, 0xD3, 0x81, 0xD4, /* 0x48-0x4B */
+	0x81, 0xD5, 0xCE, 0xBB, 0xB5, 0xCD, 0xD7, 0xA1, /* 0x4C-0x4F */
+	0xD7, 0xF4, 0xD3, 0xD3, 0x81, 0xD6, 0xCC, 0xE5, /* 0x50-0x53 */
+	0x81, 0xD7, 0xBA, 0xCE, 0x81, 0xD8, 0xD9, 0xA2, /* 0x54-0x57 */
+	0xD9, 0xDC, 0xD3, 0xE0, 0xD8, 0xFD, 0xB7, 0xF0, /* 0x58-0x5B */
+	0xD7, 0xF7, 0xD8, 0xFE, 0xD8, 0xFA, 0xD9, 0xA1, /* 0x5C-0x5F */
+	0xC4, 0xE3, 0x81, 0xD9, 0x81, 0xDA, 0xD3, 0xB6, /* 0x60-0x63 */
+	0xD8, 0xF4, 0xD9, 0xDD, 0x81, 0xDB, 0xD8, 0xFB, /* 0x64-0x67 */
+	0x81, 0xDC, 0xC5, 0xE5, 0x81, 0xDD, 0x81, 0xDE, /* 0x68-0x6B */
+	0xC0, 0xD0, 0x81, 0xDF, 0x81, 0xE0, 0xD1, 0xF0, /* 0x6C-0x6F */
+	0xB0, 0xDB, 0x81, 0xE1, 0x81, 0xE2, 0xBC, 0xD1, /* 0x70-0x73 */
+	0xD9, 0xA6, 0x81, 0xE3, 0xD9, 0xA5, 0x81, 0xE4, /* 0x74-0x77 */
+	0x81, 0xE5, 0x81, 0xE6, 0x81, 0xE7, 0xD9, 0xAC, /* 0x78-0x7B */
+	0xD9, 0xAE, 0x81, 0xE8, 0xD9, 0xAB, 0xCA, 0xB9, /* 0x7C-0x7F */
+	
+	0x81, 0xE9, 0x81, 0xEA, 0x81, 0xEB, 0xD9, 0xA9, /* 0x80-0x83 */
+	0xD6, 0xB6, 0x81, 0xEC, 0x81, 0xED, 0x81, 0xEE, /* 0x84-0x87 */
+	0xB3, 0xDE, 0xD9, 0xA8, 0x81, 0xEF, 0xC0, 0xFD, /* 0x88-0x8B */
+	0x81, 0xF0, 0xCA, 0xCC, 0x81, 0xF1, 0xD9, 0xAA, /* 0x8C-0x8F */
+	0x81, 0xF2, 0xD9, 0xA7, 0x81, 0xF3, 0x81, 0xF4, /* 0x90-0x93 */
+	0xD9, 0xB0, 0x81, 0xF5, 0x81, 0xF6, 0xB6, 0xB1, /* 0x94-0x97 */
+	0x81, 0xF7, 0x81, 0xF8, 0x81, 0xF9, 0xB9, 0xA9, /* 0x98-0x9B */
+	0x81, 0xFA, 0xD2, 0xC0, 0x81, 0xFB, 0x81, 0xFC, /* 0x9C-0x9F */
+	0xCF, 0xC0, 0x81, 0xFD, 0x81, 0xFE, 0xC2, 0xC2, /* 0xA0-0xA3 */
+	0x82, 0x40, 0xBD, 0xC4, 0xD5, 0xEC, 0xB2, 0xE0, /* 0xA4-0xA7 */
+	0xC7, 0xC8, 0xBF, 0xEB, 0xD9, 0xAD, 0x82, 0x41, /* 0xA8-0xAB */
+	0xD9, 0xAF, 0x82, 0x42, 0xCE, 0xEA, 0xBA, 0xEE, /* 0xAC-0xAF */
+	0x82, 0x43, 0x82, 0x44, 0x82, 0x45, 0x82, 0x46, /* 0xB0-0xB3 */
+	0x82, 0x47, 0xC7, 0xD6, 0x82, 0x48, 0x82, 0x49, /* 0xB4-0xB7 */
+	0x82, 0x4A, 0x82, 0x4B, 0x82, 0x4C, 0x82, 0x4D, /* 0xB8-0xBB */
+	0x82, 0x4E, 0x82, 0x4F, 0x82, 0x50, 0xB1, 0xE3, /* 0xBC-0xBF */
+	0x82, 0x51, 0x82, 0x52, 0x82, 0x53, 0xB4, 0xD9, /* 0xC0-0xC3 */
+	0xB6, 0xED, 0xD9, 0xB4, 0x82, 0x54, 0x82, 0x55, /* 0xC4-0xC7 */
+	0x82, 0x56, 0x82, 0x57, 0xBF, 0xA1, 0x82, 0x58, /* 0xC8-0xCB */
+	0x82, 0x59, 0x82, 0x5A, 0xD9, 0xDE, 0xC7, 0xCE, /* 0xCC-0xCF */
+	0xC0, 0xFE, 0xD9, 0xB8, 0x82, 0x5B, 0x82, 0x5C, /* 0xD0-0xD3 */
+	0x82, 0x5D, 0x82, 0x5E, 0x82, 0x5F, 0xCB, 0xD7, /* 0xD4-0xD7 */
+	0xB7, 0xFD, 0x82, 0x60, 0xD9, 0xB5, 0x82, 0x61, /* 0xD8-0xDB */
+	0xD9, 0xB7, 0xB1, 0xA3, 0xD3, 0xE1, 0xD9, 0xB9, /* 0xDC-0xDF */
+	0x82, 0x62, 0xD0, 0xC5, 0x82, 0x63, 0xD9, 0xB6, /* 0xE0-0xE3 */
+	0x82, 0x64, 0x82, 0x65, 0xD9, 0xB1, 0x82, 0x66, /* 0xE4-0xE7 */
+	0xD9, 0xB2, 0xC1, 0xA9, 0xD9, 0xB3, 0x82, 0x67, /* 0xE8-0xEB */
+	0x82, 0x68, 0xBC, 0xF3, 0xD0, 0xDE, 0xB8, 0xA9, /* 0xEC-0xEF */
+	0x82, 0x69, 0xBE, 0xE3, 0x82, 0x6A, 0xD9, 0xBD, /* 0xF0-0xF3 */
+	0x82, 0x6B, 0x82, 0x6C, 0x82, 0x6D, 0x82, 0x6E, /* 0xF4-0xF7 */
+	0xD9, 0xBA, 0x82, 0x6F, 0xB0, 0xB3, 0x82, 0x70, /* 0xF8-0xFB */
+	0x82, 0x71, 0x82, 0x72, 0xD9, 0xC2, 0x82, 0x73, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_50[512] = {
+	0x82, 0x74, 0x82, 0x75, 0x82, 0x76, 0x82, 0x77, /* 0x00-0x03 */
+	0x82, 0x78, 0x82, 0x79, 0x82, 0x7A, 0x82, 0x7B, /* 0x04-0x07 */
+	0x82, 0x7C, 0x82, 0x7D, 0x82, 0x7E, 0x82, 0x80, /* 0x08-0x0B */
+	0xD9, 0xC4, 0xB1, 0xB6, 0x82, 0x81, 0xD9, 0xBF, /* 0x0C-0x0F */
+	0x82, 0x82, 0x82, 0x83, 0xB5, 0xB9, 0x82, 0x84, /* 0x10-0x13 */
+	0xBE, 0xF3, 0x82, 0x85, 0x82, 0x86, 0x82, 0x87, /* 0x14-0x17 */
+	0xCC, 0xC8, 0xBA, 0xF2, 0xD2, 0xD0, 0x82, 0x88, /* 0x18-0x1B */
+	0xD9, 0xC3, 0x82, 0x89, 0x82, 0x8A, 0xBD, 0xE8, /* 0x1C-0x1F */
+	0x82, 0x8B, 0xB3, 0xAB, 0x82, 0x8C, 0x82, 0x8D, /* 0x20-0x23 */
+	0x82, 0x8E, 0xD9, 0xC5, 0xBE, 0xEB, 0x82, 0x8F, /* 0x24-0x27 */
+	0xD9, 0xC6, 0xD9, 0xBB, 0xC4, 0xDF, 0x82, 0x90, /* 0x28-0x2B */
+	0xD9, 0xBE, 0xD9, 0xC1, 0xD9, 0xC0, 0x82, 0x91, /* 0x2C-0x2F */
+	0x82, 0x92, 0x82, 0x93, 0x82, 0x94, 0x82, 0x95, /* 0x30-0x33 */
+	0x82, 0x96, 0x82, 0x97, 0x82, 0x98, 0x82, 0x99, /* 0x34-0x37 */
+	0x82, 0x9A, 0x82, 0x9B, 0xD5, 0xAE, 0x82, 0x9C, /* 0x38-0x3B */
+	0xD6, 0xB5, 0x82, 0x9D, 0xC7, 0xE3, 0x82, 0x9E, /* 0x3C-0x3F */
+	0x82, 0x9F, 0x82, 0xA0, 0x82, 0xA1, 0xD9, 0xC8, /* 0x40-0x43 */
+	0x82, 0xA2, 0x82, 0xA3, 0x82, 0xA4, 0xBC, 0xD9, /* 0x44-0x47 */
+	0xD9, 0xCA, 0x82, 0xA5, 0x82, 0xA6, 0x82, 0xA7, /* 0x48-0x4B */
+	0xD9, 0xBC, 0x82, 0xA8, 0xD9, 0xCB, 0xC6, 0xAB, /* 0x4C-0x4F */
+	0x82, 0xA9, 0x82, 0xAA, 0x82, 0xAB, 0x82, 0xAC, /* 0x50-0x53 */
+	0x82, 0xAD, 0xD9, 0xC9, 0x82, 0xAE, 0x82, 0xAF, /* 0x54-0x57 */
+	0x82, 0xB0, 0x82, 0xB1, 0xD7, 0xF6, 0x82, 0xB2, /* 0x58-0x5B */
+	0xCD, 0xA3, 0x82, 0xB3, 0x82, 0xB4, 0x82, 0xB5, /* 0x5C-0x5F */
+	0x82, 0xB6, 0x82, 0xB7, 0x82, 0xB8, 0x82, 0xB9, /* 0x60-0x63 */
+	0x82, 0xBA, 0xBD, 0xA1, 0x82, 0xBB, 0x82, 0xBC, /* 0x64-0x67 */
+	0x82, 0xBD, 0x82, 0xBE, 0x82, 0xBF, 0x82, 0xC0, /* 0x68-0x6B */
+	0xD9, 0xCC, 0x82, 0xC1, 0x82, 0xC2, 0x82, 0xC3, /* 0x6C-0x6F */
+	0x82, 0xC4, 0x82, 0xC5, 0x82, 0xC6, 0x82, 0xC7, /* 0x70-0x73 */
+	0x82, 0xC8, 0x82, 0xC9, 0xC5, 0xBC, 0xCD, 0xB5, /* 0x74-0x77 */
+	0x82, 0xCA, 0x82, 0xCB, 0x82, 0xCC, 0xD9, 0xCD, /* 0x78-0x7B */
+	0x82, 0xCD, 0x82, 0xCE, 0xD9, 0xC7, 0xB3, 0xA5, /* 0x7C-0x7F */
+	
+	0xBF, 0xFE, 0x82, 0xCF, 0x82, 0xD0, 0x82, 0xD1, /* 0x80-0x83 */
+	0x82, 0xD2, 0xB8, 0xB5, 0x82, 0xD3, 0x82, 0xD4, /* 0x84-0x87 */
+	0xC0, 0xFC, 0x82, 0xD5, 0x82, 0xD6, 0x82, 0xD7, /* 0x88-0x8B */
+	0x82, 0xD8, 0xB0, 0xF8, 0x82, 0xD9, 0x82, 0xDA, /* 0x8C-0x8F */
+	0x82, 0xDB, 0x82, 0xDC, 0x82, 0xDD, 0x82, 0xDE, /* 0x90-0x93 */
+	0x82, 0xDF, 0x82, 0xE0, 0x82, 0xE1, 0x82, 0xE2, /* 0x94-0x97 */
+	0x82, 0xE3, 0x82, 0xE4, 0x82, 0xE5, 0x82, 0xE6, /* 0x98-0x9B */
+	0x82, 0xE7, 0x82, 0xE8, 0x82, 0xE9, 0x82, 0xEA, /* 0x9C-0x9F */
+	0x82, 0xEB, 0x82, 0xEC, 0x82, 0xED, 0xB4, 0xF6, /* 0xA0-0xA3 */
+	0x82, 0xEE, 0xD9, 0xCE, 0x82, 0xEF, 0xD9, 0xCF, /* 0xA4-0xA7 */
+	0xB4, 0xA2, 0xD9, 0xD0, 0x82, 0xF0, 0x82, 0xF1, /* 0xA8-0xAB */
+	0xB4, 0xDF, 0x82, 0xF2, 0x82, 0xF3, 0x82, 0xF4, /* 0xAC-0xAF */
+	0x82, 0xF5, 0x82, 0xF6, 0xB0, 0xC1, 0x82, 0xF7, /* 0xB0-0xB3 */
+	0x82, 0xF8, 0x82, 0xF9, 0x82, 0xFA, 0x82, 0xFB, /* 0xB4-0xB7 */
+	0x82, 0xFC, 0x82, 0xFD, 0xD9, 0xD1, 0xC9, 0xB5, /* 0xB8-0xBB */
+	0x82, 0xFE, 0x83, 0x40, 0x83, 0x41, 0x83, 0x42, /* 0xBC-0xBF */
+	0x83, 0x43, 0x83, 0x44, 0x83, 0x45, 0x83, 0x46, /* 0xC0-0xC3 */
+	0x83, 0x47, 0x83, 0x48, 0x83, 0x49, 0x83, 0x4A, /* 0xC4-0xC7 */
+	0x83, 0x4B, 0x83, 0x4C, 0x83, 0x4D, 0x83, 0x4E, /* 0xC8-0xCB */
+	0x83, 0x4F, 0x83, 0x50, 0x83, 0x51, 0xCF, 0xF1, /* 0xCC-0xCF */
+	0x83, 0x52, 0x83, 0x53, 0x83, 0x54, 0x83, 0x55, /* 0xD0-0xD3 */
+	0x83, 0x56, 0x83, 0x57, 0xD9, 0xD2, 0x83, 0x58, /* 0xD4-0xD7 */
+	0x83, 0x59, 0x83, 0x5A, 0xC1, 0xC5, 0x83, 0x5B, /* 0xD8-0xDB */
+	0x83, 0x5C, 0x83, 0x5D, 0x83, 0x5E, 0x83, 0x5F, /* 0xDC-0xDF */
+	0x83, 0x60, 0x83, 0x61, 0x83, 0x62, 0x83, 0x63, /* 0xE0-0xE3 */
+	0x83, 0x64, 0x83, 0x65, 0xD9, 0xD6, 0xC9, 0xAE, /* 0xE4-0xE7 */
+	0x83, 0x66, 0x83, 0x67, 0x83, 0x68, 0x83, 0x69, /* 0xE8-0xEB */
+	0xD9, 0xD5, 0xD9, 0xD4, 0xD9, 0xD7, 0x83, 0x6A, /* 0xEC-0xEF */
+	0x83, 0x6B, 0x83, 0x6C, 0x83, 0x6D, 0xCB, 0xDB, /* 0xF0-0xF3 */
+	0x83, 0x6E, 0xBD, 0xA9, 0x83, 0x6F, 0x83, 0x70, /* 0xF4-0xF7 */
+	0x83, 0x71, 0x83, 0x72, 0x83, 0x73, 0xC6, 0xA7, /* 0xF8-0xFB */
+	0x83, 0x74, 0x83, 0x75, 0x83, 0x76, 0x83, 0x77, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_51[512] = {
+	0x83, 0x78, 0x83, 0x79, 0x83, 0x7A, 0x83, 0x7B, /* 0x00-0x03 */
+	0x83, 0x7C, 0x83, 0x7D, 0xD9, 0xD3, 0xD9, 0xD8, /* 0x04-0x07 */
+	0x83, 0x7E, 0x83, 0x80, 0x83, 0x81, 0xD9, 0xD9, /* 0x08-0x0B */
+	0x83, 0x82, 0x83, 0x83, 0x83, 0x84, 0x83, 0x85, /* 0x0C-0x0F */
+	0x83, 0x86, 0x83, 0x87, 0xC8, 0xE5, 0x83, 0x88, /* 0x10-0x13 */
+	0x83, 0x89, 0x83, 0x8A, 0x83, 0x8B, 0x83, 0x8C, /* 0x14-0x17 */
+	0x83, 0x8D, 0x83, 0x8E, 0x83, 0x8F, 0x83, 0x90, /* 0x18-0x1B */
+	0x83, 0x91, 0x83, 0x92, 0x83, 0x93, 0x83, 0x94, /* 0x1C-0x1F */
+	0x83, 0x95, 0xC0, 0xDC, 0x83, 0x96, 0x83, 0x97, /* 0x20-0x23 */
+	0x83, 0x98, 0x83, 0x99, 0x83, 0x9A, 0x83, 0x9B, /* 0x24-0x27 */
+	0x83, 0x9C, 0x83, 0x9D, 0x83, 0x9E, 0x83, 0x9F, /* 0x28-0x2B */
+	0x83, 0xA0, 0x83, 0xA1, 0x83, 0xA2, 0x83, 0xA3, /* 0x2C-0x2F */
+	0x83, 0xA4, 0x83, 0xA5, 0x83, 0xA6, 0x83, 0xA7, /* 0x30-0x33 */
+	0x83, 0xA8, 0x83, 0xA9, 0x83, 0xAA, 0x83, 0xAB, /* 0x34-0x37 */
+	0x83, 0xAC, 0x83, 0xAD, 0x83, 0xAE, 0x83, 0xAF, /* 0x38-0x3B */
+	0x83, 0xB0, 0x83, 0xB1, 0x83, 0xB2, 0xB6, 0xF9, /* 0x3C-0x3F */
+	0xD8, 0xA3, 0xD4, 0xCA, 0x83, 0xB3, 0xD4, 0xAA, /* 0x40-0x43 */
+	0xD0, 0xD6, 0xB3, 0xE4, 0xD5, 0xD7, 0x83, 0xB4, /* 0x44-0x47 */
+	0xCF, 0xC8, 0xB9, 0xE2, 0x83, 0xB5, 0xBF, 0xCB, /* 0x48-0x4B */
+	0x83, 0xB6, 0xC3, 0xE2, 0x83, 0xB7, 0x83, 0xB8, /* 0x4C-0x4F */
+	0x83, 0xB9, 0xB6, 0xD2, 0x83, 0xBA, 0x83, 0xBB, /* 0x50-0x53 */
+	0xCD, 0xC3, 0xD9, 0xEE, 0xD9, 0xF0, 0x83, 0xBC, /* 0x54-0x57 */
+	0x83, 0xBD, 0x83, 0xBE, 0xB5, 0xB3, 0x83, 0xBF, /* 0x58-0x5B */
+	0xB6, 0xB5, 0x83, 0xC0, 0x83, 0xC1, 0x83, 0xC2, /* 0x5C-0x5F */
+	0x83, 0xC3, 0x83, 0xC4, 0xBE, 0xA4, 0x83, 0xC5, /* 0x60-0x63 */
+	0x83, 0xC6, 0xC8, 0xEB, 0x83, 0xC7, 0x83, 0xC8, /* 0x64-0x67 */
+	0xC8, 0xAB, 0x83, 0xC9, 0x83, 0xCA, 0xB0, 0xCB, /* 0x68-0x6B */
+	0xB9, 0xAB, 0xC1, 0xF9, 0xD9, 0xE2, 0x83, 0xCB, /* 0x6C-0x6F */
+	0xC0, 0xBC, 0xB9, 0xB2, 0x83, 0xCC, 0xB9, 0xD8, /* 0x70-0x73 */
+	0xD0, 0xCB, 0xB1, 0xF8, 0xC6, 0xE4, 0xBE, 0xDF, /* 0x74-0x77 */
+	0xB5, 0xE4, 0xD7, 0xC8, 0x83, 0xCD, 0xD1, 0xF8, /* 0x78-0x7B */
+	0xBC, 0xE6, 0xCA, 0xDE, 0x83, 0xCE, 0x83, 0xCF, /* 0x7C-0x7F */
+	
+	0xBC, 0xBD, 0xD9, 0xE6, 0xD8, 0xE7, 0x83, 0xD0, /* 0x80-0x83 */
+	0x83, 0xD1, 0xC4, 0xDA, 0x83, 0xD2, 0x83, 0xD3, /* 0x84-0x87 */
+	0xB8, 0xD4, 0xC8, 0xBD, 0x83, 0xD4, 0x83, 0xD5, /* 0x88-0x8B */
+	0xB2, 0xE1, 0xD4, 0xD9, 0x83, 0xD6, 0x83, 0xD7, /* 0x8C-0x8F */
+	0x83, 0xD8, 0x83, 0xD9, 0xC3, 0xB0, 0x83, 0xDA, /* 0x90-0x93 */
+	0x83, 0xDB, 0xC3, 0xE1, 0xDA, 0xA2, 0xC8, 0xDF, /* 0x94-0x97 */
+	0x83, 0xDC, 0xD0, 0xB4, 0x83, 0xDD, 0xBE, 0xFC, /* 0x98-0x9B */
+	0xC5, 0xA9, 0x83, 0xDE, 0x83, 0xDF, 0x83, 0xE0, /* 0x9C-0x9F */
+	0xB9, 0xDA, 0x83, 0xE1, 0xDA, 0xA3, 0x83, 0xE2, /* 0xA0-0xA3 */
+	0xD4, 0xA9, 0xDA, 0xA4, 0x83, 0xE3, 0x83, 0xE4, /* 0xA4-0xA7 */
+	0x83, 0xE5, 0x83, 0xE6, 0x83, 0xE7, 0xD9, 0xFB, /* 0xA8-0xAB */
+	0xB6, 0xAC, 0x83, 0xE8, 0x83, 0xE9, 0xB7, 0xEB, /* 0xAC-0xAF */
+	0xB1, 0xF9, 0xD9, 0xFC, 0xB3, 0xE5, 0xBE, 0xF6, /* 0xB0-0xB3 */
+	0x83, 0xEA, 0xBF, 0xF6, 0xD2, 0xB1, 0xC0, 0xE4, /* 0xB4-0xB7 */
+	0x83, 0xEB, 0x83, 0xEC, 0x83, 0xED, 0xB6, 0xB3, /* 0xB8-0xBB */
+	0xD9, 0xFE, 0xD9, 0xFD, 0x83, 0xEE, 0x83, 0xEF, /* 0xBC-0xBF */
+	0xBE, 0xBB, 0x83, 0xF0, 0x83, 0xF1, 0x83, 0xF2, /* 0xC0-0xC3 */
+	0xC6, 0xE0, 0x83, 0xF3, 0xD7, 0xBC, 0xDA, 0xA1, /* 0xC4-0xC7 */
+	0x83, 0xF4, 0xC1, 0xB9, 0x83, 0xF5, 0xB5, 0xF2, /* 0xC8-0xCB */
+	0xC1, 0xE8, 0x83, 0xF6, 0x83, 0xF7, 0xBC, 0xF5, /* 0xCC-0xCF */
+	0x83, 0xF8, 0xB4, 0xD5, 0x83, 0xF9, 0x83, 0xFA, /* 0xD0-0xD3 */
+	0x83, 0xFB, 0x83, 0xFC, 0x83, 0xFD, 0x83, 0xFE, /* 0xD4-0xD7 */
+	0x84, 0x40, 0x84, 0x41, 0x84, 0x42, 0xC1, 0xDD, /* 0xD8-0xDB */
+	0x84, 0x43, 0xC4, 0xFD, 0x84, 0x44, 0x84, 0x45, /* 0xDC-0xDF */
+	0xBC, 0xB8, 0xB7, 0xB2, 0x84, 0x46, 0x84, 0x47, /* 0xE0-0xE3 */
+	0xB7, 0xEF, 0x84, 0x48, 0x84, 0x49, 0x84, 0x4A, /* 0xE4-0xE7 */
+	0x84, 0x4B, 0x84, 0x4C, 0x84, 0x4D, 0xD9, 0xEC, /* 0xE8-0xEB */
+	0x84, 0x4E, 0xC6, 0xBE, 0x84, 0x4F, 0xBF, 0xAD, /* 0xEC-0xEF */
+	0xBB, 0xCB, 0x84, 0x50, 0x84, 0x51, 0xB5, 0xCA, /* 0xF0-0xF3 */
+	0x84, 0x52, 0xDB, 0xC9, 0xD0, 0xD7, 0x84, 0x53, /* 0xF4-0xF7 */
+	0xCD, 0xB9, 0xB0, 0xBC, 0xB3, 0xF6, 0xBB, 0xF7, /* 0xF8-0xFB */
+	0xDB, 0xCA, 0xBA, 0xAF, 0x84, 0x54, 0xD4, 0xE4, /* 0xFC-0xFF */
+};
+
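(Editor's orientation note, not part of the patch: each u2c_XX[512] table below covers one 256-code-point Unicode page, so u2c_52, for example, covers U+5200 through U+52FF, and stores, for each low byte of the code point, the two output bytes of the target double-byte character set. The following is only a minimal sketch of how such a page table is typically consumed; the array name u2c_pages and the function are hypothetical illustrations for the reader, not helpers defined by this file.)

/*
 * Editor's sketch only.  The high byte of the Unicode code point selects
 * a page table such as u2c_52[]; the low byte selects one of the 256
 * two-byte output codes stored in that page.
 */
static const unsigned char *u2c_pages[256];	/* e.g. u2c_pages[0x52] = u2c_52 (hypothetical index) */

static int uni2char_sketch(unsigned short uni, unsigned char *out, int boundlen)
{
	const unsigned char *page = u2c_pages[(uni >> 8) & 0xFF];
	unsigned int lo = uni & 0xFF;

	if (!page || boundlen < 2)
		return -1;			/* no page for this range, or no room in out[] */
	out[0] = page[2 * lo];			/* first output byte */
	out[1] = page[2 * lo + 1];		/* second output byte */
	return 2;				/* bytes written */
}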
+static unsigned char u2c_52[512] = {
+	0xB5, 0xB6, 0xB5, 0xF3, 0xD8, 0xD6, 0xC8, 0xD0, /* 0x00-0x03 */
+	0x84, 0x55, 0x84, 0x56, 0xB7, 0xD6, 0xC7, 0xD0, /* 0x04-0x07 */
+	0xD8, 0xD7, 0x84, 0x57, 0xBF, 0xAF, 0x84, 0x58, /* 0x08-0x0B */
+	0x84, 0x59, 0xDB, 0xBB, 0xD8, 0xD8, 0x84, 0x5A, /* 0x0C-0x0F */
+	0x84, 0x5B, 0xD0, 0xCC, 0xBB, 0xAE, 0x84, 0x5C, /* 0x10-0x13 */
+	0x84, 0x5D, 0x84, 0x5E, 0xEB, 0xBE, 0xC1, 0xD0, /* 0x14-0x17 */
+	0xC1, 0xF5, 0xD4, 0xF2, 0xB8, 0xD5, 0xB4, 0xB4, /* 0x18-0x1B */
+	0x84, 0x5F, 0xB3, 0xF5, 0x84, 0x60, 0x84, 0x61, /* 0x1C-0x1F */
+	0xC9, 0xBE, 0x84, 0x62, 0x84, 0x63, 0x84, 0x64, /* 0x20-0x23 */
+	0xC5, 0xD0, 0x84, 0x65, 0x84, 0x66, 0x84, 0x67, /* 0x24-0x27 */
+	0xC5, 0xD9, 0xC0, 0xFB, 0x84, 0x68, 0xB1, 0xF0, /* 0x28-0x2B */
+	0x84, 0x69, 0xD8, 0xD9, 0xB9, 0xCE, 0x84, 0x6A, /* 0x2C-0x2F */
+	0xB5, 0xBD, 0x84, 0x6B, 0x84, 0x6C, 0xD8, 0xDA, /* 0x30-0x33 */
+	0x84, 0x6D, 0x84, 0x6E, 0xD6, 0xC6, 0xCB, 0xA2, /* 0x34-0x37 */
+	0xC8, 0xAF, 0xC9, 0xB2, 0xB4, 0xCC, 0xBF, 0xCC, /* 0x38-0x3B */
+	0x84, 0x6F, 0xB9, 0xF4, 0x84, 0x70, 0xD8, 0xDB, /* 0x3C-0x3F */
+	0xD8, 0xDC, 0xB6, 0xE7, 0xBC, 0xC1, 0xCC, 0xEA, /* 0x40-0x43 */
+	0x84, 0x71, 0x84, 0x72, 0x84, 0x73, 0x84, 0x74, /* 0x44-0x47 */
+	0x84, 0x75, 0x84, 0x76, 0xCF, 0xF7, 0x84, 0x77, /* 0x48-0x4B */
+	0xD8, 0xDD, 0xC7, 0xB0, 0x84, 0x78, 0x84, 0x79, /* 0x4C-0x4F */
+	0xB9, 0xD0, 0xBD, 0xA3, 0x84, 0x7A, 0x84, 0x7B, /* 0x50-0x53 */
+	0xCC, 0xDE, 0x84, 0x7C, 0xC6, 0xCA, 0x84, 0x7D, /* 0x54-0x57 */
+	0x84, 0x7E, 0x84, 0x80, 0x84, 0x81, 0x84, 0x82, /* 0x58-0x5B */
+	0xD8, 0xE0, 0x84, 0x83, 0xD8, 0xDE, 0x84, 0x84, /* 0x5C-0x5F */
+	0x84, 0x85, 0xD8, 0xDF, 0x84, 0x86, 0x84, 0x87, /* 0x60-0x63 */
+	0x84, 0x88, 0xB0, 0xFE, 0x84, 0x89, 0xBE, 0xE7, /* 0x64-0x67 */
+	0x84, 0x8A, 0xCA, 0xA3, 0xBC, 0xF4, 0x84, 0x8B, /* 0x68-0x6B */
+	0x84, 0x8C, 0x84, 0x8D, 0x84, 0x8E, 0xB8, 0xB1, /* 0x6C-0x6F */
+	0x84, 0x8F, 0x84, 0x90, 0xB8, 0xEE, 0x84, 0x91, /* 0x70-0x73 */
+	0x84, 0x92, 0x84, 0x93, 0x84, 0x94, 0x84, 0x95, /* 0x74-0x77 */
+	0x84, 0x96, 0x84, 0x97, 0x84, 0x98, 0x84, 0x99, /* 0x78-0x7B */
+	0x84, 0x9A, 0xD8, 0xE2, 0x84, 0x9B, 0xBD, 0xCB, /* 0x7C-0x7F */
+	
+	0x84, 0x9C, 0xD8, 0xE4, 0xD8, 0xE3, 0x84, 0x9D, /* 0x80-0x83 */
+	0x84, 0x9E, 0x84, 0x9F, 0x84, 0xA0, 0x84, 0xA1, /* 0x84-0x87 */
+	0xC5, 0xFC, 0x84, 0xA2, 0x84, 0xA3, 0x84, 0xA4, /* 0x88-0x8B */
+	0x84, 0xA5, 0x84, 0xA6, 0x84, 0xA7, 0x84, 0xA8, /* 0x8C-0x8F */
+	0xD8, 0xE5, 0x84, 0xA9, 0x84, 0xAA, 0xD8, 0xE6, /* 0x90-0x93 */
+	0x84, 0xAB, 0x84, 0xAC, 0x84, 0xAD, 0x84, 0xAE, /* 0x94-0x97 */
+	0x84, 0xAF, 0x84, 0xB0, 0x84, 0xB1, 0xC1, 0xA6, /* 0x98-0x9B */
+	0x84, 0xB2, 0xC8, 0xB0, 0xB0, 0xEC, 0xB9, 0xA6, /* 0x9C-0x9F */
+	0xBC, 0xD3, 0xCE, 0xF1, 0xDB, 0xBD, 0xC1, 0xD3, /* 0xA0-0xA3 */
+	0x84, 0xB3, 0x84, 0xB4, 0x84, 0xB5, 0x84, 0xB6, /* 0xA4-0xA7 */
+	0xB6, 0xAF, 0xD6, 0xFA, 0xC5, 0xAC, 0xBD, 0xD9, /* 0xA8-0xAB */
+	0xDB, 0xBE, 0xDB, 0xBF, 0x84, 0xB7, 0x84, 0xB8, /* 0xAC-0xAF */
+	0x84, 0xB9, 0xC0, 0xF8, 0xBE, 0xA2, 0xC0, 0xCD, /* 0xB0-0xB3 */
+	0x84, 0xBA, 0x84, 0xBB, 0x84, 0xBC, 0x84, 0xBD, /* 0xB4-0xB7 */
+	0x84, 0xBE, 0x84, 0xBF, 0x84, 0xC0, 0x84, 0xC1, /* 0xB8-0xBB */
+	0x84, 0xC2, 0x84, 0xC3, 0xDB, 0xC0, 0xCA, 0xC6, /* 0xBC-0xBF */
+	0x84, 0xC4, 0x84, 0xC5, 0x84, 0xC6, 0xB2, 0xAA, /* 0xC0-0xC3 */
+	0x84, 0xC7, 0x84, 0xC8, 0x84, 0xC9, 0xD3, 0xC2, /* 0xC4-0xC7 */
+	0x84, 0xCA, 0xC3, 0xE3, 0x84, 0xCB, 0xD1, 0xAB, /* 0xC8-0xCB */
+	0x84, 0xCC, 0x84, 0xCD, 0x84, 0xCE, 0x84, 0xCF, /* 0xCC-0xCF */
+	0xDB, 0xC2, 0x84, 0xD0, 0xC0, 0xD5, 0x84, 0xD1, /* 0xD0-0xD3 */
+	0x84, 0xD2, 0x84, 0xD3, 0xDB, 0xC3, 0x84, 0xD4, /* 0xD4-0xD7 */
+	0xBF, 0xB1, 0x84, 0xD5, 0x84, 0xD6, 0x84, 0xD7, /* 0xD8-0xDB */
+	0x84, 0xD8, 0x84, 0xD9, 0x84, 0xDA, 0xC4, 0xBC, /* 0xDC-0xDF */
+	0x84, 0xDB, 0x84, 0xDC, 0x84, 0xDD, 0x84, 0xDE, /* 0xE0-0xE3 */
+	0xC7, 0xDA, 0x84, 0xDF, 0x84, 0xE0, 0x84, 0xE1, /* 0xE4-0xE7 */
+	0x84, 0xE2, 0x84, 0xE3, 0x84, 0xE4, 0x84, 0xE5, /* 0xE8-0xEB */
+	0x84, 0xE6, 0x84, 0xE7, 0x84, 0xE8, 0x84, 0xE9, /* 0xEC-0xEF */
+	0xDB, 0xC4, 0x84, 0xEA, 0x84, 0xEB, 0x84, 0xEC, /* 0xF0-0xF3 */
+	0x84, 0xED, 0x84, 0xEE, 0x84, 0xEF, 0x84, 0xF0, /* 0xF4-0xF7 */
+	0x84, 0xF1, 0xD9, 0xE8, 0xC9, 0xD7, 0x84, 0xF2, /* 0xF8-0xFB */
+	0x84, 0xF3, 0x84, 0xF4, 0xB9, 0xB4, 0xCE, 0xF0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_53[512] = {
+	0xD4, 0xC8, 0x84, 0xF5, 0x84, 0xF6, 0x84, 0xF7, /* 0x00-0x03 */
+	0x84, 0xF8, 0xB0, 0xFC, 0xB4, 0xD2, 0x84, 0xF9, /* 0x04-0x07 */
+	0xD0, 0xD9, 0x84, 0xFA, 0x84, 0xFB, 0x84, 0xFC, /* 0x08-0x0B */
+	0x84, 0xFD, 0xD9, 0xE9, 0x84, 0xFE, 0xDE, 0xCB, /* 0x0C-0x0F */
+	0xD9, 0xEB, 0x85, 0x40, 0x85, 0x41, 0x85, 0x42, /* 0x10-0x13 */
+	0x85, 0x43, 0xD8, 0xB0, 0xBB, 0xAF, 0xB1, 0xB1, /* 0x14-0x17 */
+	0x85, 0x44, 0xB3, 0xD7, 0xD8, 0xCE, 0x85, 0x45, /* 0x18-0x1B */
+	0x85, 0x46, 0xD4, 0xD1, 0x85, 0x47, 0x85, 0x48, /* 0x1C-0x1F */
+	0xBD, 0xB3, 0xBF, 0xEF, 0x85, 0x49, 0xCF, 0xBB, /* 0x20-0x23 */
+	0x85, 0x4A, 0x85, 0x4B, 0xD8, 0xD0, 0x85, 0x4C, /* 0x24-0x27 */
+	0x85, 0x4D, 0x85, 0x4E, 0xB7, 0xCB, 0x85, 0x4F, /* 0x28-0x2B */
+	0x85, 0x50, 0x85, 0x51, 0xD8, 0xD1, 0x85, 0x52, /* 0x2C-0x2F */
+	0x85, 0x53, 0x85, 0x54, 0x85, 0x55, 0x85, 0x56, /* 0x30-0x33 */
+	0x85, 0x57, 0x85, 0x58, 0x85, 0x59, 0x85, 0x5A, /* 0x34-0x37 */
+	0x85, 0x5B, 0xC6, 0xA5, 0xC7, 0xF8, 0xD2, 0xBD, /* 0x38-0x3B */
+	0x85, 0x5C, 0x85, 0x5D, 0xD8, 0xD2, 0xC4, 0xE4, /* 0x3C-0x3F */
+	0x85, 0x5E, 0xCA, 0xAE, 0x85, 0x5F, 0xC7, 0xA7, /* 0x40-0x43 */
+	0x85, 0x60, 0xD8, 0xA6, 0x85, 0x61, 0xC9, 0xFD, /* 0x44-0x47 */
+	0xCE, 0xE7, 0xBB, 0xDC, 0xB0, 0xEB, 0x85, 0x62, /* 0x48-0x4B */
+	0x85, 0x63, 0x85, 0x64, 0xBB, 0xAA, 0xD0, 0xAD, /* 0x4C-0x4F */
+	0x85, 0x65, 0xB1, 0xB0, 0xD7, 0xE4, 0xD7, 0xBF, /* 0x50-0x53 */
+	0x85, 0x66, 0xB5, 0xA5, 0xC2, 0xF4, 0xC4, 0xCF, /* 0x54-0x57 */
+	0x85, 0x67, 0x85, 0x68, 0xB2, 0xA9, 0x85, 0x69, /* 0x58-0x5B */
+	0xB2, 0xB7, 0x85, 0x6A, 0xB1, 0xE5, 0xDF, 0xB2, /* 0x5C-0x5F */
+	0xD5, 0xBC, 0xBF, 0xA8, 0xC2, 0xAC, 0xD8, 0xD5, /* 0x60-0x63 */
+	0xC2, 0xB1, 0x85, 0x6B, 0xD8, 0xD4, 0xCE, 0xD4, /* 0x64-0x67 */
+	0x85, 0x6C, 0xDA, 0xE0, 0x85, 0x6D, 0xCE, 0xC0, /* 0x68-0x6B */
+	0x85, 0x6E, 0x85, 0x6F, 0xD8, 0xB4, 0xC3, 0xAE, /* 0x6C-0x6F */
+	0xD3, 0xA1, 0xCE, 0xA3, 0x85, 0x70, 0xBC, 0xB4, /* 0x70-0x73 */
+	0xC8, 0xB4, 0xC2, 0xD1, 0x85, 0x71, 0xBE, 0xED, /* 0x74-0x77 */
+	0xD0, 0xB6, 0x85, 0x72, 0xDA, 0xE1, 0x85, 0x73, /* 0x78-0x7B */
+	0x85, 0x74, 0x85, 0x75, 0x85, 0x76, 0xC7, 0xE4, /* 0x7C-0x7F */
+	
+	0x85, 0x77, 0x85, 0x78, 0xB3, 0xA7, 0x85, 0x79, /* 0x80-0x83 */
+	0xB6, 0xF2, 0xCC, 0xFC, 0xC0, 0xFA, 0x85, 0x7A, /* 0x84-0x87 */
+	0x85, 0x7B, 0xC0, 0xF7, 0x85, 0x7C, 0xD1, 0xB9, /* 0x88-0x8B */
+	0xD1, 0xE1, 0xD8, 0xC7, 0x85, 0x7D, 0x85, 0x7E, /* 0x8C-0x8F */
+	0x85, 0x80, 0x85, 0x81, 0x85, 0x82, 0x85, 0x83, /* 0x90-0x93 */
+	0x85, 0x84, 0xB2, 0xDE, 0x85, 0x85, 0x85, 0x86, /* 0x94-0x97 */
+	0xC0, 0xE5, 0x85, 0x87, 0xBA, 0xF1, 0x85, 0x88, /* 0x98-0x9B */
+	0x85, 0x89, 0xD8, 0xC8, 0x85, 0x8A, 0xD4, 0xAD, /* 0x9C-0x9F */
+	0x85, 0x8B, 0x85, 0x8C, 0xCF, 0xE1, 0xD8, 0xC9, /* 0xA0-0xA3 */
+	0x85, 0x8D, 0xD8, 0xCA, 0xCF, 0xC3, 0x85, 0x8E, /* 0xA4-0xA7 */
+	0xB3, 0xF8, 0xBE, 0xC7, 0x85, 0x8F, 0x85, 0x90, /* 0xA8-0xAB */
+	0x85, 0x91, 0x85, 0x92, 0xD8, 0xCB, 0x85, 0x93, /* 0xAC-0xAF */
+	0x85, 0x94, 0x85, 0x95, 0x85, 0x96, 0x85, 0x97, /* 0xB0-0xB3 */
+	0x85, 0x98, 0x85, 0x99, 0xDB, 0xCC, 0x85, 0x9A, /* 0xB4-0xB7 */
+	0x85, 0x9B, 0x85, 0x9C, 0x85, 0x9D, 0xC8, 0xA5, /* 0xB8-0xBB */
+	0x85, 0x9E, 0x85, 0x9F, 0x85, 0xA0, 0xCF, 0xD8, /* 0xBC-0xBF */
+	0x85, 0xA1, 0xC8, 0xFE, 0xB2, 0xCE, 0x85, 0xA2, /* 0xC0-0xC3 */
+	0x85, 0xA3, 0x85, 0xA4, 0x85, 0xA5, 0x85, 0xA6, /* 0xC4-0xC7 */
+	0xD3, 0xD6, 0xB2, 0xE6, 0xBC, 0xB0, 0xD3, 0xD1, /* 0xC8-0xCB */
+	0xCB, 0xAB, 0xB7, 0xB4, 0x85, 0xA7, 0x85, 0xA8, /* 0xCC-0xCF */
+	0x85, 0xA9, 0xB7, 0xA2, 0x85, 0xAA, 0x85, 0xAB, /* 0xD0-0xD3 */
+	0xCA, 0xE5, 0x85, 0xAC, 0xC8, 0xA1, 0xCA, 0xDC, /* 0xD4-0xD7 */
+	0xB1, 0xE4, 0xD0, 0xF0, 0x85, 0xAD, 0xC5, 0xD1, /* 0xD8-0xDB */
+	0x85, 0xAE, 0x85, 0xAF, 0x85, 0xB0, 0xDB, 0xC5, /* 0xDC-0xDF */
+	0xB5, 0xFE, 0x85, 0xB1, 0x85, 0xB2, 0xBF, 0xDA, /* 0xE0-0xE3 */
+	0xB9, 0xC5, 0xBE, 0xE4, 0xC1, 0xED, 0x85, 0xB3, /* 0xE4-0xE7 */
+	0xDF, 0xB6, 0xDF, 0xB5, 0xD6, 0xBB, 0xBD, 0xD0, /* 0xE8-0xEB */
+	0xD5, 0xD9, 0xB0, 0xC8, 0xB6, 0xA3, 0xBF, 0xC9, /* 0xEC-0xEF */
+	0xCC, 0xA8, 0xDF, 0xB3, 0xCA, 0xB7, 0xD3, 0xD2, /* 0xF0-0xF3 */
+	0x85, 0xB4, 0xD8, 0xCF, 0xD2, 0xB6, 0xBA, 0xC5, /* 0xF4-0xF7 */
+	0xCB, 0xBE, 0xCC, 0xBE, 0x85, 0xB5, 0xDF, 0xB7, /* 0xF8-0xFB */
+	0xB5, 0xF0, 0xDF, 0xB4, 0x85, 0xB6, 0x85, 0xB7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_54[512] = {
+	0x85, 0xB8, 0xD3, 0xF5, 0x85, 0xB9, 0xB3, 0xD4, /* 0x00-0x03 */
+	0xB8, 0xF7, 0x85, 0xBA, 0xDF, 0xBA, 0x85, 0xBB, /* 0x04-0x07 */
+	0xBA, 0xCF, 0xBC, 0xAA, 0xB5, 0xF5, 0x85, 0xBC, /* 0x08-0x0B */
+	0xCD, 0xAC, 0xC3, 0xFB, 0xBA, 0xF3, 0xC0, 0xF4, /* 0x0C-0x0F */
+	0xCD, 0xC2, 0xCF, 0xF2, 0xDF, 0xB8, 0xCF, 0xC5, /* 0x10-0x13 */
+	0x85, 0xBD, 0xC2, 0xC0, 0xDF, 0xB9, 0xC2, 0xF0, /* 0x14-0x17 */
+	0x85, 0xBE, 0x85, 0xBF, 0x85, 0xC0, 0xBE, 0xFD, /* 0x18-0x1B */
+	0x85, 0xC1, 0xC1, 0xDF, 0xCD, 0xCC, 0xD2, 0xF7, /* 0x1C-0x1F */
+	0xB7, 0xCD, 0xDF, 0xC1, 0x85, 0xC2, 0xDF, 0xC4, /* 0x20-0x23 */
+	0x85, 0xC3, 0x85, 0xC4, 0xB7, 0xF1, 0xB0, 0xC9, /* 0x24-0x27 */
+	0xB6, 0xD6, 0xB7, 0xD4, 0x85, 0xC5, 0xBA, 0xAC, /* 0x28-0x2B */
+	0xCC, 0xFD, 0xBF, 0xD4, 0xCB, 0xB1, 0xC6, 0xF4, /* 0x2C-0x2F */
+	0x85, 0xC6, 0xD6, 0xA8, 0xDF, 0xC5, 0x85, 0xC7, /* 0x30-0x33 */
+	0xCE, 0xE2, 0xB3, 0xB3, 0x85, 0xC8, 0x85, 0xC9, /* 0x34-0x37 */
+	0xCE, 0xFC, 0xB4, 0xB5, 0x85, 0xCA, 0xCE, 0xC7, /* 0x38-0x3B */
+	0xBA, 0xF0, 0x85, 0xCB, 0xCE, 0xE1, 0x85, 0xCC, /* 0x3C-0x3F */
+	0xD1, 0xBD, 0x85, 0xCD, 0x85, 0xCE, 0xDF, 0xC0, /* 0x40-0x43 */
+	0x85, 0xCF, 0x85, 0xD0, 0xB4, 0xF4, 0x85, 0xD1, /* 0x44-0x47 */
+	0xB3, 0xCA, 0x85, 0xD2, 0xB8, 0xE6, 0xDF, 0xBB, /* 0x48-0x4B */
+	0x85, 0xD3, 0x85, 0xD4, 0x85, 0xD5, 0x85, 0xD6, /* 0x4C-0x4F */
+	0xC4, 0xC5, 0x85, 0xD7, 0xDF, 0xBC, 0xDF, 0xBD, /* 0x50-0x53 */
+	0xDF, 0xBE, 0xC5, 0xBB, 0xDF, 0xBF, 0xDF, 0xC2, /* 0x54-0x57 */
+	0xD4, 0xB1, 0xDF, 0xC3, 0x85, 0xD8, 0xC7, 0xBA, /* 0x58-0x5B */
+	0xCE, 0xD8, 0x85, 0xD9, 0x85, 0xDA, 0x85, 0xDB, /* 0x5C-0x5F */
+	0x85, 0xDC, 0x85, 0xDD, 0xC4, 0xD8, 0x85, 0xDE, /* 0x60-0x63 */
+	0xDF, 0xCA, 0x85, 0xDF, 0xDF, 0xCF, 0x85, 0xE0, /* 0x64-0x67 */
+	0xD6, 0xDC, 0x85, 0xE1, 0x85, 0xE2, 0x85, 0xE3, /* 0x68-0x6B */
+	0x85, 0xE4, 0x85, 0xE5, 0x85, 0xE6, 0x85, 0xE7, /* 0x6C-0x6F */
+	0x85, 0xE8, 0xDF, 0xC9, 0xDF, 0xDA, 0xCE, 0xB6, /* 0x70-0x73 */
+	0x85, 0xE9, 0xBA, 0xC7, 0xDF, 0xCE, 0xDF, 0xC8, /* 0x74-0x77 */
+	0xC5, 0xDE, 0x85, 0xEA, 0x85, 0xEB, 0xC9, 0xEB, /* 0x78-0x7B */
+	0xBA, 0xF4, 0xC3, 0xFC, 0x85, 0xEC, 0x85, 0xED, /* 0x7C-0x7F */
+	
+	0xBE, 0xD7, 0x85, 0xEE, 0xDF, 0xC6, 0x85, 0xEF, /* 0x80-0x83 */
+	0xDF, 0xCD, 0x85, 0xF0, 0xC5, 0xD8, 0x85, 0xF1, /* 0x84-0x87 */
+	0x85, 0xF2, 0x85, 0xF3, 0x85, 0xF4, 0xD5, 0xA6, /* 0x88-0x8B */
+	0xBA, 0xCD, 0x85, 0xF5, 0xBE, 0xCC, 0xD3, 0xBD, /* 0x8C-0x8F */
+	0xB8, 0xC0, 0x85, 0xF6, 0xD6, 0xE4, 0x85, 0xF7, /* 0x90-0x93 */
+	0xDF, 0xC7, 0xB9, 0xBE, 0xBF, 0xA7, 0x85, 0xF8, /* 0x94-0x97 */
+	0x85, 0xF9, 0xC1, 0xFC, 0xDF, 0xCB, 0xDF, 0xCC, /* 0x98-0x9B */
+	0x85, 0xFA, 0xDF, 0xD0, 0x85, 0xFB, 0x85, 0xFC, /* 0x9C-0x9F */
+	0x85, 0xFD, 0x85, 0xFE, 0x86, 0x40, 0xDF, 0xDB, /* 0xA0-0xA3 */
+	0xDF, 0xE5, 0x86, 0x41, 0xDF, 0xD7, 0xDF, 0xD6, /* 0xA4-0xA7 */
+	0xD7, 0xC9, 0xDF, 0xE3, 0xDF, 0xE4, 0xE5, 0xEB, /* 0xA8-0xAB */
+	0xD2, 0xA7, 0xDF, 0xD2, 0x86, 0x42, 0xBF, 0xA9, /* 0xAC-0xAF */
+	0x86, 0x43, 0xD4, 0xDB, 0x86, 0x44, 0xBF, 0xC8, /* 0xB0-0xB3 */
+	0xDF, 0xD4, 0x86, 0x45, 0x86, 0x46, 0x86, 0x47, /* 0xB4-0xB7 */
+	0xCF, 0xCC, 0x86, 0x48, 0x86, 0x49, 0xDF, 0xDD, /* 0xB8-0xBB */
+	0x86, 0x4A, 0xD1, 0xCA, 0x86, 0x4B, 0xDF, 0xDE, /* 0xBC-0xBF */
+	0xB0, 0xA7, 0xC6, 0xB7, 0xDF, 0xD3, 0x86, 0x4C, /* 0xC0-0xC3 */
+	0xBA, 0xE5, 0x86, 0x4D, 0xB6, 0xDF, 0xCD, 0xDB, /* 0xC4-0xC7 */
+	0xB9, 0xFE, 0xD4, 0xD5, 0x86, 0x4E, 0x86, 0x4F, /* 0xC8-0xCB */
+	0xDF, 0xDF, 0xCF, 0xEC, 0xB0, 0xA5, 0xDF, 0xE7, /* 0xCC-0xCF */
+	0xDF, 0xD1, 0xD1, 0xC6, 0xDF, 0xD5, 0xDF, 0xD8, /* 0xD0-0xD3 */
+	0xDF, 0xD9, 0xDF, 0xDC, 0x86, 0x50, 0xBB, 0xA9, /* 0xD4-0xD7 */
+	0x86, 0x51, 0xDF, 0xE0, 0xDF, 0xE1, 0x86, 0x52, /* 0xD8-0xDB */
+	0xDF, 0xE2, 0xDF, 0xE6, 0xDF, 0xE8, 0xD3, 0xB4, /* 0xDC-0xDF */
+	0x86, 0x53, 0x86, 0x54, 0x86, 0x55, 0x86, 0x56, /* 0xE0-0xE3 */
+	0x86, 0x57, 0xB8, 0xE7, 0xC5, 0xB6, 0xDF, 0xEA, /* 0xE4-0xE7 */
+	0xC9, 0xDA, 0xC1, 0xA8, 0xC4, 0xC4, 0x86, 0x58, /* 0xE8-0xEB */
+	0x86, 0x59, 0xBF, 0xDE, 0xCF, 0xF8, 0x86, 0x5A, /* 0xEC-0xEF */
+	0x86, 0x5B, 0x86, 0x5C, 0xD5, 0xDC, 0xDF, 0xEE, /* 0xF0-0xF3 */
+	0x86, 0x5D, 0x86, 0x5E, 0x86, 0x5F, 0x86, 0x60, /* 0xF4-0xF7 */
+	0x86, 0x61, 0x86, 0x62, 0xB2, 0xB8, 0x86, 0x63, /* 0xF8-0xFB */
+	0xBA, 0xDF, 0xDF, 0xEC, 0x86, 0x64, 0xDB, 0xC1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_55[512] = {
+	0x86, 0x65, 0xD1, 0xE4, 0x86, 0x66, 0x86, 0x67, /* 0x00-0x03 */
+	0x86, 0x68, 0x86, 0x69, 0xCB, 0xF4, 0xB4, 0xBD, /* 0x04-0x07 */
+	0x86, 0x6A, 0xB0, 0xA6, 0x86, 0x6B, 0x86, 0x6C, /* 0x08-0x0B */
+	0x86, 0x6D, 0x86, 0x6E, 0x86, 0x6F, 0xDF, 0xF1, /* 0x0C-0x0F */
+	0xCC, 0xC6, 0xDF, 0xF2, 0x86, 0x70, 0x86, 0x71, /* 0x10-0x13 */
+	0xDF, 0xED, 0x86, 0x72, 0x86, 0x73, 0x86, 0x74, /* 0x14-0x17 */
+	0x86, 0x75, 0x86, 0x76, 0x86, 0x77, 0xDF, 0xE9, /* 0x18-0x1B */
+	0x86, 0x78, 0x86, 0x79, 0x86, 0x7A, 0x86, 0x7B, /* 0x1C-0x1F */
+	0xDF, 0xEB, 0x86, 0x7C, 0xDF, 0xEF, 0xDF, 0xF0, /* 0x20-0x23 */
+	0xBB, 0xBD, 0x86, 0x7D, 0x86, 0x7E, 0xDF, 0xF3, /* 0x24-0x27 */
+	0x86, 0x80, 0x86, 0x81, 0xDF, 0xF4, 0x86, 0x82, /* 0x28-0x2B */
+	0xBB, 0xA3, 0x86, 0x83, 0xCA, 0xDB, 0xCE, 0xA8, /* 0x2C-0x2F */
+	0xE0, 0xA7, 0xB3, 0xAA, 0x86, 0x84, 0xE0, 0xA6, /* 0x30-0x33 */
+	0x86, 0x85, 0x86, 0x86, 0x86, 0x87, 0xE0, 0xA1, /* 0x34-0x37 */
+	0x86, 0x88, 0x86, 0x89, 0x86, 0x8A, 0x86, 0x8B, /* 0x38-0x3B */
+	0xDF, 0xFE, 0x86, 0x8C, 0xCD, 0xD9, 0xDF, 0xFC, /* 0x3C-0x3F */
+	0x86, 0x8D, 0xDF, 0xFA, 0x86, 0x8E, 0xBF, 0xD0, /* 0x40-0x43 */
+	0xD7, 0xC4, 0x86, 0x8F, 0xC9, 0xCC, 0x86, 0x90, /* 0x44-0x47 */
+	0x86, 0x91, 0xDF, 0xF8, 0xB0, 0xA1, 0x86, 0x92, /* 0x48-0x4B */
+	0x86, 0x93, 0x86, 0x94, 0x86, 0x95, 0x86, 0x96, /* 0x4C-0x4F */
+	0xDF, 0xFD, 0x86, 0x97, 0x86, 0x98, 0x86, 0x99, /* 0x50-0x53 */
+	0x86, 0x9A, 0xDF, 0xFB, 0xE0, 0xA2, 0x86, 0x9B, /* 0x54-0x57 */
+	0x86, 0x9C, 0x86, 0x9D, 0x86, 0x9E, 0x86, 0x9F, /* 0x58-0x5B */
+	0xE0, 0xA8, 0x86, 0xA0, 0x86, 0xA1, 0x86, 0xA2, /* 0x5C-0x5F */
+	0x86, 0xA3, 0xB7, 0xC8, 0x86, 0xA4, 0x86, 0xA5, /* 0x60-0x63 */
+	0xC6, 0xA1, 0xC9, 0xB6, 0xC0, 0xB2, 0xDF, 0xF5, /* 0x64-0x67 */
+	0x86, 0xA6, 0x86, 0xA7, 0xC5, 0xBE, 0x86, 0xA8, /* 0x68-0x6B */
+	0xD8, 0xC4, 0xDF, 0xF9, 0xC4, 0xF6, 0x86, 0xA9, /* 0x6C-0x6F */
+	0x86, 0xAA, 0x86, 0xAB, 0x86, 0xAC, 0x86, 0xAD, /* 0x70-0x73 */
+	0x86, 0xAE, 0xE0, 0xA3, 0xE0, 0xA4, 0xE0, 0xA5, /* 0x74-0x77 */
+	0xD0, 0xA5, 0x86, 0xAF, 0x86, 0xB0, 0xE0, 0xB4, /* 0x78-0x7B */
+	0xCC, 0xE4, 0x86, 0xB1, 0xE0, 0xB1, 0x86, 0xB2, /* 0x7C-0x7F */
+	
+	0xBF, 0xA6, 0xE0, 0xAF, 0xCE, 0xB9, 0xE0, 0xAB, /* 0x80-0x83 */
+	0xC9, 0xC6, 0x86, 0xB3, 0x86, 0xB4, 0xC0, 0xAE, /* 0x84-0x87 */
+	0xE0, 0xAE, 0xBA, 0xED, 0xBA, 0xB0, 0xE0, 0xA9, /* 0x88-0x8B */
+	0x86, 0xB5, 0x86, 0xB6, 0x86, 0xB7, 0xDF, 0xF6, /* 0x8C-0x8F */
+	0x86, 0xB8, 0xE0, 0xB3, 0x86, 0xB9, 0x86, 0xBA, /* 0x90-0x93 */
+	0xE0, 0xB8, 0x86, 0xBB, 0x86, 0xBC, 0x86, 0xBD, /* 0x94-0x97 */
+	0xB4, 0xAD, 0xE0, 0xB9, 0x86, 0xBE, 0x86, 0xBF, /* 0x98-0x9B */
+	0xCF, 0xB2, 0xBA, 0xC8, 0x86, 0xC0, 0xE0, 0xB0, /* 0x9C-0x9F */
+	0x86, 0xC1, 0x86, 0xC2, 0x86, 0xC3, 0x86, 0xC4, /* 0xA0-0xA3 */
+	0x86, 0xC5, 0x86, 0xC6, 0x86, 0xC7, 0xD0, 0xFA, /* 0xA4-0xA7 */
+	0x86, 0xC8, 0x86, 0xC9, 0x86, 0xCA, 0x86, 0xCB, /* 0xA8-0xAB */
+	0x86, 0xCC, 0x86, 0xCD, 0x86, 0xCE, 0x86, 0xCF, /* 0xAC-0xAF */
+	0x86, 0xD0, 0xE0, 0xAC, 0x86, 0xD1, 0xD4, 0xFB, /* 0xB0-0xB3 */
+	0x86, 0xD2, 0xDF, 0xF7, 0x86, 0xD3, 0xC5, 0xE7, /* 0xB4-0xB7 */
+	0x86, 0xD4, 0xE0, 0xAD, 0x86, 0xD5, 0xD3, 0xF7, /* 0xB8-0xBB */
+	0x86, 0xD6, 0xE0, 0xB6, 0xE0, 0xB7, 0x86, 0xD7, /* 0xBC-0xBF */
+	0x86, 0xD8, 0x86, 0xD9, 0x86, 0xDA, 0x86, 0xDB, /* 0xC0-0xC3 */
+	0xE0, 0xC4, 0xD0, 0xE1, 0x86, 0xDC, 0x86, 0xDD, /* 0xC4-0xC7 */
+	0x86, 0xDE, 0xE0, 0xBC, 0x86, 0xDF, 0x86, 0xE0, /* 0xC8-0xCB */
+	0xE0, 0xC9, 0xE0, 0xCA, 0x86, 0xE1, 0x86, 0xE2, /* 0xCC-0xCF */
+	0x86, 0xE3, 0xE0, 0xBE, 0xE0, 0xAA, 0xC9, 0xA4, /* 0xD0-0xD3 */
+	0xE0, 0xC1, 0x86, 0xE4, 0xE0, 0xB2, 0x86, 0xE5, /* 0xD4-0xD7 */
+	0x86, 0xE6, 0x86, 0xE7, 0x86, 0xE8, 0x86, 0xE9, /* 0xD8-0xDB */
+	0xCA, 0xC8, 0xE0, 0xC3, 0x86, 0xEA, 0xE0, 0xB5, /* 0xDC-0xDF */
+	0x86, 0xEB, 0xCE, 0xCB, 0x86, 0xEC, 0xCB, 0xC3, /* 0xE0-0xE3 */
+	0xE0, 0xCD, 0xE0, 0xC6, 0xE0, 0xC2, 0x86, 0xED, /* 0xE4-0xE7 */
+	0xE0, 0xCB, 0x86, 0xEE, 0xE0, 0xBA, 0xE0, 0xBF, /* 0xE8-0xEB */
+	0xE0, 0xC0, 0x86, 0xEF, 0x86, 0xF0, 0xE0, 0xC5, /* 0xEC-0xEF */
+	0x86, 0xF1, 0x86, 0xF2, 0xE0, 0xC7, 0xE0, 0xC8, /* 0xF0-0xF3 */
+	0x86, 0xF3, 0xE0, 0xCC, 0x86, 0xF4, 0xE0, 0xBB, /* 0xF4-0xF7 */
+	0x86, 0xF5, 0x86, 0xF6, 0x86, 0xF7, 0x86, 0xF8, /* 0xF8-0xFB */
+	0x86, 0xF9, 0xCB, 0xD4, 0xE0, 0xD5, 0x86, 0xFA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_56[512] = {
+	0xE0, 0xD6, 0xE0, 0xD2, 0x86, 0xFB, 0x86, 0xFC, /* 0x00-0x03 */
+	0x86, 0xFD, 0x86, 0xFE, 0x87, 0x40, 0x87, 0x41, /* 0x04-0x07 */
+	0xE0, 0xD0, 0xBC, 0xCE, 0x87, 0x42, 0x87, 0x43, /* 0x08-0x0B */
+	0xE0, 0xD1, 0x87, 0x44, 0xB8, 0xC2, 0xD8, 0xC5, /* 0x0C-0x0F */
+	0x87, 0x45, 0x87, 0x46, 0x87, 0x47, 0x87, 0x48, /* 0x10-0x13 */
+	0x87, 0x49, 0x87, 0x4A, 0x87, 0x4B, 0x87, 0x4C, /* 0x14-0x17 */
+	0xD0, 0xEA, 0x87, 0x4D, 0x87, 0x4E, 0xC2, 0xEF, /* 0x18-0x1B */
+	0x87, 0x4F, 0x87, 0x50, 0xE0, 0xCF, 0xE0, 0xBD, /* 0x1C-0x1F */
+	0x87, 0x51, 0x87, 0x52, 0x87, 0x53, 0xE0, 0xD4, /* 0x20-0x23 */
+	0xE0, 0xD3, 0x87, 0x54, 0x87, 0x55, 0xE0, 0xD7, /* 0x24-0x27 */
+	0x87, 0x56, 0x87, 0x57, 0x87, 0x58, 0x87, 0x59, /* 0x28-0x2B */
+	0xE0, 0xDC, 0xE0, 0xD8, 0x87, 0x5A, 0x87, 0x5B, /* 0x2C-0x2F */
+	0x87, 0x5C, 0xD6, 0xF6, 0xB3, 0xB0, 0x87, 0x5D, /* 0x30-0x33 */
+	0xD7, 0xEC, 0x87, 0x5E, 0xCB, 0xBB, 0x87, 0x5F, /* 0x34-0x37 */
+	0x87, 0x60, 0xE0, 0xDA, 0x87, 0x61, 0xCE, 0xFB, /* 0x38-0x3B */
+	0x87, 0x62, 0x87, 0x63, 0x87, 0x64, 0xBA, 0xD9, /* 0x3C-0x3F */
+	0x87, 0x65, 0x87, 0x66, 0x87, 0x67, 0x87, 0x68, /* 0x40-0x43 */
+	0x87, 0x69, 0x87, 0x6A, 0x87, 0x6B, 0x87, 0x6C, /* 0x44-0x47 */
+	0x87, 0x6D, 0x87, 0x6E, 0x87, 0x6F, 0x87, 0x70, /* 0x48-0x4B */
+	0xE0, 0xE1, 0xE0, 0xDD, 0xD2, 0xAD, 0x87, 0x71, /* 0x4C-0x4F */
+	0x87, 0x72, 0x87, 0x73, 0x87, 0x74, 0x87, 0x75, /* 0x50-0x53 */
+	0xE0, 0xE2, 0x87, 0x76, 0x87, 0x77, 0xE0, 0xDB, /* 0x54-0x57 */
+	0xE0, 0xD9, 0xE0, 0xDF, 0x87, 0x78, 0x87, 0x79, /* 0x58-0x5B */
+	0xE0, 0xE0, 0x87, 0x7A, 0x87, 0x7B, 0x87, 0x7C, /* 0x5C-0x5F */
+	0x87, 0x7D, 0x87, 0x7E, 0xE0, 0xDE, 0x87, 0x80, /* 0x60-0x63 */
+	0xE0, 0xE4, 0x87, 0x81, 0x87, 0x82, 0x87, 0x83, /* 0x64-0x67 */
+	0xC6, 0xF7, 0xD8, 0xAC, 0xD4, 0xEB, 0xE0, 0xE6, /* 0x68-0x6B */
+	0xCA, 0xC9, 0x87, 0x84, 0x87, 0x85, 0x87, 0x86, /* 0x6C-0x6F */
+	0x87, 0x87, 0xE0, 0xE5, 0x87, 0x88, 0x87, 0x89, /* 0x70-0x73 */
+	0x87, 0x8A, 0x87, 0x8B, 0xB8, 0xC1, 0x87, 0x8C, /* 0x74-0x77 */
+	0x87, 0x8D, 0x87, 0x8E, 0x87, 0x8F, 0xE0, 0xE7, /* 0x78-0x7B */
+	0xE0, 0xE8, 0x87, 0x90, 0x87, 0x91, 0x87, 0x92, /* 0x7C-0x7F */
+	
+	0x87, 0x93, 0x87, 0x94, 0x87, 0x95, 0x87, 0x96, /* 0x80-0x83 */
+	0x87, 0x97, 0xE0, 0xE9, 0xE0, 0xE3, 0x87, 0x98, /* 0x84-0x87 */
+	0x87, 0x99, 0x87, 0x9A, 0x87, 0x9B, 0x87, 0x9C, /* 0x88-0x8B */
+	0x87, 0x9D, 0x87, 0x9E, 0xBA, 0xBF, 0xCC, 0xE7, /* 0x8C-0x8F */
+	0x87, 0x9F, 0x87, 0xA0, 0x87, 0xA1, 0xE0, 0xEA, /* 0x90-0x93 */
+	0x87, 0xA2, 0x87, 0xA3, 0x87, 0xA4, 0x87, 0xA5, /* 0x94-0x97 */
+	0x87, 0xA6, 0x87, 0xA7, 0x87, 0xA8, 0x87, 0xA9, /* 0x98-0x9B */
+	0x87, 0xAA, 0x87, 0xAB, 0x87, 0xAC, 0x87, 0xAD, /* 0x9C-0x9F */
+	0x87, 0xAE, 0x87, 0xAF, 0x87, 0xB0, 0xCF, 0xF9, /* 0xA0-0xA3 */
+	0x87, 0xB1, 0x87, 0xB2, 0x87, 0xB3, 0x87, 0xB4, /* 0xA4-0xA7 */
+	0x87, 0xB5, 0x87, 0xB6, 0x87, 0xB7, 0x87, 0xB8, /* 0xA8-0xAB */
+	0x87, 0xB9, 0x87, 0xBA, 0x87, 0xBB, 0xE0, 0xEB, /* 0xAC-0xAF */
+	0x87, 0xBC, 0x87, 0xBD, 0x87, 0xBE, 0x87, 0xBF, /* 0xB0-0xB3 */
+	0x87, 0xC0, 0x87, 0xC1, 0x87, 0xC2, 0xC8, 0xC2, /* 0xB4-0xB7 */
+	0x87, 0xC3, 0x87, 0xC4, 0x87, 0xC5, 0x87, 0xC6, /* 0xB8-0xBB */
+	0xBD, 0xC0, 0x87, 0xC7, 0x87, 0xC8, 0x87, 0xC9, /* 0xBC-0xBF */
+	0x87, 0xCA, 0x87, 0xCB, 0x87, 0xCC, 0x87, 0xCD, /* 0xC0-0xC3 */
+	0x87, 0xCE, 0x87, 0xCF, 0x87, 0xD0, 0x87, 0xD1, /* 0xC4-0xC7 */
+	0x87, 0xD2, 0x87, 0xD3, 0xC4, 0xD2, 0x87, 0xD4, /* 0xC8-0xCB */
+	0x87, 0xD5, 0x87, 0xD6, 0x87, 0xD7, 0x87, 0xD8, /* 0xCC-0xCF */
+	0x87, 0xD9, 0x87, 0xDA, 0x87, 0xDB, 0x87, 0xDC, /* 0xD0-0xD3 */
+	0xE0, 0xEC, 0x87, 0xDD, 0x87, 0xDE, 0xE0, 0xED, /* 0xD4-0xD7 */
+	0x87, 0xDF, 0x87, 0xE0, 0xC7, 0xF4, 0xCB, 0xC4, /* 0xD8-0xDB */
+	0x87, 0xE1, 0xE0, 0xEE, 0xBB, 0xD8, 0xD8, 0xB6, /* 0xDC-0xDF */
+	0xD2, 0xF2, 0xE0, 0xEF, 0xCD, 0xC5, 0x87, 0xE2, /* 0xE0-0xE3 */
+	0xB6, 0xDA, 0x87, 0xE3, 0x87, 0xE4, 0x87, 0xE5, /* 0xE4-0xE7 */
+	0x87, 0xE6, 0x87, 0xE7, 0x87, 0xE8, 0xE0, 0xF1, /* 0xE8-0xEB */
+	0x87, 0xE9, 0xD4, 0xB0, 0x87, 0xEA, 0x87, 0xEB, /* 0xEC-0xEF */
+	0xC0, 0xA7, 0xB4, 0xD1, 0x87, 0xEC, 0x87, 0xED, /* 0xF0-0xF3 */
+	0xCE, 0xA7, 0xE0, 0xF0, 0x87, 0xEE, 0x87, 0xEF, /* 0xF4-0xF7 */
+	0x87, 0xF0, 0xE0, 0xF2, 0xB9, 0xCC, 0x87, 0xF1, /* 0xF8-0xFB */
+	0x87, 0xF2, 0xB9, 0xFA, 0xCD, 0xBC, 0xE0, 0xF3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_57[512] = {
+	0x87, 0xF3, 0x87, 0xF4, 0x87, 0xF5, 0xC6, 0xD4, /* 0x00-0x03 */
+	0xE0, 0xF4, 0x87, 0xF6, 0xD4, 0xB2, 0x87, 0xF7, /* 0x04-0x07 */
+	0xC8, 0xA6, 0xE0, 0xF6, 0xE0, 0xF5, 0x87, 0xF8, /* 0x08-0x0B */
+	0x87, 0xF9, 0x87, 0xFA, 0x87, 0xFB, 0x87, 0xFC, /* 0x0C-0x0F */
+	0x87, 0xFD, 0x87, 0xFE, 0x88, 0x40, 0x88, 0x41, /* 0x10-0x13 */
+	0x88, 0x42, 0x88, 0x43, 0x88, 0x44, 0x88, 0x45, /* 0x14-0x17 */
+	0x88, 0x46, 0x88, 0x47, 0x88, 0x48, 0x88, 0x49, /* 0x18-0x1B */
+	0xE0, 0xF7, 0x88, 0x4A, 0x88, 0x4B, 0xCD, 0xC1, /* 0x1C-0x1F */
+	0x88, 0x4C, 0x88, 0x4D, 0x88, 0x4E, 0xCA, 0xA5, /* 0x20-0x23 */
+	0x88, 0x4F, 0x88, 0x50, 0x88, 0x51, 0x88, 0x52, /* 0x24-0x27 */
+	0xD4, 0xDA, 0xDB, 0xD7, 0xDB, 0xD9, 0x88, 0x53, /* 0x28-0x2B */
+	0xDB, 0xD8, 0xB9, 0xE7, 0xDB, 0xDC, 0xDB, 0xDD, /* 0x2C-0x2F */
+	0xB5, 0xD8, 0x88, 0x54, 0x88, 0x55, 0xDB, 0xDA, /* 0x30-0x33 */
+	0x88, 0x56, 0x88, 0x57, 0x88, 0x58, 0x88, 0x59, /* 0x34-0x37 */
+	0x88, 0x5A, 0xDB, 0xDB, 0xB3, 0xA1, 0xDB, 0xDF, /* 0x38-0x3B */
+	0x88, 0x5B, 0x88, 0x5C, 0xBB, 0xF8, 0x88, 0x5D, /* 0x3C-0x3F */
+	0xD6, 0xB7, 0x88, 0x5E, 0xDB, 0xE0, 0x88, 0x5F, /* 0x40-0x43 */
+	0x88, 0x60, 0x88, 0x61, 0x88, 0x62, 0xBE, 0xF9, /* 0x44-0x47 */
+	0x88, 0x63, 0x88, 0x64, 0xB7, 0xBB, 0x88, 0x65, /* 0x48-0x4B */
+	0xDB, 0xD0, 0xCC, 0xAE, 0xBF, 0xB2, 0xBB, 0xB5, /* 0x4C-0x4F */
+	0xD7, 0xF8, 0xBF, 0xD3, 0x88, 0x66, 0x88, 0x67, /* 0x50-0x53 */
+	0x88, 0x68, 0x88, 0x69, 0x88, 0x6A, 0xBF, 0xE9, /* 0x54-0x57 */
+	0x88, 0x6B, 0x88, 0x6C, 0xBC, 0xE1, 0xCC, 0xB3, /* 0x58-0x5B */
+	0xDB, 0xDE, 0xB0, 0xD3, 0xCE, 0xEB, 0xB7, 0xD8, /* 0x5C-0x5F */
+	0xD7, 0xB9, 0xC6, 0xC2, 0x88, 0x6D, 0x88, 0x6E, /* 0x60-0x63 */
+	0xC0, 0xA4, 0x88, 0x6F, 0xCC, 0xB9, 0x88, 0x70, /* 0x64-0x67 */
+	0xDB, 0xE7, 0xDB, 0xE1, 0xC6, 0xBA, 0xDB, 0xE3, /* 0x68-0x6B */
+	0x88, 0x71, 0xDB, 0xE8, 0x88, 0x72, 0xC5, 0xF7, /* 0x6C-0x6F */
+	0x88, 0x73, 0x88, 0x74, 0x88, 0x75, 0xDB, 0xEA, /* 0x70-0x73 */
+	0x88, 0x76, 0x88, 0x77, 0xDB, 0xE9, 0xBF, 0xC0, /* 0x74-0x77 */
+	0x88, 0x78, 0x88, 0x79, 0x88, 0x7A, 0xDB, 0xE6, /* 0x78-0x7B */
+	0xDB, 0xE5, 0x88, 0x7B, 0x88, 0x7C, 0x88, 0x7D, /* 0x7C-0x7F */
+	
+	0x88, 0x7E, 0x88, 0x80, 0xB4, 0xB9, 0xC0, 0xAC, /* 0x80-0x83 */
+	0xC2, 0xA2, 0xDB, 0xE2, 0xDB, 0xE4, 0x88, 0x81, /* 0x84-0x87 */
+	0x88, 0x82, 0x88, 0x83, 0x88, 0x84, 0xD0, 0xCD, /* 0x88-0x8B */
+	0xDB, 0xED, 0x88, 0x85, 0x88, 0x86, 0x88, 0x87, /* 0x8C-0x8F */
+	0x88, 0x88, 0x88, 0x89, 0xC0, 0xDD, 0xDB, 0xF2, /* 0x90-0x93 */
+	0x88, 0x8A, 0x88, 0x8B, 0x88, 0x8C, 0x88, 0x8D, /* 0x94-0x97 */
+	0x88, 0x8E, 0x88, 0x8F, 0x88, 0x90, 0xB6, 0xE2, /* 0x98-0x9B */
+	0x88, 0x91, 0x88, 0x92, 0x88, 0x93, 0x88, 0x94, /* 0x9C-0x9F */
+	0xDB, 0xF3, 0xDB, 0xD2, 0xB9, 0xB8, 0xD4, 0xAB, /* 0xA0-0xA3 */
+	0xDB, 0xEC, 0x88, 0x95, 0xBF, 0xD1, 0xDB, 0xF0, /* 0xA4-0xA7 */
+	0x88, 0x96, 0xDB, 0xD1, 0x88, 0x97, 0xB5, 0xE6, /* 0xA8-0xAB */
+	0x88, 0x98, 0xDB, 0xEB, 0xBF, 0xE5, 0x88, 0x99, /* 0xAC-0xAF */
+	0x88, 0x9A, 0x88, 0x9B, 0xDB, 0xEE, 0x88, 0x9C, /* 0xB0-0xB3 */
+	0xDB, 0xF1, 0x88, 0x9D, 0x88, 0x9E, 0x88, 0x9F, /* 0xB4-0xB7 */
+	0xDB, 0xF9, 0x88, 0xA0, 0x88, 0xA1, 0x88, 0xA2, /* 0xB8-0xBB */
+	0x88, 0xA3, 0x88, 0xA4, 0x88, 0xA5, 0x88, 0xA6, /* 0xBC-0xBF */
+	0x88, 0xA7, 0x88, 0xA8, 0xB9, 0xA1, 0xB0, 0xA3, /* 0xC0-0xC3 */
+	0x88, 0xA9, 0x88, 0xAA, 0x88, 0xAB, 0x88, 0xAC, /* 0xC4-0xC7 */
+	0x88, 0xAD, 0x88, 0xAE, 0x88, 0xAF, 0xC2, 0xF1, /* 0xC8-0xCB */
+	0x88, 0xB0, 0x88, 0xB1, 0xB3, 0xC7, 0xDB, 0xEF, /* 0xCC-0xCF */
+	0x88, 0xB2, 0x88, 0xB3, 0xDB, 0xF8, 0x88, 0xB4, /* 0xD0-0xD3 */
+	0xC6, 0xD2, 0xDB, 0xF4, 0x88, 0xB5, 0x88, 0xB6, /* 0xD4-0xD7 */
+	0xDB, 0xF5, 0xDB, 0xF7, 0xDB, 0xF6, 0x88, 0xB7, /* 0xD8-0xDB */
+	0x88, 0xB8, 0xDB, 0xFE, 0x88, 0xB9, 0xD3, 0xF2, /* 0xDC-0xDF */
+	0xB2, 0xBA, 0x88, 0xBA, 0x88, 0xBB, 0x88, 0xBC, /* 0xE0-0xE3 */
+	0xDB, 0xFD, 0x88, 0xBD, 0x88, 0xBE, 0x88, 0xBF, /* 0xE4-0xE7 */
+	0x88, 0xC0, 0x88, 0xC1, 0x88, 0xC2, 0x88, 0xC3, /* 0xE8-0xEB */
+	0x88, 0xC4, 0xDC, 0xA4, 0x88, 0xC5, 0xDB, 0xFB, /* 0xEC-0xEF */
+	0x88, 0xC6, 0x88, 0xC7, 0x88, 0xC8, 0x88, 0xC9, /* 0xF0-0xF3 */
+	0xDB, 0xFA, 0x88, 0xCA, 0x88, 0xCB, 0x88, 0xCC, /* 0xF4-0xF7 */
+	0xDB, 0xFC, 0xC5, 0xE0, 0xBB, 0xF9, 0x88, 0xCD, /* 0xF8-0xFB */
+	0x88, 0xCE, 0xDC, 0xA3, 0x88, 0xCF, 0x88, 0xD0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_58[512] = {
+	0xDC, 0xA5, 0x88, 0xD1, 0xCC, 0xC3, 0x88, 0xD2, /* 0x00-0x03 */
+	0x88, 0xD3, 0x88, 0xD4, 0xB6, 0xD1, 0xDD, 0xC0, /* 0x04-0x07 */
+	0x88, 0xD5, 0x88, 0xD6, 0x88, 0xD7, 0xDC, 0xA1, /* 0x08-0x0B */
+	0x88, 0xD8, 0xDC, 0xA2, 0x88, 0xD9, 0x88, 0xDA, /* 0x0C-0x0F */
+	0x88, 0xDB, 0xC7, 0xB5, 0x88, 0xDC, 0x88, 0xDD, /* 0x10-0x13 */
+	0x88, 0xDE, 0xB6, 0xE9, 0x88, 0xDF, 0x88, 0xE0, /* 0x14-0x17 */
+	0x88, 0xE1, 0xDC, 0xA7, 0x88, 0xE2, 0x88, 0xE3, /* 0x18-0x1B */
+	0x88, 0xE4, 0x88, 0xE5, 0xDC, 0xA6, 0x88, 0xE6, /* 0x1C-0x1F */
+	0xDC, 0xA9, 0xB1, 0xA4, 0x88, 0xE7, 0x88, 0xE8, /* 0x20-0x23 */
+	0xB5, 0xCC, 0x88, 0xE9, 0x88, 0xEA, 0x88, 0xEB, /* 0x24-0x27 */
+	0x88, 0xEC, 0x88, 0xED, 0xBF, 0xB0, 0x88, 0xEE, /* 0x28-0x2B */
+	0x88, 0xEF, 0x88, 0xF0, 0x88, 0xF1, 0x88, 0xF2, /* 0x2C-0x2F */
+	0xD1, 0xDF, 0x88, 0xF3, 0x88, 0xF4, 0x88, 0xF5, /* 0x30-0x33 */
+	0x88, 0xF6, 0xB6, 0xC2, 0x88, 0xF7, 0x88, 0xF8, /* 0x34-0x37 */
+	0x88, 0xF9, 0x88, 0xFA, 0x88, 0xFB, 0x88, 0xFC, /* 0x38-0x3B */
+	0x88, 0xFD, 0x88, 0xFE, 0x89, 0x40, 0x89, 0x41, /* 0x3C-0x3F */
+	0x89, 0x42, 0x89, 0x43, 0x89, 0x44, 0x89, 0x45, /* 0x40-0x43 */
+	0xDC, 0xA8, 0x89, 0x46, 0x89, 0x47, 0x89, 0x48, /* 0x44-0x47 */
+	0x89, 0x49, 0x89, 0x4A, 0x89, 0x4B, 0x89, 0x4C, /* 0x48-0x4B */
+	0xCB, 0xFA, 0xEB, 0xF3, 0x89, 0x4D, 0x89, 0x4E, /* 0x4C-0x4F */
+	0x89, 0x4F, 0xCB, 0xDC, 0x89, 0x50, 0x89, 0x51, /* 0x50-0x53 */
+	0xCB, 0xFE, 0x89, 0x52, 0x89, 0x53, 0x89, 0x54, /* 0x54-0x57 */
+	0xCC, 0xC1, 0x89, 0x55, 0x89, 0x56, 0x89, 0x57, /* 0x58-0x5B */
+	0x89, 0x58, 0x89, 0x59, 0xC8, 0xFB, 0x89, 0x5A, /* 0x5C-0x5F */
+	0x89, 0x5B, 0x89, 0x5C, 0x89, 0x5D, 0x89, 0x5E, /* 0x60-0x63 */
+	0x89, 0x5F, 0xDC, 0xAA, 0x89, 0x60, 0x89, 0x61, /* 0x64-0x67 */
+	0x89, 0x62, 0x89, 0x63, 0x89, 0x64, 0xCC, 0xEE, /* 0x68-0x6B */
+	0xDC, 0xAB, 0x89, 0x65, 0x89, 0x66, 0x89, 0x67, /* 0x6C-0x6F */
+	0x89, 0x68, 0x89, 0x69, 0x89, 0x6A, 0x89, 0x6B, /* 0x70-0x73 */
+	0x89, 0x6C, 0x89, 0x6D, 0x89, 0x6E, 0x89, 0x6F, /* 0x74-0x77 */
+	0x89, 0x70, 0x89, 0x71, 0x89, 0x72, 0x89, 0x73, /* 0x78-0x7B */
+	0x89, 0x74, 0x89, 0x75, 0xDB, 0xD3, 0x89, 0x76, /* 0x7C-0x7F */
+	
+	0xDC, 0xAF, 0xDC, 0xAC, 0x89, 0x77, 0xBE, 0xB3, /* 0x80-0x83 */
+	0x89, 0x78, 0xCA, 0xFB, 0x89, 0x79, 0x89, 0x7A, /* 0x84-0x87 */
+	0x89, 0x7B, 0xDC, 0xAD, 0x89, 0x7C, 0x89, 0x7D, /* 0x88-0x8B */
+	0x89, 0x7E, 0x89, 0x80, 0x89, 0x81, 0x89, 0x82, /* 0x8C-0x8F */
+	0x89, 0x83, 0x89, 0x84, 0xC9, 0xCA, 0xC4, 0xB9, /* 0x90-0x93 */
+	0x89, 0x85, 0x89, 0x86, 0x89, 0x87, 0x89, 0x88, /* 0x94-0x97 */
+	0x89, 0x89, 0xC7, 0xBD, 0xDC, 0xAE, 0x89, 0x8A, /* 0x98-0x9B */
+	0x89, 0x8B, 0x89, 0x8C, 0xD4, 0xF6, 0xD0, 0xE6, /* 0x9C-0x9F */
+	0x89, 0x8D, 0x89, 0x8E, 0x89, 0x8F, 0x89, 0x90, /* 0xA0-0xA3 */
+	0x89, 0x91, 0x89, 0x92, 0x89, 0x93, 0x89, 0x94, /* 0xA4-0xA7 */
+	0xC4, 0xAB, 0xB6, 0xD5, 0x89, 0x95, 0x89, 0x96, /* 0xA8-0xAB */
+	0x89, 0x97, 0x89, 0x98, 0x89, 0x99, 0x89, 0x9A, /* 0xAC-0xAF */
+	0x89, 0x9B, 0x89, 0x9C, 0x89, 0x9D, 0x89, 0x9E, /* 0xB0-0xB3 */
+	0x89, 0x9F, 0x89, 0xA0, 0x89, 0xA1, 0x89, 0xA2, /* 0xB4-0xB7 */
+	0x89, 0xA3, 0x89, 0xA4, 0x89, 0xA5, 0x89, 0xA6, /* 0xB8-0xBB */
+	0xDB, 0xD4, 0x89, 0xA7, 0x89, 0xA8, 0x89, 0xA9, /* 0xBC-0xBF */
+	0x89, 0xAA, 0xB1, 0xDA, 0x89, 0xAB, 0x89, 0xAC, /* 0xC0-0xC3 */
+	0x89, 0xAD, 0xDB, 0xD5, 0x89, 0xAE, 0x89, 0xAF, /* 0xC4-0xC7 */
+	0x89, 0xB0, 0x89, 0xB1, 0x89, 0xB2, 0x89, 0xB3, /* 0xC8-0xCB */
+	0x89, 0xB4, 0x89, 0xB5, 0x89, 0xB6, 0x89, 0xB7, /* 0xCC-0xCF */
+	0x89, 0xB8, 0xDB, 0xD6, 0x89, 0xB9, 0x89, 0xBA, /* 0xD0-0xD3 */
+	0x89, 0xBB, 0xBA, 0xBE, 0x89, 0xBC, 0x89, 0xBD, /* 0xD4-0xD7 */
+	0x89, 0xBE, 0x89, 0xBF, 0x89, 0xC0, 0x89, 0xC1, /* 0xD8-0xDB */
+	0x89, 0xC2, 0x89, 0xC3, 0x89, 0xC4, 0x89, 0xC5, /* 0xDC-0xDF */
+	0x89, 0xC6, 0x89, 0xC7, 0x89, 0xC8, 0x89, 0xC9, /* 0xE0-0xE3 */
+	0xC8, 0xC0, 0x89, 0xCA, 0x89, 0xCB, 0x89, 0xCC, /* 0xE4-0xE7 */
+	0x89, 0xCD, 0x89, 0xCE, 0x89, 0xCF, 0xCA, 0xBF, /* 0xE8-0xEB */
+	0xC8, 0xC9, 0x89, 0xD0, 0xD7, 0xB3, 0x89, 0xD1, /* 0xEC-0xEF */
+	0xC9, 0xF9, 0x89, 0xD2, 0x89, 0xD3, 0xBF, 0xC7, /* 0xF0-0xF3 */
+	0x89, 0xD4, 0x89, 0xD5, 0xBA, 0xF8, 0x89, 0xD6, /* 0xF4-0xF7 */
+	0x89, 0xD7, 0xD2, 0xBC, 0x89, 0xD8, 0x89, 0xD9, /* 0xF8-0xFB */
+	0x89, 0xDA, 0x89, 0xDB, 0x89, 0xDC, 0x89, 0xDD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_59[512] = {
+	0x89, 0xDE, 0x89, 0xDF, 0xE2, 0xBA, 0x89, 0xE0, /* 0x00-0x03 */
+	0xB4, 0xA6, 0x89, 0xE1, 0x89, 0xE2, 0xB1, 0xB8, /* 0x04-0x07 */
+	0x89, 0xE3, 0x89, 0xE4, 0x89, 0xE5, 0x89, 0xE6, /* 0x08-0x0B */
+	0x89, 0xE7, 0xB8, 0xB4, 0x89, 0xE8, 0xCF, 0xC4, /* 0x0C-0x0F */
+	0x89, 0xE9, 0x89, 0xEA, 0x89, 0xEB, 0x89, 0xEC, /* 0x10-0x13 */
+	0xD9, 0xE7, 0xCF, 0xA6, 0xCD, 0xE2, 0x89, 0xED, /* 0x14-0x17 */
+	0x89, 0xEE, 0xD9, 0xED, 0xB6, 0xE0, 0x89, 0xEF, /* 0x18-0x1B */
+	0xD2, 0xB9, 0x89, 0xF0, 0x89, 0xF1, 0xB9, 0xBB, /* 0x1C-0x1F */
+	0x89, 0xF2, 0x89, 0xF3, 0x89, 0xF4, 0x89, 0xF5, /* 0x20-0x23 */
+	0xE2, 0xB9, 0xE2, 0xB7, 0x89, 0xF6, 0xB4, 0xF3, /* 0x24-0x27 */
+	0x89, 0xF7, 0xCC, 0xEC, 0xCC, 0xAB, 0xB7, 0xF2, /* 0x28-0x2B */
+	0x89, 0xF8, 0xD8, 0xB2, 0xD1, 0xEB, 0xBA, 0xBB, /* 0x2C-0x2F */
+	0x89, 0xF9, 0xCA, 0xA7, 0x89, 0xFA, 0x89, 0xFB, /* 0x30-0x33 */
+	0xCD, 0xB7, 0x89, 0xFC, 0x89, 0xFD, 0xD2, 0xC4, /* 0x34-0x37 */
+	0xBF, 0xE4, 0xBC, 0xD0, 0xB6, 0xE1, 0x89, 0xFE, /* 0x38-0x3B */
+	0xDE, 0xC5, 0x8A, 0x40, 0x8A, 0x41, 0x8A, 0x42, /* 0x3C-0x3F */
+	0x8A, 0x43, 0xDE, 0xC6, 0xDB, 0xBC, 0x8A, 0x44, /* 0x40-0x43 */
+	0xD1, 0xD9, 0x8A, 0x45, 0x8A, 0x46, 0xC6, 0xE6, /* 0x44-0x47 */
+	0xC4, 0xCE, 0xB7, 0xEE, 0x8A, 0x47, 0xB7, 0xDC, /* 0x48-0x4B */
+	0x8A, 0x48, 0x8A, 0x49, 0xBF, 0xFC, 0xD7, 0xE0, /* 0x4C-0x4F */
+	0x8A, 0x4A, 0xC6, 0xF5, 0x8A, 0x4B, 0x8A, 0x4C, /* 0x50-0x53 */
+	0xB1, 0xBC, 0xDE, 0xC8, 0xBD, 0xB1, 0xCC, 0xD7, /* 0x54-0x57 */
+	0xDE, 0xCA, 0x8A, 0x4D, 0xDE, 0xC9, 0x8A, 0x4E, /* 0x58-0x5B */
+	0x8A, 0x4F, 0x8A, 0x50, 0x8A, 0x51, 0x8A, 0x52, /* 0x5C-0x5F */
+	0xB5, 0xEC, 0x8A, 0x53, 0xC9, 0xDD, 0x8A, 0x54, /* 0x60-0x63 */
+	0x8A, 0x55, 0xB0, 0xC2, 0x8A, 0x56, 0x8A, 0x57, /* 0x64-0x67 */
+	0x8A, 0x58, 0x8A, 0x59, 0x8A, 0x5A, 0x8A, 0x5B, /* 0x68-0x6B */
+	0x8A, 0x5C, 0x8A, 0x5D, 0x8A, 0x5E, 0x8A, 0x5F, /* 0x6C-0x6F */
+	0x8A, 0x60, 0x8A, 0x61, 0x8A, 0x62, 0xC5, 0xAE, /* 0x70-0x73 */
+	0xC5, 0xAB, 0x8A, 0x63, 0xC4, 0xCC, 0x8A, 0x64, /* 0x74-0x77 */
+	0xBC, 0xE9, 0xCB, 0xFD, 0x8A, 0x65, 0x8A, 0x66, /* 0x78-0x7B */
+	0x8A, 0x67, 0xBA, 0xC3, 0x8A, 0x68, 0x8A, 0x69, /* 0x7C-0x7F */
+	
+	0x8A, 0x6A, 0xE5, 0xF9, 0xC8, 0xE7, 0xE5, 0xFA, /* 0x80-0x83 */
+	0xCD, 0xFD, 0x8A, 0x6B, 0xD7, 0xB1, 0xB8, 0xBE, /* 0x84-0x87 */
+	0xC2, 0xE8, 0x8A, 0x6C, 0xC8, 0xD1, 0x8A, 0x6D, /* 0x88-0x8B */
+	0x8A, 0x6E, 0xE5, 0xFB, 0x8A, 0x6F, 0x8A, 0x70, /* 0x8C-0x8F */
+	0x8A, 0x71, 0x8A, 0x72, 0xB6, 0xCA, 0xBC, 0xCB, /* 0x90-0x93 */
+	0x8A, 0x73, 0x8A, 0x74, 0xD1, 0xFD, 0xE6, 0xA1, /* 0x94-0x97 */
+	0x8A, 0x75, 0xC3, 0xEE, 0x8A, 0x76, 0x8A, 0x77, /* 0x98-0x9B */
+	0x8A, 0x78, 0x8A, 0x79, 0xE6, 0xA4, 0x8A, 0x7A, /* 0x9C-0x9F */
+	0x8A, 0x7B, 0x8A, 0x7C, 0x8A, 0x7D, 0xE5, 0xFE, /* 0xA0-0xA3 */
+	0xE6, 0xA5, 0xCD, 0xD7, 0x8A, 0x7E, 0x8A, 0x80, /* 0xA4-0xA7 */
+	0xB7, 0xC1, 0xE5, 0xFC, 0xE5, 0xFD, 0xE6, 0xA3, /* 0xA8-0xAB */
+	0x8A, 0x81, 0x8A, 0x82, 0xC4, 0xDD, 0xE6, 0xA8, /* 0xAC-0xAF */
+	0x8A, 0x83, 0x8A, 0x84, 0xE6, 0xA7, 0x8A, 0x85, /* 0xB0-0xB3 */
+	0x8A, 0x86, 0x8A, 0x87, 0x8A, 0x88, 0x8A, 0x89, /* 0xB4-0xB7 */
+	0x8A, 0x8A, 0xC3, 0xC3, 0x8A, 0x8B, 0xC6, 0xDE, /* 0xB8-0xBB */
+	0x8A, 0x8C, 0x8A, 0x8D, 0xE6, 0xAA, 0x8A, 0x8E, /* 0xBC-0xBF */
+	0x8A, 0x8F, 0x8A, 0x90, 0x8A, 0x91, 0x8A, 0x92, /* 0xC0-0xC3 */
+	0x8A, 0x93, 0x8A, 0x94, 0xC4, 0xB7, 0x8A, 0x95, /* 0xC4-0xC7 */
+	0x8A, 0x96, 0x8A, 0x97, 0xE6, 0xA2, 0xCA, 0xBC, /* 0xC8-0xCB */
+	0x8A, 0x98, 0x8A, 0x99, 0x8A, 0x9A, 0x8A, 0x9B, /* 0xCC-0xCF */
+	0xBD, 0xE3, 0xB9, 0xC3, 0xE6, 0xA6, 0xD0, 0xD5, /* 0xD0-0xD3 */
+	0xCE, 0xAF, 0x8A, 0x9C, 0x8A, 0x9D, 0xE6, 0xA9, /* 0xD4-0xD7 */
+	0xE6, 0xB0, 0x8A, 0x9E, 0xD2, 0xA6, 0x8A, 0x9F, /* 0xD8-0xDB */
+	0xBD, 0xAA, 0xE6, 0xAD, 0x8A, 0xA0, 0x8A, 0xA1, /* 0xDC-0xDF */
+	0x8A, 0xA2, 0x8A, 0xA3, 0x8A, 0xA4, 0xE6, 0xAF, /* 0xE0-0xE3 */
+	0x8A, 0xA5, 0xC0, 0xD1, 0x8A, 0xA6, 0x8A, 0xA7, /* 0xE4-0xE7 */
+	0xD2, 0xCC, 0x8A, 0xA8, 0x8A, 0xA9, 0x8A, 0xAA, /* 0xE8-0xEB */
+	0xBC, 0xA7, 0x8A, 0xAB, 0x8A, 0xAC, 0x8A, 0xAD, /* 0xEC-0xEF */
+	0x8A, 0xAE, 0x8A, 0xAF, 0x8A, 0xB0, 0x8A, 0xB1, /* 0xF0-0xF3 */
+	0x8A, 0xB2, 0x8A, 0xB3, 0x8A, 0xB4, 0x8A, 0xB5, /* 0xF4-0xF7 */
+	0x8A, 0xB6, 0xE6, 0xB1, 0x8A, 0xB7, 0xD2, 0xF6, /* 0xF8-0xFB */
+	0x8A, 0xB8, 0x8A, 0xB9, 0x8A, 0xBA, 0xD7, 0xCB, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5A[512] = {
+	0x8A, 0xBB, 0xCD, 0xFE, 0x8A, 0xBC, 0xCD, 0xDE, /* 0x00-0x03 */
+	0xC2, 0xA6, 0xE6, 0xAB, 0xE6, 0xAC, 0xBD, 0xBF, /* 0x04-0x07 */
+	0xE6, 0xAE, 0xE6, 0xB3, 0x8A, 0xBD, 0x8A, 0xBE, /* 0x08-0x0B */
+	0xE6, 0xB2, 0x8A, 0xBF, 0x8A, 0xC0, 0x8A, 0xC1, /* 0x0C-0x0F */
+	0x8A, 0xC2, 0xE6, 0xB6, 0x8A, 0xC3, 0xE6, 0xB8, /* 0x10-0x13 */
+	0x8A, 0xC4, 0x8A, 0xC5, 0x8A, 0xC6, 0x8A, 0xC7, /* 0x14-0x17 */
+	0xC4, 0xEF, 0x8A, 0xC8, 0x8A, 0xC9, 0x8A, 0xCA, /* 0x18-0x1B */
+	0xC4, 0xC8, 0x8A, 0xCB, 0x8A, 0xCC, 0xBE, 0xEA, /* 0x1C-0x1F */
+	0xC9, 0xEF, 0x8A, 0xCD, 0x8A, 0xCE, 0xE6, 0xB7, /* 0x20-0x23 */
+	0x8A, 0xCF, 0xB6, 0xF0, 0x8A, 0xD0, 0x8A, 0xD1, /* 0x24-0x27 */
+	0x8A, 0xD2, 0xC3, 0xE4, 0x8A, 0xD3, 0x8A, 0xD4, /* 0x28-0x2B */
+	0x8A, 0xD5, 0x8A, 0xD6, 0x8A, 0xD7, 0x8A, 0xD8, /* 0x2C-0x2F */
+	0x8A, 0xD9, 0xD3, 0xE9, 0xE6, 0xB4, 0x8A, 0xDA, /* 0x30-0x33 */
+	0xE6, 0xB5, 0x8A, 0xDB, 0xC8, 0xA2, 0x8A, 0xDC, /* 0x34-0x37 */
+	0x8A, 0xDD, 0x8A, 0xDE, 0x8A, 0xDF, 0x8A, 0xE0, /* 0x38-0x3B */
+	0xE6, 0xBD, 0x8A, 0xE1, 0x8A, 0xE2, 0x8A, 0xE3, /* 0x3C-0x3F */
+	0xE6, 0xB9, 0x8A, 0xE4, 0x8A, 0xE5, 0x8A, 0xE6, /* 0x40-0x43 */
+	0x8A, 0xE7, 0x8A, 0xE8, 0xC6, 0xC5, 0x8A, 0xE9, /* 0x44-0x47 */
+	0x8A, 0xEA, 0xCD, 0xF1, 0xE6, 0xBB, 0x8A, 0xEB, /* 0x48-0x4B */
+	0x8A, 0xEC, 0x8A, 0xED, 0x8A, 0xEE, 0x8A, 0xEF, /* 0x4C-0x4F */
+	0x8A, 0xF0, 0x8A, 0xF1, 0x8A, 0xF2, 0x8A, 0xF3, /* 0x50-0x53 */
+	0x8A, 0xF4, 0xE6, 0xBC, 0x8A, 0xF5, 0x8A, 0xF6, /* 0x54-0x57 */
+	0x8A, 0xF7, 0x8A, 0xF8, 0xBB, 0xE9, 0x8A, 0xF9, /* 0x58-0x5B */
+	0x8A, 0xFA, 0x8A, 0xFB, 0x8A, 0xFC, 0x8A, 0xFD, /* 0x5C-0x5F */
+	0x8A, 0xFE, 0x8B, 0x40, 0xE6, 0xBE, 0x8B, 0x41, /* 0x60-0x63 */
+	0x8B, 0x42, 0x8B, 0x43, 0x8B, 0x44, 0xE6, 0xBA, /* 0x64-0x67 */
+	0x8B, 0x45, 0x8B, 0x46, 0xC0, 0xB7, 0x8B, 0x47, /* 0x68-0x6B */
+	0x8B, 0x48, 0x8B, 0x49, 0x8B, 0x4A, 0x8B, 0x4B, /* 0x6C-0x6F */
+	0x8B, 0x4C, 0x8B, 0x4D, 0x8B, 0x4E, 0x8B, 0x4F, /* 0x70-0x73 */
+	0xD3, 0xA4, 0xE6, 0xBF, 0xC9, 0xF4, 0xE6, 0xC3, /* 0x74-0x77 */
+	0x8B, 0x50, 0x8B, 0x51, 0xE6, 0xC4, 0x8B, 0x52, /* 0x78-0x7B */
+	0x8B, 0x53, 0x8B, 0x54, 0x8B, 0x55, 0xD0, 0xF6, /* 0x7C-0x7F */
+	
+	0x8B, 0x56, 0x8B, 0x57, 0x8B, 0x58, 0x8B, 0x59, /* 0x80-0x83 */
+	0x8B, 0x5A, 0x8B, 0x5B, 0x8B, 0x5C, 0x8B, 0x5D, /* 0x84-0x87 */
+	0x8B, 0x5E, 0x8B, 0x5F, 0x8B, 0x60, 0x8B, 0x61, /* 0x88-0x8B */
+	0x8B, 0x62, 0x8B, 0x63, 0x8B, 0x64, 0x8B, 0x65, /* 0x8C-0x8F */
+	0x8B, 0x66, 0x8B, 0x67, 0xC3, 0xBD, 0x8B, 0x68, /* 0x90-0x93 */
+	0x8B, 0x69, 0x8B, 0x6A, 0x8B, 0x6B, 0x8B, 0x6C, /* 0x94-0x97 */
+	0x8B, 0x6D, 0x8B, 0x6E, 0xC3, 0xC4, 0xE6, 0xC2, /* 0x98-0x9B */
+	0x8B, 0x6F, 0x8B, 0x70, 0x8B, 0x71, 0x8B, 0x72, /* 0x9C-0x9F */
+	0x8B, 0x73, 0x8B, 0x74, 0x8B, 0x75, 0x8B, 0x76, /* 0xA0-0xA3 */
+	0x8B, 0x77, 0x8B, 0x78, 0x8B, 0x79, 0x8B, 0x7A, /* 0xA4-0xA7 */
+	0x8B, 0x7B, 0x8B, 0x7C, 0xE6, 0xC1, 0x8B, 0x7D, /* 0xA8-0xAB */
+	0x8B, 0x7E, 0x8B, 0x80, 0x8B, 0x81, 0x8B, 0x82, /* 0xAC-0xAF */
+	0x8B, 0x83, 0x8B, 0x84, 0xE6, 0xC7, 0xCF, 0xB1, /* 0xB0-0xB3 */
+	0x8B, 0x85, 0xEB, 0xF4, 0x8B, 0x86, 0x8B, 0x87, /* 0xB4-0xB7 */
+	0xE6, 0xCA, 0x8B, 0x88, 0x8B, 0x89, 0x8B, 0x8A, /* 0xB8-0xBB */
+	0x8B, 0x8B, 0x8B, 0x8C, 0xE6, 0xC5, 0x8B, 0x8D, /* 0xBC-0xBF */
+	0x8B, 0x8E, 0xBC, 0xDE, 0xC9, 0xA9, 0x8B, 0x8F, /* 0xC0-0xC3 */
+	0x8B, 0x90, 0x8B, 0x91, 0x8B, 0x92, 0x8B, 0x93, /* 0xC4-0xC7 */
+	0x8B, 0x94, 0xBC, 0xB5, 0x8B, 0x95, 0x8B, 0x96, /* 0xC8-0xCB */
+	0xCF, 0xD3, 0x8B, 0x97, 0x8B, 0x98, 0x8B, 0x99, /* 0xCC-0xCF */
+	0x8B, 0x9A, 0x8B, 0x9B, 0xE6, 0xC8, 0x8B, 0x9C, /* 0xD0-0xD3 */
+	0xE6, 0xC9, 0x8B, 0x9D, 0xE6, 0xCE, 0x8B, 0x9E, /* 0xD4-0xD7 */
+	0xE6, 0xD0, 0x8B, 0x9F, 0x8B, 0xA0, 0x8B, 0xA1, /* 0xD8-0xDB */
+	0xE6, 0xD1, 0x8B, 0xA2, 0x8B, 0xA3, 0x8B, 0xA4, /* 0xDC-0xDF */
+	0xE6, 0xCB, 0xB5, 0xD5, 0x8B, 0xA5, 0xE6, 0xCC, /* 0xE0-0xE3 */
+	0x8B, 0xA6, 0x8B, 0xA7, 0xE6, 0xCF, 0x8B, 0xA8, /* 0xE4-0xE7 */
+	0x8B, 0xA9, 0xC4, 0xDB, 0x8B, 0xAA, 0xE6, 0xC6, /* 0xE8-0xEB */
+	0x8B, 0xAB, 0x8B, 0xAC, 0x8B, 0xAD, 0x8B, 0xAE, /* 0xEC-0xEF */
+	0x8B, 0xAF, 0xE6, 0xCD, 0x8B, 0xB0, 0x8B, 0xB1, /* 0xF0-0xF3 */
+	0x8B, 0xB2, 0x8B, 0xB3, 0x8B, 0xB4, 0x8B, 0xB5, /* 0xF4-0xF7 */
+	0x8B, 0xB6, 0x8B, 0xB7, 0x8B, 0xB8, 0x8B, 0xB9, /* 0xF8-0xFB */
+	0x8B, 0xBA, 0x8B, 0xBB, 0x8B, 0xBC, 0x8B, 0xBD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5B[512] = {
+	0x8B, 0xBE, 0x8B, 0xBF, 0x8B, 0xC0, 0x8B, 0xC1, /* 0x00-0x03 */
+	0x8B, 0xC2, 0x8B, 0xC3, 0x8B, 0xC4, 0x8B, 0xC5, /* 0x04-0x07 */
+	0x8B, 0xC6, 0xE6, 0xD2, 0x8B, 0xC7, 0x8B, 0xC8, /* 0x08-0x0B */
+	0x8B, 0xC9, 0x8B, 0xCA, 0x8B, 0xCB, 0x8B, 0xCC, /* 0x0C-0x0F */
+	0x8B, 0xCD, 0x8B, 0xCE, 0x8B, 0xCF, 0x8B, 0xD0, /* 0x10-0x13 */
+	0x8B, 0xD1, 0x8B, 0xD2, 0xE6, 0xD4, 0xE6, 0xD3, /* 0x14-0x17 */
+	0x8B, 0xD3, 0x8B, 0xD4, 0x8B, 0xD5, 0x8B, 0xD6, /* 0x18-0x1B */
+	0x8B, 0xD7, 0x8B, 0xD8, 0x8B, 0xD9, 0x8B, 0xDA, /* 0x1C-0x1F */
+	0x8B, 0xDB, 0x8B, 0xDC, 0x8B, 0xDD, 0x8B, 0xDE, /* 0x20-0x23 */
+	0x8B, 0xDF, 0x8B, 0xE0, 0x8B, 0xE1, 0x8B, 0xE2, /* 0x24-0x27 */
+	0x8B, 0xE3, 0x8B, 0xE4, 0x8B, 0xE5, 0x8B, 0xE6, /* 0x28-0x2B */
+	0x8B, 0xE7, 0x8B, 0xE8, 0x8B, 0xE9, 0x8B, 0xEA, /* 0x2C-0x2F */
+	0x8B, 0xEB, 0x8B, 0xEC, 0xE6, 0xD5, 0x8B, 0xED, /* 0x30-0x33 */
+	0xD9, 0xF8, 0x8B, 0xEE, 0x8B, 0xEF, 0xE6, 0xD6, /* 0x34-0x37 */
+	0x8B, 0xF0, 0x8B, 0xF1, 0x8B, 0xF2, 0x8B, 0xF3, /* 0x38-0x3B */
+	0x8B, 0xF4, 0x8B, 0xF5, 0x8B, 0xF6, 0x8B, 0xF7, /* 0x3C-0x3F */
+	0xE6, 0xD7, 0x8B, 0xF8, 0x8B, 0xF9, 0x8B, 0xFA, /* 0x40-0x43 */
+	0x8B, 0xFB, 0x8B, 0xFC, 0x8B, 0xFD, 0x8B, 0xFE, /* 0x44-0x47 */
+	0x8C, 0x40, 0x8C, 0x41, 0x8C, 0x42, 0x8C, 0x43, /* 0x48-0x4B */
+	0x8C, 0x44, 0x8C, 0x45, 0x8C, 0x46, 0x8C, 0x47, /* 0x4C-0x4F */
+	0xD7, 0xD3, 0xE6, 0xDD, 0x8C, 0x48, 0xE6, 0xDE, /* 0x50-0x53 */
+	0xBF, 0xD7, 0xD4, 0xD0, 0x8C, 0x49, 0xD7, 0xD6, /* 0x54-0x57 */
+	0xB4, 0xE6, 0xCB, 0xEF, 0xE6, 0xDA, 0xD8, 0xC3, /* 0x58-0x5B */
+	0xD7, 0xCE, 0xD0, 0xA2, 0x8C, 0x4A, 0xC3, 0xCF, /* 0x5C-0x5F */
+	0x8C, 0x4B, 0x8C, 0x4C, 0xE6, 0xDF, 0xBC, 0xBE, /* 0x60-0x63 */
+	0xB9, 0xC2, 0xE6, 0xDB, 0xD1, 0xA7, 0x8C, 0x4D, /* 0x64-0x67 */
+	0x8C, 0x4E, 0xBA, 0xA2, 0xC2, 0xCF, 0x8C, 0x4F, /* 0x68-0x6B */
+	0xD8, 0xAB, 0x8C, 0x50, 0x8C, 0x51, 0x8C, 0x52, /* 0x6C-0x6F */
+	0xCA, 0xEB, 0xE5, 0xEE, 0x8C, 0x53, 0xE6, 0xDC, /* 0x70-0x73 */
+	0x8C, 0x54, 0xB7, 0xF5, 0x8C, 0x55, 0x8C, 0x56, /* 0x74-0x77 */
+	0x8C, 0x57, 0x8C, 0x58, 0xC8, 0xE6, 0x8C, 0x59, /* 0x78-0x7B */
+	0x8C, 0x5A, 0xC4, 0xF5, 0x8C, 0x5B, 0x8C, 0x5C, /* 0x7C-0x7F */
+	
+	0xE5, 0xB2, 0xC4, 0xFE, 0x8C, 0x5D, 0xCB, 0xFC, /* 0x80-0x83 */
+	0xE5, 0xB3, 0xD5, 0xAC, 0x8C, 0x5E, 0xD3, 0xEE, /* 0x84-0x87 */
+	0xCA, 0xD8, 0xB0, 0xB2, 0x8C, 0x5F, 0xCB, 0xCE, /* 0x88-0x8B */
+	0xCD, 0xEA, 0x8C, 0x60, 0x8C, 0x61, 0xBA, 0xEA, /* 0x8C-0x8F */
+	0x8C, 0x62, 0x8C, 0x63, 0x8C, 0x64, 0xE5, 0xB5, /* 0x90-0x93 */
+	0x8C, 0x65, 0xE5, 0xB4, 0x8C, 0x66, 0xD7, 0xDA, /* 0x94-0x97 */
+	0xB9, 0xD9, 0xD6, 0xE6, 0xB6, 0xA8, 0xCD, 0xF0, /* 0x98-0x9B */
+	0xD2, 0xCB, 0xB1, 0xA6, 0xCA, 0xB5, 0x8C, 0x67, /* 0x9C-0x9F */
+	0xB3, 0xE8, 0xC9, 0xF3, 0xBF, 0xCD, 0xD0, 0xFB, /* 0xA0-0xA3 */
+	0xCA, 0xD2, 0xE5, 0xB6, 0xBB, 0xC2, 0x8C, 0x68, /* 0xA4-0xA7 */
+	0x8C, 0x69, 0x8C, 0x6A, 0xCF, 0xDC, 0xB9, 0xAC, /* 0xA8-0xAB */
+	0x8C, 0x6B, 0x8C, 0x6C, 0x8C, 0x6D, 0x8C, 0x6E, /* 0xAC-0xAF */
+	0xD4, 0xD7, 0x8C, 0x6F, 0x8C, 0x70, 0xBA, 0xA6, /* 0xB0-0xB3 */
+	0xD1, 0xE7, 0xCF, 0xFC, 0xBC, 0xD2, 0x8C, 0x71, /* 0xB4-0xB7 */
+	0xE5, 0xB7, 0xC8, 0xDD, 0x8C, 0x72, 0x8C, 0x73, /* 0xB8-0xBB */
+	0x8C, 0x74, 0xBF, 0xED, 0xB1, 0xF6, 0xCB, 0xDE, /* 0xBC-0xBF */
+	0x8C, 0x75, 0x8C, 0x76, 0xBC, 0xC5, 0x8C, 0x77, /* 0xC0-0xC3 */
+	0xBC, 0xC4, 0xD2, 0xFA, 0xC3, 0xDC, 0xBF, 0xDC, /* 0xC4-0xC7 */
+	0x8C, 0x78, 0x8C, 0x79, 0x8C, 0x7A, 0x8C, 0x7B, /* 0xC8-0xCB */
+	0xB8, 0xBB, 0x8C, 0x7C, 0x8C, 0x7D, 0x8C, 0x7E, /* 0xCC-0xCF */
+	0xC3, 0xC2, 0x8C, 0x80, 0xBA, 0xAE, 0xD4, 0xA2, /* 0xD0-0xD3 */
+	0x8C, 0x81, 0x8C, 0x82, 0x8C, 0x83, 0x8C, 0x84, /* 0xD4-0xD7 */
+	0x8C, 0x85, 0x8C, 0x86, 0x8C, 0x87, 0x8C, 0x88, /* 0xD8-0xDB */
+	0x8C, 0x89, 0xC7, 0xDE, 0xC4, 0xAF, 0xB2, 0xEC, /* 0xDC-0xDF */
+	0x8C, 0x8A, 0xB9, 0xD1, 0x8C, 0x8B, 0x8C, 0x8C, /* 0xE0-0xE3 */
+	0xE5, 0xBB, 0xC1, 0xC8, 0x8C, 0x8D, 0x8C, 0x8E, /* 0xE4-0xE7 */
+	0xD5, 0xAF, 0x8C, 0x8F, 0x8C, 0x90, 0x8C, 0x91, /* 0xE8-0xEB */
+	0x8C, 0x92, 0x8C, 0x93, 0xE5, 0xBC, 0x8C, 0x94, /* 0xEC-0xEF */
+	0xE5, 0xBE, 0x8C, 0x95, 0x8C, 0x96, 0x8C, 0x97, /* 0xF0-0xF3 */
+	0x8C, 0x98, 0x8C, 0x99, 0x8C, 0x9A, 0x8C, 0x9B, /* 0xF4-0xF7 */
+	0xB4, 0xE7, 0xB6, 0xD4, 0xCB, 0xC2, 0xD1, 0xB0, /* 0xF8-0xFB */
+	0xB5, 0xBC, 0x8C, 0x9C, 0x8C, 0x9D, 0xCA, 0xD9, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5C[512] = {
+	0x8C, 0x9E, 0xB7, 0xE2, 0x8C, 0x9F, 0x8C, 0xA0, /* 0x00-0x03 */
+	0xC9, 0xE4, 0x8C, 0xA1, 0xBD, 0xAB, 0x8C, 0xA2, /* 0x04-0x07 */
+	0x8C, 0xA3, 0xCE, 0xBE, 0xD7, 0xF0, 0x8C, 0xA4, /* 0x08-0x0B */
+	0x8C, 0xA5, 0x8C, 0xA6, 0x8C, 0xA7, 0xD0, 0xA1, /* 0x0C-0x0F */
+	0x8C, 0xA8, 0xC9, 0xD9, 0x8C, 0xA9, 0x8C, 0xAA, /* 0x10-0x13 */
+	0xB6, 0xFB, 0xE6, 0xD8, 0xBC, 0xE2, 0x8C, 0xAB, /* 0x14-0x17 */
+	0xB3, 0xBE, 0x8C, 0xAC, 0xC9, 0xD0, 0x8C, 0xAD, /* 0x18-0x1B */
+	0xE6, 0xD9, 0xB3, 0xA2, 0x8C, 0xAE, 0x8C, 0xAF, /* 0x1C-0x1F */
+	0x8C, 0xB0, 0x8C, 0xB1, 0xDE, 0xCC, 0x8C, 0xB2, /* 0x20-0x23 */
+	0xD3, 0xC8, 0xDE, 0xCD, 0x8C, 0xB3, 0xD2, 0xA2, /* 0x24-0x27 */
+	0x8C, 0xB4, 0x8C, 0xB5, 0x8C, 0xB6, 0x8C, 0xB7, /* 0x28-0x2B */
+	0xDE, 0xCE, 0x8C, 0xB8, 0x8C, 0xB9, 0x8C, 0xBA, /* 0x2C-0x2F */
+	0x8C, 0xBB, 0xBE, 0xCD, 0x8C, 0xBC, 0x8C, 0xBD, /* 0x30-0x33 */
+	0xDE, 0xCF, 0x8C, 0xBE, 0x8C, 0xBF, 0x8C, 0xC0, /* 0x34-0x37 */
+	0xCA, 0xAC, 0xD2, 0xFC, 0xB3, 0xDF, 0xE5, 0xEA, /* 0x38-0x3B */
+	0xC4, 0xE1, 0xBE, 0xA1, 0xCE, 0xB2, 0xC4, 0xF2, /* 0x3C-0x3F */
+	0xBE, 0xD6, 0xC6, 0xA8, 0xB2, 0xE3, 0x8C, 0xC1, /* 0x40-0x43 */
+	0x8C, 0xC2, 0xBE, 0xD3, 0x8C, 0xC3, 0x8C, 0xC4, /* 0x44-0x47 */
+	0xC7, 0xFC, 0xCC, 0xEB, 0xBD, 0xEC, 0xCE, 0xDD, /* 0x48-0x4B */
+	0x8C, 0xC5, 0x8C, 0xC6, 0xCA, 0xBA, 0xC6, 0xC1, /* 0x4C-0x4F */
+	0xE5, 0xEC, 0xD0, 0xBC, 0x8C, 0xC7, 0x8C, 0xC8, /* 0x50-0x53 */
+	0x8C, 0xC9, 0xD5, 0xB9, 0x8C, 0xCA, 0x8C, 0xCB, /* 0x54-0x57 */
+	0x8C, 0xCC, 0xE5, 0xED, 0x8C, 0xCD, 0x8C, 0xCE, /* 0x58-0x5B */
+	0x8C, 0xCF, 0x8C, 0xD0, 0xCA, 0xF4, 0x8C, 0xD1, /* 0x5C-0x5F */
+	0xCD, 0xC0, 0xC2, 0xC5, 0x8C, 0xD2, 0xE5, 0xEF, /* 0x60-0x63 */
+	0x8C, 0xD3, 0xC2, 0xC4, 0xE5, 0xF0, 0x8C, 0xD4, /* 0x64-0x67 */
+	0x8C, 0xD5, 0x8C, 0xD6, 0x8C, 0xD7, 0x8C, 0xD8, /* 0x68-0x6B */
+	0x8C, 0xD9, 0x8C, 0xDA, 0xE5, 0xF8, 0xCD, 0xCD, /* 0x6C-0x6F */
+	0x8C, 0xDB, 0xC9, 0xBD, 0x8C, 0xDC, 0x8C, 0xDD, /* 0x70-0x73 */
+	0x8C, 0xDE, 0x8C, 0xDF, 0x8C, 0xE0, 0x8C, 0xE1, /* 0x74-0x77 */
+	0x8C, 0xE2, 0xD2, 0xD9, 0xE1, 0xA8, 0x8C, 0xE3, /* 0x78-0x7B */
+	0x8C, 0xE4, 0x8C, 0xE5, 0x8C, 0xE6, 0xD3, 0xEC, /* 0x7C-0x7F */
+	
+	0x8C, 0xE7, 0xCB, 0xEA, 0xC6, 0xF1, 0x8C, 0xE8, /* 0x80-0x83 */
+	0x8C, 0xE9, 0x8C, 0xEA, 0x8C, 0xEB, 0x8C, 0xEC, /* 0x84-0x87 */
+	0xE1, 0xAC, 0x8C, 0xED, 0x8C, 0xEE, 0x8C, 0xEF, /* 0x88-0x8B */
+	0xE1, 0xA7, 0xE1, 0xA9, 0x8C, 0xF0, 0x8C, 0xF1, /* 0x8C-0x8F */
+	0xE1, 0xAA, 0xE1, 0xAF, 0x8C, 0xF2, 0x8C, 0xF3, /* 0x90-0x93 */
+	0xB2, 0xED, 0x8C, 0xF4, 0xE1, 0xAB, 0xB8, 0xDA, /* 0x94-0x97 */
+	0xE1, 0xAD, 0xE1, 0xAE, 0xE1, 0xB0, 0xB5, 0xBA, /* 0x98-0x9B */
+	0xE1, 0xB1, 0x8C, 0xF5, 0x8C, 0xF6, 0x8C, 0xF7, /* 0x9C-0x9F */
+	0x8C, 0xF8, 0x8C, 0xF9, 0xE1, 0xB3, 0xE1, 0xB8, /* 0xA0-0xA3 */
+	0x8C, 0xFA, 0x8C, 0xFB, 0x8C, 0xFC, 0x8C, 0xFD, /* 0xA4-0xA7 */
+	0x8C, 0xFE, 0xD1, 0xD2, 0x8D, 0x40, 0xE1, 0xB6, /* 0xA8-0xAB */
+	0xE1, 0xB5, 0xC1, 0xEB, 0x8D, 0x41, 0x8D, 0x42, /* 0xAC-0xAF */
+	0x8D, 0x43, 0xE1, 0xB7, 0x8D, 0x44, 0xD4, 0xC0, /* 0xB0-0xB3 */
+	0x8D, 0x45, 0xE1, 0xB2, 0x8D, 0x46, 0xE1, 0xBA, /* 0xB4-0xB7 */
+	0xB0, 0xB6, 0x8D, 0x47, 0x8D, 0x48, 0x8D, 0x49, /* 0xB8-0xBB */
+	0x8D, 0x4A, 0xE1, 0xB4, 0x8D, 0x4B, 0xBF, 0xF9, /* 0xBC-0xBF */
+	0x8D, 0x4C, 0xE1, 0xB9, 0x8D, 0x4D, 0x8D, 0x4E, /* 0xC0-0xC3 */
+	0xE1, 0xBB, 0x8D, 0x4F, 0x8D, 0x50, 0x8D, 0x51, /* 0xC4-0xC7 */
+	0x8D, 0x52, 0x8D, 0x53, 0x8D, 0x54, 0xE1, 0xBE, /* 0xC8-0xCB */
+	0x8D, 0x55, 0x8D, 0x56, 0x8D, 0x57, 0x8D, 0x58, /* 0xCC-0xCF */
+	0x8D, 0x59, 0x8D, 0x5A, 0xE1, 0xBC, 0x8D, 0x5B, /* 0xD0-0xD3 */
+	0x8D, 0x5C, 0x8D, 0x5D, 0x8D, 0x5E, 0x8D, 0x5F, /* 0xD4-0xD7 */
+	0x8D, 0x60, 0xD6, 0xC5, 0x8D, 0x61, 0x8D, 0x62, /* 0xD8-0xDB */
+	0x8D, 0x63, 0x8D, 0x64, 0x8D, 0x65, 0x8D, 0x66, /* 0xDC-0xDF */
+	0x8D, 0x67, 0xCF, 0xBF, 0x8D, 0x68, 0x8D, 0x69, /* 0xE0-0xE3 */
+	0xE1, 0xBD, 0xE1, 0xBF, 0xC2, 0xCD, 0x8D, 0x6A, /* 0xE4-0xE7 */
+	0xB6, 0xEB, 0x8D, 0x6B, 0xD3, 0xF8, 0x8D, 0x6C, /* 0xE8-0xEB */
+	0x8D, 0x6D, 0xC7, 0xCD, 0x8D, 0x6E, 0x8D, 0x6F, /* 0xEC-0xEF */
+	0xB7, 0xE5, 0x8D, 0x70, 0x8D, 0x71, 0x8D, 0x72, /* 0xF0-0xF3 */
+	0x8D, 0x73, 0x8D, 0x74, 0x8D, 0x75, 0x8D, 0x76, /* 0xF4-0xF7 */
+	0x8D, 0x77, 0x8D, 0x78, 0x8D, 0x79, 0xBE, 0xFE, /* 0xF8-0xFB */
+	0x8D, 0x7A, 0x8D, 0x7B, 0x8D, 0x7C, 0x8D, 0x7D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5D[512] = {
+	0x8D, 0x7E, 0x8D, 0x80, 0xE1, 0xC0, 0xE1, 0xC1, /* 0x00-0x03 */
+	0x8D, 0x81, 0x8D, 0x82, 0xE1, 0xC7, 0xB3, 0xE7, /* 0x04-0x07 */
+	0x8D, 0x83, 0x8D, 0x84, 0x8D, 0x85, 0x8D, 0x86, /* 0x08-0x0B */
+	0x8D, 0x87, 0x8D, 0x88, 0xC6, 0xE9, 0x8D, 0x89, /* 0x0C-0x0F */
+	0x8D, 0x8A, 0x8D, 0x8B, 0x8D, 0x8C, 0x8D, 0x8D, /* 0x10-0x13 */
+	0xB4, 0xDE, 0x8D, 0x8E, 0xD1, 0xC2, 0x8D, 0x8F, /* 0x14-0x17 */
+	0x8D, 0x90, 0x8D, 0x91, 0x8D, 0x92, 0xE1, 0xC8, /* 0x18-0x1B */
+	0x8D, 0x93, 0x8D, 0x94, 0xE1, 0xC6, 0x8D, 0x95, /* 0x1C-0x1F */
+	0x8D, 0x96, 0x8D, 0x97, 0x8D, 0x98, 0x8D, 0x99, /* 0x20-0x23 */
+	0xE1, 0xC5, 0x8D, 0x9A, 0xE1, 0xC3, 0xE1, 0xC2, /* 0x24-0x27 */
+	0x8D, 0x9B, 0xB1, 0xC0, 0x8D, 0x9C, 0x8D, 0x9D, /* 0x28-0x2B */
+	0x8D, 0x9E, 0xD5, 0xB8, 0xE1, 0xC4, 0x8D, 0x9F, /* 0x2C-0x2F */
+	0x8D, 0xA0, 0x8D, 0xA1, 0x8D, 0xA2, 0x8D, 0xA3, /* 0x30-0x33 */
+	0xE1, 0xCB, 0x8D, 0xA4, 0x8D, 0xA5, 0x8D, 0xA6, /* 0x34-0x37 */
+	0x8D, 0xA7, 0x8D, 0xA8, 0x8D, 0xA9, 0x8D, 0xAA, /* 0x38-0x3B */
+	0x8D, 0xAB, 0xE1, 0xCC, 0xE1, 0xCA, 0x8D, 0xAC, /* 0x3C-0x3F */
+	0x8D, 0xAD, 0x8D, 0xAE, 0x8D, 0xAF, 0x8D, 0xB0, /* 0x40-0x43 */
+	0x8D, 0xB1, 0x8D, 0xB2, 0x8D, 0xB3, 0xEF, 0xFA, /* 0x44-0x47 */
+	0x8D, 0xB4, 0x8D, 0xB5, 0xE1, 0xD3, 0xE1, 0xD2, /* 0x48-0x4B */
+	0xC7, 0xB6, 0x8D, 0xB6, 0x8D, 0xB7, 0x8D, 0xB8, /* 0x4C-0x4F */
+	0x8D, 0xB9, 0x8D, 0xBA, 0x8D, 0xBB, 0x8D, 0xBC, /* 0x50-0x53 */
+	0x8D, 0xBD, 0x8D, 0xBE, 0x8D, 0xBF, 0x8D, 0xC0, /* 0x54-0x57 */
+	0xE1, 0xC9, 0x8D, 0xC1, 0x8D, 0xC2, 0xE1, 0xCE, /* 0x58-0x5B */
+	0x8D, 0xC3, 0xE1, 0xD0, 0x8D, 0xC4, 0x8D, 0xC5, /* 0x5C-0x5F */
+	0x8D, 0xC6, 0x8D, 0xC7, 0x8D, 0xC8, 0x8D, 0xC9, /* 0x60-0x63 */
+	0x8D, 0xCA, 0x8D, 0xCB, 0x8D, 0xCC, 0x8D, 0xCD, /* 0x64-0x67 */
+	0x8D, 0xCE, 0xE1, 0xD4, 0x8D, 0xCF, 0xE1, 0xD1, /* 0x68-0x6B */
+	0xE1, 0xCD, 0x8D, 0xD0, 0x8D, 0xD1, 0xE1, 0xCF, /* 0x6C-0x6F */
+	0x8D, 0xD2, 0x8D, 0xD3, 0x8D, 0xD4, 0x8D, 0xD5, /* 0x70-0x73 */
+	0xE1, 0xD5, 0x8D, 0xD6, 0x8D, 0xD7, 0x8D, 0xD8, /* 0x74-0x77 */
+	0x8D, 0xD9, 0x8D, 0xDA, 0x8D, 0xDB, 0x8D, 0xDC, /* 0x78-0x7B */
+	0x8D, 0xDD, 0x8D, 0xDE, 0x8D, 0xDF, 0x8D, 0xE0, /* 0x7C-0x7F */
+	
+	0x8D, 0xE1, 0x8D, 0xE2, 0xE1, 0xD6, 0x8D, 0xE3, /* 0x80-0x83 */
+	0x8D, 0xE4, 0x8D, 0xE5, 0x8D, 0xE6, 0x8D, 0xE7, /* 0x84-0x87 */
+	0x8D, 0xE8, 0x8D, 0xE9, 0x8D, 0xEA, 0x8D, 0xEB, /* 0x88-0x8B */
+	0x8D, 0xEC, 0x8D, 0xED, 0x8D, 0xEE, 0x8D, 0xEF, /* 0x8C-0x8F */
+	0x8D, 0xF0, 0x8D, 0xF1, 0x8D, 0xF2, 0x8D, 0xF3, /* 0x90-0x93 */
+	0x8D, 0xF4, 0x8D, 0xF5, 0x8D, 0xF6, 0x8D, 0xF7, /* 0x94-0x97 */
+	0x8D, 0xF8, 0xE1, 0xD7, 0x8D, 0xF9, 0x8D, 0xFA, /* 0x98-0x9B */
+	0x8D, 0xFB, 0xE1, 0xD8, 0x8D, 0xFC, 0x8D, 0xFD, /* 0x9C-0x9F */
+	0x8D, 0xFE, 0x8E, 0x40, 0x8E, 0x41, 0x8E, 0x42, /* 0xA0-0xA3 */
+	0x8E, 0x43, 0x8E, 0x44, 0x8E, 0x45, 0x8E, 0x46, /* 0xA4-0xA7 */
+	0x8E, 0x47, 0x8E, 0x48, 0x8E, 0x49, 0x8E, 0x4A, /* 0xA8-0xAB */
+	0x8E, 0x4B, 0x8E, 0x4C, 0x8E, 0x4D, 0x8E, 0x4E, /* 0xAC-0xAF */
+	0x8E, 0x4F, 0x8E, 0x50, 0x8E, 0x51, 0x8E, 0x52, /* 0xB0-0xB3 */
+	0x8E, 0x53, 0x8E, 0x54, 0x8E, 0x55, 0xE1, 0xDA, /* 0xB4-0xB7 */
+	0x8E, 0x56, 0x8E, 0x57, 0x8E, 0x58, 0x8E, 0x59, /* 0xB8-0xBB */
+	0x8E, 0x5A, 0x8E, 0x5B, 0x8E, 0x5C, 0x8E, 0x5D, /* 0xBC-0xBF */
+	0x8E, 0x5E, 0x8E, 0x5F, 0x8E, 0x60, 0x8E, 0x61, /* 0xC0-0xC3 */
+	0x8E, 0x62, 0xE1, 0xDB, 0x8E, 0x63, 0x8E, 0x64, /* 0xC4-0xC7 */
+	0x8E, 0x65, 0x8E, 0x66, 0x8E, 0x67, 0x8E, 0x68, /* 0xC8-0xCB */
+	0x8E, 0x69, 0xCE, 0xA1, 0x8E, 0x6A, 0x8E, 0x6B, /* 0xCC-0xCF */
+	0x8E, 0x6C, 0x8E, 0x6D, 0x8E, 0x6E, 0x8E, 0x6F, /* 0xD0-0xD3 */
+	0x8E, 0x70, 0x8E, 0x71, 0x8E, 0x72, 0x8E, 0x73, /* 0xD4-0xD7 */
+	0x8E, 0x74, 0x8E, 0x75, 0x8E, 0x76, 0xE7, 0xDD, /* 0xD8-0xDB */
+	0x8E, 0x77, 0xB4, 0xA8, 0xD6, 0xDD, 0x8E, 0x78, /* 0xDC-0xDF */
+	0x8E, 0x79, 0xD1, 0xB2, 0xB3, 0xB2, 0x8E, 0x7A, /* 0xE0-0xE3 */
+	0x8E, 0x7B, 0xB9, 0xA4, 0xD7, 0xF3, 0xC7, 0xC9, /* 0xE4-0xE7 */
+	0xBE, 0xDE, 0xB9, 0xAE, 0x8E, 0x7C, 0xCE, 0xD7, /* 0xE8-0xEB */
+	0x8E, 0x7D, 0x8E, 0x7E, 0xB2, 0xEE, 0xDB, 0xCF, /* 0xEC-0xEF */
+	0x8E, 0x80, 0xBC, 0xBA, 0xD2, 0xD1, 0xCB, 0xC8, /* 0xF0-0xF3 */
+	0xB0, 0xCD, 0x8E, 0x81, 0x8E, 0x82, 0xCF, 0xEF, /* 0xF4-0xF7 */
+	0x8E, 0x83, 0x8E, 0x84, 0x8E, 0x85, 0x8E, 0x86, /* 0xF8-0xFB */
+	0x8E, 0x87, 0xD9, 0xE3, 0xBD, 0xED, 0x8E, 0x88, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5E[512] = {
+	0x8E, 0x89, 0xB1, 0xD2, 0xCA, 0xD0, 0xB2, 0xBC, /* 0x00-0x03 */
+	0x8E, 0x8A, 0xCB, 0xA7, 0xB7, 0xAB, 0x8E, 0x8B, /* 0x04-0x07 */
+	0xCA, 0xA6, 0x8E, 0x8C, 0x8E, 0x8D, 0x8E, 0x8E, /* 0x08-0x0B */
+	0xCF, 0xA3, 0x8E, 0x8F, 0x8E, 0x90, 0xE0, 0xF8, /* 0x0C-0x0F */
+	0xD5, 0xCA, 0xE0, 0xFB, 0x8E, 0x91, 0x8E, 0x92, /* 0x10-0x13 */
+	0xE0, 0xFA, 0xC5, 0xC1, 0xCC, 0xFB, 0x8E, 0x93, /* 0x14-0x17 */
+	0xC1, 0xB1, 0xE0, 0xF9, 0xD6, 0xE3, 0xB2, 0xAF, /* 0x18-0x1B */
+	0xD6, 0xC4, 0xB5, 0xDB, 0x8E, 0x94, 0x8E, 0x95, /* 0x1C-0x1F */
+	0x8E, 0x96, 0x8E, 0x97, 0x8E, 0x98, 0x8E, 0x99, /* 0x20-0x23 */
+	0x8E, 0x9A, 0x8E, 0x9B, 0xB4, 0xF8, 0xD6, 0xA1, /* 0x24-0x27 */
+	0x8E, 0x9C, 0x8E, 0x9D, 0x8E, 0x9E, 0x8E, 0x9F, /* 0x28-0x2B */
+	0x8E, 0xA0, 0xCF, 0xAF, 0xB0, 0xEF, 0x8E, 0xA1, /* 0x2C-0x2F */
+	0x8E, 0xA2, 0xE0, 0xFC, 0x8E, 0xA3, 0x8E, 0xA4, /* 0x30-0x33 */
+	0x8E, 0xA5, 0x8E, 0xA6, 0x8E, 0xA7, 0xE1, 0xA1, /* 0x34-0x37 */
+	0xB3, 0xA3, 0x8E, 0xA8, 0x8E, 0xA9, 0xE0, 0xFD, /* 0x38-0x3B */
+	0xE0, 0xFE, 0xC3, 0xB1, 0x8E, 0xAA, 0x8E, 0xAB, /* 0x3C-0x3F */
+	0x8E, 0xAC, 0x8E, 0xAD, 0xC3, 0xDD, 0x8E, 0xAE, /* 0x40-0x43 */
+	0xE1, 0xA2, 0xB7, 0xF9, 0x8E, 0xAF, 0x8E, 0xB0, /* 0x44-0x47 */
+	0x8E, 0xB1, 0x8E, 0xB2, 0x8E, 0xB3, 0x8E, 0xB4, /* 0x48-0x4B */
+	0xBB, 0xCF, 0x8E, 0xB5, 0x8E, 0xB6, 0x8E, 0xB7, /* 0x4C-0x4F */
+	0x8E, 0xB8, 0x8E, 0xB9, 0x8E, 0xBA, 0x8E, 0xBB, /* 0x50-0x53 */
+	0xE1, 0xA3, 0xC4, 0xBB, 0x8E, 0xBC, 0x8E, 0xBD, /* 0x54-0x57 */
+	0x8E, 0xBE, 0x8E, 0xBF, 0x8E, 0xC0, 0xE1, 0xA4, /* 0x58-0x5B */
+	0x8E, 0xC1, 0x8E, 0xC2, 0xE1, 0xA5, 0x8E, 0xC3, /* 0x5C-0x5F */
+	0x8E, 0xC4, 0xE1, 0xA6, 0xB4, 0xB1, 0x8E, 0xC5, /* 0x60-0x63 */
+	0x8E, 0xC6, 0x8E, 0xC7, 0x8E, 0xC8, 0x8E, 0xC9, /* 0x64-0x67 */
+	0x8E, 0xCA, 0x8E, 0xCB, 0x8E, 0xCC, 0x8E, 0xCD, /* 0x68-0x6B */
+	0x8E, 0xCE, 0x8E, 0xCF, 0x8E, 0xD0, 0x8E, 0xD1, /* 0x6C-0x6F */
+	0x8E, 0xD2, 0x8E, 0xD3, 0xB8, 0xC9, 0xC6, 0xBD, /* 0x70-0x73 */
+	0xC4, 0xEA, 0x8E, 0xD4, 0xB2, 0xA2, 0x8E, 0xD5, /* 0x74-0x77 */
+	0xD0, 0xD2, 0x8E, 0xD6, 0xE7, 0xDB, 0xBB, 0xC3, /* 0x78-0x7B */
+	0xD3, 0xD7, 0xD3, 0xC4, 0x8E, 0xD7, 0xB9, 0xE3, /* 0x7C-0x7F */
+	
+	0xE2, 0xCF, 0x8E, 0xD8, 0x8E, 0xD9, 0x8E, 0xDA, /* 0x80-0x83 */
+	0xD7, 0xAF, 0x8E, 0xDB, 0xC7, 0xEC, 0xB1, 0xD3, /* 0x84-0x87 */
+	0x8E, 0xDC, 0x8E, 0xDD, 0xB4, 0xB2, 0xE2, 0xD1, /* 0x88-0x8B */
+	0x8E, 0xDE, 0x8E, 0xDF, 0x8E, 0xE0, 0xD0, 0xF2, /* 0x8C-0x8F */
+	0xC2, 0xAE, 0xE2, 0xD0, 0x8E, 0xE1, 0xBF, 0xE2, /* 0x90-0x93 */
+	0xD3, 0xA6, 0xB5, 0xD7, 0xE2, 0xD2, 0xB5, 0xEA, /* 0x94-0x97 */
+	0x8E, 0xE2, 0xC3, 0xED, 0xB8, 0xFD, 0x8E, 0xE3, /* 0x98-0x9B */
+	0xB8, 0xAE, 0x8E, 0xE4, 0xC5, 0xD3, 0xB7, 0xCF, /* 0x9C-0x9F */
+	0xE2, 0xD4, 0x8E, 0xE5, 0x8E, 0xE6, 0x8E, 0xE7, /* 0xA0-0xA3 */
+	0x8E, 0xE8, 0xE2, 0xD3, 0xB6, 0xC8, 0xD7, 0xF9, /* 0xA4-0xA7 */
+	0x8E, 0xE9, 0x8E, 0xEA, 0x8E, 0xEB, 0x8E, 0xEC, /* 0xA8-0xAB */
+	0x8E, 0xED, 0xCD, 0xA5, 0x8E, 0xEE, 0x8E, 0xEF, /* 0xAC-0xAF */
+	0x8E, 0xF0, 0x8E, 0xF1, 0x8E, 0xF2, 0xE2, 0xD8, /* 0xB0-0xB3 */
+	0x8E, 0xF3, 0xE2, 0xD6, 0xCA, 0xFC, 0xBF, 0xB5, /* 0xB4-0xB7 */
+	0xD3, 0xB9, 0xE2, 0xD5, 0x8E, 0xF4, 0x8E, 0xF5, /* 0xB8-0xBB */
+	0x8E, 0xF6, 0x8E, 0xF7, 0xE2, 0xD7, 0x8E, 0xF8, /* 0xBC-0xBF */
+	0x8E, 0xF9, 0x8E, 0xFA, 0x8E, 0xFB, 0x8E, 0xFC, /* 0xC0-0xC3 */
+	0x8E, 0xFD, 0x8E, 0xFE, 0x8F, 0x40, 0x8F, 0x41, /* 0xC4-0xC7 */
+	0x8F, 0x42, 0xC1, 0xAE, 0xC0, 0xC8, 0x8F, 0x43, /* 0xC8-0xCB */
+	0x8F, 0x44, 0x8F, 0x45, 0x8F, 0x46, 0x8F, 0x47, /* 0xCC-0xCF */
+	0x8F, 0x48, 0xE2, 0xDB, 0xE2, 0xDA, 0xC0, 0xAA, /* 0xD0-0xD3 */
+	0x8F, 0x49, 0x8F, 0x4A, 0xC1, 0xCE, 0x8F, 0x4B, /* 0xD4-0xD7 */
+	0x8F, 0x4C, 0x8F, 0x4D, 0x8F, 0x4E, 0xE2, 0xDC, /* 0xD8-0xDB */
+	0x8F, 0x4F, 0x8F, 0x50, 0x8F, 0x51, 0x8F, 0x52, /* 0xDC-0xDF */
+	0x8F, 0x53, 0x8F, 0x54, 0x8F, 0x55, 0x8F, 0x56, /* 0xE0-0xE3 */
+	0x8F, 0x57, 0x8F, 0x58, 0x8F, 0x59, 0x8F, 0x5A, /* 0xE4-0xE7 */
+	0xE2, 0xDD, 0x8F, 0x5B, 0xE2, 0xDE, 0x8F, 0x5C, /* 0xE8-0xEB */
+	0x8F, 0x5D, 0x8F, 0x5E, 0x8F, 0x5F, 0x8F, 0x60, /* 0xEC-0xEF */
+	0x8F, 0x61, 0x8F, 0x62, 0x8F, 0x63, 0x8F, 0x64, /* 0xF0-0xF3 */
+	0xDB, 0xC8, 0x8F, 0x65, 0xD1, 0xD3, 0xCD, 0xA2, /* 0xF4-0xF7 */
+	0x8F, 0x66, 0x8F, 0x67, 0xBD, 0xA8, 0x8F, 0x68, /* 0xF8-0xFB */
+	0x8F, 0x69, 0x8F, 0x6A, 0xDE, 0xC3, 0xD8, 0xA5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5F[512] = {
+	0xBF, 0xAA, 0xDB, 0xCD, 0xD2, 0xEC, 0xC6, 0xFA, /* 0x00-0x03 */
+	0xC5, 0xAA, 0x8F, 0x6B, 0x8F, 0x6C, 0x8F, 0x6D, /* 0x04-0x07 */
+	0xDE, 0xC4, 0x8F, 0x6E, 0xB1, 0xD7, 0xDF, 0xAE, /* 0x08-0x0B */
+	0x8F, 0x6F, 0x8F, 0x70, 0x8F, 0x71, 0xCA, 0xBD, /* 0x0C-0x0F */
+	0x8F, 0x72, 0xDF, 0xB1, 0x8F, 0x73, 0xB9, 0xAD, /* 0x10-0x13 */
+	0x8F, 0x74, 0xD2, 0xFD, 0x8F, 0x75, 0xB8, 0xA5, /* 0x14-0x17 */
+	0xBA, 0xEB, 0x8F, 0x76, 0x8F, 0x77, 0xB3, 0xDA, /* 0x18-0x1B */
+	0x8F, 0x78, 0x8F, 0x79, 0x8F, 0x7A, 0xB5, 0xDC, /* 0x1C-0x1F */
+	0xD5, 0xC5, 0x8F, 0x7B, 0x8F, 0x7C, 0x8F, 0x7D, /* 0x20-0x23 */
+	0x8F, 0x7E, 0xC3, 0xD6, 0xCF, 0xD2, 0xBB, 0xA1, /* 0x24-0x27 */
+	0x8F, 0x80, 0xE5, 0xF3, 0xE5, 0xF2, 0x8F, 0x81, /* 0x28-0x2B */
+	0x8F, 0x82, 0xE5, 0xF4, 0x8F, 0x83, 0xCD, 0xE4, /* 0x2C-0x2F */
+	0x8F, 0x84, 0xC8, 0xF5, 0x8F, 0x85, 0x8F, 0x86, /* 0x30-0x33 */
+	0x8F, 0x87, 0x8F, 0x88, 0x8F, 0x89, 0x8F, 0x8A, /* 0x34-0x37 */
+	0x8F, 0x8B, 0xB5, 0xAF, 0xC7, 0xBF, 0x8F, 0x8C, /* 0x38-0x3B */
+	0xE5, 0xF6, 0x8F, 0x8D, 0x8F, 0x8E, 0x8F, 0x8F, /* 0x3C-0x3F */
+	0xEC, 0xB0, 0x8F, 0x90, 0x8F, 0x91, 0x8F, 0x92, /* 0x40-0x43 */
+	0x8F, 0x93, 0x8F, 0x94, 0x8F, 0x95, 0x8F, 0x96, /* 0x44-0x47 */
+	0x8F, 0x97, 0x8F, 0x98, 0x8F, 0x99, 0x8F, 0x9A, /* 0x48-0x4B */
+	0x8F, 0x9B, 0x8F, 0x9C, 0x8F, 0x9D, 0x8F, 0x9E, /* 0x4C-0x4F */
+	0xE5, 0xE6, 0x8F, 0x9F, 0xB9, 0xE9, 0xB5, 0xB1, /* 0x50-0x53 */
+	0x8F, 0xA0, 0xC2, 0xBC, 0xE5, 0xE8, 0xE5, 0xE7, /* 0x54-0x57 */
+	0xE5, 0xE9, 0x8F, 0xA1, 0x8F, 0xA2, 0x8F, 0xA3, /* 0x58-0x5B */
+	0x8F, 0xA4, 0xD2, 0xCD, 0x8F, 0xA5, 0x8F, 0xA6, /* 0x5C-0x5F */
+	0x8F, 0xA7, 0xE1, 0xEA, 0xD0, 0xCE, 0x8F, 0xA8, /* 0x60-0x63 */
+	0xCD, 0xAE, 0x8F, 0xA9, 0xD1, 0xE5, 0x8F, 0xAA, /* 0x64-0x67 */
+	0x8F, 0xAB, 0xB2, 0xCA, 0xB1, 0xEB, 0x8F, 0xAC, /* 0x68-0x6B */
+	0xB1, 0xF2, 0xC5, 0xED, 0x8F, 0xAD, 0x8F, 0xAE, /* 0x6C-0x6F */
+	0xD5, 0xC3, 0xD3, 0xB0, 0x8F, 0xAF, 0xE1, 0xDC, /* 0x70-0x73 */
+	0x8F, 0xB0, 0x8F, 0xB1, 0x8F, 0xB2, 0xE1, 0xDD, /* 0x74-0x77 */
+	0x8F, 0xB3, 0xD2, 0xDB, 0x8F, 0xB4, 0xB3, 0xB9, /* 0x78-0x7B */
+	0xB1, 0xCB, 0x8F, 0xB5, 0x8F, 0xB6, 0x8F, 0xB7, /* 0x7C-0x7F */
+	
+	0xCD, 0xF9, 0xD5, 0xF7, 0xE1, 0xDE, 0x8F, 0xB8, /* 0x80-0x83 */
+	0xBE, 0xB6, 0xB4, 0xFD, 0x8F, 0xB9, 0xE1, 0xDF, /* 0x84-0x87 */
+	0xBA, 0xDC, 0xE1, 0xE0, 0xBB, 0xB2, 0xC2, 0xC9, /* 0x88-0x8B */
+	0xE1, 0xE1, 0x8F, 0xBA, 0x8F, 0xBB, 0x8F, 0xBC, /* 0x8C-0x8F */
+	0xD0, 0xEC, 0x8F, 0xBD, 0xCD, 0xBD, 0x8F, 0xBE, /* 0x90-0x93 */
+	0x8F, 0xBF, 0xE1, 0xE2, 0x8F, 0xC0, 0xB5, 0xC3, /* 0x94-0x97 */
+	0xC5, 0xC7, 0xE1, 0xE3, 0x8F, 0xC1, 0x8F, 0xC2, /* 0x98-0x9B */
+	0xE1, 0xE4, 0x8F, 0xC3, 0x8F, 0xC4, 0x8F, 0xC5, /* 0x9C-0x9F */
+	0x8F, 0xC6, 0xD3, 0xF9, 0x8F, 0xC7, 0x8F, 0xC8, /* 0xA0-0xA3 */
+	0x8F, 0xC9, 0x8F, 0xCA, 0x8F, 0xCB, 0x8F, 0xCC, /* 0xA4-0xA7 */
+	0xE1, 0xE5, 0x8F, 0xCD, 0xD1, 0xAD, 0x8F, 0xCE, /* 0xA8-0xAB */
+	0x8F, 0xCF, 0xE1, 0xE6, 0xCE, 0xA2, 0x8F, 0xD0, /* 0xAC-0xAF */
+	0x8F, 0xD1, 0x8F, 0xD2, 0x8F, 0xD3, 0x8F, 0xD4, /* 0xB0-0xB3 */
+	0x8F, 0xD5, 0xE1, 0xE7, 0x8F, 0xD6, 0xB5, 0xC2, /* 0xB4-0xB7 */
+	0x8F, 0xD7, 0x8F, 0xD8, 0x8F, 0xD9, 0x8F, 0xDA, /* 0xB8-0xBB */
+	0xE1, 0xE8, 0xBB, 0xD5, 0x8F, 0xDB, 0x8F, 0xDC, /* 0xBC-0xBF */
+	0x8F, 0xDD, 0x8F, 0xDE, 0x8F, 0xDF, 0xD0, 0xC4, /* 0xC0-0xC3 */
+	0xE2, 0xE0, 0xB1, 0xD8, 0xD2, 0xE4, 0x8F, 0xE0, /* 0xC4-0xC7 */
+	0x8F, 0xE1, 0xE2, 0xE1, 0x8F, 0xE2, 0x8F, 0xE3, /* 0xC8-0xCB */
+	0xBC, 0xC9, 0xC8, 0xCC, 0x8F, 0xE4, 0xE2, 0xE3, /* 0xCC-0xCF */
+	0xEC, 0xFE, 0xEC, 0xFD, 0xDF, 0xAF, 0x8F, 0xE5, /* 0xD0-0xD3 */
+	0x8F, 0xE6, 0x8F, 0xE7, 0xE2, 0xE2, 0xD6, 0xBE, /* 0xD4-0xD7 */
+	0xCD, 0xFC, 0xC3, 0xA6, 0x8F, 0xE8, 0x8F, 0xE9, /* 0xD8-0xDB */
+	0x8F, 0xEA, 0xE3, 0xC3, 0x8F, 0xEB, 0x8F, 0xEC, /* 0xDC-0xDF */
+	0xD6, 0xD2, 0xE2, 0xE7, 0x8F, 0xED, 0x8F, 0xEE, /* 0xE0-0xE3 */
+	0xE2, 0xE8, 0x8F, 0xEF, 0x8F, 0xF0, 0xD3, 0xC7, /* 0xE4-0xE7 */
+	0x8F, 0xF1, 0x8F, 0xF2, 0xE2, 0xEC, 0xBF, 0xEC, /* 0xE8-0xEB */
+	0x8F, 0xF3, 0xE2, 0xED, 0xE2, 0xE5, 0x8F, 0xF4, /* 0xEC-0xEF */
+	0x8F, 0xF5, 0xB3, 0xC0, 0x8F, 0xF6, 0x8F, 0xF7, /* 0xF0-0xF3 */
+	0x8F, 0xF8, 0xC4, 0xEE, 0x8F, 0xF9, 0x8F, 0xFA, /* 0xF4-0xF7 */
+	0xE2, 0xEE, 0x8F, 0xFB, 0x8F, 0xFC, 0xD0, 0xC3, /* 0xF8-0xFB */
+	0x8F, 0xFD, 0xBA, 0xF6, 0xE2, 0xE9, 0xB7, 0xDE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_60[512] = {
+	0xBB, 0xB3, 0xCC, 0xAC, 0xCB, 0xCB, 0xE2, 0xE4, /* 0x00-0x03 */
+	0xE2, 0xE6, 0xE2, 0xEA, 0xE2, 0xEB, 0x8F, 0xFE, /* 0x04-0x07 */
+	0x90, 0x40, 0x90, 0x41, 0xE2, 0xF7, 0x90, 0x42, /* 0x08-0x0B */
+	0x90, 0x43, 0xE2, 0xF4, 0xD4, 0xF5, 0xE2, 0xF3, /* 0x0C-0x0F */
+	0x90, 0x44, 0x90, 0x45, 0xC5, 0xAD, 0x90, 0x46, /* 0x10-0x13 */
+	0xD5, 0xFA, 0xC5, 0xC2, 0xB2, 0xC0, 0x90, 0x47, /* 0x14-0x17 */
+	0x90, 0x48, 0xE2, 0xEF, 0x90, 0x49, 0xE2, 0xF2, /* 0x18-0x1B */
+	0xC1, 0xAF, 0xCB, 0xBC, 0x90, 0x4A, 0x90, 0x4B, /* 0x1C-0x1F */
+	0xB5, 0xA1, 0xE2, 0xF9, 0x90, 0x4C, 0x90, 0x4D, /* 0x20-0x23 */
+	0x90, 0x4E, 0xBC, 0xB1, 0xE2, 0xF1, 0xD0, 0xD4, /* 0x24-0x27 */
+	0xD4, 0xB9, 0xE2, 0xF5, 0xB9, 0xD6, 0xE2, 0xF6, /* 0x28-0x2B */
+	0x90, 0x4F, 0x90, 0x50, 0x90, 0x51, 0xC7, 0xD3, /* 0x2C-0x2F */
+	0x90, 0x52, 0x90, 0x53, 0x90, 0x54, 0x90, 0x55, /* 0x30-0x33 */
+	0x90, 0x56, 0xE2, 0xF0, 0x90, 0x57, 0x90, 0x58, /* 0x34-0x37 */
+	0x90, 0x59, 0x90, 0x5A, 0x90, 0x5B, 0xD7, 0xDC, /* 0x38-0x3B */
+	0xED, 0xA1, 0x90, 0x5C, 0x90, 0x5D, 0xE2, 0xF8, /* 0x3C-0x3F */
+	0x90, 0x5E, 0xED, 0xA5, 0xE2, 0xFE, 0xCA, 0xD1, /* 0x40-0x43 */
+	0x90, 0x5F, 0x90, 0x60, 0x90, 0x61, 0x90, 0x62, /* 0x44-0x47 */
+	0x90, 0x63, 0x90, 0x64, 0x90, 0x65, 0xC1, 0xB5, /* 0x48-0x4B */
+	0x90, 0x66, 0xBB, 0xD0, 0x90, 0x67, 0x90, 0x68, /* 0x4C-0x4F */
+	0xBF, 0xD6, 0x90, 0x69, 0xBA, 0xE3, 0x90, 0x6A, /* 0x50-0x53 */
+	0x90, 0x6B, 0xCB, 0xA1, 0x90, 0x6C, 0x90, 0x6D, /* 0x54-0x57 */
+	0x90, 0x6E, 0xED, 0xA6, 0xED, 0xA3, 0x90, 0x6F, /* 0x58-0x5B */
+	0x90, 0x70, 0xED, 0xA2, 0x90, 0x71, 0x90, 0x72, /* 0x5C-0x5F */
+	0x90, 0x73, 0x90, 0x74, 0xBB, 0xD6, 0xED, 0xA7, /* 0x60-0x63 */
+	0xD0, 0xF4, 0x90, 0x75, 0x90, 0x76, 0xED, 0xA4, /* 0x64-0x67 */
+	0xBA, 0xDE, 0xB6, 0xF7, 0xE3, 0xA1, 0xB6, 0xB2, /* 0x68-0x6B */
+	0xCC, 0xF1, 0xB9, 0xA7, 0x90, 0x77, 0xCF, 0xA2, /* 0x6C-0x6F */
+	0xC7, 0xA1, 0x90, 0x78, 0x90, 0x79, 0xBF, 0xD2, /* 0x70-0x73 */
+	0x90, 0x7A, 0x90, 0x7B, 0xB6, 0xF1, 0x90, 0x7C, /* 0x74-0x77 */
+	0xE2, 0xFA, 0xE2, 0xFB, 0xE2, 0xFD, 0xE2, 0xFC, /* 0x78-0x7B */
+	0xC4, 0xD5, 0xE3, 0xA2, 0x90, 0x7D, 0xD3, 0xC1, /* 0x7C-0x7F */
+	
+	0x90, 0x7E, 0x90, 0x80, 0x90, 0x81, 0xE3, 0xA7, /* 0x80-0x83 */
+	0xC7, 0xC4, 0x90, 0x82, 0x90, 0x83, 0x90, 0x84, /* 0x84-0x87 */
+	0x90, 0x85, 0xCF, 0xA4, 0x90, 0x86, 0x90, 0x87, /* 0x88-0x8B */
+	0xE3, 0xA9, 0xBA, 0xB7, 0x90, 0x88, 0x90, 0x89, /* 0x8C-0x8F */
+	0x90, 0x8A, 0x90, 0x8B, 0xE3, 0xA8, 0x90, 0x8C, /* 0x90-0x93 */
+	0xBB, 0xDA, 0x90, 0x8D, 0xE3, 0xA3, 0x90, 0x8E, /* 0x94-0x97 */
+	0x90, 0x8F, 0x90, 0x90, 0xE3, 0xA4, 0xE3, 0xAA, /* 0x98-0x9B */
+	0x90, 0x91, 0xE3, 0xA6, 0x90, 0x92, 0xCE, 0xF2, /* 0x9C-0x9F */
+	0xD3, 0xC6, 0x90, 0x93, 0x90, 0x94, 0xBB, 0xBC, /* 0xA0-0xA3 */
+	0x90, 0x95, 0x90, 0x96, 0xD4, 0xC3, 0x90, 0x97, /* 0xA4-0xA7 */
+	0xC4, 0xFA, 0x90, 0x98, 0x90, 0x99, 0xED, 0xA8, /* 0xA8-0xAB */
+	0xD0, 0xFC, 0xE3, 0xA5, 0x90, 0x9A, 0xC3, 0xF5, /* 0xAC-0xAF */
+	0x90, 0x9B, 0xE3, 0xAD, 0xB1, 0xAF, 0x90, 0x9C, /* 0xB0-0xB3 */
+	0xE3, 0xB2, 0x90, 0x9D, 0x90, 0x9E, 0x90, 0x9F, /* 0xB4-0xB7 */
+	0xBC, 0xC2, 0x90, 0xA0, 0x90, 0xA1, 0xE3, 0xAC, /* 0xB8-0xBB */
+	0xB5, 0xBF, 0x90, 0xA2, 0x90, 0xA3, 0x90, 0xA4, /* 0xBC-0xBF */
+	0x90, 0xA5, 0x90, 0xA6, 0x90, 0xA7, 0x90, 0xA8, /* 0xC0-0xC3 */
+	0x90, 0xA9, 0xC7, 0xE9, 0xE3, 0xB0, 0x90, 0xAA, /* 0xC4-0xC7 */
+	0x90, 0xAB, 0x90, 0xAC, 0xBE, 0xAA, 0xCD, 0xEF, /* 0xC8-0xCB */
+	0x90, 0xAD, 0x90, 0xAE, 0x90, 0xAF, 0x90, 0xB0, /* 0xCC-0xCF */
+	0x90, 0xB1, 0xBB, 0xF3, 0x90, 0xB2, 0x90, 0xB3, /* 0xD0-0xD3 */
+	0x90, 0xB4, 0xCC, 0xE8, 0x90, 0xB5, 0x90, 0xB6, /* 0xD4-0xD7 */
+	0xE3, 0xAF, 0x90, 0xB7, 0xE3, 0xB1, 0x90, 0xB8, /* 0xD8-0xDB */
+	0xCF, 0xA7, 0xE3, 0xAE, 0x90, 0xB9, 0xCE, 0xA9, /* 0xDC-0xDF */
+	0xBB, 0xDD, 0x90, 0xBA, 0x90, 0xBB, 0x90, 0xBC, /* 0xE0-0xE3 */
+	0x90, 0xBD, 0x90, 0xBE, 0xB5, 0xEB, 0xBE, 0xE5, /* 0xE4-0xE7 */
+	0xB2, 0xD2, 0xB3, 0xCD, 0x90, 0xBF, 0xB1, 0xB9, /* 0xE8-0xEB */
+	0xE3, 0xAB, 0xB2, 0xD1, 0xB5, 0xAC, 0xB9, 0xDF, /* 0xEC-0xEF */
+	0xB6, 0xE8, 0x90, 0xC0, 0x90, 0xC1, 0xCF, 0xEB, /* 0xF0-0xF3 */
+	0xE3, 0xB7, 0x90, 0xC2, 0xBB, 0xCC, 0x90, 0xC3, /* 0xF4-0xF7 */
+	0x90, 0xC4, 0xC8, 0xC7, 0xD0, 0xCA, 0x90, 0xC5, /* 0xF8-0xFB */
+	0x90, 0xC6, 0x90, 0xC7, 0x90, 0xC8, 0x90, 0xC9, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_61[512] = {
+	0xE3, 0xB8, 0xB3, 0xEE, 0x90, 0xCA, 0x90, 0xCB, /* 0x00-0x03 */
+	0x90, 0xCC, 0x90, 0xCD, 0xED, 0xA9, 0x90, 0xCE, /* 0x04-0x07 */
+	0xD3, 0xFA, 0xD3, 0xE4, 0x90, 0xCF, 0x90, 0xD0, /* 0x08-0x0B */
+	0x90, 0xD1, 0xED, 0xAA, 0xE3, 0xB9, 0xD2, 0xE2, /* 0x0C-0x0F */
+	0x90, 0xD2, 0x90, 0xD3, 0x90, 0xD4, 0x90, 0xD5, /* 0x10-0x13 */
+	0x90, 0xD6, 0xE3, 0xB5, 0x90, 0xD7, 0x90, 0xD8, /* 0x14-0x17 */
+	0x90, 0xD9, 0x90, 0xDA, 0xD3, 0xDE, 0x90, 0xDB, /* 0x18-0x1B */
+	0x90, 0xDC, 0x90, 0xDD, 0x90, 0xDE, 0xB8, 0xD0, /* 0x1C-0x1F */
+	0xE3, 0xB3, 0x90, 0xDF, 0x90, 0xE0, 0xE3, 0xB6, /* 0x20-0x23 */
+	0xB7, 0xDF, 0x90, 0xE1, 0xE3, 0xB4, 0xC0, 0xA2, /* 0x24-0x27 */
+	0x90, 0xE2, 0x90, 0xE3, 0x90, 0xE4, 0xE3, 0xBA, /* 0x28-0x2B */
+	0x90, 0xE5, 0x90, 0xE6, 0x90, 0xE7, 0x90, 0xE8, /* 0x2C-0x2F */
+	0x90, 0xE9, 0x90, 0xEA, 0x90, 0xEB, 0x90, 0xEC, /* 0x30-0x33 */
+	0x90, 0xED, 0x90, 0xEE, 0x90, 0xEF, 0x90, 0xF0, /* 0x34-0x37 */
+	0x90, 0xF1, 0x90, 0xF2, 0x90, 0xF3, 0x90, 0xF4, /* 0x38-0x3B */
+	0x90, 0xF5, 0x90, 0xF6, 0x90, 0xF7, 0xD4, 0xB8, /* 0x3C-0x3F */
+	0x90, 0xF8, 0x90, 0xF9, 0x90, 0xFA, 0x90, 0xFB, /* 0x40-0x43 */
+	0x90, 0xFC, 0x90, 0xFD, 0x90, 0xFE, 0x91, 0x40, /* 0x44-0x47 */
+	0xB4, 0xC8, 0x91, 0x41, 0xE3, 0xBB, 0x91, 0x42, /* 0x48-0x4B */
+	0xBB, 0xC5, 0x91, 0x43, 0xC9, 0xF7, 0x91, 0x44, /* 0x4C-0x4F */
+	0x91, 0x45, 0xC9, 0xE5, 0x91, 0x46, 0x91, 0x47, /* 0x50-0x53 */
+	0x91, 0x48, 0xC4, 0xBD, 0x91, 0x49, 0x91, 0x4A, /* 0x54-0x57 */
+	0x91, 0x4B, 0x91, 0x4C, 0x91, 0x4D, 0x91, 0x4E, /* 0x58-0x5B */
+	0x91, 0x4F, 0xED, 0xAB, 0x91, 0x50, 0x91, 0x51, /* 0x5C-0x5F */
+	0x91, 0x52, 0x91, 0x53, 0xC2, 0xFD, 0x91, 0x54, /* 0x60-0x63 */
+	0x91, 0x55, 0x91, 0x56, 0x91, 0x57, 0xBB, 0xDB, /* 0x64-0x67 */
+	0xBF, 0xAE, 0x91, 0x58, 0x91, 0x59, 0x91, 0x5A, /* 0x68-0x6B */
+	0x91, 0x5B, 0x91, 0x5C, 0x91, 0x5D, 0x91, 0x5E, /* 0x6C-0x6F */
+	0xCE, 0xBF, 0x91, 0x5F, 0x91, 0x60, 0x91, 0x61, /* 0x70-0x73 */
+	0x91, 0x62, 0xE3, 0xBC, 0x91, 0x63, 0xBF, 0xB6, /* 0x74-0x77 */
+	0x91, 0x64, 0x91, 0x65, 0x91, 0x66, 0x91, 0x67, /* 0x78-0x7B */
+	0x91, 0x68, 0x91, 0x69, 0x91, 0x6A, 0x91, 0x6B, /* 0x7C-0x7F */
+	
+	0x91, 0x6C, 0x91, 0x6D, 0x91, 0x6E, 0x91, 0x6F, /* 0x80-0x83 */
+	0x91, 0x70, 0x91, 0x71, 0x91, 0x72, 0x91, 0x73, /* 0x84-0x87 */
+	0x91, 0x74, 0x91, 0x75, 0x91, 0x76, 0xB1, 0xEF, /* 0x88-0x8B */
+	0x91, 0x77, 0x91, 0x78, 0xD4, 0xF7, 0x91, 0x79, /* 0x8C-0x8F */
+	0x91, 0x7A, 0x91, 0x7B, 0x91, 0x7C, 0x91, 0x7D, /* 0x90-0x93 */
+	0xE3, 0xBE, 0x91, 0x7E, 0x91, 0x80, 0x91, 0x81, /* 0x94-0x97 */
+	0x91, 0x82, 0x91, 0x83, 0x91, 0x84, 0x91, 0x85, /* 0x98-0x9B */
+	0x91, 0x86, 0xED, 0xAD, 0x91, 0x87, 0x91, 0x88, /* 0x9C-0x9F */
+	0x91, 0x89, 0x91, 0x8A, 0x91, 0x8B, 0x91, 0x8C, /* 0xA0-0xA3 */
+	0x91, 0x8D, 0x91, 0x8E, 0x91, 0x8F, 0xE3, 0xBF, /* 0xA4-0xA7 */
+	0xBA, 0xA9, 0xED, 0xAC, 0x91, 0x90, 0x91, 0x91, /* 0xA8-0xAB */
+	0xE3, 0xBD, 0x91, 0x92, 0x91, 0x93, 0x91, 0x94, /* 0xAC-0xAF */
+	0x91, 0x95, 0x91, 0x96, 0x91, 0x97, 0x91, 0x98, /* 0xB0-0xB3 */
+	0x91, 0x99, 0x91, 0x9A, 0x91, 0x9B, 0xE3, 0xC0, /* 0xB4-0xB7 */
+	0x91, 0x9C, 0x91, 0x9D, 0x91, 0x9E, 0x91, 0x9F, /* 0xB8-0xBB */
+	0x91, 0xA0, 0x91, 0xA1, 0xBA, 0xB6, 0x91, 0xA2, /* 0xBC-0xBF */
+	0x91, 0xA3, 0x91, 0xA4, 0xB6, 0xAE, 0x91, 0xA5, /* 0xC0-0xC3 */
+	0x91, 0xA6, 0x91, 0xA7, 0x91, 0xA8, 0x91, 0xA9, /* 0xC4-0xC7 */
+	0xD0, 0xB8, 0x91, 0xAA, 0xB0, 0xC3, 0xED, 0xAE, /* 0xC8-0xCB */
+	0x91, 0xAB, 0x91, 0xAC, 0x91, 0xAD, 0x91, 0xAE, /* 0xCC-0xCF */
+	0x91, 0xAF, 0xED, 0xAF, 0xC0, 0xC1, 0x91, 0xB0, /* 0xD0-0xD3 */
+	0xE3, 0xC1, 0x91, 0xB1, 0x91, 0xB2, 0x91, 0xB3, /* 0xD4-0xD7 */
+	0x91, 0xB4, 0x91, 0xB5, 0x91, 0xB6, 0x91, 0xB7, /* 0xD8-0xDB */
+	0x91, 0xB8, 0x91, 0xB9, 0x91, 0xBA, 0x91, 0xBB, /* 0xDC-0xDF */
+	0x91, 0xBC, 0x91, 0xBD, 0x91, 0xBE, 0x91, 0xBF, /* 0xE0-0xE3 */
+	0x91, 0xC0, 0x91, 0xC1, 0xC5, 0xB3, 0x91, 0xC2, /* 0xE4-0xE7 */
+	0x91, 0xC3, 0x91, 0xC4, 0x91, 0xC5, 0x91, 0xC6, /* 0xE8-0xEB */
+	0x91, 0xC7, 0x91, 0xC8, 0x91, 0xC9, 0x91, 0xCA, /* 0xEC-0xEF */
+	0x91, 0xCB, 0x91, 0xCC, 0x91, 0xCD, 0x91, 0xCE, /* 0xF0-0xF3 */
+	0x91, 0xCF, 0xE3, 0xC2, 0x91, 0xD0, 0x91, 0xD1, /* 0xF4-0xF7 */
+	0x91, 0xD2, 0x91, 0xD3, 0x91, 0xD4, 0x91, 0xD5, /* 0xF8-0xFB */
+	0x91, 0xD6, 0x91, 0xD7, 0x91, 0xD8, 0xDC, 0xB2, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_62[512] = {
+	0x91, 0xD9, 0x91, 0xDA, 0x91, 0xDB, 0x91, 0xDC, /* 0x00-0x03 */
+	0x91, 0xDD, 0x91, 0xDE, 0xED, 0xB0, 0x91, 0xDF, /* 0x04-0x07 */
+	0xB8, 0xEA, 0x91, 0xE0, 0xCE, 0xEC, 0xEA, 0xA7, /* 0x08-0x0B */
+	0xD0, 0xE7, 0xCA, 0xF9, 0xC8, 0xD6, 0xCF, 0xB7, /* 0x0C-0x0F */
+	0xB3, 0xC9, 0xCE, 0xD2, 0xBD, 0xE4, 0x91, 0xE1, /* 0x10-0x13 */
+	0x91, 0xE2, 0xE3, 0xDE, 0xBB, 0xF2, 0xEA, 0xA8, /* 0x14-0x17 */
+	0xD5, 0xBD, 0x91, 0xE3, 0xC6, 0xDD, 0xEA, 0xA9, /* 0x18-0x1B */
+	0x91, 0xE4, 0x91, 0xE5, 0x91, 0xE6, 0xEA, 0xAA, /* 0x1C-0x1F */
+	0x91, 0xE7, 0xEA, 0xAC, 0xEA, 0xAB, 0x91, 0xE8, /* 0x20-0x23 */
+	0xEA, 0xAE, 0xEA, 0xAD, 0x91, 0xE9, 0x91, 0xEA, /* 0x24-0x27 */
+	0x91, 0xEB, 0x91, 0xEC, 0xBD, 0xD8, 0x91, 0xED, /* 0x28-0x2B */
+	0xEA, 0xAF, 0x91, 0xEE, 0xC2, 0xBE, 0x91, 0xEF, /* 0x2C-0x2F */
+	0x91, 0xF0, 0x91, 0xF1, 0x91, 0xF2, 0xB4, 0xC1, /* 0x30-0x33 */
+	0xB4, 0xF7, 0x91, 0xF3, 0x91, 0xF4, 0xBB, 0xA7, /* 0x34-0x37 */
+	0x91, 0xF5, 0x91, 0xF6, 0x91, 0xF7, 0x91, 0xF8, /* 0x38-0x3B */
+	0x91, 0xF9, 0xEC, 0xE6, 0xEC, 0xE5, 0xB7, 0xBF, /* 0x3C-0x3F */
+	0xCB, 0xF9, 0xB1, 0xE2, 0x91, 0xFA, 0xEC, 0xE7, /* 0x40-0x43 */
+	0x91, 0xFB, 0x91, 0xFC, 0x91, 0xFD, 0xC9, 0xC8, /* 0x44-0x47 */
+	0xEC, 0xE8, 0xEC, 0xE9, 0x91, 0xFE, 0xCA, 0xD6, /* 0x48-0x4B */
+	0xDE, 0xD0, 0xB2, 0xC5, 0xD4, 0xFA, 0x92, 0x40, /* 0x4C-0x4F */
+	0x92, 0x41, 0xC6, 0xCB, 0xB0, 0xC7, 0xB4, 0xF2, /* 0x50-0x53 */
+	0xC8, 0xD3, 0x92, 0x42, 0x92, 0x43, 0x92, 0x44, /* 0x54-0x57 */
+	0xCD, 0xD0, 0x92, 0x45, 0x92, 0x46, 0xBF, 0xB8, /* 0x58-0x5B */
+	0x92, 0x47, 0x92, 0x48, 0x92, 0x49, 0x92, 0x4A, /* 0x5C-0x5F */
+	0x92, 0x4B, 0x92, 0x4C, 0x92, 0x4D, 0xBF, 0xDB, /* 0x60-0x63 */
+	0x92, 0x4E, 0x92, 0x4F, 0xC7, 0xA4, 0xD6, 0xB4, /* 0x64-0x67 */
+	0x92, 0x50, 0xC0, 0xA9, 0xDE, 0xD1, 0xC9, 0xA8, /* 0x68-0x6B */
+	0xD1, 0xEF, 0xC5, 0xA4, 0xB0, 0xE7, 0xB3, 0xB6, /* 0x6C-0x6F */
+	0xC8, 0xC5, 0x92, 0x51, 0x92, 0x52, 0xB0, 0xE2, /* 0x70-0x73 */
+	0x92, 0x53, 0x92, 0x54, 0xB7, 0xF6, 0x92, 0x55, /* 0x74-0x77 */
+	0x92, 0x56, 0xC5, 0xFA, 0x92, 0x57, 0x92, 0x58, /* 0x78-0x7B */
+	0xB6, 0xF3, 0x92, 0x59, 0xD5, 0xD2, 0xB3, 0xD0, /* 0x7C-0x7F */
+	
+	0xBC, 0xBC, 0x92, 0x5A, 0x92, 0x5B, 0x92, 0x5C, /* 0x80-0x83 */
+	0xB3, 0xAD, 0x92, 0x5D, 0x92, 0x5E, 0x92, 0x5F, /* 0x84-0x87 */
+	0x92, 0x60, 0xBE, 0xF1, 0xB0, 0xD1, 0x92, 0x61, /* 0x88-0x8B */
+	0x92, 0x62, 0x92, 0x63, 0x92, 0x64, 0x92, 0x65, /* 0x8C-0x8F */
+	0x92, 0x66, 0xD2, 0xD6, 0xCA, 0xE3, 0xD7, 0xA5, /* 0x90-0x93 */
+	0x92, 0x67, 0xCD, 0xB6, 0xB6, 0xB6, 0xBF, 0xB9, /* 0x94-0x97 */
+	0xD5, 0xDB, 0x92, 0x68, 0xB8, 0xA7, 0xC5, 0xD7, /* 0x98-0x9B */
+	0x92, 0x69, 0x92, 0x6A, 0x92, 0x6B, 0xDE, 0xD2, /* 0x9C-0x9F */
+	0xBF, 0xD9, 0xC2, 0xD5, 0xC7, 0xC0, 0x92, 0x6C, /* 0xA0-0xA3 */
+	0xBB, 0xA4, 0xB1, 0xA8, 0x92, 0x6D, 0x92, 0x6E, /* 0xA4-0xA7 */
+	0xC5, 0xEA, 0x92, 0x6F, 0x92, 0x70, 0xC5, 0xFB, /* 0xA8-0xAB */
+	0xCC, 0xA7, 0x92, 0x71, 0x92, 0x72, 0x92, 0x73, /* 0xAC-0xAF */
+	0x92, 0x74, 0xB1, 0xA7, 0x92, 0x75, 0x92, 0x76, /* 0xB0-0xB3 */
+	0x92, 0x77, 0xB5, 0xD6, 0x92, 0x78, 0x92, 0x79, /* 0xB4-0xB7 */
+	0x92, 0x7A, 0xC4, 0xA8, 0x92, 0x7B, 0xDE, 0xD3, /* 0xB8-0xBB */
+	0xD1, 0xBA, 0xB3, 0xE9, 0x92, 0x7C, 0xC3, 0xF2, /* 0xBC-0xBF */
+	0x92, 0x7D, 0x92, 0x7E, 0xB7, 0xF7, 0x92, 0x80, /* 0xC0-0xC3 */
+	0xD6, 0xF4, 0xB5, 0xA3, 0xB2, 0xF0, 0xC4, 0xB4, /* 0xC4-0xC7 */
+	0xC4, 0xE9, 0xC0, 0xAD, 0xDE, 0xD4, 0x92, 0x81, /* 0xC8-0xCB */
+	0xB0, 0xE8, 0xC5, 0xC4, 0xC1, 0xE0, 0x92, 0x82, /* 0xCC-0xCF */
+	0xB9, 0xD5, 0x92, 0x83, 0xBE, 0xDC, 0xCD, 0xD8, /* 0xD0-0xD3 */
+	0xB0, 0xCE, 0x92, 0x84, 0xCD, 0xCF, 0xDE, 0xD6, /* 0xD4-0xD7 */
+	0xBE, 0xD0, 0xD7, 0xBE, 0xDE, 0xD5, 0xD5, 0xD0, /* 0xD8-0xDB */
+	0xB0, 0xDD, 0x92, 0x85, 0x92, 0x86, 0xC4, 0xE2, /* 0xDC-0xDF */
+	0x92, 0x87, 0x92, 0x88, 0xC2, 0xA3, 0xBC, 0xF0, /* 0xE0-0xE3 */
+	0x92, 0x89, 0xD3, 0xB5, 0xC0, 0xB9, 0xC5, 0xA1, /* 0xE4-0xE7 */
+	0xB2, 0xA6, 0xD4, 0xF1, 0x92, 0x8A, 0x92, 0x8B, /* 0xE8-0xEB */
+	0xC0, 0xA8, 0xCA, 0xC3, 0xDE, 0xD7, 0xD5, 0xFC, /* 0xEC-0xEF */
+	0x92, 0x8C, 0xB9, 0xB0, 0x92, 0x8D, 0xC8, 0xAD, /* 0xF0-0xF3 */
+	0xCB, 0xA9, 0x92, 0x8E, 0xDE, 0xD9, 0xBF, 0xBD, /* 0xF4-0xF7 */
+	0x92, 0x8F, 0x92, 0x90, 0x92, 0x91, 0x92, 0x92, /* 0xF8-0xFB */
+	0xC6, 0xB4, 0xD7, 0xA7, 0xCA, 0xB0, 0xC4, 0xC3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_63[512] = {
+	0x92, 0x93, 0xB3, 0xD6, 0xB9, 0xD2, 0x92, 0x94, /* 0x00-0x03 */
+	0x92, 0x95, 0x92, 0x96, 0x92, 0x97, 0xD6, 0xB8, /* 0x04-0x07 */
+	0xEA, 0xFC, 0xB0, 0xB4, 0x92, 0x98, 0x92, 0x99, /* 0x08-0x0B */
+	0x92, 0x9A, 0x92, 0x9B, 0xBF, 0xE6, 0x92, 0x9C, /* 0x0C-0x0F */
+	0x92, 0x9D, 0xCC, 0xF4, 0x92, 0x9E, 0x92, 0x9F, /* 0x10-0x13 */
+	0x92, 0xA0, 0x92, 0xA1, 0xCD, 0xDA, 0x92, 0xA2, /* 0x14-0x17 */
+	0x92, 0xA3, 0x92, 0xA4, 0xD6, 0xBF, 0xC2, 0xCE, /* 0x18-0x1B */
+	0x92, 0xA5, 0xCE, 0xCE, 0xCC, 0xA2, 0xD0, 0xAE, /* 0x1C-0x1F */
+	0xC4, 0xD3, 0xB5, 0xB2, 0xDE, 0xD8, 0xD5, 0xF5, /* 0x20-0x23 */
+	0xBC, 0xB7, 0xBB, 0xD3, 0x92, 0xA6, 0x92, 0xA7, /* 0x24-0x27 */
+	0xB0, 0xA4, 0x92, 0xA8, 0xC5, 0xB2, 0xB4, 0xEC, /* 0x28-0x2B */
+	0x92, 0xA9, 0x92, 0xAA, 0x92, 0xAB, 0xD5, 0xF1, /* 0x2C-0x2F */
+	0x92, 0xAC, 0x92, 0xAD, 0xEA, 0xFD, 0x92, 0xAE, /* 0x30-0x33 */
+	0x92, 0xAF, 0x92, 0xB0, 0x92, 0xB1, 0x92, 0xB2, /* 0x34-0x37 */
+	0x92, 0xB3, 0xDE, 0xDA, 0xCD, 0xA6, 0x92, 0xB4, /* 0x38-0x3B */
+	0x92, 0xB5, 0xCD, 0xEC, 0x92, 0xB6, 0x92, 0xB7, /* 0x3C-0x3F */
+	0x92, 0xB8, 0x92, 0xB9, 0xCE, 0xE6, 0xDE, 0xDC, /* 0x40-0x43 */
+	0x92, 0xBA, 0xCD, 0xB1, 0xC0, 0xA6, 0x92, 0xBB, /* 0x44-0x47 */
+	0x92, 0xBC, 0xD7, 0xBD, 0x92, 0xBD, 0xDE, 0xDB, /* 0x48-0x4B */
+	0xB0, 0xC6, 0xBA, 0xB4, 0xC9, 0xD3, 0xC4, 0xF3, /* 0x4C-0x4F */
+	0xBE, 0xE8, 0x92, 0xBE, 0x92, 0xBF, 0x92, 0xC0, /* 0x50-0x53 */
+	0x92, 0xC1, 0xB2, 0xB6, 0x92, 0xC2, 0x92, 0xC3, /* 0x54-0x57 */
+	0x92, 0xC4, 0x92, 0xC5, 0x92, 0xC6, 0x92, 0xC7, /* 0x58-0x5B */
+	0x92, 0xC8, 0x92, 0xC9, 0xC0, 0xCC, 0xCB, 0xF0, /* 0x5C-0x5F */
+	0x92, 0xCA, 0xBC, 0xF1, 0xBB, 0xBB, 0xB5, 0xB7, /* 0x60-0x63 */
+	0x92, 0xCB, 0x92, 0xCC, 0x92, 0xCD, 0xC5, 0xF5, /* 0x64-0x67 */
+	0x92, 0xCE, 0xDE, 0xE6, 0x92, 0xCF, 0x92, 0xD0, /* 0x68-0x6B */
+	0x92, 0xD1, 0xDE, 0xE3, 0xBE, 0xDD, 0x92, 0xD2, /* 0x6C-0x6F */
+	0x92, 0xD3, 0xDE, 0xDF, 0x92, 0xD4, 0x92, 0xD5, /* 0x70-0x73 */
+	0x92, 0xD6, 0x92, 0xD7, 0xB4, 0xB7, 0xBD, 0xDD, /* 0x74-0x77 */
+	0x92, 0xD8, 0x92, 0xD9, 0xDE, 0xE0, 0xC4, 0xED, /* 0x78-0x7B */
+	0x92, 0xDA, 0x92, 0xDB, 0x92, 0xDC, 0x92, 0xDD, /* 0x7C-0x7F */
+	
+	0xCF, 0xC6, 0x92, 0xDE, 0xB5, 0xE0, 0x92, 0xDF, /* 0x80-0x83 */
+	0x92, 0xE0, 0x92, 0xE1, 0x92, 0xE2, 0xB6, 0xDE, /* 0x84-0x87 */
+	0xCA, 0xDA, 0xB5, 0xF4, 0xDE, 0xE5, 0x92, 0xE3, /* 0x88-0x8B */
+	0xD5, 0xC6, 0x92, 0xE4, 0xDE, 0xE1, 0xCC, 0xCD, /* 0x8C-0x8F */
+	0xC6, 0xFE, 0x92, 0xE5, 0xC5, 0xC5, 0x92, 0xE6, /* 0x90-0x93 */
+	0x92, 0xE7, 0x92, 0xE8, 0xD2, 0xB4, 0x92, 0xE9, /* 0x94-0x97 */
+	0xBE, 0xF2, 0x92, 0xEA, 0x92, 0xEB, 0x92, 0xEC, /* 0x98-0x9B */
+	0x92, 0xED, 0x92, 0xEE, 0x92, 0xEF, 0x92, 0xF0, /* 0x9C-0x9F */
+	0xC2, 0xD3, 0x92, 0xF1, 0xCC, 0xBD, 0xB3, 0xB8, /* 0xA0-0xA3 */
+	0x92, 0xF2, 0xBD, 0xD3, 0x92, 0xF3, 0xBF, 0xD8, /* 0xA4-0xA7 */
+	0xCD, 0xC6, 0xD1, 0xDA, 0xB4, 0xEB, 0x92, 0xF4, /* 0xA8-0xAB */
+	0xDE, 0xE4, 0xDE, 0xDD, 0xDE, 0xE7, 0x92, 0xF5, /* 0xAC-0xAF */
+	0xEA, 0xFE, 0x92, 0xF6, 0x92, 0xF7, 0xC2, 0xB0, /* 0xB0-0xB3 */
+	0xDE, 0xE2, 0x92, 0xF8, 0x92, 0xF9, 0xD6, 0xC0, /* 0xB4-0xB7 */
+	0xB5, 0xA7, 0x92, 0xFA, 0xB2, 0xF4, 0x92, 0xFB, /* 0xB8-0xBB */
+	0xDE, 0xE8, 0x92, 0xFC, 0xDE, 0xF2, 0x92, 0xFD, /* 0xBC-0xBF */
+	0x92, 0xFE, 0x93, 0x40, 0x93, 0x41, 0x93, 0x42, /* 0xC0-0xC3 */
+	0xDE, 0xED, 0x93, 0x43, 0xDE, 0xF1, 0x93, 0x44, /* 0xC4-0xC7 */
+	0x93, 0x45, 0xC8, 0xE0, 0x93, 0x46, 0x93, 0x47, /* 0xC8-0xCB */
+	0x93, 0x48, 0xD7, 0xE1, 0xDE, 0xEF, 0xC3, 0xE8, /* 0xCC-0xCF */
+	0xCC, 0xE1, 0x93, 0x49, 0xB2, 0xE5, 0x93, 0x4A, /* 0xD0-0xD3 */
+	0x93, 0x4B, 0x93, 0x4C, 0xD2, 0xBE, 0x93, 0x4D, /* 0xD4-0xD7 */
+	0x93, 0x4E, 0x93, 0x4F, 0x93, 0x50, 0x93, 0x51, /* 0xD8-0xDB */
+	0x93, 0x52, 0x93, 0x53, 0xDE, 0xEE, 0x93, 0x54, /* 0xDC-0xDF */
+	0xDE, 0xEB, 0xCE, 0xD5, 0x93, 0x55, 0xB4, 0xA7, /* 0xE0-0xE3 */
+	0x93, 0x56, 0x93, 0x57, 0x93, 0x58, 0x93, 0x59, /* 0xE4-0xE7 */
+	0x93, 0x5A, 0xBF, 0xAB, 0xBE, 0xBE, 0x93, 0x5B, /* 0xE8-0xEB */
+	0x93, 0x5C, 0xBD, 0xD2, 0x93, 0x5D, 0x93, 0x5E, /* 0xEC-0xEF */
+	0x93, 0x5F, 0x93, 0x60, 0xDE, 0xE9, 0x93, 0x61, /* 0xF0-0xF3 */
+	0xD4, 0xAE, 0x93, 0x62, 0xDE, 0xDE, 0x93, 0x63, /* 0xF4-0xF7 */
+	0xDE, 0xEA, 0x93, 0x64, 0x93, 0x65, 0x93, 0x66, /* 0xF8-0xFB */
+	0x93, 0x67, 0xC0, 0xBF, 0x93, 0x68, 0xDE, 0xEC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_64[512] = {
+	0xB2, 0xF3, 0xB8, 0xE9, 0xC2, 0xA7, 0x93, 0x69, /* 0x00-0x03 */
+	0x93, 0x6A, 0xBD, 0xC1, 0x93, 0x6B, 0x93, 0x6C, /* 0x04-0x07 */
+	0x93, 0x6D, 0x93, 0x6E, 0x93, 0x6F, 0xDE, 0xF5, /* 0x08-0x0B */
+	0xDE, 0xF8, 0x93, 0x70, 0x93, 0x71, 0xB2, 0xAB, /* 0x0C-0x0F */
+	0xB4, 0xA4, 0x93, 0x72, 0x93, 0x73, 0xB4, 0xEA, /* 0x10-0x13 */
+	0xC9, 0xA6, 0x93, 0x74, 0x93, 0x75, 0x93, 0x76, /* 0x14-0x17 */
+	0x93, 0x77, 0x93, 0x78, 0x93, 0x79, 0xDE, 0xF6, /* 0x18-0x1B */
+	0xCB, 0xD1, 0x93, 0x7A, 0xB8, 0xE3, 0x93, 0x7B, /* 0x1C-0x1F */
+	0xDE, 0xF7, 0xDE, 0xFA, 0x93, 0x7C, 0x93, 0x7D, /* 0x20-0x23 */
+	0x93, 0x7E, 0x93, 0x80, 0xDE, 0xF9, 0x93, 0x81, /* 0x24-0x27 */
+	0x93, 0x82, 0x93, 0x83, 0xCC, 0xC2, 0x93, 0x84, /* 0x28-0x2B */
+	0xB0, 0xE1, 0xB4, 0xEE, 0x93, 0x85, 0x93, 0x86, /* 0x2C-0x2F */
+	0x93, 0x87, 0x93, 0x88, 0x93, 0x89, 0x93, 0x8A, /* 0x30-0x33 */
+	0xE5, 0xBA, 0x93, 0x8B, 0x93, 0x8C, 0x93, 0x8D, /* 0x34-0x37 */
+	0x93, 0x8E, 0x93, 0x8F, 0xD0, 0xAF, 0x93, 0x90, /* 0x38-0x3B */
+	0x93, 0x91, 0xB2, 0xEB, 0x93, 0x92, 0xEB, 0xA1, /* 0x3C-0x3F */
+	0x93, 0x93, 0xDE, 0xF4, 0x93, 0x94, 0x93, 0x95, /* 0x40-0x43 */
+	0xC9, 0xE3, 0xDE, 0xF3, 0xB0, 0xDA, 0xD2, 0xA1, /* 0x44-0x47 */
+	0xB1, 0xF7, 0x93, 0x96, 0xCC, 0xAF, 0x93, 0x97, /* 0x48-0x4B */
+	0x93, 0x98, 0x93, 0x99, 0x93, 0x9A, 0x93, 0x9B, /* 0x4C-0x4F */
+	0x93, 0x9C, 0x93, 0x9D, 0xDE, 0xF0, 0x93, 0x9E, /* 0x50-0x53 */
+	0xCB, 0xA4, 0x93, 0x9F, 0x93, 0xA0, 0x93, 0xA1, /* 0x54-0x57 */
+	0xD5, 0xAA, 0x93, 0xA2, 0x93, 0xA3, 0x93, 0xA4, /* 0x58-0x5B */
+	0x93, 0xA5, 0x93, 0xA6, 0xDE, 0xFB, 0x93, 0xA7, /* 0x5C-0x5F */
+	0x93, 0xA8, 0x93, 0xA9, 0x93, 0xAA, 0x93, 0xAB, /* 0x60-0x63 */
+	0x93, 0xAC, 0x93, 0xAD, 0x93, 0xAE, 0xB4, 0xDD, /* 0x64-0x67 */
+	0x93, 0xAF, 0xC4, 0xA6, 0x93, 0xB0, 0x93, 0xB1, /* 0x68-0x6B */
+	0x93, 0xB2, 0xDE, 0xFD, 0x93, 0xB3, 0x93, 0xB4, /* 0x6C-0x6F */
+	0x93, 0xB5, 0x93, 0xB6, 0x93, 0xB7, 0x93, 0xB8, /* 0x70-0x73 */
+	0x93, 0xB9, 0x93, 0xBA, 0x93, 0xBB, 0x93, 0xBC, /* 0x74-0x77 */
+	0xC3, 0xFE, 0xC4, 0xA1, 0xDF, 0xA1, 0x93, 0xBD, /* 0x78-0x7B */
+	0x93, 0xBE, 0x93, 0xBF, 0x93, 0xC0, 0x93, 0xC1, /* 0x7C-0x7F */
+	
+	0x93, 0xC2, 0x93, 0xC3, 0xC1, 0xCC, 0x93, 0xC4, /* 0x80-0x83 */
+	0xDE, 0xFC, 0xBE, 0xEF, 0x93, 0xC5, 0xC6, 0xB2, /* 0x84-0x87 */
+	0x93, 0xC6, 0x93, 0xC7, 0x93, 0xC8, 0x93, 0xC9, /* 0x88-0x8B */
+	0x93, 0xCA, 0x93, 0xCB, 0x93, 0xCC, 0x93, 0xCD, /* 0x8C-0x8F */
+	0x93, 0xCE, 0xB3, 0xC5, 0xC8, 0xF6, 0x93, 0xCF, /* 0x90-0x93 */
+	0x93, 0xD0, 0xCB, 0xBA, 0xDE, 0xFE, 0x93, 0xD1, /* 0x94-0x97 */
+	0x93, 0xD2, 0xDF, 0xA4, 0x93, 0xD3, 0x93, 0xD4, /* 0x98-0x9B */
+	0x93, 0xD5, 0x93, 0xD6, 0xD7, 0xB2, 0x93, 0xD7, /* 0x9C-0x9F */
+	0x93, 0xD8, 0x93, 0xD9, 0x93, 0xDA, 0x93, 0xDB, /* 0xA0-0xA3 */
+	0xB3, 0xB7, 0x93, 0xDC, 0x93, 0xDD, 0x93, 0xDE, /* 0xA4-0xA7 */
+	0x93, 0xDF, 0xC1, 0xC3, 0x93, 0xE0, 0x93, 0xE1, /* 0xA8-0xAB */
+	0xC7, 0xCB, 0xB2, 0xA5, 0xB4, 0xE9, 0x93, 0xE2, /* 0xAC-0xAF */
+	0xD7, 0xAB, 0x93, 0xE3, 0x93, 0xE4, 0x93, 0xE5, /* 0xB0-0xB3 */
+	0x93, 0xE6, 0xC4, 0xEC, 0x93, 0xE7, 0xDF, 0xA2, /* 0xB4-0xB7 */
+	0xDF, 0xA3, 0x93, 0xE8, 0xDF, 0xA5, 0x93, 0xE9, /* 0xB8-0xBB */
+	0xBA, 0xB3, 0x93, 0xEA, 0x93, 0xEB, 0x93, 0xEC, /* 0xBC-0xBF */
+	0xDF, 0xA6, 0x93, 0xED, 0xC0, 0xDE, 0x93, 0xEE, /* 0xC0-0xC3 */
+	0x93, 0xEF, 0xC9, 0xC3, 0x93, 0xF0, 0x93, 0xF1, /* 0xC4-0xC7 */
+	0x93, 0xF2, 0x93, 0xF3, 0x93, 0xF4, 0x93, 0xF5, /* 0xC8-0xCB */
+	0x93, 0xF6, 0xB2, 0xD9, 0xC7, 0xE6, 0x93, 0xF7, /* 0xCC-0xCF */
+	0xDF, 0xA7, 0x93, 0xF8, 0xC7, 0xDC, 0x93, 0xF9, /* 0xD0-0xD3 */
+	0x93, 0xFA, 0x93, 0xFB, 0x93, 0xFC, 0xDF, 0xA8, /* 0xD4-0xD7 */
+	0xEB, 0xA2, 0x93, 0xFD, 0x93, 0xFE, 0x94, 0x40, /* 0xD8-0xDB */
+	0x94, 0x41, 0x94, 0x42, 0xCB, 0xD3, 0x94, 0x43, /* 0xDC-0xDF */
+	0x94, 0x44, 0x94, 0x45, 0xDF, 0xAA, 0x94, 0x46, /* 0xE0-0xE3 */
+	0xDF, 0xA9, 0x94, 0x47, 0xB2, 0xC1, 0x94, 0x48, /* 0xE4-0xE7 */
+	0x94, 0x49, 0x94, 0x4A, 0x94, 0x4B, 0x94, 0x4C, /* 0xE8-0xEB */
+	0x94, 0x4D, 0x94, 0x4E, 0x94, 0x4F, 0x94, 0x50, /* 0xEC-0xEF */
+	0x94, 0x51, 0x94, 0x52, 0x94, 0x53, 0x94, 0x54, /* 0xF0-0xF3 */
+	0x94, 0x55, 0x94, 0x56, 0x94, 0x57, 0x94, 0x58, /* 0xF4-0xF7 */
+	0x94, 0x59, 0x94, 0x5A, 0x94, 0x5B, 0x94, 0x5C, /* 0xF8-0xFB */
+	0x94, 0x5D, 0x94, 0x5E, 0x94, 0x5F, 0x94, 0x60, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_65[512] = {
+	0xC5, 0xCA, 0x94, 0x61, 0x94, 0x62, 0x94, 0x63, /* 0x00-0x03 */
+	0x94, 0x64, 0x94, 0x65, 0x94, 0x66, 0x94, 0x67, /* 0x04-0x07 */
+	0x94, 0x68, 0xDF, 0xAB, 0x94, 0x69, 0x94, 0x6A, /* 0x08-0x0B */
+	0x94, 0x6B, 0x94, 0x6C, 0x94, 0x6D, 0x94, 0x6E, /* 0x0C-0x0F */
+	0x94, 0x6F, 0x94, 0x70, 0xD4, 0xDC, 0x94, 0x71, /* 0x10-0x13 */
+	0x94, 0x72, 0x94, 0x73, 0x94, 0x74, 0x94, 0x75, /* 0x14-0x17 */
+	0xC8, 0xC1, 0x94, 0x76, 0x94, 0x77, 0x94, 0x78, /* 0x18-0x1B */
+	0x94, 0x79, 0x94, 0x7A, 0x94, 0x7B, 0x94, 0x7C, /* 0x1C-0x1F */
+	0x94, 0x7D, 0x94, 0x7E, 0x94, 0x80, 0x94, 0x81, /* 0x20-0x23 */
+	0x94, 0x82, 0xDF, 0xAC, 0x94, 0x83, 0x94, 0x84, /* 0x24-0x27 */
+	0x94, 0x85, 0x94, 0x86, 0x94, 0x87, 0xBE, 0xF0, /* 0x28-0x2B */
+	0x94, 0x88, 0x94, 0x89, 0xDF, 0xAD, 0xD6, 0xA7, /* 0x2C-0x2F */
+	0x94, 0x8A, 0x94, 0x8B, 0x94, 0x8C, 0x94, 0x8D, /* 0x30-0x33 */
+	0xEA, 0xB7, 0xEB, 0xB6, 0xCA, 0xD5, 0x94, 0x8E, /* 0x34-0x37 */
+	0xD8, 0xFC, 0xB8, 0xC4, 0x94, 0x8F, 0xB9, 0xA5, /* 0x38-0x3B */
+	0x94, 0x90, 0x94, 0x91, 0xB7, 0xC5, 0xD5, 0xFE, /* 0x3C-0x3F */
+	0x94, 0x92, 0x94, 0x93, 0x94, 0x94, 0x94, 0x95, /* 0x40-0x43 */
+	0x94, 0x96, 0xB9, 0xCA, 0x94, 0x97, 0x94, 0x98, /* 0x44-0x47 */
+	0xD0, 0xA7, 0xF4, 0xCD, 0x94, 0x99, 0x94, 0x9A, /* 0x48-0x4B */
+	0xB5, 0xD0, 0x94, 0x9B, 0x94, 0x9C, 0xC3, 0xF4, /* 0x4C-0x4F */
+	0x94, 0x9D, 0xBE, 0xC8, 0x94, 0x9E, 0x94, 0x9F, /* 0x50-0x53 */
+	0x94, 0xA0, 0xEB, 0xB7, 0xB0, 0xBD, 0x94, 0xA1, /* 0x54-0x57 */
+	0x94, 0xA2, 0xBD, 0xCC, 0x94, 0xA3, 0xC1, 0xB2, /* 0x58-0x5B */
+	0x94, 0xA4, 0xB1, 0xD6, 0xB3, 0xA8, 0x94, 0xA5, /* 0x5C-0x5F */
+	0x94, 0xA6, 0x94, 0xA7, 0xB8, 0xD2, 0xC9, 0xA2, /* 0x60-0x63 */
+	0x94, 0xA8, 0x94, 0xA9, 0xB6, 0xD8, 0x94, 0xAA, /* 0x64-0x67 */
+	0x94, 0xAB, 0x94, 0xAC, 0x94, 0xAD, 0xEB, 0xB8, /* 0x68-0x6B */
+	0xBE, 0xB4, 0x94, 0xAE, 0x94, 0xAF, 0x94, 0xB0, /* 0x6C-0x6F */
+	0xCA, 0xFD, 0x94, 0xB1, 0xC7, 0xC3, 0x94, 0xB2, /* 0x70-0x73 */
+	0xD5, 0xFB, 0x94, 0xB3, 0x94, 0xB4, 0xB7, 0xF3, /* 0x74-0x77 */
+	0x94, 0xB5, 0x94, 0xB6, 0x94, 0xB7, 0x94, 0xB8, /* 0x78-0x7B */
+	0x94, 0xB9, 0x94, 0xBA, 0x94, 0xBB, 0x94, 0xBC, /* 0x7C-0x7F */
+	
+	0x94, 0xBD, 0x94, 0xBE, 0x94, 0xBF, 0x94, 0xC0, /* 0x80-0x83 */
+	0x94, 0xC1, 0x94, 0xC2, 0x94, 0xC3, 0xCE, 0xC4, /* 0x84-0x87 */
+	0x94, 0xC4, 0x94, 0xC5, 0x94, 0xC6, 0xD5, 0xAB, /* 0x88-0x8B */
+	0xB1, 0xF3, 0x94, 0xC7, 0x94, 0xC8, 0x94, 0xC9, /* 0x8C-0x8F */
+	0xEC, 0xB3, 0xB0, 0xDF, 0x94, 0xCA, 0xEC, 0xB5, /* 0x90-0x93 */
+	0x94, 0xCB, 0x94, 0xCC, 0x94, 0xCD, 0xB6, 0xB7, /* 0x94-0x97 */
+	0x94, 0xCE, 0xC1, 0xCF, 0x94, 0xCF, 0xF5, 0xFA, /* 0x98-0x9B */
+	0xD0, 0xB1, 0x94, 0xD0, 0x94, 0xD1, 0xD5, 0xE5, /* 0x9C-0x9F */
+	0x94, 0xD2, 0xCE, 0xD3, 0x94, 0xD3, 0x94, 0xD4, /* 0xA0-0xA3 */
+	0xBD, 0xEF, 0xB3, 0xE2, 0x94, 0xD5, 0xB8, 0xAB, /* 0xA4-0xA7 */
+	0x94, 0xD6, 0xD5, 0xB6, 0x94, 0xD7, 0xED, 0xBD, /* 0xA8-0xAB */
+	0x94, 0xD8, 0xB6, 0xCF, 0x94, 0xD9, 0xCB, 0xB9, /* 0xAC-0xAF */
+	0xD0, 0xC2, 0x94, 0xDA, 0x94, 0xDB, 0x94, 0xDC, /* 0xB0-0xB3 */
+	0x94, 0xDD, 0x94, 0xDE, 0x94, 0xDF, 0x94, 0xE0, /* 0xB4-0xB7 */
+	0x94, 0xE1, 0xB7, 0xBD, 0x94, 0xE2, 0x94, 0xE3, /* 0xB8-0xBB */
+	0xEC, 0xB6, 0xCA, 0xA9, 0x94, 0xE4, 0x94, 0xE5, /* 0xBC-0xBF */
+	0x94, 0xE6, 0xC5, 0xD4, 0x94, 0xE7, 0xEC, 0xB9, /* 0xC0-0xC3 */
+	0xEC, 0xB8, 0xC2, 0xC3, 0xEC, 0xB7, 0x94, 0xE8, /* 0xC4-0xC7 */
+	0x94, 0xE9, 0x94, 0xEA, 0x94, 0xEB, 0xD0, 0xFD, /* 0xC8-0xCB */
+	0xEC, 0xBA, 0x94, 0xEC, 0xEC, 0xBB, 0xD7, 0xE5, /* 0xCC-0xCF */
+	0x94, 0xED, 0x94, 0xEE, 0xEC, 0xBC, 0x94, 0xEF, /* 0xD0-0xD3 */
+	0x94, 0xF0, 0x94, 0xF1, 0xEC, 0xBD, 0xC6, 0xEC, /* 0xD4-0xD7 */
+	0x94, 0xF2, 0x94, 0xF3, 0x94, 0xF4, 0x94, 0xF5, /* 0xD8-0xDB */
+	0x94, 0xF6, 0x94, 0xF7, 0x94, 0xF8, 0x94, 0xF9, /* 0xDC-0xDF */
+	0xCE, 0xDE, 0x94, 0xFA, 0xBC, 0xC8, 0x94, 0xFB, /* 0xE0-0xE3 */
+	0x94, 0xFC, 0xC8, 0xD5, 0xB5, 0xA9, 0xBE, 0xC9, /* 0xE4-0xE7 */
+	0xD6, 0xBC, 0xD4, 0xE7, 0x94, 0xFD, 0x94, 0xFE, /* 0xE8-0xEB */
+	0xD1, 0xAE, 0xD0, 0xF1, 0xEA, 0xB8, 0xEA, 0xB9, /* 0xEC-0xEF */
+	0xEA, 0xBA, 0xBA, 0xB5, 0x95, 0x40, 0x95, 0x41, /* 0xF0-0xF3 */
+	0x95, 0x42, 0x95, 0x43, 0xCA, 0xB1, 0xBF, 0xF5, /* 0xF4-0xF7 */
+	0x95, 0x44, 0x95, 0x45, 0xCD, 0xFA, 0x95, 0x46, /* 0xF8-0xFB */
+	0x95, 0x47, 0x95, 0x48, 0x95, 0x49, 0x95, 0x4A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_66[512] = {
+	0xEA, 0xC0, 0x95, 0x4B, 0xB0, 0xBA, 0xEA, 0xBE, /* 0x00-0x03 */
+	0x95, 0x4C, 0x95, 0x4D, 0xC0, 0xA5, 0x95, 0x4E, /* 0x04-0x07 */
+	0x95, 0x4F, 0x95, 0x50, 0xEA, 0xBB, 0x95, 0x51, /* 0x08-0x0B */
+	0xB2, 0xFD, 0x95, 0x52, 0xC3, 0xF7, 0xBB, 0xE8, /* 0x0C-0x0F */
+	0x95, 0x53, 0x95, 0x54, 0x95, 0x55, 0xD2, 0xD7, /* 0x10-0x13 */
+	0xCE, 0xF4, 0xEA, 0xBF, 0x95, 0x56, 0x95, 0x57, /* 0x14-0x17 */
+	0x95, 0x58, 0xEA, 0xBC, 0x95, 0x59, 0x95, 0x5A, /* 0x18-0x1B */
+	0x95, 0x5B, 0xEA, 0xC3, 0x95, 0x5C, 0xD0, 0xC7, /* 0x1C-0x1F */
+	0xD3, 0xB3, 0x95, 0x5D, 0x95, 0x5E, 0x95, 0x5F, /* 0x20-0x23 */
+	0x95, 0x60, 0xB4, 0xBA, 0x95, 0x61, 0xC3, 0xC1, /* 0x24-0x27 */
+	0xD7, 0xF2, 0x95, 0x62, 0x95, 0x63, 0x95, 0x64, /* 0x28-0x2B */
+	0x95, 0x65, 0xD5, 0xD1, 0x95, 0x66, 0xCA, 0xC7, /* 0x2C-0x2F */
+	0x95, 0x67, 0xEA, 0xC5, 0x95, 0x68, 0x95, 0x69, /* 0x30-0x33 */
+	0xEA, 0xC4, 0xEA, 0xC7, 0xEA, 0xC6, 0x95, 0x6A, /* 0x34-0x37 */
+	0x95, 0x6B, 0x95, 0x6C, 0x95, 0x6D, 0x95, 0x6E, /* 0x38-0x3B */
+	0xD6, 0xE7, 0x95, 0x6F, 0xCF, 0xD4, 0x95, 0x70, /* 0x3C-0x3F */
+	0x95, 0x71, 0xEA, 0xCB, 0x95, 0x72, 0xBB, 0xCE, /* 0x40-0x43 */
+	0x95, 0x73, 0x95, 0x74, 0x95, 0x75, 0x95, 0x76, /* 0x44-0x47 */
+	0x95, 0x77, 0x95, 0x78, 0x95, 0x79, 0xBD, 0xFA, /* 0x48-0x4B */
+	0xC9, 0xCE, 0x95, 0x7A, 0x95, 0x7B, 0xEA, 0xCC, /* 0x4C-0x4F */
+	0x95, 0x7C, 0x95, 0x7D, 0xC9, 0xB9, 0xCF, 0xFE, /* 0x50-0x53 */
+	0xEA, 0xCA, 0xD4, 0xCE, 0xEA, 0xCD, 0xEA, 0xCF, /* 0x54-0x57 */
+	0x95, 0x7E, 0x95, 0x80, 0xCD, 0xED, 0x95, 0x81, /* 0x58-0x5B */
+	0x95, 0x82, 0x95, 0x83, 0x95, 0x84, 0xEA, 0xC9, /* 0x5C-0x5F */
+	0x95, 0x85, 0xEA, 0xCE, 0x95, 0x86, 0x95, 0x87, /* 0x60-0x63 */
+	0xCE, 0xEE, 0x95, 0x88, 0xBB, 0xDE, 0x95, 0x89, /* 0x64-0x67 */
+	0xB3, 0xBF, 0x95, 0x8A, 0x95, 0x8B, 0x95, 0x8C, /* 0x68-0x6B */
+	0x95, 0x8D, 0x95, 0x8E, 0xC6, 0xD5, 0xBE, 0xB0, /* 0x6C-0x6F */
+	0xCE, 0xFA, 0x95, 0x8F, 0x95, 0x90, 0x95, 0x91, /* 0x70-0x73 */
+	0xC7, 0xE7, 0x95, 0x92, 0xBE, 0xA7, 0xEA, 0xD0, /* 0x74-0x77 */
+	0x95, 0x93, 0x95, 0x94, 0xD6, 0xC7, 0x95, 0x95, /* 0x78-0x7B */
+	0x95, 0x96, 0x95, 0x97, 0xC1, 0xC0, 0x95, 0x98, /* 0x7C-0x7F */
+	
+	0x95, 0x99, 0x95, 0x9A, 0xD4, 0xDD, 0x95, 0x9B, /* 0x80-0x83 */
+	0xEA, 0xD1, 0x95, 0x9C, 0x95, 0x9D, 0xCF, 0xBE, /* 0x84-0x87 */
+	0x95, 0x9E, 0x95, 0x9F, 0x95, 0xA0, 0x95, 0xA1, /* 0x88-0x8B */
+	0xEA, 0xD2, 0x95, 0xA2, 0x95, 0xA3, 0x95, 0xA4, /* 0x8C-0x8F */
+	0x95, 0xA5, 0xCA, 0xEE, 0x95, 0xA6, 0x95, 0xA7, /* 0x90-0x93 */
+	0x95, 0xA8, 0x95, 0xA9, 0xC5, 0xAF, 0xB0, 0xB5, /* 0x94-0x97 */
+	0x95, 0xAA, 0x95, 0xAB, 0x95, 0xAC, 0x95, 0xAD, /* 0x98-0x9B */
+	0x95, 0xAE, 0xEA, 0xD4, 0x95, 0xAF, 0x95, 0xB0, /* 0x9C-0x9F */
+	0x95, 0xB1, 0x95, 0xB2, 0x95, 0xB3, 0x95, 0xB4, /* 0xA0-0xA3 */
+	0x95, 0xB5, 0x95, 0xB6, 0x95, 0xB7, 0xEA, 0xD3, /* 0xA4-0xA7 */
+	0xF4, 0xDF, 0x95, 0xB8, 0x95, 0xB9, 0x95, 0xBA, /* 0xA8-0xAB */
+	0x95, 0xBB, 0x95, 0xBC, 0xC4, 0xBA, 0x95, 0xBD, /* 0xAC-0xAF */
+	0x95, 0xBE, 0x95, 0xBF, 0x95, 0xC0, 0x95, 0xC1, /* 0xB0-0xB3 */
+	0xB1, 0xA9, 0x95, 0xC2, 0x95, 0xC3, 0x95, 0xC4, /* 0xB4-0xB7 */
+	0x95, 0xC5, 0xE5, 0xDF, 0x95, 0xC6, 0x95, 0xC7, /* 0xB8-0xBB */
+	0x95, 0xC8, 0x95, 0xC9, 0xEA, 0xD5, 0x95, 0xCA, /* 0xBC-0xBF */
+	0x95, 0xCB, 0x95, 0xCC, 0x95, 0xCD, 0x95, 0xCE, /* 0xC0-0xC3 */
+	0x95, 0xCF, 0x95, 0xD0, 0x95, 0xD1, 0x95, 0xD2, /* 0xC4-0xC7 */
+	0x95, 0xD3, 0x95, 0xD4, 0x95, 0xD5, 0x95, 0xD6, /* 0xC8-0xCB */
+	0x95, 0xD7, 0x95, 0xD8, 0x95, 0xD9, 0x95, 0xDA, /* 0xCC-0xCF */
+	0x95, 0xDB, 0x95, 0xDC, 0x95, 0xDD, 0x95, 0xDE, /* 0xD0-0xD3 */
+	0x95, 0xDF, 0x95, 0xE0, 0x95, 0xE1, 0x95, 0xE2, /* 0xD4-0xD7 */
+	0x95, 0xE3, 0xCA, 0xEF, 0x95, 0xE4, 0xEA, 0xD6, /* 0xD8-0xDB */
+	0xEA, 0xD7, 0xC6, 0xD8, 0x95, 0xE5, 0x95, 0xE6, /* 0xDC-0xDF */
+	0x95, 0xE7, 0x95, 0xE8, 0x95, 0xE9, 0x95, 0xEA, /* 0xE0-0xE3 */
+	0x95, 0xEB, 0x95, 0xEC, 0xEA, 0xD8, 0x95, 0xED, /* 0xE4-0xE7 */
+	0x95, 0xEE, 0xEA, 0xD9, 0x95, 0xEF, 0x95, 0xF0, /* 0xE8-0xEB */
+	0x95, 0xF1, 0x95, 0xF2, 0x95, 0xF3, 0x95, 0xF4, /* 0xEC-0xEF */
+	0xD4, 0xBB, 0x95, 0xF5, 0xC7, 0xFA, 0xD2, 0xB7, /* 0xF0-0xF3 */
+	0xB8, 0xFC, 0x95, 0xF6, 0x95, 0xF7, 0xEA, 0xC2, /* 0xF4-0xF7 */
+	0x95, 0xF8, 0xB2, 0xDC, 0x95, 0xF9, 0x95, 0xFA, /* 0xF8-0xFB */
+	0xC2, 0xFC, 0x95, 0xFB, 0xD4, 0xF8, 0xCC, 0xE6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_67[512] = {
+	0xD7, 0xEE, 0x95, 0xFC, 0x95, 0xFD, 0x95, 0xFE, /* 0x00-0x03 */
+	0x96, 0x40, 0x96, 0x41, 0x96, 0x42, 0x96, 0x43, /* 0x04-0x07 */
+	0xD4, 0xC2, 0xD3, 0xD0, 0xEB, 0xC3, 0xC5, 0xF3, /* 0x08-0x0B */
+	0x96, 0x44, 0xB7, 0xFE, 0x96, 0x45, 0x96, 0x46, /* 0x0C-0x0F */
+	0xEB, 0xD4, 0x96, 0x47, 0x96, 0x48, 0x96, 0x49, /* 0x10-0x13 */
+	0xCB, 0xB7, 0xEB, 0xDE, 0x96, 0x4A, 0xC0, 0xCA, /* 0x14-0x17 */
+	0x96, 0x4B, 0x96, 0x4C, 0x96, 0x4D, 0xCD, 0xFB, /* 0x18-0x1B */
+	0x96, 0x4E, 0xB3, 0xAF, 0x96, 0x4F, 0xC6, 0xDA, /* 0x1C-0x1F */
+	0x96, 0x50, 0x96, 0x51, 0x96, 0x52, 0x96, 0x53, /* 0x20-0x23 */
+	0x96, 0x54, 0x96, 0x55, 0xEB, 0xFC, 0x96, 0x56, /* 0x24-0x27 */
+	0xC4, 0xBE, 0x96, 0x57, 0xCE, 0xB4, 0xC4, 0xA9, /* 0x28-0x2B */
+	0xB1, 0xBE, 0xD4, 0xFD, 0x96, 0x58, 0xCA, 0xF5, /* 0x2C-0x2F */
+	0x96, 0x59, 0xD6, 0xEC, 0x96, 0x5A, 0x96, 0x5B, /* 0x30-0x33 */
+	0xC6, 0xD3, 0xB6, 0xE4, 0x96, 0x5C, 0x96, 0x5D, /* 0x34-0x37 */
+	0x96, 0x5E, 0x96, 0x5F, 0xBB, 0xFA, 0x96, 0x60, /* 0x38-0x3B */
+	0x96, 0x61, 0xD0, 0xE0, 0x96, 0x62, 0x96, 0x63, /* 0x3C-0x3F */
+	0xC9, 0xB1, 0x96, 0x64, 0xD4, 0xD3, 0xC8, 0xA8, /* 0x40-0x43 */
+	0x96, 0x65, 0x96, 0x66, 0xB8, 0xCB, 0x96, 0x67, /* 0x44-0x47 */
+	0xE8, 0xBE, 0xC9, 0xBC, 0x96, 0x68, 0x96, 0x69, /* 0x48-0x4B */
+	0xE8, 0xBB, 0x96, 0x6A, 0xC0, 0xEE, 0xD0, 0xD3, /* 0x4C-0x4F */
+	0xB2, 0xC4, 0xB4, 0xE5, 0x96, 0x6B, 0xE8, 0xBC, /* 0x50-0x53 */
+	0x96, 0x6C, 0x96, 0x6D, 0xD5, 0xC8, 0x96, 0x6E, /* 0x54-0x57 */
+	0x96, 0x6F, 0x96, 0x70, 0x96, 0x71, 0x96, 0x72, /* 0x58-0x5B */
+	0xB6, 0xC5, 0x96, 0x73, 0xE8, 0xBD, 0xCA, 0xF8, /* 0x5C-0x5F */
+	0xB8, 0xDC, 0xCC, 0xF5, 0x96, 0x74, 0x96, 0x75, /* 0x60-0x63 */
+	0x96, 0x76, 0xC0, 0xB4, 0x96, 0x77, 0x96, 0x78, /* 0x64-0x67 */
+	0xD1, 0xEE, 0xE8, 0xBF, 0xE8, 0xC2, 0x96, 0x79, /* 0x68-0x6B */
+	0x96, 0x7A, 0xBA, 0xBC, 0x96, 0x7B, 0xB1, 0xAD, /* 0x6C-0x6F */
+	0xBD, 0xDC, 0x96, 0x7C, 0xEA, 0xBD, 0xE8, 0xC3, /* 0x70-0x73 */
+	0x96, 0x7D, 0xE8, 0xC6, 0x96, 0x7E, 0xE8, 0xCB, /* 0x74-0x77 */
+	0x96, 0x80, 0x96, 0x81, 0x96, 0x82, 0x96, 0x83, /* 0x78-0x7B */
+	0xE8, 0xCC, 0x96, 0x84, 0xCB, 0xC9, 0xB0, 0xE5, /* 0x7C-0x7F */
+	
+	0x96, 0x85, 0xBC, 0xAB, 0x96, 0x86, 0x96, 0x87, /* 0x80-0x83 */
+	0xB9, 0xB9, 0x96, 0x88, 0x96, 0x89, 0xE8, 0xC1, /* 0x84-0x87 */
+	0x96, 0x8A, 0xCD, 0xF7, 0x96, 0x8B, 0xE8, 0xCA, /* 0x88-0x8B */
+	0x96, 0x8C, 0x96, 0x8D, 0x96, 0x8E, 0x96, 0x8F, /* 0x8C-0x8F */
+	0xCE, 0xF6, 0x96, 0x90, 0x96, 0x91, 0x96, 0x92, /* 0x90-0x93 */
+	0x96, 0x93, 0xD5, 0xED, 0x96, 0x94, 0xC1, 0xD6, /* 0x94-0x97 */
+	0xE8, 0xC4, 0x96, 0x95, 0xC3, 0xB6, 0x96, 0x96, /* 0x98-0x9B */
+	0xB9, 0xFB, 0xD6, 0xA6, 0xE8, 0xC8, 0x96, 0x97, /* 0x9C-0x9F */
+	0x96, 0x98, 0x96, 0x99, 0xCA, 0xE0, 0xD4, 0xE6, /* 0xA0-0xA3 */
+	0x96, 0x9A, 0xE8, 0xC0, 0x96, 0x9B, 0xE8, 0xC5, /* 0xA4-0xA7 */
+	0xE8, 0xC7, 0x96, 0x9C, 0xC7, 0xB9, 0xB7, 0xE3, /* 0xA8-0xAB */
+	0x96, 0x9D, 0xE8, 0xC9, 0x96, 0x9E, 0xBF, 0xDD, /* 0xAC-0xAF */
+	0xE8, 0xD2, 0x96, 0x9F, 0x96, 0xA0, 0xE8, 0xD7, /* 0xB0-0xB3 */
+	0x96, 0xA1, 0xE8, 0xD5, 0xBC, 0xDC, 0xBC, 0xCF, /* 0xB4-0xB7 */
+	0xE8, 0xDB, 0x96, 0xA2, 0x96, 0xA3, 0x96, 0xA4, /* 0xB8-0xBB */
+	0x96, 0xA5, 0x96, 0xA6, 0x96, 0xA7, 0x96, 0xA8, /* 0xBC-0xBF */
+	0x96, 0xA9, 0xE8, 0xDE, 0x96, 0xAA, 0xE8, 0xDA, /* 0xC0-0xC3 */
+	0xB1, 0xFA, 0x96, 0xAB, 0x96, 0xAC, 0x96, 0xAD, /* 0xC4-0xC7 */
+	0x96, 0xAE, 0x96, 0xAF, 0x96, 0xB0, 0x96, 0xB1, /* 0xC8-0xCB */
+	0x96, 0xB2, 0x96, 0xB3, 0x96, 0xB4, 0xB0, 0xD8, /* 0xCC-0xCF */
+	0xC4, 0xB3, 0xB8, 0xCC, 0xC6, 0xE2, 0xC8, 0xBE, /* 0xD0-0xD3 */
+	0xC8, 0xE1, 0x96, 0xB5, 0x96, 0xB6, 0x96, 0xB7, /* 0xD4-0xD7 */
+	0xE8, 0xCF, 0xE8, 0xD4, 0xE8, 0xD6, 0x96, 0xB8, /* 0xD8-0xDB */
+	0xB9, 0xF1, 0xE8, 0xD8, 0xD7, 0xF5, 0x96, 0xB9, /* 0xDC-0xDF */
+	0xC4, 0xFB, 0x96, 0xBA, 0xE8, 0xDC, 0x96, 0xBB, /* 0xE0-0xE3 */
+	0x96, 0xBC, 0xB2, 0xE9, 0x96, 0xBD, 0x96, 0xBE, /* 0xE4-0xE7 */
+	0x96, 0xBF, 0xE8, 0xD1, 0x96, 0xC0, 0x96, 0xC1, /* 0xE8-0xEB */
+	0xBC, 0xED, 0x96, 0xC2, 0x96, 0xC3, 0xBF, 0xC2, /* 0xEC-0xEF */
+	0xE8, 0xCD, 0xD6, 0xF9, 0x96, 0xC4, 0xC1, 0xF8, /* 0xF0-0xF3 */
+	0xB2, 0xF1, 0x96, 0xC5, 0x96, 0xC6, 0x96, 0xC7, /* 0xF4-0xF7 */
+	0x96, 0xC8, 0x96, 0xC9, 0x96, 0xCA, 0x96, 0xCB, /* 0xF8-0xFB */
+	0x96, 0xCC, 0xE8, 0xDF, 0x96, 0xCD, 0xCA, 0xC1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_68[512] = {
+	0xE8, 0xD9, 0x96, 0xCE, 0x96, 0xCF, 0x96, 0xD0, /* 0x00-0x03 */
+	0x96, 0xD1, 0xD5, 0xA4, 0x96, 0xD2, 0xB1, 0xEA, /* 0x04-0x07 */
+	0xD5, 0xBB, 0xE8, 0xCE, 0xE8, 0xD0, 0xB6, 0xB0, /* 0x08-0x0B */
+	0xE8, 0xD3, 0x96, 0xD3, 0xE8, 0xDD, 0xC0, 0xB8, /* 0x0C-0x0F */
+	0x96, 0xD4, 0xCA, 0xF7, 0x96, 0xD5, 0xCB, 0xA8, /* 0x10-0x13 */
+	0x96, 0xD6, 0x96, 0xD7, 0xC6, 0xDC, 0xC0, 0xF5, /* 0x14-0x17 */
+	0x96, 0xD8, 0x96, 0xD9, 0x96, 0xDA, 0x96, 0xDB, /* 0x18-0x1B */
+	0x96, 0xDC, 0xE8, 0xE9, 0x96, 0xDD, 0x96, 0xDE, /* 0x1C-0x1F */
+	0x96, 0xDF, 0xD0, 0xA3, 0x96, 0xE0, 0x96, 0xE1, /* 0x20-0x23 */
+	0x96, 0xE2, 0x96, 0xE3, 0x96, 0xE4, 0x96, 0xE5, /* 0x24-0x27 */
+	0x96, 0xE6, 0xE8, 0xF2, 0xD6, 0xEA, 0x96, 0xE7, /* 0x28-0x2B */
+	0x96, 0xE8, 0x96, 0xE9, 0x96, 0xEA, 0x96, 0xEB, /* 0x2C-0x2F */
+	0x96, 0xEC, 0x96, 0xED, 0xE8, 0xE0, 0xE8, 0xE1, /* 0x30-0x33 */
+	0x96, 0xEE, 0x96, 0xEF, 0x96, 0xF0, 0xD1, 0xF9, /* 0x34-0x37 */
+	0xBA, 0xCB, 0xB8, 0xF9, 0x96, 0xF1, 0x96, 0xF2, /* 0x38-0x3B */
+	0xB8, 0xF1, 0xD4, 0xD4, 0xE8, 0xEF, 0x96, 0xF3, /* 0x3C-0x3F */
+	0xE8, 0xEE, 0xE8, 0xEC, 0xB9, 0xF0, 0xCC, 0xD2, /* 0x40-0x43 */
+	0xE8, 0xE6, 0xCE, 0xA6, 0xBF, 0xF2, 0x96, 0xF4, /* 0x44-0x47 */
+	0xB0, 0xB8, 0xE8, 0xF1, 0xE8, 0xF0, 0x96, 0xF5, /* 0x48-0x4B */
+	0xD7, 0xC0, 0x96, 0xF6, 0xE8, 0xE4, 0x96, 0xF7, /* 0x4C-0x4F */
+	0xCD, 0xA9, 0xC9, 0xA3, 0x96, 0xF8, 0xBB, 0xB8, /* 0x50-0x53 */
+	0xBD, 0xDB, 0xE8, 0xEA, 0x96, 0xF9, 0x96, 0xFA, /* 0x54-0x57 */
+	0x96, 0xFB, 0x96, 0xFC, 0x96, 0xFD, 0x96, 0xFE, /* 0x58-0x5B */
+	0x97, 0x40, 0x97, 0x41, 0x97, 0x42, 0x97, 0x43, /* 0x5C-0x5F */
+	0xE8, 0xE2, 0xE8, 0xE3, 0xE8, 0xE5, 0xB5, 0xB5, /* 0x60-0x63 */
+	0xE8, 0xE7, 0xC7, 0xC5, 0xE8, 0xEB, 0xE8, 0xED, /* 0x64-0x67 */
+	0xBD, 0xB0, 0xD7, 0xAE, 0x97, 0x44, 0xE8, 0xF8, /* 0x68-0x6B */
+	0x97, 0x45, 0x97, 0x46, 0x97, 0x47, 0x97, 0x48, /* 0x6C-0x6F */
+	0x97, 0x49, 0x97, 0x4A, 0x97, 0x4B, 0x97, 0x4C, /* 0x70-0x73 */
+	0xE8, 0xF5, 0x97, 0x4D, 0xCD, 0xB0, 0xE8, 0xF6, /* 0x74-0x77 */
+	0x97, 0x4E, 0x97, 0x4F, 0x97, 0x50, 0x97, 0x51, /* 0x78-0x7B */
+	0x97, 0x52, 0x97, 0x53, 0x97, 0x54, 0x97, 0x55, /* 0x7C-0x7F */
+	
+	0x97, 0x56, 0xC1, 0xBA, 0x97, 0x57, 0xE8, 0xE8, /* 0x80-0x83 */
+	0x97, 0x58, 0xC3, 0xB7, 0xB0, 0xF0, 0x97, 0x59, /* 0x84-0x87 */
+	0x97, 0x5A, 0x97, 0x5B, 0x97, 0x5C, 0x97, 0x5D, /* 0x88-0x8B */
+	0x97, 0x5E, 0x97, 0x5F, 0x97, 0x60, 0xE8, 0xF4, /* 0x8C-0x8F */
+	0x97, 0x61, 0x97, 0x62, 0x97, 0x63, 0xE8, 0xF7, /* 0x90-0x93 */
+	0x97, 0x64, 0x97, 0x65, 0x97, 0x66, 0xB9, 0xA3, /* 0x94-0x97 */
+	0x97, 0x67, 0x97, 0x68, 0x97, 0x69, 0x97, 0x6A, /* 0x98-0x9B */
+	0x97, 0x6B, 0x97, 0x6C, 0x97, 0x6D, 0x97, 0x6E, /* 0x9C-0x9F */
+	0x97, 0x6F, 0x97, 0x70, 0xC9, 0xD2, 0x97, 0x71, /* 0xA0-0xA3 */
+	0x97, 0x72, 0x97, 0x73, 0xC3, 0xCE, 0xCE, 0xE0, /* 0xA4-0xA7 */
+	0xC0, 0xE6, 0x97, 0x74, 0x97, 0x75, 0x97, 0x76, /* 0xA8-0xAB */
+	0x97, 0x77, 0xCB, 0xF3, 0x97, 0x78, 0xCC, 0xDD, /* 0xAC-0xAF */
+	0xD0, 0xB5, 0x97, 0x79, 0x97, 0x7A, 0xCA, 0xE1, /* 0xB0-0xB3 */
+	0x97, 0x7B, 0xE8, 0xF3, 0x97, 0x7C, 0x97, 0x7D, /* 0xB4-0xB7 */
+	0x97, 0x7E, 0x97, 0x80, 0x97, 0x81, 0x97, 0x82, /* 0xB8-0xBB */
+	0x97, 0x83, 0x97, 0x84, 0x97, 0x85, 0x97, 0x86, /* 0xBC-0xBF */
+	0xBC, 0xEC, 0x97, 0x87, 0xE8, 0xF9, 0x97, 0x88, /* 0xC0-0xC3 */
+	0x97, 0x89, 0x97, 0x8A, 0x97, 0x8B, 0x97, 0x8C, /* 0xC4-0xC7 */
+	0x97, 0x8D, 0xC3, 0xDE, 0x97, 0x8E, 0xC6, 0xE5, /* 0xC8-0xCB */
+	0x97, 0x8F, 0xB9, 0xF7, 0x97, 0x90, 0x97, 0x91, /* 0xCC-0xCF */
+	0x97, 0x92, 0x97, 0x93, 0xB0, 0xF4, 0x97, 0x94, /* 0xD0-0xD3 */
+	0x97, 0x95, 0xD7, 0xD8, 0x97, 0x96, 0x97, 0x97, /* 0xD4-0xD7 */
+	0xBC, 0xAC, 0x97, 0x98, 0xC5, 0xEF, 0x97, 0x99, /* 0xD8-0xDB */
+	0x97, 0x9A, 0x97, 0x9B, 0x97, 0x9C, 0x97, 0x9D, /* 0xDC-0xDF */
+	0xCC, 0xC4, 0x97, 0x9E, 0x97, 0x9F, 0xE9, 0xA6, /* 0xE0-0xE3 */
+	0x97, 0xA0, 0x97, 0xA1, 0x97, 0xA2, 0x97, 0xA3, /* 0xE4-0xE7 */
+	0x97, 0xA4, 0x97, 0xA5, 0x97, 0xA6, 0x97, 0xA7, /* 0xE8-0xEB */
+	0x97, 0xA8, 0x97, 0xA9, 0xC9, 0xAD, 0x97, 0xAA, /* 0xEC-0xEF */
+	0xE9, 0xA2, 0xC0, 0xE2, 0x97, 0xAB, 0x97, 0xAC, /* 0xF0-0xF3 */
+	0x97, 0xAD, 0xBF, 0xC3, 0x97, 0xAE, 0x97, 0xAF, /* 0xF4-0xF7 */
+	0x97, 0xB0, 0xE8, 0xFE, 0xB9, 0xD7, 0x97, 0xB1, /* 0xF8-0xFB */
+	0xE8, 0xFB, 0x97, 0xB2, 0x97, 0xB3, 0x97, 0xB4, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_69[512] = {
+	0x97, 0xB5, 0xE9, 0xA4, 0x97, 0xB6, 0x97, 0xB7, /* 0x00-0x03 */
+	0x97, 0xB8, 0xD2, 0xCE, 0x97, 0xB9, 0x97, 0xBA, /* 0x04-0x07 */
+	0x97, 0xBB, 0x97, 0xBC, 0x97, 0xBD, 0xE9, 0xA3, /* 0x08-0x0B */
+	0x97, 0xBE, 0xD6, 0xB2, 0xD7, 0xB5, 0x97, 0xBF, /* 0x0C-0x0F */
+	0xE9, 0xA7, 0x97, 0xC0, 0xBD, 0xB7, 0x97, 0xC1, /* 0x10-0x13 */
+	0x97, 0xC2, 0x97, 0xC3, 0x97, 0xC4, 0x97, 0xC5, /* 0x14-0x17 */
+	0x97, 0xC6, 0x97, 0xC7, 0x97, 0xC8, 0x97, 0xC9, /* 0x18-0x1B */
+	0x97, 0xCA, 0x97, 0xCB, 0x97, 0xCC, 0xE8, 0xFC, /* 0x1C-0x1F */
+	0xE8, 0xFD, 0x97, 0xCD, 0x97, 0xCE, 0x97, 0xCF, /* 0x20-0x23 */
+	0xE9, 0xA1, 0x97, 0xD0, 0x97, 0xD1, 0x97, 0xD2, /* 0x24-0x27 */
+	0x97, 0xD3, 0x97, 0xD4, 0x97, 0xD5, 0x97, 0xD6, /* 0x28-0x2B */
+	0x97, 0xD7, 0xCD, 0xD6, 0x97, 0xD8, 0x97, 0xD9, /* 0x2C-0x2F */
+	0xD2, 0xAC, 0x97, 0xDA, 0x97, 0xDB, 0x97, 0xDC, /* 0x30-0x33 */
+	0xE9, 0xB2, 0x97, 0xDD, 0x97, 0xDE, 0x97, 0xDF, /* 0x34-0x37 */
+	0x97, 0xE0, 0xE9, 0xA9, 0x97, 0xE1, 0x97, 0xE2, /* 0x38-0x3B */
+	0x97, 0xE3, 0xB4, 0xAA, 0x97, 0xE4, 0xB4, 0xBB, /* 0x3C-0x3F */
+	0x97, 0xE5, 0x97, 0xE6, 0xE9, 0xAB, 0x97, 0xE7, /* 0x40-0x43 */
+	0x97, 0xE8, 0x97, 0xE9, 0x97, 0xEA, 0x97, 0xEB, /* 0x44-0x47 */
+	0x97, 0xEC, 0x97, 0xED, 0x97, 0xEE, 0x97, 0xEF, /* 0x48-0x4B */
+	0x97, 0xF0, 0x97, 0xF1, 0x97, 0xF2, 0x97, 0xF3, /* 0x4C-0x4F */
+	0x97, 0xF4, 0x97, 0xF5, 0x97, 0xF6, 0x97, 0xF7, /* 0x50-0x53 */
+	0xD0, 0xA8, 0x97, 0xF8, 0x97, 0xF9, 0xE9, 0xA5, /* 0x54-0x57 */
+	0x97, 0xFA, 0x97, 0xFB, 0xB3, 0xFE, 0x97, 0xFC, /* 0x58-0x5B */
+	0x97, 0xFD, 0xE9, 0xAC, 0xC0, 0xE3, 0x97, 0xFE, /* 0x5C-0x5F */
+	0xE9, 0xAA, 0x98, 0x40, 0x98, 0x41, 0xE9, 0xB9, /* 0x60-0x63 */
+	0x98, 0x42, 0x98, 0x43, 0xE9, 0xB8, 0x98, 0x44, /* 0x64-0x67 */
+	0x98, 0x45, 0x98, 0x46, 0x98, 0x47, 0xE9, 0xAE, /* 0x68-0x6B */
+	0x98, 0x48, 0x98, 0x49, 0xE8, 0xFA, 0x98, 0x4A, /* 0x6C-0x6F */
+	0x98, 0x4B, 0xE9, 0xA8, 0x98, 0x4C, 0x98, 0x4D, /* 0x70-0x73 */
+	0x98, 0x4E, 0x98, 0x4F, 0x98, 0x50, 0xBF, 0xAC, /* 0x74-0x77 */
+	0xE9, 0xB1, 0xE9, 0xBA, 0x98, 0x51, 0x98, 0x52, /* 0x78-0x7B */
+	0xC2, 0xA5, 0x98, 0x53, 0x98, 0x54, 0x98, 0x55, /* 0x7C-0x7F */
+	
+	0xE9, 0xAF, 0x98, 0x56, 0xB8, 0xC5, 0x98, 0x57, /* 0x80-0x83 */
+	0xE9, 0xAD, 0x98, 0x58, 0xD3, 0xDC, 0xE9, 0xB4, /* 0x84-0x87 */
+	0xE9, 0xB5, 0xE9, 0xB7, 0x98, 0x59, 0x98, 0x5A, /* 0x88-0x8B */
+	0x98, 0x5B, 0xE9, 0xC7, 0x98, 0x5C, 0x98, 0x5D, /* 0x8C-0x8F */
+	0x98, 0x5E, 0x98, 0x5F, 0x98, 0x60, 0x98, 0x61, /* 0x90-0x93 */
+	0xC0, 0xC6, 0xE9, 0xC5, 0x98, 0x62, 0x98, 0x63, /* 0x94-0x97 */
+	0xE9, 0xB0, 0x98, 0x64, 0x98, 0x65, 0xE9, 0xBB, /* 0x98-0x9B */
+	0xB0, 0xF1, 0x98, 0x66, 0x98, 0x67, 0x98, 0x68, /* 0x9C-0x9F */
+	0x98, 0x69, 0x98, 0x6A, 0x98, 0x6B, 0x98, 0x6C, /* 0xA0-0xA3 */
+	0x98, 0x6D, 0x98, 0x6E, 0x98, 0x6F, 0xE9, 0xBC, /* 0xA4-0xA7 */
+	0xD5, 0xA5, 0x98, 0x70, 0x98, 0x71, 0xE9, 0xBE, /* 0xA8-0xAB */
+	0x98, 0x72, 0xE9, 0xBF, 0x98, 0x73, 0x98, 0x74, /* 0xAC-0xAF */
+	0x98, 0x75, 0xE9, 0xC1, 0x98, 0x76, 0x98, 0x77, /* 0xB0-0xB3 */
+	0xC1, 0xF1, 0x98, 0x78, 0x98, 0x79, 0xC8, 0xB6, /* 0xB4-0xB7 */
+	0x98, 0x7A, 0x98, 0x7B, 0x98, 0x7C, 0xE9, 0xBD, /* 0xB8-0xBB */
+	0x98, 0x7D, 0x98, 0x7E, 0x98, 0x80, 0x98, 0x81, /* 0xBC-0xBF */
+	0x98, 0x82, 0xE9, 0xC2, 0x98, 0x83, 0x98, 0x84, /* 0xC0-0xC3 */
+	0x98, 0x85, 0x98, 0x86, 0x98, 0x87, 0x98, 0x88, /* 0xC4-0xC7 */
+	0x98, 0x89, 0x98, 0x8A, 0xE9, 0xC3, 0x98, 0x8B, /* 0xC8-0xCB */
+	0xE9, 0xB3, 0x98, 0x8C, 0xE9, 0xB6, 0x98, 0x8D, /* 0xCC-0xCF */
+	0xBB, 0xB1, 0x98, 0x8E, 0x98, 0x8F, 0x98, 0x90, /* 0xD0-0xD3 */
+	0xE9, 0xC0, 0x98, 0x91, 0x98, 0x92, 0x98, 0x93, /* 0xD4-0xD7 */
+	0x98, 0x94, 0x98, 0x95, 0x98, 0x96, 0xBC, 0xF7, /* 0xD8-0xDB */
+	0x98, 0x97, 0x98, 0x98, 0x98, 0x99, 0xE9, 0xC4, /* 0xDC-0xDF */
+	0xE9, 0xC6, 0x98, 0x9A, 0x98, 0x9B, 0x98, 0x9C, /* 0xE0-0xE3 */
+	0x98, 0x9D, 0x98, 0x9E, 0x98, 0x9F, 0x98, 0xA0, /* 0xE4-0xE7 */
+	0x98, 0xA1, 0x98, 0xA2, 0x98, 0xA3, 0x98, 0xA4, /* 0xE8-0xEB */
+	0x98, 0xA5, 0xE9, 0xCA, 0x98, 0xA6, 0x98, 0xA7, /* 0xEC-0xEF */
+	0x98, 0xA8, 0x98, 0xA9, 0xE9, 0xCE, 0x98, 0xAA, /* 0xF0-0xF3 */
+	0x98, 0xAB, 0x98, 0xAC, 0x98, 0xAD, 0x98, 0xAE, /* 0xF4-0xF7 */
+	0x98, 0xAF, 0x98, 0xB0, 0x98, 0xB1, 0x98, 0xB2, /* 0xF8-0xFB */
+	0x98, 0xB3, 0xB2, 0xDB, 0x98, 0xB4, 0xE9, 0xC8, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6A[512] = {
+	0x98, 0xB5, 0x98, 0xB6, 0x98, 0xB7, 0x98, 0xB8, /* 0x00-0x03 */
+	0x98, 0xB9, 0x98, 0xBA, 0x98, 0xBB, 0x98, 0xBC, /* 0x04-0x07 */
+	0x98, 0xBD, 0x98, 0xBE, 0xB7, 0xAE, 0x98, 0xBF, /* 0x08-0x0B */
+	0x98, 0xC0, 0x98, 0xC1, 0x98, 0xC2, 0x98, 0xC3, /* 0x0C-0x0F */
+	0x98, 0xC4, 0x98, 0xC5, 0x98, 0xC6, 0x98, 0xC7, /* 0x10-0x13 */
+	0x98, 0xC8, 0x98, 0xC9, 0x98, 0xCA, 0xE9, 0xCB, /* 0x14-0x17 */
+	0xE9, 0xCC, 0x98, 0xCB, 0x98, 0xCC, 0x98, 0xCD, /* 0x18-0x1B */
+	0x98, 0xCE, 0x98, 0xCF, 0x98, 0xD0, 0xD5, 0xC1, /* 0x1C-0x1F */
+	0x98, 0xD1, 0xC4, 0xA3, 0x98, 0xD2, 0x98, 0xD3, /* 0x20-0x23 */
+	0x98, 0xD4, 0x98, 0xD5, 0x98, 0xD6, 0x98, 0xD7, /* 0x24-0x27 */
+	0xE9, 0xD8, 0x98, 0xD8, 0xBA, 0xE1, 0x98, 0xD9, /* 0x28-0x2B */
+	0x98, 0xDA, 0x98, 0xDB, 0x98, 0xDC, 0xE9, 0xC9, /* 0x2C-0x2F */
+	0x98, 0xDD, 0xD3, 0xA3, 0x98, 0xDE, 0x98, 0xDF, /* 0x30-0x33 */
+	0x98, 0xE0, 0xE9, 0xD4, 0x98, 0xE1, 0x98, 0xE2, /* 0x34-0x37 */
+	0x98, 0xE3, 0x98, 0xE4, 0x98, 0xE5, 0x98, 0xE6, /* 0x38-0x3B */
+	0x98, 0xE7, 0xE9, 0xD7, 0xE9, 0xD0, 0x98, 0xE8, /* 0x3C-0x3F */
+	0x98, 0xE9, 0x98, 0xEA, 0x98, 0xEB, 0x98, 0xEC, /* 0x40-0x43 */
+	0xE9, 0xCF, 0x98, 0xED, 0x98, 0xEE, 0xC7, 0xC1, /* 0x44-0x47 */
+	0x98, 0xEF, 0x98, 0xF0, 0x98, 0xF1, 0x98, 0xF2, /* 0x48-0x4B */
+	0x98, 0xF3, 0x98, 0xF4, 0x98, 0xF5, 0x98, 0xF6, /* 0x4C-0x4F */
+	0xE9, 0xD2, 0x98, 0xF7, 0x98, 0xF8, 0x98, 0xF9, /* 0x50-0x53 */
+	0x98, 0xFA, 0x98, 0xFB, 0x98, 0xFC, 0x98, 0xFD, /* 0x54-0x57 */
+	0xE9, 0xD9, 0xB3, 0xC8, 0x98, 0xFE, 0xE9, 0xD3, /* 0x58-0x5B */
+	0x99, 0x40, 0x99, 0x41, 0x99, 0x42, 0x99, 0x43, /* 0x5C-0x5F */
+	0x99, 0x44, 0xCF, 0xF0, 0x99, 0x45, 0x99, 0x46, /* 0x60-0x63 */
+	0x99, 0x47, 0xE9, 0xCD, 0x99, 0x48, 0x99, 0x49, /* 0x64-0x67 */
+	0x99, 0x4A, 0x99, 0x4B, 0x99, 0x4C, 0x99, 0x4D, /* 0x68-0x6B */
+	0x99, 0x4E, 0x99, 0x4F, 0x99, 0x50, 0x99, 0x51, /* 0x6C-0x6F */
+	0x99, 0x52, 0xB3, 0xF7, 0x99, 0x53, 0x99, 0x54, /* 0x70-0x73 */
+	0x99, 0x55, 0x99, 0x56, 0x99, 0x57, 0x99, 0x58, /* 0x74-0x77 */
+	0x99, 0x59, 0xE9, 0xD6, 0x99, 0x5A, 0x99, 0x5B, /* 0x78-0x7B */
+	0xE9, 0xDA, 0x99, 0x5C, 0x99, 0x5D, 0x99, 0x5E, /* 0x7C-0x7F */
+	
+	0xCC, 0xB4, 0x99, 0x5F, 0x99, 0x60, 0x99, 0x61, /* 0x80-0x83 */
+	0xCF, 0xAD, 0x99, 0x62, 0x99, 0x63, 0x99, 0x64, /* 0x84-0x87 */
+	0x99, 0x65, 0x99, 0x66, 0x99, 0x67, 0x99, 0x68, /* 0x88-0x8B */
+	0x99, 0x69, 0x99, 0x6A, 0xE9, 0xD5, 0x99, 0x6B, /* 0x8C-0x8F */
+	0xE9, 0xDC, 0xE9, 0xDB, 0x99, 0x6C, 0x99, 0x6D, /* 0x90-0x93 */
+	0x99, 0x6E, 0x99, 0x6F, 0x99, 0x70, 0xE9, 0xDE, /* 0x94-0x97 */
+	0x99, 0x71, 0x99, 0x72, 0x99, 0x73, 0x99, 0x74, /* 0x98-0x9B */
+	0x99, 0x75, 0x99, 0x76, 0x99, 0x77, 0x99, 0x78, /* 0x9C-0x9F */
+	0xE9, 0xD1, 0x99, 0x79, 0x99, 0x7A, 0x99, 0x7B, /* 0xA0-0xA3 */
+	0x99, 0x7C, 0x99, 0x7D, 0x99, 0x7E, 0x99, 0x80, /* 0xA4-0xA7 */
+	0x99, 0x81, 0xE9, 0xDD, 0x99, 0x82, 0xE9, 0xDF, /* 0xA8-0xAB */
+	0xC3, 0xCA, 0x99, 0x83, 0x99, 0x84, 0x99, 0x85, /* 0xAC-0xAF */
+	0x99, 0x86, 0x99, 0x87, 0x99, 0x88, 0x99, 0x89, /* 0xB0-0xB3 */
+	0x99, 0x8A, 0x99, 0x8B, 0x99, 0x8C, 0x99, 0x8D, /* 0xB4-0xB7 */
+	0x99, 0x8E, 0x99, 0x8F, 0x99, 0x90, 0x99, 0x91, /* 0xB8-0xBB */
+	0x99, 0x92, 0x99, 0x93, 0x99, 0x94, 0x99, 0x95, /* 0xBC-0xBF */
+	0x99, 0x96, 0x99, 0x97, 0x99, 0x98, 0x99, 0x99, /* 0xC0-0xC3 */
+	0x99, 0x9A, 0x99, 0x9B, 0x99, 0x9C, 0x99, 0x9D, /* 0xC4-0xC7 */
+	0x99, 0x9E, 0x99, 0x9F, 0x99, 0xA0, 0x99, 0xA1, /* 0xC8-0xCB */
+	0x99, 0xA2, 0x99, 0xA3, 0x99, 0xA4, 0x99, 0xA5, /* 0xCC-0xCF */
+	0x99, 0xA6, 0x99, 0xA7, 0x99, 0xA8, 0x99, 0xA9, /* 0xD0-0xD3 */
+	0x99, 0xAA, 0x99, 0xAB, 0x99, 0xAC, 0x99, 0xAD, /* 0xD4-0xD7 */
+	0x99, 0xAE, 0x99, 0xAF, 0x99, 0xB0, 0x99, 0xB1, /* 0xD8-0xDB */
+	0x99, 0xB2, 0x99, 0xB3, 0x99, 0xB4, 0x99, 0xB5, /* 0xDC-0xDF */
+	0x99, 0xB6, 0x99, 0xB7, 0x99, 0xB8, 0x99, 0xB9, /* 0xE0-0xE3 */
+	0x99, 0xBA, 0x99, 0xBB, 0x99, 0xBC, 0x99, 0xBD, /* 0xE4-0xE7 */
+	0x99, 0xBE, 0x99, 0xBF, 0x99, 0xC0, 0x99, 0xC1, /* 0xE8-0xEB */
+	0x99, 0xC2, 0x99, 0xC3, 0x99, 0xC4, 0x99, 0xC5, /* 0xEC-0xEF */
+	0x99, 0xC6, 0x99, 0xC7, 0x99, 0xC8, 0x99, 0xC9, /* 0xF0-0xF3 */
+	0x99, 0xCA, 0x99, 0xCB, 0x99, 0xCC, 0x99, 0xCD, /* 0xF4-0xF7 */
+	0x99, 0xCE, 0x99, 0xCF, 0x99, 0xD0, 0x99, 0xD1, /* 0xF8-0xFB */
+	0x99, 0xD2, 0x99, 0xD3, 0x99, 0xD4, 0x99, 0xD5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6B[512] = {
+	0x99, 0xD6, 0x99, 0xD7, 0x99, 0xD8, 0x99, 0xD9, /* 0x00-0x03 */
+	0x99, 0xDA, 0x99, 0xDB, 0x99, 0xDC, 0x99, 0xDD, /* 0x04-0x07 */
+	0x99, 0xDE, 0x99, 0xDF, 0x99, 0xE0, 0x99, 0xE1, /* 0x08-0x0B */
+	0x99, 0xE2, 0x99, 0xE3, 0x99, 0xE4, 0x99, 0xE5, /* 0x0C-0x0F */
+	0x99, 0xE6, 0x99, 0xE7, 0x99, 0xE8, 0x99, 0xE9, /* 0x10-0x13 */
+	0x99, 0xEA, 0x99, 0xEB, 0x99, 0xEC, 0x99, 0xED, /* 0x14-0x17 */
+	0x99, 0xEE, 0x99, 0xEF, 0x99, 0xF0, 0x99, 0xF1, /* 0x18-0x1B */
+	0x99, 0xF2, 0x99, 0xF3, 0x99, 0xF4, 0x99, 0xF5, /* 0x1C-0x1F */
+	0xC7, 0xB7, 0xB4, 0xCE, 0xBB, 0xB6, 0xD0, 0xC0, /* 0x20-0x23 */
+	0xEC, 0xA3, 0x99, 0xF6, 0x99, 0xF7, 0xC5, 0xB7, /* 0x24-0x27 */
+	0x99, 0xF8, 0x99, 0xF9, 0x99, 0xFA, 0x99, 0xFB, /* 0x28-0x2B */
+	0x99, 0xFC, 0x99, 0xFD, 0x99, 0xFE, 0x9A, 0x40, /* 0x2C-0x2F */
+	0x9A, 0x41, 0x9A, 0x42, 0xD3, 0xFB, 0x9A, 0x43, /* 0x30-0x33 */
+	0x9A, 0x44, 0x9A, 0x45, 0x9A, 0x46, 0xEC, 0xA4, /* 0x34-0x37 */
+	0x9A, 0x47, 0xEC, 0xA5, 0xC6, 0xDB, 0x9A, 0x48, /* 0x38-0x3B */
+	0x9A, 0x49, 0x9A, 0x4A, 0xBF, 0xEE, 0x9A, 0x4B, /* 0x3C-0x3F */
+	0x9A, 0x4C, 0x9A, 0x4D, 0x9A, 0x4E, 0xEC, 0xA6, /* 0x40-0x43 */
+	0x9A, 0x4F, 0x9A, 0x50, 0xEC, 0xA7, 0xD0, 0xAA, /* 0x44-0x47 */
+	0x9A, 0x51, 0xC7, 0xB8, 0x9A, 0x52, 0x9A, 0x53, /* 0x48-0x4B */
+	0xB8, 0xE8, 0x9A, 0x54, 0x9A, 0x55, 0x9A, 0x56, /* 0x4C-0x4F */
+	0x9A, 0x57, 0x9A, 0x58, 0x9A, 0x59, 0x9A, 0x5A, /* 0x50-0x53 */
+	0x9A, 0x5B, 0x9A, 0x5C, 0x9A, 0x5D, 0x9A, 0x5E, /* 0x54-0x57 */
+	0x9A, 0x5F, 0xEC, 0xA8, 0x9A, 0x60, 0x9A, 0x61, /* 0x58-0x5B */
+	0x9A, 0x62, 0x9A, 0x63, 0x9A, 0x64, 0x9A, 0x65, /* 0x5C-0x5F */
+	0x9A, 0x66, 0x9A, 0x67, 0xD6, 0xB9, 0xD5, 0xFD, /* 0x60-0x63 */
+	0xB4, 0xCB, 0xB2, 0xBD, 0xCE, 0xE4, 0xC6, 0xE7, /* 0x64-0x67 */
+	0x9A, 0x68, 0x9A, 0x69, 0xCD, 0xE1, 0x9A, 0x6A, /* 0x68-0x6B */
+	0x9A, 0x6B, 0x9A, 0x6C, 0x9A, 0x6D, 0x9A, 0x6E, /* 0x6C-0x6F */
+	0x9A, 0x6F, 0x9A, 0x70, 0x9A, 0x71, 0x9A, 0x72, /* 0x70-0x73 */
+	0x9A, 0x73, 0x9A, 0x74, 0x9A, 0x75, 0x9A, 0x76, /* 0x74-0x77 */
+	0x9A, 0x77, 0xB4, 0xF5, 0x9A, 0x78, 0xCB, 0xC0, /* 0x78-0x7B */
+	0xBC, 0xDF, 0x9A, 0x79, 0x9A, 0x7A, 0x9A, 0x7B, /* 0x7C-0x7F */
+	
+	0x9A, 0x7C, 0xE9, 0xE2, 0xE9, 0xE3, 0xD1, 0xEA, /* 0x80-0x83 */
+	0xE9, 0xE5, 0x9A, 0x7D, 0xB4, 0xF9, 0xE9, 0xE4, /* 0x84-0x87 */
+	0x9A, 0x7E, 0xD1, 0xB3, 0xCA, 0xE2, 0xB2, 0xD0, /* 0x88-0x8B */
+	0x9A, 0x80, 0xE9, 0xE8, 0x9A, 0x81, 0x9A, 0x82, /* 0x8C-0x8F */
+	0x9A, 0x83, 0x9A, 0x84, 0xE9, 0xE6, 0xE9, 0xE7, /* 0x90-0x93 */
+	0x9A, 0x85, 0x9A, 0x86, 0xD6, 0xB3, 0x9A, 0x87, /* 0x94-0x97 */
+	0x9A, 0x88, 0x9A, 0x89, 0xE9, 0xE9, 0xE9, 0xEA, /* 0x98-0x9B */
+	0x9A, 0x8A, 0x9A, 0x8B, 0x9A, 0x8C, 0x9A, 0x8D, /* 0x9C-0x9F */
+	0x9A, 0x8E, 0xE9, 0xEB, 0x9A, 0x8F, 0x9A, 0x90, /* 0xA0-0xA3 */
+	0x9A, 0x91, 0x9A, 0x92, 0x9A, 0x93, 0x9A, 0x94, /* 0xA4-0xA7 */
+	0x9A, 0x95, 0x9A, 0x96, 0xE9, 0xEC, 0x9A, 0x97, /* 0xA8-0xAB */
+	0x9A, 0x98, 0x9A, 0x99, 0x9A, 0x9A, 0x9A, 0x9B, /* 0xAC-0xAF */
+	0x9A, 0x9C, 0x9A, 0x9D, 0x9A, 0x9E, 0xEC, 0xAF, /* 0xB0-0xB3 */
+	0xC5, 0xB9, 0xB6, 0xCE, 0x9A, 0x9F, 0xD2, 0xF3, /* 0xB4-0xB7 */
+	0x9A, 0xA0, 0x9A, 0xA1, 0x9A, 0xA2, 0x9A, 0xA3, /* 0xB8-0xBB */
+	0x9A, 0xA4, 0x9A, 0xA5, 0x9A, 0xA6, 0xB5, 0xEE, /* 0xBC-0xBF */
+	0x9A, 0xA7, 0xBB, 0xD9, 0xEC, 0xB1, 0x9A, 0xA8, /* 0xC0-0xC3 */
+	0x9A, 0xA9, 0xD2, 0xE3, 0x9A, 0xAA, 0x9A, 0xAB, /* 0xC4-0xC7 */
+	0x9A, 0xAC, 0x9A, 0xAD, 0x9A, 0xAE, 0xCE, 0xE3, /* 0xC8-0xCB */
+	0x9A, 0xAF, 0xC4, 0xB8, 0x9A, 0xB0, 0xC3, 0xBF, /* 0xCC-0xCF */
+	0x9A, 0xB1, 0x9A, 0xB2, 0xB6, 0xBE, 0xD8, 0xB9, /* 0xD0-0xD3 */
+	0xB1, 0xC8, 0xB1, 0xCF, 0xB1, 0xD1, 0xC5, 0xFE, /* 0xD4-0xD7 */
+	0x9A, 0xB3, 0xB1, 0xD0, 0x9A, 0xB4, 0xC3, 0xAB, /* 0xD8-0xDB */
+	0x9A, 0xB5, 0x9A, 0xB6, 0x9A, 0xB7, 0x9A, 0xB8, /* 0xDC-0xDF */
+	0x9A, 0xB9, 0xD5, 0xB1, 0x9A, 0xBA, 0x9A, 0xBB, /* 0xE0-0xE3 */
+	0x9A, 0xBC, 0x9A, 0xBD, 0x9A, 0xBE, 0x9A, 0xBF, /* 0xE4-0xE7 */
+	0x9A, 0xC0, 0x9A, 0xC1, 0xEB, 0xA4, 0xBA, 0xC1, /* 0xE8-0xEB */
+	0x9A, 0xC2, 0x9A, 0xC3, 0x9A, 0xC4, 0xCC, 0xBA, /* 0xEC-0xEF */
+	0x9A, 0xC5, 0x9A, 0xC6, 0x9A, 0xC7, 0xEB, 0xA5, /* 0xF0-0xF3 */
+	0x9A, 0xC8, 0xEB, 0xA7, 0x9A, 0xC9, 0x9A, 0xCA, /* 0xF4-0xF7 */
+	0x9A, 0xCB, 0xEB, 0xA8, 0x9A, 0xCC, 0x9A, 0xCD, /* 0xF8-0xFB */
+	0x9A, 0xCE, 0xEB, 0xA6, 0x9A, 0xCF, 0x9A, 0xD0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6C[512] = {
+	0x9A, 0xD1, 0x9A, 0xD2, 0x9A, 0xD3, 0x9A, 0xD4, /* 0x00-0x03 */
+	0x9A, 0xD5, 0xEB, 0xA9, 0xEB, 0xAB, 0xEB, 0xAA, /* 0x04-0x07 */
+	0x9A, 0xD6, 0x9A, 0xD7, 0x9A, 0xD8, 0x9A, 0xD9, /* 0x08-0x0B */
+	0x9A, 0xDA, 0xEB, 0xAC, 0x9A, 0xDB, 0xCA, 0xCF, /* 0x0C-0x0F */
+	0xD8, 0xB5, 0xC3, 0xF1, 0x9A, 0xDC, 0xC3, 0xA5, /* 0x10-0x13 */
+	0xC6, 0xF8, 0xEB, 0xAD, 0xC4, 0xCA, 0x9A, 0xDD, /* 0x14-0x17 */
+	0xEB, 0xAE, 0xEB, 0xAF, 0xEB, 0xB0, 0xB7, 0xD5, /* 0x18-0x1B */
+	0x9A, 0xDE, 0x9A, 0xDF, 0x9A, 0xE0, 0xB7, 0xFA, /* 0x1C-0x1F */
+	0x9A, 0xE1, 0xEB, 0xB1, 0xC7, 0xE2, 0x9A, 0xE2, /* 0x20-0x23 */
+	0xEB, 0xB3, 0x9A, 0xE3, 0xBA, 0xA4, 0xD1, 0xF5, /* 0x24-0x27 */
+	0xB0, 0xB1, 0xEB, 0xB2, 0xEB, 0xB4, 0x9A, 0xE4, /* 0x28-0x2B */
+	0x9A, 0xE5, 0x9A, 0xE6, 0xB5, 0xAA, 0xC2, 0xC8, /* 0x2C-0x2F */
+	0xC7, 0xE8, 0x9A, 0xE7, 0xEB, 0xB5, 0x9A, 0xE8, /* 0x30-0x33 */
+	0xCB, 0xAE, 0xE3, 0xDF, 0x9A, 0xE9, 0x9A, 0xEA, /* 0x34-0x37 */
+	0xD3, 0xC0, 0x9A, 0xEB, 0x9A, 0xEC, 0x9A, 0xED, /* 0x38-0x3B */
+	0x9A, 0xEE, 0xD9, 0xDB, 0x9A, 0xEF, 0x9A, 0xF0, /* 0x3C-0x3F */
+	0xCD, 0xA1, 0xD6, 0xAD, 0xC7, 0xF3, 0x9A, 0xF1, /* 0x40-0x43 */
+	0x9A, 0xF2, 0x9A, 0xF3, 0xD9, 0xE0, 0xBB, 0xE3, /* 0x44-0x47 */
+	0x9A, 0xF4, 0xBA, 0xBA, 0xE3, 0xE2, 0x9A, 0xF5, /* 0x48-0x4B */
+	0x9A, 0xF6, 0x9A, 0xF7, 0x9A, 0xF8, 0x9A, 0xF9, /* 0x4C-0x4F */
+	0xCF, 0xAB, 0x9A, 0xFA, 0x9A, 0xFB, 0x9A, 0xFC, /* 0x50-0x53 */
+	0xE3, 0xE0, 0xC9, 0xC7, 0x9A, 0xFD, 0xBA, 0xB9, /* 0x54-0x57 */
+	0x9A, 0xFE, 0x9B, 0x40, 0x9B, 0x41, 0xD1, 0xB4, /* 0x58-0x5B */
+	0xE3, 0xE1, 0xC8, 0xEA, 0xB9, 0xAF, 0xBD, 0xAD, /* 0x5C-0x5F */
+	0xB3, 0xD8, 0xCE, 0xDB, 0x9B, 0x42, 0x9B, 0x43, /* 0x60-0x63 */
+	0xCC, 0xC0, 0x9B, 0x44, 0x9B, 0x45, 0x9B, 0x46, /* 0x64-0x67 */
+	0xE3, 0xE8, 0xE3, 0xE9, 0xCD, 0xF4, 0x9B, 0x47, /* 0x68-0x6B */
+	0x9B, 0x48, 0x9B, 0x49, 0x9B, 0x4A, 0x9B, 0x4B, /* 0x6C-0x6F */
+	0xCC, 0xAD, 0x9B, 0x4C, 0xBC, 0xB3, 0x9B, 0x4D, /* 0x70-0x73 */
+	0xE3, 0xEA, 0x9B, 0x4E, 0xE3, 0xEB, 0x9B, 0x4F, /* 0x74-0x77 */
+	0x9B, 0x50, 0xD0, 0xDA, 0x9B, 0x51, 0x9B, 0x52, /* 0x78-0x7B */
+	0x9B, 0x53, 0xC6, 0xFB, 0xB7, 0xDA, 0x9B, 0x54, /* 0x7C-0x7F */
+	
+	0x9B, 0x55, 0xC7, 0xDF, 0xD2, 0xCA, 0xCE, 0xD6, /* 0x80-0x83 */
+	0x9B, 0x56, 0xE3, 0xE4, 0xE3, 0xEC, 0x9B, 0x57, /* 0x84-0x87 */
+	0xC9, 0xF2, 0xB3, 0xC1, 0x9B, 0x58, 0x9B, 0x59, /* 0x88-0x8B */
+	0xE3, 0xE7, 0x9B, 0x5A, 0x9B, 0x5B, 0xC6, 0xE3, /* 0x8C-0x8F */
+	0xE3, 0xE5, 0x9B, 0x5C, 0x9B, 0x5D, 0xED, 0xB3, /* 0x90-0x93 */
+	0xE3, 0xE6, 0x9B, 0x5E, 0x9B, 0x5F, 0x9B, 0x60, /* 0x94-0x97 */
+	0x9B, 0x61, 0xC9, 0xB3, 0x9B, 0x62, 0xC5, 0xE6, /* 0x98-0x9B */
+	0x9B, 0x63, 0x9B, 0x64, 0x9B, 0x65, 0xB9, 0xB5, /* 0x9C-0x9F */
+	0x9B, 0x66, 0xC3, 0xBB, 0x9B, 0x67, 0xE3, 0xE3, /* 0xA0-0xA3 */
+	0xC5, 0xBD, 0xC1, 0xA4, 0xC2, 0xD9, 0xB2, 0xD7, /* 0xA4-0xA7 */
+	0x9B, 0x68, 0xE3, 0xED, 0xBB, 0xA6, 0xC4, 0xAD, /* 0xA8-0xAB */
+	0x9B, 0x69, 0xE3, 0xF0, 0xBE, 0xDA, 0x9B, 0x6A, /* 0xAC-0xAF */
+	0x9B, 0x6B, 0xE3, 0xFB, 0xE3, 0xF5, 0xBA, 0xD3, /* 0xB0-0xB3 */
+	0x9B, 0x6C, 0x9B, 0x6D, 0x9B, 0x6E, 0x9B, 0x6F, /* 0xB4-0xB7 */
+	0xB7, 0xD0, 0xD3, 0xCD, 0x9B, 0x70, 0xD6, 0xCE, /* 0xB8-0xBB */
+	0xD5, 0xD3, 0xB9, 0xC1, 0xD5, 0xB4, 0xD1, 0xD8, /* 0xBC-0xBF */
+	0x9B, 0x71, 0x9B, 0x72, 0x9B, 0x73, 0x9B, 0x74, /* 0xC0-0xC3 */
+	0xD0, 0xB9, 0xC7, 0xF6, 0x9B, 0x75, 0x9B, 0x76, /* 0xC4-0xC7 */
+	0x9B, 0x77, 0xC8, 0xAA, 0xB2, 0xB4, 0x9B, 0x78, /* 0xC8-0xCB */
+	0xC3, 0xDA, 0x9B, 0x79, 0x9B, 0x7A, 0x9B, 0x7B, /* 0xCC-0xCF */
+	0xE3, 0xEE, 0x9B, 0x7C, 0x9B, 0x7D, 0xE3, 0xFC, /* 0xD0-0xD3 */
+	0xE3, 0xEF, 0xB7, 0xA8, 0xE3, 0xF7, 0xE3, 0xF4, /* 0xD4-0xD7 */
+	0x9B, 0x7E, 0x9B, 0x80, 0x9B, 0x81, 0xB7, 0xBA, /* 0xD8-0xDB */
+	0x9B, 0x82, 0x9B, 0x83, 0xC5, 0xA2, 0x9B, 0x84, /* 0xDC-0xDF */
+	0xE3, 0xF6, 0xC5, 0xDD, 0xB2, 0xA8, 0xC6, 0xFC, /* 0xE0-0xE3 */
+	0x9B, 0x85, 0xC4, 0xE0, 0x9B, 0x86, 0x9B, 0x87, /* 0xE4-0xE7 */
+	0xD7, 0xA2, 0x9B, 0x88, 0xC0, 0xE1, 0xE3, 0xF9, /* 0xE8-0xEB */
+	0x9B, 0x89, 0x9B, 0x8A, 0xE3, 0xFA, 0xE3, 0xFD, /* 0xEC-0xEF */
+	0xCC, 0xA9, 0xE3, 0xF3, 0x9B, 0x8B, 0xD3, 0xBE, /* 0xF0-0xF3 */
+	0x9B, 0x8C, 0xB1, 0xC3, 0xED, 0xB4, 0xE3, 0xF1, /* 0xF4-0xF7 */
+	0xE3, 0xF2, 0x9B, 0x8D, 0xE3, 0xF8, 0xD0, 0xBA, /* 0xF8-0xFB */
+	0xC6, 0xC3, 0xD4, 0xF3, 0xE3, 0xFE, 0x9B, 0x8E, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6D[512] = {
+	0x9B, 0x8F, 0xBD, 0xE0, 0x9B, 0x90, 0x9B, 0x91, /* 0x00-0x03 */
+	0xE4, 0xA7, 0x9B, 0x92, 0x9B, 0x93, 0xE4, 0xA6, /* 0x04-0x07 */
+	0x9B, 0x94, 0x9B, 0x95, 0x9B, 0x96, 0xD1, 0xF3, /* 0x08-0x0B */
+	0xE4, 0xA3, 0x9B, 0x97, 0xE4, 0xA9, 0x9B, 0x98, /* 0x0C-0x0F */
+	0x9B, 0x99, 0x9B, 0x9A, 0xC8, 0xF7, 0x9B, 0x9B, /* 0x10-0x13 */
+	0x9B, 0x9C, 0x9B, 0x9D, 0x9B, 0x9E, 0xCF, 0xB4, /* 0x14-0x17 */
+	0x9B, 0x9F, 0xE4, 0xA8, 0xE4, 0xAE, 0xC2, 0xE5, /* 0x18-0x1B */
+	0x9B, 0xA0, 0x9B, 0xA1, 0xB6, 0xB4, 0x9B, 0xA2, /* 0x1C-0x1F */
+	0x9B, 0xA3, 0x9B, 0xA4, 0x9B, 0xA5, 0x9B, 0xA6, /* 0x20-0x23 */
+	0x9B, 0xA7, 0xBD, 0xF2, 0x9B, 0xA8, 0xE4, 0xA2, /* 0x24-0x27 */
+	0x9B, 0xA9, 0x9B, 0xAA, 0xBA, 0xE9, 0xE4, 0xAA, /* 0x28-0x2B */
+	0x9B, 0xAB, 0x9B, 0xAC, 0xE4, 0xAC, 0x9B, 0xAD, /* 0x2C-0x2F */
+	0x9B, 0xAE, 0xB6, 0xFD, 0xD6, 0xDE, 0xE4, 0xB2, /* 0x30-0x33 */
+	0x9B, 0xAF, 0xE4, 0xAD, 0x9B, 0xB0, 0x9B, 0xB1, /* 0x34-0x37 */
+	0x9B, 0xB2, 0xE4, 0xA1, 0x9B, 0xB3, 0xBB, 0xEE, /* 0x38-0x3B */
+	0xCD, 0xDD, 0xC7, 0xA2, 0xC5, 0xC9, 0x9B, 0xB4, /* 0x3C-0x3F */
+	0x9B, 0xB5, 0xC1, 0xF7, 0x9B, 0xB6, 0xE4, 0xA4, /* 0x40-0x43 */
+	0x9B, 0xB7, 0xC7, 0xB3, 0xBD, 0xAC, 0xBD, 0xBD, /* 0x44-0x47 */
+	0xE4, 0xA5, 0x9B, 0xB8, 0xD7, 0xC7, 0xB2, 0xE2, /* 0x48-0x4B */
+	0x9B, 0xB9, 0xE4, 0xAB, 0xBC, 0xC3, 0xE4, 0xAF, /* 0x4C-0x4F */
+	0x9B, 0xBA, 0xBB, 0xEB, 0xE4, 0xB0, 0xC5, 0xA8, /* 0x50-0x53 */
+	0xE4, 0xB1, 0x9B, 0xBB, 0x9B, 0xBC, 0x9B, 0xBD, /* 0x54-0x57 */
+	0x9B, 0xBE, 0xD5, 0xE3, 0xBF, 0xA3, 0x9B, 0xBF, /* 0x58-0x5B */
+	0xE4, 0xBA, 0x9B, 0xC0, 0xE4, 0xB7, 0x9B, 0xC1, /* 0x5C-0x5F */
+	0xE4, 0xBB, 0x9B, 0xC2, 0x9B, 0xC3, 0xE4, 0xBD, /* 0x60-0x63 */
+	0x9B, 0xC4, 0x9B, 0xC5, 0xC6, 0xD6, 0x9B, 0xC6, /* 0x64-0x67 */
+	0x9B, 0xC7, 0xBA, 0xC6, 0xC0, 0xCB, 0x9B, 0xC8, /* 0x68-0x6B */
+	0x9B, 0xC9, 0x9B, 0xCA, 0xB8, 0xA1, 0xE4, 0xB4, /* 0x6C-0x6F */
+	0x9B, 0xCB, 0x9B, 0xCC, 0x9B, 0xCD, 0x9B, 0xCE, /* 0x70-0x73 */
+	0xD4, 0xA1, 0x9B, 0xCF, 0x9B, 0xD0, 0xBA, 0xA3, /* 0x74-0x77 */
+	0xBD, 0xFE, 0x9B, 0xD1, 0x9B, 0xD2, 0x9B, 0xD3, /* 0x78-0x7B */
+	0xE4, 0xBC, 0x9B, 0xD4, 0x9B, 0xD5, 0x9B, 0xD6, /* 0x7C-0x7F */
+	
+	0x9B, 0xD7, 0x9B, 0xD8, 0xCD, 0xBF, 0x9B, 0xD9, /* 0x80-0x83 */
+	0x9B, 0xDA, 0xC4, 0xF9, 0x9B, 0xDB, 0x9B, 0xDC, /* 0x84-0x87 */
+	0xCF, 0xFB, 0xC9, 0xE6, 0x9B, 0xDD, 0x9B, 0xDE, /* 0x88-0x8B */
+	0xD3, 0xBF, 0x9B, 0xDF, 0xCF, 0xD1, 0x9B, 0xE0, /* 0x8C-0x8F */
+	0x9B, 0xE1, 0xE4, 0xB3, 0x9B, 0xE2, 0xE4, 0xB8, /* 0x90-0x93 */
+	0xE4, 0xB9, 0xCC, 0xE9, 0x9B, 0xE3, 0x9B, 0xE4, /* 0x94-0x97 */
+	0x9B, 0xE5, 0x9B, 0xE6, 0x9B, 0xE7, 0xCC, 0xCE, /* 0x98-0x9B */
+	0x9B, 0xE8, 0xC0, 0xD4, 0xE4, 0xB5, 0xC1, 0xB0, /* 0x9C-0x9F */
+	0xE4, 0xB6, 0xCE, 0xD0, 0x9B, 0xE9, 0xBB, 0xC1, /* 0xA0-0xA3 */
+	0xB5, 0xD3, 0x9B, 0xEA, 0xC8, 0xF3, 0xBD, 0xA7, /* 0xA4-0xA7 */
+	0xD5, 0xC7, 0xC9, 0xAC, 0xB8, 0xA2, 0xE4, 0xCA, /* 0xA8-0xAB */
+	0x9B, 0xEB, 0x9B, 0xEC, 0xE4, 0xCC, 0xD1, 0xC4, /* 0xAC-0xAF */
+	0x9B, 0xED, 0x9B, 0xEE, 0xD2, 0xBA, 0x9B, 0xEF, /* 0xB0-0xB3 */
+	0x9B, 0xF0, 0xBA, 0xAD, 0x9B, 0xF1, 0x9B, 0xF2, /* 0xB4-0xB7 */
+	0xBA, 0xD4, 0x9B, 0xF3, 0x9B, 0xF4, 0x9B, 0xF5, /* 0xB8-0xBB */
+	0x9B, 0xF6, 0x9B, 0xF7, 0x9B, 0xF8, 0xE4, 0xC3, /* 0xBC-0xBF */
+	0xB5, 0xED, 0x9B, 0xF9, 0x9B, 0xFA, 0x9B, 0xFB, /* 0xC0-0xC3 */
+	0xD7, 0xCD, 0xE4, 0xC0, 0xCF, 0xFD, 0xE4, 0xBF, /* 0xC4-0xC7 */
+	0x9B, 0xFC, 0x9B, 0xFD, 0x9B, 0xFE, 0xC1, 0xDC, /* 0xC8-0xCB */
+	0xCC, 0xCA, 0x9C, 0x40, 0x9C, 0x41, 0x9C, 0x42, /* 0xCC-0xCF */
+	0x9C, 0x43, 0xCA, 0xE7, 0x9C, 0x44, 0x9C, 0x45, /* 0xD0-0xD3 */
+	0x9C, 0x46, 0x9C, 0x47, 0xC4, 0xD7, 0x9C, 0x48, /* 0xD4-0xD7 */
+	0xCC, 0xD4, 0xE4, 0xC8, 0x9C, 0x49, 0x9C, 0x4A, /* 0xD8-0xDB */
+	0x9C, 0x4B, 0xE4, 0xC7, 0xE4, 0xC1, 0x9C, 0x4C, /* 0xDC-0xDF */
+	0xE4, 0xC4, 0xB5, 0xAD, 0x9C, 0x4D, 0x9C, 0x4E, /* 0xE0-0xE3 */
+	0xD3, 0xD9, 0x9C, 0x4F, 0xE4, 0xC6, 0x9C, 0x50, /* 0xE4-0xE7 */
+	0x9C, 0x51, 0x9C, 0x52, 0x9C, 0x53, 0xD2, 0xF9, /* 0xE8-0xEB */
+	0xB4, 0xE3, 0x9C, 0x54, 0xBB, 0xB4, 0x9C, 0x55, /* 0xEC-0xEF */
+	0x9C, 0x56, 0xC9, 0xEE, 0x9C, 0x57, 0xB4, 0xBE, /* 0xF0-0xF3 */
+	0x9C, 0x58, 0x9C, 0x59, 0x9C, 0x5A, 0xBB, 0xEC, /* 0xF4-0xF7 */
+	0x9C, 0x5B, 0xD1, 0xCD, 0x9C, 0x5C, 0xCC, 0xED, /* 0xF8-0xFB */
+	0xED, 0xB5, 0x9C, 0x5D, 0x9C, 0x5E, 0x9C, 0x5F, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6E[512] = {
+	0x9C, 0x60, 0x9C, 0x61, 0x9C, 0x62, 0x9C, 0x63, /* 0x00-0x03 */
+	0x9C, 0x64, 0xC7, 0xE5, 0x9C, 0x65, 0x9C, 0x66, /* 0x04-0x07 */
+	0x9C, 0x67, 0x9C, 0x68, 0xD4, 0xA8, 0x9C, 0x69, /* 0x08-0x0B */
+	0xE4, 0xCB, 0xD7, 0xD5, 0xE4, 0xC2, 0x9C, 0x6A, /* 0x0C-0x0F */
+	0xBD, 0xA5, 0xE4, 0xC5, 0x9C, 0x6B, 0x9C, 0x6C, /* 0x10-0x13 */
+	0xD3, 0xE6, 0x9C, 0x6D, 0xE4, 0xC9, 0xC9, 0xF8, /* 0x14-0x17 */
+	0x9C, 0x6E, 0x9C, 0x6F, 0xE4, 0xBE, 0x9C, 0x70, /* 0x18-0x1B */
+	0x9C, 0x71, 0xD3, 0xE5, 0x9C, 0x72, 0x9C, 0x73, /* 0x1C-0x1F */
+	0xC7, 0xFE, 0xB6, 0xC9, 0x9C, 0x74, 0xD4, 0xFC, /* 0x20-0x23 */
+	0xB2, 0xB3, 0xE4, 0xD7, 0x9C, 0x75, 0x9C, 0x76, /* 0x24-0x27 */
+	0x9C, 0x77, 0xCE, 0xC2, 0x9C, 0x78, 0xE4, 0xCD, /* 0x28-0x2B */
+	0x9C, 0x79, 0xCE, 0xBC, 0x9C, 0x7A, 0xB8, 0xDB, /* 0x2C-0x2F */
+	0x9C, 0x7B, 0x9C, 0x7C, 0xE4, 0xD6, 0x9C, 0x7D, /* 0x30-0x33 */
+	0xBF, 0xCA, 0x9C, 0x7E, 0x9C, 0x80, 0x9C, 0x81, /* 0x34-0x37 */
+	0xD3, 0xCE, 0x9C, 0x82, 0xC3, 0xEC, 0x9C, 0x83, /* 0x38-0x3B */
+	0x9C, 0x84, 0x9C, 0x85, 0x9C, 0x86, 0x9C, 0x87, /* 0x3C-0x3F */
+	0x9C, 0x88, 0x9C, 0x89, 0x9C, 0x8A, 0xC5, 0xC8, /* 0x40-0x43 */
+	0xE4, 0xD8, 0x9C, 0x8B, 0x9C, 0x8C, 0x9C, 0x8D, /* 0x44-0x47 */
+	0x9C, 0x8E, 0x9C, 0x8F, 0x9C, 0x90, 0x9C, 0x91, /* 0x48-0x4B */
+	0x9C, 0x92, 0xCD, 0xC4, 0xE4, 0xCF, 0x9C, 0x93, /* 0x4C-0x4F */
+	0x9C, 0x94, 0x9C, 0x95, 0x9C, 0x96, 0xE4, 0xD4, /* 0x50-0x53 */
+	0xE4, 0xD5, 0x9C, 0x97, 0xBA, 0xFE, 0x9C, 0x98, /* 0x54-0x57 */
+	0xCF, 0xE6, 0x9C, 0x99, 0x9C, 0x9A, 0xD5, 0xBF, /* 0x58-0x5B */
+	0x9C, 0x9B, 0x9C, 0x9C, 0x9C, 0x9D, 0xE4, 0xD2, /* 0x5C-0x5F */
+	0x9C, 0x9E, 0x9C, 0x9F, 0x9C, 0xA0, 0x9C, 0xA1, /* 0x60-0x63 */
+	0x9C, 0xA2, 0x9C, 0xA3, 0x9C, 0xA4, 0x9C, 0xA5, /* 0x64-0x67 */
+	0x9C, 0xA6, 0x9C, 0xA7, 0x9C, 0xA8, 0xE4, 0xD0, /* 0x68-0x6B */
+	0x9C, 0xA9, 0x9C, 0xAA, 0xE4, 0xCE, 0x9C, 0xAB, /* 0x6C-0x6F */
+	0x9C, 0xAC, 0x9C, 0xAD, 0x9C, 0xAE, 0x9C, 0xAF, /* 0x70-0x73 */
+	0x9C, 0xB0, 0x9C, 0xB1, 0x9C, 0xB2, 0x9C, 0xB3, /* 0x74-0x77 */
+	0x9C, 0xB4, 0x9C, 0xB5, 0x9C, 0xB6, 0x9C, 0xB7, /* 0x78-0x7B */
+	0x9C, 0xB8, 0x9C, 0xB9, 0xCD, 0xE5, 0xCA, 0xAA, /* 0x7C-0x7F */
+	
+	0x9C, 0xBA, 0x9C, 0xBB, 0x9C, 0xBC, 0xC0, 0xA3, /* 0x80-0x83 */
+	0x9C, 0xBD, 0xBD, 0xA6, 0xE4, 0xD3, 0x9C, 0xBE, /* 0x84-0x87 */
+	0x9C, 0xBF, 0xB8, 0xC8, 0x9C, 0xC0, 0x9C, 0xC1, /* 0x88-0x8B */
+	0x9C, 0xC2, 0x9C, 0xC3, 0x9C, 0xC4, 0xE4, 0xE7, /* 0x8C-0x8F */
+	0xD4, 0xB4, 0x9C, 0xC5, 0x9C, 0xC6, 0x9C, 0xC7, /* 0x90-0x93 */
+	0x9C, 0xC8, 0x9C, 0xC9, 0x9C, 0xCA, 0x9C, 0xCB, /* 0x94-0x97 */
+	0xE4, 0xDB, 0x9C, 0xCC, 0x9C, 0xCD, 0x9C, 0xCE, /* 0x98-0x9B */
+	0xC1, 0xEF, 0x9C, 0xCF, 0x9C, 0xD0, 0xE4, 0xE9, /* 0x9C-0x9F */
+	0x9C, 0xD1, 0x9C, 0xD2, 0xD2, 0xE7, 0x9C, 0xD3, /* 0xA0-0xA3 */
+	0x9C, 0xD4, 0xE4, 0xDF, 0x9C, 0xD5, 0xE4, 0xE0, /* 0xA4-0xA7 */
+	0x9C, 0xD6, 0x9C, 0xD7, 0xCF, 0xAA, 0x9C, 0xD8, /* 0xA8-0xAB */
+	0x9C, 0xD9, 0x9C, 0xDA, 0x9C, 0xDB, 0xCB, 0xDD, /* 0xAC-0xAF */
+	0x9C, 0xDC, 0xE4, 0xDA, 0xE4, 0xD1, 0x9C, 0xDD, /* 0xB0-0xB3 */
+	0xE4, 0xE5, 0x9C, 0xDE, 0xC8, 0xDC, 0xE4, 0xE3, /* 0xB4-0xB7 */
+	0x9C, 0xDF, 0x9C, 0xE0, 0xC4, 0xE7, 0xE4, 0xE2, /* 0xB8-0xBB */
+	0x9C, 0xE1, 0xE4, 0xE1, 0x9C, 0xE2, 0x9C, 0xE3, /* 0xBC-0xBF */
+	0x9C, 0xE4, 0xB3, 0xFC, 0xE4, 0xE8, 0x9C, 0xE5, /* 0xC0-0xC3 */
+	0x9C, 0xE6, 0x9C, 0xE7, 0x9C, 0xE8, 0xB5, 0xE1, /* 0xC4-0xC7 */
+	0x9C, 0xE9, 0x9C, 0xEA, 0x9C, 0xEB, 0xD7, 0xCC, /* 0xC8-0xCB */
+	0x9C, 0xEC, 0x9C, 0xED, 0x9C, 0xEE, 0xE4, 0xE6, /* 0xCC-0xCF */
+	0x9C, 0xEF, 0xBB, 0xAC, 0x9C, 0xF0, 0xD7, 0xD2, /* 0xD0-0xD3 */
+	0xCC, 0xCF, 0xEB, 0xF8, 0x9C, 0xF1, 0xE4, 0xE4, /* 0xD4-0xD7 */
+	0x9C, 0xF2, 0x9C, 0xF3, 0xB9, 0xF6, 0x9C, 0xF4, /* 0xD8-0xDB */
+	0x9C, 0xF5, 0x9C, 0xF6, 0xD6, 0xCD, 0xE4, 0xD9, /* 0xDC-0xDF */
+	0xE4, 0xDC, 0xC2, 0xFA, 0xE4, 0xDE, 0x9C, 0xF7, /* 0xE0-0xE3 */
+	0xC2, 0xCB, 0xC0, 0xC4, 0xC2, 0xD0, 0x9C, 0xF8, /* 0xE4-0xE7 */
+	0xB1, 0xF5, 0xCC, 0xB2, 0x9C, 0xF9, 0x9C, 0xFA, /* 0xE8-0xEB */
+	0x9C, 0xFB, 0x9C, 0xFC, 0x9C, 0xFD, 0x9C, 0xFE, /* 0xEC-0xEF */
+	0x9D, 0x40, 0x9D, 0x41, 0x9D, 0x42, 0x9D, 0x43, /* 0xF0-0xF3 */
+	0xB5, 0xCE, 0x9D, 0x44, 0x9D, 0x45, 0x9D, 0x46, /* 0xF4-0xF7 */
+	0x9D, 0x47, 0xE4, 0xEF, 0x9D, 0x48, 0x9D, 0x49, /* 0xF8-0xFB */
+	0x9D, 0x4A, 0x9D, 0x4B, 0x9D, 0x4C, 0x9D, 0x4D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6F[512] = {
+	0x9D, 0x4E, 0x9D, 0x4F, 0xC6, 0xAF, 0x9D, 0x50, /* 0x00-0x03 */
+	0x9D, 0x51, 0x9D, 0x52, 0xC6, 0xE1, 0x9D, 0x53, /* 0x04-0x07 */
+	0x9D, 0x54, 0xE4, 0xF5, 0x9D, 0x55, 0x9D, 0x56, /* 0x08-0x0B */
+	0x9D, 0x57, 0x9D, 0x58, 0x9D, 0x59, 0xC2, 0xA9, /* 0x0C-0x0F */
+	0x9D, 0x5A, 0x9D, 0x5B, 0x9D, 0x5C, 0xC0, 0xEC, /* 0x10-0x13 */
+	0xD1, 0xDD, 0xE4, 0xEE, 0x9D, 0x5D, 0x9D, 0x5E, /* 0x14-0x17 */
+	0x9D, 0x5F, 0x9D, 0x60, 0x9D, 0x61, 0x9D, 0x62, /* 0x18-0x1B */
+	0x9D, 0x63, 0x9D, 0x64, 0x9D, 0x65, 0x9D, 0x66, /* 0x1C-0x1F */
+	0xC4, 0xAE, 0x9D, 0x67, 0x9D, 0x68, 0x9D, 0x69, /* 0x20-0x23 */
+	0xE4, 0xED, 0x9D, 0x6A, 0x9D, 0x6B, 0x9D, 0x6C, /* 0x24-0x27 */
+	0x9D, 0x6D, 0xE4, 0xF6, 0xE4, 0xF4, 0xC2, 0xFE, /* 0x28-0x2B */
+	0x9D, 0x6E, 0xE4, 0xDD, 0x9D, 0x6F, 0xE4, 0xF0, /* 0x2C-0x2F */
+	0x9D, 0x70, 0xCA, 0xFE, 0x9D, 0x71, 0xD5, 0xC4, /* 0x30-0x33 */
+	0x9D, 0x72, 0x9D, 0x73, 0xE4, 0xF1, 0x9D, 0x74, /* 0x34-0x37 */
+	0x9D, 0x75, 0x9D, 0x76, 0x9D, 0x77, 0x9D, 0x78, /* 0x38-0x3B */
+	0x9D, 0x79, 0x9D, 0x7A, 0xD1, 0xFA, 0x9D, 0x7B, /* 0x3C-0x3F */
+	0x9D, 0x7C, 0x9D, 0x7D, 0x9D, 0x7E, 0x9D, 0x80, /* 0x40-0x43 */
+	0x9D, 0x81, 0x9D, 0x82, 0xE4, 0xEB, 0xE4, 0xEC, /* 0x44-0x47 */
+	0x9D, 0x83, 0x9D, 0x84, 0x9D, 0x85, 0xE4, 0xF2, /* 0x48-0x4B */
+	0x9D, 0x86, 0xCE, 0xAB, 0x9D, 0x87, 0x9D, 0x88, /* 0x4C-0x4F */
+	0x9D, 0x89, 0x9D, 0x8A, 0x9D, 0x8B, 0x9D, 0x8C, /* 0x50-0x53 */
+	0x9D, 0x8D, 0x9D, 0x8E, 0x9D, 0x8F, 0x9D, 0x90, /* 0x54-0x57 */
+	0xC5, 0xCB, 0x9D, 0x91, 0x9D, 0x92, 0x9D, 0x93, /* 0x58-0x5B */
+	0xC7, 0xB1, 0x9D, 0x94, 0xC2, 0xBA, 0x9D, 0x95, /* 0x5C-0x5F */
+	0x9D, 0x96, 0x9D, 0x97, 0xE4, 0xEA, 0x9D, 0x98, /* 0x60-0x63 */
+	0x9D, 0x99, 0x9D, 0x9A, 0xC1, 0xCA, 0x9D, 0x9B, /* 0x64-0x67 */
+	0x9D, 0x9C, 0x9D, 0x9D, 0x9D, 0x9E, 0x9D, 0x9F, /* 0x68-0x6B */
+	0x9D, 0xA0, 0xCC, 0xB6, 0xB3, 0xB1, 0x9D, 0xA1, /* 0x6C-0x6F */
+	0x9D, 0xA2, 0x9D, 0xA3, 0xE4, 0xFB, 0x9D, 0xA4, /* 0x70-0x73 */
+	0xE4, 0xF3, 0x9D, 0xA5, 0x9D, 0xA6, 0x9D, 0xA7, /* 0x74-0x77 */
+	0xE4, 0xFA, 0x9D, 0xA8, 0xE4, 0xFD, 0x9D, 0xA9, /* 0x78-0x7B */
+	0xE4, 0xFC, 0x9D, 0xAA, 0x9D, 0xAB, 0x9D, 0xAC, /* 0x7C-0x7F */
+	
+	0x9D, 0xAD, 0x9D, 0xAE, 0x9D, 0xAF, 0x9D, 0xB0, /* 0x80-0x83 */
+	0xB3, 0xCE, 0x9D, 0xB1, 0x9D, 0xB2, 0x9D, 0xB3, /* 0x84-0x87 */
+	0xB3, 0xBA, 0xE4, 0xF7, 0x9D, 0xB4, 0x9D, 0xB5, /* 0x88-0x8B */
+	0xE4, 0xF9, 0xE4, 0xF8, 0xC5, 0xEC, 0x9D, 0xB6, /* 0x8C-0x8F */
+	0x9D, 0xB7, 0x9D, 0xB8, 0x9D, 0xB9, 0x9D, 0xBA, /* 0x90-0x93 */
+	0x9D, 0xBB, 0x9D, 0xBC, 0x9D, 0xBD, 0x9D, 0xBE, /* 0x94-0x97 */
+	0x9D, 0xBF, 0x9D, 0xC0, 0x9D, 0xC1, 0x9D, 0xC2, /* 0x98-0x9B */
+	0xC0, 0xBD, 0x9D, 0xC3, 0x9D, 0xC4, 0x9D, 0xC5, /* 0x9C-0x9F */
+	0x9D, 0xC6, 0xD4, 0xE8, 0x9D, 0xC7, 0x9D, 0xC8, /* 0xA0-0xA3 */
+	0x9D, 0xC9, 0x9D, 0xCA, 0x9D, 0xCB, 0xE5, 0xA2, /* 0xA4-0xA7 */
+	0x9D, 0xCC, 0x9D, 0xCD, 0x9D, 0xCE, 0x9D, 0xCF, /* 0xA8-0xAB */
+	0x9D, 0xD0, 0x9D, 0xD1, 0x9D, 0xD2, 0x9D, 0xD3, /* 0xAC-0xAF */
+	0x9D, 0xD4, 0x9D, 0xD5, 0x9D, 0xD6, 0xB0, 0xC4, /* 0xB0-0xB3 */
+	0x9D, 0xD7, 0x9D, 0xD8, 0xE5, 0xA4, 0x9D, 0xD9, /* 0xB4-0xB7 */
+	0x9D, 0xDA, 0xE5, 0xA3, 0x9D, 0xDB, 0x9D, 0xDC, /* 0xB8-0xBB */
+	0x9D, 0xDD, 0x9D, 0xDE, 0x9D, 0xDF, 0x9D, 0xE0, /* 0xBC-0xBF */
+	0xBC, 0xA4, 0x9D, 0xE1, 0xE5, 0xA5, 0x9D, 0xE2, /* 0xC0-0xC3 */
+	0x9D, 0xE3, 0x9D, 0xE4, 0x9D, 0xE5, 0x9D, 0xE6, /* 0xC4-0xC7 */
+	0x9D, 0xE7, 0xE5, 0xA1, 0x9D, 0xE8, 0x9D, 0xE9, /* 0xC8-0xCB */
+	0x9D, 0xEA, 0x9D, 0xEB, 0x9D, 0xEC, 0x9D, 0xED, /* 0xCC-0xCF */
+	0x9D, 0xEE, 0xE4, 0xFE, 0xB1, 0xF4, 0x9D, 0xEF, /* 0xD0-0xD3 */
+	0x9D, 0xF0, 0x9D, 0xF1, 0x9D, 0xF2, 0x9D, 0xF3, /* 0xD4-0xD7 */
+	0x9D, 0xF4, 0x9D, 0xF5, 0x9D, 0xF6, 0x9D, 0xF7, /* 0xD8-0xDB */
+	0x9D, 0xF8, 0x9D, 0xF9, 0xE5, 0xA8, 0x9D, 0xFA, /* 0xDC-0xDF */
+	0xE5, 0xA9, 0xE5, 0xA6, 0x9D, 0xFB, 0x9D, 0xFC, /* 0xE0-0xE3 */
+	0x9D, 0xFD, 0x9D, 0xFE, 0x9E, 0x40, 0x9E, 0x41, /* 0xE4-0xE7 */
+	0x9E, 0x42, 0x9E, 0x43, 0x9E, 0x44, 0x9E, 0x45, /* 0xE8-0xEB */
+	0x9E, 0x46, 0x9E, 0x47, 0xE5, 0xA7, 0xE5, 0xAA, /* 0xEC-0xEF */
+	0x9E, 0x48, 0x9E, 0x49, 0x9E, 0x4A, 0x9E, 0x4B, /* 0xF0-0xF3 */
+	0x9E, 0x4C, 0x9E, 0x4D, 0x9E, 0x4E, 0x9E, 0x4F, /* 0xF4-0xF7 */
+	0x9E, 0x50, 0x9E, 0x51, 0x9E, 0x52, 0x9E, 0x53, /* 0xF8-0xFB */
+	0x9E, 0x54, 0x9E, 0x55, 0x9E, 0x56, 0x9E, 0x57, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_70[512] = {
+	0x9E, 0x58, 0x9E, 0x59, 0x9E, 0x5A, 0x9E, 0x5B, /* 0x00-0x03 */
+	0x9E, 0x5C, 0x9E, 0x5D, 0x9E, 0x5E, 0x9E, 0x5F, /* 0x04-0x07 */
+	0x9E, 0x60, 0x9E, 0x61, 0x9E, 0x62, 0x9E, 0x63, /* 0x08-0x0B */
+	0x9E, 0x64, 0x9E, 0x65, 0x9E, 0x66, 0x9E, 0x67, /* 0x0C-0x0F */
+	0x9E, 0x68, 0xC6, 0xD9, 0x9E, 0x69, 0x9E, 0x6A, /* 0x10-0x13 */
+	0x9E, 0x6B, 0x9E, 0x6C, 0x9E, 0x6D, 0x9E, 0x6E, /* 0x14-0x17 */
+	0x9E, 0x6F, 0x9E, 0x70, 0xE5, 0xAB, 0xE5, 0xAD, /* 0x18-0x1B */
+	0x9E, 0x71, 0x9E, 0x72, 0x9E, 0x73, 0x9E, 0x74, /* 0x1C-0x1F */
+	0x9E, 0x75, 0x9E, 0x76, 0x9E, 0x77, 0xE5, 0xAC, /* 0x20-0x23 */
+	0x9E, 0x78, 0x9E, 0x79, 0x9E, 0x7A, 0x9E, 0x7B, /* 0x24-0x27 */
+	0x9E, 0x7C, 0x9E, 0x7D, 0x9E, 0x7E, 0x9E, 0x80, /* 0x28-0x2B */
+	0x9E, 0x81, 0x9E, 0x82, 0x9E, 0x83, 0x9E, 0x84, /* 0x2C-0x2F */
+	0x9E, 0x85, 0x9E, 0x86, 0x9E, 0x87, 0x9E, 0x88, /* 0x30-0x33 */
+	0x9E, 0x89, 0xE5, 0xAF, 0x9E, 0x8A, 0x9E, 0x8B, /* 0x34-0x37 */
+	0x9E, 0x8C, 0xE5, 0xAE, 0x9E, 0x8D, 0x9E, 0x8E, /* 0x38-0x3B */
+	0x9E, 0x8F, 0x9E, 0x90, 0x9E, 0x91, 0x9E, 0x92, /* 0x3C-0x3F */
+	0x9E, 0x93, 0x9E, 0x94, 0x9E, 0x95, 0x9E, 0x96, /* 0x40-0x43 */
+	0x9E, 0x97, 0x9E, 0x98, 0x9E, 0x99, 0x9E, 0x9A, /* 0x44-0x47 */
+	0x9E, 0x9B, 0x9E, 0x9C, 0x9E, 0x9D, 0x9E, 0x9E, /* 0x48-0x4B */
+	0xB9, 0xE0, 0x9E, 0x9F, 0x9E, 0xA0, 0xE5, 0xB0, /* 0x4C-0x4F */
+	0x9E, 0xA1, 0x9E, 0xA2, 0x9E, 0xA3, 0x9E, 0xA4, /* 0x50-0x53 */
+	0x9E, 0xA5, 0x9E, 0xA6, 0x9E, 0xA7, 0x9E, 0xA8, /* 0x54-0x57 */
+	0x9E, 0xA9, 0x9E, 0xAA, 0x9E, 0xAB, 0x9E, 0xAC, /* 0x58-0x5B */
+	0x9E, 0xAD, 0x9E, 0xAE, 0xE5, 0xB1, 0x9E, 0xAF, /* 0x5C-0x5F */
+	0x9E, 0xB0, 0x9E, 0xB1, 0x9E, 0xB2, 0x9E, 0xB3, /* 0x60-0x63 */
+	0x9E, 0xB4, 0x9E, 0xB5, 0x9E, 0xB6, 0x9E, 0xB7, /* 0x64-0x67 */
+	0x9E, 0xB8, 0x9E, 0xB9, 0x9E, 0xBA, 0xBB, 0xF0, /* 0x68-0x6B */
+	0xEC, 0xE1, 0xC3, 0xF0, 0x9E, 0xBB, 0xB5, 0xC6, /* 0x6C-0x6F */
+	0xBB, 0xD2, 0x9E, 0xBC, 0x9E, 0xBD, 0x9E, 0xBE, /* 0x70-0x73 */
+	0x9E, 0xBF, 0xC1, 0xE9, 0xD4, 0xEE, 0x9E, 0xC0, /* 0x74-0x77 */
+	0xBE, 0xC4, 0x9E, 0xC1, 0x9E, 0xC2, 0x9E, 0xC3, /* 0x78-0x7B */
+	0xD7, 0xC6, 0x9E, 0xC4, 0xD4, 0xD6, 0xB2, 0xD3, /* 0x7C-0x7F */
+	
+	0xEC, 0xBE, 0x9E, 0xC5, 0x9E, 0xC6, 0x9E, 0xC7, /* 0x80-0x83 */
+	0x9E, 0xC8, 0xEA, 0xC1, 0x9E, 0xC9, 0x9E, 0xCA, /* 0x84-0x87 */
+	0x9E, 0xCB, 0xC2, 0xAF, 0xB4, 0xB6, 0x9E, 0xCC, /* 0x88-0x8B */
+	0x9E, 0xCD, 0x9E, 0xCE, 0xD1, 0xD7, 0x9E, 0xCF, /* 0x8C-0x8F */
+	0x9E, 0xD0, 0x9E, 0xD1, 0xB3, 0xB4, 0x9E, 0xD2, /* 0x90-0x93 */
+	0xC8, 0xB2, 0xBF, 0xBB, 0xEC, 0xC0, 0x9E, 0xD3, /* 0x94-0x97 */
+	0x9E, 0xD4, 0xD6, 0xCB, 0x9E, 0xD5, 0x9E, 0xD6, /* 0x98-0x9B */
+	0xEC, 0xBF, 0xEC, 0xC1, 0x9E, 0xD7, 0x9E, 0xD8, /* 0x9C-0x9F */
+	0x9E, 0xD9, 0x9E, 0xDA, 0x9E, 0xDB, 0x9E, 0xDC, /* 0xA0-0xA3 */
+	0x9E, 0xDD, 0x9E, 0xDE, 0x9E, 0xDF, 0x9E, 0xE0, /* 0xA4-0xA7 */
+	0x9E, 0xE1, 0x9E, 0xE2, 0x9E, 0xE3, 0xEC, 0xC5, /* 0xA8-0xAB */
+	0xBE, 0xE6, 0xCC, 0xBF, 0xC5, 0xDA, 0xBE, 0xBC, /* 0xAC-0xAF */
+	0x9E, 0xE4, 0xEC, 0xC6, 0x9E, 0xE5, 0xB1, 0xFE, /* 0xB0-0xB3 */
+	0x9E, 0xE6, 0x9E, 0xE7, 0x9E, 0xE8, 0xEC, 0xC4, /* 0xB4-0xB7 */
+	0xD5, 0xA8, 0xB5, 0xE3, 0x9E, 0xE9, 0xEC, 0xC2, /* 0xB8-0xBB */
+	0xC1, 0xB6, 0xB3, 0xE3, 0x9E, 0xEA, 0x9E, 0xEB, /* 0xBC-0xBF */
+	0xEC, 0xC3, 0xCB, 0xB8, 0xC0, 0xC3, 0xCC, 0xFE, /* 0xC0-0xC3 */
+	0x9E, 0xEC, 0x9E, 0xED, 0x9E, 0xEE, 0x9E, 0xEF, /* 0xC4-0xC7 */
+	0xC1, 0xD2, 0x9E, 0xF0, 0xEC, 0xC8, 0x9E, 0xF1, /* 0xC8-0xCB */
+	0x9E, 0xF2, 0x9E, 0xF3, 0x9E, 0xF4, 0x9E, 0xF5, /* 0xCC-0xCF */
+	0x9E, 0xF6, 0x9E, 0xF7, 0x9E, 0xF8, 0x9E, 0xF9, /* 0xD0-0xD3 */
+	0x9E, 0xFA, 0x9E, 0xFB, 0x9E, 0xFC, 0x9E, 0xFD, /* 0xD4-0xD7 */
+	0xBA, 0xE6, 0xC0, 0xD3, 0x9E, 0xFE, 0xD6, 0xF2, /* 0xD8-0xDB */
+	0x9F, 0x40, 0x9F, 0x41, 0x9F, 0x42, 0xD1, 0xCC, /* 0xDC-0xDF */
+	0x9F, 0x43, 0x9F, 0x44, 0x9F, 0x45, 0x9F, 0x46, /* 0xE0-0xE3 */
+	0xBF, 0xBE, 0x9F, 0x47, 0xB7, 0xB3, 0xC9, 0xD5, /* 0xE4-0xE7 */
+	0xEC, 0xC7, 0xBB, 0xE2, 0x9F, 0x48, 0xCC, 0xCC, /* 0xE8-0xEB */
+	0xBD, 0xFD, 0xC8, 0xC8, 0x9F, 0x49, 0xCF, 0xA9, /* 0xEC-0xEF */
+	0x9F, 0x4A, 0x9F, 0x4B, 0x9F, 0x4C, 0x9F, 0x4D, /* 0xF0-0xF3 */
+	0x9F, 0x4E, 0x9F, 0x4F, 0x9F, 0x50, 0xCD, 0xE9, /* 0xF4-0xF7 */
+	0x9F, 0x51, 0xC5, 0xEB, 0x9F, 0x52, 0x9F, 0x53, /* 0xF8-0xFB */
+	0x9F, 0x54, 0xB7, 0xE9, 0x9F, 0x55, 0x9F, 0x56, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_71[512] = {
+	0x9F, 0x57, 0x9F, 0x58, 0x9F, 0x59, 0x9F, 0x5A, /* 0x00-0x03 */
+	0x9F, 0x5B, 0x9F, 0x5C, 0x9F, 0x5D, 0x9F, 0x5E, /* 0x04-0x07 */
+	0x9F, 0x5F, 0xD1, 0xC9, 0xBA, 0xB8, 0x9F, 0x60, /* 0x08-0x0B */
+	0x9F, 0x61, 0x9F, 0x62, 0x9F, 0x63, 0x9F, 0x64, /* 0x0C-0x0F */
+	0xEC, 0xC9, 0x9F, 0x65, 0x9F, 0x66, 0xEC, 0xCA, /* 0x10-0x13 */
+	0x9F, 0x67, 0xBB, 0xC0, 0xEC, 0xCB, 0x9F, 0x68, /* 0x14-0x17 */
+	0xEC, 0xE2, 0xB1, 0xBA, 0xB7, 0xD9, 0x9F, 0x69, /* 0x18-0x1B */
+	0x9F, 0x6A, 0x9F, 0x6B, 0x9F, 0x6C, 0x9F, 0x6D, /* 0x1C-0x1F */
+	0x9F, 0x6E, 0x9F, 0x6F, 0x9F, 0x70, 0x9F, 0x71, /* 0x20-0x23 */
+	0x9F, 0x72, 0x9F, 0x73, 0xBD, 0xB9, 0x9F, 0x74, /* 0x24-0x27 */
+	0x9F, 0x75, 0x9F, 0x76, 0x9F, 0x77, 0x9F, 0x78, /* 0x28-0x2B */
+	0x9F, 0x79, 0x9F, 0x7A, 0x9F, 0x7B, 0xEC, 0xCC, /* 0x2C-0x2F */
+	0xD1, 0xE6, 0xEC, 0xCD, 0x9F, 0x7C, 0x9F, 0x7D, /* 0x30-0x33 */
+	0x9F, 0x7E, 0x9F, 0x80, 0xC8, 0xBB, 0x9F, 0x81, /* 0x34-0x37 */
+	0x9F, 0x82, 0x9F, 0x83, 0x9F, 0x84, 0x9F, 0x85, /* 0x38-0x3B */
+	0x9F, 0x86, 0x9F, 0x87, 0x9F, 0x88, 0x9F, 0x89, /* 0x3C-0x3F */
+	0x9F, 0x8A, 0x9F, 0x8B, 0x9F, 0x8C, 0x9F, 0x8D, /* 0x40-0x43 */
+	0x9F, 0x8E, 0xEC, 0xD1, 0x9F, 0x8F, 0x9F, 0x90, /* 0x44-0x47 */
+	0x9F, 0x91, 0x9F, 0x92, 0xEC, 0xD3, 0x9F, 0x93, /* 0x48-0x4B */
+	0xBB, 0xCD, 0x9F, 0x94, 0xBC, 0xE5, 0x9F, 0x95, /* 0x4C-0x4F */
+	0x9F, 0x96, 0x9F, 0x97, 0x9F, 0x98, 0x9F, 0x99, /* 0x50-0x53 */
+	0x9F, 0x9A, 0x9F, 0x9B, 0x9F, 0x9C, 0x9F, 0x9D, /* 0x54-0x57 */
+	0x9F, 0x9E, 0x9F, 0x9F, 0x9F, 0xA0, 0x9F, 0xA1, /* 0x58-0x5B */
+	0xEC, 0xCF, 0x9F, 0xA2, 0xC9, 0xB7, 0x9F, 0xA3, /* 0x5C-0x5F */
+	0x9F, 0xA4, 0x9F, 0xA5, 0x9F, 0xA6, 0x9F, 0xA7, /* 0x60-0x63 */
+	0xC3, 0xBA, 0x9F, 0xA8, 0xEC, 0xE3, 0xD5, 0xD5, /* 0x64-0x67 */
+	0xEC, 0xD0, 0x9F, 0xA9, 0x9F, 0xAA, 0x9F, 0xAB, /* 0x68-0x6B */
+	0x9F, 0xAC, 0x9F, 0xAD, 0xD6, 0xF3, 0x9F, 0xAE, /* 0x6C-0x6F */
+	0x9F, 0xAF, 0x9F, 0xB0, 0xEC, 0xD2, 0xEC, 0xCE, /* 0x70-0x73 */
+	0x9F, 0xB1, 0x9F, 0xB2, 0x9F, 0xB3, 0x9F, 0xB4, /* 0x74-0x77 */
+	0xEC, 0xD4, 0x9F, 0xB5, 0xEC, 0xD5, 0x9F, 0xB6, /* 0x78-0x7B */
+	0x9F, 0xB7, 0xC9, 0xBF, 0x9F, 0xB8, 0x9F, 0xB9, /* 0x7C-0x7F */
+	
+	0x9F, 0xBA, 0x9F, 0xBB, 0x9F, 0xBC, 0x9F, 0xBD, /* 0x80-0x83 */
+	0xCF, 0xA8, 0x9F, 0xBE, 0x9F, 0xBF, 0x9F, 0xC0, /* 0x84-0x87 */
+	0x9F, 0xC1, 0x9F, 0xC2, 0xD0, 0xDC, 0x9F, 0xC3, /* 0x88-0x8B */
+	0x9F, 0xC4, 0x9F, 0xC5, 0x9F, 0xC6, 0xD1, 0xAC, /* 0x8C-0x8F */
+	0x9F, 0xC7, 0x9F, 0xC8, 0x9F, 0xC9, 0x9F, 0xCA, /* 0x90-0x93 */
+	0xC8, 0xDB, 0x9F, 0xCB, 0x9F, 0xCC, 0x9F, 0xCD, /* 0x94-0x97 */
+	0xEC, 0xD6, 0xCE, 0xF5, 0x9F, 0xCE, 0x9F, 0xCF, /* 0x98-0x9B */
+	0x9F, 0xD0, 0x9F, 0xD1, 0x9F, 0xD2, 0xCA, 0xEC, /* 0x9C-0x9F */
+	0xEC, 0xDA, 0x9F, 0xD3, 0x9F, 0xD4, 0x9F, 0xD5, /* 0xA0-0xA3 */
+	0x9F, 0xD6, 0x9F, 0xD7, 0x9F, 0xD8, 0x9F, 0xD9, /* 0xA4-0xA7 */
+	0xEC, 0xD9, 0x9F, 0xDA, 0x9F, 0xDB, 0x9F, 0xDC, /* 0xA8-0xAB */
+	0xB0, 0xBE, 0x9F, 0xDD, 0x9F, 0xDE, 0x9F, 0xDF, /* 0xAC-0xAF */
+	0x9F, 0xE0, 0x9F, 0xE1, 0x9F, 0xE2, 0xEC, 0xD7, /* 0xB0-0xB3 */
+	0x9F, 0xE3, 0xEC, 0xD8, 0x9F, 0xE4, 0x9F, 0xE5, /* 0xB4-0xB7 */
+	0x9F, 0xE6, 0xEC, 0xE4, 0x9F, 0xE7, 0x9F, 0xE8, /* 0xB8-0xBB */
+	0x9F, 0xE9, 0x9F, 0xEA, 0x9F, 0xEB, 0x9F, 0xEC, /* 0xBC-0xBF */
+	0x9F, 0xED, 0x9F, 0xEE, 0x9F, 0xEF, 0xC8, 0xBC, /* 0xC0-0xC3 */
+	0x9F, 0xF0, 0x9F, 0xF1, 0x9F, 0xF2, 0x9F, 0xF3, /* 0xC4-0xC7 */
+	0x9F, 0xF4, 0x9F, 0xF5, 0x9F, 0xF6, 0x9F, 0xF7, /* 0xC8-0xCB */
+	0x9F, 0xF8, 0x9F, 0xF9, 0xC1, 0xC7, 0x9F, 0xFA, /* 0xCC-0xCF */
+	0x9F, 0xFB, 0x9F, 0xFC, 0x9F, 0xFD, 0x9F, 0xFE, /* 0xD0-0xD3 */
+	0xEC, 0xDC, 0xD1, 0xE0, 0xA0, 0x40, 0xA0, 0x41, /* 0xD4-0xD7 */
+	0xA0, 0x42, 0xA0, 0x43, 0xA0, 0x44, 0xA0, 0x45, /* 0xD8-0xDB */
+	0xA0, 0x46, 0xA0, 0x47, 0xA0, 0x48, 0xA0, 0x49, /* 0xDC-0xDF */
+	0xEC, 0xDB, 0xA0, 0x4A, 0xA0, 0x4B, 0xA0, 0x4C, /* 0xE0-0xE3 */
+	0xA0, 0x4D, 0xD4, 0xEF, 0xA0, 0x4E, 0xEC, 0xDD, /* 0xE4-0xE7 */
+	0xA0, 0x4F, 0xA0, 0x50, 0xA0, 0x51, 0xA0, 0x52, /* 0xE8-0xEB */
+	0xA0, 0x53, 0xA0, 0x54, 0xDB, 0xC6, 0xA0, 0x55, /* 0xEC-0xEF */
+	0xA0, 0x56, 0xA0, 0x57, 0xA0, 0x58, 0xA0, 0x59, /* 0xF0-0xF3 */
+	0xA0, 0x5A, 0xA0, 0x5B, 0xA0, 0x5C, 0xA0, 0x5D, /* 0xF4-0xF7 */
+	0xA0, 0x5E, 0xEC, 0xDE, 0xA0, 0x5F, 0xA0, 0x60, /* 0xF8-0xFB */
+	0xA0, 0x61, 0xA0, 0x62, 0xA0, 0x63, 0xA0, 0x64, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_72[512] = {
+	0xA0, 0x65, 0xA0, 0x66, 0xA0, 0x67, 0xA0, 0x68, /* 0x00-0x03 */
+	0xA0, 0x69, 0xA0, 0x6A, 0xB1, 0xAC, 0xA0, 0x6B, /* 0x04-0x07 */
+	0xA0, 0x6C, 0xA0, 0x6D, 0xA0, 0x6E, 0xA0, 0x6F, /* 0x08-0x0B */
+	0xA0, 0x70, 0xA0, 0x71, 0xA0, 0x72, 0xA0, 0x73, /* 0x0C-0x0F */
+	0xA0, 0x74, 0xA0, 0x75, 0xA0, 0x76, 0xA0, 0x77, /* 0x10-0x13 */
+	0xA0, 0x78, 0xA0, 0x79, 0xA0, 0x7A, 0xA0, 0x7B, /* 0x14-0x17 */
+	0xA0, 0x7C, 0xA0, 0x7D, 0xA0, 0x7E, 0xA0, 0x80, /* 0x18-0x1B */
+	0xA0, 0x81, 0xEC, 0xDF, 0xA0, 0x82, 0xA0, 0x83, /* 0x1C-0x1F */
+	0xA0, 0x84, 0xA0, 0x85, 0xA0, 0x86, 0xA0, 0x87, /* 0x20-0x23 */
+	0xA0, 0x88, 0xA0, 0x89, 0xA0, 0x8A, 0xA0, 0x8B, /* 0x24-0x27 */
+	0xEC, 0xE0, 0xA0, 0x8C, 0xD7, 0xA6, 0xA0, 0x8D, /* 0x28-0x2B */
+	0xC5, 0xC0, 0xA0, 0x8E, 0xA0, 0x8F, 0xA0, 0x90, /* 0x2C-0x2F */
+	0xEB, 0xBC, 0xB0, 0xAE, 0xA0, 0x91, 0xA0, 0x92, /* 0x30-0x33 */
+	0xA0, 0x93, 0xBE, 0xF4, 0xB8, 0xB8, 0xD2, 0xAF, /* 0x34-0x37 */
+	0xB0, 0xD6, 0xB5, 0xF9, 0xA0, 0x94, 0xD8, 0xB3, /* 0x38-0x3B */
+	0xA0, 0x95, 0xCB, 0xAC, 0xA0, 0x96, 0xE3, 0xDD, /* 0x3C-0x3F */
+	0xA0, 0x97, 0xA0, 0x98, 0xA0, 0x99, 0xA0, 0x9A, /* 0x40-0x43 */
+	0xA0, 0x9B, 0xA0, 0x9C, 0xA0, 0x9D, 0xC6, 0xAC, /* 0x44-0x47 */
+	0xB0, 0xE6, 0xA0, 0x9E, 0xA0, 0x9F, 0xA0, 0xA0, /* 0x48-0x4B */
+	0xC5, 0xC6, 0xEB, 0xB9, 0xA0, 0xA1, 0xA0, 0xA2, /* 0x4C-0x4F */
+	0xA0, 0xA3, 0xA0, 0xA4, 0xEB, 0xBA, 0xA0, 0xA5, /* 0x50-0x53 */
+	0xA0, 0xA6, 0xA0, 0xA7, 0xEB, 0xBB, 0xA0, 0xA8, /* 0x54-0x57 */
+	0xA0, 0xA9, 0xD1, 0xC0, 0xA0, 0xAA, 0xC5, 0xA3, /* 0x58-0x5B */
+	0xA0, 0xAB, 0xEA, 0xF2, 0xA0, 0xAC, 0xC4, 0xB2, /* 0x5C-0x5F */
+	0xA0, 0xAD, 0xC4, 0xB5, 0xC0, 0xCE, 0xA0, 0xAE, /* 0x60-0x63 */
+	0xA0, 0xAF, 0xA0, 0xB0, 0xEA, 0xF3, 0xC4, 0xC1, /* 0x64-0x67 */
+	0xA0, 0xB1, 0xCE, 0xEF, 0xA0, 0xB2, 0xA0, 0xB3, /* 0x68-0x6B */
+	0xA0, 0xB4, 0xA0, 0xB5, 0xEA, 0xF0, 0xEA, 0xF4, /* 0x6C-0x6F */
+	0xA0, 0xB6, 0xA0, 0xB7, 0xC9, 0xFC, 0xA0, 0xB8, /* 0x70-0x73 */
+	0xA0, 0xB9, 0xC7, 0xA3, 0xA0, 0xBA, 0xA0, 0xBB, /* 0x74-0x77 */
+	0xA0, 0xBC, 0xCC, 0xD8, 0xCE, 0xFE, 0xA0, 0xBD, /* 0x78-0x7B */
+	0xA0, 0xBE, 0xA0, 0xBF, 0xEA, 0xF5, 0xEA, 0xF6, /* 0x7C-0x7F */
+	
+	0xCF, 0xAC, 0xC0, 0xE7, 0xA0, 0xC0, 0xA0, 0xC1, /* 0x80-0x83 */
+	0xEA, 0xF7, 0xA0, 0xC2, 0xA0, 0xC3, 0xA0, 0xC4, /* 0x84-0x87 */
+	0xA0, 0xC5, 0xA0, 0xC6, 0xB6, 0xBF, 0xEA, 0xF8, /* 0x88-0x8B */
+	0xA0, 0xC7, 0xEA, 0xF9, 0xA0, 0xC8, 0xEA, 0xFA, /* 0x8C-0x8F */
+	0xA0, 0xC9, 0xA0, 0xCA, 0xEA, 0xFB, 0xA0, 0xCB, /* 0x90-0x93 */
+	0xA0, 0xCC, 0xA0, 0xCD, 0xA0, 0xCE, 0xA0, 0xCF, /* 0x94-0x97 */
+	0xA0, 0xD0, 0xA0, 0xD1, 0xA0, 0xD2, 0xA0, 0xD3, /* 0x98-0x9B */
+	0xA0, 0xD4, 0xA0, 0xD5, 0xA0, 0xD6, 0xEA, 0xF1, /* 0x9C-0x9F */
+	0xA0, 0xD7, 0xA0, 0xD8, 0xA0, 0xD9, 0xA0, 0xDA, /* 0xA0-0xA3 */
+	0xA0, 0xDB, 0xA0, 0xDC, 0xA0, 0xDD, 0xA0, 0xDE, /* 0xA4-0xA7 */
+	0xA0, 0xDF, 0xA0, 0xE0, 0xA0, 0xE1, 0xA0, 0xE2, /* 0xA8-0xAB */
+	0xC8, 0xAE, 0xE1, 0xEB, 0xA0, 0xE3, 0xB7, 0xB8, /* 0xAC-0xAF */
+	0xE1, 0xEC, 0xA0, 0xE4, 0xA0, 0xE5, 0xA0, 0xE6, /* 0xB0-0xB3 */
+	0xE1, 0xED, 0xA0, 0xE7, 0xD7, 0xB4, 0xE1, 0xEE, /* 0xB4-0xB7 */
+	0xE1, 0xEF, 0xD3, 0xCC, 0xA0, 0xE8, 0xA0, 0xE9, /* 0xB8-0xBB */
+	0xA0, 0xEA, 0xA0, 0xEB, 0xA0, 0xEC, 0xA0, 0xED, /* 0xBC-0xBF */
+	0xA0, 0xEE, 0xE1, 0xF1, 0xBF, 0xF1, 0xE1, 0xF0, /* 0xC0-0xC3 */
+	0xB5, 0xD2, 0xA0, 0xEF, 0xA0, 0xF0, 0xA0, 0xF1, /* 0xC4-0xC7 */
+	0xB1, 0xB7, 0xA0, 0xF2, 0xA0, 0xF3, 0xA0, 0xF4, /* 0xC8-0xCB */
+	0xA0, 0xF5, 0xE1, 0xF3, 0xE1, 0xF2, 0xA0, 0xF6, /* 0xCC-0xCF */
+	0xBA, 0xFC, 0xA0, 0xF7, 0xE1, 0xF4, 0xA0, 0xF8, /* 0xD0-0xD3 */
+	0xA0, 0xF9, 0xA0, 0xFA, 0xA0, 0xFB, 0xB9, 0xB7, /* 0xD4-0xD7 */
+	0xA0, 0xFC, 0xBE, 0xD1, 0xA0, 0xFD, 0xA0, 0xFE, /* 0xD8-0xDB */
+	0xAA, 0x40, 0xAA, 0x41, 0xC4, 0xFC, 0xAA, 0x42, /* 0xDC-0xDF */
+	0xBA, 0xDD, 0xBD, 0xC6, 0xAA, 0x43, 0xAA, 0x44, /* 0xE0-0xE3 */
+	0xAA, 0x45, 0xAA, 0x46, 0xAA, 0x47, 0xAA, 0x48, /* 0xE4-0xE7 */
+	0xE1, 0xF5, 0xE1, 0xF7, 0xAA, 0x49, 0xAA, 0x4A, /* 0xE8-0xEB */
+	0xB6, 0xC0, 0xCF, 0xC1, 0xCA, 0xA8, 0xE1, 0xF6, /* 0xEC-0xEF */
+	0xD5, 0xF8, 0xD3, 0xFC, 0xE1, 0xF8, 0xE1, 0xFC, /* 0xF0-0xF3 */
+	0xE1, 0xF9, 0xAA, 0x4B, 0xAA, 0x4C, 0xE1, 0xFA, /* 0xF4-0xF7 */
+	0xC0, 0xEA, 0xAA, 0x4D, 0xE1, 0xFE, 0xE2, 0xA1, /* 0xF8-0xFB */
+	0xC0, 0xC7, 0xAA, 0x4E, 0xAA, 0x4F, 0xAA, 0x50, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_73[512] = {
+	0xAA, 0x51, 0xE1, 0xFB, 0xAA, 0x52, 0xE1, 0xFD, /* 0x00-0x03 */
+	0xAA, 0x53, 0xAA, 0x54, 0xAA, 0x55, 0xAA, 0x56, /* 0x04-0x07 */
+	0xAA, 0x57, 0xAA, 0x58, 0xE2, 0xA5, 0xAA, 0x59, /* 0x08-0x0B */
+	0xAA, 0x5A, 0xAA, 0x5B, 0xC1, 0xD4, 0xAA, 0x5C, /* 0x0C-0x0F */
+	0xAA, 0x5D, 0xAA, 0x5E, 0xAA, 0x5F, 0xE2, 0xA3, /* 0x10-0x13 */
+	0xAA, 0x60, 0xE2, 0xA8, 0xB2, 0xFE, 0xE2, 0xA2, /* 0x14-0x17 */
+	0xAA, 0x61, 0xAA, 0x62, 0xAA, 0x63, 0xC3, 0xCD, /* 0x18-0x1B */
+	0xB2, 0xC2, 0xE2, 0xA7, 0xE2, 0xA6, 0xAA, 0x64, /* 0x1C-0x1F */
+	0xAA, 0x65, 0xE2, 0xA4, 0xE2, 0xA9, 0xAA, 0x66, /* 0x20-0x23 */
+	0xAA, 0x67, 0xE2, 0xAB, 0xAA, 0x68, 0xAA, 0x69, /* 0x24-0x27 */
+	0xAA, 0x6A, 0xD0, 0xC9, 0xD6, 0xED, 0xC3, 0xA8, /* 0x28-0x2B */
+	0xE2, 0xAC, 0xAA, 0x6B, 0xCF, 0xD7, 0xAA, 0x6C, /* 0x2C-0x2F */
+	0xAA, 0x6D, 0xE2, 0xAE, 0xAA, 0x6E, 0xAA, 0x6F, /* 0x30-0x33 */
+	0xBA, 0xEF, 0xAA, 0x70, 0xAA, 0x71, 0xE9, 0xE0, /* 0x34-0x37 */
+	0xE2, 0xAD, 0xE2, 0xAA, 0xAA, 0x72, 0xAA, 0x73, /* 0x38-0x3B */
+	0xAA, 0x74, 0xAA, 0x75, 0xBB, 0xAB, 0xD4, 0xB3, /* 0x3C-0x3F */
+	0xAA, 0x76, 0xAA, 0x77, 0xAA, 0x78, 0xAA, 0x79, /* 0x40-0x43 */
+	0xAA, 0x7A, 0xAA, 0x7B, 0xAA, 0x7C, 0xAA, 0x7D, /* 0x44-0x47 */
+	0xAA, 0x7E, 0xAA, 0x80, 0xAA, 0x81, 0xAA, 0x82, /* 0x48-0x4B */
+	0xAA, 0x83, 0xE2, 0xB0, 0xAA, 0x84, 0xAA, 0x85, /* 0x4C-0x4F */
+	0xE2, 0xAF, 0xAA, 0x86, 0xE9, 0xE1, 0xAA, 0x87, /* 0x50-0x53 */
+	0xAA, 0x88, 0xAA, 0x89, 0xAA, 0x8A, 0xE2, 0xB1, /* 0x54-0x57 */
+	0xAA, 0x8B, 0xAA, 0x8C, 0xAA, 0x8D, 0xAA, 0x8E, /* 0x58-0x5B */
+	0xAA, 0x8F, 0xAA, 0x90, 0xAA, 0x91, 0xAA, 0x92, /* 0x5C-0x5F */
+	0xE2, 0xB2, 0xAA, 0x93, 0xAA, 0x94, 0xAA, 0x95, /* 0x60-0x63 */
+	0xAA, 0x96, 0xAA, 0x97, 0xAA, 0x98, 0xAA, 0x99, /* 0x64-0x67 */
+	0xAA, 0x9A, 0xAA, 0x9B, 0xAA, 0x9C, 0xAA, 0x9D, /* 0x68-0x6B */
+	0xE2, 0xB3, 0xCC, 0xA1, 0xAA, 0x9E, 0xE2, 0xB4, /* 0x6C-0x6F */
+	0xAA, 0x9F, 0xAA, 0xA0, 0xAB, 0x40, 0xAB, 0x41, /* 0x70-0x73 */
+	0xAB, 0x42, 0xAB, 0x43, 0xAB, 0x44, 0xAB, 0x45, /* 0x74-0x77 */
+	0xAB, 0x46, 0xAB, 0x47, 0xAB, 0x48, 0xAB, 0x49, /* 0x78-0x7B */
+	0xAB, 0x4A, 0xAB, 0x4B, 0xE2, 0xB5, 0xAB, 0x4C, /* 0x7C-0x7F */
+	
+	0xAB, 0x4D, 0xAB, 0x4E, 0xAB, 0x4F, 0xAB, 0x50, /* 0x80-0x83 */
+	0xD0, 0xFE, 0xAB, 0x51, 0xAB, 0x52, 0xC2, 0xCA, /* 0x84-0x87 */
+	0xAB, 0x53, 0xD3, 0xF1, 0xAB, 0x54, 0xCD, 0xF5, /* 0x88-0x8B */
+	0xAB, 0x55, 0xAB, 0x56, 0xE7, 0xE0, 0xAB, 0x57, /* 0x8C-0x8F */
+	0xAB, 0x58, 0xE7, 0xE1, 0xAB, 0x59, 0xAB, 0x5A, /* 0x90-0x93 */
+	0xAB, 0x5B, 0xAB, 0x5C, 0xBE, 0xC1, 0xAB, 0x5D, /* 0x94-0x97 */
+	0xAB, 0x5E, 0xAB, 0x5F, 0xAB, 0x60, 0xC2, 0xEA, /* 0x98-0x9B */
+	0xAB, 0x61, 0xAB, 0x62, 0xAB, 0x63, 0xE7, 0xE4, /* 0x9C-0x9F */
+	0xAB, 0x64, 0xAB, 0x65, 0xE7, 0xE3, 0xAB, 0x66, /* 0xA0-0xA3 */
+	0xAB, 0x67, 0xAB, 0x68, 0xAB, 0x69, 0xAB, 0x6A, /* 0xA4-0xA7 */
+	0xAB, 0x6B, 0xCD, 0xE6, 0xAB, 0x6C, 0xC3, 0xB5, /* 0xA8-0xAB */
+	0xAB, 0x6D, 0xAB, 0x6E, 0xE7, 0xE2, 0xBB, 0xB7, /* 0xAC-0xAF */
+	0xCF, 0xD6, 0xAB, 0x6F, 0xC1, 0xE1, 0xE7, 0xE9, /* 0xB0-0xB3 */
+	0xAB, 0x70, 0xAB, 0x71, 0xAB, 0x72, 0xE7, 0xE8, /* 0xB4-0xB7 */
+	0xAB, 0x73, 0xAB, 0x74, 0xE7, 0xF4, 0xB2, 0xA3, /* 0xB8-0xBB */
+	0xAB, 0x75, 0xAB, 0x76, 0xAB, 0x77, 0xAB, 0x78, /* 0xBC-0xBF */
+	0xE7, 0xEA, 0xAB, 0x79, 0xE7, 0xE6, 0xAB, 0x7A, /* 0xC0-0xC3 */
+	0xAB, 0x7B, 0xAB, 0x7C, 0xAB, 0x7D, 0xAB, 0x7E, /* 0xC4-0xC7 */
+	0xE7, 0xEC, 0xE7, 0xEB, 0xC9, 0xBA, 0xAB, 0x80, /* 0xC8-0xCB */
+	0xAB, 0x81, 0xD5, 0xE4, 0xAB, 0x82, 0xE7, 0xE5, /* 0xCC-0xCF */
+	0xB7, 0xA9, 0xE7, 0xE7, 0xAB, 0x83, 0xAB, 0x84, /* 0xD0-0xD3 */
+	0xAB, 0x85, 0xAB, 0x86, 0xAB, 0x87, 0xAB, 0x88, /* 0xD4-0xD7 */
+	0xAB, 0x89, 0xE7, 0xEE, 0xAB, 0x8A, 0xAB, 0x8B, /* 0xD8-0xDB */
+	0xAB, 0x8C, 0xAB, 0x8D, 0xE7, 0xF3, 0xAB, 0x8E, /* 0xDC-0xDF */
+	0xD6, 0xE9, 0xAB, 0x8F, 0xAB, 0x90, 0xAB, 0x91, /* 0xE0-0xE3 */
+	0xAB, 0x92, 0xE7, 0xED, 0xAB, 0x93, 0xE7, 0xF2, /* 0xE4-0xE7 */
+	0xAB, 0x94, 0xE7, 0xF1, 0xAB, 0x95, 0xAB, 0x96, /* 0xE8-0xEB */
+	0xAB, 0x97, 0xB0, 0xE0, 0xAB, 0x98, 0xAB, 0x99, /* 0xEC-0xEF */
+	0xAB, 0x9A, 0xAB, 0x9B, 0xE7, 0xF5, 0xAB, 0x9C, /* 0xF0-0xF3 */
+	0xAB, 0x9D, 0xAB, 0x9E, 0xAB, 0x9F, 0xAB, 0xA0, /* 0xF4-0xF7 */
+	0xAC, 0x40, 0xAC, 0x41, 0xAC, 0x42, 0xAC, 0x43, /* 0xF8-0xFB */
+	0xAC, 0x44, 0xAC, 0x45, 0xAC, 0x46, 0xAC, 0x47, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_74[512] = {
+	0xAC, 0x48, 0xAC, 0x49, 0xAC, 0x4A, 0xC7, 0xF2, /* 0x00-0x03 */
+	0xAC, 0x4B, 0xC0, 0xC5, 0xC0, 0xED, 0xAC, 0x4C, /* 0x04-0x07 */
+	0xAC, 0x4D, 0xC1, 0xF0, 0xE7, 0xF0, 0xAC, 0x4E, /* 0x08-0x0B */
+	0xAC, 0x4F, 0xAC, 0x50, 0xAC, 0x51, 0xE7, 0xF6, /* 0x0C-0x0F */
+	0xCB, 0xF6, 0xAC, 0x52, 0xAC, 0x53, 0xAC, 0x54, /* 0x10-0x13 */
+	0xAC, 0x55, 0xAC, 0x56, 0xAC, 0x57, 0xAC, 0x58, /* 0x14-0x17 */
+	0xAC, 0x59, 0xAC, 0x5A, 0xE8, 0xA2, 0xE8, 0xA1, /* 0x18-0x1B */
+	0xAC, 0x5B, 0xAC, 0x5C, 0xAC, 0x5D, 0xAC, 0x5E, /* 0x1C-0x1F */
+	0xAC, 0x5F, 0xAC, 0x60, 0xD7, 0xC1, 0xAC, 0x61, /* 0x20-0x23 */
+	0xAC, 0x62, 0xE7, 0xFA, 0xE7, 0xF9, 0xAC, 0x63, /* 0x24-0x27 */
+	0xE7, 0xFB, 0xAC, 0x64, 0xE7, 0xF7, 0xAC, 0x65, /* 0x28-0x2B */
+	0xE7, 0xFE, 0xAC, 0x66, 0xE7, 0xFD, 0xAC, 0x67, /* 0x2C-0x2F */
+	0xE7, 0xFC, 0xAC, 0x68, 0xAC, 0x69, 0xC1, 0xD5, /* 0x30-0x33 */
+	0xC7, 0xD9, 0xC5, 0xFD, 0xC5, 0xC3, 0xAC, 0x6A, /* 0x34-0x37 */
+	0xAC, 0x6B, 0xAC, 0x6C, 0xAC, 0x6D, 0xAC, 0x6E, /* 0x38-0x3B */
+	0xC7, 0xED, 0xAC, 0x6F, 0xAC, 0x70, 0xAC, 0x71, /* 0x3C-0x3F */
+	0xAC, 0x72, 0xE8, 0xA3, 0xAC, 0x73, 0xAC, 0x74, /* 0x40-0x43 */
+	0xAC, 0x75, 0xAC, 0x76, 0xAC, 0x77, 0xAC, 0x78, /* 0x44-0x47 */
+	0xAC, 0x79, 0xAC, 0x7A, 0xAC, 0x7B, 0xAC, 0x7C, /* 0x48-0x4B */
+	0xAC, 0x7D, 0xAC, 0x7E, 0xAC, 0x80, 0xAC, 0x81, /* 0x4C-0x4F */
+	0xAC, 0x82, 0xAC, 0x83, 0xAC, 0x84, 0xAC, 0x85, /* 0x50-0x53 */
+	0xAC, 0x86, 0xE8, 0xA6, 0xAC, 0x87, 0xE8, 0xA5, /* 0x54-0x57 */
+	0xAC, 0x88, 0xE8, 0xA7, 0xBA, 0xF7, 0xE7, 0xF8, /* 0x58-0x5B */
+	0xE8, 0xA4, 0xAC, 0x89, 0xC8, 0xF0, 0xC9, 0xAA, /* 0x5C-0x5F */
+	0xAC, 0x8A, 0xAC, 0x8B, 0xAC, 0x8C, 0xAC, 0x8D, /* 0x60-0x63 */
+	0xAC, 0x8E, 0xAC, 0x8F, 0xAC, 0x90, 0xAC, 0x91, /* 0x64-0x67 */
+	0xAC, 0x92, 0xAC, 0x93, 0xAC, 0x94, 0xAC, 0x95, /* 0x68-0x6B */
+	0xAC, 0x96, 0xE8, 0xA9, 0xAC, 0x97, 0xAC, 0x98, /* 0x6C-0x6F */
+	0xB9, 0xE5, 0xAC, 0x99, 0xAC, 0x9A, 0xAC, 0x9B, /* 0x70-0x73 */
+	0xAC, 0x9C, 0xAC, 0x9D, 0xD1, 0xFE, 0xE8, 0xA8, /* 0x74-0x77 */
+	0xAC, 0x9E, 0xAC, 0x9F, 0xAC, 0xA0, 0xAD, 0x40, /* 0x78-0x7B */
+	0xAD, 0x41, 0xAD, 0x42, 0xE8, 0xAA, 0xAD, 0x43, /* 0x7C-0x7F */
+	
+	0xE8, 0xAD, 0xE8, 0xAE, 0xAD, 0x44, 0xC1, 0xA7, /* 0x80-0x83 */
+	0xAD, 0x45, 0xAD, 0x46, 0xAD, 0x47, 0xE8, 0xAF, /* 0x84-0x87 */
+	0xAD, 0x48, 0xAD, 0x49, 0xAD, 0x4A, 0xE8, 0xB0, /* 0x88-0x8B */
+	0xAD, 0x4B, 0xAD, 0x4C, 0xE8, 0xAC, 0xAD, 0x4D, /* 0x8C-0x8F */
+	0xE8, 0xB4, 0xAD, 0x4E, 0xAD, 0x4F, 0xAD, 0x50, /* 0x90-0x93 */
+	0xAD, 0x51, 0xAD, 0x52, 0xAD, 0x53, 0xAD, 0x54, /* 0x94-0x97 */
+	0xAD, 0x55, 0xAD, 0x56, 0xAD, 0x57, 0xAD, 0x58, /* 0x98-0x9B */
+	0xE8, 0xAB, 0xAD, 0x59, 0xE8, 0xB1, 0xAD, 0x5A, /* 0x9C-0x9F */
+	0xAD, 0x5B, 0xAD, 0x5C, 0xAD, 0x5D, 0xAD, 0x5E, /* 0xA0-0xA3 */
+	0xAD, 0x5F, 0xAD, 0x60, 0xAD, 0x61, 0xE8, 0xB5, /* 0xA4-0xA7 */
+	0xE8, 0xB2, 0xE8, 0xB3, 0xAD, 0x62, 0xAD, 0x63, /* 0xA8-0xAB */
+	0xAD, 0x64, 0xAD, 0x65, 0xAD, 0x66, 0xAD, 0x67, /* 0xAC-0xAF */
+	0xAD, 0x68, 0xAD, 0x69, 0xAD, 0x6A, 0xAD, 0x6B, /* 0xB0-0xB3 */
+	0xAD, 0x6C, 0xAD, 0x6D, 0xAD, 0x6E, 0xAD, 0x6F, /* 0xB4-0xB7 */
+	0xAD, 0x70, 0xAD, 0x71, 0xE8, 0xB7, 0xAD, 0x72, /* 0xB8-0xBB */
+	0xAD, 0x73, 0xAD, 0x74, 0xAD, 0x75, 0xAD, 0x76, /* 0xBC-0xBF */
+	0xAD, 0x77, 0xAD, 0x78, 0xAD, 0x79, 0xAD, 0x7A, /* 0xC0-0xC3 */
+	0xAD, 0x7B, 0xAD, 0x7C, 0xAD, 0x7D, 0xAD, 0x7E, /* 0xC4-0xC7 */
+	0xAD, 0x80, 0xAD, 0x81, 0xAD, 0x82, 0xAD, 0x83, /* 0xC8-0xCB */
+	0xAD, 0x84, 0xAD, 0x85, 0xAD, 0x86, 0xAD, 0x87, /* 0xCC-0xCF */
+	0xAD, 0x88, 0xAD, 0x89, 0xE8, 0xB6, 0xAD, 0x8A, /* 0xD0-0xD3 */
+	0xAD, 0x8B, 0xAD, 0x8C, 0xAD, 0x8D, 0xAD, 0x8E, /* 0xD4-0xD7 */
+	0xAD, 0x8F, 0xAD, 0x90, 0xAD, 0x91, 0xAD, 0x92, /* 0xD8-0xDB */
+	0xB9, 0xCF, 0xAD, 0x93, 0xF0, 0xAC, 0xAD, 0x94, /* 0xDC-0xDF */
+	0xF0, 0xAD, 0xAD, 0x95, 0xC6, 0xB0, 0xB0, 0xEA, /* 0xE0-0xE3 */
+	0xC8, 0xBF, 0xAD, 0x96, 0xCD, 0xDF, 0xAD, 0x97, /* 0xE4-0xE7 */
+	0xAD, 0x98, 0xAD, 0x99, 0xAD, 0x9A, 0xAD, 0x9B, /* 0xE8-0xEB */
+	0xAD, 0x9C, 0xAD, 0x9D, 0xCE, 0xCD, 0xEA, 0xB1, /* 0xEC-0xEF */
+	0xAD, 0x9E, 0xAD, 0x9F, 0xAD, 0xA0, 0xAE, 0x40, /* 0xF0-0xF3 */
+	0xEA, 0xB2, 0xAE, 0x41, 0xC6, 0xBF, 0xB4, 0xC9, /* 0xF4-0xF7 */
+	0xAE, 0x42, 0xAE, 0x43, 0xAE, 0x44, 0xAE, 0x45, /* 0xF8-0xFB */
+	0xAE, 0x46, 0xAE, 0x47, 0xAE, 0x48, 0xEA, 0xB3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_75[512] = {
+	0xAE, 0x49, 0xAE, 0x4A, 0xAE, 0x4B, 0xAE, 0x4C, /* 0x00-0x03 */
+	0xD5, 0xE7, 0xAE, 0x4D, 0xAE, 0x4E, 0xAE, 0x4F, /* 0x04-0x07 */
+	0xAE, 0x50, 0xAE, 0x51, 0xAE, 0x52, 0xAE, 0x53, /* 0x08-0x0B */
+	0xAE, 0x54, 0xDD, 0xF9, 0xAE, 0x55, 0xEA, 0xB4, /* 0x0C-0x0F */
+	0xAE, 0x56, 0xEA, 0xB5, 0xAE, 0x57, 0xEA, 0xB6, /* 0x10-0x13 */
+	0xAE, 0x58, 0xAE, 0x59, 0xAE, 0x5A, 0xAE, 0x5B, /* 0x14-0x17 */
+	0xB8, 0xCA, 0xDF, 0xB0, 0xC9, 0xF5, 0xAE, 0x5C, /* 0x18-0x1B */
+	0xCC, 0xF0, 0xAE, 0x5D, 0xAE, 0x5E, 0xC9, 0xFA, /* 0x1C-0x1F */
+	0xAE, 0x5F, 0xAE, 0x60, 0xAE, 0x61, 0xAE, 0x62, /* 0x20-0x23 */
+	0xAE, 0x63, 0xC9, 0xFB, 0xAE, 0x64, 0xAE, 0x65, /* 0x24-0x27 */
+	0xD3, 0xC3, 0xCB, 0xA6, 0xAE, 0x66, 0xB8, 0xA6, /* 0x28-0x2B */
+	0xF0, 0xAE, 0xB1, 0xC2, 0xAE, 0x67, 0xE5, 0xB8, /* 0x2C-0x2F */
+	0xCC, 0xEF, 0xD3, 0xC9, 0xBC, 0xD7, 0xC9, 0xEA, /* 0x30-0x33 */
+	0xAE, 0x68, 0xB5, 0xE7, 0xAE, 0x69, 0xC4, 0xD0, /* 0x34-0x37 */
+	0xB5, 0xE9, 0xAE, 0x6A, 0xEE, 0xAE, 0xBB, 0xAD, /* 0x38-0x3B */
+	0xAE, 0x6B, 0xAE, 0x6C, 0xE7, 0xDE, 0xAE, 0x6D, /* 0x3C-0x3F */
+	0xEE, 0xAF, 0xAE, 0x6E, 0xAE, 0x6F, 0xAE, 0x70, /* 0x40-0x43 */
+	0xAE, 0x71, 0xB3, 0xA9, 0xAE, 0x72, 0xAE, 0x73, /* 0x44-0x47 */
+	0xEE, 0xB2, 0xAE, 0x74, 0xAE, 0x75, 0xEE, 0xB1, /* 0x48-0x4B */
+	0xBD, 0xE7, 0xAE, 0x76, 0xEE, 0xB0, 0xCE, 0xB7, /* 0x4C-0x4F */
+	0xAE, 0x77, 0xAE, 0x78, 0xAE, 0x79, 0xAE, 0x7A, /* 0x50-0x53 */
+	0xC5, 0xCF, 0xAE, 0x7B, 0xAE, 0x7C, 0xAE, 0x7D, /* 0x54-0x57 */
+	0xAE, 0x7E, 0xC1, 0xF4, 0xDB, 0xCE, 0xEE, 0xB3, /* 0x58-0x5B */
+	0xD0, 0xF3, 0xAE, 0x80, 0xAE, 0x81, 0xAE, 0x82, /* 0x5C-0x5F */
+	0xAE, 0x83, 0xAE, 0x84, 0xAE, 0x85, 0xAE, 0x86, /* 0x60-0x63 */
+	0xAE, 0x87, 0xC2, 0xD4, 0xC6, 0xE8, 0xAE, 0x88, /* 0x64-0x67 */
+	0xAE, 0x89, 0xAE, 0x8A, 0xB7, 0xAC, 0xAE, 0x8B, /* 0x68-0x6B */
+	0xAE, 0x8C, 0xAE, 0x8D, 0xAE, 0x8E, 0xAE, 0x8F, /* 0x6C-0x6F */
+	0xAE, 0x90, 0xAE, 0x91, 0xEE, 0xB4, 0xAE, 0x92, /* 0x70-0x73 */
+	0xB3, 0xEB, 0xAE, 0x93, 0xAE, 0x94, 0xAE, 0x95, /* 0x74-0x77 */
+	0xBB, 0xFB, 0xEE, 0xB5, 0xAE, 0x96, 0xAE, 0x97, /* 0x78-0x7B */
+	0xAE, 0x98, 0xAE, 0x99, 0xAE, 0x9A, 0xE7, 0xDC, /* 0x7C-0x7F */
+	
+	0xAE, 0x9B, 0xAE, 0x9C, 0xAE, 0x9D, 0xEE, 0xB6, /* 0x80-0x83 */
+	0xAE, 0x9E, 0xAE, 0x9F, 0xBD, 0xAE, 0xAE, 0xA0, /* 0x84-0x87 */
+	0xAF, 0x40, 0xAF, 0x41, 0xAF, 0x42, 0xF1, 0xE2, /* 0x88-0x8B */
+	0xAF, 0x43, 0xAF, 0x44, 0xAF, 0x45, 0xCA, 0xE8, /* 0x8C-0x8F */
+	0xAF, 0x46, 0xD2, 0xC9, 0xF0, 0xDA, 0xAF, 0x47, /* 0x90-0x93 */
+	0xF0, 0xDB, 0xAF, 0x48, 0xF0, 0xDC, 0xC1, 0xC6, /* 0x94-0x97 */
+	0xAF, 0x49, 0xB8, 0xED, 0xBE, 0xCE, 0xAF, 0x4A, /* 0x98-0x9B */
+	0xAF, 0x4B, 0xF0, 0xDE, 0xAF, 0x4C, 0xC5, 0xB1, /* 0x9C-0x9F */
+	0xF0, 0xDD, 0xD1, 0xF1, 0xAF, 0x4D, 0xF0, 0xE0, /* 0xA0-0xA3 */
+	0xB0, 0xCC, 0xBD, 0xEA, 0xAF, 0x4E, 0xAF, 0x4F, /* 0xA4-0xA7 */
+	0xAF, 0x50, 0xAF, 0x51, 0xAF, 0x52, 0xD2, 0xDF, /* 0xA8-0xAB */
+	0xF0, 0xDF, 0xAF, 0x53, 0xB4, 0xAF, 0xB7, 0xE8, /* 0xAC-0xAF */
+	0xF0, 0xE6, 0xF0, 0xE5, 0xC6, 0xA3, 0xF0, 0xE1, /* 0xB0-0xB3 */
+	0xF0, 0xE2, 0xB4, 0xC3, 0xAF, 0x54, 0xAF, 0x55, /* 0xB4-0xB7 */
+	0xF0, 0xE3, 0xD5, 0xEE, 0xAF, 0x56, 0xAF, 0x57, /* 0xB8-0xBB */
+	0xCC, 0xDB, 0xBE, 0xD2, 0xBC, 0xB2, 0xAF, 0x58, /* 0xBC-0xBF */
+	0xAF, 0x59, 0xAF, 0x5A, 0xF0, 0xE8, 0xF0, 0xE7, /* 0xC0-0xC3 */
+	0xF0, 0xE4, 0xB2, 0xA1, 0xAF, 0x5B, 0xD6, 0xA2, /* 0xC4-0xC7 */
+	0xD3, 0xB8, 0xBE, 0xB7, 0xC8, 0xAC, 0xAF, 0x5C, /* 0xC8-0xCB */
+	0xAF, 0x5D, 0xF0, 0xEA, 0xAF, 0x5E, 0xAF, 0x5F, /* 0xCC-0xCF */
+	0xAF, 0x60, 0xAF, 0x61, 0xD1, 0xF7, 0xAF, 0x62, /* 0xD0-0xD3 */
+	0xD6, 0xCC, 0xBA, 0xDB, 0xF0, 0xE9, 0xAF, 0x63, /* 0xD4-0xD7 */
+	0xB6, 0xBB, 0xAF, 0x64, 0xAF, 0x65, 0xCD, 0xB4, /* 0xD8-0xDB */
+	0xAF, 0x66, 0xAF, 0x67, 0xC6, 0xA6, 0xAF, 0x68, /* 0xDC-0xDF */
+	0xAF, 0x69, 0xAF, 0x6A, 0xC1, 0xA1, 0xF0, 0xEB, /* 0xE0-0xE3 */
+	0xF0, 0xEE, 0xAF, 0x6B, 0xF0, 0xED, 0xF0, 0xF0, /* 0xE4-0xE7 */
+	0xF0, 0xEC, 0xAF, 0x6C, 0xBB, 0xBE, 0xF0, 0xEF, /* 0xE8-0xEB */
+	0xAF, 0x6D, 0xAF, 0x6E, 0xAF, 0x6F, 0xAF, 0x70, /* 0xEC-0xEF */
+	0xCC, 0xB5, 0xF0, 0xF2, 0xAF, 0x71, 0xAF, 0x72, /* 0xF0-0xF3 */
+	0xB3, 0xD5, 0xAF, 0x73, 0xAF, 0x74, 0xAF, 0x75, /* 0xF4-0xF7 */
+	0xAF, 0x76, 0xB1, 0xD4, 0xAF, 0x77, 0xAF, 0x78, /* 0xF8-0xFB */
+	0xF0, 0xF3, 0xAF, 0x79, 0xAF, 0x7A, 0xF0, 0xF4, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_76[512] = {
+	0xF0, 0xF6, 0xB4, 0xE1, 0xAF, 0x7B, 0xF0, 0xF1, /* 0x00-0x03 */
+	0xAF, 0x7C, 0xF0, 0xF7, 0xAF, 0x7D, 0xAF, 0x7E, /* 0x04-0x07 */
+	0xAF, 0x80, 0xAF, 0x81, 0xF0, 0xFA, 0xAF, 0x82, /* 0x08-0x0B */
+	0xF0, 0xF8, 0xAF, 0x83, 0xAF, 0x84, 0xAF, 0x85, /* 0x0C-0x0F */
+	0xF0, 0xF5, 0xAF, 0x86, 0xAF, 0x87, 0xAF, 0x88, /* 0x10-0x13 */
+	0xAF, 0x89, 0xF0, 0xFD, 0xAF, 0x8A, 0xF0, 0xF9, /* 0x14-0x17 */
+	0xF0, 0xFC, 0xF0, 0xFE, 0xAF, 0x8B, 0xF1, 0xA1, /* 0x18-0x1B */
+	0xAF, 0x8C, 0xAF, 0x8D, 0xAF, 0x8E, 0xCE, 0xC1, /* 0x1C-0x1F */
+	0xF1, 0xA4, 0xAF, 0x8F, 0xF1, 0xA3, 0xAF, 0x90, /* 0x20-0x23 */
+	0xC1, 0xF6, 0xF0, 0xFB, 0xCA, 0xDD, 0xAF, 0x91, /* 0x24-0x27 */
+	0xAF, 0x92, 0xB4, 0xF1, 0xB1, 0xF1, 0xCC, 0xB1, /* 0x28-0x2B */
+	0xAF, 0x93, 0xF1, 0xA6, 0xAF, 0x94, 0xAF, 0x95, /* 0x2C-0x2F */
+	0xF1, 0xA7, 0xAF, 0x96, 0xAF, 0x97, 0xF1, 0xAC, /* 0x30-0x33 */
+	0xD5, 0xCE, 0xF1, 0xA9, 0xAF, 0x98, 0xAF, 0x99, /* 0x34-0x37 */
+	0xC8, 0xB3, 0xAF, 0x9A, 0xAF, 0x9B, 0xAF, 0x9C, /* 0x38-0x3B */
+	0xF1, 0xA2, 0xAF, 0x9D, 0xF1, 0xAB, 0xF1, 0xA8, /* 0x3C-0x3F */
+	0xF1, 0xA5, 0xAF, 0x9E, 0xAF, 0x9F, 0xF1, 0xAA, /* 0x40-0x43 */
+	0xAF, 0xA0, 0xB0, 0x40, 0xB0, 0x41, 0xB0, 0x42, /* 0x44-0x47 */
+	0xB0, 0x43, 0xB0, 0x44, 0xB0, 0x45, 0xB0, 0x46, /* 0x48-0x4B */
+	0xB0, 0xA9, 0xF1, 0xAD, 0xB0, 0x47, 0xB0, 0x48, /* 0x4C-0x4F */
+	0xB0, 0x49, 0xB0, 0x4A, 0xB0, 0x4B, 0xB0, 0x4C, /* 0x50-0x53 */
+	0xF1, 0xAF, 0xB0, 0x4D, 0xF1, 0xB1, 0xB0, 0x4E, /* 0x54-0x57 */
+	0xB0, 0x4F, 0xB0, 0x50, 0xB0, 0x51, 0xB0, 0x52, /* 0x58-0x5B */
+	0xF1, 0xB0, 0xB0, 0x53, 0xF1, 0xAE, 0xB0, 0x54, /* 0x5C-0x5F */
+	0xB0, 0x55, 0xB0, 0x56, 0xB0, 0x57, 0xD1, 0xA2, /* 0x60-0x63 */
+	0xB0, 0x58, 0xB0, 0x59, 0xB0, 0x5A, 0xB0, 0x5B, /* 0x64-0x67 */
+	0xB0, 0x5C, 0xB0, 0x5D, 0xB0, 0x5E, 0xF1, 0xB2, /* 0x68-0x6B */
+	0xB0, 0x5F, 0xB0, 0x60, 0xB0, 0x61, 0xF1, 0xB3, /* 0x6C-0x6F */
+	0xB0, 0x62, 0xB0, 0x63, 0xB0, 0x64, 0xB0, 0x65, /* 0x70-0x73 */
+	0xB0, 0x66, 0xB0, 0x67, 0xB0, 0x68, 0xB0, 0x69, /* 0x74-0x77 */
+	0xB9, 0xEF, 0xB0, 0x6A, 0xB0, 0x6B, 0xB5, 0xC7, /* 0x78-0x7B */
+	0xB0, 0x6C, 0xB0, 0xD7, 0xB0, 0xD9, 0xB0, 0x6D, /* 0x7C-0x7F */
+	
+	0xB0, 0x6E, 0xB0, 0x6F, 0xD4, 0xED, 0xB0, 0x70, /* 0x80-0x83 */
+	0xB5, 0xC4, 0xB0, 0x71, 0xBD, 0xD4, 0xBB, 0xCA, /* 0x84-0x87 */
+	0xF0, 0xA7, 0xB0, 0x72, 0xB0, 0x73, 0xB8, 0xDE, /* 0x88-0x8B */
+	0xB0, 0x74, 0xB0, 0x75, 0xF0, 0xA8, 0xB0, 0x76, /* 0x8C-0x8F */
+	0xB0, 0x77, 0xB0, 0xA8, 0xB0, 0x78, 0xF0, 0xA9, /* 0x90-0x93 */
+	0xB0, 0x79, 0xB0, 0x7A, 0xCD, 0xEE, 0xB0, 0x7B, /* 0x94-0x97 */
+	0xB0, 0x7C, 0xF0, 0xAA, 0xB0, 0x7D, 0xB0, 0x7E, /* 0x98-0x9B */
+	0xB0, 0x80, 0xB0, 0x81, 0xB0, 0x82, 0xB0, 0x83, /* 0x9C-0x9F */
+	0xB0, 0x84, 0xB0, 0x85, 0xB0, 0x86, 0xB0, 0x87, /* 0xA0-0xA3 */
+	0xF0, 0xAB, 0xB0, 0x88, 0xB0, 0x89, 0xB0, 0x8A, /* 0xA4-0xA7 */
+	0xB0, 0x8B, 0xB0, 0x8C, 0xB0, 0x8D, 0xB0, 0x8E, /* 0xA8-0xAB */
+	0xB0, 0x8F, 0xB0, 0x90, 0xC6, 0xA4, 0xB0, 0x91, /* 0xAC-0xAF */
+	0xB0, 0x92, 0xD6, 0xE5, 0xF1, 0xE4, 0xB0, 0x93, /* 0xB0-0xB3 */
+	0xF1, 0xE5, 0xB0, 0x94, 0xB0, 0x95, 0xB0, 0x96, /* 0xB4-0xB7 */
+	0xB0, 0x97, 0xB0, 0x98, 0xB0, 0x99, 0xB0, 0x9A, /* 0xB8-0xBB */
+	0xB0, 0x9B, 0xB0, 0x9C, 0xB0, 0x9D, 0xC3, 0xF3, /* 0xBC-0xBF */
+	0xB0, 0x9E, 0xB0, 0x9F, 0xD3, 0xDB, 0xB0, 0xA0, /* 0xC0-0xC3 */
+	0xB1, 0x40, 0xD6, 0xD1, 0xC5, 0xE8, 0xB1, 0x41, /* 0xC4-0xC7 */
+	0xD3, 0xAF, 0xB1, 0x42, 0xD2, 0xE6, 0xB1, 0x43, /* 0xC8-0xCB */
+	0xB1, 0x44, 0xEE, 0xC1, 0xB0, 0xBB, 0xD5, 0xB5, /* 0xCC-0xCF */
+	0xD1, 0xCE, 0xBC, 0xE0, 0xBA, 0xD0, 0xB1, 0x45, /* 0xD0-0xD3 */
+	0xBF, 0xF8, 0xB1, 0x46, 0xB8, 0xC7, 0xB5, 0xC1, /* 0xD4-0xD7 */
+	0xC5, 0xCC, 0xB1, 0x47, 0xB1, 0x48, 0xCA, 0xA2, /* 0xD8-0xDB */
+	0xB1, 0x49, 0xB1, 0x4A, 0xB1, 0x4B, 0xC3, 0xCB, /* 0xDC-0xDF */
+	0xB1, 0x4C, 0xB1, 0x4D, 0xB1, 0x4E, 0xB1, 0x4F, /* 0xE0-0xE3 */
+	0xB1, 0x50, 0xEE, 0xC2, 0xB1, 0x51, 0xB1, 0x52, /* 0xE4-0xE7 */
+	0xB1, 0x53, 0xB1, 0x54, 0xB1, 0x55, 0xB1, 0x56, /* 0xE8-0xEB */
+	0xB1, 0x57, 0xB1, 0x58, 0xC4, 0xBF, 0xB6, 0xA2, /* 0xEC-0xEF */
+	0xB1, 0x59, 0xED, 0xEC, 0xC3, 0xA4, 0xB1, 0x5A, /* 0xF0-0xF3 */
+	0xD6, 0xB1, 0xB1, 0x5B, 0xB1, 0x5C, 0xB1, 0x5D, /* 0xF4-0xF7 */
+	0xCF, 0xE0, 0xED, 0xEF, 0xB1, 0x5E, 0xB1, 0x5F, /* 0xF8-0xFB */
+	0xC5, 0xCE, 0xB1, 0x60, 0xB6, 0xDC, 0xB1, 0x61, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_77[512] = {
+	0xB1, 0x62, 0xCA, 0xA1, 0xB1, 0x63, 0xB1, 0x64, /* 0x00-0x03 */
+	0xED, 0xED, 0xB1, 0x65, 0xB1, 0x66, 0xED, 0xF0, /* 0x04-0x07 */
+	0xED, 0xF1, 0xC3, 0xBC, 0xB1, 0x67, 0xBF, 0xB4, /* 0x08-0x0B */
+	0xB1, 0x68, 0xED, 0xEE, 0xB1, 0x69, 0xB1, 0x6A, /* 0x0C-0x0F */
+	0xB1, 0x6B, 0xB1, 0x6C, 0xB1, 0x6D, 0xB1, 0x6E, /* 0x10-0x13 */
+	0xB1, 0x6F, 0xB1, 0x70, 0xB1, 0x71, 0xB1, 0x72, /* 0x14-0x17 */
+	0xB1, 0x73, 0xED, 0xF4, 0xED, 0xF2, 0xB1, 0x74, /* 0x18-0x1B */
+	0xB1, 0x75, 0xB1, 0x76, 0xB1, 0x77, 0xD5, 0xE6, /* 0x1C-0x1F */
+	0xC3, 0xDF, 0xB1, 0x78, 0xED, 0xF3, 0xB1, 0x79, /* 0x20-0x23 */
+	0xB1, 0x7A, 0xB1, 0x7B, 0xED, 0xF6, 0xB1, 0x7C, /* 0x24-0x27 */
+	0xD5, 0xA3, 0xD1, 0xA3, 0xB1, 0x7D, 0xB1, 0x7E, /* 0x28-0x2B */
+	0xB1, 0x80, 0xED, 0xF5, 0xB1, 0x81, 0xC3, 0xD0, /* 0x2C-0x2F */
+	0xB1, 0x82, 0xB1, 0x83, 0xB1, 0x84, 0xB1, 0x85, /* 0x30-0x33 */
+	0xB1, 0x86, 0xED, 0xF7, 0xBF, 0xF4, 0xBE, 0xEC, /* 0x34-0x37 */
+	0xED, 0xF8, 0xB1, 0x87, 0xCC, 0xF7, 0xB1, 0x88, /* 0x38-0x3B */
+	0xD1, 0xDB, 0xB1, 0x89, 0xB1, 0x8A, 0xB1, 0x8B, /* 0x3C-0x3F */
+	0xD7, 0xC5, 0xD5, 0xF6, 0xB1, 0x8C, 0xED, 0xFC, /* 0x40-0x43 */
+	0xB1, 0x8D, 0xB1, 0x8E, 0xB1, 0x8F, 0xED, 0xFB, /* 0x44-0x47 */
+	0xB1, 0x90, 0xB1, 0x91, 0xB1, 0x92, 0xB1, 0x93, /* 0x48-0x4B */
+	0xB1, 0x94, 0xB1, 0x95, 0xB1, 0x96, 0xB1, 0x97, /* 0x4C-0x4F */
+	0xED, 0xF9, 0xED, 0xFA, 0xB1, 0x98, 0xB1, 0x99, /* 0x50-0x53 */
+	0xB1, 0x9A, 0xB1, 0x9B, 0xB1, 0x9C, 0xB1, 0x9D, /* 0x54-0x57 */
+	0xB1, 0x9E, 0xB1, 0x9F, 0xED, 0xFD, 0xBE, 0xA6, /* 0x58-0x5B */
+	0xB1, 0xA0, 0xB2, 0x40, 0xB2, 0x41, 0xB2, 0x42, /* 0x5C-0x5F */
+	0xB2, 0x43, 0xCB, 0xAF, 0xEE, 0xA1, 0xB6, 0xBD, /* 0x60-0x63 */
+	0xB2, 0x44, 0xEE, 0xA2, 0xC4, 0xC0, 0xB2, 0x45, /* 0x64-0x67 */
+	0xED, 0xFE, 0xB2, 0x46, 0xB2, 0x47, 0xBD, 0xDE, /* 0x68-0x6B */
+	0xB2, 0xC7, 0xB2, 0x48, 0xB2, 0x49, 0xB2, 0x4A, /* 0x6C-0x6F */
+	0xB2, 0x4B, 0xB2, 0x4C, 0xB2, 0x4D, 0xB2, 0x4E, /* 0x70-0x73 */
+	0xB2, 0x4F, 0xB2, 0x50, 0xB2, 0x51, 0xB2, 0x52, /* 0x74-0x77 */
+	0xB2, 0x53, 0xB6, 0xC3, 0xB2, 0x54, 0xB2, 0x55, /* 0x78-0x7B */
+	0xB2, 0x56, 0xEE, 0xA5, 0xD8, 0xBA, 0xEE, 0xA3, /* 0x7C-0x7F */
+	
+	0xEE, 0xA6, 0xB2, 0x57, 0xB2, 0x58, 0xB2, 0x59, /* 0x80-0x83 */
+	0xC3, 0xE9, 0xB3, 0xF2, 0xB2, 0x5A, 0xB2, 0x5B, /* 0x84-0x87 */
+	0xB2, 0x5C, 0xB2, 0x5D, 0xB2, 0x5E, 0xB2, 0x5F, /* 0x88-0x8B */
+	0xEE, 0xA7, 0xEE, 0xA4, 0xCF, 0xB9, 0xB2, 0x60, /* 0x8C-0x8F */
+	0xB2, 0x61, 0xEE, 0xA8, 0xC2, 0xF7, 0xB2, 0x62, /* 0x90-0x93 */
+	0xB2, 0x63, 0xB2, 0x64, 0xB2, 0x65, 0xB2, 0x66, /* 0x94-0x97 */
+	0xB2, 0x67, 0xB2, 0x68, 0xB2, 0x69, 0xB2, 0x6A, /* 0x98-0x9B */
+	0xB2, 0x6B, 0xB2, 0x6C, 0xB2, 0x6D, 0xEE, 0xA9, /* 0x9C-0x9F */
+	0xEE, 0xAA, 0xB2, 0x6E, 0xDE, 0xAB, 0xB2, 0x6F, /* 0xA0-0xA3 */
+	0xB2, 0x70, 0xC6, 0xB3, 0xB2, 0x71, 0xC7, 0xC6, /* 0xA4-0xA7 */
+	0xB2, 0x72, 0xD6, 0xF5, 0xB5, 0xC9, 0xB2, 0x73, /* 0xA8-0xAB */
+	0xCB, 0xB2, 0xB2, 0x74, 0xB2, 0x75, 0xB2, 0x76, /* 0xAC-0xAF */
+	0xEE, 0xAB, 0xB2, 0x77, 0xB2, 0x78, 0xCD, 0xAB, /* 0xB0-0xB3 */
+	0xB2, 0x79, 0xEE, 0xAC, 0xB2, 0x7A, 0xB2, 0x7B, /* 0xB4-0xB7 */
+	0xB2, 0x7C, 0xB2, 0x7D, 0xB2, 0x7E, 0xD5, 0xB0, /* 0xB8-0xBB */
+	0xB2, 0x80, 0xEE, 0xAD, 0xB2, 0x81, 0xF6, 0xC4, /* 0xBC-0xBF */
+	0xB2, 0x82, 0xB2, 0x83, 0xB2, 0x84, 0xB2, 0x85, /* 0xC0-0xC3 */
+	0xB2, 0x86, 0xB2, 0x87, 0xB2, 0x88, 0xB2, 0x89, /* 0xC4-0xC7 */
+	0xB2, 0x8A, 0xB2, 0x8B, 0xB2, 0x8C, 0xB2, 0x8D, /* 0xC8-0xCB */
+	0xB2, 0x8E, 0xDB, 0xC7, 0xB2, 0x8F, 0xB2, 0x90, /* 0xCC-0xCF */
+	0xB2, 0x91, 0xB2, 0x92, 0xB2, 0x93, 0xB2, 0x94, /* 0xD0-0xD3 */
+	0xB2, 0x95, 0xB2, 0x96, 0xB2, 0x97, 0xB4, 0xA3, /* 0xD4-0xD7 */
+	0xB2, 0x98, 0xB2, 0x99, 0xB2, 0x9A, 0xC3, 0xAC, /* 0xD8-0xDB */
+	0xF1, 0xE6, 0xB2, 0x9B, 0xB2, 0x9C, 0xB2, 0x9D, /* 0xDC-0xDF */
+	0xB2, 0x9E, 0xB2, 0x9F, 0xCA, 0xB8, 0xD2, 0xD3, /* 0xE0-0xE3 */
+	0xB2, 0xA0, 0xD6, 0xAA, 0xB3, 0x40, 0xEF, 0xF2, /* 0xE4-0xE7 */
+	0xB3, 0x41, 0xBE, 0xD8, 0xB3, 0x42, 0xBD, 0xC3, /* 0xE8-0xEB */
+	0xEF, 0xF3, 0xB6, 0xCC, 0xB0, 0xAB, 0xB3, 0x43, /* 0xEC-0xEF */
+	0xB3, 0x44, 0xB3, 0x45, 0xB3, 0x46, 0xCA, 0xAF, /* 0xF0-0xF3 */
+	0xB3, 0x47, 0xB3, 0x48, 0xED, 0xB6, 0xB3, 0x49, /* 0xF4-0xF7 */
+	0xED, 0xB7, 0xB3, 0x4A, 0xB3, 0x4B, 0xB3, 0x4C, /* 0xF8-0xFB */
+	0xB3, 0x4D, 0xCE, 0xF9, 0xB7, 0xAF, 0xBF, 0xF3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_78[512] = {
+	0xED, 0xB8, 0xC2, 0xEB, 0xC9, 0xB0, 0xB3, 0x4E, /* 0x00-0x03 */
+	0xB3, 0x4F, 0xB3, 0x50, 0xB3, 0x51, 0xB3, 0x52, /* 0x04-0x07 */
+	0xB3, 0x53, 0xED, 0xB9, 0xB3, 0x54, 0xB3, 0x55, /* 0x08-0x0B */
+	0xC6, 0xF6, 0xBF, 0xB3, 0xB3, 0x56, 0xB3, 0x57, /* 0x0C-0x0F */
+	0xB3, 0x58, 0xED, 0xBC, 0xC5, 0xF8, 0xB3, 0x59, /* 0x10-0x13 */
+	0xD1, 0xD0, 0xB3, 0x5A, 0xD7, 0xA9, 0xED, 0xBA, /* 0x14-0x17 */
+	0xED, 0xBB, 0xB3, 0x5B, 0xD1, 0xE2, 0xB3, 0x5C, /* 0x18-0x1B */
+	0xED, 0xBF, 0xED, 0xC0, 0xB3, 0x5D, 0xED, 0xC4, /* 0x1C-0x1F */
+	0xB3, 0x5E, 0xB3, 0x5F, 0xB3, 0x60, 0xED, 0xC8, /* 0x20-0x23 */
+	0xB3, 0x61, 0xED, 0xC6, 0xED, 0xCE, 0xD5, 0xE8, /* 0x24-0x27 */
+	0xB3, 0x62, 0xED, 0xC9, 0xB3, 0x63, 0xB3, 0x64, /* 0x28-0x2B */
+	0xED, 0xC7, 0xED, 0xBE, 0xB3, 0x65, 0xB3, 0x66, /* 0x2C-0x2F */
+	0xC5, 0xE9, 0xB3, 0x67, 0xB3, 0x68, 0xB3, 0x69, /* 0x30-0x33 */
+	0xC6, 0xC6, 0xB3, 0x6A, 0xB3, 0x6B, 0xC9, 0xE9, /* 0x34-0x37 */
+	0xD4, 0xD2, 0xED, 0xC1, 0xED, 0xC2, 0xED, 0xC3, /* 0x38-0x3B */
+	0xED, 0xC5, 0xB3, 0x6C, 0xC0, 0xF9, 0xB3, 0x6D, /* 0x3C-0x3F */
+	0xB4, 0xA1, 0xB3, 0x6E, 0xB3, 0x6F, 0xB3, 0x70, /* 0x40-0x43 */
+	0xB3, 0x71, 0xB9, 0xE8, 0xB3, 0x72, 0xED, 0xD0, /* 0x44-0x47 */
+	0xB3, 0x73, 0xB3, 0x74, 0xB3, 0x75, 0xB3, 0x76, /* 0x48-0x4B */
+	0xED, 0xD1, 0xB3, 0x77, 0xED, 0xCA, 0xB3, 0x78, /* 0x4C-0x4F */
+	0xED, 0xCF, 0xB3, 0x79, 0xCE, 0xF8, 0xB3, 0x7A, /* 0x50-0x53 */
+	0xB3, 0x7B, 0xCB, 0xB6, 0xED, 0xCC, 0xED, 0xCD, /* 0x54-0x57 */
+	0xB3, 0x7C, 0xB3, 0x7D, 0xB3, 0x7E, 0xB3, 0x80, /* 0x58-0x5B */
+	0xB3, 0x81, 0xCF, 0xF5, 0xB3, 0x82, 0xB3, 0x83, /* 0x5C-0x5F */
+	0xB3, 0x84, 0xB3, 0x85, 0xB3, 0x86, 0xB3, 0x87, /* 0x60-0x63 */
+	0xB3, 0x88, 0xB3, 0x89, 0xB3, 0x8A, 0xB3, 0x8B, /* 0x64-0x67 */
+	0xB3, 0x8C, 0xB3, 0x8D, 0xED, 0xD2, 0xC1, 0xF2, /* 0x68-0x6B */
+	0xD3, 0xB2, 0xED, 0xCB, 0xC8, 0xB7, 0xB3, 0x8E, /* 0x6C-0x6F */
+	0xB3, 0x8F, 0xB3, 0x90, 0xB3, 0x91, 0xB3, 0x92, /* 0x70-0x73 */
+	0xB3, 0x93, 0xB3, 0x94, 0xB3, 0x95, 0xBC, 0xEF, /* 0x74-0x77 */
+	0xB3, 0x96, 0xB3, 0x97, 0xB3, 0x98, 0xB3, 0x99, /* 0x78-0x7B */
+	0xC5, 0xF0, 0xB3, 0x9A, 0xB3, 0x9B, 0xB3, 0x9C, /* 0x7C-0x7F */
+	
+	0xB3, 0x9D, 0xB3, 0x9E, 0xB3, 0x9F, 0xB3, 0xA0, /* 0x80-0x83 */
+	0xB4, 0x40, 0xB4, 0x41, 0xB4, 0x42, 0xED, 0xD6, /* 0x84-0x87 */
+	0xB4, 0x43, 0xB5, 0xEF, 0xB4, 0x44, 0xB4, 0x45, /* 0x88-0x8B */
+	0xC2, 0xB5, 0xB0, 0xAD, 0xCB, 0xE9, 0xB4, 0x46, /* 0x8C-0x8F */
+	0xB4, 0x47, 0xB1, 0xAE, 0xB4, 0x48, 0xED, 0xD4, /* 0x90-0x93 */
+	0xB4, 0x49, 0xB4, 0x4A, 0xB4, 0x4B, 0xCD, 0xEB, /* 0x94-0x97 */
+	0xB5, 0xE2, 0xB4, 0x4C, 0xED, 0xD5, 0xED, 0xD3, /* 0x98-0x9B */
+	0xED, 0xD7, 0xB4, 0x4D, 0xB4, 0x4E, 0xB5, 0xFA, /* 0x9C-0x9F */
+	0xB4, 0x4F, 0xED, 0xD8, 0xB4, 0x50, 0xED, 0xD9, /* 0xA0-0xA3 */
+	0xB4, 0x51, 0xED, 0xDC, 0xB4, 0x52, 0xB1, 0xCC, /* 0xA4-0xA7 */
+	0xB4, 0x53, 0xB4, 0x54, 0xB4, 0x55, 0xB4, 0x56, /* 0xA8-0xAB */
+	0xB4, 0x57, 0xB4, 0x58, 0xB4, 0x59, 0xB4, 0x5A, /* 0xAC-0xAF */
+	0xC5, 0xF6, 0xBC, 0xEE, 0xED, 0xDA, 0xCC, 0xBC, /* 0xB0-0xB3 */
+	0xB2, 0xEA, 0xB4, 0x5B, 0xB4, 0x5C, 0xB4, 0x5D, /* 0xB4-0xB7 */
+	0xB4, 0x5E, 0xED, 0xDB, 0xB4, 0x5F, 0xB4, 0x60, /* 0xB8-0xBB */
+	0xB4, 0x61, 0xB4, 0x62, 0xC4, 0xEB, 0xB4, 0x63, /* 0xBC-0xBF */
+	0xB4, 0x64, 0xB4, 0xC5, 0xB4, 0x65, 0xB4, 0x66, /* 0xC0-0xC3 */
+	0xB4, 0x67, 0xB0, 0xF5, 0xB4, 0x68, 0xB4, 0x69, /* 0xC4-0xC7 */
+	0xB4, 0x6A, 0xED, 0xDF, 0xC0, 0xDA, 0xB4, 0xE8, /* 0xC8-0xCB */
+	0xB4, 0x6B, 0xB4, 0x6C, 0xB4, 0x6D, 0xB4, 0x6E, /* 0xCC-0xCF */
+	0xC5, 0xCD, 0xB4, 0x6F, 0xB4, 0x70, 0xB4, 0x71, /* 0xD0-0xD3 */
+	0xED, 0xDD, 0xBF, 0xC4, 0xB4, 0x72, 0xB4, 0x73, /* 0xD4-0xD7 */
+	0xB4, 0x74, 0xED, 0xDE, 0xB4, 0x75, 0xB4, 0x76, /* 0xD8-0xDB */
+	0xB4, 0x77, 0xB4, 0x78, 0xB4, 0x79, 0xB4, 0x7A, /* 0xDC-0xDF */
+	0xB4, 0x7B, 0xB4, 0x7C, 0xB4, 0x7D, 0xB4, 0x7E, /* 0xE0-0xE3 */
+	0xB4, 0x80, 0xB4, 0x81, 0xB4, 0x82, 0xB4, 0x83, /* 0xE4-0xE7 */
+	0xC4, 0xA5, 0xB4, 0x84, 0xB4, 0x85, 0xB4, 0x86, /* 0xE8-0xEB */
+	0xED, 0xE0, 0xB4, 0x87, 0xB4, 0x88, 0xB4, 0x89, /* 0xEC-0xEF */
+	0xB4, 0x8A, 0xB4, 0x8B, 0xED, 0xE1, 0xB4, 0x8C, /* 0xF0-0xF3 */
+	0xED, 0xE3, 0xB4, 0x8D, 0xB4, 0x8E, 0xC1, 0xD7, /* 0xF4-0xF7 */
+	0xB4, 0x8F, 0xB4, 0x90, 0xBB, 0xC7, 0xB4, 0x91, /* 0xF8-0xFB */
+	0xB4, 0x92, 0xB4, 0x93, 0xB4, 0x94, 0xB4, 0x95, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_79[512] = {
+	0xB4, 0x96, 0xBD, 0xB8, 0xB4, 0x97, 0xB4, 0x98, /* 0x00-0x03 */
+	0xB4, 0x99, 0xED, 0xE2, 0xB4, 0x9A, 0xB4, 0x9B, /* 0x04-0x07 */
+	0xB4, 0x9C, 0xB4, 0x9D, 0xB4, 0x9E, 0xB4, 0x9F, /* 0x08-0x0B */
+	0xB4, 0xA0, 0xB5, 0x40, 0xB5, 0x41, 0xB5, 0x42, /* 0x0C-0x0F */
+	0xB5, 0x43, 0xB5, 0x44, 0xB5, 0x45, 0xED, 0xE4, /* 0x10-0x13 */
+	0xB5, 0x46, 0xB5, 0x47, 0xB5, 0x48, 0xB5, 0x49, /* 0x14-0x17 */
+	0xB5, 0x4A, 0xB5, 0x4B, 0xB5, 0x4C, 0xB5, 0x4D, /* 0x18-0x1B */
+	0xB5, 0x4E, 0xB5, 0x4F, 0xED, 0xE6, 0xB5, 0x50, /* 0x1C-0x1F */
+	0xB5, 0x51, 0xB5, 0x52, 0xB5, 0x53, 0xB5, 0x54, /* 0x20-0x23 */
+	0xED, 0xE5, 0xB5, 0x55, 0xB5, 0x56, 0xB5, 0x57, /* 0x24-0x27 */
+	0xB5, 0x58, 0xB5, 0x59, 0xB5, 0x5A, 0xB5, 0x5B, /* 0x28-0x2B */
+	0xB5, 0x5C, 0xB5, 0x5D, 0xB5, 0x5E, 0xB5, 0x5F, /* 0x2C-0x2F */
+	0xB5, 0x60, 0xB5, 0x61, 0xB5, 0x62, 0xB5, 0x63, /* 0x30-0x33 */
+	0xED, 0xE7, 0xB5, 0x64, 0xB5, 0x65, 0xB5, 0x66, /* 0x34-0x37 */
+	0xB5, 0x67, 0xB5, 0x68, 0xCA, 0xBE, 0xEC, 0xEA, /* 0x38-0x3B */
+	0xC0, 0xF1, 0xB5, 0x69, 0xC9, 0xE7, 0xB5, 0x6A, /* 0x3C-0x3F */
+	0xEC, 0xEB, 0xC6, 0xEE, 0xB5, 0x6B, 0xB5, 0x6C, /* 0x40-0x43 */
+	0xB5, 0x6D, 0xB5, 0x6E, 0xEC, 0xEC, 0xB5, 0x6F, /* 0x44-0x47 */
+	0xC6, 0xED, 0xEC, 0xED, 0xB5, 0x70, 0xB5, 0x71, /* 0x48-0x4B */
+	0xB5, 0x72, 0xB5, 0x73, 0xB5, 0x74, 0xB5, 0x75, /* 0x4C-0x4F */
+	0xB5, 0x76, 0xB5, 0x77, 0xB5, 0x78, 0xEC, 0xF0, /* 0x50-0x53 */
+	0xB5, 0x79, 0xB5, 0x7A, 0xD7, 0xE6, 0xEC, 0xF3, /* 0x54-0x57 */
+	0xB5, 0x7B, 0xB5, 0x7C, 0xEC, 0xF1, 0xEC, 0xEE, /* 0x58-0x5B */
+	0xEC, 0xEF, 0xD7, 0xA3, 0xC9, 0xF1, 0xCB, 0xEE, /* 0x5C-0x5F */
+	0xEC, 0xF4, 0xB5, 0x7D, 0xEC, 0xF2, 0xB5, 0x7E, /* 0x60-0x63 */
+	0xB5, 0x80, 0xCF, 0xE9, 0xB5, 0x81, 0xEC, 0xF6, /* 0x64-0x67 */
+	0xC6, 0xB1, 0xB5, 0x82, 0xB5, 0x83, 0xB5, 0x84, /* 0x68-0x6B */
+	0xB5, 0x85, 0xBC, 0xC0, 0xB5, 0x86, 0xEC, 0xF5, /* 0x6C-0x6F */
+	0xB5, 0x87, 0xB5, 0x88, 0xB5, 0x89, 0xB5, 0x8A, /* 0x70-0x73 */
+	0xB5, 0x8B, 0xB5, 0x8C, 0xB5, 0x8D, 0xB5, 0xBB, /* 0x74-0x77 */
+	0xBB, 0xF6, 0xB5, 0x8E, 0xEC, 0xF7, 0xB5, 0x8F, /* 0x78-0x7B */
+	0xB5, 0x90, 0xB5, 0x91, 0xB5, 0x92, 0xB5, 0x93, /* 0x7C-0x7F */
+	
+	0xD9, 0xF7, 0xBD, 0xFB, 0xB5, 0x94, 0xB5, 0x95, /* 0x80-0x83 */
+	0xC2, 0xBB, 0xEC, 0xF8, 0xB5, 0x96, 0xB5, 0x97, /* 0x84-0x87 */
+	0xB5, 0x98, 0xB5, 0x99, 0xEC, 0xF9, 0xB5, 0x9A, /* 0x88-0x8B */
+	0xB5, 0x9B, 0xB5, 0x9C, 0xB5, 0x9D, 0xB8, 0xA3, /* 0x8C-0x8F */
+	0xB5, 0x9E, 0xB5, 0x9F, 0xB5, 0xA0, 0xB6, 0x40, /* 0x90-0x93 */
+	0xB6, 0x41, 0xB6, 0x42, 0xB6, 0x43, 0xB6, 0x44, /* 0x94-0x97 */
+	0xB6, 0x45, 0xB6, 0x46, 0xEC, 0xFA, 0xB6, 0x47, /* 0x98-0x9B */
+	0xB6, 0x48, 0xB6, 0x49, 0xB6, 0x4A, 0xB6, 0x4B, /* 0x9C-0x9F */
+	0xB6, 0x4C, 0xB6, 0x4D, 0xB6, 0x4E, 0xB6, 0x4F, /* 0xA0-0xA3 */
+	0xB6, 0x50, 0xB6, 0x51, 0xB6, 0x52, 0xEC, 0xFB, /* 0xA4-0xA7 */
+	0xB6, 0x53, 0xB6, 0x54, 0xB6, 0x55, 0xB6, 0x56, /* 0xA8-0xAB */
+	0xB6, 0x57, 0xB6, 0x58, 0xB6, 0x59, 0xB6, 0x5A, /* 0xAC-0xAF */
+	0xB6, 0x5B, 0xB6, 0x5C, 0xB6, 0x5D, 0xEC, 0xFC, /* 0xB0-0xB3 */
+	0xB6, 0x5E, 0xB6, 0x5F, 0xB6, 0x60, 0xB6, 0x61, /* 0xB4-0xB7 */
+	0xB6, 0x62, 0xD3, 0xED, 0xD8, 0xAE, 0xC0, 0xEB, /* 0xB8-0xBB */
+	0xB6, 0x63, 0xC7, 0xDD, 0xBA, 0xCC, 0xB6, 0x64, /* 0xBC-0xBF */
+	0xD0, 0xE3, 0xCB, 0xBD, 0xB6, 0x65, 0xCD, 0xBA, /* 0xC0-0xC3 */
+	0xB6, 0x66, 0xB6, 0x67, 0xB8, 0xD1, 0xB6, 0x68, /* 0xC4-0xC7 */
+	0xB6, 0x69, 0xB1, 0xFC, 0xB6, 0x6A, 0xC7, 0xEF, /* 0xC8-0xCB */
+	0xB6, 0x6B, 0xD6, 0xD6, 0xB6, 0x6C, 0xB6, 0x6D, /* 0xCC-0xCF */
+	0xB6, 0x6E, 0xBF, 0xC6, 0xC3, 0xEB, 0xB6, 0x6F, /* 0xD0-0xD3 */
+	0xB6, 0x70, 0xEF, 0xF5, 0xB6, 0x71, 0xB6, 0x72, /* 0xD4-0xD7 */
+	0xC3, 0xD8, 0xB6, 0x73, 0xB6, 0x74, 0xB6, 0x75, /* 0xD8-0xDB */
+	0xB6, 0x76, 0xB6, 0x77, 0xB6, 0x78, 0xD7, 0xE2, /* 0xDC-0xDF */
+	0xB6, 0x79, 0xB6, 0x7A, 0xB6, 0x7B, 0xEF, 0xF7, /* 0xE0-0xE3 */
+	0xB3, 0xD3, 0xB6, 0x7C, 0xC7, 0xD8, 0xD1, 0xED, /* 0xE4-0xE7 */
+	0xB6, 0x7D, 0xD6, 0xC8, 0xB6, 0x7E, 0xEF, 0xF8, /* 0xE8-0xEB */
+	0xB6, 0x80, 0xEF, 0xF6, 0xB6, 0x81, 0xBB, 0xFD, /* 0xEC-0xEF */
+	0xB3, 0xC6, 0xB6, 0x82, 0xB6, 0x83, 0xB6, 0x84, /* 0xF0-0xF3 */
+	0xB6, 0x85, 0xB6, 0x86, 0xB6, 0x87, 0xB6, 0x88, /* 0xF4-0xF7 */
+	0xBD, 0xD5, 0xB6, 0x89, 0xB6, 0x8A, 0xD2, 0xC6, /* 0xF8-0xFB */
+	0xB6, 0x8B, 0xBB, 0xE0, 0xB6, 0x8C, 0xB6, 0x8D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7A[512] = {
+	0xCF, 0xA1, 0xB6, 0x8E, 0xEF, 0xFC, 0xEF, 0xFB, /* 0x00-0x03 */
+	0xB6, 0x8F, 0xB6, 0x90, 0xEF, 0xF9, 0xB6, 0x91, /* 0x04-0x07 */
+	0xB6, 0x92, 0xB6, 0x93, 0xB6, 0x94, 0xB3, 0xCC, /* 0x08-0x0B */
+	0xB6, 0x95, 0xC9, 0xD4, 0xCB, 0xB0, 0xB6, 0x96, /* 0x0C-0x0F */
+	0xB6, 0x97, 0xB6, 0x98, 0xB6, 0x99, 0xB6, 0x9A, /* 0x10-0x13 */
+	0xEF, 0xFE, 0xB6, 0x9B, 0xB6, 0x9C, 0xB0, 0xDE, /* 0x14-0x17 */
+	0xB6, 0x9D, 0xB6, 0x9E, 0xD6, 0xC9, 0xB6, 0x9F, /* 0x18-0x1B */
+	0xB6, 0xA0, 0xB7, 0x40, 0xEF, 0xFD, 0xB7, 0x41, /* 0x1C-0x1F */
+	0xB3, 0xED, 0xB7, 0x42, 0xB7, 0x43, 0xF6, 0xD5, /* 0x20-0x23 */
+	0xB7, 0x44, 0xB7, 0x45, 0xB7, 0x46, 0xB7, 0x47, /* 0x24-0x27 */
+	0xB7, 0x48, 0xB7, 0x49, 0xB7, 0x4A, 0xB7, 0x4B, /* 0x28-0x2B */
+	0xB7, 0x4C, 0xB7, 0x4D, 0xB7, 0x4E, 0xB7, 0x4F, /* 0x2C-0x2F */
+	0xB7, 0x50, 0xB7, 0x51, 0xB7, 0x52, 0xCE, 0xC8, /* 0x30-0x33 */
+	0xB7, 0x53, 0xB7, 0x54, 0xB7, 0x55, 0xF0, 0xA2, /* 0x34-0x37 */
+	0xB7, 0x56, 0xF0, 0xA1, 0xB7, 0x57, 0xB5, 0xBE, /* 0x38-0x3B */
+	0xBC, 0xDA, 0xBB, 0xFC, 0xB7, 0x58, 0xB8, 0xE5, /* 0x3C-0x3F */
+	0xB7, 0x59, 0xB7, 0x5A, 0xB7, 0x5B, 0xB7, 0x5C, /* 0x40-0x43 */
+	0xB7, 0x5D, 0xB7, 0x5E, 0xC4, 0xC2, 0xB7, 0x5F, /* 0x44-0x47 */
+	0xB7, 0x60, 0xB7, 0x61, 0xB7, 0x62, 0xB7, 0x63, /* 0x48-0x4B */
+	0xB7, 0x64, 0xB7, 0x65, 0xB7, 0x66, 0xB7, 0x67, /* 0x4C-0x4F */
+	0xB7, 0x68, 0xF0, 0xA3, 0xB7, 0x69, 0xB7, 0x6A, /* 0x50-0x53 */
+	0xB7, 0x6B, 0xB7, 0x6C, 0xB7, 0x6D, 0xCB, 0xEB, /* 0x54-0x57 */
+	0xB7, 0x6E, 0xB7, 0x6F, 0xB7, 0x70, 0xB7, 0x71, /* 0x58-0x5B */
+	0xB7, 0x72, 0xB7, 0x73, 0xB7, 0x74, 0xB7, 0x75, /* 0x5C-0x5F */
+	0xB7, 0x76, 0xB7, 0x77, 0xB7, 0x78, 0xB7, 0x79, /* 0x60-0x63 */
+	0xB7, 0x7A, 0xB7, 0x7B, 0xB7, 0x7C, 0xB7, 0x7D, /* 0x64-0x67 */
+	0xB7, 0x7E, 0xB7, 0x80, 0xB7, 0x81, 0xB7, 0x82, /* 0x68-0x6B */
+	0xB7, 0x83, 0xB7, 0x84, 0xB7, 0x85, 0xB7, 0x86, /* 0x6C-0x6F */
+	0xF0, 0xA6, 0xB7, 0x87, 0xB7, 0x88, 0xB7, 0x89, /* 0x70-0x73 */
+	0xD1, 0xA8, 0xB7, 0x8A, 0xBE, 0xBF, 0xC7, 0xEE, /* 0x74-0x77 */
+	0xF1, 0xB6, 0xF1, 0xB7, 0xBF, 0xD5, 0xB7, 0x8B, /* 0x78-0x7B */
+	0xB7, 0x8C, 0xB7, 0x8D, 0xB7, 0x8E, 0xB4, 0xA9, /* 0x7C-0x7F */
+	
+	0xF1, 0xB8, 0xCD, 0xBB, 0xB7, 0x8F, 0xC7, 0xD4, /* 0x80-0x83 */
+	0xD5, 0xAD, 0xB7, 0x90, 0xF1, 0xB9, 0xB7, 0x91, /* 0x84-0x87 */
+	0xF1, 0xBA, 0xB7, 0x92, 0xB7, 0x93, 0xB7, 0x94, /* 0x88-0x8B */
+	0xB7, 0x95, 0xC7, 0xCF, 0xB7, 0x96, 0xB7, 0x97, /* 0x8C-0x8F */
+	0xB7, 0x98, 0xD2, 0xA4, 0xD6, 0xCF, 0xB7, 0x99, /* 0x90-0x93 */
+	0xB7, 0x9A, 0xF1, 0xBB, 0xBD, 0xD1, 0xB4, 0xB0, /* 0x94-0x97 */
+	0xBE, 0xBD, 0xB7, 0x9B, 0xB7, 0x9C, 0xB7, 0x9D, /* 0x98-0x9B */
+	0xB4, 0xDC, 0xCE, 0xD1, 0xB7, 0x9E, 0xBF, 0xDF, /* 0x9C-0x9F */
+	0xF1, 0xBD, 0xB7, 0x9F, 0xB7, 0xA0, 0xB8, 0x40, /* 0xA0-0xA3 */
+	0xB8, 0x41, 0xBF, 0xFA, 0xF1, 0xBC, 0xB8, 0x42, /* 0xA4-0xA7 */
+	0xF1, 0xBF, 0xB8, 0x43, 0xB8, 0x44, 0xB8, 0x45, /* 0xA8-0xAB */
+	0xF1, 0xBE, 0xF1, 0xC0, 0xB8, 0x46, 0xB8, 0x47, /* 0xAC-0xAF */
+	0xB8, 0x48, 0xB8, 0x49, 0xB8, 0x4A, 0xF1, 0xC1, /* 0xB0-0xB3 */
+	0xB8, 0x4B, 0xB8, 0x4C, 0xB8, 0x4D, 0xB8, 0x4E, /* 0xB4-0xB7 */
+	0xB8, 0x4F, 0xB8, 0x50, 0xB8, 0x51, 0xB8, 0x52, /* 0xB8-0xBB */
+	0xB8, 0x53, 0xB8, 0x54, 0xB8, 0x55, 0xC1, 0xFE, /* 0xBC-0xBF */
+	0xB8, 0x56, 0xB8, 0x57, 0xB8, 0x58, 0xB8, 0x59, /* 0xC0-0xC3 */
+	0xB8, 0x5A, 0xB8, 0x5B, 0xB8, 0x5C, 0xB8, 0x5D, /* 0xC4-0xC7 */
+	0xB8, 0x5E, 0xB8, 0x5F, 0xB8, 0x60, 0xC1, 0xA2, /* 0xC8-0xCB */
+	0xB8, 0x61, 0xB8, 0x62, 0xB8, 0x63, 0xB8, 0x64, /* 0xCC-0xCF */
+	0xB8, 0x65, 0xB8, 0x66, 0xB8, 0x67, 0xB8, 0x68, /* 0xD0-0xD3 */
+	0xB8, 0x69, 0xB8, 0x6A, 0xCA, 0xFA, 0xB8, 0x6B, /* 0xD4-0xD7 */
+	0xB8, 0x6C, 0xD5, 0xBE, 0xB8, 0x6D, 0xB8, 0x6E, /* 0xD8-0xDB */
+	0xB8, 0x6F, 0xB8, 0x70, 0xBE, 0xBA, 0xBE, 0xB9, /* 0xDC-0xDF */
+	0xD5, 0xC2, 0xB8, 0x71, 0xB8, 0x72, 0xBF, 0xA2, /* 0xE0-0xE3 */
+	0xB8, 0x73, 0xCD, 0xAF, 0xF1, 0xB5, 0xB8, 0x74, /* 0xE4-0xE7 */
+	0xB8, 0x75, 0xB8, 0x76, 0xB8, 0x77, 0xB8, 0x78, /* 0xE8-0xEB */
+	0xB8, 0x79, 0xBD, 0xDF, 0xB8, 0x7A, 0xB6, 0xCB, /* 0xEC-0xEF */
+	0xB8, 0x7B, 0xB8, 0x7C, 0xB8, 0x7D, 0xB8, 0x7E, /* 0xF0-0xF3 */
+	0xB8, 0x80, 0xB8, 0x81, 0xB8, 0x82, 0xB8, 0x83, /* 0xF4-0xF7 */
+	0xB8, 0x84, 0xD6, 0xF1, 0xF3, 0xC3, 0xB8, 0x85, /* 0xF8-0xFB */
+	0xB8, 0x86, 0xF3, 0xC4, 0xB8, 0x87, 0xB8, 0xCD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7B[512] = {
+	0xB8, 0x88, 0xB8, 0x89, 0xB8, 0x8A, 0xF3, 0xC6, /* 0x00-0x03 */
+	0xF3, 0xC7, 0xB8, 0x8B, 0xB0, 0xCA, 0xB8, 0x8C, /* 0x04-0x07 */
+	0xF3, 0xC5, 0xB8, 0x8D, 0xF3, 0xC9, 0xCB, 0xF1, /* 0x08-0x0B */
+	0xB8, 0x8E, 0xB8, 0x8F, 0xB8, 0x90, 0xF3, 0xCB, /* 0x0C-0x0F */
+	0xB8, 0x91, 0xD0, 0xA6, 0xB8, 0x92, 0xB8, 0x93, /* 0x10-0x13 */
+	0xB1, 0xCA, 0xF3, 0xC8, 0xB8, 0x94, 0xB8, 0x95, /* 0x14-0x17 */
+	0xB8, 0x96, 0xF3, 0xCF, 0xB8, 0x97, 0xB5, 0xD1, /* 0x18-0x1B */
+	0xB8, 0x98, 0xB8, 0x99, 0xF3, 0xD7, 0xB8, 0x9A, /* 0x1C-0x1F */
+	0xF3, 0xD2, 0xB8, 0x9B, 0xB8, 0x9C, 0xB8, 0x9D, /* 0x20-0x23 */
+	0xF3, 0xD4, 0xF3, 0xD3, 0xB7, 0xFB, 0xB8, 0x9E, /* 0x24-0x27 */
+	0xB1, 0xBF, 0xB8, 0x9F, 0xF3, 0xCE, 0xF3, 0xCA, /* 0x28-0x2B */
+	0xB5, 0xDA, 0xB8, 0xA0, 0xF3, 0xD0, 0xB9, 0x40, /* 0x2C-0x2F */
+	0xB9, 0x41, 0xF3, 0xD1, 0xB9, 0x42, 0xF3, 0xD5, /* 0x30-0x33 */
+	0xB9, 0x43, 0xB9, 0x44, 0xB9, 0x45, 0xB9, 0x46, /* 0x34-0x37 */
+	0xF3, 0xCD, 0xB9, 0x47, 0xBC, 0xE3, 0xB9, 0x48, /* 0x38-0x3B */
+	0xC1, 0xFD, 0xB9, 0x49, 0xF3, 0xD6, 0xB9, 0x4A, /* 0x3C-0x3F */
+	0xB9, 0x4B, 0xB9, 0x4C, 0xB9, 0x4D, 0xB9, 0x4E, /* 0x40-0x43 */
+	0xB9, 0x4F, 0xF3, 0xDA, 0xB9, 0x50, 0xF3, 0xCC, /* 0x44-0x47 */
+	0xB9, 0x51, 0xB5, 0xC8, 0xB9, 0x52, 0xBD, 0xEE, /* 0x48-0x4B */
+	0xF3, 0xDC, 0xB9, 0x53, 0xB9, 0x54, 0xB7, 0xA4, /* 0x4C-0x4F */
+	0xBF, 0xF0, 0xD6, 0xFE, 0xCD, 0xB2, 0xB9, 0x55, /* 0x50-0x53 */
+	0xB4, 0xF0, 0xB9, 0x56, 0xB2, 0xDF, 0xB9, 0x57, /* 0x54-0x57 */
+	0xF3, 0xD8, 0xB9, 0x58, 0xF3, 0xD9, 0xC9, 0xB8, /* 0x58-0x5B */
+	0xB9, 0x59, 0xF3, 0xDD, 0xB9, 0x5A, 0xB9, 0x5B, /* 0x5C-0x5F */
+	0xF3, 0xDE, 0xB9, 0x5C, 0xF3, 0xE1, 0xB9, 0x5D, /* 0x60-0x63 */
+	0xB9, 0x5E, 0xB9, 0x5F, 0xB9, 0x60, 0xB9, 0x61, /* 0x64-0x67 */
+	0xB9, 0x62, 0xB9, 0x63, 0xB9, 0x64, 0xB9, 0x65, /* 0x68-0x6B */
+	0xB9, 0x66, 0xB9, 0x67, 0xF3, 0xDF, 0xB9, 0x68, /* 0x6C-0x6F */
+	0xB9, 0x69, 0xF3, 0xE3, 0xF3, 0xE2, 0xB9, 0x6A, /* 0x70-0x73 */
+	0xB9, 0x6B, 0xF3, 0xDB, 0xB9, 0x6C, 0xBF, 0xEA, /* 0x74-0x77 */
+	0xB9, 0x6D, 0xB3, 0xEF, 0xB9, 0x6E, 0xF3, 0xE0, /* 0x78-0x7B */
+	0xB9, 0x6F, 0xB9, 0x70, 0xC7, 0xA9, 0xB9, 0x71, /* 0x7C-0x7F */
+	
+	0xBC, 0xF2, 0xB9, 0x72, 0xB9, 0x73, 0xB9, 0x74, /* 0x80-0x83 */
+	0xB9, 0x75, 0xF3, 0xEB, 0xB9, 0x76, 0xB9, 0x77, /* 0x84-0x87 */
+	0xB9, 0x78, 0xB9, 0x79, 0xB9, 0x7A, 0xB9, 0x7B, /* 0x88-0x8B */
+	0xB9, 0x7C, 0xB9, 0xBF, 0xB9, 0x7D, 0xB9, 0x7E, /* 0x8C-0x8F */
+	0xF3, 0xE4, 0xB9, 0x80, 0xB9, 0x81, 0xB9, 0x82, /* 0x90-0x93 */
+	0xB2, 0xAD, 0xBB, 0xFE, 0xB9, 0x83, 0xCB, 0xE3, /* 0x94-0x97 */
+	0xB9, 0x84, 0xB9, 0x85, 0xB9, 0x86, 0xB9, 0x87, /* 0x98-0x9B */
+	0xF3, 0xED, 0xF3, 0xE9, 0xB9, 0x88, 0xB9, 0x89, /* 0x9C-0x9F */
+	0xB9, 0x8A, 0xB9, 0xDC, 0xF3, 0xEE, 0xB9, 0x8B, /* 0xA0-0xA3 */
+	0xB9, 0x8C, 0xB9, 0x8D, 0xF3, 0xE5, 0xF3, 0xE6, /* 0xA4-0xA7 */
+	0xF3, 0xEA, 0xC2, 0xE1, 0xF3, 0xEC, 0xF3, 0xEF, /* 0xA8-0xAB */
+	0xF3, 0xE8, 0xBC, 0xFD, 0xB9, 0x8E, 0xB9, 0x8F, /* 0xAC-0xAF */
+	0xB9, 0x90, 0xCF, 0xE4, 0xB9, 0x91, 0xB9, 0x92, /* 0xB0-0xB3 */
+	0xF3, 0xF0, 0xB9, 0x93, 0xB9, 0x94, 0xB9, 0x95, /* 0xB4-0xB7 */
+	0xF3, 0xE7, 0xB9, 0x96, 0xB9, 0x97, 0xB9, 0x98, /* 0xB8-0xBB */
+	0xB9, 0x99, 0xB9, 0x9A, 0xB9, 0x9B, 0xB9, 0x9C, /* 0xBC-0xBF */
+	0xB9, 0x9D, 0xF3, 0xF2, 0xB9, 0x9E, 0xB9, 0x9F, /* 0xC0-0xC3 */
+	0xB9, 0xA0, 0xBA, 0x40, 0xD7, 0xAD, 0xC6, 0xAA, /* 0xC4-0xC7 */
+	0xBA, 0x41, 0xBA, 0x42, 0xBA, 0x43, 0xBA, 0x44, /* 0xC8-0xCB */
+	0xF3, 0xF3, 0xBA, 0x45, 0xBA, 0x46, 0xBA, 0x47, /* 0xCC-0xCF */
+	0xBA, 0x48, 0xF3, 0xF1, 0xBA, 0x49, 0xC2, 0xA8, /* 0xD0-0xD3 */
+	0xBA, 0x4A, 0xBA, 0x4B, 0xBA, 0x4C, 0xBA, 0x4D, /* 0xD4-0xD7 */
+	0xBA, 0x4E, 0xB8, 0xDD, 0xF3, 0xF5, 0xBA, 0x4F, /* 0xD8-0xDB */
+	0xBA, 0x50, 0xF3, 0xF4, 0xBA, 0x51, 0xBA, 0x52, /* 0xDC-0xDF */
+	0xBA, 0x53, 0xB4, 0xDB, 0xBA, 0x54, 0xBA, 0x55, /* 0xE0-0xE3 */
+	0xBA, 0x56, 0xF3, 0xF6, 0xF3, 0xF7, 0xBA, 0x57, /* 0xE4-0xE7 */
+	0xBA, 0x58, 0xBA, 0x59, 0xF3, 0xF8, 0xBA, 0x5A, /* 0xE8-0xEB */
+	0xBA, 0x5B, 0xBA, 0x5C, 0xC0, 0xBA, 0xBA, 0x5D, /* 0xEC-0xEF */
+	0xBA, 0x5E, 0xC0, 0xE9, 0xBA, 0x5F, 0xBA, 0x60, /* 0xF0-0xF3 */
+	0xBA, 0x61, 0xBA, 0x62, 0xBA, 0x63, 0xC5, 0xF1, /* 0xF4-0xF7 */
+	0xBA, 0x64, 0xBA, 0x65, 0xBA, 0x66, 0xBA, 0x67, /* 0xF8-0xFB */
+	0xF3, 0xFB, 0xBA, 0x68, 0xF3, 0xFA, 0xBA, 0x69, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7C[512] = {
+	0xBA, 0x6A, 0xBA, 0x6B, 0xBA, 0x6C, 0xBA, 0x6D, /* 0x00-0x03 */
+	0xBA, 0x6E, 0xBA, 0x6F, 0xBA, 0x70, 0xB4, 0xD8, /* 0x04-0x07 */
+	0xBA, 0x71, 0xBA, 0x72, 0xBA, 0x73, 0xF3, 0xFE, /* 0x08-0x0B */
+	0xF3, 0xF9, 0xBA, 0x74, 0xBA, 0x75, 0xF3, 0xFC, /* 0x0C-0x0F */
+	0xBA, 0x76, 0xBA, 0x77, 0xBA, 0x78, 0xBA, 0x79, /* 0x10-0x13 */
+	0xBA, 0x7A, 0xBA, 0x7B, 0xF3, 0xFD, 0xBA, 0x7C, /* 0x14-0x17 */
+	0xBA, 0x7D, 0xBA, 0x7E, 0xBA, 0x80, 0xBA, 0x81, /* 0x18-0x1B */
+	0xBA, 0x82, 0xBA, 0x83, 0xBA, 0x84, 0xF4, 0xA1, /* 0x1C-0x1F */
+	0xBA, 0x85, 0xBA, 0x86, 0xBA, 0x87, 0xBA, 0x88, /* 0x20-0x23 */
+	0xBA, 0x89, 0xBA, 0x8A, 0xF4, 0xA3, 0xBB, 0xC9, /* 0x24-0x27 */
+	0xBA, 0x8B, 0xBA, 0x8C, 0xF4, 0xA2, 0xBA, 0x8D, /* 0x28-0x2B */
+	0xBA, 0x8E, 0xBA, 0x8F, 0xBA, 0x90, 0xBA, 0x91, /* 0x2C-0x2F */
+	0xBA, 0x92, 0xBA, 0x93, 0xBA, 0x94, 0xBA, 0x95, /* 0x30-0x33 */
+	0xBA, 0x96, 0xBA, 0x97, 0xBA, 0x98, 0xBA, 0x99, /* 0x34-0x37 */
+	0xF4, 0xA4, 0xBA, 0x9A, 0xBA, 0x9B, 0xBA, 0x9C, /* 0x38-0x3B */
+	0xBA, 0x9D, 0xBA, 0x9E, 0xBA, 0x9F, 0xB2, 0xBE, /* 0x3C-0x3F */
+	0xF4, 0xA6, 0xF4, 0xA5, 0xBA, 0xA0, 0xBB, 0x40, /* 0x40-0x43 */
+	0xBB, 0x41, 0xBB, 0x42, 0xBB, 0x43, 0xBB, 0x44, /* 0x44-0x47 */
+	0xBB, 0x45, 0xBB, 0x46, 0xBB, 0x47, 0xBB, 0x48, /* 0x48-0x4B */
+	0xBB, 0x49, 0xBC, 0xAE, 0xBB, 0x4A, 0xBB, 0x4B, /* 0x4C-0x4F */
+	0xBB, 0x4C, 0xBB, 0x4D, 0xBB, 0x4E, 0xBB, 0x4F, /* 0x50-0x53 */
+	0xBB, 0x50, 0xBB, 0x51, 0xBB, 0x52, 0xBB, 0x53, /* 0x54-0x57 */
+	0xBB, 0x54, 0xBB, 0x55, 0xBB, 0x56, 0xBB, 0x57, /* 0x58-0x5B */
+	0xBB, 0x58, 0xBB, 0x59, 0xBB, 0x5A, 0xBB, 0x5B, /* 0x5C-0x5F */
+	0xBB, 0x5C, 0xBB, 0x5D, 0xBB, 0x5E, 0xBB, 0x5F, /* 0x60-0x63 */
+	0xBB, 0x60, 0xBB, 0x61, 0xBB, 0x62, 0xBB, 0x63, /* 0x64-0x67 */
+	0xBB, 0x64, 0xBB, 0x65, 0xBB, 0x66, 0xBB, 0x67, /* 0x68-0x6B */
+	0xBB, 0x68, 0xBB, 0x69, 0xBB, 0x6A, 0xBB, 0x6B, /* 0x6C-0x6F */
+	0xBB, 0x6C, 0xBB, 0x6D, 0xBB, 0x6E, 0xC3, 0xD7, /* 0x70-0x73 */
+	0xD9, 0xE1, 0xBB, 0x6F, 0xBB, 0x70, 0xBB, 0x71, /* 0x74-0x77 */
+	0xBB, 0x72, 0xBB, 0x73, 0xBB, 0x74, 0xC0, 0xE0, /* 0x78-0x7B */
+	0xF4, 0xCC, 0xD7, 0xD1, 0xBB, 0x75, 0xBB, 0x76, /* 0x7C-0x7F */
+	
+	0xBB, 0x77, 0xBB, 0x78, 0xBB, 0x79, 0xBB, 0x7A, /* 0x80-0x83 */
+	0xBB, 0x7B, 0xBB, 0x7C, 0xBB, 0x7D, 0xBB, 0x7E, /* 0x84-0x87 */
+	0xBB, 0x80, 0xB7, 0xDB, 0xBB, 0x81, 0xBB, 0x82, /* 0x88-0x8B */
+	0xBB, 0x83, 0xBB, 0x84, 0xBB, 0x85, 0xBB, 0x86, /* 0x8C-0x8F */
+	0xBB, 0x87, 0xF4, 0xCE, 0xC1, 0xA3, 0xBB, 0x88, /* 0x90-0x93 */
+	0xBB, 0x89, 0xC6, 0xC9, 0xBB, 0x8A, 0xB4, 0xD6, /* 0x94-0x97 */
+	0xD5, 0xB3, 0xBB, 0x8B, 0xBB, 0x8C, 0xBB, 0x8D, /* 0x98-0x9B */
+	0xF4, 0xD0, 0xF4, 0xCF, 0xF4, 0xD1, 0xCB, 0xDA, /* 0x9C-0x9F */
+	0xBB, 0x8E, 0xBB, 0x8F, 0xF4, 0xD2, 0xBB, 0x90, /* 0xA0-0xA3 */
+	0xD4, 0xC1, 0xD6, 0xE0, 0xBB, 0x91, 0xBB, 0x92, /* 0xA4-0xA7 */
+	0xBB, 0x93, 0xBB, 0x94, 0xB7, 0xE0, 0xBB, 0x95, /* 0xA8-0xAB */
+	0xBB, 0x96, 0xBB, 0x97, 0xC1, 0xB8, 0xBB, 0x98, /* 0xAC-0xAF */
+	0xBB, 0x99, 0xC1, 0xBB, 0xF4, 0xD3, 0xBE, 0xAC, /* 0xB0-0xB3 */
+	0xBB, 0x9A, 0xBB, 0x9B, 0xBB, 0x9C, 0xBB, 0x9D, /* 0xB4-0xB7 */
+	0xBB, 0x9E, 0xB4, 0xE2, 0xBB, 0x9F, 0xBB, 0xA0, /* 0xB8-0xBB */
+	0xF4, 0xD4, 0xF4, 0xD5, 0xBE, 0xAB, 0xBC, 0x40, /* 0xBC-0xBF */
+	0xBC, 0x41, 0xF4, 0xD6, 0xBC, 0x42, 0xBC, 0x43, /* 0xC0-0xC3 */
+	0xBC, 0x44, 0xF4, 0xDB, 0xBC, 0x45, 0xF4, 0xD7, /* 0xC4-0xC7 */
+	0xF4, 0xDA, 0xBC, 0x46, 0xBA, 0xFD, 0xBC, 0x47, /* 0xC8-0xCB */
+	0xF4, 0xD8, 0xF4, 0xD9, 0xBC, 0x48, 0xBC, 0x49, /* 0xCC-0xCF */
+	0xBC, 0x4A, 0xBC, 0x4B, 0xBC, 0x4C, 0xBC, 0x4D, /* 0xD0-0xD3 */
+	0xBC, 0x4E, 0xB8, 0xE2, 0xCC, 0xC7, 0xF4, 0xDC, /* 0xD4-0xD7 */
+	0xBC, 0x4F, 0xB2, 0xDA, 0xBC, 0x50, 0xBC, 0x51, /* 0xD8-0xDB */
+	0xC3, 0xD3, 0xBC, 0x52, 0xBC, 0x53, 0xD4, 0xE3, /* 0xDC-0xDF */
+	0xBF, 0xB7, 0xBC, 0x54, 0xBC, 0x55, 0xBC, 0x56, /* 0xE0-0xE3 */
+	0xBC, 0x57, 0xBC, 0x58, 0xBC, 0x59, 0xBC, 0x5A, /* 0xE4-0xE7 */
+	0xF4, 0xDD, 0xBC, 0x5B, 0xBC, 0x5C, 0xBC, 0x5D, /* 0xE8-0xEB */
+	0xBC, 0x5E, 0xBC, 0x5F, 0xBC, 0x60, 0xC5, 0xB4, /* 0xEC-0xEF */
+	0xBC, 0x61, 0xBC, 0x62, 0xBC, 0x63, 0xBC, 0x64, /* 0xF0-0xF3 */
+	0xBC, 0x65, 0xBC, 0x66, 0xBC, 0x67, 0xBC, 0x68, /* 0xF4-0xF7 */
+	0xF4, 0xE9, 0xBC, 0x69, 0xBC, 0x6A, 0xCF, 0xB5, /* 0xF8-0xFB */
+	0xBC, 0x6B, 0xBC, 0x6C, 0xBC, 0x6D, 0xBC, 0x6E, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7D[512] = {
+	0xBC, 0x6F, 0xBC, 0x70, 0xBC, 0x71, 0xBC, 0x72, /* 0x00-0x03 */
+	0xBC, 0x73, 0xBC, 0x74, 0xBC, 0x75, 0xBC, 0x76, /* 0x04-0x07 */
+	0xBC, 0x77, 0xBC, 0x78, 0xCE, 0xC9, 0xBC, 0x79, /* 0x08-0x0B */
+	0xBC, 0x7A, 0xBC, 0x7B, 0xBC, 0x7C, 0xBC, 0x7D, /* 0x0C-0x0F */
+	0xBC, 0x7E, 0xBC, 0x80, 0xBC, 0x81, 0xBC, 0x82, /* 0x10-0x13 */
+	0xBC, 0x83, 0xBC, 0x84, 0xBC, 0x85, 0xBC, 0x86, /* 0x14-0x17 */
+	0xBC, 0x87, 0xBC, 0x88, 0xBC, 0x89, 0xBC, 0x8A, /* 0x18-0x1B */
+	0xBC, 0x8B, 0xBC, 0x8C, 0xBC, 0x8D, 0xBC, 0x8E, /* 0x1C-0x1F */
+	0xCB, 0xD8, 0xBC, 0x8F, 0xCB, 0xF7, 0xBC, 0x90, /* 0x20-0x23 */
+	0xBC, 0x91, 0xBC, 0x92, 0xBC, 0x93, 0xBD, 0xF4, /* 0x24-0x27 */
+	0xBC, 0x94, 0xBC, 0x95, 0xBC, 0x96, 0xD7, 0xCF, /* 0x28-0x2B */
+	0xBC, 0x97, 0xBC, 0x98, 0xBC, 0x99, 0xC0, 0xDB, /* 0x2C-0x2F */
+	0xBC, 0x9A, 0xBC, 0x9B, 0xBC, 0x9C, 0xBC, 0x9D, /* 0x30-0x33 */
+	0xBC, 0x9E, 0xBC, 0x9F, 0xBC, 0xA0, 0xBD, 0x40, /* 0x34-0x37 */
+	0xBD, 0x41, 0xBD, 0x42, 0xBD, 0x43, 0xBD, 0x44, /* 0x38-0x3B */
+	0xBD, 0x45, 0xBD, 0x46, 0xBD, 0x47, 0xBD, 0x48, /* 0x3C-0x3F */
+	0xBD, 0x49, 0xBD, 0x4A, 0xBD, 0x4B, 0xBD, 0x4C, /* 0x40-0x43 */
+	0xBD, 0x4D, 0xBD, 0x4E, 0xBD, 0x4F, 0xBD, 0x50, /* 0x44-0x47 */
+	0xBD, 0x51, 0xBD, 0x52, 0xBD, 0x53, 0xBD, 0x54, /* 0x48-0x4B */
+	0xBD, 0x55, 0xBD, 0x56, 0xBD, 0x57, 0xBD, 0x58, /* 0x4C-0x4F */
+	0xBD, 0x59, 0xBD, 0x5A, 0xBD, 0x5B, 0xBD, 0x5C, /* 0x50-0x53 */
+	0xBD, 0x5D, 0xBD, 0x5E, 0xBD, 0x5F, 0xBD, 0x60, /* 0x54-0x57 */
+	0xBD, 0x61, 0xBD, 0x62, 0xBD, 0x63, 0xBD, 0x64, /* 0x58-0x5B */
+	0xBD, 0x65, 0xBD, 0x66, 0xBD, 0x67, 0xBD, 0x68, /* 0x5C-0x5F */
+	0xBD, 0x69, 0xBD, 0x6A, 0xBD, 0x6B, 0xBD, 0x6C, /* 0x60-0x63 */
+	0xBD, 0x6D, 0xBD, 0x6E, 0xBD, 0x6F, 0xBD, 0x70, /* 0x64-0x67 */
+	0xBD, 0x71, 0xBD, 0x72, 0xBD, 0x73, 0xBD, 0x74, /* 0x68-0x6B */
+	0xBD, 0x75, 0xBD, 0x76, 0xD0, 0xF5, 0xBD, 0x77, /* 0x6C-0x6F */
+	0xBD, 0x78, 0xBD, 0x79, 0xBD, 0x7A, 0xBD, 0x7B, /* 0x70-0x73 */
+	0xBD, 0x7C, 0xBD, 0x7D, 0xBD, 0x7E, 0xF4, 0xEA, /* 0x74-0x77 */
+	0xBD, 0x80, 0xBD, 0x81, 0xBD, 0x82, 0xBD, 0x83, /* 0x78-0x7B */
+	0xBD, 0x84, 0xBD, 0x85, 0xBD, 0x86, 0xBD, 0x87, /* 0x7C-0x7F */
+	
+	0xBD, 0x88, 0xBD, 0x89, 0xBD, 0x8A, 0xBD, 0x8B, /* 0x80-0x83 */
+	0xBD, 0x8C, 0xBD, 0x8D, 0xBD, 0x8E, 0xBD, 0x8F, /* 0x84-0x87 */
+	0xBD, 0x90, 0xBD, 0x91, 0xBD, 0x92, 0xBD, 0x93, /* 0x88-0x8B */
+	0xBD, 0x94, 0xBD, 0x95, 0xBD, 0x96, 0xBD, 0x97, /* 0x8C-0x8F */
+	0xBD, 0x98, 0xBD, 0x99, 0xBD, 0x9A, 0xBD, 0x9B, /* 0x90-0x93 */
+	0xBD, 0x9C, 0xBD, 0x9D, 0xBD, 0x9E, 0xBD, 0x9F, /* 0x94-0x97 */
+	0xBD, 0xA0, 0xBE, 0x40, 0xBE, 0x41, 0xBE, 0x42, /* 0x98-0x9B */
+	0xBE, 0x43, 0xBE, 0x44, 0xBE, 0x45, 0xBE, 0x46, /* 0x9C-0x9F */
+	0xBE, 0x47, 0xBE, 0x48, 0xBE, 0x49, 0xBE, 0x4A, /* 0xA0-0xA3 */
+	0xBE, 0x4B, 0xBE, 0x4C, 0xF4, 0xEB, 0xBE, 0x4D, /* 0xA4-0xA7 */
+	0xBE, 0x4E, 0xBE, 0x4F, 0xBE, 0x50, 0xBE, 0x51, /* 0xA8-0xAB */
+	0xBE, 0x52, 0xBE, 0x53, 0xF4, 0xEC, 0xBE, 0x54, /* 0xAC-0xAF */
+	0xBE, 0x55, 0xBE, 0x56, 0xBE, 0x57, 0xBE, 0x58, /* 0xB0-0xB3 */
+	0xBE, 0x59, 0xBE, 0x5A, 0xBE, 0x5B, 0xBE, 0x5C, /* 0xB4-0xB7 */
+	0xBE, 0x5D, 0xBE, 0x5E, 0xBE, 0x5F, 0xBE, 0x60, /* 0xB8-0xBB */
+	0xBE, 0x61, 0xBE, 0x62, 0xBE, 0x63, 0xBE, 0x64, /* 0xBC-0xBF */
+	0xBE, 0x65, 0xBE, 0x66, 0xBE, 0x67, 0xBE, 0x68, /* 0xC0-0xC3 */
+	0xBE, 0x69, 0xBE, 0x6A, 0xBE, 0x6B, 0xBE, 0x6C, /* 0xC4-0xC7 */
+	0xBE, 0x6D, 0xBE, 0x6E, 0xBE, 0x6F, 0xBE, 0x70, /* 0xC8-0xCB */
+	0xBE, 0x71, 0xBE, 0x72, 0xBE, 0x73, 0xBE, 0x74, /* 0xCC-0xCF */
+	0xBE, 0x75, 0xBE, 0x76, 0xBE, 0x77, 0xBE, 0x78, /* 0xD0-0xD3 */
+	0xBE, 0x79, 0xBE, 0x7A, 0xBE, 0x7B, 0xBE, 0x7C, /* 0xD4-0xD7 */
+	0xBE, 0x7D, 0xBE, 0x7E, 0xBE, 0x80, 0xBE, 0x81, /* 0xD8-0xDB */
+	0xBE, 0x82, 0xBE, 0x83, 0xBE, 0x84, 0xBE, 0x85, /* 0xDC-0xDF */
+	0xBE, 0x86, 0xBE, 0x87, 0xBE, 0x88, 0xBE, 0x89, /* 0xE0-0xE3 */
+	0xBE, 0x8A, 0xBE, 0x8B, 0xBE, 0x8C, 0xBE, 0x8D, /* 0xE4-0xE7 */
+	0xBE, 0x8E, 0xBE, 0x8F, 0xBE, 0x90, 0xBE, 0x91, /* 0xE8-0xEB */
+	0xBE, 0x92, 0xBE, 0x93, 0xBE, 0x94, 0xBE, 0x95, /* 0xEC-0xEF */
+	0xBE, 0x96, 0xBE, 0x97, 0xBE, 0x98, 0xBE, 0x99, /* 0xF0-0xF3 */
+	0xBE, 0x9A, 0xBE, 0x9B, 0xBE, 0x9C, 0xBE, 0x9D, /* 0xF4-0xF7 */
+	0xBE, 0x9E, 0xBE, 0x9F, 0xBE, 0xA0, 0xBF, 0x40, /* 0xF8-0xFB */
+	0xBF, 0x41, 0xBF, 0x42, 0xBF, 0x43, 0xBF, 0x44, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7E[512] = {
+	0xBF, 0x45, 0xBF, 0x46, 0xBF, 0x47, 0xBF, 0x48, /* 0x00-0x03 */
+	0xBF, 0x49, 0xBF, 0x4A, 0xBF, 0x4B, 0xBF, 0x4C, /* 0x04-0x07 */
+	0xBF, 0x4D, 0xBF, 0x4E, 0xBF, 0x4F, 0xBF, 0x50, /* 0x08-0x0B */
+	0xBF, 0x51, 0xBF, 0x52, 0xBF, 0x53, 0xBF, 0x54, /* 0x0C-0x0F */
+	0xBF, 0x55, 0xBF, 0x56, 0xBF, 0x57, 0xBF, 0x58, /* 0x10-0x13 */
+	0xBF, 0x59, 0xBF, 0x5A, 0xBF, 0x5B, 0xBF, 0x5C, /* 0x14-0x17 */
+	0xBF, 0x5D, 0xBF, 0x5E, 0xBF, 0x5F, 0xBF, 0x60, /* 0x18-0x1B */
+	0xBF, 0x61, 0xBF, 0x62, 0xBF, 0x63, 0xBF, 0x64, /* 0x1C-0x1F */
+	0xBF, 0x65, 0xBF, 0x66, 0xBF, 0x67, 0xBF, 0x68, /* 0x20-0x23 */
+	0xBF, 0x69, 0xBF, 0x6A, 0xBF, 0x6B, 0xBF, 0x6C, /* 0x24-0x27 */
+	0xBF, 0x6D, 0xBF, 0x6E, 0xBF, 0x6F, 0xBF, 0x70, /* 0x28-0x2B */
+	0xBF, 0x71, 0xBF, 0x72, 0xBF, 0x73, 0xBF, 0x74, /* 0x2C-0x2F */
+	0xBF, 0x75, 0xBF, 0x76, 0xBF, 0x77, 0xBF, 0x78, /* 0x30-0x33 */
+	0xBF, 0x79, 0xBF, 0x7A, 0xBF, 0x7B, 0xBF, 0x7C, /* 0x34-0x37 */
+	0xBF, 0x7D, 0xBF, 0x7E, 0xBF, 0x80, 0xF7, 0xE3, /* 0x38-0x3B */
+	0xBF, 0x81, 0xBF, 0x82, 0xBF, 0x83, 0xBF, 0x84, /* 0x3C-0x3F */
+	0xBF, 0x85, 0xB7, 0xB1, 0xBF, 0x86, 0xBF, 0x87, /* 0x40-0x43 */
+	0xBF, 0x88, 0xBF, 0x89, 0xBF, 0x8A, 0xF4, 0xED, /* 0x44-0x47 */
+	0xBF, 0x8B, 0xBF, 0x8C, 0xBF, 0x8D, 0xBF, 0x8E, /* 0x48-0x4B */
+	0xBF, 0x8F, 0xBF, 0x90, 0xBF, 0x91, 0xBF, 0x92, /* 0x4C-0x4F */
+	0xBF, 0x93, 0xBF, 0x94, 0xBF, 0x95, 0xBF, 0x96, /* 0x50-0x53 */
+	0xBF, 0x97, 0xBF, 0x98, 0xBF, 0x99, 0xBF, 0x9A, /* 0x54-0x57 */
+	0xBF, 0x9B, 0xBF, 0x9C, 0xBF, 0x9D, 0xBF, 0x9E, /* 0x58-0x5B */
+	0xBF, 0x9F, 0xBF, 0xA0, 0xC0, 0x40, 0xC0, 0x41, /* 0x5C-0x5F */
+	0xC0, 0x42, 0xC0, 0x43, 0xC0, 0x44, 0xC0, 0x45, /* 0x60-0x63 */
+	0xC0, 0x46, 0xC0, 0x47, 0xC0, 0x48, 0xC0, 0x49, /* 0x64-0x67 */
+	0xC0, 0x4A, 0xC0, 0x4B, 0xC0, 0x4C, 0xC0, 0x4D, /* 0x68-0x6B */
+	0xC0, 0x4E, 0xC0, 0x4F, 0xC0, 0x50, 0xC0, 0x51, /* 0x6C-0x6F */
+	0xC0, 0x52, 0xC0, 0x53, 0xC0, 0x54, 0xC0, 0x55, /* 0x70-0x73 */
+	0xC0, 0x56, 0xC0, 0x57, 0xC0, 0x58, 0xC0, 0x59, /* 0x74-0x77 */
+	0xC0, 0x5A, 0xC0, 0x5B, 0xC0, 0x5C, 0xC0, 0x5D, /* 0x78-0x7B */
+	0xC0, 0x5E, 0xC0, 0x5F, 0xC0, 0x60, 0xC0, 0x61, /* 0x7C-0x7F */
+	
+	0xC0, 0x62, 0xC0, 0x63, 0xD7, 0xEB, 0xC0, 0x64, /* 0x80-0x83 */
+	0xC0, 0x65, 0xC0, 0x66, 0xC0, 0x67, 0xC0, 0x68, /* 0x84-0x87 */
+	0xC0, 0x69, 0xC0, 0x6A, 0xC0, 0x6B, 0xC0, 0x6C, /* 0x88-0x8B */
+	0xC0, 0x6D, 0xC0, 0x6E, 0xC0, 0x6F, 0xC0, 0x70, /* 0x8C-0x8F */
+	0xC0, 0x71, 0xC0, 0x72, 0xC0, 0x73, 0xC0, 0x74, /* 0x90-0x93 */
+	0xC0, 0x75, 0xC0, 0x76, 0xC0, 0x77, 0xC0, 0x78, /* 0x94-0x97 */
+	0xC0, 0x79, 0xC0, 0x7A, 0xC0, 0x7B, 0xF4, 0xEE, /* 0x98-0x9B */
+	0xC0, 0x7C, 0xC0, 0x7D, 0xC0, 0x7E, 0xE6, 0xF9, /* 0x9C-0x9F */
+	0xBE, 0xC0, 0xE6, 0xFA, 0xBA, 0xEC, 0xE6, 0xFB, /* 0xA0-0xA3 */
+	0xCF, 0xCB, 0xE6, 0xFC, 0xD4, 0xBC, 0xBC, 0xB6, /* 0xA4-0xA7 */
+	0xE6, 0xFD, 0xE6, 0xFE, 0xBC, 0xCD, 0xC8, 0xD2, /* 0xA8-0xAB */
+	0xCE, 0xB3, 0xE7, 0xA1, 0xC0, 0x80, 0xB4, 0xBF, /* 0xAC-0xAF */
+	0xE7, 0xA2, 0xC9, 0xB4, 0xB8, 0xD9, 0xC4, 0xC9, /* 0xB0-0xB3 */
+	0xC0, 0x81, 0xD7, 0xDD, 0xC2, 0xDA, 0xB7, 0xD7, /* 0xB4-0xB7 */
+	0xD6, 0xBD, 0xCE, 0xC6, 0xB7, 0xC4, 0xC0, 0x82, /* 0xB8-0xBB */
+	0xC0, 0x83, 0xC5, 0xA6, 0xE7, 0xA3, 0xCF, 0xDF, /* 0xBC-0xBF */
+	0xE7, 0xA4, 0xE7, 0xA5, 0xE7, 0xA6, 0xC1, 0xB7, /* 0xC0-0xC3 */
+	0xD7, 0xE9, 0xC9, 0xF0, 0xCF, 0xB8, 0xD6, 0xAF, /* 0xC4-0xC7 */
+	0xD6, 0xD5, 0xE7, 0xA7, 0xB0, 0xED, 0xE7, 0xA8, /* 0xC8-0xCB */
+	0xE7, 0xA9, 0xC9, 0xDC, 0xD2, 0xEF, 0xBE, 0xAD, /* 0xCC-0xCF */
+	0xE7, 0xAA, 0xB0, 0xF3, 0xC8, 0xDE, 0xBD, 0xE1, /* 0xD0-0xD3 */
+	0xE7, 0xAB, 0xC8, 0xC6, 0xC0, 0x84, 0xE7, 0xAC, /* 0xD4-0xD7 */
+	0xBB, 0xE6, 0xB8, 0xF8, 0xD1, 0xA4, 0xE7, 0xAD, /* 0xD8-0xDB */
+	0xC2, 0xE7, 0xBE, 0xF8, 0xBD, 0xCA, 0xCD, 0xB3, /* 0xDC-0xDF */
+	0xE7, 0xAE, 0xE7, 0xAF, 0xBE, 0xEE, 0xD0, 0xE5, /* 0xE0-0xE3 */
+	0xC0, 0x85, 0xCB, 0xE7, 0xCC, 0xD0, 0xBC, 0xCC, /* 0xE4-0xE7 */
+	0xE7, 0xB0, 0xBC, 0xA8, 0xD0, 0xF7, 0xE7, 0xB1, /* 0xE8-0xEB */
+	0xC0, 0x86, 0xD0, 0xF8, 0xE7, 0xB2, 0xE7, 0xB3, /* 0xEC-0xEF */
+	0xB4, 0xC2, 0xE7, 0xB4, 0xE7, 0xB5, 0xC9, 0xFE, /* 0xF0-0xF3 */
+	0xCE, 0xAC, 0xC3, 0xE0, 0xE7, 0xB7, 0xB1, 0xC1, /* 0xF4-0xF7 */
+	0xB3, 0xF1, 0xC0, 0x87, 0xE7, 0xB8, 0xE7, 0xB9, /* 0xF8-0xFB */
+	0xD7, 0xDB, 0xD5, 0xC0, 0xE7, 0xBA, 0xC2, 0xCC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7F[512] = {
+	0xD7, 0xBA, 0xE7, 0xBB, 0xE7, 0xBC, 0xE7, 0xBD, /* 0x00-0x03 */
+	0xBC, 0xEA, 0xC3, 0xE5, 0xC0, 0xC2, 0xE7, 0xBE, /* 0x04-0x07 */
+	0xE7, 0xBF, 0xBC, 0xA9, 0xC0, 0x88, 0xE7, 0xC0, /* 0x08-0x0B */
+	0xE7, 0xC1, 0xE7, 0xB6, 0xB6, 0xD0, 0xE7, 0xC2, /* 0x0C-0x0F */
+	0xC0, 0x89, 0xE7, 0xC3, 0xE7, 0xC4, 0xBB, 0xBA, /* 0x10-0x13 */
+	0xB5, 0xDE, 0xC2, 0xC6, 0xB1, 0xE0, 0xE7, 0xC5, /* 0x14-0x17 */
+	0xD4, 0xB5, 0xE7, 0xC6, 0xB8, 0xBF, 0xE7, 0xC8, /* 0x18-0x1B */
+	0xE7, 0xC7, 0xB7, 0xEC, 0xC0, 0x8A, 0xE7, 0xC9, /* 0x1C-0x1F */
+	0xB2, 0xF8, 0xE7, 0xCA, 0xE7, 0xCB, 0xE7, 0xCC, /* 0x20-0x23 */
+	0xE7, 0xCD, 0xE7, 0xCE, 0xE7, 0xCF, 0xE7, 0xD0, /* 0x24-0x27 */
+	0xD3, 0xA7, 0xCB, 0xF5, 0xE7, 0xD1, 0xE7, 0xD2, /* 0x28-0x2B */
+	0xE7, 0xD3, 0xE7, 0xD4, 0xC9, 0xC9, 0xE7, 0xD5, /* 0x2C-0x2F */
+	0xE7, 0xD6, 0xE7, 0xD7, 0xE7, 0xD8, 0xE7, 0xD9, /* 0x30-0x33 */
+	0xBD, 0xC9, 0xE7, 0xDA, 0xF3, 0xBE, 0xC0, 0x8B, /* 0x34-0x37 */
+	0xB8, 0xD7, 0xC0, 0x8C, 0xC8, 0xB1, 0xC0, 0x8D, /* 0x38-0x3B */
+	0xC0, 0x8E, 0xC0, 0x8F, 0xC0, 0x90, 0xC0, 0x91, /* 0x3C-0x3F */
+	0xC0, 0x92, 0xC0, 0x93, 0xF3, 0xBF, 0xC0, 0x94, /* 0x40-0x43 */
+	0xF3, 0xC0, 0xF3, 0xC1, 0xC0, 0x95, 0xC0, 0x96, /* 0x44-0x47 */
+	0xC0, 0x97, 0xC0, 0x98, 0xC0, 0x99, 0xC0, 0x9A, /* 0x48-0x4B */
+	0xC0, 0x9B, 0xC0, 0x9C, 0xC0, 0x9D, 0xC0, 0x9E, /* 0x4C-0x4F */
+	0xB9, 0xDE, 0xCD, 0xF8, 0xC0, 0x9F, 0xC0, 0xA0, /* 0x50-0x53 */
+	0xD8, 0xE8, 0xBA, 0xB1, 0xC1, 0x40, 0xC2, 0xDE, /* 0x54-0x57 */
+	0xEE, 0xB7, 0xC1, 0x41, 0xB7, 0xA3, 0xC1, 0x42, /* 0x58-0x5B */
+	0xC1, 0x43, 0xC1, 0x44, 0xC1, 0x45, 0xEE, 0xB9, /* 0x5C-0x5F */
+	0xC1, 0x46, 0xEE, 0xB8, 0xB0, 0xD5, 0xC1, 0x47, /* 0x60-0x63 */
+	0xC1, 0x48, 0xC1, 0x49, 0xC1, 0x4A, 0xC1, 0x4B, /* 0x64-0x67 */
+	0xEE, 0xBB, 0xD5, 0xD6, 0xD7, 0xEF, 0xC1, 0x4C, /* 0x68-0x6B */
+	0xC1, 0x4D, 0xC1, 0x4E, 0xD6, 0xC3, 0xC1, 0x4F, /* 0x6C-0x6F */
+	0xC1, 0x50, 0xEE, 0xBD, 0xCA, 0xF0, 0xC1, 0x51, /* 0x70-0x73 */
+	0xEE, 0xBC, 0xC1, 0x52, 0xC1, 0x53, 0xC1, 0x54, /* 0x74-0x77 */
+	0xC1, 0x55, 0xEE, 0xBE, 0xC1, 0x56, 0xC1, 0x57, /* 0x78-0x7B */
+	0xC1, 0x58, 0xC1, 0x59, 0xEE, 0xC0, 0xC1, 0x5A, /* 0x7C-0x7F */
+	
+	0xC1, 0x5B, 0xEE, 0xBF, 0xC1, 0x5C, 0xC1, 0x5D, /* 0x80-0x83 */
+	0xC1, 0x5E, 0xC1, 0x5F, 0xC1, 0x60, 0xC1, 0x61, /* 0x84-0x87 */
+	0xC1, 0x62, 0xC1, 0x63, 0xD1, 0xF2, 0xC1, 0x64, /* 0x88-0x8B */
+	0xC7, 0xBC, 0xC1, 0x65, 0xC3, 0xC0, 0xC1, 0x66, /* 0x8C-0x8F */
+	0xC1, 0x67, 0xC1, 0x68, 0xC1, 0x69, 0xC1, 0x6A, /* 0x90-0x93 */
+	0xB8, 0xE1, 0xC1, 0x6B, 0xC1, 0x6C, 0xC1, 0x6D, /* 0x94-0x97 */
+	0xC1, 0x6E, 0xC1, 0x6F, 0xC1, 0xE7, 0xC1, 0x70, /* 0x98-0x9B */
+	0xC1, 0x71, 0xF4, 0xC6, 0xD0, 0xDF, 0xF4, 0xC7, /* 0x9C-0x9F */
+	0xC1, 0x72, 0xCF, 0xDB, 0xC1, 0x73, 0xC1, 0x74, /* 0xA0-0xA3 */
+	0xC8, 0xBA, 0xC1, 0x75, 0xC1, 0x76, 0xF4, 0xC8, /* 0xA4-0xA7 */
+	0xC1, 0x77, 0xC1, 0x78, 0xC1, 0x79, 0xC1, 0x7A, /* 0xA8-0xAB */
+	0xC1, 0x7B, 0xC1, 0x7C, 0xC1, 0x7D, 0xF4, 0xC9, /* 0xAC-0xAF */
+	0xF4, 0xCA, 0xC1, 0x7E, 0xF4, 0xCB, 0xC1, 0x80, /* 0xB0-0xB3 */
+	0xC1, 0x81, 0xC1, 0x82, 0xC1, 0x83, 0xC1, 0x84, /* 0xB4-0xB7 */
+	0xD9, 0xFA, 0xB8, 0xFE, 0xC1, 0x85, 0xC1, 0x86, /* 0xB8-0xBB */
+	0xE5, 0xF1, 0xD3, 0xF0, 0xC1, 0x87, 0xF4, 0xE0, /* 0xBC-0xBF */
+	0xC1, 0x88, 0xCE, 0xCC, 0xC1, 0x89, 0xC1, 0x8A, /* 0xC0-0xC3 */
+	0xC1, 0x8B, 0xB3, 0xE1, 0xC1, 0x8C, 0xC1, 0x8D, /* 0xC4-0xC7 */
+	0xC1, 0x8E, 0xC1, 0x8F, 0xF1, 0xB4, 0xC1, 0x90, /* 0xC8-0xCB */
+	0xD2, 0xEE, 0xC1, 0x91, 0xF4, 0xE1, 0xC1, 0x92, /* 0xCC-0xCF */
+	0xC1, 0x93, 0xC1, 0x94, 0xC1, 0x95, 0xC1, 0x96, /* 0xD0-0xD3 */
+	0xCF, 0xE8, 0xF4, 0xE2, 0xC1, 0x97, 0xC1, 0x98, /* 0xD4-0xD7 */
+	0xC7, 0xCC, 0xC1, 0x99, 0xC1, 0x9A, 0xC1, 0x9B, /* 0xD8-0xDB */
+	0xC1, 0x9C, 0xC1, 0x9D, 0xC1, 0x9E, 0xB5, 0xD4, /* 0xDC-0xDF */
+	0xB4, 0xE4, 0xF4, 0xE4, 0xC1, 0x9F, 0xC1, 0xA0, /* 0xE0-0xE3 */
+	0xC2, 0x40, 0xF4, 0xE3, 0xF4, 0xE5, 0xC2, 0x41, /* 0xE4-0xE7 */
+	0xC2, 0x42, 0xF4, 0xE6, 0xC2, 0x43, 0xC2, 0x44, /* 0xE8-0xEB */
+	0xC2, 0x45, 0xC2, 0x46, 0xF4, 0xE7, 0xC2, 0x47, /* 0xEC-0xEF */
+	0xBA, 0xB2, 0xB0, 0xBF, 0xC2, 0x48, 0xF4, 0xE8, /* 0xF0-0xF3 */
+	0xC2, 0x49, 0xC2, 0x4A, 0xC2, 0x4B, 0xC2, 0x4C, /* 0xF4-0xF7 */
+	0xC2, 0x4D, 0xC2, 0x4E, 0xC2, 0x4F, 0xB7, 0xAD, /* 0xF8-0xFB */
+	0xD2, 0xED, 0xC2, 0x50, 0xC2, 0x51, 0xC2, 0x52, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_80[512] = {
+	0xD2, 0xAB, 0xC0, 0xCF, 0xC2, 0x53, 0xBF, 0xBC, /* 0x00-0x03 */
+	0xEB, 0xA3, 0xD5, 0xDF, 0xEA, 0xC8, 0xC2, 0x54, /* 0x04-0x07 */
+	0xC2, 0x55, 0xC2, 0x56, 0xC2, 0x57, 0xF1, 0xF3, /* 0x08-0x0B */
+	0xB6, 0xF8, 0xCB, 0xA3, 0xC2, 0x58, 0xC2, 0x59, /* 0x0C-0x0F */
+	0xC4, 0xCD, 0xC2, 0x5A, 0xF1, 0xE7, 0xC2, 0x5B, /* 0x10-0x13 */
+	0xF1, 0xE8, 0xB8, 0xFB, 0xF1, 0xE9, 0xBA, 0xC4, /* 0x14-0x17 */
+	0xD4, 0xC5, 0xB0, 0xD2, 0xC2, 0x5C, 0xC2, 0x5D, /* 0x18-0x1B */
+	0xF1, 0xEA, 0xC2, 0x5E, 0xC2, 0x5F, 0xC2, 0x60, /* 0x1C-0x1F */
+	0xF1, 0xEB, 0xC2, 0x61, 0xF1, 0xEC, 0xC2, 0x62, /* 0x20-0x23 */
+	0xC2, 0x63, 0xF1, 0xED, 0xF1, 0xEE, 0xF1, 0xEF, /* 0x24-0x27 */
+	0xF1, 0xF1, 0xF1, 0xF0, 0xC5, 0xD5, 0xC2, 0x64, /* 0x28-0x2B */
+	0xC2, 0x65, 0xC2, 0x66, 0xC2, 0x67, 0xC2, 0x68, /* 0x2C-0x2F */
+	0xC2, 0x69, 0xF1, 0xF2, 0xC2, 0x6A, 0xB6, 0xFA, /* 0x30-0x33 */
+	0xC2, 0x6B, 0xF1, 0xF4, 0xD2, 0xAE, 0xDE, 0xC7, /* 0x34-0x37 */
+	0xCB, 0xCA, 0xC2, 0x6C, 0xC2, 0x6D, 0xB3, 0xDC, /* 0x38-0x3B */
+	0xC2, 0x6E, 0xB5, 0xA2, 0xC2, 0x6F, 0xB9, 0xA2, /* 0x3C-0x3F */
+	0xC2, 0x70, 0xC2, 0x71, 0xC4, 0xF4, 0xF1, 0xF5, /* 0x40-0x43 */
+	0xC2, 0x72, 0xC2, 0x73, 0xF1, 0xF6, 0xC2, 0x74, /* 0x44-0x47 */
+	0xC2, 0x75, 0xC2, 0x76, 0xC1, 0xC4, 0xC1, 0xFB, /* 0x48-0x4B */
+	0xD6, 0xB0, 0xF1, 0xF7, 0xC2, 0x77, 0xC2, 0x78, /* 0x4C-0x4F */
+	0xC2, 0x79, 0xC2, 0x7A, 0xF1, 0xF8, 0xC2, 0x7B, /* 0x50-0x53 */
+	0xC1, 0xAA, 0xC2, 0x7C, 0xC2, 0x7D, 0xC2, 0x7E, /* 0x54-0x57 */
+	0xC6, 0xB8, 0xC2, 0x80, 0xBE, 0xDB, 0xC2, 0x81, /* 0x58-0x5B */
+	0xC2, 0x82, 0xC2, 0x83, 0xC2, 0x84, 0xC2, 0x85, /* 0x5C-0x5F */
+	0xC2, 0x86, 0xC2, 0x87, 0xC2, 0x88, 0xC2, 0x89, /* 0x60-0x63 */
+	0xC2, 0x8A, 0xC2, 0x8B, 0xC2, 0x8C, 0xC2, 0x8D, /* 0x64-0x67 */
+	0xC2, 0x8E, 0xF1, 0xF9, 0xB4, 0xCF, 0xC2, 0x8F, /* 0x68-0x6B */
+	0xC2, 0x90, 0xC2, 0x91, 0xC2, 0x92, 0xC2, 0x93, /* 0x6C-0x6F */
+	0xC2, 0x94, 0xF1, 0xFA, 0xC2, 0x95, 0xC2, 0x96, /* 0x70-0x73 */
+	0xC2, 0x97, 0xC2, 0x98, 0xC2, 0x99, 0xC2, 0x9A, /* 0x74-0x77 */
+	0xC2, 0x9B, 0xC2, 0x9C, 0xC2, 0x9D, 0xC2, 0x9E, /* 0x78-0x7B */
+	0xC2, 0x9F, 0xC2, 0xA0, 0xC3, 0x40, 0xED, 0xB2, /* 0x7C-0x7F */
+	
+	0xED, 0xB1, 0xC3, 0x41, 0xC3, 0x42, 0xCB, 0xE0, /* 0x80-0x83 */
+	0xD2, 0xDE, 0xC3, 0x43, 0xCB, 0xC1, 0xD5, 0xD8, /* 0x84-0x87 */
+	0xC3, 0x44, 0xC8, 0xE2, 0xC3, 0x45, 0xC0, 0xDF, /* 0x88-0x8B */
+	0xBC, 0xA1, 0xC3, 0x46, 0xC3, 0x47, 0xC3, 0x48, /* 0x8C-0x8F */
+	0xC3, 0x49, 0xC3, 0x4A, 0xC3, 0x4B, 0xEB, 0xC1, /* 0x90-0x93 */
+	0xC3, 0x4C, 0xC3, 0x4D, 0xD0, 0xA4, 0xC3, 0x4E, /* 0x94-0x97 */
+	0xD6, 0xE2, 0xC3, 0x4F, 0xB6, 0xC7, 0xB8, 0xD8, /* 0x98-0x9B */
+	0xEB, 0xC0, 0xB8, 0xCE, 0xC3, 0x50, 0xEB, 0xBF, /* 0x9C-0x9F */
+	0xB3, 0xA6, 0xB9, 0xC9, 0xD6, 0xAB, 0xC3, 0x51, /* 0xA0-0xA3 */
+	0xB7, 0xF4, 0xB7, 0xCA, 0xC3, 0x52, 0xC3, 0x53, /* 0xA4-0xA7 */
+	0xC3, 0x54, 0xBC, 0xE7, 0xB7, 0xBE, 0xEB, 0xC6, /* 0xA8-0xAB */
+	0xC3, 0x55, 0xEB, 0xC7, 0xB0, 0xB9, 0xBF, 0xCF, /* 0xAC-0xAF */
+	0xC3, 0x56, 0xEB, 0xC5, 0xD3, 0xFD, 0xC3, 0x57, /* 0xB0-0xB3 */
+	0xEB, 0xC8, 0xC3, 0x58, 0xC3, 0x59, 0xEB, 0xC9, /* 0xB4-0xB7 */
+	0xC3, 0x5A, 0xC3, 0x5B, 0xB7, 0xCE, 0xC3, 0x5C, /* 0xB8-0xBB */
+	0xEB, 0xC2, 0xEB, 0xC4, 0xC9, 0xF6, 0xD6, 0xD7, /* 0xBC-0xBF */
+	0xD5, 0xCD, 0xD0, 0xB2, 0xEB, 0xCF, 0xCE, 0xB8, /* 0xC0-0xC3 */
+	0xEB, 0xD0, 0xC3, 0x5D, 0xB5, 0xA8, 0xC3, 0x5E, /* 0xC4-0xC7 */
+	0xC3, 0x5F, 0xC3, 0x60, 0xC3, 0x61, 0xC3, 0x62, /* 0xC8-0xCB */
+	0xB1, 0xB3, 0xEB, 0xD2, 0xCC, 0xA5, 0xC3, 0x63, /* 0xCC-0xCF */
+	0xC3, 0x64, 0xC3, 0x65, 0xC3, 0x66, 0xC3, 0x67, /* 0xD0-0xD3 */
+	0xC3, 0x68, 0xC3, 0x69, 0xC5, 0xD6, 0xEB, 0xD3, /* 0xD4-0xD7 */
+	0xC3, 0x6A, 0xEB, 0xD1, 0xC5, 0xDF, 0xEB, 0xCE, /* 0xD8-0xDB */
+	0xCA, 0xA4, 0xEB, 0xD5, 0xB0, 0xFB, 0xC3, 0x6B, /* 0xDC-0xDF */
+	0xC3, 0x6C, 0xBA, 0xFA, 0xC3, 0x6D, 0xC3, 0x6E, /* 0xE0-0xE3 */
+	0xD8, 0xB7, 0xF1, 0xE3, 0xC3, 0x6F, 0xEB, 0xCA, /* 0xE4-0xE7 */
+	0xEB, 0xCB, 0xEB, 0xCC, 0xEB, 0xCD, 0xEB, 0xD6, /* 0xE8-0xEB */
+	0xE6, 0xC0, 0xEB, 0xD9, 0xC3, 0x70, 0xBF, 0xE8, /* 0xEC-0xEF */
+	0xD2, 0xC8, 0xEB, 0xD7, 0xEB, 0xDC, 0xB8, 0xEC, /* 0xF0-0xF3 */
+	0xEB, 0xD8, 0xC3, 0x71, 0xBD, 0xBA, 0xC3, 0x72, /* 0xF4-0xF7 */
+	0xD0, 0xD8, 0xC3, 0x73, 0xB0, 0xB7, 0xC3, 0x74, /* 0xF8-0xFB */
+	0xEB, 0xDD, 0xC4, 0xDC, 0xC3, 0x75, 0xC3, 0x76, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_81[512] = {
+	0xC3, 0x77, 0xC3, 0x78, 0xD6, 0xAC, 0xC3, 0x79, /* 0x00-0x03 */
+	0xC3, 0x7A, 0xC3, 0x7B, 0xB4, 0xE0, 0xC3, 0x7C, /* 0x04-0x07 */
+	0xC3, 0x7D, 0xC2, 0xF6, 0xBC, 0xB9, 0xC3, 0x7E, /* 0x08-0x0B */
+	0xC3, 0x80, 0xEB, 0xDA, 0xEB, 0xDB, 0xD4, 0xE0, /* 0x0C-0x0F */
+	0xC6, 0xEA, 0xC4, 0xD4, 0xEB, 0xDF, 0xC5, 0xA7, /* 0x10-0x13 */
+	0xD9, 0xF5, 0xC3, 0x81, 0xB2, 0xB1, 0xC3, 0x82, /* 0x14-0x17 */
+	0xEB, 0xE4, 0xC3, 0x83, 0xBD, 0xC5, 0xC3, 0x84, /* 0x18-0x1B */
+	0xC3, 0x85, 0xC3, 0x86, 0xEB, 0xE2, 0xC3, 0x87, /* 0x1C-0x1F */
+	0xC3, 0x88, 0xC3, 0x89, 0xC3, 0x8A, 0xC3, 0x8B, /* 0x20-0x23 */
+	0xC3, 0x8C, 0xC3, 0x8D, 0xC3, 0x8E, 0xC3, 0x8F, /* 0x24-0x27 */
+	0xC3, 0x90, 0xC3, 0x91, 0xC3, 0x92, 0xC3, 0x93, /* 0x28-0x2B */
+	0xEB, 0xE3, 0xC3, 0x94, 0xC3, 0x95, 0xB8, 0xAC, /* 0x2C-0x2F */
+	0xC3, 0x96, 0xCD, 0xD1, 0xEB, 0xE5, 0xC3, 0x97, /* 0x30-0x33 */
+	0xC3, 0x98, 0xC3, 0x99, 0xEB, 0xE1, 0xC3, 0x9A, /* 0x34-0x37 */
+	0xC1, 0xB3, 0xC3, 0x9B, 0xC3, 0x9C, 0xC3, 0x9D, /* 0x38-0x3B */
+	0xC3, 0x9E, 0xC3, 0x9F, 0xC6, 0xA2, 0xC3, 0xA0, /* 0x3C-0x3F */
+	0xC4, 0x40, 0xC4, 0x41, 0xC4, 0x42, 0xC4, 0x43, /* 0x40-0x43 */
+	0xC4, 0x44, 0xC4, 0x45, 0xCC, 0xF3, 0xC4, 0x46, /* 0x44-0x47 */
+	0xEB, 0xE6, 0xC4, 0x47, 0xC0, 0xB0, 0xD2, 0xB8, /* 0x48-0x4B */
+	0xEB, 0xE7, 0xC4, 0x48, 0xC4, 0x49, 0xC4, 0x4A, /* 0x4C-0x4F */
+	0xB8, 0xAF, 0xB8, 0xAD, 0xC4, 0x4B, 0xEB, 0xE8, /* 0x50-0x53 */
+	0xC7, 0xBB, 0xCD, 0xF3, 0xC4, 0x4C, 0xC4, 0x4D, /* 0x54-0x57 */
+	0xC4, 0x4E, 0xEB, 0xEA, 0xEB, 0xEB, 0xC4, 0x4F, /* 0x58-0x5B */
+	0xC4, 0x50, 0xC4, 0x51, 0xC4, 0x52, 0xC4, 0x53, /* 0x5C-0x5F */
+	0xEB, 0xED, 0xC4, 0x54, 0xC4, 0x55, 0xC4, 0x56, /* 0x60-0x63 */
+	0xC4, 0x57, 0xD0, 0xC8, 0xC4, 0x58, 0xEB, 0xF2, /* 0x64-0x67 */
+	0xC4, 0x59, 0xEB, 0xEE, 0xC4, 0x5A, 0xC4, 0x5B, /* 0x68-0x6B */
+	0xC4, 0x5C, 0xEB, 0xF1, 0xC8, 0xF9, 0xC4, 0x5D, /* 0x6C-0x6F */
+	0xD1, 0xFC, 0xEB, 0xEC, 0xC4, 0x5E, 0xC4, 0x5F, /* 0x70-0x73 */
+	0xEB, 0xE9, 0xC4, 0x60, 0xC4, 0x61, 0xC4, 0x62, /* 0x74-0x77 */
+	0xC4, 0x63, 0xB8, 0xB9, 0xCF, 0xD9, 0xC4, 0xE5, /* 0x78-0x7B */
+	0xEB, 0xEF, 0xEB, 0xF0, 0xCC, 0xDA, 0xCD, 0xC8, /* 0x7C-0x7F */
+	
+	0xB0, 0xF2, 0xC4, 0x64, 0xEB, 0xF6, 0xC4, 0x65, /* 0x80-0x83 */
+	0xC4, 0x66, 0xC4, 0x67, 0xC4, 0x68, 0xC4, 0x69, /* 0x84-0x87 */
+	0xEB, 0xF5, 0xC4, 0x6A, 0xB2, 0xB2, 0xC4, 0x6B, /* 0x88-0x8B */
+	0xC4, 0x6C, 0xC4, 0x6D, 0xC4, 0x6E, 0xB8, 0xE0, /* 0x8C-0x8F */
+	0xC4, 0x6F, 0xEB, 0xF7, 0xC4, 0x70, 0xC4, 0x71, /* 0x90-0x93 */
+	0xC4, 0x72, 0xC4, 0x73, 0xC4, 0x74, 0xC4, 0x75, /* 0x94-0x97 */
+	0xB1, 0xEC, 0xC4, 0x76, 0xC4, 0x77, 0xCC, 0xC5, /* 0x98-0x9B */
+	0xC4, 0xA4, 0xCF, 0xA5, 0xC4, 0x78, 0xC4, 0x79, /* 0x9C-0x9F */
+	0xC4, 0x7A, 0xC4, 0x7B, 0xC4, 0x7C, 0xEB, 0xF9, /* 0xA0-0xA3 */
+	0xC4, 0x7D, 0xC4, 0x7E, 0xEC, 0xA2, 0xC4, 0x80, /* 0xA4-0xA7 */
+	0xC5, 0xF2, 0xC4, 0x81, 0xEB, 0xFA, 0xC4, 0x82, /* 0xA8-0xAB */
+	0xC4, 0x83, 0xC4, 0x84, 0xC4, 0x85, 0xC4, 0x86, /* 0xAC-0xAF */
+	0xC4, 0x87, 0xC4, 0x88, 0xC4, 0x89, 0xC9, 0xC5, /* 0xB0-0xB3 */
+	0xC4, 0x8A, 0xC4, 0x8B, 0xC4, 0x8C, 0xC4, 0x8D, /* 0xB4-0xB7 */
+	0xC4, 0x8E, 0xC4, 0x8F, 0xE2, 0xDF, 0xEB, 0xFE, /* 0xB8-0xBB */
+	0xC4, 0x90, 0xC4, 0x91, 0xC4, 0x92, 0xC4, 0x93, /* 0xBC-0xBF */
+	0xCD, 0xCE, 0xEC, 0xA1, 0xB1, 0xDB, 0xD3, 0xB7, /* 0xC0-0xC3 */
+	0xC4, 0x94, 0xC4, 0x95, 0xD2, 0xDC, 0xC4, 0x96, /* 0xC4-0xC7 */
+	0xC4, 0x97, 0xC4, 0x98, 0xEB, 0xFD, 0xC4, 0x99, /* 0xC8-0xCB */
+	0xEB, 0xFB, 0xC4, 0x9A, 0xC4, 0x9B, 0xC4, 0x9C, /* 0xCC-0xCF */
+	0xC4, 0x9D, 0xC4, 0x9E, 0xC4, 0x9F, 0xC4, 0xA0, /* 0xD0-0xD3 */
+	0xC5, 0x40, 0xC5, 0x41, 0xC5, 0x42, 0xC5, 0x43, /* 0xD4-0xD7 */
+	0xC5, 0x44, 0xC5, 0x45, 0xC5, 0x46, 0xC5, 0x47, /* 0xD8-0xDB */
+	0xC5, 0x48, 0xC5, 0x49, 0xC5, 0x4A, 0xC5, 0x4B, /* 0xDC-0xDF */
+	0xC5, 0x4C, 0xC5, 0x4D, 0xC5, 0x4E, 0xB3, 0xBC, /* 0xE0-0xE3 */
+	0xC5, 0x4F, 0xC5, 0x50, 0xC5, 0x51, 0xEA, 0xB0, /* 0xE4-0xE7 */
+	0xC5, 0x52, 0xC5, 0x53, 0xD7, 0xD4, 0xC5, 0x54, /* 0xE8-0xEB */
+	0xF4, 0xAB, 0xB3, 0xF4, 0xC5, 0x55, 0xC5, 0x56, /* 0xEC-0xEF */
+	0xC5, 0x57, 0xC5, 0x58, 0xC5, 0x59, 0xD6, 0xC1, /* 0xF0-0xF3 */
+	0xD6, 0xC2, 0xC5, 0x5A, 0xC5, 0x5B, 0xC5, 0x5C, /* 0xF4-0xF7 */
+	0xC5, 0x5D, 0xC5, 0x5E, 0xC5, 0x5F, 0xD5, 0xE9, /* 0xF8-0xFB */
+	0xBE, 0xCA, 0xC5, 0x60, 0xF4, 0xA7, 0xC5, 0x61, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_82[512] = {
+	0xD2, 0xA8, 0xF4, 0xA8, 0xF4, 0xA9, 0xC5, 0x62, /* 0x00-0x03 */
+	0xF4, 0xAA, 0xBE, 0xCB, 0xD3, 0xDF, 0xC5, 0x63, /* 0x04-0x07 */
+	0xC5, 0x64, 0xC5, 0x65, 0xC5, 0x66, 0xC5, 0x67, /* 0x08-0x0B */
+	0xC9, 0xE0, 0xC9, 0xE1, 0xC5, 0x68, 0xC5, 0x69, /* 0x0C-0x0F */
+	0xF3, 0xC2, 0xC5, 0x6A, 0xCA, 0xE6, 0xC5, 0x6B, /* 0x10-0x13 */
+	0xCC, 0xF2, 0xC5, 0x6C, 0xC5, 0x6D, 0xC5, 0x6E, /* 0x14-0x17 */
+	0xC5, 0x6F, 0xC5, 0x70, 0xC5, 0x71, 0xE2, 0xB6, /* 0x18-0x1B */
+	0xCB, 0xB4, 0xC5, 0x72, 0xCE, 0xE8, 0xD6, 0xDB, /* 0x1C-0x1F */
+	0xC5, 0x73, 0xF4, 0xAD, 0xF4, 0xAE, 0xF4, 0xAF, /* 0x20-0x23 */
+	0xC5, 0x74, 0xC5, 0x75, 0xC5, 0x76, 0xC5, 0x77, /* 0x24-0x27 */
+	0xF4, 0xB2, 0xC5, 0x78, 0xBA, 0xBD, 0xF4, 0xB3, /* 0x28-0x2B */
+	0xB0, 0xE3, 0xF4, 0xB0, 0xC5, 0x79, 0xF4, 0xB1, /* 0x2C-0x2F */
+	0xBD, 0xA2, 0xB2, 0xD5, 0xC5, 0x7A, 0xF4, 0xB6, /* 0x30-0x33 */
+	0xF4, 0xB7, 0xB6, 0xE6, 0xB2, 0xB0, 0xCF, 0xCF, /* 0x34-0x37 */
+	0xF4, 0xB4, 0xB4, 0xAC, 0xC5, 0x7B, 0xF4, 0xB5, /* 0x38-0x3B */
+	0xC5, 0x7C, 0xC5, 0x7D, 0xF4, 0xB8, 0xC5, 0x7E, /* 0x3C-0x3F */
+	0xC5, 0x80, 0xC5, 0x81, 0xC5, 0x82, 0xC5, 0x83, /* 0x40-0x43 */
+	0xF4, 0xB9, 0xC5, 0x84, 0xC5, 0x85, 0xCD, 0xA7, /* 0x44-0x47 */
+	0xC5, 0x86, 0xF4, 0xBA, 0xC5, 0x87, 0xF4, 0xBB, /* 0x48-0x4B */
+	0xC5, 0x88, 0xC5, 0x89, 0xC5, 0x8A, 0xF4, 0xBC, /* 0x4C-0x4F */
+	0xC5, 0x8B, 0xC5, 0x8C, 0xC5, 0x8D, 0xC5, 0x8E, /* 0x50-0x53 */
+	0xC5, 0x8F, 0xC5, 0x90, 0xC5, 0x91, 0xC5, 0x92, /* 0x54-0x57 */
+	0xCB, 0xD2, 0xC5, 0x93, 0xF4, 0xBD, 0xC5, 0x94, /* 0x58-0x5B */
+	0xC5, 0x95, 0xC5, 0x96, 0xC5, 0x97, 0xF4, 0xBE, /* 0x5C-0x5F */
+	0xC5, 0x98, 0xC5, 0x99, 0xC5, 0x9A, 0xC5, 0x9B, /* 0x60-0x63 */
+	0xC5, 0x9C, 0xC5, 0x9D, 0xC5, 0x9E, 0xC5, 0x9F, /* 0x64-0x67 */
+	0xF4, 0xBF, 0xC5, 0xA0, 0xC6, 0x40, 0xC6, 0x41, /* 0x68-0x6B */
+	0xC6, 0x42, 0xC6, 0x43, 0xF4, 0xDE, 0xC1, 0xBC, /* 0x6C-0x6F */
+	0xBC, 0xE8, 0xC6, 0x44, 0xC9, 0xAB, 0xD1, 0xDE, /* 0x70-0x73 */
+	0xE5, 0xF5, 0xC6, 0x45, 0xC6, 0x46, 0xC6, 0x47, /* 0x74-0x77 */
+	0xC6, 0x48, 0xDC, 0xB3, 0xD2, 0xD5, 0xC6, 0x49, /* 0x78-0x7B */
+	0xC6, 0x4A, 0xDC, 0xB4, 0xB0, 0xAC, 0xDC, 0xB5, /* 0x7C-0x7F */
+	
+	0xC6, 0x4B, 0xC6, 0x4C, 0xBD, 0xDA, 0xC6, 0x4D, /* 0x80-0x83 */
+	0xDC, 0xB9, 0xC6, 0x4E, 0xC6, 0x4F, 0xC6, 0x50, /* 0x84-0x87 */
+	0xD8, 0xC2, 0xC6, 0x51, 0xDC, 0xB7, 0xD3, 0xF3, /* 0x88-0x8B */
+	0xC6, 0x52, 0xC9, 0xD6, 0xDC, 0xBA, 0xDC, 0xB6, /* 0x8C-0x8F */
+	0xC6, 0x53, 0xDC, 0xBB, 0xC3, 0xA2, 0xC6, 0x54, /* 0x90-0x93 */
+	0xC6, 0x55, 0xC6, 0x56, 0xC6, 0x57, 0xDC, 0xBC, /* 0x94-0x97 */
+	0xDC, 0xC5, 0xDC, 0xBD, 0xC6, 0x58, 0xC6, 0x59, /* 0x98-0x9B */
+	0xCE, 0xDF, 0xD6, 0xA5, 0xC6, 0x5A, 0xDC, 0xCF, /* 0x9C-0x9F */
+	0xC6, 0x5B, 0xDC, 0xCD, 0xC6, 0x5C, 0xC6, 0x5D, /* 0xA0-0xA3 */
+	0xDC, 0xD2, 0xBD, 0xE6, 0xC2, 0xAB, 0xC6, 0x5E, /* 0xA4-0xA7 */
+	0xDC, 0xB8, 0xDC, 0xCB, 0xDC, 0xCE, 0xDC, 0xBE, /* 0xA8-0xAB */
+	0xB7, 0xD2, 0xB0, 0xC5, 0xDC, 0xC7, 0xD0, 0xBE, /* 0xAC-0xAF */
+	0xDC, 0xC1, 0xBB, 0xA8, 0xC6, 0x5F, 0xB7, 0xBC, /* 0xB0-0xB3 */
+	0xDC, 0xCC, 0xC6, 0x60, 0xC6, 0x61, 0xDC, 0xC6, /* 0xB4-0xB7 */
+	0xDC, 0xBF, 0xC7, 0xDB, 0xC6, 0x62, 0xC6, 0x63, /* 0xB8-0xBB */
+	0xC6, 0x64, 0xD1, 0xBF, 0xDC, 0xC0, 0xC6, 0x65, /* 0xBC-0xBF */
+	0xC6, 0x66, 0xDC, 0xCA, 0xC6, 0x67, 0xC6, 0x68, /* 0xC0-0xC3 */
+	0xDC, 0xD0, 0xC6, 0x69, 0xC6, 0x6A, 0xCE, 0xAD, /* 0xC4-0xC7 */
+	0xDC, 0xC2, 0xC6, 0x6B, 0xDC, 0xC3, 0xDC, 0xC8, /* 0xC8-0xCB */
+	0xDC, 0xC9, 0xB2, 0xD4, 0xDC, 0xD1, 0xCB, 0xD5, /* 0xCC-0xCF */
+	0xC6, 0x6C, 0xD4, 0xB7, 0xDC, 0xDB, 0xDC, 0xDF, /* 0xD0-0xD3 */
+	0xCC, 0xA6, 0xDC, 0xE6, 0xC6, 0x6D, 0xC3, 0xE7, /* 0xD4-0xD7 */
+	0xDC, 0xDC, 0xC6, 0x6E, 0xC6, 0x6F, 0xBF, 0xC1, /* 0xD8-0xDB */
+	0xDC, 0xD9, 0xC6, 0x70, 0xB0, 0xFA, 0xB9, 0xB6, /* 0xDC-0xDF */
+	0xDC, 0xE5, 0xDC, 0xD3, 0xC6, 0x71, 0xDC, 0xC4, /* 0xE0-0xE3 */
+	0xDC, 0xD6, 0xC8, 0xF4, 0xBF, 0xE0, 0xC6, 0x72, /* 0xE4-0xE7 */
+	0xC6, 0x73, 0xC6, 0x74, 0xC6, 0x75, 0xC9, 0xBB, /* 0xE8-0xEB */
+	0xC6, 0x76, 0xC6, 0x77, 0xC6, 0x78, 0xB1, 0xBD, /* 0xEC-0xEF */
+	0xC6, 0x79, 0xD3, 0xA2, 0xC6, 0x7A, 0xC6, 0x7B, /* 0xF0-0xF3 */
+	0xDC, 0xDA, 0xC6, 0x7C, 0xC6, 0x7D, 0xDC, 0xD5, /* 0xF4-0xF7 */
+	0xC6, 0x7E, 0xC6, 0xBB, 0xC6, 0x80, 0xDC, 0xDE, /* 0xF8-0xFB */
+	0xC6, 0x81, 0xC6, 0x82, 0xC6, 0x83, 0xC6, 0x84, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_83[512] = {
+	0xC6, 0x85, 0xD7, 0xC2, 0xC3, 0xAF, 0xB7, 0xB6, /* 0x00-0x03 */
+	0xC7, 0xD1, 0xC3, 0xA9, 0xDC, 0xE2, 0xDC, 0xD8, /* 0x04-0x07 */
+	0xDC, 0xEB, 0xDC, 0xD4, 0xC6, 0x86, 0xC6, 0x87, /* 0x08-0x0B */
+	0xDC, 0xDD, 0xC6, 0x88, 0xBE, 0xA5, 0xDC, 0xD7, /* 0x0C-0x0F */
+	0xC6, 0x89, 0xDC, 0xE0, 0xC6, 0x8A, 0xC6, 0x8B, /* 0x10-0x13 */
+	0xDC, 0xE3, 0xDC, 0xE4, 0xC6, 0x8C, 0xDC, 0xF8, /* 0x14-0x17 */
+	0xC6, 0x8D, 0xC6, 0x8E, 0xDC, 0xE1, 0xDD, 0xA2, /* 0x18-0x1B */
+	0xDC, 0xE7, 0xC6, 0x8F, 0xC6, 0x90, 0xC6, 0x91, /* 0x1C-0x1F */
+	0xC6, 0x92, 0xC6, 0x93, 0xC6, 0x94, 0xC6, 0x95, /* 0x20-0x23 */
+	0xC6, 0x96, 0xC6, 0x97, 0xC6, 0x98, 0xBC, 0xEB, /* 0x24-0x27 */
+	0xB4, 0xC4, 0xC6, 0x99, 0xC6, 0x9A, 0xC3, 0xA3, /* 0x28-0x2B */
+	0xB2, 0xE7, 0xDC, 0xFA, 0xC6, 0x9B, 0xDC, 0xF2, /* 0x2C-0x2F */
+	0xC6, 0x9C, 0xDC, 0xEF, 0xC6, 0x9D, 0xDC, 0xFC, /* 0x30-0x33 */
+	0xDC, 0xEE, 0xD2, 0xF0, 0xB2, 0xE8, 0xC6, 0x9E, /* 0x34-0x37 */
+	0xC8, 0xD7, 0xC8, 0xE3, 0xDC, 0xFB, 0xC6, 0x9F, /* 0x38-0x3B */
+	0xDC, 0xED, 0xC6, 0xA0, 0xC7, 0x40, 0xC7, 0x41, /* 0x3C-0x3F */
+	0xDC, 0xF7, 0xC7, 0x42, 0xC7, 0x43, 0xDC, 0xF5, /* 0x40-0x43 */
+	0xC7, 0x44, 0xC7, 0x45, 0xBE, 0xA3, 0xDC, 0xF4, /* 0x44-0x47 */
+	0xC7, 0x46, 0xB2, 0xDD, 0xC7, 0x47, 0xC7, 0x48, /* 0x48-0x4B */
+	0xC7, 0x49, 0xC7, 0x4A, 0xC7, 0x4B, 0xDC, 0xF3, /* 0x4C-0x4F */
+	0xBC, 0xF6, 0xDC, 0xE8, 0xBB, 0xC4, 0xC7, 0x4C, /* 0x50-0x53 */
+	0xC0, 0xF3, 0xC7, 0x4D, 0xC7, 0x4E, 0xC7, 0x4F, /* 0x54-0x57 */
+	0xC7, 0x50, 0xC7, 0x51, 0xBC, 0xD4, 0xDC, 0xE9, /* 0x58-0x5B */
+	0xDC, 0xEA, 0xC7, 0x52, 0xDC, 0xF1, 0xDC, 0xF6, /* 0x5C-0x5F */
+	0xDC, 0xF9, 0xB5, 0xB4, 0xC7, 0x53, 0xC8, 0xD9, /* 0x60-0x63 */
+	0xBB, 0xE7, 0xDC, 0xFE, 0xDC, 0xFD, 0xD3, 0xAB, /* 0x64-0x67 */
+	0xDD, 0xA1, 0xDD, 0xA3, 0xDD, 0xA5, 0xD2, 0xF1, /* 0x68-0x6B */
+	0xDD, 0xA4, 0xDD, 0xA6, 0xDD, 0xA7, 0xD2, 0xA9, /* 0x6C-0x6F */
+	0xC7, 0x54, 0xC7, 0x55, 0xC7, 0x56, 0xC7, 0x57, /* 0x70-0x73 */
+	0xC7, 0x58, 0xC7, 0x59, 0xC7, 0x5A, 0xBA, 0xC9, /* 0x74-0x77 */
+	0xDD, 0xA9, 0xC7, 0x5B, 0xC7, 0x5C, 0xDD, 0xB6, /* 0x78-0x7B */
+	0xDD, 0xB1, 0xDD, 0xB4, 0xC7, 0x5D, 0xC7, 0x5E, /* 0x7C-0x7F */
+	
+	0xC7, 0x5F, 0xC7, 0x60, 0xC7, 0x61, 0xC7, 0x62, /* 0x80-0x83 */
+	0xC7, 0x63, 0xDD, 0xB0, 0xC6, 0xCE, 0xC7, 0x64, /* 0x84-0x87 */
+	0xC7, 0x65, 0xC0, 0xF2, 0xC7, 0x66, 0xC7, 0x67, /* 0x88-0x8B */
+	0xC7, 0x68, 0xC7, 0x69, 0xC9, 0xAF, 0xC7, 0x6A, /* 0x8C-0x8F */
+	0xC7, 0x6B, 0xC7, 0x6C, 0xDC, 0xEC, 0xDD, 0xAE, /* 0x90-0x93 */
+	0xC7, 0x6D, 0xC7, 0x6E, 0xC7, 0x6F, 0xC7, 0x70, /* 0x94-0x97 */
+	0xDD, 0xB7, 0xC7, 0x71, 0xC7, 0x72, 0xDC, 0xF0, /* 0x98-0x9B */
+	0xDD, 0xAF, 0xC7, 0x73, 0xDD, 0xB8, 0xC7, 0x74, /* 0x9C-0x9F */
+	0xDD, 0xAC, 0xC7, 0x75, 0xC7, 0x76, 0xC7, 0x77, /* 0xA0-0xA3 */
+	0xC7, 0x78, 0xC7, 0x79, 0xC7, 0x7A, 0xC7, 0x7B, /* 0xA4-0xA7 */
+	0xDD, 0xB9, 0xDD, 0xB3, 0xDD, 0xAD, 0xC4, 0xAA, /* 0xA8-0xAB */
+	0xC7, 0x7C, 0xC7, 0x7D, 0xC7, 0x7E, 0xC7, 0x80, /* 0xAC-0xAF */
+	0xDD, 0xA8, 0xC0, 0xB3, 0xC1, 0xAB, 0xDD, 0xAA, /* 0xB0-0xB3 */
+	0xDD, 0xAB, 0xC7, 0x81, 0xDD, 0xB2, 0xBB, 0xF1, /* 0xB4-0xB7 */
+	0xDD, 0xB5, 0xD3, 0xA8, 0xDD, 0xBA, 0xC7, 0x82, /* 0xB8-0xBB */
+	0xDD, 0xBB, 0xC3, 0xA7, 0xC7, 0x83, 0xC7, 0x84, /* 0xBC-0xBF */
+	0xDD, 0xD2, 0xDD, 0xBC, 0xC7, 0x85, 0xC7, 0x86, /* 0xC0-0xC3 */
+	0xC7, 0x87, 0xDD, 0xD1, 0xC7, 0x88, 0xB9, 0xBD, /* 0xC4-0xC7 */
+	0xC7, 0x89, 0xC7, 0x8A, 0xBE, 0xD5, 0xC7, 0x8B, /* 0xC8-0xCB */
+	0xBE, 0xFA, 0xC7, 0x8C, 0xC7, 0x8D, 0xBA, 0xCA, /* 0xCC-0xCF */
+	0xC7, 0x8E, 0xC7, 0x8F, 0xC7, 0x90, 0xC7, 0x91, /* 0xD0-0xD3 */
+	0xDD, 0xCA, 0xC7, 0x92, 0xDD, 0xC5, 0xC7, 0x93, /* 0xD4-0xD7 */
+	0xDD, 0xBF, 0xC7, 0x94, 0xC7, 0x95, 0xC7, 0x96, /* 0xD8-0xDB */
+	0xB2, 0xCB, 0xDD, 0xC3, 0xC7, 0x97, 0xDD, 0xCB, /* 0xDC-0xDF */
+	0xB2, 0xA4, 0xDD, 0xD5, 0xC7, 0x98, 0xC7, 0x99, /* 0xE0-0xE3 */
+	0xC7, 0x9A, 0xDD, 0xBE, 0xC7, 0x9B, 0xC7, 0x9C, /* 0xE4-0xE7 */
+	0xC7, 0x9D, 0xC6, 0xD0, 0xDD, 0xD0, 0xC7, 0x9E, /* 0xE8-0xEB */
+	0xC7, 0x9F, 0xC7, 0xA0, 0xC8, 0x40, 0xC8, 0x41, /* 0xEC-0xEF */
+	0xDD, 0xD4, 0xC1, 0xE2, 0xB7, 0xC6, 0xC8, 0x42, /* 0xF0-0xF3 */
+	0xC8, 0x43, 0xC8, 0x44, 0xC8, 0x45, 0xC8, 0x46, /* 0xF4-0xF7 */
+	0xDD, 0xCE, 0xDD, 0xCF, 0xC8, 0x47, 0xC8, 0x48, /* 0xF8-0xFB */
+	0xC8, 0x49, 0xDD, 0xC4, 0xC8, 0x4A, 0xC8, 0x4B, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_84[512] = {
+	0xC8, 0x4C, 0xDD, 0xBD, 0xC8, 0x4D, 0xDD, 0xCD, /* 0x00-0x03 */
+	0xCC, 0xD1, 0xC8, 0x4E, 0xDD, 0xC9, 0xC8, 0x4F, /* 0x04-0x07 */
+	0xC8, 0x50, 0xC8, 0x51, 0xC8, 0x52, 0xDD, 0xC2, /* 0x08-0x0B */
+	0xC3, 0xC8, 0xC6, 0xBC, 0xCE, 0xAE, 0xDD, 0xCC, /* 0x0C-0x0F */
+	0xC8, 0x53, 0xDD, 0xC8, 0xC8, 0x54, 0xC8, 0x55, /* 0x10-0x13 */
+	0xC8, 0x56, 0xC8, 0x57, 0xC8, 0x58, 0xC8, 0x59, /* 0x14-0x17 */
+	0xDD, 0xC1, 0xC8, 0x5A, 0xC8, 0x5B, 0xC8, 0x5C, /* 0x18-0x1B */
+	0xDD, 0xC6, 0xC2, 0xDC, 0xC8, 0x5D, 0xC8, 0x5E, /* 0x1C-0x1F */
+	0xC8, 0x5F, 0xC8, 0x60, 0xC8, 0x61, 0xC8, 0x62, /* 0x20-0x23 */
+	0xD3, 0xA9, 0xD3, 0xAA, 0xDD, 0xD3, 0xCF, 0xF4, /* 0x24-0x27 */
+	0xC8, 0xF8, 0xC8, 0x63, 0xC8, 0x64, 0xC8, 0x65, /* 0x28-0x2B */
+	0xC8, 0x66, 0xC8, 0x67, 0xC8, 0x68, 0xC8, 0x69, /* 0x2C-0x2F */
+	0xC8, 0x6A, 0xDD, 0xE6, 0xC8, 0x6B, 0xC8, 0x6C, /* 0x30-0x33 */
+	0xC8, 0x6D, 0xC8, 0x6E, 0xC8, 0x6F, 0xC8, 0x70, /* 0x34-0x37 */
+	0xDD, 0xC7, 0xC8, 0x71, 0xC8, 0x72, 0xC8, 0x73, /* 0x38-0x3B */
+	0xDD, 0xE0, 0xC2, 0xE4, 0xC8, 0x74, 0xC8, 0x75, /* 0x3C-0x3F */
+	0xC8, 0x76, 0xC8, 0x77, 0xC8, 0x78, 0xC8, 0x79, /* 0x40-0x43 */
+	0xC8, 0x7A, 0xC8, 0x7B, 0xDD, 0xE1, 0xC8, 0x7C, /* 0x44-0x47 */
+	0xC8, 0x7D, 0xC8, 0x7E, 0xC8, 0x80, 0xC8, 0x81, /* 0x48-0x4B */
+	0xC8, 0x82, 0xC8, 0x83, 0xC8, 0x84, 0xC8, 0x85, /* 0x4C-0x4F */
+	0xC8, 0x86, 0xDD, 0xD7, 0xC8, 0x87, 0xC8, 0x88, /* 0x50-0x53 */
+	0xC8, 0x89, 0xC8, 0x8A, 0xC8, 0x8B, 0xD6, 0xF8, /* 0x54-0x57 */
+	0xC8, 0x8C, 0xDD, 0xD9, 0xDD, 0xD8, 0xB8, 0xF0, /* 0x58-0x5B */
+	0xDD, 0xD6, 0xC8, 0x8D, 0xC8, 0x8E, 0xC8, 0x8F, /* 0x5C-0x5F */
+	0xC8, 0x90, 0xC6, 0xCF, 0xC8, 0x91, 0xB6, 0xAD, /* 0x60-0x63 */
+	0xC8, 0x92, 0xC8, 0x93, 0xC8, 0x94, 0xC8, 0x95, /* 0x64-0x67 */
+	0xC8, 0x96, 0xDD, 0xE2, 0xC8, 0x97, 0xBA, 0xF9, /* 0x68-0x6B */
+	0xD4, 0xE1, 0xDD, 0xE7, 0xC8, 0x98, 0xC8, 0x99, /* 0x6C-0x6F */
+	0xC8, 0x9A, 0xB4, 0xD0, 0xC8, 0x9B, 0xDD, 0xDA, /* 0x70-0x73 */
+	0xC8, 0x9C, 0xBF, 0xFB, 0xDD, 0xE3, 0xC8, 0x9D, /* 0x74-0x77 */
+	0xDD, 0xDF, 0xC8, 0x9E, 0xDD, 0xDD, 0xC8, 0x9F, /* 0x78-0x7B */
+	0xC8, 0xA0, 0xC9, 0x40, 0xC9, 0x41, 0xC9, 0x42, /* 0x7C-0x7F */
+	
+	0xC9, 0x43, 0xC9, 0x44, 0xB5, 0xD9, 0xC9, 0x45, /* 0x80-0x83 */
+	0xC9, 0x46, 0xC9, 0x47, 0xC9, 0x48, 0xDD, 0xDB, /* 0x84-0x87 */
+	0xDD, 0xDC, 0xDD, 0xDE, 0xC9, 0x49, 0xBD, 0xAF, /* 0x88-0x8B */
+	0xDD, 0xE4, 0xC9, 0x4A, 0xDD, 0xE5, 0xC9, 0x4B, /* 0x8C-0x8F */
+	0xC9, 0x4C, 0xC9, 0x4D, 0xC9, 0x4E, 0xC9, 0x4F, /* 0x90-0x93 */
+	0xC9, 0x50, 0xC9, 0x51, 0xC9, 0x52, 0xDD, 0xF5, /* 0x94-0x97 */
+	0xC9, 0x53, 0xC3, 0xC9, 0xC9, 0x54, 0xC9, 0x55, /* 0x98-0x9B */
+	0xCB, 0xE2, 0xC9, 0x56, 0xC9, 0x57, 0xC9, 0x58, /* 0x9C-0x9F */
+	0xC9, 0x59, 0xDD, 0xF2, 0xC9, 0x5A, 0xC9, 0x5B, /* 0xA0-0xA3 */
+	0xC9, 0x5C, 0xC9, 0x5D, 0xC9, 0x5E, 0xC9, 0x5F, /* 0xA4-0xA7 */
+	0xC9, 0x60, 0xC9, 0x61, 0xC9, 0x62, 0xC9, 0x63, /* 0xA8-0xAB */
+	0xC9, 0x64, 0xC9, 0x65, 0xC9, 0x66, 0xD8, 0xE1, /* 0xAC-0xAF */
+	0xC9, 0x67, 0xC9, 0x68, 0xC6, 0xD1, 0xC9, 0x69, /* 0xB0-0xB3 */
+	0xDD, 0xF4, 0xC9, 0x6A, 0xC9, 0x6B, 0xC9, 0x6C, /* 0xB4-0xB7 */
+	0xD5, 0xF4, 0xDD, 0xF3, 0xDD, 0xF0, 0xC9, 0x6D, /* 0xB8-0xBB */
+	0xC9, 0x6E, 0xDD, 0xEC, 0xC9, 0x6F, 0xDD, 0xEF, /* 0xBC-0xBF */
+	0xC9, 0x70, 0xDD, 0xE8, 0xC9, 0x71, 0xC9, 0x72, /* 0xC0-0xC3 */
+	0xD0, 0xEE, 0xC9, 0x73, 0xC9, 0x74, 0xC9, 0x75, /* 0xC4-0xC7 */
+	0xC9, 0x76, 0xC8, 0xD8, 0xDD, 0xEE, 0xC9, 0x77, /* 0xC8-0xCB */
+	0xC9, 0x78, 0xDD, 0xE9, 0xC9, 0x79, 0xC9, 0x7A, /* 0xCC-0xCF */
+	0xDD, 0xEA, 0xCB, 0xF2, 0xC9, 0x7B, 0xDD, 0xED, /* 0xD0-0xD3 */
+	0xC9, 0x7C, 0xC9, 0x7D, 0xB1, 0xCD, 0xC9, 0x7E, /* 0xD4-0xD7 */
+	0xC9, 0x80, 0xC9, 0x81, 0xC9, 0x82, 0xC9, 0x83, /* 0xD8-0xDB */
+	0xC9, 0x84, 0xC0, 0xB6, 0xC9, 0x85, 0xBC, 0xBB, /* 0xDC-0xDF */
+	0xDD, 0xF1, 0xC9, 0x86, 0xC9, 0x87, 0xDD, 0xF7, /* 0xE0-0xE3 */
+	0xC9, 0x88, 0xDD, 0xF6, 0xDD, 0xEB, 0xC9, 0x89, /* 0xE4-0xE7 */
+	0xC9, 0x8A, 0xC9, 0x8B, 0xC9, 0x8C, 0xC9, 0x8D, /* 0xE8-0xEB */
+	0xC5, 0xEE, 0xC9, 0x8E, 0xC9, 0x8F, 0xC9, 0x90, /* 0xEC-0xEF */
+	0xDD, 0xFB, 0xC9, 0x91, 0xC9, 0x92, 0xC9, 0x93, /* 0xF0-0xF3 */
+	0xC9, 0x94, 0xC9, 0x95, 0xC9, 0x96, 0xC9, 0x97, /* 0xF4-0xF7 */
+	0xC9, 0x98, 0xC9, 0x99, 0xC9, 0x9A, 0xC9, 0x9B, /* 0xF8-0xFB */
+	0xDE, 0xA4, 0xC9, 0x9C, 0xC9, 0x9D, 0xDE, 0xA3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_85[512] = {
+	0xC9, 0x9E, 0xC9, 0x9F, 0xC9, 0xA0, 0xCA, 0x40, /* 0x00-0x03 */
+	0xCA, 0x41, 0xCA, 0x42, 0xCA, 0x43, 0xCA, 0x44, /* 0x04-0x07 */
+	0xCA, 0x45, 0xCA, 0x46, 0xCA, 0x47, 0xCA, 0x48, /* 0x08-0x0B */
+	0xDD, 0xF8, 0xCA, 0x49, 0xCA, 0x4A, 0xCA, 0x4B, /* 0x0C-0x0F */
+	0xCA, 0x4C, 0xC3, 0xEF, 0xCA, 0x4D, 0xC2, 0xFB, /* 0x10-0x13 */
+	0xCA, 0x4E, 0xCA, 0x4F, 0xCA, 0x50, 0xD5, 0xE1, /* 0x14-0x17 */
+	0xCA, 0x51, 0xCA, 0x52, 0xCE, 0xB5, 0xCA, 0x53, /* 0x18-0x1B */
+	0xCA, 0x54, 0xCA, 0x55, 0xCA, 0x56, 0xDD, 0xFD, /* 0x1C-0x1F */
+	0xCA, 0x57, 0xB2, 0xCC, 0xCA, 0x58, 0xCA, 0x59, /* 0x20-0x23 */
+	0xCA, 0x5A, 0xCA, 0x5B, 0xCA, 0x5C, 0xCA, 0x5D, /* 0x24-0x27 */
+	0xCA, 0x5E, 0xCA, 0x5F, 0xCA, 0x60, 0xC4, 0xE8, /* 0x28-0x2B */
+	0xCA, 0xDF, 0xCA, 0x61, 0xCA, 0x62, 0xCA, 0x63, /* 0x2C-0x2F */
+	0xCA, 0x64, 0xCA, 0x65, 0xCA, 0x66, 0xCA, 0x67, /* 0x30-0x33 */
+	0xCA, 0x68, 0xCA, 0x69, 0xCA, 0x6A, 0xC7, 0xBE, /* 0x34-0x37 */
+	0xDD, 0xFA, 0xDD, 0xFC, 0xDD, 0xFE, 0xDE, 0xA2, /* 0x38-0x3B */
+	0xB0, 0xAA, 0xB1, 0xCE, 0xCA, 0x6B, 0xCA, 0x6C, /* 0x3C-0x3F */
+	0xCA, 0x6D, 0xCA, 0x6E, 0xCA, 0x6F, 0xDE, 0xAC, /* 0x40-0x43 */
+	0xCA, 0x70, 0xCA, 0x71, 0xCA, 0x72, 0xCA, 0x73, /* 0x44-0x47 */
+	0xDE, 0xA6, 0xBD, 0xB6, 0xC8, 0xEF, 0xCA, 0x74, /* 0x48-0x4B */
+	0xCA, 0x75, 0xCA, 0x76, 0xCA, 0x77, 0xCA, 0x78, /* 0x4C-0x4F */
+	0xCA, 0x79, 0xCA, 0x7A, 0xCA, 0x7B, 0xCA, 0x7C, /* 0x50-0x53 */
+	0xCA, 0x7D, 0xCA, 0x7E, 0xDE, 0xA1, 0xCA, 0x80, /* 0x54-0x57 */
+	0xCA, 0x81, 0xDE, 0xA5, 0xCA, 0x82, 0xCA, 0x83, /* 0x58-0x5B */
+	0xCA, 0x84, 0xCA, 0x85, 0xDE, 0xA9, 0xCA, 0x86, /* 0x5C-0x5F */
+	0xCA, 0x87, 0xCA, 0x88, 0xCA, 0x89, 0xCA, 0x8A, /* 0x60-0x63 */
+	0xDE, 0xA8, 0xCA, 0x8B, 0xCA, 0x8C, 0xCA, 0x8D, /* 0x64-0x67 */
+	0xDE, 0xA7, 0xCA, 0x8E, 0xCA, 0x8F, 0xCA, 0x90, /* 0x68-0x6B */
+	0xCA, 0x91, 0xCA, 0x92, 0xCA, 0x93, 0xCA, 0x94, /* 0x6C-0x6F */
+	0xCA, 0x95, 0xCA, 0x96, 0xDE, 0xAD, 0xCA, 0x97, /* 0x70-0x73 */
+	0xD4, 0xCC, 0xCA, 0x98, 0xCA, 0x99, 0xCA, 0x9A, /* 0x74-0x77 */
+	0xCA, 0x9B, 0xDE, 0xB3, 0xDE, 0xAA, 0xDE, 0xAE, /* 0x78-0x7B */
+	0xCA, 0x9C, 0xCA, 0x9D, 0xC0, 0xD9, 0xCA, 0x9E, /* 0x7C-0x7F */
+	
+	0xCA, 0x9F, 0xCA, 0xA0, 0xCB, 0x40, 0xCB, 0x41, /* 0x80-0x83 */
+	0xB1, 0xA1, 0xDE, 0xB6, 0xCB, 0x42, 0xDE, 0xB1, /* 0x84-0x87 */
+	0xCB, 0x43, 0xCB, 0x44, 0xCB, 0x45, 0xCB, 0x46, /* 0x88-0x8B */
+	0xCB, 0x47, 0xCB, 0x48, 0xCB, 0x49, 0xDE, 0xB2, /* 0x8C-0x8F */
+	0xCB, 0x4A, 0xCB, 0x4B, 0xCB, 0x4C, 0xCB, 0x4D, /* 0x90-0x93 */
+	0xCB, 0x4E, 0xCB, 0x4F, 0xCB, 0x50, 0xCB, 0x51, /* 0x94-0x97 */
+	0xCB, 0x52, 0xCB, 0x53, 0xCB, 0x54, 0xD1, 0xA6, /* 0x98-0x9B */
+	0xDE, 0xB5, 0xCB, 0x55, 0xCB, 0x56, 0xCB, 0x57, /* 0x9C-0x9F */
+	0xCB, 0x58, 0xCB, 0x59, 0xCB, 0x5A, 0xCB, 0x5B, /* 0xA0-0xA3 */
+	0xDE, 0xAF, 0xCB, 0x5C, 0xCB, 0x5D, 0xCB, 0x5E, /* 0xA4-0xA7 */
+	0xDE, 0xB0, 0xCB, 0x5F, 0xD0, 0xBD, 0xCB, 0x60, /* 0xA8-0xAB */
+	0xCB, 0x61, 0xCB, 0x62, 0xDE, 0xB4, 0xCA, 0xED, /* 0xAC-0xAF */
+	0xDE, 0xB9, 0xCB, 0x63, 0xCB, 0x64, 0xCB, 0x65, /* 0xB0-0xB3 */
+	0xCB, 0x66, 0xCB, 0x67, 0xCB, 0x68, 0xDE, 0xB8, /* 0xB4-0xB7 */
+	0xCB, 0x69, 0xDE, 0xB7, 0xCB, 0x6A, 0xCB, 0x6B, /* 0xB8-0xBB */
+	0xCB, 0x6C, 0xCB, 0x6D, 0xCB, 0x6E, 0xCB, 0x6F, /* 0xBC-0xBF */
+	0xCB, 0x70, 0xDE, 0xBB, 0xCB, 0x71, 0xCB, 0x72, /* 0xC0-0xC3 */
+	0xCB, 0x73, 0xCB, 0x74, 0xCB, 0x75, 0xCB, 0x76, /* 0xC4-0xC7 */
+	0xCB, 0x77, 0xBD, 0xE5, 0xCB, 0x78, 0xCB, 0x79, /* 0xC8-0xCB */
+	0xCB, 0x7A, 0xCB, 0x7B, 0xCB, 0x7C, 0xB2, 0xD8, /* 0xCC-0xCF */
+	0xC3, 0xEA, 0xCB, 0x7D, 0xCB, 0x7E, 0xDE, 0xBA, /* 0xD0-0xD3 */
+	0xCB, 0x80, 0xC5, 0xBA, 0xCB, 0x81, 0xCB, 0x82, /* 0xD4-0xD7 */
+	0xCB, 0x83, 0xCB, 0x84, 0xCB, 0x85, 0xCB, 0x86, /* 0xD8-0xDB */
+	0xDE, 0xBC, 0xCB, 0x87, 0xCB, 0x88, 0xCB, 0x89, /* 0xDC-0xDF */
+	0xCB, 0x8A, 0xCB, 0x8B, 0xCB, 0x8C, 0xCB, 0x8D, /* 0xE0-0xE3 */
+	0xCC, 0xD9, 0xCB, 0x8E, 0xCB, 0x8F, 0xCB, 0x90, /* 0xE4-0xE7 */
+	0xCB, 0x91, 0xB7, 0xAA, 0xCB, 0x92, 0xCB, 0x93, /* 0xE8-0xEB */
+	0xCB, 0x94, 0xCB, 0x95, 0xCB, 0x96, 0xCB, 0x97, /* 0xEC-0xEF */
+	0xCB, 0x98, 0xCB, 0x99, 0xCB, 0x9A, 0xCB, 0x9B, /* 0xF0-0xF3 */
+	0xCB, 0x9C, 0xCB, 0x9D, 0xCB, 0x9E, 0xCB, 0x9F, /* 0xF4-0xF7 */
+	0xCB, 0xA0, 0xCC, 0x40, 0xCC, 0x41, 0xD4, 0xE5, /* 0xF8-0xFB */
+	0xCC, 0x42, 0xCC, 0x43, 0xCC, 0x44, 0xDE, 0xBD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_86[512] = {
+	0xCC, 0x45, 0xCC, 0x46, 0xCC, 0x47, 0xCC, 0x48, /* 0x00-0x03 */
+	0xCC, 0x49, 0xDE, 0xBF, 0xCC, 0x4A, 0xCC, 0x4B, /* 0x04-0x07 */
+	0xCC, 0x4C, 0xCC, 0x4D, 0xCC, 0x4E, 0xCC, 0x4F, /* 0x08-0x0B */
+	0xCC, 0x50, 0xCC, 0x51, 0xCC, 0x52, 0xCC, 0x53, /* 0x0C-0x0F */
+	0xCC, 0x54, 0xC4, 0xA2, 0xCC, 0x55, 0xCC, 0x56, /* 0x10-0x13 */
+	0xCC, 0x57, 0xCC, 0x58, 0xDE, 0xC1, 0xCC, 0x59, /* 0x14-0x17 */
+	0xCC, 0x5A, 0xCC, 0x5B, 0xCC, 0x5C, 0xCC, 0x5D, /* 0x18-0x1B */
+	0xCC, 0x5E, 0xCC, 0x5F, 0xCC, 0x60, 0xCC, 0x61, /* 0x1C-0x1F */
+	0xCC, 0x62, 0xCC, 0x63, 0xCC, 0x64, 0xCC, 0x65, /* 0x20-0x23 */
+	0xCC, 0x66, 0xCC, 0x67, 0xCC, 0x68, 0xDE, 0xBE, /* 0x24-0x27 */
+	0xCC, 0x69, 0xDE, 0xC0, 0xCC, 0x6A, 0xCC, 0x6B, /* 0x28-0x2B */
+	0xCC, 0x6C, 0xCC, 0x6D, 0xCC, 0x6E, 0xCC, 0x6F, /* 0x2C-0x2F */
+	0xCC, 0x70, 0xCC, 0x71, 0xCC, 0x72, 0xCC, 0x73, /* 0x30-0x33 */
+	0xCC, 0x74, 0xCC, 0x75, 0xCC, 0x76, 0xCC, 0x77, /* 0x34-0x37 */
+	0xD5, 0xBA, 0xCC, 0x78, 0xCC, 0x79, 0xCC, 0x7A, /* 0x38-0x3B */
+	0xDE, 0xC2, 0xCC, 0x7B, 0xCC, 0x7C, 0xCC, 0x7D, /* 0x3C-0x3F */
+	0xCC, 0x7E, 0xCC, 0x80, 0xCC, 0x81, 0xCC, 0x82, /* 0x40-0x43 */
+	0xCC, 0x83, 0xCC, 0x84, 0xCC, 0x85, 0xCC, 0x86, /* 0x44-0x47 */
+	0xCC, 0x87, 0xCC, 0x88, 0xCC, 0x89, 0xCC, 0x8A, /* 0x48-0x4B */
+	0xCC, 0x8B, 0xF2, 0xAE, 0xBB, 0xA2, 0xC2, 0xB2, /* 0x4C-0x4F */
+	0xC5, 0xB0, 0xC2, 0xC7, 0xCC, 0x8C, 0xCC, 0x8D, /* 0x50-0x53 */
+	0xF2, 0xAF, 0xCC, 0x8E, 0xCC, 0x8F, 0xCC, 0x90, /* 0x54-0x57 */
+	0xCC, 0x91, 0xCC, 0x92, 0xD0, 0xE9, 0xCC, 0x93, /* 0x58-0x5B */
+	0xCC, 0x94, 0xCC, 0x95, 0xD3, 0xDD, 0xCC, 0x96, /* 0x5C-0x5F */
+	0xCC, 0x97, 0xCC, 0x98, 0xEB, 0xBD, 0xCC, 0x99, /* 0x60-0x63 */
+	0xCC, 0x9A, 0xCC, 0x9B, 0xCC, 0x9C, 0xCC, 0x9D, /* 0x64-0x67 */
+	0xCC, 0x9E, 0xCC, 0x9F, 0xCC, 0xA0, 0xB3, 0xE6, /* 0x68-0x6B */
+	0xF2, 0xB0, 0xCD, 0x40, 0xF2, 0xB1, 0xCD, 0x41, /* 0x6C-0x6F */
+	0xCD, 0x42, 0xCA, 0xAD, 0xCD, 0x43, 0xCD, 0x44, /* 0x70-0x73 */
+	0xCD, 0x45, 0xCD, 0x46, 0xCD, 0x47, 0xCD, 0x48, /* 0x74-0x77 */
+	0xCD, 0x49, 0xBA, 0xE7, 0xF2, 0xB3, 0xF2, 0xB5, /* 0x78-0x7B */
+	0xF2, 0xB4, 0xCB, 0xE4, 0xCF, 0xBA, 0xF2, 0xB2, /* 0x7C-0x7F */
+	
+	0xCA, 0xB4, 0xD2, 0xCF, 0xC2, 0xEC, 0xCD, 0x4A, /* 0x80-0x83 */
+	0xCD, 0x4B, 0xCD, 0x4C, 0xCD, 0x4D, 0xCD, 0x4E, /* 0x84-0x87 */
+	0xCD, 0x4F, 0xCD, 0x50, 0xCE, 0xC3, 0xF2, 0xB8, /* 0x88-0x8B */
+	0xB0, 0xF6, 0xF2, 0xB7, 0xCD, 0x51, 0xCD, 0x52, /* 0x8C-0x8F */
+	0xCD, 0x53, 0xCD, 0x54, 0xCD, 0x55, 0xF2, 0xBE, /* 0x90-0x93 */
+	0xCD, 0x56, 0xB2, 0xCF, 0xCD, 0x57, 0xCD, 0x58, /* 0x94-0x97 */
+	0xCD, 0x59, 0xCD, 0x5A, 0xCD, 0x5B, 0xCD, 0x5C, /* 0x98-0x9B */
+	0xD1, 0xC1, 0xF2, 0xBA, 0xCD, 0x5D, 0xCD, 0x5E, /* 0x9C-0x9F */
+	0xCD, 0x5F, 0xCD, 0x60, 0xCD, 0x61, 0xF2, 0xBC, /* 0xA0-0xA3 */
+	0xD4, 0xE9, 0xCD, 0x62, 0xCD, 0x63, 0xF2, 0xBB, /* 0xA4-0xA7 */
+	0xF2, 0xB6, 0xF2, 0xBF, 0xF2, 0xBD, 0xCD, 0x64, /* 0xA8-0xAB */
+	0xF2, 0xB9, 0xCD, 0x65, 0xCD, 0x66, 0xF2, 0xC7, /* 0xAC-0xAF */
+	0xF2, 0xC4, 0xF2, 0xC6, 0xCD, 0x67, 0xCD, 0x68, /* 0xB0-0xB3 */
+	0xF2, 0xCA, 0xF2, 0xC2, 0xF2, 0xC0, 0xCD, 0x69, /* 0xB4-0xB7 */
+	0xCD, 0x6A, 0xCD, 0x6B, 0xF2, 0xC5, 0xCD, 0x6C, /* 0xB8-0xBB */
+	0xCD, 0x6D, 0xCD, 0x6E, 0xCD, 0x6F, 0xCD, 0x70, /* 0xBC-0xBF */
+	0xD6, 0xFB, 0xCD, 0x71, 0xCD, 0x72, 0xCD, 0x73, /* 0xC0-0xC3 */
+	0xF2, 0xC1, 0xCD, 0x74, 0xC7, 0xF9, 0xC9, 0xDF, /* 0xC4-0xC7 */
+	0xCD, 0x75, 0xF2, 0xC8, 0xB9, 0xC6, 0xB5, 0xB0, /* 0xC8-0xCB */
+	0xCD, 0x76, 0xCD, 0x77, 0xF2, 0xC3, 0xF2, 0xC9, /* 0xCC-0xCF */
+	0xF2, 0xD0, 0xF2, 0xD6, 0xCD, 0x78, 0xCD, 0x79, /* 0xD0-0xD3 */
+	0xBB, 0xD7, 0xCD, 0x7A, 0xCD, 0x7B, 0xCD, 0x7C, /* 0xD4-0xD7 */
+	0xF2, 0xD5, 0xCD, 0xDC, 0xCD, 0x7D, 0xD6, 0xEB, /* 0xD8-0xDB */
+	0xCD, 0x7E, 0xCD, 0x80, 0xF2, 0xD2, 0xF2, 0xD4, /* 0xDC-0xDF */
+	0xCD, 0x81, 0xCD, 0x82, 0xCD, 0x83, 0xCD, 0x84, /* 0xE0-0xE3 */
+	0xB8, 0xF2, 0xCD, 0x85, 0xCD, 0x86, 0xCD, 0x87, /* 0xE4-0xE7 */
+	0xCD, 0x88, 0xF2, 0xCB, 0xCD, 0x89, 0xCD, 0x8A, /* 0xE8-0xEB */
+	0xCD, 0x8B, 0xF2, 0xCE, 0xC2, 0xF9, 0xCD, 0x8C, /* 0xEC-0xEF */
+	0xD5, 0xDD, 0xF2, 0xCC, 0xF2, 0xCD, 0xF2, 0xCF, /* 0xF0-0xF3 */
+	0xF2, 0xD3, 0xCD, 0x8D, 0xCD, 0x8E, 0xCD, 0x8F, /* 0xF4-0xF7 */
+	0xF2, 0xD9, 0xD3, 0xBC, 0xCD, 0x90, 0xCD, 0x91, /* 0xF8-0xFB */
+	0xCD, 0x92, 0xCD, 0x93, 0xB6, 0xEA, 0xCD, 0x94, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_87[512] = {
+	0xCA, 0xF1, 0xCD, 0x95, 0xB7, 0xE4, 0xF2, 0xD7, /* 0x00-0x03 */
+	0xCD, 0x96, 0xCD, 0x97, 0xCD, 0x98, 0xF2, 0xD8, /* 0x04-0x07 */
+	0xF2, 0xDA, 0xF2, 0xDD, 0xF2, 0xDB, 0xCD, 0x99, /* 0x08-0x0B */
+	0xCD, 0x9A, 0xF2, 0xDC, 0xCD, 0x9B, 0xCD, 0x9C, /* 0x0C-0x0F */
+	0xCD, 0x9D, 0xCD, 0x9E, 0xD1, 0xD1, 0xF2, 0xD1, /* 0x10-0x13 */
+	0xCD, 0x9F, 0xCD, 0xC9, 0xCD, 0xA0, 0xCE, 0xCF, /* 0x14-0x17 */
+	0xD6, 0xA9, 0xCE, 0x40, 0xF2, 0xE3, 0xCE, 0x41, /* 0x18-0x1B */
+	0xC3, 0xDB, 0xCE, 0x42, 0xF2, 0xE0, 0xCE, 0x43, /* 0x1C-0x1F */
+	0xCE, 0x44, 0xC0, 0xAF, 0xF2, 0xEC, 0xF2, 0xDE, /* 0x20-0x23 */
+	0xCE, 0x45, 0xF2, 0xE1, 0xCE, 0x46, 0xCE, 0x47, /* 0x24-0x27 */
+	0xCE, 0x48, 0xF2, 0xE8, 0xCE, 0x49, 0xCE, 0x4A, /* 0x28-0x2B */
+	0xCE, 0x4B, 0xCE, 0x4C, 0xF2, 0xE2, 0xCE, 0x4D, /* 0x2C-0x2F */
+	0xCE, 0x4E, 0xF2, 0xE7, 0xCE, 0x4F, 0xCE, 0x50, /* 0x30-0x33 */
+	0xF2, 0xE6, 0xCE, 0x51, 0xCE, 0x52, 0xF2, 0xE9, /* 0x34-0x37 */
+	0xCE, 0x53, 0xCE, 0x54, 0xCE, 0x55, 0xF2, 0xDF, /* 0x38-0x3B */
+	0xCE, 0x56, 0xCE, 0x57, 0xF2, 0xE4, 0xF2, 0xEA, /* 0x3C-0x3F */
+	0xCE, 0x58, 0xCE, 0x59, 0xCE, 0x5A, 0xCE, 0x5B, /* 0x40-0x43 */
+	0xCE, 0x5C, 0xCE, 0x5D, 0xCE, 0x5E, 0xD3, 0xAC, /* 0x44-0x47 */
+	0xF2, 0xE5, 0xB2, 0xF5, 0xCE, 0x5F, 0xCE, 0x60, /* 0x48-0x4B */
+	0xF2, 0xF2, 0xCE, 0x61, 0xD0, 0xAB, 0xCE, 0x62, /* 0x4C-0x4F */
+	0xCE, 0x63, 0xCE, 0x64, 0xCE, 0x65, 0xF2, 0xF5, /* 0x50-0x53 */
+	0xCE, 0x66, 0xCE, 0x67, 0xCE, 0x68, 0xBB, 0xC8, /* 0x54-0x57 */
+	0xCE, 0x69, 0xF2, 0xF9, 0xCE, 0x6A, 0xCE, 0x6B, /* 0x58-0x5B */
+	0xCE, 0x6C, 0xCE, 0x6D, 0xCE, 0x6E, 0xCE, 0x6F, /* 0x5C-0x5F */
+	0xF2, 0xF0, 0xCE, 0x70, 0xCE, 0x71, 0xF2, 0xF6, /* 0x60-0x63 */
+	0xF2, 0xF8, 0xF2, 0xFA, 0xCE, 0x72, 0xCE, 0x73, /* 0x64-0x67 */
+	0xCE, 0x74, 0xCE, 0x75, 0xCE, 0x76, 0xCE, 0x77, /* 0x68-0x6B */
+	0xCE, 0x78, 0xCE, 0x79, 0xF2, 0xF3, 0xCE, 0x7A, /* 0x6C-0x6F */
+	0xF2, 0xF1, 0xCE, 0x7B, 0xCE, 0x7C, 0xCE, 0x7D, /* 0x70-0x73 */
+	0xBA, 0xFB, 0xCE, 0x7E, 0xB5, 0xFB, 0xCE, 0x80, /* 0x74-0x77 */
+	0xCE, 0x81, 0xCE, 0x82, 0xCE, 0x83, 0xF2, 0xEF, /* 0x78-0x7B */
+	0xF2, 0xF7, 0xF2, 0xED, 0xF2, 0xEE, 0xCE, 0x84, /* 0x7C-0x7F */
+	
+	0xCE, 0x85, 0xCE, 0x86, 0xF2, 0xEB, 0xF3, 0xA6, /* 0x80-0x83 */
+	0xCE, 0x87, 0xF3, 0xA3, 0xCE, 0x88, 0xCE, 0x89, /* 0x84-0x87 */
+	0xF3, 0xA2, 0xCE, 0x8A, 0xCE, 0x8B, 0xF2, 0xF4, /* 0x88-0x8B */
+	0xCE, 0x8C, 0xC8, 0xDA, 0xCE, 0x8D, 0xCE, 0x8E, /* 0x8C-0x8F */
+	0xCE, 0x8F, 0xCE, 0x90, 0xCE, 0x91, 0xF2, 0xFB, /* 0x90-0x93 */
+	0xCE, 0x92, 0xCE, 0x93, 0xCE, 0x94, 0xF3, 0xA5, /* 0x94-0x97 */
+	0xCE, 0x95, 0xCE, 0x96, 0xCE, 0x97, 0xCE, 0x98, /* 0x98-0x9B */
+	0xCE, 0x99, 0xCE, 0x9A, 0xCE, 0x9B, 0xC3, 0xF8, /* 0x9C-0x9F */
+	0xCE, 0x9C, 0xCE, 0x9D, 0xCE, 0x9E, 0xCE, 0x9F, /* 0xA0-0xA3 */
+	0xCE, 0xA0, 0xCF, 0x40, 0xCF, 0x41, 0xCF, 0x42, /* 0xA4-0xA7 */
+	0xF2, 0xFD, 0xCF, 0x43, 0xCF, 0x44, 0xF3, 0xA7, /* 0xA8-0xAB */
+	0xF3, 0xA9, 0xF3, 0xA4, 0xCF, 0x45, 0xF2, 0xFC, /* 0xAC-0xAF */
+	0xCF, 0x46, 0xCF, 0x47, 0xCF, 0x48, 0xF3, 0xAB, /* 0xB0-0xB3 */
+	0xCF, 0x49, 0xF3, 0xAA, 0xCF, 0x4A, 0xCF, 0x4B, /* 0xB4-0xB7 */
+	0xCF, 0x4C, 0xCF, 0x4D, 0xC2, 0xDD, 0xCF, 0x4E, /* 0xB8-0xBB */
+	0xCF, 0x4F, 0xF3, 0xAE, 0xCF, 0x50, 0xCF, 0x51, /* 0xBC-0xBF */
+	0xF3, 0xB0, 0xCF, 0x52, 0xCF, 0x53, 0xCF, 0x54, /* 0xC0-0xC3 */
+	0xCF, 0x55, 0xCF, 0x56, 0xF3, 0xA1, 0xCF, 0x57, /* 0xC4-0xC7 */
+	0xCF, 0x58, 0xCF, 0x59, 0xF3, 0xB1, 0xF3, 0xAC, /* 0xC8-0xCB */
+	0xCF, 0x5A, 0xCF, 0x5B, 0xCF, 0x5C, 0xCF, 0x5D, /* 0xCC-0xCF */
+	0xCF, 0x5E, 0xF3, 0xAF, 0xF2, 0xFE, 0xF3, 0xAD, /* 0xD0-0xD3 */
+	0xCF, 0x5F, 0xCF, 0x60, 0xCF, 0x61, 0xCF, 0x62, /* 0xD4-0xD7 */
+	0xCF, 0x63, 0xCF, 0x64, 0xCF, 0x65, 0xF3, 0xB2, /* 0xD8-0xDB */
+	0xCF, 0x66, 0xCF, 0x67, 0xCF, 0x68, 0xCF, 0x69, /* 0xDC-0xDF */
+	0xF3, 0xB4, 0xCF, 0x6A, 0xCF, 0x6B, 0xCF, 0x6C, /* 0xE0-0xE3 */
+	0xCF, 0x6D, 0xF3, 0xA8, 0xCF, 0x6E, 0xCF, 0x6F, /* 0xE4-0xE7 */
+	0xCF, 0x70, 0xCF, 0x71, 0xF3, 0xB3, 0xCF, 0x72, /* 0xE8-0xEB */
+	0xCF, 0x73, 0xCF, 0x74, 0xF3, 0xB5, 0xCF, 0x75, /* 0xEC-0xEF */
+	0xCF, 0x76, 0xCF, 0x77, 0xCF, 0x78, 0xCF, 0x79, /* 0xF0-0xF3 */
+	0xCF, 0x7A, 0xCF, 0x7B, 0xCF, 0x7C, 0xCF, 0x7D, /* 0xF4-0xF7 */
+	0xCF, 0x7E, 0xD0, 0xB7, 0xCF, 0x80, 0xCF, 0x81, /* 0xF8-0xFB */
+	0xCF, 0x82, 0xCF, 0x83, 0xF3, 0xB8, 0xCF, 0x84, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_88[512] = {
+	0xCF, 0x85, 0xCF, 0x86, 0xCF, 0x87, 0xD9, 0xF9, /* 0x00-0x03 */
+	0xCF, 0x88, 0xCF, 0x89, 0xCF, 0x8A, 0xCF, 0x8B, /* 0x04-0x07 */
+	0xCF, 0x8C, 0xCF, 0x8D, 0xF3, 0xB9, 0xCF, 0x8E, /* 0x08-0x0B */
+	0xCF, 0x8F, 0xCF, 0x90, 0xCF, 0x91, 0xCF, 0x92, /* 0x0C-0x0F */
+	0xCF, 0x93, 0xCF, 0x94, 0xCF, 0x95, 0xF3, 0xB7, /* 0x10-0x13 */
+	0xCF, 0x96, 0xC8, 0xE4, 0xF3, 0xB6, 0xCF, 0x97, /* 0x14-0x17 */
+	0xCF, 0x98, 0xCF, 0x99, 0xCF, 0x9A, 0xF3, 0xBA, /* 0x18-0x1B */
+	0xCF, 0x9B, 0xCF, 0x9C, 0xCF, 0x9D, 0xCF, 0x9E, /* 0x1C-0x1F */
+	0xCF, 0x9F, 0xF3, 0xBB, 0xB4, 0xC0, 0xCF, 0xA0, /* 0x20-0x23 */
+	0xD0, 0x40, 0xD0, 0x41, 0xD0, 0x42, 0xD0, 0x43, /* 0x24-0x27 */
+	0xD0, 0x44, 0xD0, 0x45, 0xD0, 0x46, 0xD0, 0x47, /* 0x28-0x2B */
+	0xD0, 0x48, 0xD0, 0x49, 0xD0, 0x4A, 0xD0, 0x4B, /* 0x2C-0x2F */
+	0xD0, 0x4C, 0xD0, 0x4D, 0xEE, 0xC3, 0xD0, 0x4E, /* 0x30-0x33 */
+	0xD0, 0x4F, 0xD0, 0x50, 0xD0, 0x51, 0xD0, 0x52, /* 0x34-0x37 */
+	0xD0, 0x53, 0xF3, 0xBC, 0xD0, 0x54, 0xD0, 0x55, /* 0x38-0x3B */
+	0xF3, 0xBD, 0xD0, 0x56, 0xD0, 0x57, 0xD0, 0x58, /* 0x3C-0x3F */
+	0xD1, 0xAA, 0xD0, 0x59, 0xD0, 0x5A, 0xD0, 0x5B, /* 0x40-0x43 */
+	0xF4, 0xAC, 0xD0, 0xC6, 0xD0, 0x5C, 0xD0, 0x5D, /* 0x44-0x47 */
+	0xD0, 0x5E, 0xD0, 0x5F, 0xD0, 0x60, 0xD0, 0x61, /* 0x48-0x4B */
+	0xD0, 0xD0, 0xD1, 0xDC, 0xD0, 0x62, 0xD0, 0x63, /* 0x4C-0x4F */
+	0xD0, 0x64, 0xD0, 0x65, 0xD0, 0x66, 0xD0, 0x67, /* 0x50-0x53 */
+	0xCF, 0xCE, 0xD0, 0x68, 0xD0, 0x69, 0xBD, 0xD6, /* 0x54-0x57 */
+	0xD0, 0x6A, 0xD1, 0xC3, 0xD0, 0x6B, 0xD0, 0x6C, /* 0x58-0x5B */
+	0xD0, 0x6D, 0xD0, 0x6E, 0xD0, 0x6F, 0xD0, 0x70, /* 0x5C-0x5F */
+	0xD0, 0x71, 0xBA, 0xE2, 0xE1, 0xE9, 0xD2, 0xC2, /* 0x60-0x63 */
+	0xF1, 0xC2, 0xB2, 0xB9, 0xD0, 0x72, 0xD0, 0x73, /* 0x64-0x67 */
+	0xB1, 0xED, 0xF1, 0xC3, 0xD0, 0x74, 0xC9, 0xC0, /* 0x68-0x6B */
+	0xB3, 0xC4, 0xD0, 0x75, 0xD9, 0xF2, 0xD0, 0x76, /* 0x6C-0x6F */
+	0xCB, 0xA5, 0xD0, 0x77, 0xF1, 0xC4, 0xD0, 0x78, /* 0x70-0x73 */
+	0xD0, 0x79, 0xD0, 0x7A, 0xD0, 0x7B, 0xD6, 0xD4, /* 0x74-0x77 */
+	0xD0, 0x7C, 0xD0, 0x7D, 0xD0, 0x7E, 0xD0, 0x80, /* 0x78-0x7B */
+	0xD0, 0x81, 0xF1, 0xC5, 0xF4, 0xC0, 0xF1, 0xC6, /* 0x7C-0x7F */
+	
+	0xD0, 0x82, 0xD4, 0xAC, 0xF1, 0xC7, 0xD0, 0x83, /* 0x80-0x83 */
+	0xB0, 0xC0, 0xF4, 0xC1, 0xD0, 0x84, 0xD0, 0x85, /* 0x84-0x87 */
+	0xF4, 0xC2, 0xD0, 0x86, 0xD0, 0x87, 0xB4, 0xFC, /* 0x88-0x8B */
+	0xD0, 0x88, 0xC5, 0xDB, 0xD0, 0x89, 0xD0, 0x8A, /* 0x8C-0x8F */
+	0xD0, 0x8B, 0xD0, 0x8C, 0xCC, 0xBB, 0xD0, 0x8D, /* 0x90-0x93 */
+	0xD0, 0x8E, 0xD0, 0x8F, 0xD0, 0xE4, 0xD0, 0x90, /* 0x94-0x97 */
+	0xD0, 0x91, 0xD0, 0x92, 0xD0, 0x93, 0xD0, 0x94, /* 0x98-0x9B */
+	0xCD, 0xE0, 0xD0, 0x95, 0xD0, 0x96, 0xD0, 0x97, /* 0x9C-0x9F */
+	0xD0, 0x98, 0xD0, 0x99, 0xF1, 0xC8, 0xD0, 0x9A, /* 0xA0-0xA3 */
+	0xD9, 0xF3, 0xD0, 0x9B, 0xD0, 0x9C, 0xD0, 0x9D, /* 0xA4-0xA7 */
+	0xD0, 0x9E, 0xD0, 0x9F, 0xD0, 0xA0, 0xB1, 0xBB, /* 0xA8-0xAB */
+	0xD1, 0x40, 0xCF, 0xAE, 0xD1, 0x41, 0xD1, 0x42, /* 0xAC-0xAF */
+	0xD1, 0x43, 0xB8, 0xA4, 0xD1, 0x44, 0xD1, 0x45, /* 0xB0-0xB3 */
+	0xD1, 0x46, 0xD1, 0x47, 0xD1, 0x48, 0xF1, 0xCA, /* 0xB4-0xB7 */
+	0xD1, 0x49, 0xD1, 0x4A, 0xD1, 0x4B, 0xD1, 0x4C, /* 0xB8-0xBB */
+	0xF1, 0xCB, 0xD1, 0x4D, 0xD1, 0x4E, 0xD1, 0x4F, /* 0xBC-0xBF */
+	0xD1, 0x50, 0xB2, 0xC3, 0xC1, 0xD1, 0xD1, 0x51, /* 0xC0-0xC3 */
+	0xD1, 0x52, 0xD7, 0xB0, 0xF1, 0xC9, 0xD1, 0x53, /* 0xC4-0xC7 */
+	0xD1, 0x54, 0xF1, 0xCC, 0xD1, 0x55, 0xD1, 0x56, /* 0xC8-0xCB */
+	0xD1, 0x57, 0xD1, 0x58, 0xF1, 0xCE, 0xD1, 0x59, /* 0xCC-0xCF */
+	0xD1, 0x5A, 0xD1, 0x5B, 0xD9, 0xF6, 0xD1, 0x5C, /* 0xD0-0xD3 */
+	0xD2, 0xE1, 0xD4, 0xA3, 0xD1, 0x5D, 0xD1, 0x5E, /* 0xD4-0xD7 */
+	0xF4, 0xC3, 0xC8, 0xB9, 0xD1, 0x5F, 0xD1, 0x60, /* 0xD8-0xDB */
+	0xD1, 0x61, 0xD1, 0x62, 0xD1, 0x63, 0xF4, 0xC4, /* 0xDC-0xDF */
+	0xD1, 0x64, 0xD1, 0x65, 0xF1, 0xCD, 0xF1, 0xCF, /* 0xE0-0xE3 */
+	0xBF, 0xE3, 0xF1, 0xD0, 0xD1, 0x66, 0xD1, 0x67, /* 0xE4-0xE7 */
+	0xF1, 0xD4, 0xD1, 0x68, 0xD1, 0x69, 0xD1, 0x6A, /* 0xE8-0xEB */
+	0xD1, 0x6B, 0xD1, 0x6C, 0xD1, 0x6D, 0xD1, 0x6E, /* 0xEC-0xEF */
+	0xF1, 0xD6, 0xF1, 0xD1, 0xD1, 0x6F, 0xC9, 0xD1, /* 0xF0-0xF3 */
+	0xC5, 0xE1, 0xD1, 0x70, 0xD1, 0x71, 0xD1, 0x72, /* 0xF4-0xF7 */
+	0xC2, 0xE3, 0xB9, 0xFC, 0xD1, 0x73, 0xD1, 0x74, /* 0xF8-0xFB */
+	0xF1, 0xD3, 0xD1, 0x75, 0xF1, 0xD5, 0xD1, 0x76, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_89[512] = {
+	0xD1, 0x77, 0xD1, 0x78, 0xB9, 0xD3, 0xD1, 0x79, /* 0x00-0x03 */
+	0xD1, 0x7A, 0xD1, 0x7B, 0xD1, 0x7C, 0xD1, 0x7D, /* 0x04-0x07 */
+	0xD1, 0x7E, 0xD1, 0x80, 0xF1, 0xDB, 0xD1, 0x81, /* 0x08-0x0B */
+	0xD1, 0x82, 0xD1, 0x83, 0xD1, 0x84, 0xD1, 0x85, /* 0x0C-0x0F */
+	0xBA, 0xD6, 0xD1, 0x86, 0xB0, 0xFD, 0xF1, 0xD9, /* 0x10-0x13 */
+	0xD1, 0x87, 0xD1, 0x88, 0xD1, 0x89, 0xD1, 0x8A, /* 0x14-0x17 */
+	0xD1, 0x8B, 0xF1, 0xD8, 0xF1, 0xD2, 0xF1, 0xDA, /* 0x18-0x1B */
+	0xD1, 0x8C, 0xD1, 0x8D, 0xD1, 0x8E, 0xD1, 0x8F, /* 0x1C-0x1F */
+	0xD1, 0x90, 0xF1, 0xD7, 0xD1, 0x91, 0xD1, 0x92, /* 0x20-0x23 */
+	0xD1, 0x93, 0xC8, 0xEC, 0xD1, 0x94, 0xD1, 0x95, /* 0x24-0x27 */
+	0xD1, 0x96, 0xD1, 0x97, 0xCD, 0xCA, 0xF1, 0xDD, /* 0x28-0x2B */
+	0xD1, 0x98, 0xD1, 0x99, 0xD1, 0x9A, 0xD1, 0x9B, /* 0x2C-0x2F */
+	0xE5, 0xBD, 0xD1, 0x9C, 0xD1, 0x9D, 0xD1, 0x9E, /* 0x30-0x33 */
+	0xF1, 0xDC, 0xD1, 0x9F, 0xF1, 0xDE, 0xD1, 0xA0, /* 0x34-0x37 */
+	0xD2, 0x40, 0xD2, 0x41, 0xD2, 0x42, 0xD2, 0x43, /* 0x38-0x3B */
+	0xD2, 0x44, 0xD2, 0x45, 0xD2, 0x46, 0xD2, 0x47, /* 0x3C-0x3F */
+	0xD2, 0x48, 0xF1, 0xDF, 0xD2, 0x49, 0xD2, 0x4A, /* 0x40-0x43 */
+	0xCF, 0xE5, 0xD2, 0x4B, 0xD2, 0x4C, 0xD2, 0x4D, /* 0x44-0x47 */
+	0xD2, 0x4E, 0xD2, 0x4F, 0xD2, 0x50, 0xD2, 0x51, /* 0x48-0x4B */
+	0xD2, 0x52, 0xD2, 0x53, 0xD2, 0x54, 0xD2, 0x55, /* 0x4C-0x4F */
+	0xD2, 0x56, 0xD2, 0x57, 0xD2, 0x58, 0xD2, 0x59, /* 0x50-0x53 */
+	0xD2, 0x5A, 0xD2, 0x5B, 0xD2, 0x5C, 0xD2, 0x5D, /* 0x54-0x57 */
+	0xD2, 0x5E, 0xD2, 0x5F, 0xD2, 0x60, 0xD2, 0x61, /* 0x58-0x5B */
+	0xD2, 0x62, 0xD2, 0x63, 0xF4, 0xC5, 0xBD, 0xF3, /* 0x5C-0x5F */
+	0xD2, 0x64, 0xD2, 0x65, 0xD2, 0x66, 0xD2, 0x67, /* 0x60-0x63 */
+	0xD2, 0x68, 0xD2, 0x69, 0xF1, 0xE0, 0xD2, 0x6A, /* 0x64-0x67 */
+	0xD2, 0x6B, 0xD2, 0x6C, 0xD2, 0x6D, 0xD2, 0x6E, /* 0x68-0x6B */
+	0xD2, 0x6F, 0xD2, 0x70, 0xD2, 0x71, 0xD2, 0x72, /* 0x6C-0x6F */
+	0xD2, 0x73, 0xD2, 0x74, 0xD2, 0x75, 0xD2, 0x76, /* 0x70-0x73 */
+	0xD2, 0x77, 0xD2, 0x78, 0xD2, 0x79, 0xD2, 0x7A, /* 0x74-0x77 */
+	0xD2, 0x7B, 0xD2, 0x7C, 0xD2, 0x7D, 0xF1, 0xE1, /* 0x78-0x7B */
+	0xD2, 0x7E, 0xD2, 0x80, 0xD2, 0x81, 0xCE, 0xF7, /* 0x7C-0x7F */
+	
+	0xD2, 0x82, 0xD2, 0xAA, 0xD2, 0x83, 0xF1, 0xFB, /* 0x80-0x83 */
+	0xD2, 0x84, 0xD2, 0x85, 0xB8, 0xB2, 0xD2, 0x86, /* 0x84-0x87 */
+	0xD2, 0x87, 0xD2, 0x88, 0xD2, 0x89, 0xD2, 0x8A, /* 0x88-0x8B */
+	0xD2, 0x8B, 0xD2, 0x8C, 0xD2, 0x8D, 0xD2, 0x8E, /* 0x8C-0x8F */
+	0xD2, 0x8F, 0xD2, 0x90, 0xD2, 0x91, 0xD2, 0x92, /* 0x90-0x93 */
+	0xD2, 0x93, 0xD2, 0x94, 0xD2, 0x95, 0xD2, 0x96, /* 0x94-0x97 */
+	0xD2, 0x97, 0xD2, 0x98, 0xD2, 0x99, 0xD2, 0x9A, /* 0x98-0x9B */
+	0xD2, 0x9B, 0xD2, 0x9C, 0xD2, 0x9D, 0xD2, 0x9E, /* 0x9C-0x9F */
+	0xD2, 0x9F, 0xD2, 0xA0, 0xD3, 0x40, 0xD3, 0x41, /* 0xA0-0xA3 */
+	0xD3, 0x42, 0xD3, 0x43, 0xD3, 0x44, 0xD3, 0x45, /* 0xA4-0xA7 */
+	0xD3, 0x46, 0xD3, 0x47, 0xD3, 0x48, 0xD3, 0x49, /* 0xA8-0xAB */
+	0xD3, 0x4A, 0xD3, 0x4B, 0xD3, 0x4C, 0xD3, 0x4D, /* 0xAC-0xAF */
+	0xD3, 0x4E, 0xD3, 0x4F, 0xD3, 0x50, 0xD3, 0x51, /* 0xB0-0xB3 */
+	0xD3, 0x52, 0xD3, 0x53, 0xD3, 0x54, 0xD3, 0x55, /* 0xB4-0xB7 */
+	0xD3, 0x56, 0xD3, 0x57, 0xD3, 0x58, 0xD3, 0x59, /* 0xB8-0xBB */
+	0xD3, 0x5A, 0xD3, 0x5B, 0xD3, 0x5C, 0xD3, 0x5D, /* 0xBC-0xBF */
+	0xD3, 0x5E, 0xBC, 0xFB, 0xB9, 0xDB, 0xD3, 0x5F, /* 0xC0-0xC3 */
+	0xB9, 0xE6, 0xC3, 0xD9, 0xCA, 0xD3, 0xEA, 0xE8, /* 0xC4-0xC7 */
+	0xC0, 0xC0, 0xBE, 0xF5, 0xEA, 0xE9, 0xEA, 0xEA, /* 0xC8-0xCB */
+	0xEA, 0xEB, 0xD3, 0x60, 0xEA, 0xEC, 0xEA, 0xED, /* 0xCC-0xCF */
+	0xEA, 0xEE, 0xEA, 0xEF, 0xBD, 0xC7, 0xD3, 0x61, /* 0xD0-0xD3 */
+	0xD3, 0x62, 0xD3, 0x63, 0xF5, 0xFB, 0xD3, 0x64, /* 0xD4-0xD7 */
+	0xD3, 0x65, 0xD3, 0x66, 0xF5, 0xFD, 0xD3, 0x67, /* 0xD8-0xDB */
+	0xF5, 0xFE, 0xD3, 0x68, 0xF5, 0xFC, 0xD3, 0x69, /* 0xDC-0xDF */
+	0xD3, 0x6A, 0xD3, 0x6B, 0xD3, 0x6C, 0xBD, 0xE2, /* 0xE0-0xE3 */
+	0xD3, 0x6D, 0xF6, 0xA1, 0xB4, 0xA5, 0xD3, 0x6E, /* 0xE4-0xE7 */
+	0xD3, 0x6F, 0xD3, 0x70, 0xD3, 0x71, 0xF6, 0xA2, /* 0xE8-0xEB */
+	0xD3, 0x72, 0xD3, 0x73, 0xD3, 0x74, 0xF6, 0xA3, /* 0xEC-0xEF */
+	0xD3, 0x75, 0xD3, 0x76, 0xD3, 0x77, 0xEC, 0xB2, /* 0xF0-0xF3 */
+	0xD3, 0x78, 0xD3, 0x79, 0xD3, 0x7A, 0xD3, 0x7B, /* 0xF4-0xF7 */
+	0xD3, 0x7C, 0xD3, 0x7D, 0xD3, 0x7E, 0xD3, 0x80, /* 0xF8-0xFB */
+	0xD3, 0x81, 0xD3, 0x82, 0xD3, 0x83, 0xD3, 0x84, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8A[512] = {
+	0xD1, 0xD4, 0xD3, 0x85, 0xD3, 0x86, 0xD3, 0x87, /* 0x00-0x03 */
+	0xD3, 0x88, 0xD3, 0x89, 0xD3, 0x8A, 0xD9, 0xEA, /* 0x04-0x07 */
+	0xD3, 0x8B, 0xD3, 0x8C, 0xD3, 0x8D, 0xD3, 0x8E, /* 0x08-0x0B */
+	0xD3, 0x8F, 0xD3, 0x90, 0xD3, 0x91, 0xD3, 0x92, /* 0x0C-0x0F */
+	0xD3, 0x93, 0xD3, 0x94, 0xD3, 0x95, 0xD3, 0x96, /* 0x10-0x13 */
+	0xD3, 0x97, 0xD3, 0x98, 0xD3, 0x99, 0xD3, 0x9A, /* 0x14-0x17 */
+	0xD3, 0x9B, 0xD3, 0x9C, 0xD3, 0x9D, 0xD3, 0x9E, /* 0x18-0x1B */
+	0xD3, 0x9F, 0xD3, 0xA0, 0xD4, 0x40, 0xD4, 0x41, /* 0x1C-0x1F */
+	0xD4, 0x42, 0xD4, 0x43, 0xD4, 0x44, 0xD4, 0x45, /* 0x20-0x23 */
+	0xD4, 0x46, 0xD4, 0x47, 0xD4, 0x48, 0xD4, 0x49, /* 0x24-0x27 */
+	0xD4, 0x4A, 0xD4, 0x4B, 0xD4, 0x4C, 0xD4, 0x4D, /* 0x28-0x2B */
+	0xD4, 0x4E, 0xD4, 0x4F, 0xD4, 0x50, 0xD4, 0x51, /* 0x2C-0x2F */
+	0xD4, 0x52, 0xD4, 0x53, 0xD4, 0x54, 0xD4, 0x55, /* 0x30-0x33 */
+	0xD4, 0x56, 0xD4, 0x57, 0xD4, 0x58, 0xD4, 0x59, /* 0x34-0x37 */
+	0xD4, 0x5A, 0xD4, 0x5B, 0xD4, 0x5C, 0xD4, 0x5D, /* 0x38-0x3B */
+	0xD4, 0x5E, 0xD4, 0x5F, 0xF6, 0xA4, 0xD4, 0x60, /* 0x3C-0x3F */
+	0xD4, 0x61, 0xD4, 0x62, 0xD4, 0x63, 0xD4, 0x64, /* 0x40-0x43 */
+	0xD4, 0x65, 0xD4, 0x66, 0xD4, 0x67, 0xD4, 0x68, /* 0x44-0x47 */
+	0xEE, 0xBA, 0xD4, 0x69, 0xD4, 0x6A, 0xD4, 0x6B, /* 0x48-0x4B */
+	0xD4, 0x6C, 0xD4, 0x6D, 0xD4, 0x6E, 0xD4, 0x6F, /* 0x4C-0x4F */
+	0xD4, 0x70, 0xD4, 0x71, 0xD4, 0x72, 0xD4, 0x73, /* 0x50-0x53 */
+	0xD4, 0x74, 0xD4, 0x75, 0xD4, 0x76, 0xD4, 0x77, /* 0x54-0x57 */
+	0xD4, 0x78, 0xD4, 0x79, 0xD4, 0x7A, 0xD4, 0x7B, /* 0x58-0x5B */
+	0xD4, 0x7C, 0xD4, 0x7D, 0xD4, 0x7E, 0xD4, 0x80, /* 0x5C-0x5F */
+	0xD4, 0x81, 0xD4, 0x82, 0xD4, 0x83, 0xD4, 0x84, /* 0x60-0x63 */
+	0xD4, 0x85, 0xD4, 0x86, 0xD4, 0x87, 0xD4, 0x88, /* 0x64-0x67 */
+	0xD4, 0x89, 0xD4, 0x8A, 0xD4, 0x8B, 0xD4, 0x8C, /* 0x68-0x6B */
+	0xD4, 0x8D, 0xD4, 0x8E, 0xD4, 0x8F, 0xD4, 0x90, /* 0x6C-0x6F */
+	0xD4, 0x91, 0xD4, 0x92, 0xD4, 0x93, 0xD4, 0x94, /* 0x70-0x73 */
+	0xD4, 0x95, 0xD4, 0x96, 0xD4, 0x97, 0xD4, 0x98, /* 0x74-0x77 */
+	0xD4, 0x99, 0xD5, 0xB2, 0xD4, 0x9A, 0xD4, 0x9B, /* 0x78-0x7B */
+	0xD4, 0x9C, 0xD4, 0x9D, 0xD4, 0x9E, 0xD4, 0x9F, /* 0x7C-0x7F */
+	
+	0xD4, 0xA0, 0xD5, 0x40, 0xD5, 0x41, 0xD5, 0x42, /* 0x80-0x83 */
+	0xD5, 0x43, 0xD5, 0x44, 0xD5, 0x45, 0xD5, 0x46, /* 0x84-0x87 */
+	0xD5, 0x47, 0xD3, 0xFE, 0xCC, 0xDC, 0xD5, 0x48, /* 0x88-0x8B */
+	0xD5, 0x49, 0xD5, 0x4A, 0xD5, 0x4B, 0xD5, 0x4C, /* 0x8C-0x8F */
+	0xD5, 0x4D, 0xD5, 0x4E, 0xD5, 0x4F, 0xCA, 0xC4, /* 0x90-0x93 */
+	0xD5, 0x50, 0xD5, 0x51, 0xD5, 0x52, 0xD5, 0x53, /* 0x94-0x97 */
+	0xD5, 0x54, 0xD5, 0x55, 0xD5, 0x56, 0xD5, 0x57, /* 0x98-0x9B */
+	0xD5, 0x58, 0xD5, 0x59, 0xD5, 0x5A, 0xD5, 0x5B, /* 0x9C-0x9F */
+	0xD5, 0x5C, 0xD5, 0x5D, 0xD5, 0x5E, 0xD5, 0x5F, /* 0xA0-0xA3 */
+	0xD5, 0x60, 0xD5, 0x61, 0xD5, 0x62, 0xD5, 0x63, /* 0xA4-0xA7 */
+	0xD5, 0x64, 0xD5, 0x65, 0xD5, 0x66, 0xD5, 0x67, /* 0xA8-0xAB */
+	0xD5, 0x68, 0xD5, 0x69, 0xD5, 0x6A, 0xD5, 0x6B, /* 0xAC-0xAF */
+	0xD5, 0x6C, 0xD5, 0x6D, 0xD5, 0x6E, 0xD5, 0x6F, /* 0xB0-0xB3 */
+	0xD5, 0x70, 0xD5, 0x71, 0xD5, 0x72, 0xD5, 0x73, /* 0xB4-0xB7 */
+	0xD5, 0x74, 0xD5, 0x75, 0xD5, 0x76, 0xD5, 0x77, /* 0xB8-0xBB */
+	0xD5, 0x78, 0xD5, 0x79, 0xD5, 0x7A, 0xD5, 0x7B, /* 0xBC-0xBF */
+	0xD5, 0x7C, 0xD5, 0x7D, 0xD5, 0x7E, 0xD5, 0x80, /* 0xC0-0xC3 */
+	0xD5, 0x81, 0xD5, 0x82, 0xD5, 0x83, 0xD5, 0x84, /* 0xC4-0xC7 */
+	0xD5, 0x85, 0xD5, 0x86, 0xD5, 0x87, 0xD5, 0x88, /* 0xC8-0xCB */
+	0xD5, 0x89, 0xD5, 0x8A, 0xD5, 0x8B, 0xD5, 0x8C, /* 0xCC-0xCF */
+	0xD5, 0x8D, 0xD5, 0x8E, 0xD5, 0x8F, 0xD5, 0x90, /* 0xD0-0xD3 */
+	0xD5, 0x91, 0xD5, 0x92, 0xD5, 0x93, 0xD5, 0x94, /* 0xD4-0xD7 */
+	0xD5, 0x95, 0xD5, 0x96, 0xD5, 0x97, 0xD5, 0x98, /* 0xD8-0xDB */
+	0xD5, 0x99, 0xD5, 0x9A, 0xD5, 0x9B, 0xD5, 0x9C, /* 0xDC-0xDF */
+	0xD5, 0x9D, 0xD5, 0x9E, 0xD5, 0x9F, 0xD5, 0xA0, /* 0xE0-0xE3 */
+	0xD6, 0x40, 0xD6, 0x41, 0xD6, 0x42, 0xD6, 0x43, /* 0xE4-0xE7 */
+	0xD6, 0x44, 0xD6, 0x45, 0xD6, 0x46, 0xD6, 0x47, /* 0xE8-0xEB */
+	0xD6, 0x48, 0xD6, 0x49, 0xD6, 0x4A, 0xD6, 0x4B, /* 0xEC-0xEF */
+	0xD6, 0x4C, 0xD6, 0x4D, 0xD6, 0x4E, 0xD6, 0x4F, /* 0xF0-0xF3 */
+	0xD6, 0x50, 0xD6, 0x51, 0xD6, 0x52, 0xD6, 0x53, /* 0xF4-0xF7 */
+	0xD6, 0x54, 0xD6, 0x55, 0xD6, 0x56, 0xD6, 0x57, /* 0xF8-0xFB */
+	0xD6, 0x58, 0xD6, 0x59, 0xD6, 0x5A, 0xD6, 0x5B, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8B[512] = {
+	0xD6, 0x5C, 0xD6, 0x5D, 0xD6, 0x5E, 0xD6, 0x5F, /* 0x00-0x03 */
+	0xD6, 0x60, 0xD6, 0x61, 0xD6, 0x62, 0xE5, 0xC0, /* 0x04-0x07 */
+	0xD6, 0x63, 0xD6, 0x64, 0xD6, 0x65, 0xD6, 0x66, /* 0x08-0x0B */
+	0xD6, 0x67, 0xD6, 0x68, 0xD6, 0x69, 0xD6, 0x6A, /* 0x0C-0x0F */
+	0xD6, 0x6B, 0xD6, 0x6C, 0xD6, 0x6D, 0xD6, 0x6E, /* 0x10-0x13 */
+	0xD6, 0x6F, 0xD6, 0x70, 0xD6, 0x71, 0xD6, 0x72, /* 0x14-0x17 */
+	0xD6, 0x73, 0xD6, 0x74, 0xD6, 0x75, 0xD6, 0x76, /* 0x18-0x1B */
+	0xD6, 0x77, 0xD6, 0x78, 0xD6, 0x79, 0xD6, 0x7A, /* 0x1C-0x1F */
+	0xD6, 0x7B, 0xD6, 0x7C, 0xD6, 0x7D, 0xD6, 0x7E, /* 0x20-0x23 */
+	0xD6, 0x80, 0xD6, 0x81, 0xF6, 0xA5, 0xD6, 0x82, /* 0x24-0x27 */
+	0xD6, 0x83, 0xD6, 0x84, 0xD6, 0x85, 0xD6, 0x86, /* 0x28-0x2B */
+	0xD6, 0x87, 0xD6, 0x88, 0xD6, 0x89, 0xD6, 0x8A, /* 0x2C-0x2F */
+	0xD6, 0x8B, 0xD6, 0x8C, 0xD6, 0x8D, 0xD6, 0x8E, /* 0x30-0x33 */
+	0xD6, 0x8F, 0xD6, 0x90, 0xD6, 0x91, 0xD6, 0x92, /* 0x34-0x37 */
+	0xD6, 0x93, 0xD6, 0x94, 0xD6, 0x95, 0xD6, 0x96, /* 0x38-0x3B */
+	0xD6, 0x97, 0xD6, 0x98, 0xD6, 0x99, 0xD6, 0x9A, /* 0x3C-0x3F */
+	0xD6, 0x9B, 0xD6, 0x9C, 0xD6, 0x9D, 0xD6, 0x9E, /* 0x40-0x43 */
+	0xD6, 0x9F, 0xD6, 0xA0, 0xD7, 0x40, 0xD7, 0x41, /* 0x44-0x47 */
+	0xD7, 0x42, 0xD7, 0x43, 0xD7, 0x44, 0xD7, 0x45, /* 0x48-0x4B */
+	0xD7, 0x46, 0xD7, 0x47, 0xD7, 0x48, 0xD7, 0x49, /* 0x4C-0x4F */
+	0xD7, 0x4A, 0xD7, 0x4B, 0xD7, 0x4C, 0xD7, 0x4D, /* 0x50-0x53 */
+	0xD7, 0x4E, 0xD7, 0x4F, 0xD7, 0x50, 0xD7, 0x51, /* 0x54-0x57 */
+	0xD7, 0x52, 0xD7, 0x53, 0xD7, 0x54, 0xD7, 0x55, /* 0x58-0x5B */
+	0xD7, 0x56, 0xD7, 0x57, 0xD7, 0x58, 0xD7, 0x59, /* 0x5C-0x5F */
+	0xD7, 0x5A, 0xD7, 0x5B, 0xD7, 0x5C, 0xD7, 0x5D, /* 0x60-0x63 */
+	0xD7, 0x5E, 0xD7, 0x5F, 0xBE, 0xAF, 0xD7, 0x60, /* 0x64-0x67 */
+	0xD7, 0x61, 0xD7, 0x62, 0xD7, 0x63, 0xD7, 0x64, /* 0x68-0x6B */
+	0xC6, 0xA9, 0xD7, 0x65, 0xD7, 0x66, 0xD7, 0x67, /* 0x6C-0x6F */
+	0xD7, 0x68, 0xD7, 0x69, 0xD7, 0x6A, 0xD7, 0x6B, /* 0x70-0x73 */
+	0xD7, 0x6C, 0xD7, 0x6D, 0xD7, 0x6E, 0xD7, 0x6F, /* 0x74-0x77 */
+	0xD7, 0x70, 0xD7, 0x71, 0xD7, 0x72, 0xD7, 0x73, /* 0x78-0x7B */
+	0xD7, 0x74, 0xD7, 0x75, 0xD7, 0x76, 0xD7, 0x77, /* 0x7C-0x7F */
+	
+	0xD7, 0x78, 0xD7, 0x79, 0xD7, 0x7A, 0xD7, 0x7B, /* 0x80-0x83 */
+	0xD7, 0x7C, 0xD7, 0x7D, 0xD7, 0x7E, 0xD7, 0x80, /* 0x84-0x87 */
+	0xD7, 0x81, 0xD7, 0x82, 0xD7, 0x83, 0xD7, 0x84, /* 0x88-0x8B */
+	0xD7, 0x85, 0xD7, 0x86, 0xD7, 0x87, 0xD7, 0x88, /* 0x8C-0x8F */
+	0xD7, 0x89, 0xD7, 0x8A, 0xD7, 0x8B, 0xD7, 0x8C, /* 0x90-0x93 */
+	0xD7, 0x8D, 0xD7, 0x8E, 0xD7, 0x8F, 0xD7, 0x90, /* 0x94-0x97 */
+	0xD7, 0x91, 0xD7, 0x92, 0xD7, 0x93, 0xD7, 0x94, /* 0x98-0x9B */
+	0xD7, 0x95, 0xD7, 0x96, 0xD7, 0x97, 0xD7, 0x98, /* 0x9C-0x9F */
+	0xDA, 0xA5, 0xBC, 0xC6, 0xB6, 0xA9, 0xB8, 0xBC, /* 0xA0-0xA3 */
+	0xC8, 0xCF, 0xBC, 0xA5, 0xDA, 0xA6, 0xDA, 0xA7, /* 0xA4-0xA7 */
+	0xCC, 0xD6, 0xC8, 0xC3, 0xDA, 0xA8, 0xC6, 0xFD, /* 0xA8-0xAB */
+	0xD7, 0x99, 0xD1, 0xB5, 0xD2, 0xE9, 0xD1, 0xB6, /* 0xAC-0xAF */
+	0xBC, 0xC7, 0xD7, 0x9A, 0xBD, 0xB2, 0xBB, 0xE4, /* 0xB0-0xB3 */
+	0xDA, 0xA9, 0xDA, 0xAA, 0xD1, 0xC8, 0xDA, 0xAB, /* 0xB4-0xB7 */
+	0xD0, 0xED, 0xB6, 0xEF, 0xC2, 0xDB, 0xD7, 0x9B, /* 0xB8-0xBB */
+	0xCB, 0xCF, 0xB7, 0xED, 0xC9, 0xE8, 0xB7, 0xC3, /* 0xBC-0xBF */
+	0xBE, 0xF7, 0xD6, 0xA4, 0xDA, 0xAC, 0xDA, 0xAD, /* 0xC0-0xC3 */
+	0xC6, 0xC0, 0xD7, 0xE7, 0xCA, 0xB6, 0xD7, 0x9C, /* 0xC4-0xC7 */
+	0xD5, 0xA9, 0xCB, 0xDF, 0xD5, 0xEF, 0xDA, 0xAE, /* 0xC8-0xCB */
+	0xD6, 0xDF, 0xB4, 0xCA, 0xDA, 0xB0, 0xDA, 0xAF, /* 0xCC-0xCF */
+	0xD7, 0x9D, 0xD2, 0xEB, 0xDA, 0xB1, 0xDA, 0xB2, /* 0xD0-0xD3 */
+	0xDA, 0xB3, 0xCA, 0xD4, 0xDA, 0xB4, 0xCA, 0xAB, /* 0xD4-0xD7 */
+	0xDA, 0xB5, 0xDA, 0xB6, 0xB3, 0xCF, 0xD6, 0xEF, /* 0xD8-0xDB */
+	0xDA, 0xB7, 0xBB, 0xB0, 0xB5, 0xAE, 0xDA, 0xB8, /* 0xDC-0xDF */
+	0xDA, 0xB9, 0xB9, 0xEE, 0xD1, 0xAF, 0xD2, 0xE8, /* 0xE0-0xE3 */
+	0xDA, 0xBA, 0xB8, 0xC3, 0xCF, 0xEA, 0xB2, 0xEF, /* 0xE4-0xE7 */
+	0xDA, 0xBB, 0xDA, 0xBC, 0xD7, 0x9E, 0xBD, 0xEB, /* 0xE8-0xEB */
+	0xCE, 0xDC, 0xD3, 0xEF, 0xDA, 0xBD, 0xCE, 0xF3, /* 0xEC-0xEF */
+	0xDA, 0xBE, 0xD3, 0xD5, 0xBB, 0xE5, 0xDA, 0xBF, /* 0xF0-0xF3 */
+	0xCB, 0xB5, 0xCB, 0xD0, 0xDA, 0xC0, 0xC7, 0xEB, /* 0xF4-0xF7 */
+	0xD6, 0xEE, 0xDA, 0xC1, 0xC5, 0xB5, 0xB6, 0xC1, /* 0xF8-0xFB */
+	0xDA, 0xC2, 0xB7, 0xCC, 0xBF, 0xCE, 0xDA, 0xC3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8C[512] = {
+	0xDA, 0xC4, 0xCB, 0xAD, 0xDA, 0xC5, 0xB5, 0xF7, /* 0x00-0x03 */
+	0xDA, 0xC6, 0xC1, 0xC2, 0xD7, 0xBB, 0xDA, 0xC7, /* 0x04-0x07 */
+	0xCC, 0xB8, 0xD7, 0x9F, 0xD2, 0xEA, 0xC4, 0xB1, /* 0x08-0x0B */
+	0xDA, 0xC8, 0xB5, 0xFD, 0xBB, 0xD1, 0xDA, 0xC9, /* 0x0C-0x0F */
+	0xD0, 0xB3, 0xDA, 0xCA, 0xDA, 0xCB, 0xCE, 0xBD, /* 0x10-0x13 */
+	0xDA, 0xCC, 0xDA, 0xCD, 0xDA, 0xCE, 0xB2, 0xF7, /* 0x14-0x17 */
+	0xDA, 0xD1, 0xDA, 0xCF, 0xD1, 0xE8, 0xDA, 0xD0, /* 0x18-0x1B */
+	0xC3, 0xD5, 0xDA, 0xD2, 0xD7, 0xA0, 0xDA, 0xD3, /* 0x1C-0x1F */
+	0xDA, 0xD4, 0xDA, 0xD5, 0xD0, 0xBB, 0xD2, 0xA5, /* 0x20-0x23 */
+	0xB0, 0xF9, 0xDA, 0xD6, 0xC7, 0xAB, 0xDA, 0xD7, /* 0x24-0x27 */
+	0xBD, 0xF7, 0xC3, 0xA1, 0xDA, 0xD8, 0xDA, 0xD9, /* 0x28-0x2B */
+	0xC3, 0xFD, 0xCC, 0xB7, 0xDA, 0xDA, 0xDA, 0xDB, /* 0x2C-0x2F */
+	0xC0, 0xBE, 0xC6, 0xD7, 0xDA, 0xDC, 0xDA, 0xDD, /* 0x30-0x33 */
+	0xC7, 0xB4, 0xDA, 0xDE, 0xDA, 0xDF, 0xB9, 0xC8, /* 0x34-0x37 */
+	0xD8, 0x40, 0xD8, 0x41, 0xD8, 0x42, 0xD8, 0x43, /* 0x38-0x3B */
+	0xD8, 0x44, 0xD8, 0x45, 0xD8, 0x46, 0xD8, 0x47, /* 0x3C-0x3F */
+	0xD8, 0x48, 0xBB, 0xED, 0xD8, 0x49, 0xD8, 0x4A, /* 0x40-0x43 */
+	0xD8, 0x4B, 0xD8, 0x4C, 0xB6, 0xB9, 0xF4, 0xF8, /* 0x44-0x47 */
+	0xD8, 0x4D, 0xF4, 0xF9, 0xD8, 0x4E, 0xD8, 0x4F, /* 0x48-0x4B */
+	0xCD, 0xE3, 0xD8, 0x50, 0xD8, 0x51, 0xD8, 0x52, /* 0x4C-0x4F */
+	0xD8, 0x53, 0xD8, 0x54, 0xD8, 0x55, 0xD8, 0x56, /* 0x50-0x53 */
+	0xD8, 0x57, 0xF5, 0xB9, 0xD8, 0x58, 0xD8, 0x59, /* 0x54-0x57 */
+	0xD8, 0x5A, 0xD8, 0x5B, 0xEB, 0xE0, 0xD8, 0x5C, /* 0x58-0x5B */
+	0xD8, 0x5D, 0xD8, 0x5E, 0xD8, 0x5F, 0xD8, 0x60, /* 0x5C-0x5F */
+	0xD8, 0x61, 0xCF, 0xF3, 0xBB, 0xBF, 0xD8, 0x62, /* 0x60-0x63 */
+	0xD8, 0x63, 0xD8, 0x64, 0xD8, 0x65, 0xD8, 0x66, /* 0x64-0x67 */
+	0xD8, 0x67, 0xD8, 0x68, 0xBA, 0xC0, 0xD4, 0xA5, /* 0x68-0x6B */
+	0xD8, 0x69, 0xD8, 0x6A, 0xD8, 0x6B, 0xD8, 0x6C, /* 0x6C-0x6F */
+	0xD8, 0x6D, 0xD8, 0x6E, 0xD8, 0x6F, 0xE1, 0xD9, /* 0x70-0x73 */
+	0xD8, 0x70, 0xD8, 0x71, 0xD8, 0x72, 0xD8, 0x73, /* 0x74-0x77 */
+	0xF5, 0xF4, 0xB1, 0xAA, 0xB2, 0xF2, 0xD8, 0x74, /* 0x78-0x7B */
+	0xD8, 0x75, 0xD8, 0x76, 0xD8, 0x77, 0xD8, 0x78, /* 0x7C-0x7F */
+	
+	0xD8, 0x79, 0xD8, 0x7A, 0xF5, 0xF5, 0xD8, 0x7B, /* 0x80-0x83 */
+	0xD8, 0x7C, 0xF5, 0xF7, 0xD8, 0x7D, 0xD8, 0x7E, /* 0x84-0x87 */
+	0xD8, 0x80, 0xBA, 0xD1, 0xF5, 0xF6, 0xD8, 0x81, /* 0x88-0x8B */
+	0xC3, 0xB2, 0xD8, 0x82, 0xD8, 0x83, 0xD8, 0x84, /* 0x8C-0x8F */
+	0xD8, 0x85, 0xD8, 0x86, 0xD8, 0x87, 0xD8, 0x88, /* 0x90-0x93 */
+	0xF5, 0xF9, 0xD8, 0x89, 0xD8, 0x8A, 0xD8, 0x8B, /* 0x94-0x97 */
+	0xF5, 0xF8, 0xD8, 0x8C, 0xD8, 0x8D, 0xD8, 0x8E, /* 0x98-0x9B */
+	0xD8, 0x8F, 0xD8, 0x90, 0xD8, 0x91, 0xD8, 0x92, /* 0x9C-0x9F */
+	0xD8, 0x93, 0xD8, 0x94, 0xD8, 0x95, 0xD8, 0x96, /* 0xA0-0xA3 */
+	0xD8, 0x97, 0xD8, 0x98, 0xD8, 0x99, 0xD8, 0x9A, /* 0xA4-0xA7 */
+	0xD8, 0x9B, 0xD8, 0x9C, 0xD8, 0x9D, 0xD8, 0x9E, /* 0xA8-0xAB */
+	0xD8, 0x9F, 0xD8, 0xA0, 0xD9, 0x40, 0xD9, 0x41, /* 0xAC-0xAF */
+	0xD9, 0x42, 0xD9, 0x43, 0xD9, 0x44, 0xD9, 0x45, /* 0xB0-0xB3 */
+	0xD9, 0x46, 0xD9, 0x47, 0xD9, 0x48, 0xD9, 0x49, /* 0xB4-0xB7 */
+	0xD9, 0x4A, 0xD9, 0x4B, 0xD9, 0x4C, 0xD9, 0x4D, /* 0xB8-0xBB */
+	0xD9, 0x4E, 0xD9, 0x4F, 0xD9, 0x50, 0xD9, 0x51, /* 0xBC-0xBF */
+	0xD9, 0x52, 0xD9, 0x53, 0xD9, 0x54, 0xD9, 0x55, /* 0xC0-0xC3 */
+	0xD9, 0x56, 0xD9, 0x57, 0xD9, 0x58, 0xD9, 0x59, /* 0xC4-0xC7 */
+	0xD9, 0x5A, 0xD9, 0x5B, 0xD9, 0x5C, 0xD9, 0x5D, /* 0xC8-0xCB */
+	0xD9, 0x5E, 0xD9, 0x5F, 0xD9, 0x60, 0xD9, 0x61, /* 0xCC-0xCF */
+	0xD9, 0x62, 0xD9, 0x63, 0xD9, 0x64, 0xD9, 0x65, /* 0xD0-0xD3 */
+	0xD9, 0x66, 0xD9, 0x67, 0xD9, 0x68, 0xD9, 0x69, /* 0xD4-0xD7 */
+	0xD9, 0x6A, 0xD9, 0x6B, 0xD9, 0x6C, 0xD9, 0x6D, /* 0xD8-0xDB */
+	0xD9, 0x6E, 0xD9, 0x6F, 0xD9, 0x70, 0xD9, 0x71, /* 0xDC-0xDF */
+	0xD9, 0x72, 0xD9, 0x73, 0xD9, 0x74, 0xD9, 0x75, /* 0xE0-0xE3 */
+	0xD9, 0x76, 0xD9, 0x77, 0xD9, 0x78, 0xD9, 0x79, /* 0xE4-0xE7 */
+	0xD9, 0x7A, 0xD9, 0x7B, 0xD9, 0x7C, 0xD9, 0x7D, /* 0xE8-0xEB */
+	0xD9, 0x7E, 0xD9, 0x80, 0xD9, 0x81, 0xD9, 0x82, /* 0xEC-0xEF */
+	0xD9, 0x83, 0xD9, 0x84, 0xD9, 0x85, 0xD9, 0x86, /* 0xF0-0xF3 */
+	0xD9, 0x87, 0xD9, 0x88, 0xD9, 0x89, 0xD9, 0x8A, /* 0xF4-0xF7 */
+	0xD9, 0x8B, 0xD9, 0x8C, 0xD9, 0x8D, 0xD9, 0x8E, /* 0xF8-0xFB */
+	0xD9, 0x8F, 0xD9, 0x90, 0xD9, 0x91, 0xD9, 0x92, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8D[512] = {
+	0xD9, 0x93, 0xD9, 0x94, 0xD9, 0x95, 0xD9, 0x96, /* 0x00-0x03 */
+	0xD9, 0x97, 0xD9, 0x98, 0xD9, 0x99, 0xD9, 0x9A, /* 0x04-0x07 */
+	0xD9, 0x9B, 0xD9, 0x9C, 0xD9, 0x9D, 0xD9, 0x9E, /* 0x08-0x0B */
+	0xD9, 0x9F, 0xD9, 0xA0, 0xDA, 0x40, 0xDA, 0x41, /* 0x0C-0x0F */
+	0xDA, 0x42, 0xDA, 0x43, 0xDA, 0x44, 0xDA, 0x45, /* 0x10-0x13 */
+	0xDA, 0x46, 0xDA, 0x47, 0xDA, 0x48, 0xDA, 0x49, /* 0x14-0x17 */
+	0xDA, 0x4A, 0xDA, 0x4B, 0xDA, 0x4C, 0xDA, 0x4D, /* 0x18-0x1B */
+	0xDA, 0x4E, 0xB1, 0xB4, 0xD5, 0xEA, 0xB8, 0xBA, /* 0x1C-0x1F */
+	0xDA, 0x4F, 0xB9, 0xB1, 0xB2, 0xC6, 0xD4, 0xF0, /* 0x20-0x23 */
+	0xCF, 0xCD, 0xB0, 0xDC, 0xD5, 0xCB, 0xBB, 0xF5, /* 0x24-0x27 */
+	0xD6, 0xCA, 0xB7, 0xB7, 0xCC, 0xB0, 0xC6, 0xB6, /* 0x28-0x2B */
+	0xB1, 0xE1, 0xB9, 0xBA, 0xD6, 0xFC, 0xB9, 0xE1, /* 0x2C-0x2F */
+	0xB7, 0xA1, 0xBC, 0xFA, 0xEA, 0xDA, 0xEA, 0xDB, /* 0x30-0x33 */
+	0xCC, 0xF9, 0xB9, 0xF3, 0xEA, 0xDC, 0xB4, 0xFB, /* 0x34-0x37 */
+	0xC3, 0xB3, 0xB7, 0xD1, 0xBA, 0xD8, 0xEA, 0xDD, /* 0x38-0x3B */
+	0xD4, 0xF4, 0xEA, 0xDE, 0xBC, 0xD6, 0xBB, 0xDF, /* 0x3C-0x3F */
+	0xEA, 0xDF, 0xC1, 0xDE, 0xC2, 0xB8, 0xD4, 0xDF, /* 0x40-0x43 */
+	0xD7, 0xCA, 0xEA, 0xE0, 0xEA, 0xE1, 0xEA, 0xE4, /* 0x44-0x47 */
+	0xEA, 0xE2, 0xEA, 0xE3, 0xC9, 0xDE, 0xB8, 0xB3, /* 0x48-0x4B */
+	0xB6, 0xC4, 0xEA, 0xE5, 0xCA, 0xEA, 0xC9, 0xCD, /* 0x4C-0x4F */
+	0xB4, 0xCD, 0xDA, 0x50, 0xDA, 0x51, 0xE2, 0xD9, /* 0x50-0x53 */
+	0xC5, 0xE2, 0xEA, 0xE6, 0xC0, 0xB5, 0xDA, 0x52, /* 0x54-0x57 */
+	0xD7, 0xB8, 0xEA, 0xE7, 0xD7, 0xAC, 0xC8, 0xFC, /* 0x58-0x5B */
+	0xD8, 0xD3, 0xD8, 0xCD, 0xD4, 0xDE, 0xDA, 0x53, /* 0x5C-0x5F */
+	0xD4, 0xF9, 0xC9, 0xC4, 0xD3, 0xAE, 0xB8, 0xD3, /* 0x60-0x63 */
+	0xB3, 0xE0, 0xDA, 0x54, 0xC9, 0xE2, 0xF4, 0xF6, /* 0x64-0x67 */
+	0xDA, 0x55, 0xDA, 0x56, 0xDA, 0x57, 0xBA, 0xD5, /* 0x68-0x6B */
+	0xDA, 0x58, 0xF4, 0xF7, 0xDA, 0x59, 0xDA, 0x5A, /* 0x6C-0x6F */
+	0xD7, 0xDF, 0xDA, 0x5B, 0xDA, 0x5C, 0xF4, 0xF1, /* 0x70-0x73 */
+	0xB8, 0xB0, 0xD5, 0xD4, 0xB8, 0xCF, 0xC6, 0xF0, /* 0x74-0x77 */
+	0xDA, 0x5D, 0xDA, 0x5E, 0xDA, 0x5F, 0xDA, 0x60, /* 0x78-0x7B */
+	0xDA, 0x61, 0xDA, 0x62, 0xDA, 0x63, 0xDA, 0x64, /* 0x7C-0x7F */
+	
+	0xDA, 0x65, 0xB3, 0xC3, 0xDA, 0x66, 0xDA, 0x67, /* 0x80-0x83 */
+	0xF4, 0xF2, 0xB3, 0xAC, 0xDA, 0x68, 0xDA, 0x69, /* 0x84-0x87 */
+	0xDA, 0x6A, 0xDA, 0x6B, 0xD4, 0xBD, 0xC7, 0xF7, /* 0x88-0x8B */
+	0xDA, 0x6C, 0xDA, 0x6D, 0xDA, 0x6E, 0xDA, 0x6F, /* 0x8C-0x8F */
+	0xDA, 0x70, 0xF4, 0xF4, 0xDA, 0x71, 0xDA, 0x72, /* 0x90-0x93 */
+	0xF4, 0xF3, 0xDA, 0x73, 0xDA, 0x74, 0xDA, 0x75, /* 0x94-0x97 */
+	0xDA, 0x76, 0xDA, 0x77, 0xDA, 0x78, 0xDA, 0x79, /* 0x98-0x9B */
+	0xDA, 0x7A, 0xDA, 0x7B, 0xDA, 0x7C, 0xCC, 0xCB, /* 0x9C-0x9F */
+	0xDA, 0x7D, 0xDA, 0x7E, 0xDA, 0x80, 0xC8, 0xA4, /* 0xA0-0xA3 */
+	0xDA, 0x81, 0xDA, 0x82, 0xDA, 0x83, 0xDA, 0x84, /* 0xA4-0xA7 */
+	0xDA, 0x85, 0xDA, 0x86, 0xDA, 0x87, 0xDA, 0x88, /* 0xA8-0xAB */
+	0xDA, 0x89, 0xDA, 0x8A, 0xDA, 0x8B, 0xDA, 0x8C, /* 0xAC-0xAF */
+	0xDA, 0x8D, 0xF4, 0xF5, 0xDA, 0x8E, 0xD7, 0xE3, /* 0xB0-0xB3 */
+	0xC5, 0xBF, 0xF5, 0xC0, 0xDA, 0x8F, 0xDA, 0x90, /* 0xB4-0xB7 */
+	0xF5, 0xBB, 0xDA, 0x91, 0xF5, 0xC3, 0xDA, 0x92, /* 0xB8-0xBB */
+	0xF5, 0xC2, 0xDA, 0x93, 0xD6, 0xBA, 0xF5, 0xC1, /* 0xBC-0xBF */
+	0xDA, 0x94, 0xDA, 0x95, 0xDA, 0x96, 0xD4, 0xBE, /* 0xC0-0xC3 */
+	0xF5, 0xC4, 0xDA, 0x97, 0xF5, 0xCC, 0xDA, 0x98, /* 0xC4-0xC7 */
+	0xDA, 0x99, 0xDA, 0x9A, 0xDA, 0x9B, 0xB0, 0xCF, /* 0xC8-0xCB */
+	0xB5, 0xF8, 0xDA, 0x9C, 0xF5, 0xC9, 0xF5, 0xCA, /* 0xCC-0xCF */
+	0xDA, 0x9D, 0xC5, 0xDC, 0xDA, 0x9E, 0xDA, 0x9F, /* 0xD0-0xD3 */
+	0xDA, 0xA0, 0xDB, 0x40, 0xF5, 0xC5, 0xF5, 0xC6, /* 0xD4-0xD7 */
+	0xDB, 0x41, 0xDB, 0x42, 0xF5, 0xC7, 0xF5, 0xCB, /* 0xD8-0xDB */
+	0xDB, 0x43, 0xBE, 0xE0, 0xF5, 0xC8, 0xB8, 0xFA, /* 0xDC-0xDF */
+	0xDB, 0x44, 0xDB, 0x45, 0xDB, 0x46, 0xF5, 0xD0, /* 0xE0-0xE3 */
+	0xF5, 0xD3, 0xDB, 0x47, 0xDB, 0x48, 0xDB, 0x49, /* 0xE4-0xE7 */
+	0xBF, 0xE7, 0xDB, 0x4A, 0xB9, 0xF2, 0xF5, 0xBC, /* 0xE8-0xEB */
+	0xF5, 0xCD, 0xDB, 0x4B, 0xDB, 0x4C, 0xC2, 0xB7, /* 0xEC-0xEF */
+	0xDB, 0x4D, 0xDB, 0x4E, 0xDB, 0x4F, 0xCC, 0xF8, /* 0xF0-0xF3 */
+	0xDB, 0x50, 0xBC, 0xF9, 0xDB, 0x51, 0xF5, 0xCE, /* 0xF4-0xF7 */
+	0xF5, 0xCF, 0xF5, 0xD1, 0xB6, 0xE5, 0xF5, 0xD2, /* 0xF8-0xFB */
+	0xDB, 0x52, 0xF5, 0xD5, 0xDB, 0x53, 0xDB, 0x54, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8E[512] = {
+	0xDB, 0x55, 0xDB, 0x56, 0xDB, 0x57, 0xDB, 0x58, /* 0x00-0x03 */
+	0xDB, 0x59, 0xF5, 0xBD, 0xDB, 0x5A, 0xDB, 0x5B, /* 0x04-0x07 */
+	0xDB, 0x5C, 0xF5, 0xD4, 0xD3, 0xBB, 0xDB, 0x5D, /* 0x08-0x0B */
+	0xB3, 0xEC, 0xDB, 0x5E, 0xDB, 0x5F, 0xCC, 0xA4, /* 0x0C-0x0F */
+	0xDB, 0x60, 0xDB, 0x61, 0xDB, 0x62, 0xDB, 0x63, /* 0x10-0x13 */
+	0xF5, 0xD6, 0xDB, 0x64, 0xDB, 0x65, 0xDB, 0x66, /* 0x14-0x17 */
+	0xDB, 0x67, 0xDB, 0x68, 0xDB, 0x69, 0xDB, 0x6A, /* 0x18-0x1B */
+	0xDB, 0x6B, 0xF5, 0xD7, 0xBE, 0xE1, 0xF5, 0xD8, /* 0x1C-0x1F */
+	0xDB, 0x6C, 0xDB, 0x6D, 0xCC, 0xDF, 0xF5, 0xDB, /* 0x20-0x23 */
+	0xDB, 0x6E, 0xDB, 0x6F, 0xDB, 0x70, 0xDB, 0x71, /* 0x24-0x27 */
+	0xDB, 0x72, 0xB2, 0xC8, 0xD7, 0xD9, 0xDB, 0x73, /* 0x28-0x2B */
+	0xF5, 0xD9, 0xDB, 0x74, 0xF5, 0xDA, 0xF5, 0xDC, /* 0x2C-0x2F */
+	0xDB, 0x75, 0xF5, 0xE2, 0xDB, 0x76, 0xDB, 0x77, /* 0x30-0x33 */
+	0xDB, 0x78, 0xF5, 0xE0, 0xDB, 0x79, 0xDB, 0x7A, /* 0x34-0x37 */
+	0xDB, 0x7B, 0xF5, 0xDF, 0xF5, 0xDD, 0xDB, 0x7C, /* 0x38-0x3B */
+	0xDB, 0x7D, 0xF5, 0xE1, 0xDB, 0x7E, 0xDB, 0x80, /* 0x3C-0x3F */
+	0xF5, 0xDE, 0xF5, 0xE4, 0xF5, 0xE5, 0xDB, 0x81, /* 0x40-0x43 */
+	0xCC, 0xE3, 0xDB, 0x82, 0xDB, 0x83, 0xE5, 0xBF, /* 0x44-0x47 */
+	0xB5, 0xB8, 0xF5, 0xE3, 0xF5, 0xE8, 0xCC, 0xA3, /* 0x48-0x4B */
+	0xDB, 0x84, 0xDB, 0x85, 0xDB, 0x86, 0xDB, 0x87, /* 0x4C-0x4F */
+	0xDB, 0x88, 0xF5, 0xE6, 0xF5, 0xE7, 0xDB, 0x89, /* 0x50-0x53 */
+	0xDB, 0x8A, 0xDB, 0x8B, 0xDB, 0x8C, 0xDB, 0x8D, /* 0x54-0x57 */
+	0xDB, 0x8E, 0xF5, 0xBE, 0xDB, 0x8F, 0xDB, 0x90, /* 0x58-0x5B */
+	0xDB, 0x91, 0xDB, 0x92, 0xDB, 0x93, 0xDB, 0x94, /* 0x5C-0x5F */
+	0xDB, 0x95, 0xDB, 0x96, 0xDB, 0x97, 0xDB, 0x98, /* 0x60-0x63 */
+	0xDB, 0x99, 0xDB, 0x9A, 0xB1, 0xC4, 0xDB, 0x9B, /* 0x64-0x67 */
+	0xDB, 0x9C, 0xF5, 0xBF, 0xDB, 0x9D, 0xDB, 0x9E, /* 0x68-0x6B */
+	0xB5, 0xC5, 0xB2, 0xE4, 0xDB, 0x9F, 0xF5, 0xEC, /* 0x6C-0x6F */
+	0xF5, 0xE9, 0xDB, 0xA0, 0xB6, 0xD7, 0xDC, 0x40, /* 0x70-0x73 */
+	0xF5, 0xED, 0xDC, 0x41, 0xF5, 0xEA, 0xDC, 0x42, /* 0x74-0x77 */
+	0xDC, 0x43, 0xDC, 0x44, 0xDC, 0x45, 0xDC, 0x46, /* 0x78-0x7B */
+	0xF5, 0xEB, 0xDC, 0x47, 0xDC, 0x48, 0xB4, 0xDA, /* 0x7C-0x7F */
+	
+	0xDC, 0x49, 0xD4, 0xEA, 0xDC, 0x4A, 0xDC, 0x4B, /* 0x80-0x83 */
+	0xDC, 0x4C, 0xF5, 0xEE, 0xDC, 0x4D, 0xB3, 0xF9, /* 0x84-0x87 */
+	0xDC, 0x4E, 0xDC, 0x4F, 0xDC, 0x50, 0xDC, 0x51, /* 0x88-0x8B */
+	0xDC, 0x52, 0xDC, 0x53, 0xDC, 0x54, 0xF5, 0xEF, /* 0x8C-0x8F */
+	0xF5, 0xF1, 0xDC, 0x55, 0xDC, 0x56, 0xDC, 0x57, /* 0x90-0x93 */
+	0xF5, 0xF0, 0xDC, 0x58, 0xDC, 0x59, 0xDC, 0x5A, /* 0x94-0x97 */
+	0xDC, 0x5B, 0xDC, 0x5C, 0xDC, 0x5D, 0xDC, 0x5E, /* 0x98-0x9B */
+	0xF5, 0xF2, 0xDC, 0x5F, 0xF5, 0xF3, 0xDC, 0x60, /* 0x9C-0x9F */
+	0xDC, 0x61, 0xDC, 0x62, 0xDC, 0x63, 0xDC, 0x64, /* 0xA0-0xA3 */
+	0xDC, 0x65, 0xDC, 0x66, 0xDC, 0x67, 0xDC, 0x68, /* 0xA4-0xA7 */
+	0xDC, 0x69, 0xDC, 0x6A, 0xDC, 0x6B, 0xC9, 0xED, /* 0xA8-0xAB */
+	0xB9, 0xAA, 0xDC, 0x6C, 0xDC, 0x6D, 0xC7, 0xFB, /* 0xAC-0xAF */
+	0xDC, 0x6E, 0xDC, 0x6F, 0xB6, 0xE3, 0xDC, 0x70, /* 0xB0-0xB3 */
+	0xDC, 0x71, 0xDC, 0x72, 0xDC, 0x73, 0xDC, 0x74, /* 0xB4-0xB7 */
+	0xDC, 0x75, 0xDC, 0x76, 0xCC, 0xC9, 0xDC, 0x77, /* 0xB8-0xBB */
+	0xDC, 0x78, 0xDC, 0x79, 0xDC, 0x7A, 0xDC, 0x7B, /* 0xBC-0xBF */
+	0xDC, 0x7C, 0xDC, 0x7D, 0xDC, 0x7E, 0xDC, 0x80, /* 0xC0-0xC3 */
+	0xDC, 0x81, 0xDC, 0x82, 0xDC, 0x83, 0xDC, 0x84, /* 0xC4-0xC7 */
+	0xDC, 0x85, 0xDC, 0x86, 0xDC, 0x87, 0xDC, 0x88, /* 0xC8-0xCB */
+	0xDC, 0x89, 0xDC, 0x8A, 0xEA, 0xA6, 0xDC, 0x8B, /* 0xCC-0xCF */
+	0xDC, 0x8C, 0xDC, 0x8D, 0xDC, 0x8E, 0xDC, 0x8F, /* 0xD0-0xD3 */
+	0xDC, 0x90, 0xDC, 0x91, 0xDC, 0x92, 0xDC, 0x93, /* 0xD4-0xD7 */
+	0xDC, 0x94, 0xDC, 0x95, 0xDC, 0x96, 0xDC, 0x97, /* 0xD8-0xDB */
+	0xDC, 0x98, 0xDC, 0x99, 0xDC, 0x9A, 0xDC, 0x9B, /* 0xDC-0xDF */
+	0xDC, 0x9C, 0xDC, 0x9D, 0xDC, 0x9E, 0xDC, 0x9F, /* 0xE0-0xE3 */
+	0xDC, 0xA0, 0xDD, 0x40, 0xDD, 0x41, 0xDD, 0x42, /* 0xE4-0xE7 */
+	0xDD, 0x43, 0xDD, 0x44, 0xDD, 0x45, 0xDD, 0x46, /* 0xE8-0xEB */
+	0xDD, 0x47, 0xDD, 0x48, 0xDD, 0x49, 0xDD, 0x4A, /* 0xEC-0xEF */
+	0xDD, 0x4B, 0xDD, 0x4C, 0xDD, 0x4D, 0xDD, 0x4E, /* 0xF0-0xF3 */
+	0xDD, 0x4F, 0xDD, 0x50, 0xDD, 0x51, 0xDD, 0x52, /* 0xF4-0xF7 */
+	0xDD, 0x53, 0xDD, 0x54, 0xDD, 0x55, 0xDD, 0x56, /* 0xF8-0xFB */
+	0xDD, 0x57, 0xDD, 0x58, 0xDD, 0x59, 0xDD, 0x5A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8F[512] = {
+	0xDD, 0x5B, 0xDD, 0x5C, 0xDD, 0x5D, 0xDD, 0x5E, /* 0x00-0x03 */
+	0xDD, 0x5F, 0xDD, 0x60, 0xDD, 0x61, 0xDD, 0x62, /* 0x04-0x07 */
+	0xDD, 0x63, 0xDD, 0x64, 0xDD, 0x65, 0xDD, 0x66, /* 0x08-0x0B */
+	0xDD, 0x67, 0xDD, 0x68, 0xDD, 0x69, 0xDD, 0x6A, /* 0x0C-0x0F */
+	0xDD, 0x6B, 0xDD, 0x6C, 0xDD, 0x6D, 0xDD, 0x6E, /* 0x10-0x13 */
+	0xDD, 0x6F, 0xDD, 0x70, 0xDD, 0x71, 0xDD, 0x72, /* 0x14-0x17 */
+	0xDD, 0x73, 0xDD, 0x74, 0xDD, 0x75, 0xDD, 0x76, /* 0x18-0x1B */
+	0xDD, 0x77, 0xDD, 0x78, 0xDD, 0x79, 0xDD, 0x7A, /* 0x1C-0x1F */
+	0xDD, 0x7B, 0xDD, 0x7C, 0xDD, 0x7D, 0xDD, 0x7E, /* 0x20-0x23 */
+	0xDD, 0x80, 0xDD, 0x81, 0xDD, 0x82, 0xDD, 0x83, /* 0x24-0x27 */
+	0xDD, 0x84, 0xDD, 0x85, 0xDD, 0x86, 0xDD, 0x87, /* 0x28-0x2B */
+	0xDD, 0x88, 0xDD, 0x89, 0xDD, 0x8A, 0xDD, 0x8B, /* 0x2C-0x2F */
+	0xDD, 0x8C, 0xDD, 0x8D, 0xDD, 0x8E, 0xDD, 0x8F, /* 0x30-0x33 */
+	0xDD, 0x90, 0xDD, 0x91, 0xDD, 0x92, 0xDD, 0x93, /* 0x34-0x37 */
+	0xDD, 0x94, 0xDD, 0x95, 0xDD, 0x96, 0xDD, 0x97, /* 0x38-0x3B */
+	0xDD, 0x98, 0xDD, 0x99, 0xDD, 0x9A, 0xDD, 0x9B, /* 0x3C-0x3F */
+	0xDD, 0x9C, 0xDD, 0x9D, 0xDD, 0x9E, 0xDD, 0x9F, /* 0x40-0x43 */
+	0xDD, 0xA0, 0xDE, 0x40, 0xDE, 0x41, 0xDE, 0x42, /* 0x44-0x47 */
+	0xDE, 0x43, 0xDE, 0x44, 0xDE, 0x45, 0xDE, 0x46, /* 0x48-0x4B */
+	0xDE, 0x47, 0xDE, 0x48, 0xDE, 0x49, 0xDE, 0x4A, /* 0x4C-0x4F */
+	0xDE, 0x4B, 0xDE, 0x4C, 0xDE, 0x4D, 0xDE, 0x4E, /* 0x50-0x53 */
+	0xDE, 0x4F, 0xDE, 0x50, 0xDE, 0x51, 0xDE, 0x52, /* 0x54-0x57 */
+	0xDE, 0x53, 0xDE, 0x54, 0xDE, 0x55, 0xDE, 0x56, /* 0x58-0x5B */
+	0xDE, 0x57, 0xDE, 0x58, 0xDE, 0x59, 0xDE, 0x5A, /* 0x5C-0x5F */
+	0xDE, 0x5B, 0xDE, 0x5C, 0xDE, 0x5D, 0xDE, 0x5E, /* 0x60-0x63 */
+	0xDE, 0x5F, 0xDE, 0x60, 0xB3, 0xB5, 0xD4, 0xFE, /* 0x64-0x67 */
+	0xB9, 0xEC, 0xD0, 0xF9, 0xDE, 0x61, 0xE9, 0xED, /* 0x68-0x6B */
+	0xD7, 0xAA, 0xE9, 0xEE, 0xC2, 0xD6, 0xC8, 0xED, /* 0x6C-0x6F */
+	0xBA, 0xE4, 0xE9, 0xEF, 0xE9, 0xF0, 0xE9, 0xF1, /* 0x70-0x73 */
+	0xD6, 0xE1, 0xE9, 0xF2, 0xE9, 0xF3, 0xE9, 0xF5, /* 0x74-0x77 */
+	0xE9, 0xF4, 0xE9, 0xF6, 0xE9, 0xF7, 0xC7, 0xE1, /* 0x78-0x7B */
+	0xE9, 0xF8, 0xD4, 0xD8, 0xE9, 0xF9, 0xBD, 0xCE, /* 0x7C-0x7F */
+	
+	0xDE, 0x62, 0xE9, 0xFA, 0xE9, 0xFB, 0xBD, 0xCF, /* 0x80-0x83 */
+	0xE9, 0xFC, 0xB8, 0xA8, 0xC1, 0xBE, 0xE9, 0xFD, /* 0x84-0x87 */
+	0xB1, 0xB2, 0xBB, 0xD4, 0xB9, 0xF5, 0xE9, 0xFE, /* 0x88-0x8B */
+	0xDE, 0x63, 0xEA, 0xA1, 0xEA, 0xA2, 0xEA, 0xA3, /* 0x8C-0x8F */
+	0xB7, 0xF8, 0xBC, 0xAD, 0xDE, 0x64, 0xCA, 0xE4, /* 0x90-0x93 */
+	0xE0, 0xCE, 0xD4, 0xAF, 0xCF, 0xBD, 0xD5, 0xB7, /* 0x94-0x97 */
+	0xEA, 0xA4, 0xD5, 0xDE, 0xEA, 0xA5, 0xD0, 0xC1, /* 0x98-0x9B */
+	0xB9, 0xBC, 0xDE, 0x65, 0xB4, 0xC7, 0xB1, 0xD9, /* 0x9C-0x9F */
+	0xDE, 0x66, 0xDE, 0x67, 0xDE, 0x68, 0xC0, 0xB1, /* 0xA0-0xA3 */
+	0xDE, 0x69, 0xDE, 0x6A, 0xDE, 0x6B, 0xDE, 0x6C, /* 0xA4-0xA7 */
+	0xB1, 0xE6, 0xB1, 0xE7, 0xDE, 0x6D, 0xB1, 0xE8, /* 0xA8-0xAB */
+	0xDE, 0x6E, 0xDE, 0x6F, 0xDE, 0x70, 0xDE, 0x71, /* 0xAC-0xAF */
+	0xB3, 0xBD, 0xC8, 0xE8, 0xDE, 0x72, 0xDE, 0x73, /* 0xB0-0xB3 */
+	0xDE, 0x74, 0xDE, 0x75, 0xE5, 0xC1, 0xDE, 0x76, /* 0xB4-0xB7 */
+	0xDE, 0x77, 0xB1, 0xDF, 0xDE, 0x78, 0xDE, 0x79, /* 0xB8-0xBB */
+	0xDE, 0x7A, 0xC1, 0xC9, 0xB4, 0xEF, 0xDE, 0x7B, /* 0xBC-0xBF */
+	0xDE, 0x7C, 0xC7, 0xA8, 0xD3, 0xD8, 0xDE, 0x7D, /* 0xC0-0xC3 */
+	0xC6, 0xF9, 0xD1, 0xB8, 0xDE, 0x7E, 0xB9, 0xFD, /* 0xC4-0xC7 */
+	0xC2, 0xF5, 0xDE, 0x80, 0xDE, 0x81, 0xDE, 0x82, /* 0xC8-0xCB */
+	0xDE, 0x83, 0xDE, 0x84, 0xD3, 0xAD, 0xDE, 0x85, /* 0xCC-0xCF */
+	0xD4, 0xCB, 0xBD, 0xFC, 0xDE, 0x86, 0xE5, 0xC2, /* 0xD0-0xD3 */
+	0xB7, 0xB5, 0xE5, 0xC3, 0xDE, 0x87, 0xDE, 0x88, /* 0xD4-0xD7 */
+	0xBB, 0xB9, 0xD5, 0xE2, 0xDE, 0x89, 0xBD, 0xF8, /* 0xD8-0xDB */
+	0xD4, 0xB6, 0xCE, 0xA5, 0xC1, 0xAC, 0xB3, 0xD9, /* 0xDC-0xDF */
+	0xDE, 0x8A, 0xDE, 0x8B, 0xCC, 0xF6, 0xDE, 0x8C, /* 0xE0-0xE3 */
+	0xE5, 0xC6, 0xE5, 0xC4, 0xE5, 0xC8, 0xDE, 0x8D, /* 0xE4-0xE7 */
+	0xE5, 0xCA, 0xE5, 0xC7, 0xB5, 0xCF, 0xC6, 0xC8, /* 0xE8-0xEB */
+	0xDE, 0x8E, 0xB5, 0xFC, 0xE5, 0xC5, 0xDE, 0x8F, /* 0xEC-0xEF */
+	0xCA, 0xF6, 0xDE, 0x90, 0xDE, 0x91, 0xE5, 0xC9, /* 0xF0-0xF3 */
+	0xDE, 0x92, 0xDE, 0x93, 0xDE, 0x94, 0xC3, 0xD4, /* 0xF4-0xF7 */
+	0xB1, 0xC5, 0xBC, 0xA3, 0xDE, 0x95, 0xDE, 0x96, /* 0xF8-0xFB */
+	0xDE, 0x97, 0xD7, 0xB7, 0xDE, 0x98, 0xDE, 0x99, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_90[512] = {
+	0xCD, 0xCB, 0xCB, 0xCD, 0xCA, 0xCA, 0xCC, 0xD3, /* 0x00-0x03 */
+	0xE5, 0xCC, 0xE5, 0xCB, 0xC4, 0xE6, 0xDE, 0x9A, /* 0x04-0x07 */
+	0xDE, 0x9B, 0xD1, 0xA1, 0xD1, 0xB7, 0xE5, 0xCD, /* 0x08-0x0B */
+	0xDE, 0x9C, 0xE5, 0xD0, 0xDE, 0x9D, 0xCD, 0xB8, /* 0x0C-0x0F */
+	0xD6, 0xF0, 0xE5, 0xCF, 0xB5, 0xDD, 0xDE, 0x9E, /* 0x10-0x13 */
+	0xCD, 0xBE, 0xDE, 0x9F, 0xE5, 0xD1, 0xB6, 0xBA, /* 0x14-0x17 */
+	0xDE, 0xA0, 0xDF, 0x40, 0xCD, 0xA8, 0xB9, 0xE4, /* 0x18-0x1B */
+	0xDF, 0x41, 0xCA, 0xC5, 0xB3, 0xD1, 0xCB, 0xD9, /* 0x1C-0x1F */
+	0xD4, 0xEC, 0xE5, 0xD2, 0xB7, 0xEA, 0xDF, 0x42, /* 0x20-0x23 */
+	0xDF, 0x43, 0xDF, 0x44, 0xE5, 0xCE, 0xDF, 0x45, /* 0x24-0x27 */
+	0xDF, 0x46, 0xDF, 0x47, 0xDF, 0x48, 0xDF, 0x49, /* 0x28-0x2B */
+	0xDF, 0x4A, 0xE5, 0xD5, 0xB4, 0xFE, 0xE5, 0xD6, /* 0x2C-0x2F */
+	0xDF, 0x4B, 0xDF, 0x4C, 0xDF, 0x4D, 0xDF, 0x4E, /* 0x30-0x33 */
+	0xDF, 0x4F, 0xE5, 0xD3, 0xE5, 0xD4, 0xDF, 0x50, /* 0x34-0x37 */
+	0xD2, 0xDD, 0xDF, 0x51, 0xDF, 0x52, 0xC2, 0xDF, /* 0x38-0x3B */
+	0xB1, 0xC6, 0xDF, 0x53, 0xD3, 0xE2, 0xDF, 0x54, /* 0x3C-0x3F */
+	0xDF, 0x55, 0xB6, 0xDD, 0xCB, 0xEC, 0xDF, 0x56, /* 0x40-0x43 */
+	0xE5, 0xD7, 0xDF, 0x57, 0xDF, 0x58, 0xD3, 0xF6, /* 0x44-0x47 */
+	0xDF, 0x59, 0xDF, 0x5A, 0xDF, 0x5B, 0xDF, 0x5C, /* 0x48-0x4B */
+	0xDF, 0x5D, 0xB1, 0xE9, 0xDF, 0x5E, 0xB6, 0xF4, /* 0x4C-0x4F */
+	0xE5, 0xDA, 0xE5, 0xD8, 0xE5, 0xD9, 0xB5, 0xC0, /* 0x50-0x53 */
+	0xDF, 0x5F, 0xDF, 0x60, 0xDF, 0x61, 0xD2, 0xC5, /* 0x54-0x57 */
+	0xE5, 0xDC, 0xDF, 0x62, 0xDF, 0x63, 0xE5, 0xDE, /* 0x58-0x5B */
+	0xDF, 0x64, 0xDF, 0x65, 0xDF, 0x66, 0xDF, 0x67, /* 0x5C-0x5F */
+	0xDF, 0x68, 0xDF, 0x69, 0xE5, 0xDD, 0xC7, 0xB2, /* 0x60-0x63 */
+	0xDF, 0x6A, 0xD2, 0xA3, 0xDF, 0x6B, 0xDF, 0x6C, /* 0x64-0x67 */
+	0xE5, 0xDB, 0xDF, 0x6D, 0xDF, 0x6E, 0xDF, 0x6F, /* 0x68-0x6B */
+	0xDF, 0x70, 0xD4, 0xE2, 0xD5, 0xDA, 0xDF, 0x71, /* 0x6C-0x6F */
+	0xDF, 0x72, 0xDF, 0x73, 0xDF, 0x74, 0xDF, 0x75, /* 0x70-0x73 */
+	0xE5, 0xE0, 0xD7, 0xF1, 0xDF, 0x76, 0xDF, 0x77, /* 0x74-0x77 */
+	0xDF, 0x78, 0xDF, 0x79, 0xDF, 0x7A, 0xDF, 0x7B, /* 0x78-0x7B */
+	0xDF, 0x7C, 0xE5, 0xE1, 0xDF, 0x7D, 0xB1, 0xDC, /* 0x7C-0x7F */
+	
+	0xD1, 0xFB, 0xDF, 0x7E, 0xE5, 0xE2, 0xE5, 0xE4, /* 0x80-0x83 */
+	0xDF, 0x80, 0xDF, 0x81, 0xDF, 0x82, 0xDF, 0x83, /* 0x84-0x87 */
+	0xE5, 0xE3, 0xDF, 0x84, 0xDF, 0x85, 0xE5, 0xE5, /* 0x88-0x8B */
+	0xDF, 0x86, 0xDF, 0x87, 0xDF, 0x88, 0xDF, 0x89, /* 0x8C-0x8F */
+	0xDF, 0x8A, 0xD2, 0xD8, 0xDF, 0x8B, 0xB5, 0xCB, /* 0x90-0x93 */
+	0xDF, 0x8C, 0xE7, 0xDF, 0xDF, 0x8D, 0xDA, 0xF5, /* 0x94-0x97 */
+	0xDF, 0x8E, 0xDA, 0xF8, 0xDF, 0x8F, 0xDA, 0xF6, /* 0x98-0x9B */
+	0xDF, 0x90, 0xDA, 0xF7, 0xDF, 0x91, 0xDF, 0x92, /* 0x9C-0x9F */
+	0xDF, 0x93, 0xDA, 0xFA, 0xD0, 0xCF, 0xC4, 0xC7, /* 0xA0-0xA3 */
+	0xDF, 0x94, 0xDF, 0x95, 0xB0, 0xEE, 0xDF, 0x96, /* 0xA4-0xA7 */
+	0xDF, 0x97, 0xDF, 0x98, 0xD0, 0xB0, 0xDF, 0x99, /* 0xA8-0xAB */
+	0xDA, 0xF9, 0xDF, 0x9A, 0xD3, 0xCA, 0xBA, 0xAA, /* 0xAC-0xAF */
+	0xDB, 0xA2, 0xC7, 0xF1, 0xDF, 0x9B, 0xDA, 0xFC, /* 0xB0-0xB3 */
+	0xDA, 0xFB, 0xC9, 0xDB, 0xDA, 0xFD, 0xDF, 0x9C, /* 0xB4-0xB7 */
+	0xDB, 0xA1, 0xD7, 0xDE, 0xDA, 0xFE, 0xC1, 0xDA, /* 0xB8-0xBB */
+	0xDF, 0x9D, 0xDF, 0x9E, 0xDB, 0xA5, 0xDF, 0x9F, /* 0xBC-0xBF */
+	0xDF, 0xA0, 0xD3, 0xF4, 0xE0, 0x40, 0xE0, 0x41, /* 0xC0-0xC3 */
+	0xDB, 0xA7, 0xDB, 0xA4, 0xE0, 0x42, 0xDB, 0xA8, /* 0xC4-0xC7 */
+	0xE0, 0x43, 0xE0, 0x44, 0xBD, 0xBC, 0xE0, 0x45, /* 0xC8-0xCB */
+	0xE0, 0x46, 0xE0, 0x47, 0xC0, 0xC9, 0xDB, 0xA3, /* 0xCC-0xCF */
+	0xDB, 0xA6, 0xD6, 0xA3, 0xE0, 0x48, 0xDB, 0xA9, /* 0xD0-0xD3 */
+	0xE0, 0x49, 0xE0, 0x4A, 0xE0, 0x4B, 0xDB, 0xAD, /* 0xD4-0xD7 */
+	0xE0, 0x4C, 0xE0, 0x4D, 0xE0, 0x4E, 0xDB, 0xAE, /* 0xD8-0xDB */
+	0xDB, 0xAC, 0xBA, 0xC2, 0xE0, 0x4F, 0xE0, 0x50, /* 0xDC-0xDF */
+	0xE0, 0x51, 0xBF, 0xA4, 0xDB, 0xAB, 0xE0, 0x52, /* 0xE0-0xE3 */
+	0xE0, 0x53, 0xE0, 0x54, 0xDB, 0xAA, 0xD4, 0xC7, /* 0xE4-0xE7 */
+	0xB2, 0xBF, 0xE0, 0x55, 0xE0, 0x56, 0xDB, 0xAF, /* 0xE8-0xEB */
+	0xE0, 0x57, 0xB9, 0xF9, 0xE0, 0x58, 0xDB, 0xB0, /* 0xEC-0xEF */
+	0xE0, 0x59, 0xE0, 0x5A, 0xE0, 0x5B, 0xE0, 0x5C, /* 0xF0-0xF3 */
+	0xB3, 0xBB, 0xE0, 0x5D, 0xE0, 0x5E, 0xE0, 0x5F, /* 0xF4-0xF7 */
+	0xB5, 0xA6, 0xE0, 0x60, 0xE0, 0x61, 0xE0, 0x62, /* 0xF8-0xFB */
+	0xE0, 0x63, 0xB6, 0xBC, 0xDB, 0xB1, 0xE0, 0x64, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_91[512] = {
+	0xE0, 0x65, 0xE0, 0x66, 0xB6, 0xF5, 0xE0, 0x67, /* 0x00-0x03 */
+	0xDB, 0xB2, 0xE0, 0x68, 0xE0, 0x69, 0xE0, 0x6A, /* 0x04-0x07 */
+	0xE0, 0x6B, 0xE0, 0x6C, 0xE0, 0x6D, 0xE0, 0x6E, /* 0x08-0x0B */
+	0xE0, 0x6F, 0xE0, 0x70, 0xE0, 0x71, 0xE0, 0x72, /* 0x0C-0x0F */
+	0xE0, 0x73, 0xE0, 0x74, 0xE0, 0x75, 0xE0, 0x76, /* 0x10-0x13 */
+	0xE0, 0x77, 0xE0, 0x78, 0xE0, 0x79, 0xE0, 0x7A, /* 0x14-0x17 */
+	0xE0, 0x7B, 0xB1, 0xC9, 0xE0, 0x7C, 0xE0, 0x7D, /* 0x18-0x1B */
+	0xE0, 0x7E, 0xE0, 0x80, 0xDB, 0xB4, 0xE0, 0x81, /* 0x1C-0x1F */
+	0xE0, 0x82, 0xE0, 0x83, 0xDB, 0xB3, 0xDB, 0xB5, /* 0x20-0x23 */
+	0xE0, 0x84, 0xE0, 0x85, 0xE0, 0x86, 0xE0, 0x87, /* 0x24-0x27 */
+	0xE0, 0x88, 0xE0, 0x89, 0xE0, 0x8A, 0xE0, 0x8B, /* 0x28-0x2B */
+	0xE0, 0x8C, 0xE0, 0x8D, 0xE0, 0x8E, 0xDB, 0xB7, /* 0x2C-0x2F */
+	0xE0, 0x8F, 0xDB, 0xB6, 0xE0, 0x90, 0xE0, 0x91, /* 0x30-0x33 */
+	0xE0, 0x92, 0xE0, 0x93, 0xE0, 0x94, 0xE0, 0x95, /* 0x34-0x37 */
+	0xE0, 0x96, 0xDB, 0xB8, 0xE0, 0x97, 0xE0, 0x98, /* 0x38-0x3B */
+	0xE0, 0x99, 0xE0, 0x9A, 0xE0, 0x9B, 0xE0, 0x9C, /* 0x3C-0x3F */
+	0xE0, 0x9D, 0xE0, 0x9E, 0xE0, 0x9F, 0xDB, 0xB9, /* 0x40-0x43 */
+	0xE0, 0xA0, 0xE1, 0x40, 0xDB, 0xBA, 0xE1, 0x41, /* 0x44-0x47 */
+	0xE1, 0x42, 0xD3, 0xCF, 0xF4, 0xFA, 0xC7, 0xF5, /* 0x48-0x4B */
+	0xD7, 0xC3, 0xC5, 0xE4, 0xF4, 0xFC, 0xF4, 0xFD, /* 0x4C-0x4F */
+	0xF4, 0xFB, 0xE1, 0x43, 0xBE, 0xC6, 0xE1, 0x44, /* 0x50-0x53 */
+	0xE1, 0x45, 0xE1, 0x46, 0xE1, 0x47, 0xD0, 0xEF, /* 0x54-0x57 */
+	0xE1, 0x48, 0xE1, 0x49, 0xB7, 0xD3, 0xE1, 0x4A, /* 0x58-0x5B */
+	0xE1, 0x4B, 0xD4, 0xCD, 0xCC, 0xAA, 0xE1, 0x4C, /* 0x5C-0x5F */
+	0xE1, 0x4D, 0xF5, 0xA2, 0xF5, 0xA1, 0xBA, 0xA8, /* 0x60-0x63 */
+	0xF4, 0xFE, 0xCB, 0xD6, 0xE1, 0x4E, 0xE1, 0x4F, /* 0x64-0x67 */
+	0xE1, 0x50, 0xF5, 0xA4, 0xC0, 0xD2, 0xE1, 0x51, /* 0x68-0x6B */
+	0xB3, 0xEA, 0xE1, 0x52, 0xCD, 0xAA, 0xF5, 0xA5, /* 0x6C-0x6F */
+	0xF5, 0xA3, 0xBD, 0xB4, 0xF5, 0xA8, 0xE1, 0x53, /* 0x70-0x73 */
+	0xF5, 0xA9, 0xBD, 0xCD, 0xC3, 0xB8, 0xBF, 0xE1, /* 0x74-0x77 */
+	0xCB, 0xE1, 0xF5, 0xAA, 0xE1, 0x54, 0xE1, 0x55, /* 0x78-0x7B */
+	0xE1, 0x56, 0xF5, 0xA6, 0xF5, 0xA7, 0xC4, 0xF0, /* 0x7C-0x7F */
+	
+	0xE1, 0x57, 0xE1, 0x58, 0xE1, 0x59, 0xE1, 0x5A, /* 0x80-0x83 */
+	0xE1, 0x5B, 0xF5, 0xAC, 0xE1, 0x5C, 0xB4, 0xBC, /* 0x84-0x87 */
+	0xE1, 0x5D, 0xD7, 0xED, 0xE1, 0x5E, 0xB4, 0xD7, /* 0x88-0x8B */
+	0xF5, 0xAB, 0xF5, 0xAE, 0xE1, 0x5F, 0xE1, 0x60, /* 0x8C-0x8F */
+	0xF5, 0xAD, 0xF5, 0xAF, 0xD0, 0xD1, 0xE1, 0x61, /* 0x90-0x93 */
+	0xE1, 0x62, 0xE1, 0x63, 0xE1, 0x64, 0xE1, 0x65, /* 0x94-0x97 */
+	0xE1, 0x66, 0xE1, 0x67, 0xC3, 0xD1, 0xC8, 0xA9, /* 0x98-0x9B */
+	0xE1, 0x68, 0xE1, 0x69, 0xE1, 0x6A, 0xE1, 0x6B, /* 0x9C-0x9F */
+	0xE1, 0x6C, 0xE1, 0x6D, 0xF5, 0xB0, 0xF5, 0xB1, /* 0xA0-0xA3 */
+	0xE1, 0x6E, 0xE1, 0x6F, 0xE1, 0x70, 0xE1, 0x71, /* 0xA4-0xA7 */
+	0xE1, 0x72, 0xE1, 0x73, 0xF5, 0xB2, 0xE1, 0x74, /* 0xA8-0xAB */
+	0xE1, 0x75, 0xF5, 0xB3, 0xF5, 0xB4, 0xF5, 0xB5, /* 0xAC-0xAF */
+	0xE1, 0x76, 0xE1, 0x77, 0xE1, 0x78, 0xE1, 0x79, /* 0xB0-0xB3 */
+	0xF5, 0xB7, 0xF5, 0xB6, 0xE1, 0x7A, 0xE1, 0x7B, /* 0xB4-0xB7 */
+	0xE1, 0x7C, 0xE1, 0x7D, 0xF5, 0xB8, 0xE1, 0x7E, /* 0xB8-0xBB */
+	0xE1, 0x80, 0xE1, 0x81, 0xE1, 0x82, 0xE1, 0x83, /* 0xBC-0xBF */
+	0xE1, 0x84, 0xE1, 0x85, 0xE1, 0x86, 0xE1, 0x87, /* 0xC0-0xC3 */
+	0xE1, 0x88, 0xE1, 0x89, 0xE1, 0x8A, 0xB2, 0xC9, /* 0xC4-0xC7 */
+	0xE1, 0x8B, 0xD3, 0xD4, 0xCA, 0xCD, 0xE1, 0x8C, /* 0xC8-0xCB */
+	0xC0, 0xEF, 0xD6, 0xD8, 0xD2, 0xB0, 0xC1, 0xBF, /* 0xCC-0xCF */
+	0xE1, 0x8D, 0xBD, 0xF0, 0xE1, 0x8E, 0xE1, 0x8F, /* 0xD0-0xD3 */
+	0xE1, 0x90, 0xE1, 0x91, 0xE1, 0x92, 0xE1, 0x93, /* 0xD4-0xD7 */
+	0xE1, 0x94, 0xE1, 0x95, 0xE1, 0x96, 0xE1, 0x97, /* 0xD8-0xDB */
+	0xB8, 0xAA, 0xE1, 0x98, 0xE1, 0x99, 0xE1, 0x9A, /* 0xDC-0xDF */
+	0xE1, 0x9B, 0xE1, 0x9C, 0xE1, 0x9D, 0xE1, 0x9E, /* 0xE0-0xE3 */
+	0xE1, 0x9F, 0xE1, 0xA0, 0xE2, 0x40, 0xE2, 0x41, /* 0xE4-0xE7 */
+	0xE2, 0x42, 0xE2, 0x43, 0xE2, 0x44, 0xE2, 0x45, /* 0xE8-0xEB */
+	0xE2, 0x46, 0xE2, 0x47, 0xE2, 0x48, 0xE2, 0x49, /* 0xEC-0xEF */
+	0xE2, 0x4A, 0xE2, 0x4B, 0xE2, 0x4C, 0xE2, 0x4D, /* 0xF0-0xF3 */
+	0xE2, 0x4E, 0xE2, 0x4F, 0xE2, 0x50, 0xE2, 0x51, /* 0xF4-0xF7 */
+	0xE2, 0x52, 0xE2, 0x53, 0xE2, 0x54, 0xE2, 0x55, /* 0xF8-0xFB */
+	0xE2, 0x56, 0xE2, 0x57, 0xE2, 0x58, 0xE2, 0x59, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_92[512] = {
+	0xE2, 0x5A, 0xE2, 0x5B, 0xE2, 0x5C, 0xE2, 0x5D, /* 0x00-0x03 */
+	0xE2, 0x5E, 0xE2, 0x5F, 0xE2, 0x60, 0xE2, 0x61, /* 0x04-0x07 */
+	0xE2, 0x62, 0xE2, 0x63, 0xE2, 0x64, 0xE2, 0x65, /* 0x08-0x0B */
+	0xE2, 0x66, 0xE2, 0x67, 0xE2, 0x68, 0xE2, 0x69, /* 0x0C-0x0F */
+	0xE2, 0x6A, 0xE2, 0x6B, 0xE2, 0x6C, 0xE2, 0x6D, /* 0x10-0x13 */
+	0xE2, 0x6E, 0xE2, 0x6F, 0xE2, 0x70, 0xE2, 0x71, /* 0x14-0x17 */
+	0xE2, 0x72, 0xE2, 0x73, 0xE2, 0x74, 0xE2, 0x75, /* 0x18-0x1B */
+	0xE2, 0x76, 0xE2, 0x77, 0xE2, 0x78, 0xE2, 0x79, /* 0x1C-0x1F */
+	0xE2, 0x7A, 0xE2, 0x7B, 0xE2, 0x7C, 0xE2, 0x7D, /* 0x20-0x23 */
+	0xE2, 0x7E, 0xE2, 0x80, 0xE2, 0x81, 0xE2, 0x82, /* 0x24-0x27 */
+	0xE2, 0x83, 0xE2, 0x84, 0xE2, 0x85, 0xE2, 0x86, /* 0x28-0x2B */
+	0xE2, 0x87, 0xE2, 0x88, 0xE2, 0x89, 0xE2, 0x8A, /* 0x2C-0x2F */
+	0xE2, 0x8B, 0xE2, 0x8C, 0xE2, 0x8D, 0xE2, 0x8E, /* 0x30-0x33 */
+	0xE2, 0x8F, 0xE2, 0x90, 0xE2, 0x91, 0xE2, 0x92, /* 0x34-0x37 */
+	0xE2, 0x93, 0xE2, 0x94, 0xE2, 0x95, 0xE2, 0x96, /* 0x38-0x3B */
+	0xE2, 0x97, 0xE2, 0x98, 0xE2, 0x99, 0xE2, 0x9A, /* 0x3C-0x3F */
+	0xE2, 0x9B, 0xE2, 0x9C, 0xE2, 0x9D, 0xE2, 0x9E, /* 0x40-0x43 */
+	0xE2, 0x9F, 0xE2, 0xA0, 0xE3, 0x40, 0xE3, 0x41, /* 0x44-0x47 */
+	0xE3, 0x42, 0xE3, 0x43, 0xE3, 0x44, 0xE3, 0x45, /* 0x48-0x4B */
+	0xE3, 0x46, 0xE3, 0x47, 0xE3, 0x48, 0xE3, 0x49, /* 0x4C-0x4F */
+	0xE3, 0x4A, 0xE3, 0x4B, 0xE3, 0x4C, 0xE3, 0x4D, /* 0x50-0x53 */
+	0xE3, 0x4E, 0xE3, 0x4F, 0xE3, 0x50, 0xE3, 0x51, /* 0x54-0x57 */
+	0xE3, 0x52, 0xE3, 0x53, 0xE3, 0x54, 0xE3, 0x55, /* 0x58-0x5B */
+	0xE3, 0x56, 0xE3, 0x57, 0xE3, 0x58, 0xE3, 0x59, /* 0x5C-0x5F */
+	0xE3, 0x5A, 0xE3, 0x5B, 0xE3, 0x5C, 0xE3, 0x5D, /* 0x60-0x63 */
+	0xE3, 0x5E, 0xE3, 0x5F, 0xE3, 0x60, 0xE3, 0x61, /* 0x64-0x67 */
+	0xE3, 0x62, 0xE3, 0x63, 0xE3, 0x64, 0xE3, 0x65, /* 0x68-0x6B */
+	0xE3, 0x66, 0xE3, 0x67, 0xE3, 0x68, 0xE3, 0x69, /* 0x6C-0x6F */
+	0xE3, 0x6A, 0xE3, 0x6B, 0xE3, 0x6C, 0xE3, 0x6D, /* 0x70-0x73 */
+	0xBC, 0xF8, 0xE3, 0x6E, 0xE3, 0x6F, 0xE3, 0x70, /* 0x74-0x77 */
+	0xE3, 0x71, 0xE3, 0x72, 0xE3, 0x73, 0xE3, 0x74, /* 0x78-0x7B */
+	0xE3, 0x75, 0xE3, 0x76, 0xE3, 0x77, 0xE3, 0x78, /* 0x7C-0x7F */
+	
+	0xE3, 0x79, 0xE3, 0x7A, 0xE3, 0x7B, 0xE3, 0x7C, /* 0x80-0x83 */
+	0xE3, 0x7D, 0xE3, 0x7E, 0xE3, 0x80, 0xE3, 0x81, /* 0x84-0x87 */
+	0xE3, 0x82, 0xE3, 0x83, 0xE3, 0x84, 0xE3, 0x85, /* 0x88-0x8B */
+	0xE3, 0x86, 0xE3, 0x87, 0xF6, 0xC6, 0xE3, 0x88, /* 0x8C-0x8F */
+	0xE3, 0x89, 0xE3, 0x8A, 0xE3, 0x8B, 0xE3, 0x8C, /* 0x90-0x93 */
+	0xE3, 0x8D, 0xE3, 0x8E, 0xE3, 0x8F, 0xE3, 0x90, /* 0x94-0x97 */
+	0xE3, 0x91, 0xE3, 0x92, 0xE3, 0x93, 0xE3, 0x94, /* 0x98-0x9B */
+	0xE3, 0x95, 0xE3, 0x96, 0xE3, 0x97, 0xE3, 0x98, /* 0x9C-0x9F */
+	0xE3, 0x99, 0xE3, 0x9A, 0xE3, 0x9B, 0xE3, 0x9C, /* 0xA0-0xA3 */
+	0xE3, 0x9D, 0xE3, 0x9E, 0xE3, 0x9F, 0xE3, 0xA0, /* 0xA4-0xA7 */
+	0xE4, 0x40, 0xE4, 0x41, 0xE4, 0x42, 0xE4, 0x43, /* 0xA8-0xAB */
+	0xE4, 0x44, 0xE4, 0x45, 0xF6, 0xC7, 0xE4, 0x46, /* 0xAC-0xAF */
+	0xE4, 0x47, 0xE4, 0x48, 0xE4, 0x49, 0xE4, 0x4A, /* 0xB0-0xB3 */
+	0xE4, 0x4B, 0xE4, 0x4C, 0xE4, 0x4D, 0xE4, 0x4E, /* 0xB4-0xB7 */
+	0xE4, 0x4F, 0xE4, 0x50, 0xE4, 0x51, 0xE4, 0x52, /* 0xB8-0xBB */
+	0xE4, 0x53, 0xE4, 0x54, 0xE4, 0x55, 0xE4, 0x56, /* 0xBC-0xBF */
+	0xE4, 0x57, 0xE4, 0x58, 0xE4, 0x59, 0xE4, 0x5A, /* 0xC0-0xC3 */
+	0xE4, 0x5B, 0xE4, 0x5C, 0xE4, 0x5D, 0xE4, 0x5E, /* 0xC4-0xC7 */
+	0xF6, 0xC8, 0xE4, 0x5F, 0xE4, 0x60, 0xE4, 0x61, /* 0xC8-0xCB */
+	0xE4, 0x62, 0xE4, 0x63, 0xE4, 0x64, 0xE4, 0x65, /* 0xCC-0xCF */
+	0xE4, 0x66, 0xE4, 0x67, 0xE4, 0x68, 0xE4, 0x69, /* 0xD0-0xD3 */
+	0xE4, 0x6A, 0xE4, 0x6B, 0xE4, 0x6C, 0xE4, 0x6D, /* 0xD4-0xD7 */
+	0xE4, 0x6E, 0xE4, 0x6F, 0xE4, 0x70, 0xE4, 0x71, /* 0xD8-0xDB */
+	0xE4, 0x72, 0xE4, 0x73, 0xE4, 0x74, 0xE4, 0x75, /* 0xDC-0xDF */
+	0xE4, 0x76, 0xE4, 0x77, 0xE4, 0x78, 0xE4, 0x79, /* 0xE0-0xE3 */
+	0xE4, 0x7A, 0xE4, 0x7B, 0xE4, 0x7C, 0xE4, 0x7D, /* 0xE4-0xE7 */
+	0xE4, 0x7E, 0xE4, 0x80, 0xE4, 0x81, 0xE4, 0x82, /* 0xE8-0xEB */
+	0xE4, 0x83, 0xE4, 0x84, 0xE4, 0x85, 0xE4, 0x86, /* 0xEC-0xEF */
+	0xE4, 0x87, 0xE4, 0x88, 0xE4, 0x89, 0xE4, 0x8A, /* 0xF0-0xF3 */
+	0xE4, 0x8B, 0xE4, 0x8C, 0xE4, 0x8D, 0xE4, 0x8E, /* 0xF4-0xF7 */
+	0xE4, 0x8F, 0xE4, 0x90, 0xE4, 0x91, 0xE4, 0x92, /* 0xF8-0xFB */
+	0xE4, 0x93, 0xE4, 0x94, 0xE4, 0x95, 0xE4, 0x96, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_93[512] = {
+	0xE4, 0x97, 0xE4, 0x98, 0xE4, 0x99, 0xE4, 0x9A, /* 0x00-0x03 */
+	0xE4, 0x9B, 0xE4, 0x9C, 0xE4, 0x9D, 0xE4, 0x9E, /* 0x04-0x07 */
+	0xE4, 0x9F, 0xE4, 0xA0, 0xE5, 0x40, 0xE5, 0x41, /* 0x08-0x0B */
+	0xE5, 0x42, 0xE5, 0x43, 0xE5, 0x44, 0xE5, 0x45, /* 0x0C-0x0F */
+	0xE5, 0x46, 0xE5, 0x47, 0xE5, 0x48, 0xE5, 0x49, /* 0x10-0x13 */
+	0xE5, 0x4A, 0xE5, 0x4B, 0xE5, 0x4C, 0xE5, 0x4D, /* 0x14-0x17 */
+	0xE5, 0x4E, 0xE5, 0x4F, 0xE5, 0x50, 0xE5, 0x51, /* 0x18-0x1B */
+	0xE5, 0x52, 0xE5, 0x53, 0xE5, 0x54, 0xE5, 0x55, /* 0x1C-0x1F */
+	0xE5, 0x56, 0xE5, 0x57, 0xE5, 0x58, 0xE5, 0x59, /* 0x20-0x23 */
+	0xE5, 0x5A, 0xE5, 0x5B, 0xE5, 0x5C, 0xE5, 0x5D, /* 0x24-0x27 */
+	0xE5, 0x5E, 0xE5, 0x5F, 0xE5, 0x60, 0xE5, 0x61, /* 0x28-0x2B */
+	0xE5, 0x62, 0xE5, 0x63, 0xE5, 0x64, 0xE5, 0x65, /* 0x2C-0x2F */
+	0xE5, 0x66, 0xE5, 0x67, 0xE5, 0x68, 0xE5, 0x69, /* 0x30-0x33 */
+	0xE5, 0x6A, 0xE5, 0x6B, 0xE5, 0x6C, 0xE5, 0x6D, /* 0x34-0x37 */
+	0xE5, 0x6E, 0xE5, 0x6F, 0xE5, 0x70, 0xE5, 0x71, /* 0x38-0x3B */
+	0xE5, 0x72, 0xE5, 0x73, 0xF6, 0xC9, 0xE5, 0x74, /* 0x3C-0x3F */
+	0xE5, 0x75, 0xE5, 0x76, 0xE5, 0x77, 0xE5, 0x78, /* 0x40-0x43 */
+	0xE5, 0x79, 0xE5, 0x7A, 0xE5, 0x7B, 0xE5, 0x7C, /* 0x44-0x47 */
+	0xE5, 0x7D, 0xE5, 0x7E, 0xE5, 0x80, 0xE5, 0x81, /* 0x48-0x4B */
+	0xE5, 0x82, 0xE5, 0x83, 0xE5, 0x84, 0xE5, 0x85, /* 0x4C-0x4F */
+	0xE5, 0x86, 0xE5, 0x87, 0xE5, 0x88, 0xE5, 0x89, /* 0x50-0x53 */
+	0xE5, 0x8A, 0xE5, 0x8B, 0xE5, 0x8C, 0xE5, 0x8D, /* 0x54-0x57 */
+	0xE5, 0x8E, 0xE5, 0x8F, 0xE5, 0x90, 0xE5, 0x91, /* 0x58-0x5B */
+	0xE5, 0x92, 0xE5, 0x93, 0xE5, 0x94, 0xE5, 0x95, /* 0x5C-0x5F */
+	0xE5, 0x96, 0xE5, 0x97, 0xE5, 0x98, 0xE5, 0x99, /* 0x60-0x63 */
+	0xE5, 0x9A, 0xE5, 0x9B, 0xE5, 0x9C, 0xE5, 0x9D, /* 0x64-0x67 */
+	0xE5, 0x9E, 0xE5, 0x9F, 0xF6, 0xCA, 0xE5, 0xA0, /* 0x68-0x6B */
+	0xE6, 0x40, 0xE6, 0x41, 0xE6, 0x42, 0xE6, 0x43, /* 0x6C-0x6F */
+	0xE6, 0x44, 0xE6, 0x45, 0xE6, 0x46, 0xE6, 0x47, /* 0x70-0x73 */
+	0xE6, 0x48, 0xE6, 0x49, 0xE6, 0x4A, 0xE6, 0x4B, /* 0x74-0x77 */
+	0xE6, 0x4C, 0xE6, 0x4D, 0xE6, 0x4E, 0xE6, 0x4F, /* 0x78-0x7B */
+	0xE6, 0x50, 0xE6, 0x51, 0xE6, 0x52, 0xE6, 0x53, /* 0x7C-0x7F */
+	
+	0xE6, 0x54, 0xE6, 0x55, 0xE6, 0x56, 0xE6, 0x57, /* 0x80-0x83 */
+	0xE6, 0x58, 0xE6, 0x59, 0xE6, 0x5A, 0xE6, 0x5B, /* 0x84-0x87 */
+	0xE6, 0x5C, 0xE6, 0x5D, 0xE6, 0x5E, 0xE6, 0x5F, /* 0x88-0x8B */
+	0xE6, 0x60, 0xE6, 0x61, 0xE6, 0x62, 0xF6, 0xCC, /* 0x8C-0x8F */
+	0xE6, 0x63, 0xE6, 0x64, 0xE6, 0x65, 0xE6, 0x66, /* 0x90-0x93 */
+	0xE6, 0x67, 0xE6, 0x68, 0xE6, 0x69, 0xE6, 0x6A, /* 0x94-0x97 */
+	0xE6, 0x6B, 0xE6, 0x6C, 0xE6, 0x6D, 0xE6, 0x6E, /* 0x98-0x9B */
+	0xE6, 0x6F, 0xE6, 0x70, 0xE6, 0x71, 0xE6, 0x72, /* 0x9C-0x9F */
+	0xE6, 0x73, 0xE6, 0x74, 0xE6, 0x75, 0xE6, 0x76, /* 0xA0-0xA3 */
+	0xE6, 0x77, 0xE6, 0x78, 0xE6, 0x79, 0xE6, 0x7A, /* 0xA4-0xA7 */
+	0xE6, 0x7B, 0xE6, 0x7C, 0xE6, 0x7D, 0xE6, 0x7E, /* 0xA8-0xAB */
+	0xE6, 0x80, 0xE6, 0x81, 0xE6, 0x82, 0xE6, 0x83, /* 0xAC-0xAF */
+	0xE6, 0x84, 0xE6, 0x85, 0xE6, 0x86, 0xE6, 0x87, /* 0xB0-0xB3 */
+	0xE6, 0x88, 0xE6, 0x89, 0xE6, 0x8A, 0xE6, 0x8B, /* 0xB4-0xB7 */
+	0xE6, 0x8C, 0xE6, 0x8D, 0xE6, 0x8E, 0xE6, 0x8F, /* 0xB8-0xBB */
+	0xE6, 0x90, 0xE6, 0x91, 0xE6, 0x92, 0xE6, 0x93, /* 0xBC-0xBF */
+	0xE6, 0x94, 0xE6, 0x95, 0xE6, 0x96, 0xE6, 0x97, /* 0xC0-0xC3 */
+	0xE6, 0x98, 0xE6, 0x99, 0xE6, 0x9A, 0xE6, 0x9B, /* 0xC4-0xC7 */
+	0xE6, 0x9C, 0xE6, 0x9D, 0xF6, 0xCB, 0xE6, 0x9E, /* 0xC8-0xCB */
+	0xE6, 0x9F, 0xE6, 0xA0, 0xE7, 0x40, 0xE7, 0x41, /* 0xCC-0xCF */
+	0xE7, 0x42, 0xE7, 0x43, 0xE7, 0x44, 0xE7, 0x45, /* 0xD0-0xD3 */
+	0xE7, 0x46, 0xE7, 0x47, 0xF7, 0xE9, 0xE7, 0x48, /* 0xD4-0xD7 */
+	0xE7, 0x49, 0xE7, 0x4A, 0xE7, 0x4B, 0xE7, 0x4C, /* 0xD8-0xDB */
+	0xE7, 0x4D, 0xE7, 0x4E, 0xE7, 0x4F, 0xE7, 0x50, /* 0xDC-0xDF */
+	0xE7, 0x51, 0xE7, 0x52, 0xE7, 0x53, 0xE7, 0x54, /* 0xE0-0xE3 */
+	0xE7, 0x55, 0xE7, 0x56, 0xE7, 0x57, 0xE7, 0x58, /* 0xE4-0xE7 */
+	0xE7, 0x59, 0xE7, 0x5A, 0xE7, 0x5B, 0xE7, 0x5C, /* 0xE8-0xEB */
+	0xE7, 0x5D, 0xE7, 0x5E, 0xE7, 0x5F, 0xE7, 0x60, /* 0xEC-0xEF */
+	0xE7, 0x61, 0xE7, 0x62, 0xE7, 0x63, 0xE7, 0x64, /* 0xF0-0xF3 */
+	0xE7, 0x65, 0xE7, 0x66, 0xE7, 0x67, 0xE7, 0x68, /* 0xF4-0xF7 */
+	0xE7, 0x69, 0xE7, 0x6A, 0xE7, 0x6B, 0xE7, 0x6C, /* 0xF8-0xFB */
+	0xE7, 0x6D, 0xE7, 0x6E, 0xE7, 0x6F, 0xE7, 0x70, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_94[512] = {
+	0xE7, 0x71, 0xE7, 0x72, 0xE7, 0x73, 0xE7, 0x74, /* 0x00-0x03 */
+	0xE7, 0x75, 0xE7, 0x76, 0xE7, 0x77, 0xE7, 0x78, /* 0x04-0x07 */
+	0xE7, 0x79, 0xE7, 0x7A, 0xE7, 0x7B, 0xE7, 0x7C, /* 0x08-0x0B */
+	0xE7, 0x7D, 0xE7, 0x7E, 0xE7, 0x80, 0xE7, 0x81, /* 0x0C-0x0F */
+	0xE7, 0x82, 0xE7, 0x83, 0xE7, 0x84, 0xE7, 0x85, /* 0x10-0x13 */
+	0xE7, 0x86, 0xE7, 0x87, 0xE7, 0x88, 0xE7, 0x89, /* 0x14-0x17 */
+	0xE7, 0x8A, 0xE7, 0x8B, 0xE7, 0x8C, 0xE7, 0x8D, /* 0x18-0x1B */
+	0xE7, 0x8E, 0xE7, 0x8F, 0xE7, 0x90, 0xE7, 0x91, /* 0x1C-0x1F */
+	0xE7, 0x92, 0xE7, 0x93, 0xE7, 0x94, 0xE7, 0x95, /* 0x20-0x23 */
+	0xE7, 0x96, 0xE7, 0x97, 0xE7, 0x98, 0xE7, 0x99, /* 0x24-0x27 */
+	0xE7, 0x9A, 0xE7, 0x9B, 0xE7, 0x9C, 0xE7, 0x9D, /* 0x28-0x2B */
+	0xE7, 0x9E, 0xE7, 0x9F, 0xE7, 0xA0, 0xE8, 0x40, /* 0x2C-0x2F */
+	0xE8, 0x41, 0xE8, 0x42, 0xE8, 0x43, 0xE8, 0x44, /* 0x30-0x33 */
+	0xE8, 0x45, 0xE8, 0x46, 0xE8, 0x47, 0xE8, 0x48, /* 0x34-0x37 */
+	0xE8, 0x49, 0xE8, 0x4A, 0xE8, 0x4B, 0xE8, 0x4C, /* 0x38-0x3B */
+	0xE8, 0x4D, 0xE8, 0x4E, 0xF6, 0xCD, 0xE8, 0x4F, /* 0x3C-0x3F */
+	0xE8, 0x50, 0xE8, 0x51, 0xE8, 0x52, 0xE8, 0x53, /* 0x40-0x43 */
+	0xE8, 0x54, 0xE8, 0x55, 0xE8, 0x56, 0xE8, 0x57, /* 0x44-0x47 */
+	0xE8, 0x58, 0xE8, 0x59, 0xE8, 0x5A, 0xE8, 0x5B, /* 0x48-0x4B */
+	0xE8, 0x5C, 0xE8, 0x5D, 0xE8, 0x5E, 0xE8, 0x5F, /* 0x4C-0x4F */
+	0xE8, 0x60, 0xE8, 0x61, 0xE8, 0x62, 0xE8, 0x63, /* 0x50-0x53 */
+	0xE8, 0x64, 0xE8, 0x65, 0xE8, 0x66, 0xE8, 0x67, /* 0x54-0x57 */
+	0xE8, 0x68, 0xE8, 0x69, 0xE8, 0x6A, 0xE8, 0x6B, /* 0x58-0x5B */
+	0xE8, 0x6C, 0xE8, 0x6D, 0xE8, 0x6E, 0xE8, 0x6F, /* 0x5C-0x5F */
+	0xE8, 0x70, 0xE8, 0x71, 0xE8, 0x72, 0xE8, 0x73, /* 0x60-0x63 */
+	0xE8, 0x74, 0xE8, 0x75, 0xE8, 0x76, 0xE8, 0x77, /* 0x64-0x67 */
+	0xE8, 0x78, 0xE8, 0x79, 0xE8, 0x7A, 0xF6, 0xCE, /* 0x68-0x6B */
+	0xE8, 0x7B, 0xE8, 0x7C, 0xE8, 0x7D, 0xE8, 0x7E, /* 0x6C-0x6F */
+	0xE8, 0x80, 0xE8, 0x81, 0xE8, 0x82, 0xE8, 0x83, /* 0x70-0x73 */
+	0xE8, 0x84, 0xE8, 0x85, 0xE8, 0x86, 0xE8, 0x87, /* 0x74-0x77 */
+	0xE8, 0x88, 0xE8, 0x89, 0xE8, 0x8A, 0xE8, 0x8B, /* 0x78-0x7B */
+	0xE8, 0x8C, 0xE8, 0x8D, 0xE8, 0x8E, 0xE8, 0x8F, /* 0x7C-0x7F */
+	
+	0xE8, 0x90, 0xE8, 0x91, 0xE8, 0x92, 0xE8, 0x93, /* 0x80-0x83 */
+	0xE8, 0x94, 0xEE, 0xC4, 0xEE, 0xC5, 0xEE, 0xC6, /* 0x84-0x87 */
+	0xD5, 0xEB, 0xB6, 0xA4, 0xEE, 0xC8, 0xEE, 0xC7, /* 0x88-0x8B */
+	0xEE, 0xC9, 0xEE, 0xCA, 0xC7, 0xA5, 0xEE, 0xCB, /* 0x8C-0x8F */
+	0xEE, 0xCC, 0xE8, 0x95, 0xB7, 0xB0, 0xB5, 0xF6, /* 0x90-0x93 */
+	0xEE, 0xCD, 0xEE, 0xCF, 0xE8, 0x96, 0xEE, 0xCE, /* 0x94-0x97 */
+	0xE8, 0x97, 0xB8, 0xC6, 0xEE, 0xD0, 0xEE, 0xD1, /* 0x98-0x9B */
+	0xEE, 0xD2, 0xB6, 0xDB, 0xB3, 0xAE, 0xD6, 0xD3, /* 0x9C-0x9F */
+	0xC4, 0xC6, 0xB1, 0xB5, 0xB8, 0xD6, 0xEE, 0xD3, /* 0xA0-0xA3 */
+	0xEE, 0xD4, 0xD4, 0xBF, 0xC7, 0xD5, 0xBE, 0xFB, /* 0xA4-0xA7 */
+	0xCE, 0xD9, 0xB9, 0xB3, 0xEE, 0xD6, 0xEE, 0xD5, /* 0xA8-0xAB */
+	0xEE, 0xD8, 0xEE, 0xD7, 0xC5, 0xA5, 0xEE, 0xD9, /* 0xAC-0xAF */
+	0xEE, 0xDA, 0xC7, 0xAE, 0xEE, 0xDB, 0xC7, 0xAF, /* 0xB0-0xB3 */
+	0xEE, 0xDC, 0xB2, 0xA7, 0xEE, 0xDD, 0xEE, 0xDE, /* 0xB4-0xB7 */
+	0xEE, 0xDF, 0xEE, 0xE0, 0xEE, 0xE1, 0xD7, 0xEA, /* 0xB8-0xBB */
+	0xEE, 0xE2, 0xEE, 0xE3, 0xBC, 0xD8, 0xEE, 0xE4, /* 0xBC-0xBF */
+	0xD3, 0xCB, 0xCC, 0xFA, 0xB2, 0xAC, 0xC1, 0xE5, /* 0xC0-0xC3 */
+	0xEE, 0xE5, 0xC7, 0xA6, 0xC3, 0xAD, 0xE8, 0x98, /* 0xC4-0xC7 */
+	0xEE, 0xE6, 0xEE, 0xE7, 0xEE, 0xE8, 0xEE, 0xE9, /* 0xC8-0xCB */
+	0xEE, 0xEA, 0xEE, 0xEB, 0xEE, 0xEC, 0xE8, 0x99, /* 0xCC-0xCF */
+	0xEE, 0xED, 0xEE, 0xEE, 0xEE, 0xEF, 0xE8, 0x9A, /* 0xD0-0xD3 */
+	0xE8, 0x9B, 0xEE, 0xF0, 0xEE, 0xF1, 0xEE, 0xF2, /* 0xD4-0xD7 */
+	0xEE, 0xF4, 0xEE, 0xF3, 0xE8, 0x9C, 0xEE, 0xF5, /* 0xD8-0xDB */
+	0xCD, 0xAD, 0xC2, 0xC1, 0xEE, 0xF6, 0xEE, 0xF7, /* 0xDC-0xDF */
+	0xEE, 0xF8, 0xD5, 0xA1, 0xEE, 0xF9, 0xCF, 0xB3, /* 0xE0-0xE3 */
+	0xEE, 0xFA, 0xEE, 0xFB, 0xE8, 0x9D, 0xEE, 0xFC, /* 0xE4-0xE7 */
+	0xEE, 0xFD, 0xEF, 0xA1, 0xEE, 0xFE, 0xEF, 0xA2, /* 0xE8-0xEB */
+	0xB8, 0xF5, 0xC3, 0xFA, 0xEF, 0xA3, 0xEF, 0xA4, /* 0xEC-0xEF */
+	0xBD, 0xC2, 0xD2, 0xBF, 0xB2, 0xF9, 0xEF, 0xA5, /* 0xF0-0xF3 */
+	0xEF, 0xA6, 0xEF, 0xA7, 0xD2, 0xF8, 0xEF, 0xA8, /* 0xF4-0xF7 */
+	0xD6, 0xFD, 0xEF, 0xA9, 0xC6, 0xCC, 0xE8, 0x9E, /* 0xF8-0xFB */
+	0xEF, 0xAA, 0xEF, 0xAB, 0xC1, 0xB4, 0xEF, 0xAC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_95[512] = {
+	0xCF, 0xFA, 0xCB, 0xF8, 0xEF, 0xAE, 0xEF, 0xAD, /* 0x00-0x03 */
+	0xB3, 0xFA, 0xB9, 0xF8, 0xEF, 0xAF, 0xEF, 0xB0, /* 0x04-0x07 */
+	0xD0, 0xE2, 0xEF, 0xB1, 0xEF, 0xB2, 0xB7, 0xE6, /* 0x08-0x0B */
+	0xD0, 0xBF, 0xEF, 0xB3, 0xEF, 0xB4, 0xEF, 0xB5, /* 0x0C-0x0F */
+	0xC8, 0xF1, 0xCC, 0xE0, 0xEF, 0xB6, 0xEF, 0xB7, /* 0x10-0x13 */
+	0xEF, 0xB8, 0xEF, 0xB9, 0xEF, 0xBA, 0xD5, 0xE0, /* 0x14-0x17 */
+	0xEF, 0xBB, 0xB4, 0xED, 0xC3, 0xAA, 0xEF, 0xBC, /* 0x18-0x1B */
+	0xE8, 0x9F, 0xEF, 0xBD, 0xEF, 0xBE, 0xEF, 0xBF, /* 0x1C-0x1F */
+	0xE8, 0xA0, 0xCE, 0xFD, 0xEF, 0xC0, 0xC2, 0xE0, /* 0x20-0x23 */
+	0xB4, 0xB8, 0xD7, 0xB6, 0xBD, 0xF5, 0xE9, 0x40, /* 0x24-0x27 */
+	0xCF, 0xC7, 0xEF, 0xC3, 0xEF, 0xC1, 0xEF, 0xC2, /* 0x28-0x2B */
+	0xEF, 0xC4, 0xB6, 0xA7, 0xBC, 0xFC, 0xBE, 0xE2, /* 0x2C-0x2F */
+	0xC3, 0xCC, 0xEF, 0xC5, 0xEF, 0xC6, 0xE9, 0x41, /* 0x30-0x33 */
+	0xEF, 0xC7, 0xEF, 0xCF, 0xEF, 0xC8, 0xEF, 0xC9, /* 0x34-0x37 */
+	0xEF, 0xCA, 0xC7, 0xC2, 0xEF, 0xF1, 0xB6, 0xCD, /* 0x38-0x3B */
+	0xEF, 0xCB, 0xE9, 0x42, 0xEF, 0xCC, 0xEF, 0xCD, /* 0x3C-0x3F */
+	0xB6, 0xC6, 0xC3, 0xBE, 0xEF, 0xCE, 0xE9, 0x43, /* 0x40-0x43 */
+	0xEF, 0xD0, 0xEF, 0xD1, 0xEF, 0xD2, 0xD5, 0xF2, /* 0x44-0x47 */
+	0xE9, 0x44, 0xEF, 0xD3, 0xC4, 0xF7, 0xE9, 0x45, /* 0x48-0x4B */
+	0xEF, 0xD4, 0xC4, 0xF8, 0xEF, 0xD5, 0xEF, 0xD6, /* 0x4C-0x4F */
+	0xB8, 0xE4, 0xB0, 0xF7, 0xEF, 0xD7, 0xEF, 0xD8, /* 0x50-0x53 */
+	0xEF, 0xD9, 0xE9, 0x46, 0xEF, 0xDA, 0xEF, 0xDB, /* 0x54-0x57 */
+	0xEF, 0xDC, 0xEF, 0xDD, 0xE9, 0x47, 0xEF, 0xDE, /* 0x58-0x5B */
+	0xBE, 0xB5, 0xEF, 0xE1, 0xEF, 0xDF, 0xEF, 0xE0, /* 0x5C-0x5F */
+	0xE9, 0x48, 0xEF, 0xE2, 0xEF, 0xE3, 0xC1, 0xCD, /* 0x60-0x63 */
+	0xEF, 0xE4, 0xEF, 0xE5, 0xEF, 0xE6, 0xEF, 0xE7, /* 0x64-0x67 */
+	0xEF, 0xE8, 0xEF, 0xE9, 0xEF, 0xEA, 0xEF, 0xEB, /* 0x68-0x6B */
+	0xEF, 0xEC, 0xC0, 0xD8, 0xE9, 0x49, 0xEF, 0xED, /* 0x6C-0x6F */
+	0xC1, 0xAD, 0xEF, 0xEE, 0xEF, 0xEF, 0xEF, 0xF0, /* 0x70-0x73 */
+	0xE9, 0x4A, 0xE9, 0x4B, 0xCF, 0xE2, 0xE9, 0x4C, /* 0x74-0x77 */
+	0xE9, 0x4D, 0xE9, 0x4E, 0xE9, 0x4F, 0xE9, 0x50, /* 0x78-0x7B */
+	0xE9, 0x51, 0xE9, 0x52, 0xE9, 0x53, 0xB3, 0xA4, /* 0x7C-0x7F */
+	
+	0xE9, 0x54, 0xE9, 0x55, 0xE9, 0x56, 0xE9, 0x57, /* 0x80-0x83 */
+	0xE9, 0x58, 0xE9, 0x59, 0xE9, 0x5A, 0xE9, 0x5B, /* 0x84-0x87 */
+	0xE9, 0x5C, 0xE9, 0x5D, 0xE9, 0x5E, 0xE9, 0x5F, /* 0x88-0x8B */
+	0xE9, 0x60, 0xE9, 0x61, 0xE9, 0x62, 0xE9, 0x63, /* 0x8C-0x8F */
+	0xE9, 0x64, 0xE9, 0x65, 0xE9, 0x66, 0xE9, 0x67, /* 0x90-0x93 */
+	0xE9, 0x68, 0xE9, 0x69, 0xE9, 0x6A, 0xE9, 0x6B, /* 0x94-0x97 */
+	0xE9, 0x6C, 0xE9, 0x6D, 0xE9, 0x6E, 0xE9, 0x6F, /* 0x98-0x9B */
+	0xE9, 0x70, 0xE9, 0x71, 0xE9, 0x72, 0xE9, 0x73, /* 0x9C-0x9F */
+	0xE9, 0x74, 0xE9, 0x75, 0xE9, 0x76, 0xE9, 0x77, /* 0xA0-0xA3 */
+	0xE9, 0x78, 0xE9, 0x79, 0xE9, 0x7A, 0xE9, 0x7B, /* 0xA4-0xA7 */
+	0xE9, 0x7C, 0xE9, 0x7D, 0xE9, 0x7E, 0xE9, 0x80, /* 0xA8-0xAB */
+	0xE9, 0x81, 0xE9, 0x82, 0xE9, 0x83, 0xE9, 0x84, /* 0xAC-0xAF */
+	0xE9, 0x85, 0xE9, 0x86, 0xE9, 0x87, 0xE9, 0x88, /* 0xB0-0xB3 */
+	0xE9, 0x89, 0xE9, 0x8A, 0xE9, 0x8B, 0xE9, 0x8C, /* 0xB4-0xB7 */
+	0xE9, 0x8D, 0xE9, 0x8E, 0xE9, 0x8F, 0xE9, 0x90, /* 0xB8-0xBB */
+	0xE9, 0x91, 0xE9, 0x92, 0xE9, 0x93, 0xE9, 0x94, /* 0xBC-0xBF */
+	0xE9, 0x95, 0xE9, 0x96, 0xE9, 0x97, 0xE9, 0x98, /* 0xC0-0xC3 */
+	0xE9, 0x99, 0xE9, 0x9A, 0xE9, 0x9B, 0xE9, 0x9C, /* 0xC4-0xC7 */
+	0xE9, 0x9D, 0xE9, 0x9E, 0xE9, 0x9F, 0xE9, 0xA0, /* 0xC8-0xCB */
+	0xEA, 0x40, 0xEA, 0x41, 0xEA, 0x42, 0xEA, 0x43, /* 0xCC-0xCF */
+	0xEA, 0x44, 0xEA, 0x45, 0xEA, 0x46, 0xEA, 0x47, /* 0xD0-0xD3 */
+	0xEA, 0x48, 0xEA, 0x49, 0xEA, 0x4A, 0xEA, 0x4B, /* 0xD4-0xD7 */
+	0xEA, 0x4C, 0xEA, 0x4D, 0xEA, 0x4E, 0xEA, 0x4F, /* 0xD8-0xDB */
+	0xEA, 0x50, 0xEA, 0x51, 0xEA, 0x52, 0xEA, 0x53, /* 0xDC-0xDF */
+	0xEA, 0x54, 0xEA, 0x55, 0xEA, 0x56, 0xEA, 0x57, /* 0xE0-0xE3 */
+	0xEA, 0x58, 0xEA, 0x59, 0xEA, 0x5A, 0xEA, 0x5B, /* 0xE4-0xE7 */
+	0xC3, 0xC5, 0xE3, 0xC5, 0xC9, 0xC1, 0xE3, 0xC6, /* 0xE8-0xEB */
+	0xEA, 0x5C, 0xB1, 0xD5, 0xCE, 0xCA, 0xB4, 0xB3, /* 0xEC-0xEF */
+	0xC8, 0xF2, 0xE3, 0xC7, 0xCF, 0xD0, 0xE3, 0xC8, /* 0xF0-0xF3 */
+	0xBC, 0xE4, 0xE3, 0xC9, 0xE3, 0xCA, 0xC3, 0xC6, /* 0xF4-0xF7 */
+	0xD5, 0xA2, 0xC4, 0xD6, 0xB9, 0xEB, 0xCE, 0xC5, /* 0xF8-0xFB */
+	0xE3, 0xCB, 0xC3, 0xF6, 0xE3, 0xCC, 0xEA, 0x5D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_96[512] = {
+	0xB7, 0xA7, 0xB8, 0xF3, 0xBA, 0xD2, 0xE3, 0xCD, /* 0x00-0x03 */
+	0xE3, 0xCE, 0xD4, 0xC4, 0xE3, 0xCF, 0xEA, 0x5E, /* 0x04-0x07 */
+	0xE3, 0xD0, 0xD1, 0xCB, 0xE3, 0xD1, 0xE3, 0xD2, /* 0x08-0x0B */
+	0xE3, 0xD3, 0xE3, 0xD4, 0xD1, 0xD6, 0xE3, 0xD5, /* 0x0C-0x0F */
+	0xB2, 0xFB, 0xC0, 0xBB, 0xE3, 0xD6, 0xEA, 0x5F, /* 0x10-0x13 */
+	0xC0, 0xAB, 0xE3, 0xD7, 0xE3, 0xD8, 0xE3, 0xD9, /* 0x14-0x17 */
+	0xEA, 0x60, 0xE3, 0xDA, 0xE3, 0xDB, 0xEA, 0x61, /* 0x18-0x1B */
+	0xB8, 0xB7, 0xDA, 0xE2, 0xEA, 0x62, 0xB6, 0xD3, /* 0x1C-0x1F */
+	0xEA, 0x63, 0xDA, 0xE4, 0xDA, 0xE3, 0xEA, 0x64, /* 0x20-0x23 */
+	0xEA, 0x65, 0xEA, 0x66, 0xEA, 0x67, 0xEA, 0x68, /* 0x24-0x27 */
+	0xEA, 0x69, 0xEA, 0x6A, 0xDA, 0xE6, 0xEA, 0x6B, /* 0x28-0x2B */
+	0xEA, 0x6C, 0xEA, 0x6D, 0xC8, 0xEE, 0xEA, 0x6E, /* 0x2C-0x2F */
+	0xEA, 0x6F, 0xDA, 0xE5, 0xB7, 0xC0, 0xD1, 0xF4, /* 0x30-0x33 */
+	0xD2, 0xF5, 0xD5, 0xF3, 0xBD, 0xD7, 0xEA, 0x70, /* 0x34-0x37 */
+	0xEA, 0x71, 0xEA, 0x72, 0xEA, 0x73, 0xD7, 0xE8, /* 0x38-0x3B */
+	0xDA, 0xE8, 0xDA, 0xE7, 0xEA, 0x74, 0xB0, 0xA2, /* 0x3C-0x3F */
+	0xCD, 0xD3, 0xEA, 0x75, 0xDA, 0xE9, 0xEA, 0x76, /* 0x40-0x43 */
+	0xB8, 0xBD, 0xBC, 0xCA, 0xC2, 0xBD, 0xC2, 0xA4, /* 0x44-0x47 */
+	0xB3, 0xC2, 0xDA, 0xEA, 0xEA, 0x77, 0xC2, 0xAA, /* 0x48-0x4B */
+	0xC4, 0xB0, 0xBD, 0xB5, 0xEA, 0x78, 0xEA, 0x79, /* 0x4C-0x4F */
+	0xCF, 0xDE, 0xEA, 0x7A, 0xEA, 0x7B, 0xEA, 0x7C, /* 0x50-0x53 */
+	0xDA, 0xEB, 0xC9, 0xC2, 0xEA, 0x7D, 0xEA, 0x7E, /* 0x54-0x57 */
+	0xEA, 0x80, 0xEA, 0x81, 0xEA, 0x82, 0xB1, 0xDD, /* 0x58-0x5B */
+	0xEA, 0x83, 0xEA, 0x84, 0xEA, 0x85, 0xDA, 0xEC, /* 0x5C-0x5F */
+	0xEA, 0x86, 0xB6, 0xB8, 0xD4, 0xBA, 0xEA, 0x87, /* 0x60-0x63 */
+	0xB3, 0xFD, 0xEA, 0x88, 0xEA, 0x89, 0xDA, 0xED, /* 0x64-0x67 */
+	0xD4, 0xC9, 0xCF, 0xD5, 0xC5, 0xE3, 0xEA, 0x8A, /* 0x68-0x6B */
+	0xDA, 0xEE, 0xEA, 0x8B, 0xEA, 0x8C, 0xEA, 0x8D, /* 0x6C-0x6F */
+	0xEA, 0x8E, 0xEA, 0x8F, 0xDA, 0xEF, 0xEA, 0x90, /* 0x70-0x73 */
+	0xDA, 0xF0, 0xC1, 0xEA, 0xCC, 0xD5, 0xCF, 0xDD, /* 0x74-0x77 */
+	0xEA, 0x91, 0xEA, 0x92, 0xEA, 0x93, 0xEA, 0x94, /* 0x78-0x7B */
+	0xEA, 0x95, 0xEA, 0x96, 0xEA, 0x97, 0xEA, 0x98, /* 0x7C-0x7F */
+	
+	0xEA, 0x99, 0xEA, 0x9A, 0xEA, 0x9B, 0xEA, 0x9C, /* 0x80-0x83 */
+	0xEA, 0x9D, 0xD3, 0xE7, 0xC2, 0xA1, 0xEA, 0x9E, /* 0x84-0x87 */
+	0xDA, 0xF1, 0xEA, 0x9F, 0xEA, 0xA0, 0xCB, 0xE5, /* 0x88-0x8B */
+	0xEB, 0x40, 0xDA, 0xF2, 0xEB, 0x41, 0xCB, 0xE6, /* 0x8C-0x8F */
+	0xD2, 0xFE, 0xEB, 0x42, 0xEB, 0x43, 0xEB, 0x44, /* 0x90-0x93 */
+	0xB8, 0xF4, 0xEB, 0x45, 0xEB, 0x46, 0xDA, 0xF3, /* 0x94-0x97 */
+	0xB0, 0xAF, 0xCF, 0xB6, 0xEB, 0x47, 0xEB, 0x48, /* 0x98-0x9B */
+	0xD5, 0xCF, 0xEB, 0x49, 0xEB, 0x4A, 0xEB, 0x4B, /* 0x9C-0x9F */
+	0xEB, 0x4C, 0xEB, 0x4D, 0xEB, 0x4E, 0xEB, 0x4F, /* 0xA0-0xA3 */
+	0xEB, 0x50, 0xEB, 0x51, 0xEB, 0x52, 0xCB, 0xED, /* 0xA4-0xA7 */
+	0xEB, 0x53, 0xEB, 0x54, 0xEB, 0x55, 0xEB, 0x56, /* 0xA8-0xAB */
+	0xEB, 0x57, 0xEB, 0x58, 0xEB, 0x59, 0xEB, 0x5A, /* 0xAC-0xAF */
+	0xDA, 0xF4, 0xEB, 0x5B, 0xEB, 0x5C, 0xE3, 0xC4, /* 0xB0-0xB3 */
+	0xEB, 0x5D, 0xEB, 0x5E, 0xC1, 0xA5, 0xEB, 0x5F, /* 0xB4-0xB7 */
+	0xEB, 0x60, 0xF6, 0xBF, 0xEB, 0x61, 0xEB, 0x62, /* 0xB8-0xBB */
+	0xF6, 0xC0, 0xF6, 0xC1, 0xC4, 0xD1, 0xEB, 0x63, /* 0xBC-0xBF */
+	0xC8, 0xB8, 0xD1, 0xE3, 0xEB, 0x64, 0xEB, 0x65, /* 0xC0-0xC3 */
+	0xD0, 0xDB, 0xD1, 0xC5, 0xBC, 0xAF, 0xB9, 0xCD, /* 0xC4-0xC7 */
+	0xEB, 0x66, 0xEF, 0xF4, 0xEB, 0x67, 0xEB, 0x68, /* 0xC8-0xCB */
+	0xB4, 0xC6, 0xD3, 0xBA, 0xF6, 0xC2, 0xB3, 0xFB, /* 0xCC-0xCF */
+	0xEB, 0x69, 0xEB, 0x6A, 0xF6, 0xC3, 0xEB, 0x6B, /* 0xD0-0xD3 */
+	0xEB, 0x6C, 0xB5, 0xF1, 0xEB, 0x6D, 0xEB, 0x6E, /* 0xD4-0xD7 */
+	0xEB, 0x6F, 0xEB, 0x70, 0xEB, 0x71, 0xEB, 0x72, /* 0xD8-0xDB */
+	0xEB, 0x73, 0xEB, 0x74, 0xEB, 0x75, 0xEB, 0x76, /* 0xDC-0xDF */
+	0xF6, 0xC5, 0xEB, 0x77, 0xEB, 0x78, 0xEB, 0x79, /* 0xE0-0xE3 */
+	0xEB, 0x7A, 0xEB, 0x7B, 0xEB, 0x7C, 0xEB, 0x7D, /* 0xE4-0xE7 */
+	0xD3, 0xEA, 0xF6, 0xA7, 0xD1, 0xA9, 0xEB, 0x7E, /* 0xE8-0xEB */
+	0xEB, 0x80, 0xEB, 0x81, 0xEB, 0x82, 0xF6, 0xA9, /* 0xEC-0xEF */
+	0xEB, 0x83, 0xEB, 0x84, 0xEB, 0x85, 0xF6, 0xA8, /* 0xF0-0xF3 */
+	0xEB, 0x86, 0xEB, 0x87, 0xC1, 0xE3, 0xC0, 0xD7, /* 0xF4-0xF7 */
+	0xEB, 0x88, 0xB1, 0xA2, 0xEB, 0x89, 0xEB, 0x8A, /* 0xF8-0xFB */
+	0xEB, 0x8B, 0xEB, 0x8C, 0xCE, 0xED, 0xEB, 0x8D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_97[512] = {
+	0xD0, 0xE8, 0xF6, 0xAB, 0xEB, 0x8E, 0xEB, 0x8F, /* 0x00-0x03 */
+	0xCF, 0xF6, 0xEB, 0x90, 0xF6, 0xAA, 0xD5, 0xF0, /* 0x04-0x07 */
+	0xF6, 0xAC, 0xC3, 0xB9, 0xEB, 0x91, 0xEB, 0x92, /* 0x08-0x0B */
+	0xEB, 0x93, 0xBB, 0xF4, 0xF6, 0xAE, 0xF6, 0xAD, /* 0x0C-0x0F */
+	0xEB, 0x94, 0xEB, 0x95, 0xEB, 0x96, 0xC4, 0xDE, /* 0x10-0x13 */
+	0xEB, 0x97, 0xEB, 0x98, 0xC1, 0xD8, 0xEB, 0x99, /* 0x14-0x17 */
+	0xEB, 0x9A, 0xEB, 0x9B, 0xEB, 0x9C, 0xEB, 0x9D, /* 0x18-0x1B */
+	0xCB, 0xAA, 0xEB, 0x9E, 0xCF, 0xBC, 0xEB, 0x9F, /* 0x1C-0x1F */
+	0xEB, 0xA0, 0xEC, 0x40, 0xEC, 0x41, 0xEC, 0x42, /* 0x20-0x23 */
+	0xEC, 0x43, 0xEC, 0x44, 0xEC, 0x45, 0xEC, 0x46, /* 0x24-0x27 */
+	0xEC, 0x47, 0xEC, 0x48, 0xF6, 0xAF, 0xEC, 0x49, /* 0x28-0x2B */
+	0xEC, 0x4A, 0xF6, 0xB0, 0xEC, 0x4B, 0xEC, 0x4C, /* 0x2C-0x2F */
+	0xF6, 0xB1, 0xEC, 0x4D, 0xC2, 0xB6, 0xEC, 0x4E, /* 0x30-0x33 */
+	0xEC, 0x4F, 0xEC, 0x50, 0xEC, 0x51, 0xEC, 0x52, /* 0x34-0x37 */
+	0xB0, 0xD4, 0xC5, 0xF9, 0xEC, 0x53, 0xEC, 0x54, /* 0x38-0x3B */
+	0xEC, 0x55, 0xEC, 0x56, 0xF6, 0xB2, 0xEC, 0x57, /* 0x3C-0x3F */
+	0xEC, 0x58, 0xEC, 0x59, 0xEC, 0x5A, 0xEC, 0x5B, /* 0x40-0x43 */
+	0xEC, 0x5C, 0xEC, 0x5D, 0xEC, 0x5E, 0xEC, 0x5F, /* 0x44-0x47 */
+	0xEC, 0x60, 0xEC, 0x61, 0xEC, 0x62, 0xEC, 0x63, /* 0x48-0x4B */
+	0xEC, 0x64, 0xEC, 0x65, 0xEC, 0x66, 0xEC, 0x67, /* 0x4C-0x4F */
+	0xEC, 0x68, 0xEC, 0x69, 0xC7, 0xE0, 0xF6, 0xA6, /* 0x50-0x53 */
+	0xEC, 0x6A, 0xEC, 0x6B, 0xBE, 0xB8, 0xEC, 0x6C, /* 0x54-0x57 */
+	0xEC, 0x6D, 0xBE, 0xB2, 0xEC, 0x6E, 0xB5, 0xE5, /* 0x58-0x5B */
+	0xEC, 0x6F, 0xEC, 0x70, 0xB7, 0xC7, 0xEC, 0x71, /* 0x5C-0x5F */
+	0xBF, 0xBF, 0xC3, 0xD2, 0xC3, 0xE6, 0xEC, 0x72, /* 0x60-0x63 */
+	0xEC, 0x73, 0xD8, 0xCC, 0xEC, 0x74, 0xEC, 0x75, /* 0x64-0x67 */
+	0xEC, 0x76, 0xB8, 0xEF, 0xEC, 0x77, 0xEC, 0x78, /* 0x68-0x6B */
+	0xEC, 0x79, 0xEC, 0x7A, 0xEC, 0x7B, 0xEC, 0x7C, /* 0x6C-0x6F */
+	0xEC, 0x7D, 0xEC, 0x7E, 0xEC, 0x80, 0xBD, 0xF9, /* 0x70-0x73 */
+	0xD1, 0xA5, 0xEC, 0x81, 0xB0, 0xD0, 0xEC, 0x82, /* 0x74-0x77 */
+	0xEC, 0x83, 0xEC, 0x84, 0xEC, 0x85, 0xEC, 0x86, /* 0x78-0x7B */
+	0xF7, 0xB0, 0xEC, 0x87, 0xEC, 0x88, 0xEC, 0x89, /* 0x7C-0x7F */
+	
+	0xEC, 0x8A, 0xEC, 0x8B, 0xEC, 0x8C, 0xEC, 0x8D, /* 0x80-0x83 */
+	0xEC, 0x8E, 0xF7, 0xB1, 0xEC, 0x8F, 0xEC, 0x90, /* 0x84-0x87 */
+	0xEC, 0x91, 0xEC, 0x92, 0xEC, 0x93, 0xD0, 0xAC, /* 0x88-0x8B */
+	0xEC, 0x94, 0xB0, 0xB0, 0xEC, 0x95, 0xEC, 0x96, /* 0x8C-0x8F */
+	0xEC, 0x97, 0xF7, 0xB2, 0xF7, 0xB3, 0xEC, 0x98, /* 0x90-0x93 */
+	0xF7, 0xB4, 0xEC, 0x99, 0xEC, 0x9A, 0xEC, 0x9B, /* 0x94-0x97 */
+	0xC7, 0xCA, 0xEC, 0x9C, 0xEC, 0x9D, 0xEC, 0x9E, /* 0x98-0x9B */
+	0xEC, 0x9F, 0xEC, 0xA0, 0xED, 0x40, 0xED, 0x41, /* 0x9C-0x9F */
+	0xBE, 0xCF, 0xED, 0x42, 0xED, 0x43, 0xF7, 0xB7, /* 0xA0-0xA3 */
+	0xED, 0x44, 0xED, 0x45, 0xED, 0x46, 0xED, 0x47, /* 0xA4-0xA7 */
+	0xED, 0x48, 0xED, 0x49, 0xED, 0x4A, 0xF7, 0xB6, /* 0xA8-0xAB */
+	0xED, 0x4B, 0xB1, 0xDE, 0xED, 0x4C, 0xF7, 0xB5, /* 0xAC-0xAF */
+	0xED, 0x4D, 0xED, 0x4E, 0xF7, 0xB8, 0xED, 0x4F, /* 0xB0-0xB3 */
+	0xF7, 0xB9, 0xED, 0x50, 0xED, 0x51, 0xED, 0x52, /* 0xB4-0xB7 */
+	0xED, 0x53, 0xED, 0x54, 0xED, 0x55, 0xED, 0x56, /* 0xB8-0xBB */
+	0xED, 0x57, 0xED, 0x58, 0xED, 0x59, 0xED, 0x5A, /* 0xBC-0xBF */
+	0xED, 0x5B, 0xED, 0x5C, 0xED, 0x5D, 0xED, 0x5E, /* 0xC0-0xC3 */
+	0xED, 0x5F, 0xED, 0x60, 0xED, 0x61, 0xED, 0x62, /* 0xC4-0xC7 */
+	0xED, 0x63, 0xED, 0x64, 0xED, 0x65, 0xED, 0x66, /* 0xC8-0xCB */
+	0xED, 0x67, 0xED, 0x68, 0xED, 0x69, 0xED, 0x6A, /* 0xCC-0xCF */
+	0xED, 0x6B, 0xED, 0x6C, 0xED, 0x6D, 0xED, 0x6E, /* 0xD0-0xD3 */
+	0xED, 0x6F, 0xED, 0x70, 0xED, 0x71, 0xED, 0x72, /* 0xD4-0xD7 */
+	0xED, 0x73, 0xED, 0x74, 0xED, 0x75, 0xED, 0x76, /* 0xD8-0xDB */
+	0xED, 0x77, 0xED, 0x78, 0xED, 0x79, 0xED, 0x7A, /* 0xDC-0xDF */
+	0xED, 0x7B, 0xED, 0x7C, 0xED, 0x7D, 0xED, 0x7E, /* 0xE0-0xE3 */
+	0xED, 0x80, 0xED, 0x81, 0xCE, 0xA4, 0xC8, 0xCD, /* 0xE4-0xE7 */
+	0xED, 0x82, 0xBA, 0xAB, 0xE8, 0xB8, 0xE8, 0xB9, /* 0xE8-0xEB */
+	0xE8, 0xBA, 0xBE, 0xC2, 0xED, 0x83, 0xED, 0x84, /* 0xEC-0xEF */
+	0xED, 0x85, 0xED, 0x86, 0xED, 0x87, 0xD2, 0xF4, /* 0xF0-0xF3 */
+	0xED, 0x88, 0xD4, 0xCF, 0xC9, 0xD8, 0xED, 0x89, /* 0xF4-0xF7 */
+	0xED, 0x8A, 0xED, 0x8B, 0xED, 0x8C, 0xED, 0x8D, /* 0xF8-0xFB */
+	0xED, 0x8E, 0xED, 0x8F, 0xED, 0x90, 0xED, 0x91, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_98[512] = {
+	0xED, 0x92, 0xED, 0x93, 0xED, 0x94, 0xED, 0x95, /* 0x00-0x03 */
+	0xED, 0x96, 0xED, 0x97, 0xED, 0x98, 0xED, 0x99, /* 0x04-0x07 */
+	0xED, 0x9A, 0xED, 0x9B, 0xED, 0x9C, 0xED, 0x9D, /* 0x08-0x0B */
+	0xED, 0x9E, 0xED, 0x9F, 0xED, 0xA0, 0xEE, 0x40, /* 0x0C-0x0F */
+	0xEE, 0x41, 0xEE, 0x42, 0xEE, 0x43, 0xEE, 0x44, /* 0x10-0x13 */
+	0xEE, 0x45, 0xEE, 0x46, 0xEE, 0x47, 0xEE, 0x48, /* 0x14-0x17 */
+	0xEE, 0x49, 0xEE, 0x4A, 0xEE, 0x4B, 0xEE, 0x4C, /* 0x18-0x1B */
+	0xEE, 0x4D, 0xEE, 0x4E, 0xEE, 0x4F, 0xEE, 0x50, /* 0x1C-0x1F */
+	0xEE, 0x51, 0xEE, 0x52, 0xEE, 0x53, 0xEE, 0x54, /* 0x20-0x23 */
+	0xEE, 0x55, 0xEE, 0x56, 0xEE, 0x57, 0xEE, 0x58, /* 0x24-0x27 */
+	0xEE, 0x59, 0xEE, 0x5A, 0xEE, 0x5B, 0xEE, 0x5C, /* 0x28-0x2B */
+	0xEE, 0x5D, 0xEE, 0x5E, 0xEE, 0x5F, 0xEE, 0x60, /* 0x2C-0x2F */
+	0xEE, 0x61, 0xEE, 0x62, 0xEE, 0x63, 0xEE, 0x64, /* 0x30-0x33 */
+	0xEE, 0x65, 0xEE, 0x66, 0xEE, 0x67, 0xEE, 0x68, /* 0x34-0x37 */
+	0xEE, 0x69, 0xEE, 0x6A, 0xEE, 0x6B, 0xEE, 0x6C, /* 0x38-0x3B */
+	0xEE, 0x6D, 0xEE, 0x6E, 0xEE, 0x6F, 0xEE, 0x70, /* 0x3C-0x3F */
+	0xEE, 0x71, 0xEE, 0x72, 0xEE, 0x73, 0xEE, 0x74, /* 0x40-0x43 */
+	0xEE, 0x75, 0xEE, 0x76, 0xEE, 0x77, 0xEE, 0x78, /* 0x44-0x47 */
+	0xEE, 0x79, 0xEE, 0x7A, 0xEE, 0x7B, 0xEE, 0x7C, /* 0x48-0x4B */
+	0xEE, 0x7D, 0xEE, 0x7E, 0xEE, 0x80, 0xEE, 0x81, /* 0x4C-0x4F */
+	0xEE, 0x82, 0xEE, 0x83, 0xEE, 0x84, 0xEE, 0x85, /* 0x50-0x53 */
+	0xEE, 0x86, 0xEE, 0x87, 0xEE, 0x88, 0xEE, 0x89, /* 0x54-0x57 */
+	0xEE, 0x8A, 0xEE, 0x8B, 0xEE, 0x8C, 0xEE, 0x8D, /* 0x58-0x5B */
+	0xEE, 0x8E, 0xEE, 0x8F, 0xEE, 0x90, 0xEE, 0x91, /* 0x5C-0x5F */
+	0xEE, 0x92, 0xEE, 0x93, 0xEE, 0x94, 0xEE, 0x95, /* 0x60-0x63 */
+	0xEE, 0x96, 0xEE, 0x97, 0xEE, 0x98, 0xEE, 0x99, /* 0x64-0x67 */
+	0xEE, 0x9A, 0xEE, 0x9B, 0xEE, 0x9C, 0xEE, 0x9D, /* 0x68-0x6B */
+	0xEE, 0x9E, 0xEE, 0x9F, 0xEE, 0xA0, 0xEF, 0x40, /* 0x6C-0x6F */
+	0xEF, 0x41, 0xEF, 0x42, 0xEF, 0x43, 0xEF, 0x44, /* 0x70-0x73 */
+	0xEF, 0x45, 0xD2, 0xB3, 0xB6, 0xA5, 0xC7, 0xEA, /* 0x74-0x77 */
+	0xF1, 0xFC, 0xCF, 0xEE, 0xCB, 0xB3, 0xD0, 0xEB, /* 0x78-0x7B */
+	0xE7, 0xEF, 0xCD, 0xE7, 0xB9, 0xCB, 0xB6, 0xD9, /* 0x7C-0x7F */
+	
+	0xF1, 0xFD, 0xB0, 0xE4, 0xCB, 0xCC, 0xF1, 0xFE, /* 0x80-0x83 */
+	0xD4, 0xA4, 0xC2, 0xAD, 0xC1, 0xEC, 0xC6, 0xC4, /* 0x84-0x87 */
+	0xBE, 0xB1, 0xF2, 0xA1, 0xBC, 0xD5, 0xEF, 0x46, /* 0x88-0x8B */
+	0xF2, 0xA2, 0xF2, 0xA3, 0xEF, 0x47, 0xF2, 0xA4, /* 0x8C-0x8F */
+	0xD2, 0xC3, 0xC6, 0xB5, 0xEF, 0x48, 0xCD, 0xC7, /* 0x90-0x93 */
+	0xF2, 0xA5, 0xEF, 0x49, 0xD3, 0xB1, 0xBF, 0xC5, /* 0x94-0x97 */
+	0xCC, 0xE2, 0xEF, 0x4A, 0xF2, 0xA6, 0xF2, 0xA7, /* 0x98-0x9B */
+	0xD1, 0xD5, 0xB6, 0xEE, 0xF2, 0xA8, 0xF2, 0xA9, /* 0x9C-0x9F */
+	0xB5, 0xDF, 0xF2, 0xAA, 0xF2, 0xAB, 0xEF, 0x4B, /* 0xA0-0xA3 */
+	0xB2, 0xFC, 0xF2, 0xAC, 0xF2, 0xAD, 0xC8, 0xA7, /* 0xA4-0xA7 */
+	0xEF, 0x4C, 0xEF, 0x4D, 0xEF, 0x4E, 0xEF, 0x4F, /* 0xA8-0xAB */
+	0xEF, 0x50, 0xEF, 0x51, 0xEF, 0x52, 0xEF, 0x53, /* 0xAC-0xAF */
+	0xEF, 0x54, 0xEF, 0x55, 0xEF, 0x56, 0xEF, 0x57, /* 0xB0-0xB3 */
+	0xEF, 0x58, 0xEF, 0x59, 0xEF, 0x5A, 0xEF, 0x5B, /* 0xB4-0xB7 */
+	0xEF, 0x5C, 0xEF, 0x5D, 0xEF, 0x5E, 0xEF, 0x5F, /* 0xB8-0xBB */
+	0xEF, 0x60, 0xEF, 0x61, 0xEF, 0x62, 0xEF, 0x63, /* 0xBC-0xBF */
+	0xEF, 0x64, 0xEF, 0x65, 0xEF, 0x66, 0xEF, 0x67, /* 0xC0-0xC3 */
+	0xEF, 0x68, 0xEF, 0x69, 0xEF, 0x6A, 0xEF, 0x6B, /* 0xC4-0xC7 */
+	0xEF, 0x6C, 0xEF, 0x6D, 0xEF, 0x6E, 0xEF, 0x6F, /* 0xC8-0xCB */
+	0xEF, 0x70, 0xEF, 0x71, 0xB7, 0xE7, 0xEF, 0x72, /* 0xCC-0xCF */
+	0xEF, 0x73, 0xEC, 0xA9, 0xEC, 0xAA, 0xEC, 0xAB, /* 0xD0-0xD3 */
+	0xEF, 0x74, 0xEC, 0xAC, 0xEF, 0x75, 0xEF, 0x76, /* 0xD4-0xD7 */
+	0xC6, 0xAE, 0xEC, 0xAD, 0xEC, 0xAE, 0xEF, 0x77, /* 0xD8-0xDB */
+	0xEF, 0x78, 0xEF, 0x79, 0xB7, 0xC9, 0xCA, 0xB3, /* 0xDC-0xDF */
+	0xEF, 0x7A, 0xEF, 0x7B, 0xEF, 0x7C, 0xEF, 0x7D, /* 0xE0-0xE3 */
+	0xEF, 0x7E, 0xEF, 0x80, 0xEF, 0x81, 0xE2, 0xB8, /* 0xE4-0xE7 */
+	0xF7, 0xCF, 0xEF, 0x82, 0xEF, 0x83, 0xEF, 0x84, /* 0xE8-0xEB */
+	0xEF, 0x85, 0xEF, 0x86, 0xEF, 0x87, 0xEF, 0x88, /* 0xEC-0xEF */
+	0xEF, 0x89, 0xEF, 0x8A, 0xEF, 0x8B, 0xEF, 0x8C, /* 0xF0-0xF3 */
+	0xEF, 0x8D, 0xEF, 0x8E, 0xEF, 0x8F, 0xEF, 0x90, /* 0xF4-0xF7 */
+	0xEF, 0x91, 0xEF, 0x92, 0xEF, 0x93, 0xEF, 0x94, /* 0xF8-0xFB */
+	0xEF, 0x95, 0xEF, 0x96, 0xEF, 0x97, 0xEF, 0x98, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_99[512] = {
+	0xEF, 0x99, 0xEF, 0x9A, 0xEF, 0x9B, 0xEF, 0x9C, /* 0x00-0x03 */
+	0xEF, 0x9D, 0xEF, 0x9E, 0xEF, 0x9F, 0xEF, 0xA0, /* 0x04-0x07 */
+	0xF0, 0x40, 0xF0, 0x41, 0xF0, 0x42, 0xF0, 0x43, /* 0x08-0x0B */
+	0xF0, 0x44, 0xF7, 0xD0, 0xF0, 0x45, 0xF0, 0x46, /* 0x0C-0x0F */
+	0xB2, 0xCD, 0xF0, 0x47, 0xF0, 0x48, 0xF0, 0x49, /* 0x10-0x13 */
+	0xF0, 0x4A, 0xF0, 0x4B, 0xF0, 0x4C, 0xF0, 0x4D, /* 0x14-0x17 */
+	0xF0, 0x4E, 0xF0, 0x4F, 0xF0, 0x50, 0xF0, 0x51, /* 0x18-0x1B */
+	0xF0, 0x52, 0xF0, 0x53, 0xF0, 0x54, 0xF0, 0x55, /* 0x1C-0x1F */
+	0xF0, 0x56, 0xF0, 0x57, 0xF0, 0x58, 0xF0, 0x59, /* 0x20-0x23 */
+	0xF0, 0x5A, 0xF0, 0x5B, 0xF0, 0x5C, 0xF0, 0x5D, /* 0x24-0x27 */
+	0xF0, 0x5E, 0xF0, 0x5F, 0xF0, 0x60, 0xF0, 0x61, /* 0x28-0x2B */
+	0xF0, 0x62, 0xF0, 0x63, 0xF7, 0xD1, 0xF0, 0x64, /* 0x2C-0x2F */
+	0xF0, 0x65, 0xF0, 0x66, 0xF0, 0x67, 0xF0, 0x68, /* 0x30-0x33 */
+	0xF0, 0x69, 0xF0, 0x6A, 0xF0, 0x6B, 0xF0, 0x6C, /* 0x34-0x37 */
+	0xF0, 0x6D, 0xF0, 0x6E, 0xF0, 0x6F, 0xF0, 0x70, /* 0x38-0x3B */
+	0xF0, 0x71, 0xF0, 0x72, 0xF0, 0x73, 0xF0, 0x74, /* 0x3C-0x3F */
+	0xF0, 0x75, 0xF0, 0x76, 0xF0, 0x77, 0xF0, 0x78, /* 0x40-0x43 */
+	0xF0, 0x79, 0xF0, 0x7A, 0xF0, 0x7B, 0xF0, 0x7C, /* 0x44-0x47 */
+	0xF0, 0x7D, 0xF0, 0x7E, 0xF0, 0x80, 0xF0, 0x81, /* 0x48-0x4B */
+	0xF0, 0x82, 0xF0, 0x83, 0xF0, 0x84, 0xF0, 0x85, /* 0x4C-0x4F */
+	0xF0, 0x86, 0xF0, 0x87, 0xF0, 0x88, 0xF0, 0x89, /* 0x50-0x53 */
+	0xF7, 0xD3, 0xF7, 0xD2, 0xF0, 0x8A, 0xF0, 0x8B, /* 0x54-0x57 */
+	0xF0, 0x8C, 0xF0, 0x8D, 0xF0, 0x8E, 0xF0, 0x8F, /* 0x58-0x5B */
+	0xF0, 0x90, 0xF0, 0x91, 0xF0, 0x92, 0xF0, 0x93, /* 0x5C-0x5F */
+	0xF0, 0x94, 0xF0, 0x95, 0xF0, 0x96, 0xE2, 0xBB, /* 0x60-0x63 */
+	0xF0, 0x97, 0xBC, 0xA2, 0xF0, 0x98, 0xE2, 0xBC, /* 0x64-0x67 */
+	0xE2, 0xBD, 0xE2, 0xBE, 0xE2, 0xBF, 0xE2, 0xC0, /* 0x68-0x6B */
+	0xE2, 0xC1, 0xB7, 0xB9, 0xD2, 0xFB, 0xBD, 0xA4, /* 0x6C-0x6F */
+	0xCA, 0xCE, 0xB1, 0xA5, 0xCB, 0xC7, 0xF0, 0x99, /* 0x70-0x73 */
+	0xE2, 0xC2, 0xB6, 0xFC, 0xC8, 0xC4, 0xE2, 0xC3, /* 0x74-0x77 */
+	0xF0, 0x9A, 0xF0, 0x9B, 0xBD, 0xC8, 0xF0, 0x9C, /* 0x78-0x7B */
+	0xB1, 0xFD, 0xE2, 0xC4, 0xF0, 0x9D, 0xB6, 0xF6, /* 0x7C-0x7F */
+	
+	0xE2, 0xC5, 0xC4, 0xD9, 0xF0, 0x9E, 0xF0, 0x9F, /* 0x80-0x83 */
+	0xE2, 0xC6, 0xCF, 0xDA, 0xB9, 0xDD, 0xE2, 0xC7, /* 0x84-0x87 */
+	0xC0, 0xA1, 0xF0, 0xA0, 0xE2, 0xC8, 0xB2, 0xF6, /* 0x88-0x8B */
+	0xF1, 0x40, 0xE2, 0xC9, 0xF1, 0x41, 0xC1, 0xF3, /* 0x8C-0x8F */
+	0xE2, 0xCA, 0xE2, 0xCB, 0xC2, 0xF8, 0xE2, 0xCC, /* 0x90-0x93 */
+	0xE2, 0xCD, 0xE2, 0xCE, 0xCA, 0xD7, 0xD8, 0xB8, /* 0x94-0x97 */
+	0xD9, 0xE5, 0xCF, 0xE3, 0xF1, 0x42, 0xF1, 0x43, /* 0x98-0x9B */
+	0xF1, 0x44, 0xF1, 0x45, 0xF1, 0x46, 0xF1, 0x47, /* 0x9C-0x9F */
+	0xF1, 0x48, 0xF1, 0x49, 0xF1, 0x4A, 0xF1, 0x4B, /* 0xA0-0xA3 */
+	0xF1, 0x4C, 0xF0, 0xA5, 0xF1, 0x4D, 0xF1, 0x4E, /* 0xA4-0xA7 */
+	0xDC, 0xB0, 0xF1, 0x4F, 0xF1, 0x50, 0xF1, 0x51, /* 0xA8-0xAB */
+	0xF1, 0x52, 0xF1, 0x53, 0xF1, 0x54, 0xF1, 0x55, /* 0xAC-0xAF */
+	0xF1, 0x56, 0xF1, 0x57, 0xF1, 0x58, 0xF1, 0x59, /* 0xB0-0xB3 */
+	0xF1, 0x5A, 0xF1, 0x5B, 0xF1, 0x5C, 0xF1, 0x5D, /* 0xB4-0xB7 */
+	0xF1, 0x5E, 0xF1, 0x5F, 0xF1, 0x60, 0xF1, 0x61, /* 0xB8-0xBB */
+	0xF1, 0x62, 0xF1, 0x63, 0xF1, 0x64, 0xF1, 0x65, /* 0xBC-0xBF */
+	0xF1, 0x66, 0xF1, 0x67, 0xF1, 0x68, 0xF1, 0x69, /* 0xC0-0xC3 */
+	0xF1, 0x6A, 0xF1, 0x6B, 0xF1, 0x6C, 0xF1, 0x6D, /* 0xC4-0xC7 */
+	0xF1, 0x6E, 0xF1, 0x6F, 0xF1, 0x70, 0xF1, 0x71, /* 0xC8-0xCB */
+	0xF1, 0x72, 0xF1, 0x73, 0xF1, 0x74, 0xF1, 0x75, /* 0xCC-0xCF */
+	0xF1, 0x76, 0xF1, 0x77, 0xF1, 0x78, 0xF1, 0x79, /* 0xD0-0xD3 */
+	0xF1, 0x7A, 0xF1, 0x7B, 0xF1, 0x7C, 0xF1, 0x7D, /* 0xD4-0xD7 */
+	0xF1, 0x7E, 0xF1, 0x80, 0xF1, 0x81, 0xF1, 0x82, /* 0xD8-0xDB */
+	0xF1, 0x83, 0xF1, 0x84, 0xF1, 0x85, 0xF1, 0x86, /* 0xDC-0xDF */
+	0xF1, 0x87, 0xF1, 0x88, 0xF1, 0x89, 0xF1, 0x8A, /* 0xE0-0xE3 */
+	0xF1, 0x8B, 0xF1, 0x8C, 0xF1, 0x8D, 0xF1, 0x8E, /* 0xE4-0xE7 */
+	0xF1, 0x8F, 0xF1, 0x90, 0xF1, 0x91, 0xF1, 0x92, /* 0xE8-0xEB */
+	0xF1, 0x93, 0xF1, 0x94, 0xF1, 0x95, 0xF1, 0x96, /* 0xEC-0xEF */
+	0xF1, 0x97, 0xF1, 0x98, 0xF1, 0x99, 0xF1, 0x9A, /* 0xF0-0xF3 */
+	0xF1, 0x9B, 0xF1, 0x9C, 0xF1, 0x9D, 0xF1, 0x9E, /* 0xF4-0xF7 */
+	0xF1, 0x9F, 0xF1, 0xA0, 0xF2, 0x40, 0xF2, 0x41, /* 0xF8-0xFB */
+	0xF2, 0x42, 0xF2, 0x43, 0xF2, 0x44, 0xF2, 0x45, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9A[512] = {
+	0xF2, 0x46, 0xF2, 0x47, 0xF2, 0x48, 0xF2, 0x49, /* 0x00-0x03 */
+	0xF2, 0x4A, 0xF2, 0x4B, 0xF2, 0x4C, 0xF2, 0x4D, /* 0x04-0x07 */
+	0xF2, 0x4E, 0xF2, 0x4F, 0xF2, 0x50, 0xF2, 0x51, /* 0x08-0x0B */
+	0xF2, 0x52, 0xF2, 0x53, 0xF2, 0x54, 0xF2, 0x55, /* 0x0C-0x0F */
+	0xF2, 0x56, 0xF2, 0x57, 0xF2, 0x58, 0xF2, 0x59, /* 0x10-0x13 */
+	0xF2, 0x5A, 0xF2, 0x5B, 0xF2, 0x5C, 0xF2, 0x5D, /* 0x14-0x17 */
+	0xF2, 0x5E, 0xF2, 0x5F, 0xF2, 0x60, 0xF2, 0x61, /* 0x18-0x1B */
+	0xF2, 0x62, 0xF2, 0x63, 0xF2, 0x64, 0xF2, 0x65, /* 0x1C-0x1F */
+	0xF2, 0x66, 0xF2, 0x67, 0xF2, 0x68, 0xF2, 0x69, /* 0x20-0x23 */
+	0xF2, 0x6A, 0xF2, 0x6B, 0xF2, 0x6C, 0xF2, 0x6D, /* 0x24-0x27 */
+	0xF2, 0x6E, 0xF2, 0x6F, 0xF2, 0x70, 0xF2, 0x71, /* 0x28-0x2B */
+	0xF2, 0x72, 0xF2, 0x73, 0xF2, 0x74, 0xF2, 0x75, /* 0x2C-0x2F */
+	0xF2, 0x76, 0xF2, 0x77, 0xF2, 0x78, 0xF2, 0x79, /* 0x30-0x33 */
+	0xF2, 0x7A, 0xF2, 0x7B, 0xF2, 0x7C, 0xF2, 0x7D, /* 0x34-0x37 */
+	0xF2, 0x7E, 0xF2, 0x80, 0xF2, 0x81, 0xF2, 0x82, /* 0x38-0x3B */
+	0xF2, 0x83, 0xF2, 0x84, 0xF2, 0x85, 0xF2, 0x86, /* 0x3C-0x3F */
+	0xF2, 0x87, 0xF2, 0x88, 0xF2, 0x89, 0xF2, 0x8A, /* 0x40-0x43 */
+	0xF2, 0x8B, 0xF2, 0x8C, 0xF2, 0x8D, 0xF2, 0x8E, /* 0x44-0x47 */
+	0xF2, 0x8F, 0xF2, 0x90, 0xF2, 0x91, 0xF2, 0x92, /* 0x48-0x4B */
+	0xF2, 0x93, 0xF2, 0x94, 0xF2, 0x95, 0xF2, 0x96, /* 0x4C-0x4F */
+	0xF2, 0x97, 0xF2, 0x98, 0xF2, 0x99, 0xF2, 0x9A, /* 0x50-0x53 */
+	0xF2, 0x9B, 0xF2, 0x9C, 0xF2, 0x9D, 0xF2, 0x9E, /* 0x54-0x57 */
+	0xF2, 0x9F, 0xF2, 0xA0, 0xF3, 0x40, 0xF3, 0x41, /* 0x58-0x5B */
+	0xF3, 0x42, 0xF3, 0x43, 0xF3, 0x44, 0xF3, 0x45, /* 0x5C-0x5F */
+	0xF3, 0x46, 0xF3, 0x47, 0xF3, 0x48, 0xF3, 0x49, /* 0x60-0x63 */
+	0xF3, 0x4A, 0xF3, 0x4B, 0xF3, 0x4C, 0xF3, 0x4D, /* 0x64-0x67 */
+	0xF3, 0x4E, 0xF3, 0x4F, 0xF3, 0x50, 0xF3, 0x51, /* 0x68-0x6B */
+	0xC2, 0xED, 0xD4, 0xA6, 0xCD, 0xD4, 0xD1, 0xB1, /* 0x6C-0x6F */
+	0xB3, 0xDB, 0xC7, 0xFD, 0xF3, 0x52, 0xB2, 0xB5, /* 0x70-0x73 */
+	0xC2, 0xBF, 0xE6, 0xE0, 0xCA, 0xBB, 0xE6, 0xE1, /* 0x74-0x77 */
+	0xE6, 0xE2, 0xBE, 0xD4, 0xE6, 0xE3, 0xD7, 0xA4, /* 0x78-0x7B */
+	0xCD, 0xD5, 0xE6, 0xE5, 0xBC, 0xDD, 0xE6, 0xE4, /* 0x7C-0x7F */
+	
+	0xE6, 0xE6, 0xE6, 0xE7, 0xC2, 0xEE, 0xF3, 0x53, /* 0x80-0x83 */
+	0xBD, 0xBE, 0xE6, 0xE8, 0xC2, 0xE6, 0xBA, 0xA7, /* 0x84-0x87 */
+	0xE6, 0xE9, 0xF3, 0x54, 0xE6, 0xEA, 0xB3, 0xD2, /* 0x88-0x8B */
+	0xD1, 0xE9, 0xF3, 0x55, 0xF3, 0x56, 0xBF, 0xA5, /* 0x8C-0x8F */
+	0xE6, 0xEB, 0xC6, 0xEF, 0xE6, 0xEC, 0xE6, 0xED, /* 0x90-0x93 */
+	0xF3, 0x57, 0xF3, 0x58, 0xE6, 0xEE, 0xC6, 0xAD, /* 0x94-0x97 */
+	0xE6, 0xEF, 0xF3, 0x59, 0xC9, 0xA7, 0xE6, 0xF0, /* 0x98-0x9B */
+	0xE6, 0xF1, 0xE6, 0xF2, 0xE5, 0xB9, 0xE6, 0xF3, /* 0x9C-0x9F */
+	0xE6, 0xF4, 0xC2, 0xE2, 0xE6, 0xF5, 0xE6, 0xF6, /* 0xA0-0xA3 */
+	0xD6, 0xE8, 0xE6, 0xF7, 0xF3, 0x5A, 0xE6, 0xF8, /* 0xA4-0xA7 */
+	0xB9, 0xC7, 0xF3, 0x5B, 0xF3, 0x5C, 0xF3, 0x5D, /* 0xA8-0xAB */
+	0xF3, 0x5E, 0xF3, 0x5F, 0xF3, 0x60, 0xF3, 0x61, /* 0xAC-0xAF */
+	0xF7, 0xBB, 0xF7, 0xBA, 0xF3, 0x62, 0xF3, 0x63, /* 0xB0-0xB3 */
+	0xF3, 0x64, 0xF3, 0x65, 0xF7, 0xBE, 0xF7, 0xBC, /* 0xB4-0xB7 */
+	0xBA, 0xA1, 0xF3, 0x66, 0xF7, 0xBF, 0xF3, 0x67, /* 0xB8-0xBB */
+	0xF7, 0xC0, 0xF3, 0x68, 0xF3, 0x69, 0xF3, 0x6A, /* 0xBC-0xBF */
+	0xF7, 0xC2, 0xF7, 0xC1, 0xF7, 0xC4, 0xF3, 0x6B, /* 0xC0-0xC3 */
+	0xF3, 0x6C, 0xF7, 0xC3, 0xF3, 0x6D, 0xF3, 0x6E, /* 0xC4-0xC7 */
+	0xF3, 0x6F, 0xF3, 0x70, 0xF3, 0x71, 0xF7, 0xC5, /* 0xC8-0xCB */
+	0xF7, 0xC6, 0xF3, 0x72, 0xF3, 0x73, 0xF3, 0x74, /* 0xCC-0xCF */
+	0xF3, 0x75, 0xF7, 0xC7, 0xF3, 0x76, 0xCB, 0xE8, /* 0xD0-0xD3 */
+	0xF3, 0x77, 0xF3, 0x78, 0xF3, 0x79, 0xF3, 0x7A, /* 0xD4-0xD7 */
+	0xB8, 0xDF, 0xF3, 0x7B, 0xF3, 0x7C, 0xF3, 0x7D, /* 0xD8-0xDB */
+	0xF3, 0x7E, 0xF3, 0x80, 0xF3, 0x81, 0xF7, 0xD4, /* 0xDC-0xDF */
+	0xF3, 0x82, 0xF7, 0xD5, 0xF3, 0x83, 0xF3, 0x84, /* 0xE0-0xE3 */
+	0xF3, 0x85, 0xF3, 0x86, 0xF7, 0xD6, 0xF3, 0x87, /* 0xE4-0xE7 */
+	0xF3, 0x88, 0xF3, 0x89, 0xF3, 0x8A, 0xF7, 0xD8, /* 0xE8-0xEB */
+	0xF3, 0x8B, 0xF7, 0xDA, 0xF3, 0x8C, 0xF7, 0xD7, /* 0xEC-0xEF */
+	0xF3, 0x8D, 0xF3, 0x8E, 0xF3, 0x8F, 0xF3, 0x90, /* 0xF0-0xF3 */
+	0xF3, 0x91, 0xF3, 0x92, 0xF3, 0x93, 0xF3, 0x94, /* 0xF4-0xF7 */
+	0xF3, 0x95, 0xF7, 0xDB, 0xF3, 0x96, 0xF7, 0xD9, /* 0xF8-0xFB */
+	0xF3, 0x97, 0xF3, 0x98, 0xF3, 0x99, 0xF3, 0x9A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9B[512] = {
+	0xF3, 0x9B, 0xF3, 0x9C, 0xF3, 0x9D, 0xD7, 0xD7, /* 0x00-0x03 */
+	0xF3, 0x9E, 0xF3, 0x9F, 0xF3, 0xA0, 0xF4, 0x40, /* 0x04-0x07 */
+	0xF7, 0xDC, 0xF4, 0x41, 0xF4, 0x42, 0xF4, 0x43, /* 0x08-0x0B */
+	0xF4, 0x44, 0xF4, 0x45, 0xF4, 0x46, 0xF7, 0xDD, /* 0x0C-0x0F */
+	0xF4, 0x47, 0xF4, 0x48, 0xF4, 0x49, 0xF7, 0xDE, /* 0x10-0x13 */
+	0xF4, 0x4A, 0xF4, 0x4B, 0xF4, 0x4C, 0xF4, 0x4D, /* 0x14-0x17 */
+	0xF4, 0x4E, 0xF4, 0x4F, 0xF4, 0x50, 0xF4, 0x51, /* 0x18-0x1B */
+	0xF4, 0x52, 0xF4, 0x53, 0xF4, 0x54, 0xF7, 0xDF, /* 0x1C-0x1F */
+	0xF4, 0x55, 0xF4, 0x56, 0xF4, 0x57, 0xF7, 0xE0, /* 0x20-0x23 */
+	0xF4, 0x58, 0xF4, 0x59, 0xF4, 0x5A, 0xF4, 0x5B, /* 0x24-0x27 */
+	0xF4, 0x5C, 0xF4, 0x5D, 0xF4, 0x5E, 0xF4, 0x5F, /* 0x28-0x2B */
+	0xF4, 0x60, 0xF4, 0x61, 0xF4, 0x62, 0xDB, 0xCB, /* 0x2C-0x2F */
+	0xF4, 0x63, 0xF4, 0x64, 0xD8, 0xAA, 0xF4, 0x65, /* 0x30-0x33 */
+	0xF4, 0x66, 0xF4, 0x67, 0xF4, 0x68, 0xF4, 0x69, /* 0x34-0x37 */
+	0xF4, 0x6A, 0xF4, 0x6B, 0xF4, 0x6C, 0xE5, 0xF7, /* 0x38-0x3B */
+	0xB9, 0xED, 0xF4, 0x6D, 0xF4, 0x6E, 0xF4, 0x6F, /* 0x3C-0x3F */
+	0xF4, 0x70, 0xBF, 0xFD, 0xBB, 0xEA, 0xF7, 0xC9, /* 0x40-0x43 */
+	0xC6, 0xC7, 0xF7, 0xC8, 0xF4, 0x71, 0xF7, 0xCA, /* 0x44-0x47 */
+	0xF7, 0xCC, 0xF7, 0xCB, 0xF4, 0x72, 0xF4, 0x73, /* 0x48-0x4B */
+	0xF4, 0x74, 0xF7, 0xCD, 0xF4, 0x75, 0xCE, 0xBA, /* 0x4C-0x4F */
+	0xF4, 0x76, 0xF7, 0xCE, 0xF4, 0x77, 0xF4, 0x78, /* 0x50-0x53 */
+	0xC4, 0xA7, 0xF4, 0x79, 0xF4, 0x7A, 0xF4, 0x7B, /* 0x54-0x57 */
+	0xF4, 0x7C, 0xF4, 0x7D, 0xF4, 0x7E, 0xF4, 0x80, /* 0x58-0x5B */
+	0xF4, 0x81, 0xF4, 0x82, 0xF4, 0x83, 0xF4, 0x84, /* 0x5C-0x5F */
+	0xF4, 0x85, 0xF4, 0x86, 0xF4, 0x87, 0xF4, 0x88, /* 0x60-0x63 */
+	0xF4, 0x89, 0xF4, 0x8A, 0xF4, 0x8B, 0xF4, 0x8C, /* 0x64-0x67 */
+	0xF4, 0x8D, 0xF4, 0x8E, 0xF4, 0x8F, 0xF4, 0x90, /* 0x68-0x6B */
+	0xF4, 0x91, 0xF4, 0x92, 0xF4, 0x93, 0xF4, 0x94, /* 0x6C-0x6F */
+	0xF4, 0x95, 0xF4, 0x96, 0xF4, 0x97, 0xF4, 0x98, /* 0x70-0x73 */
+	0xF4, 0x99, 0xF4, 0x9A, 0xF4, 0x9B, 0xF4, 0x9C, /* 0x74-0x77 */
+	0xF4, 0x9D, 0xF4, 0x9E, 0xF4, 0x9F, 0xF4, 0xA0, /* 0x78-0x7B */
+	0xF5, 0x40, 0xF5, 0x41, 0xF5, 0x42, 0xF5, 0x43, /* 0x7C-0x7F */
+	
+	0xF5, 0x44, 0xF5, 0x45, 0xF5, 0x46, 0xF5, 0x47, /* 0x80-0x83 */
+	0xF5, 0x48, 0xF5, 0x49, 0xF5, 0x4A, 0xF5, 0x4B, /* 0x84-0x87 */
+	0xF5, 0x4C, 0xF5, 0x4D, 0xF5, 0x4E, 0xF5, 0x4F, /* 0x88-0x8B */
+	0xF5, 0x50, 0xF5, 0x51, 0xF5, 0x52, 0xF5, 0x53, /* 0x8C-0x8F */
+	0xF5, 0x54, 0xF5, 0x55, 0xF5, 0x56, 0xF5, 0x57, /* 0x90-0x93 */
+	0xF5, 0x58, 0xF5, 0x59, 0xF5, 0x5A, 0xF5, 0x5B, /* 0x94-0x97 */
+	0xF5, 0x5C, 0xF5, 0x5D, 0xF5, 0x5E, 0xF5, 0x5F, /* 0x98-0x9B */
+	0xF5, 0x60, 0xF5, 0x61, 0xF5, 0x62, 0xF5, 0x63, /* 0x9C-0x9F */
+	0xF5, 0x64, 0xF5, 0x65, 0xF5, 0x66, 0xF5, 0x67, /* 0xA0-0xA3 */
+	0xF5, 0x68, 0xF5, 0x69, 0xF5, 0x6A, 0xF5, 0x6B, /* 0xA4-0xA7 */
+	0xF5, 0x6C, 0xF5, 0x6D, 0xF5, 0x6E, 0xF5, 0x6F, /* 0xA8-0xAB */
+	0xF5, 0x70, 0xF5, 0x71, 0xF5, 0x72, 0xF5, 0x73, /* 0xAC-0xAF */
+	0xF5, 0x74, 0xF5, 0x75, 0xF5, 0x76, 0xF5, 0x77, /* 0xB0-0xB3 */
+	0xF5, 0x78, 0xF5, 0x79, 0xF5, 0x7A, 0xF5, 0x7B, /* 0xB4-0xB7 */
+	0xF5, 0x7C, 0xF5, 0x7D, 0xF5, 0x7E, 0xF5, 0x80, /* 0xB8-0xBB */
+	0xF5, 0x81, 0xF5, 0x82, 0xF5, 0x83, 0xF5, 0x84, /* 0xBC-0xBF */
+	0xF5, 0x85, 0xF5, 0x86, 0xF5, 0x87, 0xF5, 0x88, /* 0xC0-0xC3 */
+	0xF5, 0x89, 0xF5, 0x8A, 0xF5, 0x8B, 0xF5, 0x8C, /* 0xC4-0xC7 */
+	0xF5, 0x8D, 0xF5, 0x8E, 0xF5, 0x8F, 0xF5, 0x90, /* 0xC8-0xCB */
+	0xF5, 0x91, 0xF5, 0x92, 0xF5, 0x93, 0xF5, 0x94, /* 0xCC-0xCF */
+	0xF5, 0x95, 0xF5, 0x96, 0xF5, 0x97, 0xF5, 0x98, /* 0xD0-0xD3 */
+	0xF5, 0x99, 0xF5, 0x9A, 0xF5, 0x9B, 0xF5, 0x9C, /* 0xD4-0xD7 */
+	0xF5, 0x9D, 0xF5, 0x9E, 0xF5, 0x9F, 0xF5, 0xA0, /* 0xD8-0xDB */
+	0xF6, 0x40, 0xF6, 0x41, 0xF6, 0x42, 0xF6, 0x43, /* 0xDC-0xDF */
+	0xF6, 0x44, 0xF6, 0x45, 0xF6, 0x46, 0xF6, 0x47, /* 0xE0-0xE3 */
+	0xF6, 0x48, 0xF6, 0x49, 0xF6, 0x4A, 0xF6, 0x4B, /* 0xE4-0xE7 */
+	0xF6, 0x4C, 0xF6, 0x4D, 0xF6, 0x4E, 0xF6, 0x4F, /* 0xE8-0xEB */
+	0xF6, 0x50, 0xF6, 0x51, 0xF6, 0x52, 0xF6, 0x53, /* 0xEC-0xEF */
+	0xF6, 0x54, 0xF6, 0x55, 0xF6, 0x56, 0xF6, 0x57, /* 0xF0-0xF3 */
+	0xF6, 0x58, 0xF6, 0x59, 0xF6, 0x5A, 0xF6, 0x5B, /* 0xF4-0xF7 */
+	0xF6, 0x5C, 0xF6, 0x5D, 0xF6, 0x5E, 0xF6, 0x5F, /* 0xF8-0xFB */
+	0xF6, 0x60, 0xF6, 0x61, 0xF6, 0x62, 0xF6, 0x63, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9C[512] = {
+	0xF6, 0x64, 0xF6, 0x65, 0xF6, 0x66, 0xF6, 0x67, /* 0x00-0x03 */
+	0xF6, 0x68, 0xF6, 0x69, 0xF6, 0x6A, 0xF6, 0x6B, /* 0x04-0x07 */
+	0xF6, 0x6C, 0xF6, 0x6D, 0xF6, 0x6E, 0xF6, 0x6F, /* 0x08-0x0B */
+	0xF6, 0x70, 0xF6, 0x71, 0xF6, 0x72, 0xF6, 0x73, /* 0x0C-0x0F */
+	0xF6, 0x74, 0xF6, 0x75, 0xF6, 0x76, 0xF6, 0x77, /* 0x10-0x13 */
+	0xF6, 0x78, 0xF6, 0x79, 0xF6, 0x7A, 0xF6, 0x7B, /* 0x14-0x17 */
+	0xF6, 0x7C, 0xF6, 0x7D, 0xF6, 0x7E, 0xF6, 0x80, /* 0x18-0x1B */
+	0xF6, 0x81, 0xF6, 0x82, 0xF6, 0x83, 0xF6, 0x84, /* 0x1C-0x1F */
+	0xF6, 0x85, 0xF6, 0x86, 0xF6, 0x87, 0xF6, 0x88, /* 0x20-0x23 */
+	0xF6, 0x89, 0xF6, 0x8A, 0xF6, 0x8B, 0xF6, 0x8C, /* 0x24-0x27 */
+	0xF6, 0x8D, 0xF6, 0x8E, 0xF6, 0x8F, 0xF6, 0x90, /* 0x28-0x2B */
+	0xF6, 0x91, 0xF6, 0x92, 0xF6, 0x93, 0xF6, 0x94, /* 0x2C-0x2F */
+	0xF6, 0x95, 0xF6, 0x96, 0xF6, 0x97, 0xF6, 0x98, /* 0x30-0x33 */
+	0xF6, 0x99, 0xF6, 0x9A, 0xF6, 0x9B, 0xF6, 0x9C, /* 0x34-0x37 */
+	0xF6, 0x9D, 0xF6, 0x9E, 0xF6, 0x9F, 0xF6, 0xA0, /* 0x38-0x3B */
+	0xF7, 0x40, 0xF7, 0x41, 0xF7, 0x42, 0xF7, 0x43, /* 0x3C-0x3F */
+	0xF7, 0x44, 0xF7, 0x45, 0xF7, 0x46, 0xF7, 0x47, /* 0x40-0x43 */
+	0xF7, 0x48, 0xF7, 0x49, 0xF7, 0x4A, 0xF7, 0x4B, /* 0x44-0x47 */
+	0xF7, 0x4C, 0xF7, 0x4D, 0xF7, 0x4E, 0xF7, 0x4F, /* 0x48-0x4B */
+	0xF7, 0x50, 0xF7, 0x51, 0xF7, 0x52, 0xF7, 0x53, /* 0x4C-0x4F */
+	0xF7, 0x54, 0xF7, 0x55, 0xF7, 0x56, 0xF7, 0x57, /* 0x50-0x53 */
+	0xF7, 0x58, 0xF7, 0x59, 0xF7, 0x5A, 0xF7, 0x5B, /* 0x54-0x57 */
+	0xF7, 0x5C, 0xF7, 0x5D, 0xF7, 0x5E, 0xF7, 0x5F, /* 0x58-0x5B */
+	0xF7, 0x60, 0xF7, 0x61, 0xF7, 0x62, 0xF7, 0x63, /* 0x5C-0x5F */
+	0xF7, 0x64, 0xF7, 0x65, 0xF7, 0x66, 0xF7, 0x67, /* 0x60-0x63 */
+	0xF7, 0x68, 0xF7, 0x69, 0xF7, 0x6A, 0xF7, 0x6B, /* 0x64-0x67 */
+	0xF7, 0x6C, 0xF7, 0x6D, 0xF7, 0x6E, 0xF7, 0x6F, /* 0x68-0x6B */
+	0xF7, 0x70, 0xF7, 0x71, 0xF7, 0x72, 0xF7, 0x73, /* 0x6C-0x6F */
+	0xF7, 0x74, 0xF7, 0x75, 0xF7, 0x76, 0xF7, 0x77, /* 0x70-0x73 */
+	0xF7, 0x78, 0xF7, 0x79, 0xF7, 0x7A, 0xF7, 0x7B, /* 0x74-0x77 */
+	0xF7, 0x7C, 0xF7, 0x7D, 0xF7, 0x7E, 0xF7, 0x80, /* 0x78-0x7B */
+	0xD3, 0xE3, 0xF7, 0x81, 0xF7, 0x82, 0xF6, 0xCF, /* 0x7C-0x7F */
+	
+	0xF7, 0x83, 0xC2, 0xB3, 0xF6, 0xD0, 0xF7, 0x84, /* 0x80-0x83 */
+	0xF7, 0x85, 0xF6, 0xD1, 0xF6, 0xD2, 0xF6, 0xD3, /* 0x84-0x87 */
+	0xF6, 0xD4, 0xF7, 0x86, 0xF7, 0x87, 0xF6, 0xD6, /* 0x88-0x8B */
+	0xF7, 0x88, 0xB1, 0xAB, 0xF6, 0xD7, 0xF7, 0x89, /* 0x8C-0x8F */
+	0xF6, 0xD8, 0xF6, 0xD9, 0xF6, 0xDA, 0xF7, 0x8A, /* 0x90-0x93 */
+	0xF6, 0xDB, 0xF6, 0xDC, 0xF7, 0x8B, 0xF7, 0x8C, /* 0x94-0x97 */
+	0xF7, 0x8D, 0xF7, 0x8E, 0xF6, 0xDD, 0xF6, 0xDE, /* 0x98-0x9B */
+	0xCF, 0xCA, 0xF7, 0x8F, 0xF6, 0xDF, 0xF6, 0xE0, /* 0x9C-0x9F */
+	0xF6, 0xE1, 0xF6, 0xE2, 0xF6, 0xE3, 0xF6, 0xE4, /* 0xA0-0xA3 */
+	0xC0, 0xF0, 0xF6, 0xE5, 0xF6, 0xE6, 0xF6, 0xE7, /* 0xA4-0xA7 */
+	0xF6, 0xE8, 0xF6, 0xE9, 0xF7, 0x90, 0xF6, 0xEA, /* 0xA8-0xAB */
+	0xF7, 0x91, 0xF6, 0xEB, 0xF6, 0xEC, 0xF7, 0x92, /* 0xAC-0xAF */
+	0xF6, 0xED, 0xF6, 0xEE, 0xF6, 0xEF, 0xF6, 0xF0, /* 0xB0-0xB3 */
+	0xF6, 0xF1, 0xF6, 0xF2, 0xF6, 0xF3, 0xF6, 0xF4, /* 0xB4-0xB7 */
+	0xBE, 0xA8, 0xF7, 0x93, 0xF6, 0xF5, 0xF6, 0xF6, /* 0xB8-0xBB */
+	0xF6, 0xF7, 0xF6, 0xF8, 0xF7, 0x94, 0xF7, 0x95, /* 0xBC-0xBF */
+	0xF7, 0x96, 0xF7, 0x97, 0xF7, 0x98, 0xC8, 0xFA, /* 0xC0-0xC3 */
+	0xF6, 0xF9, 0xF6, 0xFA, 0xF6, 0xFB, 0xF6, 0xFC, /* 0xC4-0xC7 */
+	0xF7, 0x99, 0xF7, 0x9A, 0xF6, 0xFD, 0xF6, 0xFE, /* 0xC8-0xCB */
+	0xF7, 0xA1, 0xF7, 0xA2, 0xF7, 0xA3, 0xF7, 0xA4, /* 0xCC-0xCF */
+	0xF7, 0xA5, 0xF7, 0x9B, 0xF7, 0x9C, 0xF7, 0xA6, /* 0xD0-0xD3 */
+	0xF7, 0xA7, 0xF7, 0xA8, 0xB1, 0xEE, 0xF7, 0xA9, /* 0xD4-0xD7 */
+	0xF7, 0xAA, 0xF7, 0xAB, 0xF7, 0x9D, 0xF7, 0x9E, /* 0xD8-0xDB */
+	0xF7, 0xAC, 0xF7, 0xAD, 0xC1, 0xDB, 0xF7, 0xAE, /* 0xDC-0xDF */
+	0xF7, 0x9F, 0xF7, 0xA0, 0xF7, 0xAF, 0xF8, 0x40, /* 0xE0-0xE3 */
+	0xF8, 0x41, 0xF8, 0x42, 0xF8, 0x43, 0xF8, 0x44, /* 0xE4-0xE7 */
+	0xF8, 0x45, 0xF8, 0x46, 0xF8, 0x47, 0xF8, 0x48, /* 0xE8-0xEB */
+	0xF8, 0x49, 0xF8, 0x4A, 0xF8, 0x4B, 0xF8, 0x4C, /* 0xEC-0xEF */
+	0xF8, 0x4D, 0xF8, 0x4E, 0xF8, 0x4F, 0xF8, 0x50, /* 0xF0-0xF3 */
+	0xF8, 0x51, 0xF8, 0x52, 0xF8, 0x53, 0xF8, 0x54, /* 0xF4-0xF7 */
+	0xF8, 0x55, 0xF8, 0x56, 0xF8, 0x57, 0xF8, 0x58, /* 0xF8-0xFB */
+	0xF8, 0x59, 0xF8, 0x5A, 0xF8, 0x5B, 0xF8, 0x5C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9D[512] = {
+	0xF8, 0x5D, 0xF8, 0x5E, 0xF8, 0x5F, 0xF8, 0x60, /* 0x00-0x03 */
+	0xF8, 0x61, 0xF8, 0x62, 0xF8, 0x63, 0xF8, 0x64, /* 0x04-0x07 */
+	0xF8, 0x65, 0xF8, 0x66, 0xF8, 0x67, 0xF8, 0x68, /* 0x08-0x0B */
+	0xF8, 0x69, 0xF8, 0x6A, 0xF8, 0x6B, 0xF8, 0x6C, /* 0x0C-0x0F */
+	0xF8, 0x6D, 0xF8, 0x6E, 0xF8, 0x6F, 0xF8, 0x70, /* 0x10-0x13 */
+	0xF8, 0x71, 0xF8, 0x72, 0xF8, 0x73, 0xF8, 0x74, /* 0x14-0x17 */
+	0xF8, 0x75, 0xF8, 0x76, 0xF8, 0x77, 0xF8, 0x78, /* 0x18-0x1B */
+	0xF8, 0x79, 0xF8, 0x7A, 0xF8, 0x7B, 0xF8, 0x7C, /* 0x1C-0x1F */
+	0xF8, 0x7D, 0xF8, 0x7E, 0xF8, 0x80, 0xF8, 0x81, /* 0x20-0x23 */
+	0xF8, 0x82, 0xF8, 0x83, 0xF8, 0x84, 0xF8, 0x85, /* 0x24-0x27 */
+	0xF8, 0x86, 0xF8, 0x87, 0xF8, 0x88, 0xF8, 0x89, /* 0x28-0x2B */
+	0xF8, 0x8A, 0xF8, 0x8B, 0xF8, 0x8C, 0xF8, 0x8D, /* 0x2C-0x2F */
+	0xF8, 0x8E, 0xF8, 0x8F, 0xF8, 0x90, 0xF8, 0x91, /* 0x30-0x33 */
+	0xF8, 0x92, 0xF8, 0x93, 0xF8, 0x94, 0xF8, 0x95, /* 0x34-0x37 */
+	0xF8, 0x96, 0xF8, 0x97, 0xF8, 0x98, 0xF8, 0x99, /* 0x38-0x3B */
+	0xF8, 0x9A, 0xF8, 0x9B, 0xF8, 0x9C, 0xF8, 0x9D, /* 0x3C-0x3F */
+	0xF8, 0x9E, 0xF8, 0x9F, 0xF8, 0xA0, 0xF9, 0x40, /* 0x40-0x43 */
+	0xF9, 0x41, 0xF9, 0x42, 0xF9, 0x43, 0xF9, 0x44, /* 0x44-0x47 */
+	0xF9, 0x45, 0xF9, 0x46, 0xF9, 0x47, 0xF9, 0x48, /* 0x48-0x4B */
+	0xF9, 0x49, 0xF9, 0x4A, 0xF9, 0x4B, 0xF9, 0x4C, /* 0x4C-0x4F */
+	0xF9, 0x4D, 0xF9, 0x4E, 0xF9, 0x4F, 0xF9, 0x50, /* 0x50-0x53 */
+	0xF9, 0x51, 0xF9, 0x52, 0xF9, 0x53, 0xF9, 0x54, /* 0x54-0x57 */
+	0xF9, 0x55, 0xF9, 0x56, 0xF9, 0x57, 0xF9, 0x58, /* 0x58-0x5B */
+	0xF9, 0x59, 0xF9, 0x5A, 0xF9, 0x5B, 0xF9, 0x5C, /* 0x5C-0x5F */
+	0xF9, 0x5D, 0xF9, 0x5E, 0xF9, 0x5F, 0xF9, 0x60, /* 0x60-0x63 */
+	0xF9, 0x61, 0xF9, 0x62, 0xF9, 0x63, 0xF9, 0x64, /* 0x64-0x67 */
+	0xF9, 0x65, 0xF9, 0x66, 0xF9, 0x67, 0xF9, 0x68, /* 0x68-0x6B */
+	0xF9, 0x69, 0xF9, 0x6A, 0xF9, 0x6B, 0xF9, 0x6C, /* 0x6C-0x6F */
+	0xF9, 0x6D, 0xF9, 0x6E, 0xF9, 0x6F, 0xF9, 0x70, /* 0x70-0x73 */
+	0xF9, 0x71, 0xF9, 0x72, 0xF9, 0x73, 0xF9, 0x74, /* 0x74-0x77 */
+	0xF9, 0x75, 0xF9, 0x76, 0xF9, 0x77, 0xF9, 0x78, /* 0x78-0x7B */
+	0xF9, 0x79, 0xF9, 0x7A, 0xF9, 0x7B, 0xF9, 0x7C, /* 0x7C-0x7F */
+	
+	0xF9, 0x7D, 0xF9, 0x7E, 0xF9, 0x80, 0xF9, 0x81, /* 0x80-0x83 */
+	0xF9, 0x82, 0xF9, 0x83, 0xF9, 0x84, 0xF9, 0x85, /* 0x84-0x87 */
+	0xF9, 0x86, 0xF9, 0x87, 0xF9, 0x88, 0xF9, 0x89, /* 0x88-0x8B */
+	0xF9, 0x8A, 0xF9, 0x8B, 0xF9, 0x8C, 0xF9, 0x8D, /* 0x8C-0x8F */
+	0xF9, 0x8E, 0xF9, 0x8F, 0xF9, 0x90, 0xF9, 0x91, /* 0x90-0x93 */
+	0xF9, 0x92, 0xF9, 0x93, 0xF9, 0x94, 0xF9, 0x95, /* 0x94-0x97 */
+	0xF9, 0x96, 0xF9, 0x97, 0xF9, 0x98, 0xF9, 0x99, /* 0x98-0x9B */
+	0xF9, 0x9A, 0xF9, 0x9B, 0xF9, 0x9C, 0xF9, 0x9D, /* 0x9C-0x9F */
+	0xF9, 0x9E, 0xF9, 0x9F, 0xF9, 0xA0, 0xFA, 0x40, /* 0xA0-0xA3 */
+	0xFA, 0x41, 0xFA, 0x42, 0xFA, 0x43, 0xFA, 0x44, /* 0xA4-0xA7 */
+	0xFA, 0x45, 0xFA, 0x46, 0xFA, 0x47, 0xFA, 0x48, /* 0xA8-0xAB */
+	0xFA, 0x49, 0xFA, 0x4A, 0xFA, 0x4B, 0xFA, 0x4C, /* 0xAC-0xAF */
+	0xFA, 0x4D, 0xFA, 0x4E, 0xFA, 0x4F, 0xFA, 0x50, /* 0xB0-0xB3 */
+	0xFA, 0x51, 0xFA, 0x52, 0xFA, 0x53, 0xFA, 0x54, /* 0xB4-0xB7 */
+	0xFA, 0x55, 0xFA, 0x56, 0xFA, 0x57, 0xFA, 0x58, /* 0xB8-0xBB */
+	0xFA, 0x59, 0xFA, 0x5A, 0xFA, 0x5B, 0xFA, 0x5C, /* 0xBC-0xBF */
+	0xFA, 0x5D, 0xFA, 0x5E, 0xFA, 0x5F, 0xFA, 0x60, /* 0xC0-0xC3 */
+	0xFA, 0x61, 0xFA, 0x62, 0xFA, 0x63, 0xFA, 0x64, /* 0xC4-0xC7 */
+	0xFA, 0x65, 0xFA, 0x66, 0xFA, 0x67, 0xFA, 0x68, /* 0xC8-0xCB */
+	0xFA, 0x69, 0xFA, 0x6A, 0xFA, 0x6B, 0xFA, 0x6C, /* 0xCC-0xCF */
+	0xFA, 0x6D, 0xFA, 0x6E, 0xFA, 0x6F, 0xFA, 0x70, /* 0xD0-0xD3 */
+	0xFA, 0x71, 0xFA, 0x72, 0xFA, 0x73, 0xFA, 0x74, /* 0xD4-0xD7 */
+	0xFA, 0x75, 0xFA, 0x76, 0xFA, 0x77, 0xFA, 0x78, /* 0xD8-0xDB */
+	0xFA, 0x79, 0xFA, 0x7A, 0xFA, 0x7B, 0xFA, 0x7C, /* 0xDC-0xDF */
+	0xFA, 0x7D, 0xFA, 0x7E, 0xFA, 0x80, 0xFA, 0x81, /* 0xE0-0xE3 */
+	0xFA, 0x82, 0xFA, 0x83, 0xFA, 0x84, 0xFA, 0x85, /* 0xE4-0xE7 */
+	0xFA, 0x86, 0xFA, 0x87, 0xFA, 0x88, 0xFA, 0x89, /* 0xE8-0xEB */
+	0xFA, 0x8A, 0xFA, 0x8B, 0xFA, 0x8C, 0xFA, 0x8D, /* 0xEC-0xEF */
+	0xFA, 0x8E, 0xFA, 0x8F, 0xFA, 0x90, 0xFA, 0x91, /* 0xF0-0xF3 */
+	0xFA, 0x92, 0xFA, 0x93, 0xFA, 0x94, 0xFA, 0x95, /* 0xF4-0xF7 */
+	0xFA, 0x96, 0xFA, 0x97, 0xFA, 0x98, 0xFA, 0x99, /* 0xF8-0xFB */
+	0xFA, 0x9A, 0xFA, 0x9B, 0xFA, 0x9C, 0xFA, 0x9D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9E[512] = {
+	0xFA, 0x9E, 0xFA, 0x9F, 0xFA, 0xA0, 0xFB, 0x40, /* 0x00-0x03 */
+	0xFB, 0x41, 0xFB, 0x42, 0xFB, 0x43, 0xFB, 0x44, /* 0x04-0x07 */
+	0xFB, 0x45, 0xFB, 0x46, 0xFB, 0x47, 0xFB, 0x48, /* 0x08-0x0B */
+	0xFB, 0x49, 0xFB, 0x4A, 0xFB, 0x4B, 0xFB, 0x4C, /* 0x0C-0x0F */
+	0xFB, 0x4D, 0xFB, 0x4E, 0xFB, 0x4F, 0xFB, 0x50, /* 0x10-0x13 */
+	0xFB, 0x51, 0xFB, 0x52, 0xFB, 0x53, 0xFB, 0x54, /* 0x14-0x17 */
+	0xFB, 0x55, 0xFB, 0x56, 0xFB, 0x57, 0xFB, 0x58, /* 0x18-0x1B */
+	0xFB, 0x59, 0xFB, 0x5A, 0xFB, 0x5B, 0xC4, 0xF1, /* 0x1C-0x1F */
+	0xF0, 0xAF, 0xBC, 0xA6, 0xF0, 0xB0, 0xC3, 0xF9, /* 0x20-0x23 */
+	0xFB, 0x5C, 0xC5, 0xB8, 0xD1, 0xBB, 0xFB, 0x5D, /* 0x24-0x27 */
+	0xF0, 0xB1, 0xF0, 0xB2, 0xF0, 0xB3, 0xF0, 0xB4, /* 0x28-0x2B */
+	0xF0, 0xB5, 0xD1, 0xBC, 0xFB, 0x5E, 0xD1, 0xEC, /* 0x2C-0x2F */
+	0xFB, 0x5F, 0xF0, 0xB7, 0xF0, 0xB6, 0xD4, 0xA7, /* 0x30-0x33 */
+	0xFB, 0x60, 0xCD, 0xD2, 0xF0, 0xB8, 0xF0, 0xBA, /* 0x34-0x37 */
+	0xF0, 0xB9, 0xF0, 0xBB, 0xF0, 0xBC, 0xFB, 0x61, /* 0x38-0x3B */
+	0xFB, 0x62, 0xB8, 0xEB, 0xF0, 0xBD, 0xBA, 0xE8, /* 0x3C-0x3F */
+	0xFB, 0x63, 0xF0, 0xBE, 0xF0, 0xBF, 0xBE, 0xE9, /* 0x40-0x43 */
+	0xF0, 0xC0, 0xB6, 0xEC, 0xF0, 0xC1, 0xF0, 0xC2, /* 0x44-0x47 */
+	0xF0, 0xC3, 0xF0, 0xC4, 0xC8, 0xB5, 0xF0, 0xC5, /* 0x48-0x4B */
+	0xF0, 0xC6, 0xFB, 0x64, 0xF0, 0xC7, 0xC5, 0xF4, /* 0x4C-0x4F */
+	0xFB, 0x65, 0xF0, 0xC8, 0xFB, 0x66, 0xFB, 0x67, /* 0x50-0x53 */
+	0xFB, 0x68, 0xF0, 0xC9, 0xFB, 0x69, 0xF0, 0xCA, /* 0x54-0x57 */
+	0xF7, 0xBD, 0xFB, 0x6A, 0xF0, 0xCB, 0xF0, 0xCC, /* 0x58-0x5B */
+	0xF0, 0xCD, 0xFB, 0x6B, 0xF0, 0xCE, 0xFB, 0x6C, /* 0x5C-0x5F */
+	0xFB, 0x6D, 0xFB, 0x6E, 0xFB, 0x6F, 0xF0, 0xCF, /* 0x60-0x63 */
+	0xBA, 0xD7, 0xFB, 0x70, 0xF0, 0xD0, 0xF0, 0xD1, /* 0x64-0x67 */
+	0xF0, 0xD2, 0xF0, 0xD3, 0xF0, 0xD4, 0xF0, 0xD5, /* 0x68-0x6B */
+	0xF0, 0xD6, 0xF0, 0xD8, 0xFB, 0x71, 0xFB, 0x72, /* 0x6C-0x6F */
+	0xD3, 0xA5, 0xF0, 0xD7, 0xFB, 0x73, 0xF0, 0xD9, /* 0x70-0x73 */
+	0xFB, 0x74, 0xFB, 0x75, 0xFB, 0x76, 0xFB, 0x77, /* 0x74-0x77 */
+	0xFB, 0x78, 0xFB, 0x79, 0xFB, 0x7A, 0xFB, 0x7B, /* 0x78-0x7B */
+	0xFB, 0x7C, 0xFB, 0x7D, 0xF5, 0xBA, 0xC2, 0xB9, /* 0x7C-0x7F */
+	
+	0xFB, 0x7E, 0xFB, 0x80, 0xF7, 0xE4, 0xFB, 0x81, /* 0x80-0x83 */
+	0xFB, 0x82, 0xFB, 0x83, 0xFB, 0x84, 0xF7, 0xE5, /* 0x84-0x87 */
+	0xF7, 0xE6, 0xFB, 0x85, 0xFB, 0x86, 0xF7, 0xE7, /* 0x88-0x8B */
+	0xFB, 0x87, 0xFB, 0x88, 0xFB, 0x89, 0xFB, 0x8A, /* 0x8C-0x8F */
+	0xFB, 0x8B, 0xFB, 0x8C, 0xF7, 0xE8, 0xC2, 0xB4, /* 0x90-0x93 */
+	0xFB, 0x8D, 0xFB, 0x8E, 0xFB, 0x8F, 0xFB, 0x90, /* 0x94-0x97 */
+	0xFB, 0x91, 0xFB, 0x92, 0xFB, 0x93, 0xFB, 0x94, /* 0x98-0x9B */
+	0xFB, 0x95, 0xF7, 0xEA, 0xFB, 0x96, 0xF7, 0xEB, /* 0x9C-0x9F */
+	0xFB, 0x97, 0xFB, 0x98, 0xFB, 0x99, 0xFB, 0x9A, /* 0xA0-0xA3 */
+	0xFB, 0x9B, 0xFB, 0x9C, 0xC2, 0xF3, 0xFB, 0x9D, /* 0xA4-0xA7 */
+	0xFB, 0x9E, 0xFB, 0x9F, 0xFB, 0xA0, 0xFC, 0x40, /* 0xA8-0xAB */
+	0xFC, 0x41, 0xFC, 0x42, 0xFC, 0x43, 0xFC, 0x44, /* 0xAC-0xAF */
+	0xFC, 0x45, 0xFC, 0x46, 0xFC, 0x47, 0xFC, 0x48, /* 0xB0-0xB3 */
+	0xF4, 0xF0, 0xFC, 0x49, 0xFC, 0x4A, 0xFC, 0x4B, /* 0xB4-0xB7 */
+	0xF4, 0xEF, 0xFC, 0x4C, 0xFC, 0x4D, 0xC2, 0xE9, /* 0xB8-0xBB */
+	0xFC, 0x4E, 0xF7, 0xE1, 0xF7, 0xE2, 0xFC, 0x4F, /* 0xBC-0xBF */
+	0xFC, 0x50, 0xFC, 0x51, 0xFC, 0x52, 0xFC, 0x53, /* 0xC0-0xC3 */
+	0xBB, 0xC6, 0xFC, 0x54, 0xFC, 0x55, 0xFC, 0x56, /* 0xC4-0xC7 */
+	0xFC, 0x57, 0xD9, 0xE4, 0xFC, 0x58, 0xFC, 0x59, /* 0xC8-0xCB */
+	0xFC, 0x5A, 0xCA, 0xF2, 0xC0, 0xE8, 0xF0, 0xA4, /* 0xCC-0xCF */
+	0xFC, 0x5B, 0xBA, 0xDA, 0xFC, 0x5C, 0xFC, 0x5D, /* 0xD0-0xD3 */
+	0xC7, 0xAD, 0xFC, 0x5E, 0xFC, 0x5F, 0xFC, 0x60, /* 0xD4-0xD7 */
+	0xC4, 0xAC, 0xFC, 0x61, 0xFC, 0x62, 0xF7, 0xEC, /* 0xD8-0xDB */
+	0xF7, 0xED, 0xF7, 0xEE, 0xFC, 0x63, 0xF7, 0xF0, /* 0xDC-0xDF */
+	0xF7, 0xEF, 0xFC, 0x64, 0xF7, 0xF1, 0xFC, 0x65, /* 0xE0-0xE3 */
+	0xFC, 0x66, 0xF7, 0xF4, 0xFC, 0x67, 0xF7, 0xF3, /* 0xE4-0xE7 */
+	0xFC, 0x68, 0xF7, 0xF2, 0xF7, 0xF5, 0xFC, 0x69, /* 0xE8-0xEB */
+	0xFC, 0x6A, 0xFC, 0x6B, 0xFC, 0x6C, 0xF7, 0xF6, /* 0xEC-0xEF */
+	0xFC, 0x6D, 0xFC, 0x6E, 0xFC, 0x6F, 0xFC, 0x70, /* 0xF0-0xF3 */
+	0xFC, 0x71, 0xFC, 0x72, 0xFC, 0x73, 0xFC, 0x74, /* 0xF4-0xF7 */
+	0xFC, 0x75, 0xED, 0xE9, 0xFC, 0x76, 0xED, 0xEA, /* 0xF8-0xFB */
+	0xED, 0xEB, 0xFC, 0x77, 0xF6, 0xBC, 0xFC, 0x78, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9F[512] = {
+	0xFC, 0x79, 0xFC, 0x7A, 0xFC, 0x7B, 0xFC, 0x7C, /* 0x00-0x03 */
+	0xFC, 0x7D, 0xFC, 0x7E, 0xFC, 0x80, 0xFC, 0x81, /* 0x04-0x07 */
+	0xFC, 0x82, 0xFC, 0x83, 0xFC, 0x84, 0xF6, 0xBD, /* 0x08-0x0B */
+	0xFC, 0x85, 0xF6, 0xBE, 0xB6, 0xA6, 0xFC, 0x86, /* 0x0C-0x0F */
+	0xD8, 0xBE, 0xFC, 0x87, 0xFC, 0x88, 0xB9, 0xC4, /* 0x10-0x13 */
+	0xFC, 0x89, 0xFC, 0x8A, 0xFC, 0x8B, 0xD8, 0xBB, /* 0x14-0x17 */
+	0xFC, 0x8C, 0xDC, 0xB1, 0xFC, 0x8D, 0xFC, 0x8E, /* 0x18-0x1B */
+	0xFC, 0x8F, 0xFC, 0x90, 0xFC, 0x91, 0xFC, 0x92, /* 0x1C-0x1F */
+	0xCA, 0xF3, 0xFC, 0x93, 0xF7, 0xF7, 0xFC, 0x94, /* 0x20-0x23 */
+	0xFC, 0x95, 0xFC, 0x96, 0xFC, 0x97, 0xFC, 0x98, /* 0x24-0x27 */
+	0xFC, 0x99, 0xFC, 0x9A, 0xFC, 0x9B, 0xFC, 0x9C, /* 0x28-0x2B */
+	0xF7, 0xF8, 0xFC, 0x9D, 0xFC, 0x9E, 0xF7, 0xF9, /* 0x2C-0x2F */
+	0xFC, 0x9F, 0xFC, 0xA0, 0xFD, 0x40, 0xFD, 0x41, /* 0x30-0x33 */
+	0xFD, 0x42, 0xFD, 0x43, 0xFD, 0x44, 0xF7, 0xFB, /* 0x34-0x37 */
+	0xFD, 0x45, 0xF7, 0xFA, 0xFD, 0x46, 0xB1, 0xC7, /* 0x38-0x3B */
+	0xFD, 0x47, 0xF7, 0xFC, 0xF7, 0xFD, 0xFD, 0x48, /* 0x3C-0x3F */
+	0xFD, 0x49, 0xFD, 0x4A, 0xFD, 0x4B, 0xFD, 0x4C, /* 0x40-0x43 */
+	0xF7, 0xFE, 0xFD, 0x4D, 0xFD, 0x4E, 0xFD, 0x4F, /* 0x44-0x47 */
+	0xFD, 0x50, 0xFD, 0x51, 0xFD, 0x52, 0xFD, 0x53, /* 0x48-0x4B */
+	0xFD, 0x54, 0xFD, 0x55, 0xFD, 0x56, 0xFD, 0x57, /* 0x4C-0x4F */
+	0xC6, 0xEB, 0xEC, 0xB4, 0xFD, 0x58, 0xFD, 0x59, /* 0x50-0x53 */
+	0xFD, 0x5A, 0xFD, 0x5B, 0xFD, 0x5C, 0xFD, 0x5D, /* 0x54-0x57 */
+	0xFD, 0x5E, 0xFD, 0x5F, 0xFD, 0x60, 0xFD, 0x61, /* 0x58-0x5B */
+	0xFD, 0x62, 0xFD, 0x63, 0xFD, 0x64, 0xFD, 0x65, /* 0x5C-0x5F */
+	0xFD, 0x66, 0xFD, 0x67, 0xFD, 0x68, 0xFD, 0x69, /* 0x60-0x63 */
+	0xFD, 0x6A, 0xFD, 0x6B, 0xFD, 0x6C, 0xFD, 0x6D, /* 0x64-0x67 */
+	0xFD, 0x6E, 0xFD, 0x6F, 0xFD, 0x70, 0xFD, 0x71, /* 0x68-0x6B */
+	0xFD, 0x72, 0xFD, 0x73, 0xFD, 0x74, 0xFD, 0x75, /* 0x6C-0x6F */
+	0xFD, 0x76, 0xFD, 0x77, 0xFD, 0x78, 0xFD, 0x79, /* 0x70-0x73 */
+	0xFD, 0x7A, 0xFD, 0x7B, 0xFD, 0x7C, 0xFD, 0x7D, /* 0x74-0x77 */
+	0xFD, 0x7E, 0xFD, 0x80, 0xFD, 0x81, 0xFD, 0x82, /* 0x78-0x7B */
+	0xFD, 0x83, 0xFD, 0x84, 0xFD, 0x85, 0xB3, 0xDD, /* 0x7C-0x7F */
+	
+	0xF6, 0xB3, 0xFD, 0x86, 0xFD, 0x87, 0xF6, 0xB4, /* 0x80-0x83 */
+	0xC1, 0xE4, 0xF6, 0xB5, 0xF6, 0xB6, 0xF6, 0xB7, /* 0x84-0x87 */
+	0xF6, 0xB8, 0xF6, 0xB9, 0xF6, 0xBA, 0xC8, 0xA3, /* 0x88-0x8B */
+	0xF6, 0xBB, 0xFD, 0x88, 0xFD, 0x89, 0xFD, 0x8A, /* 0x8C-0x8F */
+	0xFD, 0x8B, 0xFD, 0x8C, 0xFD, 0x8D, 0xFD, 0x8E, /* 0x90-0x93 */
+	0xFD, 0x8F, 0xFD, 0x90, 0xFD, 0x91, 0xFD, 0x92, /* 0x94-0x97 */
+	0xFD, 0x93, 0xC1, 0xFA, 0xB9, 0xA8, 0xED, 0xE8, /* 0x98-0x9B */
+	0xFD, 0x94, 0xFD, 0x95, 0xFD, 0x96, 0xB9, 0xEA, /* 0x9C-0x9F */
+	0xD9, 0xDF, 0xFD, 0x97, 0xFD, 0x98, 0xFD, 0x99, /* 0xA0-0xA3 */
+	0xFD, 0x9A, 0xFD, 0x9B, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+};
+
+static unsigned char u2c_DC[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+};
+
+static unsigned char u2c_F9[512] = {
+	0xD8, 0x4D, 0xB8, 0xFC, 0xDC, 0x87, 0xD9, 0x5A, /* 0x00-0x03 */
+	0xBB, 0xAC, 0xB4, 0xAE, 0xBE, 0xE4, 0xFD, 0x94, /* 0x04-0x07 */
+	0xFD, 0x94, 0xC6, 0xF5, 0xBD, 0xF0, 0xC0, 0xAE, /* 0x08-0x0B */
+	0xC4, 0xCE, 0x91, 0xD0, 0xB0, 0x5D, 0xC1, 0x5F, /* 0x0C-0x0F */
+	0xCC, 0x7D, 0xC2, 0xDD, 0xC2, 0xE3, 0xDF, 0x89, /* 0x10-0x13 */
+	0x98, 0xB7, 0xC2, 0xE5, 0xC0, 0xD3, 0xE7, 0xF3, /* 0x14-0x17 */
+	0xC2, 0xE4, 0xC0, 0xD2, 0xF1, 0x98, 0x81, 0x79, /* 0x18-0x1B */
+	0xC2, 0xD1, 0x99, 0xDA, 0xA0, 0x80, 0xCC, 0x6D, /* 0x1C-0x1F */
+	0xFB, 0x5B, 0x8D, 0xB9, 0x9E, 0x45, 0xCB, 0x7B, /* 0x20-0x23 */
+	0xD2, 0x68, 0xC0, 0xAD, 0xC5, 0x44, 0xCF, 0x9E, /* 0x24-0x27 */
+	0xC0, 0xC8, 0xC0, 0xCA, 0xC0, 0xCB, 0xC0, 0xC7, /* 0x28-0x2B */
+	0xFD, 0x9C, 0x81, 0xED, 0xC0, 0xE4, 0x84, 0xDA, /* 0x2C-0x2F */
+	0x93, 0xEF, 0x99, 0xA9, 0xA0, 0x74, 0xB1, 0x52, /* 0x30-0x33 */
+	0xC0, 0xCF, 0xCC, 0x4A, 0xCC, 0x94, 0xC2, 0xB7, /* 0x34-0x37 */
+	0xC2, 0xB6, 0xF4, 0x94, 0xFA, 0x98, 0xC2, 0xB5, /* 0x38-0x3B */
+	0xB5, 0x93, 0xBE, 0x47, 0xC7, 0x8A, 0xE4, 0x9B, /* 0x3C-0x3F */
+	0xC2, 0xB9, 0xD5, 0x93, 0x89, 0xC5, 0xC5, 0xAA, /* 0x40-0x43 */
+	0xBB, 0x5C, 0xC3, 0x40, 0xC0, 0xCE, 0xC0, 0xDA, /* 0x44-0x47 */
+	0xD9, 0x54, 0xC0, 0xD7, 0x89, 0xBE, 0x8C, 0xD2, /* 0x48-0x4B */
+	0x98, 0xC7, 0x9C, 0x49, 0xC2, 0xA9, 0xC0, 0xDB, /* 0x4C-0x4F */
+	0xBF, 0x7C, 0xC2, 0xAA, 0xC0, 0xD5, 0xC0, 0xDF, /* 0x50-0x53 */
+	0x84, 0x43, 0xC1, 0xE8, 0xB6, 0xA0, 0xBE, 0x63, /* 0x54-0x57 */
+	0xC1, 0xE2, 0xC1, 0xEA, 0xD7, 0x78, 0x92, 0x82, /* 0x58-0x5B */
+	0x98, 0xB7, 0xD6, 0x5A, 0xB5, 0xA4, 0x8C, 0x8E, /* 0x5C-0x5F */
+	0xC5, 0xAD, 0xC2, 0xCA, 0xAE, 0x90, 0xB1, 0xB1, /* 0x60-0x63 */
+	0xB4, 0x91, 0xB1, 0xE3, 0x8F, 0xCD, 0xB2, 0xBB, /* 0x64-0x67 */
+	0xC3, 0xDA, 0x94, 0xB5, 0xCB, 0xF7, 0x85, 0xA2, /* 0x68-0x6B */
+	0xC8, 0xFB, 0xCA, 0xA1, 0xC8, 0x7E, 0xD5, 0x66, /* 0x6C-0x6F */
+	0x9A, 0xA2, 0xB3, 0xBD, 0xC9, 0xF2, 0xCA, 0xB0, /* 0x70-0x73 */
+	0xC8, 0xF4, 0xC2, 0xD3, 0xC2, 0xD4, 0xC1, 0xC1, /* 0x74-0x77 */
+	0x83, 0xC9, 0xFD, 0x9D, 0xC1, 0xBA, 0xBC, 0x5A, /* 0x78-0x7B */
+	0xC1, 0xBC, 0xD5, 0x8F, 0xC1, 0xBF, 0x84, 0xEE, /* 0x7C-0x7F */
+	
+	0x85, 0xCE, 0xC5, 0xAE, 0x8F, 0x5D, 0xC2, 0xC3, /* 0x80-0x83 */
+	0x9E, 0x56, 0xB5, 0x5A, 0xE9, 0x82, 0xF3, 0x50, /* 0x84-0x87 */
+	0xFB, 0x90, 0xC0, 0xE8, 0xC1, 0xA6, 0x95, 0xD1, /* 0x88-0x8B */
+	0x9A, 0x76, 0xDE, 0x5D, 0xC4, 0xEA, 0x91, 0x7A, /* 0x8C-0x8F */
+	0x91, 0xD9, 0x93, 0xD3, 0x9D, 0x69, 0x9F, 0x92, /* 0x90-0x93 */
+	0xAD, 0x49, 0xFD, 0x9E, 0xBE, 0x9A, 0xC2, 0x93, /* 0x94-0x97 */
+	0xDD, 0x82, 0xC9, 0x8F, 0xDF, 0x42, 0xE5, 0x80, /* 0x98-0x9B */
+	0xC1, 0xD0, 0xC1, 0xD3, 0xD1, 0xCA, 0xC1, 0xD2, /* 0x9C-0x9F */
+	0xC1, 0xD1, 0xD5, 0x66, 0xC1, 0xAE, 0xC4, 0xEE, /* 0xA0-0xA3 */
+	0xC4, 0xED, 0x9A, 0x9A, 0xBA, 0x9F, 0xAB, 0x43, /* 0xA4-0xA7 */
+	0xC1, 0xEE, 0xE0, 0xF2, 0x8C, 0x8E, 0x8E, 0x58, /* 0xA8-0xAB */
+	0xC1, 0xAF, 0xC1, 0xE1, 0xAC, 0x93, 0xC1, 0xE7, /* 0xAC-0xAF */
+	0xF1, 0xF6, 0xE2, 0x8F, 0xC1, 0xE3, 0xEC, 0x60, /* 0xB0-0xB3 */
+	0xEE, 0x49, 0xC0, 0xFD, 0xB6, 0x59, 0xF5, 0xB7, /* 0xB4-0xB7 */
+	0xEB, 0x60, 0x90, 0xBA, 0xC1, 0xCB, 0xC1, 0xC5, /* 0xB8-0xBB */
+	0xE5, 0xBC, 0xC4, 0xF2, 0xC1, 0xCF, 0x98, 0xB7, /* 0xBC-0xBF */
+	0xC1, 0xC7, 0xAF, 0x9F, 0xDE, 0xA4, 0xDF, 0x7C, /* 0xC0-0xC3 */
+	0xFD, 0x88, 0x95, 0x9E, 0xC8, 0xEE, 0x84, 0xA2, /* 0xC4-0xC7 */
+	0x96, 0x83, 0xC1, 0xF8, 0xC1, 0xF7, 0xC1, 0xEF, /* 0xC8-0xCB */
+	0xC1, 0xF0, 0xC1, 0xF4, 0xC1, 0xF2, 0xBC, 0x7E, /* 0xCC-0xCF */
+	0xEE, 0x90, 0xC1, 0xF9, 0xC2, 0xBE, 0xEA, 0x91, /* 0xD0-0xD3 */
+	0x82, 0x90, 0x8D, 0x91, 0x9C, 0x53, 0xDD, 0x86, /* 0xD4-0xD7 */
+	0xC2, 0xC9, 0x90, 0xFC, 0xC0, 0xF5, 0xC2, 0xCA, /* 0xD8-0xDB */
+	0xC2, 0xA1, 0xC0, 0xFB, 0xC0, 0xF4, 0xC2, 0xC4, /* 0xDC-0xDF */
+	0xD2, 0xD7, 0xC0, 0xEE, 0xC0, 0xE6, 0xC4, 0xE0, /* 0xE0-0xE3 */
+	0xC0, 0xED, 0xC1, 0xA1, 0xEE, 0xBE, 0xFD, 0x9F, /* 0xE4-0xE7 */
+	0xD1, 0x65, 0xC0, 0xEF, 0xEB, 0x78, 0xC4, 0xE4, /* 0xE8-0xEB */
+	0xC4, 0xE7, 0xC1, 0xDF, 0x9F, 0xFB, 0xAD, 0x55, /* 0xEC-0xEF */
+	0xCC, 0x41, 0xFD, 0xA0, 0xF7, 0x5B, 0xF7, 0xEB, /* 0xF0-0xF3 */
+	0xC1, 0xD6, 0xC1, 0xDC, 0xC5, 0x52, 0xC1, 0xA2, /* 0xF4-0xF7 */
+	0xF3, 0xD2, 0xC1, 0xA3, 0xA0, 0xEE, 0xD6, 0xCB, /* 0xF8-0xFB */
+	0xD7, 0x52, 0xCA, 0xB2, 0xB2, 0xE8, 0xB4, 0xCC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_FA[512] = {
+	0xC7, 0xD0, 0xB6, 0xC8, 0xCD, 0xD8, 0xCC, 0xC7, /* 0x00-0x03 */
+	0xD5, 0xAC, 0xB6, 0xB4, 0xB1, 0xA9, 0xDD, 0x97, /* 0x04-0x07 */
+	0xD0, 0xD0, 0xBD, 0xB5, 0xD2, 0x8A, 0xC0, 0xAA, /* 0x08-0x0B */
+	0xFE, 0x40, 0xFE, 0x41, 0xFE, 0x42, 0xFE, 0x43, /* 0x0C-0x0F */
+	0x89, 0x56, 0xFE, 0x44, 0xC7, 0xE7, 0xFE, 0x45, /* 0x10-0x13 */
+	0xFE, 0x46, 0x84, 0x44, 0xD8, 0x69, 0xD2, 0xE6, /* 0x14-0x17 */
+	0xFE, 0x47, 0xC9, 0xF1, 0xCF, 0xE9, 0xB8, 0xA3, /* 0x18-0x1B */
+	0xBE, 0xB8, 0xBE, 0xAB, 0xD3, 0xF0, 0xFE, 0x48, /* 0x1C-0x1F */
+	0xFE, 0x49, 0xFE, 0x4A, 0xD6, 0x54, 0xFE, 0x4B, /* 0x20-0x23 */
+	0xFE, 0x4C, 0xD2, 0xDD, 0xB6, 0xBC, 0xFE, 0x4D, /* 0x24-0x27 */
+	0xFE, 0x4E, 0xFE, 0x4F, 0xEF, 0x88, 0xEF, 0x95, /* 0x28-0x2B */
+	0xF0, 0x5E, 0xFA, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+};
+
+static unsigned char u2c_FE[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA9, 0x55, 0xA6, 0xF2, 0x00, 0x00, 0xA6, 0xF4, /* 0x30-0x33 */
+	0xA6, 0xF5, 0xA6, 0xE0, 0xA6, 0xE1, 0xA6, 0xF0, /* 0x34-0x37 */
+	0xA6, 0xF1, 0xA6, 0xE2, 0xA6, 0xE3, 0xA6, 0xEE, /* 0x38-0x3B */
+	0xA6, 0xEF, 0xA6, 0xE6, 0xA6, 0xE7, 0xA6, 0xE4, /* 0x3C-0x3F */
+	0xA6, 0xE5, 0xA6, 0xE8, 0xA6, 0xE9, 0xA6, 0xEA, /* 0x40-0x43 */
+	0xA6, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xA9, 0x68, 0xA9, 0x69, 0xA9, 0x6A, /* 0x48-0x4B */
+	0xA9, 0x6B, 0xA9, 0x6C, 0xA9, 0x6D, 0xA9, 0x6E, /* 0x4C-0x4F */
+	0xA9, 0x6F, 0xA9, 0x70, 0xA9, 0x71, 0x00, 0x00, /* 0x50-0x53 */
+	0xA9, 0x72, 0xA9, 0x73, 0xA9, 0x74, 0xA9, 0x75, /* 0x54-0x57 */
+	0x00, 0x00, 0xA9, 0x76, 0xA9, 0x77, 0xA9, 0x78, /* 0x58-0x5B */
+	0xA9, 0x79, 0xA9, 0x7A, 0xA9, 0x7B, 0xA9, 0x7C, /* 0x5C-0x5F */
+	0xA9, 0x7D, 0xA9, 0x7E, 0xA9, 0x80, 0xA9, 0x81, /* 0x60-0x63 */
+	0xA9, 0x82, 0xA9, 0x83, 0xA9, 0x84, 0x00, 0x00, /* 0x64-0x67 */
+	0xA9, 0x85, 0xA9, 0x86, 0xA9, 0x87, 0xA9, 0x88, /* 0x68-0x6B */
+};
+
+static unsigned char u2c_FF[512] = {
+	0x00, 0x00, 0xA3, 0xA1, 0xA3, 0xA2, 0xA3, 0xA3, /* 0x00-0x03 */
+	0xA1, 0xE7, 0xA3, 0xA5, 0xA3, 0xA6, 0xA3, 0xA7, /* 0x04-0x07 */
+	0xA3, 0xA8, 0xA3, 0xA9, 0xA3, 0xAA, 0xA3, 0xAB, /* 0x08-0x0B */
+	0xA3, 0xAC, 0xA3, 0xAD, 0xA3, 0xAE, 0xA3, 0xAF, /* 0x0C-0x0F */
+	0xA3, 0xB0, 0xA3, 0xB1, 0xA3, 0xB2, 0xA3, 0xB3, /* 0x10-0x13 */
+	0xA3, 0xB4, 0xA3, 0xB5, 0xA3, 0xB6, 0xA3, 0xB7, /* 0x14-0x17 */
+	0xA3, 0xB8, 0xA3, 0xB9, 0xA3, 0xBA, 0xA3, 0xBB, /* 0x18-0x1B */
+	0xA3, 0xBC, 0xA3, 0xBD, 0xA3, 0xBE, 0xA3, 0xBF, /* 0x1C-0x1F */
+	0xA3, 0xC0, 0xA3, 0xC1, 0xA3, 0xC2, 0xA3, 0xC3, /* 0x20-0x23 */
+	0xA3, 0xC4, 0xA3, 0xC5, 0xA3, 0xC6, 0xA3, 0xC7, /* 0x24-0x27 */
+	0xA3, 0xC8, 0xA3, 0xC9, 0xA3, 0xCA, 0xA3, 0xCB, /* 0x28-0x2B */
+	0xA3, 0xCC, 0xA3, 0xCD, 0xA3, 0xCE, 0xA3, 0xCF, /* 0x2C-0x2F */
+	0xA3, 0xD0, 0xA3, 0xD1, 0xA3, 0xD2, 0xA3, 0xD3, /* 0x30-0x33 */
+	0xA3, 0xD4, 0xA3, 0xD5, 0xA3, 0xD6, 0xA3, 0xD7, /* 0x34-0x37 */
+	0xA3, 0xD8, 0xA3, 0xD9, 0xA3, 0xDA, 0xA3, 0xDB, /* 0x38-0x3B */
+	0xA3, 0xDC, 0xA3, 0xDD, 0xA3, 0xDE, 0xA3, 0xDF, /* 0x3C-0x3F */
+	0xA3, 0xE0, 0xA3, 0xE1, 0xA3, 0xE2, 0xA3, 0xE3, /* 0x40-0x43 */
+	0xA3, 0xE4, 0xA3, 0xE5, 0xA3, 0xE6, 0xA3, 0xE7, /* 0x44-0x47 */
+	0xA3, 0xE8, 0xA3, 0xE9, 0xA3, 0xEA, 0xA3, 0xEB, /* 0x48-0x4B */
+	0xA3, 0xEC, 0xA3, 0xED, 0xA3, 0xEE, 0xA3, 0xEF, /* 0x4C-0x4F */
+	0xA3, 0xF0, 0xA3, 0xF1, 0xA3, 0xF2, 0xA3, 0xF3, /* 0x50-0x53 */
+	0xA3, 0xF4, 0xA3, 0xF5, 0xA3, 0xF6, 0xA3, 0xF7, /* 0x54-0x57 */
+	0xA3, 0xF8, 0xA3, 0xF9, 0xA3, 0xFA, 0xA3, 0xFB, /* 0x58-0x5B */
+	0xA3, 0xFC, 0xA3, 0xFD, 0xA1, 0xAB, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA1, 0xE9, 0xA1, 0xEA, 0xA9, 0x56, 0xA3, 0xFE, /* 0xE0-0xE3 */
+	0xA9, 0x57, 0xA3, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
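+/*
+ * Unicode-to-CP936 page table: the high byte of a Unicode code point
+ * selects one of the 512-byte pages above, and the low byte indexes a
+ * pair of output bytes within it (0x00, 0x00 marks an unmapped slot).
+ */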
+static unsigned char *page_uni2charset[256] = {
+	NULL,   u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_20, u2c_21, u2c_22, u2c_23, u2c_24, u2c_25, u2c_26, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_30, u2c_31, u2c_32, u2c_33, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   u2c_4E, u2c_4F, 
+	u2c_50, u2c_51, u2c_52, u2c_53, u2c_54, u2c_55, u2c_56, u2c_57, 
+	u2c_58, u2c_59, u2c_5A, u2c_5B, u2c_5C, u2c_5D, u2c_5E, u2c_5F, 
+	u2c_60, u2c_61, u2c_62, u2c_63, u2c_64, u2c_65, u2c_66, u2c_67, 
+	u2c_68, u2c_69, u2c_6A, u2c_6B, u2c_6C, u2c_6D, u2c_6E, u2c_6F, 
+	u2c_70, u2c_71, u2c_72, u2c_73, u2c_74, u2c_75, u2c_76, u2c_77, 
+	u2c_78, u2c_79, u2c_7A, u2c_7B, u2c_7C, u2c_7D, u2c_7E, u2c_7F, 
+	u2c_80, u2c_81, u2c_82, u2c_83, u2c_84, u2c_85, u2c_86, u2c_87, 
+	u2c_88, u2c_89, u2c_8A, u2c_8B, u2c_8C, u2c_8D, u2c_8E, u2c_8F, 
+	u2c_90, u2c_91, u2c_92, u2c_93, u2c_94, u2c_95, u2c_96, u2c_97, 
+	u2c_98, u2c_99, u2c_9A, u2c_9B, u2c_9C, u2c_9D, u2c_9E, u2c_9F, 
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   u2c_DC, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   u2c_F9, u2c_FA, NULL,   NULL,   NULL,   u2c_FE, u2c_FF,
+};
+
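+/*
+ * Case-folding tables: identity mappings except for the ASCII letters,
+ * which are folded to the opposite case.
+ */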
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
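+/*
+ * Convert one Unicode character to its CP936 byte sequence.  Returns
+ * the number of bytes written (1 for single-byte characters, 2 for
+ * double-byte characters), -EINVAL if there is no mapping, or
+ * -ENAMETOOLONG if the result does not fit within boundlen bytes.
+ */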
+static int uni2char(const wchar_t uni,
+			unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni&0xFF;
+	unsigned char ch = (uni>>8)&0xFF;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset) {
+		if (boundlen <= 1)
+			return -ENAMETOOLONG;
+		out[0] = uni2charset[cl*2];
+		out[1] = uni2charset[cl*2+1];
+		if (out[0] == 0x00 && out[1] == 0x00)
+			return -EINVAL;
+		n = 2;
+	} else if (ch == 0 && cl) {
+		out[0] = cl;
+		n = 1;
+	} else {
+		return -EINVAL;
+	}
+
+	return n;
+}
+
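+/*
+ * Convert a CP936 byte sequence to one Unicode character.  Returns the
+ * number of input bytes consumed (1 or 2), -EINVAL if the lead/trail
+ * byte pair has no mapping, or -ENAMETOOLONG if boundlen is not positive.
+ */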
+static int char2uni(const unsigned char *rawstring, int boundlen,
+			wchar_t *uni)
+{
+	unsigned char ch, cl;
+	wchar_t *charset2uni;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	if (boundlen == 1) {
+		*uni = rawstring[0];
+		return 1;
+	}
+
+	ch = rawstring[0];
+	cl = rawstring[1];
+
+	charset2uni = page_charset2uni[ch];
+	if (charset2uni && cl) {
+		*uni = charset2uni[cl];
+		if (*uni == 0x0000)
+			return -EINVAL;
+		n = 2;
+	} else {
+		*uni = ch;
+		n = 1;
+	}
+	return n;
+}
+
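+/* NLS table registered under the name "cp936", with "gb2312" as an alias. */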
+static struct nls_table table = {
+	.charset	= "cp936",
+	.alias		= "gb2312",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp936(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp936(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp936)
+module_exit(exit_nls_cp936)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(gb2312);
diff --git a/fs/nls/nls_cp949.c b/fs/nls/nls_cp949.c
new file mode 100644
index 0000000..4351ae2
--- /dev/null
+++ b/fs/nls/nls_cp949.c
@@ -0,0 +1,13947 @@
+/*
+ * linux/fs/nls/nls_cp949.c
+ *
+ * Charset cp949 translation tables.
+ * This translation table was generated automatically; the
+ * original table can be downloaded from the Microsoft website.
+ * (http://www.microsoft.com/typography/unicode/unicodecp.htm)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
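+/*
+ * CP949-to-Unicode pages: each c2u_XX table maps the trailing byte of a
+ * two-byte sequence whose lead byte is 0xXX to the corresponding Unicode
+ * code point; 0x0000 marks an unused slot.
+ */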
+static wchar_t c2u_81[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xAC02,0xAC03,0xAC05,0xAC06,0xAC0B,0xAC0C,0xAC0D,/* 0x40-0x47 */
+	0xAC0E,0xAC0F,0xAC18,0xAC1E,0xAC1F,0xAC21,0xAC22,0xAC23,/* 0x48-0x4F */
+	0xAC25,0xAC26,0xAC27,0xAC28,0xAC29,0xAC2A,0xAC2B,0xAC2E,/* 0x50-0x57 */
+	0xAC32,0xAC33,0xAC34,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xAC35,0xAC36,0xAC37,0xAC3A,0xAC3B,0xAC3D,0xAC3E,/* 0x60-0x67 */
+	0xAC3F,0xAC41,0xAC42,0xAC43,0xAC44,0xAC45,0xAC46,0xAC47,/* 0x68-0x6F */
+	0xAC48,0xAC49,0xAC4A,0xAC4C,0xAC4E,0xAC4F,0xAC50,0xAC51,/* 0x70-0x77 */
+	0xAC52,0xAC53,0xAC55,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xAC56,0xAC57,0xAC59,0xAC5A,0xAC5B,0xAC5D,0xAC5E,/* 0x80-0x87 */
+	0xAC5F,0xAC60,0xAC61,0xAC62,0xAC63,0xAC64,0xAC65,0xAC66,/* 0x88-0x8F */
+	0xAC67,0xAC68,0xAC69,0xAC6A,0xAC6B,0xAC6C,0xAC6D,0xAC6E,/* 0x90-0x97 */
+	0xAC6F,0xAC72,0xAC73,0xAC75,0xAC76,0xAC79,0xAC7B,0xAC7C,/* 0x98-0x9F */
+	0xAC7D,0xAC7E,0xAC7F,0xAC82,0xAC87,0xAC88,0xAC8D,0xAC8E,/* 0xA0-0xA7 */
+	0xAC8F,0xAC91,0xAC92,0xAC93,0xAC95,0xAC96,0xAC97,0xAC98,/* 0xA8-0xAF */
+	0xAC99,0xAC9A,0xAC9B,0xAC9E,0xACA2,0xACA3,0xACA4,0xACA5,/* 0xB0-0xB7 */
+	0xACA6,0xACA7,0xACAB,0xACAD,0xACAE,0xACB1,0xACB2,0xACB3,/* 0xB8-0xBF */
+	0xACB4,0xACB5,0xACB6,0xACB7,0xACBA,0xACBE,0xACBF,0xACC0,/* 0xC0-0xC7 */
+	0xACC2,0xACC3,0xACC5,0xACC6,0xACC7,0xACC9,0xACCA,0xACCB,/* 0xC8-0xCF */
+	0xACCD,0xACCE,0xACCF,0xACD0,0xACD1,0xACD2,0xACD3,0xACD4,/* 0xD0-0xD7 */
+	0xACD6,0xACD8,0xACD9,0xACDA,0xACDB,0xACDC,0xACDD,0xACDE,/* 0xD8-0xDF */
+	0xACDF,0xACE2,0xACE3,0xACE5,0xACE6,0xACE9,0xACEB,0xACED,/* 0xE0-0xE7 */
+	0xACEE,0xACF2,0xACF4,0xACF7,0xACF8,0xACF9,0xACFA,0xACFB,/* 0xE8-0xEF */
+	0xACFE,0xACFF,0xAD01,0xAD02,0xAD03,0xAD05,0xAD07,0xAD08,/* 0xF0-0xF7 */
+	0xAD09,0xAD0A,0xAD0B,0xAD0E,0xAD10,0xAD12,0xAD13,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_82[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xAD14,0xAD15,0xAD16,0xAD17,0xAD19,0xAD1A,0xAD1B,/* 0x40-0x47 */
+	0xAD1D,0xAD1E,0xAD1F,0xAD21,0xAD22,0xAD23,0xAD24,0xAD25,/* 0x48-0x4F */
+	0xAD26,0xAD27,0xAD28,0xAD2A,0xAD2B,0xAD2E,0xAD2F,0xAD30,/* 0x50-0x57 */
+	0xAD31,0xAD32,0xAD33,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xAD36,0xAD37,0xAD39,0xAD3A,0xAD3B,0xAD3D,0xAD3E,/* 0x60-0x67 */
+	0xAD3F,0xAD40,0xAD41,0xAD42,0xAD43,0xAD46,0xAD48,0xAD4A,/* 0x68-0x6F */
+	0xAD4B,0xAD4C,0xAD4D,0xAD4E,0xAD4F,0xAD51,0xAD52,0xAD53,/* 0x70-0x77 */
+	0xAD55,0xAD56,0xAD57,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xAD59,0xAD5A,0xAD5B,0xAD5C,0xAD5D,0xAD5E,0xAD5F,/* 0x80-0x87 */
+	0xAD60,0xAD62,0xAD64,0xAD65,0xAD66,0xAD67,0xAD68,0xAD69,/* 0x88-0x8F */
+	0xAD6A,0xAD6B,0xAD6E,0xAD6F,0xAD71,0xAD72,0xAD77,0xAD78,/* 0x90-0x97 */
+	0xAD79,0xAD7A,0xAD7E,0xAD80,0xAD83,0xAD84,0xAD85,0xAD86,/* 0x98-0x9F */
+	0xAD87,0xAD8A,0xAD8B,0xAD8D,0xAD8E,0xAD8F,0xAD91,0xAD92,/* 0xA0-0xA7 */
+	0xAD93,0xAD94,0xAD95,0xAD96,0xAD97,0xAD98,0xAD99,0xAD9A,/* 0xA8-0xAF */
+	0xAD9B,0xAD9E,0xAD9F,0xADA0,0xADA1,0xADA2,0xADA3,0xADA5,/* 0xB0-0xB7 */
+	0xADA6,0xADA7,0xADA8,0xADA9,0xADAA,0xADAB,0xADAC,0xADAD,/* 0xB8-0xBF */
+	0xADAE,0xADAF,0xADB0,0xADB1,0xADB2,0xADB3,0xADB4,0xADB5,/* 0xC0-0xC7 */
+	0xADB6,0xADB8,0xADB9,0xADBA,0xADBB,0xADBC,0xADBD,0xADBE,/* 0xC8-0xCF */
+	0xADBF,0xADC2,0xADC3,0xADC5,0xADC6,0xADC7,0xADC9,0xADCA,/* 0xD0-0xD7 */
+	0xADCB,0xADCC,0xADCD,0xADCE,0xADCF,0xADD2,0xADD4,0xADD5,/* 0xD8-0xDF */
+	0xADD6,0xADD7,0xADD8,0xADD9,0xADDA,0xADDB,0xADDD,0xADDE,/* 0xE0-0xE7 */
+	0xADDF,0xADE1,0xADE2,0xADE3,0xADE5,0xADE6,0xADE7,0xADE8,/* 0xE8-0xEF */
+	0xADE9,0xADEA,0xADEB,0xADEC,0xADED,0xADEE,0xADEF,0xADF0,/* 0xF0-0xF7 */
+	0xADF1,0xADF2,0xADF3,0xADF4,0xADF5,0xADF6,0xADF7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_83[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xADFA,0xADFB,0xADFD,0xADFE,0xAE02,0xAE03,0xAE04,/* 0x40-0x47 */
+	0xAE05,0xAE06,0xAE07,0xAE0A,0xAE0C,0xAE0E,0xAE0F,0xAE10,/* 0x48-0x4F */
+	0xAE11,0xAE12,0xAE13,0xAE15,0xAE16,0xAE17,0xAE18,0xAE19,/* 0x50-0x57 */
+	0xAE1A,0xAE1B,0xAE1C,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xAE1D,0xAE1E,0xAE1F,0xAE20,0xAE21,0xAE22,0xAE23,/* 0x60-0x67 */
+	0xAE24,0xAE25,0xAE26,0xAE27,0xAE28,0xAE29,0xAE2A,0xAE2B,/* 0x68-0x6F */
+	0xAE2C,0xAE2D,0xAE2E,0xAE2F,0xAE32,0xAE33,0xAE35,0xAE36,/* 0x70-0x77 */
+	0xAE39,0xAE3B,0xAE3C,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xAE3D,0xAE3E,0xAE3F,0xAE42,0xAE44,0xAE47,0xAE48,/* 0x80-0x87 */
+	0xAE49,0xAE4B,0xAE4F,0xAE51,0xAE52,0xAE53,0xAE55,0xAE57,/* 0x88-0x8F */
+	0xAE58,0xAE59,0xAE5A,0xAE5B,0xAE5E,0xAE62,0xAE63,0xAE64,/* 0x90-0x97 */
+	0xAE66,0xAE67,0xAE6A,0xAE6B,0xAE6D,0xAE6E,0xAE6F,0xAE71,/* 0x98-0x9F */
+	0xAE72,0xAE73,0xAE74,0xAE75,0xAE76,0xAE77,0xAE7A,0xAE7E,/* 0xA0-0xA7 */
+	0xAE7F,0xAE80,0xAE81,0xAE82,0xAE83,0xAE86,0xAE87,0xAE88,/* 0xA8-0xAF */
+	0xAE89,0xAE8A,0xAE8B,0xAE8D,0xAE8E,0xAE8F,0xAE90,0xAE91,/* 0xB0-0xB7 */
+	0xAE92,0xAE93,0xAE94,0xAE95,0xAE96,0xAE97,0xAE98,0xAE99,/* 0xB8-0xBF */
+	0xAE9A,0xAE9B,0xAE9C,0xAE9D,0xAE9E,0xAE9F,0xAEA0,0xAEA1,/* 0xC0-0xC7 */
+	0xAEA2,0xAEA3,0xAEA4,0xAEA5,0xAEA6,0xAEA7,0xAEA8,0xAEA9,/* 0xC8-0xCF */
+	0xAEAA,0xAEAB,0xAEAC,0xAEAD,0xAEAE,0xAEAF,0xAEB0,0xAEB1,/* 0xD0-0xD7 */
+	0xAEB2,0xAEB3,0xAEB4,0xAEB5,0xAEB6,0xAEB7,0xAEB8,0xAEB9,/* 0xD8-0xDF */
+	0xAEBA,0xAEBB,0xAEBF,0xAEC1,0xAEC2,0xAEC3,0xAEC5,0xAEC6,/* 0xE0-0xE7 */
+	0xAEC7,0xAEC8,0xAEC9,0xAECA,0xAECB,0xAECE,0xAED2,0xAED3,/* 0xE8-0xEF */
+	0xAED4,0xAED5,0xAED6,0xAED7,0xAEDA,0xAEDB,0xAEDD,0xAEDE,/* 0xF0-0xF7 */
+	0xAEDF,0xAEE0,0xAEE1,0xAEE2,0xAEE3,0xAEE4,0xAEE5,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_84[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xAEE6,0xAEE7,0xAEE9,0xAEEA,0xAEEC,0xAEEE,0xAEEF,/* 0x40-0x47 */
+	0xAEF0,0xAEF1,0xAEF2,0xAEF3,0xAEF5,0xAEF6,0xAEF7,0xAEF9,/* 0x48-0x4F */
+	0xAEFA,0xAEFB,0xAEFD,0xAEFE,0xAEFF,0xAF00,0xAF01,0xAF02,/* 0x50-0x57 */
+	0xAF03,0xAF04,0xAF05,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xAF06,0xAF09,0xAF0A,0xAF0B,0xAF0C,0xAF0E,0xAF0F,/* 0x60-0x67 */
+	0xAF11,0xAF12,0xAF13,0xAF14,0xAF15,0xAF16,0xAF17,0xAF18,/* 0x68-0x6F */
+	0xAF19,0xAF1A,0xAF1B,0xAF1C,0xAF1D,0xAF1E,0xAF1F,0xAF20,/* 0x70-0x77 */
+	0xAF21,0xAF22,0xAF23,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xAF24,0xAF25,0xAF26,0xAF27,0xAF28,0xAF29,0xAF2A,/* 0x80-0x87 */
+	0xAF2B,0xAF2E,0xAF2F,0xAF31,0xAF33,0xAF35,0xAF36,0xAF37,/* 0x88-0x8F */
+	0xAF38,0xAF39,0xAF3A,0xAF3B,0xAF3E,0xAF40,0xAF44,0xAF45,/* 0x90-0x97 */
+	0xAF46,0xAF47,0xAF4A,0xAF4B,0xAF4C,0xAF4D,0xAF4E,0xAF4F,/* 0x98-0x9F */
+	0xAF51,0xAF52,0xAF53,0xAF54,0xAF55,0xAF56,0xAF57,0xAF58,/* 0xA0-0xA7 */
+	0xAF59,0xAF5A,0xAF5B,0xAF5E,0xAF5F,0xAF60,0xAF61,0xAF62,/* 0xA8-0xAF */
+	0xAF63,0xAF66,0xAF67,0xAF68,0xAF69,0xAF6A,0xAF6B,0xAF6C,/* 0xB0-0xB7 */
+	0xAF6D,0xAF6E,0xAF6F,0xAF70,0xAF71,0xAF72,0xAF73,0xAF74,/* 0xB8-0xBF */
+	0xAF75,0xAF76,0xAF77,0xAF78,0xAF7A,0xAF7B,0xAF7C,0xAF7D,/* 0xC0-0xC7 */
+	0xAF7E,0xAF7F,0xAF81,0xAF82,0xAF83,0xAF85,0xAF86,0xAF87,/* 0xC8-0xCF */
+	0xAF89,0xAF8A,0xAF8B,0xAF8C,0xAF8D,0xAF8E,0xAF8F,0xAF92,/* 0xD0-0xD7 */
+	0xAF93,0xAF94,0xAF96,0xAF97,0xAF98,0xAF99,0xAF9A,0xAF9B,/* 0xD8-0xDF */
+	0xAF9D,0xAF9E,0xAF9F,0xAFA0,0xAFA1,0xAFA2,0xAFA3,0xAFA4,/* 0xE0-0xE7 */
+	0xAFA5,0xAFA6,0xAFA7,0xAFA8,0xAFA9,0xAFAA,0xAFAB,0xAFAC,/* 0xE8-0xEF */
+	0xAFAD,0xAFAE,0xAFAF,0xAFB0,0xAFB1,0xAFB2,0xAFB3,0xAFB4,/* 0xF0-0xF7 */
+	0xAFB5,0xAFB6,0xAFB7,0xAFBA,0xAFBB,0xAFBD,0xAFBE,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_85[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xAFBF,0xAFC1,0xAFC2,0xAFC3,0xAFC4,0xAFC5,0xAFC6,/* 0x40-0x47 */
+	0xAFCA,0xAFCC,0xAFCF,0xAFD0,0xAFD1,0xAFD2,0xAFD3,0xAFD5,/* 0x48-0x4F */
+	0xAFD6,0xAFD7,0xAFD8,0xAFD9,0xAFDA,0xAFDB,0xAFDD,0xAFDE,/* 0x50-0x57 */
+	0xAFDF,0xAFE0,0xAFE1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xAFE2,0xAFE3,0xAFE4,0xAFE5,0xAFE6,0xAFE7,0xAFEA,/* 0x60-0x67 */
+	0xAFEB,0xAFEC,0xAFED,0xAFEE,0xAFEF,0xAFF2,0xAFF3,0xAFF5,/* 0x68-0x6F */
+	0xAFF6,0xAFF7,0xAFF9,0xAFFA,0xAFFB,0xAFFC,0xAFFD,0xAFFE,/* 0x70-0x77 */
+	0xAFFF,0xB002,0xB003,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB005,0xB006,0xB007,0xB008,0xB009,0xB00A,0xB00B,/* 0x80-0x87 */
+	0xB00D,0xB00E,0xB00F,0xB011,0xB012,0xB013,0xB015,0xB016,/* 0x88-0x8F */
+	0xB017,0xB018,0xB019,0xB01A,0xB01B,0xB01E,0xB01F,0xB020,/* 0x90-0x97 */
+	0xB021,0xB022,0xB023,0xB024,0xB025,0xB026,0xB027,0xB029,/* 0x98-0x9F */
+	0xB02A,0xB02B,0xB02C,0xB02D,0xB02E,0xB02F,0xB030,0xB031,/* 0xA0-0xA7 */
+	0xB032,0xB033,0xB034,0xB035,0xB036,0xB037,0xB038,0xB039,/* 0xA8-0xAF */
+	0xB03A,0xB03B,0xB03C,0xB03D,0xB03E,0xB03F,0xB040,0xB041,/* 0xB0-0xB7 */
+	0xB042,0xB043,0xB046,0xB047,0xB049,0xB04B,0xB04D,0xB04F,/* 0xB8-0xBF */
+	0xB050,0xB051,0xB052,0xB056,0xB058,0xB05A,0xB05B,0xB05C,/* 0xC0-0xC7 */
+	0xB05E,0xB05F,0xB060,0xB061,0xB062,0xB063,0xB064,0xB065,/* 0xC8-0xCF */
+	0xB066,0xB067,0xB068,0xB069,0xB06A,0xB06B,0xB06C,0xB06D,/* 0xD0-0xD7 */
+	0xB06E,0xB06F,0xB070,0xB071,0xB072,0xB073,0xB074,0xB075,/* 0xD8-0xDF */
+	0xB076,0xB077,0xB078,0xB079,0xB07A,0xB07B,0xB07E,0xB07F,/* 0xE0-0xE7 */
+	0xB081,0xB082,0xB083,0xB085,0xB086,0xB087,0xB088,0xB089,/* 0xE8-0xEF */
+	0xB08A,0xB08B,0xB08E,0xB090,0xB092,0xB093,0xB094,0xB095,/* 0xF0-0xF7 */
+	0xB096,0xB097,0xB09B,0xB09D,0xB09E,0xB0A3,0xB0A4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_86[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB0A5,0xB0A6,0xB0A7,0xB0AA,0xB0B0,0xB0B2,0xB0B6,/* 0x40-0x47 */
+	0xB0B7,0xB0B9,0xB0BA,0xB0BB,0xB0BD,0xB0BE,0xB0BF,0xB0C0,/* 0x48-0x4F */
+	0xB0C1,0xB0C2,0xB0C3,0xB0C6,0xB0CA,0xB0CB,0xB0CC,0xB0CD,/* 0x50-0x57 */
+	0xB0CE,0xB0CF,0xB0D2,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB0D3,0xB0D5,0xB0D6,0xB0D7,0xB0D9,0xB0DA,0xB0DB,/* 0x60-0x67 */
+	0xB0DC,0xB0DD,0xB0DE,0xB0DF,0xB0E1,0xB0E2,0xB0E3,0xB0E4,/* 0x68-0x6F */
+	0xB0E6,0xB0E7,0xB0E8,0xB0E9,0xB0EA,0xB0EB,0xB0EC,0xB0ED,/* 0x70-0x77 */
+	0xB0EE,0xB0EF,0xB0F0,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB0F1,0xB0F2,0xB0F3,0xB0F4,0xB0F5,0xB0F6,0xB0F7,/* 0x80-0x87 */
+	0xB0F8,0xB0F9,0xB0FA,0xB0FB,0xB0FC,0xB0FD,0xB0FE,0xB0FF,/* 0x88-0x8F */
+	0xB100,0xB101,0xB102,0xB103,0xB104,0xB105,0xB106,0xB107,/* 0x90-0x97 */
+	0xB10A,0xB10D,0xB10E,0xB10F,0xB111,0xB114,0xB115,0xB116,/* 0x98-0x9F */
+	0xB117,0xB11A,0xB11E,0xB11F,0xB120,0xB121,0xB122,0xB126,/* 0xA0-0xA7 */
+	0xB127,0xB129,0xB12A,0xB12B,0xB12D,0xB12E,0xB12F,0xB130,/* 0xA8-0xAF */
+	0xB131,0xB132,0xB133,0xB136,0xB13A,0xB13B,0xB13C,0xB13D,/* 0xB0-0xB7 */
+	0xB13E,0xB13F,0xB142,0xB143,0xB145,0xB146,0xB147,0xB149,/* 0xB8-0xBF */
+	0xB14A,0xB14B,0xB14C,0xB14D,0xB14E,0xB14F,0xB152,0xB153,/* 0xC0-0xC7 */
+	0xB156,0xB157,0xB159,0xB15A,0xB15B,0xB15D,0xB15E,0xB15F,/* 0xC8-0xCF */
+	0xB161,0xB162,0xB163,0xB164,0xB165,0xB166,0xB167,0xB168,/* 0xD0-0xD7 */
+	0xB169,0xB16A,0xB16B,0xB16C,0xB16D,0xB16E,0xB16F,0xB170,/* 0xD8-0xDF */
+	0xB171,0xB172,0xB173,0xB174,0xB175,0xB176,0xB177,0xB17A,/* 0xE0-0xE7 */
+	0xB17B,0xB17D,0xB17E,0xB17F,0xB181,0xB183,0xB184,0xB185,/* 0xE8-0xEF */
+	0xB186,0xB187,0xB18A,0xB18C,0xB18E,0xB18F,0xB190,0xB191,/* 0xF0-0xF7 */
+	0xB195,0xB196,0xB197,0xB199,0xB19A,0xB19B,0xB19D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_87[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB19E,0xB19F,0xB1A0,0xB1A1,0xB1A2,0xB1A3,0xB1A4,/* 0x40-0x47 */
+	0xB1A5,0xB1A6,0xB1A7,0xB1A9,0xB1AA,0xB1AB,0xB1AC,0xB1AD,/* 0x48-0x4F */
+	0xB1AE,0xB1AF,0xB1B0,0xB1B1,0xB1B2,0xB1B3,0xB1B4,0xB1B5,/* 0x50-0x57 */
+	0xB1B6,0xB1B7,0xB1B8,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB1B9,0xB1BA,0xB1BB,0xB1BC,0xB1BD,0xB1BE,0xB1BF,/* 0x60-0x67 */
+	0xB1C0,0xB1C1,0xB1C2,0xB1C3,0xB1C4,0xB1C5,0xB1C6,0xB1C7,/* 0x68-0x6F */
+	0xB1C8,0xB1C9,0xB1CA,0xB1CB,0xB1CD,0xB1CE,0xB1CF,0xB1D1,/* 0x70-0x77 */
+	0xB1D2,0xB1D3,0xB1D5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB1D6,0xB1D7,0xB1D8,0xB1D9,0xB1DA,0xB1DB,0xB1DE,/* 0x80-0x87 */
+	0xB1E0,0xB1E1,0xB1E2,0xB1E3,0xB1E4,0xB1E5,0xB1E6,0xB1E7,/* 0x88-0x8F */
+	0xB1EA,0xB1EB,0xB1ED,0xB1EE,0xB1EF,0xB1F1,0xB1F2,0xB1F3,/* 0x90-0x97 */
+	0xB1F4,0xB1F5,0xB1F6,0xB1F7,0xB1F8,0xB1FA,0xB1FC,0xB1FE,/* 0x98-0x9F */
+	0xB1FF,0xB200,0xB201,0xB202,0xB203,0xB206,0xB207,0xB209,/* 0xA0-0xA7 */
+	0xB20A,0xB20D,0xB20E,0xB20F,0xB210,0xB211,0xB212,0xB213,/* 0xA8-0xAF */
+	0xB216,0xB218,0xB21A,0xB21B,0xB21C,0xB21D,0xB21E,0xB21F,/* 0xB0-0xB7 */
+	0xB221,0xB222,0xB223,0xB224,0xB225,0xB226,0xB227,0xB228,/* 0xB8-0xBF */
+	0xB229,0xB22A,0xB22B,0xB22C,0xB22D,0xB22E,0xB22F,0xB230,/* 0xC0-0xC7 */
+	0xB231,0xB232,0xB233,0xB235,0xB236,0xB237,0xB238,0xB239,/* 0xC8-0xCF */
+	0xB23A,0xB23B,0xB23D,0xB23E,0xB23F,0xB240,0xB241,0xB242,/* 0xD0-0xD7 */
+	0xB243,0xB244,0xB245,0xB246,0xB247,0xB248,0xB249,0xB24A,/* 0xD8-0xDF */
+	0xB24B,0xB24C,0xB24D,0xB24E,0xB24F,0xB250,0xB251,0xB252,/* 0xE0-0xE7 */
+	0xB253,0xB254,0xB255,0xB256,0xB257,0xB259,0xB25A,0xB25B,/* 0xE8-0xEF */
+	0xB25D,0xB25E,0xB25F,0xB261,0xB262,0xB263,0xB264,0xB265,/* 0xF0-0xF7 */
+	0xB266,0xB267,0xB26A,0xB26B,0xB26C,0xB26D,0xB26E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_88[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB26F,0xB270,0xB271,0xB272,0xB273,0xB276,0xB277,/* 0x40-0x47 */
+	0xB278,0xB279,0xB27A,0xB27B,0xB27D,0xB27E,0xB27F,0xB280,/* 0x48-0x4F */
+	0xB281,0xB282,0xB283,0xB286,0xB287,0xB288,0xB28A,0xB28B,/* 0x50-0x57 */
+	0xB28C,0xB28D,0xB28E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB28F,0xB292,0xB293,0xB295,0xB296,0xB297,0xB29B,/* 0x60-0x67 */
+	0xB29C,0xB29D,0xB29E,0xB29F,0xB2A2,0xB2A4,0xB2A7,0xB2A8,/* 0x68-0x6F */
+	0xB2A9,0xB2AB,0xB2AD,0xB2AE,0xB2AF,0xB2B1,0xB2B2,0xB2B3,/* 0x70-0x77 */
+	0xB2B5,0xB2B6,0xB2B7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB2B8,0xB2B9,0xB2BA,0xB2BB,0xB2BC,0xB2BD,0xB2BE,/* 0x80-0x87 */
+	0xB2BF,0xB2C0,0xB2C1,0xB2C2,0xB2C3,0xB2C4,0xB2C5,0xB2C6,/* 0x88-0x8F */
+	0xB2C7,0xB2CA,0xB2CB,0xB2CD,0xB2CE,0xB2CF,0xB2D1,0xB2D3,/* 0x90-0x97 */
+	0xB2D4,0xB2D5,0xB2D6,0xB2D7,0xB2DA,0xB2DC,0xB2DE,0xB2DF,/* 0x98-0x9F */
+	0xB2E0,0xB2E1,0xB2E3,0xB2E7,0xB2E9,0xB2EA,0xB2F0,0xB2F1,/* 0xA0-0xA7 */
+	0xB2F2,0xB2F6,0xB2FC,0xB2FD,0xB2FE,0xB302,0xB303,0xB305,/* 0xA8-0xAF */
+	0xB306,0xB307,0xB309,0xB30A,0xB30B,0xB30C,0xB30D,0xB30E,/* 0xB0-0xB7 */
+	0xB30F,0xB312,0xB316,0xB317,0xB318,0xB319,0xB31A,0xB31B,/* 0xB8-0xBF */
+	0xB31D,0xB31E,0xB31F,0xB320,0xB321,0xB322,0xB323,0xB324,/* 0xC0-0xC7 */
+	0xB325,0xB326,0xB327,0xB328,0xB329,0xB32A,0xB32B,0xB32C,/* 0xC8-0xCF */
+	0xB32D,0xB32E,0xB32F,0xB330,0xB331,0xB332,0xB333,0xB334,/* 0xD0-0xD7 */
+	0xB335,0xB336,0xB337,0xB338,0xB339,0xB33A,0xB33B,0xB33C,/* 0xD8-0xDF */
+	0xB33D,0xB33E,0xB33F,0xB340,0xB341,0xB342,0xB343,0xB344,/* 0xE0-0xE7 */
+	0xB345,0xB346,0xB347,0xB348,0xB349,0xB34A,0xB34B,0xB34C,/* 0xE8-0xEF */
+	0xB34D,0xB34E,0xB34F,0xB350,0xB351,0xB352,0xB353,0xB357,/* 0xF0-0xF7 */
+	0xB359,0xB35A,0xB35D,0xB360,0xB361,0xB362,0xB363,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_89[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB366,0xB368,0xB36A,0xB36C,0xB36D,0xB36F,0xB372,/* 0x40-0x47 */
+	0xB373,0xB375,0xB376,0xB377,0xB379,0xB37A,0xB37B,0xB37C,/* 0x48-0x4F */
+	0xB37D,0xB37E,0xB37F,0xB382,0xB386,0xB387,0xB388,0xB389,/* 0x50-0x57 */
+	0xB38A,0xB38B,0xB38D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB38E,0xB38F,0xB391,0xB392,0xB393,0xB395,0xB396,/* 0x60-0x67 */
+	0xB397,0xB398,0xB399,0xB39A,0xB39B,0xB39C,0xB39D,0xB39E,/* 0x68-0x6F */
+	0xB39F,0xB3A2,0xB3A3,0xB3A4,0xB3A5,0xB3A6,0xB3A7,0xB3A9,/* 0x70-0x77 */
+	0xB3AA,0xB3AB,0xB3AD,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB3AE,0xB3AF,0xB3B0,0xB3B1,0xB3B2,0xB3B3,0xB3B4,/* 0x80-0x87 */
+	0xB3B5,0xB3B6,0xB3B7,0xB3B8,0xB3B9,0xB3BA,0xB3BB,0xB3BC,/* 0x88-0x8F */
+	0xB3BD,0xB3BE,0xB3BF,0xB3C0,0xB3C1,0xB3C2,0xB3C3,0xB3C6,/* 0x90-0x97 */
+	0xB3C7,0xB3C9,0xB3CA,0xB3CD,0xB3CF,0xB3D1,0xB3D2,0xB3D3,/* 0x98-0x9F */
+	0xB3D6,0xB3D8,0xB3DA,0xB3DC,0xB3DE,0xB3DF,0xB3E1,0xB3E2,/* 0xA0-0xA7 */
+	0xB3E3,0xB3E5,0xB3E6,0xB3E7,0xB3E9,0xB3EA,0xB3EB,0xB3EC,/* 0xA8-0xAF */
+	0xB3ED,0xB3EE,0xB3EF,0xB3F0,0xB3F1,0xB3F2,0xB3F3,0xB3F4,/* 0xB0-0xB7 */
+	0xB3F5,0xB3F6,0xB3F7,0xB3F8,0xB3F9,0xB3FA,0xB3FB,0xB3FD,/* 0xB8-0xBF */
+	0xB3FE,0xB3FF,0xB400,0xB401,0xB402,0xB403,0xB404,0xB405,/* 0xC0-0xC7 */
+	0xB406,0xB407,0xB408,0xB409,0xB40A,0xB40B,0xB40C,0xB40D,/* 0xC8-0xCF */
+	0xB40E,0xB40F,0xB411,0xB412,0xB413,0xB414,0xB415,0xB416,/* 0xD0-0xD7 */
+	0xB417,0xB419,0xB41A,0xB41B,0xB41D,0xB41E,0xB41F,0xB421,/* 0xD8-0xDF */
+	0xB422,0xB423,0xB424,0xB425,0xB426,0xB427,0xB42A,0xB42C,/* 0xE0-0xE7 */
+	0xB42D,0xB42E,0xB42F,0xB430,0xB431,0xB432,0xB433,0xB435,/* 0xE8-0xEF */
+	0xB436,0xB437,0xB438,0xB439,0xB43A,0xB43B,0xB43C,0xB43D,/* 0xF0-0xF7 */
+	0xB43E,0xB43F,0xB440,0xB441,0xB442,0xB443,0xB444,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB445,0xB446,0xB447,0xB448,0xB449,0xB44A,0xB44B,/* 0x40-0x47 */
+	0xB44C,0xB44D,0xB44E,0xB44F,0xB452,0xB453,0xB455,0xB456,/* 0x48-0x4F */
+	0xB457,0xB459,0xB45A,0xB45B,0xB45C,0xB45D,0xB45E,0xB45F,/* 0x50-0x57 */
+	0xB462,0xB464,0xB466,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB467,0xB468,0xB469,0xB46A,0xB46B,0xB46D,0xB46E,/* 0x60-0x67 */
+	0xB46F,0xB470,0xB471,0xB472,0xB473,0xB474,0xB475,0xB476,/* 0x68-0x6F */
+	0xB477,0xB478,0xB479,0xB47A,0xB47B,0xB47C,0xB47D,0xB47E,/* 0x70-0x77 */
+	0xB47F,0xB481,0xB482,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB483,0xB484,0xB485,0xB486,0xB487,0xB489,0xB48A,/* 0x80-0x87 */
+	0xB48B,0xB48C,0xB48D,0xB48E,0xB48F,0xB490,0xB491,0xB492,/* 0x88-0x8F */
+	0xB493,0xB494,0xB495,0xB496,0xB497,0xB498,0xB499,0xB49A,/* 0x90-0x97 */
+	0xB49B,0xB49C,0xB49E,0xB49F,0xB4A0,0xB4A1,0xB4A2,0xB4A3,/* 0x98-0x9F */
+	0xB4A5,0xB4A6,0xB4A7,0xB4A9,0xB4AA,0xB4AB,0xB4AD,0xB4AE,/* 0xA0-0xA7 */
+	0xB4AF,0xB4B0,0xB4B1,0xB4B2,0xB4B3,0xB4B4,0xB4B6,0xB4B8,/* 0xA8-0xAF */
+	0xB4BA,0xB4BB,0xB4BC,0xB4BD,0xB4BE,0xB4BF,0xB4C1,0xB4C2,/* 0xB0-0xB7 */
+	0xB4C3,0xB4C5,0xB4C6,0xB4C7,0xB4C9,0xB4CA,0xB4CB,0xB4CC,/* 0xB8-0xBF */
+	0xB4CD,0xB4CE,0xB4CF,0xB4D1,0xB4D2,0xB4D3,0xB4D4,0xB4D6,/* 0xC0-0xC7 */
+	0xB4D7,0xB4D8,0xB4D9,0xB4DA,0xB4DB,0xB4DE,0xB4DF,0xB4E1,/* 0xC8-0xCF */
+	0xB4E2,0xB4E5,0xB4E7,0xB4E8,0xB4E9,0xB4EA,0xB4EB,0xB4EE,/* 0xD0-0xD7 */
+	0xB4F0,0xB4F2,0xB4F3,0xB4F4,0xB4F5,0xB4F6,0xB4F7,0xB4F9,/* 0xD8-0xDF */
+	0xB4FA,0xB4FB,0xB4FC,0xB4FD,0xB4FE,0xB4FF,0xB500,0xB501,/* 0xE0-0xE7 */
+	0xB502,0xB503,0xB504,0xB505,0xB506,0xB507,0xB508,0xB509,/* 0xE8-0xEF */
+	0xB50A,0xB50B,0xB50C,0xB50D,0xB50E,0xB50F,0xB510,0xB511,/* 0xF0-0xF7 */
+	0xB512,0xB513,0xB516,0xB517,0xB519,0xB51A,0xB51D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB51E,0xB51F,0xB520,0xB521,0xB522,0xB523,0xB526,/* 0x40-0x47 */
+	0xB52B,0xB52C,0xB52D,0xB52E,0xB52F,0xB532,0xB533,0xB535,/* 0x48-0x4F */
+	0xB536,0xB537,0xB539,0xB53A,0xB53B,0xB53C,0xB53D,0xB53E,/* 0x50-0x57 */
+	0xB53F,0xB542,0xB546,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB547,0xB548,0xB549,0xB54A,0xB54E,0xB54F,0xB551,/* 0x60-0x67 */
+	0xB552,0xB553,0xB555,0xB556,0xB557,0xB558,0xB559,0xB55A,/* 0x68-0x6F */
+	0xB55B,0xB55E,0xB562,0xB563,0xB564,0xB565,0xB566,0xB567,/* 0x70-0x77 */
+	0xB568,0xB569,0xB56A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB56B,0xB56C,0xB56D,0xB56E,0xB56F,0xB570,0xB571,/* 0x80-0x87 */
+	0xB572,0xB573,0xB574,0xB575,0xB576,0xB577,0xB578,0xB579,/* 0x88-0x8F */
+	0xB57A,0xB57B,0xB57C,0xB57D,0xB57E,0xB57F,0xB580,0xB581,/* 0x90-0x97 */
+	0xB582,0xB583,0xB584,0xB585,0xB586,0xB587,0xB588,0xB589,/* 0x98-0x9F */
+	0xB58A,0xB58B,0xB58C,0xB58D,0xB58E,0xB58F,0xB590,0xB591,/* 0xA0-0xA7 */
+	0xB592,0xB593,0xB594,0xB595,0xB596,0xB597,0xB598,0xB599,/* 0xA8-0xAF */
+	0xB59A,0xB59B,0xB59C,0xB59D,0xB59E,0xB59F,0xB5A2,0xB5A3,/* 0xB0-0xB7 */
+	0xB5A5,0xB5A6,0xB5A7,0xB5A9,0xB5AC,0xB5AD,0xB5AE,0xB5AF,/* 0xB8-0xBF */
+	0xB5B2,0xB5B6,0xB5B7,0xB5B8,0xB5B9,0xB5BA,0xB5BE,0xB5BF,/* 0xC0-0xC7 */
+	0xB5C1,0xB5C2,0xB5C3,0xB5C5,0xB5C6,0xB5C7,0xB5C8,0xB5C9,/* 0xC8-0xCF */
+	0xB5CA,0xB5CB,0xB5CE,0xB5D2,0xB5D3,0xB5D4,0xB5D5,0xB5D6,/* 0xD0-0xD7 */
+	0xB5D7,0xB5D9,0xB5DA,0xB5DB,0xB5DC,0xB5DD,0xB5DE,0xB5DF,/* 0xD8-0xDF */
+	0xB5E0,0xB5E1,0xB5E2,0xB5E3,0xB5E4,0xB5E5,0xB5E6,0xB5E7,/* 0xE0-0xE7 */
+	0xB5E8,0xB5E9,0xB5EA,0xB5EB,0xB5ED,0xB5EE,0xB5EF,0xB5F0,/* 0xE8-0xEF */
+	0xB5F1,0xB5F2,0xB5F3,0xB5F4,0xB5F5,0xB5F6,0xB5F7,0xB5F8,/* 0xF0-0xF7 */
+	0xB5F9,0xB5FA,0xB5FB,0xB5FC,0xB5FD,0xB5FE,0xB5FF,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB600,0xB601,0xB602,0xB603,0xB604,0xB605,0xB606,/* 0x40-0x47 */
+	0xB607,0xB608,0xB609,0xB60A,0xB60B,0xB60C,0xB60D,0xB60E,/* 0x48-0x4F */
+	0xB60F,0xB612,0xB613,0xB615,0xB616,0xB617,0xB619,0xB61A,/* 0x50-0x57 */
+	0xB61B,0xB61C,0xB61D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB61E,0xB61F,0xB620,0xB621,0xB622,0xB623,0xB624,/* 0x60-0x67 */
+	0xB626,0xB627,0xB628,0xB629,0xB62A,0xB62B,0xB62D,0xB62E,/* 0x68-0x6F */
+	0xB62F,0xB630,0xB631,0xB632,0xB633,0xB635,0xB636,0xB637,/* 0x70-0x77 */
+	0xB638,0xB639,0xB63A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB63B,0xB63C,0xB63D,0xB63E,0xB63F,0xB640,0xB641,/* 0x80-0x87 */
+	0xB642,0xB643,0xB644,0xB645,0xB646,0xB647,0xB649,0xB64A,/* 0x88-0x8F */
+	0xB64B,0xB64C,0xB64D,0xB64E,0xB64F,0xB650,0xB651,0xB652,/* 0x90-0x97 */
+	0xB653,0xB654,0xB655,0xB656,0xB657,0xB658,0xB659,0xB65A,/* 0x98-0x9F */
+	0xB65B,0xB65C,0xB65D,0xB65E,0xB65F,0xB660,0xB661,0xB662,/* 0xA0-0xA7 */
+	0xB663,0xB665,0xB666,0xB667,0xB669,0xB66A,0xB66B,0xB66C,/* 0xA8-0xAF */
+	0xB66D,0xB66E,0xB66F,0xB670,0xB671,0xB672,0xB673,0xB674,/* 0xB0-0xB7 */
+	0xB675,0xB676,0xB677,0xB678,0xB679,0xB67A,0xB67B,0xB67C,/* 0xB8-0xBF */
+	0xB67D,0xB67E,0xB67F,0xB680,0xB681,0xB682,0xB683,0xB684,/* 0xC0-0xC7 */
+	0xB685,0xB686,0xB687,0xB688,0xB689,0xB68A,0xB68B,0xB68C,/* 0xC8-0xCF */
+	0xB68D,0xB68E,0xB68F,0xB690,0xB691,0xB692,0xB693,0xB694,/* 0xD0-0xD7 */
+	0xB695,0xB696,0xB697,0xB698,0xB699,0xB69A,0xB69B,0xB69E,/* 0xD8-0xDF */
+	0xB69F,0xB6A1,0xB6A2,0xB6A3,0xB6A5,0xB6A6,0xB6A7,0xB6A8,/* 0xE0-0xE7 */
+	0xB6A9,0xB6AA,0xB6AD,0xB6AE,0xB6AF,0xB6B0,0xB6B2,0xB6B3,/* 0xE8-0xEF */
+	0xB6B4,0xB6B5,0xB6B6,0xB6B7,0xB6B8,0xB6B9,0xB6BA,0xB6BB,/* 0xF0-0xF7 */
+	0xB6BC,0xB6BD,0xB6BE,0xB6BF,0xB6C0,0xB6C1,0xB6C2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB6C3,0xB6C4,0xB6C5,0xB6C6,0xB6C7,0xB6C8,0xB6C9,/* 0x40-0x47 */
+	0xB6CA,0xB6CB,0xB6CC,0xB6CD,0xB6CE,0xB6CF,0xB6D0,0xB6D1,/* 0x48-0x4F */
+	0xB6D2,0xB6D3,0xB6D5,0xB6D6,0xB6D7,0xB6D8,0xB6D9,0xB6DA,/* 0x50-0x57 */
+	0xB6DB,0xB6DC,0xB6DD,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB6DE,0xB6DF,0xB6E0,0xB6E1,0xB6E2,0xB6E3,0xB6E4,/* 0x60-0x67 */
+	0xB6E5,0xB6E6,0xB6E7,0xB6E8,0xB6E9,0xB6EA,0xB6EB,0xB6EC,/* 0x68-0x6F */
+	0xB6ED,0xB6EE,0xB6EF,0xB6F1,0xB6F2,0xB6F3,0xB6F5,0xB6F6,/* 0x70-0x77 */
+	0xB6F7,0xB6F9,0xB6FA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB6FB,0xB6FC,0xB6FD,0xB6FE,0xB6FF,0xB702,0xB703,/* 0x80-0x87 */
+	0xB704,0xB706,0xB707,0xB708,0xB709,0xB70A,0xB70B,0xB70C,/* 0x88-0x8F */
+	0xB70D,0xB70E,0xB70F,0xB710,0xB711,0xB712,0xB713,0xB714,/* 0x90-0x97 */
+	0xB715,0xB716,0xB717,0xB718,0xB719,0xB71A,0xB71B,0xB71C,/* 0x98-0x9F */
+	0xB71D,0xB71E,0xB71F,0xB720,0xB721,0xB722,0xB723,0xB724,/* 0xA0-0xA7 */
+	0xB725,0xB726,0xB727,0xB72A,0xB72B,0xB72D,0xB72E,0xB731,/* 0xA8-0xAF */
+	0xB732,0xB733,0xB734,0xB735,0xB736,0xB737,0xB73A,0xB73C,/* 0xB0-0xB7 */
+	0xB73D,0xB73E,0xB73F,0xB740,0xB741,0xB742,0xB743,0xB745,/* 0xB8-0xBF */
+	0xB746,0xB747,0xB749,0xB74A,0xB74B,0xB74D,0xB74E,0xB74F,/* 0xC0-0xC7 */
+	0xB750,0xB751,0xB752,0xB753,0xB756,0xB757,0xB758,0xB759,/* 0xC8-0xCF */
+	0xB75A,0xB75B,0xB75C,0xB75D,0xB75E,0xB75F,0xB761,0xB762,/* 0xD0-0xD7 */
+	0xB763,0xB765,0xB766,0xB767,0xB769,0xB76A,0xB76B,0xB76C,/* 0xD8-0xDF */
+	0xB76D,0xB76E,0xB76F,0xB772,0xB774,0xB776,0xB777,0xB778,/* 0xE0-0xE7 */
+	0xB779,0xB77A,0xB77B,0xB77E,0xB77F,0xB781,0xB782,0xB783,/* 0xE8-0xEF */
+	0xB785,0xB786,0xB787,0xB788,0xB789,0xB78A,0xB78B,0xB78E,/* 0xF0-0xF7 */
+	0xB793,0xB794,0xB795,0xB79A,0xB79B,0xB79D,0xB79E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB79F,0xB7A1,0xB7A2,0xB7A3,0xB7A4,0xB7A5,0xB7A6,/* 0x40-0x47 */
+	0xB7A7,0xB7AA,0xB7AE,0xB7AF,0xB7B0,0xB7B1,0xB7B2,0xB7B3,/* 0x48-0x4F */
+	0xB7B6,0xB7B7,0xB7B9,0xB7BA,0xB7BB,0xB7BC,0xB7BD,0xB7BE,/* 0x50-0x57 */
+	0xB7BF,0xB7C0,0xB7C1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB7C2,0xB7C3,0xB7C4,0xB7C5,0xB7C6,0xB7C8,0xB7CA,/* 0x60-0x67 */
+	0xB7CB,0xB7CC,0xB7CD,0xB7CE,0xB7CF,0xB7D0,0xB7D1,0xB7D2,/* 0x68-0x6F */
+	0xB7D3,0xB7D4,0xB7D5,0xB7D6,0xB7D7,0xB7D8,0xB7D9,0xB7DA,/* 0x70-0x77 */
+	0xB7DB,0xB7DC,0xB7DD,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB7DE,0xB7DF,0xB7E0,0xB7E1,0xB7E2,0xB7E3,0xB7E4,/* 0x80-0x87 */
+	0xB7E5,0xB7E6,0xB7E7,0xB7E8,0xB7E9,0xB7EA,0xB7EB,0xB7EE,/* 0x88-0x8F */
+	0xB7EF,0xB7F1,0xB7F2,0xB7F3,0xB7F5,0xB7F6,0xB7F7,0xB7F8,/* 0x90-0x97 */
+	0xB7F9,0xB7FA,0xB7FB,0xB7FE,0xB802,0xB803,0xB804,0xB805,/* 0x98-0x9F */
+	0xB806,0xB80A,0xB80B,0xB80D,0xB80E,0xB80F,0xB811,0xB812,/* 0xA0-0xA7 */
+	0xB813,0xB814,0xB815,0xB816,0xB817,0xB81A,0xB81C,0xB81E,/* 0xA8-0xAF */
+	0xB81F,0xB820,0xB821,0xB822,0xB823,0xB826,0xB827,0xB829,/* 0xB0-0xB7 */
+	0xB82A,0xB82B,0xB82D,0xB82E,0xB82F,0xB830,0xB831,0xB832,/* 0xB8-0xBF */
+	0xB833,0xB836,0xB83A,0xB83B,0xB83C,0xB83D,0xB83E,0xB83F,/* 0xC0-0xC7 */
+	0xB841,0xB842,0xB843,0xB845,0xB846,0xB847,0xB848,0xB849,/* 0xC8-0xCF */
+	0xB84A,0xB84B,0xB84C,0xB84D,0xB84E,0xB84F,0xB850,0xB852,/* 0xD0-0xD7 */
+	0xB854,0xB855,0xB856,0xB857,0xB858,0xB859,0xB85A,0xB85B,/* 0xD8-0xDF */
+	0xB85E,0xB85F,0xB861,0xB862,0xB863,0xB865,0xB866,0xB867,/* 0xE0-0xE7 */
+	0xB868,0xB869,0xB86A,0xB86B,0xB86E,0xB870,0xB872,0xB873,/* 0xE8-0xEF */
+	0xB874,0xB875,0xB876,0xB877,0xB879,0xB87A,0xB87B,0xB87D,/* 0xF0-0xF7 */
+	0xB87E,0xB87F,0xB880,0xB881,0xB882,0xB883,0xB884,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_8F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB885,0xB886,0xB887,0xB888,0xB889,0xB88A,0xB88B,/* 0x40-0x47 */
+	0xB88C,0xB88E,0xB88F,0xB890,0xB891,0xB892,0xB893,0xB894,/* 0x48-0x4F */
+	0xB895,0xB896,0xB897,0xB898,0xB899,0xB89A,0xB89B,0xB89C,/* 0x50-0x57 */
+	0xB89D,0xB89E,0xB89F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB8A0,0xB8A1,0xB8A2,0xB8A3,0xB8A4,0xB8A5,0xB8A6,/* 0x60-0x67 */
+	0xB8A7,0xB8A9,0xB8AA,0xB8AB,0xB8AC,0xB8AD,0xB8AE,0xB8AF,/* 0x68-0x6F */
+	0xB8B1,0xB8B2,0xB8B3,0xB8B5,0xB8B6,0xB8B7,0xB8B9,0xB8BA,/* 0x70-0x77 */
+	0xB8BB,0xB8BC,0xB8BD,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB8BE,0xB8BF,0xB8C2,0xB8C4,0xB8C6,0xB8C7,0xB8C8,/* 0x80-0x87 */
+	0xB8C9,0xB8CA,0xB8CB,0xB8CD,0xB8CE,0xB8CF,0xB8D1,0xB8D2,/* 0x88-0x8F */
+	0xB8D3,0xB8D5,0xB8D6,0xB8D7,0xB8D8,0xB8D9,0xB8DA,0xB8DB,/* 0x90-0x97 */
+	0xB8DC,0xB8DE,0xB8E0,0xB8E2,0xB8E3,0xB8E4,0xB8E5,0xB8E6,/* 0x98-0x9F */
+	0xB8E7,0xB8EA,0xB8EB,0xB8ED,0xB8EE,0xB8EF,0xB8F1,0xB8F2,/* 0xA0-0xA7 */
+	0xB8F3,0xB8F4,0xB8F5,0xB8F6,0xB8F7,0xB8FA,0xB8FC,0xB8FE,/* 0xA8-0xAF */
+	0xB8FF,0xB900,0xB901,0xB902,0xB903,0xB905,0xB906,0xB907,/* 0xB0-0xB7 */
+	0xB908,0xB909,0xB90A,0xB90B,0xB90C,0xB90D,0xB90E,0xB90F,/* 0xB8-0xBF */
+	0xB910,0xB911,0xB912,0xB913,0xB914,0xB915,0xB916,0xB917,/* 0xC0-0xC7 */
+	0xB919,0xB91A,0xB91B,0xB91C,0xB91D,0xB91E,0xB91F,0xB921,/* 0xC8-0xCF */
+	0xB922,0xB923,0xB924,0xB925,0xB926,0xB927,0xB928,0xB929,/* 0xD0-0xD7 */
+	0xB92A,0xB92B,0xB92C,0xB92D,0xB92E,0xB92F,0xB930,0xB931,/* 0xD8-0xDF */
+	0xB932,0xB933,0xB934,0xB935,0xB936,0xB937,0xB938,0xB939,/* 0xE0-0xE7 */
+	0xB93A,0xB93B,0xB93E,0xB93F,0xB941,0xB942,0xB943,0xB945,/* 0xE8-0xEF */
+	0xB946,0xB947,0xB948,0xB949,0xB94A,0xB94B,0xB94D,0xB94E,/* 0xF0-0xF7 */
+	0xB950,0xB952,0xB953,0xB954,0xB955,0xB956,0xB957,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_90[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xB95A,0xB95B,0xB95D,0xB95E,0xB95F,0xB961,0xB962,/* 0x40-0x47 */
+	0xB963,0xB964,0xB965,0xB966,0xB967,0xB96A,0xB96C,0xB96E,/* 0x48-0x4F */
+	0xB96F,0xB970,0xB971,0xB972,0xB973,0xB976,0xB977,0xB979,/* 0x50-0x57 */
+	0xB97A,0xB97B,0xB97D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xB97E,0xB97F,0xB980,0xB981,0xB982,0xB983,0xB986,/* 0x60-0x67 */
+	0xB988,0xB98B,0xB98C,0xB98F,0xB990,0xB991,0xB992,0xB993,/* 0x68-0x6F */
+	0xB994,0xB995,0xB996,0xB997,0xB998,0xB999,0xB99A,0xB99B,/* 0x70-0x77 */
+	0xB99C,0xB99D,0xB99E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xB99F,0xB9A0,0xB9A1,0xB9A2,0xB9A3,0xB9A4,0xB9A5,/* 0x80-0x87 */
+	0xB9A6,0xB9A7,0xB9A8,0xB9A9,0xB9AA,0xB9AB,0xB9AE,0xB9AF,/* 0x88-0x8F */
+	0xB9B1,0xB9B2,0xB9B3,0xB9B5,0xB9B6,0xB9B7,0xB9B8,0xB9B9,/* 0x90-0x97 */
+	0xB9BA,0xB9BB,0xB9BE,0xB9C0,0xB9C2,0xB9C3,0xB9C4,0xB9C5,/* 0x98-0x9F */
+	0xB9C6,0xB9C7,0xB9CA,0xB9CB,0xB9CD,0xB9D3,0xB9D4,0xB9D5,/* 0xA0-0xA7 */
+	0xB9D6,0xB9D7,0xB9DA,0xB9DC,0xB9DF,0xB9E0,0xB9E2,0xB9E6,/* 0xA8-0xAF */
+	0xB9E7,0xB9E9,0xB9EA,0xB9EB,0xB9ED,0xB9EE,0xB9EF,0xB9F0,/* 0xB0-0xB7 */
+	0xB9F1,0xB9F2,0xB9F3,0xB9F6,0xB9FB,0xB9FC,0xB9FD,0xB9FE,/* 0xB8-0xBF */
+	0xB9FF,0xBA02,0xBA03,0xBA04,0xBA05,0xBA06,0xBA07,0xBA09,/* 0xC0-0xC7 */
+	0xBA0A,0xBA0B,0xBA0C,0xBA0D,0xBA0E,0xBA0F,0xBA10,0xBA11,/* 0xC8-0xCF */
+	0xBA12,0xBA13,0xBA14,0xBA16,0xBA17,0xBA18,0xBA19,0xBA1A,/* 0xD0-0xD7 */
+	0xBA1B,0xBA1C,0xBA1D,0xBA1E,0xBA1F,0xBA20,0xBA21,0xBA22,/* 0xD8-0xDF */
+	0xBA23,0xBA24,0xBA25,0xBA26,0xBA27,0xBA28,0xBA29,0xBA2A,/* 0xE0-0xE7 */
+	0xBA2B,0xBA2C,0xBA2D,0xBA2E,0xBA2F,0xBA30,0xBA31,0xBA32,/* 0xE8-0xEF */
+	0xBA33,0xBA34,0xBA35,0xBA36,0xBA37,0xBA3A,0xBA3B,0xBA3D,/* 0xF0-0xF7 */
+	0xBA3E,0xBA3F,0xBA41,0xBA43,0xBA44,0xBA45,0xBA46,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_91[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBA47,0xBA4A,0xBA4C,0xBA4F,0xBA50,0xBA51,0xBA52,/* 0x40-0x47 */
+	0xBA56,0xBA57,0xBA59,0xBA5A,0xBA5B,0xBA5D,0xBA5E,0xBA5F,/* 0x48-0x4F */
+	0xBA60,0xBA61,0xBA62,0xBA63,0xBA66,0xBA6A,0xBA6B,0xBA6C,/* 0x50-0x57 */
+	0xBA6D,0xBA6E,0xBA6F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBA72,0xBA73,0xBA75,0xBA76,0xBA77,0xBA79,0xBA7A,/* 0x60-0x67 */
+	0xBA7B,0xBA7C,0xBA7D,0xBA7E,0xBA7F,0xBA80,0xBA81,0xBA82,/* 0x68-0x6F */
+	0xBA86,0xBA88,0xBA89,0xBA8A,0xBA8B,0xBA8D,0xBA8E,0xBA8F,/* 0x70-0x77 */
+	0xBA90,0xBA91,0xBA92,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBA93,0xBA94,0xBA95,0xBA96,0xBA97,0xBA98,0xBA99,/* 0x80-0x87 */
+	0xBA9A,0xBA9B,0xBA9C,0xBA9D,0xBA9E,0xBA9F,0xBAA0,0xBAA1,/* 0x88-0x8F */
+	0xBAA2,0xBAA3,0xBAA4,0xBAA5,0xBAA6,0xBAA7,0xBAAA,0xBAAD,/* 0x90-0x97 */
+	0xBAAE,0xBAAF,0xBAB1,0xBAB3,0xBAB4,0xBAB5,0xBAB6,0xBAB7,/* 0x98-0x9F */
+	0xBABA,0xBABC,0xBABE,0xBABF,0xBAC0,0xBAC1,0xBAC2,0xBAC3,/* 0xA0-0xA7 */
+	0xBAC5,0xBAC6,0xBAC7,0xBAC9,0xBACA,0xBACB,0xBACC,0xBACD,/* 0xA8-0xAF */
+	0xBACE,0xBACF,0xBAD0,0xBAD1,0xBAD2,0xBAD3,0xBAD4,0xBAD5,/* 0xB0-0xB7 */
+	0xBAD6,0xBAD7,0xBADA,0xBADB,0xBADC,0xBADD,0xBADE,0xBADF,/* 0xB8-0xBF */
+	0xBAE0,0xBAE1,0xBAE2,0xBAE3,0xBAE4,0xBAE5,0xBAE6,0xBAE7,/* 0xC0-0xC7 */
+	0xBAE8,0xBAE9,0xBAEA,0xBAEB,0xBAEC,0xBAED,0xBAEE,0xBAEF,/* 0xC8-0xCF */
+	0xBAF0,0xBAF1,0xBAF2,0xBAF3,0xBAF4,0xBAF5,0xBAF6,0xBAF7,/* 0xD0-0xD7 */
+	0xBAF8,0xBAF9,0xBAFA,0xBAFB,0xBAFD,0xBAFE,0xBAFF,0xBB01,/* 0xD8-0xDF */
+	0xBB02,0xBB03,0xBB05,0xBB06,0xBB07,0xBB08,0xBB09,0xBB0A,/* 0xE0-0xE7 */
+	0xBB0B,0xBB0C,0xBB0E,0xBB10,0xBB12,0xBB13,0xBB14,0xBB15,/* 0xE8-0xEF */
+	0xBB16,0xBB17,0xBB19,0xBB1A,0xBB1B,0xBB1D,0xBB1E,0xBB1F,/* 0xF0-0xF7 */
+	0xBB21,0xBB22,0xBB23,0xBB24,0xBB25,0xBB26,0xBB27,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_92[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBB28,0xBB2A,0xBB2C,0xBB2D,0xBB2E,0xBB2F,0xBB30,/* 0x40-0x47 */
+	0xBB31,0xBB32,0xBB33,0xBB37,0xBB39,0xBB3A,0xBB3F,0xBB40,/* 0x48-0x4F */
+	0xBB41,0xBB42,0xBB43,0xBB46,0xBB48,0xBB4A,0xBB4B,0xBB4C,/* 0x50-0x57 */
+	0xBB4E,0xBB51,0xBB52,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBB53,0xBB55,0xBB56,0xBB57,0xBB59,0xBB5A,0xBB5B,/* 0x60-0x67 */
+	0xBB5C,0xBB5D,0xBB5E,0xBB5F,0xBB60,0xBB62,0xBB64,0xBB65,/* 0x68-0x6F */
+	0xBB66,0xBB67,0xBB68,0xBB69,0xBB6A,0xBB6B,0xBB6D,0xBB6E,/* 0x70-0x77 */
+	0xBB6F,0xBB70,0xBB71,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBB72,0xBB73,0xBB74,0xBB75,0xBB76,0xBB77,0xBB78,/* 0x80-0x87 */
+	0xBB79,0xBB7A,0xBB7B,0xBB7C,0xBB7D,0xBB7E,0xBB7F,0xBB80,/* 0x88-0x8F */
+	0xBB81,0xBB82,0xBB83,0xBB84,0xBB85,0xBB86,0xBB87,0xBB89,/* 0x90-0x97 */
+	0xBB8A,0xBB8B,0xBB8D,0xBB8E,0xBB8F,0xBB91,0xBB92,0xBB93,/* 0x98-0x9F */
+	0xBB94,0xBB95,0xBB96,0xBB97,0xBB98,0xBB99,0xBB9A,0xBB9B,/* 0xA0-0xA7 */
+	0xBB9C,0xBB9D,0xBB9E,0xBB9F,0xBBA0,0xBBA1,0xBBA2,0xBBA3,/* 0xA8-0xAF */
+	0xBBA5,0xBBA6,0xBBA7,0xBBA9,0xBBAA,0xBBAB,0xBBAD,0xBBAE,/* 0xB0-0xB7 */
+	0xBBAF,0xBBB0,0xBBB1,0xBBB2,0xBBB3,0xBBB5,0xBBB6,0xBBB8,/* 0xB8-0xBF */
+	0xBBB9,0xBBBA,0xBBBB,0xBBBC,0xBBBD,0xBBBE,0xBBBF,0xBBC1,/* 0xC0-0xC7 */
+	0xBBC2,0xBBC3,0xBBC5,0xBBC6,0xBBC7,0xBBC9,0xBBCA,0xBBCB,/* 0xC8-0xCF */
+	0xBBCC,0xBBCD,0xBBCE,0xBBCF,0xBBD1,0xBBD2,0xBBD4,0xBBD5,/* 0xD0-0xD7 */
+	0xBBD6,0xBBD7,0xBBD8,0xBBD9,0xBBDA,0xBBDB,0xBBDC,0xBBDD,/* 0xD8-0xDF */
+	0xBBDE,0xBBDF,0xBBE0,0xBBE1,0xBBE2,0xBBE3,0xBBE4,0xBBE5,/* 0xE0-0xE7 */
+	0xBBE6,0xBBE7,0xBBE8,0xBBE9,0xBBEA,0xBBEB,0xBBEC,0xBBED,/* 0xE8-0xEF */
+	0xBBEE,0xBBEF,0xBBF0,0xBBF1,0xBBF2,0xBBF3,0xBBF4,0xBBF5,/* 0xF0-0xF7 */
+	0xBBF6,0xBBF7,0xBBFA,0xBBFB,0xBBFD,0xBBFE,0xBC01,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_93[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBC03,0xBC04,0xBC05,0xBC06,0xBC07,0xBC0A,0xBC0E,/* 0x40-0x47 */
+	0xBC10,0xBC12,0xBC13,0xBC19,0xBC1A,0xBC20,0xBC21,0xBC22,/* 0x48-0x4F */
+	0xBC23,0xBC26,0xBC28,0xBC2A,0xBC2B,0xBC2C,0xBC2E,0xBC2F,/* 0x50-0x57 */
+	0xBC32,0xBC33,0xBC35,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBC36,0xBC37,0xBC39,0xBC3A,0xBC3B,0xBC3C,0xBC3D,/* 0x60-0x67 */
+	0xBC3E,0xBC3F,0xBC42,0xBC46,0xBC47,0xBC48,0xBC4A,0xBC4B,/* 0x68-0x6F */
+	0xBC4E,0xBC4F,0xBC51,0xBC52,0xBC53,0xBC54,0xBC55,0xBC56,/* 0x70-0x77 */
+	0xBC57,0xBC58,0xBC59,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBC5A,0xBC5B,0xBC5C,0xBC5E,0xBC5F,0xBC60,0xBC61,/* 0x80-0x87 */
+	0xBC62,0xBC63,0xBC64,0xBC65,0xBC66,0xBC67,0xBC68,0xBC69,/* 0x88-0x8F */
+	0xBC6A,0xBC6B,0xBC6C,0xBC6D,0xBC6E,0xBC6F,0xBC70,0xBC71,/* 0x90-0x97 */
+	0xBC72,0xBC73,0xBC74,0xBC75,0xBC76,0xBC77,0xBC78,0xBC79,/* 0x98-0x9F */
+	0xBC7A,0xBC7B,0xBC7C,0xBC7D,0xBC7E,0xBC7F,0xBC80,0xBC81,/* 0xA0-0xA7 */
+	0xBC82,0xBC83,0xBC86,0xBC87,0xBC89,0xBC8A,0xBC8D,0xBC8F,/* 0xA8-0xAF */
+	0xBC90,0xBC91,0xBC92,0xBC93,0xBC96,0xBC98,0xBC9B,0xBC9C,/* 0xB0-0xB7 */
+	0xBC9D,0xBC9E,0xBC9F,0xBCA2,0xBCA3,0xBCA5,0xBCA6,0xBCA9,/* 0xB8-0xBF */
+	0xBCAA,0xBCAB,0xBCAC,0xBCAD,0xBCAE,0xBCAF,0xBCB2,0xBCB6,/* 0xC0-0xC7 */
+	0xBCB7,0xBCB8,0xBCB9,0xBCBA,0xBCBB,0xBCBE,0xBCBF,0xBCC1,/* 0xC8-0xCF */
+	0xBCC2,0xBCC3,0xBCC5,0xBCC6,0xBCC7,0xBCC8,0xBCC9,0xBCCA,/* 0xD0-0xD7 */
+	0xBCCB,0xBCCC,0xBCCE,0xBCD2,0xBCD3,0xBCD4,0xBCD6,0xBCD7,/* 0xD8-0xDF */
+	0xBCD9,0xBCDA,0xBCDB,0xBCDD,0xBCDE,0xBCDF,0xBCE0,0xBCE1,/* 0xE0-0xE7 */
+	0xBCE2,0xBCE3,0xBCE4,0xBCE5,0xBCE6,0xBCE7,0xBCE8,0xBCE9,/* 0xE8-0xEF */
+	0xBCEA,0xBCEB,0xBCEC,0xBCED,0xBCEE,0xBCEF,0xBCF0,0xBCF1,/* 0xF0-0xF7 */
+	0xBCF2,0xBCF3,0xBCF7,0xBCF9,0xBCFA,0xBCFB,0xBCFD,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_94[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBCFE,0xBCFF,0xBD00,0xBD01,0xBD02,0xBD03,0xBD06,/* 0x40-0x47 */
+	0xBD08,0xBD0A,0xBD0B,0xBD0C,0xBD0D,0xBD0E,0xBD0F,0xBD11,/* 0x48-0x4F */
+	0xBD12,0xBD13,0xBD15,0xBD16,0xBD17,0xBD18,0xBD19,0xBD1A,/* 0x50-0x57 */
+	0xBD1B,0xBD1C,0xBD1D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBD1E,0xBD1F,0xBD20,0xBD21,0xBD22,0xBD23,0xBD25,/* 0x60-0x67 */
+	0xBD26,0xBD27,0xBD28,0xBD29,0xBD2A,0xBD2B,0xBD2D,0xBD2E,/* 0x68-0x6F */
+	0xBD2F,0xBD30,0xBD31,0xBD32,0xBD33,0xBD34,0xBD35,0xBD36,/* 0x70-0x77 */
+	0xBD37,0xBD38,0xBD39,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBD3A,0xBD3B,0xBD3C,0xBD3D,0xBD3E,0xBD3F,0xBD41,/* 0x80-0x87 */
+	0xBD42,0xBD43,0xBD44,0xBD45,0xBD46,0xBD47,0xBD4A,0xBD4B,/* 0x88-0x8F */
+	0xBD4D,0xBD4E,0xBD4F,0xBD51,0xBD52,0xBD53,0xBD54,0xBD55,/* 0x90-0x97 */
+	0xBD56,0xBD57,0xBD5A,0xBD5B,0xBD5C,0xBD5D,0xBD5E,0xBD5F,/* 0x98-0x9F */
+	0xBD60,0xBD61,0xBD62,0xBD63,0xBD65,0xBD66,0xBD67,0xBD69,/* 0xA0-0xA7 */
+	0xBD6A,0xBD6B,0xBD6C,0xBD6D,0xBD6E,0xBD6F,0xBD70,0xBD71,/* 0xA8-0xAF */
+	0xBD72,0xBD73,0xBD74,0xBD75,0xBD76,0xBD77,0xBD78,0xBD79,/* 0xB0-0xB7 */
+	0xBD7A,0xBD7B,0xBD7C,0xBD7D,0xBD7E,0xBD7F,0xBD82,0xBD83,/* 0xB8-0xBF */
+	0xBD85,0xBD86,0xBD8B,0xBD8C,0xBD8D,0xBD8E,0xBD8F,0xBD92,/* 0xC0-0xC7 */
+	0xBD94,0xBD96,0xBD97,0xBD98,0xBD9B,0xBD9D,0xBD9E,0xBD9F,/* 0xC8-0xCF */
+	0xBDA0,0xBDA1,0xBDA2,0xBDA3,0xBDA5,0xBDA6,0xBDA7,0xBDA8,/* 0xD0-0xD7 */
+	0xBDA9,0xBDAA,0xBDAB,0xBDAC,0xBDAD,0xBDAE,0xBDAF,0xBDB1,/* 0xD8-0xDF */
+	0xBDB2,0xBDB3,0xBDB4,0xBDB5,0xBDB6,0xBDB7,0xBDB9,0xBDBA,/* 0xE0-0xE7 */
+	0xBDBB,0xBDBC,0xBDBD,0xBDBE,0xBDBF,0xBDC0,0xBDC1,0xBDC2,/* 0xE8-0xEF */
+	0xBDC3,0xBDC4,0xBDC5,0xBDC6,0xBDC7,0xBDC8,0xBDC9,0xBDCA,/* 0xF0-0xF7 */
+	0xBDCB,0xBDCC,0xBDCD,0xBDCE,0xBDCF,0xBDD0,0xBDD1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_95[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBDD2,0xBDD3,0xBDD6,0xBDD7,0xBDD9,0xBDDA,0xBDDB,/* 0x40-0x47 */
+	0xBDDD,0xBDDE,0xBDDF,0xBDE0,0xBDE1,0xBDE2,0xBDE3,0xBDE4,/* 0x48-0x4F */
+	0xBDE5,0xBDE6,0xBDE7,0xBDE8,0xBDEA,0xBDEB,0xBDEC,0xBDED,/* 0x50-0x57 */
+	0xBDEE,0xBDEF,0xBDF1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBDF2,0xBDF3,0xBDF5,0xBDF6,0xBDF7,0xBDF9,0xBDFA,/* 0x60-0x67 */
+	0xBDFB,0xBDFC,0xBDFD,0xBDFE,0xBDFF,0xBE01,0xBE02,0xBE04,/* 0x68-0x6F */
+	0xBE06,0xBE07,0xBE08,0xBE09,0xBE0A,0xBE0B,0xBE0E,0xBE0F,/* 0x70-0x77 */
+	0xBE11,0xBE12,0xBE13,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBE15,0xBE16,0xBE17,0xBE18,0xBE19,0xBE1A,0xBE1B,/* 0x80-0x87 */
+	0xBE1E,0xBE20,0xBE21,0xBE22,0xBE23,0xBE24,0xBE25,0xBE26,/* 0x88-0x8F */
+	0xBE27,0xBE28,0xBE29,0xBE2A,0xBE2B,0xBE2C,0xBE2D,0xBE2E,/* 0x90-0x97 */
+	0xBE2F,0xBE30,0xBE31,0xBE32,0xBE33,0xBE34,0xBE35,0xBE36,/* 0x98-0x9F */
+	0xBE37,0xBE38,0xBE39,0xBE3A,0xBE3B,0xBE3C,0xBE3D,0xBE3E,/* 0xA0-0xA7 */
+	0xBE3F,0xBE40,0xBE41,0xBE42,0xBE43,0xBE46,0xBE47,0xBE49,/* 0xA8-0xAF */
+	0xBE4A,0xBE4B,0xBE4D,0xBE4F,0xBE50,0xBE51,0xBE52,0xBE53,/* 0xB0-0xB7 */
+	0xBE56,0xBE58,0xBE5C,0xBE5D,0xBE5E,0xBE5F,0xBE62,0xBE63,/* 0xB8-0xBF */
+	0xBE65,0xBE66,0xBE67,0xBE69,0xBE6B,0xBE6C,0xBE6D,0xBE6E,/* 0xC0-0xC7 */
+	0xBE6F,0xBE72,0xBE76,0xBE77,0xBE78,0xBE79,0xBE7A,0xBE7E,/* 0xC8-0xCF */
+	0xBE7F,0xBE81,0xBE82,0xBE83,0xBE85,0xBE86,0xBE87,0xBE88,/* 0xD0-0xD7 */
+	0xBE89,0xBE8A,0xBE8B,0xBE8E,0xBE92,0xBE93,0xBE94,0xBE95,/* 0xD8-0xDF */
+	0xBE96,0xBE97,0xBE9A,0xBE9B,0xBE9C,0xBE9D,0xBE9E,0xBE9F,/* 0xE0-0xE7 */
+	0xBEA0,0xBEA1,0xBEA2,0xBEA3,0xBEA4,0xBEA5,0xBEA6,0xBEA7,/* 0xE8-0xEF */
+	0xBEA9,0xBEAA,0xBEAB,0xBEAC,0xBEAD,0xBEAE,0xBEAF,0xBEB0,/* 0xF0-0xF7 */
+	0xBEB1,0xBEB2,0xBEB3,0xBEB4,0xBEB5,0xBEB6,0xBEB7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_96[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBEB8,0xBEB9,0xBEBA,0xBEBB,0xBEBC,0xBEBD,0xBEBE,/* 0x40-0x47 */
+	0xBEBF,0xBEC0,0xBEC1,0xBEC2,0xBEC3,0xBEC4,0xBEC5,0xBEC6,/* 0x48-0x4F */
+	0xBEC7,0xBEC8,0xBEC9,0xBECA,0xBECB,0xBECC,0xBECD,0xBECE,/* 0x50-0x57 */
+	0xBECF,0xBED2,0xBED3,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBED5,0xBED6,0xBED9,0xBEDA,0xBEDB,0xBEDC,0xBEDD,/* 0x60-0x67 */
+	0xBEDE,0xBEDF,0xBEE1,0xBEE2,0xBEE6,0xBEE7,0xBEE8,0xBEE9,/* 0x68-0x6F */
+	0xBEEA,0xBEEB,0xBEED,0xBEEE,0xBEEF,0xBEF0,0xBEF1,0xBEF2,/* 0x70-0x77 */
+	0xBEF3,0xBEF4,0xBEF5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBEF6,0xBEF7,0xBEF8,0xBEF9,0xBEFA,0xBEFB,0xBEFC,/* 0x80-0x87 */
+	0xBEFD,0xBEFE,0xBEFF,0xBF00,0xBF02,0xBF03,0xBF04,0xBF05,/* 0x88-0x8F */
+	0xBF06,0xBF07,0xBF0A,0xBF0B,0xBF0C,0xBF0D,0xBF0E,0xBF0F,/* 0x90-0x97 */
+	0xBF10,0xBF11,0xBF12,0xBF13,0xBF14,0xBF15,0xBF16,0xBF17,/* 0x98-0x9F */
+	0xBF1A,0xBF1E,0xBF1F,0xBF20,0xBF21,0xBF22,0xBF23,0xBF24,/* 0xA0-0xA7 */
+	0xBF25,0xBF26,0xBF27,0xBF28,0xBF29,0xBF2A,0xBF2B,0xBF2C,/* 0xA8-0xAF */
+	0xBF2D,0xBF2E,0xBF2F,0xBF30,0xBF31,0xBF32,0xBF33,0xBF34,/* 0xB0-0xB7 */
+	0xBF35,0xBF36,0xBF37,0xBF38,0xBF39,0xBF3A,0xBF3B,0xBF3C,/* 0xB8-0xBF */
+	0xBF3D,0xBF3E,0xBF3F,0xBF42,0xBF43,0xBF45,0xBF46,0xBF47,/* 0xC0-0xC7 */
+	0xBF49,0xBF4A,0xBF4B,0xBF4C,0xBF4D,0xBF4E,0xBF4F,0xBF52,/* 0xC8-0xCF */
+	0xBF53,0xBF54,0xBF56,0xBF57,0xBF58,0xBF59,0xBF5A,0xBF5B,/* 0xD0-0xD7 */
+	0xBF5C,0xBF5D,0xBF5E,0xBF5F,0xBF60,0xBF61,0xBF62,0xBF63,/* 0xD8-0xDF */
+	0xBF64,0xBF65,0xBF66,0xBF67,0xBF68,0xBF69,0xBF6A,0xBF6B,/* 0xE0-0xE7 */
+	0xBF6C,0xBF6D,0xBF6E,0xBF6F,0xBF70,0xBF71,0xBF72,0xBF73,/* 0xE8-0xEF */
+	0xBF74,0xBF75,0xBF76,0xBF77,0xBF78,0xBF79,0xBF7A,0xBF7B,/* 0xF0-0xF7 */
+	0xBF7C,0xBF7D,0xBF7E,0xBF7F,0xBF80,0xBF81,0xBF82,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_97[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xBF83,0xBF84,0xBF85,0xBF86,0xBF87,0xBF88,0xBF89,/* 0x40-0x47 */
+	0xBF8A,0xBF8B,0xBF8C,0xBF8D,0xBF8E,0xBF8F,0xBF90,0xBF91,/* 0x48-0x4F */
+	0xBF92,0xBF93,0xBF95,0xBF96,0xBF97,0xBF98,0xBF99,0xBF9A,/* 0x50-0x57 */
+	0xBF9B,0xBF9C,0xBF9D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xBF9E,0xBF9F,0xBFA0,0xBFA1,0xBFA2,0xBFA3,0xBFA4,/* 0x60-0x67 */
+	0xBFA5,0xBFA6,0xBFA7,0xBFA8,0xBFA9,0xBFAA,0xBFAB,0xBFAC,/* 0x68-0x6F */
+	0xBFAD,0xBFAE,0xBFAF,0xBFB1,0xBFB2,0xBFB3,0xBFB4,0xBFB5,/* 0x70-0x77 */
+	0xBFB6,0xBFB7,0xBFB8,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xBFB9,0xBFBA,0xBFBB,0xBFBC,0xBFBD,0xBFBE,0xBFBF,/* 0x80-0x87 */
+	0xBFC0,0xBFC1,0xBFC2,0xBFC3,0xBFC4,0xBFC6,0xBFC7,0xBFC8,/* 0x88-0x8F */
+	0xBFC9,0xBFCA,0xBFCB,0xBFCE,0xBFCF,0xBFD1,0xBFD2,0xBFD3,/* 0x90-0x97 */
+	0xBFD5,0xBFD6,0xBFD7,0xBFD8,0xBFD9,0xBFDA,0xBFDB,0xBFDD,/* 0x98-0x9F */
+	0xBFDE,0xBFE0,0xBFE2,0xBFE3,0xBFE4,0xBFE5,0xBFE6,0xBFE7,/* 0xA0-0xA7 */
+	0xBFE8,0xBFE9,0xBFEA,0xBFEB,0xBFEC,0xBFED,0xBFEE,0xBFEF,/* 0xA8-0xAF */
+	0xBFF0,0xBFF1,0xBFF2,0xBFF3,0xBFF4,0xBFF5,0xBFF6,0xBFF7,/* 0xB0-0xB7 */
+	0xBFF8,0xBFF9,0xBFFA,0xBFFB,0xBFFC,0xBFFD,0xBFFE,0xBFFF,/* 0xB8-0xBF */
+	0xC000,0xC001,0xC002,0xC003,0xC004,0xC005,0xC006,0xC007,/* 0xC0-0xC7 */
+	0xC008,0xC009,0xC00A,0xC00B,0xC00C,0xC00D,0xC00E,0xC00F,/* 0xC8-0xCF */
+	0xC010,0xC011,0xC012,0xC013,0xC014,0xC015,0xC016,0xC017,/* 0xD0-0xD7 */
+	0xC018,0xC019,0xC01A,0xC01B,0xC01C,0xC01D,0xC01E,0xC01F,/* 0xD8-0xDF */
+	0xC020,0xC021,0xC022,0xC023,0xC024,0xC025,0xC026,0xC027,/* 0xE0-0xE7 */
+	0xC028,0xC029,0xC02A,0xC02B,0xC02C,0xC02D,0xC02E,0xC02F,/* 0xE8-0xEF */
+	0xC030,0xC031,0xC032,0xC033,0xC034,0xC035,0xC036,0xC037,/* 0xF0-0xF7 */
+	0xC038,0xC039,0xC03A,0xC03B,0xC03D,0xC03E,0xC03F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_98[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC040,0xC041,0xC042,0xC043,0xC044,0xC045,0xC046,/* 0x40-0x47 */
+	0xC047,0xC048,0xC049,0xC04A,0xC04B,0xC04C,0xC04D,0xC04E,/* 0x48-0x4F */
+	0xC04F,0xC050,0xC052,0xC053,0xC054,0xC055,0xC056,0xC057,/* 0x50-0x57 */
+	0xC059,0xC05A,0xC05B,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC05D,0xC05E,0xC05F,0xC061,0xC062,0xC063,0xC064,/* 0x60-0x67 */
+	0xC065,0xC066,0xC067,0xC06A,0xC06B,0xC06C,0xC06D,0xC06E,/* 0x68-0x6F */
+	0xC06F,0xC070,0xC071,0xC072,0xC073,0xC074,0xC075,0xC076,/* 0x70-0x77 */
+	0xC077,0xC078,0xC079,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC07A,0xC07B,0xC07C,0xC07D,0xC07E,0xC07F,0xC080,/* 0x80-0x87 */
+	0xC081,0xC082,0xC083,0xC084,0xC085,0xC086,0xC087,0xC088,/* 0x88-0x8F */
+	0xC089,0xC08A,0xC08B,0xC08C,0xC08D,0xC08E,0xC08F,0xC092,/* 0x90-0x97 */
+	0xC093,0xC095,0xC096,0xC097,0xC099,0xC09A,0xC09B,0xC09C,/* 0x98-0x9F */
+	0xC09D,0xC09E,0xC09F,0xC0A2,0xC0A4,0xC0A6,0xC0A7,0xC0A8,/* 0xA0-0xA7 */
+	0xC0A9,0xC0AA,0xC0AB,0xC0AE,0xC0B1,0xC0B2,0xC0B7,0xC0B8,/* 0xA8-0xAF */
+	0xC0B9,0xC0BA,0xC0BB,0xC0BE,0xC0C2,0xC0C3,0xC0C4,0xC0C6,/* 0xB0-0xB7 */
+	0xC0C7,0xC0CA,0xC0CB,0xC0CD,0xC0CE,0xC0CF,0xC0D1,0xC0D2,/* 0xB8-0xBF */
+	0xC0D3,0xC0D4,0xC0D5,0xC0D6,0xC0D7,0xC0DA,0xC0DE,0xC0DF,/* 0xC0-0xC7 */
+	0xC0E0,0xC0E1,0xC0E2,0xC0E3,0xC0E6,0xC0E7,0xC0E9,0xC0EA,/* 0xC8-0xCF */
+	0xC0EB,0xC0ED,0xC0EE,0xC0EF,0xC0F0,0xC0F1,0xC0F2,0xC0F3,/* 0xD0-0xD7 */
+	0xC0F6,0xC0F8,0xC0FA,0xC0FB,0xC0FC,0xC0FD,0xC0FE,0xC0FF,/* 0xD8-0xDF */
+	0xC101,0xC102,0xC103,0xC105,0xC106,0xC107,0xC109,0xC10A,/* 0xE0-0xE7 */
+	0xC10B,0xC10C,0xC10D,0xC10E,0xC10F,0xC111,0xC112,0xC113,/* 0xE8-0xEF */
+	0xC114,0xC116,0xC117,0xC118,0xC119,0xC11A,0xC11B,0xC121,/* 0xF0-0xF7 */
+	0xC122,0xC125,0xC128,0xC129,0xC12A,0xC12B,0xC12E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_99[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC132,0xC133,0xC134,0xC135,0xC137,0xC13A,0xC13B,/* 0x40-0x47 */
+	0xC13D,0xC13E,0xC13F,0xC141,0xC142,0xC143,0xC144,0xC145,/* 0x48-0x4F */
+	0xC146,0xC147,0xC14A,0xC14E,0xC14F,0xC150,0xC151,0xC152,/* 0x50-0x57 */
+	0xC153,0xC156,0xC157,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC159,0xC15A,0xC15B,0xC15D,0xC15E,0xC15F,0xC160,/* 0x60-0x67 */
+	0xC161,0xC162,0xC163,0xC166,0xC16A,0xC16B,0xC16C,0xC16D,/* 0x68-0x6F */
+	0xC16E,0xC16F,0xC171,0xC172,0xC173,0xC175,0xC176,0xC177,/* 0x70-0x77 */
+	0xC179,0xC17A,0xC17B,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC17C,0xC17D,0xC17E,0xC17F,0xC180,0xC181,0xC182,/* 0x80-0x87 */
+	0xC183,0xC184,0xC186,0xC187,0xC188,0xC189,0xC18A,0xC18B,/* 0x88-0x8F */
+	0xC18F,0xC191,0xC192,0xC193,0xC195,0xC197,0xC198,0xC199,/* 0x90-0x97 */
+	0xC19A,0xC19B,0xC19E,0xC1A0,0xC1A2,0xC1A3,0xC1A4,0xC1A6,/* 0x98-0x9F */
+	0xC1A7,0xC1AA,0xC1AB,0xC1AD,0xC1AE,0xC1AF,0xC1B1,0xC1B2,/* 0xA0-0xA7 */
+	0xC1B3,0xC1B4,0xC1B5,0xC1B6,0xC1B7,0xC1B8,0xC1B9,0xC1BA,/* 0xA8-0xAF */
+	0xC1BB,0xC1BC,0xC1BE,0xC1BF,0xC1C0,0xC1C1,0xC1C2,0xC1C3,/* 0xB0-0xB7 */
+	0xC1C5,0xC1C6,0xC1C7,0xC1C9,0xC1CA,0xC1CB,0xC1CD,0xC1CE,/* 0xB8-0xBF */
+	0xC1CF,0xC1D0,0xC1D1,0xC1D2,0xC1D3,0xC1D5,0xC1D6,0xC1D9,/* 0xC0-0xC7 */
+	0xC1DA,0xC1DB,0xC1DC,0xC1DD,0xC1DE,0xC1DF,0xC1E1,0xC1E2,/* 0xC8-0xCF */
+	0xC1E3,0xC1E5,0xC1E6,0xC1E7,0xC1E9,0xC1EA,0xC1EB,0xC1EC,/* 0xD0-0xD7 */
+	0xC1ED,0xC1EE,0xC1EF,0xC1F2,0xC1F4,0xC1F5,0xC1F6,0xC1F7,/* 0xD8-0xDF */
+	0xC1F8,0xC1F9,0xC1FA,0xC1FB,0xC1FE,0xC1FF,0xC201,0xC202,/* 0xE0-0xE7 */
+	0xC203,0xC205,0xC206,0xC207,0xC208,0xC209,0xC20A,0xC20B,/* 0xE8-0xEF */
+	0xC20E,0xC210,0xC212,0xC213,0xC214,0xC215,0xC216,0xC217,/* 0xF0-0xF7 */
+	0xC21A,0xC21B,0xC21D,0xC21E,0xC221,0xC222,0xC223,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9A[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC224,0xC225,0xC226,0xC227,0xC22A,0xC22C,0xC22E,/* 0x40-0x47 */
+	0xC230,0xC233,0xC235,0xC236,0xC237,0xC238,0xC239,0xC23A,/* 0x48-0x4F */
+	0xC23B,0xC23C,0xC23D,0xC23E,0xC23F,0xC240,0xC241,0xC242,/* 0x50-0x57 */
+	0xC243,0xC244,0xC245,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC246,0xC247,0xC249,0xC24A,0xC24B,0xC24C,0xC24D,/* 0x60-0x67 */
+	0xC24E,0xC24F,0xC252,0xC253,0xC255,0xC256,0xC257,0xC259,/* 0x68-0x6F */
+	0xC25A,0xC25B,0xC25C,0xC25D,0xC25E,0xC25F,0xC261,0xC262,/* 0x70-0x77 */
+	0xC263,0xC264,0xC266,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC267,0xC268,0xC269,0xC26A,0xC26B,0xC26E,0xC26F,/* 0x80-0x87 */
+	0xC271,0xC272,0xC273,0xC275,0xC276,0xC277,0xC278,0xC279,/* 0x88-0x8F */
+	0xC27A,0xC27B,0xC27E,0xC280,0xC282,0xC283,0xC284,0xC285,/* 0x90-0x97 */
+	0xC286,0xC287,0xC28A,0xC28B,0xC28C,0xC28D,0xC28E,0xC28F,/* 0x98-0x9F */
+	0xC291,0xC292,0xC293,0xC294,0xC295,0xC296,0xC297,0xC299,/* 0xA0-0xA7 */
+	0xC29A,0xC29C,0xC29E,0xC29F,0xC2A0,0xC2A1,0xC2A2,0xC2A3,/* 0xA8-0xAF */
+	0xC2A6,0xC2A7,0xC2A9,0xC2AA,0xC2AB,0xC2AE,0xC2AF,0xC2B0,/* 0xB0-0xB7 */
+	0xC2B1,0xC2B2,0xC2B3,0xC2B6,0xC2B8,0xC2BA,0xC2BB,0xC2BC,/* 0xB8-0xBF */
+	0xC2BD,0xC2BE,0xC2BF,0xC2C0,0xC2C1,0xC2C2,0xC2C3,0xC2C4,/* 0xC0-0xC7 */
+	0xC2C5,0xC2C6,0xC2C7,0xC2C8,0xC2C9,0xC2CA,0xC2CB,0xC2CC,/* 0xC8-0xCF */
+	0xC2CD,0xC2CE,0xC2CF,0xC2D0,0xC2D1,0xC2D2,0xC2D3,0xC2D4,/* 0xD0-0xD7 */
+	0xC2D5,0xC2D6,0xC2D7,0xC2D8,0xC2D9,0xC2DA,0xC2DB,0xC2DE,/* 0xD8-0xDF */
+	0xC2DF,0xC2E1,0xC2E2,0xC2E5,0xC2E6,0xC2E7,0xC2E8,0xC2E9,/* 0xE0-0xE7 */
+	0xC2EA,0xC2EE,0xC2F0,0xC2F2,0xC2F3,0xC2F4,0xC2F5,0xC2F7,/* 0xE8-0xEF */
+	0xC2FA,0xC2FD,0xC2FE,0xC2FF,0xC301,0xC302,0xC303,0xC304,/* 0xF0-0xF7 */
+	0xC305,0xC306,0xC307,0xC30A,0xC30B,0xC30E,0xC30F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9B[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC310,0xC311,0xC312,0xC316,0xC317,0xC319,0xC31A,/* 0x40-0x47 */
+	0xC31B,0xC31D,0xC31E,0xC31F,0xC320,0xC321,0xC322,0xC323,/* 0x48-0x4F */
+	0xC326,0xC327,0xC32A,0xC32B,0xC32C,0xC32D,0xC32E,0xC32F,/* 0x50-0x57 */
+	0xC330,0xC331,0xC332,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC333,0xC334,0xC335,0xC336,0xC337,0xC338,0xC339,/* 0x60-0x67 */
+	0xC33A,0xC33B,0xC33C,0xC33D,0xC33E,0xC33F,0xC340,0xC341,/* 0x68-0x6F */
+	0xC342,0xC343,0xC344,0xC346,0xC347,0xC348,0xC349,0xC34A,/* 0x70-0x77 */
+	0xC34B,0xC34C,0xC34D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC34E,0xC34F,0xC350,0xC351,0xC352,0xC353,0xC354,/* 0x80-0x87 */
+	0xC355,0xC356,0xC357,0xC358,0xC359,0xC35A,0xC35B,0xC35C,/* 0x88-0x8F */
+	0xC35D,0xC35E,0xC35F,0xC360,0xC361,0xC362,0xC363,0xC364,/* 0x90-0x97 */
+	0xC365,0xC366,0xC367,0xC36A,0xC36B,0xC36D,0xC36E,0xC36F,/* 0x98-0x9F */
+	0xC371,0xC373,0xC374,0xC375,0xC376,0xC377,0xC37A,0xC37B,/* 0xA0-0xA7 */
+	0xC37E,0xC37F,0xC380,0xC381,0xC382,0xC383,0xC385,0xC386,/* 0xA8-0xAF */
+	0xC387,0xC389,0xC38A,0xC38B,0xC38D,0xC38E,0xC38F,0xC390,/* 0xB0-0xB7 */
+	0xC391,0xC392,0xC393,0xC394,0xC395,0xC396,0xC397,0xC398,/* 0xB8-0xBF */
+	0xC399,0xC39A,0xC39B,0xC39C,0xC39D,0xC39E,0xC39F,0xC3A0,/* 0xC0-0xC7 */
+	0xC3A1,0xC3A2,0xC3A3,0xC3A4,0xC3A5,0xC3A6,0xC3A7,0xC3A8,/* 0xC8-0xCF */
+	0xC3A9,0xC3AA,0xC3AB,0xC3AC,0xC3AD,0xC3AE,0xC3AF,0xC3B0,/* 0xD0-0xD7 */
+	0xC3B1,0xC3B2,0xC3B3,0xC3B4,0xC3B5,0xC3B6,0xC3B7,0xC3B8,/* 0xD8-0xDF */
+	0xC3B9,0xC3BA,0xC3BB,0xC3BC,0xC3BD,0xC3BE,0xC3BF,0xC3C1,/* 0xE0-0xE7 */
+	0xC3C2,0xC3C3,0xC3C4,0xC3C5,0xC3C6,0xC3C7,0xC3C8,0xC3C9,/* 0xE8-0xEF */
+	0xC3CA,0xC3CB,0xC3CC,0xC3CD,0xC3CE,0xC3CF,0xC3D0,0xC3D1,/* 0xF0-0xF7 */
+	0xC3D2,0xC3D3,0xC3D4,0xC3D5,0xC3D6,0xC3D7,0xC3DA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9C[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC3DB,0xC3DD,0xC3DE,0xC3E1,0xC3E3,0xC3E4,0xC3E5,/* 0x40-0x47 */
+	0xC3E6,0xC3E7,0xC3EA,0xC3EB,0xC3EC,0xC3EE,0xC3EF,0xC3F0,/* 0x48-0x4F */
+	0xC3F1,0xC3F2,0xC3F3,0xC3F6,0xC3F7,0xC3F9,0xC3FA,0xC3FB,/* 0x50-0x57 */
+	0xC3FC,0xC3FD,0xC3FE,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC3FF,0xC400,0xC401,0xC402,0xC403,0xC404,0xC405,/* 0x60-0x67 */
+	0xC406,0xC407,0xC409,0xC40A,0xC40B,0xC40C,0xC40D,0xC40E,/* 0x68-0x6F */
+	0xC40F,0xC411,0xC412,0xC413,0xC414,0xC415,0xC416,0xC417,/* 0x70-0x77 */
+	0xC418,0xC419,0xC41A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC41B,0xC41C,0xC41D,0xC41E,0xC41F,0xC420,0xC421,/* 0x80-0x87 */
+	0xC422,0xC423,0xC425,0xC426,0xC427,0xC428,0xC429,0xC42A,/* 0x88-0x8F */
+	0xC42B,0xC42D,0xC42E,0xC42F,0xC431,0xC432,0xC433,0xC435,/* 0x90-0x97 */
+	0xC436,0xC437,0xC438,0xC439,0xC43A,0xC43B,0xC43E,0xC43F,/* 0x98-0x9F */
+	0xC440,0xC441,0xC442,0xC443,0xC444,0xC445,0xC446,0xC447,/* 0xA0-0xA7 */
+	0xC449,0xC44A,0xC44B,0xC44C,0xC44D,0xC44E,0xC44F,0xC450,/* 0xA8-0xAF */
+	0xC451,0xC452,0xC453,0xC454,0xC455,0xC456,0xC457,0xC458,/* 0xB0-0xB7 */
+	0xC459,0xC45A,0xC45B,0xC45C,0xC45D,0xC45E,0xC45F,0xC460,/* 0xB8-0xBF */
+	0xC461,0xC462,0xC463,0xC466,0xC467,0xC469,0xC46A,0xC46B,/* 0xC0-0xC7 */
+	0xC46D,0xC46E,0xC46F,0xC470,0xC471,0xC472,0xC473,0xC476,/* 0xC8-0xCF */
+	0xC477,0xC478,0xC47A,0xC47B,0xC47C,0xC47D,0xC47E,0xC47F,/* 0xD0-0xD7 */
+	0xC481,0xC482,0xC483,0xC484,0xC485,0xC486,0xC487,0xC488,/* 0xD8-0xDF */
+	0xC489,0xC48A,0xC48B,0xC48C,0xC48D,0xC48E,0xC48F,0xC490,/* 0xE0-0xE7 */
+	0xC491,0xC492,0xC493,0xC495,0xC496,0xC497,0xC498,0xC499,/* 0xE8-0xEF */
+	0xC49A,0xC49B,0xC49D,0xC49E,0xC49F,0xC4A0,0xC4A1,0xC4A2,/* 0xF0-0xF7 */
+	0xC4A3,0xC4A4,0xC4A5,0xC4A6,0xC4A7,0xC4A8,0xC4A9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9D[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC4AA,0xC4AB,0xC4AC,0xC4AD,0xC4AE,0xC4AF,0xC4B0,/* 0x40-0x47 */
+	0xC4B1,0xC4B2,0xC4B3,0xC4B4,0xC4B5,0xC4B6,0xC4B7,0xC4B9,/* 0x48-0x4F */
+	0xC4BA,0xC4BB,0xC4BD,0xC4BE,0xC4BF,0xC4C0,0xC4C1,0xC4C2,/* 0x50-0x57 */
+	0xC4C3,0xC4C4,0xC4C5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC4C6,0xC4C7,0xC4C8,0xC4C9,0xC4CA,0xC4CB,0xC4CC,/* 0x60-0x67 */
+	0xC4CD,0xC4CE,0xC4CF,0xC4D0,0xC4D1,0xC4D2,0xC4D3,0xC4D4,/* 0x68-0x6F */
+	0xC4D5,0xC4D6,0xC4D7,0xC4D8,0xC4D9,0xC4DA,0xC4DB,0xC4DC,/* 0x70-0x77 */
+	0xC4DD,0xC4DE,0xC4DF,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC4E0,0xC4E1,0xC4E2,0xC4E3,0xC4E4,0xC4E5,0xC4E6,/* 0x80-0x87 */
+	0xC4E7,0xC4E8,0xC4EA,0xC4EB,0xC4EC,0xC4ED,0xC4EE,0xC4EF,/* 0x88-0x8F */
+	0xC4F2,0xC4F3,0xC4F5,0xC4F6,0xC4F7,0xC4F9,0xC4FB,0xC4FC,/* 0x90-0x97 */
+	0xC4FD,0xC4FE,0xC502,0xC503,0xC504,0xC505,0xC506,0xC507,/* 0x98-0x9F */
+	0xC508,0xC509,0xC50A,0xC50B,0xC50D,0xC50E,0xC50F,0xC511,/* 0xA0-0xA7 */
+	0xC512,0xC513,0xC515,0xC516,0xC517,0xC518,0xC519,0xC51A,/* 0xA8-0xAF */
+	0xC51B,0xC51D,0xC51E,0xC51F,0xC520,0xC521,0xC522,0xC523,/* 0xB0-0xB7 */
+	0xC524,0xC525,0xC526,0xC527,0xC52A,0xC52B,0xC52D,0xC52E,/* 0xB8-0xBF */
+	0xC52F,0xC531,0xC532,0xC533,0xC534,0xC535,0xC536,0xC537,/* 0xC0-0xC7 */
+	0xC53A,0xC53C,0xC53E,0xC53F,0xC540,0xC541,0xC542,0xC543,/* 0xC8-0xCF */
+	0xC546,0xC547,0xC54B,0xC54F,0xC550,0xC551,0xC552,0xC556,/* 0xD0-0xD7 */
+	0xC55A,0xC55B,0xC55C,0xC55F,0xC562,0xC563,0xC565,0xC566,/* 0xD8-0xDF */
+	0xC567,0xC569,0xC56A,0xC56B,0xC56C,0xC56D,0xC56E,0xC56F,/* 0xE0-0xE7 */
+	0xC572,0xC576,0xC577,0xC578,0xC579,0xC57A,0xC57B,0xC57E,/* 0xE8-0xEF */
+	0xC57F,0xC581,0xC582,0xC583,0xC585,0xC586,0xC588,0xC589,/* 0xF0-0xF7 */
+	0xC58A,0xC58B,0xC58E,0xC590,0xC592,0xC593,0xC594,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9E[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC596,0xC599,0xC59A,0xC59B,0xC59D,0xC59E,0xC59F,/* 0x40-0x47 */
+	0xC5A1,0xC5A2,0xC5A3,0xC5A4,0xC5A5,0xC5A6,0xC5A7,0xC5A8,/* 0x48-0x4F */
+	0xC5AA,0xC5AB,0xC5AC,0xC5AD,0xC5AE,0xC5AF,0xC5B0,0xC5B1,/* 0x50-0x57 */
+	0xC5B2,0xC5B3,0xC5B6,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC5B7,0xC5BA,0xC5BF,0xC5C0,0xC5C1,0xC5C2,0xC5C3,/* 0x60-0x67 */
+	0xC5CB,0xC5CD,0xC5CF,0xC5D2,0xC5D3,0xC5D5,0xC5D6,0xC5D7,/* 0x68-0x6F */
+	0xC5D9,0xC5DA,0xC5DB,0xC5DC,0xC5DD,0xC5DE,0xC5DF,0xC5E2,/* 0x70-0x77 */
+	0xC5E4,0xC5E6,0xC5E7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC5E8,0xC5E9,0xC5EA,0xC5EB,0xC5EF,0xC5F1,0xC5F2,/* 0x80-0x87 */
+	0xC5F3,0xC5F5,0xC5F8,0xC5F9,0xC5FA,0xC5FB,0xC602,0xC603,/* 0x88-0x8F */
+	0xC604,0xC609,0xC60A,0xC60B,0xC60D,0xC60E,0xC60F,0xC611,/* 0x90-0x97 */
+	0xC612,0xC613,0xC614,0xC615,0xC616,0xC617,0xC61A,0xC61D,/* 0x98-0x9F */
+	0xC61E,0xC61F,0xC620,0xC621,0xC622,0xC623,0xC626,0xC627,/* 0xA0-0xA7 */
+	0xC629,0xC62A,0xC62B,0xC62F,0xC631,0xC632,0xC636,0xC638,/* 0xA8-0xAF */
+	0xC63A,0xC63C,0xC63D,0xC63E,0xC63F,0xC642,0xC643,0xC645,/* 0xB0-0xB7 */
+	0xC646,0xC647,0xC649,0xC64A,0xC64B,0xC64C,0xC64D,0xC64E,/* 0xB8-0xBF */
+	0xC64F,0xC652,0xC656,0xC657,0xC658,0xC659,0xC65A,0xC65B,/* 0xC0-0xC7 */
+	0xC65E,0xC65F,0xC661,0xC662,0xC663,0xC664,0xC665,0xC666,/* 0xC8-0xCF */
+	0xC667,0xC668,0xC669,0xC66A,0xC66B,0xC66D,0xC66E,0xC670,/* 0xD0-0xD7 */
+	0xC672,0xC673,0xC674,0xC675,0xC676,0xC677,0xC67A,0xC67B,/* 0xD8-0xDF */
+	0xC67D,0xC67E,0xC67F,0xC681,0xC682,0xC683,0xC684,0xC685,/* 0xE0-0xE7 */
+	0xC686,0xC687,0xC68A,0xC68C,0xC68E,0xC68F,0xC690,0xC691,/* 0xE8-0xEF */
+	0xC692,0xC693,0xC696,0xC697,0xC699,0xC69A,0xC69B,0xC69D,/* 0xF0-0xF7 */
+	0xC69E,0xC69F,0xC6A0,0xC6A1,0xC6A2,0xC6A3,0xC6A6,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_9F[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC6A8,0xC6AA,0xC6AB,0xC6AC,0xC6AD,0xC6AE,0xC6AF,/* 0x40-0x47 */
+	0xC6B2,0xC6B3,0xC6B5,0xC6B6,0xC6B7,0xC6BB,0xC6BC,0xC6BD,/* 0x48-0x4F */
+	0xC6BE,0xC6BF,0xC6C2,0xC6C4,0xC6C6,0xC6C7,0xC6C8,0xC6C9,/* 0x50-0x57 */
+	0xC6CA,0xC6CB,0xC6CE,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC6CF,0xC6D1,0xC6D2,0xC6D3,0xC6D5,0xC6D6,0xC6D7,/* 0x60-0x67 */
+	0xC6D8,0xC6D9,0xC6DA,0xC6DB,0xC6DE,0xC6DF,0xC6E2,0xC6E3,/* 0x68-0x6F */
+	0xC6E4,0xC6E5,0xC6E6,0xC6E7,0xC6EA,0xC6EB,0xC6ED,0xC6EE,/* 0x70-0x77 */
+	0xC6EF,0xC6F1,0xC6F2,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC6F3,0xC6F4,0xC6F5,0xC6F6,0xC6F7,0xC6FA,0xC6FB,/* 0x80-0x87 */
+	0xC6FC,0xC6FE,0xC6FF,0xC700,0xC701,0xC702,0xC703,0xC706,/* 0x88-0x8F */
+	0xC707,0xC709,0xC70A,0xC70B,0xC70D,0xC70E,0xC70F,0xC710,/* 0x90-0x97 */
+	0xC711,0xC712,0xC713,0xC716,0xC718,0xC71A,0xC71B,0xC71C,/* 0x98-0x9F */
+	0xC71D,0xC71E,0xC71F,0xC722,0xC723,0xC725,0xC726,0xC727,/* 0xA0-0xA7 */
+	0xC729,0xC72A,0xC72B,0xC72C,0xC72D,0xC72E,0xC72F,0xC732,/* 0xA8-0xAF */
+	0xC734,0xC736,0xC738,0xC739,0xC73A,0xC73B,0xC73E,0xC73F,/* 0xB0-0xB7 */
+	0xC741,0xC742,0xC743,0xC745,0xC746,0xC747,0xC748,0xC749,/* 0xB8-0xBF */
+	0xC74B,0xC74E,0xC750,0xC759,0xC75A,0xC75B,0xC75D,0xC75E,/* 0xC0-0xC7 */
+	0xC75F,0xC761,0xC762,0xC763,0xC764,0xC765,0xC766,0xC767,/* 0xC8-0xCF */
+	0xC769,0xC76A,0xC76C,0xC76D,0xC76E,0xC76F,0xC770,0xC771,/* 0xD0-0xD7 */
+	0xC772,0xC773,0xC776,0xC777,0xC779,0xC77A,0xC77B,0xC77F,/* 0xD8-0xDF */
+	0xC780,0xC781,0xC782,0xC786,0xC78B,0xC78C,0xC78D,0xC78F,/* 0xE0-0xE7 */
+	0xC792,0xC793,0xC795,0xC799,0xC79B,0xC79C,0xC79D,0xC79E,/* 0xE8-0xEF */
+	0xC79F,0xC7A2,0xC7A7,0xC7A8,0xC7A9,0xC7AA,0xC7AB,0xC7AE,/* 0xF0-0xF7 */
+	0xC7AF,0xC7B1,0xC7B2,0xC7B3,0xC7B5,0xC7B6,0xC7B7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC7B8,0xC7B9,0xC7BA,0xC7BB,0xC7BE,0xC7C2,0xC7C3,/* 0x40-0x47 */
+	0xC7C4,0xC7C5,0xC7C6,0xC7C7,0xC7CA,0xC7CB,0xC7CD,0xC7CF,/* 0x48-0x4F */
+	0xC7D1,0xC7D2,0xC7D3,0xC7D4,0xC7D5,0xC7D6,0xC7D7,0xC7D9,/* 0x50-0x57 */
+	0xC7DA,0xC7DB,0xC7DC,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC7DE,0xC7DF,0xC7E0,0xC7E1,0xC7E2,0xC7E3,0xC7E5,/* 0x60-0x67 */
+	0xC7E6,0xC7E7,0xC7E9,0xC7EA,0xC7EB,0xC7ED,0xC7EE,0xC7EF,/* 0x68-0x6F */
+	0xC7F0,0xC7F1,0xC7F2,0xC7F3,0xC7F4,0xC7F5,0xC7F6,0xC7F7,/* 0x70-0x77 */
+	0xC7F8,0xC7F9,0xC7FA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC7FB,0xC7FC,0xC7FD,0xC7FE,0xC7FF,0xC802,0xC803,/* 0x80-0x87 */
+	0xC805,0xC806,0xC807,0xC809,0xC80B,0xC80C,0xC80D,0xC80E,/* 0x88-0x8F */
+	0xC80F,0xC812,0xC814,0xC817,0xC818,0xC819,0xC81A,0xC81B,/* 0x90-0x97 */
+	0xC81E,0xC81F,0xC821,0xC822,0xC823,0xC825,0xC826,0xC827,/* 0x98-0x9F */
+	0xC828,0xC829,0xC82A,0xC82B,0xC82E,0xC830,0xC832,0xC833,/* 0xA0-0xA7 */
+	0xC834,0xC835,0xC836,0xC837,0xC839,0xC83A,0xC83B,0xC83D,/* 0xA8-0xAF */
+	0xC83E,0xC83F,0xC841,0xC842,0xC843,0xC844,0xC845,0xC846,/* 0xB0-0xB7 */
+	0xC847,0xC84A,0xC84B,0xC84E,0xC84F,0xC850,0xC851,0xC852,/* 0xB8-0xBF */
+	0xC853,0xC855,0xC856,0xC857,0xC858,0xC859,0xC85A,0xC85B,/* 0xC0-0xC7 */
+	0xC85C,0xC85D,0xC85E,0xC85F,0xC860,0xC861,0xC862,0xC863,/* 0xC8-0xCF */
+	0xC864,0xC865,0xC866,0xC867,0xC868,0xC869,0xC86A,0xC86B,/* 0xD0-0xD7 */
+	0xC86C,0xC86D,0xC86E,0xC86F,0xC872,0xC873,0xC875,0xC876,/* 0xD8-0xDF */
+	0xC877,0xC879,0xC87B,0xC87C,0xC87D,0xC87E,0xC87F,0xC882,/* 0xE0-0xE7 */
+	0xC884,0xC888,0xC889,0xC88A,0xC88E,0xC88F,0xC890,0xC891,/* 0xE8-0xEF */
+	0xC892,0xC893,0xC895,0xC896,0xC897,0xC898,0xC899,0xC89A,/* 0xF0-0xF7 */
+	0xC89B,0xC89C,0xC89E,0xC8A0,0xC8A2,0xC8A3,0xC8A4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC8A5,0xC8A6,0xC8A7,0xC8A9,0xC8AA,0xC8AB,0xC8AC,/* 0x40-0x47 */
+	0xC8AD,0xC8AE,0xC8AF,0xC8B0,0xC8B1,0xC8B2,0xC8B3,0xC8B4,/* 0x48-0x4F */
+	0xC8B5,0xC8B6,0xC8B7,0xC8B8,0xC8B9,0xC8BA,0xC8BB,0xC8BE,/* 0x50-0x57 */
+	0xC8BF,0xC8C0,0xC8C1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC8C2,0xC8C3,0xC8C5,0xC8C6,0xC8C7,0xC8C9,0xC8CA,/* 0x60-0x67 */
+	0xC8CB,0xC8CD,0xC8CE,0xC8CF,0xC8D0,0xC8D1,0xC8D2,0xC8D3,/* 0x68-0x6F */
+	0xC8D6,0xC8D8,0xC8DA,0xC8DB,0xC8DC,0xC8DD,0xC8DE,0xC8DF,/* 0x70-0x77 */
+	0xC8E2,0xC8E3,0xC8E5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC8E6,0xC8E7,0xC8E8,0xC8E9,0xC8EA,0xC8EB,0xC8EC,/* 0x80-0x87 */
+	0xC8ED,0xC8EE,0xC8EF,0xC8F0,0xC8F1,0xC8F2,0xC8F3,0xC8F4,/* 0x88-0x8F */
+	0xC8F6,0xC8F7,0xC8F8,0xC8F9,0xC8FA,0xC8FB,0xC8FE,0xC8FF,/* 0x90-0x97 */
+	0xC901,0xC902,0xC903,0xC907,0xC908,0xC909,0xC90A,0xC90B,/* 0x98-0x9F */
+	0xC90E,0x3000,0x3001,0x3002,0x00B7,0x2025,0x2026,0x00A8,/* 0xA0-0xA7 */
+	0x3003,0x00AD,0x2015,0x2225,0xFF3C,0x223C,0x2018,0x2019,/* 0xA8-0xAF */
+	0x201C,0x201D,0x3014,0x3015,0x3008,0x3009,0x300A,0x300B,/* 0xB0-0xB7 */
+	0x300C,0x300D,0x300E,0x300F,0x3010,0x3011,0x00B1,0x00D7,/* 0xB8-0xBF */
+	0x00F7,0x2260,0x2264,0x2265,0x221E,0x2234,0x00B0,0x2032,/* 0xC0-0xC7 */
+	0x2033,0x2103,0x212B,0xFFE0,0xFFE1,0xFFE5,0x2642,0x2640,/* 0xC8-0xCF */
+	0x2220,0x22A5,0x2312,0x2202,0x2207,0x2261,0x2252,0x00A7,/* 0xD0-0xD7 */
+	0x203B,0x2606,0x2605,0x25CB,0x25CF,0x25CE,0x25C7,0x25C6,/* 0xD8-0xDF */
+	0x25A1,0x25A0,0x25B3,0x25B2,0x25BD,0x25BC,0x2192,0x2190,/* 0xE0-0xE7 */
+	0x2191,0x2193,0x2194,0x3013,0x226A,0x226B,0x221A,0x223D,/* 0xE8-0xEF */
+	0x221D,0x2235,0x222B,0x222C,0x2208,0x220B,0x2286,0x2287,/* 0xF0-0xF7 */
+	0x2282,0x2283,0x222A,0x2229,0x2227,0x2228,0xFFE2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC910,0xC912,0xC913,0xC914,0xC915,0xC916,0xC917,/* 0x40-0x47 */
+	0xC919,0xC91A,0xC91B,0xC91C,0xC91D,0xC91E,0xC91F,0xC920,/* 0x48-0x4F */
+	0xC921,0xC922,0xC923,0xC924,0xC925,0xC926,0xC927,0xC928,/* 0x50-0x57 */
+	0xC929,0xC92A,0xC92B,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC92D,0xC92E,0xC92F,0xC930,0xC931,0xC932,0xC933,/* 0x60-0x67 */
+	0xC935,0xC936,0xC937,0xC938,0xC939,0xC93A,0xC93B,0xC93C,/* 0x68-0x6F */
+	0xC93D,0xC93E,0xC93F,0xC940,0xC941,0xC942,0xC943,0xC944,/* 0x70-0x77 */
+	0xC945,0xC946,0xC947,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC948,0xC949,0xC94A,0xC94B,0xC94C,0xC94D,0xC94E,/* 0x80-0x87 */
+	0xC94F,0xC952,0xC953,0xC955,0xC956,0xC957,0xC959,0xC95A,/* 0x88-0x8F */
+	0xC95B,0xC95C,0xC95D,0xC95E,0xC95F,0xC962,0xC964,0xC965,/* 0x90-0x97 */
+	0xC966,0xC967,0xC968,0xC969,0xC96A,0xC96B,0xC96D,0xC96E,/* 0x98-0x9F */
+	0xC96F,0x21D2,0x21D4,0x2200,0x2203,0x00B4,0xFF5E,0x02C7,/* 0xA0-0xA7 */
+	0x02D8,0x02DD,0x02DA,0x02D9,0x00B8,0x02DB,0x00A1,0x00BF,/* 0xA8-0xAF */
+	0x02D0,0x222E,0x2211,0x220F,0x00A4,0x2109,0x2030,0x25C1,/* 0xB0-0xB7 */
+	0x25C0,0x25B7,0x25B6,0x2664,0x2660,0x2661,0x2665,0x2667,/* 0xB8-0xBF */
+	0x2663,0x2299,0x25C8,0x25A3,0x25D0,0x25D1,0x2592,0x25A4,/* 0xC0-0xC7 */
+	0x25A5,0x25A8,0x25A7,0x25A6,0x25A9,0x2668,0x260F,0x260E,/* 0xC8-0xCF */
+	0x261C,0x261E,0x00B6,0x2020,0x2021,0x2195,0x2197,0x2199,/* 0xD0-0xD7 */
+	0x2196,0x2198,0x266D,0x2669,0x266A,0x266C,0x327F,0x321C,/* 0xD8-0xDF */
+	0x2116,0x33C7,0x2122,0x33C2,0x33D8,0x2121,0x20AC,0x00AE,/* 0xE0-0xE7 */
+};
+
+static wchar_t c2u_A3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC971,0xC972,0xC973,0xC975,0xC976,0xC977,0xC978,/* 0x40-0x47 */
+	0xC979,0xC97A,0xC97B,0xC97D,0xC97E,0xC97F,0xC980,0xC981,/* 0x48-0x4F */
+	0xC982,0xC983,0xC984,0xC985,0xC986,0xC987,0xC98A,0xC98B,/* 0x50-0x57 */
+	0xC98D,0xC98E,0xC98F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xC991,0xC992,0xC993,0xC994,0xC995,0xC996,0xC997,/* 0x60-0x67 */
+	0xC99A,0xC99C,0xC99E,0xC99F,0xC9A0,0xC9A1,0xC9A2,0xC9A3,/* 0x68-0x6F */
+	0xC9A4,0xC9A5,0xC9A6,0xC9A7,0xC9A8,0xC9A9,0xC9AA,0xC9AB,/* 0x70-0x77 */
+	0xC9AC,0xC9AD,0xC9AE,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xC9AF,0xC9B0,0xC9B1,0xC9B2,0xC9B3,0xC9B4,0xC9B5,/* 0x80-0x87 */
+	0xC9B6,0xC9B7,0xC9B8,0xC9B9,0xC9BA,0xC9BB,0xC9BC,0xC9BD,/* 0x88-0x8F */
+	0xC9BE,0xC9BF,0xC9C2,0xC9C3,0xC9C5,0xC9C6,0xC9C9,0xC9CB,/* 0x90-0x97 */
+	0xC9CC,0xC9CD,0xC9CE,0xC9CF,0xC9D2,0xC9D4,0xC9D7,0xC9D8,/* 0x98-0x9F */
+	0xC9DB,0xFF01,0xFF02,0xFF03,0xFF04,0xFF05,0xFF06,0xFF07,/* 0xA0-0xA7 */
+	0xFF08,0xFF09,0xFF0A,0xFF0B,0xFF0C,0xFF0D,0xFF0E,0xFF0F,/* 0xA8-0xAF */
+	0xFF10,0xFF11,0xFF12,0xFF13,0xFF14,0xFF15,0xFF16,0xFF17,/* 0xB0-0xB7 */
+	0xFF18,0xFF19,0xFF1A,0xFF1B,0xFF1C,0xFF1D,0xFF1E,0xFF1F,/* 0xB8-0xBF */
+	0xFF20,0xFF21,0xFF22,0xFF23,0xFF24,0xFF25,0xFF26,0xFF27,/* 0xC0-0xC7 */
+	0xFF28,0xFF29,0xFF2A,0xFF2B,0xFF2C,0xFF2D,0xFF2E,0xFF2F,/* 0xC8-0xCF */
+	0xFF30,0xFF31,0xFF32,0xFF33,0xFF34,0xFF35,0xFF36,0xFF37,/* 0xD0-0xD7 */
+	0xFF38,0xFF39,0xFF3A,0xFF3B,0xFFE6,0xFF3D,0xFF3E,0xFF3F,/* 0xD8-0xDF */
+	0xFF40,0xFF41,0xFF42,0xFF43,0xFF44,0xFF45,0xFF46,0xFF47,/* 0xE0-0xE7 */
+	0xFF48,0xFF49,0xFF4A,0xFF4B,0xFF4C,0xFF4D,0xFF4E,0xFF4F,/* 0xE8-0xEF */
+	0xFF50,0xFF51,0xFF52,0xFF53,0xFF54,0xFF55,0xFF56,0xFF57,/* 0xF0-0xF7 */
+	0xFF58,0xFF59,0xFF5A,0xFF5B,0xFF5C,0xFF5D,0xFFE3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xC9DE,0xC9DF,0xC9E1,0xC9E3,0xC9E5,0xC9E6,0xC9E8,/* 0x40-0x47 */
+	0xC9E9,0xC9EA,0xC9EB,0xC9EE,0xC9F2,0xC9F3,0xC9F4,0xC9F5,/* 0x48-0x4F */
+	0xC9F6,0xC9F7,0xC9FA,0xC9FB,0xC9FD,0xC9FE,0xC9FF,0xCA01,/* 0x50-0x57 */
+	0xCA02,0xCA03,0xCA04,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCA05,0xCA06,0xCA07,0xCA0A,0xCA0E,0xCA0F,0xCA10,/* 0x60-0x67 */
+	0xCA11,0xCA12,0xCA13,0xCA15,0xCA16,0xCA17,0xCA19,0xCA1A,/* 0x68-0x6F */
+	0xCA1B,0xCA1C,0xCA1D,0xCA1E,0xCA1F,0xCA20,0xCA21,0xCA22,/* 0x70-0x77 */
+	0xCA23,0xCA24,0xCA25,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCA26,0xCA27,0xCA28,0xCA2A,0xCA2B,0xCA2C,0xCA2D,/* 0x80-0x87 */
+	0xCA2E,0xCA2F,0xCA30,0xCA31,0xCA32,0xCA33,0xCA34,0xCA35,/* 0x88-0x8F */
+	0xCA36,0xCA37,0xCA38,0xCA39,0xCA3A,0xCA3B,0xCA3C,0xCA3D,/* 0x90-0x97 */
+	0xCA3E,0xCA3F,0xCA40,0xCA41,0xCA42,0xCA43,0xCA44,0xCA45,/* 0x98-0x9F */
+	0xCA46,0x3131,0x3132,0x3133,0x3134,0x3135,0x3136,0x3137,/* 0xA0-0xA7 */
+	0x3138,0x3139,0x313A,0x313B,0x313C,0x313D,0x313E,0x313F,/* 0xA8-0xAF */
+	0x3140,0x3141,0x3142,0x3143,0x3144,0x3145,0x3146,0x3147,/* 0xB0-0xB7 */
+	0x3148,0x3149,0x314A,0x314B,0x314C,0x314D,0x314E,0x314F,/* 0xB8-0xBF */
+	0x3150,0x3151,0x3152,0x3153,0x3154,0x3155,0x3156,0x3157,/* 0xC0-0xC7 */
+	0x3158,0x3159,0x315A,0x315B,0x315C,0x315D,0x315E,0x315F,/* 0xC8-0xCF */
+	0x3160,0x3161,0x3162,0x3163,0x3164,0x3165,0x3166,0x3167,/* 0xD0-0xD7 */
+	0x3168,0x3169,0x316A,0x316B,0x316C,0x316D,0x316E,0x316F,/* 0xD8-0xDF */
+	0x3170,0x3171,0x3172,0x3173,0x3174,0x3175,0x3176,0x3177,/* 0xE0-0xE7 */
+	0x3178,0x3179,0x317A,0x317B,0x317C,0x317D,0x317E,0x317F,/* 0xE8-0xEF */
+	0x3180,0x3181,0x3182,0x3183,0x3184,0x3185,0x3186,0x3187,/* 0xF0-0xF7 */
+	0x3188,0x3189,0x318A,0x318B,0x318C,0x318D,0x318E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCA47,0xCA48,0xCA49,0xCA4A,0xCA4B,0xCA4E,0xCA4F,/* 0x40-0x47 */
+	0xCA51,0xCA52,0xCA53,0xCA55,0xCA56,0xCA57,0xCA58,0xCA59,/* 0x48-0x4F */
+	0xCA5A,0xCA5B,0xCA5E,0xCA62,0xCA63,0xCA64,0xCA65,0xCA66,/* 0x50-0x57 */
+	0xCA67,0xCA69,0xCA6A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCA6B,0xCA6C,0xCA6D,0xCA6E,0xCA6F,0xCA70,0xCA71,/* 0x60-0x67 */
+	0xCA72,0xCA73,0xCA74,0xCA75,0xCA76,0xCA77,0xCA78,0xCA79,/* 0x68-0x6F */
+	0xCA7A,0xCA7B,0xCA7C,0xCA7E,0xCA7F,0xCA80,0xCA81,0xCA82,/* 0x70-0x77 */
+	0xCA83,0xCA85,0xCA86,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCA87,0xCA88,0xCA89,0xCA8A,0xCA8B,0xCA8C,0xCA8D,/* 0x80-0x87 */
+	0xCA8E,0xCA8F,0xCA90,0xCA91,0xCA92,0xCA93,0xCA94,0xCA95,/* 0x88-0x8F */
+	0xCA96,0xCA97,0xCA99,0xCA9A,0xCA9B,0xCA9C,0xCA9D,0xCA9E,/* 0x90-0x97 */
+	0xCA9F,0xCAA0,0xCAA1,0xCAA2,0xCAA3,0xCAA4,0xCAA5,0xCAA6,/* 0x98-0x9F */
+	0xCAA7,0x2170,0x2171,0x2172,0x2173,0x2174,0x2175,0x2176,/* 0xA0-0xA7 */
+	0x2177,0x2178,0x2179,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA8-0xAF */
+	0x2160,0x2161,0x2162,0x2163,0x2164,0x2165,0x2166,0x2167,/* 0xB0-0xB7 */
+	0x2168,0x2169,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xB8-0xBF */
+	0x0000,0x0391,0x0392,0x0393,0x0394,0x0395,0x0396,0x0397,/* 0xC0-0xC7 */
+	0x0398,0x0399,0x039A,0x039B,0x039C,0x039D,0x039E,0x039F,/* 0xC8-0xCF */
+	0x03A0,0x03A1,0x03A3,0x03A4,0x03A5,0x03A6,0x03A7,0x03A8,/* 0xD0-0xD7 */
+	0x03A9,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xD8-0xDF */
+	0x0000,0x03B1,0x03B2,0x03B3,0x03B4,0x03B5,0x03B6,0x03B7,/* 0xE0-0xE7 */
+	0x03B8,0x03B9,0x03BA,0x03BB,0x03BC,0x03BD,0x03BE,0x03BF,/* 0xE8-0xEF */
+	0x03C0,0x03C1,0x03C3,0x03C4,0x03C5,0x03C6,0x03C7,0x03C8,/* 0xF0-0xF7 */
+	0x03C9,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCAA8,0xCAA9,0xCAAA,0xCAAB,0xCAAC,0xCAAD,0xCAAE,/* 0x40-0x47 */
+	0xCAAF,0xCAB0,0xCAB1,0xCAB2,0xCAB3,0xCAB4,0xCAB5,0xCAB6,/* 0x48-0x4F */
+	0xCAB7,0xCAB8,0xCAB9,0xCABA,0xCABB,0xCABE,0xCABF,0xCAC1,/* 0x50-0x57 */
+	0xCAC2,0xCAC3,0xCAC5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCAC6,0xCAC7,0xCAC8,0xCAC9,0xCACA,0xCACB,0xCACE,/* 0x60-0x67 */
+	0xCAD0,0xCAD2,0xCAD4,0xCAD5,0xCAD6,0xCAD7,0xCADA,0xCADB,/* 0x68-0x6F */
+	0xCADC,0xCADD,0xCADE,0xCADF,0xCAE1,0xCAE2,0xCAE3,0xCAE4,/* 0x70-0x77 */
+	0xCAE5,0xCAE6,0xCAE7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCAE8,0xCAE9,0xCAEA,0xCAEB,0xCAED,0xCAEE,0xCAEF,/* 0x80-0x87 */
+	0xCAF0,0xCAF1,0xCAF2,0xCAF3,0xCAF5,0xCAF6,0xCAF7,0xCAF8,/* 0x88-0x8F */
+	0xCAF9,0xCAFA,0xCAFB,0xCAFC,0xCAFD,0xCAFE,0xCAFF,0xCB00,/* 0x90-0x97 */
+	0xCB01,0xCB02,0xCB03,0xCB04,0xCB05,0xCB06,0xCB07,0xCB09,/* 0x98-0x9F */
+	0xCB0A,0x2500,0x2502,0x250C,0x2510,0x2518,0x2514,0x251C,/* 0xA0-0xA7 */
+	0x252C,0x2524,0x2534,0x253C,0x2501,0x2503,0x250F,0x2513,/* 0xA8-0xAF */
+	0x251B,0x2517,0x2523,0x2533,0x252B,0x253B,0x254B,0x2520,/* 0xB0-0xB7 */
+	0x252F,0x2528,0x2537,0x253F,0x251D,0x2530,0x2525,0x2538,/* 0xB8-0xBF */
+	0x2542,0x2512,0x2511,0x251A,0x2519,0x2516,0x2515,0x250E,/* 0xC0-0xC7 */
+	0x250D,0x251E,0x251F,0x2521,0x2522,0x2526,0x2527,0x2529,/* 0xC8-0xCF */
+	0x252A,0x252D,0x252E,0x2531,0x2532,0x2535,0x2536,0x2539,/* 0xD0-0xD7 */
+	0x253A,0x253D,0x253E,0x2540,0x2541,0x2543,0x2544,0x2545,/* 0xD8-0xDF */
+	0x2546,0x2547,0x2548,0x2549,0x254A,0x0000,0x0000,0x0000,/* 0xE0-0xE7 */
+};
+
+static wchar_t c2u_A7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCB0B,0xCB0C,0xCB0D,0xCB0E,0xCB0F,0xCB11,0xCB12,/* 0x40-0x47 */
+	0xCB13,0xCB15,0xCB16,0xCB17,0xCB19,0xCB1A,0xCB1B,0xCB1C,/* 0x48-0x4F */
+	0xCB1D,0xCB1E,0xCB1F,0xCB22,0xCB23,0xCB24,0xCB25,0xCB26,/* 0x50-0x57 */
+	0xCB27,0xCB28,0xCB29,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCB2A,0xCB2B,0xCB2C,0xCB2D,0xCB2E,0xCB2F,0xCB30,/* 0x60-0x67 */
+	0xCB31,0xCB32,0xCB33,0xCB34,0xCB35,0xCB36,0xCB37,0xCB38,/* 0x68-0x6F */
+	0xCB39,0xCB3A,0xCB3B,0xCB3C,0xCB3D,0xCB3E,0xCB3F,0xCB40,/* 0x70-0x77 */
+	0xCB42,0xCB43,0xCB44,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCB45,0xCB46,0xCB47,0xCB4A,0xCB4B,0xCB4D,0xCB4E,/* 0x80-0x87 */
+	0xCB4F,0xCB51,0xCB52,0xCB53,0xCB54,0xCB55,0xCB56,0xCB57,/* 0x88-0x8F */
+	0xCB5A,0xCB5B,0xCB5C,0xCB5E,0xCB5F,0xCB60,0xCB61,0xCB62,/* 0x90-0x97 */
+	0xCB63,0xCB65,0xCB66,0xCB67,0xCB68,0xCB69,0xCB6A,0xCB6B,/* 0x98-0x9F */
+	0xCB6C,0x3395,0x3396,0x3397,0x2113,0x3398,0x33C4,0x33A3,/* 0xA0-0xA7 */
+	0x33A4,0x33A5,0x33A6,0x3399,0x339A,0x339B,0x339C,0x339D,/* 0xA8-0xAF */
+	0x339E,0x339F,0x33A0,0x33A1,0x33A2,0x33CA,0x338D,0x338E,/* 0xB0-0xB7 */
+	0x338F,0x33CF,0x3388,0x3389,0x33C8,0x33A7,0x33A8,0x33B0,/* 0xB8-0xBF */
+	0x33B1,0x33B2,0x33B3,0x33B4,0x33B5,0x33B6,0x33B7,0x33B8,/* 0xC0-0xC7 */
+	0x33B9,0x3380,0x3381,0x3382,0x3383,0x3384,0x33BA,0x33BB,/* 0xC8-0xCF */
+	0x33BC,0x33BD,0x33BE,0x33BF,0x3390,0x3391,0x3392,0x3393,/* 0xD0-0xD7 */
+	0x3394,0x2126,0x33C0,0x33C1,0x338A,0x338B,0x338C,0x33D6,/* 0xD8-0xDF */
+	0x33C5,0x33AD,0x33AE,0x33AF,0x33DB,0x33A9,0x33AA,0x33AB,/* 0xE0-0xE7 */
+	0x33AC,0x33DD,0x33D0,0x33D3,0x33C3,0x33C9,0x33DC,0x33C6,/* 0xE8-0xEF */
+};
+
+static wchar_t c2u_A8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCB6D,0xCB6E,0xCB6F,0xCB70,0xCB71,0xCB72,0xCB73,/* 0x40-0x47 */
+	0xCB74,0xCB75,0xCB76,0xCB77,0xCB7A,0xCB7B,0xCB7C,0xCB7D,/* 0x48-0x4F */
+	0xCB7E,0xCB7F,0xCB80,0xCB81,0xCB82,0xCB83,0xCB84,0xCB85,/* 0x50-0x57 */
+	0xCB86,0xCB87,0xCB88,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCB89,0xCB8A,0xCB8B,0xCB8C,0xCB8D,0xCB8E,0xCB8F,/* 0x60-0x67 */
+	0xCB90,0xCB91,0xCB92,0xCB93,0xCB94,0xCB95,0xCB96,0xCB97,/* 0x68-0x6F */
+	0xCB98,0xCB99,0xCB9A,0xCB9B,0xCB9D,0xCB9E,0xCB9F,0xCBA0,/* 0x70-0x77 */
+	0xCBA1,0xCBA2,0xCBA3,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCBA4,0xCBA5,0xCBA6,0xCBA7,0xCBA8,0xCBA9,0xCBAA,/* 0x80-0x87 */
+	0xCBAB,0xCBAC,0xCBAD,0xCBAE,0xCBAF,0xCBB0,0xCBB1,0xCBB2,/* 0x88-0x8F */
+	0xCBB3,0xCBB4,0xCBB5,0xCBB6,0xCBB7,0xCBB9,0xCBBA,0xCBBB,/* 0x90-0x97 */
+	0xCBBC,0xCBBD,0xCBBE,0xCBBF,0xCBC0,0xCBC1,0xCBC2,0xCBC3,/* 0x98-0x9F */
+	0xCBC4,0x00C6,0x00D0,0x00AA,0x0126,0x0000,0x0132,0x0000,/* 0xA0-0xA7 */
+	0x013F,0x0141,0x00D8,0x0152,0x00BA,0x00DE,0x0166,0x014A,/* 0xA8-0xAF */
+	0x0000,0x3260,0x3261,0x3262,0x3263,0x3264,0x3265,0x3266,/* 0xB0-0xB7 */
+	0x3267,0x3268,0x3269,0x326A,0x326B,0x326C,0x326D,0x326E,/* 0xB8-0xBF */
+	0x326F,0x3270,0x3271,0x3272,0x3273,0x3274,0x3275,0x3276,/* 0xC0-0xC7 */
+	0x3277,0x3278,0x3279,0x327A,0x327B,0x24D0,0x24D1,0x24D2,/* 0xC8-0xCF */
+	0x24D3,0x24D4,0x24D5,0x24D6,0x24D7,0x24D8,0x24D9,0x24DA,/* 0xD0-0xD7 */
+	0x24DB,0x24DC,0x24DD,0x24DE,0x24DF,0x24E0,0x24E1,0x24E2,/* 0xD8-0xDF */
+	0x24E3,0x24E4,0x24E5,0x24E6,0x24E7,0x24E8,0x24E9,0x2460,/* 0xE0-0xE7 */
+	0x2461,0x2462,0x2463,0x2464,0x2465,0x2466,0x2467,0x2468,/* 0xE8-0xEF */
+	0x2469,0x246A,0x246B,0x246C,0x246D,0x246E,0x00BD,0x2153,/* 0xF0-0xF7 */
+	0x2154,0x00BC,0x00BE,0x215B,0x215C,0x215D,0x215E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCBC5,0xCBC6,0xCBC7,0xCBC8,0xCBC9,0xCBCA,0xCBCB,/* 0x40-0x47 */
+	0xCBCC,0xCBCD,0xCBCE,0xCBCF,0xCBD0,0xCBD1,0xCBD2,0xCBD3,/* 0x48-0x4F */
+	0xCBD5,0xCBD6,0xCBD7,0xCBD8,0xCBD9,0xCBDA,0xCBDB,0xCBDC,/* 0x50-0x57 */
+	0xCBDD,0xCBDE,0xCBDF,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCBE0,0xCBE1,0xCBE2,0xCBE3,0xCBE5,0xCBE6,0xCBE8,/* 0x60-0x67 */
+	0xCBEA,0xCBEB,0xCBEC,0xCBED,0xCBEE,0xCBEF,0xCBF0,0xCBF1,/* 0x68-0x6F */
+	0xCBF2,0xCBF3,0xCBF4,0xCBF5,0xCBF6,0xCBF7,0xCBF8,0xCBF9,/* 0x70-0x77 */
+	0xCBFA,0xCBFB,0xCBFC,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCBFD,0xCBFE,0xCBFF,0xCC00,0xCC01,0xCC02,0xCC03,/* 0x80-0x87 */
+	0xCC04,0xCC05,0xCC06,0xCC07,0xCC08,0xCC09,0xCC0A,0xCC0B,/* 0x88-0x8F */
+	0xCC0E,0xCC0F,0xCC11,0xCC12,0xCC13,0xCC15,0xCC16,0xCC17,/* 0x90-0x97 */
+	0xCC18,0xCC19,0xCC1A,0xCC1B,0xCC1E,0xCC1F,0xCC20,0xCC23,/* 0x98-0x9F */
+	0xCC24,0x00E6,0x0111,0x00F0,0x0127,0x0131,0x0133,0x0138,/* 0xA0-0xA7 */
+	0x0140,0x0142,0x00F8,0x0153,0x00DF,0x00FE,0x0167,0x014B,/* 0xA8-0xAF */
+	0x0149,0x3200,0x3201,0x3202,0x3203,0x3204,0x3205,0x3206,/* 0xB0-0xB7 */
+	0x3207,0x3208,0x3209,0x320A,0x320B,0x320C,0x320D,0x320E,/* 0xB8-0xBF */
+	0x320F,0x3210,0x3211,0x3212,0x3213,0x3214,0x3215,0x3216,/* 0xC0-0xC7 */
+	0x3217,0x3218,0x3219,0x321A,0x321B,0x249C,0x249D,0x249E,/* 0xC8-0xCF */
+	0x249F,0x24A0,0x24A1,0x24A2,0x24A3,0x24A4,0x24A5,0x24A6,/* 0xD0-0xD7 */
+	0x24A7,0x24A8,0x24A9,0x24AA,0x24AB,0x24AC,0x24AD,0x24AE,/* 0xD8-0xDF */
+	0x24AF,0x24B0,0x24B1,0x24B2,0x24B3,0x24B4,0x24B5,0x2474,/* 0xE0-0xE7 */
+	0x2475,0x2476,0x2477,0x2478,0x2479,0x247A,0x247B,0x247C,/* 0xE8-0xEF */
+	0x247D,0x247E,0x247F,0x2480,0x2481,0x2482,0x00B9,0x00B2,/* 0xF0-0xF7 */
+	0x00B3,0x2074,0x207F,0x2081,0x2082,0x2083,0x2084,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCC25,0xCC26,0xCC2A,0xCC2B,0xCC2D,0xCC2F,0xCC31,/* 0x40-0x47 */
+	0xCC32,0xCC33,0xCC34,0xCC35,0xCC36,0xCC37,0xCC3A,0xCC3F,/* 0x48-0x4F */
+	0xCC40,0xCC41,0xCC42,0xCC43,0xCC46,0xCC47,0xCC49,0xCC4A,/* 0x50-0x57 */
+	0xCC4B,0xCC4D,0xCC4E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCC4F,0xCC50,0xCC51,0xCC52,0xCC53,0xCC56,0xCC5A,/* 0x60-0x67 */
+	0xCC5B,0xCC5C,0xCC5D,0xCC5E,0xCC5F,0xCC61,0xCC62,0xCC63,/* 0x68-0x6F */
+	0xCC65,0xCC67,0xCC69,0xCC6A,0xCC6B,0xCC6C,0xCC6D,0xCC6E,/* 0x70-0x77 */
+	0xCC6F,0xCC71,0xCC72,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCC73,0xCC74,0xCC76,0xCC77,0xCC78,0xCC79,0xCC7A,/* 0x80-0x87 */
+	0xCC7B,0xCC7C,0xCC7D,0xCC7E,0xCC7F,0xCC80,0xCC81,0xCC82,/* 0x88-0x8F */
+	0xCC83,0xCC84,0xCC85,0xCC86,0xCC87,0xCC88,0xCC89,0xCC8A,/* 0x90-0x97 */
+	0xCC8B,0xCC8C,0xCC8D,0xCC8E,0xCC8F,0xCC90,0xCC91,0xCC92,/* 0x98-0x9F */
+	0xCC93,0x3041,0x3042,0x3043,0x3044,0x3045,0x3046,0x3047,/* 0xA0-0xA7 */
+	0x3048,0x3049,0x304A,0x304B,0x304C,0x304D,0x304E,0x304F,/* 0xA8-0xAF */
+	0x3050,0x3051,0x3052,0x3053,0x3054,0x3055,0x3056,0x3057,/* 0xB0-0xB7 */
+	0x3058,0x3059,0x305A,0x305B,0x305C,0x305D,0x305E,0x305F,/* 0xB8-0xBF */
+	0x3060,0x3061,0x3062,0x3063,0x3064,0x3065,0x3066,0x3067,/* 0xC0-0xC7 */
+	0x3068,0x3069,0x306A,0x306B,0x306C,0x306D,0x306E,0x306F,/* 0xC8-0xCF */
+	0x3070,0x3071,0x3072,0x3073,0x3074,0x3075,0x3076,0x3077,/* 0xD0-0xD7 */
+	0x3078,0x3079,0x307A,0x307B,0x307C,0x307D,0x307E,0x307F,/* 0xD8-0xDF */
+	0x3080,0x3081,0x3082,0x3083,0x3084,0x3085,0x3086,0x3087,/* 0xE0-0xE7 */
+	0x3088,0x3089,0x308A,0x308B,0x308C,0x308D,0x308E,0x308F,/* 0xE8-0xEF */
+	0x3090,0x3091,0x3092,0x3093,0x0000,0x0000,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_AB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCC94,0xCC95,0xCC96,0xCC97,0xCC9A,0xCC9B,0xCC9D,/* 0x40-0x47 */
+	0xCC9E,0xCC9F,0xCCA1,0xCCA2,0xCCA3,0xCCA4,0xCCA5,0xCCA6,/* 0x48-0x4F */
+	0xCCA7,0xCCAA,0xCCAE,0xCCAF,0xCCB0,0xCCB1,0xCCB2,0xCCB3,/* 0x50-0x57 */
+	0xCCB6,0xCCB7,0xCCB9,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCCBA,0xCCBB,0xCCBD,0xCCBE,0xCCBF,0xCCC0,0xCCC1,/* 0x60-0x67 */
+	0xCCC2,0xCCC3,0xCCC6,0xCCC8,0xCCCA,0xCCCB,0xCCCC,0xCCCD,/* 0x68-0x6F */
+	0xCCCE,0xCCCF,0xCCD1,0xCCD2,0xCCD3,0xCCD5,0xCCD6,0xCCD7,/* 0x70-0x77 */
+	0xCCD8,0xCCD9,0xCCDA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCCDB,0xCCDC,0xCCDD,0xCCDE,0xCCDF,0xCCE0,0xCCE1,/* 0x80-0x87 */
+	0xCCE2,0xCCE3,0xCCE5,0xCCE6,0xCCE7,0xCCE8,0xCCE9,0xCCEA,/* 0x88-0x8F */
+	0xCCEB,0xCCED,0xCCEE,0xCCEF,0xCCF1,0xCCF2,0xCCF3,0xCCF4,/* 0x90-0x97 */
+	0xCCF5,0xCCF6,0xCCF7,0xCCF8,0xCCF9,0xCCFA,0xCCFB,0xCCFC,/* 0x98-0x9F */
+	0xCCFD,0x30A1,0x30A2,0x30A3,0x30A4,0x30A5,0x30A6,0x30A7,/* 0xA0-0xA7 */
+	0x30A8,0x30A9,0x30AA,0x30AB,0x30AC,0x30AD,0x30AE,0x30AF,/* 0xA8-0xAF */
+	0x30B0,0x30B1,0x30B2,0x30B3,0x30B4,0x30B5,0x30B6,0x30B7,/* 0xB0-0xB7 */
+	0x30B8,0x30B9,0x30BA,0x30BB,0x30BC,0x30BD,0x30BE,0x30BF,/* 0xB8-0xBF */
+	0x30C0,0x30C1,0x30C2,0x30C3,0x30C4,0x30C5,0x30C6,0x30C7,/* 0xC0-0xC7 */
+	0x30C8,0x30C9,0x30CA,0x30CB,0x30CC,0x30CD,0x30CE,0x30CF,/* 0xC8-0xCF */
+	0x30D0,0x30D1,0x30D2,0x30D3,0x30D4,0x30D5,0x30D6,0x30D7,/* 0xD0-0xD7 */
+	0x30D8,0x30D9,0x30DA,0x30DB,0x30DC,0x30DD,0x30DE,0x30DF,/* 0xD8-0xDF */
+	0x30E0,0x30E1,0x30E2,0x30E3,0x30E4,0x30E5,0x30E6,0x30E7,/* 0xE0-0xE7 */
+	0x30E8,0x30E9,0x30EA,0x30EB,0x30EC,0x30ED,0x30EE,0x30EF,/* 0xE8-0xEF */
+	0x30F0,0x30F1,0x30F2,0x30F3,0x30F4,0x30F5,0x30F6,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_AC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCCFE,0xCCFF,0xCD00,0xCD02,0xCD03,0xCD04,0xCD05,/* 0x40-0x47 */
+	0xCD06,0xCD07,0xCD0A,0xCD0B,0xCD0D,0xCD0E,0xCD0F,0xCD11,/* 0x48-0x4F */
+	0xCD12,0xCD13,0xCD14,0xCD15,0xCD16,0xCD17,0xCD1A,0xCD1C,/* 0x50-0x57 */
+	0xCD1E,0xCD1F,0xCD20,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCD21,0xCD22,0xCD23,0xCD25,0xCD26,0xCD27,0xCD29,/* 0x60-0x67 */
+	0xCD2A,0xCD2B,0xCD2D,0xCD2E,0xCD2F,0xCD30,0xCD31,0xCD32,/* 0x68-0x6F */
+	0xCD33,0xCD34,0xCD35,0xCD36,0xCD37,0xCD38,0xCD3A,0xCD3B,/* 0x70-0x77 */
+	0xCD3C,0xCD3D,0xCD3E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCD3F,0xCD40,0xCD41,0xCD42,0xCD43,0xCD44,0xCD45,/* 0x80-0x87 */
+	0xCD46,0xCD47,0xCD48,0xCD49,0xCD4A,0xCD4B,0xCD4C,0xCD4D,/* 0x88-0x8F */
+	0xCD4E,0xCD4F,0xCD50,0xCD51,0xCD52,0xCD53,0xCD54,0xCD55,/* 0x90-0x97 */
+	0xCD56,0xCD57,0xCD58,0xCD59,0xCD5A,0xCD5B,0xCD5D,0xCD5E,/* 0x98-0x9F */
+	0xCD5F,0x0410,0x0411,0x0412,0x0413,0x0414,0x0415,0x0401,/* 0xA0-0xA7 */
+	0x0416,0x0417,0x0418,0x0419,0x041A,0x041B,0x041C,0x041D,/* 0xA8-0xAF */
+	0x041E,0x041F,0x0420,0x0421,0x0422,0x0423,0x0424,0x0425,/* 0xB0-0xB7 */
+	0x0426,0x0427,0x0428,0x0429,0x042A,0x042B,0x042C,0x042D,/* 0xB8-0xBF */
+	0x042E,0x042F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC0-0xC7 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC8-0xCF */
+	0x0000,0x0430,0x0431,0x0432,0x0433,0x0434,0x0435,0x0451,/* 0xD0-0xD7 */
+	0x0436,0x0437,0x0438,0x0439,0x043A,0x043B,0x043C,0x043D,/* 0xD8-0xDF */
+	0x043E,0x043F,0x0440,0x0441,0x0442,0x0443,0x0444,0x0445,/* 0xE0-0xE7 */
+	0x0446,0x0447,0x0448,0x0449,0x044A,0x044B,0x044C,0x044D,/* 0xE8-0xEF */
+	0x044E,0x044F,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xF0-0xF7 */
+};
+
+static wchar_t c2u_AD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCD61,0xCD62,0xCD63,0xCD65,0xCD66,0xCD67,0xCD68,/* 0x40-0x47 */
+	0xCD69,0xCD6A,0xCD6B,0xCD6E,0xCD70,0xCD72,0xCD73,0xCD74,/* 0x48-0x4F */
+	0xCD75,0xCD76,0xCD77,0xCD79,0xCD7A,0xCD7B,0xCD7C,0xCD7D,/* 0x50-0x57 */
+	0xCD7E,0xCD7F,0xCD80,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCD81,0xCD82,0xCD83,0xCD84,0xCD85,0xCD86,0xCD87,/* 0x60-0x67 */
+	0xCD89,0xCD8A,0xCD8B,0xCD8C,0xCD8D,0xCD8E,0xCD8F,0xCD90,/* 0x68-0x6F */
+	0xCD91,0xCD92,0xCD93,0xCD96,0xCD97,0xCD99,0xCD9A,0xCD9B,/* 0x70-0x77 */
+	0xCD9D,0xCD9E,0xCD9F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCDA0,0xCDA1,0xCDA2,0xCDA3,0xCDA6,0xCDA8,0xCDAA,/* 0x80-0x87 */
+	0xCDAB,0xCDAC,0xCDAD,0xCDAE,0xCDAF,0xCDB1,0xCDB2,0xCDB3,/* 0x88-0x8F */
+	0xCDB4,0xCDB5,0xCDB6,0xCDB7,0xCDB8,0xCDB9,0xCDBA,0xCDBB,/* 0x90-0x97 */
+	0xCDBC,0xCDBD,0xCDBE,0xCDBF,0xCDC0,0xCDC1,0xCDC2,0xCDC3,/* 0x98-0x9F */
+	0xCDC5,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCDC6,0xCDC7,0xCDC8,0xCDC9,0xCDCA,0xCDCB,0xCDCD,/* 0x40-0x47 */
+	0xCDCE,0xCDCF,0xCDD1,0xCDD2,0xCDD3,0xCDD4,0xCDD5,0xCDD6,/* 0x48-0x4F */
+	0xCDD7,0xCDD8,0xCDD9,0xCDDA,0xCDDB,0xCDDC,0xCDDD,0xCDDE,/* 0x50-0x57 */
+	0xCDDF,0xCDE0,0xCDE1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCDE2,0xCDE3,0xCDE4,0xCDE5,0xCDE6,0xCDE7,0xCDE9,/* 0x60-0x67 */
+	0xCDEA,0xCDEB,0xCDED,0xCDEE,0xCDEF,0xCDF1,0xCDF2,0xCDF3,/* 0x68-0x6F */
+	0xCDF4,0xCDF5,0xCDF6,0xCDF7,0xCDFA,0xCDFC,0xCDFE,0xCDFF,/* 0x70-0x77 */
+	0xCE00,0xCE01,0xCE02,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCE03,0xCE05,0xCE06,0xCE07,0xCE09,0xCE0A,0xCE0B,/* 0x80-0x87 */
+	0xCE0D,0xCE0E,0xCE0F,0xCE10,0xCE11,0xCE12,0xCE13,0xCE15,/* 0x88-0x8F */
+	0xCE16,0xCE17,0xCE18,0xCE1A,0xCE1B,0xCE1C,0xCE1D,0xCE1E,/* 0x90-0x97 */
+	0xCE1F,0xCE22,0xCE23,0xCE25,0xCE26,0xCE27,0xCE29,0xCE2A,/* 0x98-0x9F */
+	0xCE2B,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_AF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCE2C,0xCE2D,0xCE2E,0xCE2F,0xCE32,0xCE34,0xCE36,/* 0x40-0x47 */
+	0xCE37,0xCE38,0xCE39,0xCE3A,0xCE3B,0xCE3C,0xCE3D,0xCE3E,/* 0x48-0x4F */
+	0xCE3F,0xCE40,0xCE41,0xCE42,0xCE43,0xCE44,0xCE45,0xCE46,/* 0x50-0x57 */
+	0xCE47,0xCE48,0xCE49,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCE4A,0xCE4B,0xCE4C,0xCE4D,0xCE4E,0xCE4F,0xCE50,/* 0x60-0x67 */
+	0xCE51,0xCE52,0xCE53,0xCE54,0xCE55,0xCE56,0xCE57,0xCE5A,/* 0x68-0x6F */
+	0xCE5B,0xCE5D,0xCE5E,0xCE62,0xCE63,0xCE64,0xCE65,0xCE66,/* 0x70-0x77 */
+	0xCE67,0xCE6A,0xCE6C,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCE6E,0xCE6F,0xCE70,0xCE71,0xCE72,0xCE73,0xCE76,/* 0x80-0x87 */
+	0xCE77,0xCE79,0xCE7A,0xCE7B,0xCE7D,0xCE7E,0xCE7F,0xCE80,/* 0x88-0x8F */
+	0xCE81,0xCE82,0xCE83,0xCE86,0xCE88,0xCE8A,0xCE8B,0xCE8C,/* 0x90-0x97 */
+	0xCE8D,0xCE8E,0xCE8F,0xCE92,0xCE93,0xCE95,0xCE96,0xCE97,/* 0x98-0x9F */
+	0xCE99,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xA0-0xA7 */
+};
+
+static wchar_t c2u_B0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCE9A,0xCE9B,0xCE9C,0xCE9D,0xCE9E,0xCE9F,0xCEA2,/* 0x40-0x47 */
+	0xCEA6,0xCEA7,0xCEA8,0xCEA9,0xCEAA,0xCEAB,0xCEAE,0xCEAF,/* 0x48-0x4F */
+	0xCEB0,0xCEB1,0xCEB2,0xCEB3,0xCEB4,0xCEB5,0xCEB6,0xCEB7,/* 0x50-0x57 */
+	0xCEB8,0xCEB9,0xCEBA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCEBB,0xCEBC,0xCEBD,0xCEBE,0xCEBF,0xCEC0,0xCEC2,/* 0x60-0x67 */
+	0xCEC3,0xCEC4,0xCEC5,0xCEC6,0xCEC7,0xCEC8,0xCEC9,0xCECA,/* 0x68-0x6F */
+	0xCECB,0xCECC,0xCECD,0xCECE,0xCECF,0xCED0,0xCED1,0xCED2,/* 0x70-0x77 */
+	0xCED3,0xCED4,0xCED5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCED6,0xCED7,0xCED8,0xCED9,0xCEDA,0xCEDB,0xCEDC,/* 0x80-0x87 */
+	0xCEDD,0xCEDE,0xCEDF,0xCEE0,0xCEE1,0xCEE2,0xCEE3,0xCEE6,/* 0x88-0x8F */
+	0xCEE7,0xCEE9,0xCEEA,0xCEED,0xCEEE,0xCEEF,0xCEF0,0xCEF1,/* 0x90-0x97 */
+	0xCEF2,0xCEF3,0xCEF6,0xCEFA,0xCEFB,0xCEFC,0xCEFD,0xCEFE,/* 0x98-0x9F */
+	0xCEFF,0xAC00,0xAC01,0xAC04,0xAC07,0xAC08,0xAC09,0xAC0A,/* 0xA0-0xA7 */
+	0xAC10,0xAC11,0xAC12,0xAC13,0xAC14,0xAC15,0xAC16,0xAC17,/* 0xA8-0xAF */
+	0xAC19,0xAC1A,0xAC1B,0xAC1C,0xAC1D,0xAC20,0xAC24,0xAC2C,/* 0xB0-0xB7 */
+	0xAC2D,0xAC2F,0xAC30,0xAC31,0xAC38,0xAC39,0xAC3C,0xAC40,/* 0xB8-0xBF */
+	0xAC4B,0xAC4D,0xAC54,0xAC58,0xAC5C,0xAC70,0xAC71,0xAC74,/* 0xC0-0xC7 */
+	0xAC77,0xAC78,0xAC7A,0xAC80,0xAC81,0xAC83,0xAC84,0xAC85,/* 0xC8-0xCF */
+	0xAC86,0xAC89,0xAC8A,0xAC8B,0xAC8C,0xAC90,0xAC94,0xAC9C,/* 0xD0-0xD7 */
+	0xAC9D,0xAC9F,0xACA0,0xACA1,0xACA8,0xACA9,0xACAA,0xACAC,/* 0xD8-0xDF */
+	0xACAF,0xACB0,0xACB8,0xACB9,0xACBB,0xACBC,0xACBD,0xACC1,/* 0xE0-0xE7 */
+	0xACC4,0xACC8,0xACCC,0xACD5,0xACD7,0xACE0,0xACE1,0xACE4,/* 0xE8-0xEF */
+	0xACE7,0xACE8,0xACEA,0xACEC,0xACEF,0xACF0,0xACF1,0xACF3,/* 0xF0-0xF7 */
+	0xACF5,0xACF6,0xACFC,0xACFD,0xAD00,0xAD04,0xAD06,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCF02,0xCF03,0xCF05,0xCF06,0xCF07,0xCF09,0xCF0A,/* 0x40-0x47 */
+	0xCF0B,0xCF0C,0xCF0D,0xCF0E,0xCF0F,0xCF12,0xCF14,0xCF16,/* 0x48-0x4F */
+	0xCF17,0xCF18,0xCF19,0xCF1A,0xCF1B,0xCF1D,0xCF1E,0xCF1F,/* 0x50-0x57 */
+	0xCF21,0xCF22,0xCF23,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCF25,0xCF26,0xCF27,0xCF28,0xCF29,0xCF2A,0xCF2B,/* 0x60-0x67 */
+	0xCF2E,0xCF32,0xCF33,0xCF34,0xCF35,0xCF36,0xCF37,0xCF39,/* 0x68-0x6F */
+	0xCF3A,0xCF3B,0xCF3C,0xCF3D,0xCF3E,0xCF3F,0xCF40,0xCF41,/* 0x70-0x77 */
+	0xCF42,0xCF43,0xCF44,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCF45,0xCF46,0xCF47,0xCF48,0xCF49,0xCF4A,0xCF4B,/* 0x80-0x87 */
+	0xCF4C,0xCF4D,0xCF4E,0xCF4F,0xCF50,0xCF51,0xCF52,0xCF53,/* 0x88-0x8F */
+	0xCF56,0xCF57,0xCF59,0xCF5A,0xCF5B,0xCF5D,0xCF5E,0xCF5F,/* 0x90-0x97 */
+	0xCF60,0xCF61,0xCF62,0xCF63,0xCF66,0xCF68,0xCF6A,0xCF6B,/* 0x98-0x9F */
+	0xCF6C,0xAD0C,0xAD0D,0xAD0F,0xAD11,0xAD18,0xAD1C,0xAD20,/* 0xA0-0xA7 */
+	0xAD29,0xAD2C,0xAD2D,0xAD34,0xAD35,0xAD38,0xAD3C,0xAD44,/* 0xA8-0xAF */
+	0xAD45,0xAD47,0xAD49,0xAD50,0xAD54,0xAD58,0xAD61,0xAD63,/* 0xB0-0xB7 */
+	0xAD6C,0xAD6D,0xAD70,0xAD73,0xAD74,0xAD75,0xAD76,0xAD7B,/* 0xB8-0xBF */
+	0xAD7C,0xAD7D,0xAD7F,0xAD81,0xAD82,0xAD88,0xAD89,0xAD8C,/* 0xC0-0xC7 */
+	0xAD90,0xAD9C,0xAD9D,0xADA4,0xADB7,0xADC0,0xADC1,0xADC4,/* 0xC8-0xCF */
+	0xADC8,0xADD0,0xADD1,0xADD3,0xADDC,0xADE0,0xADE4,0xADF8,/* 0xD0-0xD7 */
+	0xADF9,0xADFC,0xADFF,0xAE00,0xAE01,0xAE08,0xAE09,0xAE0B,/* 0xD8-0xDF */
+	0xAE0D,0xAE14,0xAE30,0xAE31,0xAE34,0xAE37,0xAE38,0xAE3A,/* 0xE0-0xE7 */
+	0xAE40,0xAE41,0xAE43,0xAE45,0xAE46,0xAE4A,0xAE4C,0xAE4D,/* 0xE8-0xEF */
+	0xAE4E,0xAE50,0xAE54,0xAE56,0xAE5C,0xAE5D,0xAE5F,0xAE60,/* 0xF0-0xF7 */
+	0xAE61,0xAE65,0xAE68,0xAE69,0xAE6C,0xAE70,0xAE78,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCF6D,0xCF6E,0xCF6F,0xCF72,0xCF73,0xCF75,0xCF76,/* 0x40-0x47 */
+	0xCF77,0xCF79,0xCF7A,0xCF7B,0xCF7C,0xCF7D,0xCF7E,0xCF7F,/* 0x48-0x4F */
+	0xCF81,0xCF82,0xCF83,0xCF84,0xCF86,0xCF87,0xCF88,0xCF89,/* 0x50-0x57 */
+	0xCF8A,0xCF8B,0xCF8D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCF8E,0xCF8F,0xCF90,0xCF91,0xCF92,0xCF93,0xCF94,/* 0x60-0x67 */
+	0xCF95,0xCF96,0xCF97,0xCF98,0xCF99,0xCF9A,0xCF9B,0xCF9C,/* 0x68-0x6F */
+	0xCF9D,0xCF9E,0xCF9F,0xCFA0,0xCFA2,0xCFA3,0xCFA4,0xCFA5,/* 0x70-0x77 */
+	0xCFA6,0xCFA7,0xCFA9,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xCFAA,0xCFAB,0xCFAC,0xCFAD,0xCFAE,0xCFAF,0xCFB1,/* 0x80-0x87 */
+	0xCFB2,0xCFB3,0xCFB4,0xCFB5,0xCFB6,0xCFB7,0xCFB8,0xCFB9,/* 0x88-0x8F */
+	0xCFBA,0xCFBB,0xCFBC,0xCFBD,0xCFBE,0xCFBF,0xCFC0,0xCFC1,/* 0x90-0x97 */
+	0xCFC2,0xCFC3,0xCFC5,0xCFC6,0xCFC7,0xCFC8,0xCFC9,0xCFCA,/* 0x98-0x9F */
+	0xCFCB,0xAE79,0xAE7B,0xAE7C,0xAE7D,0xAE84,0xAE85,0xAE8C,/* 0xA0-0xA7 */
+	0xAEBC,0xAEBD,0xAEBE,0xAEC0,0xAEC4,0xAECC,0xAECD,0xAECF,/* 0xA8-0xAF */
+	0xAED0,0xAED1,0xAED8,0xAED9,0xAEDC,0xAEE8,0xAEEB,0xAEED,/* 0xB0-0xB7 */
+	0xAEF4,0xAEF8,0xAEFC,0xAF07,0xAF08,0xAF0D,0xAF10,0xAF2C,/* 0xB8-0xBF */
+	0xAF2D,0xAF30,0xAF32,0xAF34,0xAF3C,0xAF3D,0xAF3F,0xAF41,/* 0xC0-0xC7 */
+	0xAF42,0xAF43,0xAF48,0xAF49,0xAF50,0xAF5C,0xAF5D,0xAF64,/* 0xC8-0xCF */
+	0xAF65,0xAF79,0xAF80,0xAF84,0xAF88,0xAF90,0xAF91,0xAF95,/* 0xD0-0xD7 */
+	0xAF9C,0xAFB8,0xAFB9,0xAFBC,0xAFC0,0xAFC7,0xAFC8,0xAFC9,/* 0xD8-0xDF */
+	0xAFCB,0xAFCD,0xAFCE,0xAFD4,0xAFDC,0xAFE8,0xAFE9,0xAFF0,/* 0xE0-0xE7 */
+	0xAFF1,0xAFF4,0xAFF8,0xB000,0xB001,0xB004,0xB00C,0xB010,/* 0xE8-0xEF */
+	0xB014,0xB01C,0xB01D,0xB028,0xB044,0xB045,0xB048,0xB04A,/* 0xF0-0xF7 */
+	0xB04C,0xB04E,0xB053,0xB054,0xB055,0xB057,0xB059,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xCFCC,0xCFCD,0xCFCE,0xCFCF,0xCFD0,0xCFD1,0xCFD2,/* 0x40-0x47 */
+	0xCFD3,0xCFD4,0xCFD5,0xCFD6,0xCFD7,0xCFD8,0xCFD9,0xCFDA,/* 0x48-0x4F */
+	0xCFDB,0xCFDC,0xCFDD,0xCFDE,0xCFDF,0xCFE2,0xCFE3,0xCFE5,/* 0x50-0x57 */
+	0xCFE6,0xCFE7,0xCFE9,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xCFEA,0xCFEB,0xCFEC,0xCFED,0xCFEE,0xCFEF,0xCFF2,/* 0x60-0x67 */
+	0xCFF4,0xCFF6,0xCFF7,0xCFF8,0xCFF9,0xCFFA,0xCFFB,0xCFFD,/* 0x68-0x6F */
+	0xCFFE,0xCFFF,0xD001,0xD002,0xD003,0xD005,0xD006,0xD007,/* 0x70-0x77 */
+	0xD008,0xD009,0xD00A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD00B,0xD00C,0xD00D,0xD00E,0xD00F,0xD010,0xD012,/* 0x80-0x87 */
+	0xD013,0xD014,0xD015,0xD016,0xD017,0xD019,0xD01A,0xD01B,/* 0x88-0x8F */
+	0xD01C,0xD01D,0xD01E,0xD01F,0xD020,0xD021,0xD022,0xD023,/* 0x90-0x97 */
+	0xD024,0xD025,0xD026,0xD027,0xD028,0xD029,0xD02A,0xD02B,/* 0x98-0x9F */
+	0xD02C,0xB05D,0xB07C,0xB07D,0xB080,0xB084,0xB08C,0xB08D,/* 0xA0-0xA7 */
+	0xB08F,0xB091,0xB098,0xB099,0xB09A,0xB09C,0xB09F,0xB0A0,/* 0xA8-0xAF */
+	0xB0A1,0xB0A2,0xB0A8,0xB0A9,0xB0AB,0xB0AC,0xB0AD,0xB0AE,/* 0xB0-0xB7 */
+	0xB0AF,0xB0B1,0xB0B3,0xB0B4,0xB0B5,0xB0B8,0xB0BC,0xB0C4,/* 0xB8-0xBF */
+	0xB0C5,0xB0C7,0xB0C8,0xB0C9,0xB0D0,0xB0D1,0xB0D4,0xB0D8,/* 0xC0-0xC7 */
+	0xB0E0,0xB0E5,0xB108,0xB109,0xB10B,0xB10C,0xB110,0xB112,/* 0xC8-0xCF */
+	0xB113,0xB118,0xB119,0xB11B,0xB11C,0xB11D,0xB123,0xB124,/* 0xD0-0xD7 */
+	0xB125,0xB128,0xB12C,0xB134,0xB135,0xB137,0xB138,0xB139,/* 0xD8-0xDF */
+	0xB140,0xB141,0xB144,0xB148,0xB150,0xB151,0xB154,0xB155,/* 0xE0-0xE7 */
+	0xB158,0xB15C,0xB160,0xB178,0xB179,0xB17C,0xB180,0xB182,/* 0xE8-0xEF */
+	0xB188,0xB189,0xB18B,0xB18D,0xB192,0xB193,0xB194,0xB198,/* 0xF0-0xF7 */
+	0xB19C,0xB1A8,0xB1CC,0xB1D0,0xB1D4,0xB1DC,0xB1DD,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD02E,0xD02F,0xD030,0xD031,0xD032,0xD033,0xD036,/* 0x40-0x47 */
+	0xD037,0xD039,0xD03A,0xD03B,0xD03D,0xD03E,0xD03F,0xD040,/* 0x48-0x4F */
+	0xD041,0xD042,0xD043,0xD046,0xD048,0xD04A,0xD04B,0xD04C,/* 0x50-0x57 */
+	0xD04D,0xD04E,0xD04F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD051,0xD052,0xD053,0xD055,0xD056,0xD057,0xD059,/* 0x60-0x67 */
+	0xD05A,0xD05B,0xD05C,0xD05D,0xD05E,0xD05F,0xD061,0xD062,/* 0x68-0x6F */
+	0xD063,0xD064,0xD065,0xD066,0xD067,0xD068,0xD069,0xD06A,/* 0x70-0x77 */
+	0xD06B,0xD06E,0xD06F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD071,0xD072,0xD073,0xD075,0xD076,0xD077,0xD078,/* 0x80-0x87 */
+	0xD079,0xD07A,0xD07B,0xD07E,0xD07F,0xD080,0xD082,0xD083,/* 0x88-0x8F */
+	0xD084,0xD085,0xD086,0xD087,0xD088,0xD089,0xD08A,0xD08B,/* 0x90-0x97 */
+	0xD08C,0xD08D,0xD08E,0xD08F,0xD090,0xD091,0xD092,0xD093,/* 0x98-0x9F */
+	0xD094,0xB1DF,0xB1E8,0xB1E9,0xB1EC,0xB1F0,0xB1F9,0xB1FB,/* 0xA0-0xA7 */
+	0xB1FD,0xB204,0xB205,0xB208,0xB20B,0xB20C,0xB214,0xB215,/* 0xA8-0xAF */
+	0xB217,0xB219,0xB220,0xB234,0xB23C,0xB258,0xB25C,0xB260,/* 0xB0-0xB7 */
+	0xB268,0xB269,0xB274,0xB275,0xB27C,0xB284,0xB285,0xB289,/* 0xB8-0xBF */
+	0xB290,0xB291,0xB294,0xB298,0xB299,0xB29A,0xB2A0,0xB2A1,/* 0xC0-0xC7 */
+	0xB2A3,0xB2A5,0xB2A6,0xB2AA,0xB2AC,0xB2B0,0xB2B4,0xB2C8,/* 0xC8-0xCF */
+	0xB2C9,0xB2CC,0xB2D0,0xB2D2,0xB2D8,0xB2D9,0xB2DB,0xB2DD,/* 0xD0-0xD7 */
+	0xB2E2,0xB2E4,0xB2E5,0xB2E6,0xB2E8,0xB2EB,0xB2EC,0xB2ED,/* 0xD8-0xDF */
+	0xB2EE,0xB2EF,0xB2F3,0xB2F4,0xB2F5,0xB2F7,0xB2F8,0xB2F9,/* 0xE0-0xE7 */
+	0xB2FA,0xB2FB,0xB2FF,0xB300,0xB301,0xB304,0xB308,0xB310,/* 0xE8-0xEF */
+	0xB311,0xB313,0xB314,0xB315,0xB31C,0xB354,0xB355,0xB356,/* 0xF0-0xF7 */
+	0xB358,0xB35B,0xB35C,0xB35E,0xB35F,0xB364,0xB365,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD095,0xD096,0xD097,0xD098,0xD099,0xD09A,0xD09B,/* 0x40-0x47 */
+	0xD09C,0xD09D,0xD09E,0xD09F,0xD0A0,0xD0A1,0xD0A2,0xD0A3,/* 0x48-0x4F */
+	0xD0A6,0xD0A7,0xD0A9,0xD0AA,0xD0AB,0xD0AD,0xD0AE,0xD0AF,/* 0x50-0x57 */
+	0xD0B0,0xD0B1,0xD0B2,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD0B3,0xD0B6,0xD0B8,0xD0BA,0xD0BB,0xD0BC,0xD0BD,/* 0x60-0x67 */
+	0xD0BE,0xD0BF,0xD0C2,0xD0C3,0xD0C5,0xD0C6,0xD0C7,0xD0CA,/* 0x68-0x6F */
+	0xD0CB,0xD0CC,0xD0CD,0xD0CE,0xD0CF,0xD0D2,0xD0D6,0xD0D7,/* 0x70-0x77 */
+	0xD0D8,0xD0D9,0xD0DA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD0DB,0xD0DE,0xD0DF,0xD0E1,0xD0E2,0xD0E3,0xD0E5,/* 0x80-0x87 */
+	0xD0E6,0xD0E7,0xD0E8,0xD0E9,0xD0EA,0xD0EB,0xD0EE,0xD0F2,/* 0x88-0x8F */
+	0xD0F3,0xD0F4,0xD0F5,0xD0F6,0xD0F7,0xD0F9,0xD0FA,0xD0FB,/* 0x90-0x97 */
+	0xD0FC,0xD0FD,0xD0FE,0xD0FF,0xD100,0xD101,0xD102,0xD103,/* 0x98-0x9F */
+	0xD104,0xB367,0xB369,0xB36B,0xB36E,0xB370,0xB371,0xB374,/* 0xA0-0xA7 */
+	0xB378,0xB380,0xB381,0xB383,0xB384,0xB385,0xB38C,0xB390,/* 0xA8-0xAF */
+	0xB394,0xB3A0,0xB3A1,0xB3A8,0xB3AC,0xB3C4,0xB3C5,0xB3C8,/* 0xB0-0xB7 */
+	0xB3CB,0xB3CC,0xB3CE,0xB3D0,0xB3D4,0xB3D5,0xB3D7,0xB3D9,/* 0xB8-0xBF */
+	0xB3DB,0xB3DD,0xB3E0,0xB3E4,0xB3E8,0xB3FC,0xB410,0xB418,/* 0xC0-0xC7 */
+	0xB41C,0xB420,0xB428,0xB429,0xB42B,0xB434,0xB450,0xB451,/* 0xC8-0xCF */
+	0xB454,0xB458,0xB460,0xB461,0xB463,0xB465,0xB46C,0xB480,/* 0xD0-0xD7 */
+	0xB488,0xB49D,0xB4A4,0xB4A8,0xB4AC,0xB4B5,0xB4B7,0xB4B9,/* 0xD8-0xDF */
+	0xB4C0,0xB4C4,0xB4C8,0xB4D0,0xB4D5,0xB4DC,0xB4DD,0xB4E0,/* 0xE0-0xE7 */
+	0xB4E3,0xB4E4,0xB4E6,0xB4EC,0xB4ED,0xB4EF,0xB4F1,0xB4F8,/* 0xE8-0xEF */
+	0xB514,0xB515,0xB518,0xB51B,0xB51C,0xB524,0xB525,0xB527,/* 0xF0-0xF7 */
+	0xB528,0xB529,0xB52A,0xB530,0xB531,0xB534,0xB538,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD105,0xD106,0xD107,0xD108,0xD109,0xD10A,0xD10B,/* 0x40-0x47 */
+	0xD10C,0xD10E,0xD10F,0xD110,0xD111,0xD112,0xD113,0xD114,/* 0x48-0x4F */
+	0xD115,0xD116,0xD117,0xD118,0xD119,0xD11A,0xD11B,0xD11C,/* 0x50-0x57 */
+	0xD11D,0xD11E,0xD11F,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD120,0xD121,0xD122,0xD123,0xD124,0xD125,0xD126,/* 0x60-0x67 */
+	0xD127,0xD128,0xD129,0xD12A,0xD12B,0xD12C,0xD12D,0xD12E,/* 0x68-0x6F */
+	0xD12F,0xD132,0xD133,0xD135,0xD136,0xD137,0xD139,0xD13B,/* 0x70-0x77 */
+	0xD13C,0xD13D,0xD13E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD13F,0xD142,0xD146,0xD147,0xD148,0xD149,0xD14A,/* 0x80-0x87 */
+	0xD14B,0xD14E,0xD14F,0xD151,0xD152,0xD153,0xD155,0xD156,/* 0x88-0x8F */
+	0xD157,0xD158,0xD159,0xD15A,0xD15B,0xD15E,0xD160,0xD162,/* 0x90-0x97 */
+	0xD163,0xD164,0xD165,0xD166,0xD167,0xD169,0xD16A,0xD16B,/* 0x98-0x9F */
+	0xD16D,0xB540,0xB541,0xB543,0xB544,0xB545,0xB54B,0xB54C,/* 0xA0-0xA7 */
+	0xB54D,0xB550,0xB554,0xB55C,0xB55D,0xB55F,0xB560,0xB561,/* 0xA8-0xAF */
+	0xB5A0,0xB5A1,0xB5A4,0xB5A8,0xB5AA,0xB5AB,0xB5B0,0xB5B1,/* 0xB0-0xB7 */
+	0xB5B3,0xB5B4,0xB5B5,0xB5BB,0xB5BC,0xB5BD,0xB5C0,0xB5C4,/* 0xB8-0xBF */
+	0xB5CC,0xB5CD,0xB5CF,0xB5D0,0xB5D1,0xB5D8,0xB5EC,0xB610,/* 0xC0-0xC7 */
+	0xB611,0xB614,0xB618,0xB625,0xB62C,0xB634,0xB648,0xB664,/* 0xC8-0xCF */
+	0xB668,0xB69C,0xB69D,0xB6A0,0xB6A4,0xB6AB,0xB6AC,0xB6B1,/* 0xD0-0xD7 */
+	0xB6D4,0xB6F0,0xB6F4,0xB6F8,0xB700,0xB701,0xB705,0xB728,/* 0xD8-0xDF */
+	0xB729,0xB72C,0xB72F,0xB730,0xB738,0xB739,0xB73B,0xB744,/* 0xE0-0xE7 */
+	0xB748,0xB74C,0xB754,0xB755,0xB760,0xB764,0xB768,0xB770,/* 0xE8-0xEF */
+	0xB771,0xB773,0xB775,0xB77C,0xB77D,0xB780,0xB784,0xB78C,/* 0xF0-0xF7 */
+	0xB78D,0xB78F,0xB790,0xB791,0xB792,0xB796,0xB797,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD16E,0xD16F,0xD170,0xD171,0xD172,0xD173,0xD174,/* 0x40-0x47 */
+	0xD175,0xD176,0xD177,0xD178,0xD179,0xD17A,0xD17B,0xD17D,/* 0x48-0x4F */
+	0xD17E,0xD17F,0xD180,0xD181,0xD182,0xD183,0xD185,0xD186,/* 0x50-0x57 */
+	0xD187,0xD189,0xD18A,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD18B,0xD18C,0xD18D,0xD18E,0xD18F,0xD190,0xD191,/* 0x60-0x67 */
+	0xD192,0xD193,0xD194,0xD195,0xD196,0xD197,0xD198,0xD199,/* 0x68-0x6F */
+	0xD19A,0xD19B,0xD19C,0xD19D,0xD19E,0xD19F,0xD1A2,0xD1A3,/* 0x70-0x77 */
+	0xD1A5,0xD1A6,0xD1A7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD1A9,0xD1AA,0xD1AB,0xD1AC,0xD1AD,0xD1AE,0xD1AF,/* 0x80-0x87 */
+	0xD1B2,0xD1B4,0xD1B6,0xD1B7,0xD1B8,0xD1B9,0xD1BB,0xD1BD,/* 0x88-0x8F */
+	0xD1BE,0xD1BF,0xD1C1,0xD1C2,0xD1C3,0xD1C4,0xD1C5,0xD1C6,/* 0x90-0x97 */
+	0xD1C7,0xD1C8,0xD1C9,0xD1CA,0xD1CB,0xD1CC,0xD1CD,0xD1CE,/* 0x98-0x9F */
+	0xD1CF,0xB798,0xB799,0xB79C,0xB7A0,0xB7A8,0xB7A9,0xB7AB,/* 0xA0-0xA7 */
+	0xB7AC,0xB7AD,0xB7B4,0xB7B5,0xB7B8,0xB7C7,0xB7C9,0xB7EC,/* 0xA8-0xAF */
+	0xB7ED,0xB7F0,0xB7F4,0xB7FC,0xB7FD,0xB7FF,0xB800,0xB801,/* 0xB0-0xB7 */
+	0xB807,0xB808,0xB809,0xB80C,0xB810,0xB818,0xB819,0xB81B,/* 0xB8-0xBF */
+	0xB81D,0xB824,0xB825,0xB828,0xB82C,0xB834,0xB835,0xB837,/* 0xC0-0xC7 */
+	0xB838,0xB839,0xB840,0xB844,0xB851,0xB853,0xB85C,0xB85D,/* 0xC8-0xCF */
+	0xB860,0xB864,0xB86C,0xB86D,0xB86F,0xB871,0xB878,0xB87C,/* 0xD0-0xD7 */
+	0xB88D,0xB8A8,0xB8B0,0xB8B4,0xB8B8,0xB8C0,0xB8C1,0xB8C3,/* 0xD8-0xDF */
+	0xB8C5,0xB8CC,0xB8D0,0xB8D4,0xB8DD,0xB8DF,0xB8E1,0xB8E8,/* 0xE0-0xE7 */
+	0xB8E9,0xB8EC,0xB8F0,0xB8F8,0xB8F9,0xB8FB,0xB8FD,0xB904,/* 0xE8-0xEF */
+	0xB918,0xB920,0xB93C,0xB93D,0xB940,0xB944,0xB94C,0xB94F,/* 0xF0-0xF7 */
+	0xB951,0xB958,0xB959,0xB95C,0xB960,0xB968,0xB969,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD1D0,0xD1D1,0xD1D2,0xD1D3,0xD1D4,0xD1D5,0xD1D6,/* 0x40-0x47 */
+	0xD1D7,0xD1D9,0xD1DA,0xD1DB,0xD1DC,0xD1DD,0xD1DE,0xD1DF,/* 0x48-0x4F */
+	0xD1E0,0xD1E1,0xD1E2,0xD1E3,0xD1E4,0xD1E5,0xD1E6,0xD1E7,/* 0x50-0x57 */
+	0xD1E8,0xD1E9,0xD1EA,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD1EB,0xD1EC,0xD1ED,0xD1EE,0xD1EF,0xD1F0,0xD1F1,/* 0x60-0x67 */
+	0xD1F2,0xD1F3,0xD1F5,0xD1F6,0xD1F7,0xD1F9,0xD1FA,0xD1FB,/* 0x68-0x6F */
+	0xD1FC,0xD1FD,0xD1FE,0xD1FF,0xD200,0xD201,0xD202,0xD203,/* 0x70-0x77 */
+	0xD204,0xD205,0xD206,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD208,0xD20A,0xD20B,0xD20C,0xD20D,0xD20E,0xD20F,/* 0x80-0x87 */
+	0xD211,0xD212,0xD213,0xD214,0xD215,0xD216,0xD217,0xD218,/* 0x88-0x8F */
+	0xD219,0xD21A,0xD21B,0xD21C,0xD21D,0xD21E,0xD21F,0xD220,/* 0x90-0x97 */
+	0xD221,0xD222,0xD223,0xD224,0xD225,0xD226,0xD227,0xD228,/* 0x98-0x9F */
+	0xD229,0xB96B,0xB96D,0xB974,0xB975,0xB978,0xB97C,0xB984,/* 0xA0-0xA7 */
+	0xB985,0xB987,0xB989,0xB98A,0xB98D,0xB98E,0xB9AC,0xB9AD,/* 0xA8-0xAF */
+	0xB9B0,0xB9B4,0xB9BC,0xB9BD,0xB9BF,0xB9C1,0xB9C8,0xB9C9,/* 0xB0-0xB7 */
+	0xB9CC,0xB9CE,0xB9CF,0xB9D0,0xB9D1,0xB9D2,0xB9D8,0xB9D9,/* 0xB8-0xBF */
+	0xB9DB,0xB9DD,0xB9DE,0xB9E1,0xB9E3,0xB9E4,0xB9E5,0xB9E8,/* 0xC0-0xC7 */
+	0xB9EC,0xB9F4,0xB9F5,0xB9F7,0xB9F8,0xB9F9,0xB9FA,0xBA00,/* 0xC8-0xCF */
+	0xBA01,0xBA08,0xBA15,0xBA38,0xBA39,0xBA3C,0xBA40,0xBA42,/* 0xD0-0xD7 */
+	0xBA48,0xBA49,0xBA4B,0xBA4D,0xBA4E,0xBA53,0xBA54,0xBA55,/* 0xD8-0xDF */
+	0xBA58,0xBA5C,0xBA64,0xBA65,0xBA67,0xBA68,0xBA69,0xBA70,/* 0xE0-0xE7 */
+	0xBA71,0xBA74,0xBA78,0xBA83,0xBA84,0xBA85,0xBA87,0xBA8C,/* 0xE8-0xEF */
+	0xBAA8,0xBAA9,0xBAAB,0xBAAC,0xBAB0,0xBAB2,0xBAB8,0xBAB9,/* 0xF0-0xF7 */
+	0xBABB,0xBABD,0xBAC4,0xBAC8,0xBAD8,0xBAD9,0xBAFC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD22A,0xD22B,0xD22E,0xD22F,0xD231,0xD232,0xD233,/* 0x40-0x47 */
+	0xD235,0xD236,0xD237,0xD238,0xD239,0xD23A,0xD23B,0xD23E,/* 0x48-0x4F */
+	0xD240,0xD242,0xD243,0xD244,0xD245,0xD246,0xD247,0xD249,/* 0x50-0x57 */
+	0xD24A,0xD24B,0xD24C,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD24D,0xD24E,0xD24F,0xD250,0xD251,0xD252,0xD253,/* 0x60-0x67 */
+	0xD254,0xD255,0xD256,0xD257,0xD258,0xD259,0xD25A,0xD25B,/* 0x68-0x6F */
+	0xD25D,0xD25E,0xD25F,0xD260,0xD261,0xD262,0xD263,0xD265,/* 0x70-0x77 */
+	0xD266,0xD267,0xD268,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD269,0xD26A,0xD26B,0xD26C,0xD26D,0xD26E,0xD26F,/* 0x80-0x87 */
+	0xD270,0xD271,0xD272,0xD273,0xD274,0xD275,0xD276,0xD277,/* 0x88-0x8F */
+	0xD278,0xD279,0xD27A,0xD27B,0xD27C,0xD27D,0xD27E,0xD27F,/* 0x90-0x97 */
+	0xD282,0xD283,0xD285,0xD286,0xD287,0xD289,0xD28A,0xD28B,/* 0x98-0x9F */
+	0xD28C,0xBB00,0xBB04,0xBB0D,0xBB0F,0xBB11,0xBB18,0xBB1C,/* 0xA0-0xA7 */
+	0xBB20,0xBB29,0xBB2B,0xBB34,0xBB35,0xBB36,0xBB38,0xBB3B,/* 0xA8-0xAF */
+	0xBB3C,0xBB3D,0xBB3E,0xBB44,0xBB45,0xBB47,0xBB49,0xBB4D,/* 0xB0-0xB7 */
+	0xBB4F,0xBB50,0xBB54,0xBB58,0xBB61,0xBB63,0xBB6C,0xBB88,/* 0xB8-0xBF */
+	0xBB8C,0xBB90,0xBBA4,0xBBA8,0xBBAC,0xBBB4,0xBBB7,0xBBC0,/* 0xC0-0xC7 */
+	0xBBC4,0xBBC8,0xBBD0,0xBBD3,0xBBF8,0xBBF9,0xBBFC,0xBBFF,/* 0xC8-0xCF */
+	0xBC00,0xBC02,0xBC08,0xBC09,0xBC0B,0xBC0C,0xBC0D,0xBC0F,/* 0xD0-0xD7 */
+	0xBC11,0xBC14,0xBC15,0xBC16,0xBC17,0xBC18,0xBC1B,0xBC1C,/* 0xD8-0xDF */
+	0xBC1D,0xBC1E,0xBC1F,0xBC24,0xBC25,0xBC27,0xBC29,0xBC2D,/* 0xE0-0xE7 */
+	0xBC30,0xBC31,0xBC34,0xBC38,0xBC40,0xBC41,0xBC43,0xBC44,/* 0xE8-0xEF */
+	0xBC45,0xBC49,0xBC4C,0xBC4D,0xBC50,0xBC5D,0xBC84,0xBC85,/* 0xF0-0xF7 */
+	0xBC88,0xBC8B,0xBC8C,0xBC8E,0xBC94,0xBC95,0xBC97,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD28D,0xD28E,0xD28F,0xD292,0xD293,0xD294,0xD296,/* 0x40-0x47 */
+	0xD297,0xD298,0xD299,0xD29A,0xD29B,0xD29D,0xD29E,0xD29F,/* 0x48-0x4F */
+	0xD2A1,0xD2A2,0xD2A3,0xD2A5,0xD2A6,0xD2A7,0xD2A8,0xD2A9,/* 0x50-0x57 */
+	0xD2AA,0xD2AB,0xD2AD,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD2AE,0xD2AF,0xD2B0,0xD2B2,0xD2B3,0xD2B4,0xD2B5,/* 0x60-0x67 */
+	0xD2B6,0xD2B7,0xD2BA,0xD2BB,0xD2BD,0xD2BE,0xD2C1,0xD2C3,/* 0x68-0x6F */
+	0xD2C4,0xD2C5,0xD2C6,0xD2C7,0xD2CA,0xD2CC,0xD2CD,0xD2CE,/* 0x70-0x77 */
+	0xD2CF,0xD2D0,0xD2D1,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD2D2,0xD2D3,0xD2D5,0xD2D6,0xD2D7,0xD2D9,0xD2DA,/* 0x80-0x87 */
+	0xD2DB,0xD2DD,0xD2DE,0xD2DF,0xD2E0,0xD2E1,0xD2E2,0xD2E3,/* 0x88-0x8F */
+	0xD2E6,0xD2E7,0xD2E8,0xD2E9,0xD2EA,0xD2EB,0xD2EC,0xD2ED,/* 0x90-0x97 */
+	0xD2EE,0xD2EF,0xD2F2,0xD2F3,0xD2F5,0xD2F6,0xD2F7,0xD2F9,/* 0x98-0x9F */
+	0xD2FA,0xBC99,0xBC9A,0xBCA0,0xBCA1,0xBCA4,0xBCA7,0xBCA8,/* 0xA0-0xA7 */
+	0xBCB0,0xBCB1,0xBCB3,0xBCB4,0xBCB5,0xBCBC,0xBCBD,0xBCC0,/* 0xA8-0xAF */
+	0xBCC4,0xBCCD,0xBCCF,0xBCD0,0xBCD1,0xBCD5,0xBCD8,0xBCDC,/* 0xB0-0xB7 */
+	0xBCF4,0xBCF5,0xBCF6,0xBCF8,0xBCFC,0xBD04,0xBD05,0xBD07,/* 0xB8-0xBF */
+	0xBD09,0xBD10,0xBD14,0xBD24,0xBD2C,0xBD40,0xBD48,0xBD49,/* 0xC0-0xC7 */
+	0xBD4C,0xBD50,0xBD58,0xBD59,0xBD64,0xBD68,0xBD80,0xBD81,/* 0xC8-0xCF */
+	0xBD84,0xBD87,0xBD88,0xBD89,0xBD8A,0xBD90,0xBD91,0xBD93,/* 0xD0-0xD7 */
+	0xBD95,0xBD99,0xBD9A,0xBD9C,0xBDA4,0xBDB0,0xBDB8,0xBDD4,/* 0xD8-0xDF */
+	0xBDD5,0xBDD8,0xBDDC,0xBDE9,0xBDF0,0xBDF4,0xBDF8,0xBE00,/* 0xE0-0xE7 */
+	0xBE03,0xBE05,0xBE0C,0xBE0D,0xBE10,0xBE14,0xBE1C,0xBE1D,/* 0xE8-0xEF */
+	0xBE1F,0xBE44,0xBE45,0xBE48,0xBE4C,0xBE4E,0xBE54,0xBE55,/* 0xF0-0xF7 */
+	0xBE57,0xBE59,0xBE5A,0xBE5B,0xBE60,0xBE61,0xBE64,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD2FB,0xD2FC,0xD2FD,0xD2FE,0xD2FF,0xD302,0xD304,/* 0x40-0x47 */
+	0xD306,0xD307,0xD308,0xD309,0xD30A,0xD30B,0xD30F,0xD311,/* 0x48-0x4F */
+	0xD312,0xD313,0xD315,0xD317,0xD318,0xD319,0xD31A,0xD31B,/* 0x50-0x57 */
+	0xD31E,0xD322,0xD323,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD324,0xD326,0xD327,0xD32A,0xD32B,0xD32D,0xD32E,/* 0x60-0x67 */
+	0xD32F,0xD331,0xD332,0xD333,0xD334,0xD335,0xD336,0xD337,/* 0x68-0x6F */
+	0xD33A,0xD33E,0xD33F,0xD340,0xD341,0xD342,0xD343,0xD346,/* 0x70-0x77 */
+	0xD347,0xD348,0xD349,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD34A,0xD34B,0xD34C,0xD34D,0xD34E,0xD34F,0xD350,/* 0x80-0x87 */
+	0xD351,0xD352,0xD353,0xD354,0xD355,0xD356,0xD357,0xD358,/* 0x88-0x8F */
+	0xD359,0xD35A,0xD35B,0xD35C,0xD35D,0xD35E,0xD35F,0xD360,/* 0x90-0x97 */
+	0xD361,0xD362,0xD363,0xD364,0xD365,0xD366,0xD367,0xD368,/* 0x98-0x9F */
+	0xD369,0xBE68,0xBE6A,0xBE70,0xBE71,0xBE73,0xBE74,0xBE75,/* 0xA0-0xA7 */
+	0xBE7B,0xBE7C,0xBE7D,0xBE80,0xBE84,0xBE8C,0xBE8D,0xBE8F,/* 0xA8-0xAF */
+	0xBE90,0xBE91,0xBE98,0xBE99,0xBEA8,0xBED0,0xBED1,0xBED4,/* 0xB0-0xB7 */
+	0xBED7,0xBED8,0xBEE0,0xBEE3,0xBEE4,0xBEE5,0xBEEC,0xBF01,/* 0xB8-0xBF */
+	0xBF08,0xBF09,0xBF18,0xBF19,0xBF1B,0xBF1C,0xBF1D,0xBF40,/* 0xC0-0xC7 */
+	0xBF41,0xBF44,0xBF48,0xBF50,0xBF51,0xBF55,0xBF94,0xBFB0,/* 0xC8-0xCF */
+	0xBFC5,0xBFCC,0xBFCD,0xBFD0,0xBFD4,0xBFDC,0xBFDF,0xBFE1,/* 0xD0-0xD7 */
+	0xC03C,0xC051,0xC058,0xC05C,0xC060,0xC068,0xC069,0xC090,/* 0xD8-0xDF */
+	0xC091,0xC094,0xC098,0xC0A0,0xC0A1,0xC0A3,0xC0A5,0xC0AC,/* 0xE0-0xE7 */
+	0xC0AD,0xC0AF,0xC0B0,0xC0B3,0xC0B4,0xC0B5,0xC0B6,0xC0BC,/* 0xE8-0xEF */
+	0xC0BD,0xC0BF,0xC0C0,0xC0C1,0xC0C5,0xC0C8,0xC0C9,0xC0CC,/* 0xF0-0xF7 */
+	0xC0D0,0xC0D8,0xC0D9,0xC0DB,0xC0DC,0xC0DD,0xC0E4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD36A,0xD36B,0xD36C,0xD36D,0xD36E,0xD36F,0xD370,/* 0x40-0x47 */
+	0xD371,0xD372,0xD373,0xD374,0xD375,0xD376,0xD377,0xD378,/* 0x48-0x4F */
+	0xD379,0xD37A,0xD37B,0xD37E,0xD37F,0xD381,0xD382,0xD383,/* 0x50-0x57 */
+	0xD385,0xD386,0xD387,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD388,0xD389,0xD38A,0xD38B,0xD38E,0xD392,0xD393,/* 0x60-0x67 */
+	0xD394,0xD395,0xD396,0xD397,0xD39A,0xD39B,0xD39D,0xD39E,/* 0x68-0x6F */
+	0xD39F,0xD3A1,0xD3A2,0xD3A3,0xD3A4,0xD3A5,0xD3A6,0xD3A7,/* 0x70-0x77 */
+	0xD3AA,0xD3AC,0xD3AE,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD3AF,0xD3B0,0xD3B1,0xD3B2,0xD3B3,0xD3B5,0xD3B6,/* 0x80-0x87 */
+	0xD3B7,0xD3B9,0xD3BA,0xD3BB,0xD3BD,0xD3BE,0xD3BF,0xD3C0,/* 0x88-0x8F */
+	0xD3C1,0xD3C2,0xD3C3,0xD3C6,0xD3C7,0xD3CA,0xD3CB,0xD3CC,/* 0x90-0x97 */
+	0xD3CD,0xD3CE,0xD3CF,0xD3D1,0xD3D2,0xD3D3,0xD3D4,0xD3D5,/* 0x98-0x9F */
+	0xD3D6,0xC0E5,0xC0E8,0xC0EC,0xC0F4,0xC0F5,0xC0F7,0xC0F9,/* 0xA0-0xA7 */
+	0xC100,0xC104,0xC108,0xC110,0xC115,0xC11C,0xC11D,0xC11E,/* 0xA8-0xAF */
+	0xC11F,0xC120,0xC123,0xC124,0xC126,0xC127,0xC12C,0xC12D,/* 0xB0-0xB7 */
+	0xC12F,0xC130,0xC131,0xC136,0xC138,0xC139,0xC13C,0xC140,/* 0xB8-0xBF */
+	0xC148,0xC149,0xC14B,0xC14C,0xC14D,0xC154,0xC155,0xC158,/* 0xC0-0xC7 */
+	0xC15C,0xC164,0xC165,0xC167,0xC168,0xC169,0xC170,0xC174,/* 0xC8-0xCF */
+	0xC178,0xC185,0xC18C,0xC18D,0xC18E,0xC190,0xC194,0xC196,/* 0xD0-0xD7 */
+	0xC19C,0xC19D,0xC19F,0xC1A1,0xC1A5,0xC1A8,0xC1A9,0xC1AC,/* 0xD8-0xDF */
+	0xC1B0,0xC1BD,0xC1C4,0xC1C8,0xC1CC,0xC1D4,0xC1D7,0xC1D8,/* 0xE0-0xE7 */
+	0xC1E0,0xC1E4,0xC1E8,0xC1F0,0xC1F1,0xC1F3,0xC1FC,0xC1FD,/* 0xE8-0xEF */
+	0xC200,0xC204,0xC20C,0xC20D,0xC20F,0xC211,0xC218,0xC219,/* 0xF0-0xF7 */
+	0xC21C,0xC21F,0xC220,0xC228,0xC229,0xC22B,0xC22D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD3D7,0xD3D9,0xD3DA,0xD3DB,0xD3DC,0xD3DD,0xD3DE,/* 0x40-0x47 */
+	0xD3DF,0xD3E0,0xD3E2,0xD3E4,0xD3E5,0xD3E6,0xD3E7,0xD3E8,/* 0x48-0x4F */
+	0xD3E9,0xD3EA,0xD3EB,0xD3EE,0xD3EF,0xD3F1,0xD3F2,0xD3F3,/* 0x50-0x57 */
+	0xD3F5,0xD3F6,0xD3F7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD3F8,0xD3F9,0xD3FA,0xD3FB,0xD3FE,0xD400,0xD402,/* 0x60-0x67 */
+	0xD403,0xD404,0xD405,0xD406,0xD407,0xD409,0xD40A,0xD40B,/* 0x68-0x6F */
+	0xD40C,0xD40D,0xD40E,0xD40F,0xD410,0xD411,0xD412,0xD413,/* 0x70-0x77 */
+	0xD414,0xD415,0xD416,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD417,0xD418,0xD419,0xD41A,0xD41B,0xD41C,0xD41E,/* 0x80-0x87 */
+	0xD41F,0xD420,0xD421,0xD422,0xD423,0xD424,0xD425,0xD426,/* 0x88-0x8F */
+	0xD427,0xD428,0xD429,0xD42A,0xD42B,0xD42C,0xD42D,0xD42E,/* 0x90-0x97 */
+	0xD42F,0xD430,0xD431,0xD432,0xD433,0xD434,0xD435,0xD436,/* 0x98-0x9F */
+	0xD437,0xC22F,0xC231,0xC232,0xC234,0xC248,0xC250,0xC251,/* 0xA0-0xA7 */
+	0xC254,0xC258,0xC260,0xC265,0xC26C,0xC26D,0xC270,0xC274,/* 0xA8-0xAF */
+	0xC27C,0xC27D,0xC27F,0xC281,0xC288,0xC289,0xC290,0xC298,/* 0xB0-0xB7 */
+	0xC29B,0xC29D,0xC2A4,0xC2A5,0xC2A8,0xC2AC,0xC2AD,0xC2B4,/* 0xB8-0xBF */
+	0xC2B5,0xC2B7,0xC2B9,0xC2DC,0xC2DD,0xC2E0,0xC2E3,0xC2E4,/* 0xC0-0xC7 */
+	0xC2EB,0xC2EC,0xC2ED,0xC2EF,0xC2F1,0xC2F6,0xC2F8,0xC2F9,/* 0xC8-0xCF */
+	0xC2FB,0xC2FC,0xC300,0xC308,0xC309,0xC30C,0xC30D,0xC313,/* 0xD0-0xD7 */
+	0xC314,0xC315,0xC318,0xC31C,0xC324,0xC325,0xC328,0xC329,/* 0xD8-0xDF */
+	0xC345,0xC368,0xC369,0xC36C,0xC370,0xC372,0xC378,0xC379,/* 0xE0-0xE7 */
+	0xC37C,0xC37D,0xC384,0xC388,0xC38C,0xC3C0,0xC3D8,0xC3D9,/* 0xE8-0xEF */
+	0xC3DC,0xC3DF,0xC3E0,0xC3E2,0xC3E8,0xC3E9,0xC3ED,0xC3F4,/* 0xF0-0xF7 */
+	0xC3F5,0xC3F8,0xC408,0xC410,0xC424,0xC42C,0xC430,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD438,0xD439,0xD43A,0xD43B,0xD43C,0xD43D,0xD43E,/* 0x40-0x47 */
+	0xD43F,0xD441,0xD442,0xD443,0xD445,0xD446,0xD447,0xD448,/* 0x48-0x4F */
+	0xD449,0xD44A,0xD44B,0xD44C,0xD44D,0xD44E,0xD44F,0xD450,/* 0x50-0x57 */
+	0xD451,0xD452,0xD453,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD454,0xD455,0xD456,0xD457,0xD458,0xD459,0xD45A,/* 0x60-0x67 */
+	0xD45B,0xD45D,0xD45E,0xD45F,0xD461,0xD462,0xD463,0xD465,/* 0x68-0x6F */
+	0xD466,0xD467,0xD468,0xD469,0xD46A,0xD46B,0xD46C,0xD46E,/* 0x70-0x77 */
+	0xD470,0xD471,0xD472,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD473,0xD474,0xD475,0xD476,0xD477,0xD47A,0xD47B,/* 0x80-0x87 */
+	0xD47D,0xD47E,0xD481,0xD483,0xD484,0xD485,0xD486,0xD487,/* 0x88-0x8F */
+	0xD48A,0xD48C,0xD48E,0xD48F,0xD490,0xD491,0xD492,0xD493,/* 0x90-0x97 */
+	0xD495,0xD496,0xD497,0xD498,0xD499,0xD49A,0xD49B,0xD49C,/* 0x98-0x9F */
+	0xD49D,0xC434,0xC43C,0xC43D,0xC448,0xC464,0xC465,0xC468,/* 0xA0-0xA7 */
+	0xC46C,0xC474,0xC475,0xC479,0xC480,0xC494,0xC49C,0xC4B8,/* 0xA8-0xAF */
+	0xC4BC,0xC4E9,0xC4F0,0xC4F1,0xC4F4,0xC4F8,0xC4FA,0xC4FF,/* 0xB0-0xB7 */
+	0xC500,0xC501,0xC50C,0xC510,0xC514,0xC51C,0xC528,0xC529,/* 0xB8-0xBF */
+	0xC52C,0xC530,0xC538,0xC539,0xC53B,0xC53D,0xC544,0xC545,/* 0xC0-0xC7 */
+	0xC548,0xC549,0xC54A,0xC54C,0xC54D,0xC54E,0xC553,0xC554,/* 0xC8-0xCF */
+	0xC555,0xC557,0xC558,0xC559,0xC55D,0xC55E,0xC560,0xC561,/* 0xD0-0xD7 */
+	0xC564,0xC568,0xC570,0xC571,0xC573,0xC574,0xC575,0xC57C,/* 0xD8-0xDF */
+	0xC57D,0xC580,0xC584,0xC587,0xC58C,0xC58D,0xC58F,0xC591,/* 0xE0-0xE7 */
+	0xC595,0xC597,0xC598,0xC59C,0xC5A0,0xC5A9,0xC5B4,0xC5B5,/* 0xE8-0xEF */
+	0xC5B8,0xC5B9,0xC5BB,0xC5BC,0xC5BD,0xC5BE,0xC5C4,0xC5C5,/* 0xF0-0xF7 */
+	0xC5C6,0xC5C7,0xC5C8,0xC5C9,0xC5CA,0xC5CC,0xC5CE,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD49E,0xD49F,0xD4A0,0xD4A1,0xD4A2,0xD4A3,0xD4A4,/* 0x40-0x47 */
+	0xD4A5,0xD4A6,0xD4A7,0xD4A8,0xD4AA,0xD4AB,0xD4AC,0xD4AD,/* 0x48-0x4F */
+	0xD4AE,0xD4AF,0xD4B0,0xD4B1,0xD4B2,0xD4B3,0xD4B4,0xD4B5,/* 0x50-0x57 */
+	0xD4B6,0xD4B7,0xD4B8,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD4B9,0xD4BA,0xD4BB,0xD4BC,0xD4BD,0xD4BE,0xD4BF,/* 0x60-0x67 */
+	0xD4C0,0xD4C1,0xD4C2,0xD4C3,0xD4C4,0xD4C5,0xD4C6,0xD4C7,/* 0x68-0x6F */
+	0xD4C8,0xD4C9,0xD4CA,0xD4CB,0xD4CD,0xD4CE,0xD4CF,0xD4D1,/* 0x70-0x77 */
+	0xD4D2,0xD4D3,0xD4D5,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD4D6,0xD4D7,0xD4D8,0xD4D9,0xD4DA,0xD4DB,0xD4DD,/* 0x80-0x87 */
+	0xD4DE,0xD4E0,0xD4E1,0xD4E2,0xD4E3,0xD4E4,0xD4E5,0xD4E6,/* 0x88-0x8F */
+	0xD4E7,0xD4E9,0xD4EA,0xD4EB,0xD4ED,0xD4EE,0xD4EF,0xD4F1,/* 0x90-0x97 */
+	0xD4F2,0xD4F3,0xD4F4,0xD4F5,0xD4F6,0xD4F7,0xD4F9,0xD4FA,/* 0x98-0x9F */
+	0xD4FC,0xC5D0,0xC5D1,0xC5D4,0xC5D8,0xC5E0,0xC5E1,0xC5E3,/* 0xA0-0xA7 */
+	0xC5E5,0xC5EC,0xC5ED,0xC5EE,0xC5F0,0xC5F4,0xC5F6,0xC5F7,/* 0xA8-0xAF */
+	0xC5FC,0xC5FD,0xC5FE,0xC5FF,0xC600,0xC601,0xC605,0xC606,/* 0xB0-0xB7 */
+	0xC607,0xC608,0xC60C,0xC610,0xC618,0xC619,0xC61B,0xC61C,/* 0xB8-0xBF */
+	0xC624,0xC625,0xC628,0xC62C,0xC62D,0xC62E,0xC630,0xC633,/* 0xC0-0xC7 */
+	0xC634,0xC635,0xC637,0xC639,0xC63B,0xC640,0xC641,0xC644,/* 0xC8-0xCF */
+	0xC648,0xC650,0xC651,0xC653,0xC654,0xC655,0xC65C,0xC65D,/* 0xD0-0xD7 */
+	0xC660,0xC66C,0xC66F,0xC671,0xC678,0xC679,0xC67C,0xC680,/* 0xD8-0xDF */
+	0xC688,0xC689,0xC68B,0xC68D,0xC694,0xC695,0xC698,0xC69C,/* 0xE0-0xE7 */
+	0xC6A4,0xC6A5,0xC6A7,0xC6A9,0xC6B0,0xC6B1,0xC6B4,0xC6B8,/* 0xE8-0xEF */
+	0xC6B9,0xC6BA,0xC6C0,0xC6C1,0xC6C3,0xC6C5,0xC6CC,0xC6CD,/* 0xF0-0xF7 */
+	0xC6D0,0xC6D4,0xC6DC,0xC6DD,0xC6E0,0xC6E1,0xC6E8,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD4FE,0xD4FF,0xD500,0xD501,0xD502,0xD503,0xD505,/* 0x40-0x47 */
+	0xD506,0xD507,0xD509,0xD50A,0xD50B,0xD50D,0xD50E,0xD50F,/* 0x48-0x4F */
+	0xD510,0xD511,0xD512,0xD513,0xD516,0xD518,0xD519,0xD51A,/* 0x50-0x57 */
+	0xD51B,0xD51C,0xD51D,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD51E,0xD51F,0xD520,0xD521,0xD522,0xD523,0xD524,/* 0x60-0x67 */
+	0xD525,0xD526,0xD527,0xD528,0xD529,0xD52A,0xD52B,0xD52C,/* 0x68-0x6F */
+	0xD52D,0xD52E,0xD52F,0xD530,0xD531,0xD532,0xD533,0xD534,/* 0x70-0x77 */
+	0xD535,0xD536,0xD537,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD538,0xD539,0xD53A,0xD53B,0xD53E,0xD53F,0xD541,/* 0x80-0x87 */
+	0xD542,0xD543,0xD545,0xD546,0xD547,0xD548,0xD549,0xD54A,/* 0x88-0x8F */
+	0xD54B,0xD54E,0xD550,0xD552,0xD553,0xD554,0xD555,0xD556,/* 0x90-0x97 */
+	0xD557,0xD55A,0xD55B,0xD55D,0xD55E,0xD55F,0xD561,0xD562,/* 0x98-0x9F */
+	0xD563,0xC6E9,0xC6EC,0xC6F0,0xC6F8,0xC6F9,0xC6FD,0xC704,/* 0xA0-0xA7 */
+	0xC705,0xC708,0xC70C,0xC714,0xC715,0xC717,0xC719,0xC720,/* 0xA8-0xAF */
+	0xC721,0xC724,0xC728,0xC730,0xC731,0xC733,0xC735,0xC737,/* 0xB0-0xB7 */
+	0xC73C,0xC73D,0xC740,0xC744,0xC74A,0xC74C,0xC74D,0xC74F,/* 0xB8-0xBF */
+	0xC751,0xC752,0xC753,0xC754,0xC755,0xC756,0xC757,0xC758,/* 0xC0-0xC7 */
+	0xC75C,0xC760,0xC768,0xC76B,0xC774,0xC775,0xC778,0xC77C,/* 0xC8-0xCF */
+	0xC77D,0xC77E,0xC783,0xC784,0xC785,0xC787,0xC788,0xC789,/* 0xD0-0xD7 */
+	0xC78A,0xC78E,0xC790,0xC791,0xC794,0xC796,0xC797,0xC798,/* 0xD8-0xDF */
+	0xC79A,0xC7A0,0xC7A1,0xC7A3,0xC7A4,0xC7A5,0xC7A6,0xC7AC,/* 0xE0-0xE7 */
+	0xC7AD,0xC7B0,0xC7B4,0xC7BC,0xC7BD,0xC7BF,0xC7C0,0xC7C1,/* 0xE8-0xEF */
+	0xC7C8,0xC7C9,0xC7CC,0xC7CE,0xC7D0,0xC7D8,0xC7DD,0xC7E4,/* 0xF0-0xF7 */
+	0xC7E8,0xC7EC,0xC800,0xC801,0xC804,0xC808,0xC80A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD564,0xD566,0xD567,0xD56A,0xD56C,0xD56E,0xD56F,/* 0x40-0x47 */
+	0xD570,0xD571,0xD572,0xD573,0xD576,0xD577,0xD579,0xD57A,/* 0x48-0x4F */
+	0xD57B,0xD57D,0xD57E,0xD57F,0xD580,0xD581,0xD582,0xD583,/* 0x50-0x57 */
+	0xD586,0xD58A,0xD58B,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD58C,0xD58D,0xD58E,0xD58F,0xD591,0xD592,0xD593,/* 0x60-0x67 */
+	0xD594,0xD595,0xD596,0xD597,0xD598,0xD599,0xD59A,0xD59B,/* 0x68-0x6F */
+	0xD59C,0xD59D,0xD59E,0xD59F,0xD5A0,0xD5A1,0xD5A2,0xD5A3,/* 0x70-0x77 */
+	0xD5A4,0xD5A6,0xD5A7,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD5A8,0xD5A9,0xD5AA,0xD5AB,0xD5AC,0xD5AD,0xD5AE,/* 0x80-0x87 */
+	0xD5AF,0xD5B0,0xD5B1,0xD5B2,0xD5B3,0xD5B4,0xD5B5,0xD5B6,/* 0x88-0x8F */
+	0xD5B7,0xD5B8,0xD5B9,0xD5BA,0xD5BB,0xD5BC,0xD5BD,0xD5BE,/* 0x90-0x97 */
+	0xD5BF,0xD5C0,0xD5C1,0xD5C2,0xD5C3,0xD5C4,0xD5C5,0xD5C6,/* 0x98-0x9F */
+	0xD5C7,0xC810,0xC811,0xC813,0xC815,0xC816,0xC81C,0xC81D,/* 0xA0-0xA7 */
+	0xC820,0xC824,0xC82C,0xC82D,0xC82F,0xC831,0xC838,0xC83C,/* 0xA8-0xAF */
+	0xC840,0xC848,0xC849,0xC84C,0xC84D,0xC854,0xC870,0xC871,/* 0xB0-0xB7 */
+	0xC874,0xC878,0xC87A,0xC880,0xC881,0xC883,0xC885,0xC886,/* 0xB8-0xBF */
+	0xC887,0xC88B,0xC88C,0xC88D,0xC894,0xC89D,0xC89F,0xC8A1,/* 0xC0-0xC7 */
+	0xC8A8,0xC8BC,0xC8BD,0xC8C4,0xC8C8,0xC8CC,0xC8D4,0xC8D5,/* 0xC8-0xCF */
+	0xC8D7,0xC8D9,0xC8E0,0xC8E1,0xC8E4,0xC8F5,0xC8FC,0xC8FD,/* 0xD0-0xD7 */
+	0xC900,0xC904,0xC905,0xC906,0xC90C,0xC90D,0xC90F,0xC911,/* 0xD8-0xDF */
+	0xC918,0xC92C,0xC934,0xC950,0xC951,0xC954,0xC958,0xC960,/* 0xE0-0xE7 */
+	0xC961,0xC963,0xC96C,0xC970,0xC974,0xC97C,0xC988,0xC989,/* 0xE8-0xEF */
+	0xC98C,0xC990,0xC998,0xC999,0xC99B,0xC99D,0xC9C0,0xC9C1,/* 0xF0-0xF7 */
+	0xC9C4,0xC9C7,0xC9C8,0xC9CA,0xC9D0,0xC9D1,0xC9D3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD5CA,0xD5CB,0xD5CD,0xD5CE,0xD5CF,0xD5D1,0xD5D3,/* 0x40-0x47 */
+	0xD5D4,0xD5D5,0xD5D6,0xD5D7,0xD5DA,0xD5DC,0xD5DE,0xD5DF,/* 0x48-0x4F */
+	0xD5E0,0xD5E1,0xD5E2,0xD5E3,0xD5E6,0xD5E7,0xD5E9,0xD5EA,/* 0x50-0x57 */
+	0xD5EB,0xD5ED,0xD5EE,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD5EF,0xD5F0,0xD5F1,0xD5F2,0xD5F3,0xD5F6,0xD5F8,/* 0x60-0x67 */
+	0xD5FA,0xD5FB,0xD5FC,0xD5FD,0xD5FE,0xD5FF,0xD602,0xD603,/* 0x68-0x6F */
+	0xD605,0xD606,0xD607,0xD609,0xD60A,0xD60B,0xD60C,0xD60D,/* 0x70-0x77 */
+	0xD60E,0xD60F,0xD612,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD616,0xD617,0xD618,0xD619,0xD61A,0xD61B,0xD61D,/* 0x80-0x87 */
+	0xD61E,0xD61F,0xD621,0xD622,0xD623,0xD625,0xD626,0xD627,/* 0x88-0x8F */
+	0xD628,0xD629,0xD62A,0xD62B,0xD62C,0xD62E,0xD62F,0xD630,/* 0x90-0x97 */
+	0xD631,0xD632,0xD633,0xD634,0xD635,0xD636,0xD637,0xD63A,/* 0x98-0x9F */
+	0xD63B,0xC9D5,0xC9D6,0xC9D9,0xC9DA,0xC9DC,0xC9DD,0xC9E0,/* 0xA0-0xA7 */
+	0xC9E2,0xC9E4,0xC9E7,0xC9EC,0xC9ED,0xC9EF,0xC9F0,0xC9F1,/* 0xA8-0xAF */
+	0xC9F8,0xC9F9,0xC9FC,0xCA00,0xCA08,0xCA09,0xCA0B,0xCA0C,/* 0xB0-0xB7 */
+	0xCA0D,0xCA14,0xCA18,0xCA29,0xCA4C,0xCA4D,0xCA50,0xCA54,/* 0xB8-0xBF */
+	0xCA5C,0xCA5D,0xCA5F,0xCA60,0xCA61,0xCA68,0xCA7D,0xCA84,/* 0xC0-0xC7 */
+	0xCA98,0xCABC,0xCABD,0xCAC0,0xCAC4,0xCACC,0xCACD,0xCACF,/* 0xC8-0xCF */
+	0xCAD1,0xCAD3,0xCAD8,0xCAD9,0xCAE0,0xCAEC,0xCAF4,0xCB08,/* 0xD0-0xD7 */
+	0xCB10,0xCB14,0xCB18,0xCB20,0xCB21,0xCB41,0xCB48,0xCB49,/* 0xD8-0xDF */
+	0xCB4C,0xCB50,0xCB58,0xCB59,0xCB5D,0xCB64,0xCB78,0xCB79,/* 0xE0-0xE7 */
+	0xCB9C,0xCBB8,0xCBD4,0xCBE4,0xCBE7,0xCBE9,0xCC0C,0xCC0D,/* 0xE8-0xEF */
+	0xCC10,0xCC14,0xCC1C,0xCC1D,0xCC21,0xCC22,0xCC27,0xCC28,/* 0xF0-0xF7 */
+	0xCC29,0xCC2C,0xCC2E,0xCC30,0xCC38,0xCC39,0xCC3B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD63D,0xD63E,0xD63F,0xD641,0xD642,0xD643,0xD644,/* 0x40-0x47 */
+	0xD646,0xD647,0xD64A,0xD64C,0xD64E,0xD64F,0xD650,0xD652,/* 0x48-0x4F */
+	0xD653,0xD656,0xD657,0xD659,0xD65A,0xD65B,0xD65D,0xD65E,/* 0x50-0x57 */
+	0xD65F,0xD660,0xD661,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD662,0xD663,0xD664,0xD665,0xD666,0xD668,0xD66A,/* 0x60-0x67 */
+	0xD66B,0xD66C,0xD66D,0xD66E,0xD66F,0xD672,0xD673,0xD675,/* 0x68-0x6F */
+	0xD676,0xD677,0xD678,0xD679,0xD67A,0xD67B,0xD67C,0xD67D,/* 0x70-0x77 */
+	0xD67E,0xD67F,0xD680,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD681,0xD682,0xD684,0xD686,0xD687,0xD688,0xD689,/* 0x80-0x87 */
+	0xD68A,0xD68B,0xD68E,0xD68F,0xD691,0xD692,0xD693,0xD695,/* 0x88-0x8F */
+	0xD696,0xD697,0xD698,0xD699,0xD69A,0xD69B,0xD69C,0xD69E,/* 0x90-0x97 */
+	0xD6A0,0xD6A2,0xD6A3,0xD6A4,0xD6A5,0xD6A6,0xD6A7,0xD6A9,/* 0x98-0x9F */
+	0xD6AA,0xCC3C,0xCC3D,0xCC3E,0xCC44,0xCC45,0xCC48,0xCC4C,/* 0xA0-0xA7 */
+	0xCC54,0xCC55,0xCC57,0xCC58,0xCC59,0xCC60,0xCC64,0xCC66,/* 0xA8-0xAF */
+	0xCC68,0xCC70,0xCC75,0xCC98,0xCC99,0xCC9C,0xCCA0,0xCCA8,/* 0xB0-0xB7 */
+	0xCCA9,0xCCAB,0xCCAC,0xCCAD,0xCCB4,0xCCB5,0xCCB8,0xCCBC,/* 0xB8-0xBF */
+	0xCCC4,0xCCC5,0xCCC7,0xCCC9,0xCCD0,0xCCD4,0xCCE4,0xCCEC,/* 0xC0-0xC7 */
+	0xCCF0,0xCD01,0xCD08,0xCD09,0xCD0C,0xCD10,0xCD18,0xCD19,/* 0xC8-0xCF */
+	0xCD1B,0xCD1D,0xCD24,0xCD28,0xCD2C,0xCD39,0xCD5C,0xCD60,/* 0xD0-0xD7 */
+	0xCD64,0xCD6C,0xCD6D,0xCD6F,0xCD71,0xCD78,0xCD88,0xCD94,/* 0xD8-0xDF */
+	0xCD95,0xCD98,0xCD9C,0xCDA4,0xCDA5,0xCDA7,0xCDA9,0xCDB0,/* 0xE0-0xE7 */
+	0xCDC4,0xCDCC,0xCDD0,0xCDE8,0xCDEC,0xCDF0,0xCDF8,0xCDF9,/* 0xE8-0xEF */
+	0xCDFB,0xCDFD,0xCE04,0xCE08,0xCE0C,0xCE14,0xCE19,0xCE20,/* 0xF0-0xF7 */
+	0xCE21,0xCE24,0xCE28,0xCE30,0xCE31,0xCE33,0xCE35,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD6AB,0xD6AD,0xD6AE,0xD6AF,0xD6B1,0xD6B2,0xD6B3,/* 0x40-0x47 */
+	0xD6B4,0xD6B5,0xD6B6,0xD6B7,0xD6B8,0xD6BA,0xD6BC,0xD6BD,/* 0x48-0x4F */
+	0xD6BE,0xD6BF,0xD6C0,0xD6C1,0xD6C2,0xD6C3,0xD6C6,0xD6C7,/* 0x50-0x57 */
+	0xD6C9,0xD6CA,0xD6CB,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD6CD,0xD6CE,0xD6CF,0xD6D0,0xD6D2,0xD6D3,0xD6D5,/* 0x60-0x67 */
+	0xD6D6,0xD6D8,0xD6DA,0xD6DB,0xD6DC,0xD6DD,0xD6DE,0xD6DF,/* 0x68-0x6F */
+	0xD6E1,0xD6E2,0xD6E3,0xD6E5,0xD6E6,0xD6E7,0xD6E9,0xD6EA,/* 0x70-0x77 */
+	0xD6EB,0xD6EC,0xD6ED,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD6EE,0xD6EF,0xD6F1,0xD6F2,0xD6F3,0xD6F4,0xD6F6,/* 0x80-0x87 */
+	0xD6F7,0xD6F8,0xD6F9,0xD6FA,0xD6FB,0xD6FE,0xD6FF,0xD701,/* 0x88-0x8F */
+	0xD702,0xD703,0xD705,0xD706,0xD707,0xD708,0xD709,0xD70A,/* 0x90-0x97 */
+	0xD70B,0xD70C,0xD70D,0xD70E,0xD70F,0xD710,0xD712,0xD713,/* 0x98-0x9F */
+	0xD714,0xCE58,0xCE59,0xCE5C,0xCE5F,0xCE60,0xCE61,0xCE68,/* 0xA0-0xA7 */
+	0xCE69,0xCE6B,0xCE6D,0xCE74,0xCE75,0xCE78,0xCE7C,0xCE84,/* 0xA8-0xAF */
+	0xCE85,0xCE87,0xCE89,0xCE90,0xCE91,0xCE94,0xCE98,0xCEA0,/* 0xB0-0xB7 */
+	0xCEA1,0xCEA3,0xCEA4,0xCEA5,0xCEAC,0xCEAD,0xCEC1,0xCEE4,/* 0xB8-0xBF */
+	0xCEE5,0xCEE8,0xCEEB,0xCEEC,0xCEF4,0xCEF5,0xCEF7,0xCEF8,/* 0xC0-0xC7 */
+	0xCEF9,0xCF00,0xCF01,0xCF04,0xCF08,0xCF10,0xCF11,0xCF13,/* 0xC8-0xCF */
+	0xCF15,0xCF1C,0xCF20,0xCF24,0xCF2C,0xCF2D,0xCF2F,0xCF30,/* 0xD0-0xD7 */
+	0xCF31,0xCF38,0xCF54,0xCF55,0xCF58,0xCF5C,0xCF64,0xCF65,/* 0xD8-0xDF */
+	0xCF67,0xCF69,0xCF70,0xCF71,0xCF74,0xCF78,0xCF80,0xCF85,/* 0xE0-0xE7 */
+	0xCF8C,0xCFA1,0xCFA8,0xCFB0,0xCFC4,0xCFE0,0xCFE1,0xCFE4,/* 0xE8-0xEF */
+	0xCFE8,0xCFF0,0xCFF1,0xCFF3,0xCFF5,0xCFFC,0xD000,0xD004,/* 0xF0-0xF7 */
+	0xD011,0xD018,0xD02D,0xD034,0xD035,0xD038,0xD03C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD715,0xD716,0xD717,0xD71A,0xD71B,0xD71D,0xD71E,/* 0x40-0x47 */
+	0xD71F,0xD721,0xD722,0xD723,0xD724,0xD725,0xD726,0xD727,/* 0x48-0x4F */
+	0xD72A,0xD72C,0xD72E,0xD72F,0xD730,0xD731,0xD732,0xD733,/* 0x50-0x57 */
+	0xD736,0xD737,0xD739,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0xD73A,0xD73B,0xD73D,0xD73E,0xD73F,0xD740,0xD741,/* 0x60-0x67 */
+	0xD742,0xD743,0xD745,0xD746,0xD748,0xD74A,0xD74B,0xD74C,/* 0x68-0x6F */
+	0xD74D,0xD74E,0xD74F,0xD752,0xD753,0xD755,0xD75A,0xD75B,/* 0x70-0x77 */
+	0xD75C,0xD75D,0xD75E,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0xD75F,0xD762,0xD764,0xD766,0xD767,0xD768,0xD76A,/* 0x80-0x87 */
+	0xD76B,0xD76D,0xD76E,0xD76F,0xD771,0xD772,0xD773,0xD775,/* 0x88-0x8F */
+	0xD776,0xD777,0xD778,0xD779,0xD77A,0xD77B,0xD77E,0xD77F,/* 0x90-0x97 */
+	0xD780,0xD782,0xD783,0xD784,0xD785,0xD786,0xD787,0xD78A,/* 0x98-0x9F */
+	0xD78B,0xD044,0xD045,0xD047,0xD049,0xD050,0xD054,0xD058,/* 0xA0-0xA7 */
+	0xD060,0xD06C,0xD06D,0xD070,0xD074,0xD07C,0xD07D,0xD081,/* 0xA8-0xAF */
+	0xD0A4,0xD0A5,0xD0A8,0xD0AC,0xD0B4,0xD0B5,0xD0B7,0xD0B9,/* 0xB0-0xB7 */
+	0xD0C0,0xD0C1,0xD0C4,0xD0C8,0xD0C9,0xD0D0,0xD0D1,0xD0D3,/* 0xB8-0xBF */
+	0xD0D4,0xD0D5,0xD0DC,0xD0DD,0xD0E0,0xD0E4,0xD0EC,0xD0ED,/* 0xC0-0xC7 */
+	0xD0EF,0xD0F0,0xD0F1,0xD0F8,0xD10D,0xD130,0xD131,0xD134,/* 0xC8-0xCF */
+	0xD138,0xD13A,0xD140,0xD141,0xD143,0xD144,0xD145,0xD14C,/* 0xD0-0xD7 */
+	0xD14D,0xD150,0xD154,0xD15C,0xD15D,0xD15F,0xD161,0xD168,/* 0xD8-0xDF */
+	0xD16C,0xD17C,0xD184,0xD188,0xD1A0,0xD1A1,0xD1A4,0xD1A8,/* 0xE0-0xE7 */
+	0xD1B0,0xD1B1,0xD1B3,0xD1B5,0xD1BA,0xD1BC,0xD1C0,0xD1D8,/* 0xE8-0xEF */
+	0xD1F4,0xD1F8,0xD207,0xD209,0xD210,0xD22C,0xD22D,0xD230,/* 0xF0-0xF7 */
+	0xD234,0xD23C,0xD23D,0xD23F,0xD241,0xD248,0xD25C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0xD78D,0xD78E,0xD78F,0xD791,0xD792,0xD793,0xD794,/* 0x40-0x47 */
+	0xD795,0xD796,0xD797,0xD79A,0xD79C,0xD79E,0xD79F,0xD7A0,/* 0x48-0x4F */
+	0xD7A1,0xD7A2,0xD7A3,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xD264,0xD280,0xD281,0xD284,0xD288,0xD290,0xD291,/* 0xA0-0xA7 */
+	0xD295,0xD29C,0xD2A0,0xD2A4,0xD2AC,0xD2B1,0xD2B8,0xD2B9,/* 0xA8-0xAF */
+	0xD2BC,0xD2BF,0xD2C0,0xD2C2,0xD2C8,0xD2C9,0xD2CB,0xD2D4,/* 0xB0-0xB7 */
+	0xD2D8,0xD2DC,0xD2E4,0xD2E5,0xD2F0,0xD2F1,0xD2F4,0xD2F8,/* 0xB8-0xBF */
+	0xD300,0xD301,0xD303,0xD305,0xD30C,0xD30D,0xD30E,0xD310,/* 0xC0-0xC7 */
+	0xD314,0xD316,0xD31C,0xD31D,0xD31F,0xD320,0xD321,0xD325,/* 0xC8-0xCF */
+	0xD328,0xD329,0xD32C,0xD330,0xD338,0xD339,0xD33B,0xD33C,/* 0xD0-0xD7 */
+	0xD33D,0xD344,0xD345,0xD37C,0xD37D,0xD380,0xD384,0xD38C,/* 0xD8-0xDF */
+	0xD38D,0xD38F,0xD390,0xD391,0xD398,0xD399,0xD39C,0xD3A0,/* 0xE0-0xE7 */
+	0xD3A8,0xD3A9,0xD3AB,0xD3AD,0xD3B4,0xD3B8,0xD3BC,0xD3C4,/* 0xE8-0xEF */
+	0xD3C5,0xD3C8,0xD3C9,0xD3D0,0xD3D8,0xD3E1,0xD3E3,0xD3EC,/* 0xF0-0xF7 */
+	0xD3ED,0xD3F0,0xD3F4,0xD3FC,0xD3FD,0xD3FF,0xD401,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xD408,0xD41D,0xD440,0xD444,0xD45C,0xD460,0xD464,/* 0xA0-0xA7 */
+	0xD46D,0xD46F,0xD478,0xD479,0xD47C,0xD47F,0xD480,0xD482,/* 0xA8-0xAF */
+	0xD488,0xD489,0xD48B,0xD48D,0xD494,0xD4A9,0xD4CC,0xD4D0,/* 0xB0-0xB7 */
+	0xD4D4,0xD4DC,0xD4DF,0xD4E8,0xD4EC,0xD4F0,0xD4F8,0xD4FB,/* 0xB8-0xBF */
+	0xD4FD,0xD504,0xD508,0xD50C,0xD514,0xD515,0xD517,0xD53C,/* 0xC0-0xC7 */
+	0xD53D,0xD540,0xD544,0xD54C,0xD54D,0xD54F,0xD551,0xD558,/* 0xC8-0xCF */
+	0xD559,0xD55C,0xD560,0xD565,0xD568,0xD569,0xD56B,0xD56D,/* 0xD0-0xD7 */
+	0xD574,0xD575,0xD578,0xD57C,0xD584,0xD585,0xD587,0xD588,/* 0xD8-0xDF */
+	0xD589,0xD590,0xD5A5,0xD5C8,0xD5C9,0xD5CC,0xD5D0,0xD5D2,/* 0xE0-0xE7 */
+	0xD5D8,0xD5D9,0xD5DB,0xD5DD,0xD5E4,0xD5E5,0xD5E8,0xD5EC,/* 0xE8-0xEF */
+	0xD5F4,0xD5F5,0xD5F7,0xD5F9,0xD600,0xD601,0xD604,0xD608,/* 0xF0-0xF7 */
+	0xD610,0xD611,0xD613,0xD614,0xD615,0xD61C,0xD620,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xD624,0xD62D,0xD638,0xD639,0xD63C,0xD640,0xD645,/* 0xA0-0xA7 */
+	0xD648,0xD649,0xD64B,0xD64D,0xD651,0xD654,0xD655,0xD658,/* 0xA8-0xAF */
+	0xD65C,0xD667,0xD669,0xD670,0xD671,0xD674,0xD683,0xD685,/* 0xB0-0xB7 */
+	0xD68C,0xD68D,0xD690,0xD694,0xD69D,0xD69F,0xD6A1,0xD6A8,/* 0xB8-0xBF */
+	0xD6AC,0xD6B0,0xD6B9,0xD6BB,0xD6C4,0xD6C5,0xD6C8,0xD6CC,/* 0xC0-0xC7 */
+	0xD6D1,0xD6D4,0xD6D7,0xD6D9,0xD6E0,0xD6E4,0xD6E8,0xD6F0,/* 0xC8-0xCF */
+	0xD6F5,0xD6FC,0xD6FD,0xD700,0xD704,0xD711,0xD718,0xD719,/* 0xD0-0xD7 */
+	0xD71C,0xD720,0xD728,0xD729,0xD72B,0xD72D,0xD734,0xD735,/* 0xD8-0xDF */
+	0xD738,0xD73C,0xD744,0xD747,0xD749,0xD750,0xD751,0xD754,/* 0xE0-0xE7 */
+	0xD756,0xD757,0xD758,0xD759,0xD760,0xD761,0xD763,0xD765,/* 0xE8-0xEF */
+	0xD769,0xD76C,0xD770,0xD774,0xD77C,0xD77D,0xD781,0xD788,/* 0xF0-0xF7 */
+	0xD789,0xD78C,0xD790,0xD798,0xD799,0xD79B,0xD79D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x4F3D,0x4F73,0x5047,0x50F9,0x52A0,0x53EF,0x5475,/* 0xA0-0xA7 */
+	0x54E5,0x5609,0x5AC1,0x5BB6,0x6687,0x67B6,0x67B7,0x67EF,/* 0xA8-0xAF */
+	0x6B4C,0x73C2,0x75C2,0x7A3C,0x82DB,0x8304,0x8857,0x8888,/* 0xB0-0xB7 */
+	0x8A36,0x8CC8,0x8DCF,0x8EFB,0x8FE6,0x99D5,0x523B,0x5374,/* 0xB8-0xBF */
+	0x5404,0x606A,0x6164,0x6BBC,0x73CF,0x811A,0x89BA,0x89D2,/* 0xC0-0xC7 */
+	0x95A3,0x4F83,0x520A,0x58BE,0x5978,0x59E6,0x5E72,0x5E79,/* 0xC8-0xCF */
+	0x61C7,0x63C0,0x6746,0x67EC,0x687F,0x6F97,0x764E,0x770B,/* 0xD0-0xD7 */
+	0x78F5,0x7A08,0x7AFF,0x7C21,0x809D,0x826E,0x8271,0x8AEB,/* 0xD8-0xDF */
+	0x9593,0x4E6B,0x559D,0x66F7,0x6E34,0x78A3,0x7AED,0x845B,/* 0xE0-0xE7 */
+	0x8910,0x874E,0x97A8,0x52D8,0x574E,0x582A,0x5D4C,0x611F,/* 0xE8-0xEF */
+	0x61BE,0x6221,0x6562,0x67D1,0x6A44,0x6E1B,0x7518,0x75B3,/* 0xF0-0xF7 */
+	0x76E3,0x77B0,0x7D3A,0x90AF,0x9451,0x9452,0x9F95,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5323,0x5CAC,0x7532,0x80DB,0x9240,0x9598,0x525B,/* 0xA0-0xA7 */
+	0x5808,0x59DC,0x5CA1,0x5D17,0x5EB7,0x5F3A,0x5F4A,0x6177,/* 0xA8-0xAF */
+	0x6C5F,0x757A,0x7586,0x7CE0,0x7D73,0x7DB1,0x7F8C,0x8154,/* 0xB0-0xB7 */
+	0x8221,0x8591,0x8941,0x8B1B,0x92FC,0x964D,0x9C47,0x4ECB,/* 0xB8-0xBF */
+	0x4EF7,0x500B,0x51F1,0x584F,0x6137,0x613E,0x6168,0x6539,/* 0xC0-0xC7 */
+	0x69EA,0x6F11,0x75A5,0x7686,0x76D6,0x7B87,0x82A5,0x84CB,/* 0xC8-0xCF */
+	0xF900,0x93A7,0x958B,0x5580,0x5BA2,0x5751,0xF901,0x7CB3,/* 0xD0-0xD7 */
+	0x7FB9,0x91B5,0x5028,0x53BB,0x5C45,0x5DE8,0x62D2,0x636E,/* 0xD8-0xDF */
+	0x64DA,0x64E7,0x6E20,0x70AC,0x795B,0x8DDD,0x8E1E,0xF902,/* 0xE0-0xE7 */
+	0x907D,0x9245,0x92F8,0x4E7E,0x4EF6,0x5065,0x5DFE,0x5EFA,/* 0xE8-0xEF */
+	0x6106,0x6957,0x8171,0x8654,0x8E47,0x9375,0x9A2B,0x4E5E,/* 0xF0-0xF7 */
+	0x5091,0x6770,0x6840,0x5109,0x528D,0x5292,0x6AA2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x77BC,0x9210,0x9ED4,0x52AB,0x602F,0x8FF2,0x5048,/* 0xA0-0xA7 */
+	0x61A9,0x63ED,0x64CA,0x683C,0x6A84,0x6FC0,0x8188,0x89A1,/* 0xA8-0xAF */
+	0x9694,0x5805,0x727D,0x72AC,0x7504,0x7D79,0x7E6D,0x80A9,/* 0xB0-0xB7 */
+	0x898B,0x8B74,0x9063,0x9D51,0x6289,0x6C7A,0x6F54,0x7D50,/* 0xB8-0xBF */
+	0x7F3A,0x8A23,0x517C,0x614A,0x7B9D,0x8B19,0x9257,0x938C,/* 0xC0-0xC7 */
+	0x4EAC,0x4FD3,0x501E,0x50BE,0x5106,0x52C1,0x52CD,0x537F,/* 0xC8-0xCF */
+	0x5770,0x5883,0x5E9A,0x5F91,0x6176,0x61AC,0x64CE,0x656C,/* 0xD0-0xD7 */
+	0x666F,0x66BB,0x66F4,0x6897,0x6D87,0x7085,0x70F1,0x749F,/* 0xD8-0xDF */
+	0x74A5,0x74CA,0x75D9,0x786C,0x78EC,0x7ADF,0x7AF6,0x7D45,/* 0xE0-0xE7 */
+	0x7D93,0x8015,0x803F,0x811B,0x8396,0x8B66,0x8F15,0x9015,/* 0xE8-0xEF */
+	0x93E1,0x9803,0x9838,0x9A5A,0x9BE8,0x4FC2,0x5553,0x583A,/* 0xF0-0xF7 */
+	0x5951,0x5B63,0x5C46,0x60B8,0x6212,0x6842,0x68B0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x68E8,0x6EAA,0x754C,0x7678,0x78CE,0x7A3D,0x7CFB,/* 0xA0-0xA7 */
+	0x7E6B,0x7E7C,0x8A08,0x8AA1,0x8C3F,0x968E,0x9DC4,0x53E4,/* 0xA8-0xAF */
+	0x53E9,0x544A,0x5471,0x56FA,0x59D1,0x5B64,0x5C3B,0x5EAB,/* 0xB0-0xB7 */
+	0x62F7,0x6537,0x6545,0x6572,0x66A0,0x67AF,0x69C1,0x6CBD,/* 0xB8-0xBF */
+	0x75FC,0x7690,0x777E,0x7A3F,0x7F94,0x8003,0x80A1,0x818F,/* 0xC0-0xC7 */
+	0x82E6,0x82FD,0x83F0,0x85C1,0x8831,0x88B4,0x8AA5,0xF903,/* 0xC8-0xCF */
+	0x8F9C,0x932E,0x96C7,0x9867,0x9AD8,0x9F13,0x54ED,0x659B,/* 0xD0-0xD7 */
+	0x66F2,0x688F,0x7A40,0x8C37,0x9D60,0x56F0,0x5764,0x5D11,/* 0xD8-0xDF */
+	0x6606,0x68B1,0x68CD,0x6EFE,0x7428,0x889E,0x9BE4,0x6C68,/* 0xE0-0xE7 */
+	0xF904,0x9AA8,0x4F9B,0x516C,0x5171,0x529F,0x5B54,0x5DE5,/* 0xE8-0xEF */
+	0x6050,0x606D,0x62F1,0x63A7,0x653B,0x73D9,0x7A7A,0x86A3,/* 0xF0-0xF7 */
+	0x8CA2,0x978F,0x4E32,0x5BE1,0x6208,0x679C,0x74DC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x79D1,0x83D3,0x8A87,0x8AB2,0x8DE8,0x904E,0x934B,/* 0xA0-0xA7 */
+	0x9846,0x5ED3,0x69E8,0x85FF,0x90ED,0xF905,0x51A0,0x5B98,/* 0xA8-0xAF */
+	0x5BEC,0x6163,0x68FA,0x6B3E,0x704C,0x742F,0x74D8,0x7BA1,/* 0xB0-0xB7 */
+	0x7F50,0x83C5,0x89C0,0x8CAB,0x95DC,0x9928,0x522E,0x605D,/* 0xB8-0xBF */
+	0x62EC,0x9002,0x4F8A,0x5149,0x5321,0x58D9,0x5EE3,0x66E0,/* 0xC0-0xC7 */
+	0x6D38,0x709A,0x72C2,0x73D6,0x7B50,0x80F1,0x945B,0x5366,/* 0xC8-0xCF */
+	0x639B,0x7F6B,0x4E56,0x5080,0x584A,0x58DE,0x602A,0x6127,/* 0xD0-0xD7 */
+	0x62D0,0x69D0,0x9B41,0x5B8F,0x7D18,0x80B1,0x8F5F,0x4EA4,/* 0xD8-0xDF */
+	0x50D1,0x54AC,0x55AC,0x5B0C,0x5DA0,0x5DE7,0x652A,0x654E,/* 0xE0-0xE7 */
+	0x6821,0x6A4B,0x72E1,0x768E,0x77EF,0x7D5E,0x7FF9,0x81A0,/* 0xE8-0xEF */
+	0x854E,0x86DF,0x8F03,0x8F4E,0x90CA,0x9903,0x9A55,0x9BAB,/* 0xF0-0xF7 */
+	0x4E18,0x4E45,0x4E5D,0x4EC7,0x4FF1,0x5177,0x52FE,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5340,0x53E3,0x53E5,0x548E,0x5614,0x5775,0x57A2,/* 0xA0-0xA7 */
+	0x5BC7,0x5D87,0x5ED0,0x61FC,0x62D8,0x6551,0x67B8,0x67E9,/* 0xA8-0xAF */
+	0x69CB,0x6B50,0x6BC6,0x6BEC,0x6C42,0x6E9D,0x7078,0x72D7,/* 0xB0-0xB7 */
+	0x7396,0x7403,0x77BF,0x77E9,0x7A76,0x7D7F,0x8009,0x81FC,/* 0xB8-0xBF */
+	0x8205,0x820A,0x82DF,0x8862,0x8B33,0x8CFC,0x8EC0,0x9011,/* 0xC0-0xC7 */
+	0x90B1,0x9264,0x92B6,0x99D2,0x9A45,0x9CE9,0x9DD7,0x9F9C,/* 0xC8-0xCF */
+	0x570B,0x5C40,0x83CA,0x97A0,0x97AB,0x9EB4,0x541B,0x7A98,/* 0xD0-0xD7 */
+	0x7FA4,0x88D9,0x8ECD,0x90E1,0x5800,0x5C48,0x6398,0x7A9F,/* 0xD8-0xDF */
+	0x5BAE,0x5F13,0x7A79,0x7AAE,0x828E,0x8EAC,0x5026,0x5238,/* 0xE0-0xE7 */
+	0x52F8,0x5377,0x5708,0x62F3,0x6372,0x6B0A,0x6DC3,0x7737,/* 0xE8-0xEF */
+	0x53A5,0x7357,0x8568,0x8E76,0x95D5,0x673A,0x6AC3,0x6F70,/* 0xF0-0xF7 */
+	0x8A6D,0x8ECC,0x994B,0xF906,0x6677,0x6B78,0x8CB4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9B3C,0xF907,0x53EB,0x572D,0x594E,0x63C6,0x69FB,/* 0xA0-0xA7 */
+	0x73EA,0x7845,0x7ABA,0x7AC5,0x7CFE,0x8475,0x898F,0x8D73,/* 0xA8-0xAF */
+	0x9035,0x95A8,0x52FB,0x5747,0x7547,0x7B60,0x83CC,0x921E,/* 0xB0-0xB7 */
+	0xF908,0x6A58,0x514B,0x524B,0x5287,0x621F,0x68D8,0x6975,/* 0xB8-0xBF */
+	0x9699,0x50C5,0x52A4,0x52E4,0x61C3,0x65A4,0x6839,0x69FF,/* 0xC0-0xC7 */
+	0x747E,0x7B4B,0x82B9,0x83EB,0x89B2,0x8B39,0x8FD1,0x9949,/* 0xC8-0xCF */
+	0xF909,0x4ECA,0x5997,0x64D2,0x6611,0x6A8E,0x7434,0x7981,/* 0xD0-0xD7 */
+	0x79BD,0x82A9,0x887E,0x887F,0x895F,0xF90A,0x9326,0x4F0B,/* 0xD8-0xDF */
+	0x53CA,0x6025,0x6271,0x6C72,0x7D1A,0x7D66,0x4E98,0x5162,/* 0xE0-0xE7 */
+	0x77DC,0x80AF,0x4F01,0x4F0E,0x5176,0x5180,0x55DC,0x5668,/* 0xE8-0xEF */
+	0x573B,0x57FA,0x57FC,0x5914,0x5947,0x5993,0x5BC4,0x5C90,/* 0xF0-0xF7 */
+	0x5D0E,0x5DF1,0x5E7E,0x5FCC,0x6280,0x65D7,0x65E3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x671E,0x671F,0x675E,0x68CB,0x68C4,0x6A5F,0x6B3A,/* 0xA0-0xA7 */
+	0x6C23,0x6C7D,0x6C82,0x6DC7,0x7398,0x7426,0x742A,0x7482,/* 0xA8-0xAF */
+	0x74A3,0x7578,0x757F,0x7881,0x78EF,0x7941,0x7947,0x7948,/* 0xB0-0xB7 */
+	0x797A,0x7B95,0x7D00,0x7DBA,0x7F88,0x8006,0x802D,0x808C,/* 0xB8-0xBF */
+	0x8A18,0x8B4F,0x8C48,0x8D77,0x9321,0x9324,0x98E2,0x9951,/* 0xC0-0xC7 */
+	0x9A0E,0x9A0F,0x9A65,0x9E92,0x7DCA,0x4F76,0x5409,0x62EE,/* 0xC8-0xCF */
+	0x6854,0x91D1,0x55AB,0x513A,0xF90B,0xF90C,0x5A1C,0x61E6,/* 0xD0-0xD7 */
+	0xF90D,0x62CF,0x62FF,0xF90E,0xF90F,0xF910,0xF911,0xF912,/* 0xD8-0xDF */
+	0xF913,0x90A3,0xF914,0xF915,0xF916,0xF917,0xF918,0x8AFE,/* 0xE0-0xE7 */
+	0xF919,0xF91A,0xF91B,0xF91C,0x6696,0xF91D,0x7156,0xF91E,/* 0xE8-0xEF */
+	0xF91F,0x96E3,0xF920,0x634F,0x637A,0x5357,0xF921,0x678F,/* 0xF0-0xF7 */
+	0x6960,0x6E73,0xF922,0x7537,0xF923,0xF924,0xF925,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7D0D,0xF926,0xF927,0x8872,0x56CA,0x5A18,0xF928,/* 0xA0-0xA7 */
+	0xF929,0xF92A,0xF92B,0xF92C,0x4E43,0xF92D,0x5167,0x5948,/* 0xA8-0xAF */
+	0x67F0,0x8010,0xF92E,0x5973,0x5E74,0x649A,0x79CA,0x5FF5,/* 0xB0-0xB7 */
+	0x606C,0x62C8,0x637B,0x5BE7,0x5BD7,0x52AA,0xF92F,0x5974,/* 0xB8-0xBF */
+	0x5F29,0x6012,0xF930,0xF931,0xF932,0x7459,0xF933,0xF934,/* 0xC0-0xC7 */
+	0xF935,0xF936,0xF937,0xF938,0x99D1,0xF939,0xF93A,0xF93B,/* 0xC8-0xCF */
+	0xF93C,0xF93D,0xF93E,0xF93F,0xF940,0xF941,0xF942,0xF943,/* 0xD0-0xD7 */
+	0x6FC3,0xF944,0xF945,0x81BF,0x8FB2,0x60F1,0xF946,0xF947,/* 0xD8-0xDF */
+	0x8166,0xF948,0xF949,0x5C3F,0xF94A,0xF94B,0xF94C,0xF94D,/* 0xE0-0xE7 */
+	0xF94E,0xF94F,0xF950,0xF951,0x5AE9,0x8A25,0x677B,0x7D10,/* 0xE8-0xEF */
+	0xF952,0xF953,0xF954,0xF955,0xF956,0xF957,0x80FD,0xF958,/* 0xF0-0xF7 */
+	0xF959,0x5C3C,0x6CE5,0x533F,0x6EBA,0x591A,0x8336,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x4E39,0x4EB6,0x4F46,0x55AE,0x5718,0x58C7,0x5F56,/* 0xA0-0xA7 */
+	0x65B7,0x65E6,0x6A80,0x6BB5,0x6E4D,0x77ED,0x7AEF,0x7C1E,/* 0xA8-0xAF */
+	0x7DDE,0x86CB,0x8892,0x9132,0x935B,0x64BB,0x6FBE,0x737A,/* 0xB0-0xB7 */
+	0x75B8,0x9054,0x5556,0x574D,0x61BA,0x64D4,0x66C7,0x6DE1,/* 0xB8-0xBF */
+	0x6E5B,0x6F6D,0x6FB9,0x75F0,0x8043,0x81BD,0x8541,0x8983,/* 0xC0-0xC7 */
+	0x8AC7,0x8B5A,0x931F,0x6C93,0x7553,0x7B54,0x8E0F,0x905D,/* 0xC8-0xCF */
+	0x5510,0x5802,0x5858,0x5E62,0x6207,0x649E,0x68E0,0x7576,/* 0xD0-0xD7 */
+	0x7CD6,0x87B3,0x9EE8,0x4EE3,0x5788,0x576E,0x5927,0x5C0D,/* 0xD8-0xDF */
+	0x5CB1,0x5E36,0x5F85,0x6234,0x64E1,0x73B3,0x81FA,0x888B,/* 0xE0-0xE7 */
+	0x8CB8,0x968A,0x9EDB,0x5B85,0x5FB7,0x60B3,0x5012,0x5200,/* 0xE8-0xEF */
+	0x5230,0x5716,0x5835,0x5857,0x5C0E,0x5C60,0x5CF6,0x5D8B,/* 0xF0-0xF7 */
+	0x5EA6,0x5F92,0x60BC,0x6311,0x6389,0x6417,0x6843,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x68F9,0x6AC2,0x6DD8,0x6E21,0x6ED4,0x6FE4,0x71FE,/* 0xA0-0xA7 */
+	0x76DC,0x7779,0x79B1,0x7A3B,0x8404,0x89A9,0x8CED,0x8DF3,/* 0xA8-0xAF */
+	0x8E48,0x9003,0x9014,0x9053,0x90FD,0x934D,0x9676,0x97DC,/* 0xB0-0xB7 */
+	0x6BD2,0x7006,0x7258,0x72A2,0x7368,0x7763,0x79BF,0x7BE4,/* 0xB8-0xBF */
+	0x7E9B,0x8B80,0x58A9,0x60C7,0x6566,0x65FD,0x66BE,0x6C8C,/* 0xC0-0xC7 */
+	0x711E,0x71C9,0x8C5A,0x9813,0x4E6D,0x7A81,0x4EDD,0x51AC,/* 0xC8-0xCF */
+	0x51CD,0x52D5,0x540C,0x61A7,0x6771,0x6850,0x68DF,0x6D1E,/* 0xD0-0xD7 */
+	0x6F7C,0x75BC,0x77B3,0x7AE5,0x80F4,0x8463,0x9285,0x515C,/* 0xD8-0xDF */
+	0x6597,0x675C,0x6793,0x75D8,0x7AC7,0x8373,0xF95A,0x8C46,/* 0xE0-0xE7 */
+	0x9017,0x982D,0x5C6F,0x81C0,0x829A,0x9041,0x906F,0x920D,/* 0xE8-0xEF */
+	0x5F97,0x5D9D,0x6A59,0x71C8,0x767B,0x7B49,0x85E4,0x8B04,/* 0xF0-0xF7 */
+	0x9127,0x9A30,0x5587,0x61F6,0xF95B,0x7669,0x7F85,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x863F,0x87BA,0x88F8,0x908F,0xF95C,0x6D1B,0x70D9,/* 0xA0-0xA7 */
+	0x73DE,0x7D61,0x843D,0xF95D,0x916A,0x99F1,0xF95E,0x4E82,/* 0xA8-0xAF */
+	0x5375,0x6B04,0x6B12,0x703E,0x721B,0x862D,0x9E1E,0x524C,/* 0xB0-0xB7 */
+	0x8FA3,0x5D50,0x64E5,0x652C,0x6B16,0x6FEB,0x7C43,0x7E9C,/* 0xB8-0xBF */
+	0x85CD,0x8964,0x89BD,0x62C9,0x81D8,0x881F,0x5ECA,0x6717,/* 0xC0-0xC7 */
+	0x6D6A,0x72FC,0x7405,0x746F,0x8782,0x90DE,0x4F86,0x5D0D,/* 0xC8-0xCF */
+	0x5FA0,0x840A,0x51B7,0x63A0,0x7565,0x4EAE,0x5006,0x5169,/* 0xD0-0xD7 */
+	0x51C9,0x6881,0x6A11,0x7CAE,0x7CB1,0x7CE7,0x826F,0x8AD2,/* 0xD8-0xDF */
+	0x8F1B,0x91CF,0x4FB6,0x5137,0x52F5,0x5442,0x5EEC,0x616E,/* 0xE0-0xE7 */
+	0x623E,0x65C5,0x6ADA,0x6FFE,0x792A,0x85DC,0x8823,0x95AD,/* 0xE8-0xEF */
+	0x9A62,0x9A6A,0x9E97,0x9ECE,0x529B,0x66C6,0x6B77,0x701D,/* 0xF0-0xF7 */
+	0x792B,0x8F62,0x9742,0x6190,0x6200,0x6523,0x6F23,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7149,0x7489,0x7DF4,0x806F,0x84EE,0x8F26,0x9023,/* 0xA0-0xA7 */
+	0x934A,0x51BD,0x5217,0x52A3,0x6D0C,0x70C8,0x88C2,0x5EC9,/* 0xA8-0xAF */
+	0x6582,0x6BAE,0x6FC2,0x7C3E,0x7375,0x4EE4,0x4F36,0x56F9,/* 0xB0-0xB7 */
+	0xF95F,0x5CBA,0x5DBA,0x601C,0x73B2,0x7B2D,0x7F9A,0x7FCE,/* 0xB8-0xBF */
+	0x8046,0x901E,0x9234,0x96F6,0x9748,0x9818,0x9F61,0x4F8B,/* 0xC0-0xC7 */
+	0x6FA7,0x79AE,0x91B4,0x96B7,0x52DE,0xF960,0x6488,0x64C4,/* 0xC8-0xCF */
+	0x6AD3,0x6F5E,0x7018,0x7210,0x76E7,0x8001,0x8606,0x865C,/* 0xD0-0xD7 */
+	0x8DEF,0x8F05,0x9732,0x9B6F,0x9DFA,0x9E75,0x788C,0x797F,/* 0xD8-0xDF */
+	0x7DA0,0x83C9,0x9304,0x9E7F,0x9E93,0x8AD6,0x58DF,0x5F04,/* 0xE0-0xE7 */
+	0x6727,0x7027,0x74CF,0x7C60,0x807E,0x5121,0x7028,0x7262,/* 0xE8-0xEF */
+	0x78CA,0x8CC2,0x8CDA,0x8CF4,0x96F7,0x4E86,0x50DA,0x5BEE,/* 0xF0-0xF7 */
+	0x5ED6,0x6599,0x71CE,0x7642,0x77AD,0x804A,0x84FC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x907C,0x9B27,0x9F8D,0x58D8,0x5A41,0x5C62,0x6A13,/* 0xA0-0xA7 */
+	0x6DDA,0x6F0F,0x763B,0x7D2F,0x7E37,0x851E,0x8938,0x93E4,/* 0xA8-0xAF */
+	0x964B,0x5289,0x65D2,0x67F3,0x69B4,0x6D41,0x6E9C,0x700F,/* 0xB0-0xB7 */
+	0x7409,0x7460,0x7559,0x7624,0x786B,0x8B2C,0x985E,0x516D,/* 0xB8-0xBF */
+	0x622E,0x9678,0x4F96,0x502B,0x5D19,0x6DEA,0x7DB8,0x8F2A,/* 0xC0-0xC7 */
+	0x5F8B,0x6144,0x6817,0xF961,0x9686,0x52D2,0x808B,0x51DC,/* 0xC8-0xCF */
+	0x51CC,0x695E,0x7A1C,0x7DBE,0x83F1,0x9675,0x4FDA,0x5229,/* 0xD0-0xD7 */
+	0x5398,0x540F,0x550E,0x5C65,0x60A7,0x674E,0x68A8,0x6D6C,/* 0xD8-0xDF */
+	0x7281,0x72F8,0x7406,0x7483,0xF962,0x75E2,0x7C6C,0x7F79,/* 0xE0-0xE7 */
+	0x7FB8,0x8389,0x88CF,0x88E1,0x91CC,0x91D0,0x96E2,0x9BC9,/* 0xE8-0xEF */
+	0x541D,0x6F7E,0x71D0,0x7498,0x85FA,0x8EAA,0x96A3,0x9C57,/* 0xF0-0xF7 */
+	0x9E9F,0x6797,0x6DCB,0x7433,0x81E8,0x9716,0x782C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7ACB,0x7B20,0x7C92,0x6469,0x746A,0x75F2,0x78BC,/* 0xA0-0xA7 */
+	0x78E8,0x99AC,0x9B54,0x9EBB,0x5BDE,0x5E55,0x6F20,0x819C,/* 0xA8-0xAF */
+	0x83AB,0x9088,0x4E07,0x534D,0x5A29,0x5DD2,0x5F4E,0x6162,/* 0xB0-0xB7 */
+	0x633D,0x6669,0x66FC,0x6EFF,0x6F2B,0x7063,0x779E,0x842C,/* 0xB8-0xBF */
+	0x8513,0x883B,0x8F13,0x9945,0x9C3B,0x551C,0x62B9,0x672B,/* 0xC0-0xC7 */
+	0x6CAB,0x8309,0x896A,0x977A,0x4EA1,0x5984,0x5FD8,0x5FD9,/* 0xC8-0xCF */
+	0x671B,0x7DB2,0x7F54,0x8292,0x832B,0x83BD,0x8F1E,0x9099,/* 0xD0-0xD7 */
+	0x57CB,0x59B9,0x5A92,0x5BD0,0x6627,0x679A,0x6885,0x6BCF,/* 0xD8-0xDF */
+	0x7164,0x7F75,0x8CB7,0x8CE3,0x9081,0x9B45,0x8108,0x8C8A,/* 0xE0-0xE7 */
+	0x964C,0x9A40,0x9EA5,0x5B5F,0x6C13,0x731B,0x76F2,0x76DF,/* 0xE8-0xEF */
+	0x840C,0x51AA,0x8993,0x514D,0x5195,0x52C9,0x68C9,0x6C94,/* 0xF0-0xF7 */
+	0x7704,0x7720,0x7DBF,0x7DEC,0x9762,0x9EB5,0x6EC5,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8511,0x51A5,0x540D,0x547D,0x660E,0x669D,0x6927,/* 0xA0-0xA7 */
+	0x6E9F,0x76BF,0x7791,0x8317,0x84C2,0x879F,0x9169,0x9298,/* 0xA8-0xAF */
+	0x9CF4,0x8882,0x4FAE,0x5192,0x52DF,0x59C6,0x5E3D,0x6155,/* 0xB0-0xB7 */
+	0x6478,0x6479,0x66AE,0x67D0,0x6A21,0x6BCD,0x6BDB,0x725F,/* 0xB8-0xBF */
+	0x7261,0x7441,0x7738,0x77DB,0x8017,0x82BC,0x8305,0x8B00,/* 0xC0-0xC7 */
+	0x8B28,0x8C8C,0x6728,0x6C90,0x7267,0x76EE,0x7766,0x7A46,/* 0xC8-0xCF */
+	0x9DA9,0x6B7F,0x6C92,0x5922,0x6726,0x8499,0x536F,0x5893,/* 0xD0-0xD7 */
+	0x5999,0x5EDF,0x63CF,0x6634,0x6773,0x6E3A,0x732B,0x7AD7,/* 0xD8-0xDF */
+	0x82D7,0x9328,0x52D9,0x5DEB,0x61AE,0x61CB,0x620A,0x62C7,/* 0xE0-0xE7 */
+	0x64AB,0x65E0,0x6959,0x6B66,0x6BCB,0x7121,0x73F7,0x755D,/* 0xE8-0xEF */
+	0x7E46,0x821E,0x8302,0x856A,0x8AA3,0x8CBF,0x9727,0x9D61,/* 0xF0-0xF7 */
+	0x58A8,0x9ED8,0x5011,0x520E,0x543B,0x554F,0x6587,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6C76,0x7D0A,0x7D0B,0x805E,0x868A,0x9580,0x96EF,/* 0xA0-0xA7 */
+	0x52FF,0x6C95,0x7269,0x5473,0x5A9A,0x5C3E,0x5D4B,0x5F4C,/* 0xA8-0xAF */
+	0x5FAE,0x672A,0x68B6,0x6963,0x6E3C,0x6E44,0x7709,0x7C73,/* 0xB0-0xB7 */
+	0x7F8E,0x8587,0x8B0E,0x8FF7,0x9761,0x9EF4,0x5CB7,0x60B6,/* 0xB8-0xBF */
+	0x610D,0x61AB,0x654F,0x65FB,0x65FC,0x6C11,0x6CEF,0x739F,/* 0xC0-0xC7 */
+	0x73C9,0x7DE1,0x9594,0x5BC6,0x871C,0x8B10,0x525D,0x535A,/* 0xC8-0xCF */
+	0x62CD,0x640F,0x64B2,0x6734,0x6A38,0x6CCA,0x73C0,0x749E,/* 0xD0-0xD7 */
+	0x7B94,0x7C95,0x7E1B,0x818A,0x8236,0x8584,0x8FEB,0x96F9,/* 0xD8-0xDF */
+	0x99C1,0x4F34,0x534A,0x53CD,0x53DB,0x62CC,0x642C,0x6500,/* 0xE0-0xE7 */
+	0x6591,0x69C3,0x6CEE,0x6F58,0x73ED,0x7554,0x7622,0x76E4,/* 0xE8-0xEF */
+	0x76FC,0x78D0,0x78FB,0x792C,0x7D46,0x822C,0x87E0,0x8FD4,/* 0xF0-0xF7 */
+	0x9812,0x98EF,0x52C3,0x62D4,0x64A5,0x6E24,0x6F51,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x767C,0x8DCB,0x91B1,0x9262,0x9AEE,0x9B43,0x5023,/* 0xA0-0xA7 */
+	0x508D,0x574A,0x59A8,0x5C28,0x5E47,0x5F77,0x623F,0x653E,/* 0xA8-0xAF */
+	0x65B9,0x65C1,0x6609,0x678B,0x699C,0x6EC2,0x78C5,0x7D21,/* 0xB0-0xB7 */
+	0x80AA,0x8180,0x822B,0x82B3,0x84A1,0x868C,0x8A2A,0x8B17,/* 0xB8-0xBF */
+	0x90A6,0x9632,0x9F90,0x500D,0x4FF3,0xF963,0x57F9,0x5F98,/* 0xC0-0xC7 */
+	0x62DC,0x6392,0x676F,0x6E43,0x7119,0x76C3,0x80CC,0x80DA,/* 0xC8-0xCF */
+	0x88F4,0x88F5,0x8919,0x8CE0,0x8F29,0x914D,0x966A,0x4F2F,/* 0xD0-0xD7 */
+	0x4F70,0x5E1B,0x67CF,0x6822,0x767D,0x767E,0x9B44,0x5E61,/* 0xD8-0xDF */
+	0x6A0A,0x7169,0x71D4,0x756A,0xF964,0x7E41,0x8543,0x85E9,/* 0xE0-0xE7 */
+	0x98DC,0x4F10,0x7B4F,0x7F70,0x95A5,0x51E1,0x5E06,0x68B5,/* 0xE8-0xEF */
+	0x6C3E,0x6C4E,0x6CDB,0x72AF,0x7BC4,0x8303,0x6CD5,0x743A,/* 0xF0-0xF7 */
+	0x50FB,0x5288,0x58C1,0x64D8,0x6A97,0x74A7,0x7656,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x78A7,0x8617,0x95E2,0x9739,0xF965,0x535E,0x5F01,/* 0xA0-0xA7 */
+	0x8B8A,0x8FA8,0x8FAF,0x908A,0x5225,0x77A5,0x9C49,0x9F08,/* 0xA8-0xAF */
+	0x4E19,0x5002,0x5175,0x5C5B,0x5E77,0x661E,0x663A,0x67C4,/* 0xB0-0xB7 */
+	0x68C5,0x70B3,0x7501,0x75C5,0x79C9,0x7ADD,0x8F27,0x9920,/* 0xB8-0xBF */
+	0x9A08,0x4FDD,0x5821,0x5831,0x5BF6,0x666E,0x6B65,0x6D11,/* 0xC0-0xC7 */
+	0x6E7A,0x6F7D,0x73E4,0x752B,0x83E9,0x88DC,0x8913,0x8B5C,/* 0xC8-0xCF */
+	0x8F14,0x4F0F,0x50D5,0x5310,0x535C,0x5B93,0x5FA9,0x670D,/* 0xD0-0xD7 */
+	0x798F,0x8179,0x832F,0x8514,0x8907,0x8986,0x8F39,0x8F3B,/* 0xD8-0xDF */
+	0x99A5,0x9C12,0x672C,0x4E76,0x4FF8,0x5949,0x5C01,0x5CEF,/* 0xE0-0xE7 */
+	0x5CF0,0x6367,0x68D2,0x70FD,0x71A2,0x742B,0x7E2B,0x84EC,/* 0xE8-0xEF */
+	0x8702,0x9022,0x92D2,0x9CF3,0x4E0D,0x4ED8,0x4FEF,0x5085,/* 0xF0-0xF7 */
+	0x5256,0x526F,0x5426,0x5490,0x57E0,0x592B,0x5A66,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5B5A,0x5B75,0x5BCC,0x5E9C,0xF966,0x6276,0x6577,/* 0xA0-0xA7 */
+	0x65A7,0x6D6E,0x6EA5,0x7236,0x7B26,0x7C3F,0x7F36,0x8150,/* 0xA8-0xAF */
+	0x8151,0x819A,0x8240,0x8299,0x83A9,0x8A03,0x8CA0,0x8CE6,/* 0xB0-0xB7 */
+	0x8CFB,0x8D74,0x8DBA,0x90E8,0x91DC,0x961C,0x9644,0x99D9,/* 0xB8-0xBF */
+	0x9CE7,0x5317,0x5206,0x5429,0x5674,0x58B3,0x5954,0x596E,/* 0xC0-0xC7 */
+	0x5FFF,0x61A4,0x626E,0x6610,0x6C7E,0x711A,0x76C6,0x7C89,/* 0xC8-0xCF */
+	0x7CDE,0x7D1B,0x82AC,0x8CC1,0x96F0,0xF967,0x4F5B,0x5F17,/* 0xD0-0xD7 */
+	0x5F7F,0x62C2,0x5D29,0x670B,0x68DA,0x787C,0x7E43,0x9D6C,/* 0xD8-0xDF */
+	0x4E15,0x5099,0x5315,0x532A,0x5351,0x5983,0x5A62,0x5E87,/* 0xE0-0xE7 */
+	0x60B2,0x618A,0x6249,0x6279,0x6590,0x6787,0x69A7,0x6BD4,/* 0xE8-0xEF */
+	0x6BD6,0x6BD7,0x6BD8,0x6CB8,0xF968,0x7435,0x75FA,0x7812,/* 0xF0-0xF7 */
+	0x7891,0x79D5,0x79D8,0x7C83,0x7DCB,0x7FE1,0x80A5,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x813E,0x81C2,0x83F2,0x871A,0x88E8,0x8AB9,0x8B6C,/* 0xA0-0xA7 */
+	0x8CBB,0x9119,0x975E,0x98DB,0x9F3B,0x56AC,0x5B2A,0x5F6C,/* 0xA8-0xAF */
+	0x658C,0x6AB3,0x6BAF,0x6D5C,0x6FF1,0x7015,0x725D,0x73AD,/* 0xB0-0xB7 */
+	0x8CA7,0x8CD3,0x983B,0x6191,0x6C37,0x8058,0x9A01,0x4E4D,/* 0xB8-0xBF */
+	0x4E8B,0x4E9B,0x4ED5,0x4F3A,0x4F3C,0x4F7F,0x4FDF,0x50FF,/* 0xC0-0xC7 */
+	0x53F2,0x53F8,0x5506,0x55E3,0x56DB,0x58EB,0x5962,0x5A11,/* 0xC8-0xCF */
+	0x5BEB,0x5BFA,0x5C04,0x5DF3,0x5E2B,0x5F99,0x601D,0x6368,/* 0xD0-0xD7 */
+	0x659C,0x65AF,0x67F6,0x67FB,0x68AD,0x6B7B,0x6C99,0x6CD7,/* 0xD8-0xDF */
+	0x6E23,0x7009,0x7345,0x7802,0x793E,0x7940,0x7960,0x79C1,/* 0xE0-0xE7 */
+	0x7BE9,0x7D17,0x7D72,0x8086,0x820D,0x838E,0x84D1,0x86C7,/* 0xE8-0xEF */
+	0x88DF,0x8A50,0x8A5E,0x8B1D,0x8CDC,0x8D66,0x8FAD,0x90AA,/* 0xF0-0xF7 */
+	0x98FC,0x99DF,0x9E9D,0x524A,0xF969,0x6714,0xF96A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5098,0x522A,0x5C71,0x6563,0x6C55,0x73CA,0x7523,/* 0xA0-0xA7 */
+	0x759D,0x7B97,0x849C,0x9178,0x9730,0x4E77,0x6492,0x6BBA,/* 0xA8-0xAF */
+	0x715E,0x85A9,0x4E09,0xF96B,0x6749,0x68EE,0x6E17,0x829F,/* 0xB0-0xB7 */
+	0x8518,0x886B,0x63F7,0x6F81,0x9212,0x98AF,0x4E0A,0x50B7,/* 0xB8-0xBF */
+	0x50CF,0x511F,0x5546,0x55AA,0x5617,0x5B40,0x5C19,0x5CE0,/* 0xC0-0xC7 */
+	0x5E38,0x5E8A,0x5EA0,0x5EC2,0x60F3,0x6851,0x6A61,0x6E58,/* 0xC8-0xCF */
+	0x723D,0x7240,0x72C0,0x76F8,0x7965,0x7BB1,0x7FD4,0x88F3,/* 0xD0-0xD7 */
+	0x89F4,0x8A73,0x8C61,0x8CDE,0x971C,0x585E,0x74BD,0x8CFD,/* 0xD8-0xDF */
+	0x55C7,0xF96C,0x7A61,0x7D22,0x8272,0x7272,0x751F,0x7525,/* 0xE0-0xE7 */
+	0xF96D,0x7B19,0x5885,0x58FB,0x5DBC,0x5E8F,0x5EB6,0x5F90,/* 0xE8-0xEF */
+	0x6055,0x6292,0x637F,0x654D,0x6691,0x66D9,0x66F8,0x6816,/* 0xF0-0xF7 */
+	0x68F2,0x7280,0x745E,0x7B6E,0x7D6E,0x7DD6,0x7F72,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x80E5,0x8212,0x85AF,0x897F,0x8A93,0x901D,0x92E4,/* 0xA0-0xA7 */
+	0x9ECD,0x9F20,0x5915,0x596D,0x5E2D,0x60DC,0x6614,0x6673,/* 0xA8-0xAF */
+	0x6790,0x6C50,0x6DC5,0x6F5F,0x77F3,0x78A9,0x84C6,0x91CB,/* 0xB0-0xB7 */
+	0x932B,0x4ED9,0x50CA,0x5148,0x5584,0x5B0B,0x5BA3,0x6247,/* 0xB8-0xBF */
+	0x657E,0x65CB,0x6E32,0x717D,0x7401,0x7444,0x7487,0x74BF,/* 0xC0-0xC7 */
+	0x766C,0x79AA,0x7DDA,0x7E55,0x7FA8,0x817A,0x81B3,0x8239,/* 0xC8-0xCF */
+	0x861A,0x87EC,0x8A75,0x8DE3,0x9078,0x9291,0x9425,0x994D,/* 0xD0-0xD7 */
+	0x9BAE,0x5368,0x5C51,0x6954,0x6CC4,0x6D29,0x6E2B,0x820C,/* 0xD8-0xDF */
+	0x859B,0x893B,0x8A2D,0x8AAA,0x96EA,0x9F67,0x5261,0x66B9,/* 0xE0-0xE7 */
+	0x6BB2,0x7E96,0x87FE,0x8D0D,0x9583,0x965D,0x651D,0x6D89,/* 0xE8-0xEF */
+	0x71EE,0xF96E,0x57CE,0x59D3,0x5BAC,0x6027,0x60FA,0x6210,/* 0xF0-0xF7 */
+	0x661F,0x665F,0x7329,0x73F9,0x76DB,0x7701,0x7B6C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8056,0x8072,0x8165,0x8AA0,0x9192,0x4E16,0x52E2,/* 0xA0-0xA7 */
+	0x6B72,0x6D17,0x7A05,0x7B39,0x7D30,0xF96F,0x8CB0,0x53EC,/* 0xA8-0xAF */
+	0x562F,0x5851,0x5BB5,0x5C0F,0x5C11,0x5DE2,0x6240,0x6383,/* 0xB0-0xB7 */
+	0x6414,0x662D,0x68B3,0x6CBC,0x6D88,0x6EAF,0x701F,0x70A4,/* 0xB8-0xBF */
+	0x71D2,0x7526,0x758F,0x758E,0x7619,0x7B11,0x7BE0,0x7C2B,/* 0xC0-0xC7 */
+	0x7D20,0x7D39,0x852C,0x856D,0x8607,0x8A34,0x900D,0x9061,/* 0xC8-0xCF */
+	0x90B5,0x92B7,0x97F6,0x9A37,0x4FD7,0x5C6C,0x675F,0x6D91,/* 0xD0-0xD7 */
+	0x7C9F,0x7E8C,0x8B16,0x8D16,0x901F,0x5B6B,0x5DFD,0x640D,/* 0xD8-0xDF */
+	0x84C0,0x905C,0x98E1,0x7387,0x5B8B,0x609A,0x677E,0x6DDE,/* 0xE0-0xE7 */
+	0x8A1F,0x8AA6,0x9001,0x980C,0x5237,0xF970,0x7051,0x788E,/* 0xE8-0xEF */
+	0x9396,0x8870,0x91D7,0x4FEE,0x53D7,0x55FD,0x56DA,0x5782,/* 0xF0-0xF7 */
+	0x58FD,0x5AC2,0x5B88,0x5CAB,0x5CC0,0x5E25,0x6101,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x620D,0x624B,0x6388,0x641C,0x6536,0x6578,0x6A39,/* 0xA0-0xA7 */
+	0x6B8A,0x6C34,0x6D19,0x6F31,0x71E7,0x72E9,0x7378,0x7407,/* 0xA8-0xAF */
+	0x74B2,0x7626,0x7761,0x79C0,0x7A57,0x7AEA,0x7CB9,0x7D8F,/* 0xB0-0xB7 */
+	0x7DAC,0x7E61,0x7F9E,0x8129,0x8331,0x8490,0x84DA,0x85EA,/* 0xB8-0xBF */
+	0x8896,0x8AB0,0x8B90,0x8F38,0x9042,0x9083,0x916C,0x9296,/* 0xC0-0xC7 */
+	0x92B9,0x968B,0x96A7,0x96A8,0x96D6,0x9700,0x9808,0x9996,/* 0xC8-0xCF */
+	0x9AD3,0x9B1A,0x53D4,0x587E,0x5919,0x5B70,0x5BBF,0x6DD1,/* 0xD0-0xD7 */
+	0x6F5A,0x719F,0x7421,0x74B9,0x8085,0x83FD,0x5DE1,0x5F87,/* 0xD8-0xDF */
+	0x5FAA,0x6042,0x65EC,0x6812,0x696F,0x6A53,0x6B89,0x6D35,/* 0xE0-0xE7 */
+	0x6DF3,0x73E3,0x76FE,0x77AC,0x7B4D,0x7D14,0x8123,0x821C,/* 0xE8-0xEF */
+	0x8340,0x84F4,0x8563,0x8A62,0x8AC4,0x9187,0x931E,0x9806,/* 0xF0-0xF7 */
+	0x99B4,0x620C,0x8853,0x8FF0,0x9265,0x5D07,0x5D27,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5D69,0x745F,0x819D,0x8768,0x6FD5,0x62FE,0x7FD2,/* 0xA0-0xA7 */
+	0x8936,0x8972,0x4E1E,0x4E58,0x50E7,0x52DD,0x5347,0x627F,/* 0xA8-0xAF */
+	0x6607,0x7E69,0x8805,0x965E,0x4F8D,0x5319,0x5636,0x59CB,/* 0xB0-0xB7 */
+	0x5AA4,0x5C38,0x5C4E,0x5C4D,0x5E02,0x5F11,0x6043,0x65BD,/* 0xB8-0xBF */
+	0x662F,0x6642,0x67BE,0x67F4,0x731C,0x77E2,0x793A,0x7FC5,/* 0xC0-0xC7 */
+	0x8494,0x84CD,0x8996,0x8A66,0x8A69,0x8AE1,0x8C55,0x8C7A,/* 0xC8-0xCF */
+	0x57F4,0x5BD4,0x5F0F,0x606F,0x62ED,0x690D,0x6B96,0x6E5C,/* 0xD0-0xD7 */
+	0x7184,0x7BD2,0x8755,0x8B58,0x8EFE,0x98DF,0x98FE,0x4F38,/* 0xD8-0xDF */
+	0x4F81,0x4FE1,0x547B,0x5A20,0x5BB8,0x613C,0x65B0,0x6668,/* 0xE0-0xE7 */
+	0x71FC,0x7533,0x795E,0x7D33,0x814E,0x81E3,0x8398,0x85AA,/* 0xE8-0xEF */
+	0x85CE,0x8703,0x8A0A,0x8EAB,0x8F9B,0xF971,0x8FC5,0x5931,/* 0xF0-0xF7 */
+	0x5BA4,0x5BE6,0x6089,0x5BE9,0x5C0B,0x5FC3,0x6C81,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xF972,0x6DF1,0x700B,0x751A,0x82AF,0x8AF6,0x4EC0,/* 0xA0-0xA7 */
+	0x5341,0xF973,0x96D9,0x6C0F,0x4E9E,0x4FC4,0x5152,0x555E,/* 0xA8-0xAF */
+	0x5A25,0x5CE8,0x6211,0x7259,0x82BD,0x83AA,0x86FE,0x8859,/* 0xB0-0xB7 */
+	0x8A1D,0x963F,0x96C5,0x9913,0x9D09,0x9D5D,0x580A,0x5CB3,/* 0xB8-0xBF */
+	0x5DBD,0x5E44,0x60E1,0x6115,0x63E1,0x6A02,0x6E25,0x9102,/* 0xC0-0xC7 */
+	0x9354,0x984E,0x9C10,0x9F77,0x5B89,0x5CB8,0x6309,0x664F,/* 0xC8-0xCF */
+	0x6848,0x773C,0x96C1,0x978D,0x9854,0x9B9F,0x65A1,0x8B01,/* 0xD0-0xD7 */
+	0x8ECB,0x95BC,0x5535,0x5CA9,0x5DD6,0x5EB5,0x6697,0x764C,/* 0xD8-0xDF */
+	0x83F4,0x95C7,0x58D3,0x62BC,0x72CE,0x9D28,0x4EF0,0x592E,/* 0xE0-0xE7 */
+	0x600F,0x663B,0x6B83,0x79E7,0x9D26,0x5393,0x54C0,0x57C3,/* 0xE8-0xEF */
+	0x5D16,0x611B,0x66D6,0x6DAF,0x788D,0x827E,0x9698,0x9744,/* 0xF0-0xF7 */
+	0x5384,0x627C,0x6396,0x6DB2,0x7E0A,0x814B,0x984D,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6AFB,0x7F4C,0x9DAF,0x9E1A,0x4E5F,0x503B,0x51B6,/* 0xA0-0xA7 */
+	0x591C,0x60F9,0x63F6,0x6930,0x723A,0x8036,0xF974,0x91CE,/* 0xA8-0xAF */
+	0x5F31,0xF975,0xF976,0x7D04,0x82E5,0x846F,0x84BB,0x85E5,/* 0xB0-0xB7 */
+	0x8E8D,0xF977,0x4F6F,0xF978,0xF979,0x58E4,0x5B43,0x6059,/* 0xB8-0xBF */
+	0x63DA,0x6518,0x656D,0x6698,0xF97A,0x694A,0x6A23,0x6D0B,/* 0xC0-0xC7 */
+	0x7001,0x716C,0x75D2,0x760D,0x79B3,0x7A70,0xF97B,0x7F8A,/* 0xC8-0xCF */
+	0xF97C,0x8944,0xF97D,0x8B93,0x91C0,0x967D,0xF97E,0x990A,/* 0xD0-0xD7 */
+	0x5704,0x5FA1,0x65BC,0x6F01,0x7600,0x79A6,0x8A9E,0x99AD,/* 0xD8-0xDF */
+	0x9B5A,0x9F6C,0x5104,0x61B6,0x6291,0x6A8D,0x81C6,0x5043,/* 0xE0-0xE7 */
+	0x5830,0x5F66,0x7109,0x8A00,0x8AFA,0x5B7C,0x8616,0x4FFA,/* 0xE8-0xEF */
+	0x513C,0x56B4,0x5944,0x63A9,0x6DF9,0x5DAA,0x696D,0x5186,/* 0xF0-0xF7 */
+	0x4E88,0x4F59,0xF97F,0xF980,0xF981,0x5982,0xF982,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xF983,0x6B5F,0x6C5D,0xF984,0x74B5,0x7916,0xF985,/* 0xA0-0xA7 */
+	0x8207,0x8245,0x8339,0x8F3F,0x8F5D,0xF986,0x9918,0xF987,/* 0xA8-0xAF */
+	0xF988,0xF989,0x4EA6,0xF98A,0x57DF,0x5F79,0x6613,0xF98B,/* 0xB0-0xB7 */
+	0xF98C,0x75AB,0x7E79,0x8B6F,0xF98D,0x9006,0x9A5B,0x56A5,/* 0xB8-0xBF */
+	0x5827,0x59F8,0x5A1F,0x5BB4,0xF98E,0x5EF6,0xF98F,0xF990,/* 0xC0-0xC7 */
+	0x6350,0x633B,0xF991,0x693D,0x6C87,0x6CBF,0x6D8E,0x6D93,/* 0xC8-0xCF */
+	0x6DF5,0x6F14,0xF992,0x70DF,0x7136,0x7159,0xF993,0x71C3,/* 0xD0-0xD7 */
+	0x71D5,0xF994,0x784F,0x786F,0xF995,0x7B75,0x7DE3,0xF996,/* 0xD8-0xDF */
+	0x7E2F,0xF997,0x884D,0x8EDF,0xF998,0xF999,0xF99A,0x925B,/* 0xE0-0xE7 */
+	0xF99B,0x9CF6,0xF99C,0xF99D,0xF99E,0x6085,0x6D85,0xF99F,/* 0xE8-0xEF */
+	0x71B1,0xF9A0,0xF9A1,0x95B1,0x53AD,0xF9A2,0xF9A3,0xF9A4,/* 0xF0-0xF7 */
+	0x67D3,0xF9A5,0x708E,0x7130,0x7430,0x8276,0x82D2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xF9A6,0x95BB,0x9AE5,0x9E7D,0x66C4,0xF9A7,0x71C1,/* 0xA0-0xA7 */
+	0x8449,0xF9A8,0xF9A9,0x584B,0xF9AA,0xF9AB,0x5DB8,0x5F71,/* 0xA8-0xAF */
+	0xF9AC,0x6620,0x668E,0x6979,0x69AE,0x6C38,0x6CF3,0x6E36,/* 0xB0-0xB7 */
+	0x6F41,0x6FDA,0x701B,0x702F,0x7150,0x71DF,0x7370,0xF9AD,/* 0xB8-0xBF */
+	0x745B,0xF9AE,0x74D4,0x76C8,0x7A4E,0x7E93,0xF9AF,0xF9B0,/* 0xC0-0xC7 */
+	0x82F1,0x8A60,0x8FCE,0xF9B1,0x9348,0xF9B2,0x9719,0xF9B3,/* 0xC8-0xCF */
+	0xF9B4,0x4E42,0x502A,0xF9B5,0x5208,0x53E1,0x66F3,0x6C6D,/* 0xD0-0xD7 */
+	0x6FCA,0x730A,0x777F,0x7A62,0x82AE,0x85DD,0x8602,0xF9B6,/* 0xD8-0xDF */
+	0x88D4,0x8A63,0x8B7D,0x8C6B,0xF9B7,0x92B3,0xF9B8,0x9713,/* 0xE0-0xE7 */
+	0x9810,0x4E94,0x4F0D,0x4FC9,0x50B2,0x5348,0x543E,0x5433,/* 0xE8-0xEF */
+	0x55DA,0x5862,0x58BA,0x5967,0x5A1B,0x5BE4,0x609F,0xF9B9,/* 0xF0-0xF7 */
+	0x61CA,0x6556,0x65FF,0x6664,0x68A7,0x6C5A,0x6FB3,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x70CF,0x71AC,0x7352,0x7B7D,0x8708,0x8AA4,0x9C32,/* 0xA0-0xA7 */
+	0x9F07,0x5C4B,0x6C83,0x7344,0x7389,0x923A,0x6EAB,0x7465,/* 0xA8-0xAF */
+	0x761F,0x7A69,0x7E15,0x860A,0x5140,0x58C5,0x64C1,0x74EE,/* 0xB0-0xB7 */
+	0x7515,0x7670,0x7FC1,0x9095,0x96CD,0x9954,0x6E26,0x74E6,/* 0xB8-0xBF */
+	0x7AA9,0x7AAA,0x81E5,0x86D9,0x8778,0x8A1B,0x5A49,0x5B8C,/* 0xC0-0xC7 */
+	0x5B9B,0x68A1,0x6900,0x6D63,0x73A9,0x7413,0x742C,0x7897,/* 0xC8-0xCF */
+	0x7DE9,0x7FEB,0x8118,0x8155,0x839E,0x8C4C,0x962E,0x9811,/* 0xD0-0xD7 */
+	0x66F0,0x5F80,0x65FA,0x6789,0x6C6A,0x738B,0x502D,0x5A03,/* 0xD8-0xDF */
+	0x6B6A,0x77EE,0x5916,0x5D6C,0x5DCD,0x7325,0x754F,0xF9BA,/* 0xE0-0xE7 */
+	0xF9BB,0x50E5,0x51F9,0x582F,0x592D,0x5996,0x59DA,0x5BE5,/* 0xE8-0xEF */
+	0xF9BC,0xF9BD,0x5DA2,0x62D7,0x6416,0x6493,0x64FE,0xF9BE,/* 0xF0-0xF7 */
+	0x66DC,0xF9BF,0x6A48,0xF9C0,0x71FF,0x7464,0xF9C1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7A88,0x7AAF,0x7E47,0x7E5E,0x8000,0x8170,0xF9C2,/* 0xA0-0xA7 */
+	0x87EF,0x8981,0x8B20,0x9059,0xF9C3,0x9080,0x9952,0x617E,/* 0xA8-0xAF */
+	0x6B32,0x6D74,0x7E1F,0x8925,0x8FB1,0x4FD1,0x50AD,0x5197,/* 0xB0-0xB7 */
+	0x52C7,0x57C7,0x5889,0x5BB9,0x5EB8,0x6142,0x6995,0x6D8C,/* 0xB8-0xBF */
+	0x6E67,0x6EB6,0x7194,0x7462,0x7528,0x752C,0x8073,0x8338,/* 0xC0-0xC7 */
+	0x84C9,0x8E0A,0x9394,0x93DE,0xF9C4,0x4E8E,0x4F51,0x5076,/* 0xC8-0xCF */
+	0x512A,0x53C8,0x53CB,0x53F3,0x5B87,0x5BD3,0x5C24,0x611A,/* 0xD0-0xD7 */
+	0x6182,0x65F4,0x725B,0x7397,0x7440,0x76C2,0x7950,0x7991,/* 0xD8-0xDF */
+	0x79B9,0x7D06,0x7FBD,0x828B,0x85D5,0x865E,0x8FC2,0x9047,/* 0xE0-0xE7 */
+	0x90F5,0x91EA,0x9685,0x96E8,0x96E9,0x52D6,0x5F67,0x65ED,/* 0xE8-0xEF */
+	0x6631,0x682F,0x715C,0x7A36,0x90C1,0x980A,0x4E91,0xF9C5,/* 0xF0-0xF7 */
+	0x6A52,0x6B9E,0x6F90,0x7189,0x8018,0x82B8,0x8553,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x904B,0x9695,0x96F2,0x97FB,0x851A,0x9B31,0x4E90,/* 0xA0-0xA7 */
+	0x718A,0x96C4,0x5143,0x539F,0x54E1,0x5713,0x5712,0x57A3,/* 0xA8-0xAF */
+	0x5A9B,0x5AC4,0x5BC3,0x6028,0x613F,0x63F4,0x6C85,0x6D39,/* 0xB0-0xB7 */
+	0x6E72,0x6E90,0x7230,0x733F,0x7457,0x82D1,0x8881,0x8F45,/* 0xB8-0xBF */
+	0x9060,0xF9C6,0x9662,0x9858,0x9D1B,0x6708,0x8D8A,0x925E,/* 0xC0-0xC7 */
+	0x4F4D,0x5049,0x50DE,0x5371,0x570D,0x59D4,0x5A01,0x5C09,/* 0xC8-0xCF */
+	0x6170,0x6690,0x6E2D,0x7232,0x744B,0x7DEF,0x80C3,0x840E,/* 0xD0-0xD7 */
+	0x8466,0x853F,0x875F,0x885B,0x8918,0x8B02,0x9055,0x97CB,/* 0xD8-0xDF */
+	0x9B4F,0x4E73,0x4F91,0x5112,0x516A,0xF9C7,0x552F,0x55A9,/* 0xE0-0xE7 */
+	0x5B7A,0x5BA5,0x5E7C,0x5E7D,0x5EBE,0x60A0,0x60DF,0x6108,/* 0xE8-0xEF */
+	0x6109,0x63C4,0x6538,0x6709,0xF9C8,0x67D4,0x67DA,0xF9C9,/* 0xF0-0xF7 */
+	0x6961,0x6962,0x6CB9,0x6D27,0xF9CA,0x6E38,0xF9CB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6FE1,0x7336,0x7337,0xF9CC,0x745C,0x7531,0xF9CD,/* 0xA0-0xA7 */
+	0x7652,0xF9CE,0xF9CF,0x7DAD,0x81FE,0x8438,0x88D5,0x8A98,/* 0xA8-0xAF */
+	0x8ADB,0x8AED,0x8E30,0x8E42,0x904A,0x903E,0x907A,0x9149,/* 0xB0-0xB7 */
+	0x91C9,0x936E,0xF9D0,0xF9D1,0x5809,0xF9D2,0x6BD3,0x8089,/* 0xB8-0xBF */
+	0x80B2,0xF9D3,0xF9D4,0x5141,0x596B,0x5C39,0xF9D5,0xF9D6,/* 0xC0-0xC7 */
+	0x6F64,0x73A7,0x80E4,0x8D07,0xF9D7,0x9217,0x958F,0xF9D8,/* 0xC8-0xCF */
+	0xF9D9,0xF9DA,0xF9DB,0x807F,0x620E,0x701C,0x7D68,0x878D,/* 0xD0-0xD7 */
+	0xF9DC,0x57A0,0x6069,0x6147,0x6BB7,0x8ABE,0x9280,0x96B1,/* 0xD8-0xDF */
+	0x4E59,0x541F,0x6DEB,0x852D,0x9670,0x97F3,0x98EE,0x63D6,/* 0xE0-0xE7 */
+	0x6CE3,0x9091,0x51DD,0x61C9,0x81BA,0x9DF9,0x4F9D,0x501A,/* 0xE8-0xEF */
+	0x5100,0x5B9C,0x610F,0x61FF,0x64EC,0x6905,0x6BC5,0x7591,/* 0xF0-0xF7 */
+	0x77E3,0x7FA9,0x8264,0x858F,0x87FB,0x8863,0x8ABC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8B70,0x91AB,0x4E8C,0x4EE5,0x4F0A,0xF9DD,0xF9DE,/* 0xA0-0xA7 */
+	0x5937,0x59E8,0xF9DF,0x5DF2,0x5F1B,0x5F5B,0x6021,0xF9E0,/* 0xA8-0xAF */
+	0xF9E1,0xF9E2,0xF9E3,0x723E,0x73E5,0xF9E4,0x7570,0x75CD,/* 0xB0-0xB7 */
+	0xF9E5,0x79FB,0xF9E6,0x800C,0x8033,0x8084,0x82E1,0x8351,/* 0xB8-0xBF */
+	0xF9E7,0xF9E8,0x8CBD,0x8CB3,0x9087,0xF9E9,0xF9EA,0x98F4,/* 0xC0-0xC7 */
+	0x990C,0xF9EB,0xF9EC,0x7037,0x76CA,0x7FCA,0x7FCC,0x7FFC,/* 0xC8-0xCF */
+	0x8B1A,0x4EBA,0x4EC1,0x5203,0x5370,0xF9ED,0x54BD,0x56E0,/* 0xD0-0xD7 */
+	0x59FB,0x5BC5,0x5F15,0x5FCD,0x6E6E,0xF9EE,0xF9EF,0x7D6A,/* 0xD8-0xDF */
+	0x8335,0xF9F0,0x8693,0x8A8D,0xF9F1,0x976D,0x9777,0xF9F2,/* 0xE0-0xE7 */
+	0xF9F3,0x4E00,0x4F5A,0x4F7E,0x58F9,0x65E5,0x6EA2,0x9038,/* 0xE8-0xEF */
+	0x93B0,0x99B9,0x4EFB,0x58EC,0x598A,0x59D9,0x6041,0xF9F4,/* 0xF0-0xF7 */
+	0xF9F5,0x7A14,0xF9F6,0x834F,0x8CC3,0x5165,0x5344,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_ED[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xF9F7,0xF9F8,0xF9F9,0x4ECD,0x5269,0x5B55,0x82BF,/* 0xA0-0xA7 */
+	0x4ED4,0x523A,0x54A8,0x59C9,0x59FF,0x5B50,0x5B57,0x5B5C,/* 0xA8-0xAF */
+	0x6063,0x6148,0x6ECB,0x7099,0x716E,0x7386,0x74F7,0x75B5,/* 0xB0-0xB7 */
+	0x78C1,0x7D2B,0x8005,0x81EA,0x8328,0x8517,0x85C9,0x8AEE,/* 0xB8-0xBF */
+	0x8CC7,0x96CC,0x4F5C,0x52FA,0x56BC,0x65AB,0x6628,0x707C,/* 0xC0-0xC7 */
+	0x70B8,0x7235,0x7DBD,0x828D,0x914C,0x96C0,0x9D72,0x5B71,/* 0xC8-0xCF */
+	0x68E7,0x6B98,0x6F7A,0x76DE,0x5C91,0x66AB,0x6F5B,0x7BB4,/* 0xD0-0xD7 */
+	0x7C2A,0x8836,0x96DC,0x4E08,0x4ED7,0x5320,0x5834,0x58BB,/* 0xD8-0xDF */
+	0x58EF,0x596C,0x5C07,0x5E33,0x5E84,0x5F35,0x638C,0x66B2,/* 0xE0-0xE7 */
+	0x6756,0x6A1F,0x6AA3,0x6B0C,0x6F3F,0x7246,0xF9FA,0x7350,/* 0xE8-0xEF */
+	0x748B,0x7AE0,0x7CA7,0x8178,0x81DF,0x81E7,0x838A,0x846C,/* 0xF0-0xF7 */
+	0x8523,0x8594,0x85CF,0x88DD,0x8D13,0x91AC,0x9577,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x969C,0x518D,0x54C9,0x5728,0x5BB0,0x624D,0x6750,/* 0xA0-0xA7 */
+	0x683D,0x6893,0x6E3D,0x6ED3,0x707D,0x7E21,0x88C1,0x8CA1,/* 0xA8-0xAF */
+	0x8F09,0x9F4B,0x9F4E,0x722D,0x7B8F,0x8ACD,0x931A,0x4F47,/* 0xB0-0xB7 */
+	0x4F4E,0x5132,0x5480,0x59D0,0x5E95,0x62B5,0x6775,0x696E,/* 0xB8-0xBF */
+	0x6A17,0x6CAE,0x6E1A,0x72D9,0x732A,0x75BD,0x7BB8,0x7D35,/* 0xC0-0xC7 */
+	0x82E7,0x83F9,0x8457,0x85F7,0x8A5B,0x8CAF,0x8E87,0x9019,/* 0xC8-0xCF */
+	0x90B8,0x96CE,0x9F5F,0x52E3,0x540A,0x5AE1,0x5BC2,0x6458,/* 0xD0-0xD7 */
+	0x6575,0x6EF4,0x72C4,0xF9FB,0x7684,0x7A4D,0x7B1B,0x7C4D,/* 0xD8-0xDF */
+	0x7E3E,0x7FDF,0x837B,0x8B2B,0x8CCA,0x8D64,0x8DE1,0x8E5F,/* 0xE0-0xE7 */
+	0x8FEA,0x8FF9,0x9069,0x93D1,0x4F43,0x4F7A,0x50B3,0x5168,/* 0xE8-0xEF */
+	0x5178,0x524D,0x526A,0x5861,0x587C,0x5960,0x5C08,0x5C55,/* 0xF0-0xF7 */
+	0x5EDB,0x609B,0x6230,0x6813,0x6BBF,0x6C08,0x6FB1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x714E,0x7420,0x7530,0x7538,0x7551,0x7672,0x7B4C,/* 0xA0-0xA7 */
+	0x7B8B,0x7BAD,0x7BC6,0x7E8F,0x8A6E,0x8F3E,0x8F49,0x923F,/* 0xA8-0xAF */
+	0x9293,0x9322,0x942B,0x96FB,0x985A,0x986B,0x991E,0x5207,/* 0xB0-0xB7 */
+	0x622A,0x6298,0x6D59,0x7664,0x7ACA,0x7BC0,0x7D76,0x5360,/* 0xB8-0xBF */
+	0x5CBE,0x5E97,0x6F38,0x70B9,0x7C98,0x9711,0x9B8E,0x9EDE,/* 0xC0-0xC7 */
+	0x63A5,0x647A,0x8776,0x4E01,0x4E95,0x4EAD,0x505C,0x5075,/* 0xC8-0xCF */
+	0x5448,0x59C3,0x5B9A,0x5E40,0x5EAD,0x5EF7,0x5F81,0x60C5,/* 0xD0-0xD7 */
+	0x633A,0x653F,0x6574,0x65CC,0x6676,0x6678,0x67FE,0x6968,/* 0xD8-0xDF */
+	0x6A89,0x6B63,0x6C40,0x6DC0,0x6DE8,0x6E1F,0x6E5E,0x701E,/* 0xE0-0xE7 */
+	0x70A1,0x738E,0x73FD,0x753A,0x775B,0x7887,0x798E,0x7A0B,/* 0xE8-0xEF */
+	0x7A7D,0x7CBE,0x7D8E,0x8247,0x8A02,0x8AEA,0x8C9E,0x912D,/* 0xF0-0xF7 */
+	0x914A,0x91D8,0x9266,0x92CC,0x9320,0x9706,0x9756,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x975C,0x9802,0x9F0E,0x5236,0x5291,0x557C,0x5824,/* 0xA0-0xA7 */
+	0x5E1D,0x5F1F,0x608C,0x63D0,0x68AF,0x6FDF,0x796D,0x7B2C,/* 0xA8-0xAF */
+	0x81CD,0x85BA,0x88FD,0x8AF8,0x8E44,0x918D,0x9664,0x969B,/* 0xB0-0xB7 */
+	0x973D,0x984C,0x9F4A,0x4FCE,0x5146,0x51CB,0x52A9,0x5632,/* 0xB8-0xBF */
+	0x5F14,0x5F6B,0x63AA,0x64CD,0x65E9,0x6641,0x66FA,0x66F9,/* 0xC0-0xC7 */
+	0x671D,0x689D,0x68D7,0x69FD,0x6F15,0x6F6E,0x7167,0x71E5,/* 0xC8-0xCF */
+	0x722A,0x74AA,0x773A,0x7956,0x795A,0x79DF,0x7A20,0x7A95,/* 0xD0-0xD7 */
+	0x7C97,0x7CDF,0x7D44,0x7E70,0x8087,0x85FB,0x86A4,0x8A54,/* 0xD8-0xDF */
+	0x8ABF,0x8D99,0x8E81,0x9020,0x906D,0x91E3,0x963B,0x96D5,/* 0xE0-0xE7 */
+	0x9CE5,0x65CF,0x7C07,0x8DB3,0x93C3,0x5B58,0x5C0A,0x5352,/* 0xE8-0xEF */
+	0x62D9,0x731D,0x5027,0x5B97,0x5F9E,0x60B0,0x616B,0x68D5,/* 0xF0-0xF7 */
+	0x6DD9,0x742E,0x7A2E,0x7D42,0x7D9C,0x7E31,0x816B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8E2A,0x8E35,0x937E,0x9418,0x4F50,0x5750,0x5DE6,/* 0xA0-0xA7 */
+	0x5EA7,0x632B,0x7F6A,0x4E3B,0x4F4F,0x4F8F,0x505A,0x59DD,/* 0xA8-0xAF */
+	0x80C4,0x546A,0x5468,0x55FE,0x594F,0x5B99,0x5DDE,0x5EDA,/* 0xB0-0xB7 */
+	0x665D,0x6731,0x67F1,0x682A,0x6CE8,0x6D32,0x6E4A,0x6F8D,/* 0xB8-0xBF */
+	0x70B7,0x73E0,0x7587,0x7C4C,0x7D02,0x7D2C,0x7DA2,0x821F,/* 0xC0-0xC7 */
+	0x86DB,0x8A3B,0x8A85,0x8D70,0x8E8A,0x8F33,0x9031,0x914E,/* 0xC8-0xCF */
+	0x9152,0x9444,0x99D0,0x7AF9,0x7CA5,0x4FCA,0x5101,0x51C6,/* 0xD0-0xD7 */
+	0x57C8,0x5BEF,0x5CFB,0x6659,0x6A3D,0x6D5A,0x6E96,0x6FEC,/* 0xD8-0xDF */
+	0x710C,0x756F,0x7AE3,0x8822,0x9021,0x9075,0x96CB,0x99FF,/* 0xE0-0xE7 */
+	0x8301,0x4E2D,0x4EF2,0x8846,0x91CD,0x537D,0x6ADB,0x696B,/* 0xE8-0xEF */
+	0x6C41,0x847A,0x589E,0x618E,0x66FE,0x62EF,0x70DD,0x7511,/* 0xF0-0xF7 */
+	0x75C7,0x7E52,0x84B8,0x8B49,0x8D08,0x4E4B,0x53EA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x54AB,0x5730,0x5740,0x5FD7,0x6301,0x6307,0x646F,/* 0xA0-0xA7 */
+	0x652F,0x65E8,0x667A,0x679D,0x67B3,0x6B62,0x6C60,0x6C9A,/* 0xA8-0xAF */
+	0x6F2C,0x77E5,0x7825,0x7949,0x7957,0x7D19,0x80A2,0x8102,/* 0xB0-0xB7 */
+	0x81F3,0x829D,0x82B7,0x8718,0x8A8C,0xF9FC,0x8D04,0x8DBE,/* 0xB8-0xBF */
+	0x9072,0x76F4,0x7A19,0x7A37,0x7E54,0x8077,0x5507,0x55D4,/* 0xC0-0xC7 */
+	0x5875,0x632F,0x6422,0x6649,0x664B,0x686D,0x699B,0x6B84,/* 0xC8-0xCF */
+	0x6D25,0x6EB1,0x73CD,0x7468,0x74A1,0x755B,0x75B9,0x76E1,/* 0xD0-0xD7 */
+	0x771E,0x778B,0x79E6,0x7E09,0x7E1D,0x81FB,0x852F,0x8897,/* 0xD8-0xDF */
+	0x8A3A,0x8CD1,0x8EEB,0x8FB0,0x9032,0x93AD,0x9663,0x9673,/* 0xE0-0xE7 */
+	0x9707,0x4F84,0x53F1,0x59EA,0x5AC9,0x5E19,0x684E,0x74C6,/* 0xE8-0xEF */
+	0x75BE,0x79E9,0x7A92,0x81A3,0x86ED,0x8CEA,0x8DCC,0x8FED,/* 0xF0-0xF7 */
+	0x659F,0x6715,0xF9FD,0x57F7,0x6F57,0x7DDD,0x8F2F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x93F6,0x96C6,0x5FB5,0x61F2,0x6F84,0x4E14,0x4F98,/* 0xA0-0xA7 */
+	0x501F,0x53C9,0x55DF,0x5D6F,0x5DEE,0x6B21,0x6B64,0x78CB,/* 0xA8-0xAF */
+	0x7B9A,0xF9FE,0x8E49,0x8ECA,0x906E,0x6349,0x643E,0x7740,/* 0xB0-0xB7 */
+	0x7A84,0x932F,0x947F,0x9F6A,0x64B0,0x6FAF,0x71E6,0x74A8,/* 0xB8-0xBF */
+	0x74DA,0x7AC4,0x7C12,0x7E82,0x7CB2,0x7E98,0x8B9A,0x8D0A,/* 0xC0-0xC7 */
+	0x947D,0x9910,0x994C,0x5239,0x5BDF,0x64E6,0x672D,0x7D2E,/* 0xC8-0xCF */
+	0x50ED,0x53C3,0x5879,0x6158,0x6159,0x61FA,0x65AC,0x7AD9,/* 0xD0-0xD7 */
+	0x8B92,0x8B96,0x5009,0x5021,0x5275,0x5531,0x5A3C,0x5EE0,/* 0xD8-0xDF */
+	0x5F70,0x6134,0x655E,0x660C,0x6636,0x66A2,0x69CD,0x6EC4,/* 0xE0-0xE7 */
+	0x6F32,0x7316,0x7621,0x7A93,0x8139,0x8259,0x83D6,0x84BC,/* 0xE8-0xEF */
+	0x50B5,0x57F0,0x5BC0,0x5BE8,0x5F69,0x63A1,0x7826,0x7DB5,/* 0xF0-0xF7 */
+	0x83DC,0x8521,0x91C7,0x91F5,0x518A,0x67F5,0x7B56,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8CAC,0x51C4,0x59BB,0x60BD,0x8655,0x501C,0xF9FF,/* 0xA0-0xA7 */
+	0x5254,0x5C3A,0x617D,0x621A,0x62D3,0x64F2,0x65A5,0x6ECC,/* 0xA8-0xAF */
+	0x7620,0x810A,0x8E60,0x965F,0x96BB,0x4EDF,0x5343,0x5598,/* 0xB0-0xB7 */
+	0x5929,0x5DDD,0x64C5,0x6CC9,0x6DFA,0x7394,0x7A7F,0x821B,/* 0xB8-0xBF */
+	0x85A6,0x8CE4,0x8E10,0x9077,0x91E7,0x95E1,0x9621,0x97C6,/* 0xC0-0xC7 */
+	0x51F8,0x54F2,0x5586,0x5FB9,0x64A4,0x6F88,0x7DB4,0x8F1F,/* 0xC8-0xCF */
+	0x8F4D,0x9435,0x50C9,0x5C16,0x6CBE,0x6DFB,0x751B,0x77BB,/* 0xD0-0xD7 */
+	0x7C3D,0x7C64,0x8A79,0x8AC2,0x581E,0x59BE,0x5E16,0x6377,/* 0xD8-0xDF */
+	0x7252,0x758A,0x776B,0x8ADC,0x8CBC,0x8F12,0x5EF3,0x6674,/* 0xE0-0xE7 */
+	0x6DF8,0x807D,0x83C1,0x8ACB,0x9751,0x9BD6,0xFA00,0x5243,/* 0xE8-0xEF */
+	0x66FF,0x6D95,0x6EEF,0x7DE0,0x8AE6,0x902E,0x905E,0x9AD4,/* 0xF0-0xF7 */
+	0x521D,0x527F,0x54E8,0x6194,0x6284,0x62DB,0x68A2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6912,0x695A,0x6A35,0x7092,0x7126,0x785D,0x7901,/* 0xA0-0xA7 */
+	0x790E,0x79D2,0x7A0D,0x8096,0x8278,0x82D5,0x8349,0x8549,/* 0xA8-0xAF */
+	0x8C82,0x8D85,0x9162,0x918B,0x91AE,0x4FC3,0x56D1,0x71ED,/* 0xB0-0xB7 */
+	0x77D7,0x8700,0x89F8,0x5BF8,0x5FD6,0x6751,0x90A8,0x53E2,/* 0xB8-0xBF */
+	0x585A,0x5BF5,0x60A4,0x6181,0x6460,0x7E3D,0x8070,0x8525,/* 0xC0-0xC7 */
+	0x9283,0x64AE,0x50AC,0x5D14,0x6700,0x589C,0x62BD,0x63A8,/* 0xC8-0xCF */
+	0x690E,0x6978,0x6A1E,0x6E6B,0x76BA,0x79CB,0x82BB,0x8429,/* 0xD0-0xD7 */
+	0x8ACF,0x8DA8,0x8FFD,0x9112,0x914B,0x919C,0x9310,0x9318,/* 0xD8-0xDF */
+	0x939A,0x96DB,0x9A36,0x9C0D,0x4E11,0x755C,0x795D,0x7AFA,/* 0xE0-0xE7 */
+	0x7B51,0x7BC9,0x7E2E,0x84C4,0x8E59,0x8E74,0x8EF8,0x9010,/* 0xE8-0xEF */
+	0x6625,0x693F,0x7443,0x51FA,0x672E,0x9EDC,0x5145,0x5FE0,/* 0xF0-0xF7 */
+	0x6C96,0x87F2,0x885D,0x8877,0x60B4,0x81B5,0x8403,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8D05,0x53D6,0x5439,0x5634,0x5A36,0x5C31,0x708A,/* 0xA0-0xA7 */
+	0x7FE0,0x805A,0x8106,0x81ED,0x8DA3,0x9189,0x9A5F,0x9DF2,/* 0xA8-0xAF */
+	0x5074,0x4EC4,0x53A0,0x60FB,0x6E2C,0x5C64,0x4F88,0x5024,/* 0xB0-0xB7 */
+	0x55E4,0x5CD9,0x5E5F,0x6065,0x6894,0x6CBB,0x6DC4,0x71BE,/* 0xB8-0xBF */
+	0x75D4,0x75F4,0x7661,0x7A1A,0x7A49,0x7DC7,0x7DFB,0x7F6E,/* 0xC0-0xC7 */
+	0x81F4,0x86A9,0x8F1C,0x96C9,0x99B3,0x9F52,0x5247,0x52C5,/* 0xC8-0xCF */
+	0x98ED,0x89AA,0x4E03,0x67D2,0x6F06,0x4FB5,0x5BE2,0x6795,/* 0xD0-0xD7 */
+	0x6C88,0x6D78,0x741B,0x7827,0x91DD,0x937C,0x87C4,0x79E4,/* 0xD8-0xDF */
+	0x7A31,0x5FEB,0x4ED6,0x54A4,0x553E,0x58AE,0x59A5,0x60F0,/* 0xE0-0xE7 */
+	0x6253,0x62D6,0x6736,0x6955,0x8235,0x9640,0x99B1,0x99DD,/* 0xE8-0xEF */
+	0x502C,0x5353,0x5544,0x577C,0xFA01,0x6258,0xFA02,0x64E2,/* 0xF0-0xF7 */
+	0x666B,0x67DD,0x6FC1,0x6FEF,0x7422,0x7438,0x8A17,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9438,0x5451,0x5606,0x5766,0x5F48,0x619A,0x6B4E,/* 0xA0-0xA7 */
+	0x7058,0x70AD,0x7DBB,0x8A95,0x596A,0x812B,0x63A2,0x7708,/* 0xA8-0xAF */
+	0x803D,0x8CAA,0x5854,0x642D,0x69BB,0x5B95,0x5E11,0x6E6F,/* 0xB0-0xB7 */
+	0xFA03,0x8569,0x514C,0x53F0,0x592A,0x6020,0x614B,0x6B86,/* 0xB8-0xBF */
+	0x6C70,0x6CF0,0x7B1E,0x80CE,0x82D4,0x8DC6,0x90B0,0x98B1,/* 0xC0-0xC7 */
+	0xFA04,0x64C7,0x6FA4,0x6491,0x6504,0x514E,0x5410,0x571F,/* 0xC8-0xCF */
+	0x8A0E,0x615F,0x6876,0xFA05,0x75DB,0x7B52,0x7D71,0x901A,/* 0xD0-0xD7 */
+	0x5806,0x69CC,0x817F,0x892A,0x9000,0x9839,0x5078,0x5957,/* 0xD8-0xDF */
+	0x59AC,0x6295,0x900F,0x9B2A,0x615D,0x7279,0x95D6,0x5761,/* 0xE0-0xE7 */
+	0x5A46,0x5DF4,0x628A,0x64AD,0x64FA,0x6777,0x6CE2,0x6D3E,/* 0xE8-0xEF */
+	0x722C,0x7436,0x7834,0x7F77,0x82AD,0x8DDB,0x9817,0x5224,/* 0xF0-0xF7 */
+	0x5742,0x677F,0x7248,0x74E3,0x8CA9,0x8FA6,0x9211,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x962A,0x516B,0x53ED,0x634C,0x4F69,0x5504,0x6096,/* 0xA0-0xA7 */
+	0x6557,0x6C9B,0x6D7F,0x724C,0x72FD,0x7A17,0x8987,0x8C9D,/* 0xA8-0xAF */
+	0x5F6D,0x6F8E,0x70F9,0x81A8,0x610E,0x4FBF,0x504F,0x6241,/* 0xB0-0xB7 */
+	0x7247,0x7BC7,0x7DE8,0x7FE9,0x904D,0x97AD,0x9A19,0x8CB6,/* 0xB8-0xBF */
+	0x576A,0x5E73,0x67B0,0x840D,0x8A55,0x5420,0x5B16,0x5E63,/* 0xC0-0xC7 */
+	0x5EE2,0x5F0A,0x6583,0x80BA,0x853D,0x9589,0x965B,0x4F48,/* 0xC8-0xCF */
+	0x5305,0x530D,0x530F,0x5486,0x54FA,0x5703,0x5E03,0x6016,/* 0xD0-0xD7 */
+	0x629B,0x62B1,0x6355,0xFA06,0x6CE1,0x6D66,0x75B1,0x7832,/* 0xD8-0xDF */
+	0x80DE,0x812F,0x82DE,0x8461,0x84B2,0x888D,0x8912,0x900B,/* 0xE0-0xE7 */
+	0x92EA,0x98FD,0x9B91,0x5E45,0x66B4,0x66DD,0x7011,0x7206,/* 0xE8-0xEF */
+	0xFA07,0x4FF5,0x527D,0x5F6A,0x6153,0x6753,0x6A19,0x6F02,/* 0xF0-0xF7 */
+	0x74E2,0x7968,0x8868,0x8C79,0x98C7,0x98C4,0x9A43,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x54C1,0x7A1F,0x6953,0x8AF7,0x8C4A,0x98A8,0x99AE,/* 0xA0-0xA7 */
+	0x5F7C,0x62AB,0x75B2,0x76AE,0x88AB,0x907F,0x9642,0x5339,/* 0xA8-0xAF */
+	0x5F3C,0x5FC5,0x6CCC,0x73CC,0x7562,0x758B,0x7B46,0x82FE,/* 0xB0-0xB7 */
+	0x999D,0x4E4F,0x903C,0x4E0B,0x4F55,0x53A6,0x590F,0x5EC8,/* 0xB8-0xBF */
+	0x6630,0x6CB3,0x7455,0x8377,0x8766,0x8CC0,0x9050,0x971E,/* 0xC0-0xC7 */
+	0x9C15,0x58D1,0x5B78,0x8650,0x8B14,0x9DB4,0x5BD2,0x6068,/* 0xC8-0xCF */
+	0x608D,0x65F1,0x6C57,0x6F22,0x6FA3,0x701A,0x7F55,0x7FF0,/* 0xD0-0xD7 */
+	0x9591,0x9592,0x9650,0x97D3,0x5272,0x8F44,0x51FD,0x542B,/* 0xD8-0xDF */
+	0x54B8,0x5563,0x558A,0x6ABB,0x6DB5,0x7DD8,0x8266,0x929C,/* 0xE0-0xE7 */
+	0x9677,0x9E79,0x5408,0x54C8,0x76D2,0x86E4,0x95A4,0x95D4,/* 0xE8-0xEF */
+	0x965C,0x4EA2,0x4F09,0x59EE,0x5AE6,0x5DF7,0x6052,0x6297,/* 0xF0-0xF7 */
+	0x676D,0x6841,0x6C86,0x6E2F,0x7F38,0x809B,0x822A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xFA08,0xFA09,0x9805,0x4EA5,0x5055,0x54B3,0x5793,/* 0xA0-0xA7 */
+	0x595A,0x5B69,0x5BB3,0x61C8,0x6977,0x6D77,0x7023,0x87F9,/* 0xA8-0xAF */
+	0x89E3,0x8A72,0x8AE7,0x9082,0x99ED,0x9AB8,0x52BE,0x6838,/* 0xB0-0xB7 */
+	0x5016,0x5E78,0x674F,0x8347,0x884C,0x4EAB,0x5411,0x56AE,/* 0xB8-0xBF */
+	0x73E6,0x9115,0x97FF,0x9909,0x9957,0x9999,0x5653,0x589F,/* 0xC0-0xC7 */
+	0x865B,0x8A31,0x61B2,0x6AF6,0x737B,0x8ED2,0x6B47,0x96AA,/* 0xC8-0xCF */
+	0x9A57,0x5955,0x7200,0x8D6B,0x9769,0x4FD4,0x5CF4,0x5F26,/* 0xD0-0xD7 */
+	0x61F8,0x665B,0x6CEB,0x70AB,0x7384,0x73B9,0x73FE,0x7729,/* 0xD8-0xDF */
+	0x774D,0x7D43,0x7D62,0x7E23,0x8237,0x8852,0xFA0A,0x8CE2,/* 0xE0-0xE7 */
+	0x9249,0x986F,0x5B51,0x7A74,0x8840,0x9801,0x5ACC,0x4FE0,/* 0xE8-0xEF */
+	0x5354,0x593E,0x5CFD,0x633E,0x6D79,0x72F9,0x8105,0x8107,/* 0xF0-0xF7 */
+	0x83A2,0x92CF,0x9830,0x4EA8,0x5144,0x5211,0x578B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5F62,0x6CC2,0x6ECE,0x7005,0x7050,0x70AF,0x7192,/* 0xA0-0xA7 */
+	0x73E9,0x7469,0x834A,0x87A2,0x8861,0x9008,0x90A2,0x93A3,/* 0xA8-0xAF */
+	0x99A8,0x516E,0x5F57,0x60E0,0x6167,0x66B3,0x8559,0x8E4A,/* 0xB0-0xB7 */
+	0x91AF,0x978B,0x4E4E,0x4E92,0x547C,0x58D5,0x58FA,0x597D,/* 0xB8-0xBF */
+	0x5CB5,0x5F27,0x6236,0x6248,0x660A,0x6667,0x6BEB,0x6D69,/* 0xC0-0xC7 */
+	0x6DCF,0x6E56,0x6EF8,0x6F94,0x6FE0,0x6FE9,0x705D,0x72D0,/* 0xC8-0xCF */
+	0x7425,0x745A,0x74E0,0x7693,0x795C,0x7CCA,0x7E1E,0x80E1,/* 0xD0-0xD7 */
+	0x82A6,0x846B,0x84BF,0x864E,0x865F,0x8774,0x8B77,0x8C6A,/* 0xD8-0xDF */
+	0x93AC,0x9800,0x9865,0x60D1,0x6216,0x9177,0x5A5A,0x660F,/* 0xE0-0xE7 */
+	0x6DF7,0x6E3E,0x743F,0x9B42,0x5FFD,0x60DA,0x7B0F,0x54C4,/* 0xE8-0xEF */
+	0x5F18,0x6C5E,0x6CD3,0x6D2A,0x70D8,0x7D05,0x8679,0x8A0C,/* 0xF0-0xF7 */
+	0x9D3B,0x5316,0x548C,0x5B05,0x6A3A,0x706B,0x7575,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x798D,0x79BE,0x82B1,0x83EF,0x8A71,0x8B41,0x8CA8,/* 0xA0-0xA7 */
+	0x9774,0xFA0B,0x64F4,0x652B,0x78BA,0x78BB,0x7A6B,0x4E38,/* 0xA8-0xAF */
+	0x559A,0x5950,0x5BA6,0x5E7B,0x60A3,0x63DB,0x6B61,0x6665,/* 0xB0-0xB7 */
+	0x6853,0x6E19,0x7165,0x74B0,0x7D08,0x9084,0x9A69,0x9C25,/* 0xB8-0xBF */
+	0x6D3B,0x6ED1,0x733E,0x8C41,0x95CA,0x51F0,0x5E4C,0x5FA8,/* 0xC0-0xC7 */
+	0x604D,0x60F6,0x6130,0x614C,0x6643,0x6644,0x69A5,0x6CC1,/* 0xC8-0xCF */
+	0x6E5F,0x6EC9,0x6F62,0x714C,0x749C,0x7687,0x7BC1,0x7C27,/* 0xD0-0xD7 */
+	0x8352,0x8757,0x9051,0x968D,0x9EC3,0x532F,0x56DE,0x5EFB,/* 0xD8-0xDF */
+	0x5F8A,0x6062,0x6094,0x61F7,0x6666,0x6703,0x6A9C,0x6DEE,/* 0xE0-0xE7 */
+	0x6FAE,0x7070,0x736A,0x7E6A,0x81BE,0x8334,0x86D4,0x8AA8,/* 0xE8-0xEF */
+	0x8CC4,0x5283,0x7372,0x5B96,0x6A6B,0x9404,0x54EE,0x5686,/* 0xF0-0xF7 */
+	0x5B5D,0x6548,0x6585,0x66C9,0x689F,0x6D8D,0x6DC6,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_FD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x40-0x47 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x48-0x4F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x50-0x57 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x58-0x5F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x60-0x67 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x68-0x6F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x70-0x77 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x723B,0x80B4,0x9175,0x9A4D,0x4FAF,0x5019,0x539A,/* 0xA0-0xA7 */
+	0x540E,0x543C,0x5589,0x55C5,0x5E3F,0x5F8C,0x673D,0x7166,/* 0xA8-0xAF */
+	0x73DD,0x9005,0x52DB,0x52F3,0x5864,0x58CE,0x7104,0x718F,/* 0xB0-0xB7 */
+	0x71FB,0x85B0,0x8A13,0x6688,0x85A8,0x55A7,0x6684,0x714A,/* 0xB8-0xBF */
+	0x8431,0x5349,0x5599,0x6BC1,0x5F59,0x5FBD,0x63EE,0x6689,/* 0xC0-0xC7 */
+	0x7147,0x8AF1,0x8F1D,0x9EBE,0x4F11,0x643A,0x70CB,0x7566,/* 0xC8-0xCF */
+	0x8667,0x6064,0x8B4E,0x9DF8,0x5147,0x51F6,0x5308,0x6D36,/* 0xD0-0xD7 */
+	0x80F8,0x9ED1,0x6615,0x6B23,0x7098,0x75D5,0x5403,0x5C79,/* 0xD8-0xDF */
+	0x7D07,0x8A16,0x6B20,0x6B3D,0x6B46,0x5438,0x6070,0x6D3D,/* 0xE0-0xE7 */
+	0x7FD5,0x8208,0x50D6,0x51DE,0x559C,0x566B,0x56CD,0x59EC,/* 0xE8-0xEF */
+	0x5B09,0x5E0C,0x6199,0x6198,0x6231,0x665E,0x66E6,0x7199,/* 0xF0-0xF7 */
+	0x71B9,0x71BA,0x72A7,0x79A7,0x7A00,0x7FB2,0x8A70,0x0000,/* 0xF8-0xFF */
+};
+
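+/*
+ * Charset-to-Unicode direction.  Each c2u_XX page above covers the
+ * double-byte codes whose lead byte is 0xXX: the trail byte indexes the
+ * 256-entry page, and a value of 0x0000 marks an unassigned code.  The
+ * page_charset2uni[] array below selects the page by lead byte; NULL
+ * entries stand for lead bytes with no assigned codes.
+ *
+ * Illustrative sketch only, not part of the generated tables: looking up
+ * the code (lead, trail) would amount to
+ *
+ *	wchar_t *page = page_charset2uni[lead];
+ *	wchar_t uni = page ? page[trail] : 0x0000;
+ */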
+static wchar_t *page_charset2uni[256] = {
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   c2u_81, c2u_82, c2u_83, c2u_84, c2u_85, c2u_86, c2u_87, 
+	c2u_88, c2u_89, c2u_8A, c2u_8B, c2u_8C, c2u_8D, c2u_8E, c2u_8F, 
+	c2u_90, c2u_91, c2u_92, c2u_93, c2u_94, c2u_95, c2u_96, c2u_97, 
+	c2u_98, c2u_99, c2u_9A, c2u_9B, c2u_9C, c2u_9D, c2u_9E, c2u_9F, 
+	c2u_A0, c2u_A1, c2u_A2, c2u_A3, c2u_A4, c2u_A5, c2u_A6, c2u_A7, 
+	c2u_A8, c2u_A9, c2u_AA, c2u_AB, c2u_AC, c2u_AD, c2u_AE, c2u_AF, 
+	c2u_B0, c2u_B1, c2u_B2, c2u_B3, c2u_B4, c2u_B5, c2u_B6, c2u_B7, 
+	c2u_B8, c2u_B9, c2u_BA, c2u_BB, c2u_BC, c2u_BD, c2u_BE, c2u_BF, 
+	c2u_C0, c2u_C1, c2u_C2, c2u_C3, c2u_C4, c2u_C5, c2u_C6, c2u_C7, 
+	c2u_C8, NULL,   c2u_CA, c2u_CB, c2u_CC, c2u_CD, c2u_CE, c2u_CF, 
+	c2u_D0, c2u_D1, c2u_D2, c2u_D3, c2u_D4, c2u_D5, c2u_D6, c2u_D7, 
+	c2u_D8, c2u_D9, c2u_DA, c2u_DB, c2u_DC, c2u_DD, c2u_DE, c2u_DF, 
+	c2u_E0, c2u_E1, c2u_E2, c2u_E3, c2u_E4, c2u_E5, c2u_E6, c2u_E7, 
+	c2u_E8, c2u_E9, c2u_EA, c2u_EB, c2u_EC, c2u_ED, c2u_EE, c2u_EF, 
+	c2u_F0, c2u_F1, c2u_F2, c2u_F3, c2u_F4, c2u_F5, c2u_F6, c2u_F7, 
+	c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, NULL,   NULL,   
+};
+
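+/*
+ * Unicode-to-charset direction.  Each u2c_XX table covers the Unicode
+ * code points 0xXX00-0xXXFF; every code point occupies two consecutive
+ * bytes (lead byte, then trail byte) of the corresponding charset code,
+ * which is why each table is declared as 512 bytes (256 code points x 2
+ * output bytes).  An all-zero byte pair marks a code point with no
+ * assigned charset code.
+ */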
+static unsigned char u2c_01[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xA9, 0xA2, 0xA9, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xA4, 0xA9, 0xA4, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xA9, 0xA5, 0xA8, 0xA6, 0xA9, 0xA6, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xA9, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xA8, /* 0x3C-0x3F */
+	0xA9, 0xA8, 0xA8, 0xA9, 0xA9, 0xA9, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xA9, 0xB0, 0xA8, 0xAF, 0xA9, 0xAF, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xAB, 0xA9, 0xAB, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xAE, 0xA9, 0xAE, /* 0x64-0x67 */
+};
+
+static unsigned char u2c_02[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA2, 0xA7, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xA2, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xA2, 0xA8, 0xA2, 0xAB, 0xA2, 0xAA, 0xA2, 0xAD, /* 0xD8-0xDB */
+	0x00, 0x00, 0xA2, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+};
+
+static unsigned char u2c_03[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA5, 0xC1, 0xA5, 0xC2, 0xA5, 0xC3, /* 0x90-0x93 */
+	0xA5, 0xC4, 0xA5, 0xC5, 0xA5, 0xC6, 0xA5, 0xC7, /* 0x94-0x97 */
+	0xA5, 0xC8, 0xA5, 0xC9, 0xA5, 0xCA, 0xA5, 0xCB, /* 0x98-0x9B */
+	0xA5, 0xCC, 0xA5, 0xCD, 0xA5, 0xCE, 0xA5, 0xCF, /* 0x9C-0x9F */
+	0xA5, 0xD0, 0xA5, 0xD1, 0x00, 0x00, 0xA5, 0xD2, /* 0xA0-0xA3 */
+	0xA5, 0xD3, 0xA5, 0xD4, 0xA5, 0xD5, 0xA5, 0xD6, /* 0xA4-0xA7 */
+	0xA5, 0xD7, 0xA5, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xA5, 0xE1, 0xA5, 0xE2, 0xA5, 0xE3, /* 0xB0-0xB3 */
+	0xA5, 0xE4, 0xA5, 0xE5, 0xA5, 0xE6, 0xA5, 0xE7, /* 0xB4-0xB7 */
+	0xA5, 0xE8, 0xA5, 0xE9, 0xA5, 0xEA, 0xA5, 0xEB, /* 0xB8-0xBB */
+	0xA5, 0xEC, 0xA5, 0xED, 0xA5, 0xEE, 0xA5, 0xEF, /* 0xBC-0xBF */
+	0xA5, 0xF0, 0xA5, 0xF1, 0x00, 0x00, 0xA5, 0xF2, /* 0xC0-0xC3 */
+	0xA5, 0xF3, 0xA5, 0xF4, 0xA5, 0xF5, 0xA5, 0xF6, /* 0xC4-0xC7 */
+	0xA5, 0xF7, 0xA5, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+};
+
+static unsigned char u2c_04[512] = {
+	0x00, 0x00, 0xAC, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xAC, 0xA1, 0xAC, 0xA2, 0xAC, 0xA3, 0xAC, 0xA4, /* 0x10-0x13 */
+	0xAC, 0xA5, 0xAC, 0xA6, 0xAC, 0xA8, 0xAC, 0xA9, /* 0x14-0x17 */
+	0xAC, 0xAA, 0xAC, 0xAB, 0xAC, 0xAC, 0xAC, 0xAD, /* 0x18-0x1B */
+	0xAC, 0xAE, 0xAC, 0xAF, 0xAC, 0xB0, 0xAC, 0xB1, /* 0x1C-0x1F */
+	0xAC, 0xB2, 0xAC, 0xB3, 0xAC, 0xB4, 0xAC, 0xB5, /* 0x20-0x23 */
+	0xAC, 0xB6, 0xAC, 0xB7, 0xAC, 0xB8, 0xAC, 0xB9, /* 0x24-0x27 */
+	0xAC, 0xBA, 0xAC, 0xBB, 0xAC, 0xBC, 0xAC, 0xBD, /* 0x28-0x2B */
+	0xAC, 0xBE, 0xAC, 0xBF, 0xAC, 0xC0, 0xAC, 0xC1, /* 0x2C-0x2F */
+	0xAC, 0xD1, 0xAC, 0xD2, 0xAC, 0xD3, 0xAC, 0xD4, /* 0x30-0x33 */
+	0xAC, 0xD5, 0xAC, 0xD6, 0xAC, 0xD8, 0xAC, 0xD9, /* 0x34-0x37 */
+	0xAC, 0xDA, 0xAC, 0xDB, 0xAC, 0xDC, 0xAC, 0xDD, /* 0x38-0x3B */
+	0xAC, 0xDE, 0xAC, 0xDF, 0xAC, 0xE0, 0xAC, 0xE1, /* 0x3C-0x3F */
+	0xAC, 0xE2, 0xAC, 0xE3, 0xAC, 0xE4, 0xAC, 0xE5, /* 0x40-0x43 */
+	0xAC, 0xE6, 0xAC, 0xE7, 0xAC, 0xE8, 0xAC, 0xE9, /* 0x44-0x47 */
+	0xAC, 0xEA, 0xAC, 0xEB, 0xAC, 0xEC, 0xAC, 0xED, /* 0x48-0x4B */
+	0xAC, 0xEE, 0xAC, 0xEF, 0xAC, 0xF0, 0xAC, 0xF1, /* 0x4C-0x4F */
+	0x00, 0x00, 0xAC, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+};
+
+static unsigned char u2c_11[512] = {
+	0xA4, 0xA1, 0xA4, 0xA2, 0xA4, 0xA4, 0xA4, 0xA7, /* 0x00-0x03 */
+	0xA4, 0xA8, 0xA4, 0xA9, 0xA4, 0xB1, 0xA4, 0xB2, /* 0x04-0x07 */
+	0xA4, 0xB3, 0xA4, 0xB5, 0xA4, 0xB6, 0xA4, 0xB7, /* 0x08-0x0B */
+	0xA4, 0xB8, 0xA4, 0xB9, 0xA4, 0xBA, 0xA4, 0xBB, /* 0x0C-0x0F */
+	0xA4, 0xBC, 0xA4, 0xBD, 0xA4, 0xBE, 0x00, 0x00, /* 0x10-0x13 */
+	0xA4, 0xD5, 0xA4, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xDD, 0x00, 0x00, /* 0x18-0x1B */
+	0xA4, 0xDE, 0xA4, 0xE1, 0xA4, 0xE2, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA4, 0xE3, 0xA4, 0xB4, 0xA4, 0xE4, 0xA4, 0xE5, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xE6, /* 0x24-0x27 */
+	0x00, 0x00, 0xA4, 0xE7, 0x00, 0x00, 0xA4, 0xE8, /* 0x28-0x2B */
+	0xA4, 0xE9, 0xA4, 0xEA, 0xA4, 0xEB, 0xA4, 0xEC, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xED, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xEE, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xA4, 0xB5, 0xA4, 0xB6, 0xA4, 0xB5, 0xA4, 0xB6, /* 0x3C-0x3F */
+	0xA4, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xA4, 0xF2, 0xA4, 0xF3, 0xA4, 0xF0, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xA4, 0xB7, 0x00, 0x00, 0xA4, 0xB8, 0xA4, 0xB9, /* 0x4C-0x4F */
+	0xA4, 0xB8, 0xA4, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xA4, 0xBA, 0xA4, 0xBA, 0x00, 0x00, 0xA4, 0xF4, /* 0x54-0x57 */
+	0xA4, 0xF5, 0xA4, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xD4, /* 0x5C-0x5F */
+	0x00, 0x00, 0xA4, 0xBF, 0xA4, 0xC0, 0xA4, 0xC1, /* 0x60-0x63 */
+	0xA4, 0xC2, 0xA4, 0xC3, 0xA4, 0xC4, 0xA4, 0xC5, /* 0x64-0x67 */
+	0xA4, 0xC6, 0xA4, 0xC7, 0xA4, 0xC8, 0xA4, 0xC9, /* 0x68-0x6B */
+	0xA4, 0xCA, 0xA4, 0xCB, 0xA4, 0xCC, 0xA4, 0xCD, /* 0x6C-0x6F */
+	0xA4, 0xCE, 0xA4, 0xCF, 0xA4, 0xD0, 0xA4, 0xD1, /* 0x70-0x73 */
+	0xA4, 0xD2, 0xA4, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xA4, 0xF7, 0xA4, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xA4, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA4, 0xFA, 0xA4, 0xFB, 0x00, 0x00, /* 0x90-0x93 */
+	0xA4, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xFD, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xA4, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xA4, 0xA1, 0xA4, 0xA2, 0xA4, 0xA3, 0xA4, 0xA4, /* 0xA8-0xAB */
+	0xA4, 0xA5, 0xA4, 0xA6, 0xA4, 0xA7, 0xA4, 0xA9, /* 0xAC-0xAF */
+	0xA4, 0xAA, 0xA4, 0xAB, 0xA4, 0xAC, 0xA4, 0xAD, /* 0xB0-0xB3 */
+	0xA4, 0xAE, 0xA4, 0xAF, 0xA4, 0xB0, 0xA4, 0xB1, /* 0xB4-0xB7 */
+	0xA4, 0xB2, 0xA4, 0xB4, 0xA4, 0xB5, 0xA4, 0xB6, /* 0xB8-0xBB */
+	0xA4, 0xB7, 0xA4, 0xB8, 0xA4, 0xBA, 0xA4, 0xBB, /* 0xBC-0xBF */
+	0xA4, 0xBC, 0xA4, 0xBD, 0xA4, 0xBE, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xD6, 0xA4, 0xD7, /* 0xC4-0xC7 */
+	0xA4, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xA4, 0xD9, 0x00, 0x00, 0xA4, 0xDA, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xDB, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xDC, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xA4, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xA4, 0xDE, 0xA4, 0xDF, 0x00, 0x00, 0xA4, 0xE0, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xE8, 0xA4, 0xEA, /* 0xE4-0xE7 */
+	0xA4, 0xEC, 0x00, 0x00, 0xA4, 0xED, 0xA4, 0xEF, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xF0, 0x00, 0x00, /* 0xEC-0xEF */
+	0xA4, 0xB7, 0xA4, 0xF2, 0xA4, 0xF3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xA4, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xA4, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_20[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xA1, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xA1, 0xAE, 0xA1, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA1, 0xB0, 0xA1, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA2, 0xD3, 0xA2, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xA1, 0xA5, 0xA1, 0xA6, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA2, 0xB6, 0x00, 0x00, 0xA1, 0xC7, 0xA1, 0xC8, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xD8, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xA9, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA9, 0xFA, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xA9, 0xFB, 0xA9, 0xFC, 0xA9, 0xFD, /* 0x80-0x83 */
+	0xA9, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+};
+
+static unsigned char u2c_21[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC9, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xA2, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xA4, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xE0, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xA2, 0xE5, 0xA2, 0xE2, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xD9, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xCA, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xF7, /* 0x50-0x53 */
+	0xA8, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xFB, /* 0x58-0x5B */
+	0xA8, 0xFC, 0xA8, 0xFD, 0xA8, 0xFE, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA5, 0xB0, 0xA5, 0xB1, 0xA5, 0xB2, 0xA5, 0xB3, /* 0x60-0x63 */
+	0xA5, 0xB4, 0xA5, 0xB5, 0xA5, 0xB6, 0xA5, 0xB7, /* 0x64-0x67 */
+	0xA5, 0xB8, 0xA5, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xA5, 0xA1, 0xA5, 0xA2, 0xA5, 0xA3, 0xA5, 0xA4, /* 0x70-0x73 */
+	0xA5, 0xA5, 0xA5, 0xA6, 0xA5, 0xA7, 0xA5, 0xA8, /* 0x74-0x77 */
+	0xA5, 0xA9, 0xA5, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xA1, 0xE7, 0xA1, 0xE8, 0xA1, 0xE6, 0xA1, 0xE9, /* 0x90-0x93 */
+	0xA1, 0xEA, 0xA2, 0xD5, 0xA2, 0xD8, 0xA2, 0xD6, /* 0x94-0x97 */
+	0xA2, 0xD9, 0xA2, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xA1, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xA2, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+};
+
+static unsigned char u2c_22[512] = {
+	0xA2, 0xA3, 0x00, 0x00, 0xA1, 0xD3, 0xA2, 0xA4, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xD4, /* 0x04-0x07 */
+	0xA1, 0xF4, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xF5, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA2, 0xB3, /* 0x0C-0x0F */
+	0x00, 0x00, 0xA2, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xEE, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xA1, 0xF0, 0xA1, 0xC4, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA1, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xA1, 0xAB, 0x00, 0x00, 0xA1, 0xFC, /* 0x24-0x27 */
+	0xA1, 0xFD, 0xA1, 0xFB, 0xA1, 0xFA, 0xA1, 0xF2, /* 0x28-0x2B */
+	0xA1, 0xF3, 0x00, 0x00, 0xA2, 0xB1, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xA1, 0xC5, 0xA1, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xA1, 0xAD, 0xA1, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xD6, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA1, 0xC1, 0xA1, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xA1, 0xC2, 0xA1, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, 0xA1, 0xED, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF8, 0xA1, 0xF9, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF6, 0xA1, 0xF7, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xA2, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xA1, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+};
+
+static unsigned char u2c_23[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xD2, 0x00, 0x00, /* 0x10-0x13 */
+};
+
+static unsigned char u2c_24[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA8, 0xE7, 0xA8, 0xE8, 0xA8, 0xE9, 0xA8, 0xEA, /* 0x60-0x63 */
+	0xA8, 0xEB, 0xA8, 0xEC, 0xA8, 0xED, 0xA8, 0xEE, /* 0x64-0x67 */
+	0xA8, 0xEF, 0xA8, 0xF0, 0xA8, 0xF1, 0xA8, 0xF2, /* 0x68-0x6B */
+	0xA8, 0xF3, 0xA8, 0xF4, 0xA8, 0xF5, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xA9, 0xE7, 0xA9, 0xE8, 0xA9, 0xE9, 0xA9, 0xEA, /* 0x74-0x77 */
+	0xA9, 0xEB, 0xA9, 0xEC, 0xA9, 0xED, 0xA9, 0xEE, /* 0x78-0x7B */
+	0xA9, 0xEF, 0xA9, 0xF0, 0xA9, 0xF1, 0xA9, 0xF2, /* 0x7C-0x7F */
+	
+	0xA9, 0xF3, 0xA9, 0xF4, 0xA9, 0xF5, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xA9, 0xCD, 0xA9, 0xCE, 0xA9, 0xCF, 0xA9, 0xD0, /* 0x9C-0x9F */
+	0xA9, 0xD1, 0xA9, 0xD2, 0xA9, 0xD3, 0xA9, 0xD4, /* 0xA0-0xA3 */
+	0xA9, 0xD5, 0xA9, 0xD6, 0xA9, 0xD7, 0xA9, 0xD8, /* 0xA4-0xA7 */
+	0xA9, 0xD9, 0xA9, 0xDA, 0xA9, 0xDB, 0xA9, 0xDC, /* 0xA8-0xAB */
+	0xA9, 0xDD, 0xA9, 0xDE, 0xA9, 0xDF, 0xA9, 0xE0, /* 0xAC-0xAF */
+	0xA9, 0xE1, 0xA9, 0xE2, 0xA9, 0xE3, 0xA9, 0xE4, /* 0xB0-0xB3 */
+	0xA9, 0xE5, 0xA9, 0xE6, 0xA8, 0xCD, 0xA8, 0xCE, /* 0xB4-0xB7 */
+	0xA8, 0xCF, 0xA8, 0xD0, 0xA8, 0xD1, 0xA8, 0xD2, /* 0xB8-0xBB */
+	0xA8, 0xD3, 0xA8, 0xD4, 0xA8, 0xD5, 0xA8, 0xD6, /* 0xBC-0xBF */
+	0xA8, 0xD7, 0xA8, 0xD8, 0xA8, 0xD9, 0xA8, 0xDA, /* 0xC0-0xC3 */
+	0xA8, 0xDB, 0xA8, 0xDC, 0xA8, 0xDD, 0xA8, 0xDE, /* 0xC4-0xC7 */
+	0xA8, 0xDF, 0xA8, 0xE0, 0xA8, 0xE1, 0xA8, 0xE2, /* 0xC8-0xCB */
+	0xA8, 0xE3, 0xA8, 0xE4, 0xA8, 0xE5, 0xA8, 0xE6, /* 0xCC-0xCF */
+	0xA8, 0xCD, 0xA8, 0xCE, 0xA8, 0xCF, 0xA8, 0xD0, /* 0xD0-0xD3 */
+	0xA8, 0xD1, 0xA8, 0xD2, 0xA8, 0xD3, 0xA8, 0xD4, /* 0xD4-0xD7 */
+	0xA8, 0xD5, 0xA8, 0xD6, 0xA8, 0xD7, 0xA8, 0xD8, /* 0xD8-0xDB */
+	0xA8, 0xD9, 0xA8, 0xDA, 0xA8, 0xDB, 0xA8, 0xDC, /* 0xDC-0xDF */
+	0xA8, 0xDD, 0xA8, 0xDE, 0xA8, 0xDF, 0xA8, 0xE0, /* 0xE0-0xE3 */
+	0xA8, 0xE1, 0xA8, 0xE2, 0xA8, 0xE3, 0xA8, 0xE4, /* 0xE4-0xE7 */
+	0xA8, 0xE5, 0xA8, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+};
+
+static unsigned char u2c_25[512] = {
+	0xA6, 0xA1, 0xA6, 0xAC, 0xA6, 0xA2, 0xA6, 0xAD, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xA6, 0xA3, 0xA6, 0xC8, 0xA6, 0xC7, 0xA6, 0xAE, /* 0x0C-0x0F */
+	0xA6, 0xA4, 0xA6, 0xC2, 0xA6, 0xC1, 0xA6, 0xAF, /* 0x10-0x13 */
+	0xA6, 0xA6, 0xA6, 0xC6, 0xA6, 0xC5, 0xA6, 0xB1, /* 0x14-0x17 */
+	0xA6, 0xA5, 0xA6, 0xC4, 0xA6, 0xC3, 0xA6, 0xB0, /* 0x18-0x1B */
+	0xA6, 0xA7, 0xA6, 0xBC, 0xA6, 0xC9, 0xA6, 0xCA, /* 0x1C-0x1F */
+	0xA6, 0xB7, 0xA6, 0xCB, 0xA6, 0xCC, 0xA6, 0xB2, /* 0x20-0x23 */
+	0xA6, 0xA9, 0xA6, 0xBE, 0xA6, 0xCD, 0xA6, 0xCE, /* 0x24-0x27 */
+	0xA6, 0xB9, 0xA6, 0xCF, 0xA6, 0xD0, 0xA6, 0xB4, /* 0x28-0x2B */
+	0xA6, 0xA8, 0xA6, 0xD1, 0xA6, 0xD2, 0xA6, 0xB8, /* 0x2C-0x2F */
+	0xA6, 0xBD, 0xA6, 0xD3, 0xA6, 0xD4, 0xA6, 0xB3, /* 0x30-0x33 */
+	0xA6, 0xAA, 0xA6, 0xD5, 0xA6, 0xD6, 0xA6, 0xBA, /* 0x34-0x37 */
+	0xA6, 0xBF, 0xA6, 0xD7, 0xA6, 0xD8, 0xA6, 0xB5, /* 0x38-0x3B */
+	0xA6, 0xAB, 0xA6, 0xD9, 0xA6, 0xDA, 0xA6, 0xBB, /* 0x3C-0x3F */
+	0xA6, 0xDB, 0xA6, 0xDC, 0xA6, 0xC0, 0xA6, 0xDD, /* 0x40-0x43 */
+	0xA6, 0xDE, 0xA6, 0xDF, 0xA6, 0xE0, 0xA6, 0xE1, /* 0x44-0x47 */
+	0xA6, 0xE2, 0xA6, 0xE3, 0xA6, 0xE4, 0xA6, 0xB6, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xC6, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xA1, 0xE1, 0xA1, 0xE0, 0x00, 0x00, 0xA2, 0xC3, /* 0xA0-0xA3 */
+	0xA2, 0xC7, 0xA2, 0xC8, 0xA2, 0xCB, 0xA2, 0xCA, /* 0xA4-0xA7 */
+	0xA2, 0xC9, 0xA2, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xE3, 0xA1, 0xE2, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xBA, 0xA2, 0xB9, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xA1, 0xE5, 0xA1, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xA2, 0xB8, 0xA2, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xDF, 0xA1, 0xDE, /* 0xC4-0xC7 */
+	0xA2, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xDB, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xDD, 0xA1, 0xDC, /* 0xCC-0xCF */
+	0xA2, 0xC4, 0xA2, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+};
+
+static unsigned char u2c_26[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xDA, 0xA1, 0xD9, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xCF, 0xA2, 0xCE, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA2, 0xD0, 0x00, 0x00, 0xA2, 0xD1, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xA1, 0xCF, 0x00, 0x00, 0xA1, 0xCE, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA2, 0xBC, 0xA2, 0xBD, 0x00, 0x00, 0xA2, 0xC0, /* 0x60-0x63 */
+	0xA2, 0xBB, 0xA2, 0xBE, 0x00, 0x00, 0xA2, 0xBF, /* 0x64-0x67 */
+	0xA2, 0xCD, 0xA2, 0xDB, 0xA2, 0xDC, 0x00, 0x00, /* 0x68-0x6B */
+	0xA2, 0xDD, 0xA2, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+};
+
+static unsigned char u2c_30[512] = {
+	0xA1, 0xA1, 0xA1, 0xA2, 0xA1, 0xA3, 0xA1, 0xA8, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xA1, 0xB4, 0xA1, 0xB5, 0xA1, 0xB6, 0xA1, 0xB7, /* 0x08-0x0B */
+	0xA1, 0xB8, 0xA1, 0xB9, 0xA1, 0xBA, 0xA1, 0xBB, /* 0x0C-0x0F */
+	0xA1, 0xBC, 0xA1, 0xBD, 0x00, 0x00, 0xA1, 0xEB, /* 0x10-0x13 */
+	0xA1, 0xB2, 0xA1, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xAA, 0xA1, 0xAA, 0xA2, 0xAA, 0xA3, /* 0x40-0x43 */
+	0xAA, 0xA4, 0xAA, 0xA5, 0xAA, 0xA6, 0xAA, 0xA7, /* 0x44-0x47 */
+	0xAA, 0xA8, 0xAA, 0xA9, 0xAA, 0xAA, 0xAA, 0xAB, /* 0x48-0x4B */
+	0xAA, 0xAC, 0xAA, 0xAD, 0xAA, 0xAE, 0xAA, 0xAF, /* 0x4C-0x4F */
+	0xAA, 0xB0, 0xAA, 0xB1, 0xAA, 0xB2, 0xAA, 0xB3, /* 0x50-0x53 */
+	0xAA, 0xB4, 0xAA, 0xB5, 0xAA, 0xB6, 0xAA, 0xB7, /* 0x54-0x57 */
+	0xAA, 0xB8, 0xAA, 0xB9, 0xAA, 0xBA, 0xAA, 0xBB, /* 0x58-0x5B */
+	0xAA, 0xBC, 0xAA, 0xBD, 0xAA, 0xBE, 0xAA, 0xBF, /* 0x5C-0x5F */
+	0xAA, 0xC0, 0xAA, 0xC1, 0xAA, 0xC2, 0xAA, 0xC3, /* 0x60-0x63 */
+	0xAA, 0xC4, 0xAA, 0xC5, 0xAA, 0xC6, 0xAA, 0xC7, /* 0x64-0x67 */
+	0xAA, 0xC8, 0xAA, 0xC9, 0xAA, 0xCA, 0xAA, 0xCB, /* 0x68-0x6B */
+	0xAA, 0xCC, 0xAA, 0xCD, 0xAA, 0xCE, 0xAA, 0xCF, /* 0x6C-0x6F */
+	0xAA, 0xD0, 0xAA, 0xD1, 0xAA, 0xD2, 0xAA, 0xD3, /* 0x70-0x73 */
+	0xAA, 0xD4, 0xAA, 0xD5, 0xAA, 0xD6, 0xAA, 0xD7, /* 0x74-0x77 */
+	0xAA, 0xD8, 0xAA, 0xD9, 0xAA, 0xDA, 0xAA, 0xDB, /* 0x78-0x7B */
+	0xAA, 0xDC, 0xAA, 0xDD, 0xAA, 0xDE, 0xAA, 0xDF, /* 0x7C-0x7F */
+
+	0xAA, 0xE0, 0xAA, 0xE1, 0xAA, 0xE2, 0xAA, 0xE3, /* 0x80-0x83 */
+	0xAA, 0xE4, 0xAA, 0xE5, 0xAA, 0xE6, 0xAA, 0xE7, /* 0x84-0x87 */
+	0xAA, 0xE8, 0xAA, 0xE9, 0xAA, 0xEA, 0xAA, 0xEB, /* 0x88-0x8B */
+	0xAA, 0xEC, 0xAA, 0xED, 0xAA, 0xEE, 0xAA, 0xEF, /* 0x8C-0x8F */
+	0xAA, 0xF0, 0xAA, 0xF1, 0xAA, 0xF2, 0xAA, 0xF3, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xAB, 0xA1, 0xAB, 0xA2, 0xAB, 0xA3, /* 0xA0-0xA3 */
+	0xAB, 0xA4, 0xAB, 0xA5, 0xAB, 0xA6, 0xAB, 0xA7, /* 0xA4-0xA7 */
+	0xAB, 0xA8, 0xAB, 0xA9, 0xAB, 0xAA, 0xAB, 0xAB, /* 0xA8-0xAB */
+	0xAB, 0xAC, 0xAB, 0xAD, 0xAB, 0xAE, 0xAB, 0xAF, /* 0xAC-0xAF */
+	0xAB, 0xB0, 0xAB, 0xB1, 0xAB, 0xB2, 0xAB, 0xB3, /* 0xB0-0xB3 */
+	0xAB, 0xB4, 0xAB, 0xB5, 0xAB, 0xB6, 0xAB, 0xB7, /* 0xB4-0xB7 */
+	0xAB, 0xB8, 0xAB, 0xB9, 0xAB, 0xBA, 0xAB, 0xBB, /* 0xB8-0xBB */
+	0xAB, 0xBC, 0xAB, 0xBD, 0xAB, 0xBE, 0xAB, 0xBF, /* 0xBC-0xBF */
+	0xAB, 0xC0, 0xAB, 0xC1, 0xAB, 0xC2, 0xAB, 0xC3, /* 0xC0-0xC3 */
+	0xAB, 0xC4, 0xAB, 0xC5, 0xAB, 0xC6, 0xAB, 0xC7, /* 0xC4-0xC7 */
+	0xAB, 0xC8, 0xAB, 0xC9, 0xAB, 0xCA, 0xAB, 0xCB, /* 0xC8-0xCB */
+	0xAB, 0xCC, 0xAB, 0xCD, 0xAB, 0xCE, 0xAB, 0xCF, /* 0xCC-0xCF */
+	0xAB, 0xD0, 0xAB, 0xD1, 0xAB, 0xD2, 0xAB, 0xD3, /* 0xD0-0xD3 */
+	0xAB, 0xD4, 0xAB, 0xD5, 0xAB, 0xD6, 0xAB, 0xD7, /* 0xD4-0xD7 */
+	0xAB, 0xD8, 0xAB, 0xD9, 0xAB, 0xDA, 0xAB, 0xDB, /* 0xD8-0xDB */
+	0xAB, 0xDC, 0xAB, 0xDD, 0xAB, 0xDE, 0xAB, 0xDF, /* 0xDC-0xDF */
+	0xAB, 0xE0, 0xAB, 0xE1, 0xAB, 0xE2, 0xAB, 0xE3, /* 0xE0-0xE3 */
+	0xAB, 0xE4, 0xAB, 0xE5, 0xAB, 0xE6, 0xAB, 0xE7, /* 0xE4-0xE7 */
+	0xAB, 0xE8, 0xAB, 0xE9, 0xAB, 0xEA, 0xAB, 0xEB, /* 0xE8-0xEB */
+	0xAB, 0xEC, 0xAB, 0xED, 0xAB, 0xEE, 0xAB, 0xEF, /* 0xEC-0xEF */
+	0xAB, 0xF0, 0xAB, 0xF1, 0xAB, 0xF2, 0xAB, 0xF3, /* 0xF0-0xF3 */
+	0xAB, 0xF4, 0xAB, 0xF5, 0xAB, 0xF6, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_31[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xA4, 0xA1, 0xA4, 0xA2, 0xA4, 0xA3, /* 0x30-0x33 */
+	0xA4, 0xA4, 0xA4, 0xA5, 0xA4, 0xA6, 0xA4, 0xA7, /* 0x34-0x37 */
+	0xA4, 0xA8, 0xA4, 0xA9, 0xA4, 0xAA, 0xA4, 0xAB, /* 0x38-0x3B */
+	0xA4, 0xAC, 0xA4, 0xAD, 0xA4, 0xAE, 0xA4, 0xAF, /* 0x3C-0x3F */
+	0xA4, 0xB0, 0xA4, 0xB1, 0xA4, 0xB2, 0xA4, 0xB3, /* 0x40-0x43 */
+	0xA4, 0xB4, 0xA4, 0xB5, 0xA4, 0xB6, 0xA4, 0xB7, /* 0x44-0x47 */
+	0xA4, 0xB8, 0xA4, 0xB9, 0xA4, 0xBA, 0xA4, 0xBB, /* 0x48-0x4B */
+	0xA4, 0xBC, 0xA4, 0xBD, 0xA4, 0xBE, 0xA4, 0xBF, /* 0x4C-0x4F */
+	0xA4, 0xC0, 0xA4, 0xC1, 0xA4, 0xC2, 0xA4, 0xC3, /* 0x50-0x53 */
+	0xA4, 0xC4, 0xA4, 0xC5, 0xA4, 0xC6, 0xA4, 0xC7, /* 0x54-0x57 */
+	0xA4, 0xC8, 0xA4, 0xC9, 0xA4, 0xCA, 0xA4, 0xCB, /* 0x58-0x5B */
+	0xA4, 0xCC, 0xA4, 0xCD, 0xA4, 0xCE, 0xA4, 0xCF, /* 0x5C-0x5F */
+	0xA4, 0xD0, 0xA4, 0xD1, 0xA4, 0xD2, 0xA4, 0xD3, /* 0x60-0x63 */
+	0xA4, 0xD4, 0xA4, 0xD5, 0xA4, 0xD6, 0xA4, 0xD7, /* 0x64-0x67 */
+	0xA4, 0xD8, 0xA4, 0xD9, 0xA4, 0xDA, 0xA4, 0xDB, /* 0x68-0x6B */
+	0xA4, 0xDC, 0xA4, 0xDD, 0xA4, 0xDE, 0xA4, 0xDF, /* 0x6C-0x6F */
+	0xA4, 0xE0, 0xA4, 0xE1, 0xA4, 0xE2, 0xA4, 0xE3, /* 0x70-0x73 */
+	0xA4, 0xE4, 0xA4, 0xE5, 0xA4, 0xE6, 0xA4, 0xE7, /* 0x74-0x77 */
+	0xA4, 0xE8, 0xA4, 0xE9, 0xA4, 0xEA, 0xA4, 0xEB, /* 0x78-0x7B */
+	0xA4, 0xEC, 0xA4, 0xED, 0xA4, 0xEE, 0xA4, 0xEF, /* 0x7C-0x7F */
+
+	0xA4, 0xF0, 0xA4, 0xF1, 0xA4, 0xF2, 0xA4, 0xF3, /* 0x80-0x83 */
+	0xA4, 0xF4, 0xA4, 0xF5, 0xA4, 0xF6, 0xA4, 0xF7, /* 0x84-0x87 */
+	0xA4, 0xF8, 0xA4, 0xF9, 0xA4, 0xFA, 0xA4, 0xFB, /* 0x88-0x8B */
+	0xA4, 0xFC, 0xA4, 0xFD, 0xA4, 0xFE, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xE9, 0xEC, 0xA3, /* 0x90-0x93 */
+	0xDF, 0xB2, 0xDE, 0xCC, 0xDF, 0xBE, 0xF1, 0xE9, /* 0x94-0x97 */
+	0xF9, 0xBB, 0xCB, 0xA3, 0xEB, 0xE0, 0xDC, 0xB0, /* 0x98-0x9B */
+	0xEF, 0xCB, 0xF4, 0xB8, 0xF2, 0xA2, 0xEC, 0xD1, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_32[512] = {
+	0xA9, 0xB1, 0xA9, 0xB2, 0xA9, 0xB3, 0xA9, 0xB4, /* 0x00-0x03 */
+	0xA9, 0xB5, 0xA9, 0xB6, 0xA9, 0xB7, 0xA9, 0xB8, /* 0x04-0x07 */
+	0xA9, 0xB9, 0xA9, 0xBA, 0xA9, 0xBB, 0xA9, 0xBC, /* 0x08-0x0B */
+	0xA9, 0xBD, 0xA9, 0xBE, 0xA9, 0xBF, 0xA9, 0xC0, /* 0x0C-0x0F */
+	0xA9, 0xC1, 0xA9, 0xC2, 0xA9, 0xC3, 0xA9, 0xC4, /* 0x10-0x13 */
+	0xA9, 0xC5, 0xA9, 0xC6, 0xA9, 0xC7, 0xA9, 0xC8, /* 0x14-0x17 */
+	0xA9, 0xC9, 0xA9, 0xCA, 0xA9, 0xCB, 0xA9, 0xCC, /* 0x18-0x1B */
+	0xA2, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xEC, 0xE9, 0xEC, 0xA3, 0xDF, 0xB2, 0xDE, 0xCC, /* 0x20-0x23 */
+	0xE7, 0xE9, 0xD7, 0xBF, 0xF6, 0xD2, 0xF8, 0xA2, /* 0x24-0x27 */
+	0xCE, 0xFA, 0xE4, 0xA8, 0xEA, 0xC5, 0xFB, 0xFD, /* 0x28-0x2B */
+	0xE2, 0xA9, 0xD9, 0xCA, 0xD1, 0xD1, 0xF7, 0xCF, /* 0x2C-0x2F */
+	0xEC, 0xED, 0xF1, 0xBB, 0xEA, 0xF3, 0xDE, 0xE4, /* 0x30-0x33 */
+	0xD9, 0xA3, 0xF7, 0xE5, 0xEE, 0xAF, 0xF5, 0xE6, /* 0x34-0x37 */
+	0xD6, 0xCC, 0xD3, 0xDB, 0xFB, 0xBC, 0xF9, 0xCA, /* 0x38-0x3B */
+	0xCA, 0xF8, 0xD0, 0xEA, 0xED, 0xC0, 0xFA, 0xF0, /* 0x3C-0x3F */
+	0xF0, 0xAE, 0xFD, 0xCC, 0xED, 0xBB, 0xF2, 0xB8, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA8, 0xB1, 0xA8, 0xB2, 0xA8, 0xB3, 0xA8, 0xB4, /* 0x60-0x63 */
+	0xA8, 0xB5, 0xA8, 0xB6, 0xA8, 0xB7, 0xA8, 0xB8, /* 0x64-0x67 */
+	0xA8, 0xB9, 0xA8, 0xBA, 0xA8, 0xBB, 0xA8, 0xBC, /* 0x68-0x6B */
+	0xA8, 0xBD, 0xA8, 0xBE, 0xA8, 0xBF, 0xA8, 0xC0, /* 0x6C-0x6F */
+	0xA8, 0xC1, 0xA8, 0xC2, 0xA8, 0xC3, 0xA8, 0xC4, /* 0x70-0x73 */
+	0xA8, 0xC5, 0xA8, 0xC6, 0xA8, 0xC7, 0xA8, 0xC8, /* 0x74-0x77 */
+	0xA8, 0xC9, 0xA8, 0xCA, 0xA8, 0xCB, 0xA8, 0xCC, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA2, 0xDE, /* 0x7C-0x7F */
+
+	0xEC, 0xE9, 0xEC, 0xA3, 0xDF, 0xB2, 0xDE, 0xCC, /* 0x80-0x83 */
+	0xE7, 0xE9, 0xD7, 0xBF, 0xF6, 0xD2, 0xF8, 0xA2, /* 0x84-0x87 */
+	0xCE, 0xFA, 0xE4, 0xA8, 0xEA, 0xC5, 0xFB, 0xFD, /* 0x88-0x8B */
+	0xE2, 0xA9, 0xD9, 0xCA, 0xD1, 0xD1, 0xF7, 0xCF, /* 0x8C-0x8F */
+	0xEC, 0xED, 0xF1, 0xBB, 0xEA, 0xF3, 0xDE, 0xE4, /* 0x90-0x93 */
+	0xD9, 0xA3, 0xF7, 0xE5, 0xEE, 0xAF, 0xF5, 0xE6, /* 0x94-0x97 */
+	0xD6, 0xCC, 0xDD, 0xFA, 0xD1, 0xFB, 0xD2, 0xB3, /* 0x98-0x9B */
+	0xEE, 0xEA, 0xE9, 0xD0, 0xEC, 0xD4, 0xF1, 0xBC, /* 0x9C-0x9F */
+	0xFA, 0xA3, 0xFD, 0xCC, 0xDE, 0xD0, 0xEF, 0xE1, /* 0xA0-0xA3 */
+	0xDF, 0xBE, 0xF1, 0xE9, 0xF9, 0xBB, 0xF1, 0xA7, /* 0xA4-0xA7 */
+	0xE9, 0xD3, 0xEC, 0xA2, 0xF0, 0xF3, 0xF9, 0xCA, /* 0xA8-0xAB */
+	0xCA, 0xF8, 0xD0, 0xEA, 0xED, 0xC0, 0xFA, 0xF0, /* 0xAC-0xAF */
+	0xE5, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+};
+
+static unsigned char u2c_33[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xA7, 0xC9, 0xA7, 0xCA, 0xA7, 0xCB, 0xA7, 0xCC, /* 0x80-0x83 */
+	0xA7, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xA7, 0xBA, 0xA7, 0xBB, 0xA7, 0xDC, 0xA7, 0xDD, /* 0x88-0x8B */
+	0xA7, 0xDE, 0xA7, 0xB6, 0xA7, 0xB7, 0xA7, 0xB8, /* 0x8C-0x8F */
+	0xA7, 0xD4, 0xA7, 0xD5, 0xA7, 0xD6, 0xA7, 0xD7, /* 0x90-0x93 */
+	0xA7, 0xD8, 0xA7, 0xA1, 0xA7, 0xA2, 0xA7, 0xA3, /* 0x94-0x97 */
+	0xA7, 0xA5, 0xA7, 0xAB, 0xA7, 0xAC, 0xA7, 0xAD, /* 0x98-0x9B */
+	0xA7, 0xAE, 0xA7, 0xAF, 0xA7, 0xB0, 0xA7, 0xB1, /* 0x9C-0x9F */
+	0xA7, 0xB2, 0xA7, 0xB3, 0xA7, 0xB4, 0xA7, 0xA7, /* 0xA0-0xA3 */
+	0xA7, 0xA8, 0xA7, 0xA9, 0xA7, 0xAA, 0xA7, 0xBD, /* 0xA4-0xA7 */
+	0xA7, 0xBE, 0xA7, 0xE5, 0xA7, 0xE6, 0xA7, 0xE7, /* 0xA8-0xAB */
+	0xA7, 0xE8, 0xA7, 0xE1, 0xA7, 0xE2, 0xA7, 0xE3, /* 0xAC-0xAF */
+	0xA7, 0xBF, 0xA7, 0xC0, 0xA7, 0xC1, 0xA7, 0xC2, /* 0xB0-0xB3 */
+	0xA7, 0xC3, 0xA7, 0xC4, 0xA7, 0xC5, 0xA7, 0xC6, /* 0xB4-0xB7 */
+	0xA7, 0xC7, 0xA7, 0xC8, 0xA7, 0xCE, 0xA7, 0xCF, /* 0xB8-0xBB */
+	0xA7, 0xD0, 0xA7, 0xD1, 0xA7, 0xD2, 0xA7, 0xD3, /* 0xBC-0xBF */
+	0xA7, 0xDA, 0xA7, 0xDB, 0xA2, 0xE3, 0xA7, 0xEC, /* 0xC0-0xC3 */
+	0xA7, 0xA6, 0xA7, 0xE0, 0xA7, 0xEF, 0xA2, 0xE1, /* 0xC4-0xC7 */
+	0xA7, 0xBC, 0xA7, 0xED, 0xA7, 0xB5, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xB9, /* 0xCC-0xCF */
+	0xA7, 0xEA, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xEB, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xDF, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xA2, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xE4, /* 0xD8-0xDB */
+	0xA7, 0xEE, 0xA7, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+};
+
+static unsigned char u2c_4E[512] = {
+	0xEC, 0xE9, 0xEF, 0xCB, 0x00, 0x00, 0xF6, 0xD2, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xB2, /* 0x04-0x07 */
+	0xED, 0xDB, 0xDF, 0xB2, 0xDF, 0xBE, 0xF9, 0xBB, /* 0x08-0x0B */
+	0x00, 0x00, 0xDC, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xF5, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xF3, 0xA6, 0xDD, 0xE0, 0xE1, 0xA6, 0x00, 0x00, /* 0x14-0x17 */
+	0xCE, 0xF8, 0xDC, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xAA, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xF1, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xFA, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xFC, 0xAF, 0xD3, 0xA1, 0x00, 0x00, 0xF1, 0xAB, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xD1, 0xD2, 0xAC, /* 0x40-0x43 */
+	0x00, 0x00, 0xCE, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xFD, /* 0x48-0x4B */
+	0x00, 0x00, 0xDE, 0xBF, 0xFB, 0xBA, 0xF9, 0xB9, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xD2, 0x00, 0x00, /* 0x54-0x57 */
+	0xE3, 0xAB, 0xEB, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xCE, 0xFA, 0xCB, 0xF7, 0xE5, 0xA5, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xE1, /* 0x68-0x6B */
+	0x00, 0x00, 0xD4, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xE1, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xE3, 0xDF, 0xAD, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xEB, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xAF, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xF5, 0x00, 0x00, /* 0x84-0x87 */
+	0xE5, 0xF8, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xC0, /* 0x88-0x8B */
+	0xEC, 0xA3, 0x00, 0x00, 0xE9, 0xCD, 0x00, 0x00, /* 0x8C-0x8F */
+	0xEA, 0xA7, 0xE9, 0xF6, 0xFB, 0xBB, 0x00, 0x00, /* 0x90-0x93 */
+	0xE7, 0xE9, 0xEF, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xD0, 0xE6, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xC1, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xAC, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xD8, 0xCC, 0xF9, 0xF1, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xCE, 0xDF, 0xFA, 0xA4, 0xE6, 0xB2, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xFA, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xBD, /* 0xA8-0xAB */
+	0xCC, 0xC8, 0xEF, 0xCD, 0xD5, 0xD5, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xA2, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xD1, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE4, 0xA7, 0xEC, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF6, 0xB1, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xFB, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xD1, 0xCB, 0xBF, /* 0xC8-0xCB */
+	0x00, 0x00, 0xED, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xED, 0xA8, 0xDE, 0xC2, 0xF6, 0xE2, 0xED, 0xDC, /* 0xD4-0xD7 */
+	0xDC, 0xF5, 0xE0, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xD4, 0xCE, 0x00, 0x00, 0xF4, 0xB5, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xDB, /* 0xE0-0xE3 */
+	0xD6, 0xB5, 0xEC, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE4, 0xE6, 0x00, 0x00, 0xF1, 0xEA, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xEC, 0xCB, 0xC0, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xF2, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_4F[512] = {
+	0x00, 0x00, 0xD0, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xF9, 0xF2, 0xEC, 0xA5, 0xD0, 0xDF, /* 0x08-0x0B */
+	0x00, 0x00, 0xE7, 0xEA, 0xD0, 0xEB, 0xDC, 0xD1, /* 0x0C-0x0F */
+	0xDB, 0xE9, 0xFD, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xD7, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xDA, 0xE1, 0x00, 0x00, 0xD6, 0xB6, 0x00, 0x00, /* 0x34-0x37 */
+	0xE3, 0xDF, 0x00, 0x00, 0xDE, 0xC3, 0x00, 0x00, /* 0x38-0x3B */
+	0xDE, 0xC4, 0xCA, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xEC, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xA3, 0xEE, 0xB7, /* 0x44-0x47 */
+	0xF8, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xEA, 0xC8, 0xEE, 0xB8, 0xF1, 0xAC, /* 0x4C-0x4F */
+	0xF1, 0xA5, 0xE9, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xF9, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE5, 0xF9, 0xEC, 0xEA, 0xDD, 0xD6, /* 0x58-0x5B */
+	0xED, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xF8, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xBA, /* 0x6C-0x6F */
+	0xDB, 0xD8, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xA2, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xCD, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xED, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xEB, 0xDE, 0xC5, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xE3, 0xE0, 0x00, 0x00, 0xCA, 0xC9, /* 0x80-0x83 */
+	0xF2, 0xE9, 0x00, 0x00, 0xD5, 0xCE, 0x00, 0x00, /* 0x84-0x87 */
+	0xF6, 0xB6, 0x00, 0x00, 0xCE, 0xC2, 0xD6, 0xC7, /* 0x88-0x8B */
+	0x00, 0x00, 0xE3, 0xB4, 0x00, 0x00, 0xF1, 0xAD, /* 0x8C-0x8F */
+	0x00, 0x00, 0xEA, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xC2, 0x00, 0x00, /* 0x94-0x97 */
+	0xF3, 0xA7, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xEA, /* 0x98-0x9B */
+	0x00, 0x00, 0xEB, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xB2, 0xFD, 0xA5, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF6, 0xD5, 0xD5, 0xE2, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xB5, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xF5, 0xF5, 0xB5, /* 0xC0-0xC3 */
+	0xE4, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE7, 0xEB, 0xF1, 0xD5, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xBB, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE9, 0xB5, 0x00, 0x00, 0xCC, 0xC9, /* 0xD0-0xD3 */
+	0xFA, 0xD5, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xD4, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xD6, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xDC, 0xC1, 0x00, 0x00, 0xDE, 0xC6, /* 0xDC-0xDF */
+	0xFA, 0xEF, 0xE3, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xF3, 0xDC, 0xF6, /* 0xEC-0xEF */
+	0x00, 0x00, 0xCE, 0xFC, 0x00, 0x00, 0xDB, 0xC4, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xF8, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xDC, 0xE4, 0x00, 0x00, 0xE5, 0xEF, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_50[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xB1, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xD6, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xF3, 0xDA, 0x00, 0x00, 0xCB, 0xC1, /* 0x08-0x0B */
+	0x00, 0x00, 0xDB, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xD9, 0xFA, 0xD3, 0xEE, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xB8, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xFD, 0xA6, 0xEB, 0xEF, 0x00, 0x00, /* 0x18-0x1B */
+	0xF4, 0xA6, 0x00, 0x00, 0xCC, 0xCA, 0xF3, 0xA8, /* 0x1C-0x1F */
+	0x00, 0x00, 0xF3, 0xDB, 0x00, 0x00, 0xDB, 0xA7, /* 0x20-0x23 */
+	0xF6, 0xB7, 0x00, 0x00, 0xCF, 0xE6, 0xF0, 0xF2, /* 0x24-0x27 */
+	0xCB, 0xDA, 0x00, 0x00, 0xE7, 0xD2, 0xD7, 0xC3, /* 0x28-0x2B */
+	0xF6, 0xF0, 0xE8, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA6, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xE7, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xA3, /* 0x44-0x47 */
+	0xCC, 0xA7, 0xEA, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xB6, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xFA, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xAE, 0x00, 0x00, /* 0x58-0x5B */
+	0xEF, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xCB, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xF6, 0xB0, 0xEF, 0xCF, 0xE9, 0xCF, 0x00, 0x00, /* 0x74-0x77 */
+	0xF7, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xCE, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xDC, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xDB, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xCB, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xDF, 0xA1, 0xDD, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xF5, 0xCA, 0xE9, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xEC, 0xEE, 0xEE, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF3, 0xF0, 0x00, 0x00, 0xDF, 0xBF, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xCB, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xD0, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF4, 0xD2, 0xE0, 0xBA, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xC0, /* 0xCC-0xCF */
+	0x00, 0x00, 0xCE, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xDC, 0xD2, 0xFD, 0xEA, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xF6, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xCA, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE8, 0xE9, 0x00, 0x00, 0xE3, 0xAC, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF3, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xCA, 0xA4, 0x00, 0x00, 0xDB, 0xF8, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xC7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_51[512] = {
+	0xEB, 0xF0, 0xF1, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE5, 0xE2, 0x00, 0x00, 0xCC, 0xCC, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xCB, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xE3, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xC1, /* 0x1C-0x1F */
+	0x00, 0x00, 0xD6, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xD0, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xB9, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xE3, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xD3, 0x00, 0x00, /* 0x38-0x3B */
+	0xE5, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE8, 0xB4, 0xEB, 0xC3, 0x00, 0x00, 0xEA, 0xAA, /* 0x40-0x43 */
+	0xFA, 0xFC, 0xF5, 0xF6, 0xF0, 0xBC, 0xFD, 0xD4, /* 0x44-0x47 */
+	0xE0, 0xBB, 0xCE, 0xC3, 0x00, 0x00, 0xD0, 0xBA, /* 0x48-0x4B */
+	0xF7, 0xBA, 0xD8, 0xF3, 0xF7, 0xCD, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xAE, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xD4, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xE7, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xEC, 0xFD, 0x00, 0x00, 0xD2, 0xAE, /* 0x64-0x67 */
+	0xEE, 0xEF, 0xD5, 0xD7, 0xEA, 0xE4, 0xF8, 0xA2, /* 0x68-0x6B */
+	0xCD, 0xEB, 0xD7, 0xBF, 0xFB, 0xB1, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xCD, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xDC, 0xB2, 0xD0, 0xEC, 0xCE, 0xFD, /* 0x74-0x77 */
+	0xEE, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xCC, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xD0, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xF7, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xFC, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xEE, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xB3, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xD8, 0xF4, 0x00, 0x00, 0xE9, 0xB7, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCE, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xD9, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xF1, 0x00, 0x00, /* 0xA8-0xAB */
+	0xD4, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xA7, 0xD5, 0xD2, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD6, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF4, 0xA2, 0x00, 0x00, 0xF1, 0xD7, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD5, 0xD8, 0x00, 0x00, 0xF0, 0xBD, /* 0xC8-0xCB */
+	0xD7, 0xD0, 0xD4, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD7, 0xCF, 0xEB, 0xEA, 0xFD, 0xEB, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xDB, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xFC, 0xC5, 0xCB, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xD5, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xF4, 0xC8, 0xE8, 0xEA, 0xF5, 0xF3, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF9, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_52[512] = {
+	0xD3, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD3, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xC2, 0xEF, 0xB7, /* 0x04-0x07 */
+	0xE7, 0xD4, 0x00, 0x00, 0xCA, 0xCA, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xFB, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xFA, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xAA, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xF4, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xF7, 0xF7, 0xDC, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xD7, 0xD7, 0xDF, 0xA2, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xBE, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD3, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xA4, 0xE1, 0xEC, /* 0x34-0x37 */
+	0xCF, 0xE7, 0xF3, 0xCB, 0xED, 0xA9, 0xCA, 0xBE, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xEF, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xCE, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xFB, 0xD0, 0xBB, /* 0x48-0x4B */
+	0xD5, 0xB7, 0xEE, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xF4, 0xA8, 0x00, 0x00, 0xDC, 0xF8, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xA7, /* 0x58-0x5B */
+	0x00, 0x00, 0xDA, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE0, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xED, 0xA5, 0xEE, 0xF2, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xF9, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xDC, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xF3, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xF8, 0xF2, 0x00, 0x00, 0xF4, 0xF9, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xF1, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBC, /* 0x84-0x87 */
+	0xDB, 0xF9, 0xD7, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xCB, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF0, 0xA5, 0xCB, 0xFD, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xF4, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xED, /* 0x9C-0x9F */
+	0xCA, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xAB, /* 0xA0-0xA3 */
+	0xD0, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xF0, 0xBE, 0xD2, 0xBD, 0xCC, 0xA4, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xB6, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xCC, 0xCD, 0x00, 0x00, 0xDA, 0xFA, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xF6, 0xCF, 0x00, 0x00, 0xE9, 0xB8, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD8, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xCC, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xCD, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xD4, 0xD1, 0xE9, 0xED, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xCA, 0xEB, 0xD9, 0xE2, 0x00, 0x00, 0xFD, 0xB2, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE3, 0xAD, 0xD6, 0xCC, 0xD9, 0xB4, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA7, 0xEE, 0xD3, /* 0xE0-0xE3 */
+	0xD0, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xB3, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xD5, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xCF, 0xE8, 0x00, 0x00, 0xED, 0xC3, 0xD0, 0xB2, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xFE, 0xDA, 0xA8, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_53[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xF8, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xFD, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xF8, 0xD1, 0x00, 0x00, 0xF8, 0xD2, /* 0x0C-0x0F */
+	0xDC, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xDD, 0xE2, 0xFB, 0xF9, 0xDD, 0xC1, /* 0x14-0x17 */
+	0x00, 0x00, 0xE3, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xED, 0xDD, 0xCE, 0xC4, 0x00, 0x00, 0xCB, 0xA1, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xE3, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xDD, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xF9, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xFB, /* 0x3C-0x3F */
+	0xCF, 0xA1, 0xE4, 0xA8, 0x00, 0x00, 0xF4, 0xB6, /* 0x40-0x43 */
+	0xEC, 0xFE, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xAE, /* 0x44-0x47 */
+	0xE7, 0xED, 0xFD, 0xC1, 0xDA, 0xE2, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xD8, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xDD, 0xE4, 0xF0, 0xEF, 0xF6, 0xF1, /* 0x50-0x53 */
+	0xFA, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xF5, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xCF, 0x00, 0x00, /* 0x58-0x5B */
+	0xDC, 0xD4, 0x00, 0x00, 0xDC, 0xA6, 0x00, 0x00, /* 0x5C-0x5F */
+	0xEF, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xCF, 0x00, 0x00, /* 0x64-0x67 */
+	0xE0, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xD6, /* 0x6C-0x6F */
+	0xEC, 0xD4, 0xEA, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xCA, 0xBF, 0xD5, 0xB0, 0x00, 0x00, 0xCF, 0xE9, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xF1, 0xED, 0x00, 0x00, 0xCC, 0xCF, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE4, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xED, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xD7, 0xD8, 0x00, 0x00, 0xFD, 0xA7, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xAB, /* 0x9C-0x9F */
+	0xF6, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCF, 0xF0, 0xF9, 0xBD, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE6, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xDB, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xD1, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE9, 0xD1, 0xF3, 0xA9, 0xD0, 0xE0, 0xE9, 0xD2, /* 0xC8-0xCB */
+	0x00, 0x00, 0xDA, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE2, 0xD2, 0x00, 0x00, 0xF6, 0xA2, 0xE1, 0xF4, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xE4, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE7, 0xD5, 0xF5, 0xBF, 0xCF, 0xA2, /* 0xE0-0xE3 */
+	0xCD, 0xAF, 0xCF, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xCD, 0xB0, 0xF1, 0xFE, 0xD0, 0xA3, /* 0xE8-0xEB */
+	0xE1, 0xAF, 0xF8, 0xA3, 0x00, 0x00, 0xCA, 0xA6, /* 0xEC-0xEF */
+	0xF7, 0xBB, 0xF2, 0xEA, 0xDE, 0xC8, 0xE9, 0xD3, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xDE, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_54[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xDE, /* 0x00-0x03 */
+	0xCA, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xF9, 0xEA, 0xD1, 0xCE, 0xEE, 0xD4, 0x00, 0x00, /* 0x08-0x0B */
+	0xD4, 0xD2, 0xD9, 0xA3, 0xFD, 0xA8, 0xD7, 0xD9, /* 0x0C-0x0F */
+	0xF7, 0xCE, 0xFA, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xD6, /* 0x18-0x1B */
+	0x00, 0x00, 0xD7, 0xF0, 0x00, 0x00, 0xEB, 0xE1, /* 0x1C-0x1F */
+	0xF8, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xFA, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xDD, 0xC3, 0x00, 0x00, 0xF9, 0xDF, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xEF, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xFD, 0xE5, 0xF6, 0xA3, 0x00, 0x00, 0xD9, 0xFC, /* 0x38-0x3B */
+	0xFD, 0xA9, 0x00, 0x00, 0xE7, 0xEE, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xE5, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xEF, 0xD0, 0x00, 0x00, 0xCD, 0xB1, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xF7, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF1, 0xB2, 0x00, 0x00, 0xF1, 0xB1, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xCD, 0xB2, 0x00, 0x00, 0xDA, 0xAB, /* 0x70-0x73 */
+	0x00, 0x00, 0xCA, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xE2, /* 0x78-0x7B */
+	0xFB, 0xBC, 0xD9, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xEE, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xD3, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xFB, 0xFA, 0x00, 0x00, 0xCF, 0xA4, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDC, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xF6, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xED, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA1, /* 0xA8-0xAB */
+	0xCE, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xA6, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xF9, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xEC, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE4, 0xEE, 0xF9, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xFB, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xF9, 0xEB, 0xEE, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xEA, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xCA, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF4, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xCD, 0xD6, 0xFC, 0xF6, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xC9, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xD4, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_55[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xF8, 0xA6, 0x00, 0x00, 0xDE, 0xCA, 0xF2, 0xC6, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xDA, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD3, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xD8, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xE6, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF3, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE4, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xE4, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xF6, 0xF2, 0x00, 0x00, 0xDF, 0xC2, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xFD, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xF6, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xBA, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xAF, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xE1, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xF0, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xCB, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE0, 0xBC, 0x00, 0x00, 0xF4, 0xCA, 0xD4, 0xFA, /* 0x84-0x87 */
+	0x00, 0x00, 0xFD, 0xAA, 0xF9, 0xE2, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xF4, 0xB7, 0xFD, 0xC2, 0xFC, 0xB0, 0x00, 0x00, /* 0x98-0x9B */
+	0xFD, 0xEC, 0xCA, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xBD, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xEA, 0xE7, 0xDF, 0xC3, 0xD1, 0xD2, /* 0xA8-0xAB */
+	0xCE, 0xE2, 0x00, 0x00, 0xD3, 0xA4, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xFD, 0xAB, 0x00, 0x00, 0xDF, 0xE0, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xF2, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xF0, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD0, 0xEE, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xAA, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xCB, /* 0xE0-0xE3 */
+	0xF6, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE1, 0xF5, 0xF1, 0xB3, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_56[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xA3, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xCA, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xCF, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xC4, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xB0, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xBF, 0x00, 0x00, /* 0x30-0x33 */
+	0xF6, 0xA4, 0x00, 0x00, 0xE3, 0xB6, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xC6, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xD0, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xED, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xDD, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xF7, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xE6, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xDE, 0xAD, 0x00, 0x00, 0xFA, 0xBF, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE5, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xED, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xA5, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xFD, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xF5, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xF6, 0xDE, 0xCC, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xDE, 0x00, 0x00, /* 0xDC-0xDF */
+	0xEC, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xCD, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xD6, 0xB7, 0xCD, 0xB3, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_57[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xD5, /* 0x00-0x03 */
+	0xE5, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xCF, 0xEA, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xD0, /* 0x08-0x0B */
+	0x00, 0x00, 0xEA, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xAE, 0xEA, 0xAD, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xF1, 0x00, 0x00, /* 0x14-0x17 */
+	0xD3, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xCF, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xEE, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD0, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xF2, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xF0, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xF2, 0xA3, 0x00, 0x00, 0xF7, 0xF8, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xB3, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xA9, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xD3, 0xBB, 0xCA, 0xEC, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF1, 0xA6, 0xCB, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xF7, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xCD, 0xDE, 0x00, 0x00, 0xF7, 0xA4, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xC0, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xDD, 0x00, 0x00, /* 0x6C-0x6F */
+	0xCC, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xCF, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xF6, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xF7, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xD3, 0xDC, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xFE, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xA7, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xEB, 0xD9, 0x00, 0x00, 0xCF, 0xA7, 0xEA, 0xAF, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xEF, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xB9, /* 0xC4-0xC7 */
+	0xF1, 0xD8, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xD8, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xF2, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xB4, /* 0xDC-0xDF */
+	0xDC, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xF3, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE3, 0xD0, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xFB, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xDB, 0xC6, 0xD0, 0xF1, 0x00, 0x00, /* 0xF8-0xFB */
+	0xD0, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_58[512] = {
+	0xCF, 0xDC, 0x00, 0x00, 0xD3, 0xD1, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xCC, 0xB1, 0xF7, 0xD8, 0x00, 0x00, /* 0x04-0x07 */
+	0xCB, 0xA8, 0xEB, 0xBC, 0xE4, 0xBE, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xDC, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xDC, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xF0, 0xA7, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xC0, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xED, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xEB, /* 0x2C-0x2F */
+	0xE5, 0xE8, 0xDC, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xED, 0xDE, 0xD3, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xF7, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xD4, 0xE7, 0xAB, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xC3, /* 0x4C-0x4F */
+	0x00, 0x00, 0xE1, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xF7, 0xB2, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xF3, /* 0x54-0x57 */
+	0xD3, 0xD2, 0x00, 0x00, 0xF5, 0xC0, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xDD, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xEE, 0xF3, 0xE7, 0xF1, 0x00, 0x00, /* 0x60-0x63 */
+	0xFD, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xF2, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF3, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xEE, 0xF4, 0x00, 0x00, 0xE2, 0xD3, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xD1, /* 0x80-0x83 */
+	0x00, 0x00, 0xDF, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE9, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xD7, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xF5, 0xCD, 0x00, 0x00, 0xF1, 0xF2, 0xFA, 0xC7, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xD9, 0xF8, 0xD4, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xE5, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xC5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xF2, 0xED, 0xDF, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xCB, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xDB, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE8, 0xB5, 0x00, 0x00, 0xD3, 0xA6, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xB5, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xF9, 0xC9, 0x00, 0x00, 0xE4, 0xE2, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xFB, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD7, 0xA4, 0xCE, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xD5, 0xD6, 0xE6, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE5, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xCD, /* 0xE8-0xEB */
+	0xEC, 0xF3, 0x00, 0x00, 0x00, 0x00, 0xED, 0xE0, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xEC, 0xEC, 0xFB, 0xBE, 0xDF, 0xEB, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE1, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_59[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xBE, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xD0, 0xF3, 0xE0, 0xAA, 0xE8, 0xE2, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE2, 0xD4, 0xD2, 0xFD, 0x00, 0x00, /* 0x18-0x1B */
+	0xE5, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xD3, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xDE, /* 0x24-0x27 */
+	0x00, 0x00, 0xF4, 0xB8, 0xF7, 0xBC, 0xDC, 0xFD, /* 0x28-0x2B */
+	0x00, 0x00, 0xE8, 0xEC, 0xE4, 0xE7, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE3, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xA8, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xF1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE5, 0xF2, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xF4, /* 0x44-0x47 */
+	0xD2, 0xAF, 0xDC, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xA5, 0xF1, 0xB4, /* 0x4C-0x4F */
+	0xFC, 0xB1, 0xCC, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xDD, 0xC6, 0xFA, 0xD1, 0x00, 0x00, 0xF7, 0xDF, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xA8, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xEE, 0xF5, 0x00, 0x00, 0xDE, 0xCE, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF3, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xAC, 0xEB, 0xC4, /* 0x68-0x6B */
+	0xED, 0xE1, 0xE0, 0xAB, 0xDD, 0xC7, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xB3, /* 0x70-0x73 */
+	0xD2, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xCA, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xFB, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xFD, 0xDD, 0xE5, /* 0x80-0x83 */
+	0xD8, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xF4, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xF5, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xED, 0xD0, 0xD2, /* 0x94-0x97 */
+	0x00, 0x00, 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xF6, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xDB, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xF7, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xD8, 0xD9, 0x00, 0x00, 0xF4, 0xA3, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xDD, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xD1, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xB5, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xED, 0xAB, 0x00, 0x00, 0xE3, 0xB7, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xEE, 0xBB, 0xCD, 0xB4, 0x00, 0x00, 0xE0, 0xF3, /* 0xD0-0xD3 */
+	0xEA, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xEC, 0xF5, 0xE8, 0xEE, 0x00, 0x00, /* 0xD8-0xDB */
+	0xCB, 0xA9, 0xF1, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xCD, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xEC, 0xA9, 0x00, 0x00, 0xF2, 0xEB, 0x00, 0x00, /* 0xE8-0xEB */
+	0xFD, 0xEF, 0x00, 0x00, 0xF9, 0xF3, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xE6, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xD8, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xAC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5A[512] = {
+	0x00, 0x00, 0xEA, 0xCE, 0x00, 0x00, 0xE8, 0xDF, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xDE, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xD2, 0xA6, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF4, /* 0x18-0x1B */
+	0xD1, 0xD6, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xC2, /* 0x1C-0x1F */
+	0xE3, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE4, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xD8, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xA5, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xF3, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xD7, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xE8, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE8, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xE6, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xE6, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xFE, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xDA, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xAC, 0xEA, 0xB0, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE3, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xCA, 0xAA, 0xE1, 0xF9, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xEA, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF2, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xFA, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xEE, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xF4, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xD2, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+};
+
+static unsigned char u2c_5B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xFB, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xFD, 0xF0, 0x00, 0x00, 0xE0, 0xBD, /* 0x08-0x0B */
+	0xCE, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xC6, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xAE, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDF, 0xC5, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xBE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xED, 0xAD, 0xFA, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCD, 0xEE, 0xED, 0xA6, 0x00, 0x00, 0xED, 0xAE, /* 0x54-0x57 */
+	0xF0, 0xED, 0x00, 0x00, 0xDD, 0xA1, 0x00, 0x00, /* 0x58-0x5B */
+	0xED, 0xAF, 0xFC, 0xF8, 0x00, 0x00, 0xD8, 0xEB, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xF9, /* 0x60-0x63 */
+	0xCD, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xFA, 0xA9, 0x00, 0x00, 0xE1, 0xDD, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE2, 0xD5, 0xED, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xDD, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xF9, 0xCA, 0x00, 0x00, 0xEA, 0xE8, 0x00, 0x00, /* 0x78-0x7B */
+	0xE5, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xD3, 0xEB, 0x00, 0x00, 0xE9, 0xD4, /* 0x84-0x87 */
+	0xE1, 0xFA, 0xE4, 0xCC, 0x00, 0x00, 0xE1, 0xE4, /* 0x88-0x8B */
+	0xE8, 0xC7, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xDB, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xD5, /* 0x90-0x93 */
+	0x00, 0x00, 0xF7, 0xB5, 0xFC, 0xF3, 0xF0, 0xF3, /* 0x94-0x97 */
+	0xCE, 0xAF, 0xF1, 0xB5, 0xEF, 0xD2, 0xE8, 0xC8, /* 0x98-0x9B */
+	0xEB, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xD4, 0xE0, 0xBE, /* 0xA0-0xA3 */
+	0xE3, 0xF8, 0xEA, 0xE9, 0xFC, 0xB2, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE0, 0xF4, 0x00, 0x00, 0xCF, 0xE0, 0x00, 0x00, /* 0xAC-0xAF */
+	0xEE, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xAA, /* 0xB0-0xB3 */
+	0xE6, 0xC3, 0xE1, 0xB2, 0xCA, 0xAB, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE3, 0xE4, 0xE9, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD6, /* 0xBC-0xBF */
+	0xF3, 0xF2, 0x00, 0x00, 0xEE, 0xD6, 0xEA, 0xB2, /* 0xC0-0xC3 */
+	0xD0, 0xF6, 0xEC, 0xD9, 0xDA, 0xCB, 0xCF, 0xA8, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xDD, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xD8, 0xDB, 0x00, 0x00, 0xF9, 0xCE, 0xE9, 0xD5, /* 0xD0-0xD3 */
+	0xE3, 0xD1, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xBC, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xAC, 0xF3, 0xCC, /* 0xDC-0xDF */
+	0x00, 0x00, 0xCD, 0xFB, 0xF6, 0xD6, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE7, 0xF5, 0xE8, 0xEF, 0xE3, 0xF9, 0xD2, 0xBB, /* 0xE4-0xE7 */
+	0xF3, 0xF3, 0xE3, 0xFB, 0x00, 0x00, 0xDE, 0xD0, /* 0xE8-0xEB */
+	0xCE, 0xB0, 0x00, 0x00, 0xD6, 0xF7, 0xF1, 0xD9, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xF5, 0xC1, 0xDC, 0xC4, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xF5, 0xBB, 0x00, 0x00, 0xDE, 0xD1, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_5C[512] = {
+	0x00, 0x00, 0xDC, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xDE, 0xD2, 0x00, 0x00, 0x00, 0x00, 0xED, 0xE2, /* 0x04-0x07 */
+	0xEE, 0xF6, 0xEA, 0xCF, 0xF0, 0xEE, 0xE3, 0xFC, /* 0x08-0x0B */
+	0x00, 0x00, 0xD3, 0xDF, 0xD3, 0xF4, 0xE1, 0xB3, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE1, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xD3, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xDF, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE9, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xDB, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF6, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xE3, 0xB9, 0xEB, 0xC5, 0xF4, 0xA9, 0xCD, 0xB6, /* 0x38-0x3B */
+	0xD2, 0xF9, 0x00, 0x00, 0xDA, 0xAD, 0xD2, 0xE3, /* 0x3C-0x3F */
+	0xCF, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xCB, 0xDC, 0xCC, 0xFA, 0x00, 0x00, /* 0x44-0x47 */
+	0xCF, 0xDD, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xA9, /* 0x48-0x4B */
+	0x00, 0x00, 0xE3, 0xBB, 0xE3, 0xBA, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xE0, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xEE, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xB3, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xD3, 0xF5, 0x00, 0x00, 0xD7, 0xA6, 0x00, 0x00, /* 0x60-0x63 */
+	0xF6, 0xB5, 0xD7, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE1, 0xD5, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xEA, /* 0x6C-0x6F */
+	0x00, 0x00, 0xDF, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xFD, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD0, 0xF7, 0xED, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xCB, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE4, 0xDB, 0x00, 0x00, 0xE1, 0xFB, /* 0xA8-0xAB */
+	0xCB, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xD3, 0xE0, 0x00, 0x00, 0xE4, 0xBF, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xFB, 0xC0, 0x00, 0x00, 0xDA, 0xBE, /* 0xB4-0xB7 */
+	0xE4, 0xCD, 0x00, 0x00, 0xD6, 0xB9, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xC0, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE1, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xF6, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xDF, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE4, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xE7, /* 0xEC-0xEF */
+	0xDC, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xFA, 0xD6, 0x00, 0x00, 0xD3, 0xF6, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xDA, /* 0xF8-0xFB */
+	0x00, 0x00, 0xFA, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xFD, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xD5, 0xCF, 0xD0, 0xF8, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xCD, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xF5, 0xCB, 0x00, 0x00, 0xE4, 0xF0, 0xCB, 0xAB, /* 0x14-0x17 */
+	0x00, 0x00, 0xD7, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xFE, /* 0x24-0x27 */
+	0x00, 0x00, 0xDD, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xAE, /* 0x48-0x4B */
+	0xCA, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xD5, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE3, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE8, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xAB, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xA9, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xF7, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xD4, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCE, 0xE4, 0x00, 0x00, 0xE8, 0xF2, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xF5, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE7, 0xAE, 0x00, 0x00, 0xD6, 0xBA, 0x00, 0x00, /* 0xB8-0xBB */
+	0xDF, 0xEC, 0xE4, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE8, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xB5, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xDC, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF4, 0xB9, 0xF1, 0xB6, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE2, 0xDE, 0xE1, 0xB5, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xCD, 0xEF, 0xF1, 0xA7, 0xCE, 0xE5, /* 0xE4-0xE7 */
+	0xCB, 0xDD, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xE3, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xAC, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xD0, 0xF9, 0xEC, 0xAB, 0xDE, 0xD3, /* 0xF0-0xF3 */
+	0xF7, 0xE9, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xF5, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE1, 0xDE, 0xCB, 0xEE, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xBC, 0xF8, 0xD6, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xEE, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xFD, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xF7, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xDE, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xF2, 0xED, 0x00, 0x00, 0xDB, 0xD9, /* 0x18-0x1B */
+	0x00, 0x00, 0xF0, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE1, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD4, /* 0x28-0x2B */
+	0x00, 0x00, 0xE0, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xE3, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xE1, 0x00, 0x00, /* 0x34-0x37 */
+	0xDF, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xD9, 0xB6, 0x00, 0x00, 0xFD, 0xAC, /* 0x3C-0x3F */
+	0xEF, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE4, 0xC1, 0xF8, 0xEB, 0x00, 0x00, 0xDB, 0xAC, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xFC, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xD8, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xBA, /* 0x5C-0x5F */
+	0x00, 0x00, 0xDB, 0xDF, 0xD3, 0xD3, 0xF8, 0xC7, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xCE, 0xF8, 0xC1, /* 0x70-0x73 */
+	0xD2, 0xB4, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xB4, /* 0x74-0x77 */
+	0xFA, 0xB9, 0xCA, 0xCF, 0x00, 0x00, 0xFC, 0xB3, /* 0x78-0x7B */
+	0xEA, 0xEA, 0xEA, 0xEB, 0xD0, 0xFA, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xED, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xE7, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xC9, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xED, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xEE, 0xBC, 0x00, 0x00, 0xEF, 0xC1, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xD2, 0x00, 0x00, /* 0x98-0x9B */
+	0xDD, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xDF, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xF8, 0xF1, 0xA8, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xB7, /* 0xA8-0xAB */
+	0x00, 0x00, 0xEF, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE4, 0xDD, 0xDF, 0xEE, 0xCB, 0xAC, /* 0xB4-0xB7 */
+	0xE9, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xEC, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xCB, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xF9, 0xBF, 0xD6, 0xAF, 0xD5, 0xC6, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xCF, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xA9, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xF8, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xB7, 0xEE, 0xF8, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xD9, /* 0xDC-0xDF */
+	0xF3, 0xDF, 0x00, 0x00, 0xF8, 0xC8, 0xCE, 0xC6, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xD5, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xE6, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xC5, 0xEF, 0xD5, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xEF, 0xFC, 0xDF, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_5F[512] = {
+	0x00, 0x00, 0xDC, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD6, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xC9, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xD2, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE3, 0xBD, 0x00, 0x00, 0xCF, 0xE1, /* 0x10-0x13 */
+	0xF0, 0xC0, 0xEC, 0xDA, 0x00, 0x00, 0xDD, 0xD7, /* 0x14-0x17 */
+	0xFB, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xAC, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xA9, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xD7, 0xFB, 0xC1, /* 0x24-0x27 */
+	0x00, 0x00, 0xD2, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE5, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xED, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xAD, 0x00, 0x00, /* 0x38-0x3B */
+	0xF9, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xF7, 0xA5, 0x00, 0x00, 0xCB, 0xAE, 0x00, 0x00, /* 0x48-0x4B */
+	0xDA, 0xAF, 0x00, 0x00, 0xD8, 0xB6, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xA7, 0xFB, 0xB2, /* 0x54-0x57 */
+	0x00, 0x00, 0xFD, 0xC4, 0x00, 0x00, 0xEC, 0xAD, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xA1, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xE9, 0xE9, 0xEE, /* 0x64-0x67 */
+	0x00, 0x00, 0xF3, 0xF4, 0xF8, 0xF3, 0xF0, 0xC1, /* 0x68-0x6B */
+	0xDE, 0xAF, 0xF8, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF3, 0xE0, 0xE7, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xAD, /* 0x74-0x77 */
+	0x00, 0x00, 0xE6, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xF9, 0xA8, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xD8, /* 0x7C-0x7F */
+
+	0xE8, 0xD9, 0xEF, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xD3, 0xE2, 0x00, 0x00, 0xE2, 0xDF, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xE0, 0xD7, 0xC8, /* 0x88-0x8B */
+	0xFD, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDF, 0xEF, 0xCC, 0xD3, 0xD3, 0xF9, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xF0, /* 0x94-0x97 */
+	0xDB, 0xC7, 0xDE, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xF4, 0x00, 0x00, /* 0x9C-0x9F */
+	0xD5, 0xD0, 0xE5, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xFC, 0xC7, 0xDC, 0xD6, 0xE2, 0xE0, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xB0, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF3, 0xA3, 0x00, 0x00, 0xD3, 0xEC, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF4, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xFD, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xFD, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xF9, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xD0, 0xFB, 0xEC, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xBC, 0xF2, 0xA4, /* 0xD4-0xD7 */
+	0xD8, 0xCE, 0xD8, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xF5, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xE1, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xD2, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xFB, 0xEC, 0x00, 0x00, 0xDD, 0xC8, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_60[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xE8, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xC1, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xD7, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xD6, 0xBB, 0xDE, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xF7, 0xBD, 0xEC, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xD0, 0xE1, 0x00, 0x00, 0xE0, 0xF5, /* 0x24-0x27 */
+	0xEA, 0xB3, 0x00, 0x00, 0xCE, 0xD6, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xA5, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xEC, 0xF6, 0xE2, 0xE1, 0xE3, 0xBE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xFC, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xCD, 0xF0, 0x00, 0x00, 0xF9, 0xF6, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xDF, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE5, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xCE, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xE1, 0xED, 0xB0, /* 0x60-0x63 */
+	0xFD, 0xD1, 0xF6, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF9, 0xCF, 0xEB, 0xDA, 0xCA, 0xC1, 0x00, 0x00, /* 0x68-0x6B */
+	0xD2, 0xB8, 0xCD, 0xF1, 0x00, 0x00, 0xE3, 0xD3, /* 0x6C-0x6F */
+	0xFD, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE6, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE3, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xF0, 0xAA, 0xF9, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xFC, 0xE2, 0x00, 0x00, 0xF8, 0xA7, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE5, 0xEE, 0xF9, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xF6, /* 0x9C-0x9F */
+	0xEA, 0xED, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xB4, /* 0xA0-0xA3 */
+	0xF5, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xDC, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xF0, 0xF5, 0x00, 0x00, 0xDD, 0xE8, 0xD3, 0xED, /* 0xB0-0xB3 */
+	0xF5, 0xFC, 0x00, 0x00, 0xDA, 0xBF, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xCC, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xD3, 0xFA, 0xF4, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xEF, 0xD7, 0x00, 0x00, 0xD4, 0xC3, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xFB, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xED, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE0, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xEE, /* 0xDC-0xDF */
+	0xFB, 0xB3, 0xE4, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xF6, 0xE7, 0xD2, 0xDD, 0x00, 0x00, 0xDF, 0xCC, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xC9, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE5, 0xA9, 0xE0, 0xF6, 0xF6, 0xB3, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_61[512] = {
+	0x00, 0x00, 0xE1, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xF0, 0x00, 0x00, /* 0x04-0x07 */
+	0xEA, 0xEF, 0xEA, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xDA, 0xC0, 0xF8, 0xB4, 0xEB, 0xF2, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE4, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xD7, 0xE4, 0xF1, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xEF, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xD7, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xFC, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xF3, 0xE1, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xC4, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xE3, 0xE5, 0x00, 0x00, 0xCB, 0xC5, 0xEA, 0xB4, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xBD, 0x00, 0x00, /* 0x40-0x43 */
+	0xD7, 0xC9, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xDB, /* 0x44-0x47 */
+	0xED, 0xB1, 0x00, 0x00, 0xCC, 0xC3, 0xF7, 0xBE, /* 0x48-0x4B */
+	0xFC, 0xCB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xF4, /* 0x50-0x53 */
+	0x00, 0x00, 0xD9, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xF3, 0xD3, 0xF3, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xF7, 0xE4, 0x00, 0x00, 0xF7, 0xD1, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xB7, 0xCE, 0xB1, /* 0x60-0x63 */
+	0xCA, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xB4, /* 0x64-0x67 */
+	0xCB, 0xC6, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xF6, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xE7, 0x00, 0x00, /* 0x6C-0x6F */
+	0xEA, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xD4, 0xCB, 0xAF, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xF4, 0xAA, 0xE9, 0xAF, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xF5, 0xC3, 0xE9, 0xD8, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xE9, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xF3, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD5, 0xFB, 0xDE, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xF4, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xFD, 0xF3, 0xFD, 0xF2, 0xF7, 0xA6, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xDD, 0xC9, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xD3, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xCC, 0xA8, 0x00, 0x00, 0xDA, 0xC1, /* 0xA8-0xAB */
+	0xCC, 0xD5, 0x00, 0x00, 0xD9, 0xE4, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xCA, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xE3, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xBC, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xF0, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xC4, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xD0, /* 0xC4-0xC7 */
+	0xFA, 0xAB, 0xEB, 0xEB, 0xE7, 0xF8, 0xD9, 0xE5, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xD7, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xA4, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xFB, 0xFC, 0xE3, /* 0xF4-0xF7 */
+	0xFA, 0xD8, 0x00, 0x00, 0xF3, 0xD5, 0x00, 0x00, /* 0xF8-0xFB */
+	0xCF, 0xAB, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xF3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_62[512] = {
+	0xD5, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xD4, /* 0x04-0x07 */
+	0xCD, 0xFC, 0x00, 0x00, 0xD9, 0xE6, 0x00, 0x00, /* 0x08-0x0B */
+	0xE2, 0xF9, 0xE2, 0xA1, 0xEB, 0xD4, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE0, 0xF7, 0xE4, 0xB2, 0xCC, 0xFC, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xE4, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xAB, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBD, /* 0x1C-0x1F */
+	0x00, 0x00, 0xCA, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xB8, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xC0, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEE, 0xFA, 0xFD, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xD3, 0xE3, 0x00, 0x00, 0xFB, 0xC2, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xE8, 0xDB, 0xAE, /* 0x3C-0x3F */
+	0xE1, 0xB6, 0xF8, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xBF, /* 0x44-0x47 */
+	0xFB, 0xC3, 0xDD, 0xEA, 0x00, 0x00, 0xE2, 0xA2, /* 0x48-0x4B */
+	0x00, 0x00, 0xEE, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xE8, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xF6, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xCA, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xD0, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xA6, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xDD, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE4, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xAF, /* 0x7C-0x7F */
+
+	0xD0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xF4, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xCC, 0xBC, 0xF7, 0xEA, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE5, 0xE4, 0xDF, 0xF1, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xF7, 0xE1, 0x00, 0x00, 0xF9, 0xF7, /* 0x94-0x97 */
+	0xEF, 0xB9, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xD8, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xA9, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF8, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xEE, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xD8, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE4, 0xE3, 0xF5, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xD9, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xE7, /* 0xC4-0xC7 */
+	0xD2, 0xB9, 0xD5, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xDA, 0xE5, 0xDA, 0xD0, 0x00, 0x00, 0xD1, 0xD9, /* 0xCC-0xCF */
+	0xCE, 0xD8, 0x00, 0x00, 0xCB, 0xDE, 0xF4, 0xAC, /* 0xD0-0xD3 */
+	0xDA, 0xFB, 0x00, 0x00, 0xF6, 0xE9, 0xE8, 0xF3, /* 0xD4-0xD7 */
+	0xCF, 0xAC, 0xF0, 0xF0, 0x00, 0x00, 0xF4, 0xFD, /* 0xD8-0xDB */
+	0xDB, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xCE, 0xC0, 0xE3, 0xD4, 0xD1, 0xCF, 0xF1, 0xF5, /* 0xEC-0xEF */
+	0x00, 0x00, 0xCD, 0xF2, 0x00, 0x00, 0xCF, 0xEB, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xB8, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xA6, 0xD1, 0xDA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_63[512] = {
+	0x00, 0x00, 0xF2, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA6, /* 0x04-0x07 */
+	0x00, 0x00, 0xE4, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xD3, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xA9, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xC9, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xD8, 0xE6, 0xC9, /* 0x38-0x3B */
+	0x00, 0x00, 0xD8, 0xB8, 0xFA, 0xF3, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF3, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xF8, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xF3, /* 0x4C-0x4F */
+	0xE6, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xF8, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xE9, /* 0x64-0x67 */
+	0xDE, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xDF, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xEC, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xDF, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xF4, 0xD2, 0xBA, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xF2, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xB7, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE2, 0xA3, 0xD3, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xED, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xC9, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xFA, 0x00, 0x00, /* 0x94-0x97 */
+	0xCF, 0xDE, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xD0, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xD5, 0xD3, 0xF3, 0xF5, 0xF7, 0xAE, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xEF, 0xC8, 0x00, 0x00, 0xCD, 0xF3, /* 0xA4-0xA7 */
+	0xF5, 0xCF, 0xE5, 0xF3, 0xF0, 0xC2, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCA, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xEA, 0xF1, 0x00, 0x00, 0xD0, 0xA6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xDA, /* 0xCC-0xCF */
+	0xF0, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xE7, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xC0, 0xFC, 0xB5, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE4, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xCC, 0xA9, 0xFD, 0xC6, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEA, 0xB5, 0x00, 0x00, 0xE5, 0xAA, 0xDF, 0xBA, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_64[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE1, 0xDF, 0x00, 0x00, 0xDA, 0xD1, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xE1, 0xB8, 0x00, 0x00, 0xE8, 0xF4, 0xD3, 0xFD, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xE2, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xCA, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xDA, 0xE6, 0xF7, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xCD, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xB6, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xEE, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xF5, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xD8, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA7, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xD9, 0xB8, 0xD9, 0xB9, 0xEF, 0xC9, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xD6, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF7, 0xCB, 0xDF, 0xAE, 0xE8, 0xF5, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xB5, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xD5, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xF4, 0xCC, 0xDA, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xE8, /* 0xA8-0xAB */
+	0x00, 0x00, 0xF7, 0xEB, 0xF5, 0xC9, 0x00, 0x00, /* 0xAC-0xAF */
+	0xF3, 0xBC, 0x00, 0x00, 0xDA, 0xD2, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xB5, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE8, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xD6, 0xCF, 0xF4, 0xBA, 0x00, 0x00, 0xF7, 0xC9, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xAA, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xF0, 0xC3, 0xCC, 0xD6, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xD3, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xD3, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xDB, 0xFB, 0x00, 0x00, 0xCB, 0xE0, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xD3, 0xE4, 0xF6, 0xF7, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xD5, 0xBA, 0xF3, 0xCD, 0xCB, 0xE1, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xEB, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xAD, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xFC, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xEC, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xF6, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_65[512] = {
+	0xDA, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xF7, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE5, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xE0, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xFD, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xE6, 0xFC, 0xAB, /* 0x28-0x2B */
+	0xD5, 0xBB, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA8, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xA5, 0xCD, 0xB9, /* 0x34-0x37 */
+	0xEA, 0xF2, 0xCB, 0xC7, 0x00, 0x00, 0xCD, 0xF4, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xAF, 0xEF, 0xD9, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xCD, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xFC, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xDF, 0xF3, 0xCE, 0xE7, 0xDA, 0xC2, /* 0x4C-0x4F */
+	0x00, 0x00, 0xCF, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xF9, 0xF8, 0xA8, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xE2, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xF2, 0xDF, 0xA4, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xC4, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xCC, 0xD7, 0xE5, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xBB, 0x00, 0x00, /* 0x70-0x73 */
+	0xEF, 0xDA, 0xEE, 0xD8, 0x00, 0x00, 0xDD, 0xA7, /* 0x74-0x77 */
+	0xE2, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xC0, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xB0, 0xF8, 0xCA, /* 0x80-0x83 */
+	0x00, 0x00, 0xFC, 0xFA, 0x00, 0x00, 0xD9, 0xFE, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xDE, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDD, 0xEC, 0xDA, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xE0, /* 0x94-0x97 */
+	0x00, 0x00, 0xD6, 0xF9, 0x00, 0x00, 0xCD, 0xD7, /* 0x98-0x9B */
+	0xDE, 0xD8, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xF8, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE4, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xD0, 0xC5, 0xF4, 0xAE, 0x00, 0x00, 0xDD, 0xA8, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xC5, /* 0xA8-0xAB */
+	0xF3, 0xD6, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xD9, /* 0xAC-0xAF */
+	0xE3, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xA8, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xDB, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE5, 0xDA, 0xE3, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xDB, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xD5, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xC1, /* 0xC8-0xCB */
+	0xEF, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xE9, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xB2, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xFD, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xD9, 0xE9, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xFE, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xEC, 0xED, 0xD3, 0xA9, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF2, 0xA9, 0xF0, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE2, 0xE2, 0xE9, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xF9, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE9, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xDA, 0xDA, 0xC3, /* 0xF8-0xFB */
+	0xDA, 0xC4, 0xD4, 0xC5, 0x00, 0x00, 0xE7, 0xFA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_66[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xE0, 0xE3, 0xB0, /* 0x04-0x07 */
+	0x00, 0x00, 0xDB, 0xB2, 0xFB, 0xC4, 0x00, 0x00, /* 0x08-0x0B */
+	0xF3, 0xE3, 0x00, 0x00, 0xD9, 0xA5, 0xFB, 0xE7, /* 0x0C-0x0F */
+	0xDD, 0xCB, 0xD0, 0xD4, 0x00, 0x00, 0xE6, 0xB6, /* 0x10-0x13 */
+	0xE0, 0xAE, 0xFD, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xB5, 0xE0, 0xF8, /* 0x1C-0x1F */
+	0xE7, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xF5, 0xF0, 0x00, 0x00, 0xD8, 0xDC, /* 0x24-0x27 */
+	0xED, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xE1, 0xB9, 0x00, 0x00, 0xE3, 0xC0, /* 0x2C-0x2F */
+	0xF9, 0xC0, 0xE9, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xD9, 0xDB, 0x00, 0x00, 0xF3, 0xE4, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xB6, 0xE4, 0xE9, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xF0, 0xC5, 0xE3, 0xC1, 0xFC, 0xCC, /* 0x40-0x43 */
+	0xFC, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF2, 0xCB, 0x00, 0x00, 0xF2, 0xCC, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xCF, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xF1, 0xDB, 0x00, 0x00, 0xFA, 0xD9, /* 0x58-0x5B */
+	0x00, 0x00, 0xF1, 0xB8, 0xFD, 0xF5, 0xE0, 0xF9, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xE7, 0xFB, 0xFC, 0xB7, 0xFC, 0xE4, 0xFB, 0xC5, /* 0x64-0x67 */
+	0xE3, 0xE7, 0xD8, 0xB9, 0x00, 0x00, 0xF6, 0xF8, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xC5, 0xCC, 0xD8, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xAF, /* 0x70-0x73 */
+	0xF4, 0xE7, 0x00, 0x00, 0xEF, 0xDC, 0xCF, 0xFC, /* 0x74-0x77 */
+	0xEF, 0xDD, 0x00, 0x00, 0xF2, 0xAA, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xFD, 0xBE, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xAC, /* 0x84-0x87 */
+	0xFD, 0xBB, 0xFD, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xB2, 0x00, 0x00, /* 0x8C-0x8F */
+	0xEA, 0xD1, 0xDF, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xEC, 0xE4, 0xDE, /* 0x94-0x97 */
+	0xE5, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xD9, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCD, 0xBC, 0x00, 0x00, 0xF3, 0xE5, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xD5, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xBA, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xE7, 0xFB, 0xB5, /* 0xB0-0xB3 */
+	0xF8, 0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE0, 0xE7, 0x00, 0x00, 0xCC, 0xD9, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xC6, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE7, 0xA5, 0x00, 0x00, 0xD5, 0xF5, 0xD3, 0xBE, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xFC, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xF2, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xDF, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xE8, 0xF8, 0xF8, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xCE, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xF6, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE8, 0xD8, 0x00, 0x00, 0xCD, 0xD8, 0xE7, 0xD6, /* 0xF0-0xF3 */
+	0xCC, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xE3, /* 0xF4-0xF7 */
+	0xDF, 0xF6, 0xF0, 0xC7, 0xF0, 0xC6, 0x00, 0x00, /* 0xF8-0xFB */
+	0xD8, 0xBA, 0x00, 0x00, 0xF1, 0xF4, 0xF4, 0xF0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_67[512] = {
+	0xF5, 0xCC, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xE5, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xEA, 0xC5, 0xEA, 0xF3, 0x00, 0x00, 0xDD, 0xDB, /* 0x08-0x0B */
+	0x00, 0x00, 0xDC, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xDE, 0xFD, 0xF2, 0xF9, 0x00, 0x00, 0xD5, 0xC7, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xD0, /* 0x18-0x1B */
+	0x00, 0x00, 0xF0, 0xC8, 0xD1, 0xA1, 0xD1, 0xA2, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xD4, 0xD6, 0xE8, /* 0x24-0x27 */
+	0xD9, 0xCA, 0x00, 0x00, 0xDA, 0xB1, 0xD8, 0xC7, /* 0x28-0x2B */
+	0xDC, 0xE2, 0xF3, 0xCE, 0xF5, 0xF4, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF1, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xDA, 0xD3, 0x00, 0x00, 0xF6, 0xEA, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xF5, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xFD, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xD2, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xDF, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xDD, 0xFA, 0xBA, /* 0x4C-0x4F */
+	0xEE, 0xA7, 0xF5, 0xBD, 0x00, 0x00, 0xF8, 0xF5, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xE8, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xD4, 0xE1, 0x00, 0x00, 0xD1, 0xA3, 0xE1, 0xD6, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xF9, 0xF8, 0x00, 0x00, 0xDB, 0xCA, /* 0x6C-0x6F */
+	0xCB, 0xF9, 0xD4, 0xD4, 0x00, 0x00, 0xD9, 0xDC, /* 0x70-0x73 */
+	0x00, 0x00, 0xEE, 0xBE, 0x00, 0x00, 0xF7, 0xED, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xEE, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE6, 0xF7, 0xF9, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xED, /* 0x84-0x87 */
+	0x00, 0x00, 0xE8, 0xDB, 0x00, 0x00, 0xDB, 0xB3, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xF7, /* 0x8C-0x8F */
+	0xE0, 0xB0, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xE2, /* 0x90-0x93 */
+	0x00, 0x00, 0xF6, 0xD7, 0x00, 0x00, 0xD7, 0xF9, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xDD, 0x00, 0x00, /* 0x98-0x9B */
+	0xCD, 0xFD, 0xF2, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xBD, /* 0xAC-0xAF */
+	0xF8, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xAC, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xAD, 0xCA, 0xAE, /* 0xB4-0xB7 */
+	0xCF, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xC2, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xDC, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xDA, /* 0xCC-0xCF */
+	0xD9, 0xBB, 0xCA, 0xF3, 0xF6, 0xD3, 0xE6, 0xF8, /* 0xD0-0xD3 */
+	0xEA, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xF6, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF6, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xCF, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xCA, 0xD3, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xAF, /* 0xEC-0xEF */
+	0xD2, 0xB0, 0xF1, 0xBA, 0x00, 0x00, 0xD7, 0xB3, /* 0xF0-0xF3 */
+	0xE3, 0xC3, 0xF3, 0xFD, 0xDE, 0xDA, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xDB, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xDE, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_68[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xE3, 0xEE, 0xFB, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xF7, 0xD7, 0xCA, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xCE, 0xE8, 0xDB, 0xDB, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xBB, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xF1, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xFA, 0xB7, 0xD0, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xCC, 0xAB, 0xEE, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xCB, 0xFA, 0xF9, 0xF9, 0xCC, 0xFD, 0xD3, 0xFE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xE4, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xEE, 0x00, 0x00, /* 0x4C-0x4F */
+	0xD4, 0xD5, 0xDF, 0xCD, 0x00, 0x00, 0xFC, 0xB8, /* 0x50-0x53 */
+	0xD1, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xF2, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xD2, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xD4, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xD5, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xD8, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xD9, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xA9, /* 0x90-0x93 */
+	0xF6, 0xBC, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xDB, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xF0, 0xC9, 0x00, 0x00, 0xFC, 0xFC, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE8, 0xC9, 0xF4, 0xFE, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xFC, /* 0xA4-0xA7 */
+	0xD7, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xDE, 0xDC, 0x00, 0x00, 0xF0, 0xAC, /* 0xAC-0xAF */
+	0xCC, 0xFE, 0xCD, 0xE1, 0x00, 0x00, 0xE1, 0xBA, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xDB, 0xEF, 0xDA, 0xB2, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xD1, 0xA5, 0xDC, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD8, 0xF6, 0x00, 0x00, 0xD1, 0xA4, /* 0xC8-0xCB */
+	0x00, 0x00, 0xCD, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xEA, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xF0, 0xF7, 0x00, 0x00, 0xF0, 0xCA, /* 0xD4-0xD7 */
+	0xD0, 0xBE, 0x00, 0x00, 0xDD, 0xDC, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xD6, /* 0xDC-0xDF */
+	0xD3, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xD0, /* 0xE4-0xE7 */
+	0xCD, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xB5, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xF8, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xD4, 0xA1, 0xCE, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_69[512] = {
+	0xE8, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xEB, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE3, 0xD5, 0xF5, 0xD0, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xA1, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xA7, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE5, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xE6, 0xCB, 0x00, 0x00, 0xF5, 0xF1, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xC5, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xA3, /* 0x50-0x53 */
+	0xE0, 0xDB, 0xF6, 0xEB, 0x00, 0x00, 0xCB, 0xF1, /* 0x54-0x57 */
+	0x00, 0x00, 0xD9, 0xEA, 0xF5, 0xA2, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xD1, 0x00, 0x00, /* 0x5C-0x5F */
+	0xD1, 0xF8, 0xEA, 0xF8, 0xEA, 0xF9, 0xDA, 0xB3, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xEF, 0xDF, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xEF, /* 0x68-0x6B */
+	0x00, 0x00, 0xE5, 0xF6, 0xEE, 0xBF, 0xE2, 0xE4, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD0, 0xBF, 0x00, 0x00, 0xFA, 0xAC, /* 0x74-0x77 */
+	0xF5, 0xD1, 0xE7, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xE9, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xCE, /* 0x98-0x9B */
+	0xDB, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xFC, 0xCE, 0x00, 0x00, 0xDD, 0xEE, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xB4, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xD7, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xB4, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xCD, 0xBE, 0x00, 0x00, 0xDA, 0xE9, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xB0, /* 0xC8-0xCB */
+	0xF7, 0xD9, 0xF3, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xCE, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCE, 0xAA, 0x00, 0x00, 0xCB, 0xC8, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xA7, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF0, 0xCB, 0x00, 0x00, 0xD0, 0xC7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6A[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xC5, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xE0, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xD5, 0xDA, 0x00, 0x00, 0xD7, 0xA7, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xC0, /* 0x14-0x17 */
+	0x00, 0x00, 0xF8, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xD2, 0xED, 0xE9, /* 0x1C-0x1F */
+	0x00, 0x00, 0xD9, 0xBC, 0x00, 0x00, 0xE5, 0xC6, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF5, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xDA, 0xD4, 0xE2, 0xA7, 0xFB, 0xFC, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF1, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xCA, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xE8, 0xFA, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xE9, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF8, 0xE2, 0xE5, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xD0, 0xB9, 0xD4, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xA6, /* 0x5C-0x5F */
+	0x00, 0x00, 0xDF, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xF4, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xD3, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xCC, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xEF, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xE5, 0xE5, 0xD0, 0xD5, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xFC, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xFC, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xFE, 0xED, 0xEA, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xB1, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xE3, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xA2, 0xCF, 0xF6, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xD0, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xEA, 0xF1, 0xEE, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xCB, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA1, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_6B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD5, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xED, 0x00, 0x00, /* 0x08-0x0B */
+	0xED, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xB2, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xBC, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xFD, 0xE2, 0xF3, 0xAD, 0x00, 0x00, 0xFD, 0xDB, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xB0, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xA7, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xFD, 0xE3, 0xCE, 0xB3, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xE4, 0xFA, 0xCE, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xCA, 0xB0, 0x00, 0x00, 0xF7, 0xA7, 0x00, 0x00, /* 0x4C-0x4F */
+	0xCF, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xA2, /* 0x5C-0x5F */
+	0x00, 0x00, 0xFC, 0xB6, 0xF2, 0xAD, 0xEF, 0xE1, /* 0x60-0x63 */
+	0xF3, 0xAE, 0xDC, 0xC6, 0xD9, 0xEB, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xE0, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA8, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xF6, /* 0x74-0x77 */
+	0xCF, 0xFD, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xDD, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xD1, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xEA, /* 0x80-0x83 */
+	0xF2, 0xCF, 0x00, 0x00, 0xF7, 0xBF, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE2, 0xE6, 0xE2, 0xA8, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xD6, 0x00, 0x00, /* 0x94-0x97 */
+	0xED, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF9, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xB1, 0xDE, 0xB2, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE8, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xD3, 0xAB, 0x00, 0x00, 0xEB, 0xDC, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xAF, 0x00, 0x00, /* 0xB8-0xBB */
+	0xCA, 0xC3, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xFC, /* 0xBC-0xBF */
+	0x00, 0x00, 0xFD, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xEB, 0xF6, 0xCF, 0xB2, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xEC, /* 0xC8-0xCB */
+	0x00, 0x00, 0xD9, 0xBD, 0x00, 0x00, 0xD8, 0xDF, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xB8, 0xEB, 0xBE, /* 0xD0-0xD3 */
+	0xDD, 0xEF, 0x00, 0x00, 0xDD, 0xF0, 0xDD, 0xF1, /* 0xD4-0xD7 */
+	0xDD, 0xF2, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xBE, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xC6, /* 0xE8-0xEB */
+	0xCF, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+};
+
+static unsigned char u2c_6C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xEE, 0xFD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xAB, /* 0x0C-0x0F */
+	0x00, 0x00, 0xDA, 0xC5, 0x00, 0x00, 0xD8, 0xEC, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xA8, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE2, 0xA9, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xBC, /* 0x34-0x37 */
+	0xE7, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xF0, 0x00, 0x00, /* 0x3C-0x3F */
+	0xEF, 0xE2, 0xF1, 0xF0, 0xCF, 0xB4, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xF1, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE0, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xDF, 0xA5, 0x00, 0x00, 0xF9, 0xD2, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xFD, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE6, 0xA3, 0xFB, 0xF1, 0xCB, 0xB0, /* 0x5C-0x5F */
+	0xF2, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xCD, 0xE7, 0x00, 0x00, 0xE8, 0xDC, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xE7, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF7, 0xC0, 0x00, 0x00, 0xD0, 0xE3, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xA1, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xBD, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xD1, 0xA9, 0xDD, 0xCC, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xE3, 0xFE, 0xD1, 0xAA, 0xE8, 0xAA, /* 0x80-0x83 */
+	0x00, 0x00, 0xEA, 0xB6, 0xF9, 0xFA, 0xE6, 0xCC, /* 0x84-0x87 */
+	0xF6, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xD4, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD9, 0xCB, 0x00, 0x00, 0xD9, 0xD2, 0xD3, 0xCB, /* 0x90-0x93 */
+	0xD8, 0xF7, 0xDA, 0xA9, 0xF5, 0xF8, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xDE, 0xDE, 0xF2, 0xAF, 0xF8, 0xA9, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xC8, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xC1, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xC1, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xDD, 0xF3, 0xEA, 0xFA, 0x00, 0x00, 0xF6, 0xBD, /* 0xB8-0xBB */
+	0xE1, 0xBB, 0xCD, 0xBF, 0xF4, 0xD4, 0xE6, 0xCD, /* 0xBC-0xBF */
+	0x00, 0x00, 0xFC, 0xCF, 0xFB, 0xA2, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE0, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF4, 0xBB, 0xDA, 0xD5, 0x00, 0x00, /* 0xC8-0xCB */
+	0xF9, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xF2, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xDB, 0xF6, 0x00, 0x00, 0xDE, 0xDF, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xF2, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF8, 0xDC, 0xF7, 0xEE, 0xEB, 0xE8, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xD2, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF1, 0xBC, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xDA, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xEA, 0xDA, 0xC6, /* 0xEC-0xEF */
+	0xF7, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xB6, /* 0xF0-0xF3 */
+};
+
+static unsigned char u2c_6D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xC7, /* 0x08-0x0B */
+	0xD6, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xDC, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xA9, /* 0x14-0x17 */
+	0x00, 0x00, 0xE2, 0xAA, 0x00, 0x00, 0xD5, 0xA6, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xD7, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xF2, 0xD0, 0x00, 0x00, 0xEA, 0xFB, /* 0x24-0x27 */
+	0x00, 0x00, 0xE0, 0xDD, 0xFB, 0xF3, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xBD, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xE2, 0xE7, 0xFD, 0xD7, 0x00, 0x00, /* 0x34-0x37 */
+	0xCE, 0xC8, 0xEA, 0xB7, 0x00, 0x00, 0xFC, 0xC0, /* 0x38-0x3B */
+	0x00, 0x00, 0xFD, 0xE7, 0xF7, 0xEF, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xD7, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xEF, 0xBA, 0xF1, 0xDD, 0x00, 0x00, /* 0x58-0x5B */
+	0xDE, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xCB, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xDD, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xFB, 0xC7, 0xD5, 0xC8, 0x00, 0x00, /* 0x68-0x6B */
+	0xD7, 0xDF, 0x00, 0x00, 0xDD, 0xA9, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE9, 0xB1, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xAD, /* 0x74-0x77 */
+	0xF6, 0xD9, 0xFA, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xAA, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE6, 0xEE, 0x00, 0x00, 0xCC, 0xDC, /* 0x84-0x87 */
+	0xE1, 0xBC, 0xE0, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xE9, 0xBF, 0xFC, 0xFD, 0xE6, 0xCE, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE1, 0xD7, 0x00, 0x00, 0xE6, 0xCF, /* 0x90-0x93 */
+	0x00, 0x00, 0xF4, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xF3, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xFB, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF9, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xEF, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xEE, /* 0xC0-0xC3 */
+	0xF6, 0xBE, 0xE0, 0xB2, 0xFC, 0xFE, 0xD1, 0xAB, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xFA, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xC8, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE2, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD4, 0xA3, 0xF0, 0xF8, 0xD7, 0xA8, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE7, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xD3, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xEF, 0xE4, 0x00, 0x00, 0xD7, 0xC5, 0xEB, 0xE2, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xE7, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE4, 0xA2, 0x00, 0x00, 0xE2, 0xE8, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE6, 0xD0, 0x00, 0x00, 0xFB, 0xE8, /* 0xF4-0xF7 */
+	0xF4, 0xE8, 0xE5, 0xF4, 0xF4, 0xBC, 0xF4, 0xD5, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_6E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xB6, /* 0x14-0x17 */
+	0x00, 0x00, 0xFC, 0xB9, 0xEE, 0xC2, 0xCA, 0xF5, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xE5, /* 0x1C-0x1F */
+	0xCB, 0xE2, 0xD4, 0xA4, 0x00, 0x00, 0xDE, 0xE0, /* 0x20-0x23 */
+	0xDA, 0xFD, 0xE4, 0xC6, 0xE8, 0xBE, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xDE, /* 0x28-0x2B */
+	0xF6, 0xB4, 0xEA, 0xD2, 0x00, 0x00, 0xF9, 0xFB, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xC2, 0x00, 0x00, /* 0x30-0x33 */
+	0xCA, 0xE4, 0x00, 0x00, 0xE7, 0xB7, 0x00, 0x00, /* 0x34-0x37 */
+	0xEA, 0xFD, 0x00, 0x00, 0xD9, 0xDD, 0x00, 0x00, /* 0x38-0x3B */
+	0xDA, 0xB4, 0xEE, 0xAA, 0xFB, 0xE9, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xCB, /* 0x40-0x43 */
+	0xDA, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xBE, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xD3, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xC9, 0x00, 0x00, /* 0x54-0x57 */
+	0xDF, 0xCF, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xC0, /* 0x58-0x5B */
+	0xE3, 0xD7, 0x00, 0x00, 0xEF, 0xE6, 0xFC, 0xD0, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xC0, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xD3, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xDC, 0xF7, 0xB7, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xB8, 0xD1, 0xF9, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xC8, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xEA, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xDE, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xD7, 0xB6, 0xCF, 0xB5, 0x00, 0x00, 0xD9, 0xA8, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xEE, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xDD, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xA2, 0xE8, 0xAE, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xBD, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF2, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xC1, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xFC, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xB5, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF3, 0xE7, 0xD8, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xFC, 0xD1, 0x00, 0x00, 0xED, 0xB2, /* 0xC8-0xCB */
+	0xF4, 0xAF, 0x00, 0x00, 0xFB, 0xA3, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xFC, 0xC1, 0x00, 0x00, 0xEE, 0xAB, /* 0xD0-0xD3 */
+	0xD4, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xF2, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEE, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xFB, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xE3, 0xD8, 0xBB, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6F[512] = {
+	0x00, 0x00, 0xE5, 0xDB, 0xF8, 0xF7, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xD4, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xA9, /* 0x0C-0x0F */
+	0x00, 0x00, 0xCB, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xE6, 0xD1, 0xF0, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xD8, 0xAE, 0x00, 0x00, 0xF9, 0xD3, 0xD5, 0xFE, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xBC, /* 0x28-0x2B */
+	0xF2, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE2, 0xAB, 0xF3, 0xE8, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xEF, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xEC, /* 0x3C-0x3F */
+	0x00, 0x00, 0xE7, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xDA, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCC, 0xBE, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xFC, /* 0x54-0x57 */
+	0xDA, 0xEB, 0x00, 0x00, 0xE2, 0xD8, 0xED, 0xD6, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xD1, 0xE0, 0xB3, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xD2, 0x00, 0x00, /* 0x60-0x63 */
+	0xEB, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xD3, 0xC1, 0xF0, 0xCD, 0x00, 0x00, /* 0x6C-0x6F */
+	0xCF, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD2, 0x00, 0x00, /* 0x78-0x7B */
+	0xD4, 0xD8, 0xDC, 0xC9, 0xD7, 0xF1, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xDF, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xF3, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xF4, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xF1, 0xBF, 0xF8, 0xB1, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE9, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xFB, 0xCB, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xD5, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xD4, /* 0xA0-0xA3 */
+	0xF7, 0xCA, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xC8, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xE8, 0xF3, 0xBD, /* 0xAC-0xAF */
+	0x00, 0x00, 0xEE, 0xFE, 0x00, 0x00, 0xE7, 0xFE, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xD3, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xB6, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCC, 0xAD, 0xF6, 0xFA, 0xD6, 0xB2, 0xD2, 0xD8, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xD8, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE3, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xB9, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xAD, /* 0xDC-0xDF */
+	0xFB, 0xCC, 0xEB, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD4, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xFB, 0xCD, 0x00, 0x00, 0xD5, 0xBD, /* 0xE8-0xEB */
+	0xF1, 0xDF, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xFB, /* 0xEC-0xEF */
+	0x00, 0x00, 0xDE, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xEB, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_70[512] = {
+	0x00, 0x00, 0xE5, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xFB, 0xA4, 0xD4, 0xB9, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xDE, 0xE1, 0x00, 0x00, 0xE4, 0xA3, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xB7, /* 0x0C-0x0F */
+	0x00, 0x00, 0xF8, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xDE, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xD6, 0xD2, 0x00, 0x00, 0xF9, 0xD5, 0xE7, 0xBA, /* 0x18-0x1B */
+	0xEB, 0xD5, 0xD5, 0xF7, 0xEF, 0xE7, 0xE1, 0xBE, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xAE, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xE9, /* 0x24-0x27 */
+	0xD6, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xBB, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xCB, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xB3, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xCE, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xFB, 0xA5, 0xE1, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xF7, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xFB, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xBD, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xFD, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xFC, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xCF, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xED, 0xC7, 0xEE, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xCC, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xA7, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xFA, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xA4, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xFD, 0xDC, 0xED, 0xB3, 0xCE, 0xC9, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xEF, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE1, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xDB, /* 0xA8-0xAB */
+	0xCB, 0xE3, 0xF7, 0xA9, 0x00, 0x00, 0xFB, 0xA6, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xB9, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xC0, /* 0xB4-0xB7 */
+	0xED, 0xC8, 0xEF, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xD6, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xCE, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xA1, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xFB, 0xF4, 0xD5, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF1, 0xF6, 0x00, 0x00, 0xE6, 0xD3, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xCC, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xF8, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xDC, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_71[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xFD, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE5, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xF1, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xDB, 0xCC, 0xDD, 0xCD, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xC8, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xD9, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xA5, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE6, 0xFB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xD4, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xC8, /* 0x44-0x47 */
+	0x00, 0x00, 0xD6, 0xA1, 0xFD, 0xBF, 0x00, 0x00, /* 0x48-0x4B */
+	0xFC, 0xD3, 0x00, 0x00, 0xEF, 0xA1, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE7, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xEE, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE6, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xE9, 0xF2, 0x00, 0x00, 0xDF, 0xB0, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xD8, 0xE0, 0xFC, 0xBA, 0xFD, 0xAF, 0xF0, 0xCE, /* 0x64-0x67 */
+	0x00, 0x00, 0xDB, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE5, 0xC9, 0x00, 0x00, 0xED, 0xB4, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE0, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xE3, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE9, 0xFB, 0xEA, 0xA8, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xB7, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xA7, 0x00, 0x00, /* 0x90-0x93 */
+	0xE9, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xFD, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD9, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xEC, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE8, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE6, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xFD, 0xF8, 0xFD, 0xF9, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xBF, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE7, 0xA7, 0x00, 0x00, 0xE6, 0xD7, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xD4, 0xF3, 0xD4, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xFA, 0x00, 0x00, /* 0xCC-0xCF */
+	0xD7, 0xF2, 0x00, 0x00, 0xE1, 0xC0, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xDB, 0xE2, 0xE6, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xBD, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xF0, 0xCF, 0xF3, 0xBE, 0xE2, 0xAC, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF5, 0xB7, 0xE0, 0xF0, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xB8, /* 0xF8-0xFB */
+	0xE3, 0xE8, 0x00, 0x00, 0xD4, 0xA7, 0xE8, 0xFC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_72[512] = {
+	0xFA, 0xD2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xEF, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD6, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xB4, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xD0, 0x00, 0x00, /* 0x28-0x2B */
+	0xF7, 0xF0, 0xEE, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEA, 0xBA, 0x00, 0x00, 0xEA, 0xD3, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xED, 0xC9, 0xDD, 0xAB, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xAC, 0xFD, 0xA1, /* 0x38-0x3B */
+	0x00, 0x00, 0xDF, 0xD0, 0xEC, 0xB3, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDF, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xED, 0xF8, 0xB8, /* 0x44-0x47 */
+	0xF7, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xF8, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xE0, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xD4, 0xBA, 0xE4, 0xB3, 0x00, 0x00, 0xE9, 0xDA, /* 0x58-0x5B */
+	0x00, 0x00, 0xDE, 0xB6, 0x00, 0x00, 0xD9, 0xBF, /* 0x5C-0x5F */
+	0x00, 0x00, 0xD9, 0xC0, 0xD6, 0xEF, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xCC, /* 0x64-0x67 */
+	0x00, 0x00, 0xDA, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xE5, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF7, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xCC, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xDF, 0xF9, 0xD7, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xBB, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xFA, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xCC, 0xB3, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xF3, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xDF, 0xD2, 0x00, 0x00, 0xCE, 0xCA, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xEE, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xE4, 0x00, 0x00, /* 0xCC-0xCF */
+	0xFB, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xB7, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xEE, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xCE, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE2, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xD7, 0xE1, 0xFA, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xD5, 0xC9, 0xF8, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_73[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xD9, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xE9, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xED, /* 0x18-0x1B */
+	0xE3, 0xC4, 0xF0, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE8, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE0, 0xFA, 0xEE, 0xC4, 0xD9, 0xDE, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xA2, 0xEB, 0xA3, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xC2, 0xEA, 0xBB, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE8, 0xAB, 0xDE, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xED, 0xEF, 0x00, 0x00, 0xE8, 0xA3, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xF1, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xD4, 0xBC, 0x00, 0x00, 0xFC, 0xEA, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE7, 0xBE, 0x00, 0x00, 0xFC, 0xF2, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD6, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xE2, 0xAE, 0x00, 0x00, 0xD3, 0xB7, 0xFA, 0xCC, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xFA, 0xDC, 0x00, 0x00, 0xED, 0xB5, 0xE1, 0xE3, /* 0x84-0x87 */
+	0x00, 0x00, 0xE8, 0xAC, 0x00, 0x00, 0xE8, 0xDD, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xE9, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xF4, 0xBD, 0x00, 0x00, 0xCF, 0xB8, 0xE9, 0xDB, /* 0x94-0x97 */
+	0xD1, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xC7, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xC9, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE8, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xDE, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xBC, 0xD3, 0xE5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xFA, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xDA, 0xD6, 0x00, 0x00, 0xCA, 0xB1, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xDA, 0xC8, 0xDF, 0xA6, 0x00, 0x00, /* 0xC8-0xCB */
+	0xF9, 0xB3, 0xF2, 0xD2, 0x00, 0x00, 0xCA, 0xC4, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xCB, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xCD, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xFD, 0xB0, 0xD5, 0xA8, 0x00, 0x00, /* 0xDC-0xDF */
+	0xF1, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xE9, /* 0xE0-0xE3 */
+	0xDC, 0xCA, 0xEC, 0xB4, 0xFA, 0xC0, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xFB, 0xA8, 0xD0, 0xA8, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xDA, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xEE, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE0, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xEF, 0xEA, 0xFA, 0xDE, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_74[512] = {
+	0x00, 0x00, 0xE0, 0xC4, 0x00, 0x00, 0xCF, 0xB9, /* 0x00-0x03 */
+	0x00, 0x00, 0xD5, 0xCA, 0xD7, 0xE2, 0xE2, 0xAF, /* 0x04-0x07 */
+	0x00, 0x00, 0xD7, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xCD, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xDA, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xEF, 0xA2, 0xE2, 0xDA, 0xF6, 0xFC, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xFB, 0xD0, 0xD1, 0xAD, 0x00, 0x00, /* 0x24-0x27 */
+	0xCD, 0xE4, 0x00, 0x00, 0xD1, 0xAE, 0xDC, 0xED, /* 0x28-0x2B */
+	0xE8, 0xCE, 0x00, 0x00, 0xF0, 0xF9, 0xCE, 0xB5, /* 0x2C-0x2F */
+	0xE6, 0xFC, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xFB, /* 0x30-0x33 */
+	0xD0, 0xD6, 0xDD, 0xF5, 0xF7, 0xF1, 0x00, 0x00, /* 0x34-0x37 */
+	0xF6, 0xFD, 0x00, 0x00, 0xDB, 0xF7, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xEA, /* 0x3C-0x3F */
+	0xE9, 0xDC, 0xD9, 0xC1, 0x00, 0x00, 0xF5, 0xF2, /* 0x40-0x43 */
+	0xE0, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xD4, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xF9, 0xC2, 0x00, 0x00, 0xEA, 0xBC, /* 0x54-0x57 */
+	0x00, 0x00, 0xD2, 0xC5, 0xFB, 0xD1, 0xE7, 0xC0, /* 0x58-0x5B */
+	0xEB, 0xA5, 0x00, 0x00, 0xDF, 0xFA, 0xE3, 0xA2, /* 0x5C-0x5F */
+	0xD7, 0xB9, 0x00, 0x00, 0xE9, 0xC3, 0x00, 0x00, /* 0x60-0x63 */
+	0xE8, 0xFD, 0xE8, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF2, 0xD3, 0xFB, 0xA9, 0xD8, 0xA5, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xCB, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xC8, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xAF, 0xD7, 0xE3, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xC6, /* 0x84-0x87 */
+	0x00, 0x00, 0xD6, 0xA2, 0x00, 0x00, 0xED, 0xF0, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xD7, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xFC, 0xD4, 0x00, 0x00, 0xDA, 0xD7, 0xCC, 0xDF, /* 0x9C-0x9F */
+	0x00, 0x00, 0xF2, 0xD4, 0x00, 0x00, 0xD1, 0xB0, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCC, 0xE0, 0x00, 0x00, 0xDB, 0xFD, /* 0xA4-0xA7 */
+	0xF3, 0xBF, 0x00, 0x00, 0xF0, 0xD1, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xFC, 0xBB, 0x00, 0x00, 0xE2, 0xB0, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE6, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xDF, 0xDE, 0x00, 0x00, 0xE0, 0xC7, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xEF, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xE1, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xEA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE7, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xCE, 0xB6, 0x00, 0x00, 0xF3, 0xC0, 0x00, 0x00, /* 0xD8-0xDB */
+	0xCD, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xFB, 0xD2, 0x00, 0x00, 0xF8, 0xF8, 0xF7, 0xFB, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xBF, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xB7, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xB6, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_75[512] = {
+	0x00, 0x00, 0xDC, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xCC, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xF1, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE8, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xCA, 0xF6, 0x00, 0x00, 0xE4, 0xA4, 0xF4, 0xD6, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xE6, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xA7, /* 0x20-0x23 */
+	0x00, 0x00, 0xDF, 0xE7, 0xE1, 0xC1, 0x00, 0x00, /* 0x24-0x27 */
+	0xE9, 0xC4, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xCB, /* 0x28-0x2B */
+	0xE9, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEF, 0xA3, 0xEB, 0xA6, 0xCB, 0xA3, 0xE3, 0xE9, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xFB, /* 0x34-0x37 */
+	0xEF, 0xA4, 0x00, 0x00, 0xEF, 0xEB, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xB4, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xCD, 0xA3, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xE6, /* 0x4C-0x4F */
+	0x00, 0x00, 0xEF, 0xA5, 0x00, 0x00, 0xD3, 0xCC, /* 0x50-0x53 */
+	0xDA, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xD7, 0xBA, 0x00, 0x00, 0xF2, 0xD5, /* 0x58-0x5B */
+	0xF5, 0xE5, 0xD9, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xB4, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xD5, 0xD4, 0xFD, 0xCF, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xE3, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xE1, /* 0x6C-0x6F */
+	0xEC, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xFB, 0xFE, 0xD3, 0xD7, 0x00, 0x00, /* 0x74-0x77 */
+	0xD1, 0xB1, 0x00, 0x00, 0xCB, 0xB1, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xB2, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xB2, 0xF1, 0xC2, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xE1, 0xF9, 0xB5, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xC3, 0xE1, 0xC2, /* 0x8C-0x8F */
+	0x00, 0x00, 0xEB, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xDF, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCB, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xB9, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF8, 0xDE, 0xF9, 0xAA, 0xCA, 0xF7, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xED, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xD3, 0xB8, 0xF2, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xD4, 0xD9, 0xEE, 0xC5, 0xF2, 0xF0, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xB2, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xDC, 0xBB, 0x00, 0x00, 0xF1, 0xF8, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xEC, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xCA, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xF6, 0xC0, 0xFD, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD4, 0xE3, 0xCC, 0xE2, 0x00, 0x00, 0xF7, 0xD4, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xE5, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xD3, 0xC3, 0x00, 0x00, 0xD8, 0xA6, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xF6, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xF6, 0x00, 0x00, /* 0xF8-0xFB */
+	0xCD, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_76[512] = {
+	0xE5, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE5, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE1, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xB0, /* 0x1C-0x1F */
+	0xF4, 0xB0, 0xF3, 0xEA, 0xDA, 0xEE, 0x00, 0x00, /* 0x20-0x23 */
+	0xD7, 0xBB, 0x00, 0x00, 0xE2, 0xB1, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xAA, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xFB, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE4, 0xDF, 0x00, 0x00, 0xCA, 0xD6, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xA8, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xFE, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xF6, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xEF, 0xBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xD4, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE0, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE8, 0xB9, 0x00, 0x00, 0xEF, 0xA6, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xCD, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xF4, /* 0x78-0x7B */
+	0xDB, 0xA1, 0xDB, 0xDC, 0xDB, 0xDD, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xEE, 0xDC, 0x00, 0x00, 0xCB, 0xCB, 0xFC, 0xD5, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xEB, 0x00, 0x00, /* 0x8C-0x8F */
+	0xCD, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xD3, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xAB, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xD4, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xA9, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xDD, 0xDB, 0xCD, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xCE, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE7, 0xC3, 0x00, 0x00, 0xEC, 0xCC, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xEC, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xCC, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xFC, /* 0xD8-0xDB */
+	0xD4, 0xA8, 0x00, 0x00, 0xED, 0xD3, 0xD8, 0xEF, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF2, 0xD7, 0x00, 0x00, 0xCA, 0xF8, /* 0xE0-0xE3 */
+	0xDA, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xD4, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xCD, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xEE, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xF2, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xDF, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xDA, 0xF0, 0x00, 0x00, 0xE2, 0xEA, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_77[512] = {
+	0x00, 0x00, 0xE0, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD8, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xF7, 0xAF, 0xDA, 0xB6, 0x00, 0x00, 0xCA, 0xD7, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xD8, 0x00, 0x00, /* 0x1C-0x1F */
+	0xD8, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xFA, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xEF, /* 0x34-0x37 */
+	0xD9, 0xC2, 0x00, 0x00, 0xF0, 0xD2, 0x00, 0x00, /* 0x38-0x3B */
+	0xE4, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xF3, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xFA, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xEC, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE2, 0xB2, 0x00, 0x00, 0xD4, 0xBD, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xCE, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xE2, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xD4, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xC2, 0xE7, 0xDA, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xD9, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD9, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xBE, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xDC, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE2, 0xEB, 0xD6, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xCA, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xDA, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xD7, /* 0xB8-0xBB */
+	0xCC, 0xA1, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xBA, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xB8, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xC3, /* 0xD8-0xDB */
+	0xD0, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xC5, 0xEB, 0xF8, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xF2, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xCF, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xD3, 0xAD, 0xE8, 0xE1, 0xCE, 0xEC, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xB4, /* 0xF0-0xF3 */
+};
+
+static unsigned char u2c_78[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xE3, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xF7, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xF2, 0xB2, 0xF3, 0xF6, 0xF6, 0xDB, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xD7, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xDF, 0x00, 0x00, /* 0x30-0x33 */
+	0xF7, 0xF2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xD0, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xDA, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xF5, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xBC, /* 0x68-0x6B */
+	0xCC, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xDB, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xDD, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xD1, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xED, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xD6, 0xDE, 0xE4, 0xF4, 0xE1, 0xEF, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xDD, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xCF, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xE5, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xA1, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE0, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xAC, 0xFC, 0xAD, /* 0xB8-0xBB */
+	0xD8, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xED, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xDB, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xF0, 0xF3, 0xAF, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xA5, 0x00, 0x00, /* 0xCC-0xCF */
+	0xDA, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xD8, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xCC, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xB4, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xCA, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xF2, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_79[512] = {
+	0x00, 0x00, 0xF5, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xA8, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xA6, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xEC, 0xD5, 0xF8, /* 0x28-0x2B */
+	0xDA, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xC6, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xE4, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDE, 0xE5, 0xD1, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xB6, /* 0x44-0x47 */
+	0xD1, 0xB7, 0xF2, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE9, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xD3, 0xF2, 0xB4, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xD4, 0xCB, 0xE4, /* 0x58-0x5B */
+	0xFB, 0xD4, 0xF5, 0xE6, 0xE3, 0xEA, 0x00, 0x00, /* 0x5C-0x5F */
+	0xDE, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xDF, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF8, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xF0, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xB8, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xDF, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xD0, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xFC, 0xA1, 0xEF, 0xEE, 0xDC, 0xD8, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE9, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xDD, 0xFD, 0xFB, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xC9, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xC9, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xD4, 0xAA, 0x00, 0x00, 0xE5, 0xCC, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE9, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD0, 0xD8, 0xFC, 0xA2, 0xD4, 0xBE, /* 0xBC-0xBF */
+	0xE2, 0xB3, 0xDE, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xDC, 0xBC, 0xD2, 0xB6, 0xF5, 0xD5, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xCE, 0xA1, 0xF5, 0xA9, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xDD, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xDD, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xD5, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xF6, 0xDF, 0x00, 0x00, 0xF2, 0xDA, 0xE4, 0xEB, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xF2, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xB9, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_7A[512] = {
+	0xFD, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE1, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xCA, 0xD9, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xEF, /* 0x08-0x0B */
+	0x00, 0x00, 0xF5, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xEC, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xAD, /* 0x14-0x17 */
+	0x00, 0x00, 0xF2, 0xC2, 0xF6, 0xC3, 0x00, 0x00, /* 0x18-0x1B */
+	0xD7, 0xD2, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xA2, /* 0x1C-0x1F */
+	0xF0, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xFA, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF6, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF3, 0xF2, 0xC3, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xAB, /* 0x38-0x3B */
+	0xCA, 0xB3, 0xCD, 0xA6, 0x00, 0x00, 0xCD, 0xC3, /* 0x3C-0x3F */
+	0xCD, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xCF, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF6, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xEE, 0xDD, 0xE7, 0xC4, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xB4, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xDF, 0xE2, 0xE7, 0xDB, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE8, 0xB1, 0x00, 0x00, 0xFC, 0xAE, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE5, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xFA, 0xEB, 0x00, 0x00, 0xCF, 0xBC, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xCF, 0xE2, 0xCD, 0xF6, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xEF, 0xF0, 0x00, 0x00, 0xF4, 0xBE, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xD4, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xF3, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE9, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xF2, 0xF3, 0xEB, /* 0x90-0x93 */
+	0x00, 0x00, 0xF0, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xCF, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xDF, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE8, 0xC0, 0xE8, 0xC1, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xE3, 0xE9, 0xA2, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xAA, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF3, 0xC1, 0xD0, 0xAB, 0x00, 0x00, 0xD4, 0xE4, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xBC, 0xD8, 0xA1, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xDF, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xF3, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xDC, 0xBD, 0x00, 0x00, 0xCC, 0xE5, /* 0xDC-0xDF */
+	0xED, 0xF1, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xE2, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xD4, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xB5, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xCA, 0xE6, 0x00, 0x00, 0xD3, 0xAE, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xE6, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xF1, 0xD3, 0xF5, 0xE7, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xDA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xEE, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE1, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xDF, 0xE9, 0x00, 0x00, 0xEE, 0xDE, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xC2, 0x00, 0x00, /* 0x1C-0x1F */
+	0xD8, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xAC, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xF0, 0xAF, 0xD6, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xE1, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xB6, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xD4, 0xF5, 0x00, 0x00, 0xD0, 0xC9, /* 0x48-0x4B */
+	0xEF, 0xA7, 0xE2, 0xEC, 0x00, 0x00, 0xDB, 0xEA, /* 0x4C-0x4F */
+	0xCE, 0xCC, 0xF5, 0xE8, 0xF7, 0xD5, 0x00, 0x00, /* 0x50-0x53 */
+	0xD3, 0xCD, 0x00, 0x00, 0xF3, 0xFE, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xD0, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE0, 0xFE, 0x00, 0x00, 0xDF, 0xFB, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xE6, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE8, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xCD, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xA8, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xB4, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xDA, 0xD8, 0xD1, 0xB9, 0x00, 0x00, 0xDF, 0xA9, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xB0, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xCC, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xCE, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xEF, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xDF, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xED, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xEE, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xEF, 0xBD, 0xFC, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xDB, 0xF4, 0x00, 0x00, 0xEF, 0xAA, 0xF8, 0xB9, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF5, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xD9, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xE1, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD4, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xDE, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+};
+
+static unsigned char u2c_7C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xEA, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xC2, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xAF, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xCA, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xD7, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD8, 0xE1, 0xC7, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF4, 0xD8, 0xD6, 0xB3, 0xDD, 0xAD, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xBE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xF1, 0xC3, 0xEE, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xD6, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xF4, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xD7, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xB7, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xFB, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xDD, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xA3, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xDA, 0xD9, 0x00, 0x00, 0xF0, 0xD8, /* 0x94-0x97 */
+	0xEF, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xD8, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xF1, 0xD4, 0x00, 0x00, 0xED, 0xF2, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xDB, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xD5, 0xDC, 0xF3, 0xC4, 0xCB, 0xD7, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xF1, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xD5, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xD8, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xD0, 0xF0, 0xD9, /* 0xDC-0xDF */
+	0xCB, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xDD, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xA7, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xAC, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7D[512] = {
+	0xD1, 0xBA, 0x00, 0x00, 0xF1, 0xC4, 0x00, 0x00, /* 0x00-0x03 */
+	0xE5, 0xB3, 0xFB, 0xF5, 0xE9, 0xE1, 0xFD, 0xE0, /* 0x04-0x07 */
+	0xFC, 0xBC, 0x00, 0x00, 0xDA, 0xA2, 0xDA, 0xA3, /* 0x08-0x0B */
+	0x00, 0x00, 0xD2, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD2, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xE2, 0xED, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xE9, /* 0x14-0x17 */
+	0xCE, 0xDC, 0xF2, 0xB5, 0xD0, 0xE4, 0xDD, 0xD1, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE1, 0xC8, 0xDB, 0xB7, 0xDF, 0xE3, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xB9, /* 0x28-0x2B */
+	0xF1, 0xC5, 0x00, 0x00, 0xF3, 0xCF, 0xD7, 0xAB, /* 0x2C-0x2F */
+	0xE1, 0xAC, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xEB, /* 0x30-0x33 */
+	0x00, 0x00, 0xEE, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xE1, 0xC9, 0xCA, 0xFA, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xFB, 0xFA, 0xE1, /* 0x40-0x43 */
+	0xF0, 0xDA, 0xCC, 0xE7, 0xDA, 0xF4, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xCC, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xED, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xD5, 0xA9, 0xFA, 0xE2, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xE5, 0x00, 0x00, /* 0x64-0x67 */
+	0xEB, 0xD6, 0x00, 0x00, 0xEC, 0xDF, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xFC, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xF7, 0xD6, 0xDE, 0xEA, 0xCB, 0xB4, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xBE, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xCC, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xBD, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xF2, 0xE2, 0xB7, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xE8, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xF0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xD6, 0xE0, 0x00, 0x00, 0xF1, 0xC6, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE2, 0xB8, 0xEB, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xCB, 0xB5, 0xD8, 0xD1, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xF4, 0xCE, 0xF3, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xD7, 0xC6, 0x00, 0x00, 0xD1, 0xBB, 0xF7, 0xAA, /* 0xB8-0xBB */
+	0x00, 0x00, 0xED, 0xCA, 0xD7, 0xD3, 0xD8, 0xFA, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xC5, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xCC, 0xDD, 0xFC, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xFD, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xF9, 0xE5, 0x00, 0x00, 0xE0, 0xCA, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF2, 0xFD, 0xD3, 0xB0, 0x00, 0x00, /* 0xDC-0xDF */
+	0xF4, 0xF3, 0xDA, 0xC9, 0x00, 0x00, 0xE6, 0xDE, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF8, 0xBA, 0xE8, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xD8, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xD5, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xD6, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xC6, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_7E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xF2, 0xDB, 0xE4, 0xFC, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE8, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xDA, /* 0x18-0x1B */
+	0x00, 0x00, 0xF2, 0xDC, 0xFB, 0xD6, 0xE9, 0xB2, /* 0x1C-0x1F */
+	0x00, 0x00, 0xEE, 0xAD, 0x00, 0x00, 0xFA, 0xE3, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xEE, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xEA, 0xE6, 0xE0, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF0, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xAC, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF5, 0xC5, 0xEE, 0xE0, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xDB, 0xE5, 0x00, 0x00, 0xDD, 0xDE, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xF0, 0xE9, 0xA3, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xF9, 0x00, 0x00, /* 0x50-0x53 */
+	0xF2, 0xC4, 0xE0, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xA4, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE2, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE3, 0xB1, 0xFC, 0xEB, 0xCD, 0xA8, /* 0x68-0x6B */
+	0x00, 0x00, 0xCC, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF0, 0xDB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE6, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xCD, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xC3, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xE1, 0xD9, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xAB, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xC5, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xE9, 0x00, 0x00, /* 0x94-0x97 */
+	0xF3, 0xC5, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xC0, /* 0x98-0x9B */
+	0xD5, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_7F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xAE, 0x00, 0x00, /* 0x34-0x37 */
+	0xF9, 0xFC, 0x00, 0x00, 0xCC, 0xC0, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xE5, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xCE, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xD8, 0xD2, 0xF9, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xAA, 0xCE, 0xD1, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xC7, 0x00, 0x00, /* 0x6C-0x6F */
+	0xDB, 0xEB, 0x00, 0x00, 0xDF, 0xFE, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD8, 0xE1, 0x00, 0x00, 0xF7, 0xF3, /* 0x74-0x77 */
+	0x00, 0x00, 0xD7, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xD4, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xD1, 0xBC, 0x00, 0x00, 0xE5, 0xCF, 0x00, 0x00, /* 0x88-0x8B */
+	0xCB, 0xB6, 0x00, 0x00, 0xDA, 0xB8, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xCD, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xBE, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xBA, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xCF, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE0, 0xCC, 0xEB, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xFD, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xD7, 0xE8, 0xCB, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE9, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE8, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE3, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xCD, 0x00, 0x00, /* 0xC8-0xCB */
+	0xEC, 0xCE, 0x00, 0x00, 0xD6, 0xBF, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xA7, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xDF, 0xD6, 0xFD, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xE1, /* 0xDC-0xDF */
+	0xF6, 0xA8, 0xDD, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xF8, 0xBB, 0x00, 0x00, 0xE8, 0xD1, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xF9, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xCE, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xEC, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_80[512] = {
+	0xE9, 0xA5, 0xD6, 0xD5, 0x00, 0x00, 0xCD, 0xC5, /* 0x00-0x03 */
+	0x00, 0x00, 0xED, 0xBA, 0xD1, 0xBD, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xCF, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xEC, 0xBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD2, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xCC, 0xE9, 0x00, 0x00, 0xD9, 0xC4, /* 0x14-0x17 */
+	0xE9, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD1, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xBC, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xAD, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF7, 0xB0, 0x00, 0x00, 0xCC, 0xEA, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xC4, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xC0, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xFD, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA1, 0x00, 0x00, /* 0x54-0x57 */
+	0xDE, 0xBD, 0x00, 0x00, 0xF6, 0xA9, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xA4, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xA4, /* 0x6C-0x6F */
+	0xF5, 0xC6, 0x00, 0x00, 0xE1, 0xA2, 0xE9, 0xC6, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xC5, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xF4, 0xE9, 0xD6, 0xEC, 0xEB, 0xD3, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xEC, 0xBD, 0xE2, 0xDC, 0xDE, 0xEB, 0xF0, 0xDC, /* 0x84-0x87 */
+	0x00, 0x00, 0xEB, 0xBF, 0x00, 0x00, 0xD7, 0xCE, /* 0x88-0x8B */
+	0xD1, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xAB, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xFD, /* 0x98-0x9B */
+	0x00, 0x00, 0xCA, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xCD, 0xC6, 0xF2, 0xB6, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xDD, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xCC, 0xB7, 0xDB, 0xB8, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xE9, /* 0xAC-0xAF */
+	0x00, 0x00, 0xCE, 0xDD, 0xEB, 0xC0, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xFD, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xCB, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xD6, /* 0xC0-0xC3 */
+	0xF1, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xDB, 0xCE, 0x00, 0x00, 0xF7, 0xC3, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xCF, 0xCB, 0xA4, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xE0, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xFB, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xEB, 0xCA, 0xE0, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xCE, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xD4, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xFD, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xD2, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_81[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xB7, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xFA, 0xF6, 0xF6, 0xAA, 0xFA, 0xF7, /* 0x04-0x07 */
+	0xD8, 0xE6, 0x00, 0x00, 0xF4, 0xB1, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE8, 0xD2, 0x00, 0x00, 0xCA, 0xC5, 0xCC, 0xEB, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xEE, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xE2, 0xBB, 0x00, 0x00, 0xF7, 0xAD, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xE1, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xF3, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xA1, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xFD, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xEC, 0x00, 0x00, /* 0x4C-0x4F */
+	0xDD, 0xAF, 0xDD, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCB, 0xB7, 0xE8, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xE1, 0xA3, 0xD2, 0xE0, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xFE, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE9, 0xA6, 0xCB, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xED, 0xF3, 0xDC, 0xD9, 0xE0, 0xCD, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xDA, /* 0x7C-0x7F */
+	
+	0xDB, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xCC, 0xAE, 0x00, 0x00, 0xDA, 0xDB, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xC7, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xB1, 0x00, 0x00, /* 0x98-0x9B */
+	0xD8, 0xAF, 0xE3, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCE, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xF3, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF8, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xCE, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF5, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xEC, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD3, 0xC5, 0xFC, 0xEC, 0xD2, 0xDB, /* 0xBC-0xBF */
+	0xD4, 0xEB, 0x00, 0x00, 0xDE, 0xA2, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xE6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xF0, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD5, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xF4, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xED, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE8, 0xC2, 0x00, 0x00, 0xED, 0xF5, /* 0xE4-0xE7 */
+	0xD7, 0xFC, 0x00, 0x00, 0xED, 0xBB, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF6, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xB8, /* 0xF0-0xF3 */
+	0xF6, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xE6, 0xF2, 0xDD, /* 0xF8-0xFB */
+	0xCF, 0xBF, 0x00, 0x00, 0xEB, 0xAC, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_82[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xCF, 0xC0, 0x00, 0x00, 0xE6, 0xA8, /* 0x04-0x07 */
+	0xFD, 0xE9, 0x00, 0x00, 0xCF, 0xC1, 0x00, 0x00, /* 0x08-0x0B */
+	0xE0, 0xDF, 0xDE, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xA2, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xBF, /* 0x18-0x1B */
+	0xE2, 0xEF, 0x00, 0x00, 0xD9, 0xF1, 0xF1, 0xC7, /* 0x1C-0x1F */
+	0x00, 0x00, 0xCB, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xFE, 0xDB, 0xBA, /* 0x28-0x2B */
+	0xDA, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF6, 0xEC, 0xDA, 0xDC, 0xFA, 0xE4, /* 0x34-0x37 */
+	0x00, 0x00, 0xE0, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDD, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xE6, 0xA9, 0x00, 0x00, 0xEF, 0xF3, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xF3, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xEB, 0xFA, 0x00, 0x00, 0xF9, 0xE6, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xDD, 0xD5, 0xDE, /* 0x6C-0x6F */
+	0x00, 0x00, 0xCA, 0xDE, 0xDF, 0xE4, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xFD, 0x00, 0x00, /* 0x74-0x77 */
+	0xF5, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xF5, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xE3, /* 0x88-0x8B */
+	0x00, 0x00, 0xED, 0xCB, 0xCF, 0xE4, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xD3, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xDD, 0xB3, 0xD4, 0xEC, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xF2, 0xB9, 0x00, 0x00, 0xDF, 0xB7, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCB, 0xCE, 0xFB, 0xD8, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xD0, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xDD, 0xD2, 0xF7, 0xF4, 0xE7, 0xDC, 0xE4, 0xA5, /* 0xAC-0xAF */
+	0x00, 0x00, 0xFC, 0xA3, 0x00, 0x00, 0xDB, 0xBB, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xBA, /* 0xB4-0xB7 */
+	0xE9, 0xFD, 0xD0, 0xCA, 0x00, 0x00, 0xF5, 0xD6, /* 0xB8-0xBB */
+	0xD9, 0xC5, 0xE4, 0xB4, 0x00, 0x00, 0xED, 0xA7, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xEA, 0xBD, 0xE6, 0xFE, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xF7, 0xC4, 0xF5, 0xAD, 0x00, 0x00, 0xD9, 0xE0, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xB4, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xE2, 0xCF, 0xC2, /* 0xDC-0xDF */
+	0x00, 0x00, 0xEC, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE5, 0xB4, 0xCD, 0xC8, 0xEE, 0xC8, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE7, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xCD, 0xC9, 0xF9, 0xB7, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_83[512] = {
+	0x00, 0x00, 0xF1, 0xE8, 0xD9, 0xF2, 0xDB, 0xF5, /* 0x00-0x03 */
+	0xCA, 0xB5, 0xD9, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xD8, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xAB, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xED, 0xBC, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xD4, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xDA, /* 0x2C-0x2F */
+	0x00, 0x00, 0xE2, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xFC, 0xED, 0xEC, 0xE0, 0xD2, 0xFE, 0x00, 0x00, /* 0x34-0x37 */
+	0xE9, 0xC7, 0xE6, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xE2, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xBB, /* 0x44-0x47 */
+	0x00, 0x00, 0xF5, 0xAE, 0xFB, 0xAA, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xFB, /* 0x4C-0x4F */
+	0x00, 0x00, 0xEC, 0xBF, 0xFC, 0xD8, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xE5, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xC3, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xE2, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xD7, 0xE9, 0xED, 0xF6, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xED, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xEC, 0x00, 0x00, /* 0x94-0x97 */
+	0xE3, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xD4, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xF8, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xDD, 0xB4, 0xE4, 0xB5, 0xD8, 0xB0, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD8, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xF4, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xCE, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD6, 0xE1, 0xCF, 0xD2, 0x00, 0x00, /* 0xC8-0xCB */
+	0xD0, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xA2, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xEE, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xF3, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xDC, 0xCC, 0x00, 0x00, 0xD0, 0xCB, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xA4, /* 0xEC-0xEF */
+	0xCD, 0xCA, 0xD7, 0xD4, 0xDE, 0xA3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE4, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xEE, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE2, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_84[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xFE, /* 0x00-0x03 */
+	0xD4, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xD1, 0x00, 0x00, /* 0x08-0x0B */
+	0xD8, 0xF0, 0xF8, 0xC3, 0xEA, 0xD7, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xF5, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xD8, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xFD, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xEB, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xD5, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xE7, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xCA, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xE7, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xF8, 0xE3, 0x00, 0x00, 0xD4, 0xDD, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xD8, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xD9, /* 0x68-0x6B */
+	0xED, 0xF7, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xB5, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD0, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xF1, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE2, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE3, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xD9, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xDF, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xDB, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xE4, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xF1, 0xFA, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xB6, /* 0xB8-0xBB */
+	0xF3, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xDA, /* 0xBC-0xBF */
+	0xE1, 0xE0, 0x00, 0x00, 0xD9, 0xAC, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF5, 0xEB, 0x00, 0x00, 0xE0, 0xB6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE9, 0xC8, 0x00, 0x00, 0xCB, 0xCF, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE3, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xDE, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xBE, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xDC, 0xEF, 0x00, 0x00, 0xD6, 0xA5, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xE2, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xD6, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_85[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xD9, 0xA1, 0x00, 0x00, 0xD8, 0xC0, /* 0x10-0x13 */
+	0xDC, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xED, 0xBD, /* 0x14-0x17 */
+	0xDF, 0xB8, 0x00, 0x00, 0xEA, 0xA5, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xAD, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xF3, 0xF9, 0x00, 0x00, 0xED, 0xF8, /* 0x20-0x23 */
+	0x00, 0x00, 0xF5, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xE1, 0xCA, 0xEB, 0xE3, 0x00, 0x00, 0xF2, 0xDE, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF8, 0xCC, 0x00, 0x00, 0xEA, 0xD9, /* 0x3C-0x3F */
+	0x00, 0x00, 0xD3, 0xC6, 0x00, 0x00, 0xDB, 0xE6, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF5, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xF0, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xFE, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xFB, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xF2, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xCF, 0xF2, 0xF7, 0xB9, 0xD9, 0xF3, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xE1, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xDA, 0xDD, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xB9, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xFB, /* 0x8C-0x8F */
+	0x00, 0x00, 0xCB, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xED, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE0, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xC0, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xFD, 0xBC, 0xDF, 0xB1, 0xE3, 0xEF, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xA3, /* 0xAC-0xAF */
+	0xFD, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xB1, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xCD, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xED, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xD5, 0xC0, 0xE3, 0xF0, 0xED, 0xFA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE9, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD5, 0xED, 0xE7, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD4, 0xF6, 0xE5, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xDB, 0xE7, 0xE2, 0xBF, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xCB, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xF4, 0xF0, 0xDD, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xAB, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_86[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xDE, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xD6, 0xE1, 0xCC, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xB3, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xEE, 0xDC, 0xA2, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xD0, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD5, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xA1, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xDB, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF9, 0xCB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCB, 0xF3, 0xF4, 0xA5, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xC8, /* 0x58-0x5B */
+	0xD6, 0xD7, 0x00, 0x00, 0xE9, 0xE5, 0xFB, 0xDC, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xD0, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xFB, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xA5, 0x00, 0x00, /* 0x88-0x8B */
+	0xDB, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xE2, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xF7, /* 0xA0-0xA3 */
+	0xF0, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xF6, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xEF, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xB1, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xFC, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE8, 0xC3, 0x00, 0x00, 0xF1, 0xC8, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xF1, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xF9, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF2, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xB6, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_87[512] = {
+	0xF5, 0xB9, 0x00, 0x00, 0xDC, 0xF0, 0xE3, 0xF1, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xE8, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xF2, 0xBB, 0x00, 0x00, 0xDE, 0xA4, 0x00, 0x00, /* 0x18-0x1B */
+	0xDA, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xE9, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE3, 0xDA, 0x00, 0x00, 0xFC, 0xD9, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xDA, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xC4, 0x00, 0x00, /* 0x64-0x67 */
+	0xE3, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xFB, 0xDD, 0x00, 0x00, 0xEF, 0xCA, 0x00, 0x00, /* 0x74-0x77 */
+	0xE8, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xCC, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xEB, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xAD, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xAB, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xD9, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xA2, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF6, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xDA, 0xF6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE0, 0xD1, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xA8, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xF9, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xFA, 0xAF, 0x00, 0x00, 0xEB, 0xFC, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xEA, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_88[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE3, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xC5, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xE3, 0xD5, 0xEE, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xCD, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xD9, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xC1, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xFA, 0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xEB, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xFA, 0xBC, 0xE6, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xE5, 0xE2, 0xFA, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xB6, /* 0x54-0x57 */
+	0x00, 0x00, 0xE4, 0xB7, 0x00, 0x00, 0xEA, 0xDB, /* 0x58-0x5B */
+	0x00, 0x00, 0xF5, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xFB, 0xAC, 0xCF, 0xC3, 0xEB, 0xFD, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF8, 0xFA, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xB9, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xE1, 0xF1, 0x00, 0x00, 0xD2, 0xA4, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xFB, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xDA, 0xD0, 0xDB, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xEA, 0xBE, 0xD9, 0xB1, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xCA, 0xB7, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xE7, /* 0x88-0x8B */
+	0x00, 0x00, 0xF8, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xB2, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xC0, 0xF2, 0xDF, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xE5, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xAC, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xCD, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xEE, 0xAE, 0xD6, 0xAE, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xEA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE7, 0xE0, 0xEB, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xCF, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xDC, 0xCD, 0xED, 0xFB, 0x00, 0x00, 0xDE, 0xF0, /* 0xDC-0xDF */
+	0x00, 0x00, 0xD7, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xDE, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xD7, /* 0xF0-0xF3 */
+	0xDB, 0xD0, 0xDB, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xD5, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF0, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_89[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xDC, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xCA, 0xE8, 0x00, 0x00, 0xF8, 0xE6, 0xDC, 0xCE, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xEA, 0xDC, 0xDB, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE9, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xDB, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xA8, 0x00, 0x00, /* 0x34-0x37 */
+	0xD7, 0xAE, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE1, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xCB, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xE5, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xDC, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xD5, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xCA, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xA9, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xA4, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xE9, 0xA9, 0x00, 0x00, 0xD3, 0xC7, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xDD, 0xF8, 0xAE, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xB8, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xAE, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xF2, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xCA, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xCC, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xD4, 0xAD, 0xF6, 0xD1, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xCC, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xC6, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD5, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCE, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xC7, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xB0, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xDF, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xF5, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_8A[512] = {
+	0xE5, 0xEB, 0x00, 0x00, 0xEF, 0xF4, 0xDD, 0xB5, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xCD, 0xAA, 0x00, 0x00, 0xE3, 0xF2, 0x00, 0x00, /* 0x08-0x0B */
+	0xFB, 0xF7, 0x00, 0x00, 0xF7, 0xD0, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xBA, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xE1, 0xF6, 0xFE, /* 0x14-0x17 */
+	0xD1, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xC5, /* 0x18-0x1B */
+	0x00, 0x00, 0xE4, 0xB8, 0x00, 0x00, 0xE1, 0xE8, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xC1, /* 0x20-0x23 */
+	0x00, 0x00, 0xD2, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xBE, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xE0, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xFA, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE1, 0xCD, 0x00, 0x00, 0xCA, 0xB8, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xE0, 0xF1, 0xC9, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xDE, 0xF1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xF0, 0xDF, 0xF8, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xCC, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xF2, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE7, 0xC9, 0x00, 0x00, 0xE2, 0xF3, 0xE7, 0xE1, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xCB, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xE3, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xCF, 0xF8, 0xEF, 0xAC, 0x00, 0x00, /* 0x6C-0x6F */
+	0xFD, 0xFE, 0xFC, 0xA5, 0xFA, 0xB1, 0xDF, 0xD9, /* 0x70-0x73 */
+	0x00, 0x00, 0xE0, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF4, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xF1, 0xCA, 0x00, 0x00, 0xCE, 0xA3, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xF2, 0xBC, 0xEC, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xA5, /* 0x90-0x93 */
+	0x00, 0x00, 0xF7, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xEB, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xDE, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE1, 0xA4, 0xCD, 0xAB, 0x00, 0x00, 0xD9, 0xF4, /* 0xA0-0xA3 */
+	0xE8, 0xA6, 0xCD, 0xCE, 0xE1, 0xE9, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xFC, 0xEF, 0x00, 0x00, 0xE0, 0xE3, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE2, 0xC1, 0x00, 0x00, 0xCE, 0xA4, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xDE, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xEB, 0xFE, 0x00, 0x00, 0xEB, 0xDD, 0xF0, 0xE0, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xDB, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE2, 0xF4, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xC8, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xEB, /* 0xC8-0xCB */
+	0x00, 0x00, 0xEE, 0xB5, 0x00, 0x00, 0xF5, 0xD8, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xDF, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xE5, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xB0, /* 0xD8-0xDB */
+	0xF4, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE3, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xF4, 0xFA, 0xB2, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xF5, 0xCA, 0xDF, /* 0xE8-0xEB */
+	0x00, 0x00, 0xEB, 0xB1, 0xED, 0xBF, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xFD, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xA6, 0xF9, 0xA4, /* 0xF4-0xF7 */
+	0xF0, 0xB3, 0x00, 0x00, 0xE5, 0xEC, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xE7, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8B[512] = {
+	0xD9, 0xC7, 0xE4, 0xD7, 0xEA, 0xDD, 0x00, 0x00, /* 0x00-0x03 */
+	0xD4, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xBA, 0x00, 0x00, /* 0x0C-0x0F */
+	0xDA, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xF9, 0xCC, 0x00, 0x00, 0xE1, 0xDA, 0xDB, 0xBF, /* 0x14-0x17 */
+	0x00, 0x00, 0xCC, 0xC5, 0xEC, 0xD0, 0xCB, 0xBB, /* 0x18-0x1B */
+	0x00, 0x00, 0xDE, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE9, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xD9, 0xC8, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xE3, /* 0x28-0x2B */
+	0xD7, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xC4, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xD0, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xFC, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF1, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xD2, 0xD1, 0xC1, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xE3, 0xDB, 0x00, 0x00, 0xD3, 0xC9, 0x00, 0x00, /* 0x58-0x5B */
+	0xDC, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xED, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xDE, 0xA7, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xBB, /* 0x6C-0x6F */
+	0xEC, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xCC, 0xB9, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xDE, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE7, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xD4, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xA8, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE2, 0xC2, 0x00, 0x00, 0xF3, 0xD8, 0xE5, 0xD3, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xD9, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xC6, 0x00, 0x00, /* 0x98-0x9B */
+};
+
+static unsigned char u2c_8C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xDB, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xAC, /* 0x3C-0x3F */
+	0x00, 0x00, 0xFC, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xE7, 0x00, 0x00, /* 0x44-0x47 */
+	0xD1, 0xC2, 0x00, 0x00, 0xF9, 0xA5, 0x00, 0x00, /* 0x48-0x4B */
+	0xE8, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xE3, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0xCA, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xDF, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xDF, 0xE7, 0xE3, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF8, 0xFB, 0xE3, 0xCF, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xB0, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xE7, 0x00, 0x00, /* 0x88-0x8B */
+	0xD9, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xF8, 0xAF, 0xEF, 0xF6, 0x00, 0x00, /* 0x9C-0x9F */
+	0xDD, 0xB6, 0xEE, 0xAF, 0xCD, 0xF8, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xB8, /* 0xA4-0xA7 */
+	0xFC, 0xA7, 0xF7, 0xFC, 0xF7, 0xB1, 0xCE, 0xBB, /* 0xA8-0xAB */
+	0xF4, 0xA1, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xCD, /* 0xAC-0xAF */
+	0xE1, 0xAE, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xC3, /* 0xB0-0xB3 */
+	0xCF, 0xFE, 0x00, 0x00, 0xF8, 0xBF, 0xD8, 0xE2, /* 0xB4-0xB7 */
+	0xD3, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xA8, /* 0xB8-0xBB */
+	0xF4, 0xE4, 0xEC, 0xC2, 0x00, 0x00, 0xD9, 0xF5, /* 0xBC-0xBF */
+	0xF9, 0xC5, 0xDD, 0xD3, 0xD6, 0xF1, 0xEC, 0xFC, /* 0xC0-0xC3 */
+	0xFC, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xED, 0xC0, /* 0xC4-0xC7 */
+	0xCA, 0xB9, 0x00, 0x00, 0xEE, 0xE4, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xF2, 0xE1, 0x00, 0x00, 0xDE, 0xB9, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xF2, 0x00, 0x00, /* 0xD8-0xDB */
+	0xDE, 0xF4, 0x00, 0x00, 0xDF, 0xDB, 0x00, 0x00, /* 0xDC-0xDF */
+	0xDB, 0xD3, 0x00, 0x00, 0xFA, 0xE7, 0xD8, 0xE3, /* 0xE0-0xE3 */
+	0xF4, 0xC1, 0x00, 0x00, 0xDD, 0xB7, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xF5, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xD4, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xD6, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xB8, /* 0xF8-0xFB */
+	0xCF, 0xC5, 0xDF, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xF2, 0xBE, 0xF6, 0xA1, 0x00, 0x00, 0xEB, 0xCB, /* 0x04-0x07 */
+	0xF1, 0xFC, 0x00, 0x00, 0xF3, 0xC7, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE0, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xFC, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xDB, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xEE, 0xE5, 0x00, 0x00, 0xDE, 0xF5, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xD3, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF1, 0xCB, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xAF, /* 0x70-0x73 */
+	0xDD, 0xB9, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xC3, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xF5, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xC6, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xF0, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xAC, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF5, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xEB, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0xBA, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xBF, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xC5, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xA2, /* 0xC8-0xCB */
+	0xF2, 0xF6, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xBA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xF5, /* 0xD8-0xDB */
+	0x00, 0x00, 0xCB, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xEE, 0xE6, 0x00, 0x00, 0xE0, 0xD3, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCE, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xD8, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xAF, /* 0xF0-0xF3 */
+};
+
+static unsigned char u2c_8E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xC9, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xCE, /* 0x0C-0x0F */
+	0xF4, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xE6, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xA1, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEB, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF1, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xB3, 0x00, 0x00, /* 0x40-0x43 */
+	0xF0, 0xB4, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xF4, /* 0x44-0x47 */
+	0xD4, 0xB0, 0xF3, 0xB2, 0xFB, 0xB7, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xF5, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xE7, /* 0x5C-0x5F */
+	0xF4, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xF5, 0xED, 0x00, 0x00, 0xCF, 0xF3, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xF0, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xCE, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xCC, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xE5, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xF5, 0xE3, 0xF3, /* 0xA8-0xAB */
+	0xCF, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCF, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xB3, 0xE4, 0xD8, /* 0xC8-0xCB */
+	0xCF, 0xF9, 0xCF, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xFA, 0xCD, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE3, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xE2, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xF5, 0xEE, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xBB, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xDC, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xF2, /* 0x00-0x03 */
+	0x00, 0x00, 0xD6, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xEE, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xE5, 0xD8, 0xC2, /* 0x10-0x13 */
+	0xDC, 0xD0, 0xCC, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xE0, /* 0x18-0x1B */
+	0xF6, 0xCA, 0xFD, 0xCA, 0xD8, 0xD6, 0xF4, 0xCF, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xA6, 0xDC, 0xBE, /* 0x24-0x27 */
+	0x00, 0x00, 0xDB, 0xD4, 0xD7, 0xC7, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xFE, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xCD, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xE2, 0xC3, 0xDC, 0xDE, 0x00, 0x00, 0xDC, 0xDF, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xAD, 0xE6, 0xAB, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xF9, 0xDD, 0xEA, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xEF, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xF4, 0xD0, 0xCE, 0xF3, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE6, 0xAC, 0x00, 0x00, 0xCE, 0xDE, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xF9, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xF4, /* 0x98-0x9B */
+	0xCD, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xB8, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xFD, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xDC, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xDE, 0xF6, 0x00, 0x00, 0xDC, 0xAA, /* 0xAC-0xAF */
+	0xF2, 0xE3, 0xE9, 0xB4, 0xD2, 0xDC, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE6, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE3, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xCA, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xD0, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xDA, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xBC, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xE8, 0xDA, 0xDE, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF2, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE2, 0xFB, 0x00, 0x00, 0xCC, 0xA6, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xBB, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xEE, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF5, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_90[512] = {
+	0xF7, 0xDC, 0xE1, 0xEA, 0xCE, 0xC1, 0xD4, 0xB1, /* 0x00-0x03 */
+	0x00, 0x00, 0xFD, 0xB1, 0xE6, 0xBD, 0x00, 0x00, /* 0x04-0x07 */
+	0xFB, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xE7, /* 0x08-0x0B */
+	0x00, 0x00, 0xE1, 0xCE, 0x00, 0x00, 0xF7, 0xE2, /* 0x0C-0x0F */
+	0xF5, 0xEF, 0xCF, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xD4, 0xB2, 0xCC, 0xEF, 0x00, 0x00, 0xD4, 0xE8, /* 0x14-0x17 */
+	0x00, 0x00, 0xEE, 0xCF, 0xF7, 0xD7, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xE0, 0xA6, 0xD6, 0xC1, 0xE1, 0xDC, /* 0x1C-0x1F */
+	0xF0, 0xE3, 0xF1, 0xE4, 0xDC, 0xF1, 0xD6, 0xA7, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xF5, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF1, 0xCE, 0xF2, 0xE4, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xD0, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xEC, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xF9, 0xBA, 0x00, 0x00, 0xEB, 0xB5, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xD4, 0xED, 0xE2, 0xC4, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xE7, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xB4, 0xEA, 0xA1, /* 0x48-0x4B */
+	0x00, 0x00, 0xF8, 0xBC, 0xCE, 0xA6, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF9, 0xC6, 0xFC, 0xDA, 0x00, 0x00, 0xD4, 0xB3, /* 0x50-0x53 */
+	0xD3, 0xB9, 0xEA, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xE9, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xE1, 0xE1, 0xD3, 0xCF, 0xF4, 0xF6, 0x00, 0x00, /* 0x5C-0x5F */
+	0xEA, 0xC0, 0xE1, 0xCF, 0x00, 0x00, 0xCC, 0xBA, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xEE, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xF0, 0xE4, 0xF3, 0xB4, 0xD4, 0xEE, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xC0, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xF1, 0xE5, 0x00, 0x00, 0xF4, 0xC3, /* 0x74-0x77 */
+	0xE0, 0xD4, 0x00, 0x00, 0xEB, 0xB6, 0x00, 0x00, /* 0x78-0x7B */
+	0xD7, 0xA1, 0xCB, 0xE8, 0x00, 0x00, 0xF9, 0xAD, /* 0x7C-0x7F */
+
+	0xE9, 0xAD, 0xD8, 0xE4, 0xFA, 0xB3, 0xE2, 0xC5, /* 0x80-0x83 */
+	0xFC, 0xBD, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xC4, /* 0x84-0x87 */
+	0xD8, 0xB1, 0x00, 0x00, 0xDC, 0xAB, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xA4, /* 0x8C-0x8F */
+	0x00, 0x00, 0xEB, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xE8, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xFB, 0xAE, 0xD1, 0xE1, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xC0, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF5, 0xBE, 0x00, 0x00, 0xDE, 0xF7, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xFB, /* 0xAC-0xAF */
+	0xF7, 0xC6, 0xCF, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE1, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xEE, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE9, 0xF4, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xF4, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xCD, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xCF, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xDD, 0xBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xCE, 0xAC, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE9, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xD4, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_91[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xC7, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xDB, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xFA, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xDE, 0xA9, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xF8, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xEF, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xB3, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xEB, 0xB7, 0xEF, 0xF8, 0xF5, 0xDC, /* 0x48-0x4B */
+	0xED, 0xCC, 0xDB, 0xD5, 0xF1, 0xCF, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xD0, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xB2, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xD9, 0xAE, 0xD5, 0xAC, 0x00, 0x00, /* 0x68-0x6B */
+	0xE2, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xFD, 0xA3, 0x00, 0x00, 0xFB, 0xE5, /* 0x74-0x77 */
+	0xDF, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xF5, /* 0x84-0x87 */
+	0x00, 0x00, 0xF6, 0xAD, 0x00, 0x00, 0xF5, 0xB3, /* 0x88-0x8B */
+	0x00, 0x00, 0xF0, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xA5, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xF5, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xA2, /* 0xA8-0xAB */
+	0xED, 0xFD, 0x00, 0x00, 0xF5, 0xB4, 0xFB, 0xB8, /* 0xAC-0xAF */
+	0x00, 0x00, 0xDB, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xD6, 0xCA, 0xCB, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE5, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xFA, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xEB, 0xB8, 0x00, 0x00, 0xE0, 0xB7, /* 0xC8-0xCB */
+	0xD7, 0xEC, 0xF1, 0xEC, 0xE5, 0xAF, 0xD5, 0xE1, /* 0xCC-0xCF */
+	0xD7, 0xED, 0xD1, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xF2, /* 0xD4-0xD7 */
+	0xEF, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xDD, 0xBC, 0xF6, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xE5, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xC4, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE9, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xF3, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_92[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xD4, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xCC, 0xA2, 0xF7, 0xFE, 0xDF, 0xBC, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xCD, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xB7, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xD6, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xAD, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xAF, /* 0x3C-0x3F */
+	0xCB, 0xA5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xCB, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xFA, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xC6, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE7, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xC7, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xA4, 0x00, 0x00, /* 0x60-0x63 */
+	0xCF, 0xC9, 0xE2, 0xFC, 0xEF, 0xFA, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xEB, 0xDE, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xC8, /* 0x80-0x83 */
+	0x00, 0x00, 0xD4, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE0, 0xD5, 0x00, 0x00, 0xEF, 0xB0, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xC7, 0x00, 0x00, /* 0x94-0x97 */
+	0xD9, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xF9, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xE5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xCA, 0xE1, 0xD1, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xEF, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xF9, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xF2, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE0, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xE8, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xCB, 0xEA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xCB, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_93[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD6, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF5, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xF5, 0xDF, 0x00, 0x00, 0xEE, 0xB6, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xF6, 0xD3, 0xCA, /* 0x1C-0x1F */
+	0xEF, 0xFC, 0xD1, 0xC4, 0xEF, 0xB1, 0x00, 0x00, /* 0x20-0x23 */
+	0xD1, 0xC5, 0x00, 0x00, 0xD0, 0xDE, 0x00, 0x00, /* 0x24-0x27 */
+	0xD9, 0xE1, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xB8, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xD1, 0xF3, 0xB9, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xE7, 0xCC, 0x00, 0x00, 0xD6, 0xA8, 0xCE, 0xA7, /* 0x48-0x4B */
+	0x00, 0x00, 0xD4, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE4, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xB4, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xB9, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xCB, 0xF5, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xF6, 0xDD, 0x00, 0x00, 0xF1, 0xA3, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xCC, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE9, 0xCA, 0x00, 0x00, 0xE1, 0xF0, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xE0, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xAF, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xD1, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xFB, 0xE0, 0xF2, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xEC, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xEC, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xEE, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xCB, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xCC, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD7, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xA1, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_94[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xFC, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xF1, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xE0, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xB2, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF4, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xF7, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xF1, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xCA, 0xFC, 0xCA, 0xFD, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xCE, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xF3, 0xC8, 0x00, 0x00, 0xF3, 0xBA, /* 0x7C-0x7F */
+};
+
+static unsigned char u2c_95[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xED, 0xFE, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xDA, 0xA6, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xEC, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xF8, 0xCD, 0x00, 0x00, 0xCB, 0xD2, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xCE, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF9, 0xD8, 0xF9, 0xD9, 0xCA, 0xE0, /* 0x90-0x93 */
+	0xDA, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xCB, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCA, 0xC8, /* 0xA0-0xA3 */
+	0xF9, 0xEE, 0xDB, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xD0, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xD5, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE6, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xA2, /* 0xB8-0xBB */
+	0xE4, 0xD9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xE1, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xFC, 0xC4, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xF9, 0xEF, 0xCF, 0xF4, 0xF7, 0xE6, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xCE, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF4, 0xC5, 0xDC, 0xA3, 0x00, 0x00, /* 0xE0-0xE3 */
+};
+
+static unsigned char u2c_96[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xDD, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xF4, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xA1, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xD6, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xC1, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xE6, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xB9, /* 0x3C-0x3F */
+	0xF6, 0xED, 0x00, 0x00, 0xF9, 0xAE, 0x00, 0x00, /* 0x40-0x43 */
+	0xDD, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xB0, /* 0x48-0x4B */
+	0xD8, 0xE8, 0xCB, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF9, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xCE, /* 0x58-0x5B */
+	0xF9, 0xF0, 0xE0, 0xED, 0xE3, 0xB3, 0xF4, 0xB3, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xC2, 0xF2, 0xE6, /* 0x60-0x63 */
+	0xF0, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xD6, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xEB, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xE7, /* 0x70-0x73 */
+	0x00, 0x00, 0xD7, 0xD5, 0xD4, 0xB6, 0xF9, 0xE8, /* 0x74-0x77 */
+	0xD7, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE5, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE9, 0xEA, 0xD7, 0xCC, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xE9, 0xE2, 0xC9, /* 0x88-0x8B */
+	0x00, 0x00, 0xFC, 0xDB, 0xCD, 0xAD, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xCC, 0xB0, 0xEA, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xE4, 0xF6, 0xD0, 0xC0, 0x00, 0x00, 0xF0, 0xB7, /* 0x98-0x9B */
+	0xEE, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xF6, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xCA, /* 0xA4-0xA7 */
+	0xE2, 0xCB, 0x00, 0x00, 0xFA, 0xCF, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xEB, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xCB, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xB4, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xED, 0xCD, 0xE4, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xEA, 0xA9, 0xE4, 0xBA, 0xF3, 0xA2, 0xCD, 0xD2, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF6, 0xCB, 0x00, 0x00, 0xF1, 0xE6, /* 0xC8-0xCB */
+	0xED, 0xC1, 0xE8, 0xBC, 0xEE, 0xD1, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xF0, 0xE7, 0xE2, 0xCC, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE4, 0xAA, 0x00, 0x00, 0xF5, 0xE1, /* 0xD8-0xDB */
+	0xED, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xEE, 0xD1, 0xF1, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE9, 0xEB, 0xE9, 0xEC, 0xE0, 0xE4, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xA7, /* 0xEC-0xEF */
+	0xDD, 0xD4, 0x00, 0x00, 0xEA, 0xA3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xC3, 0xD6, 0xF4, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xDA, 0xDF, 0x00, 0x00, 0xEF, 0xB3, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_97[512] = {
+	0xE2, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xFD, 0xF2, 0xE8, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xEF, 0xC5, 0x00, 0x00, 0xE7, 0xE7, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xFD, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE7, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xDF, 0xDC, 0x00, 0x00, 0xF9, 0xC7, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xF6, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xDF, 0xAC, 0x00, 0x00, 0xD6, 0xDA, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xDC, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF0, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xFA, 0x00, 0x00, /* 0x40-0x43 */
+	0xE4, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xD6, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xF4, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xFE, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xF0, 0xA1, 0x00, 0x00, 0xDE, 0xAA, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xDA, 0xBC, 0xD8, 0xFC, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xFA, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xEC, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xFC, 0xA8, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xE6, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xCB, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xB9, /* 0x88-0x8B */
+	0x00, 0x00, 0xE4, 0xD3, 0x00, 0x00, 0xCD, 0xF9, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCF, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xCA, 0xEA, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xD4, /* 0xA8-0xAB */
+	0x00, 0x00, 0xF8, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xC7, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xDF, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xDB, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD4, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xE5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xD2, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xA4, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xC2, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_98[512] = {
+	0xFB, 0xE1, 0xFA, 0xED, 0xF0, 0xA2, 0xCC, 0xF1, /* 0x00-0x03 */
+	0x00, 0x00, 0xFA, 0xA3, 0xE2, 0xF7, 0x00, 0x00, /* 0x04-0x07 */
+	0xE2, 0xCE, 0x00, 0x00, 0xE9, 0xF5, 0x00, 0x00, /* 0x08-0x0B */
+	0xE1, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE7, 0xE8, 0xE8, 0xD7, 0xDA, 0xF8, 0xD4, 0xCB, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xF6, /* 0x14-0x17 */
+	0xD6, 0xC5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD4, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xFA, 0xFA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xCC, 0xF2, 0xF7, 0xDD, 0x00, 0x00, 0xDE, 0xBA, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xA8, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xF0, 0xB9, 0xE4, 0xFE, 0xE4, 0xC9, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE4, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xEA, 0xC3, 0x00, 0x00, 0xEF, 0xB4, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xBE, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xFB, 0xE2, 0x00, 0x00, 0xCD, 0xD3, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xB5, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xE9, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF9, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xBD, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF7, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF8, 0xFD, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xFC, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAB, /* 0xD8-0xDB */
+	0xDB, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xDD, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE1, 0xE2, 0xD1, 0xC6, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF6, 0xD0, 0xEB, 0xE6, 0xDA, 0xF9, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEC, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xDE, 0xF8, 0xF8, 0xE9, 0xE3, 0xDE, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_99[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xF5, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xFA, 0xC3, 0xE5, 0xD7, 0x00, 0x00, /* 0x08-0x0B */
+	0xEC, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF3, 0xC9, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xBB, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE6, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xB6, 0x00, 0x00, /* 0x1C-0x1F */
+	0xDC, 0xBF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xCE, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xD8, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xD0, 0xCF, 0x00, 0x00, 0xCF, 0xFA, /* 0x48-0x4B */
+	0xF3, 0xCA, 0xE0, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xD1, 0xC7, 0xE9, 0xAE, 0x00, 0x00, /* 0x50-0x53 */
+	0xE8, 0xBD, 0x00, 0x00, 0x00, 0x00, 0xFA, 0xC4, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xCF, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xFA, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xF9, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xDC, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xFB, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xD8, 0xA9, 0xE5, 0xDF, 0xF9, 0xA7, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF6, 0xEE, 0x00, 0x00, 0xF6, 0xCC, /* 0xB0-0xB3 */
+	0xE2, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xEC, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xDA, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xF1, 0xD2, 0xD2, 0xCC, 0xCF, 0xCB, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xCA, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xDD, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF6, 0xEF, 0x00, 0x00, 0xDE, 0xF9, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xFA, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xD5, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xE7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9A[512] = {
+	0x00, 0x00, 0xDE, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xDC, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xC8, 0xD1, 0xC9, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xF8, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xF6, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD4, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xE2, 0xE1, 0xD3, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xD8, 0xE9, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xFE, /* 0x40-0x43 */
+	0x00, 0x00, 0xCF, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xFD, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xCE, 0xF6, 0x00, 0x00, 0xFA, 0xD0, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xF3, 0xE6, 0xBE, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xAE, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xF0, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xD1, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xFC, 0xBE, 0xD5, 0xF1, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xCD, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xFA, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD0, /* 0xD0-0xD3 */
+	0xF4, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xCD, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE7, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xA5, 0x00, 0x00, /* 0xEC-0xEF */
+};
+
+static unsigned char u2c_9B[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xD1, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xA2, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xE3, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xEA, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xD0, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xCE, 0xDA, 0xFB, 0xEB, 0xDB, 0xA6, /* 0x40-0x43 */
+	0xDB, 0xDE, 0xD8, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xE0, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xD8, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xE0, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xDB, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xC6, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF8, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xD5, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xF7, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xD8, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD7, 0xEF, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xED, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xCD, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCC, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+};
+
+static unsigned char u2c_9C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xF5, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE4, 0xCA, 0x00, 0x00, 0xDC, 0xE1, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xF9, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xFC, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xE8, 0xA7, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xC4, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xBE, /* 0x44-0x47 */
+	0x00, 0x00, 0xDC, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xF7, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xF0, 0xE8, 0x00, 0x00, 0xDD, 0xC0, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xCF, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xF3, /* 0xF0-0xF3 */
+	0xD9, 0xB0, 0x00, 0x00, 0xE6, 0xE9, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_9D[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xE4, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xC4, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xEC, 0x00, 0x00, /* 0x24-0x27 */
+	0xE4, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0xF8, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xCC, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xE4, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xCD, 0xDC, 0xD9, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xDD, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xCE, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xD9, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xA3, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xF9, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xCD, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xCE, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xAF, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xFD, 0xD3, 0xEB, 0xED, 0xD6, 0xDC, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_9E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xA4, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0xB6, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD6, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF9, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE7, 0xA4, 0x00, 0x00, 0xD6, 0xE3, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xCB, 0xD6, 0xE4, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xF2, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xDE, 0xFA, 0x00, 0x00, 0xD7, 0xF8, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xD8, 0xEA, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xCF, 0xD5, 0xD8, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xAB, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xFD, 0xCB, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xDC, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xE0, 0xA8, 0xD5, 0xF3, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xFD, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xCC, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD9, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xD3, 0xEA, /* 0xD8-0xDB */
+	0xF5, 0xF5, 0x00, 0x00, 0xEF, 0xC7, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xD3, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xDA, 0xBD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+};
+
+static unsigned char u2c_9F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xA8, /* 0x04-0x07 */
+	0xDC, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xA3, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xD5, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE0, 0xA9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAC, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xBA, 0xEE, 0xB1, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xB2, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xCD, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xD2, /* 0x5C-0x5F */
+	0x00, 0x00, 0xD6, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE5, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xBB, 0x00, 0x00, /* 0x68-0x6B */
+	0xE5, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xCB, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xD7, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDB, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xCA, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xCF, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_AC[512] = {
+	0xB0, 0xA1, 0xB0, 0xA2, 0x81, 0x41, 0x81, 0x42, /* 0x00-0x03 */
+	0xB0, 0xA3, 0x81, 0x43, 0x81, 0x44, 0xB0, 0xA4, /* 0x04-0x07 */
+	0xB0, 0xA5, 0xB0, 0xA6, 0xB0, 0xA7, 0x81, 0x45, /* 0x08-0x0B */
+	0x81, 0x46, 0x81, 0x47, 0x81, 0x48, 0x81, 0x49, /* 0x0C-0x0F */
+	0xB0, 0xA8, 0xB0, 0xA9, 0xB0, 0xAA, 0xB0, 0xAB, /* 0x10-0x13 */
+	0xB0, 0xAC, 0xB0, 0xAD, 0xB0, 0xAE, 0xB0, 0xAF, /* 0x14-0x17 */
+	0x81, 0x4A, 0xB0, 0xB0, 0xB0, 0xB1, 0xB0, 0xB2, /* 0x18-0x1B */
+	0xB0, 0xB3, 0xB0, 0xB4, 0x81, 0x4B, 0x81, 0x4C, /* 0x1C-0x1F */
+	0xB0, 0xB5, 0x81, 0x4D, 0x81, 0x4E, 0x81, 0x4F, /* 0x20-0x23 */
+	0xB0, 0xB6, 0x81, 0x50, 0x81, 0x51, 0x81, 0x52, /* 0x24-0x27 */
+	0x81, 0x53, 0x81, 0x54, 0x81, 0x55, 0x81, 0x56, /* 0x28-0x2B */
+	0xB0, 0xB7, 0xB0, 0xB8, 0x81, 0x57, 0xB0, 0xB9, /* 0x2C-0x2F */
+	0xB0, 0xBA, 0xB0, 0xBB, 0x81, 0x58, 0x81, 0x59, /* 0x30-0x33 */
+	0x81, 0x5A, 0x81, 0x61, 0x81, 0x62, 0x81, 0x63, /* 0x34-0x37 */
+	0xB0, 0xBC, 0xB0, 0xBD, 0x81, 0x64, 0x81, 0x65, /* 0x38-0x3B */
+	0xB0, 0xBE, 0x81, 0x66, 0x81, 0x67, 0x81, 0x68, /* 0x3C-0x3F */
+	0xB0, 0xBF, 0x81, 0x69, 0x81, 0x6A, 0x81, 0x6B, /* 0x40-0x43 */
+	0x81, 0x6C, 0x81, 0x6D, 0x81, 0x6E, 0x81, 0x6F, /* 0x44-0x47 */
+	0x81, 0x70, 0x81, 0x71, 0x81, 0x72, 0xB0, 0xC0, /* 0x48-0x4B */
+	0x81, 0x73, 0xB0, 0xC1, 0x81, 0x74, 0x81, 0x75, /* 0x4C-0x4F */
+	0x81, 0x76, 0x81, 0x77, 0x81, 0x78, 0x81, 0x79, /* 0x50-0x53 */
+	0xB0, 0xC2, 0x81, 0x7A, 0x81, 0x81, 0x81, 0x82, /* 0x54-0x57 */
+	0xB0, 0xC3, 0x81, 0x83, 0x81, 0x84, 0x81, 0x85, /* 0x58-0x5B */
+	0xB0, 0xC4, 0x81, 0x86, 0x81, 0x87, 0x81, 0x88, /* 0x5C-0x5F */
+	0x81, 0x89, 0x81, 0x8A, 0x81, 0x8B, 0x81, 0x8C, /* 0x60-0x63 */
+	0x81, 0x8D, 0x81, 0x8E, 0x81, 0x8F, 0x81, 0x90, /* 0x64-0x67 */
+	0x81, 0x91, 0x81, 0x92, 0x81, 0x93, 0x81, 0x94, /* 0x68-0x6B */
+	0x81, 0x95, 0x81, 0x96, 0x81, 0x97, 0x81, 0x98, /* 0x6C-0x6F */
+	0xB0, 0xC5, 0xB0, 0xC6, 0x81, 0x99, 0x81, 0x9A, /* 0x70-0x73 */
+	0xB0, 0xC7, 0x81, 0x9B, 0x81, 0x9C, 0xB0, 0xC8, /* 0x74-0x77 */
+	0xB0, 0xC9, 0x81, 0x9D, 0xB0, 0xCA, 0x81, 0x9E, /* 0x78-0x7B */
+	0x81, 0x9F, 0x81, 0xA0, 0x81, 0xA1, 0x81, 0xA2, /* 0x7C-0x7F */
+	
+	0xB0, 0xCB, 0xB0, 0xCC, 0x81, 0xA3, 0xB0, 0xCD, /* 0x80-0x83 */
+	0xB0, 0xCE, 0xB0, 0xCF, 0xB0, 0xD0, 0x81, 0xA4, /* 0x84-0x87 */
+	0x81, 0xA5, 0xB0, 0xD1, 0xB0, 0xD2, 0xB0, 0xD3, /* 0x88-0x8B */
+	0xB0, 0xD4, 0x81, 0xA6, 0x81, 0xA7, 0x81, 0xA8, /* 0x8C-0x8F */
+	0xB0, 0xD5, 0x81, 0xA9, 0x81, 0xAA, 0x81, 0xAB, /* 0x90-0x93 */
+	0xB0, 0xD6, 0x81, 0xAC, 0x81, 0xAD, 0x81, 0xAE, /* 0x94-0x97 */
+	0x81, 0xAF, 0x81, 0xB0, 0x81, 0xB1, 0x81, 0xB2, /* 0x98-0x9B */
+	0xB0, 0xD7, 0xB0, 0xD8, 0x81, 0xB3, 0xB0, 0xD9, /* 0x9C-0x9F */
+	0xB0, 0xDA, 0xB0, 0xDB, 0x81, 0xB4, 0x81, 0xB5, /* 0xA0-0xA3 */
+	0x81, 0xB6, 0x81, 0xB7, 0x81, 0xB8, 0x81, 0xB9, /* 0xA4-0xA7 */
+	0xB0, 0xDC, 0xB0, 0xDD, 0xB0, 0xDE, 0x81, 0xBA, /* 0xA8-0xAB */
+	0xB0, 0xDF, 0x81, 0xBB, 0x81, 0xBC, 0xB0, 0xE0, /* 0xAC-0xAF */
+	0xB0, 0xE1, 0x81, 0xBD, 0x81, 0xBE, 0x81, 0xBF, /* 0xB0-0xB3 */
+	0x81, 0xC0, 0x81, 0xC1, 0x81, 0xC2, 0x81, 0xC3, /* 0xB4-0xB7 */
+	0xB0, 0xE2, 0xB0, 0xE3, 0x81, 0xC4, 0xB0, 0xE4, /* 0xB8-0xBB */
+	0xB0, 0xE5, 0xB0, 0xE6, 0x81, 0xC5, 0x81, 0xC6, /* 0xBC-0xBF */
+	0x81, 0xC7, 0xB0, 0xE7, 0x81, 0xC8, 0x81, 0xC9, /* 0xC0-0xC3 */
+	0xB0, 0xE8, 0x81, 0xCA, 0x81, 0xCB, 0x81, 0xCC, /* 0xC4-0xC7 */
+	0xB0, 0xE9, 0x81, 0xCD, 0x81, 0xCE, 0x81, 0xCF, /* 0xC8-0xCB */
+	0xB0, 0xEA, 0x81, 0xD0, 0x81, 0xD1, 0x81, 0xD2, /* 0xCC-0xCF */
+	0x81, 0xD3, 0x81, 0xD4, 0x81, 0xD5, 0x81, 0xD6, /* 0xD0-0xD3 */
+	0x81, 0xD7, 0xB0, 0xEB, 0x81, 0xD8, 0xB0, 0xEC, /* 0xD4-0xD7 */
+	0x81, 0xD9, 0x81, 0xDA, 0x81, 0xDB, 0x81, 0xDC, /* 0xD8-0xDB */
+	0x81, 0xDD, 0x81, 0xDE, 0x81, 0xDF, 0x81, 0xE0, /* 0xDC-0xDF */
+	0xB0, 0xED, 0xB0, 0xEE, 0x81, 0xE1, 0x81, 0xE2, /* 0xE0-0xE3 */
+	0xB0, 0xEF, 0x81, 0xE3, 0x81, 0xE4, 0xB0, 0xF0, /* 0xE4-0xE7 */
+	0xB0, 0xF1, 0x81, 0xE5, 0xB0, 0xF2, 0x81, 0xE6, /* 0xE8-0xEB */
+	0xB0, 0xF3, 0x81, 0xE7, 0x81, 0xE8, 0xB0, 0xF4, /* 0xEC-0xEF */
+	0xB0, 0xF5, 0xB0, 0xF6, 0x81, 0xE9, 0xB0, 0xF7, /* 0xF0-0xF3 */
+	0x81, 0xEA, 0xB0, 0xF8, 0xB0, 0xF9, 0x81, 0xEB, /* 0xF4-0xF7 */
+	0x81, 0xEC, 0x81, 0xED, 0x81, 0xEE, 0x81, 0xEF, /* 0xF8-0xFB */
+	0xB0, 0xFA, 0xB0, 0xFB, 0x81, 0xF0, 0x81, 0xF1, /* 0xFC-0xFF */
+};
+
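+
+/*
+ * Editorial note, not part of this patch: each u2c_XX array in this file is
+ * a per-page Unicode-to-multibyte conversion table.  The high byte of a
+ * 16-bit Unicode value selects the page (u2c_AC above covers U+AC00-U+ACFF),
+ * the low byte times two indexes the pair of output bytes, and a 0x00, 0x00
+ * pair marks a code point with no mapping.  The standalone sketch below only
+ * illustrates that lookup; it is not the conversion code from this patch,
+ * and the names u2c_AC_demo and uni_to_multibyte are invented for the
+ * example.
+ *
+ *	#include <stdio.h>
+ *
+ *	// First entry of the real u2c_AC table above: U+AC00 -> 0xB0 0xA1.
+ *	// Remaining entries are left zero-filled for brevity.
+ *	static unsigned char u2c_AC_demo[512] = { 0xB0, 0xA1 };
+ *
+ *	static int uni_to_multibyte(const unsigned char *page,
+ *				    unsigned char low, unsigned char *out)
+ *	{
+ *		if (!page)
+ *			return -1;	// no table for this Unicode page
+ *		out[0] = page[low * 2];
+ *		out[1] = page[low * 2 + 1];
+ *		if (out[0] == 0x00 && out[1] == 0x00)
+ *			return -1;	// 0x00, 0x00 marks an unmapped code point
+ *		return 2;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		unsigned char out[2];
+ *
+ *		// U+AC00: high byte 0xAC picks the page, low byte 0x00 the entry.
+ *		if (uni_to_multibyte(u2c_AC_demo, 0x00, out) == 2)
+ *			printf("%02X %02X\n", out[0], out[1]);	// prints "B0 A1"
+ *		return 0;
+ *	}
+ */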
+static unsigned char u2c_AD[512] = {
+	0xB0, 0xFC, 0x81, 0xF2, 0x81, 0xF3, 0x81, 0xF4, /* 0x00-0x03 */
+	0xB0, 0xFD, 0x81, 0xF5, 0xB0, 0xFE, 0x81, 0xF6, /* 0x04-0x07 */
+	0x81, 0xF7, 0x81, 0xF8, 0x81, 0xF9, 0x81, 0xFA, /* 0x08-0x0B */
+	0xB1, 0xA1, 0xB1, 0xA2, 0x81, 0xFB, 0xB1, 0xA3, /* 0x0C-0x0F */
+	0x81, 0xFC, 0xB1, 0xA4, 0x81, 0xFD, 0x81, 0xFE, /* 0x10-0x13 */
+	0x82, 0x41, 0x82, 0x42, 0x82, 0x43, 0x82, 0x44, /* 0x14-0x17 */
+	0xB1, 0xA5, 0x82, 0x45, 0x82, 0x46, 0x82, 0x47, /* 0x18-0x1B */
+	0xB1, 0xA6, 0x82, 0x48, 0x82, 0x49, 0x82, 0x4A, /* 0x1C-0x1F */
+	0xB1, 0xA7, 0x82, 0x4B, 0x82, 0x4C, 0x82, 0x4D, /* 0x20-0x23 */
+	0x82, 0x4E, 0x82, 0x4F, 0x82, 0x50, 0x82, 0x51, /* 0x24-0x27 */
+	0x82, 0x52, 0xB1, 0xA8, 0x82, 0x53, 0x82, 0x54, /* 0x28-0x2B */
+	0xB1, 0xA9, 0xB1, 0xAA, 0x82, 0x55, 0x82, 0x56, /* 0x2C-0x2F */
+	0x82, 0x57, 0x82, 0x58, 0x82, 0x59, 0x82, 0x5A, /* 0x30-0x33 */
+	0xB1, 0xAB, 0xB1, 0xAC, 0x82, 0x61, 0x82, 0x62, /* 0x34-0x37 */
+	0xB1, 0xAD, 0x82, 0x63, 0x82, 0x64, 0x82, 0x65, /* 0x38-0x3B */
+	0xB1, 0xAE, 0x82, 0x66, 0x82, 0x67, 0x82, 0x68, /* 0x3C-0x3F */
+	0x82, 0x69, 0x82, 0x6A, 0x82, 0x6B, 0x82, 0x6C, /* 0x40-0x43 */
+	0xB1, 0xAF, 0xB1, 0xB0, 0x82, 0x6D, 0xB1, 0xB1, /* 0x44-0x47 */
+	0x82, 0x6E, 0xB1, 0xB2, 0x82, 0x6F, 0x82, 0x70, /* 0x48-0x4B */
+	0x82, 0x71, 0x82, 0x72, 0x82, 0x73, 0x82, 0x74, /* 0x4C-0x4F */
+	0xB1, 0xB3, 0x82, 0x75, 0x82, 0x76, 0x82, 0x77, /* 0x50-0x53 */
+	0xB1, 0xB4, 0x82, 0x78, 0x82, 0x79, 0x82, 0x7A, /* 0x54-0x57 */
+	0xB1, 0xB5, 0x82, 0x81, 0x82, 0x82, 0x82, 0x83, /* 0x58-0x5B */
+	0x82, 0x84, 0x82, 0x85, 0x82, 0x86, 0x82, 0x87, /* 0x5C-0x5F */
+	0x82, 0x88, 0xB1, 0xB6, 0x82, 0x89, 0xB1, 0xB7, /* 0x60-0x63 */
+	0x82, 0x8A, 0x82, 0x8B, 0x82, 0x8C, 0x82, 0x8D, /* 0x64-0x67 */
+	0x82, 0x8E, 0x82, 0x8F, 0x82, 0x90, 0x82, 0x91, /* 0x68-0x6B */
+	0xB1, 0xB8, 0xB1, 0xB9, 0x82, 0x92, 0x82, 0x93, /* 0x6C-0x6F */
+	0xB1, 0xBA, 0x82, 0x94, 0x82, 0x95, 0xB1, 0xBB, /* 0x70-0x73 */
+	0xB1, 0xBC, 0xB1, 0xBD, 0xB1, 0xBE, 0x82, 0x96, /* 0x74-0x77 */
+	0x82, 0x97, 0x82, 0x98, 0x82, 0x99, 0xB1, 0xBF, /* 0x78-0x7B */
+	0xB1, 0xC0, 0xB1, 0xC1, 0x82, 0x9A, 0xB1, 0xC2, /* 0x7C-0x7F */
+	
+	0x82, 0x9B, 0xB1, 0xC3, 0xB1, 0xC4, 0x82, 0x9C, /* 0x80-0x83 */
+	0x82, 0x9D, 0x82, 0x9E, 0x82, 0x9F, 0x82, 0xA0, /* 0x84-0x87 */
+	0xB1, 0xC5, 0xB1, 0xC6, 0x82, 0xA1, 0x82, 0xA2, /* 0x88-0x8B */
+	0xB1, 0xC7, 0x82, 0xA3, 0x82, 0xA4, 0x82, 0xA5, /* 0x8C-0x8F */
+	0xB1, 0xC8, 0x82, 0xA6, 0x82, 0xA7, 0x82, 0xA8, /* 0x90-0x93 */
+	0x82, 0xA9, 0x82, 0xAA, 0x82, 0xAB, 0x82, 0xAC, /* 0x94-0x97 */
+	0x82, 0xAD, 0x82, 0xAE, 0x82, 0xAF, 0x82, 0xB0, /* 0x98-0x9B */
+	0xB1, 0xC9, 0xB1, 0xCA, 0x82, 0xB1, 0x82, 0xB2, /* 0x9C-0x9F */
+	0x82, 0xB3, 0x82, 0xB4, 0x82, 0xB5, 0x82, 0xB6, /* 0xA0-0xA3 */
+	0xB1, 0xCB, 0x82, 0xB7, 0x82, 0xB8, 0x82, 0xB9, /* 0xA4-0xA7 */
+	0x82, 0xBA, 0x82, 0xBB, 0x82, 0xBC, 0x82, 0xBD, /* 0xA8-0xAB */
+	0x82, 0xBE, 0x82, 0xBF, 0x82, 0xC0, 0x82, 0xC1, /* 0xAC-0xAF */
+	0x82, 0xC2, 0x82, 0xC3, 0x82, 0xC4, 0x82, 0xC5, /* 0xB0-0xB3 */
+	0x82, 0xC6, 0x82, 0xC7, 0x82, 0xC8, 0xB1, 0xCC, /* 0xB4-0xB7 */
+	0x82, 0xC9, 0x82, 0xCA, 0x82, 0xCB, 0x82, 0xCC, /* 0xB8-0xBB */
+	0x82, 0xCD, 0x82, 0xCE, 0x82, 0xCF, 0x82, 0xD0, /* 0xBC-0xBF */
+	0xB1, 0xCD, 0xB1, 0xCE, 0x82, 0xD1, 0x82, 0xD2, /* 0xC0-0xC3 */
+	0xB1, 0xCF, 0x82, 0xD3, 0x82, 0xD4, 0x82, 0xD5, /* 0xC4-0xC7 */
+	0xB1, 0xD0, 0x82, 0xD6, 0x82, 0xD7, 0x82, 0xD8, /* 0xC8-0xCB */
+	0x82, 0xD9, 0x82, 0xDA, 0x82, 0xDB, 0x82, 0xDC, /* 0xCC-0xCF */
+	0xB1, 0xD1, 0xB1, 0xD2, 0x82, 0xDD, 0xB1, 0xD3, /* 0xD0-0xD3 */
+	0x82, 0xDE, 0x82, 0xDF, 0x82, 0xE0, 0x82, 0xE1, /* 0xD4-0xD7 */
+	0x82, 0xE2, 0x82, 0xE3, 0x82, 0xE4, 0x82, 0xE5, /* 0xD8-0xDB */
+	0xB1, 0xD4, 0x82, 0xE6, 0x82, 0xE7, 0x82, 0xE8, /* 0xDC-0xDF */
+	0xB1, 0xD5, 0x82, 0xE9, 0x82, 0xEA, 0x82, 0xEB, /* 0xE0-0xE3 */
+	0xB1, 0xD6, 0x82, 0xEC, 0x82, 0xED, 0x82, 0xEE, /* 0xE4-0xE7 */
+	0x82, 0xEF, 0x82, 0xF0, 0x82, 0xF1, 0x82, 0xF2, /* 0xE8-0xEB */
+	0x82, 0xF3, 0x82, 0xF4, 0x82, 0xF5, 0x82, 0xF6, /* 0xEC-0xEF */
+	0x82, 0xF7, 0x82, 0xF8, 0x82, 0xF9, 0x82, 0xFA, /* 0xF0-0xF3 */
+	0x82, 0xFB, 0x82, 0xFC, 0x82, 0xFD, 0x82, 0xFE, /* 0xF4-0xF7 */
+	0xB1, 0xD7, 0xB1, 0xD8, 0x83, 0x41, 0x83, 0x42, /* 0xF8-0xFB */
+	0xB1, 0xD9, 0x83, 0x43, 0x83, 0x44, 0xB1, 0xDA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_AE[512] = {
+	0xB1, 0xDB, 0xB1, 0xDC, 0x83, 0x45, 0x83, 0x46, /* 0x00-0x03 */
+	0x83, 0x47, 0x83, 0x48, 0x83, 0x49, 0x83, 0x4A, /* 0x04-0x07 */
+	0xB1, 0xDD, 0xB1, 0xDE, 0x83, 0x4B, 0xB1, 0xDF, /* 0x08-0x0B */
+	0x83, 0x4C, 0xB1, 0xE0, 0x83, 0x4D, 0x83, 0x4E, /* 0x0C-0x0F */
+	0x83, 0x4F, 0x83, 0x50, 0x83, 0x51, 0x83, 0x52, /* 0x10-0x13 */
+	0xB1, 0xE1, 0x83, 0x53, 0x83, 0x54, 0x83, 0x55, /* 0x14-0x17 */
+	0x83, 0x56, 0x83, 0x57, 0x83, 0x58, 0x83, 0x59, /* 0x18-0x1B */
+	0x83, 0x5A, 0x83, 0x61, 0x83, 0x62, 0x83, 0x63, /* 0x1C-0x1F */
+	0x83, 0x64, 0x83, 0x65, 0x83, 0x66, 0x83, 0x67, /* 0x20-0x23 */
+	0x83, 0x68, 0x83, 0x69, 0x83, 0x6A, 0x83, 0x6B, /* 0x24-0x27 */
+	0x83, 0x6C, 0x83, 0x6D, 0x83, 0x6E, 0x83, 0x6F, /* 0x28-0x2B */
+	0x83, 0x70, 0x83, 0x71, 0x83, 0x72, 0x83, 0x73, /* 0x2C-0x2F */
+	0xB1, 0xE2, 0xB1, 0xE3, 0x83, 0x74, 0x83, 0x75, /* 0x30-0x33 */
+	0xB1, 0xE4, 0x83, 0x76, 0x83, 0x77, 0xB1, 0xE5, /* 0x34-0x37 */
+	0xB1, 0xE6, 0x83, 0x78, 0xB1, 0xE7, 0x83, 0x79, /* 0x38-0x3B */
+	0x83, 0x7A, 0x83, 0x81, 0x83, 0x82, 0x83, 0x83, /* 0x3C-0x3F */
+	0xB1, 0xE8, 0xB1, 0xE9, 0x83, 0x84, 0xB1, 0xEA, /* 0x40-0x43 */
+	0x83, 0x85, 0xB1, 0xEB, 0xB1, 0xEC, 0x83, 0x86, /* 0x44-0x47 */
+	0x83, 0x87, 0x83, 0x88, 0xB1, 0xED, 0x83, 0x89, /* 0x48-0x4B */
+	0xB1, 0xEE, 0xB1, 0xEF, 0xB1, 0xF0, 0x83, 0x8A, /* 0x4C-0x4F */
+	0xB1, 0xF1, 0x83, 0x8B, 0x83, 0x8C, 0x83, 0x8D, /* 0x50-0x53 */
+	0xB1, 0xF2, 0x83, 0x8E, 0xB1, 0xF3, 0x83, 0x8F, /* 0x54-0x57 */
+	0x83, 0x90, 0x83, 0x91, 0x83, 0x92, 0x83, 0x93, /* 0x58-0x5B */
+	0xB1, 0xF4, 0xB1, 0xF5, 0x83, 0x94, 0xB1, 0xF6, /* 0x5C-0x5F */
+	0xB1, 0xF7, 0xB1, 0xF8, 0x83, 0x95, 0x83, 0x96, /* 0x60-0x63 */
+	0x83, 0x97, 0xB1, 0xF9, 0x83, 0x98, 0x83, 0x99, /* 0x64-0x67 */
+	0xB1, 0xFA, 0xB1, 0xFB, 0x83, 0x9A, 0x83, 0x9B, /* 0x68-0x6B */
+	0xB1, 0xFC, 0x83, 0x9C, 0x83, 0x9D, 0x83, 0x9E, /* 0x6C-0x6F */
+	0xB1, 0xFD, 0x83, 0x9F, 0x83, 0xA0, 0x83, 0xA1, /* 0x70-0x73 */
+	0x83, 0xA2, 0x83, 0xA3, 0x83, 0xA4, 0x83, 0xA5, /* 0x74-0x77 */
+	0xB1, 0xFE, 0xB2, 0xA1, 0x83, 0xA6, 0xB2, 0xA2, /* 0x78-0x7B */
+	0xB2, 0xA3, 0xB2, 0xA4, 0x83, 0xA7, 0x83, 0xA8, /* 0x7C-0x7F */
+	
+	0x83, 0xA9, 0x83, 0xAA, 0x83, 0xAB, 0x83, 0xAC, /* 0x80-0x83 */
+	0xB2, 0xA5, 0xB2, 0xA6, 0x83, 0xAD, 0x83, 0xAE, /* 0x84-0x87 */
+	0x83, 0xAF, 0x83, 0xB0, 0x83, 0xB1, 0x83, 0xB2, /* 0x88-0x8B */
+	0xB2, 0xA7, 0x83, 0xB3, 0x83, 0xB4, 0x83, 0xB5, /* 0x8C-0x8F */
+	0x83, 0xB6, 0x83, 0xB7, 0x83, 0xB8, 0x83, 0xB9, /* 0x90-0x93 */
+	0x83, 0xBA, 0x83, 0xBB, 0x83, 0xBC, 0x83, 0xBD, /* 0x94-0x97 */
+	0x83, 0xBE, 0x83, 0xBF, 0x83, 0xC0, 0x83, 0xC1, /* 0x98-0x9B */
+	0x83, 0xC2, 0x83, 0xC3, 0x83, 0xC4, 0x83, 0xC5, /* 0x9C-0x9F */
+	0x83, 0xC6, 0x83, 0xC7, 0x83, 0xC8, 0x83, 0xC9, /* 0xA0-0xA3 */
+	0x83, 0xCA, 0x83, 0xCB, 0x83, 0xCC, 0x83, 0xCD, /* 0xA4-0xA7 */
+	0x83, 0xCE, 0x83, 0xCF, 0x83, 0xD0, 0x83, 0xD1, /* 0xA8-0xAB */
+	0x83, 0xD2, 0x83, 0xD3, 0x83, 0xD4, 0x83, 0xD5, /* 0xAC-0xAF */
+	0x83, 0xD6, 0x83, 0xD7, 0x83, 0xD8, 0x83, 0xD9, /* 0xB0-0xB3 */
+	0x83, 0xDA, 0x83, 0xDB, 0x83, 0xDC, 0x83, 0xDD, /* 0xB4-0xB7 */
+	0x83, 0xDE, 0x83, 0xDF, 0x83, 0xE0, 0x83, 0xE1, /* 0xB8-0xBB */
+	0xB2, 0xA8, 0xB2, 0xA9, 0xB2, 0xAA, 0x83, 0xE2, /* 0xBC-0xBF */
+	0xB2, 0xAB, 0x83, 0xE3, 0x83, 0xE4, 0x83, 0xE5, /* 0xC0-0xC3 */
+	0xB2, 0xAC, 0x83, 0xE6, 0x83, 0xE7, 0x83, 0xE8, /* 0xC4-0xC7 */
+	0x83, 0xE9, 0x83, 0xEA, 0x83, 0xEB, 0x83, 0xEC, /* 0xC8-0xCB */
+	0xB2, 0xAD, 0xB2, 0xAE, 0x83, 0xED, 0xB2, 0xAF, /* 0xCC-0xCF */
+	0xB2, 0xB0, 0xB2, 0xB1, 0x83, 0xEE, 0x83, 0xEF, /* 0xD0-0xD3 */
+	0x83, 0xF0, 0x83, 0xF1, 0x83, 0xF2, 0x83, 0xF3, /* 0xD4-0xD7 */
+	0xB2, 0xB2, 0xB2, 0xB3, 0x83, 0xF4, 0x83, 0xF5, /* 0xD8-0xDB */
+	0xB2, 0xB4, 0x83, 0xF6, 0x83, 0xF7, 0x83, 0xF8, /* 0xDC-0xDF */
+	0x83, 0xF9, 0x83, 0xFA, 0x83, 0xFB, 0x83, 0xFC, /* 0xE0-0xE3 */
+	0x83, 0xFD, 0x83, 0xFE, 0x84, 0x41, 0x84, 0x42, /* 0xE4-0xE7 */
+	0xB2, 0xB5, 0x84, 0x43, 0x84, 0x44, 0xB2, 0xB6, /* 0xE8-0xEB */
+	0x84, 0x45, 0xB2, 0xB7, 0x84, 0x46, 0x84, 0x47, /* 0xEC-0xEF */
+	0x84, 0x48, 0x84, 0x49, 0x84, 0x4A, 0x84, 0x4B, /* 0xF0-0xF3 */
+	0xB2, 0xB8, 0x84, 0x4C, 0x84, 0x4D, 0x84, 0x4E, /* 0xF4-0xF7 */
+	0xB2, 0xB9, 0x84, 0x4F, 0x84, 0x50, 0x84, 0x51, /* 0xF8-0xFB */
+	0xB2, 0xBA, 0x84, 0x52, 0x84, 0x53, 0x84, 0x54, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_AF[512] = {
+	0x84, 0x55, 0x84, 0x56, 0x84, 0x57, 0x84, 0x58, /* 0x00-0x03 */
+	0x84, 0x59, 0x84, 0x5A, 0x84, 0x61, 0xB2, 0xBB, /* 0x04-0x07 */
+	0xB2, 0xBC, 0x84, 0x62, 0x84, 0x63, 0x84, 0x64, /* 0x08-0x0B */
+	0x84, 0x65, 0xB2, 0xBD, 0x84, 0x66, 0x84, 0x67, /* 0x0C-0x0F */
+	0xB2, 0xBE, 0x84, 0x68, 0x84, 0x69, 0x84, 0x6A, /* 0x10-0x13 */
+	0x84, 0x6B, 0x84, 0x6C, 0x84, 0x6D, 0x84, 0x6E, /* 0x14-0x17 */
+	0x84, 0x6F, 0x84, 0x70, 0x84, 0x71, 0x84, 0x72, /* 0x18-0x1B */
+	0x84, 0x73, 0x84, 0x74, 0x84, 0x75, 0x84, 0x76, /* 0x1C-0x1F */
+	0x84, 0x77, 0x84, 0x78, 0x84, 0x79, 0x84, 0x7A, /* 0x20-0x23 */
+	0x84, 0x81, 0x84, 0x82, 0x84, 0x83, 0x84, 0x84, /* 0x24-0x27 */
+	0x84, 0x85, 0x84, 0x86, 0x84, 0x87, 0x84, 0x88, /* 0x28-0x2B */
+	0xB2, 0xBF, 0xB2, 0xC0, 0x84, 0x89, 0x84, 0x8A, /* 0x2C-0x2F */
+	0xB2, 0xC1, 0x84, 0x8B, 0xB2, 0xC2, 0x84, 0x8C, /* 0x30-0x33 */
+	0xB2, 0xC3, 0x84, 0x8D, 0x84, 0x8E, 0x84, 0x8F, /* 0x34-0x37 */
+	0x84, 0x90, 0x84, 0x91, 0x84, 0x92, 0x84, 0x93, /* 0x38-0x3B */
+	0xB2, 0xC4, 0xB2, 0xC5, 0x84, 0x94, 0xB2, 0xC6, /* 0x3C-0x3F */
+	0x84, 0x95, 0xB2, 0xC7, 0xB2, 0xC8, 0xB2, 0xC9, /* 0x40-0x43 */
+	0x84, 0x96, 0x84, 0x97, 0x84, 0x98, 0x84, 0x99, /* 0x44-0x47 */
+	0xB2, 0xCA, 0xB2, 0xCB, 0x84, 0x9A, 0x84, 0x9B, /* 0x48-0x4B */
+	0x84, 0x9C, 0x84, 0x9D, 0x84, 0x9E, 0x84, 0x9F, /* 0x4C-0x4F */
+	0xB2, 0xCC, 0x84, 0xA0, 0x84, 0xA1, 0x84, 0xA2, /* 0x50-0x53 */
+	0x84, 0xA3, 0x84, 0xA4, 0x84, 0xA5, 0x84, 0xA6, /* 0x54-0x57 */
+	0x84, 0xA7, 0x84, 0xA8, 0x84, 0xA9, 0x84, 0xAA, /* 0x58-0x5B */
+	0xB2, 0xCD, 0xB2, 0xCE, 0x84, 0xAB, 0x84, 0xAC, /* 0x5C-0x5F */
+	0x84, 0xAD, 0x84, 0xAE, 0x84, 0xAF, 0x84, 0xB0, /* 0x60-0x63 */
+	0xB2, 0xCF, 0xB2, 0xD0, 0x84, 0xB1, 0x84, 0xB2, /* 0x64-0x67 */
+	0x84, 0xB3, 0x84, 0xB4, 0x84, 0xB5, 0x84, 0xB6, /* 0x68-0x6B */
+	0x84, 0xB7, 0x84, 0xB8, 0x84, 0xB9, 0x84, 0xBA, /* 0x6C-0x6F */
+	0x84, 0xBB, 0x84, 0xBC, 0x84, 0xBD, 0x84, 0xBE, /* 0x70-0x73 */
+	0x84, 0xBF, 0x84, 0xC0, 0x84, 0xC1, 0x84, 0xC2, /* 0x74-0x77 */
+	0x84, 0xC3, 0xB2, 0xD1, 0x84, 0xC4, 0x84, 0xC5, /* 0x78-0x7B */
+	0x84, 0xC6, 0x84, 0xC7, 0x84, 0xC8, 0x84, 0xC9, /* 0x7C-0x7F */
+	
+	0xB2, 0xD2, 0x84, 0xCA, 0x84, 0xCB, 0x84, 0xCC, /* 0x80-0x83 */
+	0xB2, 0xD3, 0x84, 0xCD, 0x84, 0xCE, 0x84, 0xCF, /* 0x84-0x87 */
+	0xB2, 0xD4, 0x84, 0xD0, 0x84, 0xD1, 0x84, 0xD2, /* 0x88-0x8B */
+	0x84, 0xD3, 0x84, 0xD4, 0x84, 0xD5, 0x84, 0xD6, /* 0x8C-0x8F */
+	0xB2, 0xD5, 0xB2, 0xD6, 0x84, 0xD7, 0x84, 0xD8, /* 0x90-0x93 */
+	0x84, 0xD9, 0xB2, 0xD7, 0x84, 0xDA, 0x84, 0xDB, /* 0x94-0x97 */
+	0x84, 0xDC, 0x84, 0xDD, 0x84, 0xDE, 0x84, 0xDF, /* 0x98-0x9B */
+	0xB2, 0xD8, 0x84, 0xE0, 0x84, 0xE1, 0x84, 0xE2, /* 0x9C-0x9F */
+	0x84, 0xE3, 0x84, 0xE4, 0x84, 0xE5, 0x84, 0xE6, /* 0xA0-0xA3 */
+	0x84, 0xE7, 0x84, 0xE8, 0x84, 0xE9, 0x84, 0xEA, /* 0xA4-0xA7 */
+	0x84, 0xEB, 0x84, 0xEC, 0x84, 0xED, 0x84, 0xEE, /* 0xA8-0xAB */
+	0x84, 0xEF, 0x84, 0xF0, 0x84, 0xF1, 0x84, 0xF2, /* 0xAC-0xAF */
+	0x84, 0xF3, 0x84, 0xF4, 0x84, 0xF5, 0x84, 0xF6, /* 0xB0-0xB3 */
+	0x84, 0xF7, 0x84, 0xF8, 0x84, 0xF9, 0x84, 0xFA, /* 0xB4-0xB7 */
+	0xB2, 0xD9, 0xB2, 0xDA, 0x84, 0xFB, 0x84, 0xFC, /* 0xB8-0xBB */
+	0xB2, 0xDB, 0x84, 0xFD, 0x84, 0xFE, 0x85, 0x41, /* 0xBC-0xBF */
+	0xB2, 0xDC, 0x85, 0x42, 0x85, 0x43, 0x85, 0x44, /* 0xC0-0xC3 */
+	0x85, 0x45, 0x85, 0x46, 0x85, 0x47, 0xB2, 0xDD, /* 0xC4-0xC7 */
+	0xB2, 0xDE, 0xB2, 0xDF, 0x85, 0x48, 0xB2, 0xE0, /* 0xC8-0xCB */
+	0x85, 0x49, 0xB2, 0xE1, 0xB2, 0xE2, 0x85, 0x4A, /* 0xCC-0xCF */
+	0x85, 0x4B, 0x85, 0x4C, 0x85, 0x4D, 0x85, 0x4E, /* 0xD0-0xD3 */
+	0xB2, 0xE3, 0x85, 0x4F, 0x85, 0x50, 0x85, 0x51, /* 0xD4-0xD7 */
+	0x85, 0x52, 0x85, 0x53, 0x85, 0x54, 0x85, 0x55, /* 0xD8-0xDB */
+	0xB2, 0xE4, 0x85, 0x56, 0x85, 0x57, 0x85, 0x58, /* 0xDC-0xDF */
+	0x85, 0x59, 0x85, 0x5A, 0x85, 0x61, 0x85, 0x62, /* 0xE0-0xE3 */
+	0x85, 0x63, 0x85, 0x64, 0x85, 0x65, 0x85, 0x66, /* 0xE4-0xE7 */
+	0xB2, 0xE5, 0xB2, 0xE6, 0x85, 0x67, 0x85, 0x68, /* 0xE8-0xEB */
+	0x85, 0x69, 0x85, 0x6A, 0x85, 0x6B, 0x85, 0x6C, /* 0xEC-0xEF */
+	0xB2, 0xE7, 0xB2, 0xE8, 0x85, 0x6D, 0x85, 0x6E, /* 0xF0-0xF3 */
+	0xB2, 0xE9, 0x85, 0x6F, 0x85, 0x70, 0x85, 0x71, /* 0xF4-0xF7 */
+	0xB2, 0xEA, 0x85, 0x72, 0x85, 0x73, 0x85, 0x74, /* 0xF8-0xFB */
+	0x85, 0x75, 0x85, 0x76, 0x85, 0x77, 0x85, 0x78, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B0[512] = {
+	0xB2, 0xEB, 0xB2, 0xEC, 0x85, 0x79, 0x85, 0x7A, /* 0x00-0x03 */
+	0xB2, 0xED, 0x85, 0x81, 0x85, 0x82, 0x85, 0x83, /* 0x04-0x07 */
+	0x85, 0x84, 0x85, 0x85, 0x85, 0x86, 0x85, 0x87, /* 0x08-0x0B */
+	0xB2, 0xEE, 0x85, 0x88, 0x85, 0x89, 0x85, 0x8A, /* 0x0C-0x0F */
+	0xB2, 0xEF, 0x85, 0x8B, 0x85, 0x8C, 0x85, 0x8D, /* 0x10-0x13 */
+	0xB2, 0xF0, 0x85, 0x8E, 0x85, 0x8F, 0x85, 0x90, /* 0x14-0x17 */
+	0x85, 0x91, 0x85, 0x92, 0x85, 0x93, 0x85, 0x94, /* 0x18-0x1B */
+	0xB2, 0xF1, 0xB2, 0xF2, 0x85, 0x95, 0x85, 0x96, /* 0x1C-0x1F */
+	0x85, 0x97, 0x85, 0x98, 0x85, 0x99, 0x85, 0x9A, /* 0x20-0x23 */
+	0x85, 0x9B, 0x85, 0x9C, 0x85, 0x9D, 0x85, 0x9E, /* 0x24-0x27 */
+	0xB2, 0xF3, 0x85, 0x9F, 0x85, 0xA0, 0x85, 0xA1, /* 0x28-0x2B */
+	0x85, 0xA2, 0x85, 0xA3, 0x85, 0xA4, 0x85, 0xA5, /* 0x2C-0x2F */
+	0x85, 0xA6, 0x85, 0xA7, 0x85, 0xA8, 0x85, 0xA9, /* 0x30-0x33 */
+	0x85, 0xAA, 0x85, 0xAB, 0x85, 0xAC, 0x85, 0xAD, /* 0x34-0x37 */
+	0x85, 0xAE, 0x85, 0xAF, 0x85, 0xB0, 0x85, 0xB1, /* 0x38-0x3B */
+	0x85, 0xB2, 0x85, 0xB3, 0x85, 0xB4, 0x85, 0xB5, /* 0x3C-0x3F */
+	0x85, 0xB6, 0x85, 0xB7, 0x85, 0xB8, 0x85, 0xB9, /* 0x40-0x43 */
+	0xB2, 0xF4, 0xB2, 0xF5, 0x85, 0xBA, 0x85, 0xBB, /* 0x44-0x47 */
+	0xB2, 0xF6, 0x85, 0xBC, 0xB2, 0xF7, 0x85, 0xBD, /* 0x48-0x4B */
+	0xB2, 0xF8, 0x85, 0xBE, 0xB2, 0xF9, 0x85, 0xBF, /* 0x4C-0x4F */
+	0x85, 0xC0, 0x85, 0xC1, 0x85, 0xC2, 0xB2, 0xFA, /* 0x50-0x53 */
+	0xB2, 0xFB, 0xB2, 0xFC, 0x85, 0xC3, 0xB2, 0xFD, /* 0x54-0x57 */
+	0x85, 0xC4, 0xB2, 0xFE, 0x85, 0xC5, 0x85, 0xC6, /* 0x58-0x5B */
+	0x85, 0xC7, 0xB3, 0xA1, 0x85, 0xC8, 0x85, 0xC9, /* 0x5C-0x5F */
+	0x85, 0xCA, 0x85, 0xCB, 0x85, 0xCC, 0x85, 0xCD, /* 0x60-0x63 */
+	0x85, 0xCE, 0x85, 0xCF, 0x85, 0xD0, 0x85, 0xD1, /* 0x64-0x67 */
+	0x85, 0xD2, 0x85, 0xD3, 0x85, 0xD4, 0x85, 0xD5, /* 0x68-0x6B */
+	0x85, 0xD6, 0x85, 0xD7, 0x85, 0xD8, 0x85, 0xD9, /* 0x6C-0x6F */
+	0x85, 0xDA, 0x85, 0xDB, 0x85, 0xDC, 0x85, 0xDD, /* 0x70-0x73 */
+	0x85, 0xDE, 0x85, 0xDF, 0x85, 0xE0, 0x85, 0xE1, /* 0x74-0x77 */
+	0x85, 0xE2, 0x85, 0xE3, 0x85, 0xE4, 0x85, 0xE5, /* 0x78-0x7B */
+	0xB3, 0xA2, 0xB3, 0xA3, 0x85, 0xE6, 0x85, 0xE7, /* 0x7C-0x7F */
+	
+	0xB3, 0xA4, 0x85, 0xE8, 0x85, 0xE9, 0x85, 0xEA, /* 0x80-0x83 */
+	0xB3, 0xA5, 0x85, 0xEB, 0x85, 0xEC, 0x85, 0xED, /* 0x84-0x87 */
+	0x85, 0xEE, 0x85, 0xEF, 0x85, 0xF0, 0x85, 0xF1, /* 0x88-0x8B */
+	0xB3, 0xA6, 0xB3, 0xA7, 0x85, 0xF2, 0xB3, 0xA8, /* 0x8C-0x8F */
+	0x85, 0xF3, 0xB3, 0xA9, 0x85, 0xF4, 0x85, 0xF5, /* 0x90-0x93 */
+	0x85, 0xF6, 0x85, 0xF7, 0x85, 0xF8, 0x85, 0xF9, /* 0x94-0x97 */
+	0xB3, 0xAA, 0xB3, 0xAB, 0xB3, 0xAC, 0x85, 0xFA, /* 0x98-0x9B */
+	0xB3, 0xAD, 0x85, 0xFB, 0x85, 0xFC, 0xB3, 0xAE, /* 0x9C-0x9F */
+	0xB3, 0xAF, 0xB3, 0xB0, 0xB3, 0xB1, 0x85, 0xFD, /* 0xA0-0xA3 */
+	0x85, 0xFE, 0x86, 0x41, 0x86, 0x42, 0x86, 0x43, /* 0xA4-0xA7 */
+	0xB3, 0xB2, 0xB3, 0xB3, 0x86, 0x44, 0xB3, 0xB4, /* 0xA8-0xAB */
+	0xB3, 0xB5, 0xB3, 0xB6, 0xB3, 0xB7, 0xB3, 0xB8, /* 0xAC-0xAF */
+	0x86, 0x45, 0xB3, 0xB9, 0x86, 0x46, 0xB3, 0xBA, /* 0xB0-0xB3 */
+	0xB3, 0xBB, 0xB3, 0xBC, 0x86, 0x47, 0x86, 0x48, /* 0xB4-0xB7 */
+	0xB3, 0xBD, 0x86, 0x49, 0x86, 0x4A, 0x86, 0x4B, /* 0xB8-0xBB */
+	0xB3, 0xBE, 0x86, 0x4C, 0x86, 0x4D, 0x86, 0x4E, /* 0xBC-0xBF */
+	0x86, 0x4F, 0x86, 0x50, 0x86, 0x51, 0x86, 0x52, /* 0xC0-0xC3 */
+	0xB3, 0xBF, 0xB3, 0xC0, 0x86, 0x53, 0xB3, 0xC1, /* 0xC4-0xC7 */
+	0xB3, 0xC2, 0xB3, 0xC3, 0x86, 0x54, 0x86, 0x55, /* 0xC8-0xCB */
+	0x86, 0x56, 0x86, 0x57, 0x86, 0x58, 0x86, 0x59, /* 0xCC-0xCF */
+	0xB3, 0xC4, 0xB3, 0xC5, 0x86, 0x5A, 0x86, 0x61, /* 0xD0-0xD3 */
+	0xB3, 0xC6, 0x86, 0x62, 0x86, 0x63, 0x86, 0x64, /* 0xD4-0xD7 */
+	0xB3, 0xC7, 0x86, 0x65, 0x86, 0x66, 0x86, 0x67, /* 0xD8-0xDB */
+	0x86, 0x68, 0x86, 0x69, 0x86, 0x6A, 0x86, 0x6B, /* 0xDC-0xDF */
+	0xB3, 0xC8, 0x86, 0x6C, 0x86, 0x6D, 0x86, 0x6E, /* 0xE0-0xE3 */
+	0x86, 0x6F, 0xB3, 0xC9, 0x86, 0x70, 0x86, 0x71, /* 0xE4-0xE7 */
+	0x86, 0x72, 0x86, 0x73, 0x86, 0x74, 0x86, 0x75, /* 0xE8-0xEB */
+	0x86, 0x76, 0x86, 0x77, 0x86, 0x78, 0x86, 0x79, /* 0xEC-0xEF */
+	0x86, 0x7A, 0x86, 0x81, 0x86, 0x82, 0x86, 0x83, /* 0xF0-0xF3 */
+	0x86, 0x84, 0x86, 0x85, 0x86, 0x86, 0x86, 0x87, /* 0xF4-0xF7 */
+	0x86, 0x88, 0x86, 0x89, 0x86, 0x8A, 0x86, 0x8B, /* 0xF8-0xFB */
+	0x86, 0x8C, 0x86, 0x8D, 0x86, 0x8E, 0x86, 0x8F, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B1[512] = {
+	0x86, 0x90, 0x86, 0x91, 0x86, 0x92, 0x86, 0x93, /* 0x00-0x03 */
+	0x86, 0x94, 0x86, 0x95, 0x86, 0x96, 0x86, 0x97, /* 0x04-0x07 */
+	0xB3, 0xCA, 0xB3, 0xCB, 0x86, 0x98, 0xB3, 0xCC, /* 0x08-0x0B */
+	0xB3, 0xCD, 0x86, 0x99, 0x86, 0x9A, 0x86, 0x9B, /* 0x0C-0x0F */
+	0xB3, 0xCE, 0x86, 0x9C, 0xB3, 0xCF, 0xB3, 0xD0, /* 0x10-0x13 */
+	0x86, 0x9D, 0x86, 0x9E, 0x86, 0x9F, 0x86, 0xA0, /* 0x14-0x17 */
+	0xB3, 0xD1, 0xB3, 0xD2, 0x86, 0xA1, 0xB3, 0xD3, /* 0x18-0x1B */
+	0xB3, 0xD4, 0xB3, 0xD5, 0x86, 0xA2, 0x86, 0xA3, /* 0x1C-0x1F */
+	0x86, 0xA4, 0x86, 0xA5, 0x86, 0xA6, 0xB3, 0xD6, /* 0x20-0x23 */
+	0xB3, 0xD7, 0xB3, 0xD8, 0x86, 0xA7, 0x86, 0xA8, /* 0x24-0x27 */
+	0xB3, 0xD9, 0x86, 0xA9, 0x86, 0xAA, 0x86, 0xAB, /* 0x28-0x2B */
+	0xB3, 0xDA, 0x86, 0xAC, 0x86, 0xAD, 0x86, 0xAE, /* 0x2C-0x2F */
+	0x86, 0xAF, 0x86, 0xB0, 0x86, 0xB1, 0x86, 0xB2, /* 0x30-0x33 */
+	0xB3, 0xDB, 0xB3, 0xDC, 0x86, 0xB3, 0xB3, 0xDD, /* 0x34-0x37 */
+	0xB3, 0xDE, 0xB3, 0xDF, 0x86, 0xB4, 0x86, 0xB5, /* 0x38-0x3B */
+	0x86, 0xB6, 0x86, 0xB7, 0x86, 0xB8, 0x86, 0xB9, /* 0x3C-0x3F */
+	0xB3, 0xE0, 0xB3, 0xE1, 0x86, 0xBA, 0x86, 0xBB, /* 0x40-0x43 */
+	0xB3, 0xE2, 0x86, 0xBC, 0x86, 0xBD, 0x86, 0xBE, /* 0x44-0x47 */
+	0xB3, 0xE3, 0x86, 0xBF, 0x86, 0xC0, 0x86, 0xC1, /* 0x48-0x4B */
+	0x86, 0xC2, 0x86, 0xC3, 0x86, 0xC4, 0x86, 0xC5, /* 0x4C-0x4F */
+	0xB3, 0xE4, 0xB3, 0xE5, 0x86, 0xC6, 0x86, 0xC7, /* 0x50-0x53 */
+	0xB3, 0xE6, 0xB3, 0xE7, 0x86, 0xC8, 0x86, 0xC9, /* 0x54-0x57 */
+	0xB3, 0xE8, 0x86, 0xCA, 0x86, 0xCB, 0x86, 0xCC, /* 0x58-0x5B */
+	0xB3, 0xE9, 0x86, 0xCD, 0x86, 0xCE, 0x86, 0xCF, /* 0x5C-0x5F */
+	0xB3, 0xEA, 0x86, 0xD0, 0x86, 0xD1, 0x86, 0xD2, /* 0x60-0x63 */
+	0x86, 0xD3, 0x86, 0xD4, 0x86, 0xD5, 0x86, 0xD6, /* 0x64-0x67 */
+	0x86, 0xD7, 0x86, 0xD8, 0x86, 0xD9, 0x86, 0xDA, /* 0x68-0x6B */
+	0x86, 0xDB, 0x86, 0xDC, 0x86, 0xDD, 0x86, 0xDE, /* 0x6C-0x6F */
+	0x86, 0xDF, 0x86, 0xE0, 0x86, 0xE1, 0x86, 0xE2, /* 0x70-0x73 */
+	0x86, 0xE3, 0x86, 0xE4, 0x86, 0xE5, 0x86, 0xE6, /* 0x74-0x77 */
+	0xB3, 0xEB, 0xB3, 0xEC, 0x86, 0xE7, 0x86, 0xE8, /* 0x78-0x7B */
+	0xB3, 0xED, 0x86, 0xE9, 0x86, 0xEA, 0x86, 0xEB, /* 0x7C-0x7F */
+	
+	0xB3, 0xEE, 0x86, 0xEC, 0xB3, 0xEF, 0x86, 0xED, /* 0x80-0x83 */
+	0x86, 0xEE, 0x86, 0xEF, 0x86, 0xF0, 0x86, 0xF1, /* 0x84-0x87 */
+	0xB3, 0xF0, 0xB3, 0xF1, 0x86, 0xF2, 0xB3, 0xF2, /* 0x88-0x8B */
+	0x86, 0xF3, 0xB3, 0xF3, 0x86, 0xF4, 0x86, 0xF5, /* 0x8C-0x8F */
+	0x86, 0xF6, 0x86, 0xF7, 0xB3, 0xF4, 0xB3, 0xF5, /* 0x90-0x93 */
+	0xB3, 0xF6, 0x86, 0xF8, 0x86, 0xF9, 0x86, 0xFA, /* 0x94-0x97 */
+	0xB3, 0xF7, 0x86, 0xFB, 0x86, 0xFC, 0x86, 0xFD, /* 0x98-0x9B */
+	0xB3, 0xF8, 0x86, 0xFE, 0x87, 0x41, 0x87, 0x42, /* 0x9C-0x9F */
+	0x87, 0x43, 0x87, 0x44, 0x87, 0x45, 0x87, 0x46, /* 0xA0-0xA3 */
+	0x87, 0x47, 0x87, 0x48, 0x87, 0x49, 0x87, 0x4A, /* 0xA4-0xA7 */
+	0xB3, 0xF9, 0x87, 0x4B, 0x87, 0x4C, 0x87, 0x4D, /* 0xA8-0xAB */
+	0x87, 0x4E, 0x87, 0x4F, 0x87, 0x50, 0x87, 0x51, /* 0xAC-0xAF */
+	0x87, 0x52, 0x87, 0x53, 0x87, 0x54, 0x87, 0x55, /* 0xB0-0xB3 */
+	0x87, 0x56, 0x87, 0x57, 0x87, 0x58, 0x87, 0x59, /* 0xB4-0xB7 */
+	0x87, 0x5A, 0x87, 0x61, 0x87, 0x62, 0x87, 0x63, /* 0xB8-0xBB */
+	0x87, 0x64, 0x87, 0x65, 0x87, 0x66, 0x87, 0x67, /* 0xBC-0xBF */
+	0x87, 0x68, 0x87, 0x69, 0x87, 0x6A, 0x87, 0x6B, /* 0xC0-0xC3 */
+	0x87, 0x6C, 0x87, 0x6D, 0x87, 0x6E, 0x87, 0x6F, /* 0xC4-0xC7 */
+	0x87, 0x70, 0x87, 0x71, 0x87, 0x72, 0x87, 0x73, /* 0xC8-0xCB */
+	0xB3, 0xFA, 0x87, 0x74, 0x87, 0x75, 0x87, 0x76, /* 0xCC-0xCF */
+	0xB3, 0xFB, 0x87, 0x77, 0x87, 0x78, 0x87, 0x79, /* 0xD0-0xD3 */
+	0xB3, 0xFC, 0x87, 0x7A, 0x87, 0x81, 0x87, 0x82, /* 0xD4-0xD7 */
+	0x87, 0x83, 0x87, 0x84, 0x87, 0x85, 0x87, 0x86, /* 0xD8-0xDB */
+	0xB3, 0xFD, 0xB3, 0xFE, 0x87, 0x87, 0xB4, 0xA1, /* 0xDC-0xDF */
+	0x87, 0x88, 0x87, 0x89, 0x87, 0x8A, 0x87, 0x8B, /* 0xE0-0xE3 */
+	0x87, 0x8C, 0x87, 0x8D, 0x87, 0x8E, 0x87, 0x8F, /* 0xE4-0xE7 */
+	0xB4, 0xA2, 0xB4, 0xA3, 0x87, 0x90, 0x87, 0x91, /* 0xE8-0xEB */
+	0xB4, 0xA4, 0x87, 0x92, 0x87, 0x93, 0x87, 0x94, /* 0xEC-0xEF */
+	0xB4, 0xA5, 0x87, 0x95, 0x87, 0x96, 0x87, 0x97, /* 0xF0-0xF3 */
+	0x87, 0x98, 0x87, 0x99, 0x87, 0x9A, 0x87, 0x9B, /* 0xF4-0xF7 */
+	0x87, 0x9C, 0xB4, 0xA6, 0x87, 0x9D, 0xB4, 0xA7, /* 0xF8-0xFB */
+	0x87, 0x9E, 0xB4, 0xA8, 0x87, 0x9F, 0x87, 0xA0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B2[512] = {
+	0x87, 0xA1, 0x87, 0xA2, 0x87, 0xA3, 0x87, 0xA4, /* 0x00-0x03 */
+	0xB4, 0xA9, 0xB4, 0xAA, 0x87, 0xA5, 0x87, 0xA6, /* 0x04-0x07 */
+	0xB4, 0xAB, 0x87, 0xA7, 0x87, 0xA8, 0xB4, 0xAC, /* 0x08-0x0B */
+	0xB4, 0xAD, 0x87, 0xA9, 0x87, 0xAA, 0x87, 0xAB, /* 0x0C-0x0F */
+	0x87, 0xAC, 0x87, 0xAD, 0x87, 0xAE, 0x87, 0xAF, /* 0x10-0x13 */
+	0xB4, 0xAE, 0xB4, 0xAF, 0x87, 0xB0, 0xB4, 0xB0, /* 0x14-0x17 */
+	0x87, 0xB1, 0xB4, 0xB1, 0x87, 0xB2, 0x87, 0xB3, /* 0x18-0x1B */
+	0x87, 0xB4, 0x87, 0xB5, 0x87, 0xB6, 0x87, 0xB7, /* 0x1C-0x1F */
+	0xB4, 0xB2, 0x87, 0xB8, 0x87, 0xB9, 0x87, 0xBA, /* 0x20-0x23 */
+	0x87, 0xBB, 0x87, 0xBC, 0x87, 0xBD, 0x87, 0xBE, /* 0x24-0x27 */
+	0x87, 0xBF, 0x87, 0xC0, 0x87, 0xC1, 0x87, 0xC2, /* 0x28-0x2B */
+	0x87, 0xC3, 0x87, 0xC4, 0x87, 0xC5, 0x87, 0xC6, /* 0x2C-0x2F */
+	0x87, 0xC7, 0x87, 0xC8, 0x87, 0xC9, 0x87, 0xCA, /* 0x30-0x33 */
+	0xB4, 0xB3, 0x87, 0xCB, 0x87, 0xCC, 0x87, 0xCD, /* 0x34-0x37 */
+	0x87, 0xCE, 0x87, 0xCF, 0x87, 0xD0, 0x87, 0xD1, /* 0x38-0x3B */
+	0xB4, 0xB4, 0x87, 0xD2, 0x87, 0xD3, 0x87, 0xD4, /* 0x3C-0x3F */
+	0x87, 0xD5, 0x87, 0xD6, 0x87, 0xD7, 0x87, 0xD8, /* 0x40-0x43 */
+	0x87, 0xD9, 0x87, 0xDA, 0x87, 0xDB, 0x87, 0xDC, /* 0x44-0x47 */
+	0x87, 0xDD, 0x87, 0xDE, 0x87, 0xDF, 0x87, 0xE0, /* 0x48-0x4B */
+	0x87, 0xE1, 0x87, 0xE2, 0x87, 0xE3, 0x87, 0xE4, /* 0x4C-0x4F */
+	0x87, 0xE5, 0x87, 0xE6, 0x87, 0xE7, 0x87, 0xE8, /* 0x50-0x53 */
+	0x87, 0xE9, 0x87, 0xEA, 0x87, 0xEB, 0x87, 0xEC, /* 0x54-0x57 */
+	0xB4, 0xB5, 0x87, 0xED, 0x87, 0xEE, 0x87, 0xEF, /* 0x58-0x5B */
+	0xB4, 0xB6, 0x87, 0xF0, 0x87, 0xF1, 0x87, 0xF2, /* 0x5C-0x5F */
+	0xB4, 0xB7, 0x87, 0xF3, 0x87, 0xF4, 0x87, 0xF5, /* 0x60-0x63 */
+	0x87, 0xF6, 0x87, 0xF7, 0x87, 0xF8, 0x87, 0xF9, /* 0x64-0x67 */
+	0xB4, 0xB8, 0xB4, 0xB9, 0x87, 0xFA, 0x87, 0xFB, /* 0x68-0x6B */
+	0x87, 0xFC, 0x87, 0xFD, 0x87, 0xFE, 0x88, 0x41, /* 0x6C-0x6F */
+	0x88, 0x42, 0x88, 0x43, 0x88, 0x44, 0x88, 0x45, /* 0x70-0x73 */
+	0xB4, 0xBA, 0xB4, 0xBB, 0x88, 0x46, 0x88, 0x47, /* 0x74-0x77 */
+	0x88, 0x48, 0x88, 0x49, 0x88, 0x4A, 0x88, 0x4B, /* 0x78-0x7B */
+	0xB4, 0xBC, 0x88, 0x4C, 0x88, 0x4D, 0x88, 0x4E, /* 0x7C-0x7F */
+	
+	0x88, 0x4F, 0x88, 0x50, 0x88, 0x51, 0x88, 0x52, /* 0x80-0x83 */
+	0xB4, 0xBD, 0xB4, 0xBE, 0x88, 0x53, 0x88, 0x54, /* 0x84-0x87 */
+	0x88, 0x55, 0xB4, 0xBF, 0x88, 0x56, 0x88, 0x57, /* 0x88-0x8B */
+	0x88, 0x58, 0x88, 0x59, 0x88, 0x5A, 0x88, 0x61, /* 0x8C-0x8F */
+	0xB4, 0xC0, 0xB4, 0xC1, 0x88, 0x62, 0x88, 0x63, /* 0x90-0x93 */
+	0xB4, 0xC2, 0x88, 0x64, 0x88, 0x65, 0x88, 0x66, /* 0x94-0x97 */
+	0xB4, 0xC3, 0xB4, 0xC4, 0xB4, 0xC5, 0x88, 0x67, /* 0x98-0x9B */
+	0x88, 0x68, 0x88, 0x69, 0x88, 0x6A, 0x88, 0x6B, /* 0x9C-0x9F */
+	0xB4, 0xC6, 0xB4, 0xC7, 0x88, 0x6C, 0xB4, 0xC8, /* 0xA0-0xA3 */
+	0x88, 0x6D, 0xB4, 0xC9, 0xB4, 0xCA, 0x88, 0x6E, /* 0xA4-0xA7 */
+	0x88, 0x6F, 0x88, 0x70, 0xB4, 0xCB, 0x88, 0x71, /* 0xA8-0xAB */
+	0xB4, 0xCC, 0x88, 0x72, 0x88, 0x73, 0x88, 0x74, /* 0xAC-0xAF */
+	0xB4, 0xCD, 0x88, 0x75, 0x88, 0x76, 0x88, 0x77, /* 0xB0-0xB3 */
+	0xB4, 0xCE, 0x88, 0x78, 0x88, 0x79, 0x88, 0x7A, /* 0xB4-0xB7 */
+	0x88, 0x81, 0x88, 0x82, 0x88, 0x83, 0x88, 0x84, /* 0xB8-0xBB */
+	0x88, 0x85, 0x88, 0x86, 0x88, 0x87, 0x88, 0x88, /* 0xBC-0xBF */
+	0x88, 0x89, 0x88, 0x8A, 0x88, 0x8B, 0x88, 0x8C, /* 0xC0-0xC3 */
+	0x88, 0x8D, 0x88, 0x8E, 0x88, 0x8F, 0x88, 0x90, /* 0xC4-0xC7 */
+	0xB4, 0xCF, 0xB4, 0xD0, 0x88, 0x91, 0x88, 0x92, /* 0xC8-0xCB */
+	0xB4, 0xD1, 0x88, 0x93, 0x88, 0x94, 0x88, 0x95, /* 0xCC-0xCF */
+	0xB4, 0xD2, 0x88, 0x96, 0xB4, 0xD3, 0x88, 0x97, /* 0xD0-0xD3 */
+	0x88, 0x98, 0x88, 0x99, 0x88, 0x9A, 0x88, 0x9B, /* 0xD4-0xD7 */
+	0xB4, 0xD4, 0xB4, 0xD5, 0x88, 0x9C, 0xB4, 0xD6, /* 0xD8-0xDB */
+	0x88, 0x9D, 0xB4, 0xD7, 0x88, 0x9E, 0x88, 0x9F, /* 0xDC-0xDF */
+	0x88, 0xA0, 0x88, 0xA1, 0xB4, 0xD8, 0x88, 0xA2, /* 0xE0-0xE3 */
+	0xB4, 0xD9, 0xB4, 0xDA, 0xB4, 0xDB, 0x88, 0xA3, /* 0xE4-0xE7 */
+	0xB4, 0xDC, 0x88, 0xA4, 0x88, 0xA5, 0xB4, 0xDD, /* 0xE8-0xEB */
+	0xB4, 0xDE, 0xB4, 0xDF, 0xB4, 0xE0, 0xB4, 0xE1, /* 0xEC-0xEF */
+	0x88, 0xA6, 0x88, 0xA7, 0x88, 0xA8, 0xB4, 0xE2, /* 0xF0-0xF3 */
+	0xB4, 0xE3, 0xB4, 0xE4, 0x88, 0xA9, 0xB4, 0xE5, /* 0xF4-0xF7 */
+	0xB4, 0xE6, 0xB4, 0xE7, 0xB4, 0xE8, 0xB4, 0xE9, /* 0xF8-0xFB */
+	0x88, 0xAA, 0x88, 0xAB, 0x88, 0xAC, 0xB4, 0xEA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B3[512] = {
+	0xB4, 0xEB, 0xB4, 0xEC, 0x88, 0xAD, 0x88, 0xAE, /* 0x00-0x03 */
+	0xB4, 0xED, 0x88, 0xAF, 0x88, 0xB0, 0x88, 0xB1, /* 0x04-0x07 */
+	0xB4, 0xEE, 0x88, 0xB2, 0x88, 0xB3, 0x88, 0xB4, /* 0x08-0x0B */
+	0x88, 0xB5, 0x88, 0xB6, 0x88, 0xB7, 0x88, 0xB8, /* 0x0C-0x0F */
+	0xB4, 0xEF, 0xB4, 0xF0, 0x88, 0xB9, 0xB4, 0xF1, /* 0x10-0x13 */
+	0xB4, 0xF2, 0xB4, 0xF3, 0x88, 0xBA, 0x88, 0xBB, /* 0x14-0x17 */
+	0x88, 0xBC, 0x88, 0xBD, 0x88, 0xBE, 0x88, 0xBF, /* 0x18-0x1B */
+	0xB4, 0xF4, 0x88, 0xC0, 0x88, 0xC1, 0x88, 0xC2, /* 0x1C-0x1F */
+	0x88, 0xC3, 0x88, 0xC4, 0x88, 0xC5, 0x88, 0xC6, /* 0x20-0x23 */
+	0x88, 0xC7, 0x88, 0xC8, 0x88, 0xC9, 0x88, 0xCA, /* 0x24-0x27 */
+	0x88, 0xCB, 0x88, 0xCC, 0x88, 0xCD, 0x88, 0xCE, /* 0x28-0x2B */
+	0x88, 0xCF, 0x88, 0xD0, 0x88, 0xD1, 0x88, 0xD2, /* 0x2C-0x2F */
+	0x88, 0xD3, 0x88, 0xD4, 0x88, 0xD5, 0x88, 0xD6, /* 0x30-0x33 */
+	0x88, 0xD7, 0x88, 0xD8, 0x88, 0xD9, 0x88, 0xDA, /* 0x34-0x37 */
+	0x88, 0xDB, 0x88, 0xDC, 0x88, 0xDD, 0x88, 0xDE, /* 0x38-0x3B */
+	0x88, 0xDF, 0x88, 0xE0, 0x88, 0xE1, 0x88, 0xE2, /* 0x3C-0x3F */
+	0x88, 0xE3, 0x88, 0xE4, 0x88, 0xE5, 0x88, 0xE6, /* 0x40-0x43 */
+	0x88, 0xE7, 0x88, 0xE8, 0x88, 0xE9, 0x88, 0xEA, /* 0x44-0x47 */
+	0x88, 0xEB, 0x88, 0xEC, 0x88, 0xED, 0x88, 0xEE, /* 0x48-0x4B */
+	0x88, 0xEF, 0x88, 0xF0, 0x88, 0xF1, 0x88, 0xF2, /* 0x4C-0x4F */
+	0x88, 0xF3, 0x88, 0xF4, 0x88, 0xF5, 0x88, 0xF6, /* 0x50-0x53 */
+	0xB4, 0xF5, 0xB4, 0xF6, 0xB4, 0xF7, 0x88, 0xF7, /* 0x54-0x57 */
+	0xB4, 0xF8, 0x88, 0xF8, 0x88, 0xF9, 0xB4, 0xF9, /* 0x58-0x5B */
+	0xB4, 0xFA, 0x88, 0xFA, 0xB4, 0xFB, 0xB4, 0xFC, /* 0x5C-0x5F */
+	0x88, 0xFB, 0x88, 0xFC, 0x88, 0xFD, 0x88, 0xFE, /* 0x60-0x63 */
+	0xB4, 0xFD, 0xB4, 0xFE, 0x89, 0x41, 0xB5, 0xA1, /* 0x64-0x67 */
+	0x89, 0x42, 0xB5, 0xA2, 0x89, 0x43, 0xB5, 0xA3, /* 0x68-0x6B */
+	0x89, 0x44, 0x89, 0x45, 0xB5, 0xA4, 0x89, 0x46, /* 0x6C-0x6F */
+	0xB5, 0xA5, 0xB5, 0xA6, 0x89, 0x47, 0x89, 0x48, /* 0x70-0x73 */
+	0xB5, 0xA7, 0x89, 0x49, 0x89, 0x4A, 0x89, 0x4B, /* 0x74-0x77 */
+	0xB5, 0xA8, 0x89, 0x4C, 0x89, 0x4D, 0x89, 0x4E, /* 0x78-0x7B */
+	0x89, 0x4F, 0x89, 0x50, 0x89, 0x51, 0x89, 0x52, /* 0x7C-0x7F */
+	
+	0xB5, 0xA9, 0xB5, 0xAA, 0x89, 0x53, 0xB5, 0xAB, /* 0x80-0x83 */
+	0xB5, 0xAC, 0xB5, 0xAD, 0x89, 0x54, 0x89, 0x55, /* 0x84-0x87 */
+	0x89, 0x56, 0x89, 0x57, 0x89, 0x58, 0x89, 0x59, /* 0x88-0x8B */
+	0xB5, 0xAE, 0x89, 0x5A, 0x89, 0x61, 0x89, 0x62, /* 0x8C-0x8F */
+	0xB5, 0xAF, 0x89, 0x63, 0x89, 0x64, 0x89, 0x65, /* 0x90-0x93 */
+	0xB5, 0xB0, 0x89, 0x66, 0x89, 0x67, 0x89, 0x68, /* 0x94-0x97 */
+	0x89, 0x69, 0x89, 0x6A, 0x89, 0x6B, 0x89, 0x6C, /* 0x98-0x9B */
+	0x89, 0x6D, 0x89, 0x6E, 0x89, 0x6F, 0x89, 0x70, /* 0x9C-0x9F */
+	0xB5, 0xB1, 0xB5, 0xB2, 0x89, 0x71, 0x89, 0x72, /* 0xA0-0xA3 */
+	0x89, 0x73, 0x89, 0x74, 0x89, 0x75, 0x89, 0x76, /* 0xA4-0xA7 */
+	0xB5, 0xB3, 0x89, 0x77, 0x89, 0x78, 0x89, 0x79, /* 0xA8-0xAB */
+	0xB5, 0xB4, 0x89, 0x7A, 0x89, 0x81, 0x89, 0x82, /* 0xAC-0xAF */
+	0x89, 0x83, 0x89, 0x84, 0x89, 0x85, 0x89, 0x86, /* 0xB0-0xB3 */
+	0x89, 0x87, 0x89, 0x88, 0x89, 0x89, 0x89, 0x8A, /* 0xB4-0xB7 */
+	0x89, 0x8B, 0x89, 0x8C, 0x89, 0x8D, 0x89, 0x8E, /* 0xB8-0xBB */
+	0x89, 0x8F, 0x89, 0x90, 0x89, 0x91, 0x89, 0x92, /* 0xBC-0xBF */
+	0x89, 0x93, 0x89, 0x94, 0x89, 0x95, 0x89, 0x96, /* 0xC0-0xC3 */
+	0xB5, 0xB5, 0xB5, 0xB6, 0x89, 0x97, 0x89, 0x98, /* 0xC4-0xC7 */
+	0xB5, 0xB7, 0x89, 0x99, 0x89, 0x9A, 0xB5, 0xB8, /* 0xC8-0xCB */
+	0xB5, 0xB9, 0x89, 0x9B, 0xB5, 0xBA, 0x89, 0x9C, /* 0xCC-0xCF */
+	0xB5, 0xBB, 0x89, 0x9D, 0x89, 0x9E, 0x89, 0x9F, /* 0xD0-0xD3 */
+	0xB5, 0xBC, 0xB5, 0xBD, 0x89, 0xA0, 0xB5, 0xBE, /* 0xD4-0xD7 */
+	0x89, 0xA1, 0xB5, 0xBF, 0x89, 0xA2, 0xB5, 0xC0, /* 0xD8-0xDB */
+	0x89, 0xA3, 0xB5, 0xC1, 0x89, 0xA4, 0x89, 0xA5, /* 0xDC-0xDF */
+	0xB5, 0xC2, 0x89, 0xA6, 0x89, 0xA7, 0x89, 0xA8, /* 0xE0-0xE3 */
+	0xB5, 0xC3, 0x89, 0xA9, 0x89, 0xAA, 0x89, 0xAB, /* 0xE4-0xE7 */
+	0xB5, 0xC4, 0x89, 0xAC, 0x89, 0xAD, 0x89, 0xAE, /* 0xE8-0xEB */
+	0x89, 0xAF, 0x89, 0xB0, 0x89, 0xB1, 0x89, 0xB2, /* 0xEC-0xEF */
+	0x89, 0xB3, 0x89, 0xB4, 0x89, 0xB5, 0x89, 0xB6, /* 0xF0-0xF3 */
+	0x89, 0xB7, 0x89, 0xB8, 0x89, 0xB9, 0x89, 0xBA, /* 0xF4-0xF7 */
+	0x89, 0xBB, 0x89, 0xBC, 0x89, 0xBD, 0x89, 0xBE, /* 0xF8-0xFB */
+	0xB5, 0xC5, 0x89, 0xBF, 0x89, 0xC0, 0x89, 0xC1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B4[512] = {
+	0x89, 0xC2, 0x89, 0xC3, 0x89, 0xC4, 0x89, 0xC5, /* 0x00-0x03 */
+	0x89, 0xC6, 0x89, 0xC7, 0x89, 0xC8, 0x89, 0xC9, /* 0x04-0x07 */
+	0x89, 0xCA, 0x89, 0xCB, 0x89, 0xCC, 0x89, 0xCD, /* 0x08-0x0B */
+	0x89, 0xCE, 0x89, 0xCF, 0x89, 0xD0, 0x89, 0xD1, /* 0x0C-0x0F */
+	0xB5, 0xC6, 0x89, 0xD2, 0x89, 0xD3, 0x89, 0xD4, /* 0x10-0x13 */
+	0x89, 0xD5, 0x89, 0xD6, 0x89, 0xD7, 0x89, 0xD8, /* 0x14-0x17 */
+	0xB5, 0xC7, 0x89, 0xD9, 0x89, 0xDA, 0x89, 0xDB, /* 0x18-0x1B */
+	0xB5, 0xC8, 0x89, 0xDC, 0x89, 0xDD, 0x89, 0xDE, /* 0x1C-0x1F */
+	0xB5, 0xC9, 0x89, 0xDF, 0x89, 0xE0, 0x89, 0xE1, /* 0x20-0x23 */
+	0x89, 0xE2, 0x89, 0xE3, 0x89, 0xE4, 0x89, 0xE5, /* 0x24-0x27 */
+	0xB5, 0xCA, 0xB5, 0xCB, 0x89, 0xE6, 0xB5, 0xCC, /* 0x28-0x2B */
+	0x89, 0xE7, 0x89, 0xE8, 0x89, 0xE9, 0x89, 0xEA, /* 0x2C-0x2F */
+	0x89, 0xEB, 0x89, 0xEC, 0x89, 0xED, 0x89, 0xEE, /* 0x30-0x33 */
+	0xB5, 0xCD, 0x89, 0xEF, 0x89, 0xF0, 0x89, 0xF1, /* 0x34-0x37 */
+	0x89, 0xF2, 0x89, 0xF3, 0x89, 0xF4, 0x89, 0xF5, /* 0x38-0x3B */
+	0x89, 0xF6, 0x89, 0xF7, 0x89, 0xF8, 0x89, 0xF9, /* 0x3C-0x3F */
+	0x89, 0xFA, 0x89, 0xFB, 0x89, 0xFC, 0x89, 0xFD, /* 0x40-0x43 */
+	0x89, 0xFE, 0x8A, 0x41, 0x8A, 0x42, 0x8A, 0x43, /* 0x44-0x47 */
+	0x8A, 0x44, 0x8A, 0x45, 0x8A, 0x46, 0x8A, 0x47, /* 0x48-0x4B */
+	0x8A, 0x48, 0x8A, 0x49, 0x8A, 0x4A, 0x8A, 0x4B, /* 0x4C-0x4F */
+	0xB5, 0xCE, 0xB5, 0xCF, 0x8A, 0x4C, 0x8A, 0x4D, /* 0x50-0x53 */
+	0xB5, 0xD0, 0x8A, 0x4E, 0x8A, 0x4F, 0x8A, 0x50, /* 0x54-0x57 */
+	0xB5, 0xD1, 0x8A, 0x51, 0x8A, 0x52, 0x8A, 0x53, /* 0x58-0x5B */
+	0x8A, 0x54, 0x8A, 0x55, 0x8A, 0x56, 0x8A, 0x57, /* 0x5C-0x5F */
+	0xB5, 0xD2, 0xB5, 0xD3, 0x8A, 0x58, 0xB5, 0xD4, /* 0x60-0x63 */
+	0x8A, 0x59, 0xB5, 0xD5, 0x8A, 0x5A, 0x8A, 0x61, /* 0x64-0x67 */
+	0x8A, 0x62, 0x8A, 0x63, 0x8A, 0x64, 0x8A, 0x65, /* 0x68-0x6B */
+	0xB5, 0xD6, 0x8A, 0x66, 0x8A, 0x67, 0x8A, 0x68, /* 0x6C-0x6F */
+	0x8A, 0x69, 0x8A, 0x6A, 0x8A, 0x6B, 0x8A, 0x6C, /* 0x70-0x73 */
+	0x8A, 0x6D, 0x8A, 0x6E, 0x8A, 0x6F, 0x8A, 0x70, /* 0x74-0x77 */
+	0x8A, 0x71, 0x8A, 0x72, 0x8A, 0x73, 0x8A, 0x74, /* 0x78-0x7B */
+	0x8A, 0x75, 0x8A, 0x76, 0x8A, 0x77, 0x8A, 0x78, /* 0x7C-0x7F */
+	
+	0xB5, 0xD7, 0x8A, 0x79, 0x8A, 0x7A, 0x8A, 0x81, /* 0x80-0x83 */
+	0x8A, 0x82, 0x8A, 0x83, 0x8A, 0x84, 0x8A, 0x85, /* 0x84-0x87 */
+	0xB5, 0xD8, 0x8A, 0x86, 0x8A, 0x87, 0x8A, 0x88, /* 0x88-0x8B */
+	0x8A, 0x89, 0x8A, 0x8A, 0x8A, 0x8B, 0x8A, 0x8C, /* 0x8C-0x8F */
+	0x8A, 0x8D, 0x8A, 0x8E, 0x8A, 0x8F, 0x8A, 0x90, /* 0x90-0x93 */
+	0x8A, 0x91, 0x8A, 0x92, 0x8A, 0x93, 0x8A, 0x94, /* 0x94-0x97 */
+	0x8A, 0x95, 0x8A, 0x96, 0x8A, 0x97, 0x8A, 0x98, /* 0x98-0x9B */
+	0x8A, 0x99, 0xB5, 0xD9, 0x8A, 0x9A, 0x8A, 0x9B, /* 0x9C-0x9F */
+	0x8A, 0x9C, 0x8A, 0x9D, 0x8A, 0x9E, 0x8A, 0x9F, /* 0xA0-0xA3 */
+	0xB5, 0xDA, 0x8A, 0xA0, 0x8A, 0xA1, 0x8A, 0xA2, /* 0xA4-0xA7 */
+	0xB5, 0xDB, 0x8A, 0xA3, 0x8A, 0xA4, 0x8A, 0xA5, /* 0xA8-0xAB */
+	0xB5, 0xDC, 0x8A, 0xA6, 0x8A, 0xA7, 0x8A, 0xA8, /* 0xAC-0xAF */
+	0x8A, 0xA9, 0x8A, 0xAA, 0x8A, 0xAB, 0x8A, 0xAC, /* 0xB0-0xB3 */
+	0x8A, 0xAD, 0xB5, 0xDD, 0x8A, 0xAE, 0xB5, 0xDE, /* 0xB4-0xB7 */
+	0x8A, 0xAF, 0xB5, 0xDF, 0x8A, 0xB0, 0x8A, 0xB1, /* 0xB8-0xBB */
+	0x8A, 0xB2, 0x8A, 0xB3, 0x8A, 0xB4, 0x8A, 0xB5, /* 0xBC-0xBF */
+	0xB5, 0xE0, 0x8A, 0xB6, 0x8A, 0xB7, 0x8A, 0xB8, /* 0xC0-0xC3 */
+	0xB5, 0xE1, 0x8A, 0xB9, 0x8A, 0xBA, 0x8A, 0xBB, /* 0xC4-0xC7 */
+	0xB5, 0xE2, 0x8A, 0xBC, 0x8A, 0xBD, 0x8A, 0xBE, /* 0xC8-0xCB */
+	0x8A, 0xBF, 0x8A, 0xC0, 0x8A, 0xC1, 0x8A, 0xC2, /* 0xCC-0xCF */
+	0xB5, 0xE3, 0x8A, 0xC3, 0x8A, 0xC4, 0x8A, 0xC5, /* 0xD0-0xD3 */
+	0x8A, 0xC6, 0xB5, 0xE4, 0x8A, 0xC7, 0x8A, 0xC8, /* 0xD4-0xD7 */
+	0x8A, 0xC9, 0x8A, 0xCA, 0x8A, 0xCB, 0x8A, 0xCC, /* 0xD8-0xDB */
+	0xB5, 0xE5, 0xB5, 0xE6, 0x8A, 0xCD, 0x8A, 0xCE, /* 0xDC-0xDF */
+	0xB5, 0xE7, 0x8A, 0xCF, 0x8A, 0xD0, 0xB5, 0xE8, /* 0xE0-0xE3 */
+	0xB5, 0xE9, 0x8A, 0xD1, 0xB5, 0xEA, 0x8A, 0xD2, /* 0xE4-0xE7 */
+	0x8A, 0xD3, 0x8A, 0xD4, 0x8A, 0xD5, 0x8A, 0xD6, /* 0xE8-0xEB */
+	0xB5, 0xEB, 0xB5, 0xEC, 0x8A, 0xD7, 0xB5, 0xED, /* 0xEC-0xEF */
+	0x8A, 0xD8, 0xB5, 0xEE, 0x8A, 0xD9, 0x8A, 0xDA, /* 0xF0-0xF3 */
+	0x8A, 0xDB, 0x8A, 0xDC, 0x8A, 0xDD, 0x8A, 0xDE, /* 0xF4-0xF7 */
+	0xB5, 0xEF, 0x8A, 0xDF, 0x8A, 0xE0, 0x8A, 0xE1, /* 0xF8-0xFB */
+	0x8A, 0xE2, 0x8A, 0xE3, 0x8A, 0xE4, 0x8A, 0xE5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B5[512] = {
+	0x8A, 0xE6, 0x8A, 0xE7, 0x8A, 0xE8, 0x8A, 0xE9, /* 0x00-0x03 */
+	0x8A, 0xEA, 0x8A, 0xEB, 0x8A, 0xEC, 0x8A, 0xED, /* 0x04-0x07 */
+	0x8A, 0xEE, 0x8A, 0xEF, 0x8A, 0xF0, 0x8A, 0xF1, /* 0x08-0x0B */
+	0x8A, 0xF2, 0x8A, 0xF3, 0x8A, 0xF4, 0x8A, 0xF5, /* 0x0C-0x0F */
+	0x8A, 0xF6, 0x8A, 0xF7, 0x8A, 0xF8, 0x8A, 0xF9, /* 0x10-0x13 */
+	0xB5, 0xF0, 0xB5, 0xF1, 0x8A, 0xFA, 0x8A, 0xFB, /* 0x14-0x17 */
+	0xB5, 0xF2, 0x8A, 0xFC, 0x8A, 0xFD, 0xB5, 0xF3, /* 0x18-0x1B */
+	0xB5, 0xF4, 0x8A, 0xFE, 0x8B, 0x41, 0x8B, 0x42, /* 0x1C-0x1F */
+	0x8B, 0x43, 0x8B, 0x44, 0x8B, 0x45, 0x8B, 0x46, /* 0x20-0x23 */
+	0xB5, 0xF5, 0xB5, 0xF6, 0x8B, 0x47, 0xB5, 0xF7, /* 0x24-0x27 */
+	0xB5, 0xF8, 0xB5, 0xF9, 0xB5, 0xFA, 0x8B, 0x48, /* 0x28-0x2B */
+	0x8B, 0x49, 0x8B, 0x4A, 0x8B, 0x4B, 0x8B, 0x4C, /* 0x2C-0x2F */
+	0xB5, 0xFB, 0xB5, 0xFC, 0x8B, 0x4D, 0x8B, 0x4E, /* 0x30-0x33 */
+	0xB5, 0xFD, 0x8B, 0x4F, 0x8B, 0x50, 0x8B, 0x51, /* 0x34-0x37 */
+	0xB5, 0xFE, 0x8B, 0x52, 0x8B, 0x53, 0x8B, 0x54, /* 0x38-0x3B */
+	0x8B, 0x55, 0x8B, 0x56, 0x8B, 0x57, 0x8B, 0x58, /* 0x3C-0x3F */
+	0xB6, 0xA1, 0xB6, 0xA2, 0x8B, 0x59, 0xB6, 0xA3, /* 0x40-0x43 */
+	0xB6, 0xA4, 0xB6, 0xA5, 0x8B, 0x5A, 0x8B, 0x61, /* 0x44-0x47 */
+	0x8B, 0x62, 0x8B, 0x63, 0x8B, 0x64, 0xB6, 0xA6, /* 0x48-0x4B */
+	0xB6, 0xA7, 0xB6, 0xA8, 0x8B, 0x65, 0x8B, 0x66, /* 0x4C-0x4F */
+	0xB6, 0xA9, 0x8B, 0x67, 0x8B, 0x68, 0x8B, 0x69, /* 0x50-0x53 */
+	0xB6, 0xAA, 0x8B, 0x6A, 0x8B, 0x6B, 0x8B, 0x6C, /* 0x54-0x57 */
+	0x8B, 0x6D, 0x8B, 0x6E, 0x8B, 0x6F, 0x8B, 0x70, /* 0x58-0x5B */
+	0xB6, 0xAB, 0xB6, 0xAC, 0x8B, 0x71, 0xB6, 0xAD, /* 0x5C-0x5F */
+	0xB6, 0xAE, 0xB6, 0xAF, 0x8B, 0x72, 0x8B, 0x73, /* 0x60-0x63 */
+	0x8B, 0x74, 0x8B, 0x75, 0x8B, 0x76, 0x8B, 0x77, /* 0x64-0x67 */
+	0x8B, 0x78, 0x8B, 0x79, 0x8B, 0x7A, 0x8B, 0x81, /* 0x68-0x6B */
+	0x8B, 0x82, 0x8B, 0x83, 0x8B, 0x84, 0x8B, 0x85, /* 0x6C-0x6F */
+	0x8B, 0x86, 0x8B, 0x87, 0x8B, 0x88, 0x8B, 0x89, /* 0x70-0x73 */
+	0x8B, 0x8A, 0x8B, 0x8B, 0x8B, 0x8C, 0x8B, 0x8D, /* 0x74-0x77 */
+	0x8B, 0x8E, 0x8B, 0x8F, 0x8B, 0x90, 0x8B, 0x91, /* 0x78-0x7B */
+	0x8B, 0x92, 0x8B, 0x93, 0x8B, 0x94, 0x8B, 0x95, /* 0x7C-0x7F */
+	
+	0x8B, 0x96, 0x8B, 0x97, 0x8B, 0x98, 0x8B, 0x99, /* 0x80-0x83 */
+	0x8B, 0x9A, 0x8B, 0x9B, 0x8B, 0x9C, 0x8B, 0x9D, /* 0x84-0x87 */
+	0x8B, 0x9E, 0x8B, 0x9F, 0x8B, 0xA0, 0x8B, 0xA1, /* 0x88-0x8B */
+	0x8B, 0xA2, 0x8B, 0xA3, 0x8B, 0xA4, 0x8B, 0xA5, /* 0x8C-0x8F */
+	0x8B, 0xA6, 0x8B, 0xA7, 0x8B, 0xA8, 0x8B, 0xA9, /* 0x90-0x93 */
+	0x8B, 0xAA, 0x8B, 0xAB, 0x8B, 0xAC, 0x8B, 0xAD, /* 0x94-0x97 */
+	0x8B, 0xAE, 0x8B, 0xAF, 0x8B, 0xB0, 0x8B, 0xB1, /* 0x98-0x9B */
+	0x8B, 0xB2, 0x8B, 0xB3, 0x8B, 0xB4, 0x8B, 0xB5, /* 0x9C-0x9F */
+	0xB6, 0xB0, 0xB6, 0xB1, 0x8B, 0xB6, 0x8B, 0xB7, /* 0xA0-0xA3 */
+	0xB6, 0xB2, 0x8B, 0xB8, 0x8B, 0xB9, 0x8B, 0xBA, /* 0xA4-0xA7 */
+	0xB6, 0xB3, 0x8B, 0xBB, 0xB6, 0xB4, 0xB6, 0xB5, /* 0xA8-0xAB */
+	0x8B, 0xBC, 0x8B, 0xBD, 0x8B, 0xBE, 0x8B, 0xBF, /* 0xAC-0xAF */
+	0xB6, 0xB6, 0xB6, 0xB7, 0x8B, 0xC0, 0xB6, 0xB8, /* 0xB0-0xB3 */
+	0xB6, 0xB9, 0xB6, 0xBA, 0x8B, 0xC1, 0x8B, 0xC2, /* 0xB4-0xB7 */
+	0x8B, 0xC3, 0x8B, 0xC4, 0x8B, 0xC5, 0xB6, 0xBB, /* 0xB8-0xBB */
+	0xB6, 0xBC, 0xB6, 0xBD, 0x8B, 0xC6, 0x8B, 0xC7, /* 0xBC-0xBF */
+	0xB6, 0xBE, 0x8B, 0xC8, 0x8B, 0xC9, 0x8B, 0xCA, /* 0xC0-0xC3 */
+	0xB6, 0xBF, 0x8B, 0xCB, 0x8B, 0xCC, 0x8B, 0xCD, /* 0xC4-0xC7 */
+	0x8B, 0xCE, 0x8B, 0xCF, 0x8B, 0xD0, 0x8B, 0xD1, /* 0xC8-0xCB */
+	0xB6, 0xC0, 0xB6, 0xC1, 0x8B, 0xD2, 0xB6, 0xC2, /* 0xCC-0xCF */
+	0xB6, 0xC3, 0xB6, 0xC4, 0x8B, 0xD3, 0x8B, 0xD4, /* 0xD0-0xD3 */
+	0x8B, 0xD5, 0x8B, 0xD6, 0x8B, 0xD7, 0x8B, 0xD8, /* 0xD4-0xD7 */
+	0xB6, 0xC5, 0x8B, 0xD9, 0x8B, 0xDA, 0x8B, 0xDB, /* 0xD8-0xDB */
+	0x8B, 0xDC, 0x8B, 0xDD, 0x8B, 0xDE, 0x8B, 0xDF, /* 0xDC-0xDF */
+	0x8B, 0xE0, 0x8B, 0xE1, 0x8B, 0xE2, 0x8B, 0xE3, /* 0xE0-0xE3 */
+	0x8B, 0xE4, 0x8B, 0xE5, 0x8B, 0xE6, 0x8B, 0xE7, /* 0xE4-0xE7 */
+	0x8B, 0xE8, 0x8B, 0xE9, 0x8B, 0xEA, 0x8B, 0xEB, /* 0xE8-0xEB */
+	0xB6, 0xC6, 0x8B, 0xEC, 0x8B, 0xED, 0x8B, 0xEE, /* 0xEC-0xEF */
+	0x8B, 0xEF, 0x8B, 0xF0, 0x8B, 0xF1, 0x8B, 0xF2, /* 0xF0-0xF3 */
+	0x8B, 0xF3, 0x8B, 0xF4, 0x8B, 0xF5, 0x8B, 0xF6, /* 0xF4-0xF7 */
+	0x8B, 0xF7, 0x8B, 0xF8, 0x8B, 0xF9, 0x8B, 0xFA, /* 0xF8-0xFB */
+	0x8B, 0xFB, 0x8B, 0xFC, 0x8B, 0xFD, 0x8B, 0xFE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B6[512] = {
+	0x8C, 0x41, 0x8C, 0x42, 0x8C, 0x43, 0x8C, 0x44, /* 0x00-0x03 */
+	0x8C, 0x45, 0x8C, 0x46, 0x8C, 0x47, 0x8C, 0x48, /* 0x04-0x07 */
+	0x8C, 0x49, 0x8C, 0x4A, 0x8C, 0x4B, 0x8C, 0x4C, /* 0x08-0x0B */
+	0x8C, 0x4D, 0x8C, 0x4E, 0x8C, 0x4F, 0x8C, 0x50, /* 0x0C-0x0F */
+	0xB6, 0xC7, 0xB6, 0xC8, 0x8C, 0x51, 0x8C, 0x52, /* 0x10-0x13 */
+	0xB6, 0xC9, 0x8C, 0x53, 0x8C, 0x54, 0x8C, 0x55, /* 0x14-0x17 */
+	0xB6, 0xCA, 0x8C, 0x56, 0x8C, 0x57, 0x8C, 0x58, /* 0x18-0x1B */
+	0x8C, 0x59, 0x8C, 0x5A, 0x8C, 0x61, 0x8C, 0x62, /* 0x1C-0x1F */
+	0x8C, 0x63, 0x8C, 0x64, 0x8C, 0x65, 0x8C, 0x66, /* 0x20-0x23 */
+	0x8C, 0x67, 0xB6, 0xCB, 0x8C, 0x68, 0x8C, 0x69, /* 0x24-0x27 */
+	0x8C, 0x6A, 0x8C, 0x6B, 0x8C, 0x6C, 0x8C, 0x6D, /* 0x28-0x2B */
+	0xB6, 0xCC, 0x8C, 0x6E, 0x8C, 0x6F, 0x8C, 0x70, /* 0x2C-0x2F */
+	0x8C, 0x71, 0x8C, 0x72, 0x8C, 0x73, 0x8C, 0x74, /* 0x30-0x33 */
+	0xB6, 0xCD, 0x8C, 0x75, 0x8C, 0x76, 0x8C, 0x77, /* 0x34-0x37 */
+	0x8C, 0x78, 0x8C, 0x79, 0x8C, 0x7A, 0x8C, 0x81, /* 0x38-0x3B */
+	0x8C, 0x82, 0x8C, 0x83, 0x8C, 0x84, 0x8C, 0x85, /* 0x3C-0x3F */
+	0x8C, 0x86, 0x8C, 0x87, 0x8C, 0x88, 0x8C, 0x89, /* 0x40-0x43 */
+	0x8C, 0x8A, 0x8C, 0x8B, 0x8C, 0x8C, 0x8C, 0x8D, /* 0x44-0x47 */
+	0xB6, 0xCE, 0x8C, 0x8E, 0x8C, 0x8F, 0x8C, 0x90, /* 0x48-0x4B */
+	0x8C, 0x91, 0x8C, 0x92, 0x8C, 0x93, 0x8C, 0x94, /* 0x4C-0x4F */
+	0x8C, 0x95, 0x8C, 0x96, 0x8C, 0x97, 0x8C, 0x98, /* 0x50-0x53 */
+	0x8C, 0x99, 0x8C, 0x9A, 0x8C, 0x9B, 0x8C, 0x9C, /* 0x54-0x57 */
+	0x8C, 0x9D, 0x8C, 0x9E, 0x8C, 0x9F, 0x8C, 0xA0, /* 0x58-0x5B */
+	0x8C, 0xA1, 0x8C, 0xA2, 0x8C, 0xA3, 0x8C, 0xA4, /* 0x5C-0x5F */
+	0x8C, 0xA5, 0x8C, 0xA6, 0x8C, 0xA7, 0x8C, 0xA8, /* 0x60-0x63 */
+	0xB6, 0xCF, 0x8C, 0xA9, 0x8C, 0xAA, 0x8C, 0xAB, /* 0x64-0x67 */
+	0xB6, 0xD0, 0x8C, 0xAC, 0x8C, 0xAD, 0x8C, 0xAE, /* 0x68-0x6B */
+	0x8C, 0xAF, 0x8C, 0xB0, 0x8C, 0xB1, 0x8C, 0xB2, /* 0x6C-0x6F */
+	0x8C, 0xB3, 0x8C, 0xB4, 0x8C, 0xB5, 0x8C, 0xB6, /* 0x70-0x73 */
+	0x8C, 0xB7, 0x8C, 0xB8, 0x8C, 0xB9, 0x8C, 0xBA, /* 0x74-0x77 */
+	0x8C, 0xBB, 0x8C, 0xBC, 0x8C, 0xBD, 0x8C, 0xBE, /* 0x78-0x7B */
+	0x8C, 0xBF, 0x8C, 0xC0, 0x8C, 0xC1, 0x8C, 0xC2, /* 0x7C-0x7F */
+	
+	0x8C, 0xC3, 0x8C, 0xC4, 0x8C, 0xC5, 0x8C, 0xC6, /* 0x80-0x83 */
+	0x8C, 0xC7, 0x8C, 0xC8, 0x8C, 0xC9, 0x8C, 0xCA, /* 0x84-0x87 */
+	0x8C, 0xCB, 0x8C, 0xCC, 0x8C, 0xCD, 0x8C, 0xCE, /* 0x88-0x8B */
+	0x8C, 0xCF, 0x8C, 0xD0, 0x8C, 0xD1, 0x8C, 0xD2, /* 0x8C-0x8F */
+	0x8C, 0xD3, 0x8C, 0xD4, 0x8C, 0xD5, 0x8C, 0xD6, /* 0x90-0x93 */
+	0x8C, 0xD7, 0x8C, 0xD8, 0x8C, 0xD9, 0x8C, 0xDA, /* 0x94-0x97 */
+	0x8C, 0xDB, 0x8C, 0xDC, 0x8C, 0xDD, 0x8C, 0xDE, /* 0x98-0x9B */
+	0xB6, 0xD1, 0xB6, 0xD2, 0x8C, 0xDF, 0x8C, 0xE0, /* 0x9C-0x9F */
+	0xB6, 0xD3, 0x8C, 0xE1, 0x8C, 0xE2, 0x8C, 0xE3, /* 0xA0-0xA3 */
+	0xB6, 0xD4, 0x8C, 0xE4, 0x8C, 0xE5, 0x8C, 0xE6, /* 0xA4-0xA7 */
+	0x8C, 0xE7, 0x8C, 0xE8, 0x8C, 0xE9, 0xB6, 0xD5, /* 0xA8-0xAB */
+	0xB6, 0xD6, 0x8C, 0xEA, 0x8C, 0xEB, 0x8C, 0xEC, /* 0xAC-0xAF */
+	0x8C, 0xED, 0xB6, 0xD7, 0x8C, 0xEE, 0x8C, 0xEF, /* 0xB0-0xB3 */
+	0x8C, 0xF0, 0x8C, 0xF1, 0x8C, 0xF2, 0x8C, 0xF3, /* 0xB4-0xB7 */
+	0x8C, 0xF4, 0x8C, 0xF5, 0x8C, 0xF6, 0x8C, 0xF7, /* 0xB8-0xBB */
+	0x8C, 0xF8, 0x8C, 0xF9, 0x8C, 0xFA, 0x8C, 0xFB, /* 0xBC-0xBF */
+	0x8C, 0xFC, 0x8C, 0xFD, 0x8C, 0xFE, 0x8D, 0x41, /* 0xC0-0xC3 */
+	0x8D, 0x42, 0x8D, 0x43, 0x8D, 0x44, 0x8D, 0x45, /* 0xC4-0xC7 */
+	0x8D, 0x46, 0x8D, 0x47, 0x8D, 0x48, 0x8D, 0x49, /* 0xC8-0xCB */
+	0x8D, 0x4A, 0x8D, 0x4B, 0x8D, 0x4C, 0x8D, 0x4D, /* 0xCC-0xCF */
+	0x8D, 0x4E, 0x8D, 0x4F, 0x8D, 0x50, 0x8D, 0x51, /* 0xD0-0xD3 */
+	0xB6, 0xD8, 0x8D, 0x52, 0x8D, 0x53, 0x8D, 0x54, /* 0xD4-0xD7 */
+	0x8D, 0x55, 0x8D, 0x56, 0x8D, 0x57, 0x8D, 0x58, /* 0xD8-0xDB */
+	0x8D, 0x59, 0x8D, 0x5A, 0x8D, 0x61, 0x8D, 0x62, /* 0xDC-0xDF */
+	0x8D, 0x63, 0x8D, 0x64, 0x8D, 0x65, 0x8D, 0x66, /* 0xE0-0xE3 */
+	0x8D, 0x67, 0x8D, 0x68, 0x8D, 0x69, 0x8D, 0x6A, /* 0xE4-0xE7 */
+	0x8D, 0x6B, 0x8D, 0x6C, 0x8D, 0x6D, 0x8D, 0x6E, /* 0xE8-0xEB */
+	0x8D, 0x6F, 0x8D, 0x70, 0x8D, 0x71, 0x8D, 0x72, /* 0xEC-0xEF */
+	0xB6, 0xD9, 0x8D, 0x73, 0x8D, 0x74, 0x8D, 0x75, /* 0xF0-0xF3 */
+	0xB6, 0xDA, 0x8D, 0x76, 0x8D, 0x77, 0x8D, 0x78, /* 0xF4-0xF7 */
+	0xB6, 0xDB, 0x8D, 0x79, 0x8D, 0x7A, 0x8D, 0x81, /* 0xF8-0xFB */
+	0x8D, 0x82, 0x8D, 0x83, 0x8D, 0x84, 0x8D, 0x85, /* 0xFC-0xFF */
+};
+
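+/*
+ * Layout note: each u2c_XX[512] table packs the translations for the 256
+ * Unicode code points U+XX00..U+XXFF as consecutive two-byte output pairs,
+ * so the pair for a given code point sits at index (uni & 0xFF) * 2.
+ * Illustrative lookup only, assuming that layout; the actual conversion
+ * routine is presumably defined elsewhere in this file:
+ *
+ *	out[0] = u2c_B7[(uni & 0xFF) * 2];
+ *	out[1] = u2c_B7[(uni & 0xFF) * 2 + 1];
+ */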
+static unsigned char u2c_B7[512] = {
+	0xB6, 0xDC, 0xB6, 0xDD, 0x8D, 0x86, 0x8D, 0x87, /* 0x00-0x03 */
+	0x8D, 0x88, 0xB6, 0xDE, 0x8D, 0x89, 0x8D, 0x8A, /* 0x04-0x07 */
+	0x8D, 0x8B, 0x8D, 0x8C, 0x8D, 0x8D, 0x8D, 0x8E, /* 0x08-0x0B */
+	0x8D, 0x8F, 0x8D, 0x90, 0x8D, 0x91, 0x8D, 0x92, /* 0x0C-0x0F */
+	0x8D, 0x93, 0x8D, 0x94, 0x8D, 0x95, 0x8D, 0x96, /* 0x10-0x13 */
+	0x8D, 0x97, 0x8D, 0x98, 0x8D, 0x99, 0x8D, 0x9A, /* 0x14-0x17 */
+	0x8D, 0x9B, 0x8D, 0x9C, 0x8D, 0x9D, 0x8D, 0x9E, /* 0x18-0x1B */
+	0x8D, 0x9F, 0x8D, 0xA0, 0x8D, 0xA1, 0x8D, 0xA2, /* 0x1C-0x1F */
+	0x8D, 0xA3, 0x8D, 0xA4, 0x8D, 0xA5, 0x8D, 0xA6, /* 0x20-0x23 */
+	0x8D, 0xA7, 0x8D, 0xA8, 0x8D, 0xA9, 0x8D, 0xAA, /* 0x24-0x27 */
+	0xB6, 0xDF, 0xB6, 0xE0, 0x8D, 0xAB, 0x8D, 0xAC, /* 0x28-0x2B */
+	0xB6, 0xE1, 0x8D, 0xAD, 0x8D, 0xAE, 0xB6, 0xE2, /* 0x2C-0x2F */
+	0xB6, 0xE3, 0x8D, 0xAF, 0x8D, 0xB0, 0x8D, 0xB1, /* 0x30-0x33 */
+	0x8D, 0xB2, 0x8D, 0xB3, 0x8D, 0xB4, 0x8D, 0xB5, /* 0x34-0x37 */
+	0xB6, 0xE4, 0xB6, 0xE5, 0x8D, 0xB6, 0xB6, 0xE6, /* 0x38-0x3B */
+	0x8D, 0xB7, 0x8D, 0xB8, 0x8D, 0xB9, 0x8D, 0xBA, /* 0x3C-0x3F */
+	0x8D, 0xBB, 0x8D, 0xBC, 0x8D, 0xBD, 0x8D, 0xBE, /* 0x40-0x43 */
+	0xB6, 0xE7, 0x8D, 0xBF, 0x8D, 0xC0, 0x8D, 0xC1, /* 0x44-0x47 */
+	0xB6, 0xE8, 0x8D, 0xC2, 0x8D, 0xC3, 0x8D, 0xC4, /* 0x48-0x4B */
+	0xB6, 0xE9, 0x8D, 0xC5, 0x8D, 0xC6, 0x8D, 0xC7, /* 0x4C-0x4F */
+	0x8D, 0xC8, 0x8D, 0xC9, 0x8D, 0xCA, 0x8D, 0xCB, /* 0x50-0x53 */
+	0xB6, 0xEA, 0xB6, 0xEB, 0x8D, 0xCC, 0x8D, 0xCD, /* 0x54-0x57 */
+	0x8D, 0xCE, 0x8D, 0xCF, 0x8D, 0xD0, 0x8D, 0xD1, /* 0x58-0x5B */
+	0x8D, 0xD2, 0x8D, 0xD3, 0x8D, 0xD4, 0x8D, 0xD5, /* 0x5C-0x5F */
+	0xB6, 0xEC, 0x8D, 0xD6, 0x8D, 0xD7, 0x8D, 0xD8, /* 0x60-0x63 */
+	0xB6, 0xED, 0x8D, 0xD9, 0x8D, 0xDA, 0x8D, 0xDB, /* 0x64-0x67 */
+	0xB6, 0xEE, 0x8D, 0xDC, 0x8D, 0xDD, 0x8D, 0xDE, /* 0x68-0x6B */
+	0x8D, 0xDF, 0x8D, 0xE0, 0x8D, 0xE1, 0x8D, 0xE2, /* 0x6C-0x6F */
+	0xB6, 0xEF, 0xB6, 0xF0, 0x8D, 0xE3, 0xB6, 0xF1, /* 0x70-0x73 */
+	0x8D, 0xE4, 0xB6, 0xF2, 0x8D, 0xE5, 0x8D, 0xE6, /* 0x74-0x77 */
+	0x8D, 0xE7, 0x8D, 0xE8, 0x8D, 0xE9, 0x8D, 0xEA, /* 0x78-0x7B */
+	0xB6, 0xF3, 0xB6, 0xF4, 0x8D, 0xEB, 0x8D, 0xEC, /* 0x7C-0x7F */
+	
+	0xB6, 0xF5, 0x8D, 0xED, 0x8D, 0xEE, 0x8D, 0xEF, /* 0x80-0x83 */
+	0xB6, 0xF6, 0x8D, 0xF0, 0x8D, 0xF1, 0x8D, 0xF2, /* 0x84-0x87 */
+	0x8D, 0xF3, 0x8D, 0xF4, 0x8D, 0xF5, 0x8D, 0xF6, /* 0x88-0x8B */
+	0xB6, 0xF7, 0xB6, 0xF8, 0x8D, 0xF7, 0xB6, 0xF9, /* 0x8C-0x8F */
+	0xB6, 0xFA, 0xB6, 0xFB, 0xB6, 0xFC, 0x8D, 0xF8, /* 0x90-0x93 */
+	0x8D, 0xF9, 0x8D, 0xFA, 0xB6, 0xFD, 0xB6, 0xFE, /* 0x94-0x97 */
+	0xB7, 0xA1, 0xB7, 0xA2, 0x8D, 0xFB, 0x8D, 0xFC, /* 0x98-0x9B */
+	0xB7, 0xA3, 0x8D, 0xFD, 0x8D, 0xFE, 0x8E, 0x41, /* 0x9C-0x9F */
+	0xB7, 0xA4, 0x8E, 0x42, 0x8E, 0x43, 0x8E, 0x44, /* 0xA0-0xA3 */
+	0x8E, 0x45, 0x8E, 0x46, 0x8E, 0x47, 0x8E, 0x48, /* 0xA4-0xA7 */
+	0xB7, 0xA5, 0xB7, 0xA6, 0x8E, 0x49, 0xB7, 0xA7, /* 0xA8-0xAB */
+	0xB7, 0xA8, 0xB7, 0xA9, 0x8E, 0x4A, 0x8E, 0x4B, /* 0xAC-0xAF */
+	0x8E, 0x4C, 0x8E, 0x4D, 0x8E, 0x4E, 0x8E, 0x4F, /* 0xB0-0xB3 */
+	0xB7, 0xAA, 0xB7, 0xAB, 0x8E, 0x50, 0x8E, 0x51, /* 0xB4-0xB7 */
+	0xB7, 0xAC, 0x8E, 0x52, 0x8E, 0x53, 0x8E, 0x54, /* 0xB8-0xBB */
+	0x8E, 0x55, 0x8E, 0x56, 0x8E, 0x57, 0x8E, 0x58, /* 0xBC-0xBF */
+	0x8E, 0x59, 0x8E, 0x5A, 0x8E, 0x61, 0x8E, 0x62, /* 0xC0-0xC3 */
+	0x8E, 0x63, 0x8E, 0x64, 0x8E, 0x65, 0xB7, 0xAD, /* 0xC4-0xC7 */
+	0x8E, 0x66, 0xB7, 0xAE, 0x8E, 0x67, 0x8E, 0x68, /* 0xC8-0xCB */
+	0x8E, 0x69, 0x8E, 0x6A, 0x8E, 0x6B, 0x8E, 0x6C, /* 0xCC-0xCF */
+	0x8E, 0x6D, 0x8E, 0x6E, 0x8E, 0x6F, 0x8E, 0x70, /* 0xD0-0xD3 */
+	0x8E, 0x71, 0x8E, 0x72, 0x8E, 0x73, 0x8E, 0x74, /* 0xD4-0xD7 */
+	0x8E, 0x75, 0x8E, 0x76, 0x8E, 0x77, 0x8E, 0x78, /* 0xD8-0xDB */
+	0x8E, 0x79, 0x8E, 0x7A, 0x8E, 0x81, 0x8E, 0x82, /* 0xDC-0xDF */
+	0x8E, 0x83, 0x8E, 0x84, 0x8E, 0x85, 0x8E, 0x86, /* 0xE0-0xE3 */
+	0x8E, 0x87, 0x8E, 0x88, 0x8E, 0x89, 0x8E, 0x8A, /* 0xE4-0xE7 */
+	0x8E, 0x8B, 0x8E, 0x8C, 0x8E, 0x8D, 0x8E, 0x8E, /* 0xE8-0xEB */
+	0xB7, 0xAF, 0xB7, 0xB0, 0x8E, 0x8F, 0x8E, 0x90, /* 0xEC-0xEF */
+	0xB7, 0xB1, 0x8E, 0x91, 0x8E, 0x92, 0x8E, 0x93, /* 0xF0-0xF3 */
+	0xB7, 0xB2, 0x8E, 0x94, 0x8E, 0x95, 0x8E, 0x96, /* 0xF4-0xF7 */
+	0x8E, 0x97, 0x8E, 0x98, 0x8E, 0x99, 0x8E, 0x9A, /* 0xF8-0xFB */
+	0xB7, 0xB3, 0xB7, 0xB4, 0x8E, 0x9B, 0xB7, 0xB5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B8[512] = {
+	0xB7, 0xB6, 0xB7, 0xB7, 0x8E, 0x9C, 0x8E, 0x9D, /* 0x00-0x03 */
+	0x8E, 0x9E, 0x8E, 0x9F, 0x8E, 0xA0, 0xB7, 0xB8, /* 0x04-0x07 */
+	0xB7, 0xB9, 0xB7, 0xBA, 0x8E, 0xA1, 0x8E, 0xA2, /* 0x08-0x0B */
+	0xB7, 0xBB, 0x8E, 0xA3, 0x8E, 0xA4, 0x8E, 0xA5, /* 0x0C-0x0F */
+	0xB7, 0xBC, 0x8E, 0xA6, 0x8E, 0xA7, 0x8E, 0xA8, /* 0x10-0x13 */
+	0x8E, 0xA9, 0x8E, 0xAA, 0x8E, 0xAB, 0x8E, 0xAC, /* 0x14-0x17 */
+	0xB7, 0xBD, 0xB7, 0xBE, 0x8E, 0xAD, 0xB7, 0xBF, /* 0x18-0x1B */
+	0x8E, 0xAE, 0xB7, 0xC0, 0x8E, 0xAF, 0x8E, 0xB0, /* 0x1C-0x1F */
+	0x8E, 0xB1, 0x8E, 0xB2, 0x8E, 0xB3, 0x8E, 0xB4, /* 0x20-0x23 */
+	0xB7, 0xC1, 0xB7, 0xC2, 0x8E, 0xB5, 0x8E, 0xB6, /* 0x24-0x27 */
+	0xB7, 0xC3, 0x8E, 0xB7, 0x8E, 0xB8, 0x8E, 0xB9, /* 0x28-0x2B */
+	0xB7, 0xC4, 0x8E, 0xBA, 0x8E, 0xBB, 0x8E, 0xBC, /* 0x2C-0x2F */
+	0x8E, 0xBD, 0x8E, 0xBE, 0x8E, 0xBF, 0x8E, 0xC0, /* 0x30-0x33 */
+	0xB7, 0xC5, 0xB7, 0xC6, 0x8E, 0xC1, 0xB7, 0xC7, /* 0x34-0x37 */
+	0xB7, 0xC8, 0xB7, 0xC9, 0x8E, 0xC2, 0x8E, 0xC3, /* 0x38-0x3B */
+	0x8E, 0xC4, 0x8E, 0xC5, 0x8E, 0xC6, 0x8E, 0xC7, /* 0x3C-0x3F */
+	0xB7, 0xCA, 0x8E, 0xC8, 0x8E, 0xC9, 0x8E, 0xCA, /* 0x40-0x43 */
+	0xB7, 0xCB, 0x8E, 0xCB, 0x8E, 0xCC, 0x8E, 0xCD, /* 0x44-0x47 */
+	0x8E, 0xCE, 0x8E, 0xCF, 0x8E, 0xD0, 0x8E, 0xD1, /* 0x48-0x4B */
+	0x8E, 0xD2, 0x8E, 0xD3, 0x8E, 0xD4, 0x8E, 0xD5, /* 0x4C-0x4F */
+	0x8E, 0xD6, 0xB7, 0xCC, 0x8E, 0xD7, 0xB7, 0xCD, /* 0x50-0x53 */
+	0x8E, 0xD8, 0x8E, 0xD9, 0x8E, 0xDA, 0x8E, 0xDB, /* 0x54-0x57 */
+	0x8E, 0xDC, 0x8E, 0xDD, 0x8E, 0xDE, 0x8E, 0xDF, /* 0x58-0x5B */
+	0xB7, 0xCE, 0xB7, 0xCF, 0x8E, 0xE0, 0x8E, 0xE1, /* 0x5C-0x5F */
+	0xB7, 0xD0, 0x8E, 0xE2, 0x8E, 0xE3, 0x8E, 0xE4, /* 0x60-0x63 */
+	0xB7, 0xD1, 0x8E, 0xE5, 0x8E, 0xE6, 0x8E, 0xE7, /* 0x64-0x67 */
+	0x8E, 0xE8, 0x8E, 0xE9, 0x8E, 0xEA, 0x8E, 0xEB, /* 0x68-0x6B */
+	0xB7, 0xD2, 0xB7, 0xD3, 0x8E, 0xEC, 0xB7, 0xD4, /* 0x6C-0x6F */
+	0x8E, 0xED, 0xB7, 0xD5, 0x8E, 0xEE, 0x8E, 0xEF, /* 0x70-0x73 */
+	0x8E, 0xF0, 0x8E, 0xF1, 0x8E, 0xF2, 0x8E, 0xF3, /* 0x74-0x77 */
+	0xB7, 0xD6, 0x8E, 0xF4, 0x8E, 0xF5, 0x8E, 0xF6, /* 0x78-0x7B */
+	0xB7, 0xD7, 0x8E, 0xF7, 0x8E, 0xF8, 0x8E, 0xF9, /* 0x7C-0x7F */
+	
+	0x8E, 0xFA, 0x8E, 0xFB, 0x8E, 0xFC, 0x8E, 0xFD, /* 0x80-0x83 */
+	0x8E, 0xFE, 0x8F, 0x41, 0x8F, 0x42, 0x8F, 0x43, /* 0x84-0x87 */
+	0x8F, 0x44, 0x8F, 0x45, 0x8F, 0x46, 0x8F, 0x47, /* 0x88-0x8B */
+	0x8F, 0x48, 0xB7, 0xD8, 0x8F, 0x49, 0x8F, 0x4A, /* 0x8C-0x8F */
+	0x8F, 0x4B, 0x8F, 0x4C, 0x8F, 0x4D, 0x8F, 0x4E, /* 0x90-0x93 */
+	0x8F, 0x4F, 0x8F, 0x50, 0x8F, 0x51, 0x8F, 0x52, /* 0x94-0x97 */
+	0x8F, 0x53, 0x8F, 0x54, 0x8F, 0x55, 0x8F, 0x56, /* 0x98-0x9B */
+	0x8F, 0x57, 0x8F, 0x58, 0x8F, 0x59, 0x8F, 0x5A, /* 0x9C-0x9F */
+	0x8F, 0x61, 0x8F, 0x62, 0x8F, 0x63, 0x8F, 0x64, /* 0xA0-0xA3 */
+	0x8F, 0x65, 0x8F, 0x66, 0x8F, 0x67, 0x8F, 0x68, /* 0xA4-0xA7 */
+	0xB7, 0xD9, 0x8F, 0x69, 0x8F, 0x6A, 0x8F, 0x6B, /* 0xA8-0xAB */
+	0x8F, 0x6C, 0x8F, 0x6D, 0x8F, 0x6E, 0x8F, 0x6F, /* 0xAC-0xAF */
+	0xB7, 0xDA, 0x8F, 0x70, 0x8F, 0x71, 0x8F, 0x72, /* 0xB0-0xB3 */
+	0xB7, 0xDB, 0x8F, 0x73, 0x8F, 0x74, 0x8F, 0x75, /* 0xB4-0xB7 */
+	0xB7, 0xDC, 0x8F, 0x76, 0x8F, 0x77, 0x8F, 0x78, /* 0xB8-0xBB */
+	0x8F, 0x79, 0x8F, 0x7A, 0x8F, 0x81, 0x8F, 0x82, /* 0xBC-0xBF */
+	0xB7, 0xDD, 0xB7, 0xDE, 0x8F, 0x83, 0xB7, 0xDF, /* 0xC0-0xC3 */
+	0x8F, 0x84, 0xB7, 0xE0, 0x8F, 0x85, 0x8F, 0x86, /* 0xC4-0xC7 */
+	0x8F, 0x87, 0x8F, 0x88, 0x8F, 0x89, 0x8F, 0x8A, /* 0xC8-0xCB */
+	0xB7, 0xE1, 0x8F, 0x8B, 0x8F, 0x8C, 0x8F, 0x8D, /* 0xCC-0xCF */
+	0xB7, 0xE2, 0x8F, 0x8E, 0x8F, 0x8F, 0x8F, 0x90, /* 0xD0-0xD3 */
+	0xB7, 0xE3, 0x8F, 0x91, 0x8F, 0x92, 0x8F, 0x93, /* 0xD4-0xD7 */
+	0x8F, 0x94, 0x8F, 0x95, 0x8F, 0x96, 0x8F, 0x97, /* 0xD8-0xDB */
+	0x8F, 0x98, 0xB7, 0xE4, 0x8F, 0x99, 0xB7, 0xE5, /* 0xDC-0xDF */
+	0x8F, 0x9A, 0xB7, 0xE6, 0x8F, 0x9B, 0x8F, 0x9C, /* 0xE0-0xE3 */
+	0x8F, 0x9D, 0x8F, 0x9E, 0x8F, 0x9F, 0x8F, 0xA0, /* 0xE4-0xE7 */
+	0xB7, 0xE7, 0xB7, 0xE8, 0x8F, 0xA1, 0x8F, 0xA2, /* 0xE8-0xEB */
+	0xB7, 0xE9, 0x8F, 0xA3, 0x8F, 0xA4, 0x8F, 0xA5, /* 0xEC-0xEF */
+	0xB7, 0xEA, 0x8F, 0xA6, 0x8F, 0xA7, 0x8F, 0xA8, /* 0xF0-0xF3 */
+	0x8F, 0xA9, 0x8F, 0xAA, 0x8F, 0xAB, 0x8F, 0xAC, /* 0xF4-0xF7 */
+	0xB7, 0xEB, 0xB7, 0xEC, 0x8F, 0xAD, 0xB7, 0xED, /* 0xF8-0xFB */
+	0x8F, 0xAE, 0xB7, 0xEE, 0x8F, 0xAF, 0x8F, 0xB0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_B9[512] = {
+	0x8F, 0xB1, 0x8F, 0xB2, 0x8F, 0xB3, 0x8F, 0xB4, /* 0x00-0x03 */
+	0xB7, 0xEF, 0x8F, 0xB5, 0x8F, 0xB6, 0x8F, 0xB7, /* 0x04-0x07 */
+	0x8F, 0xB8, 0x8F, 0xB9, 0x8F, 0xBA, 0x8F, 0xBB, /* 0x08-0x0B */
+	0x8F, 0xBC, 0x8F, 0xBD, 0x8F, 0xBE, 0x8F, 0xBF, /* 0x0C-0x0F */
+	0x8F, 0xC0, 0x8F, 0xC1, 0x8F, 0xC2, 0x8F, 0xC3, /* 0x10-0x13 */
+	0x8F, 0xC4, 0x8F, 0xC5, 0x8F, 0xC6, 0x8F, 0xC7, /* 0x14-0x17 */
+	0xB7, 0xF0, 0x8F, 0xC8, 0x8F, 0xC9, 0x8F, 0xCA, /* 0x18-0x1B */
+	0x8F, 0xCB, 0x8F, 0xCC, 0x8F, 0xCD, 0x8F, 0xCE, /* 0x1C-0x1F */
+	0xB7, 0xF1, 0x8F, 0xCF, 0x8F, 0xD0, 0x8F, 0xD1, /* 0x20-0x23 */
+	0x8F, 0xD2, 0x8F, 0xD3, 0x8F, 0xD4, 0x8F, 0xD5, /* 0x24-0x27 */
+	0x8F, 0xD6, 0x8F, 0xD7, 0x8F, 0xD8, 0x8F, 0xD9, /* 0x28-0x2B */
+	0x8F, 0xDA, 0x8F, 0xDB, 0x8F, 0xDC, 0x8F, 0xDD, /* 0x2C-0x2F */
+	0x8F, 0xDE, 0x8F, 0xDF, 0x8F, 0xE0, 0x8F, 0xE1, /* 0x30-0x33 */
+	0x8F, 0xE2, 0x8F, 0xE3, 0x8F, 0xE4, 0x8F, 0xE5, /* 0x34-0x37 */
+	0x8F, 0xE6, 0x8F, 0xE7, 0x8F, 0xE8, 0x8F, 0xE9, /* 0x38-0x3B */
+	0xB7, 0xF2, 0xB7, 0xF3, 0x8F, 0xEA, 0x8F, 0xEB, /* 0x3C-0x3F */
+	0xB7, 0xF4, 0x8F, 0xEC, 0x8F, 0xED, 0x8F, 0xEE, /* 0x40-0x43 */
+	0xB7, 0xF5, 0x8F, 0xEF, 0x8F, 0xF0, 0x8F, 0xF1, /* 0x44-0x47 */
+	0x8F, 0xF2, 0x8F, 0xF3, 0x8F, 0xF4, 0x8F, 0xF5, /* 0x48-0x4B */
+	0xB7, 0xF6, 0x8F, 0xF6, 0x8F, 0xF7, 0xB7, 0xF7, /* 0x4C-0x4F */
+	0x8F, 0xF8, 0xB7, 0xF8, 0x8F, 0xF9, 0x8F, 0xFA, /* 0x50-0x53 */
+	0x8F, 0xFB, 0x8F, 0xFC, 0x8F, 0xFD, 0x8F, 0xFE, /* 0x54-0x57 */
+	0xB7, 0xF9, 0xB7, 0xFA, 0x90, 0x41, 0x90, 0x42, /* 0x58-0x5B */
+	0xB7, 0xFB, 0x90, 0x43, 0x90, 0x44, 0x90, 0x45, /* 0x5C-0x5F */
+	0xB7, 0xFC, 0x90, 0x46, 0x90, 0x47, 0x90, 0x48, /* 0x60-0x63 */
+	0x90, 0x49, 0x90, 0x4A, 0x90, 0x4B, 0x90, 0x4C, /* 0x64-0x67 */
+	0xB7, 0xFD, 0xB7, 0xFE, 0x90, 0x4D, 0xB8, 0xA1, /* 0x68-0x6B */
+	0x90, 0x4E, 0xB8, 0xA2, 0x90, 0x4F, 0x90, 0x50, /* 0x6C-0x6F */
+	0x90, 0x51, 0x90, 0x52, 0x90, 0x53, 0x90, 0x54, /* 0x70-0x73 */
+	0xB8, 0xA3, 0xB8, 0xA4, 0x90, 0x55, 0x90, 0x56, /* 0x74-0x77 */
+	0xB8, 0xA5, 0x90, 0x57, 0x90, 0x58, 0x90, 0x59, /* 0x78-0x7B */
+	0xB8, 0xA6, 0x90, 0x5A, 0x90, 0x61, 0x90, 0x62, /* 0x7C-0x7F */
+	
+	0x90, 0x63, 0x90, 0x64, 0x90, 0x65, 0x90, 0x66, /* 0x80-0x83 */
+	0xB8, 0xA7, 0xB8, 0xA8, 0x90, 0x67, 0xB8, 0xA9, /* 0x84-0x87 */
+	0x90, 0x68, 0xB8, 0xAA, 0xB8, 0xAB, 0x90, 0x69, /* 0x88-0x8B */
+	0x90, 0x6A, 0xB8, 0xAC, 0xB8, 0xAD, 0x90, 0x6B, /* 0x8C-0x8F */
+	0x90, 0x6C, 0x90, 0x6D, 0x90, 0x6E, 0x90, 0x6F, /* 0x90-0x93 */
+	0x90, 0x70, 0x90, 0x71, 0x90, 0x72, 0x90, 0x73, /* 0x94-0x97 */
+	0x90, 0x74, 0x90, 0x75, 0x90, 0x76, 0x90, 0x77, /* 0x98-0x9B */
+	0x90, 0x78, 0x90, 0x79, 0x90, 0x7A, 0x90, 0x81, /* 0x9C-0x9F */
+	0x90, 0x82, 0x90, 0x83, 0x90, 0x84, 0x90, 0x85, /* 0xA0-0xA3 */
+	0x90, 0x86, 0x90, 0x87, 0x90, 0x88, 0x90, 0x89, /* 0xA4-0xA7 */
+	0x90, 0x8A, 0x90, 0x8B, 0x90, 0x8C, 0x90, 0x8D, /* 0xA8-0xAB */
+	0xB8, 0xAE, 0xB8, 0xAF, 0x90, 0x8E, 0x90, 0x8F, /* 0xAC-0xAF */
+	0xB8, 0xB0, 0x90, 0x90, 0x90, 0x91, 0x90, 0x92, /* 0xB0-0xB3 */
+	0xB8, 0xB1, 0x90, 0x93, 0x90, 0x94, 0x90, 0x95, /* 0xB4-0xB7 */
+	0x90, 0x96, 0x90, 0x97, 0x90, 0x98, 0x90, 0x99, /* 0xB8-0xBB */
+	0xB8, 0xB2, 0xB8, 0xB3, 0x90, 0x9A, 0xB8, 0xB4, /* 0xBC-0xBF */
+	0x90, 0x9B, 0xB8, 0xB5, 0x90, 0x9C, 0x90, 0x9D, /* 0xC0-0xC3 */
+	0x90, 0x9E, 0x90, 0x9F, 0x90, 0xA0, 0x90, 0xA1, /* 0xC4-0xC7 */
+	0xB8, 0xB6, 0xB8, 0xB7, 0x90, 0xA2, 0x90, 0xA3, /* 0xC8-0xCB */
+	0xB8, 0xB8, 0x90, 0xA4, 0xB8, 0xB9, 0xB8, 0xBA, /* 0xCC-0xCF */
+	0xB8, 0xBB, 0xB8, 0xBC, 0xB8, 0xBD, 0x90, 0xA5, /* 0xD0-0xD3 */
+	0x90, 0xA6, 0x90, 0xA7, 0x90, 0xA8, 0x90, 0xA9, /* 0xD4-0xD7 */
+	0xB8, 0xBE, 0xB8, 0xBF, 0x90, 0xAA, 0xB8, 0xC0, /* 0xD8-0xDB */
+	0x90, 0xAB, 0xB8, 0xC1, 0xB8, 0xC2, 0x90, 0xAC, /* 0xDC-0xDF */
+	0x90, 0xAD, 0xB8, 0xC3, 0x90, 0xAE, 0xB8, 0xC4, /* 0xE0-0xE3 */
+	0xB8, 0xC5, 0xB8, 0xC6, 0x90, 0xAF, 0x90, 0xB0, /* 0xE4-0xE7 */
+	0xB8, 0xC7, 0x90, 0xB1, 0x90, 0xB2, 0x90, 0xB3, /* 0xE8-0xEB */
+	0xB8, 0xC8, 0x90, 0xB4, 0x90, 0xB5, 0x90, 0xB6, /* 0xEC-0xEF */
+	0x90, 0xB7, 0x90, 0xB8, 0x90, 0xB9, 0x90, 0xBA, /* 0xF0-0xF3 */
+	0xB8, 0xC9, 0xB8, 0xCA, 0x90, 0xBB, 0xB8, 0xCB, /* 0xF4-0xF7 */
+	0xB8, 0xCC, 0xB8, 0xCD, 0xB8, 0xCE, 0x90, 0xBC, /* 0xF8-0xFB */
+	0x90, 0xBD, 0x90, 0xBE, 0x90, 0xBF, 0x90, 0xC0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BA[512] = {
+	0xB8, 0xCF, 0xB8, 0xD0, 0x90, 0xC1, 0x90, 0xC2, /* 0x00-0x03 */
+	0x90, 0xC3, 0x90, 0xC4, 0x90, 0xC5, 0x90, 0xC6, /* 0x04-0x07 */
+	0xB8, 0xD1, 0x90, 0xC7, 0x90, 0xC8, 0x90, 0xC9, /* 0x08-0x0B */
+	0x90, 0xCA, 0x90, 0xCB, 0x90, 0xCC, 0x90, 0xCD, /* 0x0C-0x0F */
+	0x90, 0xCE, 0x90, 0xCF, 0x90, 0xD0, 0x90, 0xD1, /* 0x10-0x13 */
+	0x90, 0xD2, 0xB8, 0xD2, 0x90, 0xD3, 0x90, 0xD4, /* 0x14-0x17 */
+	0x90, 0xD5, 0x90, 0xD6, 0x90, 0xD7, 0x90, 0xD8, /* 0x18-0x1B */
+	0x90, 0xD9, 0x90, 0xDA, 0x90, 0xDB, 0x90, 0xDC, /* 0x1C-0x1F */
+	0x90, 0xDD, 0x90, 0xDE, 0x90, 0xDF, 0x90, 0xE0, /* 0x20-0x23 */
+	0x90, 0xE1, 0x90, 0xE2, 0x90, 0xE3, 0x90, 0xE4, /* 0x24-0x27 */
+	0x90, 0xE5, 0x90, 0xE6, 0x90, 0xE7, 0x90, 0xE8, /* 0x28-0x2B */
+	0x90, 0xE9, 0x90, 0xEA, 0x90, 0xEB, 0x90, 0xEC, /* 0x2C-0x2F */
+	0x90, 0xED, 0x90, 0xEE, 0x90, 0xEF, 0x90, 0xF0, /* 0x30-0x33 */
+	0x90, 0xF1, 0x90, 0xF2, 0x90, 0xF3, 0x90, 0xF4, /* 0x34-0x37 */
+	0xB8, 0xD3, 0xB8, 0xD4, 0x90, 0xF5, 0x90, 0xF6, /* 0x38-0x3B */
+	0xB8, 0xD5, 0x90, 0xF7, 0x90, 0xF8, 0x90, 0xF9, /* 0x3C-0x3F */
+	0xB8, 0xD6, 0x90, 0xFA, 0xB8, 0xD7, 0x90, 0xFB, /* 0x40-0x43 */
+	0x90, 0xFC, 0x90, 0xFD, 0x90, 0xFE, 0x91, 0x41, /* 0x44-0x47 */
+	0xB8, 0xD8, 0xB8, 0xD9, 0x91, 0x42, 0xB8, 0xDA, /* 0x48-0x4B */
+	0x91, 0x43, 0xB8, 0xDB, 0xB8, 0xDC, 0x91, 0x44, /* 0x4C-0x4F */
+	0x91, 0x45, 0x91, 0x46, 0x91, 0x47, 0xB8, 0xDD, /* 0x50-0x53 */
+	0xB8, 0xDE, 0xB8, 0xDF, 0x91, 0x48, 0x91, 0x49, /* 0x54-0x57 */
+	0xB8, 0xE0, 0x91, 0x4A, 0x91, 0x4B, 0x91, 0x4C, /* 0x58-0x5B */
+	0xB8, 0xE1, 0x91, 0x4D, 0x91, 0x4E, 0x91, 0x4F, /* 0x5C-0x5F */
+	0x91, 0x50, 0x91, 0x51, 0x91, 0x52, 0x91, 0x53, /* 0x60-0x63 */
+	0xB8, 0xE2, 0xB8, 0xE3, 0x91, 0x54, 0xB8, 0xE4, /* 0x64-0x67 */
+	0xB8, 0xE5, 0xB8, 0xE6, 0x91, 0x55, 0x91, 0x56, /* 0x68-0x6B */
+	0x91, 0x57, 0x91, 0x58, 0x91, 0x59, 0x91, 0x5A, /* 0x6C-0x6F */
+	0xB8, 0xE7, 0xB8, 0xE8, 0x91, 0x61, 0x91, 0x62, /* 0x70-0x73 */
+	0xB8, 0xE9, 0x91, 0x63, 0x91, 0x64, 0x91, 0x65, /* 0x74-0x77 */
+	0xB8, 0xEA, 0x91, 0x66, 0x91, 0x67, 0x91, 0x68, /* 0x78-0x7B */
+	0x91, 0x69, 0x91, 0x6A, 0x91, 0x6B, 0x91, 0x6C, /* 0x7C-0x7F */
+	
+	0x91, 0x6D, 0x91, 0x6E, 0x91, 0x6F, 0xB8, 0xEB, /* 0x80-0x83 */
+	0xB8, 0xEC, 0xB8, 0xED, 0x91, 0x70, 0xB8, 0xEE, /* 0x84-0x87 */
+	0x91, 0x71, 0x91, 0x72, 0x91, 0x73, 0x91, 0x74, /* 0x88-0x8B */
+	0xB8, 0xEF, 0x91, 0x75, 0x91, 0x76, 0x91, 0x77, /* 0x8C-0x8F */
+	0x91, 0x78, 0x91, 0x79, 0x91, 0x7A, 0x91, 0x81, /* 0x90-0x93 */
+	0x91, 0x82, 0x91, 0x83, 0x91, 0x84, 0x91, 0x85, /* 0x94-0x97 */
+	0x91, 0x86, 0x91, 0x87, 0x91, 0x88, 0x91, 0x89, /* 0x98-0x9B */
+	0x91, 0x8A, 0x91, 0x8B, 0x91, 0x8C, 0x91, 0x8D, /* 0x9C-0x9F */
+	0x91, 0x8E, 0x91, 0x8F, 0x91, 0x90, 0x91, 0x91, /* 0xA0-0xA3 */
+	0x91, 0x92, 0x91, 0x93, 0x91, 0x94, 0x91, 0x95, /* 0xA4-0xA7 */
+	0xB8, 0xF0, 0xB8, 0xF1, 0x91, 0x96, 0xB8, 0xF2, /* 0xA8-0xAB */
+	0xB8, 0xF3, 0x91, 0x97, 0x91, 0x98, 0x91, 0x99, /* 0xAC-0xAF */
+	0xB8, 0xF4, 0x91, 0x9A, 0xB8, 0xF5, 0x91, 0x9B, /* 0xB0-0xB3 */
+	0x91, 0x9C, 0x91, 0x9D, 0x91, 0x9E, 0x91, 0x9F, /* 0xB4-0xB7 */
+	0xB8, 0xF6, 0xB8, 0xF7, 0x91, 0xA0, 0xB8, 0xF8, /* 0xB8-0xBB */
+	0x91, 0xA1, 0xB8, 0xF9, 0x91, 0xA2, 0x91, 0xA3, /* 0xBC-0xBF */
+	0x91, 0xA4, 0x91, 0xA5, 0x91, 0xA6, 0x91, 0xA7, /* 0xC0-0xC3 */
+	0xB8, 0xFA, 0x91, 0xA8, 0x91, 0xA9, 0x91, 0xAA, /* 0xC4-0xC7 */
+	0xB8, 0xFB, 0x91, 0xAB, 0x91, 0xAC, 0x91, 0xAD, /* 0xC8-0xCB */
+	0x91, 0xAE, 0x91, 0xAF, 0x91, 0xB0, 0x91, 0xB1, /* 0xCC-0xCF */
+	0x91, 0xB2, 0x91, 0xB3, 0x91, 0xB4, 0x91, 0xB5, /* 0xD0-0xD3 */
+	0x91, 0xB6, 0x91, 0xB7, 0x91, 0xB8, 0x91, 0xB9, /* 0xD4-0xD7 */
+	0xB8, 0xFC, 0xB8, 0xFD, 0x91, 0xBA, 0x91, 0xBB, /* 0xD8-0xDB */
+	0x91, 0xBC, 0x91, 0xBD, 0x91, 0xBE, 0x91, 0xBF, /* 0xDC-0xDF */
+	0x91, 0xC0, 0x91, 0xC1, 0x91, 0xC2, 0x91, 0xC3, /* 0xE0-0xE3 */
+	0x91, 0xC4, 0x91, 0xC5, 0x91, 0xC6, 0x91, 0xC7, /* 0xE4-0xE7 */
+	0x91, 0xC8, 0x91, 0xC9, 0x91, 0xCA, 0x91, 0xCB, /* 0xE8-0xEB */
+	0x91, 0xCC, 0x91, 0xCD, 0x91, 0xCE, 0x91, 0xCF, /* 0xEC-0xEF */
+	0x91, 0xD0, 0x91, 0xD1, 0x91, 0xD2, 0x91, 0xD3, /* 0xF0-0xF3 */
+	0x91, 0xD4, 0x91, 0xD5, 0x91, 0xD6, 0x91, 0xD7, /* 0xF4-0xF7 */
+	0x91, 0xD8, 0x91, 0xD9, 0x91, 0xDA, 0x91, 0xDB, /* 0xF8-0xFB */
+	0xB8, 0xFE, 0x91, 0xDC, 0x91, 0xDD, 0x91, 0xDE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BB[512] = {
+	0xB9, 0xA1, 0x91, 0xDF, 0x91, 0xE0, 0x91, 0xE1, /* 0x00-0x03 */
+	0xB9, 0xA2, 0x91, 0xE2, 0x91, 0xE3, 0x91, 0xE4, /* 0x04-0x07 */
+	0x91, 0xE5, 0x91, 0xE6, 0x91, 0xE7, 0x91, 0xE8, /* 0x08-0x0B */
+	0x91, 0xE9, 0xB9, 0xA3, 0x91, 0xEA, 0xB9, 0xA4, /* 0x0C-0x0F */
+	0x91, 0xEB, 0xB9, 0xA5, 0x91, 0xEC, 0x91, 0xED, /* 0x10-0x13 */
+	0x91, 0xEE, 0x91, 0xEF, 0x91, 0xF0, 0x91, 0xF1, /* 0x14-0x17 */
+	0xB9, 0xA6, 0x91, 0xF2, 0x91, 0xF3, 0x91, 0xF4, /* 0x18-0x1B */
+	0xB9, 0xA7, 0x91, 0xF5, 0x91, 0xF6, 0x91, 0xF7, /* 0x1C-0x1F */
+	0xB9, 0xA8, 0x91, 0xF8, 0x91, 0xF9, 0x91, 0xFA, /* 0x20-0x23 */
+	0x91, 0xFB, 0x91, 0xFC, 0x91, 0xFD, 0x91, 0xFE, /* 0x24-0x27 */
+	0x92, 0x41, 0xB9, 0xA9, 0x92, 0x42, 0xB9, 0xAA, /* 0x28-0x2B */
+	0x92, 0x43, 0x92, 0x44, 0x92, 0x45, 0x92, 0x46, /* 0x2C-0x2F */
+	0x92, 0x47, 0x92, 0x48, 0x92, 0x49, 0x92, 0x4A, /* 0x30-0x33 */
+	0xB9, 0xAB, 0xB9, 0xAC, 0xB9, 0xAD, 0x92, 0x4B, /* 0x34-0x37 */
+	0xB9, 0xAE, 0x92, 0x4C, 0x92, 0x4D, 0xB9, 0xAF, /* 0x38-0x3B */
+	0xB9, 0xB0, 0xB9, 0xB1, 0xB9, 0xB2, 0x92, 0x4E, /* 0x3C-0x3F */
+	0x92, 0x4F, 0x92, 0x50, 0x92, 0x51, 0x92, 0x52, /* 0x40-0x43 */
+	0xB9, 0xB3, 0xB9, 0xB4, 0x92, 0x53, 0xB9, 0xB5, /* 0x44-0x47 */
+	0x92, 0x54, 0xB9, 0xB6, 0x92, 0x55, 0x92, 0x56, /* 0x48-0x4B */
+	0x92, 0x57, 0xB9, 0xB7, 0x92, 0x58, 0xB9, 0xB8, /* 0x4C-0x4F */
+	0xB9, 0xB9, 0x92, 0x59, 0x92, 0x5A, 0x92, 0x61, /* 0x50-0x53 */
+	0xB9, 0xBA, 0x92, 0x62, 0x92, 0x63, 0x92, 0x64, /* 0x54-0x57 */
+	0xB9, 0xBB, 0x92, 0x65, 0x92, 0x66, 0x92, 0x67, /* 0x58-0x5B */
+	0x92, 0x68, 0x92, 0x69, 0x92, 0x6A, 0x92, 0x6B, /* 0x5C-0x5F */
+	0x92, 0x6C, 0xB9, 0xBC, 0x92, 0x6D, 0xB9, 0xBD, /* 0x60-0x63 */
+	0x92, 0x6E, 0x92, 0x6F, 0x92, 0x70, 0x92, 0x71, /* 0x64-0x67 */
+	0x92, 0x72, 0x92, 0x73, 0x92, 0x74, 0x92, 0x75, /* 0x68-0x6B */
+	0xB9, 0xBE, 0x92, 0x76, 0x92, 0x77, 0x92, 0x78, /* 0x6C-0x6F */
+	0x92, 0x79, 0x92, 0x7A, 0x92, 0x81, 0x92, 0x82, /* 0x70-0x73 */
+	0x92, 0x83, 0x92, 0x84, 0x92, 0x85, 0x92, 0x86, /* 0x74-0x77 */
+	0x92, 0x87, 0x92, 0x88, 0x92, 0x89, 0x92, 0x8A, /* 0x78-0x7B */
+	0x92, 0x8B, 0x92, 0x8C, 0x92, 0x8D, 0x92, 0x8E, /* 0x7C-0x7F */
+	
+	0x92, 0x8F, 0x92, 0x90, 0x92, 0x91, 0x92, 0x92, /* 0x80-0x83 */
+	0x92, 0x93, 0x92, 0x94, 0x92, 0x95, 0x92, 0x96, /* 0x84-0x87 */
+	0xB9, 0xBF, 0x92, 0x97, 0x92, 0x98, 0x92, 0x99, /* 0x88-0x8B */
+	0xB9, 0xC0, 0x92, 0x9A, 0x92, 0x9B, 0x92, 0x9C, /* 0x8C-0x8F */
+	0xB9, 0xC1, 0x92, 0x9D, 0x92, 0x9E, 0x92, 0x9F, /* 0x90-0x93 */
+	0x92, 0xA0, 0x92, 0xA1, 0x92, 0xA2, 0x92, 0xA3, /* 0x94-0x97 */
+	0x92, 0xA4, 0x92, 0xA5, 0x92, 0xA6, 0x92, 0xA7, /* 0x98-0x9B */
+	0x92, 0xA8, 0x92, 0xA9, 0x92, 0xAA, 0x92, 0xAB, /* 0x9C-0x9F */
+	0x92, 0xAC, 0x92, 0xAD, 0x92, 0xAE, 0x92, 0xAF, /* 0xA0-0xA3 */
+	0xB9, 0xC2, 0x92, 0xB0, 0x92, 0xB1, 0x92, 0xB2, /* 0xA4-0xA7 */
+	0xB9, 0xC3, 0x92, 0xB3, 0x92, 0xB4, 0x92, 0xB5, /* 0xA8-0xAB */
+	0xB9, 0xC4, 0x92, 0xB6, 0x92, 0xB7, 0x92, 0xB8, /* 0xAC-0xAF */
+	0x92, 0xB9, 0x92, 0xBA, 0x92, 0xBB, 0x92, 0xBC, /* 0xB0-0xB3 */
+	0xB9, 0xC5, 0x92, 0xBD, 0x92, 0xBE, 0xB9, 0xC6, /* 0xB4-0xB7 */
+	0x92, 0xBF, 0x92, 0xC0, 0x92, 0xC1, 0x92, 0xC2, /* 0xB8-0xBB */
+	0x92, 0xC3, 0x92, 0xC4, 0x92, 0xC5, 0x92, 0xC6, /* 0xBC-0xBF */
+	0xB9, 0xC7, 0x92, 0xC7, 0x92, 0xC8, 0x92, 0xC9, /* 0xC0-0xC3 */
+	0xB9, 0xC8, 0x92, 0xCA, 0x92, 0xCB, 0x92, 0xCC, /* 0xC4-0xC7 */
+	0xB9, 0xC9, 0x92, 0xCD, 0x92, 0xCE, 0x92, 0xCF, /* 0xC8-0xCB */
+	0x92, 0xD0, 0x92, 0xD1, 0x92, 0xD2, 0x92, 0xD3, /* 0xCC-0xCF */
+	0xB9, 0xCA, 0x92, 0xD4, 0x92, 0xD5, 0xB9, 0xCB, /* 0xD0-0xD3 */
+	0x92, 0xD6, 0x92, 0xD7, 0x92, 0xD8, 0x92, 0xD9, /* 0xD4-0xD7 */
+	0x92, 0xDA, 0x92, 0xDB, 0x92, 0xDC, 0x92, 0xDD, /* 0xD8-0xDB */
+	0x92, 0xDE, 0x92, 0xDF, 0x92, 0xE0, 0x92, 0xE1, /* 0xDC-0xDF */
+	0x92, 0xE2, 0x92, 0xE3, 0x92, 0xE4, 0x92, 0xE5, /* 0xE0-0xE3 */
+	0x92, 0xE6, 0x92, 0xE7, 0x92, 0xE8, 0x92, 0xE9, /* 0xE4-0xE7 */
+	0x92, 0xEA, 0x92, 0xEB, 0x92, 0xEC, 0x92, 0xED, /* 0xE8-0xEB */
+	0x92, 0xEE, 0x92, 0xEF, 0x92, 0xF0, 0x92, 0xF1, /* 0xEC-0xEF */
+	0x92, 0xF2, 0x92, 0xF3, 0x92, 0xF4, 0x92, 0xF5, /* 0xF0-0xF3 */
+	0x92, 0xF6, 0x92, 0xF7, 0x92, 0xF8, 0x92, 0xF9, /* 0xF4-0xF7 */
+	0xB9, 0xCC, 0xB9, 0xCD, 0x92, 0xFA, 0x92, 0xFB, /* 0xF8-0xFB */
+	0xB9, 0xCE, 0x92, 0xFC, 0x92, 0xFD, 0xB9, 0xCF, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BC[512] = {
+	0xB9, 0xD0, 0x92, 0xFE, 0xB9, 0xD1, 0x93, 0x41, /* 0x00-0x03 */
+	0x93, 0x42, 0x93, 0x43, 0x93, 0x44, 0x93, 0x45, /* 0x04-0x07 */
+	0xB9, 0xD2, 0xB9, 0xD3, 0x93, 0x46, 0xB9, 0xD4, /* 0x08-0x0B */
+	0xB9, 0xD5, 0xB9, 0xD6, 0x93, 0x47, 0xB9, 0xD7, /* 0x0C-0x0F */
+	0x93, 0x48, 0xB9, 0xD8, 0x93, 0x49, 0x93, 0x4A, /* 0x10-0x13 */
+	0xB9, 0xD9, 0xB9, 0xDA, 0xB9, 0xDB, 0xB9, 0xDC, /* 0x14-0x17 */
+	0xB9, 0xDD, 0x93, 0x4B, 0x93, 0x4C, 0xB9, 0xDE, /* 0x18-0x1B */
+	0xB9, 0xDF, 0xB9, 0xE0, 0xB9, 0xE1, 0xB9, 0xE2, /* 0x1C-0x1F */
+	0x93, 0x4D, 0x93, 0x4E, 0x93, 0x4F, 0x93, 0x50, /* 0x20-0x23 */
+	0xB9, 0xE3, 0xB9, 0xE4, 0x93, 0x51, 0xB9, 0xE5, /* 0x24-0x27 */
+	0x93, 0x52, 0xB9, 0xE6, 0x93, 0x53, 0x93, 0x54, /* 0x28-0x2B */
+	0x93, 0x55, 0xB9, 0xE7, 0x93, 0x56, 0x93, 0x57, /* 0x2C-0x2F */
+	0xB9, 0xE8, 0xB9, 0xE9, 0x93, 0x58, 0x93, 0x59, /* 0x30-0x33 */
+	0xB9, 0xEA, 0x93, 0x5A, 0x93, 0x61, 0x93, 0x62, /* 0x34-0x37 */
+	0xB9, 0xEB, 0x93, 0x63, 0x93, 0x64, 0x93, 0x65, /* 0x38-0x3B */
+	0x93, 0x66, 0x93, 0x67, 0x93, 0x68, 0x93, 0x69, /* 0x3C-0x3F */
+	0xB9, 0xEC, 0xB9, 0xED, 0x93, 0x6A, 0xB9, 0xEE, /* 0x40-0x43 */
+	0xB9, 0xEF, 0xB9, 0xF0, 0x93, 0x6B, 0x93, 0x6C, /* 0x44-0x47 */
+	0x93, 0x6D, 0xB9, 0xF1, 0x93, 0x6E, 0x93, 0x6F, /* 0x48-0x4B */
+	0xB9, 0xF2, 0xB9, 0xF3, 0x93, 0x70, 0x93, 0x71, /* 0x4C-0x4F */
+	0xB9, 0xF4, 0x93, 0x72, 0x93, 0x73, 0x93, 0x74, /* 0x50-0x53 */
+	0x93, 0x75, 0x93, 0x76, 0x93, 0x77, 0x93, 0x78, /* 0x54-0x57 */
+	0x93, 0x79, 0x93, 0x7A, 0x93, 0x81, 0x93, 0x82, /* 0x58-0x5B */
+	0x93, 0x83, 0xB9, 0xF5, 0x93, 0x84, 0x93, 0x85, /* 0x5C-0x5F */
+	0x93, 0x86, 0x93, 0x87, 0x93, 0x88, 0x93, 0x89, /* 0x60-0x63 */
+	0x93, 0x8A, 0x93, 0x8B, 0x93, 0x8C, 0x93, 0x8D, /* 0x64-0x67 */
+	0x93, 0x8E, 0x93, 0x8F, 0x93, 0x90, 0x93, 0x91, /* 0x68-0x6B */
+	0x93, 0x92, 0x93, 0x93, 0x93, 0x94, 0x93, 0x95, /* 0x6C-0x6F */
+	0x93, 0x96, 0x93, 0x97, 0x93, 0x98, 0x93, 0x99, /* 0x70-0x73 */
+	0x93, 0x9A, 0x93, 0x9B, 0x93, 0x9C, 0x93, 0x9D, /* 0x74-0x77 */
+	0x93, 0x9E, 0x93, 0x9F, 0x93, 0xA0, 0x93, 0xA1, /* 0x78-0x7B */
+	0x93, 0xA2, 0x93, 0xA3, 0x93, 0xA4, 0x93, 0xA5, /* 0x7C-0x7F */
+	
+	0x93, 0xA6, 0x93, 0xA7, 0x93, 0xA8, 0x93, 0xA9, /* 0x80-0x83 */
+	0xB9, 0xF6, 0xB9, 0xF7, 0x93, 0xAA, 0x93, 0xAB, /* 0x84-0x87 */
+	0xB9, 0xF8, 0x93, 0xAC, 0x93, 0xAD, 0xB9, 0xF9, /* 0x88-0x8B */
+	0xB9, 0xFA, 0x93, 0xAE, 0xB9, 0xFB, 0x93, 0xAF, /* 0x8C-0x8F */
+	0x93, 0xB0, 0x93, 0xB1, 0x93, 0xB2, 0x93, 0xB3, /* 0x90-0x93 */
+	0xB9, 0xFC, 0xB9, 0xFD, 0x93, 0xB4, 0xB9, 0xFE, /* 0x94-0x97 */
+	0x93, 0xB5, 0xBA, 0xA1, 0xBA, 0xA2, 0x93, 0xB6, /* 0x98-0x9B */
+	0x93, 0xB7, 0x93, 0xB8, 0x93, 0xB9, 0x93, 0xBA, /* 0x9C-0x9F */
+	0xBA, 0xA3, 0xBA, 0xA4, 0x93, 0xBB, 0x93, 0xBC, /* 0xA0-0xA3 */
+	0xBA, 0xA5, 0x93, 0xBD, 0x93, 0xBE, 0xBA, 0xA6, /* 0xA4-0xA7 */
+	0xBA, 0xA7, 0x93, 0xBF, 0x93, 0xC0, 0x93, 0xC1, /* 0xA8-0xAB */
+	0x93, 0xC2, 0x93, 0xC3, 0x93, 0xC4, 0x93, 0xC5, /* 0xAC-0xAF */
+	0xBA, 0xA8, 0xBA, 0xA9, 0x93, 0xC6, 0xBA, 0xAA, /* 0xB0-0xB3 */
+	0xBA, 0xAB, 0xBA, 0xAC, 0x93, 0xC7, 0x93, 0xC8, /* 0xB4-0xB7 */
+	0x93, 0xC9, 0x93, 0xCA, 0x93, 0xCB, 0x93, 0xCC, /* 0xB8-0xBB */
+	0xBA, 0xAD, 0xBA, 0xAE, 0x93, 0xCD, 0x93, 0xCE, /* 0xBC-0xBF */
+	0xBA, 0xAF, 0x93, 0xCF, 0x93, 0xD0, 0x93, 0xD1, /* 0xC0-0xC3 */
+	0xBA, 0xB0, 0x93, 0xD2, 0x93, 0xD3, 0x93, 0xD4, /* 0xC4-0xC7 */
+	0x93, 0xD5, 0x93, 0xD6, 0x93, 0xD7, 0x93, 0xD8, /* 0xC8-0xCB */
+	0x93, 0xD9, 0xBA, 0xB1, 0x93, 0xDA, 0xBA, 0xB2, /* 0xCC-0xCF */
+	0xBA, 0xB3, 0xBA, 0xB4, 0x93, 0xDB, 0x93, 0xDC, /* 0xD0-0xD3 */
+	0x93, 0xDD, 0xBA, 0xB5, 0x93, 0xDE, 0x93, 0xDF, /* 0xD4-0xD7 */
+	0xBA, 0xB6, 0x93, 0xE0, 0x93, 0xE1, 0x93, 0xE2, /* 0xD8-0xDB */
+	0xBA, 0xB7, 0x93, 0xE3, 0x93, 0xE4, 0x93, 0xE5, /* 0xDC-0xDF */
+	0x93, 0xE6, 0x93, 0xE7, 0x93, 0xE8, 0x93, 0xE9, /* 0xE0-0xE3 */
+	0x93, 0xEA, 0x93, 0xEB, 0x93, 0xEC, 0x93, 0xED, /* 0xE4-0xE7 */
+	0x93, 0xEE, 0x93, 0xEF, 0x93, 0xF0, 0x93, 0xF1, /* 0xE8-0xEB */
+	0x93, 0xF2, 0x93, 0xF3, 0x93, 0xF4, 0x93, 0xF5, /* 0xEC-0xEF */
+	0x93, 0xF6, 0x93, 0xF7, 0x93, 0xF8, 0x93, 0xF9, /* 0xF0-0xF3 */
+	0xBA, 0xB8, 0xBA, 0xB9, 0xBA, 0xBA, 0x93, 0xFA, /* 0xF4-0xF7 */
+	0xBA, 0xBB, 0x93, 0xFB, 0x93, 0xFC, 0x93, 0xFD, /* 0xF8-0xFB */
+	0xBA, 0xBC, 0x93, 0xFE, 0x94, 0x41, 0x94, 0x42, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BD[512] = {
+	0x94, 0x43, 0x94, 0x44, 0x94, 0x45, 0x94, 0x46, /* 0x00-0x03 */
+	0xBA, 0xBD, 0xBA, 0xBE, 0x94, 0x47, 0xBA, 0xBF, /* 0x04-0x07 */
+	0x94, 0x48, 0xBA, 0xC0, 0x94, 0x49, 0x94, 0x4A, /* 0x08-0x0B */
+	0x94, 0x4B, 0x94, 0x4C, 0x94, 0x4D, 0x94, 0x4E, /* 0x0C-0x0F */
+	0xBA, 0xC1, 0x94, 0x4F, 0x94, 0x50, 0x94, 0x51, /* 0x10-0x13 */
+	0xBA, 0xC2, 0x94, 0x52, 0x94, 0x53, 0x94, 0x54, /* 0x14-0x17 */
+	0x94, 0x55, 0x94, 0x56, 0x94, 0x57, 0x94, 0x58, /* 0x18-0x1B */
+	0x94, 0x59, 0x94, 0x5A, 0x94, 0x61, 0x94, 0x62, /* 0x1C-0x1F */
+	0x94, 0x63, 0x94, 0x64, 0x94, 0x65, 0x94, 0x66, /* 0x20-0x23 */
+	0xBA, 0xC3, 0x94, 0x67, 0x94, 0x68, 0x94, 0x69, /* 0x24-0x27 */
+	0x94, 0x6A, 0x94, 0x6B, 0x94, 0x6C, 0x94, 0x6D, /* 0x28-0x2B */
+	0xBA, 0xC4, 0x94, 0x6E, 0x94, 0x6F, 0x94, 0x70, /* 0x2C-0x2F */
+	0x94, 0x71, 0x94, 0x72, 0x94, 0x73, 0x94, 0x74, /* 0x30-0x33 */
+	0x94, 0x75, 0x94, 0x76, 0x94, 0x77, 0x94, 0x78, /* 0x34-0x37 */
+	0x94, 0x79, 0x94, 0x7A, 0x94, 0x81, 0x94, 0x82, /* 0x38-0x3B */
+	0x94, 0x83, 0x94, 0x84, 0x94, 0x85, 0x94, 0x86, /* 0x3C-0x3F */
+	0xBA, 0xC5, 0x94, 0x87, 0x94, 0x88, 0x94, 0x89, /* 0x40-0x43 */
+	0x94, 0x8A, 0x94, 0x8B, 0x94, 0x8C, 0x94, 0x8D, /* 0x44-0x47 */
+	0xBA, 0xC6, 0xBA, 0xC7, 0x94, 0x8E, 0x94, 0x8F, /* 0x48-0x4B */
+	0xBA, 0xC8, 0x94, 0x90, 0x94, 0x91, 0x94, 0x92, /* 0x4C-0x4F */
+	0xBA, 0xC9, 0x94, 0x93, 0x94, 0x94, 0x94, 0x95, /* 0x50-0x53 */
+	0x94, 0x96, 0x94, 0x97, 0x94, 0x98, 0x94, 0x99, /* 0x54-0x57 */
+	0xBA, 0xCA, 0xBA, 0xCB, 0x94, 0x9A, 0x94, 0x9B, /* 0x58-0x5B */
+	0x94, 0x9C, 0x94, 0x9D, 0x94, 0x9E, 0x94, 0x9F, /* 0x5C-0x5F */
+	0x94, 0xA0, 0x94, 0xA1, 0x94, 0xA2, 0x94, 0xA3, /* 0x60-0x63 */
+	0xBA, 0xCC, 0x94, 0xA4, 0x94, 0xA5, 0x94, 0xA6, /* 0x64-0x67 */
+	0xBA, 0xCD, 0x94, 0xA7, 0x94, 0xA8, 0x94, 0xA9, /* 0x68-0x6B */
+	0x94, 0xAA, 0x94, 0xAB, 0x94, 0xAC, 0x94, 0xAD, /* 0x6C-0x6F */
+	0x94, 0xAE, 0x94, 0xAF, 0x94, 0xB0, 0x94, 0xB1, /* 0x70-0x73 */
+	0x94, 0xB2, 0x94, 0xB3, 0x94, 0xB4, 0x94, 0xB5, /* 0x74-0x77 */
+	0x94, 0xB6, 0x94, 0xB7, 0x94, 0xB8, 0x94, 0xB9, /* 0x78-0x7B */
+	0x94, 0xBA, 0x94, 0xBB, 0x94, 0xBC, 0x94, 0xBD, /* 0x7C-0x7F */
+	
+	0xBA, 0xCE, 0xBA, 0xCF, 0x94, 0xBE, 0x94, 0xBF, /* 0x80-0x83 */
+	0xBA, 0xD0, 0x94, 0xC0, 0x94, 0xC1, 0xBA, 0xD1, /* 0x84-0x87 */
+	0xBA, 0xD2, 0xBA, 0xD3, 0xBA, 0xD4, 0x94, 0xC2, /* 0x88-0x8B */
+	0x94, 0xC3, 0x94, 0xC4, 0x94, 0xC5, 0x94, 0xC6, /* 0x8C-0x8F */
+	0xBA, 0xD5, 0xBA, 0xD6, 0x94, 0xC7, 0xBA, 0xD7, /* 0x90-0x93 */
+	0x94, 0xC8, 0xBA, 0xD8, 0x94, 0xC9, 0x94, 0xCA, /* 0x94-0x97 */
+	0x94, 0xCB, 0xBA, 0xD9, 0xBA, 0xDA, 0x94, 0xCC, /* 0x98-0x9B */
+	0xBA, 0xDB, 0x94, 0xCD, 0x94, 0xCE, 0x94, 0xCF, /* 0x9C-0x9F */
+	0x94, 0xD0, 0x94, 0xD1, 0x94, 0xD2, 0x94, 0xD3, /* 0xA0-0xA3 */
+	0xBA, 0xDC, 0x94, 0xD4, 0x94, 0xD5, 0x94, 0xD6, /* 0xA4-0xA7 */
+	0x94, 0xD7, 0x94, 0xD8, 0x94, 0xD9, 0x94, 0xDA, /* 0xA8-0xAB */
+	0x94, 0xDB, 0x94, 0xDC, 0x94, 0xDD, 0x94, 0xDE, /* 0xAC-0xAF */
+	0xBA, 0xDD, 0x94, 0xDF, 0x94, 0xE0, 0x94, 0xE1, /* 0xB0-0xB3 */
+	0x94, 0xE2, 0x94, 0xE3, 0x94, 0xE4, 0x94, 0xE5, /* 0xB4-0xB7 */
+	0xBA, 0xDE, 0x94, 0xE6, 0x94, 0xE7, 0x94, 0xE8, /* 0xB8-0xBB */
+	0x94, 0xE9, 0x94, 0xEA, 0x94, 0xEB, 0x94, 0xEC, /* 0xBC-0xBF */
+	0x94, 0xED, 0x94, 0xEE, 0x94, 0xEF, 0x94, 0xF0, /* 0xC0-0xC3 */
+	0x94, 0xF1, 0x94, 0xF2, 0x94, 0xF3, 0x94, 0xF4, /* 0xC4-0xC7 */
+	0x94, 0xF5, 0x94, 0xF6, 0x94, 0xF7, 0x94, 0xF8, /* 0xC8-0xCB */
+	0x94, 0xF9, 0x94, 0xFA, 0x94, 0xFB, 0x94, 0xFC, /* 0xCC-0xCF */
+	0x94, 0xFD, 0x94, 0xFE, 0x95, 0x41, 0x95, 0x42, /* 0xD0-0xD3 */
+	0xBA, 0xDF, 0xBA, 0xE0, 0x95, 0x43, 0x95, 0x44, /* 0xD4-0xD7 */
+	0xBA, 0xE1, 0x95, 0x45, 0x95, 0x46, 0x95, 0x47, /* 0xD8-0xDB */
+	0xBA, 0xE2, 0x95, 0x48, 0x95, 0x49, 0x95, 0x4A, /* 0xDC-0xDF */
+	0x95, 0x4B, 0x95, 0x4C, 0x95, 0x4D, 0x95, 0x4E, /* 0xE0-0xE3 */
+	0x95, 0x4F, 0x95, 0x50, 0x95, 0x51, 0x95, 0x52, /* 0xE4-0xE7 */
+	0x95, 0x53, 0xBA, 0xE3, 0x95, 0x54, 0x95, 0x55, /* 0xE8-0xEB */
+	0x95, 0x56, 0x95, 0x57, 0x95, 0x58, 0x95, 0x59, /* 0xEC-0xEF */
+	0xBA, 0xE4, 0x95, 0x5A, 0x95, 0x61, 0x95, 0x62, /* 0xF0-0xF3 */
+	0xBA, 0xE5, 0x95, 0x63, 0x95, 0x64, 0x95, 0x65, /* 0xF4-0xF7 */
+	0xBA, 0xE6, 0x95, 0x66, 0x95, 0x67, 0x95, 0x68, /* 0xF8-0xFB */
+	0x95, 0x69, 0x95, 0x6A, 0x95, 0x6B, 0x95, 0x6C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BE[512] = {
+	0xBA, 0xE7, 0x95, 0x6D, 0x95, 0x6E, 0xBA, 0xE8, /* 0x00-0x03 */
+	0x95, 0x6F, 0xBA, 0xE9, 0x95, 0x70, 0x95, 0x71, /* 0x04-0x07 */
+	0x95, 0x72, 0x95, 0x73, 0x95, 0x74, 0x95, 0x75, /* 0x08-0x0B */
+	0xBA, 0xEA, 0xBA, 0xEB, 0x95, 0x76, 0x95, 0x77, /* 0x0C-0x0F */
+	0xBA, 0xEC, 0x95, 0x78, 0x95, 0x79, 0x95, 0x7A, /* 0x10-0x13 */
+	0xBA, 0xED, 0x95, 0x81, 0x95, 0x82, 0x95, 0x83, /* 0x14-0x17 */
+	0x95, 0x84, 0x95, 0x85, 0x95, 0x86, 0x95, 0x87, /* 0x18-0x1B */
+	0xBA, 0xEE, 0xBA, 0xEF, 0x95, 0x88, 0xBA, 0xF0, /* 0x1C-0x1F */
+	0x95, 0x89, 0x95, 0x8A, 0x95, 0x8B, 0x95, 0x8C, /* 0x20-0x23 */
+	0x95, 0x8D, 0x95, 0x8E, 0x95, 0x8F, 0x95, 0x90, /* 0x24-0x27 */
+	0x95, 0x91, 0x95, 0x92, 0x95, 0x93, 0x95, 0x94, /* 0x28-0x2B */
+	0x95, 0x95, 0x95, 0x96, 0x95, 0x97, 0x95, 0x98, /* 0x2C-0x2F */
+	0x95, 0x99, 0x95, 0x9A, 0x95, 0x9B, 0x95, 0x9C, /* 0x30-0x33 */
+	0x95, 0x9D, 0x95, 0x9E, 0x95, 0x9F, 0x95, 0xA0, /* 0x34-0x37 */
+	0x95, 0xA1, 0x95, 0xA2, 0x95, 0xA3, 0x95, 0xA4, /* 0x38-0x3B */
+	0x95, 0xA5, 0x95, 0xA6, 0x95, 0xA7, 0x95, 0xA8, /* 0x3C-0x3F */
+	0x95, 0xA9, 0x95, 0xAA, 0x95, 0xAB, 0x95, 0xAC, /* 0x40-0x43 */
+	0xBA, 0xF1, 0xBA, 0xF2, 0x95, 0xAD, 0x95, 0xAE, /* 0x44-0x47 */
+	0xBA, 0xF3, 0x95, 0xAF, 0x95, 0xB0, 0x95, 0xB1, /* 0x48-0x4B */
+	0xBA, 0xF4, 0x95, 0xB2, 0xBA, 0xF5, 0x95, 0xB3, /* 0x4C-0x4F */
+	0x95, 0xB4, 0x95, 0xB5, 0x95, 0xB6, 0x95, 0xB7, /* 0x50-0x53 */
+	0xBA, 0xF6, 0xBA, 0xF7, 0x95, 0xB8, 0xBA, 0xF8, /* 0x54-0x57 */
+	0x95, 0xB9, 0xBA, 0xF9, 0xBA, 0xFA, 0xBA, 0xFB, /* 0x58-0x5B */
+	0x95, 0xBA, 0x95, 0xBB, 0x95, 0xBC, 0x95, 0xBD, /* 0x5C-0x5F */
+	0xBA, 0xFC, 0xBA, 0xFD, 0x95, 0xBE, 0x95, 0xBF, /* 0x60-0x63 */
+	0xBA, 0xFE, 0x95, 0xC0, 0x95, 0xC1, 0x95, 0xC2, /* 0x64-0x67 */
+	0xBB, 0xA1, 0x95, 0xC3, 0xBB, 0xA2, 0x95, 0xC4, /* 0x68-0x6B */
+	0x95, 0xC5, 0x95, 0xC6, 0x95, 0xC7, 0x95, 0xC8, /* 0x6C-0x6F */
+	0xBB, 0xA3, 0xBB, 0xA4, 0x95, 0xC9, 0xBB, 0xA5, /* 0x70-0x73 */
+	0xBB, 0xA6, 0xBB, 0xA7, 0x95, 0xCA, 0x95, 0xCB, /* 0x74-0x77 */
+	0x95, 0xCC, 0x95, 0xCD, 0x95, 0xCE, 0xBB, 0xA8, /* 0x78-0x7B */
+	0xBB, 0xA9, 0xBB, 0xAA, 0x95, 0xCF, 0x95, 0xD0, /* 0x7C-0x7F */
+	
+	0xBB, 0xAB, 0x95, 0xD1, 0x95, 0xD2, 0x95, 0xD3, /* 0x80-0x83 */
+	0xBB, 0xAC, 0x95, 0xD4, 0x95, 0xD5, 0x95, 0xD6, /* 0x84-0x87 */
+	0x95, 0xD7, 0x95, 0xD8, 0x95, 0xD9, 0x95, 0xDA, /* 0x88-0x8B */
+	0xBB, 0xAD, 0xBB, 0xAE, 0x95, 0xDB, 0xBB, 0xAF, /* 0x8C-0x8F */
+	0xBB, 0xB0, 0xBB, 0xB1, 0x95, 0xDC, 0x95, 0xDD, /* 0x90-0x93 */
+	0x95, 0xDE, 0x95, 0xDF, 0x95, 0xE0, 0x95, 0xE1, /* 0x94-0x97 */
+	0xBB, 0xB2, 0xBB, 0xB3, 0x95, 0xE2, 0x95, 0xE3, /* 0x98-0x9B */
+	0x95, 0xE4, 0x95, 0xE5, 0x95, 0xE6, 0x95, 0xE7, /* 0x9C-0x9F */
+	0x95, 0xE8, 0x95, 0xE9, 0x95, 0xEA, 0x95, 0xEB, /* 0xA0-0xA3 */
+	0x95, 0xEC, 0x95, 0xED, 0x95, 0xEE, 0x95, 0xEF, /* 0xA4-0xA7 */
+	0xBB, 0xB4, 0x95, 0xF0, 0x95, 0xF1, 0x95, 0xF2, /* 0xA8-0xAB */
+	0x95, 0xF3, 0x95, 0xF4, 0x95, 0xF5, 0x95, 0xF6, /* 0xAC-0xAF */
+	0x95, 0xF7, 0x95, 0xF8, 0x95, 0xF9, 0x95, 0xFA, /* 0xB0-0xB3 */
+	0x95, 0xFB, 0x95, 0xFC, 0x95, 0xFD, 0x95, 0xFE, /* 0xB4-0xB7 */
+	0x96, 0x41, 0x96, 0x42, 0x96, 0x43, 0x96, 0x44, /* 0xB8-0xBB */
+	0x96, 0x45, 0x96, 0x46, 0x96, 0x47, 0x96, 0x48, /* 0xBC-0xBF */
+	0x96, 0x49, 0x96, 0x4A, 0x96, 0x4B, 0x96, 0x4C, /* 0xC0-0xC3 */
+	0x96, 0x4D, 0x96, 0x4E, 0x96, 0x4F, 0x96, 0x50, /* 0xC4-0xC7 */
+	0x96, 0x51, 0x96, 0x52, 0x96, 0x53, 0x96, 0x54, /* 0xC8-0xCB */
+	0x96, 0x55, 0x96, 0x56, 0x96, 0x57, 0x96, 0x58, /* 0xCC-0xCF */
+	0xBB, 0xB5, 0xBB, 0xB6, 0x96, 0x59, 0x96, 0x5A, /* 0xD0-0xD3 */
+	0xBB, 0xB7, 0x96, 0x61, 0x96, 0x62, 0xBB, 0xB8, /* 0xD4-0xD7 */
+	0xBB, 0xB9, 0x96, 0x63, 0x96, 0x64, 0x96, 0x65, /* 0xD8-0xDB */
+	0x96, 0x66, 0x96, 0x67, 0x96, 0x68, 0x96, 0x69, /* 0xDC-0xDF */
+	0xBB, 0xBA, 0x96, 0x6A, 0x96, 0x6B, 0xBB, 0xBB, /* 0xE0-0xE3 */
+	0xBB, 0xBC, 0xBB, 0xBD, 0x96, 0x6C, 0x96, 0x6D, /* 0xE4-0xE7 */
+	0x96, 0x6E, 0x96, 0x6F, 0x96, 0x70, 0x96, 0x71, /* 0xE8-0xEB */
+	0xBB, 0xBE, 0x96, 0x72, 0x96, 0x73, 0x96, 0x74, /* 0xEC-0xEF */
+	0x96, 0x75, 0x96, 0x76, 0x96, 0x77, 0x96, 0x78, /* 0xF0-0xF3 */
+	0x96, 0x79, 0x96, 0x7A, 0x96, 0x81, 0x96, 0x82, /* 0xF4-0xF7 */
+	0x96, 0x83, 0x96, 0x84, 0x96, 0x85, 0x96, 0x86, /* 0xF8-0xFB */
+	0x96, 0x87, 0x96, 0x88, 0x96, 0x89, 0x96, 0x8A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_BF[512] = {
+	0x96, 0x8B, 0xBB, 0xBF, 0x96, 0x8C, 0x96, 0x8D, /* 0x00-0x03 */
+	0x96, 0x8E, 0x96, 0x8F, 0x96, 0x90, 0x96, 0x91, /* 0x04-0x07 */
+	0xBB, 0xC0, 0xBB, 0xC1, 0x96, 0x92, 0x96, 0x93, /* 0x08-0x0B */
+	0x96, 0x94, 0x96, 0x95, 0x96, 0x96, 0x96, 0x97, /* 0x0C-0x0F */
+	0x96, 0x98, 0x96, 0x99, 0x96, 0x9A, 0x96, 0x9B, /* 0x10-0x13 */
+	0x96, 0x9C, 0x96, 0x9D, 0x96, 0x9E, 0x96, 0x9F, /* 0x14-0x17 */
+	0xBB, 0xC2, 0xBB, 0xC3, 0x96, 0xA0, 0xBB, 0xC4, /* 0x18-0x1B */
+	0xBB, 0xC5, 0xBB, 0xC6, 0x96, 0xA1, 0x96, 0xA2, /* 0x1C-0x1F */
+	0x96, 0xA3, 0x96, 0xA4, 0x96, 0xA5, 0x96, 0xA6, /* 0x20-0x23 */
+	0x96, 0xA7, 0x96, 0xA8, 0x96, 0xA9, 0x96, 0xAA, /* 0x24-0x27 */
+	0x96, 0xAB, 0x96, 0xAC, 0x96, 0xAD, 0x96, 0xAE, /* 0x28-0x2B */
+	0x96, 0xAF, 0x96, 0xB0, 0x96, 0xB1, 0x96, 0xB2, /* 0x2C-0x2F */
+	0x96, 0xB3, 0x96, 0xB4, 0x96, 0xB5, 0x96, 0xB6, /* 0x30-0x33 */
+	0x96, 0xB7, 0x96, 0xB8, 0x96, 0xB9, 0x96, 0xBA, /* 0x34-0x37 */
+	0x96, 0xBB, 0x96, 0xBC, 0x96, 0xBD, 0x96, 0xBE, /* 0x38-0x3B */
+	0x96, 0xBF, 0x96, 0xC0, 0x96, 0xC1, 0x96, 0xC2, /* 0x3C-0x3F */
+	0xBB, 0xC7, 0xBB, 0xC8, 0x96, 0xC3, 0x96, 0xC4, /* 0x40-0x43 */
+	0xBB, 0xC9, 0x96, 0xC5, 0x96, 0xC6, 0x96, 0xC7, /* 0x44-0x47 */
+	0xBB, 0xCA, 0x96, 0xC8, 0x96, 0xC9, 0x96, 0xCA, /* 0x48-0x4B */
+	0x96, 0xCB, 0x96, 0xCC, 0x96, 0xCD, 0x96, 0xCE, /* 0x4C-0x4F */
+	0xBB, 0xCB, 0xBB, 0xCC, 0x96, 0xCF, 0x96, 0xD0, /* 0x50-0x53 */
+	0x96, 0xD1, 0xBB, 0xCD, 0x96, 0xD2, 0x96, 0xD3, /* 0x54-0x57 */
+	0x96, 0xD4, 0x96, 0xD5, 0x96, 0xD6, 0x96, 0xD7, /* 0x58-0x5B */
+	0x96, 0xD8, 0x96, 0xD9, 0x96, 0xDA, 0x96, 0xDB, /* 0x5C-0x5F */
+	0x96, 0xDC, 0x96, 0xDD, 0x96, 0xDE, 0x96, 0xDF, /* 0x60-0x63 */
+	0x96, 0xE0, 0x96, 0xE1, 0x96, 0xE2, 0x96, 0xE3, /* 0x64-0x67 */
+	0x96, 0xE4, 0x96, 0xE5, 0x96, 0xE6, 0x96, 0xE7, /* 0x68-0x6B */
+	0x96, 0xE8, 0x96, 0xE9, 0x96, 0xEA, 0x96, 0xEB, /* 0x6C-0x6F */
+	0x96, 0xEC, 0x96, 0xED, 0x96, 0xEE, 0x96, 0xEF, /* 0x70-0x73 */
+	0x96, 0xF0, 0x96, 0xF1, 0x96, 0xF2, 0x96, 0xF3, /* 0x74-0x77 */
+	0x96, 0xF4, 0x96, 0xF5, 0x96, 0xF6, 0x96, 0xF7, /* 0x78-0x7B */
+	0x96, 0xF8, 0x96, 0xF9, 0x96, 0xFA, 0x96, 0xFB, /* 0x7C-0x7F */
+	
+	0x96, 0xFC, 0x96, 0xFD, 0x96, 0xFE, 0x97, 0x41, /* 0x80-0x83 */
+	0x97, 0x42, 0x97, 0x43, 0x97, 0x44, 0x97, 0x45, /* 0x84-0x87 */
+	0x97, 0x46, 0x97, 0x47, 0x97, 0x48, 0x97, 0x49, /* 0x88-0x8B */
+	0x97, 0x4A, 0x97, 0x4B, 0x97, 0x4C, 0x97, 0x4D, /* 0x8C-0x8F */
+	0x97, 0x4E, 0x97, 0x4F, 0x97, 0x50, 0x97, 0x51, /* 0x90-0x93 */
+	0xBB, 0xCE, 0x97, 0x52, 0x97, 0x53, 0x97, 0x54, /* 0x94-0x97 */
+	0x97, 0x55, 0x97, 0x56, 0x97, 0x57, 0x97, 0x58, /* 0x98-0x9B */
+	0x97, 0x59, 0x97, 0x5A, 0x97, 0x61, 0x97, 0x62, /* 0x9C-0x9F */
+	0x97, 0x63, 0x97, 0x64, 0x97, 0x65, 0x97, 0x66, /* 0xA0-0xA3 */
+	0x97, 0x67, 0x97, 0x68, 0x97, 0x69, 0x97, 0x6A, /* 0xA4-0xA7 */
+	0x97, 0x6B, 0x97, 0x6C, 0x97, 0x6D, 0x97, 0x6E, /* 0xA8-0xAB */
+	0x97, 0x6F, 0x97, 0x70, 0x97, 0x71, 0x97, 0x72, /* 0xAC-0xAF */
+	0xBB, 0xCF, 0x97, 0x73, 0x97, 0x74, 0x97, 0x75, /* 0xB0-0xB3 */
+	0x97, 0x76, 0x97, 0x77, 0x97, 0x78, 0x97, 0x79, /* 0xB4-0xB7 */
+	0x97, 0x7A, 0x97, 0x81, 0x97, 0x82, 0x97, 0x83, /* 0xB8-0xBB */
+	0x97, 0x84, 0x97, 0x85, 0x97, 0x86, 0x97, 0x87, /* 0xBC-0xBF */
+	0x97, 0x88, 0x97, 0x89, 0x97, 0x8A, 0x97, 0x8B, /* 0xC0-0xC3 */
+	0x97, 0x8C, 0xBB, 0xD0, 0x97, 0x8D, 0x97, 0x8E, /* 0xC4-0xC7 */
+	0x97, 0x8F, 0x97, 0x90, 0x97, 0x91, 0x97, 0x92, /* 0xC8-0xCB */
+	0xBB, 0xD1, 0xBB, 0xD2, 0x97, 0x93, 0x97, 0x94, /* 0xCC-0xCF */
+	0xBB, 0xD3, 0x97, 0x95, 0x97, 0x96, 0x97, 0x97, /* 0xD0-0xD3 */
+	0xBB, 0xD4, 0x97, 0x98, 0x97, 0x99, 0x97, 0x9A, /* 0xD4-0xD7 */
+	0x97, 0x9B, 0x97, 0x9C, 0x97, 0x9D, 0x97, 0x9E, /* 0xD8-0xDB */
+	0xBB, 0xD5, 0x97, 0x9F, 0x97, 0xA0, 0xBB, 0xD6, /* 0xDC-0xDF */
+	0x97, 0xA1, 0xBB, 0xD7, 0x97, 0xA2, 0x97, 0xA3, /* 0xE0-0xE3 */
+	0x97, 0xA4, 0x97, 0xA5, 0x97, 0xA6, 0x97, 0xA7, /* 0xE4-0xE7 */
+	0x97, 0xA8, 0x97, 0xA9, 0x97, 0xAA, 0x97, 0xAB, /* 0xE8-0xEB */
+	0x97, 0xAC, 0x97, 0xAD, 0x97, 0xAE, 0x97, 0xAF, /* 0xEC-0xEF */
+	0x97, 0xB0, 0x97, 0xB1, 0x97, 0xB2, 0x97, 0xB3, /* 0xF0-0xF3 */
+	0x97, 0xB4, 0x97, 0xB5, 0x97, 0xB6, 0x97, 0xB7, /* 0xF4-0xF7 */
+	0x97, 0xB8, 0x97, 0xB9, 0x97, 0xBA, 0x97, 0xBB, /* 0xF8-0xFB */
+	0x97, 0xBC, 0x97, 0xBD, 0x97, 0xBE, 0x97, 0xBF, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C0[512] = {
+	0x97, 0xC0, 0x97, 0xC1, 0x97, 0xC2, 0x97, 0xC3, /* 0x00-0x03 */
+	0x97, 0xC4, 0x97, 0xC5, 0x97, 0xC6, 0x97, 0xC7, /* 0x04-0x07 */
+	0x97, 0xC8, 0x97, 0xC9, 0x97, 0xCA, 0x97, 0xCB, /* 0x08-0x0B */
+	0x97, 0xCC, 0x97, 0xCD, 0x97, 0xCE, 0x97, 0xCF, /* 0x0C-0x0F */
+	0x97, 0xD0, 0x97, 0xD1, 0x97, 0xD2, 0x97, 0xD3, /* 0x10-0x13 */
+	0x97, 0xD4, 0x97, 0xD5, 0x97, 0xD6, 0x97, 0xD7, /* 0x14-0x17 */
+	0x97, 0xD8, 0x97, 0xD9, 0x97, 0xDA, 0x97, 0xDB, /* 0x18-0x1B */
+	0x97, 0xDC, 0x97, 0xDD, 0x97, 0xDE, 0x97, 0xDF, /* 0x1C-0x1F */
+	0x97, 0xE0, 0x97, 0xE1, 0x97, 0xE2, 0x97, 0xE3, /* 0x20-0x23 */
+	0x97, 0xE4, 0x97, 0xE5, 0x97, 0xE6, 0x97, 0xE7, /* 0x24-0x27 */
+	0x97, 0xE8, 0x97, 0xE9, 0x97, 0xEA, 0x97, 0xEB, /* 0x28-0x2B */
+	0x97, 0xEC, 0x97, 0xED, 0x97, 0xEE, 0x97, 0xEF, /* 0x2C-0x2F */
+	0x97, 0xF0, 0x97, 0xF1, 0x97, 0xF2, 0x97, 0xF3, /* 0x30-0x33 */
+	0x97, 0xF4, 0x97, 0xF5, 0x97, 0xF6, 0x97, 0xF7, /* 0x34-0x37 */
+	0x97, 0xF8, 0x97, 0xF9, 0x97, 0xFA, 0x97, 0xFB, /* 0x38-0x3B */
+	0xBB, 0xD8, 0x97, 0xFC, 0x97, 0xFD, 0x97, 0xFE, /* 0x3C-0x3F */
+	0x98, 0x41, 0x98, 0x42, 0x98, 0x43, 0x98, 0x44, /* 0x40-0x43 */
+	0x98, 0x45, 0x98, 0x46, 0x98, 0x47, 0x98, 0x48, /* 0x44-0x47 */
+	0x98, 0x49, 0x98, 0x4A, 0x98, 0x4B, 0x98, 0x4C, /* 0x48-0x4B */
+	0x98, 0x4D, 0x98, 0x4E, 0x98, 0x4F, 0x98, 0x50, /* 0x4C-0x4F */
+	0x98, 0x51, 0xBB, 0xD9, 0x98, 0x52, 0x98, 0x53, /* 0x50-0x53 */
+	0x98, 0x54, 0x98, 0x55, 0x98, 0x56, 0x98, 0x57, /* 0x54-0x57 */
+	0xBB, 0xDA, 0x98, 0x58, 0x98, 0x59, 0x98, 0x5A, /* 0x58-0x5B */
+	0xBB, 0xDB, 0x98, 0x61, 0x98, 0x62, 0x98, 0x63, /* 0x5C-0x5F */
+	0xBB, 0xDC, 0x98, 0x64, 0x98, 0x65, 0x98, 0x66, /* 0x60-0x63 */
+	0x98, 0x67, 0x98, 0x68, 0x98, 0x69, 0x98, 0x6A, /* 0x64-0x67 */
+	0xBB, 0xDD, 0xBB, 0xDE, 0x98, 0x6B, 0x98, 0x6C, /* 0x68-0x6B */
+	0x98, 0x6D, 0x98, 0x6E, 0x98, 0x6F, 0x98, 0x70, /* 0x6C-0x6F */
+	0x98, 0x71, 0x98, 0x72, 0x98, 0x73, 0x98, 0x74, /* 0x70-0x73 */
+	0x98, 0x75, 0x98, 0x76, 0x98, 0x77, 0x98, 0x78, /* 0x74-0x77 */
+	0x98, 0x79, 0x98, 0x7A, 0x98, 0x81, 0x98, 0x82, /* 0x78-0x7B */
+	0x98, 0x83, 0x98, 0x84, 0x98, 0x85, 0x98, 0x86, /* 0x7C-0x7F */
+	
+	0x98, 0x87, 0x98, 0x88, 0x98, 0x89, 0x98, 0x8A, /* 0x80-0x83 */
+	0x98, 0x8B, 0x98, 0x8C, 0x98, 0x8D, 0x98, 0x8E, /* 0x84-0x87 */
+	0x98, 0x8F, 0x98, 0x90, 0x98, 0x91, 0x98, 0x92, /* 0x88-0x8B */
+	0x98, 0x93, 0x98, 0x94, 0x98, 0x95, 0x98, 0x96, /* 0x8C-0x8F */
+	0xBB, 0xDF, 0xBB, 0xE0, 0x98, 0x97, 0x98, 0x98, /* 0x90-0x93 */
+	0xBB, 0xE1, 0x98, 0x99, 0x98, 0x9A, 0x98, 0x9B, /* 0x94-0x97 */
+	0xBB, 0xE2, 0x98, 0x9C, 0x98, 0x9D, 0x98, 0x9E, /* 0x98-0x9B */
+	0x98, 0x9F, 0x98, 0xA0, 0x98, 0xA1, 0x98, 0xA2, /* 0x9C-0x9F */
+	0xBB, 0xE3, 0xBB, 0xE4, 0x98, 0xA3, 0xBB, 0xE5, /* 0xA0-0xA3 */
+	0x98, 0xA4, 0xBB, 0xE6, 0x98, 0xA5, 0x98, 0xA6, /* 0xA4-0xA7 */
+	0x98, 0xA7, 0x98, 0xA8, 0x98, 0xA9, 0x98, 0xAA, /* 0xA8-0xAB */
+	0xBB, 0xE7, 0xBB, 0xE8, 0x98, 0xAB, 0xBB, 0xE9, /* 0xAC-0xAF */
+	0xBB, 0xEA, 0x98, 0xAC, 0x98, 0xAD, 0xBB, 0xEB, /* 0xB0-0xB3 */
+	0xBB, 0xEC, 0xBB, 0xED, 0xBB, 0xEE, 0x98, 0xAE, /* 0xB4-0xB7 */
+	0x98, 0xAF, 0x98, 0xB0, 0x98, 0xB1, 0x98, 0xB2, /* 0xB8-0xBB */
+	0xBB, 0xEF, 0xBB, 0xF0, 0x98, 0xB3, 0xBB, 0xF1, /* 0xBC-0xBF */
+	0xBB, 0xF2, 0xBB, 0xF3, 0x98, 0xB4, 0x98, 0xB5, /* 0xC0-0xC3 */
+	0x98, 0xB6, 0xBB, 0xF4, 0x98, 0xB7, 0x98, 0xB8, /* 0xC4-0xC7 */
+	0xBB, 0xF5, 0xBB, 0xF6, 0x98, 0xB9, 0x98, 0xBA, /* 0xC8-0xCB */
+	0xBB, 0xF7, 0x98, 0xBB, 0x98, 0xBC, 0x98, 0xBD, /* 0xCC-0xCF */
+	0xBB, 0xF8, 0x98, 0xBE, 0x98, 0xBF, 0x98, 0xC0, /* 0xD0-0xD3 */
+	0x98, 0xC1, 0x98, 0xC2, 0x98, 0xC3, 0x98, 0xC4, /* 0xD4-0xD7 */
+	0xBB, 0xF9, 0xBB, 0xFA, 0x98, 0xC5, 0xBB, 0xFB, /* 0xD8-0xDB */
+	0xBB, 0xFC, 0xBB, 0xFD, 0x98, 0xC6, 0x98, 0xC7, /* 0xDC-0xDF */
+	0x98, 0xC8, 0x98, 0xC9, 0x98, 0xCA, 0x98, 0xCB, /* 0xE0-0xE3 */
+	0xBB, 0xFE, 0xBC, 0xA1, 0x98, 0xCC, 0x98, 0xCD, /* 0xE4-0xE7 */
+	0xBC, 0xA2, 0x98, 0xCE, 0x98, 0xCF, 0x98, 0xD0, /* 0xE8-0xEB */
+	0xBC, 0xA3, 0x98, 0xD1, 0x98, 0xD2, 0x98, 0xD3, /* 0xEC-0xEF */
+	0x98, 0xD4, 0x98, 0xD5, 0x98, 0xD6, 0x98, 0xD7, /* 0xF0-0xF3 */
+	0xBC, 0xA4, 0xBC, 0xA5, 0x98, 0xD8, 0xBC, 0xA6, /* 0xF4-0xF7 */
+	0x98, 0xD9, 0xBC, 0xA7, 0x98, 0xDA, 0x98, 0xDB, /* 0xF8-0xFB */
+	0x98, 0xDC, 0x98, 0xDD, 0x98, 0xDE, 0x98, 0xDF, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C1[512] = {
+	0xBC, 0xA8, 0x98, 0xE0, 0x98, 0xE1, 0x98, 0xE2, /* 0x00-0x03 */
+	0xBC, 0xA9, 0x98, 0xE3, 0x98, 0xE4, 0x98, 0xE5, /* 0x04-0x07 */
+	0xBC, 0xAA, 0x98, 0xE6, 0x98, 0xE7, 0x98, 0xE8, /* 0x08-0x0B */
+	0x98, 0xE9, 0x98, 0xEA, 0x98, 0xEB, 0x98, 0xEC, /* 0x0C-0x0F */
+	0xBC, 0xAB, 0x98, 0xED, 0x98, 0xEE, 0x98, 0xEF, /* 0x10-0x13 */
+	0x98, 0xF0, 0xBC, 0xAC, 0x98, 0xF1, 0x98, 0xF2, /* 0x14-0x17 */
+	0x98, 0xF3, 0x98, 0xF4, 0x98, 0xF5, 0x98, 0xF6, /* 0x18-0x1B */
+	0xBC, 0xAD, 0xBC, 0xAE, 0xBC, 0xAF, 0xBC, 0xB0, /* 0x1C-0x1F */
+	0xBC, 0xB1, 0x98, 0xF7, 0x98, 0xF8, 0xBC, 0xB2, /* 0x20-0x23 */
+	0xBC, 0xB3, 0x98, 0xF9, 0xBC, 0xB4, 0xBC, 0xB5, /* 0x24-0x27 */
+	0x98, 0xFA, 0x98, 0xFB, 0x98, 0xFC, 0x98, 0xFD, /* 0x28-0x2B */
+	0xBC, 0xB6, 0xBC, 0xB7, 0x98, 0xFE, 0xBC, 0xB8, /* 0x2C-0x2F */
+	0xBC, 0xB9, 0xBC, 0xBA, 0x99, 0x41, 0x99, 0x42, /* 0x30-0x33 */
+	0x99, 0x43, 0x99, 0x44, 0xBC, 0xBB, 0x99, 0x45, /* 0x34-0x37 */
+	0xBC, 0xBC, 0xBC, 0xBD, 0x99, 0x46, 0x99, 0x47, /* 0x38-0x3B */
+	0xBC, 0xBE, 0x99, 0x48, 0x99, 0x49, 0x99, 0x4A, /* 0x3C-0x3F */
+	0xBC, 0xBF, 0x99, 0x4B, 0x99, 0x4C, 0x99, 0x4D, /* 0x40-0x43 */
+	0x99, 0x4E, 0x99, 0x4F, 0x99, 0x50, 0x99, 0x51, /* 0x44-0x47 */
+	0xBC, 0xC0, 0xBC, 0xC1, 0x99, 0x52, 0xBC, 0xC2, /* 0x48-0x4B */
+	0xBC, 0xC3, 0xBC, 0xC4, 0x99, 0x53, 0x99, 0x54, /* 0x4C-0x4F */
+	0x99, 0x55, 0x99, 0x56, 0x99, 0x57, 0x99, 0x58, /* 0x50-0x53 */
+	0xBC, 0xC5, 0xBC, 0xC6, 0x99, 0x59, 0x99, 0x5A, /* 0x54-0x57 */
+	0xBC, 0xC7, 0x99, 0x61, 0x99, 0x62, 0x99, 0x63, /* 0x58-0x5B */
+	0xBC, 0xC8, 0x99, 0x64, 0x99, 0x65, 0x99, 0x66, /* 0x5C-0x5F */
+	0x99, 0x67, 0x99, 0x68, 0x99, 0x69, 0x99, 0x6A, /* 0x60-0x63 */
+	0xBC, 0xC9, 0xBC, 0xCA, 0x99, 0x6B, 0xBC, 0xCB, /* 0x64-0x67 */
+	0xBC, 0xCC, 0xBC, 0xCD, 0x99, 0x6C, 0x99, 0x6D, /* 0x68-0x6B */
+	0x99, 0x6E, 0x99, 0x6F, 0x99, 0x70, 0x99, 0x71, /* 0x6C-0x6F */
+	0xBC, 0xCE, 0x99, 0x72, 0x99, 0x73, 0x99, 0x74, /* 0x70-0x73 */
+	0xBC, 0xCF, 0x99, 0x75, 0x99, 0x76, 0x99, 0x77, /* 0x74-0x77 */
+	0xBC, 0xD0, 0x99, 0x78, 0x99, 0x79, 0x99, 0x7A, /* 0x78-0x7B */
+	0x99, 0x81, 0x99, 0x82, 0x99, 0x83, 0x99, 0x84, /* 0x7C-0x7F */
+	
+	0x99, 0x85, 0x99, 0x86, 0x99, 0x87, 0x99, 0x88, /* 0x80-0x83 */
+	0x99, 0x89, 0xBC, 0xD1, 0x99, 0x8A, 0x99, 0x8B, /* 0x84-0x87 */
+	0x99, 0x8C, 0x99, 0x8D, 0x99, 0x8E, 0x99, 0x8F, /* 0x88-0x8B */
+	0xBC, 0xD2, 0xBC, 0xD3, 0xBC, 0xD4, 0x99, 0x90, /* 0x8C-0x8F */
+	0xBC, 0xD5, 0x99, 0x91, 0x99, 0x92, 0x99, 0x93, /* 0x90-0x93 */
+	0xBC, 0xD6, 0x99, 0x94, 0xBC, 0xD7, 0x99, 0x95, /* 0x94-0x97 */
+	0x99, 0x96, 0x99, 0x97, 0x99, 0x98, 0x99, 0x99, /* 0x98-0x9B */
+	0xBC, 0xD8, 0xBC, 0xD9, 0x99, 0x9A, 0xBC, 0xDA, /* 0x9C-0x9F */
+	0x99, 0x9B, 0xBC, 0xDB, 0x99, 0x9C, 0x99, 0x9D, /* 0xA0-0xA3 */
+	0x99, 0x9E, 0xBC, 0xDC, 0x99, 0x9F, 0x99, 0xA0, /* 0xA4-0xA7 */
+	0xBC, 0xDD, 0xBC, 0xDE, 0x99, 0xA1, 0x99, 0xA2, /* 0xA8-0xAB */
+	0xBC, 0xDF, 0x99, 0xA3, 0x99, 0xA4, 0x99, 0xA5, /* 0xAC-0xAF */
+	0xBC, 0xE0, 0x99, 0xA6, 0x99, 0xA7, 0x99, 0xA8, /* 0xB0-0xB3 */
+	0x99, 0xA9, 0x99, 0xAA, 0x99, 0xAB, 0x99, 0xAC, /* 0xB4-0xB7 */
+	0x99, 0xAD, 0x99, 0xAE, 0x99, 0xAF, 0x99, 0xB0, /* 0xB8-0xBB */
+	0x99, 0xB1, 0xBC, 0xE1, 0x99, 0xB2, 0x99, 0xB3, /* 0xBC-0xBF */
+	0x99, 0xB4, 0x99, 0xB5, 0x99, 0xB6, 0x99, 0xB7, /* 0xC0-0xC3 */
+	0xBC, 0xE2, 0x99, 0xB8, 0x99, 0xB9, 0x99, 0xBA, /* 0xC4-0xC7 */
+	0xBC, 0xE3, 0x99, 0xBB, 0x99, 0xBC, 0x99, 0xBD, /* 0xC8-0xCB */
+	0xBC, 0xE4, 0x99, 0xBE, 0x99, 0xBF, 0x99, 0xC0, /* 0xCC-0xCF */
+	0x99, 0xC1, 0x99, 0xC2, 0x99, 0xC3, 0x99, 0xC4, /* 0xD0-0xD3 */
+	0xBC, 0xE5, 0x99, 0xC5, 0x99, 0xC6, 0xBC, 0xE6, /* 0xD4-0xD7 */
+	0xBC, 0xE7, 0x99, 0xC7, 0x99, 0xC8, 0x99, 0xC9, /* 0xD8-0xDB */
+	0x99, 0xCA, 0x99, 0xCB, 0x99, 0xCC, 0x99, 0xCD, /* 0xDC-0xDF */
+	0xBC, 0xE8, 0x99, 0xCE, 0x99, 0xCF, 0x99, 0xD0, /* 0xE0-0xE3 */
+	0xBC, 0xE9, 0x99, 0xD1, 0x99, 0xD2, 0x99, 0xD3, /* 0xE4-0xE7 */
+	0xBC, 0xEA, 0x99, 0xD4, 0x99, 0xD5, 0x99, 0xD6, /* 0xE8-0xEB */
+	0x99, 0xD7, 0x99, 0xD8, 0x99, 0xD9, 0x99, 0xDA, /* 0xEC-0xEF */
+	0xBC, 0xEB, 0xBC, 0xEC, 0x99, 0xDB, 0xBC, 0xED, /* 0xF0-0xF3 */
+	0x99, 0xDC, 0x99, 0xDD, 0x99, 0xDE, 0x99, 0xDF, /* 0xF4-0xF7 */
+	0x99, 0xE0, 0x99, 0xE1, 0x99, 0xE2, 0x99, 0xE3, /* 0xF8-0xFB */
+	0xBC, 0xEE, 0xBC, 0xEF, 0x99, 0xE4, 0x99, 0xE5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C2[512] = {
+	0xBC, 0xF0, 0x99, 0xE6, 0x99, 0xE7, 0x99, 0xE8, /* 0x00-0x03 */
+	0xBC, 0xF1, 0x99, 0xE9, 0x99, 0xEA, 0x99, 0xEB, /* 0x04-0x07 */
+	0x99, 0xEC, 0x99, 0xED, 0x99, 0xEE, 0x99, 0xEF, /* 0x08-0x0B */
+	0xBC, 0xF2, 0xBC, 0xF3, 0x99, 0xF0, 0xBC, 0xF4, /* 0x0C-0x0F */
+	0x99, 0xF1, 0xBC, 0xF5, 0x99, 0xF2, 0x99, 0xF3, /* 0x10-0x13 */
+	0x99, 0xF4, 0x99, 0xF5, 0x99, 0xF6, 0x99, 0xF7, /* 0x14-0x17 */
+	0xBC, 0xF6, 0xBC, 0xF7, 0x99, 0xF8, 0x99, 0xF9, /* 0x18-0x1B */
+	0xBC, 0xF8, 0x99, 0xFA, 0x99, 0xFB, 0xBC, 0xF9, /* 0x1C-0x1F */
+	0xBC, 0xFA, 0x99, 0xFC, 0x99, 0xFD, 0x99, 0xFE, /* 0x20-0x23 */
+	0x9A, 0x41, 0x9A, 0x42, 0x9A, 0x43, 0x9A, 0x44, /* 0x24-0x27 */
+	0xBC, 0xFB, 0xBC, 0xFC, 0x9A, 0x45, 0xBC, 0xFD, /* 0x28-0x2B */
+	0x9A, 0x46, 0xBC, 0xFE, 0x9A, 0x47, 0xBD, 0xA1, /* 0x2C-0x2F */
+	0x9A, 0x48, 0xBD, 0xA2, 0xBD, 0xA3, 0x9A, 0x49, /* 0x30-0x33 */
+	0xBD, 0xA4, 0x9A, 0x4A, 0x9A, 0x4B, 0x9A, 0x4C, /* 0x34-0x37 */
+	0x9A, 0x4D, 0x9A, 0x4E, 0x9A, 0x4F, 0x9A, 0x50, /* 0x38-0x3B */
+	0x9A, 0x51, 0x9A, 0x52, 0x9A, 0x53, 0x9A, 0x54, /* 0x3C-0x3F */
+	0x9A, 0x55, 0x9A, 0x56, 0x9A, 0x57, 0x9A, 0x58, /* 0x40-0x43 */
+	0x9A, 0x59, 0x9A, 0x5A, 0x9A, 0x61, 0x9A, 0x62, /* 0x44-0x47 */
+	0xBD, 0xA5, 0x9A, 0x63, 0x9A, 0x64, 0x9A, 0x65, /* 0x48-0x4B */
+	0x9A, 0x66, 0x9A, 0x67, 0x9A, 0x68, 0x9A, 0x69, /* 0x4C-0x4F */
+	0xBD, 0xA6, 0xBD, 0xA7, 0x9A, 0x6A, 0x9A, 0x6B, /* 0x50-0x53 */
+	0xBD, 0xA8, 0x9A, 0x6C, 0x9A, 0x6D, 0x9A, 0x6E, /* 0x54-0x57 */
+	0xBD, 0xA9, 0x9A, 0x6F, 0x9A, 0x70, 0x9A, 0x71, /* 0x58-0x5B */
+	0x9A, 0x72, 0x9A, 0x73, 0x9A, 0x74, 0x9A, 0x75, /* 0x5C-0x5F */
+	0xBD, 0xAA, 0x9A, 0x76, 0x9A, 0x77, 0x9A, 0x78, /* 0x60-0x63 */
+	0x9A, 0x79, 0xBD, 0xAB, 0x9A, 0x7A, 0x9A, 0x81, /* 0x64-0x67 */
+	0x9A, 0x82, 0x9A, 0x83, 0x9A, 0x84, 0x9A, 0x85, /* 0x68-0x6B */
+	0xBD, 0xAC, 0xBD, 0xAD, 0x9A, 0x86, 0x9A, 0x87, /* 0x6C-0x6F */
+	0xBD, 0xAE, 0x9A, 0x88, 0x9A, 0x89, 0x9A, 0x8A, /* 0x70-0x73 */
+	0xBD, 0xAF, 0x9A, 0x8B, 0x9A, 0x8C, 0x9A, 0x8D, /* 0x74-0x77 */
+	0x9A, 0x8E, 0x9A, 0x8F, 0x9A, 0x90, 0x9A, 0x91, /* 0x78-0x7B */
+	0xBD, 0xB0, 0xBD, 0xB1, 0x9A, 0x92, 0xBD, 0xB2, /* 0x7C-0x7F */
+	
+	0x9A, 0x93, 0xBD, 0xB3, 0x9A, 0x94, 0x9A, 0x95, /* 0x80-0x83 */
+	0x9A, 0x96, 0x9A, 0x97, 0x9A, 0x98, 0x9A, 0x99, /* 0x84-0x87 */
+	0xBD, 0xB4, 0xBD, 0xB5, 0x9A, 0x9A, 0x9A, 0x9B, /* 0x88-0x8B */
+	0x9A, 0x9C, 0x9A, 0x9D, 0x9A, 0x9E, 0x9A, 0x9F, /* 0x8C-0x8F */
+	0xBD, 0xB6, 0x9A, 0xA0, 0x9A, 0xA1, 0x9A, 0xA2, /* 0x90-0x93 */
+	0x9A, 0xA3, 0x9A, 0xA4, 0x9A, 0xA5, 0x9A, 0xA6, /* 0x94-0x97 */
+	0xBD, 0xB7, 0x9A, 0xA7, 0x9A, 0xA8, 0xBD, 0xB8, /* 0x98-0x9B */
+	0x9A, 0xA9, 0xBD, 0xB9, 0x9A, 0xAA, 0x9A, 0xAB, /* 0x9C-0x9F */
+	0x9A, 0xAC, 0x9A, 0xAD, 0x9A, 0xAE, 0x9A, 0xAF, /* 0xA0-0xA3 */
+	0xBD, 0xBA, 0xBD, 0xBB, 0x9A, 0xB0, 0x9A, 0xB1, /* 0xA4-0xA7 */
+	0xBD, 0xBC, 0x9A, 0xB2, 0x9A, 0xB3, 0x9A, 0xB4, /* 0xA8-0xAB */
+	0xBD, 0xBD, 0xBD, 0xBE, 0x9A, 0xB5, 0x9A, 0xB6, /* 0xAC-0xAF */
+	0x9A, 0xB7, 0x9A, 0xB8, 0x9A, 0xB9, 0x9A, 0xBA, /* 0xB0-0xB3 */
+	0xBD, 0xBF, 0xBD, 0xC0, 0x9A, 0xBB, 0xBD, 0xC1, /* 0xB4-0xB7 */
+	0x9A, 0xBC, 0xBD, 0xC2, 0x9A, 0xBD, 0x9A, 0xBE, /* 0xB8-0xBB */
+	0x9A, 0xBF, 0x9A, 0xC0, 0x9A, 0xC1, 0x9A, 0xC2, /* 0xBC-0xBF */
+	0x9A, 0xC3, 0x9A, 0xC4, 0x9A, 0xC5, 0x9A, 0xC6, /* 0xC0-0xC3 */
+	0x9A, 0xC7, 0x9A, 0xC8, 0x9A, 0xC9, 0x9A, 0xCA, /* 0xC4-0xC7 */
+	0x9A, 0xCB, 0x9A, 0xCC, 0x9A, 0xCD, 0x9A, 0xCE, /* 0xC8-0xCB */
+	0x9A, 0xCF, 0x9A, 0xD0, 0x9A, 0xD1, 0x9A, 0xD2, /* 0xCC-0xCF */
+	0x9A, 0xD3, 0x9A, 0xD4, 0x9A, 0xD5, 0x9A, 0xD6, /* 0xD0-0xD3 */
+	0x9A, 0xD7, 0x9A, 0xD8, 0x9A, 0xD9, 0x9A, 0xDA, /* 0xD4-0xD7 */
+	0x9A, 0xDB, 0x9A, 0xDC, 0x9A, 0xDD, 0x9A, 0xDE, /* 0xD8-0xDB */
+	0xBD, 0xC3, 0xBD, 0xC4, 0x9A, 0xDF, 0x9A, 0xE0, /* 0xDC-0xDF */
+	0xBD, 0xC5, 0x9A, 0xE1, 0x9A, 0xE2, 0xBD, 0xC6, /* 0xE0-0xE3 */
+	0xBD, 0xC7, 0x9A, 0xE3, 0x9A, 0xE4, 0x9A, 0xE5, /* 0xE4-0xE7 */
+	0x9A, 0xE6, 0x9A, 0xE7, 0x9A, 0xE8, 0xBD, 0xC8, /* 0xE8-0xEB */
+	0xBD, 0xC9, 0xBD, 0xCA, 0x9A, 0xE9, 0xBD, 0xCB, /* 0xEC-0xEF */
+	0x9A, 0xEA, 0xBD, 0xCC, 0x9A, 0xEB, 0x9A, 0xEC, /* 0xF0-0xF3 */
+	0x9A, 0xED, 0x9A, 0xEE, 0xBD, 0xCD, 0x9A, 0xEF, /* 0xF4-0xF7 */
+	0xBD, 0xCE, 0xBD, 0xCF, 0x9A, 0xF0, 0xBD, 0xD0, /* 0xF8-0xFB */
+	0xBD, 0xD1, 0x9A, 0xF1, 0x9A, 0xF2, 0x9A, 0xF3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C3[512] = {
+	0xBD, 0xD2, 0x9A, 0xF4, 0x9A, 0xF5, 0x9A, 0xF6, /* 0x00-0x03 */
+	0x9A, 0xF7, 0x9A, 0xF8, 0x9A, 0xF9, 0x9A, 0xFA, /* 0x04-0x07 */
+	0xBD, 0xD3, 0xBD, 0xD4, 0x9A, 0xFB, 0x9A, 0xFC, /* 0x08-0x0B */
+	0xBD, 0xD5, 0xBD, 0xD6, 0x9A, 0xFD, 0x9A, 0xFE, /* 0x0C-0x0F */
+	0x9B, 0x41, 0x9B, 0x42, 0x9B, 0x43, 0xBD, 0xD7, /* 0x10-0x13 */
+	0xBD, 0xD8, 0xBD, 0xD9, 0x9B, 0x44, 0x9B, 0x45, /* 0x14-0x17 */
+	0xBD, 0xDA, 0x9B, 0x46, 0x9B, 0x47, 0x9B, 0x48, /* 0x18-0x1B */
+	0xBD, 0xDB, 0x9B, 0x49, 0x9B, 0x4A, 0x9B, 0x4B, /* 0x1C-0x1F */
+	0x9B, 0x4C, 0x9B, 0x4D, 0x9B, 0x4E, 0x9B, 0x4F, /* 0x20-0x23 */
+	0xBD, 0xDC, 0xBD, 0xDD, 0x9B, 0x50, 0x9B, 0x51, /* 0x24-0x27 */
+	0xBD, 0xDE, 0xBD, 0xDF, 0x9B, 0x52, 0x9B, 0x53, /* 0x28-0x2B */
+	0x9B, 0x54, 0x9B, 0x55, 0x9B, 0x56, 0x9B, 0x57, /* 0x2C-0x2F */
+	0x9B, 0x58, 0x9B, 0x59, 0x9B, 0x5A, 0x9B, 0x61, /* 0x30-0x33 */
+	0x9B, 0x62, 0x9B, 0x63, 0x9B, 0x64, 0x9B, 0x65, /* 0x34-0x37 */
+	0x9B, 0x66, 0x9B, 0x67, 0x9B, 0x68, 0x9B, 0x69, /* 0x38-0x3B */
+	0x9B, 0x6A, 0x9B, 0x6B, 0x9B, 0x6C, 0x9B, 0x6D, /* 0x3C-0x3F */
+	0x9B, 0x6E, 0x9B, 0x6F, 0x9B, 0x70, 0x9B, 0x71, /* 0x40-0x43 */
+	0x9B, 0x72, 0xBD, 0xE0, 0x9B, 0x73, 0x9B, 0x74, /* 0x44-0x47 */
+	0x9B, 0x75, 0x9B, 0x76, 0x9B, 0x77, 0x9B, 0x78, /* 0x48-0x4B */
+	0x9B, 0x79, 0x9B, 0x7A, 0x9B, 0x81, 0x9B, 0x82, /* 0x4C-0x4F */
+	0x9B, 0x83, 0x9B, 0x84, 0x9B, 0x85, 0x9B, 0x86, /* 0x50-0x53 */
+	0x9B, 0x87, 0x9B, 0x88, 0x9B, 0x89, 0x9B, 0x8A, /* 0x54-0x57 */
+	0x9B, 0x8B, 0x9B, 0x8C, 0x9B, 0x8D, 0x9B, 0x8E, /* 0x58-0x5B */
+	0x9B, 0x8F, 0x9B, 0x90, 0x9B, 0x91, 0x9B, 0x92, /* 0x5C-0x5F */
+	0x9B, 0x93, 0x9B, 0x94, 0x9B, 0x95, 0x9B, 0x96, /* 0x60-0x63 */
+	0x9B, 0x97, 0x9B, 0x98, 0x9B, 0x99, 0x9B, 0x9A, /* 0x64-0x67 */
+	0xBD, 0xE1, 0xBD, 0xE2, 0x9B, 0x9B, 0x9B, 0x9C, /* 0x68-0x6B */
+	0xBD, 0xE3, 0x9B, 0x9D, 0x9B, 0x9E, 0x9B, 0x9F, /* 0x6C-0x6F */
+	0xBD, 0xE4, 0x9B, 0xA0, 0xBD, 0xE5, 0x9B, 0xA1, /* 0x70-0x73 */
+	0x9B, 0xA2, 0x9B, 0xA3, 0x9B, 0xA4, 0x9B, 0xA5, /* 0x74-0x77 */
+	0xBD, 0xE6, 0xBD, 0xE7, 0x9B, 0xA6, 0x9B, 0xA7, /* 0x78-0x7B */
+	0xBD, 0xE8, 0xBD, 0xE9, 0x9B, 0xA8, 0x9B, 0xA9, /* 0x7C-0x7F */
+	
+	0x9B, 0xAA, 0x9B, 0xAB, 0x9B, 0xAC, 0x9B, 0xAD, /* 0x80-0x83 */
+	0xBD, 0xEA, 0x9B, 0xAE, 0x9B, 0xAF, 0x9B, 0xB0, /* 0x84-0x87 */
+	0xBD, 0xEB, 0x9B, 0xB1, 0x9B, 0xB2, 0x9B, 0xB3, /* 0x88-0x8B */
+	0xBD, 0xEC, 0x9B, 0xB4, 0x9B, 0xB5, 0x9B, 0xB6, /* 0x8C-0x8F */
+	0x9B, 0xB7, 0x9B, 0xB8, 0x9B, 0xB9, 0x9B, 0xBA, /* 0x90-0x93 */
+	0x9B, 0xBB, 0x9B, 0xBC, 0x9B, 0xBD, 0x9B, 0xBE, /* 0x94-0x97 */
+	0x9B, 0xBF, 0x9B, 0xC0, 0x9B, 0xC1, 0x9B, 0xC2, /* 0x98-0x9B */
+	0x9B, 0xC3, 0x9B, 0xC4, 0x9B, 0xC5, 0x9B, 0xC6, /* 0x9C-0x9F */
+	0x9B, 0xC7, 0x9B, 0xC8, 0x9B, 0xC9, 0x9B, 0xCA, /* 0xA0-0xA3 */
+	0x9B, 0xCB, 0x9B, 0xCC, 0x9B, 0xCD, 0x9B, 0xCE, /* 0xA4-0xA7 */
+	0x9B, 0xCF, 0x9B, 0xD0, 0x9B, 0xD1, 0x9B, 0xD2, /* 0xA8-0xAB */
+	0x9B, 0xD3, 0x9B, 0xD4, 0x9B, 0xD5, 0x9B, 0xD6, /* 0xAC-0xAF */
+	0x9B, 0xD7, 0x9B, 0xD8, 0x9B, 0xD9, 0x9B, 0xDA, /* 0xB0-0xB3 */
+	0x9B, 0xDB, 0x9B, 0xDC, 0x9B, 0xDD, 0x9B, 0xDE, /* 0xB4-0xB7 */
+	0x9B, 0xDF, 0x9B, 0xE0, 0x9B, 0xE1, 0x9B, 0xE2, /* 0xB8-0xBB */
+	0x9B, 0xE3, 0x9B, 0xE4, 0x9B, 0xE5, 0x9B, 0xE6, /* 0xBC-0xBF */
+	0xBD, 0xED, 0x9B, 0xE7, 0x9B, 0xE8, 0x9B, 0xE9, /* 0xC0-0xC3 */
+	0x9B, 0xEA, 0x9B, 0xEB, 0x9B, 0xEC, 0x9B, 0xED, /* 0xC4-0xC7 */
+	0x9B, 0xEE, 0x9B, 0xEF, 0x9B, 0xF0, 0x9B, 0xF1, /* 0xC8-0xCB */
+	0x9B, 0xF2, 0x9B, 0xF3, 0x9B, 0xF4, 0x9B, 0xF5, /* 0xCC-0xCF */
+	0x9B, 0xF6, 0x9B, 0xF7, 0x9B, 0xF8, 0x9B, 0xF9, /* 0xD0-0xD3 */
+	0x9B, 0xFA, 0x9B, 0xFB, 0x9B, 0xFC, 0x9B, 0xFD, /* 0xD4-0xD7 */
+	0xBD, 0xEE, 0xBD, 0xEF, 0x9B, 0xFE, 0x9C, 0x41, /* 0xD8-0xDB */
+	0xBD, 0xF0, 0x9C, 0x42, 0x9C, 0x43, 0xBD, 0xF1, /* 0xDC-0xDF */
+	0xBD, 0xF2, 0x9C, 0x44, 0xBD, 0xF3, 0x9C, 0x45, /* 0xE0-0xE3 */
+	0x9C, 0x46, 0x9C, 0x47, 0x9C, 0x48, 0x9C, 0x49, /* 0xE4-0xE7 */
+	0xBD, 0xF4, 0xBD, 0xF5, 0x9C, 0x4A, 0x9C, 0x4B, /* 0xE8-0xEB */
+	0x9C, 0x4C, 0xBD, 0xF6, 0x9C, 0x4D, 0x9C, 0x4E, /* 0xEC-0xEF */
+	0x9C, 0x4F, 0x9C, 0x50, 0x9C, 0x51, 0x9C, 0x52, /* 0xF0-0xF3 */
+	0xBD, 0xF7, 0xBD, 0xF8, 0x9C, 0x53, 0x9C, 0x54, /* 0xF4-0xF7 */
+	0xBD, 0xF9, 0x9C, 0x55, 0x9C, 0x56, 0x9C, 0x57, /* 0xF8-0xFB */
+	0x9C, 0x58, 0x9C, 0x59, 0x9C, 0x5A, 0x9C, 0x61, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C4[512] = {
+	0x9C, 0x62, 0x9C, 0x63, 0x9C, 0x64, 0x9C, 0x65, /* 0x00-0x03 */
+	0x9C, 0x66, 0x9C, 0x67, 0x9C, 0x68, 0x9C, 0x69, /* 0x04-0x07 */
+	0xBD, 0xFA, 0x9C, 0x6A, 0x9C, 0x6B, 0x9C, 0x6C, /* 0x08-0x0B */
+	0x9C, 0x6D, 0x9C, 0x6E, 0x9C, 0x6F, 0x9C, 0x70, /* 0x0C-0x0F */
+	0xBD, 0xFB, 0x9C, 0x71, 0x9C, 0x72, 0x9C, 0x73, /* 0x10-0x13 */
+	0x9C, 0x74, 0x9C, 0x75, 0x9C, 0x76, 0x9C, 0x77, /* 0x14-0x17 */
+	0x9C, 0x78, 0x9C, 0x79, 0x9C, 0x7A, 0x9C, 0x81, /* 0x18-0x1B */
+	0x9C, 0x82, 0x9C, 0x83, 0x9C, 0x84, 0x9C, 0x85, /* 0x1C-0x1F */
+	0x9C, 0x86, 0x9C, 0x87, 0x9C, 0x88, 0x9C, 0x89, /* 0x20-0x23 */
+	0xBD, 0xFC, 0x9C, 0x8A, 0x9C, 0x8B, 0x9C, 0x8C, /* 0x24-0x27 */
+	0x9C, 0x8D, 0x9C, 0x8E, 0x9C, 0x8F, 0x9C, 0x90, /* 0x28-0x2B */
+	0xBD, 0xFD, 0x9C, 0x91, 0x9C, 0x92, 0x9C, 0x93, /* 0x2C-0x2F */
+	0xBD, 0xFE, 0x9C, 0x94, 0x9C, 0x95, 0x9C, 0x96, /* 0x30-0x33 */
+	0xBE, 0xA1, 0x9C, 0x97, 0x9C, 0x98, 0x9C, 0x99, /* 0x34-0x37 */
+	0x9C, 0x9A, 0x9C, 0x9B, 0x9C, 0x9C, 0x9C, 0x9D, /* 0x38-0x3B */
+	0xBE, 0xA2, 0xBE, 0xA3, 0x9C, 0x9E, 0x9C, 0x9F, /* 0x3C-0x3F */
+	0x9C, 0xA0, 0x9C, 0xA1, 0x9C, 0xA2, 0x9C, 0xA3, /* 0x40-0x43 */
+	0x9C, 0xA4, 0x9C, 0xA5, 0x9C, 0xA6, 0x9C, 0xA7, /* 0x44-0x47 */
+	0xBE, 0xA4, 0x9C, 0xA8, 0x9C, 0xA9, 0x9C, 0xAA, /* 0x48-0x4B */
+	0x9C, 0xAB, 0x9C, 0xAC, 0x9C, 0xAD, 0x9C, 0xAE, /* 0x4C-0x4F */
+	0x9C, 0xAF, 0x9C, 0xB0, 0x9C, 0xB1, 0x9C, 0xB2, /* 0x50-0x53 */
+	0x9C, 0xB3, 0x9C, 0xB4, 0x9C, 0xB5, 0x9C, 0xB6, /* 0x54-0x57 */
+	0x9C, 0xB7, 0x9C, 0xB8, 0x9C, 0xB9, 0x9C, 0xBA, /* 0x58-0x5B */
+	0x9C, 0xBB, 0x9C, 0xBC, 0x9C, 0xBD, 0x9C, 0xBE, /* 0x5C-0x5F */
+	0x9C, 0xBF, 0x9C, 0xC0, 0x9C, 0xC1, 0x9C, 0xC2, /* 0x60-0x63 */
+	0xBE, 0xA5, 0xBE, 0xA6, 0x9C, 0xC3, 0x9C, 0xC4, /* 0x64-0x67 */
+	0xBE, 0xA7, 0x9C, 0xC5, 0x9C, 0xC6, 0x9C, 0xC7, /* 0x68-0x6B */
+	0xBE, 0xA8, 0x9C, 0xC8, 0x9C, 0xC9, 0x9C, 0xCA, /* 0x6C-0x6F */
+	0x9C, 0xCB, 0x9C, 0xCC, 0x9C, 0xCD, 0x9C, 0xCE, /* 0x70-0x73 */
+	0xBE, 0xA9, 0xBE, 0xAA, 0x9C, 0xCF, 0x9C, 0xD0, /* 0x74-0x77 */
+	0x9C, 0xD1, 0xBE, 0xAB, 0x9C, 0xD2, 0x9C, 0xD3, /* 0x78-0x7B */
+	0x9C, 0xD4, 0x9C, 0xD5, 0x9C, 0xD6, 0x9C, 0xD7, /* 0x7C-0x7F */
+	
+	0xBE, 0xAC, 0x9C, 0xD8, 0x9C, 0xD9, 0x9C, 0xDA, /* 0x80-0x83 */
+	0x9C, 0xDB, 0x9C, 0xDC, 0x9C, 0xDD, 0x9C, 0xDE, /* 0x84-0x87 */
+	0x9C, 0xDF, 0x9C, 0xE0, 0x9C, 0xE1, 0x9C, 0xE2, /* 0x88-0x8B */
+	0x9C, 0xE3, 0x9C, 0xE4, 0x9C, 0xE5, 0x9C, 0xE6, /* 0x8C-0x8F */
+	0x9C, 0xE7, 0x9C, 0xE8, 0x9C, 0xE9, 0x9C, 0xEA, /* 0x90-0x93 */
+	0xBE, 0xAD, 0x9C, 0xEB, 0x9C, 0xEC, 0x9C, 0xED, /* 0x94-0x97 */
+	0x9C, 0xEE, 0x9C, 0xEF, 0x9C, 0xF0, 0x9C, 0xF1, /* 0x98-0x9B */
+	0xBE, 0xAE, 0x9C, 0xF2, 0x9C, 0xF3, 0x9C, 0xF4, /* 0x9C-0x9F */
+	0x9C, 0xF5, 0x9C, 0xF6, 0x9C, 0xF7, 0x9C, 0xF8, /* 0xA0-0xA3 */
+	0x9C, 0xF9, 0x9C, 0xFA, 0x9C, 0xFB, 0x9C, 0xFC, /* 0xA4-0xA7 */
+	0x9C, 0xFD, 0x9C, 0xFE, 0x9D, 0x41, 0x9D, 0x42, /* 0xA8-0xAB */
+	0x9D, 0x43, 0x9D, 0x44, 0x9D, 0x45, 0x9D, 0x46, /* 0xAC-0xAF */
+	0x9D, 0x47, 0x9D, 0x48, 0x9D, 0x49, 0x9D, 0x4A, /* 0xB0-0xB3 */
+	0x9D, 0x4B, 0x9D, 0x4C, 0x9D, 0x4D, 0x9D, 0x4E, /* 0xB4-0xB7 */
+	0xBE, 0xAF, 0x9D, 0x4F, 0x9D, 0x50, 0x9D, 0x51, /* 0xB8-0xBB */
+	0xBE, 0xB0, 0x9D, 0x52, 0x9D, 0x53, 0x9D, 0x54, /* 0xBC-0xBF */
+	0x9D, 0x55, 0x9D, 0x56, 0x9D, 0x57, 0x9D, 0x58, /* 0xC0-0xC3 */
+	0x9D, 0x59, 0x9D, 0x5A, 0x9D, 0x61, 0x9D, 0x62, /* 0xC4-0xC7 */
+	0x9D, 0x63, 0x9D, 0x64, 0x9D, 0x65, 0x9D, 0x66, /* 0xC8-0xCB */
+	0x9D, 0x67, 0x9D, 0x68, 0x9D, 0x69, 0x9D, 0x6A, /* 0xCC-0xCF */
+	0x9D, 0x6B, 0x9D, 0x6C, 0x9D, 0x6D, 0x9D, 0x6E, /* 0xD0-0xD3 */
+	0x9D, 0x6F, 0x9D, 0x70, 0x9D, 0x71, 0x9D, 0x72, /* 0xD4-0xD7 */
+	0x9D, 0x73, 0x9D, 0x74, 0x9D, 0x75, 0x9D, 0x76, /* 0xD8-0xDB */
+	0x9D, 0x77, 0x9D, 0x78, 0x9D, 0x79, 0x9D, 0x7A, /* 0xDC-0xDF */
+	0x9D, 0x81, 0x9D, 0x82, 0x9D, 0x83, 0x9D, 0x84, /* 0xE0-0xE3 */
+	0x9D, 0x85, 0x9D, 0x86, 0x9D, 0x87, 0x9D, 0x88, /* 0xE4-0xE7 */
+	0x9D, 0x89, 0xBE, 0xB1, 0x9D, 0x8A, 0x9D, 0x8B, /* 0xE8-0xEB */
+	0x9D, 0x8C, 0x9D, 0x8D, 0x9D, 0x8E, 0x9D, 0x8F, /* 0xEC-0xEF */
+	0xBE, 0xB2, 0xBE, 0xB3, 0x9D, 0x90, 0x9D, 0x91, /* 0xF0-0xF3 */
+	0xBE, 0xB4, 0x9D, 0x92, 0x9D, 0x93, 0x9D, 0x94, /* 0xF4-0xF7 */
+	0xBE, 0xB5, 0x9D, 0x95, 0xBE, 0xB6, 0x9D, 0x96, /* 0xF8-0xFB */
+	0x9D, 0x97, 0x9D, 0x98, 0x9D, 0x99, 0xBE, 0xB7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C5[512] = {
+	0xBE, 0xB8, 0xBE, 0xB9, 0x9D, 0x9A, 0x9D, 0x9B, /* 0x00-0x03 */
+	0x9D, 0x9C, 0x9D, 0x9D, 0x9D, 0x9E, 0x9D, 0x9F, /* 0x04-0x07 */
+	0x9D, 0xA0, 0x9D, 0xA1, 0x9D, 0xA2, 0x9D, 0xA3, /* 0x08-0x0B */
+	0xBE, 0xBA, 0x9D, 0xA4, 0x9D, 0xA5, 0x9D, 0xA6, /* 0x0C-0x0F */
+	0xBE, 0xBB, 0x9D, 0xA7, 0x9D, 0xA8, 0x9D, 0xA9, /* 0x10-0x13 */
+	0xBE, 0xBC, 0x9D, 0xAA, 0x9D, 0xAB, 0x9D, 0xAC, /* 0x14-0x17 */
+	0x9D, 0xAD, 0x9D, 0xAE, 0x9D, 0xAF, 0x9D, 0xB0, /* 0x18-0x1B */
+	0xBE, 0xBD, 0x9D, 0xB1, 0x9D, 0xB2, 0x9D, 0xB3, /* 0x1C-0x1F */
+	0x9D, 0xB4, 0x9D, 0xB5, 0x9D, 0xB6, 0x9D, 0xB7, /* 0x20-0x23 */
+	0x9D, 0xB8, 0x9D, 0xB9, 0x9D, 0xBA, 0x9D, 0xBB, /* 0x24-0x27 */
+	0xBE, 0xBE, 0xBE, 0xBF, 0x9D, 0xBC, 0x9D, 0xBD, /* 0x28-0x2B */
+	0xBE, 0xC0, 0x9D, 0xBE, 0x9D, 0xBF, 0x9D, 0xC0, /* 0x2C-0x2F */
+	0xBE, 0xC1, 0x9D, 0xC1, 0x9D, 0xC2, 0x9D, 0xC3, /* 0x30-0x33 */
+	0x9D, 0xC4, 0x9D, 0xC5, 0x9D, 0xC6, 0x9D, 0xC7, /* 0x34-0x37 */
+	0xBE, 0xC2, 0xBE, 0xC3, 0x9D, 0xC8, 0xBE, 0xC4, /* 0x38-0x3B */
+	0x9D, 0xC9, 0xBE, 0xC5, 0x9D, 0xCA, 0x9D, 0xCB, /* 0x3C-0x3F */
+	0x9D, 0xCC, 0x9D, 0xCD, 0x9D, 0xCE, 0x9D, 0xCF, /* 0x40-0x43 */
+	0xBE, 0xC6, 0xBE, 0xC7, 0x9D, 0xD0, 0x9D, 0xD1, /* 0x44-0x47 */
+	0xBE, 0xC8, 0xBE, 0xC9, 0xBE, 0xCA, 0x9D, 0xD2, /* 0x48-0x4B */
+	0xBE, 0xCB, 0xBE, 0xCC, 0xBE, 0xCD, 0x9D, 0xD3, /* 0x4C-0x4F */
+	0x9D, 0xD4, 0x9D, 0xD5, 0x9D, 0xD6, 0xBE, 0xCE, /* 0x50-0x53 */
+	0xBE, 0xCF, 0xBE, 0xD0, 0x9D, 0xD7, 0xBE, 0xD1, /* 0x54-0x57 */
+	0xBE, 0xD2, 0xBE, 0xD3, 0x9D, 0xD8, 0x9D, 0xD9, /* 0x58-0x5B */
+	0x9D, 0xDA, 0xBE, 0xD4, 0xBE, 0xD5, 0x9D, 0xDB, /* 0x5C-0x5F */
+	0xBE, 0xD6, 0xBE, 0xD7, 0x9D, 0xDC, 0x9D, 0xDD, /* 0x60-0x63 */
+	0xBE, 0xD8, 0x9D, 0xDE, 0x9D, 0xDF, 0x9D, 0xE0, /* 0x64-0x67 */
+	0xBE, 0xD9, 0x9D, 0xE1, 0x9D, 0xE2, 0x9D, 0xE3, /* 0x68-0x6B */
+	0x9D, 0xE4, 0x9D, 0xE5, 0x9D, 0xE6, 0x9D, 0xE7, /* 0x6C-0x6F */
+	0xBE, 0xDA, 0xBE, 0xDB, 0x9D, 0xE8, 0xBE, 0xDC, /* 0x70-0x73 */
+	0xBE, 0xDD, 0xBE, 0xDE, 0x9D, 0xE9, 0x9D, 0xEA, /* 0x74-0x77 */
+	0x9D, 0xEB, 0x9D, 0xEC, 0x9D, 0xED, 0x9D, 0xEE, /* 0x78-0x7B */
+	0xBE, 0xDF, 0xBE, 0xE0, 0x9D, 0xEF, 0x9D, 0xF0, /* 0x7C-0x7F */
+	
+	0xBE, 0xE1, 0x9D, 0xF1, 0x9D, 0xF2, 0x9D, 0xF3, /* 0x80-0x83 */
+	0xBE, 0xE2, 0x9D, 0xF4, 0x9D, 0xF5, 0xBE, 0xE3, /* 0x84-0x87 */
+	0x9D, 0xF6, 0x9D, 0xF7, 0x9D, 0xF8, 0x9D, 0xF9, /* 0x88-0x8B */
+	0xBE, 0xE4, 0xBE, 0xE5, 0x9D, 0xFA, 0xBE, 0xE6, /* 0x8C-0x8F */
+	0x9D, 0xFB, 0xBE, 0xE7, 0x9D, 0xFC, 0x9D, 0xFD, /* 0x90-0x93 */
+	0x9D, 0xFE, 0xBE, 0xE8, 0x9E, 0x41, 0xBE, 0xE9, /* 0x94-0x97 */
+	0xBE, 0xEA, 0x9E, 0x42, 0x9E, 0x43, 0x9E, 0x44, /* 0x98-0x9B */
+	0xBE, 0xEB, 0x9E, 0x45, 0x9E, 0x46, 0x9E, 0x47, /* 0x9C-0x9F */
+	0xBE, 0xEC, 0x9E, 0x48, 0x9E, 0x49, 0x9E, 0x4A, /* 0xA0-0xA3 */
+	0x9E, 0x4B, 0x9E, 0x4C, 0x9E, 0x4D, 0x9E, 0x4E, /* 0xA4-0xA7 */
+	0x9E, 0x4F, 0xBE, 0xED, 0x9E, 0x50, 0x9E, 0x51, /* 0xA8-0xAB */
+	0x9E, 0x52, 0x9E, 0x53, 0x9E, 0x54, 0x9E, 0x55, /* 0xAC-0xAF */
+	0x9E, 0x56, 0x9E, 0x57, 0x9E, 0x58, 0x9E, 0x59, /* 0xB0-0xB3 */
+	0xBE, 0xEE, 0xBE, 0xEF, 0x9E, 0x5A, 0x9E, 0x61, /* 0xB4-0xB7 */
+	0xBE, 0xF0, 0xBE, 0xF1, 0x9E, 0x62, 0xBE, 0xF2, /* 0xB8-0xBB */
+	0xBE, 0xF3, 0xBE, 0xF4, 0xBE, 0xF5, 0x9E, 0x63, /* 0xBC-0xBF */
+	0x9E, 0x64, 0x9E, 0x65, 0x9E, 0x66, 0x9E, 0x67, /* 0xC0-0xC3 */
+	0xBE, 0xF6, 0xBE, 0xF7, 0xBE, 0xF8, 0xBE, 0xF9, /* 0xC4-0xC7 */
+	0xBE, 0xFA, 0xBE, 0xFB, 0xBE, 0xFC, 0x9E, 0x68, /* 0xC8-0xCB */
+	0xBE, 0xFD, 0x9E, 0x69, 0xBE, 0xFE, 0x9E, 0x6A, /* 0xCC-0xCF */
+	0xBF, 0xA1, 0xBF, 0xA2, 0x9E, 0x6B, 0x9E, 0x6C, /* 0xD0-0xD3 */
+	0xBF, 0xA3, 0x9E, 0x6D, 0x9E, 0x6E, 0x9E, 0x6F, /* 0xD4-0xD7 */
+	0xBF, 0xA4, 0x9E, 0x70, 0x9E, 0x71, 0x9E, 0x72, /* 0xD8-0xDB */
+	0x9E, 0x73, 0x9E, 0x74, 0x9E, 0x75, 0x9E, 0x76, /* 0xDC-0xDF */
+	0xBF, 0xA5, 0xBF, 0xA6, 0x9E, 0x77, 0xBF, 0xA7, /* 0xE0-0xE3 */
+	0x9E, 0x78, 0xBF, 0xA8, 0x9E, 0x79, 0x9E, 0x7A, /* 0xE4-0xE7 */
+	0x9E, 0x81, 0x9E, 0x82, 0x9E, 0x83, 0x9E, 0x84, /* 0xE8-0xEB */
+	0xBF, 0xA9, 0xBF, 0xAA, 0xBF, 0xAB, 0x9E, 0x85, /* 0xEC-0xEF */
+	0xBF, 0xAC, 0x9E, 0x86, 0x9E, 0x87, 0x9E, 0x88, /* 0xF0-0xF3 */
+	0xBF, 0xAD, 0x9E, 0x89, 0xBF, 0xAE, 0xBF, 0xAF, /* 0xF4-0xF7 */
+	0x9E, 0x8A, 0x9E, 0x8B, 0x9E, 0x8C, 0x9E, 0x8D, /* 0xF8-0xFB */
+	0xBF, 0xB0, 0xBF, 0xB1, 0xBF, 0xB2, 0xBF, 0xB3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C6[512] = {
+	0xBF, 0xB4, 0xBF, 0xB5, 0x9E, 0x8E, 0x9E, 0x8F, /* 0x00-0x03 */
+	0x9E, 0x90, 0xBF, 0xB6, 0xBF, 0xB7, 0xBF, 0xB8, /* 0x04-0x07 */
+	0xBF, 0xB9, 0x9E, 0x91, 0x9E, 0x92, 0x9E, 0x93, /* 0x08-0x0B */
+	0xBF, 0xBA, 0x9E, 0x94, 0x9E, 0x95, 0x9E, 0x96, /* 0x0C-0x0F */
+	0xBF, 0xBB, 0x9E, 0x97, 0x9E, 0x98, 0x9E, 0x99, /* 0x10-0x13 */
+	0x9E, 0x9A, 0x9E, 0x9B, 0x9E, 0x9C, 0x9E, 0x9D, /* 0x14-0x17 */
+	0xBF, 0xBC, 0xBF, 0xBD, 0x9E, 0x9E, 0xBF, 0xBE, /* 0x18-0x1B */
+	0xBF, 0xBF, 0x9E, 0x9F, 0x9E, 0xA0, 0x9E, 0xA1, /* 0x1C-0x1F */
+	0x9E, 0xA2, 0x9E, 0xA3, 0x9E, 0xA4, 0x9E, 0xA5, /* 0x20-0x23 */
+	0xBF, 0xC0, 0xBF, 0xC1, 0x9E, 0xA6, 0x9E, 0xA7, /* 0x24-0x27 */
+	0xBF, 0xC2, 0x9E, 0xA8, 0x9E, 0xA9, 0x9E, 0xAA, /* 0x28-0x2B */
+	0xBF, 0xC3, 0xBF, 0xC4, 0xBF, 0xC5, 0x9E, 0xAB, /* 0x2C-0x2F */
+	0xBF, 0xC6, 0x9E, 0xAC, 0x9E, 0xAD, 0xBF, 0xC7, /* 0x30-0x33 */
+	0xBF, 0xC8, 0xBF, 0xC9, 0x9E, 0xAE, 0xBF, 0xCA, /* 0x34-0x37 */
+	0x9E, 0xAF, 0xBF, 0xCB, 0x9E, 0xB0, 0xBF, 0xCC, /* 0x38-0x3B */
+	0x9E, 0xB1, 0x9E, 0xB2, 0x9E, 0xB3, 0x9E, 0xB4, /* 0x3C-0x3F */
+	0xBF, 0xCD, 0xBF, 0xCE, 0x9E, 0xB5, 0x9E, 0xB6, /* 0x40-0x43 */
+	0xBF, 0xCF, 0x9E, 0xB7, 0x9E, 0xB8, 0x9E, 0xB9, /* 0x44-0x47 */
+	0xBF, 0xD0, 0x9E, 0xBA, 0x9E, 0xBB, 0x9E, 0xBC, /* 0x48-0x4B */
+	0x9E, 0xBD, 0x9E, 0xBE, 0x9E, 0xBF, 0x9E, 0xC0, /* 0x4C-0x4F */
+	0xBF, 0xD1, 0xBF, 0xD2, 0x9E, 0xC1, 0xBF, 0xD3, /* 0x50-0x53 */
+	0xBF, 0xD4, 0xBF, 0xD5, 0x9E, 0xC2, 0x9E, 0xC3, /* 0x54-0x57 */
+	0x9E, 0xC4, 0x9E, 0xC5, 0x9E, 0xC6, 0x9E, 0xC7, /* 0x58-0x5B */
+	0xBF, 0xD6, 0xBF, 0xD7, 0x9E, 0xC8, 0x9E, 0xC9, /* 0x5C-0x5F */
+	0xBF, 0xD8, 0x9E, 0xCA, 0x9E, 0xCB, 0x9E, 0xCC, /* 0x60-0x63 */
+	0x9E, 0xCD, 0x9E, 0xCE, 0x9E, 0xCF, 0x9E, 0xD0, /* 0x64-0x67 */
+	0x9E, 0xD1, 0x9E, 0xD2, 0x9E, 0xD3, 0x9E, 0xD4, /* 0x68-0x6B */
+	0xBF, 0xD9, 0x9E, 0xD5, 0x9E, 0xD6, 0xBF, 0xDA, /* 0x6C-0x6F */
+	0x9E, 0xD7, 0xBF, 0xDB, 0x9E, 0xD8, 0x9E, 0xD9, /* 0x70-0x73 */
+	0x9E, 0xDA, 0x9E, 0xDB, 0x9E, 0xDC, 0x9E, 0xDD, /* 0x74-0x77 */
+	0xBF, 0xDC, 0xBF, 0xDD, 0x9E, 0xDE, 0x9E, 0xDF, /* 0x78-0x7B */
+	0xBF, 0xDE, 0x9E, 0xE0, 0x9E, 0xE1, 0x9E, 0xE2, /* 0x7C-0x7F */
+	
+	0xBF, 0xDF, 0x9E, 0xE3, 0x9E, 0xE4, 0x9E, 0xE5, /* 0x80-0x83 */
+	0x9E, 0xE6, 0x9E, 0xE7, 0x9E, 0xE8, 0x9E, 0xE9, /* 0x84-0x87 */
+	0xBF, 0xE0, 0xBF, 0xE1, 0x9E, 0xEA, 0xBF, 0xE2, /* 0x88-0x8B */
+	0x9E, 0xEB, 0xBF, 0xE3, 0x9E, 0xEC, 0x9E, 0xED, /* 0x8C-0x8F */
+	0x9E, 0xEE, 0x9E, 0xEF, 0x9E, 0xF0, 0x9E, 0xF1, /* 0x90-0x93 */
+	0xBF, 0xE4, 0xBF, 0xE5, 0x9E, 0xF2, 0x9E, 0xF3, /* 0x94-0x97 */
+	0xBF, 0xE6, 0x9E, 0xF4, 0x9E, 0xF5, 0x9E, 0xF6, /* 0x98-0x9B */
+	0xBF, 0xE7, 0x9E, 0xF7, 0x9E, 0xF8, 0x9E, 0xF9, /* 0x9C-0x9F */
+	0x9E, 0xFA, 0x9E, 0xFB, 0x9E, 0xFC, 0x9E, 0xFD, /* 0xA0-0xA3 */
+	0xBF, 0xE8, 0xBF, 0xE9, 0x9E, 0xFE, 0xBF, 0xEA, /* 0xA4-0xA7 */
+	0x9F, 0x41, 0xBF, 0xEB, 0x9F, 0x42, 0x9F, 0x43, /* 0xA8-0xAB */
+	0x9F, 0x44, 0x9F, 0x45, 0x9F, 0x46, 0x9F, 0x47, /* 0xAC-0xAF */
+	0xBF, 0xEC, 0xBF, 0xED, 0x9F, 0x48, 0x9F, 0x49, /* 0xB0-0xB3 */
+	0xBF, 0xEE, 0x9F, 0x4A, 0x9F, 0x4B, 0x9F, 0x4C, /* 0xB4-0xB7 */
+	0xBF, 0xEF, 0xBF, 0xF0, 0xBF, 0xF1, 0x9F, 0x4D, /* 0xB8-0xBB */
+	0x9F, 0x4E, 0x9F, 0x4F, 0x9F, 0x50, 0x9F, 0x51, /* 0xBC-0xBF */
+	0xBF, 0xF2, 0xBF, 0xF3, 0x9F, 0x52, 0xBF, 0xF4, /* 0xC0-0xC3 */
+	0x9F, 0x53, 0xBF, 0xF5, 0x9F, 0x54, 0x9F, 0x55, /* 0xC4-0xC7 */
+	0x9F, 0x56, 0x9F, 0x57, 0x9F, 0x58, 0x9F, 0x59, /* 0xC8-0xCB */
+	0xBF, 0xF6, 0xBF, 0xF7, 0x9F, 0x5A, 0x9F, 0x61, /* 0xCC-0xCF */
+	0xBF, 0xF8, 0x9F, 0x62, 0x9F, 0x63, 0x9F, 0x64, /* 0xD0-0xD3 */
+	0xBF, 0xF9, 0x9F, 0x65, 0x9F, 0x66, 0x9F, 0x67, /* 0xD4-0xD7 */
+	0x9F, 0x68, 0x9F, 0x69, 0x9F, 0x6A, 0x9F, 0x6B, /* 0xD8-0xDB */
+	0xBF, 0xFA, 0xBF, 0xFB, 0x9F, 0x6C, 0x9F, 0x6D, /* 0xDC-0xDF */
+	0xBF, 0xFC, 0xBF, 0xFD, 0x9F, 0x6E, 0x9F, 0x6F, /* 0xE0-0xE3 */
+	0x9F, 0x70, 0x9F, 0x71, 0x9F, 0x72, 0x9F, 0x73, /* 0xE4-0xE7 */
+	0xBF, 0xFE, 0xC0, 0xA1, 0x9F, 0x74, 0x9F, 0x75, /* 0xE8-0xEB */
+	0xC0, 0xA2, 0x9F, 0x76, 0x9F, 0x77, 0x9F, 0x78, /* 0xEC-0xEF */
+	0xC0, 0xA3, 0x9F, 0x79, 0x9F, 0x7A, 0x9F, 0x81, /* 0xF0-0xF3 */
+	0x9F, 0x82, 0x9F, 0x83, 0x9F, 0x84, 0x9F, 0x85, /* 0xF4-0xF7 */
+	0xC0, 0xA4, 0xC0, 0xA5, 0x9F, 0x86, 0x9F, 0x87, /* 0xF8-0xFB */
+	0x9F, 0x88, 0xC0, 0xA6, 0x9F, 0x89, 0x9F, 0x8A, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C7[512] = {
+	0x9F, 0x8B, 0x9F, 0x8C, 0x9F, 0x8D, 0x9F, 0x8E, /* 0x00-0x03 */
+	0xC0, 0xA7, 0xC0, 0xA8, 0x9F, 0x8F, 0x9F, 0x90, /* 0x04-0x07 */
+	0xC0, 0xA9, 0x9F, 0x91, 0x9F, 0x92, 0x9F, 0x93, /* 0x08-0x0B */
+	0xC0, 0xAA, 0x9F, 0x94, 0x9F, 0x95, 0x9F, 0x96, /* 0x0C-0x0F */
+	0x9F, 0x97, 0x9F, 0x98, 0x9F, 0x99, 0x9F, 0x9A, /* 0x10-0x13 */
+	0xC0, 0xAB, 0xC0, 0xAC, 0x9F, 0x9B, 0xC0, 0xAD, /* 0x14-0x17 */
+	0x9F, 0x9C, 0xC0, 0xAE, 0x9F, 0x9D, 0x9F, 0x9E, /* 0x18-0x1B */
+	0x9F, 0x9F, 0x9F, 0xA0, 0x9F, 0xA1, 0x9F, 0xA2, /* 0x1C-0x1F */
+	0xC0, 0xAF, 0xC0, 0xB0, 0x9F, 0xA3, 0x9F, 0xA4, /* 0x20-0x23 */
+	0xC0, 0xB1, 0x9F, 0xA5, 0x9F, 0xA6, 0x9F, 0xA7, /* 0x24-0x27 */
+	0xC0, 0xB2, 0x9F, 0xA8, 0x9F, 0xA9, 0x9F, 0xAA, /* 0x28-0x2B */
+	0x9F, 0xAB, 0x9F, 0xAC, 0x9F, 0xAD, 0x9F, 0xAE, /* 0x2C-0x2F */
+	0xC0, 0xB3, 0xC0, 0xB4, 0x9F, 0xAF, 0xC0, 0xB5, /* 0x30-0x33 */
+	0x9F, 0xB0, 0xC0, 0xB6, 0x9F, 0xB1, 0xC0, 0xB7, /* 0x34-0x37 */
+	0x9F, 0xB2, 0x9F, 0xB3, 0x9F, 0xB4, 0x9F, 0xB5, /* 0x38-0x3B */
+	0xC0, 0xB8, 0xC0, 0xB9, 0x9F, 0xB6, 0x9F, 0xB7, /* 0x3C-0x3F */
+	0xC0, 0xBA, 0x9F, 0xB8, 0x9F, 0xB9, 0x9F, 0xBA, /* 0x40-0x43 */
+	0xC0, 0xBB, 0x9F, 0xBB, 0x9F, 0xBC, 0x9F, 0xBD, /* 0x44-0x47 */
+	0x9F, 0xBE, 0x9F, 0xBF, 0xC0, 0xBC, 0x9F, 0xC0, /* 0x48-0x4B */
+	0xC0, 0xBD, 0xC0, 0xBE, 0x9F, 0xC1, 0xC0, 0xBF, /* 0x4C-0x4F */
+	0x9F, 0xC2, 0xC0, 0xC0, 0xC0, 0xC1, 0xC0, 0xC2, /* 0x50-0x53 */
+	0xC0, 0xC3, 0xC0, 0xC4, 0xC0, 0xC5, 0xC0, 0xC6, /* 0x54-0x57 */
+	0xC0, 0xC7, 0x9F, 0xC3, 0x9F, 0xC4, 0x9F, 0xC5, /* 0x58-0x5B */
+	0xC0, 0xC8, 0x9F, 0xC6, 0x9F, 0xC7, 0x9F, 0xC8, /* 0x5C-0x5F */
+	0xC0, 0xC9, 0x9F, 0xC9, 0x9F, 0xCA, 0x9F, 0xCB, /* 0x60-0x63 */
+	0x9F, 0xCC, 0x9F, 0xCD, 0x9F, 0xCE, 0x9F, 0xCF, /* 0x64-0x67 */
+	0xC0, 0xCA, 0x9F, 0xD0, 0x9F, 0xD1, 0xC0, 0xCB, /* 0x68-0x6B */
+	0x9F, 0xD2, 0x9F, 0xD3, 0x9F, 0xD4, 0x9F, 0xD5, /* 0x6C-0x6F */
+	0x9F, 0xD6, 0x9F, 0xD7, 0x9F, 0xD8, 0x9F, 0xD9, /* 0x70-0x73 */
+	0xC0, 0xCC, 0xC0, 0xCD, 0x9F, 0xDA, 0x9F, 0xDB, /* 0x74-0x77 */
+	0xC0, 0xCE, 0x9F, 0xDC, 0x9F, 0xDD, 0x9F, 0xDE, /* 0x78-0x7B */
+	0xC0, 0xCF, 0xC0, 0xD0, 0xC0, 0xD1, 0x9F, 0xDF, /* 0x7C-0x7F */
+	
+	0x9F, 0xE0, 0x9F, 0xE1, 0x9F, 0xE2, 0xC0, 0xD2, /* 0x80-0x83 */
+	0xC0, 0xD3, 0xC0, 0xD4, 0x9F, 0xE3, 0xC0, 0xD5, /* 0x84-0x87 */
+	0xC0, 0xD6, 0xC0, 0xD7, 0xC0, 0xD8, 0x9F, 0xE4, /* 0x88-0x8B */
+	0x9F, 0xE5, 0x9F, 0xE6, 0xC0, 0xD9, 0x9F, 0xE7, /* 0x8C-0x8F */
+	0xC0, 0xDA, 0xC0, 0xDB, 0x9F, 0xE8, 0x9F, 0xE9, /* 0x90-0x93 */
+	0xC0, 0xDC, 0x9F, 0xEA, 0xC0, 0xDD, 0xC0, 0xDE, /* 0x94-0x97 */
+	0xC0, 0xDF, 0x9F, 0xEB, 0xC0, 0xE0, 0x9F, 0xEC, /* 0x98-0x9B */
+	0x9F, 0xED, 0x9F, 0xEE, 0x9F, 0xEF, 0x9F, 0xF0, /* 0x9C-0x9F */
+	0xC0, 0xE1, 0xC0, 0xE2, 0x9F, 0xF1, 0xC0, 0xE3, /* 0xA0-0xA3 */
+	0xC0, 0xE4, 0xC0, 0xE5, 0xC0, 0xE6, 0x9F, 0xF2, /* 0xA4-0xA7 */
+	0x9F, 0xF3, 0x9F, 0xF4, 0x9F, 0xF5, 0x9F, 0xF6, /* 0xA8-0xAB */
+	0xC0, 0xE7, 0xC0, 0xE8, 0x9F, 0xF7, 0x9F, 0xF8, /* 0xAC-0xAF */
+	0xC0, 0xE9, 0x9F, 0xF9, 0x9F, 0xFA, 0x9F, 0xFB, /* 0xB0-0xB3 */
+	0xC0, 0xEA, 0x9F, 0xFC, 0x9F, 0xFD, 0x9F, 0xFE, /* 0xB4-0xB7 */
+	0xA0, 0x41, 0xA0, 0x42, 0xA0, 0x43, 0xA0, 0x44, /* 0xB8-0xBB */
+	0xC0, 0xEB, 0xC0, 0xEC, 0xA0, 0x45, 0xC0, 0xED, /* 0xBC-0xBF */
+	0xC0, 0xEE, 0xC0, 0xEF, 0xA0, 0x46, 0xA0, 0x47, /* 0xC0-0xC3 */
+	0xA0, 0x48, 0xA0, 0x49, 0xA0, 0x4A, 0xA0, 0x4B, /* 0xC4-0xC7 */
+	0xC0, 0xF0, 0xC0, 0xF1, 0xA0, 0x4C, 0xA0, 0x4D, /* 0xC8-0xCB */
+	0xC0, 0xF2, 0xA0, 0x4E, 0xC0, 0xF3, 0xA0, 0x4F, /* 0xCC-0xCF */
+	0xC0, 0xF4, 0xA0, 0x50, 0xA0, 0x51, 0xA0, 0x52, /* 0xD0-0xD3 */
+	0xA0, 0x53, 0xA0, 0x54, 0xA0, 0x55, 0xA0, 0x56, /* 0xD4-0xD7 */
+	0xC0, 0xF5, 0xA0, 0x57, 0xA0, 0x58, 0xA0, 0x59, /* 0xD8-0xDB */
+	0xA0, 0x5A, 0xC0, 0xF6, 0xA0, 0x61, 0xA0, 0x62, /* 0xDC-0xDF */
+	0xA0, 0x63, 0xA0, 0x64, 0xA0, 0x65, 0xA0, 0x66, /* 0xE0-0xE3 */
+	0xC0, 0xF7, 0xA0, 0x67, 0xA0, 0x68, 0xA0, 0x69, /* 0xE4-0xE7 */
+	0xC0, 0xF8, 0xA0, 0x6A, 0xA0, 0x6B, 0xA0, 0x6C, /* 0xE8-0xEB */
+	0xC0, 0xF9, 0xA0, 0x6D, 0xA0, 0x6E, 0xA0, 0x6F, /* 0xEC-0xEF */
+	0xA0, 0x70, 0xA0, 0x71, 0xA0, 0x72, 0xA0, 0x73, /* 0xF0-0xF3 */
+	0xA0, 0x74, 0xA0, 0x75, 0xA0, 0x76, 0xA0, 0x77, /* 0xF4-0xF7 */
+	0xA0, 0x78, 0xA0, 0x79, 0xA0, 0x7A, 0xA0, 0x81, /* 0xF8-0xFB */
+	0xA0, 0x82, 0xA0, 0x83, 0xA0, 0x84, 0xA0, 0x85, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C8[512] = {
+	0xC0, 0xFA, 0xC0, 0xFB, 0xA0, 0x86, 0xA0, 0x87, /* 0x00-0x03 */
+	0xC0, 0xFC, 0xA0, 0x88, 0xA0, 0x89, 0xA0, 0x8A, /* 0x04-0x07 */
+	0xC0, 0xFD, 0xA0, 0x8B, 0xC0, 0xFE, 0xA0, 0x8C, /* 0x08-0x0B */
+	0xA0, 0x8D, 0xA0, 0x8E, 0xA0, 0x8F, 0xA0, 0x90, /* 0x0C-0x0F */
+	0xC1, 0xA1, 0xC1, 0xA2, 0xA0, 0x91, 0xC1, 0xA3, /* 0x10-0x13 */
+	0xA0, 0x92, 0xC1, 0xA4, 0xC1, 0xA5, 0xA0, 0x93, /* 0x14-0x17 */
+	0xA0, 0x94, 0xA0, 0x95, 0xA0, 0x96, 0xA0, 0x97, /* 0x18-0x1B */
+	0xC1, 0xA6, 0xC1, 0xA7, 0xA0, 0x98, 0xA0, 0x99, /* 0x1C-0x1F */
+	0xC1, 0xA8, 0xA0, 0x9A, 0xA0, 0x9B, 0xA0, 0x9C, /* 0x20-0x23 */
+	0xC1, 0xA9, 0xA0, 0x9D, 0xA0, 0x9E, 0xA0, 0x9F, /* 0x24-0x27 */
+	0xA0, 0xA0, 0xA0, 0xA1, 0xA0, 0xA2, 0xA0, 0xA3, /* 0x28-0x2B */
+	0xC1, 0xAA, 0xC1, 0xAB, 0xA0, 0xA4, 0xC1, 0xAC, /* 0x2C-0x2F */
+	0xA0, 0xA5, 0xC1, 0xAD, 0xA0, 0xA6, 0xA0, 0xA7, /* 0x30-0x33 */
+	0xA0, 0xA8, 0xA0, 0xA9, 0xA0, 0xAA, 0xA0, 0xAB, /* 0x34-0x37 */
+	0xC1, 0xAE, 0xA0, 0xAC, 0xA0, 0xAD, 0xA0, 0xAE, /* 0x38-0x3B */
+	0xC1, 0xAF, 0xA0, 0xAF, 0xA0, 0xB0, 0xA0, 0xB1, /* 0x3C-0x3F */
+	0xC1, 0xB0, 0xA0, 0xB2, 0xA0, 0xB3, 0xA0, 0xB4, /* 0x40-0x43 */
+	0xA0, 0xB5, 0xA0, 0xB6, 0xA0, 0xB7, 0xA0, 0xB8, /* 0x44-0x47 */
+	0xC1, 0xB1, 0xC1, 0xB2, 0xA0, 0xB9, 0xA0, 0xBA, /* 0x48-0x4B */
+	0xC1, 0xB3, 0xC1, 0xB4, 0xA0, 0xBB, 0xA0, 0xBC, /* 0x4C-0x4F */
+	0xA0, 0xBD, 0xA0, 0xBE, 0xA0, 0xBF, 0xA0, 0xC0, /* 0x50-0x53 */
+	0xC1, 0xB5, 0xA0, 0xC1, 0xA0, 0xC2, 0xA0, 0xC3, /* 0x54-0x57 */
+	0xA0, 0xC4, 0xA0, 0xC5, 0xA0, 0xC6, 0xA0, 0xC7, /* 0x58-0x5B */
+	0xA0, 0xC8, 0xA0, 0xC9, 0xA0, 0xCA, 0xA0, 0xCB, /* 0x5C-0x5F */
+	0xA0, 0xCC, 0xA0, 0xCD, 0xA0, 0xCE, 0xA0, 0xCF, /* 0x60-0x63 */
+	0xA0, 0xD0, 0xA0, 0xD1, 0xA0, 0xD2, 0xA0, 0xD3, /* 0x64-0x67 */
+	0xA0, 0xD4, 0xA0, 0xD5, 0xA0, 0xD6, 0xA0, 0xD7, /* 0x68-0x6B */
+	0xA0, 0xD8, 0xA0, 0xD9, 0xA0, 0xDA, 0xA0, 0xDB, /* 0x6C-0x6F */
+	0xC1, 0xB6, 0xC1, 0xB7, 0xA0, 0xDC, 0xA0, 0xDD, /* 0x70-0x73 */
+	0xC1, 0xB8, 0xA0, 0xDE, 0xA0, 0xDF, 0xA0, 0xE0, /* 0x74-0x77 */
+	0xC1, 0xB9, 0xA0, 0xE1, 0xC1, 0xBA, 0xA0, 0xE2, /* 0x78-0x7B */
+	0xA0, 0xE3, 0xA0, 0xE4, 0xA0, 0xE5, 0xA0, 0xE6, /* 0x7C-0x7F */
+	
+	0xC1, 0xBB, 0xC1, 0xBC, 0xA0, 0xE7, 0xC1, 0xBD, /* 0x80-0x83 */
+	0xA0, 0xE8, 0xC1, 0xBE, 0xC1, 0xBF, 0xC1, 0xC0, /* 0x84-0x87 */
+	0xA0, 0xE9, 0xA0, 0xEA, 0xA0, 0xEB, 0xC1, 0xC1, /* 0x88-0x8B */
+	0xC1, 0xC2, 0xC1, 0xC3, 0xA0, 0xEC, 0xA0, 0xED, /* 0x8C-0x8F */
+	0xA0, 0xEE, 0xA0, 0xEF, 0xA0, 0xF0, 0xA0, 0xF1, /* 0x90-0x93 */
+	0xC1, 0xC4, 0xA0, 0xF2, 0xA0, 0xF3, 0xA0, 0xF4, /* 0x94-0x97 */
+	0xA0, 0xF5, 0xA0, 0xF6, 0xA0, 0xF7, 0xA0, 0xF8, /* 0x98-0x9B */
+	0xA0, 0xF9, 0xC1, 0xC5, 0xA0, 0xFA, 0xC1, 0xC6, /* 0x9C-0x9F */
+	0xA0, 0xFB, 0xC1, 0xC7, 0xA0, 0xFC, 0xA0, 0xFD, /* 0xA0-0xA3 */
+	0xA0, 0xFE, 0xA1, 0x41, 0xA1, 0x42, 0xA1, 0x43, /* 0xA4-0xA7 */
+	0xC1, 0xC8, 0xA1, 0x44, 0xA1, 0x45, 0xA1, 0x46, /* 0xA8-0xAB */
+	0xA1, 0x47, 0xA1, 0x48, 0xA1, 0x49, 0xA1, 0x4A, /* 0xAC-0xAF */
+	0xA1, 0x4B, 0xA1, 0x4C, 0xA1, 0x4D, 0xA1, 0x4E, /* 0xB0-0xB3 */
+	0xA1, 0x4F, 0xA1, 0x50, 0xA1, 0x51, 0xA1, 0x52, /* 0xB4-0xB7 */
+	0xA1, 0x53, 0xA1, 0x54, 0xA1, 0x55, 0xA1, 0x56, /* 0xB8-0xBB */
+	0xC1, 0xC9, 0xC1, 0xCA, 0xA1, 0x57, 0xA1, 0x58, /* 0xBC-0xBF */
+	0xA1, 0x59, 0xA1, 0x5A, 0xA1, 0x61, 0xA1, 0x62, /* 0xC0-0xC3 */
+	0xC1, 0xCB, 0xA1, 0x63, 0xA1, 0x64, 0xA1, 0x65, /* 0xC4-0xC7 */
+	0xC1, 0xCC, 0xA1, 0x66, 0xA1, 0x67, 0xA1, 0x68, /* 0xC8-0xCB */
+	0xC1, 0xCD, 0xA1, 0x69, 0xA1, 0x6A, 0xA1, 0x6B, /* 0xCC-0xCF */
+	0xA1, 0x6C, 0xA1, 0x6D, 0xA1, 0x6E, 0xA1, 0x6F, /* 0xD0-0xD3 */
+	0xC1, 0xCE, 0xC1, 0xCF, 0xA1, 0x70, 0xC1, 0xD0, /* 0xD4-0xD7 */
+	0xA1, 0x71, 0xC1, 0xD1, 0xA1, 0x72, 0xA1, 0x73, /* 0xD8-0xDB */
+	0xA1, 0x74, 0xA1, 0x75, 0xA1, 0x76, 0xA1, 0x77, /* 0xDC-0xDF */
+	0xC1, 0xD2, 0xC1, 0xD3, 0xA1, 0x78, 0xA1, 0x79, /* 0xE0-0xE3 */
+	0xC1, 0xD4, 0xA1, 0x7A, 0xA1, 0x81, 0xA1, 0x82, /* 0xE4-0xE7 */
+	0xA1, 0x83, 0xA1, 0x84, 0xA1, 0x85, 0xA1, 0x86, /* 0xE8-0xEB */
+	0xA1, 0x87, 0xA1, 0x88, 0xA1, 0x89, 0xA1, 0x8A, /* 0xEC-0xEF */
+	0xA1, 0x8B, 0xA1, 0x8C, 0xA1, 0x8D, 0xA1, 0x8E, /* 0xF0-0xF3 */
+	0xA1, 0x8F, 0xC1, 0xD5, 0xA1, 0x90, 0xA1, 0x91, /* 0xF4-0xF7 */
+	0xA1, 0x92, 0xA1, 0x93, 0xA1, 0x94, 0xA1, 0x95, /* 0xF8-0xFB */
+	0xC1, 0xD6, 0xC1, 0xD7, 0xA1, 0x96, 0xA1, 0x97, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_C9[512] = {
+	0xC1, 0xD8, 0xA1, 0x98, 0xA1, 0x99, 0xA1, 0x9A, /* 0x00-0x03 */
+	0xC1, 0xD9, 0xC1, 0xDA, 0xC1, 0xDB, 0xA1, 0x9B, /* 0x04-0x07 */
+	0xA1, 0x9C, 0xA1, 0x9D, 0xA1, 0x9E, 0xA1, 0x9F, /* 0x08-0x0B */
+	0xC1, 0xDC, 0xC1, 0xDD, 0xA1, 0xA0, 0xC1, 0xDE, /* 0x0C-0x0F */
+	0xA2, 0x41, 0xC1, 0xDF, 0xA2, 0x42, 0xA2, 0x43, /* 0x10-0x13 */
+	0xA2, 0x44, 0xA2, 0x45, 0xA2, 0x46, 0xA2, 0x47, /* 0x14-0x17 */
+	0xC1, 0xE0, 0xA2, 0x48, 0xA2, 0x49, 0xA2, 0x4A, /* 0x18-0x1B */
+	0xA2, 0x4B, 0xA2, 0x4C, 0xA2, 0x4D, 0xA2, 0x4E, /* 0x1C-0x1F */
+	0xA2, 0x4F, 0xA2, 0x50, 0xA2, 0x51, 0xA2, 0x52, /* 0x20-0x23 */
+	0xA2, 0x53, 0xA2, 0x54, 0xA2, 0x55, 0xA2, 0x56, /* 0x24-0x27 */
+	0xA2, 0x57, 0xA2, 0x58, 0xA2, 0x59, 0xA2, 0x5A, /* 0x28-0x2B */
+	0xC1, 0xE1, 0xA2, 0x61, 0xA2, 0x62, 0xA2, 0x63, /* 0x2C-0x2F */
+	0xA2, 0x64, 0xA2, 0x65, 0xA2, 0x66, 0xA2, 0x67, /* 0x30-0x33 */
+	0xC1, 0xE2, 0xA2, 0x68, 0xA2, 0x69, 0xA2, 0x6A, /* 0x34-0x37 */
+	0xA2, 0x6B, 0xA2, 0x6C, 0xA2, 0x6D, 0xA2, 0x6E, /* 0x38-0x3B */
+	0xA2, 0x6F, 0xA2, 0x70, 0xA2, 0x71, 0xA2, 0x72, /* 0x3C-0x3F */
+	0xA2, 0x73, 0xA2, 0x74, 0xA2, 0x75, 0xA2, 0x76, /* 0x40-0x43 */
+	0xA2, 0x77, 0xA2, 0x78, 0xA2, 0x79, 0xA2, 0x7A, /* 0x44-0x47 */
+	0xA2, 0x81, 0xA2, 0x82, 0xA2, 0x83, 0xA2, 0x84, /* 0x48-0x4B */
+	0xA2, 0x85, 0xA2, 0x86, 0xA2, 0x87, 0xA2, 0x88, /* 0x4C-0x4F */
+	0xC1, 0xE3, 0xC1, 0xE4, 0xA2, 0x89, 0xA2, 0x8A, /* 0x50-0x53 */
+	0xC1, 0xE5, 0xA2, 0x8B, 0xA2, 0x8C, 0xA2, 0x8D, /* 0x54-0x57 */
+	0xC1, 0xE6, 0xA2, 0x8E, 0xA2, 0x8F, 0xA2, 0x90, /* 0x58-0x5B */
+	0xA2, 0x91, 0xA2, 0x92, 0xA2, 0x93, 0xA2, 0x94, /* 0x5C-0x5F */
+	0xC1, 0xE7, 0xC1, 0xE8, 0xA2, 0x95, 0xC1, 0xE9, /* 0x60-0x63 */
+	0xA2, 0x96, 0xA2, 0x97, 0xA2, 0x98, 0xA2, 0x99, /* 0x64-0x67 */
+	0xA2, 0x9A, 0xA2, 0x9B, 0xA2, 0x9C, 0xA2, 0x9D, /* 0x68-0x6B */
+	0xC1, 0xEA, 0xA2, 0x9E, 0xA2, 0x9F, 0xA2, 0xA0, /* 0x6C-0x6F */
+	0xC1, 0xEB, 0xA3, 0x41, 0xA3, 0x42, 0xA3, 0x43, /* 0x70-0x73 */
+	0xC1, 0xEC, 0xA3, 0x44, 0xA3, 0x45, 0xA3, 0x46, /* 0x74-0x77 */
+	0xA3, 0x47, 0xA3, 0x48, 0xA3, 0x49, 0xA3, 0x4A, /* 0x78-0x7B */
+	0xC1, 0xED, 0xA3, 0x4B, 0xA3, 0x4C, 0xA3, 0x4D, /* 0x7C-0x7F */
+	
+	0xA3, 0x4E, 0xA3, 0x4F, 0xA3, 0x50, 0xA3, 0x51, /* 0x80-0x83 */
+	0xA3, 0x52, 0xA3, 0x53, 0xA3, 0x54, 0xA3, 0x55, /* 0x84-0x87 */
+	0xC1, 0xEE, 0xC1, 0xEF, 0xA3, 0x56, 0xA3, 0x57, /* 0x88-0x8B */
+	0xC1, 0xF0, 0xA3, 0x58, 0xA3, 0x59, 0xA3, 0x5A, /* 0x8C-0x8F */
+	0xC1, 0xF1, 0xA3, 0x61, 0xA3, 0x62, 0xA3, 0x63, /* 0x90-0x93 */
+	0xA3, 0x64, 0xA3, 0x65, 0xA3, 0x66, 0xA3, 0x67, /* 0x94-0x97 */
+	0xC1, 0xF2, 0xC1, 0xF3, 0xA3, 0x68, 0xC1, 0xF4, /* 0x98-0x9B */
+	0xA3, 0x69, 0xC1, 0xF5, 0xA3, 0x6A, 0xA3, 0x6B, /* 0x9C-0x9F */
+	0xA3, 0x6C, 0xA3, 0x6D, 0xA3, 0x6E, 0xA3, 0x6F, /* 0xA0-0xA3 */
+	0xA3, 0x70, 0xA3, 0x71, 0xA3, 0x72, 0xA3, 0x73, /* 0xA4-0xA7 */
+	0xA3, 0x74, 0xA3, 0x75, 0xA3, 0x76, 0xA3, 0x77, /* 0xA8-0xAB */
+	0xA3, 0x78, 0xA3, 0x79, 0xA3, 0x7A, 0xA3, 0x81, /* 0xAC-0xAF */
+	0xA3, 0x82, 0xA3, 0x83, 0xA3, 0x84, 0xA3, 0x85, /* 0xB0-0xB3 */
+	0xA3, 0x86, 0xA3, 0x87, 0xA3, 0x88, 0xA3, 0x89, /* 0xB4-0xB7 */
+	0xA3, 0x8A, 0xA3, 0x8B, 0xA3, 0x8C, 0xA3, 0x8D, /* 0xB8-0xBB */
+	0xA3, 0x8E, 0xA3, 0x8F, 0xA3, 0x90, 0xA3, 0x91, /* 0xBC-0xBF */
+	0xC1, 0xF6, 0xC1, 0xF7, 0xA3, 0x92, 0xA3, 0x93, /* 0xC0-0xC3 */
+	0xC1, 0xF8, 0xA3, 0x94, 0xA3, 0x95, 0xC1, 0xF9, /* 0xC4-0xC7 */
+	0xC1, 0xFA, 0xA3, 0x96, 0xC1, 0xFB, 0xA3, 0x97, /* 0xC8-0xCB */
+	0xA3, 0x98, 0xA3, 0x99, 0xA3, 0x9A, 0xA3, 0x9B, /* 0xCC-0xCF */
+	0xC1, 0xFC, 0xC1, 0xFD, 0xA3, 0x9C, 0xC1, 0xFE, /* 0xD0-0xD3 */
+	0xA3, 0x9D, 0xC2, 0xA1, 0xC2, 0xA2, 0xA3, 0x9E, /* 0xD4-0xD7 */
+	0xA3, 0x9F, 0xC2, 0xA3, 0xC2, 0xA4, 0xA3, 0xA0, /* 0xD8-0xDB */
+	0xC2, 0xA5, 0xC2, 0xA6, 0xA4, 0x41, 0xA4, 0x42, /* 0xDC-0xDF */
+	0xC2, 0xA7, 0xA4, 0x43, 0xC2, 0xA8, 0xA4, 0x44, /* 0xE0-0xE3 */
+	0xC2, 0xA9, 0xA4, 0x45, 0xA4, 0x46, 0xC2, 0xAA, /* 0xE4-0xE7 */
+	0xA4, 0x47, 0xA4, 0x48, 0xA4, 0x49, 0xA4, 0x4A, /* 0xE8-0xEB */
+	0xC2, 0xAB, 0xC2, 0xAC, 0xA4, 0x4B, 0xC2, 0xAD, /* 0xEC-0xEF */
+	0xC2, 0xAE, 0xC2, 0xAF, 0xA4, 0x4C, 0xA4, 0x4D, /* 0xF0-0xF3 */
+	0xA4, 0x4E, 0xA4, 0x4F, 0xA4, 0x50, 0xA4, 0x51, /* 0xF4-0xF7 */
+	0xC2, 0xB0, 0xC2, 0xB1, 0xA4, 0x52, 0xA4, 0x53, /* 0xF8-0xFB */
+	0xC2, 0xB2, 0xA4, 0x54, 0xA4, 0x55, 0xA4, 0x56, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CA[512] = {
+	0xC2, 0xB3, 0xA4, 0x57, 0xA4, 0x58, 0xA4, 0x59, /* 0x00-0x03 */
+	0xA4, 0x5A, 0xA4, 0x61, 0xA4, 0x62, 0xA4, 0x63, /* 0x04-0x07 */
+	0xC2, 0xB4, 0xC2, 0xB5, 0xA4, 0x64, 0xC2, 0xB6, /* 0x08-0x0B */
+	0xC2, 0xB7, 0xC2, 0xB8, 0xA4, 0x65, 0xA4, 0x66, /* 0x0C-0x0F */
+	0xA4, 0x67, 0xA4, 0x68, 0xA4, 0x69, 0xA4, 0x6A, /* 0x10-0x13 */
+	0xC2, 0xB9, 0xA4, 0x6B, 0xA4, 0x6C, 0xA4, 0x6D, /* 0x14-0x17 */
+	0xC2, 0xBA, 0xA4, 0x6E, 0xA4, 0x6F, 0xA4, 0x70, /* 0x18-0x1B */
+	0xA4, 0x71, 0xA4, 0x72, 0xA4, 0x73, 0xA4, 0x74, /* 0x1C-0x1F */
+	0xA4, 0x75, 0xA4, 0x76, 0xA4, 0x77, 0xA4, 0x78, /* 0x20-0x23 */
+	0xA4, 0x79, 0xA4, 0x7A, 0xA4, 0x81, 0xA4, 0x82, /* 0x24-0x27 */
+	0xA4, 0x83, 0xC2, 0xBB, 0xA4, 0x84, 0xA4, 0x85, /* 0x28-0x2B */
+	0xA4, 0x86, 0xA4, 0x87, 0xA4, 0x88, 0xA4, 0x89, /* 0x2C-0x2F */
+	0xA4, 0x8A, 0xA4, 0x8B, 0xA4, 0x8C, 0xA4, 0x8D, /* 0x30-0x33 */
+	0xA4, 0x8E, 0xA4, 0x8F, 0xA4, 0x90, 0xA4, 0x91, /* 0x34-0x37 */
+	0xA4, 0x92, 0xA4, 0x93, 0xA4, 0x94, 0xA4, 0x95, /* 0x38-0x3B */
+	0xA4, 0x96, 0xA4, 0x97, 0xA4, 0x98, 0xA4, 0x99, /* 0x3C-0x3F */
+	0xA4, 0x9A, 0xA4, 0x9B, 0xA4, 0x9C, 0xA4, 0x9D, /* 0x40-0x43 */
+	0xA4, 0x9E, 0xA4, 0x9F, 0xA4, 0xA0, 0xA5, 0x41, /* 0x44-0x47 */
+	0xA5, 0x42, 0xA5, 0x43, 0xA5, 0x44, 0xA5, 0x45, /* 0x48-0x4B */
+	0xC2, 0xBC, 0xC2, 0xBD, 0xA5, 0x46, 0xA5, 0x47, /* 0x4C-0x4F */
+	0xC2, 0xBE, 0xA5, 0x48, 0xA5, 0x49, 0xA5, 0x4A, /* 0x50-0x53 */
+	0xC2, 0xBF, 0xA5, 0x4B, 0xA5, 0x4C, 0xA5, 0x4D, /* 0x54-0x57 */
+	0xA5, 0x4E, 0xA5, 0x4F, 0xA5, 0x50, 0xA5, 0x51, /* 0x58-0x5B */
+	0xC2, 0xC0, 0xC2, 0xC1, 0xA5, 0x52, 0xC2, 0xC2, /* 0x5C-0x5F */
+	0xC2, 0xC3, 0xC2, 0xC4, 0xA5, 0x53, 0xA5, 0x54, /* 0x60-0x63 */
+	0xA5, 0x55, 0xA5, 0x56, 0xA5, 0x57, 0xA5, 0x58, /* 0x64-0x67 */
+	0xC2, 0xC5, 0xA5, 0x59, 0xA5, 0x5A, 0xA5, 0x61, /* 0x68-0x6B */
+	0xA5, 0x62, 0xA5, 0x63, 0xA5, 0x64, 0xA5, 0x65, /* 0x6C-0x6F */
+	0xA5, 0x66, 0xA5, 0x67, 0xA5, 0x68, 0xA5, 0x69, /* 0x70-0x73 */
+	0xA5, 0x6A, 0xA5, 0x6B, 0xA5, 0x6C, 0xA5, 0x6D, /* 0x74-0x77 */
+	0xA5, 0x6E, 0xA5, 0x6F, 0xA5, 0x70, 0xA5, 0x71, /* 0x78-0x7B */
+	0xA5, 0x72, 0xC2, 0xC6, 0xA5, 0x73, 0xA5, 0x74, /* 0x7C-0x7F */
+	
+	0xA5, 0x75, 0xA5, 0x76, 0xA5, 0x77, 0xA5, 0x78, /* 0x80-0x83 */
+	0xC2, 0xC7, 0xA5, 0x79, 0xA5, 0x7A, 0xA5, 0x81, /* 0x84-0x87 */
+	0xA5, 0x82, 0xA5, 0x83, 0xA5, 0x84, 0xA5, 0x85, /* 0x88-0x8B */
+	0xA5, 0x86, 0xA5, 0x87, 0xA5, 0x88, 0xA5, 0x89, /* 0x8C-0x8F */
+	0xA5, 0x8A, 0xA5, 0x8B, 0xA5, 0x8C, 0xA5, 0x8D, /* 0x90-0x93 */
+	0xA5, 0x8E, 0xA5, 0x8F, 0xA5, 0x90, 0xA5, 0x91, /* 0x94-0x97 */
+	0xC2, 0xC8, 0xA5, 0x92, 0xA5, 0x93, 0xA5, 0x94, /* 0x98-0x9B */
+	0xA5, 0x95, 0xA5, 0x96, 0xA5, 0x97, 0xA5, 0x98, /* 0x9C-0x9F */
+	0xA5, 0x99, 0xA5, 0x9A, 0xA5, 0x9B, 0xA5, 0x9C, /* 0xA0-0xA3 */
+	0xA5, 0x9D, 0xA5, 0x9E, 0xA5, 0x9F, 0xA5, 0xA0, /* 0xA4-0xA7 */
+	0xA6, 0x41, 0xA6, 0x42, 0xA6, 0x43, 0xA6, 0x44, /* 0xA8-0xAB */
+	0xA6, 0x45, 0xA6, 0x46, 0xA6, 0x47, 0xA6, 0x48, /* 0xAC-0xAF */
+	0xA6, 0x49, 0xA6, 0x4A, 0xA6, 0x4B, 0xA6, 0x4C, /* 0xB0-0xB3 */
+	0xA6, 0x4D, 0xA6, 0x4E, 0xA6, 0x4F, 0xA6, 0x50, /* 0xB4-0xB7 */
+	0xA6, 0x51, 0xA6, 0x52, 0xA6, 0x53, 0xA6, 0x54, /* 0xB8-0xBB */
+	0xC2, 0xC9, 0xC2, 0xCA, 0xA6, 0x55, 0xA6, 0x56, /* 0xBC-0xBF */
+	0xC2, 0xCB, 0xA6, 0x57, 0xA6, 0x58, 0xA6, 0x59, /* 0xC0-0xC3 */
+	0xC2, 0xCC, 0xA6, 0x5A, 0xA6, 0x61, 0xA6, 0x62, /* 0xC4-0xC7 */
+	0xA6, 0x63, 0xA6, 0x64, 0xA6, 0x65, 0xA6, 0x66, /* 0xC8-0xCB */
+	0xC2, 0xCD, 0xC2, 0xCE, 0xA6, 0x67, 0xC2, 0xCF, /* 0xCC-0xCF */
+	0xA6, 0x68, 0xC2, 0xD0, 0xA6, 0x69, 0xC2, 0xD1, /* 0xD0-0xD3 */
+	0xA6, 0x6A, 0xA6, 0x6B, 0xA6, 0x6C, 0xA6, 0x6D, /* 0xD4-0xD7 */
+	0xC2, 0xD2, 0xC2, 0xD3, 0xA6, 0x6E, 0xA6, 0x6F, /* 0xD8-0xDB */
+	0xA6, 0x70, 0xA6, 0x71, 0xA6, 0x72, 0xA6, 0x73, /* 0xDC-0xDF */
+	0xC2, 0xD4, 0xA6, 0x74, 0xA6, 0x75, 0xA6, 0x76, /* 0xE0-0xE3 */
+	0xA6, 0x77, 0xA6, 0x78, 0xA6, 0x79, 0xA6, 0x7A, /* 0xE4-0xE7 */
+	0xA6, 0x81, 0xA6, 0x82, 0xA6, 0x83, 0xA6, 0x84, /* 0xE8-0xEB */
+	0xC2, 0xD5, 0xA6, 0x85, 0xA6, 0x86, 0xA6, 0x87, /* 0xEC-0xEF */
+	0xA6, 0x88, 0xA6, 0x89, 0xA6, 0x8A, 0xA6, 0x8B, /* 0xF0-0xF3 */
+	0xC2, 0xD6, 0xA6, 0x8C, 0xA6, 0x8D, 0xA6, 0x8E, /* 0xF4-0xF7 */
+	0xA6, 0x8F, 0xA6, 0x90, 0xA6, 0x91, 0xA6, 0x92, /* 0xF8-0xFB */
+	0xA6, 0x93, 0xA6, 0x94, 0xA6, 0x95, 0xA6, 0x96, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CB[512] = {
+	0xA6, 0x97, 0xA6, 0x98, 0xA6, 0x99, 0xA6, 0x9A, /* 0x00-0x03 */
+	0xA6, 0x9B, 0xA6, 0x9C, 0xA6, 0x9D, 0xA6, 0x9E, /* 0x04-0x07 */
+	0xC2, 0xD7, 0xA6, 0x9F, 0xA6, 0xA0, 0xA7, 0x41, /* 0x08-0x0B */
+	0xA7, 0x42, 0xA7, 0x43, 0xA7, 0x44, 0xA7, 0x45, /* 0x0C-0x0F */
+	0xC2, 0xD8, 0xA7, 0x46, 0xA7, 0x47, 0xA7, 0x48, /* 0x10-0x13 */
+	0xC2, 0xD9, 0xA7, 0x49, 0xA7, 0x4A, 0xA7, 0x4B, /* 0x14-0x17 */
+	0xC2, 0xDA, 0xA7, 0x4C, 0xA7, 0x4D, 0xA7, 0x4E, /* 0x18-0x1B */
+	0xA7, 0x4F, 0xA7, 0x50, 0xA7, 0x51, 0xA7, 0x52, /* 0x1C-0x1F */
+	0xC2, 0xDB, 0xC2, 0xDC, 0xA7, 0x53, 0xA7, 0x54, /* 0x20-0x23 */
+	0xA7, 0x55, 0xA7, 0x56, 0xA7, 0x57, 0xA7, 0x58, /* 0x24-0x27 */
+	0xA7, 0x59, 0xA7, 0x5A, 0xA7, 0x61, 0xA7, 0x62, /* 0x28-0x2B */
+	0xA7, 0x63, 0xA7, 0x64, 0xA7, 0x65, 0xA7, 0x66, /* 0x2C-0x2F */
+	0xA7, 0x67, 0xA7, 0x68, 0xA7, 0x69, 0xA7, 0x6A, /* 0x30-0x33 */
+	0xA7, 0x6B, 0xA7, 0x6C, 0xA7, 0x6D, 0xA7, 0x6E, /* 0x34-0x37 */
+	0xA7, 0x6F, 0xA7, 0x70, 0xA7, 0x71, 0xA7, 0x72, /* 0x38-0x3B */
+	0xA7, 0x73, 0xA7, 0x74, 0xA7, 0x75, 0xA7, 0x76, /* 0x3C-0x3F */
+	0xA7, 0x77, 0xC2, 0xDD, 0xA7, 0x78, 0xA7, 0x79, /* 0x40-0x43 */
+	0xA7, 0x7A, 0xA7, 0x81, 0xA7, 0x82, 0xA7, 0x83, /* 0x44-0x47 */
+	0xC2, 0xDE, 0xC2, 0xDF, 0xA7, 0x84, 0xA7, 0x85, /* 0x48-0x4B */
+	0xC2, 0xE0, 0xA7, 0x86, 0xA7, 0x87, 0xA7, 0x88, /* 0x4C-0x4F */
+	0xC2, 0xE1, 0xA7, 0x89, 0xA7, 0x8A, 0xA7, 0x8B, /* 0x50-0x53 */
+	0xA7, 0x8C, 0xA7, 0x8D, 0xA7, 0x8E, 0xA7, 0x8F, /* 0x54-0x57 */
+	0xC2, 0xE2, 0xC2, 0xE3, 0xA7, 0x90, 0xA7, 0x91, /* 0x58-0x5B */
+	0xA7, 0x92, 0xC2, 0xE4, 0xA7, 0x93, 0xA7, 0x94, /* 0x5C-0x5F */
+	0xA7, 0x95, 0xA7, 0x96, 0xA7, 0x97, 0xA7, 0x98, /* 0x60-0x63 */
+	0xC2, 0xE5, 0xA7, 0x99, 0xA7, 0x9A, 0xA7, 0x9B, /* 0x64-0x67 */
+	0xA7, 0x9C, 0xA7, 0x9D, 0xA7, 0x9E, 0xA7, 0x9F, /* 0x68-0x6B */
+	0xA7, 0xA0, 0xA8, 0x41, 0xA8, 0x42, 0xA8, 0x43, /* 0x6C-0x6F */
+	0xA8, 0x44, 0xA8, 0x45, 0xA8, 0x46, 0xA8, 0x47, /* 0x70-0x73 */
+	0xA8, 0x48, 0xA8, 0x49, 0xA8, 0x4A, 0xA8, 0x4B, /* 0x74-0x77 */
+	0xC2, 0xE6, 0xC2, 0xE7, 0xA8, 0x4C, 0xA8, 0x4D, /* 0x78-0x7B */
+	0xA8, 0x4E, 0xA8, 0x4F, 0xA8, 0x50, 0xA8, 0x51, /* 0x7C-0x7F */
+	
+	0xA8, 0x52, 0xA8, 0x53, 0xA8, 0x54, 0xA8, 0x55, /* 0x80-0x83 */
+	0xA8, 0x56, 0xA8, 0x57, 0xA8, 0x58, 0xA8, 0x59, /* 0x84-0x87 */
+	0xA8, 0x5A, 0xA8, 0x61, 0xA8, 0x62, 0xA8, 0x63, /* 0x88-0x8B */
+	0xA8, 0x64, 0xA8, 0x65, 0xA8, 0x66, 0xA8, 0x67, /* 0x8C-0x8F */
+	0xA8, 0x68, 0xA8, 0x69, 0xA8, 0x6A, 0xA8, 0x6B, /* 0x90-0x93 */
+	0xA8, 0x6C, 0xA8, 0x6D, 0xA8, 0x6E, 0xA8, 0x6F, /* 0x94-0x97 */
+	0xA8, 0x70, 0xA8, 0x71, 0xA8, 0x72, 0xA8, 0x73, /* 0x98-0x9B */
+	0xC2, 0xE8, 0xA8, 0x74, 0xA8, 0x75, 0xA8, 0x76, /* 0x9C-0x9F */
+	0xA8, 0x77, 0xA8, 0x78, 0xA8, 0x79, 0xA8, 0x7A, /* 0xA0-0xA3 */
+	0xA8, 0x81, 0xA8, 0x82, 0xA8, 0x83, 0xA8, 0x84, /* 0xA4-0xA7 */
+	0xA8, 0x85, 0xA8, 0x86, 0xA8, 0x87, 0xA8, 0x88, /* 0xA8-0xAB */
+	0xA8, 0x89, 0xA8, 0x8A, 0xA8, 0x8B, 0xA8, 0x8C, /* 0xAC-0xAF */
+	0xA8, 0x8D, 0xA8, 0x8E, 0xA8, 0x8F, 0xA8, 0x90, /* 0xB0-0xB3 */
+	0xA8, 0x91, 0xA8, 0x92, 0xA8, 0x93, 0xA8, 0x94, /* 0xB4-0xB7 */
+	0xC2, 0xE9, 0xA8, 0x95, 0xA8, 0x96, 0xA8, 0x97, /* 0xB8-0xBB */
+	0xA8, 0x98, 0xA8, 0x99, 0xA8, 0x9A, 0xA8, 0x9B, /* 0xBC-0xBF */
+	0xA8, 0x9C, 0xA8, 0x9D, 0xA8, 0x9E, 0xA8, 0x9F, /* 0xC0-0xC3 */
+	0xA8, 0xA0, 0xA9, 0x41, 0xA9, 0x42, 0xA9, 0x43, /* 0xC4-0xC7 */
+	0xA9, 0x44, 0xA9, 0x45, 0xA9, 0x46, 0xA9, 0x47, /* 0xC8-0xCB */
+	0xA9, 0x48, 0xA9, 0x49, 0xA9, 0x4A, 0xA9, 0x4B, /* 0xCC-0xCF */
+	0xA9, 0x4C, 0xA9, 0x4D, 0xA9, 0x4E, 0xA9, 0x4F, /* 0xD0-0xD3 */
+	0xC2, 0xEA, 0xA9, 0x50, 0xA9, 0x51, 0xA9, 0x52, /* 0xD4-0xD7 */
+	0xA9, 0x53, 0xA9, 0x54, 0xA9, 0x55, 0xA9, 0x56, /* 0xD8-0xDB */
+	0xA9, 0x57, 0xA9, 0x58, 0xA9, 0x59, 0xA9, 0x5A, /* 0xDC-0xDF */
+	0xA9, 0x61, 0xA9, 0x62, 0xA9, 0x63, 0xA9, 0x64, /* 0xE0-0xE3 */
+	0xC2, 0xEB, 0xA9, 0x65, 0xA9, 0x66, 0xC2, 0xEC, /* 0xE4-0xE7 */
+	0xA9, 0x67, 0xC2, 0xED, 0xA9, 0x68, 0xA9, 0x69, /* 0xE8-0xEB */
+	0xA9, 0x6A, 0xA9, 0x6B, 0xA9, 0x6C, 0xA9, 0x6D, /* 0xEC-0xEF */
+	0xA9, 0x6E, 0xA9, 0x6F, 0xA9, 0x70, 0xA9, 0x71, /* 0xF0-0xF3 */
+	0xA9, 0x72, 0xA9, 0x73, 0xA9, 0x74, 0xA9, 0x75, /* 0xF4-0xF7 */
+	0xA9, 0x76, 0xA9, 0x77, 0xA9, 0x78, 0xA9, 0x79, /* 0xF8-0xFB */
+	0xA9, 0x7A, 0xA9, 0x81, 0xA9, 0x82, 0xA9, 0x83, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CC[512] = {
+	0xA9, 0x84, 0xA9, 0x85, 0xA9, 0x86, 0xA9, 0x87, /* 0x00-0x03 */
+	0xA9, 0x88, 0xA9, 0x89, 0xA9, 0x8A, 0xA9, 0x8B, /* 0x04-0x07 */
+	0xA9, 0x8C, 0xA9, 0x8D, 0xA9, 0x8E, 0xA9, 0x8F, /* 0x08-0x0B */
+	0xC2, 0xEE, 0xC2, 0xEF, 0xA9, 0x90, 0xA9, 0x91, /* 0x0C-0x0F */
+	0xC2, 0xF0, 0xA9, 0x92, 0xA9, 0x93, 0xA9, 0x94, /* 0x10-0x13 */
+	0xC2, 0xF1, 0xA9, 0x95, 0xA9, 0x96, 0xA9, 0x97, /* 0x14-0x17 */
+	0xA9, 0x98, 0xA9, 0x99, 0xA9, 0x9A, 0xA9, 0x9B, /* 0x18-0x1B */
+	0xC2, 0xF2, 0xC2, 0xF3, 0xA9, 0x9C, 0xA9, 0x9D, /* 0x1C-0x1F */
+	0xA9, 0x9E, 0xC2, 0xF4, 0xC2, 0xF5, 0xA9, 0x9F, /* 0x20-0x23 */
+	0xA9, 0xA0, 0xAA, 0x41, 0xAA, 0x42, 0xC2, 0xF6, /* 0x24-0x27 */
+	0xC2, 0xF7, 0xC2, 0xF8, 0xAA, 0x43, 0xAA, 0x44, /* 0x28-0x2B */
+	0xC2, 0xF9, 0xAA, 0x45, 0xC2, 0xFA, 0xAA, 0x46, /* 0x2C-0x2F */
+	0xC2, 0xFB, 0xAA, 0x47, 0xAA, 0x48, 0xAA, 0x49, /* 0x30-0x33 */
+	0xAA, 0x4A, 0xAA, 0x4B, 0xAA, 0x4C, 0xAA, 0x4D, /* 0x34-0x37 */
+	0xC2, 0xFC, 0xC2, 0xFD, 0xAA, 0x4E, 0xC2, 0xFE, /* 0x38-0x3B */
+	0xC3, 0xA1, 0xC3, 0xA2, 0xC3, 0xA3, 0xAA, 0x4F, /* 0x3C-0x3F */
+	0xAA, 0x50, 0xAA, 0x51, 0xAA, 0x52, 0xAA, 0x53, /* 0x40-0x43 */
+	0xC3, 0xA4, 0xC3, 0xA5, 0xAA, 0x54, 0xAA, 0x55, /* 0x44-0x47 */
+	0xC3, 0xA6, 0xAA, 0x56, 0xAA, 0x57, 0xAA, 0x58, /* 0x48-0x4B */
+	0xC3, 0xA7, 0xAA, 0x59, 0xAA, 0x5A, 0xAA, 0x61, /* 0x4C-0x4F */
+	0xAA, 0x62, 0xAA, 0x63, 0xAA, 0x64, 0xAA, 0x65, /* 0x50-0x53 */
+	0xC3, 0xA8, 0xC3, 0xA9, 0xAA, 0x66, 0xC3, 0xAA, /* 0x54-0x57 */
+	0xC3, 0xAB, 0xC3, 0xAC, 0xAA, 0x67, 0xAA, 0x68, /* 0x58-0x5B */
+	0xAA, 0x69, 0xAA, 0x6A, 0xAA, 0x6B, 0xAA, 0x6C, /* 0x5C-0x5F */
+	0xC3, 0xAD, 0xAA, 0x6D, 0xAA, 0x6E, 0xAA, 0x6F, /* 0x60-0x63 */
+	0xC3, 0xAE, 0xAA, 0x70, 0xC3, 0xAF, 0xAA, 0x71, /* 0x64-0x67 */
+	0xC3, 0xB0, 0xAA, 0x72, 0xAA, 0x73, 0xAA, 0x74, /* 0x68-0x6B */
+	0xAA, 0x75, 0xAA, 0x76, 0xAA, 0x77, 0xAA, 0x78, /* 0x6C-0x6F */
+	0xC3, 0xB1, 0xAA, 0x79, 0xAA, 0x7A, 0xAA, 0x81, /* 0x70-0x73 */
+	0xAA, 0x82, 0xC3, 0xB2, 0xAA, 0x83, 0xAA, 0x84, /* 0x74-0x77 */
+	0xAA, 0x85, 0xAA, 0x86, 0xAA, 0x87, 0xAA, 0x88, /* 0x78-0x7B */
+	0xAA, 0x89, 0xAA, 0x8A, 0xAA, 0x8B, 0xAA, 0x8C, /* 0x7C-0x7F */
+	
+	0xAA, 0x8D, 0xAA, 0x8E, 0xAA, 0x8F, 0xAA, 0x90, /* 0x80-0x83 */
+	0xAA, 0x91, 0xAA, 0x92, 0xAA, 0x93, 0xAA, 0x94, /* 0x84-0x87 */
+	0xAA, 0x95, 0xAA, 0x96, 0xAA, 0x97, 0xAA, 0x98, /* 0x88-0x8B */
+	0xAA, 0x99, 0xAA, 0x9A, 0xAA, 0x9B, 0xAA, 0x9C, /* 0x8C-0x8F */
+	0xAA, 0x9D, 0xAA, 0x9E, 0xAA, 0x9F, 0xAA, 0xA0, /* 0x90-0x93 */
+	0xAB, 0x41, 0xAB, 0x42, 0xAB, 0x43, 0xAB, 0x44, /* 0x94-0x97 */
+	0xC3, 0xB3, 0xC3, 0xB4, 0xAB, 0x45, 0xAB, 0x46, /* 0x98-0x9B */
+	0xC3, 0xB5, 0xAB, 0x47, 0xAB, 0x48, 0xAB, 0x49, /* 0x9C-0x9F */
+	0xC3, 0xB6, 0xAB, 0x4A, 0xAB, 0x4B, 0xAB, 0x4C, /* 0xA0-0xA3 */
+	0xAB, 0x4D, 0xAB, 0x4E, 0xAB, 0x4F, 0xAB, 0x50, /* 0xA4-0xA7 */
+	0xC3, 0xB7, 0xC3, 0xB8, 0xAB, 0x51, 0xC3, 0xB9, /* 0xA8-0xAB */
+	0xC3, 0xBA, 0xC3, 0xBB, 0xAB, 0x52, 0xAB, 0x53, /* 0xAC-0xAF */
+	0xAB, 0x54, 0xAB, 0x55, 0xAB, 0x56, 0xAB, 0x57, /* 0xB0-0xB3 */
+	0xC3, 0xBC, 0xC3, 0xBD, 0xAB, 0x58, 0xAB, 0x59, /* 0xB4-0xB7 */
+	0xC3, 0xBE, 0xAB, 0x5A, 0xAB, 0x61, 0xAB, 0x62, /* 0xB8-0xBB */
+	0xC3, 0xBF, 0xAB, 0x63, 0xAB, 0x64, 0xAB, 0x65, /* 0xBC-0xBF */
+	0xAB, 0x66, 0xAB, 0x67, 0xAB, 0x68, 0xAB, 0x69, /* 0xC0-0xC3 */
+	0xC3, 0xC0, 0xC3, 0xC1, 0xAB, 0x6A, 0xC3, 0xC2, /* 0xC4-0xC7 */
+	0xAB, 0x6B, 0xC3, 0xC3, 0xAB, 0x6C, 0xAB, 0x6D, /* 0xC8-0xCB */
+	0xAB, 0x6E, 0xAB, 0x6F, 0xAB, 0x70, 0xAB, 0x71, /* 0xCC-0xCF */
+	0xC3, 0xC4, 0xAB, 0x72, 0xAB, 0x73, 0xAB, 0x74, /* 0xD0-0xD3 */
+	0xC3, 0xC5, 0xAB, 0x75, 0xAB, 0x76, 0xAB, 0x77, /* 0xD4-0xD7 */
+	0xAB, 0x78, 0xAB, 0x79, 0xAB, 0x7A, 0xAB, 0x81, /* 0xD8-0xDB */
+	0xAB, 0x82, 0xAB, 0x83, 0xAB, 0x84, 0xAB, 0x85, /* 0xDC-0xDF */
+	0xAB, 0x86, 0xAB, 0x87, 0xAB, 0x88, 0xAB, 0x89, /* 0xE0-0xE3 */
+	0xC3, 0xC6, 0xAB, 0x8A, 0xAB, 0x8B, 0xAB, 0x8C, /* 0xE4-0xE7 */
+	0xAB, 0x8D, 0xAB, 0x8E, 0xAB, 0x8F, 0xAB, 0x90, /* 0xE8-0xEB */
+	0xC3, 0xC7, 0xAB, 0x91, 0xAB, 0x92, 0xAB, 0x93, /* 0xEC-0xEF */
+	0xC3, 0xC8, 0xAB, 0x94, 0xAB, 0x95, 0xAB, 0x96, /* 0xF0-0xF3 */
+	0xAB, 0x97, 0xAB, 0x98, 0xAB, 0x99, 0xAB, 0x9A, /* 0xF4-0xF7 */
+	0xAB, 0x9B, 0xAB, 0x9C, 0xAB, 0x9D, 0xAB, 0x9E, /* 0xF8-0xFB */
+	0xAB, 0x9F, 0xAB, 0xA0, 0xAC, 0x41, 0xAC, 0x42, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CD[512] = {
+	0xAC, 0x43, 0xC3, 0xC9, 0xAC, 0x44, 0xAC, 0x45, /* 0x00-0x03 */
+	0xAC, 0x46, 0xAC, 0x47, 0xAC, 0x48, 0xAC, 0x49, /* 0x04-0x07 */
+	0xC3, 0xCA, 0xC3, 0xCB, 0xAC, 0x4A, 0xAC, 0x4B, /* 0x08-0x0B */
+	0xC3, 0xCC, 0xAC, 0x4C, 0xAC, 0x4D, 0xAC, 0x4E, /* 0x0C-0x0F */
+	0xC3, 0xCD, 0xAC, 0x4F, 0xAC, 0x50, 0xAC, 0x51, /* 0x10-0x13 */
+	0xAC, 0x52, 0xAC, 0x53, 0xAC, 0x54, 0xAC, 0x55, /* 0x14-0x17 */
+	0xC3, 0xCE, 0xC3, 0xCF, 0xAC, 0x56, 0xC3, 0xD0, /* 0x18-0x1B */
+	0xAC, 0x57, 0xC3, 0xD1, 0xAC, 0x58, 0xAC, 0x59, /* 0x1C-0x1F */
+	0xAC, 0x5A, 0xAC, 0x61, 0xAC, 0x62, 0xAC, 0x63, /* 0x20-0x23 */
+	0xC3, 0xD2, 0xAC, 0x64, 0xAC, 0x65, 0xAC, 0x66, /* 0x24-0x27 */
+	0xC3, 0xD3, 0xAC, 0x67, 0xAC, 0x68, 0xAC, 0x69, /* 0x28-0x2B */
+	0xC3, 0xD4, 0xAC, 0x6A, 0xAC, 0x6B, 0xAC, 0x6C, /* 0x2C-0x2F */
+	0xAC, 0x6D, 0xAC, 0x6E, 0xAC, 0x6F, 0xAC, 0x70, /* 0x30-0x33 */
+	0xAC, 0x71, 0xAC, 0x72, 0xAC, 0x73, 0xAC, 0x74, /* 0x34-0x37 */
+	0xAC, 0x75, 0xC3, 0xD5, 0xAC, 0x76, 0xAC, 0x77, /* 0x38-0x3B */
+	0xAC, 0x78, 0xAC, 0x79, 0xAC, 0x7A, 0xAC, 0x81, /* 0x3C-0x3F */
+	0xAC, 0x82, 0xAC, 0x83, 0xAC, 0x84, 0xAC, 0x85, /* 0x40-0x43 */
+	0xAC, 0x86, 0xAC, 0x87, 0xAC, 0x88, 0xAC, 0x89, /* 0x44-0x47 */
+	0xAC, 0x8A, 0xAC, 0x8B, 0xAC, 0x8C, 0xAC, 0x8D, /* 0x48-0x4B */
+	0xAC, 0x8E, 0xAC, 0x8F, 0xAC, 0x90, 0xAC, 0x91, /* 0x4C-0x4F */
+	0xAC, 0x92, 0xAC, 0x93, 0xAC, 0x94, 0xAC, 0x95, /* 0x50-0x53 */
+	0xAC, 0x96, 0xAC, 0x97, 0xAC, 0x98, 0xAC, 0x99, /* 0x54-0x57 */
+	0xAC, 0x9A, 0xAC, 0x9B, 0xAC, 0x9C, 0xAC, 0x9D, /* 0x58-0x5B */
+	0xC3, 0xD6, 0xAC, 0x9E, 0xAC, 0x9F, 0xAC, 0xA0, /* 0x5C-0x5F */
+	0xC3, 0xD7, 0xAD, 0x41, 0xAD, 0x42, 0xAD, 0x43, /* 0x60-0x63 */
+	0xC3, 0xD8, 0xAD, 0x44, 0xAD, 0x45, 0xAD, 0x46, /* 0x64-0x67 */
+	0xAD, 0x47, 0xAD, 0x48, 0xAD, 0x49, 0xAD, 0x4A, /* 0x68-0x6B */
+	0xC3, 0xD9, 0xC3, 0xDA, 0xAD, 0x4B, 0xC3, 0xDB, /* 0x6C-0x6F */
+	0xAD, 0x4C, 0xC3, 0xDC, 0xAD, 0x4D, 0xAD, 0x4E, /* 0x70-0x73 */
+	0xAD, 0x4F, 0xAD, 0x50, 0xAD, 0x51, 0xAD, 0x52, /* 0x74-0x77 */
+	0xC3, 0xDD, 0xAD, 0x53, 0xAD, 0x54, 0xAD, 0x55, /* 0x78-0x7B */
+	0xAD, 0x56, 0xAD, 0x57, 0xAD, 0x58, 0xAD, 0x59, /* 0x7C-0x7F */
+	
+	0xAD, 0x5A, 0xAD, 0x61, 0xAD, 0x62, 0xAD, 0x63, /* 0x80-0x83 */
+	0xAD, 0x64, 0xAD, 0x65, 0xAD, 0x66, 0xAD, 0x67, /* 0x84-0x87 */
+	0xC3, 0xDE, 0xAD, 0x68, 0xAD, 0x69, 0xAD, 0x6A, /* 0x88-0x8B */
+	0xAD, 0x6B, 0xAD, 0x6C, 0xAD, 0x6D, 0xAD, 0x6E, /* 0x8C-0x8F */
+	0xAD, 0x6F, 0xAD, 0x70, 0xAD, 0x71, 0xAD, 0x72, /* 0x90-0x93 */
+	0xC3, 0xDF, 0xC3, 0xE0, 0xAD, 0x73, 0xAD, 0x74, /* 0x94-0x97 */
+	0xC3, 0xE1, 0xAD, 0x75, 0xAD, 0x76, 0xAD, 0x77, /* 0x98-0x9B */
+	0xC3, 0xE2, 0xAD, 0x78, 0xAD, 0x79, 0xAD, 0x7A, /* 0x9C-0x9F */
+	0xAD, 0x81, 0xAD, 0x82, 0xAD, 0x83, 0xAD, 0x84, /* 0xA0-0xA3 */
+	0xC3, 0xE3, 0xC3, 0xE4, 0xAD, 0x85, 0xC3, 0xE5, /* 0xA4-0xA7 */
+	0xAD, 0x86, 0xC3, 0xE6, 0xAD, 0x87, 0xAD, 0x88, /* 0xA8-0xAB */
+	0xAD, 0x89, 0xAD, 0x8A, 0xAD, 0x8B, 0xAD, 0x8C, /* 0xAC-0xAF */
+	0xC3, 0xE7, 0xAD, 0x8D, 0xAD, 0x8E, 0xAD, 0x8F, /* 0xB0-0xB3 */
+	0xAD, 0x90, 0xAD, 0x91, 0xAD, 0x92, 0xAD, 0x93, /* 0xB4-0xB7 */
+	0xAD, 0x94, 0xAD, 0x95, 0xAD, 0x96, 0xAD, 0x97, /* 0xB8-0xBB */
+	0xAD, 0x98, 0xAD, 0x99, 0xAD, 0x9A, 0xAD, 0x9B, /* 0xBC-0xBF */
+	0xAD, 0x9C, 0xAD, 0x9D, 0xAD, 0x9E, 0xAD, 0x9F, /* 0xC0-0xC3 */
+	0xC3, 0xE8, 0xAD, 0xA0, 0xAE, 0x41, 0xAE, 0x42, /* 0xC4-0xC7 */
+	0xAE, 0x43, 0xAE, 0x44, 0xAE, 0x45, 0xAE, 0x46, /* 0xC8-0xCB */
+	0xC3, 0xE9, 0xAE, 0x47, 0xAE, 0x48, 0xAE, 0x49, /* 0xCC-0xCF */
+	0xC3, 0xEA, 0xAE, 0x4A, 0xAE, 0x4B, 0xAE, 0x4C, /* 0xD0-0xD3 */
+	0xAE, 0x4D, 0xAE, 0x4E, 0xAE, 0x4F, 0xAE, 0x50, /* 0xD4-0xD7 */
+	0xAE, 0x51, 0xAE, 0x52, 0xAE, 0x53, 0xAE, 0x54, /* 0xD8-0xDB */
+	0xAE, 0x55, 0xAE, 0x56, 0xAE, 0x57, 0xAE, 0x58, /* 0xDC-0xDF */
+	0xAE, 0x59, 0xAE, 0x5A, 0xAE, 0x61, 0xAE, 0x62, /* 0xE0-0xE3 */
+	0xAE, 0x63, 0xAE, 0x64, 0xAE, 0x65, 0xAE, 0x66, /* 0xE4-0xE7 */
+	0xC3, 0xEB, 0xAE, 0x67, 0xAE, 0x68, 0xAE, 0x69, /* 0xE8-0xEB */
+	0xC3, 0xEC, 0xAE, 0x6A, 0xAE, 0x6B, 0xAE, 0x6C, /* 0xEC-0xEF */
+	0xC3, 0xED, 0xAE, 0x6D, 0xAE, 0x6E, 0xAE, 0x6F, /* 0xF0-0xF3 */
+	0xAE, 0x70, 0xAE, 0x71, 0xAE, 0x72, 0xAE, 0x73, /* 0xF4-0xF7 */
+	0xC3, 0xEE, 0xC3, 0xEF, 0xAE, 0x74, 0xC3, 0xF0, /* 0xF8-0xFB */
+	0xAE, 0x75, 0xC3, 0xF1, 0xAE, 0x76, 0xAE, 0x77, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CE[512] = {
+	0xAE, 0x78, 0xAE, 0x79, 0xAE, 0x7A, 0xAE, 0x81, /* 0x00-0x03 */
+	0xC3, 0xF2, 0xAE, 0x82, 0xAE, 0x83, 0xAE, 0x84, /* 0x04-0x07 */
+	0xC3, 0xF3, 0xAE, 0x85, 0xAE, 0x86, 0xAE, 0x87, /* 0x08-0x0B */
+	0xC3, 0xF4, 0xAE, 0x88, 0xAE, 0x89, 0xAE, 0x8A, /* 0x0C-0x0F */
+	0xAE, 0x8B, 0xAE, 0x8C, 0xAE, 0x8D, 0xAE, 0x8E, /* 0x10-0x13 */
+	0xC3, 0xF5, 0xAE, 0x8F, 0xAE, 0x90, 0xAE, 0x91, /* 0x14-0x17 */
+	0xAE, 0x92, 0xC3, 0xF6, 0xAE, 0x93, 0xAE, 0x94, /* 0x18-0x1B */
+	0xAE, 0x95, 0xAE, 0x96, 0xAE, 0x97, 0xAE, 0x98, /* 0x1C-0x1F */
+	0xC3, 0xF7, 0xC3, 0xF8, 0xAE, 0x99, 0xAE, 0x9A, /* 0x20-0x23 */
+	0xC3, 0xF9, 0xAE, 0x9B, 0xAE, 0x9C, 0xAE, 0x9D, /* 0x24-0x27 */
+	0xC3, 0xFA, 0xAE, 0x9E, 0xAE, 0x9F, 0xAE, 0xA0, /* 0x28-0x2B */
+	0xAF, 0x41, 0xAF, 0x42, 0xAF, 0x43, 0xAF, 0x44, /* 0x2C-0x2F */
+	0xC3, 0xFB, 0xC3, 0xFC, 0xAF, 0x45, 0xC3, 0xFD, /* 0x30-0x33 */
+	0xAF, 0x46, 0xC3, 0xFE, 0xAF, 0x47, 0xAF, 0x48, /* 0x34-0x37 */
+	0xAF, 0x49, 0xAF, 0x4A, 0xAF, 0x4B, 0xAF, 0x4C, /* 0x38-0x3B */
+	0xAF, 0x4D, 0xAF, 0x4E, 0xAF, 0x4F, 0xAF, 0x50, /* 0x3C-0x3F */
+	0xAF, 0x51, 0xAF, 0x52, 0xAF, 0x53, 0xAF, 0x54, /* 0x40-0x43 */
+	0xAF, 0x55, 0xAF, 0x56, 0xAF, 0x57, 0xAF, 0x58, /* 0x44-0x47 */
+	0xAF, 0x59, 0xAF, 0x5A, 0xAF, 0x61, 0xAF, 0x62, /* 0x48-0x4B */
+	0xAF, 0x63, 0xAF, 0x64, 0xAF, 0x65, 0xAF, 0x66, /* 0x4C-0x4F */
+	0xAF, 0x67, 0xAF, 0x68, 0xAF, 0x69, 0xAF, 0x6A, /* 0x50-0x53 */
+	0xAF, 0x6B, 0xAF, 0x6C, 0xAF, 0x6D, 0xAF, 0x6E, /* 0x54-0x57 */
+	0xC4, 0xA1, 0xC4, 0xA2, 0xAF, 0x6F, 0xAF, 0x70, /* 0x58-0x5B */
+	0xC4, 0xA3, 0xAF, 0x71, 0xAF, 0x72, 0xC4, 0xA4, /* 0x5C-0x5F */
+	0xC4, 0xA5, 0xC4, 0xA6, 0xAF, 0x73, 0xAF, 0x74, /* 0x60-0x63 */
+	0xAF, 0x75, 0xAF, 0x76, 0xAF, 0x77, 0xAF, 0x78, /* 0x64-0x67 */
+	0xC4, 0xA7, 0xC4, 0xA8, 0xAF, 0x79, 0xC4, 0xA9, /* 0x68-0x6B */
+	0xAF, 0x7A, 0xC4, 0xAA, 0xAF, 0x81, 0xAF, 0x82, /* 0x6C-0x6F */
+	0xAF, 0x83, 0xAF, 0x84, 0xAF, 0x85, 0xAF, 0x86, /* 0x70-0x73 */
+	0xC4, 0xAB, 0xC4, 0xAC, 0xAF, 0x87, 0xAF, 0x88, /* 0x74-0x77 */
+	0xC4, 0xAD, 0xAF, 0x89, 0xAF, 0x8A, 0xAF, 0x8B, /* 0x78-0x7B */
+	0xC4, 0xAE, 0xAF, 0x8C, 0xAF, 0x8D, 0xAF, 0x8E, /* 0x7C-0x7F */
+	
+	0xAF, 0x8F, 0xAF, 0x90, 0xAF, 0x91, 0xAF, 0x92, /* 0x80-0x83 */
+	0xC4, 0xAF, 0xC4, 0xB0, 0xAF, 0x93, 0xC4, 0xB1, /* 0x84-0x87 */
+	0xAF, 0x94, 0xC4, 0xB2, 0xAF, 0x95, 0xAF, 0x96, /* 0x88-0x8B */
+	0xAF, 0x97, 0xAF, 0x98, 0xAF, 0x99, 0xAF, 0x9A, /* 0x8C-0x8F */
+	0xC4, 0xB3, 0xC4, 0xB4, 0xAF, 0x9B, 0xAF, 0x9C, /* 0x90-0x93 */
+	0xC4, 0xB5, 0xAF, 0x9D, 0xAF, 0x9E, 0xAF, 0x9F, /* 0x94-0x97 */
+	0xC4, 0xB6, 0xAF, 0xA0, 0xB0, 0x41, 0xB0, 0x42, /* 0x98-0x9B */
+	0xB0, 0x43, 0xB0, 0x44, 0xB0, 0x45, 0xB0, 0x46, /* 0x9C-0x9F */
+	0xC4, 0xB7, 0xC4, 0xB8, 0xB0, 0x47, 0xC4, 0xB9, /* 0xA0-0xA3 */
+	0xC4, 0xBA, 0xC4, 0xBB, 0xB0, 0x48, 0xB0, 0x49, /* 0xA4-0xA7 */
+	0xB0, 0x4A, 0xB0, 0x4B, 0xB0, 0x4C, 0xB0, 0x4D, /* 0xA8-0xAB */
+	0xC4, 0xBC, 0xC4, 0xBD, 0xB0, 0x4E, 0xB0, 0x4F, /* 0xAC-0xAF */
+	0xB0, 0x50, 0xB0, 0x51, 0xB0, 0x52, 0xB0, 0x53, /* 0xB0-0xB3 */
+	0xB0, 0x54, 0xB0, 0x55, 0xB0, 0x56, 0xB0, 0x57, /* 0xB4-0xB7 */
+	0xB0, 0x58, 0xB0, 0x59, 0xB0, 0x5A, 0xB0, 0x61, /* 0xB8-0xBB */
+	0xB0, 0x62, 0xB0, 0x63, 0xB0, 0x64, 0xB0, 0x65, /* 0xBC-0xBF */
+	0xB0, 0x66, 0xC4, 0xBE, 0xB0, 0x67, 0xB0, 0x68, /* 0xC0-0xC3 */
+	0xB0, 0x69, 0xB0, 0x6A, 0xB0, 0x6B, 0xB0, 0x6C, /* 0xC4-0xC7 */
+	0xB0, 0x6D, 0xB0, 0x6E, 0xB0, 0x6F, 0xB0, 0x70, /* 0xC8-0xCB */
+	0xB0, 0x71, 0xB0, 0x72, 0xB0, 0x73, 0xB0, 0x74, /* 0xCC-0xCF */
+	0xB0, 0x75, 0xB0, 0x76, 0xB0, 0x77, 0xB0, 0x78, /* 0xD0-0xD3 */
+	0xB0, 0x79, 0xB0, 0x7A, 0xB0, 0x81, 0xB0, 0x82, /* 0xD4-0xD7 */
+	0xB0, 0x83, 0xB0, 0x84, 0xB0, 0x85, 0xB0, 0x86, /* 0xD8-0xDB */
+	0xB0, 0x87, 0xB0, 0x88, 0xB0, 0x89, 0xB0, 0x8A, /* 0xDC-0xDF */
+	0xB0, 0x8B, 0xB0, 0x8C, 0xB0, 0x8D, 0xB0, 0x8E, /* 0xE0-0xE3 */
+	0xC4, 0xBF, 0xC4, 0xC0, 0xB0, 0x8F, 0xB0, 0x90, /* 0xE4-0xE7 */
+	0xC4, 0xC1, 0xB0, 0x91, 0xB0, 0x92, 0xC4, 0xC2, /* 0xE8-0xEB */
+	0xC4, 0xC3, 0xB0, 0x93, 0xB0, 0x94, 0xB0, 0x95, /* 0xEC-0xEF */
+	0xB0, 0x96, 0xB0, 0x97, 0xB0, 0x98, 0xB0, 0x99, /* 0xF0-0xF3 */
+	0xC4, 0xC4, 0xC4, 0xC5, 0xB0, 0x9A, 0xC4, 0xC6, /* 0xF4-0xF7 */
+	0xC4, 0xC7, 0xC4, 0xC8, 0xB0, 0x9B, 0xB0, 0x9C, /* 0xF8-0xFB */
+	0xB0, 0x9D, 0xB0, 0x9E, 0xB0, 0x9F, 0xB0, 0xA0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_CF[512] = {
+	0xC4, 0xC9, 0xC4, 0xCA, 0xB1, 0x41, 0xB1, 0x42, /* 0x00-0x03 */
+	0xC4, 0xCB, 0xB1, 0x43, 0xB1, 0x44, 0xB1, 0x45, /* 0x04-0x07 */
+	0xC4, 0xCC, 0xB1, 0x46, 0xB1, 0x47, 0xB1, 0x48, /* 0x08-0x0B */
+	0xB1, 0x49, 0xB1, 0x4A, 0xB1, 0x4B, 0xB1, 0x4C, /* 0x0C-0x0F */
+	0xC4, 0xCD, 0xC4, 0xCE, 0xB1, 0x4D, 0xC4, 0xCF, /* 0x10-0x13 */
+	0xB1, 0x4E, 0xC4, 0xD0, 0xB1, 0x4F, 0xB1, 0x50, /* 0x14-0x17 */
+	0xB1, 0x51, 0xB1, 0x52, 0xB1, 0x53, 0xB1, 0x54, /* 0x18-0x1B */
+	0xC4, 0xD1, 0xB1, 0x55, 0xB1, 0x56, 0xB1, 0x57, /* 0x1C-0x1F */
+	0xC4, 0xD2, 0xB1, 0x58, 0xB1, 0x59, 0xB1, 0x5A, /* 0x20-0x23 */
+	0xC4, 0xD3, 0xB1, 0x61, 0xB1, 0x62, 0xB1, 0x63, /* 0x24-0x27 */
+	0xB1, 0x64, 0xB1, 0x65, 0xB1, 0x66, 0xB1, 0x67, /* 0x28-0x2B */
+	0xC4, 0xD4, 0xC4, 0xD5, 0xB1, 0x68, 0xC4, 0xD6, /* 0x2C-0x2F */
+	0xC4, 0xD7, 0xC4, 0xD8, 0xB1, 0x69, 0xB1, 0x6A, /* 0x30-0x33 */
+	0xB1, 0x6B, 0xB1, 0x6C, 0xB1, 0x6D, 0xB1, 0x6E, /* 0x34-0x37 */
+	0xC4, 0xD9, 0xB1, 0x6F, 0xB1, 0x70, 0xB1, 0x71, /* 0x38-0x3B */
+	0xB1, 0x72, 0xB1, 0x73, 0xB1, 0x74, 0xB1, 0x75, /* 0x3C-0x3F */
+	0xB1, 0x76, 0xB1, 0x77, 0xB1, 0x78, 0xB1, 0x79, /* 0x40-0x43 */
+	0xB1, 0x7A, 0xB1, 0x81, 0xB1, 0x82, 0xB1, 0x83, /* 0x44-0x47 */
+	0xB1, 0x84, 0xB1, 0x85, 0xB1, 0x86, 0xB1, 0x87, /* 0x48-0x4B */
+	0xB1, 0x88, 0xB1, 0x89, 0xB1, 0x8A, 0xB1, 0x8B, /* 0x4C-0x4F */
+	0xB1, 0x8C, 0xB1, 0x8D, 0xB1, 0x8E, 0xB1, 0x8F, /* 0x50-0x53 */
+	0xC4, 0xDA, 0xC4, 0xDB, 0xB1, 0x90, 0xB1, 0x91, /* 0x54-0x57 */
+	0xC4, 0xDC, 0xB1, 0x92, 0xB1, 0x93, 0xB1, 0x94, /* 0x58-0x5B */
+	0xC4, 0xDD, 0xB1, 0x95, 0xB1, 0x96, 0xB1, 0x97, /* 0x5C-0x5F */
+	0xB1, 0x98, 0xB1, 0x99, 0xB1, 0x9A, 0xB1, 0x9B, /* 0x60-0x63 */
+	0xC4, 0xDE, 0xC4, 0xDF, 0xB1, 0x9C, 0xC4, 0xE0, /* 0x64-0x67 */
+	0xB1, 0x9D, 0xC4, 0xE1, 0xB1, 0x9E, 0xB1, 0x9F, /* 0x68-0x6B */
+	0xB1, 0xA0, 0xB2, 0x41, 0xB2, 0x42, 0xB2, 0x43, /* 0x6C-0x6F */
+	0xC4, 0xE2, 0xC4, 0xE3, 0xB2, 0x44, 0xB2, 0x45, /* 0x70-0x73 */
+	0xC4, 0xE4, 0xB2, 0x46, 0xB2, 0x47, 0xB2, 0x48, /* 0x74-0x77 */
+	0xC4, 0xE5, 0xB2, 0x49, 0xB2, 0x4A, 0xB2, 0x4B, /* 0x78-0x7B */
+	0xB2, 0x4C, 0xB2, 0x4D, 0xB2, 0x4E, 0xB2, 0x4F, /* 0x7C-0x7F */
+	
+	0xC4, 0xE6, 0xB2, 0x50, 0xB2, 0x51, 0xB2, 0x52, /* 0x80-0x83 */
+	0xB2, 0x53, 0xC4, 0xE7, 0xB2, 0x54, 0xB2, 0x55, /* 0x84-0x87 */
+	0xB2, 0x56, 0xB2, 0x57, 0xB2, 0x58, 0xB2, 0x59, /* 0x88-0x8B */
+	0xC4, 0xE8, 0xB2, 0x5A, 0xB2, 0x61, 0xB2, 0x62, /* 0x8C-0x8F */
+	0xB2, 0x63, 0xB2, 0x64, 0xB2, 0x65, 0xB2, 0x66, /* 0x90-0x93 */
+	0xB2, 0x67, 0xB2, 0x68, 0xB2, 0x69, 0xB2, 0x6A, /* 0x94-0x97 */
+	0xB2, 0x6B, 0xB2, 0x6C, 0xB2, 0x6D, 0xB2, 0x6E, /* 0x98-0x9B */
+	0xB2, 0x6F, 0xB2, 0x70, 0xB2, 0x71, 0xB2, 0x72, /* 0x9C-0x9F */
+	0xB2, 0x73, 0xC4, 0xE9, 0xB2, 0x74, 0xB2, 0x75, /* 0xA0-0xA3 */
+	0xB2, 0x76, 0xB2, 0x77, 0xB2, 0x78, 0xB2, 0x79, /* 0xA4-0xA7 */
+	0xC4, 0xEA, 0xB2, 0x7A, 0xB2, 0x81, 0xB2, 0x82, /* 0xA8-0xAB */
+	0xB2, 0x83, 0xB2, 0x84, 0xB2, 0x85, 0xB2, 0x86, /* 0xAC-0xAF */
+	0xC4, 0xEB, 0xB2, 0x87, 0xB2, 0x88, 0xB2, 0x89, /* 0xB0-0xB3 */
+	0xB2, 0x8A, 0xB2, 0x8B, 0xB2, 0x8C, 0xB2, 0x8D, /* 0xB4-0xB7 */
+	0xB2, 0x8E, 0xB2, 0x8F, 0xB2, 0x90, 0xB2, 0x91, /* 0xB8-0xBB */
+	0xB2, 0x92, 0xB2, 0x93, 0xB2, 0x94, 0xB2, 0x95, /* 0xBC-0xBF */
+	0xB2, 0x96, 0xB2, 0x97, 0xB2, 0x98, 0xB2, 0x99, /* 0xC0-0xC3 */
+	0xC4, 0xEC, 0xB2, 0x9A, 0xB2, 0x9B, 0xB2, 0x9C, /* 0xC4-0xC7 */
+	0xB2, 0x9D, 0xB2, 0x9E, 0xB2, 0x9F, 0xB2, 0xA0, /* 0xC8-0xCB */
+	0xB3, 0x41, 0xB3, 0x42, 0xB3, 0x43, 0xB3, 0x44, /* 0xCC-0xCF */
+	0xB3, 0x45, 0xB3, 0x46, 0xB3, 0x47, 0xB3, 0x48, /* 0xD0-0xD3 */
+	0xB3, 0x49, 0xB3, 0x4A, 0xB3, 0x4B, 0xB3, 0x4C, /* 0xD4-0xD7 */
+	0xB3, 0x4D, 0xB3, 0x4E, 0xB3, 0x4F, 0xB3, 0x50, /* 0xD8-0xDB */
+	0xB3, 0x51, 0xB3, 0x52, 0xB3, 0x53, 0xB3, 0x54, /* 0xDC-0xDF */
+	0xC4, 0xED, 0xC4, 0xEE, 0xB3, 0x55, 0xB3, 0x56, /* 0xE0-0xE3 */
+	0xC4, 0xEF, 0xB3, 0x57, 0xB3, 0x58, 0xB3, 0x59, /* 0xE4-0xE7 */
+	0xC4, 0xF0, 0xB3, 0x5A, 0xB3, 0x61, 0xB3, 0x62, /* 0xE8-0xEB */
+	0xB3, 0x63, 0xB3, 0x64, 0xB3, 0x65, 0xB3, 0x66, /* 0xEC-0xEF */
+	0xC4, 0xF1, 0xC4, 0xF2, 0xB3, 0x67, 0xC4, 0xF3, /* 0xF0-0xF3 */
+	0xB3, 0x68, 0xC4, 0xF4, 0xB3, 0x69, 0xB3, 0x6A, /* 0xF4-0xF7 */
+	0xB3, 0x6B, 0xB3, 0x6C, 0xB3, 0x6D, 0xB3, 0x6E, /* 0xF8-0xFB */
+	0xC4, 0xF5, 0xB3, 0x6F, 0xB3, 0x70, 0xB3, 0x71, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D0[512] = {
+	0xC4, 0xF6, 0xB3, 0x72, 0xB3, 0x73, 0xB3, 0x74, /* 0x00-0x03 */
+	0xC4, 0xF7, 0xB3, 0x75, 0xB3, 0x76, 0xB3, 0x77, /* 0x04-0x07 */
+	0xB3, 0x78, 0xB3, 0x79, 0xB3, 0x7A, 0xB3, 0x81, /* 0x08-0x0B */
+	0xB3, 0x82, 0xB3, 0x83, 0xB3, 0x84, 0xB3, 0x85, /* 0x0C-0x0F */
+	0xB3, 0x86, 0xC4, 0xF8, 0xB3, 0x87, 0xB3, 0x88, /* 0x10-0x13 */
+	0xB3, 0x89, 0xB3, 0x8A, 0xB3, 0x8B, 0xB3, 0x8C, /* 0x14-0x17 */
+	0xC4, 0xF9, 0xB3, 0x8D, 0xB3, 0x8E, 0xB3, 0x8F, /* 0x18-0x1B */
+	0xB3, 0x90, 0xB3, 0x91, 0xB3, 0x92, 0xB3, 0x93, /* 0x1C-0x1F */
+	0xB3, 0x94, 0xB3, 0x95, 0xB3, 0x96, 0xB3, 0x97, /* 0x20-0x23 */
+	0xB3, 0x98, 0xB3, 0x99, 0xB3, 0x9A, 0xB3, 0x9B, /* 0x24-0x27 */
+	0xB3, 0x9C, 0xB3, 0x9D, 0xB3, 0x9E, 0xB3, 0x9F, /* 0x28-0x2B */
+	0xB3, 0xA0, 0xC4, 0xFA, 0xB4, 0x41, 0xB4, 0x42, /* 0x2C-0x2F */
+	0xB4, 0x43, 0xB4, 0x44, 0xB4, 0x45, 0xB4, 0x46, /* 0x30-0x33 */
+	0xC4, 0xFB, 0xC4, 0xFC, 0xB4, 0x47, 0xB4, 0x48, /* 0x34-0x37 */
+	0xC4, 0xFD, 0xB4, 0x49, 0xB4, 0x4A, 0xB4, 0x4B, /* 0x38-0x3B */
+	0xC4, 0xFE, 0xB4, 0x4C, 0xB4, 0x4D, 0xB4, 0x4E, /* 0x3C-0x3F */
+	0xB4, 0x4F, 0xB4, 0x50, 0xB4, 0x51, 0xB4, 0x52, /* 0x40-0x43 */
+	0xC5, 0xA1, 0xC5, 0xA2, 0xB4, 0x53, 0xC5, 0xA3, /* 0x44-0x47 */
+	0xB4, 0x54, 0xC5, 0xA4, 0xB4, 0x55, 0xB4, 0x56, /* 0x48-0x4B */
+	0xB4, 0x57, 0xB4, 0x58, 0xB4, 0x59, 0xB4, 0x5A, /* 0x4C-0x4F */
+	0xC5, 0xA5, 0xB4, 0x61, 0xB4, 0x62, 0xB4, 0x63, /* 0x50-0x53 */
+	0xC5, 0xA6, 0xB4, 0x64, 0xB4, 0x65, 0xB4, 0x66, /* 0x54-0x57 */
+	0xC5, 0xA7, 0xB4, 0x67, 0xB4, 0x68, 0xB4, 0x69, /* 0x58-0x5B */
+	0xB4, 0x6A, 0xB4, 0x6B, 0xB4, 0x6C, 0xB4, 0x6D, /* 0x5C-0x5F */
+	0xC5, 0xA8, 0xB4, 0x6E, 0xB4, 0x6F, 0xB4, 0x70, /* 0x60-0x63 */
+	0xB4, 0x71, 0xB4, 0x72, 0xB4, 0x73, 0xB4, 0x74, /* 0x64-0x67 */
+	0xB4, 0x75, 0xB4, 0x76, 0xB4, 0x77, 0xB4, 0x78, /* 0x68-0x6B */
+	0xC5, 0xA9, 0xC5, 0xAA, 0xB4, 0x79, 0xB4, 0x7A, /* 0x6C-0x6F */
+	0xC5, 0xAB, 0xB4, 0x81, 0xB4, 0x82, 0xB4, 0x83, /* 0x70-0x73 */
+	0xC5, 0xAC, 0xB4, 0x84, 0xB4, 0x85, 0xB4, 0x86, /* 0x74-0x77 */
+	0xB4, 0x87, 0xB4, 0x88, 0xB4, 0x89, 0xB4, 0x8A, /* 0x78-0x7B */
+	0xC5, 0xAD, 0xC5, 0xAE, 0xB4, 0x8B, 0xB4, 0x8C, /* 0x7C-0x7F */
+	
+	0xB4, 0x8D, 0xC5, 0xAF, 0xB4, 0x8E, 0xB4, 0x8F, /* 0x80-0x83 */
+	0xB4, 0x90, 0xB4, 0x91, 0xB4, 0x92, 0xB4, 0x93, /* 0x84-0x87 */
+	0xB4, 0x94, 0xB4, 0x95, 0xB4, 0x96, 0xB4, 0x97, /* 0x88-0x8B */
+	0xB4, 0x98, 0xB4, 0x99, 0xB4, 0x9A, 0xB4, 0x9B, /* 0x8C-0x8F */
+	0xB4, 0x9C, 0xB4, 0x9D, 0xB4, 0x9E, 0xB4, 0x9F, /* 0x90-0x93 */
+	0xB4, 0xA0, 0xB5, 0x41, 0xB5, 0x42, 0xB5, 0x43, /* 0x94-0x97 */
+	0xB5, 0x44, 0xB5, 0x45, 0xB5, 0x46, 0xB5, 0x47, /* 0x98-0x9B */
+	0xB5, 0x48, 0xB5, 0x49, 0xB5, 0x4A, 0xB5, 0x4B, /* 0x9C-0x9F */
+	0xB5, 0x4C, 0xB5, 0x4D, 0xB5, 0x4E, 0xB5, 0x4F, /* 0xA0-0xA3 */
+	0xC5, 0xB0, 0xC5, 0xB1, 0xB5, 0x50, 0xB5, 0x51, /* 0xA4-0xA7 */
+	0xC5, 0xB2, 0xB5, 0x52, 0xB5, 0x53, 0xB5, 0x54, /* 0xA8-0xAB */
+	0xC5, 0xB3, 0xB5, 0x55, 0xB5, 0x56, 0xB5, 0x57, /* 0xAC-0xAF */
+	0xB5, 0x58, 0xB5, 0x59, 0xB5, 0x5A, 0xB5, 0x61, /* 0xB0-0xB3 */
+	0xC5, 0xB4, 0xC5, 0xB5, 0xB5, 0x62, 0xC5, 0xB6, /* 0xB4-0xB7 */
+	0xB5, 0x63, 0xC5, 0xB7, 0xB5, 0x64, 0xB5, 0x65, /* 0xB8-0xBB */
+	0xB5, 0x66, 0xB5, 0x67, 0xB5, 0x68, 0xB5, 0x69, /* 0xBC-0xBF */
+	0xC5, 0xB8, 0xC5, 0xB9, 0xB5, 0x6A, 0xB5, 0x6B, /* 0xC0-0xC3 */
+	0xC5, 0xBA, 0xB5, 0x6C, 0xB5, 0x6D, 0xB5, 0x6E, /* 0xC4-0xC7 */
+	0xC5, 0xBB, 0xC5, 0xBC, 0xB5, 0x6F, 0xB5, 0x70, /* 0xC8-0xCB */
+	0xB5, 0x71, 0xB5, 0x72, 0xB5, 0x73, 0xB5, 0x74, /* 0xCC-0xCF */
+	0xC5, 0xBD, 0xC5, 0xBE, 0xB5, 0x75, 0xC5, 0xBF, /* 0xD0-0xD3 */
+	0xC5, 0xC0, 0xC5, 0xC1, 0xB5, 0x76, 0xB5, 0x77, /* 0xD4-0xD7 */
+	0xB5, 0x78, 0xB5, 0x79, 0xB5, 0x7A, 0xB5, 0x81, /* 0xD8-0xDB */
+	0xC5, 0xC2, 0xC5, 0xC3, 0xB5, 0x82, 0xB5, 0x83, /* 0xDC-0xDF */
+	0xC5, 0xC4, 0xB5, 0x84, 0xB5, 0x85, 0xB5, 0x86, /* 0xE0-0xE3 */
+	0xC5, 0xC5, 0xB5, 0x87, 0xB5, 0x88, 0xB5, 0x89, /* 0xE4-0xE7 */
+	0xB5, 0x8A, 0xB5, 0x8B, 0xB5, 0x8C, 0xB5, 0x8D, /* 0xE8-0xEB */
+	0xC5, 0xC6, 0xC5, 0xC7, 0xB5, 0x8E, 0xC5, 0xC8, /* 0xEC-0xEF */
+	0xC5, 0xC9, 0xC5, 0xCA, 0xB5, 0x8F, 0xB5, 0x90, /* 0xF0-0xF3 */
+	0xB5, 0x91, 0xB5, 0x92, 0xB5, 0x93, 0xB5, 0x94, /* 0xF4-0xF7 */
+	0xC5, 0xCB, 0xB5, 0x95, 0xB5, 0x96, 0xB5, 0x97, /* 0xF8-0xFB */
+	0xB5, 0x98, 0xB5, 0x99, 0xB5, 0x9A, 0xB5, 0x9B, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D1[512] = {
+	0xB5, 0x9C, 0xB5, 0x9D, 0xB5, 0x9E, 0xB5, 0x9F, /* 0x00-0x03 */
+	0xB5, 0xA0, 0xB6, 0x41, 0xB6, 0x42, 0xB6, 0x43, /* 0x04-0x07 */
+	0xB6, 0x44, 0xB6, 0x45, 0xB6, 0x46, 0xB6, 0x47, /* 0x08-0x0B */
+	0xB6, 0x48, 0xC5, 0xCC, 0xB6, 0x49, 0xB6, 0x4A, /* 0x0C-0x0F */
+	0xB6, 0x4B, 0xB6, 0x4C, 0xB6, 0x4D, 0xB6, 0x4E, /* 0x10-0x13 */
+	0xB6, 0x4F, 0xB6, 0x50, 0xB6, 0x51, 0xB6, 0x52, /* 0x14-0x17 */
+	0xB6, 0x53, 0xB6, 0x54, 0xB6, 0x55, 0xB6, 0x56, /* 0x18-0x1B */
+	0xB6, 0x57, 0xB6, 0x58, 0xB6, 0x59, 0xB6, 0x5A, /* 0x1C-0x1F */
+	0xB6, 0x61, 0xB6, 0x62, 0xB6, 0x63, 0xB6, 0x64, /* 0x20-0x23 */
+	0xB6, 0x65, 0xB6, 0x66, 0xB6, 0x67, 0xB6, 0x68, /* 0x24-0x27 */
+	0xB6, 0x69, 0xB6, 0x6A, 0xB6, 0x6B, 0xB6, 0x6C, /* 0x28-0x2B */
+	0xB6, 0x6D, 0xB6, 0x6E, 0xB6, 0x6F, 0xB6, 0x70, /* 0x2C-0x2F */
+	0xC5, 0xCD, 0xC5, 0xCE, 0xB6, 0x71, 0xB6, 0x72, /* 0x30-0x33 */
+	0xC5, 0xCF, 0xB6, 0x73, 0xB6, 0x74, 0xB6, 0x75, /* 0x34-0x37 */
+	0xC5, 0xD0, 0xB6, 0x76, 0xC5, 0xD1, 0xB6, 0x77, /* 0x38-0x3B */
+	0xB6, 0x78, 0xB6, 0x79, 0xB6, 0x7A, 0xB6, 0x81, /* 0x3C-0x3F */
+	0xC5, 0xD2, 0xC5, 0xD3, 0xB6, 0x82, 0xC5, 0xD4, /* 0x40-0x43 */
+	0xC5, 0xD5, 0xC5, 0xD6, 0xB6, 0x83, 0xB6, 0x84, /* 0x44-0x47 */
+	0xB6, 0x85, 0xB6, 0x86, 0xB6, 0x87, 0xB6, 0x88, /* 0x48-0x4B */
+	0xC5, 0xD7, 0xC5, 0xD8, 0xB6, 0x89, 0xB6, 0x8A, /* 0x4C-0x4F */
+	0xC5, 0xD9, 0xB6, 0x8B, 0xB6, 0x8C, 0xB6, 0x8D, /* 0x50-0x53 */
+	0xC5, 0xDA, 0xB6, 0x8E, 0xB6, 0x8F, 0xB6, 0x90, /* 0x54-0x57 */
+	0xB6, 0x91, 0xB6, 0x92, 0xB6, 0x93, 0xB6, 0x94, /* 0x58-0x5B */
+	0xC5, 0xDB, 0xC5, 0xDC, 0xB6, 0x95, 0xC5, 0xDD, /* 0x5C-0x5F */
+	0xB6, 0x96, 0xC5, 0xDE, 0xB6, 0x97, 0xB6, 0x98, /* 0x60-0x63 */
+	0xB6, 0x99, 0xB6, 0x9A, 0xB6, 0x9B, 0xB6, 0x9C, /* 0x64-0x67 */
+	0xC5, 0xDF, 0xB6, 0x9D, 0xB6, 0x9E, 0xB6, 0x9F, /* 0x68-0x6B */
+	0xC5, 0xE0, 0xB6, 0xA0, 0xB7, 0x41, 0xB7, 0x42, /* 0x6C-0x6F */
+	0xB7, 0x43, 0xB7, 0x44, 0xB7, 0x45, 0xB7, 0x46, /* 0x70-0x73 */
+	0xB7, 0x47, 0xB7, 0x48, 0xB7, 0x49, 0xB7, 0x4A, /* 0x74-0x77 */
+	0xB7, 0x4B, 0xB7, 0x4C, 0xB7, 0x4D, 0xB7, 0x4E, /* 0x78-0x7B */
+	0xC5, 0xE1, 0xB7, 0x4F, 0xB7, 0x50, 0xB7, 0x51, /* 0x7C-0x7F */
+	
+	0xB7, 0x52, 0xB7, 0x53, 0xB7, 0x54, 0xB7, 0x55, /* 0x80-0x83 */
+	0xC5, 0xE2, 0xB7, 0x56, 0xB7, 0x57, 0xB7, 0x58, /* 0x84-0x87 */
+	0xC5, 0xE3, 0xB7, 0x59, 0xB7, 0x5A, 0xB7, 0x61, /* 0x88-0x8B */
+	0xB7, 0x62, 0xB7, 0x63, 0xB7, 0x64, 0xB7, 0x65, /* 0x8C-0x8F */
+	0xB7, 0x66, 0xB7, 0x67, 0xB7, 0x68, 0xB7, 0x69, /* 0x90-0x93 */
+	0xB7, 0x6A, 0xB7, 0x6B, 0xB7, 0x6C, 0xB7, 0x6D, /* 0x94-0x97 */
+	0xB7, 0x6E, 0xB7, 0x6F, 0xB7, 0x70, 0xB7, 0x71, /* 0x98-0x9B */
+	0xB7, 0x72, 0xB7, 0x73, 0xB7, 0x74, 0xB7, 0x75, /* 0x9C-0x9F */
+	0xC5, 0xE4, 0xC5, 0xE5, 0xB7, 0x76, 0xB7, 0x77, /* 0xA0-0xA3 */
+	0xC5, 0xE6, 0xB7, 0x78, 0xB7, 0x79, 0xB7, 0x7A, /* 0xA4-0xA7 */
+	0xC5, 0xE7, 0xB7, 0x81, 0xB7, 0x82, 0xB7, 0x83, /* 0xA8-0xAB */
+	0xB7, 0x84, 0xB7, 0x85, 0xB7, 0x86, 0xB7, 0x87, /* 0xAC-0xAF */
+	0xC5, 0xE8, 0xC5, 0xE9, 0xB7, 0x88, 0xC5, 0xEA, /* 0xB0-0xB3 */
+	0xB7, 0x89, 0xC5, 0xEB, 0xB7, 0x8A, 0xB7, 0x8B, /* 0xB4-0xB7 */
+	0xB7, 0x8C, 0xB7, 0x8D, 0xC5, 0xEC, 0xB7, 0x8E, /* 0xB8-0xBB */
+	0xC5, 0xED, 0xB7, 0x8F, 0xB7, 0x90, 0xB7, 0x91, /* 0xBC-0xBF */
+	0xC5, 0xEE, 0xB7, 0x92, 0xB7, 0x93, 0xB7, 0x94, /* 0xC0-0xC3 */
+	0xB7, 0x95, 0xB7, 0x96, 0xB7, 0x97, 0xB7, 0x98, /* 0xC4-0xC7 */
+	0xB7, 0x99, 0xB7, 0x9A, 0xB7, 0x9B, 0xB7, 0x9C, /* 0xC8-0xCB */
+	0xB7, 0x9D, 0xB7, 0x9E, 0xB7, 0x9F, 0xB7, 0xA0, /* 0xCC-0xCF */
+	0xB8, 0x41, 0xB8, 0x42, 0xB8, 0x43, 0xB8, 0x44, /* 0xD0-0xD3 */
+	0xB8, 0x45, 0xB8, 0x46, 0xB8, 0x47, 0xB8, 0x48, /* 0xD4-0xD7 */
+	0xC5, 0xEF, 0xB8, 0x49, 0xB8, 0x4A, 0xB8, 0x4B, /* 0xD8-0xDB */
+	0xB8, 0x4C, 0xB8, 0x4D, 0xB8, 0x4E, 0xB8, 0x4F, /* 0xDC-0xDF */
+	0xB8, 0x50, 0xB8, 0x51, 0xB8, 0x52, 0xB8, 0x53, /* 0xE0-0xE3 */
+	0xB8, 0x54, 0xB8, 0x55, 0xB8, 0x56, 0xB8, 0x57, /* 0xE4-0xE7 */
+	0xB8, 0x58, 0xB8, 0x59, 0xB8, 0x5A, 0xB8, 0x61, /* 0xE8-0xEB */
+	0xB8, 0x62, 0xB8, 0x63, 0xB8, 0x64, 0xB8, 0x65, /* 0xEC-0xEF */
+	0xB8, 0x66, 0xB8, 0x67, 0xB8, 0x68, 0xB8, 0x69, /* 0xF0-0xF3 */
+	0xC5, 0xF0, 0xB8, 0x6A, 0xB8, 0x6B, 0xB8, 0x6C, /* 0xF4-0xF7 */
+	0xC5, 0xF1, 0xB8, 0x6D, 0xB8, 0x6E, 0xB8, 0x6F, /* 0xF8-0xFB */
+	0xB8, 0x70, 0xB8, 0x71, 0xB8, 0x72, 0xB8, 0x73, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D2[512] = {
+	0xB8, 0x74, 0xB8, 0x75, 0xB8, 0x76, 0xB8, 0x77, /* 0x00-0x03 */
+	0xB8, 0x78, 0xB8, 0x79, 0xB8, 0x7A, 0xC5, 0xF2, /* 0x04-0x07 */
+	0xB8, 0x81, 0xC5, 0xF3, 0xB8, 0x82, 0xB8, 0x83, /* 0x08-0x0B */
+	0xB8, 0x84, 0xB8, 0x85, 0xB8, 0x86, 0xB8, 0x87, /* 0x0C-0x0F */
+	0xC5, 0xF4, 0xB8, 0x88, 0xB8, 0x89, 0xB8, 0x8A, /* 0x10-0x13 */
+	0xB8, 0x8B, 0xB8, 0x8C, 0xB8, 0x8D, 0xB8, 0x8E, /* 0x14-0x17 */
+	0xB8, 0x8F, 0xB8, 0x90, 0xB8, 0x91, 0xB8, 0x92, /* 0x18-0x1B */
+	0xB8, 0x93, 0xB8, 0x94, 0xB8, 0x95, 0xB8, 0x96, /* 0x1C-0x1F */
+	0xB8, 0x97, 0xB8, 0x98, 0xB8, 0x99, 0xB8, 0x9A, /* 0x20-0x23 */
+	0xB8, 0x9B, 0xB8, 0x9C, 0xB8, 0x9D, 0xB8, 0x9E, /* 0x24-0x27 */
+	0xB8, 0x9F, 0xB8, 0xA0, 0xB9, 0x41, 0xB9, 0x42, /* 0x28-0x2B */
+	0xC5, 0xF5, 0xC5, 0xF6, 0xB9, 0x43, 0xB9, 0x44, /* 0x2C-0x2F */
+	0xC5, 0xF7, 0xB9, 0x45, 0xB9, 0x46, 0xB9, 0x47, /* 0x30-0x33 */
+	0xC5, 0xF8, 0xB9, 0x48, 0xB9, 0x49, 0xB9, 0x4A, /* 0x34-0x37 */
+	0xB9, 0x4B, 0xB9, 0x4C, 0xB9, 0x4D, 0xB9, 0x4E, /* 0x38-0x3B */
+	0xC5, 0xF9, 0xC5, 0xFA, 0xB9, 0x4F, 0xC5, 0xFB, /* 0x3C-0x3F */
+	0xB9, 0x50, 0xC5, 0xFC, 0xB9, 0x51, 0xB9, 0x52, /* 0x40-0x43 */
+	0xB9, 0x53, 0xB9, 0x54, 0xB9, 0x55, 0xB9, 0x56, /* 0x44-0x47 */
+	0xC5, 0xFD, 0xB9, 0x57, 0xB9, 0x58, 0xB9, 0x59, /* 0x48-0x4B */
+	0xB9, 0x5A, 0xB9, 0x61, 0xB9, 0x62, 0xB9, 0x63, /* 0x4C-0x4F */
+	0xB9, 0x64, 0xB9, 0x65, 0xB9, 0x66, 0xB9, 0x67, /* 0x50-0x53 */
+	0xB9, 0x68, 0xB9, 0x69, 0xB9, 0x6A, 0xB9, 0x6B, /* 0x54-0x57 */
+	0xB9, 0x6C, 0xB9, 0x6D, 0xB9, 0x6E, 0xB9, 0x6F, /* 0x58-0x5B */
+	0xC5, 0xFE, 0xB9, 0x70, 0xB9, 0x71, 0xB9, 0x72, /* 0x5C-0x5F */
+	0xB9, 0x73, 0xB9, 0x74, 0xB9, 0x75, 0xB9, 0x76, /* 0x60-0x63 */
+	0xC6, 0xA1, 0xB9, 0x77, 0xB9, 0x78, 0xB9, 0x79, /* 0x64-0x67 */
+	0xB9, 0x7A, 0xB9, 0x81, 0xB9, 0x82, 0xB9, 0x83, /* 0x68-0x6B */
+	0xB9, 0x84, 0xB9, 0x85, 0xB9, 0x86, 0xB9, 0x87, /* 0x6C-0x6F */
+	0xB9, 0x88, 0xB9, 0x89, 0xB9, 0x8A, 0xB9, 0x8B, /* 0x70-0x73 */
+	0xB9, 0x8C, 0xB9, 0x8D, 0xB9, 0x8E, 0xB9, 0x8F, /* 0x74-0x77 */
+	0xB9, 0x90, 0xB9, 0x91, 0xB9, 0x92, 0xB9, 0x93, /* 0x78-0x7B */
+	0xB9, 0x94, 0xB9, 0x95, 0xB9, 0x96, 0xB9, 0x97, /* 0x7C-0x7F */
+	
+	0xC6, 0xA2, 0xC6, 0xA3, 0xB9, 0x98, 0xB9, 0x99, /* 0x80-0x83 */
+	0xC6, 0xA4, 0xB9, 0x9A, 0xB9, 0x9B, 0xB9, 0x9C, /* 0x84-0x87 */
+	0xC6, 0xA5, 0xB9, 0x9D, 0xB9, 0x9E, 0xB9, 0x9F, /* 0x88-0x8B */
+	0xB9, 0xA0, 0xBA, 0x41, 0xBA, 0x42, 0xBA, 0x43, /* 0x8C-0x8F */
+	0xC6, 0xA6, 0xC6, 0xA7, 0xBA, 0x44, 0xBA, 0x45, /* 0x90-0x93 */
+	0xBA, 0x46, 0xC6, 0xA8, 0xBA, 0x47, 0xBA, 0x48, /* 0x94-0x97 */
+	0xBA, 0x49, 0xBA, 0x4A, 0xBA, 0x4B, 0xBA, 0x4C, /* 0x98-0x9B */
+	0xC6, 0xA9, 0xBA, 0x4D, 0xBA, 0x4E, 0xBA, 0x4F, /* 0x9C-0x9F */
+	0xC6, 0xAA, 0xBA, 0x50, 0xBA, 0x51, 0xBA, 0x52, /* 0xA0-0xA3 */
+	0xC6, 0xAB, 0xBA, 0x53, 0xBA, 0x54, 0xBA, 0x55, /* 0xA4-0xA7 */
+	0xBA, 0x56, 0xBA, 0x57, 0xBA, 0x58, 0xBA, 0x59, /* 0xA8-0xAB */
+	0xC6, 0xAC, 0xBA, 0x5A, 0xBA, 0x61, 0xBA, 0x62, /* 0xAC-0xAF */
+	0xBA, 0x63, 0xC6, 0xAD, 0xBA, 0x64, 0xBA, 0x65, /* 0xB0-0xB3 */
+	0xBA, 0x66, 0xBA, 0x67, 0xBA, 0x68, 0xBA, 0x69, /* 0xB4-0xB7 */
+	0xC6, 0xAE, 0xC6, 0xAF, 0xBA, 0x6A, 0xBA, 0x6B, /* 0xB8-0xBB */
+	0xC6, 0xB0, 0xBA, 0x6C, 0xBA, 0x6D, 0xC6, 0xB1, /* 0xBC-0xBF */
+	0xC6, 0xB2, 0xBA, 0x6E, 0xC6, 0xB3, 0xBA, 0x6F, /* 0xC0-0xC3 */
+	0xBA, 0x70, 0xBA, 0x71, 0xBA, 0x72, 0xBA, 0x73, /* 0xC4-0xC7 */
+	0xC6, 0xB4, 0xC6, 0xB5, 0xBA, 0x74, 0xC6, 0xB6, /* 0xC8-0xCB */
+	0xBA, 0x75, 0xBA, 0x76, 0xBA, 0x77, 0xBA, 0x78, /* 0xCC-0xCF */
+	0xBA, 0x79, 0xBA, 0x7A, 0xBA, 0x81, 0xBA, 0x82, /* 0xD0-0xD3 */
+	0xC6, 0xB7, 0xBA, 0x83, 0xBA, 0x84, 0xBA, 0x85, /* 0xD4-0xD7 */
+	0xC6, 0xB8, 0xBA, 0x86, 0xBA, 0x87, 0xBA, 0x88, /* 0xD8-0xDB */
+	0xC6, 0xB9, 0xBA, 0x89, 0xBA, 0x8A, 0xBA, 0x8B, /* 0xDC-0xDF */
+	0xBA, 0x8C, 0xBA, 0x8D, 0xBA, 0x8E, 0xBA, 0x8F, /* 0xE0-0xE3 */
+	0xC6, 0xBA, 0xC6, 0xBB, 0xBA, 0x90, 0xBA, 0x91, /* 0xE4-0xE7 */
+	0xBA, 0x92, 0xBA, 0x93, 0xBA, 0x94, 0xBA, 0x95, /* 0xE8-0xEB */
+	0xBA, 0x96, 0xBA, 0x97, 0xBA, 0x98, 0xBA, 0x99, /* 0xEC-0xEF */
+	0xC6, 0xBC, 0xC6, 0xBD, 0xBA, 0x9A, 0xBA, 0x9B, /* 0xF0-0xF3 */
+	0xC6, 0xBE, 0xBA, 0x9C, 0xBA, 0x9D, 0xBA, 0x9E, /* 0xF4-0xF7 */
+	0xC6, 0xBF, 0xBA, 0x9F, 0xBA, 0xA0, 0xBB, 0x41, /* 0xF8-0xFB */
+	0xBB, 0x42, 0xBB, 0x43, 0xBB, 0x44, 0xBB, 0x45, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D3[512] = {
+	0xC6, 0xC0, 0xC6, 0xC1, 0xBB, 0x46, 0xC6, 0xC2, /* 0x00-0x03 */
+	0xBB, 0x47, 0xC6, 0xC3, 0xBB, 0x48, 0xBB, 0x49, /* 0x04-0x07 */
+	0xBB, 0x4A, 0xBB, 0x4B, 0xBB, 0x4C, 0xBB, 0x4D, /* 0x08-0x0B */
+	0xC6, 0xC4, 0xC6, 0xC5, 0xC6, 0xC6, 0xBB, 0x4E, /* 0x0C-0x0F */
+	0xC6, 0xC7, 0xBB, 0x4F, 0xBB, 0x50, 0xBB, 0x51, /* 0x10-0x13 */
+	0xC6, 0xC8, 0xBB, 0x52, 0xC6, 0xC9, 0xBB, 0x53, /* 0x14-0x17 */
+	0xBB, 0x54, 0xBB, 0x55, 0xBB, 0x56, 0xBB, 0x57, /* 0x18-0x1B */
+	0xC6, 0xCA, 0xC6, 0xCB, 0xBB, 0x58, 0xC6, 0xCC, /* 0x1C-0x1F */
+	0xC6, 0xCD, 0xC6, 0xCE, 0xBB, 0x59, 0xBB, 0x5A, /* 0x20-0x23 */
+	0xBB, 0x61, 0xC6, 0xCF, 0xBB, 0x62, 0xBB, 0x63, /* 0x24-0x27 */
+	0xC6, 0xD0, 0xC6, 0xD1, 0xBB, 0x64, 0xBB, 0x65, /* 0x28-0x2B */
+	0xC6, 0xD2, 0xBB, 0x66, 0xBB, 0x67, 0xBB, 0x68, /* 0x2C-0x2F */
+	0xC6, 0xD3, 0xBB, 0x69, 0xBB, 0x6A, 0xBB, 0x6B, /* 0x30-0x33 */
+	0xBB, 0x6C, 0xBB, 0x6D, 0xBB, 0x6E, 0xBB, 0x6F, /* 0x34-0x37 */
+	0xC6, 0xD4, 0xC6, 0xD5, 0xBB, 0x70, 0xC6, 0xD6, /* 0x38-0x3B */
+	0xC6, 0xD7, 0xC6, 0xD8, 0xBB, 0x71, 0xBB, 0x72, /* 0x3C-0x3F */
+	0xBB, 0x73, 0xBB, 0x74, 0xBB, 0x75, 0xBB, 0x76, /* 0x40-0x43 */
+	0xC6, 0xD9, 0xC6, 0xDA, 0xBB, 0x77, 0xBB, 0x78, /* 0x44-0x47 */
+	0xBB, 0x79, 0xBB, 0x7A, 0xBB, 0x81, 0xBB, 0x82, /* 0x48-0x4B */
+	0xBB, 0x83, 0xBB, 0x84, 0xBB, 0x85, 0xBB, 0x86, /* 0x4C-0x4F */
+	0xBB, 0x87, 0xBB, 0x88, 0xBB, 0x89, 0xBB, 0x8A, /* 0x50-0x53 */
+	0xBB, 0x8B, 0xBB, 0x8C, 0xBB, 0x8D, 0xBB, 0x8E, /* 0x54-0x57 */
+	0xBB, 0x8F, 0xBB, 0x90, 0xBB, 0x91, 0xBB, 0x92, /* 0x58-0x5B */
+	0xBB, 0x93, 0xBB, 0x94, 0xBB, 0x95, 0xBB, 0x96, /* 0x5C-0x5F */
+	0xBB, 0x97, 0xBB, 0x98, 0xBB, 0x99, 0xBB, 0x9A, /* 0x60-0x63 */
+	0xBB, 0x9B, 0xBB, 0x9C, 0xBB, 0x9D, 0xBB, 0x9E, /* 0x64-0x67 */
+	0xBB, 0x9F, 0xBB, 0xA0, 0xBC, 0x41, 0xBC, 0x42, /* 0x68-0x6B */
+	0xBC, 0x43, 0xBC, 0x44, 0xBC, 0x45, 0xBC, 0x46, /* 0x6C-0x6F */
+	0xBC, 0x47, 0xBC, 0x48, 0xBC, 0x49, 0xBC, 0x4A, /* 0x70-0x73 */
+	0xBC, 0x4B, 0xBC, 0x4C, 0xBC, 0x4D, 0xBC, 0x4E, /* 0x74-0x77 */
+	0xBC, 0x4F, 0xBC, 0x50, 0xBC, 0x51, 0xBC, 0x52, /* 0x78-0x7B */
+	0xC6, 0xDB, 0xC6, 0xDC, 0xBC, 0x53, 0xBC, 0x54, /* 0x7C-0x7F */
+	
+	0xC6, 0xDD, 0xBC, 0x55, 0xBC, 0x56, 0xBC, 0x57, /* 0x80-0x83 */
+	0xC6, 0xDE, 0xBC, 0x58, 0xBC, 0x59, 0xBC, 0x5A, /* 0x84-0x87 */
+	0xBC, 0x61, 0xBC, 0x62, 0xBC, 0x63, 0xBC, 0x64, /* 0x88-0x8B */
+	0xC6, 0xDF, 0xC6, 0xE0, 0xBC, 0x65, 0xC6, 0xE1, /* 0x8C-0x8F */
+	0xC6, 0xE2, 0xC6, 0xE3, 0xBC, 0x66, 0xBC, 0x67, /* 0x90-0x93 */
+	0xBC, 0x68, 0xBC, 0x69, 0xBC, 0x6A, 0xBC, 0x6B, /* 0x94-0x97 */
+	0xC6, 0xE4, 0xC6, 0xE5, 0xBC, 0x6C, 0xBC, 0x6D, /* 0x98-0x9B */
+	0xC6, 0xE6, 0xBC, 0x6E, 0xBC, 0x6F, 0xBC, 0x70, /* 0x9C-0x9F */
+	0xC6, 0xE7, 0xBC, 0x71, 0xBC, 0x72, 0xBC, 0x73, /* 0xA0-0xA3 */
+	0xBC, 0x74, 0xBC, 0x75, 0xBC, 0x76, 0xBC, 0x77, /* 0xA4-0xA7 */
+	0xC6, 0xE8, 0xC6, 0xE9, 0xBC, 0x78, 0xC6, 0xEA, /* 0xA8-0xAB */
+	0xBC, 0x79, 0xC6, 0xEB, 0xBC, 0x7A, 0xBC, 0x81, /* 0xAC-0xAF */
+	0xBC, 0x82, 0xBC, 0x83, 0xBC, 0x84, 0xBC, 0x85, /* 0xB0-0xB3 */
+	0xC6, 0xEC, 0xBC, 0x86, 0xBC, 0x87, 0xBC, 0x88, /* 0xB4-0xB7 */
+	0xC6, 0xED, 0xBC, 0x89, 0xBC, 0x8A, 0xBC, 0x8B, /* 0xB8-0xBB */
+	0xC6, 0xEE, 0xBC, 0x8C, 0xBC, 0x8D, 0xBC, 0x8E, /* 0xBC-0xBF */
+	0xBC, 0x8F, 0xBC, 0x90, 0xBC, 0x91, 0xBC, 0x92, /* 0xC0-0xC3 */
+	0xC6, 0xEF, 0xC6, 0xF0, 0xBC, 0x93, 0xBC, 0x94, /* 0xC4-0xC7 */
+	0xC6, 0xF1, 0xC6, 0xF2, 0xBC, 0x95, 0xBC, 0x96, /* 0xC8-0xCB */
+	0xBC, 0x97, 0xBC, 0x98, 0xBC, 0x99, 0xBC, 0x9A, /* 0xCC-0xCF */
+	0xC6, 0xF3, 0xBC, 0x9B, 0xBC, 0x9C, 0xBC, 0x9D, /* 0xD0-0xD3 */
+	0xBC, 0x9E, 0xBC, 0x9F, 0xBC, 0xA0, 0xBD, 0x41, /* 0xD4-0xD7 */
+	0xC6, 0xF4, 0xBD, 0x42, 0xBD, 0x43, 0xBD, 0x44, /* 0xD8-0xDB */
+	0xBD, 0x45, 0xBD, 0x46, 0xBD, 0x47, 0xBD, 0x48, /* 0xDC-0xDF */
+	0xBD, 0x49, 0xC6, 0xF5, 0xBD, 0x4A, 0xC6, 0xF6, /* 0xE0-0xE3 */
+	0xBD, 0x4B, 0xBD, 0x4C, 0xBD, 0x4D, 0xBD, 0x4E, /* 0xE4-0xE7 */
+	0xBD, 0x4F, 0xBD, 0x50, 0xBD, 0x51, 0xBD, 0x52, /* 0xE8-0xEB */
+	0xC6, 0xF7, 0xC6, 0xF8, 0xBD, 0x53, 0xBD, 0x54, /* 0xEC-0xEF */
+	0xC6, 0xF9, 0xBD, 0x55, 0xBD, 0x56, 0xBD, 0x57, /* 0xF0-0xF3 */
+	0xC6, 0xFA, 0xBD, 0x58, 0xBD, 0x59, 0xBD, 0x5A, /* 0xF4-0xF7 */
+	0xBD, 0x61, 0xBD, 0x62, 0xBD, 0x63, 0xBD, 0x64, /* 0xF8-0xFB */
+	0xC6, 0xFB, 0xC6, 0xFC, 0xBD, 0x65, 0xC6, 0xFD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D4[512] = {
+	0xBD, 0x66, 0xC6, 0xFE, 0xBD, 0x67, 0xBD, 0x68, /* 0x00-0x03 */
+	0xBD, 0x69, 0xBD, 0x6A, 0xBD, 0x6B, 0xBD, 0x6C, /* 0x04-0x07 */
+	0xC7, 0xA1, 0xBD, 0x6D, 0xBD, 0x6E, 0xBD, 0x6F, /* 0x08-0x0B */
+	0xBD, 0x70, 0xBD, 0x71, 0xBD, 0x72, 0xBD, 0x73, /* 0x0C-0x0F */
+	0xBD, 0x74, 0xBD, 0x75, 0xBD, 0x76, 0xBD, 0x77, /* 0x10-0x13 */
+	0xBD, 0x78, 0xBD, 0x79, 0xBD, 0x7A, 0xBD, 0x81, /* 0x14-0x17 */
+	0xBD, 0x82, 0xBD, 0x83, 0xBD, 0x84, 0xBD, 0x85, /* 0x18-0x1B */
+	0xBD, 0x86, 0xC7, 0xA2, 0xBD, 0x87, 0xBD, 0x88, /* 0x1C-0x1F */
+	0xBD, 0x89, 0xBD, 0x8A, 0xBD, 0x8B, 0xBD, 0x8C, /* 0x20-0x23 */
+	0xBD, 0x8D, 0xBD, 0x8E, 0xBD, 0x8F, 0xBD, 0x90, /* 0x24-0x27 */
+	0xBD, 0x91, 0xBD, 0x92, 0xBD, 0x93, 0xBD, 0x94, /* 0x28-0x2B */
+	0xBD, 0x95, 0xBD, 0x96, 0xBD, 0x97, 0xBD, 0x98, /* 0x2C-0x2F */
+	0xBD, 0x99, 0xBD, 0x9A, 0xBD, 0x9B, 0xBD, 0x9C, /* 0x30-0x33 */
+	0xBD, 0x9D, 0xBD, 0x9E, 0xBD, 0x9F, 0xBD, 0xA0, /* 0x34-0x37 */
+	0xBE, 0x41, 0xBE, 0x42, 0xBE, 0x43, 0xBE, 0x44, /* 0x38-0x3B */
+	0xBE, 0x45, 0xBE, 0x46, 0xBE, 0x47, 0xBE, 0x48, /* 0x3C-0x3F */
+	0xC7, 0xA3, 0xBE, 0x49, 0xBE, 0x4A, 0xBE, 0x4B, /* 0x40-0x43 */
+	0xC7, 0xA4, 0xBE, 0x4C, 0xBE, 0x4D, 0xBE, 0x4E, /* 0x44-0x47 */
+	0xBE, 0x4F, 0xBE, 0x50, 0xBE, 0x51, 0xBE, 0x52, /* 0x48-0x4B */
+	0xBE, 0x53, 0xBE, 0x54, 0xBE, 0x55, 0xBE, 0x56, /* 0x4C-0x4F */
+	0xBE, 0x57, 0xBE, 0x58, 0xBE, 0x59, 0xBE, 0x5A, /* 0x50-0x53 */
+	0xBE, 0x61, 0xBE, 0x62, 0xBE, 0x63, 0xBE, 0x64, /* 0x54-0x57 */
+	0xBE, 0x65, 0xBE, 0x66, 0xBE, 0x67, 0xBE, 0x68, /* 0x58-0x5B */
+	0xC7, 0xA5, 0xBE, 0x69, 0xBE, 0x6A, 0xBE, 0x6B, /* 0x5C-0x5F */
+	0xC7, 0xA6, 0xBE, 0x6C, 0xBE, 0x6D, 0xBE, 0x6E, /* 0x60-0x63 */
+	0xC7, 0xA7, 0xBE, 0x6F, 0xBE, 0x70, 0xBE, 0x71, /* 0x64-0x67 */
+	0xBE, 0x72, 0xBE, 0x73, 0xBE, 0x74, 0xBE, 0x75, /* 0x68-0x6B */
+	0xBE, 0x76, 0xC7, 0xA8, 0xBE, 0x77, 0xC7, 0xA9, /* 0x6C-0x6F */
+	0xBE, 0x78, 0xBE, 0x79, 0xBE, 0x7A, 0xBE, 0x81, /* 0x70-0x73 */
+	0xBE, 0x82, 0xBE, 0x83, 0xBE, 0x84, 0xBE, 0x85, /* 0x74-0x77 */
+	0xC7, 0xAA, 0xC7, 0xAB, 0xBE, 0x86, 0xBE, 0x87, /* 0x78-0x7B */
+	0xC7, 0xAC, 0xBE, 0x88, 0xBE, 0x89, 0xC7, 0xAD, /* 0x7C-0x7F */
+	
+	0xC7, 0xAE, 0xBE, 0x8A, 0xC7, 0xAF, 0xBE, 0x8B, /* 0x80-0x83 */
+	0xBE, 0x8C, 0xBE, 0x8D, 0xBE, 0x8E, 0xBE, 0x8F, /* 0x84-0x87 */
+	0xC7, 0xB0, 0xC7, 0xB1, 0xBE, 0x90, 0xC7, 0xB2, /* 0x88-0x8B */
+	0xBE, 0x91, 0xC7, 0xB3, 0xBE, 0x92, 0xBE, 0x93, /* 0x8C-0x8F */
+	0xBE, 0x94, 0xBE, 0x95, 0xBE, 0x96, 0xBE, 0x97, /* 0x90-0x93 */
+	0xC7, 0xB4, 0xBE, 0x98, 0xBE, 0x99, 0xBE, 0x9A, /* 0x94-0x97 */
+	0xBE, 0x9B, 0xBE, 0x9C, 0xBE, 0x9D, 0xBE, 0x9E, /* 0x98-0x9B */
+	0xBE, 0x9F, 0xBE, 0xA0, 0xBF, 0x41, 0xBF, 0x42, /* 0x9C-0x9F */
+	0xBF, 0x43, 0xBF, 0x44, 0xBF, 0x45, 0xBF, 0x46, /* 0xA0-0xA3 */
+	0xBF, 0x47, 0xBF, 0x48, 0xBF, 0x49, 0xBF, 0x4A, /* 0xA4-0xA7 */
+	0xBF, 0x4B, 0xC7, 0xB5, 0xBF, 0x4C, 0xBF, 0x4D, /* 0xA8-0xAB */
+	0xBF, 0x4E, 0xBF, 0x4F, 0xBF, 0x50, 0xBF, 0x51, /* 0xAC-0xAF */
+	0xBF, 0x52, 0xBF, 0x53, 0xBF, 0x54, 0xBF, 0x55, /* 0xB0-0xB3 */
+	0xBF, 0x56, 0xBF, 0x57, 0xBF, 0x58, 0xBF, 0x59, /* 0xB4-0xB7 */
+	0xBF, 0x5A, 0xBF, 0x61, 0xBF, 0x62, 0xBF, 0x63, /* 0xB8-0xBB */
+	0xBF, 0x64, 0xBF, 0x65, 0xBF, 0x66, 0xBF, 0x67, /* 0xBC-0xBF */
+	0xBF, 0x68, 0xBF, 0x69, 0xBF, 0x6A, 0xBF, 0x6B, /* 0xC0-0xC3 */
+	0xBF, 0x6C, 0xBF, 0x6D, 0xBF, 0x6E, 0xBF, 0x6F, /* 0xC4-0xC7 */
+	0xBF, 0x70, 0xBF, 0x71, 0xBF, 0x72, 0xBF, 0x73, /* 0xC8-0xCB */
+	0xC7, 0xB6, 0xBF, 0x74, 0xBF, 0x75, 0xBF, 0x76, /* 0xCC-0xCF */
+	0xC7, 0xB7, 0xBF, 0x77, 0xBF, 0x78, 0xBF, 0x79, /* 0xD0-0xD3 */
+	0xC7, 0xB8, 0xBF, 0x7A, 0xBF, 0x81, 0xBF, 0x82, /* 0xD4-0xD7 */
+	0xBF, 0x83, 0xBF, 0x84, 0xBF, 0x85, 0xBF, 0x86, /* 0xD8-0xDB */
+	0xC7, 0xB9, 0xBF, 0x87, 0xBF, 0x88, 0xC7, 0xBA, /* 0xDC-0xDF */
+	0xBF, 0x89, 0xBF, 0x8A, 0xBF, 0x8B, 0xBF, 0x8C, /* 0xE0-0xE3 */
+	0xBF, 0x8D, 0xBF, 0x8E, 0xBF, 0x8F, 0xBF, 0x90, /* 0xE4-0xE7 */
+	0xC7, 0xBB, 0xBF, 0x91, 0xBF, 0x92, 0xBF, 0x93, /* 0xE8-0xEB */
+	0xC7, 0xBC, 0xBF, 0x94, 0xBF, 0x95, 0xBF, 0x96, /* 0xEC-0xEF */
+	0xC7, 0xBD, 0xBF, 0x97, 0xBF, 0x98, 0xBF, 0x99, /* 0xF0-0xF3 */
+	0xBF, 0x9A, 0xBF, 0x9B, 0xBF, 0x9C, 0xBF, 0x9D, /* 0xF4-0xF7 */
+	0xC7, 0xBE, 0xBF, 0x9E, 0xBF, 0x9F, 0xC7, 0xBF, /* 0xF8-0xFB */
+	0xBF, 0xA0, 0xC7, 0xC0, 0xC0, 0x41, 0xC0, 0x42, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D5[512] = {
+	0xC0, 0x43, 0xC0, 0x44, 0xC0, 0x45, 0xC0, 0x46, /* 0x00-0x03 */
+	0xC7, 0xC1, 0xC0, 0x47, 0xC0, 0x48, 0xC0, 0x49, /* 0x04-0x07 */
+	0xC7, 0xC2, 0xC0, 0x4A, 0xC0, 0x4B, 0xC0, 0x4C, /* 0x08-0x0B */
+	0xC7, 0xC3, 0xC0, 0x4D, 0xC0, 0x4E, 0xC0, 0x4F, /* 0x0C-0x0F */
+	0xC0, 0x50, 0xC0, 0x51, 0xC0, 0x52, 0xC0, 0x53, /* 0x10-0x13 */
+	0xC7, 0xC4, 0xC7, 0xC5, 0xC0, 0x54, 0xC7, 0xC6, /* 0x14-0x17 */
+	0xC0, 0x55, 0xC0, 0x56, 0xC0, 0x57, 0xC0, 0x58, /* 0x18-0x1B */
+	0xC0, 0x59, 0xC0, 0x5A, 0xC0, 0x61, 0xC0, 0x62, /* 0x1C-0x1F */
+	0xC0, 0x63, 0xC0, 0x64, 0xC0, 0x65, 0xC0, 0x66, /* 0x20-0x23 */
+	0xC0, 0x67, 0xC0, 0x68, 0xC0, 0x69, 0xC0, 0x6A, /* 0x24-0x27 */
+	0xC0, 0x6B, 0xC0, 0x6C, 0xC0, 0x6D, 0xC0, 0x6E, /* 0x28-0x2B */
+	0xC0, 0x6F, 0xC0, 0x70, 0xC0, 0x71, 0xC0, 0x72, /* 0x2C-0x2F */
+	0xC0, 0x73, 0xC0, 0x74, 0xC0, 0x75, 0xC0, 0x76, /* 0x30-0x33 */
+	0xC0, 0x77, 0xC0, 0x78, 0xC0, 0x79, 0xC0, 0x7A, /* 0x34-0x37 */
+	0xC0, 0x81, 0xC0, 0x82, 0xC0, 0x83, 0xC0, 0x84, /* 0x38-0x3B */
+	0xC7, 0xC7, 0xC7, 0xC8, 0xC0, 0x85, 0xC0, 0x86, /* 0x3C-0x3F */
+	0xC7, 0xC9, 0xC0, 0x87, 0xC0, 0x88, 0xC0, 0x89, /* 0x40-0x43 */
+	0xC7, 0xCA, 0xC0, 0x8A, 0xC0, 0x8B, 0xC0, 0x8C, /* 0x44-0x47 */
+	0xC0, 0x8D, 0xC0, 0x8E, 0xC0, 0x8F, 0xC0, 0x90, /* 0x48-0x4B */
+	0xC7, 0xCB, 0xC7, 0xCC, 0xC0, 0x91, 0xC7, 0xCD, /* 0x4C-0x4F */
+	0xC0, 0x92, 0xC7, 0xCE, 0xC0, 0x93, 0xC0, 0x94, /* 0x50-0x53 */
+	0xC0, 0x95, 0xC0, 0x96, 0xC0, 0x97, 0xC0, 0x98, /* 0x54-0x57 */
+	0xC7, 0xCF, 0xC7, 0xD0, 0xC0, 0x99, 0xC0, 0x9A, /* 0x58-0x5B */
+	0xC7, 0xD1, 0xC0, 0x9B, 0xC0, 0x9C, 0xC0, 0x9D, /* 0x5C-0x5F */
+	0xC7, 0xD2, 0xC0, 0x9E, 0xC0, 0x9F, 0xC0, 0xA0, /* 0x60-0x63 */
+	0xC1, 0x41, 0xC7, 0xD3, 0xC1, 0x42, 0xC1, 0x43, /* 0x64-0x67 */
+	0xC7, 0xD4, 0xC7, 0xD5, 0xC1, 0x44, 0xC7, 0xD6, /* 0x68-0x6B */
+	0xC1, 0x45, 0xC7, 0xD7, 0xC1, 0x46, 0xC1, 0x47, /* 0x6C-0x6F */
+	0xC1, 0x48, 0xC1, 0x49, 0xC1, 0x4A, 0xC1, 0x4B, /* 0x70-0x73 */
+	0xC7, 0xD8, 0xC7, 0xD9, 0xC1, 0x4C, 0xC1, 0x4D, /* 0x74-0x77 */
+	0xC7, 0xDA, 0xC1, 0x4E, 0xC1, 0x4F, 0xC1, 0x50, /* 0x78-0x7B */
+	0xC7, 0xDB, 0xC1, 0x51, 0xC1, 0x52, 0xC1, 0x53, /* 0x7C-0x7F */
+	
+	0xC1, 0x54, 0xC1, 0x55, 0xC1, 0x56, 0xC1, 0x57, /* 0x80-0x83 */
+	0xC7, 0xDC, 0xC7, 0xDD, 0xC1, 0x58, 0xC7, 0xDE, /* 0x84-0x87 */
+	0xC7, 0xDF, 0xC7, 0xE0, 0xC1, 0x59, 0xC1, 0x5A, /* 0x88-0x8B */
+	0xC1, 0x61, 0xC1, 0x62, 0xC1, 0x63, 0xC1, 0x64, /* 0x8C-0x8F */
+	0xC7, 0xE1, 0xC1, 0x65, 0xC1, 0x66, 0xC1, 0x67, /* 0x90-0x93 */
+	0xC1, 0x68, 0xC1, 0x69, 0xC1, 0x6A, 0xC1, 0x6B, /* 0x94-0x97 */
+	0xC1, 0x6C, 0xC1, 0x6D, 0xC1, 0x6E, 0xC1, 0x6F, /* 0x98-0x9B */
+	0xC1, 0x70, 0xC1, 0x71, 0xC1, 0x72, 0xC1, 0x73, /* 0x9C-0x9F */
+	0xC1, 0x74, 0xC1, 0x75, 0xC1, 0x76, 0xC1, 0x77, /* 0xA0-0xA3 */
+	0xC1, 0x78, 0xC7, 0xE2, 0xC1, 0x79, 0xC1, 0x7A, /* 0xA4-0xA7 */
+	0xC1, 0x81, 0xC1, 0x82, 0xC1, 0x83, 0xC1, 0x84, /* 0xA8-0xAB */
+	0xC1, 0x85, 0xC1, 0x86, 0xC1, 0x87, 0xC1, 0x88, /* 0xAC-0xAF */
+	0xC1, 0x89, 0xC1, 0x8A, 0xC1, 0x8B, 0xC1, 0x8C, /* 0xB0-0xB3 */
+	0xC1, 0x8D, 0xC1, 0x8E, 0xC1, 0x8F, 0xC1, 0x90, /* 0xB4-0xB7 */
+	0xC1, 0x91, 0xC1, 0x92, 0xC1, 0x93, 0xC1, 0x94, /* 0xB8-0xBB */
+	0xC1, 0x95, 0xC1, 0x96, 0xC1, 0x97, 0xC1, 0x98, /* 0xBC-0xBF */
+	0xC1, 0x99, 0xC1, 0x9A, 0xC1, 0x9B, 0xC1, 0x9C, /* 0xC0-0xC3 */
+	0xC1, 0x9D, 0xC1, 0x9E, 0xC1, 0x9F, 0xC1, 0xA0, /* 0xC4-0xC7 */
+	0xC7, 0xE3, 0xC7, 0xE4, 0xC2, 0x41, 0xC2, 0x42, /* 0xC8-0xCB */
+	0xC7, 0xE5, 0xC2, 0x43, 0xC2, 0x44, 0xC2, 0x45, /* 0xCC-0xCF */
+	0xC7, 0xE6, 0xC2, 0x46, 0xC7, 0xE7, 0xC2, 0x47, /* 0xD0-0xD3 */
+	0xC2, 0x48, 0xC2, 0x49, 0xC2, 0x4A, 0xC2, 0x4B, /* 0xD4-0xD7 */
+	0xC7, 0xE8, 0xC7, 0xE9, 0xC2, 0x4C, 0xC7, 0xEA, /* 0xD8-0xDB */
+	0xC2, 0x4D, 0xC7, 0xEB, 0xC2, 0x4E, 0xC2, 0x4F, /* 0xDC-0xDF */
+	0xC2, 0x50, 0xC2, 0x51, 0xC2, 0x52, 0xC2, 0x53, /* 0xE0-0xE3 */
+	0xC7, 0xEC, 0xC7, 0xED, 0xC2, 0x54, 0xC2, 0x55, /* 0xE4-0xE7 */
+	0xC7, 0xEE, 0xC2, 0x56, 0xC2, 0x57, 0xC2, 0x58, /* 0xE8-0xEB */
+	0xC7, 0xEF, 0xC2, 0x59, 0xC2, 0x5A, 0xC2, 0x61, /* 0xEC-0xEF */
+	0xC2, 0x62, 0xC2, 0x63, 0xC2, 0x64, 0xC2, 0x65, /* 0xF0-0xF3 */
+	0xC7, 0xF0, 0xC7, 0xF1, 0xC2, 0x66, 0xC7, 0xF2, /* 0xF4-0xF7 */
+	0xC2, 0x67, 0xC7, 0xF3, 0xC2, 0x68, 0xC2, 0x69, /* 0xF8-0xFB */
+	0xC2, 0x6A, 0xC2, 0x6B, 0xC2, 0x6C, 0xC2, 0x6D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D6[512] = {
+	0xC7, 0xF4, 0xC7, 0xF5, 0xC2, 0x6E, 0xC2, 0x6F, /* 0x00-0x03 */
+	0xC7, 0xF6, 0xC2, 0x70, 0xC2, 0x71, 0xC2, 0x72, /* 0x04-0x07 */
+	0xC7, 0xF7, 0xC2, 0x73, 0xC2, 0x74, 0xC2, 0x75, /* 0x08-0x0B */
+	0xC2, 0x76, 0xC2, 0x77, 0xC2, 0x78, 0xC2, 0x79, /* 0x0C-0x0F */
+	0xC7, 0xF8, 0xC7, 0xF9, 0xC2, 0x7A, 0xC7, 0xFA, /* 0x10-0x13 */
+	0xC7, 0xFB, 0xC7, 0xFC, 0xC2, 0x81, 0xC2, 0x82, /* 0x14-0x17 */
+	0xC2, 0x83, 0xC2, 0x84, 0xC2, 0x85, 0xC2, 0x86, /* 0x18-0x1B */
+	0xC7, 0xFD, 0xC2, 0x87, 0xC2, 0x88, 0xC2, 0x89, /* 0x1C-0x1F */
+	0xC7, 0xFE, 0xC2, 0x8A, 0xC2, 0x8B, 0xC2, 0x8C, /* 0x20-0x23 */
+	0xC8, 0xA1, 0xC2, 0x8D, 0xC2, 0x8E, 0xC2, 0x8F, /* 0x24-0x27 */
+	0xC2, 0x90, 0xC2, 0x91, 0xC2, 0x92, 0xC2, 0x93, /* 0x28-0x2B */
+	0xC2, 0x94, 0xC8, 0xA2, 0xC2, 0x95, 0xC2, 0x96, /* 0x2C-0x2F */
+	0xC2, 0x97, 0xC2, 0x98, 0xC2, 0x99, 0xC2, 0x9A, /* 0x30-0x33 */
+	0xC2, 0x9B, 0xC2, 0x9C, 0xC2, 0x9D, 0xC2, 0x9E, /* 0x34-0x37 */
+	0xC8, 0xA3, 0xC8, 0xA4, 0xC2, 0x9F, 0xC2, 0xA0, /* 0x38-0x3B */
+	0xC8, 0xA5, 0xC3, 0x41, 0xC3, 0x42, 0xC3, 0x43, /* 0x3C-0x3F */
+	0xC8, 0xA6, 0xC3, 0x44, 0xC3, 0x45, 0xC3, 0x46, /* 0x40-0x43 */
+	0xC3, 0x47, 0xC8, 0xA7, 0xC3, 0x48, 0xC3, 0x49, /* 0x44-0x47 */
+	0xC8, 0xA8, 0xC8, 0xA9, 0xC3, 0x4A, 0xC8, 0xAA, /* 0x48-0x4B */
+	0xC3, 0x4B, 0xC8, 0xAB, 0xC3, 0x4C, 0xC3, 0x4D, /* 0x4C-0x4F */
+	0xC3, 0x4E, 0xC8, 0xAC, 0xC3, 0x4F, 0xC3, 0x50, /* 0x50-0x53 */
+	0xC8, 0xAD, 0xC8, 0xAE, 0xC3, 0x51, 0xC3, 0x52, /* 0x54-0x57 */
+	0xC8, 0xAF, 0xC3, 0x53, 0xC3, 0x54, 0xC3, 0x55, /* 0x58-0x5B */
+	0xC8, 0xB0, 0xC3, 0x56, 0xC3, 0x57, 0xC3, 0x58, /* 0x5C-0x5F */
+	0xC3, 0x59, 0xC3, 0x5A, 0xC3, 0x61, 0xC3, 0x62, /* 0x60-0x63 */
+	0xC3, 0x63, 0xC3, 0x64, 0xC3, 0x65, 0xC8, 0xB1, /* 0x64-0x67 */
+	0xC3, 0x66, 0xC8, 0xB2, 0xC3, 0x67, 0xC3, 0x68, /* 0x68-0x6B */
+	0xC3, 0x69, 0xC3, 0x6A, 0xC3, 0x6B, 0xC3, 0x6C, /* 0x6C-0x6F */
+	0xC8, 0xB3, 0xC8, 0xB4, 0xC3, 0x6D, 0xC3, 0x6E, /* 0x70-0x73 */
+	0xC8, 0xB5, 0xC3, 0x6F, 0xC3, 0x70, 0xC3, 0x71, /* 0x74-0x77 */
+	0xC3, 0x72, 0xC3, 0x73, 0xC3, 0x74, 0xC3, 0x75, /* 0x78-0x7B */
+	0xC3, 0x76, 0xC3, 0x77, 0xC3, 0x78, 0xC3, 0x79, /* 0x7C-0x7F */
+	
+	0xC3, 0x7A, 0xC3, 0x81, 0xC3, 0x82, 0xC8, 0xB6, /* 0x80-0x83 */
+	0xC3, 0x83, 0xC8, 0xB7, 0xC3, 0x84, 0xC3, 0x85, /* 0x84-0x87 */
+	0xC3, 0x86, 0xC3, 0x87, 0xC3, 0x88, 0xC3, 0x89, /* 0x88-0x8B */
+	0xC8, 0xB8, 0xC8, 0xB9, 0xC3, 0x8A, 0xC3, 0x8B, /* 0x8C-0x8F */
+	0xC8, 0xBA, 0xC3, 0x8C, 0xC3, 0x8D, 0xC3, 0x8E, /* 0x90-0x93 */
+	0xC8, 0xBB, 0xC3, 0x8F, 0xC3, 0x90, 0xC3, 0x91, /* 0x94-0x97 */
+	0xC3, 0x92, 0xC3, 0x93, 0xC3, 0x94, 0xC3, 0x95, /* 0x98-0x9B */
+	0xC3, 0x96, 0xC8, 0xBC, 0xC3, 0x97, 0xC8, 0xBD, /* 0x9C-0x9F */
+	0xC3, 0x98, 0xC8, 0xBE, 0xC3, 0x99, 0xC3, 0x9A, /* 0xA0-0xA3 */
+	0xC3, 0x9B, 0xC3, 0x9C, 0xC3, 0x9D, 0xC3, 0x9E, /* 0xA4-0xA7 */
+	0xC8, 0xBF, 0xC3, 0x9F, 0xC3, 0xA0, 0xC4, 0x41, /* 0xA8-0xAB */
+	0xC8, 0xC0, 0xC4, 0x42, 0xC4, 0x43, 0xC4, 0x44, /* 0xAC-0xAF */
+	0xC8, 0xC1, 0xC4, 0x45, 0xC4, 0x46, 0xC4, 0x47, /* 0xB0-0xB3 */
+	0xC4, 0x48, 0xC4, 0x49, 0xC4, 0x4A, 0xC4, 0x4B, /* 0xB4-0xB7 */
+	0xC4, 0x4C, 0xC8, 0xC2, 0xC4, 0x4D, 0xC8, 0xC3, /* 0xB8-0xBB */
+	0xC4, 0x4E, 0xC4, 0x4F, 0xC4, 0x50, 0xC4, 0x51, /* 0xBC-0xBF */
+	0xC4, 0x52, 0xC4, 0x53, 0xC4, 0x54, 0xC4, 0x55, /* 0xC0-0xC3 */
+	0xC8, 0xC4, 0xC8, 0xC5, 0xC4, 0x56, 0xC4, 0x57, /* 0xC4-0xC7 */
+	0xC8, 0xC6, 0xC4, 0x58, 0xC4, 0x59, 0xC4, 0x5A, /* 0xC8-0xCB */
+	0xC8, 0xC7, 0xC4, 0x61, 0xC4, 0x62, 0xC4, 0x63, /* 0xCC-0xCF */
+	0xC4, 0x64, 0xC8, 0xC8, 0xC4, 0x65, 0xC4, 0x66, /* 0xD0-0xD3 */
+	0xC8, 0xC9, 0xC4, 0x67, 0xC4, 0x68, 0xC8, 0xCA, /* 0xD4-0xD7 */
+	0xC4, 0x69, 0xC8, 0xCB, 0xC4, 0x6A, 0xC4, 0x6B, /* 0xD8-0xDB */
+	0xC4, 0x6C, 0xC4, 0x6D, 0xC4, 0x6E, 0xC4, 0x6F, /* 0xDC-0xDF */
+	0xC8, 0xCC, 0xC4, 0x70, 0xC4, 0x71, 0xC4, 0x72, /* 0xE0-0xE3 */
+	0xC8, 0xCD, 0xC4, 0x73, 0xC4, 0x74, 0xC4, 0x75, /* 0xE4-0xE7 */
+	0xC8, 0xCE, 0xC4, 0x76, 0xC4, 0x77, 0xC4, 0x78, /* 0xE8-0xEB */
+	0xC4, 0x79, 0xC4, 0x7A, 0xC4, 0x81, 0xC4, 0x82, /* 0xEC-0xEF */
+	0xC8, 0xCF, 0xC4, 0x83, 0xC4, 0x84, 0xC4, 0x85, /* 0xF0-0xF3 */
+	0xC4, 0x86, 0xC8, 0xD0, 0xC4, 0x87, 0xC4, 0x88, /* 0xF4-0xF7 */
+	0xC4, 0x89, 0xC4, 0x8A, 0xC4, 0x8B, 0xC4, 0x8C, /* 0xF8-0xFB */
+	0xC8, 0xD1, 0xC8, 0xD2, 0xC4, 0x8D, 0xC4, 0x8E, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_D7[512] = {
+	0xC8, 0xD3, 0xC4, 0x8F, 0xC4, 0x90, 0xC4, 0x91, /* 0x00-0x03 */
+	0xC8, 0xD4, 0xC4, 0x92, 0xC4, 0x93, 0xC4, 0x94, /* 0x04-0x07 */
+	0xC4, 0x95, 0xC4, 0x96, 0xC4, 0x97, 0xC4, 0x98, /* 0x08-0x0B */
+	0xC4, 0x99, 0xC4, 0x9A, 0xC4, 0x9B, 0xC4, 0x9C, /* 0x0C-0x0F */
+	0xC4, 0x9D, 0xC8, 0xD5, 0xC4, 0x9E, 0xC4, 0x9F, /* 0x10-0x13 */
+	0xC4, 0xA0, 0xC5, 0x41, 0xC5, 0x42, 0xC5, 0x43, /* 0x14-0x17 */
+	0xC8, 0xD6, 0xC8, 0xD7, 0xC5, 0x44, 0xC5, 0x45, /* 0x18-0x1B */
+	0xC8, 0xD8, 0xC5, 0x46, 0xC5, 0x47, 0xC5, 0x48, /* 0x1C-0x1F */
+	0xC8, 0xD9, 0xC5, 0x49, 0xC5, 0x4A, 0xC5, 0x4B, /* 0x20-0x23 */
+	0xC5, 0x4C, 0xC5, 0x4D, 0xC5, 0x4E, 0xC5, 0x4F, /* 0x24-0x27 */
+	0xC8, 0xDA, 0xC8, 0xDB, 0xC5, 0x50, 0xC8, 0xDC, /* 0x28-0x2B */
+	0xC5, 0x51, 0xC8, 0xDD, 0xC5, 0x52, 0xC5, 0x53, /* 0x2C-0x2F */
+	0xC5, 0x54, 0xC5, 0x55, 0xC5, 0x56, 0xC5, 0x57, /* 0x30-0x33 */
+	0xC8, 0xDE, 0xC8, 0xDF, 0xC5, 0x58, 0xC5, 0x59, /* 0x34-0x37 */
+	0xC8, 0xE0, 0xC5, 0x5A, 0xC5, 0x61, 0xC5, 0x62, /* 0x38-0x3B */
+	0xC8, 0xE1, 0xC5, 0x63, 0xC5, 0x64, 0xC5, 0x65, /* 0x3C-0x3F */
+	0xC5, 0x66, 0xC5, 0x67, 0xC5, 0x68, 0xC5, 0x69, /* 0x40-0x43 */
+	0xC8, 0xE2, 0xC5, 0x6A, 0xC5, 0x6B, 0xC8, 0xE3, /* 0x44-0x47 */
+	0xC5, 0x6C, 0xC8, 0xE4, 0xC5, 0x6D, 0xC5, 0x6E, /* 0x48-0x4B */
+	0xC5, 0x6F, 0xC5, 0x70, 0xC5, 0x71, 0xC5, 0x72, /* 0x4C-0x4F */
+	0xC8, 0xE5, 0xC8, 0xE6, 0xC5, 0x73, 0xC5, 0x74, /* 0x50-0x53 */
+	0xC8, 0xE7, 0xC5, 0x75, 0xC8, 0xE8, 0xC8, 0xE9, /* 0x54-0x57 */
+	0xC8, 0xEA, 0xC8, 0xEB, 0xC5, 0x76, 0xC5, 0x77, /* 0x58-0x5B */
+	0xC5, 0x78, 0xC5, 0x79, 0xC5, 0x7A, 0xC5, 0x81, /* 0x5C-0x5F */
+	0xC8, 0xEC, 0xC8, 0xED, 0xC5, 0x82, 0xC8, 0xEE, /* 0x60-0x63 */
+	0xC5, 0x83, 0xC8, 0xEF, 0xC5, 0x84, 0xC5, 0x85, /* 0x64-0x67 */
+	0xC5, 0x86, 0xC8, 0xF0, 0xC5, 0x87, 0xC5, 0x88, /* 0x68-0x6B */
+	0xC8, 0xF1, 0xC5, 0x89, 0xC5, 0x8A, 0xC5, 0x8B, /* 0x6C-0x6F */
+	0xC8, 0xF2, 0xC5, 0x8C, 0xC5, 0x8D, 0xC5, 0x8E, /* 0x70-0x73 */
+	0xC8, 0xF3, 0xC5, 0x8F, 0xC5, 0x90, 0xC5, 0x91, /* 0x74-0x77 */
+	0xC5, 0x92, 0xC5, 0x93, 0xC5, 0x94, 0xC5, 0x95, /* 0x78-0x7B */
+	0xC8, 0xF4, 0xC8, 0xF5, 0xC5, 0x96, 0xC5, 0x97, /* 0x7C-0x7F */
+	
+	0xC5, 0x98, 0xC8, 0xF6, 0xC5, 0x99, 0xC5, 0x9A, /* 0x80-0x83 */
+	0xC5, 0x9B, 0xC5, 0x9C, 0xC5, 0x9D, 0xC5, 0x9E, /* 0x84-0x87 */
+	0xC8, 0xF7, 0xC8, 0xF8, 0xC5, 0x9F, 0xC5, 0xA0, /* 0x88-0x8B */
+	0xC8, 0xF9, 0xC6, 0x41, 0xC6, 0x42, 0xC6, 0x43, /* 0x8C-0x8F */
+	0xC8, 0xFA, 0xC6, 0x44, 0xC6, 0x45, 0xC6, 0x46, /* 0x90-0x93 */
+	0xC6, 0x47, 0xC6, 0x48, 0xC6, 0x49, 0xC6, 0x4A, /* 0x94-0x97 */
+	0xC8, 0xFB, 0xC8, 0xFC, 0xC6, 0x4B, 0xC8, 0xFD, /* 0x98-0x9B */
+	0xC6, 0x4C, 0xC8, 0xFE, 0xC6, 0x4D, 0xC6, 0x4E, /* 0x9C-0x9F */
+	0xC6, 0x4F, 0xC6, 0x50, 0xC6, 0x51, 0xC6, 0x52, /* 0xA0-0xA3 */
+};
+
+static unsigned char u2c_DC[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+};
+
+static unsigned char u2c_F9[512] = {
+	0xCB, 0xD0, 0xCB, 0xD6, 0xCB, 0xE7, 0xCD, 0xCF, /* 0x00-0x03 */
+	0xCD, 0xE8, 0xCE, 0xAD, 0xCF, 0xFB, 0xD0, 0xA2, /* 0x04-0x07 */
+	0xD0, 0xB8, 0xD0, 0xD0, 0xD0, 0xDD, 0xD1, 0xD4, /* 0x08-0x0B */
+	0xD1, 0xD5, 0xD1, 0xD8, 0xD1, 0xDB, 0xD1, 0xDC, /* 0x0C-0x0F */
+	0xD1, 0xDD, 0xD1, 0xDE, 0xD1, 0xDF, 0xD1, 0xE0, /* 0x10-0x13 */
+	0xD1, 0xE2, 0xD1, 0xE3, 0xD1, 0xE4, 0xD1, 0xE5, /* 0x14-0x17 */
+	0xD1, 0xE6, 0xD1, 0xE8, 0xD1, 0xE9, 0xD1, 0xEA, /* 0x18-0x1B */
+	0xD1, 0xEB, 0xD1, 0xED, 0xD1, 0xEF, 0xD1, 0xF0, /* 0x1C-0x1F */
+	0xD1, 0xF2, 0xD1, 0xF6, 0xD1, 0xFA, 0xD1, 0xFC, /* 0x20-0x23 */
+	0xD1, 0xFD, 0xD1, 0xFE, 0xD2, 0xA2, 0xD2, 0xA3, /* 0x24-0x27 */
+	0xD2, 0xA7, 0xD2, 0xA8, 0xD2, 0xA9, 0xD2, 0xAA, /* 0x28-0x2B */
+	0xD2, 0xAB, 0xD2, 0xAD, 0xD2, 0xB2, 0xD2, 0xBE, /* 0x2C-0x2F */
+	0xD2, 0xC2, 0xD2, 0xC3, 0xD2, 0xC4, 0xD2, 0xC6, /* 0x30-0x33 */
+	0xD2, 0xC7, 0xD2, 0xC8, 0xD2, 0xC9, 0xD2, 0xCA, /* 0x34-0x37 */
+	0xD2, 0xCB, 0xD2, 0xCD, 0xD2, 0xCE, 0xD2, 0xCF, /* 0x38-0x3B */
+	0xD2, 0xD0, 0xD2, 0xD1, 0xD2, 0xD2, 0xD2, 0xD3, /* 0x3C-0x3F */
+	0xD2, 0xD4, 0xD2, 0xD5, 0xD2, 0xD6, 0xD2, 0xD7, /* 0x40-0x43 */
+	0xD2, 0xD9, 0xD2, 0xDA, 0xD2, 0xDE, 0xD2, 0xDF, /* 0x44-0x47 */
+	0xD2, 0xE1, 0xD2, 0xE2, 0xD2, 0xE4, 0xD2, 0xE5, /* 0x48-0x4B */
+	0xD2, 0xE6, 0xD2, 0xE7, 0xD2, 0xE8, 0xD2, 0xE9, /* 0x4C-0x4F */
+	0xD2, 0xEA, 0xD2, 0xEB, 0xD2, 0xF0, 0xD2, 0xF1, /* 0x50-0x53 */
+	0xD2, 0xF2, 0xD2, 0xF3, 0xD2, 0xF4, 0xD2, 0xF5, /* 0x54-0x57 */
+	0xD2, 0xF7, 0xD2, 0xF8, 0xD4, 0xE6, 0xD4, 0xFC, /* 0x58-0x5B */
+	0xD5, 0xA5, 0xD5, 0xAB, 0xD5, 0xAE, 0xD6, 0xB8, /* 0x5C-0x5F */
+	0xD6, 0xCD, 0xD7, 0xCB, 0xD7, 0xE4, 0xDB, 0xC5, /* 0x60-0x63 */
+	0xDB, 0xE4, 0xDC, 0xA5, 0xDD, 0xA5, 0xDD, 0xD5, /* 0x64-0x67 */
+	0xDD, 0xF4, 0xDE, 0xFC, 0xDE, 0xFE, 0xDF, 0xB3, /* 0x68-0x6B */
+	0xDF, 0xE1, 0xDF, 0xE8, 0xE0, 0xF1, 0xE1, 0xAD, /* 0x6C-0x6F */
+	0xE1, 0xED, 0xE3, 0xF5, 0xE4, 0xA1, 0xE4, 0xA9, /* 0x70-0x73 */
+	0xE5, 0xAE, 0xE5, 0xB1, 0xE5, 0xB2, 0xE5, 0xB9, /* 0x74-0x77 */
+	0xE5, 0xBB, 0xE5, 0xBC, 0xE5, 0xC4, 0xE5, 0xCE, /* 0x78-0x7B */
+	0xE5, 0xD0, 0xE5, 0xD2, 0xE5, 0xD6, 0xE5, 0xFA, /* 0x7C-0x7F */
+	
+	0xE5, 0xFB, 0xE5, 0xFC, 0xE5, 0xFE, 0xE6, 0xA1, /* 0x80-0x83 */
+	0xE6, 0xA4, 0xE6, 0xA7, 0xE6, 0xAD, 0xE6, 0xAF, /* 0x84-0x87 */
+	0xE6, 0xB0, 0xE6, 0xB1, 0xE6, 0xB3, 0xE6, 0xB7, /* 0x88-0x8B */
+	0xE6, 0xB8, 0xE6, 0xBC, 0xE6, 0xC4, 0xE6, 0xC6, /* 0x8C-0x8F */
+	0xE6, 0xC7, 0xE6, 0xCA, 0xE6, 0xD2, 0xE6, 0xD6, /* 0x90-0x93 */
+	0xE6, 0xD9, 0xE6, 0xDC, 0xE6, 0xDF, 0xE6, 0xE1, /* 0x94-0x97 */
+	0xE6, 0xE4, 0xE6, 0xE5, 0xE6, 0xE6, 0xE6, 0xE8, /* 0x98-0x9B */
+	0xE6, 0xEA, 0xE6, 0xEB, 0xE6, 0xEC, 0xE6, 0xEF, /* 0x9C-0x9F */
+	0xE6, 0xF1, 0xE6, 0xF2, 0xE6, 0xF5, 0xE6, 0xF6, /* 0xA0-0xA3 */
+	0xE6, 0xF7, 0xE6, 0xF9, 0xE7, 0xA1, 0xE7, 0xA6, /* 0xA4-0xA7 */
+	0xE7, 0xA9, 0xE7, 0xAA, 0xE7, 0xAC, 0xE7, 0xAD, /* 0xA8-0xAB */
+	0xE7, 0xB0, 0xE7, 0xBF, 0xE7, 0xC1, 0xE7, 0xC6, /* 0xAC-0xAF */
+	0xE7, 0xC7, 0xE7, 0xCB, 0xE7, 0xCD, 0xE7, 0xCF, /* 0xB0-0xB3 */
+	0xE7, 0xD0, 0xE7, 0xD3, 0xE7, 0xDF, 0xE7, 0xE4, /* 0xB4-0xB7 */
+	0xE7, 0xE6, 0xE7, 0xF7, 0xE8, 0xE7, 0xE8, 0xE8, /* 0xB8-0xBB */
+	0xE8, 0xF0, 0xE8, 0xF1, 0xE8, 0xF7, 0xE8, 0xF9, /* 0xBC-0xBF */
+	0xE8, 0xFB, 0xE8, 0xFE, 0xE9, 0xA7, 0xE9, 0xAC, /* 0xC0-0xC3 */
+	0xE9, 0xCC, 0xE9, 0xF7, 0xEA, 0xC1, 0xEA, 0xE5, /* 0xC4-0xC7 */
+	0xEA, 0xF4, 0xEA, 0xF7, 0xEA, 0xFC, 0xEA, 0xFE, /* 0xC8-0xCB */
+	0xEB, 0xA4, 0xEB, 0xA7, 0xEB, 0xA9, 0xEB, 0xAA, /* 0xCC-0xCF */
+	0xEB, 0xBA, 0xEB, 0xBB, 0xEB, 0xBD, 0xEB, 0xC1, /* 0xD0-0xD3 */
+	0xEB, 0xC2, 0xEB, 0xC6, 0xEB, 0xC7, 0xEB, 0xCC, /* 0xD4-0xD7 */
+	0xEB, 0xCF, 0xEB, 0xD0, 0xEB, 0xD1, 0xEB, 0xD2, /* 0xD8-0xDB */
+	0xEB, 0xD8, 0xEC, 0xA6, 0xEC, 0xA7, 0xEC, 0xAA, /* 0xDC-0xDF */
+	0xEC, 0xAF, 0xEC, 0xB0, 0xEC, 0xB1, 0xEC, 0xB2, /* 0xE0-0xE3 */
+	0xEC, 0xB5, 0xEC, 0xB8, 0xEC, 0xBA, 0xEC, 0xC0, /* 0xE4-0xE7 */
+	0xEC, 0xC1, 0xEC, 0xC5, 0xEC, 0xC6, 0xEC, 0xC9, /* 0xE8-0xEB */
+	0xEC, 0xCA, 0xEC, 0xD5, 0xEC, 0xDD, 0xEC, 0xDE, /* 0xEC-0xEF */
+	0xEC, 0xE1, 0xEC, 0xE4, 0xEC, 0xE7, 0xEC, 0xE8, /* 0xF0-0xF3 */
+	0xEC, 0xF7, 0xEC, 0xF8, 0xEC, 0xFA, 0xED, 0xA1, /* 0xF4-0xF7 */
+	0xED, 0xA2, 0xED, 0xA3, 0xED, 0xEE, 0xEE, 0xDB, /* 0xF8-0xFB */
+	0xF2, 0xBD, 0xF2, 0xFA, 0xF3, 0xB1, 0xF4, 0xA7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_FA[512] = {
+	0xF4, 0xEE, 0xF6, 0xF4, 0xF6, 0xF6, 0xF7, 0xB8, /* 0x00-0x03 */
+	0xF7, 0xC8, 0xF7, 0xD3, 0xF8, 0xDB, 0xF8, 0xF0, /* 0x04-0x07 */
+	0xFA, 0xA1, 0xFA, 0xA2, 0xFA, 0xE6, 0xFC, 0xA9, /* 0x08-0x0B */
+	0xE8, 0xB4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF5, 0xC0, 0x00, 0x00, 0xF4, 0xE7, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xFD, 0xEB, 0x00, 0x00, 0xEC, 0xCC, /* 0x14-0x17 */
+	0x00, 0x00, 0xE3, 0xEA, 0xDF, 0xD4, 0xDC, 0xD8, /* 0x18-0x1B */
+	0xEF, 0xFE, 0xEF, 0xF1, 0xE9, 0xE2, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0xB3, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xEC, 0xEF, 0xD4, 0xB4, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xF9, 0xDE, 0xF8, /* 0x28-0x2B */
+	0xCE, 0xBD, 0xF9, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+};
+
+static unsigned char u2c_FF[512] = {
+	0x00, 0x00, 0xA3, 0xA1, 0xA3, 0xA2, 0xA3, 0xA3, /* 0x00-0x03 */
+	0xA3, 0xA4, 0xA3, 0xA5, 0xA3, 0xA6, 0xA3, 0xA7, /* 0x04-0x07 */
+	0xA3, 0xA8, 0xA3, 0xA9, 0xA3, 0xAA, 0xA3, 0xAB, /* 0x08-0x0B */
+	0xA3, 0xAC, 0xA3, 0xAD, 0xA3, 0xAE, 0xA3, 0xAF, /* 0x0C-0x0F */
+	0xA3, 0xB0, 0xA3, 0xB1, 0xA3, 0xB2, 0xA3, 0xB3, /* 0x10-0x13 */
+	0xA3, 0xB4, 0xA3, 0xB5, 0xA3, 0xB6, 0xA3, 0xB7, /* 0x14-0x17 */
+	0xA3, 0xB8, 0xA3, 0xB9, 0xA3, 0xBA, 0xA3, 0xBB, /* 0x18-0x1B */
+	0xA3, 0xBC, 0xA3, 0xBD, 0xA3, 0xBE, 0xA3, 0xBF, /* 0x1C-0x1F */
+	0xA3, 0xC0, 0xA3, 0xC1, 0xA3, 0xC2, 0xA3, 0xC3, /* 0x20-0x23 */
+	0xA3, 0xC4, 0xA3, 0xC5, 0xA3, 0xC6, 0xA3, 0xC7, /* 0x24-0x27 */
+	0xA3, 0xC8, 0xA3, 0xC9, 0xA3, 0xCA, 0xA3, 0xCB, /* 0x28-0x2B */
+	0xA3, 0xCC, 0xA3, 0xCD, 0xA3, 0xCE, 0xA3, 0xCF, /* 0x2C-0x2F */
+	0xA3, 0xD0, 0xA3, 0xD1, 0xA3, 0xD2, 0xA3, 0xD3, /* 0x30-0x33 */
+	0xA3, 0xD4, 0xA3, 0xD5, 0xA3, 0xD6, 0xA3, 0xD7, /* 0x34-0x37 */
+	0xA3, 0xD8, 0xA3, 0xD9, 0xA3, 0xDA, 0xA3, 0xDB, /* 0x38-0x3B */
+	0xA1, 0xAC, 0xA3, 0xDD, 0xA3, 0xDE, 0xA3, 0xDF, /* 0x3C-0x3F */
+	0xA3, 0xE0, 0xA3, 0xE1, 0xA3, 0xE2, 0xA3, 0xE3, /* 0x40-0x43 */
+	0xA3, 0xE4, 0xA3, 0xE5, 0xA3, 0xE6, 0xA3, 0xE7, /* 0x44-0x47 */
+	0xA3, 0xE8, 0xA3, 0xE9, 0xA3, 0xEA, 0xA3, 0xEB, /* 0x48-0x4B */
+	0xA3, 0xEC, 0xA3, 0xED, 0xA3, 0xEE, 0xA3, 0xEF, /* 0x4C-0x4F */
+	0xA3, 0xF0, 0xA3, 0xF1, 0xA3, 0xF2, 0xA3, 0xF3, /* 0x50-0x53 */
+	0xA3, 0xF4, 0xA3, 0xF5, 0xA3, 0xF6, 0xA3, 0xF7, /* 0x54-0x57 */
+	0xA3, 0xF8, 0xA3, 0xF9, 0xA3, 0xFA, 0xA3, 0xFB, /* 0x58-0x5B */
+	0xA3, 0xFC, 0xA3, 0xFD, 0xA2, 0xA6, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xA4, 0xD4, 0xA4, 0xA1, 0xA4, 0xA2, 0xA4, 0xA3, /* 0xA0-0xA3 */
+	0xA4, 0xA4, 0xA4, 0xA5, 0xA4, 0xA6, 0xA4, 0xA7, /* 0xA4-0xA7 */
+	0xA4, 0xA8, 0xA4, 0xA9, 0xA4, 0xAA, 0xA4, 0xAB, /* 0xA8-0xAB */
+	0xA4, 0xAC, 0xA4, 0xAD, 0xA4, 0xAE, 0xA4, 0xAF, /* 0xAC-0xAF */
+	0xA4, 0xB0, 0xA4, 0xB1, 0xA4, 0xB2, 0xA4, 0xB3, /* 0xB0-0xB3 */
+	0xA4, 0xB4, 0xA4, 0xB5, 0xA4, 0xB6, 0xA4, 0xB7, /* 0xB4-0xB7 */
+	0xA4, 0xB8, 0xA4, 0xB9, 0xA4, 0xBA, 0xA4, 0xBB, /* 0xB8-0xBB */
+	0xA4, 0xBC, 0xA4, 0xBD, 0xA4, 0xBE, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xBF, 0xA4, 0xC0, /* 0xC0-0xC3 */
+	0xA4, 0xC1, 0xA4, 0xC2, 0xA4, 0xC3, 0xA4, 0xC4, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xC5, 0xA4, 0xC6, /* 0xC8-0xCB */
+	0xA4, 0xC7, 0xA4, 0xC8, 0xA4, 0xC9, 0xA4, 0xCA, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xCB, 0xA4, 0xCC, /* 0xD0-0xD3 */
+	0xA4, 0xCD, 0xA4, 0xCE, 0xA4, 0xCF, 0xA4, 0xD0, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xD1, 0xA4, 0xD2, /* 0xD8-0xDB */
+	0xA4, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA1, 0xCB, 0xA1, 0xCC, 0xA1, 0xFE, 0xA3, 0xFE, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xA1, 0xCD, 0xA3, 0xDC, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	NULL,   u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   u2c_11, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_20, u2c_21, u2c_22, u2c_23, u2c_24, u2c_25, u2c_26, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_30, u2c_31, u2c_32, u2c_33, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   u2c_4E, u2c_4F, 
+	u2c_50, u2c_51, u2c_52, u2c_53, u2c_54, u2c_55, u2c_56, u2c_57, 
+	u2c_58, u2c_59, u2c_5A, u2c_5B, u2c_5C, u2c_5D, u2c_5E, u2c_5F, 
+	u2c_60, u2c_61, u2c_62, u2c_63, u2c_64, u2c_65, u2c_66, u2c_67, 
+	u2c_68, u2c_69, u2c_6A, u2c_6B, u2c_6C, u2c_6D, u2c_6E, u2c_6F, 
+	u2c_70, u2c_71, u2c_72, u2c_73, u2c_74, u2c_75, u2c_76, u2c_77, 
+	u2c_78, u2c_79, u2c_7A, u2c_7B, u2c_7C, u2c_7D, u2c_7E, u2c_7F, 
+	u2c_80, u2c_81, u2c_82, u2c_83, u2c_84, u2c_85, u2c_86, u2c_87, 
+	u2c_88, u2c_89, u2c_8A, u2c_8B, u2c_8C, u2c_8D, u2c_8E, u2c_8F, 
+	u2c_90, u2c_91, u2c_92, u2c_93, u2c_94, u2c_95, u2c_96, u2c_97, 
+	u2c_98, u2c_99, u2c_9A, u2c_9B, u2c_9C, u2c_9D, u2c_9E, u2c_9F, 
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   u2c_AC, u2c_AD, u2c_AE, u2c_AF, 
+	u2c_B0, u2c_B1, u2c_B2, u2c_B3, u2c_B4, u2c_B5, u2c_B6, u2c_B7, 
+	u2c_B8, u2c_B9, u2c_BA, u2c_BB, u2c_BC, u2c_BD, u2c_BE, u2c_BF, 
+	u2c_C0, u2c_C1, u2c_C2, u2c_C3, u2c_C4, u2c_C5, u2c_C6, u2c_C7, 
+	u2c_C8, u2c_C9, u2c_CA, u2c_CB, u2c_CC, u2c_CD, u2c_CE, u2c_CF, 
+	u2c_D0, u2c_D1, u2c_D2, u2c_D3, u2c_D4, u2c_D5, u2c_D6, u2c_D7, 
+	NULL,   NULL,   NULL,   NULL,   u2c_DC, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   u2c_F9, u2c_FA, NULL,   NULL,   NULL,   NULL,   u2c_FF,
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(const wchar_t uni,
+			unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni&0xFF;
+	unsigned char ch = (uni>>8)&0xFF;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	/* Look up the 256-entry page for the high byte of the code point. */
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset) {
+		if (boundlen <= 1)
+			return -ENAMETOOLONG;
+		/* Each page stores two output bytes per code point. */
+		out[0] = uni2charset[cl*2];
+		out[1] = uni2charset[cl*2+1];
+		if (out[0] == 0x00 && out[1] == 0x00)
+			return -EINVAL;
+		n = 2;
+	} else if (ch == 0 && cl) {
+		/* No page: pass single-byte code points through unchanged. */
+		out[0] = cl;
+		n = 1;
+	} else {
+		return -EINVAL;
+	}
+
+	return n;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen,
+			wchar_t *uni)
+{
+	unsigned char ch, cl;
+	wchar_t *charset2uni;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	if (boundlen == 1) {
+		*uni = rawstring[0];
+		return 1;
+	}
+
+	ch = rawstring[0];
+	cl = rawstring[1];
+
+	/* The lead byte selects a 256-entry page of Unicode code points. */
+	charset2uni = page_charset2uni[ch];
+	if (charset2uni && cl) {
+		*uni = charset2uni[cl];
+		if (*uni == 0x0000)
+			return -EINVAL;
+		n = 2;
+	} else {
+		/* Not a recognized double-byte sequence: the lead byte maps to itself. */
+		*uni = ch;
+		n = 1;
+	}
+	return n;
+}
+
+static struct nls_table table = {
+	.charset	= "cp949",
+	.alias		= "euc-kr",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp949(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp949(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp949)
+module_exit(exit_nls_cp949)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(euc-kr);
diff --git a/fs/nls/nls_cp950.c b/fs/nls/nls_cp950.c
new file mode 100644
index 0000000..8167a28
--- /dev/null
+++ b/fs/nls/nls_cp950.c
@@ -0,0 +1,9483 @@
+/*
+ * linux/fs/nls_cp950.c
+ *
+ * Charset cp950 translation tables.
+ * This translation table was generated automatically; the
+ * original table can be downloaded from the Microsoft website.
+ * (http://www.microsoft.com/typography/unicode/unicodecp.htm)
+ */
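+
+/*
+ * Note on the table layout (illustrative only, mirroring char2uni() in
+ * nls_cp949.c above): each c2u_XX array below holds the Unicode code
+ * points for one cp950 lead byte, indexed by the trail byte, with
+ * 0x0000 marking an unmapped pair.  Assuming the pages are gathered
+ * into a page_charset2uni[] array as in nls_cp949.c, a lookup is
+ * roughly:
+ *
+ *	const wchar_t *page = page_charset2uni[lead];
+ *	wchar_t uni = (page && trail) ? page[trail] : 0;
+ *
+ * where "lead" and "trail" are the two raw bytes of the sequence.
+ */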
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t c2u_A1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x3000,0xFF0C,0x3001,0x3002,0xFF0E,0x2027,0xFF1B,0xFF1A,/* 0x40-0x47 */
+	0xFF1F,0xFF01,0xFE30,0x2026,0x2025,0xFE50,0xFE51,0xFE52,/* 0x48-0x4F */
+	0x00B7,0xFE54,0xFE55,0xFE56,0xFE57,0xFF5C,0x2013,0xFE31,/* 0x50-0x57 */
+	0x2014,0xFE33,0x2574,0xFE34,0xFE4F,0xFF08,0xFF09,0xFE35,/* 0x58-0x5F */
+	0xFE36,0xFF5B,0xFF5D,0xFE37,0xFE38,0x3014,0x3015,0xFE39,/* 0x60-0x67 */
+	0xFE3A,0x3010,0x3011,0xFE3B,0xFE3C,0x300A,0x300B,0xFE3D,/* 0x68-0x6F */
+	0xFE3E,0x3008,0x3009,0xFE3F,0xFE40,0x300C,0x300D,0xFE41,/* 0x70-0x77 */
+	0xFE42,0x300E,0x300F,0xFE43,0xFE44,0xFE59,0xFE5A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0xFE5B,0xFE5C,0xFE5D,0xFE5E,0x2018,0x2019,0x201C,/* 0xA0-0xA7 */
+	0x201D,0x301D,0x301E,0x2035,0x2032,0xFF03,0xFF06,0xFF0A,/* 0xA8-0xAF */
+	0x203B,0x00A7,0x3003,0x25CB,0x25CF,0x25B3,0x25B2,0x25CE,/* 0xB0-0xB7 */
+	0x2606,0x2605,0x25C7,0x25C6,0x25A1,0x25A0,0x25BD,0x25BC,/* 0xB8-0xBF */
+	0x32A3,0x2105,0x00AF,0xFFE3,0xFF3F,0x02CD,0xFE49,0xFE4A,/* 0xC0-0xC7 */
+	0xFE4D,0xFE4E,0xFE4B,0xFE4C,0xFE5F,0xFE60,0xFE61,0xFF0B,/* 0xC8-0xCF */
+	0xFF0D,0x00D7,0x00F7,0x00B1,0x221A,0xFF1C,0xFF1E,0xFF1D,/* 0xD0-0xD7 */
+	0x2266,0x2267,0x2260,0x221E,0x2252,0x2261,0xFE62,0xFE63,/* 0xD8-0xDF */
+	0xFE64,0xFE65,0xFE66,0xFF5E,0x2229,0x222A,0x22A5,0x2220,/* 0xE0-0xE7 */
+	0x221F,0x22BF,0x33D2,0x33D1,0x222B,0x222E,0x2235,0x2234,/* 0xE8-0xEF */
+	0x2640,0x2642,0x2295,0x2299,0x2191,0x2193,0x2190,0x2192,/* 0xF0-0xF7 */
+	0x2196,0x2197,0x2199,0x2198,0x2225,0x2223,0xFF0F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0xFF3C,0x2215,0xFE68,0xFF04,0xFFE5,0x3012,0xFFE0,0xFFE1,/* 0x40-0x47 */
+	0xFF05,0xFF20,0x2103,0x2109,0xFE69,0xFE6A,0xFE6B,0x33D5,/* 0x48-0x4F */
+	0x339C,0x339D,0x339E,0x33CE,0x33A1,0x338E,0x338F,0x33C4,/* 0x50-0x57 */
+	0x00B0,0x5159,0x515B,0x515E,0x515D,0x5161,0x5163,0x55E7,/* 0x58-0x5F */
+	0x74E9,0x7CCE,0x2581,0x2582,0x2583,0x2584,0x2585,0x2586,/* 0x60-0x67 */
+	0x2587,0x2588,0x258F,0x258E,0x258D,0x258C,0x258B,0x258A,/* 0x68-0x6F */
+	0x2589,0x253C,0x2534,0x252C,0x2524,0x251C,0x2594,0x2500,/* 0x70-0x77 */
+	0x2502,0x2595,0x250C,0x2510,0x2514,0x2518,0x256D,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x256E,0x2570,0x256F,0x2550,0x255E,0x256A,0x2561,/* 0xA0-0xA7 */
+	0x25E2,0x25E3,0x25E5,0x25E4,0x2571,0x2572,0x2573,0xFF10,/* 0xA8-0xAF */
+	0xFF11,0xFF12,0xFF13,0xFF14,0xFF15,0xFF16,0xFF17,0xFF18,/* 0xB0-0xB7 */
+	0xFF19,0x2160,0x2161,0x2162,0x2163,0x2164,0x2165,0x2166,/* 0xB8-0xBF */
+	0x2167,0x2168,0x2169,0x3021,0x3022,0x3023,0x3024,0x3025,/* 0xC0-0xC7 */
+	0x3026,0x3027,0x3028,0x3029,0x5341,0x5344,0x5345,0xFF21,/* 0xC8-0xCF */
+	0xFF22,0xFF23,0xFF24,0xFF25,0xFF26,0xFF27,0xFF28,0xFF29,/* 0xD0-0xD7 */
+	0xFF2A,0xFF2B,0xFF2C,0xFF2D,0xFF2E,0xFF2F,0xFF30,0xFF31,/* 0xD8-0xDF */
+	0xFF32,0xFF33,0xFF34,0xFF35,0xFF36,0xFF37,0xFF38,0xFF39,/* 0xE0-0xE7 */
+	0xFF3A,0xFF41,0xFF42,0xFF43,0xFF44,0xFF45,0xFF46,0xFF47,/* 0xE8-0xEF */
+	0xFF48,0xFF49,0xFF4A,0xFF4B,0xFF4C,0xFF4D,0xFF4E,0xFF4F,/* 0xF0-0xF7 */
+	0xFF50,0xFF51,0xFF52,0xFF53,0xFF54,0xFF55,0xFF56,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0xFF57,0xFF58,0xFF59,0xFF5A,0x0391,0x0392,0x0393,0x0394,/* 0x40-0x47 */
+	0x0395,0x0396,0x0397,0x0398,0x0399,0x039A,0x039B,0x039C,/* 0x48-0x4F */
+	0x039D,0x039E,0x039F,0x03A0,0x03A1,0x03A3,0x03A4,0x03A5,/* 0x50-0x57 */
+	0x03A6,0x03A7,0x03A8,0x03A9,0x03B1,0x03B2,0x03B3,0x03B4,/* 0x58-0x5F */
+	0x03B5,0x03B6,0x03B7,0x03B8,0x03B9,0x03BA,0x03BB,0x03BC,/* 0x60-0x67 */
+	0x03BD,0x03BE,0x03BF,0x03C0,0x03C1,0x03C3,0x03C4,0x03C5,/* 0x68-0x6F */
+	0x03C6,0x03C7,0x03C8,0x03C9,0x3105,0x3106,0x3107,0x3108,/* 0x70-0x77 */
+	0x3109,0x310A,0x310B,0x310C,0x310D,0x310E,0x310F,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x3110,0x3111,0x3112,0x3113,0x3114,0x3115,0x3116,/* 0xA0-0xA7 */
+	0x3117,0x3118,0x3119,0x311A,0x311B,0x311C,0x311D,0x311E,/* 0xA8-0xAF */
+	0x311F,0x3120,0x3121,0x3122,0x3123,0x3124,0x3125,0x3126,/* 0xB0-0xB7 */
+	0x3127,0x3128,0x3129,0x02D9,0x02C9,0x02CA,0x02C7,0x02CB,/* 0xB8-0xBF */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC0-0xC7 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xC8-0xCF */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xD0-0xD7 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xD8-0xDF */
+	0x0000,0x20AC,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0xE0-0xE7 */
+};
+
+static wchar_t c2u_A4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4E00,0x4E59,0x4E01,0x4E03,0x4E43,0x4E5D,0x4E86,0x4E8C,/* 0x40-0x47 */
+	0x4EBA,0x513F,0x5165,0x516B,0x51E0,0x5200,0x5201,0x529B,/* 0x48-0x4F */
+	0x5315,0x5341,0x535C,0x53C8,0x4E09,0x4E0B,0x4E08,0x4E0A,/* 0x50-0x57 */
+	0x4E2B,0x4E38,0x51E1,0x4E45,0x4E48,0x4E5F,0x4E5E,0x4E8E,/* 0x58-0x5F */
+	0x4EA1,0x5140,0x5203,0x52FA,0x5343,0x53C9,0x53E3,0x571F,/* 0x60-0x67 */
+	0x58EB,0x5915,0x5927,0x5973,0x5B50,0x5B51,0x5B53,0x5BF8,/* 0x68-0x6F */
+	0x5C0F,0x5C22,0x5C38,0x5C71,0x5DDD,0x5DE5,0x5DF1,0x5DF2,/* 0x70-0x77 */
+	0x5DF3,0x5DFE,0x5E72,0x5EFE,0x5F0B,0x5F13,0x624D,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x4E11,0x4E10,0x4E0D,0x4E2D,0x4E30,0x4E39,0x4E4B,/* 0xA0-0xA7 */
+	0x5C39,0x4E88,0x4E91,0x4E95,0x4E92,0x4E94,0x4EA2,0x4EC1,/* 0xA8-0xAF */
+	0x4EC0,0x4EC3,0x4EC6,0x4EC7,0x4ECD,0x4ECA,0x4ECB,0x4EC4,/* 0xB0-0xB7 */
+	0x5143,0x5141,0x5167,0x516D,0x516E,0x516C,0x5197,0x51F6,/* 0xB8-0xBF */
+	0x5206,0x5207,0x5208,0x52FB,0x52FE,0x52FF,0x5316,0x5339,/* 0xC0-0xC7 */
+	0x5348,0x5347,0x5345,0x535E,0x5384,0x53CB,0x53CA,0x53CD,/* 0xC8-0xCF */
+	0x58EC,0x5929,0x592B,0x592A,0x592D,0x5B54,0x5C11,0x5C24,/* 0xD0-0xD7 */
+	0x5C3A,0x5C6F,0x5DF4,0x5E7B,0x5EFF,0x5F14,0x5F15,0x5FC3,/* 0xD8-0xDF */
+	0x6208,0x6236,0x624B,0x624E,0x652F,0x6587,0x6597,0x65A4,/* 0xE0-0xE7 */
+	0x65B9,0x65E5,0x66F0,0x6708,0x6728,0x6B20,0x6B62,0x6B79,/* 0xE8-0xEF */
+	0x6BCB,0x6BD4,0x6BDB,0x6C0F,0x6C34,0x706B,0x722A,0x7236,/* 0xF0-0xF7 */
+	0x723B,0x7247,0x7259,0x725B,0x72AC,0x738B,0x4E19,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4E16,0x4E15,0x4E14,0x4E18,0x4E3B,0x4E4D,0x4E4F,0x4E4E,/* 0x40-0x47 */
+	0x4EE5,0x4ED8,0x4ED4,0x4ED5,0x4ED6,0x4ED7,0x4EE3,0x4EE4,/* 0x48-0x4F */
+	0x4ED9,0x4EDE,0x5145,0x5144,0x5189,0x518A,0x51AC,0x51F9,/* 0x50-0x57 */
+	0x51FA,0x51F8,0x520A,0x52A0,0x529F,0x5305,0x5306,0x5317,/* 0x58-0x5F */
+	0x531D,0x4EDF,0x534A,0x5349,0x5361,0x5360,0x536F,0x536E,/* 0x60-0x67 */
+	0x53BB,0x53EF,0x53E4,0x53F3,0x53EC,0x53EE,0x53E9,0x53E8,/* 0x68-0x6F */
+	0x53FC,0x53F8,0x53F5,0x53EB,0x53E6,0x53EA,0x53F2,0x53F1,/* 0x70-0x77 */
+	0x53F0,0x53E5,0x53ED,0x53FB,0x56DB,0x56DA,0x5916,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x592E,0x5931,0x5974,0x5976,0x5B55,0x5B83,0x5C3C,/* 0xA0-0xA7 */
+	0x5DE8,0x5DE7,0x5DE6,0x5E02,0x5E03,0x5E73,0x5E7C,0x5F01,/* 0xA8-0xAF */
+	0x5F18,0x5F17,0x5FC5,0x620A,0x6253,0x6254,0x6252,0x6251,/* 0xB0-0xB7 */
+	0x65A5,0x65E6,0x672E,0x672C,0x672A,0x672B,0x672D,0x6B63,/* 0xB8-0xBF */
+	0x6BCD,0x6C11,0x6C10,0x6C38,0x6C41,0x6C40,0x6C3E,0x72AF,/* 0xC0-0xC7 */
+	0x7384,0x7389,0x74DC,0x74E6,0x7518,0x751F,0x7528,0x7529,/* 0xC8-0xCF */
+	0x7530,0x7531,0x7532,0x7533,0x758B,0x767D,0x76AE,0x76BF,/* 0xD0-0xD7 */
+	0x76EE,0x77DB,0x77E2,0x77F3,0x793A,0x79BE,0x7A74,0x7ACB,/* 0xD8-0xDF */
+	0x4E1E,0x4E1F,0x4E52,0x4E53,0x4E69,0x4E99,0x4EA4,0x4EA6,/* 0xE0-0xE7 */
+	0x4EA5,0x4EFF,0x4F09,0x4F19,0x4F0A,0x4F15,0x4F0D,0x4F10,/* 0xE8-0xEF */
+	0x4F11,0x4F0F,0x4EF2,0x4EF6,0x4EFB,0x4EF0,0x4EF3,0x4EFD,/* 0xF0-0xF7 */
+	0x4F01,0x4F0B,0x5149,0x5147,0x5146,0x5148,0x5168,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5171,0x518D,0x51B0,0x5217,0x5211,0x5212,0x520E,0x5216,/* 0x40-0x47 */
+	0x52A3,0x5308,0x5321,0x5320,0x5370,0x5371,0x5409,0x540F,/* 0x48-0x4F */
+	0x540C,0x540A,0x5410,0x5401,0x540B,0x5404,0x5411,0x540D,/* 0x50-0x57 */
+	0x5408,0x5403,0x540E,0x5406,0x5412,0x56E0,0x56DE,0x56DD,/* 0x58-0x5F */
+	0x5733,0x5730,0x5728,0x572D,0x572C,0x572F,0x5729,0x5919,/* 0x60-0x67 */
+	0x591A,0x5937,0x5938,0x5984,0x5978,0x5983,0x597D,0x5979,/* 0x68-0x6F */
+	0x5982,0x5981,0x5B57,0x5B58,0x5B87,0x5B88,0x5B85,0x5B89,/* 0x70-0x77 */
+	0x5BFA,0x5C16,0x5C79,0x5DDE,0x5E06,0x5E76,0x5E74,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5F0F,0x5F1B,0x5FD9,0x5FD6,0x620E,0x620C,0x620D,/* 0xA0-0xA7 */
+	0x6210,0x6263,0x625B,0x6258,0x6536,0x65E9,0x65E8,0x65EC,/* 0xA8-0xAF */
+	0x65ED,0x66F2,0x66F3,0x6709,0x673D,0x6734,0x6731,0x6735,/* 0xB0-0xB7 */
+	0x6B21,0x6B64,0x6B7B,0x6C16,0x6C5D,0x6C57,0x6C59,0x6C5F,/* 0xB8-0xBF */
+	0x6C60,0x6C50,0x6C55,0x6C61,0x6C5B,0x6C4D,0x6C4E,0x7070,/* 0xC0-0xC7 */
+	0x725F,0x725D,0x767E,0x7AF9,0x7C73,0x7CF8,0x7F36,0x7F8A,/* 0xC8-0xCF */
+	0x7FBD,0x8001,0x8003,0x800C,0x8012,0x8033,0x807F,0x8089,/* 0xD0-0xD7 */
+	0x808B,0x808C,0x81E3,0x81EA,0x81F3,0x81FC,0x820C,0x821B,/* 0xD8-0xDF */
+	0x821F,0x826E,0x8272,0x827E,0x866B,0x8840,0x884C,0x8863,/* 0xE0-0xE7 */
+	0x897F,0x9621,0x4E32,0x4EA8,0x4F4D,0x4F4F,0x4F47,0x4F57,/* 0xE8-0xEF */
+	0x4F5E,0x4F34,0x4F5B,0x4F55,0x4F30,0x4F50,0x4F51,0x4F3D,/* 0xF0-0xF7 */
+	0x4F3A,0x4F38,0x4F43,0x4F54,0x4F3C,0x4F46,0x4F63,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4F5C,0x4F60,0x4F2F,0x4F4E,0x4F36,0x4F59,0x4F5D,0x4F48,/* 0x40-0x47 */
+	0x4F5A,0x514C,0x514B,0x514D,0x5175,0x51B6,0x51B7,0x5225,/* 0x48-0x4F */
+	0x5224,0x5229,0x522A,0x5228,0x52AB,0x52A9,0x52AA,0x52AC,/* 0x50-0x57 */
+	0x5323,0x5373,0x5375,0x541D,0x542D,0x541E,0x543E,0x5426,/* 0x58-0x5F */
+	0x544E,0x5427,0x5446,0x5443,0x5433,0x5448,0x5442,0x541B,/* 0x60-0x67 */
+	0x5429,0x544A,0x5439,0x543B,0x5438,0x542E,0x5435,0x5436,/* 0x68-0x6F */
+	0x5420,0x543C,0x5440,0x5431,0x542B,0x541F,0x542C,0x56EA,/* 0x70-0x77 */
+	0x56F0,0x56E4,0x56EB,0x574A,0x5751,0x5740,0x574D,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5747,0x574E,0x573E,0x5750,0x574F,0x573B,0x58EF,/* 0xA0-0xA7 */
+	0x593E,0x599D,0x5992,0x59A8,0x599E,0x59A3,0x5999,0x5996,/* 0xA8-0xAF */
+	0x598D,0x59A4,0x5993,0x598A,0x59A5,0x5B5D,0x5B5C,0x5B5A,/* 0xB0-0xB7 */
+	0x5B5B,0x5B8C,0x5B8B,0x5B8F,0x5C2C,0x5C40,0x5C41,0x5C3F,/* 0xB8-0xBF */
+	0x5C3E,0x5C90,0x5C91,0x5C94,0x5C8C,0x5DEB,0x5E0C,0x5E8F,/* 0xC0-0xC7 */
+	0x5E87,0x5E8A,0x5EF7,0x5F04,0x5F1F,0x5F64,0x5F62,0x5F77,/* 0xC8-0xCF */
+	0x5F79,0x5FD8,0x5FCC,0x5FD7,0x5FCD,0x5FF1,0x5FEB,0x5FF8,/* 0xD0-0xD7 */
+	0x5FEA,0x6212,0x6211,0x6284,0x6297,0x6296,0x6280,0x6276,/* 0xD8-0xDF */
+	0x6289,0x626D,0x628A,0x627C,0x627E,0x6279,0x6273,0x6292,/* 0xE0-0xE7 */
+	0x626F,0x6298,0x626E,0x6295,0x6293,0x6291,0x6286,0x6539,/* 0xE8-0xEF */
+	0x653B,0x6538,0x65F1,0x66F4,0x675F,0x674E,0x674F,0x6750,/* 0xF0-0xF7 */
+	0x6751,0x675C,0x6756,0x675E,0x6749,0x6746,0x6760,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6753,0x6757,0x6B65,0x6BCF,0x6C42,0x6C5E,0x6C99,0x6C81,/* 0x40-0x47 */
+	0x6C88,0x6C89,0x6C85,0x6C9B,0x6C6A,0x6C7A,0x6C90,0x6C70,/* 0x48-0x4F */
+	0x6C8C,0x6C68,0x6C96,0x6C92,0x6C7D,0x6C83,0x6C72,0x6C7E,/* 0x50-0x57 */
+	0x6C74,0x6C86,0x6C76,0x6C8D,0x6C94,0x6C98,0x6C82,0x7076,/* 0x58-0x5F */
+	0x707C,0x707D,0x7078,0x7262,0x7261,0x7260,0x72C4,0x72C2,/* 0x60-0x67 */
+	0x7396,0x752C,0x752B,0x7537,0x7538,0x7682,0x76EF,0x77E3,/* 0x68-0x6F */
+	0x79C1,0x79C0,0x79BF,0x7A76,0x7CFB,0x7F55,0x8096,0x8093,/* 0x70-0x77 */
+	0x809D,0x8098,0x809B,0x809A,0x80B2,0x826F,0x8292,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x828B,0x828D,0x898B,0x89D2,0x8A00,0x8C37,0x8C46,/* 0xA0-0xA7 */
+	0x8C55,0x8C9D,0x8D64,0x8D70,0x8DB3,0x8EAB,0x8ECA,0x8F9B,/* 0xA8-0xAF */
+	0x8FB0,0x8FC2,0x8FC6,0x8FC5,0x8FC4,0x5DE1,0x9091,0x90A2,/* 0xB0-0xB7 */
+	0x90AA,0x90A6,0x90A3,0x9149,0x91C6,0x91CC,0x9632,0x962E,/* 0xB8-0xBF */
+	0x9631,0x962A,0x962C,0x4E26,0x4E56,0x4E73,0x4E8B,0x4E9B,/* 0xC0-0xC7 */
+	0x4E9E,0x4EAB,0x4EAC,0x4F6F,0x4F9D,0x4F8D,0x4F73,0x4F7F,/* 0xC8-0xCF */
+	0x4F6C,0x4F9B,0x4F8B,0x4F86,0x4F83,0x4F70,0x4F75,0x4F88,/* 0xD0-0xD7 */
+	0x4F69,0x4F7B,0x4F96,0x4F7E,0x4F8F,0x4F91,0x4F7A,0x5154,/* 0xD8-0xDF */
+	0x5152,0x5155,0x5169,0x5177,0x5176,0x5178,0x51BD,0x51FD,/* 0xE0-0xE7 */
+	0x523B,0x5238,0x5237,0x523A,0x5230,0x522E,0x5236,0x5241,/* 0xE8-0xEF */
+	0x52BE,0x52BB,0x5352,0x5354,0x5353,0x5351,0x5366,0x5377,/* 0xF0-0xF7 */
+	0x5378,0x5379,0x53D6,0x53D4,0x53D7,0x5473,0x5475,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_A9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5496,0x5478,0x5495,0x5480,0x547B,0x5477,0x5484,0x5492,/* 0x40-0x47 */
+	0x5486,0x547C,0x5490,0x5471,0x5476,0x548C,0x549A,0x5462,/* 0x48-0x4F */
+	0x5468,0x548B,0x547D,0x548E,0x56FA,0x5783,0x5777,0x576A,/* 0x50-0x57 */
+	0x5769,0x5761,0x5766,0x5764,0x577C,0x591C,0x5949,0x5947,/* 0x58-0x5F */
+	0x5948,0x5944,0x5954,0x59BE,0x59BB,0x59D4,0x59B9,0x59AE,/* 0x60-0x67 */
+	0x59D1,0x59C6,0x59D0,0x59CD,0x59CB,0x59D3,0x59CA,0x59AF,/* 0x68-0x6F */
+	0x59B3,0x59D2,0x59C5,0x5B5F,0x5B64,0x5B63,0x5B97,0x5B9A,/* 0x70-0x77 */
+	0x5B98,0x5B9C,0x5B99,0x5B9B,0x5C1A,0x5C48,0x5C45,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5C46,0x5CB7,0x5CA1,0x5CB8,0x5CA9,0x5CAB,0x5CB1,/* 0xA0-0xA7 */
+	0x5CB3,0x5E18,0x5E1A,0x5E16,0x5E15,0x5E1B,0x5E11,0x5E78,/* 0xA8-0xAF */
+	0x5E9A,0x5E97,0x5E9C,0x5E95,0x5E96,0x5EF6,0x5F26,0x5F27,/* 0xB0-0xB7 */
+	0x5F29,0x5F80,0x5F81,0x5F7F,0x5F7C,0x5FDD,0x5FE0,0x5FFD,/* 0xB8-0xBF */
+	0x5FF5,0x5FFF,0x600F,0x6014,0x602F,0x6035,0x6016,0x602A,/* 0xC0-0xC7 */
+	0x6015,0x6021,0x6027,0x6029,0x602B,0x601B,0x6216,0x6215,/* 0xC8-0xCF */
+	0x623F,0x623E,0x6240,0x627F,0x62C9,0x62CC,0x62C4,0x62BF,/* 0xD0-0xD7 */
+	0x62C2,0x62B9,0x62D2,0x62DB,0x62AB,0x62D3,0x62D4,0x62CB,/* 0xD8-0xDF */
+	0x62C8,0x62A8,0x62BD,0x62BC,0x62D0,0x62D9,0x62C7,0x62CD,/* 0xE0-0xE7 */
+	0x62B5,0x62DA,0x62B1,0x62D8,0x62D6,0x62D7,0x62C6,0x62AC,/* 0xE8-0xEF */
+	0x62CE,0x653E,0x65A7,0x65BC,0x65FA,0x6614,0x6613,0x660C,/* 0xF0-0xF7 */
+	0x6606,0x6602,0x660E,0x6600,0x660F,0x6615,0x660A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6607,0x670D,0x670B,0x676D,0x678B,0x6795,0x6771,0x679C,/* 0x40-0x47 */
+	0x6773,0x6777,0x6787,0x679D,0x6797,0x676F,0x6770,0x677F,/* 0x48-0x4F */
+	0x6789,0x677E,0x6790,0x6775,0x679A,0x6793,0x677C,0x676A,/* 0x50-0x57 */
+	0x6772,0x6B23,0x6B66,0x6B67,0x6B7F,0x6C13,0x6C1B,0x6CE3,/* 0x58-0x5F */
+	0x6CE8,0x6CF3,0x6CB1,0x6CCC,0x6CE5,0x6CB3,0x6CBD,0x6CBE,/* 0x60-0x67 */
+	0x6CBC,0x6CE2,0x6CAB,0x6CD5,0x6CD3,0x6CB8,0x6CC4,0x6CB9,/* 0x68-0x6F */
+	0x6CC1,0x6CAE,0x6CD7,0x6CC5,0x6CF1,0x6CBF,0x6CBB,0x6CE1,/* 0x70-0x77 */
+	0x6CDB,0x6CCA,0x6CAC,0x6CEF,0x6CDC,0x6CD6,0x6CE0,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7095,0x708E,0x7092,0x708A,0x7099,0x722C,0x722D,/* 0xA0-0xA7 */
+	0x7238,0x7248,0x7267,0x7269,0x72C0,0x72CE,0x72D9,0x72D7,/* 0xA8-0xAF */
+	0x72D0,0x73A9,0x73A8,0x739F,0x73AB,0x73A5,0x753D,0x759D,/* 0xB0-0xB7 */
+	0x7599,0x759A,0x7684,0x76C2,0x76F2,0x76F4,0x77E5,0x77FD,/* 0xB8-0xBF */
+	0x793E,0x7940,0x7941,0x79C9,0x79C8,0x7A7A,0x7A79,0x7AFA,/* 0xC0-0xC7 */
+	0x7CFE,0x7F54,0x7F8C,0x7F8B,0x8005,0x80BA,0x80A5,0x80A2,/* 0xC8-0xCF */
+	0x80B1,0x80A1,0x80AB,0x80A9,0x80B4,0x80AA,0x80AF,0x81E5,/* 0xD0-0xD7 */
+	0x81FE,0x820D,0x82B3,0x829D,0x8299,0x82AD,0x82BD,0x829F,/* 0xD8-0xDF */
+	0x82B9,0x82B1,0x82AC,0x82A5,0x82AF,0x82B8,0x82A3,0x82B0,/* 0xE0-0xE7 */
+	0x82BE,0x82B7,0x864E,0x8671,0x521D,0x8868,0x8ECB,0x8FCE,/* 0xE8-0xEF */
+	0x8FD4,0x8FD1,0x90B5,0x90B8,0x90B1,0x90B6,0x91C7,0x91D1,/* 0xF0-0xF7 */
+	0x9577,0x9580,0x961C,0x9640,0x963F,0x963B,0x9644,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9642,0x96B9,0x96E8,0x9752,0x975E,0x4E9F,0x4EAD,0x4EAE,/* 0x40-0x47 */
+	0x4FE1,0x4FB5,0x4FAF,0x4FBF,0x4FE0,0x4FD1,0x4FCF,0x4FDD,/* 0x48-0x4F */
+	0x4FC3,0x4FB6,0x4FD8,0x4FDF,0x4FCA,0x4FD7,0x4FAE,0x4FD0,/* 0x50-0x57 */
+	0x4FC4,0x4FC2,0x4FDA,0x4FCE,0x4FDE,0x4FB7,0x5157,0x5192,/* 0x58-0x5F */
+	0x5191,0x51A0,0x524E,0x5243,0x524A,0x524D,0x524C,0x524B,/* 0x60-0x67 */
+	0x5247,0x52C7,0x52C9,0x52C3,0x52C1,0x530D,0x5357,0x537B,/* 0x68-0x6F */
+	0x539A,0x53DB,0x54AC,0x54C0,0x54A8,0x54CE,0x54C9,0x54B8,/* 0x70-0x77 */
+	0x54A6,0x54B3,0x54C7,0x54C2,0x54BD,0x54AA,0x54C1,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x54C4,0x54C8,0x54AF,0x54AB,0x54B1,0x54BB,0x54A9,/* 0xA0-0xA7 */
+	0x54A7,0x54BF,0x56FF,0x5782,0x578B,0x57A0,0x57A3,0x57A2,/* 0xA8-0xAF */
+	0x57CE,0x57AE,0x5793,0x5955,0x5951,0x594F,0x594E,0x5950,/* 0xB0-0xB7 */
+	0x59DC,0x59D8,0x59FF,0x59E3,0x59E8,0x5A03,0x59E5,0x59EA,/* 0xB8-0xBF */
+	0x59DA,0x59E6,0x5A01,0x59FB,0x5B69,0x5BA3,0x5BA6,0x5BA4,/* 0xC0-0xC7 */
+	0x5BA2,0x5BA5,0x5C01,0x5C4E,0x5C4F,0x5C4D,0x5C4B,0x5CD9,/* 0xC8-0xCF */
+	0x5CD2,0x5DF7,0x5E1D,0x5E25,0x5E1F,0x5E7D,0x5EA0,0x5EA6,/* 0xD0-0xD7 */
+	0x5EFA,0x5F08,0x5F2D,0x5F65,0x5F88,0x5F85,0x5F8A,0x5F8B,/* 0xD8-0xDF */
+	0x5F87,0x5F8C,0x5F89,0x6012,0x601D,0x6020,0x6025,0x600E,/* 0xE0-0xE7 */
+	0x6028,0x604D,0x6070,0x6068,0x6062,0x6046,0x6043,0x606C,/* 0xE8-0xEF */
+	0x606B,0x606A,0x6064,0x6241,0x62DC,0x6316,0x6309,0x62FC,/* 0xF0-0xF7 */
+	0x62ED,0x6301,0x62EE,0x62FD,0x6307,0x62F1,0x62F7,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x62EF,0x62EC,0x62FE,0x62F4,0x6311,0x6302,0x653F,0x6545,/* 0x40-0x47 */
+	0x65AB,0x65BD,0x65E2,0x6625,0x662D,0x6620,0x6627,0x662F,/* 0x48-0x4F */
+	0x661F,0x6628,0x6631,0x6624,0x66F7,0x67FF,0x67D3,0x67F1,/* 0x50-0x57 */
+	0x67D4,0x67D0,0x67EC,0x67B6,0x67AF,0x67F5,0x67E9,0x67EF,/* 0x58-0x5F */
+	0x67C4,0x67D1,0x67B4,0x67DA,0x67E5,0x67B8,0x67CF,0x67DE,/* 0x60-0x67 */
+	0x67F3,0x67B0,0x67D9,0x67E2,0x67DD,0x67D2,0x6B6A,0x6B83,/* 0x68-0x6F */
+	0x6B86,0x6BB5,0x6BD2,0x6BD7,0x6C1F,0x6CC9,0x6D0B,0x6D32,/* 0x70-0x77 */
+	0x6D2A,0x6D41,0x6D25,0x6D0C,0x6D31,0x6D1E,0x6D17,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6D3B,0x6D3D,0x6D3E,0x6D36,0x6D1B,0x6CF5,0x6D39,/* 0xA0-0xA7 */
+	0x6D27,0x6D38,0x6D29,0x6D2E,0x6D35,0x6D0E,0x6D2B,0x70AB,/* 0xA8-0xAF */
+	0x70BA,0x70B3,0x70AC,0x70AF,0x70AD,0x70B8,0x70AE,0x70A4,/* 0xB0-0xB7 */
+	0x7230,0x7272,0x726F,0x7274,0x72E9,0x72E0,0x72E1,0x73B7,/* 0xB8-0xBF */
+	0x73CA,0x73BB,0x73B2,0x73CD,0x73C0,0x73B3,0x751A,0x752D,/* 0xC0-0xC7 */
+	0x754F,0x754C,0x754E,0x754B,0x75AB,0x75A4,0x75A5,0x75A2,/* 0xC8-0xCF */
+	0x75A3,0x7678,0x7686,0x7687,0x7688,0x76C8,0x76C6,0x76C3,/* 0xD0-0xD7 */
+	0x76C5,0x7701,0x76F9,0x76F8,0x7709,0x770B,0x76FE,0x76FC,/* 0xD8-0xDF */
+	0x7707,0x77DC,0x7802,0x7814,0x780C,0x780D,0x7946,0x7949,/* 0xE0-0xE7 */
+	0x7948,0x7947,0x79B9,0x79BA,0x79D1,0x79D2,0x79CB,0x7A7F,/* 0xE8-0xEF */
+	0x7A81,0x7AFF,0x7AFD,0x7C7D,0x7D02,0x7D05,0x7D00,0x7D09,/* 0xF0-0xF7 */
+	0x7D07,0x7D04,0x7D06,0x7F38,0x7F8E,0x7FBF,0x8004,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8010,0x800D,0x8011,0x8036,0x80D6,0x80E5,0x80DA,0x80C3,/* 0x40-0x47 */
+	0x80C4,0x80CC,0x80E1,0x80DB,0x80CE,0x80DE,0x80E4,0x80DD,/* 0x48-0x4F */
+	0x81F4,0x8222,0x82E7,0x8303,0x8305,0x82E3,0x82DB,0x82E6,/* 0x50-0x57 */
+	0x8304,0x82E5,0x8302,0x8309,0x82D2,0x82D7,0x82F1,0x8301,/* 0x58-0x5F */
+	0x82DC,0x82D4,0x82D1,0x82DE,0x82D3,0x82DF,0x82EF,0x8306,/* 0x60-0x67 */
+	0x8650,0x8679,0x867B,0x867A,0x884D,0x886B,0x8981,0x89D4,/* 0x68-0x6F */
+	0x8A08,0x8A02,0x8A03,0x8C9E,0x8CA0,0x8D74,0x8D73,0x8DB4,/* 0x70-0x77 */
+	0x8ECD,0x8ECC,0x8FF0,0x8FE6,0x8FE2,0x8FEA,0x8FE5,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8FED,0x8FEB,0x8FE4,0x8FE8,0x90CA,0x90CE,0x90C1,/* 0xA0-0xA7 */
+	0x90C3,0x914B,0x914A,0x91CD,0x9582,0x9650,0x964B,0x964C,/* 0xA8-0xAF */
+	0x964D,0x9762,0x9769,0x97CB,0x97ED,0x97F3,0x9801,0x98A8,/* 0xB0-0xB7 */
+	0x98DB,0x98DF,0x9996,0x9999,0x4E58,0x4EB3,0x500C,0x500D,/* 0xB8-0xBF */
+	0x5023,0x4FEF,0x5026,0x5025,0x4FF8,0x5029,0x5016,0x5006,/* 0xC0-0xC7 */
+	0x503C,0x501F,0x501A,0x5012,0x5011,0x4FFA,0x5000,0x5014,/* 0xC8-0xCF */
+	0x5028,0x4FF1,0x5021,0x500B,0x5019,0x5018,0x4FF3,0x4FEE,/* 0xD0-0xD7 */
+	0x502D,0x502A,0x4FFE,0x502B,0x5009,0x517C,0x51A4,0x51A5,/* 0xD8-0xDF */
+	0x51A2,0x51CD,0x51CC,0x51C6,0x51CB,0x5256,0x525C,0x5254,/* 0xE0-0xE7 */
+	0x525B,0x525D,0x532A,0x537F,0x539F,0x539D,0x53DF,0x54E8,/* 0xE8-0xEF */
+	0x5510,0x5501,0x5537,0x54FC,0x54E5,0x54F2,0x5506,0x54FA,/* 0xF0-0xF7 */
+	0x5514,0x54E9,0x54ED,0x54E1,0x5509,0x54EE,0x54EA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x54E6,0x5527,0x5507,0x54FD,0x550F,0x5703,0x5704,0x57C2,/* 0x40-0x47 */
+	0x57D4,0x57CB,0x57C3,0x5809,0x590F,0x5957,0x5958,0x595A,/* 0x48-0x4F */
+	0x5A11,0x5A18,0x5A1C,0x5A1F,0x5A1B,0x5A13,0x59EC,0x5A20,/* 0x50-0x57 */
+	0x5A23,0x5A29,0x5A25,0x5A0C,0x5A09,0x5B6B,0x5C58,0x5BB0,/* 0x58-0x5F */
+	0x5BB3,0x5BB6,0x5BB4,0x5BAE,0x5BB5,0x5BB9,0x5BB8,0x5C04,/* 0x60-0x67 */
+	0x5C51,0x5C55,0x5C50,0x5CED,0x5CFD,0x5CFB,0x5CEA,0x5CE8,/* 0x68-0x6F */
+	0x5CF0,0x5CF6,0x5D01,0x5CF4,0x5DEE,0x5E2D,0x5E2B,0x5EAB,/* 0x70-0x77 */
+	0x5EAD,0x5EA7,0x5F31,0x5F92,0x5F91,0x5F90,0x6059,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6063,0x6065,0x6050,0x6055,0x606D,0x6069,0x606F,/* 0xA0-0xA7 */
+	0x6084,0x609F,0x609A,0x608D,0x6094,0x608C,0x6085,0x6096,/* 0xA8-0xAF */
+	0x6247,0x62F3,0x6308,0x62FF,0x634E,0x633E,0x632F,0x6355,/* 0xB0-0xB7 */
+	0x6342,0x6346,0x634F,0x6349,0x633A,0x6350,0x633D,0x632A,/* 0xB8-0xBF */
+	0x632B,0x6328,0x634D,0x634C,0x6548,0x6549,0x6599,0x65C1,/* 0xC0-0xC7 */
+	0x65C5,0x6642,0x6649,0x664F,0x6643,0x6652,0x664C,0x6645,/* 0xC8-0xCF */
+	0x6641,0x66F8,0x6714,0x6715,0x6717,0x6821,0x6838,0x6848,/* 0xD0-0xD7 */
+	0x6846,0x6853,0x6839,0x6842,0x6854,0x6829,0x68B3,0x6817,/* 0xD8-0xDF */
+	0x684C,0x6851,0x683D,0x67F4,0x6850,0x6840,0x683C,0x6843,/* 0xE0-0xE7 */
+	0x682A,0x6845,0x6813,0x6818,0x6841,0x6B8A,0x6B89,0x6BB7,/* 0xE8-0xEF */
+	0x6C23,0x6C27,0x6C28,0x6C26,0x6C24,0x6CF0,0x6D6A,0x6D95,/* 0xF0-0xF7 */
+	0x6D88,0x6D87,0x6D66,0x6D78,0x6D77,0x6D59,0x6D93,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_AF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6D6C,0x6D89,0x6D6E,0x6D5A,0x6D74,0x6D69,0x6D8C,0x6D8A,/* 0x40-0x47 */
+	0x6D79,0x6D85,0x6D65,0x6D94,0x70CA,0x70D8,0x70E4,0x70D9,/* 0x48-0x4F */
+	0x70C8,0x70CF,0x7239,0x7279,0x72FC,0x72F9,0x72FD,0x72F8,/* 0x50-0x57 */
+	0x72F7,0x7386,0x73ED,0x7409,0x73EE,0x73E0,0x73EA,0x73DE,/* 0x58-0x5F */
+	0x7554,0x755D,0x755C,0x755A,0x7559,0x75BE,0x75C5,0x75C7,/* 0x60-0x67 */
+	0x75B2,0x75B3,0x75BD,0x75BC,0x75B9,0x75C2,0x75B8,0x768B,/* 0x68-0x6F */
+	0x76B0,0x76CA,0x76CD,0x76CE,0x7729,0x771F,0x7720,0x7728,/* 0x70-0x77 */
+	0x77E9,0x7830,0x7827,0x7838,0x781D,0x7834,0x7837,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7825,0x782D,0x7820,0x781F,0x7832,0x7955,0x7950,/* 0xA0-0xA7 */
+	0x7960,0x795F,0x7956,0x795E,0x795D,0x7957,0x795A,0x79E4,/* 0xA8-0xAF */
+	0x79E3,0x79E7,0x79DF,0x79E6,0x79E9,0x79D8,0x7A84,0x7A88,/* 0xB0-0xB7 */
+	0x7AD9,0x7B06,0x7B11,0x7C89,0x7D21,0x7D17,0x7D0B,0x7D0A,/* 0xB8-0xBF */
+	0x7D20,0x7D22,0x7D14,0x7D10,0x7D15,0x7D1A,0x7D1C,0x7D0D,/* 0xC0-0xC7 */
+	0x7D19,0x7D1B,0x7F3A,0x7F5F,0x7F94,0x7FC5,0x7FC1,0x8006,/* 0xC8-0xCF */
+	0x8018,0x8015,0x8019,0x8017,0x803D,0x803F,0x80F1,0x8102,/* 0xD0-0xD7 */
+	0x80F0,0x8105,0x80ED,0x80F4,0x8106,0x80F8,0x80F3,0x8108,/* 0xD8-0xDF */
+	0x80FD,0x810A,0x80FC,0x80EF,0x81ED,0x81EC,0x8200,0x8210,/* 0xE0-0xE7 */
+	0x822A,0x822B,0x8228,0x822C,0x82BB,0x832B,0x8352,0x8354,/* 0xE8-0xEF */
+	0x834A,0x8338,0x8350,0x8349,0x8335,0x8334,0x834F,0x8332,/* 0xF0-0xF7 */
+	0x8339,0x8336,0x8317,0x8340,0x8331,0x8328,0x8343,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8654,0x868A,0x86AA,0x8693,0x86A4,0x86A9,0x868C,0x86A3,/* 0x40-0x47 */
+	0x869C,0x8870,0x8877,0x8881,0x8882,0x887D,0x8879,0x8A18,/* 0x48-0x4F */
+	0x8A10,0x8A0E,0x8A0C,0x8A15,0x8A0A,0x8A17,0x8A13,0x8A16,/* 0x50-0x57 */
+	0x8A0F,0x8A11,0x8C48,0x8C7A,0x8C79,0x8CA1,0x8CA2,0x8D77,/* 0x58-0x5F */
+	0x8EAC,0x8ED2,0x8ED4,0x8ECF,0x8FB1,0x9001,0x9006,0x8FF7,/* 0x60-0x67 */
+	0x9000,0x8FFA,0x8FF4,0x9003,0x8FFD,0x9005,0x8FF8,0x9095,/* 0x68-0x6F */
+	0x90E1,0x90DD,0x90E2,0x9152,0x914D,0x914C,0x91D8,0x91DD,/* 0x70-0x77 */
+	0x91D7,0x91DC,0x91D9,0x9583,0x9662,0x9663,0x9661,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x965B,0x965D,0x9664,0x9658,0x965E,0x96BB,0x98E2,/* 0xA0-0xA7 */
+	0x99AC,0x9AA8,0x9AD8,0x9B25,0x9B32,0x9B3C,0x4E7E,0x507A,/* 0xA8-0xAF */
+	0x507D,0x505C,0x5047,0x5043,0x504C,0x505A,0x5049,0x5065,/* 0xB0-0xB7 */
+	0x5076,0x504E,0x5055,0x5075,0x5074,0x5077,0x504F,0x500F,/* 0xB8-0xBF */
+	0x506F,0x506D,0x515C,0x5195,0x51F0,0x526A,0x526F,0x52D2,/* 0xC0-0xC7 */
+	0x52D9,0x52D8,0x52D5,0x5310,0x530F,0x5319,0x533F,0x5340,/* 0xC8-0xCF */
+	0x533E,0x53C3,0x66FC,0x5546,0x556A,0x5566,0x5544,0x555E,/* 0xD0-0xD7 */
+	0x5561,0x5543,0x554A,0x5531,0x5556,0x554F,0x5555,0x552F,/* 0xD8-0xDF */
+	0x5564,0x5538,0x552E,0x555C,0x552C,0x5563,0x5533,0x5541,/* 0xE0-0xE7 */
+	0x5557,0x5708,0x570B,0x5709,0x57DF,0x5805,0x580A,0x5806,/* 0xE8-0xEF */
+	0x57E0,0x57E4,0x57FA,0x5802,0x5835,0x57F7,0x57F9,0x5920,/* 0xF0-0xF7 */
+	0x5962,0x5A36,0x5A41,0x5A49,0x5A66,0x5A6A,0x5A40,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5A3C,0x5A62,0x5A5A,0x5A46,0x5A4A,0x5B70,0x5BC7,0x5BC5,/* 0x40-0x47 */
+	0x5BC4,0x5BC2,0x5BBF,0x5BC6,0x5C09,0x5C08,0x5C07,0x5C60,/* 0x48-0x4F */
+	0x5C5C,0x5C5D,0x5D07,0x5D06,0x5D0E,0x5D1B,0x5D16,0x5D22,/* 0x50-0x57 */
+	0x5D11,0x5D29,0x5D14,0x5D19,0x5D24,0x5D27,0x5D17,0x5DE2,/* 0x58-0x5F */
+	0x5E38,0x5E36,0x5E33,0x5E37,0x5EB7,0x5EB8,0x5EB6,0x5EB5,/* 0x60-0x67 */
+	0x5EBE,0x5F35,0x5F37,0x5F57,0x5F6C,0x5F69,0x5F6B,0x5F97,/* 0x68-0x6F */
+	0x5F99,0x5F9E,0x5F98,0x5FA1,0x5FA0,0x5F9C,0x607F,0x60A3,/* 0x70-0x77 */
+	0x6089,0x60A0,0x60A8,0x60CB,0x60B4,0x60E6,0x60BD,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x60C5,0x60BB,0x60B5,0x60DC,0x60BC,0x60D8,0x60D5,/* 0xA0-0xA7 */
+	0x60C6,0x60DF,0x60B8,0x60DA,0x60C7,0x621A,0x621B,0x6248,/* 0xA8-0xAF */
+	0x63A0,0x63A7,0x6372,0x6396,0x63A2,0x63A5,0x6377,0x6367,/* 0xB0-0xB7 */
+	0x6398,0x63AA,0x6371,0x63A9,0x6389,0x6383,0x639B,0x636B,/* 0xB8-0xBF */
+	0x63A8,0x6384,0x6388,0x6399,0x63A1,0x63AC,0x6392,0x638F,/* 0xC0-0xC7 */
+	0x6380,0x637B,0x6369,0x6368,0x637A,0x655D,0x6556,0x6551,/* 0xC8-0xCF */
+	0x6559,0x6557,0x555F,0x654F,0x6558,0x6555,0x6554,0x659C,/* 0xD0-0xD7 */
+	0x659B,0x65AC,0x65CF,0x65CB,0x65CC,0x65CE,0x665D,0x665A,/* 0xD8-0xDF */
+	0x6664,0x6668,0x6666,0x665E,0x66F9,0x52D7,0x671B,0x6881,/* 0xE0-0xE7 */
+	0x68AF,0x68A2,0x6893,0x68B5,0x687F,0x6876,0x68B1,0x68A7,/* 0xE8-0xEF */
+	0x6897,0x68B0,0x6883,0x68C4,0x68AD,0x6886,0x6885,0x6894,/* 0xF0-0xF7 */
+	0x689D,0x68A8,0x689F,0x68A1,0x6882,0x6B32,0x6BBA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6BEB,0x6BEC,0x6C2B,0x6D8E,0x6DBC,0x6DF3,0x6DD9,0x6DB2,/* 0x40-0x47 */
+	0x6DE1,0x6DCC,0x6DE4,0x6DFB,0x6DFA,0x6E05,0x6DC7,0x6DCB,/* 0x48-0x4F */
+	0x6DAF,0x6DD1,0x6DAE,0x6DDE,0x6DF9,0x6DB8,0x6DF7,0x6DF5,/* 0x50-0x57 */
+	0x6DC5,0x6DD2,0x6E1A,0x6DB5,0x6DDA,0x6DEB,0x6DD8,0x6DEA,/* 0x58-0x5F */
+	0x6DF1,0x6DEE,0x6DE8,0x6DC6,0x6DC4,0x6DAA,0x6DEC,0x6DBF,/* 0x60-0x67 */
+	0x6DE6,0x70F9,0x7109,0x710A,0x70FD,0x70EF,0x723D,0x727D,/* 0x68-0x6F */
+	0x7281,0x731C,0x731B,0x7316,0x7313,0x7319,0x7387,0x7405,/* 0x70-0x77 */
+	0x740A,0x7403,0x7406,0x73FE,0x740D,0x74E0,0x74F6,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x74F7,0x751C,0x7522,0x7565,0x7566,0x7562,0x7570,/* 0xA0-0xA7 */
+	0x758F,0x75D4,0x75D5,0x75B5,0x75CA,0x75CD,0x768E,0x76D4,/* 0xA8-0xAF */
+	0x76D2,0x76DB,0x7737,0x773E,0x773C,0x7736,0x7738,0x773A,/* 0xB0-0xB7 */
+	0x786B,0x7843,0x784E,0x7965,0x7968,0x796D,0x79FB,0x7A92,/* 0xB8-0xBF */
+	0x7A95,0x7B20,0x7B28,0x7B1B,0x7B2C,0x7B26,0x7B19,0x7B1E,/* 0xC0-0xC7 */
+	0x7B2E,0x7C92,0x7C97,0x7C95,0x7D46,0x7D43,0x7D71,0x7D2E,/* 0xC8-0xCF */
+	0x7D39,0x7D3C,0x7D40,0x7D30,0x7D33,0x7D44,0x7D2F,0x7D42,/* 0xD0-0xD7 */
+	0x7D32,0x7D31,0x7F3D,0x7F9E,0x7F9A,0x7FCC,0x7FCE,0x7FD2,/* 0xD8-0xDF */
+	0x801C,0x804A,0x8046,0x812F,0x8116,0x8123,0x812B,0x8129,/* 0xE0-0xE7 */
+	0x8130,0x8124,0x8202,0x8235,0x8237,0x8236,0x8239,0x838E,/* 0xE8-0xEF */
+	0x839E,0x8398,0x8378,0x83A2,0x8396,0x83BD,0x83AB,0x8392,/* 0xF0-0xF7 */
+	0x838A,0x8393,0x8389,0x83A0,0x8377,0x837B,0x837C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8386,0x83A7,0x8655,0x5F6A,0x86C7,0x86C0,0x86B6,0x86C4,/* 0x40-0x47 */
+	0x86B5,0x86C6,0x86CB,0x86B1,0x86AF,0x86C9,0x8853,0x889E,/* 0x48-0x4F */
+	0x8888,0x88AB,0x8892,0x8896,0x888D,0x888B,0x8993,0x898F,/* 0x50-0x57 */
+	0x8A2A,0x8A1D,0x8A23,0x8A25,0x8A31,0x8A2D,0x8A1F,0x8A1B,/* 0x58-0x5F */
+	0x8A22,0x8C49,0x8C5A,0x8CA9,0x8CAC,0x8CAB,0x8CA8,0x8CAA,/* 0x60-0x67 */
+	0x8CA7,0x8D67,0x8D66,0x8DBE,0x8DBA,0x8EDB,0x8EDF,0x9019,/* 0x68-0x6F */
+	0x900D,0x901A,0x9017,0x9023,0x901F,0x901D,0x9010,0x9015,/* 0x70-0x77 */
+	0x901E,0x9020,0x900F,0x9022,0x9016,0x901B,0x9014,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x90E8,0x90ED,0x90FD,0x9157,0x91CE,0x91F5,0x91E6,/* 0xA0-0xA7 */
+	0x91E3,0x91E7,0x91ED,0x91E9,0x9589,0x966A,0x9675,0x9673,/* 0xA8-0xAF */
+	0x9678,0x9670,0x9674,0x9676,0x9677,0x966C,0x96C0,0x96EA,/* 0xB0-0xB7 */
+	0x96E9,0x7AE0,0x7ADF,0x9802,0x9803,0x9B5A,0x9CE5,0x9E75,/* 0xB8-0xBF */
+	0x9E7F,0x9EA5,0x9EBB,0x50A2,0x508D,0x5085,0x5099,0x5091,/* 0xC0-0xC7 */
+	0x5080,0x5096,0x5098,0x509A,0x6700,0x51F1,0x5272,0x5274,/* 0xC8-0xCF */
+	0x5275,0x5269,0x52DE,0x52DD,0x52DB,0x535A,0x53A5,0x557B,/* 0xD0-0xD7 */
+	0x5580,0x55A7,0x557C,0x558A,0x559D,0x5598,0x5582,0x559C,/* 0xD8-0xDF */
+	0x55AA,0x5594,0x5587,0x558B,0x5583,0x55B3,0x55AE,0x559F,/* 0xE0-0xE7 */
+	0x553E,0x55B2,0x559A,0x55BB,0x55AC,0x55B1,0x557E,0x5589,/* 0xE8-0xEF */
+	0x55AB,0x5599,0x570D,0x582F,0x582A,0x5834,0x5824,0x5830,/* 0xF0-0xF7 */
+	0x5831,0x5821,0x581D,0x5820,0x58F9,0x58FA,0x5960,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5A77,0x5A9A,0x5A7F,0x5A92,0x5A9B,0x5AA7,0x5B73,0x5B71,/* 0x40-0x47 */
+	0x5BD2,0x5BCC,0x5BD3,0x5BD0,0x5C0A,0x5C0B,0x5C31,0x5D4C,/* 0x48-0x4F */
+	0x5D50,0x5D34,0x5D47,0x5DFD,0x5E45,0x5E3D,0x5E40,0x5E43,/* 0x50-0x57 */
+	0x5E7E,0x5ECA,0x5EC1,0x5EC2,0x5EC4,0x5F3C,0x5F6D,0x5FA9,/* 0x58-0x5F */
+	0x5FAA,0x5FA8,0x60D1,0x60E1,0x60B2,0x60B6,0x60E0,0x611C,/* 0x60-0x67 */
+	0x6123,0x60FA,0x6115,0x60F0,0x60FB,0x60F4,0x6168,0x60F1,/* 0x68-0x6F */
+	0x610E,0x60F6,0x6109,0x6100,0x6112,0x621F,0x6249,0x63A3,/* 0x70-0x77 */
+	0x638C,0x63CF,0x63C0,0x63E9,0x63C9,0x63C6,0x63CD,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x63D2,0x63E3,0x63D0,0x63E1,0x63D6,0x63ED,0x63EE,/* 0xA0-0xA7 */
+	0x6376,0x63F4,0x63EA,0x63DB,0x6452,0x63DA,0x63F9,0x655E,/* 0xA8-0xAF */
+	0x6566,0x6562,0x6563,0x6591,0x6590,0x65AF,0x666E,0x6670,/* 0xB0-0xB7 */
+	0x6674,0x6676,0x666F,0x6691,0x667A,0x667E,0x6677,0x66FE,/* 0xB8-0xBF */
+	0x66FF,0x671F,0x671D,0x68FA,0x68D5,0x68E0,0x68D8,0x68D7,/* 0xC0-0xC7 */
+	0x6905,0x68DF,0x68F5,0x68EE,0x68E7,0x68F9,0x68D2,0x68F2,/* 0xC8-0xCF */
+	0x68E3,0x68CB,0x68CD,0x690D,0x6912,0x690E,0x68C9,0x68DA,/* 0xD0-0xD7 */
+	0x696E,0x68FB,0x6B3E,0x6B3A,0x6B3D,0x6B98,0x6B96,0x6BBC,/* 0xD8-0xDF */
+	0x6BEF,0x6C2E,0x6C2F,0x6C2C,0x6E2F,0x6E38,0x6E54,0x6E21,/* 0xE0-0xE7 */
+	0x6E32,0x6E67,0x6E4A,0x6E20,0x6E25,0x6E23,0x6E1B,0x6E5B,/* 0xE8-0xEF */
+	0x6E58,0x6E24,0x6E56,0x6E6E,0x6E2D,0x6E26,0x6E6F,0x6E34,/* 0xF0-0xF7 */
+	0x6E4D,0x6E3A,0x6E2C,0x6E43,0x6E1D,0x6E3E,0x6ECB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6E89,0x6E19,0x6E4E,0x6E63,0x6E44,0x6E72,0x6E69,0x6E5F,/* 0x40-0x47 */
+	0x7119,0x711A,0x7126,0x7130,0x7121,0x7136,0x716E,0x711C,/* 0x48-0x4F */
+	0x724C,0x7284,0x7280,0x7336,0x7325,0x7334,0x7329,0x743A,/* 0x50-0x57 */
+	0x742A,0x7433,0x7422,0x7425,0x7435,0x7436,0x7434,0x742F,/* 0x58-0x5F */
+	0x741B,0x7426,0x7428,0x7525,0x7526,0x756B,0x756A,0x75E2,/* 0x60-0x67 */
+	0x75DB,0x75E3,0x75D9,0x75D8,0x75DE,0x75E0,0x767B,0x767C,/* 0x68-0x6F */
+	0x7696,0x7693,0x76B4,0x76DC,0x774F,0x77ED,0x785D,0x786C,/* 0x70-0x77 */
+	0x786F,0x7A0D,0x7A08,0x7A0B,0x7A05,0x7A00,0x7A98,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7A97,0x7A96,0x7AE5,0x7AE3,0x7B49,0x7B56,0x7B46,/* 0xA0-0xA7 */
+	0x7B50,0x7B52,0x7B54,0x7B4D,0x7B4B,0x7B4F,0x7B51,0x7C9F,/* 0xA8-0xAF */
+	0x7CA5,0x7D5E,0x7D50,0x7D68,0x7D55,0x7D2B,0x7D6E,0x7D72,/* 0xB0-0xB7 */
+	0x7D61,0x7D66,0x7D62,0x7D70,0x7D73,0x5584,0x7FD4,0x7FD5,/* 0xB8-0xBF */
+	0x800B,0x8052,0x8085,0x8155,0x8154,0x814B,0x8151,0x814E,/* 0xC0-0xC7 */
+	0x8139,0x8146,0x813E,0x814C,0x8153,0x8174,0x8212,0x821C,/* 0xC8-0xCF */
+	0x83E9,0x8403,0x83F8,0x840D,0x83E0,0x83C5,0x840B,0x83C1,/* 0xD0-0xD7 */
+	0x83EF,0x83F1,0x83F4,0x8457,0x840A,0x83F0,0x840C,0x83CC,/* 0xD8-0xDF */
+	0x83FD,0x83F2,0x83CA,0x8438,0x840E,0x8404,0x83DC,0x8407,/* 0xE0-0xE7 */
+	0x83D4,0x83DF,0x865B,0x86DF,0x86D9,0x86ED,0x86D4,0x86DB,/* 0xE8-0xEF */
+	0x86E4,0x86D0,0x86DE,0x8857,0x88C1,0x88C2,0x88B1,0x8983,/* 0xF0-0xF7 */
+	0x8996,0x8A3B,0x8A60,0x8A55,0x8A5E,0x8A3C,0x8A41,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8A54,0x8A5B,0x8A50,0x8A46,0x8A34,0x8A3A,0x8A36,0x8A56,/* 0x40-0x47 */
+	0x8C61,0x8C82,0x8CAF,0x8CBC,0x8CB3,0x8CBD,0x8CC1,0x8CBB,/* 0x48-0x4F */
+	0x8CC0,0x8CB4,0x8CB7,0x8CB6,0x8CBF,0x8CB8,0x8D8A,0x8D85,/* 0x50-0x57 */
+	0x8D81,0x8DCE,0x8DDD,0x8DCB,0x8DDA,0x8DD1,0x8DCC,0x8DDB,/* 0x58-0x5F */
+	0x8DC6,0x8EFB,0x8EF8,0x8EFC,0x8F9C,0x902E,0x9035,0x9031,/* 0x60-0x67 */
+	0x9038,0x9032,0x9036,0x9102,0x90F5,0x9109,0x90FE,0x9163,/* 0x68-0x6F */
+	0x9165,0x91CF,0x9214,0x9215,0x9223,0x9209,0x921E,0x920D,/* 0x70-0x77 */
+	0x9210,0x9207,0x9211,0x9594,0x958F,0x958B,0x9591,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9593,0x9592,0x958E,0x968A,0x968E,0x968B,0x967D,/* 0xA0-0xA7 */
+	0x9685,0x9686,0x968D,0x9672,0x9684,0x96C1,0x96C5,0x96C4,/* 0xA8-0xAF */
+	0x96C6,0x96C7,0x96EF,0x96F2,0x97CC,0x9805,0x9806,0x9808,/* 0xB0-0xB7 */
+	0x98E7,0x98EA,0x98EF,0x98E9,0x98F2,0x98ED,0x99AE,0x99AD,/* 0xB8-0xBF */
+	0x9EC3,0x9ECD,0x9ED1,0x4E82,0x50AD,0x50B5,0x50B2,0x50B3,/* 0xC0-0xC7 */
+	0x50C5,0x50BE,0x50AC,0x50B7,0x50BB,0x50AF,0x50C7,0x527F,/* 0xC8-0xCF */
+	0x5277,0x527D,0x52DF,0x52E6,0x52E4,0x52E2,0x52E3,0x532F,/* 0xD0-0xD7 */
+	0x55DF,0x55E8,0x55D3,0x55E6,0x55CE,0x55DC,0x55C7,0x55D1,/* 0xD8-0xDF */
+	0x55E3,0x55E4,0x55EF,0x55DA,0x55E1,0x55C5,0x55C6,0x55E5,/* 0xE0-0xE7 */
+	0x55C9,0x5712,0x5713,0x585E,0x5851,0x5858,0x5857,0x585A,/* 0xE8-0xEF */
+	0x5854,0x586B,0x584C,0x586D,0x584A,0x5862,0x5852,0x584B,/* 0xF0-0xF7 */
+	0x5967,0x5AC1,0x5AC9,0x5ACC,0x5ABE,0x5ABD,0x5ABC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5AB3,0x5AC2,0x5AB2,0x5D69,0x5D6F,0x5E4C,0x5E79,0x5EC9,/* 0x40-0x47 */
+	0x5EC8,0x5F12,0x5F59,0x5FAC,0x5FAE,0x611A,0x610F,0x6148,/* 0x48-0x4F */
+	0x611F,0x60F3,0x611B,0x60F9,0x6101,0x6108,0x614E,0x614C,/* 0x50-0x57 */
+	0x6144,0x614D,0x613E,0x6134,0x6127,0x610D,0x6106,0x6137,/* 0x58-0x5F */
+	0x6221,0x6222,0x6413,0x643E,0x641E,0x642A,0x642D,0x643D,/* 0x60-0x67 */
+	0x642C,0x640F,0x641C,0x6414,0x640D,0x6436,0x6416,0x6417,/* 0x68-0x6F */
+	0x6406,0x656C,0x659F,0x65B0,0x6697,0x6689,0x6687,0x6688,/* 0x70-0x77 */
+	0x6696,0x6684,0x6698,0x668D,0x6703,0x6994,0x696D,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x695A,0x6977,0x6960,0x6954,0x6975,0x6930,0x6982,/* 0xA0-0xA7 */
+	0x694A,0x6968,0x696B,0x695E,0x6953,0x6979,0x6986,0x695D,/* 0xA8-0xAF */
+	0x6963,0x695B,0x6B47,0x6B72,0x6BC0,0x6BBF,0x6BD3,0x6BFD,/* 0xB0-0xB7 */
+	0x6EA2,0x6EAF,0x6ED3,0x6EB6,0x6EC2,0x6E90,0x6E9D,0x6EC7,/* 0xB8-0xBF */
+	0x6EC5,0x6EA5,0x6E98,0x6EBC,0x6EBA,0x6EAB,0x6ED1,0x6E96,/* 0xC0-0xC7 */
+	0x6E9C,0x6EC4,0x6ED4,0x6EAA,0x6EA7,0x6EB4,0x714E,0x7159,/* 0xC8-0xCF */
+	0x7169,0x7164,0x7149,0x7167,0x715C,0x716C,0x7166,0x714C,/* 0xD0-0xD7 */
+	0x7165,0x715E,0x7146,0x7168,0x7156,0x723A,0x7252,0x7337,/* 0xD8-0xDF */
+	0x7345,0x733F,0x733E,0x746F,0x745A,0x7455,0x745F,0x745E,/* 0xE0-0xE7 */
+	0x7441,0x743F,0x7459,0x745B,0x745C,0x7576,0x7578,0x7600,/* 0xE8-0xEF */
+	0x75F0,0x7601,0x75F2,0x75F1,0x75FA,0x75FF,0x75F4,0x75F3,/* 0xF0-0xF7 */
+	0x76DE,0x76DF,0x775B,0x776B,0x7766,0x775E,0x7763,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7779,0x776A,0x776C,0x775C,0x7765,0x7768,0x7762,0x77EE,/* 0x40-0x47 */
+	0x788E,0x78B0,0x7897,0x7898,0x788C,0x7889,0x787C,0x7891,/* 0x48-0x4F */
+	0x7893,0x787F,0x797A,0x797F,0x7981,0x842C,0x79BD,0x7A1C,/* 0x50-0x57 */
+	0x7A1A,0x7A20,0x7A14,0x7A1F,0x7A1E,0x7A9F,0x7AA0,0x7B77,/* 0x58-0x5F */
+	0x7BC0,0x7B60,0x7B6E,0x7B67,0x7CB1,0x7CB3,0x7CB5,0x7D93,/* 0x60-0x67 */
+	0x7D79,0x7D91,0x7D81,0x7D8F,0x7D5B,0x7F6E,0x7F69,0x7F6A,/* 0x68-0x6F */
+	0x7F72,0x7FA9,0x7FA8,0x7FA4,0x8056,0x8058,0x8086,0x8084,/* 0x70-0x77 */
+	0x8171,0x8170,0x8178,0x8165,0x816E,0x8173,0x816B,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8179,0x817A,0x8166,0x8205,0x8247,0x8482,0x8477,/* 0xA0-0xA7 */
+	0x843D,0x8431,0x8475,0x8466,0x846B,0x8449,0x846C,0x845B,/* 0xA8-0xAF */
+	0x843C,0x8435,0x8461,0x8463,0x8469,0x846D,0x8446,0x865E,/* 0xB0-0xB7 */
+	0x865C,0x865F,0x86F9,0x8713,0x8708,0x8707,0x8700,0x86FE,/* 0xB8-0xBF */
+	0x86FB,0x8702,0x8703,0x8706,0x870A,0x8859,0x88DF,0x88D4,/* 0xC0-0xC7 */
+	0x88D9,0x88DC,0x88D8,0x88DD,0x88E1,0x88CA,0x88D5,0x88D2,/* 0xC8-0xCF */
+	0x899C,0x89E3,0x8A6B,0x8A72,0x8A73,0x8A66,0x8A69,0x8A70,/* 0xD0-0xD7 */
+	0x8A87,0x8A7C,0x8A63,0x8AA0,0x8A71,0x8A85,0x8A6D,0x8A62,/* 0xD8-0xDF */
+	0x8A6E,0x8A6C,0x8A79,0x8A7B,0x8A3E,0x8A68,0x8C62,0x8C8A,/* 0xE0-0xE7 */
+	0x8C89,0x8CCA,0x8CC7,0x8CC8,0x8CC4,0x8CB2,0x8CC3,0x8CC2,/* 0xE8-0xEF */
+	0x8CC5,0x8DE1,0x8DDF,0x8DE8,0x8DEF,0x8DF3,0x8DFA,0x8DEA,/* 0xF0-0xF7 */
+	0x8DE4,0x8DE6,0x8EB2,0x8F03,0x8F09,0x8EFE,0x8F0A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_B9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8F9F,0x8FB2,0x904B,0x904A,0x9053,0x9042,0x9054,0x903C,/* 0x40-0x47 */
+	0x9055,0x9050,0x9047,0x904F,0x904E,0x904D,0x9051,0x903E,/* 0x48-0x4F */
+	0x9041,0x9112,0x9117,0x916C,0x916A,0x9169,0x91C9,0x9237,/* 0x50-0x57 */
+	0x9257,0x9238,0x923D,0x9240,0x923E,0x925B,0x924B,0x9264,/* 0x58-0x5F */
+	0x9251,0x9234,0x9249,0x924D,0x9245,0x9239,0x923F,0x925A,/* 0x60-0x67 */
+	0x9598,0x9698,0x9694,0x9695,0x96CD,0x96CB,0x96C9,0x96CA,/* 0x68-0x6F */
+	0x96F7,0x96FB,0x96F9,0x96F6,0x9756,0x9774,0x9776,0x9810,/* 0x70-0x77 */
+	0x9811,0x9813,0x980A,0x9812,0x980C,0x98FC,0x98F4,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x98FD,0x98FE,0x99B3,0x99B1,0x99B4,0x9AE1,0x9CE9,/* 0xA0-0xA7 */
+	0x9E82,0x9F0E,0x9F13,0x9F20,0x50E7,0x50EE,0x50E5,0x50D6,/* 0xA8-0xAF */
+	0x50ED,0x50DA,0x50D5,0x50CF,0x50D1,0x50F1,0x50CE,0x50E9,/* 0xB0-0xB7 */
+	0x5162,0x51F3,0x5283,0x5282,0x5331,0x53AD,0x55FE,0x5600,/* 0xB8-0xBF */
+	0x561B,0x5617,0x55FD,0x5614,0x5606,0x5609,0x560D,0x560E,/* 0xC0-0xC7 */
+	0x55F7,0x5616,0x561F,0x5608,0x5610,0x55F6,0x5718,0x5716,/* 0xC8-0xCF */
+	0x5875,0x587E,0x5883,0x5893,0x588A,0x5879,0x5885,0x587D,/* 0xD0-0xD7 */
+	0x58FD,0x5925,0x5922,0x5924,0x596A,0x5969,0x5AE1,0x5AE6,/* 0xD8-0xDF */
+	0x5AE9,0x5AD7,0x5AD6,0x5AD8,0x5AE3,0x5B75,0x5BDE,0x5BE7,/* 0xE0-0xE7 */
+	0x5BE1,0x5BE5,0x5BE6,0x5BE8,0x5BE2,0x5BE4,0x5BDF,0x5C0D,/* 0xE8-0xEF */
+	0x5C62,0x5D84,0x5D87,0x5E5B,0x5E63,0x5E55,0x5E57,0x5E54,/* 0xF0-0xF7 */
+	0x5ED3,0x5ED6,0x5F0A,0x5F46,0x5F70,0x5FB9,0x6147,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x613F,0x614B,0x6177,0x6162,0x6163,0x615F,0x615A,0x6158,/* 0x40-0x47 */
+	0x6175,0x622A,0x6487,0x6458,0x6454,0x64A4,0x6478,0x645F,/* 0x48-0x4F */
+	0x647A,0x6451,0x6467,0x6434,0x646D,0x647B,0x6572,0x65A1,/* 0x50-0x57 */
+	0x65D7,0x65D6,0x66A2,0x66A8,0x669D,0x699C,0x69A8,0x6995,/* 0x58-0x5F */
+	0x69C1,0x69AE,0x69D3,0x69CB,0x699B,0x69B7,0x69BB,0x69AB,/* 0x60-0x67 */
+	0x69B4,0x69D0,0x69CD,0x69AD,0x69CC,0x69A6,0x69C3,0x69A3,/* 0x68-0x6F */
+	0x6B49,0x6B4C,0x6C33,0x6F33,0x6F14,0x6EFE,0x6F13,0x6EF4,/* 0x70-0x77 */
+	0x6F29,0x6F3E,0x6F20,0x6F2C,0x6F0F,0x6F02,0x6F22,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6EFF,0x6EEF,0x6F06,0x6F31,0x6F38,0x6F32,0x6F23,/* 0xA0-0xA7 */
+	0x6F15,0x6F2B,0x6F2F,0x6F88,0x6F2A,0x6EEC,0x6F01,0x6EF2,/* 0xA8-0xAF */
+	0x6ECC,0x6EF7,0x7194,0x7199,0x717D,0x718A,0x7184,0x7192,/* 0xB0-0xB7 */
+	0x723E,0x7292,0x7296,0x7344,0x7350,0x7464,0x7463,0x746A,/* 0xB8-0xBF */
+	0x7470,0x746D,0x7504,0x7591,0x7627,0x760D,0x760B,0x7609,/* 0xC0-0xC7 */
+	0x7613,0x76E1,0x76E3,0x7784,0x777D,0x777F,0x7761,0x78C1,/* 0xC8-0xCF */
+	0x789F,0x78A7,0x78B3,0x78A9,0x78A3,0x798E,0x798F,0x798D,/* 0xD0-0xD7 */
+	0x7A2E,0x7A31,0x7AAA,0x7AA9,0x7AED,0x7AEF,0x7BA1,0x7B95,/* 0xD8-0xDF */
+	0x7B8B,0x7B75,0x7B97,0x7B9D,0x7B94,0x7B8F,0x7BB8,0x7B87,/* 0xE0-0xE7 */
+	0x7B84,0x7CB9,0x7CBD,0x7CBE,0x7DBB,0x7DB0,0x7D9C,0x7DBD,/* 0xE8-0xEF */
+	0x7DBE,0x7DA0,0x7DCA,0x7DB4,0x7DB2,0x7DB1,0x7DBA,0x7DA2,/* 0xF0-0xF7 */
+	0x7DBF,0x7DB5,0x7DB8,0x7DAD,0x7DD2,0x7DC7,0x7DAC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7F70,0x7FE0,0x7FE1,0x7FDF,0x805E,0x805A,0x8087,0x8150,/* 0x40-0x47 */
+	0x8180,0x818F,0x8188,0x818A,0x817F,0x8182,0x81E7,0x81FA,/* 0x48-0x4F */
+	0x8207,0x8214,0x821E,0x824B,0x84C9,0x84BF,0x84C6,0x84C4,/* 0x50-0x57 */
+	0x8499,0x849E,0x84B2,0x849C,0x84CB,0x84B8,0x84C0,0x84D3,/* 0x58-0x5F */
+	0x8490,0x84BC,0x84D1,0x84CA,0x873F,0x871C,0x873B,0x8722,/* 0x60-0x67 */
+	0x8725,0x8734,0x8718,0x8755,0x8737,0x8729,0x88F3,0x8902,/* 0x68-0x6F */
+	0x88F4,0x88F9,0x88F8,0x88FD,0x88E8,0x891A,0x88EF,0x8AA6,/* 0x70-0x77 */
+	0x8A8C,0x8A9E,0x8AA3,0x8A8D,0x8AA1,0x8A93,0x8AA4,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8AAA,0x8AA5,0x8AA8,0x8A98,0x8A91,0x8A9A,0x8AA7,/* 0xA0-0xA7 */
+	0x8C6A,0x8C8D,0x8C8C,0x8CD3,0x8CD1,0x8CD2,0x8D6B,0x8D99,/* 0xA8-0xAF */
+	0x8D95,0x8DFC,0x8F14,0x8F12,0x8F15,0x8F13,0x8FA3,0x9060,/* 0xB0-0xB7 */
+	0x9058,0x905C,0x9063,0x9059,0x905E,0x9062,0x905D,0x905B,/* 0xB8-0xBF */
+	0x9119,0x9118,0x911E,0x9175,0x9178,0x9177,0x9174,0x9278,/* 0xC0-0xC7 */
+	0x9280,0x9285,0x9298,0x9296,0x927B,0x9293,0x929C,0x92A8,/* 0xC8-0xCF */
+	0x927C,0x9291,0x95A1,0x95A8,0x95A9,0x95A3,0x95A5,0x95A4,/* 0xD0-0xD7 */
+	0x9699,0x969C,0x969B,0x96CC,0x96D2,0x9700,0x977C,0x9785,/* 0xD8-0xDF */
+	0x97F6,0x9817,0x9818,0x98AF,0x98B1,0x9903,0x9905,0x990C,/* 0xE0-0xE7 */
+	0x9909,0x99C1,0x9AAF,0x9AB0,0x9AE6,0x9B41,0x9B42,0x9CF4,/* 0xE8-0xEF */
+	0x9CF6,0x9CF3,0x9EBC,0x9F3B,0x9F4A,0x5104,0x5100,0x50FB,/* 0xF0-0xF7 */
+	0x50F5,0x50F9,0x5102,0x5108,0x5109,0x5105,0x51DC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5287,0x5288,0x5289,0x528D,0x528A,0x52F0,0x53B2,0x562E,/* 0x40-0x47 */
+	0x563B,0x5639,0x5632,0x563F,0x5634,0x5629,0x5653,0x564E,/* 0x48-0x4F */
+	0x5657,0x5674,0x5636,0x562F,0x5630,0x5880,0x589F,0x589E,/* 0x50-0x57 */
+	0x58B3,0x589C,0x58AE,0x58A9,0x58A6,0x596D,0x5B09,0x5AFB,/* 0x58-0x5F */
+	0x5B0B,0x5AF5,0x5B0C,0x5B08,0x5BEE,0x5BEC,0x5BE9,0x5BEB,/* 0x60-0x67 */
+	0x5C64,0x5C65,0x5D9D,0x5D94,0x5E62,0x5E5F,0x5E61,0x5EE2,/* 0x68-0x6F */
+	0x5EDA,0x5EDF,0x5EDD,0x5EE3,0x5EE0,0x5F48,0x5F71,0x5FB7,/* 0x70-0x77 */
+	0x5FB5,0x6176,0x6167,0x616E,0x615D,0x6155,0x6182,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x617C,0x6170,0x616B,0x617E,0x61A7,0x6190,0x61AB,/* 0xA0-0xA7 */
+	0x618E,0x61AC,0x619A,0x61A4,0x6194,0x61AE,0x622E,0x6469,/* 0xA8-0xAF */
+	0x646F,0x6479,0x649E,0x64B2,0x6488,0x6490,0x64B0,0x64A5,/* 0xB0-0xB7 */
+	0x6493,0x6495,0x64A9,0x6492,0x64AE,0x64AD,0x64AB,0x649A,/* 0xB8-0xBF */
+	0x64AC,0x6499,0x64A2,0x64B3,0x6575,0x6577,0x6578,0x66AE,/* 0xC0-0xC7 */
+	0x66AB,0x66B4,0x66B1,0x6A23,0x6A1F,0x69E8,0x6A01,0x6A1E,/* 0xC8-0xCF */
+	0x6A19,0x69FD,0x6A21,0x6A13,0x6A0A,0x69F3,0x6A02,0x6A05,/* 0xD0-0xD7 */
+	0x69ED,0x6A11,0x6B50,0x6B4E,0x6BA4,0x6BC5,0x6BC6,0x6F3F,/* 0xD8-0xDF */
+	0x6F7C,0x6F84,0x6F51,0x6F66,0x6F54,0x6F86,0x6F6D,0x6F5B,/* 0xE0-0xE7 */
+	0x6F78,0x6F6E,0x6F8E,0x6F7A,0x6F70,0x6F64,0x6F97,0x6F58,/* 0xE8-0xEF */
+	0x6ED5,0x6F6F,0x6F60,0x6F5F,0x719F,0x71AC,0x71B1,0x71A8,/* 0xF0-0xF7 */
+	0x7256,0x729B,0x734E,0x7357,0x7469,0x748B,0x7483,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x747E,0x7480,0x757F,0x7620,0x7629,0x761F,0x7624,0x7626,/* 0x40-0x47 */
+	0x7621,0x7622,0x769A,0x76BA,0x76E4,0x778E,0x7787,0x778C,/* 0x48-0x4F */
+	0x7791,0x778B,0x78CB,0x78C5,0x78BA,0x78CA,0x78BE,0x78D5,/* 0x50-0x57 */
+	0x78BC,0x78D0,0x7A3F,0x7A3C,0x7A40,0x7A3D,0x7A37,0x7A3B,/* 0x58-0x5F */
+	0x7AAF,0x7AAE,0x7BAD,0x7BB1,0x7BC4,0x7BB4,0x7BC6,0x7BC7,/* 0x60-0x67 */
+	0x7BC1,0x7BA0,0x7BCC,0x7CCA,0x7DE0,0x7DF4,0x7DEF,0x7DFB,/* 0x68-0x6F */
+	0x7DD8,0x7DEC,0x7DDD,0x7DE8,0x7DE3,0x7DDA,0x7DDE,0x7DE9,/* 0x70-0x77 */
+	0x7D9E,0x7DD9,0x7DF2,0x7DF9,0x7F75,0x7F77,0x7FAF,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7FE9,0x8026,0x819B,0x819C,0x819D,0x81A0,0x819A,/* 0xA0-0xA7 */
+	0x8198,0x8517,0x853D,0x851A,0x84EE,0x852C,0x852D,0x8513,/* 0xA8-0xAF */
+	0x8511,0x8523,0x8521,0x8514,0x84EC,0x8525,0x84FF,0x8506,/* 0xB0-0xB7 */
+	0x8782,0x8774,0x8776,0x8760,0x8766,0x8778,0x8768,0x8759,/* 0xB8-0xBF */
+	0x8757,0x874C,0x8753,0x885B,0x885D,0x8910,0x8907,0x8912,/* 0xC0-0xC7 */
+	0x8913,0x8915,0x890A,0x8ABC,0x8AD2,0x8AC7,0x8AC4,0x8A95,/* 0xC8-0xCF */
+	0x8ACB,0x8AF8,0x8AB2,0x8AC9,0x8AC2,0x8ABF,0x8AB0,0x8AD6,/* 0xD0-0xD7 */
+	0x8ACD,0x8AB6,0x8AB9,0x8ADB,0x8C4C,0x8C4E,0x8C6C,0x8CE0,/* 0xD8-0xDF */
+	0x8CDE,0x8CE6,0x8CE4,0x8CEC,0x8CED,0x8CE2,0x8CE3,0x8CDC,/* 0xE0-0xE7 */
+	0x8CEA,0x8CE1,0x8D6D,0x8D9F,0x8DA3,0x8E2B,0x8E10,0x8E1D,/* 0xE8-0xEF */
+	0x8E22,0x8E0F,0x8E29,0x8E1F,0x8E21,0x8E1E,0x8EBA,0x8F1D,/* 0xF0-0xF7 */
+	0x8F1B,0x8F1F,0x8F29,0x8F26,0x8F2A,0x8F1C,0x8F1E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8F25,0x9069,0x906E,0x9068,0x906D,0x9077,0x9130,0x912D,/* 0x40-0x47 */
+	0x9127,0x9131,0x9187,0x9189,0x918B,0x9183,0x92C5,0x92BB,/* 0x48-0x4F */
+	0x92B7,0x92EA,0x92AC,0x92E4,0x92C1,0x92B3,0x92BC,0x92D2,/* 0x50-0x57 */
+	0x92C7,0x92F0,0x92B2,0x95AD,0x95B1,0x9704,0x9706,0x9707,/* 0x58-0x5F */
+	0x9709,0x9760,0x978D,0x978B,0x978F,0x9821,0x982B,0x981C,/* 0x60-0x67 */
+	0x98B3,0x990A,0x9913,0x9912,0x9918,0x99DD,0x99D0,0x99DF,/* 0x68-0x6F */
+	0x99DB,0x99D1,0x99D5,0x99D2,0x99D9,0x9AB7,0x9AEE,0x9AEF,/* 0x70-0x77 */
+	0x9B27,0x9B45,0x9B44,0x9B77,0x9B6F,0x9D06,0x9D09,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9D03,0x9EA9,0x9EBE,0x9ECE,0x58A8,0x9F52,0x5112,/* 0xA0-0xA7 */
+	0x5118,0x5114,0x5110,0x5115,0x5180,0x51AA,0x51DD,0x5291,/* 0xA8-0xAF */
+	0x5293,0x52F3,0x5659,0x566B,0x5679,0x5669,0x5664,0x5678,/* 0xB0-0xB7 */
+	0x566A,0x5668,0x5665,0x5671,0x566F,0x566C,0x5662,0x5676,/* 0xB8-0xBF */
+	0x58C1,0x58BE,0x58C7,0x58C5,0x596E,0x5B1D,0x5B34,0x5B78,/* 0xC0-0xC7 */
+	0x5BF0,0x5C0E,0x5F4A,0x61B2,0x6191,0x61A9,0x618A,0x61CD,/* 0xC8-0xCF */
+	0x61B6,0x61BE,0x61CA,0x61C8,0x6230,0x64C5,0x64C1,0x64CB,/* 0xD0-0xD7 */
+	0x64BB,0x64BC,0x64DA,0x64C4,0x64C7,0x64C2,0x64CD,0x64BF,/* 0xD8-0xDF */
+	0x64D2,0x64D4,0x64BE,0x6574,0x66C6,0x66C9,0x66B9,0x66C4,/* 0xE0-0xE7 */
+	0x66C7,0x66B8,0x6A3D,0x6A38,0x6A3A,0x6A59,0x6A6B,0x6A58,/* 0xE8-0xEF */
+	0x6A39,0x6A44,0x6A62,0x6A61,0x6A4B,0x6A47,0x6A35,0x6A5F,/* 0xF0-0xF7 */
+	0x6A48,0x6B59,0x6B77,0x6C05,0x6FC2,0x6FB1,0x6FA1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_BF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6FC3,0x6FA4,0x6FC1,0x6FA7,0x6FB3,0x6FC0,0x6FB9,0x6FB6,/* 0x40-0x47 */
+	0x6FA6,0x6FA0,0x6FB4,0x71BE,0x71C9,0x71D0,0x71D2,0x71C8,/* 0x48-0x4F */
+	0x71D5,0x71B9,0x71CE,0x71D9,0x71DC,0x71C3,0x71C4,0x7368,/* 0x50-0x57 */
+	0x749C,0x74A3,0x7498,0x749F,0x749E,0x74E2,0x750C,0x750D,/* 0x58-0x5F */
+	0x7634,0x7638,0x763A,0x76E7,0x76E5,0x77A0,0x779E,0x779F,/* 0x60-0x67 */
+	0x77A5,0x78E8,0x78DA,0x78EC,0x78E7,0x79A6,0x7A4D,0x7A4E,/* 0x68-0x6F */
+	0x7A46,0x7A4C,0x7A4B,0x7ABA,0x7BD9,0x7C11,0x7BC9,0x7BE4,/* 0x70-0x77 */
+	0x7BDB,0x7BE1,0x7BE9,0x7BE6,0x7CD5,0x7CD6,0x7E0A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7E11,0x7E08,0x7E1B,0x7E23,0x7E1E,0x7E1D,0x7E09,/* 0xA0-0xA7 */
+	0x7E10,0x7F79,0x7FB2,0x7FF0,0x7FF1,0x7FEE,0x8028,0x81B3,/* 0xA8-0xAF */
+	0x81A9,0x81A8,0x81FB,0x8208,0x8258,0x8259,0x854A,0x8559,/* 0xB0-0xB7 */
+	0x8548,0x8568,0x8569,0x8543,0x8549,0x856D,0x856A,0x855E,/* 0xB8-0xBF */
+	0x8783,0x879F,0x879E,0x87A2,0x878D,0x8861,0x892A,0x8932,/* 0xC0-0xC7 */
+	0x8925,0x892B,0x8921,0x89AA,0x89A6,0x8AE6,0x8AFA,0x8AEB,/* 0xC8-0xCF */
+	0x8AF1,0x8B00,0x8ADC,0x8AE7,0x8AEE,0x8AFE,0x8B01,0x8B02,/* 0xD0-0xD7 */
+	0x8AF7,0x8AED,0x8AF3,0x8AF6,0x8AFC,0x8C6B,0x8C6D,0x8C93,/* 0xD8-0xDF */
+	0x8CF4,0x8E44,0x8E31,0x8E34,0x8E42,0x8E39,0x8E35,0x8F3B,/* 0xE0-0xE7 */
+	0x8F2F,0x8F38,0x8F33,0x8FA8,0x8FA6,0x9075,0x9074,0x9078,/* 0xE8-0xEF */
+	0x9072,0x907C,0x907A,0x9134,0x9192,0x9320,0x9336,0x92F8,/* 0xF0-0xF7 */
+	0x9333,0x932F,0x9322,0x92FC,0x932B,0x9304,0x931A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9310,0x9326,0x9321,0x9315,0x932E,0x9319,0x95BB,0x96A7,/* 0x40-0x47 */
+	0x96A8,0x96AA,0x96D5,0x970E,0x9711,0x9716,0x970D,0x9713,/* 0x48-0x4F */
+	0x970F,0x975B,0x975C,0x9766,0x9798,0x9830,0x9838,0x983B,/* 0x50-0x57 */
+	0x9837,0x982D,0x9839,0x9824,0x9910,0x9928,0x991E,0x991B,/* 0x58-0x5F */
+	0x9921,0x991A,0x99ED,0x99E2,0x99F1,0x9AB8,0x9ABC,0x9AFB,/* 0x60-0x67 */
+	0x9AED,0x9B28,0x9B91,0x9D15,0x9D23,0x9D26,0x9D28,0x9D12,/* 0x68-0x6F */
+	0x9D1B,0x9ED8,0x9ED4,0x9F8D,0x9F9C,0x512A,0x511F,0x5121,/* 0x70-0x77 */
+	0x5132,0x52F5,0x568E,0x5680,0x5690,0x5685,0x5687,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x568F,0x58D5,0x58D3,0x58D1,0x58CE,0x5B30,0x5B2A,/* 0xA0-0xA7 */
+	0x5B24,0x5B7A,0x5C37,0x5C68,0x5DBC,0x5DBA,0x5DBD,0x5DB8,/* 0xA8-0xAF */
+	0x5E6B,0x5F4C,0x5FBD,0x61C9,0x61C2,0x61C7,0x61E6,0x61CB,/* 0xB0-0xB7 */
+	0x6232,0x6234,0x64CE,0x64CA,0x64D8,0x64E0,0x64F0,0x64E6,/* 0xB8-0xBF */
+	0x64EC,0x64F1,0x64E2,0x64ED,0x6582,0x6583,0x66D9,0x66D6,/* 0xC0-0xC7 */
+	0x6A80,0x6A94,0x6A84,0x6AA2,0x6A9C,0x6ADB,0x6AA3,0x6A7E,/* 0xC8-0xCF */
+	0x6A97,0x6A90,0x6AA0,0x6B5C,0x6BAE,0x6BDA,0x6C08,0x6FD8,/* 0xD0-0xD7 */
+	0x6FF1,0x6FDF,0x6FE0,0x6FDB,0x6FE4,0x6FEB,0x6FEF,0x6F80,/* 0xD8-0xDF */
+	0x6FEC,0x6FE1,0x6FE9,0x6FD5,0x6FEE,0x6FF0,0x71E7,0x71DF,/* 0xE0-0xE7 */
+	0x71EE,0x71E6,0x71E5,0x71ED,0x71EC,0x71F4,0x71E0,0x7235,/* 0xE8-0xEF */
+	0x7246,0x7370,0x7372,0x74A9,0x74B0,0x74A6,0x74A8,0x7646,/* 0xF0-0xF7 */
+	0x7642,0x764C,0x76EA,0x77B3,0x77AA,0x77B0,0x77AC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x77A7,0x77AD,0x77EF,0x78F7,0x78FA,0x78F4,0x78EF,0x7901,/* 0x40-0x47 */
+	0x79A7,0x79AA,0x7A57,0x7ABF,0x7C07,0x7C0D,0x7BFE,0x7BF7,/* 0x48-0x4F */
+	0x7C0C,0x7BE0,0x7CE0,0x7CDC,0x7CDE,0x7CE2,0x7CDF,0x7CD9,/* 0x50-0x57 */
+	0x7CDD,0x7E2E,0x7E3E,0x7E46,0x7E37,0x7E32,0x7E43,0x7E2B,/* 0x58-0x5F */
+	0x7E3D,0x7E31,0x7E45,0x7E41,0x7E34,0x7E39,0x7E48,0x7E35,/* 0x60-0x67 */
+	0x7E3F,0x7E2F,0x7F44,0x7FF3,0x7FFC,0x8071,0x8072,0x8070,/* 0x68-0x6F */
+	0x806F,0x8073,0x81C6,0x81C3,0x81BA,0x81C2,0x81C0,0x81BF,/* 0x70-0x77 */
+	0x81BD,0x81C9,0x81BE,0x81E8,0x8209,0x8271,0x85AA,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8584,0x857E,0x859C,0x8591,0x8594,0x85AF,0x859B,/* 0xA0-0xA7 */
+	0x8587,0x85A8,0x858A,0x8667,0x87C0,0x87D1,0x87B3,0x87D2,/* 0xA8-0xAF */
+	0x87C6,0x87AB,0x87BB,0x87BA,0x87C8,0x87CB,0x893B,0x8936,/* 0xB0-0xB7 */
+	0x8944,0x8938,0x893D,0x89AC,0x8B0E,0x8B17,0x8B19,0x8B1B,/* 0xB8-0xBF */
+	0x8B0A,0x8B20,0x8B1D,0x8B04,0x8B10,0x8C41,0x8C3F,0x8C73,/* 0xC0-0xC7 */
+	0x8CFA,0x8CFD,0x8CFC,0x8CF8,0x8CFB,0x8DA8,0x8E49,0x8E4B,/* 0xC8-0xCF */
+	0x8E48,0x8E4A,0x8F44,0x8F3E,0x8F42,0x8F45,0x8F3F,0x907F,/* 0xD0-0xD7 */
+	0x907D,0x9084,0x9081,0x9082,0x9080,0x9139,0x91A3,0x919E,/* 0xD8-0xDF */
+	0x919C,0x934D,0x9382,0x9328,0x9375,0x934A,0x9365,0x934B,/* 0xE0-0xE7 */
+	0x9318,0x937E,0x936C,0x935B,0x9370,0x935A,0x9354,0x95CA,/* 0xE8-0xEF */
+	0x95CB,0x95CC,0x95C8,0x95C6,0x96B1,0x96B8,0x96D6,0x971C,/* 0xF0-0xF7 */
+	0x971E,0x97A0,0x97D3,0x9846,0x98B6,0x9935,0x9A01,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x99FF,0x9BAE,0x9BAB,0x9BAA,0x9BAD,0x9D3B,0x9D3F,0x9E8B,/* 0x40-0x47 */
+	0x9ECF,0x9EDE,0x9EDC,0x9EDD,0x9EDB,0x9F3E,0x9F4B,0x53E2,/* 0x48-0x4F */
+	0x5695,0x56AE,0x58D9,0x58D8,0x5B38,0x5F5D,0x61E3,0x6233,/* 0x50-0x57 */
+	0x64F4,0x64F2,0x64FE,0x6506,0x64FA,0x64FB,0x64F7,0x65B7,/* 0x58-0x5F */
+	0x66DC,0x6726,0x6AB3,0x6AAC,0x6AC3,0x6ABB,0x6AB8,0x6AC2,/* 0x60-0x67 */
+	0x6AAE,0x6AAF,0x6B5F,0x6B78,0x6BAF,0x7009,0x700B,0x6FFE,/* 0x68-0x6F */
+	0x7006,0x6FFA,0x7011,0x700F,0x71FB,0x71FC,0x71FE,0x71F8,/* 0x70-0x77 */
+	0x7377,0x7375,0x74A7,0x74BF,0x7515,0x7656,0x7658,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7652,0x77BD,0x77BF,0x77BB,0x77BC,0x790E,0x79AE,/* 0xA0-0xA7 */
+	0x7A61,0x7A62,0x7A60,0x7AC4,0x7AC5,0x7C2B,0x7C27,0x7C2A,/* 0xA8-0xAF */
+	0x7C1E,0x7C23,0x7C21,0x7CE7,0x7E54,0x7E55,0x7E5E,0x7E5A,/* 0xB0-0xB7 */
+	0x7E61,0x7E52,0x7E59,0x7F48,0x7FF9,0x7FFB,0x8077,0x8076,/* 0xB8-0xBF */
+	0x81CD,0x81CF,0x820A,0x85CF,0x85A9,0x85CD,0x85D0,0x85C9,/* 0xC0-0xC7 */
+	0x85B0,0x85BA,0x85B9,0x85A6,0x87EF,0x87EC,0x87F2,0x87E0,/* 0xC8-0xCF */
+	0x8986,0x89B2,0x89F4,0x8B28,0x8B39,0x8B2C,0x8B2B,0x8C50,/* 0xD0-0xD7 */
+	0x8D05,0x8E59,0x8E63,0x8E66,0x8E64,0x8E5F,0x8E55,0x8EC0,/* 0xD8-0xDF */
+	0x8F49,0x8F4D,0x9087,0x9083,0x9088,0x91AB,0x91AC,0x91D0,/* 0xE0-0xE7 */
+	0x9394,0x938A,0x9396,0x93A2,0x93B3,0x93AE,0x93AC,0x93B0,/* 0xE8-0xEF */
+	0x9398,0x939A,0x9397,0x95D4,0x95D6,0x95D0,0x95D5,0x96E2,/* 0xF0-0xF7 */
+	0x96DC,0x96D9,0x96DB,0x96DE,0x9724,0x97A3,0x97A6,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x97AD,0x97F9,0x984D,0x984F,0x984C,0x984E,0x9853,0x98BA,/* 0x40-0x47 */
+	0x993E,0x993F,0x993D,0x992E,0x99A5,0x9A0E,0x9AC1,0x9B03,/* 0x48-0x4F */
+	0x9B06,0x9B4F,0x9B4E,0x9B4D,0x9BCA,0x9BC9,0x9BFD,0x9BC8,/* 0x50-0x57 */
+	0x9BC0,0x9D51,0x9D5D,0x9D60,0x9EE0,0x9F15,0x9F2C,0x5133,/* 0x58-0x5F */
+	0x56A5,0x58DE,0x58DF,0x58E2,0x5BF5,0x9F90,0x5EEC,0x61F2,/* 0x60-0x67 */
+	0x61F7,0x61F6,0x61F5,0x6500,0x650F,0x66E0,0x66DD,0x6AE5,/* 0x68-0x6F */
+	0x6ADD,0x6ADA,0x6AD3,0x701B,0x701F,0x7028,0x701A,0x701D,/* 0x70-0x77 */
+	0x7015,0x7018,0x7206,0x720D,0x7258,0x72A2,0x7378,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x737A,0x74BD,0x74CA,0x74E3,0x7587,0x7586,0x765F,/* 0xA0-0xA7 */
+	0x7661,0x77C7,0x7919,0x79B1,0x7A6B,0x7A69,0x7C3E,0x7C3F,/* 0xA8-0xAF */
+	0x7C38,0x7C3D,0x7C37,0x7C40,0x7E6B,0x7E6D,0x7E79,0x7E69,/* 0xB0-0xB7 */
+	0x7E6A,0x7F85,0x7E73,0x7FB6,0x7FB9,0x7FB8,0x81D8,0x85E9,/* 0xB8-0xBF */
+	0x85DD,0x85EA,0x85D5,0x85E4,0x85E5,0x85F7,0x87FB,0x8805,/* 0xC0-0xC7 */
+	0x880D,0x87F9,0x87FE,0x8960,0x895F,0x8956,0x895E,0x8B41,/* 0xC8-0xCF */
+	0x8B5C,0x8B58,0x8B49,0x8B5A,0x8B4E,0x8B4F,0x8B46,0x8B59,/* 0xD0-0xD7 */
+	0x8D08,0x8D0A,0x8E7C,0x8E72,0x8E87,0x8E76,0x8E6C,0x8E7A,/* 0xD8-0xDF */
+	0x8E74,0x8F54,0x8F4E,0x8FAD,0x908A,0x908B,0x91B1,0x91AE,/* 0xE0-0xE7 */
+	0x93E1,0x93D1,0x93DF,0x93C3,0x93C8,0x93DC,0x93DD,0x93D6,/* 0xE8-0xEF */
+	0x93E2,0x93CD,0x93D8,0x93E4,0x93D7,0x93E8,0x95DC,0x96B4,/* 0xF0-0xF7 */
+	0x96E3,0x972A,0x9727,0x9761,0x97DC,0x97FB,0x985E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x9858,0x985B,0x98BC,0x9945,0x9949,0x9A16,0x9A19,0x9B0D,/* 0x40-0x47 */
+	0x9BE8,0x9BE7,0x9BD6,0x9BDB,0x9D89,0x9D61,0x9D72,0x9D6A,/* 0x48-0x4F */
+	0x9D6C,0x9E92,0x9E97,0x9E93,0x9EB4,0x52F8,0x56A8,0x56B7,/* 0x50-0x57 */
+	0x56B6,0x56B4,0x56BC,0x58E4,0x5B40,0x5B43,0x5B7D,0x5BF6,/* 0x58-0x5F */
+	0x5DC9,0x61F8,0x61FA,0x6518,0x6514,0x6519,0x66E6,0x6727,/* 0x60-0x67 */
+	0x6AEC,0x703E,0x7030,0x7032,0x7210,0x737B,0x74CF,0x7662,/* 0x68-0x6F */
+	0x7665,0x7926,0x792A,0x792C,0x792B,0x7AC7,0x7AF6,0x7C4C,/* 0x70-0x77 */
+	0x7C43,0x7C4D,0x7CEF,0x7CF0,0x8FAE,0x7E7D,0x7E7C,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7E82,0x7F4C,0x8000,0x81DA,0x8266,0x85FB,0x85F9,/* 0xA0-0xA7 */
+	0x8611,0x85FA,0x8606,0x860B,0x8607,0x860A,0x8814,0x8815,/* 0xA8-0xAF */
+	0x8964,0x89BA,0x89F8,0x8B70,0x8B6C,0x8B66,0x8B6F,0x8B5F,/* 0xB0-0xB7 */
+	0x8B6B,0x8D0F,0x8D0D,0x8E89,0x8E81,0x8E85,0x8E82,0x91B4,/* 0xB8-0xBF */
+	0x91CB,0x9418,0x9403,0x93FD,0x95E1,0x9730,0x98C4,0x9952,/* 0xC0-0xC7 */
+	0x9951,0x99A8,0x9A2B,0x9A30,0x9A37,0x9A35,0x9C13,0x9C0D,/* 0xC8-0xCF */
+	0x9E79,0x9EB5,0x9EE8,0x9F2F,0x9F5F,0x9F63,0x9F61,0x5137,/* 0xD0-0xD7 */
+	0x5138,0x56C1,0x56C0,0x56C2,0x5914,0x5C6C,0x5DCD,0x61FC,/* 0xD8-0xDF */
+	0x61FE,0x651D,0x651C,0x6595,0x66E9,0x6AFB,0x6B04,0x6AFA,/* 0xE0-0xE7 */
+	0x6BB2,0x704C,0x721B,0x72A7,0x74D6,0x74D4,0x7669,0x77D3,/* 0xE8-0xEF */
+	0x7C50,0x7E8F,0x7E8C,0x7FBC,0x8617,0x862D,0x861A,0x8823,/* 0xF0-0xF7 */
+	0x8822,0x8821,0x881F,0x896A,0x896C,0x89BD,0x8B74,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8B77,0x8B7D,0x8D13,0x8E8A,0x8E8D,0x8E8B,0x8F5F,0x8FAF,/* 0x40-0x47 */
+	0x91BA,0x942E,0x9433,0x9435,0x943A,0x9438,0x9432,0x942B,/* 0x48-0x4F */
+	0x95E2,0x9738,0x9739,0x9732,0x97FF,0x9867,0x9865,0x9957,/* 0x50-0x57 */
+	0x9A45,0x9A43,0x9A40,0x9A3E,0x9ACF,0x9B54,0x9B51,0x9C2D,/* 0x58-0x5F */
+	0x9C25,0x9DAF,0x9DB4,0x9DC2,0x9DB8,0x9E9D,0x9EEF,0x9F19,/* 0x60-0x67 */
+	0x9F5C,0x9F66,0x9F67,0x513C,0x513B,0x56C8,0x56CA,0x56C9,/* 0x68-0x6F */
+	0x5B7F,0x5DD4,0x5DD2,0x5F4E,0x61FF,0x6524,0x6B0A,0x6B61,/* 0x70-0x77 */
+	0x7051,0x7058,0x7380,0x74E4,0x758A,0x766E,0x766C,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x79B3,0x7C60,0x7C5F,0x807E,0x807D,0x81DF,0x8972,/* 0xA0-0xA7 */
+	0x896F,0x89FC,0x8B80,0x8D16,0x8D17,0x8E91,0x8E93,0x8F61,/* 0xA8-0xAF */
+	0x9148,0x9444,0x9451,0x9452,0x973D,0x973E,0x97C3,0x97C1,/* 0xB0-0xB7 */
+	0x986B,0x9955,0x9A55,0x9A4D,0x9AD2,0x9B1A,0x9C49,0x9C31,/* 0xB8-0xBF */
+	0x9C3E,0x9C3B,0x9DD3,0x9DD7,0x9F34,0x9F6C,0x9F6A,0x9F94,/* 0xC0-0xC7 */
+	0x56CC,0x5DD6,0x6200,0x6523,0x652B,0x652A,0x66EC,0x6B10,/* 0xC8-0xCF */
+	0x74DA,0x7ACA,0x7C64,0x7C63,0x7C65,0x7E93,0x7E96,0x7E94,/* 0xD0-0xD7 */
+	0x81E2,0x8638,0x863F,0x8831,0x8B8A,0x9090,0x908F,0x9463,/* 0xD8-0xDF */
+	0x9460,0x9464,0x9768,0x986F,0x995C,0x9A5A,0x9A5B,0x9A57,/* 0xE0-0xE7 */
+	0x9AD3,0x9AD4,0x9AD1,0x9C54,0x9C57,0x9C56,0x9DE5,0x9E9F,/* 0xE8-0xEF */
+	0x9EF4,0x56D1,0x58E9,0x652C,0x705E,0x7671,0x7672,0x77D7,/* 0xF0-0xF7 */
+	0x7F50,0x7F88,0x8836,0x8839,0x8862,0x8B93,0x8B92,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_C6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8B96,0x8277,0x8D1B,0x91C0,0x946A,0x9742,0x9748,0x9744,/* 0x40-0x47 */
+	0x97C6,0x9870,0x9A5F,0x9B22,0x9B58,0x9C5F,0x9DF9,0x9DFA,/* 0x48-0x4F */
+	0x9E7C,0x9E7D,0x9F07,0x9F77,0x9F72,0x5EF3,0x6B16,0x7063,/* 0x50-0x57 */
+	0x7C6C,0x7C6E,0x883B,0x89C0,0x8EA1,0x91C1,0x9472,0x9470,/* 0x58-0x5F */
+	0x9871,0x995E,0x9AD6,0x9B23,0x9ECC,0x7064,0x77DA,0x8B9A,/* 0x60-0x67 */
+	0x9477,0x97C9,0x9A62,0x9A65,0x7E9C,0x8B9C,0x8EAA,0x91C5,/* 0x68-0x6F */
+	0x947D,0x947E,0x947C,0x9C77,0x9C78,0x9EF7,0x8C54,0x947F,/* 0x70-0x77 */
+	0x9E1A,0x7228,0x9A6A,0x9B31,0x9E1B,0x9E1E,0x7C72,0x0000,/* 0x78-0x7F */
+};
+
+static wchar_t c2u_C9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x4E42,0x4E5C,0x51F5,0x531A,0x5382,0x4E07,0x4E0C,0x4E47,/* 0x40-0x47 */
+	0x4E8D,0x56D7,0xFA0C,0x5C6E,0x5F73,0x4E0F,0x5187,0x4E0E,/* 0x48-0x4F */
+	0x4E2E,0x4E93,0x4EC2,0x4EC9,0x4EC8,0x5198,0x52FC,0x536C,/* 0x50-0x57 */
+	0x53B9,0x5720,0x5903,0x592C,0x5C10,0x5DFF,0x65E1,0x6BB3,/* 0x58-0x5F */
+	0x6BCC,0x6C14,0x723F,0x4E31,0x4E3C,0x4EE8,0x4EDC,0x4EE9,/* 0x60-0x67 */
+	0x4EE1,0x4EDD,0x4EDA,0x520C,0x531C,0x534C,0x5722,0x5723,/* 0x68-0x6F */
+	0x5917,0x592F,0x5B81,0x5B84,0x5C12,0x5C3B,0x5C74,0x5C73,/* 0x70-0x77 */
+	0x5E04,0x5E80,0x5E82,0x5FC9,0x6209,0x6250,0x6C15,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6C36,0x6C43,0x6C3F,0x6C3B,0x72AE,0x72B0,0x738A,/* 0xA0-0xA7 */
+	0x79B8,0x808A,0x961E,0x4F0E,0x4F18,0x4F2C,0x4EF5,0x4F14,/* 0xA8-0xAF */
+	0x4EF1,0x4F00,0x4EF7,0x4F08,0x4F1D,0x4F02,0x4F05,0x4F22,/* 0xB0-0xB7 */
+	0x4F13,0x4F04,0x4EF4,0x4F12,0x51B1,0x5213,0x5209,0x5210,/* 0xB8-0xBF */
+	0x52A6,0x5322,0x531F,0x534D,0x538A,0x5407,0x56E1,0x56DF,/* 0xC0-0xC7 */
+	0x572E,0x572A,0x5734,0x593C,0x5980,0x597C,0x5985,0x597B,/* 0xC8-0xCF */
+	0x597E,0x5977,0x597F,0x5B56,0x5C15,0x5C25,0x5C7C,0x5C7A,/* 0xD0-0xD7 */
+	0x5C7B,0x5C7E,0x5DDF,0x5E75,0x5E84,0x5F02,0x5F1A,0x5F74,/* 0xD8-0xDF */
+	0x5FD5,0x5FD4,0x5FCF,0x625C,0x625E,0x6264,0x6261,0x6266,/* 0xE0-0xE7 */
+	0x6262,0x6259,0x6260,0x625A,0x6265,0x65EF,0x65EE,0x673E,/* 0xE8-0xEF */
+	0x6739,0x6738,0x673B,0x673A,0x673F,0x673C,0x6733,0x6C18,/* 0xF0-0xF7 */
+	0x6C46,0x6C52,0x6C5C,0x6C4F,0x6C4A,0x6C54,0x6C4B,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6C4C,0x7071,0x725E,0x72B4,0x72B5,0x738E,0x752A,0x767F,/* 0x40-0x47 */
+	0x7A75,0x7F51,0x8278,0x827C,0x8280,0x827D,0x827F,0x864D,/* 0x48-0x4F */
+	0x897E,0x9099,0x9097,0x9098,0x909B,0x9094,0x9622,0x9624,/* 0x50-0x57 */
+	0x9620,0x9623,0x4F56,0x4F3B,0x4F62,0x4F49,0x4F53,0x4F64,/* 0x58-0x5F */
+	0x4F3E,0x4F67,0x4F52,0x4F5F,0x4F41,0x4F58,0x4F2D,0x4F33,/* 0x60-0x67 */
+	0x4F3F,0x4F61,0x518F,0x51B9,0x521C,0x521E,0x5221,0x52AD,/* 0x68-0x6F */
+	0x52AE,0x5309,0x5363,0x5372,0x538E,0x538F,0x5430,0x5437,/* 0x70-0x77 */
+	0x542A,0x5454,0x5445,0x5419,0x541C,0x5425,0x5418,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x543D,0x544F,0x5441,0x5428,0x5424,0x5447,0x56EE,/* 0xA0-0xA7 */
+	0x56E7,0x56E5,0x5741,0x5745,0x574C,0x5749,0x574B,0x5752,/* 0xA8-0xAF */
+	0x5906,0x5940,0x59A6,0x5998,0x59A0,0x5997,0x598E,0x59A2,/* 0xB0-0xB7 */
+	0x5990,0x598F,0x59A7,0x59A1,0x5B8E,0x5B92,0x5C28,0x5C2A,/* 0xB8-0xBF */
+	0x5C8D,0x5C8F,0x5C88,0x5C8B,0x5C89,0x5C92,0x5C8A,0x5C86,/* 0xC0-0xC7 */
+	0x5C93,0x5C95,0x5DE0,0x5E0A,0x5E0E,0x5E8B,0x5E89,0x5E8C,/* 0xC8-0xCF */
+	0x5E88,0x5E8D,0x5F05,0x5F1D,0x5F78,0x5F76,0x5FD2,0x5FD1,/* 0xD0-0xD7 */
+	0x5FD0,0x5FED,0x5FE8,0x5FEE,0x5FF3,0x5FE1,0x5FE4,0x5FE3,/* 0xD8-0xDF */
+	0x5FFA,0x5FEF,0x5FF7,0x5FFB,0x6000,0x5FF4,0x623A,0x6283,/* 0xE0-0xE7 */
+	0x628C,0x628E,0x628F,0x6294,0x6287,0x6271,0x627B,0x627A,/* 0xE8-0xEF */
+	0x6270,0x6281,0x6288,0x6277,0x627D,0x6272,0x6274,0x6537,/* 0xF0-0xF7 */
+	0x65F0,0x65F4,0x65F3,0x65F2,0x65F5,0x6745,0x6747,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6759,0x6755,0x674C,0x6748,0x675D,0x674D,0x675A,0x674B,/* 0x40-0x47 */
+	0x6BD0,0x6C19,0x6C1A,0x6C78,0x6C67,0x6C6B,0x6C84,0x6C8B,/* 0x48-0x4F */
+	0x6C8F,0x6C71,0x6C6F,0x6C69,0x6C9A,0x6C6D,0x6C87,0x6C95,/* 0x50-0x57 */
+	0x6C9C,0x6C66,0x6C73,0x6C65,0x6C7B,0x6C8E,0x7074,0x707A,/* 0x58-0x5F */
+	0x7263,0x72BF,0x72BD,0x72C3,0x72C6,0x72C1,0x72BA,0x72C5,/* 0x60-0x67 */
+	0x7395,0x7397,0x7393,0x7394,0x7392,0x753A,0x7539,0x7594,/* 0x68-0x6F */
+	0x7595,0x7681,0x793D,0x8034,0x8095,0x8099,0x8090,0x8092,/* 0x70-0x77 */
+	0x809C,0x8290,0x828F,0x8285,0x828E,0x8291,0x8293,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x828A,0x8283,0x8284,0x8C78,0x8FC9,0x8FBF,0x909F,/* 0xA0-0xA7 */
+	0x90A1,0x90A5,0x909E,0x90A7,0x90A0,0x9630,0x9628,0x962F,/* 0xA8-0xAF */
+	0x962D,0x4E33,0x4F98,0x4F7C,0x4F85,0x4F7D,0x4F80,0x4F87,/* 0xB0-0xB7 */
+	0x4F76,0x4F74,0x4F89,0x4F84,0x4F77,0x4F4C,0x4F97,0x4F6A,/* 0xB8-0xBF */
+	0x4F9A,0x4F79,0x4F81,0x4F78,0x4F90,0x4F9C,0x4F94,0x4F9E,/* 0xC0-0xC7 */
+	0x4F92,0x4F82,0x4F95,0x4F6B,0x4F6E,0x519E,0x51BC,0x51BE,/* 0xC8-0xCF */
+	0x5235,0x5232,0x5233,0x5246,0x5231,0x52BC,0x530A,0x530B,/* 0xD0-0xD7 */
+	0x533C,0x5392,0x5394,0x5487,0x547F,0x5481,0x5491,0x5482,/* 0xD8-0xDF */
+	0x5488,0x546B,0x547A,0x547E,0x5465,0x546C,0x5474,0x5466,/* 0xE0-0xE7 */
+	0x548D,0x546F,0x5461,0x5460,0x5498,0x5463,0x5467,0x5464,/* 0xE8-0xEF */
+	0x56F7,0x56F9,0x576F,0x5772,0x576D,0x576B,0x5771,0x5770,/* 0xF0-0xF7 */
+	0x5776,0x5780,0x5775,0x577B,0x5773,0x5774,0x5762,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5768,0x577D,0x590C,0x5945,0x59B5,0x59BA,0x59CF,0x59CE,/* 0x40-0x47 */
+	0x59B2,0x59CC,0x59C1,0x59B6,0x59BC,0x59C3,0x59D6,0x59B1,/* 0x48-0x4F */
+	0x59BD,0x59C0,0x59C8,0x59B4,0x59C7,0x5B62,0x5B65,0x5B93,/* 0x50-0x57 */
+	0x5B95,0x5C44,0x5C47,0x5CAE,0x5CA4,0x5CA0,0x5CB5,0x5CAF,/* 0x58-0x5F */
+	0x5CA8,0x5CAC,0x5C9F,0x5CA3,0x5CAD,0x5CA2,0x5CAA,0x5CA7,/* 0x60-0x67 */
+	0x5C9D,0x5CA5,0x5CB6,0x5CB0,0x5CA6,0x5E17,0x5E14,0x5E19,/* 0x68-0x6F */
+	0x5F28,0x5F22,0x5F23,0x5F24,0x5F54,0x5F82,0x5F7E,0x5F7D,/* 0x70-0x77 */
+	0x5FDE,0x5FE5,0x602D,0x6026,0x6019,0x6032,0x600B,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6034,0x600A,0x6017,0x6033,0x601A,0x601E,0x602C,/* 0xA0-0xA7 */
+	0x6022,0x600D,0x6010,0x602E,0x6013,0x6011,0x600C,0x6009,/* 0xA8-0xAF */
+	0x601C,0x6214,0x623D,0x62AD,0x62B4,0x62D1,0x62BE,0x62AA,/* 0xB0-0xB7 */
+	0x62B6,0x62CA,0x62AE,0x62B3,0x62AF,0x62BB,0x62A9,0x62B0,/* 0xB8-0xBF */
+	0x62B8,0x653D,0x65A8,0x65BB,0x6609,0x65FC,0x6604,0x6612,/* 0xC0-0xC7 */
+	0x6608,0x65FB,0x6603,0x660B,0x660D,0x6605,0x65FD,0x6611,/* 0xC8-0xCF */
+	0x6610,0x66F6,0x670A,0x6785,0x676C,0x678E,0x6792,0x6776,/* 0xD0-0xD7 */
+	0x677B,0x6798,0x6786,0x6784,0x6774,0x678D,0x678C,0x677A,/* 0xD8-0xDF */
+	0x679F,0x6791,0x6799,0x6783,0x677D,0x6781,0x6778,0x6779,/* 0xE0-0xE7 */
+	0x6794,0x6B25,0x6B80,0x6B7E,0x6BDE,0x6C1D,0x6C93,0x6CEC,/* 0xE8-0xEF */
+	0x6CEB,0x6CEE,0x6CD9,0x6CB6,0x6CD4,0x6CAD,0x6CE7,0x6CB7,/* 0xF0-0xF7 */
+	0x6CD0,0x6CC2,0x6CBA,0x6CC3,0x6CC6,0x6CED,0x6CF2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6CD2,0x6CDD,0x6CB4,0x6C8A,0x6C9D,0x6C80,0x6CDE,0x6CC0,/* 0x40-0x47 */
+	0x6D30,0x6CCD,0x6CC7,0x6CB0,0x6CF9,0x6CCF,0x6CE9,0x6CD1,/* 0x48-0x4F */
+	0x7094,0x7098,0x7085,0x7093,0x7086,0x7084,0x7091,0x7096,/* 0x50-0x57 */
+	0x7082,0x709A,0x7083,0x726A,0x72D6,0x72CB,0x72D8,0x72C9,/* 0x58-0x5F */
+	0x72DC,0x72D2,0x72D4,0x72DA,0x72CC,0x72D1,0x73A4,0x73A1,/* 0x60-0x67 */
+	0x73AD,0x73A6,0x73A2,0x73A0,0x73AC,0x739D,0x74DD,0x74E8,/* 0x68-0x6F */
+	0x753F,0x7540,0x753E,0x758C,0x7598,0x76AF,0x76F3,0x76F1,/* 0x70-0x77 */
+	0x76F0,0x76F5,0x77F8,0x77FC,0x77F9,0x77FB,0x77FA,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x77F7,0x7942,0x793F,0x79C5,0x7A78,0x7A7B,0x7AFB,/* 0xA0-0xA7 */
+	0x7C75,0x7CFD,0x8035,0x808F,0x80AE,0x80A3,0x80B8,0x80B5,/* 0xA8-0xAF */
+	0x80AD,0x8220,0x82A0,0x82C0,0x82AB,0x829A,0x8298,0x829B,/* 0xB0-0xB7 */
+	0x82B5,0x82A7,0x82AE,0x82BC,0x829E,0x82BA,0x82B4,0x82A8,/* 0xB8-0xBF */
+	0x82A1,0x82A9,0x82C2,0x82A4,0x82C3,0x82B6,0x82A2,0x8670,/* 0xC0-0xC7 */
+	0x866F,0x866D,0x866E,0x8C56,0x8FD2,0x8FCB,0x8FD3,0x8FCD,/* 0xC8-0xCF */
+	0x8FD6,0x8FD5,0x8FD7,0x90B2,0x90B4,0x90AF,0x90B3,0x90B0,/* 0xD0-0xD7 */
+	0x9639,0x963D,0x963C,0x963A,0x9643,0x4FCD,0x4FC5,0x4FD3,/* 0xD8-0xDF */
+	0x4FB2,0x4FC9,0x4FCB,0x4FC1,0x4FD4,0x4FDC,0x4FD9,0x4FBB,/* 0xE0-0xE7 */
+	0x4FB3,0x4FDB,0x4FC7,0x4FD6,0x4FBA,0x4FC0,0x4FB9,0x4FEC,/* 0xE8-0xEF */
+	0x5244,0x5249,0x52C0,0x52C2,0x533D,0x537C,0x5397,0x5396,/* 0xF0-0xF7 */
+	0x5399,0x5398,0x54BA,0x54A1,0x54AD,0x54A5,0x54CF,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x54C3,0x830D,0x54B7,0x54AE,0x54D6,0x54B6,0x54C5,0x54C6,/* 0x40-0x47 */
+	0x54A0,0x5470,0x54BC,0x54A2,0x54BE,0x5472,0x54DE,0x54B0,/* 0x48-0x4F */
+	0x57B5,0x579E,0x579F,0x57A4,0x578C,0x5797,0x579D,0x579B,/* 0x50-0x57 */
+	0x5794,0x5798,0x578F,0x5799,0x57A5,0x579A,0x5795,0x58F4,/* 0x58-0x5F */
+	0x590D,0x5953,0x59E1,0x59DE,0x59EE,0x5A00,0x59F1,0x59DD,/* 0x60-0x67 */
+	0x59FA,0x59FD,0x59FC,0x59F6,0x59E4,0x59F2,0x59F7,0x59DB,/* 0x68-0x6F */
+	0x59E9,0x59F3,0x59F5,0x59E0,0x59FE,0x59F4,0x59ED,0x5BA8,/* 0x70-0x77 */
+	0x5C4C,0x5CD0,0x5CD8,0x5CCC,0x5CD7,0x5CCB,0x5CDB,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5CDE,0x5CDA,0x5CC9,0x5CC7,0x5CCA,0x5CD6,0x5CD3,/* 0xA0-0xA7 */
+	0x5CD4,0x5CCF,0x5CC8,0x5CC6,0x5CCE,0x5CDF,0x5CF8,0x5DF9,/* 0xA8-0xAF */
+	0x5E21,0x5E22,0x5E23,0x5E20,0x5E24,0x5EB0,0x5EA4,0x5EA2,/* 0xB0-0xB7 */
+	0x5E9B,0x5EA3,0x5EA5,0x5F07,0x5F2E,0x5F56,0x5F86,0x6037,/* 0xB8-0xBF */
+	0x6039,0x6054,0x6072,0x605E,0x6045,0x6053,0x6047,0x6049,/* 0xC0-0xC7 */
+	0x605B,0x604C,0x6040,0x6042,0x605F,0x6024,0x6044,0x6058,/* 0xC8-0xCF */
+	0x6066,0x606E,0x6242,0x6243,0x62CF,0x630D,0x630B,0x62F5,/* 0xD0-0xD7 */
+	0x630E,0x6303,0x62EB,0x62F9,0x630F,0x630C,0x62F8,0x62F6,/* 0xD8-0xDF */
+	0x6300,0x6313,0x6314,0x62FA,0x6315,0x62FB,0x62F0,0x6541,/* 0xE0-0xE7 */
+	0x6543,0x65AA,0x65BF,0x6636,0x6621,0x6632,0x6635,0x661C,/* 0xE8-0xEF */
+	0x6626,0x6622,0x6633,0x662B,0x663A,0x661D,0x6634,0x6639,/* 0xF0-0xF7 */
+	0x662E,0x670F,0x6710,0x67C1,0x67F2,0x67C8,0x67BA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_CF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x67DC,0x67BB,0x67F8,0x67D8,0x67C0,0x67B7,0x67C5,0x67EB,/* 0x40-0x47 */
+	0x67E4,0x67DF,0x67B5,0x67CD,0x67B3,0x67F7,0x67F6,0x67EE,/* 0x48-0x4F */
+	0x67E3,0x67C2,0x67B9,0x67CE,0x67E7,0x67F0,0x67B2,0x67FC,/* 0x50-0x57 */
+	0x67C6,0x67ED,0x67CC,0x67AE,0x67E6,0x67DB,0x67FA,0x67C9,/* 0x58-0x5F */
+	0x67CA,0x67C3,0x67EA,0x67CB,0x6B28,0x6B82,0x6B84,0x6BB6,/* 0x60-0x67 */
+	0x6BD6,0x6BD8,0x6BE0,0x6C20,0x6C21,0x6D28,0x6D34,0x6D2D,/* 0x68-0x6F */
+	0x6D1F,0x6D3C,0x6D3F,0x6D12,0x6D0A,0x6CDA,0x6D33,0x6D04,/* 0x70-0x77 */
+	0x6D19,0x6D3A,0x6D1A,0x6D11,0x6D00,0x6D1D,0x6D42,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6D01,0x6D18,0x6D37,0x6D03,0x6D0F,0x6D40,0x6D07,/* 0xA0-0xA7 */
+	0x6D20,0x6D2C,0x6D08,0x6D22,0x6D09,0x6D10,0x70B7,0x709F,/* 0xA8-0xAF */
+	0x70BE,0x70B1,0x70B0,0x70A1,0x70B4,0x70B5,0x70A9,0x7241,/* 0xB0-0xB7 */
+	0x7249,0x724A,0x726C,0x7270,0x7273,0x726E,0x72CA,0x72E4,/* 0xB8-0xBF */
+	0x72E8,0x72EB,0x72DF,0x72EA,0x72E6,0x72E3,0x7385,0x73CC,/* 0xC0-0xC7 */
+	0x73C2,0x73C8,0x73C5,0x73B9,0x73B6,0x73B5,0x73B4,0x73EB,/* 0xC8-0xCF */
+	0x73BF,0x73C7,0x73BE,0x73C3,0x73C6,0x73B8,0x73CB,0x74EC,/* 0xD0-0xD7 */
+	0x74EE,0x752E,0x7547,0x7548,0x75A7,0x75AA,0x7679,0x76C4,/* 0xD8-0xDF */
+	0x7708,0x7703,0x7704,0x7705,0x770A,0x76F7,0x76FB,0x76FA,/* 0xE0-0xE7 */
+	0x77E7,0x77E8,0x7806,0x7811,0x7812,0x7805,0x7810,0x780F,/* 0xE8-0xEF */
+	0x780E,0x7809,0x7803,0x7813,0x794A,0x794C,0x794B,0x7945,/* 0xF0-0xF7 */
+	0x7944,0x79D5,0x79CD,0x79CF,0x79D6,0x79CE,0x7A80,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7A7E,0x7AD1,0x7B00,0x7B01,0x7C7A,0x7C78,0x7C79,0x7C7F,/* 0x40-0x47 */
+	0x7C80,0x7C81,0x7D03,0x7D08,0x7D01,0x7F58,0x7F91,0x7F8D,/* 0x48-0x4F */
+	0x7FBE,0x8007,0x800E,0x800F,0x8014,0x8037,0x80D8,0x80C7,/* 0x50-0x57 */
+	0x80E0,0x80D1,0x80C8,0x80C2,0x80D0,0x80C5,0x80E3,0x80D9,/* 0x58-0x5F */
+	0x80DC,0x80CA,0x80D5,0x80C9,0x80CF,0x80D7,0x80E6,0x80CD,/* 0x60-0x67 */
+	0x81FF,0x8221,0x8294,0x82D9,0x82FE,0x82F9,0x8307,0x82E8,/* 0x68-0x6F */
+	0x8300,0x82D5,0x833A,0x82EB,0x82D6,0x82F4,0x82EC,0x82E1,/* 0x70-0x77 */
+	0x82F2,0x82F5,0x830C,0x82FB,0x82F6,0x82F0,0x82EA,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x82E4,0x82E0,0x82FA,0x82F3,0x82ED,0x8677,0x8674,/* 0xA0-0xA7 */
+	0x867C,0x8673,0x8841,0x884E,0x8867,0x886A,0x8869,0x89D3,/* 0xA8-0xAF */
+	0x8A04,0x8A07,0x8D72,0x8FE3,0x8FE1,0x8FEE,0x8FE0,0x90F1,/* 0xB0-0xB7 */
+	0x90BD,0x90BF,0x90D5,0x90C5,0x90BE,0x90C7,0x90CB,0x90C8,/* 0xB8-0xBF */
+	0x91D4,0x91D3,0x9654,0x964F,0x9651,0x9653,0x964A,0x964E,/* 0xC0-0xC7 */
+	0x501E,0x5005,0x5007,0x5013,0x5022,0x5030,0x501B,0x4FF5,/* 0xC8-0xCF */
+	0x4FF4,0x5033,0x5037,0x502C,0x4FF6,0x4FF7,0x5017,0x501C,/* 0xD0-0xD7 */
+	0x5020,0x5027,0x5035,0x502F,0x5031,0x500E,0x515A,0x5194,/* 0xD8-0xDF */
+	0x5193,0x51CA,0x51C4,0x51C5,0x51C8,0x51CE,0x5261,0x525A,/* 0xE0-0xE7 */
+	0x5252,0x525E,0x525F,0x5255,0x5262,0x52CD,0x530E,0x539E,/* 0xE8-0xEF */
+	0x5526,0x54E2,0x5517,0x5512,0x54E7,0x54F3,0x54E4,0x551A,/* 0xF0-0xF7 */
+	0x54FF,0x5504,0x5508,0x54EB,0x5511,0x5505,0x54F1,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x550A,0x54FB,0x54F7,0x54F8,0x54E0,0x550E,0x5503,0x550B,/* 0x40-0x47 */
+	0x5701,0x5702,0x57CC,0x5832,0x57D5,0x57D2,0x57BA,0x57C6,/* 0x48-0x4F */
+	0x57BD,0x57BC,0x57B8,0x57B6,0x57BF,0x57C7,0x57D0,0x57B9,/* 0x50-0x57 */
+	0x57C1,0x590E,0x594A,0x5A19,0x5A16,0x5A2D,0x5A2E,0x5A15,/* 0x58-0x5F */
+	0x5A0F,0x5A17,0x5A0A,0x5A1E,0x5A33,0x5B6C,0x5BA7,0x5BAD,/* 0x60-0x67 */
+	0x5BAC,0x5C03,0x5C56,0x5C54,0x5CEC,0x5CFF,0x5CEE,0x5CF1,/* 0x68-0x6F */
+	0x5CF7,0x5D00,0x5CF9,0x5E29,0x5E28,0x5EA8,0x5EAE,0x5EAA,/* 0x70-0x77 */
+	0x5EAC,0x5F33,0x5F30,0x5F67,0x605D,0x605A,0x6067,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6041,0x60A2,0x6088,0x6080,0x6092,0x6081,0x609D,/* 0xA0-0xA7 */
+	0x6083,0x6095,0x609B,0x6097,0x6087,0x609C,0x608E,0x6219,/* 0xA8-0xAF */
+	0x6246,0x62F2,0x6310,0x6356,0x632C,0x6344,0x6345,0x6336,/* 0xB0-0xB7 */
+	0x6343,0x63E4,0x6339,0x634B,0x634A,0x633C,0x6329,0x6341,/* 0xB8-0xBF */
+	0x6334,0x6358,0x6354,0x6359,0x632D,0x6347,0x6333,0x635A,/* 0xC0-0xC7 */
+	0x6351,0x6338,0x6357,0x6340,0x6348,0x654A,0x6546,0x65C6,/* 0xC8-0xCF */
+	0x65C3,0x65C4,0x65C2,0x664A,0x665F,0x6647,0x6651,0x6712,/* 0xD0-0xD7 */
+	0x6713,0x681F,0x681A,0x6849,0x6832,0x6833,0x683B,0x684B,/* 0xD8-0xDF */
+	0x684F,0x6816,0x6831,0x681C,0x6835,0x682B,0x682D,0x682F,/* 0xE0-0xE7 */
+	0x684E,0x6844,0x6834,0x681D,0x6812,0x6814,0x6826,0x6828,/* 0xE8-0xEF */
+	0x682E,0x684D,0x683A,0x6825,0x6820,0x6B2C,0x6B2F,0x6B2D,/* 0xF0-0xF7 */
+	0x6B31,0x6B34,0x6B6D,0x8082,0x6B88,0x6BE6,0x6BE4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6BE8,0x6BE3,0x6BE2,0x6BE7,0x6C25,0x6D7A,0x6D63,0x6D64,/* 0x40-0x47 */
+	0x6D76,0x6D0D,0x6D61,0x6D92,0x6D58,0x6D62,0x6D6D,0x6D6F,/* 0x48-0x4F */
+	0x6D91,0x6D8D,0x6DEF,0x6D7F,0x6D86,0x6D5E,0x6D67,0x6D60,/* 0x50-0x57 */
+	0x6D97,0x6D70,0x6D7C,0x6D5F,0x6D82,0x6D98,0x6D2F,0x6D68,/* 0x58-0x5F */
+	0x6D8B,0x6D7E,0x6D80,0x6D84,0x6D16,0x6D83,0x6D7B,0x6D7D,/* 0x60-0x67 */
+	0x6D75,0x6D90,0x70DC,0x70D3,0x70D1,0x70DD,0x70CB,0x7F39,/* 0x68-0x6F */
+	0x70E2,0x70D7,0x70D2,0x70DE,0x70E0,0x70D4,0x70CD,0x70C5,/* 0x70-0x77 */
+	0x70C6,0x70C7,0x70DA,0x70CE,0x70E1,0x7242,0x7278,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7277,0x7276,0x7300,0x72FA,0x72F4,0x72FE,0x72F6,/* 0xA0-0xA7 */
+	0x72F3,0x72FB,0x7301,0x73D3,0x73D9,0x73E5,0x73D6,0x73BC,/* 0xA8-0xAF */
+	0x73E7,0x73E3,0x73E9,0x73DC,0x73D2,0x73DB,0x73D4,0x73DD,/* 0xB0-0xB7 */
+	0x73DA,0x73D7,0x73D8,0x73E8,0x74DE,0x74DF,0x74F4,0x74F5,/* 0xB8-0xBF */
+	0x7521,0x755B,0x755F,0x75B0,0x75C1,0x75BB,0x75C4,0x75C0,/* 0xC0-0xC7 */
+	0x75BF,0x75B6,0x75BA,0x768A,0x76C9,0x771D,0x771B,0x7710,/* 0xC8-0xCF */
+	0x7713,0x7712,0x7723,0x7711,0x7715,0x7719,0x771A,0x7722,/* 0xD0-0xD7 */
+	0x7727,0x7823,0x782C,0x7822,0x7835,0x782F,0x7828,0x782E,/* 0xD8-0xDF */
+	0x782B,0x7821,0x7829,0x7833,0x782A,0x7831,0x7954,0x795B,/* 0xE0-0xE7 */
+	0x794F,0x795C,0x7953,0x7952,0x7951,0x79EB,0x79EC,0x79E0,/* 0xE8-0xEF */
+	0x79EE,0x79ED,0x79EA,0x79DC,0x79DE,0x79DD,0x7A86,0x7A89,/* 0xF0-0xF7 */
+	0x7A85,0x7A8B,0x7A8C,0x7A8A,0x7A87,0x7AD8,0x7B10,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7B04,0x7B13,0x7B05,0x7B0F,0x7B08,0x7B0A,0x7B0E,0x7B09,/* 0x40-0x47 */
+	0x7B12,0x7C84,0x7C91,0x7C8A,0x7C8C,0x7C88,0x7C8D,0x7C85,/* 0x48-0x4F */
+	0x7D1E,0x7D1D,0x7D11,0x7D0E,0x7D18,0x7D16,0x7D13,0x7D1F,/* 0x50-0x57 */
+	0x7D12,0x7D0F,0x7D0C,0x7F5C,0x7F61,0x7F5E,0x7F60,0x7F5D,/* 0x58-0x5F */
+	0x7F5B,0x7F96,0x7F92,0x7FC3,0x7FC2,0x7FC0,0x8016,0x803E,/* 0x60-0x67 */
+	0x8039,0x80FA,0x80F2,0x80F9,0x80F5,0x8101,0x80FB,0x8100,/* 0x68-0x6F */
+	0x8201,0x822F,0x8225,0x8333,0x832D,0x8344,0x8319,0x8351,/* 0x70-0x77 */
+	0x8325,0x8356,0x833F,0x8341,0x8326,0x831C,0x8322,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8342,0x834E,0x831B,0x832A,0x8308,0x833C,0x834D,/* 0xA0-0xA7 */
+	0x8316,0x8324,0x8320,0x8337,0x832F,0x8329,0x8347,0x8345,/* 0xA8-0xAF */
+	0x834C,0x8353,0x831E,0x832C,0x834B,0x8327,0x8348,0x8653,/* 0xB0-0xB7 */
+	0x8652,0x86A2,0x86A8,0x8696,0x868D,0x8691,0x869E,0x8687,/* 0xB8-0xBF */
+	0x8697,0x8686,0x868B,0x869A,0x8685,0x86A5,0x8699,0x86A1,/* 0xC0-0xC7 */
+	0x86A7,0x8695,0x8698,0x868E,0x869D,0x8690,0x8694,0x8843,/* 0xC8-0xCF */
+	0x8844,0x886D,0x8875,0x8876,0x8872,0x8880,0x8871,0x887F,/* 0xD0-0xD7 */
+	0x886F,0x8883,0x887E,0x8874,0x887C,0x8A12,0x8C47,0x8C57,/* 0xD8-0xDF */
+	0x8C7B,0x8CA4,0x8CA3,0x8D76,0x8D78,0x8DB5,0x8DB7,0x8DB6,/* 0xE0-0xE7 */
+	0x8ED1,0x8ED3,0x8FFE,0x8FF5,0x9002,0x8FFF,0x8FFB,0x9004,/* 0xE8-0xEF */
+	0x8FFC,0x8FF6,0x90D6,0x90E0,0x90D9,0x90DA,0x90E3,0x90DF,/* 0xF0-0xF7 */
+	0x90E5,0x90D8,0x90DB,0x90D7,0x90DC,0x90E4,0x9150,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x914E,0x914F,0x91D5,0x91E2,0x91DA,0x965C,0x965F,0x96BC,/* 0x40-0x47 */
+	0x98E3,0x9ADF,0x9B2F,0x4E7F,0x5070,0x506A,0x5061,0x505E,/* 0x48-0x4F */
+	0x5060,0x5053,0x504B,0x505D,0x5072,0x5048,0x504D,0x5041,/* 0x50-0x57 */
+	0x505B,0x504A,0x5062,0x5015,0x5045,0x505F,0x5069,0x506B,/* 0x58-0x5F */
+	0x5063,0x5064,0x5046,0x5040,0x506E,0x5073,0x5057,0x5051,/* 0x60-0x67 */
+	0x51D0,0x526B,0x526D,0x526C,0x526E,0x52D6,0x52D3,0x532D,/* 0x68-0x6F */
+	0x539C,0x5575,0x5576,0x553C,0x554D,0x5550,0x5534,0x552A,/* 0x70-0x77 */
+	0x5551,0x5562,0x5536,0x5535,0x5530,0x5552,0x5545,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x550C,0x5532,0x5565,0x554E,0x5539,0x5548,0x552D,/* 0xA0-0xA7 */
+	0x553B,0x5540,0x554B,0x570A,0x5707,0x57FB,0x5814,0x57E2,/* 0xA8-0xAF */
+	0x57F6,0x57DC,0x57F4,0x5800,0x57ED,0x57FD,0x5808,0x57F8,/* 0xB0-0xB7 */
+	0x580B,0x57F3,0x57CF,0x5807,0x57EE,0x57E3,0x57F2,0x57E5,/* 0xB8-0xBF */
+	0x57EC,0x57E1,0x580E,0x57FC,0x5810,0x57E7,0x5801,0x580C,/* 0xC0-0xC7 */
+	0x57F1,0x57E9,0x57F0,0x580D,0x5804,0x595C,0x5A60,0x5A58,/* 0xC8-0xCF */
+	0x5A55,0x5A67,0x5A5E,0x5A38,0x5A35,0x5A6D,0x5A50,0x5A5F,/* 0xD0-0xD7 */
+	0x5A65,0x5A6C,0x5A53,0x5A64,0x5A57,0x5A43,0x5A5D,0x5A52,/* 0xD8-0xDF */
+	0x5A44,0x5A5B,0x5A48,0x5A8E,0x5A3E,0x5A4D,0x5A39,0x5A4C,/* 0xE0-0xE7 */
+	0x5A70,0x5A69,0x5A47,0x5A51,0x5A56,0x5A42,0x5A5C,0x5B72,/* 0xE8-0xEF */
+	0x5B6E,0x5BC1,0x5BC0,0x5C59,0x5D1E,0x5D0B,0x5D1D,0x5D1A,/* 0xF0-0xF7 */
+	0x5D20,0x5D0C,0x5D28,0x5D0D,0x5D26,0x5D25,0x5D0F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5D30,0x5D12,0x5D23,0x5D1F,0x5D2E,0x5E3E,0x5E34,0x5EB1,/* 0x40-0x47 */
+	0x5EB4,0x5EB9,0x5EB2,0x5EB3,0x5F36,0x5F38,0x5F9B,0x5F96,/* 0x48-0x4F */
+	0x5F9F,0x608A,0x6090,0x6086,0x60BE,0x60B0,0x60BA,0x60D3,/* 0x50-0x57 */
+	0x60D4,0x60CF,0x60E4,0x60D9,0x60DD,0x60C8,0x60B1,0x60DB,/* 0x58-0x5F */
+	0x60B7,0x60CA,0x60BF,0x60C3,0x60CD,0x60C0,0x6332,0x6365,/* 0x60-0x67 */
+	0x638A,0x6382,0x637D,0x63BD,0x639E,0x63AD,0x639D,0x6397,/* 0x68-0x6F */
+	0x63AB,0x638E,0x636F,0x6387,0x6390,0x636E,0x63AF,0x6375,/* 0x70-0x77 */
+	0x639C,0x636D,0x63AE,0x637C,0x63A4,0x633B,0x639F,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6378,0x6385,0x6381,0x6391,0x638D,0x6370,0x6553,/* 0xA0-0xA7 */
+	0x65CD,0x6665,0x6661,0x665B,0x6659,0x665C,0x6662,0x6718,/* 0xA8-0xAF */
+	0x6879,0x6887,0x6890,0x689C,0x686D,0x686E,0x68AE,0x68AB,/* 0xB0-0xB7 */
+	0x6956,0x686F,0x68A3,0x68AC,0x68A9,0x6875,0x6874,0x68B2,/* 0xB8-0xBF */
+	0x688F,0x6877,0x6892,0x687C,0x686B,0x6872,0x68AA,0x6880,/* 0xC0-0xC7 */
+	0x6871,0x687E,0x689B,0x6896,0x688B,0x68A0,0x6889,0x68A4,/* 0xC8-0xCF */
+	0x6878,0x687B,0x6891,0x688C,0x688A,0x687D,0x6B36,0x6B33,/* 0xD0-0xD7 */
+	0x6B37,0x6B38,0x6B91,0x6B8F,0x6B8D,0x6B8E,0x6B8C,0x6C2A,/* 0xD8-0xDF */
+	0x6DC0,0x6DAB,0x6DB4,0x6DB3,0x6E74,0x6DAC,0x6DE9,0x6DE2,/* 0xE0-0xE7 */
+	0x6DB7,0x6DF6,0x6DD4,0x6E00,0x6DC8,0x6DE0,0x6DDF,0x6DD6,/* 0xE8-0xEF */
+	0x6DBE,0x6DE5,0x6DDC,0x6DDD,0x6DDB,0x6DF4,0x6DCA,0x6DBD,/* 0xF0-0xF7 */
+	0x6DED,0x6DF0,0x6DBA,0x6DD5,0x6DC2,0x6DCF,0x6DC9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6DD0,0x6DF2,0x6DD3,0x6DFD,0x6DD7,0x6DCD,0x6DE3,0x6DBB,/* 0x40-0x47 */
+	0x70FA,0x710D,0x70F7,0x7117,0x70F4,0x710C,0x70F0,0x7104,/* 0x48-0x4F */
+	0x70F3,0x7110,0x70FC,0x70FF,0x7106,0x7113,0x7100,0x70F8,/* 0x50-0x57 */
+	0x70F6,0x710B,0x7102,0x710E,0x727E,0x727B,0x727C,0x727F,/* 0x58-0x5F */
+	0x731D,0x7317,0x7307,0x7311,0x7318,0x730A,0x7308,0x72FF,/* 0x60-0x67 */
+	0x730F,0x731E,0x7388,0x73F6,0x73F8,0x73F5,0x7404,0x7401,/* 0x68-0x6F */
+	0x73FD,0x7407,0x7400,0x73FA,0x73FC,0x73FF,0x740C,0x740B,/* 0x70-0x77 */
+	0x73F4,0x7408,0x7564,0x7563,0x75CE,0x75D2,0x75CF,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x75CB,0x75CC,0x75D1,0x75D0,0x768F,0x7689,0x76D3,/* 0xA0-0xA7 */
+	0x7739,0x772F,0x772D,0x7731,0x7732,0x7734,0x7733,0x773D,/* 0xA8-0xAF */
+	0x7725,0x773B,0x7735,0x7848,0x7852,0x7849,0x784D,0x784A,/* 0xB0-0xB7 */
+	0x784C,0x7826,0x7845,0x7850,0x7964,0x7967,0x7969,0x796A,/* 0xB8-0xBF */
+	0x7963,0x796B,0x7961,0x79BB,0x79FA,0x79F8,0x79F6,0x79F7,/* 0xC0-0xC7 */
+	0x7A8F,0x7A94,0x7A90,0x7B35,0x7B47,0x7B34,0x7B25,0x7B30,/* 0xC8-0xCF */
+	0x7B22,0x7B24,0x7B33,0x7B18,0x7B2A,0x7B1D,0x7B31,0x7B2B,/* 0xD0-0xD7 */
+	0x7B2D,0x7B2F,0x7B32,0x7B38,0x7B1A,0x7B23,0x7C94,0x7C98,/* 0xD8-0xDF */
+	0x7C96,0x7CA3,0x7D35,0x7D3D,0x7D38,0x7D36,0x7D3A,0x7D45,/* 0xE0-0xE7 */
+	0x7D2C,0x7D29,0x7D41,0x7D47,0x7D3E,0x7D3F,0x7D4A,0x7D3B,/* 0xE8-0xEF */
+	0x7D28,0x7F63,0x7F95,0x7F9C,0x7F9D,0x7F9B,0x7FCA,0x7FCB,/* 0xF0-0xF7 */
+	0x7FCD,0x7FD0,0x7FD1,0x7FC7,0x7FCF,0x7FC9,0x801F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x801E,0x801B,0x8047,0x8043,0x8048,0x8118,0x8125,0x8119,/* 0x40-0x47 */
+	0x811B,0x812D,0x811F,0x812C,0x811E,0x8121,0x8115,0x8127,/* 0x48-0x4F */
+	0x811D,0x8122,0x8211,0x8238,0x8233,0x823A,0x8234,0x8232,/* 0x50-0x57 */
+	0x8274,0x8390,0x83A3,0x83A8,0x838D,0x837A,0x8373,0x83A4,/* 0x58-0x5F */
+	0x8374,0x838F,0x8381,0x8395,0x8399,0x8375,0x8394,0x83A9,/* 0x60-0x67 */
+	0x837D,0x8383,0x838C,0x839D,0x839B,0x83AA,0x838B,0x837E,/* 0x68-0x6F */
+	0x83A5,0x83AF,0x8388,0x8397,0x83B0,0x837F,0x83A6,0x8387,/* 0x70-0x77 */
+	0x83AE,0x8376,0x839A,0x8659,0x8656,0x86BF,0x86B7,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x86C2,0x86C1,0x86C5,0x86BA,0x86B0,0x86C8,0x86B9,/* 0xA0-0xA7 */
+	0x86B3,0x86B8,0x86CC,0x86B4,0x86BB,0x86BC,0x86C3,0x86BD,/* 0xA8-0xAF */
+	0x86BE,0x8852,0x8889,0x8895,0x88A8,0x88A2,0x88AA,0x889A,/* 0xB0-0xB7 */
+	0x8891,0x88A1,0x889F,0x8898,0x88A7,0x8899,0x889B,0x8897,/* 0xB8-0xBF */
+	0x88A4,0x88AC,0x888C,0x8893,0x888E,0x8982,0x89D6,0x89D9,/* 0xC0-0xC7 */
+	0x89D5,0x8A30,0x8A27,0x8A2C,0x8A1E,0x8C39,0x8C3B,0x8C5C,/* 0xC8-0xCF */
+	0x8C5D,0x8C7D,0x8CA5,0x8D7D,0x8D7B,0x8D79,0x8DBC,0x8DC2,/* 0xD0-0xD7 */
+	0x8DB9,0x8DBF,0x8DC1,0x8ED8,0x8EDE,0x8EDD,0x8EDC,0x8ED7,/* 0xD8-0xDF */
+	0x8EE0,0x8EE1,0x9024,0x900B,0x9011,0x901C,0x900C,0x9021,/* 0xE0-0xE7 */
+	0x90EF,0x90EA,0x90F0,0x90F4,0x90F2,0x90F3,0x90D4,0x90EB,/* 0xE8-0xEF */
+	0x90EC,0x90E9,0x9156,0x9158,0x915A,0x9153,0x9155,0x91EC,/* 0xF0-0xF7 */
+	0x91F4,0x91F1,0x91F3,0x91F8,0x91E4,0x91F9,0x91EA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x91EB,0x91F7,0x91E8,0x91EE,0x957A,0x9586,0x9588,0x967C,/* 0x40-0x47 */
+	0x966D,0x966B,0x9671,0x966F,0x96BF,0x976A,0x9804,0x98E5,/* 0x48-0x4F */
+	0x9997,0x509B,0x5095,0x5094,0x509E,0x508B,0x50A3,0x5083,/* 0x50-0x57 */
+	0x508C,0x508E,0x509D,0x5068,0x509C,0x5092,0x5082,0x5087,/* 0x58-0x5F */
+	0x515F,0x51D4,0x5312,0x5311,0x53A4,0x53A7,0x5591,0x55A8,/* 0x60-0x67 */
+	0x55A5,0x55AD,0x5577,0x5645,0x55A2,0x5593,0x5588,0x558F,/* 0x68-0x6F */
+	0x55B5,0x5581,0x55A3,0x5592,0x55A4,0x557D,0x558C,0x55A6,/* 0x70-0x77 */
+	0x557F,0x5595,0x55A1,0x558E,0x570C,0x5829,0x5837,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5819,0x581E,0x5827,0x5823,0x5828,0x57F5,0x5848,/* 0xA0-0xA7 */
+	0x5825,0x581C,0x581B,0x5833,0x583F,0x5836,0x582E,0x5839,/* 0xA8-0xAF */
+	0x5838,0x582D,0x582C,0x583B,0x5961,0x5AAF,0x5A94,0x5A9F,/* 0xB0-0xB7 */
+	0x5A7A,0x5AA2,0x5A9E,0x5A78,0x5AA6,0x5A7C,0x5AA5,0x5AAC,/* 0xB8-0xBF */
+	0x5A95,0x5AAE,0x5A37,0x5A84,0x5A8A,0x5A97,0x5A83,0x5A8B,/* 0xC0-0xC7 */
+	0x5AA9,0x5A7B,0x5A7D,0x5A8C,0x5A9C,0x5A8F,0x5A93,0x5A9D,/* 0xC8-0xCF */
+	0x5BEA,0x5BCD,0x5BCB,0x5BD4,0x5BD1,0x5BCA,0x5BCE,0x5C0C,/* 0xD0-0xD7 */
+	0x5C30,0x5D37,0x5D43,0x5D6B,0x5D41,0x5D4B,0x5D3F,0x5D35,/* 0xD8-0xDF */
+	0x5D51,0x5D4E,0x5D55,0x5D33,0x5D3A,0x5D52,0x5D3D,0x5D31,/* 0xE0-0xE7 */
+	0x5D59,0x5D42,0x5D39,0x5D49,0x5D38,0x5D3C,0x5D32,0x5D36,/* 0xE8-0xEF */
+	0x5D40,0x5D45,0x5E44,0x5E41,0x5F58,0x5FA6,0x5FA5,0x5FAB,/* 0xF0-0xF7 */
+	0x60C9,0x60B9,0x60CC,0x60E2,0x60CE,0x60C4,0x6114,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_D9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x60F2,0x610A,0x6116,0x6105,0x60F5,0x6113,0x60F8,0x60FC,/* 0x40-0x47 */
+	0x60FE,0x60C1,0x6103,0x6118,0x611D,0x6110,0x60FF,0x6104,/* 0x48-0x4F */
+	0x610B,0x624A,0x6394,0x63B1,0x63B0,0x63CE,0x63E5,0x63E8,/* 0x50-0x57 */
+	0x63EF,0x63C3,0x649D,0x63F3,0x63CA,0x63E0,0x63F6,0x63D5,/* 0x58-0x5F */
+	0x63F2,0x63F5,0x6461,0x63DF,0x63BE,0x63DD,0x63DC,0x63C4,/* 0x60-0x67 */
+	0x63D8,0x63D3,0x63C2,0x63C7,0x63CC,0x63CB,0x63C8,0x63F0,/* 0x68-0x6F */
+	0x63D7,0x63D9,0x6532,0x6567,0x656A,0x6564,0x655C,0x6568,/* 0x70-0x77 */
+	0x6565,0x658C,0x659D,0x659E,0x65AE,0x65D0,0x65D2,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x667C,0x666C,0x667B,0x6680,0x6671,0x6679,0x666A,/* 0xA0-0xA7 */
+	0x6672,0x6701,0x690C,0x68D3,0x6904,0x68DC,0x692A,0x68EC,/* 0xA8-0xAF */
+	0x68EA,0x68F1,0x690F,0x68D6,0x68F7,0x68EB,0x68E4,0x68F6,/* 0xB0-0xB7 */
+	0x6913,0x6910,0x68F3,0x68E1,0x6907,0x68CC,0x6908,0x6970,/* 0xB8-0xBF */
+	0x68B4,0x6911,0x68EF,0x68C6,0x6914,0x68F8,0x68D0,0x68FD,/* 0xC0-0xC7 */
+	0x68FC,0x68E8,0x690B,0x690A,0x6917,0x68CE,0x68C8,0x68DD,/* 0xC8-0xCF */
+	0x68DE,0x68E6,0x68F4,0x68D1,0x6906,0x68D4,0x68E9,0x6915,/* 0xD0-0xD7 */
+	0x6925,0x68C7,0x6B39,0x6B3B,0x6B3F,0x6B3C,0x6B94,0x6B97,/* 0xD8-0xDF */
+	0x6B99,0x6B95,0x6BBD,0x6BF0,0x6BF2,0x6BF3,0x6C30,0x6DFC,/* 0xE0-0xE7 */
+	0x6E46,0x6E47,0x6E1F,0x6E49,0x6E88,0x6E3C,0x6E3D,0x6E45,/* 0xE8-0xEF */
+	0x6E62,0x6E2B,0x6E3F,0x6E41,0x6E5D,0x6E73,0x6E1C,0x6E33,/* 0xF0-0xF7 */
+	0x6E4B,0x6E40,0x6E51,0x6E3B,0x6E03,0x6E2E,0x6E5E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6E68,0x6E5C,0x6E61,0x6E31,0x6E28,0x6E60,0x6E71,0x6E6B,/* 0x40-0x47 */
+	0x6E39,0x6E22,0x6E30,0x6E53,0x6E65,0x6E27,0x6E78,0x6E64,/* 0x48-0x4F */
+	0x6E77,0x6E55,0x6E79,0x6E52,0x6E66,0x6E35,0x6E36,0x6E5A,/* 0x50-0x57 */
+	0x7120,0x711E,0x712F,0x70FB,0x712E,0x7131,0x7123,0x7125,/* 0x58-0x5F */
+	0x7122,0x7132,0x711F,0x7128,0x713A,0x711B,0x724B,0x725A,/* 0x60-0x67 */
+	0x7288,0x7289,0x7286,0x7285,0x728B,0x7312,0x730B,0x7330,/* 0x68-0x6F */
+	0x7322,0x7331,0x7333,0x7327,0x7332,0x732D,0x7326,0x7323,/* 0x70-0x77 */
+	0x7335,0x730C,0x742E,0x742C,0x7430,0x742B,0x7416,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x741A,0x7421,0x742D,0x7431,0x7424,0x7423,0x741D,/* 0xA0-0xA7 */
+	0x7429,0x7420,0x7432,0x74FB,0x752F,0x756F,0x756C,0x75E7,/* 0xA8-0xAF */
+	0x75DA,0x75E1,0x75E6,0x75DD,0x75DF,0x75E4,0x75D7,0x7695,/* 0xB0-0xB7 */
+	0x7692,0x76DA,0x7746,0x7747,0x7744,0x774D,0x7745,0x774A,/* 0xB8-0xBF */
+	0x774E,0x774B,0x774C,0x77DE,0x77EC,0x7860,0x7864,0x7865,/* 0xC0-0xC7 */
+	0x785C,0x786D,0x7871,0x786A,0x786E,0x7870,0x7869,0x7868,/* 0xC8-0xCF */
+	0x785E,0x7862,0x7974,0x7973,0x7972,0x7970,0x7A02,0x7A0A,/* 0xD0-0xD7 */
+	0x7A03,0x7A0C,0x7A04,0x7A99,0x7AE6,0x7AE4,0x7B4A,0x7B3B,/* 0xD8-0xDF */
+	0x7B44,0x7B48,0x7B4C,0x7B4E,0x7B40,0x7B58,0x7B45,0x7CA2,/* 0xE0-0xE7 */
+	0x7C9E,0x7CA8,0x7CA1,0x7D58,0x7D6F,0x7D63,0x7D53,0x7D56,/* 0xE8-0xEF */
+	0x7D67,0x7D6A,0x7D4F,0x7D6D,0x7D5C,0x7D6B,0x7D52,0x7D54,/* 0xF0-0xF7 */
+	0x7D69,0x7D51,0x7D5F,0x7D4E,0x7F3E,0x7F3F,0x7F65,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7F66,0x7FA2,0x7FA0,0x7FA1,0x7FD7,0x8051,0x804F,0x8050,/* 0x40-0x47 */
+	0x80FE,0x80D4,0x8143,0x814A,0x8152,0x814F,0x8147,0x813D,/* 0x48-0x4F */
+	0x814D,0x813A,0x81E6,0x81EE,0x81F7,0x81F8,0x81F9,0x8204,/* 0x50-0x57 */
+	0x823C,0x823D,0x823F,0x8275,0x833B,0x83CF,0x83F9,0x8423,/* 0x58-0x5F */
+	0x83C0,0x83E8,0x8412,0x83E7,0x83E4,0x83FC,0x83F6,0x8410,/* 0x60-0x67 */
+	0x83C6,0x83C8,0x83EB,0x83E3,0x83BF,0x8401,0x83DD,0x83E5,/* 0x68-0x6F */
+	0x83D8,0x83FF,0x83E1,0x83CB,0x83CE,0x83D6,0x83F5,0x83C9,/* 0x70-0x77 */
+	0x8409,0x840F,0x83DE,0x8411,0x8406,0x83C2,0x83F3,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x83D5,0x83FA,0x83C7,0x83D1,0x83EA,0x8413,0x83C3,/* 0xA0-0xA7 */
+	0x83EC,0x83EE,0x83C4,0x83FB,0x83D7,0x83E2,0x841B,0x83DB,/* 0xA8-0xAF */
+	0x83FE,0x86D8,0x86E2,0x86E6,0x86D3,0x86E3,0x86DA,0x86EA,/* 0xB0-0xB7 */
+	0x86DD,0x86EB,0x86DC,0x86EC,0x86E9,0x86D7,0x86E8,0x86D1,/* 0xB8-0xBF */
+	0x8848,0x8856,0x8855,0x88BA,0x88D7,0x88B9,0x88B8,0x88C0,/* 0xC0-0xC7 */
+	0x88BE,0x88B6,0x88BC,0x88B7,0x88BD,0x88B2,0x8901,0x88C9,/* 0xC8-0xCF */
+	0x8995,0x8998,0x8997,0x89DD,0x89DA,0x89DB,0x8A4E,0x8A4D,/* 0xD0-0xD7 */
+	0x8A39,0x8A59,0x8A40,0x8A57,0x8A58,0x8A44,0x8A45,0x8A52,/* 0xD8-0xDF */
+	0x8A48,0x8A51,0x8A4A,0x8A4C,0x8A4F,0x8C5F,0x8C81,0x8C80,/* 0xE0-0xE7 */
+	0x8CBA,0x8CBE,0x8CB0,0x8CB9,0x8CB5,0x8D84,0x8D80,0x8D89,/* 0xE8-0xEF */
+	0x8DD8,0x8DD3,0x8DCD,0x8DC7,0x8DD6,0x8DDC,0x8DCF,0x8DD5,/* 0xF0-0xF7 */
+	0x8DD9,0x8DC8,0x8DD7,0x8DC5,0x8EEF,0x8EF7,0x8EFA,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8EF9,0x8EE6,0x8EEE,0x8EE5,0x8EF5,0x8EE7,0x8EE8,0x8EF6,/* 0x40-0x47 */
+	0x8EEB,0x8EF1,0x8EEC,0x8EF4,0x8EE9,0x902D,0x9034,0x902F,/* 0x48-0x4F */
+	0x9106,0x912C,0x9104,0x90FF,0x90FC,0x9108,0x90F9,0x90FB,/* 0x50-0x57 */
+	0x9101,0x9100,0x9107,0x9105,0x9103,0x9161,0x9164,0x915F,/* 0x58-0x5F */
+	0x9162,0x9160,0x9201,0x920A,0x9225,0x9203,0x921A,0x9226,/* 0x60-0x67 */
+	0x920F,0x920C,0x9200,0x9212,0x91FF,0x91FD,0x9206,0x9204,/* 0x68-0x6F */
+	0x9227,0x9202,0x921C,0x9224,0x9219,0x9217,0x9205,0x9216,/* 0x70-0x77 */
+	0x957B,0x958D,0x958C,0x9590,0x9687,0x967E,0x9688,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9689,0x9683,0x9680,0x96C2,0x96C8,0x96C3,0x96F1,/* 0xA0-0xA7 */
+	0x96F0,0x976C,0x9770,0x976E,0x9807,0x98A9,0x98EB,0x9CE6,/* 0xA8-0xAF */
+	0x9EF9,0x4E83,0x4E84,0x4EB6,0x50BD,0x50BF,0x50C6,0x50AE,/* 0xB0-0xB7 */
+	0x50C4,0x50CA,0x50B4,0x50C8,0x50C2,0x50B0,0x50C1,0x50BA,/* 0xB8-0xBF */
+	0x50B1,0x50CB,0x50C9,0x50B6,0x50B8,0x51D7,0x527A,0x5278,/* 0xC0-0xC7 */
+	0x527B,0x527C,0x55C3,0x55DB,0x55CC,0x55D0,0x55CB,0x55CA,/* 0xC8-0xCF */
+	0x55DD,0x55C0,0x55D4,0x55C4,0x55E9,0x55BF,0x55D2,0x558D,/* 0xD0-0xD7 */
+	0x55CF,0x55D5,0x55E2,0x55D6,0x55C8,0x55F2,0x55CD,0x55D9,/* 0xD8-0xDF */
+	0x55C2,0x5714,0x5853,0x5868,0x5864,0x584F,0x584D,0x5849,/* 0xE0-0xE7 */
+	0x586F,0x5855,0x584E,0x585D,0x5859,0x5865,0x585B,0x583D,/* 0xE8-0xEF */
+	0x5863,0x5871,0x58FC,0x5AC7,0x5AC4,0x5ACB,0x5ABA,0x5AB8,/* 0xF0-0xF7 */
+	0x5AB1,0x5AB5,0x5AB0,0x5ABF,0x5AC8,0x5ABB,0x5AC6,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DD[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5AB7,0x5AC0,0x5ACA,0x5AB4,0x5AB6,0x5ACD,0x5AB9,0x5A90,/* 0x40-0x47 */
+	0x5BD6,0x5BD8,0x5BD9,0x5C1F,0x5C33,0x5D71,0x5D63,0x5D4A,/* 0x48-0x4F */
+	0x5D65,0x5D72,0x5D6C,0x5D5E,0x5D68,0x5D67,0x5D62,0x5DF0,/* 0x50-0x57 */
+	0x5E4F,0x5E4E,0x5E4A,0x5E4D,0x5E4B,0x5EC5,0x5ECC,0x5EC6,/* 0x58-0x5F */
+	0x5ECB,0x5EC7,0x5F40,0x5FAF,0x5FAD,0x60F7,0x6149,0x614A,/* 0x60-0x67 */
+	0x612B,0x6145,0x6136,0x6132,0x612E,0x6146,0x612F,0x614F,/* 0x68-0x6F */
+	0x6129,0x6140,0x6220,0x9168,0x6223,0x6225,0x6224,0x63C5,/* 0x70-0x77 */
+	0x63F1,0x63EB,0x6410,0x6412,0x6409,0x6420,0x6424,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6433,0x6443,0x641F,0x6415,0x6418,0x6439,0x6437,/* 0xA0-0xA7 */
+	0x6422,0x6423,0x640C,0x6426,0x6430,0x6428,0x6441,0x6435,/* 0xA8-0xAF */
+	0x642F,0x640A,0x641A,0x6440,0x6425,0x6427,0x640B,0x63E7,/* 0xB0-0xB7 */
+	0x641B,0x642E,0x6421,0x640E,0x656F,0x6592,0x65D3,0x6686,/* 0xB8-0xBF */
+	0x668C,0x6695,0x6690,0x668B,0x668A,0x6699,0x6694,0x6678,/* 0xC0-0xC7 */
+	0x6720,0x6966,0x695F,0x6938,0x694E,0x6962,0x6971,0x693F,/* 0xC8-0xCF */
+	0x6945,0x696A,0x6939,0x6942,0x6957,0x6959,0x697A,0x6948,/* 0xD0-0xD7 */
+	0x6949,0x6935,0x696C,0x6933,0x693D,0x6965,0x68F0,0x6978,/* 0xD8-0xDF */
+	0x6934,0x6969,0x6940,0x696F,0x6944,0x6976,0x6958,0x6941,/* 0xE0-0xE7 */
+	0x6974,0x694C,0x693B,0x694B,0x6937,0x695C,0x694F,0x6951,/* 0xE8-0xEF */
+	0x6932,0x6952,0x692F,0x697B,0x693C,0x6B46,0x6B45,0x6B43,/* 0xF0-0xF7 */
+	0x6B42,0x6B48,0x6B41,0x6B9B,0xFA0D,0x6BFB,0x6BFC,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6BF9,0x6BF7,0x6BF8,0x6E9B,0x6ED6,0x6EC8,0x6E8F,0x6EC0,/* 0x40-0x47 */
+	0x6E9F,0x6E93,0x6E94,0x6EA0,0x6EB1,0x6EB9,0x6EC6,0x6ED2,/* 0x48-0x4F */
+	0x6EBD,0x6EC1,0x6E9E,0x6EC9,0x6EB7,0x6EB0,0x6ECD,0x6EA6,/* 0x50-0x57 */
+	0x6ECF,0x6EB2,0x6EBE,0x6EC3,0x6EDC,0x6ED8,0x6E99,0x6E92,/* 0x58-0x5F */
+	0x6E8E,0x6E8D,0x6EA4,0x6EA1,0x6EBF,0x6EB3,0x6ED0,0x6ECA,/* 0x60-0x67 */
+	0x6E97,0x6EAE,0x6EA3,0x7147,0x7154,0x7152,0x7163,0x7160,/* 0x68-0x6F */
+	0x7141,0x715D,0x7162,0x7172,0x7178,0x716A,0x7161,0x7142,/* 0x70-0x77 */
+	0x7158,0x7143,0x714B,0x7170,0x715F,0x7150,0x7153,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7144,0x714D,0x715A,0x724F,0x728D,0x728C,0x7291,/* 0xA0-0xA7 */
+	0x7290,0x728E,0x733C,0x7342,0x733B,0x733A,0x7340,0x734A,/* 0xA8-0xAF */
+	0x7349,0x7444,0x744A,0x744B,0x7452,0x7451,0x7457,0x7440,/* 0xB0-0xB7 */
+	0x744F,0x7450,0x744E,0x7442,0x7446,0x744D,0x7454,0x74E1,/* 0xB8-0xBF */
+	0x74FF,0x74FE,0x74FD,0x751D,0x7579,0x7577,0x6983,0x75EF,/* 0xC0-0xC7 */
+	0x760F,0x7603,0x75F7,0x75FE,0x75FC,0x75F9,0x75F8,0x7610,/* 0xC8-0xCF */
+	0x75FB,0x75F6,0x75ED,0x75F5,0x75FD,0x7699,0x76B5,0x76DD,/* 0xD0-0xD7 */
+	0x7755,0x775F,0x7760,0x7752,0x7756,0x775A,0x7769,0x7767,/* 0xD8-0xDF */
+	0x7754,0x7759,0x776D,0x77E0,0x7887,0x789A,0x7894,0x788F,/* 0xE0-0xE7 */
+	0x7884,0x7895,0x7885,0x7886,0x78A1,0x7883,0x7879,0x7899,/* 0xE8-0xEF */
+	0x7880,0x7896,0x787B,0x797C,0x7982,0x797D,0x7979,0x7A11,/* 0xF0-0xF7 */
+	0x7A18,0x7A19,0x7A12,0x7A17,0x7A15,0x7A22,0x7A13,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_DF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7A1B,0x7A10,0x7AA3,0x7AA2,0x7A9E,0x7AEB,0x7B66,0x7B64,/* 0x40-0x47 */
+	0x7B6D,0x7B74,0x7B69,0x7B72,0x7B65,0x7B73,0x7B71,0x7B70,/* 0x48-0x4F */
+	0x7B61,0x7B78,0x7B76,0x7B63,0x7CB2,0x7CB4,0x7CAF,0x7D88,/* 0x50-0x57 */
+	0x7D86,0x7D80,0x7D8D,0x7D7F,0x7D85,0x7D7A,0x7D8E,0x7D7B,/* 0x58-0x5F */
+	0x7D83,0x7D7C,0x7D8C,0x7D94,0x7D84,0x7D7D,0x7D92,0x7F6D,/* 0x60-0x67 */
+	0x7F6B,0x7F67,0x7F68,0x7F6C,0x7FA6,0x7FA5,0x7FA7,0x7FDB,/* 0x68-0x6F */
+	0x7FDC,0x8021,0x8164,0x8160,0x8177,0x815C,0x8169,0x815B,/* 0x70-0x77 */
+	0x8162,0x8172,0x6721,0x815E,0x8176,0x8167,0x816F,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8144,0x8161,0x821D,0x8249,0x8244,0x8240,0x8242,/* 0xA0-0xA7 */
+	0x8245,0x84F1,0x843F,0x8456,0x8476,0x8479,0x848F,0x848D,/* 0xA8-0xAF */
+	0x8465,0x8451,0x8440,0x8486,0x8467,0x8430,0x844D,0x847D,/* 0xB0-0xB7 */
+	0x845A,0x8459,0x8474,0x8473,0x845D,0x8507,0x845E,0x8437,/* 0xB8-0xBF */
+	0x843A,0x8434,0x847A,0x8443,0x8478,0x8432,0x8445,0x8429,/* 0xC0-0xC7 */
+	0x83D9,0x844B,0x842F,0x8442,0x842D,0x845F,0x8470,0x8439,/* 0xC8-0xCF */
+	0x844E,0x844C,0x8452,0x846F,0x84C5,0x848E,0x843B,0x8447,/* 0xD0-0xD7 */
+	0x8436,0x8433,0x8468,0x847E,0x8444,0x842B,0x8460,0x8454,/* 0xD8-0xDF */
+	0x846E,0x8450,0x870B,0x8704,0x86F7,0x870C,0x86FA,0x86D6,/* 0xE0-0xE7 */
+	0x86F5,0x874D,0x86F8,0x870E,0x8709,0x8701,0x86F6,0x870D,/* 0xE8-0xEF */
+	0x8705,0x88D6,0x88CB,0x88CD,0x88CE,0x88DE,0x88DB,0x88DA,/* 0xF0-0xF7 */
+	0x88CC,0x88D0,0x8985,0x899B,0x89DF,0x89E5,0x89E4,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x89E1,0x89E0,0x89E2,0x89DC,0x89E6,0x8A76,0x8A86,0x8A7F,/* 0x40-0x47 */
+	0x8A61,0x8A3F,0x8A77,0x8A82,0x8A84,0x8A75,0x8A83,0x8A81,/* 0x48-0x4F */
+	0x8A74,0x8A7A,0x8C3C,0x8C4B,0x8C4A,0x8C65,0x8C64,0x8C66,/* 0x50-0x57 */
+	0x8C86,0x8C84,0x8C85,0x8CCC,0x8D68,0x8D69,0x8D91,0x8D8C,/* 0x58-0x5F */
+	0x8D8E,0x8D8F,0x8D8D,0x8D93,0x8D94,0x8D90,0x8D92,0x8DF0,/* 0x60-0x67 */
+	0x8DE0,0x8DEC,0x8DF1,0x8DEE,0x8DD0,0x8DE9,0x8DE3,0x8DE2,/* 0x68-0x6F */
+	0x8DE7,0x8DF2,0x8DEB,0x8DF4,0x8F06,0x8EFF,0x8F01,0x8F00,/* 0x70-0x77 */
+	0x8F05,0x8F07,0x8F08,0x8F02,0x8F0B,0x9052,0x903F,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9044,0x9049,0x903D,0x9110,0x910D,0x910F,0x9111,/* 0xA0-0xA7 */
+	0x9116,0x9114,0x910B,0x910E,0x916E,0x916F,0x9248,0x9252,/* 0xA8-0xAF */
+	0x9230,0x923A,0x9266,0x9233,0x9265,0x925E,0x9283,0x922E,/* 0xB0-0xB7 */
+	0x924A,0x9246,0x926D,0x926C,0x924F,0x9260,0x9267,0x926F,/* 0xB8-0xBF */
+	0x9236,0x9261,0x9270,0x9231,0x9254,0x9263,0x9250,0x9272,/* 0xC0-0xC7 */
+	0x924E,0x9253,0x924C,0x9256,0x9232,0x959F,0x959C,0x959E,/* 0xC8-0xCF */
+	0x959B,0x9692,0x9693,0x9691,0x9697,0x96CE,0x96FA,0x96FD,/* 0xD0-0xD7 */
+	0x96F8,0x96F5,0x9773,0x9777,0x9778,0x9772,0x980F,0x980D,/* 0xD8-0xDF */
+	0x980E,0x98AC,0x98F6,0x98F9,0x99AF,0x99B2,0x99B0,0x99B5,/* 0xE0-0xE7 */
+	0x9AAD,0x9AAB,0x9B5B,0x9CEA,0x9CED,0x9CE7,0x9E80,0x9EFD,/* 0xE8-0xEF */
+	0x50E6,0x50D4,0x50D7,0x50E8,0x50F3,0x50DB,0x50EA,0x50DD,/* 0xF0-0xF7 */
+	0x50E4,0x50D3,0x50EC,0x50F0,0x50EF,0x50E3,0x50E0,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x51D8,0x5280,0x5281,0x52E9,0x52EB,0x5330,0x53AC,0x5627,/* 0x40-0x47 */
+	0x5615,0x560C,0x5612,0x55FC,0x560F,0x561C,0x5601,0x5613,/* 0x48-0x4F */
+	0x5602,0x55FA,0x561D,0x5604,0x55FF,0x55F9,0x5889,0x587C,/* 0x50-0x57 */
+	0x5890,0x5898,0x5886,0x5881,0x587F,0x5874,0x588B,0x587A,/* 0x58-0x5F */
+	0x5887,0x5891,0x588E,0x5876,0x5882,0x5888,0x587B,0x5894,/* 0x60-0x67 */
+	0x588F,0x58FE,0x596B,0x5ADC,0x5AEE,0x5AE5,0x5AD5,0x5AEA,/* 0x68-0x6F */
+	0x5ADA,0x5AED,0x5AEB,0x5AF3,0x5AE2,0x5AE0,0x5ADB,0x5AEC,/* 0x70-0x77 */
+	0x5ADE,0x5ADD,0x5AD9,0x5AE8,0x5ADF,0x5B77,0x5BE0,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x5BE3,0x5C63,0x5D82,0x5D80,0x5D7D,0x5D86,0x5D7A,/* 0xA0-0xA7 */
+	0x5D81,0x5D77,0x5D8A,0x5D89,0x5D88,0x5D7E,0x5D7C,0x5D8D,/* 0xA8-0xAF */
+	0x5D79,0x5D7F,0x5E58,0x5E59,0x5E53,0x5ED8,0x5ED1,0x5ED7,/* 0xB0-0xB7 */
+	0x5ECE,0x5EDC,0x5ED5,0x5ED9,0x5ED2,0x5ED4,0x5F44,0x5F43,/* 0xB8-0xBF */
+	0x5F6F,0x5FB6,0x612C,0x6128,0x6141,0x615E,0x6171,0x6173,/* 0xC0-0xC7 */
+	0x6152,0x6153,0x6172,0x616C,0x6180,0x6174,0x6154,0x617A,/* 0xC8-0xCF */
+	0x615B,0x6165,0x613B,0x616A,0x6161,0x6156,0x6229,0x6227,/* 0xD0-0xD7 */
+	0x622B,0x642B,0x644D,0x645B,0x645D,0x6474,0x6476,0x6472,/* 0xD8-0xDF */
+	0x6473,0x647D,0x6475,0x6466,0x64A6,0x644E,0x6482,0x645E,/* 0xE0-0xE7 */
+	0x645C,0x644B,0x6453,0x6460,0x6450,0x647F,0x643F,0x646C,/* 0xE8-0xEF */
+	0x646B,0x6459,0x6465,0x6477,0x6573,0x65A0,0x66A1,0x66A0,/* 0xF0-0xF7 */
+	0x669F,0x6705,0x6704,0x6722,0x69B1,0x69B6,0x69C9,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x69A0,0x69CE,0x6996,0x69B0,0x69AC,0x69BC,0x6991,0x6999,/* 0x40-0x47 */
+	0x698E,0x69A7,0x698D,0x69A9,0x69BE,0x69AF,0x69BF,0x69C4,/* 0x48-0x4F */
+	0x69BD,0x69A4,0x69D4,0x69B9,0x69CA,0x699A,0x69CF,0x69B3,/* 0x50-0x57 */
+	0x6993,0x69AA,0x69A1,0x699E,0x69D9,0x6997,0x6990,0x69C2,/* 0x58-0x5F */
+	0x69B5,0x69A5,0x69C6,0x6B4A,0x6B4D,0x6B4B,0x6B9E,0x6B9F,/* 0x60-0x67 */
+	0x6BA0,0x6BC3,0x6BC4,0x6BFE,0x6ECE,0x6EF5,0x6EF1,0x6F03,/* 0x68-0x6F */
+	0x6F25,0x6EF8,0x6F37,0x6EFB,0x6F2E,0x6F09,0x6F4E,0x6F19,/* 0x70-0x77 */
+	0x6F1A,0x6F27,0x6F18,0x6F3B,0x6F12,0x6EED,0x6F0A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x6F36,0x6F73,0x6EF9,0x6EEE,0x6F2D,0x6F40,0x6F30,/* 0xA0-0xA7 */
+	0x6F3C,0x6F35,0x6EEB,0x6F07,0x6F0E,0x6F43,0x6F05,0x6EFD,/* 0xA8-0xAF */
+	0x6EF6,0x6F39,0x6F1C,0x6EFC,0x6F3A,0x6F1F,0x6F0D,0x6F1E,/* 0xB0-0xB7 */
+	0x6F08,0x6F21,0x7187,0x7190,0x7189,0x7180,0x7185,0x7182,/* 0xB8-0xBF */
+	0x718F,0x717B,0x7186,0x7181,0x7197,0x7244,0x7253,0x7297,/* 0xC0-0xC7 */
+	0x7295,0x7293,0x7343,0x734D,0x7351,0x734C,0x7462,0x7473,/* 0xC8-0xCF */
+	0x7471,0x7475,0x7472,0x7467,0x746E,0x7500,0x7502,0x7503,/* 0xD0-0xD7 */
+	0x757D,0x7590,0x7616,0x7608,0x760C,0x7615,0x7611,0x760A,/* 0xD8-0xDF */
+	0x7614,0x76B8,0x7781,0x777C,0x7785,0x7782,0x776E,0x7780,/* 0xE0-0xE7 */
+	0x776F,0x777E,0x7783,0x78B2,0x78AA,0x78B4,0x78AD,0x78A8,/* 0xE8-0xEF */
+	0x787E,0x78AB,0x789E,0x78A5,0x78A0,0x78AC,0x78A2,0x78A4,/* 0xF0-0xF7 */
+	0x7998,0x798A,0x798B,0x7996,0x7995,0x7994,0x7993,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7997,0x7988,0x7992,0x7990,0x7A2B,0x7A4A,0x7A30,0x7A2F,/* 0x40-0x47 */
+	0x7A28,0x7A26,0x7AA8,0x7AAB,0x7AAC,0x7AEE,0x7B88,0x7B9C,/* 0x48-0x4F */
+	0x7B8A,0x7B91,0x7B90,0x7B96,0x7B8D,0x7B8C,0x7B9B,0x7B8E,/* 0x50-0x57 */
+	0x7B85,0x7B98,0x5284,0x7B99,0x7BA4,0x7B82,0x7CBB,0x7CBF,/* 0x58-0x5F */
+	0x7CBC,0x7CBA,0x7DA7,0x7DB7,0x7DC2,0x7DA3,0x7DAA,0x7DC1,/* 0x60-0x67 */
+	0x7DC0,0x7DC5,0x7D9D,0x7DCE,0x7DC4,0x7DC6,0x7DCB,0x7DCC,/* 0x68-0x6F */
+	0x7DAF,0x7DB9,0x7D96,0x7DBC,0x7D9F,0x7DA6,0x7DAE,0x7DA9,/* 0x70-0x77 */
+	0x7DA1,0x7DC9,0x7F73,0x7FE2,0x7FE3,0x7FE5,0x7FDE,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8024,0x805D,0x805C,0x8189,0x8186,0x8183,0x8187,/* 0xA0-0xA7 */
+	0x818D,0x818C,0x818B,0x8215,0x8497,0x84A4,0x84A1,0x849F,/* 0xA8-0xAF */
+	0x84BA,0x84CE,0x84C2,0x84AC,0x84AE,0x84AB,0x84B9,0x84B4,/* 0xB0-0xB7 */
+	0x84C1,0x84CD,0x84AA,0x849A,0x84B1,0x84D0,0x849D,0x84A7,/* 0xB8-0xBF */
+	0x84BB,0x84A2,0x8494,0x84C7,0x84CC,0x849B,0x84A9,0x84AF,/* 0xC0-0xC7 */
+	0x84A8,0x84D6,0x8498,0x84B6,0x84CF,0x84A0,0x84D7,0x84D4,/* 0xC8-0xCF */
+	0x84D2,0x84DB,0x84B0,0x8491,0x8661,0x8733,0x8723,0x8728,/* 0xD0-0xD7 */
+	0x876B,0x8740,0x872E,0x871E,0x8721,0x8719,0x871B,0x8743,/* 0xD8-0xDF */
+	0x872C,0x8741,0x873E,0x8746,0x8720,0x8732,0x872A,0x872D,/* 0xE0-0xE7 */
+	0x873C,0x8712,0x873A,0x8731,0x8735,0x8742,0x8726,0x8727,/* 0xE8-0xEF */
+	0x8738,0x8724,0x871A,0x8730,0x8711,0x88F7,0x88E7,0x88F1,/* 0xF0-0xF7 */
+	0x88F2,0x88FA,0x88FE,0x88EE,0x88FC,0x88F6,0x88FB,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x88F0,0x88EC,0x88EB,0x899D,0x89A1,0x899F,0x899E,0x89E9,/* 0x40-0x47 */
+	0x89EB,0x89E8,0x8AAB,0x8A99,0x8A8B,0x8A92,0x8A8F,0x8A96,/* 0x48-0x4F */
+	0x8C3D,0x8C68,0x8C69,0x8CD5,0x8CCF,0x8CD7,0x8D96,0x8E09,/* 0x50-0x57 */
+	0x8E02,0x8DFF,0x8E0D,0x8DFD,0x8E0A,0x8E03,0x8E07,0x8E06,/* 0x58-0x5F */
+	0x8E05,0x8DFE,0x8E00,0x8E04,0x8F10,0x8F11,0x8F0E,0x8F0D,/* 0x60-0x67 */
+	0x9123,0x911C,0x9120,0x9122,0x911F,0x911D,0x911A,0x9124,/* 0x68-0x6F */
+	0x9121,0x911B,0x917A,0x9172,0x9179,0x9173,0x92A5,0x92A4,/* 0x70-0x77 */
+	0x9276,0x929B,0x927A,0x92A0,0x9294,0x92AA,0x928D,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x92A6,0x929A,0x92AB,0x9279,0x9297,0x927F,0x92A3,/* 0xA0-0xA7 */
+	0x92EE,0x928E,0x9282,0x9295,0x92A2,0x927D,0x9288,0x92A1,/* 0xA8-0xAF */
+	0x928A,0x9286,0x928C,0x9299,0x92A7,0x927E,0x9287,0x92A9,/* 0xB0-0xB7 */
+	0x929D,0x928B,0x922D,0x969E,0x96A1,0x96FF,0x9758,0x977D,/* 0xB8-0xBF */
+	0x977A,0x977E,0x9783,0x9780,0x9782,0x977B,0x9784,0x9781,/* 0xC0-0xC7 */
+	0x977F,0x97CE,0x97CD,0x9816,0x98AD,0x98AE,0x9902,0x9900,/* 0xC8-0xCF */
+	0x9907,0x999D,0x999C,0x99C3,0x99B9,0x99BB,0x99BA,0x99C2,/* 0xD0-0xD7 */
+	0x99BD,0x99C7,0x9AB1,0x9AE3,0x9AE7,0x9B3E,0x9B3F,0x9B60,/* 0xD8-0xDF */
+	0x9B61,0x9B5F,0x9CF1,0x9CF2,0x9CF5,0x9EA7,0x50FF,0x5103,/* 0xE0-0xE7 */
+	0x5130,0x50F8,0x5106,0x5107,0x50F6,0x50FE,0x510B,0x510C,/* 0xE8-0xEF */
+	0x50FD,0x510A,0x528B,0x528C,0x52F1,0x52EF,0x5648,0x5642,/* 0xF0-0xF7 */
+	0x564C,0x5635,0x5641,0x564A,0x5649,0x5646,0x5658,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x565A,0x5640,0x5633,0x563D,0x562C,0x563E,0x5638,0x562A,/* 0x40-0x47 */
+	0x563A,0x571A,0x58AB,0x589D,0x58B1,0x58A0,0x58A3,0x58AF,/* 0x48-0x4F */
+	0x58AC,0x58A5,0x58A1,0x58FF,0x5AFF,0x5AF4,0x5AFD,0x5AF7,/* 0x50-0x57 */
+	0x5AF6,0x5B03,0x5AF8,0x5B02,0x5AF9,0x5B01,0x5B07,0x5B05,/* 0x58-0x5F */
+	0x5B0F,0x5C67,0x5D99,0x5D97,0x5D9F,0x5D92,0x5DA2,0x5D93,/* 0x60-0x67 */
+	0x5D95,0x5DA0,0x5D9C,0x5DA1,0x5D9A,0x5D9E,0x5E69,0x5E5D,/* 0x68-0x6F */
+	0x5E60,0x5E5C,0x7DF3,0x5EDB,0x5EDE,0x5EE1,0x5F49,0x5FB2,/* 0x70-0x77 */
+	0x618B,0x6183,0x6179,0x61B1,0x61B0,0x61A2,0x6189,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x619B,0x6193,0x61AF,0x61AD,0x619F,0x6192,0x61AA,/* 0xA0-0xA7 */
+	0x61A1,0x618D,0x6166,0x61B3,0x622D,0x646E,0x6470,0x6496,/* 0xA8-0xAF */
+	0x64A0,0x6485,0x6497,0x649C,0x648F,0x648B,0x648A,0x648C,/* 0xB0-0xB7 */
+	0x64A3,0x649F,0x6468,0x64B1,0x6498,0x6576,0x657A,0x6579,/* 0xB8-0xBF */
+	0x657B,0x65B2,0x65B3,0x66B5,0x66B0,0x66A9,0x66B2,0x66B7,/* 0xC0-0xC7 */
+	0x66AA,0x66AF,0x6A00,0x6A06,0x6A17,0x69E5,0x69F8,0x6A15,/* 0xC8-0xCF */
+	0x69F1,0x69E4,0x6A20,0x69FF,0x69EC,0x69E2,0x6A1B,0x6A1D,/* 0xD0-0xD7 */
+	0x69FE,0x6A27,0x69F2,0x69EE,0x6A14,0x69F7,0x69E7,0x6A40,/* 0xD8-0xDF */
+	0x6A08,0x69E6,0x69FB,0x6A0D,0x69FC,0x69EB,0x6A09,0x6A04,/* 0xE0-0xE7 */
+	0x6A18,0x6A25,0x6A0F,0x69F6,0x6A26,0x6A07,0x69F4,0x6A16,/* 0xE8-0xEF */
+	0x6B51,0x6BA5,0x6BA3,0x6BA2,0x6BA6,0x6C01,0x6C00,0x6BFF,/* 0xF0-0xF7 */
+	0x6C02,0x6F41,0x6F26,0x6F7E,0x6F87,0x6FC6,0x6F92,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6F8D,0x6F89,0x6F8C,0x6F62,0x6F4F,0x6F85,0x6F5A,0x6F96,/* 0x40-0x47 */
+	0x6F76,0x6F6C,0x6F82,0x6F55,0x6F72,0x6F52,0x6F50,0x6F57,/* 0x48-0x4F */
+	0x6F94,0x6F93,0x6F5D,0x6F00,0x6F61,0x6F6B,0x6F7D,0x6F67,/* 0x50-0x57 */
+	0x6F90,0x6F53,0x6F8B,0x6F69,0x6F7F,0x6F95,0x6F63,0x6F77,/* 0x58-0x5F */
+	0x6F6A,0x6F7B,0x71B2,0x71AF,0x719B,0x71B0,0x71A0,0x719A,/* 0x60-0x67 */
+	0x71A9,0x71B5,0x719D,0x71A5,0x719E,0x71A4,0x71A1,0x71AA,/* 0x68-0x6F */
+	0x719C,0x71A7,0x71B3,0x7298,0x729A,0x7358,0x7352,0x735E,/* 0x70-0x77 */
+	0x735F,0x7360,0x735D,0x735B,0x7361,0x735A,0x7359,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7362,0x7487,0x7489,0x748A,0x7486,0x7481,0x747D,/* 0xA0-0xA7 */
+	0x7485,0x7488,0x747C,0x7479,0x7508,0x7507,0x757E,0x7625,/* 0xA8-0xAF */
+	0x761E,0x7619,0x761D,0x761C,0x7623,0x761A,0x7628,0x761B,/* 0xB0-0xB7 */
+	0x769C,0x769D,0x769E,0x769B,0x778D,0x778F,0x7789,0x7788,/* 0xB8-0xBF */
+	0x78CD,0x78BB,0x78CF,0x78CC,0x78D1,0x78CE,0x78D4,0x78C8,/* 0xC0-0xC7 */
+	0x78C3,0x78C4,0x78C9,0x799A,0x79A1,0x79A0,0x799C,0x79A2,/* 0xC8-0xCF */
+	0x799B,0x6B76,0x7A39,0x7AB2,0x7AB4,0x7AB3,0x7BB7,0x7BCB,/* 0xD0-0xD7 */
+	0x7BBE,0x7BAC,0x7BCE,0x7BAF,0x7BB9,0x7BCA,0x7BB5,0x7CC5,/* 0xD8-0xDF */
+	0x7CC8,0x7CCC,0x7CCB,0x7DF7,0x7DDB,0x7DEA,0x7DE7,0x7DD7,/* 0xE0-0xE7 */
+	0x7DE1,0x7E03,0x7DFA,0x7DE6,0x7DF6,0x7DF1,0x7DF0,0x7DEE,/* 0xE8-0xEF */
+	0x7DDF,0x7F76,0x7FAC,0x7FB0,0x7FAD,0x7FED,0x7FEB,0x7FEA,/* 0xF0-0xF7 */
+	0x7FEC,0x7FE6,0x7FE8,0x8064,0x8067,0x81A3,0x819F,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x819E,0x8195,0x81A2,0x8199,0x8197,0x8216,0x824F,0x8253,/* 0x40-0x47 */
+	0x8252,0x8250,0x824E,0x8251,0x8524,0x853B,0x850F,0x8500,/* 0x48-0x4F */
+	0x8529,0x850E,0x8509,0x850D,0x851F,0x850A,0x8527,0x851C,/* 0x50-0x57 */
+	0x84FB,0x852B,0x84FA,0x8508,0x850C,0x84F4,0x852A,0x84F2,/* 0x58-0x5F */
+	0x8515,0x84F7,0x84EB,0x84F3,0x84FC,0x8512,0x84EA,0x84E9,/* 0x60-0x67 */
+	0x8516,0x84FE,0x8528,0x851D,0x852E,0x8502,0x84FD,0x851E,/* 0x68-0x6F */
+	0x84F6,0x8531,0x8526,0x84E7,0x84E8,0x84F0,0x84EF,0x84F9,/* 0x70-0x77 */
+	0x8518,0x8520,0x8530,0x850B,0x8519,0x852F,0x8662,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8756,0x8763,0x8764,0x8777,0x87E1,0x8773,0x8758,/* 0xA0-0xA7 */
+	0x8754,0x875B,0x8752,0x8761,0x875A,0x8751,0x875E,0x876D,/* 0xA8-0xAF */
+	0x876A,0x8750,0x874E,0x875F,0x875D,0x876F,0x876C,0x877A,/* 0xB0-0xB7 */
+	0x876E,0x875C,0x8765,0x874F,0x877B,0x8775,0x8762,0x8767,/* 0xB8-0xBF */
+	0x8769,0x885A,0x8905,0x890C,0x8914,0x890B,0x8917,0x8918,/* 0xC0-0xC7 */
+	0x8919,0x8906,0x8916,0x8911,0x890E,0x8909,0x89A2,0x89A4,/* 0xC8-0xCF */
+	0x89A3,0x89ED,0x89F0,0x89EC,0x8ACF,0x8AC6,0x8AB8,0x8AD3,/* 0xD0-0xD7 */
+	0x8AD1,0x8AD4,0x8AD5,0x8ABB,0x8AD7,0x8ABE,0x8AC0,0x8AC5,/* 0xD8-0xDF */
+	0x8AD8,0x8AC3,0x8ABA,0x8ABD,0x8AD9,0x8C3E,0x8C4D,0x8C8F,/* 0xE0-0xE7 */
+	0x8CE5,0x8CDF,0x8CD9,0x8CE8,0x8CDA,0x8CDD,0x8CE7,0x8DA0,/* 0xE8-0xEF */
+	0x8D9C,0x8DA1,0x8D9B,0x8E20,0x8E23,0x8E25,0x8E24,0x8E2E,/* 0xF0-0xF7 */
+	0x8E15,0x8E1B,0x8E16,0x8E11,0x8E19,0x8E26,0x8E27,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8E14,0x8E12,0x8E18,0x8E13,0x8E1C,0x8E17,0x8E1A,0x8F2C,/* 0x40-0x47 */
+	0x8F24,0x8F18,0x8F1A,0x8F20,0x8F23,0x8F16,0x8F17,0x9073,/* 0x48-0x4F */
+	0x9070,0x906F,0x9067,0x906B,0x912F,0x912B,0x9129,0x912A,/* 0x50-0x57 */
+	0x9132,0x9126,0x912E,0x9185,0x9186,0x918A,0x9181,0x9182,/* 0x58-0x5F */
+	0x9184,0x9180,0x92D0,0x92C3,0x92C4,0x92C0,0x92D9,0x92B6,/* 0x60-0x67 */
+	0x92CF,0x92F1,0x92DF,0x92D8,0x92E9,0x92D7,0x92DD,0x92CC,/* 0x68-0x6F */
+	0x92EF,0x92C2,0x92E8,0x92CA,0x92C8,0x92CE,0x92E6,0x92CD,/* 0x70-0x77 */
+	0x92D5,0x92C9,0x92E0,0x92DE,0x92E7,0x92D1,0x92D3,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x92B5,0x92E1,0x92C6,0x92B4,0x957C,0x95AC,0x95AB,/* 0xA0-0xA7 */
+	0x95AE,0x95B0,0x96A4,0x96A2,0x96D3,0x9705,0x9708,0x9702,/* 0xA8-0xAF */
+	0x975A,0x978A,0x978E,0x9788,0x97D0,0x97CF,0x981E,0x981D,/* 0xB0-0xB7 */
+	0x9826,0x9829,0x9828,0x9820,0x981B,0x9827,0x98B2,0x9908,/* 0xB8-0xBF */
+	0x98FA,0x9911,0x9914,0x9916,0x9917,0x9915,0x99DC,0x99CD,/* 0xC0-0xC7 */
+	0x99CF,0x99D3,0x99D4,0x99CE,0x99C9,0x99D6,0x99D8,0x99CB,/* 0xC8-0xCF */
+	0x99D7,0x99CC,0x9AB3,0x9AEC,0x9AEB,0x9AF3,0x9AF2,0x9AF1,/* 0xD0-0xD7 */
+	0x9B46,0x9B43,0x9B67,0x9B74,0x9B71,0x9B66,0x9B76,0x9B75,/* 0xD8-0xDF */
+	0x9B70,0x9B68,0x9B64,0x9B6C,0x9CFC,0x9CFA,0x9CFD,0x9CFF,/* 0xE0-0xE7 */
+	0x9CF7,0x9D07,0x9D00,0x9CF9,0x9CFB,0x9D08,0x9D05,0x9D04,/* 0xE8-0xEF */
+	0x9E83,0x9ED3,0x9F0F,0x9F10,0x511C,0x5113,0x5117,0x511A,/* 0xF0-0xF7 */
+	0x5111,0x51DE,0x5334,0x53E1,0x5670,0x5660,0x566E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_E9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5673,0x5666,0x5663,0x566D,0x5672,0x565E,0x5677,0x571C,/* 0x40-0x47 */
+	0x571B,0x58C8,0x58BD,0x58C9,0x58BF,0x58BA,0x58C2,0x58BC,/* 0x48-0x4F */
+	0x58C6,0x5B17,0x5B19,0x5B1B,0x5B21,0x5B14,0x5B13,0x5B10,/* 0x50-0x57 */
+	0x5B16,0x5B28,0x5B1A,0x5B20,0x5B1E,0x5BEF,0x5DAC,0x5DB1,/* 0x58-0x5F */
+	0x5DA9,0x5DA7,0x5DB5,0x5DB0,0x5DAE,0x5DAA,0x5DA8,0x5DB2,/* 0x60-0x67 */
+	0x5DAD,0x5DAF,0x5DB4,0x5E67,0x5E68,0x5E66,0x5E6F,0x5EE9,/* 0x68-0x6F */
+	0x5EE7,0x5EE6,0x5EE8,0x5EE5,0x5F4B,0x5FBC,0x619D,0x61A8,/* 0x70-0x77 */
+	0x6196,0x61C5,0x61B4,0x61C6,0x61C1,0x61CC,0x61BA,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x61BF,0x61B8,0x618C,0x64D7,0x64D6,0x64D0,0x64CF,/* 0xA0-0xA7 */
+	0x64C9,0x64BD,0x6489,0x64C3,0x64DB,0x64F3,0x64D9,0x6533,/* 0xA8-0xAF */
+	0x657F,0x657C,0x65A2,0x66C8,0x66BE,0x66C0,0x66CA,0x66CB,/* 0xB0-0xB7 */
+	0x66CF,0x66BD,0x66BB,0x66BA,0x66CC,0x6723,0x6A34,0x6A66,/* 0xB8-0xBF */
+	0x6A49,0x6A67,0x6A32,0x6A68,0x6A3E,0x6A5D,0x6A6D,0x6A76,/* 0xC0-0xC7 */
+	0x6A5B,0x6A51,0x6A28,0x6A5A,0x6A3B,0x6A3F,0x6A41,0x6A6A,/* 0xC8-0xCF */
+	0x6A64,0x6A50,0x6A4F,0x6A54,0x6A6F,0x6A69,0x6A60,0x6A3C,/* 0xD0-0xD7 */
+	0x6A5E,0x6A56,0x6A55,0x6A4D,0x6A4E,0x6A46,0x6B55,0x6B54,/* 0xD8-0xDF */
+	0x6B56,0x6BA7,0x6BAA,0x6BAB,0x6BC8,0x6BC7,0x6C04,0x6C03,/* 0xE0-0xE7 */
+	0x6C06,0x6FAD,0x6FCB,0x6FA3,0x6FC7,0x6FBC,0x6FCE,0x6FC8,/* 0xE8-0xEF */
+	0x6F5E,0x6FC4,0x6FBD,0x6F9E,0x6FCA,0x6FA8,0x7004,0x6FA5,/* 0xF0-0xF7 */
+	0x6FAE,0x6FBA,0x6FAC,0x6FAA,0x6FCF,0x6FBF,0x6FB8,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EA[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6FA2,0x6FC9,0x6FAB,0x6FCD,0x6FAF,0x6FB2,0x6FB0,0x71C5,/* 0x40-0x47 */
+	0x71C2,0x71BF,0x71B8,0x71D6,0x71C0,0x71C1,0x71CB,0x71D4,/* 0x48-0x4F */
+	0x71CA,0x71C7,0x71CF,0x71BD,0x71D8,0x71BC,0x71C6,0x71DA,/* 0x50-0x57 */
+	0x71DB,0x729D,0x729E,0x7369,0x7366,0x7367,0x736C,0x7365,/* 0x58-0x5F */
+	0x736B,0x736A,0x747F,0x749A,0x74A0,0x7494,0x7492,0x7495,/* 0x60-0x67 */
+	0x74A1,0x750B,0x7580,0x762F,0x762D,0x7631,0x763D,0x7633,/* 0x68-0x6F */
+	0x763C,0x7635,0x7632,0x7630,0x76BB,0x76E6,0x779A,0x779D,/* 0x70-0x77 */
+	0x77A1,0x779C,0x779B,0x77A2,0x77A3,0x7795,0x7799,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7797,0x78DD,0x78E9,0x78E5,0x78EA,0x78DE,0x78E3,/* 0xA0-0xA7 */
+	0x78DB,0x78E1,0x78E2,0x78ED,0x78DF,0x78E0,0x79A4,0x7A44,/* 0xA8-0xAF */
+	0x7A48,0x7A47,0x7AB6,0x7AB8,0x7AB5,0x7AB1,0x7AB7,0x7BDE,/* 0xB0-0xB7 */
+	0x7BE3,0x7BE7,0x7BDD,0x7BD5,0x7BE5,0x7BDA,0x7BE8,0x7BF9,/* 0xB8-0xBF */
+	0x7BD4,0x7BEA,0x7BE2,0x7BDC,0x7BEB,0x7BD8,0x7BDF,0x7CD2,/* 0xC0-0xC7 */
+	0x7CD4,0x7CD7,0x7CD0,0x7CD1,0x7E12,0x7E21,0x7E17,0x7E0C,/* 0xC8-0xCF */
+	0x7E1F,0x7E20,0x7E13,0x7E0E,0x7E1C,0x7E15,0x7E1A,0x7E22,/* 0xD0-0xD7 */
+	0x7E0B,0x7E0F,0x7E16,0x7E0D,0x7E14,0x7E25,0x7E24,0x7F43,/* 0xD8-0xDF */
+	0x7F7B,0x7F7C,0x7F7A,0x7FB1,0x7FEF,0x802A,0x8029,0x806C,/* 0xE0-0xE7 */
+	0x81B1,0x81A6,0x81AE,0x81B9,0x81B5,0x81AB,0x81B0,0x81AC,/* 0xE8-0xEF */
+	0x81B4,0x81B2,0x81B7,0x81A7,0x81F2,0x8255,0x8256,0x8257,/* 0xF0-0xF7 */
+	0x8556,0x8545,0x856B,0x854D,0x8553,0x8561,0x8558,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EB[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8540,0x8546,0x8564,0x8541,0x8562,0x8544,0x8551,0x8547,/* 0x40-0x47 */
+	0x8563,0x853E,0x855B,0x8571,0x854E,0x856E,0x8575,0x8555,/* 0x48-0x4F */
+	0x8567,0x8560,0x858C,0x8566,0x855D,0x8554,0x8565,0x856C,/* 0x50-0x57 */
+	0x8663,0x8665,0x8664,0x879B,0x878F,0x8797,0x8793,0x8792,/* 0x58-0x5F */
+	0x8788,0x8781,0x8796,0x8798,0x8779,0x8787,0x87A3,0x8785,/* 0x60-0x67 */
+	0x8790,0x8791,0x879D,0x8784,0x8794,0x879C,0x879A,0x8789,/* 0x68-0x6F */
+	0x891E,0x8926,0x8930,0x892D,0x892E,0x8927,0x8931,0x8922,/* 0x70-0x77 */
+	0x8929,0x8923,0x892F,0x892C,0x891F,0x89F1,0x8AE0,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8AE2,0x8AF2,0x8AF4,0x8AF5,0x8ADD,0x8B14,0x8AE4,/* 0xA0-0xA7 */
+	0x8ADF,0x8AF0,0x8AC8,0x8ADE,0x8AE1,0x8AE8,0x8AFF,0x8AEF,/* 0xA8-0xAF */
+	0x8AFB,0x8C91,0x8C92,0x8C90,0x8CF5,0x8CEE,0x8CF1,0x8CF0,/* 0xB0-0xB7 */
+	0x8CF3,0x8D6C,0x8D6E,0x8DA5,0x8DA7,0x8E33,0x8E3E,0x8E38,/* 0xB8-0xBF */
+	0x8E40,0x8E45,0x8E36,0x8E3C,0x8E3D,0x8E41,0x8E30,0x8E3F,/* 0xC0-0xC7 */
+	0x8EBD,0x8F36,0x8F2E,0x8F35,0x8F32,0x8F39,0x8F37,0x8F34,/* 0xC8-0xCF */
+	0x9076,0x9079,0x907B,0x9086,0x90FA,0x9133,0x9135,0x9136,/* 0xD0-0xD7 */
+	0x9193,0x9190,0x9191,0x918D,0x918F,0x9327,0x931E,0x9308,/* 0xD8-0xDF */
+	0x931F,0x9306,0x930F,0x937A,0x9338,0x933C,0x931B,0x9323,/* 0xE0-0xE7 */
+	0x9312,0x9301,0x9346,0x932D,0x930E,0x930D,0x92CB,0x931D,/* 0xE8-0xEF */
+	0x92FA,0x9325,0x9313,0x92F9,0x92F7,0x9334,0x9302,0x9324,/* 0xF0-0xF7 */
+	0x92FF,0x9329,0x9339,0x9335,0x932A,0x9314,0x930C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EC[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x930B,0x92FE,0x9309,0x9300,0x92FB,0x9316,0x95BC,0x95CD,/* 0x40-0x47 */
+	0x95BE,0x95B9,0x95BA,0x95B6,0x95BF,0x95B5,0x95BD,0x96A9,/* 0x48-0x4F */
+	0x96D4,0x970B,0x9712,0x9710,0x9799,0x9797,0x9794,0x97F0,/* 0x50-0x57 */
+	0x97F8,0x9835,0x982F,0x9832,0x9924,0x991F,0x9927,0x9929,/* 0x58-0x5F */
+	0x999E,0x99EE,0x99EC,0x99E5,0x99E4,0x99F0,0x99E3,0x99EA,/* 0x60-0x67 */
+	0x99E9,0x99E7,0x9AB9,0x9ABF,0x9AB4,0x9ABB,0x9AF6,0x9AFA,/* 0x68-0x6F */
+	0x9AF9,0x9AF7,0x9B33,0x9B80,0x9B85,0x9B87,0x9B7C,0x9B7E,/* 0x70-0x77 */
+	0x9B7B,0x9B82,0x9B93,0x9B92,0x9B90,0x9B7A,0x9B95,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9B7D,0x9B88,0x9D25,0x9D17,0x9D20,0x9D1E,0x9D14,/* 0xA0-0xA7 */
+	0x9D29,0x9D1D,0x9D18,0x9D22,0x9D10,0x9D19,0x9D1F,0x9E88,/* 0xA8-0xAF */
+	0x9E86,0x9E87,0x9EAE,0x9EAD,0x9ED5,0x9ED6,0x9EFA,0x9F12,/* 0xB0-0xB7 */
+	0x9F3D,0x5126,0x5125,0x5122,0x5124,0x5120,0x5129,0x52F4,/* 0xB8-0xBF */
+	0x5693,0x568C,0x568D,0x5686,0x5684,0x5683,0x567E,0x5682,/* 0xC0-0xC7 */
+	0x567F,0x5681,0x58D6,0x58D4,0x58CF,0x58D2,0x5B2D,0x5B25,/* 0xC8-0xCF */
+	0x5B32,0x5B23,0x5B2C,0x5B27,0x5B26,0x5B2F,0x5B2E,0x5B7B,/* 0xD0-0xD7 */
+	0x5BF1,0x5BF2,0x5DB7,0x5E6C,0x5E6A,0x5FBE,0x5FBB,0x61C3,/* 0xD8-0xDF */
+	0x61B5,0x61BC,0x61E7,0x61E0,0x61E5,0x61E4,0x61E8,0x61DE,/* 0xE0-0xE7 */
+	0x64EF,0x64E9,0x64E3,0x64EB,0x64E4,0x64E8,0x6581,0x6580,/* 0xE8-0xEF */
+	0x65B6,0x65DA,0x66D2,0x6A8D,0x6A96,0x6A81,0x6AA5,0x6A89,/* 0xF0-0xF7 */
+	0x6A9F,0x6A9B,0x6AA1,0x6A9E,0x6A87,0x6A93,0x6A8E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_ED[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x6A95,0x6A83,0x6AA8,0x6AA4,0x6A91,0x6A7F,0x6AA6,0x6A9A,/* 0x40-0x47 */
+	0x6A85,0x6A8C,0x6A92,0x6B5B,0x6BAD,0x6C09,0x6FCC,0x6FA9,/* 0x48-0x4F */
+	0x6FF4,0x6FD4,0x6FE3,0x6FDC,0x6FED,0x6FE7,0x6FE6,0x6FDE,/* 0x50-0x57 */
+	0x6FF2,0x6FDD,0x6FE2,0x6FE8,0x71E1,0x71F1,0x71E8,0x71F2,/* 0x58-0x5F */
+	0x71E4,0x71F0,0x71E2,0x7373,0x736E,0x736F,0x7497,0x74B2,/* 0x60-0x67 */
+	0x74AB,0x7490,0x74AA,0x74AD,0x74B1,0x74A5,0x74AF,0x7510,/* 0x68-0x6F */
+	0x7511,0x7512,0x750F,0x7584,0x7643,0x7648,0x7649,0x7647,/* 0x70-0x77 */
+	0x76A4,0x76E9,0x77B5,0x77AB,0x77B2,0x77B7,0x77B6,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x77B4,0x77B1,0x77A8,0x77F0,0x78F3,0x78FD,0x7902,/* 0xA0-0xA7 */
+	0x78FB,0x78FC,0x78F2,0x7905,0x78F9,0x78FE,0x7904,0x79AB,/* 0xA8-0xAF */
+	0x79A8,0x7A5C,0x7A5B,0x7A56,0x7A58,0x7A54,0x7A5A,0x7ABE,/* 0xB0-0xB7 */
+	0x7AC0,0x7AC1,0x7C05,0x7C0F,0x7BF2,0x7C00,0x7BFF,0x7BFB,/* 0xB8-0xBF */
+	0x7C0E,0x7BF4,0x7C0B,0x7BF3,0x7C02,0x7C09,0x7C03,0x7C01,/* 0xC0-0xC7 */
+	0x7BF8,0x7BFD,0x7C06,0x7BF0,0x7BF1,0x7C10,0x7C0A,0x7CE8,/* 0xC8-0xCF */
+	0x7E2D,0x7E3C,0x7E42,0x7E33,0x9848,0x7E38,0x7E2A,0x7E49,/* 0xD0-0xD7 */
+	0x7E40,0x7E47,0x7E29,0x7E4C,0x7E30,0x7E3B,0x7E36,0x7E44,/* 0xD8-0xDF */
+	0x7E3A,0x7F45,0x7F7F,0x7F7E,0x7F7D,0x7FF4,0x7FF2,0x802C,/* 0xE0-0xE7 */
+	0x81BB,0x81C4,0x81CC,0x81CA,0x81C5,0x81C7,0x81BC,0x81E9,/* 0xE8-0xEF */
+	0x825B,0x825A,0x825C,0x8583,0x8580,0x858F,0x85A7,0x8595,/* 0xF0-0xF7 */
+	0x85A0,0x858B,0x85A3,0x857B,0x85A4,0x859A,0x859E,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EE[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8577,0x857C,0x8589,0x85A1,0x857A,0x8578,0x8557,0x858E,/* 0x40-0x47 */
+	0x8596,0x8586,0x858D,0x8599,0x859D,0x8581,0x85A2,0x8582,/* 0x48-0x4F */
+	0x8588,0x8585,0x8579,0x8576,0x8598,0x8590,0x859F,0x8668,/* 0x50-0x57 */
+	0x87BE,0x87AA,0x87AD,0x87C5,0x87B0,0x87AC,0x87B9,0x87B5,/* 0x58-0x5F */
+	0x87BC,0x87AE,0x87C9,0x87C3,0x87C2,0x87CC,0x87B7,0x87AF,/* 0x60-0x67 */
+	0x87C4,0x87CA,0x87B4,0x87B6,0x87BF,0x87B8,0x87BD,0x87DE,/* 0x68-0x6F */
+	0x87B2,0x8935,0x8933,0x893C,0x893E,0x8941,0x8952,0x8937,/* 0x70-0x77 */
+	0x8942,0x89AD,0x89AF,0x89AE,0x89F2,0x89F3,0x8B1E,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x8B18,0x8B16,0x8B11,0x8B05,0x8B0B,0x8B22,0x8B0F,/* 0xA0-0xA7 */
+	0x8B12,0x8B15,0x8B07,0x8B0D,0x8B08,0x8B06,0x8B1C,0x8B13,/* 0xA8-0xAF */
+	0x8B1A,0x8C4F,0x8C70,0x8C72,0x8C71,0x8C6F,0x8C95,0x8C94,/* 0xB0-0xB7 */
+	0x8CF9,0x8D6F,0x8E4E,0x8E4D,0x8E53,0x8E50,0x8E4C,0x8E47,/* 0xB8-0xBF */
+	0x8F43,0x8F40,0x9085,0x907E,0x9138,0x919A,0x91A2,0x919B,/* 0xC0-0xC7 */
+	0x9199,0x919F,0x91A1,0x919D,0x91A0,0x93A1,0x9383,0x93AF,/* 0xC8-0xCF */
+	0x9364,0x9356,0x9347,0x937C,0x9358,0x935C,0x9376,0x9349,/* 0xD0-0xD7 */
+	0x9350,0x9351,0x9360,0x936D,0x938F,0x934C,0x936A,0x9379,/* 0xD8-0xDF */
+	0x9357,0x9355,0x9352,0x934F,0x9371,0x9377,0x937B,0x9361,/* 0xE0-0xE7 */
+	0x935E,0x9363,0x9367,0x9380,0x934E,0x9359,0x95C7,0x95C0,/* 0xE8-0xEF */
+	0x95C9,0x95C3,0x95C5,0x95B7,0x96AE,0x96B0,0x96AC,0x9720,/* 0xF0-0xF7 */
+	0x971F,0x9718,0x971D,0x9719,0x979A,0x97A1,0x979C,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_EF[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x979E,0x979D,0x97D5,0x97D4,0x97F1,0x9841,0x9844,0x984A,/* 0x40-0x47 */
+	0x9849,0x9845,0x9843,0x9925,0x992B,0x992C,0x992A,0x9933,/* 0x48-0x4F */
+	0x9932,0x992F,0x992D,0x9931,0x9930,0x9998,0x99A3,0x99A1,/* 0x50-0x57 */
+	0x9A02,0x99FA,0x99F4,0x99F7,0x99F9,0x99F8,0x99F6,0x99FB,/* 0x58-0x5F */
+	0x99FD,0x99FE,0x99FC,0x9A03,0x9ABE,0x9AFE,0x9AFD,0x9B01,/* 0x60-0x67 */
+	0x9AFC,0x9B48,0x9B9A,0x9BA8,0x9B9E,0x9B9B,0x9BA6,0x9BA1,/* 0x68-0x6F */
+	0x9BA5,0x9BA4,0x9B86,0x9BA2,0x9BA0,0x9BAF,0x9D33,0x9D41,/* 0x70-0x77 */
+	0x9D67,0x9D36,0x9D2E,0x9D2F,0x9D31,0x9D38,0x9D30,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9D45,0x9D42,0x9D43,0x9D3E,0x9D37,0x9D40,0x9D3D,/* 0xA0-0xA7 */
+	0x7FF5,0x9D2D,0x9E8A,0x9E89,0x9E8D,0x9EB0,0x9EC8,0x9EDA,/* 0xA8-0xAF */
+	0x9EFB,0x9EFF,0x9F24,0x9F23,0x9F22,0x9F54,0x9FA0,0x5131,/* 0xB0-0xB7 */
+	0x512D,0x512E,0x5698,0x569C,0x5697,0x569A,0x569D,0x5699,/* 0xB8-0xBF */
+	0x5970,0x5B3C,0x5C69,0x5C6A,0x5DC0,0x5E6D,0x5E6E,0x61D8,/* 0xC0-0xC7 */
+	0x61DF,0x61ED,0x61EE,0x61F1,0x61EA,0x61F0,0x61EB,0x61D6,/* 0xC8-0xCF */
+	0x61E9,0x64FF,0x6504,0x64FD,0x64F8,0x6501,0x6503,0x64FC,/* 0xD0-0xD7 */
+	0x6594,0x65DB,0x66DA,0x66DB,0x66D8,0x6AC5,0x6AB9,0x6ABD,/* 0xD8-0xDF */
+	0x6AE1,0x6AC6,0x6ABA,0x6AB6,0x6AB7,0x6AC7,0x6AB4,0x6AAD,/* 0xE0-0xE7 */
+	0x6B5E,0x6BC9,0x6C0B,0x7007,0x700C,0x700D,0x7001,0x7005,/* 0xE8-0xEF */
+	0x7014,0x700E,0x6FFF,0x7000,0x6FFB,0x7026,0x6FFC,0x6FF7,/* 0xF0-0xF7 */
+	0x700A,0x7201,0x71FF,0x71F9,0x7203,0x71FD,0x7376,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F0[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x74B8,0x74C0,0x74B5,0x74C1,0x74BE,0x74B6,0x74BB,0x74C2,/* 0x40-0x47 */
+	0x7514,0x7513,0x765C,0x7664,0x7659,0x7650,0x7653,0x7657,/* 0x48-0x4F */
+	0x765A,0x76A6,0x76BD,0x76EC,0x77C2,0x77BA,0x78FF,0x790C,/* 0x50-0x57 */
+	0x7913,0x7914,0x7909,0x7910,0x7912,0x7911,0x79AD,0x79AC,/* 0x58-0x5F */
+	0x7A5F,0x7C1C,0x7C29,0x7C19,0x7C20,0x7C1F,0x7C2D,0x7C1D,/* 0x60-0x67 */
+	0x7C26,0x7C28,0x7C22,0x7C25,0x7C30,0x7E5C,0x7E50,0x7E56,/* 0x68-0x6F */
+	0x7E63,0x7E58,0x7E62,0x7E5F,0x7E51,0x7E60,0x7E57,0x7E53,/* 0x70-0x77 */
+	0x7FB5,0x7FB3,0x7FF7,0x7FF8,0x8075,0x81D1,0x81D2,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x81D0,0x825F,0x825E,0x85B4,0x85C6,0x85C0,0x85C3,/* 0xA0-0xA7 */
+	0x85C2,0x85B3,0x85B5,0x85BD,0x85C7,0x85C4,0x85BF,0x85CB,/* 0xA8-0xAF */
+	0x85CE,0x85C8,0x85C5,0x85B1,0x85B6,0x85D2,0x8624,0x85B8,/* 0xB0-0xB7 */
+	0x85B7,0x85BE,0x8669,0x87E7,0x87E6,0x87E2,0x87DB,0x87EB,/* 0xB8-0xBF */
+	0x87EA,0x87E5,0x87DF,0x87F3,0x87E4,0x87D4,0x87DC,0x87D3,/* 0xC0-0xC7 */
+	0x87ED,0x87D8,0x87E3,0x87A4,0x87D7,0x87D9,0x8801,0x87F4,/* 0xC8-0xCF */
+	0x87E8,0x87DD,0x8953,0x894B,0x894F,0x894C,0x8946,0x8950,/* 0xD0-0xD7 */
+	0x8951,0x8949,0x8B2A,0x8B27,0x8B23,0x8B33,0x8B30,0x8B35,/* 0xD8-0xDF */
+	0x8B47,0x8B2F,0x8B3C,0x8B3E,0x8B31,0x8B25,0x8B37,0x8B26,/* 0xE0-0xE7 */
+	0x8B36,0x8B2E,0x8B24,0x8B3B,0x8B3D,0x8B3A,0x8C42,0x8C75,/* 0xE8-0xEF */
+	0x8C99,0x8C98,0x8C97,0x8CFE,0x8D04,0x8D02,0x8D00,0x8E5C,/* 0xF0-0xF7 */
+	0x8E62,0x8E60,0x8E57,0x8E56,0x8E5E,0x8E65,0x8E67,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F1[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8E5B,0x8E5A,0x8E61,0x8E5D,0x8E69,0x8E54,0x8F46,0x8F47,/* 0x40-0x47 */
+	0x8F48,0x8F4B,0x9128,0x913A,0x913B,0x913E,0x91A8,0x91A5,/* 0x48-0x4F */
+	0x91A7,0x91AF,0x91AA,0x93B5,0x938C,0x9392,0x93B7,0x939B,/* 0x50-0x57 */
+	0x939D,0x9389,0x93A7,0x938E,0x93AA,0x939E,0x93A6,0x9395,/* 0x58-0x5F */
+	0x9388,0x9399,0x939F,0x938D,0x93B1,0x9391,0x93B2,0x93A4,/* 0x60-0x67 */
+	0x93A8,0x93B4,0x93A3,0x93A5,0x95D2,0x95D3,0x95D1,0x96B3,/* 0x68-0x6F */
+	0x96D7,0x96DA,0x5DC2,0x96DF,0x96D8,0x96DD,0x9723,0x9722,/* 0x70-0x77 */
+	0x9725,0x97AC,0x97AE,0x97A8,0x97AB,0x97A4,0x97AA,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x97A2,0x97A5,0x97D7,0x97D9,0x97D6,0x97D8,0x97FA,/* 0xA0-0xA7 */
+	0x9850,0x9851,0x9852,0x98B8,0x9941,0x993C,0x993A,0x9A0F,/* 0xA8-0xAF */
+	0x9A0B,0x9A09,0x9A0D,0x9A04,0x9A11,0x9A0A,0x9A05,0x9A07,/* 0xB0-0xB7 */
+	0x9A06,0x9AC0,0x9ADC,0x9B08,0x9B04,0x9B05,0x9B29,0x9B35,/* 0xB8-0xBF */
+	0x9B4A,0x9B4C,0x9B4B,0x9BC7,0x9BC6,0x9BC3,0x9BBF,0x9BC1,/* 0xC0-0xC7 */
+	0x9BB5,0x9BB8,0x9BD3,0x9BB6,0x9BC4,0x9BB9,0x9BBD,0x9D5C,/* 0xC8-0xCF */
+	0x9D53,0x9D4F,0x9D4A,0x9D5B,0x9D4B,0x9D59,0x9D56,0x9D4C,/* 0xD0-0xD7 */
+	0x9D57,0x9D52,0x9D54,0x9D5F,0x9D58,0x9D5A,0x9E8E,0x9E8C,/* 0xD8-0xDF */
+	0x9EDF,0x9F01,0x9F00,0x9F16,0x9F25,0x9F2B,0x9F2A,0x9F29,/* 0xE0-0xE7 */
+	0x9F28,0x9F4C,0x9F55,0x5134,0x5135,0x5296,0x52F7,0x53B4,/* 0xE8-0xEF */
+	0x56AB,0x56AD,0x56A6,0x56A7,0x56AA,0x56AC,0x58DA,0x58DD,/* 0xF0-0xF7 */
+	0x58DB,0x5912,0x5B3D,0x5B3E,0x5B3F,0x5DC3,0x5E70,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F2[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x5FBF,0x61FB,0x6507,0x6510,0x650D,0x6509,0x650C,0x650E,/* 0x40-0x47 */
+	0x6584,0x65DE,0x65DD,0x66DE,0x6AE7,0x6AE0,0x6ACC,0x6AD1,/* 0x48-0x4F */
+	0x6AD9,0x6ACB,0x6ADF,0x6ADC,0x6AD0,0x6AEB,0x6ACF,0x6ACD,/* 0x50-0x57 */
+	0x6ADE,0x6B60,0x6BB0,0x6C0C,0x7019,0x7027,0x7020,0x7016,/* 0x58-0x5F */
+	0x702B,0x7021,0x7022,0x7023,0x7029,0x7017,0x7024,0x701C,/* 0x60-0x67 */
+	0x702A,0x720C,0x720A,0x7207,0x7202,0x7205,0x72A5,0x72A6,/* 0x68-0x6F */
+	0x72A4,0x72A3,0x72A1,0x74CB,0x74C5,0x74B7,0x74C3,0x7516,/* 0x70-0x77 */
+	0x7660,0x77C9,0x77CA,0x77C4,0x77F1,0x791D,0x791B,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x7921,0x791C,0x7917,0x791E,0x79B0,0x7A67,0x7A68,/* 0xA0-0xA7 */
+	0x7C33,0x7C3C,0x7C39,0x7C2C,0x7C3B,0x7CEC,0x7CEA,0x7E76,/* 0xA8-0xAF */
+	0x7E75,0x7E78,0x7E70,0x7E77,0x7E6F,0x7E7A,0x7E72,0x7E74,/* 0xB0-0xB7 */
+	0x7E68,0x7F4B,0x7F4A,0x7F83,0x7F86,0x7FB7,0x7FFD,0x7FFE,/* 0xB8-0xBF */
+	0x8078,0x81D7,0x81D5,0x8264,0x8261,0x8263,0x85EB,0x85F1,/* 0xC0-0xC7 */
+	0x85ED,0x85D9,0x85E1,0x85E8,0x85DA,0x85D7,0x85EC,0x85F2,/* 0xC8-0xCF */
+	0x85F8,0x85D8,0x85DF,0x85E3,0x85DC,0x85D1,0x85F0,0x85E6,/* 0xD0-0xD7 */
+	0x85EF,0x85DE,0x85E2,0x8800,0x87FA,0x8803,0x87F6,0x87F7,/* 0xD8-0xDF */
+	0x8809,0x880C,0x880B,0x8806,0x87FC,0x8808,0x87FF,0x880A,/* 0xE0-0xE7 */
+	0x8802,0x8962,0x895A,0x895B,0x8957,0x8961,0x895C,0x8958,/* 0xE8-0xEF */
+	0x895D,0x8959,0x8988,0x89B7,0x89B6,0x89F6,0x8B50,0x8B48,/* 0xF0-0xF7 */
+	0x8B4A,0x8B40,0x8B53,0x8B56,0x8B54,0x8B4B,0x8B55,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F3[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8B51,0x8B42,0x8B52,0x8B57,0x8C43,0x8C77,0x8C76,0x8C9A,/* 0x40-0x47 */
+	0x8D06,0x8D07,0x8D09,0x8DAC,0x8DAA,0x8DAD,0x8DAB,0x8E6D,/* 0x48-0x4F */
+	0x8E78,0x8E73,0x8E6A,0x8E6F,0x8E7B,0x8EC2,0x8F52,0x8F51,/* 0x50-0x57 */
+	0x8F4F,0x8F50,0x8F53,0x8FB4,0x9140,0x913F,0x91B0,0x91AD,/* 0x58-0x5F */
+	0x93DE,0x93C7,0x93CF,0x93C2,0x93DA,0x93D0,0x93F9,0x93EC,/* 0x60-0x67 */
+	0x93CC,0x93D9,0x93A9,0x93E6,0x93CA,0x93D4,0x93EE,0x93E3,/* 0x68-0x6F */
+	0x93D5,0x93C4,0x93CE,0x93C0,0x93D2,0x93E7,0x957D,0x95DA,/* 0x70-0x77 */
+	0x95DB,0x96E1,0x9729,0x972B,0x972C,0x9728,0x9726,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x97B3,0x97B7,0x97B6,0x97DD,0x97DE,0x97DF,0x985C,/* 0xA0-0xA7 */
+	0x9859,0x985D,0x9857,0x98BF,0x98BD,0x98BB,0x98BE,0x9948,/* 0xA8-0xAF */
+	0x9947,0x9943,0x99A6,0x99A7,0x9A1A,0x9A15,0x9A25,0x9A1D,/* 0xB0-0xB7 */
+	0x9A24,0x9A1B,0x9A22,0x9A20,0x9A27,0x9A23,0x9A1E,0x9A1C,/* 0xB8-0xBF */
+	0x9A14,0x9AC2,0x9B0B,0x9B0A,0x9B0E,0x9B0C,0x9B37,0x9BEA,/* 0xC0-0xC7 */
+	0x9BEB,0x9BE0,0x9BDE,0x9BE4,0x9BE6,0x9BE2,0x9BF0,0x9BD4,/* 0xC8-0xCF */
+	0x9BD7,0x9BEC,0x9BDC,0x9BD9,0x9BE5,0x9BD5,0x9BE1,0x9BDA,/* 0xD0-0xD7 */
+	0x9D77,0x9D81,0x9D8A,0x9D84,0x9D88,0x9D71,0x9D80,0x9D78,/* 0xD8-0xDF */
+	0x9D86,0x9D8B,0x9D8C,0x9D7D,0x9D6B,0x9D74,0x9D75,0x9D70,/* 0xE0-0xE7 */
+	0x9D69,0x9D85,0x9D73,0x9D7B,0x9D82,0x9D6F,0x9D79,0x9D7F,/* 0xE8-0xEF */
+	0x9D87,0x9D68,0x9E94,0x9E91,0x9EC0,0x9EFC,0x9F2D,0x9F40,/* 0xF0-0xF7 */
+	0x9F41,0x9F4D,0x9F56,0x9F57,0x9F58,0x5337,0x56B2,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F4[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x56B5,0x56B3,0x58E3,0x5B45,0x5DC6,0x5DC7,0x5EEE,0x5EEF,/* 0x40-0x47 */
+	0x5FC0,0x5FC1,0x61F9,0x6517,0x6516,0x6515,0x6513,0x65DF,/* 0x48-0x4F */
+	0x66E8,0x66E3,0x66E4,0x6AF3,0x6AF0,0x6AEA,0x6AE8,0x6AF9,/* 0x50-0x57 */
+	0x6AF1,0x6AEE,0x6AEF,0x703C,0x7035,0x702F,0x7037,0x7034,/* 0x58-0x5F */
+	0x7031,0x7042,0x7038,0x703F,0x703A,0x7039,0x7040,0x703B,/* 0x60-0x67 */
+	0x7033,0x7041,0x7213,0x7214,0x72A8,0x737D,0x737C,0x74BA,/* 0x68-0x6F */
+	0x76AB,0x76AA,0x76BE,0x76ED,0x77CC,0x77CE,0x77CF,0x77CD,/* 0x70-0x77 */
+	0x77F2,0x7925,0x7923,0x7927,0x7928,0x7924,0x7929,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x79B2,0x7A6E,0x7A6C,0x7A6D,0x7AF7,0x7C49,0x7C48,/* 0xA0-0xA7 */
+	0x7C4A,0x7C47,0x7C45,0x7CEE,0x7E7B,0x7E7E,0x7E81,0x7E80,/* 0xA8-0xAF */
+	0x7FBA,0x7FFF,0x8079,0x81DB,0x81D9,0x820B,0x8268,0x8269,/* 0xB0-0xB7 */
+	0x8622,0x85FF,0x8601,0x85FE,0x861B,0x8600,0x85F6,0x8604,/* 0xB8-0xBF */
+	0x8609,0x8605,0x860C,0x85FD,0x8819,0x8810,0x8811,0x8817,/* 0xC0-0xC7 */
+	0x8813,0x8816,0x8963,0x8966,0x89B9,0x89F7,0x8B60,0x8B6A,/* 0xC8-0xCF */
+	0x8B5D,0x8B68,0x8B63,0x8B65,0x8B67,0x8B6D,0x8DAE,0x8E86,/* 0xD0-0xD7 */
+	0x8E88,0x8E84,0x8F59,0x8F56,0x8F57,0x8F55,0x8F58,0x8F5A,/* 0xD8-0xDF */
+	0x908D,0x9143,0x9141,0x91B7,0x91B5,0x91B2,0x91B3,0x940B,/* 0xE0-0xE7 */
+	0x9413,0x93FB,0x9420,0x940F,0x9414,0x93FE,0x9415,0x9410,/* 0xE8-0xEF */
+	0x9428,0x9419,0x940D,0x93F5,0x9400,0x93F7,0x9407,0x940E,/* 0xF0-0xF7 */
+	0x9416,0x9412,0x93FA,0x9409,0x93F8,0x940A,0x93FF,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F5[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x93FC,0x940C,0x93F6,0x9411,0x9406,0x95DE,0x95E0,0x95DF,/* 0x40-0x47 */
+	0x972E,0x972F,0x97B9,0x97BB,0x97FD,0x97FE,0x9860,0x9862,/* 0x48-0x4F */
+	0x9863,0x985F,0x98C1,0x98C2,0x9950,0x994E,0x9959,0x994C,/* 0x50-0x57 */
+	0x994B,0x9953,0x9A32,0x9A34,0x9A31,0x9A2C,0x9A2A,0x9A36,/* 0x58-0x5F */
+	0x9A29,0x9A2E,0x9A38,0x9A2D,0x9AC7,0x9ACA,0x9AC6,0x9B10,/* 0x60-0x67 */
+	0x9B12,0x9B11,0x9C0B,0x9C08,0x9BF7,0x9C05,0x9C12,0x9BF8,/* 0x68-0x6F */
+	0x9C40,0x9C07,0x9C0E,0x9C06,0x9C17,0x9C14,0x9C09,0x9D9F,/* 0x70-0x77 */
+	0x9D99,0x9DA4,0x9D9D,0x9D92,0x9D98,0x9D90,0x9D9B,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9DA0,0x9D94,0x9D9C,0x9DAA,0x9D97,0x9DA1,0x9D9A,/* 0xA0-0xA7 */
+	0x9DA2,0x9DA8,0x9D9E,0x9DA3,0x9DBF,0x9DA9,0x9D96,0x9DA6,/* 0xA8-0xAF */
+	0x9DA7,0x9E99,0x9E9B,0x9E9A,0x9EE5,0x9EE4,0x9EE7,0x9EE6,/* 0xB0-0xB7 */
+	0x9F30,0x9F2E,0x9F5B,0x9F60,0x9F5E,0x9F5D,0x9F59,0x9F91,/* 0xB8-0xBF */
+	0x513A,0x5139,0x5298,0x5297,0x56C3,0x56BD,0x56BE,0x5B48,/* 0xC0-0xC7 */
+	0x5B47,0x5DCB,0x5DCF,0x5EF1,0x61FD,0x651B,0x6B02,0x6AFC,/* 0xC8-0xCF */
+	0x6B03,0x6AF8,0x6B00,0x7043,0x7044,0x704A,0x7048,0x7049,/* 0xD0-0xD7 */
+	0x7045,0x7046,0x721D,0x721A,0x7219,0x737E,0x7517,0x766A,/* 0xD8-0xDF */
+	0x77D0,0x792D,0x7931,0x792F,0x7C54,0x7C53,0x7CF2,0x7E8A,/* 0xE0-0xE7 */
+	0x7E87,0x7E88,0x7E8B,0x7E86,0x7E8D,0x7F4D,0x7FBB,0x8030,/* 0xE8-0xEF */
+	0x81DD,0x8618,0x862A,0x8626,0x861F,0x8623,0x861C,0x8619,/* 0xF0-0xF7 */
+	0x8627,0x862E,0x8621,0x8620,0x8629,0x861E,0x8625,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F6[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8829,0x881D,0x881B,0x8820,0x8824,0x881C,0x882B,0x884A,/* 0x40-0x47 */
+	0x896D,0x8969,0x896E,0x896B,0x89FA,0x8B79,0x8B78,0x8B45,/* 0x48-0x4F */
+	0x8B7A,0x8B7B,0x8D10,0x8D14,0x8DAF,0x8E8E,0x8E8C,0x8F5E,/* 0x50-0x57 */
+	0x8F5B,0x8F5D,0x9146,0x9144,0x9145,0x91B9,0x943F,0x943B,/* 0x58-0x5F */
+	0x9436,0x9429,0x943D,0x943C,0x9430,0x9439,0x942A,0x9437,/* 0x60-0x67 */
+	0x942C,0x9440,0x9431,0x95E5,0x95E4,0x95E3,0x9735,0x973A,/* 0x68-0x6F */
+	0x97BF,0x97E1,0x9864,0x98C9,0x98C6,0x98C0,0x9958,0x9956,/* 0x70-0x77 */
+	0x9A39,0x9A3D,0x9A46,0x9A44,0x9A42,0x9A41,0x9A3A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9A3F,0x9ACD,0x9B15,0x9B17,0x9B18,0x9B16,0x9B3A,/* 0xA0-0xA7 */
+	0x9B52,0x9C2B,0x9C1D,0x9C1C,0x9C2C,0x9C23,0x9C28,0x9C29,/* 0xA8-0xAF */
+	0x9C24,0x9C21,0x9DB7,0x9DB6,0x9DBC,0x9DC1,0x9DC7,0x9DCA,/* 0xB0-0xB7 */
+	0x9DCF,0x9DBE,0x9DC5,0x9DC3,0x9DBB,0x9DB5,0x9DCE,0x9DB9,/* 0xB8-0xBF */
+	0x9DBA,0x9DAC,0x9DC8,0x9DB1,0x9DAD,0x9DCC,0x9DB3,0x9DCD,/* 0xC0-0xC7 */
+	0x9DB2,0x9E7A,0x9E9C,0x9EEB,0x9EEE,0x9EED,0x9F1B,0x9F18,/* 0xC8-0xCF */
+	0x9F1A,0x9F31,0x9F4E,0x9F65,0x9F64,0x9F92,0x4EB9,0x56C6,/* 0xD0-0xD7 */
+	0x56C5,0x56CB,0x5971,0x5B4B,0x5B4C,0x5DD5,0x5DD1,0x5EF2,/* 0xD8-0xDF */
+	0x6521,0x6520,0x6526,0x6522,0x6B0B,0x6B08,0x6B09,0x6C0D,/* 0xE0-0xE7 */
+	0x7055,0x7056,0x7057,0x7052,0x721E,0x721F,0x72A9,0x737F,/* 0xE8-0xEF */
+	0x74D8,0x74D5,0x74D9,0x74D7,0x766D,0x76AD,0x7935,0x79B4,/* 0xF0-0xF7 */
+	0x7A70,0x7A71,0x7C57,0x7C5C,0x7C59,0x7C5B,0x7C5A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F7[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7CF4,0x7CF1,0x7E91,0x7F4F,0x7F87,0x81DE,0x826B,0x8634,/* 0x40-0x47 */
+	0x8635,0x8633,0x862C,0x8632,0x8636,0x882C,0x8828,0x8826,/* 0x48-0x4F */
+	0x882A,0x8825,0x8971,0x89BF,0x89BE,0x89FB,0x8B7E,0x8B84,/* 0x50-0x57 */
+	0x8B82,0x8B86,0x8B85,0x8B7F,0x8D15,0x8E95,0x8E94,0x8E9A,/* 0x58-0x5F */
+	0x8E92,0x8E90,0x8E96,0x8E97,0x8F60,0x8F62,0x9147,0x944C,/* 0x60-0x67 */
+	0x9450,0x944A,0x944B,0x944F,0x9447,0x9445,0x9448,0x9449,/* 0x68-0x6F */
+	0x9446,0x973F,0x97E3,0x986A,0x9869,0x98CB,0x9954,0x995B,/* 0x70-0x77 */
+	0x9A4E,0x9A53,0x9A54,0x9A4C,0x9A4F,0x9A48,0x9A4A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9A49,0x9A52,0x9A50,0x9AD0,0x9B19,0x9B2B,0x9B3B,/* 0xA0-0xA7 */
+	0x9B56,0x9B55,0x9C46,0x9C48,0x9C3F,0x9C44,0x9C39,0x9C33,/* 0xA8-0xAF */
+	0x9C41,0x9C3C,0x9C37,0x9C34,0x9C32,0x9C3D,0x9C36,0x9DDB,/* 0xB0-0xB7 */
+	0x9DD2,0x9DDE,0x9DDA,0x9DCB,0x9DD0,0x9DDC,0x9DD1,0x9DDF,/* 0xB8-0xBF */
+	0x9DE9,0x9DD9,0x9DD8,0x9DD6,0x9DF5,0x9DD5,0x9DDD,0x9EB6,/* 0xC0-0xC7 */
+	0x9EF0,0x9F35,0x9F33,0x9F32,0x9F42,0x9F6B,0x9F95,0x9FA2,/* 0xC8-0xCF */
+	0x513D,0x5299,0x58E8,0x58E7,0x5972,0x5B4D,0x5DD8,0x882F,/* 0xD0-0xD7 */
+	0x5F4F,0x6201,0x6203,0x6204,0x6529,0x6525,0x6596,0x66EB,/* 0xD8-0xDF */
+	0x6B11,0x6B12,0x6B0F,0x6BCA,0x705B,0x705A,0x7222,0x7382,/* 0xE0-0xE7 */
+	0x7381,0x7383,0x7670,0x77D4,0x7C67,0x7C66,0x7E95,0x826C,/* 0xE8-0xEF */
+	0x863A,0x8640,0x8639,0x863C,0x8631,0x863B,0x863E,0x8830,/* 0xF0-0xF7 */
+	0x8832,0x882E,0x8833,0x8976,0x8974,0x8973,0x89FE,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F8[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x8B8C,0x8B8E,0x8B8B,0x8B88,0x8C45,0x8D19,0x8E98,0x8F64,/* 0x40-0x47 */
+	0x8F63,0x91BC,0x9462,0x9455,0x945D,0x9457,0x945E,0x97C4,/* 0x48-0x4F */
+	0x97C5,0x9800,0x9A56,0x9A59,0x9B1E,0x9B1F,0x9B20,0x9C52,/* 0x50-0x57 */
+	0x9C58,0x9C50,0x9C4A,0x9C4D,0x9C4B,0x9C55,0x9C59,0x9C4C,/* 0x58-0x5F */
+	0x9C4E,0x9DFB,0x9DF7,0x9DEF,0x9DE3,0x9DEB,0x9DF8,0x9DE4,/* 0x60-0x67 */
+	0x9DF6,0x9DE1,0x9DEE,0x9DE6,0x9DF2,0x9DF0,0x9DE2,0x9DEC,/* 0x68-0x6F */
+	0x9DF4,0x9DF3,0x9DE8,0x9DED,0x9EC2,0x9ED0,0x9EF2,0x9EF3,/* 0x70-0x77 */
+	0x9F06,0x9F1C,0x9F38,0x9F37,0x9F36,0x9F43,0x9F4F,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9F71,0x9F70,0x9F6E,0x9F6F,0x56D3,0x56CD,0x5B4E,/* 0xA0-0xA7 */
+	0x5C6D,0x652D,0x66ED,0x66EE,0x6B13,0x705F,0x7061,0x705D,/* 0xA8-0xAF */
+	0x7060,0x7223,0x74DB,0x74E5,0x77D5,0x7938,0x79B7,0x79B6,/* 0xB0-0xB7 */
+	0x7C6A,0x7E97,0x7F89,0x826D,0x8643,0x8838,0x8837,0x8835,/* 0xB8-0xBF */
+	0x884B,0x8B94,0x8B95,0x8E9E,0x8E9F,0x8EA0,0x8E9D,0x91BE,/* 0xC0-0xC7 */
+	0x91BD,0x91C2,0x946B,0x9468,0x9469,0x96E5,0x9746,0x9743,/* 0xC8-0xCF */
+	0x9747,0x97C7,0x97E5,0x9A5E,0x9AD5,0x9B59,0x9C63,0x9C67,/* 0xD0-0xD7 */
+	0x9C66,0x9C62,0x9C5E,0x9C60,0x9E02,0x9DFE,0x9E07,0x9E03,/* 0xD8-0xDF */
+	0x9E06,0x9E05,0x9E00,0x9E01,0x9E09,0x9DFF,0x9DFD,0x9E04,/* 0xE0-0xE7 */
+	0x9EA0,0x9F1E,0x9F46,0x9F74,0x9F75,0x9F76,0x56D4,0x652E,/* 0xE8-0xEF */
+	0x65B8,0x6B18,0x6B19,0x6B17,0x6B1A,0x7062,0x7226,0x72AA,/* 0xF0-0xF7 */
+	0x77D8,0x77D9,0x7939,0x7C69,0x7C6B,0x7CF6,0x7E9A,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t c2u_F9[256] = {
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x00-0x07 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x08-0x0F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x10-0x17 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x18-0x1F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x20-0x27 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x28-0x2F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x30-0x37 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x38-0x3F */
+	0x7E98,0x7E9B,0x7E99,0x81E0,0x81E1,0x8646,0x8647,0x8648,/* 0x40-0x47 */
+	0x8979,0x897A,0x897C,0x897B,0x89FF,0x8B98,0x8B99,0x8EA5,/* 0x48-0x4F */
+	0x8EA4,0x8EA3,0x946E,0x946D,0x946F,0x9471,0x9473,0x9749,/* 0x50-0x57 */
+	0x9872,0x995F,0x9C68,0x9C6E,0x9C6D,0x9E0B,0x9E0D,0x9E10,/* 0x58-0x5F */
+	0x9E0F,0x9E12,0x9E11,0x9EA1,0x9EF5,0x9F09,0x9F47,0x9F78,/* 0x60-0x67 */
+	0x9F7B,0x9F7A,0x9F79,0x571E,0x7066,0x7C6F,0x883C,0x8DB2,/* 0x68-0x6F */
+	0x8EA6,0x91C3,0x9474,0x9478,0x9476,0x9475,0x9A60,0x9C74,/* 0x70-0x77 */
+	0x9C73,0x9C71,0x9C75,0x9E14,0x9E13,0x9EF6,0x9F0A,0x0000,/* 0x78-0x7F */
+
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x80-0x87 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x88-0x8F */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x90-0x97 */
+	0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,/* 0x98-0x9F */
+	0x0000,0x9FA4,0x7068,0x7065,0x7CF7,0x866A,0x883E,0x883D,/* 0xA0-0xA7 */
+	0x883F,0x8B9E,0x8C9C,0x8EA9,0x8EC9,0x974B,0x9873,0x9874,/* 0xA8-0xAF */
+	0x98CC,0x9961,0x99AB,0x9A64,0x9A66,0x9A67,0x9B24,0x9E15,/* 0xB0-0xB7 */
+	0x9E17,0x9F48,0x6207,0x6B1E,0x7227,0x864C,0x8EA8,0x9482,/* 0xB8-0xBF */
+	0x9480,0x9481,0x9A69,0x9A68,0x9B2E,0x9E19,0x7229,0x864B,/* 0xC0-0xC7 */
+	0x8B9F,0x9483,0x9C79,0x9EB7,0x7675,0x9A6B,0x9C7A,0x9E1D,/* 0xC8-0xCF */
+	0x7069,0x706A,0x9EA4,0x9F7E,0x9F49,0x9F98,0x7881,0x92B9,/* 0xD0-0xD7 */
+	0x88CF,0x58BB,0x6052,0x7CA7,0x5AFA,0x2554,0x2566,0x2557,/* 0xD8-0xDF */
+	0x2560,0x256C,0x2563,0x255A,0x2569,0x255D,0x2552,0x2564,/* 0xE0-0xE7 */
+	0x2555,0x255E,0x256A,0x2561,0x2558,0x2567,0x255B,0x2553,/* 0xE8-0xEF */
+	0x2565,0x2556,0x255F,0x256B,0x2562,0x2559,0x2568,0x255C,/* 0xF0-0xF7 */
+	0x2551,0x2550,0x256D,0x256E,0x2570,0x256F,0x2593,0x0000,/* 0xF8-0xFF */
+};
+
+static wchar_t *page_charset2uni[256] = {
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   c2u_A1, c2u_A2, c2u_A3, c2u_A4, c2u_A5, c2u_A6, c2u_A7, 
+	c2u_A8, c2u_A9, c2u_AA, c2u_AB, c2u_AC, c2u_AD, c2u_AE, c2u_AF, 
+	c2u_B0, c2u_B1, c2u_B2, c2u_B3, c2u_B4, c2u_B5, c2u_B6, c2u_B7, 
+	c2u_B8, c2u_B9, c2u_BA, c2u_BB, c2u_BC, c2u_BD, c2u_BE, c2u_BF, 
+	c2u_C0, c2u_C1, c2u_C2, c2u_C3, c2u_C4, c2u_C5, c2u_C6, NULL,   
+	NULL,   c2u_C9, c2u_CA, c2u_CB, c2u_CC, c2u_CD, c2u_CE, c2u_CF, 
+	c2u_D0, c2u_D1, c2u_D2, c2u_D3, c2u_D4, c2u_D5, c2u_D6, c2u_D7, 
+	c2u_D8, c2u_D9, c2u_DA, c2u_DB, c2u_DC, c2u_DD, c2u_DE, c2u_DF, 
+	c2u_E0, c2u_E1, c2u_E2, c2u_E3, c2u_E4, c2u_E5, c2u_E6, c2u_E7, 
+	c2u_E8, c2u_E9, c2u_EA, c2u_EB, c2u_EC, c2u_ED, c2u_EE, c2u_EF, 
+	c2u_F0, c2u_F1, c2u_F2, c2u_F3, c2u_F4, c2u_F5, c2u_F6, c2u_F7, 
+	c2u_F8, c2u_F9, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
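+
+/*
+ * Illustrative sketch only; the helper name below is hypothetical and is
+ * not part of the generated table set above.  It shows how the two-level
+ * lookup works: the lead byte of a double-byte code selects a 256-entry
+ * page from page_charset2uni[], and the trail byte indexes into that
+ * page.  A NULL page pointer or a 0x0000 entry means the code point has
+ * no Unicode mapping.
+ */
+static inline wchar_t sketch_char2uni(unsigned char lead, unsigned char trail)
+{
+	wchar_t *page = page_charset2uni[lead];
+
+	return page ? page[trail] : 0;
+}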
+
+static unsigned char u2c_02[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0xBE, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xA3, 0xBC, 0xA3, 0xBD, 0xA3, 0xBF, /* 0xC8-0xCB */
+	0x00, 0x00, 0xA1, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xA3, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+};
+
+static unsigned char u2c_03[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA3, 0x44, 0xA3, 0x45, 0xA3, 0x46, /* 0x90-0x93 */
+	0xA3, 0x47, 0xA3, 0x48, 0xA3, 0x49, 0xA3, 0x4A, /* 0x94-0x97 */
+	0xA3, 0x4B, 0xA3, 0x4C, 0xA3, 0x4D, 0xA3, 0x4E, /* 0x98-0x9B */
+	0xA3, 0x4F, 0xA3, 0x50, 0xA3, 0x51, 0xA3, 0x52, /* 0x9C-0x9F */
+	0xA3, 0x53, 0xA3, 0x54, 0x00, 0x00, 0xA3, 0x55, /* 0xA0-0xA3 */
+	0xA3, 0x56, 0xA3, 0x57, 0xA3, 0x58, 0xA3, 0x59, /* 0xA4-0xA7 */
+	0xA3, 0x5A, 0xA3, 0x5B, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xA3, 0x5C, 0xA3, 0x5D, 0xA3, 0x5E, /* 0xB0-0xB3 */
+	0xA3, 0x5F, 0xA3, 0x60, 0xA3, 0x61, 0xA3, 0x62, /* 0xB4-0xB7 */
+	0xA3, 0x63, 0xA3, 0x64, 0xA3, 0x65, 0xA3, 0x66, /* 0xB8-0xBB */
+	0xA3, 0x67, 0xA3, 0x68, 0xA3, 0x69, 0xA3, 0x6A, /* 0xBC-0xBF */
+	0xA3, 0x6B, 0xA3, 0x6C, 0x00, 0x00, 0xA3, 0x6D, /* 0xC0-0xC3 */
+	0xA3, 0x6E, 0xA3, 0x6F, 0xA3, 0x70, 0xA3, 0x71, /* 0xC4-0xC7 */
+	0xA3, 0x72, 0xA3, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+};
+
+static unsigned char u2c_20[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0x56, /* 0x10-0x13 */
+	0xA1, 0x58, 0xA2, 0x77, 0xA1, 0xFC, 0x00, 0x00, /* 0x14-0x17 */
+	0xA1, 0xA5, 0xA1, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA1, 0xA7, 0xA1, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0x45, 0x00, 0x00, /* 0x20-0x23 */
+	0xA3, 0xBB, 0xA1, 0x4C, 0xA1, 0x4B, 0xA1, 0x45, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xAC, 0xA1, 0xB2, /* 0x30-0x33 */
+	0x00, 0x00, 0xA1, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xB0, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xC3, 0x00, 0x00, /* 0x3C-0x3F */
+};
+
+static unsigned char u2c_21[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA2, 0x4A, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xC1, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xA2, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA2, 0xB9, 0xA2, 0xBA, 0xA2, 0xBB, 0xA2, 0xBC, /* 0x60-0x63 */
+	0xA2, 0xBD, 0xA2, 0xBE, 0xA2, 0xBF, 0xA2, 0xC0, /* 0x64-0x67 */
+	0xA2, 0xC1, 0xA2, 0xC2, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xA1, 0xF6, 0xA1, 0xF4, 0xA1, 0xF7, 0xA1, 0xF5, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xF8, 0xA1, 0xF9, /* 0x94-0x97 */
+	0xA1, 0xFB, 0xA1, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+};
+
+static unsigned char u2c_22[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xA2, 0x41, 0xA2, 0x42, 0x00, 0x00, /* 0x14-0x17 */
+	0xA2, 0x58, 0x00, 0x00, 0xA1, 0xD4, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xDB, 0xA1, 0xE8, /* 0x1C-0x1F */
+	0xA1, 0xE7, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xFD, /* 0x20-0x23 */
+	0x00, 0x00, 0xA1, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xA1, 0xE4, 0xA1, 0xE5, 0xA1, 0xEC, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xED, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xA1, 0xEF, 0xA1, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xDC, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA1, 0xDA, 0xA1, 0xDD, 0x00, 0x00, 0xA1, 0xDD, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xD8, 0xA1, 0xD9, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xA1, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xA1, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xA1, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xE9, /* 0xBC-0xBF */
+};
+
+static unsigned char u2c_23[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0x5B, /* 0x04-0x07 */
+};
+
+static unsigned char u2c_25[512] = {
+	0xA2, 0x77, 0x00, 0x00, 0xA2, 0x78, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xA2, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xA2, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xA2, 0x7C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xA2, 0x7D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA2, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xA2, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xA2, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xA2, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xA2, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF9, 0xF9, 0xF9, 0xF8, 0xF9, 0xE6, 0xF9, 0xEF, /* 0x50-0x53 */
+	0xF9, 0xDD, 0xF9, 0xE8, 0xF9, 0xF1, 0xF9, 0xDF, /* 0x54-0x57 */
+	0xF9, 0xEC, 0xF9, 0xF5, 0xF9, 0xE3, 0xF9, 0xEE, /* 0x58-0x5B */
+	0xF9, 0xF7, 0xF9, 0xE5, 0xF9, 0xE9, 0xF9, 0xF2, /* 0x5C-0x5F */
+	0xF9, 0xE0, 0xF9, 0xEB, 0xF9, 0xF4, 0xF9, 0xE2, /* 0x60-0x63 */
+	0xF9, 0xE7, 0xF9, 0xF0, 0xF9, 0xDE, 0xF9, 0xED, /* 0x64-0x67 */
+	0xF9, 0xF6, 0xF9, 0xE4, 0xF9, 0xEA, 0xF9, 0xF3, /* 0x68-0x6B */
+	0xF9, 0xE1, 0xA2, 0x7E, 0xA2, 0xA1, 0xA2, 0xA3, /* 0x6C-0x6F */
+	0xA2, 0xA2, 0xA2, 0xAC, 0xA2, 0xAD, 0xA2, 0xAE, /* 0x70-0x73 */
+	0xA1, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xA2, 0x62, 0xA2, 0x63, 0xA2, 0x64, /* 0x80-0x83 */
+	0xA2, 0x65, 0xA2, 0x66, 0xA2, 0x67, 0xA2, 0x68, /* 0x84-0x87 */
+	0xA2, 0x69, 0xA2, 0x70, 0xA2, 0x6F, 0xA2, 0x6E, /* 0x88-0x8B */
+	0xA2, 0x6D, 0xA2, 0x6C, 0xA2, 0x6B, 0xA2, 0x6A, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xFE, /* 0x90-0x93 */
+	0xA2, 0x76, 0xA2, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xA1, 0xBD, 0xA1, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xB6, 0xA1, 0xB5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xA1, 0xBF, 0xA1, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xBB, 0xA1, 0xBA, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xB3, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xA1, 0xB7, 0xA1, 0xB4, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0xA8, 0xA2, 0xA9, /* 0xE0-0xE3 */
+	0xA2, 0xAB, 0xA2, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
+static unsigned char u2c_26[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA1, 0xB9, 0xA1, 0xB8, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xA1, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xA1, 0xF0, 0xA1, 0xF2, 0xA1, 0xF1, 0x00, 0x00, /* 0x40-0x43 */
+};
+
+static unsigned char u2c_30[512] = {
+	0xA1, 0x40, 0xA1, 0x42, 0xA1, 0x43, 0xA1, 0xB2, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xA1, 0x71, 0xA1, 0x72, 0xA1, 0x6D, 0xA1, 0x6E, /* 0x08-0x0B */
+	0xA1, 0x75, 0xA1, 0x76, 0xA1, 0x79, 0xA1, 0x7A, /* 0x0C-0x0F */
+	0xA1, 0x69, 0xA1, 0x6A, 0xA2, 0x45, 0x00, 0x00, /* 0x10-0x13 */
+	0xA1, 0x65, 0xA1, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xA1, 0xE3, 0xA1, 0xA9, 0xA1, 0xAA, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xA2, 0xC3, 0xA2, 0xC4, 0xA2, 0xC5, /* 0x20-0x23 */
+	0xA2, 0xC6, 0xA2, 0xC7, 0xA2, 0xC8, 0xA2, 0xC9, /* 0x24-0x27 */
+	0xA2, 0xCA, 0xA2, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA1, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+};
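+
+/*
+ * Illustrative sketch only; the helper below is hypothetical and is
+ * hard-wired to the u2c_30 page above.  Each u2c_XX[512] page covers one
+ * high byte of the Unicode range and stores, per low byte, the two
+ * output bytes of the charset code; a 0x00,0x00 pair means "no mapping".
+ * A full conversion would first pick the page from the Unicode high
+ * byte.  For example, U+3001 resolves through u2c_30 to 0xA1 0x42.
+ */
+static inline int sketch_uni2char_page30(wchar_t uni, unsigned char *out)
+{
+	const unsigned char *cp = &u2c_30[(uni & 0xff) * 2];
+
+	if (cp[0] == 0x00 && cp[1] == 0x00)
+		return 0;	/* unmapped code point */
+	out[0] = cp[0];		/* lead byte of the charset code */
+	out[1] = cp[1];		/* trail byte of the charset code */
+	return 2;
+}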
+
+static unsigned char u2c_31[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA3, 0x74, 0xA3, 0x75, 0xA3, 0x76, /* 0x04-0x07 */
+	0xA3, 0x77, 0xA3, 0x78, 0xA3, 0x79, 0xA3, 0x7A, /* 0x08-0x0B */
+	0xA3, 0x7B, 0xA3, 0x7C, 0xA3, 0x7D, 0xA3, 0x7E, /* 0x0C-0x0F */
+	0xA3, 0xA1, 0xA3, 0xA2, 0xA3, 0xA3, 0xA3, 0xA4, /* 0x10-0x13 */
+	0xA3, 0xA5, 0xA3, 0xA6, 0xA3, 0xA7, 0xA3, 0xA8, /* 0x14-0x17 */
+	0xA3, 0xA9, 0xA3, 0xAA, 0xA3, 0xAB, 0xA3, 0xAC, /* 0x18-0x1B */
+	0xA3, 0xAD, 0xA3, 0xAE, 0xA3, 0xAF, 0xA3, 0xB0, /* 0x1C-0x1F */
+	0xA3, 0xB1, 0xA3, 0xB2, 0xA3, 0xB3, 0xA3, 0xB4, /* 0x20-0x23 */
+	0xA3, 0xB5, 0xA3, 0xB6, 0xA3, 0xB7, 0xA3, 0xB8, /* 0x24-0x27 */
+	0xA3, 0xB9, 0xA3, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0x40, 0xA4, 0x47, /* 0x90-0x93 */
+	0xA4, 0x54, 0xA5, 0x7C, 0xA4, 0x57, 0xA4, 0xA4, /* 0x94-0x97 */
+	0xA4, 0x55, 0xA5, 0xD2, 0xA4, 0x41, 0xA4, 0xFE, /* 0x98-0x9B */
+	0xA4, 0x42, 0xA4, 0xD1, 0xA6, 0x61, 0xA4, 0x48, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_32[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA4, 0x40, 0xA4, 0x47, 0xA4, 0x54, 0xA5, 0x7C, /* 0x20-0x23 */
+	0xA4, 0xAD, 0xA4, 0xBB, 0xA4, 0x43, 0xA4, 0x4B, /* 0x24-0x27 */
+	0xA4, 0x45, 0xA4, 0x51, 0xA4, 0xEB, 0xA4, 0xF5, /* 0x28-0x2B */
+	0xA4, 0xF4, 0xA4, 0xEC, 0xAA, 0xF7, 0xA4, 0x67, /* 0x2C-0x2F */
+	0xA4, 0xE9, 0xAE, 0xE8, 0xA6, 0xB3, 0xAA, 0xC0, /* 0x30-0x33 */
+	0xA6, 0x57, 0xAF, 0x53, 0xB0, 0x5D, 0xAF, 0xAC, /* 0x34-0x37 */
+	0xB3, 0xD2, 0xA5, 0x4E, 0xA9, 0x49, 0xBE, 0xC7, /* 0x38-0x3B */
+	0xBA, 0xCA, 0xA5, 0xF8, 0xB8, 0xEA, 0xA8, 0xF3, /* 0x3C-0x3F */
+	0xB2, 0xBD, 0xA5, 0xF0, 0xA6, 0xDB, 0xA6, 0xDC, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xA4, 0x40, 0xA4, 0x47, 0xA4, 0x54, 0xA5, 0x7C, /* 0x80-0x83 */
+	0xA4, 0xAD, 0xA4, 0xBB, 0xA4, 0x43, 0xA4, 0x4B, /* 0x84-0x87 */
+	0xA4, 0x45, 0xA4, 0x51, 0xA4, 0xEB, 0xA4, 0xF5, /* 0x88-0x8B */
+	0xA4, 0xF4, 0xA4, 0xEC, 0xAA, 0xF7, 0xA4, 0x67, /* 0x8C-0x8F */
+	0xA4, 0xE9, 0xAE, 0xE8, 0xA6, 0xB3, 0xAA, 0xC0, /* 0x90-0x93 */
+	0xA6, 0x57, 0xAF, 0x53, 0xB0, 0x5D, 0xAF, 0xAC, /* 0x94-0x97 */
+	0xB3, 0xD2, 0xAF, 0xB5, 0xA8, 0x6B, 0xA4, 0x6B, /* 0x98-0x9B */
+	0xBE, 0x41, 0xC0, 0x75, 0xA6, 0x4C, 0xAA, 0x60, /* 0x9C-0x9F */
+	0xB6, 0xB5, 0xA5, 0xF0, 0xBC, 0x67, 0xA1, 0xC0, /* 0xA0-0xA3 */
+	0xA4, 0x57, 0xA4, 0xA4, 0xA4, 0x55, 0xA5, 0xAA, /* 0xA4-0xA7 */
+	0xA5, 0x6B, 0xC2, 0xE5, 0xA9, 0x76, 0xBE, 0xC7, /* 0xA8-0xAB */
+	0xBA, 0xCA, 0xA5, 0xF8, 0xB8, 0xEA, 0xA8, 0xF3, /* 0xAC-0xAF */
+	0xA9, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+};
+
+static unsigned char u2c_33[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0x55, 0xA2, 0x56, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xA2, 0x50, 0xA2, 0x51, 0xA2, 0x52, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xA2, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xA2, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xA2, 0x53, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xA1, 0xEB, 0xA1, 0xEA, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xA2, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+};
+
+static unsigned char u2c_4E[512] = {
+	0xA4, 0x40, 0xA4, 0x42, 0x00, 0x00, 0xA4, 0x43, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC9, 0x45, /* 0x04-0x07 */
+	0xA4, 0x56, 0xA4, 0x54, 0xA4, 0x57, 0xA4, 0x55, /* 0x08-0x0B */
+	0xC9, 0x46, 0xA4, 0xA3, 0xC9, 0x4F, 0xC9, 0x4D, /* 0x0C-0x0F */
+	0xA4, 0xA2, 0xA4, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xA5, 0x42, 0xA5, 0x41, 0xA5, 0x40, 0x00, 0x00, /* 0x14-0x17 */
+	0xA5, 0x43, 0xA4, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xA5, 0xE0, 0xA5, 0xE1, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xC3, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0x58, /* 0x28-0x2B */
+	0x00, 0x00, 0xA4, 0xA4, 0xC9, 0x50, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA4, 0xA5, 0xC9, 0x63, 0xA6, 0xEA, 0xCB, 0xB1, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xA4, 0x59, 0xA4, 0xA6, 0x00, 0x00, 0xA5, 0x44, /* 0x38-0x3B */
+	0xC9, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0x40, 0xA4, 0x44, /* 0x40-0x43 */
+	0x00, 0x00, 0xA4, 0x5B, 0x00, 0x00, 0xC9, 0x47, /* 0x44-0x47 */
+	0xA4, 0x5C, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xA7, /* 0x48-0x4B */
+	0x00, 0x00, 0xA5, 0x45, 0xA5, 0x47, 0xA5, 0x46, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA5, 0xE2, 0xA5, 0xE3, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xC4, 0x00, 0x00, /* 0x54-0x57 */
+	0xAD, 0xBC, 0xA4, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xC9, 0x41, 0xA4, 0x45, 0xA4, 0x5E, 0xA4, 0x5D, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xA5, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xC5, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xAE, 0xD4, 0x4B, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xC3, 0xDC, 0xB1, /* 0x80-0x83 */
+	0xDC, 0xB2, 0x00, 0x00, 0xA4, 0x46, 0x00, 0x00, /* 0x84-0x87 */
+	0xA4, 0xA9, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xC6, /* 0x88-0x8B */
+	0xA4, 0x47, 0xC9, 0x48, 0xA4, 0x5F, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA4, 0xAA, 0xA4, 0xAC, 0xC9, 0x51, /* 0x90-0x93 */
+	0xA4, 0xAD, 0xA4, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xA5, 0xE5, 0x00, 0x00, 0xA8, 0xC7, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xC8, 0xAB, 0x45, /* 0x9C-0x9F */
+	0x00, 0x00, 0xA4, 0x60, 0xA4, 0xAE, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xA5, 0xE6, 0xA5, 0xE8, 0xA5, 0xE7, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xA6, 0xEB, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xC9, /* 0xA8-0xAB */
+	0xA8, 0xCA, 0xAB, 0x46, 0xAB, 0x47, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAD, 0xBD, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xB3, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF6, 0xD6, 0xA4, 0x48, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xA4, 0xB0, 0xA4, 0xAF, 0xC9, 0x52, 0xA4, 0xB1, /* 0xC0-0xC3 */
+	0xA4, 0xB7, 0x00, 0x00, 0xA4, 0xB2, 0xA4, 0xB3, /* 0xC4-0xC7 */
+	0xC9, 0x54, 0xC9, 0x53, 0xA4, 0xB5, 0xA4, 0xB6, /* 0xC8-0xCB */
+	0x00, 0x00, 0xA4, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xA5, 0x4A, 0xA5, 0x4B, 0xA5, 0x4C, 0xA5, 0x4D, /* 0xD4-0xD7 */
+	0xA5, 0x49, 0xA5, 0x50, 0xC9, 0x6A, 0x00, 0x00, /* 0xD8-0xDB */
+	0xC9, 0x66, 0xC9, 0x69, 0xA5, 0x51, 0xA5, 0x61, /* 0xDC-0xDF */
+	0x00, 0x00, 0xC9, 0x68, 0x00, 0x00, 0xA5, 0x4E, /* 0xE0-0xE3 */
+	0xA5, 0x4F, 0xA5, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xC9, 0x65, 0xC9, 0x67, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xA5, 0xF5, 0xC9, 0xB0, 0xA5, 0xF2, 0xA5, 0xF6, /* 0xF0-0xF3 */
+	0xC9, 0xBA, 0xC9, 0xAE, 0xA5, 0xF3, 0xC9, 0xB2, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA5, 0xF4, /* 0xF8-0xFB */
+	0x00, 0x00, 0xA5, 0xF7, 0x00, 0x00, 0xA5, 0xE9, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_4F[512] = {
+	0xC9, 0xB1, 0xA5, 0xF8, 0xC9, 0xB5, 0x00, 0x00, /* 0x00-0x03 */
+	0xC9, 0xB9, 0xC9, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xC9, 0xB3, 0xA5, 0xEA, 0xA5, 0xEC, 0xA5, 0xF9, /* 0x08-0x0B */
+	0x00, 0x00, 0xA5, 0xEE, 0xC9, 0xAB, 0xA5, 0xF1, /* 0x0C-0x0F */
+	0xA5, 0xEF, 0xA5, 0xF0, 0xC9, 0xBB, 0xC9, 0xB8, /* 0x10-0x13 */
+	0xC9, 0xAF, 0xA5, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xC9, 0xAC, 0xA5, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xC9, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0xB7, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xC9, 0xAD, 0xCA, 0x66, 0x00, 0x00, 0xA7, 0x42, /* 0x2C-0x2F */
+	0xA6, 0xF4, 0x00, 0x00, 0x00, 0x00, 0xCA, 0x67, /* 0x30-0x33 */
+	0xA6, 0xF1, 0x00, 0x00, 0xA7, 0x44, 0x00, 0x00, /* 0x34-0x37 */
+	0xA6, 0xF9, 0x00, 0x00, 0xA6, 0xF8, 0xCA, 0x5B, /* 0x38-0x3B */
+	0xA6, 0xFC, 0xA6, 0xF7, 0xCA, 0x60, 0xCA, 0x68, /* 0x3C-0x3F */
+	0x00, 0x00, 0xCA, 0x64, 0x00, 0x00, 0xA6, 0xFA, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xA6, 0xFD, 0xA6, 0xEE, /* 0x44-0x47 */
+	0xA7, 0x47, 0xCA, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xCB, 0xBD, 0xA6, 0xEC, 0xA7, 0x43, 0xA6, 0xED, /* 0x4C-0x4F */
+	0xA6, 0xF5, 0xA6, 0xF6, 0xCA, 0x62, 0xCA, 0x5E, /* 0x50-0x53 */
+	0xA6, 0xFB, 0xA6, 0xF3, 0xCA, 0x5A, 0xA6, 0xEF, /* 0x54-0x57 */
+	0xCA, 0x65, 0xA7, 0x45, 0xA7, 0x48, 0xA6, 0xF2, /* 0x58-0x5B */
+	0xA7, 0x40, 0xA7, 0x46, 0xA6, 0xF0, 0xCA, 0x63, /* 0x5C-0x5F */
+	0xA7, 0x41, 0xCA, 0x69, 0xCA, 0x5C, 0xA6, 0xFE, /* 0x60-0x63 */
+	0xCA, 0x5F, 0x00, 0x00, 0x00, 0x00, 0xCA, 0x61, /* 0x64-0x67 */
+	0x00, 0x00, 0xA8, 0xD8, 0xCB, 0xBF, 0xCB, 0xCB, /* 0x68-0x6B */
+	0xA8, 0xD0, 0x00, 0x00, 0xCB, 0xCC, 0xA8, 0xCB, /* 0x6C-0x6F */
+	0xA8, 0xD5, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xCE, /* 0x70-0x73 */
+	0xCB, 0xB9, 0xA8, 0xD6, 0xCB, 0xB8, 0xCB, 0xBC, /* 0x74-0x77 */
+	0xCB, 0xC3, 0xCB, 0xC1, 0xA8, 0xDE, 0xA8, 0xD9, /* 0x78-0x7B */
+	0xCB, 0xB3, 0xCB, 0xB5, 0xA8, 0xDB, 0xA8, 0xCF, /* 0x7C-0x7F */
+
+	0xCB, 0xB6, 0xCB, 0xC2, 0xCB, 0xC9, 0xA8, 0xD4, /* 0x80-0x83 */
+	0xCB, 0xBB, 0xCB, 0xB4, 0xA8, 0xD3, 0xCB, 0xB7, /* 0x84-0x87 */
+	0xA8, 0xD7, 0xCB, 0xBA, 0x00, 0x00, 0xA8, 0xD2, /* 0x88-0x8B */
+	0x00, 0x00, 0xA8, 0xCD, 0x00, 0x00, 0xA8, 0xDC, /* 0x8C-0x8F */
+	0xCB, 0xC4, 0xA8, 0xDD, 0xCB, 0xC8, 0x00, 0x00, /* 0x90-0x93 */
+	0xCB, 0xC6, 0xCB, 0xCA, 0xA8, 0xDA, 0xCB, 0xBE, /* 0x94-0x97 */
+	0xCB, 0xB2, 0x00, 0x00, 0xCB, 0xC0, 0xA8, 0xD1, /* 0x98-0x9B */
+	0xCB, 0xC5, 0xA8, 0xCC, 0xCB, 0xC7, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0x56, 0xAB, 0x4A, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0xE0, 0xCD, 0xE8, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xAB, 0x49, 0xAB, 0x51, 0xAB, 0x5D, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xCD, 0xEE, 0xCD, 0xEC, 0xCD, 0xE7, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0x4B, /* 0xBC-0xBF */
+	0xCD, 0xED, 0xCD, 0xE3, 0xAB, 0x59, 0xAB, 0x50, /* 0xC0-0xC3 */
+	0xAB, 0x58, 0xCD, 0xDE, 0x00, 0x00, 0xCD, 0xEA, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xCD, 0xE1, 0xAB, 0x54, 0xCD, 0xE2, /* 0xC8-0xCB */
+	0x00, 0x00, 0xCD, 0xDD, 0xAB, 0x5B, 0xAB, 0x4E, /* 0xCC-0xCF */
+	0xAB, 0x57, 0xAB, 0x4D, 0x00, 0x00, 0xCD, 0xDF, /* 0xD0-0xD3 */
+	0xCD, 0xE4, 0x00, 0x00, 0xCD, 0xEB, 0xAB, 0x55, /* 0xD4-0xD7 */
+	0xAB, 0x52, 0xCD, 0xE6, 0xAB, 0x5A, 0xCD, 0xE9, /* 0xD8-0xDB */
+	0xCD, 0xE5, 0xAB, 0x4F, 0xAB, 0x5C, 0xAB, 0x53, /* 0xDC-0xDF */
+	0xAB, 0x4C, 0xAB, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xCD, 0xEF, 0x00, 0x00, 0xAD, 0xD7, 0xAD, 0xC1, /* 0xEC-0xEF */
+	0x00, 0x00, 0xAD, 0xD1, 0x00, 0x00, 0xAD, 0xD6, /* 0xF0-0xF3 */
+	0xD0, 0xD0, 0xD0, 0xCF, 0xD0, 0xD4, 0xD0, 0xD5, /* 0xF4-0xF7 */
+	0xAD, 0xC4, 0x00, 0x00, 0xAD, 0xCD, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xAD, 0xDA, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_50[512] = {
+	0xAD, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xD0, 0xC9, 0xAD, 0xC7, 0xD0, 0xCA, /* 0x04-0x07 */
+	0x00, 0x00, 0xAD, 0xDC, 0x00, 0x00, 0xAD, 0xD3, /* 0x08-0x0B */
+	0xAD, 0xBE, 0xAD, 0xBF, 0xD0, 0xDD, 0xB0, 0xBF, /* 0x0C-0x0F */
+	0x00, 0x00, 0xAD, 0xCC, 0xAD, 0xCB, 0xD0, 0xCB, /* 0x10-0x13 */
+	0xAD, 0xCF, 0xD4, 0x5B, 0xAD, 0xC6, 0xD0, 0xD6, /* 0x14-0x17 */
+	0xAD, 0xD5, 0xAD, 0xD4, 0xAD, 0xCA, 0xD0, 0xCE, /* 0x18-0x1B */
+	0xD0, 0xD7, 0x00, 0x00, 0xD0, 0xC8, 0xAD, 0xC9, /* 0x1C-0x1F */
+	0xD0, 0xD8, 0xAD, 0xD2, 0xD0, 0xCC, 0xAD, 0xC0, /* 0x20-0x23 */
+	0x00, 0x00, 0xAD, 0xC3, 0xAD, 0xC2, 0xD0, 0xD9, /* 0x24-0x27 */
+	0xAD, 0xD0, 0xAD, 0xC5, 0xAD, 0xD9, 0xAD, 0xDB, /* 0x28-0x2B */
+	0xD0, 0xD3, 0xAD, 0xD8, 0x00, 0x00, 0xD0, 0xDB, /* 0x2C-0x2F */
+	0xD0, 0xCD, 0xD0, 0xDC, 0x00, 0x00, 0xD0, 0xD1, /* 0x30-0x33 */
+	0x00, 0x00, 0xD0, 0xDA, 0x00, 0x00, 0xD0, 0xD2, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xAD, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xD4, 0x63, 0xD4, 0x57, 0x00, 0x00, 0xB0, 0xB3, /* 0x40-0x43 */
+	0x00, 0x00, 0xD4, 0x5C, 0xD4, 0x62, 0xB0, 0xB2, /* 0x44-0x47 */
+	0xD4, 0x55, 0xB0, 0xB6, 0xD4, 0x59, 0xD4, 0x52, /* 0x48-0x4B */
+	0xB0, 0xB4, 0xD4, 0x56, 0xB0, 0xB9, 0xB0, 0xBE, /* 0x4C-0x4F */
+	0x00, 0x00, 0xD4, 0x67, 0x00, 0x00, 0xD4, 0x51, /* 0x50-0x53 */
+	0x00, 0x00, 0xB0, 0xBA, 0x00, 0x00, 0xD4, 0x66, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xB5, 0xD4, 0x58, /* 0x58-0x5B */
+	0xB0, 0xB1, 0xD4, 0x53, 0xD4, 0x4F, 0xD4, 0x5D, /* 0x5C-0x5F */
+	0xD4, 0x50, 0xD4, 0x4E, 0xD4, 0x5A, 0xD4, 0x60, /* 0x60-0x63 */
+	0xD4, 0x61, 0xB0, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xD8, 0x5B, 0xD4, 0x5E, 0xD4, 0x4D, 0xD4, 0x5F, /* 0x68-0x6B */
+	0x00, 0x00, 0xB0, 0xC1, 0xD4, 0x64, 0xB0, 0xC0, /* 0x6C-0x6F */
+	0xD4, 0x4C, 0x00, 0x00, 0xD4, 0x54, 0xD4, 0x65, /* 0x70-0x73 */
+	0xB0, 0xBC, 0xB0, 0xBB, 0xB0, 0xB8, 0xB0, 0xBD, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xAF, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xB0, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xB3, 0xC8, 0x00, 0x00, 0xD8, 0x5E, 0xD8, 0x57, /* 0x80-0x83 */
+	0x00, 0x00, 0xB3, 0xC5, 0x00, 0x00, 0xD8, 0x5F, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0x55, /* 0x88-0x8B */
+	0xD8, 0x58, 0xB3, 0xC4, 0xD8, 0x59, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xB3, 0xC7, 0xD8, 0x5D, 0x00, 0x00, /* 0x90-0x93 */
+	0xD8, 0x53, 0xD8, 0x52, 0xB3, 0xC9, 0x00, 0x00, /* 0x94-0x97 */
+	0xB3, 0xCA, 0xB3, 0xC6, 0xB3, 0xCB, 0xD8, 0x51, /* 0x98-0x9B */
+	0xD8, 0x5C, 0xD8, 0x5A, 0xD8, 0x54, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0xC3, 0xD8, 0x56, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xB6, 0xCA, 0xB6, 0xC4, 0xDC, 0xB7, 0xB6, 0xCD, /* 0xAC-0xAF */
+	0xDC, 0xBD, 0xDC, 0xC0, 0xB6, 0xC6, 0xB6, 0xC7, /* 0xB0-0xB3 */
+	0xDC, 0xBA, 0xB6, 0xC5, 0xDC, 0xC3, 0xB6, 0xCB, /* 0xB4-0xB7 */
+	0xDC, 0xC4, 0x00, 0x00, 0xDC, 0xBF, 0xB6, 0xCC, /* 0xB8-0xBB */
+	0x00, 0x00, 0xDC, 0xB4, 0xB6, 0xC9, 0xDC, 0xB5, /* 0xBC-0xBF */
+	0x00, 0x00, 0xDC, 0xBE, 0xDC, 0xBC, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xDC, 0xB8, 0xB6, 0xC8, 0xDC, 0xB6, 0xB6, 0xCE, /* 0xC4-0xC7 */
+	0xDC, 0xBB, 0xDC, 0xC2, 0xDC, 0xB9, 0xDC, 0xC1, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0xB6, 0xB9, 0xB3, /* 0xCC-0xCF */
+	0x00, 0x00, 0xB9, 0xB4, 0x00, 0x00, 0xE0, 0xF9, /* 0xD0-0xD3 */
+	0xE0, 0xF1, 0xB9, 0xB2, 0xB9, 0xAF, 0xE0, 0xF2, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0xB1, 0xE0, 0xF5, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE0, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xE0, 0xFE, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xFD, /* 0xE0-0xE3 */
+	0xE0, 0xF8, 0xB9, 0xAE, 0xE0, 0xF0, 0xB9, 0xAC, /* 0xE4-0xE7 */
+	0xE0, 0xF3, 0xB9, 0xB7, 0xE0, 0xF6, 0x00, 0x00, /* 0xE8-0xEB */
+	0xE0, 0xFA, 0xB9, 0xB0, 0xB9, 0xAD, 0xE0, 0xFC, /* 0xEC-0xEF */
+	0xE0, 0xFB, 0xB9, 0xB5, 0x00, 0x00, 0xE0, 0xF4, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xBB, 0xF8, 0xE4, 0xEC, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xE4, 0xE9, 0xBB, 0xF9, 0x00, 0x00, 0xBB, 0xF7, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE4, 0xF0, 0xE4, 0xED, 0xE4, 0xE6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_51[512] = {
+	0xBB, 0xF6, 0x00, 0x00, 0xBB, 0xFA, 0xE4, 0xE7, /* 0x00-0x03 */
+	0xBB, 0xF5, 0xBB, 0xFD, 0xE4, 0xEA, 0xE4, 0xEB, /* 0x04-0x07 */
+	0xBB, 0xFB, 0xBB, 0xFC, 0xE4, 0xF1, 0xE4, 0xEE, /* 0x08-0x0B */
+	0xE4, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xBE, 0xAA, 0xE8, 0xF8, 0xBE, 0xA7, 0xE8, 0xF5, /* 0x10-0x13 */
+	0xBE, 0xA9, 0xBE, 0xAB, 0x00, 0x00, 0xE8, 0xF6, /* 0x14-0x17 */
+	0xBE, 0xA8, 0x00, 0x00, 0xE8, 0xF7, 0x00, 0x00, /* 0x18-0x1B */
+	0xE8, 0xF4, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x76, /* 0x1C-0x1F */
+	0xEC, 0xBD, 0xC0, 0x77, 0xEC, 0xBB, 0x00, 0x00, /* 0x20-0x23 */
+	0xEC, 0xBC, 0xEC, 0xBA, 0xEC, 0xB9, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xEC, 0xBE, 0xC0, 0x75, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xEF, 0xB8, 0xEF, 0xB9, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE4, 0xE8, 0xEF, 0xB7, 0xC0, 0x78, 0xC3, 0x5F, /* 0x30-0x33 */
+	0xF1, 0xEB, 0xF1, 0xEC, 0x00, 0x00, 0xC4, 0xD7, /* 0x34-0x37 */
+	0xC4, 0xD8, 0xF5, 0xC1, 0xF5, 0xC0, 0xC5, 0x6C, /* 0x38-0x3B */
+	0xC5, 0x6B, 0xF7, 0xD0, 0x00, 0x00, 0xA4, 0x49, /* 0x3C-0x3F */
+	0xA4, 0x61, 0xA4, 0xB9, 0x00, 0x00, 0xA4, 0xB8, /* 0x40-0x43 */
+	0xA5, 0x53, 0xA5, 0x52, 0xA5, 0xFC, 0xA5, 0xFB, /* 0x44-0x47 */
+	0xA5, 0xFD, 0xA5, 0xFA, 0x00, 0x00, 0xA7, 0x4A, /* 0x48-0x4B */
+	0xA7, 0x49, 0xA7, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xE0, 0x00, 0x00, /* 0x50-0x53 */
+	0xA8, 0xDF, 0xA8, 0xE1, 0x00, 0x00, 0xAB, 0x5E, /* 0x54-0x57 */
+	0x00, 0x00, 0xA2, 0x59, 0xD0, 0xDE, 0xA2, 0x5A, /* 0x58-0x5B */
+	0xB0, 0xC2, 0xA2, 0x5C, 0xA2, 0x5B, 0xD8, 0x60, /* 0x5C-0x5F */
+	0x00, 0x00, 0xA2, 0x5D, 0xB9, 0xB8, 0xA2, 0x5E, /* 0x60-0x63 */
+	0x00, 0x00, 0xA4, 0x4A, 0x00, 0x00, 0xA4, 0xBA, /* 0x64-0x67 */
+	0xA5, 0xFE, 0xA8, 0xE2, 0x00, 0x00, 0xA4, 0x4B, /* 0x68-0x6B */
+	0xA4, 0xBD, 0xA4, 0xBB, 0xA4, 0xBC, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xA6, 0x40, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xA7, 0x4C, 0xA8, 0xE4, 0xA8, 0xE3, /* 0x74-0x77 */
+	0xA8, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xAD, 0xDD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xBE, 0xAC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC9, 0x4E, /* 0x84-0x87 */
+	0x00, 0x00, 0xA5, 0x54, 0xA5, 0x55, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xA6, 0x41, 0x00, 0x00, 0xCA, 0x6A, /* 0x8C-0x8F */
+	0x00, 0x00, 0xAB, 0x60, 0xAB, 0x5F, 0xD0, 0xE0, /* 0x90-0x93 */
+	0xD0, 0xDF, 0xB0, 0xC3, 0x00, 0x00, 0xA4, 0xBE, /* 0x94-0x97 */
+	0xC9, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xCD, 0x00, 0x00, /* 0x9C-0x9F */
+	0xAB, 0x61, 0x00, 0x00, 0xAD, 0xE0, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xAD, 0xDE, 0xAD, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xBE, 0xAD, 0x00, 0x00, /* 0xA8-0xAB */
+	0xA5, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xA6, 0x42, 0xC9, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0x4D, 0xA7, 0x4E, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xCA, 0x6B, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xCB, 0xCE, 0xA8, 0xE6, 0xCB, 0xCF, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xD0, 0xE2, 0xD0, 0xE3, 0xAD, 0xE3, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xD0, 0xE4, 0x00, 0x00, 0xD0, 0xE1, 0xAD, 0xE4, /* 0xC8-0xCB */
+	0xAD, 0xE2, 0xAD, 0xE1, 0xD0, 0xE5, 0x00, 0x00, /* 0xCC-0xCF */
+	0xD4, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xD8, 0x61, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xC5, /* 0xD4-0xD7 */
+	0xE1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xBB, 0xFE, 0xBE, 0xAE, 0xE8, 0xF9, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA4, 0x4C, 0xA4, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xB0, 0xC4, 0xB3, 0xCD, 0x00, 0x00, 0xB9, 0xB9, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xC9, 0x42, 0xA4, 0xBF, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xA5, 0x59, 0xA5, 0x57, 0xA5, 0x58, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xA8, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_52[512] = {
+	0xA4, 0x4D, 0xA4, 0x4E, 0x00, 0x00, 0xA4, 0x62, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0xC0, 0xA4, 0xC1, /* 0x04-0x07 */
+	0xA4, 0xC2, 0xC9, 0xBE, 0xA5, 0x5A, 0x00, 0x00, /* 0x08-0x0B */
+	0xC9, 0x6B, 0x00, 0x00, 0xA6, 0x46, 0x00, 0x00, /* 0x0C-0x0F */
+	0xC9, 0xBF, 0xA6, 0x44, 0xA6, 0x45, 0xC9, 0xBD, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xA6, 0x47, 0xA6, 0x43, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xCA, 0x6C, 0xAA, 0xEC, 0xCA, 0x6D, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xCA, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xA7, 0x50, 0xA7, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xA7, 0x53, 0xA7, 0x51, 0xA7, 0x52, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xED, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA8, 0xEC, 0xCB, 0xD4, 0xCB, 0xD1, 0xCB, 0xD2, /* 0x30-0x33 */
+	0x00, 0x00, 0xCB, 0xD0, 0xA8, 0xEE, 0xA8, 0xEA, /* 0x34-0x37 */
+	0xA8, 0xE9, 0x00, 0x00, 0xA8, 0xEB, 0xA8, 0xE8, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xA8, 0xEF, 0x00, 0x00, 0xAB, 0x63, /* 0x40-0x43 */
+	0xCD, 0xF0, 0x00, 0x00, 0xCB, 0xD3, 0xAB, 0x68, /* 0x44-0x47 */
+	0x00, 0x00, 0xCD, 0xF1, 0xAB, 0x64, 0xAB, 0x67, /* 0x48-0x4B */
+	0xAB, 0x66, 0xAB, 0x65, 0xAB, 0x62, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xE8, 0x00, 0x00, /* 0x50-0x53 */
+	0xAD, 0xE7, 0xD0, 0xEB, 0xAD, 0xE5, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xE7, 0xAD, 0xE8, /* 0x58-0x5B */
+	0xAD, 0xE6, 0xAD, 0xE9, 0xD0, 0xE9, 0xD0, 0xEA, /* 0x5C-0x5F */
+	0x00, 0x00, 0xD0, 0xE6, 0xD0, 0xEC, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xB3, 0xD1, 0xB0, 0xC5, 0xD4, 0x69, /* 0x68-0x6B */
+	0xD4, 0x6B, 0xD4, 0x6A, 0xD4, 0x6C, 0xB0, 0xC6, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0xCE, 0x00, 0x00, /* 0x70-0x73 */
+	0xB3, 0xCF, 0xB3, 0xD0, 0x00, 0x00, 0xB6, 0xD0, /* 0x74-0x77 */
+	0xDC, 0xC7, 0x00, 0x00, 0xDC, 0xC6, 0xDC, 0xC8, /* 0x78-0x7B */
+	0xDC, 0xC9, 0xB6, 0xD1, 0x00, 0x00, 0xB6, 0xCF, /* 0x7C-0x7F */
+
+	0xE1, 0x41, 0xE1, 0x42, 0xB9, 0xBB, 0xB9, 0xBA, /* 0x80-0x83 */
+	0xE3, 0x5A, 0x00, 0x00, 0x00, 0x00, 0xBC, 0x40, /* 0x84-0x87 */
+	0xBC, 0x41, 0xBC, 0x42, 0xBC, 0x44, 0xE4, 0xF2, /* 0x88-0x8B */
+	0xE4, 0xF3, 0xBC, 0x43, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xBE, 0xAF, 0x00, 0x00, 0xBE, 0xB0, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xED, 0xF5, 0xC3, /* 0x94-0x97 */
+	0xF5, 0xC2, 0xF7, 0xD1, 0x00, 0x00, 0xA4, 0x4F, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA5, 0x5C, /* 0x9C-0x9F */
+	0xA5, 0x5B, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x48, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0xC0, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xA7, 0x55, 0xA7, 0x56, 0xA7, 0x54, /* 0xA8-0xAB */
+	0xA7, 0x57, 0xCA, 0x6F, 0xCA, 0x70, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xF1, /* 0xB8-0xBB */
+	0xCB, 0xD5, 0x00, 0x00, 0xA8, 0xF0, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCD, 0xF2, 0xAB, 0x6C, 0xCD, 0xF3, 0xAB, 0x6B, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0x69, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xAB, 0x6A, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xD0, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xC7, 0xD4, 0x6E, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xB0, 0xCA, 0xD4, 0x6D, 0xB1, 0xE5, /* 0xD4-0xD7 */
+	0xB0, 0xC9, 0xB0, 0xC8, 0x00, 0x00, 0xB3, 0xD4, /* 0xD8-0xDB */
+	0x00, 0x00, 0xB3, 0xD3, 0xB3, 0xD2, 0xB6, 0xD2, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xD5, 0xB6, 0xD6, /* 0xE0-0xE3 */
+	0xB6, 0xD4, 0x00, 0x00, 0xB6, 0xD3, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xE1, 0x43, 0x00, 0x00, 0xE1, 0x44, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xF5, /* 0xEC-0xEF */
+	0xBC, 0x45, 0xE4, 0xF4, 0x00, 0x00, 0xBE, 0xB1, /* 0xF0-0xF3 */
+	0xEC, 0xBF, 0xC0, 0x79, 0x00, 0x00, 0xF1, 0xEE, /* 0xF4-0xF7 */
+	0xC4, 0x55, 0x00, 0x00, 0xA4, 0x63, 0xA4, 0xC3, /* 0xF8-0xFB */
+	0xC9, 0x56, 0x00, 0x00, 0xA4, 0xC4, 0xA4, 0xC5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_53[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xA5, 0x5D, 0xA5, 0x5E, 0x00, 0x00, /* 0x04-0x07 */
+	0xA6, 0x49, 0xCA, 0x71, 0xCB, 0xD6, 0xCB, 0xD7, /* 0x08-0x0B */
+	0x00, 0x00, 0xAB, 0x6D, 0xD0, 0xEE, 0xB0, 0xCC, /* 0x0C-0x0F */
+	0xB0, 0xCB, 0xD8, 0x63, 0xD8, 0x62, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xA4, 0x50, 0xA4, 0xC6, 0xA5, 0x5F, /* 0x14-0x17 */
+	0x00, 0x00, 0xB0, 0xCD, 0xC9, 0x43, 0x00, 0x00, /* 0x18-0x1B */
+	0xC9, 0x6C, 0xA5, 0x60, 0x00, 0x00, 0xC9, 0xC2, /* 0x1C-0x1F */
+	0xA6, 0x4B, 0xA6, 0x4A, 0xC9, 0xC1, 0xA7, 0x58, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xAD, 0xEA, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD4, 0x6F, 0x00, 0x00, 0xB6, 0xD7, /* 0x2C-0x2F */
+	0xE1, 0x45, 0xB9, 0xBC, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xE8, 0xFA, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xFD, /* 0x34-0x37 */
+	0x00, 0x00, 0xA4, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xCB, 0xD8, 0xCD, 0xF4, 0xB0, 0xD0, 0xB0, 0xCE, /* 0x3C-0x3F */
+	0xB0, 0xCF, 0xA4, 0x51, 0x00, 0x00, 0xA4, 0x64, /* 0x40-0x43 */
+	0xA2, 0xCD, 0xA4, 0xCA, 0x00, 0x00, 0xA4, 0xC9, /* 0x44-0x47 */
+	0xA4, 0xC8, 0xA5, 0x63, 0xA5, 0x62, 0x00, 0x00, /* 0x48-0x4B */
+	0xC9, 0x6D, 0xC9, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xA8, 0xF5, 0xA8, 0xF2, 0xA8, 0xF4, /* 0x50-0x53 */
+	0xA8, 0xF3, 0x00, 0x00, 0x00, 0x00, 0xAB, 0x6E, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0xD5, 0x00, 0x00, /* 0x58-0x5B */
+	0xA4, 0x52, 0x00, 0x00, 0xA4, 0xCB, 0x00, 0x00, /* 0x5C-0x5F */
+	0xA5, 0x65, 0xA5, 0x64, 0x00, 0x00, 0xCA, 0x72, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xF6, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xC9, 0x57, 0x00, 0x00, 0xA5, 0x67, 0xA5, 0x66, /* 0x6C-0x6F */
+	0xA6, 0x4C, 0xA6, 0x4D, 0xCA, 0x73, 0xA7, 0x59, /* 0x70-0x73 */
+	0x00, 0x00, 0xA7, 0x5A, 0x00, 0x00, 0xA8, 0xF7, /* 0x74-0x77 */
+	0xA8, 0xF8, 0xA8, 0xF9, 0x00, 0x00, 0xAB, 0x6F, /* 0x78-0x7B */
+	0xCD, 0xF5, 0x00, 0x00, 0x00, 0x00, 0xAD, 0xEB, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0x44, 0x00, 0x00, /* 0x80-0x83 */
+	0xA4, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0xC4, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0x74, 0xCA, 0x75, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xD9, 0x00, 0x00, /* 0x90-0x93 */
+	0xCB, 0xDA, 0x00, 0x00, 0xCD, 0xF7, 0xCD, 0xF6, /* 0x94-0x97 */
+	0xCD, 0xF9, 0xCD, 0xF8, 0xAB, 0x70, 0x00, 0x00, /* 0x98-0x9B */
+	0xD4, 0x70, 0xAD, 0xED, 0xD0, 0xEF, 0xAD, 0xEC, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xD8, 0x64, 0xB3, 0xD6, 0x00, 0x00, 0xD8, 0x65, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE1, 0x46, 0xB9, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xBC, 0x46, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xF1, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xC9, 0x58, 0x00, 0x00, 0xA5, 0x68, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB0, 0xD1, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xA4, 0x53, 0xA4, 0x65, 0xA4, 0xCE, 0xA4, 0xCD, /* 0xC8-0xCB */
+	0x00, 0x00, 0xA4, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xA8, 0xFB, 0x00, 0x00, 0xA8, 0xFA, 0xA8, 0xFC, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0x71, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAD, 0xEE, /* 0xDC-0xDF */
+	0x00, 0x00, 0xE8, 0xFB, 0xC2, 0x4F, 0xA4, 0x66, /* 0xE0-0xE3 */
+	0xA5, 0x6A, 0xA5, 0x79, 0xA5, 0x74, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xA5, 0x6F, 0xA5, 0x6E, 0xA5, 0x75, 0xA5, 0x73, /* 0xE8-0xEB */
+	0xA5, 0x6C, 0xA5, 0x7A, 0xA5, 0x6D, 0xA5, 0x69, /* 0xEC-0xEF */
+	0xA5, 0x78, 0xA5, 0x77, 0xA5, 0x76, 0xA5, 0x6B, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xA5, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xA5, 0x71, 0x00, 0x00, 0x00, 0x00, 0xA5, 0x7B, /* 0xF8-0xFB */
+	0xA5, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_54[512] = {
+	0x00, 0x00, 0xA6, 0x53, 0x00, 0x00, 0xA6, 0x59, /* 0x00-0x03 */
+	0xA6, 0x55, 0x00, 0x00, 0xA6, 0x5B, 0xC9, 0xC5, /* 0x04-0x07 */
+	0xA6, 0x58, 0xA6, 0x4E, 0xA6, 0x51, 0xA6, 0x54, /* 0x08-0x0B */
+	0xA6, 0x50, 0xA6, 0x57, 0xA6, 0x5A, 0xA6, 0x4F, /* 0x0C-0x0F */
+	0xA6, 0x52, 0xA6, 0x56, 0xA6, 0x5C, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xCA, 0x7E, 0xCA, 0x7B, 0x00, 0x00, 0xA7, 0x67, /* 0x18-0x1B */
+	0xCA, 0x7C, 0xA7, 0x5B, 0xA7, 0x5D, 0xA7, 0x75, /* 0x1C-0x1F */
+	0xA7, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xCA, 0xA5, 0xCA, 0x7D, 0xA7, 0x5F, 0xA7, 0x61, /* 0x24-0x27 */
+	0xCA, 0xA4, 0xA7, 0x68, 0xCA, 0x78, 0xA7, 0x74, /* 0x28-0x2B */
+	0xA7, 0x76, 0xA7, 0x5C, 0xA7, 0x6D, 0x00, 0x00, /* 0x2C-0x2F */
+	0xCA, 0x76, 0xA7, 0x73, 0x00, 0x00, 0xA7, 0x64, /* 0x30-0x33 */
+	0x00, 0x00, 0xA7, 0x6E, 0xA7, 0x6F, 0xCA, 0x77, /* 0x34-0x37 */
+	0xA7, 0x6C, 0xA7, 0x6A, 0x00, 0x00, 0xA7, 0x6B, /* 0x38-0x3B */
+	0xA7, 0x71, 0xCA, 0xA1, 0xA7, 0x5E, 0x00, 0x00, /* 0x3C-0x3F */
+	0xA7, 0x72, 0xCA, 0xA3, 0xA7, 0x66, 0xA7, 0x63, /* 0x40-0x43 */
+	0x00, 0x00, 0xCA, 0x7A, 0xA7, 0x62, 0xCA, 0xA6, /* 0x44-0x47 */
+	0xA7, 0x65, 0x00, 0x00, 0xA7, 0x69, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0x60, 0xCA, 0xA2, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCA, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xCB, 0xEB, 0xCB, 0xEA, 0xA9, 0x4F, 0xCB, 0xED, /* 0x60-0x63 */
+	0xCB, 0xEF, 0xCB, 0xE4, 0xCB, 0xE7, 0xCB, 0xEE, /* 0x64-0x67 */
+	0xA9, 0x50, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xE1, /* 0x68-0x6B */
+	0xCB, 0xE5, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xE9, /* 0x6C-0x6F */
+	0xCE, 0x49, 0xA9, 0x4B, 0xCE, 0x4D, 0xA8, 0xFD, /* 0x70-0x73 */
+	0xCB, 0xE6, 0xA8, 0xFE, 0xA9, 0x4C, 0xA9, 0x45, /* 0x74-0x77 */
+	0xA9, 0x41, 0x00, 0x00, 0xCB, 0xE2, 0xA9, 0x44, /* 0x78-0x7B */
+	0xA9, 0x49, 0xA9, 0x52, 0xCB, 0xE3, 0xCB, 0xDC, /* 0x7C-0x7F */
+
+	0xA9, 0x43, 0xCB, 0xDD, 0xCB, 0xDF, 0x00, 0x00, /* 0x80-0x83 */
+	0xA9, 0x46, 0x00, 0x00, 0xA9, 0x48, 0xCB, 0xDB, /* 0x84-0x87 */
+	0xCB, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xA9, 0x51, /* 0x88-0x8B */
+	0xA9, 0x4D, 0xCB, 0xE8, 0xA9, 0x53, 0x00, 0x00, /* 0x8C-0x8F */
+	0xA9, 0x4A, 0xCB, 0xDE, 0xA9, 0x47, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xA9, 0x42, 0xA9, 0x40, 0x00, 0x00, /* 0x94-0x97 */
+	0xCB, 0xEC, 0x00, 0x00, 0xA9, 0x4E, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCE, 0x48, 0xCD, 0xFB, 0xCE, 0x4B, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCD, 0xFD, 0xAB, 0x78, 0xAB, 0xA8, /* 0xA4-0xA7 */
+	0xAB, 0x74, 0xAB, 0xA7, 0xAB, 0x7D, 0xAB, 0xA4, /* 0xA8-0xAB */
+	0xAB, 0x72, 0xCD, 0xFC, 0xCE, 0x43, 0xAB, 0xA3, /* 0xAC-0xAF */
+	0xCE, 0x4F, 0xAB, 0xA5, 0x00, 0x00, 0xAB, 0x79, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0x45, 0xCE, 0x42, /* 0xB4-0xB7 */
+	0xAB, 0x77, 0x00, 0x00, 0xCD, 0xFA, 0xAB, 0xA6, /* 0xB8-0xBB */
+	0xCE, 0x4A, 0xAB, 0x7C, 0xCE, 0x4C, 0xAB, 0xA9, /* 0xBC-0xBF */
+	0xAB, 0x73, 0xAB, 0x7E, 0xAB, 0x7B, 0xCE, 0x40, /* 0xC0-0xC3 */
+	0xAB, 0xA1, 0xCE, 0x46, 0xCE, 0x47, 0xAB, 0x7A, /* 0xC4-0xC7 */
+	0xAB, 0xA2, 0xAB, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0x75, 0xCD, 0xFE, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0x44, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0x4E, 0x00, 0x00, /* 0xDC-0xDF */
+	0xD1, 0x44, 0xAD, 0xFB, 0xD0, 0xF1, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD0, 0xF6, 0xAD, 0xF4, 0xAE, 0x40, 0xD0, 0xF4, /* 0xE4-0xE7 */
+	0xAD, 0xEF, 0xAD, 0xF9, 0xAD, 0xFE, 0xD0, 0xFB, /* 0xE8-0xEB */
+	0x00, 0x00, 0xAD, 0xFA, 0xAD, 0xFD, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xD0, 0xFE, 0xAD, 0xF5, 0xD0, 0xF5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0x42, /* 0xF4-0xF7 */
+	0xD1, 0x43, 0x00, 0x00, 0xAD, 0xF7, 0xD1, 0x41, /* 0xF8-0xFB */
+	0xAD, 0xF3, 0xAE, 0x43, 0x00, 0x00, 0xD0, 0xF8, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_55[512] = {
+	0x00, 0x00, 0xAD, 0xF1, 0x00, 0x00, 0xD1, 0x46, /* 0x00-0x03 */
+	0xD0, 0xF9, 0xD0, 0xFD, 0xAD, 0xF6, 0xAE, 0x42, /* 0x04-0x07 */
+	0xD0, 0xFA, 0xAD, 0xFC, 0xD1, 0x40, 0xD1, 0x47, /* 0x08-0x0B */
+	0xD4, 0xA1, 0x00, 0x00, 0xD1, 0x45, 0xAE, 0x44, /* 0x0C-0x0F */
+	0xAD, 0xF0, 0xD0, 0xFC, 0xD0, 0xF3, 0x00, 0x00, /* 0x10-0x13 */
+	0xAD, 0xF8, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xF2, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xF7, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xF0, 0xAE, 0x41, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0x77, 0x00, 0x00, /* 0x28-0x2B */
+	0xB0, 0xE4, 0xD4, 0xA7, 0xB0, 0xE2, 0xB0, 0xDF, /* 0x2C-0x2F */
+	0xD4, 0x7C, 0xB0, 0xDB, 0xD4, 0xA2, 0xB0, 0xE6, /* 0x30-0x33 */
+	0xD4, 0x76, 0xD4, 0x7B, 0xD4, 0x7A, 0xAD, 0xF2, /* 0x34-0x37 */
+	0xB0, 0xE1, 0xD4, 0xA5, 0x00, 0x00, 0xD4, 0xA8, /* 0x38-0x3B */
+	0xD4, 0x73, 0x00, 0x00, 0xB3, 0xE8, 0x00, 0x00, /* 0x3C-0x3F */
+	0xD4, 0xA9, 0xB0, 0xE7, 0x00, 0x00, 0xB0, 0xD9, /* 0x40-0x43 */
+	0xB0, 0xD6, 0xD4, 0x7E, 0xB0, 0xD3, 0x00, 0x00, /* 0x44-0x47 */
+	0xD4, 0xA6, 0x00, 0x00, 0xB0, 0xDA, 0xD4, 0xAA, /* 0x48-0x4B */
+	0x00, 0x00, 0xD4, 0x74, 0xD4, 0xA4, 0xB0, 0xDD, /* 0x4C-0x4F */
+	0xD4, 0x75, 0xD4, 0x78, 0xD4, 0x7D, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xB0, 0xDE, 0xB0, 0xDC, 0xB0, 0xE8, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xB0, 0xE3, 0x00, 0x00, 0xB0, 0xD7, 0xB1, 0xD2, /* 0x5C-0x5F */
+	0x00, 0x00, 0xB0, 0xD8, 0xD4, 0x79, 0xB0, 0xE5, /* 0x60-0x63 */
+	0xB0, 0xE0, 0xD4, 0xA3, 0xB0, 0xD5, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xD4, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD4, 0x71, 0xD4, 0x72, 0xD8, 0x6A, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0xD7, /* 0x78-0x7B */
+	0xB3, 0xDA, 0xD8, 0x75, 0xB3, 0xEE, 0xD8, 0x78, /* 0x7C-0x7F */
+
+	0xB3, 0xD8, 0xD8, 0x71, 0xB3, 0xDE, 0xB3, 0xE4, /* 0x80-0x83 */
+	0xB5, 0xBD, 0x00, 0x00, 0x00, 0x00, 0xB3, 0xE2, /* 0x84-0x87 */
+	0xD8, 0x6E, 0xB3, 0xEF, 0xB3, 0xDB, 0xB3, 0xE3, /* 0x88-0x8B */
+	0xD8, 0x76, 0xDC, 0xD7, 0xD8, 0x7B, 0xD8, 0x6F, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD8, 0x66, 0xD8, 0x73, 0xD8, 0x6D, /* 0x90-0x93 */
+	0xB3, 0xE1, 0xD8, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xB3, 0xDD, 0xB3, 0xF1, 0xB3, 0xEA, 0x00, 0x00, /* 0x98-0x9B */
+	0xB3, 0xDF, 0xB3, 0xDC, 0x00, 0x00, 0xB3, 0xE7, /* 0x9C-0x9F */
+	0x00, 0x00, 0xD8, 0x7A, 0xD8, 0x6C, 0xD8, 0x72, /* 0xA0-0xA3 */
+	0xD8, 0x74, 0xD8, 0x68, 0xD8, 0x77, 0xB3, 0xD9, /* 0xA4-0xA7 */
+	0xD8, 0x67, 0x00, 0x00, 0xB3, 0xE0, 0xB3, 0xF0, /* 0xA8-0xAB */
+	0xB3, 0xEC, 0xD8, 0x69, 0xB3, 0xE6, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xB3, 0xED, 0xB3, 0xE9, 0xB3, 0xE5, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xD8, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0xEB, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xD5, /* 0xBC-0xBF */
+	0xDC, 0xD1, 0x00, 0x00, 0xDC, 0xE0, 0xDC, 0xCA, /* 0xC0-0xC3 */
+	0xDC, 0xD3, 0xB6, 0xE5, 0xB6, 0xE6, 0xB6, 0xDE, /* 0xC4-0xC7 */
+	0xDC, 0xDC, 0xB6, 0xE8, 0xDC, 0xCF, 0xDC, 0xCE, /* 0xC8-0xCB */
+	0xDC, 0xCC, 0xDC, 0xDE, 0xB6, 0xDC, 0xDC, 0xD8, /* 0xCC-0xCF */
+	0xDC, 0xCD, 0xB6, 0xDF, 0xDC, 0xD6, 0xB6, 0xDA, /* 0xD0-0xD3 */
+	0xDC, 0xD2, 0xDC, 0xD9, 0xDC, 0xDB, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xDC, 0xDF, 0xB6, 0xE3, 0xDC, 0xCB, /* 0xD8-0xDB */
+	0xB6, 0xDD, 0xDC, 0xD0, 0x00, 0x00, 0xB6, 0xD8, /* 0xDC-0xDF */
+	0x00, 0x00, 0xB6, 0xE4, 0xDC, 0xDA, 0xB6, 0xE0, /* 0xE0-0xE3 */
+	0xB6, 0xE1, 0xB6, 0xE7, 0xB6, 0xDB, 0xA2, 0x5F, /* 0xE4-0xE7 */
+	0xB6, 0xD9, 0xDC, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB6, 0xE2, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xDC, 0xDD, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0xCD, 0xB9, 0xC8, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE1, 0x55, 0xE1, 0x51, 0x00, 0x00, /* 0xF8-0xFB */
+	0xE1, 0x4B, 0xB9, 0xC2, 0xB9, 0xBE, 0xE1, 0x54, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_56[512] = {
+	0xB9, 0xBF, 0xE1, 0x4E, 0xE1, 0x50, 0x00, 0x00, /* 0x00-0x03 */
+	0xE1, 0x53, 0x00, 0x00, 0xB9, 0xC4, 0x00, 0x00, /* 0x04-0x07 */
+	0xB9, 0xCB, 0xB9, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xE1, 0x49, 0xB9, 0xC6, 0xB9, 0xC7, 0xE1, 0x4C, /* 0x0C-0x0F */
+	0xB9, 0xCC, 0x00, 0x00, 0xE1, 0x4A, 0xE1, 0x4F, /* 0x10-0x13 */
+	0xB9, 0xC3, 0xE1, 0x48, 0xB9, 0xC9, 0xB9, 0xC1, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB9, 0xC0, /* 0x18-0x1B */
+	0xE1, 0x4D, 0xE1, 0x52, 0x00, 0x00, 0xB9, 0xCA, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x47, /* 0x24-0x27 */
+	0x00, 0x00, 0xBC, 0x4D, 0xE5, 0x47, 0x00, 0x00, /* 0x28-0x2B */
+	0xE5, 0x44, 0x00, 0x00, 0xBC, 0x47, 0xBC, 0x53, /* 0x2C-0x2F */
+	0xBC, 0x54, 0x00, 0x00, 0xBC, 0x4A, 0xE5, 0x42, /* 0x30-0x33 */
+	0xBC, 0x4C, 0xE4, 0xF9, 0xBC, 0x52, 0x00, 0x00, /* 0x34-0x37 */
+	0xE5, 0x46, 0xBC, 0x49, 0xE5, 0x48, 0xBC, 0x48, /* 0x38-0x3B */
+	0x00, 0x00, 0xE5, 0x43, 0xE5, 0x45, 0xBC, 0x4B, /* 0x3C-0x3F */
+	0xE5, 0x41, 0xE4, 0xFA, 0xE4, 0xF7, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xD8, 0x6B, 0xE4, 0xFD, 0x00, 0x00, /* 0x44-0x47 */
+	0xE4, 0xF6, 0xE4, 0xFC, 0xE4, 0xFB, 0x00, 0x00, /* 0x48-0x4B */
+	0xE4, 0xF8, 0x00, 0x00, 0xBC, 0x4F, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBC, 0x4E, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBC, 0x50, /* 0x54-0x57 */
+	0xE4, 0xFE, 0xBE, 0xB2, 0xE5, 0x40, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x45, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE8, 0xFD, 0x00, 0x00, 0xBE, 0xBE, 0xE9, 0x42, /* 0x60-0x63 */
+	0xBE, 0xB6, 0xBE, 0xBA, 0xE9, 0x41, 0x00, 0x00, /* 0x64-0x67 */
+	0xBE, 0xB9, 0xBE, 0xB5, 0xBE, 0xB8, 0xBE, 0xB3, /* 0x68-0x6B */
+	0xBE, 0xBD, 0xE9, 0x43, 0xE8, 0xFE, 0xBE, 0xBC, /* 0x6C-0x6F */
+	0xE8, 0xFC, 0xBE, 0xBB, 0xE9, 0x44, 0xE9, 0x40, /* 0x70-0x73 */
+	0xBC, 0x51, 0x00, 0x00, 0xBE, 0xBF, 0xE9, 0x46, /* 0x74-0x77 */
+	0xBE, 0xB7, 0xBE, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xC6, 0xEC, 0xC8, /* 0x7C-0x7F */
+
+	0xC0, 0x7B, 0xEC, 0xC9, 0xEC, 0xC7, 0xEC, 0xC5, /* 0x80-0x83 */
+	0xEC, 0xC4, 0xC0, 0x7D, 0xEC, 0xC3, 0xC0, 0x7E, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xEC, 0xC1, 0xEC, 0xC2, 0xC0, 0x7A, 0xC0, 0xA1, /* 0x8C-0x8F */
+	0xC0, 0x7C, 0x00, 0x00, 0x00, 0x00, 0xEC, 0xC0, /* 0x90-0x93 */
+	0x00, 0x00, 0xC2, 0x50, 0x00, 0x00, 0xEF, 0xBC, /* 0x94-0x97 */
+	0xEF, 0xBA, 0xEF, 0xBF, 0xEF, 0xBD, 0x00, 0x00, /* 0x98-0x9B */
+	0xEF, 0xBB, 0xEF, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xC3, 0x60, 0xF1, 0xF2, 0xF1, 0xF3, /* 0xA4-0xA7 */
+	0xC4, 0x56, 0x00, 0x00, 0xF1, 0xF4, 0xF1, 0xF0, /* 0xA8-0xAB */
+	0xF1, 0xF5, 0xF1, 0xF1, 0xC2, 0x51, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xFE, 0xF4, 0x41, /* 0xB0-0xB3 */
+	0xC4, 0x59, 0xF4, 0x40, 0xC4, 0x58, 0xC4, 0x57, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xC4, 0x5A, 0xF5, 0xC5, 0xF5, 0xC6, 0x00, 0x00, /* 0xBC-0xBF */
+	0xC4, 0xDA, 0xC4, 0xD9, 0xC4, 0xDB, 0xF5, 0xC4, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xF6, 0xD8, 0xF6, 0xD7, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xC5, 0x6D, 0xC5, 0x6F, 0xC5, 0x6E, 0xF6, 0xD9, /* 0xC8-0xCB */
+	0xC5, 0xC8, 0xF8, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xC5, 0xF1, 0x00, 0x00, 0xF8, 0xA5, /* 0xD0-0xD3 */
+	0xF8, 0xEE, 0x00, 0x00, 0x00, 0x00, 0xC9, 0x49, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xA5, 0x7D, 0xA5, 0x7C, /* 0xD8-0xDB */
+	0x00, 0x00, 0xA6, 0x5F, 0xA6, 0x5E, 0xC9, 0xC7, /* 0xDC-0xDF */
+	0xA6, 0x5D, 0xC9, 0xC6, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xA7, 0x79, 0xCA, 0xA9, 0x00, 0x00, 0xCA, 0xA8, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0x77, 0xA7, 0x7A, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xA7, 0x00, 0x00, /* 0xEC-0xEF */
+	0xA7, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xF0, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xCB, 0xF1, 0xA9, 0x54, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xAA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_57[512] = {
+	0x00, 0x00, 0xD1, 0x48, 0xD1, 0x49, 0xAE, 0x45, /* 0x00-0x03 */
+	0xAE, 0x46, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xAC, /* 0x04-0x07 */
+	0xB0, 0xE9, 0xB0, 0xEB, 0xD4, 0xAB, 0xB0, 0xEA, /* 0x08-0x0B */
+	0xD8, 0x7C, 0xB3, 0xF2, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xE9, 0xB6, 0xEA, /* 0x10-0x13 */
+	0xDC, 0xE1, 0x00, 0x00, 0xB9, 0xCF, 0x00, 0x00, /* 0x14-0x17 */
+	0xB9, 0xCE, 0x00, 0x00, 0xE5, 0x49, 0xE9, 0x48, /* 0x18-0x1B */
+	0xE9, 0x47, 0x00, 0x00, 0xF9, 0x6B, 0xA4, 0x67, /* 0x1C-0x1F */
+	0xC9, 0x59, 0x00, 0x00, 0xC9, 0x6E, 0xC9, 0x6F, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xA6, 0x62, 0xA6, 0x66, 0xC9, 0xC9, 0x00, 0x00, /* 0x28-0x2B */
+	0xA6, 0x64, 0xA6, 0x63, 0xC9, 0xC8, 0xA6, 0x65, /* 0x2C-0x2F */
+	0xA6, 0x61, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x60, /* 0x30-0x33 */
+	0xC9, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xA6, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xA3, 0x00, 0x00, /* 0x3C-0x3F */
+	0xA7, 0x7D, 0xCA, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xCA, 0xAB, 0x00, 0x00, 0xA7, 0xA1, /* 0x44-0x47 */
+	0x00, 0x00, 0xCA, 0xAD, 0xA7, 0x7B, 0xCA, 0xAE, /* 0x48-0x4B */
+	0xCA, 0xAC, 0xA7, 0x7E, 0xA7, 0xA2, 0xA7, 0xA5, /* 0x4C-0x4F */
+	0xA7, 0xA4, 0xA7, 0x7C, 0xCA, 0xAF, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xA9, 0x59, 0xCB, 0xFE, 0x00, 0x00, /* 0x60-0x63 */
+	0xA9, 0x5B, 0x00, 0x00, 0xA9, 0x5A, 0x00, 0x00, /* 0x64-0x67 */
+	0xCC, 0x40, 0xA9, 0x58, 0xA9, 0x57, 0xCB, 0xF5, /* 0x68-0x6B */
+	0x00, 0x00, 0xCB, 0xF4, 0x00, 0x00, 0xCB, 0xF2, /* 0x6C-0x6F */
+	0xCB, 0xF7, 0xCB, 0xF6, 0xCB, 0xF3, 0xCB, 0xFC, /* 0x70-0x73 */
+	0xCB, 0xFD, 0xCB, 0xFA, 0xCB, 0xF8, 0xA9, 0x56, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xFB, /* 0x78-0x7B */
+	0xA9, 0x5C, 0xCC, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0xCB, 0xF9, 0x00, 0x00, 0xAB, 0xAB, 0xA9, 0x55, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xAC, /* 0x88-0x8B */
+	0xCE, 0x54, 0x00, 0x00, 0x00, 0x00, 0xCE, 0x5A, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xB2, /* 0x90-0x93 */
+	0xCE, 0x58, 0xCE, 0x5E, 0x00, 0x00, 0xCE, 0x55, /* 0x94-0x97 */
+	0xCE, 0x59, 0xCE, 0x5B, 0xCE, 0x5D, 0xCE, 0x57, /* 0x98-0x9B */
+	0x00, 0x00, 0xCE, 0x56, 0xCE, 0x51, 0xCE, 0x52, /* 0x9C-0x9F */
+	0xAB, 0xAD, 0x00, 0x00, 0xAB, 0xAF, 0xAB, 0xAE, /* 0xA0-0xA3 */
+	0xCE, 0x53, 0xCE, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0xB1, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xCE, 0x50, 0xD1, 0x53, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xD1, 0x52, 0xD1, 0x57, 0xD1, 0x4E, 0x00, 0x00, /* 0xB8-0xBB */
+	0xD1, 0x51, 0xD1, 0x50, 0x00, 0x00, 0xD1, 0x54, /* 0xBC-0xBF */
+	0x00, 0x00, 0xD1, 0x58, 0xAE, 0x47, 0xAE, 0x4A, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0x4F, 0xD1, 0x55, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAE, 0x49, /* 0xC8-0xCB */
+	0xD1, 0x4A, 0x00, 0x00, 0xAB, 0xB0, 0xD4, 0xBA, /* 0xCC-0xCF */
+	0xD1, 0x56, 0x00, 0x00, 0xD1, 0x4D, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xAE, 0x48, 0xD1, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD4, 0xB1, 0x00, 0x00, 0x00, 0x00, 0xB0, 0xEC, /* 0xDC-0xDF */
+	0xB0, 0xF0, 0xD4, 0xC1, 0xD4, 0xAF, 0xD4, 0xBD, /* 0xE0-0xE3 */
+	0xB0, 0xF1, 0xD4, 0xBF, 0x00, 0x00, 0xD4, 0xC5, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xD4, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xD4, 0xC0, 0xD4, 0xB4, 0xD4, 0xBC, 0x00, 0x00, /* 0xEC-0xEF */
+	0xD4, 0xCA, 0xD4, 0xC8, 0xD4, 0xBE, 0xD4, 0xB9, /* 0xF0-0xF3 */
+	0xD4, 0xB2, 0xD8, 0xA6, 0xD4, 0xB0, 0xB0, 0xF5, /* 0xF4-0xF7 */
+	0xD4, 0xB7, 0xB0, 0xF6, 0xB0, 0xF2, 0xD4, 0xAD, /* 0xF8-0xFB */
+	0xD4, 0xC3, 0xD4, 0xB5, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_58[512] = {
+	0xD4, 0xB3, 0xD4, 0xC6, 0xB0, 0xF3, 0x00, 0x00, /* 0x00-0x03 */
+	0xD4, 0xCC, 0xB0, 0xED, 0xB0, 0xEF, 0xD4, 0xBB, /* 0x04-0x07 */
+	0xD4, 0xB6, 0xAE, 0x4B, 0xB0, 0xEE, 0xD4, 0xB8, /* 0x08-0x0B */
+	0xD4, 0xC7, 0xD4, 0xCB, 0xD4, 0xC2, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD4, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xD4, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xD8, 0xA1, 0x00, 0x00, 0xD8, 0xAA, /* 0x18-0x1B */
+	0xD8, 0xA9, 0xB3, 0xFA, 0xD8, 0xA2, 0x00, 0x00, /* 0x1C-0x1F */
+	0xB3, 0xFB, 0xB3, 0xF9, 0x00, 0x00, 0xD8, 0xA4, /* 0x20-0x23 */
+	0xB3, 0xF6, 0xD8, 0xA8, 0x00, 0x00, 0xD8, 0xA3, /* 0x24-0x27 */
+	0xD8, 0xA5, 0xD8, 0x7D, 0xB3, 0xF4, 0x00, 0x00, /* 0x28-0x2B */
+	0xD8, 0xB2, 0xD8, 0xB1, 0xD8, 0xAE, 0xB3, 0xF3, /* 0x2C-0x2F */
+	0xB3, 0xF7, 0xB3, 0xF8, 0xD1, 0x4B, 0xD8, 0xAB, /* 0x30-0x33 */
+	0xB3, 0xF5, 0xB0, 0xF4, 0xD8, 0xAD, 0xD8, 0x7E, /* 0x34-0x37 */
+	0xD8, 0xB0, 0xD8, 0xAF, 0x00, 0x00, 0xD8, 0xB3, /* 0x38-0x3B */
+	0x00, 0x00, 0xDC, 0xEF, 0x00, 0x00, 0xD8, 0xAC, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xD8, 0xA7, 0xDC, 0xE7, 0xB6, 0xF4, 0xB6, 0xF7, /* 0x48-0x4B */
+	0xB6, 0xF2, 0xDC, 0xE6, 0xDC, 0xEA, 0xDC, 0xE5, /* 0x4C-0x4F */
+	0x00, 0x00, 0xB6, 0xEC, 0xB6, 0xF6, 0xDC, 0xE2, /* 0x50-0x53 */
+	0xB6, 0xF0, 0xDC, 0xE9, 0x00, 0x00, 0xB6, 0xEE, /* 0x54-0x57 */
+	0xB6, 0xED, 0xDC, 0xEC, 0xB6, 0xEF, 0xDC, 0xEE, /* 0x58-0x5B */
+	0x00, 0x00, 0xDC, 0xEB, 0xB6, 0xEB, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xF5, 0xDC, 0xF0, /* 0x60-0x63 */
+	0xDC, 0xE4, 0xDC, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xDC, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xB6, 0xF1, /* 0x68-0x6B */
+	0x00, 0x00, 0xB6, 0xF3, 0x00, 0x00, 0xDC, 0xE8, /* 0x6C-0x6F */
+	0x00, 0x00, 0xDC, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE1, 0x5D, 0xB9, 0xD0, 0xE1, 0x63, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xB9, 0xD5, 0xE1, 0x5F, 0xE1, 0x66, /* 0x78-0x7B */
+	0xE1, 0x57, 0xB9, 0xD7, 0xB9, 0xD1, 0xE1, 0x5C, /* 0x7C-0x7F */
+
+	0xBC, 0x55, 0xE1, 0x5B, 0xE1, 0x64, 0xB9, 0xD2, /* 0x80-0x83 */
+	0x00, 0x00, 0xB9, 0xD6, 0xE1, 0x5A, 0xE1, 0x60, /* 0x84-0x87 */
+	0xE1, 0x65, 0xE1, 0x56, 0xB9, 0xD4, 0xE1, 0x5E, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0x62, 0xE1, 0x68, /* 0x8C-0x8F */
+	0xE1, 0x58, 0xE1, 0x61, 0x00, 0x00, 0xB9, 0xD3, /* 0x90-0x93 */
+	0xE1, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xE1, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xBC, 0x59, 0xE5, 0x4B, 0xBC, 0x57, 0xBC, 0x56, /* 0x9C-0x9F */
+	0xE5, 0x4D, 0xE5, 0x52, 0x00, 0x00, 0xE5, 0x4E, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xE5, 0x51, 0xBC, 0x5C, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xBE, 0xA5, 0xBC, 0x5B, 0x00, 0x00, 0xE5, 0x4A, /* 0xA8-0xAB */
+	0xE5, 0x50, 0x00, 0x00, 0xBC, 0x5A, 0xE5, 0x4F, /* 0xAC-0xAF */
+	0x00, 0x00, 0xE5, 0x4C, 0x00, 0x00, 0xBC, 0x58, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x4D, 0xF9, 0xD9, /* 0xB8-0xBB */
+	0xE9, 0x4F, 0xE9, 0x4A, 0xBE, 0xC1, 0xE9, 0x4C, /* 0xBC-0xBF */
+	0x00, 0x00, 0xBE, 0xC0, 0xE9, 0x4E, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xBE, 0xC3, 0xE9, 0x50, 0xBE, 0xC2, /* 0xC4-0xC7 */
+	0xE9, 0x49, 0xE9, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0xA5, 0xEC, 0xCC, /* 0xCC-0xCF */
+	0x00, 0x00, 0xC0, 0xA4, 0xEC, 0xCD, 0xC0, 0xA3, /* 0xD0-0xD3 */
+	0xEC, 0xCB, 0xC0, 0xA2, 0xEC, 0xCA, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xC2, 0x53, 0xC2, 0x52, 0xF1, 0xF6, 0xF1, 0xF8, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF1, 0xF7, 0xC3, 0x61, 0xC3, 0x62, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xC3, 0x63, 0xF4, 0x42, /* 0xE0-0xE3 */
+	0xC4, 0x5B, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xD3, /* 0xE4-0xE7 */
+	0xF7, 0xD2, 0xC5, 0xF2, 0x00, 0x00, 0xA4, 0x68, /* 0xE8-0xEB */
+	0xA4, 0xD0, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xA7, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xCE, 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xB3, 0xFC, 0xB3, 0xFD, 0x00, 0x00, /* 0xF8-0xFB */
+	0xDC, 0xF2, 0xB9, 0xD8, 0xE1, 0x69, 0xE5, 0x53, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_59[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC9, 0x5A, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xB0, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xCC, 0x42, 0xCE, 0x60, 0xD1, 0x59, 0xAE, 0x4C, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xF9, 0x00, 0x00, /* 0x10-0x13 */
+	0xC4, 0xDC, 0xA4, 0x69, 0xA5, 0x7E, 0xC9, 0x70, /* 0x14-0x17 */
+	0x00, 0x00, 0xA6, 0x67, 0xA6, 0x68, 0x00, 0x00, /* 0x18-0x1B */
+	0xA9, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xB0, 0xF7, 0x00, 0x00, 0xB9, 0xDA, 0x00, 0x00, /* 0x20-0x23 */
+	0xB9, 0xDB, 0xB9, 0xD9, 0x00, 0x00, 0xA4, 0x6A, /* 0x24-0x27 */
+	0x00, 0x00, 0xA4, 0xD1, 0xA4, 0xD3, 0xA4, 0xD2, /* 0x28-0x2B */
+	0xC9, 0x5B, 0xA4, 0xD4, 0xA5, 0xA1, 0xC9, 0x71, /* 0x2C-0x2F */
+	0x00, 0x00, 0xA5, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x69, /* 0x34-0x37 */
+	0xA6, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xC9, 0xCB, 0x00, 0x00, 0xA7, 0xA8, 0x00, 0x00, /* 0x3C-0x3F */
+	0xCA, 0xB1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xA9, 0x61, 0xCC, 0x43, 0x00, 0x00, 0xA9, 0x5F, /* 0x44-0x47 */
+	0xA9, 0x60, 0xA9, 0x5E, 0xD1, 0x5A, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0xB6, 0xAB, 0xB5, /* 0x4C-0x4F */
+	0xAB, 0xB7, 0xAB, 0xB4, 0x00, 0x00, 0xCE, 0x61, /* 0x50-0x53 */
+	0xA9, 0x62, 0xAB, 0xB3, 0x00, 0x00, 0xAE, 0x4D, /* 0x54-0x57 */
+	0xAE, 0x4E, 0x00, 0x00, 0xAE, 0x4F, 0x00, 0x00, /* 0x58-0x5B */
+	0xD4, 0xCD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xB3, 0xFE, 0xD8, 0xB4, 0xB0, 0xF8, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB6, 0xF8, /* 0x64-0x67 */
+	0x00, 0x00, 0xB9, 0xDD, 0xB9, 0xDC, 0xE1, 0x6A, /* 0x68-0x6B */
+	0x00, 0x00, 0xBC, 0x5D, 0xBE, 0xC4, 0x00, 0x00, /* 0x6C-0x6F */
+	0xEF, 0xC0, 0xF6, 0xDA, 0xF7, 0xD4, 0xA4, 0x6B, /* 0x70-0x73 */
+	0xA5, 0xA3, 0x00, 0x00, 0xA5, 0xA4, 0xC9, 0xD1, /* 0x74-0x77 */
+	0xA6, 0x6C, 0xA6, 0x6F, 0x00, 0x00, 0xC9, 0xCF, /* 0x78-0x7B */
+	0xC9, 0xCD, 0xA6, 0x6E, 0xC9, 0xD0, 0xC9, 0xD2, /* 0x7C-0x7F */
+
+	0xC9, 0xCC, 0xA6, 0x71, 0xA6, 0x70, 0xA6, 0x6D, /* 0x80-0x83 */
+	0xA6, 0x6B, 0xC9, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xB3, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xA7, 0xB0, 0xCA, 0xB6, 0xCA, 0xB9, /* 0x8C-0x8F */
+	0xCA, 0xB8, 0x00, 0x00, 0xA7, 0xAA, 0xA7, 0xB2, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xAF, 0xCA, 0xB5, /* 0x94-0x97 */
+	0xCA, 0xB3, 0xA7, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xA7, 0xA9, 0xA7, 0xAC, 0x00, 0x00, /* 0x9C-0x9F */
+	0xCA, 0xB4, 0xCA, 0xBB, 0xCA, 0xB7, 0xA7, 0xAD, /* 0xA0-0xA3 */
+	0xA7, 0xB1, 0xA7, 0xB4, 0xCA, 0xB2, 0xCA, 0xBA, /* 0xA4-0xA7 */
+	0xA7, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0x67, 0xA9, 0x6F, /* 0xAC-0xAF */
+	0x00, 0x00, 0xCC, 0x4F, 0xCC, 0x48, 0xA9, 0x70, /* 0xB0-0xB3 */
+	0xCC, 0x53, 0xCC, 0x44, 0xCC, 0x4B, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xA9, 0x66, 0xCC, 0x45, 0xA9, 0x64, /* 0xB8-0xBB */
+	0xCC, 0x4C, 0xCC, 0x50, 0xA9, 0x63, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCC, 0x51, 0xCC, 0x4A, 0x00, 0x00, 0xCC, 0x4D, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xA9, 0x72, 0xA9, 0x69, 0xCC, 0x54, /* 0xC4-0xC7 */
+	0xCC, 0x52, 0x00, 0x00, 0xA9, 0x6E, 0xA9, 0x6C, /* 0xC8-0xCB */
+	0xCC, 0x49, 0xA9, 0x6B, 0xCC, 0x47, 0xCC, 0x46, /* 0xCC-0xCF */
+	0xA9, 0x6A, 0xA9, 0x68, 0xA9, 0x71, 0xA9, 0x6D, /* 0xD0-0xD3 */
+	0xA9, 0x65, 0x00, 0x00, 0xCC, 0x4E, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xAB, 0xB9, 0x00, 0x00, 0xAB, 0xC0, 0xCE, 0x6F, /* 0xD8-0xDB */
+	0xAB, 0xB8, 0xCE, 0x67, 0xCE, 0x63, 0x00, 0x00, /* 0xDC-0xDF */
+	0xCE, 0x73, 0xCE, 0x62, 0x00, 0x00, 0xAB, 0xBB, /* 0xE0-0xE3 */
+	0xCE, 0x6C, 0xAB, 0xBE, 0xAB, 0xC1, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xAB, 0xBC, 0xCE, 0x70, 0xAB, 0xBF, 0x00, 0x00, /* 0xE8-0xEB */
+	0xAE, 0x56, 0xCE, 0x76, 0xCE, 0x64, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xCE, 0x66, 0xCE, 0x6D, 0xCE, 0x71, /* 0xF0-0xF3 */
+	0xCE, 0x75, 0xCE, 0x72, 0xCE, 0x6B, 0xCE, 0x6E, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0x68, 0xAB, 0xC3, /* 0xF8-0xFB */
+	0xCE, 0x6A, 0xCE, 0x69, 0xCE, 0x74, 0xAB, 0xBA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5A[512] = {
+	0xCE, 0x65, 0xAB, 0xC2, 0x00, 0x00, 0xAB, 0xBD, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xAE, 0x5C, 0xD1, 0x62, 0x00, 0x00, /* 0x08-0x0B */
+	0xAE, 0x5B, 0x00, 0x00, 0x00, 0x00, 0xD1, 0x60, /* 0x0C-0x0F */
+	0x00, 0x00, 0xAE, 0x50, 0x00, 0x00, 0xAE, 0x55, /* 0x10-0x13 */
+	0x00, 0x00, 0xD1, 0x5F, 0xD1, 0x5C, 0xD1, 0x61, /* 0x14-0x17 */
+	0xAE, 0x51, 0xD1, 0x5B, 0x00, 0x00, 0xAE, 0x54, /* 0x18-0x1B */
+	0xAE, 0x52, 0x00, 0x00, 0xD1, 0x63, 0xAE, 0x53, /* 0x1C-0x1F */
+	0xAE, 0x57, 0x00, 0x00, 0x00, 0x00, 0xAE, 0x58, /* 0x20-0x23 */
+	0x00, 0x00, 0xAE, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xAE, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD1, 0x5D, 0xD1, 0x5E, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0x64, /* 0x30-0x33 */
+	0x00, 0x00, 0xD4, 0xD4, 0xB0, 0xF9, 0xD8, 0xC2, /* 0x34-0x37 */
+	0xD4, 0xD3, 0xD4, 0xE6, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xB1, 0x40, 0x00, 0x00, 0xD4, 0xE4, 0x00, 0x00, /* 0x3C-0x3F */
+	0xB0, 0xFE, 0xB0, 0xFA, 0xD4, 0xED, 0xD4, 0xDD, /* 0x40-0x43 */
+	0xD4, 0xE0, 0x00, 0x00, 0xB1, 0x43, 0xD4, 0xEA, /* 0x44-0x47 */
+	0xD4, 0xE2, 0xB0, 0xFB, 0xB1, 0x44, 0x00, 0x00, /* 0x48-0x4B */
+	0xD4, 0xE7, 0xD4, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xD4, 0xD6, 0xD4, 0xEB, 0xD4, 0xDF, 0xD4, 0xDA, /* 0x50-0x53 */
+	0x00, 0x00, 0xD4, 0xD0, 0xD4, 0xEC, 0xD4, 0xDC, /* 0x54-0x57 */
+	0xD4, 0xCF, 0x00, 0x00, 0xB1, 0x42, 0xD4, 0xE1, /* 0x58-0x5B */
+	0xD4, 0xEE, 0xD4, 0xDE, 0xD4, 0xD2, 0xD4, 0xD7, /* 0x5C-0x5F */
+	0xD4, 0xCE, 0x00, 0x00, 0xB1, 0x41, 0x00, 0x00, /* 0x60-0x63 */
+	0xD4, 0xDB, 0xD4, 0xD8, 0xB0, 0xFC, 0xD4, 0xD1, /* 0x64-0x67 */
+	0x00, 0x00, 0xD4, 0xE9, 0xB0, 0xFD, 0x00, 0x00, /* 0x68-0x6B */
+	0xD4, 0xD9, 0xD4, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xD4, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0x40, /* 0x74-0x77 */
+	0xD8, 0xBB, 0x00, 0x00, 0xD8, 0xB8, 0xD8, 0xC9, /* 0x78-0x7B */
+	0xD8, 0xBD, 0xD8, 0xCA, 0x00, 0x00, 0xB4, 0x42, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8, 0xC6, /* 0x80-0x83 */
+	0xD8, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xC4, 0xD8, 0xC7, /* 0x88-0x8B */
+	0xD8, 0xCB, 0x00, 0x00, 0xD4, 0xE3, 0xD8, 0xCD, /* 0x8C-0x8F */
+	0xDD, 0x47, 0x00, 0x00, 0xB4, 0x43, 0xD8, 0xCE, /* 0x90-0x93 */
+	0xD8, 0xB6, 0xD8, 0xC0, 0x00, 0x00, 0xD8, 0xC5, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xB4, 0x41, 0xB4, 0x44, /* 0x98-0x9B */
+	0xD8, 0xCC, 0xD8, 0xCF, 0xD8, 0xBA, 0xD8, 0xB7, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xB9, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xD8, 0xBE, 0xD8, 0xBC, 0xB4, 0x45, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xD8, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xD8, 0xBF, 0x00, 0x00, 0xD8, 0xC1, 0xD8, 0xB5, /* 0xAC-0xAF */
+	0xDC, 0xFA, 0xDC, 0xF8, 0xB7, 0x42, 0xB7, 0x40, /* 0xB0-0xB3 */
+	0xDD, 0x43, 0xDC, 0xF9, 0xDD, 0x44, 0xDD, 0x40, /* 0xB4-0xB7 */
+	0xDC, 0xF7, 0xDD, 0x46, 0xDC, 0xF6, 0xDC, 0xFD, /* 0xB8-0xBB */
+	0xB6, 0xFE, 0xB6, 0xFD, 0xB6, 0xFC, 0xDC, 0xFB, /* 0xBC-0xBF */
+	0xDD, 0x41, 0xB6, 0xF9, 0xB7, 0x41, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xDC, 0xF4, 0x00, 0x00, 0xDC, 0xFE, 0xDC, 0xF3, /* 0xC4-0xC7 */
+	0xDC, 0xFC, 0xB6, 0xFA, 0xDD, 0x42, 0xDC, 0xF5, /* 0xC8-0xCB */
+	0xB6, 0xFB, 0xDD, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE1, 0x6E, 0xB9, 0xE2, 0xB9, 0xE1, /* 0xD4-0xD7 */
+	0xB9, 0xE3, 0xE1, 0x7A, 0xE1, 0x70, 0xE1, 0x76, /* 0xD8-0xDB */
+	0xE1, 0x6B, 0xE1, 0x79, 0xE1, 0x78, 0xE1, 0x7C, /* 0xDC-0xDF */
+	0xE1, 0x75, 0xB9, 0xDE, 0xE1, 0x74, 0xB9, 0xE4, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE1, 0x6D, 0xB9, 0xDF, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE1, 0x7B, 0xB9, 0xE0, 0xE1, 0x6F, 0xE1, 0x72, /* 0xE8-0xEB */
+	0xE1, 0x77, 0xE1, 0x71, 0xE1, 0x6C, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0x73, /* 0xF0-0xF3 */
+	0xE5, 0x55, 0xBC, 0x61, 0xE5, 0x58, 0xE5, 0x57, /* 0xF4-0xF7 */
+	0xE5, 0x5A, 0xE5, 0x5C, 0xF9, 0xDC, 0xBC, 0x5F, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE5, 0x56, 0x00, 0x00, 0xE5, 0x54, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5B[512] = {
+	0x00, 0x00, 0xE5, 0x5D, 0xE5, 0x5B, 0xE5, 0x59, /* 0x00-0x03 */
+	0x00, 0x00, 0xE5, 0x5F, 0x00, 0x00, 0xE5, 0x5E, /* 0x04-0x07 */
+	0xBC, 0x63, 0xBC, 0x5E, 0x00, 0x00, 0xBC, 0x60, /* 0x08-0x0B */
+	0xBC, 0x62, 0x00, 0x00, 0x00, 0x00, 0xE5, 0x60, /* 0x0C-0x0F */
+	0xE9, 0x57, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x56, /* 0x10-0x13 */
+	0xE9, 0x55, 0x00, 0x00, 0xE9, 0x58, 0xE9, 0x51, /* 0x14-0x17 */
+	0x00, 0x00, 0xE9, 0x52, 0xE9, 0x5A, 0xE9, 0x53, /* 0x18-0x1B */
+	0x00, 0x00, 0xBE, 0xC5, 0xE9, 0x5C, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE9, 0x5B, 0xE9, 0x54, 0x00, 0x00, 0xEC, 0xD1, /* 0x20-0x23 */
+	0xC0, 0xA8, 0xEC, 0xCF, 0xEC, 0xD4, 0xEC, 0xD3, /* 0x24-0x27 */
+	0xE9, 0x59, 0x00, 0x00, 0xC0, 0xA7, 0x00, 0x00, /* 0x28-0x2B */
+	0xEC, 0xD2, 0xEC, 0xCE, 0xEC, 0xD6, 0xEC, 0xD5, /* 0x2C-0x2F */
+	0xC0, 0xA6, 0x00, 0x00, 0xEC, 0xD0, 0x00, 0x00, /* 0x30-0x33 */
+	0xBE, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xC2, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xEF, 0xC1, 0xF1, 0xFA, 0xF1, 0xFB, 0xF1, 0xFC, /* 0x3C-0x3F */
+	0xC4, 0x5C, 0x00, 0x00, 0x00, 0x00, 0xC4, 0x5D, /* 0x40-0x43 */
+	0x00, 0x00, 0xF4, 0x43, 0x00, 0x00, 0xF5, 0xC8, /* 0x44-0x47 */
+	0xF5, 0xC7, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xDB, /* 0x48-0x4B */
+	0xF6, 0xDC, 0xF7, 0xD5, 0xF8, 0xA7, 0x00, 0x00, /* 0x4C-0x4F */
+	0xA4, 0x6C, 0xA4, 0x6D, 0x00, 0x00, 0xA4, 0x6E, /* 0x50-0x53 */
+	0xA4, 0xD5, 0xA5, 0xA5, 0xC9, 0xD3, 0xA6, 0x72, /* 0x54-0x57 */
+	0xA6, 0x73, 0x00, 0x00, 0xA7, 0xB7, 0xA7, 0xB8, /* 0x58-0x5B */
+	0xA7, 0xB6, 0xA7, 0xB5, 0x00, 0x00, 0xA9, 0x73, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0x55, 0xA9, 0x75, /* 0x60-0x63 */
+	0xA9, 0x74, 0xCC, 0x56, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xAB, 0xC4, 0x00, 0x00, 0xAE, 0x5D, /* 0x68-0x6B */
+	0xD1, 0x65, 0x00, 0x00, 0xD4, 0xF0, 0x00, 0x00, /* 0x6C-0x6F */
+	0xB1, 0x45, 0xB4, 0x47, 0xD4, 0xEF, 0xB4, 0x46, /* 0x70-0x73 */
+	0x00, 0x00, 0xB9, 0xE5, 0x00, 0x00, 0xE1, 0x7D, /* 0x74-0x77 */
+	0xBE, 0xC7, 0x00, 0x00, 0xC0, 0xA9, 0xEC, 0xD7, /* 0x78-0x7B */
+	0x00, 0x00, 0xC4, 0x5E, 0x00, 0x00, 0xC5, 0x70, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0xC9, 0x72, 0x00, 0x00, 0xA5, 0xA6, /* 0x80-0x83 */
+	0xC9, 0x73, 0xA6, 0x76, 0x00, 0x00, 0xA6, 0x74, /* 0x84-0x87 */
+	0xA6, 0x75, 0xA6, 0x77, 0x00, 0x00, 0xA7, 0xBA, /* 0x88-0x8B */
+	0xA7, 0xB9, 0x00, 0x00, 0xCA, 0xBC, 0xA7, 0xBB, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xBD, 0xCC, 0x57, /* 0x90-0x93 */
+	0x00, 0x00, 0xCC, 0x58, 0x00, 0x00, 0xA9, 0x76, /* 0x94-0x97 */
+	0xA9, 0x78, 0xA9, 0x7A, 0xA9, 0x77, 0xA9, 0x7B, /* 0x98-0x9B */
+	0xA9, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0xC8, 0xAB, 0xC5, /* 0xA0-0xA3 */
+	0xAB, 0xC7, 0xAB, 0xC9, 0xAB, 0xC6, 0xD1, 0x66, /* 0xA4-0xA7 */
+	0xCE, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xD1, 0x68, 0xD1, 0x67, 0xAE, 0x63, 0x00, 0x00, /* 0xAC-0xAF */
+	0xAE, 0x5F, 0x00, 0x00, 0x00, 0x00, 0xAE, 0x60, /* 0xB0-0xB3 */
+	0xAE, 0x62, 0xAE, 0x64, 0xAE, 0x61, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xAE, 0x66, 0xAE, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB1, 0x4A, /* 0xBC-0xBF */
+	0xD4, 0xF2, 0xD4, 0xF1, 0xB1, 0x49, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xB1, 0x48, 0xB1, 0x47, 0xB1, 0x4B, 0xB1, 0x46, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0xD5, 0xD8, 0xD2, /* 0xC8-0xCB */
+	0xB4, 0x49, 0xD8, 0xD1, 0xD8, 0xD6, 0x00, 0x00, /* 0xCC-0xCF */
+	0xB4, 0x4B, 0xD8, 0xD4, 0xB4, 0x48, 0xB4, 0x4A, /* 0xD0-0xD3 */
+	0xD8, 0xD3, 0x00, 0x00, 0xDD, 0x48, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xDD, 0x49, 0xDD, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0xE6, 0xB9, 0xEE, /* 0xDC-0xDF */
+	0xE1, 0x7E, 0xB9, 0xE8, 0xB9, 0xEC, 0xE1, 0xA1, /* 0xE0-0xE3 */
+	0xB9, 0xED, 0xB9, 0xE9, 0xB9, 0xEA, 0xB9, 0xE7, /* 0xE4-0xE7 */
+	0xB9, 0xEB, 0xBC, 0x66, 0xD8, 0xD0, 0xBC, 0x67, /* 0xE8-0xEB */
+	0xBC, 0x65, 0x00, 0x00, 0xBC, 0x64, 0xE9, 0x5D, /* 0xEC-0xEF */
+	0xBE, 0xC8, 0xEC, 0xD8, 0xEC, 0xD9, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xC3, 0x64, 0xC4, 0x5F, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xA4, 0x6F, 0x00, 0x00, 0xA6, 0x78, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5C[512] = {
+	0x00, 0x00, 0xAB, 0xCA, 0x00, 0x00, 0xD1, 0x69, /* 0x00-0x03 */
+	0xAE, 0x67, 0x00, 0x00, 0x00, 0x00, 0xB1, 0x4E, /* 0x04-0x07 */
+	0xB1, 0x4D, 0xB1, 0x4C, 0xB4, 0x4C, 0xB4, 0x4D, /* 0x08-0x0B */
+	0xD8, 0xD7, 0xB9, 0xEF, 0xBE, 0xC9, 0xA4, 0x70, /* 0x0C-0x0F */
+	0xC9, 0x5C, 0xA4, 0xD6, 0xC9, 0x74, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xC9, 0xD4, 0xA6, 0x79, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0x7C, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0x4B, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0x71, 0x00, 0x00, /* 0x20-0x23 */
+	0xA4, 0xD7, 0xC9, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xCA, 0xBE, 0x00, 0x00, 0xCA, 0xBF, 0x00, 0x00, /* 0x28-0x2B */
+	0xA7, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD8, 0xD8, 0xB4, 0x4E, 0x00, 0x00, 0xDD, 0x4C, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0xAA, /* 0x34-0x37 */
+	0xA4, 0x72, 0xA4, 0xA8, 0xA4, 0xD8, 0xC9, 0x75, /* 0x38-0x3B */
+	0xA5, 0xA7, 0x00, 0x00, 0xA7, 0xC0, 0xA7, 0xBF, /* 0x3C-0x3F */
+	0xA7, 0xBD, 0xA7, 0xBE, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xCC, 0x59, 0xA9, 0x7E, 0xA9, 0xA1, 0xCC, 0x5A, /* 0x44-0x47 */
+	0xA9, 0x7D, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xCE, /* 0x48-0x4B */
+	0xCE, 0x78, 0xAB, 0xCD, 0xAB, 0xCB, 0xAB, 0xCC, /* 0x4C-0x4F */
+	0xAE, 0x6A, 0xAE, 0x68, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xD1, 0x6B, 0xAE, 0x69, 0xD1, 0x6A, 0x00, 0x00, /* 0x54-0x57 */
+	0xAE, 0x5E, 0xD4, 0xF3, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xB1, 0x50, 0xB1, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xB1, 0x4F, 0x00, 0x00, 0xB9, 0xF0, 0xE1, 0xA2, /* 0x60-0x63 */
+	0xBC, 0x68, 0xBC, 0x69, 0x00, 0x00, 0xE5, 0x61, /* 0x64-0x67 */
+	0xC0, 0xAB, 0xEF, 0xC2, 0xEF, 0xC3, 0x00, 0x00, /* 0x68-0x6B */
+	0xC4, 0xDD, 0xF8, 0xA8, 0xC9, 0x4B, 0xA4, 0xD9, /* 0x6C-0x6F */
+	0x00, 0x00, 0xA4, 0x73, 0x00, 0x00, 0xC9, 0x77, /* 0x70-0x73 */
+	0xC9, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xA6, 0x7A, 0xC9, 0xD7, 0xC9, 0xD8, /* 0x78-0x7B */
+	0xC9, 0xD6, 0x00, 0x00, 0xC9, 0xD9, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xC7, 0x00, 0x00, /* 0x84-0x87 */
+	0xCA, 0xC2, 0xCA, 0xC4, 0xCA, 0xC6, 0xCA, 0xC3, /* 0x88-0x8B */
+	0xA7, 0xC4, 0xCA, 0xC0, 0x00, 0x00, 0xCA, 0xC1, /* 0x8C-0x8F */
+	0xA7, 0xC1, 0xA7, 0xC2, 0xCA, 0xC5, 0xCA, 0xC8, /* 0x90-0x93 */
+	0xA7, 0xC3, 0xCA, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xCC, 0x68, 0x00, 0x00, 0xCC, 0x62, /* 0x9C-0x9F */
+	0xCC, 0x5D, 0xA9, 0xA3, 0xCC, 0x65, 0xCC, 0x63, /* 0xA0-0xA3 */
+	0xCC, 0x5C, 0xCC, 0x69, 0xCC, 0x6C, 0xCC, 0x67, /* 0xA4-0xA7 */
+	0xCC, 0x60, 0xA9, 0xA5, 0xCC, 0x66, 0xA9, 0xA6, /* 0xA8-0xAB */
+	0xCC, 0x61, 0xCC, 0x64, 0xCC, 0x5B, 0xCC, 0x5F, /* 0xAC-0xAF */
+	0xCC, 0x6B, 0xA9, 0xA7, 0x00, 0x00, 0xA9, 0xA8, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xCC, 0x5E, 0xCC, 0x6A, 0xA9, 0xA2, /* 0xB4-0xB7 */
+	0xA9, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xAB, 0xCE, 0xA4, /* 0xC4-0xC7 */
+	0xCE, 0xAA, 0xCE, 0xA3, 0xCE, 0xA5, 0xCE, 0x7D, /* 0xC8-0xCB */
+	0xCE, 0x7B, 0x00, 0x00, 0xCE, 0xAC, 0xCE, 0xA9, /* 0xCC-0xCF */
+	0xCE, 0x79, 0x00, 0x00, 0xAB, 0xD0, 0xCE, 0xA7, /* 0xD0-0xD3 */
+	0xCE, 0xA8, 0x00, 0x00, 0xCE, 0xA6, 0xCE, 0x7C, /* 0xD4-0xD7 */
+	0xCE, 0x7A, 0xAB, 0xCF, 0xCE, 0xA2, 0xCE, 0x7E, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xCE, 0xA1, 0xCE, 0xAD, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xAE, 0x6F, 0x00, 0x00, 0xAE, 0x6E, 0x00, 0x00, /* 0xE8-0xEB */
+	0xD1, 0x6C, 0xAE, 0x6B, 0xD1, 0x6E, 0x00, 0x00, /* 0xEC-0xEF */
+	0xAE, 0x70, 0xD1, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xAE, 0x73, 0x00, 0x00, 0xAE, 0x71, 0xD1, 0x70, /* 0xF4-0xF7 */
+	0xCE, 0xAE, 0xD1, 0x72, 0x00, 0x00, 0xAE, 0x6D, /* 0xF8-0xFB */
+	0x00, 0x00, 0xAE, 0x6C, 0x00, 0x00, 0xD1, 0x6D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5D[512] = {
+	0xD1, 0x71, 0xAE, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xB1, 0x53, 0xB1, 0x52, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD4, 0xF5, /* 0x08-0x0B */
+	0xD4, 0xF9, 0xD4, 0xFB, 0xB1, 0x54, 0xD4, 0xFE, /* 0x0C-0x0F */
+	0x00, 0x00, 0xB1, 0x58, 0xD5, 0x41, 0x00, 0x00, /* 0x10-0x13 */
+	0xB1, 0x5A, 0x00, 0x00, 0xB1, 0x56, 0xB1, 0x5E, /* 0x14-0x17 */
+	0x00, 0x00, 0xB1, 0x5B, 0xD4, 0xF7, 0xB1, 0x55, /* 0x18-0x1B */
+	0x00, 0x00, 0xD4, 0xF6, 0xD4, 0xF4, 0xD5, 0x43, /* 0x1C-0x1F */
+	0xD4, 0xF8, 0x00, 0x00, 0xB1, 0x57, 0xD5, 0x42, /* 0x20-0x23 */
+	0xB1, 0x5C, 0xD4, 0xFD, 0xD4, 0xFC, 0xB1, 0x5D, /* 0x24-0x27 */
+	0xD4, 0xFA, 0xB1, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0x44, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD5, 0x40, 0xD8, 0xE7, 0xD8, 0xEE, 0xD8, 0xE3, /* 0x30-0x33 */
+	0xB4, 0x51, 0xD8, 0xDF, 0xD8, 0xEF, 0xD8, 0xD9, /* 0x34-0x37 */
+	0xD8, 0xEC, 0xD8, 0xEA, 0xD8, 0xE4, 0x00, 0x00, /* 0x38-0x3B */
+	0xD8, 0xED, 0xD8, 0xE6, 0x00, 0x00, 0xD8, 0xDE, /* 0x3C-0x3F */
+	0xD8, 0xF0, 0xD8, 0xDC, 0xD8, 0xE9, 0xD8, 0xDA, /* 0x40-0x43 */
+	0x00, 0x00, 0xD8, 0xF1, 0x00, 0x00, 0xB4, 0x52, /* 0x44-0x47 */
+	0x00, 0x00, 0xD8, 0xEB, 0xDD, 0x4F, 0xD8, 0xDD, /* 0x48-0x4B */
+	0xB4, 0x4F, 0x00, 0x00, 0xD8, 0xE1, 0x00, 0x00, /* 0x4C-0x4F */
+	0xB4, 0x50, 0xD8, 0xE0, 0xD8, 0xE5, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xD8, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xD8, 0xE8, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0x53, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0x56, 0xDD, 0x4E, /* 0x60-0x63 */
+	0x00, 0x00, 0xDD, 0x50, 0x00, 0x00, 0xDD, 0x55, /* 0x64-0x67 */
+	0xDD, 0x54, 0xB7, 0x43, 0x00, 0x00, 0xD8, 0xDB, /* 0x68-0x6B */
+	0xDD, 0x52, 0x00, 0x00, 0x00, 0x00, 0xB7, 0x44, /* 0x6C-0x6F */
+	0x00, 0x00, 0xDD, 0x4D, 0xDD, 0x51, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xA9, /* 0x74-0x77 */
+	0x00, 0x00, 0xE1, 0xB0, 0xE1, 0xA7, 0x00, 0x00, /* 0x78-0x7B */
+	0xE1, 0xAE, 0xE1, 0xA5, 0xE1, 0xAD, 0xE1, 0xB1, /* 0x7C-0x7F */
+	
+	0xE1, 0xA4, 0xE1, 0xA8, 0xE1, 0xA3, 0x00, 0x00, /* 0x80-0x83 */
+	0xB9, 0xF1, 0x00, 0x00, 0xE1, 0xA6, 0xB9, 0xF2, /* 0x84-0x87 */
+	0xE1, 0xAC, 0xE1, 0xAB, 0xE1, 0xAA, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xE1, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x65, 0xE5, 0x67, /* 0x90-0x93 */
+	0xBC, 0x6B, 0xE5, 0x68, 0x00, 0x00, 0xE5, 0x63, /* 0x94-0x97 */
+	0x00, 0x00, 0xE5, 0x62, 0xE5, 0x6C, 0x00, 0x00, /* 0x98-0x9B */
+	0xE5, 0x6A, 0xBC, 0x6A, 0xE5, 0x6D, 0xE5, 0x64, /* 0x9C-0x9F */
+	0xE5, 0x69, 0xE5, 0x6B, 0xE5, 0x66, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE9, 0x61, /* 0xA4-0xA7 */
+	0xE9, 0x66, 0xE9, 0x60, 0xE9, 0x65, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE9, 0x5E, 0xE9, 0x68, 0xE9, 0x64, 0xE9, 0x69, /* 0xAC-0xAF */
+	0xE9, 0x63, 0xE9, 0x5F, 0xE9, 0x67, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE9, 0x6A, 0xE9, 0x62, 0x00, 0x00, 0xEC, 0xDA, /* 0xB4-0xB7 */
+	0xC0, 0xAF, 0x00, 0x00, 0xC0, 0xAD, 0x00, 0x00, /* 0xB8-0xBB */
+	0xC0, 0xAC, 0xC0, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xEF, 0xC4, 0x00, 0x00, 0xF1, 0x72, 0xF1, 0xFD, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0x44, 0xF4, 0x45, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xC4, 0x60, 0x00, 0x00, 0xF5, 0xC9, /* 0xC8-0xCB */
+	0x00, 0x00, 0xC4, 0xDE, 0x00, 0x00, 0xF5, 0xCA, /* 0xCC-0xCF */
+	0x00, 0x00, 0xF6, 0xDE, 0xC5, 0x72, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xC5, 0x71, 0xF6, 0xDD, 0xC5, 0xC9, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xF7, 0xD6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xA4, 0x74, 0xA6, 0x7B, 0xC9, 0xDA, /* 0xDC-0xDF */
+	0xCA, 0xCA, 0xA8, 0xB5, 0xB1, 0x5F, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xA4, 0x75, 0xA5, 0xAA, 0xA5, 0xA9, /* 0xE4-0xE7 */
+	0xA5, 0xA8, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xC5, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xAE, 0x74, 0x00, 0x00, /* 0xEC-0xEF */
+	0xDD, 0x57, 0xA4, 0x76, 0xA4, 0x77, 0xA4, 0x78, /* 0xF0-0xF3 */
+	0xA4, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xD1, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xCE, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xB4, 0x53, 0xA4, 0x79, 0xC9, 0x5D, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xA5, 0xAB, 0xA5, 0xAC, /* 0x00-0x03 */
+	0xC9, 0x78, 0x00, 0x00, 0xA6, 0x7C, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xCB, 0x00, 0x00, /* 0x08-0x0B */
+	0xA7, 0xC6, 0x00, 0x00, 0xCA, 0xCC, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xA9, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xCC, 0x6E, 0xA9, 0xAC, 0xA9, 0xAB, 0xCC, 0x6D, /* 0x14-0x17 */
+	0xA9, 0xA9, 0xCC, 0x6F, 0xA9, 0xAA, 0xA9, 0xAD, /* 0x18-0x1B */
+	0x00, 0x00, 0xAB, 0xD2, 0x00, 0x00, 0xAB, 0xD4, /* 0x1C-0x1F */
+	0xCE, 0xB3, 0xCE, 0xB0, 0xCE, 0xB1, 0xCE, 0xB2, /* 0x20-0x23 */
+	0xCE, 0xB4, 0xAB, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xD1, 0x74, 0xD1, 0x73, 0x00, 0x00, 0xAE, 0x76, /* 0x28-0x2B */
+	0x00, 0x00, 0xAE, 0x75, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB1, 0x62, /* 0x30-0x33 */
+	0xD5, 0x46, 0x00, 0x00, 0xB1, 0x61, 0xB1, 0x63, /* 0x34-0x37 */
+	0xB1, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xB4, 0x55, 0xD5, 0x45, 0x00, 0x00, /* 0x3C-0x3F */
+	0xB4, 0x56, 0xD8, 0xF3, 0x00, 0x00, 0xB4, 0x57, /* 0x40-0x43 */
+	0xD8, 0xF2, 0xB4, 0x54, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0x5A, 0xDD, 0x5C, /* 0x48-0x4B */
+	0xB7, 0x45, 0xDD, 0x5B, 0xDD, 0x59, 0xDD, 0x58, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xB4, /* 0x50-0x53 */
+	0xB9, 0xF7, 0xB9, 0xF5, 0x00, 0x00, 0xB9, 0xF6, /* 0x54-0x57 */
+	0xE1, 0xB2, 0xE1, 0xB3, 0x00, 0x00, 0xB9, 0xF3, /* 0x58-0x5B */
+	0xE5, 0x71, 0xE5, 0x6F, 0x00, 0x00, 0xBC, 0x6D, /* 0x5C-0x5F */
+	0xE5, 0x70, 0xBC, 0x6E, 0xBC, 0x6C, 0xB9, 0xF4, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0x6D, 0xE9, 0x6B, /* 0x64-0x67 */
+	0xE9, 0x6C, 0xE5, 0x6E, 0xEC, 0xDC, 0xC0, 0xB0, /* 0x68-0x6B */
+	0xEC, 0xDB, 0xEF, 0xC5, 0xEF, 0xC6, 0xE9, 0x6E, /* 0x6C-0x6F */
+	0xF1, 0xFE, 0x00, 0x00, 0xA4, 0x7A, 0xA5, 0xAD, /* 0x70-0x73 */
+	0xA6, 0x7E, 0xC9, 0xDB, 0xA6, 0x7D, 0x00, 0x00, /* 0x74-0x77 */
+	0xA9, 0xAF, 0xB7, 0x46, 0x00, 0x00, 0xA4, 0xDB, /* 0x78-0x7B */
+	0xA5, 0xAE, 0xAB, 0xD5, 0xB4, 0x58, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xC9, 0x79, 0x00, 0x00, 0xC9, 0x7A, 0x00, 0x00, /* 0x80-0x83 */
+	0xC9, 0xDC, 0x00, 0x00, 0x00, 0x00, 0xA7, 0xC8, /* 0x84-0x87 */
+	0xCA, 0xD0, 0xCA, 0xCE, 0xA7, 0xC9, 0xCA, 0xCD, /* 0x88-0x8B */
+	0xCA, 0xCF, 0xCA, 0xD1, 0x00, 0x00, 0xA7, 0xC7, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xA9, 0xB3, 0xA9, 0xB4, 0xA9, 0xB1, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0xB0, 0xCE, 0xB8, /* 0x98-0x9B */
+	0xA9, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xAB, 0xD6, 0x00, 0x00, 0xCE, 0xB7, 0xCE, 0xB9, /* 0xA0-0xA3 */
+	0xCE, 0xB6, 0xCE, 0xBA, 0xAB, 0xD7, 0xAE, 0x79, /* 0xA4-0xA7 */
+	0xD1, 0x75, 0x00, 0x00, 0xD1, 0x77, 0xAE, 0x77, /* 0xA8-0xAB */
+	0xD1, 0x78, 0xAE, 0x78, 0xD1, 0x76, 0x00, 0x00, /* 0xAC-0xAF */
+	0xCE, 0xB5, 0xD5, 0x47, 0xD5, 0x4A, 0xD5, 0x4B, /* 0xB0-0xB3 */
+	0xD5, 0x48, 0xB1, 0x67, 0xB1, 0x66, 0xB1, 0x64, /* 0xB4-0xB7 */
+	0xB1, 0x65, 0xD5, 0x49, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xB1, 0x68, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xB4, 0x5A, 0xB4, 0x5B, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xB4, 0x5C, 0xDD, 0x5D, 0xDD, 0x5F, 0xDD, 0x61, /* 0xC4-0xC7 */
+	0xB7, 0x48, 0xB7, 0x47, 0xB4, 0x59, 0xDD, 0x60, /* 0xC8-0xCB */
+	0xDD, 0x5E, 0x00, 0x00, 0xE1, 0xB8, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE1, 0xB6, 0xE1, 0xBC, 0xB9, 0xF8, /* 0xD0-0xD3 */
+	0xE1, 0xBD, 0xE1, 0xBA, 0xB9, 0xF9, 0xE1, 0xB7, /* 0xD4-0xD7 */
+	0xE1, 0xB5, 0xE1, 0xBB, 0xBC, 0x70, 0xE5, 0x73, /* 0xD8-0xDB */
+	0xE1, 0xB9, 0xBC, 0x72, 0xE5, 0x74, 0xBC, 0x71, /* 0xDC-0xDF */
+	0xBC, 0x74, 0xE5, 0x75, 0xBC, 0x6F, 0xBC, 0x73, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE9, 0x73, 0xE9, 0x71, 0xE9, 0x70, /* 0xE4-0xE7 */
+	0xE9, 0x72, 0xE9, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xC3, 0x66, 0x00, 0x00, 0xF4, 0x46, 0xF4, 0x47, /* 0xEC-0xEF */
+	0x00, 0x00, 0xF5, 0xCB, 0xF6, 0xDF, 0xC6, 0x55, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0xB5, 0xA7, 0xCA, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0xD8, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xA4, 0x7B, 0xA4, 0xDC, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_5F[512] = {
+	0x00, 0x00, 0xA5, 0xAF, 0xC9, 0xDD, 0x00, 0x00, /* 0x00-0x03 */
+	0xA7, 0xCB, 0xCA, 0xD2, 0x00, 0x00, 0xCE, 0xBB, /* 0x04-0x07 */
+	0xAB, 0xD9, 0x00, 0x00, 0xB9, 0xFA, 0xA4, 0x7C, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA6, 0xA1, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0x49, 0xA4, 0x7D, /* 0x10-0x13 */
+	0xA4, 0xDD, 0xA4, 0xDE, 0x00, 0x00, 0xA5, 0xB1, /* 0x14-0x17 */
+	0xA5, 0xB0, 0x00, 0x00, 0xC9, 0xDE, 0xA6, 0xA2, /* 0x18-0x1B */
+	0x00, 0x00, 0xCA, 0xD3, 0x00, 0x00, 0xA7, 0xCC, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0x71, 0xCC, 0x72, /* 0x20-0x23 */
+	0xCC, 0x73, 0x00, 0x00, 0xA9, 0xB6, 0xA9, 0xB7, /* 0x24-0x27 */
+	0xCC, 0x70, 0xA9, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xAB, 0xDA, 0xCE, 0xBC, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD1, 0x7A, 0xAE, 0x7A, 0x00, 0x00, 0xD1, 0x79, /* 0x30-0x33 */
+	0x00, 0x00, 0xB1, 0x69, 0xD5, 0x4C, 0xB1, 0x6A, /* 0x34-0x37 */
+	0xD5, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xB4, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDD, 0x62, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xBF, /* 0x40-0x43 */
+	0xE1, 0xBE, 0x00, 0x00, 0xB9, 0xFB, 0x00, 0x00, /* 0x44-0x47 */
+	0xBC, 0x75, 0xE5, 0x76, 0xBE, 0xCA, 0xE9, 0x74, /* 0x48-0x4B */
+	0xC0, 0xB1, 0x00, 0x00, 0xC5, 0x73, 0xF7, 0xD8, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xCC, 0x74, 0x00, 0x00, 0xCE, 0xBD, 0xB1, 0x6B, /* 0x54-0x57 */
+	0xD8, 0xF4, 0xB7, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xC2, 0x55, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xA7, 0xCE, 0x00, 0x00, /* 0x60-0x63 */
+	0xA7, 0xCD, 0xAB, 0xDB, 0x00, 0x00, 0xD1, 0x7B, /* 0x64-0x67 */
+	0x00, 0x00, 0xB1, 0x6D, 0xB3, 0x43, 0xB1, 0x6E, /* 0x68-0x6B */
+	0xB1, 0x6C, 0xB4, 0x5E, 0x00, 0x00, 0xE1, 0xC0, /* 0x6C-0x6F */
+	0xB9, 0xFC, 0xBC, 0x76, 0x00, 0x00, 0xC9, 0x4C, /* 0x70-0x73 */
+	0xC9, 0xDF, 0x00, 0x00, 0xCA, 0xD5, 0xA7, 0xCF, /* 0x74-0x77 */
+	0xCA, 0xD4, 0xA7, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xA9, 0xBC, 0xCC, 0x77, 0xCC, 0x76, 0xA9, 0xBB, /* 0x7C-0x7F */
+	
+	0xA9, 0xB9, 0xA9, 0xBA, 0xCC, 0x75, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xAB, 0xDD, 0xCE, 0xBE, 0xAB, 0xE0, /* 0x84-0x87 */
+	0xAB, 0xDC, 0xAB, 0xE2, 0xAB, 0xDE, 0xAB, 0xDF, /* 0x88-0x8B */
+	0xAB, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xAE, 0x7D, 0xAE, 0x7C, 0xAE, 0x7B, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0x4F, 0xB1, 0x6F, /* 0x94-0x97 */
+	0xB1, 0x72, 0xB1, 0x70, 0x00, 0x00, 0xD5, 0x4E, /* 0x98-0x9B */
+	0xB1, 0x75, 0x00, 0x00, 0xB1, 0x71, 0xD5, 0x50, /* 0x9C-0x9F */
+	0xB1, 0x74, 0xB1, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xD8, 0xF6, 0xD8, 0xF5, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xB4, 0x61, 0xB4, 0x5F, 0xB4, 0x60, 0xD8, 0xF7, /* 0xA8-0xAB */
+	0xB7, 0x4B, 0xDD, 0x64, 0xB7, 0x4C, 0xDD, 0x63, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0x77, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xBC, 0x78, 0xE1, 0xC1, 0xBC, 0x77, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xB9, 0xFD, 0x00, 0x00, 0xEC, 0xDE, /* 0xB8-0xBB */
+	0xE9, 0x75, 0xC0, 0xB2, 0xEC, 0xDD, 0xF2, 0x40, /* 0xBC-0xBF */
+	0xF4, 0x48, 0xF4, 0x49, 0x00, 0x00, 0xA4, 0xDF, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xA5, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xC9, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xA7, 0xD2, 0xA7, 0xD4, 0x00, 0x00, 0xC9, 0xE2, /* 0xCC-0xCF */
+	0xCA, 0xD8, 0xCA, 0xD7, 0xCA, 0xD6, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xC9, 0xE1, 0xC9, 0xE0, 0xA6, 0xA4, 0xA7, 0xD3, /* 0xD4-0xD7 */
+	0xA7, 0xD1, 0xA6, 0xA3, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xA9, 0xBD, 0xCC, 0x78, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA9, 0xBE, 0xCA, 0xDD, 0x00, 0x00, 0xCA, 0xDF, /* 0xE0-0xE3 */
+	0xCA, 0xDE, 0xCC, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCA, 0xDA, 0x00, 0x00, 0xA7, 0xD8, 0xA7, 0xD6, /* 0xE8-0xEB */
+	0x00, 0x00, 0xCA, 0xD9, 0xCA, 0xDB, 0xCA, 0xE1, /* 0xEC-0xEF */
+	0x00, 0x00, 0xA7, 0xD5, 0x00, 0x00, 0xCA, 0xDC, /* 0xF0-0xF3 */
+	0xCA, 0xE5, 0xA9, 0xC0, 0x00, 0x00, 0xCA, 0xE2, /* 0xF4-0xF7 */
+	0xA7, 0xD7, 0x00, 0x00, 0xCA, 0xE0, 0xCA, 0xE3, /* 0xF8-0xFB */
+	0x00, 0x00, 0xA9, 0xBF, 0x00, 0x00, 0xA9, 0xC1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_60[512] = {
+	0xCA, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xCC, 0xAF, 0xCC, 0xA2, 0xCC, 0x7E, /* 0x08-0x0B */
+	0xCC, 0xAE, 0xCC, 0xA9, 0xAB, 0xE7, 0xA9, 0xC2, /* 0x0C-0x0F */
+	0xCC, 0xAA, 0xCC, 0xAD, 0xAB, 0xE3, 0xCC, 0xAC, /* 0x10-0x13 */
+	0xA9, 0xC3, 0xA9, 0xC8, 0xA9, 0xC6, 0xCC, 0xA3, /* 0x14-0x17 */
+	0x00, 0x00, 0xCC, 0x7C, 0xCC, 0xA5, 0xA9, 0xCD, /* 0x18-0x1B */
+	0xCC, 0xB0, 0xAB, 0xE4, 0xCC, 0xA6, 0x00, 0x00, /* 0x1C-0x1F */
+	0xAB, 0xE5, 0xA9, 0xC9, 0xCC, 0xA8, 0x00, 0x00, /* 0x20-0x23 */
+	0xCE, 0xCD, 0xAB, 0xE6, 0xCC, 0x7B, 0xA9, 0xCA, /* 0x24-0x27 */
+	0xAB, 0xE8, 0xA9, 0xCB, 0xA9, 0xC7, 0xA9, 0xCC, /* 0x28-0x2B */
+	0xCC, 0xA7, 0xCC, 0x7A, 0xCC, 0xAB, 0xA9, 0xC4, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0x7D, 0xCC, 0xA4, /* 0x30-0x33 */
+	0xCC, 0xA1, 0xA9, 0xC5, 0x00, 0x00, 0xCE, 0xBF, /* 0x34-0x37 */
+	0x00, 0x00, 0xCE, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xCE, 0xCA, 0xD1, 0xA1, 0xCE, 0xCB, 0xAB, 0xEE, /* 0x40-0x43 */
+	0xCE, 0xCE, 0xCE, 0xC4, 0xAB, 0xED, 0xCE, 0xC6, /* 0x44-0x47 */
+	0x00, 0x00, 0xCE, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xCE, 0xC9, 0xAB, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xAE, 0xA3, 0x00, 0x00, 0xF9, 0xDA, 0xCE, 0xC5, /* 0x50-0x53 */
+	0xCE, 0xC1, 0xAE, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xCE, 0xCF, 0xAE, 0x7E, 0xD1, 0x7D, 0xCE, 0xC8, /* 0x58-0x5B */
+	0x00, 0x00, 0xD1, 0x7C, 0xCE, 0xC3, 0xCE, 0xCC, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0xEC, 0xAE, 0xA1, /* 0x60-0x63 */
+	0xAB, 0xF2, 0xAE, 0xA2, 0xCE, 0xD0, 0xD1, 0x7E, /* 0x64-0x67 */
+	0xAB, 0xEB, 0xAE, 0xA6, 0xAB, 0xF1, 0xAB, 0xF0, /* 0x68-0x6B */
+	0xAB, 0xEF, 0xAE, 0xA5, 0xCE, 0xD1, 0xAE, 0xA7, /* 0x6C-0x6F */
+	0xAB, 0xEA, 0x00, 0x00, 0xCE, 0xC2, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB1, 0x76, /* 0x7C-0x7F */
+	
+	0xD1, 0xA4, 0xD1, 0xA6, 0x00, 0x00, 0xD1, 0xA8, /* 0x80-0x83 */
+	0xAE, 0xA8, 0xAE, 0xAE, 0xD5, 0x53, 0xD1, 0xAC, /* 0x84-0x87 */
+	0xD1, 0xA3, 0xB1, 0x78, 0xD5, 0x51, 0x00, 0x00, /* 0x88-0x8B */
+	0xAE, 0xAD, 0xAE, 0xAB, 0xD1, 0xAE, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD5, 0x52, 0x00, 0x00, 0xD1, 0xA5, 0x00, 0x00, /* 0x90-0x93 */
+	0xAE, 0xAC, 0xD1, 0xA9, 0xAE, 0xAF, 0xD1, 0xAB, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xAE, 0xAA, 0xD1, 0xAA, /* 0x98-0x9B */
+	0xD1, 0xAD, 0xD1, 0xA7, 0x00, 0x00, 0xAE, 0xA9, /* 0x9C-0x9F */
+	0xB1, 0x79, 0x00, 0x00, 0xD1, 0xA2, 0xB1, 0x77, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xB1, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xD5, 0x55, 0xD5, 0x5E, 0xB4, 0x64, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xB1, 0x7C, 0xB1, 0xA3, 0xB4, 0x65, 0xD5, 0x60, /* 0xB4-0xB7 */
+	0xB1, 0xAA, 0xD8, 0xF9, 0xD5, 0x56, 0xB1, 0xA2, /* 0xB8-0xBB */
+	0xB1, 0xA5, 0xB1, 0x7E, 0xD5, 0x54, 0xD5, 0x62, /* 0xBC-0xBF */
+	0xD5, 0x65, 0xD9, 0x49, 0x00, 0x00, 0xD5, 0x63, /* 0xC0-0xC3 */
+	0xD8, 0xFD, 0xB1, 0xA1, 0xB1, 0xA8, 0xB1, 0xAC, /* 0xC4-0xC7 */
+	0xD5, 0x5D, 0xD8, 0xF8, 0xD5, 0x61, 0xB1, 0x7B, /* 0xC8-0xCB */
+	0xD8, 0xFA, 0xD5, 0x64, 0xD8, 0xFC, 0xD5, 0x59, /* 0xCC-0xCF */
+	0x00, 0x00, 0xB4, 0x62, 0x00, 0x00, 0xD5, 0x57, /* 0xD0-0xD3 */
+	0xD5, 0x58, 0xB1, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xB1, 0xA6, 0xD5, 0x5B, 0xB1, 0xAB, 0xD5, 0x5F, /* 0xD8-0xDB */
+	0xB1, 0xA4, 0xD5, 0x5C, 0x00, 0x00, 0xB1, 0xA9, /* 0xDC-0xDF */
+	0xB4, 0x66, 0xB4, 0x63, 0xD8, 0xFB, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xD5, 0x5A, 0x00, 0x00, 0xB1, 0x7D, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xB4, 0x6B, 0xB4, 0x6F, 0xD9, 0x40, 0xB7, 0x51, /* 0xF0-0xF3 */
+	0xB4, 0x6D, 0xD9, 0x44, 0xB4, 0x71, 0xDD, 0x65, /* 0xF4-0xF7 */
+	0xD9, 0x46, 0xB7, 0x53, 0xB4, 0x69, 0xB4, 0x6C, /* 0xF8-0xFB */
+	0xD9, 0x47, 0x00, 0x00, 0xD9, 0x48, 0xD9, 0x4E, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_61[512] = {
+	0xB4, 0x73, 0xB7, 0x54, 0x00, 0x00, 0xD9, 0x4A, /* 0x00-0x03 */
+	0xD9, 0x4F, 0xD9, 0x43, 0xB7, 0x5E, 0x00, 0x00, /* 0x04-0x07 */
+	0xB7, 0x55, 0xB4, 0x72, 0xD9, 0x41, 0xD9, 0x50, /* 0x08-0x0B */
+	0x00, 0x00, 0xB7, 0x5D, 0xB4, 0x70, 0xB7, 0x4E, /* 0x0C-0x0F */
+	0xD9, 0x4D, 0x00, 0x00, 0xB4, 0x74, 0xD9, 0x45, /* 0x10-0x13 */
+	0xD8, 0xFE, 0xB4, 0x6A, 0xD9, 0x42, 0x00, 0x00, /* 0x14-0x17 */
+	0xD9, 0x4B, 0x00, 0x00, 0xB7, 0x4D, 0xB7, 0x52, /* 0x18-0x1B */
+	0xB4, 0x67, 0xD9, 0x4C, 0x00, 0x00, 0xB7, 0x50, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0x68, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB7, 0x5C, /* 0x24-0x27 */
+	0xE1, 0xC3, 0xDD, 0x70, 0x00, 0x00, 0xDD, 0x68, /* 0x28-0x2B */
+	0xE1, 0xC2, 0x00, 0x00, 0xDD, 0x6C, 0xDD, 0x6E, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xDD, 0x6B, 0x00, 0x00, /* 0x30-0x33 */
+	0xB7, 0x5B, 0x00, 0x00, 0xDD, 0x6A, 0xB7, 0x5F, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xD2, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0x5A, 0xBA, 0x40, /* 0x3C-0x3F */
+	0xDD, 0x71, 0xE1, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xB7, 0x58, 0xDD, 0x69, 0xDD, 0x6D, 0xB9, 0xFE, /* 0x44-0x47 */
+	0xB7, 0x4F, 0xDD, 0x66, 0xDD, 0x67, 0xBA, 0x41, /* 0x48-0x4B */
+	0xB7, 0x57, 0xB7, 0x59, 0xB7, 0x56, 0xDD, 0x6F, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xC8, 0xE1, 0xC9, /* 0x50-0x53 */
+	0xE1, 0xCE, 0xBC, 0x7D, 0xE1, 0xD5, 0x00, 0x00, /* 0x54-0x57 */
+	0xBA, 0x47, 0x00, 0x00, 0xBA, 0x46, 0xE1, 0xD0, /* 0x58-0x5B */
+	0x00, 0x00, 0xBC, 0x7C, 0xE1, 0xC5, 0xBA, 0x45, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE1, 0xD4, 0xBA, 0x43, 0xBA, 0x44, /* 0x60-0x63 */
+	0x00, 0x00, 0xE1, 0xD1, 0xE5, 0xAA, 0xBC, 0x7A, /* 0x64-0x67 */
+	0xB4, 0x6E, 0x00, 0x00, 0xE1, 0xD3, 0xBC, 0xA3, /* 0x68-0x6B */
+	0xE1, 0xCB, 0x00, 0x00, 0xBC, 0x7B, 0x00, 0x00, /* 0x6C-0x6F */
+	0xBC, 0xA2, 0xE1, 0xC6, 0xE1, 0xCA, 0xE1, 0xC7, /* 0x70-0x73 */
+	0xE1, 0xCD, 0xBA, 0x48, 0xBC, 0x79, 0xBA, 0x42, /* 0x74-0x77 */
+	0x00, 0x00, 0xE5, 0x7A, 0xE1, 0xCF, 0x00, 0x00, /* 0x78-0x7B */
+	0xBC, 0xA1, 0x00, 0x00, 0xBC, 0xA4, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xE1, 0xCC, 0x00, 0x00, 0xBC, 0x7E, 0xE5, 0x79, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xE5, 0x7E, 0xBE, 0xCE, 0xE5, 0x78, /* 0x88-0x8B */
+	0xE9, 0xA3, 0xE5, 0xA9, 0xBC, 0xA8, 0x00, 0x00, /* 0x8C-0x8F */
+	0xBC, 0xA6, 0xBE, 0xCC, 0xE5, 0xA6, 0xE5, 0xA2, /* 0x90-0x93 */
+	0xBC, 0xAC, 0x00, 0x00, 0xE9, 0x78, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xBC, 0xAA, 0xE5, 0xA1, /* 0x98-0x9B */
+	0x00, 0x00, 0xE9, 0x76, 0x00, 0x00, 0xE5, 0xA5, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE5, 0xA8, 0xE5, 0x7D, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xBC, 0xAB, 0x00, 0x00, 0x00, 0x00, 0xBC, 0xA5, /* 0xA4-0xA7 */
+	0xE9, 0x77, 0xBE, 0xCD, 0xE5, 0xA7, 0xBC, 0xA7, /* 0xA8-0xAB */
+	0xBC, 0xA9, 0xE5, 0xA4, 0xBC, 0xAD, 0xE5, 0xA3, /* 0xAC-0xAF */
+	0xE5, 0x7C, 0xE5, 0x7B, 0xBE, 0xCB, 0xE5, 0xAB, /* 0xB0-0xB3 */
+	0xE9, 0x7A, 0xEC, 0xE0, 0xBE, 0xD0, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE9, 0xA2, 0x00, 0x00, 0xE9, 0x7E, 0x00, 0x00, /* 0xB8-0xBB */
+	0xEC, 0xE1, 0x00, 0x00, 0xBE, 0xD1, 0xE9, 0xA1, /* 0xBC-0xBF */
+	0x00, 0x00, 0xE9, 0x7C, 0xC0, 0xB4, 0xEC, 0xDF, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE9, 0x79, 0xE9, 0x7B, 0xC0, 0xB5, /* 0xC4-0xC7 */
+	0xBE, 0xD3, 0xC0, 0xB3, 0xBE, 0xD2, 0xC0, 0xB7, /* 0xC8-0xCB */
+	0xE9, 0x7D, 0xBE, 0xCF, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0xCF, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xEF, 0xC7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xE7, 0xEF, 0xC8, /* 0xDC-0xDF */
+	0xEC, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xC2, 0x56, /* 0xE0-0xE3 */
+	0xEC, 0xE5, 0xEC, 0xE4, 0xC0, 0xB6, 0xEC, 0xE2, /* 0xE4-0xE7 */
+	0xEC, 0xE6, 0xEF, 0xD0, 0xEF, 0xCC, 0xEF, 0xCE, /* 0xE8-0xEB */
+	0x00, 0x00, 0xEF, 0xC9, 0xEF, 0xCA, 0x00, 0x00, /* 0xEC-0xEF */
+	0xEF, 0xCD, 0xEF, 0xCB, 0xC3, 0x67, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xC3, 0x6A, 0xC3, 0x69, 0xC3, 0x68, /* 0xF4-0xF7 */
+	0xC4, 0x61, 0xF4, 0x4A, 0xC4, 0x62, 0xF2, 0x41, /* 0xF8-0xFB */
+	0xC4, 0xDF, 0xF5, 0xCC, 0xC4, 0xE0, 0xC5, 0x74, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_62[512] = {
+	0xC5, 0xCA, 0xF7, 0xD9, 0x00, 0x00, 0xF7, 0xDA, /* 0x00-0x03 */
+	0xF7, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xBA, /* 0x04-0x07 */
+	0xA4, 0xE0, 0xC9, 0x7C, 0xA5, 0xB3, 0x00, 0x00, /* 0x08-0x0B */
+	0xA6, 0xA6, 0xA6, 0xA7, 0xA6, 0xA5, 0x00, 0x00, /* 0x0C-0x0F */
+	0xA6, 0xA8, 0xA7, 0xDA, 0xA7, 0xD9, 0x00, 0x00, /* 0x10-0x13 */
+	0xCC, 0xB1, 0xA9, 0xCF, 0xA9, 0xCE, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xD1, 0xAF, 0xB1, 0xAD, 0xB1, 0xAE, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB4, 0x75, /* 0x1C-0x1F */
+	0xDD, 0x72, 0xB7, 0x60, 0xB7, 0x61, 0xDD, 0x74, /* 0x20-0x23 */
+	0xDD, 0x76, 0xDD, 0x75, 0x00, 0x00, 0xE1, 0xD7, /* 0x24-0x27 */
+	0x00, 0x00, 0xE1, 0xD6, 0xBA, 0x49, 0xE1, 0xD8, /* 0x28-0x2B */
+	0x00, 0x00, 0xE5, 0xAC, 0xBC, 0xAE, 0x00, 0x00, /* 0x2C-0x2F */
+	0xBE, 0xD4, 0x00, 0x00, 0xC0, 0xB8, 0xC2, 0x57, /* 0x30-0x33 */
+	0xC0, 0xB9, 0x00, 0x00, 0xA4, 0xE1, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0xE6, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xCC, 0xB2, 0xA9, 0xD1, 0xA9, 0xD0, /* 0x3C-0x3F */
+	0xA9, 0xD2, 0xAB, 0xF3, 0xCE, 0xD2, 0xCE, 0xD3, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xB0, 0xAE, 0xB0, /* 0x44-0x47 */
+	0xB1, 0xAF, 0xB4, 0x76, 0xD9, 0x51, 0xA4, 0xE2, /* 0x48-0x4B */
+	0x00, 0x00, 0xA4, 0x7E, 0xA4, 0xE3, 0x00, 0x00, /* 0x4C-0x4F */
+	0xC9, 0x7D, 0xA5, 0xB7, 0xA5, 0xB6, 0xA5, 0xB4, /* 0x50-0x53 */
+	0xA5, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xA6, 0xAB, 0xC9, 0xE9, 0xC9, 0xEB, 0xA6, 0xAA, /* 0x58-0x5B */
+	0xC9, 0xE3, 0x00, 0x00, 0xC9, 0xE4, 0x00, 0x00, /* 0x5C-0x5F */
+	0xC9, 0xEA, 0xC9, 0xE6, 0xC9, 0xE8, 0xA6, 0xA9, /* 0x60-0x63 */
+	0xC9, 0xE5, 0xC9, 0xEC, 0xC9, 0xE7, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xA7, 0xE1, 0xA7, 0xEA, 0xA7, 0xE8, /* 0x6C-0x6F */
+	0xCA, 0xF0, 0xCA, 0xED, 0xCA, 0xF5, 0xA7, 0xE6, /* 0x70-0x73 */
+	0xCA, 0xF6, 0x00, 0x00, 0xA7, 0xDF, 0xCA, 0xF3, /* 0x74-0x77 */
+	0x00, 0x00, 0xA7, 0xE5, 0xCA, 0xEF, 0xCA, 0xEE, /* 0x78-0x7B */
+	0xA7, 0xE3, 0xCA, 0xF4, 0xA7, 0xE4, 0xA9, 0xD3, /* 0x7C-0x7F */
+	
+	0xA7, 0xDE, 0xCA, 0xF1, 0x00, 0x00, 0xCA, 0xE7, /* 0x80-0x83 */
+	0xA7, 0xDB, 0x00, 0x00, 0xA7, 0xEE, 0xCA, 0xEC, /* 0x84-0x87 */
+	0xCA, 0xF2, 0xA7, 0xE0, 0xA7, 0xE2, 0x00, 0x00, /* 0x88-0x8B */
+	0xCA, 0xE8, 0x00, 0x00, 0xCA, 0xE9, 0xCA, 0xEA, /* 0x8C-0x8F */
+	0x00, 0x00, 0xA7, 0xED, 0xA7, 0xE7, 0xA7, 0xEC, /* 0x90-0x93 */
+	0xCA, 0xEB, 0xA7, 0xEB, 0xA7, 0xDD, 0xA7, 0xDC, /* 0x94-0x97 */
+	0xA7, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xA9, 0xE1, 0xCC, 0xBE, 0xCC, 0xB7, 0xA9, 0xDC, /* 0xA8-0xAB */
+	0xA9, 0xEF, 0xCC, 0xB3, 0xCC, 0xBA, 0xCC, 0xBC, /* 0xAC-0xAF */
+	0xCC, 0xBF, 0xA9, 0xEA, 0x00, 0x00, 0xCC, 0xBB, /* 0xB0-0xB3 */
+	0xCC, 0xB4, 0xA9, 0xE8, 0xCC, 0xB8, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xCC, 0xC0, 0xA9, 0xD9, 0x00, 0x00, 0xCC, 0xBD, /* 0xB8-0xBB */
+	0xA9, 0xE3, 0xA9, 0xE2, 0xCC, 0xB6, 0xA9, 0xD7, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0xD8, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xA9, 0xD6, 0x00, 0x00, 0xA9, 0xEE, 0xA9, 0xE6, /* 0xC4-0xC7 */
+	0xA9, 0xE0, 0xA9, 0xD4, 0xCC, 0xB9, 0xA9, 0xDF, /* 0xC8-0xCB */
+	0xA9, 0xD5, 0xA9, 0xE7, 0xA9, 0xF0, 0xCE, 0xD4, /* 0xCC-0xCF */
+	0xA9, 0xE4, 0xCC, 0xB5, 0xA9, 0xDA, 0xA9, 0xDD, /* 0xD0-0xD3 */
+	0xA9, 0xDE, 0x00, 0x00, 0xA9, 0xEC, 0xA9, 0xED, /* 0xD4-0xD7 */
+	0xA9, 0xEB, 0xA9, 0xE5, 0xA9, 0xE9, 0xA9, 0xDB, /* 0xD8-0xDB */
+	0xAB, 0xF4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xDA, /* 0xE8-0xEB */
+	0xAC, 0x41, 0xAB, 0xF8, 0xAB, 0xFA, 0xAC, 0x40, /* 0xEC-0xEF */
+	0xCE, 0xE6, 0xAB, 0xFD, 0xD1, 0xB1, 0xAE, 0xB1, /* 0xF0-0xF3 */
+	0xAC, 0x43, 0xCE, 0xD7, 0xCE, 0xDF, 0xAB, 0xFE, /* 0xF4-0xF7 */
+	0xCE, 0xDE, 0xCE, 0xDB, 0xCE, 0xE3, 0xCE, 0xE5, /* 0xF8-0xFB */
+	0xAB, 0xF7, 0xAB, 0xFB, 0xAC, 0x42, 0xAE, 0xB3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_63[512] = {
+	0xCE, 0xE0, 0xAB, 0xF9, 0xAC, 0x45, 0xCE, 0xD9, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xFC, /* 0x04-0x07 */
+	0xAE, 0xB2, 0xAB, 0xF6, 0x00, 0x00, 0xCE, 0xD6, /* 0x08-0x0B */
+	0xCE, 0xDD, 0xCE, 0xD5, 0xCE, 0xD8, 0xCE, 0xDC, /* 0x0C-0x0F */
+	0xD1, 0xB2, 0xAC, 0x44, 0x00, 0x00, 0xCE, 0xE1, /* 0x10-0x13 */
+	0xCE, 0xE2, 0xCE, 0xE4, 0xAB, 0xF5, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xAE, 0xC1, 0xD1, 0xBE, 0xAE, 0xBF, 0xAE, 0xC0, /* 0x28-0x2B */
+	0xD1, 0xB4, 0xD1, 0xC4, 0x00, 0x00, 0xAE, 0xB6, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xD5, 0x66, 0xD1, 0xC6, /* 0x30-0x33 */
+	0xD1, 0xC0, 0x00, 0x00, 0xD1, 0xB7, 0x00, 0x00, /* 0x34-0x37 */
+	0xD1, 0xC9, 0xD1, 0xBA, 0xAE, 0xBC, 0xD5, 0x7D, /* 0x38-0x3B */
+	0xD1, 0xBD, 0xAE, 0xBE, 0xAE, 0xB5, 0x00, 0x00, /* 0x3C-0x3F */
+	0xD1, 0xCB, 0xD1, 0xBF, 0xAE, 0xB8, 0xD1, 0xB8, /* 0x40-0x43 */
+	0xD1, 0xB5, 0xD1, 0xB6, 0xAE, 0xB9, 0xD1, 0xC5, /* 0x44-0x47 */
+	0xD1, 0xCC, 0xAE, 0xBB, 0xD1, 0xBC, 0xD1, 0xBB, /* 0x48-0x4B */
+	0xAE, 0xC3, 0xAE, 0xC2, 0xAE, 0xB4, 0xAE, 0xBA, /* 0x4C-0x4F */
+	0xAE, 0xBD, 0xD1, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xD1, 0xC2, 0xAE, 0xB7, 0xD1, 0xB3, 0xD1, 0xCA, /* 0x54-0x57 */
+	0xD1, 0xC1, 0xD1, 0xC3, 0xD1, 0xC7, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xD5, 0x67, 0x00, 0x00, 0xB1, 0xB7, /* 0x64-0x67 */
+	0xB1, 0xCB, 0xB1, 0xCA, 0x00, 0x00, 0xB1, 0xBF, /* 0x68-0x6B */
+	0x00, 0x00, 0xD5, 0x79, 0xD5, 0x75, 0xD5, 0x72, /* 0x6C-0x6F */
+	0xD5, 0xA6, 0xB1, 0xBA, 0xB1, 0xB2, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xD5, 0x77, 0xB4, 0xA8, 0xB1, 0xB6, /* 0x74-0x77 */
+	0xD5, 0xA1, 0x00, 0x00, 0xB1, 0xCC, 0xB1, 0xC9, /* 0x78-0x7B */
+	0xD5, 0x7B, 0xD5, 0x6A, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xB1, 0xC8, 0xD5, 0xA3, 0xD5, 0x69, 0xB1, 0xBD, /* 0x80-0x83 */
+	0xB1, 0xC1, 0xD5, 0xA2, 0x00, 0x00, 0xD5, 0x73, /* 0x84-0x87 */
+	0xB1, 0xC2, 0xB1, 0xBC, 0xD5, 0x68, 0x00, 0x00, /* 0x88-0x8B */
+	0xB4, 0x78, 0xD5, 0xA5, 0xD5, 0x71, 0xB1, 0xC7, /* 0x8C-0x8F */
+	0xD5, 0x74, 0xD5, 0xA4, 0xB1, 0xC6, 0x00, 0x00, /* 0x90-0x93 */
+	0xD9, 0x52, 0x00, 0x00, 0xB1, 0xB3, 0xD5, 0x6F, /* 0x94-0x97 */
+	0xB1, 0xB8, 0xB1, 0xC3, 0x00, 0x00, 0xB1, 0xBE, /* 0x98-0x9B */
+	0xD5, 0x78, 0xD5, 0x6E, 0xD5, 0x6C, 0xD5, 0x7E, /* 0x9C-0x9F */
+	0xB1, 0xB0, 0xB1, 0xC4, 0xB1, 0xB4, 0xB4, 0x77, /* 0xA0-0xA3 */
+	0xD5, 0x7C, 0xB1, 0xB5, 0x00, 0x00, 0xB1, 0xB1, /* 0xA4-0xA7 */
+	0xB1, 0xC0, 0xB1, 0xBB, 0xB1, 0xB9, 0xD5, 0x70, /* 0xA8-0xAB */
+	0xB1, 0xC5, 0xD5, 0x6D, 0xD5, 0x7A, 0xD5, 0x76, /* 0xAC-0xAF */
+	0xD9, 0x54, 0xD9, 0x53, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD5, 0x6B, 0xD9, 0x64, 0x00, 0x00, /* 0xBC-0xBF */
+	0xB4, 0x7A, 0x00, 0x00, 0xD9, 0x6A, 0xD9, 0x59, /* 0xC0-0xC3 */
+	0xD9, 0x67, 0xDD, 0x77, 0xB4, 0x7D, 0xD9, 0x6B, /* 0xC4-0xC7 */
+	0xD9, 0x6E, 0xB4, 0x7C, 0xD9, 0x5C, 0xD9, 0x6D, /* 0xC8-0xCB */
+	0xD9, 0x6C, 0xB4, 0x7E, 0xD9, 0x55, 0xB4, 0x79, /* 0xCC-0xCF */
+	0xB4, 0xA3, 0x00, 0x00, 0xB4, 0xA1, 0xD9, 0x69, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xD9, 0x5F, 0xB4, 0xA5, 0xD9, 0x70, /* 0xD4-0xD7 */
+	0xD9, 0x68, 0xD9, 0x71, 0xB4, 0xAD, 0xB4, 0xAB, /* 0xD8-0xDB */
+	0xD9, 0x66, 0xD9, 0x65, 0x00, 0x00, 0xD9, 0x63, /* 0xDC-0xDF */
+	0xD9, 0x5D, 0xB4, 0xA4, 0x00, 0x00, 0xB4, 0xA2, /* 0xE0-0xE3 */
+	0xD1, 0xB9, 0xD9, 0x56, 0x00, 0x00, 0xDD, 0xB7, /* 0xE4-0xE7 */
+	0xD9, 0x57, 0xB4, 0x7B, 0xB4, 0xAA, 0xDD, 0x79, /* 0xE8-0xEB */
+	0x00, 0x00, 0xB4, 0xA6, 0xB4, 0xA7, 0xD9, 0x58, /* 0xEC-0xEF */
+	0xD9, 0x6F, 0xDD, 0x78, 0xD9, 0x60, 0xD9, 0x5B, /* 0xF0-0xF3 */
+	0xB4, 0xA9, 0xD9, 0x61, 0xD9, 0x5E, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xB4, 0xAE, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_64[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0x70, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xDD, 0x7C, 0xDD, 0xB1, 0xDD, 0xB6, /* 0x08-0x0B */
+	0xDD, 0xAA, 0xB7, 0x6C, 0xDD, 0xBB, 0xB7, 0x69, /* 0x0C-0x0F */
+	0xDD, 0x7A, 0x00, 0x00, 0xDD, 0x7B, 0xB7, 0x62, /* 0x10-0x13 */
+	0xB7, 0x6B, 0xDD, 0xA4, 0xB7, 0x6E, 0xB7, 0x6F, /* 0x14-0x17 */
+	0xDD, 0xA5, 0x00, 0x00, 0xDD, 0xB2, 0xDD, 0xB8, /* 0x18-0x1B */
+	0xB7, 0x6A, 0x00, 0x00, 0xB7, 0x64, 0xDD, 0xA3, /* 0x1C-0x1F */
+	0xDD, 0x7D, 0xDD, 0xBA, 0xDD, 0xA8, 0xDD, 0xA9, /* 0x20-0x23 */
+	0xDD, 0x7E, 0xDD, 0xB4, 0xDD, 0xAB, 0xDD, 0xB5, /* 0x24-0x27 */
+	0xDD, 0xAD, 0x00, 0x00, 0xB7, 0x65, 0xE1, 0xD9, /* 0x28-0x2B */
+	0xB7, 0x68, 0xB7, 0x66, 0xDD, 0xB9, 0xDD, 0xB0, /* 0x2C-0x2F */
+	0xDD, 0xAC, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xA1, /* 0x30-0x33 */
+	0xBA, 0x53, 0xDD, 0xAF, 0xB7, 0x6D, 0xDD, 0xA7, /* 0x34-0x37 */
+	0x00, 0x00, 0xDD, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xB7, 0x67, 0xB7, 0x63, 0xE1, 0xEE, /* 0x3C-0x3F */
+	0xDD, 0xB3, 0xDD, 0xAE, 0x00, 0x00, 0xDD, 0xA2, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xE9, /* 0x48-0x4B */
+	0x00, 0x00, 0xE1, 0xDA, 0xE1, 0xE5, 0x00, 0x00, /* 0x4C-0x4F */
+	0xE1, 0xEC, 0xBA, 0x51, 0xB4, 0xAC, 0xE1, 0xEA, /* 0x50-0x53 */
+	0xBA, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xBA, 0x4B, 0xE1, 0xF1, 0x00, 0x00, 0xE1, 0xDB, /* 0x58-0x5B */
+	0xE1, 0xE8, 0xE1, 0xDC, 0xE1, 0xE7, 0xBA, 0x4F, /* 0x5C-0x5F */
+	0xE1, 0xEB, 0xD9, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xE1, 0xF2, 0xE1, 0xE3, 0xBA, 0x52, /* 0x64-0x67 */
+	0xE5, 0xBA, 0xBC, 0xAF, 0x00, 0x00, 0xE1, 0xF0, /* 0x68-0x6B */
+	0xE1, 0xEF, 0xBA, 0x54, 0xE5, 0xAD, 0xBC, 0xB0, /* 0x6C-0x6F */
+	0xE5, 0xAE, 0x00, 0x00, 0xE1, 0xDF, 0xE1, 0xE0, /* 0x70-0x73 */
+	0xE1, 0xDD, 0xE1, 0xE2, 0xE1, 0xDE, 0xE1, 0xF3, /* 0x74-0x77 */
+	0xBA, 0x4E, 0xBC, 0xB1, 0xBA, 0x50, 0xBA, 0x55, /* 0x78-0x7B */
+	0x00, 0x00, 0xE1, 0xE1, 0x00, 0x00, 0xE1, 0xED, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xE1, 0xE6, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xE5, 0xB1, 0x00, 0x00, 0xBA, 0x4A, /* 0x84-0x87 */
+	0xBC, 0xB4, 0xE9, 0xAA, 0xE5, 0xB6, 0xE5, 0xB5, /* 0x88-0x8B */
+	0xE5, 0xB7, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xB4, /* 0x8C-0x8F */
+	0xBC, 0xB5, 0x00, 0x00, 0xBC, 0xBB, 0xBC, 0xB8, /* 0x90-0x93 */
+	0x00, 0x00, 0xBC, 0xB9, 0xE5, 0xAF, 0xE5, 0xB2, /* 0x94-0x97 */
+	0xE5, 0xBC, 0xBC, 0xC1, 0xBC, 0xBF, 0x00, 0x00, /* 0x98-0x9B */
+	0xE5, 0xB3, 0xD9, 0x5A, 0xBC, 0xB2, 0xE5, 0xB9, /* 0x9C-0x9F */
+	0xE5, 0xB0, 0x00, 0x00, 0xBC, 0xC2, 0xE5, 0xB8, /* 0xA0-0xA3 */
+	0xBA, 0x4D, 0xBC, 0xB7, 0xE1, 0xE4, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xBC, 0xBA, 0x00, 0x00, 0xBC, 0xBE, /* 0xA8-0xAB */
+	0xBC, 0xC0, 0xBC, 0xBD, 0xBC, 0xBC, 0x00, 0x00, /* 0xAC-0xAF */
+	0xBC, 0xB6, 0xE5, 0xBB, 0xBC, 0xB3, 0xBC, 0xC3, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBE, 0xD8, /* 0xB8-0xBB */
+	0xBE, 0xD9, 0xE9, 0xA9, 0xBE, 0xE2, 0xBE, 0xDF, /* 0xBC-0xBF */
+	0x00, 0x00, 0xBE, 0xD6, 0xBE, 0xDD, 0xE9, 0xAB, /* 0xC0-0xC3 */
+	0xBE, 0xDB, 0xBE, 0xD5, 0x00, 0x00, 0xBE, 0xDC, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE9, 0xA8, 0xC0, 0xBB, 0xBE, 0xD7, /* 0xC8-0xCB */
+	0x00, 0x00, 0xBE, 0xDE, 0xC0, 0xBA, 0xE9, 0xA7, /* 0xCC-0xCF */
+	0xE9, 0xA6, 0x00, 0x00, 0xBE, 0xE0, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xBE, 0xE1, 0x00, 0x00, 0xE9, 0xA5, 0xE9, 0xA4, /* 0xD4-0xD7 */
+	0xC0, 0xBC, 0xE9, 0xAE, 0xBE, 0xDA, 0xE9, 0xAC, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xC0, 0xBD, 0x00, 0x00, 0xC0, 0xC2, 0xEC, 0xEA, /* 0xE0-0xE3 */
+	0xEC, 0xEC, 0x00, 0x00, 0xC0, 0xBF, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xEC, 0xED, 0xEC, 0xE9, 0x00, 0x00, 0xEC, 0xEB, /* 0xE8-0xEB */
+	0xC0, 0xC0, 0xC0, 0xC3, 0x00, 0x00, 0xEC, 0xE8, /* 0xEC-0xEF */
+	0xC0, 0xBE, 0xC0, 0xC1, 0xC2, 0x59, 0xE9, 0xAD, /* 0xF0-0xF3 */
+	0xC2, 0x58, 0x00, 0x00, 0x00, 0x00, 0xC2, 0x5E, /* 0xF4-0xF7 */
+	0xEF, 0xD4, 0x00, 0x00, 0xC2, 0x5C, 0xC2, 0x5D, /* 0xF8-0xFB */
+	0xEF, 0xD7, 0xEF, 0xD3, 0xC2, 0x5A, 0xEF, 0xD1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_65[512] = {
+	0xC3, 0x6B, 0xEF, 0xD5, 0x00, 0x00, 0xEF, 0xD6, /* 0x00-0x03 */
+	0xEF, 0xD2, 0x00, 0x00, 0xC2, 0x5B, 0xF2, 0x42, /* 0x04-0x07 */
+	0x00, 0x00, 0xF2, 0x45, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xF2, 0x46, 0xF2, 0x44, 0xF2, 0x47, 0xC3, 0x6C, /* 0x0C-0x0F */
+	0xF2, 0x43, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x4E, /* 0x10-0x13 */
+	0xC4, 0x64, 0xF4, 0x4D, 0xF4, 0x4C, 0xF4, 0x4B, /* 0x14-0x17 */
+	0xC4, 0x63, 0xC4, 0x65, 0x00, 0x00, 0xF5, 0xCD, /* 0x18-0x1B */
+	0xC4, 0xE2, 0xC4, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xF6, 0xE1, 0xF6, 0xE0, 0xF6, 0xE3, 0xC5, 0xCB, /* 0x20-0x23 */
+	0xC5, 0x75, 0xF7, 0xDD, 0xF6, 0xE2, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xF7, 0xDC, 0xC5, 0xCD, 0xC5, 0xCC, /* 0x28-0x2B */
+	0xC5, 0xF3, 0xF8, 0xA9, 0xF8, 0xEF, 0xA4, 0xE4, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0x72, 0xE9, 0xAF, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xA6, 0xAC, 0xCA, 0xF7, /* 0x34-0x37 */
+	0xA7, 0xF1, 0xA7, 0xEF, 0x00, 0x00, 0xA7, 0xF0, /* 0x38-0x3B */
+	0x00, 0x00, 0xCC, 0xC1, 0xA9, 0xF1, 0xAC, 0x46, /* 0x3C-0x3F */
+	0x00, 0x00, 0xCE, 0xE7, 0x00, 0x00, 0xCE, 0xE8, /* 0x40-0x43 */
+	0x00, 0x00, 0xAC, 0x47, 0xD1, 0xCE, 0x00, 0x00, /* 0x44-0x47 */
+	0xAE, 0xC4, 0xAE, 0xC5, 0xD1, 0xCD, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB1, 0xD3, /* 0x4C-0x4F */
+	0x00, 0x00, 0xB1, 0xCF, 0x00, 0x00, 0xD5, 0xA7, /* 0x50-0x53 */
+	0xB1, 0xD6, 0xB1, 0xD5, 0xB1, 0xCE, 0xB1, 0xD1, /* 0x54-0x57 */
+	0xB1, 0xD4, 0xB1, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xD9, 0x76, 0xB1, 0xCD, 0xB4, 0xAF, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xB4, 0xB1, 0xB4, 0xB2, /* 0x60-0x63 */
+	0xD9, 0x75, 0xD9, 0x78, 0xB4, 0xB0, 0xD9, 0x73, /* 0x64-0x67 */
+	0xD9, 0x77, 0x00, 0x00, 0xD9, 0x74, 0x00, 0x00, /* 0x68-0x6B */
+	0xB7, 0x71, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xBC, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xBA, 0x56, 0xE1, 0xF4, /* 0x70-0x73 */
+	0xBE, 0xE3, 0xBC, 0xC4, 0xE5, 0xBD, 0xBC, 0xC5, /* 0x74-0x77 */
+	0xBC, 0xC6, 0xE5, 0xBF, 0xE5, 0xBE, 0xE5, 0xC0, /* 0x78-0x7B */
+	0xE9, 0xB1, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xB0, /* 0x7C-0x7F */
+	
+	0xEC, 0xEF, 0xEC, 0xEE, 0xC0, 0xC4, 0xC0, 0xC5, /* 0x80-0x83 */
+	0xF2, 0x48, 0x00, 0x00, 0x00, 0x00, 0xA4, 0xE5, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xD9, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xB4, 0xB4, 0xB4, 0xB3, 0xDD, 0xBD, 0x00, 0x00, /* 0x90-0x93 */
+	0xEF, 0xD8, 0xC4, 0xE3, 0xF7, 0xDE, 0xA4, 0xE6, /* 0x94-0x97 */
+	0x00, 0x00, 0xAE, 0xC6, 0x00, 0x00, 0xB1, 0xD8, /* 0x98-0x9B */
+	0xB1, 0xD7, 0xD9, 0x7A, 0xD9, 0x7B, 0xB7, 0x72, /* 0x9C-0x9F */
+	0xE1, 0xF5, 0xBA, 0x57, 0xE9, 0xB2, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xA4, 0xE7, 0xA5, 0xB8, 0x00, 0x00, 0xA9, 0xF2, /* 0xA4-0xA7 */
+	0xCC, 0xC2, 0x00, 0x00, 0xCE, 0xE9, 0xAC, 0x48, /* 0xA8-0xAB */
+	0xB1, 0xD9, 0x00, 0x00, 0xD9, 0x7C, 0xB4, 0xB5, /* 0xAC-0xAF */
+	0xB7, 0x73, 0x00, 0x00, 0xE5, 0xC1, 0xE5, 0xC2, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xF0, 0xC2, 0x5F, /* 0xB4-0xB7 */
+	0xF8, 0xF0, 0xA4, 0xE8, 0x00, 0x00, 0xCC, 0xC3, /* 0xB8-0xBB */
+	0xA9, 0xF3, 0xAC, 0x49, 0x00, 0x00, 0xCE, 0xEA, /* 0xBC-0xBF */
+	0x00, 0x00, 0xAE, 0xC7, 0xD1, 0xD2, 0xD1, 0xD0, /* 0xC0-0xC3 */
+	0xD1, 0xD1, 0xAE, 0xC8, 0xD1, 0xCF, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB1, 0xDB, /* 0xC8-0xCB */
+	0xB1, 0xDC, 0xD5, 0xA8, 0xB1, 0xDD, 0xB1, 0xDA, /* 0xCC-0xCF */
+	0xD9, 0x7D, 0x00, 0x00, 0xD9, 0x7E, 0xDD, 0xBE, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xBA, 0x59, 0xBA, 0x58, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xF1, 0xEF, 0xD9, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF2, 0x4A, 0xF2, 0x49, 0xF4, 0x4F, /* 0xDC-0xDF */
+	0x00, 0x00, 0xC9, 0x5E, 0xAC, 0x4A, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xA4, 0xE9, 0xA5, 0xB9, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xA6, 0xAE, 0xA6, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xA6, 0xAF, 0xA6, 0xB0, 0xC9, 0xEE, 0xC9, 0xED, /* 0xEC-0xEF */
+	0xCA, 0xF8, 0xA7, 0xF2, 0xCA, 0xFB, 0xCA, 0xFA, /* 0xF0-0xF3 */
+	0xCA, 0xF9, 0xCA, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xA9, 0xF4, 0xCC, 0xC9, /* 0xF8-0xFB */
+	0xCC, 0xC5, 0xCC, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_66[512] = {
+	0xA9, 0xFB, 0x00, 0x00, 0xA9, 0xF9, 0xCC, 0xCA, /* 0x00-0x03 */
+	0xCC, 0xC6, 0xCC, 0xCD, 0xA9, 0xF8, 0xAA, 0x40, /* 0x04-0x07 */
+	0xCC, 0xC8, 0xCC, 0xC4, 0xA9, 0xFE, 0xCC, 0xCB, /* 0x08-0x0B */
+	0xA9, 0xF7, 0xCC, 0xCC, 0xA9, 0xFA, 0xA9, 0xFC, /* 0x0C-0x0F */
+	0xCC, 0xD0, 0xCC, 0xCF, 0xCC, 0xC7, 0xA9, 0xF6, /* 0x10-0x13 */
+	0xA9, 0xF5, 0xA9, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xCE, 0xEF, 0xCE, 0xF5, 0x00, 0x00, 0xAC, 0x50, /* 0x1C-0x1F */
+	0xAC, 0x4D, 0xCE, 0xEC, 0xCE, 0xF1, 0x00, 0x00, /* 0x20-0x23 */
+	0xAC, 0x53, 0xAC, 0x4B, 0xCE, 0xF0, 0xAC, 0x4E, /* 0x24-0x27 */
+	0xAC, 0x51, 0x00, 0x00, 0x00, 0x00, 0xCE, 0xF3, /* 0x28-0x2B */
+	0x00, 0x00, 0xAC, 0x4C, 0xCE, 0xF8, 0xAC, 0x4F, /* 0x2C-0x2F */
+	0x00, 0x00, 0xAC, 0x52, 0xCE, 0xED, 0xCE, 0xF2, /* 0x30-0x33 */
+	0xCE, 0xF6, 0xCE, 0xEE, 0xCE, 0xEB, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xCE, 0xF7, 0xCE, 0xF4, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xAE, 0xD0, 0xAE, 0xC9, 0xAE, 0xCC, /* 0x40-0x43 */
+	0x00, 0x00, 0xAE, 0xCF, 0x00, 0x00, 0xD1, 0xD5, /* 0x44-0x47 */
+	0x00, 0x00, 0xAE, 0xCA, 0xD1, 0xD3, 0x00, 0x00, /* 0x48-0x4B */
+	0xAE, 0xCE, 0x00, 0x00, 0x00, 0x00, 0xAE, 0xCB, /* 0x4C-0x4F */
+	0x00, 0x00, 0xD1, 0xD6, 0xAE, 0xCD, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xD5, 0xAC, 0xB1, 0xDF, 0xD5, 0xAB, /* 0x58-0x5B */
+	0xD5, 0xAD, 0xB1, 0xDE, 0xB1, 0xE3, 0xD1, 0xD4, /* 0x5C-0x5F */
+	0x00, 0x00, 0xD5, 0xAA, 0xD5, 0xAE, 0x00, 0x00, /* 0x60-0x63 */
+	0xB1, 0xE0, 0xD5, 0xA9, 0xB1, 0xE2, 0x00, 0x00, /* 0x64-0x67 */
+	0xB1, 0xE1, 0x00, 0x00, 0xD9, 0xA7, 0x00, 0x00, /* 0x68-0x6B */
+	0xD9, 0xA2, 0x00, 0x00, 0xB4, 0xB6, 0xB4, 0xBA, /* 0x6C-0x6F */
+	0xB4, 0xB7, 0xD9, 0xA5, 0xD9, 0xA8, 0x00, 0x00, /* 0x70-0x73 */
+	0xB4, 0xB8, 0x00, 0x00, 0xB4, 0xB9, 0xB4, 0xBE, /* 0x74-0x77 */
+	0xDD, 0xC7, 0xD9, 0xA6, 0xB4, 0xBC, 0xD9, 0xA3, /* 0x78-0x7B */
+	0xD9, 0xA1, 0x00, 0x00, 0xB4, 0xBD, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xD9, 0xA4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xB7, 0x79, 0x00, 0x00, 0xDD, 0xBF, 0xB7, 0x76, /* 0x84-0x87 */
+	0xB7, 0x77, 0xB7, 0x75, 0xDD, 0xC4, 0xDD, 0xC3, /* 0x88-0x8B */
+	0xDD, 0xC0, 0xB7, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDD, 0xC2, 0xB4, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xDD, 0xC6, 0xDD, 0xC1, 0xB7, 0x78, 0xB7, 0x74, /* 0x94-0x97 */
+	0xB7, 0x7A, 0xDD, 0xC5, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xBA, 0x5C, 0x00, 0x00, 0xE1, 0xF8, /* 0x9C-0x9F */
+	0xE1, 0xF7, 0xE1, 0xF6, 0xBA, 0x5A, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xBA, 0x5B, 0xE5, 0xC5, 0xE5, 0xC8, 0xBC, 0xC8, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xBC, 0xC7, 0xE5, 0xC9, /* 0xAC-0xAF */
+	0xE5, 0xC4, 0xBC, 0xCA, 0xE5, 0xC6, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xBC, 0xC9, 0xE5, 0xC3, 0x00, 0x00, 0xE5, 0xC7, /* 0xB4-0xB7 */
+	0xBE, 0xE9, 0xBE, 0xE6, 0xE9, 0xBB, 0xE9, 0xBA, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE9, 0xB9, 0xE9, 0xB4, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE9, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xBE, 0xE7, 0x00, 0x00, 0xBE, 0xE4, 0xBE, 0xE8, /* 0xC4-0xC7 */
+	0xE9, 0xB3, 0xBE, 0xE5, 0xE9, 0xB6, 0xE9, 0xB7, /* 0xC8-0xCB */
+	0xE9, 0xBC, 0x00, 0x00, 0x00, 0x00, 0xE9, 0xB8, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xF2, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0xC7, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xEF, 0xDC, 0xC0, 0xC6, 0xEF, 0xDA, 0xEF, 0xDB, /* 0xD8-0xDB */
+	0xC2, 0x60, 0xC3, 0x6E, 0xF2, 0x4B, 0x00, 0x00, /* 0xDC-0xDF */
+	0xC3, 0x6D, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x51, /* 0xE0-0xE3 */
+	0xF4, 0x52, 0x00, 0x00, 0xC4, 0x66, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF4, 0x50, 0xC4, 0xE4, 0x00, 0x00, 0xF7, 0xDF, /* 0xE8-0xEB */
+	0xC5, 0xCE, 0xF8, 0xAA, 0xF8, 0xAB, 0x00, 0x00, /* 0xEC-0xEF */
+	0xA4, 0xEA, 0x00, 0x00, 0xA6, 0xB1, 0xA6, 0xB2, /* 0xF0-0xF3 */
+	0xA7, 0xF3, 0x00, 0x00, 0xCC, 0xD1, 0xAC, 0x54, /* 0xF4-0xF7 */
+	0xAE, 0xD1, 0xB1, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0xB0, 0xD2, 0x00, 0x00, 0xB4, 0xBF, 0xB4, 0xC0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_67[512] = {
+	0xB3, 0xCC, 0xD9, 0xA9, 0x00, 0x00, 0xB7, 0x7C, /* 0x00-0x03 */
+	0xE1, 0xFA, 0xE1, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xA4, 0xEB, 0xA6, 0xB3, 0xCC, 0xD2, 0xAA, 0x42, /* 0x08-0x0B */
+	0x00, 0x00, 0xAA, 0x41, 0x00, 0x00, 0xCE, 0xF9, /* 0x0C-0x0F */
+	0xCE, 0xFA, 0x00, 0x00, 0xD1, 0xD7, 0xD1, 0xD8, /* 0x10-0x13 */
+	0xAE, 0xD2, 0xAE, 0xD3, 0x00, 0x00, 0xAE, 0xD4, /* 0x14-0x17 */
+	0xD5, 0xAF, 0x00, 0x00, 0x00, 0x00, 0xB1, 0xE6, /* 0x18-0x1B */
+	0x00, 0x00, 0xB4, 0xC2, 0x00, 0x00, 0xB4, 0xC1, /* 0x1C-0x1F */
+	0xDD, 0xC8, 0xDF, 0x7A, 0xE1, 0xFB, 0xE9, 0xBD, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0x61, 0xC4, 0x67, /* 0x24-0x27 */
+	0xA4, 0xEC, 0x00, 0x00, 0xA5, 0xBC, 0xA5, 0xBD, /* 0x28-0x2B */
+	0xA5, 0xBB, 0xA5, 0xBE, 0xA5, 0xBA, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xA6, 0xB6, 0x00, 0x00, 0xC9, 0xF6, /* 0x30-0x33 */
+	0xA6, 0xB5, 0xA6, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xC9, 0xF1, 0xC9, 0xF0, 0xC9, 0xF3, 0xC9, 0xF2, /* 0x38-0x3B */
+	0xC9, 0xF5, 0xA6, 0xB4, 0xC9, 0xEF, 0xC9, 0xF4, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xCA, 0xFD, 0xA7, 0xFD, 0xCA, 0xFE, /* 0x44-0x47 */
+	0xCB, 0x43, 0xA7, 0xFC, 0x00, 0x00, 0xCB, 0x47, /* 0x48-0x4B */
+	0xCB, 0x42, 0xCB, 0x45, 0xA7, 0xF5, 0xA7, 0xF6, /* 0x4C-0x4F */
+	0xA7, 0xF7, 0xA7, 0xF8, 0x00, 0x00, 0xA8, 0x40, /* 0x50-0x53 */
+	0x00, 0x00, 0xCB, 0x41, 0xA7, 0xFA, 0xA8, 0x41, /* 0x54-0x57 */
+	0x00, 0x00, 0xCB, 0x40, 0xCB, 0x46, 0x00, 0x00, /* 0x58-0x5B */
+	0xA7, 0xF9, 0xCB, 0x44, 0xA7, 0xFB, 0xA7, 0xF4, /* 0x5C-0x5F */
+	0xA7, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xAA, 0x57, 0x00, 0x00, /* 0x68-0x6B */
+	0xCC, 0xD4, 0xAA, 0x43, 0x00, 0x00, 0xAA, 0x4D, /* 0x6C-0x6F */
+	0xAA, 0x4E, 0xAA, 0x46, 0xAA, 0x58, 0xAA, 0x48, /* 0x70-0x73 */
+	0xCC, 0xDC, 0xAA, 0x53, 0xCC, 0xD7, 0xAA, 0x49, /* 0x74-0x77 */
+	0xCC, 0xE6, 0xCC, 0xE7, 0xCC, 0xDF, 0xCC, 0xD8, /* 0x78-0x7B */
+	0xAA, 0x56, 0xCC, 0xE4, 0xAA, 0x51, 0xAA, 0x4F, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xCC, 0xE5, 0x00, 0x00, 0xCC, 0xE3, /* 0x80-0x83 */
+	0xCC, 0xDB, 0xCC, 0xD3, 0xCC, 0xDA, 0xAA, 0x4A, /* 0x84-0x87 */
+	0x00, 0x00, 0xAA, 0x50, 0x00, 0x00, 0xAA, 0x44, /* 0x88-0x8B */
+	0xCC, 0xDE, 0xCC, 0xDD, 0xCC, 0xD5, 0x00, 0x00, /* 0x8C-0x8F */
+	0xAA, 0x52, 0xCC, 0xE1, 0xCC, 0xD6, 0xAA, 0x55, /* 0x90-0x93 */
+	0xCC, 0xE8, 0xAA, 0x45, 0x00, 0x00, 0xAA, 0x4C, /* 0x94-0x97 */
+	0xCC, 0xD9, 0xCC, 0xE2, 0xAA, 0x54, 0x00, 0x00, /* 0x98-0x9B */
+	0xAA, 0x47, 0xAA, 0x4B, 0x00, 0x00, 0xCC, 0xE0, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0x5B, 0xAC, 0x5C, /* 0xAC-0xAF */
+	0xAC, 0x69, 0x00, 0x00, 0xCF, 0x56, 0xCF, 0x4C, /* 0xB0-0xB3 */
+	0xAC, 0x62, 0xCF, 0x4A, 0xAC, 0x5B, 0xCF, 0x45, /* 0xB4-0xB7 */
+	0xAC, 0x65, 0xCF, 0x52, 0xCE, 0xFE, 0xCF, 0x41, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCF, 0x44, 0xCE, 0xFB, 0xCF, 0x51, 0xCF, 0x61, /* 0xC0-0xC3 */
+	0xAC, 0x60, 0xCF, 0x46, 0xCF, 0x58, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xCE, 0xFD, 0xCF, 0x5F, 0xCF, 0x60, 0xCF, 0x63, /* 0xC8-0xCB */
+	0xCF, 0x5A, 0xCF, 0x4B, 0xCF, 0x53, 0xAC, 0x66, /* 0xCC-0xCF */
+	0xAC, 0x59, 0xAC, 0x61, 0xAC, 0x6D, 0xAC, 0x56, /* 0xD0-0xD3 */
+	0xAC, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xCF, 0x43, 0xAC, 0x6A, 0xAC, 0x63, 0xCF, 0x5D, /* 0xD8-0xDB */
+	0xCF, 0x40, 0xAC, 0x6C, 0xAC, 0x67, 0xCF, 0x49, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xAC, 0x6B, 0xCF, 0x50, /* 0xE0-0xE3 */
+	0xCF, 0x48, 0xAC, 0x64, 0xCF, 0x5C, 0xCF, 0x54, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xAC, 0x5E, 0xCF, 0x62, 0xCF, 0x47, /* 0xE8-0xEB */
+	0xAC, 0x5A, 0xCF, 0x59, 0xCF, 0x4F, 0xAC, 0x5F, /* 0xEC-0xEF */
+	0xCF, 0x55, 0xAC, 0x57, 0xCE, 0xFC, 0xAC, 0x68, /* 0xF0-0xF3 */
+	0xAE, 0xE3, 0xAC, 0x5D, 0xCF, 0x4E, 0xCF, 0x4D, /* 0xF4-0xF7 */
+	0xCF, 0x42, 0x00, 0x00, 0xCF, 0x5E, 0x00, 0x00, /* 0xF8-0xFB */
+	0xCF, 0x57, 0x00, 0x00, 0x00, 0x00, 0xAC, 0x55, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_68[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xEC, 0xAE, 0xEA, /* 0x10-0x13 */
+	0xD1, 0xED, 0x00, 0x00, 0xD1, 0xE1, 0xAE, 0xDF, /* 0x14-0x17 */
+	0xAE, 0xEB, 0x00, 0x00, 0xD1, 0xDA, 0x00, 0x00, /* 0x18-0x1B */
+	0xD1, 0xE3, 0xD1, 0xEB, 0x00, 0x00, 0xD1, 0xD9, /* 0x1C-0x1F */
+	0xD1, 0xF4, 0xAE, 0xD5, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xD1, 0xF3, 0xD1, 0xEE, 0x00, 0x00, /* 0x24-0x27 */
+	0xD1, 0xEF, 0xAE, 0xDD, 0xAE, 0xE8, 0xD1, 0xE5, /* 0x28-0x2B */
+	0x00, 0x00, 0xD1, 0xE6, 0xD1, 0xF0, 0xD1, 0xE7, /* 0x2C-0x2F */
+	0x00, 0x00, 0xD1, 0xE2, 0xD1, 0xDC, 0xD1, 0xDD, /* 0x30-0x33 */
+	0xD1, 0xEA, 0xD1, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xAE, 0xD6, 0xAE, 0xDA, 0xD1, 0xF2, 0xD1, 0xDE, /* 0x38-0x3B */
+	0xAE, 0xE6, 0xAE, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xAE, 0xE5, 0xAE, 0xEC, 0xAE, 0xDB, 0xAE, 0xE7, /* 0x40-0x43 */
+	0xD1, 0xE9, 0xAE, 0xE9, 0xAE, 0xD8, 0x00, 0x00, /* 0x44-0x47 */
+	0xAE, 0xD7, 0xD1, 0xDB, 0x00, 0x00, 0xD1, 0xDF, /* 0x48-0x4B */
+	0xAE, 0xE0, 0xD1, 0xF1, 0xD1, 0xE8, 0xD1, 0xE0, /* 0x4C-0x4F */
+	0xAE, 0xE4, 0xAE, 0xE1, 0x00, 0x00, 0xAE, 0xD9, /* 0x50-0x53 */
+	0xAE, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xC4, /* 0x68-0x6B */
+	0x00, 0x00, 0xD5, 0xB4, 0xD5, 0xB5, 0xD5, 0xB9, /* 0x6C-0x6F */
+	0x00, 0x00, 0xD5, 0xC8, 0xD5, 0xC5, 0x00, 0x00, /* 0x70-0x73 */
+	0xD5, 0xBE, 0xD5, 0xBD, 0xB1, 0xED, 0xD5, 0xC1, /* 0x74-0x77 */
+	0xD5, 0xD0, 0xD5, 0xB0, 0x00, 0x00, 0xD5, 0xD1, /* 0x78-0x7B */
+	0xD5, 0xC3, 0xD5, 0xD5, 0xD5, 0xC9, 0xB1, 0xEC, /* 0x7C-0x7F */
+	
+	0xD5, 0xC7, 0xB1, 0xE7, 0xB1, 0xFC, 0xB1, 0xF2, /* 0x80-0x83 */
+	0x00, 0x00, 0xB1, 0xF6, 0xB1, 0xF5, 0xD5, 0xB1, /* 0x84-0x87 */
+	0x00, 0x00, 0xD5, 0xCE, 0xD5, 0xD4, 0xD5, 0xCC, /* 0x88-0x8B */
+	0xD5, 0xD3, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xC0, /* 0x8C-0x8F */
+	0xD5, 0xB2, 0xD5, 0xD2, 0xD5, 0xC2, 0xB1, 0xEA, /* 0x90-0x93 */
+	0xB1, 0xF7, 0x00, 0x00, 0xD5, 0xCB, 0xB1, 0xF0, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD5, 0xCA, /* 0x98-0x9B */
+	0xD5, 0xB3, 0xB1, 0xF8, 0x00, 0x00, 0xB1, 0xFA, /* 0x9C-0x9F */
+	0xD5, 0xCD, 0xB1, 0xFB, 0xB1, 0xE9, 0xD5, 0xBA, /* 0xA0-0xA3 */
+	0xD5, 0xCF, 0x00, 0x00, 0x00, 0x00, 0xB1, 0xEF, /* 0xA4-0xA7 */
+	0xB1, 0xF9, 0xD5, 0xBC, 0xD5, 0xC6, 0xD5, 0xB7, /* 0xA8-0xAB */
+	0xD5, 0xBB, 0xB1, 0xF4, 0xD5, 0xB6, 0xB1, 0xE8, /* 0xAC-0xAF */
+	0xB1, 0xF1, 0xB1, 0xEE, 0xD5, 0xBF, 0xAE, 0xDE, /* 0xB0-0xB3 */
+	0xD9, 0xC0, 0xB1, 0xEB, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xB1, 0xF3, 0x00, 0x00, 0xD9, 0xC3, 0xD9, 0xD9, /* 0xC4-0xC7 */
+	0xD9, 0xCE, 0xB4, 0xD6, 0x00, 0x00, 0xB4, 0xD1, /* 0xC8-0xCB */
+	0xD9, 0xBD, 0xB4, 0xD2, 0xD9, 0xCD, 0x00, 0x00, /* 0xCC-0xCF */
+	0xD9, 0xC6, 0xD9, 0xD3, 0xB4, 0xCE, 0xD9, 0xAB, /* 0xD0-0xD3 */
+	0xD9, 0xD5, 0xB4, 0xC4, 0xD9, 0xB3, 0xB4, 0xC7, /* 0xD4-0xD7 */
+	0xB4, 0xC6, 0x00, 0x00, 0xB4, 0xD7, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD9, 0xAD, 0xD9, 0xCF, 0xD9, 0xD0, 0xB4, 0xC9, /* 0xDC-0xDF */
+	0xB4, 0xC5, 0xD9, 0xBB, 0x00, 0x00, 0xB4, 0xD0, /* 0xE0-0xE3 */
+	0xD9, 0xB6, 0x00, 0x00, 0xD9, 0xD1, 0xB4, 0xCC, /* 0xE4-0xE7 */
+	0xD9, 0xC9, 0xD9, 0xD6, 0xD9, 0xB0, 0xD9, 0xB5, /* 0xE8-0xEB */
+	0xD9, 0xAF, 0x00, 0x00, 0xB4, 0xCB, 0xD9, 0xC2, /* 0xEC-0xEF */
+	0xDD, 0xDE, 0xD9, 0xB1, 0xB4, 0xCF, 0xD9, 0xBA, /* 0xF0-0xF3 */
+	0xD9, 0xD2, 0xB4, 0xCA, 0xD9, 0xB7, 0xD9, 0xB4, /* 0xF4-0xF7 */
+	0xD9, 0xC5, 0xB4, 0xCD, 0xB4, 0xC3, 0xB4, 0xD9, /* 0xF8-0xFB */
+	0xD9, 0xC8, 0xD9, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
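+/*
+ * Note on the u2c_XX[512] tables below: each table holds 256 byte pairs,
+ * one per Unicode code point U+XX00..U+XXFF, giving its two-byte encoding
+ * in the target charset (apparently Big5/CP950).  The entry for a code
+ * point is at index (low byte * 2); a 0x00, 0x00 pair marks a code point
+ * with no mapping.
+ */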
+static unsigned char u2c_69[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD9, 0xAC, 0xB4, 0xC8, 0xD9, 0xD4, 0xD9, 0xBC, /* 0x04-0x07 */
+	0xD9, 0xBE, 0x00, 0x00, 0xD9, 0xCB, 0xD9, 0xCA, /* 0x08-0x0B */
+	0xD9, 0xAA, 0xB4, 0xD3, 0xB4, 0xD5, 0xD9, 0xB2, /* 0x0C-0x0F */
+	0xD9, 0xB9, 0xD9, 0xC1, 0xB4, 0xD4, 0xD9, 0xB8, /* 0x10-0x13 */
+	0xD9, 0xC4, 0xD9, 0xD7, 0x00, 0x00, 0xD9, 0xCC, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xD9, 0xAE, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0xF2, /* 0x2C-0x2F */
+	0xB7, 0xA6, 0x00, 0x00, 0xDD, 0xF0, 0xDD, 0xDB, /* 0x30-0x33 */
+	0xDD, 0xE0, 0xDD, 0xD9, 0x00, 0x00, 0xDD, 0xEC, /* 0x34-0x37 */
+	0xDD, 0xCB, 0xDD, 0xD2, 0x00, 0x00, 0xDD, 0xEA, /* 0x38-0x3B */
+	0xDD, 0xF4, 0xDD, 0xDC, 0x00, 0x00, 0xDD, 0xCF, /* 0x3C-0x3F */
+	0xDD, 0xE2, 0xDD, 0xE7, 0xDD, 0xD3, 0x00, 0x00, /* 0x40-0x43 */
+	0xDD, 0xE4, 0xDD, 0xD0, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xDD, 0xD7, 0xDD, 0xD8, 0xB7, 0xA8, 0xDD, 0xEB, /* 0x48-0x4B */
+	0xDD, 0xE9, 0x00, 0x00, 0xDD, 0xCC, 0xDD, 0xEE, /* 0x4C-0x4F */
+	0x00, 0x00, 0xDD, 0xEF, 0xDD, 0xF1, 0xB7, 0xAC, /* 0x50-0x53 */
+	0xB7, 0xA4, 0x00, 0x00, 0xD5, 0xB8, 0xDD, 0xD4, /* 0x54-0x57 */
+	0xDD, 0xE6, 0xDD, 0xD5, 0xB7, 0xA1, 0xB7, 0xB1, /* 0x58-0x5B */
+	0xDD, 0xED, 0xB7, 0xAF, 0xB7, 0xAB, 0xDD, 0xCA, /* 0x5C-0x5F */
+	0xB7, 0xA3, 0x00, 0x00, 0xDD, 0xCD, 0xB7, 0xB0, /* 0x60-0x63 */
+	0x00, 0x00, 0xDD, 0xDD, 0xDD, 0xC9, 0x00, 0x00, /* 0x64-0x67 */
+	0xB7, 0xA9, 0xDD, 0xE1, 0xDD, 0xD1, 0xB7, 0xAA, /* 0x68-0x6B */
+	0xDD, 0xDA, 0xB7, 0x7E, 0xB4, 0xD8, 0xDD, 0xE3, /* 0x6C-0x6F */
+	0xD9, 0xBF, 0xDD, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xDD, 0xE8, 0xB7, 0xA5, 0xDD, 0xE5, 0xB7, 0xA2, /* 0x74-0x77 */
+	0xDD, 0xDF, 0xB7, 0xAD, 0xDD, 0xD6, 0xDD, 0xF3, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xA7, 0xDE, 0xC6, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xAE, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xE2, 0x4A, 0xE2, 0x48, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE2, 0x5E, 0xE2, 0x46, 0x00, 0x00, 0xE2, 0x58, /* 0x90-0x93 */
+	0xB7, 0x7D, 0xBA, 0x5F, 0xE2, 0x42, 0xE2, 0x5D, /* 0x94-0x97 */
+	0x00, 0x00, 0xE2, 0x47, 0xE2, 0x55, 0xBA, 0x64, /* 0x98-0x9B */
+	0xBA, 0x5D, 0x00, 0x00, 0xE2, 0x5B, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE2, 0x40, 0xE2, 0x5A, 0x00, 0x00, 0xBA, 0x6F, /* 0xA0-0xA3 */
+	0xE2, 0x51, 0xE2, 0x61, 0xBA, 0x6D, 0xE2, 0x49, /* 0xA4-0xA7 */
+	0xBA, 0x5E, 0xE2, 0x4B, 0xE2, 0x59, 0xBA, 0x67, /* 0xA8-0xAB */
+	0xE2, 0x44, 0xBA, 0x6B, 0xBA, 0x61, 0xE2, 0x4D, /* 0xAC-0xAF */
+	0xE2, 0x43, 0xE1, 0xFC, 0x00, 0x00, 0xE2, 0x57, /* 0xB0-0xB3 */
+	0xBA, 0x68, 0xE2, 0x60, 0xE1, 0xFD, 0xBA, 0x65, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE2, 0x53, 0x00, 0x00, 0xBA, 0x66, /* 0xB8-0xBB */
+	0xE2, 0x45, 0xE2, 0x50, 0xE2, 0x4C, 0xE2, 0x4E, /* 0xBC-0xBF */
+	0x00, 0x00, 0xBA, 0x60, 0xE2, 0x5F, 0xBA, 0x6E, /* 0xC0-0xC3 */
+	0xE2, 0x4F, 0x00, 0x00, 0xE2, 0x62, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE1, 0xFE, 0xE2, 0x54, 0xBA, 0x63, /* 0xC8-0xCB */
+	0xBA, 0x6C, 0xBA, 0x6A, 0xE2, 0x41, 0xE2, 0x56, /* 0xCC-0xCF */
+	0xBA, 0x69, 0x00, 0x00, 0x00, 0x00, 0xBA, 0x62, /* 0xD0-0xD3 */
+	0xE2, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE2, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xE5, 0xD5, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xE5, 0xD1, 0xE5, 0xCD, 0xE5, 0xE1, 0xE5, 0xDE, /* 0xE4-0xE7 */
+	0xBC, 0xCD, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xE5, /* 0xE8-0xEB */
+	0xE5, 0xD4, 0xBC, 0xD8, 0xE5, 0xDB, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE5, 0xD0, 0xE5, 0xDA, 0xBC, 0xD5, /* 0xF0-0xF3 */
+	0xE5, 0xEE, 0x00, 0x00, 0xE5, 0xEB, 0xE5, 0xDD, /* 0xF4-0xF7 */
+	0xE5, 0xCE, 0x00, 0x00, 0x00, 0x00, 0xE5, 0xE2, /* 0xF8-0xFB */
+	0xE5, 0xE4, 0xBC, 0xD1, 0xE5, 0xD8, 0xE5, 0xD3, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6A[512] = {
+	0xE5, 0xCA, 0xBC, 0xCE, 0xBC, 0xD6, 0x00, 0x00, /* 0x00-0x03 */
+	0xE5, 0xE7, 0xBC, 0xD7, 0xE5, 0xCB, 0xE5, 0xED, /* 0x04-0x07 */
+	0xE5, 0xE0, 0xE5, 0xE6, 0xBC, 0xD4, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE5, 0xE3, 0x00, 0x00, 0xE5, 0xEA, /* 0x0C-0x0F */
+	0x00, 0x00, 0xBC, 0xD9, 0x00, 0x00, 0xBC, 0xD3, /* 0x10-0x13 */
+	0xE5, 0xDC, 0xE5, 0xCF, 0xE5, 0xEF, 0xE5, 0xCC, /* 0x14-0x17 */
+	0xE5, 0xE8, 0xBC, 0xD0, 0x00, 0x00, 0xE5, 0xD6, /* 0x18-0x1B */
+	0x00, 0x00, 0xE5, 0xD7, 0xBC, 0xCF, 0xBC, 0xCC, /* 0x1C-0x1F */
+	0xE5, 0xD2, 0xBC, 0xD2, 0x00, 0x00, 0xBC, 0xCB, /* 0x20-0x23 */
+	0x00, 0x00, 0xE5, 0xE9, 0xE5, 0xEC, 0xE5, 0xD9, /* 0x24-0x27 */
+	0xE9, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xC2, 0x00, 0x00, /* 0x30-0x33 */
+	0xE9, 0xBE, 0xBE, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xBE, 0xEB, 0xBE, 0xF0, 0xBE, 0xEC, 0xE9, 0xCC, /* 0x38-0x3B */
+	0xE9, 0xD7, 0xBE, 0xEA, 0xE9, 0xC4, 0xE9, 0xCD, /* 0x3C-0x3F */
+	0xE5, 0xDF, 0xE9, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xBE, 0xF1, 0x00, 0x00, 0xE9, 0xDD, 0xBE, 0xF5, /* 0x44-0x47 */
+	0xBE, 0xF8, 0xE9, 0xC0, 0x00, 0x00, 0xBE, 0xF4, /* 0x48-0x4B */
+	0x00, 0x00, 0xE9, 0xDB, 0xE9, 0xDC, 0xE9, 0xD2, /* 0x4C-0x4F */
+	0xE9, 0xD1, 0xE9, 0xC9, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE9, 0xD3, 0xE9, 0xDA, 0xE9, 0xD9, 0x00, 0x00, /* 0x54-0x57 */
+	0xBE, 0xEF, 0xBE, 0xED, 0xE9, 0xCB, 0xE9, 0xC8, /* 0x58-0x5B */
+	0x00, 0x00, 0xE9, 0xC5, 0xE9, 0xD8, 0xBE, 0xF7, /* 0x5C-0x5F */
+	0xE9, 0xD6, 0xBE, 0xF3, 0xBE, 0xF2, 0x00, 0x00, /* 0x60-0x63 */
+	0xE9, 0xD0, 0x00, 0x00, 0xE9, 0xBF, 0xE9, 0xC1, /* 0x64-0x67 */
+	0xE9, 0xC3, 0xE9, 0xD5, 0xE9, 0xCF, 0xBE, 0xEE, /* 0x68-0x6B */
+	0x00, 0x00, 0xE9, 0xC6, 0x00, 0x00, 0xE9, 0xD4, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xC7, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0xCF, 0xED, 0x45, /* 0x7C-0x7F */
+	
+	0xC0, 0xC8, 0xEC, 0xF5, 0x00, 0x00, 0xED, 0x41, /* 0x80-0x83 */
+	0xC0, 0xCA, 0xED, 0x48, 0x00, 0x00, 0xEC, 0xFC, /* 0x84-0x87 */
+	0x00, 0x00, 0xEC, 0xF7, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xED, 0x49, 0xEC, 0xF3, 0xEC, 0xFE, 0x00, 0x00, /* 0x8C-0x8F */
+	0xC0, 0xD1, 0xED, 0x44, 0xED, 0x4A, 0xEC, 0xFD, /* 0x90-0x93 */
+	0xC0, 0xC9, 0xED, 0x40, 0xEC, 0xF4, 0xC0, 0xD0, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0x47, 0xEC, 0xF9, /* 0x98-0x9B */
+	0xC0, 0xCC, 0x00, 0x00, 0xEC, 0xFB, 0xEC, 0xF8, /* 0x9C-0x9F */
+	0xC0, 0xD2, 0xEC, 0xFA, 0xC0, 0xCB, 0xC0, 0xCE, /* 0xA0-0xA3 */
+	0xED, 0x43, 0xEC, 0xF6, 0xED, 0x46, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xED, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xC2, 0x63, 0xEF, 0xE7, 0xC2, 0x68, 0xC2, 0x69, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC2, 0x62, /* 0xB0-0xB3 */
+	0xEF, 0xE6, 0x00, 0x00, 0xEF, 0xE3, 0xEF, 0xE4, /* 0xB4-0xB7 */
+	0xC2, 0x66, 0xEF, 0xDE, 0xEF, 0xE2, 0xC2, 0x65, /* 0xB8-0xBB */
+	0x00, 0x00, 0xEF, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0x67, 0xC2, 0x64, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xEF, 0xDD, 0xEF, 0xE1, 0xEF, 0xE5, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0x51, /* 0xC8-0xCB */
+	0xF2, 0x4E, 0xF2, 0x57, 0x00, 0x00, 0xF2, 0x56, /* 0xCC-0xCF */
+	0xF2, 0x54, 0xF2, 0x4F, 0x00, 0x00, 0xC3, 0x72, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xF2, 0x50, 0xC3, 0x71, 0xC0, 0xCD, /* 0xD8-0xDB */
+	0xF2, 0x53, 0xC3, 0x70, 0xF2, 0x58, 0xF2, 0x52, /* 0xDC-0xDF */
+	0xF2, 0x4D, 0xEF, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xC3, 0x6F, 0x00, 0x00, 0xF2, 0x4C, /* 0xE4-0xE7 */
+	0xF4, 0x56, 0x00, 0x00, 0xF4, 0x55, 0xF2, 0x55, /* 0xE8-0xEB */
+	0xC4, 0x68, 0x00, 0x00, 0xF4, 0x59, 0xF4, 0x5A, /* 0xEC-0xEF */
+	0xF4, 0x54, 0xF4, 0x58, 0x00, 0x00, 0xF4, 0x53, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xF5, 0xD1, 0xF4, 0x57, 0xC4, 0xE7, 0xC4, 0xE5, /* 0xF8-0xFB */
+	0xF5, 0xCF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6B[512] = {
+	0xF5, 0xD2, 0x00, 0x00, 0xF5, 0xCE, 0xF5, 0xD0, /* 0x00-0x03 */
+	0xC4, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xF6, 0xE5, 0xF6, 0xE6, 0xC5, 0x76, 0xF6, 0xE4, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0xE2, /* 0x0C-0x0F */
+	0xC5, 0xCF, 0xF7, 0xE0, 0xF7, 0xE1, 0xF8, 0xAC, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xC6, 0x56, 0xF8, 0xF3, /* 0x14-0x17 */
+	0xF8, 0xF1, 0xF8, 0xF2, 0xF8, 0xF4, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xBB, 0x00, 0x00, /* 0x1C-0x1F */
+	0xA4, 0xED, 0xA6, 0xB8, 0x00, 0x00, 0xAA, 0x59, /* 0x20-0x23 */
+	0x00, 0x00, 0xCC, 0xE9, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xCF, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0xD1, 0xF5, 0xD1, 0xF7, 0x00, 0x00, 0xD1, 0xF6, /* 0x2C-0x2F */
+	0x00, 0x00, 0xD1, 0xF8, 0xB1, 0xFD, 0xD5, 0xD7, /* 0x30-0x33 */
+	0xD1, 0xF9, 0x00, 0x00, 0xD5, 0xD6, 0xD5, 0xD8, /* 0x34-0x37 */
+	0xD5, 0xD9, 0xD9, 0xDA, 0xB4, 0xDB, 0xD9, 0xDB, /* 0x38-0x3B */
+	0xD9, 0xDD, 0xB4, 0xDC, 0xB4, 0xDA, 0xD9, 0xDC, /* 0x3C-0x3F */
+	0x00, 0x00, 0xDD, 0xFA, 0xDD, 0xF8, 0xDD, 0xF7, /* 0x40-0x43 */
+	0x00, 0x00, 0xDD, 0xF6, 0xDD, 0xF5, 0xB7, 0xB2, /* 0x44-0x47 */
+	0xDD, 0xF9, 0xBA, 0x70, 0xE2, 0x63, 0xE2, 0x65, /* 0x48-0x4B */
+	0xBA, 0x71, 0xE2, 0x64, 0xBC, 0xDB, 0x00, 0x00, /* 0x4C-0x4F */
+	0xBC, 0xDA, 0xE5, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xE9, 0xDF, 0xE9, 0xDE, 0xE9, 0xE0, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xBE, 0xF9, 0x00, 0x00, 0xED, 0x4B, /* 0x58-0x5B */
+	0xC0, 0xD3, 0x00, 0x00, 0xEF, 0xE8, 0xC2, 0x6A, /* 0x5C-0x5F */
+	0xF2, 0x59, 0xC5, 0x77, 0xA4, 0xEE, 0xA5, 0xBF, /* 0x60-0x63 */
+	0xA6, 0xB9, 0xA8, 0x42, 0xAA, 0x5A, 0xAA, 0x5B, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xAC, 0x6E, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xD1, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xB3, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xD1, 0xBE, 0xFA, /* 0x74-0x77 */
+	0xC2, 0x6B, 0xA4, 0xEF, 0x00, 0x00, 0xA6, 0xBA, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xEB, 0xAA, 0x5C, /* 0x7C-0x7F */
+	
+	0xCC, 0xEA, 0x00, 0x00, 0xCF, 0x65, 0xAC, 0x6F, /* 0x80-0x83 */
+	0xCF, 0x66, 0x00, 0x00, 0xAC, 0x70, 0x00, 0x00, /* 0x84-0x87 */
+	0xD1, 0xFC, 0xAE, 0xEE, 0xAE, 0xED, 0x00, 0x00, /* 0x88-0x8B */
+	0xD5, 0xDE, 0xD5, 0xDC, 0xD5, 0xDD, 0xD5, 0xDB, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD5, 0xDA, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xD9, 0xDE, 0xD9, 0xE1, 0xB4, 0xDE, 0xD9, 0xDF, /* 0x94-0x97 */
+	0xB4, 0xDD, 0xD9, 0xE0, 0x00, 0x00, 0xDD, 0xFB, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x66, 0xE2, 0x67, /* 0x9C-0x9F */
+	0xE2, 0x68, 0x00, 0x00, 0xE5, 0xF3, 0xE5, 0xF2, /* 0xA0-0xA3 */
+	0xBC, 0xDC, 0xE5, 0xF1, 0xE5, 0xF4, 0xE9, 0xE1, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xE2, 0xE9, 0xE3, /* 0xA8-0xAB */
+	0x00, 0x00, 0xED, 0x4C, 0xC0, 0xD4, 0xC2, 0x6C, /* 0xAC-0xAF */
+	0xF2, 0x5A, 0x00, 0x00, 0xC4, 0xE8, 0xC9, 0x5F, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xAC, 0x71, 0xCF, 0x67, 0xAE, 0xEF, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xB1, 0xFE, 0x00, 0x00, /* 0xB8-0xBB */
+	0xB4, 0xDF, 0xD9, 0xE2, 0x00, 0x00, 0xB7, 0xB5, /* 0xBC-0xBF */
+	0xB7, 0xB4, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x69, /* 0xC0-0xC3 */
+	0xE2, 0x6A, 0xBC, 0xDD, 0xBC, 0xDE, 0xE9, 0xE5, /* 0xC4-0xC7 */
+	0xE9, 0xE4, 0xEF, 0xE9, 0xF7, 0xE3, 0xA4, 0xF0, /* 0xC8-0xCB */
+	0xC9, 0x60, 0xA5, 0xC0, 0x00, 0x00, 0xA8, 0x43, /* 0xCC-0xCF */
+	0xCB, 0x48, 0x00, 0x00, 0xAC, 0x72, 0xB7, 0xB6, /* 0xD0-0xD3 */
+	0xA4, 0xF1, 0x00, 0x00, 0xCF, 0x68, 0xAC, 0x73, /* 0xD4-0xD7 */
+	0xCF, 0x69, 0x00, 0x00, 0xC0, 0xD5, 0xA4, 0xF2, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0xCC, 0xEC, 0x00, 0x00, /* 0xDC-0xDF */
+	0xCF, 0x6A, 0x00, 0x00, 0xD2, 0x42, 0xD2, 0x41, /* 0xE0-0xE3 */
+	0xD1, 0xFE, 0x00, 0x00, 0xD1, 0xFD, 0xD2, 0x43, /* 0xE4-0xE7 */
+	0xD2, 0x40, 0x00, 0x00, 0x00, 0x00, 0xB2, 0x40, /* 0xE8-0xEB */
+	0xB2, 0x41, 0x00, 0x00, 0x00, 0x00, 0xB4, 0xE0, /* 0xEC-0xEF */
+	0xD9, 0xE3, 0x00, 0x00, 0xD9, 0xE4, 0xD9, 0xE5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0x41, /* 0xF4-0xF7 */
+	0xDE, 0x42, 0xDE, 0x40, 0x00, 0x00, 0xDD, 0xFD, /* 0xF8-0xFB */
+	0xDD, 0xFE, 0xB7, 0xB7, 0xE2, 0x6B, 0xE5, 0xF7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6C[512] = {
+	0xE5, 0xF6, 0xE5, 0xF5, 0xE5, 0xF8, 0xE9, 0xE7, /* 0x00-0x03 */
+	0xE9, 0xE6, 0xBE, 0xFB, 0xE9, 0xE8, 0x00, 0x00, /* 0x04-0x07 */
+	0xC0, 0xD6, 0xED, 0x4D, 0x00, 0x00, 0xEF, 0xEA, /* 0x08-0x0B */
+	0xF2, 0x5B, 0xF6, 0xE7, 0x00, 0x00, 0xA4, 0xF3, /* 0x0C-0x0F */
+	0xA5, 0xC2, 0xA5, 0xC1, 0x00, 0x00, 0xAA, 0x5D, /* 0x10-0x13 */
+	0xC9, 0x61, 0xC9, 0x7E, 0xA6, 0xBB, 0x00, 0x00, /* 0x14-0x17 */
+	0xC9, 0xF7, 0xCB, 0x49, 0xCB, 0x4A, 0xAA, 0x5E, /* 0x18-0x1B */
+	0x00, 0x00, 0xCC, 0xED, 0x00, 0x00, 0xAC, 0x74, /* 0x1C-0x1F */
+	0xCF, 0x6B, 0xCF, 0x6C, 0x00, 0x00, 0xAE, 0xF0, /* 0x20-0x23 */
+	0xAE, 0xF4, 0xD2, 0x44, 0xAE, 0xF3, 0xAE, 0xF1, /* 0x24-0x27 */
+	0xAE, 0xF2, 0x00, 0x00, 0xD5, 0xDF, 0xB2, 0x42, /* 0x28-0x2B */
+	0xB4, 0xE3, 0x00, 0x00, 0xB4, 0xE1, 0xB4, 0xE2, /* 0x2C-0x2F */
+	0xD9, 0xE6, 0x00, 0x00, 0x00, 0x00, 0xBA, 0x72, /* 0x30-0x33 */
+	0xA4, 0xF4, 0x00, 0x00, 0xC9, 0xA1, 0x00, 0x00, /* 0x34-0x37 */
+	0xA5, 0xC3, 0x00, 0x00, 0x00, 0x00, 0xC9, 0xA4, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xA5, 0xC6, 0xC9, 0xA3, /* 0x3C-0x3F */
+	0xA5, 0xC5, 0xA5, 0xC4, 0xA8, 0x44, 0xC9, 0xA2, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0xF8, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xC9, 0xFC, 0xC9, 0xFE, /* 0x48-0x4B */
+	0xCA, 0x40, 0xA6, 0xC5, 0xA6, 0xC6, 0xC9, 0xFB, /* 0x4C-0x4F */
+	0xA6, 0xC1, 0x00, 0x00, 0xC9, 0xF9, 0x00, 0x00, /* 0x50-0x53 */
+	0xC9, 0xFD, 0xA6, 0xC2, 0x00, 0x00, 0xA6, 0xBD, /* 0x54-0x57 */
+	0x00, 0x00, 0xA6, 0xBE, 0x00, 0x00, 0xA6, 0xC4, /* 0x58-0x5B */
+	0xC9, 0xFA, 0xA6, 0xBC, 0xA8, 0x45, 0xA6, 0xBF, /* 0x5C-0x5F */
+	0xA6, 0xC0, 0xA6, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xCB, 0x5B, 0xCB, 0x59, 0xCB, 0x4C, /* 0x64-0x67 */
+	0xA8, 0x51, 0xCB, 0x53, 0xA8, 0x4C, 0xCB, 0x4D, /* 0x68-0x6B */
+	0x00, 0x00, 0xCB, 0x55, 0x00, 0x00, 0xCB, 0x52, /* 0x6C-0x6F */
+	0xA8, 0x4F, 0xCB, 0x51, 0xA8, 0x56, 0xCB, 0x5A, /* 0x70-0x73 */
+	0xA8, 0x58, 0x00, 0x00, 0xA8, 0x5A, 0x00, 0x00, /* 0x74-0x77 */
+	0xCB, 0x4B, 0x00, 0x00, 0xA8, 0x4D, 0xCB, 0x5C, /* 0x78-0x7B */
+	0x00, 0x00, 0xA8, 0x54, 0xA8, 0x57, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xCD, 0x45, 0xA8, 0x47, 0xA8, 0x5E, 0xA8, 0x55, /* 0x80-0x83 */
+	0xCB, 0x4E, 0xA8, 0x4A, 0xA8, 0x59, 0xCB, 0x56, /* 0x84-0x87 */
+	0xA8, 0x48, 0xA8, 0x49, 0xCD, 0x43, 0xCB, 0x4F, /* 0x88-0x8B */
+	0xA8, 0x50, 0xA8, 0x5B, 0xCB, 0x5D, 0xCB, 0x50, /* 0x8C-0x8F */
+	0xA8, 0x4E, 0x00, 0x00, 0xA8, 0x53, 0xCC, 0xEE, /* 0x90-0x93 */
+	0xA8, 0x5C, 0xCB, 0x57, 0xA8, 0x52, 0x00, 0x00, /* 0x94-0x97 */
+	0xA8, 0x5D, 0xA8, 0x46, 0xCB, 0x54, 0xA8, 0x4B, /* 0x98-0x9B */
+	0xCB, 0x58, 0xCD, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAA, 0x6A, /* 0xA8-0xAB */
+	0xAA, 0x7A, 0xCC, 0xF5, 0xAA, 0x71, 0x00, 0x00, /* 0xAC-0xAF */
+	0xCD, 0x4B, 0xAA, 0x62, 0x00, 0x00, 0xAA, 0x65, /* 0xB0-0xB3 */
+	0xCD, 0x42, 0x00, 0x00, 0xCC, 0xF3, 0xCC, 0xF7, /* 0xB4-0xB7 */
+	0xAA, 0x6D, 0xAA, 0x6F, 0xCC, 0xFA, 0xAA, 0x76, /* 0xB8-0xBB */
+	0xAA, 0x68, 0xAA, 0x66, 0xAA, 0x67, 0xAA, 0x75, /* 0xBC-0xBF */
+	0xCD, 0x47, 0xAA, 0x70, 0xCC, 0xF9, 0xCC, 0xFB, /* 0xC0-0xC3 */
+	0xAA, 0x6E, 0xAA, 0x73, 0xCC, 0xFC, 0xCD, 0x4A, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xAC, 0x75, 0xAA, 0x79, 0x00, 0x00, /* 0xC8-0xCB */
+	0xAA, 0x63, 0xCD, 0x49, 0x00, 0x00, 0xCD, 0x4D, /* 0xCC-0xCF */
+	0xCC, 0xF8, 0xCD, 0x4F, 0xCD, 0x40, 0xAA, 0x6C, /* 0xD0-0xD3 */
+	0xCC, 0xF4, 0xAA, 0x6B, 0xAA, 0x7D, 0xAA, 0x72, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xCC, 0xF2, 0xCF, 0x75, 0xAA, 0x78, /* 0xD8-0xDB */
+	0xAA, 0x7C, 0xCD, 0x41, 0xCD, 0x46, 0x00, 0x00, /* 0xDC-0xDF */
+	0xAA, 0x7E, 0xAA, 0x77, 0xAA, 0x69, 0xAA, 0x5F, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xAA, 0x64, 0x00, 0x00, 0xCC, 0xF6, /* 0xE4-0xE7 */
+	0xAA, 0x60, 0xCD, 0x4E, 0x00, 0x00, 0xCC, 0xF0, /* 0xE8-0xEB */
+	0xCC, 0xEF, 0xCC, 0xFD, 0xCC, 0xF1, 0xAA, 0x7B, /* 0xEC-0xEF */
+	0xAE, 0xF5, 0xAA, 0x74, 0xCC, 0xFE, 0xAA, 0x61, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xAC, 0xA6, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xCD, 0x4C, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_6D[512] = {
+	0xCF, 0x7C, 0xCF, 0xA1, 0x00, 0x00, 0xCF, 0xA4, /* 0x00-0x03 */
+	0xCF, 0x77, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xA7, /* 0x04-0x07 */
+	0xCF, 0xAA, 0xCF, 0xAC, 0xCF, 0x74, 0xAC, 0x76, /* 0x08-0x0B */
+	0xAC, 0x7B, 0xD2, 0x49, 0xAC, 0xAD, 0xCF, 0xA5, /* 0x0C-0x0F */
+	0xCF, 0xAD, 0xCF, 0x7B, 0xCF, 0x73, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0x64, 0xAC, 0x7E, /* 0x14-0x17 */
+	0xCF, 0xA2, 0xCF, 0x78, 0xCF, 0x7A, 0xAC, 0xA5, /* 0x18-0x1B */
+	0x00, 0x00, 0xCF, 0x7D, 0xAC, 0x7D, 0xCF, 0x70, /* 0x1C-0x1F */
+	0xCF, 0xA8, 0x00, 0x00, 0xCF, 0xAB, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xAC, 0x7A, 0x00, 0x00, 0xAC, 0xA8, /* 0x24-0x27 */
+	0xCF, 0x6D, 0xAC, 0xAA, 0xAC, 0x78, 0xAC, 0xAE, /* 0x28-0x2B */
+	0xCF, 0xA9, 0xCF, 0x6F, 0xAC, 0xAB, 0xD2, 0x5E, /* 0x2C-0x2F */
+	0xCD, 0x48, 0xAC, 0x7C, 0xAC, 0x77, 0xCF, 0x76, /* 0x30-0x33 */
+	0xCF, 0x6E, 0xAC, 0xAC, 0xAC, 0xA4, 0xCF, 0xA3, /* 0x34-0x37 */
+	0xAC, 0xA9, 0xAC, 0xA7, 0xCF, 0x79, 0xAC, 0xA1, /* 0x38-0x3B */
+	0xCF, 0x71, 0xAC, 0xA2, 0xAC, 0xA3, 0xCF, 0x72, /* 0x3C-0x3F */
+	0xCF, 0xA6, 0xAC, 0x79, 0xCF, 0x7E, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xD2, 0x4C, 0xAE, 0xFD, 0xAF, 0x43, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0x55, 0xD2, 0x5B, /* 0x5C-0x5F */
+	0xD2, 0x57, 0xD2, 0x4A, 0xD2, 0x4D, 0xD2, 0x46, /* 0x60-0x63 */
+	0xD2, 0x47, 0xAF, 0x4A, 0xAE, 0xFA, 0xD2, 0x56, /* 0x64-0x67 */
+	0xD2, 0x5F, 0xAF, 0x45, 0xAE, 0xF6, 0x00, 0x00, /* 0x68-0x6B */
+	0xAF, 0x40, 0xD2, 0x4E, 0xAF, 0x42, 0xD2, 0x4F, /* 0x6C-0x6F */
+	0xD2, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xAF, 0x44, 0xD2, 0x68, 0xD2, 0x48, 0xAE, 0xFC, /* 0x74-0x77 */
+	0xAE, 0xFB, 0xAF, 0x48, 0xD2, 0x45, 0xD2, 0x66, /* 0x78-0x7B */
+	0xD2, 0x5A, 0xD2, 0x67, 0xD2, 0x61, 0xD2, 0x53, /* 0x7C-0x7F */
+	
+	0xD2, 0x62, 0x00, 0x00, 0xD2, 0x5C, 0xD2, 0x65, /* 0x80-0x83 */
+	0xD2, 0x63, 0xAF, 0x49, 0xD2, 0x54, 0xAE, 0xF9, /* 0x84-0x87 */
+	0xAE, 0xF8, 0xAF, 0x41, 0xAF, 0x47, 0xD2, 0x60, /* 0x88-0x8B */
+	0xAF, 0x46, 0xD2, 0x51, 0xB2, 0x43, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD2, 0x69, 0xD2, 0x50, 0xD2, 0x4B, 0xAE, 0xFE, /* 0x90-0x93 */
+	0xAF, 0x4B, 0xAE, 0xF7, 0x00, 0x00, 0xD2, 0x58, /* 0x94-0x97 */
+	0xD2, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0x65, 0xD5, 0xE1, /* 0xA8-0xAB */
+	0xD5, 0xE5, 0x00, 0x00, 0xB2, 0x52, 0xB2, 0x50, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0x47, 0xD5, 0xE3, /* 0xB0-0xB3 */
+	0xD5, 0xE2, 0xB2, 0x5B, 0x00, 0x00, 0xD5, 0xE8, /* 0xB4-0xB7 */
+	0xB2, 0x55, 0x00, 0x00, 0xD5, 0xFA, 0xD6, 0x47, /* 0xB8-0xBB */
+	0xB2, 0x44, 0xD5, 0xF7, 0xD5, 0xF0, 0xB2, 0x67, /* 0xBC-0xBF */
+	0xD5, 0xE0, 0x00, 0x00, 0xD5, 0xFC, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xB2, 0x64, 0xB2, 0x58, 0xB2, 0x63, 0xB2, 0x4E, /* 0xC4-0xC7 */
+	0xD5, 0xEC, 0xD5, 0xFE, 0xD5, 0xF6, 0xB2, 0x4F, /* 0xC8-0xCB */
+	0xB2, 0x49, 0xD6, 0x45, 0x00, 0x00, 0xD5, 0xFD, /* 0xCC-0xCF */
+	0xD6, 0x40, 0xB2, 0x51, 0xB2, 0x59, 0xD6, 0x42, /* 0xD0-0xD3 */
+	0xD5, 0xEA, 0xD5, 0xFB, 0xD5, 0xEF, 0xD6, 0x44, /* 0xD4-0xD7 */
+	0xB2, 0x5E, 0xB2, 0x46, 0xB2, 0x5C, 0xD5, 0xF4, /* 0xD8-0xDB */
+	0xD5, 0xF2, 0xD5, 0xF3, 0xB2, 0x53, 0xD5, 0xEE, /* 0xDC-0xDF */
+	0xD5, 0xED, 0xB2, 0x48, 0xD5, 0xE7, 0xD6, 0x46, /* 0xE0-0xE3 */
+	0xB2, 0x4A, 0xD5, 0xF1, 0xB2, 0x68, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xB2, 0x62, 0xD5, 0xE6, 0xB2, 0x5F, 0xB2, 0x5D, /* 0xE8-0xEB */
+	0xB2, 0x66, 0xD5, 0xF8, 0xB2, 0x61, 0xD2, 0x52, /* 0xEC-0xEF */
+	0xD5, 0xF9, 0xB2, 0x60, 0xD6, 0x41, 0xB2, 0x45, /* 0xF0-0xF3 */
+	0xD5, 0xF5, 0xB2, 0x57, 0xD5, 0xE9, 0xB2, 0x56, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xB2, 0x54, 0xB2, 0x4C, 0xB2, 0x4B, /* 0xF8-0xFB */
+	0xD9, 0xE7, 0xD6, 0x43, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6E[512] = {
+	0xD5, 0xEB, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xFC, /* 0x00-0x03 */
+	0x00, 0x00, 0xB2, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xB5, 0x41, 0xB2, 0x5A, 0xB4, 0xEE, /* 0x18-0x1B */
+	0xD9, 0xF6, 0xB4, 0xFC, 0x00, 0x00, 0xD9, 0xEA, /* 0x1C-0x1F */
+	0xB4, 0xEB, 0xB4, 0xE7, 0xDA, 0x49, 0xB4, 0xED, /* 0x20-0x23 */
+	0xB4, 0xF1, 0xB4, 0xEC, 0xB4, 0xF5, 0xDA, 0x4D, /* 0x24-0x27 */
+	0xDA, 0x44, 0x00, 0x00, 0x00, 0x00, 0xD9, 0xF1, /* 0x28-0x2B */
+	0xB4, 0xFA, 0xB4, 0xF4, 0xD9, 0xFD, 0xB4, 0xE4, /* 0x2C-0x2F */
+	0xDA, 0x4A, 0xDA, 0x43, 0xB4, 0xE8, 0xD9, 0xF7, /* 0x30-0x33 */
+	0xB4, 0xF7, 0xDA, 0x55, 0xDA, 0x56, 0x00, 0x00, /* 0x34-0x37 */
+	0xB4, 0xE5, 0xDA, 0x48, 0xB4, 0xF9, 0xD9, 0xFB, /* 0x38-0x3B */
+	0xD9, 0xED, 0xD9, 0xEE, 0xB4, 0xFD, 0xD9, 0xF2, /* 0x3C-0x3F */
+	0xD9, 0xF9, 0xD9, 0xF3, 0x00, 0x00, 0xB4, 0xFB, /* 0x40-0x43 */
+	0xB5, 0x44, 0xD9, 0xEF, 0xD9, 0xE8, 0xD9, 0xE9, /* 0x44-0x47 */
+	0x00, 0x00, 0xD9, 0xEB, 0xB4, 0xEA, 0xD9, 0xF8, /* 0x48-0x4B */
+	0x00, 0x00, 0xB4, 0xF8, 0xB5, 0x42, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xD9, 0xFA, 0xDA, 0x53, 0xDA, 0x4B, /* 0x50-0x53 */
+	0xB4, 0xE6, 0xDA, 0x51, 0xB4, 0xF2, 0x00, 0x00, /* 0x54-0x57 */
+	0xB4, 0xF0, 0x00, 0x00, 0xDA, 0x57, 0xB4, 0xEF, /* 0x58-0x5B */
+	0xDA, 0x41, 0xD9, 0xF4, 0xD9, 0xFE, 0xB5, 0x47, /* 0x5C-0x5F */
+	0xDA, 0x45, 0xDA, 0x42, 0xD9, 0xF0, 0xB5, 0x43, /* 0x60-0x63 */
+	0xDA, 0x4F, 0xDA, 0x4C, 0xDA, 0x54, 0xB4, 0xE9, /* 0x64-0x67 */
+	0xDA, 0x40, 0xB5, 0x46, 0x00, 0x00, 0xDA, 0x47, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xB4, 0xF3, 0xB4, 0xF6, /* 0x6C-0x6F */
+	0x00, 0x00, 0xDA, 0x46, 0xB5, 0x45, 0xD9, 0xF5, /* 0x70-0x73 */
+	0xD5, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xDA, 0x50, /* 0x74-0x77 */
+	0xDA, 0x4E, 0xDA, 0x52, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xD9, 0xEC, 0xB5, 0x40, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xDE, 0x61, 0xDE, 0x60, 0xDE, 0x46, /* 0x8C-0x8F */
+	0xB7, 0xBD, 0x00, 0x00, 0xDE, 0x5F, 0xDE, 0x49, /* 0x90-0x93 */
+	0xDE, 0x4A, 0x00, 0x00, 0xB7, 0xC7, 0xDE, 0x68, /* 0x94-0x97 */
+	0xB7, 0xC2, 0xDE, 0x5E, 0x00, 0x00, 0xDE, 0x43, /* 0x98-0x9B */
+	0xB7, 0xC8, 0xB7, 0xBE, 0xDE, 0x52, 0xDE, 0x48, /* 0x9C-0x9F */
+	0xDE, 0x4B, 0xDE, 0x63, 0xB7, 0xB8, 0xDE, 0x6A, /* 0xA0-0xA3 */
+	0xDE, 0x62, 0xB7, 0xC1, 0xDE, 0x57, 0xB7, 0xCC, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xCB, 0xB7, 0xC5, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0x69, 0xB7, 0xB9, /* 0xAC-0xAF */
+	0xDE, 0x55, 0xDE, 0x4C, 0xDE, 0x59, 0xDE, 0x65, /* 0xB0-0xB3 */
+	0xB7, 0xCD, 0x00, 0x00, 0xB7, 0xBB, 0xDE, 0x54, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xDE, 0x4D, 0xB7, 0xC4, 0x00, 0x00, /* 0xB8-0xBB */
+	0xB7, 0xC3, 0xDE, 0x50, 0xDE, 0x5A, 0xDE, 0x64, /* 0xBC-0xBF */
+	0xDE, 0x47, 0xDE, 0x51, 0xB7, 0xBC, 0xDE, 0x5B, /* 0xC0-0xC3 */
+	0xB7, 0xC9, 0xB7, 0xC0, 0xDE, 0x4E, 0xB7, 0xBF, /* 0xC4-0xC7 */
+	0xDE, 0x45, 0xDE, 0x53, 0xDE, 0x67, 0xB4, 0xFE, /* 0xC8-0xCB */
+	0xBA, 0xB0, 0xDE, 0x56, 0xE2, 0x6C, 0xDE, 0x58, /* 0xCC-0xCF */
+	0xDE, 0x66, 0xB7, 0xC6, 0xDE, 0x4F, 0xB7, 0xBA, /* 0xD0-0xD3 */
+	0xB7, 0xCA, 0xBC, 0xF0, 0xDE, 0x44, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xDE, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xDE, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xAA, /* 0xE8-0xEB */
+	0xBA, 0xAD, 0xE2, 0x7D, 0xE2, 0xA4, 0xBA, 0xA2, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE2, 0x6E, 0xBA, 0xAF, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xBA, 0x77, 0xE2, 0x6D, 0xE2, 0xB0, 0xBA, 0xB1, /* 0xF4-0xF7 */
+	0xE2, 0x71, 0xE2, 0xA3, 0x00, 0x00, 0xE2, 0x73, /* 0xF8-0xFB */
+	0xE2, 0xB3, 0xE2, 0xAF, 0xBA, 0x75, 0xBA, 0xA1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_6F[512] = {
+	0xE6, 0x53, 0xBA, 0xAE, 0xBA, 0x7D, 0xE2, 0x6F, /* 0x00-0x03 */
+	0x00, 0x00, 0xE2, 0xAE, 0xBA, 0xA3, 0xE2, 0xAB, /* 0x04-0x07 */
+	0xE2, 0xB8, 0xE2, 0x75, 0xE2, 0x7E, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE2, 0xB6, 0xE2, 0xAC, 0xBA, 0x7C, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x7C, 0xBA, 0x76, /* 0x10-0x13 */
+	0xBA, 0x74, 0xBA, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xE2, 0x7A, 0xE2, 0x77, 0xE2, 0x78, 0x00, 0x00, /* 0x18-0x1B */
+	0xE2, 0xB2, 0x00, 0x00, 0xE2, 0xB7, 0xE2, 0xB5, /* 0x1C-0x1F */
+	0xBA, 0x7A, 0xE2, 0xB9, 0xBA, 0x7E, 0xBA, 0xA7, /* 0x20-0x23 */
+	0x00, 0x00, 0xE2, 0x70, 0xE5, 0xFA, 0xE2, 0x79, /* 0x24-0x27 */
+	0x00, 0x00, 0xBA, 0x78, 0xBA, 0xAC, 0xBA, 0xA9, /* 0x28-0x2B */
+	0xBA, 0x7B, 0xE2, 0xA5, 0xE2, 0x74, 0xBA, 0xAA, /* 0x2C-0x2F */
+	0xE2, 0xA7, 0xBA, 0xA4, 0xBA, 0xA6, 0xBA, 0x73, /* 0x30-0x33 */
+	0x00, 0x00, 0xE2, 0xA9, 0xE2, 0xA1, 0xE2, 0x72, /* 0x34-0x37 */
+	0xBA, 0xA5, 0xE2, 0xB1, 0xE2, 0xB4, 0xE2, 0x7B, /* 0x38-0x3B */
+	0xE2, 0xA8, 0x00, 0x00, 0xBA, 0x79, 0xBC, 0xDF, /* 0x3C-0x3F */
+	0xE2, 0xA6, 0xE5, 0xF9, 0x00, 0x00, 0xE2, 0xAD, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0x76, 0xE6, 0x44, /* 0x4C-0x4F */
+	0xE6, 0x4E, 0xBC, 0xE2, 0xE6, 0x4D, 0xE6, 0x59, /* 0x50-0x53 */
+	0xBC, 0xE4, 0xE6, 0x4B, 0x00, 0x00, 0xE6, 0x4F, /* 0x54-0x57 */
+	0xBC, 0xEF, 0x00, 0x00, 0xE6, 0x46, 0xBC, 0xE7, /* 0x58-0x5B */
+	0x00, 0x00, 0xE6, 0x52, 0xE9, 0xF0, 0xBC, 0xF3, /* 0x5C-0x5F */
+	0xBC, 0xF2, 0xE6, 0x54, 0xE6, 0x43, 0xE6, 0x5E, /* 0x60-0x63 */
+	0xBC, 0xED, 0x00, 0x00, 0xBC, 0xE3, 0xE6, 0x57, /* 0x64-0x67 */
+	0x00, 0x00, 0xE6, 0x5B, 0xE6, 0x60, 0xE6, 0x55, /* 0x68-0x6B */
+	0xE6, 0x49, 0xBC, 0xE6, 0xBC, 0xE9, 0xBC, 0xF1, /* 0x6C-0x6F */
+	0xBC, 0xEC, 0x00, 0x00, 0xE6, 0x4C, 0xE2, 0xA2, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0x48, 0xE6, 0x5F, /* 0x74-0x77 */
+	0xBC, 0xE8, 0x00, 0x00, 0xBC, 0xEB, 0xE6, 0x61, /* 0x78-0x7B */
+	0xBC, 0xE0, 0xE6, 0x56, 0xE5, 0xFB, 0xE6, 0x5C, /* 0x7C-0x7F */
+	
+	0xC0, 0xDF, 0x00, 0x00, 0xE6, 0x4A, 0x00, 0x00, /* 0x80-0x83 */
+	0xBC, 0xE1, 0xE6, 0x45, 0xBC, 0xE5, 0xE5, 0xFC, /* 0x84-0x87 */
+	0xBA, 0xAB, 0xE6, 0x41, 0x00, 0x00, 0xE6, 0x5A, /* 0x88-0x8B */
+	0xE6, 0x42, 0xE6, 0x40, 0xBC, 0xEA, 0x00, 0x00, /* 0x8C-0x8F */
+	0xE6, 0x58, 0x00, 0x00, 0xE5, 0xFE, 0xE6, 0x51, /* 0x90-0x93 */
+	0xE6, 0x50, 0xE6, 0x5D, 0xE6, 0x47, 0xBC, 0xEE, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE9, 0xF3, 0x00, 0x00, /* 0x9C-0x9F */
+	0xBF, 0x49, 0xBE, 0xFE, 0xEA, 0x40, 0xE9, 0xEB, /* 0xA0-0xA3 */
+	0xBF, 0x41, 0xE9, 0xF7, 0xBF, 0x48, 0xBF, 0x43, /* 0xA4-0xA7 */
+	0xE9, 0xF5, 0xED, 0x4F, 0xE9, 0xFB, 0xEA, 0x42, /* 0xA8-0xAB */
+	0xE9, 0xFA, 0xE9, 0xE9, 0xE9, 0xF8, 0xEA, 0x44, /* 0xAC-0xAF */
+	0xEA, 0x46, 0xBE, 0xFD, 0xEA, 0x45, 0xBF, 0x44, /* 0xB0-0xB3 */
+	0xBF, 0x4A, 0x00, 0x00, 0xBF, 0x47, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE9, 0xFE, 0xBF, 0x46, 0xE9, 0xF9, 0x00, 0x00, /* 0xB8-0xBB */
+	0xE9, 0xED, 0xE9, 0xF2, 0x00, 0x00, 0xE9, 0xFD, /* 0xBC-0xBF */
+	0xBF, 0x45, 0xBF, 0x42, 0xBE, 0xFC, 0xBF, 0x40, /* 0xC0-0xC3 */
+	0xE9, 0xF1, 0x00, 0x00, 0xE5, 0xFD, 0xE9, 0xEC, /* 0xC4-0xC7 */
+	0xE9, 0xEF, 0xEA, 0x41, 0xE9, 0xF4, 0xE9, 0xEA, /* 0xC8-0xCB */
+	0xED, 0x4E, 0xEA, 0x43, 0xE9, 0xEE, 0xE9, 0xFC, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xED, 0x51, 0xC0, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xC0, 0xD7, 0x00, 0x00, 0x00, 0x00, 0xC0, 0xDB, /* 0xD8-0xDB */
+	0xED, 0x53, 0xED, 0x59, 0xED, 0x57, 0xC0, 0xD9, /* 0xDC-0xDF */
+	0xC0, 0xDA, 0xC0, 0xE1, 0xED, 0x5A, 0xED, 0x52, /* 0xE0-0xE3 */
+	0xC0, 0xDC, 0x00, 0x00, 0xED, 0x56, 0xED, 0x55, /* 0xE4-0xE7 */
+	0xED, 0x5B, 0xC0, 0xE2, 0x00, 0x00, 0xC0, 0xDD, /* 0xE8-0xEB */
+	0xC0, 0xE0, 0xED, 0x54, 0xC0, 0xE4, 0xC0, 0xDE, /* 0xEC-0xEF */
+	0xC0, 0xE5, 0xC0, 0xD8, 0xED, 0x58, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xED, 0x50, 0x00, 0x00, 0x00, 0x00, 0xEF, 0xF7, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0x71, 0xEF, 0xF4, /* 0xF8-0xFB */
+	0xEF, 0xF6, 0x00, 0x00, 0xC2, 0x6F, 0xEF, 0xF2, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_70[512] = {
+	0xEF, 0xF3, 0xEF, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xE9, 0xF6, 0xEF, 0xEF, 0xC2, 0x70, 0xEF, 0xEB, /* 0x04-0x07 */
+	0x00, 0x00, 0xC2, 0x6D, 0xEF, 0xF8, 0xC2, 0x6E, /* 0x08-0x0B */
+	0xEF, 0xEC, 0xEF, 0xED, 0xEF, 0xF1, 0xC2, 0x73, /* 0x0C-0x0F */
+	0x00, 0x00, 0xC2, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xEF, 0xF0, 0xC3, 0x78, 0xF2, 0x5F, 0xF2, 0x65, /* 0x14-0x17 */
+	0xC3, 0x79, 0xF2, 0x5C, 0xC3, 0x76, 0xC3, 0x73, /* 0x18-0x1B */
+	0xF2, 0x67, 0xC3, 0x77, 0x00, 0x00, 0xC3, 0x74, /* 0x1C-0x1F */
+	0xF2, 0x5E, 0xF2, 0x61, 0xF2, 0x62, 0xF2, 0x63, /* 0x20-0x23 */
+	0xF2, 0x66, 0x00, 0x00, 0xEF, 0xF5, 0xF2, 0x5D, /* 0x24-0x27 */
+	0xC3, 0x75, 0xF2, 0x64, 0xF2, 0x68, 0xF2, 0x60, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x5D, /* 0x2C-0x2F */
+	0xC4, 0x6A, 0xF4, 0x60, 0xC4, 0x6B, 0xF4, 0x68, /* 0x30-0x33 */
+	0xF4, 0x5F, 0xF4, 0x5C, 0x00, 0x00, 0xF4, 0x5E, /* 0x34-0x37 */
+	0xF4, 0x62, 0xF4, 0x65, 0xF4, 0x64, 0xF4, 0x67, /* 0x38-0x3B */
+	0xF4, 0x5B, 0x00, 0x00, 0xC4, 0x69, 0xF4, 0x63, /* 0x3C-0x3F */
+	0xF4, 0x66, 0xF4, 0x69, 0xF4, 0x61, 0xF5, 0xD3, /* 0x40-0x43 */
+	0xF5, 0xD4, 0xF5, 0xD8, 0xF5, 0xD9, 0x00, 0x00, /* 0x44-0x47 */
+	0xF5, 0xD6, 0xF5, 0xD7, 0xF5, 0xD5, 0x00, 0x00, /* 0x48-0x4B */
+	0xC4, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xC5, 0x78, 0xF6, 0xEB, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xF6, 0xE8, 0xF6, 0xE9, 0xF6, 0xEA, /* 0x54-0x57 */
+	0xC5, 0x79, 0x00, 0x00, 0xF7, 0xE5, 0xF7, 0xE4, /* 0x58-0x5B */
+	0x00, 0x00, 0xF8, 0xAF, 0xC5, 0xF4, 0xF8, 0xAD, /* 0x5C-0x5F */
+	0xF8, 0xB0, 0xF8, 0xAE, 0xF8, 0xF5, 0xC6, 0x57, /* 0x60-0x63 */
+	0xC6, 0x65, 0xF9, 0xA3, 0xF9, 0x6C, 0x00, 0x00, /* 0x64-0x67 */
+	0xF9, 0xA2, 0xF9, 0xD0, 0xF9, 0xD1, 0xA4, 0xF5, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xA6, 0xC7, 0xCA, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xCB, 0x5E, 0x00, 0x00, 0xA8, 0x5F, 0x00, 0x00, /* 0x74-0x77 */
+	0xA8, 0x62, 0x00, 0x00, 0xCB, 0x5F, 0x00, 0x00, /* 0x78-0x7B */
+	0xA8, 0x60, 0xA8, 0x61, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xCD, 0x58, 0xCD, 0x5A, /* 0x80-0x83 */
+	0xCD, 0x55, 0xCD, 0x52, 0xCD, 0x54, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xAA, 0xA4, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xAA, 0xA2, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xCD, 0x56, 0xAA, 0xA3, 0xCD, 0x53, /* 0x90-0x93 */
+	0xCD, 0x50, 0xAA, 0xA1, 0xCD, 0x57, 0x00, 0x00, /* 0x94-0x97 */
+	0xCD, 0x51, 0xAA, 0xA5, 0xCD, 0x59, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xAF, /* 0x9C-0x9F */
+	0x00, 0x00, 0xCF, 0xB3, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xAC, 0xB7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xCF, 0xB6, 0x00, 0x00, 0xAC, 0xAF, /* 0xA8-0xAB */
+	0xAC, 0xB2, 0xAC, 0xB4, 0xAC, 0xB6, 0xAC, 0xB3, /* 0xAC-0xAF */
+	0xCF, 0xB2, 0xCF, 0xB1, 0x00, 0x00, 0xAC, 0xB1, /* 0xB0-0xB3 */
+	0xCF, 0xB4, 0xCF, 0xB5, 0x00, 0x00, 0xCF, 0xAE, /* 0xB4-0xB7 */
+	0xAC, 0xB5, 0x00, 0x00, 0xAC, 0xB0, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xB0, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xD2, 0x77, 0xD2, 0x78, 0xD2, 0x79, /* 0xC4-0xC7 */
+	0xAF, 0x50, 0x00, 0x00, 0xAF, 0x4C, 0xD2, 0x6E, /* 0xC8-0xCB */
+	0x00, 0x00, 0xD2, 0x76, 0xD2, 0x7B, 0xAF, 0x51, /* 0xCC-0xCF */
+	0x00, 0x00, 0xD2, 0x6C, 0xD2, 0x72, 0xD2, 0x6B, /* 0xD0-0xD3 */
+	0xD2, 0x75, 0x00, 0x00, 0x00, 0x00, 0xD2, 0x71, /* 0xD4-0xD7 */
+	0xAF, 0x4D, 0xAF, 0x4F, 0xD2, 0x7A, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD2, 0x6A, 0xD2, 0x6D, 0xD2, 0x73, 0x00, 0x00, /* 0xDC-0xDF */
+	0xD2, 0x74, 0xD2, 0x7C, 0xD2, 0x70, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xAF, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB2, 0x6D, /* 0xEC-0xEF */
+	0xD6, 0x4E, 0x00, 0x00, 0x00, 0x00, 0xD6, 0x50, /* 0xF0-0xF3 */
+	0xD6, 0x4C, 0x00, 0x00, 0xD6, 0x58, 0xD6, 0x4A, /* 0xF4-0xF7 */
+	0xD6, 0x57, 0xB2, 0x69, 0xD6, 0x48, 0xDA, 0x5B, /* 0xF8-0xFB */
+	0xD6, 0x52, 0xB2, 0x6C, 0x00, 0x00, 0xD6, 0x53, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_71[512] = {
+	0xD6, 0x56, 0x00, 0x00, 0xD6, 0x5A, 0x00, 0x00, /* 0x00-0x03 */
+	0xD6, 0x4F, 0x00, 0x00, 0xD6, 0x54, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xB2, 0x6A, 0xB2, 0x6B, 0xD6, 0x59, /* 0x08-0x0B */
+	0xD6, 0x4D, 0xD6, 0x49, 0xD6, 0x5B, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD6, 0x51, 0x00, 0x00, 0x00, 0x00, 0xD6, 0x55, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0x4B, /* 0x14-0x17 */
+	0x00, 0x00, 0xB5, 0x48, 0xB5, 0x49, 0xDA, 0x65, /* 0x18-0x1B */
+	0xB5, 0x4F, 0x00, 0x00, 0xDA, 0x59, 0xDA, 0x62, /* 0x1C-0x1F */
+	0xDA, 0x58, 0xB5, 0x4C, 0xDA, 0x60, 0xDA, 0x5E, /* 0x20-0x23 */
+	0x00, 0x00, 0xDA, 0x5F, 0xB5, 0x4A, 0x00, 0x00, /* 0x24-0x27 */
+	0xDA, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0x5C, 0xDA, 0x5A, /* 0x2C-0x2F */
+	0xB5, 0x4B, 0xDA, 0x5D, 0xDA, 0x61, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xB5, 0x4D, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0x64, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xDE, 0x70, 0xDE, 0x77, 0xDE, 0x79, /* 0x40-0x43 */
+	0xDE, 0xA1, 0x00, 0x00, 0xB7, 0xDA, 0xDE, 0x6B, /* 0x44-0x47 */
+	0x00, 0x00, 0xB7, 0xD2, 0x00, 0x00, 0xDE, 0x7A, /* 0x48-0x4B */
+	0xB7, 0xD7, 0xDE, 0xA2, 0xB7, 0xCE, 0x00, 0x00, /* 0x4C-0x4F */
+	0xDE, 0x7D, 0x00, 0x00, 0xDE, 0x6D, 0xDE, 0x7E, /* 0x50-0x53 */
+	0xDE, 0x6C, 0x00, 0x00, 0xB7, 0xDC, 0x00, 0x00, /* 0x54-0x57 */
+	0xDE, 0x78, 0xB7, 0xCF, 0xDE, 0xA3, 0x00, 0x00, /* 0x58-0x5B */
+	0xB7, 0xD4, 0xDE, 0x71, 0xB7, 0xD9, 0xDE, 0x7C, /* 0x5C-0x5F */
+	0xDE, 0x6F, 0xDE, 0x76, 0xDE, 0x72, 0xDE, 0x6E, /* 0x60-0x63 */
+	0xB7, 0xD1, 0xB7, 0xD8, 0xB7, 0xD6, 0xB7, 0xD3, /* 0x64-0x67 */
+	0xB7, 0xDB, 0xB7, 0xD0, 0xDE, 0x75, 0x00, 0x00, /* 0x68-0x6B */
+	0xB7, 0xD5, 0x00, 0x00, 0xB5, 0x4E, 0x00, 0x00, /* 0x6C-0x6F */
+	0xDE, 0x7B, 0x00, 0x00, 0xDE, 0x73, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xDE, 0x74, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xC1, /* 0x78-0x7B */
+	0x00, 0x00, 0xBA, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xE2, 0xBD, 0xE2, 0xC3, 0xE2, 0xBF, 0x00, 0x00, /* 0x80-0x83 */
+	0xBA, 0xB6, 0xE2, 0xBE, 0xE2, 0xC2, 0xE2, 0xBA, /* 0x84-0x87 */
+	0x00, 0x00, 0xE2, 0xBC, 0xBA, 0xB5, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xC0, /* 0x8C-0x8F */
+	0xE2, 0xBB, 0x00, 0x00, 0xBA, 0xB7, 0x00, 0x00, /* 0x90-0x93 */
+	0xBA, 0xB2, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xC4, /* 0x94-0x97 */
+	0x00, 0x00, 0xBA, 0xB3, 0xE6, 0x67, 0xE6, 0x64, /* 0x98-0x9B */
+	0xE6, 0x70, 0xE6, 0x6A, 0xE6, 0x6C, 0xBC, 0xF4, /* 0x9C-0x9F */
+	0xE6, 0x66, 0xE6, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE6, 0x6D, 0xE6, 0x6B, 0x00, 0x00, 0xE6, 0x71, /* 0xA4-0xA7 */
+	0xBC, 0xF7, 0xE6, 0x68, 0xE6, 0x6F, 0x00, 0x00, /* 0xA8-0xAB */
+	0xBC, 0xF5, 0x00, 0x00, 0x00, 0x00, 0xE6, 0x63, /* 0xAC-0xAF */
+	0xE6, 0x65, 0xBC, 0xF6, 0xE6, 0x62, 0xE6, 0x72, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xE6, 0x69, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xEA, 0x4A, 0xBF, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0xEA, 0x55, 0xEA, 0x53, 0xBF, 0x4B, 0xEA, 0x49, /* 0xBC-0xBF */
+	0xEA, 0x4C, 0xEA, 0x4D, 0xEA, 0x48, 0xBF, 0x55, /* 0xC0-0xC3 */
+	0xBF, 0x56, 0xEA, 0x47, 0xEA, 0x56, 0xEA, 0x51, /* 0xC4-0xC7 */
+	0xBF, 0x4F, 0xBF, 0x4C, 0xEA, 0x50, 0xEA, 0x4E, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xBF, 0x52, 0xEA, 0x52, /* 0xCC-0xCF */
+	0xBF, 0x4D, 0x00, 0x00, 0xBF, 0x4E, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xEA, 0x4F, 0xBF, 0x50, 0xEA, 0x4B, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xEA, 0x54, 0xBF, 0x53, 0xEA, 0x57, 0xEA, 0x58, /* 0xD8-0xDB */
+	0xBF, 0x54, 0x00, 0x00, 0x00, 0x00, 0xC0, 0xE7, /* 0xDC-0xDF */
+	0xC0, 0xEE, 0xED, 0x5C, 0xED, 0x62, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xED, 0x60, 0xC0, 0xEA, 0xC0, 0xE9, 0xC0, 0xE6, /* 0xE4-0xE7 */
+	0xED, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xC0, 0xEC, 0xC0, 0xEB, 0xC0, 0xE8, 0x00, 0x00, /* 0xEC-0xEF */
+	0xED, 0x61, 0xED, 0x5D, 0xED, 0x5F, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xC0, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xC2, 0x77, 0xEF, 0xFB, 0x00, 0x00, 0xC2, 0x74, /* 0xF8-0xFB */
+	0xC2, 0x75, 0xEF, 0xFD, 0xC2, 0x76, 0xEF, 0xFA, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_72[512] = {
+	0x00, 0x00, 0xEF, 0xF9, 0xF2, 0x6C, 0xEF, 0xFC, /* 0x00-0x03 */
+	0x00, 0x00, 0xF2, 0x6D, 0xC3, 0x7A, 0xF2, 0x6B, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0x6A, 0x00, 0x00, /* 0x08-0x0B */
+	0xF2, 0x69, 0xC3, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xC4, 0x6C, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x6A, /* 0x10-0x13 */
+	0xF4, 0x6B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xF5, 0xDC, 0xF5, 0xDB, 0xC4, 0xEA, /* 0x18-0x1B */
+	0x00, 0x00, 0xF5, 0xDA, 0xF6, 0xEC, 0xF6, 0xED, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xF7, 0xE6, 0xF8, 0xB1, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xF6, 0xF9, 0xBC, /* 0x24-0x27 */
+	0xC6, 0x79, 0xF9, 0xC6, 0xA4, 0xF6, 0x00, 0x00, /* 0x28-0x2B */
+	0xAA, 0xA6, 0xAA, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xAC, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xC0, 0xEF, 0xA4, 0xF7, 0x00, 0x00, /* 0x34-0x37 */
+	0xAA, 0xA8, 0xAF, 0x52, 0xB7, 0xDD, 0xA4, 0xF8, /* 0x38-0x3B */
+	0x00, 0x00, 0xB2, 0x6E, 0xBA, 0xB8, 0xC9, 0x62, /* 0x3C-0x3F */
+	0x00, 0x00, 0xCF, 0xB7, 0xD2, 0x7D, 0x00, 0x00, /* 0x40-0x43 */
+	0xE2, 0xC5, 0x00, 0x00, 0xC0, 0xF0, 0xA4, 0xF9, /* 0x44-0x47 */
+	0xAA, 0xA9, 0xCF, 0xB8, 0xCF, 0xB9, 0xDA, 0x66, /* 0x48-0x4B */
+	0xB5, 0x50, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xA4, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xDE, 0xE2, 0xC6, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xBC, 0xF8, 0x00, 0x00, /* 0x54-0x57 */
+	0xC3, 0x7C, 0xA4, 0xFA, 0xDA, 0x67, 0xA4, 0xFB, /* 0x58-0x5B */
+	0x00, 0x00, 0xA6, 0xC9, 0xCA, 0x42, 0xA6, 0xC8, /* 0x5C-0x5F */
+	0xA8, 0x65, 0xA8, 0x64, 0xA8, 0x63, 0xCB, 0x60, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAA, 0xAA, /* 0x64-0x67 */
+	0x00, 0x00, 0xAA, 0xAB, 0xCD, 0x5B, 0x00, 0x00, /* 0x68-0x6B */
+	0xCF, 0xBA, 0x00, 0x00, 0xCF, 0xBD, 0xAC, 0xBA, /* 0x6C-0x6F */
+	0xCF, 0xBB, 0x00, 0x00, 0xAC, 0xB9, 0xCF, 0xBC, /* 0x70-0x73 */
+	0xAC, 0xBB, 0x00, 0x00, 0xD2, 0xA2, 0xD2, 0xA1, /* 0x74-0x77 */
+	0xD2, 0x7E, 0xAF, 0x53, 0x00, 0x00, 0xD6, 0x5D, /* 0x78-0x7B */
+	0xD6, 0x5E, 0xB2, 0x6F, 0xD6, 0x5C, 0xD6, 0x5F, /* 0x7C-0x7F */
+	
+	0xB5, 0x52, 0xB2, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xB5, 0x51, 0xDA, 0x6B, 0xDA, 0x6A, 0x00, 0x00, /* 0x84-0x87 */
+	0xDA, 0x68, 0xDA, 0x69, 0x00, 0x00, 0xDA, 0x6C, /* 0x88-0x8B */
+	0xDE, 0xA6, 0xDE, 0xA5, 0xDE, 0xA9, 0x00, 0x00, /* 0x8C-0x8F */
+	0xDE, 0xA8, 0xDE, 0xA7, 0xBA, 0xB9, 0xE2, 0xC9, /* 0x90-0x93 */
+	0x00, 0x00, 0xE2, 0xC8, 0xBA, 0xBA, 0xE2, 0xC7, /* 0x94-0x97 */
+	0xE6, 0x73, 0x00, 0x00, 0xE6, 0x74, 0xBC, 0xF9, /* 0x98-0x9B */
+	0x00, 0x00, 0xEA, 0x59, 0xEA, 0x5A, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xF2, 0x72, 0xC3, 0x7D, 0xF2, 0x71, /* 0xA0-0xA3 */
+	0xF2, 0x70, 0xF2, 0x6E, 0xF2, 0x6F, 0xC4, 0xEB, /* 0xA4-0xA7 */
+	0xF4, 0x6C, 0xF6, 0xEE, 0xF8, 0xF7, 0x00, 0x00, /* 0xA8-0xAB */
+	0xA4, 0xFC, 0x00, 0x00, 0xC9, 0xA5, 0xA5, 0xC7, /* 0xAC-0xAF */
+	0xC9, 0xA6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xCA, 0x43, 0xCA, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0x66, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xCB, 0x62, 0x00, 0x00, 0xCB, 0x61, /* 0xBC-0xBF */
+	0xAA, 0xAC, 0xCB, 0x65, 0xA8, 0x67, 0xCB, 0x63, /* 0xC0-0xC3 */
+	0xA8, 0x66, 0xCB, 0x67, 0xCB, 0x64, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xCD, 0x5F, 0xCF, 0xBE, 0xCD, 0x5D, /* 0xC8-0xCB */
+	0xCD, 0x64, 0x00, 0x00, 0xAA, 0xAD, 0x00, 0x00, /* 0xCC-0xCF */
+	0xAA, 0xB0, 0xCD, 0x65, 0xCD, 0x61, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xCD, 0x62, 0x00, 0x00, 0xCD, 0x5C, 0xAA, 0xAF, /* 0xD4-0xD7 */
+	0xCD, 0x5E, 0xAA, 0xAE, 0xCD, 0x63, 0x00, 0x00, /* 0xD8-0xDB */
+	0xCD, 0x60, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xC2, /* 0xDC-0xDF */
+	0xAC, 0xBD, 0xAC, 0xBE, 0x00, 0x00, 0xCF, 0xC5, /* 0xE0-0xE3 */
+	0xCF, 0xBF, 0x00, 0x00, 0xCF, 0xC4, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCF, 0xC0, 0xAC, 0xBC, 0xCF, 0xC3, 0xCF, 0xC1, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xA8, /* 0xF0-0xF3 */
+	0xD2, 0xA5, 0x00, 0x00, 0xD2, 0xA7, 0xAF, 0x58, /* 0xF4-0xF7 */
+	0xAF, 0x57, 0xAF, 0x55, 0xD2, 0xA4, 0xD2, 0xA9, /* 0xF8-0xFB */
+	0xAF, 0x54, 0xAF, 0x56, 0xD2, 0xA6, 0xD6, 0x67, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_73[512] = {
+	0xD2, 0xA3, 0xD2, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD6, 0x62, /* 0x04-0x07 */
+	0xD6, 0x66, 0x00, 0x00, 0xD6, 0x65, 0xDA, 0x6E, /* 0x08-0x0B */
+	0xDA, 0x79, 0x00, 0x00, 0x00, 0x00, 0xD6, 0x68, /* 0x0C-0x0F */
+	0x00, 0x00, 0xD6, 0x63, 0xDA, 0x6D, 0xB2, 0x74, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0x73, 0xD6, 0x61, /* 0x14-0x17 */
+	0xD6, 0x64, 0xB2, 0x75, 0x00, 0x00, 0xB2, 0x72, /* 0x18-0x1B */
+	0xB2, 0x71, 0xD6, 0x60, 0xD6, 0x69, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0x70, 0xDA, 0x77, /* 0x20-0x23 */
+	0x00, 0x00, 0xB5, 0x54, 0xDA, 0x76, 0xDA, 0x73, /* 0x24-0x27 */
+	0x00, 0x00, 0xB5, 0x56, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xDA, 0x75, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xDA, 0x6F, 0xDA, 0x71, 0xDA, 0x74, 0xDA, 0x72, /* 0x30-0x33 */
+	0xB5, 0x55, 0xDA, 0x78, 0xB5, 0x53, 0xB7, 0xDF, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xDE, 0xAC, /* 0x38-0x3B */
+	0xDE, 0xAA, 0x00, 0x00, 0xB7, 0xE2, 0xB7, 0xE1, /* 0x3C-0x3F */
+	0xDE, 0xAE, 0x00, 0x00, 0xDE, 0xAB, 0xE2, 0xCA, /* 0x40-0x43 */
+	0xBA, 0xBB, 0xB7, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xDE, 0xB0, 0xDE, 0xAF, 0x00, 0x00, /* 0x48-0x4B */
+	0xE2, 0xCD, 0xE2, 0xCB, 0xBC, 0xFA, 0x00, 0x00, /* 0x4C-0x4F */
+	0xBA, 0xBC, 0xE2, 0xCC, 0xE6, 0x76, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBC, 0xFB, /* 0x54-0x57 */
+	0xE6, 0x75, 0xE6, 0x7E, 0xE6, 0x7D, 0xE6, 0x7B, /* 0x58-0x5B */
+	0x00, 0x00, 0xE6, 0x7A, 0xE6, 0x77, 0xE6, 0x78, /* 0x5C-0x5F */
+	0xE6, 0x79, 0xE6, 0x7C, 0xE6, 0xA1, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0xEA, 0x5F, 0xEA, 0x5C, 0xEA, 0x5D, /* 0x64-0x67 */
+	0xBF, 0x57, 0xEA, 0x5B, 0xEA, 0x61, 0xEA, 0x60, /* 0x68-0x6B */
+	0xEA, 0x5E, 0x00, 0x00, 0xED, 0x64, 0xED, 0x65, /* 0x6C-0x6F */
+	0xC0, 0xF1, 0x00, 0x00, 0xC0, 0xF2, 0xED, 0x63, /* 0x70-0x73 */
+	0x00, 0x00, 0xC2, 0x79, 0xEF, 0xFE, 0xC2, 0x78, /* 0x74-0x77 */
+	0xC3, 0x7E, 0x00, 0x00, 0xC3, 0xA1, 0xC4, 0x6D, /* 0x78-0x7B */
+	0xF4, 0x6E, 0xF4, 0x6D, 0xF5, 0xDD, 0xF6, 0xEF, /* 0x7C-0x7F */
+	
+	0xC5, 0x7A, 0xF7, 0xE8, 0xF7, 0xE7, 0xF7, 0xE9, /* 0x80-0x83 */
+	0xA5, 0xC8, 0xCF, 0xC6, 0xAF, 0x59, 0xB2, 0x76, /* 0x84-0x87 */
+	0xD6, 0x6A, 0xA5, 0xC9, 0xC9, 0xA7, 0xA4, 0xFD, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xCA, 0x45, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0x6C, 0xCB, 0x6A, /* 0x90-0x93 */
+	0xCB, 0x6B, 0xCB, 0x68, 0xA8, 0x68, 0xCB, 0x69, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xCD, 0x6D, 0x00, 0x00, 0xAA, 0xB3, /* 0x9C-0x9F */
+	0xCD, 0x6B, 0xCD, 0x67, 0xCD, 0x6A, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xCD, 0x66, 0xAA, 0xB5, 0xCD, 0x69, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xAA, 0xB2, 0xAA, 0xB1, 0x00, 0x00, 0xAA, 0xB4, /* 0xA8-0xAB */
+	0xCD, 0x6C, 0xCD, 0x68, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xAC, 0xC2, 0xAC, 0xC5, /* 0xB0-0xB3 */
+	0xCF, 0xCE, 0xCF, 0xCD, 0xCF, 0xCC, 0xAC, 0xBF, /* 0xB4-0xB7 */
+	0xCF, 0xD5, 0xCF, 0xCB, 0x00, 0x00, 0xAC, 0xC1, /* 0xB8-0xBB */
+	0xD2, 0xAF, 0x00, 0x00, 0xCF, 0xD2, 0xCF, 0xD0, /* 0xBC-0xBF */
+	0xAC, 0xC4, 0x00, 0x00, 0xCF, 0xC8, 0xCF, 0xD3, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xCF, 0xCA, 0xCF, 0xD4, 0xCF, 0xD1, /* 0xC4-0xC7 */
+	0xCF, 0xC9, 0x00, 0x00, 0xAC, 0xC0, 0xCF, 0xD6, /* 0xC8-0xCB */
+	0xCF, 0xC7, 0xAC, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xD2, 0xB4, 0xD2, 0xAB, /* 0xD0-0xD3 */
+	0xD2, 0xB6, 0x00, 0x00, 0xD2, 0xAE, 0xD2, 0xB9, /* 0xD4-0xD7 */
+	0xD2, 0xBA, 0xD2, 0xAC, 0xD2, 0xB8, 0xD2, 0xB5, /* 0xD8-0xDB */
+	0xD2, 0xB3, 0xD2, 0xB7, 0xAF, 0x5F, 0x00, 0x00, /* 0xDC-0xDF */
+	0xAF, 0x5D, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xB1, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xD2, 0xAD, 0x00, 0x00, 0xD2, 0xB0, /* 0xE4-0xE7 */
+	0xD2, 0xBB, 0xD2, 0xB2, 0xAF, 0x5E, 0xCF, 0xCF, /* 0xE8-0xEB */
+	0x00, 0x00, 0xAF, 0x5A, 0xAF, 0x5C, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xD6, 0x78, 0xD6, 0x6D, 0xD6, 0x6B, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xD6, 0x6C, 0x00, 0x00, 0xD6, 0x73, 0x00, 0x00, /* 0xF8-0xFB */
+	0xD6, 0x74, 0xD6, 0x70, 0xB2, 0x7B, 0xD6, 0x75, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_74[512] = {
+	0xD6, 0x72, 0xD6, 0x6F, 0x00, 0x00, 0xB2, 0x79, /* 0x00-0x03 */
+	0xD6, 0x6E, 0xB2, 0x77, 0xB2, 0x7A, 0xD6, 0x71, /* 0x04-0x07 */
+	0xD6, 0x79, 0xAF, 0x5B, 0xB2, 0x78, 0xD6, 0x77, /* 0x08-0x0B */
+	0xD6, 0x76, 0xB2, 0x7C, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0x7E, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xA1, 0xB5, 0x60, /* 0x18-0x1B */
+	0x00, 0x00, 0xDA, 0xA7, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xDA, 0xA9, 0xDA, 0xA2, 0xB5, 0x5A, 0xDA, 0xA6, /* 0x20-0x23 */
+	0xDA, 0xA5, 0xB5, 0x5B, 0xB5, 0x61, 0x00, 0x00, /* 0x24-0x27 */
+	0xB5, 0x62, 0xDA, 0xA8, 0xB5, 0x58, 0xDA, 0x7D, /* 0x28-0x2B */
+	0xDA, 0x7B, 0xDA, 0xA3, 0xDA, 0x7A, 0xB5, 0x5F, /* 0x2C-0x2F */
+	0xDA, 0x7C, 0xDA, 0xA4, 0xDA, 0xAA, 0xB5, 0x59, /* 0x30-0x33 */
+	0xB5, 0x5E, 0xB5, 0x5C, 0xB5, 0x5D, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xB5, 0x57, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB7, 0xE9, /* 0x3C-0x3F */
+	0xDE, 0xB7, 0xB7, 0xE8, 0xDE, 0xBB, 0x00, 0x00, /* 0x40-0x43 */
+	0xDE, 0xB1, 0x00, 0x00, 0xDE, 0xBC, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xB2, 0xDE, 0xB3, /* 0x48-0x4B */
+	0x00, 0x00, 0xDE, 0xBD, 0xDE, 0xBA, 0xDE, 0xB8, /* 0x4C-0x4F */
+	0xDE, 0xB9, 0xDE, 0xB5, 0xDE, 0xB4, 0x00, 0x00, /* 0x50-0x53 */
+	0xDE, 0xBE, 0xB7, 0xE5, 0x00, 0x00, 0xDE, 0xB6, /* 0x54-0x57 */
+	0x00, 0x00, 0xB7, 0xEA, 0xB7, 0xE4, 0xB7, 0xEB, /* 0x58-0x5B */
+	0xB7, 0xEC, 0x00, 0x00, 0xB7, 0xE7, 0xB7, 0xE6, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xCE, 0xBA, 0xBE, /* 0x60-0x63 */
+	0xBA, 0xBD, 0x00, 0x00, 0x00, 0x00, 0xE2, 0xD3, /* 0x64-0x67 */
+	0x00, 0x00, 0xBC, 0xFC, 0xBA, 0xBF, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xBA, 0xC1, 0xE2, 0xD4, 0xB7, 0xE3, /* 0x6C-0x6F */
+	0xBA, 0xC0, 0xE2, 0xD0, 0xE2, 0xD2, 0xE2, 0xCF, /* 0x70-0x73 */
+	0x00, 0x00, 0xE2, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xE6, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE6, 0xAA, 0xE6, 0xA7, 0xBD, 0x40, 0xEA, 0x62, /* 0x7C-0x7F */
+	
+	0xBD, 0x41, 0xE6, 0xA6, 0x00, 0x00, 0xBC, 0xFE, /* 0x80-0x83 */
+	0x00, 0x00, 0xE6, 0xA8, 0xE6, 0xA5, 0xE6, 0xA2, /* 0x84-0x87 */
+	0xE6, 0xA9, 0xE6, 0xA3, 0xE6, 0xA4, 0xBC, 0xFD, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xED, 0x69, 0x00, 0x00, 0xEA, 0x66, 0x00, 0x00, /* 0x90-0x93 */
+	0xEA, 0x65, 0xEA, 0x67, 0x00, 0x00, 0xED, 0x66, /* 0x94-0x97 */
+	0xBF, 0x5A, 0x00, 0x00, 0xEA, 0x63, 0x00, 0x00, /* 0x98-0x9B */
+	0xBF, 0x58, 0x00, 0x00, 0xBF, 0x5C, 0xBF, 0x5B, /* 0x9C-0x9F */
+	0xEA, 0x64, 0xEA, 0x68, 0x00, 0x00, 0xBF, 0x59, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xED, 0x6D, 0xC0, 0xF5, 0xC2, 0x7A, /* 0xA4-0xA7 */
+	0xC0, 0xF6, 0xC0, 0xF3, 0xED, 0x6A, 0xED, 0x68, /* 0xA8-0xAB */
+	0x00, 0x00, 0xED, 0x6B, 0x00, 0x00, 0xED, 0x6E, /* 0xAC-0xAF */
+	0xC0, 0xF4, 0xED, 0x6C, 0xED, 0x67, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF0, 0x42, 0xF0, 0x45, 0xF2, 0x75, /* 0xB4-0xB7 */
+	0xF0, 0x40, 0x00, 0x00, 0xF4, 0x6F, 0xF0, 0x46, /* 0xB8-0xBB */
+	0x00, 0x00, 0xC3, 0xA2, 0xF0, 0x44, 0xC2, 0x7B, /* 0xBC-0xBF */
+	0xF0, 0x41, 0xF0, 0x43, 0xF0, 0x47, 0xF2, 0x76, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xF2, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xC3, 0xA3, 0xF2, 0x73, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC4, 0x6E, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xC4, 0xED, 0xF6, 0xF1, 0xC4, 0xEC, 0xF6, 0xF3, /* 0xD4-0xD7 */
+	0xF6, 0xF0, 0xF6, 0xF2, 0xC5, 0xD0, 0xF8, 0xB2, /* 0xD8-0xDB */
+	0xA5, 0xCA, 0xCD, 0x6E, 0xD2, 0xBC, 0xD2, 0xBD, /* 0xDC-0xDF */
+	0xB2, 0x7D, 0xDE, 0xBF, 0xBF, 0x5D, 0xC3, 0xA4, /* 0xE0-0xE3 */
+	0xC5, 0x7B, 0xF8, 0xB3, 0xA5, 0xCB, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xCD, 0x6F, 0xA2, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xCF, 0xD7, 0x00, 0x00, 0xCF, 0xD8, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xD2, 0xBE, 0xD2, 0xBF, 0xB2, 0x7E, 0xB2, 0xA1, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xAB, /* 0xF8-0xFB */
+	0x00, 0x00, 0xDE, 0xC2, 0xDE, 0xC1, 0xDE, 0xC0, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_75[512] = {
+	0xE2, 0xD5, 0x00, 0x00, 0xE2, 0xD6, 0xE2, 0xD7, /* 0x00-0x03 */
+	0xBA, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xAD, /* 0x04-0x07 */
+	0xE6, 0xAC, 0x00, 0x00, 0x00, 0x00, 0xEA, 0x69, /* 0x08-0x0B */
+	0xBF, 0x5E, 0xBF, 0x5F, 0x00, 0x00, 0xED, 0x72, /* 0x0C-0x0F */
+	0xED, 0x6F, 0xED, 0x70, 0xED, 0x71, 0xF0, 0x49, /* 0x10-0x13 */
+	0xF0, 0x48, 0xC2, 0x7C, 0xF2, 0x77, 0xF5, 0xDE, /* 0x14-0x17 */
+	0xA5, 0xCC, 0x00, 0x00, 0xAC, 0xC6, 0x00, 0x00, /* 0x18-0x1B */
+	0xB2, 0xA2, 0xDE, 0xC3, 0x00, 0x00, 0xA5, 0xCD, /* 0x1C-0x1F */
+	0x00, 0x00, 0xD2, 0xC0, 0xB2, 0xA3, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xB5, 0x63, 0xB5, 0x64, 0x00, 0x00, /* 0x24-0x27 */
+	0xA5, 0xCE, 0xA5, 0xCF, 0xCA, 0x46, 0xA8, 0x6A, /* 0x28-0x2B */
+	0xA8, 0x69, 0xAC, 0xC7, 0xCF, 0xD9, 0xDA, 0xAC, /* 0x2C-0x2F */
+	0xA5, 0xD0, 0xA5, 0xD1, 0xA5, 0xD2, 0xA5, 0xD3, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x6B, /* 0x34-0x37 */
+	0xA8, 0x6C, 0xCB, 0x6E, 0xCB, 0x6D, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xAA, 0xB6, 0xCD, 0x72, 0xCD, 0x70, /* 0x3C-0x3F */
+	0xCD, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCF, 0xDA, /* 0x44-0x47 */
+	0xCF, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xAC, 0xCB, /* 0x48-0x4B */
+	0xAC, 0xC9, 0x00, 0x00, 0xAC, 0xCA, 0xAC, 0xC8, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xAF, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xAF, 0x64, 0xAF, 0x63, 0xD2, 0xC1, /* 0x58-0x5B */
+	0xAF, 0x62, 0xAF, 0x61, 0x00, 0x00, 0xD2, 0xC2, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xA6, 0xD6, 0x7B, /* 0x60-0x63 */
+	0xD6, 0x7A, 0xB2, 0xA4, 0xB2, 0xA5, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xB5, 0x66, 0xB5, 0x65, /* 0x68-0x6B */
+	0xDA, 0xAE, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xAD, /* 0x6C-0x6F */
+	0xB2, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xB7, 0xED, 0xDE, 0xC5, /* 0x74-0x77 */
+	0xB7, 0xEE, 0xDE, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xE2, 0xD8, 0xE6, 0xAE, 0xBD, 0x42, /* 0x7C-0x7F */
+	
+	0xEA, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xED, 0x73, 0x00, 0x00, 0xC3, 0xA6, 0xC3, 0xA5, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xC5, 0x7C, 0xA5, 0xD4, /* 0x88-0x8B */
+	0xCD, 0x73, 0x00, 0x00, 0x00, 0x00, 0xB2, 0xA8, /* 0x8C-0x8F */
+	0xE2, 0xD9, 0xBA, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xCB, 0x6F, 0xCB, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xCD, 0x74, 0xAA, 0xB8, 0xAA, 0xB9, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xAA, 0xB7, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xAC, 0xCF, 0xAC, 0xD0, /* 0xA0-0xA3 */
+	0xAC, 0xCD, 0xAC, 0xCE, 0x00, 0x00, 0xCF, 0xDC, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xCF, 0xDD, 0xAC, 0xCC, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xD2, 0xC3, 0x00, 0x00, 0xAF, 0x68, 0xAF, 0x69, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xB2, 0xAB, 0xD2, 0xC9, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xAF, 0x6E, 0xAF, 0x6C, 0xD2, 0xCA, 0xD2, 0xC5, /* 0xB8-0xBB */
+	0xAF, 0x6B, 0xAF, 0x6A, 0xAF, 0x65, 0xD2, 0xC8, /* 0xBC-0xBF */
+	0xD2, 0xC7, 0xD2, 0xC4, 0xAF, 0x6D, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xD2, 0xC6, 0xAF, 0x66, 0x00, 0x00, 0xAF, 0x67, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xAC, 0xD6, 0xA1, /* 0xC8-0xCB */
+	0xD6, 0xA2, 0xB2, 0xAD, 0xD6, 0x7C, 0xD6, 0x7E, /* 0xCC-0xCF */
+	0xD6, 0xA4, 0xD6, 0xA3, 0xD6, 0x7D, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xB2, 0xA9, 0xB2, 0xAA, 0x00, 0x00, 0xDA, 0xB6, /* 0xD4-0xD7 */
+	0xB5, 0x6B, 0xB5, 0x6A, 0xDA, 0xB0, 0xB5, 0x68, /* 0xD8-0xDB */
+	0x00, 0x00, 0xDA, 0xB3, 0xB5, 0x6C, 0xDA, 0xB4, /* 0xDC-0xDF */
+	0xB5, 0x6D, 0xDA, 0xB1, 0xB5, 0x67, 0xB5, 0x69, /* 0xE0-0xE3 */
+	0xDA, 0xB5, 0x00, 0x00, 0xDA, 0xB2, 0xDA, 0xAF, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xDE, 0xD2, 0x00, 0x00, 0xDE, 0xC7, /* 0xEC-0xEF */
+	0xB7, 0xF0, 0xB7, 0xF3, 0xB7, 0xF2, 0xB7, 0xF7, /* 0xF0-0xF3 */
+	0xB7, 0xF6, 0xDE, 0xD3, 0xDE, 0xD1, 0xDE, 0xCA, /* 0xF4-0xF7 */
+	0xDE, 0xCE, 0xDE, 0xCD, 0xB7, 0xF4, 0xDE, 0xD0, /* 0xF8-0xFB */
+	0xDE, 0xCC, 0xDE, 0xD4, 0xDE, 0xCB, 0xB7, 0xF5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_76[512] = {
+	0xB7, 0xEF, 0xB7, 0xF1, 0x00, 0x00, 0xDE, 0xC9, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xE2, 0xDB, 0xBA, 0xC7, 0xE2, 0xDF, 0xBA, 0xC6, /* 0x08-0x0B */
+	0xE2, 0xDC, 0xBA, 0xC5, 0x00, 0x00, 0xDE, 0xC8, /* 0x0C-0x0F */
+	0xDE, 0xCF, 0xE2, 0xDE, 0x00, 0x00, 0xBA, 0xC8, /* 0x10-0x13 */
+	0xE2, 0xE0, 0xE2, 0xDD, 0xE2, 0xDA, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xE6, 0xB1, 0xE6, 0xB5, 0xE6, 0xB7, /* 0x18-0x1B */
+	0xE6, 0xB3, 0xE6, 0xB2, 0xE6, 0xB0, 0xBD, 0x45, /* 0x1C-0x1F */
+	0xBD, 0x43, 0xBD, 0x48, 0xBD, 0x49, 0xE6, 0xB4, /* 0x20-0x23 */
+	0xBD, 0x46, 0xE6, 0xAF, 0xBD, 0x47, 0xBA, 0xC4, /* 0x24-0x27 */
+	0xE6, 0xB6, 0xBD, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xEA, 0x6C, 0x00, 0x00, 0xEA, 0x6B, /* 0x2C-0x2F */
+	0xEA, 0x73, 0xEA, 0x6D, 0xEA, 0x72, 0xEA, 0x6F, /* 0x30-0x33 */
+	0xBF, 0x60, 0xEA, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xBF, 0x61, 0x00, 0x00, 0xBF, 0x62, 0x00, 0x00, /* 0x38-0x3B */
+	0xEA, 0x70, 0xEA, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0xF8, 0xED, 0x74, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0xF7, 0xED, 0x77, /* 0x44-0x47 */
+	0xED, 0x75, 0xED, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xC0, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF0, 0x4D, 0x00, 0x00, 0xC2, 0xA1, 0xF0, 0x4E, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0x7D, 0xF0, 0x4F, /* 0x54-0x57 */
+	0xC2, 0x7E, 0xF0, 0x4C, 0xF0, 0x50, 0x00, 0x00, /* 0x58-0x5B */
+	0xF0, 0x4A, 0x00, 0x00, 0x00, 0x00, 0xC3, 0xA7, /* 0x5C-0x5F */
+	0xF2, 0x78, 0xC3, 0xA8, 0xC4, 0x6F, 0x00, 0x00, /* 0x60-0x63 */
+	0xF0, 0x4B, 0xC4, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xC4, 0xEE, 0xF5, 0xDF, 0x00, 0x00, /* 0x68-0x6B */
+	0xC5, 0x7E, 0xF6, 0xF4, 0xC5, 0x7D, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF7, 0xEA, 0xC5, 0xF5, 0xC5, 0xF6, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xF9, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xAC, 0xD1, 0xCF, 0xDE, 0x00, 0x00, 0xB5, 0x6E, /* 0x78-0x7B */
+	0xB5, 0x6F, 0xA5, 0xD5, 0xA6, 0xCA, 0xCA, 0x47, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xCB, 0x71, 0xA8, 0x6D, 0x00, 0x00, /* 0x80-0x83 */
+	0xAA, 0xBA, 0x00, 0x00, 0xAC, 0xD2, 0xAC, 0xD3, /* 0x84-0x87 */
+	0xAC, 0xD4, 0xD6, 0xA6, 0xD2, 0xCB, 0xAF, 0x6F, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xAE, 0xD6, 0xA5, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xB8, 0xB5, 0x71, /* 0x90-0x93 */
+	0x00, 0x00, 0xDA, 0xB7, 0xB5, 0x70, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xDE, 0xD5, 0xBD, 0x4A, 0xE6, 0xBB, /* 0x98-0x9B */
+	0xE6, 0xB8, 0xE6, 0xB9, 0xE6, 0xBA, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xED, 0x78, 0x00, 0x00, 0xF0, 0x51, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0x71, 0xF4, 0x70, /* 0xA8-0xAB */
+	0x00, 0x00, 0xF6, 0xF5, 0xA5, 0xD6, 0xCD, 0x75, /* 0xAC-0xAF */
+	0xAF, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xB5, 0x72, 0xDE, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE2, 0xE1, 0x00, 0x00, 0xBD, 0x4B, 0xEA, 0x74, /* 0xB8-0xBB */
+	0x00, 0x00, 0xF0, 0x52, 0xF4, 0x72, 0xA5, 0xD7, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xAC, 0xD7, /* 0xC0-0xC3 */
+	0xCF, 0xDF, 0xAC, 0xD8, 0xAC, 0xD6, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xAC, 0xD5, 0xD2, 0xCC, 0xAF, 0x71, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xAF, 0x72, 0xAF, 0x73, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xB0, 0xD6, 0xA7, /* 0xD0-0xD3 */
+	0xB2, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xB9, 0xB2, 0xB1, /* 0xD8-0xDB */
+	0xB5, 0x73, 0xDE, 0xD7, 0xB7, 0xF8, 0xB7, 0xF9, /* 0xDC-0xDF */
+	0x00, 0x00, 0xBA, 0xC9, 0x00, 0x00, 0xBA, 0xCA, /* 0xE0-0xE3 */
+	0xBD, 0x4C, 0xBF, 0x64, 0xEA, 0x75, 0xBF, 0x63, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xED, 0x79, 0xC0, 0xFA, 0x00, 0x00, /* 0xE8-0xEB */
+	0xF0, 0x53, 0xF4, 0x73, 0xA5, 0xD8, 0xA8, 0x6E, /* 0xEC-0xEF */
+	0xCD, 0x78, 0xCD, 0x77, 0xAA, 0xBC, 0xCD, 0x76, /* 0xF0-0xF3 */
+	0xAA, 0xBD, 0xCD, 0x79, 0x00, 0x00, 0xCF, 0xE5, /* 0xF4-0xF7 */
+	0xAC, 0xDB, 0xAC, 0xDA, 0xCF, 0xE7, 0xCF, 0xE6, /* 0xF8-0xFB */
+	0xAC, 0xDF, 0x00, 0x00, 0xAC, 0xDE, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_77[512] = {
+	0x00, 0x00, 0xAC, 0xD9, 0x00, 0x00, 0xCF, 0xE1, /* 0x00-0x03 */
+	0xCF, 0xE2, 0xCF, 0xE3, 0x00, 0x00, 0xAC, 0xE0, /* 0x04-0x07 */
+	0xCF, 0xE0, 0xAC, 0xDC, 0xCF, 0xE4, 0xAC, 0xDD, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xD2, 0xCF, 0xD2, 0xD3, 0xD2, 0xD1, 0xD2, 0xD0, /* 0x10-0x13 */
+	0x00, 0x00, 0xD2, 0xD4, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xD2, 0xD5, 0xD2, 0xD6, 0xD2, 0xCE, /* 0x18-0x1B */
+	0x00, 0x00, 0xD2, 0xCD, 0x00, 0x00, 0xAF, 0x75, /* 0x1C-0x1F */
+	0xAF, 0x76, 0x00, 0x00, 0xD2, 0xD7, 0xD2, 0xD2, /* 0x20-0x23 */
+	0x00, 0x00, 0xD6, 0xB0, 0x00, 0x00, 0xD2, 0xD8, /* 0x24-0x27 */
+	0xAF, 0x77, 0xAF, 0x74, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xD6, 0xAA, 0x00, 0x00, 0xD6, 0xA9, /* 0x2C-0x2F */
+	0x00, 0x00, 0xD6, 0xAB, 0xD6, 0xAC, 0xD6, 0xAE, /* 0x30-0x33 */
+	0xD6, 0xAD, 0xD6, 0xB2, 0xB2, 0xB5, 0xB2, 0xB2, /* 0x34-0x37 */
+	0xB2, 0xB6, 0xD6, 0xA8, 0xB2, 0xB7, 0xD6, 0xB1, /* 0x38-0x3B */
+	0xB2, 0xB4, 0xD6, 0xAF, 0xB2, 0xB3, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xDA, 0xBC, 0xDA, 0xBE, 0xDA, 0xBA, 0xDA, 0xBB, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xBF, 0xDA, 0xC1, /* 0x48-0x4B */
+	0xDA, 0xC2, 0xDA, 0xBD, 0xDA, 0xC0, 0xB5, 0x74, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xDE, 0xDB, 0x00, 0x00, /* 0x50-0x53 */
+	0xDE, 0xE0, 0xDE, 0xD8, 0xDE, 0xDC, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xDE, 0xE1, 0xDE, 0xDD, 0xB7, 0xFA, /* 0x58-0x5B */
+	0xB8, 0x43, 0x00, 0x00, 0xB7, 0xFD, 0xDE, 0xD9, /* 0x5C-0x5F */
+	0xDE, 0xDA, 0xBA, 0xCE, 0xB8, 0x46, 0xB7, 0xFE, /* 0x60-0x63 */
+	0x00, 0x00, 0xB8, 0x44, 0xB7, 0xFC, 0xDE, 0xDF, /* 0x64-0x67 */
+	0xB8, 0x45, 0xDE, 0xDE, 0xB8, 0x41, 0xB7, 0xFB, /* 0x68-0x6B */
+	0xB8, 0x42, 0xDE, 0xE2, 0xE2, 0xE6, 0xE2, 0xE8, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xB8, 0x40, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xE2, 0xE3, 0xBA, 0xCC, 0xE2, 0xE9, 0xBA, 0xCD, /* 0x7C-0x7F */
+	
+	0xE2, 0xE7, 0xE2, 0xE2, 0xE2, 0xE5, 0xE2, 0xEA, /* 0x80-0x83 */
+	0xBA, 0xCB, 0xE2, 0xE4, 0x00, 0x00, 0xBD, 0x4E, /* 0x84-0x87 */
+	0xE6, 0xBF, 0xE6, 0xBE, 0x00, 0x00, 0xBD, 0x51, /* 0x88-0x8B */
+	0xBD, 0x4F, 0xE6, 0xBC, 0xBD, 0x4D, 0xE6, 0xBD, /* 0x8C-0x8F */
+	0x00, 0x00, 0xBD, 0x50, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xEA, 0x7D, 0x00, 0x00, 0xEA, 0xA1, /* 0x94-0x97 */
+	0x00, 0x00, 0xEA, 0x7E, 0xEA, 0x76, 0xEA, 0x7A, /* 0x98-0x9B */
+	0xEA, 0x79, 0xEA, 0x77, 0xBF, 0x66, 0xBF, 0x67, /* 0x9C-0x9F */
+	0xBF, 0x65, 0xEA, 0x78, 0xEA, 0x7B, 0xEA, 0x7C, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xBF, 0x68, 0x00, 0x00, 0xC1, 0x40, /* 0xA4-0xA7 */
+	0xED, 0xA3, 0x00, 0x00, 0xC0, 0xFC, 0xED, 0x7B, /* 0xA8-0xAB */
+	0xC0, 0xFE, 0xC1, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xC0, 0xFD, 0xED, 0xA2, 0xED, 0x7C, 0xC0, 0xFB, /* 0xB0-0xB3 */
+	0xED, 0xA1, 0xED, 0x7A, 0xED, 0x7E, 0xED, 0x7D, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0x55, 0xC2, 0xA4, /* 0xB8-0xBB */
+	0xC2, 0xA5, 0xC2, 0xA2, 0x00, 0x00, 0xC2, 0xA3, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xF0, 0x54, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xF2, 0x7B, 0x00, 0x00, 0x00, 0x00, 0xC3, 0xA9, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF2, 0x79, 0xF2, 0x7A, 0x00, 0x00, /* 0xC8-0xCB */
+	0xF4, 0x74, 0xF4, 0x77, 0xF4, 0x75, 0xF4, 0x76, /* 0xCC-0xCF */
+	0xF5, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xC4, 0xEF, /* 0xD0-0xD3 */
+	0xF7, 0xEB, 0xF8, 0xB4, 0x00, 0x00, 0xC5, 0xF7, /* 0xD4-0xD7 */
+	0xF8, 0xF8, 0xF8, 0xF9, 0xC6, 0x66, 0xA5, 0xD9, /* 0xD8-0xDB */
+	0xAC, 0xE1, 0x00, 0x00, 0xDA, 0xC3, 0x00, 0x00, /* 0xDC-0xDF */
+	0xDE, 0xE3, 0x00, 0x00, 0xA5, 0xDA, 0xA8, 0x6F, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xAA, 0xBE, 0x00, 0x00, 0xCF, 0xE8, /* 0xE4-0xE7 */
+	0xCF, 0xE9, 0xAF, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xDA, 0xC4, 0xB5, 0x75, 0xB8, 0x47, 0xC1, 0x42, /* 0xEC-0xEF */
+	0xED, 0xA4, 0xF2, 0x7C, 0xF4, 0x78, 0xA5, 0xDB, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xA1, /* 0xF4-0xF7 */
+	0xCD, 0x7A, 0xCD, 0x7C, 0xCD, 0x7E, 0xCD, 0x7D, /* 0xF8-0xFB */
+	0xCD, 0x7B, 0xAA, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_78[512] = {
+	0x00, 0x00, 0x00, 0x00, 0xAC, 0xE2, 0xCF, 0xF2, /* 0x00-0x03 */
+	0x00, 0x00, 0xCF, 0xED, 0xCF, 0xEA, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xCF, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xAC, 0xE4, 0xAC, 0xE5, 0xCF, 0xF0, 0xCF, 0xEF, /* 0x0C-0x0F */
+	0xCF, 0xEE, 0xCF, 0xEB, 0xCF, 0xEC, 0xCF, 0xF3, /* 0x10-0x13 */
+	0xAC, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0xAF, 0x7C, 0x00, 0x00, 0xAF, 0xA4, /* 0x1C-0x1F */
+	0xAF, 0xA3, 0xD2, 0xE1, 0xD2, 0xDB, 0xD2, 0xD9, /* 0x20-0x23 */
+	0x00, 0x00, 0xAF, 0xA1, 0xD6, 0xB9, 0xAF, 0x7A, /* 0x24-0x27 */
+	0xD2, 0xDE, 0xD2, 0xE2, 0xD2, 0xE4, 0xD2, 0xE0, /* 0x28-0x2B */
+	0xD2, 0xDA, 0xAF, 0xA2, 0xD2, 0xDF, 0xD2, 0xDD, /* 0x2C-0x2F */
+	0xAF, 0x79, 0xD2, 0xE5, 0xAF, 0xA5, 0xD2, 0xE3, /* 0x30-0x33 */
+	0xAF, 0x7D, 0xD2, 0xDC, 0x00, 0x00, 0xAF, 0x7E, /* 0x34-0x37 */
+	0xAF, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB2, 0xB9, /* 0x40-0x43 */
+	0x00, 0x00, 0xD6, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xD6, 0xB3, 0xD6, 0xB5, 0xD6, 0xB7, 0x00, 0x00, /* 0x48-0x4B */
+	0xD6, 0xB8, 0xD6, 0xB6, 0xB2, 0xBA, 0x00, 0x00, /* 0x4C-0x4F */
+	0xD6, 0xBB, 0x00, 0x00, 0xD6, 0xB4, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0xDA, 0xC8, 0xB5, 0x76, 0xDA, 0xD0, 0x00, 0x00, /* 0x5C-0x5F */
+	0xDA, 0xC5, 0x00, 0x00, 0xDA, 0xD1, 0x00, 0x00, /* 0x60-0x63 */
+	0xDA, 0xC6, 0xDA, 0xC7, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xDA, 0xCF, 0xDA, 0xCE, 0xDA, 0xCB, 0xB2, 0xB8, /* 0x68-0x6B */
+	0xB5, 0x77, 0xDA, 0xC9, 0xDA, 0xCC, 0xB5, 0x78, /* 0x6C-0x6F */
+	0xDA, 0xCD, 0xDA, 0xCA, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xDE, 0xEE, 0x00, 0x00, 0xDE, 0xF2, /* 0x78-0x7B */
+	0xB8, 0x4E, 0x00, 0x00, 0xE2, 0xF0, 0xB8, 0x51, /* 0x7C-0x7F */
+	
+	0xDE, 0xF0, 0xF9, 0xD6, 0x00, 0x00, 0xDE, 0xED, /* 0x80-0x83 */
+	0xDE, 0xE8, 0xDE, 0xEA, 0xDE, 0xEB, 0xDE, 0xE4, /* 0x84-0x87 */
+	0x00, 0x00, 0xB8, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xB8, 0x4C, 0x00, 0x00, 0xB8, 0x48, 0xDE, 0xE7, /* 0x8C-0x8F */
+	0x00, 0x00, 0xB8, 0x4F, 0x00, 0x00, 0xB8, 0x50, /* 0x90-0x93 */
+	0xDE, 0xE6, 0xDE, 0xE9, 0xDE, 0xF1, 0xB8, 0x4A, /* 0x94-0x97 */
+	0xB8, 0x4B, 0xDE, 0xEF, 0xDE, 0xE5, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xE2, 0xF2, 0xBA, 0xD0, /* 0x9C-0x9F */
+	0xE2, 0xF4, 0xDE, 0xEC, 0xE2, 0xF6, 0xBA, 0xD4, /* 0xA0-0xA3 */
+	0xE2, 0xF7, 0xE2, 0xF3, 0x00, 0x00, 0xBA, 0xD1, /* 0xA4-0xA7 */
+	0xE2, 0xEF, 0xBA, 0xD3, 0xE2, 0xEC, 0xE2, 0xF1, /* 0xA8-0xAB */
+	0xE2, 0xF5, 0xE2, 0xEE, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xB8, 0x49, 0x00, 0x00, 0xE2, 0xEB, 0xBA, 0xD2, /* 0xB0-0xB3 */
+	0xE2, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xBD, 0x54, 0xE6, 0xC1, /* 0xB8-0xBB */
+	0xBD, 0x58, 0x00, 0x00, 0xBD, 0x56, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xBA, 0xCF, 0x00, 0x00, 0xE6, 0xC8, /* 0xC0-0xC3 */
+	0xE6, 0xC9, 0xBD, 0x53, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE6, 0xC7, 0xE6, 0xCA, 0xBD, 0x55, 0xBD, 0x52, /* 0xC8-0xCB */
+	0xE6, 0xC3, 0xE6, 0xC0, 0xE6, 0xC5, 0xE6, 0xC2, /* 0xCC-0xCF */
+	0xBD, 0x59, 0xE6, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xE6, 0xC6, 0xBD, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xBF, 0x6A, 0xEA, 0xA8, /* 0xD8-0xDB */
+	0x00, 0x00, 0xEA, 0xA2, 0xEA, 0xA6, 0xEA, 0xAC, /* 0xDC-0xDF */
+	0xEA, 0xAD, 0xEA, 0xA9, 0xEA, 0xAA, 0xEA, 0xA7, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xEA, 0xA4, 0x00, 0x00, 0xBF, 0x6C, /* 0xE4-0xE7 */
+	0xBF, 0x69, 0xEA, 0xA3, 0xEA, 0xA5, 0x00, 0x00, /* 0xE8-0xEB */
+	0xBF, 0x6B, 0xEA, 0xAB, 0x00, 0x00, 0xC1, 0x46, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xAA, 0xED, 0xA5, /* 0xF0-0xF3 */
+	0xC1, 0x45, 0x00, 0x00, 0x00, 0x00, 0xC1, 0x43, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xED, 0xAC, 0xC1, 0x44, 0xED, 0xA8, /* 0xF8-0xFB */
+	0xED, 0xA9, 0xED, 0xA6, 0xED, 0xAD, 0xF0, 0x56, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_79[512] = {
+	0x00, 0x00, 0xC1, 0x47, 0xED, 0xA7, 0x00, 0x00, /* 0x00-0x03 */
+	0xED, 0xAE, 0xED, 0xAB, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0xF0, 0x5A, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xF0, 0x57, 0x00, 0x00, 0xC2, 0xA6, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF0, 0x5B, 0xF0, 0x5D, 0xF0, 0x5C, 0xF0, 0x58, /* 0x10-0x13 */
+	0xF0, 0x59, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA3, /* 0x14-0x17 */
+	0x00, 0x00, 0xC3, 0xAA, 0x00, 0x00, 0xF2, 0x7E, /* 0x18-0x1B */
+	0xF2, 0xA2, 0xF2, 0x7D, 0xF2, 0xA4, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xF2, 0xA1, 0x00, 0x00, 0xF4, 0x7A, /* 0x20-0x23 */
+	0xF4, 0x7D, 0xF4, 0x79, 0xC4, 0x71, 0xF4, 0x7B, /* 0x24-0x27 */
+	0xF4, 0x7C, 0xF4, 0x7E, 0xC4, 0x72, 0xC4, 0x74, /* 0x28-0x2B */
+	0xC4, 0x73, 0xF5, 0xE1, 0x00, 0x00, 0xF5, 0xE3, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF5, 0xE2, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF6, 0xF6, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xF8, 0xB5, 0xF8, 0xFA, 0xA5, 0xDC, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xCB, 0x72, 0xAA, 0xC0, 0xCD, 0xA3, /* 0x3C-0x3F */
+	0xAA, 0xC1, 0xAA, 0xC2, 0xCD, 0xA2, 0x00, 0x00, /* 0x40-0x43 */
+	0xCF, 0xF8, 0xCF, 0xF7, 0xAC, 0xE6, 0xAC, 0xE9, /* 0x44-0x47 */
+	0xAC, 0xE8, 0xAC, 0xE7, 0xCF, 0xF4, 0xCF, 0xF6, /* 0x48-0x4B */
+	0xCF, 0xF5, 0x00, 0x00, 0x00, 0x00, 0xD2, 0xE8, /* 0x4C-0x4F */
+	0xAF, 0xA7, 0xD2, 0xEC, 0xD2, 0xEB, 0xD2, 0xEA, /* 0x50-0x53 */
+	0xD2, 0xE6, 0xAF, 0xA6, 0xAF, 0xAA, 0xAF, 0xAD, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xAF, 0xAE, 0xD2, 0xE7, /* 0x58-0x5B */
+	0xD2, 0xE9, 0xAF, 0xAC, 0xAF, 0xAB, 0xAF, 0xA9, /* 0x5C-0x5F */
+	0xAF, 0xA8, 0xD6, 0xC2, 0x00, 0x00, 0xD6, 0xC0, /* 0x60-0x63 */
+	0xD6, 0xBC, 0xB2, 0xBB, 0x00, 0x00, 0xD6, 0xBD, /* 0x64-0x67 */
+	0xB2, 0xBC, 0xD6, 0xBE, 0xD6, 0xBF, 0xD6, 0xC1, /* 0x68-0x6B */
+	0x00, 0x00, 0xB2, 0xBD, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xDA, 0xD5, 0x00, 0x00, 0xDA, 0xD4, 0xDA, 0xD3, /* 0x70-0x73 */
+	0xDA, 0xD2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xDE, 0xF6, 0xB8, 0x52, 0x00, 0x00, /* 0x78-0x7B */
+	0xDE, 0xF3, 0xDE, 0xF5, 0x00, 0x00, 0xB8, 0x53, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xB8, 0x54, 0xDE, 0xF4, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE3, 0x41, 0x00, 0x00, 0xE2, 0xF9, 0xE2, 0xFA, /* 0x88-0x8B */
+	0x00, 0x00, 0xBA, 0xD7, 0xBA, 0xD5, 0xBA, 0xD6, /* 0x8C-0x8F */
+	0xE3, 0x43, 0x00, 0x00, 0xE3, 0x42, 0xE2, 0xFE, /* 0x90-0x93 */
+	0xE2, 0xFD, 0xE2, 0xFC, 0xE2, 0xFB, 0xE3, 0x40, /* 0x94-0x97 */
+	0xE2, 0xF8, 0x00, 0x00, 0xE6, 0xCB, 0xE6, 0xD0, /* 0x98-0x9B */
+	0xE6, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE6, 0xCD, 0xE6, 0xCC, 0xE6, 0xCF, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xEA, 0xAE, 0x00, 0x00, 0xBF, 0x6D, 0xC1, 0x48, /* 0xA4-0xA7 */
+	0xED, 0xB0, 0x00, 0x00, 0xC1, 0x49, 0xED, 0xAF, /* 0xA8-0xAB */
+	0xF0, 0x5F, 0xF0, 0x5E, 0xC2, 0xA7, 0x00, 0x00, /* 0xAC-0xAF */
+	0xF2, 0xA5, 0xC3, 0xAB, 0xF4, 0xA1, 0xC5, 0xA1, /* 0xB0-0xB3 */
+	0xF6, 0xF7, 0x00, 0x00, 0xF8, 0xB7, 0xF8, 0xB6, /* 0xB4-0xB7 */
+	0xC9, 0xA8, 0xAC, 0xEA, 0xAC, 0xEB, 0xD6, 0xC3, /* 0xB8-0xBB */
+	0x00, 0x00, 0xB8, 0x56, 0xA5, 0xDD, 0xA8, 0x72, /* 0xBC-0xBF */
+	0xA8, 0x71, 0xA8, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xCD, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xAA, 0xC4, 0xAA, 0xC3, 0x00, 0x00, 0xAC, 0xEE, /* 0xC8-0xCB */
+	0x00, 0x00, 0xCF, 0xFA, 0xCF, 0xFD, 0xCF, 0xFB, /* 0xCC-0xCF */
+	0x00, 0x00, 0xAC, 0xEC, 0xAC, 0xED, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xCF, 0xF9, 0xCF, 0xFC, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xAF, 0xB5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xD2, 0xF3, 0xD2, 0xF5, 0xD2, 0xF4, 0xAF, 0xB2, /* 0xDC-0xDF */
+	0xD2, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xAF, 0xB0, /* 0xE0-0xE3 */
+	0xAF, 0xAF, 0x00, 0x00, 0xAF, 0xB3, 0xAF, 0xB1, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xAF, 0xB4, 0xD2, 0xF2, 0xD2, 0xED, /* 0xE8-0xEB */
+	0xD2, 0xEE, 0xD2, 0xF1, 0xD2, 0xF0, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xC6, 0xD6, 0xC7, /* 0xF4-0xF7 */
+	0xD6, 0xC5, 0x00, 0x00, 0xD6, 0xC4, 0xB2, 0xBE, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7A[512] = {
+	0xB5, 0x7D, 0x00, 0x00, 0xDA, 0xD6, 0xDA, 0xD8, /* 0x00-0x03 */
+	0xDA, 0xDA, 0xB5, 0x7C, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xB5, 0x7A, 0x00, 0x00, 0xDA, 0xD7, 0xB5, 0x7B, /* 0x08-0x0B */
+	0xDA, 0xD9, 0xB5, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xDF, 0x41, 0xDE, 0xF7, 0xDE, 0xFA, 0xDE, 0xFE, /* 0x10-0x13 */
+	0xB8, 0x5A, 0xDE, 0xFC, 0x00, 0x00, 0xDE, 0xFB, /* 0x14-0x17 */
+	0xDE, 0xF8, 0xDE, 0xF9, 0xB8, 0x58, 0xDF, 0x40, /* 0x18-0x1B */
+	0xB8, 0x57, 0x00, 0x00, 0xB8, 0x5C, 0xB8, 0x5B, /* 0x1C-0x1F */
+	0xB8, 0x59, 0x00, 0x00, 0xDE, 0xFD, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x49, 0x00, 0x00, /* 0x24-0x27 */
+	0xE3, 0x48, 0x00, 0x00, 0x00, 0x00, 0xE3, 0x44, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xBA, 0xD8, 0xE3, 0x47, /* 0x2C-0x2F */
+	0xE3, 0x46, 0xBA, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBD, 0x5E, /* 0x34-0x37 */
+	0x00, 0x00, 0xE6, 0xD2, 0x00, 0x00, 0xBD, 0x5F, /* 0x38-0x3B */
+	0xBD, 0x5B, 0xBD, 0x5D, 0x00, 0x00, 0xBD, 0x5A, /* 0x3C-0x3F */
+	0xBD, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xEA, 0xAF, 0x00, 0x00, 0xBF, 0x70, 0xEA, 0xB1, /* 0x44-0x47 */
+	0xEA, 0xB0, 0x00, 0x00, 0xE3, 0x45, 0xBF, 0x72, /* 0x48-0x4B */
+	0xBF, 0x71, 0xBF, 0x6E, 0xBF, 0x6F, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xED, 0xB5, 0x00, 0x00, 0xED, 0xB3, 0xC1, 0x4A, /* 0x54-0x57 */
+	0xED, 0xB4, 0x00, 0x00, 0xED, 0xB6, 0xED, 0xB2, /* 0x58-0x5B */
+	0xED, 0xB1, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x60, /* 0x5C-0x5F */
+	0xC2, 0xAA, 0xC2, 0xA8, 0xC2, 0xA9, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA6, /* 0x64-0x67 */
+	0xF2, 0xA7, 0xC3, 0xAD, 0x00, 0x00, 0xC3, 0xAC, /* 0x68-0x6B */
+	0xF4, 0xA3, 0xF4, 0xA4, 0xF4, 0xA2, 0x00, 0x00, /* 0x6C-0x6F */
+	0xF6, 0xF8, 0xF6, 0xF9, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xA5, 0xDE, 0xCA, 0x48, 0xA8, 0x73, 0x00, 0x00, /* 0x74-0x77 */
+	0xCD, 0xA5, 0xAA, 0xC6, 0xAA, 0xC5, 0xCD, 0xA6, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0x40, 0xAC, 0xEF, /* 0x7C-0x7F */
+	
+	0xCF, 0xFE, 0xAC, 0xF0, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xAF, 0xB6, 0xD2, 0xF8, 0xD2, 0xF6, 0xD2, 0xFC, /* 0x84-0x87 */
+	0xAF, 0xB7, 0xD2, 0xF7, 0xD2, 0xFB, 0xD2, 0xF9, /* 0x88-0x8B */
+	0xD2, 0xFA, 0x00, 0x00, 0x00, 0x00, 0xD6, 0xC8, /* 0x8C-0x8F */
+	0xD6, 0xCA, 0x00, 0x00, 0xB2, 0xBF, 0x00, 0x00, /* 0x90-0x93 */
+	0xD6, 0xC9, 0xB2, 0xC0, 0xB5, 0xA2, 0xB5, 0xA1, /* 0x94-0x97 */
+	0xB5, 0x7E, 0xDA, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0x44, 0xB8, 0x5D, /* 0x9C-0x9F */
+	0xB8, 0x5E, 0x00, 0x00, 0xDF, 0x43, 0xDF, 0x42, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xE3, 0x4A, 0xBA, 0xDB, 0xBA, 0xDA, 0xE3, 0x4B, /* 0xA8-0xAB */
+	0xE3, 0x4C, 0x00, 0x00, 0xBD, 0x61, 0xBD, 0x60, /* 0xAC-0xAF */
+	0x00, 0x00, 0xEA, 0xB5, 0xE6, 0xD3, 0xE6, 0xD5, /* 0xB0-0xB3 */
+	0xE6, 0xD4, 0xEA, 0xB4, 0xEA, 0xB2, 0xEA, 0xB6, /* 0xB4-0xB7 */
+	0xEA, 0xB3, 0x00, 0x00, 0xBF, 0x73, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xED, 0xB7, 0xC1, 0x4B, /* 0xBC-0xBF */
+	0xED, 0xB8, 0xED, 0xB9, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xC2, 0xAB, 0xC2, 0xAC, 0x00, 0x00, 0xC4, 0x75, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xC5, 0xD1, 0xA5, 0xDF, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xD0, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xD2, 0xFD, 0xAF, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0xBA, /* 0xDC-0xDF */
+	0xB3, 0xB9, 0x00, 0x00, 0x00, 0x00, 0xB5, 0xA4, /* 0xE0-0xE3 */
+	0xDA, 0xDD, 0xB5, 0xA3, 0xDA, 0xDC, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0x45, /* 0xE8-0xEB */
+	0x00, 0x00, 0xBA, 0xDC, 0xE3, 0x4D, 0xBA, 0xDD, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xC4, 0x76, 0xF4, 0xA5, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xA6, 0xCB, 0xAA, 0xC7, 0xCD, 0xA7, /* 0xF8-0xFB */
+	0x00, 0x00, 0xAC, 0xF2, 0x00, 0x00, 0xAC, 0xF1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7B[512] = {
+	0xD0, 0x42, 0xD0, 0x43, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xD3, 0x40, 0xD3, 0x42, 0xAF, 0xB9, 0x00, 0x00, /* 0x04-0x07 */
+	0xD3, 0x44, 0xD3, 0x47, 0xD3, 0x45, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0x46, 0xD3, 0x43, /* 0x0C-0x0F */
+	0xD2, 0xFE, 0xAF, 0xBA, 0xD3, 0x48, 0xD3, 0x41, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xD6, 0xD3, 0xB2, 0xC6, 0xD6, 0xDC, 0xB2, 0xC3, /* 0x18-0x1B */
+	0x00, 0x00, 0xD6, 0xD5, 0xB2, 0xC7, 0x00, 0x00, /* 0x1C-0x1F */
+	0xB2, 0xC1, 0x00, 0x00, 0xD6, 0xD0, 0xD6, 0xDD, /* 0x20-0x23 */
+	0xD6, 0xD1, 0xD6, 0xCE, 0xB2, 0xC5, 0x00, 0x00, /* 0x24-0x27 */
+	0xB2, 0xC2, 0x00, 0x00, 0xD6, 0xD4, 0xD6, 0xD7, /* 0x28-0x2B */
+	0xB2, 0xC4, 0xD6, 0xD8, 0xB2, 0xC8, 0xD6, 0xD9, /* 0x2C-0x2F */
+	0xD6, 0xCF, 0xD6, 0xD6, 0xD6, 0xDA, 0xD6, 0xD2, /* 0x30-0x33 */
+	0xD6, 0xCD, 0xD6, 0xCB, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xD6, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xDA, 0xDF, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0xDA, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xDA, 0xE0, 0xDA, 0xE6, 0xB5, 0xA7, 0xD6, 0xCC, /* 0x44-0x47 */
+	0xDA, 0xE1, 0xB5, 0xA5, 0xDA, 0xDE, 0xB5, 0xAC, /* 0x48-0x4B */
+	0xDA, 0xE2, 0xB5, 0xAB, 0xDA, 0xE3, 0xB5, 0xAD, /* 0x4C-0x4F */
+	0xB5, 0xA8, 0xB5, 0xAE, 0xB5, 0xA9, 0x00, 0x00, /* 0x50-0x53 */
+	0xB5, 0xAA, 0x00, 0x00, 0xB5, 0xA6, 0x00, 0x00, /* 0x54-0x57 */
+	0xDA, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0xB8, 0x61, 0xDF, 0x50, 0x00, 0x00, 0xDF, 0x53, /* 0x60-0x63 */
+	0xDF, 0x47, 0xDF, 0x4C, 0xDF, 0x46, 0xB8, 0x63, /* 0x64-0x67 */
+	0x00, 0x00, 0xDF, 0x4A, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xDF, 0x48, 0xB8, 0x62, 0x00, 0x00, /* 0x6C-0x6F */
+	0xDF, 0x4F, 0xDF, 0x4E, 0xDF, 0x4B, 0xDF, 0x4D, /* 0x70-0x73 */
+	0xDF, 0x49, 0xBA, 0xE1, 0xDF, 0x52, 0xB8, 0x5F, /* 0x74-0x77 */
+	0xDF, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x5D, 0x00, 0x00, /* 0x80-0x83 */
+	0xBA, 0xE8, 0xE3, 0x58, 0x00, 0x00, 0xBA, 0xE7, /* 0x84-0x87 */
+	0xE3, 0x4E, 0x00, 0x00, 0xE3, 0x50, 0xBA, 0xE0, /* 0x88-0x8B */
+	0xE3, 0x55, 0xE3, 0x54, 0xE3, 0x57, 0xBA, 0xE5, /* 0x8C-0x8F */
+	0xE3, 0x52, 0xE3, 0x51, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xBA, 0xE4, 0xBA, 0xDF, 0xE3, 0x53, 0xBA, 0xE2, /* 0x94-0x97 */
+	0xE3, 0x59, 0xE3, 0x5B, 0x00, 0x00, 0xE3, 0x56, /* 0x98-0x9B */
+	0xE3, 0x4F, 0xBA, 0xE3, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xBD, 0x69, 0xBA, 0xDE, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE3, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE6, 0xD9, 0xBD, 0x62, 0x00, 0x00, 0xE6, 0xDB, /* 0xAC-0xAF */
+	0x00, 0x00, 0xBD, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xBD, 0x65, 0xE6, 0xDE, 0x00, 0x00, 0xE6, 0xD6, /* 0xB4-0xB7 */
+	0xBA, 0xE6, 0xE6, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xD8, 0x00, 0x00, /* 0xBC-0xBF */
+	0xB8, 0x60, 0xBD, 0x68, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xBD, 0x64, 0x00, 0x00, 0xBD, 0x66, 0xBD, 0x67, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xBF, 0x76, 0xE6, 0xDD, 0xE6, 0xD7, /* 0xC8-0xCB */
+	0xBD, 0x6A, 0x00, 0x00, 0xE6, 0xDA, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xEA, 0xC0, 0xEA, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xEA, 0xC5, 0xBF, 0x74, 0xEA, 0xBD, 0xBF, 0x78, /* 0xD8-0xDB */
+	0xEA, 0xC3, 0xEA, 0xBA, 0xEA, 0xB7, 0xEA, 0xC6, /* 0xDC-0xDF */
+	0xC1, 0x51, 0xBF, 0x79, 0xEA, 0xC2, 0xEA, 0xB8, /* 0xE0-0xE3 */
+	0xBF, 0x77, 0xEA, 0xBC, 0xBF, 0x7B, 0xEA, 0xB9, /* 0xE4-0xE7 */
+	0xEA, 0xBE, 0xBF, 0x7A, 0xEA, 0xC1, 0xEA, 0xC4, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xED, 0xCB, 0xED, 0xCC, 0xED, 0xBC, 0xED, 0xC3, /* 0xF0-0xF3 */
+	0xED, 0xC1, 0x00, 0x00, 0x00, 0x00, 0xC1, 0x4F, /* 0xF4-0xF7 */
+	0xED, 0xC8, 0xEA, 0xBF, 0x00, 0x00, 0xED, 0xBF, /* 0xF8-0xFB */
+	0x00, 0x00, 0xED, 0xC9, 0xC1, 0x4E, 0xED, 0xBE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7C[512] = {
+	0xED, 0xBD, 0xED, 0xC7, 0xED, 0xC4, 0xED, 0xC6, /* 0x00-0x03 */
+	0x00, 0x00, 0xED, 0xBA, 0xED, 0xCA, 0xC1, 0x4C, /* 0x04-0x07 */
+	0x00, 0x00, 0xED, 0xC5, 0xED, 0xCE, 0xED, 0xC2, /* 0x08-0x0B */
+	0xC1, 0x50, 0xC1, 0x4D, 0xED, 0xC0, 0xED, 0xBB, /* 0x0C-0x0F */
+	0xED, 0xCD, 0xBF, 0x75, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xF0, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xF0, 0x61, 0xF0, 0x67, 0xC2, 0xB0, 0xF0, 0x65, /* 0x1C-0x1F */
+	0xF0, 0x64, 0xC2, 0xB2, 0xF0, 0x6A, 0xC2, 0xB1, /* 0x20-0x23 */
+	0x00, 0x00, 0xF0, 0x6B, 0xF0, 0x68, 0xC2, 0xAE, /* 0x24-0x27 */
+	0xF0, 0x69, 0xF0, 0x62, 0xC2, 0xAF, 0xC2, 0xAD, /* 0x28-0x2B */
+	0xF2, 0xAB, 0xF0, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xF0, 0x6C, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xA8, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC3, 0xB2, /* 0x34-0x37 */
+	0xC3, 0xB0, 0xF2, 0xAA, 0x00, 0x00, 0xF2, 0xAC, /* 0x38-0x3B */
+	0xF2, 0xA9, 0xC3, 0xB1, 0xC3, 0xAE, 0xC3, 0xAF, /* 0x3C-0x3F */
+	0xC3, 0xB3, 0x00, 0x00, 0x00, 0x00, 0xC4, 0x78, /* 0x40-0x43 */
+	0x00, 0x00, 0xF4, 0xAA, 0x00, 0x00, 0xF4, 0xA9, /* 0x44-0x47 */
+	0xF4, 0xA7, 0xF4, 0xA6, 0xF4, 0xA8, 0x00, 0x00, /* 0x48-0x4B */
+	0xC4, 0x77, 0xC4, 0x79, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xC4, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xF5, 0xE5, /* 0x50-0x53 */
+	0xF5, 0xE4, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xFA, /* 0x54-0x57 */
+	0x00, 0x00, 0xF6, 0xFC, 0xF6, 0xFE, 0xF6, 0xFD, /* 0x58-0x5B */
+	0xF6, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xC5, 0xA3, /* 0x5C-0x5F */
+	0xC5, 0xA2, 0x00, 0x00, 0x00, 0x00, 0xC5, 0xD3, /* 0x60-0x63 */
+	0xC5, 0xD2, 0xC5, 0xD4, 0xF7, 0xED, 0xF7, 0xEC, /* 0x64-0x67 */
+	0x00, 0x00, 0xF8, 0xFB, 0xF8, 0xB8, 0xF8, 0xFC, /* 0x68-0x6B */
+	0xC6, 0x58, 0x00, 0x00, 0xC6, 0x59, 0xF9, 0x6D, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xC6, 0x7E, 0xA6, 0xCC, /* 0x70-0x73 */
+	0x00, 0x00, 0xCD, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0xD0, 0x45, 0xD0, 0x46, 0xD0, 0x44, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xAC, 0xF3, 0x00, 0x00, 0xD0, 0x47, /* 0x7C-0x7F */
+	
+	0xD0, 0x48, 0xD0, 0x49, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xD3, 0x49, 0xD3, 0x4F, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xD3, 0x4D, 0xAF, 0xBB, 0xD3, 0x4B, 0x00, 0x00, /* 0x88-0x8B */
+	0xD3, 0x4C, 0xD3, 0x4E, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD3, 0x4A, 0xB2, 0xC9, 0x00, 0x00, /* 0x90-0x93 */
+	0xD6, 0xDE, 0xB2, 0xCB, 0xD6, 0xE0, 0xB2, 0xCA, /* 0x94-0x97 */
+	0xD6, 0xDF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xE8, 0xB5, 0xAF, /* 0x9C-0x9F */
+	0x00, 0x00, 0xDA, 0xEA, 0xDA, 0xE7, 0xD6, 0xE1, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xB5, 0xB0, 0x00, 0x00, 0xF9, 0xDB, /* 0xA4-0xA7 */
+	0xDA, 0xE9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0x56, /* 0xAC-0xAF */
+	0x00, 0x00, 0xB8, 0x64, 0xDF, 0x54, 0xB8, 0x65, /* 0xB0-0xB3 */
+	0xDF, 0x55, 0xB8, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xBA, 0xE9, 0xE3, 0x61, 0xE3, 0x5E, /* 0xB8-0xBB */
+	0xE3, 0x60, 0xBA, 0xEA, 0xBA, 0xEB, 0xE3, 0x5F, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xE6, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xE6, 0xE0, 0x00, 0x00, 0xBD, 0x6B, 0xE6, 0xE2, /* 0xC8-0xCB */
+	0xE6, 0xE1, 0x00, 0x00, 0xA2, 0x61, 0x00, 0x00, /* 0xCC-0xCF */
+	0xEA, 0xCA, 0xEA, 0xCB, 0xEA, 0xC7, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xEA, 0xC8, 0xBF, 0x7C, 0xBF, 0x7D, 0xEA, 0xC9, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xC1, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xC1, 0x53, 0xC1, 0x58, 0xC1, 0x54, 0xC1, 0x56, /* 0xDC-0xDF */
+	0xC1, 0x52, 0x00, 0x00, 0xC1, 0x55, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC2, 0xB3, /* 0xE4-0xE7 */
+	0xED, 0xCF, 0x00, 0x00, 0xF2, 0xAE, 0x00, 0x00, /* 0xE8-0xEB */
+	0xF2, 0xAD, 0x00, 0x00, 0xF4, 0xAB, 0xC4, 0x7A, /* 0xEC-0xEF */
+	0xC4, 0x7B, 0xF7, 0x41, 0xF5, 0xE6, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xF7, 0x40, 0x00, 0x00, 0xF8, 0xFD, 0xF9, 0xA4, /* 0xF4-0xF7 */
+	0xA6, 0xCD, 0x00, 0x00, 0x00, 0x00, 0xA8, 0x74, /* 0xF8-0xFB */
+	0x00, 0x00, 0xCD, 0xA9, 0xAA, 0xC8, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_7D[512] = {
+	0xAC, 0xF6, 0xD0, 0x4C, 0xAC, 0xF4, 0xD0, 0x4A, /* 0x00-0x03 */
+	0xAC, 0xF9, 0xAC, 0xF5, 0xAC, 0xFA, 0xAC, 0xF8, /* 0x04-0x07 */
+	0xD0, 0x4B, 0xAC, 0xF7, 0xAF, 0xBF, 0xAF, 0xBE, /* 0x08-0x0B */
+	0xD3, 0x5A, 0xAF, 0xC7, 0xD3, 0x53, 0xD3, 0x59, /* 0x0C-0x0F */
+	0xAF, 0xC3, 0xD3, 0x52, 0xD3, 0x58, 0xD3, 0x56, /* 0x10-0x13 */
+	0xAF, 0xC2, 0xAF, 0xC4, 0xD3, 0x55, 0xAF, 0xBD, /* 0x14-0x17 */
+	0xD3, 0x54, 0xAF, 0xC8, 0xAF, 0xC5, 0xAF, 0xC9, /* 0x18-0x1B */
+	0xAF, 0xC6, 0xD3, 0x51, 0xD3, 0x50, 0xD3, 0x57, /* 0x1C-0x1F */
+	0xAF, 0xC0, 0xAF, 0xBC, 0xAF, 0xC1, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xD6, 0xF0, 0xD6, 0xE9, 0x00, 0x00, 0xB5, 0xB5, /* 0x28-0x2B */
+	0xD6, 0xE8, 0x00, 0x00, 0xB2, 0xCF, 0xB2, 0xD6, /* 0x2C-0x2F */
+	0xB2, 0xD3, 0xB2, 0xD9, 0xB2, 0xD8, 0xB2, 0xD4, /* 0x30-0x33 */
+	0x00, 0x00, 0xD6, 0xE2, 0xD6, 0xE5, 0x00, 0x00, /* 0x34-0x37 */
+	0xD6, 0xE4, 0xB2, 0xD0, 0xD6, 0xE6, 0xD6, 0xEF, /* 0x38-0x3B */
+	0xB2, 0xD1, 0xD6, 0xE3, 0xD6, 0xEC, 0xD6, 0xED, /* 0x3C-0x3F */
+	0xB2, 0xD2, 0xD6, 0xEA, 0xB2, 0xD7, 0xB2, 0xCD, /* 0x40-0x43 */
+	0xB2, 0xD5, 0xD6, 0xE7, 0xB2, 0xCC, 0xD6, 0xEB, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xD6, 0xEE, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xDA, 0xFB, 0xDA, 0xF2, /* 0x4C-0x4F */
+	0xB5, 0xB2, 0xDA, 0xF9, 0xDA, 0xF6, 0xDA, 0xEE, /* 0x50-0x53 */
+	0xDA, 0xF7, 0xB5, 0xB4, 0xDA, 0xEF, 0x00, 0x00, /* 0x54-0x57 */
+	0xDA, 0xEB, 0x00, 0x00, 0x00, 0x00, 0xB8, 0x6C, /* 0x58-0x5B */
+	0xDA, 0xF4, 0x00, 0x00, 0xB5, 0xB1, 0xDA, 0xFA, /* 0x5C-0x5F */
+	0x00, 0x00, 0xB5, 0xB8, 0xB5, 0xBA, 0xDA, 0xED, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xB5, 0xB9, 0xDA, 0xF0, /* 0x64-0x67 */
+	0xB5, 0xB3, 0xDA, 0xF8, 0xDA, 0xF1, 0xDA, 0xF5, /* 0x68-0x6B */
+	0x00, 0x00, 0xDA, 0xF3, 0xB5, 0xB6, 0xDA, 0xEC, /* 0x6C-0x6F */
+	0xB5, 0xBB, 0xB2, 0xCE, 0xB5, 0xB7, 0xB5, 0xBC, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xB8, 0x68, 0xDF, 0x5D, 0xDF, 0x5F, /* 0x78-0x7B */
+	0xDF, 0x61, 0xDF, 0x65, 0x00, 0x00, 0xDF, 0x5B, /* 0x7C-0x7F */
+	
+	0xDF, 0x59, 0xB8, 0x6A, 0x00, 0x00, 0xDF, 0x60, /* 0x80-0x83 */
+	0xDF, 0x64, 0xDF, 0x5C, 0xDF, 0x58, 0x00, 0x00, /* 0x84-0x87 */
+	0xDF, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0xDF, 0x62, 0xDF, 0x5A, 0xDF, 0x5E, 0xB8, 0x6B, /* 0x8C-0x8F */
+	0x00, 0x00, 0xB8, 0x69, 0xDF, 0x66, 0xB8, 0x67, /* 0x90-0x93 */
+	0xDF, 0x63, 0x00, 0x00, 0xE3, 0x72, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xBA, 0xEE, 0xE3, 0x6A, 0xBD, 0x78, 0xE3, 0x74, /* 0x9C-0x9F */
+	0xBA, 0xF1, 0xE3, 0x78, 0xBA, 0xF7, 0xE3, 0x65, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0x75, 0xE3, 0x62, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xE3, 0x77, 0xE3, 0x66, 0x00, 0x00, /* 0xA8-0xAB */
+	0xBA, 0xFE, 0xBA, 0xFB, 0xE3, 0x76, 0xE3, 0x70, /* 0xAC-0xAF */
+	0xBA, 0xED, 0xBA, 0xF5, 0xBA, 0xF4, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xBA, 0xF3, 0xBA, 0xF9, 0x00, 0x00, 0xE3, 0x63, /* 0xB4-0xB7 */
+	0xBA, 0xFA, 0xE3, 0x71, 0xBA, 0xF6, 0xBA, 0xEC, /* 0xB8-0xBB */
+	0xE3, 0x73, 0xBA, 0xEF, 0xBA, 0xF0, 0xBA, 0xF8, /* 0xBC-0xBF */
+	0xE3, 0x68, 0xE3, 0x67, 0xE3, 0x64, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xE3, 0x6C, 0xE3, 0x69, 0xE3, 0x6D, 0xBA, 0xFD, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE3, 0x79, 0xBA, 0xF2, 0xE3, 0x6E, /* 0xC8-0xCB */
+	0xE3, 0x6F, 0x00, 0x00, 0xE3, 0x6B, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xBA, 0xFC, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE7, /* 0xD4-0xD7 */
+	0xBD, 0x70, 0xBD, 0x79, 0xBD, 0x75, 0xE6, 0xE4, /* 0xD8-0xDB */
+	0x00, 0x00, 0xBD, 0x72, 0xBD, 0x76, 0xE6, 0xF0, /* 0xDC-0xDF */
+	0xBD, 0x6C, 0xE6, 0xE8, 0x00, 0x00, 0xBD, 0x74, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xE6, 0xEB, 0xE6, 0xE6, /* 0xE4-0xE7 */
+	0xBD, 0x73, 0xBD, 0x77, 0xE6, 0xE5, 0x00, 0x00, /* 0xE8-0xEB */
+	0xBD, 0x71, 0x00, 0x00, 0xE6, 0xEF, 0xBD, 0x6E, /* 0xEC-0xEF */
+	0xE6, 0xEE, 0xE6, 0xED, 0xBD, 0x7A, 0xE5, 0x72, /* 0xF0-0xF3 */
+	0xBD, 0x6D, 0x00, 0x00, 0xE6, 0xEC, 0xE6, 0xE3, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xBD, 0x7B, 0xE6, 0xEA, 0xBD, 0x6F, /* 0xF8-0xFB */
+};
+
+static unsigned char u2c_7E[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xE9, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0xBF, 0xA2, 0xBF, 0xA7, 0xBF, 0x7E, 0xEA, 0xD8, /* 0x08-0x0B */
+	0xEA, 0xCF, 0xEA, 0xDB, 0xEA, 0xD3, 0xEA, 0xD9, /* 0x0C-0x0F */
+	0xBF, 0xA8, 0xBF, 0xA1, 0xEA, 0xCC, 0xEA, 0xD2, /* 0x10-0x13 */
+	0xEA, 0xDC, 0xEA, 0xD5, 0xEA, 0xDA, 0xEA, 0xCE, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xD6, 0xBF, 0xA3, /* 0x18-0x1B */
+	0xEA, 0xD4, 0xBF, 0xA6, 0xBF, 0xA5, 0xEA, 0xD0, /* 0x1C-0x1F */
+	0xEA, 0xD1, 0xEA, 0xCD, 0xEA, 0xD7, 0xBF, 0xA4, /* 0x20-0x23 */
+	0xEA, 0xDE, 0xEA, 0xDD, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xED, 0xDA, 0xED, 0xD6, 0xC1, 0x5F, /* 0x28-0x2B */
+	0x00, 0x00, 0xED, 0xD0, 0xC1, 0x59, 0xC1, 0x69, /* 0x2C-0x2F */
+	0xED, 0xDC, 0xC1, 0x61, 0xC1, 0x5D, 0xED, 0xD3, /* 0x30-0x33 */
+	0xC1, 0x64, 0xC1, 0x67, 0xED, 0xDE, 0xC1, 0x5C, /* 0x34-0x37 */
+	0xED, 0xD5, 0xC1, 0x65, 0xED, 0xE0, 0xED, 0xDD, /* 0x38-0x3B */
+	0xED, 0xD1, 0xC1, 0x60, 0xC1, 0x5A, 0xC1, 0x68, /* 0x3C-0x3F */
+	0xED, 0xD8, 0xC1, 0x63, 0xED, 0xD2, 0xC1, 0x5E, /* 0x40-0x43 */
+	0xED, 0xDF, 0xC1, 0x62, 0xC1, 0x5B, 0xED, 0xD9, /* 0x44-0x47 */
+	0xC1, 0x66, 0xED, 0xD7, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xED, 0xDB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF0, 0x6E, 0xF0, 0x74, 0xC2, 0xB9, 0xF0, 0x77, /* 0x50-0x53 */
+	0xC2, 0xB4, 0xC2, 0xB5, 0xF0, 0x6F, 0xF0, 0x76, /* 0x54-0x57 */
+	0xF0, 0x71, 0xC2, 0xBA, 0xC2, 0xB7, 0x00, 0x00, /* 0x58-0x5B */
+	0xF0, 0x6D, 0x00, 0x00, 0xC2, 0xB6, 0xF0, 0x73, /* 0x5C-0x5F */
+	0xF0, 0x75, 0xC2, 0xB8, 0xF0, 0x72, 0xF0, 0x70, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF2, 0xB8, 0xC3, 0xB7, 0xC3, 0xB8, 0xC3, 0xB4, /* 0x68-0x6B */
+	0x00, 0x00, 0xC3, 0xB5, 0x00, 0x00, 0xF2, 0xB4, /* 0x6C-0x6F */
+	0xF2, 0xB2, 0x00, 0x00, 0xF2, 0xB6, 0xC3, 0xBA, /* 0x70-0x73 */
+	0xF2, 0xB7, 0xF2, 0xB0, 0xF2, 0xAF, 0xF2, 0xB3, /* 0x74-0x77 */
+	0xF2, 0xB1, 0xC3, 0xB6, 0xF2, 0xB5, 0xF4, 0xAC, /* 0x78-0x7B */
+	0xC4, 0x7E, 0xC4, 0x7D, 0xF4, 0xAD, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xF4, 0xAF, 0xF4, 0xAE, 0xC4, 0xA1, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0xEB, 0xF5, 0xE8, /* 0x84-0x87 */
+	0xF5, 0xE9, 0x00, 0x00, 0xF5, 0xE7, 0xF5, 0xEA, /* 0x88-0x8B */
+	0xC4, 0xF2, 0xF5, 0xEC, 0x00, 0x00, 0xC4, 0xF1, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF7, 0x42, 0x00, 0x00, 0xC5, 0xD5, /* 0x90-0x93 */
+	0xC5, 0xD7, 0xF7, 0xEE, 0xC5, 0xD6, 0xF8, 0xB9, /* 0x94-0x97 */
+	0xF9, 0x40, 0xF9, 0x42, 0xF8, 0xFE, 0xF9, 0x41, /* 0x98-0x9B */
+	0xC6, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_7F[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xA6, 0xCE, 0x00, 0x00, /* 0x34-0x37 */
+	0xAC, 0xFB, 0xD2, 0x6F, 0xAF, 0xCA, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xB2, 0xDA, 0xDA, 0xFC, 0xDA, 0xFD, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEA, 0xDF, /* 0x40-0x43 */
+	0xC1, 0x6A, 0xED, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xC2, 0xBB, 0x00, 0x00, 0xF2, 0xBA, 0xF2, 0xB9, /* 0x48-0x4B */
+	0xC4, 0xA2, 0xF5, 0xED, 0x00, 0x00, 0xF7, 0x43, /* 0x4C-0x4F */
+	0xC5, 0xF8, 0xCA, 0x49, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xAA, 0xC9, 0xA8, 0x75, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xD0, 0x4D, 0x00, 0x00, 0x00, 0x00, 0xD3, 0x60, /* 0x58-0x5B */
+	0xD3, 0x5B, 0xD3, 0x5F, 0xD3, 0x5D, 0xAF, 0xCB, /* 0x5C-0x5F */
+	0xD3, 0x5E, 0xD3, 0x5C, 0x00, 0x00, 0xD6, 0xF1, /* 0x60-0x63 */
+	0x00, 0x00, 0xDA, 0xFE, 0xDB, 0x40, 0xDF, 0x69, /* 0x64-0x67 */
+	0xDF, 0x6A, 0xB8, 0x6E, 0xB8, 0x6F, 0xDF, 0x68, /* 0x68-0x6B */
+	0xDF, 0x6B, 0xDF, 0x67, 0xB8, 0x6D, 0x00, 0x00, /* 0x6C-0x6F */
+	0xBB, 0x40, 0x00, 0x00, 0xB8, 0x70, 0xE3, 0x7A, /* 0x70-0x73 */
+	0x00, 0x00, 0xBD, 0x7C, 0xE6, 0xF1, 0xBD, 0x7D, /* 0x74-0x77 */
+	0x00, 0x00, 0xBF, 0xA9, 0xEA, 0xE2, 0xEA, 0xE0, /* 0x78-0x7B */
+	0xEA, 0xE1, 0xED, 0xE4, 0xED, 0xE3, 0xED, 0xE2, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF2, 0xBB, /* 0x80-0x83 */
+	0x00, 0x00, 0xC3, 0xB9, 0xF2, 0xBC, 0xF7, 0x44, /* 0x84-0x87 */
+	0xC5, 0xF9, 0xF8, 0xBA, 0xA6, 0xCF, 0xAA, 0xCB, /* 0x88-0x8B */
+	0xAA, 0xCA, 0xD0, 0x4F, 0xAC, 0xFC, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD0, 0x4E, 0xD3, 0x62, 0x00, 0x00, /* 0x90-0x93 */
+	0xAF, 0xCC, 0xD6, 0xF2, 0xD3, 0x61, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xDC, 0xD6, 0xF5, /* 0x98-0x9B */
+	0xD6, 0xF3, 0xD6, 0xF4, 0xB2, 0xDB, 0x00, 0x00, /* 0x9C-0x9F */
+	0xDB, 0x42, 0xDB, 0x43, 0xDB, 0x41, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xB8, 0x73, 0xDF, 0x6D, 0xDF, 0x6C, 0xDF, 0x6E, /* 0xA4-0xA7 */
+	0xB8, 0x72, 0xB8, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE6, 0xF2, 0xE6, 0xF4, 0x00, 0x00, 0xBD, 0x7E, /* 0xAC-0xAF */
+	0xE6, 0xF3, 0xEA, 0xE3, 0xBF, 0xAA, 0xF0, 0x79, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF0, 0x78, 0xC3, 0xBB, 0xF2, 0xBD, /* 0xB4-0xB7 */
+	0xC3, 0xBD, 0xC3, 0xBC, 0xF4, 0xB0, 0xF5, 0xEE, /* 0xB8-0xBB */
+	0xC4, 0xF3, 0xA6, 0xD0, 0xD0, 0x50, 0xAC, 0xFD, /* 0xBC-0xBF */
+	0xD3, 0x65, 0xAF, 0xCE, 0xD3, 0x64, 0xD3, 0x63, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xAF, 0xCD, 0x00, 0x00, 0xD6, 0xFB, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xD6, 0xFD, 0xD6, 0xF6, 0xD6, 0xF7, /* 0xC8-0xCB */
+	0xB2, 0xDD, 0xD6, 0xF8, 0xB2, 0xDE, 0xD6, 0xFC, /* 0xCC-0xCF */
+	0xD6, 0xF9, 0xD6, 0xFA, 0xB2, 0xDF, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xB5, 0xBE, 0xB5, 0xBF, 0x00, 0x00, 0xDB, 0x44, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0x6F, /* 0xD8-0xDB */
+	0xDF, 0x70, 0x00, 0x00, 0xE3, 0x7E, 0xBB, 0x43, /* 0xDC-0xDF */
+	0xBB, 0x41, 0xBB, 0x42, 0xE3, 0x7B, 0xE3, 0x7C, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xE3, 0x7D, 0xE6, 0xF9, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE6, 0xFA, 0xBD, 0xA1, 0xE6, 0xF7, 0xE6, 0xF6, /* 0xE8-0xEB */
+	0xE6, 0xF8, 0xE6, 0xF5, 0xBF, 0xAD, 0xEA, 0xE4, /* 0xEC-0xEF */
+	0xBF, 0xAB, 0xBF, 0xAC, 0xED, 0xE6, 0xC1, 0x6B, /* 0xF0-0xF3 */
+	0xED, 0xE5, 0xEF, 0xA8, 0x00, 0x00, 0xF0, 0x7A, /* 0xF4-0xF7 */
+	0xF0, 0x7B, 0xC2, 0xBC, 0x00, 0x00, 0xC2, 0xBD, /* 0xF8-0xFB */
+	0xC1, 0x6C, 0xF2, 0xBE, 0xF2, 0xBF, 0xF4, 0xB1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_80[512] = {
+	0xC4, 0xA3, 0xA6, 0xD1, 0x00, 0x00, 0xA6, 0xD2, /* 0x00-0x03 */
+	0xAC, 0xFE, 0xAA, 0xCC, 0xAF, 0xCF, 0xD0, 0x51, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB5, 0xC0, /* 0x08-0x0B */
+	0xA6, 0xD3, 0xAD, 0x41, 0xD0, 0x52, 0xD0, 0x53, /* 0x0C-0x0F */
+	0xAD, 0x40, 0xAD, 0x42, 0xA6, 0xD4, 0x00, 0x00, /* 0x10-0x13 */
+	0xD0, 0x54, 0xAF, 0xD1, 0xD3, 0x66, 0xAF, 0xD3, /* 0x14-0x17 */
+	0xAF, 0xD0, 0xAF, 0xD2, 0x00, 0x00, 0xD7, 0x41, /* 0x18-0x1B */
+	0xB2, 0xE0, 0x00, 0x00, 0xD7, 0x40, 0xD6, 0xFE, /* 0x1C-0x1F */
+	0x00, 0x00, 0xDF, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xE3, 0xA1, 0x00, 0x00, 0xBD, 0xA2, 0x00, 0x00, /* 0x24-0x27 */
+	0xBF, 0xAE, 0xEA, 0xE6, 0xEA, 0xE5, 0x00, 0x00, /* 0x28-0x2B */
+	0xED, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xF5, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xA6, 0xD5, /* 0x30-0x33 */
+	0xCB, 0x73, 0xCD, 0xAA, 0xAD, 0x43, 0xD0, 0x55, /* 0x34-0x37 */
+	0x00, 0x00, 0xD3, 0x68, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xAF, 0xD4, 0xD3, 0x67, 0xAF, 0xD5, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0x43, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xB2, 0xE2, 0xD7, 0x42, /* 0x44-0x47 */
+	0xD7, 0x44, 0x00, 0x00, 0xB2, 0xE1, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0x46, /* 0x4C-0x4F */
+	0xDB, 0x47, 0xDB, 0x45, 0xB5, 0xC1, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0x74, 0x00, 0x00, /* 0x54-0x57 */
+	0xB8, 0x75, 0x00, 0x00, 0xBB, 0x45, 0x00, 0x00, /* 0x58-0x5B */
+	0xE3, 0xA3, 0xE3, 0xA2, 0xBB, 0x44, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xE6, 0xFB, 0x00, 0x00, 0x00, 0x00, 0xE6, 0xFC, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xEA, 0xE7, 0x00, 0x00, 0x00, 0x00, 0xC1, 0x70, /* 0x6C-0x6F */
+	0xC1, 0x6F, 0xC1, 0x6D, 0xC1, 0x6E, 0xC1, 0x71, /* 0x70-0x73 */
+	0x00, 0x00, 0xF0, 0x7C, 0xC2, 0xBF, 0xC2, 0xBE, /* 0x74-0x77 */
+	0xF2, 0xC0, 0xF4, 0xB2, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xC5, 0xA5, 0xC5, 0xA4, 0xA6, 0xD6, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xD1, 0xFB, 0x00, 0x00, /* 0x80-0x83 */
+	0xB8, 0x77, 0xB5, 0xC2, 0xB8, 0x76, 0xBB, 0x46, /* 0x84-0x87 */
+	0x00, 0x00, 0xA6, 0xD7, 0xC9, 0xA9, 0xA6, 0xD8, /* 0x88-0x8B */
+	0xA6, 0xD9, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xAB, /* 0x8C-0x8F */
+	0xCB, 0x76, 0x00, 0x00, 0xCB, 0x77, 0xA8, 0x77, /* 0x90-0x93 */
+	0x00, 0x00, 0xCB, 0x74, 0xA8, 0x76, 0x00, 0x00, /* 0x94-0x97 */
+	0xA8, 0x79, 0xCB, 0x75, 0xA8, 0x7B, 0xA8, 0x7A, /* 0x98-0x9B */
+	0xCB, 0x78, 0xA8, 0x78, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xAA, 0xD1, 0xAA, 0xCF, 0xCD, 0xAD, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xAA, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xAA, 0xD3, 0xAA, 0xD5, 0xAA, 0xD2, /* 0xA8-0xAB */
+	0x00, 0x00, 0xCD, 0xB0, 0xCD, 0xAC, 0xAA, 0xD6, /* 0xAC-0xAF */
+	0x00, 0x00, 0xAA, 0xD0, 0xA8, 0x7C, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xAA, 0xD4, 0xCD, 0xAF, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xCD, 0xAE, 0x00, 0x00, 0xAA, 0xCD, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0x5B, 0xAD, 0x47, /* 0xC0-0xC3 */
+	0xAD, 0x48, 0xD0, 0x5D, 0x00, 0x00, 0xD0, 0x57, /* 0xC4-0xC7 */
+	0xD0, 0x5A, 0xD0, 0x63, 0xD0, 0x61, 0x00, 0x00, /* 0xC8-0xCB */
+	0xAD, 0x49, 0xD0, 0x67, 0xAD, 0x4C, 0xD0, 0x64, /* 0xCC-0xCF */
+	0xD0, 0x5C, 0xD0, 0x59, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xDB, 0x49, 0xD0, 0x62, 0xAD, 0x44, 0xD0, 0x65, /* 0xD4-0xD7 */
+	0xD0, 0x56, 0xD0, 0x5F, 0xAD, 0x46, 0xAD, 0x4B, /* 0xD8-0xDB */
+	0xD0, 0x60, 0xAD, 0x4F, 0xAD, 0x4D, 0x00, 0x00, /* 0xDC-0xDF */
+	0xD0, 0x58, 0xAD, 0x4A, 0x00, 0x00, 0xD0, 0x5E, /* 0xE0-0xE3 */
+	0xAD, 0x4E, 0xAD, 0x45, 0xD0, 0x66, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xAF, 0xDA, 0x00, 0x00, 0xAF, 0xE3, /* 0xEC-0xEF */
+	0xAF, 0xD8, 0xAF, 0xD6, 0xD3, 0x6A, 0xAF, 0xDE, /* 0xF0-0xF3 */
+	0xAF, 0xDB, 0xD3, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xAF, 0xDD, 0xD3, 0x6B, 0xD3, 0x69, 0xD3, 0x6E, /* 0xF8-0xFB */
+	0xAF, 0xE2, 0xAF, 0xE0, 0xDB, 0x48, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_81[512] = {
+	0xD3, 0x6F, 0xD3, 0x6D, 0xAF, 0xD7, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xAF, 0xD9, 0xAF, 0xDC, 0x00, 0x00, /* 0x04-0x07 */
+	0xAF, 0xDF, 0x00, 0x00, 0xAF, 0xE1, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xD7, 0x4E, 0xB2, 0xE4, 0x00, 0x00, /* 0x14-0x17 */
+	0xD7, 0x45, 0xD7, 0x47, 0x00, 0x00, 0xD7, 0x48, /* 0x18-0x1B */
+	0x00, 0x00, 0xD7, 0x50, 0xD7, 0x4C, 0xD7, 0x4A, /* 0x1C-0x1F */
+	0x00, 0x00, 0xD7, 0x4D, 0xD7, 0x51, 0xB2, 0xE5, /* 0x20-0x23 */
+	0xB2, 0xE9, 0xD7, 0x46, 0x00, 0x00, 0xD7, 0x4F, /* 0x24-0x27 */
+	0x00, 0x00, 0xB2, 0xE7, 0x00, 0x00, 0xB2, 0xE6, /* 0x28-0x2B */
+	0xD7, 0x4B, 0xD7, 0x49, 0x00, 0x00, 0xB2, 0xE3, /* 0x2C-0x2F */
+	0xB2, 0xE8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xB5, 0xC8, 0xDB, 0x51, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xDB, 0x4F, 0xB5, 0xCA, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0x4A, /* 0x40-0x43 */
+	0xDF, 0xA1, 0x00, 0x00, 0xB5, 0xC9, 0xDB, 0x4E, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0x4B, 0xB5, 0xC5, /* 0x48-0x4B */
+	0xB5, 0xCB, 0xDB, 0x50, 0xB5, 0xC7, 0xDB, 0x4D, /* 0x4C-0x4F */
+	0xBB, 0x47, 0xB5, 0xC6, 0xDB, 0x4C, 0xB5, 0xCC, /* 0x50-0x53 */
+	0xB5, 0xC4, 0xB5, 0xC3, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0x77, /* 0x58-0x5B */
+	0xDF, 0x75, 0x00, 0x00, 0xDF, 0x7B, 0x00, 0x00, /* 0x5C-0x5F */
+	0xDF, 0x73, 0xDF, 0xA2, 0xDF, 0x78, 0x00, 0x00, /* 0x60-0x63 */
+	0xDF, 0x72, 0xB8, 0x7B, 0xB8, 0xA3, 0xDF, 0x7D, /* 0x64-0x67 */
+	0x00, 0x00, 0xDF, 0x76, 0x00, 0x00, 0xB8, 0x7E, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0x7C, 0xDF, 0x7E, /* 0x6C-0x6F */
+	0xB8, 0x79, 0xB8, 0x78, 0xDF, 0x79, 0xB8, 0x7D, /* 0x70-0x73 */
+	0xB5, 0xCD, 0x00, 0x00, 0xDF, 0x7C, 0xDF, 0x74, /* 0x74-0x77 */
+	0xB8, 0x7A, 0xB8, 0xA1, 0xB8, 0xA2, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBB, 0x4C, /* 0x7C-0x7F */
+	
+	0xBB, 0x48, 0x00, 0x00, 0xBB, 0x4D, 0xE3, 0xA6, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xA5, 0xE3, 0xA7, /* 0x84-0x87 */
+	0xBB, 0x4A, 0xE3, 0xA4, 0xBB, 0x4B, 0xE3, 0xAA, /* 0x88-0x8B */
+	0xE3, 0xA9, 0xE3, 0xA8, 0x00, 0x00, 0xBB, 0x49, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0xE7, 0x41, 0x00, 0x00, 0xE7, 0x44, /* 0x94-0x97 */
+	0xBD, 0xA8, 0xE7, 0x43, 0xBD, 0xA7, 0xBD, 0xA3, /* 0x98-0x9B */
+	0xBD, 0xA4, 0xBD, 0xA5, 0xE7, 0x40, 0xE6, 0xFE, /* 0x9C-0x9F */
+	0xBD, 0xA6, 0x00, 0x00, 0xE7, 0x42, 0xE6, 0xFD, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xE9, 0xEA, 0xF3, /* 0xA4-0xA7 */
+	0xBF, 0xB1, 0xBF, 0xB0, 0x00, 0x00, 0xEA, 0xED, /* 0xA8-0xAB */
+	0xEA, 0xEF, 0x00, 0x00, 0xEA, 0xEA, 0x00, 0x00, /* 0xAC-0xAF */
+	0xEA, 0xEE, 0xEA, 0xE8, 0xEA, 0xF1, 0xBF, 0xAF, /* 0xB0-0xB3 */
+	0xEA, 0xF0, 0xEA, 0xEC, 0x00, 0x00, 0xEA, 0xF2, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xEA, 0xEB, 0xC1, 0x74, 0xED, 0xE8, /* 0xB8-0xBB */
+	0xED, 0xEE, 0xC1, 0x78, 0xC1, 0x7A, 0xC1, 0x77, /* 0xBC-0xBF */
+	0xC1, 0x76, 0x00, 0x00, 0xC1, 0x75, 0xC1, 0x73, /* 0xC0-0xC3 */
+	0xED, 0xE9, 0xED, 0xEC, 0xC1, 0x72, 0xED, 0xED, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xC1, 0x79, 0xED, 0xEB, 0x00, 0x00, /* 0xC8-0xCB */
+	0xED, 0xEA, 0xC2, 0xC0, 0x00, 0x00, 0xC2, 0xC1, /* 0xCC-0xCF */
+	0xF0, 0xA1, 0xF0, 0x7D, 0xF0, 0x7E, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xF2, 0xC2, 0x00, 0x00, 0xF2, 0xC1, /* 0xD4-0xD7 */
+	0xC3, 0xBE, 0xF4, 0xB4, 0xC4, 0xA4, 0xF4, 0xB3, /* 0xD8-0xDB */
+	0x00, 0x00, 0xF5, 0xF0, 0xF7, 0x45, 0xC5, 0xA6, /* 0xDC-0xDF */
+	0xF9, 0x43, 0xF9, 0x44, 0xC5, 0xD8, 0xA6, 0xDA, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xAA, 0xD7, 0xDB, 0x52, 0xBB, 0x4E, /* 0xE4-0xE7 */
+	0xC1, 0x7B, 0xED, 0xEF, 0xA6, 0xDB, 0x00, 0x00, /* 0xE8-0xEB */
+	0xAF, 0xE5, 0xAF, 0xE4, 0xDB, 0x53, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xEA, 0xF4, 0xA6, 0xDC, /* 0xF0-0xF3 */
+	0xAD, 0x50, 0x00, 0x00, 0x00, 0x00, 0xDB, 0x54, /* 0xF4-0xF7 */
+	0xDB, 0x55, 0xDB, 0x56, 0xBB, 0x4F, 0xBF, 0xB2, /* 0xF8-0xFB */
+	0xA6, 0xDD, 0x00, 0x00, 0xAA, 0xD8, 0xD0, 0x68, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_82[512] = {
+	0xAF, 0xE6, 0xD3, 0x70, 0xB2, 0xEA, 0x00, 0x00, /* 0x00-0x03 */
+	0xDB, 0x57, 0xB8, 0xA4, 0x00, 0x00, 0xBB, 0x50, /* 0x04-0x07 */
+	0xBF, 0xB3, 0xC1, 0x7C, 0xC2, 0xC2, 0xF4, 0xB5, /* 0x08-0x0B */
+	0xA6, 0xDE, 0xAA, 0xD9, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xAF, 0xE7, 0xD7, 0x52, 0xB5, 0xCE, 0x00, 0x00, /* 0x10-0x13 */
+	0xBB, 0x51, 0xE3, 0xAB, 0xE7, 0x45, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA6, 0xDF, /* 0x18-0x1B */
+	0xB5, 0xCF, 0xDF, 0xA3, 0xBB, 0x52, 0xA6, 0xE0, /* 0x1C-0x1F */
+	0xCD, 0xB1, 0xD0, 0x69, 0xAD, 0x51, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xD3, 0x72, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xAF, 0xEA, 0x00, 0x00, 0xAF, 0xE8, 0xAF, 0xE9, /* 0x28-0x2B */
+	0xAF, 0xEB, 0x00, 0x00, 0x00, 0x00, 0xD3, 0x71, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0x57, 0xD7, 0x54, /* 0x30-0x33 */
+	0xD7, 0x56, 0xB2, 0xEB, 0xB2, 0xED, 0xB2, 0xEC, /* 0x34-0x37 */
+	0xD7, 0x53, 0xB2, 0xEE, 0xD7, 0x55, 0x00, 0x00, /* 0x38-0x3B */
+	0xDB, 0x58, 0xDB, 0x59, 0x00, 0x00, 0xDB, 0x5A, /* 0x3C-0x3F */
+	0xDF, 0xA6, 0x00, 0x00, 0xDF, 0xA7, 0x00, 0x00, /* 0x40-0x43 */
+	0xDF, 0xA5, 0xDF, 0xA8, 0x00, 0x00, 0xB8, 0xA5, /* 0x44-0x47 */
+	0x00, 0x00, 0xDF, 0xA4, 0x00, 0x00, 0xBB, 0x53, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0x4A, 0xE7, 0x46, /* 0x4C-0x4F */
+	0xE7, 0x49, 0xE7, 0x4B, 0xE7, 0x48, 0xE7, 0x47, /* 0x50-0x53 */
+	0x00, 0x00, 0xEA, 0xF5, 0xEA, 0xF6, 0xEA, 0xF7, /* 0x54-0x57 */
+	0xBF, 0xB4, 0xBF, 0xB5, 0xED, 0xF1, 0xED, 0xF0, /* 0x58-0x5B */
+	0xED, 0xF2, 0x00, 0x00, 0xF0, 0xA3, 0xF0, 0xA2, /* 0x5C-0x5F */
+	0x00, 0x00, 0xF2, 0xC4, 0x00, 0x00, 0xF2, 0xC5, /* 0x60-0x63 */
+	0xF2, 0xC3, 0x00, 0x00, 0xC4, 0xA5, 0x00, 0x00, /* 0x64-0x67 */
+	0xF4, 0xB6, 0xF4, 0xB7, 0x00, 0x00, 0xF7, 0x46, /* 0x68-0x6B */
+	0xF7, 0xEF, 0xF8, 0xBB, 0xA6, 0xE1, 0xA8, 0x7D, /* 0x6C-0x6F */
+	0x00, 0x00, 0xC1, 0x7D, 0xA6, 0xE2, 0x00, 0x00, /* 0x70-0x73 */
+	0xD7, 0x58, 0xDB, 0x5B, 0x00, 0x00, 0xC6, 0x41, /* 0x74-0x77 */
+	0xCA, 0x4A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xCA, 0x4B, 0xCA, 0x4D, 0xA6, 0xE3, 0xCA, 0x4E, /* 0x7C-0x7F */
+	
+	0xCA, 0x4C, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xA2, /* 0x80-0x83 */
+	0xCB, 0xA3, 0xCB, 0x7B, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xA1, 0xA8, 0xA1, /* 0x88-0x8B */
+	0x00, 0x00, 0xA8, 0xA2, 0xCB, 0x7C, 0xCB, 0x7A, /* 0x8C-0x8F */
+	0xCB, 0x79, 0xCB, 0x7D, 0xA8, 0x7E, 0xCB, 0x7E, /* 0x90-0x93 */
+	0xD0, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xCD, 0xB6, 0xAA, 0xDC, 0xCD, 0xB5, 0xCD, 0xB7, /* 0x98-0x9B */
+	0x00, 0x00, 0xAA, 0xDB, 0xCD, 0xBC, 0xAA, 0xDF, /* 0x9C-0x9F */
+	0xCD, 0xB2, 0xCD, 0xC0, 0xCD, 0xC6, 0xAA, 0xE6, /* 0xA0-0xA3 */
+	0xCD, 0xC3, 0xAA, 0xE3, 0x00, 0x00, 0xCD, 0xB9, /* 0xA4-0xA7 */
+	0xCD, 0xBF, 0xCD, 0xC1, 0x00, 0x00, 0xCD, 0xB4, /* 0xA8-0xAB */
+	0xAA, 0xE2, 0xAA, 0xDD, 0xCD, 0xBA, 0xAA, 0xE4, /* 0xAC-0xAF */
+	0xAA, 0xE7, 0xAA, 0xE1, 0x00, 0x00, 0xAA, 0xDA, /* 0xB0-0xB3 */
+	0xCD, 0xBE, 0xCD, 0xB8, 0xCD, 0xC5, 0xAA, 0xE9, /* 0xB4-0xB7 */
+	0xAA, 0xE5, 0xAA, 0xE0, 0xCD, 0xBD, 0xAF, 0xEC, /* 0xB8-0xBB */
+	0xCD, 0xBB, 0xAA, 0xDE, 0xAA, 0xE8, 0x00, 0x00, /* 0xBC-0xBF */
+	0xCD, 0xB3, 0x00, 0x00, 0xCD, 0xC2, 0xCD, 0xC4, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xAD, 0x62, 0xAD, 0x5C, 0xAD, 0x64, /* 0xD0-0xD3 */
+	0xAD, 0x61, 0xD0, 0x71, 0xD0, 0x74, 0xAD, 0x5D, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xD0, 0x6B, 0x00, 0x00, 0xAD, 0x56, /* 0xD8-0xDB */
+	0xAD, 0x60, 0x00, 0x00, 0xAD, 0x63, 0xAD, 0x65, /* 0xDC-0xDF */
+	0xD0, 0xA2, 0xD0, 0x77, 0x00, 0x00, 0xAD, 0x55, /* 0xE0-0xE3 */
+	0xD0, 0xA1, 0xAD, 0x59, 0xAD, 0x57, 0xAD, 0x52, /* 0xE4-0xE7 */
+	0xD0, 0x6F, 0x00, 0x00, 0xD0, 0x7E, 0xD0, 0x73, /* 0xE8-0xEB */
+	0xD0, 0x76, 0xD0, 0xA5, 0x00, 0x00, 0xAD, 0x66, /* 0xEC-0xEF */
+	0xD0, 0x7D, 0xAD, 0x5E, 0xD0, 0x78, 0xD0, 0xA4, /* 0xF0-0xF3 */
+	0xD0, 0x75, 0xD0, 0x79, 0xD0, 0x7C, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xD0, 0x6D, 0xD0, 0xA3, 0xD0, 0x7B, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0x6C, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_83[512] = {
+	0xD0, 0x70, 0xAD, 0x5F, 0xAD, 0x5A, 0xAD, 0x53, /* 0x00-0x03 */
+	0xAD, 0x58, 0xAD, 0x54, 0xAD, 0x67, 0xD0, 0x6E, /* 0x04-0x07 */
+	0xD3, 0xA5, 0xAD, 0x5B, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0xD0, 0x7A, 0xCE, 0x41, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xD3, 0xA8, 0xAF, 0xFA, /* 0x14-0x17 */
+	0x00, 0x00, 0xD3, 0x76, 0x00, 0x00, 0xD3, 0xA3, /* 0x18-0x1B */
+	0xD3, 0x7D, 0x00, 0x00, 0xD3, 0xB2, 0x00, 0x00, /* 0x1C-0x1F */
+	0xD3, 0xAA, 0x00, 0x00, 0xD3, 0x7E, 0x00, 0x00, /* 0x20-0x23 */
+	0xD3, 0xA9, 0xD3, 0x78, 0xD3, 0x7C, 0xD3, 0xB5, /* 0x24-0x27 */
+	0xAF, 0xFD, 0xD3, 0xAD, 0xD3, 0xA4, 0xAF, 0xED, /* 0x28-0x2B */
+	0xD3, 0xB3, 0xD3, 0x74, 0x00, 0x00, 0xD3, 0xAC, /* 0x2C-0x2F */
+	0x00, 0x00, 0xAF, 0xFC, 0xAF, 0xF7, 0xD3, 0x73, /* 0x30-0x33 */
+	0xAF, 0xF5, 0xAF, 0xF4, 0xAF, 0xF9, 0xD3, 0xAB, /* 0x34-0x37 */
+	0xAF, 0xF1, 0xAF, 0xF8, 0xD0, 0x72, 0xDB, 0x5C, /* 0x38-0x3B */
+	0xD3, 0xA6, 0x00, 0x00, 0x00, 0x00, 0xD3, 0x7A, /* 0x3C-0x3F */
+	0xAF, 0xFB, 0xD3, 0x7B, 0xD3, 0xA1, 0xAF, 0xFE, /* 0x40-0x43 */
+	0xD3, 0x75, 0xD3, 0xAF, 0x00, 0x00, 0xD3, 0xAE, /* 0x44-0x47 */
+	0xD3, 0xB6, 0xAF, 0xF3, 0xAF, 0xF0, 0xD3, 0xB4, /* 0x48-0x4B */
+	0xD3, 0xB0, 0xD3, 0xA7, 0xD3, 0xA2, 0xAF, 0xF6, /* 0x4C-0x4F */
+	0xAF, 0xF2, 0xD3, 0x77, 0xAF, 0xEE, 0xD3, 0xB1, /* 0x50-0x53 */
+	0xAF, 0xEF, 0x00, 0x00, 0xD3, 0x79, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0x5E, /* 0x70-0x73 */
+	0xD7, 0x60, 0xD7, 0x65, 0xD7, 0x79, 0xB2, 0xFC, /* 0x74-0x77 */
+	0xB2, 0xF2, 0x00, 0x00, 0xD7, 0x5D, 0xB2, 0xFD, /* 0x78-0x7B */
+	0xB2, 0xFE, 0xD7, 0x68, 0xD7, 0x6F, 0xD7, 0x75, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xD7, 0x62, 0x00, 0x00, 0xD7, 0x69, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0x40, 0xD7, 0x77, /* 0x84-0x87 */
+	0xD7, 0x72, 0xB2, 0xFA, 0xB2, 0xF8, 0xD7, 0x6E, /* 0x88-0x8B */
+	0xD7, 0x6A, 0xD7, 0x5C, 0xB2, 0xEF, 0xD7, 0x61, /* 0x8C-0x8F */
+	0xD7, 0x59, 0x00, 0x00, 0xB2, 0xF7, 0xB2, 0xF9, /* 0x90-0x93 */
+	0xD7, 0x66, 0xD7, 0x63, 0xB2, 0xF4, 0xD7, 0x73, /* 0x94-0x97 */
+	0xB2, 0xF1, 0xD7, 0x64, 0xD7, 0x7A, 0xD7, 0x6C, /* 0x98-0x9B */
+	0x00, 0x00, 0xD7, 0x6B, 0xB2, 0xF0, 0x00, 0x00, /* 0x9C-0x9F */
+	0xB2, 0xFB, 0x00, 0x00, 0xB2, 0xF3, 0xD7, 0x5A, /* 0xA0-0xA3 */
+	0xD7, 0x5F, 0xD7, 0x70, 0xD7, 0x76, 0xB3, 0x41, /* 0xA4-0xA7 */
+	0xD7, 0x5B, 0xD7, 0x67, 0xD7, 0x6D, 0xB2, 0xF6, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0x78, 0xD7, 0x71, /* 0xAC-0xAF */
+	0xD7, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xB2, 0xF5, 0x00, 0x00, 0xDB, 0x6C, /* 0xBC-0xBF */
+	0xDB, 0x60, 0xB5, 0xD7, 0xDB, 0x7D, 0xDB, 0xA7, /* 0xC0-0xC3 */
+	0xDB, 0xAA, 0xB5, 0xD5, 0xDB, 0x68, 0xDB, 0xA3, /* 0xC4-0xC7 */
+	0xDB, 0x69, 0xDB, 0x77, 0xB5, 0xE2, 0xDB, 0x73, /* 0xC8-0xCB */
+	0xB5, 0xDF, 0x00, 0x00, 0xDB, 0x74, 0xDB, 0x5D, /* 0xCC-0xCF */
+	0x00, 0x00, 0xDB, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xB5, 0xE8, 0xDB, 0xA1, 0xDB, 0x75, 0xDB, 0xAC, /* 0xD4-0xD7 */
+	0xDB, 0x70, 0xDF, 0xC8, 0x00, 0x00, 0xDB, 0xAF, /* 0xD8-0xDB */
+	0xB5, 0xE6, 0xDB, 0x6E, 0xDB, 0x7A, 0xB5, 0xE9, /* 0xDC-0xDF */
+	0xB5, 0xD4, 0xDB, 0x72, 0xDB, 0xAD, 0xDB, 0x6B, /* 0xE0-0xE3 */
+	0xDB, 0x64, 0xDB, 0x6F, 0x00, 0x00, 0xDB, 0x63, /* 0xE4-0xE7 */
+	0xDB, 0x61, 0xB5, 0xD0, 0xDB, 0xA5, 0xDB, 0x6A, /* 0xE8-0xEB */
+	0xDB, 0xA8, 0x00, 0x00, 0xDB, 0xA9, 0xB5, 0xD8, /* 0xEC-0xEF */
+	0xB5, 0xDD, 0xB5, 0xD9, 0xB5, 0xE1, 0xDB, 0x7E, /* 0xF0-0xF3 */
+	0xB5, 0xDA, 0xDB, 0x76, 0xDB, 0x66, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xB5, 0xD2, 0xDB, 0x5E, 0xDB, 0xA2, 0xDB, 0xAB, /* 0xF8-0xFB */
+	0xDB, 0x65, 0xB5, 0xE0, 0xDB, 0xB0, 0xDB, 0x71, /* 0xFC-0xFF */
+};
+
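+/*
+ * Layout note: each u2c_<hh> table maps the 256 Unicode code points
+ * U+<hh>00..U+<hh>FF to two output bytes apiece (lead byte then trail byte
+ * of what appears to be a Big5/CP950-style double-byte code, lead bytes
+ * 0xA1-0xF9); a 0x00, 0x00 pair marks a code point with no mapping.
+ */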
+static unsigned char u2c_84[512] = {
+	0x00, 0x00, 0xDB, 0x6D, 0x00, 0x00, 0xB5, 0xD1, /* 0x00-0x03 */
+	0xB5, 0xE5, 0x00, 0x00, 0xDB, 0x7C, 0xB5, 0xE7, /* 0x04-0x07 */
+	0x00, 0x00, 0xDB, 0x78, 0xB5, 0xDC, 0xB5, 0xD6, /* 0x08-0x0B */
+	0xB5, 0xDE, 0xB5, 0xD3, 0xB5, 0xE4, 0xDB, 0x79, /* 0x0C-0x0F */
+	0xDB, 0x67, 0xDB, 0x7B, 0xDB, 0x62, 0xDB, 0xA6, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0xAE, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB, 0x5F, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xDF, 0xC7, 0x00, 0x00, 0xDF, 0xDD, /* 0x28-0x2B */
+	0xB8, 0x55, 0xDF, 0xCC, 0x00, 0x00, 0xDF, 0xCA, /* 0x2C-0x2F */
+	0xDF, 0xB5, 0xB8, 0xA9, 0xDF, 0xC5, 0xDF, 0xD9, /* 0x30-0x33 */
+	0xDF, 0xC1, 0xB8, 0xB1, 0xDF, 0xD8, 0xDF, 0xBF, /* 0x34-0x37 */
+	0xB5, 0xE3, 0xDF, 0xCF, 0xDF, 0xC0, 0xDF, 0xD6, /* 0x38-0x3B */
+	0xB8, 0xB0, 0xB8, 0xA8, 0x00, 0x00, 0xDF, 0xAA, /* 0x3C-0x3F */
+	0xDF, 0xB2, 0x00, 0x00, 0xDF, 0xCB, 0xDF, 0xC3, /* 0x40-0x43 */
+	0xDF, 0xDC, 0xDF, 0xC6, 0xB8, 0xB6, 0xDF, 0xD7, /* 0x44-0x47 */
+	0x00, 0x00, 0xB8, 0xAD, 0x00, 0x00, 0xDF, 0xC9, /* 0x48-0x4B */
+	0xDF, 0xD1, 0xDF, 0xB6, 0xDF, 0xD0, 0x00, 0x00, /* 0x4C-0x4F */
+	0xDF, 0xE1, 0xDF, 0xB1, 0xDF, 0xD2, 0x00, 0x00, /* 0x50-0x53 */
+	0xDF, 0xDF, 0x00, 0x00, 0xDF, 0xAB, 0xB5, 0xDB, /* 0x54-0x57 */
+	0x00, 0x00, 0xDF, 0xB9, 0xDF, 0xB8, 0xB8, 0xAF, /* 0x58-0x5B */
+	0x00, 0x00, 0xDF, 0xBC, 0xDF, 0xBE, 0xDF, 0xCD, /* 0x5C-0x5F */
+	0xDF, 0xDE, 0xB8, 0xB2, 0x00, 0x00, 0xB8, 0xB3, /* 0x60-0x63 */
+	0x00, 0x00, 0xDF, 0xB0, 0xB8, 0xAB, 0xDF, 0xB4, /* 0x64-0x67 */
+	0xDF, 0xDA, 0xB8, 0xB4, 0x00, 0x00, 0xB8, 0xAC, /* 0x68-0x6B */
+	0xB8, 0xAE, 0xB8, 0xB5, 0xDF, 0xE0, 0xDF, 0xD3, /* 0x6C-0x6F */
+	0xDF, 0xCE, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xBB, /* 0x70-0x73 */
+	0xDF, 0xBA, 0xB8, 0xAA, 0xDF, 0xAC, 0xB8, 0xA7, /* 0x74-0x77 */
+	0xDF, 0xC4, 0xDF, 0xAD, 0xDF, 0xC2, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0xDF, 0xB7, 0xDF, 0xDB, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0xA6, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xDF, 0xB3, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xDF, 0xAF, 0xDF, 0xD5, 0xDF, 0xAE, /* 0x8C-0x8F */
+	0xBB, 0x60, 0xE3, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xE3, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xAC, /* 0x94-0x97 */
+	0xE3, 0xCA, 0xBB, 0x58, 0xE3, 0xBB, 0xE3, 0xC5, /* 0x98-0x9B */
+	0xBB, 0x5B, 0xE3, 0xBE, 0xBB, 0x59, 0xE3, 0xAF, /* 0x9C-0x9F */
+	0xE3, 0xCD, 0xE3, 0xAE, 0xE3, 0xC1, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE3, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xBF, /* 0xA4-0xA7 */
+	0xE3, 0xC8, 0xE3, 0xC6, 0xE3, 0xBA, 0xE3, 0xB5, /* 0xA8-0xAB */
+	0xE3, 0xB3, 0x00, 0x00, 0xE3, 0xB4, 0xE3, 0xC7, /* 0xAC-0xAF */
+	0xE3, 0xD2, 0xE3, 0xBC, 0xBB, 0x5A, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xE3, 0xB7, 0x00, 0x00, 0xE3, 0xCB, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xBB, 0x5D, 0xE3, 0xB6, 0xE3, 0xB0, 0xE3, 0xC0, /* 0xB8-0xBB */
+	0xBB, 0x61, 0x00, 0x00, 0x00, 0x00, 0xBB, 0x55, /* 0xBC-0xBF */
+	0xBB, 0x5E, 0xE3, 0xB8, 0xE3, 0xB2, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xBB, 0x57, 0xDF, 0xD4, 0xBB, 0x56, 0xE3, 0xC3, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xBB, 0x54, 0xBB, 0x63, 0xBB, 0x5C, /* 0xC8-0xCB */
+	0xE3, 0xC4, 0xE3, 0xB9, 0xE3, 0xB1, 0xE3, 0xCC, /* 0xCC-0xCF */
+	0xE3, 0xBD, 0xBB, 0x62, 0xE3, 0xD0, 0xBB, 0x5F, /* 0xD0-0xD3 */
+	0xE3, 0xCF, 0x00, 0x00, 0xE3, 0xC9, 0xE3, 0xCE, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xD1, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x73, /* 0xE4-0xE7 */
+	0xE7, 0x74, 0xE7, 0x67, 0xE7, 0x66, 0xE7, 0x62, /* 0xE8-0xEB */
+	0xBD, 0xB4, 0x00, 0x00, 0xBD, 0xAC, 0xE7, 0x76, /* 0xEC-0xEF */
+	0xE7, 0x75, 0xDF, 0xA9, 0xE7, 0x5F, 0xE7, 0x63, /* 0xF0-0xF3 */
+	0xE7, 0x5D, 0x00, 0x00, 0xE7, 0x70, 0xE7, 0x61, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE7, 0x77, 0xE7, 0x5A, 0xE7, 0x58, /* 0xF8-0xFB */
+	0xE7, 0x64, 0xE7, 0x6E, 0xE7, 0x69, 0xBD, 0xB6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_85[512] = {
+	0xE7, 0x4F, 0x00, 0x00, 0xE7, 0x6D, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xBD, 0xB7, 0xDF, 0xBD, /* 0x04-0x07 */
+	0xE7, 0x5B, 0xE7, 0x52, 0xE7, 0x55, 0xE7, 0x7B, /* 0x08-0x0B */
+	0xE7, 0x5C, 0xE7, 0x53, 0xE7, 0x51, 0xE7, 0x4E, /* 0x0C-0x0F */
+	0x00, 0x00, 0xBD, 0xB0, 0xE7, 0x65, 0xBD, 0xAF, /* 0x10-0x13 */
+	0xBD, 0xB3, 0xE7, 0x60, 0xE7, 0x68, 0xBD, 0xA9, /* 0x14-0x17 */
+	0xE7, 0x78, 0xE7, 0x7C, 0xBD, 0xAB, 0x00, 0x00, /* 0x18-0x1B */
+	0xE7, 0x57, 0xE7, 0x6B, 0xE7, 0x6F, 0xE7, 0x54, /* 0x1C-0x1F */
+	0xE7, 0x79, 0xBD, 0xB2, 0x00, 0x00, 0xBD, 0xB1, /* 0x20-0x23 */
+	0xE7, 0x4C, 0xBD, 0xB5, 0xE7, 0x72, 0xE7, 0x56, /* 0x24-0x27 */
+	0xE7, 0x6A, 0xE7, 0x50, 0xE7, 0x5E, 0xE7, 0x59, /* 0x28-0x2B */
+	0xBD, 0xAD, 0xBD, 0xAE, 0xE7, 0x6C, 0xE7, 0x7D, /* 0x2C-0x2F */
+	0xE7, 0x7A, 0xE7, 0x71, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0x4D, /* 0x38-0x3B */
+	0x00, 0x00, 0xBD, 0xAA, 0xEB, 0x49, 0x00, 0x00, /* 0x3C-0x3F */
+	0xEB, 0x40, 0xEB, 0x43, 0x00, 0x00, 0xBF, 0xBB, /* 0x40-0x43 */
+	0xEB, 0x45, 0xEA, 0xF9, 0xEB, 0x41, 0xEB, 0x47, /* 0x44-0x47 */
+	0xBF, 0xB8, 0xBF, 0xBC, 0xBF, 0xB6, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0xEA, 0xFB, 0xEB, 0x4C, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0xEB, 0x46, 0x00, 0x00, 0xEA, 0xFC, /* 0x50-0x53 */
+	0xEB, 0x55, 0xEB, 0x4F, 0xEA, 0xF8, 0xEE, 0x46, /* 0x54-0x57 */
+	0xEA, 0xFE, 0xBF, 0xB7, 0x00, 0x00, 0xEB, 0x4A, /* 0x58-0x5B */
+	0x00, 0x00, 0xEB, 0x54, 0xBF, 0xBF, 0x00, 0x00, /* 0x5C-0x5F */
+	0xEB, 0x51, 0xEA, 0xFD, 0xEB, 0x44, 0xEB, 0x48, /* 0x60-0x63 */
+	0xEB, 0x42, 0xEB, 0x56, 0xEB, 0x53, 0xEB, 0x50, /* 0x64-0x67 */
+	0xBF, 0xB9, 0xBF, 0xBA, 0xBF, 0xBE, 0xEA, 0xFA, /* 0x68-0x6B */
+	0xEB, 0x57, 0xBF, 0xBD, 0xEB, 0x4D, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xEB, 0x4B, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xEB, 0x4E, 0xEE, 0x53, 0xEE, 0x40, /* 0x74-0x77 */
+	0xEE, 0x45, 0xEE, 0x52, 0xEE, 0x44, 0xED, 0xFB, /* 0x78-0x7B */
+	0xEE, 0x41, 0x00, 0x00, 0xC1, 0xA2, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xED, 0xF4, 0xEE, 0x4D, 0xEE, 0x4F, 0xED, 0xF3, /* 0x80-0x83 */
+	0xC1, 0xA1, 0xEE, 0x51, 0xEE, 0x49, 0xC1, 0xA8, /* 0x84-0x87 */
+	0xEE, 0x50, 0xEE, 0x42, 0xC1, 0xAA, 0xED, 0xF9, /* 0x88-0x8B */
+	0xEB, 0x52, 0xEE, 0x4A, 0xEE, 0x47, 0xED, 0xF5, /* 0x8C-0x8F */
+	0xEE, 0x55, 0xC1, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xC1, 0xA5, 0xED, 0xF7, 0xEE, 0x48, 0x00, 0x00, /* 0x94-0x97 */
+	0xEE, 0x54, 0xEE, 0x4B, 0xED, 0xFD, 0xC1, 0xA7, /* 0x98-0x9B */
+	0xC1, 0xA3, 0xEE, 0x4C, 0xED, 0xFE, 0xEE, 0x56, /* 0x9C-0x9F */
+	0xED, 0xF8, 0xEE, 0x43, 0xEE, 0x4E, 0xED, 0xFA, /* 0xA0-0xA3 */
+	0xED, 0xFC, 0x00, 0x00, 0xC2, 0xCB, 0xED, 0xF6, /* 0xA4-0xA7 */
+	0xC1, 0xA9, 0xC2, 0xC4, 0xC1, 0x7E, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC1, 0xA6, /* 0xAC-0xAF */
+	0xC2, 0xC8, 0xF0, 0xB3, 0x00, 0x00, 0xF0, 0xA9, /* 0xB0-0xB3 */
+	0xF0, 0xA4, 0xF0, 0xAA, 0xF0, 0xB4, 0xF0, 0xB8, /* 0xB4-0xB7 */
+	0xF0, 0xB7, 0xC2, 0xCA, 0xC2, 0xC9, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xF0, 0xAB, 0xF0, 0xB9, 0xF0, 0xAE, /* 0xBC-0xBF */
+	0xF0, 0xA6, 0x00, 0x00, 0xF0, 0xA8, 0xF0, 0xA7, /* 0xC0-0xC3 */
+	0xF0, 0xAD, 0xF0, 0xB2, 0xF0, 0xA5, 0xF0, 0xAC, /* 0xC4-0xC7 */
+	0xF0, 0xB1, 0xC2, 0xC7, 0x00, 0x00, 0xF0, 0xAF, /* 0xC8-0xCB */
+	0x00, 0x00, 0xC2, 0xC5, 0xF0, 0xB0, 0xC2, 0xC3, /* 0xCC-0xCF */
+	0xC2, 0xC6, 0xF2, 0xD5, 0xF0, 0xB5, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xC3, 0xC2, 0x00, 0x00, 0xF2, 0xCD, /* 0xD4-0xD7 */
+	0xF2, 0xD1, 0xF2, 0xC9, 0xF2, 0xCC, 0x00, 0x00, /* 0xD8-0xDB */
+	0xF2, 0xD4, 0xC3, 0xC0, 0xF2, 0xD9, 0xF2, 0xD2, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF2, 0xCA, 0xF2, 0xDA, 0xF2, 0xD3, /* 0xE0-0xE3 */
+	0xC3, 0xC3, 0xC3, 0xC4, 0xF2, 0xD7, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF2, 0xCB, 0xC3, 0xBF, 0xC3, 0xC1, 0xF2, 0xC6, /* 0xE8-0xEB */
+	0xF2, 0xCE, 0xF2, 0xC8, 0x00, 0x00, 0xF2, 0xD8, /* 0xEC-0xEF */
+	0xF2, 0xD6, 0xF2, 0xC7, 0xF2, 0xCF, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xF4, 0xBE, 0xC3, 0xC5, /* 0xF4-0xF7 */
+	0xF2, 0xD0, 0xC4, 0xA7, 0xC4, 0xA9, 0xC4, 0xA6, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF4, 0xC3, 0xF4, 0xBB, 0xF4, 0xB9, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_86[512] = {
+	0xF4, 0xBD, 0xF4, 0xBA, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0xF4, 0xBF, 0xF4, 0xC1, 0xC4, 0xAA, 0xC4, 0xAC, /* 0x04-0x07 */
+	0x00, 0x00, 0xF4, 0xC0, 0xC4, 0xAD, 0xC4, 0xAB, /* 0x08-0x0B */
+	0xF4, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xC4, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC4, 0xF4, /* 0x14-0x17 */
+	0xF5, 0xF1, 0xF5, 0xF7, 0xC4, 0xF6, 0xF4, 0xBC, /* 0x18-0x1B */
+	0xF5, 0xF6, 0x00, 0x00, 0xF5, 0xFD, 0xF5, 0xF4, /* 0x1C-0x1F */
+	0xF5, 0xFB, 0xF5, 0xFA, 0xF4, 0xB8, 0xF5, 0xF5, /* 0x20-0x23 */
+	0xF0, 0xB6, 0xF5, 0xFE, 0xF5, 0xF3, 0xF5, 0xF8, /* 0x24-0x27 */
+	0x00, 0x00, 0xF5, 0xFC, 0xF5, 0xF2, 0x00, 0x00, /* 0x28-0x2B */
+	0xF7, 0x4A, 0xC4, 0xF5, 0xF5, 0xF9, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xF7, 0xF4, 0xF7, 0x4B, 0xF7, 0x49, /* 0x30-0x33 */
+	0xF7, 0x47, 0xF7, 0x48, 0xF7, 0x4C, 0x00, 0x00, /* 0x34-0x37 */
+	0xC5, 0xD9, 0xF7, 0xF2, 0xF7, 0xF0, 0xF7, 0xF5, /* 0x38-0x3B */
+	0xF7, 0xF3, 0x00, 0x00, 0xF7, 0xF6, 0xC5, 0xDA, /* 0x3C-0x3F */
+	0xF7, 0xF1, 0x00, 0x00, 0x00, 0x00, 0xF8, 0xBC, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0x45, 0xF9, 0x46, /* 0x44-0x47 */
+	0xF9, 0x47, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xC7, /* 0x48-0x4B */
+	0xF9, 0xBD, 0xCA, 0x4F, 0xAA, 0xEA, 0x00, 0x00, /* 0x4C-0x4F */
+	0xAD, 0x68, 0x00, 0x00, 0xD3, 0xB8, 0xD3, 0xB7, /* 0x50-0x53 */
+	0xB0, 0x40, 0xB3, 0x42, 0xD7, 0x7C, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0xD7, 0x7B, 0x00, 0x00, 0xB5, 0xEA, /* 0x58-0x5B */
+	0xB8, 0xB8, 0x00, 0x00, 0xB8, 0xB7, 0xB8, 0xB9, /* 0x5C-0x5F */
+	0x00, 0x00, 0xE3, 0xD4, 0xE7, 0x7E, 0xEB, 0x58, /* 0x60-0x63 */
+	0xEB, 0x5A, 0xEB, 0x59, 0x00, 0x00, 0xC1, 0xAB, /* 0x64-0x67 */
+	0xEE, 0x57, 0xF0, 0xBA, 0xF9, 0xA5, 0xA6, 0xE4, /* 0x68-0x6B */
+	0x00, 0x00, 0xCD, 0xC9, 0xCD, 0xCA, 0xCD, 0xC8, /* 0x6C-0x6F */
+	0xCD, 0xC7, 0xAA, 0xEB, 0x00, 0x00, 0xD0, 0xA9, /* 0x70-0x73 */
+	0xD0, 0xA7, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xA6, /* 0x74-0x77 */
+	0x00, 0x00, 0xAD, 0x69, 0xAD, 0x6B, 0xAD, 0x6A, /* 0x78-0x7B */
+	0xD0, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xD3, 0xC4, 0xD3, 0xC1, 0xD3, 0xBF, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0x41, 0xD3, 0xC2, /* 0x88-0x8B */
+	0xB0, 0x46, 0xD3, 0xBC, 0xD3, 0xCB, 0x00, 0x00, /* 0x8C-0x8F */
+	0xD3, 0xCD, 0xD3, 0xBD, 0x00, 0x00, 0xB0, 0x43, /* 0x90-0x93 */
+	0xD3, 0xCE, 0xD3, 0xC9, 0xD3, 0xBB, 0xD3, 0xC0, /* 0x94-0x97 */
+	0xD3, 0xCA, 0xD3, 0xC6, 0xD3, 0xC3, 0x00, 0x00, /* 0x98-0x9B */
+	0xB0, 0x48, 0xD3, 0xCC, 0xD3, 0xBE, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xD3, 0xC7, 0xD3, 0xB9, 0xB0, 0x47, /* 0xA0-0xA3 */
+	0xB0, 0x44, 0xD3, 0xC5, 0x00, 0x00, 0xD3, 0xC8, /* 0xA4-0xA7 */
+	0xD3, 0xBA, 0xB0, 0x45, 0xB0, 0x42, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0x4C, /* 0xAC-0xAF */
+	0xD7, 0xA5, 0xB3, 0x4B, 0x00, 0x00, 0xD7, 0xA8, /* 0xB0-0xB3 */
+	0xD7, 0xAB, 0xB3, 0x48, 0xB3, 0x46, 0xD7, 0x7E, /* 0xB4-0xB7 */
+	0xD7, 0xA9, 0xD7, 0xA7, 0xD7, 0xA4, 0xD7, 0xAC, /* 0xB8-0xBB */
+	0xD7, 0xAD, 0xD7, 0xAF, 0xD7, 0xB0, 0xD7, 0x7D, /* 0xBC-0xBF */
+	0xB3, 0x45, 0xD7, 0xA2, 0xD7, 0xA1, 0xD7, 0xAE, /* 0xC0-0xC3 */
+	0xB3, 0x47, 0xD7, 0xA3, 0xB3, 0x49, 0xB3, 0x44, /* 0xC4-0xC7 */
+	0xD7, 0xA6, 0xB3, 0x4D, 0x00, 0x00, 0xB3, 0x4A, /* 0xC8-0xCB */
+	0xD7, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xB5, 0xF1, 0xDB, 0xBF, 0x00, 0x00, 0xDB, 0xB4, /* 0xD0-0xD3 */
+	0xB5, 0xEE, 0x00, 0x00, 0xDF, 0xE7, 0xDB, 0xBD, /* 0xD4-0xD7 */
+	0xDB, 0xB1, 0xB5, 0xEC, 0xDB, 0xB6, 0xB5, 0xEF, /* 0xD8-0xDB */
+	0xDB, 0xBA, 0xDB, 0xB8, 0xB5, 0xF2, 0xB5, 0xEB, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xB2, 0xDB, 0xB5, /* 0xE0-0xE3 */
+	0xB5, 0xF0, 0x00, 0x00, 0xDB, 0xB3, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xDB, 0xBE, 0xDB, 0xBC, 0xDB, 0xB7, 0xDB, 0xB9, /* 0xE8-0xEB */
+	0xDB, 0xBB, 0xB5, 0xED, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xDF, 0xE8, 0xDF, 0xEE, 0xDF, 0xE4, /* 0xF4-0xF7 */
+	0xDF, 0xEA, 0xB8, 0xBA, 0xDF, 0xE6, 0xB8, 0xC0, /* 0xF8-0xFB */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0xBF, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_87[512] = {
+	0xB8, 0xBE, 0xDF, 0xED, 0xB8, 0xC1, 0xB8, 0xC2, /* 0x00-0x03 */
+	0xDF, 0xE3, 0xDF, 0xF0, 0xB8, 0xC3, 0xB8, 0xBD, /* 0x04-0x07 */
+	0xB8, 0xBC, 0xDF, 0xEC, 0xB8, 0xC4, 0xDF, 0xE2, /* 0x08-0x0B */
+	0xDF, 0xE5, 0xDF, 0xEF, 0xDF, 0xEB, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0xE3, 0xF4, 0xE3, 0xE9, 0xB8, 0xBB, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0xBB, 0x6A, 0xE3, 0xDD, 0xE3, 0xF2, 0xE3, 0xDE, /* 0x18-0x1B */
+	0xBB, 0x65, 0x00, 0x00, 0xE3, 0xDB, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE3, 0xE4, 0xE3, 0xDC, 0xBB, 0x67, 0xE3, 0xD6, /* 0x20-0x23 */
+	0xE3, 0xF1, 0xBB, 0x68, 0xE3, 0xEE, 0xE3, 0xEF, /* 0x24-0x27 */
+	0xE3, 0xD7, 0xBB, 0x6D, 0xE3, 0xE6, 0x00, 0x00, /* 0x28-0x2B */
+	0xE3, 0xE0, 0xE3, 0xE7, 0xE3, 0xDA, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE3, 0xF3, 0xE3, 0xEB, 0xE3, 0xE5, 0xE3, 0xD5, /* 0x30-0x33 */
+	0xBB, 0x69, 0xE3, 0xEC, 0x00, 0x00, 0xBB, 0x6C, /* 0x34-0x37 */
+	0xE3, 0xF0, 0x00, 0x00, 0xE3, 0xEA, 0xBB, 0x66, /* 0x38-0x3B */
+	0xE3, 0xE8, 0x00, 0x00, 0xE3, 0xE2, 0xBB, 0x64, /* 0x3C-0x3F */
+	0xE3, 0xD9, 0xE3, 0xE1, 0xE3, 0xED, 0xE3, 0xDF, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xE3, 0xE3, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0xBD, 0xC1, 0xDF, 0xE9, 0xE7, 0xB2, 0xE7, 0xBB, /* 0x4C-0x4F */
+	0xE7, 0xB1, 0xE7, 0xAD, 0xE7, 0xAA, 0xBD, 0xC2, /* 0x50-0x53 */
+	0xE7, 0xA8, 0xBB, 0x6B, 0xE7, 0xA1, 0xBD, 0xC0, /* 0x54-0x57 */
+	0xE7, 0xA7, 0xBD, 0xBF, 0xE7, 0xAC, 0xE7, 0xA9, /* 0x58-0x5B */
+	0xE7, 0xB9, 0xE7, 0xB4, 0xE7, 0xAE, 0xE7, 0xB3, /* 0x5C-0x5F */
+	0xBD, 0xBB, 0xE7, 0xAB, 0xE7, 0xBE, 0xE7, 0xA2, /* 0x60-0x63 */
+	0xE7, 0xA3, 0xE7, 0xBA, 0xBD, 0xBC, 0xE7, 0xBF, /* 0x64-0x67 */
+	0xBD, 0xBE, 0xE7, 0xC0, 0xE7, 0xB0, 0xE3, 0xD8, /* 0x68-0x6B */
+	0xE7, 0xB6, 0xE7, 0xAF, 0xE7, 0xB8, 0xE7, 0xB5, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE7, 0xA6, /* 0x70-0x73 */
+	0xBD, 0xB9, 0xE7, 0xBD, 0xBD, 0xBA, 0xE7, 0xA4, /* 0x74-0x77 */
+	0xBD, 0xBD, 0xEB, 0x64, 0xE7, 0xB7, 0xE7, 0xBC, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xEB, 0x61, 0xBD, 0xB8, 0xBF, 0xC0, /* 0x80-0x83 */
+	0xEB, 0x6B, 0xEB, 0x67, 0x00, 0x00, 0xEB, 0x65, /* 0x84-0x87 */
+	0xEB, 0x60, 0xEB, 0x6F, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xBF, 0xC4, 0x00, 0x00, 0xEB, 0x5C, /* 0x8C-0x8F */
+	0xEB, 0x68, 0xEB, 0x69, 0xEB, 0x5F, 0xEB, 0x5E, /* 0x90-0x93 */
+	0xEB, 0x6C, 0x00, 0x00, 0xEB, 0x62, 0xEB, 0x5D, /* 0x94-0x97 */
+	0xEB, 0x63, 0x00, 0x00, 0xEB, 0x6E, 0xEB, 0x5B, /* 0x98-0x9B */
+	0xEB, 0x6D, 0xEB, 0x6A, 0xBF, 0xC2, 0xBF, 0xC1, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0xBF, 0xC3, 0xEB, 0x66, /* 0xA0-0xA3 */
+	0xF0, 0xCB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0x59, 0xC1, 0xB1, /* 0xA8-0xAB */
+	0xEE, 0x5D, 0xEE, 0x5A, 0xEE, 0x61, 0xEE, 0x67, /* 0xAC-0xAF */
+	0xEE, 0x5C, 0x00, 0x00, 0xEE, 0x70, 0xC1, 0xAE, /* 0xB0-0xB3 */
+	0xEE, 0x6A, 0xEE, 0x5F, 0xEE, 0x6B, 0xEE, 0x66, /* 0xB4-0xB7 */
+	0xEE, 0x6D, 0xEE, 0x5E, 0xC1, 0xB3, 0xC1, 0xB2, /* 0xB8-0xBB */
+	0xEE, 0x60, 0xEE, 0x6E, 0xEE, 0x58, 0xEE, 0x6C, /* 0xBC-0xBF */
+	0xC1, 0xAC, 0x00, 0x00, 0xEE, 0x64, 0xEE, 0x63, /* 0xC0-0xC3 */
+	0xEE, 0x68, 0xEE, 0x5B, 0xC1, 0xB0, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xC1, 0xB4, 0xEE, 0x62, 0xEE, 0x69, 0xC1, 0xB5, /* 0xC8-0xCB */
+	0xEE, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xC1, 0xAD, 0xC1, 0xAF, 0xF0, 0xC7, /* 0xD0-0xD3 */
+	0xF0, 0xC5, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xCC, /* 0xD4-0xD7 */
+	0xF0, 0xC9, 0xF0, 0xCD, 0x00, 0x00, 0xF0, 0xBE, /* 0xD8-0xDB */
+	0xF0, 0xC6, 0xF0, 0xD1, 0xEE, 0x6F, 0xF0, 0xC2, /* 0xDC-0xDF */
+	0xC2, 0xCF, 0xE7, 0xA5, 0xF0, 0xBD, 0xF0, 0xCA, /* 0xE0-0xE3 */
+	0xF0, 0xC4, 0xF0, 0xC1, 0xF0, 0xBC, 0xF0, 0xBB, /* 0xE4-0xE7 */
+	0xF0, 0xD0, 0x00, 0x00, 0xF0, 0xC0, 0xF0, 0xBF, /* 0xE8-0xEB */
+	0xC2, 0xCD, 0xF0, 0xC8, 0x00, 0x00, 0xC2, 0xCC, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0xCE, 0xF0, 0xC3, /* 0xF0-0xF3 */
+	0xF0, 0xCF, 0x00, 0x00, 0xF2, 0xDE, 0xF2, 0xDF, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xC3, 0xC9, 0xF2, 0xDC, 0xC3, 0xC6, /* 0xF8-0xFB */
+	0xF2, 0xE4, 0x00, 0x00, 0xC3, 0xCA, 0xF2, 0xE6, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_88[512] = {
+	0xF2, 0xDB, 0xF0, 0xCE, 0xF2, 0xE8, 0xF2, 0xDD, /* 0x00-0x03 */
+	0x00, 0x00, 0xC3, 0xC7, 0xF2, 0xE3, 0x00, 0x00, /* 0x04-0x07 */
+	0xF2, 0xE5, 0xF2, 0xE0, 0xF2, 0xE7, 0xF2, 0xE2, /* 0x08-0x0B */
+	0xF2, 0xE1, 0xC3, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF4, 0xC5, 0xF4, 0xC6, 0x00, 0x00, 0xF4, 0xC8, /* 0x10-0x13 */
+	0xC4, 0xAE, 0xC4, 0xAF, 0xF4, 0xC9, 0xF4, 0xC7, /* 0x14-0x17 */
+	0x00, 0x00, 0xF4, 0xC4, 0x00, 0x00, 0xF6, 0x42, /* 0x18-0x1B */
+	0xF6, 0x45, 0xF6, 0x41, 0x00, 0x00, 0xC4, 0xFA, /* 0x1C-0x1F */
+	0xF6, 0x43, 0xC4, 0xF9, 0xC4, 0xF8, 0xC4, 0xF7, /* 0x20-0x23 */
+	0xF6, 0x44, 0xF7, 0x51, 0xF7, 0x4F, 0x00, 0x00, /* 0x24-0x27 */
+	0xF7, 0x4E, 0xF6, 0x40, 0xF7, 0x50, 0xF6, 0x46, /* 0x28-0x2B */
+	0xF7, 0x4D, 0x00, 0x00, 0xF7, 0xF9, 0xF7, 0xD7, /* 0x2C-0x2F */
+	0xF7, 0xF7, 0xC5, 0xDB, 0xF7, 0xF8, 0xF7, 0xFA, /* 0x30-0x33 */
+	0x00, 0x00, 0xF8, 0xBF, 0xC5, 0xFA, 0xF8, 0xBE, /* 0x34-0x37 */
+	0xF8, 0xBD, 0xC5, 0xFB, 0x00, 0x00, 0xC6, 0x5A, /* 0x38-0x3B */
+	0xF9, 0x6E, 0xF9, 0xA7, 0xF9, 0xA6, 0xF9, 0xA8, /* 0x3C-0x3F */
+	0xA6, 0xE5, 0xD0, 0xAA, 0x00, 0x00, 0xD3, 0xCF, /* 0x40-0x43 */
+	0xD3, 0xD0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0xDB, 0xC0, 0x00, 0x00, 0xF6, 0x47, 0xF8, 0xC0, /* 0x48-0x4B */
+	0xA6, 0xE6, 0xAD, 0x6C, 0xD0, 0xAB, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xD7, 0xB1, 0xB3, 0x4E, /* 0x50-0x53 */
+	0x00, 0x00, 0xDB, 0xC2, 0xDB, 0xC1, 0xB5, 0xF3, /* 0x54-0x57 */
+	0x00, 0x00, 0xB8, 0xC5, 0xE7, 0xC1, 0xBD, 0xC3, /* 0x58-0x5B */
+	0x00, 0x00, 0xBD, 0xC4, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0xBF, 0xC5, 0xC5, 0xFC, 0xA6, 0xE7, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xAC, /* 0x64-0x67 */
+	0xAA, 0xED, 0xD0, 0xAE, 0xD0, 0xAD, 0xAD, 0x6D, /* 0x68-0x6B */
+	0x00, 0x00, 0xD3, 0xD1, 0x00, 0x00, 0xD3, 0xD8, /* 0x6C-0x6F */
+	0xB0, 0x49, 0xD3, 0xD6, 0xD3, 0xD4, 0x00, 0x00, /* 0x70-0x73 */
+	0xD3, 0xDB, 0xD3, 0xD2, 0xD3, 0xD3, 0xB0, 0x4A, /* 0x74-0x77 */
+	0x00, 0x00, 0xB0, 0x4E, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xD3, 0xDC, 0xB0, 0x4D, 0xD3, 0xDA, 0xD3, 0xD7, /* 0x7C-0x7F */
+	
+	0xD3, 0xD5, 0xB0, 0x4B, 0xB0, 0x4C, 0xD3, 0xD9, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xB3, 0x50, 0xD7, 0xB2, 0x00, 0x00, 0xB3, 0x55, /* 0x88-0x8B */
+	0xD7, 0xC2, 0xB3, 0x54, 0xD7, 0xC4, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xD7, 0xB8, 0xB3, 0x52, 0xD7, 0xC3, /* 0x90-0x93 */
+	0x00, 0x00, 0xD7, 0xB3, 0xB3, 0x53, 0xD7, 0xBF, /* 0x94-0x97 */
+	0xD7, 0xBB, 0xD7, 0xBD, 0xD7, 0xB7, 0xD7, 0xBE, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0x4F, 0xD7, 0xBA, /* 0x9C-0x9F */
+	0x00, 0x00, 0xD7, 0xB9, 0xD7, 0xB5, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xD7, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xBC, /* 0xA4-0xA7 */
+	0xD7, 0xB4, 0x00, 0x00, 0xD7, 0xB6, 0xB3, 0x51, /* 0xA8-0xAB */
+	0xD7, 0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0xB5, 0xF6, 0xDB, 0xCD, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xDB, 0xC9, 0xDB, 0xCB, /* 0xB4-0xB7 */
+	0xDB, 0xC6, 0xDB, 0xC5, 0xDB, 0xC3, 0x00, 0x00, /* 0xB8-0xBB */
+	0xDB, 0xCA, 0xDB, 0xCC, 0xDB, 0xC8, 0x00, 0x00, /* 0xBC-0xBF */
+	0xDB, 0xC7, 0xB5, 0xF4, 0xB5, 0xF5, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xDB, 0xCF, 0xB8, 0xCD, 0xDF, 0xF2, /* 0xC8-0xCB */
+	0xDF, 0xF8, 0xDF, 0xF3, 0xDF, 0xF4, 0xF9, 0xD8, /* 0xCC-0xCF */
+	0xDF, 0xF9, 0x00, 0x00, 0xB8, 0xCF, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xB8, 0xC7, 0xB8, 0xCE, 0xDF, 0xF1, 0xDB, 0xC4, /* 0xD4-0xD7 */
+	0xB8, 0xCA, 0xB8, 0xC8, 0xDF, 0xF7, 0xDF, 0xF6, /* 0xD8-0xDB */
+	0xB8, 0xC9, 0xB8, 0xCB, 0xDF, 0xF5, 0xB8, 0xC6, /* 0xDC-0xDF */
+	0x00, 0x00, 0xB8, 0xCC, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, 0xF6, /* 0xE4-0xE7 */
+	0xBB, 0x74, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x42, /* 0xE8-0xEB */
+	0xE4, 0x41, 0x00, 0x00, 0xE3, 0xFB, 0xBB, 0x76, /* 0xEC-0xEF */
+	0xE4, 0x40, 0xE3, 0xF7, 0xE3, 0xF8, 0xBB, 0x6E, /* 0xF0-0xF3 */
+	0xBB, 0x70, 0x00, 0x00, 0xE3, 0xFD, 0xE3, 0xF5, /* 0xF4-0xF7 */
+	0xBB, 0x72, 0xBB, 0x71, 0xE3, 0xF9, 0xE3, 0xFE, /* 0xF8-0xFB */
+	0xE3, 0xFC, 0xBB, 0x73, 0xE3, 0xFA, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_89[512] = {
+	0x00, 0x00, 0xDB, 0xCE, 0xBB, 0x6F, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xE7, 0xC2, 0xE7, 0xC9, 0xBD, 0xC6, /* 0x04-0x07 */
+	0x00, 0x00, 0xE7, 0xCD, 0xBD, 0xCA, 0xE7, 0xC5, /* 0x08-0x0B */
+	0xE7, 0xC3, 0x00, 0x00, 0xE7, 0xCC, 0x00, 0x00, /* 0x0C-0x0F */
+	0xBD, 0xC5, 0xE7, 0xCB, 0xBD, 0xC7, 0xBD, 0xC8, /* 0x10-0x13 */
+	0xE7, 0xC4, 0xBD, 0xC9, 0xE7, 0xCA, 0xE7, 0xC6, /* 0x14-0x17 */
+	0xE7, 0xC7, 0xE7, 0xC8, 0xBB, 0x75, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0x70, 0xEB, 0x7C, /* 0x1C-0x1F */
+	0x00, 0x00, 0xBF, 0xCA, 0xEB, 0x77, 0xEB, 0x79, /* 0x20-0x23 */
+	0x00, 0x00, 0xBF, 0xC8, 0xEB, 0x71, 0xEB, 0x75, /* 0x24-0x27 */
+	0x00, 0x00, 0xEB, 0x78, 0xBF, 0xC6, 0xBF, 0xC9, /* 0x28-0x2B */
+	0xEB, 0x7B, 0xEB, 0x73, 0xEB, 0x74, 0xEB, 0x7A, /* 0x2C-0x2F */
+	0xEB, 0x72, 0xEB, 0x76, 0xBF, 0xC7, 0xEE, 0x72, /* 0x30-0x33 */
+	0x00, 0x00, 0xEE, 0x71, 0xC1, 0xB7, 0xEE, 0x77, /* 0x34-0x37 */
+	0xC1, 0xB9, 0x00, 0x00, 0x00, 0x00, 0xC1, 0xB6, /* 0x38-0x3B */
+	0xEE, 0x73, 0xC1, 0xBA, 0xEE, 0x74, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xEE, 0x75, 0xEE, 0x78, 0x00, 0x00, /* 0x40-0x43 */
+	0xC1, 0xB8, 0x00, 0x00, 0xF0, 0xD6, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xF0, 0xD9, 0x00, 0x00, 0xF0, 0xD3, /* 0x48-0x4B */
+	0xF0, 0xD5, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xD4, /* 0x4C-0x4F */
+	0xF0, 0xD7, 0xF0, 0xD8, 0xEE, 0x76, 0xF0, 0xD2, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xC3, 0xCD, 0xF2, 0xEC, /* 0x54-0x57 */
+	0xF2, 0xEF, 0xF2, 0xF1, 0xF2, 0xEA, 0xF2, 0xEB, /* 0x58-0x5B */
+	0xF2, 0xEE, 0xF2, 0xF0, 0xC3, 0xCE, 0xC3, 0xCC, /* 0x5C-0x5F */
+	0xC3, 0xCB, 0xF2, 0xED, 0xF2, 0xE9, 0xF4, 0xCA, /* 0x60-0x63 */
+	0xC4, 0xB0, 0x00, 0x00, 0xF4, 0xCB, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0xF6, 0x49, 0xC4, 0xFB, 0xF6, 0x4B, /* 0x68-0x6B */
+	0xC4, 0xFC, 0xF6, 0x48, 0xF6, 0x4A, 0xC5, 0xA8, /* 0x6C-0x6F */
+	0x00, 0x00, 0xF7, 0x52, 0xC5, 0xA7, 0xF7, 0xFD, /* 0x70-0x73 */
+	0xF7, 0xFC, 0x00, 0x00, 0xF7, 0xFB, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xF9, 0x48, 0xF9, 0x49, 0xF9, 0x4B, /* 0x78-0x7B */
+	0xF9, 0x4A, 0x00, 0x00, 0xCA, 0x50, 0xA6, 0xE8, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xAD, 0x6E, 0xD7, 0xC5, 0xB5, 0xF7, /* 0x80-0x83 */
+	0x00, 0x00, 0xDF, 0xFA, 0xC2, 0xD0, 0x00, 0x00, /* 0x84-0x87 */
+	0xF2, 0xF2, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xA3, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0x57, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0x56, /* 0x90-0x93 */
+	0x00, 0x00, 0xDB, 0xD0, 0xB5, 0xF8, 0xDB, 0xD2, /* 0x94-0x97 */
+	0xDB, 0xD1, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xFB, /* 0x98-0x9B */
+	0xB8, 0xD0, 0xE4, 0x43, 0xE4, 0x46, 0xE4, 0x45, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE4, 0x44, 0xE7, 0xCE, 0xE7, 0xD0, /* 0xA0-0xA3 */
+	0xE7, 0xCF, 0x00, 0x00, 0xBF, 0xCC, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xBF, 0xCB, 0x00, 0x00, /* 0xA8-0xAB */
+	0xC1, 0xBB, 0xEE, 0x79, 0xEE, 0x7B, 0xEE, 0x7A, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xC2, 0xD1, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xF2, 0xF4, 0xF2, 0xF3, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF4, 0xCC, 0xC4, 0xB1, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xC4, 0xFD, 0xF7, 0x54, 0xF7, 0x53, /* 0xBC-0xBF */
+	0xC6, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xA4, 0xD0, 0xAF, /* 0xD0-0xD3 */
+	0xAD, 0x6F, 0xD7, 0xC8, 0xD7, 0xC6, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xD7, 0xC7, 0xDB, 0xD4, 0xDB, 0xD5, /* 0xD8-0xDB */
+	0xE0, 0x43, 0xDB, 0xD3, 0x00, 0x00, 0xDF, 0xFC, /* 0xDC-0xDF */
+	0xE0, 0x41, 0xE0, 0x40, 0xE0, 0x42, 0xB8, 0xD1, /* 0xE0-0xE3 */
+	0xDF, 0xFE, 0xDF, 0xFD, 0xE0, 0x44, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xE4, 0x49, 0xE4, 0x47, 0x00, 0x00, 0xE4, 0x48, /* 0xE8-0xEB */
+	0xE7, 0xD3, 0xE7, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xE7, 0xD2, 0xEB, 0x7D, 0xEE, 0x7C, 0xEE, 0x7D, /* 0xF0-0xF3 */
+	0xC2, 0xD2, 0x00, 0x00, 0xF2, 0xF5, 0xF4, 0xCD, /* 0xF4-0xF7 */
+	0xC4, 0xB2, 0x00, 0x00, 0xF6, 0x4C, 0xF7, 0x55, /* 0xF8-0xFB */
+	0xC5, 0xA9, 0x00, 0x00, 0xF7, 0xFE, 0xF9, 0x4C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8A[512] = {
+	0xA8, 0xA5, 0x00, 0x00, 0xAD, 0x71, 0xAD, 0x72, /* 0x00-0x03 */
+	0xD0, 0xB0, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xB1, /* 0x04-0x07 */
+	0xAD, 0x70, 0x00, 0x00, 0xB0, 0x54, 0x00, 0x00, /* 0x08-0x0B */
+	0xB0, 0x52, 0x00, 0x00, 0xB0, 0x51, 0xB0, 0x58, /* 0x0C-0x0F */
+	0xB0, 0x50, 0xB0, 0x59, 0xD3, 0xDD, 0xB0, 0x56, /* 0x10-0x13 */
+	0x00, 0x00, 0xB0, 0x53, 0xB0, 0x57, 0xB0, 0x55, /* 0x14-0x17 */
+	0xB0, 0x4F, 0x00, 0x00, 0x00, 0x00, 0xB3, 0x5F, /* 0x18-0x1B */
+	0x00, 0x00, 0xB3, 0x59, 0xD7, 0xCC, 0xB3, 0x5E, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0x60, 0xB3, 0x5A, /* 0x20-0x23 */
+	0x00, 0x00, 0xB3, 0x5B, 0x00, 0x00, 0xD7, 0xCA, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0x58, 0x00, 0x00, /* 0x28-0x2B */
+	0xD7, 0xCB, 0xB3, 0x5D, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xD7, 0xC9, 0xB3, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0xB6, 0x44, 0x00, 0x00, 0xB6, 0x46, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xDB, 0xD8, 0xB6, 0x45, 0xB5, 0xF9, /* 0x38-0x3B */
+	0xB5, 0xFD, 0x00, 0x00, 0xB8, 0xE4, 0xE0, 0x49, /* 0x3C-0x3F */
+	0xDB, 0xDA, 0xB5, 0xFE, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xDB, 0xDD, 0xDB, 0xDE, 0xB6, 0x43, 0x00, 0x00, /* 0x44-0x47 */
+	0xDB, 0xE0, 0x00, 0x00, 0xDB, 0xE2, 0x00, 0x00, /* 0x48-0x4B */
+	0xDB, 0xE3, 0xDB, 0xD7, 0xDB, 0xD6, 0xDB, 0xE4, /* 0x4C-0x4F */
+	0xB6, 0x42, 0xDB, 0xE1, 0xDB, 0xDF, 0x00, 0x00, /* 0x50-0x53 */
+	0xB6, 0x40, 0xB5, 0xFB, 0xB6, 0x47, 0xDB, 0xDB, /* 0x54-0x57 */
+	0xDB, 0xDC, 0xDB, 0xD9, 0x00, 0x00, 0xB6, 0x41, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xB5, 0xFC, 0x00, 0x00, /* 0x5C-0x5F */
+	0xB5, 0xFA, 0xE0, 0x48, 0xB8, 0xDF, 0xB8, 0xDA, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0xD5, 0x00, 0x00, /* 0x64-0x67 */
+	0xB8, 0xE5, 0xB8, 0xD6, 0x00, 0x00, 0xB8, 0xD2, /* 0x68-0x6B */
+	0xB8, 0xE1, 0xB8, 0xDE, 0xB8, 0xE0, 0x00, 0x00, /* 0x6C-0x6F */
+	0xB8, 0xD7, 0xB8, 0xDC, 0xB8, 0xD3, 0xB8, 0xD4, /* 0x70-0x73 */
+	0xE0, 0x50, 0xE0, 0x4D, 0xE0, 0x45, 0xE0, 0x4A, /* 0x74-0x77 */
+	0x00, 0x00, 0xB8, 0xE2, 0xE0, 0x51, 0xB8, 0xE3, /* 0x78-0x7B */
+	0xB8, 0xD9, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x47, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xE0, 0x4F, 0xE0, 0x4B, 0xE0, 0x4E, /* 0x80-0x83 */
+	0xE0, 0x4C, 0xB8, 0xDD, 0xE0, 0x46, 0xB8, 0xD8, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x4C, /* 0x88-0x8B */
+	0xBB, 0x78, 0xBB, 0x7B, 0x00, 0x00, 0xE4, 0x4E, /* 0x8C-0x8F */
+	0x00, 0x00, 0xBB, 0xA5, 0xE4, 0x4D, 0xBB, 0x7D, /* 0x90-0x93 */
+	0x00, 0x00, 0xBD, 0xCF, 0xE4, 0x4F, 0x00, 0x00, /* 0x94-0x97 */
+	0xBB, 0xA4, 0xE4, 0x4B, 0xBB, 0xA6, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xBB, 0x79, 0x00, 0x00, /* 0x9C-0x9F */
+	0xB8, 0xDB, 0xBB, 0x7C, 0x00, 0x00, 0xBB, 0x7A, /* 0xA0-0xA3 */
+	0xBB, 0x7E, 0xBB, 0xA2, 0xBB, 0x77, 0xBB, 0xA7, /* 0xA4-0xA7 */
+	0xBB, 0xA3, 0x00, 0x00, 0xBB, 0xA1, 0xE4, 0x4A, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xBD, 0xD6, 0x00, 0x00, 0xBD, 0xD2, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xBD, 0xD9, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xE7, 0xD6, 0xBD, 0xDA, 0xE7, 0xE2, 0xE7, 0xDB, /* 0xB8-0xBB */
+	0xBD, 0xCB, 0xE7, 0xE3, 0xE7, 0xDD, 0xBD, 0xD5, /* 0xBC-0xBF */
+	0xE7, 0xDE, 0x00, 0x00, 0xBD, 0xD4, 0xE7, 0xE1, /* 0xC0-0xC3 */
+	0xBD, 0xCE, 0xE7, 0xDF, 0xE7, 0xD5, 0xBD, 0xCD, /* 0xC4-0xC7 */
+	0xEB, 0xAA, 0xBD, 0xD3, 0x00, 0x00, 0xBD, 0xD0, /* 0xC8-0xCB */
+	0x00, 0x00, 0xBD, 0xD8, 0x00, 0x00, 0xE7, 0xD4, /* 0xCC-0xCF */
+	0x00, 0x00, 0xE7, 0xD8, 0xBD, 0xCC, 0xE7, 0xD7, /* 0xD0-0xD3 */
+	0xE7, 0xD9, 0xE7, 0xDA, 0xBD, 0xD7, 0xE7, 0xDC, /* 0xD4-0xD7 */
+	0xE7, 0xE0, 0xE7, 0xE4, 0x00, 0x00, 0xBD, 0xDB, /* 0xD8-0xDB */
+	0xBF, 0xD2, 0xEB, 0xA5, 0xEB, 0xAB, 0xEB, 0xA8, /* 0xDC-0xDF */
+	0xEB, 0x7E, 0xEB, 0xAC, 0xEB, 0xA1, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xEB, 0xA7, 0x00, 0x00, 0xBF, 0xCD, 0xBF, 0xD3, /* 0xE4-0xE7 */
+	0xEB, 0xAD, 0x00, 0x00, 0x00, 0x00, 0xBF, 0xCF, /* 0xE8-0xEB */
+	0x00, 0x00, 0xBF, 0xD9, 0xBF, 0xD4, 0xEB, 0xAF, /* 0xEC-0xEF */
+	0xEB, 0xA9, 0xBF, 0xD0, 0xEB, 0xA2, 0xBF, 0xDA, /* 0xF0-0xF3 */
+	0xEB, 0xA3, 0xEB, 0xA4, 0xBF, 0xDB, 0xBF, 0xD8, /* 0xF4-0xF7 */
+	0xBD, 0xD1, 0x00, 0x00, 0xBF, 0xCE, 0xEB, 0xB0, /* 0xF8-0xFB */
+	0xBF, 0xDC, 0x00, 0x00, 0xBF, 0xD5, 0xEB, 0xAE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8B[512] = {
+	0xBF, 0xD1, 0xBF, 0xD6, 0xBF, 0xD7, 0x00, 0x00, /* 0x00-0x03 */
+	0xC1, 0xC3, 0xEE, 0xA4, 0xEE, 0xAD, 0xEE, 0xAA, /* 0x04-0x07 */
+	0xEE, 0xAC, 0x00, 0x00, 0xC1, 0xC0, 0xEE, 0xA5, /* 0x08-0x0B */
+	0x00, 0x00, 0xEE, 0xAB, 0xC1, 0xBC, 0xEE, 0xA7, /* 0x0C-0x0F */
+	0xC1, 0xC4, 0xEE, 0xA3, 0xEE, 0xA8, 0xEE, 0xAF, /* 0x10-0x13 */
+	0xEB, 0xA6, 0xEE, 0xA9, 0xEE, 0xA2, 0xC1, 0xBD, /* 0x14-0x17 */
+	0xEE, 0xA1, 0xC1, 0xBE, 0xEE, 0xB0, 0xC1, 0xBF, /* 0x18-0x1B */
+	0xEE, 0xAE, 0xC1, 0xC2, 0xEE, 0x7E, 0x00, 0x00, /* 0x1C-0x1F */
+	0xC1, 0xC1, 0x00, 0x00, 0xEE, 0xA6, 0xF0, 0xDC, /* 0x20-0x23 */
+	0xF0, 0xEA, 0xF0, 0xE5, 0xF0, 0xE7, 0xF0, 0xDB, /* 0x24-0x27 */
+	0xC2, 0xD3, 0x00, 0x00, 0xF0, 0xDA, 0xC2, 0xD6, /* 0x28-0x2B */
+	0xC2, 0xD5, 0x00, 0x00, 0xF0, 0xE9, 0xF0, 0xE1, /* 0x2C-0x2F */
+	0xF0, 0xDE, 0xF0, 0xE4, 0x00, 0x00, 0xF0, 0xDD, /* 0x30-0x33 */
+	0x00, 0x00, 0xF0, 0xDF, 0xF0, 0xE8, 0xF0, 0xE6, /* 0x34-0x37 */
+	0x00, 0x00, 0xC2, 0xD4, 0xF0, 0xED, 0xF0, 0xEB, /* 0x38-0x3B */
+	0xF0, 0xE2, 0xF0, 0xEC, 0xF0, 0xE3, 0x00, 0x00, /* 0x3C-0x3F */
+	0xF2, 0xF9, 0xC3, 0xCF, 0xF3, 0x41, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xF6, 0x4F, 0xC3, 0xD6, 0xF0, 0xE0, /* 0x44-0x47 */
+	0xF2, 0xF7, 0xC3, 0xD2, 0xF2, 0xF8, 0xF2, 0xFD, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0xC3, 0xD4, 0xC3, 0xD5, /* 0x4C-0x4F */
+	0xF2, 0xF6, 0xF3, 0x40, 0xF3, 0x42, 0xF2, 0xFA, /* 0x50-0x53 */
+	0xF2, 0xFC, 0xF2, 0xFE, 0xF2, 0xFB, 0xF3, 0x43, /* 0x54-0x57 */
+	0xC3, 0xD1, 0xC3, 0xD7, 0xC3, 0xD3, 0x00, 0x00, /* 0x58-0x5B */
+	0xC3, 0xD0, 0xF4, 0xD0, 0x00, 0x00, 0xC4, 0xB7, /* 0x5C-0x5F */
+	0xF4, 0xCE, 0x00, 0x00, 0x00, 0x00, 0xF4, 0xD2, /* 0x60-0x63 */
+	0x00, 0x00, 0xF4, 0xD3, 0xC4, 0xB5, 0xF4, 0xD4, /* 0x64-0x67 */
+	0xF4, 0xD1, 0x00, 0x00, 0xF4, 0xCF, 0xC4, 0xB8, /* 0x68-0x6B */
+	0xC4, 0xB4, 0xF4, 0xD5, 0x00, 0x00, 0xC4, 0xB6, /* 0x6C-0x6F */
+	0xC4, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xC4, 0xFE, 0x00, 0x00, 0x00, 0x00, 0xC5, 0x40, /* 0x74-0x77 */
+	0xF6, 0x4E, 0xF6, 0x4D, 0xF6, 0x50, 0xF6, 0x51, /* 0x78-0x7B */
+	0x00, 0x00, 0xC5, 0x41, 0xF7, 0x56, 0xF7, 0x5B, /* 0x7C-0x7F */
+	
+	0xC5, 0xAA, 0x00, 0x00, 0xF7, 0x58, 0x00, 0x00, /* 0x80-0x83 */
+	0xF7, 0x57, 0xF7, 0x5A, 0xF7, 0x59, 0x00, 0x00, /* 0x84-0x87 */
+	0xF8, 0x43, 0x00, 0x00, 0xC5, 0xDC, 0xF8, 0x42, /* 0x88-0x8B */
+	0xF8, 0x40, 0x00, 0x00, 0xF8, 0x41, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0xC5, 0xFE, 0xC5, 0xFD, /* 0x90-0x93 */
+	0xF8, 0xC1, 0xF8, 0xC2, 0xC6, 0x40, 0x00, 0x00, /* 0x94-0x97 */
+	0xF9, 0x4D, 0xF9, 0x4E, 0xC6, 0x67, 0x00, 0x00, /* 0x98-0x9B */
+	0xC6, 0x6D, 0x00, 0x00, 0xF9, 0xA9, 0xF9, 0xC8, /* 0x9C-0x9F */
+};
+
+static unsigned char u2c_8C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xA6, /* 0x34-0x37 */
+	0x00, 0x00, 0xD7, 0xCD, 0x00, 0x00, 0xD7, 0xCE, /* 0x38-0x3B */
+	0xE0, 0x52, 0xE4, 0x50, 0xE7, 0xE5, 0xC1, 0xC6, /* 0x3C-0x3F */
+	0x00, 0x00, 0xC1, 0xC5, 0xF0, 0xEE, 0xF3, 0x44, /* 0x40-0x43 */
+	0x00, 0x00, 0xF8, 0x44, 0xA8, 0xA7, 0xD3, 0xDE, /* 0x44-0x47 */
+	0xB0, 0x5A, 0xB3, 0x61, 0xE0, 0x54, 0xE0, 0x53, /* 0x48-0x4B */
+	0xBD, 0xDC, 0xE7, 0xE6, 0xBD, 0xDD, 0xEE, 0xB1, /* 0x4C-0x4F */
+	0xC2, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0xC6, 0x76, 0xA8, 0xA8, 0xCD, 0xCB, 0xD3, 0xDF, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0x62, 0x00, 0x00, /* 0x58-0x5B */
+	0xD7, 0xCF, 0xD7, 0xD0, 0x00, 0x00, 0xDB, 0xE5, /* 0x5C-0x5F */
+	0x00, 0x00, 0xB6, 0x48, 0xB8, 0xE6, 0x00, 0x00, /* 0x60-0x63 */
+	0xE0, 0x56, 0xE0, 0x55, 0xE0, 0x57, 0x00, 0x00, /* 0x64-0x67 */
+	0xE4, 0x51, 0xE4, 0x52, 0xBB, 0xA8, 0xBF, 0xDD, /* 0x68-0x6B */
+	0xBD, 0xDE, 0xBF, 0xDE, 0x00, 0x00, 0xEE, 0xB5, /* 0x6C-0x6F */
+	0xEE, 0xB2, 0xEE, 0xB4, 0xEE, 0xB3, 0xC1, 0xC7, /* 0x70-0x73 */
+	0x00, 0x00, 0xF0, 0xEF, 0xF3, 0x46, 0xF3, 0x45, /* 0x74-0x77 */
+	0xCB, 0xA4, 0xB0, 0x5C, 0xB0, 0x5B, 0xD3, 0xE0, /* 0x78-0x7B */
+	0x00, 0x00, 0xD7, 0xD1, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xDB, 0xE7, 0xDB, 0xE6, 0xB6, 0x49, 0x00, 0x00, /* 0x80-0x83 */
+	0xE0, 0x59, 0xE0, 0x5A, 0xE0, 0x58, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xB8, 0xE8, 0xB8, 0xE7, 0x00, 0x00, /* 0x88-0x8B */
+	0xBB, 0xAA, 0xBB, 0xA9, 0x00, 0x00, 0xE7, 0xE7, /* 0x8C-0x8F */
+	0xEB, 0xB3, 0xEB, 0xB1, 0xEB, 0xB2, 0xBF, 0xDF, /* 0x90-0x93 */
+	0xEE, 0xB7, 0xEE, 0xB6, 0x00, 0x00, 0xF0, 0xF2, /* 0x94-0x97 */
+	0xF0, 0xF1, 0xF0, 0xF0, 0xF3, 0x47, 0x00, 0x00, /* 0x98-0x9B */
+	0xF9, 0xAA, 0xA8, 0xA9, 0xAD, 0x73, 0x00, 0x00, /* 0x9C-0x9F */
+	0xAD, 0x74, 0xB0, 0x5D, 0xB0, 0x5E, 0xD3, 0xE2, /* 0xA0-0xA3 */
+	0xD3, 0xE1, 0xD7, 0xD2, 0x00, 0x00, 0xB3, 0x68, /* 0xA4-0xA7 */
+	0xB3, 0x66, 0xB3, 0x63, 0xB3, 0x67, 0xB3, 0x65, /* 0xA8-0xAB */
+	0xB3, 0x64, 0x00, 0x00, 0x00, 0x00, 0xB6, 0x4A, /* 0xAC-0xAF */
+	0xDB, 0xEA, 0x00, 0x00, 0xB8, 0xED, 0xB6, 0x4C, /* 0xB0-0xB3 */
+	0xB6, 0x51, 0xDB, 0xEC, 0xB6, 0x53, 0xB6, 0x52, /* 0xB4-0xB7 */
+	0xB6, 0x55, 0xDB, 0xEB, 0xDB, 0xE8, 0xB6, 0x4F, /* 0xB8-0xBB */
+	0xB6, 0x4B, 0xB6, 0x4D, 0xDB, 0xE9, 0xB6, 0x54, /* 0xBC-0xBF */
+	0xB6, 0x50, 0xB6, 0x4E, 0xB8, 0xEF, 0xB8, 0xEE, /* 0xC0-0xC3 */
+	0xB8, 0xEC, 0xB8, 0xF0, 0x00, 0x00, 0xB8, 0xEA, /* 0xC4-0xC7 */
+	0xB8, 0xEB, 0x00, 0x00, 0xB8, 0xE9, 0x00, 0x00, /* 0xC8-0xCB */
+	0xE0, 0x5B, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x54, /* 0xCC-0xCF */
+	0x00, 0x00, 0xBB, 0xAC, 0xBB, 0xAD, 0xBB, 0xAB, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE4, 0x53, 0x00, 0x00, 0xE4, 0x55, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xE7, 0xEA, 0xE7, 0xEC, 0x00, 0x00, /* 0xD8-0xDB */
+	0xBD, 0xE7, 0xE7, 0xED, 0xBD, 0xE0, 0xE7, 0xE9, /* 0xDC-0xDF */
+	0xBD, 0xDF, 0xBD, 0xE9, 0xBD, 0xE5, 0xBD, 0xE6, /* 0xE0-0xE3 */
+	0xBD, 0xE2, 0xE7, 0xE8, 0xBD, 0xE1, 0xE7, 0xEE, /* 0xE4-0xE7 */
+	0xE7, 0xEB, 0x00, 0x00, 0xBD, 0xE8, 0x00, 0x00, /* 0xE8-0xEB */
+	0xBD, 0xE3, 0xBD, 0xE4, 0xEB, 0xB5, 0x00, 0x00, /* 0xEC-0xEF */
+	0xEB, 0xB7, 0xEB, 0xB6, 0x00, 0x00, 0xEB, 0xB8, /* 0xF0-0xF3 */
+	0xBF, 0xE0, 0xEB, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xC1, 0xCB, 0xEE, 0xB8, 0xC1, 0xC8, 0xC1, 0xCC, /* 0xF8-0xFB */
+	0xC1, 0xCA, 0xC1, 0xC9, 0xF0, 0xF3, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8D[512] = {
+	0xF0, 0xF6, 0x00, 0x00, 0xF0, 0xF5, 0x00, 0x00, /* 0x00-0x03 */
+	0xF0, 0xF4, 0xC2, 0xD8, 0xF3, 0x48, 0xF3, 0x49, /* 0x04-0x07 */
+	0xC3, 0xD8, 0xF3, 0x4A, 0xC3, 0xD9, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xC4, 0xBA, 0x00, 0x00, 0xC4, 0xB9, /* 0x0C-0x0F */
+	0xF6, 0x52, 0x00, 0x00, 0x00, 0x00, 0xC5, 0x42, /* 0x10-0x13 */
+	0xF6, 0x53, 0xF7, 0x5C, 0xC5, 0xAB, 0xC5, 0xAC, /* 0x14-0x17 */
+	0x00, 0x00, 0xF8, 0x45, 0x00, 0x00, 0xC6, 0x42, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xA8, 0xAA, 0x00, 0x00, 0xB3, 0x6A, 0xB3, 0x69, /* 0x64-0x67 */
+	0xE0, 0x5C, 0xE0, 0x5D, 0x00, 0x00, 0xBB, 0xAE, /* 0x68-0x6B */
+	0xEB, 0xB9, 0xBD, 0xEA, 0xEB, 0xBA, 0xEE, 0xB9, /* 0x6C-0x6F */
+	0xA8, 0xAB, 0x00, 0x00, 0xD0, 0xB2, 0xAD, 0x76, /* 0x70-0x73 */
+	0xAD, 0x75, 0x00, 0x00, 0xD3, 0xE3, 0xB0, 0x5F, /* 0x74-0x77 */
+	0xD3, 0xE4, 0xD7, 0xD5, 0x00, 0x00, 0xD7, 0xD4, /* 0x78-0x7B */
+	0x00, 0x00, 0xD7, 0xD3, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xDB, 0xEE, 0xB6, 0x58, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0xDB, 0xED, 0xB6, 0x57, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0xDB, 0xEF, 0xB6, 0x56, 0x00, 0x00, /* 0x88-0x8B */
+	0xE0, 0x5F, 0xE0, 0x62, 0xE0, 0x60, 0xE0, 0x61, /* 0x8C-0x8F */
+	0xE0, 0x65, 0xE0, 0x5E, 0xE0, 0x66, 0xE0, 0x63, /* 0x90-0x93 */
+	0xE0, 0x64, 0xBB, 0xB0, 0xE4, 0x56, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xBB, 0xAF, 0x00, 0x00, 0xE7, 0xF2, /* 0x98-0x9B */
+	0xE7, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xBD, 0xEB, /* 0x9C-0x9F */
+	0xE7, 0xEF, 0xE7, 0xF1, 0x00, 0x00, 0xBD, 0xEC, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xEB, 0xBB, 0x00, 0x00, 0xEB, 0xBC, /* 0xA4-0xA7 */
+	0xC1, 0xCD, 0x00, 0x00, 0xF3, 0x4C, 0xF3, 0x4E, /* 0xA8-0xAB */
+	0xF3, 0x4B, 0xF3, 0x4D, 0xF4, 0xD6, 0xF6, 0x54, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0x6F, 0xA8, 0xAC, /* 0xB0-0xB3 */
+	0xAD, 0x77, 0xD3, 0xE5, 0xD3, 0xE7, 0xD3, 0xE6, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xD7, 0xD8, 0xB3, 0x6C, 0x00, 0x00, /* 0xB8-0xBB */
+	0xD7, 0xD6, 0x00, 0x00, 0xB3, 0x6B, 0xD7, 0xD9, /* 0xBC-0xBF */
+	0x00, 0x00, 0xD7, 0xDA, 0xD7, 0xD7, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xDB, 0xFB, 0xB6, 0x60, 0xDB, 0xF3, /* 0xC4-0xC7 */
+	0xDB, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xB6, 0x5B, /* 0xC8-0xCB */
+	0xB6, 0x5E, 0xDB, 0xF2, 0xB6, 0x59, 0xDB, 0xF6, /* 0xCC-0xCF */
+	0xE0, 0x6C, 0xB6, 0x5D, 0x00, 0x00, 0xDB, 0xF1, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xDB, 0xF7, 0xDB, 0xF4, 0xDB, 0xFA, /* 0xD4-0xD7 */
+	0xDB, 0xF0, 0xDB, 0xF8, 0xB6, 0x5C, 0xB6, 0x5F, /* 0xD8-0xDB */
+	0xDB, 0xF5, 0xB6, 0x5A, 0x00, 0x00, 0xB8, 0xF2, /* 0xDC-0xDF */
+	0xE0, 0x68, 0xB8, 0xF1, 0xE0, 0x6F, 0xE0, 0x6E, /* 0xE0-0xE3 */
+	0xB8, 0xF8, 0x00, 0x00, 0xB8, 0xF9, 0xE0, 0x70, /* 0xE4-0xE7 */
+	0xB8, 0xF3, 0xE0, 0x6D, 0xB8, 0xF7, 0xE0, 0x72, /* 0xE8-0xEB */
+	0xE0, 0x69, 0x00, 0x00, 0xE0, 0x6B, 0xB8, 0xF4, /* 0xEC-0xEF */
+	0xE0, 0x67, 0xE0, 0x6A, 0xE0, 0x71, 0xB8, 0xF5, /* 0xF0-0xF3 */
+	0xE0, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0xF6, 0x00, 0x00, /* 0xF8-0xFB */
+	0xBB, 0xB1, 0xE4, 0x5B, 0xE4, 0x61, 0xE4, 0x59, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8E[512] = {
+	0xE4, 0x62, 0x00, 0x00, 0xE4, 0x58, 0xE4, 0x5D, /* 0x00-0x03 */
+	0xE4, 0x63, 0xE4, 0x60, 0xE4, 0x5F, 0xE4, 0x5E, /* 0x04-0x07 */
+	0x00, 0x00, 0xE4, 0x57, 0xE4, 0x5C, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0xE4, 0x5A, 0x00, 0x00, 0xBD, 0xF1, /* 0x0C-0x0F */
+	0xBD, 0xEE, 0xE7, 0xFB, 0xE8, 0x41, 0xE8, 0x43, /* 0x10-0x13 */
+	0xE8, 0x40, 0xE7, 0xF8, 0xE7, 0xFA, 0xE8, 0x45, /* 0x14-0x17 */
+	0xE8, 0x42, 0xE7, 0xFC, 0xE8, 0x46, 0xE7, 0xF9, /* 0x18-0x1B */
+	0xE8, 0x44, 0xBD, 0xEF, 0xBD, 0xF5, 0xBD, 0xF3, /* 0x1C-0x1F */
+	0xE7, 0xF3, 0xBD, 0xF4, 0xBD, 0xF0, 0xE7, 0xF4, /* 0x20-0x23 */
+	0xE7, 0xF6, 0xE7, 0xF5, 0xE7, 0xFD, 0xE7, 0xFE, /* 0x24-0x27 */
+	0x00, 0x00, 0xBD, 0xF2, 0x00, 0x00, 0xBD, 0xED, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xE7, 0xF7, 0x00, 0x00, /* 0x2C-0x2F */
+	0xEB, 0xC6, 0xBF, 0xE2, 0x00, 0x00, 0xEB, 0xBD, /* 0x30-0x33 */
+	0xBF, 0xE3, 0xBF, 0xE6, 0xEB, 0xC2, 0x00, 0x00, /* 0x34-0x37 */
+	0xEB, 0xBF, 0xBF, 0xE5, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xEB, 0xC3, 0xEB, 0xC4, 0xEB, 0xBE, 0xEB, 0xC7, /* 0x3C-0x3F */
+	0xEB, 0xC0, 0xEB, 0xC5, 0xBF, 0xE4, 0x00, 0x00, /* 0x40-0x43 */
+	0xBF, 0xE1, 0xEB, 0xC1, 0x00, 0x00, 0xEE, 0xBF, /* 0x44-0x47 */
+	0xC1, 0xD0, 0xC1, 0xCE, 0xC1, 0xD1, 0xC1, 0xCF, /* 0x48-0x4B */
+	0xEE, 0xBE, 0xEE, 0xBB, 0xEE, 0xBA, 0x00, 0x00, /* 0x4C-0x4F */
+	0xEE, 0xBD, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xBC, /* 0x50-0x53 */
+	0xF1, 0x45, 0xC2, 0xDE, 0xF0, 0xFB, 0xF0, 0xFA, /* 0x54-0x57 */
+	0x00, 0x00, 0xC2, 0xD9, 0xF1, 0x41, 0xF1, 0x40, /* 0x58-0x5B */
+	0xF0, 0xF7, 0xF1, 0x43, 0xF0, 0xFC, 0xC2, 0xDD, /* 0x5C-0x5F */
+	0xF0, 0xF9, 0xF1, 0x42, 0xF0, 0xF8, 0xC2, 0xDA, /* 0x60-0x63 */
+	0xC2, 0xDC, 0xF0, 0xFD, 0xC2, 0xDB, 0xF0, 0xFE, /* 0x64-0x67 */
+	0x00, 0x00, 0xF1, 0x44, 0xF3, 0x52, 0x00, 0x00, /* 0x68-0x6B */
+	0xC3, 0xDE, 0xF3, 0x4F, 0x00, 0x00, 0xF3, 0x53, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xC3, 0xDB, 0xF3, 0x51, /* 0x70-0x73 */
+	0xC3, 0xE0, 0x00, 0x00, 0xC3, 0xDD, 0x00, 0x00, /* 0x74-0x77 */
+	0xF3, 0x50, 0x00, 0x00, 0xC3, 0xDF, 0xF3, 0x54, /* 0x78-0x7B */
+	0xC3, 0xDA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0xC4, 0xBC, 0xC4, 0xBE, 0x00, 0x00, /* 0x80-0x83 */
+	0xF4, 0xD9, 0xC4, 0xBD, 0xF4, 0xD7, 0xC3, 0xDC, /* 0x84-0x87 */
+	0xF4, 0xD8, 0xC4, 0xBB, 0xC5, 0x43, 0xC5, 0x45, /* 0x88-0x8B */
+	0xF6, 0x56, 0xC5, 0x44, 0xF6, 0x55, 0x00, 0x00, /* 0x8C-0x8F */
+	0xF7, 0x61, 0xC5, 0xAD, 0xF7, 0x60, 0xC5, 0xAE, /* 0x90-0x93 */
+	0xF7, 0x5E, 0xF7, 0x5D, 0xF7, 0x62, 0xF7, 0x63, /* 0x94-0x97 */
+	0xF8, 0x46, 0x00, 0x00, 0xF7, 0x5F, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0xF8, 0xC6, 0xF8, 0xC3, 0xF8, 0xC4, /* 0x9C-0x9F */
+	0xF8, 0xC5, 0xC6, 0x5C, 0x00, 0x00, 0xF9, 0x51, /* 0xA0-0xA3 */
+	0xF9, 0x50, 0xF9, 0x4F, 0xF9, 0x70, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF9, 0xBE, 0xF9, 0xAB, 0xC6, 0x6E, 0xA8, 0xAD, /* 0xA8-0xAB */
+	0xB0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xB8, 0xFA, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0xBD, 0xF6, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xEB, 0xC8, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xC2, 0xDF, 0x00, 0x00, 0xF3, 0x55, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF9, 0xAC, 0xA8, 0xAE, 0xAA, 0xEE, /* 0xC8-0xCB */
+	0xAD, 0x79, 0xAD, 0x78, 0x00, 0x00, 0xB0, 0x63, /* 0xCC-0xCF */
+	0x00, 0x00, 0xD3, 0xE8, 0xB0, 0x61, 0xD3, 0xE9, /* 0xD0-0xD3 */
+	0xB0, 0x62, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xDF, /* 0xD4-0xD7 */
+	0xD7, 0xDB, 0x00, 0x00, 0x00, 0x00, 0xB3, 0x6D, /* 0xD8-0xDB */
+	0xD7, 0xDE, 0xD7, 0xDD, 0xD7, 0xDC, 0xB3, 0x6E, /* 0xDC-0xDF */
+	0xD7, 0xE0, 0xD7, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xDC, 0x43, 0xDC, 0x41, 0xDC, 0x45, /* 0xE4-0xE7 */
+	0xDC, 0x46, 0xDC, 0x4C, 0x00, 0x00, 0xDC, 0x48, /* 0xE8-0xEB */
+	0xDC, 0x4A, 0x00, 0x00, 0xDC, 0x42, 0xDB, 0xFC, /* 0xEC-0xEF */
+	0x00, 0x00, 0xDC, 0x49, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xDC, 0x4B, 0xDC, 0x44, 0xDC, 0x47, 0xDB, 0xFD, /* 0xF4-0xF7 */
+	0xB6, 0x62, 0xDC, 0x40, 0xDB, 0xFE, 0xB6, 0x61, /* 0xF8-0xFB */
+	0xB6, 0x63, 0x00, 0x00, 0xB8, 0xFD, 0xE0, 0x75, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_8F[512] = {
+	0xE0, 0x77, 0xE0, 0x76, 0xE0, 0x7B, 0xB8, 0xFB, /* 0x00-0x03 */
+	0x00, 0x00, 0xE0, 0x78, 0xE0, 0x74, 0xE0, 0x79, /* 0x04-0x07 */
+	0xE0, 0x7A, 0xB8, 0xFC, 0xB8, 0xFE, 0xE0, 0x7C, /* 0x08-0x0B */
+	0x00, 0x00, 0xE4, 0x67, 0xE4, 0x66, 0x00, 0x00, /* 0x0C-0x0F */
+	0xE4, 0x64, 0xE4, 0x65, 0xBB, 0xB3, 0xBB, 0xB5, /* 0x10-0x13 */
+	0xBB, 0xB2, 0xBB, 0xB4, 0xE8, 0x4D, 0xE8, 0x4E, /* 0x14-0x17 */
+	0xE8, 0x49, 0x00, 0x00, 0xE8, 0x4A, 0xBD, 0xF8, /* 0x18-0x1B */
+	0xBD, 0xFD, 0xBD, 0xF7, 0xBD, 0xFE, 0xBD, 0xF9, /* 0x1C-0x1F */
+	0xE8, 0x4B, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x4C, /* 0x20-0x23 */
+	0xE8, 0x48, 0xBE, 0x40, 0xBD, 0xFB, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0xBD, 0xFA, 0xBD, 0xFC, 0x00, 0x00, /* 0x28-0x2B */
+	0xE8, 0x47, 0x00, 0x00, 0xEB, 0xCA, 0xBF, 0xE8, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xCC, 0xBF, 0xEA, /* 0x30-0x33 */
+	0xEB, 0xCF, 0xEB, 0xCB, 0xEB, 0xC9, 0xEB, 0xCE, /* 0x34-0x37 */
+	0xBF, 0xE9, 0xEB, 0xCD, 0x00, 0x00, 0xBF, 0xE7, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xC1, 0xD3, 0xC1, 0xD6, /* 0x3C-0x3F */
+	0xEE, 0xC1, 0x00, 0x00, 0xC1, 0xD4, 0xEE, 0xC0, /* 0x40-0x43 */
+	0xC1, 0xD2, 0xC1, 0xD5, 0xF1, 0x46, 0xF1, 0x47, /* 0x44-0x47 */
+	0xF1, 0x48, 0xC2, 0xE0, 0x00, 0x00, 0xF1, 0x49, /* 0x48-0x4B */
+	0x00, 0x00, 0xC2, 0xE1, 0xC3, 0xE2, 0xF3, 0x58, /* 0x4C-0x4F */
+	0xF3, 0x59, 0xF3, 0x57, 0xF3, 0x56, 0xF3, 0x5A, /* 0x50-0x53 */
+	0xC3, 0xE1, 0xF4, 0xDD, 0xF4, 0xDB, 0xF4, 0xDC, /* 0x54-0x57 */
+	0xF4, 0xDE, 0xF4, 0xDA, 0xF4, 0xDF, 0xF6, 0x58, /* 0x58-0x5B */
+	0x00, 0x00, 0xF6, 0x59, 0xF6, 0x57, 0xC5, 0x46, /* 0x5C-0x5F */
+	0xF7, 0x64, 0xC5, 0xAF, 0xF7, 0x65, 0xF8, 0x48, /* 0x60-0x63 */
+	0xF8, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA8, 0xAF, /* 0x98-0x9B */
+	0xB6, 0x64, 0x00, 0x00, 0x00, 0x00, 0xB9, 0x40, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBB, 0xB6, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0xBF, 0xEC, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xBF, 0xEB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xC3, 0xE3, 0xC4, 0x7C, 0xC5, 0x47, /* 0xAC-0xAF */
+	0xA8, 0xB0, 0xB0, 0x64, 0xB9, 0x41, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xF3, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xA6, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xB1, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xA8, 0xB4, 0xA8, 0xB3, 0xA8, 0xB2, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xCB, 0xA5, 0x00, 0x00, 0xCD, 0xCD, /* 0xC8-0xCB */
+	0x00, 0x00, 0xCD, 0xCF, 0xAA, 0xEF, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0xAA, 0xF1, 0xCD, 0xCC, 0xCD, 0xCE, /* 0xD0-0xD3 */
+	0xAA, 0xF0, 0xCD, 0xD1, 0xCD, 0xD0, 0xCD, 0xD2, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xD0, 0xB6, 0xD0, 0xB4, 0xAD, 0x7C, 0xD0, 0xB3, /* 0xE0-0xE3 */
+	0xAD, 0xA3, 0xAD, 0x7E, 0xAD, 0x7B, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xAD, 0xA4, 0x00, 0x00, 0xAD, 0x7D, 0xAD, 0xA2, /* 0xE8-0xEB */
+	0x00, 0x00, 0xAD, 0xA1, 0xD0, 0xB5, 0x00, 0x00, /* 0xEC-0xEF */
+	0xAD, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xB0, 0x6A, 0xD3, 0xEB, 0xD3, 0xF1, 0xB0, 0x67, /* 0xF4-0xF7 */
+	0xB0, 0x6E, 0x00, 0x00, 0xB0, 0x69, 0xD3, 0xEE, /* 0xF8-0xFB */
+	0xD3, 0xF0, 0xB0, 0x6C, 0xD3, 0xEA, 0xD3, 0xED, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_90[512] = {
+	0xB0, 0x68, 0xB0, 0x65, 0xD3, 0xEC, 0xB0, 0x6B, /* 0x00-0x03 */
+	0xD3, 0xEF, 0xB0, 0x6D, 0xB0, 0x66, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD7, 0xE3, /* 0x08-0x0B */
+	0xD7, 0xE6, 0xB3, 0x70, 0x00, 0x00, 0xB3, 0x7A, /* 0x0C-0x0F */
+	0xB3, 0x76, 0xD7, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xB3, 0x7E, 0xB3, 0x77, 0xB3, 0x7C, 0xB3, 0x72, /* 0x14-0x17 */
+	0x00, 0x00, 0xB3, 0x6F, 0xB3, 0x71, 0xB3, 0x7D, /* 0x18-0x1B */
+	0xD7, 0xE5, 0xB3, 0x75, 0xB3, 0x78, 0xB3, 0x74, /* 0x1C-0x1F */
+	0xB3, 0x79, 0xD7, 0xE7, 0xB3, 0x7B, 0xB3, 0x73, /* 0x20-0x23 */
+	0xD7, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xDC, 0x4D, 0xB6, 0x65, 0xDC, 0x4F, /* 0x2C-0x2F */
+	0x00, 0x00, 0xB6, 0x67, 0xB6, 0x69, 0x00, 0x00, /* 0x30-0x33 */
+	0xDC, 0x4E, 0xB6, 0x66, 0xB6, 0x6A, 0x00, 0x00, /* 0x34-0x37 */
+	0xB6, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xB9, 0x47, 0xE0, 0xA3, 0xB9, 0x4F, 0xE0, 0x7E, /* 0x3C-0x3F */
+	0x00, 0x00, 0xB9, 0x50, 0xB9, 0x45, 0x00, 0x00, /* 0x40-0x43 */
+	0xE0, 0xA1, 0x00, 0x00, 0x00, 0x00, 0xB9, 0x4A, /* 0x44-0x47 */
+	0x00, 0x00, 0xE0, 0xA2, 0xB9, 0x43, 0xB9, 0x42, /* 0x48-0x4B */
+	0x00, 0x00, 0xB9, 0x4D, 0xB9, 0x4C, 0xB9, 0x4B, /* 0x4C-0x4F */
+	0xB9, 0x49, 0xB9, 0x4E, 0xE0, 0x7D, 0xB9, 0x44, /* 0x50-0x53 */
+	0xB9, 0x46, 0xB9, 0x48, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xBB, 0xB8, 0xBB, 0xBB, 0x00, 0x00, 0xBB, 0xBF, /* 0x58-0x5B */
+	0xBB, 0xB9, 0xBB, 0xBE, 0xBB, 0xBC, 0x00, 0x00, /* 0x5C-0x5F */
+	0xBB, 0xB7, 0x00, 0x00, 0xBB, 0xBD, 0xBB, 0xBA, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0x52, /* 0x64-0x67 */
+	0xBE, 0x43, 0xBE, 0x41, 0x00, 0x00, 0xE8, 0x53, /* 0x68-0x6B */
+	0x00, 0x00, 0xBE, 0x44, 0xBE, 0x42, 0xE8, 0x51, /* 0x6C-0x6F */
+	0xE8, 0x50, 0x00, 0x00, 0xBF, 0xF0, 0xE8, 0x4F, /* 0x70-0x73 */
+	0xBF, 0xEE, 0xBF, 0xED, 0xEB, 0xD0, 0xBE, 0x45, /* 0x74-0x77 */
+	0xBF, 0xEF, 0xEB, 0xD1, 0xBF, 0xF2, 0xEB, 0xD2, /* 0x78-0x7B */
+	0xBF, 0xF1, 0xC1, 0xD8, 0xEE, 0xC3, 0xC1, 0xD7, /* 0x7C-0x7F */
+	
+	0xC1, 0xDC, 0xC1, 0xDA, 0xC1, 0xDB, 0xC2, 0xE3, /* 0x80-0x83 */
+	0xC1, 0xD9, 0xEE, 0xC2, 0xEB, 0xD3, 0xC2, 0xE2, /* 0x84-0x87 */
+	0xC2, 0xE4, 0x00, 0x00, 0xC3, 0xE4, 0xC3, 0xE5, /* 0x88-0x8B */
+	0x00, 0x00, 0xF4, 0xE0, 0x00, 0x00, 0xC5, 0xDE, /* 0x8C-0x8F */
+	0xC5, 0xDD, 0xA8, 0xB6, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xCA, 0x55, 0xB0, 0x6F, 0x00, 0x00, 0xCA, 0x52, /* 0x94-0x97 */
+	0xCA, 0x53, 0xCA, 0x51, 0x00, 0x00, 0xCA, 0x54, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xCB, 0xAA, 0xCB, 0xA7, /* 0x9C-0x9F */
+	0xCB, 0xAC, 0xCB, 0xA8, 0xA8, 0xB7, 0xA8, 0xBA, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xCB, 0xA9, 0xA8, 0xB9, 0xCB, 0xAB, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xB8, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCD, 0xD5, /* 0xAC-0xAF */
+	0xCD, 0xD7, 0xAA, 0xF4, 0xCD, 0xD3, 0xCD, 0xD6, /* 0xB0-0xB3 */
+	0xCD, 0xD4, 0xAA, 0xF2, 0xAA, 0xF5, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xAA, 0xF3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xD0, 0xB8, 0xD0, 0xBC, 0xD0, 0xB9, /* 0xBC-0xBF */
+	0x00, 0x00, 0xAD, 0xA7, 0x00, 0x00, 0xAD, 0xA8, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xD0, 0xBB, 0x00, 0x00, 0xD0, 0xBD, /* 0xC4-0xC7 */
+	0xD0, 0xBF, 0x00, 0x00, 0xAD, 0xA5, 0xD0, 0xBE, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0xAD, 0xA6, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xD7, 0xEE, 0xD0, 0xBA, 0xD3, 0xF2, 0xD3, 0xFB, /* 0xD4-0xD7 */
+	0xD3, 0xF9, 0xD3, 0xF4, 0xD3, 0xF5, 0xD3, 0xFA, /* 0xD8-0xDB */
+	0xD3, 0xFC, 0xB0, 0x71, 0x00, 0x00, 0xD3, 0xF7, /* 0xDC-0xDF */
+	0xD3, 0xF3, 0xB0, 0x70, 0xB0, 0x72, 0xD3, 0xF6, /* 0xE0-0xE3 */
+	0xD3, 0xFD, 0xD3, 0xF8, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xB3, 0xA1, 0xD7, 0xF1, 0xD7, 0xE9, 0xD7, 0xEF, /* 0xE8-0xEB */
+	0xD7, 0xF0, 0xB3, 0xA2, 0x00, 0x00, 0xD7, 0xE8, /* 0xEC-0xEF */
+	0xD7, 0xEA, 0xD0, 0xB7, 0xD7, 0xEC, 0xD7, 0xED, /* 0xF0-0xF3 */
+	0xD7, 0xEB, 0xB6, 0x6C, 0x00, 0x00, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xDC, 0x56, 0xEB, 0xD4, 0xDC, 0x57, /* 0xF8-0xFB */
+	0xDC, 0x54, 0xB3, 0xA3, 0xB6, 0x6E, 0xDC, 0x53, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_91[512] = {
+	0xDC, 0x59, 0xDC, 0x58, 0xB6, 0x6B, 0xDC, 0x5C, /* 0x00-0x03 */
+	0xDC, 0x52, 0xDC, 0x5B, 0xDC, 0x50, 0xDC, 0x5A, /* 0x04-0x07 */
+	0xDC, 0x55, 0xB6, 0x6D, 0x00, 0x00, 0xE0, 0xAA, /* 0x08-0x0B */
+	0x00, 0x00, 0xE0, 0xA5, 0xE0, 0xAB, 0xE0, 0xA6, /* 0x0C-0x0F */
+	0xE0, 0xA4, 0xE0, 0xA7, 0xB9, 0x51, 0x00, 0x00, /* 0x10-0x13 */
+	0xE0, 0xA9, 0x00, 0x00, 0xE0, 0xA8, 0xB9, 0x52, /* 0x14-0x17 */
+	0xBB, 0xC1, 0xBB, 0xC0, 0xE4, 0x6E, 0xE4, 0x71, /* 0x18-0x1B */
+	0xE4, 0x69, 0xE4, 0x6D, 0xBB, 0xC2, 0xE4, 0x6C, /* 0x1C-0x1F */
+	0xE4, 0x6A, 0xE4, 0x70, 0xE4, 0x6B, 0xE4, 0x68, /* 0x20-0x23 */
+	0xE4, 0x6F, 0x00, 0x00, 0xE8, 0x59, 0xBE, 0x48, /* 0x24-0x27 */
+	0xF1, 0x4A, 0xE8, 0x56, 0xE8, 0x57, 0xE8, 0x55, /* 0x28-0x2B */
+	0xDC, 0x51, 0xBE, 0x47, 0xE8, 0x5A, 0xE8, 0x54, /* 0x2C-0x2F */
+	0xBE, 0x46, 0xBE, 0x49, 0xE8, 0x58, 0xEB, 0xD5, /* 0x30-0x33 */
+	0xBF, 0xF3, 0xEB, 0xD6, 0xEB, 0xD7, 0x00, 0x00, /* 0x34-0x37 */
+	0xEE, 0xC4, 0xC1, 0xDD, 0xF1, 0x4B, 0xF1, 0x4C, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0x4D, 0xF3, 0x5D, /* 0x3C-0x3F */
+	0xF3, 0x5C, 0xF4, 0xE2, 0x00, 0x00, 0xF4, 0xE1, /* 0x40-0x43 */
+	0xF6, 0x5B, 0xF6, 0x5C, 0xF6, 0x5A, 0xF7, 0x66, /* 0x44-0x47 */
+	0xC5, 0xB0, 0xA8, 0xBB, 0xAD, 0xAA, 0xAD, 0xA9, /* 0x48-0x4B */
+	0xB0, 0x75, 0xB0, 0x74, 0xD4, 0x40, 0xD4, 0x41, /* 0x4C-0x4F */
+	0xD3, 0xFE, 0x00, 0x00, 0xB0, 0x73, 0xD7, 0xF5, /* 0x50-0x53 */
+	0x00, 0x00, 0xD7, 0xF6, 0xD7, 0xF2, 0xB3, 0xA4, /* 0x54-0x57 */
+	0xD7, 0xF3, 0x00, 0x00, 0xD7, 0xF4, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDC, 0x5F, /* 0x5C-0x5F */
+	0xDC, 0x61, 0xDC, 0x5D, 0xDC, 0x60, 0xB6, 0x6F, /* 0x60-0x63 */
+	0xDC, 0x5E, 0xB6, 0x70, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xDD, 0x73, 0xB9, 0x55, 0xB9, 0x54, 0x00, 0x00, /* 0x68-0x6B */
+	0xB9, 0x53, 0x00, 0x00, 0xE0, 0xAC, 0xE0, 0xAD, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x73, 0xE4, 0x75, /* 0x70-0x73 */
+	0xBB, 0xC6, 0xBB, 0xC3, 0x00, 0x00, 0xBB, 0xC5, /* 0x74-0x77 */
+	0xBB, 0xC4, 0xE4, 0x74, 0xE4, 0x72, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xE8, 0x61, 0xE8, 0x5E, 0xE8, 0x5F, 0xBE, 0x4D, /* 0x80-0x83 */
+	0xE8, 0x60, 0xE8, 0x5B, 0xE8, 0x5C, 0xBE, 0x4A, /* 0x84-0x87 */
+	0x00, 0x00, 0xBE, 0x4B, 0xE8, 0x5D, 0xBE, 0x4C, /* 0x88-0x8B */
+	0x00, 0x00, 0xEB, 0xDB, 0x00, 0x00, 0xEB, 0xDC, /* 0x8C-0x8F */
+	0xEB, 0xD9, 0xEB, 0xDA, 0xBF, 0xF4, 0xEB, 0xD8, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0xEE, 0xC8, 0xEE, 0xC5, 0xEE, 0xC7, /* 0x98-0x9B */
+	0xC1, 0xE0, 0xEE, 0xCB, 0xC1, 0xDF, 0xEE, 0xC9, /* 0x9C-0x9F */
+	0xEE, 0xCC, 0xEE, 0xCA, 0xEE, 0xC6, 0xC1, 0xDE, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xF1, 0x4F, 0x00, 0x00, 0xF1, 0x50, /* 0xA4-0xA7 */
+	0xF1, 0x4E, 0x00, 0x00, 0xF1, 0x52, 0xC2, 0xE5, /* 0xA8-0xAB */
+	0xC2, 0xE6, 0xF3, 0x5F, 0xC3, 0xE7, 0xF1, 0x51, /* 0xAC-0xAF */
+	0xF3, 0x5E, 0xC3, 0xE6, 0xF4, 0xE5, 0xF4, 0xE6, /* 0xB0-0xB3 */
+	0xC4, 0xBF, 0xF4, 0xE4, 0x00, 0x00, 0xF4, 0xE3, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF6, 0x5D, 0xC5, 0x48, 0x00, 0x00, /* 0xB8-0xBB */
+	0xF8, 0x49, 0xF8, 0xC8, 0xF8, 0xC7, 0x00, 0x00, /* 0xBC-0xBF */
+	0xC6, 0x43, 0xC6, 0x5D, 0xF8, 0xC9, 0xF9, 0x71, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xC6, 0x6F, 0xA8, 0xBC, 0xAA, 0xF6, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xB9, 0x56, 0x00, 0x00, 0xC4, 0xC0, /* 0xC8-0xCB */
+	0xA8, 0xBD, 0xAD, 0xAB, 0xB3, 0xA5, 0xB6, 0x71, /* 0xCC-0xCF */
+	0xC2, 0xE7, 0xAA, 0xF7, 0x00, 0x00, 0xD0, 0xC1, /* 0xD0-0xD3 */
+	0xD0, 0xC0, 0xD4, 0x42, 0x00, 0x00, 0xB0, 0x78, /* 0xD4-0xD7 */
+	0xB0, 0x76, 0xB0, 0x7A, 0xD4, 0x44, 0x00, 0x00, /* 0xD8-0xDB */
+	0xB0, 0x79, 0xB0, 0x77, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xD4, 0x43, 0xB3, 0xA8, /* 0xE0-0xE3 */
+	0xD7, 0xFC, 0x00, 0x00, 0xB3, 0xA7, 0xB3, 0xA9, /* 0xE4-0xE7 */
+	0xD8, 0x42, 0xB3, 0xAB, 0xD7, 0xFE, 0xD8, 0x40, /* 0xE8-0xEB */
+	0xD7, 0xF7, 0xB3, 0xAA, 0xD8, 0x43, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xD7, 0xF9, 0x00, 0x00, 0xD7, 0xFA, /* 0xF0-0xF3 */
+	0xD7, 0xF8, 0xB3, 0xA6, 0x00, 0x00, 0xD8, 0x41, /* 0xF4-0xF7 */
+	0xD7, 0xFB, 0xD7, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xDC, 0x6D, 0x00, 0x00, 0xDC, 0x6C, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_92[512] = {
+	0xDC, 0x6A, 0xDC, 0x62, 0xDC, 0x71, 0xDC, 0x65, /* 0x00-0x03 */
+	0xDC, 0x6F, 0xDC, 0x76, 0xDC, 0x6E, 0xB6, 0x79, /* 0x04-0x07 */
+	0x00, 0x00, 0xB6, 0x75, 0xDC, 0x63, 0x00, 0x00, /* 0x08-0x0B */
+	0xDC, 0x69, 0xB6, 0x77, 0x00, 0x00, 0xDC, 0x68, /* 0x0C-0x0F */
+	0xB6, 0x78, 0xB6, 0x7A, 0xDC, 0x6B, 0x00, 0x00, /* 0x10-0x13 */
+	0xB6, 0x72, 0xB6, 0x73, 0xDC, 0x77, 0xDC, 0x75, /* 0x14-0x17 */
+	0x00, 0x00, 0xDC, 0x74, 0xDC, 0x66, 0x00, 0x00, /* 0x18-0x1B */
+	0xDC, 0x72, 0x00, 0x00, 0xB6, 0x76, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB6, 0x74, /* 0x20-0x23 */
+	0xDC, 0x73, 0xDC, 0x64, 0xDC, 0x67, 0xDC, 0x70, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xE4, 0xBA, 0xE0, 0xB7, 0x00, 0x00, /* 0x2C-0x2F */
+	0xE0, 0xB0, 0xE0, 0xC3, 0xE0, 0xCC, 0xE0, 0xB3, /* 0x30-0x33 */
+	0xB9, 0x61, 0x00, 0x00, 0xE0, 0xC0, 0xB9, 0x57, /* 0x34-0x37 */
+	0xB9, 0x59, 0xB9, 0x65, 0xE0, 0xB1, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xB9, 0x5A, 0xB9, 0x5C, 0xB9, 0x66, /* 0x3C-0x3F */
+	0xB9, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0xB9, 0x64, 0xE0, 0xB9, 0x00, 0x00, /* 0x44-0x47 */
+	0xE0, 0xAE, 0xB9, 0x62, 0xE0, 0xB8, 0xB9, 0x5E, /* 0x48-0x4B */
+	0xE0, 0xCA, 0xB9, 0x63, 0xE0, 0xC8, 0xE0, 0xBC, /* 0x4C-0x4F */
+	0xE0, 0xC6, 0xB9, 0x60, 0xE0, 0xAF, 0xE0, 0xC9, /* 0x50-0x53 */
+	0xE0, 0xC4, 0x00, 0x00, 0xE0, 0xCB, 0xB9, 0x58, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0x67, 0xB9, 0x5D, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xE0, 0xB5, 0x00, 0x00, /* 0x5C-0x5F */
+	0xE0, 0xBD, 0xE0, 0xC1, 0x00, 0x00, 0xE0, 0xC5, /* 0x60-0x63 */
+	0xB9, 0x5F, 0xE0, 0xB4, 0xE0, 0xB2, 0xE0, 0xBE, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE0, 0xBB, 0xE0, 0xBA, 0x00, 0x00, 0xE0, 0xBF, /* 0x6C-0x6F */
+	0xE0, 0xC2, 0x00, 0x00, 0xE0, 0xC7, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0x78, 0x00, 0x00, /* 0x74-0x77 */
+	0xBB, 0xC7, 0xE4, 0xA4, 0xE4, 0x7A, 0xBB, 0xCC, /* 0x78-0x7B */
+	0xBB, 0xD0, 0xE4, 0xAD, 0xE4, 0xB5, 0xE4, 0xA6, /* 0x7C-0x7F */
+	
+	0xBB, 0xC8, 0x00, 0x00, 0xE4, 0xAA, 0xE0, 0xB6, /* 0x80-0x83 */
+	0x00, 0x00, 0xBB, 0xC9, 0xE4, 0xB1, 0xE4, 0xB6, /* 0x84-0x87 */
+	0xE4, 0xAE, 0x00, 0x00, 0xE4, 0xB0, 0xE4, 0xB9, /* 0x88-0x8B */
+	0xE4, 0xB2, 0xE4, 0x7E, 0xE4, 0xA9, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xBB, 0xD1, 0x00, 0x00, 0xBB, 0xCD, /* 0x90-0x93 */
+	0xE4, 0x7C, 0xE4, 0xAB, 0xBB, 0xCB, 0xE4, 0xA5, /* 0x94-0x97 */
+	0xBB, 0xCA, 0xE4, 0xB3, 0xE4, 0xA2, 0xE4, 0x79, /* 0x98-0x9B */
+	0xBB, 0xCE, 0xE4, 0xB8, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xE4, 0x7B, 0xE4, 0xAF, 0xE4, 0xAC, 0xE4, 0xA7, /* 0xA0-0xA3 */
+	0xE4, 0x77, 0xE4, 0x76, 0xE4, 0xA1, 0xE4, 0xB4, /* 0xA4-0xA7 */
+	0xBB, 0xCF, 0xE4, 0xB7, 0xE4, 0x7D, 0xE4, 0xA3, /* 0xA8-0xAB */
+	0xBE, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0xBE, 0x5A, 0xBE, 0x55, /* 0xB0-0xB3 */
+	0xE8, 0xA4, 0xE8, 0xA1, 0xE8, 0x67, 0xBE, 0x50, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF9, 0xD7, 0x00, 0x00, 0xBE, 0x4F, /* 0xB8-0xBB */
+	0xBE, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xE8, 0x65, 0xBE, 0x54, 0xE8, 0x71, 0xE8, 0x63, /* 0xC0-0xC3 */
+	0xE8, 0x64, 0xBE, 0x4E, 0xE8, 0xA3, 0xBE, 0x58, /* 0xC4-0xC7 */
+	0xE8, 0x74, 0xE8, 0x79, 0xE8, 0x73, 0xEB, 0xEE, /* 0xC8-0xCB */
+	0xE8, 0x6F, 0xE8, 0x77, 0xE8, 0x75, 0xE8, 0x68, /* 0xCC-0xCF */
+	0xE8, 0x62, 0xE8, 0x7D, 0xBE, 0x57, 0xE8, 0x7E, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xE8, 0x78, 0x00, 0x00, 0xE8, 0x6D, /* 0xD4-0xD7 */
+	0xE8, 0x6B, 0xE8, 0x66, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0xE8, 0x6E, 0xE8, 0x7B, 0xE8, 0x6A, /* 0xDC-0xDF */
+	0xE8, 0x7A, 0xE8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xBE, 0x53, 0x00, 0x00, 0xE8, 0x76, 0xE8, 0x7C, /* 0xE4-0xE7 */
+	0xE8, 0x72, 0xE8, 0x6C, 0xBE, 0x51, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xA8, 0xE8, 0x70, /* 0xEC-0xEF */
+	0xBE, 0x59, 0xE8, 0x69, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEB, 0xF4, /* 0xF4-0xF7 */
+	0xBF, 0xF7, 0xEB, 0xF3, 0xEB, 0xF0, 0xEC, 0x44, /* 0xF8-0xFB */
+	0xBF, 0xFB, 0x00, 0x00, 0xEC, 0x41, 0xEB, 0xF8, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_93[512] = {
+	0xEC, 0x43, 0xEB, 0xE9, 0xEB, 0xF6, 0x00, 0x00, /* 0x00-0x03 */
+	0xBF, 0xFD, 0x00, 0x00, 0xEB, 0xE1, 0x00, 0x00, /* 0x04-0x07 */
+	0xEB, 0xDF, 0xEC, 0x42, 0x00, 0x00, 0xEC, 0x40, /* 0x08-0x0B */
+	0xEB, 0xFE, 0xEB, 0xED, 0xEB, 0xEC, 0xEB, 0xE2, /* 0x0C-0x0F */
+	0xC0, 0x40, 0x00, 0x00, 0xEB, 0xE8, 0xEB, 0xF2, /* 0x10-0x13 */
+	0xEB, 0xFD, 0xC0, 0x43, 0xEC, 0x45, 0x00, 0x00, /* 0x14-0x17 */
+	0xC1, 0xE8, 0xC0, 0x45, 0xBF, 0xFE, 0xEB, 0xE6, /* 0x18-0x1B */
+	0x00, 0x00, 0xEB, 0xEF, 0xEB, 0xDE, 0xEB, 0xE0, /* 0x1C-0x1F */
+	0xBF, 0xF5, 0xC0, 0x42, 0xBF, 0xFA, 0xEB, 0xE7, /* 0x20-0x23 */
+	0xEB, 0xF7, 0xEB, 0xF1, 0xC0, 0x41, 0xEB, 0xDD, /* 0x24-0x27 */
+	0xC1, 0xE3, 0xEB, 0xF9, 0xEB, 0xFC, 0xBF, 0xFC, /* 0x28-0x2B */
+	0x00, 0x00, 0xEB, 0xEB, 0xC0, 0x44, 0xBF, 0xF9, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0xF8, /* 0x30-0x33 */
+	0xEB, 0xF5, 0xEB, 0xFB, 0xBF, 0xF6, 0x00, 0x00, /* 0x34-0x37 */
+	0xEB, 0xE4, 0xEB, 0xFA, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0xEB, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xEB, 0xEA, 0xEE, 0xD2, /* 0x44-0x47 */
+	0x00, 0x00, 0xEE, 0xD7, 0xC1, 0xE5, 0xC1, 0xE7, /* 0x48-0x4B */
+	0xEE, 0xDD, 0xC1, 0xE1, 0xEE, 0xEC, 0xEE, 0xE3, /* 0x4C-0x4F */
+	0xEE, 0xD8, 0xEE, 0xD9, 0xEE, 0xE2, 0x00, 0x00, /* 0x50-0x53 */
+	0xC1, 0xEE, 0xEE, 0xE1, 0xEE, 0xD1, 0xEE, 0xE0, /* 0x54-0x57 */
+	0xEE, 0xD4, 0xEE, 0xED, 0xC1, 0xED, 0xC1, 0xEB, /* 0x58-0x5B */
+	0xEE, 0xD5, 0x00, 0x00, 0xEE, 0xE8, 0x00, 0x00, /* 0x5C-0x5F */
+	0xEE, 0xDA, 0xEE, 0xE7, 0x00, 0x00, 0xEE, 0xE9, /* 0x60-0x63 */
+	0xEE, 0xD0, 0xC1, 0xE6, 0x00, 0x00, 0xEE, 0xEA, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xEE, 0xDE, 0x00, 0x00, /* 0x68-0x6B */
+	0xC1, 0xEA, 0xEE, 0xDB, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0xC1, 0xEC, 0xEE, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xC1, 0xE4, 0xEE, 0xD6, 0xEE, 0xE5, /* 0x74-0x77 */
+	0x00, 0x00, 0xEE, 0xDF, 0xEB, 0xE3, 0xEE, 0xE6, /* 0x78-0x7B */
+	0xEE, 0xD3, 0x00, 0x00, 0xC1, 0xE9, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xEE, 0xEB, 0x00, 0x00, 0xC1, 0xE2, 0xEE, 0xCE, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xF1, 0x60, 0xF1, 0x59, 0xC2, 0xE9, 0x00, 0x00, /* 0x88-0x8B */
+	0xF1, 0x54, 0xF1, 0x63, 0xF1, 0x5B, 0xEE, 0xDC, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF1, 0x65, 0xF1, 0x55, 0x00, 0x00, /* 0x90-0x93 */
+	0xC2, 0xE8, 0xF1, 0x5F, 0xC2, 0xEA, 0xC2, 0xF2, /* 0x94-0x97 */
+	0xC2, 0xF0, 0xF1, 0x61, 0xC2, 0xF1, 0xF1, 0x57, /* 0x98-0x9B */
+	0x00, 0x00, 0xF1, 0x58, 0xF1, 0x5D, 0xF1, 0x62, /* 0x9C-0x9F */
+	0x00, 0x00, 0xEE, 0xCD, 0xC2, 0xEB, 0xF1, 0x6A, /* 0xA0-0xA3 */
+	0xF1, 0x67, 0xF1, 0x6B, 0xF1, 0x5E, 0xF1, 0x5A, /* 0xA4-0xA7 */
+	0xF1, 0x68, 0xF3, 0x6A, 0xF1, 0x5C, 0x00, 0x00, /* 0xA8-0xAB */
+	0xC2, 0xEE, 0x00, 0x00, 0xC2, 0xED, 0xEE, 0xCF, /* 0xAC-0xAF */
+	0xC2, 0xEF, 0xF1, 0x64, 0xF1, 0x66, 0xC2, 0xEC, /* 0xB0-0xB3 */
+	0xF1, 0x69, 0xF1, 0x53, 0x00, 0x00, 0xF1, 0x56, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0xF3, 0x73, 0x00, 0x00, 0xF3, 0x63, 0xC3, 0xEB, /* 0xC0-0xC3 */
+	0xF3, 0x71, 0x00, 0x00, 0x00, 0x00, 0xF3, 0x61, /* 0xC4-0xC7 */
+	0xC3, 0xEC, 0x00, 0x00, 0xF3, 0x6C, 0x00, 0x00, /* 0xC8-0xCB */
+	0xF3, 0x68, 0xC3, 0xF1, 0xF3, 0x72, 0xF3, 0x62, /* 0xCC-0xCF */
+	0xF3, 0x65, 0xC3, 0xE9, 0xF3, 0x74, 0x00, 0x00, /* 0xD0-0xD3 */
+	0xF3, 0x6D, 0xF3, 0x70, 0xC3, 0xEF, 0xC3, 0xF4, /* 0xD4-0xD7 */
+	0xC3, 0xF2, 0xF3, 0x69, 0xF3, 0x64, 0x00, 0x00, /* 0xD8-0xDB */
+	0xC3, 0xED, 0xC3, 0xEE, 0xF3, 0x60, 0xC3, 0xEA, /* 0xDC-0xDF */
+	0x00, 0x00, 0xC3, 0xE8, 0xC3, 0xF0, 0xF3, 0x6F, /* 0xE0-0xE3 */
+	0xC3, 0xF3, 0x00, 0x00, 0xF3, 0x6B, 0xF3, 0x75, /* 0xE4-0xE7 */
+	0xC3, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0xF3, 0x67, 0x00, 0x00, 0xF3, 0x6E, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xF4, 0xF3, 0xF5, 0x42, 0xF4, 0xF5, /* 0xF4-0xF7 */
+	0xF4, 0xFC, 0xF3, 0x66, 0xF4, 0xFA, 0xF4, 0xE9, /* 0xF8-0xFB */
+	0xF5, 0x40, 0xC4, 0xC3, 0xF4, 0xED, 0xF4, 0xFE, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_94[512] = {
+	0xF4, 0xF4, 0x00, 0x00, 0x00, 0x00, 0xC4, 0xC2, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0x44, 0xF4, 0xF6, /* 0x04-0x07 */
+	0x00, 0x00, 0xF4, 0xFB, 0xF4, 0xFD, 0xF4, 0xE7, /* 0x08-0x0B */
+	0xF5, 0x41, 0xF4, 0xF2, 0xF4, 0xF7, 0xF4, 0xEB, /* 0x0C-0x0F */
+	0xF4, 0xEF, 0xF5, 0x43, 0xF4, 0xF9, 0xF4, 0xE8, /* 0x10-0x13 */
+	0xF4, 0xEC, 0xF4, 0xEE, 0xF4, 0xF8, 0x00, 0x00, /* 0x14-0x17 */
+	0xC4, 0xC1, 0xF4, 0xF1, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0xF4, 0xEA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xF4, 0xF0, 0xF6, 0x61, 0xF6, 0x66, 0xC5, 0x4F, /* 0x28-0x2B */
+	0xF6, 0x68, 0x00, 0x00, 0xC5, 0x49, 0x00, 0x00, /* 0x2C-0x2F */
+	0xF6, 0x64, 0xF6, 0x6A, 0xC5, 0x4E, 0xC5, 0x4A, /* 0x30-0x33 */
+	0x00, 0x00, 0xC5, 0x4B, 0xF6, 0x60, 0xF6, 0x67, /* 0x34-0x37 */
+	0xC5, 0x4D, 0xF6, 0x65, 0xC5, 0x4C, 0xF6, 0x5F, /* 0x38-0x3B */
+	0xF6, 0x63, 0xF6, 0x62, 0x00, 0x00, 0xF6, 0x5E, /* 0x3C-0x3F */
+	0xF6, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xC5, 0xB1, 0xF7, 0x6D, 0xF7, 0x70, 0xF7, 0x6C, /* 0x44-0x47 */
+	0xF7, 0x6E, 0xF7, 0x6F, 0xF7, 0x69, 0xF7, 0x6A, /* 0x48-0x4B */
+	0xF7, 0x67, 0x00, 0x00, 0x00, 0x00, 0xF7, 0x6B, /* 0x4C-0x4F */
+	0xF7, 0x68, 0xC5, 0xB2, 0xC5, 0xB3, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0xF8, 0x4B, 0x00, 0x00, 0xF8, 0x4D, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0xF8, 0x4C, 0xF8, 0x4E, 0x00, 0x00, /* 0x5C-0x5F */
+	0xC5, 0xE0, 0x00, 0x00, 0xF8, 0x4A, 0xC5, 0xDF, /* 0x60-0x63 */
+	0xC5, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0xF8, 0xCB, 0xF8, 0xCC, 0xC6, 0x44, 0xF8, 0xCA, /* 0x68-0x6B */
+	0x00, 0x00, 0xF9, 0x53, 0xF9, 0x52, 0xF9, 0x54, /* 0x6C-0x6F */
+	0xC6, 0x5F, 0xF9, 0x55, 0xC6, 0x5E, 0xF9, 0x56, /* 0x70-0x73 */
+	0xF9, 0x72, 0xF9, 0x75, 0xF9, 0x74, 0xC6, 0x68, /* 0x74-0x77 */
+	0xF9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xC6, 0x72, 0xC6, 0x70, 0xC6, 0x71, 0xC6, 0x77, /* 0x7C-0x7F */
+	
+	0xF9, 0xC0, 0xF9, 0xC1, 0xF9, 0xBF, 0xF9, 0xC9, /* 0x80-0x83 */
+};
+
+static unsigned char u2c_95[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAA, 0xF8, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0x44, 0xDC, 0x78, /* 0x78-0x7B */
+	0xE8, 0xA5, 0xF3, 0x76, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xAA, 0xF9, 0x00, 0x00, 0xAD, 0xAC, 0xB0, 0x7B, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xD8, 0x45, 0x00, 0x00, /* 0x84-0x87 */
+	0xD8, 0x46, 0xB3, 0xAC, 0x00, 0x00, 0xB6, 0x7D, /* 0x88-0x8B */
+	0xDC, 0x7A, 0xDC, 0x79, 0xB6, 0xA3, 0xB6, 0x7C, /* 0x8C-0x8F */
+	0xDC, 0x7B, 0xB6, 0x7E, 0xB6, 0xA2, 0xB6, 0xA1, /* 0x90-0x93 */
+	0xB6, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xB9, 0x68, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xD0, /* 0x98-0x9B */
+	0xE0, 0xCE, 0x00, 0x00, 0xE0, 0xCF, 0xE0, 0xCD, /* 0x9C-0x9F */
+	0x00, 0x00, 0xBB, 0xD2, 0x00, 0x00, 0xBB, 0xD5, /* 0xA0-0xA3 */
+	0xBB, 0xD7, 0xBB, 0xD6, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xBB, 0xD3, 0xBB, 0xD4, 0x00, 0x00, 0xE8, 0xA7, /* 0xA8-0xAB */
+	0xE8, 0xA6, 0xBE, 0x5B, 0xE8, 0xA8, 0x00, 0x00, /* 0xAC-0xAF */
+	0xE8, 0xA9, 0xBE, 0x5C, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xEC, 0x4D, 0xEC, 0x4B, 0xEE, 0xF3, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xEC, 0x49, 0xEC, 0x4A, 0xC0, 0x46, /* 0xB8-0xBB */
+	0xEC, 0x46, 0xEC, 0x4E, 0xEC, 0x48, 0xEC, 0x4C, /* 0xBC-0xBF */
+	0xEE, 0xEF, 0x00, 0x00, 0x00, 0x00, 0xEE, 0xF1, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xEE, 0xF2, 0xC1, 0xF3, 0xEE, 0xEE, /* 0xC4-0xC7 */
+	0xC1, 0xF2, 0xEE, 0xF0, 0xC1, 0xEF, 0xC1, 0xF0, /* 0xC8-0xCB */
+	0xC1, 0xF1, 0xEC, 0x47, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0xC2, 0xF5, 0xF1, 0x6E, 0xF1, 0x6C, 0xF1, 0x6D, /* 0xD0-0xD3 */
+	0xC2, 0xF3, 0xC2, 0xF6, 0xC2, 0xF4, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0x77, 0xF3, 0x78, /* 0xD8-0xDB */
+	0xC3, 0xF6, 0x00, 0x00, 0xF5, 0x45, 0xF5, 0x47, /* 0xDC-0xDF */
+	0xF5, 0x46, 0xC4, 0xC4, 0xC5, 0x50, 0xF6, 0x6D, /* 0xE0-0xE3 */
+	0xF6, 0x6C, 0xF6, 0x6B, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
+static unsigned char u2c_96[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xAA, 0xFA, 0x00, 0x00, 0xC9, 0xAA, 0x00, 0x00, /* 0x1C-0x1F */
+	0xCA, 0x58, 0xA6, 0xE9, 0xCA, 0x56, 0xCA, 0x59, /* 0x20-0x23 */
+	0xCA, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xCB, 0xAE, 0x00, 0x00, 0xA8, 0xC1, 0x00, 0x00, /* 0x28-0x2B */
+	0xA8, 0xC2, 0xCB, 0xB0, 0xA8, 0xBF, 0xCB, 0xAF, /* 0x2C-0x2F */
+	0xCB, 0xAD, 0xA8, 0xC0, 0xA8, 0xBE, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0xCD, 0xD8, 0xCD, 0xDB, 0xAA, 0xFD, /* 0x38-0x3B */
+	0xCD, 0xDA, 0xCD, 0xD9, 0x00, 0x00, 0xAA, 0xFC, /* 0x3C-0x3F */
+	0xAA, 0xFB, 0x00, 0x00, 0xAB, 0x40, 0xCD, 0xDC, /* 0x40-0x43 */
+	0xAA, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xD0, 0xC6, 0xAD, 0xAE, /* 0x48-0x4B */
+	0xAD, 0xAF, 0xAD, 0xB0, 0xD0, 0xC7, 0xD0, 0xC3, /* 0x4C-0x4F */
+	0xAD, 0xAD, 0xD0, 0xC4, 0x00, 0x00, 0xD0, 0xC5, /* 0x50-0x53 */
+	0xD0, 0xC2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0xB0, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xB0, 0xA1, /* 0x58-0x5B */
+	0xD4, 0x45, 0xB0, 0xA2, 0xB0, 0xA5, 0xD4, 0x46, /* 0x5C-0x5F */
+	0x00, 0x00, 0xB0, 0x7E, 0xB0, 0x7C, 0xB0, 0x7D, /* 0x60-0x63 */
+	0xB0, 0xA3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xB3, 0xAD, 0xD8, 0x49, /* 0x68-0x6B */
+	0xB3, 0xB5, 0xD8, 0x48, 0x00, 0x00, 0xD8, 0x4B, /* 0x6C-0x6F */
+	0xB3, 0xB1, 0xD8, 0x4A, 0xB6, 0xAB, 0xB3, 0xAF, /* 0x70-0x73 */
+	0xB3, 0xB2, 0xB3, 0xAE, 0xB3, 0xB3, 0xB3, 0xB4, /* 0x74-0x77 */
+	0xB3, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0xD8, 0x47, 0xB6, 0xA7, 0xDC, 0x7D, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xDC, 0xA3, 0x00, 0x00, 0x00, 0x00, 0xDC, 0xA2, /* 0x80-0x83 */
+	0xB6, 0xAC, 0xB6, 0xA8, 0xB6, 0xA9, 0xDC, 0x7C, /* 0x84-0x87 */
+	0xDC, 0x7E, 0xDC, 0xA1, 0xB6, 0xA4, 0xB6, 0xA6, /* 0x88-0x8B */
+	0x00, 0x00, 0xB6, 0xAA, 0xB6, 0xA5, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xE0, 0xD3, 0xE0, 0xD1, 0xE0, 0xD2, /* 0x90-0x93 */
+	0xB9, 0x6A, 0xB9, 0x6B, 0x00, 0x00, 0xE0, 0xD4, /* 0x94-0x97 */
+	0xB9, 0x69, 0xBB, 0xD8, 0x00, 0x00, 0xBB, 0xDA, /* 0x98-0x9B */
+	0xBB, 0xD9, 0x00, 0x00, 0xE4, 0xBB, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xE4, 0xBC, 0xE8, 0xAB, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xE8, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x47, /* 0xA4-0xA7 */
+	0xC0, 0x48, 0xEC, 0x4F, 0xC0, 0x49, 0x00, 0x00, /* 0xA8-0xAB */
+	0xEE, 0xF6, 0x00, 0x00, 0xEE, 0xF4, 0x00, 0x00, /* 0xAC-0xAF */
+	0xEE, 0xF5, 0xC1, 0xF4, 0x00, 0x00, 0xF1, 0x6F, /* 0xB0-0xB3 */
+	0xC3, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xC1, 0xF5, 0xAB, 0x41, 0x00, 0x00, 0xB0, 0xA6, /* 0xB8-0xBB */
+	0xD4, 0x47, 0x00, 0x00, 0x00, 0x00, 0xD8, 0x4C, /* 0xBC-0xBF */
+	0xB3, 0xB6, 0xB6, 0xAD, 0xDC, 0xA4, 0xDC, 0xA6, /* 0xC0-0xC3 */
+	0xB6, 0xAF, 0xB6, 0xAE, 0xB6, 0xB0, 0xB6, 0xB1, /* 0xC4-0xC7 */
+	0xDC, 0xA5, 0xB9, 0x6E, 0xB9, 0x6F, 0xB9, 0x6D, /* 0xC8-0xCB */
+	0xBB, 0xDB, 0xB9, 0x6C, 0xE0, 0xD5, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0xBB, 0xDC, 0xE8, 0xAC, /* 0xD0-0xD3 */
+	0xEC, 0x50, 0xC0, 0x4A, 0xC1, 0xF6, 0xF1, 0x70, /* 0xD4-0xD7 */
+	0xF1, 0x74, 0xC2, 0xF9, 0xF1, 0x71, 0xC2, 0xFA, /* 0xD8-0xDB */
+	0xC2, 0xF8, 0xF1, 0x75, 0xC2, 0xFB, 0xF1, 0x73, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF3, 0x79, 0xC2, 0xF7, 0xC3, 0xF8, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xF8, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xAB, 0x42, 0xB3, 0xB8, 0xB3, 0xB7, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB6, 0xB2, /* 0xEC-0xEF */
+	0xDC, 0xA8, 0xDC, 0xA7, 0xB6, 0xB3, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0xE0, 0xD9, 0xB9, 0x73, 0xB9, 0x70, /* 0xF4-0xF7 */
+	0xE0, 0xD8, 0xB9, 0x72, 0xE0, 0xD6, 0xB9, 0x71, /* 0xF8-0xFB */
+	0x00, 0x00, 0xE0, 0xD7, 0x00, 0x00, 0xE4, 0xBD, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_97[512] = {
+	0xBB, 0xDD, 0x00, 0x00, 0xE8, 0xAF, 0x00, 0x00, /* 0x00-0x03 */
+	0xBE, 0x5D, 0xE8, 0xAD, 0xBE, 0x5E, 0xBE, 0x5F, /* 0x04-0x07 */
+	0xE8, 0xAE, 0xBE, 0x60, 0x00, 0x00, 0xEC, 0x51, /* 0x08-0x0B */
+	0x00, 0x00, 0xC0, 0x4E, 0xC0, 0x4B, 0xC0, 0x50, /* 0x0C-0x0F */
+	0xEC, 0x53, 0xC0, 0x4C, 0xEC, 0x52, 0xC0, 0x4F, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0x4D, 0x00, 0x00, /* 0x14-0x17 */
+	0xEE, 0xF9, 0xEE, 0xFB, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xC1, 0xF7, 0xEE, 0xFA, 0xC1, 0xF8, 0xEE, 0xF8, /* 0x1C-0x1F */
+	0xEE, 0xF7, 0x00, 0x00, 0xF1, 0x77, 0xF1, 0x76, /* 0x20-0x23 */
+	0xC2, 0xFC, 0xF1, 0x78, 0xF3, 0x7E, 0xC3, 0xFA, /* 0x24-0x27 */
+	0xF3, 0x7D, 0xF3, 0x7A, 0xC3, 0xF9, 0xF3, 0x7B, /* 0x28-0x2B */
+	0xF3, 0x7C, 0x00, 0x00, 0xF5, 0x48, 0xF5, 0x49, /* 0x2C-0x2F */
+	0xC4, 0xC5, 0x00, 0x00, 0xC5, 0x53, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xF6, 0x6E, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0xC5, 0x51, 0xC5, 0x52, 0xF6, 0x6F, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xC5, 0xB4, 0xC5, 0xB5, 0xF7, 0x71, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0xC6, 0x45, 0xF8, 0xCF, /* 0x40-0x43 */
+	0xC6, 0x47, 0x00, 0x00, 0xF8, 0xCE, 0xF8, 0xD0, /* 0x44-0x47 */
+	0xC6, 0x46, 0xF9, 0x57, 0x00, 0x00, 0xF9, 0xAD, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xAB, 0x43, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0x74, 0x00, 0x00, /* 0x54-0x57 */
+	0xE4, 0xBE, 0x00, 0x00, 0xE8, 0xB0, 0xC0, 0x51, /* 0x58-0x5B */
+	0xC0, 0x52, 0x00, 0x00, 0xAB, 0x44, 0x00, 0x00, /* 0x5C-0x5F */
+	0xBE, 0x61, 0xC3, 0xFB, 0xAD, 0xB1, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0x53, 0x00, 0x00, /* 0x64-0x67 */
+	0xC5, 0xE2, 0xAD, 0xB2, 0xD8, 0x4D, 0x00, 0x00, /* 0x68-0x6B */
+	0xDC, 0xA9, 0x00, 0x00, 0xDC, 0xAB, 0x00, 0x00, /* 0x6C-0x6F */
+	0xDC, 0xAA, 0x00, 0x00, 0xE0, 0xDD, 0xE0, 0xDA, /* 0x70-0x73 */
+	0xB9, 0x75, 0x00, 0x00, 0xB9, 0x76, 0xE0, 0xDB, /* 0x74-0x77 */
+	0xE0, 0xDC, 0x00, 0x00, 0xE4, 0xC0, 0xE4, 0xC5, /* 0x78-0x7B */
+	0xBB, 0xDE, 0xE4, 0xBF, 0xE4, 0xC1, 0xE4, 0xC8, /* 0x7C-0x7F */
+	
+	0xE4, 0xC3, 0xE4, 0xC7, 0xE4, 0xC4, 0xE4, 0xC2, /* 0x80-0x83 */
+	0xE4, 0xC6, 0xBB, 0xDF, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0xE8, 0xB3, 0x00, 0x00, 0xE8, 0xB1, 0xBE, 0x63, /* 0x88-0x8B */
+	0x00, 0x00, 0xBE, 0x62, 0xE8, 0xB2, 0xBE, 0x64, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0xEC, 0x56, 0x00, 0x00, 0x00, 0x00, 0xEC, 0x55, /* 0x94-0x97 */
+	0xC0, 0x54, 0xEC, 0x54, 0xEE, 0xFC, 0x00, 0x00, /* 0x98-0x9B */
+	0xEE, 0xFE, 0xEF, 0x41, 0xEF, 0x40, 0x00, 0x00, /* 0x9C-0x9F */
+	0xC1, 0xF9, 0xEE, 0xFD, 0xF1, 0xA1, 0xC2, 0xFD, /* 0xA0-0xA3 */
+	0xF1, 0x7D, 0xF1, 0xA2, 0xC2, 0xFE, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xF1, 0x7B, 0x00, 0x00, 0xF1, 0x7E, 0xF1, 0x7C, /* 0xA8-0xAB */
+	0xF1, 0x79, 0xC3, 0x40, 0xF1, 0x7A, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xA1, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xF3, 0xA3, 0xF3, 0xA2, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xF5, 0x4A, 0x00, 0x00, 0xF5, 0x4B, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF6, 0x70, /* 0xBC-0xBF */
+	0x00, 0x00, 0xC5, 0xB7, 0x00, 0x00, 0xC5, 0xB6, /* 0xC0-0xC3 */
+	0xF8, 0x4F, 0xF8, 0x50, 0xC6, 0x48, 0xF8, 0xD1, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xC6, 0x69, 0x00, 0x00, 0xAD, 0xB3, /* 0xC8-0xCB */
+	0xB6, 0xB4, 0xE4, 0xCA, 0xE4, 0xC9, 0xE8, 0xB5, /* 0xCC-0xCF */
+	0xE8, 0xB4, 0x00, 0x00, 0x00, 0x00, 0xC1, 0xFA, /* 0xD0-0xD3 */
+	0xEF, 0x43, 0xEF, 0x42, 0xF1, 0xA5, 0xF1, 0xA3, /* 0xD4-0xD7 */
+	0xF1, 0xA6, 0xF1, 0xA4, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xC3, 0xFC, 0xF3, 0xA4, 0xF3, 0xA5, 0xF3, 0xA6, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF6, 0x71, 0x00, 0x00, 0xF7, 0x72, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xF8, 0xD2, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xAD, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xEC, 0x57, 0xEF, 0x44, 0x00, 0x00, 0xAD, 0xB5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xBB, 0xE0, 0x00, 0x00, /* 0xF4-0xF7 */
+	0xEC, 0x58, 0xC3, 0x41, 0xF1, 0xA7, 0xC3, 0xFD, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF5, 0x4C, 0xF5, 0x4D, 0xC5, 0x54, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_98[512] = {
+	0xF8, 0x51, 0xAD, 0xB6, 0xB3, 0xBB, 0xB3, 0xBC, /* 0x00-0x03 */
+	0xD8, 0x4E, 0xB6, 0xB5, 0xB6, 0xB6, 0xDC, 0xAC, /* 0x04-0x07 */
+	0xB6, 0xB7, 0x00, 0x00, 0xB9, 0x7A, 0x00, 0x00, /* 0x08-0x0B */
+	0xB9, 0x7C, 0xE0, 0xDF, 0xE0, 0xE0, 0xE0, 0xDE, /* 0x0C-0x0F */
+	0xB9, 0x77, 0xB9, 0x78, 0xB9, 0x7B, 0xB9, 0x79, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0xE4, 0xCB, 0xBB, 0xE1, /* 0x14-0x17 */
+	0xBB, 0xE2, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xBC, /* 0x18-0x1B */
+	0xBE, 0x67, 0xE8, 0xB7, 0xE8, 0xB6, 0x00, 0x00, /* 0x1C-0x1F */
+	0xE8, 0xBB, 0xBE, 0x65, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xC0, 0x5B, 0x00, 0x00, 0xE8, 0xB8, 0xE8, 0xBD, /* 0x24-0x27 */
+	0xE8, 0xBA, 0xE8, 0xB9, 0x00, 0x00, 0xBE, 0x66, /* 0x28-0x2B */
+	0x00, 0x00, 0xC0, 0x59, 0x00, 0x00, 0xEC, 0x5A, /* 0x2C-0x2F */
+	0xC0, 0x55, 0x00, 0x00, 0xEC, 0x5B, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0xEC, 0x59, 0x00, 0x00, 0xC0, 0x58, /* 0x34-0x37 */
+	0xC0, 0x56, 0xC0, 0x5A, 0x00, 0x00, 0xC0, 0x57, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0xEF, 0x45, 0x00, 0x00, 0xEF, 0x4A, /* 0x40-0x43 */
+	0xEF, 0x46, 0xEF, 0x49, 0xC1, 0xFB, 0x00, 0x00, /* 0x44-0x47 */
+	0xED, 0xD4, 0xEF, 0x48, 0xEF, 0x47, 0x00, 0x00, /* 0x48-0x4B */
+	0xC3, 0x44, 0xC3, 0x42, 0xC3, 0x45, 0xC3, 0x43, /* 0x4C-0x4F */
+	0xF1, 0xA8, 0xF1, 0xA9, 0xF1, 0xAA, 0xC3, 0x46, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xAA, /* 0x54-0x57 */
+	0xC4, 0x40, 0xF3, 0xA8, 0x00, 0x00, 0xC4, 0x41, /* 0x58-0x5B */
+	0xF3, 0xA7, 0xF3, 0xA9, 0xC3, 0xFE, 0xF5, 0x51, /* 0x5C-0x5F */
+	0xF5, 0x4E, 0x00, 0x00, 0xF5, 0x4F, 0xF5, 0x50, /* 0x60-0x63 */
+	0xF6, 0x72, 0xC5, 0x56, 0x00, 0x00, 0xC5, 0x55, /* 0x64-0x67 */
+	0x00, 0x00, 0xF7, 0x74, 0xF7, 0x73, 0xC5, 0xB8, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC5, 0xE3, /* 0x6C-0x6F */
+	0xC6, 0x49, 0xC6, 0x60, 0xF9, 0x58, 0xF9, 0xAE, /* 0x70-0x73 */
+	0xF9, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xAD, 0xB7, 0xDC, 0xAD, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0xE0, 0xE1, 0xE4, 0xCC, 0xE4, 0xCD, 0xBB, 0xE3, /* 0xAC-0xAF */
+	0x00, 0x00, 0xBB, 0xE4, 0xE8, 0xBE, 0xBE, 0x68, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0xC1, 0xFC, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xF1, 0xAB, 0x00, 0x00, 0xC3, 0x47, 0xF3, 0xAD, /* 0xB8-0xBB */
+	0xC4, 0x42, 0xF3, 0xAC, 0xF3, 0xAE, 0xF3, 0xAB, /* 0xBC-0xBF */
+	0xF6, 0x75, 0xF5, 0x52, 0xF5, 0x53, 0x00, 0x00, /* 0xC0-0xC3 */
+	0xC4, 0xC6, 0x00, 0x00, 0xF6, 0x74, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xF6, 0x73, 0x00, 0x00, 0xF7, 0x75, /* 0xC8-0xCB */
+	0xF9, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAD, 0xB8, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAD, 0xB9, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xB0, 0xA7, 0xD4, 0x48, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xD8, 0x4F, 0x00, 0x00, 0xB6, 0xB8, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xB6, 0xBB, 0xB6, 0xB9, 0xDC, 0xAE, /* 0xE8-0xEB */
+	0x00, 0x00, 0xB6, 0xBD, 0x00, 0x00, 0xB6, 0xBA, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xBC, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xB9, 0x7E, 0x00, 0x00, 0xE0, 0xE2, 0x00, 0x00, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE0, 0xE3, 0xE8, 0xC0, 0x00, 0x00, /* 0xF8-0xFB */
+	0xB9, 0x7D, 0xB9, 0xA1, 0xB9, 0xA2, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_99[512] = {
+	0xE4, 0xCF, 0x00, 0x00, 0xE4, 0xCE, 0xBB, 0xE5, /* 0x00-0x03 */
+	0x00, 0x00, 0xBB, 0xE6, 0x00, 0x00, 0xE4, 0xD0, /* 0x04-0x07 */
+	0xE8, 0xBF, 0xBB, 0xE8, 0xBE, 0x69, 0x00, 0x00, /* 0x08-0x0B */
+	0xBB, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xC0, 0x5C, 0xE8, 0xC1, 0xBE, 0x6B, 0xBE, 0x6A, /* 0x10-0x13 */
+	0xE8, 0xC2, 0xE8, 0xC5, 0xE8, 0xC3, 0xE8, 0xC4, /* 0x14-0x17 */
+	0xBE, 0x6C, 0x00, 0x00, 0xC0, 0x61, 0xC0, 0x5F, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0x5E, 0xEC, 0x5D, /* 0x1C-0x1F */
+	0x00, 0x00, 0xC0, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0xEC, 0x5C, 0xEF, 0x4B, 0x00, 0x00, 0xEC, 0x5E, /* 0x24-0x27 */
+	0xC0, 0x5D, 0xEC, 0x5F, 0xEF, 0x4E, 0xEF, 0x4C, /* 0x28-0x2B */
+	0xEF, 0x4D, 0xEF, 0x52, 0xC3, 0x4B, 0xEF, 0x51, /* 0x2C-0x2F */
+	0xEF, 0x54, 0xEF, 0x53, 0xEF, 0x50, 0xEF, 0x4F, /* 0x30-0x33 */
+	0x00, 0x00, 0xC1, 0xFD, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xAE, 0x00, 0x00, /* 0x38-0x3B */
+	0xF1, 0xAD, 0xC3, 0x4A, 0xC3, 0x48, 0xC3, 0x49, /* 0x3C-0x3F */
+	0x00, 0x00, 0xF1, 0xAC, 0x00, 0x00, 0xF3, 0xB1, /* 0x40-0x43 */
+	0x00, 0x00, 0xC4, 0x43, 0x00, 0x00, 0xF3, 0xB0, /* 0x44-0x47 */
+	0xF3, 0xAF, 0xC4, 0x44, 0x00, 0x00, 0xF5, 0x58, /* 0x48-0x4B */
+	0xF5, 0x57, 0x00, 0x00, 0xF5, 0x55, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF5, 0x54, 0xC4, 0xC8, 0xC4, 0xC7, 0xF5, 0x59, /* 0x50-0x53 */
+	0xF7, 0x76, 0xC5, 0xB9, 0xF6, 0x77, 0xC5, 0x57, /* 0x54-0x57 */
+	0xF6, 0x76, 0xF5, 0x56, 0x00, 0x00, 0xF7, 0x77, /* 0x58-0x5B */
+	0xC5, 0xE4, 0x00, 0x00, 0xC6, 0x61, 0xF9, 0x59, /* 0x5C-0x5F */
+	0x00, 0x00, 0xF9, 0xB1, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0xAD, 0xBA, 0xD8, 0x50, /* 0x94-0x97 */
+	0xEF, 0x55, 0xAD, 0xBB, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xE4, 0xD2, 0xE4, 0xD1, 0xEC, 0x60, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0xEF, 0x57, 0x00, 0x00, 0xEF, 0x56, /* 0xA0-0xA3 */
+	0x00, 0x00, 0xC3, 0x4C, 0xF3, 0xB2, 0xF3, 0xB3, /* 0xA4-0xA7 */
+	0xC4, 0xC9, 0x00, 0x00, 0x00, 0x00, 0xF9, 0xB2, /* 0xA8-0xAB */
+	0xB0, 0xA8, 0xB6, 0xBF, 0xB6, 0xBE, 0xE0, 0xE4, /* 0xAC-0xAF */
+	0xE0, 0xE6, 0xB9, 0xA4, 0xE0, 0xE5, 0xB9, 0xA3, /* 0xB0-0xB3 */
+	0xB9, 0xA5, 0xE0, 0xE7, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0xE4, 0xD4, 0xE4, 0xD6, 0xE4, 0xD5, /* 0xB8-0xBB */
+	0x00, 0x00, 0xE4, 0xD8, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0xBB, 0xE9, 0xE4, 0xD7, 0xE4, 0xD3, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xD9, /* 0xC4-0xC7 */
+	0x00, 0x00, 0xE8, 0xCC, 0x00, 0x00, 0xE8, 0xCF, /* 0xC8-0xCB */
+	0xE8, 0xD1, 0xE8, 0xC7, 0xE8, 0xCB, 0xE8, 0xC8, /* 0xCC-0xCF */
+	0xBE, 0x6E, 0xBE, 0x71, 0xBE, 0x73, 0xE8, 0xC9, /* 0xD0-0xD3 */
+	0xE8, 0xCA, 0xBE, 0x72, 0xE8, 0xCD, 0xE8, 0xD0, /* 0xD4-0xD7 */
+	0xE8, 0xCE, 0xBE, 0x74, 0x00, 0x00, 0xBE, 0x70, /* 0xD8-0xDB */
+	0xE8, 0xC6, 0xBE, 0x6D, 0x00, 0x00, 0xBE, 0x6F, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0xC0, 0x63, 0xEC, 0x66, /* 0xE0-0xE3 */
+	0xEC, 0x64, 0xEC, 0x63, 0x00, 0x00, 0xEC, 0x69, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xEC, 0x68, 0xEC, 0x67, 0x00, 0x00, /* 0xE8-0xEB */
+	0xEC, 0x62, 0xC0, 0x62, 0xEC, 0x61, 0x00, 0x00, /* 0xEC-0xEF */
+	0xEC, 0x65, 0xC0, 0x64, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0xEF, 0x5A, 0x00, 0x00, 0xEF, 0x5E, 0xEF, 0x5B, /* 0xF4-0xF7 */
+	0xEF, 0x5D, 0xEF, 0x5C, 0xEF, 0x59, 0xEF, 0x5F, /* 0xF8-0xFB */
+	0xEF, 0x62, 0xEF, 0x60, 0xEF, 0x61, 0xC2, 0x40, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9A[512] = {
+	0x00, 0x00, 0xC1, 0xFE, 0xEF, 0x58, 0xEF, 0x63, /* 0x00-0x03 */
+	0xF1, 0xB3, 0xF1, 0xB6, 0xF1, 0xB8, 0xF1, 0xB7, /* 0x04-0x07 */
+	0x00, 0x00, 0xF1, 0xB1, 0xF1, 0xB5, 0xF1, 0xB0, /* 0x08-0x0B */
+	0x00, 0x00, 0xF1, 0xB2, 0xC3, 0x4D, 0xF1, 0xAF, /* 0x0C-0x0F */
+	0x00, 0x00, 0xF1, 0xB4, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0xF3, 0xC0, 0xF3, 0xB5, 0xC4, 0x45, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0xC4, 0x46, 0xF3, 0xB4, 0xF3, 0xB9, /* 0x18-0x1B */
+	0xF3, 0xBF, 0xF3, 0xB7, 0xF3, 0xBE, 0x00, 0x00, /* 0x1C-0x1F */
+	0xF3, 0xBB, 0x00, 0x00, 0xF3, 0xBA, 0xF3, 0xBD, /* 0x20-0x23 */
+	0xF3, 0xB8, 0xF3, 0xB6, 0x00, 0x00, 0xF3, 0xBC, /* 0x24-0x27 */
+	0x00, 0x00, 0xF5, 0x60, 0xF5, 0x5E, 0xC4, 0xCA, /* 0x28-0x2B */
+	0xF5, 0x5D, 0xF5, 0x63, 0xF5, 0x61, 0x00, 0x00, /* 0x2C-0x2F */
+	0xC4, 0xCB, 0xF5, 0x5C, 0xF5, 0x5A, 0x00, 0x00, /* 0x30-0x33 */
+	0xF5, 0x5B, 0xC4, 0xCD, 0xF5, 0x5F, 0xC4, 0xCC, /* 0x34-0x37 */
+	0xF5, 0x62, 0xF6, 0x78, 0xF6, 0x7E, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0xF6, 0x79, 0xC5, 0x5B, 0xF6, 0xA1, /* 0x3C-0x3F */
+	0xC5, 0x5A, 0xF6, 0x7D, 0xF6, 0x7C, 0xC5, 0x59, /* 0x40-0x43 */
+	0xF6, 0x7B, 0xC5, 0x58, 0xF6, 0x7A, 0x00, 0x00, /* 0x44-0x47 */
+	0xF7, 0x7D, 0xF7, 0xA1, 0xF7, 0x7E, 0x00, 0x00, /* 0x48-0x4B */
+	0xF7, 0x7B, 0xC5, 0xBB, 0xF7, 0x78, 0xF7, 0x7C, /* 0x4C-0x4F */
+	0xF7, 0xA3, 0x00, 0x00, 0xF7, 0xA2, 0xF7, 0x79, /* 0x50-0x53 */
+	0xF7, 0x7A, 0xC5, 0xBA, 0xF8, 0x52, 0xC5, 0xE7, /* 0x54-0x57 */
+	0x00, 0x00, 0xF8, 0x53, 0xC5, 0xE5, 0xC5, 0xE6, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xD3, 0xC6, 0x4A, /* 0x5C-0x5F */
+	0xF9, 0x76, 0x00, 0x00, 0xC6, 0x6A, 0x00, 0x00, /* 0x60-0x63 */
+	0xF9, 0xB3, 0xC6, 0x6B, 0xF9, 0xB4, 0xF9, 0xB5, /* 0x64-0x67 */
+	0xF9, 0xC3, 0xF9, 0xC2, 0xC6, 0x7A, 0xF9, 0xCD, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xB0, 0xA9, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE9, /* 0xA8-0xAB */
+	0x00, 0x00, 0xE0, 0xE8, 0x00, 0x00, 0xBB, 0xEA, /* 0xAC-0xAF */
+	0xBB, 0xEB, 0xE4, 0xDA, 0x00, 0x00, 0xE8, 0xD2, /* 0xB0-0xB3 */
+	0xEC, 0x6C, 0x00, 0x00, 0x00, 0x00, 0xBE, 0x75, /* 0xB4-0xB7 */
+	0xC0, 0x65, 0xEC, 0x6A, 0x00, 0x00, 0xEC, 0x6D, /* 0xB8-0xBB */
+	0xC0, 0x66, 0x00, 0x00, 0xEF, 0x64, 0xEC, 0x6B, /* 0xBC-0xBF */
+	0xF1, 0xB9, 0xC3, 0x4E, 0xF3, 0xC1, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0x66, 0xF5, 0x64, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0x65, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0xF6, 0xA2, 0x00, 0x00, 0xC5, 0x5C, /* 0xCC-0xCF */
+	0xF7, 0xA4, 0xC5, 0xEA, 0xC5, 0xBC, 0xC5, 0xE8, /* 0xD0-0xD3 */
+	0xC5, 0xE9, 0xF8, 0xD4, 0xC6, 0x62, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xB0, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0xF1, 0xBA, 0x00, 0x00, 0x00, 0x00, 0xD4, 0x49, /* 0xDC-0xDF */
+	0x00, 0x00, 0xB9, 0xA6, 0x00, 0x00, 0xE4, 0xDB, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0xBB, 0xEC, 0xE4, 0xDC, /* 0xE4-0xE7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE8, 0xD4, /* 0xE8-0xEB */
+	0xE8, 0xD3, 0xC0, 0x68, 0xBE, 0x76, 0xBE, 0x77, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE8, 0xD7, 0xE8, 0xD6, 0xE8, 0xD5, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0x6E, 0xEC, 0x71, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xEC, 0x70, 0xEC, 0x6F, 0xC0, 0x67, /* 0xF8-0xFB */
+	0xEF, 0x68, 0xEF, 0x66, 0xEF, 0x65, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9B[512] = {
+	0x00, 0x00, 0xEF, 0x67, 0x00, 0x00, 0xC3, 0x4F, /* 0x00-0x03 */
+	0xF1, 0xBC, 0xF1, 0xBD, 0xC3, 0x50, 0x00, 0x00, /* 0x04-0x07 */
+	0xF1, 0xBB, 0x00, 0x00, 0xF3, 0xC3, 0xF3, 0xC2, /* 0x08-0x0B */
+	0xF3, 0xC5, 0xC4, 0x47, 0xF3, 0xC4, 0x00, 0x00, /* 0x0C-0x0F */
+	0xF5, 0x67, 0xF5, 0x69, 0xF5, 0x68, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xF6, 0xA3, 0xF6, 0xA6, 0xF6, 0xA4, /* 0x14-0x17 */
+	0xF6, 0xA5, 0xF7, 0xA5, 0xC5, 0xBD, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0x54, 0xF8, 0x55, /* 0x1C-0x1F */
+	0xF8, 0x56, 0x00, 0x00, 0xC6, 0x4B, 0xC6, 0x63, /* 0x20-0x23 */
+	0xF9, 0xB6, 0xB0, 0xAB, 0x00, 0x00, 0xBE, 0x78, /* 0x24-0x27 */
+	0xC0, 0x69, 0xF1, 0xBE, 0x00, 0x00, 0xF7, 0xA6, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xC4, 0xD4, 0x4A, /* 0x2C-0x2F */
+	0x00, 0x00, 0xC6, 0x7B, 0xB0, 0xAC, 0xEC, 0x72, /* 0x30-0x33 */
+	0x00, 0x00, 0xF1, 0xBF, 0x00, 0x00, 0xF3, 0xC6, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0xF6, 0xA7, 0xF7, 0xA7, /* 0x38-0x3B */
+	0xB0, 0xAD, 0x00, 0x00, 0xE4, 0xDD, 0xE4, 0xDE, /* 0x3C-0x3F */
+	0x00, 0x00, 0xBB, 0xED, 0xBB, 0xEE, 0xE8, 0xD9, /* 0x40-0x43 */
+	0xBE, 0x7A, 0xBE, 0x79, 0xE8, 0xD8, 0x00, 0x00, /* 0x44-0x47 */
+	0xEF, 0x69, 0x00, 0x00, 0xF1, 0xC0, 0xF1, 0xC2, /* 0x48-0x4B */
+	0xF1, 0xC1, 0xC3, 0x53, 0xC3, 0x52, 0xC3, 0x51, /* 0x4C-0x4F */
+	0x00, 0x00, 0xC5, 0x5E, 0xF6, 0xA8, 0x00, 0x00, /* 0x50-0x53 */
+	0xC5, 0x5D, 0xF7, 0xA9, 0xF7, 0xA8, 0x00, 0x00, /* 0x54-0x57 */
+	0xC6, 0x4C, 0xF8, 0xD5, 0xB3, 0xBD, 0xE0, 0xEA, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE4, 0xE1, /* 0x5C-0x5F */
+	0xE4, 0xDF, 0xE4, 0xE0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xE8, 0xE2, 0x00, 0x00, 0xE8, 0xDD, 0xE8, 0xDA, /* 0x64-0x67 */
+	0xE8, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0xE8, 0xE3, 0x00, 0x00, 0x00, 0x00, 0xBE, 0x7C, /* 0x6C-0x6F */
+	0xE8, 0xE0, 0xE8, 0xDC, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0xE8, 0xDB, 0xE8, 0xDF, 0xE8, 0xDE, 0xBE, 0x7B, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0x7D, 0xEC, 0x78, /* 0x78-0x7B */
+	0xEC, 0x76, 0xEC, 0xA1, 0xEC, 0x77, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0xEC, 0x73, 0x00, 0x00, 0xEC, 0x79, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0xEC, 0x74, 0xEF, 0x72, 0xEC, 0x75, /* 0x84-0x87 */
+	0xEC, 0xA2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xEC, 0x7C, 0xC0, 0x6A, 0xEC, 0x7B, 0xEC, 0x7A, /* 0x90-0x93 */
+	0x00, 0x00, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0x6A, 0xEF, 0x6D, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0x6C, 0x00, 0x00, /* 0x9C-0x9F */
+	0xEF, 0x74, 0xEF, 0x6F, 0xEF, 0x73, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xEF, 0x71, 0xEF, 0x70, 0xEF, 0x6E, 0x00, 0x00, /* 0xA4-0xA7 */
+	0xEF, 0x6B, 0x00, 0x00, 0xC2, 0x43, 0xC2, 0x42, /* 0xA8-0xAB */
+	0x00, 0x00, 0xC2, 0x44, 0xC2, 0x41, 0xEF, 0x75, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0xF1, 0xC8, 0xF1, 0xCB, 0x00, 0x00, /* 0xB4-0xB7 */
+	0xF1, 0xC9, 0xF1, 0xCD, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0xF1, 0xCE, 0x00, 0x00, 0xF1, 0xC6, /* 0xBC-0xBF */
+	0xC3, 0x58, 0xF1, 0xC7, 0x00, 0x00, 0xF1, 0xC5, /* 0xC0-0xC3 */
+	0xF1, 0xCC, 0x00, 0x00, 0xF1, 0xC4, 0xF1, 0xC3, /* 0xC4-0xC7 */
+	0xC3, 0x57, 0xC3, 0x55, 0xC3, 0x54, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xCA, /* 0xD0-0xD3 */
+	0xF3, 0xCF, 0xF3, 0xD5, 0xC4, 0x4A, 0xF3, 0xD0, /* 0xD4-0xD7 */
+	0x00, 0x00, 0xF3, 0xD3, 0xF3, 0xD7, 0xC4, 0x4B, /* 0xD8-0xDB */
+	0xF3, 0xD2, 0x00, 0x00, 0xF3, 0xCA, 0x00, 0x00, /* 0xDC-0xDF */
+	0xF3, 0xC9, 0xF3, 0xD6, 0xF3, 0xCD, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xF3, 0xCB, 0xF3, 0xD4, 0xF3, 0xCC, 0xC4, 0x49, /* 0xE4-0xE7 */
+	0xC4, 0x48, 0x00, 0x00, 0xF3, 0xC7, 0xF3, 0xC8, /* 0xE8-0xEB */
+	0xF3, 0xD1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0xF3, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF5, 0x6C, /* 0xF4-0xF7 */
+	0xF5, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xF8-0xFB */
+	0x00, 0x00, 0xC3, 0x56, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9C[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0xF5, 0x6D, 0xF5, 0x73, 0xF5, 0x71, /* 0x04-0x07 */
+	0xF5, 0x6B, 0xF5, 0x76, 0x00, 0x00, 0xF5, 0x6A, /* 0x08-0x0B */
+	0x00, 0x00, 0xC4, 0xCF, 0xF5, 0x72, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0xF5, 0x6E, 0xC4, 0xCE, /* 0x10-0x13 */
+	0xF5, 0x75, 0x00, 0x00, 0x00, 0x00, 0xF5, 0x74, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0xF6, 0xAB, 0xF6, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0xF6, 0xB1, 0x00, 0x00, 0xF6, 0xAD, /* 0x20-0x23 */
+	0xF6, 0xB0, 0xC5, 0x60, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xF6, 0xAE, 0xF6, 0xAF, 0x00, 0x00, 0xF6, 0xA9, /* 0x28-0x2B */
+	0xF6, 0xAC, 0xC5, 0x5F, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0xC5, 0xBF, 0xF7, 0xB4, 0xF7, 0xAF, /* 0x30-0x33 */
+	0xF7, 0xB3, 0x00, 0x00, 0xF7, 0xB6, 0xF7, 0xB2, /* 0x34-0x37 */
+	0x00, 0x00, 0xF7, 0xAE, 0x00, 0x00, 0xC5, 0xC1, /* 0x38-0x3B */
+	0xF7, 0xB1, 0xF7, 0xB5, 0xC5, 0xC0, 0xF7, 0xAC, /* 0x3C-0x3F */
+	0xF5, 0x70, 0xF7, 0xB0, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0xF7, 0xAD, 0x00, 0x00, 0xF7, 0xAA, 0x00, 0x00, /* 0x44-0x47 */
+	0xF7, 0xAB, 0xC5, 0xBE, 0xF8, 0x5A, 0xF8, 0x5C, /* 0x48-0x4B */
+	0xF8, 0x5F, 0xF8, 0x5B, 0xF8, 0x60, 0x00, 0x00, /* 0x4C-0x4F */
+	0xF8, 0x59, 0x00, 0x00, 0xF8, 0x57, 0x00, 0x00, /* 0x50-0x53 */
+	0xC5, 0xEB, 0xF8, 0x5D, 0xC5, 0xED, 0xC5, 0xEC, /* 0x54-0x57 */
+	0xF8, 0x58, 0xF8, 0x5E, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xDA, 0xC6, 0x4D, /* 0x5C-0x5F */
+	0xF8, 0xDB, 0x00, 0x00, 0xF8, 0xD9, 0xF8, 0xD6, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xD8, 0xF8, 0xD7, /* 0x64-0x67 */
+	0xF9, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0xF9, 0x5C, 0xF9, 0x5B, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0xF9, 0x79, 0x00, 0x00, 0xF9, 0x78, /* 0x70-0x73 */
+	0xF9, 0x77, 0xF9, 0x7A, 0x00, 0x00, 0xC6, 0x73, /* 0x74-0x77 */
+	0xC6, 0x74, 0xF9, 0xCA, 0xF9, 0xCE, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xB3, 0xBE, 0xDC, 0xAF, 0xE0, 0xED, /* 0xE4-0xE7 */
+	0x00, 0x00, 0xB9, 0xA7, 0xE0, 0xEB, 0x00, 0x00, /* 0xE8-0xEB */
+	0x00, 0x00, 0xE0, 0xEC, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0xE4, 0xE2, 0xE4, 0xE3, 0xBB, 0xF1, /* 0xF0-0xF3 */
+	0xBB, 0xEF, 0xE4, 0xE4, 0xBB, 0xF0, 0xE8, 0xE8, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xE8, 0xEB, 0xE8, 0xE5, 0xE8, 0xEC, /* 0xF8-0xFB */
+	0xE8, 0xE4, 0xE8, 0xE6, 0x00, 0x00, 0xE8, 0xE7, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9D[512] = {
+	0xE8, 0xEA, 0x00, 0x00, 0x00, 0x00, 0xBE, 0xA1, /* 0x00-0x03 */
+	0xE8, 0xEF, 0xE8, 0xEE, 0xBE, 0x7D, 0xE8, 0xE9, /* 0x04-0x07 */
+	0xE8, 0xED, 0xBE, 0x7E, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xEC, 0xAC, 0x00, 0x00, 0xC0, 0x6F, 0x00, 0x00, /* 0x10-0x13 */
+	0xEC, 0xA7, 0xC0, 0x6B, 0x00, 0x00, 0xEC, 0xA4, /* 0x14-0x17 */
+	0xEC, 0xAA, 0xEC, 0xAD, 0x00, 0x00, 0xC0, 0x70, /* 0x18-0x1B */
+	0x00, 0x00, 0xEC, 0xA9, 0xEC, 0xA6, 0xEC, 0xAE, /* 0x1C-0x1F */
+	0xEC, 0xA5, 0x00, 0x00, 0xEC, 0xAB, 0xC0, 0x6C, /* 0x20-0x23 */
+	0x00, 0x00, 0xEC, 0xA3, 0xC0, 0x6D, 0x00, 0x00, /* 0x24-0x27 */
+	0xC0, 0x6E, 0xEC, 0xA8, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0xEF, 0xA9, 0xEF, 0x7A, 0xEF, 0x7B, /* 0x2C-0x2F */
+	0xEF, 0x7E, 0xEF, 0x7C, 0x00, 0x00, 0xEF, 0x76, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0xEF, 0x79, 0xEF, 0xA5, /* 0x34-0x37 */
+	0xEF, 0x7D, 0x00, 0x00, 0x00, 0x00, 0xC2, 0x45, /* 0x38-0x3B */
+	0x00, 0x00, 0xEF, 0xA7, 0xEF, 0xA4, 0xC2, 0x46, /* 0x3C-0x3F */
+	0xEF, 0xA6, 0xEF, 0x77, 0xEF, 0xA2, 0xEF, 0xA3, /* 0x40-0x43 */
+	0x00, 0x00, 0xEF, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xF1, 0xD2, 0xF1, 0xD4, /* 0x48-0x4B */
+	0xF1, 0xD7, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xD1, /* 0x4C-0x4F */
+	0x00, 0x00, 0xC3, 0x59, 0xF1, 0xD9, 0xF1, 0xD0, /* 0x50-0x53 */
+	0xF1, 0xDA, 0x00, 0x00, 0xF1, 0xD6, 0xF1, 0xD8, /* 0x54-0x57 */
+	0xF1, 0xDC, 0xF1, 0xD5, 0xF1, 0xDD, 0xF1, 0xD3, /* 0x58-0x5B */
+	0xF1, 0xCF, 0xC3, 0x5A, 0x00, 0x00, 0xF1, 0xDB, /* 0x5C-0x5F */
+	0xC3, 0x5B, 0xC4, 0x4D, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x78, /* 0x64-0x67 */
+	0xF3, 0xF1, 0xF3, 0xE8, 0xC4, 0x4F, 0xF3, 0xE4, /* 0x68-0x6B */
+	0xC4, 0x50, 0x00, 0x00, 0x00, 0x00, 0xF3, 0xED, /* 0x6C-0x6F */
+	0xF3, 0xE7, 0xF3, 0xDD, 0xC4, 0x4E, 0xF3, 0xEA, /* 0x70-0x73 */
+	0xF3, 0xE5, 0xF3, 0xE6, 0x00, 0x00, 0xF3, 0xD8, /* 0x74-0x77 */
+	0xF3, 0xDF, 0xF3, 0xEE, 0x00, 0x00, 0xF3, 0xEB, /* 0x78-0x7B */
+	0x00, 0x00, 0xF3, 0xE3, 0x00, 0x00, 0xF3, 0xEF, /* 0x7C-0x7F */
+	
+	0xF3, 0xDE, 0xF3, 0xD9, 0xF3, 0xEC, 0x00, 0x00, /* 0x80-0x83 */
+	0xF3, 0xDB, 0xF3, 0xE9, 0xF3, 0xE0, 0xF3, 0xF0, /* 0x84-0x87 */
+	0xF3, 0xDC, 0xC4, 0x4C, 0xF3, 0xDA, 0xF3, 0xE1, /* 0x88-0x8B */
+	0xF3, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xF5, 0x7D, 0x00, 0x00, 0xF5, 0x7B, 0x00, 0x00, /* 0x90-0x93 */
+	0xF5, 0xA2, 0x00, 0x00, 0xF5, 0xAE, 0xF5, 0xA5, /* 0x94-0x97 */
+	0xF5, 0x7C, 0xF5, 0x78, 0xF5, 0xA7, 0xF5, 0x7E, /* 0x98-0x9B */
+	0xF5, 0xA3, 0xF5, 0x7A, 0xF5, 0xAA, 0xF5, 0x77, /* 0x9C-0x9F */
+	0xF5, 0xA1, 0xF5, 0xA6, 0xF5, 0xA8, 0xF5, 0xAB, /* 0xA0-0xA3 */
+	0xF5, 0x79, 0x00, 0x00, 0xF5, 0xAF, 0xF5, 0xB0, /* 0xA4-0xA7 */
+	0xF5, 0xA9, 0xF5, 0xAD, 0xF5, 0xA4, 0x00, 0x00, /* 0xA8-0xAB */
+	0xF6, 0xC1, 0xF6, 0xC4, 0x00, 0x00, 0xC5, 0x61, /* 0xAC-0xAF */
+	0x00, 0x00, 0xF6, 0xC3, 0xF6, 0xC8, 0xF6, 0xC6, /* 0xB0-0xB3 */
+	0xC5, 0x62, 0xF6, 0xBD, 0xF6, 0xB3, 0xF6, 0xB2, /* 0xB4-0xB7 */
+	0xC5, 0x64, 0xF6, 0xBF, 0xF6, 0xC0, 0xF6, 0xBC, /* 0xB8-0xBB */
+	0xF6, 0xB4, 0x00, 0x00, 0xF6, 0xB9, 0xF5, 0xAC, /* 0xBC-0xBF */
+	0x00, 0x00, 0xF6, 0xB5, 0xC5, 0x63, 0xF6, 0xBB, /* 0xC0-0xC3 */
+	0x00, 0x00, 0xF6, 0xBA, 0x00, 0x00, 0xF6, 0xB6, /* 0xC4-0xC7 */
+	0xF6, 0xC2, 0x00, 0x00, 0xF6, 0xB7, 0xF7, 0xBB, /* 0xC8-0xCB */
+	0xF6, 0xC5, 0xF6, 0xC7, 0xF6, 0xBE, 0xF6, 0xB8, /* 0xCC-0xCF */
+	0xF7, 0xBC, 0xF7, 0xBE, 0xF7, 0xB8, 0xC5, 0xC2, /* 0xD0-0xD3 */
+	0x00, 0x00, 0xF7, 0xC5, 0xF7, 0xC3, 0xC5, 0xC3, /* 0xD4-0xD7 */
+	0xF7, 0xC2, 0xF7, 0xC1, 0xF7, 0xBA, 0xF7, 0xB7, /* 0xD8-0xDB */
+	0xF7, 0xBD, 0xF7, 0xC6, 0xF7, 0xB9, 0xF7, 0xBF, /* 0xDC-0xDF */
+	0x00, 0x00, 0xF8, 0x69, 0xF8, 0x6E, 0xF8, 0x64, /* 0xE0-0xE3 */
+	0xF8, 0x67, 0xC5, 0xEE, 0xF8, 0x6B, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xF8, 0x72, 0xF7, 0xC0, 0x00, 0x00, 0xF8, 0x65, /* 0xE8-0xEB */
+	0xF8, 0x6F, 0xF8, 0x73, 0xF8, 0x6A, 0xF8, 0x63, /* 0xEC-0xEF */
+	0xF8, 0x6D, 0x00, 0x00, 0xF8, 0x6C, 0xF8, 0x71, /* 0xF0-0xF3 */
+	0xF8, 0x70, 0xF7, 0xC4, 0xF8, 0x68, 0xF8, 0x62, /* 0xF4-0xF7 */
+	0xF8, 0x66, 0xC6, 0x4E, 0xC6, 0x4F, 0xF8, 0x61, /* 0xF8-0xFB */
+	0x00, 0x00, 0xF8, 0xE6, 0xF8, 0xDD, 0xF8, 0xE5, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9E[512] = {
+	0xF8, 0xE2, 0xF8, 0xE3, 0xF8, 0xDC, 0xF8, 0xDF, /* 0x00-0x03 */
+	0xF8, 0xE7, 0xF8, 0xE1, 0xF8, 0xE0, 0xF8, 0xDE, /* 0x04-0x07 */
+	0x00, 0x00, 0xF8, 0xE4, 0x00, 0x00, 0xF9, 0x5D, /* 0x08-0x0B */
+	0x00, 0x00, 0xF9, 0x5E, 0x00, 0x00, 0xF9, 0x60, /* 0x0C-0x0F */
+	0xF9, 0x5F, 0xF9, 0x62, 0xF9, 0x61, 0xF9, 0x7C, /* 0x10-0x13 */
+	0xF9, 0x7B, 0xF9, 0xB7, 0x00, 0x00, 0xF9, 0xB8, /* 0x14-0x17 */
+	0x00, 0x00, 0xF9, 0xC5, 0xC6, 0x78, 0xC6, 0x7C, /* 0x18-0x1B */
+	0x00, 0x00, 0xF9, 0xCF, 0xC6, 0x7D, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0xB3, 0xBF, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0xC4, 0xD0, 0xF6, 0xC9, 0x00, 0x00, /* 0x78-0x7B */
+	0xC6, 0x50, 0xC6, 0x51, 0x00, 0x00, 0xB3, 0xC0, /* 0x7C-0x7F */
+	
+	0xE0, 0xEE, 0x00, 0x00, 0xB9, 0xA8, 0xE8, 0xF0, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0xEC, 0xB0, 0xEC, 0xB1, /* 0x84-0x87 */
+	0xEC, 0xAF, 0xEF, 0xAB, 0xEF, 0xAA, 0xC2, 0x47, /* 0x88-0x8B */
+	0xF1, 0xDF, 0xEF, 0xAC, 0xF1, 0xDE, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0xF3, 0xF3, 0xC4, 0x51, 0xC4, 0x53, /* 0x90-0x93 */
+	0xF3, 0xF2, 0x00, 0x00, 0x00, 0x00, 0xC4, 0x52, /* 0x94-0x97 */
+	0x00, 0x00, 0xF5, 0xB1, 0xF5, 0xB3, 0xF5, 0xB2, /* 0x98-0x9B */
+	0xF6, 0xCA, 0xC5, 0x65, 0x00, 0x00, 0xC5, 0xEF, /* 0x9C-0x9F */
+	0xF8, 0xE8, 0xF9, 0x63, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xF9, 0xD2, 0xB3, 0xC1, 0x00, 0x00, 0xE4, 0xE5, /* 0xA4-0xA7 */
+	0x00, 0x00, 0xBE, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0xEC, 0xB3, 0xEC, 0xB2, 0x00, 0x00, /* 0xAC-0xAF */
+	0xEF, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0xC4, 0x54, 0xC4, 0xD1, 0xF7, 0xC7, 0xF9, 0xCB, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB3, 0xC2, /* 0xB8-0xBB */
+	0xBB, 0xF2, 0x00, 0x00, 0xBE, 0xA3, 0x00, 0x00, /* 0xBC-0xBF */
+	0xF3, 0xF4, 0x00, 0x00, 0xF8, 0x74, 0xB6, 0xC0, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0xEF, 0xAE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0xC6, 0x64, 0xB6, 0xC1, 0xBE, 0xA4, 0xC2, 0x48, /* 0xCC-0xCF */
+	0xF8, 0x75, 0xB6, 0xC2, 0x00, 0x00, 0xE8, 0xF1, /* 0xD0-0xD3 */
+	0xC0, 0x72, 0xEC, 0xB4, 0xEC, 0xB5, 0x00, 0x00, /* 0xD4-0xD7 */
+	0xC0, 0x71, 0x00, 0x00, 0xEF, 0xAF, 0xC2, 0x4C, /* 0xD8-0xDB */
+	0xC2, 0x4A, 0xC2, 0x4B, 0xC2, 0x49, 0xF1, 0xE0, /* 0xDC-0xDF */
+	0xC3, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0xF5, 0xB5, 0xF5, 0xB4, 0xF5, 0xB7, 0xF5, 0xB6, /* 0xE4-0xE7 */
+	0xC4, 0xD2, 0x00, 0x00, 0x00, 0x00, 0xF6, 0xCB, /* 0xE8-0xEB */
+	0x00, 0x00, 0xF6, 0xCD, 0xF6, 0xCC, 0xC5, 0x66, /* 0xEC-0xEF */
+	0xF7, 0xC8, 0x00, 0x00, 0xF8, 0x76, 0xF8, 0x77, /* 0xF0-0xF3 */
+	0xC5, 0xF0, 0xF9, 0x64, 0xF9, 0x7D, 0xC6, 0x75, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xDC, 0xB0, 0xEC, 0xB6, 0xEF, 0xB0, /* 0xF8-0xFB */
+	0xF3, 0xF5, 0xE0, 0xEF, 0x00, 0x00, 0xEF, 0xB1, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_9F[512] = {
+	0xF1, 0xE2, 0xF1, 0xE1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0x78, 0xC6, 0x52, /* 0x04-0x07 */
+	0x00, 0x00, 0xF9, 0x65, 0xF9, 0x7E, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0xB9, 0xA9, 0xE8, 0xF2, /* 0x0C-0x0F */
+	0xE8, 0xF3, 0x00, 0x00, 0xEC, 0xB7, 0xB9, 0xAA, /* 0x10-0x13 */
+	0x00, 0x00, 0xC3, 0x5D, 0xF1, 0xE3, 0x00, 0x00, /* 0x14-0x17 */
+	0xF6, 0xCF, 0xC5, 0x67, 0xF6, 0xD0, 0xF6, 0xCE, /* 0x18-0x1B */
+	0xF8, 0x79, 0x00, 0x00, 0xF8, 0xE9, 0x00, 0x00, /* 0x1C-0x1F */
+	0xB9, 0xAB, 0x00, 0x00, 0xEF, 0xB4, 0xEF, 0xB3, /* 0x20-0x23 */
+	0xEF, 0xB2, 0xF1, 0xE4, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0xF1, 0xE8, 0xF1, 0xE7, 0xF1, 0xE6, 0xF1, 0xE5, /* 0x28-0x2B */
+	0xC3, 0x5E, 0xF3, 0xF6, 0xF5, 0xB9, 0xC4, 0xD3, /* 0x2C-0x2F */
+	0xF5, 0xB8, 0xF6, 0xD1, 0xF7, 0xCB, 0xF7, 0xCA, /* 0x30-0x33 */
+	0xC5, 0xC4, 0xF7, 0xC9, 0xF8, 0x7C, 0xF8, 0x7B, /* 0x34-0x37 */
+	0xF8, 0x7A, 0x00, 0x00, 0x00, 0x00, 0xBB, 0xF3, /* 0x38-0x3B */
+	0x00, 0x00, 0xEC, 0xB8, 0xC2, 0x4D, 0x00, 0x00, /* 0x3C-0x3F */
+	0xF3, 0xF7, 0xF3, 0xF8, 0xF7, 0xCC, 0xF8, 0x7D, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0xF8, 0xEA, 0xF9, 0x66, /* 0x44-0x47 */
+	0xF9, 0xB9, 0xF9, 0xD4, 0xBB, 0xF4, 0xC2, 0x4E, /* 0x48-0x4B */
+	0xF1, 0xE9, 0xF3, 0xF9, 0xF6, 0xD2, 0xF8, 0x7E, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0xBE, 0xA6, 0x00, 0x00, /* 0x50-0x53 */
+	0xEF, 0xB5, 0xF1, 0xEA, 0xF3, 0xFA, 0xF3, 0xFB, /* 0x54-0x57 */
+	0xF3, 0xFC, 0xF5, 0xBE, 0x00, 0x00, 0xF5, 0xBA, /* 0x58-0x5B */
+	0xC5, 0x68, 0xF5, 0xBD, 0xF5, 0xBC, 0xC4, 0xD4, /* 0x5C-0x5F */
+	0xF5, 0xBB, 0xC4, 0xD6, 0x00, 0x00, 0xC4, 0xD5, /* 0x60-0x63 */
+	0xF6, 0xD4, 0xF6, 0xD3, 0xC5, 0x69, 0xC5, 0x6A, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xC5, 0xC6, 0xF7, 0xCD, /* 0x68-0x6B */
+	0xC5, 0xC5, 0x00, 0x00, 0xF8, 0xA3, 0xF8, 0xA4, /* 0x6C-0x6F */
+	0xF8, 0xA2, 0xF8, 0xA1, 0xC6, 0x54, 0x00, 0x00, /* 0x70-0x73 */
+	0xF8, 0xEB, 0xF8, 0xEC, 0xF8, 0xED, 0xC6, 0x53, /* 0x74-0x77 */
+	0xF9, 0x67, 0xF9, 0x6A, 0xF9, 0x69, 0xF9, 0x68, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0xF9, 0xD3, 0x00, 0x00, /* 0x7C-0x7F */
+	
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0xC0, 0x73, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0xC3, 0x65, 0xF5, 0xBF, 0xF6, 0xD5, 0x00, 0x00, /* 0x90-0x93 */
+	0xC5, 0xC7, 0xF7, 0xCE, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0xF9, 0xD5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0xC0, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0xEF, 0xB6, 0x00, 0x00, 0xF7, 0xCF, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xF9, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+};
+
+static unsigned char u2c_DC[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+};
+
+static unsigned char u2c_F9[512] = {
+	0xB0, 0x5A, 0xA7, 0xF3, 0xA8, 0xAE, 0xB8, 0xEB, /* 0x00-0x03 */
+	0xB7, 0xC6, 0xA6, 0xEA, 0xA5, 0x79, 0xC0, 0x74, /* 0x04-0x07 */
+	0xC0, 0x74, 0xAB, 0xB4, 0xAA, 0xF7, 0xB3, 0xE2, /* 0x08-0x0B */
+	0xA9, 0x60, 0xC3, 0x69, 0xC4, 0xEE, 0xC3, 0xB9, /* 0x0C-0x0F */
+	0xC5, 0xDA, 0xC1, 0xB3, 0xBB, 0x72, 0xC5, 0xDE, /* 0x10-0x13 */
+	0xBC, 0xD6, 0xAC, 0xA5, 0xAF, 0x4F, 0xAF, 0x5F, /* 0x14-0x17 */
+	0xB8, 0xA8, 0xB9, 0x54, 0xC0, 0x64, 0xB6, 0xC3, /* 0x18-0x1B */
+	0xA7, 0x5A, 0xC4, 0xE6, 0xC4, 0xEA, 0xC4, 0xF5, /* 0x1C-0x1F */
+	0xC6, 0x7D, 0xB4, 0x50, 0xC0, 0xDD, 0xC2, 0xC5, /* 0x20-0x23 */
+	0xC4, 0xB0, 0xA9, 0xD4, 0xC3, 0xBE, 0xC4, 0xFA, /* 0x24-0x27 */
+	0xB4, 0x59, 0xAE, 0xD4, 0xAE, 0xF6, 0xAF, 0x54, /* 0x28-0x2B */
+	0x00, 0x00, 0xA8, 0xD3, 0xA7, 0x4E, 0xB3, 0xD2, /* 0x2C-0x2F */
+	0xBE, 0xDB, 0xC3, 0x72, 0xC4, 0x6C, 0xBF, 0x63, /* 0x30-0x33 */
+	0xA6, 0xD1, 0xC4, 0xAA, 0xB8, 0xB8, 0xB8, 0xF4, /* 0x34-0x37 */
+	0xC5, 0x53, 0xBE, 0x7C, 0xC6, 0x4F, 0xB8, 0x4C, /* 0x38-0x3B */
+	0xB8, 0x53, 0xBA, 0xF1, 0xDB, 0x77, 0xBF, 0xFD, /* 0x3C-0x3F */
+	0xB3, 0xC0, 0xBD, 0xD7, 0xC3, 0x62, 0xA7, 0xCB, /* 0x40-0x43 */
+	0xC5, 0xA2, 0xC5, 0xA4, 0xA8, 0x63, 0xBD, 0x55, /* 0x44-0x47 */
+	0xB8, 0xEF, 0xB9, 0x70, 0xC2, 0x53, 0xB9, 0xF0, /* 0x48-0x4B */
+	0xBC, 0xD3, 0xB2, 0x5C, 0xBA, 0x7C, 0xB2, 0xD6, /* 0x4C-0x4F */
+	0xC1, 0x5C, 0xAD, 0xAE, 0xB0, 0xC7, 0xA6, 0xD8, /* 0x50-0x53 */
+	0xBB, 0xFE, 0xAD, 0xE2, 0xB8, 0x57, 0xBA, 0xF0, /* 0x54-0x57 */
+	0xB5, 0xD9, 0xB3, 0xAE, 0xC5, 0xAA, 0xCE, 0xD4, /* 0x58-0x5B */
+	0xBC, 0xD6, 0xBF, 0xD5, 0xA4, 0xA6, 0xB9, 0xE7, /* 0x5C-0x5F */
+	0xAB, 0xE3, 0xB2, 0x76, 0xB2, 0xA7, 0xA5, 0x5F, /* 0x60-0x63 */
+	0xED, 0xA8, 0xAB, 0x4B, 0xB4, 0x5F, 0xA4, 0xA3, /* 0x64-0x67 */
+	0xAA, 0x63, 0xBC, 0xC6, 0xAF, 0xC1, 0xB0, 0xD1, /* 0x68-0x6B */
+	0xB6, 0xEB, 0xAC, 0xD9, 0xB8, 0xAD, 0xBB, 0xA1, /* 0x6C-0x6F */
+	0xB1, 0xFE, 0xA8, 0xB0, 0xA8, 0x48, 0xAC, 0x42, /* 0x70-0x73 */
+	0xAD, 0x59, 0xB1, 0xB0, 0xB2, 0xA4, 0xAB, 0x47, /* 0x74-0x77 */
+	0xA8, 0xE2, 0x00, 0x00, 0xB1, 0xE7, 0xC2, 0xB3, /* 0x78-0x7B */
+	0xA8, 0x7D, 0xBD, 0xCC, 0xB6, 0x71, 0xC0, 0x79, /* 0x7C-0x7F */
+
+	0xA7, 0x66, 0xA4, 0x6B, 0xC3, 0x66, 0xAE, 0xC8, /* 0x80-0x83 */
+	0xC2, 0x6F, 0xC4, 0x72, 0xBE, 0x5B, 0xC6, 0x7A, /* 0x84-0x87 */
+	0xC4, 0x52, 0xBE, 0xA4, 0xA4, 0x4F, 0xBE, 0xE4, /* 0x88-0x8B */
+	0xBE, 0xFA, 0xF7, 0x65, 0xA6, 0x7E, 0xBC, 0xA6, /* 0x8C-0x8F */
+	0xC5, 0xCA, 0xBC, 0xBF, 0xBA, 0xA7, 0xB7, 0xD2, /* 0x90-0x93 */
+	0xE6, 0xA3, 0x00, 0x00, 0xBD, 0x6D, 0xC1, 0x70, /* 0x94-0x97 */
+	0xBD, 0xFB, 0xBD, 0xAC, 0xB3, 0x73, 0xC1, 0xE5, /* 0x98-0x9B */
+	0xA6, 0x43, 0xA6, 0x48, 0xAB, 0x7C, 0xAF, 0x50, /* 0x9C-0x9F */
+	0xB5, 0xF5, 0xBB, 0xA1, 0xB7, 0x47, 0xA9, 0xC0, /* 0xA0-0xA3 */
+	0xB1, 0xC9, 0xC0, 0xD4, 0xC3, 0xAE, 0xC2, 0x79, /* 0xA4-0xA7 */
+	0xA5, 0x4F, 0xCB, 0xF1, 0xB9, 0xE7, 0xC0, 0xAD, /* 0xA8-0xAB */
+	0xCC, 0xB0, 0xAC, 0xC2, 0xBC, 0xFC, 0xB2, 0xDC, /* 0xAC-0xAF */
+	0xB2, 0xE2, 0xB9, 0x61, 0xB9, 0x73, 0xC6, 0x46, /* 0xB0-0xB3 */
+	0xBB, 0xE2, 0xA8, 0xD2, 0xC2, 0xA7, 0xC4, 0xBF, /* 0xB4-0xB7 */
+	0xC1, 0xF5, 0xB4, 0x63, 0xA4, 0x46, 0xB9, 0xB1, /* 0xB8-0xBB */
+	0xBC, 0x64, 0xA7, 0xBF, 0xAE, 0xC6, 0xBC, 0xD6, /* 0xBC-0xBF */
+	0xBF, 0x52, 0xC0, 0xF8, 0xE7, 0x64, 0xBF, 0xF1, /* 0xC0-0xC3 */
+	0xC0, 0x73, 0xB7, 0x77, 0xA8, 0xBF, 0xBC, 0x42, /* 0xC4-0xC7 */
+	0xCC, 0xD8, 0xAC, 0x68, 0xAC, 0x79, 0xB7, 0xC8, /* 0xC8-0xCB */
+	0xAF, 0x5B, 0xAF, 0x64, 0xB2, 0xB8, 0xAF, 0xC3, /* 0xCC-0xCF */
+	0xC3, 0xFE, 0xA4, 0xBB, 0xBC, 0xAE, 0xB3, 0xB0, /* 0xD0-0xD3 */
+	0xAD, 0xDB, 0xB1, 0x5B, 0xB2, 0x5F, 0xBD, 0xFC, /* 0xD4-0xD7 */
+	0xAB, 0xDF, 0xB7, 0x58, 0xAE, 0xDF, 0xB2, 0x76, /* 0xD8-0xDB */
+	0xB6, 0xA9, 0xA7, 0x51, 0xA6, 0x4F, 0xBC, 0x69, /* 0xDC-0xDF */
+	0xA9, 0xF6, 0xA7, 0xF5, 0xB1, 0xF9, 0xAA, 0x64, /* 0xE0-0xE3 */
+	0xB2, 0x7A, 0xB5, 0x67, 0xBF, 0xA9, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xB8, 0xCC, 0xA8, 0xBD, 0xC2, 0xF7, 0xB0, 0xCE, /* 0xE8-0xEB */
+	0xB7, 0xC4, 0xA7, 0x5B, 0xBF, 0x4D, 0xBF, 0x5A, /* 0xEC-0xEF */
+	0xC4, 0xA9, 0x00, 0x00, 0xC5, 0xEC, 0xC5, 0xEF, /* 0xF0-0xF3 */
+	0xAA, 0x4C, 0xB2, 0x4F, 0xC1, 0x7B, 0xA5, 0xDF, /* 0xF4-0xF7 */
+	0xB2, 0xC1, 0xB2, 0xC9, 0xAA, 0xAC, 0xAA, 0xA5, /* 0xF8-0xFB */
+	0xC3, 0xD1, 0xA4, 0xB0, 0xAF, 0xF9, 0xA8, 0xEB, /* 0xFC-0xFF */
+};
+
+static unsigned char u2c_FA[512] = {
+	0xA4, 0xC1, 0xAB, 0xD7, 0xA9, 0xDD, 0xBF, 0x7D, /* 0x00-0x03 */
+	0xA6, 0x76, 0xAC, 0x7D, 0xBC, 0xC9, 0xBF, 0xE7, /* 0x04-0x07 */
+	0xA6, 0xE6, 0xAD, 0xB0, 0xA8, 0xA3, 0xB9, 0xF8, /* 0x08-0x0B */
+	0xC9, 0x4A, 0xDD, 0xFC, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0xB6, 0xEF, 0x00, 0x00, 0xB4, 0xB8, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0xE8, 0xF9, 0xBD, 0xDE, 0xAF, 0x71, /* 0x14-0x17 */
+	0x00, 0x00, 0xAF, 0xAB, 0xB2, 0xBB, 0xBA, 0xD6, /* 0x18-0x1B */
+	0xB9, 0x74, 0xBA, 0xEB, 0xA6, 0xD0, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0xBD, 0xD1, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0xB6, 0x68, 0xB3, 0xA3, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0xB6, 0xBA, 0xB9, 0x7D, /* 0x28-0x2B */
+	0xC0, 0x5D, 0xC5, 0x62, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+};
+
+static unsigned char u2c_FE[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0xA1, 0x4A, 0xA1, 0x57, 0x00, 0x00, 0xA1, 0x59, /* 0x30-0x33 */
+	0xA1, 0x5B, 0xA1, 0x5F, 0xA1, 0x60, 0xA1, 0x63, /* 0x34-0x37 */
+	0xA1, 0x64, 0xA1, 0x67, 0xA1, 0x68, 0xA1, 0x6B, /* 0x38-0x3B */
+	0xA1, 0x6C, 0xA1, 0x6F, 0xA1, 0x70, 0xA1, 0x73, /* 0x3C-0x3F */
+	0xA1, 0x74, 0xA1, 0x77, 0xA1, 0x78, 0xA1, 0x7B, /* 0x40-0x43 */
+	0xA1, 0x7C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0xA1, 0xC6, 0xA1, 0xC7, 0xA1, 0xCA, /* 0x48-0x4B */
+	0xA1, 0xCB, 0xA1, 0xC8, 0xA1, 0xC9, 0xA1, 0x5C, /* 0x4C-0x4F */
+	0xA1, 0x4D, 0xA1, 0x4E, 0xA1, 0x4F, 0x00, 0x00, /* 0x50-0x53 */
+	0xA1, 0x51, 0xA1, 0x52, 0xA1, 0x53, 0xA1, 0x54, /* 0x54-0x57 */
+	0x00, 0x00, 0xA1, 0x7D, 0xA1, 0x7E, 0xA1, 0xA1, /* 0x58-0x5B */
+	0xA1, 0xA2, 0xA1, 0xA3, 0xA1, 0xA4, 0xA1, 0xCC, /* 0x5C-0x5F */
+	0xA1, 0xCD, 0xA1, 0xCE, 0xA1, 0xDE, 0xA1, 0xDF, /* 0x60-0x63 */
+	0xA1, 0xE0, 0xA1, 0xE1, 0xA1, 0xE2, 0x00, 0x00, /* 0x64-0x67 */
+	0xA2, 0x42, 0xA2, 0x4C, 0xA2, 0x4D, 0xA2, 0x4E, /* 0x68-0x6B */
+};
+
+static unsigned char u2c_FF[512] = {
+	0x00, 0x00, 0xA1, 0x49, 0xA1, 0xA8, 0xA1, 0xAD, /* 0x00-0x03 */
+	0xA2, 0x43, 0xA2, 0x48, 0xA1, 0xAE, 0xA1, 0xA6, /* 0x04-0x07 */
+	0xA1, 0x5D, 0xA1, 0x5E, 0xA1, 0xAF, 0xA1, 0xCF, /* 0x08-0x0B */
+	0xA1, 0x41, 0xA1, 0xD0, 0xA1, 0x44, 0xA1, 0xFE, /* 0x0C-0x0F */
+	0xA2, 0xAF, 0xA2, 0xB0, 0xA2, 0xB1, 0xA2, 0xB2, /* 0x10-0x13 */
+	0xA2, 0xB3, 0xA2, 0xB4, 0xA2, 0xB5, 0xA2, 0xB6, /* 0x14-0x17 */
+	0xA2, 0xB7, 0xA2, 0xB8, 0xA1, 0x47, 0xA1, 0x46, /* 0x18-0x1B */
+	0xA1, 0xD5, 0xA1, 0xD7, 0xA1, 0xD6, 0xA1, 0x48, /* 0x1C-0x1F */
+	0xA2, 0x49, 0xA2, 0xCF, 0xA2, 0xD0, 0xA2, 0xD1, /* 0x20-0x23 */
+	0xA2, 0xD2, 0xA2, 0xD3, 0xA2, 0xD4, 0xA2, 0xD5, /* 0x24-0x27 */
+	0xA2, 0xD6, 0xA2, 0xD7, 0xA2, 0xD8, 0xA2, 0xD9, /* 0x28-0x2B */
+	0xA2, 0xDA, 0xA2, 0xDB, 0xA2, 0xDC, 0xA2, 0xDD, /* 0x2C-0x2F */
+	0xA2, 0xDE, 0xA2, 0xDF, 0xA2, 0xE0, 0xA2, 0xE1, /* 0x30-0x33 */
+	0xA2, 0xE2, 0xA2, 0xE3, 0xA2, 0xE4, 0xA2, 0xE5, /* 0x34-0x37 */
+	0xA2, 0xE6, 0xA2, 0xE7, 0xA2, 0xE8, 0xA1, 0x65, /* 0x38-0x3B */
+	0xA2, 0x40, 0xA1, 0x66, 0xA1, 0x73, 0xA1, 0xC4, /* 0x3C-0x3F */
+	0xA1, 0xA5, 0xA2, 0xE9, 0xA2, 0xEA, 0xA2, 0xEB, /* 0x40-0x43 */
+	0xA2, 0xEC, 0xA2, 0xED, 0xA2, 0xEE, 0xA2, 0xEF, /* 0x44-0x47 */
+	0xA2, 0xF0, 0xA2, 0xF1, 0xA2, 0xF2, 0xA2, 0xF3, /* 0x48-0x4B */
+	0xA2, 0xF4, 0xA2, 0xF5, 0xA2, 0xF6, 0xA2, 0xF7, /* 0x4C-0x4F */
+	0xA2, 0xF8, 0xA2, 0xF9, 0xA2, 0xFA, 0xA2, 0xFB, /* 0x50-0x53 */
+	0xA2, 0xFC, 0xA2, 0xFD, 0xA2, 0xFE, 0xA3, 0x40, /* 0x54-0x57 */
+	0xA3, 0x41, 0xA3, 0x42, 0xA3, 0x43, 0xA1, 0x61, /* 0x58-0x5B */
+	0xA1, 0x55, 0xA1, 0x62, 0xA1, 0xE3, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0xA1, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA4-0xA7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA2, 0x46, 0xA2, 0x47, 0x00, 0x00, 0xA1, 0xC3, /* 0xE0-0xE3 */
+	0x00, 0x00, 0xA2, 0x44, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+};
+
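+/*
+ * Indexed by the high byte of the Unicode code point.  Each non-NULL
+ * page holds 256 two-byte Big5 codes for that row; a 0x00,0x00 pair
+ * means the code point has no Big5 mapping (see uni2char() below).
+ */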
+static unsigned char *page_uni2charset[256] = {
+	NULL,   NULL,   u2c_02, u2c_03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_20, u2c_21, u2c_22, u2c_23, NULL,   u2c_25, u2c_26, NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	u2c_30, u2c_31, u2c_32, u2c_33, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   u2c_4E, u2c_4F, 
+	u2c_50, u2c_51, u2c_52, u2c_53, u2c_54, u2c_55, u2c_56, u2c_57, 
+	u2c_58, u2c_59, u2c_5A, u2c_5B, u2c_5C, u2c_5D, u2c_5E, u2c_5F, 
+	u2c_60, u2c_61, u2c_62, u2c_63, u2c_64, u2c_65, u2c_66, u2c_67, 
+	u2c_68, u2c_69, u2c_6A, u2c_6B, u2c_6C, u2c_6D, u2c_6E, u2c_6F, 
+	u2c_70, u2c_71, u2c_72, u2c_73, u2c_74, u2c_75, u2c_76, u2c_77, 
+	u2c_78, u2c_79, u2c_7A, u2c_7B, u2c_7C, u2c_7D, u2c_7E, u2c_7F, 
+	u2c_80, u2c_81, u2c_82, u2c_83, u2c_84, u2c_85, u2c_86, u2c_87, 
+	u2c_88, u2c_89, u2c_8A, u2c_8B, u2c_8C, u2c_8D, u2c_8E, u2c_8F, 
+	u2c_90, u2c_91, u2c_92, u2c_93, u2c_94, u2c_95, u2c_96, u2c_97, 
+	u2c_98, u2c_99, u2c_9A, u2c_9B, u2c_9C, u2c_9D, u2c_9E, u2c_9F, 
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   u2c_DC, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   u2c_F9, u2c_FA, NULL,   NULL,   NULL,   u2c_FE, u2c_FF,
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(const wchar_t uni,
+			unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0xFF;
+	unsigned char ch = (uni >> 8) & 0xFF;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset) {
+		if (boundlen <= 1)
+			return -ENAMETOOLONG;
+		out[0] = uni2charset[cl*2];
+		out[1] = uni2charset[cl*2+1];
+		if (out[0] == 0x00 && out[1] == 0x00)
+			return -EINVAL;
+		n = 2;
+	} else if (ch == 0 && cl) {
+		out[0] = cl;
+		n = 1;
+	} else {
+		return -EINVAL;
+	}
+
+	return n;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen,
+			wchar_t *uni)
+{
+	unsigned char ch, cl;
+	wchar_t *charset2uni;
+	int n;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	if (boundlen == 1) {
+		*uni = rawstring[0];
+		return 1;
+	}
+
+	ch = rawstring[0];
+	cl = rawstring[1];
+
+	charset2uni = page_charset2uni[ch];
+	if (charset2uni && cl) {
+		*uni = charset2uni[cl];
+		if (*uni == 0x0000)
+			return -EINVAL;
+		n = 2;
+	} else {
+		*uni = ch;
+		n = 1;
+	}
+	return n;
+}
+
+static struct nls_table table = {
+	.charset	= "cp950",
+	.alias		= "big5",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_cp950(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_cp950(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_cp950)
+module_exit(exit_nls_cp950)
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NLS(big5);
diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
new file mode 100644
index 0000000..80f108a
--- /dev/null
+++ b/fs/nls/nls_euc-jp.c
@@ -0,0 +1,583 @@
+/*
+ * linux/fs/nls/nls_euc-jp.c
+ *
+ * Added `OSF/JVC Recommended Code Set Conversion Specification
+ * between Japanese EUC and Shift-JIS' support: <hirofumi@mail.parknet.co.jp>
+ * (http://www.opengroup.or.jp/jvc/cde/sjis-euc-e.html)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
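+/*
+ * Unicode <-> Shift-JIS conversion is delegated to the "cp932" NLS
+ * table loaded in init_nls_euc_jp(); this module only converts between
+ * Shift-JIS and EUC-JP around that table and reuses its case tables.
+ */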
+static struct nls_table *p_nls;
+
+#define IS_SJIS_LOW_BYTE(l)	((0x40 <= (l)) && ((l) <= 0xFC) && ((l) != 0x7F))
+/* JIS X 0208 (including NEC special characters) */
+#define IS_SJIS_JISX0208(h, l)	((((0x81 <= (h)) && ((h) <= 0x9F))	\
+				 || ((0xE0 <= (h)) && ((h) <= 0xEA)))	\
+				 && IS_SJIS_LOW_BYTE(l))
+#define IS_SJIS_JISX0201KANA(c)	((0xA1 <= (c)) && ((c) <= 0xDF))
+#define IS_SJIS_UDC_LOW(h, l)	(((0xF0 <= (h)) && ((h) <= 0xF4))	\
+				 && IS_SJIS_LOW_BYTE(l))
+#define IS_SJIS_UDC_HI(h, l)	(((0xF5 <= (h)) && ((h) <= 0xF9))	\
+				 && IS_SJIS_LOW_BYTE(l))
+#define IS_SJIS_IBM(h, l)	(((0xFA <= (h)) && ((h) <= 0xFC))	\
+				 && IS_SJIS_LOW_BYTE(l))
+#define IS_SJIS_NECIBM(h, l)	(((0xED <= (h)) && ((h) <= 0xEE))	\
+				 && IS_SJIS_LOW_BYTE(l))
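+/*
+ * One Shift-JIS lead byte packs two EUC rows: a trailing byte below
+ * 0x9F selects the first row, 0x9F and above the second.  sjis_p and
+ * euc_p are the first lead bytes of the corresponding Shift-JIS and
+ * EUC blocks (for instance 0xF0 and 0xF5 for the low half of the
+ * user-defined area) and rebase the doubled lead byte into EUC range.
+ */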
+#define MAP_SJIS2EUC(sjis_hi, sjis_lo, sjis_p, euc_hi, euc_lo, euc_p) do {	\
+	if ((sjis_lo) >= 0x9F) {						\
+		(euc_hi) = (sjis_hi) * 2 - (((sjis_p) * 2 - (euc_p)) - 1);	\
+		(euc_lo) = (sjis_lo) + 2;					\
+	} else {								\
+		(euc_hi) = (sjis_hi) * 2 - ((sjis_p) * 2 - (euc_p));		\
+		(euc_lo) = (sjis_lo) + ((sjis_lo) >= 0x7F ? 0x60 : 0x61);	\
+	}									\
+} while(0)
+
+#define SS2		(0x8E)		/* Single Shift 2 */
+#define SS3		(0x8F)		/* Single Shift 3 */
+#define IS_EUC_BYTE(c)		((0xA1 <= (c)) && ((c) <= 0xFE))
+#define IS_EUC_JISX0208(h, l)	(IS_EUC_BYTE(h) && IS_EUC_BYTE(l))
+#define IS_EUC_JISX0201KANA(h, l)	(((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF))
+#define IS_EUC_UDC_LOW(h, l)	(((0xF5 <= (h)) && ((h) <= 0xFE))	\
+				 && IS_EUC_BYTE(l))
+#define IS_EUC_UDC_HI(h, l)	IS_EUC_UDC_LOW(h, l) /* G3 block */
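+/*
+ * Inverse of MAP_SJIS2EUC: an odd EUC lead byte yields the first half
+ * of a Shift-JIS row (trailing byte rebased below 0x9F), an even lead
+ * byte the second half (trailing byte = euc_lo - 2).
+ */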
+#define MAP_EUC2SJIS(euc_hi, euc_lo, euc_p, sjis_hi, sjis_lo, sjis_p) do {	\
+	if ((euc_hi) & 1) {							\
+		(sjis_hi) = (euc_hi) / 2 + ((sjis_p) - (euc_p) / 2);		\
+		(sjis_lo) = (euc_lo) - ((euc_lo) >= 0xE0 ? 0x60 : 0x61);	\
+	} else {								\
+		(sjis_hi) = (euc_hi) / 2 + (((sjis_p) - (euc_p) / 2) - 1);	\
+		(sjis_lo) = (euc_lo) - 2;					\
+	}									\
+} while(0)
+
+/* SJIS IBM extended characters to EUC map */
+static unsigned char sjisibm2euc_map[][2] = {
+	{0xF3, 0xF3}, {0xF3, 0xF4}, {0xF3, 0xF5}, {0xF3, 0xF6}, {0xF3, 0xF7},
+	{0xF3, 0xF8}, {0xF3, 0xF9}, {0xF3, 0xFA}, {0xF3, 0xFB}, {0xF3, 0xFC},
+	{0xF3, 0xFD}, {0xF3, 0xFE}, {0xF4, 0xA1}, {0xF4, 0xA2}, {0xF4, 0xA3},
+	{0xF4, 0xA4}, {0xF4, 0xA5}, {0xF4, 0xA6}, {0xF4, 0xA7}, {0xF4, 0xA8},
+	{0xA2, 0xCC}, {0xA2, 0xC3}, {0xF4, 0xA9}, {0xF4, 0xAA}, {0xF4, 0xAB},
+	{0xF4, 0xAC}, {0xF4, 0xAD}, {0xA2, 0xE8}, {0xD4, 0xE3}, {0xDC, 0xDF},
+	{0xE4, 0xE9}, {0xE3, 0xF8}, {0xD9, 0xA1}, {0xB1, 0xBB}, {0xF4, 0xAE},
+	{0xC2, 0xAD}, {0xC3, 0xFC}, {0xE4, 0xD0}, {0xC2, 0xBF}, {0xBC, 0xF4},
+	{0xB0, 0xA9}, {0xB0, 0xC8}, {0xF4, 0xAF}, {0xB0, 0xD2}, {0xB0, 0xD4},
+	{0xB0, 0xE3}, {0xB0, 0xEE}, {0xB1, 0xA7}, {0xB1, 0xA3}, {0xB1, 0xAC},
+	{0xB1, 0xA9}, {0xB1, 0xBE}, {0xB1, 0xDF}, {0xB1, 0xD8}, {0xB1, 0xC8},
+	{0xB1, 0xD7}, {0xB1, 0xE3}, {0xB1, 0xF4}, {0xB1, 0xE1}, {0xB2, 0xA3},
+	{0xF4, 0xB0}, {0xB2, 0xBB}, {0xB2, 0xE6}, {0x00, 0x00}, {0xB2, 0xED},
+	{0xB2, 0xF5}, {0xB2, 0xFC}, {0xF4, 0xB1}, {0xB3, 0xB5}, {0xB3, 0xD8},
+	{0xB3, 0xDB}, {0xB3, 0xE5}, {0xB3, 0xEE}, {0xB3, 0xFB}, {0xF4, 0xB2},
+	{0xF4, 0xB3}, {0xB4, 0xC0}, {0xB4, 0xC7}, {0xB4, 0xD0}, {0xB4, 0xDE},
+	{0xF4, 0xB4}, {0xB5, 0xAA}, {0xF4, 0xB5}, {0xB5, 0xAF}, {0xB5, 0xC4},
+	{0xB5, 0xE8}, {0xF4, 0xB6}, {0xB7, 0xC2}, {0xB7, 0xE4}, {0xB7, 0xE8},
+	{0xB7, 0xE7}, {0xF4, 0xB7}, {0xF4, 0xB8}, {0xF4, 0xB9}, {0xB8, 0xCE},
+	{0xB8, 0xE1}, {0xB8, 0xF5}, {0xB8, 0xF7}, {0xB8, 0xF8}, {0xB8, 0xFC},
+	{0xB9, 0xAF}, {0xB9, 0xB7}, {0xBA, 0xBE}, {0xBA, 0xDB}, {0xCD, 0xAA},
+	{0xBA, 0xE1}, {0xF4, 0xBA}, {0xBA, 0xEB}, {0xBB, 0xB3}, {0xBB, 0xB8},
+	{0xF4, 0xBB}, {0xBB, 0xCA}, {0xF4, 0xBC}, {0xF4, 0xBD}, {0xBB, 0xD0},
+	{0xBB, 0xDE}, {0xBB, 0xF4}, {0xBB, 0xF5}, {0xBB, 0xF9}, {0xBC, 0xE4},
+	{0xBC, 0xED}, {0xBC, 0xFE}, {0xF4, 0xBE}, {0xBD, 0xC2}, {0xBD, 0xE7},
+	{0xF4, 0xBF}, {0xBD, 0xF0}, {0xBE, 0xB0}, {0xBE, 0xAC}, {0xF4, 0xC0},
+	{0xBE, 0xB3}, {0xBE, 0xBD}, {0xBE, 0xCD}, {0xBE, 0xC9}, {0xBE, 0xE4},
+	{0xBF, 0xA8}, {0xBF, 0xC9}, {0xC0, 0xC4}, {0xC0, 0xE4}, {0xC0, 0xF4},
+	{0xC1, 0xA6}, {0xF4, 0xC1}, {0xC1, 0xF5}, {0xC1, 0xFC}, {0xF4, 0xC2},
+	{0xC1, 0xF8}, {0xC2, 0xAB}, {0xC2, 0xA1}, {0xC2, 0xA5}, {0xF4, 0xC3},
+	{0xC2, 0xB8}, {0xC2, 0xBA}, {0xF4, 0xC4}, {0xC2, 0xC4}, {0xC2, 0xD2},
+	{0xC2, 0xD7}, {0xC2, 0xDB}, {0xC2, 0xDE}, {0xC2, 0xED}, {0xC2, 0xF0},
+	{0xF4, 0xC5}, {0xC3, 0xA1}, {0xC3, 0xB5}, {0xC3, 0xC9}, {0xC3, 0xB9},
+	{0xF4, 0xC6}, {0xC3, 0xD8}, {0xC3, 0xFE}, {0xF4, 0xC7}, {0xC4, 0xCC},
+	{0xF4, 0xC8}, {0xC4, 0xD9}, {0xC4, 0xEA}, {0xC4, 0xFD}, {0xF4, 0xC9},
+	{0xC5, 0xA7}, {0xC5, 0xB5}, {0xC5, 0xB6}, {0xF4, 0xCA}, {0xC5, 0xD5},
+	{0xC6, 0xB8}, {0xC6, 0xD7}, {0xC6, 0xE0}, {0xC6, 0xEA}, {0xC6, 0xE3},
+	{0xC7, 0xA1}, {0xC7, 0xAB}, {0xC7, 0xC7}, {0xC7, 0xC3}, {0xC7, 0xCB},
+	{0xC7, 0xCF}, {0xC7, 0xD9}, {0xF4, 0xCB}, {0xF4, 0xCC}, {0xC7, 0xE6},
+	{0xC7, 0xEE}, {0xC7, 0xFC}, {0xC7, 0xEB}, {0xC7, 0xF0}, {0xC8, 0xB1},
+	{0xC8, 0xE5}, {0xC8, 0xF8}, {0xC9, 0xA6}, {0xC9, 0xAB}, {0xC9, 0xAD},
+	{0xF4, 0xCD}, {0xC9, 0xCA}, {0xC9, 0xD3}, {0xC9, 0xE9}, {0xC9, 0xE3},
+	{0xC9, 0xFC}, {0xC9, 0xF4}, {0xC9, 0xF5}, {0xF4, 0xCE}, {0xCA, 0xB3},
+	{0xCA, 0xBD}, {0xCA, 0xEF}, {0xCA, 0xF1}, {0xCB, 0xAE}, {0xF4, 0xCF},
+	{0xCB, 0xCA}, {0xCB, 0xE6}, {0xCB, 0xEA}, {0xCB, 0xF0}, {0xCB, 0xF4},
+	{0xCB, 0xEE}, {0xCC, 0xA5}, {0xCB, 0xF9}, {0xCC, 0xAB}, {0xCC, 0xAE},
+	{0xCC, 0xAD}, {0xCC, 0xB2}, {0xCC, 0xC2}, {0xCC, 0xD0}, {0xCC, 0xD9},
+	{0xF4, 0xD0}, {0xCD, 0xBB}, {0xF4, 0xD1}, {0xCE, 0xBB}, {0xF4, 0xD2},
+	{0xCE, 0xBA}, {0xCE, 0xC3}, {0xF4, 0xD3}, {0xCE, 0xF2}, {0xB3, 0xDD},
+	{0xCF, 0xD5}, {0xCF, 0xE2}, {0xCF, 0xE9}, {0xCF, 0xED}, {0xF4, 0xD4},
+	{0xF4, 0xD5}, {0xF4, 0xD6}, {0x00, 0x00}, {0xF4, 0xD7}, {0xD0, 0xE5},
+	{0xF4, 0xD8}, {0xD0, 0xE9}, {0xD1, 0xE8}, {0xF4, 0xD9}, {0xF4, 0xDA},
+	{0xD1, 0xEC}, {0xD2, 0xBB}, {0xF4, 0xDB}, {0xD3, 0xE1}, {0xD3, 0xE8},
+	{0xD4, 0xA7}, {0xF4, 0xDC}, {0xF4, 0xDD}, {0xD4, 0xD4}, {0xD4, 0xF2},
+	{0xD5, 0xAE}, {0xF4, 0xDE}, {0xD7, 0xDE}, {0xF4, 0xDF}, {0xD8, 0xA2},
+	{0xD8, 0xB7}, {0xD8, 0xC1}, {0xD8, 0xD1}, {0xD8, 0xF4}, {0xD9, 0xC6},
+	{0xD9, 0xC8}, {0xD9, 0xD1}, {0xF4, 0xE0}, {0xF4, 0xE1}, {0xF4, 0xE2},
+	{0xF4, 0xE3}, {0xF4, 0xE4}, {0xDC, 0xD3}, {0xDD, 0xC8}, {0xDD, 0xD4},
+	{0xDD, 0xEA}, {0xDD, 0xFA}, {0xDE, 0xA4}, {0xDE, 0xB0}, {0xF4, 0xE5},
+	{0xDE, 0xB5}, {0xDE, 0xCB}, {0xF4, 0xE6}, {0xDF, 0xB9}, {0xF4, 0xE7},
+	{0xDF, 0xC3}, {0xF4, 0xE8}, {0xF4, 0xE9}, {0xE0, 0xD9}, {0xF4, 0xEA},
+	{0xF4, 0xEB}, {0xE1, 0xE2}, {0xF4, 0xEC}, {0xF4, 0xED}, {0xF4, 0xEE},
+	{0xE2, 0xC7}, {0xE3, 0xA8}, {0xE3, 0xA6}, {0xE3, 0xA9}, {0xE3, 0xAF},
+	{0xE3, 0xB0}, {0xE3, 0xAA}, {0xE3, 0xAB}, {0xE3, 0xBC}, {0xE3, 0xC1},
+	{0xE3, 0xBF}, {0xE3, 0xD5}, {0xE3, 0xD8}, {0xE3, 0xD6}, {0xE3, 0xDF},
+	{0xE3, 0xE3}, {0xE3, 0xE1}, {0xE3, 0xD4}, {0xE3, 0xE9}, {0xE4, 0xA6},
+	{0xE3, 0xF1}, {0xE3, 0xF2}, {0xE4, 0xCB}, {0xE4, 0xC1}, {0xE4, 0xC3},
+	{0xE4, 0xBE}, {0xF4, 0xEF}, {0xE4, 0xC0}, {0xE4, 0xC7}, {0xE4, 0xBF},
+	{0xE4, 0xE0}, {0xE4, 0xDE}, {0xE4, 0xD1}, {0xF4, 0xF0}, {0xE4, 0xDC},
+	{0xE4, 0xD2}, {0xE4, 0xDB}, {0xE4, 0xD4}, {0xE4, 0xFA}, {0xE4, 0xEF},
+	{0xE5, 0xB3}, {0xE5, 0xBF}, {0xE5, 0xC9}, {0xE5, 0xD0}, {0xE5, 0xE2},
+	{0xE5, 0xEA}, {0xE5, 0xEB}, {0xF4, 0xF1}, {0xF4, 0xF2}, {0xF4, 0xF3},
+	{0xE6, 0xE8}, {0xE6, 0xEF}, {0xE7, 0xAC}, {0xF4, 0xF4}, {0xE7, 0xAE},
+	{0xF4, 0xF5}, {0xE7, 0xB1}, {0xF4, 0xF6}, {0xE7, 0xB2}, {0xE8, 0xB1},
+	{0xE8, 0xB6}, {0xF4, 0xF7}, {0xF4, 0xF8}, {0xE8, 0xDD}, {0xF4, 0xF9},
+	{0xF4, 0xFA}, {0xE9, 0xD1}, {0xF4, 0xFB}, {0xE9, 0xED}, {0xEA, 0xCD},
+	{0xF4, 0xFC}, {0xEA, 0xDB}, {0xEA, 0xE6}, {0xEA, 0xEA}, {0xEB, 0xA5},
+	{0xEB, 0xFB}, {0xEB, 0xFA}, {0xF4, 0xFD}, {0xEC, 0xD6}, {0xF4, 0xFE},
+};
+
+#define IS_EUC_IBM2JISX0208(h, l) \
+		(((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8))
+
+/* EUC to SJIS IBM extended characters map (G3 JIS X 0212 block) */
+static struct {
+	unsigned short euc;
+	unsigned char sjis[2];
+} euc2sjisibm_jisx0212_map[] = {
+	{0xA2C3, {0xFA, 0x55}}, {0xB0A9, {0xFA, 0x68}}, {0xB0C8, {0xFA, 0x69}},
+	{0xB0D2, {0xFA, 0x6B}}, {0xB0D4, {0xFA, 0x6C}}, {0xB0E3, {0xFA, 0x6D}},
+	{0xB0EE, {0xFA, 0x6E}}, {0xB1A3, {0xFA, 0x70}}, {0xB1A7, {0xFA, 0x6F}},
+	{0xB1A9, {0xFA, 0x72}}, {0xB1AC, {0xFA, 0x71}}, {0xB1BB, {0xFA, 0x61}},
+	{0xB1BE, {0xFA, 0x73}}, {0xB1C8, {0xFA, 0x76}}, {0xB1D7, {0xFA, 0x77}},
+	{0xB1D8, {0xFA, 0x75}}, {0xB1DF, {0xFA, 0x74}}, {0xB1E1, {0xFA, 0x7A}},
+	{0xB1E3, {0xFA, 0x78}}, {0xB1F4, {0xFA, 0x79}}, {0xB2A3, {0xFA, 0x7B}},
+	{0xB2BB, {0xFA, 0x7D}}, {0xB2E6, {0xFA, 0x7E}}, {0xB2ED, {0xFA, 0x80}},
+	{0xB2F5, {0xFA, 0x81}}, {0xB2FC, {0xFA, 0x82}}, {0xB3B5, {0xFA, 0x84}},
+	{0xB3D8, {0xFA, 0x85}}, {0xB3DB, {0xFA, 0x86}}, {0xB3DD, {0xFB, 0x77}},
+	{0xB3E5, {0xFA, 0x87}}, {0xB3EE, {0xFA, 0x88}}, {0xB3FB, {0xFA, 0x89}},
+	{0xB4C0, {0xFA, 0x8C}}, {0xB4C7, {0xFA, 0x8D}}, {0xB4D0, {0xFA, 0x8E}},
+	{0xB4DE, {0xFA, 0x8F}}, {0xB5AA, {0xFA, 0x91}}, {0xB5AF, {0xFA, 0x93}},
+	{0xB5C4, {0xFA, 0x94}}, {0xB5E8, {0xFA, 0x95}}, {0xB7C2, {0xFA, 0x97}},
+	{0xB7E4, {0xFA, 0x98}}, {0xB7E7, {0xFA, 0x9A}}, {0xB7E8, {0xFA, 0x99}},
+	{0xB8CE, {0xFA, 0x9E}}, {0xB8E1, {0xFA, 0x9F}}, {0xB8F5, {0xFA, 0xA0}},
+	{0xB8F7, {0xFA, 0xA1}}, {0xB8F8, {0xFA, 0xA2}}, {0xB8FC, {0xFA, 0xA3}},
+	{0xB9AF, {0xFA, 0xA4}}, {0xB9B7, {0xFA, 0xA5}}, {0xBABE, {0xFA, 0xA6}},
+	{0xBADB, {0xFA, 0xA7}}, {0xBAE1, {0xFA, 0xA9}}, {0xBAEB, {0xFA, 0xAB}},
+	{0xBBB3, {0xFA, 0xAC}}, {0xBBB8, {0xFA, 0xAD}}, {0xBBCA, {0xFA, 0xAF}},
+	{0xBBD0, {0xFA, 0xB2}}, {0xBBDE, {0xFA, 0xB3}}, {0xBBF4, {0xFA, 0xB4}},
+	{0xBBF5, {0xFA, 0xB5}}, {0xBBF9, {0xFA, 0xB6}}, {0xBCE4, {0xFA, 0xB7}},
+	{0xBCED, {0xFA, 0xB8}}, {0xBCF4, {0xFA, 0x67}}, {0xBCFE, {0xFA, 0xB9}},
+	{0xBDC2, {0xFA, 0xBB}}, {0xBDE7, {0xFA, 0xBC}}, {0xBDF0, {0xFA, 0xBE}},
+	{0xBEAC, {0xFA, 0xC0}}, {0xBEB0, {0xFA, 0xBF}}, {0xBEB3, {0xFA, 0xC2}},
+	{0xBEBD, {0xFA, 0xC3}}, {0xBEC9, {0xFA, 0xC5}}, {0xBECD, {0xFA, 0xC4}},
+	{0xBEE4, {0xFA, 0xC6}}, {0xBFA8, {0xFA, 0xC7}}, {0xBFC9, {0xFA, 0xC8}},
+	{0xC0C4, {0xFA, 0xC9}}, {0xC0E4, {0xFA, 0xCA}}, {0xC0F4, {0xFA, 0xCB}},
+	{0xC1A6, {0xFA, 0xCC}}, {0xC1F5, {0xFA, 0xCE}}, {0xC1F8, {0xFA, 0xD1}},
+	{0xC1FC, {0xFA, 0xCF}}, {0xC2A1, {0xFA, 0xD3}}, {0xC2A5, {0xFA, 0xD4}},
+	{0xC2AB, {0xFA, 0xD2}}, {0xC2AD, {0xFA, 0x63}}, {0xC2B8, {0xFA, 0xD6}},
+	{0xC2BA, {0xFA, 0xD7}}, {0xC2BF, {0xFA, 0x66}}, {0xC2C4, {0xFA, 0xD9}},
+	{0xC2D2, {0xFA, 0xDA}}, {0xC2D7, {0xFA, 0xDB}}, {0xC2DB, {0xFA, 0xDC}},
+	{0xC2DE, {0xFA, 0xDD}}, {0xC2ED, {0xFA, 0xDE}}, {0xC2F0, {0xFA, 0xDF}},
+	{0xC3A1, {0xFA, 0xE1}}, {0xC3B5, {0xFA, 0xE2}}, {0xC3B9, {0xFA, 0xE4}},
+	{0xC3C9, {0xFA, 0xE3}}, {0xC3D8, {0xFA, 0xE6}}, {0xC3FC, {0xFA, 0x64}},
+	{0xC3FE, {0xFA, 0xE7}}, {0xC4CC, {0xFA, 0xE9}}, {0xC4D9, {0xFA, 0xEB}},
+	{0xC4EA, {0xFA, 0xEC}}, {0xC4FD, {0xFA, 0xED}}, {0xC5A7, {0xFA, 0xEF}},
+	{0xC5B5, {0xFA, 0xF0}}, {0xC5B6, {0xFA, 0xF1}}, {0xC5D5, {0xFA, 0xF3}},
+	{0xC6B8, {0xFA, 0xF4}}, {0xC6D7, {0xFA, 0xF5}}, {0xC6E0, {0xFA, 0xF6}},
+	{0xC6E3, {0xFA, 0xF8}}, {0xC6EA, {0xFA, 0xF7}}, {0xC7A1, {0xFA, 0xF9}},
+	{0xC7AB, {0xFA, 0xFA}}, {0xC7C3, {0xFA, 0xFC}}, {0xC7C7, {0xFA, 0xFB}},
+	{0xC7CB, {0xFB, 0x40}}, {0xC7CF, {0xFB, 0x41}}, {0xC7D9, {0xFB, 0x42}},
+	{0xC7E6, {0xFB, 0x45}}, {0xC7EB, {0xFB, 0x48}}, {0xC7EE, {0xFB, 0x46}},
+	{0xC7F0, {0xFB, 0x49}}, {0xC7FC, {0xFB, 0x47}}, {0xC8B1, {0xFB, 0x4A}},
+	{0xC8E5, {0xFB, 0x4B}}, {0xC8F8, {0xFB, 0x4C}}, {0xC9A6, {0xFB, 0x4D}},
+	{0xC9AB, {0xFB, 0x4E}}, {0xC9AD, {0xFB, 0x4F}}, {0xC9CA, {0xFB, 0x51}},
+	{0xC9D3, {0xFB, 0x52}}, {0xC9E3, {0xFB, 0x54}}, {0xC9E9, {0xFB, 0x53}},
+	{0xC9F4, {0xFB, 0x56}}, {0xC9F5, {0xFB, 0x57}}, {0xC9FC, {0xFB, 0x55}},
+	{0xCAB3, {0xFB, 0x59}}, {0xCABD, {0xFB, 0x5A}}, {0xCAEF, {0xFB, 0x5B}},
+	{0xCAF1, {0xFB, 0x5C}}, {0xCBAE, {0xFB, 0x5D}}, {0xCBCA, {0xFB, 0x5F}},
+	{0xCBE6, {0xFB, 0x60}}, {0xCBEA, {0xFB, 0x61}}, {0xCBEE, {0xFB, 0x64}},
+	{0xCBF0, {0xFB, 0x62}}, {0xCBF4, {0xFB, 0x63}}, {0xCBF9, {0xFB, 0x66}},
+	{0xCCA5, {0xFB, 0x65}}, {0xCCAB, {0xFB, 0x67}}, {0xCCAD, {0xFB, 0x69}},
+	{0xCCAE, {0xFB, 0x68}}, {0xCCB2, {0xFB, 0x6A}}, {0xCCC2, {0xFB, 0x6B}},
+	{0xCCD0, {0xFB, 0x6C}}, {0xCCD9, {0xFB, 0x6D}}, {0xCDAA, {0xFA, 0xA8}},
+	{0xCDBB, {0xFB, 0x6F}}, {0xCEBA, {0xFB, 0x73}}, {0xCEBB, {0xFB, 0x71}},
+	{0xCEC3, {0xFB, 0x74}}, {0xCEF2, {0xFB, 0x76}}, {0xCFD5, {0xFB, 0x78}},
+	{0xCFE2, {0xFB, 0x79}}, {0xCFE9, {0xFB, 0x7A}}, {0xCFED, {0xFB, 0x7B}},
+	{0xD0E5, {0xFB, 0x81}}, {0xD0E9, {0xFB, 0x83}}, {0xD1E8, {0xFB, 0x84}},
+	{0xD1EC, {0xFB, 0x87}}, {0xD2BB, {0xFB, 0x88}}, {0xD3E1, {0xFB, 0x8A}},
+	{0xD3E8, {0xFB, 0x8B}}, {0xD4A7, {0xFB, 0x8C}}, {0xD4D4, {0xFB, 0x8F}},
+	{0xD4E3, {0xFA, 0x5C}}, {0xD4F2, {0xFB, 0x90}}, {0xD5AE, {0xFB, 0x91}},
+	{0xD7DE, {0xFB, 0x93}}, {0xD8A2, {0xFB, 0x95}}, {0xD8B7, {0xFB, 0x96}},
+	{0xD8C1, {0xFB, 0x97}}, {0xD8D1, {0xFB, 0x98}}, {0xD8F4, {0xFB, 0x99}},
+	{0xD9A1, {0xFA, 0x60}}, {0xD9C6, {0xFB, 0x9A}}, {0xD9C8, {0xFB, 0x9B}},
+	{0xD9D1, {0xFB, 0x9C}}, {0xDCD3, {0xFB, 0xA2}}, {0xDCDF, {0xFA, 0x5D}},
+	{0xDDC8, {0xFB, 0xA3}}, {0xDDD4, {0xFB, 0xA4}}, {0xDDEA, {0xFB, 0xA5}},
+	{0xDDFA, {0xFB, 0xA6}}, {0xDEA4, {0xFB, 0xA7}}, {0xDEB0, {0xFB, 0xA8}},
+	{0xDEB5, {0xFB, 0xAA}}, {0xDECB, {0xFB, 0xAB}}, {0xDFB9, {0xFB, 0xAD}},
+	{0xDFC3, {0xFB, 0xAF}}, {0xE0D9, {0xFB, 0xB2}}, {0xE1E2, {0xFB, 0xB5}},
+	{0xE2C7, {0xFB, 0xB9}}, {0xE3A6, {0xFB, 0xBB}}, {0xE3A8, {0xFB, 0xBA}},
+	{0xE3A9, {0xFB, 0xBC}}, {0xE3AA, {0xFB, 0xBF}}, {0xE3AB, {0xFB, 0xC0}},
+	{0xE3AF, {0xFB, 0xBD}}, {0xE3B0, {0xFB, 0xBE}}, {0xE3BC, {0xFB, 0xC1}},
+	{0xE3BF, {0xFB, 0xC3}}, {0xE3C1, {0xFB, 0xC2}}, {0xE3D4, {0xFB, 0xCA}},
+	{0xE3D5, {0xFB, 0xC4}}, {0xE3D6, {0xFB, 0xC6}}, {0xE3D8, {0xFB, 0xC5}},
+	{0xE3DF, {0xFB, 0xC7}}, {0xE3E1, {0xFB, 0xC9}}, {0xE3E3, {0xFB, 0xC8}},
+	{0xE3E9, {0xFB, 0xCB}}, {0xE3F1, {0xFB, 0xCD}}, {0xE3F2, {0xFB, 0xCE}},
+	{0xE3F8, {0xFA, 0x5F}}, {0xE4A6, {0xFB, 0xCC}}, {0xE4BE, {0xFB, 0xD2}},
+	{0xE4BF, {0xFB, 0xD6}}, {0xE4C0, {0xFB, 0xD4}}, {0xE4C1, {0xFB, 0xD0}},
+	{0xE4C3, {0xFB, 0xD1}}, {0xE4C7, {0xFB, 0xD5}}, {0xE4CB, {0xFB, 0xCF}},
+	{0xE4D0, {0xFA, 0x65}}, {0xE4D1, {0xFB, 0xD9}}, {0xE4D2, {0xFB, 0xDC}},
+	{0xE4D4, {0xFB, 0xDE}}, {0xE4DB, {0xFB, 0xDD}}, {0xE4DC, {0xFB, 0xDB}},
+	{0xE4DE, {0xFB, 0xD8}}, {0xE4E0, {0xFB, 0xD7}}, {0xE4E9, {0xFA, 0x5E}},
+	{0xE4EF, {0xFB, 0xE0}}, {0xE4FA, {0xFB, 0xDF}}, {0xE5B3, {0xFB, 0xE1}},
+	{0xE5BF, {0xFB, 0xE2}}, {0xE5C9, {0xFB, 0xE3}}, {0xE5D0, {0xFB, 0xE4}},
+	{0xE5E2, {0xFB, 0xE5}}, {0xE5EA, {0xFB, 0xE6}}, {0xE5EB, {0xFB, 0xE7}},
+	{0xE6E8, {0xFB, 0xEB}}, {0xE6EF, {0xFB, 0xEC}}, {0xE7AC, {0xFB, 0xED}},
+	{0xE7AE, {0xFB, 0xEF}}, {0xE7B1, {0xFB, 0xF1}}, {0xE7B2, {0xFB, 0xF3}},
+	{0xE8B1, {0xFB, 0xF4}}, {0xE8B6, {0xFB, 0xF5}}, {0xE8DD, {0xFB, 0xF8}},
+	{0xE9D1, {0xFB, 0xFB}}, {0xE9ED, {0xFC, 0x40}}, {0xEACD, {0xFC, 0x41}},
+	{0xEADB, {0xFC, 0x43}}, {0xEAE6, {0xFC, 0x44}}, {0xEAEA, {0xFC, 0x45}},
+	{0xEBA5, {0xFC, 0x46}}, {0xEBFA, {0xFC, 0x48}}, {0xEBFB, {0xFC, 0x47}},
+	{0xECD6, {0xFC, 0x4A}},
+};
+
+/* EUC to SJIS IBM extended characters map (G3 Upper block) */
+static unsigned char euc2sjisibm_g3upper_map[][2] = {
+	{0xFA, 0x40}, {0xFA, 0x41}, {0xFA, 0x42}, {0xFA, 0x43}, {0xFA, 0x44},
+	{0xFA, 0x45}, {0xFA, 0x46}, {0xFA, 0x47}, {0xFA, 0x48}, {0xFA, 0x49},
+	{0xFA, 0x4A}, {0xFA, 0x4B}, {0xFA, 0x4C}, {0xFA, 0x4D}, {0xFA, 0x4E},
+	{0xFA, 0x4F}, {0xFA, 0x50}, {0xFA, 0x51}, {0xFA, 0x52}, {0xFA, 0x53},
+	{0xFA, 0x56}, {0xFA, 0x57}, {0xFA, 0x58}, {0xFA, 0x59}, {0xFA, 0x5A},
+	{0xFA, 0x62}, {0xFA, 0x6A}, {0xFA, 0x7C}, {0xFA, 0x83}, {0xFA, 0x8A},
+	{0xFA, 0x8B}, {0xFA, 0x90}, {0xFA, 0x92}, {0xFA, 0x96}, {0xFA, 0x9B},
+	{0xFA, 0x9C}, {0xFA, 0x9D}, {0xFA, 0xAA}, {0xFA, 0xAE}, {0xFA, 0xB0},
+	{0xFA, 0xB1}, {0xFA, 0xBA}, {0xFA, 0xBD}, {0xFA, 0xC1}, {0xFA, 0xCD},
+	{0xFA, 0xD0}, {0xFA, 0xD5}, {0xFA, 0xD8}, {0xFA, 0xE0}, {0xFA, 0xE5},
+	{0xFA, 0xE8}, {0xFA, 0xEA}, {0xFA, 0xEE}, {0xFA, 0xF2}, {0xFB, 0x43},
+	{0xFB, 0x44}, {0xFB, 0x50}, {0xFB, 0x58}, {0xFB, 0x5E}, {0xFB, 0x6E},
+	{0xFB, 0x70}, {0xFB, 0x72}, {0xFB, 0x75}, {0xFB, 0x7C}, {0xFB, 0x7D},
+	{0xFB, 0x7E}, {0xFB, 0x80}, {0xFB, 0x82}, {0xFB, 0x85}, {0xFB, 0x86},
+	{0xFB, 0x89}, {0xFB, 0x8D}, {0xFB, 0x8E}, {0xFB, 0x92}, {0xFB, 0x94},
+	{0xFB, 0x9D}, {0xFB, 0x9E}, {0xFB, 0x9F}, {0xFB, 0xA0}, {0xFB, 0xA1},
+	{0xFB, 0xA9}, {0xFB, 0xAC}, {0xFB, 0xAE}, {0xFB, 0xB0}, {0xFB, 0xB1},
+	{0xFB, 0xB3}, {0xFB, 0xB4}, {0xFB, 0xB6}, {0xFB, 0xB7}, {0xFB, 0xB8},
+	{0xFB, 0xD3}, {0xFB, 0xDA}, {0xFB, 0xE8}, {0xFB, 0xE9}, {0xFB, 0xEA},
+	{0xFB, 0xEE}, {0xFB, 0xF0}, {0xFB, 0xF2}, {0xFB, 0xF6}, {0xFB, 0xF7},
+	{0xFB, 0xF9}, {0xFB, 0xFA}, {0xFB, 0xFC}, {0xFC, 0x42}, {0xFC, 0x49},
+	{0xFC, 0x4B},
+};
+
+#define MAP_ELEMENT_OF(map)	(sizeof(map) / sizeof(map[0]))
+
+static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi,
+			      const unsigned char sjis_lo);
+static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi,
+				       const unsigned char euc_lo);
+static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi,
+				      const unsigned char euc_lo);
+static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi,
+			      const unsigned char euc_lo);
+static inline int sjisnec2sjisibm(unsigned char *sjisibm,
+				  const unsigned char sjisnec_hi,
+				  const unsigned char sjisnec_lo);
+
+/* SJIS IBM extended characters to EUC */
+static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi,
+			      const unsigned char sjis_lo)
+{
+	int index;
+
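+	/*
+	 * The IBM extended rows 0xFA-0xFC are stored back to back in
+	 * sjisibm2euc_map, (0xFD - 0x40) cells per lead byte.
+	 */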
+	index = ((sjis_hi - 0xFA) * (0xFD - 0x40)) + (sjis_lo - 0x40);
+	if (IS_EUC_IBM2JISX0208(sjisibm2euc_map[index][0],
+				sjisibm2euc_map[index][1])) {
+		euc[0] = sjisibm2euc_map[index][0];
+		euc[1] = sjisibm2euc_map[index][1];
+		return 2;
+	} else {
+		euc[0] = SS3;
+		euc[1] = sjisibm2euc_map[index][0];
+		euc[2] = sjisibm2euc_map[index][1];
+		return 3;
+	}
+}
+
+/* EUC to SJIS IBM extended characters (G3 JIS X 0212 block) */
+static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi,
+				       const unsigned char euc_lo)
+{
+	int index, min_index, max_index;
+	unsigned short euc;
+
+	min_index = 0;
+	max_index = MAP_ELEMENT_OF(euc2sjisibm_jisx0212_map) - 1;
+	euc = (euc_hi << 8) | euc_lo;
+
+	while (min_index <= max_index) {
+		index = (min_index + max_index) / 2;
+		if (euc < euc2sjisibm_jisx0212_map[index].euc)
+			max_index = index - 1;
+		else
+			min_index = index + 1;
+		if (euc == euc2sjisibm_jisx0212_map[index].euc) {
+			sjis[0] = euc2sjisibm_jisx0212_map[index].sjis[0];
+			sjis[1] = euc2sjisibm_jisx0212_map[index].sjis[1];
+			return 3;
+		}
+	}
+	return 0;
+}
+
+/* EUC to SJIS IBM extended characters (G3 Upper block) */
+static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi,
+				      const unsigned char euc_lo)
+{
+	int index;
+
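+	/*
+	 * The G3 upper block is stored flat: 12 cells from row 0xF3
+	 * (0xF3F3-0xF3FE) followed by row 0xF4 starting at 0xF4A1.
+	 */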
+	if (euc_hi == 0xF3)
+		index = ((euc_hi << 8) | euc_lo) - 0xF3F3;
+	else
+		index = ((euc_hi << 8) | euc_lo) - 0xF4A1 + 12;
+
+	if ((index < 0) || (index >= MAP_ELEMENT_OF(euc2sjisibm_g3upper_map)))
+		return 0;
+
+	sjis[0] = euc2sjisibm_g3upper_map[index][0];
+	sjis[1] = euc2sjisibm_g3upper_map[index][1];
+
+	return 3;
+}
+
+/* EUC to SJIS IBM extended characters (G3 block) */
+static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi,
+			      const unsigned char euc_lo)
+{
+	int n;
+
+#if 0
+	if ((euc_hi == 0xA2) && (euc_lo == 0xCC)) {
+		sjis[0] = 0xFA;
+		sjis[1] = 0x54;
+		return 2;
+	} else if ((euc_hi == 0xA2) && (euc_lo == 0xE8)) {
+		sjis[0] = 0xFA;
+		sjis[1] = 0x5B;
+		return 2;
+	}
+#endif
+	if ((n = euc2sjisibm_g3upper(sjis, euc_hi, euc_lo))) {
+		return n;
+	} else if ((n = euc2sjisibm_jisx0212(sjis, euc_hi, euc_lo))) {
+		return n;
+	}
+
+	return 0;
+}
+
+/* NEC/IBM extended characters to IBM extended characters */
+static inline int sjisnec2sjisibm(unsigned char *sjisibm,
+				  const unsigned char sjisnec_hi,
+				  const unsigned char sjisnec_lo)
+{
+	int count;
+
+	if (! IS_SJIS_NECIBM(sjisnec_hi, sjisnec_lo))
+		return 0;
+
+	if ((sjisnec_hi == 0xEE) && (sjisnec_lo == 0xF9)) {
+		sjisibm[0] = 0x81;
+		sjisibm[1] = 0xCA;
+		return 2;
+	}
+
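+	/*
+	 * Linearize the NEC-selected rows (lead bytes 0xED-0xEE) into a
+	 * running cell count, then re-emit it in the IBM area starting at
+	 * lead byte 0xFA, skipping 0x7F in the trailing byte.
+	 */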
+	if ((sjisnec_hi == 0xEE) && (sjisnec_lo >= 0xEF)) {
+		count = (sjisnec_hi << 8 | sjisnec_lo)
+			- (sjisnec_lo <= 0xF9 ? 0xEEEF : (0xEEEF - 10));
+	} else {
+		count = (sjisnec_hi - 0xED) * (0xFC - 0x40)
+			+ (sjisnec_lo - 0x40) + (0x5C - 0x40);
+		if (sjisnec_lo >= 0x7F)
+			count--;
+	}
+
+	sjisibm[0] = 0xFA + (count / (0xFC - 0x40));
+	sjisibm[1] = 0x40 + (count % (0xFC - 0x40));
+	if (sjisibm[1] >= 0x7F)
+		sjisibm[1]++;
+
+	return 2;
+}
+
+static int uni2char(const wchar_t uni,
+		    unsigned char *out, int boundlen)
+{
+	int n;
+
+	if (!p_nls)
+		return -EINVAL;
+	if ((n = p_nls->uni2char(uni, out, boundlen)) < 0)
+		return n;
+
+	/* translate SJIS into EUC-JP */
+	if (n == 1) {
+		if (IS_SJIS_JISX0201KANA(out[0])) {
+			/* JIS X 0201 KANA */
+			if (boundlen < 2)
+				return -ENAMETOOLONG;
+
+			out[1] = out[0];
+			out[0] = SS2;
+			return 2;
+		}
+	} else if (n == 2) {
+		/* NEC/IBM extended characters to IBM extended characters */
+		sjisnec2sjisibm(out, out[0], out[1]);
+
+		if (IS_SJIS_UDC_LOW(out[0], out[1])) {
+			/* User defined characters half low */
+			MAP_SJIS2EUC(out[0], out[1], 0xF0, out[0], out[1], 0xF5);
+		} else if (IS_SJIS_UDC_HI(out[0], out[1])) {
+			/* User defined characters half high */
+			unsigned char ch, cl;
+
+			if (boundlen < 3)
+				return -ENAMETOOLONG;
+
+			n = 3; ch = out[0]; cl = out[1];
+			out[0] = SS3;
+			MAP_SJIS2EUC(ch, cl, 0xF5, out[1], out[2], 0xF5);
+		} else if (IS_SJIS_IBM(out[0], out[1])) {
+			/* IBM extended characters */
+			unsigned char euc[3], i;
+
+			n = sjisibm2euc(euc, out[0], out[1]);
+			if (boundlen < n)
+				return -ENAMETOOLONG;
+			for (i = 0; i < n; i++)
+				out[i] = euc[i];
+		} else if (IS_SJIS_JISX0208(out[0], out[1])) {
+			/* JIS X 0208 (including NEC special characters) */
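+			/*
+			 * XOR with 0xA0 folds the 0x81-0x9F and 0xE0-0xEA
+			 * lead-byte ranges onto one contiguous range, which
+			 * is doubled and rebased into 0xA1-0xF4; a trailing
+			 * byte above 0x9E selects the second of the two EUC
+			 * rows covered by one Shift-JIS lead byte.
+			 */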
+			out[0] = (out[0]^0xA0)*2 + 0x5F;
+			if (out[1] > 0x9E)
+				out[0]++;
+
+			if (out[1] < 0x7F)
+				out[1] = out[1] + 0x61;
+			else if (out[1] < 0x9F)
+				out[1] = out[1] + 0x60;
+			else
+				out[1] = out[1] + 0x02;
+		} else {
+			/* Invalid characters */
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return n;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen,
+		    wchar_t *uni)
+{
+	unsigned char sjis_temp[2];
+	int euc_offset, n;
+
+	if (!p_nls)
+		return -EINVAL;
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	/* translate EUC-JP into SJIS */
+	if (rawstring[0] > 0x7F) {
+		if (rawstring[0] == SS3) {
+			if (boundlen < 3)
+				return -EINVAL;
+			euc_offset = 3;
+
+			if (IS_EUC_UDC_HI(rawstring[1], rawstring[2])) {
+				/* User defined characters half high */
+				MAP_EUC2SJIS(rawstring[1], rawstring[2], 0xF5,
+					     sjis_temp[0], sjis_temp[1], 0xF5);
+			} else if (euc2sjisibm(sjis_temp,rawstring[1],rawstring[2])) {
+				/* IBM extended characters */
+			} else {
+				/* JIS X 0212 and invalid characters */
+				return -EINVAL;
+
+				/* 'GETA' with SJIS coding */
+				/* sjis_temp[0] = 0x81; */
+				/* sjis_temp[1] = 0xAC; */
+			}
+		} else {
+			if (boundlen < 2)
+				return -EINVAL;
+			euc_offset = 2;
+
+			if (IS_EUC_JISX0201KANA(rawstring[0], rawstring[1])) {
+				/* JIS X 0201 KANA */
+				sjis_temp[0] = rawstring[1];
+				sjis_temp[1] = 0x00;
+			} else if (IS_EUC_UDC_LOW(rawstring[0], rawstring[1])) {
+				/* User defined characters half low */
+				MAP_EUC2SJIS(rawstring[0], rawstring[1], 0xF5,
+					     sjis_temp[0], sjis_temp[1], 0xF0);
+			} else if (IS_EUC_JISX0208(rawstring[0], rawstring[1])) {
+				/* JIS X 0208 (including NEC special characters) */
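+				/*
+				 * Inverse of the uni2char() mapping above:
+				 * halve the lead byte back into 0x81-0x9F /
+				 * 0xE0-0xEA; the parity of the EUC lead byte
+				 * selects the trailing-byte offset.
+				 */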
+				sjis_temp[0] = ((rawstring[0]-0x5f)/2) ^ 0xA0;
+				if (!(rawstring[0] & 1))
+					sjis_temp[1] = rawstring[1] - 0x02;
+				else if (rawstring[1] < 0xE0)
+					sjis_temp[1] = rawstring[1] - 0x61;
+				else
+					sjis_temp[1] = rawstring[1] - 0x60;
+			} else {
+				/* Invalid characters */
+				return -EINVAL;
+			}
+		}
+	} else {
+		euc_offset = 1;
+
+		/* JIS X 0201 ROMAJI */
+		sjis_temp[0] = rawstring[0];
+		sjis_temp[1] = 0x00;
+	}
+
+	if ((n = p_nls->char2uni(sjis_temp, sizeof(sjis_temp), uni)) < 0)
+		return n;
+
+	return euc_offset;
+}
+
+static struct nls_table table = {
+	.charset	= "euc-jp",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_euc_jp(void)
+{
+	p_nls = load_nls("cp932");
+
+	if (p_nls) {
+		table.charset2upper = p_nls->charset2upper;
+		table.charset2lower = p_nls->charset2lower;
+		return register_nls(&table);
+	}
+
+	return -EINVAL;
+}
+
+static void __exit exit_nls_euc_jp(void)
+{
+	unregister_nls(&table);
+	unload_nls(p_nls);
+}
+
+module_init(init_nls_euc_jp)
+module_exit(exit_nls_euc_jp)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-1.c b/fs/nls/nls_iso8859-1.c
new file mode 100644
index 0000000..70a2c19
--- /dev/null
+++ b/fs/nls/nls_iso8859-1.c
@@ -0,0 +1,258 @@
+/*
+ * linux/fs/nls/nls_iso8859-1.c
+ *
+ * Charset iso8859-1 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x00a1, 0x00a2, 0x00a3,
+	0x00a4, 0x00a5, 0x00a6, 0x00a7,
+	0x00a8, 0x00a9, 0x00aa, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x00af,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x00b4, 0x00b5, 0x00b6, 0x00b7,
+	0x00b8, 0x00b9, 0x00ba, 0x00bb,
+	0x00bc, 0x00bd, 0x00be, 0x00bf,
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x00c3,
+	0x00c4, 0x00c5, 0x00c6, 0x00c7,
+	0x00c8, 0x00c9, 0x00ca, 0x00cb,
+	0x00cc, 0x00cd, 0x00ce, 0x00cf,
+	/* 0xd0*/
+	0x00d0, 0x00d1, 0x00d2, 0x00d3,
+	0x00d4, 0x00d5, 0x00d6, 0x00d7,
+	0x00d8, 0x00d9, 0x00da, 0x00db,
+	0x00dc, 0x00dd, 0x00de, 0x00df,
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x00e3,
+	0x00e4, 0x00e5, 0x00e6, 0x00e7,
+	0x00e8, 0x00e9, 0x00ea, 0x00eb,
+	0x00ec, 0x00ed, 0x00ee, 0x00ef,
+	/* 0xf0*/
+	0x00f0, 0x00f1, 0x00f2, 0x00f3,
+	0x00f4, 0x00f5, 0x00f6, 0x00f7,
+	0x00f8, 0x00f9, 0x00fa, 0x00fb,
+	0x00fc, 0x00fd, 0x00fe, 0x00ff,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
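+/*
+ * Only Unicode page 0x00 maps back to iso8859-1; every other page is
+ * left NULL and rejected by uni2char().
+ */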
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0x00, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0x00, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-1",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_1(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_1(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_1)
+module_exit(exit_nls_iso8859_1)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-13.c b/fs/nls/nls_iso8859-13.c
new file mode 100644
index 0000000..4547035
--- /dev/null
+++ b/fs/nls/nls_iso8859-13.c
@@ -0,0 +1,286 @@
+/*
+ * linux/fs/nls/nls_iso8859-13.c
+ *
+ * Charset iso8859-13 translation tables.
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x201d, 0x00a2, 0x00a3,
+	0x00a4, 0x201e, 0x00a6, 0x00a7,
+	0x00d8, 0x00a9, 0x0156, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x00c6,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x201c, 0x00b5, 0x00b6, 0x00b7,
+	0x00f8, 0x00b9, 0x0157, 0x00bb,
+	0x00bc, 0x00bd, 0x00be, 0x00e6,
+	/* 0xc0*/
+	0x0104, 0x012e, 0x0100, 0x0106,
+	0x00c4, 0x00c5, 0x0118, 0x0112,
+	0x010c, 0x00c9, 0x0179, 0x0116,
+	0x0122, 0x0136, 0x012a, 0x013b,
+	/* 0xd0*/
+	0x0160, 0x0143, 0x0145, 0x00d3,
+	0x014c, 0x00d5, 0x00d6, 0x00d7,
+	0x0172, 0x0141, 0x015a, 0x016a,
+	0x00dc, 0x017b, 0x017d, 0x00df,
+	/* 0xe0*/
+	0x0105, 0x012f, 0x0101, 0x0107,
+	0x00e4, 0x00e5, 0x0119, 0x0113,
+	0x010d, 0x00e9, 0x017a, 0x0117,
+	0x0123, 0x0137, 0x012b, 0x013c,
+	/* 0xf0*/
+	0x0161, 0x0144, 0x0146, 0x00f3,
+	0x014d, 0x00f5, 0x00f6, 0x00f7,
+	0x0173, 0x0142, 0x015b, 0x016b,
+	0x00fc, 0x017c, 0x017e, 0x2019,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0xa2, 0xa3, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0x00, 0xb9, 0x00, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0xc4, 0xc5, 0xaf, 0x00, /* 0xc0-0xc7 */
+	0x00, 0xc9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xd3, 0x00, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xa8, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0xe4, 0xe5, 0xbf, 0x00, /* 0xe0-0xe7 */
+	0x00, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xf3, 0x00, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xb8, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0xc2, 0xe2, 0x00, 0x00, 0xc0, 0xe0, 0xc3, 0xe3, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0xc7, 0xe7, 0x00, 0x00, 0xcb, 0xeb, /* 0x10-0x17 */
+	0xc6, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0xce, 0xee, 0x00, 0x00, 0xc1, 0xe1, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xed, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0xcf, 0xef, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0xd4, 0xf4, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xba, /* 0x50-0x57 */
+	0x00, 0x00, 0xda, 0xfa, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0xd8, 0xf8, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0xff, 0x00, 0x00, 0xb4, 0xa1, 0xa5, 0x00, /* 0x18-0x1f */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	  NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	  NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	  NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbf, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xbd, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-13",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_13(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_13(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_13)
+module_exit(exit_nls_iso8859_13)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-14.c b/fs/nls/nls_iso8859-14.c
new file mode 100644
index 0000000..13628d0
--- /dev/null
+++ b/fs/nls/nls_iso8859-14.c
@@ -0,0 +1,342 @@
+/*
+ * linux/fs/nls/nls_iso8859-14.c
+ *
+ * Charset iso8859-14 translation tables.
+ *
+ * Generated automatically from the Unicode and charset table
+ * provided by the Unicode Organisation at
+ * http://www.unicode.org/
+ * The Unicode to charset table has only exact mappings.
+ *
+ * Rhys Jones, Swansea University Computer Society
+ * rhys@sucs.swan.ac.uk
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003, 
+	0x0004, 0x0005, 0x0006, 0x0007, 
+	0x0008, 0x0009, 0x000a, 0x000b, 
+	0x000c, 0x000d, 0x000e, 0x000f, 
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013, 
+	0x0014, 0x0015, 0x0016, 0x0017, 
+	0x0018, 0x0019, 0x001a, 0x001b, 
+	0x001c, 0x001d, 0x001e, 0x001f, 
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023, 
+	0x0024, 0x0025, 0x0026, 0x0027, 
+	0x0028, 0x0029, 0x002a, 0x002b, 
+	0x002c, 0x002d, 0x002e, 0x002f, 
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033, 
+	0x0034, 0x0035, 0x0036, 0x0037, 
+	0x0038, 0x0039, 0x003a, 0x003b, 
+	0x003c, 0x003d, 0x003e, 0x003f, 
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043, 
+	0x0044, 0x0045, 0x0046, 0x0047, 
+	0x0048, 0x0049, 0x004a, 0x004b, 
+	0x004c, 0x004d, 0x004e, 0x004f, 
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053, 
+	0x0054, 0x0055, 0x0056, 0x0057, 
+	0x0058, 0x0059, 0x005a, 0x005b, 
+	0x005c, 0x005d, 0x005e, 0x005f, 
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063, 
+	0x0064, 0x0065, 0x0066, 0x0067, 
+	0x0068, 0x0069, 0x006a, 0x006b, 
+	0x006c, 0x006d, 0x006e, 0x006f, 
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073, 
+	0x0074, 0x0075, 0x0076, 0x0077, 
+	0x0078, 0x0079, 0x007a, 0x007b, 
+	0x007c, 0x007d, 0x007e, 0x007f, 
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x1e02, 0x1e03, 0x00a3, 
+	0x010a, 0x010b, 0x1e0a, 0x00a7, 
+	0x1e80, 0x00a9, 0x1e82, 0x1e0b, 
+	0x1ef2, 0x00ad, 0x00ae, 0x0178, 
+	/* 0xb0*/
+	0x1e1e, 0x1e1f, 0x0120, 0x0121, 
+	0x1e40, 0x1e41, 0x00b6, 0x1e56, 
+	0x1e81, 0x1e57, 0x1e83, 0x1e60, 
+	0x1ef3, 0x1e84, 0x1e85, 0x1e61, 
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x00c3, 
+	0x00c4, 0x00c5, 0x00c6, 0x00c7, 
+	0x00c8, 0x00c9, 0x00ca, 0x00cb, 
+	0x00cc, 0x00cd, 0x00ce, 0x00cf, 
+	/* 0xd0*/
+	0x0174, 0x00d1, 0x00d2, 0x00d3, 
+	0x00d4, 0x00d5, 0x00d6, 0x1e6a, 
+	0x00d8, 0x00d9, 0x00da, 0x00db, 
+	0x00dc, 0x00dd, 0x0176, 0x00df, 
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x00e3, 
+	0x00e4, 0x00e5, 0x00e6, 0x00e7, 
+	0x00e8, 0x00e9, 0x00ea, 0x00eb, 
+	0x00ec, 0x00ed, 0x00ee, 0x00ef, 
+	/* 0xf0*/
+	0x0175, 0x00f1, 0x00f2, 0x00f3, 
+	0x00f4, 0x00f5, 0x00f6, 0x1e6b, 
+	0x00f8, 0x00f9, 0x00fa, 0x00fb, 
+	0x00fc, 0x00fd, 0x0177, 0x00ff, 
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0x00, 0x00, 0x00, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0x00, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0x00, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x00, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0xa1, 0xa2, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0xa6, 0xab, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xb1, /* 0x18-0x1f */
+	0xb2, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0xb4, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xb9, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xbb, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xd7, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0xd0, 0xf0, 0xde, 0xfe, /* 0x70-0x77 */
+	0xaf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xa8, 0xb8, 0xaa, 0xba, 0xbd, 0xbe, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0xac, 0xbc, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page1e[256] = {
+	0x00, 0x00, 0xa1, 0xa2, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0xa6, 0xab, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xb1, /* 0x18-0x1f */
+	0xb2, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0xb4, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xb9, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xbb, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0xd7, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0xd0, 0xf0, 0xde, 0xfe, /* 0x70-0x77 */
+	0xaf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0xa8, 0xb8, 0xaa, 0xba, 0xbd, 0xbe, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0xac, 0xbc, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00,	page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	page1e,	NULL,
+
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa2, 0xa2, 0xa3, 0xab, 0xab, 0xab, 0xa7, /* 0xa0-0xa7 */
+	0xb8, 0xa9, 0xba, 0xab, 0xbc, 0xad, 0xae, 0xff, /* 0xa8-0xaf */
+	0xb1, 0xb1, 0xb3, 0xb3, 0xb5, 0xb5, 0xb6, 0xb9, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbf, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa1, 0xa3, 0xa6, 0xa6, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xa6, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb0, 0xb2, 0xb2, 0xb4, 0xb4, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xa8, 0xb7, 0xaa, 0xbb, 0xac, 0xbd, 0xbd, 0xbb, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xaf, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-14",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_14(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_14(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_14)
+module_exit(exit_nls_iso8859_14)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-15.c b/fs/nls/nls_iso8859-15.c
new file mode 100644
index 0000000..88b924b
--- /dev/null
+++ b/fs/nls/nls_iso8859-15.c
@@ -0,0 +1,308 @@
+/*
+ * linux/fs/nls/nls_iso8859-15.c
+ *
+ * Charset iso8859-15 translation tables.
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x00a1, 0x00a2, 0x00a3,
+	0x20ac, 0x00a5, 0x0160, 0x00a7,
+	0x0161, 0x00a9, 0x00aa, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x00af,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x017d, 0x00b5, 0x00b6, 0x00b7,
+	0x017e, 0x00b9, 0x00ba, 0x00bb,
+	0x0152, 0x0153, 0x0178, 0x00bf,
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x00c3,
+	0x00c4, 0x00c5, 0x00c6, 0x00c7,
+	0x00c8, 0x00c9, 0x00ca, 0x00cb,
+	0x00cc, 0x00cd, 0x00ce, 0x00cf,
+	/* 0xd0*/
+	0x00d0, 0x00d1, 0x00d2, 0x00d3,
+	0x00d4, 0x00d5, 0x00d6, 0x00d7,
+	0x00d8, 0x00d9, 0x00da, 0x00db,
+	0x00dc, 0x00dd, 0x00de, 0x00df,
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x00e3,
+	0x00e4, 0x00e5, 0x00e6, 0x00e7,
+	0x00e8, 0x00e9, 0x00ea, 0x00eb,
+	0x00ec, 0x00ed, 0x00ee, 0x00ef,
+	/* 0xf0*/
+	0x00f0, 0x00f1, 0x00f2, 0x00f3,
+	0x00f4, 0x00f5, 0x00f6, 0x00f7,
+	0x00f8, 0x00f9, 0x00fa, 0x00fb,
+	0x00fc, 0x00fd, 0x00fe, 0x00ff,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0xa5, 0x00, 0xa7, /* 0xa0-0xa7 */
+	0x00, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0x00, 0xb9, 0xba, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0xbc, 0xbd, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xa6, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0xbe, 0x00, 0x00, 0x00, 0x00, 0xb4, 0xb8, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01,	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+
+	page20,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,	NULL,
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa8, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb8, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbd, 0xbd, 0xff, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa6, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0x00, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb4, 0xb9, 0xba, 0xbb, 0xbc, 0xbc, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xbe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-15",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_15(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_15(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_15)
+module_exit(exit_nls_iso8859_15)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-2.c b/fs/nls/nls_iso8859-2.c
new file mode 100644
index 0000000..372528a
--- /dev/null
+++ b/fs/nls/nls_iso8859-2.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/nls/nls_iso8859-2.c
+ *
+ * Charset iso8859-2 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x0104, 0x02d8, 0x0141,
+	0x00a4, 0x013d, 0x015a, 0x00a7,
+	0x00a8, 0x0160, 0x015e, 0x0164,
+	0x0179, 0x00ad, 0x017d, 0x017b,
+	/* 0xb0*/
+	0x00b0, 0x0105, 0x02db, 0x0142,
+	0x00b4, 0x013e, 0x015b, 0x02c7,
+	0x00b8, 0x0161, 0x015f, 0x0165,
+	0x017a, 0x02dd, 0x017e, 0x017c,
+	/* 0xc0*/
+	0x0154, 0x00c1, 0x00c2, 0x0102,
+	0x00c4, 0x0139, 0x0106, 0x00c7,
+	0x010c, 0x00c9, 0x0118, 0x00cb,
+	0x011a, 0x00cd, 0x00ce, 0x010e,
+	/* 0xd0*/
+	0x0110, 0x0143, 0x0147, 0x00d3,
+	0x00d4, 0x0150, 0x00d6, 0x00d7,
+	0x0158, 0x016e, 0x00da, 0x0170,
+	0x00dc, 0x00dd, 0x0162, 0x00df,
+	/* 0xe0*/
+	0x0155, 0x00e1, 0x00e2, 0x0103,
+	0x00e4, 0x013a, 0x0107, 0x00e7,
+	0x010d, 0x00e9, 0x0119, 0x00eb,
+	0x011b, 0x00ed, 0x00ee, 0x010f,
+	/* 0xf0*/
+	0x0111, 0x0144, 0x0148, 0x00f3,
+	0x00f4, 0x0151, 0x00f6, 0x00f7,
+	0x0159, 0x016f, 0x00fa, 0x0171,
+	0x00fc, 0x00fd, 0x0163, 0x02d9,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+	0xb0, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
+	0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
+	0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */
+	0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0xc5, 0xe5, 0x00, 0x00, 0xa5, 0xb5, 0x00, /* 0x38-0x3f */
+	0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */
+	0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */
+	0xd8, 0xf8, 0xa6, 0xb6, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */
+	0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */
+	0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-2",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_2(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_2(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_2)
+module_exit(exit_nls_iso8859_2)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-3.c b/fs/nls/nls_iso8859-3.c
new file mode 100644
index 0000000..81b45a2
--- /dev/null
+++ b/fs/nls/nls_iso8859-3.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/nls/nls_iso8859-3.c
+ *
+ * Charset iso8859-3 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x0126, 0x02d8, 0x00a3,
+	0x00a4, 0x0000, 0x0124, 0x00a7,
+	0x00a8, 0x0130, 0x015e, 0x011e,
+	0x0134, 0x00ad, 0x0000, 0x017b,
+	/* 0xb0*/
+	0x00b0, 0x0127, 0x00b2, 0x00b3,
+	0x00b4, 0x00b5, 0x0125, 0x00b7,
+	0x00b8, 0x0131, 0x015f, 0x011f,
+	0x0135, 0x00bd, 0x0000, 0x017c,
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x0000,
+	0x00c4, 0x010a, 0x0108, 0x00c7,
+	0x00c8, 0x00c9, 0x00ca, 0x00cb,
+	0x00cc, 0x00cd, 0x00ce, 0x00cf,
+	/* 0xd0*/
+	0x0000, 0x00d1, 0x00d2, 0x00d3,
+	0x00d4, 0x0120, 0x00d6, 0x00d7,
+	0x011c, 0x00d9, 0x00da, 0x00db,
+	0x00dc, 0x016c, 0x015c, 0x00df,
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x0000,
+	0x00e4, 0x010b, 0x0109, 0x00e7,
+	0x00e8, 0x00e9, 0x00ea, 0x00eb,
+	0x00ec, 0x00ed, 0x00ee, 0x00ef,
+	/* 0xf0*/
+	0x0000, 0x00f1, 0x00f2, 0x00f3,
+	0x00f4, 0x0121, 0x00f6, 0x00f7,
+	0x011d, 0x00f9, 0x00fa, 0x00fb,
+	0x00fc, 0x016d, 0x015d, 0x02d9,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0xa3, 0xa4, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+	0xb0, 0x00, 0xb2, 0xb3, 0xb4, 0xb5, 0x00, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0x00, 0x00, 0x00, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0x00, 0xd9, 0xda, 0xdb, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0x00, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0xc6, 0xe6, 0xc5, 0xe5, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0xd8, 0xf8, 0xab, 0xbb, /* 0x18-0x1f */
+	0xd5, 0xf5, 0x00, 0x00, 0xa6, 0xb6, 0xa1, 0xb1, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xa9, 0xb9, 0x00, 0x00, 0xac, 0xbc, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0xde, 0xfe, 0xaa, 0xba, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0xdd, 0xfd, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0xaf, 0xbf, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0xa2, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xb1, 0xa2, 0xa3, 0xa4, 0x00, 0xb6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0x69, 0xba, 0xbb, 0xbc, 0xad, 0x00, 0xbf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0x00, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xa1, 0xb2, 0xb3, 0xb4, 0x00, 0xa6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0x49, 0xaa, 0xab, 0xac, 0xbd, 0x00, 0xaf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-3",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_3(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_3(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_3)
+module_exit(exit_nls_iso8859_3)
+
+MODULE_LICENSE("Dual BSD/GPL");
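
The uni2char() routine above resolves the Unicode-to-charset direction with a two-level lookup: the high byte of the code point selects one of the 256-entry pages in page_uni2charset, and the low byte indexes into that page, with a zero entry (or a NULL page) meaning "no exact mapping". A minimal user-space sketch of the same scheme, with tiny made-up pages standing in for the generated tables (only the U+0126 entry mirrors the real iso8859-3 data; everything else is invented for illustration):

#include <stdio.h>

/*
 * Tiny stand-ins for the generated pages: index = low byte of the code
 * point, value = charset byte, 0 = no exact mapping.  Only the U+0126
 * entry matches the real iso8859-3 table (0xa1 = LATIN CAPITAL LETTER H
 * WITH STROKE); the ASCII entries just map to themselves.
 */
static unsigned char demo_page00[256] = { ['A'] = 'A', ['a'] = 'a' };
static unsigned char demo_page01[256] = { [0x26] = 0xa1 };
static unsigned char *demo_pages[256] = { demo_page00, demo_page01 };

/* Same shape as uni2char(): high byte picks the page, low byte indexes it. */
static int demo_uni2char(unsigned int uni, unsigned char *out)
{
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;
	unsigned char *page = demo_pages[ch];

	if (page && page[cl]) {
		*out = page[cl];
		return 1;
	}
	return -1;			/* the kernel code returns -EINVAL here */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x0126, &c) == 1)
		printf("U+0126 -> 0x%02x\n", c);	/* prints 0xa1 */
	return 0;
}

The real modules keep one such page per Unicode block they can actually represent and leave every other slot NULL, so a miss costs a single pointer test.
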
diff --git a/fs/nls/nls_iso8859-4.c b/fs/nls/nls_iso8859-4.c
new file mode 100644
index 0000000..101b87f
--- /dev/null
+++ b/fs/nls/nls_iso8859-4.c
@@ -0,0 +1,309 @@
+/*
+ * linux/fs/nls_iso8859-4.c
+ *
+ * Charset iso8859-4 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x0104, 0x0138, 0x0156,
+	0x00a4, 0x0128, 0x013b, 0x00a7,
+	0x00a8, 0x0160, 0x0112, 0x0122,
+	0x0166, 0x00ad, 0x017d, 0x00af,
+	/* 0xb0*/
+	0x00b0, 0x0105, 0x02db, 0x0157,
+	0x00b4, 0x0129, 0x013c, 0x02c7,
+	0x00b8, 0x0161, 0x0113, 0x0123,
+	0x0167, 0x014a, 0x017e, 0x014b,
+	/* 0xc0*/
+	0x0100, 0x00c1, 0x00c2, 0x00c3,
+	0x00c4, 0x00c5, 0x00c6, 0x012e,
+	0x010c, 0x00c9, 0x0118, 0x00cb,
+	0x0116, 0x00cd, 0x00ce, 0x012a,
+	/* 0xd0*/
+	0x0110, 0x0145, 0x014c, 0x0136,
+	0x00d4, 0x00d5, 0x00d6, 0x00d7,
+	0x00d8, 0x0172, 0x00da, 0x00db,
+	0x00dc, 0x0168, 0x016a, 0x00df,
+	/* 0xe0*/
+	0x0101, 0x00e1, 0x00e2, 0x00e3,
+	0x00e4, 0x00e5, 0x00e6, 0x012f,
+	0x010d, 0x00e9, 0x0119, 0x00eb,
+	0x0117, 0x00ed, 0x00ee, 0x012b,
+	/* 0xf0*/
+	0x0111, 0x0146, 0x014d, 0x0137,
+	0x00f4, 0x00f5, 0x00f6, 0x00f7,
+	0x00f8, 0x0173, 0x00fa, 0x00fb,
+	0x00fc, 0x0169, 0x016b, 0x02d9,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0x00, /* 0xc0-0xc7 */
+	0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0x00, 0xda, 0xdb, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0x00, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0x00, /* 0xe0-0xe7 */
+	0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0x00, 0xfa, 0xfb, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char page01[256] = {
+	0xc0, 0xe0, 0x00, 0x00, 0xa1, 0xb1, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+	0xd0, 0xf0, 0xaa, 0xba, 0x00, 0x00, 0xcc, 0xec, /* 0x10-0x17 */
+	0xca, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0xab, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0xa5, 0xb5, 0xcf, 0xef, 0x00, 0x00, 0xc7, 0xe7, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0xf3, /* 0x30-0x37 */
+	0xa2, 0x00, 0x00, 0xa6, 0xb6, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xf1, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0xbd, 0xbf, 0xd2, 0xf2, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0xb3, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0xac, 0xbc, /* 0x60-0x67 */
+	0xdd, 0xfd, 0xde, 0xfe, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0xd9, 0xf9, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0xff, 0x00, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbf, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xbd, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-4",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_4(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_4(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_4)
+module_exit(exit_nls_iso8859_4)
+
+MODULE_LICENSE("Dual BSD/GPL");
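
Besides the conversion tables, every module carries charset2lower and charset2upper so case-insensitive filesystems can fold 8-bit names directly, without a round trip through Unicode; a zero entry marks a byte with no case counterpart, and the caller falls back to the original byte. A hedged sketch of that convention, with an invented fold() helper and a demo table that covers only a few ASCII letters (the kernel consults the full 256-entry tables above instead):

#include <stdio.h>

/*
 * Case folding in the style of charset2lower: a zero entry means
 * "no lowercase form", so the byte is kept unchanged.  Only three
 * entries are filled in for the demonstration.
 */
static unsigned char demo_lower[256] = {
	['A'] = 'a', ['B'] = 'b', ['C'] = 'c',
};

static unsigned char fold(unsigned char c)
{
	unsigned char nc = demo_lower[c];
	return nc ? nc : c;		/* fall back to the original byte */
}

int main(void)
{
	const unsigned char name[] = "ABC.TXT";
	size_t i;

	for (i = 0; name[i]; i++)
		putchar(fold(name[i]));
	putchar('\n');			/* prints "abc.TXT": '.', 'T', 'X' have no entry */
	return 0;
}
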
diff --git a/fs/nls/nls_iso8859-5.c b/fs/nls/nls_iso8859-5.c
new file mode 100644
index 0000000..83b0084
--- /dev/null
+++ b/fs/nls/nls_iso8859-5.c
@@ -0,0 +1,273 @@
+/*
+ * linux/fs/nls_iso8859-5.c
+ *
+ * Charset iso8859-5 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x0401, 0x0402, 0x0403,
+	0x0404, 0x0405, 0x0406, 0x0407,
+	0x0408, 0x0409, 0x040a, 0x040b,
+	0x040c, 0x00ad, 0x040e, 0x040f,
+	/* 0xb0*/
+	0x0410, 0x0411, 0x0412, 0x0413,
+	0x0414, 0x0415, 0x0416, 0x0417,
+	0x0418, 0x0419, 0x041a, 0x041b,
+	0x041c, 0x041d, 0x041e, 0x041f,
+	/* 0xc0*/
+	0x0420, 0x0421, 0x0422, 0x0423,
+	0x0424, 0x0425, 0x0426, 0x0427,
+	0x0428, 0x0429, 0x042a, 0x042b,
+	0x042c, 0x042d, 0x042e, 0x042f,
+	/* 0xd0*/
+	0x0430, 0x0431, 0x0432, 0x0433,
+	0x0434, 0x0435, 0x0436, 0x0437,
+	0x0438, 0x0439, 0x043a, 0x043b,
+	0x043c, 0x043d, 0x043e, 0x043f,
+	/* 0xe0*/
+	0x0440, 0x0441, 0x0442, 0x0443,
+	0x0444, 0x0445, 0x0446, 0x0447,
+	0x0448, 0x0449, 0x044a, 0x044b,
+	0x044c, 0x044d, 0x044e, 0x044f,
+	/* 0xf0*/
+	0x2116, 0x0451, 0x0452, 0x0453,
+	0x0454, 0x0455, 0x0456, 0x0457,
+	0x0458, 0x0459, 0x045a, 0x045b,
+	0x045c, 0x00a7, 0x045e, 0x045f,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+};
+
+static unsigned char page04[256] = {
+	0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x00, 0xae, 0xaf, /* 0x08-0x0f */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x38-0x3f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0xfe, 0xff, /* 0x58-0x5f */
+};
+
+static unsigned char page21[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, /* 0x10-0x17 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xa0-0xa7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xad, 0xfe, 0xff, /* 0xa8-0xaf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xb0-0xb7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xd0-0xd7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xf0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xf0-0xf7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xfd, 0xae, 0xaf, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-5",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_5(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_5(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_5)
+module_exit(exit_nls_iso8859_5)
+
+MODULE_LICENSE("Dual BSD/GPL");
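
The charset-to-Unicode direction is a single table lookup: char2uni() indexes charset2uni with the raw byte and treats a result of 0x0000 as "no mapping", which incidentally also rejects a literal NUL byte, harmless for file names. The same convention in a small self-contained sketch (the demo table carries just two entries; the 0xb0 -> U+0410 pair mirrors the iso8859-5 data above):

#include <stdio.h>

/*
 * Sketch of the charset->Unicode direction in the style of charset2uni:
 * one 16-bit code point per byte value, 0x0000 doubling as "unmapped".
 */
static unsigned short demo_charset2uni[256] = {
	[0x41] = 0x0041,	/* 'A' maps to itself */
	[0xb0] = 0x0410,	/* iso8859-5: 0xb0 -> CYRILLIC CAPITAL LETTER A */
};

static int demo_char2uni(unsigned char c, unsigned short *uni)
{
	*uni = demo_charset2uni[c];
	if (*uni == 0x0000)	/* zero means "no mapping", as in the kernel code */
		return -1;
	return 1;
}

int main(void)
{
	unsigned short uni;

	if (demo_char2uni(0xb0, &uni) == 1)
		printf("0xb0 -> U+%04X\n", uni);	/* prints U+0410 */
	return 0;
}
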
diff --git a/fs/nls/nls_iso8859-6.c b/fs/nls/nls_iso8859-6.c
new file mode 100644
index 0000000..0c519d6
--- /dev/null
+++ b/fs/nls/nls_iso8859-6.c
@@ -0,0 +1,264 @@
+/*
+ * linux/fs/nls_iso8859-6.c
+ *
+ * Charset iso8859-6 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0660, 0x0661, 0x0662, 0x0663,
+	0x0664, 0x0665, 0x0666, 0x0667,
+	0x0668, 0x0669, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x0000, 0x0000, 0x0000,
+	0x00a4, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x060c, 0x00ad, 0x0000, 0x0000,
+	/* 0xb0*/
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x061b,
+	0x0000, 0x0000, 0x0000, 0x061f,
+	/* 0xc0*/
+	0x0000, 0x0621, 0x0622, 0x0623,
+	0x0624, 0x0625, 0x0626, 0x0627,
+	0x0628, 0x0629, 0x062a, 0x062b,
+	0x062c, 0x062d, 0x062e, 0x062f,
+	/* 0xd0*/
+	0x0630, 0x0631, 0x0632, 0x0633,
+	0x0634, 0x0635, 0x0636, 0x0637,
+	0x0638, 0x0639, 0x063a, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	/* 0xe0*/
+	0x0640, 0x0641, 0x0642, 0x0643,
+	0x0644, 0x0645, 0x0646, 0x0647,
+	0x0648, 0x0649, 0x064a, 0x064b,
+	0x064c, 0x064d, 0x064e, 0x064f,
+	/* 0xf0*/
+	0x0650, 0x0651, 0x0652, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+};
+
+static unsigned char page06[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0x18-0x1f */
+	0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
+	0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x60-0x67 */
+	0x38, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   NULL,   NULL,   page06, NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0xb8-0xbf */
+	0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0xb8-0xbf */
+	0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-6",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_6(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_6(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_6)
+module_exit(exit_nls_iso8859_6)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-7.c b/fs/nls/nls_iso8859-7.c
new file mode 100644
index 0000000..bd08546
--- /dev/null
+++ b/fs/nls/nls_iso8859-7.c
@@ -0,0 +1,318 @@
+/*
+ * linux/fs/nls_iso8859-7.c
+ *
+ * Charset iso8859-7 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x02bd, 0x02bc, 0x00a3,
+	0x0000, 0x0000, 0x00a6, 0x00a7,
+	0x00a8, 0x00a9, 0x0000, 0x00ab,
+	0x00ac, 0x00ad, 0x0000, 0x2015,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x0384, 0x0385, 0x0386, 0x00b7,
+	0x0388, 0x0389, 0x038a, 0x00bb,
+	0x038c, 0x00bd, 0x038e, 0x038f,
+	/* 0xc0*/
+	0x0390, 0x0391, 0x0392, 0x0393,
+	0x0394, 0x0395, 0x0396, 0x0397,
+	0x0398, 0x0399, 0x039a, 0x039b,
+	0x039c, 0x039d, 0x039e, 0x039f,
+	/* 0xd0*/
+	0x03a0, 0x03a1, 0x0000, 0x03a3,
+	0x03a4, 0x03a5, 0x03a6, 0x03a7,
+	0x03a8, 0x03a9, 0x03aa, 0x03ab,
+	0x03ac, 0x03ad, 0x03ae, 0x03af,
+	/* 0xe0*/
+	0x03b0, 0x03b1, 0x03b2, 0x03b3,
+	0x03b4, 0x03b5, 0x03b6, 0x03b7,
+	0x03b8, 0x03b9, 0x03ba, 0x03bb,
+	0x03bc, 0x03bd, 0x03be, 0x03bf,
+	/* 0xf0*/
+	0x03c0, 0x03c1, 0x03c2, 0x03c3,
+	0x03c4, 0x03c5, 0x03c6, 0x03c7,
+	0x03c8, 0x03c9, 0x03ca, 0x03cb,
+	0x03cc, 0x03cd, 0x03ce, 0x0000,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0x00, 0x00, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0x00, 0x00, 0xb7, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0xbb, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */
+};
+
+static unsigned char page02[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0xa2, 0xa1, 0x00, 0x00, /* 0xb8-0xbf */
+};
+
+static unsigned char page03[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0x00, /* 0x80-0x87 */
+	0xb8, 0xb9, 0xba, 0x00, 0xbc, 0x00, 0xbe, 0xbf, /* 0x88-0x8f */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x90-0x97 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x98-0x9f */
+	0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xa0-0xa7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xa8-0xaf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xb0-0xb7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xb8-0xbf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xc0-0xc7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xc8-0xcf */
+};
+
+static unsigned char page20[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, /* 0x10-0x17 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   page02, page03, NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	page20, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xdc, 0xb7, /* 0xb0-0xb7 */
+	0xdd, 0xde, 0xdf, 0xbb, 0xfc, 0xbd, 0xfd, 0xfe, /* 0xb8-0xbf */
+	0xc0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xb6, 0xb8, 0xb9, 0xba, /* 0xd8-0xdf */
+	0xe0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd3, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xbc, 0xbe, 0xbf, 0x00, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
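+	/*
+	 * Two-level lookup: the high byte of the Unicode value selects a
+	 * per-page table and the low byte indexes into it.  A missing page
+	 * or a zero entry means the character has no iso8859-7 equivalent.
+	 */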
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
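+	/* Single-byte charset: one direct table lookup; a 0x0000 entry
+	   marks a byte with no Unicode mapping. */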
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-7",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_7(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_7(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_7)
+module_exit(exit_nls_iso8859_7)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-9.c b/fs/nls/nls_iso8859-9.c
new file mode 100644
index 0000000..988eff7
--- /dev/null
+++ b/fs/nls/nls_iso8859-9.c
@@ -0,0 +1,273 @@
+/*
+ * linux/fs/nls_iso8859-9.c
+ *
+ * Charset iso8859-9 translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x0080, 0x0081, 0x0082, 0x0083,
+	0x0084, 0x0085, 0x0086, 0x0087,
+	0x0088, 0x0089, 0x008a, 0x008b,
+	0x008c, 0x008d, 0x008e, 0x008f,
+	/* 0x90*/
+	0x0090, 0x0091, 0x0092, 0x0093,
+	0x0094, 0x0095, 0x0096, 0x0097,
+	0x0098, 0x0099, 0x009a, 0x009b,
+	0x009c, 0x009d, 0x009e, 0x009f,
+	/* 0xa0*/
+	0x00a0, 0x00a1, 0x00a2, 0x00a3,
+	0x00a4, 0x00a5, 0x00a6, 0x00a7,
+	0x00a8, 0x00a9, 0x00aa, 0x00ab,
+	0x00ac, 0x00ad, 0x00ae, 0x00af,
+	/* 0xb0*/
+	0x00b0, 0x00b1, 0x00b2, 0x00b3,
+	0x00b4, 0x00b5, 0x00b6, 0x00b7,
+	0x00b8, 0x00b9, 0x00ba, 0x00bb,
+	0x00bc, 0x00bd, 0x00be, 0x00bf,
+	/* 0xc0*/
+	0x00c0, 0x00c1, 0x00c2, 0x00c3,
+	0x00c4, 0x00c5, 0x00c6, 0x00c7,
+	0x00c8, 0x00c9, 0x00ca, 0x00cb,
+	0x00cc, 0x00cd, 0x00ce, 0x00cf,
+	/* 0xd0*/
+	0x011e, 0x00d1, 0x00d2, 0x00d3,
+	0x00d4, 0x00d5, 0x00d6, 0x00d7,
+	0x00d8, 0x00d9, 0x00da, 0x00db,
+	0x00dc, 0x0130, 0x015e, 0x00df,
+	/* 0xe0*/
+	0x00e0, 0x00e1, 0x00e2, 0x00e3,
+	0x00e4, 0x00e5, 0x00e6, 0x00e7,
+	0x00e8, 0x00e9, 0x00ea, 0x00eb,
+	0x00ec, 0x00ed, 0x00ee, 0x00ef,
+	/* 0xf0*/
+	0x011f, 0x00f1, 0x00f2, 0x00f3,
+	0x00f4, 0x00f5, 0x00f6, 0x00f7,
+	0x00f8, 0x00f9, 0x00fa, 0x00fb,
+	0x00fc, 0x0131, 0x015f, 0x00ff,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0x00, 0xff, /* 0xf8-0xff */
+};
+
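+/*
+ * Page 0x01 supplies the six Turkish letters that distinguish Latin-5
+ * from Latin-1: G-breve (0xd0/0xf0), dotted capital I and dotless i
+ * (0xdd/0xfd), and S-cedilla (0xde/0xfe).
+ */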
+static unsigned char page01[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0xf0, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0xdd, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xfe, /* 0x58-0x5f */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x69, 0xfe, 0xdf, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0x00, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x49, 0xde, 0x00, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "iso8859-9",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_iso8859_9(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_iso8859_9(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_iso8859_9)
+module_exit(exit_nls_iso8859_9)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-r.c b/fs/nls/nls_koi8-r.c
new file mode 100644
index 0000000..0ad22c2
--- /dev/null
+++ b/fs/nls/nls_koi8-r.c
@@ -0,0 +1,324 @@
+/*
+ * linux/fs/nls_koi8-r.c
+ *
+ * Charset koi8-r translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x2500, 0x2502, 0x250c, 0x2510,
+	0x2514, 0x2518, 0x251c, 0x2524,
+	0x252c, 0x2534, 0x253c, 0x2580,
+	0x2584, 0x2588, 0x258c, 0x2590,
+	/* 0x90*/
+	0x2591, 0x2592, 0x2593, 0x2320,
+	0x25a0, 0x2219, 0x221a, 0x2248,
+	0x2264, 0x2265, 0x00a0, 0x2321,
+	0x00b0, 0x00b2, 0x00b7, 0x00f7,
+	/* 0xa0*/
+	0x2550, 0x2551, 0x2552, 0x0451,
+	0x2553, 0x2554, 0x2555, 0x2556,
+	0x2557, 0x2558, 0x2559, 0x255a,
+	0x255b, 0x255c, 0x255d, 0x255e,
+	/* 0xb0*/
+	0x255f, 0x2560, 0x2561, 0x0401,
+	0x2562, 0x2563, 0x2564, 0x2565,
+	0x2566, 0x2567, 0x2568, 0x2569,
+	0x256a, 0x256b, 0x256c, 0x00a9,
+	/* 0xc0*/
+	0x044e, 0x0430, 0x0431, 0x0446,
+	0x0434, 0x0435, 0x0444, 0x0433,
+	0x0445, 0x0438, 0x0439, 0x043a,
+	0x043b, 0x043c, 0x043d, 0x043e,
+	/* 0xd0*/
+	0x043f, 0x044f, 0x0440, 0x0441,
+	0x0442, 0x0443, 0x0436, 0x0432,
+	0x044c, 0x044b, 0x0437, 0x0448,
+	0x044d, 0x0449, 0x0447, 0x044a,
+	/* 0xe0*/
+	0x042e, 0x0410, 0x0411, 0x0426,
+	0x0414, 0x0415, 0x0424, 0x0413,
+	0x0425, 0x0418, 0x0419, 0x041a,
+	0x041b, 0x041c, 0x041d, 0x041e,
+	/* 0xf0*/
+	0x041f, 0x042f, 0x0420, 0x0421,
+	0x0422, 0x0423, 0x0416, 0x0412,
+	0x042c, 0x042b, 0x0417, 0x0428,
+	0x042d, 0x0429, 0x0427, 0x042a,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x9c, 0x00, 0x9d, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, /* 0xf0-0xf7 */
+};
+
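+/*
+ * Page 0x04 is the Cyrillic block.  KOI8-R does not follow the Unicode
+ * alphabetical order: clearing the top bit of a Cyrillic byte gives a
+ * rough Latin transliteration (e.g. 0xc1 -> 0x41 'A', 0xc4 -> 0x44 'D').
+ */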
+static unsigned char page04[256] = {
+	0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, /* 0x10-0x17 */
+	0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, /* 0x18-0x1f */
+	0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, /* 0x20-0x27 */
+	0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, /* 0x28-0x2f */
+	0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, /* 0x30-0x37 */
+	0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, /* 0x38-0x3f */
+	0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, /* 0x40-0x47 */
+	0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, /* 0x48-0x4f */
+	0x00, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x95, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x99, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x93, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
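+/*
+ * Page 0x25 maps the Unicode box-drawing and block characters back to
+ * the KOI8-R pseudo-graphics bytes in the 0x80-0xbe range.
+ */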
+static unsigned char page25[256] = {
+	0x80, 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x83, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x85, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xa0, 0xa1, 0xa2, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, /* 0x50-0x57 */
+	0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, /* 0x58-0x5f */
+	0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, /* 0x60-0x67 */
+	0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x8b, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x8d, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x8f, 0x90, 0x91, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "koi8-r",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_koi8_r(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_koi8_r(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_koi8_r)
+module_exit(exit_nls_koi8_r)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
new file mode 100644
index 0000000..5db83ef
--- /dev/null
+++ b/fs/nls/nls_koi8-ru.c
@@ -0,0 +1,83 @@
+/*
+ * linux/fs/nls_koi8-ru.c
+ *
+ * Charset koi8-ru translation based on charset koi8-u.
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static struct nls_table *p_nls;
+
+static int uni2char(const wchar_t uni,
+		    unsigned char *out, int boundlen)
+{
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
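+	/*
+	 * The two mask tests are only a cheap pre-filter: they match
+	 * U+040E/U+045E and U+255D/U+256C (plus a few neighbours), so the
+	 * exact comparisons below make the final decision.
+	 */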
+	if ((uni & 0xffaf) == 0x040e || (uni & 0xffce) == 0x254c) {
+		/* koi8-ru and koi8-u differ only on two characters */
+		if (uni == 0x040e)
+			out[0] = 0xbe;
+		else if (uni == 0x045e)
+			out[0] = 0xae;
+		else if (uni == 0x255d || uni == 0x256c)
+			return 0;
+		else
+			return p_nls->uni2char(uni, out, boundlen);
+		return 1;
+	}
+	else
+		/* fast path */
+		return p_nls->uni2char(uni, out, boundlen);
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen,
+		    wchar_t *uni)
+{
+	int n;
+
+	if ((*rawstring & 0xef) == 0xae) {
+		/* koi8-ru and koi8-u differ only on two characters */
+		*uni = (*rawstring & 0x10) ? 0x040e : 0x045e;
+		return 1;
+	}
+
+	n = p_nls->char2uni(rawstring, boundlen, uni);
+	return n;
+}
+
+static struct nls_table table = {
+	.charset	= "koi8-ru",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_koi8_ru(void)
+{
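+	/*
+	 * Reuse the koi8-u tables: load_nls() takes a reference on the
+	 * koi8-u module, which the exit path drops via unload_nls().
+	 */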
+	p_nls = load_nls("koi8-u");
+
+	if (p_nls) {
+		table.charset2upper = p_nls->charset2upper;
+		table.charset2lower = p_nls->charset2lower;
+		return register_nls(&table);
+	}
+
+	return -EINVAL;
+}
+
+static void __exit exit_nls_koi8_ru(void)
+{
+	unregister_nls(&table);
+	unload_nls(p_nls);
+}
+
+module_init(init_nls_koi8_ru)
+module_exit(exit_nls_koi8_ru)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-u.c b/fs/nls/nls_koi8-u.c
new file mode 100644
index 0000000..9d30fd6
--- /dev/null
+++ b/fs/nls/nls_koi8-u.c
@@ -0,0 +1,331 @@
+/*
+ * linux/fs/nls_koi8-u.c
+ *
+ * Charset koi8-u translation tables.
+ * The Unicode to charset table has only exact mappings.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static wchar_t charset2uni[256] = {
+	/* 0x00*/
+	0x0000, 0x0001, 0x0002, 0x0003,
+	0x0004, 0x0005, 0x0006, 0x0007,
+	0x0008, 0x0009, 0x000a, 0x000b,
+	0x000c, 0x000d, 0x000e, 0x000f,
+	/* 0x10*/
+	0x0010, 0x0011, 0x0012, 0x0013,
+	0x0014, 0x0015, 0x0016, 0x0017,
+	0x0018, 0x0019, 0x001a, 0x001b,
+	0x001c, 0x001d, 0x001e, 0x001f,
+	/* 0x20*/
+	0x0020, 0x0021, 0x0022, 0x0023,
+	0x0024, 0x0025, 0x0026, 0x0027,
+	0x0028, 0x0029, 0x002a, 0x002b,
+	0x002c, 0x002d, 0x002e, 0x002f,
+	/* 0x30*/
+	0x0030, 0x0031, 0x0032, 0x0033,
+	0x0034, 0x0035, 0x0036, 0x0037,
+	0x0038, 0x0039, 0x003a, 0x003b,
+	0x003c, 0x003d, 0x003e, 0x003f,
+	/* 0x40*/
+	0x0040, 0x0041, 0x0042, 0x0043,
+	0x0044, 0x0045, 0x0046, 0x0047,
+	0x0048, 0x0049, 0x004a, 0x004b,
+	0x004c, 0x004d, 0x004e, 0x004f,
+	/* 0x50*/
+	0x0050, 0x0051, 0x0052, 0x0053,
+	0x0054, 0x0055, 0x0056, 0x0057,
+	0x0058, 0x0059, 0x005a, 0x005b,
+	0x005c, 0x005d, 0x005e, 0x005f,
+	/* 0x60*/
+	0x0060, 0x0061, 0x0062, 0x0063,
+	0x0064, 0x0065, 0x0066, 0x0067,
+	0x0068, 0x0069, 0x006a, 0x006b,
+	0x006c, 0x006d, 0x006e, 0x006f,
+	/* 0x70*/
+	0x0070, 0x0071, 0x0072, 0x0073,
+	0x0074, 0x0075, 0x0076, 0x0077,
+	0x0078, 0x0079, 0x007a, 0x007b,
+	0x007c, 0x007d, 0x007e, 0x007f,
+	/* 0x80*/
+	0x2500, 0x2502, 0x250c, 0x2510,
+	0x2514, 0x2518, 0x251c, 0x2524,
+	0x252c, 0x2534, 0x253c, 0x2580,
+	0x2584, 0x2588, 0x258c, 0x2590,
+	/* 0x90*/
+	0x2591, 0x2592, 0x2593, 0x2320,
+	0x25a0, 0x2219, 0x221a, 0x2248,
+	0x2264, 0x2265, 0x00a0, 0x2321,
+	0x00b0, 0x00b2, 0x00b7, 0x00f7,
+	/* 0xa0*/
+	0x2550, 0x2551, 0x2552, 0x0451,
+	0x0454, 0x2554, 0x0456, 0x0457,
+	0x2557, 0x2558, 0x2559, 0x255a,
+	0x255b, 0x0491, 0x255d, 0x255e,
+	/* 0xb0*/
+	0x255f, 0x2560, 0x2561, 0x0401,
+	0x0404, 0x2563, 0x0406, 0x0407,
+	0x2566, 0x2567, 0x2568, 0x2569,
+	0x256a, 0x0490, 0x256c, 0x00a9,
+	/* 0xc0*/
+	0x044e, 0x0430, 0x0431, 0x0446,
+	0x0434, 0x0435, 0x0444, 0x0433,
+	0x0445, 0x0438, 0x0439, 0x043a,
+	0x043b, 0x043c, 0x043d, 0x043e,
+	/* 0xd0*/
+	0x043f, 0x044f, 0x0440, 0x0441,
+	0x0442, 0x0443, 0x0436, 0x0432,
+	0x044c, 0x044b, 0x0437, 0x0448,
+	0x044d, 0x0449, 0x0447, 0x044a,
+	/* 0xe0*/
+	0x042e, 0x0410, 0x0411, 0x0426,
+	0x0414, 0x0415, 0x0424, 0x0413,
+	0x0425, 0x0418, 0x0419, 0x041a,
+	0x041b, 0x041c, 0x041d, 0x041e,
+	/* 0xf0*/
+	0x041f, 0x042f, 0x0420, 0x0421,
+	0x0422, 0x0423, 0x0416, 0x0412,
+	0x042c, 0x042b, 0x0417, 0x0428,
+	0x042d, 0x0429, 0x0427, 0x042a,
+};
+
+static unsigned char page00[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+	0x00, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+	0x9c, 0x00, 0x9d, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xb0-0xb7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, /* 0xf0-0xf7 */
+};
+
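+/*
+ * Unlike koi8-r, this Cyrillic page also carries the Ukrainian letters:
+ * Ye (0xb4/0xa4), I (0xb6/0xa6), Yi (0xb7/0xa7) and Ghe with upturn
+ * (0xbd/0xad).
+ */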
+static unsigned char page04[256] = {
+	0x00, 0xb3, 0x00, 0x00, 0xb4, 0x00, 0xb6, 0xb7, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, /* 0x10-0x17 */
+	0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, /* 0x18-0x1f */
+	0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, /* 0x20-0x27 */
+	0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, /* 0x28-0x2f */
+	0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, /* 0x30-0x37 */
+	0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, /* 0x38-0x3f */
+	0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, /* 0x40-0x47 */
+	0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, /* 0x48-0x4f */
+	0x00, 0xa3, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0xbd, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+};
+
+static unsigned char page22[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x95, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+	0x00, 0x00, 0x00, 0x00, 0x98, 0x99, 0x00, 0x00, /* 0x60-0x67 */
+};
+
+static unsigned char page23[256] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x93, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+};
+
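+/*
+ * Compared with koi8-r, several double-line box characters have no
+ * mapping here (entries left 0x00) because their byte values 0xa4,
+ * 0xa6, 0xa7, 0xad, 0xb4, 0xb6, 0xb7 and 0xbd now hold the Ukrainian
+ * letters instead.
+ */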
+static unsigned char page25[256] = {
+	0x80, 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+	0x83, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+	0x85, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+	0x00, 0x00, 0x00, 0x00, 0x87, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+	0x00, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+	0xa0, 0xa1, 0xa2, 0x00, 0xa5, 0x00, 0x00, 0xa8, /* 0x50-0x57 */
+	0xa9, 0xaa, 0xab, 0xac, 0x00, 0xae, 0xaf, 0xb0, /* 0x58-0x5f */
+	0xb1, 0xb2, 0x00, 0xb5, 0x00, 0x00, 0xb8, 0xb9, /* 0x60-0x67 */
+	0xba, 0xbb, 0xbc, 0x00, 0xbe, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+
+	0x8b, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+	0x8d, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+	0x8f, 0x90, 0x91, 0x92, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+	0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+};
+
+static unsigned char *page_uni2charset[256] = {
+	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
+	NULL,   NULL,   page22, page23, NULL,   page25, NULL,   NULL,   
+};
+
+static unsigned char charset2lower[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
+	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xa3, 0xa4, 0xb5, 0xa6, 0xa7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */
+};
+
+static unsigned char charset2upper[256] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
+	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
+	0xa0, 0xa1, 0xa2, 0xb3, 0xb4, 0xa5, 0xb6, 0xb7, /* 0xa0-0xa7 */
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf, /* 0xa8-0xaf */
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	unsigned char *uni2charset;
+	unsigned char cl = uni & 0x00ff;
+	unsigned char ch = (uni & 0xff00) >> 8;
+
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	uni2charset = page_uni2charset[ch];
+	if (uni2charset && uni2charset[cl])
+		out[0] = uni2charset[cl];
+	else
+		return -EINVAL;
+	return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	*uni = charset2uni[*rawstring];
+	if (*uni == 0x0000)
+		return -EINVAL;
+	return 1;
+}
+
+static struct nls_table table = {
+	.charset	= "koi8-u",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= charset2lower,
+	.charset2upper	= charset2upper,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_koi8_u(void)
+{
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_koi8_u(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_koi8_u)
+module_exit(exit_nls_koi8_u)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_utf8.c b/fs/nls/nls_utf8.c
new file mode 100644
index 0000000..aa2c42f
--- /dev/null
+++ b/fs/nls/nls_utf8.c
@@ -0,0 +1,61 @@
+/*
+ * Module for handling utf8 just like any other charset.
+ * By Urban Widmark 2000
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static unsigned char identity[256];
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	int n;
+
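+	/*
+	 * utf8_wctomb() writes at most boundlen bytes to 'out' and returns
+	 * the length of the encoded sequence, or -1 if the character cannot
+	 * be encoded; the '?' below is only a placeholder since -EINVAL is
+	 * returned in that case.
+	 */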
+	if ( (n = utf8_wctomb(out, uni, boundlen)) == -1) {
+		*out = '?';
+		return -EINVAL;
+	}
+	return n;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	int n;
+
+	if ( (n = utf8_mbtowc(uni, rawstring, boundlen)) == -1) {
+		*uni = 0x003f;	/* ? */
+		n = -EINVAL;
+	}
+	return n;
+}
+
+static struct nls_table table = {
+	.charset	= "utf8",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+	.charset2lower	= identity,	/* no conversion */
+	.charset2upper	= identity,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_nls_utf8(void)
+{
+	int i;
+	for (i=0; i<256; i++)
+		identity[i] = i;
+
+	return register_nls(&table);
+}
+
+static void __exit exit_nls_utf8(void)
+{
+	unregister_nls(&table);
+}
+
+module_init(init_nls_utf8)
+module_exit(exit_nls_utf8)
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
new file mode 100644
index 0000000..1d2ad15
--- /dev/null
+++ b/fs/ntfs/ChangeLog
@@ -0,0 +1,1350 @@
+ToDo/Notes:
+	- Find and fix bugs.
+	- Checkpoint or disable the user space journal ($UsnJrnl).
+	- In between ntfs_prepare/commit_write, need exclusion between
+	  simultaneous file extensions. Need perhaps an NInoResizeUnderway()
+	  flag which we can set in ntfs_prepare_write() and clear again in
+	  ntfs_commit_write(). Just have to be careful in readpage/writepage,
+	  as well as in truncate, that we play nice... We might need to have
+	  a data_size field in the ntfs_inode to store the real attribute
+	  length. Also need to be careful with initialized_size extension in
+	  ntfs_prepare_write. Basically, just be _very_ careful in this code...
+	  OTOH, perhaps i_sem, which is held across generic_file_write, is
+	  sufficient for synchronisation here. We then just need to make sure
+	  ntfs_readpage/writepage/truncate interoperate properly with us.
+	  UPDATE: The above is all ok as it is due to i_sem held.  The only
+	  thing that needs to be checked is ntfs_writepage() which does not
+	  hold i_sem.  It cannot change i_size but it needs to cope with a
+	  concurrent i_size change.
+	- Implement mft.c::sync_mft_mirror_umount().  We currently will just
+	  leave the volume dirty on umount if the final iput(vol->mft_ino)
+	  causes a write of any mirrored mft records due to the mft mirror
+	  inode having been discarded already.  Whether this can actually ever
+	  happen is unclear however so it is worth waiting until someone hits
+	  the problem.
+	- Enable the code for setting the NT4 compatibility flag when we start
+	  making NTFS 1.2 specific modifications.
+
+2.1.23-WIP
+
+	- Add printk rate limiting for ntfs_warning() and ntfs_error() when
+	  compiled without debug.  This avoids a possible denial of service
+	  attack.  Thanks to Carl-Daniel Hailfinger from SuSE for pointing this
+	  out.
+
+2.1.22 - Many bug and race fixes and error handling improvements.
+
+	- Improve error handling in fs/ntfs/inode.c::ntfs_truncate().
+	- Change fs/ntfs/inode.c::ntfs_truncate() to return an error code
+	  instead of void and provide a helper ntfs_truncate_vfs() for the
+	  vfs ->truncate method.
+	- Add a new ntfs inode flag NInoTruncateFailed() and modify
+	  fs/ntfs/inode.c::ntfs_truncate() to set and clear it appropriately.
+	- Fix min_size and max_size definitions in ATTR_DEF structure in
+	  fs/ntfs/layout.h to be signed.
+	- Add attribute definition handling helpers to fs/ntfs/attrib.[hc]:
+	  ntfs_attr_size_bounds_check(), ntfs_attr_can_be_non_resident(), and
+	  ntfs_attr_can_be_resident(), which in turn use the new private helper
+	  ntfs_attr_find_in_attrdef().
+	- In fs/ntfs/aops.c::mark_ntfs_record_dirty(), take the
+	  mapping->private_lock around the dirtying of the buffer heads
+	  analogous to the way it is done in __set_page_dirty_buffers().
+	- Ensure the mft record size does not exceed the PAGE_CACHE_SIZE at
+	  mount time as this cannot work with the current implementation.
+	- Check for location of attribute name and improve error handling in
+	  general in fs/ntfs/inode.c::ntfs_read_locked_inode() and friends.
+	- In fs/ntfs/aops.c::ntfs_writepage(), if the page is fully outside
+	  i_size, i.e. race with truncate, invalidate the buffers on the page
+	  so that they become freeable and hence the page does not leak.
+	- Remove unused function fs/ntfs/runlist.c::ntfs_rl_merge().  (Adrian
+	  Bunk)
+	- Fix stupid bug in fs/ntfs/attrib.c::ntfs_attr_find() that resulted in
+	  a NULL pointer dereference in the error code path when a corrupt
+	  attribute was found.  (Thanks to Domen Puncer for the bug report.)
+	- Add MODULE_VERSION() to fs/ntfs/super.c.
+	- Make several functions and variables static.  (Adrian Bunk)
+	- Modify fs/ntfs/aops.c::mark_ntfs_record_dirty() so it allocates
+	  buffers for the page if they are not present and then marks the
+	  buffers belonging to the ntfs record dirty.  This causes the buffers
+	  to become busy and hence they are safe from removal until the page
+	  has been written out.
+	- Fix stupid bug in fs/ntfs/attrib.c::ntfs_external_attr_find() in the
+	  error handling code path that resulted in a BUG() due to trying to
+	  unmap an extent mft record when the mapping of it had failed and it
+	  thus was not mapped.  (Thanks to Ken MacFerrin for the bug report.)
+	- Drop the runlist lock after the vcn has been read in
+	  fs/ntfs/lcnalloc.c::__ntfs_cluster_free().
+	- Rewrite handling of multi sector transfer errors.  We now do not set
+	  PageError() when such errors are detected in the async i/o handler
+	  fs/ntfs/aops.c::ntfs_end_buffer_async_read().  All users of mst
+	  protected attributes now check the magic of each ntfs record as they
+	  use it and act appropriately.  This has the effect of making errors
+	  granular per ntfs record rather than per page which solves the case
+	  where we cannot access any of the ntfs records in a page when a
+	  single one of them had an mst error.  (Thanks to Ken MacFerrin for
+	  the bug report.)
+	- Fix error handling in fs/ntfs/quota.c::ntfs_mark_quotas_out_of_date()
+	  where we failed to release i_sem on the $Quota/$Q attribute inode.
+	- Fix bug in handling of bad inodes in fs/ntfs/namei.c::ntfs_lookup().
+	- Add mapping of unmapped buffers to all remaining code paths, i.e.
+	  fs/ntfs/aops.c::ntfs_write_mst_block(), mft.c::ntfs_sync_mft_mirror(),
+	  and write_mft_record_nolock().  From now on we require that the
+	  complete runlist for the mft mirror is always mapped into memory.
+	- Add creation of buffers to fs/ntfs/mft.c::ntfs_sync_mft_mirror().
+	- Improve error handling in fs/ntfs/aops.c::ntfs_{read,write}_block().
+	- Cleanup fs/ntfs/aops.c::ntfs_{read,write}page() since we know that a
+	  resident attribute will be smaller than a page which makes the code
+	  simpler.  Also make the code more tolerant to concurrent ->truncate.
+
+2.1.21 - Fix some races and bugs, rewrite mft write code, add mft allocator.
+
+	- Implement extent mft record deallocation
+	  fs/ntfs/mft.c::ntfs_extent_mft_record_free().
+	- Split runlist-related functions off from attrib.[hc] to runlist.[hc].
+	- Add vol->mft_data_pos and initialize it at mount time.
+	- Rename init_runlist() to ntfs_init_runlist(), ntfs_vcn_to_lcn() to
+	  ntfs_rl_vcn_to_lcn(), decompress_mapping_pairs() to
+	  ntfs_mapping_pairs_decompress(), ntfs_merge_runlists() to
+	  ntfs_runlists_merge() and adapt all callers.
+	- Add fs/ntfs/runlist.[hc]::ntfs_get_nr_significant_bytes(),
+	  ntfs_get_size_for_mapping_pairs(), ntfs_write_significant_bytes(),
+	  and ntfs_mapping_pairs_build(), adapted from libntfs.
+	- Make fs/ntfs/lcnalloc.c::ntfs_cluster_free_from_rl_nolock() not
+	  static and add a declaration for it to lcnalloc.h.
+	- Add fs/ntfs/lcnalloc.h::ntfs_cluster_free_from_rl() which is a static
+	  inline wrapper for ntfs_cluster_free_from_rl_nolock() which takes the
+	  cluster bitmap lock for the duration of the call.
+	- Add fs/ntfs/attrib.[hc]::ntfs_attr_record_resize().
+	- Implement the equivalent of memset() for an ntfs attribute in
+	  fs/ntfs/attrib.[hc]::ntfs_attr_set() and switch
+	  fs/ntfs/logfile.c::ntfs_empty_logfile() to using it.
+	- Remove unnecessary casts from LCN_* constants.
+	- Implement fs/ntfs/runlist.c::ntfs_rl_truncate_nolock().
+	- Add MFT_RECORD_OLD as a copy of MFT_RECORD in fs/ntfs/layout.h and
+	  change MFT_RECORD to contain the NTFS 3.1+ specific fields.
+	- Add a helper function fs/ntfs/aops.c::mark_ntfs_record_dirty() which
+	  marks all buffers belonging to an ntfs record dirty, followed by
+	  marking the page the ntfs record is in dirty and also marking the vfs
+	  inode containing the ntfs record dirty (I_DIRTY_PAGES).
+	- Switch fs/ntfs/index.h::ntfs_index_entry_mark_dirty() to using the
+	  new helper fs/ntfs/aops.c::mark_ntfs_record_dirty() and remove the no
+	  longer needed fs/ntfs/index.[hc]::__ntfs_index_entry_mark_dirty().
+	- Move ntfs_{un,}map_page() from ntfs.h to aops.h and fix resulting
+	  include errors.
+	- Move the typedefs for runlist_element and runlist from types.h to
+	  runlist.h and fix resulting include errors.
+	- Remove unused {__,}format_mft_record() from fs/ntfs/mft.c.
+	- Modify fs/ntfs/mft.c::__mark_mft_record_dirty() to use the helper
+	  mark_ntfs_record_dirty() which also changes the behaviour in that we
+	  now set the buffers belonging to the mft record dirty as well as the
+	  page itself.
+	- Update fs/ntfs/mft.c::write_mft_record_nolock() and sync_mft_mirror()
+	  to cope with the fact that there now are dirty buffers in mft pages.
+	- Update fs/ntfs/inode.c::ntfs_write_inode() to also use the helper
+	  mark_ntfs_record_dirty() and thus to set the buffers belonging to the
+	  mft record dirty as well as the page itself.
+	- Fix compiler warnings on x86-64 in fs/ntfs/dir.c.  (Randy Dunlap,
+	  slightly modified by me)
+	- Add fs/ntfs/mft.c::try_map_mft_record() which fails with -EALREADY if
+	  the mft record is already locked and otherwise behaves the same way
+	  as fs/ntfs/mft.c::map_mft_record().
+	- Modify fs/ntfs/mft.c::write_mft_record_nolock() so that it only
+	  writes the mft record if the buffers belonging to it are dirty.
+	  Otherwise we assume that it was written out by other means already.
+	- Attempting to write outside initialized size is _not_ a bug so remove
+	  the bug check from fs/ntfs/aops.c::ntfs_write_mst_block().  It is in
+	  fact required to write outside initialized size when preparing to
+	  extend the initialized size.
+	- Map the page instead of using page_address() before writing to it in
+	  fs/ntfs/aops.c::ntfs_mft_writepage().
+	- Provide exclusion between opening an inode / mapping an mft record
+	  and accessing the mft record in fs/ntfs/mft.c::ntfs_mft_writepage()
+	  by setting the page not uptodate throughout ntfs_mft_writepage().
+	- Clear the page uptodate flag in fs/ntfs/aops.c::ntfs_write_mst_block()
+	  to ensure no one can see the page whilst the mst fixups are applied.
+	- Add the helper fs/ntfs/mft.c::ntfs_may_write_mft_record() which
+	  checks if an mft record may be written out safely obtaining any
+	  necessary locks in the process.  This is used by
+	  fs/ntfs/aops.c::ntfs_write_mst_block().
+	- Modify fs/ntfs/aops.c::ntfs_write_mst_block() to also work for
+	  writing mft records and improve its error handling in the process.
+	  Now if any of the records in the page fail to be written out, all
+	  other records will be written out instead of aborting completely.
+	- Remove ntfs_mft_aops and update all users to use ntfs_mst_aops.
+	- Modify fs/ntfs/inode.c::ntfs_read_locked_inode() to set the
+	  ntfs_mst_aops for all inodes which are NInoMstProtected() and
+	  ntfs_aops for all other inodes.
+	- Rename fs/ntfs/mft.c::sync_mft_mirror{,_umount}() to
+	  ntfs_sync_mft_mirror{,_umount}() and change their parameters so they
+	  no longer require an ntfs inode to be present.  Update all callers.
+	- Cleanup the error handling in fs/ntfs/mft.c::ntfs_sync_mft_mirror().
+	- Clear the page uptodate flag in fs/ntfs/mft.c::ntfs_sync_mft_mirror()
+	  to ensure no one can see the page whilst the mst fixups are applied.
+	- Remove the no longer needed fs/ntfs/mft.c::ntfs_mft_writepage() and
+	  fs/ntfs/mft.c::try_map_mft_record().
+	- Fix callers of fs/ntfs/aops.c::mark_ntfs_record_dirty() to call it
+	  with the ntfs inode which contains the page rather than the ntfs
+	  inode whose mft record is in the page.
+	- Fix race condition in fs/ntfs/inode.c::ntfs_put_inode() by moving the
+	  index inode bitmap inode release code from there to
+	  fs/ntfs/inode.c::ntfs_clear_big_inode().  (Thanks to Christoph
+	  Hellwig for spotting this.)
+	- Fix race condition in fs/ntfs/inode.c::ntfs_put_inode() by taking the
+	  inode semaphore around the code that sets ni->itype.index.bmp_ino to
+	  NULL and reorganize the code to optimize it a bit.  (Thanks to
+	  Christoph Hellwig for spotting this.)
+	- Modify fs/ntfs/aops.c::mark_ntfs_record_dirty() to no longer take the
+	  ntfs inode as a parameter as this is confusing and misleading and the
+	  needed ntfs inode is available via NTFS_I(page->mapping->host).
+	  Adapt all callers to this change.
+	- Modify fs/ntfs/mft.c::write_mft_record_nolock() and
+	  fs/ntfs/aops.c::ntfs_write_mst_block() to only check the dirty state
+	  of the first buffer in a record and to take this as the ntfs record
+	  dirty state.  We cannot look at the dirty state for subsequent
+	  buffers because we might be racing with
+	  fs/ntfs/aops.c::mark_ntfs_record_dirty().
+	- Move the static inline ntfs_init_big_inode() from fs/ntfs/inode.c to
+	  inode.h and make fs/ntfs/inode.c::__ntfs_init_inode() non-static and
+	  add a declaration for it to inode.h.  Fix some compilation issues
+	  that resulted due to #includes and header file interdependencies.
+	- Simplify setup of i_mode in fs/ntfs/inode.c::ntfs_read_locked_inode().
+	- Add helpers fs/ntfs/layout.h::MK_MREF() and MK_LE_MREF().
+	- Modify fs/ntfs/mft.c::map_extent_mft_record() to only verify the mft
+	  record sequence number if it is specified (i.e. not zero).
+	- Add fs/ntfs/mft.[hc]::ntfs_mft_record_alloc() and various helper
+	  functions used by it.
+	- Update Documentation/filesystems/ntfs.txt with instructions on how to
+	  use the Device-Mapper driver with NTFS ftdisk/LDM raid.  This removes
+	  the linear raid problem with the Software RAID / MD driver when one
+	  or more of the devices has an odd number of sectors.
+
+2.1.20 - Fix two stupid bugs introduced in 2.1.18 release.
+
+	- Fix stupid bug in fs/ntfs/attrib.c::ntfs_attr_reinit_search_ctx()
+	  where we did not clear ctx->al_entry but it was still set due to
+	  changes in ntfs_attr_lookup() and ntfs_external_attr_find() in
+	  particular.
+	- Fix another stupid bug in fs/ntfs/attrib.c::ntfs_external_attr_find()
+	  where we forgot to unmap the extent mft record when we had finished
+	  enumerating an attribute which caused a bug check to trigger when the
+	  VFS calls ->clear_inode.
+
+2.1.19 - Many cleanups, improvements, and a minor bug fix.
+
+	- Update ->setattr (fs/ntfs/inode.c::ntfs_setattr()) to refuse to
+	  change the uid, gid, and mode of an inode as we do not support NTFS
+	  ACLs yet.
+	- Remove BKL use from ntfs_setattr() syncing up with the rest of the
+	  kernel.
+	- Get rid of the ugly transparent union in fs/ntfs/dir.c::ntfs_readdir()
+	  and ntfs_filldir() as per suggestion from Al Viro.
+	- Change '\0' and L'\0' to simply 0 as per advice from Linus Torvalds.
+	- Update ->truncate (fs/ntfs/inode.c::ntfs_truncate()) to check if the
+	  inode size has changed and to only output an error if so.
+	- Rename fs/ntfs/attrib.h::attribute_value_length() to ntfs_attr_size().
+	- Add le{16,32,64} as well as sle{16,32,64} data types to
+	  fs/ntfs/types.h.
+	- Change ntfschar to be le16 instead of u16 in fs/ntfs/types.h.
+	- Add le versions of VCN, LCN, and LSN called leVCN, leLCN, and leLSN,
+	  respectively, to fs/ntfs/types.h.
+	- Update endianness conversion macros in fs/ntfs/endian.h to use the
+	  new types as appropriate.
+	- Do proper type casting when using sle64_to_cpup() in fs/ntfs/dir.c
+	  and index.c.
+	- Add leMFT_REF data type to fs/ntfs/layout.h.
+	- Update all NTFS header files with the new little endian data types.
+	  Affected files are fs/ntfs/layout.h, logfile.h, and time.h.
+	- Do proper type casting when using ntfs_is_*_recordp() in
+	  fs/ntfs/logfile.c, mft.c, and super.c.
+	- Fix all the sparse bitwise warnings.  Had to change all the typedef
+	  enums storing little endian values to simple enums plus a typedef for
+	  the datatype to make sparse happy.
+	- Fix a bug found by the new sparse bitwise warnings where the default
+	  upcase table was defined as a pointer to wchar_t rather than ntfschar
+	  in fs/ntfs/ntfs.h and super.c.
+	- Change {const_,}cpu_to_le{16,32}(0) to just 0 as suggested by Al Viro.
+
+2.1.18 - Fix scheduling latencies at mount time as well as an endianness bug.
+
+	- Remove vol->nr_mft_records as it was pretty meaningless and optimize
+	  the calculation of total/free inodes as used by statfs().
+	- Fix scheduling latencies in ntfs_fill_super() by dropping the BKL
+	  because the code itself is using the ntfs_lock semaphore which
+	  provides safe locking.  (Ingo Molnar)
+	- Fix a potential bug in fs/ntfs/mft.c::map_extent_mft_record() that
+	  could occur in the future for when we start closing/freeing extent
+	  inodes if we don't set base_ni->ext.extent_ntfs_inos to NULL after
+	  we free it.
+	- Rename {find,lookup}_attr() to ntfs_attr_{find,lookup}() as well as
+	  find_external_attr() to ntfs_external_attr_find() to cleanup the
+	  namespace a bit and to be more consistent with libntfs.
+	- Rename {{re,}init,get,put}_attr_search_ctx() to
+	  ntfs_attr_{{re,}init,get,put}_search_ctx() as well as the type
+	  attr_search_context to ntfs_attr_search_ctx.
+	- Force use of ntfs_attr_find() in ntfs_attr_lookup() when searching
+	  for the attribute list attribute itself.
+	- Fix endianness bug in ntfs_external_attr_find().
+	- Change ntfs_{external_,}attr_find() to return 0 on success, -ENOENT
+	  if the attribute is not found, and -EIO on real error.  In the case
+	  of -ENOENT, the search context is updated to describe the attribute
+	  before which the attribute being searched for would need to be
+	  inserted if such an action were to be desired.  In the case of
+	  ntfs_external_attr_find(), the search context is also updated to
+	  indicate the attribute list entry before which the attribute list
+	  entry of the attribute being searched for would need to be inserted
+	  if such an action were to be desired.  Also make ntfs_attr_find()
+	  static and remove its prototype from attrib.h as it is not used
+	  anywhere other than attrib.c.  Update ntfs_attr_lookup() and all
+	  callers of ntfs_{external,}attr_{find,lookup}() for the new return
+	  values.
+	- Minor cleanup of fs/ntfs/inode.c::ntfs_init_locked_inode().
+
+2.1.17 - Fix bugs in mount time error code paths and other updates.
+
+	- Implement bitmap modification code (fs/ntfs/bitmap.[hc]).  This
+	  includes functions to set/clear a single bit or a run of bits.
+	- Add fs/ntfs/attrib.[hc]::ntfs_find_vcn() which returns the locked
+	  runlist element containing a particular vcn.  It also takes care of
+	  mapping any needed runlist fragments.
+	- Implement cluster (de-)allocation code (fs/ntfs/lcnalloc.[hc]).
+	- Load attribute definition table from $AttrDef at mount time.
+	- Fix bugs in mount time error code paths involving (de)allocation of
+	  the default and volume upcase tables.
+	- Remove ntfs_nr_mounts as it is no longer used.
+
+2.1.16 - Implement access time updates, file sync, async io, and read/writev.
+
+	- Add support for readv/writev and aio_read/aio_write (fs/ntfs/file.c).
+	  This is done by setting the appropriate file operations pointers to
+	  the generic helper functions provided by mm/filemap.c.
+	- Implement fsync, fdatasync, and msync both for files (fs/ntfs/file.c)
+	  and directories (fs/ntfs/dir.c).
+	- Add support for {a,m,c}time updates to inode.c::ntfs_write_inode().
+	  Note, except for the root directory and any other system files opened
+	  by the user, the system files will not have their access times
+	  updated as they are only accessed at the inode level and hence the
+	  file level functions which cause the times to be updated are never
+	  invoked.
+
+2.1.15 - Invalidate quotas when (re)mounting read-write.
+
+	- Add new element itype.index.collation_rule to the ntfs inode
+	  structure and set it appropriately in ntfs_read_locked_inode().
+	- Implement a new inode type "index" to allow efficient access to the
+	  indices found in various system files and adapt inode handling
+	  accordingly (fs/ntfs/inode.[hc]).  An index inode is essentially an
+	  attribute inode (NInoAttr() is true) with an attribute type of
+	  AT_INDEX_ALLOCATION.  As such, it is no longer allowed to call
+	  ntfs_attr_iget() with an attribute type of AT_INDEX_ALLOCATION as
+	  there would be no way to distinguish between normal attribute inodes
+	  and index inodes.  The function to obtain an index inode is
+	  ntfs_index_iget() and it uses the helper function
+	  ntfs_read_locked_index_inode().  Note, we do not overload
+	  ntfs_attr_iget() as indices consist of multiple attributes so using
+	  ntfs_attr_iget() to obtain an index inode would be confusing.
+	- Ensure that there is no overflow when doing page->index <<
+	  PAGE_CACHE_SHIFT by casting page->index to s64 in fs/ntfs/aops.c.
+	- Use atomic kmap instead of kmap() in fs/ntfs/aops.c::ntfs_read_page()
+	  and ntfs_read_block().
+	- Use case sensitive attribute lookups instead of case insensitive ones.
+	- Lock all page cache pages belonging to mst protected attributes while
+	  accessing them to ensure we never see corrupt data while the page is
+	  under writeout.
+	- Add framework for generic ntfs collation (fs/ntfs/collation.[hc]).
+	  We have ntfs_is_collation_rule_supported() to check if the collation
+	  rule you want to use is supported and ntfs_collation() which actually
+	  collates two data items.  We currently only support COLLATION_BINARY
+	  and COLLATION_NTOFS_ULONG but support for other collation rules will
+	  be added as the need arises.
+	- Add a new type, ntfs_index_context, to allow retrieval of an index
+	  entry using the corresponding index key.  To get an index context,
+	  use ntfs_index_ctx_get() and to release it, use ntfs_index_ctx_put().
+	  This also adds a new slab cache for the index contexts.  To lookup a
+	  key in an index inode, use ntfs_index_lookup().  After modifying an
+	  index entry, call ntfs_index_entry_flush_dcache_page() followed by
+	  ntfs_index_entry_mark_dirty() to ensure the changes are written out
+	  to disk.  For details see fs/ntfs/index.[hc].  Note, at present, if
+	  an index entry is in the index allocation attribute rather than the
+	  index root attribute it will not be written out (you will get a
+	  warning message about discarded changes instead).
+	- Load the quota file ($Quota) and check if quota tracking is enabled
+	  and if so, mark the quotas out of date.  This causes Windows to
+	  rescan the volume on boot and update all quota entries.
+	- Add a set_page_dirty address space operation for ntfs_m[fs]t_aops.
+	  It is simply set to __set_page_dirty_nobuffers() to make sure that
+	  running set_page_dirty() on a page containing mft/ntfs records will
+	  not affect the dirty state of the page buffers.
+	- Add fs/ntfs/index.c::__ntfs_index_entry_mark_dirty() which sets all
+	  buffers that are inside the ntfs record in the page dirty after which
+	  it sets the page dirty.  This allows ->writepage to only write the
+	  dirty index records rather than having to write all the records in
+	  the page.  Modify fs/ntfs/index.h::ntfs_index_entry_mark_dirty() to
+	  use this rather than __set_page_dirty_nobuffers().
+	- Implement fs/ntfs/aops.c::ntfs_write_mst_block() which enables the
+	  writing of page cache pages belonging to mst protected attributes
+	  like the index allocation attribute in directory indices and other
+	  indices like $Quota/$Q, etc.  This means that the quota is now marked
+	  out of date on all volumes rather than only on ones where the quota
+	  defaults entry is in the index root attribute of the $Quota/$Q index.
+
+2.1.14 - Fix an NFSd caused deadlock reported by several users.
+
+	- Modify fs/ntfs/dir.c::ntfs_readdir() to copy the index root attribute
+	  value to a buffer so that we can put the search context and unmap the
+	  mft record before calling the filldir() callback.  We need to do this
+	  because of NFSd which calls ->lookup() from its filldir() callback
+	  and this causes NTFS to deadlock as ntfs_lookup() maps the mft record
+	  of the directory and since ntfs_readdir() has got it mapped already
+	  ntfs_lookup() deadlocks.
+
+2.1.13 - Enable overwriting of resident files and housekeeping of system files.
+
+	- Implement writing of mft records (fs/ntfs/mft.[hc]), which includes
+	  keeping the mft mirror in sync with the mft when mirrored mft records
+	  are written.  The functions are write_mft_record{,_nolock}().  The
+	  implementation is quite rudimentary for now with lots of things not
+	  implemented yet but I am not sure any of them can actually occur so
+	  I will wait for people to hit each one and only then implement it.
+	- Commit open system inodes at umount time.  This should make it
+	  virtually impossible for sync_mft_mirror_umount() to ever be needed.
+	- Implement ->write_inode (fs/ntfs/inode.c::ntfs_write_inode()) for the
+	  ntfs super operations.  This gives us inode writing via the VFS inode
+	  dirty code paths.  Note:  Access time updates are not implemented yet.
+	- Implement fs/ntfs/mft.[hc]::{,__}mark_mft_record_dirty() and make
+	  fs/ntfs/aops.c::ntfs_writepage() and ntfs_commit_write() use it, thus
+	  finally enabling resident file overwrite!  (-8  This also includes a
+	  placeholder for ->writepage (ntfs_mft_writepage()), which for now
+	  just redirties the page and returns.  Also, at umount time, we for
+	  now throw away all mft data page cache pages after the last call to
+	  ntfs_commit_inode() in the hope that all inodes will have been
+	  written out by then and hence no dirty (meta)data will be lost.  We
+	  also check for this case and emit an error message telling the user
+	  to run chkdsk.
+	- Use set_page_writeback() and end_page_writeback() in the resident
+	  attribute code path of fs/ntfs/aops.c::ntfs_writepage() otherwise
+	  the radix-tree tag PAGECACHE_TAG_DIRTY remains set even though the
+	  page is clean.
+	- Implement ntfs_mft_writepage() so it now checks if any of the mft
+	  records in the page are dirty and if so redirties the page and
+	  returns.  Otherwise it just returns (after doing set_page_writeback(),
+	  unlock_page(), end_page_writeback() or the radix-tree tag
+	  PAGECACHE_TAG_DIRTY remains set even though the page is clean), thus
+	  allowing the VM to do with the page as it pleases.  Also, at umount
+	  time, now only throw away dirty mft (meta)data pages if dirty inodes
+	  are present and ask the user to email us if they see this happening.
+	- Add functions ntfs_{clear,set}_volume_flags(), to modify the volume
+	  information flags (fs/ntfs/super.c).
+	- Mark the volume dirty when (re)mounting read-write and mark it clean
+	  when unmounting or remounting read-only.  If any volume errors are
+	  found, the volume is left marked dirty to force chkdsk to run.
+	- Add code to set the NT4 compatibility flag when (re)mounting
+	  read-write for newer NTFS versions but leave it commented out for now
+	  since we do not make any modifications that are NTFS 1.2 specific yet
+	  and since setting this flag breaks Captive-NTFS which is not nice.
+	  This code must be enabled once we start writing NTFS 1.2 specific
+	  changes, otherwise the Windows NTFS driver might crash / cause corruption.
+
+2.1.12 - Fix the second fix to the decompression engine and some cleanups.
+
+	- Add a new address space operations struct, ntfs_mst_aops, for mst
+	  protected attributes.  This is because the default ntfs_aops do not
+	  make sense with mst protected data and were they to write anything to
+	  such an attribute they would cause data corruption so we provide
+	  ntfs_mst_aops which does not have any write related operations set.
+	- Cleanup dirty ntfs inode handling (fs/ntfs/inode.[hc]) which also
+	  includes an adapted ntfs_commit_inode() and an implementation of
+	  ntfs_write_inode() which for now just cleans dirty inodes without
+	  writing them (it does emit a warning that this is happening).
+	- Undo the second decompression engine fix (see 2.1.9 release ChangeLog
+	  entry) as it was only fixing a theoretical bug but at the same time
+	  it badly broke the handling of sparse and uncompressed compression
+	  blocks.
+
+2.1.11 - Driver internal cleanups.
+
+	- Only build logfile.o if building the driver with read-write support.
+	- Really final white space cleanups.
+	- Use generic_ffs() instead of ffs() in logfile.c which allows the
+	  log_page_size variable to be optimized by gcc into a constant.
+	- Rename uchar_t to ntfschar everywhere as uchar_t is unsigned 1-byte
+	  char as defined by POSIX and as found on some systems.
+
+2.1.10 - Force read-only (re)mounting of volumes with unsupported volume flags.
+
+	- Finish off the white space cleanups (remove trailing spaces, etc).
+	- Clean up ntfs_fill_super() and ntfs_read_inode_mount() by removing
+	  the kludges around the first iget().  Instead of (re)setting ->s_op
+	  we have the $MFT inode set up by explicit new_inode() / set ->i_ino /
+	  insert_inode_hash() / call ntfs_read_inode_mount() directly.  This
+	  kills the need for a second super_operations and allows us to return
+	  an error from ntfs_read_inode_mount() without resorting to ugly
+	  "poisoning" tricks.  (Al Viro)
+	- Force read-only (re)mounting if any of the following bits are set in
+	  the volume information flags:
+	  	VOLUME_IS_DIRTY, VOLUME_RESIZE_LOG_FILE,
+		VOLUME_UPGRADE_ON_MOUNT, VOLUME_DELETE_USN_UNDERWAY,
+		VOLUME_REPAIR_OBJECT_ID, VOLUME_MODIFIED_BY_CHKDSK
+	  To make this easier we define VOLUME_MUST_MOUNT_RO_MASK with all the
+	  above bits set so the test is made easy.
+
+2.1.9 - Fix two bugs in decompression engine.
+
+	- Fix a bug where we would not always detect that we have reached the
+	  end of a compression block because we were ending at minus one byte
+	  which is effectively the same as being at the end.  The fix is to
+	  check whether the uncompressed buffer has been fully filled and if so
+	  we assume we have reached the end of the compression block.  A big
+	  thank you to Marcin Gibuła for the bug report, the assistance in
+	  tracking down the bug and testing the fix.
+	- Fix a possible bug where when a compressed read is truncated to the
+	  end of the file, the offset inside the last page was not truncated.
+
+2.1.8 - Handle $MFT mirror and $LogFile, improve time handling, and cleanups.
+
+	- Use get_bh() instead of manual atomic_inc() in fs/ntfs/compress.c.
+	- Modify fs/ntfs/time.c::ntfs2utc(), get_current_ntfs_time(), and
+	  utc2ntfs() to work with struct timespec instead of time_t on the
+	  Linux UTC time side thus preserving the full precision of the NTFS
+	  time and only losing up to 99 nanoseconds in the Linux UTC time.
+	- Move fs/ntfs/time.c to fs/ntfs/time.h and make the time functions
+	  static inline.
+	- Remove unused ntfs_dirty_inode().
+	- Cleanup super operations declaration in fs/ntfs/super.c.
+	- Wrap flush_dcache_mft_record_page() in #ifdef NTFS_RW.
+	- Add NInoTestSetFoo() and NInoTestClearFoo() macro magic to
+	  fs/ntfs/inode.h and use it to declare NInoTest{Set,Clear}Dirty.
+	- Move typedefs for ntfs_attr and test_t from fs/ntfs/inode.c to
+	  fs/ntfs/inode.h so they can be used elsewhere.
+	- Determine the mft mirror size as the number of mirrored mft records
+	  and store it in ntfs_volume->mftmirr_size (fs/ntfs/super.c).
+	- Load the mft mirror at mount time and compare the mft records stored
+	  in it to the ones in the mft.  Force a read-only mount if the two do
+	  not match (fs/ntfs/super.c).
+	- Fix type casting related warnings on 64-bit architectures.  Thanks
+	  to Meelis Roos for reporting them.
+	- Move %L to %ll as %L is floating point and %ll is integer which is
+	  what we want.
+	- Read the journal ($LogFile) and determine if the volume has been
+	  shutdown cleanly and force a read-only mount if not (fs/ntfs/super.c
+	  and fs/ntfs/logfile.c).  This is a little bit of a crude check in
+	  that we only look at the restart areas and not at the actual log
+	  records so that there will be a very small number of cases where we
+	  think that a volume is dirty when in fact it is clean.  This should
+	  only affect volumes that have not been shutdown cleanly and did not
+	  have any pending, non-check-pointed i/o.
+	- If the $LogFile indicates a clean shutdown and a read-write (re)mount
+	  is requested, empty $LogFile by overwriting it with 0xff bytes to
+	  ensure that Windows cannot cause data corruption by replaying a stale
+	  journal after Linux has written to the volume.
+
+2.1.7 - Enable NFS exporting of mounted NTFS volumes.
+
+	- Set i_generation in the VFS inode from the seq_no of the NTFS inode.
+	- Make ntfs_lookup() NFS export safe, i.e. use d_splice_alias(), etc.
+	- Implement ->get_dentry() in fs/ntfs/namei.c::ntfs_get_dentry() as the
+	  default doesn't allow inode number 0, which is a valid inode on NTFS,
+	  and even if it did allow that it uses iget() instead of ntfs_iget(),
+	  which makes it useless for us.
+	- Implement ->get_parent() in fs/ntfs/namei.c::ntfs_get_parent() as the
+	  default just returns -EACCES which is not very useful.
+	- Define export operations (->s_export_op) for NTFS (ntfs_export_ops)
+	  and set them up in the super block at mount time (super.c).  This
+	  allows mounted NTFS volumes to be exported via NFS.
+	- Add missing return -EOPNOTSUPP; in
+	  fs/ntfs/aops.c::ntfs_commit_nonresident_write().
+	- Enforce no atime and no dir atime updates at mount/remount time as
+	  they are not implemented yet anyway.
+	- Move a few assignments in fs/ntfs/attrib.c::load_attribute_list() to
+	  after a NULL check.  Thanks to Dave Jones for pointing this out.
+
+2.1.6 - Fix minor bug in handling of compressed directories.
+
+	- Fix bug in handling of compressed directories.  A compressed
+	  directory is not really compressed so when we set the ->i_blocks
+	  field of a compressed directory inode we were setting it from the
+	  non-existing field ni->itype.compressed.size which gave random
+	  results...  For directories we now always use ni->allocated_size.
+
+2.1.5 - Fix minor bug in attribute list attribute handling.
+
+	- Fix bug in attribute list handling.  Actually it is not as much a bug
+	  as too much protection in that we were not allowing attribute lists
+	  which waste space on disk, while Windows XP clearly allows them and
+	  in fact creates such attribute lists, so our driver was failing.
+	- Update NTFS documentation ready for 2.6 kernel release.
+
+2.1.4 - Reduce compiler requirements.
+
+	- Remove all uses of unnamed structs and unions in the driver to make
+	  old and newer gcc versions happy. Makes it a bit uglier IMO but at
+	  least people will stop hassling me about it.
+
+2.1.3 - Important bug fixes in corner cases.
+
+	- super.c::parse_ntfs_boot_sector(): Correct the check for 64-bit
+	  clusters. (Philipp Thomas)
+	- attrib.c::load_attribute_list(): Fix bug when initialized_size is a
+	  multiple of the block_size but not the cluster size. (Szabolcs
+	  Szakacsits <szaka@sienet.hu>)
+
+2.1.2 - Important bug fixes alleviating the hangs in statfs.
+
+	- Fix buggy free cluster and free inode determination logic.
+
+2.1.1 - Minor updates.
+
+	- Add handling for initialized_size != data_size in compressed files.
+	- Reduce function local stack usage from 0x3d4 bytes to just noise in
+	  fs/ntfs/upcase.c. (Randy Dunlap <rddunlap@osdl.org>)
+	- Remove compiler warnings for newer gcc.
+	- Pages are no longer kmapped by mm/filemap.c::generic_file_write()
+	  around calls to ->{prepare,commit}_write.  Adapt NTFS appropriately
+	  in fs/ntfs/aops.c::ntfs_prepare_nonresident_write() by using
+	  kmap_atomic(KM_USER0).
+
+2.1.0 - First steps towards write support: implement file overwrite.
+
+	- Add configuration option for developmental write support with an
+	  appropriately scary configuration help text.
+	- Initial implementation of fs/ntfs/aops.c::ntfs_writepage() and its
+	  helper fs/ntfs/aops.c::ntfs_write_block(). This enables mmap(2) based
+	  overwriting of existing files on ntfs. Note: Resident files are
+	  only written into memory, and not written out to disk at present, so
+	  avoid writing to files smaller than about 1kiB.
+	- Initial implementation of fs/ntfs/aops.c::ntfs_prepare_write(), its
+	  helper fs/ntfs/aops.c::ntfs_prepare_nonresident_write() and their
+	  counterparts, fs/ntfs/aops.c::ntfs_commit_write(), and
+	  fs/ntfs/aops.c::ntfs_commit_nonresident_write(), respectively. Also,
+	  add generic_file_write() to the ntfs file operations (fs/ntfs/file.c).
+	  This enables write(2) based overwriting of existing files on ntfs.
+	  Note: As with mmap(2) based overwriting, resident files are only
+	  written into memory, and not written out to disk at present, so avoid
+	  writing to files smaller than about 1kiB.
+	- Implement ->truncate (fs/ntfs/inode.c::ntfs_truncate()) and
+	  ->setattr() (fs/ntfs/inode.c::ntfs_setattr()) inode operations for
+	  files with the purpose of intercepting and aborting all i_size
+	  changes which we do not support yet. ntfs_truncate() actually only
+	  emits a warning message but AFAICS our interception of i_size changes
+	  elsewhere means ntfs_truncate() never gets called for i_size changes.
+	  It is only called from generic_file_write() when we fail in
+	  ntfs_prepare_{,nonresident_}write() in order to discard any
+	  instantiated buffers beyond i_size. Thus i_size is not actually
+	  changed so our warning message is enough. Unfortunately it is not
+	  possible to easily determine if i_size is being changed or not hence
+	  we just emit an appropriately worded error message.
+
+2.0.25 - Small bug fixes and cleanups.
+
+	- Unlock the page in an out of memory error code path in
+	  fs/ntfs/aops.c::ntfs_read_block().
+	- If fs/ntfs/aops.c::ntfs_read_page() is called on an uptodate page,
+	  just unlock the page and return. (This can happen due to ->writepage
+	  clearing PageUptodate() during write out of MstProtected()
+	  attributes.)
+	- Remove leaked write code again.
+
+2.0.24 - Cleanups.
+
+	- Treat BUG_ON() as ASSERT() not VERIFY(), i.e. do not use side effects
+	  inside BUG_ON(). (Adam J. Richter)
+	- Split logical OR expressions inside BUG_ON() into individual BUG_ON()
+	  calls for improved debugging. (Adam J. Richter)
+	- Add errors flag to the ntfs volume state, accessed via
+	  NVol{,Set,Clear}Errors(vol).
+	- Do not allow read-write remounts of read-only volumes with errors.
+	- Clarify comment for ntfs file operation sendfile which was added by
+	  Christoph Hellwig a while ago (just using generic_file_sendfile())
+	  to say that ntfs ->sendfile is only used for the case where the
+	  source data is on the ntfs partition and the destination is
+	  somewhere else, i.e. nothing we need to concern ourselves with.
+	- Add generic_file_write() as our ntfs file write operation.
+
+2.0.23 - Major bug fixes (races, deadlocks, non-i386 architectures).
+
+	- Massive internal locking changes to mft record locking. Fixes lock
+	  recursion and replaces the mrec_lock read/write semaphore with a
+	  mutex. Also removes the now superfluous mft_count. This fixes several
+	  race conditions and deadlocks, especially in the future write code.
+	- Fix ntfs over loopback for compressed files by adding an
+	  optimization barrier. (gcc was screwing up otherwise?)
+	- Miscellaneous cleanups all over the code and a fix or two in error
+	  handling code paths.
+	Thanks go to Christoph Hellwig for pointing out the following two:
+	- Remove now unused function fs/ntfs/malloc.h::vmalloc_nofs().
+	- Fix ntfs_free() for ia64 and parisc by checking for VMALLOC_END, too.
+
+2.0.22 - Cleanups, mainly to ntfs_readdir(), and use C99 initializers.
+
+	- Change fs/ntfs/dir.c::ntfs_readdir() to only read/write ->f_pos once
+	  at entry/exit respectively.
+	- Use C99 initializers for structures.
+	- Remove unused variable blocks from fs/ntfs/aops.c::ntfs_read_block().
+
+2.0.21 - Check for, and refuse to work with too large files/directories/volumes.
+
+	- Limit volume size at mount time to 2TiB on architectures where
+	  unsigned long is 32-bits (fs/ntfs/super.c::parse_ntfs_boot_sector()).
+	  This is the most we can do without overflowing the 32-bit limit of
+	  the block device size imposed on us by sb_bread() and sb_getblk()
+	  for the time being.
+	- Limit file/directory size at open() time to 16TiB on architectures
+	  where unsigned long is 32-bits (fs/ntfs/file.c::ntfs_file_open() and
+	  fs/ntfs/dir.c::ntfs_dir_open()). This is the most we can do without
+	  overflowing the page cache page index.
+
+2.0.20 - Support non-resident directory index bitmaps, fix page leak in readdir.
+
+	- Move the directory index bitmap to use an attribute inode instead of
+	  having special fields for it inside the ntfs inode structure. This
+	  means that the index bitmaps now use the page cache for i/o, too,
+	  and also as a side effect we get support for non-resident index
+	  bitmaps for free.
+	- Simplify/cleanup error handling in fs/ntfs/dir.c::ntfs_readdir() and
+	  fix a page leak that manifested itself in some cases.
+	- Add fs/ntfs/inode.c::ntfs_put_inode(), which we need to release the
+	  index bitmap inode on the final iput().
+
+2.0.19 - Fix race condition, improvements, and optimizations in i/o interface.
+
+	- Apply block optimization added to fs/ntfs/aops.c::ntfs_read_block()
+	  to fs/ntfs/compress.c::ntfs_file_read_compressed_block() as well.
+	- Drop the "file" from ntfs_file_read_compressed_block().
+	- Rename fs/ntfs/aops.c::ntfs_enb_buffer_read_async() to
+	  ntfs_end_buffer_async_read() (more like the fs/buffer.c counterpart).
+	- Update ntfs_end_buffer_async_read() with the improved logic from
+	  its updated counterpart fs/buffer.c::end_buffer_async_read(). Apply
+	  further logic improvements to better determine when we set PageError.
+	- Update submission of buffers in fs/ntfs/aops.c::ntfs_read_block() to
+	  check for the buffers being uptodate first in line with the updated
+	  fs/buffer.c::block_read_full_page(). This plugs a small race
+	  condition.
+
+2.0.18 - Fix race condition in reading of compressed files.
+
+	- There was a narrow window between checking a buffer head for being
+	  uptodate and locking it in ntfs_file_read_compressed_block(). We now
+	  lock the buffer and then check whether it is uptodate or not.
+
+2.0.17 - Cleanups and optimizations - shrinking the ToDo list.
+
+	- Modify fs/ntfs/inode.c::ntfs_read_locked_inode() to return an error
+	  code and update callers, i.e. ntfs_iget(), to pass that error code
+	  up instead of just using -EIO.
+	- Modifications to super.c to ensure that both mount and remount
+	  cannot set any write related options when the driver is compiled
+	  read-only.
+	- Optimize block resolution in fs/ntfs/aops.c::ntfs_read_block() to
+	  cache the current runlist element. This should improve performance
+	  when reading very large and/or very fragmented data.
+
+2.0.16 - Convert access to $MFT/$BITMAP to attribute inode API.
+
+	- Fix a stupid bug introduced in 2.0.15 where we were unmapping the
+	  wrong inode in fs/ntfs/inode.c::ntfs_attr_iget().
+	- Fix debugging check in fs/ntfs/aops.c::ntfs_read_block().
+	- Convert $MFT/$BITMAP access to attribute inode API and remove all
+	  remnants of the ugly mftbmp address space and operations hack. This
+	  means we finally have only one readpage function as well as only one
+	  async io completion handler. Yey! The mft bitmap is now just an
+	  attribute inode and is accessed from vol->mftbmp_ino just as if it
+	  were a normal file. Fake inodes rule. (-:
+
+2.0.15 - Fake inodes based attribute i/o via the pagecache, fixes and cleanups.
+
+	- Fix silly bug in fs/ntfs/super.c::parse_options() which was causing
+	  remounts to fail when the partition had an entry in /etc/fstab and
+	  the entry specified the nls= option.
+	- Apply same macro magic used in fs/ntfs/inode.h to fs/ntfs/volume.h to
+	  expand all the helper functions NVolFoo(), NVolSetFoo(), and
+	  NVolClearFoo().
+	- Move copyright statement from driver initialisation message to
+	  module description (fs/ntfs/super.c). This makes the initialisation
+	  message fit on one line and fits in better with rest of kernel.
+	- Update fs/ntfs/attrib.c::map_run_list() to work on both real and
+	  attribute inodes, and both for files and directories.
+	- Implement fake attribute inodes allowing all attribute i/o to go via
+	  the page cache and to use all the normal vfs/mm functionality:
+	  - Add ntfs_attr_iget() and its helper ntfs_read_locked_attr_inode()
+	    to fs/ntfs/inode.c.
+	  - Add needed cleanup code to ntfs_clear_big_inode().
+	- Merge address space operations for files and directories (aops.c),
+	  now just have ntfs_aops:
+	  - Rename:
+		end_buffer_read_attr_async() ->	ntfs_end_buffer_read_async(),
+		ntfs_attr_read_block()	     ->	ntfs_read_block(),
+		ntfs_file_read_page()	     ->	ntfs_readpage().
+	  - Rewrite fs/ntfs/aops.c::ntfs_readpage() to work on both real and
+	    attribute inodes, and both for files and directories.
+	  - Remove obsolete fs/ntfs/aops.c::ntfs_mst_readpage().
+
+2.0.14 - Run list merging code cleanup, minor locking changes, typo fixes.
+
+	- Change fs/ntfs/super.c::ntfs_statfs() to not rely on BKL by moving
+	  the locking out of super.c::get_nr_free_mft_records() and taking and
+	  dropping the mftbmp_lock rw_semaphore in ntfs_statfs() itself.
+	- Bring attribute runlist merging code (fs/ntfs/attrib.c) in sync with
+	  current userspace ntfs library code. This means that if a merge
+	  fails the original runlists are always left unmodified instead of
+	  being silently corrupted.
+	- Misc typo fixes.
+
+2.0.13 - Use iget5_locked() in preparation for fake inodes and small cleanups.
+
+	- Remove nr_mft_bits and the now superfluous union with nr_mft_records
+	  from ntfs_volume structure.
+	- Remove nr_lcn_bits and the now superfluous union with nr_clusters
+	  from ntfs_volume structure.
+	- Use iget5_locked() and friends instead of conventional iget(). Wrap
+	  the call in fs/ntfs/inode.c::ntfs_iget() and update callers of iget()
+	  to use ntfs_iget(). Leave only one iget() call at mount time so we
+	  don't need an ntfs_iget_mount().
+	- Change fs/ntfs/inode.c::ntfs_new_extent_inode() to take mft_no as an
+	  additional argument.
+
+2.0.12 - Initial cleanup of address space operations following 2.0.11 changes.
+
+	- Merge fs/ntfs/aops.c::end_buffer_read_mst_async() and
+	  fs/ntfs/aops.c::end_buffer_read_file_async() into one function
+	  fs/ntfs/aops.c::end_buffer_read_attr_async() using NInoMstProtected()
+	  to determine whether to apply mst fixups or not.
+	- Above change allows merging fs/ntfs/aops.c::ntfs_file_read_block()
+	  and fs/ntfs/aops.c::ntfs_mst_readpage() into one function
+	  fs/ntfs/aops.c::ntfs_attr_read_block(). Also, create a tiny wrapper
+	  fs/ntfs/aops.c::ntfs_mst_readpage() to transform the parameters from
+	  the VFS readpage function prototype to the ntfs_attr_read_block()
+	  function prototype.
+
+2.0.11 - Initial preparations for fake inode based attribute i/o.
+
+	- Move definition of ntfs_inode_state_bits to fs/ntfs/inode.h and
+	  do some macro magic (adapted from include/linux/buffer_head.h) to
+	  expand all the helper functions NInoFoo(), NInoSetFoo(), and
+	  NInoClearFoo().
+	- Add new flag to ntfs_inode_state_bits: NI_Sparse.
+	- Add new fields to ntfs_inode structure to allow use of fake inodes
+	  for attribute i/o: type, name, name_len. Also add new state bits:
+	  NI_Attr, which, if set, indicates the inode is a fake inode, and
+	  NI_MstProtected, which, if set, indicates the attribute uses multi
+	  sector transfer protection, i.e. fixups need to be applied after
+	  reads and before/after writes.
+	- Rename fs/ntfs/inode.c::ntfs_{new,clear,destroy}_inode() to
+	  ntfs_{new,clear,destroy}_extent_inode() and update callers.
+	- Use ntfs_clear_extent_inode() in fs/ntfs/inode.c::__ntfs_clear_inode()
+	  instead of ntfs_destroy_extent_inode().
+	- Cleanup memory deallocations in {__,}ntfs_clear_{,big_}inode().
+	- Make all operations on ntfs inode state bits use the NIno* functions.
+	- Set up the new ntfs inode fields and state bits in
+	  fs/ntfs/inode.c::ntfs_read_inode() and add appropriate cleanup of
+	  allocated memory to __ntfs_clear_inode().
+	- Cleanup ntfs_inode structure a bit for better ordering of elements
+	  w.r.t. their size to allow better packing of the structure in memory.
+
+2.0.10 - There can only be 2^32 - 1 inodes on an NTFS volume.
+
+	- Add check at mount time to verify that the number of inodes on the
+	  volume does not exceed 2^32 - 1, which is the maximum allowed for
+	  NTFS according to Microsoft.
+	- Change mft_no member of ntfs_inode structure to be unsigned long.
+	  Update all users. This makes ntfs_inode->mft_no just a copy of struct
+	  inode->i_ino. But we can't just always use struct inode->i_ino and
+	  remove mft_no because extent inodes do not have an attached struct
+	  inode.
+
+2.0.9 - Decompression engine now uses a single buffer and other cleanups.
+
+	- Change decompression engine to use a single buffer protected by a
+	  spin lock instead of per-CPU buffers. (Rusty Russell)
+	- Do not update cb_pos when handling a partial final page during
+	  decompression of a sparse compression block, as the value is later
+	  reset without being read/used. (Rusty Russell)
+	- Switch to using the new KM_BIO_SRC_IRQ for atomic kmap()s. (Andrew
+	  Morton)
+	- Change buffer size in ntfs_readdir()/ntfs_filldir() to use
+	  NLS_MAX_CHARSET_SIZE which makes the buffers almost 1kiB each but
+	  it also makes everything safer so it is a good thing.
+	- Miscellaneous minor cleanups to comments.
+
+2.0.8 - Major updates for handling of case sensitivity and dcache aliasing.
+
+	Big thanks go to Al Viro and other inhabitants of #kernel for investing
+	their time to discuss the case sensitivity and dcache aliasing issues.
+
+	- Remove unused source file fs/ntfs/attraops.c.
+	- Remove show_inodes mount option(s), thus dropping support for
+	  displaying of short file names.
+	- Remove deprecated mount option posix.
+	- Restore show_sys_files mount option.
+	- Add new mount option case_sensitive, to determine if the driver
+	  treats file names as case sensitive or not. If case sensitive, create
+	  file names in the POSIX namespace. Otherwise create file names in the
+	  LONG/WIN32 namespace. Note, files remain accessible via their short
+	  file name, if it exists.
+	- Remove really dumb logic bug in boot sector recovery code.
+	- Fix dcache aliasing issues wrt short/long file names via changes
+	  to fs/ntfs/dir.c::ntfs_lookup_inode_by_name() and
+	  fs/ntfs/namei.c::ntfs_lookup():
+	  - Add additional argument to ntfs_lookup_inode_by_name() in which we
+	    return information about the matching file name if the case is not
+	    matching or the match is a short file name. See comments above the
+	    function definition for details.
+	  - Change ntfs_lookup() to only create dcache entries for the correctly
+	    cased file name and only for the WIN32 namespace counterpart of DOS
+	    namespace file names. This ensures we have only one dentry per
+	    directory and also removes all dcache aliasing issues between short
+	    and long file names once we add write support. See comments above
+	    function for details.
+	- Fix potential 1 byte overflow in fs/ntfs/unistr.c::ntfs_ucstonls().
+
+2.0.7 - Minor cleanups and updates for changes in core kernel code.
+
+	- Remove much of the NULL struct element initializers.
+	- Various updates to make compatible with recent kernels.
+	- Remove defines of MAX_BUF_PER_PAGE and include linux/buffer_head.h
+	  in fs/ntfs/ntfs.h instead.
+	- Remove no longer needed KERNEL_VERSION checks. We are now in the
+	  kernel proper so they are no longer needed.
+
+2.0.6 - Major bugfix to make compatible with other kernel changes.
+
+	- Initialize the mftbmp address space properly now that there are more
+	  fields in the struct address_space. This was leading to hangs and
+	  oopses on umount since 2.5.12 because of changes to other parts of
+	  the kernel. We probably want a kernel generic init_address_space()
+	  function...
+	- Drop BKL from ntfs_readdir() after consultation with Al Viro. The
+	  only caller of ->readdir() is vfs_readdir() which holds i_sem during
+	  the call, and i_sem is sufficient protection against changes in the
+	  directory inode (including ->i_size).
+	- Use generic_file_llseek() for directories (as opposed to
+	  default_llseek()) as this downs i_sem instead of the BKL which is
+	  what we now need for exclusion against ->f_pos changes considering we
+	  no longer take the BKL in ntfs_readdir().
+
+2.0.5 - Major bugfix. Buffer overflow in extent inode handling.
+
+	- No need to set old blocksize in super.c::ntfs_fill_super() as the
+	  VFS does so via invocation of deactivate_super() calling
+	  fs->fill_super() calling block_kill_super() which does it.
+	- BKL moved from VFS into dir.c::ntfs_readdir(). (Linus Torvalds)
+	  -> Do we really need it? I don't think so as we have exclusion on
+	  the directory ntfs_inode rw_semaphore mrec_lock. We might have to
+	  move the ->f_pos accesses under the mrec_lock though. Check this...
+	- Fix really, really, really stupid buffer overflow in extent inode
+	  handling in mft.c::map_extent_mft_record().
+
+2.0.4 - Cleanups and updates for kernel 2.5.11.
+
+	- Add documentation on how to use the MD driver to be able to use NTFS
+	  stripe and volume sets in Linux and generally cleanup documentation
+	  a bit.
+	Remove all uses of kdev_t in favour of struct block_device *:
+	- Change compress.c::ntfs_file_read_compressed_block() to use
+	  sb_getblk() instead of getblk().
+	- Change super.c::ntfs_fill_super() to use bdev_hardsect_size() instead
+	  of get_hardsect_size().
+	- No need to get old blocksize in super.c::ntfs_fill_super() as
+	  fs/super.c::get_sb_bdev() already does this.
+	- Set bh->b_bdev instead of bh->b_dev throughout aops.c.
+
+2.0.3 - Small bug fixes, cleanups, and performance improvements.
+
+	- Remove some dead code from mft.c.
+	- Optimize readpage and read_block functions throughout aops.c so that
+	  only initialized blocks are read. Non-initialized ones have their
+	  buffer head mapped, zeroed, and set up to date, without scheduling
+	  any i/o. Thanks to Al Viro for advice on how to avoid the device i/o.
+	Thanks go to Andrew Morton for spotting the below:
+	- Fix buglet in allocate_compression_buffers() error code path.
+	- Call flush_dcache_page() after modifying page cache page contents in
+	  ntfs_file_readpage().
+	- Check for existence of page buffers throughout aops.c before calling
+	  create_empty_buffers(). This happens when an I/O error occurs and the
+	  read is retried. (It also happens once writing is implemented, so it
+	  needed doing anyway but I had left it for later...)
+	- Don't BUG_ON() uptodate and/or mapped buffers throughout aops.c in
+	  readpage and read_block functions. Reasoning same as above (i.e. I/O
+	  error retries and future write code paths.)
+
+2.0.2 - Minor updates and cleanups.
+
+	- Cleanup: rename mst.c::__post_read_mst_fixup to post_write_mst_fixup
+	  and cleanup the code a bit, removing the unused size parameter.
+	- Change default fmask to 0177 and update documentation.
+	- Change attrib.c::get_attr_search_ctx() to return the search context
+	  directly instead of taking the address of a pointer. A return value
+	  of NULL means the allocation failed. Updated all callers
+	  appropriately.
+	- Update to 2.5.9 kernel (preserving backwards compatibility) by
+	  replacing all occurrences of page->buffers with page_buffers(page).
+	- Fix minor bugs in runlist merging, also minor cleanup.
+	- Updates to bootsector layout and mft mirror contents descriptions.
+	- Small bug fix in error detection in unistr.c and some cleanups.
+	- Grow name buffer allocations in unistr.c in aligned multiples of 64
+	  bytes.
+
+2.0.1 - Minor updates.
+
+	- Make default umask correspond to documentation.
+	- Improve documentation.
+	- Set default mode to include execute bit. The {u,f,d}mask can be used
+	  to take it away if desired. This allows binaries to be executed from
+	  a mounted ntfs partition.
+
+2.0.0 - New version number. Remove TNG from the name. Now in the kernel.
+
+	- Add kill_super, just keeping up with the vfs changes in the kernel.
+	- Repeat some changes from tng-0.0.8 that somehow got lost on the way
+	  from the CVS import into BitKeeper.
+	- Begin to implement proper handling of allocated_size vs
+	  initialized_size vs data_size (i.e. i_size). Done are
+	  mft.c::ntfs_mft_readpage(), aops.c::end_buffer_read_index_async(),
+	  and attrib.c::load_attribute_list().
+	- Lock the runlist in attrib.c::load_attribute_list() while using it.
+	- Fix memory leak in ntfs_file_read_compressed_block() and generally
+	  clean up compress.c a little, removing some uncommented/unused debug
+	  code.
+	- Tidy up dir.c a little bit.
+	- Don't bother getting the runlist in inode.c::ntfs_read_inode().
+	- Merge mft.c::ntfs_mft_readpage() and aops.c::ntfs_index_readpage()
+	  creating aops.c::ntfs_mst_readpage(), improving the handling of
+	  holes and overflow in the process and implementing the correct
+	  equivalent of ntfs_file_get_block() in ntfs_mst_readpage() itself.
+	  I am aiming for correctness at the moment. Modularisation can come
+	  later.
+	- Rename aops.c::end_buffer_read_index_async() to
+	  end_buffer_read_mst_async() and optimize the overflow checking and
+	  handling.
+	- Use the host of the mftbmp address space mapping to hold the ntfs
+	  volume. This is needed so the async i/o completion handler can
+	  retrieve a pointer to the volume. Hopefully this will not cause
+	  problems elsewhere in the kernel... Otherwise will need to use a
+	  fake inode.
+	- Complete implementation of proper handling of allocated_size vs
+	  initialized_size vs data_size (i.e. i_size) in whole driver.
+	  Basically aops.c is now completely rewritten.
+	- Change NTFS driver name to just NTFS and set version number to 2.0.0
+	  to make a clear distinction from the old driver which is still on
+	  version 1.1.22.
+
+tng-0.0.8 - 08/03/2002 - Now using BitKeeper, http://linux-ntfs.bkbits.net/
+
+	- Replace bdevname(sb->s_dev) with sb->s_id.
+	- Remove now superfluous new-line characters in all callers of
+	  ntfs_debug().
+	- Apply kludge in ntfs_read_inode(), setting i_nlink to 1 for
+	  directories. Without this the "find" utility gets very upset which is
+	  fair enough as Linux/Unix do not support directory hard links.
+	- Further runlist merging work. (Richard Russon)
+	- Backwards compatibility for gcc-2.95. (Richard Russon)
+	- Update to kernel 2.5.5-pre1 and rediff the now tiny patch.
+	- Convert to new file system declaration using ->ntfs_get_sb() and
+	  replacing ntfs_read_super() with ntfs_fill_super().
+	- Set s_maxbytes to MAX_LFS_FILESIZE to avoid page cache page index
+	  overflow on 32-bit architectures.
+	- Cleanup upcase loading code to use ntfs_(un)map_page().
+	- Disable/reenable preemption in critical sections of the compression
+	  engine.
+	- Replace device size determination in ntfs_fill_super() with
+	  sb->s_bdev->bd_inode->i_size (in bytes) and remove now superfluous
+	  function super.c::get_nr_blocks().
+	- Implement a mount time option (show_inodes) allowing choice of which
+	  types of inode names readdir() returns and modify ntfs_filldir()
+	  accordingly. There are several parameters to show_inodes:
+		system:	system files
+		win32:	long file names (including POSIX file names) [DEFAULT]
+		long:	same as win32
+		dos:	short file names only (excluding POSIX file names)
+		short:	same as dos
+		posix:	same as both win32 and dos
+		all:	all file names
+	  Note that the options are additive, i.e. specifying:
+		-o show_inodes=system,show_inodes=win32,show_inodes=dos
+	  is the same as specifying:
+		-o show_inodes=all
+	  Note that the "posix" and "all" options will show all directory
+	  names, BUT the link count on each directory inode entry is set to 1,
+	  due to Linux not supporting directory hard links. This may well
+	  confuse some userspace applications, since the directory names will
+	  have the same inode numbers. Thus it is NOT advisable to use the
+	  "posix" or "all" options. We provide them only for completeness sake.
+	- Add copies of allocated_size, initialized_size, and compressed_size to
+	  the ntfs inode structure and set them up in
+	  inode.c::ntfs_read_inode(). These reflect the unnamed data attribute
+	  for files and the index allocation attribute for directories.
+	- Add copies of allocated_size and initialized_size to ntfs inode for
+	  $BITMAP attribute of large directories and set them up in
+	  inode.c::ntfs_read_inode().
+	- Add copies of allocated_size and initialized_size to ntfs volume for
+	  $BITMAP attribute of $MFT and set them up in
+	  super.c::load_system_files().
+	- Parse deprecated ntfs driver options (iocharset, show_sys_files,
+	  posix, and utf8) and tell user what the new options to use are. Note
+	  we still do support them but they will be removed with kernel 2.7.x.
+	- Change all occurrences of integer long long printf formatting to hex
+	  as printk() will not support long long integer format if/when the
+	  div64 patch goes into the kernel.
+	- Make slab caches have stable names and change the names to what they
+	  were intended to be. These changes are required/made possible by the
+	  new slab cache name handling which removes the length limitation by
+	  requiring the caller of kmem_cache_create() to supply a stable name
+	  which is then referenced but not copied.
+	- Rename run_list structure to run_list_element and create a new
+	  run_list structure containing a pointer to a run_list_element
+	  structure and a read/write semaphore. Adapt all users of runlists
+	  to new scheme and take and release the lock as needed. This fixes a
+	  nasty race as the run_list changes even when inodes are locked for
+	  reading and even when the inode isn't locked at all, so we really
+	  needed the serialization. We use a semaphore rather than a spinlock
+	  as memory allocations can sleep and doing everything GFP_ATOMIC
+	  would be silly.
+	- Cleanup read_inode() removing all code checking for lowest_vcn != 0.
+	  This can never happen due to the nature of lookup_attr() and how we
+	  support attribute lists. If it did happen it would imply the inode
+	  being corrupt.
+	- Check for lowest_vcn != 0 in ntfs_read_inode() and mark the inode as
+	  bad if found.
+	- Update to 2.5.6-pre2 changes in struct address_space.
+	- Use parent_ino() when accessing d_parent inode number in dir.c.
+	- Import Sourceforge CVS repository into BitKeeper repository:
+		http://linux-ntfs.bkbits.net/ntfs-tng-2.5
+	- Update fs/Makefile, fs/Config.help, fs/Config.in, and
+	  Documentation/filesystems/ntfs.txt for NTFS TNG.
+	- Create kernel configuration option controlling whether debugging
+	  is enabled or not.
+	- Add the required export of end_buffer_io_sync() from the patches
+	  directory to the kernel code.
+	- Update inode.c::ntfs_show_options() with show_inodes mount option.
+	- Update errors mount option.
+
+tng-0.0.7 - 13/02/2002 - The driver is now feature complete for read-only!
+
+	- Cleanup mft.c and its debug/error output in particular. Fix a minor
+	  bug in mapping of extent inodes. Update all the comments to fit all
+	  the recent code changes.
+	- Modify vcn_to_lcn() to cope with entirely unmapped runlists.
+	- Cleanups in compress.c, mostly comments and folding help.
+	- Implement attrib.c::map_run_list() as a generic helper.
+	- Make compress.c::ntfs_file_read_compressed_block() use map_run_list()
+	  thus making code shorter and enabling attribute list support.
+	- Cleanup incorrect use of [su]64 with %L printf format specifier in
+	  all source files. Type casts to [unsigned] long long added to correct
+	  the mismatches (important for architectures which have long long not
+	  being 64 bits).
+	- Merge async io completion handlers for directory indexes and $MFT
+	  data into one by setting the index_block_size{_bits} of the ntfs
+	  inode for $MFT to the mft_record_size{_bits} of the ntfs_volume.
+	- Cleanup aops.c, update comments.
+	- Make ntfs_file_get_block() use map_run_list() so all files now
+	  support attribute lists.
+	- Make ntfs_dir_readpage() almost verbatim copy of
+	  block_read_full_page() by using ntfs_file_get_block() with only real
+	  difference being the use of our own async io completion handler
+	  rather than the default one, thus reducing the amount of code and
+	  automatically enabling attribute list support for directory indices.
+	- Fix bug in load_attribute_list() - forgot to call brelse in error
+	  code path.
+	- Change parameters to find_attr() and lookup_attr(). We no longer
+	  pass in the upcase table and its length. These can be gotten from
+	  ctx->ntfs_ino->vol->upcase{_len}. Update all callers.
+	- Cleanups in attrib.c.
+	- Implement merging of runlists, attrib.c::merge_run_lists() and its
+	  helpers. (Richard Russon)
+	- Attribute lists part 2, attribute extents and multi part runlists:
+	  enable proper support for LCN_RL_NOT_MAPPED and automatic mapping of
+	  further runlist parts via attrib.c::map_run_list().
+	- Tiny endianness bug fix in decompress_mapping_pairs().
+
+tng-0.0.6 - Encrypted directories, bug fixes, cleanups, debugging enhancements.
+
+	- Enable encrypted directories. (Their index root is marked encrypted
+	  to indicate that new files in that directory should be created
+	  encrypted.)
+	- Fix bug in NInoBmpNonResident() macro. (Cut and paste error.)
+	- Enable $Extend system directory. Most (if not all) extended system
+	  files do not have unnamed data attributes so ntfs_read_inode() had to
+	  special case them but that is ok, as the special casing recovery
+	  happens inside an error code path so there is zero slow down in the
+	  normal fast path. The special casing is done by introducing a new
+	  function inode.c::ntfs_is_extended_system_file() which checks if any
+	  of the hard links in the inode point to $Extend as being their parent
+	  directory and if they do we assume this is an extended system file.
+	- Create a sysctl/proc interface to allow {dis,en}abling of debug output
+	  when compiled with -DDEBUG. Default is debug messages to be disabled.
+	  To enable them, one writes a non-zero value to /proc/sys/fs/ntfs-debug
+	  (if /proc is enabled) or uses sysctl(2) to effect the same (if sysctl
+	  interface is enabled). Inspired by old ntfs driver.
+	- Add debug_msgs insmod/kernel boot parameter to set whether debug
+	  messages are {dis,en}abled. This is useful to enable debug messages
+	  during ntfs initialization and is the only way to activate debugging
+	  when the sysctl interface is not enabled.
+	- Cleanup debug output in various places.
+	- Remove all dollar signs ($) from the source (except comments) to
+	  enable compilation on architectures whose gcc compiler does not
+	  support dollar signs in the names of variables/constants. Attribute
+	  types now start with AT_ instead of $ and $I30 is now just I30.
+	- Cleanup ntfs_lookup() and add consistency check of sequence numbers.
+	- Load complete runlist for $MFT/$BITMAP during mount and cleanup
+	  access functions. This means we now cope with $MFT/$BITMAP being
+	  spread across several mft records.
+	- Disable modification of mft_zone_multiplier on remount. We can always
+	  reenable this later on if we really want to, but we will need to make
+	  sure we readjust the mft_zone size / layout accordingly.
+
+tng-0.0.5 - Modernize for 2.5.x and further in line-ing with Al Viro's comments.
+
+	- Use sb_set_blocksize() instead of set_blocksize() and verify the
+	  return value.
+	- Use sb_bread() instead of bread() throughout.
+	- Add index_vcn_size{_bits} to ntfs_inode structure to store the size
+	  of a directory index block vcn. Apply resulting simplifications in
+	  dir.c everywhere.
+	- Fix a small bug somewhere (but forgot what it was).
+	- Change ntfs_{debug,error,warning} to enable gcc to do type checking
+	  on the printf-format parameter list (see the sketch after this list)
+	  and fix bugs reported by gcc as a result. (Richard Russon)
+	- Move the inode allocation strategy to Al's new stuff but maintain the
+	  divorce of ntfs_inode from struct inode. To achieve this we have two
+	  separate slab caches, one for big ntfs inodes containing a struct
+	  inode and one for pure ntfs inodes. At the same time, fix some faulty
+	  error code paths in ntfs_read_inode().
+	- Show mount options in proc (inode.c::ntfs_show_options()).
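+
+	The gcc type checking referred to above is the usual format attribute
+	mechanism; a hypothetical declaration (not the driver's exact
+	prototype) looks like this:
+
+		/* Check arguments against the printf-style format string. */
+		extern void ntfs_debug(const char *fmt, ...)
+			__attribute__ ((format (printf, 1, 2)));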
+
+tng-0.0.4 - Big changes, getting in line with Al Viro's comments.
+
+	- Modified the (un)map_mft_record functions to be common for the read
+	  and write cases. To specify which is which, added an extra parameter
+	  at the front of the parameter list. Pass either READ or WRITE to it;
+	  each has the obvious meaning.
+	- General cleanups to allow for easier folding in vi.
+	- attrib.c::decompress_mapping_pairs() now accepts the old runlist
+	  argument, and invokes attrib.c::merge_run_lists() to merge the old
+	  and the new runlists.
+	- Removed attrib.c::find_first_attr().
+	- Implemented loading of attribute list and complete runlist for $MFT.
+	  This means we now cope with $MFT being spread across several mft
+	  records.
+	- Adapt to 2.5.2-pre9 and the changed create_empty_buffers() syntax.
+	- Adapt major/minor/kdev_t/[bk]devname stuff to new 2.5.x kernels.
+	- Make ntfs_volume be allocated via kmalloc() instead of using a slab
+	  cache. There are too few ntfs_volume structures at any one time
+	  to justify a private slab cache.
+	- Fix bogus kmap() use in async io completion. Now use kmap_atomic().
+	  Use KM_BIO_IRQ on advice from IRC/kernel...
+	- Use ntfs_map_page() in map_mft_record() and create ->readpage method
+	  for reading $MFT (ntfs_mft_readpage). In the process create dedicated
+	  address space operations (ntfs_mft_aops) for $MFT inode mapping. Also
+	  removed the now superfluous exports from the kernel core patch.
+	- Fix a bug where kfree() was used instead of ntfs_free().
+	- Change map_mft_record() to take an ntfs_inode as argument instead of
+	  a vfs inode. Ditto for unmap_mft_record(). Adapt all callers.
+	- Add pointer to ntfs_volume to ntfs_inode.
+	- Add mft record number and sequence number to ntfs_inode. Stop using
+	  i_ino and i_generation for in-driver purposes.
+	- Implement attrib.c::merge_run_lists(). (Richard Russon)
+	- Remove use of proper inodes by extent inodes. Move i_ino and
+	  i_generation to ntfs_inode to do this. Apply simplifications that
+	  result and remove iget_no_wait(), etc.
+	- Pass ntfs_inode everywhere in the driver (used to be struct inode).
+	- Add reference counting in ntfs_inode for the ntfs inode itself and
+	  for the mapped mft record.
+	- Extend mft record mapping so we can (un)map extent mft records (new
+	  functions (un)map_extent_mft_record), and so mappings are reference
+	  counted and don't have to happen twice if already mapped - just the
+	  reference count increases (see the sketch after this list).
+	- Add -o iocharset as alias to -o nls for backwards compatibility.
+	- The latest core patch is now tiny. In fact just a single additional
+	  export is necessary over the base kernel.
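+
+	The mft record mapping pattern, as it ends up being used in aops.c
+	further down in this patch, is roughly the following (a sketch only;
+	the example_ function name is made up and the types come from the
+	driver headers and <linux/err.h>):
+
+		static int example_use_mft_record(ntfs_inode *ni)
+		{
+			/* Map, pin, and lock the mft record. */
+			MFT_RECORD *m = map_mft_record(ni);
+
+			if (IS_ERR(m))
+				return PTR_ERR(m);
+			/* ... read or modify the mft record via m ... */
+			/* Unlock, unpin, and unmap it again. */
+			unmap_mft_record(ni);
+			return 0;
+		}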
+
+tng-0.0.3 - Cleanups, enhancements, bug fixes.
+
+	- Work on attrib.c::decompress_mapping_pairs() to detect base extents
+	  and setup the runlist appropriately using knowledge provided by the
+	  sizes in the base attribute record.
+	- Balance the get_/put_attr_search_ctx() calls so we don't leak memory
+	  any more.
+	- Introduce ntfs_malloc_nofs() and ntfs_free() to allocate/free a single
+	  page or use vmalloc depending on the amount of memory requested (see
+	  the sketch after this list).
+	- Cleanup error output. The __FUNCTION__ "(): " is now added
+	  automatically. Introduced a new header file debug.h to support this
+	  and also moved ntfs_debug() function into it.
+	- Make reading of compressed files more intelligent and especially get
+	  rid of the vmalloc_nofs() from readpage(). This now uses per-CPU
+	  buffers (allocated at first mount with cluster size <= 4kiB and
+	  deallocated on last umount with cluster size <= 4kiB), and
+	  synchronous io for the compressed data using a list of buffer heads
+	  (async io only works on whole pages covered by buffers and not on
+	  individual buffer heads).
+	- Bug fix for reading compressed files with sparse compression blocks.
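+
+	The idea behind ntfs_malloc_nofs()/ntfs_free() is roughly the
+	following (a sketch, not the exact driver code; the _sketch names are
+	made up; needs <linux/slab.h> and <linux/vmalloc.h>):
+
+		/* Allocate a single page normally, use vmalloc for more. */
+		static inline void *ntfs_malloc_nofs_sketch(unsigned long size)
+		{
+			if (size <= PAGE_SIZE)
+				return kmalloc(PAGE_SIZE, GFP_NOFS);
+			return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM,
+					PAGE_KERNEL);
+		}
+
+		/* Free based on whether the address is in the vmalloc area. */
+		static inline void ntfs_free_sketch(void *addr)
+		{
+			if ((unsigned long)addr >= VMALLOC_START &&
+					(unsigned long)addr < VMALLOC_END)
+				vfree(addr);
+			else
+				kfree(addr);
+		}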
+
+tng-0.0.2 - Now handles larger/fragmented/compressed volumes/files/dirs.
+
+	- Fixed handling of directories when cluster size exceeds index block
+	  size.
+	- Hide DOS-only namespace directory entries from readdir() but allow
+	  them in lookup(). This should fix the problem that Linux doesn't
+	  support directory hard links, while still allowing access to entries
+	  via their short file name. This also has the benefit of mimicking
+	  what Windows users are used to, so it is the ideal solution.
+	- Implemented sync_page everywhere so no more hangs in D state when
+	  waiting for a page.
+	- Stop using bforget() in favour of brelse().
+	- Stop locking buffers unnecessarily.
+	- Implemented compressed files (inode->mapping contains uncompressed
+	  data, raw compressed data is currently bread() into a vmalloc()ed
+	  memory buffer).
+	- Enable compressed directories. (Their index root is marked compressed
+	  to indicate that new files in that directory should be created
+	  compressed.)
+	- Use vsnprintf rather than vsprintf in the ntfs_error and ntfs_warning
+	  functions. (Thanks to Will Dyson for pointing this out.)
+	- Moved the ntfs_inode and ntfs_volume (the former ntfs_inode_info and
+	  ntfs_sb_info) out of the common inode and super_block structures and
+	  started using the generic_ip and generic_sbp pointers instead. This
+	  makes ntfs entirely private with respect to the kernel tree.
+	- Detect the compiler version and abort with an error message if gcc
+	  older than 2.96 is used.
+	- Fix bug in name comparison function in unistr.c.
+	- Implement attribute lists part 1, the infrastructure: search contexts
+	  and operations (find_external_attr(), lookup_attr()) and make the
+	  code use the infrastructure.
+	- Fix stupid buffer overflow bug that became apparent on attributes
+	  containing larger runlists.
+	- Fix bugs in readdir() that became apparent on larger directories.
+
+	The driver is now really useful and survives the test
+		find . -type f -exec md5sum "{}" \;
+	without any error messages on an over 1GiB partition with >16k
+	files on it, including compressed files and directories and many files
+	and directories with attribute lists.
+
+tng-0.0.1 - The first useful version.
+
+	- Added ntfs_lookup().
+	- Added default upcase generation and handling.
+	- Added compile options to be shown on module init.
+	- Many bug fixes that were "hidden" before.
+	- Update to latest kernel.
+	- Added ntfs_readdir().
+	- Added file operations for mmap(), read(), open() and llseek(). We just
+	  use the generic ones. The whole point of implementing readpage()
+	  methods and, where possible, get_block() callbacks is that this
+	  allows us to make use of the generic high level methods provided by
+	  the kernel (see the sketch after this list).
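+
+	A sketch of what wiring up the generic methods looks like (2.6-era VFS
+	helper names; this is illustrative only - the example_ names are made
+	up and the driver's real tables live in file.c and aops.c):
+
+		static struct file_operations example_ntfs_file_ops = {
+			.llseek	= generic_file_llseek,
+			.read	= generic_file_read,
+			.mmap	= generic_file_mmap,
+			.open	= generic_file_open,
+		};
+
+		static struct address_space_operations example_ntfs_aops = {
+			/* Our own ->readpage() does the real work. */
+			.readpage = ntfs_readpage,
+		};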
+
+	The driver is now actually useful! Yey. (-: It undoubtedly has got bugs
+	though and it doesn't implement accessing compressed files yet. Also,
+	accessing files with attribute list attributes is not implemented yet
+	either. But for small or simple file systems it should work and allow
+	you to list directories, use stat on directory entries and the file
+	system, and open, read, mmap and llseek around in files. A big
+	milestone has been reached!
+
+tng-0.0.0 - Initial version tag.
+
+	Initial driver implementation. The driver can mount and umount simple
+	NTFS file systems (i.e. ones without attribute lists in the system
+	files). If the mount fails there might be problems in the error handling
+	code paths, so be warned. Otherwise it seems to be loading the system
+	files nicely and the mft record read mapping/unmapping seems to be
+	working nicely, too. The proof of concept for keeping inode metadata
+	and the non-resident, unnamed stream data of files in the page cache is
+	thus complete.
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
new file mode 100644
index 0000000..7b66381
--- /dev/null
+++ b/fs/ntfs/Makefile
@@ -0,0 +1,19 @@
+# Rules for making the NTFS driver.
+
+obj-$(CONFIG_NTFS_FS) += ntfs.o
+
+ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
+	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
+	     unistr.o upcase.o
+
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.22\"
+
+ifeq ($(CONFIG_NTFS_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+ifeq ($(CONFIG_NTFS_RW),y)
+EXTRA_CFLAGS += -DNTFS_RW
+
+ntfs-objs += bitmap.o lcnalloc.o logfile.o quota.o
+endif
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
new file mode 100644
index 0000000..45d56e4
--- /dev/null
+++ b/fs/ntfs/aops.c
@@ -0,0 +1,2324 @@
+/**
+ * aops.c - NTFS kernel address space operations and page cache handling.
+ *	    Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+
+#include "aops.h"
+#include "attrib.h"
+#include "debug.h"
+#include "inode.h"
+#include "mft.h"
+#include "runlist.h"
+#include "types.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_end_buffer_async_read - async io completion for reading attributes
+ * @bh:		buffer head on which io is completed
+ * @uptodate:	whether @bh is now uptodate or not
+ *
+ * Asynchronous I/O completion handler for reading pages belonging to the
+ * attribute address space of an inode.  The inodes can either be files or
+ * directories or they can be fake inodes describing some attribute.
+ *
+ * If NInoMstProtected(), perform the post read mst fixups when all IO on the
+ * page has been completed and mark the page uptodate or set the error bit on
+ * the page.  To determine the size of the records that need fixing up, we
+ * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
+ * record size, and index_block_size_bits, to the log(base 2) of the ntfs
+ * record size.
+ */
+static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+{
+	static DEFINE_SPINLOCK(page_uptodate_lock);
+	unsigned long flags;
+	struct buffer_head *tmp;
+	struct page *page;
+	ntfs_inode *ni;
+	int page_uptodate = 1;
+
+	page = bh->b_page;
+	ni = NTFS_I(page->mapping->host);
+
+	if (likely(uptodate)) {
+		s64 file_ofs;
+
+		set_buffer_uptodate(bh);
+
+		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+				bh_offset(bh);
+		/* Check for the current buffer head overflowing. */
+		if (file_ofs + bh->b_size > ni->initialized_size) {
+			char *addr;
+			int ofs = 0;
+
+			if (file_ofs < ni->initialized_size)
+				ofs = ni->initialized_size - file_ofs;
+			addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
+			flush_dcache_page(page);
+			kunmap_atomic(addr, KM_BIO_SRC_IRQ);
+		}
+	} else {
+		clear_buffer_uptodate(bh);
+		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
+				(unsigned long long)bh->b_blocknr);
+		SetPageError(page);
+	}
+	spin_lock_irqsave(&page_uptodate_lock, flags);
+	clear_buffer_async_read(bh);
+	unlock_buffer(bh);
+	tmp = bh;
+	do {
+		if (!buffer_uptodate(tmp))
+			page_uptodate = 0;
+		if (buffer_async_read(tmp)) {
+			if (likely(buffer_locked(tmp)))
+				goto still_busy;
+			/* Async buffers must be locked. */
+			BUG();
+		}
+		tmp = tmp->b_this_page;
+	} while (tmp != bh);
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	/*
+	 * If none of the buffers had errors then we can set the page uptodate,
+	 * but we first have to perform the post read mst fixups, if the
+	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
+	 * Note we ignore fixup errors as those are detected when
+	 * map_mft_record() is called which gives us per record granularity
+	 * rather than per page granularity.
+	 */
+	if (!NInoMstProtected(ni)) {
+		if (likely(page_uptodate && !PageError(page)))
+			SetPageUptodate(page);
+	} else {
+		char *addr;
+		unsigned int i, recs;
+		u32 rec_size;
+
+		rec_size = ni->itype.index.block_size;
+		recs = PAGE_CACHE_SIZE / rec_size;
+		/* Should have been verified before we got here... */
+		BUG_ON(!recs);
+		addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		for (i = 0; i < recs; i++)
+			post_read_mst_fixup((NTFS_RECORD*)(addr +
+					i * rec_size), rec_size);
+		flush_dcache_page(page);
+		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
+		if (likely(!PageError(page) && page_uptodate))
+			SetPageUptodate(page);
+	}
+	unlock_page(page);
+	return;
+still_busy:
+	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	return;
+}
+
+/**
+ * ntfs_read_block - fill a @page of an address space with data
+ * @page:	page cache page to fill with data
+ *
+ * Fill the page @page of the address space belonging to the @page->host inode.
+ * We read each buffer asynchronously and when all buffers are read in, our io
+ * completion handler ntfs_end_buffer_async_read(), if required, automatically
+ * applies the mst fixups to the page before finally marking it uptodate and
+ * unlocking it.
+ *
+ * We only enforce allocated_size limit because i_size is checked for in
+ * generic_file_read().
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Contains an adapted version of fs/buffer.c::block_read_full_page().
+ */
+static int ntfs_read_block(struct page *page)
+{
+	VCN vcn;
+	LCN lcn;
+	ntfs_inode *ni;
+	ntfs_volume *vol;
+	runlist_element *rl;
+	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+	sector_t iblock, lblock, zblock;
+	unsigned int blocksize, vcn_ofs;
+	int i, nr;
+	unsigned char blocksize_bits;
+
+	ni = NTFS_I(page->mapping->host);
+	vol = ni->vol;
+
+	/* $MFT/$DATA must have its complete runlist in memory at all times. */
+	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
+
+	blocksize_bits = VFS_I(ni)->i_blkbits;
+	blocksize = 1 << blocksize_bits;
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+	bh = head = page_buffers(page);
+	if (unlikely(!bh)) {
+		unlock_page(page);
+		return -ENOMEM;
+	}
+
+	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
+	zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
+
+	/* Loop through all the buffers in the page. */
+	rl = NULL;
+	nr = i = 0;
+	do {
+		u8 *kaddr;
+
+		if (unlikely(buffer_uptodate(bh)))
+			continue;
+		if (unlikely(buffer_mapped(bh))) {
+			arr[nr++] = bh;
+			continue;
+		}
+		bh->b_bdev = vol->sb->s_bdev;
+		/* Is the block within the allowed limits? */
+		if (iblock < lblock) {
+			BOOL is_retry = FALSE;
+
+			/* Convert iblock into corresponding vcn and offset. */
+			vcn = (VCN)iblock << blocksize_bits >>
+					vol->cluster_size_bits;
+			vcn_ofs = ((VCN)iblock << blocksize_bits) &
+					vol->cluster_size_mask;
+			if (!rl) {
+lock_retry_remap:
+				down_read(&ni->runlist.lock);
+				rl = ni->runlist.rl;
+			}
+			if (likely(rl != NULL)) {
+				/* Seek to element containing target vcn. */
+				while (rl->length && rl[1].vcn <= vcn)
+					rl++;
+				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			} else
+				lcn = LCN_RL_NOT_MAPPED;
+			/* Successful remap. */
+			if (lcn >= 0) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+						+ vcn_ofs) >> blocksize_bits;
+				set_buffer_mapped(bh);
+				/* Only read initialized data blocks. */
+				if (iblock < zblock) {
+					arr[nr++] = bh;
+					continue;
+				}
+				/* Fully non-initialized data block, zero it. */
+				goto handle_zblock;
+			}
+			/* It is a hole, need to zero it. */
+			if (lcn == LCN_HOLE)
+				goto handle_hole;
+			/* If first try and runlist unmapped, map and retry. */
+			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
+				int err;
+				is_retry = TRUE;
+				/*
+				 * Attempt to map runlist, dropping lock for
+				 * the duration.
+				 */
+				up_read(&ni->runlist.lock);
+				err = ntfs_map_runlist(ni, vcn);
+				if (likely(!err))
+					goto lock_retry_remap;
+				rl = NULL;
+				lcn = err;
+			}
+			/* Hard error, zero out region. */
+			bh->b_blocknr = -1;
+			SetPageError(page);
+			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
+					"attribute type 0x%x, vcn 0x%llx, "
+					"offset 0x%x because its location on "
+					"disk could not be determined%s "
+					"(error code %lli).", ni->mft_no,
+					ni->type, (unsigned long long)vcn,
+					vcn_ofs, is_retry ? " even after "
+					"retrying" : "", (long long)lcn);
+		}
+		/*
+		 * Either iblock was outside lblock limits or
+		 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
+		 * of the page and set the buffer uptodate.
+		 */
+handle_hole:
+		bh->b_blocknr = -1UL;
+		clear_buffer_mapped(bh);
+handle_zblock:
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + i * blocksize, 0, blocksize);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		set_buffer_uptodate(bh);
+	} while (i++, iblock++, (bh = bh->b_this_page) != head);
+
+	/* Release the lock if we took it. */
+	if (rl)
+		up_read(&ni->runlist.lock);
+
+	/* Check we have at least one buffer ready for i/o. */
+	if (nr) {
+		struct buffer_head *tbh;
+
+		/* Lock the buffers. */
+		for (i = 0; i < nr; i++) {
+			tbh = arr[i];
+			lock_buffer(tbh);
+			tbh->b_end_io = ntfs_end_buffer_async_read;
+			set_buffer_async_read(tbh);
+		}
+		/* Finally, start i/o on the buffers. */
+		for (i = 0; i < nr; i++) {
+			tbh = arr[i];
+			if (likely(!buffer_uptodate(tbh)))
+				submit_bh(READ, tbh);
+			else
+				ntfs_end_buffer_async_read(tbh, 1);
+		}
+		return 0;
+	}
+	/* No i/o was scheduled on any of the buffers. */
+	if (likely(!PageError(page)))
+		SetPageUptodate(page);
+	else /* Signal synchronous i/o error. */
+		nr = -EIO;
+	unlock_page(page);
+	return nr;
+}
+
+/**
+ * ntfs_readpage - fill a @page of a @file with data from the device
+ * @file:	open file to which the page @page belongs or NULL
+ * @page:	page cache page to fill with data
+ *
+ * For non-resident attributes, ntfs_readpage() fills the @page of the open
+ * file @file by calling the ntfs version of the generic block_read_full_page()
+ * function, ntfs_read_block(), which in turn creates and reads in the buffers
+ * associated with the page asynchronously.
+ *
+ * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
+ * data from the mft record (which at this stage is most likely in memory) and
+ * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
+ * even if the mft record is not cached at this point in time, we need to wait
+ * for it to be read in before we can do the copy.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_readpage(struct file *file, struct page *page)
+{
+	loff_t i_size;
+	ntfs_inode *ni, *base_ni;
+	u8 *kaddr;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *mrec;
+	u32 attr_len;
+	int err = 0;
+
+	BUG_ON(!PageLocked(page));
+	/*
+	 * This can potentially happen because we clear PageUptodate() during
+	 * ntfs_writepage() of MstProtected() attributes.
+	 */
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		return 0;
+	}
+	ni = NTFS_I(page->mapping->host);
+
+	/* NInoNonResident() == NInoIndexAllocPresent() */
+	if (NInoNonResident(ni)) {
+		/*
+		 * Only unnamed $DATA attributes can be compressed or
+		 * encrypted.
+		 */
+		if (ni->type == AT_DATA && !ni->name_len) {
+			/* If file is encrypted, deny access, just like NT4. */
+			if (NInoEncrypted(ni)) {
+				err = -EACCES;
+				goto err_out;
+			}
+			/* Compressed data streams are handled in compress.c. */
+			if (NInoCompressed(ni))
+				return ntfs_read_compressed_block(page);
+		}
+		/* Normal data stream. */
+		return ntfs_read_block(page);
+	}
+	/*
+	 * Attribute is resident, implying it is not compressed or encrypted.
+	 * This also means the attribute is smaller than an mft record and
+	 * hence smaller than a page, so can simply zero out any pages with
+	 * index above 0.  We can also do this if the file size is 0.
+	 */
+	if (unlikely(page->index > 0 || !i_size_read(VFS_I(ni)))) {
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		goto done;
+	}
+	if (!NInoAttr(ni))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+	/* Map, pin, and lock the mft record. */
+	mrec = map_mft_record(base_ni);
+	if (IS_ERR(mrec)) {
+		err = PTR_ERR(mrec);
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto unm_err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err))
+		goto put_unm_err_out;
+	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
+	i_size = i_size_read(VFS_I(ni));
+	if (unlikely(attr_len > i_size))
+		attr_len = i_size;
+	kaddr = kmap_atomic(page, KM_USER0);
+	/* Copy the data to the page. */
+	memcpy(kaddr, (u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset),
+			attr_len);
+	/* Zero the remainder of the page. */
+	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+put_unm_err_out:
+	ntfs_attr_put_search_ctx(ctx);
+unm_err_out:
+	unmap_mft_record(base_ni);
+done:
+	SetPageUptodate(page);
+err_out:
+	unlock_page(page);
+	return err;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_write_block - write a @page to the backing store
+ * @page:	page cache page to write out
+ * @wbc:	writeback control structure
+ *
+ * This function is for writing pages belonging to non-resident, non-mst
+ * protected attributes to their backing store.
+ *
+ * For a page with buffers, map and write the dirty buffers asynchronously
+ * under page writeback. For a page without buffers, create buffers for the
+ * page, then proceed as above.
+ *
+ * If a page doesn't have buffers the page dirty state is definitive. If a page
+ * does have buffers, the page dirty state is just a hint, and the buffer dirty
+ * state is definitive. (A hint which has rules: dirty buffers against a clean
+ * page is illegal. Other combinations are legal and need to be handled. In
+ * particular, a dirty page containing clean buffers.)
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Based on ntfs_read_block() and __block_write_full_page().
+ */
+static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
+{
+	VCN vcn;
+	LCN lcn;
+	sector_t block, dblock, iblock;
+	struct inode *vi;
+	ntfs_inode *ni;
+	ntfs_volume *vol;
+	runlist_element *rl;
+	struct buffer_head *bh, *head;
+	unsigned int blocksize, vcn_ofs;
+	int err;
+	BOOL need_end_writeback;
+	unsigned char blocksize_bits;
+
+	vi = page->mapping->host;
+	ni = NTFS_I(vi);
+	vol = ni->vol;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx.", ni->mft_no, ni->type, page->index);
+
+	BUG_ON(!NInoNonResident(ni));
+	BUG_ON(NInoMstProtected(ni));
+
+	blocksize_bits = vi->i_blkbits;
+	blocksize = 1 << blocksize_bits;
+
+	if (!page_has_buffers(page)) {
+		BUG_ON(!PageUptodate(page));
+		create_empty_buffers(page, blocksize,
+				(1 << BH_Uptodate) | (1 << BH_Dirty));
+	}
+	bh = head = page_buffers(page);
+	if (unlikely(!bh)) {
+		ntfs_warning(vol->sb, "Error allocating page buffers. "
+				"Redirtying page so we try again later.");
+		/*
+		 * Put the page back on mapping->dirty_pages, but leave its
+		 * buffer's dirty state as-is.
+		 */
+		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
+		return 0;
+	}
+
+	/* NOTE: Different naming scheme to ntfs_read_block()! */
+
+	/* The first block in the page. */
+	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+
+	/* The first out of bounds block for the data size. */
+	dblock = (vi->i_size + blocksize - 1) >> blocksize_bits;
+
+	/* The last (fully or partially) initialized block. */
+	iblock = ni->initialized_size >> blocksize_bits;
+
+	/*
+	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * here, and the (potentially unmapped) buffers may become dirty at
+	 * any time.  If a buffer becomes dirty here after we've inspected it
+	 * then we just miss that fact, and the page stays dirty.
+	 *
+	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * handle that here by just cleaning them.
+	 */
+
+	/*
+	 * Loop through all the buffers in the page, mapping all the dirty
+	 * buffers to disk addresses and handling any aliases from the
+	 * underlying block device's mapping.
+	 */
+	rl = NULL;
+	err = 0;
+	do {
+		BOOL is_retry = FALSE;
+
+		if (unlikely(block >= dblock)) {
+			/*
+			 * Mapped buffers outside i_size will occur, because
+			 * this page can be outside i_size when there is a
+			 * truncate in progress. The contents of such buffers
+			 * were zeroed by ntfs_writepage().
+			 *
+			 * FIXME: What about the small race window where
+			 * ntfs_writepage() has not done any clearing because
+			 * the page was within i_size but before we get here,
+			 * vmtruncate() modifies i_size?
+			 */
+			clear_buffer_dirty(bh);
+			set_buffer_uptodate(bh);
+			continue;
+		}
+
+		/* Clean buffers are not written out, so no need to map them. */
+		if (!buffer_dirty(bh))
+			continue;
+
+		/* Make sure we have enough initialized size. */
+		if (unlikely((block >= iblock) &&
+				(ni->initialized_size < vi->i_size))) {
+			/*
+			 * If this page is fully outside initialized size, zero
+			 * out all pages between the current initialized size
+			 * and the current page. Just use ntfs_readpage() to do
+			 * the zeroing transparently.
+			 */
+			if (block > iblock) {
+				// TODO:
+				// For each page do:
+				// - read_cache_page()
+				// Again for each page do:
+				// - wait_on_page_locked()
+				// - Check (PageUptodate(page) &&
+				//			!PageError(page))
+				// Update initialized size in the attribute and
+				// in the inode.
+				// Again, for each page do:
+				//	__set_page_dirty_buffers();
+				// page_cache_release()
+				// We don't need to wait on the writes.
+				// Update iblock.
+			}
+			/*
+			 * The current page straddles initialized size. Zero
+			 * all non-uptodate buffers and set them uptodate (and
+			 * dirty?). Note, there aren't any non-uptodate buffers
+			 * if the page is uptodate.
+			 * FIXME: For an uptodate page, the buffers may need to
+			 * be written out because they were not initialized on
+			 * disk before.
+			 */
+			if (!PageUptodate(page)) {
+				// TODO:
+				// Zero any non-uptodate buffers up to i_size.
+				// Set them uptodate and dirty.
+			}
+			// TODO:
+			// Update initialized size in the attribute and in the
+			// inode (up to i_size).
+			// Update iblock.
+			// FIXME: This is inefficient. Try to batch the two
+			// size changes to happen in one go.
+			ntfs_error(vol->sb, "Writing beyond initialized size "
+					"is not supported yet. Sorry.");
+			err = -EOPNOTSUPP;
+			break;
+			// Do NOT set_buffer_new() BUT DO clear buffer range
+			// outside write request range.
+			// set_buffer_uptodate() on complete buffers as well as
+			// set_buffer_dirty().
+		}
+
+		/* No need to map buffers that are already mapped. */
+		if (buffer_mapped(bh))
+			continue;
+
+		/* Unmapped, dirty buffer. Need to map it. */
+		bh->b_bdev = vol->sb->s_bdev;
+
+		/* Convert block into corresponding vcn and offset. */
+		vcn = (VCN)block << blocksize_bits;
+		vcn_ofs = vcn & vol->cluster_size_mask;
+		vcn >>= vol->cluster_size_bits;
+		if (!rl) {
+lock_retry_remap:
+			down_read(&ni->runlist.lock);
+			rl = ni->runlist.rl;
+		}
+		if (likely(rl != NULL)) {
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+		} else
+			lcn = LCN_RL_NOT_MAPPED;
+		/* Successful remap. */
+		if (lcn >= 0) {
+			/* Setup buffer head to point to correct block. */
+			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
+					vcn_ofs) >> blocksize_bits;
+			set_buffer_mapped(bh);
+			continue;
+		}
+		/* It is a hole, need to instantiate it. */
+		if (lcn == LCN_HOLE) {
+			// TODO: Instantiate the hole.
+			// clear_buffer_new(bh);
+			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			ntfs_error(vol->sb, "Writing into sparse regions is "
+					"not supported yet. Sorry.");
+			err = -EOPNOTSUPP;
+			break;
+		}
+		/* If first try and runlist unmapped, map and retry. */
+		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
+			is_retry = TRUE;
+			/*
+			 * Attempt to map runlist, dropping lock for
+			 * the duration.
+			 */
+			up_read(&ni->runlist.lock);
+			err = ntfs_map_runlist(ni, vcn);
+			if (likely(!err))
+				goto lock_retry_remap;
+			rl = NULL;
+			lcn = err;
+		}
+		/* Failed to map the buffer, even after retrying. */
+		bh->b_blocknr = -1;
+		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
+				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
+				"because its location on disk could not be "
+				"determined%s (error code %lli).", ni->mft_no,
+				ni->type, (unsigned long long)vcn,
+				vcn_ofs, is_retry ? " even after "
+				"retrying" : "", (long long)lcn);
+		if (!err)
+			err = -EIO;
+		break;
+	} while (block++, (bh = bh->b_this_page) != head);
+
+	/* Release the lock if we took it. */
+	if (rl)
+		up_read(&ni->runlist.lock);
+
+	/* For the error case, need to reset bh to the beginning. */
+	bh = head;
+
+	/* Just an optimization, so ->readpage() isn't called later. */
+	if (unlikely(!PageUptodate(page))) {
+		int uptodate = 1;
+		do {
+			if (!buffer_uptodate(bh)) {
+				uptodate = 0;
+				bh = head;
+				break;
+			}
+		} while ((bh = bh->b_this_page) != head);
+		if (uptodate)
+			SetPageUptodate(page);
+	}
+
+	/* Setup all mapped, dirty buffers for async write i/o. */
+	do {
+		get_bh(bh);
+		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+			lock_buffer(bh);
+			if (test_clear_buffer_dirty(bh)) {
+				BUG_ON(!buffer_uptodate(bh));
+				mark_buffer_async_write(bh);
+			} else
+				unlock_buffer(bh);
+		} else if (unlikely(err)) {
+			/*
+			 * For the error case. The buffer may have been set
+			 * dirty during attachment to a dirty page.
+			 */
+			if (err != -ENOMEM)
+				clear_buffer_dirty(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+
+	if (unlikely(err)) {
+		// TODO: Remove the -EOPNOTSUPP check later on...
+		if (unlikely(err == -EOPNOTSUPP))
+			err = 0;
+		else if (err == -ENOMEM) {
+			ntfs_warning(vol->sb, "Error allocating memory. "
+					"Redirtying page so we try again "
+					"later.");
+			/*
+			 * Put the page back on mapping->dirty_pages, but
+			 * leave its buffer's dirty state as-is.
+			 */
+			redirty_page_for_writepage(wbc, page);
+			err = 0;
+		} else
+			SetPageError(page);
+	}
+
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
+	unlock_page(page);
+
+	/*
+	 * Submit the prepared buffers for i/o. Note the page is unlocked,
+	 * and the async write i/o completion handler can end_page_writeback()
+	 * at any time after the *first* submit_bh(). So the buffers can then
+	 * disappear...
+	 */
+	need_end_writeback = TRUE;
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			submit_bh(WRITE, bh);
+			need_end_writeback = FALSE;
+		}
+		put_bh(bh);
+		bh = next;
+	} while (bh != head);
+
+	/* If no i/o was started, need to end_page_writeback(). */
+	if (unlikely(need_end_writeback))
+		end_page_writeback(page);
+
+	ntfs_debug("Done.");
+	return err;
+}
+
+/**
+ * ntfs_write_mst_block - write a @page to the backing store
+ * @page:	page cache page to write out
+ * @wbc:	writeback control structure
+ *
+ * This function is for writing pages belonging to non-resident, mst protected
+ * attributes to their backing store.  The only supported attributes are index
+ * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
+ * supported for the index allocation case.
+ *
+ * The page must remain locked for the duration of the write because we apply
+ * the mst fixups, write, and then undo the fixups, so if we were to unlock the
+ * page before undoing the fixups, any other user of the page will see the
+ * page contents as corrupt.
+ *
+ * We clear the page uptodate flag for the duration of the function to ensure
+ * exclusion for the $MFT/$DATA case against someone mapping an mft record we
+ * are about to apply the mst fixups to.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Based on ntfs_write_block(), ntfs_mft_writepage(), and
+ * write_mft_record_nolock().
+ */
+static int ntfs_write_mst_block(struct page *page,
+		struct writeback_control *wbc)
+{
+	sector_t block, dblock, rec_block;
+	struct inode *vi = page->mapping->host;
+	ntfs_inode *ni = NTFS_I(vi);
+	ntfs_volume *vol = ni->vol;
+	u8 *kaddr;
+	unsigned char bh_size_bits = vi->i_blkbits;
+	unsigned int bh_size = 1 << bh_size_bits;
+	unsigned int rec_size = ni->itype.index.block_size;
+	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
+	int max_bhs = PAGE_CACHE_SIZE / bh_size;
+	struct buffer_head *bhs[max_bhs];
+	runlist_element *rl;
+	int i, nr_locked_nis, nr_recs, nr_bhs, bhs_per_rec, err, err2;
+	unsigned rec_size_bits;
+	BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx.", vi->i_ino, ni->type, page->index);
+	BUG_ON(!NInoNonResident(ni));
+	BUG_ON(!NInoMstProtected(ni));
+	is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
+	/*
+	 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
+	 * in its page cache were to be marked dirty.  However this should
+	 * never happen with the current driver and considering we do not
+	 * handle this case here we do want to BUG(), at least for now.
+	 */
+	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
+			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
+	BUG_ON(!max_bhs);
+
+	/* Were we called for sync purposes? */
+	sync = (wbc->sync_mode == WB_SYNC_ALL);
+
+	/* Make sure we have mapped buffers. */
+	BUG_ON(!page_has_buffers(page));
+	bh = head = page_buffers(page);
+	BUG_ON(!bh);
+
+	rec_size_bits = ni->itype.index.block_size_bits;
+	BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
+	bhs_per_rec = rec_size >> bh_size_bits;
+	BUG_ON(!bhs_per_rec);
+
+	/* The first block in the page. */
+	rec_block = block = (sector_t)page->index <<
+			(PAGE_CACHE_SHIFT - bh_size_bits);
+
+	/* The first out of bounds block for the data size. */
+	dblock = (vi->i_size + bh_size - 1) >> bh_size_bits;
+
+	rl = NULL;
+	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
+	page_is_dirty = rec_is_dirty = FALSE;
+	rec_start_bh = NULL;
+	do {
+		BOOL is_retry = FALSE;
+
+		if (likely(block < rec_block)) {
+			if (unlikely(block >= dblock)) {
+				clear_buffer_dirty(bh);
+				continue;
+			}
+			/*
+			 * This block is not the first one in the record.  We
+			 * ignore the buffer's dirty state because we could
+			 * have raced with a parallel mark_ntfs_record_dirty().
+			 */
+			if (!rec_is_dirty)
+				continue;
+			if (unlikely(err2)) {
+				if (err2 != -ENOMEM)
+					clear_buffer_dirty(bh);
+				continue;
+			}
+		} else /* if (block == rec_block) */ {
+			BUG_ON(block > rec_block);
+			/* This block is the first one in the record. */
+			rec_block += bhs_per_rec;
+			err2 = 0;
+			if (unlikely(block >= dblock)) {
+				clear_buffer_dirty(bh);
+				continue;
+			}
+			if (!buffer_dirty(bh)) {
+				/* Clean records are not written out. */
+				rec_is_dirty = FALSE;
+				continue;
+			}
+			rec_is_dirty = TRUE;
+			rec_start_bh = bh;
+		}
+		/* Need to map the buffer if it is not mapped already. */
+		if (unlikely(!buffer_mapped(bh))) {
+			VCN vcn;
+			LCN lcn;
+			unsigned int vcn_ofs;
+
+			/* Obtain the vcn and offset of the current block. */
+			vcn = (VCN)block << bh_size_bits;
+			vcn_ofs = vcn & vol->cluster_size_mask;
+			vcn >>= vol->cluster_size_bits;
+			if (!rl) {
+lock_retry_remap:
+				down_read(&ni->runlist.lock);
+				rl = ni->runlist.rl;
+			}
+			if (likely(rl != NULL)) {
+				/* Seek to element containing target vcn. */
+				while (rl->length && rl[1].vcn <= vcn)
+					rl++;
+				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			} else
+				lcn = LCN_RL_NOT_MAPPED;
+			/* Successful remap. */
+			if (likely(lcn >= 0)) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn <<
+						vol->cluster_size_bits) +
+						vcn_ofs) >> bh_size_bits;
+				set_buffer_mapped(bh);
+			} else {
+				/*
+				 * Remap failed.  Retry to map the runlist once
+				 * unless we are working on $MFT which always
+				 * has the whole of its runlist in memory.
+				 */
+				if (!is_mft && !is_retry &&
+						lcn == LCN_RL_NOT_MAPPED) {
+					is_retry = TRUE;
+					/*
+					 * Attempt to map runlist, dropping
+					 * lock for the duration.
+					 */
+					up_read(&ni->runlist.lock);
+					err2 = ntfs_map_runlist(ni, vcn);
+					if (likely(!err2))
+						goto lock_retry_remap;
+					if (err2 == -ENOMEM)
+						page_is_dirty = TRUE;
+					lcn = err2;
+				} else
+					err2 = -EIO;
+				/* Hard error.  Abort writing this record. */
+				if (!err || err == -ENOMEM)
+					err = err2;
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Cannot write ntfs record "
+						"0x%llx (inode 0x%lx, "
+						"attribute type 0x%x) because "
+						"its location on disk could "
+						"not be determined (error "
+						"code %lli).", (s64)block <<
+						bh_size_bits >>
+						vol->mft_record_size_bits,
+						ni->mft_no, ni->type,
+						(long long)lcn);
+				/*
+				 * If this is not the first buffer, remove the
+				 * buffers in this record from the list of
+				 * buffers to write and clear their dirty bit
+				 * if not error -ENOMEM.
+				 */
+				if (rec_start_bh != bh) {
+					while (bhs[--nr_bhs] != rec_start_bh)
+						;
+					if (err2 != -ENOMEM) {
+						do {
+							clear_buffer_dirty(
+								rec_start_bh);
+						} while ((rec_start_bh =
+								rec_start_bh->
+								b_this_page) !=
+								bh);
+					}
+				}
+				continue;
+			}
+		}
+		BUG_ON(!buffer_uptodate(bh));
+		BUG_ON(nr_bhs >= max_bhs);
+		bhs[nr_bhs++] = bh;
+	} while (block++, (bh = bh->b_this_page) != head);
+	if (unlikely(rl))
+		up_read(&ni->runlist.lock);
+	/* If there were no dirty buffers, we are done. */
+	if (!nr_bhs)
+		goto done;
+	/* Map the page so we can access its contents. */
+	kaddr = kmap(page);
+	/* Clear the page uptodate flag whilst the mst fixups are applied. */
+	BUG_ON(!PageUptodate(page));
+	ClearPageUptodate(page);
+	for (i = 0; i < nr_bhs; i++) {
+		unsigned int ofs;
+
+		/* Skip buffers which are not at the beginning of records. */
+		if (i % bhs_per_rec)
+			continue;
+		tbh = bhs[i];
+		ofs = bh_offset(tbh);
+		if (is_mft) {
+			ntfs_inode *tni;
+			unsigned long mft_no;
+
+			/* Get the mft record number. */
+			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+					>> rec_size_bits;
+			/* Check whether to write this mft record. */
+			tni = NULL;
+			if (!ntfs_may_write_mft_record(vol, mft_no,
+					(MFT_RECORD*)(kaddr + ofs), &tni)) {
+				/*
+				 * The record should not be written.  This
+				 * means we need to redirty the page before
+				 * returning.
+				 */
+				page_is_dirty = TRUE;
+				/*
+				 * Remove the buffers in this mft record from
+				 * the list of buffers to write.
+				 */
+				do {
+					bhs[i] = NULL;
+				} while (++i % bhs_per_rec);
+				continue;
+			}
+			/*
+			 * The record should be written.  If a locked ntfs
+			 * inode was returned, add it to the array of locked
+			 * ntfs inodes.
+			 */
+			if (tni)
+				locked_nis[nr_locked_nis++] = tni;
+		}
+		/* Apply the mst protection fixups. */
+		err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
+				rec_size);
+		if (unlikely(err2)) {
+			if (!err || err == -ENOMEM)
+				err = -EIO;
+			ntfs_error(vol->sb, "Failed to apply mst fixups "
+					"(inode 0x%lx, attribute type 0x%x, "
+					"page index 0x%lx, page offset 0x%x)!"
+					"  Unmount and run chkdsk.", vi->i_ino,
+					ni->type, page->index, ofs);
+			/*
+			 * Mark all the buffers in this record clean as we do
+			 * not want to write corrupt data to disk.
+			 */
+			do {
+				clear_buffer_dirty(bhs[i]);
+				bhs[i] = NULL;
+			} while (++i % bhs_per_rec);
+			continue;
+		}
+		nr_recs++;
+	}
+	/* If no records are to be written out, we are done. */
+	if (!nr_recs)
+		goto unm_done;
+	flush_dcache_page(page);
+	/* Lock buffers and start synchronous write i/o on them. */
+	for (i = 0; i < nr_bhs; i++) {
+		tbh = bhs[i];
+		if (!tbh)
+			continue;
+		if (unlikely(test_set_buffer_locked(tbh)))
+			BUG();
+		/* The buffer dirty state is now irrelevant, just clean it. */
+		clear_buffer_dirty(tbh);
+		BUG_ON(!buffer_uptodate(tbh));
+		BUG_ON(!buffer_mapped(tbh));
+		get_bh(tbh);
+		tbh->b_end_io = end_buffer_write_sync;
+		submit_bh(WRITE, tbh);
+	}
+	/* Synchronize the mft mirror now if not @sync. */
+	if (is_mft && !sync)
+		goto do_mirror;
+do_wait:
+	/* Wait on i/o completion of buffers. */
+	for (i = 0; i < nr_bhs; i++) {
+		tbh = bhs[i];
+		if (!tbh)
+			continue;
+		wait_on_buffer(tbh);
+		if (unlikely(!buffer_uptodate(tbh))) {
+			ntfs_error(vol->sb, "I/O error while writing ntfs "
+					"record buffer (inode 0x%lx, "
+					"attribute type 0x%x, page index "
+					"0x%lx, page offset 0x%lx)!  Unmount "
+					"and run chkdsk.", vi->i_ino, ni->type,
+					page->index, bh_offset(tbh));
+			if (!err || err == -ENOMEM)
+				err = -EIO;
+			/*
+			 * Set the buffer uptodate so the page and buffer
+			 * states do not become out of sync.
+			 */
+			set_buffer_uptodate(tbh);
+		}
+	}
+	/* If @sync, now synchronize the mft mirror. */
+	if (is_mft && sync) {
+do_mirror:
+		for (i = 0; i < nr_bhs; i++) {
+			unsigned long mft_no;
+			unsigned int ofs;
+
+			/*
+			 * Skip buffers which are not at the beginning of
+			 * records.
+			 */
+			if (i % bhs_per_rec)
+				continue;
+			tbh = bhs[i];
+			/* Skip removed buffers (and hence records). */
+			if (!tbh)
+				continue;
+			ofs = bh_offset(tbh);
+			/* Get the mft record number. */
+			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+					>> rec_size_bits;
+			if (mft_no < vol->mftmirr_size)
+				ntfs_sync_mft_mirror(vol, mft_no,
+						(MFT_RECORD*)(kaddr + ofs),
+						sync);
+		}
+		if (!sync)
+			goto do_wait;
+	}
+	/* Remove the mst protection fixups again. */
+	for (i = 0; i < nr_bhs; i++) {
+		if (!(i % bhs_per_rec)) {
+			tbh = bhs[i];
+			if (!tbh)
+				continue;
+			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
+					bh_offset(tbh)));
+		}
+	}
+	flush_dcache_page(page);
+unm_done:
+	/* Unlock any locked inodes. */
+	while (nr_locked_nis-- > 0) {
+		ntfs_inode *tni, *base_tni;
+		
+		tni = locked_nis[nr_locked_nis];
+		/* Get the base inode. */
+		down(&tni->extent_lock);
+		if (tni->nr_extents >= 0)
+			base_tni = tni;
+		else {
+			base_tni = tni->ext.base_ntfs_ino;
+			BUG_ON(!base_tni);
+		}
+		up(&tni->extent_lock);
+		ntfs_debug("Unlocking %s inode 0x%lx.",
+				tni == base_tni ? "base" : "extent",
+				tni->mft_no);
+		up(&tni->mrec_lock);
+		atomic_dec(&tni->count);
+		iput(VFS_I(base_tni));
+	}
+	SetPageUptodate(page);
+	kunmap(page);
+done:
+	if (unlikely(err && err != -ENOMEM)) {
+		/*
+		 * Set page error if there is only one ntfs record in the page.
+		 * Otherwise we would lose per-record granularity.
+		 */
+		if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
+			SetPageError(page);
+		NVolSetErrors(vol);
+	}
+	if (page_is_dirty) {
+		ntfs_debug("Page still contains one or more dirty ntfs "
+				"records.  Redirtying the page starting at "
+				"record 0x%lx.", page->index <<
+				(PAGE_CACHE_SHIFT - rec_size_bits));
+		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
+	} else {
+		/*
+		 * Keep the VM happy.  This must be done otherwise the
+		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
+		 * the page is clean.
+		 */
+		BUG_ON(PageWriteback(page));
+		set_page_writeback(page);
+		unlock_page(page);
+		end_page_writeback(page);
+	}
+	if (likely(!err))
+		ntfs_debug("Done.");
+	return err;
+}
+
+/**
+ * ntfs_writepage - write a @page to the backing store
+ * @page:	page cache page to write out
+ * @wbc:	writeback control structure
+ *
+ * This is called from the VM when it wants to have a dirty ntfs page cache
+ * page cleaned.  The VM has already locked the page and marked it clean.
+ *
+ * For non-resident attributes, ntfs_writepage() writes the @page by calling
+ * the ntfs version of the generic block_write_full_page() function,
+ * ntfs_write_block(), which in turn if necessary creates and writes the
+ * buffers associated with the page asynchronously.
+ *
+ * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
+ * the data to the mft record (which at this stage is most likely in memory).
+ * The mft record is then marked dirty and written out asynchronously via the
+ * vfs inode dirty code path for the inode the mft record belongs to or via the
+ * vm page dirty code path for the page the mft record is in.
+ *
+ * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	loff_t i_size;
+	struct inode *vi;
+	ntfs_inode *ni, *base_ni;
+	char *kaddr;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	u32 attr_len;
+	int err;
+
+	BUG_ON(!PageLocked(page));
+
+	vi = page->mapping->host;
+	i_size = i_size_read(vi);
+
+	/* Is the page fully outside i_size? (truncate in progress) */
+	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT)) {
+		/*
+		 * The page may have dirty, unmapped buffers.  Make them
+		 * freeable here, so the page does not leak.
+		 */
+		block_invalidatepage(page, 0);
+		unlock_page(page);
+		ntfs_debug("Write outside i_size - truncated?");
+		return 0;
+	}
+	ni = NTFS_I(vi);
+
+	/* NInoNonResident() == NInoIndexAllocPresent() */
+	if (NInoNonResident(ni)) {
+		/*
+		 * Only unnamed $DATA attributes can be compressed, encrypted,
+		 * and/or sparse.
+		 */
+		if (ni->type == AT_DATA && !ni->name_len) {
+			/* If file is encrypted, deny access, just like NT4. */
+			if (NInoEncrypted(ni)) {
+				unlock_page(page);
+				ntfs_debug("Denying write access to encrypted "
+						"file.");
+				return -EACCES;
+			}
+			/* Compressed data streams are handled in compress.c. */
+			if (NInoCompressed(ni)) {
+				// TODO: Implement and replace this check with
+				// return ntfs_write_compressed_block(page);
+				unlock_page(page);
+				ntfs_error(vi->i_sb, "Writing to compressed "
+						"files is not supported yet. "
+						"Sorry.");
+				return -EOPNOTSUPP;
+			}
+			// TODO: Implement and remove this check.
+			if (NInoSparse(ni)) {
+				unlock_page(page);
+				ntfs_error(vi->i_sb, "Writing to sparse files "
+						"is not supported yet. Sorry.");
+				return -EOPNOTSUPP;
+			}
+		}
+		/* We have to zero every time due to mmap-at-end-of-file. */
+		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
+			/* The page straddles i_size. */
+			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+		}
+		/* Handle mst protected attributes. */
+		if (NInoMstProtected(ni))
+			return ntfs_write_mst_block(page, wbc);
+		/* Normal data stream. */
+		return ntfs_write_block(page, wbc);
+	}
+	/*
+	 * Attribute is resident, implying it is not compressed, encrypted,
+	 * sparse, or mst protected.  This also means the attribute is smaller
+	 * than an mft record and hence smaller than a page, so can simply
+	 * return error on any pages with index above 0.
+	 */
+	BUG_ON(page_has_buffers(page));
+	BUG_ON(!PageUptodate(page));
+	if (unlikely(page->index > 0)) {
+		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
+				"Aborting write.", page->index);
+		BUG_ON(PageWriteback(page));
+		set_page_writeback(page);
+		unlock_page(page);
+		end_page_writeback(page);
+		return -EIO;
+	}
+	if (!NInoAttr(ni))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+	/* Map, pin, and lock the mft record. */
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		m = NULL;
+		ctx = NULL;
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err))
+		goto err_out;
+	/*
+	 * Keep the VM happy.  This must be done otherwise the radix-tree tag
+	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
+	 */
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+	unlock_page(page);
+
+	/*
+	 * Here, we don't need to zero the out of bounds area every time
+	 * because the below memcpy() already takes care of the
+	 * mmap-at-end-of-file requirements. If the file is converted to a
+	 * non-resident one, the code path used is switched to the
+	 * non-resident one where the zeroing happens on each ntfs_writepage()
+	 * invocation.
+	 *
+	 * The above also applies nicely when i_size is decreased.
+	 *
+	 * When i_size is increased, the memory between the old and new i_size
+	 * _must_ be zeroed (or overwritten with new data). Otherwise we will
+	 * expose data to userspace/disk which should never have been exposed.
+	 *
+	 * FIXME: Ensure that i_size increases do the zeroing/overwriting and
+	 * if we cannot guarantee that, then enable the zeroing below.  If the
+	 * zeroing below is enabled, we MUST move the unlock_page() from above
+	 * to after the kunmap_atomic(), i.e. just before the
+	 * end_page_writeback().
+	 * UPDATE: ntfs_prepare/commit_write() do the zeroing on i_size
+	 * increases for resident attributes so those are ok.
+	 * TODO: ntfs_truncate(), others?
+	 */
+
+	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
+	i_size = i_size_read(VFS_I(ni));
+	kaddr = kmap_atomic(page, KM_USER0);
+	if (unlikely(attr_len > i_size)) {
+		/* Zero out of bounds area in the mft record. */
+		memset((u8*)ctx->attr + le16_to_cpu(
+				ctx->attr->data.resident.value_offset) +
+				i_size, 0, attr_len - i_size);
+		attr_len = i_size;
+	}
+	/* Copy the data from the page to the mft record. */
+	memcpy((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset),
+			kaddr, attr_len);
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	/* Zero out of bounds area in the page cache page. */
+	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	end_page_writeback(page);
+
+	/* Mark the mft record dirty, so it gets written back. */
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+	return 0;
+err_out:
+	if (err == -ENOMEM) {
+		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
+				"page so we try again later.");
+		/*
+		 * Put the page back on mapping->dirty_pages, but leave its
+		 * buffers' dirty state as-is.
+		 */
+		redirty_page_for_writepage(wbc, page);
+		err = 0;
+	} else {
+		ntfs_error(vi->i_sb, "Resident attribute write failed with "
+				"error %i.  Setting page error flag.", err);
+		SetPageError(page);
+	}
+	unlock_page(page);
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(base_ni);
+	return err;
+}
+
+/**
+ * ntfs_prepare_nonresident_write - prepare a non-resident page for writing
+ * @page:	page cache page to prepare for the write
+ * @from:	byte offset in @page at which the write starts
+ * @to:		byte offset in @page at which the write ends
+ *
+ * Prepare the buffers of @page covering the byte range @from to @to for
+ * writing, mapping any unmapped buffers as needed.
+ */
+static int ntfs_prepare_nonresident_write(struct page *page,
+		unsigned from, unsigned to)
+{
+	VCN vcn;
+	LCN lcn;
+	sector_t block, ablock, iblock;
+	struct inode *vi;
+	ntfs_inode *ni;
+	ntfs_volume *vol;
+	runlist_element *rl;
+	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+	unsigned int vcn_ofs, block_start, block_end, blocksize;
+	int err;
+	BOOL is_retry;
+	unsigned char blocksize_bits;
+
+	vi = page->mapping->host;
+	ni = NTFS_I(vi);
+	vol = ni->vol;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx, from = %u, to = %u.", ni->mft_no, ni->type,
+			page->index, from, to);
+
+	BUG_ON(!NInoNonResident(ni));
+
+	blocksize_bits = vi->i_blkbits;
+	blocksize = 1 << blocksize_bits;
+
+	/*
+	 * create_empty_buffers() will create uptodate/dirty buffers if the
+	 * page is uptodate/dirty.
+	 */
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+	bh = head = page_buffers(page);
+	if (unlikely(!bh))
+		return -ENOMEM;
+
+	/* The first block in the page. */
+	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+
+	/*
+	 * The first out of bounds block for the allocated size. No need to
+	 * round up as allocated_size is in multiples of cluster size and the
+	 * minimum cluster size is 512 bytes, which is equal to the smallest
+	 * blocksize.
+	 */
+	ablock = ni->allocated_size >> blocksize_bits;
+
+	/* The last (fully or partially) initialized block. */
+	iblock = ni->initialized_size >> blocksize_bits;
+
+	/* Loop through all the buffers in the page. */
+	block_start = 0;
+	rl = NULL;
+	err = 0;
+	do {
+		block_end = block_start + blocksize;
+		/*
+		 * If buffer @bh is outside the write, just mark it uptodate
+		 * if the page is uptodate and continue with the next buffer.
+		 */
+		if (block_end <= from || block_start >= to) {
+			if (PageUptodate(page)) {
+				if (!buffer_uptodate(bh))
+					set_buffer_uptodate(bh);
+			}
+			continue;
+		}
+		/*
+		 * @bh is at least partially being written to.
+		 * Make sure it is not marked as new.
+		 */
+		//if (buffer_new(bh))
+		//	clear_buffer_new(bh);
+
+		if (block >= ablock) {
+			// TODO: block is above allocated_size, need to
+			// allocate it. Best done in one go to accommodate not
+			// only block but all above blocks up to and including:
+			// ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
+			// - 1) >> blocksize_bits. Obviously will need to round
+			// up to next cluster boundary, too. This should be
+			// done with a helper function, so it can be reused.
+			ntfs_error(vol->sb, "Writing beyond allocated size "
+					"is not supported yet. Sorry.");
+			err = -EOPNOTSUPP;
+			goto err_out;
+			// Need to update ablock.
+			// Need to set_buffer_new() on all block bhs that are
+			// newly allocated.
+		}
+		/*
+		 * Now we have enough allocated size to fulfill the whole
+		 * request, i.e. block < ablock is true.
+		 */
+		if (unlikely((block >= iblock) &&
+				(ni->initialized_size < vi->i_size))) {
+			/*
+			 * If this page is fully outside initialized size, zero
+			 * out all pages between the current initialized size
+			 * and the current page. Just use ntfs_readpage() to do
+			 * the zeroing transparently.
+			 */
+			if (block > iblock) {
+				// TODO:
+				// For each page do:
+				// - read_cache_page()
+				// Again for each page do:
+				// - wait_on_page_locked()
+				// - Check (PageUptodate(page) &&
+				//			!PageError(page))
+				// Update initialized size in the attribute and
+				// in the inode.
+				// Again, for each page do:
+				//	__set_page_dirty_buffers();
+				// page_cache_release()
+				// We don't need to wait on the writes.
+				// Update iblock.
+			}
+			/*
+			 * The current page straddles initialized size. Zero
+			 * all non-uptodate buffers and set them uptodate (and
+			 * dirty?). Note, there aren't any non-uptodate buffers
+			 * if the page is uptodate.
+			 * FIXME: For an uptodate page, the buffers may need to
+			 * be written out because they were not initialized on
+			 * disk before.
+			 */
+			if (!PageUptodate(page)) {
+				// TODO:
+				// Zero any non-uptodate buffers up to i_size.
+				// Set them uptodate and dirty.
+			}
+			// TODO:
+			// Update initialized size in the attribute and in the
+			// inode (up to i_size).
+			// Update iblock.
+			// FIXME: This is inefficient. Try to batch the two
+			// size changes to happen in one go.
+			ntfs_error(vol->sb, "Writing beyond initialized size "
+					"is not supported yet. Sorry.");
+			err = -EOPNOTSUPP;
+			goto err_out;
+			// Do NOT set_buffer_new() BUT DO clear buffer range
+			// outside write request range.
+			// set_buffer_uptodate() on complete buffers as well as
+			// set_buffer_dirty().
+		}
+
+		/* Need to map unmapped buffers. */
+		if (!buffer_mapped(bh)) {
+			/* Unmapped buffer. Need to map it. */
+			bh->b_bdev = vol->sb->s_bdev;
+
+			/* Convert block into corresponding vcn and offset. */
+			vcn = (VCN)block << blocksize_bits >>
+					vol->cluster_size_bits;
+			vcn_ofs = ((VCN)block << blocksize_bits) &
+					vol->cluster_size_mask;
+
+			is_retry = FALSE;
+			if (!rl) {
+lock_retry_remap:
+				down_read(&ni->runlist.lock);
+				rl = ni->runlist.rl;
+			}
+			if (likely(rl != NULL)) {
+				/* Seek to element containing target vcn. */
+				while (rl->length && rl[1].vcn <= vcn)
+					rl++;
+				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			} else
+				lcn = LCN_RL_NOT_MAPPED;
+			if (unlikely(lcn < 0)) {
+				/*
+				 * We extended the attribute allocation above.
+				 * If we hit an ENOENT here it means that the
+				 * allocation was insufficient which is a bug.
+				 */
+				BUG_ON(lcn == LCN_ENOENT);
+
+				/* It is a hole, need to instantiate it. */
+				if (lcn == LCN_HOLE) {
+					// TODO: Instantiate the hole.
+					// clear_buffer_new(bh);
+					// unmap_underlying_metadata(bh->b_bdev,
+					//		bh->b_blocknr);
+					// For non-uptodate buffers, need to
+					// zero out the region outside the
+					// request in this bh or all bhs,
+					// depending on what we implemented
+					// above.
+					// Need to flush_dcache_page().
+					// Or could use set_buffer_new()
+					// instead?
+					ntfs_error(vol->sb, "Writing into "
+							"sparse regions is "
+							"not supported yet. "
+							"Sorry.");
+					err = -EOPNOTSUPP;
+					goto err_out;
+				} else if (!is_retry &&
+						lcn == LCN_RL_NOT_MAPPED) {
+					is_retry = TRUE;
+					/*
+					 * Attempt to map runlist, dropping
+					 * lock for the duration.
+					 */
+					up_read(&ni->runlist.lock);
+					err = ntfs_map_runlist(ni, vcn);
+					if (likely(!err))
+						goto lock_retry_remap;
+					rl = NULL;
+					lcn = err;
+				}
+				/*
+				 * Failed to map the buffer, even after
+				 * retrying.
+				 */
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Failed to write to inode "
+						"0x%lx, attribute type 0x%x, "
+						"vcn 0x%llx, offset 0x%x "
+						"because its location on disk "
+						"could not be determined%s "
+						"(error code %lli).",
+						ni->mft_no, ni->type,
+						(unsigned long long)vcn,
+						vcn_ofs, is_retry ? " even "
+						"after retrying" : "",
+						(long long)lcn);
+				if (!err)
+					err = -EIO;
+				goto err_out;
+			}
+			/* We now have a successful remap, i.e. lcn >= 0. */
+
+			/* Setup buffer head to correct block. */
+			bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+					+ vcn_ofs) >> blocksize_bits;
+			set_buffer_mapped(bh);
+
+			// FIXME: Something analogous to this is needed for
+			// each newly allocated block, i.e. BH_New.
+			// FIXME: Might need to take this out of the
+			// if (!buffer_mapped(bh)) {}, depending on how we
+			// implement things during the allocated_size and
+			// initialized_size extension code above.
+			if (buffer_new(bh)) {
+				clear_buffer_new(bh);
+				unmap_underlying_metadata(bh->b_bdev,
+						bh->b_blocknr);
+				if (PageUptodate(page)) {
+					set_buffer_uptodate(bh);
+					continue;
+				}
+				/*
+				 * Page is _not_ uptodate, zero surrounding
+				 * region. NOTE: This is how we decide whether
+				 * to zero or not!
+				 */
+				if (block_end > to || block_start < from) {
+					void *kaddr;
+
+					kaddr = kmap_atomic(page, KM_USER0);
+					if (block_end > to)
+						memset(kaddr + to, 0,
+								block_end - to);
+					if (block_start < from)
+						memset(kaddr + block_start, 0,
+								from -
+								block_start);
+					flush_dcache_page(page);
+					kunmap_atomic(kaddr, KM_USER0);
+				}
+				continue;
+			}
+		}
+		/* @bh is mapped, set it uptodate if the page is uptodate. */
+		if (PageUptodate(page)) {
+			if (!buffer_uptodate(bh))
+				set_buffer_uptodate(bh);
+			continue;
+		}
+		/*
+		 * The page is not uptodate. The buffer is mapped. If it is not
+		 * uptodate, and it is only partially being written to, we need
+		 * to read the buffer in before the write, i.e. right now.
+		 */
+		if (!buffer_uptodate(bh) &&
+				(block_start < from || block_end > to)) {
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++ = bh;
+		}
+	} while (block++, block_start = block_end,
+			(bh = bh->b_this_page) != head);
+
+	/* Release the lock if we took it. */
+	if (rl) {
+		up_read(&ni->runlist.lock);
+		rl = NULL;
+	}
+
+	/* If we issued read requests, let them complete. */
+	while (wait_bh > wait) {
+		wait_on_buffer(*--wait_bh);
+		if (!buffer_uptodate(*wait_bh))
+			return -EIO;
+	}
+
+	ntfs_debug("Done.");
+	return 0;
+err_out:
+	/*
+	 * Zero out any newly allocated blocks to avoid exposing stale data.
+	 * If BH_New is set, we know that the block was newly allocated in the
+	 * above loop.
+	 * FIXME: What about initialized_size increments? Have we done all the
+	 * required zeroing above? If not this error handling is broken, and
+	 * in particular the if (block_end <= from) check is completely bogus.
+	 */
+	bh = head;
+	block_start = 0;
+	is_retry = FALSE;
+	do {
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to)
+			break;
+		if (buffer_new(bh)) {
+			void *kaddr;
+
+			clear_buffer_new(bh);
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + block_start, 0, bh->b_size);
+			kunmap_atomic(kaddr, KM_USER0);
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+			is_retry = TRUE;
+		}
+	} while (block_start = block_end, (bh = bh->b_this_page) != head);
+	if (is_retry)
+		flush_dcache_page(page);
+	if (rl)
+		up_read(&ni->runlist.lock);
+	return err;
+}
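+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): the
+ * block number to (vcn, offset within cluster) conversion that is performed
+ * inline in ntfs_prepare_nonresident_write() above, factored out here purely
+ * for exposition.
+ */
+static inline void ntfs_example_block_to_vcn(const ntfs_volume *vol,
+		const s64 block, const unsigned char blocksize_bits,
+		VCN *vcn, unsigned int *vcn_ofs)
+{
+	/* Cluster containing the block and byte offset of the block in it. */
+	*vcn = (VCN)block << blocksize_bits >> vol->cluster_size_bits;
+	*vcn_ofs = ((VCN)block << blocksize_bits) & vol->cluster_size_mask;
+}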
+
+/**
+ * ntfs_prepare_write - prepare a page for receiving data
+ *
+ * This is called from generic_file_write() with i_sem held on the inode
+ * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
+ * data has not yet been copied into the @page.
+ *
+ * Need to extend the attribute/fill in holes if necessary, create blocks and
+ * make partially overwritten blocks uptodate.
+ *
+ * i_size is not to be modified yet.
+ *
+ * Return 0 on success or -errno on error.
+ *
+ * Should be using block_prepare_write() [support for sparse files] or
+ * cont_prepare_write() [no support for sparse files].  Cannot do that due to
+ * ntfs specifics but can look at them for implementation guidance.
+ *
+ * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
+ * the first byte in the page that will be written to and @to is the first byte
+ * after the last byte that will be written to.
+ */
+static int ntfs_prepare_write(struct file *file, struct page *page,
+		unsigned from, unsigned to)
+{
+	s64 new_size;
+	struct inode *vi = page->mapping->host;
+	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
+	ntfs_volume *vol = ni->vol;
+	ntfs_attr_search_ctx *ctx = NULL;
+	MFT_RECORD *m = NULL;
+	ATTR_RECORD *a;
+	u8 *kaddr;
+	u32 attr_len;
+	int err;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
+			page->index, from, to);
+	BUG_ON(!PageLocked(page));
+	BUG_ON(from > PAGE_CACHE_SIZE);
+	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > to);
+	BUG_ON(NInoMstProtected(ni));
+	/*
+	 * If a previous ntfs_truncate() failed, repeat it and abort if it
+	 * fails again.
+	 */
+	if (unlikely(NInoTruncateFailed(ni))) {
+		down_write(&vi->i_alloc_sem);
+		err = ntfs_truncate(vi);
+		up_write(&vi->i_alloc_sem);
+		if (err || NInoTruncateFailed(ni)) {
+			if (!err)
+				err = -EIO;
+			goto err_out;
+		}
+	}
+	/* If the attribute is not resident, deal with it elsewhere. */
+	if (NInoNonResident(ni)) {
+		/*
+		 * Only unnamed $DATA attributes can be compressed, encrypted,
+		 * and/or sparse.
+		 */
+		if (ni->type == AT_DATA && !ni->name_len) {
+			/* If file is encrypted, deny access, just like NT4. */
+			if (NInoEncrypted(ni)) {
+				ntfs_debug("Denying write access to encrypted "
+						"file.");
+				return -EACCES;
+			}
+			/* Compressed data streams are handled in compress.c. */
+			if (NInoCompressed(ni)) {
+				// TODO: Implement and replace this check with
+				// return ntfs_write_compressed_block(page);
+				ntfs_error(vi->i_sb, "Writing to compressed "
+						"files is not supported yet. "
+						"Sorry.");
+				return -EOPNOTSUPP;
+			}
+			// TODO: Implement and remove this check.
+			if (NInoSparse(ni)) {
+				ntfs_error(vi->i_sb, "Writing to sparse files "
+						"is not supported yet. Sorry.");
+				return -EOPNOTSUPP;
+			}
+		}
+		/* Normal data stream. */
+		return ntfs_prepare_nonresident_write(page, from, to);
+	}
+	/*
+	 * Attribute is resident, implying it is not compressed, encrypted, or
+	 * sparse.
+	 */
+	BUG_ON(page_has_buffers(page));
+	new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
+	/* If we do not need to resize the attribute allocation we are done. */
+	if (new_size <= vi->i_size)
+		goto done;
+
+	// FIXME: We abort for now as this code is not safe.
+	ntfs_error(vi->i_sb, "Changing the file size is not supported yet.  "
+			"Sorry.");
+	return -EOPNOTSUPP;
+
+	/* Map, pin, and lock the (base) mft record. */
+	if (!NInoAttr(ni))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		m = NULL;
+		ctx = NULL;
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT)
+			err = -EIO;
+		goto err_out;
+	}
+	m = ctx->mrec;
+	a = ctx->attr;
+	/* The total length of the attribute value. */
+	attr_len = le32_to_cpu(a->data.resident.value_length);
+	BUG_ON(vi->i_size != attr_len);
+	/* Check if new size is allowed in $AttrDef. */
+	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
+	if (unlikely(err)) {
+		if (err == -ERANGE) {
+			ntfs_error(vol->sb, "Write would cause the inode "
+					"0x%lx to exceed the maximum size for "
+					"its attribute type (0x%x).  Aborting "
+					"write.", vi->i_ino,
+					le32_to_cpu(ni->type));
+		} else {
+			ntfs_error(vol->sb, "Inode 0x%lx has unknown "
+					"attribute type 0x%x.  Aborting "
+					"write.", vi->i_ino,
+					le32_to_cpu(ni->type));
+			err = -EIO;
+		}
+		goto err_out2;
+	}
+	/*
+	 * Extend the attribute record to be able to store the new attribute
+	 * size.
+	 */
+	if (new_size >= vol->mft_record_size || ntfs_attr_record_resize(m, a,
+			le16_to_cpu(a->data.resident.value_offset) +
+			new_size)) {
+		/* Not enough space in the mft record. */
+		ntfs_error(vol->sb, "Not enough space in the mft record for "
+				"the resized attribute value.  This is not "
+				"supported yet.  Aborting write.");
+		err = -EOPNOTSUPP;
+		goto err_out2;
+	}
+	/*
+	 * We have enough space in the mft record to fit the write.  This
+	 * implies the attribute is smaller than the mft record and hence the
+	 * attribute must be in a single page and hence page->index must be 0.
+	 */
+	BUG_ON(page->index);
+	/*
+	 * If the beginning of the write is past the old size, enlarge the
+	 * attribute value up to the beginning of the write and fill it with
+	 * zeroes.
+	 */
+	if (from > attr_len) {
+		memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
+				attr_len, 0, from - attr_len);
+		a->data.resident.value_length = cpu_to_le32(from);
+		/* Zero the corresponding area in the page as well. */
+		if (PageUptodate(page)) {
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + attr_len, 0, from - attr_len);
+			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(page);
+		}
+	}
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+	/*
+	 * Because resident attributes are handled by memcpy() to/from the
+	 * corresponding MFT record, and because this form of i/o is byte
+	 * aligned rather than block aligned, there is no need to bring the
+	 * page uptodate here as in the non-resident case where we need to
+	 * bring the buffers straddled by the write uptodate before
+	 * generic_file_write() does the copying from userspace.
+	 *
+	 * We thus defer bringing the page region outside the written-to
+	 * region uptodate until ntfs_commit_write(), which makes the code
+	 * simpler and saves one atomic kmap, which is good.
+	 */
+done:
+	ntfs_debug("Done.");
+	return 0;
+err_out:
+	if (err == -ENOMEM)
+		ntfs_warning(vi->i_sb, "Error allocating memory required to "
+				"prepare the write.");
+	else {
+		ntfs_error(vi->i_sb, "Resident attribute prepare write failed "
+				"with error %i.", err);
+		NVolSetErrors(vol);
+		make_bad_inode(vi);
+	}
+err_out2:
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(base_ni);
+	return err;
+}
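+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): derive
+ * the page @index and the @from and @to arguments that ->prepare_write() and
+ * ->commit_write() receive for a write of @bytes bytes at file position @pos,
+ * assuming the caller has already clamped @bytes so that the write does not
+ * cross a page boundary.  As noted above, @from is inclusive and @to is
+ * exclusive.
+ */
+static inline void ntfs_example_write_range(const loff_t pos,
+		const size_t bytes, pgoff_t *index, unsigned *from,
+		unsigned *to)
+{
+	*index = pos >> PAGE_CACHE_SHIFT;	/* Page containing @pos. */
+	*from = pos & (PAGE_CACHE_SIZE - 1);	/* First byte written to. */
+	*to = *from + bytes;			/* One past the last byte. */
+}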
+
+/**
+ * ntfs_commit_nonresident_write - commit a non-resident write to a page
+ * @page:	page containing the data that has been written
+ * @from:	first byte in @page that has been written to
+ * @to:		first byte after the last written byte in @page
+ *
+ * Mark the buffers of @page covering the byte range @from to @to uptodate and
+ * dirty, and set the whole page uptodate if this leaves no buffers that are
+ * not uptodate.
+ */
+static int ntfs_commit_nonresident_write(struct page *page,
+		unsigned from, unsigned to)
+{
+	s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
+	struct inode *vi = page->mapping->host;
+	struct buffer_head *bh, *head;
+	unsigned int block_start, block_end, blocksize;
+	BOOL partial;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx, from = %u, to = %u.", vi->i_ino,
+			NTFS_I(vi)->type, page->index, from, to);
+	blocksize = 1 << vi->i_blkbits;
+
+	// FIXME: We need a whole slew of special cases in here for compressed
+	// files for example...
+	// For now, we know ntfs_prepare_write() would have failed so we can't
+	// get here in any of the cases which we would have to special case,
+	// so this is just a ripped-off, unrolled generic_commit_write().
+
+	bh = head = page_buffers(page);
+	block_start = 0;
+	partial = FALSE;
+	do {
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (!buffer_uptodate(bh))
+				partial = TRUE;
+		} else {
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+		}
+	} while (block_start = block_end, (bh = bh->b_this_page) != head);
+	/*
+	 * If this is a partial write which happened to make all buffers
+	 * uptodate then we can optimize away a bogus ->readpage() for the next
+	 * read().  Here we 'discover' whether the page went uptodate as a
+	 * result of this (potentially partial) write.
+	 */
+	if (!partial)
+		SetPageUptodate(page);
+	/*
+	 * Not convinced about this at all.  See disparity comment above.  For
+	 * now we know ntfs_prepare_write() would have failed in the write
+	 * exceeds i_size case, so this will never trigger which is fine.
+	 */
+	if (pos > vi->i_size) {
+		ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
+				"not supported yet.  Sorry.");
+		return -EOPNOTSUPP;
+		// vi->i_size = pos;
+		// mark_inode_dirty(vi);
+	}
+	ntfs_debug("Done.");
+	return 0;
+}
+
+/**
+ * ntfs_commit_write - commit the received data
+ *
+ * This is called from generic_file_write() with i_sem held on the inode
+ * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
+ * data has already been copied into the @page.  ntfs_prepare_write() has been
+ * called before the data was copied and it returned success, so we can take
+ * the results of various BUG checks and some error handling for granted.
+ *
+ * Need to mark modified blocks dirty so they get written out later when
+ * ntfs_writepage() is invoked by the VM.
+ *
+ * Return 0 on success or -errno on error.
+ *
+ * Should be using generic_commit_write().  This marks buffers uptodate and
+ * dirty, sets the page uptodate if all buffers in the page are uptodate, and
+ * updates i_size if the end of io is beyond i_size.  In that case, it also
+ * marks the inode dirty.
+ *
+ * Cannot use generic_commit_write() due to ntfs specialities but can look at
+ * it for implementation guidance.
+ *
+ * If things have gone as outlined in ntfs_prepare_write(), then we do not
+ * need to do any page content modifications here at all, except in the write
+ * to resident attribute case, where we need to do the uptodate bringing here
+ * which we combine with the copying into the mft record which means we save
+ * one atomic kmap.
+ */
+static int ntfs_commit_write(struct file *file, struct page *page,
+		unsigned from, unsigned to)
+{
+	struct inode *vi = page->mapping->host;
+	ntfs_inode *base_ni, *ni = NTFS_I(vi);
+	char *kaddr, *kattr;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	ATTR_RECORD *a;
+	u32 attr_len;
+	int err;
+
+	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
+			"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
+			page->index, from, to);
+	/* If the attribute is not resident, deal with it elsewhere. */
+	if (NInoNonResident(ni)) {
+		/* Only unnamed $DATA attributes can be compressed/encrypted. */
+		if (ni->type == AT_DATA && !ni->name_len) {
+			/* Encrypted files need separate handling. */
+			if (NInoEncrypted(ni)) {
+				// We never get here at present!
+				BUG();
+			}
+			/* Compressed data streams are handled in compress.c. */
+			if (NInoCompressed(ni)) {
+				// TODO: Implement this!
+				// return ntfs_write_compressed_block(page);
+				// We never get here at present!
+				BUG();
+			}
+		}
+		/* Normal data stream. */
+		return ntfs_commit_nonresident_write(page, from, to);
+	}
+	/*
+	 * Attribute is resident, implying it is not compressed, encrypted, or
+	 * sparse.
+	 */
+	if (!NInoAttr(ni))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+	/* Map, pin, and lock the mft record. */
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		m = NULL;
+		ctx = NULL;
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT)
+			err = -EIO;
+		goto err_out;
+	}
+	a = ctx->attr;
+	/* The total length of the attribute value. */
+	attr_len = le32_to_cpu(a->data.resident.value_length);
+	BUG_ON(from > attr_len);
+	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
+	kaddr = kmap_atomic(page, KM_USER0);
+	/* Copy the received data from the page to the mft record. */
+	memcpy(kattr + from, kaddr + from, to - from);
+	/* Update the attribute length if necessary. */
+	if (to > attr_len) {
+		attr_len = to;
+		a->data.resident.value_length = cpu_to_le32(attr_len);
+	}
+	/*
+	 * If the page is not uptodate, bring the out of bounds area(s)
+	 * uptodate by copying data from the mft record to the page.
+	 */
+	if (!PageUptodate(page)) {
+		if (from > 0)
+			memcpy(kaddr, kattr, from);
+		if (to < attr_len)
+			memcpy(kaddr + to, kattr + to, attr_len - to);
+		/* Zero the region outside the end of the attribute value. */
+		if (attr_len < PAGE_CACHE_SIZE)
+			memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+		/*
+		 * The probability of not having done any of the above is
+		 * extremely small, so we just flush unconditionally.
+		 */
+		flush_dcache_page(page);
+		SetPageUptodate(page);
+	}
+	kunmap_atomic(kaddr, KM_USER0);
+	/* Update i_size if necessary. */
+	if (vi->i_size < attr_len) {
+		ni->allocated_size = ni->initialized_size = attr_len;
+		i_size_write(vi, attr_len);
+	}
+	/* Mark the mft record dirty, so it gets written back. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+	ntfs_debug("Done.");
+	return 0;
+err_out:
+	if (err == -ENOMEM) {
+		ntfs_warning(vi->i_sb, "Error allocating memory required to "
+				"commit the write.");
+		if (PageUptodate(page)) {
+			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
+					"dirty so the write will be retried "
+					"later on by the VM.");
+			/*
+			 * Put the page on mapping->dirty_pages, but leave its
+			 * buffers' dirty state as-is.
+			 */
+			__set_page_dirty_nobuffers(page);
+			err = 0;
+		} else
+			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
+					"data has been lost.");
+	} else {
+		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
+				"with error %i.", err);
+		NVolSetErrors(ni->vol);
+		make_bad_inode(vi);
+	}
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(base_ni);
+	return err;
+}
+
+#endif	/* NTFS_RW */
+
+/**
+ * ntfs_aops - general address space operations for inodes and attributes
+ */
+struct address_space_operations ntfs_aops = {
+	.readpage	= ntfs_readpage,	/* Fill page with data. */
+	.sync_page	= block_sync_page,	/* Currently, just unplugs the
+						   disk request queue. */
+#ifdef NTFS_RW
+	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
+	.prepare_write	= ntfs_prepare_write,	/* Prepare page and buffers
+						   ready to receive data. */
+	.commit_write	= ntfs_commit_write,	/* Commit received data. */
+#endif /* NTFS_RW */
+};
+
+/**
+ * ntfs_mst_aops - general address space operations for mst protected inodes
+ *		   and attributes
+ */
+struct address_space_operations ntfs_mst_aops = {
+	.readpage	= ntfs_readpage,	/* Fill page with data. */
+	.sync_page	= block_sync_page,	/* Currently, just unplugs the
+						   disk request queue. */
+#ifdef NTFS_RW
+	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
+	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty
+						   without touching the buffers
+						   belonging to the page. */
+#endif /* NTFS_RW */
+};
+
+#ifdef NTFS_RW
+
+/**
+ * mark_ntfs_record_dirty - mark an ntfs record dirty
+ * @page:	page containing the ntfs record to mark dirty
+ * @ofs:	byte offset within @page at which the ntfs record begins
+ *
+ * Set the buffers and the page in which the ntfs record is located dirty.
+ *
+ * The latter also marks the vfs inode the ntfs record belongs to dirty
+ * (I_DIRTY_PAGES only).
+ *
+ * If the page does not have buffers, we create them and set them uptodate.
+ * The page may not be locked which is why we need to handle the buffers under
+ * the mapping->private_lock.  Once the buffers are marked dirty we no longer
+ * need the lock since try_to_free_buffers() does not free dirty buffers.
+ */
+void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
+{
+	struct address_space *mapping = page->mapping;
+	ntfs_inode *ni = NTFS_I(mapping->host);
+	struct buffer_head *bh, *head, *buffers_to_free = NULL;
+	unsigned int end, bh_size, bh_ofs;
+
+	BUG_ON(!PageUptodate(page));
+	end = ofs + ni->itype.index.block_size;
+	bh_size = 1 << VFS_I(ni)->i_blkbits;
+	spin_lock(&mapping->private_lock);
+	if (unlikely(!page_has_buffers(page))) {
+		spin_unlock(&mapping->private_lock);
+		bh = head = alloc_page_buffers(page, bh_size, 1);
+		spin_lock(&mapping->private_lock);
+		if (likely(!page_has_buffers(page))) {
+			struct buffer_head *tail;
+
+			do {
+				set_buffer_uptodate(bh);
+				tail = bh;
+				bh = bh->b_this_page;
+			} while (bh);
+			tail->b_this_page = head;
+			attach_page_buffers(page, head);
+		} else
+			buffers_to_free = bh;
+	}
+	bh = head = page_buffers(page);
+	do {
+		bh_ofs = bh_offset(bh);
+		if (bh_ofs + bh_size <= ofs)
+			continue;
+		if (unlikely(bh_ofs >= end))
+			break;
+		set_buffer_dirty(bh);
+	} while ((bh = bh->b_this_page) != head);
+	spin_unlock(&mapping->private_lock);
+	__set_page_dirty_nobuffers(page);
+	if (unlikely(buffers_to_free)) {
+		do {
+			bh = buffers_to_free->b_this_page;
+			free_buffer_head(buffers_to_free);
+			buffers_to_free = bh;
+		} while (buffers_to_free);
+	}
+}
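+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): the
+ * usage pattern for mark_ntfs_record_dirty() - after modifying an ntfs
+ * record that starts at byte offset @ofs inside the uptodate @page, flush
+ * the cpu cache and dirty the record so it gets written out later.
+ */
+static inline void ntfs_example_record_modified(struct page *page,
+		const unsigned int ofs)
+{
+	/* Make the modification visible to the block layer. */
+	flush_dcache_page(page);
+	/* Dirty the buffers backing the record as well as the page itself. */
+	mark_ntfs_record_dirty(page, ofs);
+}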
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
new file mode 100644
index 0000000..3b74e66
--- /dev/null
+++ b/fs/ntfs/aops.h
@@ -0,0 +1,109 @@
+/**
+ * aops.h - Defines for NTFS kernel address space operations and page cache
+ *	    handling.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_AOPS_H
+#define _LINUX_NTFS_AOPS_H
+
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+
+#include "inode.h"
+
+/**
+ * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
+ * @page:	the page to release
+ *
+ * Unpin, unmap and release a page that was obtained from ntfs_map_page().
+ */
+static inline void ntfs_unmap_page(struct page *page)
+{
+	kunmap(page);
+	page_cache_release(page);
+}
+
+/**
+ * ntfs_map_page - map a page into accessible memory, reading it if necessary
+ * @mapping:	address space for which to obtain the page
+ * @index:	index into the page cache for @mapping of the page to map
+ *
+ * Read a page from the page cache of the address space @mapping at position
+ * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ *
+ * If the page is not in memory it is loaded from disk first using the readpage
+ * method defined in the address space operations of @mapping and the page is
+ * added to the page cache of @mapping in the process.
+ *
+ * If the page belongs to an mst protected attribute and it is marked as such
+ * in its ntfs inode (NInoMstProtected()) the mst fixups are applied but no
+ * error checking is performed.  This means the caller has to verify whether
+ * the ntfs record(s) contained in the page are valid or not using one of the
+ * ntfs_is_XXXX_record{,p}() macros, where XXXX is the record type you are
+ * expecting to see.  (For details of the macros, see fs/ntfs/layout.h.)
+ *
+ * If the page is in high memory it is mapped into memory directly addressable
+ * by the kernel.
+ *
+ * Finally the page count is incremented, thus pinning the page into place.
+ *
+ * The above means that page_address(page) can be used on all pages obtained
+ * with ntfs_map_page() to get the kernel virtual address of the page.
+ *
+ * When finished with the page, the caller has to call ntfs_unmap_page() to
+ * unpin, unmap and release the page.
+ *
+ * Note this does not grant exclusive access. If such is desired, the caller
+ * must provide it independently of the ntfs_{un}map_page() calls by using
+ * a {rw_}semaphore or other means of serialization. A spin lock cannot be
+ * used as ntfs_map_page() can block.
+ *
+ * The unlocked and uptodate page is returned on success or an encoded error
+ * on failure. Caller has to test for error using the IS_ERR() macro on the
+ * return value. If that evaluates to TRUE, the negative error code can be
+ * obtained using PTR_ERR() on the return value of ntfs_map_page().
+ */
+static inline struct page *ntfs_map_page(struct address_space *mapping,
+		unsigned long index)
+{
+	struct page *page = read_cache_page(mapping, index,
+			(filler_t*)mapping->a_ops->readpage, NULL);
+
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		kmap(page);
+		if (PageUptodate(page) && !PageError(page))
+			return page;
+		ntfs_unmap_page(page);
+		return ERR_PTR(-EIO);
+	}
+	return page;
+}
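+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the ntfs API): read
+ * the first byte of the data backed by @mapping using the ntfs_map_page() /
+ * ntfs_unmap_page() pattern described above.  Returns the byte value or a
+ * negative error code obtained via PTR_ERR().
+ */
+static inline int ntfs_example_read_first_byte(struct address_space *mapping)
+{
+	struct page *page = ntfs_map_page(mapping, 0);
+	int byte;
+
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+	/* The page is kmap()ped so page_address() is valid here. */
+	byte = *(u8*)page_address(page);
+	ntfs_unmap_page(page);
+	return byte;
+}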
+
+#ifdef NTFS_RW
+
+extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_AOPS_H */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
new file mode 100644
index 0000000..1ff7f90
--- /dev/null
+++ b/fs/ntfs/attrib.c
@@ -0,0 +1,1258 @@
+/**
+ * attrib.c - NTFS attribute operations.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/buffer_head.h>
+
+#include "attrib.h"
+#include "debug.h"
+#include "layout.h"
+#include "mft.h"
+#include "ntfs.h"
+#include "types.h"
+
+/**
+ * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
+ * @ni:		ntfs inode for which to map (part of) a runlist
+ * @vcn:	map runlist part containing this vcn
+ *
+ * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - The runlist must be unlocked on entry and is unlocked on return.
+ *	    - This function takes the lock for writing and modifies the runlist.
+ */
+int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
+{
+	ntfs_inode *base_ni;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *mrec;
+	int err = 0;
+
+	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
+			(unsigned long long)vcn);
+
+	if (!NInoAttr(ni))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+
+	mrec = map_mft_record(base_ni);
+	if (IS_ERR(mrec))
+		return PTR_ERR(mrec);
+	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, vcn, NULL, 0, ctx);
+	if (unlikely(err))
+		goto put_err_out;
+
+	down_write(&ni->runlist.lock);
+	/* Make sure someone else didn't do the work while we were sleeping. */
+	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
+			LCN_RL_NOT_MAPPED)) {
+		runlist_element *rl;
+
+		rl = ntfs_mapping_pairs_decompress(ni->vol, ctx->attr,
+				ni->runlist.rl);
+		if (IS_ERR(rl))
+			err = PTR_ERR(rl);
+		else
+			ni->runlist.rl = rl;
+	}
+	up_write(&ni->runlist.lock);
+
+put_err_out:
+	ntfs_attr_put_search_ctx(ctx);
+err_out:
+	unmap_mft_record(base_ni);
+	return err;
+}
+
+/**
+ * ntfs_find_vcn - find a vcn in the runlist described by an ntfs inode
+ * @ni:		ntfs inode describing the runlist to search
+ * @vcn:	vcn to find
+ * @need_write:	if false, lock for reading and if true, lock for writing
+ *
+ * Find the virtual cluster number @vcn in the runlist described by the ntfs
+ * inode @ni and return the address of the runlist element containing the @vcn.
+ * The runlist is left locked and the caller has to unlock it.  If @need_write
+ * is true, the runlist is locked for writing and if @need_write is false, the
+ * runlist is locked for reading.  In the error case, the runlist is not left
+ * locked.
+ *
+ * Note you need to distinguish between the lcn of the returned runlist element
+ * being >= 0 and LCN_HOLE.  In the latter case you have to return zeroes on
+ * read and allocate clusters on write.
+ *
+ * Return the runlist element containing the @vcn on success and
+ * ERR_PTR(-errno) on error.  You need to test the return value with IS_ERR()
+ * to decide if the return is success or failure and PTR_ERR() to get to the
+ * error code if IS_ERR() is true.
+ *
+ * The possible error return codes are:
+ *	-ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
+ *	-ENOMEM - Not enough memory to map runlist.
+ *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
+ *
+ * Locking: - The runlist must be unlocked on entry.
+ *	    - On failing return, the runlist is unlocked.
+ *	    - On successful return, the runlist is locked.  If @need_write is
+ *	      true, it is locked for writing.  Otherwise it is locked for
+ *	      reading.
+ */
+runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
+		const BOOL need_write)
+{
+	runlist_element *rl;
+	int err = 0;
+	BOOL is_retry = FALSE;
+
+	BUG_ON(!ni);
+	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, lock for %sing.",
+			ni->mft_no, (unsigned long long)vcn,
+			!need_write ? "read" : "writ");
+	BUG_ON(!NInoNonResident(ni));
+	BUG_ON(vcn < 0);
+lock_retry_remap:
+	if (!need_write)
+		down_read(&ni->runlist.lock);
+	else
+		down_write(&ni->runlist.lock);
+	rl = ni->runlist.rl;
+	if (likely(rl && vcn >= rl[0].vcn)) {
+		while (likely(rl->length)) {
+			if (likely(vcn < rl[1].vcn)) {
+				if (likely(rl->lcn >= LCN_HOLE)) {
+					ntfs_debug("Done.");
+					return rl;
+				}
+				break;
+			}
+			rl++;
+		}
+		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
+			if (likely(rl->lcn == LCN_ENOENT))
+				err = -ENOENT;
+			else
+				err = -EIO;
+		}
+	}
+	if (!need_write)
+		up_read(&ni->runlist.lock);
+	else
+		up_write(&ni->runlist.lock);
+	if (!err && !is_retry) {
+		/*
+		 * The @vcn is in an unmapped region, map the runlist and
+		 * retry.
+		 */
+		err = ntfs_map_runlist(ni, vcn);
+		if (likely(!err)) {
+			is_retry = TRUE;
+			goto lock_retry_remap;
+		}
+		/*
+		 * -EINVAL and -ENOENT coming from a failed mapping attempt are
+		 * equivalent to i/o errors for us as they should not happen in
+		 * our code paths.
+		 */
+		if (err == -EINVAL || err == -ENOENT)
+			err = -EIO;
+	} else if (!err)
+		err = -EIO;
+	ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
+	return ERR_PTR(err);
+}
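+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): use
+ * ntfs_find_vcn() as described above to determine whether @vcn of the ntfs
+ * inode @ni lies in a sparse hole.  Returns 1 if it does, 0 if the vcn is
+ * allocated, and -errno on error.  Note that on success ntfs_find_vcn()
+ * leaves the runlist locked for reading (@need_write is FALSE here) so we
+ * have to drop the lock ourselves.
+ */
+static inline int ntfs_example_vcn_is_hole(ntfs_inode *ni, const VCN vcn)
+{
+	runlist_element *rl = ntfs_find_vcn(ni, vcn, FALSE);
+	int is_hole;
+
+	if (IS_ERR(rl))
+		return PTR_ERR(rl);
+	/* The found element has lcn >= LCN_HOLE; check for a hole. */
+	is_hole = (rl->lcn == LCN_HOLE);
+	up_read(&ni->runlist.lock);
+	return is_hole;
+}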
+
+/**
+ * ntfs_attr_find - find (next) attribute in mft record
+ * @type:	attribute type to find
+ * @name:	attribute name to find (optional, i.e. NULL means don't care)
+ * @name_len:	attribute name length (only needed if @name present)
+ * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
+ * @val:	attribute value to find (optional, resident attributes only)
+ * @val_len:	attribute value length
+ * @ctx:	search context with mft record and attribute to search from
+ *
+ * You should not need to call this function directly.  Use ntfs_attr_lookup()
+ * instead.
+ *
+ * ntfs_attr_find() takes a search context @ctx as parameter and searches the
+ * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
+ * attribute of @type, optionally @name and @val.
+ *
+ * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
+ * point to the found attribute.
+ *
+ * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
+ * @ctx->attr will point to the attribute before which the attribute being
+ * searched for would need to be inserted if such an action were to be desired.
+ *
+ * On actual error, ntfs_attr_find() returns -EIO.  In this case @ctx->attr is
+ * undefined and in particular do not rely on it not changing.
+ *
+ * If @ctx->is_first is TRUE, the search begins with @ctx->attr itself.  If it
+ * is FALSE, the search begins after @ctx->attr.
+ *
+ * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
+ * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
+ * @ctx->mrec belongs.  This is so we can get at the ntfs volume and hence at
+ * the upcase table.  If @ic is CASE_SENSITIVE, the comparison is case
+ * sensitive.  When @name is present, @name_len is the @name length in Unicode
+ * characters.
+ *
+ * If @name is not present (NULL), we assume that the unnamed attribute is
+ * being searched for.
+ *
+ * Finally, the resident attribute value @val is looked for, if present.  If
+ * @val is not present (NULL), @val_len is ignored.
+ *
+ * ntfs_attr_find() only searches the specified mft record and it ignores the
+ * presence of an attribute list attribute (unless it is the one being searched
+ * for, obviously).  If you need to take attribute lists into consideration,
+ * use ntfs_attr_lookup() instead (see below).  This also means that you cannot
+ * use ntfs_attr_find() to search for extent records of non-resident
+ * attributes, as extents with lowest_vcn != 0 are usually described by the
+ * attribute list attribute only. - Note that it is possible that the first
+ * extent is only in the attribute list while the last extent is in the base
+ * mft record, so do not rely on being able to find the first extent in the
+ * base mft record.
+ *
+ * Warning: Never use @val when looking for attribute types which can be
+ *	    non-resident as this most likely will result in a crash!
+ */
+static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
+		const u32 name_len, const IGNORE_CASE_BOOL ic,
+		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
+{
+	ATTR_RECORD *a;
+	ntfs_volume *vol = ctx->ntfs_ino->vol;
+	ntfschar *upcase = vol->upcase;
+	u32 upcase_len = vol->upcase_len;
+
+	/*
+	 * Iterate over attributes in mft record starting at @ctx->attr, or the
+	 * attribute following that, if @ctx->is_first is TRUE.
+	 */
+	if (ctx->is_first) {
+		a = ctx->attr;
+		ctx->is_first = FALSE;
+	} else
+		a = (ATTR_RECORD*)((u8*)ctx->attr +
+				le32_to_cpu(ctx->attr->length));
+	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
+		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
+				le32_to_cpu(ctx->mrec->bytes_allocated))
+			break;
+		ctx->attr = a;
+		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
+				a->type == AT_END))
+			return -ENOENT;
+		if (unlikely(!a->length))
+			break;
+		if (a->type != type)
+			continue;
+		/*
+		 * If @name is present, compare the two names.  If @name is
+		 * missing, assume we want an unnamed attribute.
+		 */
+		if (!name) {
+			/* The search failed if the found attribute is named. */
+			if (a->name_length)
+				return -ENOENT;
+		} else if (!ntfs_are_names_equal(name, name_len,
+			    (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
+			    a->name_length, ic, upcase, upcase_len)) {
+			register int rc;
+
+			rc = ntfs_collate_names(name, name_len,
+					(ntfschar*)((u8*)a +
+					le16_to_cpu(a->name_offset)),
+					a->name_length, 1, IGNORE_CASE,
+					upcase, upcase_len);
+			/*
+			 * If @name collates before a->name, there is no
+			 * matching attribute.
+			 */
+			if (rc == -1)
+				return -ENOENT;
+			/* If the strings are not equal, continue search. */
+			if (rc)
+				continue;
+			rc = ntfs_collate_names(name, name_len,
+					(ntfschar*)((u8*)a +
+					le16_to_cpu(a->name_offset)),
+					a->name_length, 1, CASE_SENSITIVE,
+					upcase, upcase_len);
+			if (rc == -1)
+				return -ENOENT;
+			if (rc)
+				continue;
+		}
+		/*
+		 * The names match or @name not present and attribute is
+		 * unnamed.  If no @val specified, we have found the attribute
+		 * and are done.
+		 */
+		if (!val)
+			return 0;
+		/* @val is present; compare values. */
+		else {
+			register int rc;
+
+			rc = memcmp(val, (u8*)a + le16_to_cpu(
+					a->data.resident.value_offset),
+					min_t(u32, val_len, le32_to_cpu(
+					a->data.resident.value_length)));
+			/*
+			 * If @val collates before the current attribute's
+			 * value, there is no matching attribute.
+			 */
+			if (!rc) {
+				register u32 avl;
+
+				avl = le32_to_cpu(
+						a->data.resident.value_length);
+				if (val_len == avl)
+					return 0;
+				if (val_len < avl)
+					return -ENOENT;
+			} else if (rc < 0)
+				return -ENOENT;
+		}
+	}
+	ntfs_error(vol->sb, "Inode is corrupt.  Run chkdsk.");
+	NVolSetErrors(vol);
+	return -EIO;
+}
+
+/**
+ * load_attribute_list - load an attribute list into memory
+ * @vol:		ntfs volume from which to read
+ * @runlist:		runlist of the attribute list
+ * @al_start:		destination buffer
+ * @size:		size of the destination buffer in bytes
+ * @initialized_size:	initialized size of the attribute list
+ *
+ * Walk the runlist @runlist and load all clusters from it copying them into
+ * the linear buffer @al. The maximum number of bytes copied to @al is @size
+ * bytes. Note, @size does not need to be a multiple of the cluster size. If
+ * @initialized_size is less than @size, the region in @al between
+ * @initialized_size and @size will be zeroed and not read from disk.
+ *
+ * Return 0 on success or -errno on error.
+ */
+int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
+		const s64 size, const s64 initialized_size)
+{
+	LCN lcn;
+	u8 *al = al_start;
+	u8 *al_end = al + initialized_size;
+	runlist_element *rl;
+	struct buffer_head *bh;
+	struct super_block *sb;
+	unsigned long block_size;
+	unsigned long block, max_block;
+	int err = 0;
+	unsigned char block_size_bits;
+
+	ntfs_debug("Entering.");
+	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
+			initialized_size > size)
+		return -EINVAL;
+	if (!initialized_size) {
+		memset(al, 0, size);
+		return 0;
+	}
+	sb = vol->sb;
+	block_size = sb->s_blocksize;
+	block_size_bits = sb->s_blocksize_bits;
+	down_read(&runlist->lock);
+	rl = runlist->rl;
+	/* Read all clusters specified by the runlist one run at a time. */
+	while (rl->length) {
+		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
+		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
+				(unsigned long long)rl->vcn,
+				(unsigned long long)lcn);
+		/* The attribute list cannot be sparse. */
+		if (lcn < 0) {
+			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed.  Cannot "
+					"read attribute list.");
+			goto err_out;
+		}
+		block = lcn << vol->cluster_size_bits >> block_size_bits;
+		/* Read the run from device in chunks of block_size bytes. */
+		max_block = block + (rl->length << vol->cluster_size_bits >>
+				block_size_bits);
+		ntfs_debug("max_block = 0x%lx.", max_block);
+		do {
+			ntfs_debug("Reading block = 0x%lx.", block);
+			bh = sb_bread(sb, block);
+			if (!bh) {
+				ntfs_error(sb, "sb_bread() failed. Cannot "
+						"read attribute list.");
+				goto err_out;
+			}
+			if (al + block_size >= al_end)
+				goto do_final;
+			memcpy(al, bh->b_data, block_size);
+			brelse(bh);
+			al += block_size;
+		} while (++block < max_block);
+		rl++;
+	}
+	if (initialized_size < size) {
+initialize:
+		memset(al_start + initialized_size, 0, size - initialized_size);
+	}
+done:
+	up_read(&runlist->lock);
+	return err;
+do_final:
+	if (al < al_end) {
+		/*
+		 * Partial block.
+		 *
+		 * Note: The attribute list can be smaller than its allocation
+		 * by multiple clusters.  This has been encountered by at least
+		 * two people running Windows XP, thus we cannot do any
+		 * truncation sanity checking here. (AIA)
+		 */
+		memcpy(al, bh->b_data, al_end - al);
+		brelse(bh);
+		if (initialized_size < size)
+			goto initialize;
+		goto done;
+	}
+	brelse(bh);
+	/* Real overflow! */
+	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
+			"is truncated.");
+err_out:
+	err = -EIO;
+	goto done;
+}
+
+/**
+ * ntfs_external_attr_find - find an attribute in the attribute list of an inode
+ * @type:	attribute type to find
+ * @name:	attribute name to find (optional, i.e. NULL means don't care)
+ * @name_len:	attribute name length (only needed if @name present)
+ * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
+ * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
+ * @val:	attribute value to find (optional, resident attributes only)
+ * @val_len:	attribute value length
+ * @ctx:	search context with mft record and attribute to search from
+ *
+ * You should not need to call this function directly.  Use ntfs_attr_lookup()
+ * instead.
+ *
+ * Find an attribute by searching the attribute list for the corresponding
+ * attribute list entry.  Having found the entry, map the mft record if the
+ * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
+ * in there and return it.
+ *
+ * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
+ * have been obtained from a call to ntfs_attr_get_search_ctx().  On subsequent
+ * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
+ * then the base inode).
+ *
+ * After finishing with the attribute/mft record you need to call
+ * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
+ * mapped inodes, etc).
+ *
+ * If the attribute is found, ntfs_external_attr_find() returns 0 and
+ * @ctx->attr will point to the found attribute.  @ctx->mrec will point to the
+ * mft record in which @ctx->attr is located and @ctx->al_entry will point to
+ * the attribute list entry for the attribute.
+ *
+ * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
+ * @ctx->attr will point to the attribute in the base mft record before which
+ * the attribute being searched for would need to be inserted if such an action
+ * were to be desired.  @ctx->mrec will point to the mft record in which
+ * @ctx->attr is located and @ctx->al_entry will point to the attribute list
+ * entry of the attribute before which the attribute being searched for would
+ * need to be inserted if such an action were to be desired.
+ *
+ * Thus to insert the not found attribute, one wants to add the attribute to
+ * @ctx->mrec (the base mft record) and if there is not enough space, the
+ * attribute should be placed in a newly allocated extent mft record.  The
+ * attribute list entry for the inserted attribute should be inserted in the
+ * attribute list attribute at @ctx->al_entry.
+ *
+ * On actual error, ntfs_external_attr_find() returns -EIO.  In this case
+ * @ctx->attr is undefined and in particular do not rely on it not changing.
+ */
+static int ntfs_external_attr_find(const ATTR_TYPE type,
+		const ntfschar *name, const u32 name_len,
+		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
+		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
+{
+	ntfs_inode *base_ni, *ni;
+	ntfs_volume *vol;
+	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
+	u8 *al_start, *al_end;
+	ATTR_RECORD *a;
+	ntfschar *al_name;
+	u32 al_name_len;
+	int err = 0;
+	static const char *es = " Unmount and run chkdsk.";
+
+	ni = ctx->ntfs_ino;
+	base_ni = ctx->base_ntfs_ino;
+	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
+	if (!base_ni) {
+		/* First call happens with the base mft record. */
+		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
+		ctx->base_mrec = ctx->mrec;
+	}
+	if (ni == base_ni)
+		ctx->base_attr = ctx->attr;
+	if (type == AT_END)
+		goto not_found;
+	vol = base_ni->vol;
+	al_start = base_ni->attr_list;
+	al_end = al_start + base_ni->attr_list_size;
+	if (!ctx->al_entry)
+		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
+	/*
+	 * Iterate over entries in attribute list starting at @ctx->al_entry,
+	 * or the entry following that, if @ctx->is_first is TRUE.
+	 */
+	if (ctx->is_first) {
+		al_entry = ctx->al_entry;
+		ctx->is_first = FALSE;
+	} else
+		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
+				le16_to_cpu(ctx->al_entry->length));
+	for (;; al_entry = next_al_entry) {
+		/* Out of bounds check. */
+		if ((u8*)al_entry < base_ni->attr_list ||
+				(u8*)al_entry > al_end)
+			break;	/* Inode is corrupt. */
+		ctx->al_entry = al_entry;
+		/* Catch the end of the attribute list. */
+		if ((u8*)al_entry == al_end)
+			goto not_found;
+		if (!al_entry->length)
+			break;
+		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
+				le16_to_cpu(al_entry->length) > al_end)
+			break;
+		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
+				le16_to_cpu(al_entry->length));
+		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
+			goto not_found;
+		if (type != al_entry->type)
+			continue;
+		/*
+		 * If @name is present, compare the two names.  If @name is
+		 * missing, assume we want an unnamed attribute.
+		 */
+		al_name_len = al_entry->name_length;
+		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
+		if (!name) {
+			if (al_name_len)
+				goto not_found;
+		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
+				name_len, ic, vol->upcase, vol->upcase_len)) {
+			register int rc;
+
+			rc = ntfs_collate_names(name, name_len, al_name,
+					al_name_len, 1, IGNORE_CASE,
+					vol->upcase, vol->upcase_len);
+			/*
+			 * If @name collates before al_name, there is no
+			 * matching attribute.
+			 */
+			if (rc == -1)
+				goto not_found;
+			/* If the strings are not equal, continue search. */
+			if (rc)
+				continue;
+			/*
+			 * FIXME: Reverse engineering showed 0, IGNORE_CASE but
+			 * that is inconsistent with ntfs_attr_find().  The
+			 * subsequent rc checks were also different.  Perhaps I
+			 * made a mistake in one of the two.  Need to recheck
+			 * which is correct or at least see what is going on...
+			 * (AIA)
+			 */
+			rc = ntfs_collate_names(name, name_len, al_name,
+					al_name_len, 1, CASE_SENSITIVE,
+					vol->upcase, vol->upcase_len);
+			if (rc == -1)
+				goto not_found;
+			if (rc)
+				continue;
+		}
+		/*
+		 * The names match or @name not present and attribute is
+		 * unnamed.  Now check @lowest_vcn.  Continue search if the
+		 * next attribute list entry still fits @lowest_vcn.  Otherwise
+		 * we have reached the right one or the search has failed.
+		 */
+		if (lowest_vcn && (u8*)next_al_entry >= al_start	    &&
+				(u8*)next_al_entry + 6 < al_end		    &&
+				(u8*)next_al_entry + le16_to_cpu(
+					next_al_entry->length) <= al_end    &&
+				sle64_to_cpu(next_al_entry->lowest_vcn) <=
+					lowest_vcn			    &&
+				next_al_entry->type == al_entry->type	    &&
+				next_al_entry->name_length == al_name_len   &&
+				ntfs_are_names_equal((ntfschar*)((u8*)
+					next_al_entry +
+					next_al_entry->name_offset),
+					next_al_entry->name_length,
+					al_name, al_name_len, CASE_SENSITIVE,
+					vol->upcase, vol->upcase_len))
+			continue;
+		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
+			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
+				ntfs_error(vol->sb, "Found stale mft "
+						"reference in attribute list "
+						"of base inode 0x%lx.%s",
+						base_ni->mft_no, es);
+				err = -EIO;
+				break;
+			}
+		} else { /* Mft references do not match. */
+			/* If there is a mapped record unmap it first. */
+			if (ni != base_ni)
+				unmap_extent_mft_record(ni);
+			/* Do we want the base record back? */
+			if (MREF_LE(al_entry->mft_reference) ==
+					base_ni->mft_no) {
+				ni = ctx->ntfs_ino = base_ni;
+				ctx->mrec = ctx->base_mrec;
+			} else {
+				/* We want an extent record. */
+				ctx->mrec = map_extent_mft_record(base_ni,
+						le64_to_cpu(
+						al_entry->mft_reference), &ni);
+				if (IS_ERR(ctx->mrec)) {
+					ntfs_error(vol->sb, "Failed to map "
+							"extent mft record "
+							"0x%lx of base inode "
+							"0x%lx.%s",
+							MREF_LE(al_entry->
+							mft_reference),
+							base_ni->mft_no, es);
+					err = PTR_ERR(ctx->mrec);
+					if (err == -ENOENT)
+						err = -EIO;
+					/* Cause @ctx to be sanitized below. */
+					ni = NULL;
+					break;
+				}
+				ctx->ntfs_ino = ni;
+			}
+			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
+					le16_to_cpu(ctx->mrec->attrs_offset));
+		}
+		/*
+		 * ctx->ntfs_ino, ctx->mrec, and ctx->attr now point to the
+		 * mft record containing the attribute represented by the
+		 * current al_entry.
+		 */
+		/*
+		 * We could call into ntfs_attr_find() to find the right
+		 * attribute in this mft record but this would be less
+		 * efficient and not quite accurate as ntfs_attr_find()
+		 * ignores, for example, the attribute instance numbers, which
+		 * become important when one plays with attribute lists.  Also,
+		 * because a proper match has been found in the attribute list
+		 * entry above, the comparison can now be optimized.  So it is
+		 * worth re-implementing a simplified ntfs_attr_find() here.
+		 */
+		a = ctx->attr;
+		/*
+		 * Use a manual loop so we can still use break and continue
+		 * with the same meanings as above.
+		 */
+do_next_attr_loop:
+		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
+				le32_to_cpu(ctx->mrec->bytes_allocated))
+			break;
+		if (a->type == AT_END)
+			continue;
+		if (!a->length)
+			break;
+		if (al_entry->instance != a->instance)
+			goto do_next_attr;
+		/*
+		 * If the type and/or the name are mismatched between the
+		 * attribute list entry and the attribute record, there is
+		 * corruption so we break and return error EIO.
+		 */
+		if (al_entry->type != a->type)
+			break;
+		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
+				le16_to_cpu(a->name_offset)), a->name_length,
+				al_name, al_name_len, CASE_SENSITIVE,
+				vol->upcase, vol->upcase_len))
+			break;
+		ctx->attr = a;
+		/*
+		 * If no @val specified or @val specified and it matches, we
+		 * have found it!
+		 */
+		if (!val || (!a->non_resident && le32_to_cpu(
+				a->data.resident.value_length) == val_len &&
+				!memcmp((u8*)a +
+				le16_to_cpu(a->data.resident.value_offset),
+				val, val_len))) {
+			ntfs_debug("Done, found.");
+			return 0;
+		}
+do_next_attr:
+		/* Proceed to the next attribute in the current mft record. */
+		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
+		goto do_next_attr_loop;
+	}
+	if (!err) {
+		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
+				"attribute list attribute.%s", base_ni->mft_no,
+				es);
+		err = -EIO;
+	}
+	if (ni != base_ni) {
+		if (ni)
+			unmap_extent_mft_record(ni);
+		ctx->ntfs_ino = base_ni;
+		ctx->mrec = ctx->base_mrec;
+		ctx->attr = ctx->base_attr;
+	}
+	if (err != -ENOMEM)
+		NVolSetErrors(vol);
+	return err;
+not_found:
+	/*
+	 * If we were looking for AT_END, we reset the search context @ctx and
+	 * use ntfs_attr_find() to seek to the end of the base mft record.
+	 */
+	if (type == AT_END) {
+		ntfs_attr_reinit_search_ctx(ctx);
+		return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
+				ctx);
+	}
+	/*
+	 * The attribute was not found.  Before we return, we want to ensure
+	 * @ctx->mrec and @ctx->attr indicate the position at which the
+	 * attribute should be inserted in the base mft record.  Since we also
+	 * want to preserve @ctx->al_entry we cannot reinitialize the search
+	 * context using ntfs_attr_reinit_search_ctx() as this would set
+	 * @ctx->al_entry to NULL.  Thus we do the necessary bits manually (see
+	 * ntfs_attr_init_search_ctx() below).  Note, we _only_ preserve
+	 * @ctx->al_entry as the remaining fields (base_*) are identical to
+	 * their non base_ counterparts and we cannot set @ctx->base_attr
+	 * correctly yet as we do not know what @ctx->attr will be set to by
+	 * the call to ntfs_attr_find() below.
+	 */
+	if (ni != base_ni)
+		unmap_extent_mft_record(ni);
+	ctx->mrec = ctx->base_mrec;
+	ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
+			le16_to_cpu(ctx->mrec->attrs_offset));
+	ctx->is_first = TRUE;
+	ctx->ntfs_ino = base_ni;
+	ctx->base_ntfs_ino = NULL;
+	ctx->base_mrec = NULL;
+	ctx->base_attr = NULL;
+	/*
+	 * In case there are multiple matches in the base mft record, need to
+	 * keep enumerating until we get an attribute not found response (or
+	 * another error), otherwise we would keep returning the same attribute
+	 * over and over again and all programs using us for enumeration would
+	 * lock up in a tight loop.
+	 */
+	do {
+		err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
+				ctx);
+	} while (!err);
+	ntfs_debug("Done, not found.");
+	return err;
+}
+
+/**
+ * ntfs_attr_lookup - find an attribute in an ntfs inode
+ * @type:	attribute type to find
+ * @name:	attribute name to find (optional, i.e. NULL means don't care)
+ * @name_len:	attribute name length (only needed if @name present)
+ * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
+ * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
+ * @val:	attribute value to find (optional, resident attributes only)
+ * @val_len:	attribute value length
+ * @ctx:	search context with mft record and attribute to search from
+ *
+ * Find an attribute in an ntfs inode.  On first search @ctx->ntfs_ino must
+ * be the base mft record and @ctx must have been obtained from a call to
+ * ntfs_attr_get_search_ctx().
+ *
+ * This function transparently handles attribute lists and @ctx is used to
+ * continue searches from where they were left off.
+ *
+ * After finishing with the attribute/mft record you need to call
+ * ntfs_attr_put_search_ctx() to clean up the search context (unmapping any
+ * mapped inodes, etc).
+ *
+ * Return 0 if the search was successful and -errno if not.
+ *
+ * When 0, @ctx->attr is the found attribute and it is in mft record
+ * @ctx->mrec.  If an attribute list attribute is present, @ctx->al_entry is
+ * the attribute list entry of the found attribute.
+ *
+ * When -ENOENT, @ctx->attr is the attribute which collates just after the
+ * attribute being searched for, i.e. if one wants to add the attribute to the
+ * mft record this is the correct place to insert it into.  If an attribute
+ * list attribute is present, @ctx->al_entry is the attribute list entry which
+ * collates just after the attribute list entry of the attribute being searched
+ * for, i.e. if one wants to add the attribute to the mft record this is the
+ * correct place to insert its attribute list entry into.
+ *
+ * When -errno != -ENOENT, an error occurred during the lookup.  @ctx->attr is
+ * then undefined and in particular you should not rely on it not changing.
+ */
+int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
+		const u32 name_len, const IGNORE_CASE_BOOL ic,
+		const VCN lowest_vcn, const u8 *val, const u32 val_len,
+		ntfs_attr_search_ctx *ctx)
+{
+	ntfs_inode *base_ni;
+
+	ntfs_debug("Entering.");
+	if (ctx->base_ntfs_ino)
+		base_ni = ctx->base_ntfs_ino;
+	else
+		base_ni = ctx->ntfs_ino;
+	/* Sanity check, just for debugging really. */
+	BUG_ON(!base_ni);
+	if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
+		return ntfs_attr_find(type, name, name_len, ic, val, val_len,
+				ctx);
+	return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
+			val, val_len, ctx);
+}
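+
+/*
+ * Example (sketch): a hypothetical caller counting the unnamed $DATA
+ * attributes of the ntfs inode ni whose base mft record m is already mapped.
+ * The names ni, m and nr are made up for illustration; after each successful
+ * lookup, ctx->attr points at the next match inside ctx->mrec.
+ *
+ *	ntfs_attr_search_ctx *ctx = ntfs_attr_get_search_ctx(ni, m);
+ *	int nr = 0, err;
+ *
+ *	if (!ctx)
+ *		return -ENOMEM;
+ *	while (!(err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE,
+ *			0, NULL, 0, ctx)))
+ *		nr++;
+ *	ntfs_attr_put_search_ctx(ctx);
+ *	return err == -ENOENT ? nr : err;
+ */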
+
+/**
+ * ntfs_attr_init_search_ctx - initialize an attribute search context
+ * @ctx:	attribute search context to initialize
+ * @ni:		ntfs inode with which to initialize the search context
+ * @mrec:	mft record with which to initialize the search context
+ *
+ * Initialize the attribute search context @ctx with @ni and @mrec.
+ */
+static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
+		ntfs_inode *ni, MFT_RECORD *mrec)
+{
+	ctx->mrec = mrec;
+	/* Sanity checks are performed elsewhere. */
+	ctx->attr = (ATTR_RECORD*)((u8*)mrec + le16_to_cpu(mrec->attrs_offset));
+	ctx->is_first = TRUE;
+	ctx->ntfs_ino = ni;
+	ctx->al_entry = NULL;
+	ctx->base_ntfs_ino = NULL;
+	ctx->base_mrec = NULL;
+	ctx->base_attr = NULL;
+}
+
+/**
+ * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
+ * @ctx:	attribute search context to reinitialize
+ *
+ * Reinitialize the attribute search context @ctx, unmapping an associated
+ * extent mft record if present, and initialize the search context again.
+ *
+ * This is used when a search for a new attribute is being started to reset
+ * the search context to the beginning.
+ */
+void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
+{
+	if (likely(!ctx->base_ntfs_ino)) {
+		/* No attribute list. */
+		ctx->is_first = TRUE;
+		/* Sanity checks are performed elsewhere. */
+		ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
+				le16_to_cpu(ctx->mrec->attrs_offset));
+		/*
+		 * This needs resetting due to ntfs_external_attr_find() which
+		 * can leave it set despite having zeroed ctx->base_ntfs_ino.
+		 */
+		ctx->al_entry = NULL;
+		return;
+	} /* Attribute list. */
+	if (ctx->ntfs_ino != ctx->base_ntfs_ino)
+		unmap_extent_mft_record(ctx->ntfs_ino);
+	ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
+	return;
+}
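+
+/*
+ * Example (sketch): reusing one search context for two unrelated lookups in
+ * the same inode, as a hypothetical caller might do.  The second lookup must
+ * be preceded by a reinitialization, otherwise it would continue from where
+ * the first one stopped.
+ *
+ *	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
+ *			CASE_SENSITIVE, 0, NULL, 0, ctx);
+ *	...
+ *	ntfs_attr_reinit_search_ctx(ctx);
+ *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
+ *			ctx);
+ */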
+
+/**
+ * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
+ * @ni:		ntfs inode with which to initialize the search context
+ * @mrec:	mft record with which to initialize the search context
+ *
+ * Allocate a new attribute search context, initialize it with @ni and @mrec,
+ * and return it. Return NULL if allocation failed.
+ */
+ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
+{
+	ntfs_attr_search_ctx *ctx;
+
+	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS);
+	if (ctx)
+		ntfs_attr_init_search_ctx(ctx, ni, mrec);
+	return ctx;
+}
+
+/**
+ * ntfs_attr_put_search_ctx - release an attribute search context
+ * @ctx:	attribute search context to free
+ *
+ * Release the attribute search context @ctx, unmapping an associated extent
+ * mft record if present.
+ */
+void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
+{
+	if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
+		unmap_extent_mft_record(ctx->ntfs_ino);
+	kmem_cache_free(ntfs_attr_ctx_cache, ctx);
+	return;
+}
+
+/**
+ * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
+ * @vol:	ntfs volume to which the attribute belongs
+ * @type:	attribute type which to find
+ *
+ * Search for the attribute definition record corresponding to the attribute
+ * @type in the $AttrDef system file.
+ *
+ * Return the attribute type definition record if found and NULL if not found.
+ */
+static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
+		const ATTR_TYPE type)
+{
+	ATTR_DEF *ad;
+
+	BUG_ON(!vol->attrdef);
+	BUG_ON(!type);
+	for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
+			vol->attrdef_size && ad->type; ++ad) {
+		/* We have not found it yet, carry on searching. */
+		if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
+			continue;
+		/* We found the attribute; return it. */
+		if (likely(ad->type == type))
+			return ad;
+		/* We have gone too far already.  No point in continuing. */
+		break;
+	}
+	/* Attribute not found. */
+	ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
+			le32_to_cpu(type));
+	return NULL;
+}
+
+/**
+ * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
+ * @vol:	ntfs volume to which the attribute belongs
+ * @type:	attribute type which to check
+ * @size:	size which to check
+ *
+ * Check whether the @size in bytes is valid for an attribute of @type on the
+ * ntfs volume @vol.  This information is obtained from the $AttrDef system
+ * file.
+ *
+ * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
+ * listed in $AttrDef.
+ */
+int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
+		const s64 size)
+{
+	ATTR_DEF *ad;
+
+	BUG_ON(size < 0);
+	/*
+	 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
+	 * listed in $AttrDef.
+	 */
+	if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
+		return -ERANGE;
+	/* Get the $AttrDef entry for the attribute @type. */
+	ad = ntfs_attr_find_in_attrdef(vol, type);
+	if (unlikely(!ad))
+		return -ENOENT;
+	/* Do the bounds check. */
+	if (((sle64_to_cpu(ad->min_size) > 0) &&
+			size < sle64_to_cpu(ad->min_size)) ||
+			((sle64_to_cpu(ad->max_size) > 0) && size >
+			sle64_to_cpu(ad->max_size)))
+		return -ERANGE;
+	return 0;
+}
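+
+/*
+ * Example (sketch): a hypothetical caller validating a proposed new value
+ * size new_len (an invented variable) before growing an attribute of type
+ * @type on the volume @vol.
+ *
+ *	err = ntfs_attr_size_bounds_check(vol, type, new_len);
+ *	if (err)
+ *		return err;
+ */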
+
+/**
+ * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
+ * @vol:	ntfs volume to which the attribute belongs
+ * @type:	attribute type which to check
+ *
+ * Check whether the attribute of @type on the ntfs volume @vol is allowed to
+ * be non-resident.  This information is obtained from the $AttrDef system
+ * file.
+ *
+ * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, or
+ * -ENOENT if the attribute is not listed in $AttrDef.
+ */
+int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
+{
+	ATTR_DEF *ad;
+
+	/*
+	 * $DATA is always allowed to be non-resident even if $AttrDef does not
+	 * specify this in the flags of the $DATA attribute definition record.
+	 */
+	if (type == AT_DATA)
+		return 0;
+	/* Find the attribute definition record in $AttrDef. */
+	ad = ntfs_attr_find_in_attrdef(vol, type);
+	if (unlikely(!ad))
+		return -ENOENT;
+	/* Check the flags and return the result. */
+	if (ad->flags & CAN_BE_NON_RESIDENT)
+		return 0;
+	return -EPERM;
+}
+
+/**
+ * ntfs_attr_can_be_resident - check if an attribute can be resident
+ * @vol:	ntfs volume to which the attribute belongs
+ * @type:	attribute type which to check
+ *
+ * Check whether the attribute of @type on the ntfs volume @vol is allowed to
+ * be resident.  This information is derived from our ntfs knowledge and may
+ * not be completely accurate, especially when user defined attributes are
+ * present.  Basically we allow everything to be resident except for index
+ * allocation and $EA attributes.
+ *
+ * Return 0 if the attribute is allowed to be resident and -EPERM if not.
+ *
+ * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
+ *	    otherwise windows will not boot (blue screen of death)!  We cannot
+ *	    check for this here as we do not know which inode's $Bitmap is
+ *	    being asked about so the caller needs to special case this.
+ */
+int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
+{
+	if (type != AT_INDEX_ALLOCATION && type != AT_EA)
+		return 0;
+	return -EPERM;
+}
+
+/**
+ * ntfs_attr_record_resize - resize an attribute record
+ * @m:		mft record containing attribute record
+ * @a:		attribute record to resize
+ * @new_size:	new size in bytes to which to resize the attribute record @a
+ *
+ * Resize the attribute record @a, i.e. the resident part of the attribute, in
+ * the mft record @m to @new_size bytes.
+ *
+ * Return 0 on success and -errno on error.  The following error codes are
+ * defined:
+ *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
+ *
+ * Note: On error, no modifications have been performed whatsoever.
+ *
+ * Warning: If you make a record smaller without having copied all the data
+ *	    you are interested in, the data may be overwritten.
+ */
+int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
+{
+	ntfs_debug("Entering for new_size %u.", new_size);
+	/* Align to 8 bytes if it is not already done. */
+	if (new_size & 7)
+		new_size = (new_size + 7) & ~7;
+	/* If the actual attribute length has changed, move things around. */
+	if (new_size != le32_to_cpu(a->length)) {
+		u32 new_muse = le32_to_cpu(m->bytes_in_use) -
+				le32_to_cpu(a->length) + new_size;
+		/* Not enough space in this mft record. */
+		if (new_muse > le32_to_cpu(m->bytes_allocated))
+			return -ENOSPC;
+		/* Move attributes following @a to their new location. */
+		memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
+				le32_to_cpu(m->bytes_in_use) - ((u8*)a -
+				(u8*)m) - le32_to_cpu(a->length));
+		/* Adjust @m to reflect the change in used space. */
+		m->bytes_in_use = cpu_to_le32(new_muse);
+		/* Adjust @a to reflect the new size. */
+		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
+			a->length = cpu_to_le32(new_size);
+	}
+	return 0;
+}
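+
+/*
+ * Example (sketch): a hypothetical caller changing the value of a resident
+ * attribute @a in mft record @m to new_val_len bytes (an invented variable).
+ * The new record size is the value offset plus the new value length; the
+ * eight byte alignment is handled by ntfs_attr_record_resize() itself.
+ *
+ *	err = ntfs_attr_record_resize(m, a, le16_to_cpu(
+ *			a->data.resident.value_offset) + new_val_len);
+ *	if (!err)
+ *		a->data.resident.value_length = cpu_to_le32(new_val_len);
+ */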
+
+/**
+ * ntfs_attr_set - fill (a part of) an attribute with a byte
+ * @ni:		ntfs inode describing the attribute to fill
+ * @ofs:	offset inside the attribute at which to start to fill
+ * @cnt:	number of bytes to fill
+ * @val:	the unsigned 8-bit value with which to fill the attribute
+ *
+ * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
+ * byte offset @ofs inside the attribute with the constant byte @val.
+ *
+ * This function is effectively like memset() applied to an ntfs attribute.
+ *
+ * Return 0 on success and -errno on error.  An error code of -ESPIPE means
+ * that @ofs + @cnt lies beyond the end of the attribute and no write was
+ * performed.
+ */
+int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
+{
+	ntfs_volume *vol = ni->vol;
+	struct address_space *mapping;
+	struct page *page;
+	u8 *kaddr;
+	pgoff_t idx, end;
+	unsigned int start_ofs, end_ofs, size;
+
+	ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
+			(long long)ofs, (long long)cnt, val);
+	BUG_ON(ofs < 0);
+	BUG_ON(cnt < 0);
+	if (!cnt)
+		goto done;
+	mapping = VFS_I(ni)->i_mapping;
+	/* Work out the starting index and page offset. */
+	idx = ofs >> PAGE_CACHE_SHIFT;
+	start_ofs = ofs & ~PAGE_CACHE_MASK;
+	/* Work out the ending index and page offset. */
+	end = ofs + cnt;
+	end_ofs = end & ~PAGE_CACHE_MASK;
+	/* If the end is outside the inode size return -ESPIPE. */
+	if (unlikely(end > VFS_I(ni)->i_size)) {
+		ntfs_error(vol->sb, "Request exceeds end of attribute.");
+		return -ESPIPE;
+	}
+	end >>= PAGE_CACHE_SHIFT;
+	/* If there is a first partial page, need to do it the slow way. */
+	if (start_ofs) {
+		page = read_cache_page(mapping, idx,
+				(filler_t*)mapping->a_ops->readpage, NULL);
+		if (IS_ERR(page)) {
+			ntfs_error(vol->sb, "Failed to read first partial "
+					"page (sync error, index 0x%lx).", idx);
+			return PTR_ERR(page);
+		}
+		wait_on_page_locked(page);
+		if (unlikely(!PageUptodate(page))) {
+			ntfs_error(vol->sb, "Failed to read first partial page "
+					"(async error, index 0x%lx).", idx);
+			page_cache_release(page);
+			return -EIO;
+		}
+		/*
+		 * If the last page is the same as the first page, need to
+		 * limit the write to the end offset.
+		 */
+		size = PAGE_CACHE_SIZE;
+		if (idx == end)
+			size = end_ofs;
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + start_ofs, val, size - start_ofs);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		set_page_dirty(page);
+		page_cache_release(page);
+		if (idx == end)
+			goto done;
+		idx++;
+	}
+	/* Do the whole pages the fast way. */
+	for (; idx < end; idx++) {
+		/* Find or create the current page.  (The page is locked.) */
+		page = grab_cache_page(mapping, idx);
+		if (unlikely(!page)) {
+			ntfs_error(vol->sb, "Insufficient memory to grab "
+					"page (index 0x%lx).", idx);
+			return -ENOMEM;
+		}
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr, val, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		/*
+		 * If the page has buffers, mark them uptodate since buffer
+		 * state and not page state is definitive in 2.6 kernels.
+		 */
+		if (page_has_buffers(page)) {
+			struct buffer_head *bh, *head;
+
+			bh = head = page_buffers(page);
+			do {
+				set_buffer_uptodate(bh);
+			} while ((bh = bh->b_this_page) != head);
+		}
+		/* Now that buffers are uptodate, set the page uptodate, too. */
+		SetPageUptodate(page);
+		/*
+		 * Set the page and all its buffers dirty and mark the inode
+		 * dirty, too.  The VM will write the page later on.
+		 */
+		set_page_dirty(page);
+		/* Finally unlock and release the page. */
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	/* If there is a last partial page, need to do it the slow way. */
+	if (end_ofs) {
+		page = read_cache_page(mapping, idx,
+				(filler_t*)mapping->a_ops->readpage, NULL);
+		if (IS_ERR(page)) {
+			ntfs_error(vol->sb, "Failed to read last partial page "
+					"(sync error, index 0x%lx).", idx);
+			return PTR_ERR(page);
+		}
+		wait_on_page_locked(page);
+		if (unlikely(!PageUptodate(page))) {
+			ntfs_error(vol->sb, "Failed to read last partial page "
+					"(async error, index 0x%lx).", idx);
+			page_cache_release(page);
+			return -EIO;
+		}
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr, val, end_ofs);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
+		set_page_dirty(page);
+		page_cache_release(page);
+	}
+done:
+	ntfs_debug("Done.");
+	return 0;
+}
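+
+/*
+ * Example (sketch): a hypothetical caller zeroing the region between an old
+ * size and a new size of the attribute described by the ntfs inode @ni.  The
+ * variables old_size and new_size are made up for illustration.
+ *
+ *	err = ntfs_attr_set(ni, old_size, new_size - old_size, 0);
+ *	if (err)
+ *		ntfs_error(ni->vol->sb, "Failed to zero extended region.");
+ */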
diff --git a/fs/ntfs/attrib.h b/fs/ntfs/attrib.h
new file mode 100644
index 0000000..e0c2c6c
--- /dev/null
+++ b/fs/ntfs/attrib.h
@@ -0,0 +1,100 @@
+/*
+ * attrib.h - Defines for attribute handling in NTFS Linux kernel driver.
+ *	      Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_ATTRIB_H
+#define _LINUX_NTFS_ATTRIB_H
+
+#include "endian.h"
+#include "types.h"
+#include "layout.h"
+#include "inode.h"
+#include "runlist.h"
+#include "volume.h"
+
+/**
+ * ntfs_attr_search_ctx - used in attribute search functions
+ * @mrec:	buffer containing mft record to search
+ * @attr:	attribute record in @mrec where to begin/continue search
+ * @is_first:	if true ntfs_attr_lookup() begins search with @attr, else after
+ *
+ * Structure must be initialized to zero before the first call to one of the
+ * attribute search functions. Initialize @mrec to point to the mft record to
+ * search, @attr to point to the first attribute within @mrec, and @is_first
+ * to TRUE (the latter two are not necessary if calling the _first()
+ * functions).
+ *
+ * If @is_first is TRUE, the search begins with @attr. If @is_first is FALSE,
+ * the search begins after @attr. This is so that, after the first call to one
+ * of the search attribute functions, we can call the function again, without
+ * any modification of the search context, to automagically get the next
+ * matching attribute.
+ */
+typedef struct {
+	MFT_RECORD *mrec;
+	ATTR_RECORD *attr;
+	BOOL is_first;
+	ntfs_inode *ntfs_ino;
+	ATTR_LIST_ENTRY *al_entry;
+	ntfs_inode *base_ntfs_ino;
+	MFT_RECORD *base_mrec;
+	ATTR_RECORD *base_attr;
+} ntfs_attr_search_ctx;
+
+extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
+
+extern runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
+		const BOOL need_write);
+
+int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
+		const u32 name_len, const IGNORE_CASE_BOOL ic,
+		const VCN lowest_vcn, const u8 *val, const u32 val_len,
+		ntfs_attr_search_ctx *ctx);
+
+extern int load_attribute_list(ntfs_volume *vol, runlist *rl, u8 *al_start,
+		const s64 size, const s64 initialized_size);
+
+static inline s64 ntfs_attr_size(const ATTR_RECORD *a)
+{
+	if (!a->non_resident)
+		return (s64)le32_to_cpu(a->data.resident.value_length);
+	return sle64_to_cpu(a->data.non_resident.data_size);
+}
+
+extern void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx);
+extern ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni,
+		MFT_RECORD *mrec);
+extern void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx);
+
+extern int ntfs_attr_size_bounds_check(const ntfs_volume *vol,
+		const ATTR_TYPE type, const s64 size);
+extern int ntfs_attr_can_be_non_resident(const ntfs_volume *vol,
+		const ATTR_TYPE type);
+extern int ntfs_attr_can_be_resident(const ntfs_volume *vol,
+		const ATTR_TYPE type);
+
+extern int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size);
+
+extern int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt,
+		const u8 val);
+
+#endif /* _LINUX_NTFS_ATTRIB_H */
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
new file mode 100644
index 0000000..12cf2e3
--- /dev/null
+++ b/fs/ntfs/bitmap.c
@@ -0,0 +1,192 @@
+/*
+ * bitmap.c - NTFS kernel bitmap handling.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef NTFS_RW
+
+#include <linux/pagemap.h>
+
+#include "bitmap.h"
+#include "debug.h"
+#include "aops.h"
+#include "ntfs.h"
+
+/**
+ * __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
+ * @vi:			vfs inode describing the bitmap
+ * @start_bit:		first bit to set
+ * @count:		number of bits to set
+ * @value:		value to set the bits to (i.e. 0 or 1)
+ * @is_rollback:	if TRUE this is a rollback operation
+ *
+ * Set @count bits starting at bit @start_bit in the bitmap described by the
+ * vfs inode @vi to @value, where @value is either 0 or 1.
+ *
+ * @is_rollback should always be FALSE, it is for internal use to rollback
+ * errors.  You probably want to use ntfs_bitmap_set_bits_in_run() instead.
+ *
+ * Return 0 on success and -errno on error.
+ */
+int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
+		const s64 count, const u8 value, const BOOL is_rollback)
+{
+	s64 cnt = count;
+	pgoff_t index, end_index;
+	struct address_space *mapping;
+	struct page *page;
+	u8 *kaddr;
+	int pos, len;
+	u8 bit;
+
+	BUG_ON(!vi);
+	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
+			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
+			(unsigned long long)cnt, (unsigned int)value,
+			is_rollback ? " (rollback)" : "");
+	BUG_ON(start_bit < 0);
+	BUG_ON(cnt < 0);
+	BUG_ON(value > 1);
+	/*
+	 * Calculate the indices for the pages containing the first and last
+	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
+	 */
+	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
+	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
+
+	/* Get the page containing the first bit (@start_bit). */
+	mapping = vi->i_mapping;
+	page = ntfs_map_page(mapping, index);
+	if (IS_ERR(page)) {
+		if (!is_rollback)
+			ntfs_error(vi->i_sb, "Failed to map first page (error "
+					"%li), aborting.", PTR_ERR(page));
+		return PTR_ERR(page);
+	}
+	kaddr = page_address(page);
+
+	/* Set @pos to the position of the byte containing @start_bit. */
+	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
+
+	/* Calculate the position of @start_bit in the first byte. */
+	bit = start_bit & 7;
+
+	/* If the first byte is partial, modify the appropriate bits in it. */
+	if (bit) {
+		u8 *byte = kaddr + pos;
+		while ((bit & 7) && cnt--) {
+			if (value)
+				*byte |= 1 << bit++;
+			else
+				*byte &= ~(1 << bit++);
+		}
+		/* If we are done, unmap the page and return success. */
+		if (!cnt)
+			goto done;
+
+		/* Update @pos to the new position. */
+		pos++;
+	}
+	/*
+	 * Depending on @value, modify all remaining whole bytes in the page up
+	 * to @cnt.
+	 */
+	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
+	memset(kaddr + pos, value ? 0xff : 0, len);
+	cnt -= len << 3;
+
+	/* Update @len to point to the first not-done byte in the page. */
+	if (cnt < 8)
+		len += pos;
+
+	/* If we are not in the last page, deal with all subsequent pages. */
+	while (index < end_index) {
+		BUG_ON(cnt <= 0);
+
+		/* Update @index and get the next page. */
+		flush_dcache_page(page);
+		set_page_dirty(page);
+		ntfs_unmap_page(page);
+		page = ntfs_map_page(mapping, ++index);
+		if (IS_ERR(page))
+			goto rollback;
+		kaddr = page_address(page);
+		/*
+		 * Depending on @value, modify all remaining whole bytes in the
+		 * page up to @cnt.
+		 */
+		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
+		memset(kaddr, value ? 0xff : 0, len);
+		cnt -= len << 3;
+	}
+	/*
+	 * The currently mapped page is the last one.  If the last byte is
+	 * partial, modify the appropriate bits in it.  Note, @len is the
+	 * position of the last byte inside the page.
+	 */
+	if (cnt) {
+		u8 *byte;
+
+		BUG_ON(cnt > 7);
+
+		bit = cnt;
+		byte = kaddr + len;
+		while (bit--) {
+			if (value)
+				*byte |= 1 << bit;
+			else
+				*byte &= ~(1 << bit);
+		}
+	}
+done:
+	/* We are done.  Unmap the page and return success. */
+	flush_dcache_page(page);
+	set_page_dirty(page);
+	ntfs_unmap_page(page);
+	ntfs_debug("Done.");
+	return 0;
+rollback:
+	/*
+	 * Current state:
+	 *	- no pages are mapped
+	 *	- @count - @cnt is the number of bits that have been modified
+	 */
+	if (is_rollback)
+		return PTR_ERR(page);
+	if (count != cnt)
+		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
+				value ? 0 : 1, TRUE);
+	else
+		pos = 0;
+	if (!pos) {
+		/* Rollback was successful. */
+		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
+				"%li), aborting.", PTR_ERR(page));
+	} else {
+		/* Rollback failed. */
+		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
+				"%li) and rollback failed (error %i).  "
+				"Aborting and leaving inconsistent metadata.  "
+				"Unmount and run chkdsk.", PTR_ERR(page), pos);
+		NVolSetErrors(NTFS_SB(vi->i_sb));
+	}
+	return PTR_ERR(page);
+}
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/bitmap.h b/fs/ntfs/bitmap.h
new file mode 100644
index 0000000..bb50d6b
--- /dev/null
+++ b/fs/ntfs/bitmap.h
@@ -0,0 +1,118 @@
+/*
+ * bitmap.h - Defines for NTFS kernel bitmap handling.  Part of the Linux-NTFS
+ *	      project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_BITMAP_H
+#define _LINUX_NTFS_BITMAP_H
+
+#ifdef NTFS_RW
+
+#include <linux/fs.h>
+
+#include "types.h"
+
+extern int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
+		const s64 count, const u8 value, const BOOL is_rollback);
+
+/**
+ * ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
+ * @vi:			vfs inode describing the bitmap
+ * @start_bit:		first bit to set
+ * @count:		number of bits to set
+ * @value:		value to set the bits to (i.e. 0 or 1)
+ *
+ * Set @count bits starting at bit @start_bit in the bitmap described by the
+ * vfs inode @vi to @value, where @value is either 0 or 1.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_bitmap_set_bits_in_run(struct inode *vi,
+		const s64 start_bit, const s64 count, const u8 value)
+{
+	return __ntfs_bitmap_set_bits_in_run(vi, start_bit, count, value,
+			FALSE);
+}
+
+/**
+ * ntfs_bitmap_set_run - set a run of bits in a bitmap
+ * @vi:		vfs inode describing the bitmap
+ * @start_bit:	first bit to set
+ * @count:	number of bits to set
+ *
+ * Set @count bits starting at bit @start_bit in the bitmap described by the
+ * vfs inode @vi.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_bitmap_set_run(struct inode *vi, const s64 start_bit,
+		const s64 count)
+{
+	return ntfs_bitmap_set_bits_in_run(vi, start_bit, count, 1);
+}
+
+/**
+ * ntfs_bitmap_clear_run - clear a run of bits in a bitmap
+ * @vi:		vfs inode describing the bitmap
+ * @start_bit:	first bit to clear
+ * @count:	number of bits to clear
+ *
+ * Clear @count bits starting at bit @start_bit in the bitmap described by the
+ * vfs inode @vi.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_bitmap_clear_run(struct inode *vi, const s64 start_bit,
+		const s64 count)
+{
+	return ntfs_bitmap_set_bits_in_run(vi, start_bit, count, 0);
+}
+
+/**
+ * ntfs_bitmap_set_bit - set a bit in a bitmap
+ * @vi:		vfs inode describing the bitmap
+ * @bit:	bit to set
+ *
+ * Set bit @bit in the bitmap described by the vfs inode @vi.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_bitmap_set_bit(struct inode *vi, const s64 bit)
+{
+	return ntfs_bitmap_set_run(vi, bit, 1);
+}
+
+/**
+ * ntfs_bitmap_clear_bit - clear a bit in a bitmap
+ * @vi:		vfs inode describing the bitmap
+ * @bit:	bit to clear
+ *
+ * Clear bit @bit in the bitmap described by the vfs inode @vi.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_bitmap_clear_bit(struct inode *vi, const s64 bit)
+{
+	return ntfs_bitmap_clear_run(vi, bit, 1);
+}
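+
+/*
+ * Example (sketch): a hypothetical caller marking clusters start_lcn to
+ * start_lcn + len - 1 as allocated in the bitmap described by the vfs inode
+ * lcnbmp_vi.  All three names are made up for illustration.
+ *
+ *	err = ntfs_bitmap_set_run(lcnbmp_vi, start_lcn, len);
+ *
+ * and, should a later step fail, undo the allocation with:
+ *
+ *	ntfs_bitmap_clear_run(lcnbmp_vi, start_lcn, len);
+ */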
+
+#endif /* NTFS_RW */
+
+#endif /* defined _LINUX_NTFS_BITMAP_H */
diff --git a/fs/ntfs/collate.c b/fs/ntfs/collate.c
new file mode 100644
index 0000000..4a28ab3
--- /dev/null
+++ b/fs/ntfs/collate.c
@@ -0,0 +1,124 @@
+/*
+ * collate.c - NTFS kernel collation handling.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "collate.h"
+#include "debug.h"
+#include "ntfs.h"
+
+static int ntfs_collate_binary(ntfs_volume *vol,
+		const void *data1, const int data1_len,
+		const void *data2, const int data2_len)
+{
+	int rc;
+
+	ntfs_debug("Entering.");
+	rc = memcmp(data1, data2, min(data1_len, data2_len));
+	if (!rc && (data1_len != data2_len)) {
+		if (data1_len < data2_len)
+			rc = -1;
+		else
+			rc = 1;
+	}
+	ntfs_debug("Done, returning %i", rc);
+	return rc;
+}
+
+static int ntfs_collate_ntofs_ulong(ntfs_volume *vol,
+		const void *data1, const int data1_len,
+		const void *data2, const int data2_len)
+{
+	int rc;
+	u32 d1, d2;
+
+	ntfs_debug("Entering.");
+	// FIXME:  We don't really want to bug here.
+	BUG_ON(data1_len != data2_len);
+	BUG_ON(data1_len != 4);
+	d1 = le32_to_cpup(data1);
+	d2 = le32_to_cpup(data2);
+	if (d1 < d2)
+		rc = -1;
+	else {
+		if (d1 == d2)
+			rc = 0;
+		else
+			rc = 1;
+	}
+	ntfs_debug("Done, returning %i", rc);
+	return rc;
+}
+
+typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int,
+		const void *, const int);
+
+static ntfs_collate_func_t ntfs_do_collate0x0[3] = {
+	ntfs_collate_binary,
+	NULL/*ntfs_collate_file_name*/,
+	NULL/*ntfs_collate_unicode_string*/,
+};
+
+static ntfs_collate_func_t ntfs_do_collate0x1[4] = {
+	ntfs_collate_ntofs_ulong,
+	NULL/*ntfs_collate_ntofs_sid*/,
+	NULL/*ntfs_collate_ntofs_security_hash*/,
+	NULL/*ntfs_collate_ntofs_ulongs*/,
+};
+
+/**
+ * ntfs_collate - collate two data items using a specified collation rule
+ * @vol:	ntfs volume to which the data items belong
+ * @cr:		collation rule to use when comparing the items
+ * @data1:	first data item to collate
+ * @data1_len:	length in bytes of @data1
+ * @data2:	second data item to collate
+ * @data2_len:	length in bytes of @data2
+ *
+ * Collate the two data items @data1 and @data2 using the collation rule @cr
+ * and return -1, 0, or 1 if @data1 is found, respectively, to collate before,
+ * to match, or to collate after @data2.
+ *
+ * For speed we use the collation rule @cr as an index into two tables of
+ * function pointers to call the appropriate collation function.
+ */
+int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
+		const void *data1, const int data1_len,
+		const void *data2, const int data2_len)
+{
+	int i;
+
+	ntfs_debug("Entering.");
+	/*
+	 * FIXME:  At the moment we only support COLLATION_BINARY and
+	 * COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
+	 */
+	BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
+	i = le32_to_cpu(cr);
+	BUG_ON(i < 0);
+	if (i <= 0x02)
+		return ntfs_do_collate0x0[i](vol, data1, data1_len,
+				data2, data2_len);
+	BUG_ON(i < 0x10);
+	i -= 0x10;
+	if (likely(i <= 3))
+		return ntfs_do_collate0x1[i](vol, data1, data1_len,
+				data2, data2_len);
+	BUG();
+	return 0;
+}
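+
+/*
+ * Example (sketch): comparing two little endian 32-bit index keys k1 and k2
+ * (invented variables of type le32) with the COLLATION_NTOFS_ULONG rule.  A
+ * negative result means k1 collates before k2, zero means the keys match,
+ * and a positive result means k1 collates after k2.
+ *
+ *	rc = ntfs_collate(vol, COLLATION_NTOFS_ULONG, &k1, 4, &k2, 4);
+ */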
diff --git a/fs/ntfs/collate.h b/fs/ntfs/collate.h
new file mode 100644
index 0000000..e027f36
--- /dev/null
+++ b/fs/ntfs/collate.h
@@ -0,0 +1,50 @@
+/*
+ * collate.h - Defines for NTFS kernel collation handling.  Part of the
+ *	       Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_COLLATE_H
+#define _LINUX_NTFS_COLLATE_H
+
+#include "types.h"
+#include "volume.h"
+
+static inline BOOL ntfs_is_collation_rule_supported(COLLATION_RULE cr)
+{
+	int i;
+
+	/*
+	 * FIXME:  At the moment we only support COLLATION_BINARY and
+	 * COLLATION_NTOFS_ULONG, so we return false for everything else for
+	 * now.
+	 */
+	if (unlikely(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG))
+		return FALSE;
+	i = le32_to_cpu(cr);
+	if (likely(((i >= 0) && (i <= 0x02)) ||
+			((i >= 0x10) && (i <= 0x13))))
+		return TRUE;
+	return FALSE;
+}
+
+extern int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
+		const void *data1, const int data1_len,
+		const void *data2, const int data2_len);
+
+#endif /* _LINUX_NTFS_COLLATE_H */
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
new file mode 100644
index 0000000..ee5ae70
--- /dev/null
+++ b/fs/ntfs/compress.c
@@ -0,0 +1,957 @@
+/**
+ * compress.c - NTFS kernel compressed attributes handling.
+ *		Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+#include "attrib.h"
+#include "inode.h"
+#include "debug.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_compression_constants - enum of constants used in the compression code
+ */
+typedef enum {
+	/* Token types and access mask. */
+	NTFS_SYMBOL_TOKEN	=	0,
+	NTFS_PHRASE_TOKEN	=	1,
+	NTFS_TOKEN_MASK		=	1,
+
+	/* Compression sub-block constants. */
+	NTFS_SB_SIZE_MASK	=	0x0fff,
+	NTFS_SB_SIZE		=	0x1000,
+	NTFS_SB_IS_COMPRESSED	=	0x8000,
+
+	/*
+	 * The maximum compression block size is by definition 16 * the cluster
+	 * size, with the maximum supported cluster size being 4kiB. Thus the
+	 * maximum compression buffer size is 64kiB, so we use this when
+	 * initializing the compression buffer.
+	 */
+	NTFS_MAX_CB_SIZE	= 64 * 1024,
+} ntfs_compression_constants;
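+
+/*
+ * Example (sketch): decoding the little endian 16-bit header of a
+ * compression sub-block with the constants above, as ntfs_decompress() below
+ * does.  The low twelve bits hold the length of the sub-block data minus
+ * one, so adding three (two for the header word plus the minus one) gives
+ * the offset of the next sub-block header; hdr is an invented pointer.
+ *
+ *	sb_len = (le16_to_cpup(hdr) & NTFS_SB_SIZE_MASK) + 3;
+ *	sb_is_compressed = le16_to_cpup(hdr) & NTFS_SB_IS_COMPRESSED;
+ */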
+
+/**
+ * ntfs_compression_buffer - one buffer for the decompression engine
+ */
+static u8 *ntfs_compression_buffer = NULL;
+
+/**
+ * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
+ */
+static DEFINE_SPINLOCK(ntfs_cb_lock);
+
+/**
+ * allocate_compression_buffers - allocate the decompression buffers
+ *
+ * Caller has to hold the ntfs_lock semaphore.
+ *
+ * Return 0 on success or -ENOMEM if the allocations failed.
+ */
+int allocate_compression_buffers(void)
+{
+	BUG_ON(ntfs_compression_buffer);
+
+	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
+	if (!ntfs_compression_buffer)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * free_compression_buffers - free the decompression buffers
+ *
+ * Caller has to hold the ntfs_lock semaphore.
+ */
+void free_compression_buffers(void)
+{
+	BUG_ON(!ntfs_compression_buffer);
+	vfree(ntfs_compression_buffer);
+	ntfs_compression_buffer = NULL;
+}
+
+/**
+ * zero_partial_compressed_page - zero out of bounds compressed page region
+ */
+static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+{
+	u8 *kp = page_address(page);
+	unsigned int kp_ofs;
+
+	ntfs_debug("Zeroing page region outside initialized size.");
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+		/*
+		 * FIXME: Using clear_page() will become wrong when we get
+		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
+		 */
+		clear_page(kp);
+		return;
+	}
+	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	return;
+}
+
+/**
+ * handle_bounds_compressed_page - test for and handle out of bounds compressed page
+ */
+static inline void handle_bounds_compressed_page(ntfs_inode *ni,
+		struct page *page)
+{
+	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(ni->initialized_size < VFS_I(ni)->i_size))
+		zero_partial_compressed_page(ni, page);
+	return;
+}
+
+/**
+ * ntfs_decompress - decompress a compression block into an array of pages
+ * @dest_pages:		destination array of pages
+ * @dest_index:		current index into @dest_pages (IN/OUT)
+ * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
+ * @dest_max_index:	maximum index into @dest_pages (IN)
+ * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
+ * @xpage:		the target page (-1 if none) (IN)
+ * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
+ * @cb_start:		compression block to decompress (IN)
+ * @cb_size:		size of compression block @cb_start in bytes (IN)
+ *
+ * The caller must have disabled preemption. ntfs_decompress() reenables it when
+ * the critical section is finished.
+ *
+ * This decompresses the compression block @cb_start into the array of
+ * destination pages @dest_pages starting at index @dest_index into @dest_pages
+ * and at offset @dest_pos into the page @dest_pages[@dest_index].
+ *
+ * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
+ * If xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
+ *
+ * @cb_start is a pointer to the compression block which needs decompressing
+ * and @cb_size is the size of @cb_start in bytes (8-64kiB).
+ *
+ * Return 0 if success or -EOVERFLOW on error in the compressed stream.
+ * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
+ * completed during the decompression of the compression block (@cb_start).
+ *
+ * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * unpredictably! You have been warned!
+ *
+ * Note to hackers: This function may not sleep until it has finished accessing
+ * the compression block @cb_start as it is a per-CPU buffer.
+ */
+static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
+		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
+		const int xpage, char *xpage_done, u8 *const cb_start,
+		const u32 cb_size)
+{
+	/*
+	 * Pointers into the compressed data, i.e. the compression block (cb),
+	 * and the therein contained sub-blocks (sb).
+	 */
+	u8 *cb_end = cb_start + cb_size; /* End of cb. */
+	u8 *cb = cb_start;	/* Current position in cb. */
+	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
+	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */
+
+	/* Variables for uncompressed data / destination. */
+	struct page *dp;	/* Current destination page being worked on. */
+	u8 *dp_addr;		/* Current pointer into dp. */
+	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
+	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
+				   NTFS_SB_SIZE). */
+	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
+	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
+				   NTFS_SB_SIZE). */
+
+	/* Variables for tag and token parsing. */
+	u8 tag;			/* Current tag. */
+	int token;		/* Loop counter for the eight tokens in tag. */
+
+	/* Need this because we can't sleep, so need two stages. */
+	int completed_pages[dest_max_index - *dest_index + 1];
+	int nr_completed_pages = 0;
+
+	/* Default error code. */
+	int err = -EOVERFLOW;
+
+	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
+do_next_sb:
+	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
+			cb - cb_start);
+	/*
+	 * Have we reached the end of the compression block or the end of the
+	 * decompressed data?  The latter can happen for example if the current
+	 * position in the compression block is one byte before its end so the
+	 * first two checks do not detect it.
+	 */
+	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
+			(*dest_index == dest_max_index &&
+			*dest_ofs == dest_max_ofs)) {
+		int i;
+
+		ntfs_debug("Completed. Returning success (0).");
+		err = 0;
+return_error:
+		/* We can sleep from now on, so we drop lock. */
+		spin_unlock(&ntfs_cb_lock);
+		/* Second stage: finalize completed pages. */
+		if (nr_completed_pages > 0) {
+			struct page *page = dest_pages[completed_pages[0]];
+			ntfs_inode *ni = NTFS_I(page->mapping->host);
+
+			for (i = 0; i < nr_completed_pages; i++) {
+				int di = completed_pages[i];
+
+				dp = dest_pages[di];
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, dp);
+				flush_dcache_page(dp);
+				kunmap(dp);
+				SetPageUptodate(dp);
+				unlock_page(dp);
+				if (di == xpage)
+					*xpage_done = 1;
+				else
+					page_cache_release(dp);
+				dest_pages[di] = NULL;
+			}
+		}
+		return err;
+	}
+
+	/* Setup offsets for the current sub-block destination. */
+	do_sb_start = *dest_ofs;
+	do_sb_end = do_sb_start + NTFS_SB_SIZE;
+
+	/* Check that we are still within allowed boundaries. */
+	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
+		goto return_overflow;
+
+	/* Does the minimum size of a compressed sb overflow valid range? */
+	if (cb + 6 > cb_end)
+		goto return_overflow;
+
+	/* Setup the current sub-block source pointers and validate range. */
+	cb_sb_start = cb;
+	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
+			+ 3;
+	if (cb_sb_end > cb_end)
+		goto return_overflow;
+
+	/* Get the current destination page. */
+	dp = dest_pages[*dest_index];
+	if (!dp) {
+		/* No page present. Skip decompression of this sub-block. */
+		cb = cb_sb_end;
+
+		/* Advance destination position to next sub-block. */
+		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+		if (!*dest_ofs && (++*dest_index > dest_max_index))
+			goto return_overflow;
+		goto do_next_sb;
+	}
+
+	/* We have a valid destination page. Setup the destination pointers. */
+	dp_addr = (u8*)page_address(dp) + do_sb_start;
+
+	/* Now, we are ready to process the current sub-block (sb). */
+	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
+		ntfs_debug("Found uncompressed sub-block.");
+		/* This sb is not compressed, just copy it into destination. */
+
+		/* Advance source position to first data byte. */
+		cb += 2;
+
+		/* An uncompressed sb must be full size. */
+		if (cb_sb_end - cb != NTFS_SB_SIZE)
+			goto return_overflow;
+
+		/* Copy the block and advance the source position. */
+		memcpy(dp_addr, cb, NTFS_SB_SIZE);
+		cb += NTFS_SB_SIZE;
+
+		/* Advance destination position to next sub-block. */
+		*dest_ofs += NTFS_SB_SIZE;
+		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+finalize_page:
+			/*
+			 * First stage: add current page index to array of
+			 * completed pages.
+			 */
+			completed_pages[nr_completed_pages++] = *dest_index;
+			if (++*dest_index > dest_max_index)
+				goto return_overflow;
+		}
+		goto do_next_sb;
+	}
+	ntfs_debug("Found compressed sub-block.");
+	/* This sb is compressed, decompress it into destination. */
+
+	/* Setup destination pointers. */
+	dp_sb_start = dp_addr;
+	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
+
+	/* Forward to the first tag in the sub-block. */
+	cb += 2;
+do_next_tag:
+	if (cb == cb_sb_end) {
+		/* Check if the decompressed sub-block was not full-length. */
+		if (dp_addr < dp_sb_end) {
+			int nr_bytes = do_sb_end - *dest_ofs;
+
+			ntfs_debug("Filling incomplete sub-block with "
+					"zeroes.");
+			/* Zero remainder and update destination position. */
+			memset(dp_addr, 0, nr_bytes);
+			*dest_ofs += nr_bytes;
+		}
+		/* We have finished the current sub-block. */
+		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+			goto finalize_page;
+		goto do_next_sb;
+	}
+
+	/* Check we are still in range. */
+	if (cb > cb_sb_end || dp_addr > dp_sb_end)
+		goto return_overflow;
+
+	/* Get the next tag and advance to first token. */
+	tag = *cb++;
+
+	/* Parse the eight tokens described by the tag. */
+	for (token = 0; token < 8; token++, tag >>= 1) {
+		u16 lg, pt, length, max_non_overlap;
+		register u16 i;
+		u8 *dp_back_addr;
+
+		/* Check if we are done / still in range. */
+		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
+			break;
+
+		/* Determine token type and parse appropriately.*/
+		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
+			/*
+			 * We have a symbol token, copy the symbol across, and
+			 * advance the source and destination positions.
+			 */
+			*dp_addr++ = *cb++;
+			++*dest_ofs;
+
+			/* Continue with the next token. */
+			continue;
+		}
+
+		/*
+		 * We have a phrase token. Make sure it is not the first tag in
+		 * the sb as this is illegal and would confuse the code below.
+		 */
+		if (dp_addr == dp_sb_start)
+			goto return_overflow;
+
+		/*
+		 * Determine the number of bytes to go back (p) and the number
+		 * of bytes to copy (l). We use an optimized algorithm in which
+		 * we first calculate log2(current destination position in sb),
+		 * which allows determination of l and p in O(1) rather than
+		 * O(n). We just need an arch-optimized log2() function now.
+		 */
+		lg = 0;
+		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
+			lg++;
+
+		/* Get the phrase token into i. */
+		pt = le16_to_cpup((le16*)cb);
+
+		/*
+		 * Calculate starting position of the byte sequence in
+		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
+		 * and make sure we don't go too far back.
+		 */
+		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
+		if (dp_back_addr < dp_sb_start)
+			goto return_overflow;
+
+		/* Now calculate the length of the byte sequence. */
+		length = (pt & (0xfff >> lg)) + 3;
+
+		/* Advance destination position and verify it is in range. */
+		*dest_ofs += length;
+		if (*dest_ofs > do_sb_end)
+			goto return_overflow;
+
+		/* The number of non-overlapping bytes. */
+		max_non_overlap = dp_addr - dp_back_addr;
+
+		if (length <= max_non_overlap) {
+			/* The byte sequence doesn't overlap, just copy it. */
+			memcpy(dp_addr, dp_back_addr, length);
+
+			/* Advance destination pointer. */
+			dp_addr += length;
+		} else {
+			/*
+			 * The byte sequence does overlap, copy non-overlapping
+			 * part and then do a slow byte by byte copy for the
+			 * overlapping part. Also, advance the destination
+			 * pointer.
+			 */
+			memcpy(dp_addr, dp_back_addr, max_non_overlap);
+			dp_addr += max_non_overlap;
+			dp_back_addr += max_non_overlap;
+			length -= max_non_overlap;
+			while (length--)
+				*dp_addr++ = *dp_back_addr++;
+		}
+
+		/* Advance source position and continue with the next token. */
+		cb += 2;
+	}
+
+	/* No tokens left in the current tag. Continue with the next tag. */
+	goto do_next_tag;
+
+return_overflow:
+	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
+	goto return_error;
+}
+
+/**
+ * ntfs_read_compressed_block - read a compressed block into the page cache
+ * @page:	locked page in the compression block(s) we need to read
+ *
+ * When we are called the page has already been verified to be locked and the
+ * attribute is known to be non-resident, not encrypted, but compressed.
+ *
+ * 1. Determine which compression block(s) @page is in.
+ * 2. Get hold of all pages corresponding to this/these compression block(s).
+ * 3. Read the (first) compression block.
+ * 4. Decompress it into the corresponding pages.
+ * 5. Throw the compressed data away and proceed to 3. for the next compression
+ *    block or return success if no more compression blocks left.
+ *
+ * Warning: We have to be careful what we do about existing pages. They might
+ * have been written to so that we would lose data if we were to just overwrite
+ * them with the out-of-date uncompressed data.
+ *
+ * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * the end of the file I think. We need to detect this case and zero the out
+ * of bounds remainder of the page in question and mark it as handled. At the
+ * moment we would just return -EIO on such a page. This bug will only become
+ * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
+ * clusters so is probably not going to be seen by anyone. Still this should
+ * be fixed. (AIA)
+ *
+ * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * handling sparse and compressed cbs. (AIA)
+ *
+ * FIXME: At the moment we don't do any zeroing out in the case that
+ * initialized_size is less than data_size. This should be safe because of the
+ * nature of the compression algorithm used. Just in case we check and output
+ * an error message in read inode if the two sizes are not equal for a
+ * compressed file. (AIA)
+ */
+int ntfs_read_compressed_block(struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	ntfs_inode *ni = NTFS_I(mapping->host);
+	ntfs_volume *vol = ni->vol;
+	struct super_block *sb = vol->sb;
+	runlist_element *rl;
+	unsigned long block_size = sb->s_blocksize;
+	unsigned char block_size_bits = sb->s_blocksize_bits;
+	u8 *cb, *cb_pos, *cb_end;
+	struct buffer_head **bhs;
+	unsigned long offset, index = page->index;
+	u32 cb_size = ni->itype.compressed.block_size;
+	u64 cb_size_mask = cb_size - 1UL;
+	VCN vcn;
+	LCN lcn;
+	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
+	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+			vol->cluster_size_bits;
+	/*
+	 * The first vcn after the last wanted vcn (minimum alignment is again
+	 * PAGE_CACHE_SIZE).
+	 */
+	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+			& ~cb_size_mask) >> vol->cluster_size_bits;
+	/* Number of compression blocks (cbs) in the wanted vcn range. */
+	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
+			>> ni->itype.compressed.block_size_bits;
+	/*
+	 * Number of pages required to store the uncompressed data from all
+	 * compression blocks (cbs) overlapping @page. Due to alignment
+	 * guarantees of start_vcn and end_vcn, no need to round up here.
+	 */
+	unsigned int nr_pages = (end_vcn - start_vcn) <<
+			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+	unsigned int xpage, max_page, cur_page, cur_ofs, i;
+	unsigned int cb_clusters, cb_max_ofs;
+	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
+	struct page **pages;
+	unsigned char xpage_done = 0;
+
+	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
+			"%i.", index, cb_size, nr_pages);
+	/*
+	 * Bad things happen if we get here for anything that is not an
+	 * unnamed $DATA attribute.
+	 */
+	BUG_ON(ni->type != AT_DATA);
+	BUG_ON(ni->name_len);
+
+	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
+
+	/* Allocate memory to store the buffer heads we need. */
+	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
+	bhs = kmalloc(bhs_size, GFP_NOFS);
+
+	if (unlikely(!pages || !bhs)) {
+		kfree(bhs);
+		kfree(pages);
+		SetPageError(page);
+		unlock_page(page);
+		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
+		return -ENOMEM;
+	}
+
+	/*
+	 * We have already been given one page, this is the one we must do.
+	 * Once again, the alignment guarantees keep it simple.
+	 */
+	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+	xpage = index - offset;
+	pages[xpage] = page;
+	/*
+	 * The remaining pages need to be allocated and inserted into the page
+	 * cache, alignment guarantees keep all the below much simpler. (-8
+	 */
+	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT) - offset;
+	if (nr_pages < max_page)
+		max_page = nr_pages;
+	for (i = 0; i < max_page; i++, offset++) {
+		if (i != xpage)
+			pages[i] = grab_cache_page_nowait(mapping, offset);
+		page = pages[i];
+		if (page) {
+			/*
+			 * We only (re)read the page if it isn't already read
+			 * in and/or dirty or we would be losing data or at
+			 * least wasting our time.
+			 */
+			if (!PageDirty(page) && (!PageUptodate(page) ||
+					PageError(page))) {
+				ClearPageError(page);
+				kmap(page);
+				continue;
+			}
+			unlock_page(page);
+			page_cache_release(page);
+			pages[i] = NULL;
+		}
+	}
+
+	/*
+	 * We have the runlist, and all the destination pages we need to fill.
+	 * Now read the first compression block.
+	 */
+	cur_page = 0;
+	cur_ofs = 0;
+	cb_clusters = ni->itype.compressed.block_clusters;
+do_next_cb:
+	nr_cbs--;
+	nr_bhs = 0;
+
+	/* Read all cb buffer heads one cluster at a time. */
+	rl = NULL;
+	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
+			vcn++) {
+		BOOL is_retry = FALSE;
+
+		if (!rl) {
+lock_retry_remap:
+			down_read(&ni->runlist.lock);
+			rl = ni->runlist.rl;
+		}
+		if (likely(rl != NULL)) {
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+		} else
+			lcn = LCN_RL_NOT_MAPPED;
+		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
+				(unsigned long long)vcn,
+				(unsigned long long)lcn);
+		if (lcn < 0) {
+			/*
+			 * When we reach the first sparse cluster we have
+			 * finished with the cb.
+			 */
+			if (lcn == LCN_HOLE)
+				break;
+			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
+				goto rl_err;
+			is_retry = TRUE;
+			/*
+			 * Attempt to map runlist, dropping lock for the
+			 * duration.
+			 */
+			up_read(&ni->runlist.lock);
+			if (!ntfs_map_runlist(ni, vcn))
+				goto lock_retry_remap;
+			goto map_rl_err;
+		}
+		block = lcn << vol->cluster_size_bits >> block_size_bits;
+		/* Read the lcn from device in chunks of block_size bytes. */
+		max_block = block + (vol->cluster_size >> block_size_bits);
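+		/*
+		 * For example, assuming 4 KiB clusters on a device with
+		 * 512-byte blocks, this inner loop queues eight buffer heads
+		 * for each cluster of the compression block.
+		 */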
+		do {
+			ntfs_debug("block = 0x%x.", block);
+			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
+				goto getblk_err;
+			nr_bhs++;
+		} while (++block < max_block);
+	}
+
+	/* Release the lock if we took it. */
+	if (rl)
+		up_read(&ni->runlist.lock);
+
+	/* Setup and initiate io on all buffer heads. */
+	for (i = 0; i < nr_bhs; i++) {
+		struct buffer_head *tbh = bhs[i];
+
+		if (unlikely(test_set_buffer_locked(tbh)))
+			continue;
+		if (unlikely(buffer_uptodate(tbh))) {
+			unlock_buffer(tbh);
+			continue;
+		}
+		get_bh(tbh);
+		tbh->b_end_io = end_buffer_read_sync;
+		submit_bh(READ, tbh);
+	}
+
+	/* Wait for io completion on all buffer heads. */
+	for (i = 0; i < nr_bhs; i++) {
+		struct buffer_head *tbh = bhs[i];
+
+		if (buffer_uptodate(tbh))
+			continue;
+		wait_on_buffer(tbh);
+		/*
+		 * We need an optimization barrier here, otherwise we start
+		 * hitting the below fixup code when accessing a loopback
+		 * mounted ntfs partition. This indicates either there is a
+		 * race condition in the loop driver or, more likely, gcc
+		 * overoptimises the code without the barrier and it doesn't
+		 * do the Right Thing(TM).
+		 */
+		barrier();
+		if (unlikely(!buffer_uptodate(tbh))) {
+			ntfs_warning(vol->sb, "Buffer is unlocked but not "
+					"uptodate! Unplugging the disk queue "
+					"and rescheduling.");
+			get_bh(tbh);
+			blk_run_address_space(mapping);
+			schedule();
+			put_bh(tbh);
+			if (unlikely(!buffer_uptodate(tbh)))
+				goto read_err;
+			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
+		}
+	}
+
+	/*
+	 * Get the compression buffer. We must not sleep any more
+	 * until we are finished with it.
+	 */
+	spin_lock(&ntfs_cb_lock);
+	cb = ntfs_compression_buffer;
+
+	BUG_ON(!cb);
+
+	cb_pos = cb;
+	cb_end = cb + cb_size;
+
+	/* Copy the buffer heads into the contiguous buffer. */
+	for (i = 0; i < nr_bhs; i++) {
+		memcpy(cb_pos, bhs[i]->b_data, block_size);
+		cb_pos += block_size;
+	}
+
+	/* Just a precaution. */
+	if (cb_pos + 2 <= cb + cb_size)
+		*(u16*)cb_pos = 0;
+
+	/* Reset cb_pos back to the beginning. */
+	cb_pos = cb;
+
+	/* We now have both source (if present) and destination. */
+	ntfs_debug("Successfully read the compression block.");
+
+	/* The last page and maximum offset within it for the current cb. */
+	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
+	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
+	cb_max_page >>= PAGE_CACHE_SHIFT;
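+	/*
+	 * Worked example (assuming 4 KiB pages and the common 64 KiB cb_size):
+	 * starting at cur_page == 0 and cur_ofs == 0, the cb ends at byte
+	 * 0x10000, giving cb_max_page == 16 and cb_max_ofs == 0, i.e. the cb
+	 * ends exactly on a page boundary.  A non-zero cb_max_ofs means the
+	 * cb ends part way into page cb_max_page.
+	 */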
+
+	/* Catch end of file inside a compression block. */
+	if (cb_max_page > max_page)
+		cb_max_page = max_page;
+
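+	/*
+	 * The value of vcn after the cluster read loop above tells us what
+	 * kind of cb this is: if the very first cluster was already sparse
+	 * (vcn == start_vcn - cb_clusters), the whole cb is a hole; if the
+	 * loop ran to completion (vcn == start_vcn), every cluster is
+	 * allocated and the cb is stored uncompressed; anything in between
+	 * means the cb ends in a hole and contains compressed data.
+	 */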
+	if (vcn == start_vcn - cb_clusters) {
+		/* Sparse cb, zero out page range overlapping the cb. */
+		ntfs_debug("Found sparse compression block.");
+		/* We can sleep from now on, so we drop lock. */
+		spin_unlock(&ntfs_cb_lock);
+		if (cb_max_ofs)
+			cb_max_page--;
+		for (; cur_page < cb_max_page; cur_page++) {
+			page = pages[cur_page];
+			if (page) {
+				/*
+				 * FIXME: Using clear_page() will become wrong
+				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
+				 * for now there is no problem.
+				 */
+				if (likely(!cur_ofs))
+					clear_page(page_address(page));
+				else
+					memset(page_address(page) + cur_ofs, 0,
+							PAGE_CACHE_SIZE -
+							cur_ofs);
+				flush_dcache_page(page);
+				kunmap(page);
+				SetPageUptodate(page);
+				unlock_page(page);
+				if (cur_page == xpage)
+					xpage_done = 1;
+				else
+					page_cache_release(page);
+				pages[cur_page] = NULL;
+			}
+			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+			cur_ofs = 0;
+			if (cb_pos >= cb_end)
+				break;
+		}
+		/* If we have a partial final page, deal with it now. */
+		if (cb_max_ofs && cb_pos < cb_end) {
+			page = pages[cur_page];
+			if (page)
+				memset(page_address(page) + cur_ofs, 0,
+						cb_max_ofs - cur_ofs);
+			/*
+			 * No need to update cb_pos at this stage:
+			 *	cb_pos += cb_max_ofs - cur_ofs;
+			 */
+			cur_ofs = cb_max_ofs;
+		}
+	} else if (vcn == start_vcn) {
+		/* We can't sleep so we need two stages. */
+		unsigned int cur2_page = cur_page;
+		unsigned int cur_ofs2 = cur_ofs;
+		u8 *cb_pos2 = cb_pos;
+
+		ntfs_debug("Found uncompressed compression block.");
+		/* Uncompressed cb, copy it to the destination pages. */
+		/*
+		 * TODO: As a big optimization, we could detect this case
+		 * before we read all the pages and use block_read_full_page()
+		 * on all full pages instead (we still have to treat partial
+		 * pages especially but at least we are getting rid of the
+		 * synchronous io for the majority of pages).
+		 * Or if we choose not to do the read-ahead/-behind stuff, we
+		 * could just return block_read_full_page(pages[xpage]) as long
+		 * as PAGE_CACHE_SIZE <= cb_size.
+		 */
+		if (cb_max_ofs)
+			cb_max_page--;
+		/* First stage: copy data into destination pages. */
+		for (; cur_page < cb_max_page; cur_page++) {
+			page = pages[cur_page];
+			if (page)
+				memcpy(page_address(page) + cur_ofs, cb_pos,
+						PAGE_CACHE_SIZE - cur_ofs);
+			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+			cur_ofs = 0;
+			if (cb_pos >= cb_end)
+				break;
+		}
+		/* If we have a partial final page, deal with it now. */
+		if (cb_max_ofs && cb_pos < cb_end) {
+			page = pages[cur_page];
+			if (page)
+				memcpy(page_address(page) + cur_ofs, cb_pos,
+						cb_max_ofs - cur_ofs);
+			cb_pos += cb_max_ofs - cur_ofs;
+			cur_ofs = cb_max_ofs;
+		}
+		/* We can sleep from now on, so drop lock. */
+		spin_unlock(&ntfs_cb_lock);
+		/* Second stage: finalize pages. */
+		for (; cur2_page < cb_max_page; cur2_page++) {
+			page = pages[cur2_page];
+			if (page) {
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, page);
+				flush_dcache_page(page);
+				kunmap(page);
+				SetPageUptodate(page);
+				unlock_page(page);
+				if (cur2_page == xpage)
+					xpage_done = 1;
+				else
+					page_cache_release(page);
+				pages[cur2_page] = NULL;
+			}
+			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+			cur_ofs2 = 0;
+			if (cb_pos2 >= cb_end)
+				break;
+		}
+	} else {
+		/* Compressed cb, decompress it into the destination page(s). */
+		unsigned int prev_cur_page = cur_page;
+
+		ntfs_debug("Found compressed compression block.");
+		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
+				cb_max_page, cb_max_ofs, xpage, &xpage_done,
+				cb_pos,	cb_size - (cb_pos - cb));
+		/*
+		 * We can sleep from now on, lock already dropped by
+		 * ntfs_decompress().
+		 */
+		if (err) {
+			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
+					"0x%lx with error code %i. Skipping "
+					"this compression block.",
+					ni->mft_no, -err);
+			/* Release the unfinished pages. */
+			for (; prev_cur_page < cur_page; prev_cur_page++) {
+				page = pages[prev_cur_page];
+				if (page) {
+					if (prev_cur_page == xpage &&
+							!xpage_done)
+						SetPageError(page);
+					flush_dcache_page(page);
+					kunmap(page);
+					unlock_page(page);
+					if (prev_cur_page != xpage)
+						page_cache_release(page);
+					pages[prev_cur_page] = NULL;
+				}
+			}
+		}
+	}
+
+	/* Release the buffer heads. */
+	for (i = 0; i < nr_bhs; i++)
+		brelse(bhs[i]);
+
+	/* Do we have more work to do? */
+	if (nr_cbs)
+		goto do_next_cb;
+
+	/* We no longer need the list of buffer heads. */
+	kfree(bhs);
+
+	/* Clean up if we have any pages left. Should never happen. */
+	for (cur_page = 0; cur_page < max_page; cur_page++) {
+		page = pages[cur_page];
+		if (page) {
+			ntfs_error(vol->sb, "Still have pages left! "
+					"Terminating them with extreme "
+					"prejudice.  Inode 0x%lx, page index "
+					"0x%lx.", ni->mft_no, page->index);
+			if (cur_page == xpage && !xpage_done)
+				SetPageError(page);
+			flush_dcache_page(page);
+			kunmap(page);
+			unlock_page(page);
+			if (cur_page != xpage)
+				page_cache_release(page);
+			pages[cur_page] = NULL;
+		}
+	}
+
+	/* We no longer need the list of pages. */
+	kfree(pages);
+
+	/* If we have completed the requested page, we return success. */
+	if (likely(xpage_done))
+		return 0;
+
+	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
+			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
+	return err < 0 ? err : -EIO;
+
+read_err:
+	ntfs_error(vol->sb, "IO error while reading compressed data.");
+	/* Release the buffer heads. */
+	for (i = 0; i < nr_bhs; i++)
+		brelse(bhs[i]);
+	goto err_out;
+
+map_rl_err:
+	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
+			"compression block.");
+	goto err_out;
+
+rl_err:
+	up_read(&ni->runlist.lock);
+	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
+			"compression block.");
+	goto err_out;
+
+getblk_err:
+	up_read(&ni->runlist.lock);
+	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
+
+err_out:
+	kfree(bhs);
+	for (i = cur_page; i < max_page; i++) {
+		page = pages[i];
+		if (page) {
+			if (i == xpage && !xpage_done)
+				SetPageError(page);
+			flush_dcache_page(page);
+			kunmap(page);
+			unlock_page(page);
+			if (i != xpage)
+				page_cache_release(page);
+		}
+	}
+	kfree(pages);
+	return -EIO;
+}
diff --git a/fs/ntfs/debug.c b/fs/ntfs/debug.c
new file mode 100644
index 0000000..6fb6bb5
--- /dev/null
+++ b/fs/ntfs/debug.c
@@ -0,0 +1,180 @@
+/*
+ * debug.c - NTFS kernel debug support. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "debug.h"
+
+/*
+ * A static buffer to hold the error string being displayed and a spinlock
+ * to protect concurrent accesses to it.
+ */
+static char err_buf[1024];
+static DEFINE_SPINLOCK(err_buf_lock);
+
+/**
+ * __ntfs_warning - output a warning to the syslog
+ * @function:	name of function outputting the warning
+ * @sb:		super block of mounted ntfs filesystem
+ * @fmt:	warning string containing format specifications
+ * @...:	a variable number of arguments specified in @fmt
+ *
+ * Outputs a warning to the syslog for the mounted ntfs filesystem described
+ * by @sb.
+ *
+ * @fmt and the corresponding @... are the printf style format string
+ * containing the warning string and the corresponding format arguments,
+ * respectively.
+ *
+ * @function is the name of the function from which __ntfs_warning is being
+ * called.
+ *
+ * Note, you should be using debug.h::ntfs_warning(@sb, @fmt, @...) instead
+ * as this provides the @function parameter automatically.
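+ *
+ * An illustrative call site (the message text is only an example):
+ *
+ *	ntfs_warning(sb, "Volume has unsupported flags, mounting read-only.");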
+ */
+void __ntfs_warning(const char *function, const struct super_block *sb,
+		const char *fmt, ...)
+{
+	va_list args;
+	int flen = 0;
+
+#ifndef DEBUG
+	if (!printk_ratelimit())
+		return;
+#endif
+	if (function)
+		flen = strlen(function);
+	spin_lock(&err_buf_lock);
+	va_start(args, fmt);
+	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+	va_end(args);
+	if (sb)
+		printk(KERN_ERR "NTFS-fs warning (device %s): %s(): %s\n",
+				sb->s_id, flen ? function : "", err_buf);
+	else
+		printk(KERN_ERR "NTFS-fs warning: %s(): %s\n",
+				flen ? function : "", err_buf);
+	spin_unlock(&err_buf_lock);
+}
+
+/**
+ * __ntfs_error - output an error to the syslog
+ * @function:	name of function outputting the error
+ * @sb:		super block of mounted ntfs filesystem
+ * @fmt:	error string containing format specifications
+ * @...:	a variable number of arguments specified in @fmt
+ *
+ * Outputs an error to the syslog for the mounted ntfs filesystem described
+ * by @sb.
+ *
+ * @fmt and the corresponding @... are the printf style format string
+ * containing the error string and the corresponding format arguments,
+ * respectively.
+ *
+ * @function is the name of the function from which __ntfs_error is being
+ * called.
+ *
+ * Note, you should be using debug.h::ntfs_error(@sb, @fmt, @...) instead
+ * as this provides the @function parameter automatically.
+ */
+void __ntfs_error(const char *function, const struct super_block *sb,
+		const char *fmt, ...)
+{
+	va_list args;
+	int flen = 0;
+
+#ifndef DEBUG
+	if (!printk_ratelimit())
+		return;
+#endif
+	if (function)
+		flen = strlen(function);
+	spin_lock(&err_buf_lock);
+	va_start(args, fmt);
+	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+	va_end(args);
+	if (sb)
+		printk(KERN_ERR "NTFS-fs error (device %s): %s(): %s\n",
+				sb->s_id, flen ? function : "", err_buf);
+	else
+		printk(KERN_ERR "NTFS-fs error: %s(): %s\n",
+				flen ? function : "", err_buf);
+	spin_unlock(&err_buf_lock);
+}
+
+#ifdef DEBUG
+
+/* If 1, output debug messages, and if 0, don't. */
+int debug_msgs = 0;
+
+void __ntfs_debug (const char *file, int line, const char *function,
+		const char *fmt, ...)
+{
+	va_list args;
+	int flen = 0;
+
+	if (!debug_msgs)
+		return;
+	if (function)
+		flen = strlen(function);
+	spin_lock(&err_buf_lock);
+	va_start(args, fmt);
+	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+	va_end(args);
+	printk(KERN_DEBUG "NTFS-fs DEBUG (%s, %d): %s(): %s\n", file, line,
+			flen ? function : "", err_buf);
+	spin_unlock(&err_buf_lock);
+}
+
+/* Dump a runlist. Caller has to provide synchronisation for @rl. */
+void ntfs_debug_dump_runlist(const runlist_element *rl)
+{
+	int i;
+	const char *lcn_str[5] = { "LCN_HOLE         ", "LCN_RL_NOT_MAPPED",
+				   "LCN_ENOENT       ", "LCN_unknown      " };
+
+	if (!debug_msgs)
+		return;
+	printk(KERN_DEBUG "NTFS-fs DEBUG: Dumping runlist (values in hex):\n");
+	if (!rl) {
+		printk(KERN_DEBUG "Run list not present.\n");
+		return;
+	}
+	printk(KERN_DEBUG "VCN              LCN               Run length\n");
+	for (i = 0; ; i++) {
+		LCN lcn = (rl + i)->lcn;
+
+		if (lcn < (LCN)0) {
+			int index = -lcn - 1;
+
+			if (index > -LCN_ENOENT - 1)
+				index = 3;
+			printk(KERN_DEBUG "%-16Lx %s %-16Lx%s\n",
+					(rl + i)->vcn, lcn_str[index],
+					(rl + i)->length, (rl + i)->length ?
+					"" : " (runlist end)");
+		} else
+			printk(KERN_DEBUG "%-16Lx %-16Lx  %-16Lx%s\n",
+					(rl + i)->vcn, (rl + i)->lcn,
+					(rl + i)->length, (rl + i)->length ?
+					"" : " (runlist end)");
+		if (!(rl + i)->length)
+			break;
+	}
+}
+
+#endif
diff --git a/fs/ntfs/debug.h b/fs/ntfs/debug.h
new file mode 100644
index 0000000..8ac37c3
--- /dev/null
+++ b/fs/ntfs/debug.h
@@ -0,0 +1,67 @@
+/*
+ * debug.h - NTFS kernel debug support. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_DEBUG_H
+#define _LINUX_NTFS_DEBUG_H
+
+#include <linux/fs.h>
+
+#include "runlist.h"
+
+#ifdef DEBUG
+
+extern int debug_msgs;
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * ntfs_debug - write a debug level message to syslog
+ * @f:		a printf format string containing the message
+ * @...:	the variables to substitute into @f
+ *
+ * ntfs_debug() writes a DEBUG level message to the syslog but only if the
+ * driver was compiled with -DDEBUG. Otherwise, the call turns into a NOP.
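+ *
+ * An illustrative call, in the style used throughout the driver:
+ *
+ *	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);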
+ */
+static void ntfs_debug(const char *f, ...);
+#endif
+
+extern void __ntfs_debug (const char *file, int line, const char *function,
+	const char *format, ...) __attribute__ ((format (printf, 4, 5)));
+#define ntfs_debug(f, a...)						\
+	__ntfs_debug(__FILE__, __LINE__, __FUNCTION__, f, ##a)
+
+extern void ntfs_debug_dump_runlist(const runlist_element *rl);
+
+#else	/* !DEBUG */
+
+#define ntfs_debug(f, a...)		do {} while (0)
+#define ntfs_debug_dump_runlist(rl)	do {} while (0)
+
+#endif	/* !DEBUG */
+
+extern void __ntfs_warning(const char *function, const struct super_block *sb,
+		const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+#define ntfs_warning(sb, f, a...)	__ntfs_warning(__FUNCTION__, sb, f, ##a)
+
+extern void __ntfs_error(const char *function, const struct super_block *sb,
+		const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+#define ntfs_error(sb, f, a...)		__ntfs_error(__FUNCTION__, sb, f, ##a)
+
+#endif /* _LINUX_NTFS_DEBUG_H */
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
new file mode 100644
index 0000000..9357756
--- /dev/null
+++ b/fs/ntfs/dir.c
@@ -0,0 +1,1569 @@
+/**
+ * dir.c - NTFS kernel directory operations. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include "dir.h"
+#include "aops.h"
+#include "attrib.h"
+#include "mft.h"
+#include "debug.h"
+#include "ntfs.h"
+
+/**
+ * The little endian Unicode string $I30 as a global constant.
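+ * ($I30 is the name under which NTFS stores the index of file names in every
+ * directory, hence it is the index that all the lookups below operate on.)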
+ */
+ntfschar I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
+		const_cpu_to_le16('3'),	const_cpu_to_le16('0'), 0 };
+
+/**
+ * ntfs_lookup_inode_by_name - find an inode in a directory given its name
+ * @dir_ni:	ntfs inode of the directory in which to search for the name
+ * @uname:	Unicode name for which to search in the directory
+ * @uname_len:	length of the name @uname in Unicode characters
+ * @res:	return the found file name if necessary (see below)
+ *
+ * Look for an inode with name @uname in the directory with inode @dir_ni.
+ * ntfs_lookup_inode_by_name() walks the contents of the directory looking for
+ * the Unicode name. If the name is found in the directory, the corresponding
+ * inode number (>= 0) is returned as a mft reference in cpu format, i.e. it
+ * is a 64-bit number containing the sequence number.
+ *
+ * On error, a negative value is returned corresponding to the error code. In
+ * particular if the inode is not found -ENOENT is returned. Note that you
+ * can't just check the return value for being negative, you have to check the
+ * inode number for being negative which you can extract using MREF(return
+ * value).
+ *
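+ * A caller would typically (this is only an illustrative sketch) check the
+ * result along these lines, using MREF_ERR() to recover the negative error
+ * code and MREF() to extract the mft record (inode) number:
+ *
+ *	mref = ntfs_lookup_inode_by_name(dir_ni, uname, uname_len, &name);
+ *	if (IS_ERR_MREF(mref))
+ *		err = MREF_ERR(mref);
+ *	else
+ *		ino = MREF(mref);
+ *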
+ * Note, @uname_len does not include the (optional) terminating NULL character.
+ *
+ * Note, we look for a case sensitive match first but we also look for a case
+ * insensitive match at the same time. If we find a case insensitive match, we
+ * save that for the case that we don't find an exact match, where we return
+ * the case insensitive match and setup @res (which we allocate!) with the mft
+ * reference, the file name type, length and with a copy of the little endian
+ * Unicode file name itself. If we match a file name which is in the DOS name
+ * space, we only return the mft reference and file name type in @res.
+ * ntfs_lookup() then uses this to find the long file name in the inode itself.
+ * This is to avoid polluting the dcache with short file names. We want them to
+ * work but we don't care for how quickly one can access them. This also fixes
+ * the dcache aliasing issues.
+ *
+ * Locking:  - Caller must hold i_sem on the directory.
+ *	     - Each page cache page in the index allocation mapping must be
+ *	       locked whilst being accessed otherwise we may find a corrupt
+ *	       page due to it being under ->writepage at the moment which
+ *	       applies the mst protection fixups before writing out and then
+ *	       removes them again after the write is complete after which it 
+ *	       unlocks the page.
+ */
+MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
+		const int uname_len, ntfs_name **res)
+{
+	ntfs_volume *vol = dir_ni->vol;
+	struct super_block *sb = vol->sb;
+	MFT_RECORD *m;
+	INDEX_ROOT *ir;
+	INDEX_ENTRY *ie;
+	INDEX_ALLOCATION *ia;
+	u8 *index_end;
+	u64 mref;
+	ntfs_attr_search_ctx *ctx;
+	int err, rc;
+	VCN vcn, old_vcn;
+	struct address_space *ia_mapping;
+	struct page *page;
+	u8 *kaddr;
+	ntfs_name *name = NULL;
+
+	BUG_ON(!S_ISDIR(VFS_I(dir_ni)->i_mode));
+	BUG_ON(NInoAttr(dir_ni));
+	/* Get hold of the mft record for the directory. */
+	m = map_mft_record(dir_ni);
+	if (IS_ERR(m)) {
+		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
+				-PTR_ERR(m));
+		return ERR_MREF(PTR_ERR(m));
+	}
+	ctx = ntfs_attr_get_search_ctx(dir_ni, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Find the index root attribute in the mft record. */
+	err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
+			0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT) {
+			ntfs_error(sb, "Index root attribute missing in "
+					"directory inode 0x%lx.",
+					dir_ni->mft_no);
+			err = -EIO;
+		}
+		goto err_out;
+	}
+	/* Get to the index root value (it's been verified in read_inode). */
+	ir = (INDEX_ROOT*)((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ir->index +
+			le32_to_cpu(ir->index.entries_offset));
+	/*
+	 * Loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds checks. */
+		if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end)
+			goto dir_err_out;
+		/*
+		 * The last entry cannot contain a name. It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/*
+		 * We perform a case sensitive comparison and if that matches
+		 * we are done and return the mft reference of the inode (i.e.
+		 * the inode number together with the sequence number for
+		 * consistency checking). We convert it to cpu format before
+		 * returning.
+		 */
+		if (ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
+found_it:
+			/*
+			 * We have a perfect match, so we don't need to care
+			 * about having matched imperfectly before, so we can
+			 * free name and set *res to NULL.
+			 * However, if the perfect match is a short file name,
+			 * we need to signal this through *res, so that
+			 * ntfs_lookup() can fix dcache aliasing issues.
+			 * As an optimization we just reuse an existing
+			 * allocation of *res.
+			 */
+			if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
+				if (!name) {
+					name = kmalloc(sizeof(ntfs_name),
+							GFP_NOFS);
+					if (!name) {
+						err = -ENOMEM;
+						goto err_out;
+					}
+				}
+				name->mref = le64_to_cpu(
+						ie->data.dir.indexed_file);
+				name->type = FILE_NAME_DOS;
+				name->len = 0;
+				*res = name;
+			} else {
+				if (name)
+					kfree(name);
+				*res = NULL;
+			}
+			mref = le64_to_cpu(ie->data.dir.indexed_file);
+			ntfs_attr_put_search_ctx(ctx);
+			unmap_mft_record(dir_ni);
+			return mref;
+		}
+		/*
+		 * For a case insensitive mount, we also perform a case
+		 * insensitive comparison (provided the file name is not in the
+		 * POSIX namespace). If the comparison matches, and the name is
+		 * in the WIN32 namespace, we cache the filename in *res so
+		 * that the caller, ntfs_lookup(), can work on it. If the
+		 * comparison matches, and the name is in the DOS namespace, we
+		 * only cache the mft reference and the file name type (we set
+		 * the name length to zero for simplicity).
+		 */
+		if (!NVolCaseSensitive(vol) &&
+				ie->key.file_name.file_name_type &&
+				ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length,
+				IGNORE_CASE, vol->upcase, vol->upcase_len)) {
+			int name_size = sizeof(ntfs_name);
+			u8 type = ie->key.file_name.file_name_type;
+			u8 len = ie->key.file_name.file_name_length;
+
+			/* Only one case insensitive matching name allowed. */
+			if (name) {
+				ntfs_error(sb, "Found already allocated name "
+						"in phase 1. Please run chkdsk "
+						"and if that doesn't find any "
+						"errors please report you saw "
+						"this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net.");
+				goto dir_err_out;
+			}
+
+			if (type != FILE_NAME_DOS)
+				name_size += len * sizeof(ntfschar);
+			name = kmalloc(name_size, GFP_NOFS);
+			if (!name) {
+				err = -ENOMEM;
+				goto err_out;
+			}
+			name->mref = le64_to_cpu(ie->data.dir.indexed_file);
+			name->type = type;
+			if (type != FILE_NAME_DOS) {
+				name->len = len;
+				memcpy(name->name, ie->key.file_name.file_name,
+						len * sizeof(ntfschar));
+			} else
+				name->len = 0;
+			*res = name;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				IGNORE_CASE, vol->upcase, vol->upcase_len);
+		/*
+		 * If uname collates before the name of the current entry, there
+		 * is definitely no such name in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/* The names are not equal, continue the search. */
+		if (rc)
+			continue;
+		/*
+		 * Names match with case insensitive comparison, now try the
+		 * case sensitive comparison, which is required for proper
+		 * collation.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
+		if (rc == -1)
+			break;
+		if (rc)
+			continue;
+		/*
+		 * Perfect match, this will never happen as the
+		 * ntfs_are_names_equal() call will have gotten a match but we
+		 * still treat it correctly.
+		 */
+		goto found_it;
+	}
+	/*
+	 * We have finished with this index without success. Check for the
+	 * presence of a child node and if not present return -ENOENT, unless
+	 * we have got a matching name cached in name in which case return the
+	 * mft reference associated with it.
+	 */
+	if (!(ie->flags & INDEX_ENTRY_NODE)) {
+		if (name) {
+			ntfs_attr_put_search_ctx(ctx);
+			unmap_mft_record(dir_ni);
+			return name->mref;
+		}
+		ntfs_debug("Entry not found.");
+		err = -ENOENT;
+		goto err_out;
+	} /* Child node present, descend into it. */
+	/* Consistency check: Verify that an index allocation exists. */
+	if (!NInoIndexAllocPresent(dir_ni)) {
+		ntfs_error(sb, "No index allocation attribute but index entry "
+				"requires one. Directory inode 0x%lx is "
+				"corrupt or driver bug.", dir_ni->mft_no);
+		goto err_out;
+	}
+	/* Get the starting vcn of the index_block holding the child node. */
+	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
+	ia_mapping = VFS_I(dir_ni)->i_mapping;
+	/*
+	 * We are done with the index root and the mft record. Release them,
+	 * otherwise we deadlock with ntfs_map_page().
+	 */
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(dir_ni);
+	m = NULL;
+	ctx = NULL;
+descend_into_child_node:
+	/*
+	 * Convert vcn to index into the index allocation attribute in units
+	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * disk if necessary.
+	 */
+	page = ntfs_map_page(ia_mapping, vcn <<
+			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+	if (IS_ERR(page)) {
+		ntfs_error(sb, "Failed to map directory index page, error %ld.",
+				-PTR_ERR(page));
+		err = PTR_ERR(page);
+		goto err_out;
+	}
+	lock_page(page);
+	kaddr = (u8*)page_address(page);
+fast_descend_into_child_node:
+	/* Get to the index allocation block. */
+	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
+			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+	/* Bounds checks. */
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+				"inode 0x%lx or driver bug.", dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* Catch multi sector transfer fixup errors. */
+	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
+		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
+				"corrupt.  Corrupt inode 0x%lx.  Run chkdsk.",
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
+		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
+				"different from expected VCN (0x%llx). "
+				"Directory inode 0x%lx is corrupt or driver "
+				"bug.", (unsigned long long)
+				sle64_to_cpu(ia->index_block_vcn),
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
+			dir_ni->itype.index.block_size) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx has a size (%u) differing from the "
+				"directory specified size (%u). Directory "
+				"inode is corrupt or driver bug.",
+				(unsigned long long)vcn, dir_ni->mft_no,
+				le32_to_cpu(ia->index.allocated_size) + 0x18,
+				dir_ni->itype.index.block_size);
+		goto unm_err_out;
+	}
+	index_end = (u8*)ia + dir_ni->itype.index.block_size;
+	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx crosses page boundary. Impossible! "
+				"Cannot access! This is probably a bug in the "
+				"driver.", (unsigned long long)vcn,
+				dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
+	if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
+		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
+				"inode 0x%lx exceeds maximum size.",
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ia->index +
+			le32_to_cpu(ia->index.entries_offset));
+	/*
+	 * Iterate similar to above big loop but applied to index buffer, thus
+	 * loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds check. */
+		if ((u8*)ie < (u8*)ia || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end) {
+			ntfs_error(sb, "Index entry out of bounds in "
+					"directory inode 0x%lx.",
+					dir_ni->mft_no);
+			goto unm_err_out;
+		}
+		/*
+		 * The last entry cannot contain a name. It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/*
+		 * We perform a case sensitive comparison and if that matches
+		 * we are done and return the mft reference of the inode (i.e.
+		 * the inode number together with the sequence number for
+		 * consistency checking). We convert it to cpu format before
+		 * returning.
+		 */
+		if (ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
+found_it2:
+			/*
+			 * We have a perfect match, so we don't need to care
+			 * about having matched imperfectly before, so we can
+			 * free name and set *res to NULL.
+			 * However, if the perfect match is a short file name,
+			 * we need to signal this through *res, so that
+			 * ntfs_lookup() can fix dcache aliasing issues.
+			 * As an optimization we just reuse an existing
+			 * allocation of *res.
+			 */
+			if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
+				if (!name) {
+					name = kmalloc(sizeof(ntfs_name),
+							GFP_NOFS);
+					if (!name) {
+						err = -ENOMEM;
+						goto unm_err_out;
+					}
+				}
+				name->mref = le64_to_cpu(
+						ie->data.dir.indexed_file);
+				name->type = FILE_NAME_DOS;
+				name->len = 0;
+				*res = name;
+			} else {
+				if (name)
+					kfree(name);
+				*res = NULL;
+			}
+			mref = le64_to_cpu(ie->data.dir.indexed_file);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			return mref;
+		}
+		/*
+		 * For a case insensitive mount, we also perform a case
+		 * insensitive comparison (provided the file name is not in the
+		 * POSIX namespace). If the comparison matches, and the name is
+		 * in the WIN32 namespace, we cache the filename in *res so
+		 * that the caller, ntfs_lookup(), can work on it. If the
+		 * comparison matches, and the name is in the DOS namespace, we
+		 * only cache the mft reference and the file name type (we set
+		 * the name length to zero for simplicity).
+		 */
+		if (!NVolCaseSensitive(vol) &&
+				ie->key.file_name.file_name_type &&
+				ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length,
+				IGNORE_CASE, vol->upcase, vol->upcase_len)) {
+			int name_size = sizeof(ntfs_name);
+			u8 type = ie->key.file_name.file_name_type;
+			u8 len = ie->key.file_name.file_name_length;
+
+			/* Only one case insensitive matching name allowed. */
+			if (name) {
+				ntfs_error(sb, "Found already allocated name "
+						"in phase 2. Please run chkdsk "
+						"and if that doesn't find any "
+						"errors please report you saw "
+						"this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net.");
+				unlock_page(page);
+				ntfs_unmap_page(page);
+				goto dir_err_out;
+			}
+
+			if (type != FILE_NAME_DOS)
+				name_size += len * sizeof(ntfschar);
+			name = kmalloc(name_size, GFP_NOFS);
+			if (!name) {
+				err = -ENOMEM;
+				goto unm_err_out;
+			}
+			name->mref = le64_to_cpu(ie->data.dir.indexed_file);
+			name->type = type;
+			if (type != FILE_NAME_DOS) {
+				name->len = len;
+				memcpy(name->name, ie->key.file_name.file_name,
+						len * sizeof(ntfschar));
+			} else
+				name->len = 0;
+			*res = name;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				IGNORE_CASE, vol->upcase, vol->upcase_len);
+		/*
+		 * If uname collates before the name of the current entry, there
+		 * is definitely no such name in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/* The names are not equal, continue the search. */
+		if (rc)
+			continue;
+		/*
+		 * Names match with case insensitive comparison, now try the
+		 * case sensitive comparison, which is required for proper
+		 * collation.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
+		if (rc == -1)
+			break;
+		if (rc)
+			continue;
+		/*
+		 * Perfect match, this will never happen as the
+		 * ntfs_are_names_equal() call will have gotten a match but we
+		 * still treat it correctly.
+		 */
+		goto found_it2;
+	}
+	/*
+	 * We have finished with this index buffer without success. Check for
+	 * the presence of a child node.
+	 */
+	if (ie->flags & INDEX_ENTRY_NODE) {
+		if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
+			ntfs_error(sb, "Index entry with child node found in "
+					"a leaf node in directory inode 0x%lx.",
+					dir_ni->mft_no);
+			goto unm_err_out;
+		}
+		/* Child node present, descend into it. */
+		old_vcn = vcn;
+		vcn = sle64_to_cpup((sle64*)((u8*)ie +
+				le16_to_cpu(ie->length) - 8));
+		if (vcn >= 0) {
+			/* If vcn is in the same page cache page as old_vcn we
+			 * recycle the mapped page. */
+			if (old_vcn << vol->cluster_size_bits >>
+					PAGE_CACHE_SHIFT == vcn <<
+					vol->cluster_size_bits >>
+					PAGE_CACHE_SHIFT)
+				goto fast_descend_into_child_node;
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto descend_into_child_node;
+		}
+		ntfs_error(sb, "Negative child node vcn in directory inode "
+				"0x%lx.", dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/*
+	 * No child node present, return -ENOENT, unless we have got a matching
+	 * name cached in name in which case return the mft reference
+	 * associated with it.
+	 */
+	if (name) {
+		unlock_page(page);
+		ntfs_unmap_page(page);
+		return name->mref;
+	}
+	ntfs_debug("Entry not found.");
+	err = -ENOENT;
+unm_err_out:
+	unlock_page(page);
+	ntfs_unmap_page(page);
+err_out:
+	if (!err)
+		err = -EIO;
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(dir_ni);
+	if (name) {
+		kfree(name);
+		*res = NULL;
+	}
+	return ERR_MREF(err);
+dir_err_out:
+	ntfs_error(sb, "Corrupt directory.  Aborting lookup.");
+	goto err_out;
+}
+
+#if 0
+
+// TODO: (AIA)
+// The algorithm embedded in this code will be required for the time when we
+// want to support adding of entries to directories, where we require correct
+// collation of file names in order not to cause corruption of the file system.
+
+/**
+ * ntfs_lookup_inode_by_name - find an inode in a directory given its name
+ * @dir_ni:	ntfs inode of the directory in which to search for the name
+ * @uname:	Unicode name for which to search in the directory
+ * @uname_len:	length of the name @uname in Unicode characters
+ *
+ * Look for an inode with name @uname in the directory with inode @dir_ni.
+ * ntfs_lookup_inode_by_name() walks the contents of the directory looking for
+ * the Unicode name. If the name is found in the directory, the corresponding
+ * inode number (>= 0) is returned as a mft reference in cpu format, i.e. it
+ * is a 64-bit number containing the sequence number.
+ *
+ * On error, a negative value is returned corresponding to the error code. In
+ * particular if the inode is not found -ENOENT is returned. Note that you
+ * can't just check the return value for being negative, you have to check the
+ * inode number for being negative which you can extract using MREF(return
+ * value).
+ *
+ * Note, @uname_len does not include the (optional) terminating NULL character.
+ */
+u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
+		const int uname_len)
+{
+	ntfs_volume *vol = dir_ni->vol;
+	struct super_block *sb = vol->sb;
+	MFT_RECORD *m;
+	INDEX_ROOT *ir;
+	INDEX_ENTRY *ie;
+	INDEX_ALLOCATION *ia;
+	u8 *index_end;
+	u64 mref;
+	ntfs_attr_search_ctx *ctx;
+	int err, rc;
+	IGNORE_CASE_BOOL ic;
+	VCN vcn, old_vcn;
+	struct address_space *ia_mapping;
+	struct page *page;
+	u8 *kaddr;
+
+	/* Get hold of the mft record for the directory. */
+	m = map_mft_record(dir_ni);
+	if (IS_ERR(m)) {
+		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
+				-PTR_ERR(m));
+		return ERR_MREF(PTR_ERR(m));
+	}
+	ctx = ntfs_attr_get_search_ctx(dir_ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Find the index root attribute in the mft record. */
+	err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
+			0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT) {
+			ntfs_error(sb, "Index root attribute missing in "
+					"directory inode 0x%lx.",
+					dir_ni->mft_no);
+			err = -EIO;
+		}
+		goto err_out;
+	}
+	/* Get to the index root value (it's been verified in read_inode). */
+	ir = (INDEX_ROOT*)((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ir->index +
+			le32_to_cpu(ir->index.entries_offset));
+	/*
+	 * Loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds checks. */
+		if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end)
+			goto dir_err_out;
+		/*
+		 * The last entry cannot contain a name. It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/*
+		 * If the current entry has a name type of POSIX, the name is
+		 * case sensitive and not otherwise. This has the effect of us
+		 * not being able to access any POSIX file names which collate
+		 * after the non-POSIX one when they only differ in case, but
+		 * anyone doing screwy stuff like that deserves to burn in
+		 * hell... Doing that kind of stuff on NT4 actually causes
+		 * corruption on the partition even when using SP6a and Linux
+		 * is not involved at all.
+		 */
+		ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
+				CASE_SENSITIVE;
+		/*
+		 * If the names match perfectly, we are done and return the
+		 * mft reference of the inode (i.e. the inode number together
+		 * with the sequence number for consistency checking). We
+		 * convert it to cpu format before returning.
+		 */
+		if (ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, ic,
+				vol->upcase, vol->upcase_len)) {
+found_it:
+			mref = le64_to_cpu(ie->data.dir.indexed_file);
+			ntfs_attr_put_search_ctx(ctx);
+			unmap_mft_record(dir_ni);
+			return mref;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				IGNORE_CASE, vol->upcase, vol->upcase_len);
+		/*
+		 * If uname collates before the name of the current entry, there
+		 * is definitely no such name in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/* The names are not equal, continue the search. */
+		if (rc)
+			continue;
+		/*
+		 * Names match with case insensitive comparison, now try the
+		 * case sensitive comparison, which is required for proper
+		 * collation.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
+		if (rc == -1)
+			break;
+		if (rc)
+			continue;
+		/*
+		 * Perfect match, this will never happen as the
+		 * ntfs_are_names_equal() call will have gotten a match but we
+		 * still treat it correctly.
+		 */
+		goto found_it;
+	}
+	/*
+	 * We have finished with this index without success. Check for the
+	 * presence of a child node.
+	 */
+	if (!(ie->flags & INDEX_ENTRY_NODE)) {
+		/* No child node, return -ENOENT. */
+		err = -ENOENT;
+		goto err_out;
+	} /* Child node present, descend into it. */
+	/* Consistency check: Verify that an index allocation exists. */
+	if (!NInoIndexAllocPresent(dir_ni)) {
+		ntfs_error(sb, "No index allocation attribute but index entry "
+				"requires one. Directory inode 0x%lx is "
+				"corrupt or driver bug.", dir_ni->mft_no);
+		goto err_out;
+	}
+	/* Get the starting vcn of the index_block holding the child node. */
+	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
+	ia_mapping = VFS_I(dir_ni)->i_mapping;
+	/*
+	 * We are done with the index root and the mft record. Release them,
+	 * otherwise we deadlock with ntfs_map_page().
+	 */
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(dir_ni);
+	m = NULL;
+	ctx = NULL;
+descend_into_child_node:
+	/*
+	 * Convert vcn to index into the index allocation attribute in units
+	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * disk if necessary.
+	 */
+	page = ntfs_map_page(ia_mapping, vcn <<
+			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+	if (IS_ERR(page)) {
+		ntfs_error(sb, "Failed to map directory index page, error %ld.",
+				-PTR_ERR(page));
+		err = PTR_ERR(page);
+		goto err_out;
+	}
+	lock_page(page);
+	kaddr = (u8*)page_address(page);
+fast_descend_into_child_node:
+	/* Get to the index allocation block. */
+	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
+			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+	/* Bounds checks. */
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+				"inode 0x%lx or driver bug.", dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* Catch multi sector transfer fixup errors. */
+	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
+		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
+				"corrupt.  Corrupt inode 0x%lx.  Run chkdsk.",
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
+		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
+				"different from expected VCN (0x%llx). "
+				"Directory inode 0x%lx is corrupt or driver "
+				"bug.", (unsigned long long)
+				sle64_to_cpu(ia->index_block_vcn),
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
+			dir_ni->itype.index.block_size) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx has a size (%u) differing from the "
+				"directory specified size (%u). Directory "
+				"inode is corrupt or driver bug.",
+				(unsigned long long)vcn, dir_ni->mft_no,
+				le32_to_cpu(ia->index.allocated_size) + 0x18,
+				dir_ni->itype.index.block_size);
+		goto unm_err_out;
+	}
+	index_end = (u8*)ia + dir_ni->itype.index.block_size;
+	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx crosses page boundary. Impossible! "
+				"Cannot access! This is probably a bug in the "
+				"driver.", (unsigned long long)vcn,
+				dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
+	if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
+		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
+				"inode 0x%lx exceeds maximum size.",
+				(unsigned long long)vcn, dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ia->index +
+			le32_to_cpu(ia->index.entries_offset));
+	/*
+	 * Iterate similar to above big loop but applied to index buffer, thus
+	 * loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds check. */
+		if ((u8*)ie < (u8*)ia || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end) {
+			ntfs_error(sb, "Index entry out of bounds in "
+					"directory inode 0x%lx.",
+					dir_ni->mft_no);
+			goto unm_err_out;
+		}
+		/*
+		 * The last entry cannot contain a name. It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/*
+		 * If the current entry has a name type of POSIX, the name is
+		 * case sensitive and not otherwise. This has the effect of us
+		 * not being able to access any POSIX file names which collate
+		 * after the non-POSIX one when they only differ in case, but
+		 * anyone doing screwy stuff like that deserves to burn in
+		 * hell... Doing that kind of stuff on NT4 actually causes
+		 * corruption on the partition even when using SP6a and Linux
+		 * is not involved at all.
+		 */
+		ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
+				CASE_SENSITIVE;
+		/*
+		 * If the names match perfectly, we are done and return the
+		 * mft reference of the inode (i.e. the inode number together
+		 * with the sequence number for consistency checking). We
+		 * convert it to cpu format before returning.
+		 */
+		if (ntfs_are_names_equal(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, ic,
+				vol->upcase, vol->upcase_len)) {
+found_it2:
+			mref = le64_to_cpu(ie->data.dir.indexed_file);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			return mref;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				IGNORE_CASE, vol->upcase, vol->upcase_len);
+		/*
+		 * If uname collates before the name of the current entry, there
+		 * is definitely no such name in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/* The names are not equal, continue the search. */
+		if (rc)
+			continue;
+		/*
+		 * Names match with case insensitive comparison, now try the
+		 * case sensitive comparison, which is required for proper
+		 * collation.
+		 */
+		rc = ntfs_collate_names(uname, uname_len,
+				(ntfschar*)&ie->key.file_name.file_name,
+				ie->key.file_name.file_name_length, 1,
+				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
+		if (rc == -1)
+			break;
+		if (rc)
+			continue;
+		/*
+		 * Perfect match, this will never happen as the
+		 * ntfs_are_names_equal() call will have gotten a match but we
+		 * still treat it correctly.
+		 */
+		goto found_it2;
+	}
+	/*
+	 * We have finished with this index buffer without success. Check for
+	 * the presence of a child node.
+	 */
+	if (ie->flags & INDEX_ENTRY_NODE) {
+		if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
+			ntfs_error(sb, "Index entry with child node found in "
+					"a leaf node in directory inode 0x%lx.",
+					dir_ni->mft_no);
+			goto unm_err_out;
+		}
+		/* Child node present, descend into it. */
+		old_vcn = vcn;
+		vcn = sle64_to_cpup((sle64*)((u8*)ie +
+				le16_to_cpu(ie->length) - 8));
+		if (vcn >= 0) {
+			/* If vcn is in the same page cache page as old_vcn we
+			 * recycle the mapped page. */
+			if (old_vcn << vol->cluster_size_bits >>
+					PAGE_CACHE_SHIFT == vcn <<
+					vol->cluster_size_bits >>
+					PAGE_CACHE_SHIFT)
+				goto fast_descend_into_child_node;
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto descend_into_child_node;
+		}
+		ntfs_error(sb, "Negative child node vcn in directory inode "
+				"0x%lx.", dir_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* No child node, return -ENOENT. */
+	ntfs_debug("Entry not found.");
+	err = -ENOENT;
+unm_err_out:
+	unlock_page(page);
+	ntfs_unmap_page(page);
+err_out:
+	if (!err)
+		err = -EIO;
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(dir_ni);
+	return ERR_MREF(err);
+dir_err_out:
+	ntfs_error(sb, "Corrupt directory. Aborting lookup.");
+	goto err_out;
+}
+
+#endif
+
+/**
+ * ntfs_filldir - ntfs specific filldir method
+ * @vol:	current ntfs volume
+ * @fpos:	position in the directory
+ * @ndir:	ntfs inode of current directory
+ * @ia_page:	page in which the index allocation buffer @ie resides
+ * @ie:		current index entry
+ * @name:	buffer to use for the converted name
+ * @dirent:	vfs filldir callback context
+ * @filldir:	vfs filldir callback
+ *
+ * Convert the Unicode @name to the loaded NLS and pass it to the @filldir
+ * callback.
+ *
+ * If @ia_page is not NULL it is the locked page containing the index
+ * allocation block containing the index entry @ie.
+ *
+ * Note, we drop (and then reacquire) the page lock on @ia_page across the
+ * @filldir() call otherwise we would deadlock with NFSd when it calls ->lookup
+ * since ntfs_lookup() will lock the same page.  As an optimization, we do not
+ * retake the lock if we are returning a non-zero value as ntfs_readdir()
+ * would need to drop the lock immediately anyway.
+ */
+static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
+		ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
+		u8 *name, void *dirent, filldir_t filldir)
+{
+	unsigned long mref;
+	int name_len, rc;
+	unsigned dt_type;
+	FILE_NAME_TYPE_FLAGS name_type;
+
+	name_type = ie->key.file_name.file_name_type;
+	if (name_type == FILE_NAME_DOS) {
+		ntfs_debug("Skipping DOS name space entry.");
+		return 0;
+	}
+	if (MREF_LE(ie->data.dir.indexed_file) == FILE_root) {
+		ntfs_debug("Skipping root directory self reference entry.");
+		return 0;
+	}
+	if (MREF_LE(ie->data.dir.indexed_file) < FILE_first_user &&
+			!NVolShowSystemFiles(vol)) {
+		ntfs_debug("Skipping system file.");
+		return 0;
+	}
+	name_len = ntfs_ucstonls(vol, (ntfschar*)&ie->key.file_name.file_name,
+			ie->key.file_name.file_name_length, &name,
+			NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1);
+	if (name_len <= 0) {
+		ntfs_debug("Skipping unrepresentable file.");
+		return 0;
+	}
+	if (ie->key.file_name.file_attributes &
+			FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT)
+		dt_type = DT_DIR;
+	else
+		dt_type = DT_REG;
+	mref = MREF_LE(ie->data.dir.indexed_file);
+	/*
+	 * Drop the page lock otherwise we deadlock with NFS when it calls
+	 * ->lookup since ntfs_lookup() will lock the same page.
+	 */
+	if (ia_page)
+		unlock_page(ia_page);
+	ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode "
+			"0x%lx, DT_%s.", name, name_len, fpos, mref,
+			dt_type == DT_DIR ? "DIR" : "REG");
+	rc = filldir(dirent, name, name_len, fpos, mref, dt_type);
+	/* Relock the page but not if we are aborting ->readdir. */
+	if (!rc && ia_page)
+		lock_page(ia_page);
+	return rc;
+}
+
+/*
+ * We use the same basic approach as the old NTFS driver, i.e. we parse the
+ * index root entries and then the index allocation entries that are marked
+ * as in use in the index bitmap.
+ *
+ * While this will return the names in random order, this does not matter for
+ * ->readdir; on the other hand, it results in a faster ->readdir.
+ *
+ * VFS calls ->readdir without BKL but with i_sem held. This protects the VFS
+ * parts (e.g. ->f_pos and ->i_size), and it also protects against directory
+ * modifications.
+ *
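+ * The f_pos values handed back to the VFS encode the position as follows (as
+ * the checks against vol->mft_record_size below imply): 0 and 1 are the
+ * emulated "." and ".." entries, values in [2, mft_record_size) are offsets
+ * into the index root attribute, and values of mft_record_size and above are
+ * offsets into the index allocation attribute biased by mft_record_size.
+ *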
+ * Locking:  - Caller must hold i_sem on the directory.
+ *	     - Each page cache page in the index allocation mapping must be
+ *	       locked whilst being accessed otherwise we may find a corrupt
+ *	       page due to it being under ->writepage at the moment which
+ *	       applies the mst protection fixups before writing out and then
+ *	       removes them again after the write is complete after which it 
+ *	       unlocks the page.
+ */
+static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
+	loff_t fpos;
+	struct inode *bmp_vi, *vdir = filp->f_dentry->d_inode;
+	struct super_block *sb = vdir->i_sb;
+	ntfs_inode *ndir = NTFS_I(vdir);
+	ntfs_volume *vol = NTFS_SB(sb);
+	MFT_RECORD *m;
+	INDEX_ROOT *ir = NULL;
+	INDEX_ENTRY *ie;
+	INDEX_ALLOCATION *ia;
+	u8 *name = NULL;
+	int rc, err, ir_pos, cur_bmp_pos;
+	struct address_space *ia_mapping, *bmp_mapping;
+	struct page *bmp_page = NULL, *ia_page = NULL;
+	u8 *kaddr, *bmp, *index_end;
+	ntfs_attr_search_ctx *ctx;
+
+	fpos = filp->f_pos;
+	ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.",
+			vdir->i_ino, fpos);
+	rc = err = 0;
+	/* Are we at end of dir yet? */
+	if (fpos >= vdir->i_size + vol->mft_record_size)
+		goto done;
+	/* Emulate . and .. for all directories. */
+	if (!fpos) {
+		ntfs_debug("Calling filldir for . with len 1, fpos 0x0, "
+				"inode 0x%lx, DT_DIR.", vdir->i_ino);
+		rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR);
+		if (rc)
+			goto done;
+		fpos++;
+	}
+	if (fpos == 1) {
+		ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, "
+				"inode 0x%lx, DT_DIR.",
+				parent_ino(filp->f_dentry));
+		rc = filldir(dirent, "..", 2, fpos,
+				parent_ino(filp->f_dentry), DT_DIR);
+		if (rc)
+			goto done;
+		fpos++;
+	}
+	m = NULL;
+	ctx = NULL;
+	/*
+	 * Allocate a buffer to store the current name being processed
	 * converted to the format determined by the current NLS.
+	 */
+	name = (u8*)kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1,
+			GFP_NOFS);
+	if (unlikely(!name)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Are we jumping straight into the index allocation attribute? */
+	if (fpos >= vol->mft_record_size)
+		goto skip_index_root;
+	/* Get hold of the mft record for the directory. */
+	m = map_mft_record(ndir);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		m = NULL;
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(ndir, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Get the offset into the index root attribute. */
+	ir_pos = (s64)fpos;
+	/* Find the index root attribute in the mft record. */
+	err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
+			0, ctx);
+	if (unlikely(err)) {
+		ntfs_error(sb, "Index root attribute missing in directory "
+				"inode 0x%lx.", vdir->i_ino);
+		goto err_out;
+	}
+	/*
+	 * Copy the index root attribute value to a buffer so that we can put
+	 * the search context and unmap the mft record before calling the
+	 * filldir() callback.  We need to do this because of NFSd which calls
+	 * ->lookup() from its filldir callback() and this causes NTFS to
+	 * deadlock as ntfs_lookup() maps the mft record of the directory and
+	 * we have got it mapped here already.  The only solution is for us to
+	 * unmap the mft record here so that a call to ntfs_lookup() is able to
+	 * map the mft record without deadlocking.
+	 */
+	rc = le32_to_cpu(ctx->attr->data.resident.value_length);
+	ir = (INDEX_ROOT*)kmalloc(rc, GFP_NOFS);
+	if (unlikely(!ir)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Copy the index root value (it has been verified in read_inode). */
+	memcpy(ir, (u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset), rc);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(ndir);
+	ctx = NULL;
+	m = NULL;
+	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ir->index +
+			le32_to_cpu(ir->index.entries_offset));
+	/*
+	 * Loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry or until filldir tells us it has had enough
+	 * or signals an error (both covered by the rc test).
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
+		/* Bounds checks. */
+		if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end))
+			goto err_out;
+		/* The last entry cannot contain a name. */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/* Skip index root entry if continuing previous readdir. */
+		if (ir_pos > (u8*)ie - (u8*)ir)
+			continue;
+		/* Advance the position even if going to skip the entry. */
+		fpos = (u8*)ie - (u8*)ir;
+		/* Submit the name to the filldir callback. */
+		rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent,
+				filldir);
+		if (rc) {
+			kfree(ir);
+			goto abort;
+		}
+	}
+	/* We are done with the index root and can free the buffer. */
+	kfree(ir);
+	ir = NULL;
+	/* If there is no index allocation attribute we are finished. */
+	if (!NInoIndexAllocPresent(ndir))
+		goto EOD;
+	/* Advance fpos to the beginning of the index allocation. */
+	fpos = vol->mft_record_size;
+skip_index_root:
+	kaddr = NULL;
+	prev_ia_pos = -1LL;
+	/* Get the offset into the index allocation attribute. */
+	ia_pos = (s64)fpos - vol->mft_record_size;
+	ia_mapping = vdir->i_mapping;
+	bmp_vi = ndir->itype.index.bmp_ino;
+	if (unlikely(!bmp_vi)) {
+		ntfs_debug("Inode 0x%lx, regetting index bitmap.", vdir->i_ino);
+		bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
+		if (IS_ERR(bmp_vi)) {
+			ntfs_error(sb, "Failed to get bitmap attribute.");
+			err = PTR_ERR(bmp_vi);
+			goto err_out;
+		}
+		ndir->itype.index.bmp_ino = bmp_vi;
+	}
+	bmp_mapping = bmp_vi->i_mapping;
+	/* Get the starting bitmap bit position and sanity check it. */
+	bmp_pos = ia_pos >> ndir->itype.index.block_size_bits;
+	if (unlikely(bmp_pos >> 3 >= bmp_vi->i_size)) {
+		ntfs_error(sb, "Current index allocation position exceeds "
+				"index bitmap size.");
+		goto err_out;
+	}
+	/* Get the starting bit position in the current bitmap page. */
+	cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1);
+	bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1);
+get_next_bmp_page:
+	ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
+			(unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
+			(unsigned long long)bmp_pos &
+			(unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
+	bmp_page = ntfs_map_page(bmp_mapping,
+			bmp_pos >> (3 + PAGE_CACHE_SHIFT));
+	if (IS_ERR(bmp_page)) {
+		ntfs_error(sb, "Reading index bitmap failed.");
+		err = PTR_ERR(bmp_page);
+		bmp_page = NULL;
+		goto err_out;
+	}
+	bmp = (u8*)page_address(bmp_page);
+	/* Find next index block in use. */
+	while (!(bmp[cur_bmp_pos >> 3] & (1 << (cur_bmp_pos & 7)))) {
+find_next_index_buffer:
+		cur_bmp_pos++;
+		/*
+		 * If we have reached the end of the bitmap page, get the next
+		 * page, and put away the old one.
+		 */
+		if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) {
+			ntfs_unmap_page(bmp_page);
+			bmp_pos += PAGE_CACHE_SIZE * 8;
+			cur_bmp_pos = 0;
+			goto get_next_bmp_page;
+		}
+		/* If we have reached the end of the bitmap, we are done. */
+		if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= vdir->i_size))
+			goto unm_EOD;
+		ia_pos = (bmp_pos + cur_bmp_pos) <<
+				ndir->itype.index.block_size_bits;
+	}
+	ntfs_debug("Handling index buffer 0x%llx.",
+			(unsigned long long)bmp_pos + cur_bmp_pos);
+	/* If the current index buffer is in the same page we reuse the page. */
+	if ((prev_ia_pos & PAGE_CACHE_MASK) != (ia_pos & PAGE_CACHE_MASK)) {
+		prev_ia_pos = ia_pos;
+		if (likely(ia_page != NULL)) {
+			unlock_page(ia_page);
+			ntfs_unmap_page(ia_page);
+		}
+		/*
+		 * Map the page cache page containing the current ia_pos,
+		 * reading it from disk if necessary.
+		 */
+		ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT);
+		if (IS_ERR(ia_page)) {
+			ntfs_error(sb, "Reading index allocation data failed.");
+			err = PTR_ERR(ia_page);
+			ia_page = NULL;
+			goto err_out;
+		}
+		lock_page(ia_page);
+		kaddr = (u8*)page_address(ia_page);
+	}
+	/* Get the current index buffer. */
+	ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
+			~(s64)(ndir->itype.index.block_size - 1)));
+	/* Bounds checks. */
+	if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+				"inode 0x%lx or driver bug.", vdir->i_ino);
+		goto err_out;
+	}
+	/* Catch multi sector transfer fixup errors. */
+	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
+		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
+				"corrupt.  Corrupt inode 0x%lx.  Run chkdsk.",
+				(unsigned long long)ia_pos >>
+				ndir->itype.index.vcn_size_bits, vdir->i_ino);
+		goto err_out;
+	}
+	if (unlikely(sle64_to_cpu(ia->index_block_vcn) != (ia_pos &
+			~(s64)(ndir->itype.index.block_size - 1)) >>
+			ndir->itype.index.vcn_size_bits)) {
+		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
+				"different from expected VCN (0x%llx). "
+				"Directory inode 0x%lx is corrupt or driver "
+				"bug. ", (unsigned long long)
+				sle64_to_cpu(ia->index_block_vcn),
+				(unsigned long long)ia_pos >>
+				ndir->itype.index.vcn_size_bits, vdir->i_ino);
+		goto err_out;
+	}
+	if (unlikely(le32_to_cpu(ia->index.allocated_size) + 0x18 !=
+			ndir->itype.index.block_size)) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx has a size (%u) differing from the "
+				"directory specified size (%u). Directory "
+				"inode is corrupt or driver bug.",
+				(unsigned long long)ia_pos >>
+				ndir->itype.index.vcn_size_bits, vdir->i_ino,
+				le32_to_cpu(ia->index.allocated_size) + 0x18,
+				ndir->itype.index.block_size);
+		goto err_out;
+	}
+	index_end = (u8*)ia + ndir->itype.index.block_size;
+	if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
+				"0x%lx crosses page boundary. Impossible! "
+				"Cannot access! This is probably a bug in the "
+				"driver.", (unsigned long long)ia_pos >>
+				ndir->itype.index.vcn_size_bits, vdir->i_ino);
+		goto err_out;
+	}
+	ia_start = ia_pos & ~(s64)(ndir->itype.index.block_size - 1);
+	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
+	if (unlikely(index_end > (u8*)ia + ndir->itype.index.block_size)) {
+		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
+				"inode 0x%lx exceeds maximum size.",
+				(unsigned long long)ia_pos >>
+				ndir->itype.index.vcn_size_bits, vdir->i_ino);
+		goto err_out;
+	}
+	/* The first index entry in this index buffer. */
+	ie = (INDEX_ENTRY*)((u8*)&ia->index +
+			le32_to_cpu(ia->index.entries_offset));
+	/*
+	 * Loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry or until filldir tells us it has had enough
+	 * or signals an error (both covered by the rc test).
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		ntfs_debug("In index allocation, offset 0x%llx.",
+				(unsigned long long)ia_start +
+				(unsigned long long)((u8*)ie - (u8*)ia));
+		/* Bounds checks. */
+		if (unlikely((u8*)ie < (u8*)ia || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->key_length) >
+				index_end))
+			goto err_out;
+		/* The last entry cannot contain a name. */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/* Skip index block entry if continuing previous readdir. */
+		if (ia_pos - ia_start > (u8*)ie - (u8*)ia)
+			continue;
+		/* Advance the position even if going to skip the entry. */
+		fpos = (u8*)ie - (u8*)ia +
+				(sle64_to_cpu(ia->index_block_vcn) <<
+				ndir->itype.index.vcn_size_bits) +
+				vol->mft_record_size;
+		/*
+		 * Submit the name to the @filldir callback.  Note,
+		 * ntfs_filldir() drops the lock on @ia_page but it retakes it
+		 * before returning, unless a non-zero value is returned in
+		 * which case the page is left unlocked.
+		 */
+		rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent,
+				filldir);
+		if (rc) {
+			/* @ia_page is already unlocked in this case. */
+			ntfs_unmap_page(ia_page);
+			ntfs_unmap_page(bmp_page);
+			goto abort;
+		}
+	}
+	goto find_next_index_buffer;
+unm_EOD:
+	if (ia_page) {
+		unlock_page(ia_page);
+		ntfs_unmap_page(ia_page);
+	}
+	ntfs_unmap_page(bmp_page);
+EOD:
+	/* We are finished, set fpos to EOD. */
+	fpos = vdir->i_size + vol->mft_record_size;
+abort:
+	kfree(name);
+done:
+#ifdef DEBUG
+	if (!rc)
+		ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos);
+	else
+		ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.",
+				rc, fpos);
+#endif
+	filp->f_pos = fpos;
+	return 0;
+err_out:
+	if (bmp_page)
+		ntfs_unmap_page(bmp_page);
+	if (ia_page) {
+		unlock_page(ia_page);
+		ntfs_unmap_page(ia_page);
+	}
+	if (ir)
+		kfree(ir);
+	if (name)
+		kfree(name);
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(ndir);
+	if (!err)
+		err = -EIO;
+	ntfs_debug("Failed. Returning error code %i.", -err);
+	filp->f_pos = fpos;
+	return err;
+}
+
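+/*
+ * Note on the f_pos values used above (a worked illustration only, it adds no
+ * functionality; the concrete sizes below are assumptions for the example):
+ *
+ *	fpos 0				emulated "." entry
+ *	fpos 1				emulated ".." entry
+ *	1 < fpos < mft_record_size	byte offset of the entry within the
+ *					copied index root attribute value
+ *	fpos >= mft_record_size		mft_record_size + (index block VCN <<
+ *					vcn_size_bits) + byte offset of the
+ *					entry within that index block
+ *	fpos >= i_size + mft_record_size	end of directory
+ *
+ * For example, assuming a 1024 byte mft record and 4096 byte index blocks
+ * with vcn_size_bits == 12, the entry at byte offset 0x58 inside the index
+ * block starting at VCN 2 is returned with
+ * fpos = 1024 + (2 << 12) + 0x58 = 0x2458.
+ */
+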
+/**
+ * ntfs_dir_open - called when an inode is about to be opened
+ * @vi:		inode to be opened
+ * @filp:	file structure describing the inode
+ *
+ * Limit directory size to the page cache limit on architectures where unsigned
+ * long is 32 bits.  This is the most we can do for now without overflowing the
+ * page cache page index.  Doing it this way means we don't run into problems
+ * with existing directories that are too large.  It would be better to allow
+ * the user to read the accessible part of the directory but I doubt very much
+ * anyone is going to hit this check on a 32-bit architecture, so there is no
+ * point in adding the extra complexity required to support this.
+ *
+ * On 64-bit architectures, the check is hopefully optimized away by the
+ * compiler.
+ */
+static int ntfs_dir_open(struct inode *vi, struct file *filp)
+{
+	if (sizeof(unsigned long) < 8) {
+		if (vi->i_size > MAX_LFS_FILESIZE)
+			return -EFBIG;
+	}
+	return 0;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_dir_fsync - sync a directory to disk
+ * @filp:	directory to be synced
+ * @dentry:	dentry describing the directory to sync
+ * @datasync:	if non-zero only flush user data and not metadata
+ *
+ * Data integrity sync of a directory to disk.  Used for fsync, fdatasync, and
+ * msync system calls.  This function is based on file.c::ntfs_file_fsync().
+ *
+ * Write the mft record and all associated extent mft records as well as the
+ * $INDEX_ALLOCATION and $BITMAP attributes and then sync the block device.
+ *
+ * If @datasync is true, we do not wait on the inode(s) to be written out
+ * but we always wait on the page cache pages to be written out.
+ *
+ * Note: In the past @filp could be NULL so we ignore it as we don't need it
+ * anyway.
+ *
+ * Locking: Caller must hold i_sem on the inode.
+ *
+ * TODO: We should probably also write all attribute/index inodes associated
+ * with this inode but since we have no simple way of getting to them we ignore
+ * this problem for now.  We do write the $BITMAP attribute if it is present
+ * which is the important one for a directory so things are not too bad.
+ */
+static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry,
+		int datasync)
+{
+	struct inode *vi = dentry->d_inode;
+	ntfs_inode *ni = NTFS_I(vi);
+	int err, ret;
+
+	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
+	BUG_ON(!S_ISDIR(vi->i_mode));
+	if (NInoIndexAllocPresent(ni) && ni->itype.index.bmp_ino)
+		write_inode_now(ni->itype.index.bmp_ino, !datasync);
+	ret = ntfs_write_inode(vi, 1);
+	write_inode_now(vi, !datasync);
+	err = sync_blockdev(vi->i_sb->s_bdev);
+	if (unlikely(err && !ret))
+		ret = err;
+	if (likely(!ret))
+		ntfs_debug("Done.");
+	else
+		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
+				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
+	return ret;
+}
+
+#endif /* NTFS_RW */
+
+struct file_operations ntfs_dir_ops = {
+	.llseek		= generic_file_llseek,	/* Seek inside directory. */
+	.read		= generic_read_dir,	/* Return -EISDIR. */
+	.readdir	= ntfs_readdir,		/* Read directory contents. */
+#ifdef NTFS_RW
+	.fsync		= ntfs_dir_fsync,	/* Sync a directory to disk. */
+	/*.aio_fsync	= ,*/			/* Sync all outstanding async
+						   i/o operations on a kiocb. */
+#endif /* NTFS_RW */
+	/*.ioctl	= ,*/			/* Perform function on the
+						   mounted filesystem. */
+	.open		= ntfs_dir_open,	/* Open directory. */
+};
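+
+/*
+ * Illustrative sketch only: ntfs_dir_ops is not referenced from this file.
+ * The inode setup code (inode.c) is expected to install it when a directory
+ * inode is read in, along the lines of:
+ *
+ *	if (S_ISDIR(vi->i_mode)) {
+ *		vi->i_op = &ntfs_dir_inode_ops;
+ *		vi->i_fop = &ntfs_dir_ops;
+ *	}
+ *
+ * The exact assignments live in ntfs_read_locked_inode(); the snippet above
+ * is only meant to show where ntfs_dir_ops plugs into the VFS.
+ */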
diff --git a/fs/ntfs/dir.h b/fs/ntfs/dir.h
new file mode 100644
index 0000000..aea7582
--- /dev/null
+++ b/fs/ntfs/dir.h
@@ -0,0 +1,48 @@
+/*
+ * dir.h - Defines for directory handling in NTFS Linux kernel driver. Part of
+ *	   the Linux-NTFS project.
+ *
+ * Copyright (c) 2002-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_DIR_H
+#define _LINUX_NTFS_DIR_H
+
+#include "layout.h"
+#include "inode.h"
+#include "types.h"
+
+/*
+ * ntfs_name is used to return the file name to the caller of
+ * ntfs_lookup_inode_by_name() in order for the caller (namei.c::ntfs_lookup())
+ * to be able to deal with dcache aliasing issues.
+ */
+typedef struct {
+	MFT_REF mref;
+	FILE_NAME_TYPE_FLAGS type;
+	u8 len;
+	ntfschar name[0];
+} __attribute__ ((__packed__)) ntfs_name;
+
+/* The little endian Unicode string $I30 as a global constant. */
+extern ntfschar I30[5];
+
+extern MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni,
+		const ntfschar *uname, const int uname_len, ntfs_name **res);
+
+#endif /* _LINUX_NTFS_DIR_H */
diff --git a/fs/ntfs/endian.h b/fs/ntfs/endian.h
new file mode 100644
index 0000000..927b5bf
--- /dev/null
+++ b/fs/ntfs/endian.h
@@ -0,0 +1,93 @@
+/*
+ * endian.h - Defines for endianness handling in NTFS Linux kernel driver.
+ *	      Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_ENDIAN_H
+#define _LINUX_NTFS_ENDIAN_H
+
+#include <asm/byteorder.h>
+#include "types.h"
+
+/*
+ * Signed endianness conversion functions.
+ */
+
+static inline s16 sle16_to_cpu(sle16 x)
+{
+	return le16_to_cpu((__force le16)x);
+}
+
+static inline s32 sle32_to_cpu(sle32 x)
+{
+	return le32_to_cpu((__force le32)x);
+}
+
+static inline s64 sle64_to_cpu(sle64 x)
+{
+	return le64_to_cpu((__force le64)x);
+}
+
+static inline s16 sle16_to_cpup(sle16 *x)
+{
+	return le16_to_cpu(*(__force le16*)x);
+}
+
+static inline s32 sle32_to_cpup(sle32 *x)
+{
+	return le32_to_cpu(*(__force le32*)x);
+}
+
+static inline s64 sle64_to_cpup(sle64 *x)
+{
+	return le64_to_cpu(*(__force le64*)x);
+}
+
+static inline sle16 cpu_to_sle16(s16 x)
+{
+	return (__force sle16)cpu_to_le16(x);
+}
+
+static inline sle32 cpu_to_sle32(s32 x)
+{
+	return (__force sle32)cpu_to_le32(x);
+}
+
+static inline sle64 cpu_to_sle64(s64 x)
+{
+	return (__force sle64)cpu_to_le64(x);
+}
+
+static inline sle16 cpu_to_sle16p(s16 *x)
+{
+	return (__force sle16)cpu_to_le16(*x);
+}
+
+static inline sle32 cpu_to_sle32p(s32 *x)
+{
+	return (__force sle32)cpu_to_le32(*x);
+}
+
+static inline sle64 cpu_to_sle64p(s64 *x)
+{
+	return (__force sle64)cpu_to_le64(*x);
+}
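+
+/*
+ * Usage sketch (illustrative only): fields of on-disk structures that are
+ * declared with the signed little endian types sle16/sle32/sle64 must be
+ * passed through these helpers before being used as cpu endian values, and
+ * vice versa when writing, e.g.:
+ *
+ *	VCN vcn = sle64_to_cpu(ia->index_block_vcn);
+ *	ia->index_block_vcn = cpu_to_sle64(vcn);
+ *
+ * The __force casts keep the endianness annotations confined to this header
+ * so that callers never need to cast between the le and sle types themselves.
+ */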
+
+#endif /* _LINUX_NTFS_ENDIAN_H */
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
new file mode 100644
index 0000000..db8713e
--- /dev/null
+++ b/fs/ntfs/file.c
@@ -0,0 +1,155 @@
+/*
+ * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+
+#include "inode.h"
+#include "debug.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_file_open - called when an inode is about to be opened
+ * @vi:		inode to be opened
+ * @filp:	file structure describing the inode
+ *
+ * Limit file size to the page cache limit on architectures where unsigned long
+ * is 32 bits.  This is the most we can do for now without overflowing the page
+ * cache page index.  Doing it this way means we don't run into problems with
+ * existing files that are too large.  It would be better to allow the user to
+ * read the beginning of the file but I doubt very much anyone is going to hit
+ * this check on a 32-bit architecture, so there is no point in adding the
+ * extra complexity required to support this.
+ *
+ * On 64-bit architectures, the check is hopefully optimized away by the
+ * compiler.
+ *
+ * After the check passes, just call generic_file_open() to do its work.
+ */
+static int ntfs_file_open(struct inode *vi, struct file *filp)
+{
+	if (sizeof(unsigned long) < 8) {
+		if (vi->i_size > MAX_LFS_FILESIZE)
+			return -EFBIG;
+	}
+	return generic_file_open(vi, filp);
+}
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_file_fsync - sync a file to disk
+ * @filp:	file to be synced
+ * @dentry:	dentry describing the file to sync
+ * @datasync:	if non-zero only flush user data and not metadata
+ *
+ * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
+ * system calls.  This function is inspired by fs/buffer.c::file_fsync().
+ *
+ * If @datasync is false, write the mft record and all associated extent mft
+ * records as well as the $DATA attribute and then sync the block device.
+ *
+ * If @datasync is true and the attribute is non-resident, we skip the writing
+ * of the mft record and all associated extent mft records (this might still
+ * happen due to the write_inode_now() call).
+ *
+ * Also, if @datasync is true, we do not wait on the inode to be written out
+ * but we always wait on the page cache pages to be written out.
+ *
+ * Note: In the past @filp could be NULL so we ignore it as we don't need it
+ * anyway.
+ *
+ * Locking: Caller must hold i_sem on the inode.
+ *
+ * TODO: We should probably also write all attribute/index inodes associated
+ * with this inode but since we have no simple way of getting to them we ignore
+ * this problem for now.
+ */
+static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
+		int datasync)
+{
+	struct inode *vi = dentry->d_inode;
+	int err, ret = 0;
+
+	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
+	BUG_ON(S_ISDIR(vi->i_mode));
+	if (!datasync || !NInoNonResident(NTFS_I(vi)))
+		ret = ntfs_write_inode(vi, 1);
+	write_inode_now(vi, !datasync);
+	err = sync_blockdev(vi->i_sb->s_bdev);
+	if (unlikely(err && !ret))
+		ret = err;
+	if (likely(!ret))
+		ntfs_debug("Done.");
+	else
+		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
+				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
+	return ret;
+}
+
+#endif /* NTFS_RW */
+
+struct file_operations ntfs_file_ops = {
+	.llseek		= generic_file_llseek,	  /* Seek inside file. */
+	.read		= generic_file_read,	  /* Read from file. */
+	.aio_read	= generic_file_aio_read,  /* Async read from file. */
+	.readv		= generic_file_readv,	  /* Read from file. */
+#ifdef NTFS_RW
+	.write		= generic_file_write,	  /* Write to file. */
+	.aio_write	= generic_file_aio_write, /* Async write to file. */
+	.writev		= generic_file_writev,	  /* Write to file. */
+	/*.release	= ,*/			  /* Last file is closed.  See
+						     fs/ext2/file.c::
+						     ext2_release_file() for
+						     how to use this to discard
+						     preallocated space for
+						     write opened files. */
+	.fsync		= ntfs_file_fsync,	  /* Sync a file to disk. */
+	/*.aio_fsync	= ,*/			  /* Sync all outstanding async
+						     i/o operations on a
+						     kiocb. */
+#endif /* NTFS_RW */
+	/*.ioctl	= ,*/			  /* Perform function on the
+						     mounted filesystem. */
+	.mmap		= generic_file_mmap,	  /* Mmap file. */
+	.open		= ntfs_file_open,	  /* Open file. */
+	.sendfile	= generic_file_sendfile,  /* Zero-copy data send with
+						     the data source being on
+						     the ntfs partition.  We
+						     do not need to care about
+						     the data destination. */
+	/*.sendpage	= ,*/			  /* Zero-copy data send with
+						     the data destination being
+						     on the ntfs partition.  We
+						     do not need to care about
+						     the data source. */
+};
+
+struct inode_operations ntfs_file_inode_ops = {
+#ifdef NTFS_RW
+	.truncate	= ntfs_truncate_vfs,
+	.setattr	= ntfs_setattr,
+#endif /* NTFS_RW */
+};
+
+struct file_operations ntfs_empty_file_ops = {};
+
+struct inode_operations ntfs_empty_inode_ops = {};
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
new file mode 100644
index 0000000..71bd2cd
--- /dev/null
+++ b/fs/ntfs/index.c
@@ -0,0 +1,461 @@
+/*
+ * index.c - NTFS kernel index handling.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "aops.h"
+#include "collate.h"
+#include "debug.h"
+#include "index.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_index_ctx_get - allocate and initialize a new index context
+ * @idx_ni:	ntfs index inode with which to initialize the context
+ *
+ * Allocate a new index context, initialize it with @idx_ni and return it.
+ * Return NULL if allocation failed.
+ *
+ * Locking:  Caller must hold i_sem on the index inode.
+ */
+ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
+{
+	ntfs_index_context *ictx;
+
+	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS);
+	if (ictx) {
+		ictx->idx_ni = idx_ni;
+		ictx->entry = NULL;
+		ictx->data = NULL;
+		ictx->data_len = 0;
+		ictx->is_in_root = 0;
+		ictx->ir = NULL;
+		ictx->actx = NULL;
+		ictx->base_ni = NULL;
+		ictx->ia = NULL;
+		ictx->page = NULL;
+	}
+	return ictx;
+}
+
+/**
+ * ntfs_index_ctx_put - release an index context
+ * @ictx:	index context to free
+ *
+ * Release the index context @ictx, releasing all associated resources.
+ *
+ * Locking:  Caller must hold i_sem on the index inode.
+ */
+void ntfs_index_ctx_put(ntfs_index_context *ictx)
+{
+	if (ictx->entry) {
+		if (ictx->is_in_root) {
+			if (ictx->actx)
+				ntfs_attr_put_search_ctx(ictx->actx);
+			if (ictx->base_ni)
+				unmap_mft_record(ictx->base_ni);
+		} else {
+			struct page *page = ictx->page;
+			if (page) {
+				BUG_ON(!PageLocked(page));
+				unlock_page(page);
+				ntfs_unmap_page(page);
+			}
+		}
+	}
+	kmem_cache_free(ntfs_index_ctx_cache, ictx);
+	return;
+}
+
+/**
+ * ntfs_index_lookup - find a key in an index and return its index entry
+ * @key:	[IN] key for which to search in the index
+ * @key_len:	[IN] length of @key in bytes
+ * @ictx:	[IN/OUT] context describing the index and the returned entry
+ *
+ * Before calling ntfs_index_lookup(), @ictx must have been obtained from a
+ * call to ntfs_index_ctx_get().
+ *
+ * Look for the @key in the index specified by the index lookup context @ictx.
+ * ntfs_index_lookup() walks the contents of the index looking for the @key.
+ *
+ * If the @key is found in the index, 0 is returned and @ictx is set up to
+ * describe the index entry containing the matching @key.  @ictx->entry is the
+ * index entry and @ictx->data and @ictx->data_len are the index entry data and
+ * its length in bytes, respectively.
+ *
+ * If the @key is not found in the index, -ENOENT is returned and @ictx is
+ * set up to describe the index entry whose key collates immediately after the
+ * search @key, i.e. this is the position in the index at which an index entry
+ * with a key of @key would need to be inserted.
+ *
+ * If an error occurs return the negative error code and @ictx is left
+ * untouched.
+ *
+ * When finished with the entry and its data, call ntfs_index_ctx_put() to free
+ * the context and other associated resources.
+ *
+ * If the index entry was modified, call ntfs_index_entry_flush_dcache_page()
+ * immediately after the modification and either ntfs_index_entry_mark_dirty()
+ * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
+ * ensure that the changes are written to disk.
+ *
+ * Locking:  - Caller must hold i_sem on the index inode.
+ *	     - Each page cache page in the index allocation mapping must be
+ *	       locked whilst being accessed otherwise we may find a corrupt
+ *	       page due to it being under ->writepage at the moment which
+ *	       applies the mst protection fixups before writing out and then
+ *	       removes them again after the write is complete, after which it
+ *	       unlocks the page.
+ */
+int ntfs_index_lookup(const void *key, const int key_len,
+		ntfs_index_context *ictx)
+{
+	VCN vcn, old_vcn;
+	ntfs_inode *idx_ni = ictx->idx_ni;
+	ntfs_volume *vol = idx_ni->vol;
+	struct super_block *sb = vol->sb;
+	ntfs_inode *base_ni = idx_ni->ext.base_ntfs_ino;
+	MFT_RECORD *m;
+	INDEX_ROOT *ir;
+	INDEX_ENTRY *ie;
+	INDEX_ALLOCATION *ia;
+	u8 *index_end, *kaddr;
+	ntfs_attr_search_ctx *actx;
+	struct address_space *ia_mapping;
+	struct page *page;
+	int rc, err = 0;
+
+	ntfs_debug("Entering.");
+	BUG_ON(!NInoAttr(idx_ni));
+	BUG_ON(idx_ni->type != AT_INDEX_ALLOCATION);
+	BUG_ON(idx_ni->nr_extents != -1);
+	BUG_ON(!base_ni);
+	BUG_ON(!key);
+	BUG_ON(key_len <= 0);
+	if (!ntfs_is_collation_rule_supported(
+			idx_ni->itype.index.collation_rule)) {
+		ntfs_error(sb, "Index uses unsupported collation rule 0x%x.  "
+				"Aborting lookup.", le32_to_cpu(
+				idx_ni->itype.index.collation_rule));
+		return -EOPNOTSUPP;
+	}
+	/* Get hold of the mft record for the index inode. */
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
+				-PTR_ERR(m));
+		return PTR_ERR(m);
+	}
+	actx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (unlikely(!actx)) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	/* Find the index root attribute in the mft record. */
+	err = ntfs_attr_lookup(AT_INDEX_ROOT, idx_ni->name, idx_ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, actx);
+	if (unlikely(err)) {
+		if (err == -ENOENT) {
+			ntfs_error(sb, "Index root attribute missing in inode "
+					"0x%lx.", idx_ni->mft_no);
+			err = -EIO;
+		}
+		goto err_out;
+	}
+	/* Get to the index root value (it has been verified in read_inode). */
+	ir = (INDEX_ROOT*)((u8*)actx->attr +
+			le16_to_cpu(actx->attr->data.resident.value_offset));
+	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ir->index +
+			le32_to_cpu(ir->index.entries_offset));
+	/*
+	 * Loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds checks. */
+		if ((u8*)ie < (u8*)actx->mrec || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->length) > index_end)
+			goto idx_err_out;
+		/*
+		 * The last entry cannot contain a key.  It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/* Further bounds checks. */
+		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
+				le16_to_cpu(ie->key_length) >
+				le16_to_cpu(ie->data.vi.data_offset) ||
+				(u32)le16_to_cpu(ie->data.vi.data_offset) +
+				le16_to_cpu(ie->data.vi.data_length) >
+				le16_to_cpu(ie->length))
+			goto idx_err_out;
+		/* If the keys match perfectly, we set up @ictx and return 0. */
+		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
+				&ie->key, key_len)) {
+ir_done:
+			ictx->is_in_root = TRUE;
+			ictx->actx = actx;
+			ictx->base_ni = base_ni;
+			ictx->ia = NULL;
+			ictx->page = NULL;
+done:
+			ictx->entry = ie;
+			ictx->data = (u8*)ie +
+					le16_to_cpu(ie->data.vi.data_offset);
+			ictx->data_len = le16_to_cpu(ie->data.vi.data_length);
+			ntfs_debug("Done.");
+			return err;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
+				key_len, &ie->key, le16_to_cpu(ie->key_length));
+		/*
+		 * If @key collates before the key of the current entry, there
+		 * is definitely no such key in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/*
+		 * A match should never happen as the memcmp() call should have
+		 * caught it, but we still treat it correctly.
+		 */
+		if (!rc)
+			goto ir_done;
+		/* The keys are not equal, continue the search. */
+	}
+	/*
+	 * We have finished with this index without success.  Check for the
+	 * presence of a child node and if not present setup @ictx and return
+	 * -ENOENT.
+	 */
+	if (!(ie->flags & INDEX_ENTRY_NODE)) {
+		ntfs_debug("Entry not found.");
+		err = -ENOENT;
+		goto ir_done;
+	} /* Child node present, descend into it. */
+	/* Consistency check: Verify that an index allocation exists. */
+	if (!NInoIndexAllocPresent(idx_ni)) {
+		ntfs_error(sb, "No index allocation attribute but index entry "
+				"requires one.  Inode 0x%lx is corrupt or "
+				"driver bug.", idx_ni->mft_no);
+		goto err_out;
+	}
+	/* Get the starting vcn of the index_block holding the child node. */
+	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
+	ia_mapping = VFS_I(idx_ni)->i_mapping;
+	/*
+	 * We are done with the index root and the mft record.  Release them,
+	 * otherwise we deadlock with ntfs_map_page().
+	 */
+	ntfs_attr_put_search_ctx(actx);
+	unmap_mft_record(base_ni);
+	m = NULL;
+	actx = NULL;
+descend_into_child_node:
+	/*
+	 * Convert vcn to index into the index allocation attribute in units
+	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * disk if necessary.
+	 */
+	page = ntfs_map_page(ia_mapping, vcn <<
+			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+	if (IS_ERR(page)) {
+		ntfs_error(sb, "Failed to map index page, error %ld.",
+				-PTR_ERR(page));
+		err = PTR_ERR(page);
+		goto err_out;
+	}
+	lock_page(page);
+	kaddr = (u8*)page_address(page);
+fast_descend_into_child_node:
+	/* Get to the index allocation block. */
+	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
+			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+	/* Bounds checks. */
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Out of bounds check failed.  Corrupt inode "
+				"0x%lx or driver bug.", idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* Catch multi sector transfer fixup errors. */
+	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
+		ntfs_error(sb, "Index record with vcn 0x%llx is corrupt.  "
+				"Corrupt inode 0x%lx.  Run chkdsk.",
+				(long long)vcn, idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
+		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
+				"different from expected VCN (0x%llx).  Inode "
+				"0x%lx is corrupt or driver bug.",
+				(unsigned long long)
+				sle64_to_cpu(ia->index_block_vcn),
+				(unsigned long long)vcn, idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
+			idx_ni->itype.index.block_size) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
+				"a size (%u) differing from the index "
+				"specified size (%u).  Inode is corrupt or "
+				"driver bug.", (unsigned long long)vcn,
+				idx_ni->mft_no,
+				le32_to_cpu(ia->index.allocated_size) + 0x18,
+				idx_ni->itype.index.block_size);
+		goto unm_err_out;
+	}
+	index_end = (u8*)ia + idx_ni->itype.index.block_size;
+	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
+				"crosses page boundary.  Impossible!  Cannot "
+				"access!  This is probably a bug in the "
+				"driver.", (unsigned long long)vcn,
+				idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
+	if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
+		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
+				"0x%lx exceeds maximum size.",
+				(unsigned long long)vcn, idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* The first index entry. */
+	ie = (INDEX_ENTRY*)((u8*)&ia->index +
+			le32_to_cpu(ia->index.entries_offset));
+	/*
+	 * Iterate as in the big loop above but applied to the index buffer:
+	 * loop until we exceed valid memory (corruption case) or until we
+	 * reach the last entry.
+	 */
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+		/* Bounds checks. */
+		if ((u8*)ie < (u8*)ia || (u8*)ie +
+				sizeof(INDEX_ENTRY_HEADER) > index_end ||
+				(u8*)ie + le16_to_cpu(ie->length) > index_end) {
+			ntfs_error(sb, "Index entry out of bounds in inode "
+					"0x%lx.", idx_ni->mft_no);
+			goto unm_err_out;
+		}
+		/*
+		 * The last entry cannot contain a key.  It can however contain
+		 * a pointer to a child node in the B+tree so we just break out.
+		 */
+		if (ie->flags & INDEX_ENTRY_END)
+			break;
+		/* Further bounds checks. */
+		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
+				le16_to_cpu(ie->key_length) >
+				le16_to_cpu(ie->data.vi.data_offset) ||
+				(u32)le16_to_cpu(ie->data.vi.data_offset) +
+				le16_to_cpu(ie->data.vi.data_length) >
+				le16_to_cpu(ie->length)) {
+			ntfs_error(sb, "Index entry out of bounds in inode "
+					"0x%lx.", idx_ni->mft_no);
+			goto unm_err_out;
+		}
+		/* If the keys match perfectly, we set up @ictx and return 0. */
+		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
+				&ie->key, key_len)) {
+ia_done:
+			ictx->is_in_root = FALSE;
+			ictx->actx = NULL;
+			ictx->base_ni = NULL;
+			ictx->ia = ia;
+			ictx->page = page;
+			goto done;
+		}
+		/*
+		 * Not a perfect match, need to do full blown collation so we
+		 * know which way in the B+tree we have to go.
+		 */
+		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
+				key_len, &ie->key, le16_to_cpu(ie->key_length));
+		/*
+		 * If @key collates before the key of the current entry, there
+		 * is definitely no such key in this index but we might need to
+		 * descend into the B+tree so we just break out of the loop.
+		 */
+		if (rc == -1)
+			break;
+		/*
+		 * A match should never happen as the memcmp() call should have
+		 * caught it, but we still treat it correctly.
+		 */
+		if (!rc)
+			goto ia_done;
+		/* The keys are not equal, continue the search. */
+	}
+	/*
+	 * We have finished with this index buffer without success.  Check for
+	 * the presence of a child node and if not present return -ENOENT.
+	 */
+	if (!(ie->flags & INDEX_ENTRY_NODE)) {
+		ntfs_debug("Entry not found.");
+		err = -ENOENT;
+		goto ia_done;
+	}
+	if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
+		ntfs_error(sb, "Index entry with child node found in a leaf "
+				"node in inode 0x%lx.", idx_ni->mft_no);
+		goto unm_err_out;
+	}
+	/* Child node present, descend into it. */
+	old_vcn = vcn;
+	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
+	if (vcn >= 0) {
+		/*
+		 * If vcn is in the same page cache page as old_vcn we recycle
+		 * the mapped page.
+		 */
+		if (old_vcn << vol->cluster_size_bits >>
+				PAGE_CACHE_SHIFT == vcn <<
+				vol->cluster_size_bits >>
+				PAGE_CACHE_SHIFT)
+			goto fast_descend_into_child_node;
+		unlock_page(page);
+		ntfs_unmap_page(page);
+		goto descend_into_child_node;
+	}
+	ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
+			idx_ni->mft_no);
+unm_err_out:
+	unlock_page(page);
+	ntfs_unmap_page(page);
+err_out:
+	if (!err)
+		err = -EIO;
+	if (actx)
+		ntfs_attr_put_search_ctx(actx);
+	if (m)
+		unmap_mft_record(base_ni);
+	return err;
+idx_err_out:
+	ntfs_error(sb, "Corrupt index.  Aborting lookup.");
+	goto err_out;
+}
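+
+/*
+ * Usage sketch (illustrative only, error handling trimmed): looking up a key
+ * in an index as described in the ntfs_index_lookup() comment above.  The
+ * names my_key, my_key_len, and process() are made up for the example.  If
+ * the entry is found, ictx->entry, ictx->data and ictx->data_len describe it;
+ * if it is then modified, call ntfs_index_entry_flush_dcache_page() and
+ * ntfs_index_entry_mark_dirty() before putting the context.
+ *
+ *	ictx = ntfs_index_ctx_get(idx_ni);
+ *	if (!ictx)
+ *		return -ENOMEM;
+ *	err = ntfs_index_lookup(my_key, my_key_len, ictx);
+ *	if (!err)
+ *		process(ictx->entry, ictx->data, ictx->data_len);
+ *	ntfs_index_ctx_put(ictx);
+ *	return err;
+ */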
diff --git a/fs/ntfs/index.h b/fs/ntfs/index.h
new file mode 100644
index 0000000..846a489
--- /dev/null
+++ b/fs/ntfs/index.h
@@ -0,0 +1,148 @@
+/*
+ * index.h - Defines for NTFS kernel index handling.  Part of the Linux-NTFS
+ *	     project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_INDEX_H
+#define _LINUX_NTFS_INDEX_H
+
+#include <linux/fs.h>
+
+#include "types.h"
+#include "layout.h"
+#include "inode.h"
+#include "attrib.h"
+#include "mft.h"
+#include "aops.h"
+
+/**
+ * ntfs_index_context - ntfs index context describing an index entry
+ * @idx_ni:	index inode containing the @entry described by this context
+ * @entry:	index entry (points into @ir or @ia)
+ * @data:	index entry data (points into @entry)
+ * @data_len:	length in bytes of @data
+ * @is_in_root:	TRUE if @entry is in @ir and FALSE if it is in @ia
+ * @ir:		index root if @is_in_root and NULL otherwise
+ * @actx:	attribute search context if @is_in_root and NULL otherwise
+ * @base_ni:	base inode if @is_in_root and NULL otherwise
+ * @ia:		index block if @is_in_root is FALSE and NULL otherwise
+ * @page:	page if @is_in_root is FALSE and NULL otherwise
+ *
+ * @idx_ni is the index inode this context belongs to.
+ *
+ * @entry is the index entry described by this context.  @data and @data_len
+ * are the index entry data and its length in bytes, respectively.  @data
+ * simply points into @entry.  This is probably what the user is interested in.
+ *
+ * If @is_in_root is TRUE, @entry is in the index root attribute @ir described
+ * by the attribute search context @actx and the base inode @base_ni.  @ia and
+ * @page are NULL in this case.
+ *
+ * If @is_in_root is FALSE, @entry is in the index allocation attribute and @ia
+ * and @page point to the index allocation block and the mapped, locked page it
+ * is in, respectively.  @ir, @actx and @base_ni are NULL in this case.
+ *
+ * To obtain a context call ntfs_index_ctx_get().
+ *
+ * We use this context to allow ntfs_index_lookup() to return the found index
+ * @entry and its @data without having to allocate a buffer and copy the @entry
+ * and/or its @data into it.
+ *
+ * When finished with the @entry and its @data, call ntfs_index_ctx_put() to
+ * free the context and other associated resources.
+ *
+ * If the index entry was modified, call ntfs_index_entry_flush_dcache_page()
+ * immediately after the modification and either ntfs_index_entry_mark_dirty()
+ * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
+ * ensure that the changes are written to disk.
+ */
+typedef struct {
+	ntfs_inode *idx_ni;
+	INDEX_ENTRY *entry;
+	void *data;
+	u16 data_len;
+	BOOL is_in_root;
+	INDEX_ROOT *ir;
+	ntfs_attr_search_ctx *actx;
+	ntfs_inode *base_ni;
+	INDEX_ALLOCATION *ia;
+	struct page *page;
+} ntfs_index_context;
+
+extern ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni);
+extern void ntfs_index_ctx_put(ntfs_index_context *ictx);
+
+extern int ntfs_index_lookup(const void *key, const int key_len,
+		ntfs_index_context *ictx);
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_index_entry_flush_dcache_page - flush_dcache_page() for index entries
+ * @ictx:	ntfs index context describing the index entry
+ *
+ * Call flush_dcache_page() for the page in which an index entry resides.
+ *
+ * This must be called every time an index entry is modified, just after the
+ * modification.
+ *
+ * If the index entry is in the index root attribute, simply flush the page
+ * containing the mft record containing the index root attribute.
+ *
+ * If the index entry is in an index block belonging to the index allocation
+ * attribute, simply flush the page cache page containing the index block.
+ */
+static inline void ntfs_index_entry_flush_dcache_page(ntfs_index_context *ictx)
+{
+	if (ictx->is_in_root)
+		flush_dcache_mft_record_page(ictx->actx->ntfs_ino);
+	else
+		flush_dcache_page(ictx->page);
+}
+
+/**
+ * ntfs_index_entry_mark_dirty - mark an index entry dirty
+ * @ictx:	ntfs index context describing the index entry
+ *
+ * Mark the index entry described by the index entry context @ictx dirty.
+ *
+ * If the index entry is in the index root attribute, simply mark the mft
+ * record containing the index root attribute dirty.  This ensures the mft
+ * record, and hence the index root attribute, will be written out to disk
+ * later.
+ *
+ * If the index entry is in an index block belonging to the index allocation
+ * attribute, mark the buffers belonging to the index record as well as the
+ * page cache page the index block is in dirty.  This automatically marks the
+ * VFS inode of the ntfs index inode to which the index entry belongs dirty,
+ * too (I_DIRTY_PAGES) and this in turn ensures the page buffers, and hence the
+ * dirty index block, will be written out to disk later.
+ */
+static inline void ntfs_index_entry_mark_dirty(ntfs_index_context *ictx)
+{
+	if (ictx->is_in_root)
+		mark_mft_record_dirty(ictx->actx->ntfs_ino);
+	else
+		mark_ntfs_record_dirty(ictx->page,
+				(u8*)ictx->ia - (u8*)page_address(ictx->page));
+}
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_INDEX_H */
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
new file mode 100644
index 0000000..31840ba
--- /dev/null
+++ b/fs/ntfs/inode.c
@@ -0,0 +1,2616 @@
+/**
+ * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <linux/quotaops.h>
+#include <linux/mount.h>
+
+#include "aops.h"
+#include "dir.h"
+#include "debug.h"
+#include "inode.h"
+#include "attrib.h"
+#include "malloc.h"
+#include "mft.h"
+#include "time.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_test_inode - compare two (possibly fake) inodes for equality
+ * @vi:		vfs inode which to test
+ * @na:		ntfs attribute which is being tested with
+ *
+ * Compare the ntfs attribute embedded in the ntfs specific part of the vfs
+ * inode @vi for equality with the ntfs attribute @na.
+ *
+ * If searching for the normal file/directory inode, set @na->type to AT_UNUSED.
+ * @na->name and @na->name_len are then ignored.
+ *
+ * Return 1 if the attributes match and 0 if not.
+ *
+ * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * allowed to sleep.
+ */
+int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
+{
+	ntfs_inode *ni;
+
+	if (vi->i_ino != na->mft_no)
+		return 0;
+	ni = NTFS_I(vi);
+	/* If !NInoAttr(ni), @vi is a normal file or directory inode. */
+	if (likely(!NInoAttr(ni))) {
+		/* If not looking for a normal inode this is a mismatch. */
+		if (unlikely(na->type != AT_UNUSED))
+			return 0;
+	} else {
+		/* A fake inode describing an attribute. */
+		if (ni->type != na->type)
+			return 0;
+		if (ni->name_len != na->name_len)
+			return 0;
+		if (na->name_len && memcmp(ni->name, na->name,
+				na->name_len * sizeof(ntfschar)))
+			return 0;
+	}
+	/* Match! */
+	return 1;
+}
+
+/**
+ * ntfs_init_locked_inode - initialize an inode
+ * @vi:		vfs inode to initialize
+ * @na:		ntfs attribute which to initialize @vi to
+ *
+ * Initialize the vfs inode @vi with the values from the ntfs attribute @na in
+ * order to enable ntfs_test_inode() to do its work.
+ *
+ * If initializing the normal file/directory inode, set @na->type to AT_UNUSED.
+ * In that case, @na->name and @na->name_len should be set to NULL and 0,
+ * respectively, although that is not strictly necessary as
+ * ntfs_read_locked_inode() will fill them in later.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
+ */
+static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
+{
+	ntfs_inode *ni = NTFS_I(vi);
+
+	vi->i_ino = na->mft_no;
+
+	ni->type = na->type;
+	if (na->type == AT_INDEX_ALLOCATION)
+		NInoSetMstProtected(ni);
+
+	ni->name = na->name;
+	ni->name_len = na->name_len;
+
+	/* If initializing a normal inode, we are done. */
+	if (likely(na->type == AT_UNUSED)) {
+		BUG_ON(na->name);
+		BUG_ON(na->name_len);
+		return 0;
+	}
+
+	/* It is a fake inode. */
+	NInoSetAttr(ni);
+
+	/*
+	 * We have the I30 global constant as an optimization as it is the name
+	 * in >99.9% of named attributes! The other <0.1% incur a GFP_ATOMIC
+	 * allocation but that is ok. And most attributes are unnamed anyway,
+	 * thus the fraction of named attributes with name != I30 is actually
+	 * absolutely tiny.
+	 */
+	if (na->name_len && na->name != I30) {
+		unsigned int i;
+
+		BUG_ON(!na->name);
+		i = na->name_len * sizeof(ntfschar);
+		ni->name = (ntfschar*)kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
+		if (!ni->name)
+			return -ENOMEM;
+		memcpy(ni->name, na->name, i);
+		ni->name[na->name_len] = 0;
+	}
+	return 0;
+}
+
+typedef int (*set_t)(struct inode *, void *);
+static int ntfs_read_locked_inode(struct inode *vi);
+static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi);
+static int ntfs_read_locked_index_inode(struct inode *base_vi,
+		struct inode *vi);
+
+/**
+ * ntfs_iget - obtain a struct inode corresponding to a specific normal inode
+ * @sb:		super block of mounted volume
+ * @mft_no:	mft record number / inode number to obtain
+ *
+ * Obtain the struct inode corresponding to a specific normal inode (i.e. a
+ * file or directory).
+ *
+ * If the inode is in the cache, it is just returned with an increased
+ * reference count. Otherwise, a new struct inode is allocated and initialized,
+ * and finally ntfs_read_locked_inode() is called to read in the inode and
+ * fill in the remainder of the inode structure.
+ *
+ * Return the struct inode on success. Check the return value with IS_ERR() and
+ * if true, the function failed and the error code is obtained from PTR_ERR().
+ */
+struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no)
+{
+	struct inode *vi;
+	ntfs_attr na;
+	int err;
+
+	na.mft_no = mft_no;
+	na.type = AT_UNUSED;
+	na.name = NULL;
+	na.name_len = 0;
+
+	vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode,
+			(set_t)ntfs_init_locked_inode, &na);
+	if (!vi)
+		return ERR_PTR(-ENOMEM);
+
+	err = 0;
+
+	/* If this is a freshly allocated inode, need to read it now. */
+	if (vi->i_state & I_NEW) {
+		err = ntfs_read_locked_inode(vi);
+		unlock_new_inode(vi);
+	}
+	/*
+	 * There is no point in keeping bad inodes around if the failure was
+	 * due to ENOMEM. We want to be able to retry again later.
+	 */
+	if (err == -ENOMEM) {
+		iput(vi);
+		vi = ERR_PTR(err);
+	}
+	return vi;
+}
+
+/**
+ * ntfs_attr_iget - obtain a struct inode corresponding to an attribute
+ * @base_vi:	vfs base inode containing the attribute
+ * @type:	attribute type
+ * @name:	Unicode name of the attribute (NULL if unnamed)
+ * @name_len:	length of @name in Unicode characters (0 if unnamed)
+ *
+ * Obtain the (fake) struct inode corresponding to the attribute specified by
+ * @type, @name, and @name_len, which is present in the base mft record
+ * specified by the vfs inode @base_vi.
+ *
+ * If the attribute inode is in the cache, it is just returned with an
+ * increased reference count. Otherwise, a new struct inode is allocated and
+ * initialized, and finally ntfs_read_locked_attr_inode() is called to read the
+ * attribute and fill in the inode structure.
+ *
+ * Note, for index allocation attributes, you need to use ntfs_index_iget()
+ * instead of ntfs_attr_iget() as working with indices is a lot more complex.
+ *
+ * Return the struct inode of the attribute inode on success. Check the return
+ * value with IS_ERR() and if true, the function failed and the error code is
+ * obtained from PTR_ERR().
+ */
+struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
+		ntfschar *name, u32 name_len)
+{
+	struct inode *vi;
+	ntfs_attr na;
+	int err;
+
+	/* Make sure no one calls ntfs_attr_iget() for indices. */
+	BUG_ON(type == AT_INDEX_ALLOCATION);
+
+	na.mft_no = base_vi->i_ino;
+	na.type = type;
+	na.name = name;
+	na.name_len = name_len;
+
+	vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
+			(set_t)ntfs_init_locked_inode, &na);
+	if (!vi)
+		return ERR_PTR(-ENOMEM);
+
+	err = 0;
+
+	/* If this is a freshly allocated inode, need to read it now. */
+	if (vi->i_state & I_NEW) {
+		err = ntfs_read_locked_attr_inode(base_vi, vi);
+		unlock_new_inode(vi);
+	}
+	/*
+	 * There is no point in keeping bad attribute inodes around. This also
+	 * simplifies things in that we never need to check for bad attribute
+	 * inodes elsewhere.
+	 */
+	if (err) {
+		iput(vi);
+		vi = ERR_PTR(err);
+	}
+	return vi;
+}
+
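+/*
+ * Usage sketch (illustrative only): dir.c::ntfs_readdir() above obtains the
+ * attribute inode of a directory's $BITMAP attribute named $I30 like this:
+ *
+ *	bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
+ *	if (IS_ERR(bmp_vi))
+ *		return PTR_ERR(bmp_vi);
+ *
+ * The pages of bmp_vi->i_mapping then contain the index bitmap data.  The
+ * reference obtained here must eventually be dropped with iput().
+ */
+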
+/**
+ * ntfs_index_iget - obtain a struct inode corresponding to an index
+ * @base_vi:	vfs base inode containing the index related attributes
+ * @name:	Unicode name of the index
+ * @name_len:	length of @name in Unicode characters
+ *
+ * Obtain the (fake) struct inode corresponding to the index specified by @name
+ * and @name_len, which is present in the base mft record specified by the vfs
+ * inode @base_vi.
+ *
+ * If the index inode is in the cache, it is just returned with an increased
+ * reference count.  Otherwise, a new struct inode is allocated and
+ * initialized, and finally ntfs_read_locked_index_inode() is called to read
+ * the index related attributes and fill in the inode structure.
+ *
+ * Return the struct inode of the index inode on success. Check the return
+ * value with IS_ERR() and if true, the function failed and the error code is
+ * obtained from PTR_ERR().
+ */
+struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
+		u32 name_len)
+{
+	struct inode *vi;
+	ntfs_attr na;
+	int err;
+
+	na.mft_no = base_vi->i_ino;
+	na.type = AT_INDEX_ALLOCATION;
+	na.name = name;
+	na.name_len = name_len;
+
+	vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
+			(set_t)ntfs_init_locked_inode, &na);
+	if (!vi)
+		return ERR_PTR(-ENOMEM);
+
+	err = 0;
+
+	/* If this is a freshly allocated inode, need to read it now. */
+	if (vi->i_state & I_NEW) {
+		err = ntfs_read_locked_index_inode(base_vi, vi);
+		unlock_new_inode(vi);
+	}
+	/*
+	 * There is no point in keeping bad index inodes around.  This also
+	 * simplifies things in that we never need to check for bad index
+	 * inodes elsewhere.
+	 */
+	if (err) {
+		iput(vi);
+		vi = ERR_PTR(err);
+	}
+	return vi;
+}
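+
+/*
+ * Illustrative sketch, not part of the original driver: a caller wanting to
+ * work with a named index obtains the index inode like this.  Remember that
+ * AT_INDEX_ALLOCATION must never be passed to ntfs_attr_iget(); this function
+ * is the only valid way in.  The helper name is hypothetical.
+ */
+static inline struct inode *ntfs_index_iget_example(struct inode *base_vi,
+		ntfschar *name, u32 name_len)
+{
+	struct inode *idx_vi = ntfs_index_iget(base_vi, name, name_len);
+
+	if (IS_ERR(idx_vi))
+		return idx_vi;	/* Error code available via PTR_ERR(). */
+	/*
+	 * The returned inode is a fake attribute inode: i_op and i_fop are
+	 * NULL and it is only used through its address space operations.
+	 * Release it with iput() when finished.
+	 */
+	return idx_vi;
+}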
+
+struct inode *ntfs_alloc_big_inode(struct super_block *sb)
+{
+	ntfs_inode *ni;
+
+	ntfs_debug("Entering.");
+	ni = (ntfs_inode *)kmem_cache_alloc(ntfs_big_inode_cache,
+			SLAB_NOFS);
+	if (likely(ni != NULL)) {
+		ni->state = 0;
+		return VFS_I(ni);
+	}
+	ntfs_error(sb, "Allocation of NTFS big inode structure failed.");
+	return NULL;
+}
+
+void ntfs_destroy_big_inode(struct inode *inode)
+{
+	ntfs_inode *ni = NTFS_I(inode);
+
+	ntfs_debug("Entering.");
+	BUG_ON(ni->page);
+	if (!atomic_dec_and_test(&ni->count))
+		BUG();
+	kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
+}
+
+static inline ntfs_inode *ntfs_alloc_extent_inode(void)
+{
+	ntfs_inode *ni;
+
+	ntfs_debug("Entering.");
+	ni = (ntfs_inode *)kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS);
+	if (likely(ni != NULL)) {
+		ni->state = 0;
+		return ni;
+	}
+	ntfs_error(NULL, "Allocation of NTFS inode structure failed.");
+	return NULL;
+}
+
+static void ntfs_destroy_extent_inode(ntfs_inode *ni)
+{
+	ntfs_debug("Entering.");
+	BUG_ON(ni->page);
+	if (!atomic_dec_and_test(&ni->count))
+		BUG();
+	kmem_cache_free(ntfs_inode_cache, ni);
+}
+
+/**
+ * __ntfs_init_inode - initialize ntfs specific part of an inode
+ * @sb:		super block of mounted volume
+ * @ni:		freshly allocated ntfs inode which to initialize
+ *
+ * Initialize an ntfs inode to defaults.
+ *
+ * NOTE: ni->mft_no, ni->state, ni->type, ni->name, and ni->name_len are left
+ * untouched. Make sure to initialize them elsewhere.
+ *
+ * This function cannot fail.
+ */
+void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
+{
+	ntfs_debug("Entering.");
+	ni->initialized_size = ni->allocated_size = 0;
+	ni->seq_no = 0;
+	atomic_set(&ni->count, 1);
+	ni->vol = NTFS_SB(sb);
+	ntfs_init_runlist(&ni->runlist);
+	init_MUTEX(&ni->mrec_lock);
+	ni->page = NULL;
+	ni->page_ofs = 0;
+	ni->attr_list_size = 0;
+	ni->attr_list = NULL;
+	ntfs_init_runlist(&ni->attr_list_rl);
+	ni->itype.index.bmp_ino = NULL;
+	ni->itype.index.block_size = 0;
+	ni->itype.index.vcn_size = 0;
+	ni->itype.index.collation_rule = 0;
+	ni->itype.index.block_size_bits = 0;
+	ni->itype.index.vcn_size_bits = 0;
+	init_MUTEX(&ni->extent_lock);
+	ni->nr_extents = 0;
+	ni->ext.base_ntfs_ino = NULL;
+}
+
+inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
+		unsigned long mft_no)
+{
+	ntfs_inode *ni = ntfs_alloc_extent_inode();
+
+	ntfs_debug("Entering.");
+	if (likely(ni != NULL)) {
+		__ntfs_init_inode(sb, ni);
+		ni->mft_no = mft_no;
+		ni->type = AT_UNUSED;
+		ni->name = NULL;
+		ni->name_len = 0;
+	}
+	return ni;
+}
+
+/**
+ * ntfs_is_extended_system_file - check if a file is in the $Extend directory
+ * @ctx:	initialized attribute search context
+ *
+ * Search all file name attributes in the inode described by the attribute
+ * search context @ctx and check if any of the names are in the $Extend system
+ * directory.
+ *
+ * Return values:
+ *	   1: file is in $Extend directory
+ *	   0: file is not in $Extend directory
+ *    -errno: failed to determine if the file is in the $Extend directory
+ */
+static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx)
+{
+	int nr_links, err;
+
+	/* Restart search. */
+	ntfs_attr_reinit_search_ctx(ctx);
+
+	/* Get number of hard links. */
+	nr_links = le16_to_cpu(ctx->mrec->link_count);
+
+	/* Loop through all hard links. */
+	while (!(err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0,
+			ctx))) {
+		FILE_NAME_ATTR *file_name_attr;
+		ATTR_RECORD *attr = ctx->attr;
+		u8 *p, *p2;
+
+		nr_links--;
+		/*
+		 * Maximum sanity checking as we are called on an inode that
+		 * we suspect might be corrupt.
+		 */
+		p = (u8*)attr + le32_to_cpu(attr->length);
+		if (p < (u8*)ctx->mrec || (u8*)p > (u8*)ctx->mrec +
+				le32_to_cpu(ctx->mrec->bytes_in_use)) {
+err_corrupt_attr:
+			ntfs_error(ctx->ntfs_ino->vol->sb, "Corrupt file name "
+					"attribute. You should run chkdsk.");
+			return -EIO;
+		}
+		if (attr->non_resident) {
+			ntfs_error(ctx->ntfs_ino->vol->sb, "Non-resident file "
+					"name. You should run chkdsk.");
+			return -EIO;
+		}
+		if (attr->flags) {
+			ntfs_error(ctx->ntfs_ino->vol->sb, "File name with "
+					"invalid flags. You should run "
+					"chkdsk.");
+			return -EIO;
+		}
+		if (!(attr->data.resident.flags & RESIDENT_ATTR_IS_INDEXED)) {
+			ntfs_error(ctx->ntfs_ino->vol->sb, "Unindexed file "
+					"name. You should run chkdsk.");
+			return -EIO;
+		}
+		file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
+				le16_to_cpu(attr->data.resident.value_offset));
+		p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
+		if (p2 < (u8*)attr || p2 > p)
+			goto err_corrupt_attr;
+		/* This attribute is ok, but is it in the $Extend directory? */
+		if (MREF_LE(file_name_attr->parent_directory) == FILE_Extend)
+			return 1;	/* YES, it's an extended system file. */
+	}
+	if (unlikely(err != -ENOENT))
+		return err;
+	if (unlikely(nr_links)) {
+		ntfs_error(ctx->ntfs_ino->vol->sb, "Inode hard link count "
+				"doesn't match number of name attributes. You "
+				"should run chkdsk.");
+		return -EIO;
+	}
+	return 0;	/* NO, it is not an extended system file. */
+}
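+
+/*
+ * Illustrative sketch, not part of the original driver: how the tri-state
+ * return value of ntfs_is_extended_system_file() is meant to be consumed.
+ * Compare with the real call site in ntfs_read_locked_inode() below, where a
+ * positive return makes a missing $DATA attribute acceptable.
+ */
+static inline int ntfs_extend_check_example(ntfs_attr_search_ctx *ctx)
+{
+	int ret = ntfs_is_extended_system_file(ctx);
+
+	if (ret < 0)
+		return ret;	/* -errno: corrupt inode or lookup failure. */
+	if (ret > 0)
+		return 0;	/* In $Extend: missing $DATA is expected. */
+	return -EIO;		/* Not in $Extend: treat as corruption. */
+}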
+
+/**
+ * ntfs_read_locked_inode - read an inode from its device
+ * @vi:		inode to read
+ *
+ * ntfs_read_locked_inode() is called from ntfs_iget() to read the inode
+ * described by @vi into memory from the device.
+ *
+ * The only fields in @vi that we need to/can look at when the function is
+ * called are i_sb, pointing to the mounted device's super block, and i_ino,
+ * the number of the inode to load.
+ *
+ * ntfs_read_locked_inode() maps, pins and locks the mft record number i_ino
+ * for reading and sets up the necessary @vi fields as well as initializing
+ * the ntfs inode.
+ *
+ * Q: What locks are held when the function is called?
+ * A: i_state has I_LOCK set, hence the inode is locked, also
+ *    i_count is set to 1, so it is not going to go away.
+ *    i_flags is set to 0 and we have no business touching it.  Only an ioctl()
+ *    is allowed to write to it.  We should of course be honouring the flags but
+ *    we need to do that using the IS_* macros defined in include/linux/fs.h.
+ *    In any case ntfs_read_locked_inode() has nothing to do with i_flags.
+ *
+ * Return 0 on success and -errno on error.  In the error case, the inode will
+ * have had make_bad_inode() executed on it.
+ */
+static int ntfs_read_locked_inode(struct inode *vi)
+{
+	ntfs_volume *vol = NTFS_SB(vi->i_sb);
+	ntfs_inode *ni;
+	MFT_RECORD *m;
+	STANDARD_INFORMATION *si;
+	ntfs_attr_search_ctx *ctx;
+	int err = 0;
+
+	ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
+
+	/* Setup the generic vfs inode parts now. */
+
+	/* This is the optimal IO size (for stat), not the fs block size. */
+	vi->i_blksize = PAGE_CACHE_SIZE;
+	/*
+	 * This is for checking whether an inode has changed w.r.t. a file so
+	 * that the file can be updated if necessary (compare with f_version).
+	 */
+	vi->i_version = 1;
+
+	vi->i_uid = vol->uid;
+	vi->i_gid = vol->gid;
+	vi->i_mode = 0;
+
+	/*
+	 * Initialize the ntfs specific part of @vi special casing
+	 * FILE_MFT which we need to do at mount time.
+	 */
+	if (vi->i_ino != FILE_MFT)
+		ntfs_init_big_inode(vi);
+	ni = NTFS_I(vi);
+
+	m = map_mft_record(ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto unm_err_out;
+	}
+
+	if (!(m->flags & MFT_RECORD_IN_USE)) {
+		ntfs_error(vi->i_sb, "Inode is not in use!");
+		goto unm_err_out;
+	}
+	if (m->base_mft_record) {
+		ntfs_error(vi->i_sb, "Inode is an extent inode!");
+		goto unm_err_out;
+	}
+
+	/* Transfer information from mft record into vfs and ntfs inodes. */
+	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
+
+	/*
+	 * FIXME: Keep in mind that link_count is two for files which have both
+	 * a long file name and a short file name as separate entries, so if
+	 * we are hiding short file names this will be too high. Either we need
+	 * to account for the short file names by subtracting them or we need
+	 * to make sure we delete files even though i_nlink is not zero which
+	 * might be tricky due to vfs interactions. Need to think about this
+	 * some more when implementing the unlink command.
+	 */
+	vi->i_nlink = le16_to_cpu(m->link_count);
+	/*
+	 * FIXME: Reparse points can have the directory bit set even though
+	 * they would be S_IFLNK. Need to deal with this further below when we
+	 * implement reparse points / symbolic links but it will do for now.
+	 * Also if not a directory, it could be something else, rather than
+	 * a regular file. But again, will do for now.
+	 */
+	/* Everyone gets all permissions. */
+	vi->i_mode |= S_IRWXUGO;
+	/* If read-only, no one gets write permissions. */
+	if (IS_RDONLY(vi))
+		vi->i_mode &= ~S_IWUGO;
+	if (m->flags & MFT_RECORD_IS_DIRECTORY) {
+		vi->i_mode |= S_IFDIR;
+		/*
+		 * Apply the directory permissions mask set in the mount
+		 * options.
+		 */
+		vi->i_mode &= ~vol->dmask;
+		/* Things break without this kludge! */
+		if (vi->i_nlink > 1)
+			vi->i_nlink = 1;
+	} else {
+		vi->i_mode |= S_IFREG;
+		/* Apply the file permissions mask set in the mount options. */
+		vi->i_mode &= ~vol->fmask;
+	}
+	/*
+	 * Find the standard information attribute in the mft record. At this
+	 * stage we haven't setup the attribute list stuff yet, so this could
+	 * in fact fail if the standard information is in an extent record, but
+	 * I don't think this actually ever happens.
+	 */
+	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0, 0, 0, NULL, 0,
+			ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT) {
+			/*
+			 * TODO: We should be performing a hot fix here (if the
+			 * recover mount option is set) by creating a new
+			 * attribute.
+			 */
+			ntfs_error(vi->i_sb, "$STANDARD_INFORMATION attribute "
+					"is missing.");
+		}
+		goto unm_err_out;
+	}
+	/* Get the standard information attribute value. */
+	si = (STANDARD_INFORMATION*)((char*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+
+	/* Transfer information from the standard information into vi. */
+	/*
+	 * Note: The i_?times do not quite map perfectly onto the NTFS times,
+	 * but they are close enough, and in the end it doesn't really matter
+	 * that much...
+	 */
+	/*
+	 * mtime is the last change of the data within the file. Not changed
+	 * when only metadata is changed, e.g. a rename doesn't affect mtime.
+	 */
+	vi->i_mtime = ntfs2utc(si->last_data_change_time);
+	/*
+	 * ctime is the last change of the metadata of the file. This obviously
+	 * always changes, when mtime is changed. ctime can be changed on its
+	 * own, mtime is then not changed, e.g. when a file is renamed.
+	 */
+	vi->i_ctime = ntfs2utc(si->last_mft_change_time);
+	/*
+	 * Last access to the data within the file. Not changed during a rename
+	 * for example but changed whenever the file is written to.
+	 */
+	vi->i_atime = ntfs2utc(si->last_access_time);
+
+	/* Find the attribute list attribute if present. */
+	ntfs_attr_reinit_search_ctx(ctx);
+	err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
+	if (err) {
+		if (unlikely(err != -ENOENT)) {
+			ntfs_error(vi->i_sb, "Failed to lookup attribute list "
+					"attribute.");
+			goto unm_err_out;
+		}
+	} else /* if (!err) */ {
+		if (vi->i_ino == FILE_MFT)
+			goto skip_attr_list_load;
+		ntfs_debug("Attribute list found in inode 0x%lx.", vi->i_ino);
+		NInoSetAttrList(ni);
+		if (ctx->attr->flags & ATTR_IS_ENCRYPTED ||
+				ctx->attr->flags & ATTR_COMPRESSION_MASK ||
+				ctx->attr->flags & ATTR_IS_SPARSE) {
+			ntfs_error(vi->i_sb, "Attribute list attribute is "
+					"compressed/encrypted/sparse.");
+			goto unm_err_out;
+		}
+		/* Now allocate memory for the attribute list. */
+		ni->attr_list_size = (u32)ntfs_attr_size(ctx->attr);
+		ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
+		if (!ni->attr_list) {
+			ntfs_error(vi->i_sb, "Not enough memory to allocate "
+					"buffer for attribute list.");
+			err = -ENOMEM;
+			goto unm_err_out;
+		}
+		if (ctx->attr->non_resident) {
+			NInoSetAttrListNonResident(ni);
+			if (ctx->attr->data.non_resident.lowest_vcn) {
+				ntfs_error(vi->i_sb, "Attribute list has non "
+						"zero lowest_vcn.");
+				goto unm_err_out;
+			}
+			/*
+			 * Setup the runlist. No need for locking as we have
+			 * exclusive access to the inode at this time.
+			 */
+			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
+					ctx->attr, NULL);
+			if (IS_ERR(ni->attr_list_rl.rl)) {
+				err = PTR_ERR(ni->attr_list_rl.rl);
+				ni->attr_list_rl.rl = NULL;
+				ntfs_error(vi->i_sb, "Mapping pairs "
+						"decompression failed.");
+				goto unm_err_out;
+			}
+			/* Now load the attribute list. */
+			if ((err = load_attribute_list(vol, &ni->attr_list_rl,
+					ni->attr_list, ni->attr_list_size,
+					sle64_to_cpu(ctx->attr->data.
+					non_resident.initialized_size)))) {
+				ntfs_error(vi->i_sb, "Failed to load "
+						"attribute list attribute.");
+				goto unm_err_out;
+			}
+		} else /* if (!ctx.attr->non_resident) */ {
+			if ((u8*)ctx->attr + le16_to_cpu(
+					ctx->attr->data.resident.value_offset) +
+					le32_to_cpu(
+					ctx->attr->data.resident.value_length) >
+					(u8*)ctx->mrec + vol->mft_record_size) {
+				ntfs_error(vi->i_sb, "Corrupt attribute list "
+						"in inode.");
+				goto unm_err_out;
+			}
+			/* Now copy the attribute list. */
+			memcpy(ni->attr_list, (u8*)ctx->attr + le16_to_cpu(
+					ctx->attr->data.resident.value_offset),
+					le32_to_cpu(
+					ctx->attr->data.resident.value_length));
+		}
+	}
+skip_attr_list_load:
+	/*
+	 * If an attribute list is present we now have the attribute list value
+	 * in ntfs_ino->attr_list and it is ntfs_ino->attr_list_size bytes.
+	 */
+	if (S_ISDIR(vi->i_mode)) {
+		struct inode *bvi;
+		ntfs_inode *bni;
+		INDEX_ROOT *ir;
+		char *ir_end, *index_end;
+
+		/* It is a directory, find index root attribute. */
+		ntfs_attr_reinit_search_ctx(ctx);
+		err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE,
+				0, NULL, 0, ctx);
+		if (unlikely(err)) {
+			if (err == -ENOENT) {
+				// FIXME: File is corrupt! Hot-fix with empty
+				// index root attribute if recovery option is
+				// set.
+				ntfs_error(vi->i_sb, "$INDEX_ROOT attribute "
+						"is missing.");
+			}
+			goto unm_err_out;
+		}
+		/* Set up the state. */
+		if (unlikely(ctx->attr->non_resident)) {
+			ntfs_error(vol->sb, "$INDEX_ROOT attribute is not "
+					"resident.");
+			goto unm_err_out;
+		}
+		/* Ensure the attribute name is placed before the value. */
+		if (unlikely(ctx->attr->name_length &&
+				(le16_to_cpu(ctx->attr->name_offset) >=
+				le16_to_cpu(ctx->attr->data.resident.
+				value_offset)))) {
+			ntfs_error(vol->sb, "$INDEX_ROOT attribute name is "
+					"placed after the attribute value.");
+			goto unm_err_out;
+		}
+		/*
+		 * Compressed/encrypted index root just means that the newly
+		 * created files in that directory should be created compressed/
+		 * encrypted. However index root cannot be both compressed and
+		 * encrypted.
+		 */
+		if (ctx->attr->flags & ATTR_COMPRESSION_MASK)
+			NInoSetCompressed(ni);
+		if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
+			if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+				ntfs_error(vi->i_sb, "Found encrypted and "
+						"compressed attribute.");
+				goto unm_err_out;
+			}
+			NInoSetEncrypted(ni);
+		}
+		if (ctx->attr->flags & ATTR_IS_SPARSE)
+			NInoSetSparse(ni);
+		ir = (INDEX_ROOT*)((char*)ctx->attr + le16_to_cpu(
+				ctx->attr->data.resident.value_offset));
+		ir_end = (char*)ir + le32_to_cpu(
+				ctx->attr->data.resident.value_length);
+		if (ir_end > (char*)ctx->mrec + vol->mft_record_size) {
+			ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
+					"corrupt.");
+			goto unm_err_out;
+		}
+		index_end = (char*)&ir->index +
+				le32_to_cpu(ir->index.index_length);
+		if (index_end > ir_end) {
+			ntfs_error(vi->i_sb, "Directory index is corrupt.");
+			goto unm_err_out;
+		}
+		if (ir->type != AT_FILE_NAME) {
+			ntfs_error(vi->i_sb, "Indexed attribute is not "
+					"$FILE_NAME.");
+			goto unm_err_out;
+		}
+		if (ir->collation_rule != COLLATION_FILE_NAME) {
+			ntfs_error(vi->i_sb, "Index collation rule is not "
+					"COLLATION_FILE_NAME.");
+			goto unm_err_out;
+		}
+		ni->itype.index.collation_rule = ir->collation_rule;
+		ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
+		if (ni->itype.index.block_size &
+				(ni->itype.index.block_size - 1)) {
+			ntfs_error(vi->i_sb, "Index block size (%u) is not a "
+					"power of two.",
+					ni->itype.index.block_size);
+			goto unm_err_out;
+		}
+		if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+			ntfs_error(vi->i_sb, "Index block size (%u) > "
+					"PAGE_CACHE_SIZE (%ld) is not "
+					"supported.  Sorry.",
+					ni->itype.index.block_size,
+					PAGE_CACHE_SIZE);
+			err = -EOPNOTSUPP;
+			goto unm_err_out;
+		}
+		if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
+			ntfs_error(vi->i_sb, "Index block size (%u) < "
+					"NTFS_BLOCK_SIZE (%i) is not "
+					"supported.  Sorry.",
+					ni->itype.index.block_size,
+					NTFS_BLOCK_SIZE);
+			err = -EOPNOTSUPP;
+			goto unm_err_out;
+		}
+		ni->itype.index.block_size_bits =
+				ffs(ni->itype.index.block_size) - 1;
+		/* Determine the size of a vcn in the directory index. */
+		if (vol->cluster_size <= ni->itype.index.block_size) {
+			ni->itype.index.vcn_size = vol->cluster_size;
+			ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
+		} else {
+			ni->itype.index.vcn_size = vol->sector_size;
+			ni->itype.index.vcn_size_bits = vol->sector_size_bits;
+		}
+
+		/* Setup the index allocation attribute, even if not present. */
+		NInoSetMstProtected(ni);
+		ni->type = AT_INDEX_ALLOCATION;
+		ni->name = I30;
+		ni->name_len = 4;
+
+		if (!(ir->index.flags & LARGE_INDEX)) {
+			/* No index allocation. */
+			vi->i_size = ni->initialized_size =
+					ni->allocated_size = 0;
+			/* We are done with the mft record, so we release it. */
+			ntfs_attr_put_search_ctx(ctx);
+			unmap_mft_record(ni);
+			m = NULL;
+			ctx = NULL;
+			goto skip_large_dir_stuff;
+		} /* LARGE_INDEX: Index allocation present. Setup state. */
+		NInoSetIndexAllocPresent(ni);
+		/* Find index allocation attribute. */
+		ntfs_attr_reinit_search_ctx(ctx);
+		err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, I30, 4,
+				CASE_SENSITIVE, 0, NULL, 0, ctx);
+		if (unlikely(err)) {
+			if (err == -ENOENT)
+				ntfs_error(vi->i_sb, "$INDEX_ALLOCATION "
+						"attribute is not present but "
+						"$INDEX_ROOT indicated it is.");
+			else
+				ntfs_error(vi->i_sb, "Failed to lookup "
+						"$INDEX_ALLOCATION "
+						"attribute.");
+			goto unm_err_out;
+		}
+		if (!ctx->attr->non_resident) {
+			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
+					"is resident.");
+			goto unm_err_out;
+		}
+		/*
+		 * Ensure the attribute name is placed before the mapping pairs
+		 * array.
+		 */
+		if (unlikely(ctx->attr->name_length &&
+				(le16_to_cpu(ctx->attr->name_offset) >=
+				le16_to_cpu(ctx->attr->data.non_resident.
+				mapping_pairs_offset)))) {
+			ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name "
+					"is placed after the mapping pairs "
+					"array.");
+			goto unm_err_out;
+		}
+		if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
+			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
+					"is encrypted.");
+			goto unm_err_out;
+		}
+		if (ctx->attr->flags & ATTR_IS_SPARSE) {
+			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
+					"is sparse.");
+			goto unm_err_out;
+		}
+		if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
+					"is compressed.");
+			goto unm_err_out;
+		}
+		if (ctx->attr->data.non_resident.lowest_vcn) {
+			ntfs_error(vi->i_sb, "First extent of "
+					"$INDEX_ALLOCATION attribute has non "
+					"zero lowest_vcn.");
+			goto unm_err_out;
+		}
+		vi->i_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.data_size);
+		ni->initialized_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.initialized_size);
+		ni->allocated_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.allocated_size);
+		/*
+		 * We are done with the mft record, so we release it. Otherwise
+		 * we would deadlock in ntfs_attr_iget().
+		 */
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(ni);
+		m = NULL;
+		ctx = NULL;
+		/* Get the index bitmap attribute inode. */
+		bvi = ntfs_attr_iget(vi, AT_BITMAP, I30, 4);
+		if (IS_ERR(bvi)) {
+			ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
+			err = PTR_ERR(bvi);
+			goto unm_err_out;
+		}
+		ni->itype.index.bmp_ino = bvi;
+		bni = NTFS_I(bvi);
+		if (NInoCompressed(bni) || NInoEncrypted(bni) ||
+				NInoSparse(bni)) {
+			ntfs_error(vi->i_sb, "$BITMAP attribute is compressed "
+					"and/or encrypted and/or sparse.");
+			goto unm_err_out;
+		}
+		/* Consistency check bitmap size vs. index allocation size. */
+		if ((bvi->i_size << 3) < (vi->i_size >>
+				ni->itype.index.block_size_bits)) {
+			ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) "
+					"for index allocation (0x%llx).",
+					bvi->i_size << 3, vi->i_size);
+			goto unm_err_out;
+		}
+skip_large_dir_stuff:
+		/* Setup the operations for this inode. */
+		vi->i_op = &ntfs_dir_inode_ops;
+		vi->i_fop = &ntfs_dir_ops;
+	} else {
+		/* It is a file. */
+		ntfs_attr_reinit_search_ctx(ctx);
+
+		/* Setup the data attribute, even if not present. */
+		ni->type = AT_DATA;
+		ni->name = NULL;
+		ni->name_len = 0;
+
+		/* Find first extent of the unnamed data attribute. */
+		err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx);
+		if (unlikely(err)) {
+			vi->i_size = ni->initialized_size =
+					ni->allocated_size = 0;
+			if (err != -ENOENT) {
+				ntfs_error(vi->i_sb, "Failed to lookup $DATA "
+						"attribute.");
+				goto unm_err_out;
+			}
+			/*
+			 * FILE_Secure does not have an unnamed $DATA
+			 * attribute, so we special case it here.
+			 */
+			if (vi->i_ino == FILE_Secure)
+				goto no_data_attr_special_case;
+			/*
+			 * Most if not all the system files in the $Extend
+			 * system directory do not have unnamed data
+			 * attributes so we need to check if the parent
+			 * directory of the file is FILE_Extend and if it is
+			 * ignore this error. To do this we need to get the
+			 * name of this inode from the mft record as the name
+			 * contains the back reference to the parent directory.
+			 */
+			if (ntfs_is_extended_system_file(ctx) > 0)
+				goto no_data_attr_special_case;
+			// FIXME: File is corrupt! Hot-fix with empty data
+			// attribute if recovery option is set.
+			ntfs_error(vi->i_sb, "$DATA attribute is missing.");
+			goto unm_err_out;
+		}
+		/* Setup the state. */
+		if (ctx->attr->non_resident) {
+			NInoSetNonResident(ni);
+			if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+				NInoSetCompressed(ni);
+				if (vol->cluster_size > 4096) {
+					ntfs_error(vi->i_sb, "Found "
+						"compressed data but "
+						"compression is disabled due "
+						"to cluster size (%i) > 4kiB.",
+						vol->cluster_size);
+					goto unm_err_out;
+				}
+				if ((ctx->attr->flags & ATTR_COMPRESSION_MASK)
+						!= ATTR_IS_COMPRESSED) {
+					ntfs_error(vi->i_sb, "Found "
+						"unknown compression method or "
+						"corrupt file.");
+					goto unm_err_out;
+				}
+				ni->itype.compressed.block_clusters = 1U <<
+						ctx->attr->data.non_resident.
+						compression_unit;
+				if (ctx->attr->data.non_resident.
+						compression_unit != 4) {
+					ntfs_error(vi->i_sb, "Found "
+						"nonstandard compression unit "
+						"(%u instead of 4).  Cannot "
+						"handle this.",
+						ctx->attr->data.non_resident.
+						compression_unit);
+					err = -EOPNOTSUPP;
+					goto unm_err_out;
+				}
+				ni->itype.compressed.block_size = 1U << (
+						ctx->attr->data.non_resident.
+						compression_unit +
+						vol->cluster_size_bits);
+				ni->itype.compressed.block_size_bits = ffs(
+					ni->itype.compressed.block_size) - 1;
+			}
+			if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
+				if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+					ntfs_error(vi->i_sb, "Found encrypted "
+							"and compressed data.");
+					goto unm_err_out;
+				}
+				NInoSetEncrypted(ni);
+			}
+			if (ctx->attr->flags & ATTR_IS_SPARSE)
+				NInoSetSparse(ni);
+			if (ctx->attr->data.non_resident.lowest_vcn) {
+				ntfs_error(vi->i_sb, "First extent of $DATA "
+						"attribute has non zero "
+						"lowest_vcn.");
+				goto unm_err_out;
+			}
+			/* Setup all the sizes. */
+			vi->i_size = sle64_to_cpu(
+					ctx->attr->data.non_resident.data_size);
+			ni->initialized_size = sle64_to_cpu(
+					ctx->attr->data.non_resident.
+					initialized_size);
+			ni->allocated_size = sle64_to_cpu(
+					ctx->attr->data.non_resident.
+					allocated_size);
+			if (NInoCompressed(ni)) {
+				ni->itype.compressed.size = sle64_to_cpu(
+						ctx->attr->data.non_resident.
+						compressed_size);
+			}
+		} else { /* Resident attribute. */
+			/*
+			 * Make all sizes equal for simplicity in read code
+			 * paths. FIXME: Need to keep this in mind when
+			 * converting to non-resident attribute in write code
+			 * path. (Probably only affects truncate().)
+			 */
+			vi->i_size = ni->initialized_size = ni->allocated_size =
+					le32_to_cpu(
+					ctx->attr->data.resident.value_length);
+		}
+no_data_attr_special_case:
+		/* We are done with the mft record, so we release it. */
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(ni);
+		m = NULL;
+		ctx = NULL;
+		/* Setup the operations for this inode. */
+		vi->i_op = &ntfs_file_inode_ops;
+		vi->i_fop = &ntfs_file_ops;
+	}
+	if (NInoMstProtected(ni))
+		vi->i_mapping->a_ops = &ntfs_mst_aops;
+	else
+		vi->i_mapping->a_ops = &ntfs_aops;
+	/*
+	 * The number of 512-byte blocks used on disk (for stat). This is
+	 * inaccurate insofar as it doesn't account for any named streams or
+	 * other special non-resident attributes, but that is how Windows works,
+	 * too, so we are at least consistent with Windows, if not entirely
+	 * consistent with the Linux Way. Doing it the Linux Way would cause a
+	 * significant slowdown as it would involve iterating over all
+	 * attributes in the mft record and adding the allocated/compressed
+	 * sizes of all non-resident attributes present to give us the correct
+	 * Linux size that should go into i_blocks (after division by 512).
+	 */
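+	/* E.g. allocated_size of 64kiB yields i_blocks = 65536 >> 9 = 128. */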
+	if (S_ISDIR(vi->i_mode) || !NInoCompressed(ni))
+		vi->i_blocks = ni->allocated_size >> 9;
+	else
+		vi->i_blocks = ni->itype.compressed.size >> 9;
+
+	ntfs_debug("Done.");
+	return 0;
+
+unm_err_out:
+	if (!err)
+		err = -EIO;
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(ni);
+err_out:
+	ntfs_error(vol->sb, "Failed with error code %i.  Marking corrupt "
+			"inode 0x%lx as bad.  Run chkdsk.", err, vi->i_ino);
+	make_bad_inode(vi);
+	if (err != -EOPNOTSUPP && err != -ENOMEM)
+		NVolSetErrors(vol);
+	return err;
+}
+
+/**
+ * ntfs_read_locked_attr_inode - read an attribute inode from its base inode
+ * @base_vi:	base inode
+ * @vi:		attribute inode to read
+ *
+ * ntfs_read_locked_attr_inode() is called from ntfs_attr_iget() to read the
+ * attribute inode described by @vi into memory from the base mft record
+ * described by @base_ni.
+ *
+ * ntfs_read_locked_attr_inode() maps, pins and locks the base inode for
+ * reading and looks up the attribute described by @vi before setting up the
+ * necessary fields in @vi as well as initializing the ntfs inode.
+ *
+ * Q: What locks are held when the function is called?
+ * A: i_state has I_LOCK set, hence the inode is locked, also
+ *    i_count is set to 1, so it is not going to go away.
+ *
+ * Return 0 on success and -errno on error.  In the error case, the inode will
+ * have had make_bad_inode() executed on it.
+ */
+static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
+{
+	ntfs_volume *vol = NTFS_SB(vi->i_sb);
+	ntfs_inode *ni, *base_ni;
+	MFT_RECORD *m;
+	ntfs_attr_search_ctx *ctx;
+	int err = 0;
+
+	ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
+
+	ntfs_init_big_inode(vi);
+
+	ni	= NTFS_I(vi);
+	base_ni = NTFS_I(base_vi);
+
+	/* Just mirror the values from the base inode. */
+	vi->i_blksize	= base_vi->i_blksize;
+	vi->i_version	= base_vi->i_version;
+	vi->i_uid	= base_vi->i_uid;
+	vi->i_gid	= base_vi->i_gid;
+	vi->i_nlink	= base_vi->i_nlink;
+	vi->i_mtime	= base_vi->i_mtime;
+	vi->i_ctime	= base_vi->i_ctime;
+	vi->i_atime	= base_vi->i_atime;
+	vi->i_generation = ni->seq_no = base_ni->seq_no;
+
+	/* Set inode type to zero but preserve permissions. */
+	vi->i_mode	= base_vi->i_mode & ~S_IFMT;
+
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto unm_err_out;
+	}
+
+	/* Find the attribute. */
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err))
+		goto unm_err_out;
+
+	if (!ctx->attr->non_resident) {
+		/* Ensure the attribute name is placed before the value. */
+		if (unlikely(ctx->attr->name_length &&
+				(le16_to_cpu(ctx->attr->name_offset) >=
+				le16_to_cpu(ctx->attr->data.resident.
+				value_offset)))) {
+			ntfs_error(vol->sb, "Attribute name is placed after "
+					"the attribute value.");
+			goto unm_err_out;
+		}
+		if (NInoMstProtected(ni) || ctx->attr->flags) {
+			ntfs_error(vi->i_sb, "Found mst protected attribute "
+					"or attribute with non-zero flags but "
+					"the attribute is resident.  Please "
+					"report you saw this message to "
+					"linux-ntfs-dev@lists.sourceforge.net");
+			goto unm_err_out;
+		}
+		/*
+		 * Resident attribute. Make all sizes equal for simplicity in
+		 * read code paths.
+		 */
+		vi->i_size = ni->initialized_size = ni->allocated_size =
+			le32_to_cpu(ctx->attr->data.resident.value_length);
+	} else {
+		NInoSetNonResident(ni);
+		/*
+		 * Ensure the attribute name is placed before the mapping pairs
+		 * array.
+		 */
+		if (unlikely(ctx->attr->name_length &&
+				(le16_to_cpu(ctx->attr->name_offset) >=
+				le16_to_cpu(ctx->attr->data.non_resident.
+				mapping_pairs_offset)))) {
+			ntfs_error(vol->sb, "Attribute name is placed after "
+					"the mapping pairs array.");
+			goto unm_err_out;
+		}
+		if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+			if (NInoMstProtected(ni)) {
+				ntfs_error(vi->i_sb, "Found mst protected "
+						"attribute but the attribute "
+						"is compressed.  Please report "
+						"you saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				goto unm_err_out;
+			}
+			NInoSetCompressed(ni);
+			if ((ni->type != AT_DATA) || (ni->type == AT_DATA &&
+					ni->name_len)) {
+				ntfs_error(vi->i_sb, "Found compressed "
+						"non-data or named data "
+						"attribute.  Please report "
+						"you saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				goto unm_err_out;
+			}
+			if (vol->cluster_size > 4096) {
+				ntfs_error(vi->i_sb, "Found compressed "
+						"attribute but compression is "
+						"disabled due to cluster size "
+						"(%i) > 4kiB.",
+						vol->cluster_size);
+				goto unm_err_out;
+			}
+			if ((ctx->attr->flags & ATTR_COMPRESSION_MASK)
+					!= ATTR_IS_COMPRESSED) {
+				ntfs_error(vi->i_sb, "Found unknown "
+						"compression method.");
+				goto unm_err_out;
+			}
+			ni->itype.compressed.block_clusters = 1U <<
+					ctx->attr->data.non_resident.
+					compression_unit;
+			if (ctx->attr->data.non_resident.compression_unit !=
+					4) {
+				ntfs_error(vi->i_sb, "Found nonstandard "
+						"compression unit (%u instead "
+						"of 4).  Cannot handle this.",
+						ctx->attr->data.non_resident.
+						compression_unit);
+				err = -EOPNOTSUPP;
+				goto unm_err_out;
+			}
+			ni->itype.compressed.block_size = 1U << (
+					ctx->attr->data.non_resident.
+					compression_unit +
+					vol->cluster_size_bits);
+			ni->itype.compressed.block_size_bits = ffs(
+				ni->itype.compressed.block_size) - 1;
+		}
+		if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
+			if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+				ntfs_error(vi->i_sb, "Found encrypted "
+						"and compressed data.");
+				goto unm_err_out;
+			}
+			if (NInoMstProtected(ni)) {
+				ntfs_error(vi->i_sb, "Found mst protected "
+						"attribute but the attribute "
+						"is encrypted.  Please report "
+						"you saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				goto unm_err_out;
+			}
+			NInoSetEncrypted(ni);
+		}
+		if (ctx->attr->flags & ATTR_IS_SPARSE) {
+			if (NInoMstProtected(ni)) {
+				ntfs_error(vi->i_sb, "Found mst protected "
+						"attribute but the attribute "
+						"is sparse.  Please report "
+						"you saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				goto unm_err_out;
+			}
+			NInoSetSparse(ni);
+		}
+		if (ctx->attr->data.non_resident.lowest_vcn) {
+			ntfs_error(vi->i_sb, "First extent of attribute has "
+					"non-zero lowest_vcn.");
+			goto unm_err_out;
+		}
+		/* Setup all the sizes. */
+		vi->i_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.data_size);
+		ni->initialized_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.initialized_size);
+		ni->allocated_size = sle64_to_cpu(
+				ctx->attr->data.non_resident.allocated_size);
+		if (NInoCompressed(ni)) {
+			ni->itype.compressed.size = sle64_to_cpu(
+					ctx->attr->data.non_resident.
+					compressed_size);
+		}
+	}
+
+	/* Setup the operations for this attribute inode. */
+	vi->i_op = NULL;
+	vi->i_fop = NULL;
+	if (NInoMstProtected(ni))
+		vi->i_mapping->a_ops = &ntfs_mst_aops;
+	else
+		vi->i_mapping->a_ops = &ntfs_aops;
+
+	if (!NInoCompressed(ni))
+		vi->i_blocks = ni->allocated_size >> 9;
+	else
+		vi->i_blocks = ni->itype.compressed.size >> 9;
+
+	/*
+	 * Make sure the base inode doesn't go away and attach it to the
+	 * attribute inode.
+	 */
+	igrab(base_vi);
+	ni->ext.base_ntfs_ino = base_ni;
+	ni->nr_extents = -1;
+
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+
+	ntfs_debug("Done.");
+	return 0;
+
+unm_err_out:
+	if (!err)
+		err = -EIO;
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+err_out:
+	ntfs_error(vol->sb, "Failed with error code %i while reading attribute "
+			"inode (mft_no 0x%lx, type 0x%x, name_len %i).  "
+			"Marking corrupt inode and base inode 0x%lx as bad.  "
+			"Run chkdsk.", err, vi->i_ino, ni->type, ni->name_len,
+			base_vi->i_ino);
+	make_bad_inode(vi);
+	make_bad_inode(base_vi);
+	if (err != -ENOMEM)
+		NVolSetErrors(vol);
+	return err;
+}
+
+/**
+ * ntfs_read_locked_index_inode - read an index inode from its base inode
+ * @base_vi:	base inode
+ * @vi:		index inode to read
+ *
+ * ntfs_read_locked_index_inode() is called from ntfs_index_iget() to read the
+ * index inode described by @vi into memory from the base mft record described
+ * by @base_ni.
+ *
+ * ntfs_read_locked_index_inode() maps, pins and locks the base inode for
+ * reading and looks up the attributes relating to the index described by @vi
+ * before setting up the necessary fields in @vi as well as initializing the
+ * ntfs inode.
+ *
+ * Note, index inodes are essentially attribute inodes (NInoAttr() is true)
+ * with the attribute type set to AT_INDEX_ALLOCATION.  Apart from that, they
+ * are set up like directory inodes since directories are a special case of
+ * indices, so they need to be treated in much the same way.  Most importantly,
+ * for small indices the index allocation attribute might not actually exist.
+ * However, the index root attribute always exists but does not need to have
+ * an inode associated with it, which is why we define the new inode type
+ * index.  Also, like for directories, we need to have an attribute inode for
+ * the bitmap attribute corresponding to the index allocation attribute and we
+ * can store this in the appropriate field of the inode, just like we do for
+ * normal directory inodes.
+ *
+ * Q: What locks are held when the function is called?
+ * A: i_state has I_LOCK set, hence the inode is locked, also
+ *    i_count is set to 1, so it is not going to go away.
+ *
+ * Return 0 on success and -errno on error.  In the error case, the inode will
+ * have had make_bad_inode() executed on it.
+ */
+static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
+{
+	ntfs_volume *vol = NTFS_SB(vi->i_sb);
+	ntfs_inode *ni, *base_ni, *bni;
+	struct inode *bvi;
+	MFT_RECORD *m;
+	ntfs_attr_search_ctx *ctx;
+	INDEX_ROOT *ir;
+	u8 *ir_end, *index_end;
+	int err = 0;
+
+	ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
+	ntfs_init_big_inode(vi);
+	ni	= NTFS_I(vi);
+	base_ni = NTFS_I(base_vi);
+	/* Just mirror the values from the base inode. */
+	vi->i_blksize	= base_vi->i_blksize;
+	vi->i_version	= base_vi->i_version;
+	vi->i_uid	= base_vi->i_uid;
+	vi->i_gid	= base_vi->i_gid;
+	vi->i_nlink	= base_vi->i_nlink;
+	vi->i_mtime	= base_vi->i_mtime;
+	vi->i_ctime	= base_vi->i_ctime;
+	vi->i_atime	= base_vi->i_atime;
+	vi->i_generation = ni->seq_no = base_ni->seq_no;
+	/* Set inode type to zero but preserve permissions. */
+	vi->i_mode	= base_vi->i_mode & ~S_IFMT;
+	/* Map the mft record for the base inode. */
+	m = map_mft_record(base_ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(base_ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto unm_err_out;
+	}
+	/* Find the index root attribute. */
+	err = ntfs_attr_lookup(AT_INDEX_ROOT, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT)
+			ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
+					"missing.");
+		goto unm_err_out;
+	}
+	/* Set up the state. */
+	if (unlikely(ctx->attr->non_resident)) {
+		ntfs_error(vol->sb, "$INDEX_ROOT attribute is not resident.");
+		goto unm_err_out;
+	}
+	/* Ensure the attribute name is placed before the value. */
+	if (unlikely(ctx->attr->name_length &&
+			(le16_to_cpu(ctx->attr->name_offset) >=
+			le16_to_cpu(ctx->attr->data.resident.
+			value_offset)))) {
+		ntfs_error(vol->sb, "$INDEX_ROOT attribute name is placed "
+				"after the attribute value.");
+		goto unm_err_out;
+	}
+	/* Compressed/encrypted/sparse index root is not allowed. */
+	if (ctx->attr->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_ENCRYPTED |
+			ATTR_IS_SPARSE)) {
+		ntfs_error(vi->i_sb, "Found compressed/encrypted/sparse index "
+				"root attribute.");
+		goto unm_err_out;
+	}
+	ir = (INDEX_ROOT*)((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	ir_end = (u8*)ir + le32_to_cpu(ctx->attr->data.resident.value_length);
+	if (ir_end > (u8*)ctx->mrec + vol->mft_record_size) {
+		ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is corrupt.");
+		goto unm_err_out;
+	}
+	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
+	if (index_end > ir_end) {
+		ntfs_error(vi->i_sb, "Index is corrupt.");
+		goto unm_err_out;
+	}
+	if (ir->type) {
+		ntfs_error(vi->i_sb, "Index type is not 0 (type is 0x%x).",
+				le32_to_cpu(ir->type));
+		goto unm_err_out;
+	}
+	ni->itype.index.collation_rule = ir->collation_rule;
+	ntfs_debug("Index collation rule is 0x%x.",
+			le32_to_cpu(ir->collation_rule));
+	ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
+	if (ni->itype.index.block_size & (ni->itype.index.block_size - 1)) {
+		ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
+				"two.", ni->itype.index.block_size);
+		goto unm_err_out;
+	}
+	if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+				"(%ld) is not supported.  Sorry.",
+				ni->itype.index.block_size, PAGE_CACHE_SIZE);
+		err = -EOPNOTSUPP;
+		goto unm_err_out;
+	}
+	if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
+		ntfs_error(vi->i_sb, "Index block size (%u) < NTFS_BLOCK_SIZE "
+				"(%i) is not supported.  Sorry.",
+				ni->itype.index.block_size, NTFS_BLOCK_SIZE);
+		err = -EOPNOTSUPP;
+		goto unm_err_out;
+	}
+	ni->itype.index.block_size_bits = ffs(ni->itype.index.block_size) - 1;
+	/* Determine the size of a vcn in the index. */
+	if (vol->cluster_size <= ni->itype.index.block_size) {
+		ni->itype.index.vcn_size = vol->cluster_size;
+		ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
+	} else {
+		ni->itype.index.vcn_size = vol->sector_size;
+		ni->itype.index.vcn_size_bits = vol->sector_size_bits;
+	}
+	/* Check for presence of index allocation attribute. */
+	if (!(ir->index.flags & LARGE_INDEX)) {
+		/* No index allocation. */
+		vi->i_size = ni->initialized_size = ni->allocated_size = 0;
+		/* We are done with the mft record, so we release it. */
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(base_ni);
+		m = NULL;
+		ctx = NULL;
+		goto skip_large_index_stuff;
+	} /* LARGE_INDEX:  Index allocation present.  Setup state. */
+	NInoSetIndexAllocPresent(ni);
+	/* Find index allocation attribute. */
+	ntfs_attr_reinit_search_ctx(ctx);
+	err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT)
+			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
+					"not present but $INDEX_ROOT "
+					"indicated it is.");
+		else
+			ntfs_error(vi->i_sb, "Failed to lookup "
+					"$INDEX_ALLOCATION attribute.");
+		goto unm_err_out;
+	}
+	if (!ctx->attr->non_resident) {
+		ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
+				"resident.");
+		goto unm_err_out;
+	}
+	/*
+	 * Ensure the attribute name is placed before the mapping pairs array.
+	 */
+	if (unlikely(ctx->attr->name_length && (le16_to_cpu(
+			ctx->attr->name_offset) >= le16_to_cpu(
+			ctx->attr->data.non_resident.mapping_pairs_offset)))) {
+		ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name is "
+				"placed after the mapping pairs array.");
+		goto unm_err_out;
+	}
+	if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
+		ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
+				"encrypted.");
+		goto unm_err_out;
+	}
+	if (ctx->attr->flags & ATTR_IS_SPARSE) {
+		ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is sparse.");
+		goto unm_err_out;
+	}
+	if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
+		ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
+				"compressed.");
+		goto unm_err_out;
+	}
+	if (ctx->attr->data.non_resident.lowest_vcn) {
+		ntfs_error(vi->i_sb, "First extent of $INDEX_ALLOCATION "
+				"attribute has non zero lowest_vcn.");
+		goto unm_err_out;
+	}
+	vi->i_size = sle64_to_cpu(ctx->attr->data.non_resident.data_size);
+	ni->initialized_size = sle64_to_cpu(
+			ctx->attr->data.non_resident.initialized_size);
+	ni->allocated_size = sle64_to_cpu(
+			ctx->attr->data.non_resident.allocated_size);
+	/*
+	 * We are done with the mft record, so we release it.  Otherwise
+	 * we would deadlock in ntfs_attr_iget().
+	 */
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(base_ni);
+	m = NULL;
+	ctx = NULL;
+	/* Get the index bitmap attribute inode. */
+	bvi = ntfs_attr_iget(base_vi, AT_BITMAP, ni->name, ni->name_len);
+	if (IS_ERR(bvi)) {
+		ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
+		err = PTR_ERR(bvi);
+		goto unm_err_out;
+	}
+	bni = NTFS_I(bvi);
+	if (NInoCompressed(bni) || NInoEncrypted(bni) ||
+			NInoSparse(bni)) {
+		ntfs_error(vi->i_sb, "$BITMAP attribute is compressed and/or "
+				"encrypted and/or sparse.");
+		goto iput_unm_err_out;
+	}
+	/* Consistency check bitmap size vs. index allocation size. */
+	if ((bvi->i_size << 3) < (vi->i_size >>
+			ni->itype.index.block_size_bits)) {
+		ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) for "
+				"index allocation (0x%llx).", bvi->i_size << 3,
+				vi->i_size);
+		goto iput_unm_err_out;
+	}
+	ni->itype.index.bmp_ino = bvi;
+skip_large_index_stuff:
+	/* Setup the operations for this index inode. */
+	vi->i_op = NULL;
+	vi->i_fop = NULL;
+	vi->i_mapping->a_ops = &ntfs_mst_aops;
+	vi->i_blocks = ni->allocated_size >> 9;
+
+	/*
+	 * Make sure the base inode doesn't go away and attach it to the
+	 * index inode.
+	 */
+	igrab(base_vi);
+	ni->ext.base_ntfs_ino = base_ni;
+	ni->nr_extents = -1;
+
+	ntfs_debug("Done.");
+	return 0;
+
+iput_unm_err_out:
+	iput(bvi);
+unm_err_out:
+	if (!err)
+		err = -EIO;
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(base_ni);
+err_out:
+	ntfs_error(vi->i_sb, "Failed with error code %i while reading index "
+			"inode (mft_no 0x%lx, name_len %i.", err, vi->i_ino,
+			ni->name_len);
+	make_bad_inode(vi);
+	if (err != -EOPNOTSUPP && err != -ENOMEM)
+		NVolSetErrors(vol);
+	return err;
+}
+
+/**
+ * ntfs_read_inode_mount - special read_inode for mount time use only
+ * @vi:		inode to read
+ *
+ * Read inode FILE_MFT at mount time, only called with super_block lock
+ * held from within the read_super() code path.
+ *
+ * This function exists because when it is called the page cache for $MFT/$DATA
+ * is not initialized and hence we cannot get at the contents of mft records
+ * by calling map_mft_record*().
+ *
+ * Further it needs to cope with the circular references problem, i.e. cannot
+ * load any attributes other than $ATTRIBUTE_LIST until $DATA is loaded, because
+ * we do not know where the other extent mft records are yet and again, because
+ * we cannot call map_mft_record*() yet.  Obviously this applies only when an
+ * attribute list is actually present in $MFT inode.
+ *
+ * We solve these problems by starting with the $DATA attribute before anything
+ * else and iterating using ntfs_attr_lookup($DATA) over all extents.  As each
+ * extent is found, we ntfs_mapping_pairs_decompress() including the implied
+ * ntfs_runlists_merge().  Each step of the iteration necessarily provides
+ * sufficient information for the next step to complete.
+ *
+ * This should work but there are two possible pitfalls (see inline comments
+ * below), but only time will tell if they are real pits or just smoke...
+ */
+int ntfs_read_inode_mount(struct inode *vi)
+{
+	VCN next_vcn, last_vcn, highest_vcn;
+	s64 block;
+	struct super_block *sb = vi->i_sb;
+	ntfs_volume *vol = NTFS_SB(sb);
+	struct buffer_head *bh;
+	ntfs_inode *ni;
+	MFT_RECORD *m = NULL;
+	ATTR_RECORD *attr;
+	ntfs_attr_search_ctx *ctx;
+	unsigned int i, nr_blocks;
+	int err;
+
+	ntfs_debug("Entering.");
+
+	/* Initialize the ntfs specific part of @vi. */
+	ntfs_init_big_inode(vi);
+
+	ni = NTFS_I(vi);
+
+	/* Setup the data attribute. It is special as it is mst protected. */
+	NInoSetNonResident(ni);
+	NInoSetMstProtected(ni);
+	ni->type = AT_DATA;
+	ni->name = NULL;
+	ni->name_len = 0;
+
+	/*
+	 * This sets up our little cheat allowing us to reuse the async read io
+	 * completion handler for directories.
+	 */
+	ni->itype.index.block_size = vol->mft_record_size;
+	ni->itype.index.block_size_bits = vol->mft_record_size_bits;
+
+	/* Very important! Needed to be able to call map_mft_record*(). */
+	vol->mft_ino = vi;
+
+	/* Allocate enough memory to read the first mft record. */
+	if (vol->mft_record_size > 64 * 1024) {
+		ntfs_error(sb, "Unsupported mft record size %i (max 64kiB).",
+				vol->mft_record_size);
+		goto err_out;
+	}
+	i = vol->mft_record_size;
+	if (i < sb->s_blocksize)
+		i = sb->s_blocksize;
+	m = (MFT_RECORD*)ntfs_malloc_nofs(i);
+	if (!m) {
+		ntfs_error(sb, "Failed to allocate buffer for $MFT record 0.");
+		goto err_out;
+	}
+
+	/* Determine the first block of the $MFT/$DATA attribute. */
+	block = vol->mft_lcn << vol->cluster_size_bits >>
+			sb->s_blocksize_bits;
+	nr_blocks = vol->mft_record_size >> sb->s_blocksize_bits;
+	if (!nr_blocks)
+		nr_blocks = 1;
+
+	/* Load $MFT/$DATA's first mft record. */
+	for (i = 0; i < nr_blocks; i++) {
+		bh = sb_bread(sb, block++);
+		if (!bh) {
+			ntfs_error(sb, "Device read failed.");
+			goto err_out;
+		}
+		memcpy((char*)m + (i << sb->s_blocksize_bits), bh->b_data,
+				sb->s_blocksize);
+		brelse(bh);
+	}
+
+	/* Apply the mst fixups. */
+	if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
+		/* FIXME: Try to use the $MFTMirr now. */
+		ntfs_error(sb, "MST fixup failed. $MFT is corrupt.");
+		goto err_out;
+	}
+
+	/* Need this to sanity check attribute list references to $MFT. */
+	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
+
+	/* Provides readpage() and sync_page() for map_mft_record(). */
+	vi->i_mapping->a_ops = &ntfs_mst_aops;
+
+	ctx = ntfs_attr_get_search_ctx(ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/* Find the attribute list attribute if present. */
+	err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
+	if (err) {
+		if (unlikely(err != -ENOENT)) {
+			ntfs_error(sb, "Failed to lookup attribute list "
+					"attribute. You should run chkdsk.");
+			goto put_err_out;
+		}
+	} else /* if (!err) */ {
+		ATTR_LIST_ENTRY *al_entry, *next_al_entry;
+		u8 *al_end;
+
+		ntfs_debug("Attribute list attribute found in $MFT.");
+		NInoSetAttrList(ni);
+		if (ctx->attr->flags & ATTR_IS_ENCRYPTED ||
+				ctx->attr->flags & ATTR_COMPRESSION_MASK ||
+				ctx->attr->flags & ATTR_IS_SPARSE) {
+			ntfs_error(sb, "Attribute list attribute is "
+					"compressed/encrypted/sparse. Not "
+					"allowed. $MFT is corrupt. You should "
+					"run chkdsk.");
+			goto put_err_out;
+		}
+		/* Now allocate memory for the attribute list. */
+		ni->attr_list_size = (u32)ntfs_attr_size(ctx->attr);
+		ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
+		if (!ni->attr_list) {
+			ntfs_error(sb, "Not enough memory to allocate buffer "
+					"for attribute list.");
+			goto put_err_out;
+		}
+		if (ctx->attr->non_resident) {
+			NInoSetAttrListNonResident(ni);
+			if (ctx->attr->data.non_resident.lowest_vcn) {
+				ntfs_error(sb, "Attribute list has non zero "
+						"lowest_vcn. $MFT is corrupt. "
+						"You should run chkdsk.");
+				goto put_err_out;
+			}
+			/* Setup the runlist. */
+			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
+					ctx->attr, NULL);
+			if (IS_ERR(ni->attr_list_rl.rl)) {
+				err = PTR_ERR(ni->attr_list_rl.rl);
+				ni->attr_list_rl.rl = NULL;
+				ntfs_error(sb, "Mapping pairs decompression "
+						"failed with error code %i.",
+						-err);
+				goto put_err_out;
+			}
+			/* Now load the attribute list. */
+			if ((err = load_attribute_list(vol, &ni->attr_list_rl,
+					ni->attr_list, ni->attr_list_size,
+					sle64_to_cpu(ctx->attr->data.
+					non_resident.initialized_size)))) {
+				ntfs_error(sb, "Failed to load attribute list "
+						"attribute with error code %i.",
+						-err);
+				goto put_err_out;
+			}
+		} else /* if (!ctx.attr->non_resident) */ {
+			if ((u8*)ctx->attr + le16_to_cpu(
+					ctx->attr->data.resident.value_offset) +
+					le32_to_cpu(
+					ctx->attr->data.resident.value_length) >
+					(u8*)ctx->mrec + vol->mft_record_size) {
+				ntfs_error(sb, "Corrupt attribute list "
+						"attribute.");
+				goto put_err_out;
+			}
+			/* Now copy the attribute list. */
+			memcpy(ni->attr_list, (u8*)ctx->attr + le16_to_cpu(
+					ctx->attr->data.resident.value_offset),
+					le32_to_cpu(
+					ctx->attr->data.resident.value_length));
+		}
+		/* The attribute list is now setup in memory. */
+		/*
+		 * FIXME: I don't know if this case is actually possible.
+		 * According to logic it is not possible but I have seen too
+		 * many weird things in MS software to rely on logic... Thus we
+		 * perform a manual search and make sure the first $MFT/$DATA
+		 * extent is in the base inode. If it is not we abort with an
+		 * error and if we ever see a report of this error we will need
+		 * to do some magic in order to have the necessary mft record
+		 * loaded and in the right place in the page cache. But
+		 * hopefully logic will prevail and this never happens...
+		 */
+		al_entry = (ATTR_LIST_ENTRY*)ni->attr_list;
+		al_end = (u8*)al_entry + ni->attr_list_size;
+		for (;; al_entry = next_al_entry) {
+			/* Out of bounds check. */
+			if ((u8*)al_entry < ni->attr_list ||
+					(u8*)al_entry > al_end)
+				goto em_put_err_out;
+			/* Catch the end of the attribute list. */
+			if ((u8*)al_entry == al_end)
+				goto em_put_err_out;
+			if (!al_entry->length)
+				goto em_put_err_out;
+			if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
+					le16_to_cpu(al_entry->length) > al_end)
+				goto em_put_err_out;
+			next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
+					le16_to_cpu(al_entry->length));
+			if (le32_to_cpu(al_entry->type) >
+					const_le32_to_cpu(AT_DATA))
+				goto em_put_err_out;
+			if (AT_DATA != al_entry->type)
+				continue;
+			/* We want an unnamed attribute. */
+			if (al_entry->name_length)
+				goto em_put_err_out;
+			/* Want the first entry, i.e. lowest_vcn == 0. */
+			if (al_entry->lowest_vcn)
+				goto em_put_err_out;
+			/* First entry has to be in the base mft record. */
+			if (MREF_LE(al_entry->mft_reference) != vi->i_ino) {
+				/* MFT references do not match, logic fails. */
+				ntfs_error(sb, "BUG: The first $DATA extent "
+						"of $MFT is not in the base "
+						"mft record. Please report "
+						"you saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				goto put_err_out;
+			} else {
+				/* Sequence numbers must match. */
+				if (MSEQNO_LE(al_entry->mft_reference) !=
+						ni->seq_no)
+					goto em_put_err_out;
+				/* Got it. All is ok. We can stop now. */
+				break;
+			}
+		}
+	}
+
+	ntfs_attr_reinit_search_ctx(ctx);
+
+	/* Now load all attribute extents. */
+	attr = NULL;
+	next_vcn = last_vcn = highest_vcn = 0;
+	while (!(err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, next_vcn, NULL, 0,
+			ctx))) {
+		runlist_element *nrl;
+
+		/* Cache the current attribute. */
+		attr = ctx->attr;
+		/* $MFT must be non-resident. */
+		if (!attr->non_resident) {
+			ntfs_error(sb, "$MFT must be non-resident but a "
+					"resident extent was found. $MFT is "
+					"corrupt. Run chkdsk.");
+			goto put_err_out;
+		}
+		/* $MFT must be uncompressed and unencrypted. */
+		if (attr->flags & ATTR_COMPRESSION_MASK ||
+				attr->flags & ATTR_IS_ENCRYPTED ||
+				attr->flags & ATTR_IS_SPARSE) {
+			ntfs_error(sb, "$MFT must be uncompressed, "
+					"non-sparse, and unencrypted but a "
+					"compressed/sparse/encrypted extent "
+					"was found. $MFT is corrupt. Run "
+					"chkdsk.");
+			goto put_err_out;
+		}
+		/*
+		 * Decompress the mapping pairs array of this extent and merge
+		 * the result into the existing runlist. No need for locking
+		 * as we have exclusive access to the inode at this time and we
+		 * are a mount in progress task, too.
+		 */
+		nrl = ntfs_mapping_pairs_decompress(vol, attr, ni->runlist.rl);
+		if (IS_ERR(nrl)) {
+			ntfs_error(sb, "ntfs_mapping_pairs_decompress() "
+					"failed with error code %ld.  $MFT is "
+					"corrupt.", PTR_ERR(nrl));
+			goto put_err_out;
+		}
+		ni->runlist.rl = nrl;
+
+		/* Are we in the first extent? */
+		if (!next_vcn) {
+			if (attr->data.non_resident.lowest_vcn) {
+				ntfs_error(sb, "First extent of $DATA "
+						"attribute has non zero "
+						"lowest_vcn. $MFT is corrupt. "
+						"You should run chkdsk.");
+				goto put_err_out;
+			}
+			/* Get the last vcn in the $DATA attribute. */
+			last_vcn = sle64_to_cpu(
+					attr->data.non_resident.allocated_size)
+					>> vol->cluster_size_bits;
+			/* Fill in the inode size. */
+			vi->i_size = sle64_to_cpu(
+					attr->data.non_resident.data_size);
+			ni->initialized_size = sle64_to_cpu(attr->data.
+					non_resident.initialized_size);
+			ni->allocated_size = sle64_to_cpu(
+					attr->data.non_resident.allocated_size);
+			/*
+			 * Verify the number of mft records does not exceed
+			 * 2^32 - 1.
+			 */
+			if ((vi->i_size >> vol->mft_record_size_bits) >=
+					(1ULL << 32)) {
+				ntfs_error(sb, "$MFT is too big! Aborting.");
+				goto put_err_out;
+			}
+			/*
+			 * We have got the first extent of the runlist for
+			 * $MFT which means it is now relatively safe to call
+			 * the normal ntfs_read_inode() function.
+			 * Complete reading the inode, this will actually
+			 * re-read the mft record for $MFT, this time entering
+			 * it into the page cache with which we complete the
+			 * kick start of the volume. It should be safe to do
+			 * this now as the first extent of $MFT/$DATA is
+			 * already known and we would hope that we don't need
+			 * further extents in order to find the other
+			 * attributes belonging to $MFT. Only time will tell if
+			 * this is really the case. If not we will have to play
+			 * magic at this point, possibly duplicating a lot of
+			 * ntfs_read_inode() at this point. We will need to
+			 * ensure we do enough of its work to be able to call
+			 * ntfs_read_inode() on extents of $MFT/$DATA. But let's
+			 * hope this never happens...
+			 */
+			ntfs_read_locked_inode(vi);
+			if (is_bad_inode(vi)) {
+				ntfs_error(sb, "ntfs_read_inode() of $MFT "
+						"failed. BUG or corrupt $MFT. "
+						"Run chkdsk and if no errors "
+						"are found, please report you "
+						"saw this message to "
+						"linux-ntfs-dev@lists."
+						"sourceforge.net");
+				ntfs_attr_put_search_ctx(ctx);
+				/* Revert to the safe super operations. */
+				ntfs_free(m);
+				return -1;
+			}
+			/*
+			 * Re-initialize some specifics about $MFT's inode as
+			 * ntfs_read_inode() will have set up the default ones.
+			 */
+			/* Set uid and gid to root. */
+			vi->i_uid = vi->i_gid = 0;
+			/* Regular file. No access for anyone. */
+			vi->i_mode = S_IFREG;
+			/* No VFS initiated operations allowed for $MFT. */
+			vi->i_op = &ntfs_empty_inode_ops;
+			vi->i_fop = &ntfs_empty_file_ops;
+		}
+
+		/* Get the lowest vcn for the next extent. */
+		highest_vcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
+		next_vcn = highest_vcn + 1;
+
+		/* Only one extent or error, which we catch below. */
+		if (next_vcn <= 0)
+			break;
+
+		/* Avoid endless loops due to corruption. */
+		if (next_vcn < sle64_to_cpu(
+				attr->data.non_resident.lowest_vcn)) {
+			ntfs_error(sb, "$MFT has corrupt attribute list "
+					"attribute. Run chkdsk.");
+			goto put_err_out;
+		}
+	}
+	if (err != -ENOENT) {
+		ntfs_error(sb, "Failed to lookup $MFT/$DATA attribute extent. "
+				"$MFT is corrupt. Run chkdsk.");
+		goto put_err_out;
+	}
+	if (!attr) {
+		ntfs_error(sb, "$MFT/$DATA attribute not found. $MFT is "
+				"corrupt. Run chkdsk.");
+		goto put_err_out;
+	}
+	if (highest_vcn && highest_vcn != last_vcn - 1) {
+		ntfs_error(sb, "Failed to load the complete runlist for "
+				"$MFT/$DATA. Driver bug or corrupt $MFT. "
+				"Run chkdsk.");
+		ntfs_debug("highest_vcn = 0x%llx, last_vcn - 1 = 0x%llx",
+				(unsigned long long)highest_vcn,
+				(unsigned long long)last_vcn - 1);
+		goto put_err_out;
+	}
+	ntfs_attr_put_search_ctx(ctx);
+	ntfs_debug("Done.");
+	ntfs_free(m);
+	return 0;
+
+em_put_err_out:
+	ntfs_error(sb, "Couldn't find first extent of $DATA attribute in "
+			"attribute list. $MFT is corrupt. Run chkdsk.");
+put_err_out:
+	ntfs_attr_put_search_ctx(ctx);
+err_out:
+	ntfs_error(sb, "Failed. Marking inode as bad.");
+	make_bad_inode(vi);
+	ntfs_free(m);
+	return -1;
+}
+
+/**
+ * ntfs_put_inode - handler for when the inode reference count is decremented
+ * @vi:		vfs inode
+ *
+ * The VFS calls ntfs_put_inode() every time the inode reference count
+ * (i_count) is about to be decremented (but before the decrement itself).
+ *
+ * If the inode @vi is a directory with two references, one of which is being
+ * dropped, we need to put the attribute inode for the directory index bitmap,
+ * if it is present, otherwise the directory inode would remain pinned for
+ * ever.
+ */
+void ntfs_put_inode(struct inode *vi)
+{
+	if (S_ISDIR(vi->i_mode) && atomic_read(&vi->i_count) == 2) {
+		ntfs_inode *ni = NTFS_I(vi);
+		if (NInoIndexAllocPresent(ni)) {
+			struct inode *bvi = NULL;
+			down(&vi->i_sem);
+			if (atomic_read(&vi->i_count) == 2) {
+				bvi = ni->itype.index.bmp_ino;
+				if (bvi)
+					ni->itype.index.bmp_ino = NULL;
+			}
+			up(&vi->i_sem);
+			if (bvi)
+				iput(bvi);
+		}
+	}
+}
+
+static void __ntfs_clear_inode(ntfs_inode *ni)
+{
+	/* Free all allocated memory. */
+	down_write(&ni->runlist.lock);
+	if (ni->runlist.rl) {
+		ntfs_free(ni->runlist.rl);
+		ni->runlist.rl = NULL;
+	}
+	up_write(&ni->runlist.lock);
+
+	if (ni->attr_list) {
+		ntfs_free(ni->attr_list);
+		ni->attr_list = NULL;
+	}
+
+	down_write(&ni->attr_list_rl.lock);
+	if (ni->attr_list_rl.rl) {
+		ntfs_free(ni->attr_list_rl.rl);
+		ni->attr_list_rl.rl = NULL;
+	}
+	up_write(&ni->attr_list_rl.lock);
+
+	if (ni->name_len && ni->name != I30) {
+		/* Catch bugs... */
+		BUG_ON(!ni->name);
+		kfree(ni->name);
+	}
+}
+
+void ntfs_clear_extent_inode(ntfs_inode *ni)
+{
+	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
+
+	BUG_ON(NInoAttr(ni));
+	BUG_ON(ni->nr_extents != -1);
+
+#ifdef NTFS_RW
+	if (NInoDirty(ni)) {
+		if (!is_bad_inode(VFS_I(ni->ext.base_ntfs_ino)))
+			ntfs_error(ni->vol->sb, "Clearing dirty extent inode!  "
+					"Losing data!  This is a BUG!!!");
+		// FIXME:  Do something!!!
+	}
+#endif /* NTFS_RW */
+
+	__ntfs_clear_inode(ni);
+
+	/* Bye, bye... */
+	ntfs_destroy_extent_inode(ni);
+}
+
+/**
+ * ntfs_clear_big_inode - clean up the ntfs specific part of an inode
+ * @vi:		vfs inode pending annihilation
+ *
+ * When the VFS is going to remove an inode from memory, ntfs_clear_big_inode()
+ * is called, which deallocates all memory belonging to the NTFS specific part
+ * of the inode and returns.
+ *
+ * If the MFT record is dirty, we commit it before doing anything else.
+ */
+void ntfs_clear_big_inode(struct inode *vi)
+{
+	ntfs_inode *ni = NTFS_I(vi);
+
+	/*
+	 * If the inode @vi is an index inode we need to put the attribute
+	 * inode for the index bitmap, if it is present, otherwise the index
+	 * inode would disappear and the attribute inode for the index bitmap
+	 * would no longer be referenced from anywhere and thus it would remain
+	 * pinned for ever.
+	 */
+	if (NInoAttr(ni) && (ni->type == AT_INDEX_ALLOCATION) &&
+			NInoIndexAllocPresent(ni) && ni->itype.index.bmp_ino) {
+		iput(ni->itype.index.bmp_ino);
+		ni->itype.index.bmp_ino = NULL;
+	}
+#ifdef NTFS_RW
+	if (NInoDirty(ni)) {
+		BOOL was_bad = (is_bad_inode(vi));
+
+		/* Committing the inode also commits all extent inodes. */
+		ntfs_commit_inode(vi);
+
+		if (!was_bad && (is_bad_inode(vi) || NInoDirty(ni))) {
+			ntfs_error(vi->i_sb, "Failed to commit dirty inode "
+					"0x%lx.  Losing data!", vi->i_ino);
+			// FIXME:  Do something!!!
+		}
+	}
+#endif /* NTFS_RW */
+
+	/* No need to lock at this stage as no one else has a reference. */
+	if (ni->nr_extents > 0) {
+		int i;
+
+		for (i = 0; i < ni->nr_extents; i++)
+			ntfs_clear_extent_inode(ni->ext.extent_ntfs_inos[i]);
+		kfree(ni->ext.extent_ntfs_inos);
+	}
+
+	__ntfs_clear_inode(ni);
+
+	if (NInoAttr(ni)) {
+		/* Release the base inode if we are holding it. */
+		if (ni->nr_extents == -1) {
+			iput(VFS_I(ni->ext.base_ntfs_ino));
+			ni->nr_extents = 0;
+			ni->ext.base_ntfs_ino = NULL;
+		}
+	}
+	return;
+}
+
+/**
+ * ntfs_show_options - show mount options in /proc/mounts
+ * @sf:		seq_file in which to write our mount options
+ * @mnt:	vfs mount whose mount options to display
+ *
+ * Called by the VFS once for each mounted ntfs volume when someone reads
+ * /proc/mounts in order to display the NTFS specific mount options of each
+ * mount. The mount options of the vfs mount @mnt are written to the seq file
+ * @sf and success is returned.
+ */
+int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)
+{
+	ntfs_volume *vol = NTFS_SB(mnt->mnt_sb);
+	int i;
+
+	seq_printf(sf, ",uid=%i", vol->uid);
+	seq_printf(sf, ",gid=%i", vol->gid);
+	if (vol->fmask == vol->dmask)
+		seq_printf(sf, ",umask=0%o", vol->fmask);
+	else {
+		seq_printf(sf, ",fmask=0%o", vol->fmask);
+		seq_printf(sf, ",dmask=0%o", vol->dmask);
+	}
+	seq_printf(sf, ",nls=%s", vol->nls_map->charset);
+	if (NVolCaseSensitive(vol))
+		seq_printf(sf, ",case_sensitive");
+	if (NVolShowSystemFiles(vol))
+		seq_printf(sf, ",show_sys_files");
+	for (i = 0; on_errors_arr[i].val; i++) {
+		if (on_errors_arr[i].val & vol->on_errors)
+			seq_printf(sf, ",errors=%s", on_errors_arr[i].str);
+	}
+	seq_printf(sf, ",mft_zone_multiplier=%i", vol->mft_zone_multiplier);
+	return 0;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_truncate - called when the i_size of an ntfs inode is changed
+ * @vi:		inode for which the i_size was changed
+ *
+ * We do not support i_size changes yet.
+ *
+ * The kernel guarantees that @vi is a regular file (S_ISREG() is true) and
+ * that the change is allowed.
+ *
+ * This implies for us that @vi is a file inode rather than a directory, index,
+ * or attribute inode as well as that @vi is a base inode.
+ *
+ * Returns 0 on success or -errno on error.
+ *
+ * Called with ->i_sem held.  In all but one case ->i_alloc_sem is held for
+ * writing.  The only case where ->i_alloc_sem is not held is
+ * mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
+ * with the current i_size as the offset which means that it is a noop as far
+ * as ntfs_truncate() is concerned.
+ */
+int ntfs_truncate(struct inode *vi)
+{
+	ntfs_inode *ni = NTFS_I(vi);
+	ntfs_volume *vol = ni->vol;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	const char *te = "  Leaving file length out of sync with i_size.";
+	int err;
+
+	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
+	BUG_ON(NInoAttr(ni));
+	BUG_ON(ni->nr_extents < 0);
+	m = map_mft_record(ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		ntfs_error(vi->i_sb, "Failed to map mft record for inode 0x%lx "
+				"(error code %d).%s", vi->i_ino, err, te);
+		ctx = NULL;
+		m = NULL;
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(ni, m);
+	if (unlikely(!ctx)) {
+		ntfs_error(vi->i_sb, "Failed to allocate a search context for "
+				"inode 0x%lx (not enough memory).%s",
+				vi->i_ino, te);
+		err = -ENOMEM;
+		goto err_out;
+	}
+	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		if (err == -ENOENT)
+			ntfs_error(vi->i_sb, "Open attribute is missing from "
+					"mft record.  Inode 0x%lx is corrupt.  "
+					"Run chkdsk.", vi->i_ino);
+		else
+			ntfs_error(vi->i_sb, "Failed to lookup attribute in "
+					"inode 0x%lx (error code %d).",
+					vi->i_ino, err);
+		goto err_out;
+	}
+	/* If the size has not changed there is nothing to do. */
+	if (ntfs_attr_size(ctx->attr) == i_size_read(vi))
+		goto done;
+	// TODO: Implement the truncate...
+	ntfs_error(vi->i_sb, "Inode size has changed but this is not "
+			"implemented yet.  Resetting inode size to old value. "
+			" This is most likely a bug in the ntfs driver!");
+	i_size_write(vi, ntfs_attr_size(ctx->attr)); 
+done:
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(ni);
+	NInoClearTruncateFailed(ni);
+	ntfs_debug("Done.");
+	return 0;
+err_out:
+	if (err != -ENOMEM) {
+		NVolSetErrors(vol);
+		make_bad_inode(vi);
+	}
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(ni);
+	NInoSetTruncateFailed(ni);
+	return err;
+}
+
+/**
+ * ntfs_truncate_vfs - wrapper for ntfs_truncate() that has no return value
+ * @vi:		inode for which the i_size was changed
+ *
+ * Wrapper for ntfs_truncate() that has no return value.
+ *
+ * See ntfs_truncate() description above for details.
+ */
+void ntfs_truncate_vfs(struct inode *vi)
+{
+	ntfs_truncate(vi);
+}
+
+/**
+ * ntfs_setattr - called from notify_change() when an attribute is being changed
+ * @dentry:	dentry whose attributes to change
+ * @attr:	structure describing the attributes and the changes
+ *
+ * We have to trap VFS attempts to truncate the file described by @dentry as
+ * soon as possible, because we do not implement changes in i_size yet.  So we
+ * abort all i_size changes here.
+ *
+ * We also abort all changes of user, group, and mode as we do not implement
+ * the NTFS ACLs yet.
+ *
+ * Called with ->i_sem held.  For the ATTR_SIZE (i.e. ->truncate) case, also
+ * called with ->i_alloc_sem held for writing.
+ *
+ * Basically this is a copy of generic notify_change() and inode_setattr()
+ * functionality, except we intercept and abort changes in i_size.
+ */
+int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *vi = dentry->d_inode;
+	int err;
+	unsigned int ia_valid = attr->ia_valid;
+
+	err = inode_change_ok(vi, attr);
+	if (err)
+		return err;
+
+	/* We do not support NTFS ACLs yet. */
+	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) {
+		ntfs_warning(vi->i_sb, "Changes in user/group/mode are not "
+				"supported yet, ignoring.");
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (ia_valid & ATTR_SIZE) {
+		if (attr->ia_size != i_size_read(vi)) {
+			ntfs_warning(vi->i_sb, "Changes in inode size are not "
+					"supported yet, ignoring.");
+			err = -EOPNOTSUPP;
+			// TODO: Implement...
+			// err = vmtruncate(vi, attr->ia_size);
+			if (err || ia_valid == ATTR_SIZE)
+				goto out;
+		} else {
+			/*
+			 * We skipped the truncate but must still update
+			 * timestamps.
+			 */
+			ia_valid |= ATTR_MTIME|ATTR_CTIME;
+		}
+	}
+
+	if (ia_valid & ATTR_ATIME)
+		vi->i_atime = attr->ia_atime;
+	if (ia_valid & ATTR_MTIME)
+		vi->i_mtime = attr->ia_mtime;
+	if (ia_valid & ATTR_CTIME)
+		vi->i_ctime = attr->ia_ctime;
+	mark_inode_dirty(vi);
+out:
+	return err;
+}
+
+/**
+ * ntfs_write_inode - write out a dirty inode
+ * @vi:		inode to write out
+ * @sync:	if true, write out synchronously
+ *
+ * Write out a dirty inode to disk including any extent inodes if present.
+ *
+ * If @sync is true, commit the inode to disk and wait for io completion.  This
+ * is done using write_mft_record().
+ *
+ * If @sync is false, just schedule the write to happen but do not wait for i/o
+ * completion.  In 2.6 kernels, scheduling usually happens just by virtue of
+ * marking the page (and in this case mft record) dirty but we do not implement
+ * this yet as write_mft_record() largely ignores the @sync parameter and
+ * always performs synchronous writes.
+ *
+ * Return 0 on success and -errno on error.
+ */
+int ntfs_write_inode(struct inode *vi, int sync)
+{
+	sle64 nt;
+	ntfs_inode *ni = NTFS_I(vi);
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	STANDARD_INFORMATION *si;
+	int err = 0;
+	BOOL modified = FALSE;
+
+	ntfs_debug("Entering for %sinode 0x%lx.", NInoAttr(ni) ? "attr " : "",
+			vi->i_ino);
+	/*
+	 * Dirty attribute inodes are written via their real inodes so just
+	 * clean them here.  Access time updates are taken care of when the
+	 * real inode is written.
+	 */
+	if (NInoAttr(ni)) {
+		NInoClearDirty(ni);
+		ntfs_debug("Done.");
+		return 0;
+	}
+	/* Map, pin, and lock the mft record belonging to the inode. */
+	m = map_mft_record(ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		goto err_out;
+	}
+	/* Update the access times in the standard information attribute. */
+	ctx = ntfs_attr_get_search_ctx(ni, m);
+	if (unlikely(!ctx)) {
+		err = -ENOMEM;
+		goto unm_err_out;
+	}
+	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		ntfs_attr_put_search_ctx(ctx);
+		goto unm_err_out;
+	}
+	si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	/* Update the access times if they have changed. */
+	nt = utc2ntfs(vi->i_mtime);
+	if (si->last_data_change_time != nt) {
+		ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
+				"new = 0x%llx", vi->i_ino,
+				sle64_to_cpu(si->last_data_change_time),
+				sle64_to_cpu(nt));
+		si->last_data_change_time = nt;
+		modified = TRUE;
+	}
+	nt = utc2ntfs(vi->i_ctime);
+	if (si->last_mft_change_time != nt) {
+		ntfs_debug("Updating ctime for inode 0x%lx: old = 0x%llx, "
+				"new = 0x%llx", vi->i_ino,
+				sle64_to_cpu(si->last_mft_change_time),
+				sle64_to_cpu(nt));
+		si->last_mft_change_time = nt;
+		modified = TRUE;
+	}
+	nt = utc2ntfs(vi->i_atime);
+	if (si->last_access_time != nt) {
+		ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
+				"new = 0x%llx", vi->i_ino,
+				sle64_to_cpu(si->last_access_time),
+				sle64_to_cpu(nt));
+		si->last_access_time = nt;
+		modified = TRUE;
+	}
+	/*
+	 * If we just modified the standard information attribute we need to
+	 * mark the mft record it is in dirty.  We do this manually so that
+	 * mark_inode_dirty() is not called which would redirty the inode and
+	 * hence result in an infinite loop of trying to write the inode.
+	 * There is no need to mark the base inode nor the base mft record
+	 * dirty, since we are going to write this mft record below in any case
+	 * and the base mft record may actually not have been modified so it
+	 * might not need to be written out.
+	 * NOTE: It is not a problem when the inode for $MFT itself is being
+	 * written out as mark_ntfs_record_dirty() will only set I_DIRTY_PAGES
+	 * on the $MFT inode and hence ntfs_write_inode() will not be
+	 * re-invoked because of it which in turn is ok since the dirtied mft
+	 * record will be cleaned and written out to disk below, i.e. before
+	 * this function returns.
+	 */
+	if (modified && !NInoTestSetDirty(ctx->ntfs_ino))
+		mark_ntfs_record_dirty(ctx->ntfs_ino->page,
+				ctx->ntfs_ino->page_ofs);
+	ntfs_attr_put_search_ctx(ctx);
+	/* Now the access times are updated, write the base mft record. */
+	if (NInoDirty(ni))
+		err = write_mft_record(ni, m, sync);
+	/* Write all attached extent mft records. */
+	down(&ni->extent_lock);
+	if (ni->nr_extents > 0) {
+		ntfs_inode **extent_nis = ni->ext.extent_ntfs_inos;
+		int i;
+
+		ntfs_debug("Writing %i extent inodes.", ni->nr_extents);
+		for (i = 0; i < ni->nr_extents; i++) {
+			ntfs_inode *tni = extent_nis[i];
+
+			if (NInoDirty(tni)) {
+				MFT_RECORD *tm = map_mft_record(tni);
+				int ret;
+
+				if (IS_ERR(tm)) {
+					if (!err || err == -ENOMEM)
+						err = PTR_ERR(tm);
+					continue;
+				}
+				ret = write_mft_record(tni, tm, sync);
+				unmap_mft_record(tni);
+				if (unlikely(ret)) {
+					if (!err || err == -ENOMEM)
+						err = ret;
+				}
+			}
+		}
+	}
+	up(&ni->extent_lock);
+	unmap_mft_record(ni);
+	if (unlikely(err))
+		goto err_out;
+	ntfs_debug("Done.");
+	return 0;
+unm_err_out:
+	unmap_mft_record(ni);
+err_out:
+	if (err == -ENOMEM) {
+		ntfs_warning(vi->i_sb, "Not enough memory to write inode.  "
+				"Marking the inode dirty again, so the VFS "
+				"retries later.");
+		mark_inode_dirty(vi);
+	} else {
+		ntfs_error(vi->i_sb, "Failed (error code %i):  Marking inode "
+				"as bad.  You should run chkdsk.", -err);
+		make_bad_inode(vi);
+		NVolSetErrors(ni->vol);
+	}
+	return err;
+}
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
new file mode 100644
index 0000000..9958045
--- /dev/null
+++ b/fs/ntfs/inode.h
@@ -0,0 +1,321 @@
+/*
+ * inode.h - Defines for inode structures in the NTFS Linux kernel driver.
+ *	     Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_INODE_H
+#define _LINUX_NTFS_INODE_H
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+#include "layout.h"
+#include "volume.h"
+#include "types.h"
+#include "runlist.h"
+#include "debug.h"
+
+typedef struct _ntfs_inode ntfs_inode;
+
+/*
+ * The NTFS in-memory inode structure. It is just used as an extension to the
+ * fields already provided in the VFS inode.
+ */
+struct _ntfs_inode {
+	s64 initialized_size;	/* Copy from the attribute record. */
+	s64 allocated_size;	/* Copy from the attribute record. */
+	unsigned long state;	/* NTFS specific flags describing this inode.
+				   See ntfs_inode_state_bits below. */
+	unsigned long mft_no;	/* Number of the mft record / inode. */
+	u16 seq_no;		/* Sequence number of the mft record. */
+	atomic_t count;		/* Inode reference count for book keeping. */
+	ntfs_volume *vol;	/* Pointer to the ntfs volume of this inode. */
+	/*
+	 * If NInoAttr() is true, the below fields describe the attribute which
+	 * this fake inode belongs to. The actual inode of this attribute is
+	 * pointed to by base_ntfs_ino and nr_extents is always set to -1 (see
+	 * below). For real inodes, we also set the type (AT_DATA for files and
+	 * AT_INDEX_ALLOCATION for directories), with the name = NULL and
+	 * name_len = 0 for files and name = I30 (global constant) and
+	 * name_len = 4 for directories.
+	 */
+	ATTR_TYPE type;	/* Attribute type of this fake inode. */
+	ntfschar *name;		/* Attribute name of this fake inode. */
+	u32 name_len;		/* Attribute name length of this fake inode. */
+	runlist runlist;	/* If state has the NI_NonResident bit set,
+				   the runlist of the unnamed data attribute
+				   (if a file) or of the index allocation
+				   attribute (directory) or of the attribute
+				   described by the fake inode (if NInoAttr()).
+				   If runlist.rl is NULL, the runlist has not
+				   been read in yet or has been unmapped. If
+				   NI_NonResident is clear, the attribute is
+				   resident (file and fake inode) or there is
+				   no $I30 index allocation attribute
+				   (small directory). In the latter case
+				   runlist.rl is always NULL.*/
+	/*
+	 * The following fields are only valid for real inodes and extent
+	 * inodes.
+	 */
+	struct semaphore mrec_lock; /* Lock for serializing access to the
+				   mft record belonging to this inode. */
+	struct page *page;	/* The page containing the mft record of the
+				   inode. This should only be touched by the
+				   (un)map_mft_record*() functions. */
+	int page_ofs;		/* Offset into the page at which the mft record
+				   begins. This should only be touched by the
+				   (un)map_mft_record*() functions. */
+	/*
+	 * Attribute list support (only for use by the attribute lookup
+	 * functions). Setup during read_inode for all inodes with attribute
+	 * lists. Only valid if NI_AttrList is set in state, and attr_list_rl is
+	 * further only valid if NI_AttrListNonResident is set.
+	 */
+	u32 attr_list_size;	/* Length of attribute list value in bytes. */
+	u8 *attr_list;		/* Attribute list value itself. */
+	runlist attr_list_rl;	/* Run list for the attribute list value. */
+	union {
+		struct { /* It is a directory, $MFT, or an index inode. */
+			struct inode *bmp_ino;	/* Attribute inode for the
+						   index $BITMAP. */
+			u32 block_size;		/* Size of an index block. */
+			u32 vcn_size;		/* Size of a vcn in this
+						   index. */
+			COLLATION_RULE collation_rule; /* The collation rule
+						   for the index. */
+			u8 block_size_bits; 	/* Log2 of the above. */
+			u8 vcn_size_bits;	/* Log2 of the above. */
+		} index;
+		struct { /* It is a compressed file or an attribute inode. */
+			s64 size;		/* Copy of compressed_size from
+						   $DATA. */
+			u32 block_size;		/* Size of a compression block
+						   (cb). */
+			u8 block_size_bits;	/* Log2 of the size of a cb. */
+			u8 block_clusters;	/* Number of clusters per cb. */
+		} compressed;
+	} itype;
+	struct semaphore extent_lock;	/* Lock for accessing/modifying the
+					   fields below. */
+	s32 nr_extents;	/* For a base mft record, the number of attached extent
+			   inodes (0 if none), for extent records and for fake
+			   inodes describing an attribute this is -1. */
+	union {		/* This union is only used if nr_extents != 0. */
+		ntfs_inode **extent_ntfs_inos;	/* For nr_extents > 0, array of
+						   the ntfs inodes of the extent
+						   mft records belonging to
+						   this base inode which have
+						   been loaded. */
+		ntfs_inode *base_ntfs_ino;	/* For nr_extents == -1, the
+						   ntfs inode of the base mft
+						   record. For fake inodes, the
+						   real (base) inode to which
+						   the attribute belongs. */
+	} ext;
+};
+
+/*
+ * Defined bits for the state field in the ntfs_inode structure.
+ * (f) = files only, (d) = directories only, (a) = attributes/fake inodes only
+ */
+typedef enum {
+	NI_Dirty,		/* 1: Mft record needs to be written to disk. */
+	NI_AttrList,		/* 1: Mft record contains an attribute list. */
+	NI_AttrListNonResident,	/* 1: Attribute list is non-resident. Implies
+				      NI_AttrList is set. */
+
+	NI_Attr,		/* 1: Fake inode for attribute i/o.
+				   0: Real inode or extent inode. */
+
+	NI_MstProtected,	/* 1: Attribute is protected by MST fixups.
+				   0: Attribute is not protected by fixups. */
+	NI_NonResident,		/* 1: Unnamed data attr is non-resident (f).
+				   1: Attribute is non-resident (a). */
+	NI_IndexAllocPresent = NI_NonResident,	/* 1: $I30 index alloc attr is
+						   present (d). */
+	NI_Compressed,		/* 1: Unnamed data attr is compressed (f).
+				   1: Create compressed files by default (d).
+				   1: Attribute is compressed (a). */
+	NI_Encrypted,		/* 1: Unnamed data attr is encrypted (f).
+				   1: Create encrypted files by default (d).
+				   1: Attribute is encrypted (a). */
+	NI_Sparse,		/* 1: Unnamed data attr is sparse (f).
+				   1: Create sparse files by default (d).
+				   1: Attribute is sparse (a). */
+	NI_TruncateFailed,	/* 1: Last ntfs_truncate() call failed. */
+} ntfs_inode_state_bits;
+
+/*
+ * NOTE: We should be adding dirty mft records to a list somewhere and they
+ * should be independent of the (ntfs/vfs) inode structure so that an inode can
+ * be removed but the record can be left dirty for syncing later.
+ */
+
+/*
+ * Macro tricks to expand the NInoFoo(), NInoSetFoo(), and NInoClearFoo()
+ * functions.
+ */
+#define NINO_FNS(flag)					\
+static inline int NIno##flag(ntfs_inode *ni)		\
+{							\
+	return test_bit(NI_##flag, &(ni)->state);	\
+}							\
+static inline void NInoSet##flag(ntfs_inode *ni)	\
+{							\
+	set_bit(NI_##flag, &(ni)->state);		\
+}							\
+static inline void NInoClear##flag(ntfs_inode *ni)	\
+{							\
+	clear_bit(NI_##flag, &(ni)->state);		\
+}
+
+/*
+ * As above for NInoTestSetFoo() and NInoTestClearFoo().
+ */
+#define TAS_NINO_FNS(flag)					\
+static inline int NInoTestSet##flag(ntfs_inode *ni)		\
+{								\
+	return test_and_set_bit(NI_##flag, &(ni)->state);	\
+}								\
+static inline int NInoTestClear##flag(ntfs_inode *ni)		\
+{								\
+	return test_and_clear_bit(NI_##flag, &(ni)->state);	\
+}
+
+/* Emit the ntfs inode bitops functions. */
+NINO_FNS(Dirty)
+TAS_NINO_FNS(Dirty)
+NINO_FNS(AttrList)
+NINO_FNS(AttrListNonResident)
+NINO_FNS(Attr)
+NINO_FNS(MstProtected)
+NINO_FNS(NonResident)
+NINO_FNS(IndexAllocPresent)
+NINO_FNS(Compressed)
+NINO_FNS(Encrypted)
+NINO_FNS(Sparse)
+NINO_FNS(TruncateFailed)
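
As a sketch, NINO_FNS(Dirty) expands to the accessors shown in the comment below, and a hypothetical caller would use the test-and-set form to mark an inode dirty exactly once (the example function is illustrative, not part of the driver):

/*
 * NINO_FNS(Dirty), for example, expands to three inline functions:
 *
 *	static inline int NInoDirty(ntfs_inode *ni)
 *	{
 *		return test_bit(NI_Dirty, &(ni)->state);
 *	}
 *	static inline void NInoSetDirty(ntfs_inode *ni)
 *	{
 *		set_bit(NI_Dirty, &(ni)->state);
 *	}
 *	static inline void NInoClearDirty(ntfs_inode *ni)
 *	{
 *		clear_bit(NI_Dirty, &(ni)->state);
 *	}
 */

/* Hypothetical caller: mark the inode dirty only on the first transition. */
static inline void example_set_dirty_once(ntfs_inode *ni)
{
	if (!NInoTestSetDirty(ni))
		ntfs_debug("Inode 0x%lx went dirty.", ni->mft_no);
}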
+
+/*
+ * The full structure containing a ntfs_inode and a vfs struct inode. Used for
+ * all real and fake inodes but not for extent inodes which lack the vfs struct
+ * inode.
+ */
+typedef struct {
+	ntfs_inode ntfs_inode;
+	struct inode vfs_inode;		/* The vfs inode structure. */
+} big_ntfs_inode;
+
+/**
+ * NTFS_I - return the ntfs inode given a vfs inode
+ * @inode:	VFS inode
+ *
+ * NTFS_I() returns the ntfs inode associated with the VFS @inode.
+ */
+static inline ntfs_inode *NTFS_I(struct inode *inode)
+{
+	return (ntfs_inode *)list_entry(inode, big_ntfs_inode, vfs_inode);
+}
+
+static inline struct inode *VFS_I(ntfs_inode *ni)
+{
+	return &((big_ntfs_inode *)ni)->vfs_inode;
+}
+
+/**
+ * ntfs_attr - ntfs in memory attribute structure
+ * @mft_no:	mft record number of the base mft record of this attribute
+ * @name:	Unicode name of the attribute (NULL if unnamed)
+ * @name_len:	length of @name in Unicode characters (0 if unnamed)
+ * @type:	attribute type (see layout.h)
+ *
+ * This structure exists only to provide a small structure for the
+ * ntfs_{attr_}iget()/ntfs_test_inode()/ntfs_init_locked_inode() mechanism.
+ *
+ * NOTE: Elements are ordered by size to make the structure as compact as
+ * possible on all architectures.
+ */
+typedef struct {
+	unsigned long mft_no;
+	ntfschar *name;
+	u32 name_len;
+	ATTR_TYPE type;
+} ntfs_attr;
+
+typedef int (*test_t)(struct inode *, void *);
+
+extern int ntfs_test_inode(struct inode *vi, ntfs_attr *na);
+
+extern struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no);
+extern struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
+		ntfschar *name, u32 name_len);
+extern struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
+		u32 name_len);
+
+extern struct inode *ntfs_alloc_big_inode(struct super_block *sb);
+extern void ntfs_destroy_big_inode(struct inode *inode);
+extern void ntfs_clear_big_inode(struct inode *vi);
+
+extern void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni);
+
+static inline void ntfs_init_big_inode(struct inode *vi)
+{
+	ntfs_inode *ni = NTFS_I(vi);
+
+	ntfs_debug("Entering.");
+	__ntfs_init_inode(vi->i_sb, ni);
+	ni->mft_no = vi->i_ino;
+}
+
+extern ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
+		unsigned long mft_no);
+extern void ntfs_clear_extent_inode(ntfs_inode *ni);
+
+extern int ntfs_read_inode_mount(struct inode *vi);
+
+extern void ntfs_put_inode(struct inode *vi);
+
+extern int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt);
+
+#ifdef NTFS_RW
+
+extern int ntfs_truncate(struct inode *vi);
+extern void ntfs_truncate_vfs(struct inode *vi);
+
+extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr);
+
+extern int ntfs_write_inode(struct inode *vi, int sync);
+
+static inline void ntfs_commit_inode(struct inode *vi)
+{
+	if (!is_bad_inode(vi))
+		ntfs_write_inode(vi, 1);
+	return;
+}
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_INODE_H */
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
new file mode 100644
index 0000000..47b3389
--- /dev/null
+++ b/fs/ntfs/layout.h
@@ -0,0 +1,2413 @@
+/*
+ * layout.h - All NTFS associated on-disk structures. Part of the Linux-NTFS
+ *	      project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_LAYOUT_H
+#define _LINUX_NTFS_LAYOUT_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <asm/byteorder.h>
+
+#include "types.h"
+
+/*
+ * Constant endianness conversion defines.
+ */
+#define const_le16_to_cpu(x)	__constant_le16_to_cpu(x)
+#define const_le32_to_cpu(x)	__constant_le32_to_cpu(x)
+#define const_le64_to_cpu(x)	__constant_le64_to_cpu(x)
+
+#define const_cpu_to_le16(x)	__constant_cpu_to_le16(x)
+#define const_cpu_to_le32(x)	__constant_cpu_to_le32(x)
+#define const_cpu_to_le64(x)	__constant_cpu_to_le64(x)
+
+/* The NTFS oem_id "NTFS    " */
+#define magicNTFS	const_cpu_to_le64(0x202020205346544eULL)
+
+/*
+ * Location of bootsector on partition:
+ *	The standard NTFS_BOOT_SECTOR is on sector 0 of the partition.
+ *	On NT4 and above there is one backup copy of the boot sector to
+ *	be found on the last sector of the partition (not normally accessible
+ *	from within Windows as the number of sectors value stored in the
+ *	bootsector is one less than the actual value!).
+ *	On versions of NT 3.51 and earlier, the backup copy was located at
+ *	number of sectors/2 (integer divide), i.e. in the middle of the volume.
+ */
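
A minimal sketch of the rule above, assuming number_of_sectors is the true sector count of the partition (the helper name is illustrative, not part of this driver):

/*
 * Sketch: on NT4 and later the backup boot sector is the last sector of
 * the partition; on NT3.51 and earlier it is at number_of_sectors / 2
 * (integer division).
 */
static inline s64 example_backup_boot_sector(s64 number_of_sectors,
		BOOL nt4_or_later)
{
	return nt4_or_later ? number_of_sectors - 1 : number_of_sectors / 2;
}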
+
+/*
+ * BIOS parameter block (bpb) structure.
+ */
+typedef struct {
+	le16 bytes_per_sector;		/* Size of a sector in bytes. */
+	u8  sectors_per_cluster;	/* Size of a cluster in sectors. */
+	le16 reserved_sectors;		/* zero */
+	u8  fats;			/* zero */
+	le16 root_entries;		/* zero */
+	le16 sectors;			/* zero */
+	u8  media_type;			/* 0xf8 = hard disk */
+	le16 sectors_per_fat;		/* zero */
+	le16 sectors_per_track;		/* irrelevant */
+	le16 heads;			/* irrelevant */
+	le32 hidden_sectors;		/* zero */
+	le32 large_sectors;		/* zero */
+} __attribute__ ((__packed__)) BIOS_PARAMETER_BLOCK;
+
+/*
+ * NTFS boot sector structure.
+ */
+typedef struct {
+	u8  jump[3];			/* Irrelevant (jump to boot up code).*/
+	le64 oem_id;			/* Magic "NTFS    ". */
+	BIOS_PARAMETER_BLOCK bpb;	/* See BIOS_PARAMETER_BLOCK. */
+	u8  unused[4];			/* zero, NTFS diskedit.exe states that
+					   this is actually:
+						__u8 physical_drive;	// 0x80
+						__u8 current_head;	// zero
+						__u8 extended_boot_signature;
+									// 0x80
+						__u8 unused;		// zero
+					 */
+/*0x28*/sle64 number_of_sectors;	/* Number of sectors in volume. Gives
+					   maximum volume size of 2^63 sectors.
+					   Assuming standard sector size of 512
+					   bytes, the maximum byte size is
+					   approx. 4.7x10^21 bytes. (-; */
+	sle64 mft_lcn;			/* Cluster location of mft data. */
+	sle64 mftmirr_lcn;		/* Cluster location of copy of mft. */
+	s8  clusters_per_mft_record;	/* Mft record size in clusters. */
+	u8  reserved0[3];		/* zero */
+	s8  clusters_per_index_record;	/* Index block size in clusters. */
+	u8  reserved1[3];		/* zero */
+	le64 volume_serial_number;	/* Irrelevant (serial number). */
+	le32 checksum;			/* Boot sector checksum. */
+/*0x54*/u8  bootstrap[426];		/* Irrelevant (boot up code). */
+	le16 end_of_sector_marker;	/* End of bootsector magic. Always is
+					   0xaa55 in little endian. */
+/* sizeof() = 512 (0x200) bytes */
+} __attribute__ ((__packed__)) NTFS_BOOT_SECTOR;
+
+/*
+ * Magic identifiers present at the beginning of all ntfs record containing
+ * records (like mft records for example).
+ */
+enum {
+	/* Found in $MFT/$DATA. */
+	magic_FILE = const_cpu_to_le32(0x454c4946), /* Mft entry. */
+	magic_INDX = const_cpu_to_le32(0x58444e49), /* Index buffer. */
+	magic_HOLE = const_cpu_to_le32(0x454c4f48), /* ? (NTFS 3.0+?) */
+
+	/* Found in $LogFile/$DATA. */
+	magic_RSTR = const_cpu_to_le32(0x52545352), /* Restart page. */
+	magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */
+
+	/* Found in $LogFile/$DATA.  (May be found in $MFT/$DATA, also?) */
+	magic_CHKD = const_cpu_to_le32(0x424b4843), /* Modified by chkdsk. */
+
+	/* Found in all ntfs record containing records. */
+	magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector
+						       transfer was detected. */
+	/*
+	 * Found in $LogFile/$DATA when a page is full of 0xff bytes and is
+	 * thus not initialized.  Page must be initialized before using it.
+	 */
+	magic_empty = const_cpu_to_le32(0xffffffff) /* Record is empty. */
+};
+
+typedef le32 NTFS_RECORD_TYPE;
+
+/*
+ * Generic magic comparison macros. Finally found a use for the ## preprocessor
+ * operator! (-8
+ */
+
+static inline BOOL __ntfs_is_magic(le32 x, NTFS_RECORD_TYPE r)
+{
+	return (x == r);
+}
+#define ntfs_is_magic(x, m)	__ntfs_is_magic(x, magic_##m)
+
+static inline BOOL __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
+{
+	return (*p == r);
+}
+#define ntfs_is_magicp(p, m)	__ntfs_is_magicp(p, magic_##m)
+
+/*
+ * Specialised magic comparison macros for the NTFS_RECORD_TYPEs defined above.
+ */
+#define ntfs_is_file_record(x)		( ntfs_is_magic (x, FILE) )
+#define ntfs_is_file_recordp(p)		( ntfs_is_magicp(p, FILE) )
+#define ntfs_is_mft_record(x)		( ntfs_is_file_record (x) )
+#define ntfs_is_mft_recordp(p)		( ntfs_is_file_recordp(p) )
+#define ntfs_is_indx_record(x)		( ntfs_is_magic (x, INDX) )
+#define ntfs_is_indx_recordp(p)		( ntfs_is_magicp(p, INDX) )
+#define ntfs_is_hole_record(x)		( ntfs_is_magic (x, HOLE) )
+#define ntfs_is_hole_recordp(p)		( ntfs_is_magicp(p, HOLE) )
+
+#define ntfs_is_rstr_record(x)		( ntfs_is_magic (x, RSTR) )
+#define ntfs_is_rstr_recordp(p)		( ntfs_is_magicp(p, RSTR) )
+#define ntfs_is_rcrd_record(x)		( ntfs_is_magic (x, RCRD) )
+#define ntfs_is_rcrd_recordp(p)		( ntfs_is_magicp(p, RCRD) )
+
+#define ntfs_is_chkd_record(x)		( ntfs_is_magic (x, CHKD) )
+#define ntfs_is_chkd_recordp(p)		( ntfs_is_magicp(p, CHKD) )
+
+#define ntfs_is_baad_record(x)		( ntfs_is_magic (x, BAAD) )
+#define ntfs_is_baad_recordp(p)		( ntfs_is_magicp(p, BAAD) )
+
+#define ntfs_is_empty_record(x)		( ntfs_is_magic (x, empty) )
+#define ntfs_is_empty_recordp(p)	( ntfs_is_magicp(p, empty) )
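
A hypothetical caller might combine these checks as follows when sanity checking a record just read from disk (sketch only; the function name is illustrative):

/* Sketch: accept mft or index records, reject torn or empty ones. */
static inline BOOL example_is_usable_record(le32 *magic)
{
	if (ntfs_is_baad_recordp(magic))
		return FALSE;	/* Incomplete multi sector transfer. */
	if (ntfs_is_empty_recordp(magic))
		return FALSE;	/* Record is all 0xff, i.e. not initialized. */
	if (ntfs_is_mft_recordp(magic) || ntfs_is_indx_recordp(magic))
		return TRUE;
	return FALSE;
}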
+
+/*
+ * The Update Sequence Array (usa) is an array of the le16 values which belong
+ * to the end of each sector protected by the update sequence record in which
+ * this array is contained. Note that the first entry is the Update Sequence
+ * Number (usn), a cyclic counter of how many times the protected record has
+ * been written to disk. The values 0 and -1 (i.e. 0xffff) are not used. The
+ * last le16 of each sector has to be equal to the usn (during reading) or is
+ * set to it (during writing). If it is not, an incomplete multi sector
+ * transfer has occurred when the data was written.
+ * The maximum size for the update sequence array is fixed to:
+ *	maximum size = usa_ofs + (usa_count * 2) = 510 bytes
+ * The 510 bytes comes from the fact that the last le16 in the array has to
+ * (obviously) finish before the last le16 of the first 512-byte sector.
+ * This formula can be used as a consistency check in that usa_ofs +
+ * (usa_count * 2) has to be less than or equal to 510.
+ */
+typedef struct {
+	NTFS_RECORD_TYPE magic;	/* A four-byte magic identifying the record
+				   type and/or status. */
+	le16 usa_ofs;		/* Offset to the Update Sequence Array (usa)
+				   from the start of the ntfs record. */
+	le16 usa_count;		/* Number of le16 sized entries in the usa
+				   including the Update Sequence Number (usn),
+				   thus the number of fixups is the usa_count
+				   minus 1. */
+} __attribute__ ((__packed__)) NTFS_RECORD;
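
As a worked sketch of the consistency rule above (usa_ofs + usa_count * 2 must not exceed 510) and of the per-sector check, assuming 512-byte sectors; this is illustrative only, not the driver's own fixup code:

/*
 * Sketch: check that the usa fits before the last le16 of the first
 * sector and that every protected sector ends in the current usn.
 */
static inline BOOL example_usa_is_consistent(NTFS_RECORD *r, u32 size)
{
	u16 usa_ofs = le16_to_cpu(r->usa_ofs);
	u16 usa_count = le16_to_cpu(r->usa_count);
	le16 usn = *(le16*)((u8*)r + usa_ofs);
	u32 pos;

	/* maximum size = usa_ofs + (usa_count * 2) = 510 bytes */
	if (usa_ofs + usa_count * 2 > 510)
		return FALSE;
	/* One usn entry plus one fixup per 512-byte sector of the record. */
	if (usa_count != size / 512 + 1)
		return FALSE;
	/* The last le16 of each sector must equal the usn. */
	for (pos = 510; pos < size; pos += 512)
		if (*(le16*)((u8*)r + pos) != usn)
			return FALSE;
	return TRUE;
}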
+
+/*
+ * System files mft record numbers. All these files are always marked as used
+ * in the bitmap attribute of the mft; presumably in order to avoid accidental
+ * allocation for random other mft records. Also, the sequence number for each
+ * of the system files is always equal to their mft record number and it is
+ * never modified.
+ */
+typedef enum {
+	FILE_MFT       = 0,	/* Master file table (mft). Data attribute
+				   contains the entries and bitmap attribute
+				   records which ones are in use (bit==1). */
+	FILE_MFTMirr   = 1,	/* Mft mirror: copy of first four mft records
+				   in data attribute. If cluster size > 4kiB,
+				   copy of first N mft records, with
+					N = cluster_size / mft_record_size. */
+	FILE_LogFile   = 2,	/* Journalling log in data attribute. */
+	FILE_Volume    = 3,	/* Volume name attribute and volume information
+				   attribute (flags and ntfs version). Windows
+				   refers to this file as volume DASD (Direct
+				   Access Storage Device). */
+	FILE_AttrDef   = 4,	/* Array of attribute definitions in data
+				   attribute. */
+	FILE_root      = 5,	/* Root directory. */
+	FILE_Bitmap    = 6,	/* Allocation bitmap of all clusters (lcns) in
+				   data attribute. */
+	FILE_Boot      = 7,	/* Boot sector (always at cluster 0) in data
+				   attribute. */
+	FILE_BadClus   = 8,	/* Contains all bad clusters in the non-resident
+				   data attribute. */
+	FILE_Secure    = 9,	/* Shared security descriptors in data attribute
+				   and two indexes into the descriptors.
+				   Appeared in Windows 2000. Before that, this
+				   file was named $Quota but was unused. */
+	FILE_UpCase    = 10,	/* Uppercase equivalents of all 65536 Unicode
+				   characters in data attribute. */
+	FILE_Extend    = 11,	/* Directory containing other system files (eg.
+				   $ObjId, $Quota, $Reparse and $UsnJrnl). This
+				   is new to NTFS3.0. */
+	FILE_reserved12 = 12,	/* Reserved for future use (records 12-15). */
+	FILE_reserved13 = 13,
+	FILE_reserved14 = 14,
+	FILE_reserved15 = 15,
+	FILE_first_user = 16,	/* First user file, used as test limit for
+				   whether to allow opening a file or not. */
+} NTFS_SYSTEM_FILES;
+
+/*
+ * These are the so far known MFT_RECORD_* flags (16-bit) which contain
+ * information about the mft record in which they are present.
+ */
+enum {
+	MFT_RECORD_IN_USE	= const_cpu_to_le16(0x0001),
+	MFT_RECORD_IS_DIRECTORY = const_cpu_to_le16(0x0002),
+} __attribute__ ((__packed__));
+
+typedef le16 MFT_RECORD_FLAGS;
+
+/*
+ * mft references (aka file references or file record segment references) are
+ * used whenever a structure needs to refer to a record in the mft.
+ *
+ * A reference consists of a 48-bit index into the mft and a 16-bit sequence
+ * number used to detect stale references.
+ *
+ * For error reporting purposes we treat the 48-bit index as a signed quantity.
+ *
+ * The sequence number is a circular counter (skipping 0) describing how many
+ * times the referenced mft record has been (re)used. This has to match the
+ * sequence number of the mft record being referenced, otherwise the reference
+ * is considered stale and removed (FIXME: only ntfsck or the driver itself?).
+ *
+ * If the sequence number is zero it is assumed that no sequence number
+ * consistency checking should be performed.
+ *
+ * FIXME: Since inodes are 32-bit as of now, the driver needs to always check
+ * for high_part being 0 and if not either BUG(), cause a panic() or handle
+ * the situation in some other way. This shouldn't be a problem as a volume has
+ * to become HUGE in order to need more than 32-bits worth of mft records.
+ * Assuming the standard mft record size of 1kiB, the records alone (never
+ * mind the non-resident attributes, etc.) would require 4TiB of space for the
+ * first 32 bits worth of records. This is only if some strange person does
+ * not decide to play foul and make the mft sparse which would be a really
+ * horrible thing to do as it would trash our current driver implementation. )-:
+ * Do I hear screams "we want 64-bit inodes!" ?!? (-;
+ *
+ * FIXME: The mft zone is defined as the first 12% of the volume. This space is
+ * reserved so that the mft can grow contiguously and hence doesn't become
+ * fragmented. Volume free space includes the empty part of the mft zone and
+ * when the volume's free 88% are used up, the mft zone is shrunk by a factor
+ * of 2, thus making more space available for more files/data. This process is
+ * repeated every time there is no more free space except for the mft zone
+ * until there really is no more free space.
+ */
+
+/*
+ * Typedef the MFT_REF as a 64-bit value for easier handling.
+ * Also define two unpacking macros to get to the reference (MREF) and
+ * sequence number (MSEQNO) respectively.
+ * The _LE versions are to be applied on little endian MFT_REFs.
+ * Note: The _LE versions will return a CPU endian formatted value!
+ */
+typedef enum {
+	MFT_REF_MASK_CPU	= 0x0000ffffffffffffULL,
+	MFT_REF_MASK_LE		= const_cpu_to_le64(0x0000ffffffffffffULL),
+} MFT_REF_CONSTS;
+
+typedef u64 MFT_REF;
+typedef le64 leMFT_REF;
+
+#define MK_MREF(m, s)	((MFT_REF)(((MFT_REF)(s) << 48) |		\
+					((MFT_REF)(m) & MFT_REF_MASK_CPU)))
+#define MK_LE_MREF(m, s) cpu_to_le64(MK_MREF(m, s))
+
+#define MREF(x)		((unsigned long)((x) & MFT_REF_MASK_CPU))
+#define MSEQNO(x)	((u16)(((x) >> 48) & 0xffff))
+#define MREF_LE(x)	((unsigned long)(le64_to_cpu(x) & MFT_REF_MASK_CPU))
+#define MSEQNO_LE(x)	((u16)((le64_to_cpu(x) >> 48) & 0xffff))
+
+#define IS_ERR_MREF(x)	(((x) & 0x0000800000000000ULL) ? 1 : 0)
+#define ERR_MREF(x)	((u64)((s64)(x)))
+#define MREF_ERR(x)	((int)((s64)(x)))
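
For example, a reference to a (hypothetical) mft record 0x1234 with sequence number 5 packs and unpacks like this (sketch only):

/* Sketch: round trip a reference through the packing macros. */
static inline BOOL example_mref_roundtrip(void)
{
	MFT_REF ref = MK_MREF(0x1234, 5);	/* seq no lives in bits 48-63. */
	leMFT_REF lref = MK_LE_MREF(0x1234, 5);	/* on-disk, little endian. */

	/* MREF(ref) == 0x1234 and MSEQNO(ref) == 5. */
	if (MREF(ref) != 0x1234 || MSEQNO(ref) != 5)
		return FALSE;
	/* The _LE variants return CPU endian values, so they match, too. */
	if (MREF_LE(lref) != MREF(ref) || MSEQNO_LE(lref) != MSEQNO(ref))
		return FALSE;
	return TRUE;
}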
+
+/*
+ * The mft record header present at the beginning of every record in the mft.
+ * This is followed by a sequence of variable length attribute records which
+ * is terminated by an attribute of type AT_END which is a truncated attribute
+ * in that it only consists of the attribute type code AT_END and none of the
+ * other members of the attribute structure are present.
+ */
+typedef struct {
+/*Ofs*/
+/*  0	NTFS_RECORD; -- Unfolded here as gcc doesn't like unnamed structs. */
+	NTFS_RECORD_TYPE magic;	/* Usually the magic is "FILE". */
+	le16 usa_ofs;		/* See NTFS_RECORD definition above. */
+	le16 usa_count;		/* See NTFS_RECORD definition above. */
+
+/*  8*/	le64 lsn;		/* $LogFile sequence number for this record.
+				   Changed every time the record is modified. */
+/* 16*/	le16 sequence_number;	/* Number of times this mft record has been
+				   reused. (See description for MFT_REF
+				   above.) NOTE: The increment (skipping zero)
+				   is done when the file is deleted. NOTE: If
+				   this is zero it is left zero. */
+/* 18*/	le16 link_count;	/* Number of hard links, i.e. the number of
+				   directory entries referencing this record.
+				   NOTE: Only used in mft base records.
+				   NOTE: When deleting a directory entry we
+				   check the link_count and if it is 1 we
+				   delete the file. Otherwise we delete the
+				   FILE_NAME_ATTR being referenced by the
+				   directory entry from the mft record and
+				   decrement the link_count.
+				   FIXME: Careful with Win32 + DOS names! */
+/* 20*/	le16 attrs_offset;	/* Byte offset to the first attribute in this
+				   mft record from the start of the mft record.
+				   NOTE: Must be aligned to 8-byte boundary. */
+/* 22*/	MFT_RECORD_FLAGS flags;	/* Bit array of MFT_RECORD_FLAGS. When a file
+				   is deleted, the MFT_RECORD_IN_USE flag is
+				   set to zero. */
+/* 24*/	le32 bytes_in_use;	/* Number of bytes used in this mft record.
+				   NOTE: Must be aligned to 8-byte boundary. */
+/* 28*/	le32 bytes_allocated;	/* Number of bytes allocated for this mft
+				   record. This should be equal to the mft
+				   record size. */
+/* 32*/	leMFT_REF base_mft_record;/* This is zero for base mft records.
+				   When it is not zero it is a mft reference
+				   pointing to the base mft record to which
+				   this record belongs (this is then used to
+				   locate the attribute list attribute present
+				   in the base record which describes this
+				   extension record and hence might need
+				   modification when the extension record
+				   itself is modified, also locating the
+				   attribute list also means finding the other
+				   potential extents, belonging to the non-base
+				   mft record). */
+/* 40*/	le16 next_attr_instance;/* The instance number that will be assigned to
+				   the next attribute added to this mft record.
+				   NOTE: Incremented each time after it is used.
+				   NOTE: Every time the mft record is reused
+				   this number is set to zero.  NOTE: The first
+				   instance number is always 0. */
+/* The below fields are specific to NTFS 3.1+ (Windows XP and above): */
+/* 42*/ le16 reserved;		/* Reserved/alignment. */
+/* 44*/ le32 mft_record_number;	/* Number of this mft record. */
+/* sizeof() = 48 bytes */
+/*
+ * When (re)using the mft record, we place the update sequence array at this
+ * offset, i.e. before we start with the attributes.  This also makes sense,
+ * otherwise we could run into problems with the update sequence array
+ * containing in itself the last two bytes of a sector which would mean that
+ * multi sector transfer protection wouldn't work.  As you can't protect data
+ * by overwriting it since you then can't get it back...
+ * When reading we obviously use the data from the ntfs record header.
+ */
+} __attribute__ ((__packed__)) MFT_RECORD;
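
As a worked example of the note above, for the common case of 1kiB mft records on an NTFS 3.1 volume: usa_ofs is 48 (right after the header), usa_count is 1 + 1024/512 = 3, and the first attribute then starts at the next 8-byte boundary, i.e. offset 56. A sketch (the helper name is illustrative):

/*
 * Sketch: first attribute offset when the usa is placed directly after
 * the mft record header.  (48 + 3 * 2 + 7) & ~7 = 56 for the example
 * above.
 */
static inline u16 example_first_attr_offset(u16 usa_ofs, u16 usa_count)
{
	return (usa_ofs + usa_count * 2 + 7) & ~7;
}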
+
+/* This is the version without the NTFS 3.1+ specific fields. */
+typedef struct {
+/*Ofs*/
+/*  0	NTFS_RECORD; -- Unfolded here as gcc doesn't like unnamed structs. */
+	NTFS_RECORD_TYPE magic;	/* Usually the magic is "FILE". */
+	le16 usa_ofs;		/* See NTFS_RECORD definition above. */
+	le16 usa_count;		/* See NTFS_RECORD definition above. */
+
+/*  8*/	le64 lsn;		/* $LogFile sequence number for this record.
+				   Changed every time the record is modified. */
+/* 16*/	le16 sequence_number;	/* Number of times this mft record has been
+				   reused. (See description for MFT_REF
+				   above.) NOTE: The increment (skipping zero)
+				   is done when the file is deleted. NOTE: If
+				   this is zero it is left zero. */
+/* 18*/	le16 link_count;	/* Number of hard links, i.e. the number of
+				   directory entries referencing this record.
+				   NOTE: Only used in mft base records.
+				   NOTE: When deleting a directory entry we
+				   check the link_count and if it is 1 we
+				   delete the file. Otherwise we delete the
+				   FILE_NAME_ATTR being referenced by the
+				   directory entry from the mft record and
+				   decrement the link_count.
+				   FIXME: Careful with Win32 + DOS names! */
+/* 20*/	le16 attrs_offset;	/* Byte offset to the first attribute in this
+				   mft record from the start of the mft record.
+				   NOTE: Must be aligned to 8-byte boundary. */
+/* 22*/	MFT_RECORD_FLAGS flags;	/* Bit array of MFT_RECORD_FLAGS. When a file
+				   is deleted, the MFT_RECORD_IN_USE flag is
+				   set to zero. */
+/* 24*/	le32 bytes_in_use;	/* Number of bytes used in this mft record.
+				   NOTE: Must be aligned to 8-byte boundary. */
+/* 28*/	le32 bytes_allocated;	/* Number of bytes allocated for this mft
+				   record. This should be equal to the mft
+				   record size. */
+/* 32*/	leMFT_REF base_mft_record;/* This is zero for base mft records.
+				   When it is not zero it is a mft reference
+				   pointing to the base mft record to which
+				   this record belongs (this is then used to
+				   locate the attribute list attribute present
+				   in the base record which describes this
+				   extension record and hence might need
+				   modification when the extension record
+				   itself is modified, also locating the
+				   attribute list also means finding the other
+				   potential extents, belonging to the non-base
+				   mft record). */
+/* 40*/	le16 next_attr_instance;/* The instance number that will be assigned to
+				   the next attribute added to this mft record.
+				   NOTE: Incremented each time after it is used.
+				   NOTE: Every time the mft record is reused
+				   this number is set to zero.  NOTE: The first
+				   instance number is always 0. */
+/* sizeof() = 42 bytes */
+/*
+ * When (re)using the mft record, we place the update sequence array at this
+ * offset, i.e. before we start with the attributes.  This also makes sense,
+ * otherwise we could run into problems with the update sequence array
+ * containing in itself the last two bytes of a sector which would mean that
+ * multi sector transfer protection wouldn't work.  As you can't protect data
+ * by overwriting it since you then can't get it back...
+ * When reading we obviously use the data from the ntfs record header.
+ */
+} __attribute__ ((__packed__)) MFT_RECORD_OLD;
+
+/*
+ * System defined attributes (32-bit).  Each attribute type has a corresponding
+ * attribute name (Unicode string of maximum 64 character length) as described
+ * by the attribute definitions present in the data attribute of the $AttrDef
+ * system file.  On NTFS 3.0 volumes the names are just as the types are named
+ * in the below defines exchanging AT_ for the dollar sign ($).  If that is not
+ * a revealing choice of symbol I do not know what is... (-;
+ */
+enum {
+	AT_UNUSED			= const_cpu_to_le32(         0),
+	AT_STANDARD_INFORMATION		= const_cpu_to_le32(      0x10),
+	AT_ATTRIBUTE_LIST		= const_cpu_to_le32(      0x20),
+	AT_FILE_NAME			= const_cpu_to_le32(      0x30),
+	AT_OBJECT_ID			= const_cpu_to_le32(      0x40),
+	AT_SECURITY_DESCRIPTOR		= const_cpu_to_le32(      0x50),
+	AT_VOLUME_NAME			= const_cpu_to_le32(      0x60),
+	AT_VOLUME_INFORMATION		= const_cpu_to_le32(      0x70),
+	AT_DATA				= const_cpu_to_le32(      0x80),
+	AT_INDEX_ROOT			= const_cpu_to_le32(      0x90),
+	AT_INDEX_ALLOCATION		= const_cpu_to_le32(      0xa0),
+	AT_BITMAP			= const_cpu_to_le32(      0xb0),
+	AT_REPARSE_POINT		= const_cpu_to_le32(      0xc0),
+	AT_EA_INFORMATION		= const_cpu_to_le32(      0xd0),
+	AT_EA				= const_cpu_to_le32(      0xe0),
+	AT_PROPERTY_SET			= const_cpu_to_le32(      0xf0),
+	AT_LOGGED_UTILITY_STREAM	= const_cpu_to_le32(     0x100),
+	AT_FIRST_USER_DEFINED_ATTRIBUTE	= const_cpu_to_le32(    0x1000),
+	AT_END				= const_cpu_to_le32(0xffffffff)
+};
+
+typedef le32 ATTR_TYPE;
+
+/*
+ * The collation rules for sorting views/indexes/etc (32-bit).
+ *
+ * COLLATION_BINARY - Collate by binary compare where the first byte is most
+ *	significant.
+ * COLLATION_UNICODE_STRING - Collate Unicode strings by comparing their binary
+ *	Unicode values, except that when a character can be uppercased, the
+ *	upper case value collates before the lower case one.
+ * COLLATION_FILE_NAME - Collate file names as Unicode strings. The collation
+ *	is done very much like COLLATION_UNICODE_STRING. In fact I have no idea
+ *	what the difference is. Perhaps the difference is that file names
+ *	would treat some special characters in an odd way (see
+ *	unistr.c::ntfs_collate_names() and unistr.c::legal_ansi_char_array[]
+ *	for what I mean), whereas COLLATION_UNICODE_STRING would not give any
+ *	special treatment to any characters at all, but this is speculation.
+ * COLLATION_NTOFS_ULONG - Sorting is done according to ascending le32 key
+ *	values. E.g. used for $SII index in FILE_Secure, which sorts by
+ *	security_id (le32).
+ * COLLATION_NTOFS_SID - Sorting is done according to ascending SID values.
+ *	E.g. used for $O index in FILE_Extend/$Quota.
+ * COLLATION_NTOFS_SECURITY_HASH - Sorting is done first by ascending hash
+ *	values and second by ascending security_id values. E.g. used for $SDH
+ *	index in FILE_Secure.
+ * COLLATION_NTOFS_ULONGS - Sorting is done according to a sequence of ascending
+ *	le32 key values. E.g. used for $O index in FILE_Extend/$ObjId, which
+ *	sorts by object_id (16-byte), by splitting up the object_id in four
+ *	le32 values and using them as individual keys. E.g. take the following
+ *	two object_ids, stored as follows on disk:
+ *		1st: a1 61 65 b7 65 7b d4 11 9e 3d 00 e0 81 10 42 59
+ *		2nd: 38 14 37 d2 d2 f3 d4 11 a5 21 c8 6b 79 b1 97 45
+ *	To compare them, they are split into four le32 values each, like so:
+ *		1st: 0xb76561a1 0x11d47b65 0xe0003d9e 0x59421081
+ *		2nd: 0xd2371438 0x11d4f3d2 0x6bc821a5 0x4597b179
+ *	Now, it is apparent why the 2nd object_id collates after the 1st: the
+ *	first le32 value of the 1st object_id is less than the first le32 of
+ *	the 2nd object_id. If the first le32 values of both object_ids were
+ *	equal then the second le32 values would be compared, etc.
+ */
+enum {
+	COLLATION_BINARY		= const_cpu_to_le32(0x00),
+	COLLATION_FILE_NAME		= const_cpu_to_le32(0x01),
+	COLLATION_UNICODE_STRING	= const_cpu_to_le32(0x02),
+	COLLATION_NTOFS_ULONG		= const_cpu_to_le32(0x10),
+	COLLATION_NTOFS_SID		= const_cpu_to_le32(0x11),
+	COLLATION_NTOFS_SECURITY_HASH	= const_cpu_to_le32(0x12),
+	COLLATION_NTOFS_ULONGS		= const_cpu_to_le32(0x13)
+};
+
+typedef le32 COLLATION_RULE;
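
A sketch of the COLLATION_NTOFS_ULONGS rule described above, comparing a 16-byte object_id as four le32 keys in array order (the helper is illustrative, not part of this driver):

/* Sketch: collate two 16-byte keys as four le32 values, compared in
 * array order, as in the worked object_id example above.  Returns <0,
 * 0 or >0 like memcmp(). */
static inline int example_collate_ntofs_ulongs(const le32 *k1, const le32 *k2)
{
	int i;

	for (i = 0; i < 4; i++) {
		u32 u1 = le32_to_cpu(k1[i]);
		u32 u2 = le32_to_cpu(k2[i]);

		if (u1 < u2)
			return -1;
		if (u1 > u2)
			return 1;
	}
	return 0;
}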
+
+/*
+ * The flags (32-bit) describing attribute properties in the attribute
+ * definition structure.  FIXME: This information is from Regis's information
+ * and, according to him, it is not certain and probably incomplete.
+ * The INDEXABLE flag is fairly certainly correct as only the file name
+ * attribute has this flag set and this is the only attribute indexed in NT4.
+ */
+enum {
+	INDEXABLE	    = const_cpu_to_le32(0x02), /* Attribute can be
+							  indexed. */
+	NEED_TO_REGENERATE  = const_cpu_to_le32(0x40), /* Need to regenerate
+							  during regeneration
+							  phase. */
+	CAN_BE_NON_RESIDENT = const_cpu_to_le32(0x80), /* Attribute can be
+							  non-resident. */
+};
+
+typedef le32 ATTR_DEF_FLAGS;
+
+/*
+ * The data attribute of FILE_AttrDef contains a sequence of attribute
+ * definitions for the NTFS volume. With this, it is supposed to be safe for an
+ * older NTFS driver to mount a volume containing a newer NTFS version without
+ * damaging it (that's the theory. In practice it's: not damaging it too much).
+ * Entries are sorted by attribute type. The flags describe whether the
+ * attribute can be resident/non-resident and possibly other things, but the
+ * actual bits are unknown.
+ */
+typedef struct {
+/*hex ofs*/
+/*  0*/	ntfschar name[0x40];		/* Unicode name of the attribute. Zero
+					   terminated. */
+/* 80*/	ATTR_TYPE type;			/* Type of the attribute. */
+/* 84*/	le32 display_rule;		/* Default display rule.
+					   FIXME: What does it mean? (AIA) */
+/* 88*/ COLLATION_RULE collation_rule;	/* Default collation rule. */
+/* 8c*/	ATTR_DEF_FLAGS flags;		/* Flags describing the attribute. */
+/* 90*/	sle64 min_size;			/* Optional minimum attribute size. */
+/* 98*/	sle64 max_size;			/* Maximum size of attribute. */
+/* sizeof() = 0xa0 or 160 bytes */
+} __attribute__ ((__packed__)) ATTR_DEF;
+
+/*
+ * Attribute flags (16-bit).
+ */
+enum {
+	ATTR_IS_COMPRESSED    = const_cpu_to_le16(0x0001),
+	ATTR_COMPRESSION_MASK = const_cpu_to_le16(0x00ff), /* Compression method
+							      mask.  Also, first
+							      illegal value. */
+	ATTR_IS_ENCRYPTED     = const_cpu_to_le16(0x4000),
+	ATTR_IS_SPARSE	      = const_cpu_to_le16(0x8000),
+} __attribute__ ((__packed__));
+
+typedef le16 ATTR_FLAGS;
+
+/*
+ * Attribute compression.
+ *
+ * Only the data attribute is ever compressed in the current ntfs driver in
+ * Windows. Further, compression is only applied when the data attribute is
+ * non-resident. Finally, to use compression, the maximum allowed cluster size
+ * on a volume is 4kib.
+ *
+ * The compression method is based on independently compressing blocks of X
+ * clusters, where X is determined from the compression_unit value found in the
+ * non-resident attribute record header (more precisely: X = 2^compression_unit
+ * clusters). On Windows NT/2k, X always is 16 clusters (compression_unit = 4).
+ *
+ * There are three different cases of how a compression block of X clusters
+ * can be stored:
+ *
+ *   1) The data in the block is all zero (a sparse block):
+ *	  This is stored as a sparse block in the runlist, i.e. the runlist
+ *	  entry has length = X and lcn = -1. The mapping pairs array actually
+ *	  uses a delta_lcn value length of 0, i.e. delta_lcn is not present at
+ *	  all, which is then interpreted by the driver as lcn = -1.
+ *	  NOTE: Even uncompressed files can be sparse on NTFS 3.0 volumes, then
+ *	  the same principles apply as above, except that the length is not
+ *	  restricted to being any particular value.
+ *
+ *   2) The data in the block is not compressed:
+ *	  This happens when compression doesn't reduce the size of the block
+ *	  in clusters. I.e. if compression has a small effect so that the
+ *	  compressed data still occupies X clusters, then the uncompressed data
+ *	  is stored in the block.
+ *	  This case is recognised by the fact that the runlist entry has
+ *	  length = X and lcn >= 0. The mapping pairs array stores this as
+ *	  normal with a run length of X and some specific delta_lcn, i.e.
+ *	  delta_lcn has to be present.
+ *
+ *   3) The data in the block is compressed:
+ *	  The common case. This case is recognised by the fact that the run
+ *	  list entry has length L < X and lcn >= 0. The mapping pairs array
+ *	  stores this as normal with a run length of X and some specific
+ *	  delta_lcn, i.e. delta_lcn has to be present. This runlist entry is
+ *	  immediately followed by a sparse entry with length = X - L and
+ *	  lcn = -1. The latter entry is to make up the vcn counting to the
+ *	  full compression block size X.
+ *
+ * In fact, life is more complicated because adjacent entries of the same type
+ * can be coalesced. This means that one has to keep track of the number of
+ * clusters handled and work on a basis of X clusters at a time being one
+ * block. An example: if length L > X this means that this particular runlist
+ * entry contains a block of length X and part of one or more blocks of length
+ * L - X. Another example: if length L < X, this does not necessarily mean that
+ * the block is compressed as it might be that the lcn changes inside the block
+ * and hence the following runlist entry describes the continuation of the
+ * potentially compressed block. The block would be compressed if the
+ * following runlist entry describes at least X - L sparse clusters, thus
+ * making up the compression block length as described in point 3 above. (Of
+ * course, there can be several runlist entries with small lengths so that the
+ * sparse entry does not follow the first data containing entry with
+ * length < X.)
+ *
+ * NOTE: At the end of the compressed attribute value, there most likely is not
+ * just the right amount of data to make up a compression block, thus this data
+ * is not even attempted to be compressed. It is just stored as is, unless
+ * the number of clusters it occupies is reduced when compressed, in which case
+ * it is stored as a compressed compression block, complete with sparse
+ * clusters at the end.
+ */
+
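+/*
+ * Illustrative sketch, not part of the on-disk layout: how the three cases
+ * described above map onto (non-coalesced) runlist entries.  struct
+ * example_run is a made-up stand-in, not the driver's real runlist type, and
+ * all validation is omitted.
+ */
+struct example_run {
+	s64 lcn;	/* Logical cluster number, or -1 for a sparse run. */
+	s64 length;	/* Length of the run in clusters. */
+};
+
+/*
+ * Classify one compression block of X clusters described by run[0] (and, for
+ * case 3, the following run[1]).  Returns 1, 2, or 3 for the cases above, or
+ * 0 if coalesced/split runs make more context necessary.
+ */
+static inline int classify_cb_sketch(const struct example_run *run, s64 X)
+{
+	if (run[0].lcn == -1 && run[0].length == X)
+		return 1;	/* Sparse (all zero) block. */
+	if (run[0].lcn >= 0 && run[0].length == X)
+		return 2;	/* Stored uncompressed. */
+	if (run[0].lcn >= 0 && run[0].length < X && run[1].lcn == -1 &&
+	    run[1].length >= X - run[0].length)
+		return 3;	/* Compressed, padded by a sparse run. */
+	return 0;
+}
+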
+/*
+ * Flags of resident attributes (8-bit).
+ */
+enum {
+	RESIDENT_ATTR_IS_INDEXED = 0x01, /* Attribute is referenced in an index
+					    (has implications for deleting and
+					    modifying the attribute). */
+} __attribute__ ((__packed__));
+
+typedef u8 RESIDENT_ATTR_FLAGS;
+
+/*
+ * Attribute record header. Always aligned to 8-byte boundary.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	ATTR_TYPE type;		/* The (32-bit) type of the attribute. */
+/*  4*/	le32 length;		/* Byte size of the resident part of the
+				   attribute (aligned to 8-byte boundary).
+				   Used to get to the next attribute. */
+/*  8*/	u8 non_resident;	/* If 0, attribute is resident.
+				   If 1, attribute is non-resident. */
+/*  9*/	u8 name_length;		/* Unicode character size of name of attribute.
+				   0 if unnamed. */
+/* 10*/	le16 name_offset;	/* If name_length != 0, the byte offset to the
+				   beginning of the name from the attribute
+				   record. Note that the name is stored as a
+				   Unicode string. When creating, place offset
+				   just at the end of the record header. Then,
+				   follow with attribute value or mapping pairs
+				   array, resident and non-resident attributes
+				   respectively, aligning to an 8-byte
+				   boundary. */
+/* 12*/	ATTR_FLAGS flags;	/* Flags describing the attribute. */
+/* 14*/	le16 instance;		/* The instance of this attribute record. This
+				   number is unique within this mft record (see
+				   MFT_RECORD/next_attribute_instance notes in
+				   mft.h for more details). */
+/* 16*/	union {
+		/* Resident attributes. */
+		struct {
+/* 16 */		le32 value_length;/* Byte size of attribute value. */
+/* 20 */		le16 value_offset;/* Byte offset of the attribute
+					     value from the start of the
+					     attribute record. When creating,
+					     align to 8-byte boundary if we
+					     have a name present as this might
+					     not have a length of a multiple
+					     of 8-bytes. */
+/* 22 */		RESIDENT_ATTR_FLAGS flags; /* See above. */
+/* 23 */		s8 reserved;	  /* Reserved/alignment to 8-byte
+					     boundary. */
+		} __attribute__ ((__packed__)) resident;
+		/* Non-resident attributes. */
+		struct {
+/* 16*/			leVCN lowest_vcn;/* Lowest valid virtual cluster number
+				for this portion of the attribute value or
+				0 if this is the only extent (usually the
+				case). - Only when an attribute list is used
+				does lowest_vcn != 0 ever occur. */
+/* 24*/			leVCN highest_vcn;/* Highest valid vcn of this extent of
+				the attribute value. - Usually there is only one
+				portion, so this usually equals the attribute
+				value size in clusters minus 1. Can be -1 for
+				zero length files. Can be 0 for "single extent"
+				attributes. */
+/* 32*/			le16 mapping_pairs_offset; /* Byte offset from the
+				beginning of the structure to the mapping pairs
+				array which contains the mappings between the
+				vcns and the logical cluster numbers (lcns).
+				When creating, place this at the end of this
+				record header aligned to 8-byte boundary. */
+/* 34*/			u8 compression_unit; /* The compression unit expressed
+				as the log to the base 2 of the number of
+				clusters in a compression unit. 0 means not
+				compressed. (This effectively limits the
+				compression unit size to be a power of two
+				clusters.) WinNT4 only uses a value of 4. */
+/* 35*/			u8 reserved[5];		/* Align to 8-byte boundary. */
+/* The sizes below are only used when lowest_vcn is zero, as otherwise it would
+   be difficult to keep them up-to-date.*/
+/* 40*/			sle64 allocated_size;	/* Byte size of disk space
+				allocated to hold the attribute value. Always
+				is a multiple of the cluster size. When a file
+				is compressed, this field is a multiple of the
+				compression block size (2^compression_unit) and
+				it represents the logically allocated space
+				rather than the actual on disk usage. For this
+				use the compressed_size (see below). */
+/* 48*/			sle64 data_size;	/* Byte size of the attribute
+				value. Can be larger than allocated_size if
+				attribute value is compressed or sparse. */
+/* 56*/			sle64 initialized_size;	/* Byte size of initialized
+				portion of the attribute value. Usually equals
+				data_size. */
+/* sizeof(uncompressed attr) = 64*/
+/* 64*/			sle64 compressed_size;	/* Byte size of the attribute
+				value after compression. Only present when
+				compressed. Always is a multiple of the
+				cluster size. Represents the actual amount of
+				disk space being used on the disk. */
+/* sizeof(compressed attr) = 72*/
+		} __attribute__ ((__packed__)) non_resident;
+	} __attribute__ ((__packed__)) data;
+} __attribute__ ((__packed__)) ATTR_RECORD;
+
+typedef ATTR_RECORD ATTR_REC;
+
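+/*
+ * Illustrative sketch, not part of the on-disk layout: stepping to the next
+ * attribute record via the length field and locating a resident value via
+ * value_offset, as described in the structure above.  Bounds checking is
+ * omitted and the helper names are made up for this example.
+ */
+static inline ATTR_RECORD *next_attr_sketch(ATTR_RECORD *a)
+{
+	return (ATTR_RECORD *)((u8 *)a + le32_to_cpu(a->length));
+}
+
+static inline u8 *resident_value_sketch(ATTR_RECORD *a)
+{
+	if (a->non_resident)
+		return NULL;	/* Value is described by the mapping pairs. */
+	return (u8 *)a + le16_to_cpu(a->data.resident.value_offset);
+}
+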
+/*
+ * File attribute flags (32-bit).
+ */
+enum {
+	/*
+	 * The following flags are only present in the STANDARD_INFORMATION
+	 * attribute (in the field file_attributes).
+	 */
+	FILE_ATTR_READONLY		= const_cpu_to_le32(0x00000001),
+	FILE_ATTR_HIDDEN		= const_cpu_to_le32(0x00000002),
+	FILE_ATTR_SYSTEM		= const_cpu_to_le32(0x00000004),
+	/* Old DOS volid. Unused in NT.	= const_cpu_to_le32(0x00000008), */
+
+	FILE_ATTR_DIRECTORY		= const_cpu_to_le32(0x00000010),
+	/* Note, FILE_ATTR_DIRECTORY is not considered valid in NT.  It is
+	   reserved for the DOS SUBDIRECTORY flag. */
+	FILE_ATTR_ARCHIVE		= const_cpu_to_le32(0x00000020),
+	FILE_ATTR_DEVICE		= const_cpu_to_le32(0x00000040),
+	FILE_ATTR_NORMAL		= const_cpu_to_le32(0x00000080),
+
+	FILE_ATTR_TEMPORARY		= const_cpu_to_le32(0x00000100),
+	FILE_ATTR_SPARSE_FILE		= const_cpu_to_le32(0x00000200),
+	FILE_ATTR_REPARSE_POINT		= const_cpu_to_le32(0x00000400),
+	FILE_ATTR_COMPRESSED		= const_cpu_to_le32(0x00000800),
+
+	FILE_ATTR_OFFLINE		= const_cpu_to_le32(0x00001000),
+	FILE_ATTR_NOT_CONTENT_INDEXED	= const_cpu_to_le32(0x00002000),
+	FILE_ATTR_ENCRYPTED		= const_cpu_to_le32(0x00004000),
+
+	FILE_ATTR_VALID_FLAGS		= const_cpu_to_le32(0x00007fb7),
+	/* Note, FILE_ATTR_VALID_FLAGS masks out the old DOS VolId and the
+	   FILE_ATTR_DEVICE and preserves everything else.  This mask is used
+	   to obtain all flags that are valid for reading. */
+	FILE_ATTR_VALID_SET_FLAGS	= const_cpu_to_le32(0x000031a7),
+	/* Note, FILE_ATTR_VALID_SET_FLAGS masks out the old DOS VolId, the
+	   F_A_DEVICE, F_A_DIRECTORY, F_A_SPARSE_FILE, F_A_REPARSE_POINT,
+	   F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest.  This mask
+	   is used to obtain all flags that are valid for setting. */
+
+	/*
+	 * The following flags are only present in the FILE_NAME attribute (in
+	 * the field file_attributes).
+	 */
+	FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT	= const_cpu_to_le32(0x10000000),
+	/* Note, this is a copy of the corresponding bit from the mft record,
+	   telling us whether this is a directory or not, i.e. whether it has
+	   an index root attribute or not. */
+	FILE_ATTR_DUP_VIEW_INDEX_PRESENT	= const_cpu_to_le32(0x20000000),
+	/* Note, this is a copy of the corresponding bit from the mft record,
+	   telling us whether this file has a view index present (eg. object id
+	   index, quota index, one of the security indexes or the encrypting
+	   file system related indexes). */
+};
+
+typedef le32 FILE_ATTR_FLAGS;
+
+/*
+ * NOTE on times in NTFS: All times are in MS standard time format, i.e. they
+ * are the number of 100-nanosecond intervals since 1st January 1601, 00:00:00
+ * universal coordinated time (UTC). (In Linux time starts 1st January 1970,
+ * 00:00:00 UTC and is stored as the number of 1-second intervals since then.)
+ */
+
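+/*
+ * Illustrative sketch, not part of the on-disk layout: converting an NTFS
+ * time stamp (already converted to cpu byte order) to Unix seconds.  The
+ * offset between the two epochs is 11644473600 seconds, i.e.
+ * 116444736000000000 hundred-nanosecond intervals.  The names below are made
+ * up for this example; the driver has its own conversion helpers.
+ */
+#define NTFS_EPOCH_DELTA_100NS	116444736000000000LL	/* 1601 -> 1970. */
+
+static inline s64 ntfs_time_to_unix_sketch(s64 ntfs_time)
+{
+	return (ntfs_time - NTFS_EPOCH_DELTA_100NS) / 10000000;
+}
+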
+/*
+ * Attribute: Standard information (0x10).
+ *
+ * NOTE: Always resident.
+ * NOTE: Present in all base file records on a volume.
+ * NOTE: There is conflicting information about the meaning of each of the time
+ *	 fields but the meaning as defined below has been verified to be
+ *	 correct by practical experimentation on Windows NT4 SP6a and is hence
+ *	 assumed to be the one and only correct interpretation.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	sle64 creation_time;		/* Time file was created. Updated when
+					   a filename is changed(?). */
+/*  8*/	sle64 last_data_change_time;	/* Time the data attribute was last
+					   modified. */
+/* 16*/	sle64 last_mft_change_time;	/* Time this mft record was last
+					   modified. */
+/* 24*/	sle64 last_access_time;		/* Approximate time when the file was
+					   last accessed (obviously this is not
+					   updated on read-only volumes). In
+					   Windows this is only updated when
+					   accessed if some time delta has
+					   passed since the last update. Also,
+					   last access time updates can be
+					   disabled altogether for speed. */
+/* 32*/	FILE_ATTR_FLAGS file_attributes; /* Flags describing the file. */
+/* 36*/	union {
+	/* NTFS 1.2 */
+		struct {
+		/* 36*/	u8 reserved12[12];	/* Reserved/alignment to 8-byte
+						   boundary. */
+		} __attribute__ ((__packed__)) v1;
+	/* sizeof() = 48 bytes */
+	/* NTFS 3.x */
+		struct {
+/*
+ * If a volume has been upgraded from a previous NTFS version, then these
+ * fields are present only if the file has been accessed since the upgrade.
+ * Recognize the difference by comparing the length of the resident attribute
+ * value. If it is 48, then the following fields are missing. If it is 72 then
+ * the fields are present. Maybe just check like this:
+ *	if (resident.ValueLength < sizeof(STANDARD_INFORMATION)) {
+ *		Assume NTFS 1.2- format.
+ *		If (volume version is 3.x)
+ *			Upgrade attribute to NTFS 3.x format.
+ *		else
+ *			Use NTFS 1.2- format for access.
+ *	} else
+ *		Use NTFS 3.x format for access.
+ * Only problem is that it might be legal to set the length of the value to
+ * arbitrarily large values thus spoiling this check. - But chkdsk probably
+ * views that as a corruption, assuming that it behaves like this for all
+ * attributes.
+ */
+		/* 36*/	le32 maximum_versions;	/* Maximum allowed versions for
+				file. Zero if version numbering is disabled. */
+		/* 40*/	le32 version_number;	/* This file's version (if any).
+				Set to zero if maximum_versions is zero. */
+		/* 44*/	le32 class_id;		/* Class id from bidirectional
+				class id index (?). */
+		/* 48*/	le32 owner_id;		/* Owner_id of the user owning
+				the file. Translate via $Q index in FILE_Extend
+				/$Quota to the quota control entry for the user
+				owning the file. Zero if quotas are disabled. */
+		/* 52*/	le32 security_id;	/* Security_id for the file.
+				Translate via $SII index and $SDS data stream
+				in FILE_Secure to the security descriptor. */
+		/* 56*/	le64 quota_charged;	/* Byte size of the charge to
+				the quota for all streams of the file. Note: Is
+				zero if quotas are disabled. */
+		/* 64*/	le64 usn;		/* Last update sequence number
+				of the file. This is a direct index into the
+				change (aka usn) journal file. It is zero if
+				the usn journal is disabled.
+				NOTE: To disable the journal need to delete
+				the journal file itself and to then walk the
+				whole mft and set all Usn entries in all mft
+				records to zero! (This can take a while!)
+				The journal is FILE_Extend/$UsnJrnl. Win2k
+				will recreate the journal and initiate
+				logging if necessary when mounting the
+				partition. This, in contrast to disabling the
+				journal is a very fast process, so the user
+				won't even notice it. */
+		} __attribute__ ((__packed__)) v3;
+	/* sizeof() = 72 bytes (NTFS 3.x) */
+	} __attribute__ ((__packed__)) ver;
+} __attribute__ ((__packed__)) STANDARD_INFORMATION;
+
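+/*
+ * Illustrative sketch, not part of the on-disk layout: the version check
+ * described in the comment above, i.e. a resident value of 48 bytes means
+ * NTFS 1.2- format while 72 bytes (== sizeof(STANDARD_INFORMATION)) means
+ * the NTFS 3.x fields are present.  @a is assumed to be the resident
+ * $STANDARD_INFORMATION attribute record; the helper name is made up.
+ */
+static inline int std_info_has_v3_sketch(const ATTR_RECORD *a)
+{
+	return le32_to_cpu(a->data.resident.value_length) >=
+			sizeof(STANDARD_INFORMATION);
+}
+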
+/*
+ * Attribute: Attribute list (0x20).
+ *
+ * - Can be either resident or non-resident.
+ * - Value consists of a sequence of variable length, 8-byte aligned,
+ * ATTR_LIST_ENTRY records.
+ * - The list is not terminated by anything at all! The only way to know when
+ * the end is reached is to keep track of the current offset and compare it to
+ * the attribute value size.
+ * - The attribute list attribute contains one entry for each attribute of
+ * the file in which the list is located, except for the list attribute
+ * itself. The list is sorted: first by attribute type, second by attribute
+ * name (if present), third by instance number. The extents of one
+ * non-resident attribute (if present) immediately follow after the initial
+ * extent. They are ordered by lowest_vcn and have their instance set to zero.
+ * It is not allowed to have two attributes with all sorting keys equal.
+ * - Further restrictions:
+ *	- If not resident, the vcn to lcn mapping array has to fit inside the
+ *	  base mft record.
+ *	- The attribute list attribute value has a maximum size of 256kb. This
+ *	  is imposed by the Windows cache manager.
+ * - Attribute lists are only used when the attributes of the mft record do
+ * not fit inside the mft record despite all attributes (that can be made
+ * non-resident) having been made non-resident. This can happen e.g. when:
+ *	- File has a large number of hard links (lots of file name
+ *	  attributes present).
+ *	- The mapping pairs array of some non-resident attribute becomes so
+ *	  large due to fragmentation that it overflows the mft record.
+ *	- The security descriptor is very complex (not applicable to
+ *	  NTFS 3.0 volumes).
+ *	- There are many named streams.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	ATTR_TYPE type;		/* Type of referenced attribute. */
+/*  4*/	le16 length;		/* Byte size of this entry (8-byte aligned). */
+/*  6*/	u8 name_length;		/* Size in Unicode chars of the name of the
+				   attribute or 0 if unnamed. */
+/*  7*/	u8 name_offset;		/* Byte offset to beginning of attribute name
+				   (always set this to where the name would
+				   start even if unnamed). */
+/*  8*/	leVCN lowest_vcn;	/* Lowest virtual cluster number of this portion
+				   of the attribute value. This is usually 0. It
+				   is non-zero for the case where one attribute
+				   does not fit into one mft record and thus
+				   several mft records are allocated to hold
+				   this attribute. In the latter case, each mft
+				   record holds one extent of the attribute and
+				   there is one attribute list entry for each
+				   extent. NOTE: This is DEFINITELY a signed
+				   value! The windows driver uses cmp, followed
+				   by jg when comparing this, thus it treats it
+				   as signed. */
+/* 16*/	leMFT_REF mft_reference;/* The reference of the mft record holding
+				   the ATTR_RECORD for this portion of the
+				   attribute value. */
+/* 24*/	le16 instance;		/* If lowest_vcn = 0, the instance of the
+				   attribute being referenced; otherwise 0. */
+/* 26*/	ntfschar name[0];	/* Use when creating only. When reading use
+				   name_offset to determine the location of the
+				   name. */
+/* sizeof() = 26 + (attribute_name_length * 2) bytes */
+} __attribute__ ((__packed__)) ATTR_LIST_ENTRY;
+
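+/*
+ * Illustrative sketch, not part of the on-disk layout: since the attribute
+ * list has no terminator, it is walked by tracking the current byte offset
+ * against the attribute value size, as described above.  Validation of
+ * e->length is omitted and the helper name is made up for this example.
+ */
+static inline void walk_attr_list_sketch(u8 *al, const u32 al_size)
+{
+	u8 *p = al;
+
+	while (p + sizeof(ATTR_LIST_ENTRY) <= al + al_size) {
+		ATTR_LIST_ENTRY *e = (ATTR_LIST_ENTRY *)p;
+
+		/* ... inspect e->type, e->mft_reference, e->lowest_vcn ... */
+		p += le16_to_cpu(e->length);
+	}
+}
+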
+/*
+ * The maximum allowed length for a file name.
+ */
+#define MAXIMUM_FILE_NAME_LENGTH	255
+
+/*
+ * Possible namespaces for filenames in ntfs (8-bit).
+ */
+enum {
+	FILE_NAME_POSIX		= 0x00,
+	/* This is the largest namespace. It is case sensitive and allows all
+	   Unicode characters except for: '\0' and '/'.  Beware that in
+	   WinNT/2k files which eg have the same name except for their case
+	   will not be distinguished by the standard utilities and thus a "del
+	   filename" will delete both "filename" and "fileName" without
+	   warning. */
+	FILE_NAME_WIN32		= 0x01,
+	/* The standard WinNT/2k NTFS long filenames. Case insensitive.  All
+	   Unicode chars except: '\0', '"', '*', '/', ':', '<', '>', '?', '\',
+	   and '|'.  Further, names cannot end with a '.' or a space. */
+	FILE_NAME_DOS		= 0x02,
+	/* The standard DOS filenames (8.3 format). Uppercase only.  All 8-bit
+	   characters greater than space, except: '"', '*', '+', ',', '/', ':',
+	   ';', '<', '=', '>', '?', and '\'. */
+	FILE_NAME_WIN32_AND_DOS	= 0x03,
+	/* 3 means that both the Win32 and the DOS filenames are identical and
+	   hence have been saved in this single filename record. */
+} __attribute__ ((__packed__));
+
+typedef u8 FILE_NAME_TYPE_FLAGS;
+
+/*
+ * Attribute: Filename (0x30).
+ *
+ * NOTE: Always resident.
+ * NOTE: All fields, except the parent_directory, are only updated when the
+ *	 filename is changed. Until then, they just become out of sync with
+ *	 reality and the more up to date values are present in the standard
+ *	 information attribute.
+ * NOTE: There is conflicting information about the meaning of each of the time
+ *	 fields but the meaning as defined below has been verified to be
+ *	 correct by practical experimentation on Windows NT4 SP6a and is hence
+ *	 assumed to be the one and only correct interpretation.
+ */
+typedef struct {
+/*hex ofs*/
+/*  0*/	leMFT_REF parent_directory;	/* Directory this filename is
+					   referenced from. */
+/*  8*/	sle64 creation_time;		/* Time file was created. */
+/* 10*/	sle64 last_data_change_time;	/* Time the data attribute was last
+					   modified. */
+/* 18*/	sle64 last_mft_change_time;	/* Time this mft record was last
+					   modified. */
+/* 20*/	sle64 last_access_time;		/* Time this mft record was last
+					   accessed. */
+/* 28*/	sle64 allocated_size;		/* Byte size of allocated space for the
+					   data attribute. NOTE: Is a multiple
+					   of the cluster size. */
+/* 30*/	sle64 data_size;		/* Byte size of actual data in data
+					   attribute. */
+/* 38*/	FILE_ATTR_FLAGS file_attributes;	/* Flags describing the file. */
+/* 3c*/	union {
+	/* 3c*/	struct {
+		/* 3c*/	le16 packed_ea_size;	/* Size of the buffer needed to
+						   pack the extended attributes
+						   (EAs), if such are present.*/
+		/* 3e*/	le16 reserved;		/* Reserved for alignment. */
+		} __attribute__ ((__packed__)) ea;
+	/* 3c*/	struct {
+		/* 3c*/	le32 reparse_point_tag;	/* Type of reparse point,
+						   present only in reparse
+						   points and only if there are
+						   no EAs. */
+		} __attribute__ ((__packed__)) rp;
+	} __attribute__ ((__packed__)) type;
+/* 40*/	u8 file_name_length;			/* Length of file name in
+						   (Unicode) characters. */
+/* 41*/	FILE_NAME_TYPE_FLAGS file_name_type;	/* Namespace of the file name.*/
+/* 42*/	ntfschar file_name[0];			/* File name in Unicode. */
+} __attribute__ ((__packed__)) FILE_NAME_ATTR;
+
+/*
+ * GUID structures store globally unique identifiers (GUID). A GUID is a
+ * 128-bit value consisting of one group of eight hexadecimal digits, followed
+ * by three groups of four hexadecimal digits each, followed by one group of
+ * twelve hexadecimal digits. GUIDs are Microsoft's implementation of the
+ * distributed computing environment (DCE) universally unique identifier (UUID).
+ * A GUID is written in the textual form:
+ *	xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ */
+typedef struct {
+	le32 data1;	/* The first eight hexadecimal digits of the GUID. */
+	le16 data2;	/* The first group of four hexadecimal digits. */
+	le16 data3;	/* The second group of four hexadecimal digits. */
+	u8 data4[8];	/* The first two bytes are the third group of four
+			   hexadecimal digits. The remaining six bytes are the
+			   final 12 hexadecimal digits. */
+} __attribute__ ((__packed__)) GUID;
+
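+/*
+ * Illustrative sketch, not part of the on-disk layout: how the GUID fields
+ * map onto the textual 8-4-4-4-12 form described above.  @buf is assumed to
+ * hold at least 37 bytes; the helper name is made up for this example.
+ */
+static inline void guid_to_text_sketch(const GUID *g, char *buf, size_t len)
+{
+	snprintf(buf, len, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+			le32_to_cpu(g->data1), le16_to_cpu(g->data2),
+			le16_to_cpu(g->data3), g->data4[0], g->data4[1],
+			g->data4[2], g->data4[3], g->data4[4], g->data4[5],
+			g->data4[6], g->data4[7]);
+}
+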
+/*
+ * FILE_Extend/$ObjId contains an index named $O. This index contains all
+ * object_ids present on the volume as the index keys and the corresponding
+ * mft_record numbers as the index entry data parts. The data part (defined
+ * below) also contains three other object_ids:
+ *	birth_volume_id - object_id of FILE_Volume on which the file was first
+ *			  created. Optional (i.e. can be zero).
+ *	birth_object_id - object_id of file when it was first created. Usually
+ *			  equals the object_id. Optional (i.e. can be zero).
+ *	domain_id	- Reserved (always zero).
+ */
+typedef struct {
+	leMFT_REF mft_reference;/* Mft record containing the object_id in
+				   the index entry key. */
+	union {
+		struct {
+			GUID birth_volume_id;
+			GUID birth_object_id;
+			GUID domain_id;
+		} __attribute__ ((__packed__)) origin;
+		u8 extended_info[48];
+	} __attribute__ ((__packed__)) opt;
+} __attribute__ ((__packed__)) OBJ_ID_INDEX_DATA;
+
+/*
+ * Attribute: Object id (NTFS 3.0+) (0x40).
+ *
+ * NOTE: Always resident.
+ */
+typedef struct {
+	GUID object_id;				/* Unique id assigned to the
+						   file.*/
+	/* The following fields are optional. The attribute value size is 16
+	   bytes, i.e. sizeof(GUID), if these are not present at all. Note,
+	   the entries can be present but one or more (or all) can be zero,
+	   meaning that the particular value(s) are not defined. */
+	union {
+		struct {
+			GUID birth_volume_id;	/* Unique id of volume on which
+						   the file was first created.*/
+			GUID birth_object_id;	/* Unique id of file when it was
+						   first created. */
+			GUID domain_id;		/* Reserved, zero. */
+		} __attribute__ ((__packed__)) origin;
+		u8 extended_info[48];
+	} __attribute__ ((__packed__)) opt;
+} __attribute__ ((__packed__)) OBJECT_ID_ATTR;
+
+/*
+ * The pre-defined IDENTIFIER_AUTHORITIES used as SID_IDENTIFIER_AUTHORITY in
+ * the SID structure (see below).
+ */
+//typedef enum {					/* SID string prefix. */
+//	SECURITY_NULL_SID_AUTHORITY	= {0, 0, 0, 0, 0, 0},	/* S-1-0 */
+//	SECURITY_WORLD_SID_AUTHORITY	= {0, 0, 0, 0, 0, 1},	/* S-1-1 */
+//	SECURITY_LOCAL_SID_AUTHORITY	= {0, 0, 0, 0, 0, 2},	/* S-1-2 */
+//	SECURITY_CREATOR_SID_AUTHORITY	= {0, 0, 0, 0, 0, 3},	/* S-1-3 */
+//	SECURITY_NON_UNIQUE_AUTHORITY	= {0, 0, 0, 0, 0, 4},	/* S-1-4 */
+//	SECURITY_NT_SID_AUTHORITY	= {0, 0, 0, 0, 0, 5},	/* S-1-5 */
+//} IDENTIFIER_AUTHORITIES;
+
+/*
+ * These relative identifiers (RIDs) are used with the above identifier
+ * authorities to make up universal well-known SIDs.
+ *
+ * Note: The relative identifier (RID) refers to the portion of a SID, which
+ * identifies a user or group in relation to the authority that issued the SID.
+ * For example, the universal well-known SID Creator Owner ID (S-1-3-0) is
+ * made up of the identifier authority SECURITY_CREATOR_SID_AUTHORITY (3) and
+ * the relative identifier SECURITY_CREATOR_OWNER_RID (0).
+ */
+typedef enum {					/* Identifier authority. */
+	SECURITY_NULL_RID		  = 0,	/* S-1-0 */
+	SECURITY_WORLD_RID		  = 0,	/* S-1-1 */
+	SECURITY_LOCAL_RID		  = 0,	/* S-1-2 */
+
+	SECURITY_CREATOR_OWNER_RID	  = 0,	/* S-1-3 */
+	SECURITY_CREATOR_GROUP_RID	  = 1,	/* S-1-3 */
+
+	SECURITY_CREATOR_OWNER_SERVER_RID = 2,	/* S-1-3 */
+	SECURITY_CREATOR_GROUP_SERVER_RID = 3,	/* S-1-3 */
+
+	SECURITY_DIALUP_RID		  = 1,
+	SECURITY_NETWORK_RID		  = 2,
+	SECURITY_BATCH_RID		  = 3,
+	SECURITY_INTERACTIVE_RID	  = 4,
+	SECURITY_SERVICE_RID		  = 6,
+	SECURITY_ANONYMOUS_LOGON_RID	  = 7,
+	SECURITY_PROXY_RID		  = 8,
+	SECURITY_ENTERPRISE_CONTROLLERS_RID=9,
+	SECURITY_SERVER_LOGON_RID	  = 9,
+	SECURITY_PRINCIPAL_SELF_RID	  = 0xa,
+	SECURITY_AUTHENTICATED_USER_RID	  = 0xb,
+	SECURITY_RESTRICTED_CODE_RID	  = 0xc,
+	SECURITY_TERMINAL_SERVER_RID	  = 0xd,
+
+	SECURITY_LOGON_IDS_RID		  = 5,
+	SECURITY_LOGON_IDS_RID_COUNT	  = 3,
+
+	SECURITY_LOCAL_SYSTEM_RID	  = 0x12,
+
+	SECURITY_NT_NON_UNIQUE		  = 0x15,
+
+	SECURITY_BUILTIN_DOMAIN_RID	  = 0x20,
+
+	/*
+	 * Well-known domain relative sub-authority values (RIDs).
+	 */
+
+	/* Users. */
+	DOMAIN_USER_RID_ADMIN		  = 0x1f4,
+	DOMAIN_USER_RID_GUEST		  = 0x1f5,
+	DOMAIN_USER_RID_KRBTGT		  = 0x1f6,
+
+	/* Groups. */
+	DOMAIN_GROUP_RID_ADMINS		  = 0x200,
+	DOMAIN_GROUP_RID_USERS		  = 0x201,
+	DOMAIN_GROUP_RID_GUESTS		  = 0x202,
+	DOMAIN_GROUP_RID_COMPUTERS	  = 0x203,
+	DOMAIN_GROUP_RID_CONTROLLERS	  = 0x204,
+	DOMAIN_GROUP_RID_CERT_ADMINS	  = 0x205,
+	DOMAIN_GROUP_RID_SCHEMA_ADMINS	  = 0x206,
+	DOMAIN_GROUP_RID_ENTERPRISE_ADMINS= 0x207,
+	DOMAIN_GROUP_RID_POLICY_ADMINS	  = 0x208,
+
+	/* Aliases. */
+	DOMAIN_ALIAS_RID_ADMINS		  = 0x220,
+	DOMAIN_ALIAS_RID_USERS		  = 0x221,
+	DOMAIN_ALIAS_RID_GUESTS		  = 0x222,
+	DOMAIN_ALIAS_RID_POWER_USERS	  = 0x223,
+
+	DOMAIN_ALIAS_RID_ACCOUNT_OPS	  = 0x224,
+	DOMAIN_ALIAS_RID_SYSTEM_OPS	  = 0x225,
+	DOMAIN_ALIAS_RID_PRINT_OPS	  = 0x226,
+	DOMAIN_ALIAS_RID_BACKUP_OPS	  = 0x227,
+
+	DOMAIN_ALIAS_RID_REPLICATOR	  = 0x228,
+	DOMAIN_ALIAS_RID_RAS_SERVERS	  = 0x229,
+	DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a,
+} RELATIVE_IDENTIFIERS;
+
+/*
+ * The universal well-known SIDs:
+ *
+ *	NULL_SID			S-1-0-0
+ *	WORLD_SID			S-1-1-0
+ *	LOCAL_SID			S-1-2-0
+ *	CREATOR_OWNER_SID		S-1-3-0
+ *	CREATOR_GROUP_SID		S-1-3-1
+ *	CREATOR_OWNER_SERVER_SID	S-1-3-2
+ *	CREATOR_GROUP_SERVER_SID	S-1-3-3
+ *
+ *	(Non-unique IDs)		S-1-4
+ *
+ * NT well-known SIDs:
+ *
+ *	NT_AUTHORITY_SID	S-1-5
+ *	DIALUP_SID		S-1-5-1
+ *
+ *	NETWORK_SID		S-1-5-2
+ *	BATCH_SID		S-1-5-3
+ *	INTERACTIVE_SID		S-1-5-4
+ *	SERVICE_SID		S-1-5-6
+ *	ANONYMOUS_LOGON_SID	S-1-5-7		(aka null logon session)
+ *	PROXY_SID		S-1-5-8
+ *	SERVER_LOGON_SID	S-1-5-9		(aka domain controller account)
+ *	SELF_SID		S-1-5-10	(self RID)
+ *	AUTHENTICATED_USER_SID	S-1-5-11
+ *	RESTRICTED_CODE_SID	S-1-5-12	(running restricted code)
+ *	TERMINAL_SERVER_SID	S-1-5-13	(running on terminal server)
+ *
+ *	(Logon IDs)		S-1-5-5-X-Y
+ *
+ *	(NT non-unique IDs)	S-1-5-0x15-...
+ *
+ *	(Built-in domain)	S-1-5-0x20
+ */
+
+/*
+ * The SID_IDENTIFIER_AUTHORITY is a 48-bit value used in the SID structure.
+ *
+ * NOTE: This is stored as a big endian number, hence the high_part comes
+ * before the low_part.
+ */
+typedef union {
+	struct {
+		u16 high_part;	/* High 16-bits. */
+		u32 low_part;	/* Low 32-bits. */
+	} __attribute__ ((__packed__)) parts;
+	u8 value[6];		/* Value as individual bytes. */
+} __attribute__ ((__packed__)) SID_IDENTIFIER_AUTHORITY;
+
+/*
+ * The SID structure is a variable-length structure used to uniquely identify
+ * users or groups. SID stands for security identifier.
+ *
+ * The standard textual representation of the SID is of the form:
+ *	S-R-I-S-S...
+ * Where:
+ *    - The first "S" is the literal character 'S' identifying the following
+ *	digits as a SID.
+ *    - R is the revision level of the SID expressed as a sequence of digits
+ *	either in decimal or hexadecimal (if the latter, prefixed by "0x").
+ *    - I is the 48-bit identifier_authority, expressed as digits as R above.
+ *    - S... is one or more sub_authority values, expressed as digits as above.
+ *
+ * Example SID; the domain-relative SID of the local Administrators group on
+ * Windows NT/2k:
+ *	S-1-5-32-544
+ * This translates to a SID with:
+ *	revision = 1,
+ *	sub_authority_count = 2,
+ *	identifier_authority = {0,0,0,0,0,5},	// SECURITY_NT_AUTHORITY
+ *	sub_authority[0] = 32,			// SECURITY_BUILTIN_DOMAIN_RID
+ *	sub_authority[1] = 544			// DOMAIN_ALIAS_RID_ADMINS
+ */
+typedef struct {
+	u8 revision;
+	u8 sub_authority_count;
+	SID_IDENTIFIER_AUTHORITY identifier_authority;
+	le32 sub_authority[1];		/* At least one sub_authority. */
+} __attribute__ ((__packed__)) SID;
+
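+/*
+ * Illustrative sketch, not part of the on-disk layout: the identifier
+ * authority is stored big endian, so value[0] is the most significant of the
+ * six bytes.  For S-1-5-32-544 this returns 5, and the sub_authority values
+ * 32 and 544 would each be read with le32_to_cpu().  The helper name is made
+ * up for this example.
+ */
+static inline u64 sid_authority_sketch(const SID *s)
+{
+	const u8 *v = s->identifier_authority.value;
+
+	return ((u64)v[0] << 40) | ((u64)v[1] << 32) | ((u64)v[2] << 24) |
+	       ((u64)v[3] << 16) | ((u64)v[4] << 8) | (u64)v[5];
+}
+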
+/*
+ * Current constants for SIDs.
+ */
+typedef enum {
+	SID_REVISION			=  1,	/* Current revision level. */
+	SID_MAX_SUB_AUTHORITIES		= 15,	/* Maximum number of those. */
+	SID_RECOMMENDED_SUB_AUTHORITIES	=  1,	/* Will change to around 6 in
+						   a future revision. */
+} SID_CONSTANTS;
+
+/*
+ * The predefined ACE types (8-bit, see below).
+ */
+enum {
+	ACCESS_MIN_MS_ACE_TYPE		= 0,
+	ACCESS_ALLOWED_ACE_TYPE		= 0,
+	ACCESS_DENIED_ACE_TYPE		= 1,
+	SYSTEM_AUDIT_ACE_TYPE		= 2,
+	SYSTEM_ALARM_ACE_TYPE		= 3, /* Not implemented as of Win2k. */
+	ACCESS_MAX_MS_V2_ACE_TYPE	= 3,
+
+	ACCESS_ALLOWED_COMPOUND_ACE_TYPE= 4,
+	ACCESS_MAX_MS_V3_ACE_TYPE	= 4,
+
+	/* The following are Win2k only. */
+	ACCESS_MIN_MS_OBJECT_ACE_TYPE	= 5,
+	ACCESS_ALLOWED_OBJECT_ACE_TYPE	= 5,
+	ACCESS_DENIED_OBJECT_ACE_TYPE	= 6,
+	SYSTEM_AUDIT_OBJECT_ACE_TYPE	= 7,
+	SYSTEM_ALARM_OBJECT_ACE_TYPE	= 8,
+	ACCESS_MAX_MS_OBJECT_ACE_TYPE	= 8,
+
+	ACCESS_MAX_MS_V4_ACE_TYPE	= 8,
+
+	/* This one is for WinNT/2k. */
+	ACCESS_MAX_MS_ACE_TYPE		= 8,
+} __attribute__ ((__packed__));
+
+typedef u8 ACE_TYPES;
+
+/*
+ * The ACE flags (8-bit) for audit and inheritance (see below).
+ *
+ * SUCCESSFUL_ACCESS_ACE_FLAG is only used with system audit and alarm ACE
+ * types to indicate that a message is generated (in Windows!) for successful
+ * accesses.
+ *
+ * FAILED_ACCESS_ACE_FLAG is only used with system audit and alarm ACE types
+ * to indicate that a message is generated (in Windows!) for failed accesses.
+ */
+enum {
+	/* The inheritance flags. */
+	OBJECT_INHERIT_ACE		= 0x01,
+	CONTAINER_INHERIT_ACE		= 0x02,
+	NO_PROPAGATE_INHERIT_ACE	= 0x04,
+	INHERIT_ONLY_ACE		= 0x08,
+	INHERITED_ACE			= 0x10,	/* Win2k only. */
+	VALID_INHERIT_FLAGS		= 0x1f,
+
+	/* The audit flags. */
+	SUCCESSFUL_ACCESS_ACE_FLAG	= 0x40,
+	FAILED_ACCESS_ACE_FLAG		= 0x80,
+} __attribute__ ((__packed__));
+
+typedef u8 ACE_FLAGS;
+
+/*
+ * An ACE is an access-control entry in an access-control list (ACL).
+ * An ACE defines access to an object for a specific user or group or defines
+ * the types of access that generate system-administration messages or alarms
+ * for a specific user or group. The user or group is identified by a security
+ * identifier (SID).
+ *
+ * Each ACE starts with an ACE_HEADER structure (aligned on 4-byte boundary),
+ * which specifies the type and size of the ACE. The format of the subsequent
+ * data depends on the ACE type.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	ACE_TYPES type;		/* Type of the ACE. */
+/*  1*/	ACE_FLAGS flags;	/* Flags describing the ACE. */
+/*  2*/	le16 size;		/* Size in bytes of the ACE. */
+} __attribute__ ((__packed__)) ACE_HEADER;
+
+/*
+ * The access mask (32-bit). Defines the access rights.
+ *
+ * The specific rights (bits 0 to 15).  These depend on the type of the object
+ * being secured by the ACE.
+ */
+enum {
+	/* Specific rights for files and directories are as follows: */
+
+	/* Right to read data from the file. (FILE) */
+	FILE_READ_DATA			= const_cpu_to_le32(0x00000001),
+	/* Right to list contents of a directory. (DIRECTORY) */
+	FILE_LIST_DIRECTORY		= const_cpu_to_le32(0x00000001),
+
+	/* Right to write data to the file. (FILE) */
+	FILE_WRITE_DATA			= const_cpu_to_le32(0x00000002),
+	/* Right to create a file in the directory. (DIRECTORY) */
+	FILE_ADD_FILE			= const_cpu_to_le32(0x00000002),
+
+	/* Right to append data to the file. (FILE) */
+	FILE_APPEND_DATA		= const_cpu_to_le32(0x00000004),
+	/* Right to create a subdirectory. (DIRECTORY) */
+	FILE_ADD_SUBDIRECTORY		= const_cpu_to_le32(0x00000004),
+
+	/* Right to read extended attributes. (FILE/DIRECTORY) */
+	FILE_READ_EA			= const_cpu_to_le32(0x00000008),
+
+	/* Right to write extended attributes. (FILE/DIRECTORY) */
+	FILE_WRITE_EA			= const_cpu_to_le32(0x00000010),
+
+	/* Right to execute a file. (FILE) */
+	FILE_EXECUTE			= const_cpu_to_le32(0x00000020),
+	/* Right to traverse the directory. (DIRECTORY) */
+	FILE_TRAVERSE			= const_cpu_to_le32(0x00000020),
+
+	/*
+	 * Right to delete a directory and all the files it contains (its
+	 * children), even if the files are read-only. (DIRECTORY)
+	 */
+	FILE_DELETE_CHILD		= const_cpu_to_le32(0x00000040),
+
+	/* Right to read file attributes. (FILE/DIRECTORY) */
+	FILE_READ_ATTRIBUTES		= const_cpu_to_le32(0x00000080),
+
+	/* Right to change file attributes. (FILE/DIRECTORY) */
+	FILE_WRITE_ATTRIBUTES		= const_cpu_to_le32(0x00000100),
+
+	/*
+	 * The standard rights (bits 16 to 23).  These are independent of the
+	 * type of object being secured.
+	 */
+
+	/* Right to delete the object. */
+	DELETE				= const_cpu_to_le32(0x00010000),
+
+	/*
+	 * Right to read the information in the object's security descriptor,
+	 * not including the information in the SACL, i.e. right to read the
+	 * security descriptor and owner.
+	 */
+	READ_CONTROL			= const_cpu_to_le32(0x00020000),
+
+	/* Right to modify the DACL in the object's security descriptor. */
+	WRITE_DAC			= const_cpu_to_le32(0x00040000),
+
+	/* Right to change the owner in the object's security descriptor. */
+	WRITE_OWNER			= const_cpu_to_le32(0x00080000),
+
+	/*
+	 * Right to use the object for synchronization.  Enables a process to
+	 * wait until the object is in the signalled state.  Some object types
+	 * do not support this access right.
+	 */
+	SYNCHRONIZE			= const_cpu_to_le32(0x00100000),
+
+	/*
+	 * The following STANDARD_RIGHTS_* are combinations of the above for
+	 * convenience and are defined by the Win32 API.
+	 */
+
+	/* These are currently defined to READ_CONTROL. */
+	STANDARD_RIGHTS_READ		= const_cpu_to_le32(0x00020000),
+	STANDARD_RIGHTS_WRITE		= const_cpu_to_le32(0x00020000),
+	STANDARD_RIGHTS_EXECUTE		= const_cpu_to_le32(0x00020000),
+
+	/* Combines DELETE, READ_CONTROL, WRITE_DAC, and WRITE_OWNER access. */
+	STANDARD_RIGHTS_REQUIRED	= const_cpu_to_le32(0x000f0000),
+
+	/*
+	 * Combines DELETE, READ_CONTROL, WRITE_DAC, WRITE_OWNER, and
+	 * SYNCHRONIZE access.
+	 */
+	STANDARD_RIGHTS_ALL		= const_cpu_to_le32(0x001f0000),
+
+	/*
+	 * The access system ACL and maximum allowed access types (bits 24 to
+	 * 25, bits 26 to 27 are reserved).
+	 */
+	ACCESS_SYSTEM_SECURITY		= const_cpu_to_le32(0x01000000),
+	MAXIMUM_ALLOWED			= const_cpu_to_le32(0x02000000),
+
+	/*
+	 * The generic rights (bits 28 to 31).  These map onto the standard and
+	 * specific rights.
+	 */
+
+	/* Read, write, and execute access. */
+	GENERIC_ALL			= const_cpu_to_le32(0x10000000),
+
+	/* Execute access. */
+	GENERIC_EXECUTE			= const_cpu_to_le32(0x20000000),
+
+	/*
+	 * Write access.  For files, this maps onto:
+	 *	FILE_APPEND_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_DATA |
+	 *	FILE_WRITE_EA | STANDARD_RIGHTS_WRITE | SYNCHRONIZE
+	 * For directories, the mapping has the same numerical value.  See
+	 * above for the descriptions of the rights granted.
+	 */
+	GENERIC_WRITE			= const_cpu_to_le32(0x40000000),
+
+	/*
+	 * Read access.  For files, this maps onto:
+	 *	FILE_READ_ATTRIBUTES | FILE_READ_DATA | FILE_READ_EA |
+	 *	STANDARD_RIGHTS_READ | SYNCHRONIZE
+	 * For directories, the mapping has the same numerical value.  See
+	 * above for the descriptions of the rights granted.
+	 */
+	GENERIC_READ			= const_cpu_to_le32(0x80000000),
+};
+
+typedef le32 ACCESS_MASK;
+
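+/*
+ * Illustrative sketch, not part of the on-disk layout: expanding the two
+ * generic rights whose file mappings are spelled out above into specific
+ * rights.  Only GENERIC_READ and GENERIC_WRITE are handled here, the bit
+ * operations work directly on the little endian values, and the helper name
+ * is made up for this example.
+ */
+static inline ACCESS_MASK map_file_generic_sketch(ACCESS_MASK mask)
+{
+	if (mask & GENERIC_READ) {
+		mask &= ~GENERIC_READ;
+		mask |= FILE_READ_ATTRIBUTES | FILE_READ_DATA | FILE_READ_EA |
+			STANDARD_RIGHTS_READ | SYNCHRONIZE;
+	}
+	if (mask & GENERIC_WRITE) {
+		mask &= ~GENERIC_WRITE;
+		mask |= FILE_APPEND_DATA | FILE_WRITE_ATTRIBUTES |
+			FILE_WRITE_DATA | FILE_WRITE_EA |
+			STANDARD_RIGHTS_WRITE | SYNCHRONIZE;
+	}
+	return mask;
+}
+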
+/*
+ * The generic mapping array. Used to denote the mapping of each generic
+ * access right to a specific access mask.
+ *
+ * FIXME: What exactly is this and what is it for? (AIA)
+ */
+typedef struct {
+	ACCESS_MASK generic_read;
+	ACCESS_MASK generic_write;
+	ACCESS_MASK generic_execute;
+	ACCESS_MASK generic_all;
+} __attribute__ ((__packed__)) GENERIC_MAPPING;
+
+/*
+ * The predefined ACE type structures are as defined below.
+ */
+
+/*
+ * ACCESS_ALLOWED_ACE, ACCESS_DENIED_ACE, SYSTEM_AUDIT_ACE, SYSTEM_ALARM_ACE
+ */
+typedef struct {
+/*  0	ACE_HEADER; -- Unfolded here as gcc doesn't like unnamed structs. */
+	ACE_TYPES type;		/* Type of the ACE. */
+	ACE_FLAGS flags;	/* Flags describing the ACE. */
+	le16 size;		/* Size in bytes of the ACE. */
+/*  4*/	ACCESS_MASK mask;	/* Access mask associated with the ACE. */
+
+/*  8*/	SID sid;		/* The SID associated with the ACE. */
+} __attribute__ ((__packed__)) ACCESS_ALLOWED_ACE, ACCESS_DENIED_ACE,
+			       SYSTEM_AUDIT_ACE, SYSTEM_ALARM_ACE;
+
+/*
+ * The object ACE flags (32-bit).
+ */
+enum {
+	ACE_OBJECT_TYPE_PRESENT			= const_cpu_to_le32(1),
+	ACE_INHERITED_OBJECT_TYPE_PRESENT	= const_cpu_to_le32(2),
+};
+
+typedef le32 OBJECT_ACE_FLAGS;
+
+typedef struct {
+/*  0	ACE_HEADER; -- Unfolded here as gcc doesn't like unnamed structs. */
+	ACE_TYPES type;		/* Type of the ACE. */
+	ACE_FLAGS flags;	/* Flags describing the ACE. */
+	le16 size;		/* Size in bytes of the ACE. */
+/*  4*/	ACCESS_MASK mask;	/* Access mask associated with the ACE. */
+
+/*  8*/	OBJECT_ACE_FLAGS object_flags;	/* Flags describing the object ACE. */
+/* 12*/	GUID object_type;
+/* 28*/	GUID inherited_object_type;
+
+/* 44*/	SID sid;		/* The SID associated with the ACE. */
+} __attribute__ ((__packed__)) ACCESS_ALLOWED_OBJECT_ACE,
+			       ACCESS_DENIED_OBJECT_ACE,
+			       SYSTEM_AUDIT_OBJECT_ACE,
+			       SYSTEM_ALARM_OBJECT_ACE;
+
+/*
+ * An ACL is an access-control list.
+ * An ACL starts with an ACL header structure, which specifies the size of
+ * the ACL and the number of ACEs it contains. The ACL header is followed by
+ * zero or more access control entries (ACEs). The ACL as well as each ACE
+ * are aligned on 4-byte boundaries.
+ */
+typedef struct {
+	u8 revision;	/* Revision of this ACL. */
+	u8 alignment1;
+	le16 size;	/* Allocated space in bytes for ACL. Includes this
+			   header, the ACEs and the remaining free space. */
+	le16 ace_count;	/* Number of ACEs in the ACL. */
+	le16 alignment2;
+/* sizeof() = 8 bytes */
+} __attribute__ ((__packed__)) ACL;
+
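+/*
+ * Illustrative sketch, not part of the on-disk layout: walking the ACEs that
+ * follow the ACL header, using each ACE_HEADER's size field to step to the
+ * next one.  Validation is omitted and the helper name is made up for this
+ * example.
+ */
+static inline void walk_acl_sketch(ACL *acl)
+{
+	u8 *p = (u8 *)acl + sizeof(ACL);
+	u16 i;
+
+	for (i = 0; i < le16_to_cpu(acl->ace_count); i++) {
+		ACE_HEADER *ace = (ACE_HEADER *)p;
+
+		/* ... dispatch on ace->type and check ace->flags here ... */
+		p += le16_to_cpu(ace->size);
+	}
+}
+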
+/*
+ * Current constants for ACLs.
+ */
+typedef enum {
+	/* Current revision. */
+	ACL_REVISION		= 2,
+	ACL_REVISION_DS		= 4,
+
+	/* History of revisions. */
+	ACL_REVISION1		= 1,
+	MIN_ACL_REVISION	= 2,
+	ACL_REVISION2		= 2,
+	ACL_REVISION3		= 3,
+	ACL_REVISION4		= 4,
+	MAX_ACL_REVISION	= 4,
+} ACL_CONSTANTS;
+
+/*
+ * The security descriptor control flags (16-bit).
+ *
+ * SE_OWNER_DEFAULTED - This boolean flag, when set, indicates that the SID
+ *	pointed to by the Owner field was provided by a defaulting mechanism
+ *	rather than explicitly provided by the original provider of the
+ *	security descriptor.  This may affect the treatment of the SID with
+ *	respect to inheritance of an owner.
+ *
+ * SE_GROUP_DEFAULTED - This boolean flag, when set, indicates that the SID in
+ *	the Group field was provided by a defaulting mechanism rather than
+ *	explicitly provided by the original provider of the security
+ *	descriptor.  This may affect the treatment of the SID with respect to
+ *	inheritance of a primary group.
+ *
+ * SE_DACL_PRESENT - This boolean flag, when set, indicates that the security
+ *	descriptor contains a discretionary ACL.  If this flag is set and the
+ *	Dacl field of the SECURITY_DESCRIPTOR is null, then a null ACL is
+ *	explicitly being specified.
+ *
+ * SE_DACL_DEFAULTED - This boolean flag, when set, indicates that the ACL
+ *	pointed to by the Dacl field was provided by a defaulting mechanism
+ *	rather than explicitly provided by the original provider of the
+ *	security descriptor.  This may affect the treatment of the ACL with
+ *	respect to inheritance of an ACL.  This flag is ignored if the
+ *	DaclPresent flag is not set.
+ *
+ * SE_SACL_PRESENT - This boolean flag, when set,  indicates that the security
+ *	descriptor contains a system ACL pointed to by the Sacl field.  If this
+ *	flag is set and the Sacl field of the SECURITY_DESCRIPTOR is null, then
+ *	an empty (but present) ACL is being specified.
+ *
+ * SE_SACL_DEFAULTED - This boolean flag, when set, indicates that the ACL
+ *	pointed to by the Sacl field was provided by a defaulting mechanism
+ *	rather than explicitly provided by the original provider of the
+ *	security descriptor.  This may affect the treatment of the ACL with
+ *	respect to inheritance of an ACL.  This flag is ignored if the
+ *	SaclPresent flag is not set.
+ *
+ * SE_SELF_RELATIVE - This boolean flag, when set, indicates that the security
+ *	descriptor is in self-relative form.  In this form, all fields of the
+ *	security descriptor are contiguous in memory and all pointer fields are
+ *	expressed as offsets from the beginning of the security descriptor.
+ */
+enum {
+	SE_OWNER_DEFAULTED		= const_cpu_to_le16(0x0001),
+	SE_GROUP_DEFAULTED		= const_cpu_to_le16(0x0002),
+	SE_DACL_PRESENT			= const_cpu_to_le16(0x0004),
+	SE_DACL_DEFAULTED		= const_cpu_to_le16(0x0008),
+
+	SE_SACL_PRESENT			= const_cpu_to_le16(0x0010),
+	SE_SACL_DEFAULTED		= const_cpu_to_le16(0x0020),
+
+	SE_DACL_AUTO_INHERIT_REQ	= const_cpu_to_le16(0x0100),
+	SE_SACL_AUTO_INHERIT_REQ	= const_cpu_to_le16(0x0200),
+	SE_DACL_AUTO_INHERITED		= const_cpu_to_le16(0x0400),
+	SE_SACL_AUTO_INHERITED		= const_cpu_to_le16(0x0800),
+
+	SE_DACL_PROTECTED		= const_cpu_to_le16(0x1000),
+	SE_SACL_PROTECTED		= const_cpu_to_le16(0x2000),
+	SE_RM_CONTROL_VALID		= const_cpu_to_le16(0x4000),
+	SE_SELF_RELATIVE		= const_cpu_to_le16(0x8000)
+} __attribute__ ((__packed__));
+
+typedef le16 SECURITY_DESCRIPTOR_CONTROL;
+
+/*
+ * Self-relative security descriptor. Contains the owner and group SIDs as well
+ * as the sacl and dacl ACLs inside the security descriptor itself.
+ */
+typedef struct {
+	u8 revision;	/* Revision level of the security descriptor. */
+	u8 alignment;
+	SECURITY_DESCRIPTOR_CONTROL control; /* Flags qualifying the type of
+			   the descriptor as well as the following fields. */
+	le32 owner;	/* Byte offset to a SID representing an object's
+			   owner. If this is NULL, no owner SID is present in
+			   the descriptor. */
+	le32 group;	/* Byte offset to a SID representing an object's
+			   primary group. If this is NULL, no primary group
+			   SID is present in the descriptor. */
+	le32 sacl;	/* Byte offset to a system ACL. Only valid, if
+			   SE_SACL_PRESENT is set in the control field. If
+			   SE_SACL_PRESENT is set but sacl is NULL, a NULL ACL
+			   is specified. */
+	le32 dacl;	/* Byte offset to a discretionary ACL. Only valid, if
+			   SE_DACL_PRESENT is set in the control field. If
+			   SE_DACL_PRESENT is set but dacl is NULL, a NULL ACL
+			   (unconditionally granting access) is specified. */
+/* sizeof() = 0x14 bytes */
+} __attribute__ ((__packed__)) SECURITY_DESCRIPTOR_RELATIVE;
+
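+/*
+ * Illustrative sketch, not part of the on-disk layout: in the self-relative
+ * form the owner, group, sacl, and dacl fields are byte offsets from the
+ * start of the descriptor, with zero meaning "not present".  The helper name
+ * is made up for this example.
+ */
+static inline SID *sd_owner_sketch(SECURITY_DESCRIPTOR_RELATIVE *sd)
+{
+	u32 ofs = le32_to_cpu(sd->owner);
+
+	return ofs ? (SID *)((u8 *)sd + ofs) : NULL;
+}
+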
+/*
+ * Absolute security descriptor. Does not contain the owner and group SIDs, nor
+ * the sacl and dacl ACLs inside the security descriptor. Instead, it contains
+ * pointers to these structures in memory. Obviously, absolute security
+ * descriptors are only useful for in-memory representations of security
+ * descriptors. On disk, a self-relative security descriptor is used.
+ */
+typedef struct {
+	u8 revision;	/* Revision level of the security descriptor. */
+	u8 alignment;
+	SECURITY_DESCRIPTOR_CONTROL control;	/* Flags qualifying the type of
+			   the descriptor as well as the following fields. */
+	SID *owner;	/* Points to a SID representing an object's owner. If
+			   this is NULL, no owner SID is present in the
+			   descriptor. */
+	SID *group;	/* Points to a SID representing an object's primary
+			   group. If this is NULL, no primary group SID is
+			   present in the descriptor. */
+	ACL *sacl;	/* Points to a system ACL. Only valid, if
+			   SE_SACL_PRESENT is set in the control field. If
+			   SE_SACL_PRESENT is set but sacl is NULL, a NULL ACL
+			   is specified. */
+	ACL *dacl;	/* Points to a discretionary ACL. Only valid, if
+			   SE_DACL_PRESENT is set in the control field. If
+			   SE_DACL_PRESENT is set but dacl is NULL, a NULL ACL
+			   (unconditionally granting access) is specified. */
+} __attribute__ ((__packed__)) SECURITY_DESCRIPTOR;
+
+/*
+ * Current constants for security descriptors.
+ */
+typedef enum {
+	/* Current revision. */
+	SECURITY_DESCRIPTOR_REVISION	= 1,
+	SECURITY_DESCRIPTOR_REVISION1	= 1,
+
+	/* The sizes of both the absolute and relative security descriptors
+	   are the same, as pointers (at least on the ia32 architecture) are
+	   32-bit. */
+	SECURITY_DESCRIPTOR_MIN_LENGTH	= sizeof(SECURITY_DESCRIPTOR),
+} SECURITY_DESCRIPTOR_CONSTANTS;
+
+/*
+ * Attribute: Security descriptor (0x50). A standard self-relative security
+ * descriptor.
+ *
+ * NOTE: Can be resident or non-resident.
+ * NOTE: Not used in NTFS 3.0+, as security descriptors are stored centrally
+ * in FILE_Secure and the correct descriptor is found using the security_id
+ * from the standard information attribute.
+ */
+typedef SECURITY_DESCRIPTOR_RELATIVE SECURITY_DESCRIPTOR_ATTR;
+
+/*
+ * On NTFS 3.0+, all security descriptors are stored in FILE_Secure. Only one
+ * referenced instance of each unique security descriptor is stored.
+ *
+ * FILE_Secure contains no unnamed data attribute, i.e. it has zero length. It
+ * does, however, contain two indexes ($SDH and $SII) as well as a named data
+ * stream ($SDS).
+ *
+ * Every unique security descriptor is assigned a unique security identifier
+ * (security_id, not to be confused with a SID). The security_id is unique for
+ * the NTFS volume and is used as an index into the $SII index, which maps
+ * security_ids to the security descriptor's storage location within the $SDS
+ * data attribute. The $SII index is sorted by ascending security_id.
+ *
+ * A simple hash is computed from each security descriptor. This hash is used
+ * as an index into the $SDH index, which maps security descriptor hashes to
+ * the security descriptor's storage location within the $SDS data attribute.
+ * The $SDH index is sorted by security descriptor hash and is stored in a B+
+ * tree. When searching $SDH (with the intent of determining whether or not a
+ * new security descriptor is already present in the $SDS data stream), if a
+ * matching hash is found, but the security descriptors do not match, the
+ * search in the $SDH index is continued, searching for a next matching hash.
+ *
+ * When a precise match is found, the security_id corresponding to the security
+ * descriptor in the $SDS attribute is read from the found $SDH index entry and
+ * is stored in the $STANDARD_INFORMATION attribute of the file/directory to
+ * which the security descriptor is being applied. The $STANDARD_INFORMATION
+ * attribute is present in all base mft records (i.e. in all files and
+ * directories).
+ *
+ * If a match is not found, the security descriptor is assigned a new unique
+ * security_id and is added to the $SDS data attribute. Then, entries
+ * referencing this security descriptor in the $SDS data attribute are
+ * added to the $SDH and $SII indexes.
+ *
+ * Note: Entries are never deleted from FILE_Secure, even if nothing
+ * references an entry any more.
+ */
+
+/*
+ * This header precedes each security descriptor in the $SDS data stream.
+ * This is also the index entry data part of both the $SII and $SDH indexes.
+ */
+typedef struct {
+	le32 hash;	  /* Hash of the security descriptor. */
+	le32 security_id; /* The security_id assigned to the descriptor. */
+	le64 offset;	  /* Byte offset of this entry in the $SDS stream. */
+	le32 length;	  /* Size in bytes of this entry in $SDS stream. */
+} __attribute__ ((__packed__)) SECURITY_DESCRIPTOR_HEADER;
+
+/*
+ * The $SDS data stream contains the security descriptors, aligned on 16-byte
+ * boundaries, sorted by security_id in a B+ tree. Security descriptors cannot
+ * cross 256kib boundaries (this restriction is imposed by the Windows cache
+ * manager). Each security descriptor is contained in a SDS_ENTRY structure.
+ * Also, each security descriptor is stored twice in the $SDS stream with a
+ * fixed offset of 0x40000 bytes (256kib, the Windows cache manager's max size)
+ * between them; i.e. if an SDS_ENTRY specifies an offset of 0x51d0, then
+ * the first copy of the security descriptor will be at offset 0x51d0 in the
+ * $SDS data stream and the second copy will be at offset 0x451d0.
+ */
+typedef struct {
+/*Ofs*/
+/*  0	SECURITY_DESCRIPTOR_HEADER; -- Unfolded here as gcc doesn't like
+				       unnamed structs. */
+	le32 hash;	  /* Hash of the security descriptor. */
+	le32 security_id; /* The security_id assigned to the descriptor. */
+	le64 offset;	  /* Byte offset of this entry in the $SDS stream. */
+	le32 length;	  /* Size in bytes of this entry in $SDS stream. */
+/* 20*/	SECURITY_DESCRIPTOR_RELATIVE sid; /* The self-relative security
+					     descriptor. */
+} __attribute__ ((__packed__)) SDS_ENTRY;
+
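+/*
+ * Illustrative sketch, not part of the on-disk layout: as described above,
+ * each security descriptor is stored twice in $SDS, 0x40000 bytes apart, so
+ * the offset of the second copy follows directly from the header.  The
+ * helper name is made up for this example.
+ */
+static inline u64 sds_mirror_ofs_sketch(const SECURITY_DESCRIPTOR_HEADER *h)
+{
+	return le64_to_cpu(h->offset) + 0x40000;
+}
+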
+/*
+ * The index entry key used in the $SII index. The collation type is
+ * COLLATION_NTOFS_ULONG.
+ */
+typedef struct {
+	le32 security_id; /* The security_id assigned to the descriptor. */
+} __attribute__ ((__packed__)) SII_INDEX_KEY;
+
+/*
+ * The index entry key used in the $SDH index. The keys are sorted first by
+ * hash and then by security_id. The collation rule is
+ * COLLATION_NTOFS_SECURITY_HASH.
+ */
+typedef struct {
+	le32 hash;	  /* Hash of the security descriptor. */
+	le32 security_id; /* The security_id assigned to the descriptor. */
+} __attribute__ ((__packed__)) SDH_INDEX_KEY;
+
+/*
+ * Attribute: Volume name (0x60).
+ *
+ * NOTE: Always resident.
+ * NOTE: Present only in FILE_Volume.
+ */
+typedef struct {
+	ntfschar name[0];	/* The name of the volume in Unicode. */
+} __attribute__ ((__packed__)) VOLUME_NAME;
+
+/*
+ * Possible flags for the volume (16-bit).
+ */
+enum {
+	VOLUME_IS_DIRTY			= const_cpu_to_le16(0x0001),
+	VOLUME_RESIZE_LOG_FILE		= const_cpu_to_le16(0x0002),
+	VOLUME_UPGRADE_ON_MOUNT		= const_cpu_to_le16(0x0004),
+	VOLUME_MOUNTED_ON_NT4		= const_cpu_to_le16(0x0008),
+
+	VOLUME_DELETE_USN_UNDERWAY	= const_cpu_to_le16(0x0010),
+	VOLUME_REPAIR_OBJECT_ID		= const_cpu_to_le16(0x0020),
+
+	VOLUME_MODIFIED_BY_CHKDSK	= const_cpu_to_le16(0x8000),
+
+	VOLUME_FLAGS_MASK		= const_cpu_to_le16(0x803f),
+
+	/* To make our life easier when checking if we must mount read-only. */
+	VOLUME_MUST_MOUNT_RO_MASK	= const_cpu_to_le16(0x8037),
+} __attribute__ ((__packed__));
+
+typedef le16 VOLUME_FLAGS;
+
+/*
+ * Attribute: Volume information (0x70).
+ *
+ * NOTE: Always resident.
+ * NOTE: Present only in FILE_Volume.
+ * NOTE: Windows 2000 uses NTFS 3.0 while Windows NT4 service pack 6a uses
+ *	 NTFS 1.2. I haven't personally seen other values yet.
+ */
+typedef struct {
+	le64 reserved;		/* Not used (yet?). */
+	u8 major_ver;		/* Major version of the ntfs format. */
+	u8 minor_ver;		/* Minor version of the ntfs format. */
+	VOLUME_FLAGS flags;	/* Bit array of VOLUME_* flags. */
+} __attribute__ ((__packed__)) VOLUME_INFORMATION;
+
+/*
+ * Attribute: Data attribute (0x80).
+ *
+ * NOTE: Can be resident or non-resident.
+ *
+ * Data contents of a file (i.e. the unnamed stream) or of a named stream.
+ */
+typedef struct {
+	u8 data[0];		/* The file's data contents. */
+} __attribute__ ((__packed__)) DATA_ATTR;
+
+/*
+ * Index header flags (8-bit).
+ */
+enum {
+	/*
+	 * When index header is in an index root attribute:
+	 */
+	SMALL_INDEX = 0, /* The index is small enough to fit inside the index
+			    root attribute and there is no index allocation
+			    attribute present. */
+	LARGE_INDEX = 1, /* The index is too large to fit in the index root
+			    attribute and/or an index allocation attribute is
+			    present. */
+	/*
+	 * When index header is in an index block, i.e. is part of index
+	 * allocation attribute:
+	 */
+	LEAF_NODE  = 0, /* This is a leaf node, i.e. there are no more nodes
+			   branching off it. */
+	INDEX_NODE = 1, /* This node indexes other nodes, i.e. it is not a leaf
+			   node. */
+	NODE_MASK  = 1, /* Mask for accessing the *_NODE bits. */
+} __attribute__ ((__packed__));
+
+typedef u8 INDEX_HEADER_FLAGS;
+
+/*
+ * This is the header for indexes, describing the INDEX_ENTRY records, which
+ * follow the INDEX_HEADER. Together the index header and the index entries
+ * make up a complete index.
+ *
+ * IMPORTANT NOTE: The offset, length and size structure members are counted
+ * relative to the start of the index header structure and not relative to the
+ * start of the index root or index allocation structures themselves.
+ */
+typedef struct {
+	le32 entries_offset;		/* Byte offset to first INDEX_ENTRY
+					   aligned to 8-byte boundary. */
+	le32 index_length;		/* Data size of the index in bytes,
+					   i.e. bytes used from allocated
+					   size, aligned to 8-byte boundary. */
+	le32 allocated_size;		/* Byte size of this index (block),
+					   multiple of 8 bytes. */
+	/* NOTE: For the index root attribute, the above two numbers are always
+	   equal, as the attribute is resident and it is resized as needed. In
+	   the case of the index allocation attribute the attribute is not
+	   resident and hence the allocated_size is a fixed value and must
+	   equal the index_block_size specified by the INDEX_ROOT attribute
+	   corresponding to the INDEX_ALLOCATION attribute this INDEX_BLOCK
+	   belongs to. */
+	INDEX_HEADER_FLAGS flags;	/* Bit field of INDEX_HEADER_FLAGS. */
+	u8 reserved[3];			/* Reserved/align to 8-byte boundary. */
+} __attribute__ ((__packed__)) INDEX_HEADER;
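+
+/*
+ * Illustrative sketch only, not used by the driver: since entries_offset is
+ * relative to the start of the index header itself, the first index entry
+ * following an index header (whether it lives in an index root or in an
+ * index block) can be located as shown below.  The helper name is made up
+ * for this example.
+ */
+static inline u8 *ntfs_example_ie_start(INDEX_HEADER *ih)
+{
+	/* The first INDEX_ENTRY begins entries_offset bytes into @ih. */
+	return (u8*)ih + le32_to_cpu(ih->entries_offset);
+}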
+
+/*
+ * Attribute: Index root (0x90).
+ *
+ * NOTE: Always resident.
+ *
+ * This is followed by a sequence of index entries (INDEX_ENTRY structures)
+ * as described by the index header.
+ *
+ * When a directory is small enough to fit inside the index root then this
+ * is the only attribute describing the directory. When the directory is too
+ * large to fit in the index root, on the other hand, two additional
+ * attributes are present: an index allocation attribute, containing
+ * sub-nodes of the B+
+ * directory tree (see below), and a bitmap attribute, describing which virtual
+ * cluster numbers (vcns) in the index allocation attribute are in use by an
+ * index block.
+ *
+ * NOTE: The root directory (FILE_root) contains an entry for itself. Other
+ * directories do not contain entries for themselves, though.
+ */
+typedef struct {
+	ATTR_TYPE type;			/* Type of the indexed attribute. Is
+					   $FILE_NAME for directories, zero
+					   for view indexes. No other values
+					   allowed. */
+	COLLATION_RULE collation_rule;	/* Collation rule used to sort the
+					   index entries. If type is $FILE_NAME,
+					   this must be COLLATION_FILE_NAME. */
+	le32 index_block_size;		/* Size of each index block in bytes (in
+					   the index allocation attribute). */
+	u8 clusters_per_index_block;	/* Cluster size of each index block (in
+					   the index allocation attribute), when
+					   an index block is >= a cluster,
+					   otherwise this will be the log of
+					   the size (like how the encoding of
+					   the mft record size and the index
+					   record size found in the boot sector
+					   work). Has to be a power of 2. */
+	u8 reserved[3];			/* Reserved/align to 8-byte boundary. */
+	INDEX_HEADER index;		/* Index header describing the
+					   following index entries. */
+} __attribute__ ((__packed__)) INDEX_ROOT;
+
+/*
+ * Attribute: Index allocation (0xa0).
+ *
+ * NOTE: Always non-resident (doesn't make sense to be resident anyway!).
+ *
+ * This is an array of index blocks. Each index block starts with an
+ * INDEX_BLOCK structure containing an index header, followed by a sequence of
+ * index entries (INDEX_ENTRY structures), as described by the INDEX_HEADER.
+ */
+typedef struct {
+/*  0	NTFS_RECORD; -- Unfolded here as gcc doesn't like unnamed structs. */
+	NTFS_RECORD_TYPE magic;	/* Magic is "INDX". */
+	le16 usa_ofs;		/* See NTFS_RECORD definition. */
+	le16 usa_count;		/* See NTFS_RECORD definition. */
+
+/*  8*/	sle64 lsn;		/* $LogFile sequence number of the last
+				   modification of this index block. */
+/* 16*/	leVCN index_block_vcn;	/* Virtual cluster number of the index block.
+				   If the cluster_size on the volume is <= the
+				   index_block_size of the directory,
+				   index_block_vcn counts in units of clusters,
+				   and in units of sectors otherwise. */
+/* 24*/	INDEX_HEADER index;	/* Describes the following index entries. */
+/* sizeof()= 40 (0x28) bytes */
+/*
+ * When creating the index block, we place the update sequence array at this
+ * offset, i.e. before we start with the index entries. This also makes sense,
+ * otherwise we could run into problems with the update sequence array
+ * containing in itself the last two bytes of a sector which would mean that
+ * multi sector transfer protection wouldn't work: you cannot protect data
+ * by overwriting it, since you then cannot get it back...
+ * When reading use the data from the ntfs record header.
+ */
+} __attribute__ ((__packed__)) INDEX_BLOCK;
+
+typedef INDEX_BLOCK INDEX_ALLOCATION;
+
+/*
+ * The system file FILE_Extend/$Reparse contains an index named $R listing
+ * all reparse points on the volume. The index entry keys are as defined
+ * below. Note that there is no index data associated with the index entries.
+ *
+ * The index entries are sorted by the index key file_id. The collation rule is
+ * COLLATION_NTOFS_ULONGS. FIXME: Verify whether the reparse_tag is not the
+ * primary key / is not a key at all. (AIA)
+ */
+typedef struct {
+	le32 reparse_tag;	/* Reparse point type (inc. flags). */
+	leMFT_REF file_id;	/* Mft record of the file containing the
+				   reparse point attribute. */
+} __attribute__ ((__packed__)) REPARSE_INDEX_KEY;
+
+/*
+ * Quota flags (32-bit).
+ *
+ * The user quota flags.  Names explain meaning.
+ */
+enum {
+	QUOTA_FLAG_DEFAULT_LIMITS	= const_cpu_to_le32(0x00000001),
+	QUOTA_FLAG_LIMIT_REACHED	= const_cpu_to_le32(0x00000002),
+	QUOTA_FLAG_ID_DELETED		= const_cpu_to_le32(0x00000004),
+
+	QUOTA_FLAG_USER_MASK		= const_cpu_to_le32(0x00000007),
+	/* This is a bit mask for the user quota flags. */
+
+	/*
+	 * These flags are only present in the quota defaults index entry, i.e.
+	 * in the entry where owner_id = QUOTA_DEFAULTS_ID.
+	 */
+	QUOTA_FLAG_TRACKING_ENABLED	= const_cpu_to_le32(0x00000010),
+	QUOTA_FLAG_ENFORCEMENT_ENABLED	= const_cpu_to_le32(0x00000020),
+	QUOTA_FLAG_TRACKING_REQUESTED	= const_cpu_to_le32(0x00000040),
+	QUOTA_FLAG_LOG_THRESHOLD	= const_cpu_to_le32(0x00000080),
+
+	QUOTA_FLAG_LOG_LIMIT		= const_cpu_to_le32(0x00000100),
+	QUOTA_FLAG_OUT_OF_DATE		= const_cpu_to_le32(0x00000200),
+	QUOTA_FLAG_CORRUPT		= const_cpu_to_le32(0x00000400),
+	QUOTA_FLAG_PENDING_DELETES	= const_cpu_to_le32(0x00000800),
+};
+
+typedef le32 QUOTA_FLAGS;
+
+/*
+ * The system file FILE_Extend/$Quota contains two indexes $O and $Q. Quotas
+ * are on a per volume and per user basis.
+ *
+ * The $Q index contains one entry for each existing user_id on the volume. The
+ * index key is the user_id of the user/group owning this quota control entry,
+ * i.e. the key is the owner_id. The user_id of the owner of a file, i.e. the
+ * owner_id, is found in the standard information attribute. The collation rule
+ * for $Q is COLLATION_NTOFS_ULONG.
+ *
+ * The $O index contains one entry for each user/group who has been assigned
+ * a quota on that volume. The index key holds the SID of the user_id the
+ * entry belongs to, i.e. the owner_id. The collation rule for $O is
+ * COLLATION_NTOFS_SID.
+ *
+ * The $O index entry data is the user_id of the user corresponding to the SID.
+ * This user_id is used as an index into $Q to find the quota control entry
+ * associated with the SID.
+ *
+ * The $Q index entry data is the quota control entry and is defined below.
+ */
+typedef struct {
+	le32 version;		/* Currently equals 2. */
+	QUOTA_FLAGS flags;	/* Flags describing this quota entry. */
+	le64 bytes_used;	/* How many bytes of the quota are in use. */
+	sle64 change_time;	/* Last time this quota entry was changed. */
+	sle64 threshold;	/* Soft quota (-1 if not limited). */
+	sle64 limit;		/* Hard quota (-1 if not limited). */
+	sle64 exceeded_time;	/* How long the soft quota has been exceeded. */
+	SID sid;		/* The SID of the user/object associated with
+				   this quota entry.  Equals zero for the quota
+				   defaults entry (and in fact on a WinXP
+				   volume, it is not present at all). */
+} __attribute__ ((__packed__)) QUOTA_CONTROL_ENTRY;
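+
+/*
+ * Illustrative sketch only, not used by the driver: check whether a quota
+ * control entry has exceeded its hard limit, assuming, as documented above,
+ * that a limit of -1 means no limit.  The helper name is made up for this
+ * example.
+ */
+static inline int ntfs_example_quota_exceeded(const QUOTA_CONTROL_ENTRY *q)
+{
+	s64 limit = (s64)le64_to_cpu(q->limit);
+
+	/* A negative limit means the hard quota is disabled. */
+	if (limit < 0)
+		return 0;
+	return (s64)le64_to_cpu(q->bytes_used) > limit;
+}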
+
+/*
+ * Predefined owner_id values (32-bit).
+ */
+enum {
+	QUOTA_INVALID_ID	= const_cpu_to_le32(0x00000000),
+	QUOTA_DEFAULTS_ID	= const_cpu_to_le32(0x00000001),
+	QUOTA_FIRST_USER_ID	= const_cpu_to_le32(0x00000100),
+};
+
+/*
+ * Current constants for quota control entries.
+ */
+typedef enum {
+	/* Current version. */
+	QUOTA_VERSION	= 2,
+} QUOTA_CONTROL_ENTRY_CONSTANTS;
+
+/*
+ * Index entry flags (16-bit).
+ */
+enum {
+	INDEX_ENTRY_NODE = const_cpu_to_le16(1), /* This entry contains a
+			sub-node, i.e. a reference to an index block in form of
+			a virtual cluster number (see below). */
+	INDEX_ENTRY_END  = const_cpu_to_le16(2), /* This signifies the last
+			entry in an index block.  The index entry does not
+			represent a file but it can point to a sub-node. */
+
+	INDEX_ENTRY_SPACE_FILLER = const_cpu_to_le16(0xffff), /* gcc: Force
+			enum bit width to 16-bit. */
+} __attribute__ ((__packed__));
+
+typedef le16 INDEX_ENTRY_FLAGS;
+
+/*
+ * This the index entry header (see below).
+ */
+typedef struct {
+/*  0*/	union {
+		struct { /* Only valid when INDEX_ENTRY_END is not set. */
+			leMFT_REF indexed_file;	/* The mft reference of the file
+						   described by this index
+						   entry. Used for directory
+						   indexes. */
+		} __attribute__ ((__packed__)) dir;
+		struct { /* Used for views/indexes to find the entry's data. */
+			le16 data_offset;	/* Data byte offset from this
+						   INDEX_ENTRY. Follows the
+						   index key. */
+			le16 data_length;	/* Data length in bytes. */
+			le32 reservedV;		/* Reserved (zero). */
+		} __attribute__ ((__packed__)) vi;
+	} __attribute__ ((__packed__)) data;
+/*  8*/	le16 length;		 /* Byte size of this index entry, multiple of
+				    8-bytes. */
+/* 10*/	le16 key_length;	 /* Byte size of the key value, which is in the
+				    index entry. It follows field reserved. Not
+				    multiple of 8-bytes. */
+/* 12*/	INDEX_ENTRY_FLAGS flags; /* Bit field of INDEX_ENTRY_* flags. */
+/* 14*/	le16 reserved;		 /* Reserved/align to 8-byte boundary. */
+/* sizeof() = 16 bytes */
+} __attribute__ ((__packed__)) INDEX_ENTRY_HEADER;
+
+/*
+ * This is an index entry. A sequence of such entries follows each INDEX_HEADER
+ * structure. Together they make up a complete index. The index follows either
+ * an index root attribute or an index allocation attribute.
+ *
+ * NOTE: Before NTFS 3.0 only filename attributes were indexed.
+ */
+typedef struct {
+/*Ofs*/
+/*  0	INDEX_ENTRY_HEADER; -- Unfolded here as gcc dislikes unnamed structs. */
+	union {
+		struct { /* Only valid when INDEX_ENTRY_END is not set. */
+			leMFT_REF indexed_file;	/* The mft reference of the file
+						   described by this index
+						   entry. Used for directory
+						   indexes. */
+		} __attribute__ ((__packed__)) dir;
+		struct { /* Used for views/indexes to find the entry's data. */
+			le16 data_offset;	/* Data byte offset from this
+						   INDEX_ENTRY. Follows the
+						   index key. */
+			le16 data_length;	/* Data length in bytes. */
+			le32 reservedV;		/* Reserved (zero). */
+		} __attribute__ ((__packed__)) vi;
+	} __attribute__ ((__packed__)) data;
+	le16 length;		 /* Byte size of this index entry, multiple of
+				    8-bytes. */
+	le16 key_length;	 /* Byte size of the key value, which is in the
+				    index entry. It follows field reserved. Not
+				    multiple of 8-bytes. */
+	INDEX_ENTRY_FLAGS flags; /* Bit field of INDEX_ENTRY_* flags. */
+	le16 reserved;		 /* Reserved/align to 8-byte boundary. */
+
+/* 16*/	union {		/* The key of the indexed attribute. NOTE: Only present
+			   if INDEX_ENTRY_END bit in flags is not set. NOTE: On
+			   NTFS versions before 3.0 the only valid key is the
+			   FILE_NAME_ATTR. On NTFS 3.0+ the following
+			   additional index keys are defined: */
+		FILE_NAME_ATTR file_name;/* $I30 index in directories. */
+		SII_INDEX_KEY sii;	/* $SII index in $Secure. */
+		SDH_INDEX_KEY sdh;	/* $SDH index in $Secure. */
+		GUID object_id;		/* $O index in FILE_Extend/$ObjId: The
+					   object_id of the mft record found in
+					   the data part of the index. */
+		REPARSE_INDEX_KEY reparse;	/* $R index in
+						   FILE_Extend/$Reparse. */
+		SID sid;		/* $O index in FILE_Extend/$Quota:
+					   SID of the owner of the user_id. */
+		le32 owner_id;		/* $Q index in FILE_Extend/$Quota:
+					   user_id of the owner of the quota
+					   control entry in the data part of
+					   the index. */
+	} __attribute__ ((__packed__)) key;
+	/* The (optional) index data is inserted here when creating. */
+	// leVCN vcn;	/* If INDEX_ENTRY_NODE bit in flags is set, the last
+	//		   eight bytes of this index entry contain the virtual
+	//		   cluster number of the index block that holds the
+	//		   entries immediately preceding the current entry (the
+	//		   vcn references the corresponding cluster in the data
+	//		   of the non-resident index allocation attribute). If
+	//		   the key_length is zero, then the vcn immediately
+	//		   follows the INDEX_ENTRY_HEADER. Regardless of
+	//		   key_length, the address of the 8-byte boundary
+	//		   aligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
+	//		   (char*)ie + le16_to_cpu(ie->length) - sizeof(VCN),
+	//		   where sizeof(VCN) can be hardcoded as 8 if wanted. */
+} __attribute__ ((__packed__)) INDEX_ENTRY;
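+
+/*
+ * Illustrative sketch only, not used by the driver: walk the index entries
+ * following an index header and, for an entry with the INDEX_ENTRY_NODE bit
+ * set, read the sub-node vcn from the last eight bytes of the entry as
+ * described in the comment above.  The helper names are made up for this
+ * example.
+ */
+static inline INDEX_ENTRY *ntfs_example_ie_next(INDEX_ENTRY *ie)
+{
+	/* Entries are consecutive; each one is ie->length bytes long. */
+	return (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length));
+}
+
+static inline VCN ntfs_example_ie_subnode_vcn(INDEX_ENTRY *ie)
+{
+	/* Only meaningful if the INDEX_ENTRY_NODE bit is set in ie->flags. */
+	return (VCN)le64_to_cpu(*(le64*)((u8*)ie +
+			le16_to_cpu(ie->length) - sizeof(VCN)));
+}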
+
+/*
+ * Attribute: Bitmap (0xb0).
+ *
+ * Contains an array of bits (aka a bitfield).
+ *
+ * When used in conjunction with the index allocation attribute, each bit
+ * corresponds to one index block within the index allocation attribute. Thus
+ * the number of bits in the bitmap * index block size / cluster size is the
+ * number of clusters in the index allocation attribute.
+ */
+typedef struct {
+	u8 bitmap[0];			/* Array of bits. */
+} __attribute__ ((__packed__)) BITMAP_ATTR;
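+
+/*
+ * Illustrative sketch only, not used by the driver: test a single bit in a
+ * bitmap attribute value.  Bits are stored least significant bit first
+ * within each byte, which matches the way the cluster allocator in
+ * lcnalloc.c scans the lcn bitmap.  The helper name is made up for this
+ * example.
+ */
+static inline int ntfs_example_bitmap_bit_is_set(const BITMAP_ATTR *b,
+		s64 bit)
+{
+	/* Bit i lives in byte i / 8, at position i % 8 within that byte. */
+	return (b->bitmap[bit >> 3] >> (bit & 7)) & 1;
+}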
+
+/*
+ * The reparse point tag defines the type of the reparse point. It also
+ * includes several flags, which further describe the reparse point.
+ *
+ * The reparse point tag is an unsigned 32-bit value divided in three parts:
+ *
+ * 1. The least significant 16 bits (i.e. bits 0 to 15) specify the type of
+ *    the reparse point.
+ * 2. The 13 bits after this (i.e. bits 16 to 28) are reserved for future use.
+ * 3. The most significant three bits are flags describing the reparse point.
+ *    They are defined as follows:
+ *	bit 29: Name surrogate bit. If set, the filename is an alias for
+ *		another object in the system.
+ *	bit 30: High-latency bit. If set, accessing the first byte of data will
+ *		be slow. (E.g. the data is stored on a tape drive.)
+ *	bit 31: Microsoft bit. If set, the tag is owned by Microsoft. User
+ *		defined tags have to use zero here.
+ *
+ * These are the predefined reparse point tags:
+ */
+enum {
+	IO_REPARSE_TAG_IS_ALIAS		= const_cpu_to_le32(0x20000000),
+	IO_REPARSE_TAG_IS_HIGH_LATENCY	= const_cpu_to_le32(0x40000000),
+	IO_REPARSE_TAG_IS_MICROSOFT	= const_cpu_to_le32(0x80000000),
+
+	IO_REPARSE_TAG_RESERVED_ZERO	= const_cpu_to_le32(0x00000000),
+	IO_REPARSE_TAG_RESERVED_ONE	= const_cpu_to_le32(0x00000001),
+	IO_REPARSE_TAG_RESERVED_RANGE	= const_cpu_to_le32(0x00000001),
+
+	IO_REPARSE_TAG_NSS		= const_cpu_to_le32(0x68000005),
+	IO_REPARSE_TAG_NSS_RECOVER	= const_cpu_to_le32(0x68000006),
+	IO_REPARSE_TAG_SIS		= const_cpu_to_le32(0x68000007),
+	IO_REPARSE_TAG_DFS		= const_cpu_to_le32(0x68000008),
+
+	IO_REPARSE_TAG_MOUNT_POINT	= const_cpu_to_le32(0x88000003),
+
+	IO_REPARSE_TAG_HSM		= const_cpu_to_le32(0xa8000004),
+
+	IO_REPARSE_TAG_SYMBOLIC_LINK	= const_cpu_to_le32(0xe8000000),
+
+	IO_REPARSE_TAG_VALID_VALUES	= const_cpu_to_le32(0xe000ffff),
+};
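+
+/*
+ * Illustrative sketch only, not used by the driver: split a reparse tag into
+ * the pieces described above, i.e. the 16-bit type in the low word and the
+ * flag bits at the top.  The helper names are made up for this example.
+ */
+static inline u16 ntfs_example_reparse_tag_type(le32 tag)
+{
+	/* Bits 0 to 15 hold the reparse point type. */
+	return (u16)(le32_to_cpu(tag) & 0xffff);
+}
+
+static inline int ntfs_example_reparse_tag_is_microsoft(le32 tag)
+{
+	/* Bit 31 is set on tags owned by Microsoft. */
+	return (tag & IO_REPARSE_TAG_IS_MICROSOFT) != 0;
+}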
+
+/*
+ * Attribute: Reparse point (0xc0).
+ *
+ * NOTE: Can be resident or non-resident.
+ */
+typedef struct {
+	le32 reparse_tag;		/* Reparse point type (inc. flags). */
+	le16 reparse_data_length;	/* Byte size of reparse data. */
+	le16 reserved;			/* Align to 8-byte boundary. */
+	u8 reparse_data[0];		/* Meaning depends on reparse_tag. */
+} __attribute__ ((__packed__)) REPARSE_POINT;
+
+/*
+ * Attribute: Extended attribute (EA) information (0xd0).
+ *
+ * NOTE: Always resident. (Is this true???)
+ */
+typedef struct {
+	le16 ea_length;		/* Byte size of the packed extended
+				   attributes. */
+	le16 need_ea_count;	/* The number of extended attributes which have
+				   the NEED_EA bit set. */
+	le32 ea_query_length;	/* Byte size of the buffer required to query
+				   the extended attributes when calling
+				   ZwQueryEaFile() in Windows NT/2k. I.e. the
+				   byte size of the unpacked extended
+				   attributes. */
+} __attribute__ ((__packed__)) EA_INFORMATION;
+
+/*
+ * Extended attribute flags (8-bit).
+ */
+enum {
+	NEED_EA	= 0x80
+} __attribute__ ((__packed__));
+
+typedef u8 EA_FLAGS;
+
+/*
+ * Attribute: Extended attribute (EA) (0xe0).
+ *
+ * NOTE: Always non-resident. (Is this true?)
+ *
+ * Like the attribute list and the index buffer list, the EA attribute value is
+ * a sequence of EA_ATTR variable length records.
+ *
+ * FIXME: It appears weird that the EA name is not unicode. Is it true?
+ */
+typedef struct {
+	le32 next_entry_offset;	/* Offset to the next EA_ATTR. */
+	EA_FLAGS flags;		/* Flags describing the EA. */
+	u8 ea_name_length;	/* Length of the name of the EA in bytes. */
+	le16 ea_value_length;	/* Byte size of the EA's value. */
+	u8 ea_name[0];		/* Name of the EA. */
+	u8 ea_value[0];		/* The value of the EA. Immediately follows
+				   the name. */
+} __attribute__ ((__packed__)) EA_ATTR;
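+
+/*
+ * Illustrative sketch only, not used by the driver: step from one EA_ATTR
+ * record to the next via next_entry_offset.  The caller is assumed to stop
+ * once the end of the attribute value is reached; both that convention and
+ * the helper name are assumptions made for this example.
+ */
+static inline EA_ATTR *ntfs_example_ea_next(EA_ATTR *ea)
+{
+	/* next_entry_offset is relative to the start of this record. */
+	return (EA_ATTR*)((u8*)ea + le32_to_cpu(ea->next_entry_offset));
+}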
+
+/*
+ * Attribute: Property set (0xf0).
+ *
+ * Intended to support Native Structure Storage (NSS) - a feature removed from
+ * NTFS 3.0 during beta testing.
+ */
+typedef struct {
+	/* Irrelevant as feature unused. */
+} __attribute__ ((__packed__)) PROPERTY_SET;
+
+/*
+ * Attribute: Logged utility stream (0x100).
+ *
+ * NOTE: Can be resident or non-resident.
+ *
+ * Operations on this attribute are logged to the journal ($LogFile) like
+ * normal metadata changes.
+ *
+ * Used by the Encrypting File System (EFS). All encrypted files have this
+ * attribute with the name $EFS.
+ */
+typedef struct {
+	/* Can be anything the creator chooses. */
+	/* EFS uses it as follows: */
+	// FIXME: Type this info, verifying it along the way. (AIA)
+} __attribute__ ((__packed__)) LOGGED_UTILITY_STREAM, EFS_ATTR;
+
+#endif /* _LINUX_NTFS_LAYOUT_H */
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
new file mode 100644
index 0000000..23fd911
--- /dev/null
+++ b/fs/ntfs/lcnalloc.c
@@ -0,0 +1,1002 @@
+/*
+ * lcnalloc.c - Cluster (de)allocation code.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef NTFS_RW
+
+#include <linux/pagemap.h>
+
+#include "lcnalloc.h"
+#include "debug.h"
+#include "bitmap.h"
+#include "inode.h"
+#include "volume.h"
+#include "attrib.h"
+#include "malloc.h"
+#include "aops.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_cluster_free_from_rl_nolock - free clusters from runlist
+ * @vol:	mounted ntfs volume on which to free the clusters
+ * @rl:		runlist describing the clusters to free
+ *
+ * Free all the clusters described by the runlist @rl on the volume @vol.  In
+ * the case of an error being returned, at least some of the clusters were not
+ * freed.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - The volume lcn bitmap must be locked for writing on entry and is
+ *	      left locked on return.
+ */
+int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
+		const runlist_element *rl)
+{
+	struct inode *lcnbmp_vi = vol->lcnbmp_ino;
+	int ret = 0;
+
+	ntfs_debug("Entering.");
+	for (; rl->length; rl++) {
+		int err;
+
+		if (rl->lcn < 0)
+			continue;
+		err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length);
+		if (unlikely(err && (!ret || ret == -ENOMEM) && ret != err))
+			ret = err;
+	}
+	ntfs_debug("Done.");
+	return ret;
+}
+
+/**
+ * ntfs_cluster_alloc - allocate clusters on an ntfs volume
+ * @vol:	mounted ntfs volume on which to allocate the clusters
+ * @start_vcn:	vcn to use for the first allocated cluster
+ * @count:	number of clusters to allocate
+ * @start_lcn:	starting lcn at which to allocate the clusters (or -1 if none)
+ * @zone:	zone from which to allocate the clusters
+ *
+ * Allocate @count clusters preferably starting at cluster @start_lcn or at the
+ * current allocator position if @start_lcn is -1, on the mounted ntfs volume
+ * @vol. @zone is either DATA_ZONE for allocation of normal clusters or
+ * MFT_ZONE for allocation of clusters for the master file table, i.e. the
+ * $MFT/$DATA attribute.
+ *
+ * @start_vcn specifies the vcn of the first allocated cluster.  This makes
+ * merging the resulting runlist with the old runlist easier.
+ *
+ * You need to check the return value with IS_ERR().  If this is false, the
+ * function was successful and the return value is a runlist describing the
+ * allocated cluster(s).  If IS_ERR() is true, the function failed and
+ * PTR_ERR() gives you the error code.
+ *
+ * Notes on the allocation algorithm
+ * =================================
+ *
+ * There are two data zones.  First is the area between the end of the mft zone
+ * and the end of the volume, and second is the area between the start of the
+ * volume and the start of the mft zone.  On unmodified/standard NTFS 1.x
+ * volumes, the second data zone does not exist due to the mft zone being
+ * expanded to cover the start of the volume in order to reserve space for the
+ * mft bitmap attribute.
+ *
+ * This is not the prettiest function but the complexity stems from the need
+ * to implement the mft vs data zoned approach and from the fact that we have
+ * access to the lcn bitmap in portions of up to 8192 bytes at a time, so we
+ * need to cope with crossing over boundaries of two buffers.  Further, the
+ * fact that the allocator allows for caller supplied hints as to the location
+ * of where allocation should begin and the fact that the allocator keeps track
+ * of where in the data zones the next natural allocation should occur,
+ * contribute to the complexity of the function.  But it should all be
+ * worthwhile, because this allocator should: 1) be a full implementation of
+ * the MFT zone approach used by Windows NT, 2) cause reduction in
+ * fragmentation, and 3) be speedy in allocations (the code is not optimized
+ * for speed, but the algorithm is, so further speed improvements are probably
+ * possible).
+ *
+ * FIXME: We should be monitoring cluster allocation and increment the MFT zone
+ * size dynamically but this is something for the future.  We will just cause
+ * heavier fragmentation by not doing it and I am not even sure Windows would
+ * grow the MFT zone dynamically, so it might even be correct not to do this.
+ * The overhead in doing dynamic MFT zone expansion would be very large and
+ * unlikely to be worth the effort. (AIA)
+ *
+ * TODO: I have added in double the required zone position pointer wrap around
+ * logic which can be optimized to having only one of the two logic sets.
+ * However, having the double logic will work fine, but if we have only one of
+ * the sets and we get it wrong somewhere, then we get into trouble, so
+ * removing the duplicate logic requires _very_ careful consideration of _all_
+ * possible code paths.  So at least for now, I am leaving the double logic -
+ * better safe than sorry... (AIA)
+ *
+ * Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked
+ *	      on return.
+ *	    - This function takes the volume lcn bitmap lock for writing and
+ *	      modifies the bitmap contents.
+ */
+runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
+		const s64 count, const LCN start_lcn,
+		const NTFS_CLUSTER_ALLOCATION_ZONES zone)
+{
+	LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
+	LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
+	s64 clusters;
+	struct inode *lcnbmp_vi;
+	runlist_element *rl = NULL;
+	struct address_space *mapping;
+	struct page *page = NULL;
+	u8 *buf, *byte;
+	int err = 0, rlpos, rlsize, buf_size;
+	u8 pass, done_zones, search_zone, need_writeback = 0, bit;
+
+	ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn "
+			"0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn,
+			(unsigned long long)count,
+			(unsigned long long)start_lcn,
+			zone == MFT_ZONE ? "MFT" : "DATA");
+	BUG_ON(!vol);
+	lcnbmp_vi = vol->lcnbmp_ino;
+	BUG_ON(!lcnbmp_vi);
+	BUG_ON(start_vcn < 0);
+	BUG_ON(count < 0);
+	BUG_ON(start_lcn < -1);
+	BUG_ON(zone < FIRST_ZONE);
+	BUG_ON(zone > LAST_ZONE);
+
+	/* Return empty runlist if @count == 0 */
+	// FIXME: Do we want to just return NULL instead? (AIA)
+	if (!count) {
+		rl = ntfs_malloc_nofs(PAGE_SIZE);
+		if (!rl)
+			return ERR_PTR(-ENOMEM);
+		rl[0].vcn = start_vcn;
+		rl[0].lcn = LCN_RL_NOT_MAPPED;
+		rl[0].length = 0;
+		return rl;
+	}
+	/* Take the lcnbmp lock for writing. */
+	down_write(&vol->lcnbmp_lock);
+	/*
+	 * If no specific @start_lcn was requested, use the current data zone
+	 * position, otherwise use the requested @start_lcn but make sure it
+	 * lies outside the mft zone.  Also set done_zones to 0 (no zones done)
+	 * and pass depending on whether we are starting inside a zone (1) or
+	 * at the beginning of a zone (2).  If requesting from the MFT_ZONE,
+	 * we either start at the current position within the mft zone or at
+	 * the specified position.  If the latter is out of bounds then we start
+	 * at the beginning of the MFT_ZONE.
+	 */
+	done_zones = 0;
+	pass = 1;
+	/*
+	 * zone_start and zone_end are the current search range.  search_zone
+	 * is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of
+	 * volume) and 4 for data zone 2 (start of volume till start of mft
+	 * zone).
+	 */
+	zone_start = start_lcn;
+	if (zone_start < 0) {
+		if (zone == DATA_ZONE)
+			zone_start = vol->data1_zone_pos;
+		else
+			zone_start = vol->mft_zone_pos;
+		if (!zone_start) {
+			/*
+			 * Zone starts at beginning of volume which means a
+			 * single pass is sufficient.
+			 */
+			pass = 2;
+		}
+	} else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
+			zone_start < vol->mft_zone_end) {
+		zone_start = vol->mft_zone_end;
+		/*
+		 * Starting at beginning of data1_zone which means a single
+		 * pass in this zone is sufficient.
+		 */
+		pass = 2;
+	} else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
+			zone_start >= vol->mft_zone_end)) {
+		zone_start = vol->mft_lcn;
+		if (!vol->mft_zone_end)
+			zone_start = 0;
+		/*
+		 * Starting at beginning of volume which means a single pass
+		 * is sufficient.
+		 */
+		pass = 2;
+	}
+	if (zone == MFT_ZONE) {
+		zone_end = vol->mft_zone_end;
+		search_zone = 1;
+	} else /* if (zone == DATA_ZONE) */ {
+		/* Skip searching the mft zone. */
+		done_zones |= 1;
+		if (zone_start >= vol->mft_zone_end) {
+			zone_end = vol->nr_clusters;
+			search_zone = 2;
+		} else {
+			zone_end = vol->mft_zone_start;
+			search_zone = 4;
+		}
+	}
+	/*
+	 * bmp_pos is the current bit position inside the bitmap.  We use
+	 * bmp_initial_pos to determine whether or not to do a zone switch.
+	 */
+	bmp_pos = bmp_initial_pos = zone_start;
+
+	/* Loop until all clusters are allocated, i.e. clusters == 0. */
+	clusters = count;
+	rlpos = rlsize = 0;
+	mapping = lcnbmp_vi->i_mapping;
+	while (1) {
+		ntfs_debug("Start of outer while loop: done_zones 0x%x, "
+				"search_zone %i, pass %i, zone_start 0x%llx, "
+				"zone_end 0x%llx, bmp_initial_pos 0x%llx, "
+				"bmp_pos 0x%llx, rlpos %i, rlsize %i.",
+				done_zones, search_zone, pass,
+				(unsigned long long)zone_start,
+				(unsigned long long)zone_end,
+				(unsigned long long)bmp_initial_pos,
+				(unsigned long long)bmp_pos, rlpos, rlsize);
+		/* Loop until we run out of free clusters. */
+		last_read_pos = bmp_pos >> 3;
+		ntfs_debug("last_read_pos 0x%llx.",
+				(unsigned long long)last_read_pos);
+		if (last_read_pos > lcnbmp_vi->i_size) {
+			ntfs_debug("End of attribute reached.  "
+					"Skipping to zone_pass_done.");
+			goto zone_pass_done;
+		}
+		if (likely(page)) {
+			if (need_writeback) {
+				ntfs_debug("Marking page dirty.");
+				flush_dcache_page(page);
+				set_page_dirty(page);
+				need_writeback = 0;
+			}
+			ntfs_unmap_page(page);
+		}
+		page = ntfs_map_page(mapping, last_read_pos >>
+				PAGE_CACHE_SHIFT);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			ntfs_error(vol->sb, "Failed to map page.");
+			goto out;
+		}
+		buf_size = last_read_pos & ~PAGE_CACHE_MASK;
+		buf = page_address(page) + buf_size;
+		buf_size = PAGE_CACHE_SIZE - buf_size;
+		if (unlikely(last_read_pos + buf_size > lcnbmp_vi->i_size))
+			buf_size = lcnbmp_vi->i_size - last_read_pos;
+		buf_size <<= 3;
+		lcn = bmp_pos & 7;
+		bmp_pos &= ~7;
+		ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, "
+				"bmp_pos 0x%llx, need_writeback %i.", buf_size,
+				(unsigned long long)lcn,
+				(unsigned long long)bmp_pos, need_writeback);
+		while (lcn < buf_size && lcn + bmp_pos < zone_end) {
+			byte = buf + (lcn >> 3);
+			ntfs_debug("In inner while loop: buf_size %i, "
+					"lcn 0x%llx, bmp_pos 0x%llx, "
+					"need_writeback %i, byte ofs 0x%x, "
+					"*byte 0x%x.", buf_size,
+					(unsigned long long)lcn,
+					(unsigned long long)bmp_pos,
+					need_writeback,
+					(unsigned int)(lcn >> 3),
+					(unsigned int)*byte);
+			/* Skip full bytes. */
+			if (*byte == 0xff) {
+				lcn = (lcn + 8) & ~7;
+				ntfs_debug("Continuing while loop 1.");
+				continue;
+			}
+			bit = 1 << (lcn & 7);
+			ntfs_debug("bit %i.", bit);
+			/* If the bit is already set, go onto the next one. */
+			if (*byte & bit) {
+				lcn++;
+				ntfs_debug("Continuing while loop 2.");
+				continue;
+			}
+			/*
+			 * Allocate more memory if needed, including space for
+			 * the terminator element.
+			 * ntfs_malloc_nofs() operates on whole pages only.
+			 */
+			if ((rlpos + 2) * sizeof(*rl) > rlsize) {
+				runlist_element *rl2;
+
+				ntfs_debug("Reallocating memory.");
+				if (!rl)
+					ntfs_debug("First free bit is at LCN "
+							"0x%llx.",
+							(unsigned long long)
+							(lcn + bmp_pos));
+				rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
+				if (unlikely(!rl2)) {
+					err = -ENOMEM;
+					ntfs_error(vol->sb, "Failed to "
+							"allocate memory.");
+					goto out;
+				}
+				memcpy(rl2, rl, rlsize);
+				ntfs_free(rl);
+				rl = rl2;
+				rlsize += PAGE_SIZE;
+				ntfs_debug("Reallocated memory, rlsize 0x%x.",
+						rlsize);
+			}
+			/* Allocate the bitmap bit. */
+			*byte |= bit;
+			/* We need to write this bitmap page to disk. */
+			need_writeback = 1;
+			ntfs_debug("*byte 0x%x, need_writeback is set.",
+					(unsigned int)*byte);
+			/*
+			 * Coalesce with previous run if adjacent LCNs.
+			 * Otherwise, append a new run.
+			 */
+			ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), "
+					"prev_lcn 0x%llx, lcn 0x%llx, "
+					"bmp_pos 0x%llx, prev_run_len 0x%llx, "
+					"rlpos %i.",
+					(unsigned long long)(lcn + bmp_pos),
+					1ULL, (unsigned long long)prev_lcn,
+					(unsigned long long)lcn,
+					(unsigned long long)bmp_pos,
+					(unsigned long long)prev_run_len,
+					rlpos);
+			if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) {
+				ntfs_debug("Coalescing to run (lcn 0x%llx, "
+						"len 0x%llx).",
+						(unsigned long long)
+						rl[rlpos - 1].lcn,
+						(unsigned long long)
+						rl[rlpos - 1].length);
+				rl[rlpos - 1].length = ++prev_run_len;
+				ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), "
+						"prev_run_len 0x%llx.",
+						(unsigned long long)
+						rl[rlpos - 1].lcn,
+						(unsigned long long)
+						rl[rlpos - 1].length,
+						(unsigned long long)
+						prev_run_len);
+			} else {
+				if (likely(rlpos)) {
+					ntfs_debug("Adding new run, (previous "
+							"run lcn 0x%llx, "
+							"len 0x%llx).",
+							(unsigned long long)
+							rl[rlpos - 1].lcn,
+							(unsigned long long)
+							rl[rlpos - 1].length);
+					rl[rlpos].vcn = rl[rlpos - 1].vcn +
+							prev_run_len;
+				} else {
+					ntfs_debug("Adding new run, is first "
+							"run.");
+					rl[rlpos].vcn = start_vcn;
+				}
+				rl[rlpos].lcn = prev_lcn = lcn + bmp_pos;
+				rl[rlpos].length = prev_run_len = 1;
+				rlpos++;
+			}
+			/* Done? */
+			if (!--clusters) {
+				LCN tc;
+				/*
+				 * Update the current zone position.  Positions
+				 * of already scanned zones have been updated
+				 * during the respective zone switches.
+				 */
+				tc = lcn + bmp_pos + 1;
+				ntfs_debug("Done. Updating current zone "
+						"position, tc 0x%llx, "
+						"search_zone %i.",
+						(unsigned long long)tc,
+						search_zone);
+				switch (search_zone) {
+				case 1:
+					ntfs_debug("Before checks, "
+							"vol->mft_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->mft_zone_pos);
+					if (tc >= vol->mft_zone_end) {
+						vol->mft_zone_pos =
+								vol->mft_lcn;
+						if (!vol->mft_zone_end)
+							vol->mft_zone_pos = 0;
+					} else if ((bmp_initial_pos >=
+							vol->mft_zone_pos ||
+							tc > vol->mft_zone_pos)
+							&& tc >= vol->mft_lcn)
+						vol->mft_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->mft_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->mft_zone_pos);
+					break;
+				case 2:
+					ntfs_debug("Before checks, "
+							"vol->data1_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data1_zone_pos);
+					if (tc >= vol->nr_clusters)
+						vol->data1_zone_pos =
+							     vol->mft_zone_end;
+					else if ((bmp_initial_pos >=
+						    vol->data1_zone_pos ||
+						    tc > vol->data1_zone_pos)
+						    && tc >= vol->mft_zone_end)
+						vol->data1_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->data1_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data1_zone_pos);
+					break;
+				case 4:
+					ntfs_debug("Before checks, "
+							"vol->data2_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data2_zone_pos);
+					if (tc >= vol->mft_zone_start)
+						vol->data2_zone_pos = 0;
+					else if (bmp_initial_pos >=
+						      vol->data2_zone_pos ||
+						      tc > vol->data2_zone_pos)
+						vol->data2_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->data2_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data2_zone_pos);
+					break;
+				default:
+					BUG();
+				}
+				ntfs_debug("Finished.  Going to out.");
+				goto out;
+			}
+			lcn++;
+		}
+		bmp_pos += buf_size;
+		ntfs_debug("After inner while loop: buf_size 0x%x, lcn "
+				"0x%llx, bmp_pos 0x%llx, need_writeback %i.",
+				buf_size, (unsigned long long)lcn,
+				(unsigned long long)bmp_pos, need_writeback);
+		if (bmp_pos < zone_end) {
+			ntfs_debug("Continuing outer while loop, "
+					"bmp_pos 0x%llx, zone_end 0x%llx.",
+					(unsigned long long)bmp_pos,
+					(unsigned long long)zone_end);
+			continue;
+		}
+zone_pass_done:	/* Finished with the current zone pass. */
+		ntfs_debug("At zone_pass_done, pass %i.", pass);
+		if (pass == 1) {
+			/*
+			 * Now do pass 2, scanning the first part of the zone
+			 * we omitted in pass 1.
+			 */
+			pass = 2;
+			zone_end = zone_start;
+			switch (search_zone) {
+			case 1: /* mft_zone */
+				zone_start = vol->mft_zone_start;
+				break;
+			case 2: /* data1_zone */
+				zone_start = vol->mft_zone_end;
+				break;
+			case 4: /* data2_zone */
+				zone_start = 0;
+				break;
+			default:
+				BUG();
+			}
+			/* Sanity check. */
+			if (zone_end < zone_start)
+				zone_end = zone_start;
+			bmp_pos = zone_start;
+			ntfs_debug("Continuing outer while loop, pass 2, "
+					"zone_start 0x%llx, zone_end 0x%llx, "
+					"bmp_pos 0x%llx.",
+					(unsigned long long)zone_start,
+					(unsigned long long)zone_end,
+					(unsigned long long)bmp_pos);
+			continue;
+		} /* pass == 2 */
+done_zones_check:
+		ntfs_debug("At done_zones_check, search_zone %i, done_zones "
+				"before 0x%x, done_zones after 0x%x.",
+				search_zone, done_zones,
+				done_zones | search_zone);
+		done_zones |= search_zone;
+		if (done_zones < 7) {
+			ntfs_debug("Switching zone.");
+			/* Now switch to the next zone we haven't done yet. */
+			pass = 1;
+			switch (search_zone) {
+			case 1:
+				ntfs_debug("Switching from mft zone to data1 "
+						"zone.");
+				/* Update mft zone position. */
+				if (rlpos) {
+					LCN tc;
+
+					ntfs_debug("Before checks, "
+							"vol->mft_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->mft_zone_pos);
+					tc = rl[rlpos - 1].lcn +
+							rl[rlpos - 1].length;
+					if (tc >= vol->mft_zone_end) {
+						vol->mft_zone_pos =
+								vol->mft_lcn;
+						if (!vol->mft_zone_end)
+							vol->mft_zone_pos = 0;
+					} else if ((bmp_initial_pos >=
+							vol->mft_zone_pos ||
+							tc > vol->mft_zone_pos)
+							&& tc >= vol->mft_lcn)
+						vol->mft_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->mft_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->mft_zone_pos);
+				}
+				/* Switch from mft zone to data1 zone. */
+switch_to_data1_zone:		search_zone = 2;
+				zone_start = bmp_initial_pos =
+						vol->data1_zone_pos;
+				zone_end = vol->nr_clusters;
+				if (zone_start == vol->mft_zone_end)
+					pass = 2;
+				if (zone_start >= zone_end) {
+					vol->data1_zone_pos = zone_start =
+							vol->mft_zone_end;
+					pass = 2;
+				}
+				break;
+			case 2:
+				ntfs_debug("Switching from data1 zone to "
+						"data2 zone.");
+				/* Update data1 zone position. */
+				if (rlpos) {
+					LCN tc;
+
+					ntfs_debug("Before checks, "
+							"vol->data1_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data1_zone_pos);
+					tc = rl[rlpos - 1].lcn +
+							rl[rlpos - 1].length;
+					if (tc >= vol->nr_clusters)
+						vol->data1_zone_pos =
+							     vol->mft_zone_end;
+					else if ((bmp_initial_pos >=
+						    vol->data1_zone_pos ||
+						    tc > vol->data1_zone_pos)
+						    && tc >= vol->mft_zone_end)
+						vol->data1_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->data1_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data1_zone_pos);
+				}
+				/* Switch from data1 zone to data2 zone. */
+				search_zone = 4;
+				zone_start = bmp_initial_pos =
+						vol->data2_zone_pos;
+				zone_end = vol->mft_zone_start;
+				if (!zone_start)
+					pass = 2;
+				if (zone_start >= zone_end) {
+					vol->data2_zone_pos = zone_start =
+							bmp_initial_pos = 0;
+					pass = 2;
+				}
+				break;
+			case 4:
+				ntfs_debug("Switching from data2 zone to "
+						"data1 zone.");
+				/* Update data2 zone position. */
+				if (rlpos) {
+					LCN tc;
+
+					ntfs_debug("Before checks, "
+							"vol->data2_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data2_zone_pos);
+					tc = rl[rlpos - 1].lcn +
+							rl[rlpos - 1].length;
+					if (tc >= vol->mft_zone_start)
+						vol->data2_zone_pos = 0;
+					else if (bmp_initial_pos >=
+						      vol->data2_zone_pos ||
+						      tc > vol->data2_zone_pos)
+						vol->data2_zone_pos = tc;
+					ntfs_debug("After checks, "
+							"vol->data2_zone_pos "
+							"0x%llx.",
+							(unsigned long long)
+							vol->data2_zone_pos);
+				}
+				/* Switch from data2 zone to data1 zone. */
+				goto switch_to_data1_zone;
+			default:
+				BUG();
+			}
+			ntfs_debug("After zone switch, search_zone %i, "
+					"pass %i, bmp_initial_pos 0x%llx, "
+					"zone_start 0x%llx, zone_end 0x%llx.",
+					search_zone, pass,
+					(unsigned long long)bmp_initial_pos,
+					(unsigned long long)zone_start,
+					(unsigned long long)zone_end);
+			bmp_pos = zone_start;
+			if (zone_start == zone_end) {
+				ntfs_debug("Empty zone, going to "
+						"done_zones_check.");
+				/* Empty zone. Don't bother searching it. */
+				goto done_zones_check;
+			}
+			ntfs_debug("Continuing outer while loop.");
+			continue;
+		} /* done_zones == 7 */
+		ntfs_debug("All zones are finished.");
+		/*
+		 * All zones are finished!  If DATA_ZONE, shrink mft zone.  If
+		 * MFT_ZONE, we have really run out of space.
+		 */
+		mft_zone_size = vol->mft_zone_end - vol->mft_zone_start;
+		ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end "
+				"0x%llx, mft_zone_size 0x%llx.",
+				(unsigned long long)vol->mft_zone_start,
+				(unsigned long long)vol->mft_zone_end,
+				(unsigned long long)mft_zone_size);
+		if (zone == MFT_ZONE || mft_zone_size <= 0) {
+			ntfs_debug("No free clusters left, going to out.");
+			/* Really no more space left on device. */
+			err = -ENOSPC;
+			goto out;
+		} /* zone == DATA_ZONE && mft_zone_size > 0 */
+		ntfs_debug("Shrinking mft zone.");
+		zone_end = vol->mft_zone_end;
+		mft_zone_size >>= 1;
+		if (mft_zone_size > 0)
+			vol->mft_zone_end = vol->mft_zone_start + mft_zone_size;
+		else /* mft zone and data2 zone no longer exist. */
+			vol->data2_zone_pos = vol->mft_zone_start =
+					vol->mft_zone_end = 0;
+		if (vol->mft_zone_pos >= vol->mft_zone_end) {
+			vol->mft_zone_pos = vol->mft_lcn;
+			if (!vol->mft_zone_end)
+				vol->mft_zone_pos = 0;
+		}
+		bmp_pos = zone_start = bmp_initial_pos =
+				vol->data1_zone_pos = vol->mft_zone_end;
+		search_zone = 2;
+		pass = 2;
+		done_zones &= ~2;
+		ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, "
+				"vol->mft_zone_start 0x%llx, "
+				"vol->mft_zone_end 0x%llx, "
+				"vol->mft_zone_pos 0x%llx, search_zone 2, "
+				"pass 2, done_zones 0x%x, zone_start 0x%llx, "
+				"zone_end 0x%llx, vol->data1_zone_pos 0x%llx, "
+				"continuing outer while loop.",
+				(unsigned long long)mft_zone_size,
+				(unsigned long long)vol->mft_zone_start,
+				(unsigned long long)vol->mft_zone_end,
+				(unsigned long long)vol->mft_zone_pos,
+				done_zones, (unsigned long long)zone_start,
+				(unsigned long long)zone_end,
+				(unsigned long long)vol->data1_zone_pos);
+	}
+	ntfs_debug("After outer while loop.");
+out:
+	ntfs_debug("At out.");
+	/* Add runlist terminator element. */
+	if (likely(rl)) {
+		rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;
+		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
+		rl[rlpos].length = 0;
+	}
+	if (likely(page && !IS_ERR(page))) {
+		if (need_writeback) {
+			ntfs_debug("Marking page dirty.");
+			flush_dcache_page(page);
+			set_page_dirty(page);
+			need_writeback = 0;
+		}
+		ntfs_unmap_page(page);
+	}
+	if (likely(!err)) {
+		up_write(&vol->lcnbmp_lock);
+		ntfs_debug("Done.");
+		return rl;
+	}
+	ntfs_error(vol->sb, "Failed to allocate clusters, aborting "
+			"(error %i).", err);
+	if (rl) {
+		int err2;
+
+		if (err == -ENOSPC)
+			ntfs_debug("Not enough space to complete allocation, "
+					"err ENOSPC, first free lcn 0x%llx, "
+					"could allocate up to 0x%llx "
+					"clusters.",
+					(unsigned long long)rl[0].lcn,
+					(unsigned long long)count - clusters);
+		/* Deallocate all allocated clusters. */
+		ntfs_debug("Attempting rollback...");
+		err2 = ntfs_cluster_free_from_rl_nolock(vol, rl);
+		if (err2) {
+			ntfs_error(vol->sb, "Failed to rollback (error %i).  "
+					"Leaving inconsistent metadata!  "
+					"Unmount and run chkdsk.", err2);
+			NVolSetErrors(vol);
+		}
+		/* Free the runlist. */
+		ntfs_free(rl);
+	} else if (err == -ENOSPC)
+		ntfs_debug("No space left at all, err = ENOSPC, "
+				"first free lcn = 0x%llx.",
+				(unsigned long long)vol->data1_zone_pos);
+	up_write(&vol->lcnbmp_lock);
+	return ERR_PTR(err);
+}
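+
+/*
+ * Illustrative sketch only, never called: a typical caller of
+ * ntfs_cluster_alloc() as described in its comment above, i.e. allocate
+ * @count clusters from the data zone with no preferred start lcn, check the
+ * result with IS_ERR(), and leave rollback to ntfs_cluster_free_from_rl()
+ * plus ntfs_free() if later processing fails.  The function itself is made
+ * up for this example.
+ */
+static inline runlist_element *ntfs_example_alloc(ntfs_volume *vol, s64 count)
+{
+	runlist_element *rl;
+
+	rl = ntfs_cluster_alloc(vol, 0, count, -1, DATA_ZONE);
+	if (IS_ERR(rl))
+		/* PTR_ERR(rl) is the negative error code. */
+		return rl;
+	/*
+	 * The caller would now merge @rl into its runlist.  On failure it
+	 * would call ntfs_cluster_free_from_rl(vol, rl) and ntfs_free(rl).
+	 */
+	return rl;
+}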
+
+/**
+ * __ntfs_cluster_free - free clusters on an ntfs volume
+ * @vi:		vfs inode whose runlist describes the clusters to free
+ * @start_vcn:	vcn in the runlist of @vi at which to start freeing clusters
+ * @count:	number of clusters to free or -1 for all clusters
+ * @is_rollback:	if TRUE this is a rollback operation
+ *
+ * Free @count clusters starting at the cluster @start_vcn in the runlist
+ * described by the vfs inode @vi.
+ *
+ * If @count is -1, all clusters from @start_vcn to the end of the runlist are
+ * deallocated.  Thus, to completely free all clusters in a runlist, use
+ * @start_vcn = 0 and @count = -1.
+ *
+ * @is_rollback should always be FALSE, it is for internal use to rollback
+ * errors.  You probably want to use ntfs_cluster_free() instead.
+ *
+ * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
+ * has to deal with it later.
+ *
+ * Return the number of deallocated clusters (not counting sparse ones) on
+ * success and -errno on error.
+ *
+ * Locking: - The runlist described by @vi must be unlocked on entry and is
+ *	      unlocked on return.
+ *	    - This function takes the runlist lock of @vi for reading and
+ *	      sometimes for writing and sometimes modifies the runlist.
+ *	    - The volume lcn bitmap must be unlocked on entry and is unlocked
+ *	      on return.
+ *	    - This function takes the volume lcn bitmap lock for writing and
+ *	      modifies the bitmap contents.
+ */
+s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
+		const BOOL is_rollback)
+{
+	s64 delta, to_free, total_freed, real_freed;
+	ntfs_inode *ni;
+	ntfs_volume *vol;
+	struct inode *lcnbmp_vi;
+	runlist_element *rl;
+	int err;
+
+	BUG_ON(!vi);
+	ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count "
+			"0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn,
+			(unsigned long long)count,
+			is_rollback ? " (rollback)" : "");
+	ni = NTFS_I(vi);
+	vol = ni->vol;
+	lcnbmp_vi = vol->lcnbmp_ino;
+	BUG_ON(!lcnbmp_vi);
+	BUG_ON(start_vcn < 0);
+	BUG_ON(count < -1);
+	/*
+	 * Lock the lcn bitmap for writing but only if not rolling back.  We
+	 * must hold the lock all the way including through rollback otherwise
+	 * rollback is not possible because once we have cleared a bit and
+	 * dropped the lock, anyone could have set the bit again, thus
+	 * allocating the cluster for another use.
+	 */
+	if (likely(!is_rollback))
+		down_write(&vol->lcnbmp_lock);
+
+	total_freed = real_freed = 0;
+
+	/* This returns with ni->runlist locked for reading on success. */
+	rl = ntfs_find_vcn(ni, start_vcn, FALSE);
+	if (IS_ERR(rl)) {
+		if (!is_rollback)
+			ntfs_error(vol->sb, "Failed to find first runlist "
+					"element (error %li), aborting.",
+					PTR_ERR(rl));
+		err = PTR_ERR(rl);
+		goto err_out;
+	}
+	if (unlikely(rl->lcn < LCN_HOLE)) {
+		if (!is_rollback)
+			ntfs_error(vol->sb, "First runlist element has "
+					"invalid lcn, aborting.");
+		err = -EIO;
+		goto unl_err_out;
+	}
+	/* Find the starting cluster inside the run that needs freeing. */
+	delta = start_vcn - rl->vcn;
+
+	/* The number of clusters in this run that need freeing. */
+	to_free = rl->length - delta;
+	if (count >= 0 && to_free > count)
+		to_free = count;
+
+	if (likely(rl->lcn >= 0)) {
+		/* Do the actual freeing of the clusters in this run. */
+		err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta,
+				to_free, likely(!is_rollback) ? 0 : 1);
+		if (unlikely(err)) {
+			if (!is_rollback)
+				ntfs_error(vol->sb, "Failed to clear first run "
+						"(error %i), aborting.", err);
+			goto unl_err_out;
+		}
+		/* We have freed @to_free real clusters. */
+		real_freed = to_free;
+	}
+	/* Go to the next run and adjust the number of clusters left to free. */
+	++rl;
+	if (count >= 0)
+		count -= to_free;
+
+	/* Keep track of the total "freed" clusters, including sparse ones. */
+	total_freed = to_free;
+	/*
+	 * Loop over the remaining runs, using @count as a capping value, and
+	 * free them.
+	 */
+	for (; rl->length && count != 0; ++rl) {
+		if (unlikely(rl->lcn < LCN_HOLE)) {
+			VCN vcn;
+
+			/*
+			 * Attempt to map runlist, dropping runlist lock for
+			 * the duration.
+			 */
+			vcn = rl->vcn;
+			up_read(&ni->runlist.lock);
+			err = ntfs_map_runlist(ni, vcn);
+			if (err) {
+				if (!is_rollback)
+					ntfs_error(vol->sb, "Failed to map "
+							"runlist fragment.");
+				if (err == -EINVAL || err == -ENOENT)
+					err = -EIO;
+				goto err_out;
+			}
+			/*
+			 * This returns with ni->runlist locked for reading on
+			 * success.
+			 */
+			rl = ntfs_find_vcn(ni, vcn, FALSE);
+			if (IS_ERR(rl)) {
+				err = PTR_ERR(rl);
+				if (!is_rollback)
+					ntfs_error(vol->sb, "Failed to find "
+							"subsequent runlist "
+							"element.");
+				goto err_out;
+			}
+			if (unlikely(rl->lcn < LCN_HOLE)) {
+				if (!is_rollback)
+					ntfs_error(vol->sb, "Runlist element "
+							"has invalid lcn "
+							"(0x%llx).",
+							(unsigned long long)
+							rl->lcn);
+				err = -EIO;
+				goto unl_err_out;
+			}
+		}
+		/* The number of clusters in this run that need freeing. */
+		to_free = rl->length;
+		if (count >= 0 && to_free > count)
+			to_free = count;
+
+		if (likely(rl->lcn >= 0)) {
+			/* Do the actual freeing of the clusters in the run. */
+			err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn,
+					to_free, likely(!is_rollback) ? 0 : 1);
+			if (unlikely(err)) {
+				if (!is_rollback)
+					ntfs_error(vol->sb, "Failed to clear "
+							"subsequent run.");
+				goto unl_err_out;
+			}
+			/* We have freed @to_free real clusters. */
+			real_freed += to_free;
+		}
+		/* Adjust the number of clusters left to free. */
+		if (count >= 0)
+			count -= to_free;
+
+		/* Update the total done clusters. */
+		total_freed += to_free;
+	}
+	up_read(&ni->runlist.lock);
+	if (likely(!is_rollback))
+		up_write(&vol->lcnbmp_lock);
+
+	BUG_ON(count > 0);
+
+	/* We are done.  Return the number of actually freed clusters. */
+	ntfs_debug("Done.");
+	return real_freed;
+unl_err_out:
+	up_read(&ni->runlist.lock);
+err_out:
+	if (is_rollback)
+		return err;
+	/* If no real clusters were freed, no need to rollback. */
+	if (!real_freed) {
+		up_write(&vol->lcnbmp_lock);
+		return err;
+	}
+	/*
+	 * Attempt to rollback and if that succeeds just return the error code.
+	 * If rollback fails, set the volume errors flag, emit an error
+	 * message, and return the error code.
+	 */
+	delta = __ntfs_cluster_free(vi, start_vcn, total_freed, TRUE);
+	if (delta < 0) {
+		ntfs_error(vol->sb, "Failed to rollback (error %i).  Leaving "
+				"inconsistent metadata!  Unmount and run "
+				"chkdsk.", (int)delta);
+		NVolSetErrors(vol);
+	}
+	up_write(&vol->lcnbmp_lock);
+	ntfs_error(vol->sb, "Aborting (error %i).", err);
+	return err;
+}
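+
+/*
+ * Illustrative sketch only, never called: most callers are expected to go
+ * through the ntfs_cluster_free() wrapper from lcnalloc.h rather than
+ * calling __ntfs_cluster_free() directly, e.g. to free every cluster from
+ * @start_vcn to the end of the runlist.  The function below is made up for
+ * this example.
+ */
+static inline s64 ntfs_example_free_tail(struct inode *vi, VCN start_vcn)
+{
+	/* A @count of -1 frees from @start_vcn to the end of the runlist. */
+	return ntfs_cluster_free(vi, start_vcn, -1);
+}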
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h
new file mode 100644
index 0000000..4cac1c0
--- /dev/null
+++ b/fs/ntfs/lcnalloc.h
@@ -0,0 +1,112 @@
+/*
+ * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation.  Part of the
+ *		Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_LCNALLOC_H
+#define _LINUX_NTFS_LCNALLOC_H
+
+#ifdef NTFS_RW
+
+#include <linux/fs.h>
+
+#include "types.h"
+#include "runlist.h"
+#include "volume.h"
+
+typedef enum {
+	FIRST_ZONE	= 0,	/* For sanity checking. */
+	MFT_ZONE	= 0,	/* Allocate from $MFT zone. */
+	DATA_ZONE	= 1,	/* Allocate from $DATA zone. */
+	LAST_ZONE	= 1,	/* For sanity checking. */
+} NTFS_CLUSTER_ALLOCATION_ZONES;
+
+extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol,
+		const VCN start_vcn, const s64 count, const LCN start_lcn,
+		const NTFS_CLUSTER_ALLOCATION_ZONES zone);
+
+extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
+		s64 count, const BOOL is_rollback);
+
+/**
+ * ntfs_cluster_free - free clusters on an ntfs volume
+ * @vi:		vfs inode whose runlist describes the clusters to free
+ * @start_vcn:	vcn in the runlist of @vi at which to start freeing clusters
+ * @count:	number of clusters to free or -1 for all clusters
+ *
+ * Free @count clusters starting at the cluster @start_vcn in the runlist
+ * described by the vfs inode @vi.
+ *
+ * If @count is -1, all clusters from @start_vcn to the end of the runlist are
+ * deallocated.  Thus, to completely free all clusters in a runlist, use
+ * @start_vcn = 0 and @count = -1.
+ *
+ * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
+ * has to deal with it later.
+ *
+ * Return the number of deallocated clusters (not counting sparse ones) on
+ * success and -errno on error.
+ *
+ * Locking: - The runlist described by @vi must be unlocked on entry and is
+ *	      unlocked on return.
+ *	    - This function takes the runlist lock of @vi for reading and
+ *	      sometimes for writing and sometimes modifies the runlist.
+ *	    - The volume lcn bitmap must be unlocked on entry and is unlocked
+ *	      on return.
+ *	    - This function takes the volume lcn bitmap lock for writing and
+ *	      modifies the bitmap contents.
+ */
+static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
+		s64 count)
+{
+	return __ntfs_cluster_free(vi, start_vcn, count, FALSE);
+}
+
+extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
+		const runlist_element *rl);
+
+/**
+ * ntfs_cluster_free_from_rl - free clusters from runlist
+ * @vol:	mounted ntfs volume on which to free the clusters
+ * @rl:		runlist describing the clusters to free
+ *
+ * Free all the clusters described by the runlist @rl on the volume @vol.  In
+ * the case of an error being returned, at least some of the clusters were not
+ * freed.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: This function takes the volume lcn bitmap lock for writing and
+ *	    modifies the bitmap contents.
+ */
+static inline int ntfs_cluster_free_from_rl(ntfs_volume *vol,
+		const runlist_element *rl)
+{
+	int ret;
+
+	down_write(&vol->lcnbmp_lock);
+	ret = ntfs_cluster_free_from_rl_nolock(vol, rl);
+	up_write(&vol->lcnbmp_lock);
+	return ret;
+}
+
+#endif /* NTFS_RW */
+
+#endif /* defined _LINUX_NTFS_LCNALLOC_H */
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
new file mode 100644
index 0000000..5e280ab
--- /dev/null
+++ b/fs/ntfs/logfile.c
@@ -0,0 +1,705 @@
+/*
+ * logfile.c - NTFS kernel journal handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2002-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef NTFS_RW
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/buffer_head.h>
+#include <linux/bitops.h>
+
+#include "attrib.h"
+#include "aops.h"
+#include "debug.h"
+#include "logfile.h"
+#include "malloc.h"
+#include "volume.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_check_restart_page_header - check the page header for consistency
+ * @vi:		$LogFile inode to which the restart page header belongs
+ * @rp:		restart page header to check
+ * @pos:	position in @vi at which the restart page header resides
+ *
+ * Check the restart page header @rp for consistency and return TRUE if it is
+ * consistent and FALSE otherwise.
+ *
+ * This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
+ * require the full restart page.
+ */
+static BOOL ntfs_check_restart_page_header(struct inode *vi,
+		RESTART_PAGE_HEADER *rp, s64 pos)
+{
+	u32 logfile_system_page_size, logfile_log_page_size;
+	u16 usa_count, usa_ofs, usa_end, ra_ofs;
+
+	ntfs_debug("Entering.");
+	/*
+	 * If the system or log page sizes are smaller than the ntfs block size
+	 * or either is not a power of 2 we cannot handle this log file.
+	 */
+	logfile_system_page_size = le32_to_cpu(rp->system_page_size);
+	logfile_log_page_size = le32_to_cpu(rp->log_page_size);
+	if (logfile_system_page_size < NTFS_BLOCK_SIZE ||
+			logfile_log_page_size < NTFS_BLOCK_SIZE ||
+			logfile_system_page_size &
+			(logfile_system_page_size - 1) ||
+			logfile_log_page_size & (logfile_log_page_size - 1)) {
+		ntfs_error(vi->i_sb, "$LogFile uses unsupported page size.");
+		return FALSE;
+	}
+	/*
+	 * We must be either at !pos (1st restart page) or at pos = system page
+	 * size (2nd restart page).
+	 */
+	if (pos && pos != logfile_system_page_size) {
+		ntfs_error(vi->i_sb, "Found restart area in incorrect "
+				"position in $LogFile.");
+		return FALSE;
+	}
+	/* We only know how to handle version 1.1. */
+	if (sle16_to_cpu(rp->major_ver) != 1 ||
+			sle16_to_cpu(rp->minor_ver) != 1) {
+		ntfs_error(vi->i_sb, "$LogFile version %i.%i is not "
+				"supported.  (This driver supports version "
+				"1.1 only.)", (int)sle16_to_cpu(rp->major_ver),
+				(int)sle16_to_cpu(rp->minor_ver));
+		return FALSE;
+	}
+	/* Verify the size of the update sequence array. */
+	usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS);
+	if (usa_count != le16_to_cpu(rp->usa_count)) {
+		ntfs_error(vi->i_sb, "$LogFile restart page specifies "
+				"inconsistent update sequence array count.");
+		return FALSE;
+	}
+	/* Verify the position of the update sequence array. */
+	usa_ofs = le16_to_cpu(rp->usa_ofs);
+	usa_end = usa_ofs + usa_count * sizeof(u16);
+	if (usa_ofs < sizeof(RESTART_PAGE_HEADER) ||
+			usa_end > NTFS_BLOCK_SIZE - sizeof(u16)) {
+		ntfs_error(vi->i_sb, "$LogFile restart page specifies "
+				"inconsistent update sequence array offset.");
+		return FALSE;
+	}
+	/*
+	 * Verify the position of the restart area.  It must be:
+	 *	- aligned to 8-byte boundary,
+	 *	- after the update sequence array, and
+	 *	- within the system page size.
+	 */
+	ra_ofs = le16_to_cpu(rp->restart_area_offset);
+	if (ra_ofs & 7 || ra_ofs < usa_end ||
+			ra_ofs > logfile_system_page_size) {
+		ntfs_error(vi->i_sb, "$LogFile restart page specifies "
+				"inconsistent restart area offset.");
+		return FALSE;
+	}
+	/*
+	 * Only restart pages modified by chkdsk are allowed to have chkdsk_lsn
+	 * set.
+	 */
+	if (!ntfs_is_chkd_record(rp->magic) && sle64_to_cpu(rp->chkdsk_lsn)) {
+		ntfs_error(vi->i_sb, "$LogFile restart page is not modified "
+				"by chkdsk but a chkdsk LSN is specified.");
+		return FALSE;
+	}
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * ntfs_check_restart_area - check the restart area for consistency
+ * @vi:		$LogFile inode to which the restart page belongs
+ * @rp:		restart page whose restart area to check
+ *
+ * Check the restart area of the restart page @rp for consistency and return
+ * TRUE if it is consistent and FALSE otherwise.
+ *
+ * This function assumes that the restart page header has already been
+ * consistency checked.
+ *
+ * This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
+ * require the full restart page.
+ */
+static BOOL ntfs_check_restart_area(struct inode *vi, RESTART_PAGE_HEADER *rp)
+{
+	u64 file_size;
+	RESTART_AREA *ra;
+	u16 ra_ofs, ra_len, ca_ofs;
+	u8 fs_bits;
+
+	ntfs_debug("Entering.");
+	ra_ofs = le16_to_cpu(rp->restart_area_offset);
+	ra = (RESTART_AREA*)((u8*)rp + ra_ofs);
+	/*
+	 * Everything before ra->file_size must be before the first word
+	 * protected by an update sequence number.  This ensures that it is
+	 * safe to access ra->client_array_offset.
+	 */
+	if (ra_ofs + offsetof(RESTART_AREA, file_size) >
+			NTFS_BLOCK_SIZE - sizeof(u16)) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"inconsistent file offset.");
+		return FALSE;
+	}
+	/*
+	 * Now that we can access ra->client_array_offset, make sure everything
+	 * up to the log client array is before the first word protected by an
+	 * update sequence number.  This ensures we can access all of the
+	 * restart area elements safely.  Also, the client array offset must be
+	 * aligned to an 8-byte boundary.
+	 */
+	ca_ofs = le16_to_cpu(ra->client_array_offset);
+	if (((ca_ofs + 7) & ~7) != ca_ofs ||
+			ra_ofs + ca_ofs > NTFS_BLOCK_SIZE - sizeof(u16)) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"inconsistent client array offset.");
+		return FALSE;
+	}
+	/*
+	 * The restart area must end within the system page size both when
+	 * calculated manually and as specified by ra->restart_area_length.
+	 * Also, the calculated length must not exceed the specified length.
+	 */
+	ra_len = ca_ofs + le16_to_cpu(ra->log_clients) *
+			sizeof(LOG_CLIENT_RECORD);
+	if (ra_ofs + ra_len > le32_to_cpu(rp->system_page_size) ||
+			ra_ofs + le16_to_cpu(ra->restart_area_length) >
+			le32_to_cpu(rp->system_page_size) ||
+			ra_len > le16_to_cpu(ra->restart_area_length)) {
+		ntfs_error(vi->i_sb, "$LogFile restart area is out of bounds "
+				"of the system page size specified by the "
+				"restart page header and/or the specified "
+				"restart area length is inconsistent.");
+		return FALSE;
+	}
+	/*
+	 * The ra->client_free_list and ra->client_in_use_list must be either
+	 * LOGFILE_NO_CLIENT or less than ra->log_clients; otherwise they would
+	 * overflow the client array.
+	 */
+	if ((ra->client_free_list != LOGFILE_NO_CLIENT &&
+			le16_to_cpu(ra->client_free_list) >=
+			le16_to_cpu(ra->log_clients)) ||
+			(ra->client_in_use_list != LOGFILE_NO_CLIENT &&
+			le16_to_cpu(ra->client_in_use_list) >=
+			le16_to_cpu(ra->log_clients))) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"overflowing client free and/or in use lists.");
+		return FALSE;
+	}
+	/*
+	 * Check ra->seq_number_bits against ra->file_size for consistency.
+	 * We cannot just use ffs() because the file size is not a power of 2.
+	 */
+	file_size = (u64)sle64_to_cpu(ra->file_size);
+	fs_bits = 0;
+	while (file_size) {
+		file_size >>= 1;
+		fs_bits++;
+	}
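+	/*
+	 * Worked example (illustrative): a 4 MiB log file has file_size
+	 * 0x400000, which needs 23 bits, so a consistent restart area would
+	 * specify seq_number_bits = 67 - 23 = 44.
+	 */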
+	if (le32_to_cpu(ra->seq_number_bits) != 67 - fs_bits) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"inconsistent sequence number bits.");
+		return FALSE;
+	}
+	/* The log record header length must be a multiple of 8. */
+	if (((le16_to_cpu(ra->log_record_header_length) + 7) & ~7) !=
+			le16_to_cpu(ra->log_record_header_length)) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"inconsistent log record header length.");
+		return FALSE;
+	}
+	/* Ditto for the log page data offset. */
+	if (((le16_to_cpu(ra->log_page_data_offset) + 7) & ~7) !=
+			le16_to_cpu(ra->log_page_data_offset)) {
+		ntfs_error(vi->i_sb, "$LogFile restart area specifies "
+				"inconsistent log page data offset.");
+		return FALSE;
+	}
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * ntfs_check_log_client_array - check the log client array for consistency
+ * @vi:		$LogFile inode to which the restart page belongs
+ * @rp:		restart page whose log client array to check
+ *
+ * Check the log client array of the restart page @rp for consistency and
+ * return TRUE if it is consistent and FALSE otherwise.
+ *
+ * This function assumes that the restart page header and the restart area have
+ * already been consistency checked.
+ *
+ * Unlike ntfs_check_restart_page_header() and ntfs_check_restart_area(), this
+ * function needs @rp->system_page_size bytes in @rp, i.e. it requires the full
+ * restart page and the page must be multi sector transfer deprotected.
+ */
+static BOOL ntfs_check_log_client_array(struct inode *vi,
+		RESTART_PAGE_HEADER *rp)
+{
+	RESTART_AREA *ra;
+	LOG_CLIENT_RECORD *ca, *cr;
+	u16 nr_clients, idx;
+	BOOL in_free_list, idx_is_first;
+
+	ntfs_debug("Entering.");
+	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
+	ca = (LOG_CLIENT_RECORD*)((u8*)ra +
+			le16_to_cpu(ra->client_array_offset));
+	/*
+	 * Check the ra->client_free_list first and then check the
+	 * ra->client_in_use_list.  Check each of the log client records in
+	 * each of the lists and check that the array does not overflow the
+	 * ra->log_clients value.  Also keep track of the number of records
+	 * visited as there cannot be more than ra->log_clients records and
+	 * that way we detect any loops within a list.
+	 */
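+	/*
+	 * Illustrative example: with log_clients == 1, each of the two lists
+	 * walked below contains at most the single record at index 0; should
+	 * that record's next_client point back at itself, the nr_clients
+	 * countdown catches the loop on the very next iteration.
+	 */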
+	nr_clients = le16_to_cpu(ra->log_clients);
+	idx = le16_to_cpu(ra->client_free_list);
+	in_free_list = TRUE;
+check_list:
+	for (idx_is_first = TRUE; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--,
+			idx = le16_to_cpu(cr->next_client)) {
+		if (!nr_clients || idx >= le16_to_cpu(ra->log_clients))
+			goto err_out;
+		/* Set @cr to the current log client record. */
+		cr = ca + idx;
+		/* The first log client record must not have a prev_client. */
+		if (idx_is_first) {
+			if (cr->prev_client != LOGFILE_NO_CLIENT)
+				goto err_out;
+			idx_is_first = FALSE;
+		}
+	}
+	/* Switch to and check the in use list if we just did the free list. */
+	if (in_free_list) {
+		in_free_list = FALSE;
+		idx = le16_to_cpu(ra->client_in_use_list);
+		goto check_list;
+	}
+	ntfs_debug("Done.");
+	return TRUE;
+err_out:
+	ntfs_error(vi->i_sb, "$LogFile log client array is corrupt.");
+	return FALSE;
+}
+
+/**
+ * ntfs_check_and_load_restart_page - check the restart page for consistency
+ * @vi:		$LogFile inode to which the restart page belongs
+ * @rp:		restart page to check
+ * @pos:	position in @vi at which the restart page resides
+ * @wrp:	copy of the multi sector transfer deprotected restart page
+ *
+ * Check the restart page @rp for consistency and return TRUE if it is
+ * consistent and FALSE otherwise.
+ *
+ * This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
+ * require the full restart page.
+ *
+ * If @wrp is not NULL, on success, *@wrp will point to a buffer containing a
+ * copy of the complete multi sector transfer deprotected page.  On failure,
+ * *@wrp is undefined.
+ */
+static BOOL ntfs_check_and_load_restart_page(struct inode *vi,
+		RESTART_PAGE_HEADER *rp, s64 pos, RESTART_PAGE_HEADER **wrp)
+{
+	RESTART_AREA *ra;
+	RESTART_PAGE_HEADER *trp;
+	int size;
+	BOOL ret;
+
+	ntfs_debug("Entering.");
+	/* Check the restart page header for consistency. */
+	if (!ntfs_check_restart_page_header(vi, rp, pos)) {
+		/* Error output already done inside the function. */
+		return FALSE;
+	}
+	/* Check the restart area for consistency. */
+	if (!ntfs_check_restart_area(vi, rp)) {
+		/* Error output already done inside the function. */
+		return FALSE;
+	}
+	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
+	/*
+	 * Allocate a buffer to store the whole restart page so we can multi
+	 * sector transfer deprotect it.
+	 */
+	trp = ntfs_malloc_nofs(le32_to_cpu(rp->system_page_size));
+	if (!trp) {
+		ntfs_error(vi->i_sb, "Failed to allocate memory for $LogFile "
+				"restart page buffer.");
+		return FALSE;
+	}
+	/*
+	 * Read the whole of the restart page into the buffer.  If it fits
+	 * completely inside @rp, just copy it from there.  Otherwise map all
+	 * the required pages and copy the data from them.
+	 */
+	size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK);
+	if (size >= le32_to_cpu(rp->system_page_size)) {
+		memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
+	} else {
+		pgoff_t idx;
+		struct page *page;
+		int have_read, to_read;
+
+		/* First copy what we already have in @rp. */
+		memcpy(trp, rp, size);
+		/* Copy the remaining data one page at a time. */
+		have_read = size;
+		to_read = le32_to_cpu(rp->system_page_size) - size;
+		idx = (pos + size) >> PAGE_CACHE_SHIFT;
+		BUG_ON((pos + size) & ~PAGE_CACHE_MASK);
+		do {
+			page = ntfs_map_page(vi->i_mapping, idx);
+			if (IS_ERR(page)) {
+				ntfs_error(vi->i_sb, "Error mapping $LogFile "
+						"page (index %lu).", idx);
+				goto err_out;
+			}
+			size = min_t(int, to_read, PAGE_CACHE_SIZE);
+			memcpy((u8*)trp + have_read, page_address(page), size);
+			ntfs_unmap_page(page);
+			have_read += size;
+			to_read -= size;
+			idx++;
+		} while (to_read > 0);
+	}
+	/* Perform the multi sector transfer deprotection on the buffer. */
+	if (post_read_mst_fixup((NTFS_RECORD*)trp,
+			le32_to_cpu(rp->system_page_size))) {
+		ntfs_error(vi->i_sb, "Multi sector transfer error detected in "
+				"$LogFile restart page.");
+		goto err_out;
+	}
+	/* Check the log client records for consistency. */
+	ret = ntfs_check_log_client_array(vi, trp);
+	if (ret && wrp)
+		*wrp = trp;
+	else
+		ntfs_free(trp);
+	ntfs_debug("Done.");
+	return ret;
+err_out:
+	ntfs_free(trp);
+	return FALSE;
+}
+
+/**
+ * ntfs_check_logfile - check the journal $LogFile for consistency
+ * @log_vi:	struct inode of loaded journal $LogFile to check
+ *
+ * Check the $LogFile journal for consistency and return TRUE if it is
+ * consistent and FALSE if not.
+ *
+ * At present we only check the two restart pages and ignore the log record
+ * pages.
+ *
+ * Note that the MstProtected flag is not set on the $LogFile inode and hence
+ * when reading pages they are not deprotected.  This is because we do not know
+ * if the $LogFile was created on a system with a different page size to ours
+ * yet and mst deprotection would fail if our page size is smaller.
+ */
+BOOL ntfs_check_logfile(struct inode *log_vi)
+{
+	s64 size, pos, rstr1_pos, rstr2_pos;
+	ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
+	struct address_space *mapping = log_vi->i_mapping;
+	struct page *page = NULL;
+	u8 *kaddr = NULL;
+	RESTART_PAGE_HEADER *rstr1_ph = NULL;
+	RESTART_PAGE_HEADER *rstr2_ph = NULL;
+	int log_page_size, log_page_mask, ofs;
+	BOOL logfile_is_empty = TRUE;
+	BOOL rstr1_found = FALSE;
+	BOOL rstr2_found = FALSE;
+	u8 log_page_bits;
+
+	ntfs_debug("Entering.");
+	/* An empty $LogFile must have been clean before it got emptied. */
+	if (NVolLogFileEmpty(vol))
+		goto is_empty;
+	size = log_vi->i_size;
+	/* Make sure the file doesn't exceed the maximum allowed size. */
+	if (size > MaxLogFileSize)
+		size = MaxLogFileSize;
+	/*
+	 * Truncate size to a multiple of the page cache size or the default
+	 * log page size if the page cache size is between the default log page
+	 * size and twice that.
+	 */
+	if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <=
+			DefaultLogPageSize * 2)
+		log_page_size = DefaultLogPageSize;
+	else
+		log_page_size = PAGE_CACHE_SIZE;
+	log_page_mask = log_page_size - 1;
+	/*
+	 * Use generic_ffs() instead of ffs() to enable the compiler to
+	 * optimize log_page_size and log_page_bits into constants.
+	 */
+	log_page_bits = generic_ffs(log_page_size) - 1;
+	size &= ~(log_page_size - 1);
+	/*
+	 * Ensure the log file is big enough to store at least the two restart
+	 * pages and the minimum number of log record pages.
+	 */
+	if (size < log_page_size * 2 || (size - log_page_size * 2) >>
+			log_page_bits < MinLogRecordPages) {
+		ntfs_error(vol->sb, "$LogFile is too small.");
+		return FALSE;
+	}
+	/*
+	 * Read through the file looking for a restart page.  Since the restart
+	 * page header is at the beginning of a page we only need to search at
+	 * what could be the beginning of a page (for each page size) rather
+	 * than scanning the whole file byte by byte.  If all potential places
+	 * contain empty and uninitialized records, the log file can be assumed
+	 * to be empty.
+	 */
+	for (pos = 0; pos < size; pos <<= 1) {
+		pgoff_t idx = pos >> PAGE_CACHE_SHIFT;
+		if (!page || page->index != idx) {
+			if (page)
+				ntfs_unmap_page(page);
+			page = ntfs_map_page(mapping, idx);
+			if (IS_ERR(page)) {
+				ntfs_error(vol->sb, "Error mapping $LogFile "
+						"page (index %lu).", idx);
+				return FALSE;
+			}
+		}
+		kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
+		/*
+		 * A non-empty block means the logfile is not empty while an
+		 * empty block encountered after a non-empty block means we
+		 * are done.
+		 */
+		if (!ntfs_is_empty_recordp((le32*)kaddr))
+			logfile_is_empty = FALSE;
+		else if (!logfile_is_empty)
+			break;
+		/*
+		 * A log record page means there cannot be a restart page after
+		 * this so no need to continue searching.
+		 */
+		if (ntfs_is_rcrd_recordp((le32*)kaddr))
+			break;
+		/*
+		 * A restart page modified by chkdsk means we cannot handle
+		 * this log file.
+		 */
+		if (ntfs_is_chkd_recordp((le32*)kaddr)) {
+			ntfs_error(vol->sb, "$LogFile has been modified by "
+					"chkdsk.  Mount this volume in "
+					"Windows.");
+			goto err_out;
+		}
+		/* If not a restart page, continue. */
+		if (!ntfs_is_rstr_recordp((le32*)kaddr)) {
+			/* Skip to the minimum page size for the next one. */
+			if (!pos)
+				pos = NTFS_BLOCK_SIZE >> 1;
+			continue;
+		}
+		/* We now know we have a restart page. */
+		if (!pos) {
+			rstr1_found = TRUE;
+			rstr1_pos = pos;
+		} else {
+			if (rstr2_found) {
+				ntfs_error(vol->sb, "Found more than two "
+						"restart pages in $LogFile.");
+				goto err_out;
+			}
+			rstr2_found = TRUE;
+			rstr2_pos = pos;
+		}
+		/*
+		 * Check the restart page for consistency and get a copy of the
+		 * complete multi sector transfer deprotected restart page.
+		 */
+		if (!ntfs_check_and_load_restart_page(log_vi,
+				(RESTART_PAGE_HEADER*)kaddr, pos,
+				!pos ? &rstr1_ph : &rstr2_ph)) {
+			/* Error output already done inside the function. */
+			goto err_out;
+		}
+		/*
+		 * We have a valid restart page.  The next one must be after
+		 * a whole system page size as specified by the valid restart
+		 * page.
+		 */
+		if (!pos)
+			pos = le32_to_cpu(rstr1_ph->system_page_size) >> 1;
+	}
+	if (page) {
+		ntfs_unmap_page(page);
+		page = NULL;
+	}
+	if (logfile_is_empty) {
+		NVolSetLogFileEmpty(vol);
+is_empty:
+		ntfs_debug("Done.  ($LogFile is empty.)");
+		return TRUE;
+	}
+	if (!rstr1_found || !rstr2_found) {
+		ntfs_error(vol->sb, "Did not find two restart pages in "
+				"$LogFile.");
+		goto err_out;
+	}
+	/*
+	 * The two restart areas must be identical except for the update
+	 * sequence number.
+	 */
+	ofs = le16_to_cpu(rstr1_ph->usa_ofs);
+	if (memcmp(rstr1_ph, rstr2_ph, ofs) || (ofs += sizeof(u16),
+			memcmp((u8*)rstr1_ph + ofs, (u8*)rstr2_ph + ofs,
+			le32_to_cpu(rstr1_ph->system_page_size) - ofs))) {
+		ntfs_error(vol->sb, "The two restart pages in $LogFile do not "
+				"match.");
+		goto err_out;
+	}
+	ntfs_free(rstr1_ph);
+	ntfs_free(rstr2_ph);
+	/* All consistency checks passed. */
+	ntfs_debug("Done.");
+	return TRUE;
+err_out:
+	if (page)
+		ntfs_unmap_page(page);
+	if (rstr1_ph)
+		ntfs_free(rstr1_ph);
+	if (rstr2_ph)
+		ntfs_free(rstr2_ph);
+	return FALSE;
+}
+
+/**
+ * ntfs_is_logfile_clean - check in the journal if the volume is clean
+ * @log_vi:	struct inode of loaded journal $LogFile to check
+ *
+ * Analyze the $LogFile journal and return TRUE if it indicates the volume was
+ * shutdown cleanly and FALSE if not.
+ *
+ * At present we only look at the two restart pages and ignore the log record
+ * pages.  This is a little bit crude in that there will be a very small number
+ * of cases where we think that a volume is dirty when in fact it is clean.
+ * This should only affect volumes that have not been shutdown cleanly but did
+ * not have any pending, non-check-pointed i/o, i.e. they were completely idle
+ * at least for the five seconds preceding the unclean shutdown.
+ *
+ * This function assumes that the $LogFile journal has already been consistency
+ * checked by a call to ntfs_check_logfile() and in particular if the $LogFile
+ * is empty this function requires that NVolLogFileEmpty() is true otherwise an
+ * empty volume will be reported as dirty.
+ */
+BOOL ntfs_is_logfile_clean(struct inode *log_vi)
+{
+	ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
+	struct page *page;
+	RESTART_PAGE_HEADER *rp;
+	RESTART_AREA *ra;
+
+	ntfs_debug("Entering.");
+	/* An empty $LogFile must have been clean before it got emptied. */
+	if (NVolLogFileEmpty(vol)) {
+		ntfs_debug("Done.  ($LogFile is empty.)");
+		return TRUE;
+	}
+	/*
+	 * Read the first restart page.  It will be possibly incomplete and
+	 * will not be multi sector transfer deprotected but we only need the
+	 * first NTFS_BLOCK_SIZE bytes so it does not matter.
+	 */
+	page = ntfs_map_page(log_vi->i_mapping, 0);
+	if (IS_ERR(page)) {
+		ntfs_error(vol->sb, "Error mapping $LogFile page (index 0).");
+		return FALSE;
+	}
+	rp = (RESTART_PAGE_HEADER*)page_address(page);
+	if (!ntfs_is_rstr_record(rp->magic)) {
+		ntfs_error(vol->sb, "No restart page found at offset zero in "
+				"$LogFile.  This is probably a bug in that "
+				"the $LogFile should have been consistency "
+				"checked before calling this function.");
+		goto err_out;
+	}
+	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
+	/*
+	 * If the $LogFile has active clients, i.e. it is open, and we do not
+	 * have the RESTART_VOLUME_IS_CLEAN bit set in the restart area flags,
+	 * we assume there was an unclean shutdown.
+	 */
+	if (ra->client_in_use_list != LOGFILE_NO_CLIENT &&
+			!(ra->flags & RESTART_VOLUME_IS_CLEAN)) {
+		ntfs_debug("Done.  $LogFile indicates a dirty shutdown.");
+		goto err_out;
+	}
+	ntfs_unmap_page(page);
+	/* $LogFile indicates a clean shutdown. */
+	ntfs_debug("Done.  $LogFile indicates a clean shutdown.");
+	return TRUE;
+err_out:
+	ntfs_unmap_page(page);
+	return FALSE;
+}
+
+/**
+ * ntfs_empty_logfile - empty the contents of the $LogFile journal
+ * @log_vi:	struct inode of loaded journal $LogFile to empty
+ *
+ * Empty the contents of the $LogFile journal @log_vi and return TRUE on
+ * success and FALSE on error.
+ *
+ * This function assumes that the $LogFile journal has already been consistency
+ * checked by a call to ntfs_check_logfile() and that ntfs_is_logfile_clean()
+ * has been used to ensure that the $LogFile is clean.
+ */
+BOOL ntfs_empty_logfile(struct inode *log_vi)
+{
+	ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
+
+	ntfs_debug("Entering.");
+	if (!NVolLogFileEmpty(vol)) {
+		int err;
+
+		err = ntfs_attr_set(NTFS_I(log_vi), 0, log_vi->i_size, 0xff);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to fill $LogFile with "
+					"0xff bytes (error code %i).", err);
+			return FALSE;
+		}
+		/* Set the flag so we do not have to do it again on remount. */
+		NVolSetLogFileEmpty(vol);
+	}
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
new file mode 100644
index 0000000..4ee4378
--- /dev/null
+++ b/fs/ntfs/logfile.h
@@ -0,0 +1,307 @@
+/*
+ * logfile.h - Defines for NTFS kernel journal ($LogFile) handling.  Part of
+ *	       the Linux-NTFS project.
+ *
+ * Copyright (c) 2000-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_LOGFILE_H
+#define _LINUX_NTFS_LOGFILE_H
+
+#ifdef NTFS_RW
+
+#include <linux/fs.h>
+
+#include "types.h"
+#include "endian.h"
+#include "layout.h"
+
+/*
+ * Journal ($LogFile) organization:
+ *
+ * Two restart areas are present in the first two pages (restart pages, one
+ * restart area in each page).  When the volume is dismounted they should be
+ * identical, except for the update sequence array which usually has a
+ * different update sequence number.
+ *
+ * These are followed by log records organized in pages, each headed by a log
+ * record header, going up to the log file size.  Not all pages contain log
+ * records when a volume is first formatted, but as the volume ages, all
+ * record pages will be used.
+ * When the log file fills up, the records at the beginning are purged (by
+ * modifying the oldest_lsn to a higher value presumably) and writing begins
+ * at the beginning of the file.  Effectively, the log file is viewed as a
+ * circular entity.
+ *
+ * NOTE: Windows NT, 2000, and XP all use log file version 1.1 but they accept
+ * versions <= 1.x, including 0.-1.  (Yes, that is a minus one in there!)  We
+ * probably only want to support 1.1 as this seems to be the current version
+ * and we don't know how that differs from the older versions.  The only
+ * exception is if the journal is clean, as marked by the two restart pages;
+ * then it doesn't matter whether we are on an earlier version.  We can just
+ * reinitialize the logfile and start again with version 1.1.
+ */
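+
+/*
+ * Rough layout sketch (a summary of the above, assuming 4096 byte system and
+ * log page sizes):
+ *
+ *	+----------------+----------------+-----------------------------+
+ *	| restart page 1 | restart page 2 | log record pages (circular) |
+ *	+----------------+----------------+-----------------------------+
+ *	0             4096             8192                    file_size
+ */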
+
+/* Some $LogFile related constants. */
+#define MaxLogFileSize		0x100000000ULL
+#define DefaultLogPageSize	4096
+#define MinLogRecordPages	48
+
+/*
+ * Log file restart page header (begins the restart area).
+ */
+typedef struct {
+/*Ofs*/
+/*  0	NTFS_RECORD; -- Unfolded here as gcc doesn't like unnamed structs. */
+/*  0*/	NTFS_RECORD_TYPE magic;	/* The magic is "RSTR". */
+/*  4*/	le16 usa_ofs;		/* See NTFS_RECORD definition in layout.h.
+				   When creating, set this to be immediately
+				   after this header structure (without any
+				   alignment). */
+/*  6*/	le16 usa_count;		/* See NTFS_RECORD definition in layout.h. */
+
+/*  8*/	leLSN chkdsk_lsn;	/* The last log file sequence number found by
+				   chkdsk.  Only used when the magic is changed
+				   to "CHKD".  Otherwise this is zero. */
+/* 16*/	le32 system_page_size;	/* Byte size of system pages when the log file
+				   was created, has to be >= 512 and a power of
+				   2.  Use this to calculate the required size
+				   of the usa (usa_count) and add it to usa_ofs.
+				   Then verify that the result is less than the
+				   value of the restart_area_offset. */
+/* 20*/	le32 log_page_size;	/* Byte size of log file pages, has to be >=
+				   512 and a power of 2.  The default is 4096
+				   and is used when the system page size is
+				   between 4096 and 8192.  Otherwise this is
+				   set to the system page size instead. */
+/* 24*/	le16 restart_area_offset;/* Byte offset from the start of this header to
+				   the RESTART_AREA.  Value has to be aligned
+				   to 8-byte boundary.  When creating, set this
+				   to be after the usa. */
+/* 26*/	sle16 minor_ver;	/* Log file minor version.  Only check if major
+				   version is 1. */
+/* 28*/	sle16 major_ver;	/* Log file major version.  We only support
+				   version 1.1. */
+/* sizeof() = 30 (0x1e) bytes */
+} __attribute__ ((__packed__)) RESTART_PAGE_HEADER;
+
+/*
+ * Constant for the log client indices meaning that there are no client records
+ * in this particular client array.  Also inside the client records themselves,
+ * this means that there are no client records preceding or following this one.
+ */
+#define LOGFILE_NO_CLIENT	const_cpu_to_le16(0xffff)
+#define LOGFILE_NO_CLIENT_CPU	0xffff
+
+/*
+ * These are the so far known RESTART_AREA_* flags (16-bit) which contain
+ * information about the log file in which they are present.
+ */
+enum {
+	RESTART_VOLUME_IS_CLEAN	= const_cpu_to_le16(0x0002),
+	RESTART_SPACE_FILLER	= 0xffff, /* gcc: Force enum bit width to 16. */
+} __attribute__ ((__packed__));
+
+typedef le16 RESTART_AREA_FLAGS;
+
+/*
+ * Log file restart area record.  The offset of this record is found by adding
+ * the offset of the RESTART_PAGE_HEADER to the restart_area_offset value found
+ * in it.  See notes at restart_area_offset above.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	leLSN current_lsn;	/* The current, i.e. last LSN inside the log
+				   when the restart area was last written.
+				   This happens often but what is the interval?
+				   Is it just fixed time or is it every time a
+				   check point is written or something else?
+				   On create set to 0. */
+/*  8*/	le16 log_clients;	/* Number of log client records in the array of
+				   log client records which follows this
+				   restart area.  Must be 1.  */
+/* 10*/	le16 client_free_list;	/* The index of the first free log client record
+				   in the array of log client records.
+				   LOGFILE_NO_CLIENT means that there are no
+				   free log client records in the array.
+				   If != LOGFILE_NO_CLIENT, check that
+				   log_clients > client_free_list.  On Win2k
+				   and presumably earlier, on a clean volume
+				   this is != LOGFILE_NO_CLIENT, and it should
+				   be 0, i.e. the first (and only) client
+				   record is free and thus the logfile is
+				   closed and hence clean.  A dirty volume
+				   would have left the logfile open and hence
+				   this would be LOGFILE_NO_CLIENT.  On WinXP
+				   and presumably later, the logfile is always
+				   open, even on clean shutdown so this should
+				   always be LOGFILE_NO_CLIENT. */
+/* 12*/	le16 client_in_use_list;/* The index of the first in-use log client
+				   record in the array of log client records.
+				   LOGFILE_NO_CLIENT means that there are no
+				   in-use log client records in the array.  If
+				   != LOGFILE_NO_CLIENT check that log_clients
+				   > client_in_use_list.  On Win2k and
+				   presumably earlier, on a clean volume this
+				   is LOGFILE_NO_CLIENT, i.e. there are no
+				   client records in use and thus the logfile
+				   is closed and hence clean.  A dirty volume
+				   would have left the logfile open and hence
+				   this would be != LOGFILE_NO_CLIENT, and it
+				   should be 0, i.e. the first (and only)
+				   client record is in use.  On WinXP and
+				   presumably later, the logfile is always
+				   open, even on clean shutdown so this should
+				   always be 0. */
+/* 14*/	RESTART_AREA_FLAGS flags;/* Flags modifying LFS behaviour.  On Win2k
+				   and presumably earlier this is always 0.  On
+				   WinXP and presumably later, if the logfile
+				   was shutdown cleanly, the second bit,
+				   RESTART_VOLUME_IS_CLEAN, is set.  This bit
+				   is cleared when the volume is mounted by
+				   WinXP and set when the volume is dismounted,
+				   thus if the logfile is dirty, this bit is
+				   clear.  Thus we don't need to check the
+				   Windows version to determine if the logfile
+				   is clean.  Instead if the logfile is closed,
+				   we know it must be clean.  If it is open and
+				   this bit is set, we also know it must be
+				   clean.  If on the other hand the logfile is
+				   open and this bit is clear, we can be almost
+				   certain that the logfile is dirty. */
+/* 16*/	le32 seq_number_bits;	/* How many bits to use for the sequence
+				   number.  This is calculated as 67 - the
+				   number of bits required to store the logfile
+				   size in bytes and this can be used with
+				   the specified file_size as a consistency
+				   check. */
+/* 20*/	le16 restart_area_length;/* Length of the restart area including the
+				   client array.  Following checks required if
+				   version matches.  Otherwise, skip them.
+				   restart_area_offset + restart_area_length
+				   has to be <= system_page_size.  Also,
+				   restart_area_length has to be >=
+				   client_array_offset + (log_clients *
+				   sizeof(log client record)). */
+/* 22*/	le16 client_array_offset;/* Offset from the start of this record to
+				   the first log client record if versions are
+				   matched.  When creating, set this to be
+				   after this restart area structure, aligned
+				   to an 8-byte boundary.  If the versions do not
+				   match, this is ignored and the offset is
+				   assumed to be (sizeof(RESTART_AREA) + 7) &
+				   ~7, i.e. rounded up to first 8-byte
+				   boundary.  Either way, client_array_offset
+				   has to be aligned to an 8-byte boundary.
+				   Also, restart_area_offset +
+				   client_array_offset has to be <= 510.
+				   Finally, client_array_offset + (log_clients
+				   * sizeof(log client record)) has to be <=
+				   system_page_size.  On Win2k and presumably
+				   earlier, this is 0x30, i.e. immediately
+				   following this record.  On WinXP and
+				   presumably later, this is 0x40, i.e. there
+				   are 16 extra bytes between this record and
+				   the client array.  This probably means that
+				   the RESTART_AREA record is actually bigger
+				   in WinXP and later. */
+/* 24*/	sle64 file_size;	/* Usable byte size of the log file.  If the
+				   restart_area_offset + the offset of the
+				   file_size are > 510 then corruption has
+				   occurred.  This is the very first check when
+				   starting with the restart_area as if it
+				   fails it means that some of the above values
+				   will be corrupted by the multi sector
+				   transfer protection.  The file_size has to
+				   be rounded down to be a multiple of the
+				   log_page_size in the RESTART_PAGE_HEADER and
+				   then it has to be at least big enough to
+				   store the two restart pages and 48 (0x30)
+				   log record pages. */
+/* 32*/	le32 last_lsn_data_length;/* Length of data of last LSN, not including
+				   the log record header.  On create set to
+				   0. */
+/* 36*/	le16 log_record_header_length;/* Byte size of the log record header.
+				   If the version matches then check that the
+				   value of log_record_header_length is a
+				   multiple of 8, i.e.
+				   (log_record_header_length + 7) & ~7 ==
+				   log_record_header_length.  When creating set
+				   it to sizeof(LOG_RECORD_HEADER), aligned to
+				   8 bytes. */
+/* 38*/	le16 log_page_data_offset;/* Offset to the start of data in a log record
+				   page.  Must be a multiple of 8.  On create
+				   set it to immediately after the update
+				   sequence array of the log record page. */
+/* 40*/	le32 restart_log_open_count;/* A counter that gets incremented every
+				   time the logfile is restarted which happens
+				   at mount time when the logfile is opened.
+				   When creating set to a random value.  Win2k
+				   sets it to the low 32 bits of the current
+				   system time in NTFS format (see time.h). */
+/* 44*/	le32 reserved;		/* Reserved/alignment to 8-byte boundary. */
+/* sizeof() = 48 (0x30) bytes */
+} __attribute__ ((__packed__)) RESTART_AREA;
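+
+/*
+ * Sketch of how the structures in this file chain together (this mirrors
+ * what logfile.c does once a restart page header rp has passed its
+ * consistency checks):
+ *
+ *	RESTART_AREA *ra = (RESTART_AREA*)((u8*)rp +
+ *			le16_to_cpu(rp->restart_area_offset));
+ *	LOG_CLIENT_RECORD *ca = (LOG_CLIENT_RECORD*)((u8*)ra +
+ *			le16_to_cpu(ra->client_array_offset));
+ */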
+
+/*
+ * Log client record.  The offset of this record is found by adding the offset
+ * of the RESTART_AREA to the client_array_offset value found in it.
+ */
+typedef struct {
+/*Ofs*/
+/*  0*/	leLSN oldest_lsn;	/* Oldest LSN needed by this client.  On create
+				   set to 0. */
+/*  8*/	leLSN client_restart_lsn;/* LSN at which this client needs to restart
+				   the volume, i.e. the current position within
+				   the log file.  At present, if clean this
+				   should = current_lsn in restart area but it
+				   probably also = current_lsn when dirty most
+				   of the time.  At create set to 0. */
+/* 16*/	le16 prev_client;	/* The offset to the previous log client record
+				   in the array of log client records.
+				   LOGFILE_NO_CLIENT means there is no previous
+				   client record, i.e. this is the first one.
+				   This is always LOGFILE_NO_CLIENT. */
+/* 18*/	le16 next_client;	/* The offset to the next log client record in
+				   the array of log client records.
+				   LOGFILE_NO_CLIENT means there are no next
+				   client records, i.e. this is the last one.
+				   This is always LOGFILE_NO_CLIENT. */
+/* 20*/	le16 seq_number;	/* On Win2k and presumably earlier, this is set
+				   to zero every time the logfile is restarted
+				   and it is incremented when the logfile is
+				   closed at dismount time.  Thus it is 0 when
+				   dirty and 1 when clean.  On WinXP and
+				   presumably later, this is always 0. */
+/* 22*/	u8 reserved[6];		/* Reserved/alignment. */
+/* 28*/	le32 client_name_length;/* Length of client name in bytes.  Should
+				   always be 8. */
+/* 32*/	ntfschar client_name[64];/* Name of the client in Unicode.  Should
+				   always be "NTFS" with the remaining bytes
+				   set to 0. */
+/* sizeof() = 160 (0xa0) bytes */
+} __attribute__ ((__packed__)) LOG_CLIENT_RECORD;
+
+extern BOOL ntfs_check_logfile(struct inode *log_vi);
+
+extern BOOL ntfs_is_logfile_clean(struct inode *log_vi);
+
+extern BOOL ntfs_empty_logfile(struct inode *log_vi);
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_LOGFILE_H */
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
new file mode 100644
index 0000000..fac5944
--- /dev/null
+++ b/fs/ntfs/malloc.h
@@ -0,0 +1,62 @@
+/*
+ * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_MALLOC_H
+#define _LINUX_NTFS_MALLOC_H
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+/**
+ * ntfs_malloc_nofs - allocate memory in multiples of pages
+ * @size:	number of bytes to allocate
+ *
+ * Allocates @size bytes of memory, rounded up to multiples of PAGE_SIZE and
+ * returns a pointer to the allocated memory.
+ *
+ * If there was insufficient memory to complete the request, return NULL.
+ */
+static inline void *ntfs_malloc_nofs(unsigned long size)
+{
+	if (likely(size <= PAGE_SIZE)) {
+		BUG_ON(!size);
+		/* kmalloc() has per-CPU caches so is faster for now. */
+		return kmalloc(PAGE_SIZE, GFP_NOFS);
+		/* return (void *)__get_free_page(GFP_NOFS | __GFP_HIGHMEM); */
+	}
+	if (likely(size >> PAGE_SHIFT < num_physpages))
+		return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
+	return NULL;
+}
+
+static inline void ntfs_free(void *addr)
+{
+	if (likely(((unsigned long)addr < VMALLOC_START) ||
+			((unsigned long)addr >= VMALLOC_END))) {
+		kfree(addr);
+		/* free_page((unsigned long)addr); */
+		return;
+	}
+	vfree(addr);
+}
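+
+/*
+ * Usage sketch (hypothetical caller): buffers obtained from
+ * ntfs_malloc_nofs() are released with ntfs_free(), which picks kfree() or
+ * vfree() depending on where the address lives:
+ *
+ *	u8 *buf = ntfs_malloc_nofs(size);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	...
+ *	ntfs_free(buf);
+ */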
+
+#endif /* _LINUX_NTFS_MALLOC_H */
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
new file mode 100644
index 0000000..dfa85ac
--- /dev/null
+++ b/fs/ntfs/mft.c
@@ -0,0 +1,2829 @@
+/**
+ * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/swap.h>
+
+#include "attrib.h"
+#include "aops.h"
+#include "bitmap.h"
+#include "debug.h"
+#include "dir.h"
+#include "lcnalloc.h"
+#include "malloc.h"
+#include "mft.h"
+#include "ntfs.h"
+
+/**
+ * map_mft_record_page - map the page in which a specific mft record resides
+ * @ni:		ntfs inode whose mft record page to map
+ *
+ * This maps the page in which the mft record of the ntfs inode @ni is situated
+ * and returns a pointer to the mft record within the mapped page.
+ *
+ * Return value needs to be checked with IS_ERR() and if that is true PTR_ERR()
+ * contains the negative error code returned.
+ */
+static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
+{
+	ntfs_volume *vol = ni->vol;
+	struct inode *mft_vi = vol->mft_ino;
+	struct page *page;
+	unsigned long index, ofs, end_index;
+
+	BUG_ON(ni->page);
+	/*
+	 * The index into the page cache and the offset within the page cache
+	 * page of the wanted mft record. FIXME: We need to check for
+	 * overflowing the unsigned long, but I don't think we would ever get
+	 * here if the volume was that big...
+	 */
+	index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
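+	/*
+	 * For example (illustrative): with the usual 1024 byte mft records
+	 * and 4096 byte pages, mft record 5 lives at byte offset 5120, i.e.
+	 * page index 1, offset 1024 within that page.
+	 */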
+
+	/* The maximum valid index into the page cache for $MFT's data. */
+	end_index = mft_vi->i_size >> PAGE_CACHE_SHIFT;
+
+	/* If the wanted index is out of bounds the mft record doesn't exist. */
+	if (unlikely(index >= end_index)) {
+		if (index > end_index || (mft_vi->i_size & ~PAGE_CACHE_MASK) <
+				ofs + vol->mft_record_size) {
+			page = ERR_PTR(-ENOENT);
+			ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
+					"which is beyond the end of the mft.  "
+					"This is probably a bug in the ntfs "
+					"driver.", ni->mft_no);
+			goto err_out;
+		}
+	}
+	/* Read, map, and pin the page. */
+	page = ntfs_map_page(mft_vi->i_mapping, index);
+	if (likely(!IS_ERR(page))) {
+		/* Catch multi sector transfer fixup errors. */
+		if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
+				ofs)))) {
+			ni->page = page;
+			ni->page_ofs = ofs;
+			return page_address(page) + ofs;
+		}
+		ntfs_error(vol->sb, "Mft record 0x%lx is corrupt.  "
+				"Run chkdsk.", ni->mft_no);
+		ntfs_unmap_page(page);
+		page = ERR_PTR(-EIO);
+	}
+err_out:
+	ni->page = NULL;
+	ni->page_ofs = 0;
+	return (void*)page;
+}
+
+/**
+ * map_mft_record - map, pin and lock an mft record
+ * @ni:		ntfs inode whose MFT record to map
+ *
+ * First, take the mrec_lock semaphore. We might now be sleeping, while waiting
+ * for the semaphore if it was already locked by someone else.
+ *
+ * The page of the record is mapped using map_mft_record_page() before being
+ * returned to the caller.
+ *
+ * This in turn uses ntfs_map_page() to get the page containing the wanted mft
+ * record (it in turn calls read_cache_page() which reads it in from disk if
+ * necessary, increments the use count on the page so that it cannot disappear
+ * under us and returns a reference to the page cache page).
+ *
+ * If read_cache_page() invokes ntfs_readpage() to load the page from disk, it
+ * sets PG_locked and clears PG_uptodate on the page. Once I/O has completed
+ * and the post-read mst fixups on each mft record in the page have been
+ * performed, the page gets PG_uptodate set and PG_locked cleared (this is done
+ * in our asynchronous I/O completion handler end_buffer_read_mft_async()).
+ * ntfs_map_page() waits for PG_locked to become clear and checks if
+ * PG_uptodate is set and returns an error code if not. This provides
+ * sufficient protection against races when reading/using the page.
+ *
+ * However there is the write mapping to think about. Doing the above described
+ * checking here will be fine, because when initiating the write we will set
+ * PG_locked and clear PG_uptodate making sure nobody is touching the page
+ * contents. Doing the locking this way means that the commit to disk code in
+ * the page cache code paths is automatically sufficiently locked with us as
+ * we will not touch a page that has been locked or is not uptodate. The only
+ * locking problem then is the page cache code locking the page while we are
+ * accessing it.
+ *
+ * So that code will end up having to own the mrec_lock of all mft
+ * records/inodes present in the page before I/O can proceed. In that case we
+ * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be
+ * accessing anything without owning the mrec_lock semaphore. But we do need
+ * to use them because of the read_cache_page() invocation and the code becomes
+ * so much simpler this way that it is well worth it.
+ *
+ * The mft record is now ours and we return a pointer to it. You need to check
+ * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return
+ * the error code.
+ *
+ * NOTE: Caller is responsible for setting the mft record dirty before calling
+ * unmap_mft_record(). This is obviously only necessary if the caller really
+ * modified the mft record...
+ * Q: Do we want to recycle one of the VFS inode state bits instead?
+ * A: No, the inode ones mean we want to change the mft record, not we want to
+ * write it out.
+ */
+MFT_RECORD *map_mft_record(ntfs_inode *ni)
+{
+	MFT_RECORD *m;
+
+	ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
+
+	/* Make sure the ntfs inode doesn't go away. */
+	atomic_inc(&ni->count);
+
+	/* Serialize access to this mft record. */
+	down(&ni->mrec_lock);
+
+	m = map_mft_record_page(ni);
+	if (likely(!IS_ERR(m)))
+		return m;
+
+	up(&ni->mrec_lock);
+	atomic_dec(&ni->count);
+	ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m));
+	return m;
+}
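+
+/*
+ * Typical usage sketch (hypothetical caller), following the note above about
+ * marking the record dirty before unmapping:
+ *
+ *	MFT_RECORD *m = map_mft_record(ni);
+ *	if (IS_ERR(m))
+ *		return PTR_ERR(m);
+ *	... modify the mft record ...
+ *	mark_mft_record_dirty(ni);
+ *	unmap_mft_record(ni);
+ */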
+
+/**
+ * unmap_mft_record_page - unmap the page in which a specific mft record resides
+ * @ni:		ntfs inode whose mft record page to unmap
+ *
+ * This unmaps the page in which the mft record of the ntfs inode @ni is
+ * situated and returns. This is a NOOP if highmem is not configured.
+ *
+ * The unmap happens via ntfs_unmap_page() which in turn decrements the use
+ * count on the page thus releasing it from the pinned state.
+ *
+ * We do not actually unmap the page from memory of course, as that will be
+ * done by the page cache code itself when memory pressure increases or
+ * whatever.
+ */
+static inline void unmap_mft_record_page(ntfs_inode *ni)
+{
+	BUG_ON(!ni->page);
+
+	// TODO: If dirty, blah...
+	ntfs_unmap_page(ni->page);
+	ni->page = NULL;
+	ni->page_ofs = 0;
+	return;
+}
+
+/**
+ * unmap_mft_record - release a mapped mft record
+ * @ni:		ntfs inode whose MFT record to unmap
+ *
+ * We release the page mapping and the mrec_lock mutex which unmaps the mft
+ * record and releases it for others to get hold of. We also release the ntfs
+ * inode by decrementing the ntfs inode reference count.
+ *
+ * NOTE: If caller has modified the mft record, it is imperative to set the mft
+ * record dirty BEFORE calling unmap_mft_record().
+ */
+void unmap_mft_record(ntfs_inode *ni)
+{
+	struct page *page = ni->page;
+
+	BUG_ON(!page);
+
+	ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
+
+	unmap_mft_record_page(ni);
+	up(&ni->mrec_lock);
+	atomic_dec(&ni->count);
+	/*
+	 * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
+	 * ntfs_clear_extent_inode() in the extent inode case, and to the
+	 * caller in the non-extent, yet pure ntfs inode case, to do the actual
+	 * tear down of all structures and freeing of all allocated memory.
+	 */
+	return;
+}
+
+/**
+ * map_extent_mft_record - load an extent inode and attach it to its base
+ * @base_ni:	base ntfs inode
+ * @mref:	mft reference of the extent inode to load
+ * @ntfs_ino:	on successful return, pointer to the ntfs_inode structure
+ *
+ * Load the extent mft record @mref and attach it to its base inode @base_ni.
+ * Return the mapped extent mft record if IS_ERR(result) is false.  Otherwise
+ * PTR_ERR(result) gives the negative error code.
+ *
+ * On successful return, @ntfs_ino contains a pointer to the ntfs_inode
+ * structure of the mapped extent inode.
+ */
+MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
+		ntfs_inode **ntfs_ino)
+{
+	MFT_RECORD *m;
+	ntfs_inode *ni = NULL;
+	ntfs_inode **extent_nis = NULL;
+	int i;
+	unsigned long mft_no = MREF(mref);
+	u16 seq_no = MSEQNO(mref);
+	BOOL destroy_ni = FALSE;
+
+	ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
+			mft_no, base_ni->mft_no);
+	/* Make sure the base ntfs inode doesn't go away. */
+	atomic_inc(&base_ni->count);
+	/*
+	 * Check if this extent inode has already been added to the base inode,
+	 * in which case just return it. If not found, add it to the base
+	 * inode before returning it.
+	 */
+	down(&base_ni->extent_lock);
+	if (base_ni->nr_extents > 0) {
+		extent_nis = base_ni->ext.extent_ntfs_inos;
+		for (i = 0; i < base_ni->nr_extents; i++) {
+			if (mft_no != extent_nis[i]->mft_no)
+				continue;
+			ni = extent_nis[i];
+			/* Make sure the ntfs inode doesn't go away. */
+			atomic_inc(&ni->count);
+			break;
+		}
+	}
+	if (likely(ni != NULL)) {
+		up(&base_ni->extent_lock);
+		atomic_dec(&base_ni->count);
+		/* We found the record; just have to map and return it. */
+		m = map_mft_record(ni);
+		/* map_mft_record() has incremented this on success. */
+		atomic_dec(&ni->count);
+		if (likely(!IS_ERR(m))) {
+			/* Verify the sequence number. */
+			if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
+				ntfs_debug("Done 1.");
+				*ntfs_ino = ni;
+				return m;
+			}
+			unmap_mft_record(ni);
+			ntfs_error(base_ni->vol->sb, "Found stale extent mft "
+					"reference! Corrupt file system. "
+					"Run chkdsk.");
+			return ERR_PTR(-EIO);
+		}
+map_err_out:
+		ntfs_error(base_ni->vol->sb, "Failed to map extent "
+				"mft record, error code %ld.", -PTR_ERR(m));
+		return m;
+	}
+	/* Record wasn't there. Get a new ntfs inode and initialize it. */
+	ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
+	if (unlikely(!ni)) {
+		up(&base_ni->extent_lock);
+		atomic_dec(&base_ni->count);
+		return ERR_PTR(-ENOMEM);
+	}
+	ni->vol = base_ni->vol;
+	ni->seq_no = seq_no;
+	ni->nr_extents = -1;
+	ni->ext.base_ntfs_ino = base_ni;
+	/* Now map the record. */
+	m = map_mft_record(ni);
+	if (IS_ERR(m)) {
+		up(&base_ni->extent_lock);
+		atomic_dec(&base_ni->count);
+		ntfs_clear_extent_inode(ni);
+		goto map_err_out;
+	}
+	/* Verify the sequence number if it is present. */
+	if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
+		ntfs_error(base_ni->vol->sb, "Found stale extent mft "
+				"reference! Corrupt file system. Run chkdsk.");
+		destroy_ni = TRUE;
+		m = ERR_PTR(-EIO);
+		goto unm_err_out;
+	}
+	/* Attach extent inode to base inode, reallocating memory if needed. */
+	if (!(base_ni->nr_extents & 3)) {
+		ntfs_inode **tmp;
+		int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);
+
+		tmp = (ntfs_inode **)kmalloc(new_size, GFP_NOFS);
+		if (unlikely(!tmp)) {
+			ntfs_error(base_ni->vol->sb, "Failed to allocate "
+					"internal buffer.");
+			destroy_ni = TRUE;
+			m = ERR_PTR(-ENOMEM);
+			goto unm_err_out;
+		}
+		if (base_ni->nr_extents) {
+			BUG_ON(!base_ni->ext.extent_ntfs_inos);
+			memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
+					4 * sizeof(ntfs_inode *));
+			kfree(base_ni->ext.extent_ntfs_inos);
+		}
+		base_ni->ext.extent_ntfs_inos = tmp;
+	}
+	base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
+	up(&base_ni->extent_lock);
+	atomic_dec(&base_ni->count);
+	ntfs_debug("Done 2.");
+	*ntfs_ino = ni;
+	return m;
+unm_err_out:
+	unmap_mft_record(ni);
+	up(&base_ni->extent_lock);
+	atomic_dec(&base_ni->count);
+	/*
+	 * If the extent inode was not attached to the base inode we need to
+	 * release it or we will leak memory.
+	 */
+	if (destroy_ni)
+		ntfs_clear_extent_inode(ni);
+	return m;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * __mark_mft_record_dirty - set the mft record and the page containing it dirty
+ * @ni:		ntfs inode describing the mapped mft record
+ *
+ * Internal function.  Users should call mark_mft_record_dirty() instead.
+ *
+ * Set the mapped (extent) mft record of the (base or extent) ntfs inode @ni,
+ * as well as the page containing the mft record, dirty.  Also, mark the base
+ * vfs inode dirty.  This ensures that any changes to the mft record are
+ * written out to disk.
+ *
+ * NOTE:  We only set I_DIRTY_SYNC and I_DIRTY_DATASYNC (and not I_DIRTY_PAGES)
+ * on the base vfs inode, because even though file data may have been modified,
+ * it is dirty in the inode meta data rather than the data page cache of the
+ * inode, and thus there are no data pages that need writing out.  Therefore, a
+ * full mark_inode_dirty() is overkill.  A mark_inode_dirty_sync(), on the
+ * other hand, is not sufficient, because I_DIRTY_DATASYNC needs to be set to
+ * ensure ->write_inode is called from generic_osync_inode() and this needs to
+ * happen or the file data would not necessarily hit the device synchronously,
+ * even though the vfs inode has the O_SYNC flag set.  Also, I_DIRTY_DATASYNC
+ * simply "feels" better than just I_DIRTY_SYNC, since the file data has not
+ * actually hit the block device yet, which is not what I_DIRTY_SYNC on its own
+ * would suggest.
+ */
+void __mark_mft_record_dirty(ntfs_inode *ni)
+{
+	ntfs_inode *base_ni;
+
+	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
+	BUG_ON(NInoAttr(ni));
+	mark_ntfs_record_dirty(ni->page, ni->page_ofs);
+	/* Determine the base vfs inode and mark it dirty, too. */
+	down(&ni->extent_lock);
+	if (likely(ni->nr_extents >= 0))
+		base_ni = ni;
+	else
+		base_ni = ni->ext.base_ntfs_ino;
+	up(&ni->extent_lock);
+	__mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+}
+
+static const char *ntfs_please_email = "Please email "
+		"linux-ntfs-dev@lists.sourceforge.net and say that you saw "
+		"this message.  Thank you.";
+
+/**
+ * ntfs_sync_mft_mirror_umount - synchronise an mft record to the mft mirror
+ * @vol:	ntfs volume on which the mft record to synchronize resides
+ * @mft_no:	mft record number of mft record to synchronize
+ * @m:		mapped, mst protected (extent) mft record to synchronize
+ *
+ * Write the mapped, mst protected (extent) mft record @m with mft record
+ * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol,
+ * bypassing the page cache and the $MFTMirr inode itself.
+ *
+ * This function is only for use at umount time when the mft mirror inode has
+ * already been disposed of.  We BUG() if we are called while the mft mirror
+ * inode is still attached to the volume.
+ *
+ * On success return 0.  On error return -errno.
+ *
+ * NOTE:  This function is not implemented yet as I am not convinced it can
+ * actually be triggered considering the sequence of commits we do in super.c::
+ * ntfs_put_super().  But just in case we provide this place holder as the
+ * alternative would be either to BUG() or to get a NULL pointer dereference
+ * and Oops.
+ */
+static int ntfs_sync_mft_mirror_umount(ntfs_volume *vol,
+		const unsigned long mft_no, MFT_RECORD *m)
+{
+	BUG_ON(vol->mftmirr_ino);
+	ntfs_error(vol->sb, "Umount time mft mirror syncing is not "
+			"implemented yet.  %s", ntfs_please_email);
+	return -EOPNOTSUPP;
+}
+
+/**
+ * ntfs_sync_mft_mirror - synchronize an mft record to the mft mirror
+ * @vol:	ntfs volume on which the mft record to synchronize resides
+ * @mft_no:	mft record number of mft record to synchronize
+ * @m:		mapped, mst protected (extent) mft record to synchronize
+ * @sync:	if true, wait for i/o completion
+ *
+ * Write the mapped, mst protected (extent) mft record @m with mft record
+ * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol.
+ *
+ * On success return 0.  On error return -errno and set the volume errors flag
+ * in the ntfs volume @vol.
+ *
+ * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
+ *
+ * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
+ * schedule i/o via ->writepage or do it via kntfsd or whatever.
+ */
+int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
+		MFT_RECORD *m, int sync)
+{
+	struct page *page;
+	unsigned int blocksize = vol->sb->s_blocksize;
+	int max_bhs = vol->mft_record_size / blocksize;
+	struct buffer_head *bhs[max_bhs];
+	struct buffer_head *bh, *head;
+	u8 *kmirr;
+	runlist_element *rl;
+	unsigned int block_start, block_end, m_start, m_end, page_ofs;
+	int i_bhs, nr_bhs, err = 0;
+	unsigned char blocksize_bits = vol->mftmirr_ino->i_blkbits;
+
+	ntfs_debug("Entering for inode 0x%lx.", mft_no);
+	BUG_ON(!max_bhs);
+	if (unlikely(!vol->mftmirr_ino)) {
+		/* This could happen during umount... */
+		err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
+		if (likely(!err))
+			return err;
+		goto err_out;
+	}
+	/* Get the page containing the mirror copy of the mft record @m. */
+	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
+			(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
+	if (IS_ERR(page)) {
+		ntfs_error(vol->sb, "Failed to map mft mirror page.");
+		err = PTR_ERR(page);
+		goto err_out;
+	}
+	lock_page(page);
+	BUG_ON(!PageUptodate(page));
+	ClearPageUptodate(page);
+	/* Offset of the mft mirror record inside the page. */
+	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	/* The address in the page of the mirror copy of the mft record @m. */
+	kmirr = page_address(page) + page_ofs;
+	/* Copy the mst protected mft record to the mirror. */
+	memcpy(kmirr, m, vol->mft_record_size);
+	/* Create uptodate buffers if not present. */
+	if (unlikely(!page_has_buffers(page))) {
+		struct buffer_head *tail;
+
+		bh = head = alloc_page_buffers(page, blocksize, 1);
+		do {
+			set_buffer_uptodate(bh);
+			tail = bh;
+			bh = bh->b_this_page;
+		} while (bh);
+		tail->b_this_page = head;
+		attach_page_buffers(page, head);
+		BUG_ON(!page_has_buffers(page));
+	}
+	bh = head = page_buffers(page);
+	BUG_ON(!bh);
+	rl = NULL;
+	nr_bhs = 0;
+	block_start = 0;
+	m_start = kmirr - (u8*)page_address(page);
+	m_end = m_start + vol->mft_record_size;
+	do {
+		block_end = block_start + blocksize;
+		/* If the buffer is outside the mft record, skip it. */
+		if (block_end <= m_start)
+			continue;
+		if (unlikely(block_start >= m_end))
+			break;
+		/* Need to map the buffer if it is not mapped already. */
+		if (unlikely(!buffer_mapped(bh))) {
+			VCN vcn;
+			LCN lcn;
+			unsigned int vcn_ofs;
+
+			/* Obtain the vcn and offset of the current block. */
+			vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
+					(block_start - m_start);
+			vcn_ofs = vcn & vol->cluster_size_mask;
+			vcn >>= vol->cluster_size_bits;
+			if (!rl) {
+				down_read(&NTFS_I(vol->mftmirr_ino)->
+						runlist.lock);
+				rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
+				/*
+				 * $MFTMirr always has the whole of its runlist
+				 * in memory.
+				 */
+				BUG_ON(!rl);
+			}
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			/* For $MFTMirr, only lcn >= 0 is a successful remap. */
+			if (likely(lcn >= 0)) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn <<
+						vol->cluster_size_bits) +
+						vcn_ofs) >> blocksize_bits;
+				set_buffer_mapped(bh);
+			} else {
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Cannot write mft mirror "
+						"record 0x%lx because its "
+						"location on disk could not "
+						"be determined (error code "
+						"%lli).", mft_no,
+						(long long)lcn);
+				err = -EIO;
+			}
+		}
+		BUG_ON(!buffer_uptodate(bh));
+		BUG_ON(!nr_bhs && (m_start != block_start));
+		BUG_ON(nr_bhs >= max_bhs);
+		bhs[nr_bhs++] = bh;
+		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
+	} while (block_start = block_end, (bh = bh->b_this_page) != head);
+	if (unlikely(rl))
+		up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);
+	if (likely(!err)) {
+		/* Lock buffers and start synchronous write i/o on them. */
+		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
+			struct buffer_head *tbh = bhs[i_bhs];
+
+			if (unlikely(test_set_buffer_locked(tbh)))
+				BUG();
+			BUG_ON(!buffer_uptodate(tbh));
+			clear_buffer_dirty(tbh);
+			get_bh(tbh);
+			tbh->b_end_io = end_buffer_write_sync;
+			submit_bh(WRITE, tbh);
+		}
+		/* Wait on i/o completion of buffers. */
+		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
+			struct buffer_head *tbh = bhs[i_bhs];
+
+			wait_on_buffer(tbh);
+			if (unlikely(!buffer_uptodate(tbh))) {
+				err = -EIO;
+				/*
+				 * Set the buffer uptodate so the page and
+				 * buffer states do not become out of sync.
+				 */
+				set_buffer_uptodate(tbh);
+			}
+		}
+	} else /* if (unlikely(err)) */ {
+		/* Clean the buffers. */
+		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
+			clear_buffer_dirty(bhs[i_bhs]);
+	}
+	/* Current state: all buffers are clean, unlocked, and uptodate. */
+	/* Remove the mst protection fixups again. */
+	post_write_mst_fixup((NTFS_RECORD*)kmirr);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+	ntfs_unmap_page(page);
+	if (likely(!err)) {
+		ntfs_debug("Done.");
+	} else {
+		ntfs_error(vol->sb, "I/O error while writing mft mirror "
+				"record 0x%lx!", mft_no);
+err_out:
+		ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error "
+				"code %i).  Volume will be left marked dirty "
+				"on umount.  Run ntfsfix on the partition "
+				"after umounting to correct this.", -err);
+		NVolSetErrors(vol);
+	}
+	return err;
+}
+
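+/*
+ * Worked example for the buffer mapping above (the geometry is an assumption,
+ * not taken from a real volume): a buffer is mapped to disk via
+ *	bh->b_blocknr = ((lcn << cluster_size_bits) + vcn_ofs) >> blocksize_bits
+ * so with 4096-byte clusters (cluster_size_bits 12), 512-byte blocks
+ * (blocksize_bits 9), lcn 100, and vcn_ofs 1024 this yields
+ *	((100 << 12) + 1024) >> 9 = 410624 >> 9 = 802,
+ * i.e. the third 512-byte block inside logical cluster 100.
+ */
+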
+/**
+ * write_mft_record_nolock - write out a mapped (extent) mft record
+ * @ni:		ntfs inode describing the mapped (extent) mft record
+ * @m:		mapped (extent) mft record to write
+ * @sync:	if true, wait for i/o completion
+ *
+ * Write the mapped (extent) mft record @m described by the (regular or extent)
+ * ntfs inode @ni to backing store.  If the mft record @m has a counterpart in
+ * the mft mirror, that is also updated.
+ *
+ * We only write the mft record if the ntfs inode @ni is dirty and the first
+ * buffer belonging to its mft record is dirty, too.  We ignore the dirty state
+ * of subsequent buffers because we could have raced with
+ * fs/ntfs/aops.c::mark_ntfs_record_dirty().
+ *
+ * On success, clean the mft record and return 0.  On error, leave the mft
+ * record dirty and return -errno.  The caller should call make_bad_inode() on
+ * the base inode to ensure no more access happens to this inode.  We do not do
+ * it here as the caller may want to finish writing other extent mft records
+ * first to minimize on-disk metadata inconsistencies.
+ *
+ * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
+ * However, if the mft record has a counterpart in the mft mirror and @sync is
+ * true, we write the mft record, wait for i/o completion, and only then write
+ * the mft mirror copy.  This ensures that if the system crashes either the mft
+ * or the mft mirror will contain a self-consistent mft record @m.  If @sync is
+ * false on the other hand, we start i/o on both and then wait for completion
+ * on them.  This provides a speedup but no longer guarantees that you will end
+ * up with a self-consistent mft record in the case of a crash but if you asked
+ * for asynchronous writing you probably do not care about that anyway.
+ *
+ * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
+ * schedule i/o via ->writepage or do it via kntfsd or whatever.
+ */
+int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
+{
+	ntfs_volume *vol = ni->vol;
+	struct page *page = ni->page;
+	unsigned char blocksize_bits = vol->mft_ino->i_blkbits;
+	unsigned int blocksize = 1 << blocksize_bits;
+	int max_bhs = vol->mft_record_size / blocksize;
+	struct buffer_head *bhs[max_bhs];
+	struct buffer_head *bh, *head;
+	runlist_element *rl;
+	unsigned int block_start, block_end, m_start, m_end;
+	int i_bhs, nr_bhs, err = 0;
+
+	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
+	BUG_ON(NInoAttr(ni));
+	BUG_ON(!max_bhs);
+	BUG_ON(!PageLocked(page));
+	/*
+	 * If the ntfs_inode is clean no need to do anything.  If it is dirty,
+	 * mark it as clean now so that it can be redirtied later on if needed.
+	 * There is no danger of races since the caller is holding the locks
+	 * for the mft record @m and the page it is in.
+	 */
+	if (!NInoTestClearDirty(ni))
+		goto done;
+	BUG_ON(!page_has_buffers(page));
+	bh = head = page_buffers(page);
+	BUG_ON(!bh);
+	rl = NULL;
+	nr_bhs = 0;
+	block_start = 0;
+	m_start = ni->page_ofs;
+	m_end = m_start + vol->mft_record_size;
+	do {
+		block_end = block_start + blocksize;
+		/* If the buffer is outside the mft record, skip it. */
+		if (block_end <= m_start)
+			continue;
+		if (unlikely(block_start >= m_end))
+			break;
+		/*
+		 * If this block is not the first one in the record, we ignore
+		 * the buffer's dirty state because we could have raced with a
+		 * parallel mark_ntfs_record_dirty().
+		 */
+		if (block_start == m_start) {
+			/* This block is the first one in the record. */
+			if (!buffer_dirty(bh)) {
+				BUG_ON(nr_bhs);
+				/* Clean records are not written out. */
+				break;
+			}
+		}
+		/* Need to map the buffer if it is not mapped already. */
+		if (unlikely(!buffer_mapped(bh))) {
+			VCN vcn;
+			LCN lcn;
+			unsigned int vcn_ofs;
+
+			/* Obtain the vcn and offset of the current block. */
+			vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
+					(block_start - m_start);
+			vcn_ofs = vcn & vol->cluster_size_mask;
+			vcn >>= vol->cluster_size_bits;
+			if (!rl) {
+				down_read(&NTFS_I(vol->mft_ino)->runlist.lock);
+				rl = NTFS_I(vol->mft_ino)->runlist.rl;
+				BUG_ON(!rl);
+			}
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			/* For $MFT, only lcn >= 0 is a successful remap. */
+			if (likely(lcn >= 0)) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn <<
+						vol->cluster_size_bits) +
+						vcn_ofs) >> blocksize_bits;
+				set_buffer_mapped(bh);
+			} else {
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Cannot write mft record "
+						"0x%lx because its location "
+						"on disk could not be "
+						"determined (error code %lli).",
+						ni->mft_no, (long long)lcn);
+				err = -EIO;
+			}
+		}
+		BUG_ON(!buffer_uptodate(bh));
+		BUG_ON(!nr_bhs && (m_start != block_start));
+		BUG_ON(nr_bhs >= max_bhs);
+		bhs[nr_bhs++] = bh;
+		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
+	} while (block_start = block_end, (bh = bh->b_this_page) != head);
+	if (unlikely(rl))
+		up_read(&NTFS_I(vol->mft_ino)->runlist.lock);
+	if (!nr_bhs)
+		goto done;
+	if (unlikely(err))
+		goto cleanup_out;
+	/* Apply the mst protection fixups. */
+	err = pre_write_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size);
+	if (err) {
+		ntfs_error(vol->sb, "Failed to apply mst fixups!");
+		goto cleanup_out;
+	}
+	flush_dcache_mft_record_page(ni);
+	/* Lock buffers and start synchronous write i/o on them. */
+	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
+		struct buffer_head *tbh = bhs[i_bhs];
+
+		if (unlikely(test_set_buffer_locked(tbh)))
+			BUG();
+		BUG_ON(!buffer_uptodate(tbh));
+		clear_buffer_dirty(tbh);
+		get_bh(tbh);
+		tbh->b_end_io = end_buffer_write_sync;
+		submit_bh(WRITE, tbh);
+	}
+	/* Synchronize the mft mirror now if not @sync. */
+	if (!sync && ni->mft_no < vol->mftmirr_size)
+		ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
+	/* Wait on i/o completion of buffers. */
+	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
+		struct buffer_head *tbh = bhs[i_bhs];
+
+		wait_on_buffer(tbh);
+		if (unlikely(!buffer_uptodate(tbh))) {
+			err = -EIO;
+			/*
+			 * Set the buffer uptodate so the page and buffer
+			 * states do not become out of sync.
+			 */
+			if (PageUptodate(page))
+				set_buffer_uptodate(tbh);
+		}
+	}
+	/* If @sync, now synchronize the mft mirror. */
+	if (sync && ni->mft_no < vol->mftmirr_size)
+		ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
+	/* Remove the mst protection fixups again. */
+	post_write_mst_fixup((NTFS_RECORD*)m);
+	flush_dcache_mft_record_page(ni);
+	if (unlikely(err)) {
+		/* I/O error during writing.  This is really bad! */
+		ntfs_error(vol->sb, "I/O error while writing mft record "
+				"0x%lx!  Marking base inode as bad.  You "
+				"should unmount the volume and run chkdsk.",
+				ni->mft_no);
+		goto err_out;
+	}
+done:
+	ntfs_debug("Done.");
+	return 0;
+cleanup_out:
+	/* Clean the buffers. */
+	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
+		clear_buffer_dirty(bhs[i_bhs]);
+err_out:
+	/*
+	 * Current state: all buffers are clean, unlocked, and uptodate.
+	 * The caller should mark the base inode as bad so that no more i/o
+	 * happens.  ->clear_inode() will still be invoked so all extent inodes
+	 * and other allocated memory will be freed.
+	 */
+	if (err == -ENOMEM) {
+		ntfs_error(vol->sb, "Not enough memory to write mft record.  "
+				"Redirtying so the write is retried later.");
+		mark_mft_record_dirty(ni);
+		err = 0;
+	} else
+		NVolSetErrors(vol);
+	return err;
+}
+
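+/*
+ * Ordering summary for the mirror update in write_mft_record_nolock() above
+ * (a restatement for clarity, not additional driver logic; the mirror is only
+ * involved for records with mft_no < vol->mftmirr_size):
+ *
+ *	submit i/o on the mft record buffers
+ *	if (!sync)
+ *		ntfs_sync_mft_mirror()		overlaps with the wait below
+ *	wait for the mft record buffers
+ *	if (sync)
+ *		ntfs_sync_mft_mirror()		mirror written only after $MFT
+ *
+ * The @sync case trades speed for the guarantee that at least one of $MFT and
+ * $MFTMirr holds a self-consistent copy of the record if the system crashes
+ * between the two writes.
+ */
+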
+/**
+ * ntfs_may_write_mft_record - check if an mft record may be written out
+ * @vol:	[IN]  ntfs volume on which the mft record to check resides
+ * @mft_no:	[IN]  mft record number of the mft record to check
+ * @m:		[IN]  mapped mft record to check
+ * @locked_ni:	[OUT] caller has to unlock this ntfs inode if one is returned
+ *
+ * Check if the mapped (base or extent) mft record @m with mft record number
+ * @mft_no belonging to the ntfs volume @vol may be written out.  If necessary
+ * and possible the ntfs inode of the mft record is locked and the base vfs
+ * inode is pinned.  The locked ntfs inode is then returned in @locked_ni.  The
+ * caller is responsible for unlocking the ntfs inode and unpinning the base
+ * vfs inode.
+ *
+ * Return TRUE if the mft record may be written out and FALSE if not.
+ *
+ * The caller has locked the page and cleared the uptodate flag on it which
+ * means that we can safely write out any dirty mft records that do not have
+ * their inodes in icache (as determined by ilookup5()), since anyone
+ * opening/creating such an inode would block when attempting to map the mft
+ * record in read_cache_page() until we are finished with the write out.
+ *
+ * Here is a description of the tests we perform:
+ *
+ * If the inode is found in icache we know the mft record must be a base mft
+ * record.  If it is dirty, we do not write it and return FALSE as the vfs
+ * inode write paths will result in the access times being updated which would
+ * cause the base mft record to be redirtied and written out again.  (We know
+ * the access time update will modify the base mft record because Windows
+ * chkdsk complains if the standard information attribute is not in the base
+ * mft record.)
+ *
+ * If the inode is in icache and not dirty, we attempt to lock the mft record
+ * and if we find the lock was already taken, it is not safe to write the mft
+ * record and we return FALSE.
+ *
+ * If we manage to obtain the lock we have exclusive access to the mft record,
+ * which also allows us safe writeout of the mft record.  We then set
+ * @locked_ni to the locked ntfs inode and return TRUE.
+ *
+ * Note we cannot just lock the mft record and sleep while waiting for the lock
+ * because this would deadlock due to lock reversal (normally the mft record is
+ * locked before the page is locked but we already have the page locked here
+ * when we try to lock the mft record).
+ *
+ * If the inode is not in icache we need to perform further checks.
+ *
+ * If the mft record is not a FILE record or it is a base mft record, we can
+ * safely write it and return TRUE.
+ *
+ * We now know the mft record is an extent mft record.  We check if the inode
+ * corresponding to its base mft record is in icache and obtain a reference to
+ * it if it is.  If it is not, we can safely write it and return TRUE.
+ *
+ * We now have the base inode for the extent mft record.  We check if it has an
+ * ntfs inode for the extent mft record attached and if not it is safe to write
+ * the extent mft record and we return TRUE.
+ *
+ * The ntfs inode for the extent mft record is attached to the base inode so we
+ * attempt to lock the extent mft record and if we find the lock was already
+ * taken, it is not safe to write the extent mft record and we return FALSE.
+ *
+ * If we manage to obtain the lock we have exclusive access to the extent mft
+ * record, which also allows us safe writeout of the extent mft record.  We
+ * set the ntfs inode of the extent mft record clean and then set @locked_ni to
+ * the now locked ntfs inode and return TRUE.
+ *
+ * Note, the reason for actually writing dirty mft records here and not just
+ * relying on the vfs inode dirty code paths is that we can have mft records
+ * modified without them ever having actual inodes in memory.  Also we can have
+ * dirty mft records with clean ntfs inodes in memory.  None of the described
+ * cases would result in the dirty mft records being written out if we only
+ * relied on the vfs inode dirty code paths.  And these cases can really occur
+ * during allocation of new mft records and in particular when the
+ * initialized_size of the $MFT/$DATA attribute is extended and the new space
+ * is initialized using ntfs_mft_record_format().  The clean inode can then
+ * appear if the mft record is reused for a new inode before it got written
+ * out.
+ */
+BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
+		const MFT_RECORD *m, ntfs_inode **locked_ni)
+{
+	struct super_block *sb = vol->sb;
+	struct inode *mft_vi = vol->mft_ino;
+	struct inode *vi;
+	ntfs_inode *ni, *eni, **extent_nis;
+	int i;
+	ntfs_attr na;
+
+	ntfs_debug("Entering for inode 0x%lx.", mft_no);
+	/*
+	 * Normally we do not return a locked inode so set @locked_ni to NULL.
+	 */
+	BUG_ON(!locked_ni);
+	*locked_ni = NULL;
+	/*
+	 * Check if the inode corresponding to this mft record is in the VFS
+	 * inode cache and obtain a reference to it if it is.
+	 */
+	ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
+	na.mft_no = mft_no;
+	na.name = NULL;
+	na.name_len = 0;
+	na.type = AT_UNUSED;
+	/*
+	 * For inode 0, i.e. $MFT itself, we cannot use ilookup5() from here or
+	 * we deadlock because the inode is already locked by the kernel
+	 * (fs/fs-writeback.c::__sync_single_inode()) and ilookup5() waits
+	 * until the inode is unlocked before returning it and it never gets
+ * unlocked because ntfs_may_write_mft_record() never returns.  )-:
+	 * Fortunately, we have inode 0 pinned in icache for the duration of
+	 * the mount so we can access it directly.
+	 */
+	if (!mft_no) {
+		/* Balance the below iput(). */
+		vi = igrab(mft_vi);
+		BUG_ON(vi != mft_vi);
+	} else
+		vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode, &na);
+	if (vi) {
+		ntfs_debug("Base inode 0x%lx is in icache.", mft_no);
+		/* The inode is in icache. */
+		ni = NTFS_I(vi);
+		/* Take a reference to the ntfs inode. */
+		atomic_inc(&ni->count);
+		/* If the inode is dirty, do not write this record. */
+		if (NInoDirty(ni)) {
+			ntfs_debug("Inode 0x%lx is dirty, do not write it.",
+					mft_no);
+			atomic_dec(&ni->count);
+			iput(vi);
+			return FALSE;
+		}
+		ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
+		/* The inode is not dirty, try to take the mft record lock. */
+		if (unlikely(down_trylock(&ni->mrec_lock))) {
+			ntfs_debug("Mft record 0x%lx is already locked, do "
+					"not write it.", mft_no);
+			atomic_dec(&ni->count);
+			iput(vi);
+			return FALSE;
+		}
+		ntfs_debug("Managed to lock mft record 0x%lx, write it.",
+				mft_no);
+		/*
+		 * The write has to occur while we hold the mft record lock so
+		 * return the locked ntfs inode.
+		 */
+		*locked_ni = ni;
+		return TRUE;
+	}
+	ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
+	/* The inode is not in icache. */
+	/* Write the record if it is not an mft record (type "FILE"). */
+	if (!ntfs_is_mft_record(m->magic)) {
+		ntfs_debug("Mft record 0x%lx is not a FILE record, write it.",
+				mft_no);
+		return TRUE;
+	}
+	/* Write the mft record if it is a base inode. */
+	if (!m->base_mft_record) {
+		ntfs_debug("Mft record 0x%lx is a base record, write it.",
+				mft_no);
+		return TRUE;
+	}
+	/*
+	 * This is an extent mft record.  Check if the inode corresponding to
+	 * its base mft record is in icache and obtain a reference to it if it
+	 * is.
+	 */
+	na.mft_no = MREF_LE(m->base_mft_record);
+	ntfs_debug("Mft record 0x%lx is an extent record.  Looking for base "
+			"inode 0x%lx in icache.", mft_no, na.mft_no);
+	vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode, &na);
+	if (!vi) {
+		/*
+		 * The base inode is not in icache, write this extent mft
+		 * record.
+		 */
+		ntfs_debug("Base inode 0x%lx is not in icache, write the "
+				"extent record.", na.mft_no);
+		return TRUE;
+	}
+	ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
+	/*
+	 * The base inode is in icache.  Check if it has the extent inode
+	 * corresponding to this extent mft record attached.
+	 */
+	ni = NTFS_I(vi);
+	down(&ni->extent_lock);
+	if (ni->nr_extents <= 0) {
+		/*
+		 * The base inode has no attached extent inodes, write this
+		 * extent mft record.
+		 */
+		up(&ni->extent_lock);
+		iput(vi);
+		ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
+				"write the extent record.", na.mft_no);
+		return TRUE;
+	}
+	/* Iterate over the attached extent inodes. */
+	extent_nis = ni->ext.extent_ntfs_inos;
+	for (eni = NULL, i = 0; i < ni->nr_extents; ++i) {
+		if (mft_no == extent_nis[i]->mft_no) {
+			/*
+			 * Found the extent inode corresponding to this extent
+			 * mft record.
+			 */
+			eni = extent_nis[i];
+			break;
+		}
+	}
+	/*
+	 * If the extent inode was not attached to the base inode, write this
+	 * extent mft record.
+	 */
+	if (!eni) {
+		up(&ni->extent_lock);
+		iput(vi);
+		ntfs_debug("Extent inode 0x%lx is not attached to its base "
+				"inode 0x%lx, write the extent record.",
+				mft_no, na.mft_no);
+		return TRUE;
+	}
+	ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.",
+			mft_no, na.mft_no);
+	/* Take a reference to the extent ntfs inode. */
+	atomic_inc(&eni->count);
+	up(&ni->extent_lock);
+	/*
+	 * Found the extent inode corresponding to this extent mft record.
+	 * Try to take the mft record lock.
+	 */
+	if (unlikely(down_trylock(&eni->mrec_lock))) {
+		atomic_dec(&eni->count);
+		iput(vi);
+		ntfs_debug("Extent mft record 0x%lx is already locked, do "
+				"not write it.", mft_no);
+		return FALSE;
+	}
+	ntfs_debug("Managed to lock extent mft record 0x%lx, write it.",
+			mft_no);
+	if (NInoTestClearDirty(eni))
+		ntfs_debug("Extent inode 0x%lx is dirty, marking it clean.",
+				mft_no);
+	/*
+	 * The write has to occur while we hold the mft record lock so return
+	 * the locked extent ntfs inode.
+	 */
+	*locked_ni = eni;
+	return TRUE;
+}
+
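+/*
+ * Hypothetical caller sketch for ntfs_may_write_mft_record() (illustration
+ * only; the write step and the exact cleanup are assumptions based on the
+ * description above):
+ *
+ *	ntfs_inode *locked_ni;
+ *
+ *	if (ntfs_may_write_mft_record(vol, mft_no, m, &locked_ni)) {
+ *		... write out the mst protected mft record @m ...
+ *		if (locked_ni) {
+ *			up(&locked_ni->mrec_lock);
+ *			atomic_dec(&locked_ni->count);
+ *			iput(base_vi);
+ *		}
+ *	}
+ *
+ * Here base_vi stands for the base vfs inode pinned by the lookup, which the
+ * caller must release as documented above.
+ */
+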
+static const char *es = "  Leaving inconsistent metadata.  Unmount and run "
+		"chkdsk.";
+
+/**
+ * ntfs_mft_bitmap_find_and_alloc_free_rec_nolock - allocate a free mft record
+ * @vol:	volume on which to search for a free mft record
+ * @base_ni:	open base inode if allocating an extent mft record or NULL
+ *
+ * Search for a free mft record in the mft bitmap attribute on the ntfs volume
+ * @vol.
+ *
+ * If @base_ni is NULL start the search at the default allocator position.
+ *
+ * If @base_ni is not NULL start the search at the mft record after the base
+ * mft record @base_ni.
+ *
+ * Return the allocated mft record number on success and -errno on error.  An
+ * error code of
+ * -ENOSPC means that there are no free mft records in the currently
+ * initialized mft bitmap.
+ *
+ * Locking: Caller must hold vol->mftbmp_lock for writing.
+ */
+static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
+		ntfs_inode *base_ni)
+{
+	s64 pass_end, ll, data_pos, pass_start, ofs, bit;
+	struct address_space *mftbmp_mapping;
+	u8 *buf, *byte;
+	struct page *page;
+	unsigned int page_ofs, size;
+	u8 pass, b;
+
+	ntfs_debug("Searching for free mft record in the currently "
+			"initialized mft bitmap.");
+	mftbmp_mapping = vol->mftbmp_ino->i_mapping;
+	/*
+	 * Set the end of the pass making sure we do not overflow the mft
+	 * bitmap.
+	 */
+	pass_end = NTFS_I(vol->mft_ino)->allocated_size >>
+			vol->mft_record_size_bits;
+	ll = NTFS_I(vol->mftbmp_ino)->initialized_size << 3;
+	if (pass_end > ll)
+		pass_end = ll;
+	pass = 1;
+	if (!base_ni)
+		data_pos = vol->mft_data_pos;
+	else
+		data_pos = base_ni->mft_no + 1;
+	if (data_pos < 24)
+		data_pos = 24;
+	if (data_pos >= pass_end) {
+		data_pos = 24;
+		pass = 2;
+		/* This happens on a freshly formatted volume. */
+		if (data_pos >= pass_end)
+			return -ENOSPC;
+	}
+	pass_start = data_pos;
+	ntfs_debug("Starting bitmap search: pass %u, pass_start 0x%llx, "
+			"pass_end 0x%llx, data_pos 0x%llx.", pass,
+			(long long)pass_start, (long long)pass_end,
+			(long long)data_pos);
+	/* Loop until a free mft record is found. */
+	for (; pass <= 2;) {
+		/* Cap size to pass_end. */
+		ofs = data_pos >> 3;
+		page_ofs = ofs & ~PAGE_CACHE_MASK;
+		size = PAGE_CACHE_SIZE - page_ofs;
+		ll = ((pass_end + 7) >> 3) - ofs;
+		if (size > ll)
+			size = ll;
+		size <<= 3;
+		/*
+		 * If we are still within the active pass, search the next page
+		 * for a zero bit.
+		 */
+		if (size) {
+			page = ntfs_map_page(mftbmp_mapping,
+					ofs >> PAGE_CACHE_SHIFT);
+			if (unlikely(IS_ERR(page))) {
+				ntfs_error(vol->sb, "Failed to read mft "
+						"bitmap, aborting.");
+				return PTR_ERR(page);
+			}
+			buf = (u8*)page_address(page) + page_ofs;
+			bit = data_pos & 7;
+			data_pos &= ~7ull;
+			ntfs_debug("Before inner for loop: size 0x%x, "
+					"data_pos 0x%llx, bit 0x%llx", size,
+					(long long)data_pos, (long long)bit);
+			for (; bit < size && data_pos + bit < pass_end;
+					bit &= ~7ull, bit += 8) {
+				byte = buf + (bit >> 3);
+				if (*byte == 0xff)
+					continue;
+				b = ffz((unsigned long)*byte);
+				if (b < 8 && b >= (bit & 7)) {
+					ll = data_pos + (bit & ~7ull) + b;
+					if (unlikely(ll > (1ll << 32))) {
+						ntfs_unmap_page(page);
+						return -ENOSPC;
+					}
+					*byte |= 1 << b;
+					flush_dcache_page(page);
+					set_page_dirty(page);
+					ntfs_unmap_page(page);
+					ntfs_debug("Done.  (Found and "
+							"allocated mft record "
+							"0x%llx.)",
+							(long long)ll);
+					return ll;
+				}
+			}
+			ntfs_debug("After inner for loop: size 0x%x, "
+					"data_pos 0x%llx, bit 0x%llx", size,
+					(long long)data_pos, (long long)bit);
+			data_pos += size;
+			ntfs_unmap_page(page);
+			/*
+			 * If the end of the pass has not been reached yet,
+			 * continue searching the mft bitmap for a zero bit.
+			 */
+			if (data_pos < pass_end)
+				continue;
+		}
+		/* Do the next pass. */
+		if (++pass == 2) {
+			/*
+			 * Starting the second pass, in which we scan the first
+			 * part of the zone which we omitted earlier.
+			 */
+			pass_end = pass_start;
+			data_pos = pass_start = 24;
+			ntfs_debug("pass %i, pass_start 0x%llx, pass_end "
+					"0x%llx.", pass, (long long)pass_start,
+					(long long)pass_end);
+			if (data_pos >= pass_end)
+				break;
+		}
+	}
+	/* No free mft records in currently initialized mft bitmap. */
+	ntfs_debug("Done.  (No free mft records left in currently initialized "
+			"mft bitmap.)");
+	return -ENOSPC;
+}
+
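+/*
+ * Worked example for the byte scan above (the bitmap contents are made up):
+ * a bitmap byte of 0xd7 (binary 11010111) is not 0xff, so ffz() returns 3,
+ * the lowest clear bit.  Provided bit 3 is at or after the starting bit
+ * within this byte (b >= (bit & 7)), the allocated mft record number becomes
+ *	ll = data_pos + (bit & ~7ull) + 3
+ * and the byte is updated to 0xdf before the page is marked dirty.
+ */
+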
+/**
+ * ntfs_mft_bitmap_extend_allocation_nolock - extend mft bitmap by a cluster
+ * @vol:	volume on which to extend the mft bitmap attribute
+ *
+ * Extend the mft bitmap attribute on the ntfs volume @vol by one cluster.
+ *
+ * Note: Only changes allocated_size, i.e. does not touch initialized_size or
+ * data_size.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - Caller must hold vol->mftbmp_lock for writing.
+ *	    - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
+ *	      writing and releases it before returning.
+ *	    - This function takes vol->lcnbmp_lock for writing and releases it
+ *	      before returning.
+ */
+static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
+{
+	LCN lcn;
+	s64 ll;
+	struct page *page;
+	ntfs_inode *mft_ni, *mftbmp_ni;
+	runlist_element *rl, *rl2 = NULL;
+	ntfs_attr_search_ctx *ctx = NULL;
+	MFT_RECORD *mrec;
+	ATTR_RECORD *a = NULL;
+	int ret, mp_size;
+	u32 old_alen = 0;
+	u8 *b, tb;
+	struct {
+		u8 added_cluster:1;
+		u8 added_run:1;
+		u8 mp_rebuilt:1;
+	} status = { 0, 0, 0 };
+
+	ntfs_debug("Extending mft bitmap allocation.");
+	mft_ni = NTFS_I(vol->mft_ino);
+	mftbmp_ni = NTFS_I(vol->mftbmp_ino);
+	/*
+	 * Determine the last lcn of the mft bitmap.  The allocated size of the
+	 * mft bitmap cannot be zero so we are ok to do this.
+	 * ntfs_find_vcn() returns the runlist locked on success.
+	 */
+	rl = ntfs_find_vcn(mftbmp_ni, (mftbmp_ni->allocated_size - 1) >>
+			vol->cluster_size_bits, TRUE);
+	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+		ntfs_error(vol->sb, "Failed to determine last allocated "
+				"cluster of mft bitmap attribute.");
+		if (!IS_ERR(rl)) {
+			up_write(&mftbmp_ni->runlist.lock);
+			ret = -EIO;
+		} else
+			ret = PTR_ERR(rl);
+		return ret;
+	}
+	lcn = rl->lcn + rl->length;
+	ntfs_debug("Last lcn of mft bitmap attribute is 0x%llx.",
+			(long long)lcn);
+	/*
+	 * Attempt to get the cluster following the last allocated cluster by
+	 * hand as it may be in the MFT zone so the allocator would not give it
+	 * to us.
+	 */
+	ll = lcn >> 3;
+	page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
+			ll >> PAGE_CACHE_SHIFT);
+	if (IS_ERR(page)) {
+		up_write(&mftbmp_ni->runlist.lock);
+		ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
+		return PTR_ERR(page);
+	}
+	b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+	tb = 1 << (lcn & 7ull);
+	down_write(&vol->lcnbmp_lock);
+	if (*b != 0xff && !(*b & tb)) {
+		/* Next cluster is free, allocate it. */
+		*b |= tb;
+		flush_dcache_page(page);
+		set_page_dirty(page);
+		up_write(&vol->lcnbmp_lock);
+		ntfs_unmap_page(page);
+		/* Update the mft bitmap runlist. */
+		rl->length++;
+		rl[1].vcn++;
+		status.added_cluster = 1;
+		ntfs_debug("Appending one cluster to mft bitmap.");
+	} else {
+		up_write(&vol->lcnbmp_lock);
+		ntfs_unmap_page(page);
+		/* Allocate a cluster from the DATA_ZONE. */
+		rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE);
+		if (IS_ERR(rl2)) {
+			up_write(&mftbmp_ni->runlist.lock);
+			ntfs_error(vol->sb, "Failed to allocate a cluster for "
+					"the mft bitmap.");
+			return PTR_ERR(rl2);
+		}
+		rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2);
+		if (IS_ERR(rl)) {
+			up_write(&mftbmp_ni->runlist.lock);
+			ntfs_error(vol->sb, "Failed to merge runlists for mft "
+					"bitmap.");
+			if (ntfs_cluster_free_from_rl(vol, rl2)) {
+				ntfs_error(vol->sb, "Failed to dealocate "
+						"allocated cluster.%s", es);
+				NVolSetErrors(vol);
+			}
+			ntfs_free(rl2);
+			return PTR_ERR(rl);
+		}
+		mftbmp_ni->runlist.rl = rl;
+		status.added_run = 1;
+		ntfs_debug("Adding one run to mft bitmap.");
+		/* Find the last run in the new runlist. */
+		for (; rl[1].length; rl++)
+			;
+	}
+	/*
+	 * Update the attribute record as well.  Note: @rl is the last
+	 * (non-terminator) runlist element of mft bitmap.
+	 */
+	mrec = map_mft_record(mft_ni);
+	if (IS_ERR(mrec)) {
+		ntfs_error(vol->sb, "Failed to map mft record.");
+		ret = PTR_ERR(mrec);
+		goto undo_alloc;
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.");
+		ret = -ENOMEM;
+		goto undo_alloc;
+	}
+	ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+			mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
+			0, ctx);
+	if (unlikely(ret)) {
+		ntfs_error(vol->sb, "Failed to find last attribute extent of "
+				"mft bitmap attribute.");
+		if (ret == -ENOENT)
+			ret = -EIO;
+		goto undo_alloc;
+	}
+	a = ctx->attr;
+	ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
+	/* Search back for the element containing this extent's lowest vcn. */
+	for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) {
+		if (ll >= rl2->vcn)
+			break;
+	}
+	BUG_ON(ll < rl2->vcn);
+	BUG_ON(ll >= rl2->vcn + rl2->length);
+	/* Get the size for the new mapping pairs array for this extent. */
+	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll);
+	if (unlikely(mp_size <= 0)) {
+		ntfs_error(vol->sb, "Get size for mapping pairs failed for "
+				"mft bitmap attribute extent.");
+		ret = mp_size;
+		if (!ret)
+			ret = -EIO;
+		goto undo_alloc;
+	}
+	/* Expand the attribute record if necessary. */
+	old_alen = le32_to_cpu(a->length);
+	ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
+			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
+	if (unlikely(ret)) {
+		if (ret != -ENOSPC) {
+			ntfs_error(vol->sb, "Failed to resize attribute "
+					"record for mft bitmap attribute.");
+			goto undo_alloc;
+		}
+		// TODO: Deal with this by moving this extent to a new mft
+		// record or by starting a new extent in a new mft record or by
+		// moving other attributes out of this mft record.
+		ntfs_error(vol->sb, "Not enough space in this mft record to "
+				"accomodate extended mft bitmap attribute "
+				"extent.  Cannot handle this yet.");
+		ret = -EOPNOTSUPP;
+		goto undo_alloc;
+	}
+	status.mp_rebuilt = 1;
+	/* Generate the mapping pairs array directly into the attr record. */
+	ret = ntfs_mapping_pairs_build(vol, (u8*)a +
+			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
+			mp_size, rl2, ll, NULL);
+	if (unlikely(ret)) {
+		ntfs_error(vol->sb, "Failed to build mapping pairs array for "
+				"mft bitmap attribute.");
+		goto undo_alloc;
+	}
+	/* Update the highest_vcn. */
+	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
+	/*
+	 * We now have extended the mft bitmap allocated_size by one cluster.
+	 * Reflect this in the ntfs_inode structure and the attribute record.
+	 */
+	if (a->data.non_resident.lowest_vcn) {
+		/*
+		 * We are not in the first attribute extent, switch to it, but
+		 * first ensure the changes will make it to disk later.
+		 */
+		flush_dcache_mft_record_page(ctx->ntfs_ino);
+		mark_mft_record_dirty(ctx->ntfs_ino);
+		ntfs_attr_reinit_search_ctx(ctx);
+		ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+				mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL,
+				0, ctx);
+		if (unlikely(ret)) {
+			ntfs_error(vol->sb, "Failed to find first attribute "
+					"extent of mft bitmap attribute.");
+			goto restore_undo_alloc;
+		}
+		a = ctx->attr;
+	}
+	mftbmp_ni->allocated_size += vol->cluster_size;
+	a->data.non_resident.allocated_size =
+			cpu_to_sle64(mftbmp_ni->allocated_size);
+	/* Ensure the changes make it to disk. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	up_write(&mftbmp_ni->runlist.lock);
+	ntfs_debug("Done.");
+	return 0;
+restore_undo_alloc:
+	ntfs_attr_reinit_search_ctx(ctx);
+	if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+			mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
+			0, ctx)) {
+		ntfs_error(vol->sb, "Failed to find last attribute extent of "
+				"mft bitmap attribute.%s", es);
+		mftbmp_ni->allocated_size += vol->cluster_size;
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(mft_ni);
+		up_write(&mftbmp_ni->runlist.lock);
+		/*
+		 * The only thing that is now wrong is ->allocated_size of the
+		 * base attribute extent which chkdsk should be able to fix.
+		 */
+		NVolSetErrors(vol);
+		return ret;
+	}
+	a = ctx->attr;
+	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2);
+undo_alloc:
+	if (status.added_cluster) {
+		/* Truncate the last run in the runlist by one cluster. */
+		rl->length--;
+		rl[1].vcn--;
+	} else if (status.added_run) {
+		lcn = rl->lcn;
+		/* Remove the last run from the runlist. */
+		rl->lcn = rl[1].lcn;
+		rl->length = 0;
+	}
+	/* Deallocate the cluster. */
+	down_write(&vol->lcnbmp_lock);
+	if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
+		ntfs_error(vol->sb, "Failed to free allocated cluster.%s", es);
+		NVolSetErrors(vol);
+	}
+	up_write(&vol->lcnbmp_lock);
+	if (status.mp_rebuilt) {
+		if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
+				a->data.non_resident.mapping_pairs_offset),
+				old_alen - le16_to_cpu(
+				a->data.non_resident.mapping_pairs_offset),
+				rl2, ll, NULL)) {
+			ntfs_error(vol->sb, "Failed to restore mapping pairs "
+					"array.%s", es);
+			NVolSetErrors(vol);
+		}
+		if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
+			ntfs_error(vol->sb, "Failed to restore attribute "
+					"record.%s", es);
+			NVolSetErrors(vol);
+		}
+		flush_dcache_mft_record_page(ctx->ntfs_ino);
+		mark_mft_record_dirty(ctx->ntfs_ino);
+	}
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (!IS_ERR(mrec))
+		unmap_mft_record(mft_ni);
+	up_write(&mftbmp_ni->runlist.lock);
+	return ret;
+}
+
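+/*
+ * Worked example for the lcn bitmap probe above (the numbers are made up):
+ * if the cluster following the last allocated cluster of the mft bitmap is
+ * lcn 12346, the byte to inspect is at offset 12346 >> 3 = 1543 into the lcn
+ * bitmap and the bit to test is 1 << (12346 & 7) = 0x04.  Only if that bit is
+ * clear can the cluster be taken in place; otherwise a cluster is allocated
+ * from the DATA_ZONE and merged into the runlist instead.
+ */
+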
+/**
+ * ntfs_mft_bitmap_extend_initialized_nolock - extend mftbmp initialized data
+ * @vol:	volume on which to extend the mft bitmap attribute
+ *
+ * Extend the initialized portion of the mft bitmap attribute on the ntfs
+ * volume @vol by 8 bytes.
+ *
+ * Note:  Only changes initialized_size and data_size, i.e. requires that
+ * allocated_size is big enough to fit the new initialized_size.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: Caller must hold vol->mftbmp_lock for writing.
+ */
+static int ntfs_mft_bitmap_extend_initialized_nolock(ntfs_volume *vol)
+{
+	s64 old_data_size, old_initialized_size;
+	struct inode *mftbmp_vi;
+	ntfs_inode *mft_ni, *mftbmp_ni;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *mrec;
+	ATTR_RECORD *a;
+	int ret;
+
+	ntfs_debug("Extending mft bitmap initiailized (and data) size.");
+	mft_ni = NTFS_I(vol->mft_ino);
+	mftbmp_vi = vol->mftbmp_ino;
+	mftbmp_ni = NTFS_I(mftbmp_vi);
+	/* Get the attribute record. */
+	mrec = map_mft_record(mft_ni);
+	if (IS_ERR(mrec)) {
+		ntfs_error(vol->sb, "Failed to map mft record.");
+		return PTR_ERR(mrec);
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.");
+		ret = -ENOMEM;
+		goto unm_err_out;
+	}
+	ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+			mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(ret)) {
+		ntfs_error(vol->sb, "Failed to find first attribute extent of "
+				"mft bitmap attribute.");
+		if (ret == -ENOENT)
+			ret = -EIO;
+		goto put_err_out;
+	}
+	a = ctx->attr;
+	old_data_size = mftbmp_vi->i_size;
+	old_initialized_size = mftbmp_ni->initialized_size;
+	/*
+	 * We can simply update the initialized_size before filling the space
+	 * with zeroes because the caller is holding the mft bitmap lock for
+	 * writing which ensures that no one else is trying to access the data.
+	 */
+	mftbmp_ni->initialized_size += 8;
+	a->data.non_resident.initialized_size =
+			cpu_to_sle64(mftbmp_ni->initialized_size);
+	if (mftbmp_ni->initialized_size > mftbmp_vi->i_size) {
+		mftbmp_vi->i_size = mftbmp_ni->initialized_size;
+		a->data.non_resident.data_size =
+				cpu_to_sle64(mftbmp_vi->i_size);
+	}
+	/* Ensure the changes make it to disk. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	/* Initialize the mft bitmap attribute value with zeroes. */
+	ret = ntfs_attr_set(mftbmp_ni, old_initialized_size, 8, 0);
+	if (likely(!ret)) {
+		ntfs_debug("Done.  (Wrote eight initialized bytes to mft "
+				"bitmap.");
+		return 0;
+	}
+	ntfs_error(vol->sb, "Failed to write to mft bitmap.");
+	/* Try to recover from the error. */
+	mrec = map_mft_record(mft_ni);
+	if (IS_ERR(mrec)) {
+		ntfs_error(vol->sb, "Failed to map mft record.%s", es);
+		NVolSetErrors(vol);
+		return ret;
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.%s", es);
+		NVolSetErrors(vol);
+		goto unm_err_out;
+	}
+	if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+			mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx)) {
+		ntfs_error(vol->sb, "Failed to find first attribute extent of "
+				"mft bitmap attribute.%s", es);
+		NVolSetErrors(vol);
+put_err_out:
+		ntfs_attr_put_search_ctx(ctx);
+unm_err_out:
+		unmap_mft_record(mft_ni);
+		goto err_out;
+	}
+	a = ctx->attr;
+	mftbmp_ni->initialized_size = old_initialized_size;
+	a->data.non_resident.initialized_size =
+			cpu_to_sle64(old_initialized_size);
+	if (mftbmp_vi->i_size != old_data_size) {
+		mftbmp_vi->i_size = old_data_size;
+		a->data.non_resident.data_size = cpu_to_sle64(old_data_size);
+	}
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	ntfs_debug("Restored status of mftbmp: allocated_size 0x%llx, "
+			"data_size 0x%llx, initialized_size 0x%llx.",
+			(long long)mftbmp_ni->allocated_size,
+			(long long)mftbmp_vi->i_size,
+			(long long)mftbmp_ni->initialized_size);
+err_out:
+	return ret;
+}
+
+/**
+ * ntfs_mft_data_extend_allocation_nolock - extend mft data attribute
+ * @vol:	volume on which to extend the mft data attribute
+ *
+ * Extend the mft data attribute on the ntfs volume @vol by 16 mft records
+ * worth of clusters or if not enough space for this by one mft record worth
+ * of clusters.
+ *
+ * Note:  Only changes allocated_size, i.e. does not touch initialized_size or
+ * data_size.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - Caller must hold vol->mftbmp_lock for writing.
+ *	    - This function takes NTFS_I(vol->mft_ino)->runlist.lock for
+ *	      writing and releases it before returning.
+ *	    - This function calls functions which take vol->lcnbmp_lock for
+ *	      writing and release it before returning.
+ */
+static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
+{
+	LCN lcn;
+	VCN old_last_vcn;
+	s64 min_nr, nr, ll = 0;
+	ntfs_inode *mft_ni;
+	runlist_element *rl, *rl2;
+	ntfs_attr_search_ctx *ctx = NULL;
+	MFT_RECORD *mrec;
+	ATTR_RECORD *a = NULL;
+	int ret, mp_size;
+	u32 old_alen = 0;
+	BOOL mp_rebuilt = FALSE;
+
+	ntfs_debug("Extending mft data allocation.");
+	mft_ni = NTFS_I(vol->mft_ino);
+	/*
+	 * Determine the preferred allocation location, i.e. the last lcn of
+	 * the mft data attribute.  The allocated size of the mft data
+	 * attribute cannot be zero so we are ok to do this.
+	 * ntfs_find_vcn() returns the runlist locked on success.
+	 */
+	rl = ntfs_find_vcn(mft_ni, (mft_ni->allocated_size - 1) >>
+			vol->cluster_size_bits, TRUE);
+	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+		ntfs_error(vol->sb, "Failed to determine last allocated "
+				"cluster of mft data attribute.");
+		if (!IS_ERR(rl)) {
+			up_write(&mft_ni->runlist.lock);
+			ret = -EIO;
+		} else
+			ret = PTR_ERR(rl);
+		return ret;
+	}
+	lcn = rl->lcn + rl->length;
+	ntfs_debug("Last lcn of mft data attribute is 0x%llx.",
+			(long long)lcn);
+	/* Minimum allocation is one mft record worth of clusters. */
+	min_nr = vol->mft_record_size >> vol->cluster_size_bits;
+	if (!min_nr)
+		min_nr = 1;
+	/* Want to allocate 16 mft records worth of clusters. */
+	nr = vol->mft_record_size << 4 >> vol->cluster_size_bits;
+	if (!nr)
+		nr = min_nr;
+	/* Ensure we do not go above 2^32-1 mft records. */
+	if (unlikely((mft_ni->allocated_size +
+			(nr << vol->cluster_size_bits)) >>
+			vol->mft_record_size_bits >= (1ll << 32))) {
+		nr = min_nr;
+		if (unlikely((mft_ni->allocated_size +
+				(nr << vol->cluster_size_bits)) >>
+				vol->mft_record_size_bits >= (1ll << 32))) {
+			ntfs_warning(vol->sb, "Cannot allocate mft record "
+					"because the maximum number of inodes "
+					"(2^32) has already been reached.");
+			up_write(&mft_ni->runlist.lock);
+			return -ENOSPC;
+		}
+	}
+	ntfs_debug("Trying mft data allocation with %s cluster count %lli.",
+			nr > min_nr ? "default" : "minimal", (long long)nr);
+	old_last_vcn = rl[1].vcn;
+	do {
+		rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE);
+		if (likely(!IS_ERR(rl2)))
+			break;
+		if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
+			ntfs_error(vol->sb, "Failed to allocate the minimal "
+					"number of clusters (%lli) for the "
+					"mft data attribute.", (long long)nr);
+			up_write(&mft_ni->runlist.lock);
+			return PTR_ERR(rl2);
+		}
+		/*
+		 * There is not enough space to do the allocation, but there
+		 * might be enough space to do a minimal allocation so try that
+		 * before failing.
+		 */
+		nr = min_nr;
+		ntfs_debug("Retrying mft data allocation with minimal cluster "
+				"count %lli.", (long long)nr);
+	} while (1);
+	rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2);
+	if (IS_ERR(rl)) {
+		up_write(&mft_ni->runlist.lock);
+		ntfs_error(vol->sb, "Failed to merge runlists for mft data "
+				"attribute.");
+		if (ntfs_cluster_free_from_rl(vol, rl2)) {
+			ntfs_error(vol->sb, "Failed to dealocate clusters "
+					"from the mft data attribute.%s", es);
+			NVolSetErrors(vol);
+		}
+		ntfs_free(rl2);
+		return PTR_ERR(rl);
+	}
+	mft_ni->runlist.rl = rl;
+	ntfs_debug("Allocated %lli clusters.", nr);
+	/* Find the last run in the new runlist. */
+	for (; rl[1].length; rl++)
+		;
+	/* Update the attribute record as well. */
+	mrec = map_mft_record(mft_ni);
+	if (IS_ERR(mrec)) {
+		ntfs_error(vol->sb, "Failed to map mft record.");
+		ret = PTR_ERR(mrec);
+		goto undo_alloc;
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.");
+		ret = -ENOMEM;
+		goto undo_alloc;
+	}
+	ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+			CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx);
+	if (unlikely(ret)) {
+		ntfs_error(vol->sb, "Failed to find last attribute extent of "
+				"mft data attribute.");
+		if (ret == -ENOENT)
+			ret = -EIO;
+		goto undo_alloc;
+	}
+	a = ctx->attr;
+	ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
+	/* Search back for the element containing this extent's lowest vcn. */
+	for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) {
+		if (ll >= rl2->vcn)
+			break;
+	}
+	BUG_ON(ll < rl2->vcn);
+	BUG_ON(ll >= rl2->vcn + rl2->length);
+	/* Get the size for the new mapping pairs array for this extent. */
+	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll);
+	if (unlikely(mp_size <= 0)) {
+		ntfs_error(vol->sb, "Get size for mapping pairs failed for "
+				"mft data attribute extent.");
+		ret = mp_size;
+		if (!ret)
+			ret = -EIO;
+		goto undo_alloc;
+	}
+	/* Expand the attribute record if necessary. */
+	old_alen = le32_to_cpu(a->length);
+	ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
+			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
+	if (unlikely(ret)) {
+		if (ret != -ENOSPC) {
+			ntfs_error(vol->sb, "Failed to resize attribute "
+					"record for mft data attribute.");
+			goto undo_alloc;
+		}
+		// TODO: Deal with this by moving this extent to a new mft
+		// record or by starting a new extent in a new mft record or by
+		// moving other attributes out of this mft record.
+		// Note: Use the special reserved mft records and ensure that
+		// this extent is not required to find the mft record in
+		// question.
+		ntfs_error(vol->sb, "Not enough space in this mft record to "
+				"accomodate extended mft data attribute "
+				"extent.  Cannot handle this yet.");
+		ret = -EOPNOTSUPP;
+		goto undo_alloc;
+	}
+	mp_rebuilt = TRUE;
+	/* Generate the mapping pairs array directly into the attr record. */
+	ret = ntfs_mapping_pairs_build(vol, (u8*)a +
+			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
+			mp_size, rl2, ll, NULL);
+	if (unlikely(ret)) {
+		ntfs_error(vol->sb, "Failed to build mapping pairs array of "
+				"mft data attribute.");
+		goto undo_alloc;
+	}
+	/* Update the highest_vcn. */
+	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
+	/*
+	 * We now have extended the mft data allocated_size by nr clusters.
+	 * Reflect this in the ntfs_inode structure and the attribute record.
+	 * @rl is the last (non-terminator) runlist element of mft data
+	 * attribute.
+	 */
+	if (a->data.non_resident.lowest_vcn) {
+		/*
+		 * We are not in the first attribute extent, switch to it, but
+		 * first ensure the changes will make it to disk later.
+		 */
+		flush_dcache_mft_record_page(ctx->ntfs_ino);
+		mark_mft_record_dirty(ctx->ntfs_ino);
+		ntfs_attr_reinit_search_ctx(ctx);
+		ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name,
+				mft_ni->name_len, CASE_SENSITIVE, 0, NULL, 0,
+				ctx);
+		if (unlikely(ret)) {
+			ntfs_error(vol->sb, "Failed to find first attribute "
+					"extent of mft data attribute.");
+			goto restore_undo_alloc;
+		}
+		a = ctx->attr;
+	}
+	mft_ni->allocated_size += nr << vol->cluster_size_bits;
+	a->data.non_resident.allocated_size =
+			cpu_to_sle64(mft_ni->allocated_size);
+	/* Ensure the changes make it to disk. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	up_write(&mft_ni->runlist.lock);
+	ntfs_debug("Done.");
+	return 0;
+restore_undo_alloc:
+	ntfs_attr_reinit_search_ctx(ctx);
+	if (ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+			CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) {
+		ntfs_error(vol->sb, "Failed to find last attribute extent of "
+				"mft data attribute.%s", es);
+		mft_ni->allocated_size += nr << vol->cluster_size_bits;
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(mft_ni);
+		up_write(&mft_ni->runlist.lock);
+		/*
+		 * The only thing that is now wrong is ->allocated_size of the
+		 * base attribute extent which chkdsk should be able to fix.
+		 */
+		NVolSetErrors(vol);
+		return ret;
+	}
+	a = ctx->attr;
+	a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1);
+undo_alloc:
+	if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1) < 0) {
+		ntfs_error(vol->sb, "Failed to free clusters from mft data "
+				"attribute.%s", es);
+		NVolSetErrors(vol);
+	}
+	if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
+		ntfs_error(vol->sb, "Failed to truncate mft data attribute "
+				"runlist.%s", es);
+		NVolSetErrors(vol);
+	}
+	if (mp_rebuilt) {
+		if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
+				a->data.non_resident.mapping_pairs_offset),
+				old_alen - le16_to_cpu(
+				a->data.non_resident.mapping_pairs_offset),
+				rl2, ll, NULL)) {
+			ntfs_error(vol->sb, "Failed to restore mapping pairs "
+					"array.%s", es);
+			NVolSetErrors(vol);
+		}
+		if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
+			ntfs_error(vol->sb, "Failed to restore attribute "
+					"record.%s", es);
+			NVolSetErrors(vol);
+		}
+		flush_dcache_mft_record_page(ctx->ntfs_ino);
+		mark_mft_record_dirty(ctx->ntfs_ino);
+	}
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (!IS_ERR(mrec))
+		unmap_mft_record(mft_ni);
+	up_write(&mft_ni->runlist.lock);
+	return ret;
+}
+
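+/*
+ * Sizing example for the allocation above (the volume geometry is an
+ * assumption): with 1024-byte mft records and 4096-byte clusters the default
+ * request is nr = (1024 << 4) >> 12 = 4 clusters, i.e. 16 mft records, while
+ * the minimal fallback is min_nr = 1024 >> 12 = 0, rounded up to one cluster
+ * (4 mft records).  If the default allocation fails with -ENOSPC, the minimal
+ * allocation is retried before the error is returned to the caller.
+ */
+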
+/**
+ * ntfs_mft_record_layout - layout an mft record into a memory buffer
+ * @vol:	volume to which the mft record will belong
+ * @mft_no:	mft reference specifying the mft record number
+ * @m:		destination buffer of size >= @vol->mft_record_size bytes
+ *
+ * Lay out an empty, unused mft record with the mft record number @mft_no into
+ * the buffer @m.  The volume @vol is needed because the mft record structure
+ * was modified in NTFS 3.1 so we need to know which volume version this mft
+ * record will be used on.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_mft_record_layout(const ntfs_volume *vol, const s64 mft_no,
+		MFT_RECORD *m)
+{
+	ATTR_RECORD *a;
+
+	ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
+	if (mft_no >= (1ll << 32)) {
+		ntfs_error(vol->sb, "Mft record number 0x%llx exceeds "
+				"maximum of 2^32.", (long long)mft_no);
+		return -ERANGE;
+	}
+	/* Start by clearing the whole mft record to give us a clean slate. */
+	memset(m, 0, vol->mft_record_size);
+	/* Aligned to 2-byte boundary. */
+	if (vol->major_ver < 3 || (vol->major_ver == 3 && !vol->minor_ver))
+		m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD_OLD) + 1) & ~1);
+	else {
+		m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD) + 1) & ~1);
+		/*
+		 * Set the NTFS 3.1+ specific fields while we know that the
+		 * volume version is 3.1+.
+		 */
+		m->reserved = 0;
+		m->mft_record_number = cpu_to_le32((u32)mft_no);
+	}
+	m->magic = magic_FILE;
+	if (vol->mft_record_size >= NTFS_BLOCK_SIZE)
+		m->usa_count = cpu_to_le16(vol->mft_record_size /
+				NTFS_BLOCK_SIZE + 1);
+	else {
+		m->usa_count = cpu_to_le16(1);
+		ntfs_warning(vol->sb, "Sector size is bigger than mft record "
+				"size.  Setting usa_count to 1.  If chkdsk "
+				"reports this as corruption, please email "
+				"linux-ntfs-dev@lists.sourceforge.net stating "
+				"that you saw this message and that the "
+				"modified file system created was corrupt.  "
+				"Thank you.");
+	}
+	/* Set the update sequence number to 1. */
+	*(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = cpu_to_le16(1);
+	m->lsn = 0;
+	m->sequence_number = cpu_to_le16(1);
+	m->link_count = 0;
+	/*
+	 * Place the attributes straight after the update sequence array,
+	 * aligned to 8-byte boundary.
+	 */
+	m->attrs_offset = cpu_to_le16((le16_to_cpu(m->usa_ofs) +
+			(le16_to_cpu(m->usa_count) << 1) + 7) & ~7);
+	m->flags = 0;
+	/*
+	 * Using attrs_offset plus eight bytes (for the termination attribute).
+	 * attrs_offset is already aligned to 8-byte boundary, so no need to
+	 * align again.
+	 */
+	m->bytes_in_use = cpu_to_le32(le16_to_cpu(m->attrs_offset) + 8);
+	m->bytes_allocated = cpu_to_le32(vol->mft_record_size);
+	m->base_mft_record = 0;
+	m->next_attr_instance = 0;
+	/* Add the termination attribute. */
+	a = (ATTR_RECORD*)((u8*)m + le16_to_cpu(m->attrs_offset));
+	a->type = AT_END;
+	a->length = 0;
+	ntfs_debug("Done.");
+	return 0;
+}
+
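+/*
+ * Worked layout example (illustrative; the header sizes are assumptions based
+ * on the standard 42/48 byte mft record headers): for an NTFS 3.1 volume with
+ * 1024-byte mft records the fields set up above come out as
+ *	usa_ofs      = (48 + 1) & ~1			= 0x30
+ *	usa_count    = 1024 / NTFS_BLOCK_SIZE + 1	= 3
+ *	attrs_offset = (0x30 + (3 << 1) + 7) & ~7	= 0x38
+ *	bytes_in_use = 0x38 + 8				= 0x40
+ * so the update sequence array occupies bytes 0x30 to 0x35 and the AT_END
+ * terminator attribute starts at byte 0x38.
+ */
+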
+/**
+ * ntfs_mft_record_format - format an mft record on an ntfs volume
+ * @vol:	volume on which to format the mft record
+ * @mft_no:	mft record number to format
+ *
+ * Format the mft record @mft_no in $MFT/$DATA, i.e. lay out an empty, unused
+ * mft record into the appropriate place of the mft data attribute.  This is
+ * used when extending the mft data attribute.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
+{
+	struct inode *mft_vi = vol->mft_ino;
+	struct page *page;
+	MFT_RECORD *m;
+	pgoff_t index, end_index;
+	unsigned int ofs;
+	int err;
+
+	ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
+	/*
+	 * The index into the page cache and the offset within the page cache
+	 * page of the wanted mft record.
+	 */
+	index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+	ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	/* The maximum valid index into the page cache for $MFT's data. */
+	end_index = mft_vi->i_size >> PAGE_CACHE_SHIFT;
+	if (unlikely(index >= end_index)) {
+		if (unlikely(index > end_index || ofs + vol->mft_record_size >=
+				(mft_vi->i_size & ~PAGE_CACHE_MASK))) {
+			ntfs_error(vol->sb, "Tried to format non-existing mft "
+					"record 0x%llx.", (long long)mft_no);
+			return -ENOENT;
+		}
+	}
+	/* Read, map, and pin the page containing the mft record. */
+	page = ntfs_map_page(mft_vi->i_mapping, index);
+	if (unlikely(IS_ERR(page))) {
+		ntfs_error(vol->sb, "Failed to map page containing mft record "
+				"to format 0x%llx.", (long long)mft_no);
+		return PTR_ERR(page);
+	}
+	lock_page(page);
+	BUG_ON(!PageUptodate(page));
+	ClearPageUptodate(page);
+	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
+	err = ntfs_mft_record_layout(vol, mft_no, m);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to layout mft record 0x%llx.",
+				(long long)mft_no);
+		SetPageUptodate(page);
+		unlock_page(page);
+		ntfs_unmap_page(page);
+		return err;
+	}
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+	/*
+	 * Make sure the mft record is written out to disk.  We could use
+	 * ilookup5() to check if an inode is in icache and so on but this is
+	 * unnecessary as ntfs_writepage() will write the dirty record anyway.
+	 */
+	mark_ntfs_record_dirty(page, ofs);
+	ntfs_unmap_page(page);
+	ntfs_debug("Done.");
+	return 0;
+}
+
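+/*
+ * Index arithmetic example for the page cache lookup above (the geometry is
+ * an assumption): with 1024-byte mft records and 4096-byte pages
+ * (PAGE_CACHE_SHIFT 12) there are four mft records per page, so for
+ * mft_no 10
+ *	index = (10 << 10) >> 12		= 2
+ *	ofs   = (10 << 10) & ~PAGE_CACHE_MASK	= 0x800
+ * i.e. the record lives in the third page of $MFT/$DATA at byte offset 2048.
+ */
+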
+/**
+ * ntfs_mft_record_alloc - allocate an mft record on an ntfs volume
+ * @vol:	[IN]  volume on which to allocate the mft record
+ * @mode:	[IN]  mode if want a file or directory, i.e. base inode or 0
+ * @base_ni:	[IN]  open base inode if allocating an extent mft record or NULL
+ * @mrec:	[OUT] on successful return this is the mapped mft record
+ *
+ * Allocate an mft record in $MFT/$DATA of an open ntfs volume @vol.
+ *
+ * If @base_ni is NULL make the mft record a base mft record, i.e. a file or
+ * directory inode, and allocate it at the default allocator position.  In
+ * this case @mode is the file mode as given to us by the caller.  We in
+ * particular use @mode to distinguish whether a file or a directory is being
+ * created (S_ISDIR(mode) and S_ISREG(mode), respectively).
+ *
+ * If @base_ni is not NULL make the allocated mft record an extent record,
+ * allocate it starting at the mft record after the base mft record and attach
+ * the allocated and opened ntfs inode to the base inode @base_ni.  In this
+ * case @mode must be 0 as it is meaningless for extent inodes.
+ *
+ * You need to check the return value with IS_ERR().  If false, the function
+ * was successful and the return value is the now opened ntfs inode of the
+ * allocated mft record.  *@mrec is then set to the allocated, mapped, pinned,
+ * and locked mft record.  If IS_ERR() is true, the function failed and the
+ * error code is obtained from PTR_ERR(return value).  *@mrec is undefined in
+ * this case.
+ *
+ * Allocation strategy:
+ *
+ * To find a free mft record, we scan the mft bitmap for a zero bit.  To
+ * optimize this we start scanning at the place specified by @base_ni or if
+ * @base_ni is NULL we start where we last stopped and we perform wrap around
+ * when we reach the end.  Note, we do not try to allocate mft records below
+ * number 24 because numbers 0 to 15 are the defined system files anyway and 16
+ * to 24 are special in that they are used for storing extension mft records
+ * for the $DATA attribute of $MFT.  This is required to avoid the possibility
+ * of creating a runlist with a circular dependency which once written to disk
+ * can never be read in again.  Windows will only use records 16 to 24 for
+ * normal files if the volume is completely out of space.  We never use them
+ * which means that when the volume is really out of space we cannot create any
+ * more files while Windows can still create up to 8 small files.  We can start
+ * doing this at some later time, it does not matter much for now.
+ *
+ * When scanning the mft bitmap, we only search up to the last allocated mft
+ * record.  If there are no free records left in the range 24 to number of
+ * allocated mft records, then we extend the $MFT/$DATA attribute in order to
+ * create free mft records.  We extend the allocated size of $MFT/$DATA by 16
+ * records at a time or one cluster, if cluster size is above 16kiB.  If there
+ * is not sufficient space to do this, we try to extend by a single mft record
+ * or one cluster, if cluster size is above the mft record size.
+ *
+ * No matter how many mft records we allocate, we initialize only the first
+ * allocated mft record, incrementing mft data size and initialized size
+ * accordingly, open an ntfs_inode for it and return it to the caller, unless
+ * there are fewer than 24 mft records, in which case we allocate and initialize
+ * mft records until we reach record 24 which we consider as the first free mft
+ * record for use by normal files.
+ *
+ * If during any stage we overflow the initialized data in the mft bitmap, we
+ * extend the initialized size (and data size) by 8 bytes, allocating another
+ * cluster if required.  The bitmap data size has to be at least equal to the
+ * number of mft records in the mft, but it can be bigger, in which case the
+ * superfluous bits are padded with zeroes.
+ *
+ * Thus, when we return successfully (IS_ERR() is false), we will have:
+ *	- initialized / extended the mft bitmap if necessary,
+ *	- initialized / extended the mft data if necessary,
+ *	- set the bit corresponding to the mft record being allocated in the
+ *	  mft bitmap,
+ *	- opened an ntfs_inode for the allocated mft record, and we will have
+ *	- returned the ntfs_inode as well as the allocated mapped, pinned, and
+ *	  locked mft record.
+ *
+ * On error, the volume will be left in a consistent state and no record will
+ * be allocated.  If rolling back a partial operation fails, we may leave some
+ * inconsistent metadata, in which case we call NVolSetErrors() so the volume is
+ * left dirty when unmounted.
+ *
+ * Note, this function cannot make use of most of the normal functions, like
+ * for example for attribute resizing, etc, because when the run list overflows
+ * the base mft record and an attribute list is used, it is very important that
+ * the extension mft records used to store the $DATA attribute of $MFT can be
+ * reached without having to read the information contained inside them, as
+ * this would make it impossible to find them in the first place after the
+ * volume is unmounted.  $MFT/$BITMAP probably does not need to follow this
+ * rule because the bitmap is not essential for finding the mft records, but on
+ * the other hand, handling the bitmap in this special way would make life
+ * easier because otherwise there might be circular invocations of functions
+ * when reading the bitmap.
+ */
+ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
+		ntfs_inode *base_ni, MFT_RECORD **mrec)
+{
+	s64 ll, bit, old_data_initialized, old_data_size;
+	struct inode *vi;
+	struct page *page;
+	ntfs_inode *mft_ni, *mftbmp_ni, *ni;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	ATTR_RECORD *a;
+	pgoff_t index;
+	unsigned int ofs;
+	int err;
+	le16 seq_no, usn;
+	BOOL record_formatted = FALSE;
+
+	if (base_ni) {
+		ntfs_debug("Entering (allocating an extent mft record for "
+				"base mft record 0x%llx).",
+				(long long)base_ni->mft_no);
+		/* @mode and @base_ni are mutually exclusive. */
+		BUG_ON(mode);
+	} else
+		ntfs_debug("Entering (allocating a base mft record).");
+	if (mode) {
+		/* @mode and @base_ni are mutually exclusive. */
+		BUG_ON(base_ni);
+		/* We only support creation of normal files and directories. */
+		if (!S_ISREG(mode) && !S_ISDIR(mode))
+			return ERR_PTR(-EOPNOTSUPP);
+	}
+	BUG_ON(!mrec);
+	mft_ni = NTFS_I(vol->mft_ino);
+	mftbmp_ni = NTFS_I(vol->mftbmp_ino);
+	down_write(&vol->mftbmp_lock);
+	bit = ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(vol, base_ni);
+	if (bit >= 0) {
+		ntfs_debug("Found and allocated free record (#1), bit 0x%llx.",
+				(long long)bit);
+		goto have_alloc_rec;
+	}
+	if (bit != -ENOSPC) {
+		up_write(&vol->mftbmp_lock);
+		return ERR_PTR(bit);
+	}
+	/*
+	 * No free mft records left.  If the mft bitmap already covers more
+	 * than the currently used mft records, the next records are all free,
+	 * so we can simply allocate the first unused mft record.
+	 * Note: We also have to make sure that the mft bitmap at least covers
+	 * the first 24 mft records as they are special and whilst they may not
+	 * be in use, we do not allocate from them.
+	 */
+	ll = mft_ni->initialized_size >> vol->mft_record_size_bits;
+	if (mftbmp_ni->initialized_size << 3 > ll &&
+			mftbmp_ni->initialized_size > 3) {
+		bit = ll;
+		if (bit < 24)
+			bit = 24;
+		if (unlikely(bit >= (1ll << 32)))
+			goto max_err_out;
+		ntfs_debug("Found free record (#2), bit 0x%llx.",
+				(long long)bit);
+		goto found_free_rec;
+	}
+	/*
+	 * The mft bitmap needs to be expanded until it covers the first unused
+	 * mft record that we can allocate.
+	 * Note: The smallest mft record we allocate is mft record 24.
+	 */
+	bit = mftbmp_ni->initialized_size << 3;
+	if (unlikely(bit >= (1ll << 32)))
+		goto max_err_out;
+	ntfs_debug("Status of mftbmp before extension: allocated_size 0x%llx, "
+			"data_size 0x%llx, initialized_size 0x%llx.",
+			(long long)mftbmp_ni->allocated_size,
+			(long long)vol->mftbmp_ino->i_size,
+			(long long)mftbmp_ni->initialized_size);
+	if (mftbmp_ni->initialized_size + 8 > mftbmp_ni->allocated_size) {
+		/* Need to extend bitmap by one more cluster. */
+		ntfs_debug("mftbmp: initialized_size + 8 > allocated_size.");
+		err = ntfs_mft_bitmap_extend_allocation_nolock(vol);
+		if (unlikely(err)) {
+			up_write(&vol->mftbmp_lock);
+			goto err_out;
+		}
+		ntfs_debug("Status of mftbmp after allocation extension: "
+				"allocated_size 0x%llx, data_size 0x%llx, "
+				"initialized_size 0x%llx.",
+				(long long)mftbmp_ni->allocated_size,
+				(long long)vol->mftbmp_ino->i_size,
+				(long long)mftbmp_ni->initialized_size);
+	}
+	/*
+	 * We now have sufficient allocated space, extend the initialized_size
+	 * as well as the data_size if necessary and fill the new space with
+	 * zeroes.
+	 */
+	err = ntfs_mft_bitmap_extend_initialized_nolock(vol);
+	if (unlikely(err)) {
+		up_write(&vol->mftbmp_lock);
+		goto err_out;
+	}
+	ntfs_debug("Status of mftbmp after initialized extention: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mftbmp_ni->allocated_size,
+			(long long)vol->mftbmp_ino->i_size,
+			(long long)mftbmp_ni->initialized_size);
+	ntfs_debug("Found free record (#3), bit 0x%llx.", (long long)bit);
+found_free_rec:
+	/* @bit is the found free mft record, allocate it in the mft bitmap. */
+	ntfs_debug("At found_free_rec.");
+	err = ntfs_bitmap_set_bit(vol->mftbmp_ino, bit);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to allocate bit in mft bitmap.");
+		up_write(&vol->mftbmp_lock);
+		goto err_out;
+	}
+	ntfs_debug("Set bit 0x%llx in mft bitmap.", (long long)bit);
+have_alloc_rec:
+	/*
+	 * The mft bitmap is now uptodate.  Deal with mft data attribute now.
+	 * Note, we keep hold of the mft bitmap lock for writing until all
+	 * modifications to the mft data attribute are complete, too, as they
+	 * will impact decisions for mft bitmap and mft record allocation done
+	 * by a parallel allocation and if the lock is not maintained a
+	 * parallel allocation could allocate the same mft record as this one.
+	 */
+	ll = (bit + 1) << vol->mft_record_size_bits;
+	if (ll <= mft_ni->initialized_size) {
+		ntfs_debug("Allocated mft record already initialized.");
+		goto mft_rec_already_initialized;
+	}
+	ntfs_debug("Initializing allocated mft record.");
+	/*
+	 * The mft record is outside the initialized data.  Extend the mft data
+	 * attribute until it covers the allocated record.  The loop is only
+	 * actually traversed more than once when a freshly formatted volume is
+	 * first written to so it optimizes away nicely in the common case.
+	 */
+	ntfs_debug("Status of mft data before extension: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mft_ni->allocated_size,
+			(long long)vol->mft_ino->i_size,
+			(long long)mft_ni->initialized_size);
+	while (ll > mft_ni->allocated_size) {
+		err = ntfs_mft_data_extend_allocation_nolock(vol);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to extend mft data "
+					"allocation.");
+			goto undo_mftbmp_alloc_nolock;
+		}
+		ntfs_debug("Status of mft data after allocation extension: "
+				"allocated_size 0x%llx, data_size 0x%llx, "
+				"initialized_size 0x%llx.",
+				(long long)mft_ni->allocated_size,
+				(long long)vol->mft_ino->i_size,
+				(long long)mft_ni->initialized_size);
+	}
+	/*
+	 * Extend mft data initialized size (and data size of course) to reach
+	 * the allocated mft record, formatting the mft records along the way.
+	 * Note: We only modify the ntfs_inode structure as that is all that is
+	 * needed by ntfs_mft_record_format().  We will update the attribute
+	 * record itself in one fell swoop later on.
+	 */
+	old_data_initialized = mft_ni->initialized_size;
+	old_data_size = vol->mft_ino->i_size;
+	while (ll > mft_ni->initialized_size) {
+		s64 new_initialized_size, mft_no;
+
+		new_initialized_size = mft_ni->initialized_size +
+				vol->mft_record_size;
+		mft_no = mft_ni->initialized_size >> vol->mft_record_size_bits;
+		if (new_initialized_size > vol->mft_ino->i_size)
+			vol->mft_ino->i_size = new_initialized_size;
+		ntfs_debug("Initializing mft record 0x%llx.",
+				(long long)mft_no);
+		err = ntfs_mft_record_format(vol, mft_no);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to format mft record.");
+			goto undo_data_init;
+		}
+		mft_ni->initialized_size = new_initialized_size;
+	}
+	record_formatted = TRUE;
+	/* Update the mft data attribute record to reflect the new sizes. */
+	m = map_mft_record(mft_ni);
+	if (IS_ERR(m)) {
+		ntfs_error(vol->sb, "Failed to map mft record.");
+		err = PTR_ERR(m);
+		goto undo_data_init;
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, m);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.");
+		err = -ENOMEM;
+		unmap_mft_record(mft_ni);
+		goto undo_data_init;
+	}
+	err = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to find first attribute extent of "
+				"mft data attribute.");
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(mft_ni);
+		goto undo_data_init;
+	}
+	a = ctx->attr;
+	a->data.non_resident.initialized_size =
+			cpu_to_sle64(mft_ni->initialized_size);
+	a->data.non_resident.data_size = cpu_to_sle64(vol->mft_ino->i_size);
+	/* Ensure the changes make it to disk. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	ntfs_debug("Status of mft data after mft record initialization: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mft_ni->allocated_size,
+			(long long)vol->mft_ino->i_size,
+			(long long)mft_ni->initialized_size);
+	BUG_ON(vol->mft_ino->i_size > mft_ni->allocated_size);
+	BUG_ON(mft_ni->initialized_size > vol->mft_ino->i_size);
+mft_rec_already_initialized:
+	/*
+	 * We can finally drop the mft bitmap lock as the mft data attribute
+	 * has been fully updated.  The only disparity left is that the
+	 * allocated mft record still needs to be marked as in use to match the
+	 * set bit in the mft bitmap but this is actually not a problem since
+	 * this mft record is not referenced from anywhere yet and the fact
+	 * that it is allocated in the mft bitmap means that no-one will try to
+	 * allocate it either.
+	 */
+	up_write(&vol->mftbmp_lock);
+	/*
+	 * We now have allocated and initialized the mft record.  Calculate the
+	 * index of and the offset within the page cache page the record is in.
+	 */
+	index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	/* Read, map, and pin the page containing the mft record. */
+	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
+	if (unlikely(IS_ERR(page))) {
+		ntfs_error(vol->sb, "Failed to map page containing allocated "
+				"mft record 0x%llx.", (long long)bit);
+		err = PTR_ERR(page);
+		goto undo_mftbmp_alloc;
+	}
+	lock_page(page);
+	BUG_ON(!PageUptodate(page));
+	ClearPageUptodate(page);
+	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
+	/* If we just formatted the mft record no need to do it again. */
+	if (!record_formatted) {
+		/* Sanity check that the mft record is really not in use. */
+		if (ntfs_is_file_record(m->magic) &&
+				(m->flags & MFT_RECORD_IN_USE)) {
+			ntfs_error(vol->sb, "Mft record 0x%llx was marked "
+					"free in mft bitmap but is marked "
+					"used itself.  Corrupt filesystem.  "
+					"Unmount and run chkdsk.",
+					(long long)bit);
+			err = -EIO;
+			SetPageUptodate(page);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			NVolSetErrors(vol);
+			goto undo_mftbmp_alloc;
+		}
+		/*
+		 * We need to (re-)format the mft record, preserving the
+		 * sequence number if it is not zero as well as the update
+		 * sequence number if it is not zero or -1 (0xffff).  This
+		 * means we do not need to care whether or not something went
+		 * wrong with the previous mft record.
+		 */
+		seq_no = m->sequence_number;
+		usn = *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs));
+		err = ntfs_mft_record_layout(vol, bit, m);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to layout allocated mft "
+					"record 0x%llx.", (long long)bit);
+			SetPageUptodate(page);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		if (seq_no)
+			m->sequence_number = seq_no;
+		if (usn && le16_to_cpu(usn) != 0xffff)
+			*(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = usn;
+	}
+	/* Set the mft record itself in use. */
+	m->flags |= MFT_RECORD_IN_USE;
+	if (S_ISDIR(mode))
+		m->flags |= MFT_RECORD_IS_DIRECTORY;
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	if (base_ni) {
+		/*
+		 * Setup the base mft record in the extent mft record.  This
+		 * completes initialization of the allocated extent mft record
+		 * and we can simply use it with map_extent_mft_record().
+		 */
+		m->base_mft_record = MK_LE_MREF(base_ni->mft_no,
+				base_ni->seq_no);
+		/*
+		 * Allocate an extent inode structure for the new mft record,
+		 * attach it to the base inode @base_ni and map, pin, and lock
+		 * its mft record, i.e. the allocated one.
+		 */
+		m = map_extent_mft_record(base_ni, bit, &ni);
+		if (IS_ERR(m)) {
+			ntfs_error(vol->sb, "Failed to map allocated extent "
+					"mft record 0x%llx.", (long long)bit);
+			err = PTR_ERR(m);
+			/* Re-derive @m, which now holds an error pointer. */
+			m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
+			/* Set the mft record itself not in use. */
+			m->flags &= cpu_to_le16(
+					~le16_to_cpu(MFT_RECORD_IN_USE));
+			flush_dcache_page(page);
+			/* Make sure the mft record is written out to disk. */
+			mark_ntfs_record_dirty(page, ofs);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		/*
+		 * Make sure the allocated mft record is written out to disk.
+		 * No need to set the inode dirty because the caller is going
+		 * to do that anyway after finishing with the new extent mft
+		 * record (e.g. at a minimum a new attribute will be added to
+		 * the mft record).
+		 */
+		mark_ntfs_record_dirty(page, ofs);
+		unlock_page(page);
+		/*
+		 * Need to unmap the page since map_extent_mft_record() mapped
+		 * it as well so we have it mapped twice at the moment.
+		 */
+		ntfs_unmap_page(page);
+	} else {
+		/*
+		 * Allocate a new VFS inode and set it up.  NOTE: @vi->i_nlink
+		 * is set to 1 but the mft record->link_count is 0.  The caller
+		 * needs to bear this in mind.
+		 */
+		vi = new_inode(vol->sb);
+		if (unlikely(!vi)) {
+			err = -ENOMEM;
+			/* Set the mft record itself not in use. */
+			m->flags &= cpu_to_le16(
+					~le16_to_cpu(MFT_RECORD_IN_USE));
+			flush_dcache_page(page);
+			/* Make sure the mft record is written out to disk. */
+			mark_ntfs_record_dirty(page, ofs);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		vi->i_ino = bit;
+		/*
+		 * This is the optimal IO size (for stat), not the fs block
+		 * size.
+		 */
+		vi->i_blksize = PAGE_CACHE_SIZE;
+		/*
+		 * This is for checking whether an inode has changed w.r.t. a
+		 * file so that the file can be updated if necessary (compare
+		 * with f_version).
+		 */
+		vi->i_version = 1;
+
+		/* The owner and group come from the ntfs volume. */
+		vi->i_uid = vol->uid;
+		vi->i_gid = vol->gid;
+
+		/* Initialize the ntfs specific part of @vi. */
+		ntfs_init_big_inode(vi);
+		ni = NTFS_I(vi);
+		/*
+		 * Set the appropriate mode, attribute type, and name.  For
+		 * directories, also setup the index values to the defaults.
+		 */
+		if (S_ISDIR(mode)) {
+			vi->i_mode = S_IFDIR | S_IRWXUGO;
+			vi->i_mode &= ~vol->dmask;
+
+			NInoSetMstProtected(ni);
+			ni->type = AT_INDEX_ALLOCATION;
+			ni->name = I30;
+			ni->name_len = 4;
+
+			ni->itype.index.block_size = 4096;
+			ni->itype.index.block_size_bits = generic_ffs(4096) - 1;
+			ni->itype.index.collation_rule = COLLATION_FILE_NAME;
+			if (vol->cluster_size <= ni->itype.index.block_size) {
+				ni->itype.index.vcn_size = vol->cluster_size;
+				ni->itype.index.vcn_size_bits =
+						vol->cluster_size_bits;
+			} else {
+				ni->itype.index.vcn_size = vol->sector_size;
+				ni->itype.index.vcn_size_bits =
+						vol->sector_size_bits;
+			}
+		} else {
+			vi->i_mode = S_IFREG | S_IRWXUGO;
+			vi->i_mode &= ~vol->fmask;
+
+			ni->type = AT_DATA;
+			ni->name = NULL;
+			ni->name_len = 0;
+		}
+		if (IS_RDONLY(vi))
+			vi->i_mode &= ~S_IWUGO;
+
+		/* Set the inode times to the current time. */
+		vi->i_atime = vi->i_mtime = vi->i_ctime =
+			current_fs_time(vi->i_sb);
+		/*
+		 * Set the file size to 0, the ntfs inode sizes are set to 0 by
+		 * the call to ntfs_init_big_inode() above.
+		 */
+		vi->i_size = 0;
+		vi->i_blocks = 0;
+
+		/* Set the sequence number. */
+		vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
+		/*
+		 * Manually map, pin, and lock the mft record as we already
+		 * have its page mapped and it is very easy to do.
+		 */
+		atomic_inc(&ni->count);
+		down(&ni->mrec_lock);
+		ni->page = page;
+		ni->page_ofs = ofs;
+		/*
+		 * Make sure the allocated mft record is written out to disk.
+		 * NOTE: We do not set the ntfs inode dirty because this would
+		 * fail in ntfs_write_inode() because the inode does not have a
+		 * standard information attribute yet.  Also, there is no need
+		 * to set the inode dirty because the caller is going to do
+		 * that anyway after finishing with the new mft record (e.g. at
+		 * a minimum some new attributes will be added to the mft
+		 * record).
+		 */
+		mark_ntfs_record_dirty(page, ofs);
+		unlock_page(page);
+
+		/* Add the inode to the inode hash for the superblock. */
+		insert_inode_hash(vi);
+
+		/* Update the default mft allocation position. */
+		vol->mft_data_pos = bit + 1;
+	}
+	/*
+	 * Return the opened, allocated inode of the allocated mft record as
+	 * well as the mapped, pinned, and locked mft record.
+	 */
+	ntfs_debug("Returning opened, allocated %sinode 0x%llx.",
+			base_ni ? "extent " : "", (long long)bit);
+	*mrec = m;
+	return ni;
+undo_data_init:
+	mft_ni->initialized_size = old_data_initialized;
+	vol->mft_ino->i_size = old_data_size;
+	goto undo_mftbmp_alloc_nolock;
+undo_mftbmp_alloc:
+	down_write(&vol->mftbmp_lock);
+undo_mftbmp_alloc_nolock:
+	if (ntfs_bitmap_clear_bit(vol->mftbmp_ino, bit)) {
+		ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
+		NVolSetErrors(vol);
+	}
+	up_write(&vol->mftbmp_lock);
+err_out:
+	return ERR_PTR(err);
+max_err_out:
+	ntfs_warning(vol->sb, "Cannot allocate mft record because the maximum "
+			"number of inodes (2^32) has already been reached.");
+	up_write(&vol->mftbmp_lock);
+	return ERR_PTR(-ENOSPC);
+}
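+
+/*
+ * Illustrative caller sketch for ntfs_mft_record_alloc() (not a function of
+ * this driver; the surrounding code is assumed), showing the IS_ERR()
+ * convention described above.  On success the caller owns the mapped, pinned
+ * and locked mft record and eventually releases it with unmap_mft_record():
+ *
+ *	MFT_RECORD *m;
+ *	ntfs_inode *ni;
+ *
+ *	ni = ntfs_mft_record_alloc(vol, S_IFREG, NULL, &m);
+ *	if (IS_ERR(ni))
+ *		return PTR_ERR(ni);
+ *	... fill in the attributes of the new mft record through *m ...
+ *	unmap_mft_record(ni);
+ */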
+
+/**
+ * ntfs_extent_mft_record_free - free an extent mft record on an ntfs volume
+ * @ni:		ntfs inode of the mapped extent mft record to free
+ * @m:		mapped extent mft record of the ntfs inode @ni
+ *
+ * Free the mapped extent mft record @m of the extent ntfs inode @ni.
+ *
+ * Note that this function unmaps the mft record and closes and destroys @ni
+ * internally and hence you cannot use either @ni or @m any more after this
+ * function returns success.
+ *
+ * On success return 0 and on error return -errno.  @ni and @m are still valid
+ * in this case and have not been freed.
+ *
+ * For some errors, an error message is displayed, the success code 0 is
+ * returned, and the volume is left dirty on umount.  This makes sense in
+ * case we could not rollback the changes that were already done since the
+ * caller no longer wants to reference this mft record so it does not matter to
+ * the caller if something is wrong with it as long as it is properly detached
+ * from the base inode.
+ */
+int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
+{
+	unsigned long mft_no = ni->mft_no;
+	ntfs_volume *vol = ni->vol;
+	ntfs_inode *base_ni;
+	ntfs_inode **extent_nis;
+	int i, err;
+	le16 old_seq_no;
+	u16 seq_no;
+
+	BUG_ON(NInoAttr(ni));
+	BUG_ON(ni->nr_extents != -1);
+
+	down(&ni->extent_lock);
+	base_ni = ni->ext.base_ntfs_ino;
+	up(&ni->extent_lock);
+
+	BUG_ON(base_ni->nr_extents <= 0);
+
+	ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n",
+			mft_no, base_ni->mft_no);
+
+	down(&base_ni->extent_lock);
+
+	/* Make sure we are holding the only reference to the extent inode. */
+	if (atomic_read(&ni->count) > 2) {
+		ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, "
+				"not freeing.", base_ni->mft_no);
+		up(&base_ni->extent_lock);
+		return -EBUSY;
+	}
+
+	/* Dissociate the ntfs inode from the base inode. */
+	extent_nis = base_ni->ext.extent_ntfs_inos;
+	err = -ENOENT;
+	for (i = 0; i < base_ni->nr_extents; i++) {
+		if (ni != extent_nis[i])
+			continue;
+		extent_nis += i;
+		base_ni->nr_extents--;
+		memmove(extent_nis, extent_nis + 1, (base_ni->nr_extents - i) *
+				sizeof(ntfs_inode*));
+		err = 0;
+		break;
+	}
+
+	up(&base_ni->extent_lock);
+
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to "
+				"its base inode 0x%lx.", mft_no,
+				base_ni->mft_no);
+		BUG();
+	}
+
+	/*
+	 * The extent inode is no longer attached to the base inode so no one
+	 * can get a reference to it any more.
+	 */
+
+	/* Mark the mft record as not in use. */
+	m->flags &= const_cpu_to_le16(~const_le16_to_cpu(MFT_RECORD_IN_USE));
+
+	/* Increment the sequence number, skipping zero, if it is not zero. */
+	old_seq_no = m->sequence_number;
+	seq_no = le16_to_cpu(old_seq_no);
+	if (seq_no == 0xffff)
+		seq_no = 1;
+	else if (seq_no)
+		seq_no++;
+	m->sequence_number = cpu_to_le16(seq_no);
+
+	/*
+	 * Set the ntfs inode dirty and write it out.  We do not need to worry
+	 * about the base inode here since whatever caused the extent mft
+	 * record to be freed is guaranteed to do it already.
+	 */
+	NInoSetDirty(ni);
+	err = write_mft_record(ni, m, 0);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to write mft record 0x%lx, not "
+				"freeing.", mft_no);
+		goto rollback;
+	}
+rollback_error:
+	/* Unmap and throw away the now freed extent inode. */
+	unmap_extent_mft_record(ni);
+	ntfs_clear_extent_inode(ni);
+
+	/* Clear the bit in the $MFT/$BITMAP corresponding to this record. */
+	down_write(&vol->mftbmp_lock);
+	err = ntfs_bitmap_clear_bit(vol->mftbmp_ino, mft_no);
+	up_write(&vol->mftbmp_lock);
+	if (unlikely(err)) {
+		/*
+		 * The extent inode is gone but we failed to deallocate it in
+		 * the mft bitmap.  Just emit a warning and leave the volume
+		 * dirty on umount.
+		 */
+		ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
+		NVolSetErrors(vol);
+	}
+	return 0;
+rollback:
+	/* Rollback what we did... */
+	down(&base_ni->extent_lock);
+	extent_nis = base_ni->ext.extent_ntfs_inos;
+	if (!(base_ni->nr_extents & 3)) {
+		int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
+
+		extent_nis = (ntfs_inode**)kmalloc(new_size, GFP_NOFS);
+		if (unlikely(!extent_nis)) {
+			ntfs_error(vol->sb, "Failed to allocate internal "
+					"buffer during rollback.%s", es);
+			up(&base_ni->extent_lock);
+			NVolSetErrors(vol);
+			goto rollback_error;
+		}
+		if (base_ni->nr_extents) {
+			BUG_ON(!base_ni->ext.extent_ntfs_inos);
+			memcpy(extent_nis, base_ni->ext.extent_ntfs_inos,
+					new_size - 4 * sizeof(ntfs_inode*));
+			kfree(base_ni->ext.extent_ntfs_inos);
+		}
+		base_ni->ext.extent_ntfs_inos = extent_nis;
+	}
+	m->flags |= MFT_RECORD_IN_USE;
+	m->sequence_number = old_seq_no;
+	extent_nis[base_ni->nr_extents++] = ni;
+	up(&base_ni->extent_lock);
+	mark_mft_record_dirty(ni);
+	return err;
+}
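+
+/*
+ * Illustrative caller sketch for ntfs_extent_mft_record_free() (hypothetical
+ * caller code):  on success neither @ni nor @m may be touched again, while on
+ * error both remain valid as documented above:
+ *
+ *	err = ntfs_extent_mft_record_free(ni, m);
+ *	if (err)
+ *		... @ni and @m are still valid, handle the error ...
+ */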
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/mft.h b/fs/ntfs/mft.h
new file mode 100644
index 0000000..407de2c
--- /dev/null
+++ b/fs/ntfs/mft.h
@@ -0,0 +1,127 @@
+/*
+ * mft.h - Defines for mft record handling in NTFS Linux kernel driver.
+ *	   Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_MFT_H
+#define _LINUX_NTFS_MFT_H
+
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+
+#include "inode.h"
+
+extern MFT_RECORD *map_mft_record(ntfs_inode *ni);
+extern void unmap_mft_record(ntfs_inode *ni);
+
+extern MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
+		ntfs_inode **ntfs_ino);
+
+static inline void unmap_extent_mft_record(ntfs_inode *ni)
+{
+	unmap_mft_record(ni);
+	return;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * flush_dcache_mft_record_page - flush_dcache_page() for mft records
+ * @ni:		ntfs inode structure of mft record
+ *
+ * Call flush_dcache_page() for the page in which an mft record resides.
+ *
+ * This must be called every time an mft record is modified, just after the
+ * modification.
+ */
+static inline void flush_dcache_mft_record_page(ntfs_inode *ni)
+{
+	flush_dcache_page(ni->page);
+}
+
+extern void __mark_mft_record_dirty(ntfs_inode *ni);
+
+/**
+ * mark_mft_record_dirty - set the mft record and the page containing it dirty
+ * @ni:		ntfs inode describing the mapped mft record
+ *
+ * Set the mapped (extent) mft record of the (base or extent) ntfs inode @ni,
+ * as well as the page containing the mft record, dirty.  Also, mark the base
+ * vfs inode dirty.  This ensures that any changes to the mft record are
+ * written out to disk.
+ *
+ * NOTE:  We do not do anything if the mft record is already marked dirty.
+ */
+static inline void mark_mft_record_dirty(ntfs_inode *ni)
+{
+	if (!NInoTestSetDirty(ni))
+		__mark_mft_record_dirty(ni);
+}
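+
+/*
+ * Typical modification sequence (sketch of how the callers in mft.c use the
+ * above helpers):  after changing a mapped mft record, flush the cpu cache
+ * and then mark the record dirty, e.g.:
+ *
+ *	... modify the mapped mft record of @ni ...
+ *	flush_dcache_mft_record_page(ni);
+ *	mark_mft_record_dirty(ni);
+ */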
+
+extern int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
+		MFT_RECORD *m, int sync);
+
+extern int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync);
+
+/**
+ * write_mft_record - write out a mapped (extent) mft record
+ * @ni:		ntfs inode describing the mapped (extent) mft record
+ * @m:		mapped (extent) mft record to write
+ * @sync:	if true, wait for i/o completion
+ *
+ * This is just a wrapper for write_mft_record_nolock() (see mft.c), which
+ * locks the page for the duration of the write.  This ensures that there are
+ * no race conditions between writing the mft record via the dirty inode code
+ * paths and via the page cache write back code paths or between writing
+ * neighbouring mft records residing in the same page.
+ *
+ * Locking the page also serializes us against ->readpage() if the page is not
+ * uptodate.
+ *
+ * On success, clean the mft record and return 0.  On error, leave the mft
+ * record dirty and return -errno.  The caller should call make_bad_inode() on
+ * the base inode to ensure no more access happens to this inode.  We do not do
+ * it here as the caller may want to finish writing other extent mft records
+ * first to minimize on-disk metadata inconsistencies.
+ */
+static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync)
+{
+	struct page *page = ni->page;
+	int err;
+
+	BUG_ON(!page);
+	lock_page(page);
+	err = write_mft_record_nolock(ni, m, sync);
+	unlock_page(page);
+	return err;
+}
+
+extern BOOL ntfs_may_write_mft_record(ntfs_volume *vol,
+		const unsigned long mft_no, const MFT_RECORD *m,
+		ntfs_inode **locked_ni);
+
+extern ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
+		ntfs_inode *base_ni, MFT_RECORD **mrec);
+extern int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m);
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_MFT_H */
diff --git a/fs/ntfs/mst.c b/fs/ntfs/mst.c
new file mode 100644
index 0000000..5a858d8
--- /dev/null
+++ b/fs/ntfs/mst.c
@@ -0,0 +1,203 @@
+/*
+ * mst.c - NTFS multi sector transfer protection handling code. Part of the
+ *	   Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "ntfs.h"
+
+/**
+ * post_read_mst_fixup - deprotect multi sector transfer protected data
+ * @b:		pointer to the data to deprotect
+ * @size:	size in bytes of @b
+ *
+ * Perform the necessary post read multi sector transfer fixup and detect the
+ * presence of incomplete multi sector transfers. - In that case, overwrite the
+ * magic of the ntfs record header being processed with "BAAD" (in memory only!)
+ * and abort processing.
+ *
+ * Return 0 on success and -EINVAL on error ("BAAD" magic will be present).
+ *
+ * NOTE: We consider the absence / invalidity of an update sequence array to
+ * mean that the structure is not protected at all and hence doesn't need to
+ * be fixed up. Thus, we return success and not failure in this case. This is
+ * in contrast to pre_write_mst_fixup(), see below.
+ */
+int post_read_mst_fixup(NTFS_RECORD *b, const u32 size)
+{
+	u16 usa_ofs, usa_count, usn;
+	u16 *usa_pos, *data_pos;
+
+	/* Setup the variables. */
+	usa_ofs = le16_to_cpu(b->usa_ofs);
+	/* Decrement usa_count to get number of fixups. */
+	usa_count = le16_to_cpu(b->usa_count) - 1;
+	/* Size and alignment checks. */
+	if ( size & (NTFS_BLOCK_SIZE - 1)	||
+	     usa_ofs & 1			||
+	     usa_ofs + (usa_count * 2) > size	||
+	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
+		return 0;
+	/* Position of usn in update sequence array. */
+	usa_pos = (u16*)b + usa_ofs/sizeof(u16);
+	/*
+	 * The update sequence number which has to be equal to each of the
+	 * u16 values before they are fixed up. Note no need to care for
+	 * endianness since we are comparing and moving data for on disk
+	 * structures which means the data is consistent. - If it is
+	 * consistently the wrong endianness, it doesn't make any difference.
+	 */
+	usn = *usa_pos;
+	/*
+	 * Position in protected data of first u16 that needs fixing up.
+	 */
+	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
+	/*
+	 * Check for incomplete multi sector transfer(s).
+	 */
+	while (usa_count--) {
+		if (*data_pos != usn) {
+			/*
+			 * Incomplete multi sector transfer detected! )-:
+			 * Set the magic to "BAAD" and return failure.
+			 * Note that magic_BAAD is already converted to le32.
+			 */
+			b->magic = magic_BAAD;
+			return -EINVAL;
+		}
+		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
+	}
+	/* Re-setup the variables. */
+	usa_count = le16_to_cpu(b->usa_count) - 1;
+	data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
+	/* Fixup all sectors. */
+	while (usa_count--) {
+		/*
+		 * Increment position in usa and restore original data from
+		 * the usa into the data buffer.
+		 */
+		*data_pos = *(++usa_pos);
+		/* Increment position in data as well. */
+		data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
+	}
+	return 0;
+}
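+
+/*
+ * Worked example of the layout handled above, assuming a 1024 byte mst
+ * protected record (two 512 byte NTFS blocks):  the record header stores
+ * usa_count = 3, i.e. one usn plus two saved words, and the protected
+ * positions are the last le16 of each block, at byte offsets 510 and 1022.
+ * On a clean read both of those words equal the usn and get replaced with
+ * the originals saved in the usa; a mismatch indicates an incomplete multi
+ * sector transfer and the record is marked "BAAD".
+ */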
+
+/**
+ * pre_write_mst_fixup - apply multi sector transfer protection
+ * @b:		pointer to the data to protect
+ * @size:	size in bytes of @b
+ *
+ * Perform the necessary pre write multi sector transfer fixup on the data
+ * pointed to by @b of size @size.
+ *
+ * Return 0 if fixup applied (success) or -EINVAL if no fixup was performed
+ * (assumed not needed). This is in contrast to post_read_mst_fixup() above.
+ *
+ * NOTE: We consider the absence / invalidity of an update sequence array to
+ * mean that the structure is not subject to protection and hence doesn't need
+ * to be fixed up. This means that you have to create a valid update sequence
+ * array header in the ntfs record before calling this function, otherwise it
+ * will fail (the header needs to contain the position of the update sequence
+ * array together with the number of elements in the array). You also need to
+ * initialise the update sequence number before calling this function
+ * otherwise a random word will be used (whatever was in the record at that
+ * position at that time).
+ */
+int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size)
+{
+	le16 *usa_pos, *data_pos;
+	u16 usa_ofs, usa_count, usn;
+	le16 le_usn;
+
+	/* Sanity check + only fixup if it makes sense. */
+	if (!b || ntfs_is_baad_record(b->magic) ||
+			ntfs_is_hole_record(b->magic))
+		return -EINVAL;
+	/* Setup the variables. */
+	usa_ofs = le16_to_cpu(b->usa_ofs);
+	/* Decrement usa_count to get number of fixups. */
+	usa_count = le16_to_cpu(b->usa_count) - 1;
+	/* Size and alignment checks. */
+	if ( size & (NTFS_BLOCK_SIZE - 1)	||
+	     usa_ofs & 1			||
+	     usa_ofs + (usa_count * 2) > size	||
+	     (size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
+		return -EINVAL;
+	/* Position of usn in update sequence array. */
+	usa_pos = (le16*)((u8*)b + usa_ofs);
+	/*
+	 * Cyclically increment the update sequence number
+	 * (skipping 0 and -1, i.e. 0xffff).
+	 */
+	usn = le16_to_cpup(usa_pos) + 1;
+	if (usn == 0xffff || !usn)
+		usn = 1;
+	le_usn = cpu_to_le16(usn);
+	*usa_pos = le_usn;
+	/* Position in data of first u16 that needs fixing up. */
+	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
+	/* Fixup all sectors. */
+	while (usa_count--) {
+		/*
+		 * Increment the position in the usa and save the
+		 * original data from the data buffer into the usa.
+		 */
+		*(++usa_pos) = *data_pos;
+		/* Apply fixup to data. */
+		*data_pos = le_usn;
+		/* Increment position in data as well. */
+		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
+	}
+	return 0;
+}
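+
+/*
+ * Typical write sequence (sketch, not a function of this file):  protect the
+ * record, write it out, then remove the fixups again so the in-memory copy
+ * remains usable, e.g.:
+ *
+ *	err = pre_write_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size);
+ *	if (!err) {
+ *		... write the record to disk ...
+ *		post_write_mst_fixup((NTFS_RECORD*)m);
+ *	}
+ */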
+
+/**
+ * post_write_mst_fixup - fast deprotect multi sector transfer protected data
+ * @b:		pointer to the data to deprotect
+ *
+ * Perform the necessary post write multi sector transfer fixup, not checking
+ * for any errors, because we assume we have just used pre_write_mst_fixup(),
+ * thus the data will be fine or we would never have gotten here.
+ */
+void post_write_mst_fixup(NTFS_RECORD *b)
+{
+	le16 *usa_pos, *data_pos;
+
+	u16 usa_ofs = le16_to_cpu(b->usa_ofs);
+	u16 usa_count = le16_to_cpu(b->usa_count) - 1;
+
+	/* Position of usn in update sequence array. */
+	usa_pos = (le16*)b + usa_ofs/sizeof(le16);
+
+	/* Position in protected data of first u16 that needs fixing up. */
+	data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
+
+	/* Fixup all sectors. */
+	while (usa_count--) {
+		/*
+		 * Increment position in usa and restore original data from
+		 * the usa into the data buffer.
+		 */
+		*data_pos = *(++usa_pos);
+
+		/* Increment position in data as well. */
+		data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
+	}
+}
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
new file mode 100644
index 0000000..7c7e13b
--- /dev/null
+++ b/fs/ntfs/namei.c
@@ -0,0 +1,498 @@
+/*
+ * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS
+ *	     project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/dcache.h>
+#include <linux/security.h>
+
+#include "attrib.h"
+#include "debug.h"
+#include "dir.h"
+#include "mft.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_lookup - find the inode represented by a dentry in a directory inode
+ * @dir_ino:	directory inode in which to look for the inode
+ * @dent:	dentry representing the inode to look for
+ * @nd:		lookup nameidata
+ *
+ * In short, ntfs_lookup() looks for the inode represented by the dentry @dent
+ * in the directory inode @dir_ino and if found attaches the inode to the
+ * dentry @dent.
+ *
+ * In more detail, the dentry @dent specifies which inode to look for by
+ * supplying the name of the inode in @dent->d_name.name. ntfs_lookup()
+ * converts the name to Unicode and walks the contents of the directory inode
+ * @dir_ino looking for the converted Unicode name. If the name is found in the
+ * directory, the corresponding inode is loaded by calling ntfs_iget() on its
+ * inode number and the inode is associated with the dentry @dent via a call to
+ * d_splice_alias().
+ *
+ * If the name is not found in the directory, a NULL inode is inserted into the
+ * dentry @dent via a call to d_add(). The dentry is then termed a negative
+ * dentry.
+ *
+ * Only if an actual error occurs, do we return an error via ERR_PTR().
+ *
+ * In order to handle the case insensitivity issues of NTFS with regards to the
+ * dcache and the dcache requiring only one dentry per directory, we deal with
+ * dentry aliases that only differ in case in ->ntfs_lookup() while maintaining
+ * a case sensitive dcache. This means that we get the full benefit of dcache
+ * speed when the file/directory is looked up with the same case as returned by
+ * ->ntfs_readdir() but that a lookup for any other case (or for the short file
+ * name) will not find anything in dcache and will enter ->ntfs_lookup()
+ * instead, where we search the directory for a fully matching file name
+ * (including case) and if that is not found, we search for a file name that
+ * matches with different case and if that has non-POSIX semantics we return
+ * that. We actually do only one search (case sensitive) and keep tabs on
+ * whether we have found a case insensitive match in the process.
+ *
+ * To simplify matters for us, we do not treat the short vs long filenames as
+ * two hard links but instead if the lookup matches a short filename, we
+ * return the dentry for the corresponding long filename instead.
+ *
+ * There are three cases we need to distinguish here:
+ *
+ * 1) @dent perfectly matches (i.e. including case) a directory entry with a
+ *    file name in the WIN32 or POSIX namespaces. In this case
+ *    ntfs_lookup_inode_by_name() will return with name set to NULL and we
+ *    just d_splice_alias() @dent.
+ * 2) @dent matches (not including case) a directory entry with a file name in
+ *    the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return
+ *    with name set to point to a kmalloc()ed ntfs_name structure containing
+ *    the properly cased little endian Unicode name. We convert the name to the
+ *    current NLS code page, search if a dentry with this name already exists
+ *    and if so return that instead of @dent.  At this point things are
+ *    complicated by the possibility of 'disconnected' dentries due to NFS
+ *    which we deal with appropriately (see the code comments).  The VFS will
+ *    then destroy the old @dent and use the one we returned.  If a dentry is
+ *    not found, we allocate a new one, d_splice_alias() it, and return it as
+ *    above.
+ * 3) @dent matches either perfectly or not (i.e. we don't care about case) a
+ *    directory entry with a file name in the DOS namespace. In this case
+ *    ntfs_lookup_inode_by_name() will return with name set to point to a
+ *    kmalloc()ed ntfs_name structure containing the mft reference (cpu endian)
+ *    of the inode. We use the mft reference to read the inode and to find the
+ *    file name in the WIN32 namespace corresponding to the matched short file
+ *    name. We then convert the name to the current NLS code page, and proceed
+ *    searching for a dentry with this name, etc, as in case 2), above.
+ *
+ * Locking: Caller must hold i_sem on the directory.
+ */
+static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
+		struct nameidata *nd)
+{
+	ntfs_volume *vol = NTFS_SB(dir_ino->i_sb);
+	struct inode *dent_inode;
+	ntfschar *uname;
+	ntfs_name *name = NULL;
+	MFT_REF mref;
+	unsigned long dent_ino;
+	int uname_len;
+
+	ntfs_debug("Looking up %s in directory inode 0x%lx.",
+			dent->d_name.name, dir_ino->i_ino);
+	/* Convert the name of the dentry to Unicode. */
+	uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len,
+			&uname);
+	if (uname_len < 0) {
+		ntfs_error(vol->sb, "Failed to convert name to Unicode.");
+		return ERR_PTR(uname_len);
+	}
+	mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len,
+			&name);
+	kmem_cache_free(ntfs_name_cache, uname);
+	if (!IS_ERR_MREF(mref)) {
+		dent_ino = MREF(mref);
+		ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
+		dent_inode = ntfs_iget(vol->sb, dent_ino);
+		if (likely(!IS_ERR(dent_inode))) {
+			/* Consistency check. */
+			if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
+					NTFS_I(dent_inode)->seq_no ||
+					dent_ino == FILE_MFT) {
+				/* Perfect WIN32/POSIX match. -- Case 1. */
+				if (!name) {
+					ntfs_debug("Done.  (Case 1.)");
+					return d_splice_alias(dent_inode, dent);
+				}
+				/*
+				 * We are too indented.  Handle imperfect
+				 * matches and short file names further below.
+				 */
+				goto handle_name;
+			}
+			ntfs_error(vol->sb, "Found stale reference to inode "
+					"0x%lx (reference sequence number = "
+					"0x%x, inode sequence number = 0x%x), "
+					"returning -EIO. Run chkdsk.",
+					dent_ino, MSEQNO(mref),
+					NTFS_I(dent_inode)->seq_no);
+			iput(dent_inode);
+			dent_inode = ERR_PTR(-EIO);
+		} else
+			ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with "
+					"error code %li.", dent_ino,
+					PTR_ERR(dent_inode));
+		if (name)
+			kfree(name);
+		/* Return the error code. */
+		return (struct dentry *)dent_inode;
+	}
+	/* It is guaranteed that name is no longer allocated at this point. */
+	if (MREF_ERR(mref) == -ENOENT) {
+		ntfs_debug("Entry was not found, adding negative dentry.");
+		/* The dcache will handle negative entries. */
+		d_add(dent, NULL);
+		ntfs_debug("Done.");
+		return NULL;
+	}
+	ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error "
+			"code %i.", -MREF_ERR(mref));
+	return ERR_PTR(MREF_ERR(mref));
+
+	// TODO: Consider moving this lot to a separate function! (AIA)
+handle_name:
+   {
+	struct dentry *real_dent, *new_dent;
+	MFT_RECORD *m;
+	ntfs_attr_search_ctx *ctx;
+	ntfs_inode *ni = NTFS_I(dent_inode);
+	int err;
+	struct qstr nls_name;
+
+	nls_name.name = NULL;
+	if (name->type != FILE_NAME_DOS) {			/* Case 2. */
+		ntfs_debug("Case 2.");
+		nls_name.len = (unsigned)ntfs_ucstonls(vol,
+				(ntfschar*)&name->name, name->len,
+				(unsigned char**)&nls_name.name, 0);
+		kfree(name);
+	} else /* if (name->type == FILE_NAME_DOS) */ {		/* Case 3. */
+		FILE_NAME_ATTR *fn;
+
+		ntfs_debug("Case 3.");
+		kfree(name);
+
+		/* Find the WIN32 name corresponding to the matched DOS name. */
+		ni = NTFS_I(dent_inode);
+		m = map_mft_record(ni);
+		if (IS_ERR(m)) {
+			err = PTR_ERR(m);
+			m = NULL;
+			ctx = NULL;
+			goto err_out;
+		}
+		ctx = ntfs_attr_get_search_ctx(ni, m);
+		if (unlikely(!ctx)) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		do {
+			ATTR_RECORD *a;
+			u32 val_len;
+
+			err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0,
+					NULL, 0, ctx);
+			if (unlikely(err)) {
+				ntfs_error(vol->sb, "Inode corrupt: No WIN32 "
+						"namespace counterpart to DOS "
+						"file name. Run chkdsk.");
+				if (err == -ENOENT)
+					err = -EIO;
+				goto err_out;
+			}
+			/* Consistency checks. */
+			a = ctx->attr;
+			if (a->non_resident || a->flags)
+				goto eio_err_out;
+			val_len = le32_to_cpu(a->data.resident.value_length);
+			if (le16_to_cpu(a->data.resident.value_offset) +
+					val_len > le32_to_cpu(a->length))
+				goto eio_err_out;
+			fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu(
+					ctx->attr->data.resident.value_offset));
+			if ((u32)(fn->file_name_length * sizeof(ntfschar) +
+					sizeof(FILE_NAME_ATTR)) > val_len)
+				goto eio_err_out;
+		} while (fn->file_name_type != FILE_NAME_WIN32);
+
+		/* Convert the found WIN32 name to current NLS code page. */
+		nls_name.len = (unsigned)ntfs_ucstonls(vol,
+				(ntfschar*)&fn->file_name, fn->file_name_length,
+				(unsigned char**)&nls_name.name, 0);
+
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(ni);
+	}
+	m = NULL;
+	ctx = NULL;
+
+	/* Check if a conversion error occurred. */
+	if ((signed)nls_name.len < 0) {
+		err = (signed)nls_name.len;
+		goto err_out;
+	}
+	nls_name.hash = full_name_hash(nls_name.name, nls_name.len);
+
+	/*
+	 * Note: No need for dent->d_lock lock as i_sem is held on the
+	 * parent inode.
+	 */
+
+	/* Does a dentry matching the nls_name exist already? */
+	real_dent = d_lookup(dent->d_parent, &nls_name);
+	/* If not, create it now. */
+	if (!real_dent) {
+		real_dent = d_alloc(dent->d_parent, &nls_name);
+		kfree(nls_name.name);
+		if (!real_dent) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		new_dent = d_splice_alias(dent_inode, real_dent);
+		if (new_dent)
+			dput(real_dent);
+		else
+			new_dent = real_dent;
+		ntfs_debug("Done.  (Created new dentry.)");
+		return new_dent;
+	}
+	kfree(nls_name.name);
+	/* Matching dentry exists, check if it is negative. */
+	if (real_dent->d_inode) {
+		if (unlikely(real_dent->d_inode != dent_inode)) {
+			/* This can happen because bad inodes are unhashed. */
+			BUG_ON(!is_bad_inode(dent_inode));
+			BUG_ON(!is_bad_inode(real_dent->d_inode));
+		}
+		/*
+		 * Already have the inode and the dentry attached, decrement
+		 * the reference count to balance the ntfs_iget() we did
+		 * earlier on.  We found the dentry using d_lookup() so it
+		 * cannot be disconnected and thus we do not need to worry
+		 * about any NFS/disconnectedness issues here.
+		 */
+		iput(dent_inode);
+		ntfs_debug("Done.  (Already had inode and dentry.)");
+		return real_dent;
+	}
+	/*
+	 * Negative dentry: instantiate it unless the inode is a directory and
+	 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
+	 * in which case d_move() that in place of the found dentry.
+	 */
+	if (!S_ISDIR(dent_inode->i_mode)) {
+		/* Not a directory; everything is easy. */
+		d_instantiate(real_dent, dent_inode);
+		ntfs_debug("Done.  (Already had negative file dentry.)");
+		return real_dent;
+	}
+	spin_lock(&dcache_lock);
+	if (list_empty(&dent_inode->i_dentry)) {
+		/*
+		 * Directory without a 'disconnected' dentry; we need to do
+		 * d_instantiate() by hand because it takes dcache_lock which
+		 * we already hold.
+		 */
+		list_add(&real_dent->d_alias, &dent_inode->i_dentry);
+		real_dent->d_inode = dent_inode;
+		spin_unlock(&dcache_lock);
+		security_d_instantiate(real_dent, dent_inode);
+		ntfs_debug("Done.  (Already had negative directory dentry.)");
+		return real_dent;
+	}
+	/*
+	 * Directory with a 'disconnected' dentry; get a reference to the
+	 * 'disconnected' dentry.
+	 */
+	new_dent = list_entry(dent_inode->i_dentry.next, struct dentry,
+			d_alias);
+	dget_locked(new_dent);
+	spin_unlock(&dcache_lock);
+	/* Do security voodoo. */
+	security_d_instantiate(real_dent, dent_inode);
+	/* Move new_dent in place of real_dent. */
+	d_move(new_dent, real_dent);
+	/* Balance the ntfs_iget() we did above. */
+	iput(dent_inode);
+	/* Throw away real_dent. */
+	dput(real_dent);
+	/* Use new_dent as the actual dentry. */
+	ntfs_debug("Done.  (Already had negative, disconnected directory "
+			"dentry.)");
+	return new_dent;
+
+eio_err_out:
+	ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk.");
+	err = -EIO;
+err_out:
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	if (m)
+		unmap_mft_record(ni);
+	iput(dent_inode);
+	ntfs_error(vol->sb, "Failed, returning error code %i.", err);
+	return ERR_PTR(err);
+   }
+}
+
+/**
+ * Inode operations for directories.
+ */
+struct inode_operations ntfs_dir_inode_ops = {
+	.lookup	= ntfs_lookup,	/* VFS: Lookup directory. */
+};
+
+/**
+ * ntfs_get_parent - find the dentry of the parent of a given directory dentry
+ * @child_dent:		dentry of the directory whose parent directory to find
+ *
+ * Find the dentry for the parent directory of the directory specified by the
+ * dentry @child_dent.  This function is called from
+ * fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the
+ * default ->decode_fh() which is export_decode_fh() in the same file.
+ *
+ * The code is based on the ext3 ->get_parent() implementation found in
+ * fs/ext3/namei.c::ext3_get_parent().
+ *
+ * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_sem down.
+ *
+ * Return the dentry of the parent directory on success or the error code on
+ * error (IS_ERR() is true).
+ */
+struct dentry *ntfs_get_parent(struct dentry *child_dent)
+{
+	struct inode *vi = child_dent->d_inode;
+	ntfs_inode *ni = NTFS_I(vi);
+	MFT_RECORD *mrec;
+	ntfs_attr_search_ctx *ctx;
+	ATTR_RECORD *attr;
+	FILE_NAME_ATTR *fn;
+	struct inode *parent_vi;
+	struct dentry *parent_dent;
+	unsigned long parent_ino;
+	int err;
+
+	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
+	/* Get the mft record of the inode belonging to the child dentry. */
+	mrec = map_mft_record(ni);
+	if (IS_ERR(mrec))
+		return (struct dentry *)mrec;
+	/* Find the first file name attribute in the mft record. */
+	ctx = ntfs_attr_get_search_ctx(ni, mrec);
+	if (unlikely(!ctx)) {
+		unmap_mft_record(ni);
+		return ERR_PTR(-ENOMEM);
+	}
+try_next:
+	err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL,
+			0, ctx);
+	if (unlikely(err)) {
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(ni);
+		if (err == -ENOENT)
+			ntfs_error(vi->i_sb, "Inode 0x%lx does not have a "
+					"file name attribute.  Run chkdsk.",
+					vi->i_ino);
+		return ERR_PTR(err);
+	}
+	attr = ctx->attr;
+	if (unlikely(attr->non_resident))
+		goto try_next;
+	fn = (FILE_NAME_ATTR *)((u8 *)attr +
+			le16_to_cpu(attr->data.resident.value_offset));
+	if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) >
+			(u8*)attr + le32_to_cpu(attr->length)))
+		goto try_next;
+	/* Get the inode number of the parent directory. */
+	parent_ino = MREF_LE(fn->parent_directory);
+	/* Release the search context and the mft record of the child. */
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(ni);
+	/* Get the inode of the parent directory. */
+	parent_vi = ntfs_iget(vi->i_sb, parent_ino);
+	if (IS_ERR(parent_vi) || unlikely(is_bad_inode(parent_vi))) {
+		if (!IS_ERR(parent_vi))
+			iput(parent_vi);
+		ntfs_error(vi->i_sb, "Failed to get parent directory inode "
+				"0x%lx of child inode 0x%lx.", parent_ino,
+				vi->i_ino);
+		return ERR_PTR(-EACCES);
+	}
+	/* Finally get a dentry for the parent directory and return it. */
+	parent_dent = d_alloc_anon(parent_vi);
+	if (unlikely(!parent_dent)) {
+		iput(parent_vi);
+		return ERR_PTR(-ENOMEM);
+	}
+	ntfs_debug("Done for inode 0x%lx.", vi->i_ino);
+	return parent_dent;
+}
+
+/**
+ * ntfs_get_dentry - find a dentry for the inode from a file handle sub-fragment
+ * @sb:		super block identifying the mounted ntfs volume
+ * @fh:		the file handle sub-fragment
+ *
+ * Find a dentry for the inode given a file handle sub-fragment.  This function
+ * is called from fs/exportfs/expfs.c::find_exported_dentry() which in turn is
+ * called from the default ->decode_fh() which is export_decode_fh() in the
+ * same file.  The code is closely based on the default ->get_dentry() helper
+ * fs/exportfs/expfs.c::get_object().
+ *
+ * The @fh contains two 32-bit unsigned values, the first one is the inode
+ * number and the second one is the inode generation.
+ *
+ * Return the dentry on success or the error code on error (IS_ERR() is true).
+ */
+struct dentry *ntfs_get_dentry(struct super_block *sb, void *fh)
+{
+	struct inode *vi;
+	struct dentry *dent;
+	unsigned long ino = ((u32 *)fh)[0];
+	u32 gen = ((u32 *)fh)[1];
+
+	ntfs_debug("Entering for inode 0x%lx, generation 0x%x.", ino, gen);
+	vi = ntfs_iget(sb, ino);
+	if (IS_ERR(vi)) {
+		ntfs_error(sb, "Failed to get inode 0x%lx.", ino);
+		return (struct dentry *)vi;
+	}
+	if (unlikely(is_bad_inode(vi) || vi->i_generation != gen)) {
+		/* We didn't find the right inode. */
+		ntfs_error(sb, "Inode 0x%lx, bad count: %d %d or version 0x%x "
+				"0x%x.", vi->i_ino, vi->i_nlink,
+				atomic_read(&vi->i_count), vi->i_generation,
+				gen);
+		iput(vi);
+		return ERR_PTR(-ESTALE);
+	}
+	/* Now find a dentry.  If possible, get a well-connected one. */
+	dent = d_alloc_anon(vi);
+	if (unlikely(!dent)) {
+		iput(vi);
+		return ERR_PTR(-ENOMEM);
+	}
+	ntfs_debug("Done for inode 0x%lx, generation 0x%x.", ino, gen);
+	return dent;
+}
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
new file mode 100644
index 0000000..720ffb7
--- /dev/null
+++ b/fs/ntfs/ntfs.h
@@ -0,0 +1,129 @@
+/*
+ * ntfs.h - Defines for NTFS Linux kernel driver. Part of the Linux-NTFS
+ *	    project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (C) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_H
+#define _LINUX_NTFS_H
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/nls.h>
+#include <linux/smp.h>
+
+#include "types.h"
+#include "volume.h"
+#include "layout.h"
+
+typedef enum {
+	NTFS_BLOCK_SIZE		= 512,
+	NTFS_BLOCK_SIZE_BITS	= 9,
+	NTFS_SB_MAGIC		= 0x5346544e,	/* 'NTFS' */
+	NTFS_MAX_NAME_LEN	= 255,
+} NTFS_CONSTANTS;
+
+/* Global variables. */
+
+/* Slab caches (from super.c). */
+extern kmem_cache_t *ntfs_name_cache;
+extern kmem_cache_t *ntfs_inode_cache;
+extern kmem_cache_t *ntfs_big_inode_cache;
+extern kmem_cache_t *ntfs_attr_ctx_cache;
+extern kmem_cache_t *ntfs_index_ctx_cache;
+
+/* The various operations structs defined throughout the driver files. */
+extern struct address_space_operations ntfs_aops;
+extern struct address_space_operations ntfs_mst_aops;
+
+extern struct  file_operations ntfs_file_ops;
+extern struct inode_operations ntfs_file_inode_ops;
+
+extern struct  file_operations ntfs_dir_ops;
+extern struct inode_operations ntfs_dir_inode_ops;
+
+extern struct  file_operations ntfs_empty_file_ops;
+extern struct inode_operations ntfs_empty_inode_ops;
+
+/**
+ * NTFS_SB - return the ntfs volume given a vfs super block
+ * @sb:		VFS super block
+ *
+ * NTFS_SB() returns the ntfs volume associated with the VFS super block @sb.
+ */
+static inline ntfs_volume *NTFS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* Declarations of functions and global variables. */
+
+/* From fs/ntfs/compress.c */
+extern int ntfs_read_compressed_block(struct page *page);
+extern int allocate_compression_buffers(void);
+extern void free_compression_buffers(void);
+
+/* From fs/ntfs/super.c */
+#define default_upcase_len 0x10000
+extern struct semaphore ntfs_lock;
+
+typedef struct {
+	int val;
+	char *str;
+} option_t;
+extern const option_t on_errors_arr[];
+
+/* From fs/ntfs/mst.c */
+extern int post_read_mst_fixup(NTFS_RECORD *b, const u32 size);
+extern int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size);
+extern void post_write_mst_fixup(NTFS_RECORD *b);
+
+/* From fs/ntfs/unistr.c */
+extern BOOL ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
+		const ntfschar *s2, size_t s2_len,
+		const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_size);
+extern int ntfs_collate_names(const ntfschar *name1, const u32 name1_len,
+		const ntfschar *name2, const u32 name2_len,
+		const int err_val, const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_len);
+extern int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n);
+extern int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n,
+		const ntfschar *upcase, const u32 upcase_size);
+extern void ntfs_upcase_name(ntfschar *name, u32 name_len,
+		const ntfschar *upcase, const u32 upcase_len);
+extern void ntfs_file_upcase_value(FILE_NAME_ATTR *file_name_attr,
+		const ntfschar *upcase, const u32 upcase_len);
+extern int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1,
+		FILE_NAME_ATTR *file_name_attr2,
+		const int err_val, const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_len);
+extern int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
+		const int ins_len, ntfschar **outs);
+extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
+		const int ins_len, unsigned char **outs, int outs_len);
+
+/* From fs/ntfs/upcase.c */
+extern ntfschar *generate_default_upcase(void);
+
+#endif /* _LINUX_NTFS_H */
diff --git a/fs/ntfs/quota.c b/fs/ntfs/quota.c
new file mode 100644
index 0000000..833df2a
--- /dev/null
+++ b/fs/ntfs/quota.c
@@ -0,0 +1,117 @@
+/*
+ * quota.c - NTFS kernel quota ($Quota) handling.  Part of the Linux-NTFS
+ *	     project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef NTFS_RW
+
+#include "index.h"
+#include "quota.h"
+#include "debug.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume
+ * @vol:	ntfs volume on which to mark the quotas out of date
+ *
+ * Mark the quotas out of date on the ntfs volume @vol and return TRUE on
+ * success and FALSE on error.
+ */
+BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
+{
+	ntfs_index_context *ictx;
+	QUOTA_CONTROL_ENTRY *qce;
+	const le32 qid = QUOTA_DEFAULTS_ID;
+	int err;
+
+	ntfs_debug("Entering.");
+	if (NVolQuotaOutOfDate(vol))
+		goto done;
+	if (!vol->quota_ino || !vol->quota_q_ino) {
+		ntfs_error(vol->sb, "Quota inodes are not open.");
+		return FALSE;
+	}
+	down(&vol->quota_q_ino->i_sem);
+	ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
+	if (!ictx) {
+		ntfs_error(vol->sb, "Failed to get index context.");
+		goto err_out;
+	}
+	err = ntfs_index_lookup(&qid, sizeof(qid), ictx);
+	if (err) {
+		if (err == -ENOENT)
+			ntfs_error(vol->sb, "Quota defaults entry is not "
+					"present.");
+		else
+			ntfs_error(vol->sb, "Lookup of quota defaults entry "
+					"failed.");
+		goto err_out;
+	}
+	if (ictx->data_len < offsetof(QUOTA_CONTROL_ENTRY, sid)) {
+		ntfs_error(vol->sb, "Quota defaults entry size is invalid.  "
+				"Run chkdsk.");
+		goto err_out;
+	}
+	qce = (QUOTA_CONTROL_ENTRY*)ictx->data;
+	if (le32_to_cpu(qce->version) != QUOTA_VERSION) {
+		ntfs_error(vol->sb, "Quota defaults entry version 0x%x is not "
+				"supported.", le32_to_cpu(qce->version));
+		goto err_out;
+	}
+	ntfs_debug("Quota defaults flags = 0x%x.", le32_to_cpu(qce->flags));
+	/* If quotas are already marked out of date, no need to do anything. */
+	if (qce->flags & QUOTA_FLAG_OUT_OF_DATE)
+		goto set_done;
+	/*
+	 * If quota tracking is neither requested nor enabled and there are no
+	 * pending deletes, there is no need to mark the quotas out of date.
+	 */
+	if (!(qce->flags & (QUOTA_FLAG_TRACKING_ENABLED |
+			QUOTA_FLAG_TRACKING_REQUESTED |
+			QUOTA_FLAG_PENDING_DELETES)))
+		goto set_done;
+	/*
+	 * Set the QUOTA_FLAG_OUT_OF_DATE bit, thus marking the quotas out of
+	 * date.  This is verified on WinXP to be sufficient to cause Windows
+	 * to rescan the volume on boot and update all quota entries.
+	 */
+	qce->flags |= QUOTA_FLAG_OUT_OF_DATE;
+	/* Ensure the modified flags are written to disk. */
+	ntfs_index_entry_flush_dcache_page(ictx);
+	ntfs_index_entry_mark_dirty(ictx);
+set_done:
+	ntfs_index_ctx_put(ictx);
+	up(&vol->quota_q_ino->i_sem);
+	/*
+	 * We set the flag so we do not try to mark the quotas out of date
+	 * again on remount.
+	 */
+	NVolSetQuotaOutOfDate(vol);
+done:
+	ntfs_debug("Done.");
+	return TRUE;
+err_out:
+	if (ictx)
+		ntfs_index_ctx_put(ictx);
+	up(&vol->quota_q_ino->i_sem);
+	return FALSE;
+}
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/quota.h b/fs/ntfs/quota.h
new file mode 100644
index 0000000..40e4763
--- /dev/null
+++ b/fs/ntfs/quota.h
@@ -0,0 +1,35 @@
+/*
+ * quota.h - Defines for NTFS kernel quota ($Quota) handling.  Part of the
+ *	     Linux-NTFS project.
+ *
+ * Copyright (c) 2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_QUOTA_H
+#define _LINUX_NTFS_QUOTA_H
+
+#ifdef NTFS_RW
+
+#include "types.h"
+#include "volume.h"
+
+extern BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol);
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_QUOTA_H */
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
new file mode 100644
index 0000000..8438fb1
--- /dev/null
+++ b/fs/ntfs/runlist.c
@@ -0,0 +1,1438 @@
+/**
+ * runlist.c - NTFS runlist handling code.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "debug.h"
+#include "dir.h"
+#include "endian.h"
+#include "malloc.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_rl_mm - runlist memmove
+ *
+ * It is up to the caller to serialize access to the runlist @base.
+ */
+static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
+		int size)
+{
+	if (likely((dst != src) && (size > 0)))
+		memmove(base + dst, base + src, size * sizeof (*base));
+}
+
+/**
+ * ntfs_rl_mc - runlist memory copy
+ *
+ * It is up to the caller to serialize access to the runlists @dstbase and
+ * @srcbase.
+ */
+static inline void ntfs_rl_mc(runlist_element *dstbase, int dst,
+		runlist_element *srcbase, int src, int size)
+{
+	if (likely(size > 0))
+		memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
+}
+
+/**
+ * ntfs_rl_realloc - Reallocate memory for runlists
+ * @rl:		original runlist
+ * @old_size:	number of runlist elements in the original runlist @rl
+ * @new_size:	number of runlist elements we need space for
+ *
+ * As the runlists grow, more memory will be required.  To prevent the
+ * kernel from having to allocate and reallocate large numbers of small bits
+ * of memory, this function returns an entire page of memory.
+ *
+ * It is up to the caller to serialize access to the runlist @rl.
+ *
+ * N.B.  If the new allocation doesn't require a different number of pages in
+ *       memory, the function will return the original pointer.
+ *
+ * On success, return a pointer to the newly allocated, or recycled, memory.
+ * On error, return -errno. The following error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
+		int old_size, int new_size)
+{
+	runlist_element *new_rl;
+
+	old_size = PAGE_ALIGN(old_size * sizeof(*rl));
+	new_size = PAGE_ALIGN(new_size * sizeof(*rl));
+	if (old_size == new_size)
+		return rl;
+
+	new_rl = ntfs_malloc_nofs(new_size);
+	if (unlikely(!new_rl))
+		return ERR_PTR(-ENOMEM);
+
+	if (likely(rl != NULL)) {
+		if (unlikely(old_size > new_size))
+			old_size = new_size;
+		memcpy(new_rl, rl, old_size);
+		ntfs_free(rl);
+	}
+	return new_rl;
+}
+
+/**
+ * ntfs_are_rl_mergeable - test if two runlists can be joined together
+ * @dst:	original runlist
+ * @src:	new runlist to test for mergeability with @dst
+ *
+ * Test if two runlists can be joined together. For this, their VCNs and LCNs
+ * must be adjacent.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * Return: TRUE   Success, the runlists can be merged.
+ *	   FALSE  Failure, the runlists cannot be merged.
+ */
+static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst,
+		runlist_element *src)
+{
+	BUG_ON(!dst);
+	BUG_ON(!src);
+
+	if ((dst->lcn < 0) || (src->lcn < 0))     /* Are we merging holes? */
+		return FALSE;
+	if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
+		return FALSE;
+	if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
+		return FALSE;
+
+	return TRUE;
+}
+
+/**
+ * __ntfs_rl_merge - merge two runlists without testing if they can be merged
+ * @dst:	original, destination runlist
+ * @src:	new runlist to merge with @dst
+ *
+ * Merge the two runlists, writing into the destination runlist @dst. The
+ * caller must make sure the runlists can be merged or this will corrupt the
+ * destination runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ */
+static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
+{
+	dst->length += src->length;
+}
+
+/**
+ * ntfs_rl_append - append a runlist after a given element
+ * @dst:	original runlist to be worked on
+ * @dsize:	number of elements in @dst (including end marker)
+ * @src:	runlist to be inserted into @dst
+ * @ssize:	number of elements in @src (excluding end marker)
+ * @loc:	append the new runlist @src after this element in @dst
+ *
+ * Append the runlist @src after element @loc in @dst.  Merge the right end of
+ * the new runlist, if necessary. Adjust the size of the hole before the
+ * appended runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_append(runlist_element *dst,
+		int dsize, runlist_element *src, int ssize, int loc)
+{
+	BOOL right;
+	int magic;
+
+	BUG_ON(!dst);
+	BUG_ON(!src);
+
+	/* First, check if the right hand end needs merging. */
+	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+
+	/* Space required: @dst size + @src size, less one if we merged. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original runlists.
+	 */
+
+	/* First, merge the right hand end, if necessary. */
+	if (right)
+		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
+
+	magic = loc + ssize;
+
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
+	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
+
+	/* Adjust the size of the preceding hole. */
+	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
+
+	/* We may have changed the length of the file, so fix the end marker */
+	if (dst[magic + 1].lcn == LCN_ENOENT)
+		dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
+
+	return dst;
+}
+
+/**
+ * ntfs_rl_insert - insert a runlist into another
+ * @dst:	original runlist to be worked on
+ * @dsize:	number of elements in @dst (including end marker)
+ * @src:	new runlist to be inserted
+ * @ssize:	number of elements in @src (excluding end marker)
+ * @loc:	insert the new runlist @src before this element in @dst
+ *
+ * Insert the runlist @src before element @loc in the runlist @dst. Merge the
+ * left end of the new runlist, if necessary. Adjust the size of the hole
+ * after the inserted runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
+		int dsize, runlist_element *src, int ssize, int loc)
+{
+	BOOL left = FALSE;
+	BOOL disc = FALSE;	/* Discontinuity */
+	BOOL hole = FALSE;	/* Following a hole */
+	int magic;
+
+	BUG_ON(!dst);
+	BUG_ON(!src);
+
+	/* disc => Discontinuity between the end of @dst and the start of @src.
+	 *	   This means we might need to insert a hole.
+	 * hole => @dst ends with a hole or an unmapped region which we can
+	 *	   extend to match the discontinuity. */
+	if (loc == 0)
+		disc = (src[0].vcn > 0);
+	else {
+		s64 merged_length;
+
+		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
+
+		merged_length = dst[loc - 1].length;
+		if (left)
+			merged_length += src->length;
+
+		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
+		if (disc)
+			hole = (dst[loc - 1].lcn == LCN_HOLE);
+	}
+
+	/* Space required: @dst size + @src size, less one if we merged, plus
+	 * one if there was a discontinuity, less one for a trailing hole. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original runlist.
+	 */
+
+	if (left)
+		__ntfs_rl_merge(dst + loc - 1, src);
+
+	magic = loc + ssize - left + disc - hole;
+
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic, loc, dsize - loc);
+	ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
+
+	/* Adjust the VCN of the last run ... */
+	if (dst[magic].lcn <= LCN_HOLE)
+		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+	/* ... and the length. */
+	if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
+		dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
+
+	/* Writing beyond the end of the file and there's a discontinuity. */
+	if (disc) {
+		if (hole)
+			dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
+		else {
+			if (loc > 0) {
+				dst[loc].vcn = dst[loc - 1].vcn +
+						dst[loc - 1].length;
+				dst[loc].length = dst[loc + 1].vcn -
+						dst[loc].vcn;
+			} else {
+				dst[loc].vcn = 0;
+				dst[loc].length = dst[loc + 1].vcn;
+			}
+			dst[loc].lcn = LCN_RL_NOT_MAPPED;
+		}
+
+		magic += hole;
+
+		if (dst[magic].lcn == LCN_ENOENT)
+			dst[magic].vcn = dst[magic - 1].vcn +
+					dst[magic - 1].length;
+	}
+	return dst;
+}
+
+/**
+ * ntfs_rl_replace - overwrite a runlist element with another runlist
+ * @dst:	original runlist to be worked on
+ * @dsize:	number of elements in @dst (including end marker)
+ * @src:	new runlist to be inserted
+ * @ssize:	number of elements in @src (excluding end marker)
+ * @loc:	index in runlist @dst to overwrite with @src
+ *
+ * Replace the runlist element @dst at @loc with @src. Merge the left and
+ * right ends of the inserted runlist, if necessary.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
+		int dsize, runlist_element *src, int ssize, int loc)
+{
+	BOOL left = FALSE;
+	BOOL right;
+	int magic;
+
+	BUG_ON(!dst);
+	BUG_ON(!src);
+
+	/* First, merge the left and right ends, if necessary. */
+	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+	if (loc > 0)
+		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
+
+	/* Allocate some space. We'll need less if the left, right, or both
+	 * ends were merged. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original runlists.
+	 */
+	if (right)
+		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
+	if (left)
+		__ntfs_rl_merge(dst + loc - 1, src);
+
+	/* FIXME: What does this mean? (AIA) */
+	magic = loc + ssize - left;
+
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
+	ntfs_rl_mc(dst, loc, src, left, ssize - left);
+
+	/* We may have changed the length of the file, so fix the end marker */
+	if (dst[magic].lcn == LCN_ENOENT)
+		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+	return dst;
+}
+
+/**
+ * ntfs_rl_split - insert a runlist into the centre of a hole
+ * @dst:	original runlist to be worked on
+ * @dsize:	number of elements in @dst (including end marker)
+ * @src:	new runlist to be inserted
+ * @ssize:	number of elements in @src (excluding end marker)
+ * @loc:	index in runlist @dst at which to split and insert @src
+ *
+ * Split the runlist @dst at @loc into two and insert @src in between the two
+ * fragments. No merging of runlists is necessary. Adjust the size of the
+ * holes on either side.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
+		runlist_element *src, int ssize, int loc)
+{
+	BUG_ON(!dst);
+	BUG_ON(!src);
+
+	/* Space required: @dst size + @src size + one new hole. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original runlists.
+	 */
+
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
+	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
+
+	/* Adjust the size of the holes either side of @src. */
+	dst[loc].length		= dst[loc+1].vcn       - dst[loc].vcn;
+	dst[loc+ssize+1].vcn    = dst[loc+ssize].vcn   + dst[loc+ssize].length;
+	dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
+
+	return dst;
+}
+
+/**
+ * ntfs_runlists_merge - merge two runlists into one
+ * @drl:	original runlist to be worked on
+ * @srl:	new runlist to be merged into @drl
+ *
+ * First we sanity check the two runlists @srl and @drl to make sure that they
+ * are sensible and can be merged. The runlist @srl must be either after the
+ * runlist @drl or completely within a hole (or unmapped region) in @drl.
+ *
+ * It is up to the caller to serialize access to the runlists @drl and @srl.
+ *
+ * Merging of runlists is necessary in two cases:
+ *   1. When attribute lists are used and a further extent is being mapped.
+ *   2. When new clusters are allocated to fill a hole or extend a file.
+ *
+ * There are four possible ways @srl can be merged. It can:
+ *	- be inserted at the beginning of a hole,
+ *	- split the hole in two and be inserted between the two fragments,
+ *	- be appended at the end of a hole, or it can
+ *	- replace the whole hole.
+ * It can also be appended to the end of the runlist, which is just a variant
+ * of the insert case.
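+ *
+ * As a purely illustrative example (values invented for this explanation):
+ * if @drl contains a hole covering vcns 100-199 and @srl maps vcns 120-149,
+ * then @srl splits the hole in two; if @srl instead maps vcns 100-149, it is
+ * inserted at the beginning of the hole.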
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @drl and @srl are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @drl but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ *	-ERANGE	- The runlists overlap and cannot be merged.
+ */
+runlist_element *ntfs_runlists_merge(runlist_element *drl,
+		runlist_element *srl)
+{
+	int di, si;		/* Current index into @[ds]rl. */
+	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
+	int dins;		/* Index into @drl at which to insert @srl. */
+	int dend, send;		/* Last index into @[ds]rl. */
+	int dfinal, sfinal;	/* The last index into @[ds]rl with
+				   lcn >= LCN_HOLE. */
+	int marker = 0;
+	VCN marker_vcn = 0;
+
+#ifdef DEBUG
+	ntfs_debug("dst:");
+	ntfs_debug_dump_runlist(drl);
+	ntfs_debug("src:");
+	ntfs_debug_dump_runlist(srl);
+#endif
+
+	/* Check for silly calling... */
+	if (unlikely(!srl))
+		return drl;
+	if (IS_ERR(srl) || IS_ERR(drl))
+		return ERR_PTR(-EINVAL);
+
+	/* Check for the case where the first mapping is being done now. */
+	if (unlikely(!drl)) {
+		drl = srl;
+		/* Complete the source runlist if necessary. */
+		if (unlikely(drl[0].vcn)) {
+			/* Scan to the end of the source runlist. */
+			for (dend = 0; likely(drl[dend].length); dend++)
+				;
+			drl = ntfs_rl_realloc(drl, dend, dend + 1);
+			if (IS_ERR(drl))
+				return drl;
+			/* Insert start element at the front of the runlist. */
+			ntfs_rl_mm(drl, 1, 0, dend);
+			drl[0].vcn = 0;
+			drl[0].lcn = LCN_RL_NOT_MAPPED;
+			drl[0].length = drl[1].vcn;
+		}
+		goto finished;
+	}
+
+	si = di = 0;
+
+	/* Skip any unmapped start element(s) in the source runlist. */
+	while (srl[si].length && srl[si].lcn < LCN_HOLE)
+		si++;
+
+	/* Can't have an entirely unmapped source runlist. */
+	BUG_ON(!srl[si].length);
+
+	/* Record the starting points. */
+	sstart = si;
+
+	/*
+	 * Skip forward in @drl until we reach the position where @srl needs to
+	 * be inserted. If we reach the end of @drl, @srl just needs to be
+	 * appended to @drl.
+	 */
+	for (; drl[di].length; di++) {
+		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
+			break;
+	}
+	dins = di;
+
+	/* Sanity check for illegal overlaps. */
+	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
+			(srl[si].lcn >= 0)) {
+		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
+		return ERR_PTR(-ERANGE);
+	}
+
+	/* Scan to the end of both runlists in order to know their sizes. */
+	for (send = si; srl[send].length; send++)
+		;
+	for (dend = di; drl[dend].length; dend++)
+		;
+
+	if (srl[send].lcn == LCN_ENOENT)
+		marker_vcn = srl[marker = send].vcn;
+
+	/* Scan to the last element with lcn >= LCN_HOLE. */
+	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
+		;
+	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
+		;
+
+	{
+	BOOL start;
+	BOOL finish;
+	int ds = dend + 1;		/* Number of elements in drl & srl */
+	int ss = sfinal - sstart + 1;
+
+	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||    /* End of file   */
+		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
+	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&    /* End of file   */
+		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
+		  (srl[send - 1].vcn + srl[send - 1].length)));
+
+	/* Or we'll lose an end marker */
+	if (start && finish && (drl[dins].length == 0))
+		ss++;
+	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
+		finish = FALSE;
+#if 0
+	ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
+	ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
+	ntfs_debug("start = %i, finish = %i", start, finish);
+	ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
+#endif
+	if (start) {
+		if (finish)
+			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
+		else
+			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
+	} else {
+		if (finish)
+			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
+		else
+			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
+	}
+	if (IS_ERR(drl)) {
+		ntfs_error(NULL, "Merge failed.");
+		return drl;
+	}
+	ntfs_free(srl);
+	if (marker) {
+		ntfs_debug("Triggering marker code.");
+		for (ds = dend; drl[ds].length; ds++)
+			;
+		/* We only need to care if @srl ended after @drl. */
+		if (drl[ds].vcn <= marker_vcn) {
+			int slots = 0;
+
+			if (drl[ds].vcn == marker_vcn) {
+				ntfs_debug("Old marker = 0x%llx, replacing "
+						"with LCN_ENOENT.",
+						(unsigned long long)
+						drl[ds].lcn);
+				drl[ds].lcn = LCN_ENOENT;
+				goto finished;
+			}
+			/*
+			 * We need to create an unmapped runlist element in
+			 * @drl or extend an existing one before adding the
+			 * ENOENT terminator.
+			 */
+			if (drl[ds].lcn == LCN_ENOENT) {
+				ds--;
+				slots = 1;
+			}
+			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
+				/* Add an unmapped runlist element. */
+				if (!slots) {
+					/* FIXME/TODO: We need to have the
+					 * extra memory already! (AIA) */
+					drl = ntfs_rl_realloc(drl, ds, ds + 2);
+					if (!drl)
+						goto critical_error;
+					slots = 2;
+				}
+				ds++;
+				/* Need to set vcn if it isn't set already. */
+				if (slots != 1)
+					drl[ds].vcn = drl[ds - 1].vcn +
+							drl[ds - 1].length;
+				drl[ds].lcn = LCN_RL_NOT_MAPPED;
+				/* We have now used up a slot. */
+				slots--;
+			}
+			drl[ds].length = marker_vcn - drl[ds].vcn;
+			/* Finally add the ENOENT terminator. */
+			ds++;
+			if (!slots) {
+				/* FIXME/TODO: We need to have the extra
+				 * memory already! (AIA) */
+				drl = ntfs_rl_realloc(drl, ds, ds + 1);
+				if (!drl)
+					goto critical_error;
+			}
+			drl[ds].vcn = marker_vcn;
+			drl[ds].lcn = LCN_ENOENT;
+			drl[ds].length = (s64)0;
+		}
+	}
+	}
+
+finished:
+	/* The merge was completed successfully. */
+	ntfs_debug("Merged runlist:");
+	ntfs_debug_dump_runlist(drl);
+	return drl;
+
+critical_error:
+	/* Critical error! We cannot afford to fail here. */
+	ntfs_error(NULL, "Critical error! Not enough memory.");
+	panic("NTFS: Cannot continue.");
+}
+
+/**
+ * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
+ * @vol:	ntfs volume on which the attribute resides
+ * @attr:	attribute record whose mapping pairs array to decompress
+ * @old_rl:	optional runlist in which to insert @attr's runlist
+ *
+ * It is up to the caller to serialize access to the runlist @old_rl.
+ *
+ * Decompress the attribute @attr's mapping pairs array into a runlist. On
+ * success, return the decompressed runlist.
+ *
+ * If @old_rl is not NULL, the decompressed runlist is inserted into the
+ * appropriate place in @old_rl and the resultant, combined runlist is
+ * returned. The original @old_rl is deallocated.
+ *
+ * On error, return -errno. @old_rl is left unmodified in that case.
+ *
+ * The following error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EIO	- Corrupt runlist.
+ *	-EINVAL	- Invalid parameters were passed in.
+ *	-ERANGE	- The two runlists overlap.
+ *
+ * FIXME: For now we take the conceptually simplest approach of creating the
+ * new runlist disregarding the already existing one and then splicing the
+ * two into one, if that is possible (we check for overlap and discard the new
+ * runlist if an overlap is present before returning ERR_PTR(-ERANGE)).
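+ *
+ * Illustrative sketch of the on-disk encoding as understood by this function
+ * (worked example values are invented): each mapping pair starts with a
+ * header byte whose low nibble gives the number of run length bytes and
+ * whose high nibble gives the number of lcn delta bytes that follow, both
+ * stored little endian and sign extended.  For example, the byte sequence
+ * 0x21 0x18 0x34 0x56 describes a run of 0x18 clusters whose lcn lies 0x5634
+ * clusters after the lcn of the previous run, and a zero header byte
+ * terminates the array.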
+ */
+runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
+		const ATTR_RECORD *attr, runlist_element *old_rl)
+{
+	VCN vcn;		/* Current vcn. */
+	LCN lcn;		/* Current lcn. */
+	s64 deltaxcn;		/* Change in [vl]cn. */
+	runlist_element *rl;	/* The output runlist. */
+	u8 *buf;		/* Current position in mapping pairs array. */
+	u8 *attr_end;		/* End of attribute. */
+	int rlsize;		/* Size of runlist buffer. */
+	u16 rlpos;		/* Current runlist position in units of
+				   runlist_elements. */
+	u8 b;			/* Current byte offset in buf. */
+
+#ifdef DEBUG
+	/* Make sure attr exists and is non-resident. */
+	if (!attr || !attr->non_resident || sle64_to_cpu(
+			attr->data.non_resident.lowest_vcn) < (VCN)0) {
+		ntfs_error(vol->sb, "Invalid arguments.");
+		return ERR_PTR(-EINVAL);
+	}
+#endif
+	/* Start at vcn = lowest_vcn and lcn 0. */
+	vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
+	lcn = 0;
+	/* Get start of the mapping pairs array. */
+	buf = (u8*)attr + le16_to_cpu(
+			attr->data.non_resident.mapping_pairs_offset);
+	attr_end = (u8*)attr + le32_to_cpu(attr->length);
+	if (unlikely(buf < (u8*)attr || buf > attr_end)) {
+		ntfs_error(vol->sb, "Corrupt attribute.");
+		return ERR_PTR(-EIO);
+	}
+	/* Current position in runlist array. */
+	rlpos = 0;
+	/* Allocate first page and set current runlist size to one page. */
+	rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
+	if (unlikely(!rl))
+		return ERR_PTR(-ENOMEM);
+	/* Insert unmapped starting element if necessary. */
+	if (vcn) {
+		rl->vcn = 0;
+		rl->lcn = LCN_RL_NOT_MAPPED;
+		rl->length = vcn;
+		rlpos++;
+	}
+	while (buf < attr_end && *buf) {
+		/*
+		 * Allocate more memory if needed, including space for the
+		 * not-mapped and terminator elements. ntfs_malloc_nofs()
+		 * operates on whole pages only.
+		 */
+		if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
+			runlist_element *rl2;
+
+			rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
+			if (unlikely(!rl2)) {
+				ntfs_free(rl);
+				return ERR_PTR(-ENOMEM);
+			}
+			memcpy(rl2, rl, rlsize);
+			ntfs_free(rl);
+			rl = rl2;
+			rlsize += PAGE_SIZE;
+		}
+		/* Enter the current vcn into the current runlist element. */
+		rl[rlpos].vcn = vcn;
+		/*
+		 * Get the change in vcn, i.e. the run length in clusters.
+		 * Doing it this way ensures that we sign-extend negative values.
+		 * A negative run length doesn't make any sense, but hey, I
+		 * didn't make up the NTFS specs and Windows NT4 treats the run
+		 * length as a signed value so that's how it is...
+		 */
+		b = *buf & 0xf;
+		if (b) {
+			if (unlikely(buf + b > attr_end))
+				goto io_error;
+			for (deltaxcn = (s8)buf[b--]; b; b--)
+				deltaxcn = (deltaxcn << 8) + buf[b];
+		} else { /* The length entry is compulsory. */
+			ntfs_error(vol->sb, "Missing length entry in mapping "
+					"pairs array.");
+			deltaxcn = (s64)-1;
+		}
+		/*
+		 * Assume a negative length to indicate data corruption and
+		 * hence clean up and return an error.
+		 */
+		if (unlikely(deltaxcn < 0)) {
+			ntfs_error(vol->sb, "Invalid length in mapping pairs "
+					"array.");
+			goto err_out;
+		}
+		/*
+		 * Enter the current run length into the current runlist
+		 * element.
+		 */
+		rl[rlpos].length = deltaxcn;
+		/* Increment the current vcn by the current run length. */
+		vcn += deltaxcn;
+		/*
+		 * There might be no lcn change at all, as is the case for
+		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
+		 * to LCN_HOLE.
+		 */
+		if (!(*buf & 0xf0))
+			rl[rlpos].lcn = LCN_HOLE;
+		else {
+			/* Get the lcn change which really can be negative. */
+			u8 b2 = *buf & 0xf;
+			b = b2 + ((*buf >> 4) & 0xf);
+			if (buf + b > attr_end)
+				goto io_error;
+			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
+				deltaxcn = (deltaxcn << 8) + buf[b];
+			/* Change the current lcn to its new value. */
+			lcn += deltaxcn;
+#ifdef DEBUG
+			/*
+			 * On NTFS 1.2-, apparently can have lcn == -1 to
+			 * indicate a hole. But we haven't verified ourselves
+			 * whether it is really the lcn or the deltaxcn that is
+			 * -1. So if either is found give us a message so we
+			 * can investigate it further!
+			 */
+			if (vol->major_ver < 3) {
+				if (unlikely(deltaxcn == (LCN)-1))
+					ntfs_error(vol->sb, "lcn delta == -1");
+				if (unlikely(lcn == (LCN)-1))
+					ntfs_error(vol->sb, "lcn == -1");
+			}
+#endif
+			/* Check lcn is not below -1. */
+			if (unlikely(lcn < (LCN)-1)) {
+				ntfs_error(vol->sb, "Invalid LCN < -1 in "
+						"mapping pairs array.");
+				goto err_out;
+			}
+			/* Enter the current lcn into the runlist element. */
+			rl[rlpos].lcn = lcn;
+		}
+		/* Get to the next runlist element. */
+		rlpos++;
+		/* Increment the buffer position to the next mapping pair. */
+		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
+	}
+	if (unlikely(buf >= attr_end))
+		goto io_error;
+	/*
+	 * If there is a highest_vcn specified, it must be equal to the final
+	 * vcn in the runlist - 1, or something has gone badly wrong.
+	 */
+	deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
+	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
+mpa_err:
+		ntfs_error(vol->sb, "Corrupt mapping pairs array in "
+				"non-resident attribute.");
+		goto err_out;
+	}
+	/* Set up the not mapped runlist element if this is the base extent. */
+	if (!attr->data.non_resident.lowest_vcn) {
+		VCN max_cluster;
+
+		max_cluster = (sle64_to_cpu(
+				attr->data.non_resident.allocated_size) +
+				vol->cluster_size - 1) >>
+				vol->cluster_size_bits;
+		/*
+		 * If there is a difference between the highest_vcn and the
+		 * highest cluster, the runlist is either corrupt or, more
+		 * likely, there are more extents following this one.
+		 */
+		if (deltaxcn < --max_cluster) {
+			ntfs_debug("More extents to follow; deltaxcn = 0x%llx, "
+					"max_cluster = 0x%llx",
+					(unsigned long long)deltaxcn,
+					(unsigned long long)max_cluster);
+			rl[rlpos].vcn = vcn;
+			vcn += rl[rlpos].length = max_cluster - deltaxcn;
+			rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
+			rlpos++;
+		} else if (unlikely(deltaxcn > max_cluster)) {
+			ntfs_error(vol->sb, "Corrupt attribute. deltaxcn = "
+					"0x%llx, max_cluster = 0x%llx",
+					(unsigned long long)deltaxcn,
+					(unsigned long long)max_cluster);
+			goto mpa_err;
+		}
+		rl[rlpos].lcn = LCN_ENOENT;
+	} else /* Not the base extent. There may be more extents to follow. */
+		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
+
+	/* Set up the terminating runlist element. */
+	rl[rlpos].vcn = vcn;
+	rl[rlpos].length = (s64)0;
+	/* If no existing runlist was specified, we are done. */
+	if (!old_rl) {
+		ntfs_debug("Mapping pairs array successfully decompressed:");
+		ntfs_debug_dump_runlist(rl);
+		return rl;
+	}
+	/* Now combine the new and old runlists checking for overlaps. */
+	old_rl = ntfs_runlists_merge(old_rl, rl);
+	if (likely(!IS_ERR(old_rl)))
+		return old_rl;
+	ntfs_free(rl);
+	ntfs_error(vol->sb, "Failed to merge runlists.");
+	return old_rl;
+io_error:
+	ntfs_error(vol->sb, "Corrupt attribute.");
+err_out:
+	ntfs_free(rl);
+	return ERR_PTR(-EIO);
+}
+
+/**
+ * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
+ * @rl:		runlist to use for conversion
+ * @vcn:	vcn to convert
+ *
+ * Convert the virtual cluster number @vcn of an attribute into a logical
+ * cluster number (lcn) of a device using the runlist @rl to map vcns to their
+ * corresponding lcns.
+ *
+ * It is up to the caller to serialize access to the runlist @rl.
+ *
+ * Since lcns must be >= 0, we use negative return values with special meaning:
+ *
+ * Return value			Meaning / Description
+ * ==================================================
+ *  -1 = LCN_HOLE		Hole / not allocated on disk.
+ *  -2 = LCN_RL_NOT_MAPPED	This vcn is in a part of the runlist which has
+ *				not been mapped yet.
+ *  -3 = LCN_ENOENT		There is no such vcn in the attribute.
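+ *
+ * For example (illustrative values only), given a mapped run { vcn = 10,
+ * lcn = 200, length = 5 }, vcn 12 maps to lcn 200 + (12 - 10) = 202.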
+ *
+ * Locking: - The caller must have locked the runlist (for reading or writing).
+ *	    - This function does not touch the lock.
+ */
+LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
+{
+	int i;
+
+	BUG_ON(vcn < 0);
+	/*
+	 * If rl is NULL, assume that we have found an unmapped runlist. The
+	 * caller can then attempt to map it and fail appropriately if
+	 * necessary.
+	 */
+	if (unlikely(!rl))
+		return LCN_RL_NOT_MAPPED;
+
+	/* Catch out of lower bounds vcn. */
+	if (unlikely(vcn < rl[0].vcn))
+		return LCN_ENOENT;
+
+	for (i = 0; likely(rl[i].length); i++) {
+		if (unlikely(vcn < rl[i+1].vcn)) {
+			if (likely(rl[i].lcn >= (LCN)0))
+				return rl[i].lcn + (vcn - rl[i].vcn);
+			return rl[i].lcn;
+		}
+	}
+	/*
+	 * The terminator element is set up to the correct value, i.e. one of
+	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
+	 */
+	if (likely(rl[i].lcn < (LCN)0))
+		return rl[i].lcn;
+	/* Just in case... We could replace this with BUG() some day. */
+	return LCN_ENOENT;
+}
+
+/**
+ * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
+ * @n:		number for which to get the number of bytes
+ *
+ * Return the number of bytes required to store @n unambiguously as
+ * a signed number.
+ *
+ * This is used in the context of the mapping pairs array to determine how
+ * many bytes will be needed in the array to store a given logical cluster
+ * number (lcn) or a specific run length.
+ *
+ * Return the number of bytes required.  This function cannot fail.
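+ *
+ * For example (illustrative only), n = 127 fits in a single byte, whereas
+ * n = 128 needs two bytes (0x80 0x00), as a lone 0x80 would be read back as
+ * the signed value -128.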
+ */
+static inline int ntfs_get_nr_significant_bytes(const s64 n)
+{
+	s64 l = n;
+	int i;
+	s8 j;
+
+	i = 0;
+	do {
+		l >>= 8;
+		i++;
+	} while (l != 0 && l != -1);
+	j = (n >> 8 * (i - 1)) & 0xff;
+	/* If the sign bit is wrong, we need an extra byte. */
+	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
+		i++;
+	return i;
+}
+
+/**
+ * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
+ * @vol:	ntfs volume (needed for the ntfs version)
+ * @rl:		locked runlist to determine the size of the mapping pairs of
+ * @start_vcn:	vcn at which to start the mapping pairs array
+ *
+ * Walk the locked runlist @rl and calculate the size in bytes of the mapping
+ * pairs array corresponding to the runlist @rl, starting at vcn @start_vcn.
+ * This for example allows us to allocate a buffer of the right size when
+ * building the mapping pairs array.
+ *
+ * If @rl is NULL, just return 1 (for the single terminator byte).
+ *
+ * Return the calculated size in bytes on success.  On error, return -errno.
+ * The following error codes are defined:
+ *	-EINVAL	- Run list contains unmapped elements.  Make sure to only pass
+ *		  fully mapped runlists to this function.
+ *	-EIO	- The runlist is corrupt.
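+ *
+ * As a rough illustrative example, a fully mapped runlist consisting of a
+ * single run of 24 clusters whose lcn change fits in two bytes needs one
+ * header byte, one length byte, two lcn bytes and the terminating zero byte,
+ * i.e. 5 bytes in total.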
+ *
+ * Locking: @rl must be locked on entry (either for reading or writing), it
+ *	    remains locked throughout, and is left locked upon return.
+ */
+int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
+		const runlist_element *rl, const VCN start_vcn)
+{
+	LCN prev_lcn;
+	int rls;
+
+	BUG_ON(start_vcn < 0);
+	if (!rl) {
+		BUG_ON(start_vcn);
+		return 1;
+	}
+	/* Skip to runlist element containing @start_vcn. */
+	while (rl->length && start_vcn >= rl[1].vcn)
+		rl++;
+	if ((!rl->length && start_vcn > rl->vcn) || start_vcn < rl->vcn)
+		return -EINVAL;
+	prev_lcn = 0;
+	/* Always need the terminating zero byte. */
+	rls = 1;
+	/* Do the first partial run if present. */
+	if (start_vcn > rl->vcn) {
+		s64 delta;
+
+		/* We know rl->length != 0 already. */
+		if (rl->length < 0 || rl->lcn < LCN_HOLE)
+			goto err_out;
+		delta = start_vcn - rl->vcn;
+		/* Header byte + length. */
+		rls += 1 + ntfs_get_nr_significant_bytes(rl->length - delta);
+		/*
+		 * If the logical cluster number (lcn) denotes a hole and we
+		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
+		 * zero space.  On earlier NTFS versions we just store the lcn.
+		 * Note: this assumes that on NTFS 1.2-, holes are stored with
+		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
+		 */
+		if (rl->lcn >= 0 || vol->major_ver < 3) {
+			prev_lcn = rl->lcn;
+			if (rl->lcn >= 0)
+				prev_lcn += delta;
+			/* Change in lcn. */
+			rls += ntfs_get_nr_significant_bytes(prev_lcn);
+		}
+		/* Go to next runlist element. */
+		rl++;
+	}
+	/* Do the full runs. */
+	for (; rl->length; rl++) {
+		if (rl->length < 0 || rl->lcn < LCN_HOLE)
+			goto err_out;
+		/* Header byte + length. */
+		rls += 1 + ntfs_get_nr_significant_bytes(rl->length);
+		/*
+		 * If the logical cluster number (lcn) denotes a hole and we
+		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
+		 * zero space.  On earlier NTFS versions we just store the lcn.
+		 * Note: this assumes that on NTFS 1.2-, holes are stored with
+		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
+		 */
+		if (rl->lcn >= 0 || vol->major_ver < 3) {
+			/* Change in lcn. */
+			rls += ntfs_get_nr_significant_bytes(rl->lcn -
+					prev_lcn);
+			prev_lcn = rl->lcn;
+		}
+	}
+	return rls;
+err_out:
+	if (rl->lcn == LCN_RL_NOT_MAPPED)
+		rls = -EINVAL;
+	else
+		rls = -EIO;
+	return rls;
+}
+
+/**
+ * ntfs_write_significant_bytes - write the significant bytes of a number
+ * @dst:	destination buffer to write to
+ * @dst_max:	pointer to last byte of destination buffer for bounds checking
+ * @n:		number whose significant bytes to write
+ *
+ * Store in @dst the minimum number of bytes of @n which are required to
+ * identify @n unambiguously as a signed number, taking care not to exceed
+ * @dst_max, the maximum position within @dst to which we are allowed to
+ * write.
+ *
+ * This is used when building the mapping pairs array of a runlist to compress
+ * a given logical cluster number (lcn) or a specific run length to the minimum
+ * size possible.
+ *
+ * Return the number of bytes written on success.  On error, i.e. the
+ * destination buffer @dst is too small, return -ENOSPC.
+ */
+static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
+		const s64 n)
+{
+	s64 l = n;
+	int i;
+	s8 j;
+
+	i = 0;
+	do {
+		if (dst > dst_max)
+			goto err_out;
+		*dst++ = l & 0xffll;
+		l >>= 8;
+		i++;
+	} while (l != 0 && l != -1);
+	j = (n >> 8 * (i - 1)) & 0xff;
+	/* If the sign bit is wrong, we need an extra byte. */
+	if (n < 0 && j >= 0) {
+		if (dst > dst_max)
+			goto err_out;
+		i++;
+		*dst = (s8)-1;
+	} else if (n > 0 && j < 0) {
+		if (dst > dst_max)
+			goto err_out;
+		i++;
+		*dst = (s8)0;
+	}
+	return i;
+err_out:
+	return -ENOSPC;
+}
+
+/**
+ * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
+ * @vol:	ntfs volume (needed for the ntfs version)
+ * @dst:	destination buffer to which to write the mapping pairs array
+ * @dst_len:	size of destination buffer @dst in bytes
+ * @rl:		locked runlist for which to build the mapping pairs array
+ * @start_vcn:	vcn at which to start the mapping pairs array
+ * @stop_vcn:	first vcn outside destination buffer on success or -ENOSPC
+ *
+ * Create the mapping pairs array from the locked runlist @rl, starting at vcn
+ * @start_vcn and save the array in @dst.  @dst_len is the size of @dst in
+ * bytes and it should be at least equal to the value obtained by calling
+ * ntfs_get_size_for_mapping_pairs().
+ *
+ * If @rl is NULL, just write a single terminator byte to @dst.
+ *
+ * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
+ * the first vcn outside the destination buffer.  Note that on error, @dst has
+ * been filled with all the mapping pairs that will fit, thus it can be treated
+ * as partial success, in that a new attribute extent needs to be created or
+ * the next extent has to be used and the mapping pairs build has to be
+ * continued with @start_vcn set to *@stop_vcn.
+ *
+ * Return 0 on success and -errno on error.  The following error codes are
+ * defined:
+ *	-EINVAL	- Run list contains unmapped elements.  Make sure to only pass
+ *		  fully mapped runlists to this function.
+ *	-EIO	- The runlist is corrupt.
+ *	-ENOSPC	- The destination buffer is too small.
+ *
+ * Locking: @rl must be locked on entry (either for reading or writing), it
+ *	    remains locked throughout, and is left locked upon return.
+ */
+int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
+		const int dst_len, const runlist_element *rl,
+		const VCN start_vcn, VCN *const stop_vcn)
+{
+	LCN prev_lcn;
+	s8 *dst_max, *dst_next;
+	int err = -ENOSPC;
+	s8 len_len, lcn_len;
+
+	BUG_ON(start_vcn < 0);
+	BUG_ON(dst_len < 1);
+	if (!rl) {
+		BUG_ON(start_vcn);
+		if (stop_vcn)
+			*stop_vcn = 0;
+		/* Terminator byte. */
+		*dst = 0;
+		return 0;
+	}
+	/* Skip to runlist element containing @start_vcn. */
+	while (rl->length && start_vcn >= rl[1].vcn)
+		rl++;
+	if ((!rl->length && start_vcn > rl->vcn) || start_vcn < rl->vcn)
+		return -EINVAL;
+	/*
+	 * @dst_max is used for bounds checking in
+	 * ntfs_write_significant_bytes().
+	 */
+	dst_max = dst + dst_len - 1;
+	prev_lcn = 0;
+	/* Do the first partial run if present. */
+	if (start_vcn > rl->vcn) {
+		s64 delta;
+
+		/* We know rl->length != 0 already. */
+		if (rl->length < 0 || rl->lcn < LCN_HOLE)
+			goto err_out;
+		delta = start_vcn - rl->vcn;
+		/* Write length. */
+		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
+				rl->length - delta);
+		if (len_len < 0)
+			goto size_err;
+		/*
+		 * If the logical cluster number (lcn) denotes a hole and we
+		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
+		 * zero space.  On earlier NTFS versions we just write the lcn
+		 * change.  FIXME: Do we need to write the lcn change or just
+		 * the lcn in that case?  Not sure as I have never seen this
+		 * case on NT4. - We assume that we just need to write the lcn
+		 * change until someone tells us otherwise... (AIA)
+		 */
+		if (rl->lcn >= 0 || vol->major_ver < 3) {
+			prev_lcn = rl->lcn;
+			if (rl->lcn >= 0)
+				prev_lcn += delta;
+			/* Write change in lcn. */
+			lcn_len = ntfs_write_significant_bytes(dst + 1 +
+					len_len, dst_max, prev_lcn);
+			if (lcn_len < 0)
+				goto size_err;
+		} else
+			lcn_len = 0;
+		dst_next = dst + len_len + lcn_len + 1;
+		if (dst_next > dst_max)
+			goto size_err;
+		/* Update header byte. */
+		*dst = lcn_len << 4 | len_len;
+		/* Position at next mapping pairs array element. */
+		dst = dst_next;
+		/* Go to next runlist element. */
+		rl++;
+	}
+	/* Do the full runs. */
+	for (; rl->length; rl++) {
+		if (rl->length < 0 || rl->lcn < LCN_HOLE)
+			goto err_out;
+		/* Write length. */
+		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
+				rl->length);
+		if (len_len < 0)
+			goto size_err;
+		/*
+		 * If the logical cluster number (lcn) denotes a hole and we
+		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
+		 * zero space.  On earlier NTFS versions we just write the lcn
+		 * change.  FIXME: Do we need to write the lcn change or just
+		 * the lcn in that case?  Not sure as I have never seen this
+		 * case on NT4. - We assume that we just need to write the lcn
+		 * change until someone tells us otherwise... (AIA)
+		 */
+		if (rl->lcn >= 0 || vol->major_ver < 3) {
+			/* Write change in lcn. */
+			lcn_len = ntfs_write_significant_bytes(dst + 1 +
+					len_len, dst_max, rl->lcn - prev_lcn);
+			if (lcn_len < 0)
+				goto size_err;
+			prev_lcn = rl->lcn;
+		} else
+			lcn_len = 0;
+		dst_next = dst + len_len + lcn_len + 1;
+		if (dst_next > dst_max)
+			goto size_err;
+		/* Update header byte. */
+		*dst = lcn_len << 4 | len_len;
+		/* Position at next mapping pairs array element. */
+		dst = dst_next;
+	}
+	/* Success. */
+	err = 0;
+size_err:
+	/* Set stop vcn. */
+	if (stop_vcn)
+		*stop_vcn = rl->vcn;
+	/* Add terminator byte. */
+	*dst = 0;
+	return err;
+err_out:
+	if (rl->lcn == LCN_RL_NOT_MAPPED)
+		err = -EINVAL;
+	else
+		err = -EIO;
+	return err;
+}
+
+/**
+ * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
+ * @runlist:	runlist to truncate
+ * @new_length:	the new length of the runlist in VCNs
+ *
+ * Truncate the runlist described by @runlist as well as the memory buffer
+ * holding the runlist elements to a length of @new_length VCNs.
+ *
+ * If @new_length lies within the runlist, the runlist elements with VCNs of
+ * @new_length and above are discarded.
+ *
+ * If @new_length lies beyond the runlist, a sparse runlist element is added to
+ * the end of the runlist @runlist or if the last runlist element is a sparse
+ * one already, this is extended.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: The caller must hold @runlist->lock for writing.
+ */
+int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
+		const s64 new_length)
+{
+	runlist_element *rl;
+	int old_size;
+
+	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
+	BUG_ON(!runlist);
+	BUG_ON(new_length < 0);
+	rl = runlist->rl;
+	if (unlikely(!rl)) {
+		/*
+		 * Create a runlist consisting of a sparse runlist element of
+		 * length @new_length followed by a terminator runlist element.
+		 */
+		rl = ntfs_malloc_nofs(PAGE_SIZE);
+		if (unlikely(!rl)) {
+			ntfs_error(vol->sb, "Not enough memory to allocate "
+					"runlist element buffer.");
+			return -ENOMEM;
+		}
+		runlist->rl = rl;
+		rl[1].length = rl->vcn = 0;
+		rl->lcn = LCN_HOLE;
+		rl[1].vcn = rl->length = new_length;
+		rl[1].lcn = LCN_ENOENT;
+		return 0;
+	}
+	BUG_ON(new_length < rl->vcn);
+	/* Find @new_length in the runlist. */
+	while (likely(rl->length && new_length >= rl[1].vcn))
+		rl++;
+	/*
+	 * If not at the end of the runlist we need to shrink it.
+	 * If at the end of the runlist we need to expand it.
+	 */
+	if (rl->length) {
+		runlist_element *trl;
+		BOOL is_end;
+
+		ntfs_debug("Shrinking runlist.");
+		/* Determine the runlist size. */
+		trl = rl + 1;
+		while (likely(trl->length))
+			trl++;
+		old_size = trl - runlist->rl + 1;
+		/* Truncate the run. */
+		rl->length = new_length - rl->vcn;
+		/*
+		 * If a run was partially truncated, make the following runlist
+		 * element a terminator.
+		 */
+		is_end = FALSE;
+		if (rl->length) {
+			rl++;
+			if (!rl->length)
+				is_end = TRUE;
+			rl->vcn = new_length;
+			rl->length = 0;
+		}
+		rl->lcn = LCN_ENOENT;
+		/* Reallocate memory if necessary. */
+		if (!is_end) {
+			int new_size = rl - runlist->rl + 1;
+			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
+			if (IS_ERR(rl))
+				ntfs_warning(vol->sb, "Failed to shrink "
+						"runlist buffer.  This just "
+						"wastes a bit of memory "
+						"temporarily so we ignore it "
+						"and return success.");
+			else
+				runlist->rl = rl;
+		}
+	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
+		ntfs_debug("Expanding runlist.");
+		/*
+		 * If there is a previous runlist element and it is a sparse
+		 * one, extend it.  Otherwise need to add a new, sparse runlist
+		 * element.
+		 */
+		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
+			(rl - 1)->length = new_length - (rl - 1)->vcn;
+		else {
+			/* Determine the runlist size. */
+			old_size = rl - runlist->rl + 1;
+			/* Reallocate memory if necessary. */
+			rl = ntfs_rl_realloc(runlist->rl, old_size,
+					old_size + 1);
+			if (IS_ERR(rl)) {
+				ntfs_error(vol->sb, "Failed to expand runlist "
+						"buffer, aborting.");
+				return PTR_ERR(rl);
+			}
+			runlist->rl = rl;
+			/*
+			 * Set @rl to the same runlist element in the new
+			 * runlist as before in the old runlist.
+			 */
+			rl += old_size - 1;
+			/* Add a new, sparse runlist element. */
+			rl->lcn = LCN_HOLE;
+			rl->length = new_length - rl->vcn;
+			/* Add a new terminator runlist element. */
+			rl++;
+			rl->length = 0;
+		}
+		rl->vcn = new_length;
+		rl->lcn = LCN_ENOENT;
+	} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
+		/* Runlist already has same size as requested. */
+		rl->lcn = LCN_ENOENT;
+	}
+	ntfs_debug("Done.");
+	return 0;
+}
diff --git a/fs/ntfs/runlist.h b/fs/ntfs/runlist.h
new file mode 100644
index 0000000..7107fde
--- /dev/null
+++ b/fs/ntfs/runlist.h
@@ -0,0 +1,89 @@
+/*
+ * runlist.h - Defines for runlist handling in NTFS Linux kernel driver.
+ *	       Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_RUNLIST_H
+#define _LINUX_NTFS_RUNLIST_H
+
+#include "types.h"
+#include "layout.h"
+#include "volume.h"
+
+/**
+ * runlist_element - in memory vcn to lcn mapping array element
+ * @vcn:	starting vcn of the current array element
+ * @lcn:	starting lcn of the current array element
+ * @length:	length in clusters of the current array element
+ *
+ * The last vcn (in fact the last vcn + 1) is reached when length == 0.
+ *
+ * When lcn == -1 this means that the @length vcns starting at @vcn are not
+ * physically allocated (i.e. this is a hole / the data is sparse).
+ */
+typedef struct {	/* In memory vcn to lcn mapping structure element. */
+	VCN vcn;	/* vcn = Starting virtual cluster number. */
+	LCN lcn;	/* lcn = Starting logical cluster number. */
+	s64 length;	/* Run length in clusters. */
+} runlist_element;
+
+/**
+ * runlist - in memory vcn to lcn mapping array including a read/write lock
+ * @rl:		pointer to an array of runlist elements
+ * @lock:	read/write semaphore for serializing access to @rl
+ *
+ */
+typedef struct {
+	runlist_element *rl;
+	struct rw_semaphore lock;
+} runlist;
+
+static inline void ntfs_init_runlist(runlist *rl)
+{
+	rl->rl = NULL;
+	init_rwsem(&rl->lock);
+}
+
+typedef enum {
+	LCN_HOLE		= -1,	/* Keep this as highest value or die! */
+	LCN_RL_NOT_MAPPED	= -2,
+	LCN_ENOENT		= -3,
+} LCN_SPECIAL_VALUES;
+
+extern runlist_element *ntfs_runlists_merge(runlist_element *drl,
+		runlist_element *srl);
+
+extern runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
+		const ATTR_RECORD *attr, runlist_element *old_rl);
+
+extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
+
+extern int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
+		const runlist_element *rl, const VCN start_vcn);
+
+extern int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
+		const int dst_len, const runlist_element *rl,
+		const VCN start_vcn, VCN *const stop_vcn);
+
+extern int ntfs_rl_truncate_nolock(const ntfs_volume *vol,
+		runlist *const runlist, const s64 new_length);
+
+#endif /* _LINUX_NTFS_RUNLIST_H */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
new file mode 100644
index 0000000..212a3d0
--- /dev/null
+++ b/fs/ntfs/super.c
@@ -0,0 +1,2771 @@
+/*
+ * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2001,2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>	/* For bdev_hardsect_size(). */
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/moduleparam.h>
+#include <linux/smp_lock.h>
+
+#include "sysctl.h"
+#include "logfile.h"
+#include "quota.h"
+#include "dir.h"
+#include "debug.h"
+#include "index.h"
+#include "aops.h"
+#include "malloc.h"
+#include "ntfs.h"
+
+/* Number of mounted file systems which have compression enabled. */
+static unsigned long ntfs_nr_compression_users;
+
+/* A global default upcase table and a corresponding reference count. */
+static ntfschar *default_upcase = NULL;
+static unsigned long ntfs_nr_upcase_users = 0;
+
+/* Error constants/strings used in inode.c::ntfs_show_options(). */
+typedef enum {
+	/* One of these must be present, default is ON_ERRORS_CONTINUE. */
+	ON_ERRORS_PANIC			= 0x01,
+	ON_ERRORS_REMOUNT_RO		= 0x02,
+	ON_ERRORS_CONTINUE		= 0x04,
+	/* Optional, can be combined with any of the above. */
+	ON_ERRORS_RECOVER		= 0x10,
+} ON_ERRORS_ACTIONS;
+
+const option_t on_errors_arr[] = {
+	{ ON_ERRORS_PANIC,	"panic" },
+	{ ON_ERRORS_REMOUNT_RO,	"remount-ro", },
+	{ ON_ERRORS_CONTINUE,	"continue", },
+	{ ON_ERRORS_RECOVER,	"recover" },
+	{ 0,			NULL }
+};
+
+/**
+ * simple_getbool - convert a simple option argument into a boolean value
+ *
+ * Copied from old ntfs driver (which copied from vfat driver).
+ */
+static int simple_getbool(char *s, BOOL *setval)
+{
+	if (s) {
+		if (!strcmp(s, "1") || !strcmp(s, "yes") || !strcmp(s, "true"))
+			*setval = TRUE;
+		else if (!strcmp(s, "0") || !strcmp(s, "no") ||
+							!strcmp(s, "false"))
+			*setval = FALSE;
+		else
+			return 0;
+	} else
+		*setval = TRUE;
+	return 1;
+}
+
+/**
+ * parse_options - parse the (re)mount options
+ * @vol:	ntfs volume
+ * @opt:	string containing the (re)mount options
+ *
+ * Parse the recognized options in @opt for the ntfs volume described by @vol.
+ */
+static BOOL parse_options(ntfs_volume *vol, char *opt)
+{
+	char *p, *v, *ov;
+	static char *utf8 = "utf8";
+	int errors = 0, sloppy = 0;
+	uid_t uid = (uid_t)-1;
+	gid_t gid = (gid_t)-1;
+	mode_t fmask = (mode_t)-1, dmask = (mode_t)-1;
+	int mft_zone_multiplier = -1, on_errors = -1;
+	int show_sys_files = -1, case_sensitive = -1;
+	struct nls_table *nls_map = NULL, *old_nls;
+
+	/* I am lazy... (-8 */
+#define NTFS_GETOPT_WITH_DEFAULT(option, variable, default_value)	\
+	if (!strcmp(p, option)) {					\
+		if (!v || !*v)						\
+			variable = default_value;			\
+		else {							\
+			variable = simple_strtoul(ov = v, &v, 0);	\
+			if (*v)						\
+				goto needs_val;				\
+		}							\
+	}
+#define NTFS_GETOPT(option, variable)					\
+	if (!strcmp(p, option)) {					\
+		if (!v || !*v)						\
+			goto needs_arg;					\
+		variable = simple_strtoul(ov = v, &v, 0);		\
+		if (*v)							\
+			goto needs_val;					\
+	}
+#define NTFS_GETOPT_BOOL(option, variable)				\
+	if (!strcmp(p, option)) {					\
+		BOOL val;						\
+		if (!simple_getbool(v, &val))				\
+			goto needs_bool;				\
+		variable = val;						\
+	}
+#define NTFS_GETOPT_OPTIONS_ARRAY(option, variable, opt_array)		\
+	if (!strcmp(p, option)) {					\
+		int _i;							\
+		if (!v || !*v)						\
+			goto needs_arg;					\
+		ov = v;							\
+		if (variable == -1)					\
+			variable = 0;					\
+		for (_i = 0; opt_array[_i].str && *opt_array[_i].str; _i++) \
+			if (!strcmp(opt_array[_i].str, v)) {		\
+				variable |= opt_array[_i].val;		\
+				break;					\
+			}						\
+		if (!opt_array[_i].str || !*opt_array[_i].str)		\
+			goto needs_val;					\
+	}
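+	/*
+	 * Each macro above expands to a plain if statement, which is why the
+	 * uses below can be chained together with else.
+	 */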
+	if (!opt || !*opt)
+		goto no_mount_options;
+	ntfs_debug("Entering with mount options string: %s", opt);
+	while ((p = strsep(&opt, ","))) {
+		if ((v = strchr(p, '=')))
+			*v++ = 0;
+		NTFS_GETOPT("uid", uid)
+		else NTFS_GETOPT("gid", gid)
+		else NTFS_GETOPT("umask", fmask = dmask)
+		else NTFS_GETOPT("fmask", fmask)
+		else NTFS_GETOPT("dmask", dmask)
+		else NTFS_GETOPT("mft_zone_multiplier", mft_zone_multiplier)
+		else NTFS_GETOPT_WITH_DEFAULT("sloppy", sloppy, TRUE)
+		else NTFS_GETOPT_BOOL("show_sys_files", show_sys_files)
+		else NTFS_GETOPT_BOOL("case_sensitive", case_sensitive)
+		else NTFS_GETOPT_OPTIONS_ARRAY("errors", on_errors,
+				on_errors_arr)
+		else if (!strcmp(p, "posix") || !strcmp(p, "show_inodes"))
+			ntfs_warning(vol->sb, "Ignoring obsolete option %s.",
+					p);
+		else if (!strcmp(p, "nls") || !strcmp(p, "iocharset")) {
+			if (!strcmp(p, "iocharset"))
+				ntfs_warning(vol->sb, "Option iocharset is "
+						"deprecated. Please use "
+						"option nls=<charsetname> in "
+						"the future.");
+			if (!v || !*v)
+				goto needs_arg;
+use_utf8:
+			old_nls = nls_map;
+			nls_map = load_nls(v);
+			if (!nls_map) {
+				if (!old_nls) {
+					ntfs_error(vol->sb, "NLS character set "
+							"%s not found.", v);
+					return FALSE;
+				}
+				ntfs_error(vol->sb, "NLS character set %s not "
+						"found. Using previous one %s.",
+						v, old_nls->charset);
+				nls_map = old_nls;
+			} else /* nls_map */ {
+				if (old_nls)
+					unload_nls(old_nls);
+			}
+		} else if (!strcmp(p, "utf8")) {
+			BOOL val = FALSE;
+			ntfs_warning(vol->sb, "Option utf8 is no longer "
+				   "supported; using option nls=utf8 instead. "
+				   "Please use option nls=utf8 in the future "
+				   "and make sure utf8 is compiled either as "
+				   "a module or into the kernel.");
+			if (!v || !*v)
+				val = TRUE;
+			else if (!simple_getbool(v, &val))
+				goto needs_bool;
+			if (val) {
+				v = utf8;
+				goto use_utf8;
+			}
+		} else {
+			ntfs_error(vol->sb, "Unrecognized mount option %s.", p);
+			if (errors < INT_MAX)
+				errors++;
+		}
+#undef NTFS_GETOPT_OPTIONS_ARRAY
+#undef NTFS_GETOPT_BOOL
+#undef NTFS_GETOPT
+#undef NTFS_GETOPT_WITH_DEFAULT
+	}
+no_mount_options:
+	if (errors && !sloppy)
+		return FALSE;
+	if (sloppy)
+		ntfs_warning(vol->sb, "Sloppy option given. Ignoring "
+				"unrecognized mount option(s) and continuing.");
+	/* Keep this first! */
+	if (on_errors != -1) {
+		if (!on_errors) {
+			ntfs_error(vol->sb, "Invalid errors option argument "
+					"or bug in options parser.");
+			return FALSE;
+		}
+	}
+	if (nls_map) {
+		if (vol->nls_map && vol->nls_map != nls_map) {
+			ntfs_error(vol->sb, "Cannot change NLS character set "
+					"on remount.");
+			return FALSE;
+		} /* else (!vol->nls_map) */
+		ntfs_debug("Using NLS character set %s.", nls_map->charset);
+		vol->nls_map = nls_map;
+	} else /* (!nls_map) */ {
+		if (!vol->nls_map) {
+			vol->nls_map = load_nls_default();
+			if (!vol->nls_map) {
+				ntfs_error(vol->sb, "Failed to load default "
+						"NLS character set.");
+				return FALSE;
+			}
+			ntfs_debug("Using default NLS character set (%s).",
+					vol->nls_map->charset);
+		}
+	}
+	if (mft_zone_multiplier != -1) {
+		if (vol->mft_zone_multiplier && vol->mft_zone_multiplier !=
+				mft_zone_multiplier) {
+			ntfs_error(vol->sb, "Cannot change mft_zone_multiplier "
+					"on remount.");
+			return FALSE;
+		}
+		if (mft_zone_multiplier < 1 || mft_zone_multiplier > 4) {
+			ntfs_error(vol->sb, "Invalid mft_zone_multiplier. "
+					"Using default value, i.e. 1.");
+			mft_zone_multiplier = 1;
+		}
+		vol->mft_zone_multiplier = mft_zone_multiplier;
+	}
+	if (!vol->mft_zone_multiplier)
+		vol->mft_zone_multiplier = 1;
+	if (on_errors != -1)
+		vol->on_errors = on_errors;
+	if (!vol->on_errors || vol->on_errors == ON_ERRORS_RECOVER)
+		vol->on_errors |= ON_ERRORS_CONTINUE;
+	if (uid != (uid_t)-1)
+		vol->uid = uid;
+	if (gid != (gid_t)-1)
+		vol->gid = gid;
+	if (fmask != (mode_t)-1)
+		vol->fmask = fmask;
+	if (dmask != (mode_t)-1)
+		vol->dmask = dmask;
+	if (show_sys_files != -1) {
+		if (show_sys_files)
+			NVolSetShowSystemFiles(vol);
+		else
+			NVolClearShowSystemFiles(vol);
+	}
+	if (case_sensitive != -1) {
+		if (case_sensitive)
+			NVolSetCaseSensitive(vol);
+		else
+			NVolClearCaseSensitive(vol);
+	}
+	return TRUE;
+needs_arg:
+	ntfs_error(vol->sb, "The %s option requires an argument.", p);
+	return FALSE;
+needs_bool:
+	ntfs_error(vol->sb, "The %s option requires a boolean argument.", p);
+	return FALSE;
+needs_val:
+	ntfs_error(vol->sb, "Invalid %s option argument: %s", p, ov);
+	return FALSE;
+}
+
+#ifdef NTFS_RW
+
+/**
+ * ntfs_write_volume_flags - write new flags to the volume information flags
+ * @vol:	ntfs volume on which to modify the flags
+ * @flags:	new flags value for the volume information flags
+ *
+ * Internal function.  You probably want to use ntfs_{set,clear}_volume_flags()
+ * instead (see below).
+ *
+ * Replace the volume information flags on the volume @vol with the value
+ * supplied in @flags.  Note, this overwrites the volume information flags, so
+ * make sure to combine the flags you want to modify with the old flags and use
+ * the result when calling ntfs_write_volume_flags().
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_write_volume_flags(ntfs_volume *vol, const VOLUME_FLAGS flags)
+{
+	ntfs_inode *ni = NTFS_I(vol->vol_ino);
+	MFT_RECORD *m;
+	VOLUME_INFORMATION *vi;
+	ntfs_attr_search_ctx *ctx;
+	int err;
+
+	ntfs_debug("Entering, old flags = 0x%x, new flags = 0x%x.",
+			le16_to_cpu(vol->vol_flags), le16_to_cpu(flags));
+	if (vol->vol_flags == flags)
+		goto done;
+	BUG_ON(!ni);
+	m = map_mft_record(ni);
+	if (IS_ERR(m)) {
+		err = PTR_ERR(m);
+		goto err_out;
+	}
+	ctx = ntfs_attr_get_search_ctx(ni, m);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto put_unm_err_out;
+	}
+	err = ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
+			ctx);
+	if (err)
+		goto put_unm_err_out;
+	vi = (VOLUME_INFORMATION*)((u8*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	vol->vol_flags = vi->flags = flags;
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(ni);
+done:
+	ntfs_debug("Done.");
+	return 0;
+put_unm_err_out:
+	if (ctx)
+		ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(ni);
+err_out:
+	ntfs_error(vol->sb, "Failed with error code %i.", -err);
+	return err;
+}
+
+/**
+ * ntfs_set_volume_flags - set bits in the volume information flags
+ * @vol:	ntfs volume on which to modify the flags
+ * @flags:	flags to set on the volume
+ *
+ * Set the bits in @flags in the volume information flags on the volume @vol.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_set_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
+{
+	flags &= VOLUME_FLAGS_MASK;
+	return ntfs_write_volume_flags(vol, vol->vol_flags | flags);
+}
+
+/**
+ * ntfs_clear_volume_flags - clear bits in the volume information flags
+ * @vol:	ntfs volume on which to modify the flags
+ * @flags:	flags to clear on the volume
+ *
+ * Clear the bits in @flags in the volume information flags on the volume @vol.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static inline int ntfs_clear_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
+{
+	flags &= VOLUME_FLAGS_MASK;
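+	/* Complement in CPU order, then mask the bits off the le16 flags. */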
+	flags = vol->vol_flags & cpu_to_le16(~le16_to_cpu(flags));
+	return ntfs_write_volume_flags(vol, flags);
+}
+
+#endif /* NTFS_RW */
+
+/**
+ * ntfs_remount - change the mount options of a mounted ntfs filesystem
+ * @sb:		superblock of mounted ntfs filesystem
+ * @flags:	remount flags
+ * @opt:	remount options string
+ *
+ * Change the mount options of an already mounted ntfs filesystem.
+ *
+ * NOTE:  The VFS sets the @sb->s_flags remount flags to @flags after
+ * ntfs_remount() returns successfully (i.e. returns 0).  Otherwise,
+ * @sb->s_flags are not changed.
+ */
+static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
+{
+	ntfs_volume *vol = NTFS_SB(sb);
+
+	ntfs_debug("Entering with remount options string: %s", opt);
+#ifndef NTFS_RW
+	/* For read-only compiled driver, enforce all read-only flags. */
+	*flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+#else /* NTFS_RW */
+	/*
+	 * For the read-write compiled driver, if we are remounting read-write,
+	 * make sure there are no volume errors and that no unsupported volume
+	 * flags are set.  Also, empty the logfile journal as it would become
+	 * stale as soon as something is written to the volume and mark the
+	 * volume dirty so that chkdsk is run if the volume is not umounted
+	 * cleanly.  Finally, mark the quotas out of date so Windows rescans
+	 * the volume on boot and updates them.
+	 *
+	 * When remounting read-only, mark the volume clean if no volume errors
+	 * have occurred.
+	 */
+	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+		static const char *es = ".  Cannot remount read-write.";
+
+		/* Remounting read-write. */
+		if (NVolErrors(vol)) {
+			ntfs_error(sb, "Volume has errors and is read-only%s",
+					es);
+			return -EROFS;
+		}
+		if (vol->vol_flags & VOLUME_IS_DIRTY) {
+			ntfs_error(sb, "Volume is dirty and read-only%s", es);
+			return -EROFS;
+		}
+		if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
+			ntfs_error(sb, "Volume has unsupported flags set and "
+					"is read-only%s", es);
+			return -EROFS;
+		}
+		if (ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
+			ntfs_error(sb, "Failed to set dirty bit in volume "
+					"information flags%s", es);
+			return -EROFS;
+		}
+#if 0
+		// TODO: Enable this code once we start modifying anything that
+		//	 is different between NTFS 1.2 and 3.x...
+		/* Set NT4 compatibility flag on newer NTFS version volumes. */
+		if ((vol->major_ver > 1)) {
+			if (ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
+				ntfs_error(sb, "Failed to set NT4 "
+						"compatibility flag%s", es);
+				NVolSetErrors(vol);
+				return -EROFS;
+			}
+		}
+#endif
+		if (!ntfs_empty_logfile(vol->logfile_ino)) {
+			ntfs_error(sb, "Failed to empty journal $LogFile%s",
+					es);
+			NVolSetErrors(vol);
+			return -EROFS;
+		}
+		if (!ntfs_mark_quotas_out_of_date(vol)) {
+			ntfs_error(sb, "Failed to mark quotas out of date%s",
+					es);
+			NVolSetErrors(vol);
+			return -EROFS;
+		}
+	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
+		/* Remounting read-only. */
+		if (!NVolErrors(vol)) {
+			if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
+				ntfs_warning(sb, "Failed to clear dirty bit "
+						"in volume information "
+						"flags.  Run chkdsk.");
+		}
+	}
+#endif /* NTFS_RW */
+
+	// TODO: Deal with *flags.
+
+	if (!parse_options(vol, opt))
+		return -EINVAL;
+	ntfs_debug("Done.");
+	return 0;
+}
+
+/**
+ * is_boot_sector_ntfs - check whether a boot sector is a valid NTFS boot sector
+ * @sb:		Super block of the device to which @b belongs.
+ * @b:		Boot sector of device @sb to check.
+ * @silent:	If TRUE, all output will be silenced.
+ *
+ * is_boot_sector_ntfs() checks whether the boot sector @b is a valid NTFS boot
+ * sector. Returns TRUE if it is valid and FALSE if not.
+ *
+ * @sb is only needed for warning/error output, i.e. it can be NULL when silent
+ * is TRUE.
+ */
+static BOOL is_boot_sector_ntfs(const struct super_block *sb,
+		const NTFS_BOOT_SECTOR *b, const BOOL silent)
+{
+	/*
+	 * Check that checksum == sum of u32 values from b to the checksum
+	 * field. If checksum is zero, no checking is done.
+	 */
+	if ((void*)b < (void*)&b->checksum && b->checksum) {
+		le32 *u;
+		u32 i;
+
+		for (i = 0, u = (le32*)b; u < (le32*)(&b->checksum); ++u)
+			i += le32_to_cpup(u);
+		if (le32_to_cpu(b->checksum) != i)
+			goto not_ntfs;
+	}
+	/* Check that the OEM identifier is "NTFS    ". */
+	if (b->oem_id != magicNTFS)
+		goto not_ntfs;
+	/* Check bytes per sector value is between 256 and 4096. */
+	if (le16_to_cpu(b->bpb.bytes_per_sector) < 0x100 ||
+			le16_to_cpu(b->bpb.bytes_per_sector) > 0x1000)
+		goto not_ntfs;
+	/* Check sectors per cluster value is valid. */
+	switch (b->bpb.sectors_per_cluster) {
+	case 1: case 2: case 4: case 8: case 16: case 32: case 64: case 128:
+		break;
+	default:
+		goto not_ntfs;
+	}
+	/* Check the cluster size is not above 65536 bytes. */
+	if ((u32)le16_to_cpu(b->bpb.bytes_per_sector) *
+			b->bpb.sectors_per_cluster > 0x10000)
+		goto not_ntfs;
+	/* Check reserved/unused fields are really zero. */
+	if (le16_to_cpu(b->bpb.reserved_sectors) ||
+			le16_to_cpu(b->bpb.root_entries) ||
+			le16_to_cpu(b->bpb.sectors) ||
+			le16_to_cpu(b->bpb.sectors_per_fat) ||
+			le32_to_cpu(b->bpb.large_sectors) || b->bpb.fats)
+		goto not_ntfs;
+	/* Check the clusters per mft record value is valid. */
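+	/* Values 0xe1-0xf7 encode -log2 of the mft record size in bytes. */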
+	if ((u8)b->clusters_per_mft_record < 0xe1 ||
+			(u8)b->clusters_per_mft_record > 0xf7)
+		switch (b->clusters_per_mft_record) {
+		case 1: case 2: case 4: case 8: case 16: case 32: case 64:
+			break;
+		default:
+			goto not_ntfs;
+		}
+	/* Check the clusters per index record value is valid. */
+	if ((u8)b->clusters_per_index_record < 0xe1 ||
+			(u8)b->clusters_per_index_record > 0xf7)
+		switch (b->clusters_per_index_record) {
+		case 1: case 2: case 4: case 8: case 16: case 32: case 64:
+			break;
+		default:
+			goto not_ntfs;
+		}
+	/*
+	 * Check for valid end of sector marker. We will work without it, but
+	 * many BIOSes will refuse to boot from a bootsector if the magic is
+	 * incorrect, so we emit a warning.
+	 */
+	if (!silent && b->end_of_sector_marker != cpu_to_le16(0xaa55))
+		ntfs_warning(sb, "Invalid end of sector marker.");
+	return TRUE;
+not_ntfs:
+	return FALSE;
+}
+
+/**
+ * read_ntfs_boot_sector - read the NTFS boot sector of a device
+ * @sb:		super block of device to read the boot sector from
+ * @silent:	if true, suppress all output
+ *
+ * Reads the boot sector from the device and validates it.  If that fails, it
+ * tries to read the backup boot sector, first from the end of the device (NT4
+ * and later) and then from the middle of the device (NT3.51 and earlier).
+ *
+ * If a valid boot sector is found but it is not the primary boot sector, we
+ * repair the primary boot sector silently (unless the device is read-only or
+ * the primary boot sector is not accessible).
+ *
+ * NOTE: To call this function, @sb must have the fields s_dev, the ntfs super
+ * block (u.ntfs_sb), nr_blocks and the device flags (s_flags) initialized
+ * to their respective values.
+ *
+ * Return the unlocked buffer head containing the boot sector or NULL on error.
+ */
+static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+		const int silent)
+{
+	const char *read_err_str = "Unable to read %s boot sector.";
+	struct buffer_head *bh_primary, *bh_backup;
+	long nr_blocks = NTFS_SB(sb)->nr_blocks;
+
+	/* Try to read primary boot sector. */
+	if ((bh_primary = sb_bread(sb, 0))) {
+		if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+				bh_primary->b_data, silent))
+			return bh_primary;
+		if (!silent)
+			ntfs_error(sb, "Primary boot sector is invalid.");
+	} else if (!silent)
+		ntfs_error(sb, read_err_str, "primary");
+	if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
+		if (bh_primary)
+			brelse(bh_primary);
+		if (!silent)
+			ntfs_error(sb, "Mount option errors=recover not used. "
+					"Aborting without trying to recover.");
+		return NULL;
+	}
+	/* Try to read NT4+ backup boot sector. */
+	if ((bh_backup = sb_bread(sb, nr_blocks - 1))) {
+		if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+				bh_backup->b_data, silent))
+			goto hotfix_primary_boot_sector;
+		brelse(bh_backup);
+	} else if (!silent)
+		ntfs_error(sb, read_err_str, "backup");
+	/* Try to read NT3.51- backup boot sector. */
+	if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
+		if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+				bh_backup->b_data, silent))
+			goto hotfix_primary_boot_sector;
+		if (!silent)
+			ntfs_error(sb, "Could not find a valid backup boot "
+					"sector.");
+		brelse(bh_backup);
+	} else if (!silent)
+		ntfs_error(sb, read_err_str, "backup");
+	/* We failed. Cleanup and return. */
+	if (bh_primary)
+		brelse(bh_primary);
+	return NULL;
+hotfix_primary_boot_sector:
+	if (bh_primary) {
+		/*
+		 * If we managed to read sector zero and the volume is not
+		 * read-only, copy the found, valid backup boot sector to the
+		 * primary boot sector.
+		 */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			ntfs_warning(sb, "Hot-fix: Recovering invalid primary "
+					"boot sector from backup copy.");
+			memcpy(bh_primary->b_data, bh_backup->b_data,
+					sb->s_blocksize);
+			mark_buffer_dirty(bh_primary);
+			sync_dirty_buffer(bh_primary);
+			if (buffer_uptodate(bh_primary)) {
+				brelse(bh_backup);
+				return bh_primary;
+			}
+			ntfs_error(sb, "Hot-fix: Device write error while "
+					"recovering primary boot sector.");
+		} else {
+			ntfs_warning(sb, "Hot-fix: Recovery of primary boot "
+					"sector failed: Read-only mount.");
+		}
+		brelse(bh_primary);
+	}
+	ntfs_warning(sb, "Using backup boot sector.");
+	return bh_backup;
+}
+
+/**
+ * parse_ntfs_boot_sector - parse the boot sector and store the data in @vol
+ * @vol:	volume structure to initialise with data from boot sector
+ * @b:		boot sector to parse
+ *
+ * Parse the ntfs boot sector @b and store all important information from it
+ * in the ntfs super block @vol.  Return TRUE on success and FALSE on error.
+ */
+static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
+{
+	unsigned int sectors_per_cluster_bits, nr_hidden_sects;
+	int clusters_per_mft_record, clusters_per_index_record;
+	s64 ll;
+
+	vol->sector_size = le16_to_cpu(b->bpb.bytes_per_sector);
+	vol->sector_size_bits = ffs(vol->sector_size) - 1;
+	ntfs_debug("vol->sector_size = %i (0x%x)", vol->sector_size,
+			vol->sector_size);
+	ntfs_debug("vol->sector_size_bits = %i (0x%x)", vol->sector_size_bits,
+			vol->sector_size_bits);
+	if (vol->sector_size != vol->sb->s_blocksize)
+		ntfs_warning(vol->sb, "The boot sector indicates a sector size "
+				"different from the device sector size.");
+	ntfs_debug("sectors_per_cluster = 0x%x", b->bpb.sectors_per_cluster);
+	sectors_per_cluster_bits = ffs(b->bpb.sectors_per_cluster) - 1;
+	ntfs_debug("sectors_per_cluster_bits = 0x%x",
+			sectors_per_cluster_bits);
+	nr_hidden_sects = le32_to_cpu(b->bpb.hidden_sectors);
+	ntfs_debug("number of hidden sectors = 0x%x", nr_hidden_sects);
+	vol->cluster_size = vol->sector_size << sectors_per_cluster_bits;
+	vol->cluster_size_mask = vol->cluster_size - 1;
+	vol->cluster_size_bits = ffs(vol->cluster_size) - 1;
+	ntfs_debug("vol->cluster_size = %i (0x%x)", vol->cluster_size,
+			vol->cluster_size);
+	ntfs_debug("vol->cluster_size_mask = 0x%x", vol->cluster_size_mask);
+	ntfs_debug("vol->cluster_size_bits = %i (0x%x)",
+			vol->cluster_size_bits, vol->cluster_size_bits);
+	if (vol->sector_size > vol->cluster_size) {
+		ntfs_error(vol->sb, "Sector sizes above the cluster size are "
+				"not supported.  Sorry.");
+		return FALSE;
+	}
+	if (vol->sb->s_blocksize > vol->cluster_size) {
+		ntfs_error(vol->sb, "Cluster sizes smaller than the device "
+				"sector size are not supported.  Sorry.");
+		return FALSE;
+	}
+	clusters_per_mft_record = b->clusters_per_mft_record;
+	ntfs_debug("clusters_per_mft_record = %i (0x%x)",
+			clusters_per_mft_record, clusters_per_mft_record);
+	if (clusters_per_mft_record > 0)
+		vol->mft_record_size = vol->cluster_size <<
+				(ffs(clusters_per_mft_record) - 1);
+	else
+		/*
+		 * When mft_record_size < cluster_size, clusters_per_mft_record
+		 * holds -log2(mft_record_size in bytes).  mft_record_size is
+		 * normally 1024 bytes, which is encoded as 0xF6 (-10 decimal).
+		 */
+		vol->mft_record_size = 1 << -clusters_per_mft_record;
+	vol->mft_record_size_mask = vol->mft_record_size - 1;
+	vol->mft_record_size_bits = ffs(vol->mft_record_size) - 1;
+	ntfs_debug("vol->mft_record_size = %i (0x%x)", vol->mft_record_size,
+			vol->mft_record_size);
+	ntfs_debug("vol->mft_record_size_mask = 0x%x",
+			vol->mft_record_size_mask);
+	ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
+			vol->mft_record_size_bits, vol->mft_record_size_bits);
+	/*
+	 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+	 * we store $MFT/$DATA, the table of mft records in the page cache.
+	 */
+	if (vol->mft_record_size > PAGE_CACHE_SIZE) {
+		ntfs_error(vol->sb, "Mft record size %i (0x%x) exceeds the "
+				"page cache size on your system %lu (0x%lx).  "
+				"This is not supported.  Sorry.",
+				vol->mft_record_size, vol->mft_record_size,
+				PAGE_CACHE_SIZE, PAGE_CACHE_SIZE);
+		return FALSE;
+	}
+	clusters_per_index_record = b->clusters_per_index_record;
+	ntfs_debug("clusters_per_index_record = %i (0x%x)",
+			clusters_per_index_record, clusters_per_index_record);
+	if (clusters_per_index_record > 0)
+		vol->index_record_size = vol->cluster_size <<
+				(ffs(clusters_per_index_record) - 1);
+	else
+		/*
+		 * When index_record_size < cluster_size,
+		 * clusters_per_index_record holds -log2(index_record_size in
+		 * bytes).  index_record_size is normally 4096 bytes, which is
+		 * encoded as 0xF4 (-12 in decimal).
+		 */
+		vol->index_record_size = 1 << -clusters_per_index_record;
+	vol->index_record_size_mask = vol->index_record_size - 1;
+	vol->index_record_size_bits = ffs(vol->index_record_size) - 1;
+	ntfs_debug("vol->index_record_size = %i (0x%x)",
+			vol->index_record_size, vol->index_record_size);
+	ntfs_debug("vol->index_record_size_mask = 0x%x",
+			vol->index_record_size_mask);
+	ntfs_debug("vol->index_record_size_bits = %i (0x%x)",
+			vol->index_record_size_bits,
+			vol->index_record_size_bits);
+	/*
+	 * Get the size of the volume in clusters and check for 64-bit-ness.
+	 * Windows currently only uses 32 bits to store the number of clusters,
+	 * so we do the same as it is much faster on 32-bit CPUs.
+	 */
+	ll = sle64_to_cpu(b->number_of_sectors) >> sectors_per_cluster_bits;
+	if ((u64)ll >= 1ULL << 32) {
+		ntfs_error(vol->sb, "Cannot handle 64-bit clusters.  Sorry.");
+		return FALSE;
+	}
+	vol->nr_clusters = ll;
+	ntfs_debug("vol->nr_clusters = 0x%llx", (long long)vol->nr_clusters);
+	/*
+	 * On an architecture where unsigned long is 32-bits, we restrict the
+	 * volume size to 2TiB (2^41). On a 64-bit architecture, the compiler
+	 * will hopefully optimize the whole check away.
+	 */
+	if (sizeof(unsigned long) < 8) {
+		if ((ll << vol->cluster_size_bits) >= (1ULL << 41)) {
+			ntfs_error(vol->sb, "Volume size (%lluTiB) is too "
+					"large for this architecture.  "
+					"Maximum supported is 2TiB.  Sorry.",
+					(unsigned long long)ll >> (40 -
+					vol->cluster_size_bits));
+			return FALSE;
+		}
+	}
+	ll = sle64_to_cpu(b->mft_lcn);
+	if (ll >= vol->nr_clusters) {
+		ntfs_error(vol->sb, "MFT LCN is beyond end of volume.  Weird.");
+		return FALSE;
+	}
+	vol->mft_lcn = ll;
+	ntfs_debug("vol->mft_lcn = 0x%llx", (long long)vol->mft_lcn);
+	ll = sle64_to_cpu(b->mftmirr_lcn);
+	if (ll >= vol->nr_clusters) {
+		ntfs_error(vol->sb, "MFTMirr LCN is beyond end of volume.  "
+				"Weird.");
+		return FALSE;
+	}
+	vol->mftmirr_lcn = ll;
+	ntfs_debug("vol->mftmirr_lcn = 0x%llx", (long long)vol->mftmirr_lcn);
+#ifdef NTFS_RW
+	/*
+	 * Work out the size of the mft mirror in number of mft records. If the
+	 * cluster size is less than or equal to the size taken by four mft
+	 * records, the mft mirror stores the first four mft records. If the
+	 * cluster size is bigger than the size taken by four mft records, the
+	 * mft mirror contains as many mft records as will fit into one
+	 * cluster.
+	 */
+	if (vol->cluster_size <= (4 << vol->mft_record_size_bits))
+		vol->mftmirr_size = 4;
+	else
+		vol->mftmirr_size = vol->cluster_size >>
+				vol->mft_record_size_bits;
+	ntfs_debug("vol->mftmirr_size = %i", vol->mftmirr_size);
+#endif /* NTFS_RW */
+	vol->serial_no = le64_to_cpu(b->volume_serial_number);
+	ntfs_debug("vol->serial_no = 0x%llx",
+			(unsigned long long)vol->serial_no);
+	return TRUE;
+}
+
+/**
+ * ntfs_setup_allocators - initialize the cluster and mft allocators
+ * @vol:	volume structure for which to setup the allocators
+ *
+ * Setup the cluster (lcn) and mft allocators to the starting values.
+ */
+static void ntfs_setup_allocators(ntfs_volume *vol)
+{
+#ifdef NTFS_RW
+	LCN mft_zone_size, mft_lcn;
+#endif /* NTFS_RW */
+
+	ntfs_debug("vol->mft_zone_multiplier = 0x%x",
+			vol->mft_zone_multiplier);
+#ifdef NTFS_RW
+	/* Determine the size of the MFT zone. */
+	mft_zone_size = vol->nr_clusters;
+	switch (vol->mft_zone_multiplier) {  /* % of volume size in clusters */
+	case 4:
+		mft_zone_size >>= 1;			/* 50%   */
+		break;
+	case 3:
+		mft_zone_size = (mft_zone_size +
+				(mft_zone_size >> 1)) >> 2;	/* 37.5% */
+		break;
+	case 2:
+		mft_zone_size >>= 2;			/* 25%   */
+		break;
+	/* case 1: */
+	default:
+		mft_zone_size >>= 3;			/* 12.5% */
+		break;
+	}
+	/* Setup the mft zone. */
+	vol->mft_zone_start = vol->mft_zone_pos = vol->mft_lcn;
+	ntfs_debug("vol->mft_zone_pos = 0x%llx",
+			(unsigned long long)vol->mft_zone_pos);
+	/*
+	 * Calculate the mft_lcn for an unmodified NTFS volume (see mkntfs
+	 * source) and if the actual mft_lcn is in the expected place or even
+	 * further to the front of the volume, extend the mft_zone to cover the
+	 * beginning of the volume as well.  This is in order to protect the
+	 * area reserved for the mft bitmap as well within the mft_zone itself.
+	 * On non-standard volumes we do not protect it as the overhead would
+	 * be higher than the speed increase we would get by doing it.
+	 */
+	mft_lcn = (8192 + 2 * vol->cluster_size - 1) / vol->cluster_size;
+	if (mft_lcn * vol->cluster_size < 16 * 1024)
+		mft_lcn = (16 * 1024 + vol->cluster_size - 1) /
+				vol->cluster_size;
+	if (vol->mft_zone_start <= mft_lcn)
+		vol->mft_zone_start = 0;
+	ntfs_debug("vol->mft_zone_start = 0x%llx",
+			(unsigned long long)vol->mft_zone_start);
+	/*
+	 * Need to cap the mft zone on non-standard volumes so that it does
+	 * not point outside the boundaries of the volume.  We do this by
+	 * halving the zone size until we are inside the volume.
+	 */
+	vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
+	while (vol->mft_zone_end >= vol->nr_clusters) {
+		mft_zone_size >>= 1;
+		vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
+	}
+	ntfs_debug("vol->mft_zone_end = 0x%llx",
+			(unsigned long long)vol->mft_zone_end);
+	/*
+	 * Set the current position within each data zone to the start of the
+	 * respective zone.
+	 */
+	vol->data1_zone_pos = vol->mft_zone_end;
+	ntfs_debug("vol->data1_zone_pos = 0x%llx",
+			(unsigned long long)vol->data1_zone_pos);
+	vol->data2_zone_pos = 0;
+	ntfs_debug("vol->data2_zone_pos = 0x%llx",
+			(unsigned long long)vol->data2_zone_pos);
+
+	/* Set the mft data allocation position to mft record 24. */
+	vol->mft_data_pos = 24;
+	ntfs_debug("vol->mft_data_pos = 0x%llx",
+			(unsigned long long)vol->mft_data_pos);
+#endif /* NTFS_RW */
+}
+
+#ifdef NTFS_RW
+
+/**
+ * load_and_init_mft_mirror - load and setup the mft mirror inode for a volume
+ * @vol:	ntfs super block describing device whose mft mirror to load
+ *
+ * Return TRUE on success or FALSE on error.
+ */
+static BOOL load_and_init_mft_mirror(ntfs_volume *vol)
+{
+	struct inode *tmp_ino;
+	ntfs_inode *tmp_ni;
+
+	ntfs_debug("Entering.");
+	/* Get mft mirror inode. */
+	tmp_ino = ntfs_iget(vol->sb, FILE_MFTMirr);
+	if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
+		if (!IS_ERR(tmp_ino))
+			iput(tmp_ino);
+		/* Caller will display error message. */
+		return FALSE;
+	}
+	/*
+	 * Re-initialize some specifics about $MFTMirr's inode as
+	 * ntfs_read_inode() will have set up the default ones.
+	 */
+	/* Set uid and gid to root. */
+	tmp_ino->i_uid = tmp_ino->i_gid = 0;
+	/* Regular file.  No access for anyone. */
+	tmp_ino->i_mode = S_IFREG;
+	/* No VFS initiated operations allowed for $MFTMirr. */
+	tmp_ino->i_op = &ntfs_empty_inode_ops;
+	tmp_ino->i_fop = &ntfs_empty_file_ops;
+	/* Put in our special address space operations. */
+	tmp_ino->i_mapping->a_ops = &ntfs_mst_aops;
+	tmp_ni = NTFS_I(tmp_ino);
+	/* The $MFTMirr, like the $MFT, is multi sector transfer protected. */
+	NInoSetMstProtected(tmp_ni);
+	/*
+	 * Set up our little cheat allowing us to reuse the async read io
+	 * completion handler for directories.
+	 */
+	tmp_ni->itype.index.block_size = vol->mft_record_size;
+	tmp_ni->itype.index.block_size_bits = vol->mft_record_size_bits;
+	vol->mftmirr_ino = tmp_ino;
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * check_mft_mirror - compare contents of the mft mirror with the mft
+ * @vol:	ntfs super block describing device whose mft mirror to check
+ *
+ * Return TRUE on success or FALSE on error.
+ *
+ * Note, this function also results in the mft mirror runlist being completely
+ * mapped into memory.  The mft mirror write code requires this and will BUG()
+ * should it find an unmapped runlist element.
+ */
+static BOOL check_mft_mirror(ntfs_volume *vol)
+{
+	unsigned long index;
+	struct super_block *sb = vol->sb;
+	ntfs_inode *mirr_ni;
+	struct page *mft_page, *mirr_page;
+	u8 *kmft, *kmirr;
+	runlist_element *rl, rl2[2];
+	int mrecs_per_page, i;
+
+	ntfs_debug("Entering.");
+	/* Compare contents of $MFT and $MFTMirr. */
+	mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
+	BUG_ON(!mrecs_per_page);
+	BUG_ON(!vol->mftmirr_size);
+	mft_page = mirr_page = NULL;
+	kmft = kmirr = NULL;
+	index = i = 0;
+	do {
+		u32 bytes;
+
+		/* Switch pages if necessary. */
+		if (!(i % mrecs_per_page)) {
+			if (index) {
+				ntfs_unmap_page(mft_page);
+				ntfs_unmap_page(mirr_page);
+			}
+			/* Get the $MFT page. */
+			mft_page = ntfs_map_page(vol->mft_ino->i_mapping,
+					index);
+			if (IS_ERR(mft_page)) {
+				ntfs_error(sb, "Failed to read $MFT.");
+				return FALSE;
+			}
+			kmft = page_address(mft_page);
+			/* Get the $MFTMirr page. */
+			mirr_page = ntfs_map_page(vol->mftmirr_ino->i_mapping,
+					index);
+			if (IS_ERR(mirr_page)) {
+				ntfs_error(sb, "Failed to read $MFTMirr.");
+				goto mft_unmap_out;
+			}
+			kmirr = page_address(mirr_page);
+			++index;
+		}
+		/* Make sure the record is ok. */
+		if (ntfs_is_baad_recordp((le32*)kmft)) {
+			ntfs_error(sb, "Incomplete multi sector transfer "
+					"detected in mft record %i.", i);
+mm_unmap_out:
+			ntfs_unmap_page(mirr_page);
+mft_unmap_out:
+			ntfs_unmap_page(mft_page);
+			return FALSE;
+		}
+		if (ntfs_is_baad_recordp((le32*)kmirr)) {
+			ntfs_error(sb, "Incomplete multi sector transfer "
+					"detected in mft mirror record %i.", i);
+			goto mm_unmap_out;
+		}
+		/* Get the amount of data in the current record. */
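+		/*
+		 * If $MFT's bytes_in_use looks bogus, fall back to the
+		 * mirror's value and finally to the full mft record size.
+		 */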
+		bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use);
+		if (!bytes || bytes > vol->mft_record_size) {
+			bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use);
+			if (!bytes || bytes > vol->mft_record_size)
+				bytes = vol->mft_record_size;
+		}
+		/* Compare the two records. */
+		if (memcmp(kmft, kmirr, bytes)) {
+			ntfs_error(sb, "$MFT and $MFTMirr (record %i) do not "
+					"match.  Run ntfsfix or chkdsk.", i);
+			goto mm_unmap_out;
+		}
+		kmft += vol->mft_record_size;
+		kmirr += vol->mft_record_size;
+	} while (++i < vol->mftmirr_size);
+	/* Release the last pages. */
+	ntfs_unmap_page(mft_page);
+	ntfs_unmap_page(mirr_page);
+
+	/* Construct the mft mirror runlist by hand. */
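+	/*
+	 * A single run covering the whole mirror, rounded up to a whole
+	 * number of clusters, followed by the terminator element.
+	 */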
+	rl2[0].vcn = 0;
+	rl2[0].lcn = vol->mftmirr_lcn;
+	rl2[0].length = (vol->mftmirr_size * vol->mft_record_size +
+			vol->cluster_size - 1) / vol->cluster_size;
+	rl2[1].vcn = rl2[0].length;
+	rl2[1].lcn = LCN_ENOENT;
+	rl2[1].length = 0;
+	/*
+	 * Because we have just read all of the mft mirror, we know we have
+	 * mapped the full runlist for it.
+	 */
+	mirr_ni = NTFS_I(vol->mftmirr_ino);
+	down_read(&mirr_ni->runlist.lock);
+	rl = mirr_ni->runlist.rl;
+	/* Compare the two runlists.  They must be identical. */
+	i = 0;
+	do {
+		if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||
+				rl2[i].length != rl[i].length) {
+			ntfs_error(sb, "$MFTMirr location mismatch.  "
+					"Run chkdsk.");
+			up_read(&mirr_ni->runlist.lock);
+			return FALSE;
+		}
+	} while (rl2[i++].length);
+	up_read(&mirr_ni->runlist.lock);
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * load_and_check_logfile - load and check the logfile inode for a volume
+ * @vol:	ntfs super block describing device whose logfile to load
+ *
+ * Return TRUE on success or FALSE on error.
+ */
+static BOOL load_and_check_logfile(ntfs_volume *vol)
+{
+	struct inode *tmp_ino;
+
+	ntfs_debug("Entering.");
+	tmp_ino = ntfs_iget(vol->sb, FILE_LogFile);
+	if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
+		if (!IS_ERR(tmp_ino))
+			iput(tmp_ino);
+		/* Caller will display error message. */
+		return FALSE;
+	}
+	if (!ntfs_check_logfile(tmp_ino)) {
+		iput(tmp_ino);
+		/* ntfs_check_logfile() will have displayed error output. */
+		return FALSE;
+	}
+	vol->logfile_ino = tmp_ino;
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * load_and_init_quota - load and setup the quota file for a volume if present
+ * @vol:	ntfs super block describing device whose quota file to load
+ *
+ * Return TRUE on success or FALSE on error.  If $Quota is not present, we
+ * leave vol->quota_ino as NULL and return success.
+ */
+static BOOL load_and_init_quota(ntfs_volume *vol)
+{
+	MFT_REF mref;
+	struct inode *tmp_ino;
+	ntfs_name *name = NULL;
+	static const ntfschar Quota[7] = { const_cpu_to_le16('$'),
+			const_cpu_to_le16('Q'), const_cpu_to_le16('u'),
+			const_cpu_to_le16('o'), const_cpu_to_le16('t'),
+			const_cpu_to_le16('a'), 0 };
+	static ntfschar Q[3] = { const_cpu_to_le16('$'),
+			const_cpu_to_le16('Q'), 0 };
+
+	ntfs_debug("Entering.");
+	/*
+	 * Find the inode number for the quota file by looking up the filename
+	 * $Quota in the extended system files directory $Extend.
+	 */
+	down(&vol->extend_ino->i_sem);
+	mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
+			&name);
+	up(&vol->extend_ino->i_sem);
+	if (IS_ERR_MREF(mref)) {
+		/*
+		 * If the file does not exist, quotas are disabled and have
+		 * never been enabled on this volume, just return success.
+		 */
+		if (MREF_ERR(mref) == -ENOENT) {
+			ntfs_debug("$Quota not present.  Volume does not have "
+					"quotas enabled.");
+			/*
+			 * No need to try to set quotas out of date if they are
+			 * not enabled.
+			 */
+			NVolSetQuotaOutOfDate(vol);
+			return TRUE;
+		}
+		/* A real error occurred. */
+		ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
+		return FALSE;
+	}
+	/* We do not care for the type of match that was found. */
+	if (name)
+		kfree(name);
+	/* Get the inode. */
+	tmp_ino = ntfs_iget(vol->sb, MREF(mref));
+	if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
+		if (!IS_ERR(tmp_ino))
+			iput(tmp_ino);
+		ntfs_error(vol->sb, "Failed to load $Quota.");
+		return FALSE;
+	}
+	vol->quota_ino = tmp_ino;
+	/* Get the $Q index allocation attribute. */
+	tmp_ino = ntfs_index_iget(vol->quota_ino, Q, 2);
+	if (IS_ERR(tmp_ino)) {
+		ntfs_error(vol->sb, "Failed to load $Quota/$Q index.");
+		return FALSE;
+	}
+	vol->quota_q_ino = tmp_ino;
+	ntfs_debug("Done.");
+	return TRUE;
+}
+
+/**
+ * load_and_init_attrdef - load the attribute definitions table for a volume
+ * @vol:	ntfs super block describing device whose attrdef to load
+ *
+ * Return TRUE on success or FALSE on error.
+ */
+static BOOL load_and_init_attrdef(ntfs_volume *vol)
+{
+	struct super_block *sb = vol->sb;
+	struct inode *ino;
+	struct page *page;
+	unsigned long index, max_index;
+	unsigned int size;
+
+	ntfs_debug("Entering.");
+	/* Read attrdef table and setup vol->attrdef and vol->attrdef_size. */
+	ino = ntfs_iget(sb, FILE_AttrDef);
+	if (IS_ERR(ino) || is_bad_inode(ino)) {
+		if (!IS_ERR(ino))
+			iput(ino);
+		goto failed;
+	}
+	/* The size of FILE_AttrDef must be above 0 and fit inside 31 bits. */
+	if (!ino->i_size || ino->i_size > 0x7fffffff)
+		goto iput_failed;
+	vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(ino->i_size);
+	if (!vol->attrdef)
+		goto iput_failed;
+	index = 0;
+	max_index = ino->i_size >> PAGE_CACHE_SHIFT;
+	size = PAGE_CACHE_SIZE;
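+	/*
+	 * Copy whole pages first; if the table does not end on a page
+	 * boundary, jump back once to copy the final partial page.
+	 */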
+	while (index < max_index) {
+		/* Read the attrdef table and copy it into the linear buffer. */
+read_partial_attrdef_page:
+		page = ntfs_map_page(ino->i_mapping, index);
+		if (IS_ERR(page))
+			goto free_iput_failed;
+		memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
+				page_address(page), size);
+		ntfs_unmap_page(page);
+	}
+	if (size == PAGE_CACHE_SIZE) {
+		size = ino->i_size & ~PAGE_CACHE_MASK;
+		if (size)
+			goto read_partial_attrdef_page;
+	}
+	vol->attrdef_size = ino->i_size;
+	ntfs_debug("Read %llu bytes from $AttrDef.", ino->i_size);
+	iput(ino);
+	return TRUE;
+free_iput_failed:
+	ntfs_free(vol->attrdef);
+	vol->attrdef = NULL;
+iput_failed:
+	iput(ino);
+failed:
+	ntfs_error(sb, "Failed to initialize attribute definition table.");
+	return FALSE;
+}
+
+#endif /* NTFS_RW */
+
+/**
+ * load_and_init_upcase - load the upcase table for an ntfs volume
+ * @vol:	ntfs super block describing device whose upcase to load
+ *
+ * Return TRUE on success or FALSE on error.
+ */
+static BOOL load_and_init_upcase(ntfs_volume *vol)
+{
+	struct super_block *sb = vol->sb;
+	struct inode *ino;
+	struct page *page;
+	unsigned long index, max_index;
+	unsigned int size;
+	int i, max;
+
+	ntfs_debug("Entering.");
+	/* Read upcase table and setup vol->upcase and vol->upcase_len. */
+	ino = ntfs_iget(sb, FILE_UpCase);
+	if (IS_ERR(ino) || is_bad_inode(ino)) {
+		if (!IS_ERR(ino))
+			iput(ino);
+		goto upcase_failed;
+	}
+	/*
+	 * The upcase size must not be above 64k Unicode characters, must not
+	 * be zero and must be a multiple of sizeof(ntfschar).
+	 */
+	if (!ino->i_size || ino->i_size & (sizeof(ntfschar) - 1) ||
+			ino->i_size > 64ULL * 1024 * sizeof(ntfschar))
+		goto iput_upcase_failed;
+	vol->upcase = (ntfschar*)ntfs_malloc_nofs(ino->i_size);
+	if (!vol->upcase)
+		goto iput_upcase_failed;
+	index = 0;
+	max_index = ino->i_size >> PAGE_CACHE_SHIFT;
+	size = PAGE_CACHE_SIZE;
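+	/* Copy whole pages first, then any final partial page. */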
+	while (index < max_index) {
+		/* Read the upcase table and copy it into the linear buffer. */
+read_partial_upcase_page:
+		page = ntfs_map_page(ino->i_mapping, index);
+		if (IS_ERR(page))
+			goto iput_upcase_failed;
+		memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
+				page_address(page), size);
+		ntfs_unmap_page(page);
+	}
+	if (size == PAGE_CACHE_SIZE) {
+		size = ino->i_size & ~PAGE_CACHE_MASK;
+		if (size)
+			goto read_partial_upcase_page;
+	}
+	vol->upcase_len = ino->i_size >> UCHAR_T_SIZE_BITS;
+	ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
+			ino->i_size, 64 * 1024 * sizeof(ntfschar));
+	iput(ino);
+	down(&ntfs_lock);
+	if (!default_upcase) {
+		ntfs_debug("Using volume specified $UpCase since default is "
+				"not present.");
+		up(&ntfs_lock);
+		return TRUE;
+	}
+	max = default_upcase_len;
+	if (max > vol->upcase_len)
+		max = vol->upcase_len;
+	for (i = 0; i < max; i++)
+		if (vol->upcase[i] != default_upcase[i])
+			break;
+	if (i == max) {
+		ntfs_free(vol->upcase);
+		vol->upcase = default_upcase;
+		vol->upcase_len = max;
+		ntfs_nr_upcase_users++;
+		up(&ntfs_lock);
+		ntfs_debug("Volume specified $UpCase matches default. Using "
+				"default.");
+		return TRUE;
+	}
+	up(&ntfs_lock);
+	ntfs_debug("Using volume specified $UpCase since it does not match "
+			"the default.");
+	return TRUE;
+iput_upcase_failed:
+	iput(ino);
+	ntfs_free(vol->upcase);
+	vol->upcase = NULL;
+upcase_failed:
+	down(&ntfs_lock);
+	if (default_upcase) {
+		vol->upcase = default_upcase;
+		vol->upcase_len = default_upcase_len;
+		ntfs_nr_upcase_users++;
+		up(&ntfs_lock);
+		ntfs_error(sb, "Failed to load $UpCase from the volume. Using "
+				"default.");
+		return TRUE;
+	}
+	up(&ntfs_lock);
+	ntfs_error(sb, "Failed to initialize upcase table.");
+	return FALSE;
+}
+
+/**
+ * load_system_files - open the system files using normal functions
+ * @vol:	ntfs super block describing device whose system files to load
+ *
+ * Open the system files with normal access functions and complete setting up
+ * the ntfs super block @vol.
+ *
+ * Return TRUE on success or FALSE on error.
+ */
+static BOOL load_system_files(ntfs_volume *vol)
+{
+	struct super_block *sb = vol->sb;
+	MFT_RECORD *m;
+	VOLUME_INFORMATION *vi;
+	ntfs_attr_search_ctx *ctx;
+
+	ntfs_debug("Entering.");
+#ifdef NTFS_RW
+	/* Get the mft mirror inode and compare $MFT against $MFTMirr. */
+	if (!load_and_init_mft_mirror(vol) || !check_mft_mirror(vol)) {
+		static const char *es1 = "Failed to load $MFTMirr";
+		static const char *es2 = "$MFTMirr does not match $MFT";
+		static const char *es3 = ".  Run ntfsfix and/or chkdsk.";
+
+		/* If a read-write mount, convert it to a read-only mount. */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+					ON_ERRORS_CONTINUE))) {
+				ntfs_error(sb, "%s and neither on_errors="
+						"continue nor on_errors="
+						"remount-ro was specified%s",
+						!vol->mftmirr_ino ? es1 : es2,
+						es3);
+				goto iput_mirr_err_out;
+			}
+			sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+			ntfs_error(sb, "%s.  Mounting read-only%s",
+					!vol->mftmirr_ino ? es1 : es2, es3);
+		} else
+			ntfs_warning(sb, "%s.  Will not be able to remount "
+					"read-write%s",
+					!vol->mftmirr_ino ? es1 : es2, es3);
+		/* This will prevent a read-write remount. */
+		NVolSetErrors(vol);
+	}
+#endif /* NTFS_RW */
+	/* Get mft bitmap attribute inode. */
+	vol->mftbmp_ino = ntfs_attr_iget(vol->mft_ino, AT_BITMAP, NULL, 0);
+	if (IS_ERR(vol->mftbmp_ino)) {
+		ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute.");
+		goto iput_mirr_err_out;
+	}
+	/* Read upcase table and setup @vol->upcase and @vol->upcase_len. */
+	if (!load_and_init_upcase(vol))
+		goto iput_mftbmp_err_out;
+#ifdef NTFS_RW
+	/*
+	 * Read attribute definitions table and setup @vol->attrdef and
+	 * @vol->attrdef_size.
+	 */
+	if (!load_and_init_attrdef(vol))
+		goto iput_upcase_err_out;
+#endif /* NTFS_RW */
+	/*
+	 * Get the cluster allocation bitmap inode and verify its size.  No
+	 * locking is needed at this stage as we are already running
+	 * exclusively, being the mount in progress task.
+	 */
+	vol->lcnbmp_ino = ntfs_iget(sb, FILE_Bitmap);
+	if (IS_ERR(vol->lcnbmp_ino) || is_bad_inode(vol->lcnbmp_ino)) {
+		if (!IS_ERR(vol->lcnbmp_ino))
+			iput(vol->lcnbmp_ino);
+		goto bitmap_failed;
+	}
+	if ((vol->nr_clusters + 7) >> 3 > vol->lcnbmp_ino->i_size) {
+		iput(vol->lcnbmp_ino);
+bitmap_failed:
+		ntfs_error(sb, "Failed to load $Bitmap.");
+		goto iput_attrdef_err_out;
+	}
+	/*
+	 * Get the volume inode and setup our cache of the volume flags and
+	 * version.
+	 */
+	vol->vol_ino = ntfs_iget(sb, FILE_Volume);
+	if (IS_ERR(vol->vol_ino) || is_bad_inode(vol->vol_ino)) {
+		if (!IS_ERR(vol->vol_ino))
+			iput(vol->vol_ino);
+volume_failed:
+		ntfs_error(sb, "Failed to load $Volume.");
+		goto iput_lcnbmp_err_out;
+	}
+	m = map_mft_record(NTFS_I(vol->vol_ino));
+	if (IS_ERR(m)) {
+iput_volume_failed:
+		iput(vol->vol_ino);
+		goto volume_failed;
+	}
+	if (!(ctx = ntfs_attr_get_search_ctx(NTFS_I(vol->vol_ino), m))) {
+		ntfs_error(sb, "Failed to get attribute search context.");
+		goto get_ctx_vol_failed;
+	}
+	if (ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
+			ctx) || ctx->attr->non_resident || ctx->attr->flags) {
+err_put_vol:
+		ntfs_attr_put_search_ctx(ctx);
+get_ctx_vol_failed:
+		unmap_mft_record(NTFS_I(vol->vol_ino));
+		goto iput_volume_failed;
+	}
+	vi = (VOLUME_INFORMATION*)((char*)ctx->attr +
+			le16_to_cpu(ctx->attr->data.resident.value_offset));
+	/* Some bounds checks. */
+	if ((u8*)vi < (u8*)ctx->attr || (u8*)vi +
+			le32_to_cpu(ctx->attr->data.resident.value_length) >
+			(u8*)ctx->attr + le32_to_cpu(ctx->attr->length))
+		goto err_put_vol;
+	/* Copy the volume flags and version to the ntfs_volume structure. */
+	vol->vol_flags = vi->flags;
+	vol->major_ver = vi->major_ver;
+	vol->minor_ver = vi->minor_ver;
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(NTFS_I(vol->vol_ino));
+	printk(KERN_INFO "NTFS volume version %i.%i.\n", vol->major_ver,
+			vol->minor_ver);
+#ifdef NTFS_RW
+	/* Make sure that no unsupported volume flags are set. */
+	if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
+		static const char *es1a = "Volume is dirty";
+		static const char *es1b = "Volume has unsupported flags set";
+		static const char *es2 = ".  Run chkdsk and mount in Windows.";
+		const char *es1;
+		
+		es1 = vol->vol_flags & VOLUME_IS_DIRTY ? es1a : es1b;
+		/* If a read-write mount, convert it to a read-only mount. */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+					ON_ERRORS_CONTINUE))) {
+				ntfs_error(sb, "%s and neither on_errors="
+						"continue nor on_errors="
+						"remount-ro was specified%s",
+						es1, es2);
+				goto iput_vol_err_out;
+			}
+			sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+			ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		} else
+			ntfs_warning(sb, "%s.  Will not be able to remount "
+					"read-write%s", es1, es2);
+		/*
+		 * Do not set NVolErrors() because ntfs_remount() re-checks the
+		 * flags which we need to do in case any flags have changed.
+		 */
+	}
+	/*
+	 * Get the inode for the logfile, check it and determine if the volume
+	 * was shutdown cleanly.
+	 */
+	if (!load_and_check_logfile(vol) ||
+			!ntfs_is_logfile_clean(vol->logfile_ino)) {
+		static const char *es1a = "Failed to load $LogFile";
+		static const char *es1b = "$LogFile is not clean";
+		static const char *es2 = ".  Mount in Windows.";
+		const char *es1;
+
+		es1 = !vol->logfile_ino ? es1a : es1b;
+		/* If a read-write mount, convert it to a read-only mount. */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+					ON_ERRORS_CONTINUE))) {
+				ntfs_error(sb, "%s and neither on_errors="
+						"continue nor on_errors="
+						"remount-ro was specified%s",
+						es1, es2);
+				goto iput_logfile_err_out;
+			}
+			sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+			ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		} else
+			ntfs_warning(sb, "%s.  Will not be able to remount "
+					"read-write%s", es1, es2);
+		/* This will prevent a read-write remount. */
+		NVolSetErrors(vol);
+	}
+	/* If (still) a read-write mount, mark the volume dirty. */
+	if (!(sb->s_flags & MS_RDONLY) &&
+			ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
+		static const char *es1 = "Failed to set dirty bit in volume "
+				"information flags";
+		static const char *es2 = ".  Run chkdsk.";
+
+		/* Convert to a read-only mount. */
+		if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+				ON_ERRORS_CONTINUE))) {
+			ntfs_error(sb, "%s and neither on_errors=continue nor "
+					"on_errors=remount-ro was specified%s",
+					es1, es2);
+			goto iput_logfile_err_out;
+		}
+		ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+		/*
+		 * Do not set NVolErrors() because ntfs_remount() might manage
+		 * to set the dirty flag in which case all would be well.
+		 */
+	}
+#if 0
+	// TODO: Enable this code once we start modifying anything that is
+	//	 different between NTFS 1.2 and 3.x...
+	/*
+	 * If (still) a read-write mount, set the NT4 compatibility flag on
+	 * newer NTFS version volumes.
+	 */
+	if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+			ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
+		static const char *es1 = "Failed to set NT4 compatibility flag";
+		static const char *es2 = ".  Run chkdsk.";
+
+		/* Convert to a read-only mount. */
+		if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+				ON_ERRORS_CONTINUE))) {
+			ntfs_error(sb, "%s and neither on_errors=continue nor "
+					"on_errors=remount-ro was specified%s",
+					es1, es2);
+			goto iput_logfile_err_out;
+		}
+		ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+		NVolSetErrors(vol);
+	}
+#endif
+	/* If (still) a read-write mount, empty the logfile. */
+	if (!(sb->s_flags & MS_RDONLY) &&
+			!ntfs_empty_logfile(vol->logfile_ino)) {
+		static const char *es1 = "Failed to empty $LogFile";
+		static const char *es2 = ".  Mount in Windows.";
+
+		/* Convert to a read-only mount. */
+		if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+				ON_ERRORS_CONTINUE))) {
+			ntfs_error(sb, "%s and neither on_errors=continue nor "
+					"on_errors=remount-ro was specified%s",
+					es1, es2);
+			goto iput_logfile_err_out;
+		}
+		ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+		NVolSetErrors(vol);
+	}
+#endif /* NTFS_RW */
+	/* Get the root directory inode. */
+	vol->root_ino = ntfs_iget(sb, FILE_root);
+	if (IS_ERR(vol->root_ino) || is_bad_inode(vol->root_ino)) {
+		if (!IS_ERR(vol->root_ino))
+			iput(vol->root_ino);
+		ntfs_error(sb, "Failed to load root directory.");
+		goto iput_logfile_err_out;
+	}
+	/* If on NTFS versions before 3.0, we are done. */
+	if (vol->major_ver < 3)
+		return TRUE;
+	/* NTFS 3.0+ specific initialization. */
+	/* Get the security descriptors inode. */
+	vol->secure_ino = ntfs_iget(sb, FILE_Secure);
+	if (IS_ERR(vol->secure_ino) || is_bad_inode(vol->secure_ino)) {
+		if (!IS_ERR(vol->secure_ino))
+			iput(vol->secure_ino);
+		ntfs_error(sb, "Failed to load $Secure.");
+		goto iput_root_err_out;
+	}
+	// FIXME: Initialize security.
+	/* Get the extended system files' directory inode. */
+	vol->extend_ino = ntfs_iget(sb, FILE_Extend);
+	if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+		if (!IS_ERR(vol->extend_ino))
+			iput(vol->extend_ino);
+		ntfs_error(sb, "Failed to load $Extend.");
+		goto iput_sec_err_out;
+	}
+#ifdef NTFS_RW
+	/* Find the quota file, load it if present, and set it up. */
+	if (!load_and_init_quota(vol)) {
+		static const char *es1 = "Failed to load $Quota";
+		static const char *es2 = ".  Run chkdsk.";
+
+		/* If a read-write mount, convert it to a read-only mount. */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+					ON_ERRORS_CONTINUE))) {
+				ntfs_error(sb, "%s and neither on_errors="
+						"continue nor on_errors="
+						"remount-ro was specified%s",
+						es1, es2);
+				goto iput_quota_err_out;
+			}
+			sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+			ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		} else
+			ntfs_warning(sb, "%s.  Will not be able to remount "
+					"read-write%s", es1, es2);
+		/* This will prevent a read-write remount. */
+		NVolSetErrors(vol);
+	}
+	/* If (still) a read-write mount, mark the quotas out of date. */
+	if (!(sb->s_flags & MS_RDONLY) &&
+			!ntfs_mark_quotas_out_of_date(vol)) {
+		static const char *es1 = "Failed to mark quotas out of date";
+		static const char *es2 = ".  Run chkdsk.";
+
+		/* Convert to a read-only mount. */
+		if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
+				ON_ERRORS_CONTINUE))) {
+			ntfs_error(sb, "%s and neither on_errors=continue nor "
+					"on_errors=remount-ro was specified%s",
+					es1, es2);
+			goto iput_quota_err_out;
+		}
+		ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
+		sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+		NVolSetErrors(vol);
+	}
+	// TODO: Delete or checkpoint the $UsnJrnl if it exists.
+#endif /* NTFS_RW */
+	return TRUE;
+#ifdef NTFS_RW
+iput_quota_err_out:
+	if (vol->quota_q_ino)
+		iput(vol->quota_q_ino);
+	if (vol->quota_ino)
+		iput(vol->quota_ino);
+	iput(vol->extend_ino);
+#endif /* NTFS_RW */
+iput_sec_err_out:
+	iput(vol->secure_ino);
+iput_root_err_out:
+	iput(vol->root_ino);
+iput_logfile_err_out:
+#ifdef NTFS_RW
+	if (vol->logfile_ino)
+		iput(vol->logfile_ino);
+iput_vol_err_out:
+#endif /* NTFS_RW */
+	iput(vol->vol_ino);
+iput_lcnbmp_err_out:
+	iput(vol->lcnbmp_ino);
+iput_attrdef_err_out:
+	vol->attrdef_size = 0;
+	if (vol->attrdef) {
+		ntfs_free(vol->attrdef);
+		vol->attrdef = NULL;
+	}
+#ifdef NTFS_RW
+iput_upcase_err_out:
+#endif /* NTFS_RW */
+	vol->upcase_len = 0;
+	down(&ntfs_lock);
+	if (vol->upcase == default_upcase) {
+		ntfs_nr_upcase_users--;
+		vol->upcase = NULL;
+	}
+	up(&ntfs_lock);
+	if (vol->upcase) {
+		ntfs_free(vol->upcase);
+		vol->upcase = NULL;
+	}
+iput_mftbmp_err_out:
+	iput(vol->mftbmp_ino);
+iput_mirr_err_out:
+#ifdef NTFS_RW
+	if (vol->mftmirr_ino)
+		iput(vol->mftmirr_ino);
+#endif /* NTFS_RW */
+	return FALSE;
+}
+
+/**
+ * ntfs_put_super - called by the vfs to unmount a volume
+ * @sb:		vfs superblock of volume to unmount
+ *
+ * ntfs_put_super() is called by the VFS (from fs/super.c::do_umount()) when
+ * the volume is being unmounted (umount system call has been invoked) and it
+ * releases all inodes and memory belonging to the NTFS specific part of the
+ * super block.
+ */
+static void ntfs_put_super(struct super_block *sb)
+{
+	ntfs_volume *vol = NTFS_SB(sb);
+
+	ntfs_debug("Entering.");
+#ifdef NTFS_RW
+	/*
+	 * Commit all inodes while they are still open in case some of them
+	 * cause others to be dirtied.
+	 */
+	ntfs_commit_inode(vol->vol_ino);
+
+	/* NTFS 3.0+ specific. */
+	if (vol->major_ver >= 3) {
+		if (vol->quota_q_ino)
+			ntfs_commit_inode(vol->quota_q_ino);
+		if (vol->quota_ino)
+			ntfs_commit_inode(vol->quota_ino);
+		if (vol->extend_ino)
+			ntfs_commit_inode(vol->extend_ino);
+		if (vol->secure_ino)
+			ntfs_commit_inode(vol->secure_ino);
+	}
+
+	ntfs_commit_inode(vol->root_ino);
+
+	down_write(&vol->lcnbmp_lock);
+	ntfs_commit_inode(vol->lcnbmp_ino);
+	up_write(&vol->lcnbmp_lock);
+
+	down_write(&vol->mftbmp_lock);
+	ntfs_commit_inode(vol->mftbmp_ino);
+	up_write(&vol->mftbmp_lock);
+
+	if (vol->logfile_ino)
+		ntfs_commit_inode(vol->logfile_ino);
+
+	if (vol->mftmirr_ino)
+		ntfs_commit_inode(vol->mftmirr_ino);
+	ntfs_commit_inode(vol->mft_ino);
+
+	/*
+	 * If a read-write mount and no volume errors have occurred, mark the
+	 * volume clean.  Also, re-commit all affected inodes.
+	 */
+	if (!(sb->s_flags & MS_RDONLY)) {
+		if (!NVolErrors(vol)) {
+			if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
+				ntfs_warning(sb, "Failed to clear dirty bit "
+						"in volume information "
+						"flags.  Run chkdsk.");
+			ntfs_commit_inode(vol->vol_ino);
+			ntfs_commit_inode(vol->root_ino);
+			if (vol->mftmirr_ino)
+				ntfs_commit_inode(vol->mftmirr_ino);
+			ntfs_commit_inode(vol->mft_ino);
+		} else {
+			ntfs_warning(sb, "Volume has errors.  Leaving volume "
+					"marked dirty.  Run chkdsk.");
+		}
+	}
+#endif /* NTFS_RW */
+
+	iput(vol->vol_ino);
+	vol->vol_ino = NULL;
+
+	/* NTFS 3.0+ specific clean up. */
+	if (vol->major_ver >= 3) {
+#ifdef NTFS_RW
+		if (vol->quota_q_ino) {
+			iput(vol->quota_q_ino);
+			vol->quota_q_ino = NULL;
+		}
+		if (vol->quota_ino) {
+			iput(vol->quota_ino);
+			vol->quota_ino = NULL;
+		}
+#endif /* NTFS_RW */
+		if (vol->extend_ino) {
+			iput(vol->extend_ino);
+			vol->extend_ino = NULL;
+		}
+		if (vol->secure_ino) {
+			iput(vol->secure_ino);
+			vol->secure_ino = NULL;
+		}
+	}
+
+	iput(vol->root_ino);
+	vol->root_ino = NULL;
+
+	down_write(&vol->lcnbmp_lock);
+	iput(vol->lcnbmp_ino);
+	vol->lcnbmp_ino = NULL;
+	up_write(&vol->lcnbmp_lock);
+
+	down_write(&vol->mftbmp_lock);
+	iput(vol->mftbmp_ino);
+	vol->mftbmp_ino = NULL;
+	up_write(&vol->mftbmp_lock);
+
+#ifdef NTFS_RW
+	if (vol->logfile_ino) {
+		iput(vol->logfile_ino);
+		vol->logfile_ino = NULL;
+	}
+	if (vol->mftmirr_ino) {
+		/* Re-commit the mft mirror and mft just in case. */
+		ntfs_commit_inode(vol->mftmirr_ino);
+		ntfs_commit_inode(vol->mft_ino);
+		iput(vol->mftmirr_ino);
+		vol->mftmirr_ino = NULL;
+	}
+	/*
+	 * If any dirty inodes are left, throw away all mft data page cache
+	 * pages to allow a clean umount.  This should never happen any more
+	 * due to mft.c::ntfs_mft_writepage() cleaning all the dirty pages as
+	 * the underlying mft records are written out and cleaned.  If it does
+	 * happen anyway, we want to know...
+	 */
+	ntfs_commit_inode(vol->mft_ino);
+	write_inode_now(vol->mft_ino, 1);
+	if (!list_empty(&sb->s_dirty)) {
+		const char *s1, *s2;
+
+		down(&vol->mft_ino->i_sem);
+		truncate_inode_pages(vol->mft_ino->i_mapping, 0);
+		up(&vol->mft_ino->i_sem);
+		write_inode_now(vol->mft_ino, 1);
+		if (!list_empty(&sb->s_dirty)) {
+			static const char *_s1 = "inodes";
+			static const char *_s2 = "";
+			s1 = _s1;
+			s2 = _s2;
+		} else {
+			static const char *_s1 = "mft pages";
+			static const char *_s2 = "They have been thrown "
+					"away.  ";
+			s1 = _s1;
+			s2 = _s2;
+		}
+		ntfs_error(sb, "Dirty %s found at umount time.  %sYou should "
+				"run chkdsk.  Please email "
+				"linux-ntfs-dev@lists.sourceforge.net and say "
+				"that you saw this message.  Thank you.", s1,
+				s2);
+	}
+#endif /* NTFS_RW */
+
+	iput(vol->mft_ino);
+	vol->mft_ino = NULL;
+
+	/* Throw away the table of attribute definitions. */
+	vol->attrdef_size = 0;
+	if (vol->attrdef) {
+		ntfs_free(vol->attrdef);
+		vol->attrdef = NULL;
+	}
+	vol->upcase_len = 0;
+	/*
+	 * Destroy the global default upcase table if necessary.  Also decrease
+	 * the number of upcase users if we are a user.
+	 */
+	down(&ntfs_lock);
+	if (vol->upcase == default_upcase) {
+		ntfs_nr_upcase_users--;
+		vol->upcase = NULL;
+	}
+	if (!ntfs_nr_upcase_users && default_upcase) {
+		ntfs_free(default_upcase);
+		default_upcase = NULL;
+	}
+	if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
+		free_compression_buffers();
+	up(&ntfs_lock);
+	if (vol->upcase) {
+		ntfs_free(vol->upcase);
+		vol->upcase = NULL;
+	}
+	if (vol->nls_map) {
+		unload_nls(vol->nls_map);
+		vol->nls_map = NULL;
+	}
+	sb->s_fs_info = NULL;
+	kfree(vol);
+	return;
+}
+
+/**
+ * get_nr_free_clusters - return the number of free clusters on a volume
+ * @vol:	ntfs volume for which to obtain free cluster count
+ *
+ * Calculate the number of free clusters on the mounted NTFS volume @vol. We
+ * actually calculate the number of clusters in use instead because this
+ * allows us to not care about partial pages as these will be just zero filled
+ * and hence not be counted as allocated clusters.
+ *
+ * The only particularity is that clusters beyond the end of the logical ntfs
+ * volume will be marked as allocated to prevent errors, which means we have to
+ * discount those at the end.  This is important as the cluster bitmap always
+ * has a size in multiples of 8 bytes, i.e. up to 63 clusters could lie outside
+ * the logical volume and be marked in use even though they do not exist.
+ *
+ * If any pages cannot be read we assume all clusters in the erroring pages are
+ * in use. This means we return an underestimate on errors which is better than
+ * an overestimate.
+ */
+static s64 get_nr_free_clusters(ntfs_volume *vol)
+{
+	s64 nr_free = vol->nr_clusters;
+	u32 *kaddr;
+	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
+	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
+	struct page *page;
+	unsigned long index, max_index;
+	unsigned int max_size;
+
+	ntfs_debug("Entering.");
+	/* Serialize accesses to the cluster bitmap. */
+	down_read(&vol->lcnbmp_lock);
+	/*
+	 * Convert the number of bits into bytes rounded up, then convert into
+	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * full and one partial page max_index = 2.
+	 */
+	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
+	/* Use multiples of 4 bytes. */
+	max_size = PAGE_CACHE_SIZE >> 2;
+	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%x.",
+			max_index, max_size);
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
+		/*
+		 * Read the page from page cache, getting it from backing store
+		 * if necessary, and increment the use count.
+		 */
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
+				NULL);
+		/* Ignore pages which errored synchronously. */
+		if (IS_ERR(page)) {
+			ntfs_debug("Sync read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		wait_on_page_locked(page);
+		/* Ignore pages which errored asynchronously. */
+		if (!PageUptodate(page)) {
+			ntfs_debug("Async read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		/*
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
+		 */
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
+	}
+	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
+	/*
+	 * Fixup for eventual bits outside logical ntfs volume (see function
+	 * description above).
+	 */
+	if (vol->nr_clusters & 63)
+		nr_free += 64 - (vol->nr_clusters & 63);
+	up_read(&vol->lcnbmp_lock);
+	/* If errors occurred we may well have gone below zero, fix this. */
+	if (nr_free < 0)
+		nr_free = 0;
+	ntfs_debug("Exiting.");
+	return nr_free;
+}
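+
+/*
+ * Worked example of the end-of-bitmap fixup above (hypothetical numbers):
+ * for a volume with vol->nr_clusters = 1000003, the cluster bitmap is sized
+ * in multiples of 8 bytes, so bits 1000003 to 1000063 lie beyond the volume
+ * yet are marked in use.  As 1000003 & 63 == 3, the fixup adds back
+ * 64 - 3 = 61 clusters, exactly the number of phantom bits.  A page that
+ * fails to read costs PAGE_CACHE_SIZE * 8 clusters (32768 with 4kiB pages),
+ * which is why the result can only ever be an underestimate.
+ */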
+
+/**
+ * __get_nr_free_mft_records - return the number of free inodes on a volume
+ * @vol:	ntfs volume for which to obtain free inode count
+ *
+ * Calculate the number of free mft records (inodes) on the mounted NTFS
+ * volume @vol. We actually calculate the number of mft records in use instead
+ * because this allows us to not care about partial pages as these will be just
+ * zero filled and hence not be counted as allocated mft records.
+ *
+ * If any pages cannot be read we assume all mft records in the erroring pages
+ * are in use. This means we return an underestimate on errors which is better
+ * than an overestimate.
+ *
+ * NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
+ */
+static unsigned long __get_nr_free_mft_records(ntfs_volume *vol)
+{
+	s64 nr_free;
+	u32 *kaddr;
+	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
+	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
+	struct page *page;
+	unsigned long index, max_index;
+	unsigned int max_size;
+
+	ntfs_debug("Entering.");
+	/* Number of mft records in file system (at this point in time). */
+	nr_free = vol->mft_ino->i_size >> vol->mft_record_size_bits;
+	/*
+	 * Convert the maximum number of set bits into bytes rounded up, then
+	 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+	 * have one full and one partial page max_index = 2.
+	 */
+	max_index = ((((NTFS_I(vol->mft_ino)->initialized_size >>
+			vol->mft_record_size_bits) + 7) >> 3) +
+			PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	/* Use multiples of 4 bytes. */
+	max_size = PAGE_CACHE_SIZE >> 2;
+	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
+			"0x%x.", max_index, max_size);
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
+		/*
+		 * Read the page from page cache, getting it from backing store
+		 * if necessary, and increment the use count.
+		 */
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
+				NULL);
+		/* Ignore pages which errored synchronously. */
+		if (IS_ERR(page)) {
+			ntfs_debug("Sync read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		wait_on_page_locked(page);
+		/* Ignore pages which errored asynchronously. */
+		if (!PageUptodate(page)) {
+			ntfs_debug("Async read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		/*
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
+		 */
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
+	}
+	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
+			index - 1);
+	/* If errors occurred we may well have gone below zero, fix this. */
+	if (nr_free < 0)
+		nr_free = 0;
+	ntfs_debug("Exiting.");
+	return nr_free;
+}
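+
+/*
+ * Example of the max_index arithmetic above, assuming 4kiB pages and a
+ * hypothetical $MFT with 65536 initialized mft records of 1kiB each:
+ * the mft bitmap needs (65536 + 7) >> 3 = 8192 bytes, which rounded up to
+ * whole pages gives max_index = (8192 + 4095) >> 12 = 2, i.e. two bitmap
+ * pages are scanned and their set bits subtracted from the record count.
+ */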
+
+/**
+ * ntfs_statfs - return information about mounted NTFS volume
+ * @sb:		super block of mounted volume
+ * @sfs:	statfs structure in which to return the information
+ *
+ * Return information about the mounted NTFS volume @sb in the statfs structure
+ * pointed to by @sfs (this is initialized with zeros before ntfs_statfs is
+ * called). We interpret the values to be correct at the moment in time at
+ * which we are called. Most values vary over time and this applies not just to
+ * the free values but the totals as well. For example we can increase the
+ * total number of file nodes if we run out and we can keep doing this until
+ * there is no more space on the volume left at all.
+ *
+ * Called from vfs_statfs which is used to handle the statfs, fstatfs, and
+ * ustat system calls.
+ *
+ * Return 0 on success or -errno on error.
+ */
+static int ntfs_statfs(struct super_block *sb, struct kstatfs *sfs)
+{
+	ntfs_volume *vol = NTFS_SB(sb);
+	s64 size;
+
+	ntfs_debug("Entering.");
+	/* Type of filesystem. */
+	sfs->f_type   = NTFS_SB_MAGIC;
+	/* Optimal transfer block size. */
+	sfs->f_bsize  = PAGE_CACHE_SIZE;
+	/*
+	 * Total data blocks in file system in units of f_bsize and since
+	 * inodes are also stored in data blocks ($MFT is a file) this is just
+	 * the total clusters.
+	 */
+	sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
+				PAGE_CACHE_SHIFT;
+	/* Free data blocks in file system in units of f_bsize. */
+	size	      = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
+				PAGE_CACHE_SHIFT;
+	if (size < 0LL)
+		size = 0LL;
+	/* Free blocks avail to non-superuser, same as above on NTFS. */
+	sfs->f_bavail = sfs->f_bfree = size;
+	/* Serialize accesses to the inode bitmap. */
+	down_read(&vol->mftbmp_lock);
+	/* Number of inodes in file system (at this point in time). */
+	sfs->f_files = vol->mft_ino->i_size >> vol->mft_record_size_bits;
+	/* Free inodes in fs (based on current total count). */
+	sfs->f_ffree = __get_nr_free_mft_records(vol);
+	up_read(&vol->mftbmp_lock);
+	/*
+	 * File system id. This is extremely *nix flavour dependent and even
+	 * within Linux itself all fs do their own thing. I interpret this to
+	 * mean a unique id associated with the mounted fs and not the id
+	 * associated with the file system driver, the latter is already given
+	 * by the file system type in sfs->f_type. Thus we use the 64-bit
+	 * volume serial number splitting it into two 32-bit parts. We enter
+	 * the least significant 32-bits in f_fsid[0] and the most significant
+	 * 32-bits in f_fsid[1].
+	 */
+	sfs->f_fsid.val[0] = vol->serial_no & 0xffffffff;
+	sfs->f_fsid.val[1] = (vol->serial_no >> 32) & 0xffffffff;
+	/* Maximum length of filenames. */
+	sfs->f_namelen	   = NTFS_MAX_NAME_LEN;
+	return 0;
+}
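+
+/*
+ * Example of the unit conversion in ntfs_statfs() (hypothetical values):
+ * with a 4kiB cluster size and 4kiB pages, f_blocks is simply the cluster
+ * count, while a 512 byte cluster size yields f_blocks = nr_clusters >> 3.
+ * A serial number of 0x1234567890abcdef is reported as
+ * f_fsid.val[0] = 0x90abcdef and f_fsid.val[1] = 0x12345678.
+ */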
+
+/**
+ * The complete super operations.
+ */
+static struct super_operations ntfs_sops = {
+	.alloc_inode	= ntfs_alloc_big_inode,	  /* VFS: Allocate new inode. */
+	.destroy_inode	= ntfs_destroy_big_inode, /* VFS: Deallocate inode. */
+	.put_inode	= ntfs_put_inode,	  /* VFS: Called just before
+						     the inode reference count
+						     is decreased. */
+#ifdef NTFS_RW
+	//.dirty_inode	= NULL,			/* VFS: Called from
+	//					   __mark_inode_dirty(). */
+	.write_inode	= ntfs_write_inode,	/* VFS: Write dirty inode to
+						   disk. */
+	//.drop_inode	= NULL,			/* VFS: Called just after the
+	//					   inode reference count has
+	//					   been decreased to zero.
+	//					   NOTE: The inode lock is
+	//					   held. See fs/inode.c::
+	//					   generic_drop_inode(). */
+	//.delete_inode	= NULL,			/* VFS: Delete inode from disk.
+	//					   Called when i_count becomes
+	//					   0 and i_nlink is also 0. */
+	//.write_super	= NULL,			/* Flush dirty super block to
+	//					   disk. */
+	//.sync_fs	= NULL,			/* ? */
+	//.write_super_lockfs	= NULL,		/* ? */
+	//.unlockfs	= NULL,			/* ? */
+#endif /* NTFS_RW */
+	.put_super	= ntfs_put_super,	/* Syscall: umount. */
+	.statfs		= ntfs_statfs,		/* Syscall: statfs */
+	.remount_fs	= ntfs_remount,		/* Syscall: mount -o remount. */
+	.clear_inode	= ntfs_clear_big_inode,	/* VFS: Called when an inode is
+						   removed from memory. */
+	//.umount_begin	= NULL,			/* Forced umount. */
+	.show_options	= ntfs_show_options,	/* Show mount options in
+						   proc. */
+};
+
+/**
+ * Declarations for NTFS specific export operations (fs/ntfs/namei.c).
+ */
+extern struct dentry *ntfs_get_parent(struct dentry *child_dent);
+extern struct dentry *ntfs_get_dentry(struct super_block *sb, void *fh);
+
+/**
+ * Export operations allowing NFS exporting of mounted NTFS partitions.
+ *
+ * We use the default ->decode_fh() and ->encode_fh() for now.  Note that they
+ * use 32 bits to store the inode number, which is an unsigned long and hence
+ * usually 64 bits on 64-bit architectures, so it would fail horribly on huge
+ * volumes.  I guess we need to define our own encode and decode fh functions
+ * that store 64-bit inode numbers at some point but for now we will ignore the
+ * problem...
+ *
+ * We also use the default ->get_name() helper (used by ->decode_fh() via
+ * fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs
+ * independent.
+ *
+ * The default ->get_parent() just returns -EACCES so we have to provide our
+ * own and the default ->get_dentry() is incompatible with NTFS due to not
+ * allowing the inode number 0 which is used in NTFS for the system file $MFT
+ * and due to using iget() whereas NTFS needs ntfs_iget().
+ */
+static struct export_operations ntfs_export_ops = {
+	.get_parent	= ntfs_get_parent,	/* Find the parent of a given
+						   directory. */
+	.get_dentry	= ntfs_get_dentry,	/* Find a dentry for the inode
+						   given a file handle
+						   sub-fragment. */
+};
+
+/**
+ * ntfs_fill_super - mount an ntfs file system
+ * @sb:		super block of ntfs file system to mount
+ * @opt:	string containing the mount options
+ * @silent:	silence error output
+ *
+ * ntfs_fill_super() is called by the VFS to mount the device described by @sb
+ * as an NTFS file system, using the mount options in @opt.
+ *
+ * If @silent is true, remain silent even if errors are detected. This is used
+ * during bootup, when the kernel tries to mount the root file system with all
+ * registered file systems one after the other until one succeeds. This implies
+ * that all file systems except the correct one will quite correctly and
+ * expectedly return an error, but nobody wants to see error messages when in
+ * fact this is what is supposed to happen.
+ *
+ * NOTE: @sb->s_flags contains the mount options flags.
+ */
+static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
+{
+	ntfs_volume *vol;
+	struct buffer_head *bh;
+	struct inode *tmp_ino;
+	int result;
+
+	ntfs_debug("Entering.");
+#ifndef NTFS_RW
+	sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+#endif /* ! NTFS_RW */
+	/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
+	sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
+	vol = NTFS_SB(sb);
+	if (!vol) {
+		if (!silent)
+			ntfs_error(sb, "Allocation of NTFS volume structure "
+					"failed. Aborting mount...");
+		return -ENOMEM;
+	}
+	/* Initialize ntfs_volume structure. */
+	memset(vol, 0, sizeof(ntfs_volume));
+	vol->sb = sb;
+	vol->upcase = NULL;
+	vol->attrdef = NULL;
+	vol->mft_ino = NULL;
+	vol->mftbmp_ino = NULL;
+	init_rwsem(&vol->mftbmp_lock);
+#ifdef NTFS_RW
+	vol->mftmirr_ino = NULL;
+	vol->logfile_ino = NULL;
+#endif /* NTFS_RW */
+	vol->lcnbmp_ino = NULL;
+	init_rwsem(&vol->lcnbmp_lock);
+	vol->vol_ino = NULL;
+	vol->root_ino = NULL;
+	vol->secure_ino = NULL;
+	vol->extend_ino = NULL;
+#ifdef NTFS_RW
+	vol->quota_ino = NULL;
+	vol->quota_q_ino = NULL;
+#endif /* NTFS_RW */
+	vol->nls_map = NULL;
+
+	/*
+	 * Default is group and other don't have any access to files or
+	 * directories while owner has full access. Further, files by default
+	 * are not executable but directories are of course browseable.
+	 */
+	vol->fmask = 0177;
+	vol->dmask = 0077;
+
+	unlock_kernel();
+
+	/* Important to get the mount options dealt with now. */
+	if (!parse_options(vol, (char*)opt))
+		goto err_out_now;
+
+	/*
+	 * TODO: Fail safety check. In the future we should really be able to
+	 * cope with this being the case, but for now just bail out.
+	 */
+	if (bdev_hardsect_size(sb->s_bdev) > NTFS_BLOCK_SIZE) {
+		if (!silent)
+			ntfs_error(sb, "Device has unsupported hardsect_size.");
+		goto err_out_now;
+	}
+
+	/* Setup the device access block size to NTFS_BLOCK_SIZE. */
+	if (sb_set_blocksize(sb, NTFS_BLOCK_SIZE) != NTFS_BLOCK_SIZE) {
+		if (!silent)
+			ntfs_error(sb, "Unable to set block size.");
+		goto err_out_now;
+	}
+
+	/* Get the size of the device in units of NTFS_BLOCK_SIZE bytes. */
+	vol->nr_blocks = sb->s_bdev->bd_inode->i_size >> NTFS_BLOCK_SIZE_BITS;
+
+	/* Read the boot sector and return unlocked buffer head to it. */
+	if (!(bh = read_ntfs_boot_sector(sb, silent))) {
+		if (!silent)
+			ntfs_error(sb, "Not an NTFS volume.");
+		goto err_out_now;
+	}
+
+	/*
+	 * Extract the data from the boot sector and setup the ntfs super block
+	 * using it.
+	 */
+	result = parse_ntfs_boot_sector(vol, (NTFS_BOOT_SECTOR*)bh->b_data);
+
+	/* Initialize the cluster and mft allocators. */
+	ntfs_setup_allocators(vol);
+
+	brelse(bh);
+
+	if (!result) {
+		if (!silent)
+			ntfs_error(sb, "Unsupported NTFS filesystem.");
+		goto err_out_now;
+	}
+
+	/*
+	 * TODO: When we start coping with sector sizes different from
+	 * NTFS_BLOCK_SIZE, we now probably need to set the blocksize of the
+	 * device (probably to NTFS_BLOCK_SIZE).
+	 */
+
+	/* Setup remaining fields in the super block. */
+	sb->s_magic = NTFS_SB_MAGIC;
+
+	/*
+	 * Ntfs allows 63 bits for the file size, i.e. correct would be:
+	 *	sb->s_maxbytes = ~0ULL >> 1;
+	 * But the kernel uses a long as the page cache page index which on
+	 * 32-bit architectures is only 32-bits. MAX_LFS_FILESIZE is kernel
+	 * defined to the maximum the page cache page index can cope with
+	 * without overflowing the index or to 2^63 - 1, whichever is smaller.
+	 */
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+	sb->s_time_gran = 100;
+
+	/*
+	 * Now load the metadata required for the page cache and our address
+	 * space operations to function. We do this by setting up a specialised
+	 * read_inode method and then just calling the normal iget() to obtain
+	 * the inode for $MFT which is sufficient to allow our normal inode
+	 * operations and associated address space operations to function.
+	 */
+	sb->s_op = &ntfs_sops;
+	tmp_ino = new_inode(sb);
+	if (!tmp_ino) {
+		if (!silent)
+			ntfs_error(sb, "Failed to load essential metadata.");
+		goto err_out_now;
+	}
+	tmp_ino->i_ino = FILE_MFT;
+	insert_inode_hash(tmp_ino);
+	if (ntfs_read_inode_mount(tmp_ino) < 0) {
+		if (!silent)
+			ntfs_error(sb, "Failed to load essential metadata.");
+		goto iput_tmp_ino_err_out_now;
+	}
+	down(&ntfs_lock);
+	/*
+	 * The current mount is a compression user if the cluster size is
+	 * less than or equal to 4kiB.
+	 */
+	if (vol->cluster_size <= 4096 && !ntfs_nr_compression_users++) {
+		result = allocate_compression_buffers();
+		if (result) {
+			ntfs_error(NULL, "Failed to allocate buffers "
+					"for compression engine.");
+			ntfs_nr_compression_users--;
+			up(&ntfs_lock);
+			goto iput_tmp_ino_err_out_now;
+		}
+	}
+	/*
+	 * Generate the global default upcase table if necessary.  Also
+	 * temporarily increment the number of upcase users to avoid race
+	 * conditions with concurrent (u)mounts.
+	 */
+	if (!default_upcase)
+		default_upcase = generate_default_upcase();
+	ntfs_nr_upcase_users++;
+	up(&ntfs_lock);
+	/*
+	 * From now on, ignore the @silent parameter. If we fail below this line,
+	 * it will be due to a corrupt fs or a system error, so we report it.
+	 */
+	/*
+	 * Open the system files with normal access functions and complete
+	 * setting up the ntfs super block.
+	 */
+	if (!load_system_files(vol)) {
+		ntfs_error(sb, "Failed to load system files.");
+		goto unl_upcase_iput_tmp_ino_err_out_now;
+	}
+	if ((sb->s_root = d_alloc_root(vol->root_ino))) {
+		/* We increment i_count simulating an ntfs_iget(). */
+		atomic_inc(&vol->root_ino->i_count);
+		ntfs_debug("Exiting, status successful.");
+		/* Release the default upcase if it has no users. */
+		down(&ntfs_lock);
+		if (!--ntfs_nr_upcase_users && default_upcase) {
+			ntfs_free(default_upcase);
+			default_upcase = NULL;
+		}
+		up(&ntfs_lock);
+		sb->s_export_op = &ntfs_export_ops;
+		lock_kernel();
+		return 0;
+	}
+	ntfs_error(sb, "Failed to allocate root directory.");
+	/* Clean up after the successful load_system_files() call from above. */
+	// TODO: Use ntfs_put_super() instead of repeating all this code...
+	// FIXME: Should mark the volume clean as the error is most likely
+	// 	  -ENOMEM.
+	iput(vol->vol_ino);
+	vol->vol_ino = NULL;
+	/* NTFS 3.0+ specific clean up. */
+	if (vol->major_ver >= 3) {
+#ifdef NTFS_RW
+		if (vol->quota_q_ino) {
+			iput(vol->quota_q_ino);
+			vol->quota_q_ino = NULL;
+		}
+		if (vol->quota_ino) {
+			iput(vol->quota_ino);
+			vol->quota_ino = NULL;
+		}
+#endif /* NTFS_RW */
+		if (vol->extend_ino) {
+			iput(vol->extend_ino);
+			vol->extend_ino = NULL;
+		}
+		if (vol->secure_ino) {
+			iput(vol->secure_ino);
+			vol->secure_ino = NULL;
+		}
+	}
+	iput(vol->root_ino);
+	vol->root_ino = NULL;
+	iput(vol->lcnbmp_ino);
+	vol->lcnbmp_ino = NULL;
+	iput(vol->mftbmp_ino);
+	vol->mftbmp_ino = NULL;
+#ifdef NTFS_RW
+	if (vol->logfile_ino) {
+		iput(vol->logfile_ino);
+		vol->logfile_ino = NULL;
+	}
+	if (vol->mftmirr_ino) {
+		iput(vol->mftmirr_ino);
+		vol->mftmirr_ino = NULL;
+	}
+#endif /* NTFS_RW */
+	/* Throw away the table of attribute definitions. */
+	vol->attrdef_size = 0;
+	if (vol->attrdef) {
+		ntfs_free(vol->attrdef);
+		vol->attrdef = NULL;
+	}
+	vol->upcase_len = 0;
+	down(&ntfs_lock);
+	if (vol->upcase == default_upcase) {
+		ntfs_nr_upcase_users--;
+		vol->upcase = NULL;
+	}
+	up(&ntfs_lock);
+	if (vol->upcase) {
+		ntfs_free(vol->upcase);
+		vol->upcase = NULL;
+	}
+	if (vol->nls_map) {
+		unload_nls(vol->nls_map);
+		vol->nls_map = NULL;
+	}
+	/* Error exit code path. */
+unl_upcase_iput_tmp_ino_err_out_now:
+	/*
+	 * Decrease the number of upcase users and destroy the global default
+	 * upcase table if necessary.
+	 */
+	down(&ntfs_lock);
+	if (!--ntfs_nr_upcase_users && default_upcase) {
+		ntfs_free(default_upcase);
+		default_upcase = NULL;
+	}
+	if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
+		free_compression_buffers();
+	up(&ntfs_lock);
+iput_tmp_ino_err_out_now:
+	iput(tmp_ino);
+	if (vol->mft_ino && vol->mft_ino != tmp_ino)
+		iput(vol->mft_ino);
+	vol->mft_ino = NULL;
+	/*
+	 * This is needed to get ntfs_clear_extent_inode() called for each
+	 * inode we have ever called ntfs_iget()/iput() on, otherwise we A)
+	 * leak resources and B) a subsequent mount fails automatically due to
+	 * ntfs_iget() never calling down into our ntfs_read_locked_inode()
+	 * method again... FIXME: Do we need to do this twice now because of
+	 * attribute inodes? I think not, so leave as is for now... (AIA)
+	 */
+	if (invalidate_inodes(sb)) {
+		ntfs_error(sb, "Busy inodes left. This is most likely a NTFS "
+				"driver bug.");
+		/* Copied from fs/super.c. I just love this message. (-; */
+		printk("NTFS: Busy inodes after umount. Self-destruct in 5 "
+				"seconds.  Have a nice day...\n");
+	}
+	/* Errors at this stage are irrelevant. */
+err_out_now:
+	lock_kernel();
+	sb->s_fs_info = NULL;
+	kfree(vol);
+	ntfs_debug("Failed, returning -EINVAL.");
+	return -EINVAL;
+}
+
+/*
+ * This is a slab cache to optimize allocations and deallocations of Unicode
+ * strings of the maximum length allowed by NTFS, which is NTFS_MAX_NAME_LEN
+ * (255) Unicode characters + a terminating NULL Unicode character.
+ */
+kmem_cache_t *ntfs_name_cache;
+
+/* Slab caches for efficient allocation/deallocation of inodes. */
+kmem_cache_t *ntfs_inode_cache;
+kmem_cache_t *ntfs_big_inode_cache;
+
+/* Init once constructor for the inode slab cache. */
+static void ntfs_big_inode_init_once(void *foo, kmem_cache_t *cachep,
+		unsigned long flags)
+{
+	ntfs_inode *ni = (ntfs_inode *)foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+			SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(VFS_I(ni));
+}
+
+/*
+ * Slab caches to optimize allocations and deallocations of attribute search
+ * contexts and index contexts, respectively.
+ */
+kmem_cache_t *ntfs_attr_ctx_cache;
+kmem_cache_t *ntfs_index_ctx_cache;
+
+/* Driver wide semaphore. */
+DECLARE_MUTEX(ntfs_lock);
+
+static struct super_block *ntfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+}
+
+static struct file_system_type ntfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ntfs",
+	.get_sb		= ntfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+/* Stable names for the slab caches. */
+static const char ntfs_index_ctx_cache_name[] = "ntfs_index_ctx_cache";
+static const char ntfs_attr_ctx_cache_name[] = "ntfs_attr_ctx_cache";
+static const char ntfs_name_cache_name[] = "ntfs_name_cache";
+static const char ntfs_inode_cache_name[] = "ntfs_inode_cache";
+static const char ntfs_big_inode_cache_name[] = "ntfs_big_inode_cache";
+
+static int __init init_ntfs_fs(void)
+{
+	int err = 0;
+
+	/* This may be ugly but it results in pretty output so who cares. (-8 */
+	printk(KERN_INFO "NTFS driver " NTFS_VERSION " [Flags: R/"
+#ifdef NTFS_RW
+			"W"
+#else
+			"O"
+#endif
+#ifdef DEBUG
+			" DEBUG"
+#endif
+#ifdef MODULE
+			" MODULE"
+#endif
+			"].\n");
+
+	ntfs_debug("Debug messages are enabled.");
+
+	ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
+			sizeof(ntfs_index_context), 0 /* offset */,
+			SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+	if (!ntfs_index_ctx_cache) {
+		printk(KERN_CRIT "NTFS: Failed to create %s!\n",
+				ntfs_index_ctx_cache_name);
+		goto ictx_err_out;
+	}
+	ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
+			sizeof(ntfs_attr_search_ctx), 0 /* offset */,
+			SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+	if (!ntfs_attr_ctx_cache) {
+		printk(KERN_CRIT "NTFS: Failed to create %s!\n",
+				ntfs_attr_ctx_cache_name);
+		goto actx_err_out;
+	}
+
+	ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
+			(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ntfs_name_cache) {
+		printk(KERN_CRIT "NTFS: Failed to create %s!\n",
+				ntfs_name_cache_name);
+		goto name_err_out;
+	}
+
+	ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
+			sizeof(ntfs_inode), 0,
+			SLAB_RECLAIM_ACCOUNT, NULL, NULL);
+	if (!ntfs_inode_cache) {
+		printk(KERN_CRIT "NTFS: Failed to create %s!\n",
+				ntfs_inode_cache_name);
+		goto inode_err_out;
+	}
+
+	ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
+			sizeof(big_ntfs_inode), 0,
+			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
+			ntfs_big_inode_init_once, NULL);
+	if (!ntfs_big_inode_cache) {
+		printk(KERN_CRIT "NTFS: Failed to create %s!\n",
+				ntfs_big_inode_cache_name);
+		goto big_inode_err_out;
+	}
+
+	/* Register the ntfs sysctls. */
+	err = ntfs_sysctl(1);
+	if (err) {
+		printk(KERN_CRIT "NTFS: Failed to register NTFS sysctls!\n");
+		goto sysctl_err_out;
+	}
+
+	err = register_filesystem(&ntfs_fs_type);
+	if (!err) {
+		ntfs_debug("NTFS driver registered successfully.");
+		return 0; /* Success! */
+	}
+	printk(KERN_CRIT "NTFS: Failed to register NTFS file system driver!\n");
+
+sysctl_err_out:
+	kmem_cache_destroy(ntfs_big_inode_cache);
+big_inode_err_out:
+	kmem_cache_destroy(ntfs_inode_cache);
+inode_err_out:
+	kmem_cache_destroy(ntfs_name_cache);
+name_err_out:
+	kmem_cache_destroy(ntfs_attr_ctx_cache);
+actx_err_out:
+	kmem_cache_destroy(ntfs_index_ctx_cache);
+ictx_err_out:
+	if (!err) {
+		printk(KERN_CRIT "NTFS: Aborting NTFS file system driver "
+				"registration...\n");
+		err = -ENOMEM;
+	}
+	return err;
+}
+
+static void __exit exit_ntfs_fs(void)
+{
+	int err = 0;
+
+	ntfs_debug("Unregistering NTFS driver.");
+
+	unregister_filesystem(&ntfs_fs_type);
+
+	if (kmem_cache_destroy(ntfs_big_inode_cache) && (err = 1))
+		printk(KERN_CRIT "NTFS: Failed to destroy %s.\n",
+				ntfs_big_inode_cache_name);
+	if (kmem_cache_destroy(ntfs_inode_cache) && (err = 1))
+		printk(KERN_CRIT "NTFS: Failed to destroy %s.\n",
+				ntfs_inode_cache_name);
+	if (kmem_cache_destroy(ntfs_name_cache) && (err = 1))
+		printk(KERN_CRIT "NTFS: Failed to destroy %s.\n",
+				ntfs_name_cache_name);
+	if (kmem_cache_destroy(ntfs_attr_ctx_cache) && (err = 1))
+		printk(KERN_CRIT "NTFS: Failed to destroy %s.\n",
+				ntfs_attr_ctx_cache_name);
+	if (kmem_cache_destroy(ntfs_index_ctx_cache) && (err = 1))
+		printk(KERN_CRIT "NTFS: Failed to destroy %s.\n",
+				ntfs_index_ctx_cache_name);
+	if (err)
+		printk(KERN_CRIT "NTFS: This causes memory to leak! There is "
+				"probably a BUG in the driver! Please report "
+				"you saw this message to "
+				"linux-ntfs-dev@lists.sourceforge.net\n");
+	/* Unregister the ntfs sysctls. */
+	ntfs_sysctl(0);
+}
+
+MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2004 Anton Altaparmakov");
+MODULE_VERSION(NTFS_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef DEBUG
+module_param(debug_msgs, bool, 0);
+MODULE_PARM_DESC(debug_msgs, "Enable debug messages.");
+#endif
+
+module_init(init_ntfs_fs)
+module_exit(exit_ntfs_fs)
diff --git a/fs/ntfs/sysctl.c b/fs/ntfs/sysctl.c
new file mode 100644
index 0000000..75067e4
--- /dev/null
+++ b/fs/ntfs/sysctl.c
@@ -0,0 +1,85 @@
+/*
+ * sysctl.c - Code for sysctl handling in NTFS Linux kernel driver. Part of
+ *	      the Linux-NTFS project. Adapted from the old NTFS driver,
+ *	      Copyright (C) 1997 Martin von Löwis, Régis Duchesne
+ *
+ * Copyright (c) 2002-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef DEBUG
+
+#include <linux/module.h>
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+
+#include "sysctl.h"
+#include "debug.h"
+
+#define FS_NTFS	1
+
+/* Definition of the ntfs sysctl. */
+static ctl_table ntfs_sysctls[] = {
+	{ FS_NTFS, "ntfs-debug",		/* Binary and text IDs. */
+	  &debug_msgs,sizeof(debug_msgs),	/* Data pointer and size. */
+	  0644,	NULL, &proc_dointvec },		/* Mode, child, proc handler. */
+	{ 0 }
+};
+
+/* Define the parent directory /proc/sys/fs. */
+static ctl_table sysctls_root[] = {
+	{ CTL_FS, "fs", NULL, 0, 0555, ntfs_sysctls },
+	{ 0 }
+};
+
+/* Storage for the sysctls header. */
+static struct ctl_table_header *sysctls_root_table = NULL;
+
+/**
+ * ntfs_sysctl - add or remove the debug sysctl
+ * @add:	add (1) or remove (0) the sysctl
+ *
+ * Add or remove the debug sysctl. Return 0 on success or -errno on error.
+ */
+int ntfs_sysctl(int add)
+{
+	if (add) {
+		BUG_ON(sysctls_root_table);
+		sysctls_root_table = register_sysctl_table(sysctls_root, 0);
+		if (!sysctls_root_table)
+			return -ENOMEM;
+#ifdef CONFIG_PROC_FS
+		/*
+		 * If the proc file system is in use and we are a module, we
+		 * need to set the owner of our proc entry to our module. In the
+		 * non-modular case, THIS_MODULE is NULL, so this is ok.
+		 */
+		ntfs_sysctls[0].de->owner = THIS_MODULE;
+#endif
+	} else {
+		BUG_ON(!sysctls_root_table);
+		unregister_sysctl_table(sysctls_root_table);
+		sysctls_root_table = NULL;
+	}
+	return 0;
+}
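+
+/*
+ * Usage sketch: with both DEBUG and CONFIG_SYSCTL enabled, the registration
+ * above should expose the debug_msgs variable as /proc/sys/fs/ntfs-debug,
+ * so debug output can be toggled at run time with something like
+ * "echo 1 > /proc/sys/fs/ntfs-debug".
+ */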
+
+#endif /* CONFIG_SYSCTL */
+#endif /* DEBUG */
diff --git a/fs/ntfs/sysctl.h b/fs/ntfs/sysctl.h
new file mode 100644
index 0000000..df749cc
--- /dev/null
+++ b/fs/ntfs/sysctl.h
@@ -0,0 +1,42 @@
+/*
+ * sysctl.h - Defines for sysctl handling in NTFS Linux kernel driver. Part of
+ *	      the Linux-NTFS project. Adapted from the old NTFS driver,
+ *	      Copyright (C) 1997 Martin von Löwis, Régis Duchesne
+ *
+ * Copyright (c) 2002-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_SYSCTL_H
+#define _LINUX_NTFS_SYSCTL_H
+
+#include <linux/config.h>
+
+#if (DEBUG && CONFIG_SYSCTL)
+
+extern int ntfs_sysctl(int add);
+
+#else
+
+/* Just return success. */
+static inline int ntfs_sysctl(int add)
+{
+	return 0;
+}
+
+#endif /* DEBUG && CONFIG_SYSCTL */
+#endif /* _LINUX_NTFS_SYSCTL_H */
diff --git a/fs/ntfs/time.h b/fs/ntfs/time.h
new file mode 100644
index 0000000..a09a51d
--- /dev/null
+++ b/fs/ntfs/time.h
@@ -0,0 +1,100 @@
+/*
+ * time.h - NTFS time conversion functions.  Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_TIME_H
+#define _LINUX_NTFS_TIME_H
+
+#include <linux/time.h>		/* For current_kernel_time(). */
+#include <asm/div64.h>		/* For do_div(). */
+
+#include "endian.h"
+
+#define NTFS_TIME_OFFSET ((s64)(369 * 365 + 89) * 24 * 3600 * 10000000)
+
+/**
+ * utc2ntfs - convert Linux UTC time to NTFS time
+ * @ts:		Linux UTC time to convert to NTFS time
+ *
+ * Convert the Linux UTC time @ts to its corresponding NTFS time and return
+ * that in little endian format.
+ *
+ * Linux stores time in a struct timespec consisting of a time_t (long at
+ * present) tv_sec and a long tv_nsec where tv_sec is the number of 1-second
+ * intervals since 1st January 1970, 00:00:00 UTC and tv_nsec is the number of
+ * 1-nano-second intervals since the value of tv_sec.
+ *
+ * NTFS uses Microsoft's standard time format which is stored in a s64 and is
+ * measured as the number of 100-nano-second intervals since 1st January 1601,
+ * 00:00:00 UTC.
+ */
+static inline sle64 utc2ntfs(const struct timespec ts)
+{
+	/*
+	 * Convert the seconds to 100ns intervals, add the nano-seconds
+	 * converted to 100ns intervals, and then add the NTFS time offset.
+	 */
+	return cpu_to_sle64((s64)ts.tv_sec * 10000000 + ts.tv_nsec / 100 +
+			NTFS_TIME_OFFSET);
+}
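+
+/*
+ * Example: the Linux epoch, ts = { .tv_sec = 0, .tv_nsec = 0 }, converts to
+ * NTFS_TIME_OFFSET = (369 * 365 + 89) days * 86400 s * 10000000 =
+ * 116444736000000000, i.e. the number of 100ns intervals between
+ * 1st January 1601 and 1st January 1970.
+ */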
+
+/**
+ * get_current_ntfs_time - get the current time in little endian NTFS format
+ *
+ * Get the current time from the Linux kernel, convert it to its corresponding
+ * NTFS time and return that in little endian format.
+ */
+static inline sle64 get_current_ntfs_time(void)
+{
+	return utc2ntfs(current_kernel_time());
+}
+
+/**
+ * ntfs2utc - convert NTFS time to Linux time
+ * @time:	NTFS time (little endian) to convert to Linux UTC
+ *
+ * Convert the little endian NTFS time @time to its corresponding Linux UTC
+ * time and return that in cpu format.
+ *
+ * Linux stores time in a struct timespec consisting of a time_t (long at
+ * present) tv_sec and a long tv_nsec where tv_sec is the number of 1-second
+ * intervals since 1st January 1970, 00:00:00 UTC and tv_nsec is the number of
+ * 1-nano-second intervals since the value of tv_sec.
+ *
+ * NTFS uses Microsoft's standard time format which is stored in a s64 and is
+ * measured as the number of 100 nano-second intervals since 1st January 1601,
+ * 00:00:00 UTC.
+ */
+static inline struct timespec ntfs2utc(const sle64 time)
+{
+	struct timespec ts;
+
+	/* Subtract the NTFS time offset. */
+	s64 t = sle64_to_cpu(time) - NTFS_TIME_OFFSET;
+	/*
+	 * Convert the time to 1-second intervals and the remainder to
+	 * 1-nano-second intervals.
+	 */
+	ts.tv_nsec = do_div(t, 10000000) * 100;
+	ts.tv_sec = t;
+	return ts;
+}
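+
+/*
+ * Example for the inverse conversion: an NTFS time 13000000 intervals
+ * (1.3 seconds) past NTFS_TIME_OFFSET leaves t = 13000000 after the offset
+ * subtraction; do_div(t, 10000000) sets t = 1 and returns the remainder
+ * 3000000, so ts becomes { .tv_sec = 1, .tv_nsec = 300000000 }.
+ */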
+
+#endif /* _LINUX_NTFS_TIME_H */
diff --git a/fs/ntfs/types.h b/fs/ntfs/types.h
new file mode 100644
index 0000000..08a55aa
--- /dev/null
+++ b/fs/ntfs/types.h
@@ -0,0 +1,66 @@
+/*
+ * types.h - Defines for NTFS Linux kernel driver specific types.
+ *	     Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_TYPES_H
+#define _LINUX_NTFS_TYPES_H
+
+#include <linux/types.h>
+
+typedef __le16 le16;
+typedef __le32 le32;
+typedef __le64 le64;
+typedef __u16 __bitwise sle16;
+typedef __u32 __bitwise sle32;
+typedef __u64 __bitwise sle64;
+
+/* 2-byte Unicode character type. */
+typedef le16 ntfschar;
+#define UCHAR_T_SIZE_BITS 1
+
+/*
+ * Clusters are signed 64-bit values on NTFS volumes. We define two types, LCN
+ * and VCN, to allow for type checking and better code readability.
+ */
+typedef s64 VCN;
+typedef sle64 leVCN;
+typedef s64 LCN;
+typedef sle64 leLCN;
+
+/*
+ * The NTFS journal $LogFile uses log sequence numbers which are signed 64-bit
+ * values.  We define our own type LSN, to allow for type checking and better
+ * code readability.
+ */
+typedef s64 LSN;
+typedef sle64 leLSN;
+
+typedef enum {
+	FALSE = 0,
+	TRUE = 1
+} BOOL;
+
+typedef enum {
+	CASE_SENSITIVE = 0,
+	IGNORE_CASE = 1,
+} IGNORE_CASE_BOOL;
+
+#endif /* _LINUX_NTFS_TYPES_H */
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
new file mode 100644
index 0000000..560b0ea
--- /dev/null
+++ b/fs/ntfs/unistr.c
@@ -0,0 +1,384 @@
+/*
+ * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "types.h"
+#include "debug.h"
+#include "ntfs.h"
+
+/*
+ * IMPORTANT
+ * =========
+ *
+ * All these routines assume that the Unicode characters are in little endian
+ * encoding inside the strings!!!
+ */
+
+/*
+ * This is used by the name collation functions to quickly determine what
+ * characters are (in)valid.
+ */
+static const u8 legal_ansi_char_array[0x40] = {
+	0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+
+	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+
+	0x17, 0x07, 0x18, 0x17, 0x17, 0x17, 0x17, 0x17,
+	0x17, 0x17, 0x18, 0x16, 0x16, 0x17, 0x07, 0x00,
+
+	0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
+	0x17, 0x17, 0x04, 0x16, 0x18, 0x16, 0x18, 0x18,
+};
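+
+/*
+ * Note on the table above: ntfs_collate_names() below only tests bit 3
+ * (0x08), which is set for the characters that are illegal in NTFS names,
+ * e.g. '"' (0x22), '*' (0x2a), '<' (0x3c), '>' (0x3e) and '?' (0x3f) all
+ * have the value 0x18 and are thus rejected with @err_val.
+ */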
+
+/**
+ * ntfs_are_names_equal - compare two Unicode names for equality
+ * @s1:			name to compare to @s2
+ * @s1_len:		length in Unicode characters of @s1
+ * @s2:			name to compare to @s1
+ * @s2_len:		length in Unicode characters of @s2
+ * @ic:			ignore case bool
+ * @upcase:		upcase table (only if @ic == IGNORE_CASE)
+ * @upcase_size:	length in Unicode characters of @upcase (if present)
+ *
+ * Compare the names @s1 and @s2 and return TRUE (1) if the names are
+ * identical, or FALSE (0) if they are not identical. If @ic is IGNORE_CASE,
+ * the @upcase table is used to perform a case insensitive comparison.
+ */
+BOOL ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
+		const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_size)
+{
+	if (s1_len != s2_len)
+		return FALSE;
+	if (ic == CASE_SENSITIVE)
+		return !ntfs_ucsncmp(s1, s2, s1_len);
+	return !ntfs_ucsncasecmp(s1, s2, s1_len, upcase, upcase_size);
+}
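+
+/*
+ * Example (assuming the standard Windows upcase table): the 4 character
+ * names "ntfs" and "NTFS", stored as little endian ntfschar arrays, compare
+ * equal with @ic == IGNORE_CASE but unequal with @ic == CASE_SENSITIVE, and
+ * any two names of different lengths compare unequal straight away.
+ */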
+
+/**
+ * ntfs_collate_names - collate two Unicode names
+ * @name1:	first Unicode name to compare
+ * @name2:	second Unicode name to compare
+ * @err_val:	if @name1 contains an invalid character return this value
+ * @ic:		either CASE_SENSITIVE or IGNORE_CASE
+ * @upcase:	upcase table (ignored if @ic is CASE_SENSITIVE)
+ * @upcase_len:	upcase table size (ignored if @ic is CASE_SENSITIVE)
+ *
+ * ntfs_collate_names collates two Unicode names and returns:
+ *
+ *  -1 if the first name collates before the second one,
+ *   0 if the names match,
+ *   1 if the second name collates before the first one, or
+ * @err_val if an invalid character is found in @name1 during the comparison.
+ *
+ * The following characters are considered invalid: '"', '*', '<', '>' and '?'.
+ */
+int ntfs_collate_names(const ntfschar *name1, const u32 name1_len,
+		const ntfschar *name2, const u32 name2_len,
+		const int err_val, const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_len)
+{
+	u32 cnt, min_len;
+	u16 c1, c2;
+
+	min_len = name1_len;
+	if (name1_len > name2_len)
+		min_len = name2_len;
+	for (cnt = 0; cnt < min_len; ++cnt) {
+		c1 = le16_to_cpu(*name1++);
+		c2 = le16_to_cpu(*name2++);
+		if (ic) {
+			if (c1 < upcase_len)
+				c1 = le16_to_cpu(upcase[c1]);
+			if (c2 < upcase_len)
+				c2 = le16_to_cpu(upcase[c2]);
+		}
+		if (c1 < 64 && legal_ansi_char_array[c1] & 8)
+			return err_val;
+		if (c1 < c2)
+			return -1;
+		if (c1 > c2)
+			return 1;
+	}
+	if (name1_len < name2_len)
+		return -1;
+	if (name1_len == name2_len)
+		return 0;
+	/* name1_len > name2_len */
+	c1 = le16_to_cpu(*name1);
+	if (c1 < 64 && legal_ansi_char_array[c1] & 8)
+		return err_val;
+	return 1;
+}
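+
+/*
+ * Collation examples: with @ic == CASE_SENSITIVE, "abc" collates before
+ * "abd" (return -1) and "abcd" collates after "abc" (return 1), while
+ * comparing "a?" against "ab" returns @err_val because '?' is one of the
+ * illegal characters flagged in legal_ansi_char_array above.
+ */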
+
+/**
+ * ntfs_ucsncmp - compare two little endian Unicode strings
+ * @s1:		first string
+ * @s2:		second string
+ * @n:		maximum unicode characters to compare
+ *
+ * Compare the first @n characters of the Unicode strings @s1 and @s2.
+ * The strings are in little endian format and the appropriate le16_to_cpu()
+ * conversion is performed on non-little endian machines.
+ *
+ * The function returns an integer less than, equal to, or greater than zero
+ * if @s1 (or the first @n Unicode characters thereof) is found, respectively,
+ * to be less than, to match, or be greater than @s2.
+ */
+int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n)
+{
+	u16 c1, c2;
+	size_t i;
+
+	for (i = 0; i < n; ++i) {
+		c1 = le16_to_cpu(s1[i]);
+		c2 = le16_to_cpu(s2[i]);
+		if (c1 < c2)
+			return -1;
+		if (c1 > c2)
+			return 1;
+		if (!c1)
+			break;
+	}
+	return 0;
+}
+
+/**
+ * ntfs_ucsncasecmp - compare two little endian Unicode strings, ignoring case
+ * @s1:			first string
+ * @s2:			second string
+ * @n:			maximum unicode characters to compare
+ * @upcase:		upcase table
+ * @upcase_size:	upcase table size in Unicode characters
+ *
+ * Compare the first @n characters of the Unicode strings @s1 and @s2,
+ * ignoring case. The strings are in little endian format and the appropriate
+ * le16_to_cpu() conversion is performed on non-little endian machines.
+ *
+ * Each character is uppercased using the @upcase table before the comparison.
+ *
+ * The function returns an integer less than, equal to, or greater than zero
+ * if @s1 (or the first @n Unicode characters thereof) is found, respectively,
+ * to be less than, to match, or be greater than @s2.
+ */
+int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n,
+		const ntfschar *upcase, const u32 upcase_size)
+{
+	size_t i;
+	u16 c1, c2;
+
+	for (i = 0; i < n; ++i) {
+		if ((c1 = le16_to_cpu(s1[i])) < upcase_size)
+			c1 = le16_to_cpu(upcase[c1]);
+		if ((c2 = le16_to_cpu(s2[i])) < upcase_size)
+			c2 = le16_to_cpu(upcase[c2]);
+		if (c1 < c2)
+			return -1;
+		if (c1 > c2)
+			return 1;
+		if (!c1)
+			break;
+	}
+	return 0;
+}
+
+void ntfs_upcase_name(ntfschar *name, u32 name_len, const ntfschar *upcase,
+		const u32 upcase_len)
+{
+	u32 i;
+	u16 u;
+
+	for (i = 0; i < name_len; i++)
+		if ((u = le16_to_cpu(name[i])) < upcase_len)
+			name[i] = upcase[u];
+}
+
+void ntfs_file_upcase_value(FILE_NAME_ATTR *file_name_attr,
+		const ntfschar *upcase, const u32 upcase_len)
+{
+	ntfs_upcase_name((ntfschar*)&file_name_attr->file_name,
+			file_name_attr->file_name_length, upcase, upcase_len);
+}
+
+int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1,
+		FILE_NAME_ATTR *file_name_attr2,
+		const int err_val, const IGNORE_CASE_BOOL ic,
+		const ntfschar *upcase, const u32 upcase_len)
+{
+	return ntfs_collate_names((ntfschar*)&file_name_attr1->file_name,
+			file_name_attr1->file_name_length,
+			(ntfschar*)&file_name_attr2->file_name,
+			file_name_attr2->file_name_length,
+			err_val, ic, upcase, upcase_len);
+}
+
+/**
+ * ntfs_nlstoucs - convert NLS string to little endian Unicode string
+ * @vol:	ntfs volume which we are working with
+ * @ins:	input NLS string buffer
+ * @ins_len:	length of input string in bytes
+ * @outs:	on return contains the allocated output Unicode string buffer
+ *
+ * Convert the input string @ins, which is in whatever format the loaded NLS
+ * map dictates, into a little endian, 2-byte Unicode string.
+ *
+ * This function allocates the string and the caller is responsible for
+ * calling kmem_cache_free(ntfs_name_cache, @outs); when finished with it.
+ *
+ * On success the function returns the number of Unicode characters written to
+ * the output string *@outs (>= 0), not counting the terminating Unicode NULL
+ * character. *@outs is set to the allocated output string buffer.
+ *
+ * On error, a negative number corresponding to the error code is returned. In
+ * that case the output string is not allocated and the contents of *@outs
+ * are then undefined.
+ *
+ * This might look a bit odd due to fast path optimization...
+ */
+int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
+		const int ins_len, ntfschar **outs)
+{
+	struct nls_table *nls = vol->nls_map;
+	ntfschar *ucs;
+	wchar_t wc;
+	int i, o, wc_len;
+
+	/* We don't trust outside sources. */
+	if (ins) {
+		ucs = (ntfschar*)kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS);
+		if (ucs) {
+			for (i = o = 0; i < ins_len; i += wc_len) {
+				wc_len = nls->char2uni(ins + i, ins_len - i,
+						&wc);
+				if (wc_len >= 0) {
+					if (wc) {
+						ucs[o++] = cpu_to_le16(wc);
+						continue;
+					} /* else (!wc) */
+					break;
+				} /* else (wc_len < 0) */
+				goto conversion_err;
+			}
+			ucs[o] = 0;
+			*outs = ucs;
+			return o;
+		} /* else (!ucs) */
+		ntfs_error(vol->sb, "Failed to allocate name from "
+				"ntfs_name_cache!");
+		return -ENOMEM;
+	} /* else (!ins) */
+	ntfs_error(NULL, "Received NULL pointer.");
+	return -EINVAL;
+conversion_err:
+	ntfs_error(vol->sb, "Name using character set %s contains characters "
+			"that cannot be converted to Unicode.", nls->charset);
+	kmem_cache_free(ntfs_name_cache, ucs);
+	return -EILSEQ;
+}
+
+/**
+ * ntfs_ucstonls - convert little endian Unicode string to NLS string
+ * @vol:	ntfs volume which we are working with
+ * @ins:	input Unicode string buffer
+ * @ins_len:	length of input string in Unicode characters
+ * @outs:	on return contains the (allocated) output NLS string buffer
+ * @outs_len:	length of output string buffer in bytes
+ *
+ * Convert the input little endian, 2-byte Unicode string @ins, of length
+ * @ins_len into the string format dictated by the loaded NLS.
+ *
+ * If *@outs is NULL, this function allocates the string and the caller is
+ * responsible for calling kfree(*@outs); when finished with it. In this case
+ * @outs_len is ignored and can be 0.
+ *
+ * On success the function returns the number of bytes written to the output
+ * string *@outs (>= 0), not counting the terminating NULL byte. If the output
+ * string buffer was allocated, *@outs is set to it.
+ *
+ * On error, a negative number corresponding to the error code is returned. In
+ * that case the output string is not allocated. The contents of *@outs are
+ * then undefined.
+ *
+ * This might look a bit odd due to fast path optimization...
+ */
+int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
+		const int ins_len, unsigned char **outs, int outs_len)
+{
+	struct nls_table *nls = vol->nls_map;
+	unsigned char *ns;
+	int i, o, ns_len, wc;
+
+	/* We don't trust outside sources. */
+	if (ins) {
+		ns = *outs;
+		ns_len = outs_len;
+		if (ns && !ns_len) {
+			wc = -ENAMETOOLONG;
+			goto conversion_err;
+		}
+		if (!ns) {
+			ns_len = ins_len * NLS_MAX_CHARSET_SIZE;
+			ns = (unsigned char*)kmalloc(ns_len + 1, GFP_NOFS);
+			if (!ns)
+				goto mem_err_out;
+		}
+		for (i = o = 0; i < ins_len; i++) {
+retry:			wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o,
+					ns_len - o);
+			if (wc > 0) {
+				o += wc;
+				continue;
+			} else if (!wc)
+				break;
+			else if (wc == -ENAMETOOLONG && ns != *outs) {
+				unsigned char *tc;
+				/* Grow in multiples of 64 bytes. */
+				tc = (unsigned char*)kmalloc((ns_len + 64) &
+						~63, GFP_NOFS);
+				if (tc) {
+					memcpy(tc, ns, ns_len);
+					ns_len = ((ns_len + 64) & ~63) - 1;
+					kfree(ns);
+					ns = tc;
+					goto retry;
+				} /* No memory so goto conversion_error; */
+			} /* wc < 0, real error. */
+			goto conversion_err;
+		}
+		ns[o] = 0;
+		*outs = ns;
+		return o;
+	} /* else (!ins) */
+	ntfs_error(vol->sb, "Received NULL pointer.");
+	return -EINVAL;
+conversion_err:
+	ntfs_error(vol->sb, "Unicode name contains characters that cannot be "
+			"converted to character set %s.", nls->charset);
+	if (ns != *outs)
+		kfree(ns);
+	if (wc != -ENAMETOOLONG)
+		wc = -EILSEQ;
+	return wc;
+mem_err_out:
+	ntfs_error(vol->sb, "Failed to allocate name!");
+	return -ENOMEM;
+}
diff --git a/fs/ntfs/upcase.c b/fs/ntfs/upcase.c
new file mode 100644
index 0000000..879cdf1
--- /dev/null
+++ b/fs/ntfs/upcase.c
@@ -0,0 +1,90 @@
+/*
+ * upcase.c - Generate the full NTFS Unicode upcase table in little endian.
+ *	      Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001 Richard Russon <ntfs@flatcap.org>
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ *
+ * Modified for mkntfs inclusion 9 June 2001 by Anton Altaparmakov.
+ * Modified for kernel inclusion 10 September 2001 by Anton Altaparmakov.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS source
+ * in the file COPYING); if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "malloc.h"
+#include "ntfs.h"
+
+ntfschar *generate_default_upcase(void)
+{
+	static const int uc_run_table[][3] = { /* Start, End, Add */
+	{0x0061, 0x007B,  -32}, {0x0451, 0x045D, -80}, {0x1F70, 0x1F72,  74},
+	{0x00E0, 0x00F7,  -32}, {0x045E, 0x0460, -80}, {0x1F72, 0x1F76,  86},
+	{0x00F8, 0x00FF,  -32}, {0x0561, 0x0587, -48}, {0x1F76, 0x1F78, 100},
+	{0x0256, 0x0258, -205}, {0x1F00, 0x1F08,   8}, {0x1F78, 0x1F7A, 128},
+	{0x028A, 0x028C, -217}, {0x1F10, 0x1F16,   8}, {0x1F7A, 0x1F7C, 112},
+	{0x03AC, 0x03AD,  -38}, {0x1F20, 0x1F28,   8}, {0x1F7C, 0x1F7E, 126},
+	{0x03AD, 0x03B0,  -37}, {0x1F30, 0x1F38,   8}, {0x1FB0, 0x1FB2,   8},
+	{0x03B1, 0x03C2,  -32}, {0x1F40, 0x1F46,   8}, {0x1FD0, 0x1FD2,   8},
+	{0x03C2, 0x03C3,  -31}, {0x1F51, 0x1F52,   8}, {0x1FE0, 0x1FE2,   8},
+	{0x03C3, 0x03CC,  -32}, {0x1F53, 0x1F54,   8}, {0x1FE5, 0x1FE6,   7},
+	{0x03CC, 0x03CD,  -64}, {0x1F55, 0x1F56,   8}, {0x2170, 0x2180, -16},
+	{0x03CD, 0x03CF,  -63}, {0x1F57, 0x1F58,   8}, {0x24D0, 0x24EA, -26},
+	{0x0430, 0x0450,  -32}, {0x1F60, 0x1F68,   8}, {0xFF41, 0xFF5B, -32},
+	{0}
+	};
+
+	static const int uc_dup_table[][2] = { /* Start, End */
+	{0x0100, 0x012F}, {0x01A0, 0x01A6}, {0x03E2, 0x03EF}, {0x04CB, 0x04CC},
+	{0x0132, 0x0137}, {0x01B3, 0x01B7}, {0x0460, 0x0481}, {0x04D0, 0x04EB},
+	{0x0139, 0x0149}, {0x01CD, 0x01DD}, {0x0490, 0x04BF}, {0x04EE, 0x04F5},
+	{0x014A, 0x0178}, {0x01DE, 0x01EF}, {0x04BF, 0x04BF}, {0x04F8, 0x04F9},
+	{0x0179, 0x017E}, {0x01F4, 0x01F5}, {0x04C1, 0x04C4}, {0x1E00, 0x1E95},
+	{0x018B, 0x018B}, {0x01FA, 0x0218}, {0x04C7, 0x04C8}, {0x1EA0, 0x1EF9},
+	{0}
+	};
+
+	static const int uc_word_table[][2] = { /* Offset, Value */
+	{0x00FF, 0x0178}, {0x01AD, 0x01AC}, {0x01F3, 0x01F1}, {0x0269, 0x0196},
+	{0x0183, 0x0182}, {0x01B0, 0x01AF}, {0x0253, 0x0181}, {0x026F, 0x019C},
+	{0x0185, 0x0184}, {0x01B9, 0x01B8}, {0x0254, 0x0186}, {0x0272, 0x019D},
+	{0x0188, 0x0187}, {0x01BD, 0x01BC}, {0x0259, 0x018F}, {0x0275, 0x019F},
+	{0x018C, 0x018B}, {0x01C6, 0x01C4}, {0x025B, 0x0190}, {0x0283, 0x01A9},
+	{0x0192, 0x0191}, {0x01C9, 0x01C7}, {0x0260, 0x0193}, {0x0288, 0x01AE},
+	{0x0199, 0x0198}, {0x01CC, 0x01CA}, {0x0263, 0x0194}, {0x0292, 0x01B7},
+	{0x01A8, 0x01A7}, {0x01DD, 0x018E}, {0x0268, 0x0197},
+	{0}
+	};
+
+	int i, r;
+	ntfschar *uc;
+
+	uc = ntfs_malloc_nofs(default_upcase_len * sizeof(ntfschar));
+	if (!uc)
+		return uc;
+	memset(uc, 0, default_upcase_len * sizeof(ntfschar));
+	for (i = 0; i < default_upcase_len; i++)
+		uc[i] = cpu_to_le16(i);
+	for (r = 0; uc_run_table[r][0]; r++)
+		for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++)
+			uc[i] = cpu_to_le16((le16_to_cpu(uc[i]) +
+					uc_run_table[r][2]));
+	for (r = 0; uc_dup_table[r][0]; r++)
+		for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2)
+			uc[i + 1] = cpu_to_le16(le16_to_cpu(uc[i + 1]) - 1);
+	for (r = 0; uc_word_table[r][0]; r++)
+		uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]);
+	return uc;
+}
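As a concrete check of the run-table arithmetic above: 'a' (U+0061) falls in the run {0x0061, 0x007B, -32} and so maps to U+0041 'A', and Cyrillic U+0430 falls in {0x0430, 0x0450, -32} and maps to U+0410. Below is a minimal standalone userspace sketch of the same idea (hypothetical illustration only, not part of this patch; it drops the cpu_to_le16()/le16_to_cpu() handling since byte order does not matter for the arithmetic):

#include <stdio.h>
#include <stdint.h>

static uint16_t uc[0x10000];	/* upcase table, identity by default */

int main(void)
{
	/* Two of the runs from uc_run_table: start, end (exclusive), delta. */
	static const int runs[][3] = {
		{ 0x0061, 0x007B, -32 },	/* ASCII 'a'..'z' -> 'A'..'Z' */
		{ 0x0430, 0x0450, -32 },	/* Cyrillic small letters -> capitals */
		{ 0 }
	};
	int i, r;

	for (i = 0; i < 0x10000; i++)
		uc[i] = (uint16_t)i;
	for (r = 0; runs[r][0]; r++)
		for (i = runs[r][0]; i < runs[r][1]; i++)
			uc[i] = (uint16_t)(i + runs[r][2]);

	printf("upcase(U+0061) = U+%04X\n", (unsigned)uc[0x0061]);	/* U+0041 */
	printf("upcase(U+0430) = U+%04X\n", (unsigned)uc[0x0430]);	/* U+0410 */
	return 0;
}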
diff --git a/fs/ntfs/volume.h b/fs/ntfs/volume.h
new file mode 100644
index 0000000..4b97fa8
--- /dev/null
+++ b/fs/ntfs/volume.h
@@ -0,0 +1,171 @@
+/*
+ * volume.h - Defines for volume structures in NTFS Linux kernel driver. Part
+ *	      of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _LINUX_NTFS_VOLUME_H
+#define _LINUX_NTFS_VOLUME_H
+
+#include <linux/rwsem.h>
+
+#include "types.h"
+#include "layout.h"
+
+/*
+ * The NTFS in memory super block structure.
+ */
+typedef struct {
+	/*
+	 * FIXME: Reorder to keep elements that are commonly used together in
+	 * the same cache line, aiming at a cache line size of 32 bytes. Aim
+	 * for 64 bytes for less commonly used together elements. Put the most
+	 * commonly used elements at the front of the structure. Obviously do
+	 * this only when the structure has stabilized... (AIA)
+	 */
+	/* Device specifics. */
+	struct super_block *sb;		/* Pointer back to the super_block,
+					   so we don't have to get the offset
+					   every time. */
+	LCN nr_blocks;			/* Number of NTFS_BLOCK_SIZE bytes
+					   sized blocks on the device. */
+	/* Configuration provided by user at mount time. */
+	unsigned long flags;		/* Miscellaneous flags, see below. */
+	uid_t uid;			/* uid that files will be mounted as. */
+	gid_t gid;			/* gid that files will be mounted as. */
+	mode_t fmask;			/* The mask for file permissions. */
+	mode_t dmask;			/* The mask for directory
+					   permissions. */
+	u8 mft_zone_multiplier;		/* Initial mft zone multiplier. */
+	u8 on_errors;			/* What to do on file system errors. */
+	/* NTFS bootsector provided information. */
+	u16 sector_size;		/* in bytes */
+	u8 sector_size_bits;		/* log2(sector_size) */
+	u32 cluster_size;		/* in bytes */
+	u32 cluster_size_mask;		/* cluster_size - 1 */
+	u8 cluster_size_bits;		/* log2(cluster_size) */
+	u32 mft_record_size;		/* in bytes */
+	u32 mft_record_size_mask;	/* mft_record_size - 1 */
+	u8 mft_record_size_bits;	/* log2(mft_record_size) */
+	u32 index_record_size;		/* in bytes */
+	u32 index_record_size_mask;	/* index_record_size - 1 */
+	u8 index_record_size_bits;	/* log2(index_record_size) */
+	LCN nr_clusters;		/* Volume size in clusters == number of
+					   bits in lcn bitmap. */
+	LCN mft_lcn;			/* Cluster location of mft data. */
+	LCN mftmirr_lcn;		/* Cluster location of copy of mft. */
+	u64 serial_no;			/* The volume serial number. */
+	/* Mount specific NTFS information. */
+	u32 upcase_len;			/* Number of entries in upcase[]. */
+	ntfschar *upcase;		/* The upcase table. */
+
+	s32 attrdef_size;		/* Size of the attribute definition
+					   table in bytes. */
+	ATTR_DEF *attrdef;		/* Table of attribute definitions.
+					   Obtained from FILE_AttrDef. */
+
+#ifdef NTFS_RW
+	/* Variables used by the cluster and mft allocators. */
+	s64 mft_data_pos;		/* Mft record number at which to
+					   allocate the next mft record. */
+	LCN mft_zone_start;		/* First cluster of the mft zone. */
+	LCN mft_zone_end;		/* First cluster beyond the mft zone. */
+	LCN mft_zone_pos;		/* Current position in the mft zone. */
+	LCN data1_zone_pos;		/* Current position in the first data
+					   zone. */
+	LCN data2_zone_pos;		/* Current position in the second data
+					   zone. */
+#endif /* NTFS_RW */
+
+	struct inode *mft_ino;		/* The VFS inode of $MFT. */
+
+	struct inode *mftbmp_ino;	/* Attribute inode for $MFT/$BITMAP. */
+	struct rw_semaphore mftbmp_lock; /* Lock for serializing accesses to the
+					    mft record bitmap ($MFT/$BITMAP). */
+#ifdef NTFS_RW
+	struct inode *mftmirr_ino;	/* The VFS inode of $MFTMirr. */
+	int mftmirr_size;		/* Size of mft mirror in mft records. */
+
+	struct inode *logfile_ino;	/* The VFS inode of $LogFile. */
+#endif /* NTFS_RW */
+
+	struct inode *lcnbmp_ino;	/* The VFS inode of $Bitmap. */
+	struct rw_semaphore lcnbmp_lock; /* Lock for serializing accesses to the
+					    cluster bitmap ($Bitmap/$DATA). */
+
+	struct inode *vol_ino;		/* The VFS inode of $Volume. */
+	VOLUME_FLAGS vol_flags;		/* Volume flags. */
+	u8 major_ver;			/* Ntfs major version of volume. */
+	u8 minor_ver;			/* Ntfs minor version of volume. */
+
+	struct inode *root_ino;		/* The VFS inode of the root
+					   directory. */
+	struct inode *secure_ino;	/* The VFS inode of $Secure (NTFS3.0+
+					   only, otherwise NULL). */
+	struct inode *extend_ino;	/* The VFS inode of $Extend (NTFS3.0+
+					   only, otherwise NULL). */
+#ifdef NTFS_RW
+	/* $Quota stuff is NTFS3.0+ specific.  Unused/NULL otherwise. */
+	struct inode *quota_ino;	/* The VFS inode of $Quota. */
+	struct inode *quota_q_ino;	/* Attribute inode for $Quota/$Q. */
+#endif /* NTFS_RW */
+	struct nls_table *nls_map;
+} ntfs_volume;
+
+/*
+ * Defined bits for the flags field in the ntfs_volume structure.
+ */
+typedef enum {
+	NV_Errors,		/* 1: Volume has errors, prevent remount rw. */
+	NV_ShowSystemFiles,	/* 1: Return system files in ntfs_readdir(). */
+	NV_CaseSensitive,	/* 1: Treat file names as case sensitive and
+				      create filenames in the POSIX namespace.
+				      Otherwise be case insensitive and create
+				      file names in WIN32 namespace. */
+	NV_LogFileEmpty,	/* 1: $LogFile journal is empty. */
+	NV_QuotaOutOfDate,	/* 1: $Quota is out of date. */
+} ntfs_volume_flags;
+
+/*
+ * Macro tricks to expand the NVolFoo(), NVolSetFoo(), and NVolClearFoo()
+ * functions.
+ */
+#define NVOL_FNS(flag)					\
+static inline int NVol##flag(ntfs_volume *vol)		\
+{							\
+	return test_bit(NV_##flag, &(vol)->flags);	\
+}							\
+static inline void NVolSet##flag(ntfs_volume *vol)	\
+{							\
+	set_bit(NV_##flag, &(vol)->flags);		\
+}							\
+static inline void NVolClear##flag(ntfs_volume *vol)	\
+{							\
+	clear_bit(NV_##flag, &(vol)->flags);		\
+}
+
+/* Emit the ntfs volume bitops functions. */
+NVOL_FNS(Errors)
+NVOL_FNS(ShowSystemFiles)
+NVOL_FNS(CaseSensitive)
+NVOL_FNS(LogFileEmpty)
+NVOL_FNS(QuotaOutOfDate)
+
+#endif /* _LINUX_NTFS_VOLUME_H */
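The NVOL_FNS() macro above is a token-pasting trick: each invocation stamps out a test/set/clear triple over the volume flags word using the kernel's atomic bitops, so callers write e.g. NVolSetErrors(vol) or if (NVolCaseSensitive(vol)). Below is a minimal standalone sketch of the same pattern, with plain bit masks standing in for test_bit()/set_bit()/clear_bit() (userspace illustration only, not part of this patch; the struct and flag names are invented for the example):

#include <stdio.h>

struct vol { unsigned long flags; };

enum { V_Errors, V_CaseSensitive };	/* bit numbers, analogous to ntfs_volume_flags */

#define VOL_FNS(flag)						\
static inline int Vol##flag(struct vol *v)			\
{								\
	return !!(v->flags & (1UL << V_##flag));		\
}								\
static inline void VolSet##flag(struct vol *v)			\
{								\
	v->flags |= 1UL << V_##flag;				\
}								\
static inline void VolClear##flag(struct vol *v)		\
{								\
	v->flags &= ~(1UL << V_##flag);				\
}

VOL_FNS(Errors)
VOL_FNS(CaseSensitive)

int main(void)
{
	struct vol v = { 0 };

	VolSetErrors(&v);
	printf("errors=%d case_sensitive=%d\n", VolErrors(&v),
	       VolCaseSensitive(&v));
	VolClearErrors(&v);
	printf("errors=%d\n", VolErrors(&v));
	return 0;
}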
diff --git a/fs/open.c b/fs/open.c
new file mode 100644
index 0000000..963bd81
--- /dev/null
+++ b/fs/open.c
@@ -0,0 +1,1076 @@
+/*
+ *  linux/fs/open.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/utime.h>
+#include <linux/file.h>
+#include <linux/smp_lock.h>
+#include <linux/quotaops.h>
+#include <linux/dnotify.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/namei.h>
+#include <linux/backing-dev.h>
+#include <linux/security.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+
+#include <asm/unistd.h>
+
+int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	int retval = -ENODEV;
+
+	if (sb) {
+		retval = -ENOSYS;
+		if (sb->s_op->statfs) {
+			memset(buf, 0, sizeof(*buf));
+			retval = security_sb_statfs(sb);
+			if (retval)
+				return retval;
+			retval = sb->s_op->statfs(sb, buf);
+			if (retval == 0 && buf->f_frsize == 0)
+				buf->f_frsize = buf->f_bsize;
+		}
+	}
+	return retval;
+}
+
+EXPORT_SYMBOL(vfs_statfs);
+
+static int vfs_statfs_native(struct super_block *sb, struct statfs *buf)
+{
+	struct kstatfs st;
+	int retval;
+
+	retval = vfs_statfs(sb, &st);
+	if (retval)
+		return retval;
+
+	if (sizeof(*buf) == sizeof(st))
+		memcpy(buf, &st, sizeof(st));
+	else {
+		if (sizeof buf->f_blocks == 4) {
+			if ((st.f_blocks | st.f_bfree | st.f_bavail) &
+			    0xffffffff00000000ULL)
+				return -EOVERFLOW;
+			/*
+			 * f_files and f_ffree may be -1; it's okay to stuff
+			 * that into 32 bits
+			 */
+			if (st.f_files != -1 &&
+			    (st.f_files & 0xffffffff00000000ULL))
+				return -EOVERFLOW;
+			if (st.f_ffree != -1 &&
+			    (st.f_ffree & 0xffffffff00000000ULL))
+				return -EOVERFLOW;
+		}
+
+		buf->f_type = st.f_type;
+		buf->f_bsize = st.f_bsize;
+		buf->f_blocks = st.f_blocks;
+		buf->f_bfree = st.f_bfree;
+		buf->f_bavail = st.f_bavail;
+		buf->f_files = st.f_files;
+		buf->f_ffree = st.f_ffree;
+		buf->f_fsid = st.f_fsid;
+		buf->f_namelen = st.f_namelen;
+		buf->f_frsize = st.f_frsize;
+		memset(buf->f_spare, 0, sizeof(buf->f_spare));
+	}
+	return 0;
+}
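The EOVERFLOW logic above narrows 64-bit counters into the 32-bit statfs fields: a value only fits if its high 32 bits are clear, except that f_files and f_ffree may legitimately be -1 ("unknown") and are allowed through. A minimal standalone sketch of just that check (userspace illustration only, not part of this patch; the function name is invented for the example):

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if a 64-bit counter can be stuffed into a 32-bit statfs
 * field; -1 is accepted as the "unknown" sentinel when allowed. */
static int fits_in_32bits(uint64_t v, int allow_unknown)
{
	if (allow_unknown && v == (uint64_t)-1)
		return 1;
	return !(v & 0xffffffff00000000ULL);
}

int main(void)
{
	printf("%d\n", fits_in_32bits(1000, 0));		/* 1: fits */
	printf("%d\n", fits_in_32bits(1ULL << 32, 0));		/* 0: -EOVERFLOW */
	printf("%d\n", fits_in_32bits((uint64_t)-1, 1));	/* 1: sentinel passes */
	return 0;
}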
+
+static int vfs_statfs64(struct super_block *sb, struct statfs64 *buf)
+{
+	struct kstatfs st;
+	int retval;
+
+	retval = vfs_statfs(sb, &st);
+	if (retval)
+		return retval;
+
+	if (sizeof(*buf) == sizeof(st))
+		memcpy(buf, &st, sizeof(st));
+	else {
+		buf->f_type = st.f_type;
+		buf->f_bsize = st.f_bsize;
+		buf->f_blocks = st.f_blocks;
+		buf->f_bfree = st.f_bfree;
+		buf->f_bavail = st.f_bavail;
+		buf->f_files = st.f_files;
+		buf->f_ffree = st.f_ffree;
+		buf->f_fsid = st.f_fsid;
+		buf->f_namelen = st.f_namelen;
+		buf->f_frsize = st.f_frsize;
+		memset(buf->f_spare, 0, sizeof(buf->f_spare));
+	}
+	return 0;
+}
+
+asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(path, &nd);
+	if (!error) {
+		struct statfs tmp;
+		error = vfs_statfs_native(nd.dentry->d_inode->i_sb, &tmp);
+		if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+			error = -EFAULT;
+		path_release(&nd);
+	}
+	return error;
+}
+
+
+asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64 __user *buf)
+{
+	struct nameidata nd;
+	long error;
+
+	if (sz != sizeof(*buf))
+		return -EINVAL;
+	error = user_path_walk(path, &nd);
+	if (!error) {
+		struct statfs64 tmp;
+		error = vfs_statfs64(nd.dentry->d_inode->i_sb, &tmp);
+		if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+			error = -EFAULT;
+		path_release(&nd);
+	}
+	return error;
+}
+
+
+asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user * buf)
+{
+	struct file * file;
+	struct statfs tmp;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+	error = vfs_statfs_native(file->f_dentry->d_inode->i_sb, &tmp);
+	if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+		error = -EFAULT;
+	fput(file);
+out:
+	return error;
+}
+
+asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user *buf)
+{
+	struct file * file;
+	struct statfs64 tmp;
+	int error;
+
+	if (sz != sizeof(*buf))
+		return -EINVAL;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+	error = vfs_statfs64(file->f_dentry->d_inode->i_sb, &tmp);
+	if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+		error = -EFAULT;
+	fput(file);
+out:
+	return error;
+}
+
+int do_truncate(struct dentry *dentry, loff_t length)
+{
+	int err;
+	struct iattr newattrs;
+
+	/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
+	if (length < 0)
+		return -EINVAL;
+
+	newattrs.ia_size = length;
+	newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+
+	down(&dentry->d_inode->i_sem);
+	err = notify_change(dentry, &newattrs);
+	up(&dentry->d_inode->i_sem);
+	return err;
+}
+
+static inline long do_sys_truncate(const char __user * path, loff_t length)
+{
+	struct nameidata nd;
+	struct inode * inode;
+	int error;
+
+	error = -EINVAL;
+	if (length < 0)	/* sorry, but loff_t says... */
+		goto out;
+
+	error = user_path_walk(path, &nd);
+	if (error)
+		goto out;
+	inode = nd.dentry->d_inode;
+
+	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
+	error = -EISDIR;
+	if (S_ISDIR(inode->i_mode))
+		goto dput_and_out;
+
+	error = -EINVAL;
+	if (!S_ISREG(inode->i_mode))
+		goto dput_and_out;
+
+	error = permission(inode,MAY_WRITE,&nd);
+	if (error)
+		goto dput_and_out;
+
+	error = -EROFS;
+	if (IS_RDONLY(inode))
+		goto dput_and_out;
+
+	error = -EPERM;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		goto dput_and_out;
+
+	/*
+	 * Make sure that there are no leases.
+	 */
+	error = break_lease(inode, FMODE_WRITE);
+	if (error)
+		goto dput_and_out;
+
+	error = get_write_access(inode);
+	if (error)
+		goto dput_and_out;
+
+	error = locks_verify_truncate(inode, NULL, length);
+	if (!error) {
+		DQUOT_INIT(inode);
+		error = do_truncate(nd.dentry, length);
+	}
+	put_write_access(inode);
+
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+asmlinkage long sys_truncate(const char __user * path, unsigned long length)
+{
+	/* on 32-bit boxen it will cut the range 2^31--2^32-1 off */
+	return do_sys_truncate(path, (long)length);
+}
+
+static inline long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+{
+	struct inode * inode;
+	struct dentry *dentry;
+	struct file * file;
+	int error;
+
+	error = -EINVAL;
+	if (length < 0)
+		goto out;
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	/* explicitly opened as large or we are on 64-bit box */
+	if (file->f_flags & O_LARGEFILE)
+		small = 0;
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	error = -EINVAL;
+	if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE))
+		goto out_putf;
+
+	error = -EINVAL;
+	/* Cannot ftruncate over 2^31 bytes without large file support */
+	if (small && length > MAX_NON_LFS)
+		goto out_putf;
+
+	error = -EPERM;
+	if (IS_APPEND(inode))
+		goto out_putf;
+
+	error = locks_verify_truncate(inode, file, length);
+	if (!error)
+		error = do_truncate(dentry, length);
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
+{
+	return do_sys_ftruncate(fd, length, 1);
+}
+
+/* LFS versions of truncate are only needed on 32 bit machines */
+#if BITS_PER_LONG == 32
+asmlinkage long sys_truncate64(const char __user * path, loff_t length)
+{
+	return do_sys_truncate(path, length);
+}
+
+asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
+{
+	return do_sys_ftruncate(fd, length, 0);
+}
+#endif
+
+#ifdef __ARCH_WANT_SYS_UTIME
+
+/*
+ * sys_utime() can be implemented in user-level using sys_utimes().
+ * Is this for backwards compatibility?  If so, why not move it
+ * into the appropriate arch directory (for those architectures that
+ * need it).
+ */
+
+/* If times==NULL, set access and modification to current time,
+ * must be owner or have write permission.
+ * Else, update from *times, must be owner or super user.
+ */
+asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
+{
+	int error;
+	struct nameidata nd;
+	struct inode * inode;
+	struct iattr newattrs;
+
+	error = user_path_walk(filename, &nd);
+	if (error)
+		goto out;
+	inode = nd.dentry->d_inode;
+
+	error = -EROFS;
+	if (IS_RDONLY(inode))
+		goto dput_and_out;
+
+	/* Don't worry, the checks are done in inode_change_ok() */
+	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
+	if (times) {
+		error = -EPERM;
+		if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+			goto dput_and_out;
+
+		error = get_user(newattrs.ia_atime.tv_sec, &times->actime);
+		newattrs.ia_atime.tv_nsec = 0;
+		if (!error) 
+			error = get_user(newattrs.ia_mtime.tv_sec, &times->modtime);
+		newattrs.ia_mtime.tv_nsec = 0;
+		if (error)
+			goto dput_and_out;
+
+		newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET;
+	} else {
+		error = -EACCES;
+		if (IS_IMMUTABLE(inode))
+			goto dput_and_out;
+
+		if (current->fsuid != inode->i_uid &&
+		    (error = permission(inode,MAY_WRITE,&nd)) != 0)
+			goto dput_and_out;
+	}
+	down(&inode->i_sem);
+	error = notify_change(nd.dentry, &newattrs);
+	up(&inode->i_sem);
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+#endif
+
+/* If times==NULL, set access and modification to current time,
+ * must be owner or have write permission.
+ * Else, update from *times, must be owner or super user.
+ */
+long do_utimes(char __user * filename, struct timeval * times)
+{
+	int error;
+	struct nameidata nd;
+	struct inode * inode;
+	struct iattr newattrs;
+
+	error = user_path_walk(filename, &nd);
+
+	if (error)
+		goto out;
+	inode = nd.dentry->d_inode;
+
+	error = -EROFS;
+	if (IS_RDONLY(inode))
+		goto dput_and_out;
+
+	/* Don't worry, the checks are done in inode_change_ok() */
+	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
+	if (times) {
+		error = -EPERM;
+		if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+			goto dput_and_out;
+
+		newattrs.ia_atime.tv_sec = times[0].tv_sec;
+		newattrs.ia_atime.tv_nsec = times[0].tv_usec * 1000;
+		newattrs.ia_mtime.tv_sec = times[1].tv_sec;
+		newattrs.ia_mtime.tv_nsec = times[1].tv_usec * 1000;
+		newattrs.ia_valid |= ATTR_ATIME_SET | ATTR_MTIME_SET;
+	} else {
+		error = -EACCES;
+		if (IS_IMMUTABLE(inode))
+			goto dput_and_out;
+
+		if (current->fsuid != inode->i_uid &&
+		    (error = permission(inode,MAY_WRITE,&nd)) != 0)
+			goto dput_and_out;
+	}
+	down(&inode->i_sem);
+	error = notify_change(nd.dentry, &newattrs);
+	up(&inode->i_sem);
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+asmlinkage long sys_utimes(char __user * filename, struct timeval __user * utimes)
+{
+	struct timeval times[2];
+
+	if (utimes && copy_from_user(&times, utimes, sizeof(times)))
+		return -EFAULT;
+	return do_utimes(filename, utimes ? times : NULL);
+}
+
+
+/*
+ * access() needs to use the real uid/gid, not the effective uid/gid.
+ * We do this by temporarily clearing all FS-related capabilities and
+ * switching the fsuid/fsgid around to the real ones.
+ */
+asmlinkage long sys_access(const char __user * filename, int mode)
+{
+	struct nameidata nd;
+	int old_fsuid, old_fsgid;
+	kernel_cap_t old_cap;
+	int res;
+
+	if (mode & ~S_IRWXO)	/* where's F_OK, X_OK, W_OK, R_OK? */
+		return -EINVAL;
+
+	old_fsuid = current->fsuid;
+	old_fsgid = current->fsgid;
+	old_cap = current->cap_effective;
+
+	current->fsuid = current->uid;
+	current->fsgid = current->gid;
+
+	/*
+	 * Clear the capabilities if we switch to a non-root user
+	 *
+	 * FIXME: There is a race here against sys_capset.  The
+	 * capabilities can change yet we will restore the old
+	 * value below.  We should hold task_capabilities_lock,
+	 * but we cannot because user_path_walk can sleep.
+	 */
+	if (current->uid)
+		cap_clear(current->cap_effective);
+	else
+		current->cap_effective = current->cap_permitted;
+
+	res = __user_walk(filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd);
+	if (!res) {
+		res = permission(nd.dentry->d_inode, mode, &nd);
+		/* SuS v2 requires we report a read only fs too */
+		if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
+		   && !special_file(nd.dentry->d_inode->i_mode))
+			res = -EROFS;
+		path_release(&nd);
+	}
+
+	current->fsuid = old_fsuid;
+	current->fsgid = old_fsgid;
+	current->cap_effective = old_cap;
+
+	return res;
+}
+
+asmlinkage long sys_chdir(const char __user * filename)
+{
+	struct nameidata nd;
+	int error;
+
+	error = __user_walk(filename, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &nd);
+	if (error)
+		goto out;
+
+	error = permission(nd.dentry->d_inode,MAY_EXEC,&nd);
+	if (error)
+		goto dput_and_out;
+
+	set_fs_pwd(current->fs, nd.mnt, nd.dentry);
+
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+asmlinkage long sys_fchdir(unsigned int fd)
+{
+	struct file *file;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct vfsmount *mnt;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	dentry = file->f_dentry;
+	mnt = file->f_vfsmnt;
+	inode = dentry->d_inode;
+
+	error = -ENOTDIR;
+	if (!S_ISDIR(inode->i_mode))
+		goto out_putf;
+
+	error = permission(inode, MAY_EXEC, NULL);
+	if (!error)
+		set_fs_pwd(current->fs, mnt, dentry);
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+asmlinkage long sys_chroot(const char __user * filename)
+{
+	struct nameidata nd;
+	int error;
+
+	error = __user_walk(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY | LOOKUP_NOALT, &nd);
+	if (error)
+		goto out;
+
+	error = permission(nd.dentry->d_inode,MAY_EXEC,&nd);
+	if (error)
+		goto dput_and_out;
+
+	error = -EPERM;
+	if (!capable(CAP_SYS_CHROOT))
+		goto dput_and_out;
+
+	set_fs_root(current->fs, nd.mnt, nd.dentry);
+	set_fs_altroot();
+	error = 0;
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
+{
+	struct inode * inode;
+	struct dentry * dentry;
+	struct file * file;
+	int err = -EBADF;
+	struct iattr newattrs;
+
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+
+	err = -EROFS;
+	if (IS_RDONLY(inode))
+		goto out_putf;
+	err = -EPERM;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		goto out_putf;
+	down(&inode->i_sem);
+	if (mode == (mode_t) -1)
+		mode = inode->i_mode;
+	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+	err = notify_change(dentry, &newattrs);
+	up(&inode->i_sem);
+
+out_putf:
+	fput(file);
+out:
+	return err;
+}
+
+asmlinkage long sys_chmod(const char __user * filename, mode_t mode)
+{
+	struct nameidata nd;
+	struct inode * inode;
+	int error;
+	struct iattr newattrs;
+
+	error = user_path_walk(filename, &nd);
+	if (error)
+		goto out;
+	inode = nd.dentry->d_inode;
+
+	error = -EROFS;
+	if (IS_RDONLY(inode))
+		goto dput_and_out;
+
+	error = -EPERM;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		goto dput_and_out;
+
+	down(&inode->i_sem);
+	if (mode == (mode_t) -1)
+		mode = inode->i_mode;
+	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+	error = notify_change(nd.dentry, &newattrs);
+	up(&inode->i_sem);
+
+dput_and_out:
+	path_release(&nd);
+out:
+	return error;
+}
+
+static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
+{
+	struct inode * inode;
+	int error;
+	struct iattr newattrs;
+
+	error = -ENOENT;
+	if (!(inode = dentry->d_inode)) {
+		printk(KERN_ERR "chown_common: NULL inode\n");
+		goto out;
+	}
+	error = -EROFS;
+	if (IS_RDONLY(inode))
+		goto out;
+	error = -EPERM;
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		goto out;
+	newattrs.ia_valid =  ATTR_CTIME;
+	if (user != (uid_t) -1) {
+		newattrs.ia_valid |= ATTR_UID;
+		newattrs.ia_uid = user;
+	}
+	if (group != (gid_t) -1) {
+		newattrs.ia_valid |= ATTR_GID;
+		newattrs.ia_gid = group;
+	}
+	if (!S_ISDIR(inode->i_mode))
+		newattrs.ia_valid |= ATTR_KILL_SUID|ATTR_KILL_SGID;
+	down(&inode->i_sem);
+	error = notify_change(dentry, &newattrs);
+	up(&inode->i_sem);
+out:
+	return error;
+}
+
+asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(filename, &nd);
+	if (!error) {
+		error = chown_common(nd.dentry, user, group);
+		path_release(&nd);
+	}
+	return error;
+}
+
+asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk_link(filename, &nd);
+	if (!error) {
+		error = chown_common(nd.dentry, user, group);
+		path_release(&nd);
+	}
+	return error;
+}
+
+
+asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
+{
+	struct file * file;
+	int error = -EBADF;
+
+	file = fget(fd);
+	if (file) {
+		error = chown_common(file->f_dentry, user, group);
+		fput(file);
+	}
+	return error;
+}
+
+/*
+ * Note that while the flag value (low two bits) for sys_open means:
+ *	00 - read-only
+ *	01 - write-only
+ *	10 - read-write
+ *	11 - special
+ * it is changed into
+ *	00 - no permissions needed
+ *	01 - read-permission
+ *	10 - write-permission
+ *	11 - read-write
+ * for the internal routines (ie open_namei()/follow_link() etc). 00 is
+ * used by symlinks.
+ */
+struct file *filp_open(const char * filename, int flags, int mode)
+{
+	int namei_flags, error;
+	struct nameidata nd;
+
+	namei_flags = flags;
+	if ((namei_flags+1) & O_ACCMODE)
+		namei_flags++;
+	if (namei_flags & O_TRUNC)
+		namei_flags |= 2;
+
+	error = open_namei(filename, namei_flags, mode, &nd);
+	if (!error)
+		return dentry_open(nd.dentry, nd.mnt, flags);
+
+	return ERR_PTR(error);
+}
+
+EXPORT_SYMBOL(filp_open);
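Worked through, the remapping in filp_open() turns the syscall access-mode bits 0/1/2 (O_RDONLY/O_WRONLY/O_RDWR) into the internal "permissions needed" values 1/2/3, and O_TRUNC additionally demands write permission. A minimal standalone sketch of just that remapping (userspace illustration only, not part of this patch; the helper name is invented for the example):

#include <stdio.h>
#include <fcntl.h>

/* Mirrors the namei_flags computation in filp_open() above. */
static int namei_flags_of(int flags)
{
	int nf = flags;

	if ((nf + 1) & O_ACCMODE)	/* 0/1/2 -> 1/2/3, 3 stays 3 */
		nf++;
	if (nf & O_TRUNC)		/* truncation needs write permission */
		nf |= 2;
	return nf;
}

int main(void)
{
	printf("O_RDONLY         -> %d\n", namei_flags_of(O_RDONLY) & 3);	/* 1 */
	printf("O_WRONLY         -> %d\n", namei_flags_of(O_WRONLY) & 3);	/* 2 */
	printf("O_RDWR           -> %d\n", namei_flags_of(O_RDWR) & 3);		/* 3 */
	printf("O_WRONLY|O_TRUNC -> %d\n",
	       namei_flags_of(O_WRONLY | O_TRUNC) & 3);				/* 2 */
	return 0;
}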
+
+struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
+{
+	struct file * f;
+	struct inode *inode;
+	int error;
+
+	error = -ENFILE;
+	f = get_empty_filp();
+	if (!f)
+		goto cleanup_dentry;
+	f->f_flags = flags;
+	f->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+	inode = dentry->d_inode;
+	if (f->f_mode & FMODE_WRITE) {
+		error = get_write_access(inode);
+		if (error)
+			goto cleanup_file;
+	}
+
+	f->f_mapping = inode->i_mapping;
+	f->f_dentry = dentry;
+	f->f_vfsmnt = mnt;
+	f->f_pos = 0;
+	f->f_op = fops_get(inode->i_fop);
+	file_move(f, &inode->i_sb->s_files);
+
+	if (f->f_op && f->f_op->open) {
+		error = f->f_op->open(inode,f);
+		if (error)
+			goto cleanup_all;
+	}
+	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
+
+	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
+
+	/* NB: we're sure to have correct a_ops only after f_op->open */
+	if (f->f_flags & O_DIRECT) {
+		if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO) {
+			fput(f);
+			f = ERR_PTR(-EINVAL);
+		}
+	}
+
+	return f;
+
+cleanup_all:
+	fops_put(f->f_op);
+	if (f->f_mode & FMODE_WRITE)
+		put_write_access(inode);
+	file_kill(f);
+	f->f_dentry = NULL;
+	f->f_vfsmnt = NULL;
+cleanup_file:
+	put_filp(f);
+cleanup_dentry:
+	dput(dentry);
+	mntput(mnt);
+	return ERR_PTR(error);
+}
+
+EXPORT_SYMBOL(dentry_open);
+
+/*
+ * Find an empty file descriptor entry, and mark it busy.
+ */
+int get_unused_fd(void)
+{
+	struct files_struct * files = current->files;
+	int fd, error;
+
+  	error = -EMFILE;
+	spin_lock(&files->file_lock);
+
+repeat:
+ 	fd = find_next_zero_bit(files->open_fds->fds_bits, 
+				files->max_fdset, 
+				files->next_fd);
+
+	/*
+	 * N.B. For clone tasks sharing a files structure, this test
+	 * will limit the total number of files that can be opened.
+	 */
+	if (fd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+		goto out;
+
+	/* Do we need to expand the fd array or fd set?  */
+	error = expand_files(files, fd);
+	if (error < 0)
+		goto out;
+
+	if (error) {
+		/*
+		 * If we needed to expand the fd array we
+		 * might have blocked - try again.
+		 */
+		error = -EMFILE;
+		goto repeat;
+	}
+
+	FD_SET(fd, files->open_fds);
+	FD_CLR(fd, files->close_on_exec);
+	files->next_fd = fd + 1;
+#if 1
+	/* Sanity check */
+	if (files->fd[fd] != NULL) {
+		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+		files->fd[fd] = NULL;
+	}
+#endif
+	error = fd;
+
+out:
+	spin_unlock(&files->file_lock);
+	return error;
+}
+
+EXPORT_SYMBOL(get_unused_fd);
+
+static inline void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+	__FD_CLR(fd, files->open_fds);
+	if (fd < files->next_fd)
+		files->next_fd = fd;
+}
+
+void fastcall put_unused_fd(unsigned int fd)
+{
+	struct files_struct *files = current->files;
+	spin_lock(&files->file_lock);
+	__put_unused_fd(files, fd);
+	spin_unlock(&files->file_lock);
+}
+
+EXPORT_SYMBOL(put_unused_fd);
+
+/*
+ * Install a file pointer in the fd array.  
+ *
+ * The VFS is full of places where we drop the files lock between
+ * setting the open_fds bitmap and installing the file in the file
+ * array.  At any such point, we are vulnerable to a dup2() race
+ * installing a file in the array before us.  We need to detect this and
+ * fput() the struct file we are about to overwrite in this case.
+ *
+ * It should never happen - if we allow dup2() to do it, _really_ bad things
+ * will follow.
+ */
+
+void fastcall fd_install(unsigned int fd, struct file * file)
+{
+	struct files_struct *files = current->files;
+	spin_lock(&files->file_lock);
+	if (unlikely(files->fd[fd] != NULL))
+		BUG();
+	files->fd[fd] = file;
+	spin_unlock(&files->file_lock);
+}
+
+EXPORT_SYMBOL(fd_install);
+
+asmlinkage long sys_open(const char __user * filename, int flags, int mode)
+{
+	char * tmp;
+	int fd, error;
+
+#if BITS_PER_LONG != 32
+	flags |= O_LARGEFILE;
+#endif
+	tmp = getname(filename);
+	fd = PTR_ERR(tmp);
+	if (!IS_ERR(tmp)) {
+		fd = get_unused_fd();
+		if (fd >= 0) {
+			struct file *f = filp_open(tmp, flags, mode);
+			error = PTR_ERR(f);
+			if (IS_ERR(f))
+				goto out_error;
+			fd_install(fd, f);
+		}
+out:
+		putname(tmp);
+	}
+	return fd;
+
+out_error:
+	put_unused_fd(fd);
+	fd = error;
+	goto out;
+}
+EXPORT_SYMBOL_GPL(sys_open);
+
+#ifndef __alpha__
+
+/*
+ * For backward compatibility?  Maybe this should be moved
+ * into arch/i386 instead?
+ */
+asmlinkage long sys_creat(const char __user * pathname, int mode)
+{
+	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
+}
+
+#endif
+
+/*
+ * "id" is the POSIX thread ID. We use the
+ * files pointer for this..
+ */
+int filp_close(struct file *filp, fl_owner_t id)
+{
+	int retval;
+
+	/* Report and clear outstanding errors */
+	retval = filp->f_error;
+	if (retval)
+		filp->f_error = 0;
+
+	if (!file_count(filp)) {
+		printk(KERN_ERR "VFS: Close: file count is 0\n");
+		return retval;
+	}
+
+	if (filp->f_op && filp->f_op->flush) {
+		int err = filp->f_op->flush(filp);
+		if (!retval)
+			retval = err;
+	}
+
+	dnotify_flush(filp, id);
+	locks_remove_posix(filp, id);
+	fput(filp);
+	return retval;
+}
+
+EXPORT_SYMBOL(filp_close);
+
+/*
+ * Careful here! We test whether the file pointer is NULL before
+ * releasing the fd. This ensures that one clone task can't release
+ * an fd while another clone is opening it.
+ */
+asmlinkage long sys_close(unsigned int fd)
+{
+	struct file * filp;
+	struct files_struct *files = current->files;
+
+	spin_lock(&files->file_lock);
+	if (fd >= files->max_fds)
+		goto out_unlock;
+	filp = files->fd[fd];
+	if (!filp)
+		goto out_unlock;
+	files->fd[fd] = NULL;
+	FD_CLR(fd, files->close_on_exec);
+	__put_unused_fd(files, fd);
+	spin_unlock(&files->file_lock);
+	return filp_close(filp, files);
+
+out_unlock:
+	spin_unlock(&files->file_lock);
+	return -EBADF;
+}
+
+EXPORT_SYMBOL(sys_close);
+
+/*
+ * This routine simulates a hangup on the tty, to arrange that users
+ * are given clean terminals at login time.
+ */
+asmlinkage long sys_vhangup(void)
+{
+	if (capable(CAP_SYS_TTY_CONFIG)) {
+		tty_vhangup(current->signal->tty);
+		return 0;
+	}
+	return -EPERM;
+}
+
+/*
+ * Called when an inode is about to be opened.
+ * We use this to disallow opening large files on 32bit systems if
+ * the caller didn't specify O_LARGEFILE.  On 64bit systems we force
+ * on this flag in sys_open.
+ */
+int generic_file_open(struct inode * inode, struct file * filp)
+{
+	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
+		return -EFBIG;
+	return 0;
+}
+
+EXPORT_SYMBOL(generic_file_open);
+
+/*
+ * This is used by subsystems that don't want seekable
+ * file descriptors
+ */
+int nonseekable_open(struct inode *inode, struct file *filp)
+{
+	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+EXPORT_SYMBOL(nonseekable_open);
diff --git a/fs/openpromfs/Makefile b/fs/openpromfs/Makefile
new file mode 100644
index 0000000..4563199
--- /dev/null
+++ b/fs/openpromfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux Sun Openprom filesystem routines.
+#
+
+obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs.o
+
+openpromfs-objs := inode.o
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
new file mode 100644
index 0000000..1be11ce9
--- /dev/null
+++ b/fs/openpromfs/inode.c
@@ -0,0 +1,1098 @@
+/* $Id: inode.c,v 1.15 2001/11/12 09:43:39 davem Exp $
+ * openpromfs.c: /proc/openprom handling routines
+ *
+ * Copyright (C) 1996-1999 Jakub Jelinek  (jakub@redhat.com)
+ * Copyright (C) 1998      Eddie C. Dost  (ecd@skynet.be)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/openprom_fs.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+
+#define ALIASES_NNODES 64
+
+typedef struct {
+	u16	parent;
+	u16	next;
+	u16	child;
+	u16	first_prop;
+	u32	node;
+} openpromfs_node;
+
+typedef struct {
+#define OPP_STRING	0x10
+#define OPP_STRINGLIST	0x20
+#define OPP_BINARY	0x40
+#define OPP_HEXSTRING	0x80
+#define OPP_DIRTY	0x01
+#define OPP_QUOTED	0x02
+#define OPP_NOTQUOTED	0x04
+#define OPP_ASCIIZ	0x08
+	u32	flag;
+	u32	alloclen;
+	u32	len;
+	char	*value;
+	char	name[8];
+} openprom_property;
+
+static openpromfs_node *nodes;
+static int alloced;
+static u16 last_node;
+static u16 first_prop;
+static u16 options = 0xffff;
+static u16 aliases = 0xffff;
+static int aliases_nodes;
+static char *alias_names [ALIASES_NNODES];
+
+#define OPENPROM_ROOT_INO	16
+#define OPENPROM_FIRST_INO	OPENPROM_ROOT_INO
+#define NODE(ino) nodes[ino - OPENPROM_FIRST_INO]
+#define NODE2INO(node) (node + OPENPROM_FIRST_INO)
+#define NODEP2INO(no) (no + OPENPROM_FIRST_INO + last_node)
+
+static int openpromfs_create (struct inode *, struct dentry *, int, struct nameidata *);
+static int openpromfs_readdir(struct file *, void *, filldir_t);
+static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
+static int openpromfs_unlink (struct inode *, struct dentry *dentry);
+
+static ssize_t nodenum_read(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	char buffer[10];
+	
+	if (count < 0 || !inode->u.generic_ip)
+		return -EINVAL;
+	sprintf (buffer, "%8.8x\n", (u32)(long)(inode->u.generic_ip));
+	if (file->f_pos >= 9)
+		return 0;
+	if (count > 9 - file->f_pos)
+		count = 9 - file->f_pos;
+	if (copy_to_user(buf, buffer + file->f_pos, count))
+		return -EFAULT;
+	*ppos += count;
+	return count;
+}
+
+static ssize_t property_read(struct file *filp, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int i, j, k;
+	u32 node;
+	char *p, *s;
+	u32 *q;
+	openprom_property *op;
+	char buffer[64];
+	
+	if (!filp->private_data) {
+		node = nodes[(u16)((long)inode->u.generic_ip)].node;
+		i = ((u32)(long)inode->u.generic_ip) >> 16;
+		if ((u16)((long)inode->u.generic_ip) == aliases) {
+			if (i >= aliases_nodes)
+				p = NULL;
+			else
+				p = alias_names [i];
+		} else
+			for (p = prom_firstprop (node, buffer);
+			     i && p && *p;
+			     p = prom_nextprop (node, p, buffer), i--)
+				/* nothing */ ;
+		if (!p || !*p)
+			return -EIO;
+		i = prom_getproplen (node, p);
+		if (i < 0) {
+			if ((u16)((long)inode->u.generic_ip) == aliases)
+				i = 0;
+			else
+				return -EIO;
+		}
+		k = i;
+		if (i < 64) i = 64;
+		filp->private_data = kmalloc (sizeof (openprom_property)
+					      + (j = strlen (p)) + 2 * i,
+					      GFP_KERNEL);
+		if (!filp->private_data)
+			return -ENOMEM;
+		op = (openprom_property *)filp->private_data;
+		op->flag = 0;
+		op->alloclen = 2 * i;
+		strcpy (op->name, p);
+		op->value = (char *)(((unsigned long)(op->name + j + 4)) & ~3);
+		op->len = k;
+		if (k && prom_getproperty (node, p, op->value, i) < 0)
+			return -EIO;
+		op->value [k] = 0;
+		if (k) {
+			for (s = NULL, p = op->value; p < op->value + k; p++) {
+				if ((*p >= ' ' && *p <= '~') || *p == '\n') {
+					op->flag |= OPP_STRING;
+					s = p;
+					continue;
+				}
+				if (p > op->value && !*p && s == p - 1) {
+					if (p < op->value + k - 1)
+						op->flag |= OPP_STRINGLIST;
+					else
+						op->flag |= OPP_ASCIIZ;
+					continue;
+				}
+				if (k == 1 && !*p) {
+					op->flag |= (OPP_STRING|OPP_ASCIIZ);
+					break;
+				}
+				op->flag &= ~(OPP_STRING|OPP_STRINGLIST);
+				if (k & 3)
+					op->flag |= OPP_HEXSTRING;
+				else
+					op->flag |= OPP_BINARY;
+				break;
+			}
+			if (op->flag & OPP_STRINGLIST)
+				op->flag &= ~(OPP_STRING);
+			if (op->flag & OPP_ASCIIZ)
+				op->len--;
+		}
+	} else
+		op = (openprom_property *)filp->private_data;
+	if (!count || !(op->len || (op->flag & OPP_ASCIIZ)))
+		return 0;
+	if (*ppos >= 0xffffff || count >= 0xffffff)
+		return -EINVAL;
+	if (op->flag & OPP_STRINGLIST) {
+		for (k = 0, p = op->value; p < op->value + op->len; p++)
+			if (!*p)
+				k++;
+		i = op->len + 4 * k + 3;
+	} else if (op->flag & OPP_STRING) {
+		i = op->len + 3;
+	} else if (op->flag & OPP_BINARY) {
+		i = (op->len * 9) >> 2;
+	} else {
+		i = (op->len << 1) + 1;
+	}
+	k = *ppos;
+	if (k >= i) return 0;
+	if (count > i - k) count = i - k;
+	if (op->flag & OPP_STRING) {
+		if (!k) {
+			if (put_user('\'', buf))
+				return -EFAULT;
+			k++;
+			count--;
+		}
+
+		if (k + count >= i - 2)
+			j = i - 2 - k;
+		else
+			j = count;
+
+		if (j >= 0) {
+			if (copy_to_user(buf + k - *ppos,
+					 op->value + k - 1, j))
+				return -EFAULT;
+			count -= j;
+			k += j;
+		}
+
+		if (count) {
+			if (put_user('\'', &buf [k++ - *ppos]))
+				return -EFAULT;
+		}
+		if (count > 1) {
+			if (put_user('\n', &buf [k++ - *ppos]))
+				return -EFAULT;
+		}
+	} else if (op->flag & OPP_STRINGLIST) {
+		char *tmp;
+
+		tmp = kmalloc (i, GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+
+		s = tmp;
+		*s++ = '\'';
+		for (p = op->value; p < op->value + op->len; p++) {
+			if (!*p) {
+				strcpy(s, "' + '");
+				s += 5;
+				continue;
+			}
+			*s++ = *p;
+		}
+		strcpy(s, "'\n");
+
+		if (copy_to_user(buf, tmp + k, count))
+			return -EFAULT;
+
+		kfree(tmp);
+		k += count;
+
+	} else if (op->flag & OPP_BINARY) {
+		char buffer[10];
+		u32 *first, *last;
+		int first_off, last_cnt;
+
+		first = ((u32 *)op->value) + k / 9;
+		first_off = k % 9;
+		last = ((u32 *)op->value) + (k + count - 1) / 9;
+		last_cnt = (k + count) % 9;
+		if (!last_cnt) last_cnt = 9;
+
+		if (first == last) {
+			sprintf (buffer, "%08x.", *first);
+			if (copy_to_user(buf, buffer + first_off,
+					 last_cnt - first_off))
+				return -EFAULT;
+			buf += last_cnt - first_off;
+		} else {		
+			for (q = first; q <= last; q++) {
+				sprintf (buffer, "%08x.", *q);
+				if (q == first) {
+					if (copy_to_user(buf, buffer + first_off,
+							 9 - first_off))
+						return -EFAULT;
+					buf += 9 - first_off;
+				} else if (q == last) {
+					if (copy_to_user(buf, buffer, last_cnt))
+						return -EFAULT;
+					buf += last_cnt;
+				} else {
+					if (copy_to_user(buf, buffer, 9))
+						return -EFAULT;
+					buf += 9;
+				}
+			}
+		}
+
+		if (last == (u32 *)(op->value + op->len - 4) && last_cnt == 9) {
+			if (put_user('\n', (buf - 1)))
+				return -EFAULT;
+		}
+
+		k += count;
+
+	} else if (op->flag & OPP_HEXSTRING) {
+		char buffer[3];
+
+		if ((k < i - 1) && (k & 1)) {
+			sprintf (buffer, "%02x",
+				 (unsigned char) *(op->value + (k >> 1)) & 0xff);
+			if (put_user(buffer[1], &buf[k++ - *ppos]))
+				return -EFAULT;
+			count--;
+		}
+
+		for (; (count > 1) && (k < i - 1); k += 2) {
+			sprintf (buffer, "%02x",
+				 (unsigned char) *(op->value + (k >> 1)) & 0xff);
+			if (copy_to_user(buf + k - *ppos, buffer, 2))
+				return -EFAULT;
+			count -= 2;
+		}
+
+		if (count && (k < i - 1)) {
+			sprintf (buffer, "%02x",
+				 (unsigned char) *(op->value + (k >> 1)) & 0xff);
+			if (put_user(buffer[0], &buf[k++ - *ppos]))
+				return -EFAULT;
+			count--;
+		}
+
+		if (count) {
+			if (put_user('\n', &buf [k++ - *ppos]))
+				return -EFAULT;
+		}
+	}
+	count = k - *ppos;
+	*ppos = k;
+	return count;
+}
+
+static ssize_t property_write(struct file *filp, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int i, j, k;
+	char *p;
+	u32 *q;
+	void *b;
+	openprom_property *op;
+	
+	if (*ppos >= 0xffffff || count >= 0xffffff)
+		return -EINVAL;
+	if (!filp->private_data) {
+		i = property_read (filp, NULL, 0, NULL);
+		if (i)
+			return i;
+	}
+	k = *ppos;
+	op = (openprom_property *)filp->private_data;
+	if (!(op->flag & OPP_STRING)) {
+		u32 *first, *last;
+		int first_off, last_cnt;
+		u32 mask, mask2;
+		char tmp [9];
+		int forcelen = 0;
+		
+		j = k % 9;
+		for (i = 0; i < count; i++, j++) {
+			if (j == 9) j = 0;
+			if (!j) {
+				char ctmp;
+				if (get_user(ctmp, &buf[i]))
+					return -EFAULT;
+				if (ctmp != '.') {
+					if (ctmp != '\n') {
+						if (op->flag & OPP_BINARY)
+							return -EINVAL;
+						else
+							goto write_try_string;
+					} else {
+						count = i + 1;
+						forcelen = 1;
+						break;
+					}
+				}
+			} else {
+				char ctmp;
+				if (get_user(ctmp, &buf[i]))
+					return -EFAULT;
+				if (ctmp < '0' || 
+				    (ctmp > '9' && ctmp < 'A') ||
+				    (ctmp > 'F' && ctmp < 'a') ||
+				    ctmp > 'f') {
+					if (op->flag & OPP_BINARY)
+						return -EINVAL;
+					else
+						goto write_try_string;
+				}
+			}
+		}
+		op->flag |= OPP_BINARY;
+		tmp [8] = 0;
+		i = ((count + k + 8) / 9) << 2;
+		if (op->alloclen <= i) {
+			b = kmalloc (sizeof (openprom_property) + 2 * i,
+				     GFP_KERNEL);
+			if (!b)
+				return -ENOMEM;
+			memcpy (b, filp->private_data,
+				sizeof (openprom_property)
+				+ strlen (op->name) + op->alloclen);
+			memset (((char *)b) + sizeof (openprom_property)
+				+ strlen (op->name) + op->alloclen, 
+				0, 2 * i - op->alloclen);
+			op = (openprom_property *)b;
+			op->alloclen = 2*i;
+			b = filp->private_data;
+			filp->private_data = (void *)op;
+			kfree (b);
+		}
+		first = ((u32 *)op->value) + (k / 9);
+		first_off = k % 9;
+		last = (u32 *)(op->value + i);
+		last_cnt = (k + count) % 9;
+		if (first + 1 == last) {
+			memset (tmp, '0', 8);
+			if (copy_from_user(tmp + first_off, buf,
+					   (count + first_off > 8) ?
+					   8 - first_off : count))
+				return -EFAULT;
+			mask = 0xffffffff;
+			mask2 = 0xffffffff;
+			for (j = 0; j < first_off; j++)
+				mask >>= 1;
+			for (j = 8 - count - first_off; j > 0; j--)
+				mask2 <<= 1;
+			mask &= mask2;
+			if (mask) {
+				*first &= ~mask;
+				*first |= simple_strtoul (tmp, NULL, 16);
+				op->flag |= OPP_DIRTY;
+			}
+		} else {
+			op->flag |= OPP_DIRTY;
+			for (q = first; q < last; q++) {
+				if (q == first) {
+					if (first_off < 8) {
+						memset (tmp, '0', 8);
+						if (copy_from_user(tmp + first_off,
+								   buf,
+								   8 - first_off))
+							return -EFAULT;
+						mask = 0xffffffff;
+						for (j = 0; j < first_off; j++)
+							mask >>= 1;
+						*q &= ~mask;
+						*q |= simple_strtoul (tmp,NULL,16);
+					}
+					buf += 9;
+				} else if ((q == last - 1) && last_cnt
+					   && (last_cnt < 8)) {
+					memset (tmp, '0', 8);
+					if (copy_from_user(tmp, buf, last_cnt))
+						return -EFAULT;
+					mask = 0xffffffff;
+					for (j = 0; j < 8 - last_cnt; j++)
+						mask <<= 1;
+					*q &= ~mask;
+					*q |= simple_strtoul (tmp, NULL, 16);
+					buf += last_cnt;
+				} else {
+					char tchars[17]; /* XXX yuck... */
+
+					if (copy_from_user(tchars, buf, 16))
+						return -EFAULT;
+					*q = simple_strtoul (tchars, NULL, 16);
+					buf += 9;
+				}
+			}
+		}
+		if (!forcelen) {
+			if (op->len < i)
+				op->len = i;
+		} else
+			op->len = i;
+		*ppos += count;
+	}
+write_try_string:
+	if (!(op->flag & OPP_BINARY)) {
+		if (!(op->flag & (OPP_QUOTED | OPP_NOTQUOTED))) {
+			char ctmp;
+
+			/* If somebody starts writing from the middle, there is
+			 * no way to know whether the value is quoted or not.
+			 */
+			if (k > 0)
+				return -EINVAL;
+			if (get_user(ctmp, buf))
+				return -EFAULT;
+			if (ctmp == '\'') {
+				op->flag |= OPP_QUOTED;
+				buf++;
+				count--;
+				(*ppos)++;
+				if (!count) {
+					op->flag |= OPP_STRING;
+					return 1;
+				}
+			} else
+				op->flag |= OPP_NOTQUOTED;
+		}
+		op->flag |= OPP_STRING;
+		if (op->alloclen <= count + *ppos) {
+			b = kmalloc (sizeof (openprom_property)
+				     + 2 * (count + *ppos), GFP_KERNEL);
+			if (!b)
+				return -ENOMEM;
+			memcpy (b, filp->private_data,
+				sizeof (openprom_property)
+				+ strlen (op->name) + op->alloclen);
+			memset (((char *)b) + sizeof (openprom_property)
+				+ strlen (op->name) + op->alloclen, 
+				0, 2*(count - *ppos) - op->alloclen);
+			op = (openprom_property *)b;
+			op->alloclen = 2*(count + *ppos);
+			b = filp->private_data;
+			filp->private_data = (void *)op;
+			kfree (b);
+		}
+		p = op->value + *ppos - ((op->flag & OPP_QUOTED) ? 1 : 0);
+		if (copy_from_user(p, buf, count))
+			return -EFAULT;
+		op->flag |= OPP_DIRTY;
+		for (i = 0; i < count; i++, p++)
+			if (*p == '\n') {
+				*p = 0;
+				break;
+			}
+		if (i < count) {
+			op->len = p - op->value;
+			*ppos += i + 1;
+			if ((p > op->value) && (op->flag & OPP_QUOTED)
+			    && (*(p - 1) == '\''))
+				op->len--;
+		} else {
+			if (p - op->value > op->len)
+				op->len = p - op->value;
+			*ppos += count;
+		}
+	}
+	return *ppos - k;
+}
+
+int property_release (struct inode *inode, struct file *filp)
+{
+	openprom_property *op = (openprom_property *)filp->private_data;
+	int error;
+	u32 node;
+	
+	if (!op)
+		return 0;
+	lock_kernel();
+	node = nodes[(u16)((long)inode->u.generic_ip)].node;
+	if ((u16)((long)inode->u.generic_ip) == aliases) {
+		if ((op->flag & OPP_DIRTY) && (op->flag & OPP_STRING)) {
+			char *p = op->name;
+			int i = (op->value - op->name) - strlen (op->name) - 1;
+			op->value [op->len] = 0;
+			*(op->value - 1) = ' ';
+			if (i) {
+				for (p = op->value - i - 2; p >= op->name; p--)
+					p[i] = *p;
+				p = op->name + i;
+			}
+			memcpy (p - 8, "nvalias ", 8);
+			prom_feval (p - 8);
+		}
+	} else if (op->flag & OPP_DIRTY) {
+		if (op->flag & OPP_STRING) {
+			op->value [op->len] = 0;
+			error = prom_setprop (node, op->name,
+					      op->value, op->len + 1);
+			if (error <= 0)
+				printk (KERN_WARNING "openpromfs: "
+					"Couldn't write property %s\n",
+					op->name);
+		} else if ((op->flag & OPP_BINARY) || !op->len) {
+			error = prom_setprop (node, op->name,
+					      op->value, op->len);
+			if (error <= 0)
+				printk (KERN_WARNING "openpromfs: "
+					"Couldn't write property %s\n",
+					op->name);
+		} else {
+			printk (KERN_WARNING "openpromfs: "
+				"Unknown property type of %s\n",
+				op->name);
+		}
+	}
+	unlock_kernel();
+	kfree (filp->private_data);
+	return 0;
+}
+
+static struct file_operations openpromfs_prop_ops = {
+	.read		= property_read,
+	.write		= property_write,
+	.release	= property_release,
+};
+
+static struct file_operations openpromfs_nodenum_ops = {
+	.read		= nodenum_read,
+};
+
+static struct file_operations openprom_operations = {
+	.read		= generic_read_dir,
+	.readdir	= openpromfs_readdir,
+};
+
+static struct inode_operations openprom_alias_inode_operations = {
+	.create		= openpromfs_create,
+	.lookup		= openpromfs_lookup,
+	.unlink		= openpromfs_unlink,
+};
+
+static struct inode_operations openprom_inode_operations = {
+	.lookup		= openpromfs_lookup,
+};
+
+static int lookup_children(u16 n, const char * name, int len)
+{
+	int ret;
+	u16 node;
+	for (; n != 0xffff; n = nodes[n].next) {
+		node = nodes[n].child;
+		if (node != 0xffff) {
+			char buffer[128];
+			int i;
+			char *p;
+			
+			while (node != 0xffff) {
+				if (prom_getname (nodes[node].node,
+						  buffer, 128) >= 0) {
+					i = strlen (buffer);
+					if ((len == i)
+					    && !strncmp (buffer, name, len))
+						return NODE2INO(node);
+					p = strchr (buffer, '@');
+					if (p && (len == p - buffer)
+					    && !strncmp (buffer, name, len))
+						return NODE2INO(node);
+				}
+				node = nodes[node].next;
+			}
+		} else
+			continue;
+		ret = lookup_children (nodes[n].child, name, len);
+		if (ret) return ret;
+	}
+	return 0;
+}
+
+static struct dentry *openpromfs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	int ino = 0;
+#define OPFSL_DIR	0
+#define OPFSL_PROPERTY	1
+#define OPFSL_NODENUM	2
+	int type = 0;
+	char buffer[128];
+	char *p;
+	const char *name;
+	u32 n;
+	u16 dirnode;
+	unsigned int len;
+	int i;
+	struct inode *inode;
+	char buffer2[64];
+	
+	inode = NULL;
+	name = dentry->d_name.name;
+	len = dentry->d_name.len;
+	lock_kernel();
+	if (name [0] == '.' && len == 5 && !strncmp (name + 1, "node", 4)) {
+		ino = NODEP2INO(NODE(dir->i_ino).first_prop);
+		type = OPFSL_NODENUM;
+	}
+	if (!ino) {
+		u16 node = NODE(dir->i_ino).child;
+		while (node != 0xffff) {
+			if (prom_getname (nodes[node].node, buffer, 128) >= 0) {
+				i = strlen (buffer);
+				if (len == i && !strncmp (buffer, name, len)) {
+					ino = NODE2INO(node);
+					type = OPFSL_DIR;
+					break;
+				}
+				p = strchr (buffer, '@');
+				if (p && (len == p - buffer)
+				    && !strncmp (buffer, name, len)) {
+					ino = NODE2INO(node);
+					type = OPFSL_DIR;
+					break;
+				}
+			}
+			node = nodes[node].next;
+		}
+	}
+	n = NODE(dir->i_ino).node;
+	dirnode = dir->i_ino - OPENPROM_FIRST_INO;
+	if (!ino) {
+		int j = NODEP2INO(NODE(dir->i_ino).first_prop);
+		if (dirnode != aliases) {
+			for (p = prom_firstprop (n, buffer2);
+			     p && *p;
+			     p = prom_nextprop (n, p, buffer2)) {
+				j++;
+				if ((len == strlen (p))
+				    && !strncmp (p, name, len)) {
+					ino = j;
+					type = OPFSL_PROPERTY;
+					break;
+				}
+			}
+		} else {
+			int k;
+			for (k = 0; k < aliases_nodes; k++) {
+				j++;
+				if (alias_names [k]
+				    && (len == strlen (alias_names [k]))
+				    && !strncmp (alias_names [k], name, len)) {
+					ino = j;
+					type = OPFSL_PROPERTY;
+					break;
+				}
+			}
+		}
+	}
+	if (!ino) {
+		ino = lookup_children (NODE(dir->i_ino).child, name, len);
+		if (ino)
+			type = OPFSL_DIR;
+		else {
+			unlock_kernel();
+			return ERR_PTR(-ENOENT);
+		}
+	}
+	inode = iget (dir->i_sb, ino);
+	unlock_kernel();
+	if (!inode)
+		return ERR_PTR(-EINVAL);
+	switch (type) {
+	case OPFSL_DIR:
+		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+		if (ino == OPENPROM_FIRST_INO + aliases) {
+			inode->i_mode |= S_IWUSR;
+			inode->i_op = &openprom_alias_inode_operations;
+		} else
+			inode->i_op = &openprom_inode_operations;
+		inode->i_fop = &openprom_operations;
+		inode->i_nlink = 2;
+		break;
+	case OPFSL_NODENUM:
+		inode->i_mode = S_IFREG | S_IRUGO;
+		inode->i_fop = &openpromfs_nodenum_ops;
+		inode->i_nlink = 1;
+		inode->u.generic_ip = (void *)(long)(n);
+		break;
+	case OPFSL_PROPERTY:
+		if ((dirnode == options) && (len == 17)
+		    && !strncmp (name, "security-password", 17))
+			inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
+		else {
+			inode->i_mode = S_IFREG | S_IRUGO;
+			if (dirnode == options || dirnode == aliases) {
+				if (len != 4 || strncmp (name, "name", 4))
+					inode->i_mode |= S_IWUSR;
+			}
+		}
+		inode->i_fop = &openpromfs_prop_ops;
+		inode->i_nlink = 1;
+		if (inode->i_size < 0)
+			inode->i_size = 0;
+		inode->u.generic_ip = (void *)(long)(((u16)dirnode) | 
+			(((u16)(ino - NODEP2INO(NODE(dir->i_ino).first_prop) - 1)) << 16));
+		break;
+	}
+
+	inode->i_gid = 0;
+	inode->i_uid = 0;
+
+	d_add(dentry, inode);
+	return NULL;
+}
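+
+/*
+ * A minimal sketch (not from the original source) of how the lookup code
+ * above packs a property's identity into inode->u.generic_ip: the owning
+ * directory's node index goes in the low 16 bits and the property's
+ * position within that node in the high 16 bits.  Hypothetical helpers
+ * with illustrative names only; property_release() performs the same
+ * casts inline:
+ *
+ *	static inline long opp_pack(u16 dirnode, u16 propno)
+ *	{
+ *		return (long)(dirnode | ((u32)propno << 16));
+ *	}
+ *	static inline u16 opp_dirnode(long cookie) { return (u16)cookie; }
+ *	static inline u16 opp_propno(long cookie)  { return (u16)((u32)cookie >> 16); }
+ */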
+
+static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	unsigned int ino;
+	u32 n;
+	int i, j;
+	char buffer[128];
+	u16 node;
+	char *p;
+	char buffer2[64];
+
+	lock_kernel();
+	
+	ino = inode->i_ino;
+	i = filp->f_pos;
+	switch (i) {
+	case 0:
+		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out;
+		i++;
+		filp->f_pos++;
+		/* fall thru */
+	case 1:
+		if (filldir(dirent, "..", 2, i, 
+			(NODE(ino).parent == 0xffff) ? 
+			OPENPROM_ROOT_INO : NODE2INO(NODE(ino).parent), DT_DIR) < 0) 
+			goto out;
+		i++;
+		filp->f_pos++;
+		/* fall thru */
+	default:
+		i -= 2;
+		node = NODE(ino).child;
+		while (i && node != 0xffff) {
+			node = nodes[node].next;
+			i--;
+		}
+		while (node != 0xffff) {
+			if (prom_getname (nodes[node].node, buffer, 128) < 0)
+				goto out;
+			if (filldir(dirent, buffer, strlen(buffer),
+				    filp->f_pos, NODE2INO(node), DT_DIR) < 0)
+				goto out;
+			filp->f_pos++;
+			node = nodes[node].next;
+		}
+		j = NODEP2INO(NODE(ino).first_prop);
+		if (!i) {
+			if (filldir(dirent, ".node", 5, filp->f_pos, j, DT_REG) < 0)
+				goto out;
+			filp->f_pos++;
+		} else
+			i--;
+		n = NODE(ino).node;
+		if (ino == OPENPROM_FIRST_INO + aliases) {
+			for (j++; i < aliases_nodes; i++, j++) {
+				if (alias_names [i]) {
+					if (filldir (dirent, alias_names [i], 
+						strlen (alias_names [i]), 
+						filp->f_pos, j, DT_REG) < 0) goto out; 
+					filp->f_pos++;
+				}
+			}
+		} else {
+			for (p = prom_firstprop (n, buffer2);
+			     p && *p;
+			     p = prom_nextprop (n, p, buffer2)) {
+				j++;
+				if (i) i--;
+				else {
+					if (filldir(dirent, p, strlen(p),
+						    filp->f_pos, j, DT_REG) < 0)
+						goto out;
+					filp->f_pos++;
+				}
+			}
+		}
+	}
+out:
+	unlock_kernel();
+	return 0;
+}
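+
+/*
+ * Directory layout implied by openpromfs_readdir() above (summarised
+ * here; the offsets are not spelled out in the original): f_pos 0 is
+ * ".", 1 is "..", positions 2 .. 2+N-1 are the node's N children, the
+ * next slot is the ".node" file, and the remaining slots are the node's
+ * properties (or the alias names when reading the aliases directory).
+ */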
+
+static int openpromfs_create (struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	char *p;
+	struct inode *inode;
+	
+	if (!dir)
+		return -ENOENT;
+	if (dentry->d_name.len > 256)
+		return -EINVAL;
+	p = kmalloc (dentry->d_name.len + 1, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	strncpy (p, dentry->d_name.name, dentry->d_name.len);
+	p [dentry->d_name.len] = 0;
+	lock_kernel();
+	if (aliases_nodes == ALIASES_NNODES) {
+		kfree(p);
+		unlock_kernel();
+		return -EIO;
+	}
+	alias_names [aliases_nodes++] = p;
+	inode = iget (dir->i_sb,
+			NODEP2INO(NODE(dir->i_ino).first_prop) + aliases_nodes);
+	if (!inode) {
+		unlock_kernel();
+		return -EINVAL;
+	}
+	inode->i_mode = S_IFREG | S_IRUGO | S_IWUSR;
+	inode->i_fop = &openpromfs_prop_ops;
+	inode->i_nlink = 1;
+	if (inode->i_size < 0) inode->i_size = 0;
+	inode->u.generic_ip = (void *)(long)(((u16)aliases) | 
+			(((u16)(aliases_nodes - 1)) << 16));
+	unlock_kernel();
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static int openpromfs_unlink (struct inode *dir, struct dentry *dentry)
+{
+	unsigned int len;
+	char *p;
+	const char *name;
+	int i;
+	
+	name = dentry->d_name.name;
+	len = dentry->d_name.len;
+	lock_kernel();
+	for (i = 0; i < aliases_nodes; i++)
+		if ((strlen (alias_names [i]) == len)
+		    && !strncmp (name, alias_names[i], len)) {
+			char buffer[512];
+			
+			p = alias_names [i];
+			alias_names [i] = NULL;
+			kfree (p);
+			strcpy (buffer, "nvunalias ");
+			memcpy (buffer + 10, name, len);
+			buffer [10 + len] = 0;
+			prom_feval (buffer);
+		}
+	unlock_kernel();
+	return 0;
+}
+
+/* {{{ init section */
+static int __init check_space (u16 n)
+{
+	unsigned long pages;
+
+	if ((1 << alloced) * PAGE_SIZE < (n + 2) * sizeof(openpromfs_node)) {
+		pages = __get_free_pages (GFP_KERNEL, alloced + 1);
+		if (!pages)
+			return -1;
+
+		if (nodes) {
+			memcpy ((char *)pages, (char *)nodes,
+				(1 << alloced) * PAGE_SIZE);
+			free_pages ((unsigned long)nodes, alloced);
+		}
+		alloced++;
+		nodes = (openpromfs_node *)pages;
+	}
+	return 0;
+}
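+
+/*
+ * Sizing sketch (assuming 4 KiB pages): openpromfs_node holds four u16
+ * indices plus a u32 PROM handle, roughly 12 bytes, so the initial
+ * single page covers about 340 nodes; check_space() doubles the
+ * allocation order whenever entry n + 2 would no longer fit.
+ */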
+
+static u16 __init get_nodes (u16 parent, u32 node)
+{
+	char *p;
+	u16 n = last_node++, i;
+	char buffer[64];
+
+	if (check_space (n) < 0)
+		return 0xffff;
+	nodes[n].parent = parent;
+	nodes[n].node = node;
+	nodes[n].next = 0xffff;
+	nodes[n].child = 0xffff;
+	nodes[n].first_prop = first_prop++;
+	if (!parent) {
+		char buffer[8];
+		int j;
+		
+		if ((j = prom_getproperty (node, "name", buffer, 8)) >= 0) {
+		    buffer[j] = 0;
+		    if (!strcmp (buffer, "options"))
+			options = n;
+		    else if (!strcmp (buffer, "aliases"))
+		        aliases = n;
+		}
+	}
+	if (n != aliases)
+		for (p = prom_firstprop (node, buffer);
+		     p && p != (char *)-1 && *p;
+		     p = prom_nextprop (node, p, buffer))
+			first_prop++;
+	else {
+		char *q;
+		for (p = prom_firstprop (node, buffer);
+		     p && p != (char *)-1 && *p;
+		     p = prom_nextprop (node, p, buffer)) {
+			if (aliases_nodes == ALIASES_NNODES)
+				break;
+			for (i = 0; i < aliases_nodes; i++)
+				if (!strcmp (p, alias_names [i]))
+					break;
+			if (i < aliases_nodes)
+				continue;
+			q = kmalloc (strlen (p) + 1, GFP_KERNEL);
+			if (!q)
+				return 0xffff;
+			strcpy (q, p);
+			alias_names [aliases_nodes++] = q;
+		}
+		first_prop += ALIASES_NNODES;
+	}
+	node = prom_getchild (node);
+	if (node) {
+		parent = get_nodes (n, node);
+		if (parent == 0xffff)
+			return 0xffff;
+		nodes[n].child = parent;
+		while ((node = prom_getsibling (node)) != 0) {
+			i = get_nodes (n, node);
+			if (i == 0xffff)
+				return 0xffff;
+			nodes[parent].next = i;
+			parent = i;
+		}
+	}
+	return n;
+}
+
+static void openprom_read_inode(struct inode * inode)
+{
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	if (inode->i_ino == OPENPROM_ROOT_INO) {
+		inode->i_op = &openprom_inode_operations;
+		inode->i_fop = &openprom_operations;
+		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+	}
+}
+
+static int openprom_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NOATIME;
+	return 0;
+}
+
+static struct super_operations openprom_sops = { 
+	.read_inode	= openprom_read_inode,
+	.statfs		= simple_statfs,
+	.remount_fs	= openprom_remount,
+};
+
+static int openprom_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct inode * root_inode;
+
+	s->s_flags |= MS_NOATIME;
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = OPENPROM_SUPER_MAGIC;
+	s->s_op = &openprom_sops;
+	s->s_time_gran = 1;
+	root_inode = iget(s, OPENPROM_ROOT_INO);
+	if (!root_inode)
+		goto out_no_root;
+	s->s_root = d_alloc_root(root_inode);
+	if (!s->s_root)
+		goto out_no_root;
+	return 0;
+
+out_no_root:
+	printk("openprom_fill_super: get root inode failed\n");
+	iput(root_inode);
+	return -ENOMEM;
+}
+
+static struct super_block *openprom_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, openprom_fill_super);
+}
+
+static struct file_system_type openprom_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "openpromfs",
+	.get_sb		= openprom_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static int __init init_openprom_fs(void)
+{
+	nodes = (openpromfs_node *)__get_free_pages(GFP_KERNEL, 0);
+	if (!nodes) {
+		printk (KERN_WARNING "openpromfs: can't get free page\n");
+		return -EIO;
+	}
+	if (get_nodes (0xffff, prom_root_node) == 0xffff) {
+		printk (KERN_WARNING "openpromfs: couldn't setup tree\n");
+		return -EIO;
+	}
+	nodes[last_node].first_prop = first_prop;
+	return register_filesystem(&openprom_fs_type);
+}
+
+static void __exit exit_openprom_fs(void)
+{
+	int i;
+	unregister_filesystem(&openprom_fs_type);
+	free_pages ((unsigned long)nodes, alloced);
+	for (i = 0; i < aliases_nodes; i++)
+		if (alias_names [i])
+			kfree (alias_names [i]);
+	nodes = NULL;
+}
+
+module_init(init_openprom_fs)
+module_exit(exit_openprom_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
new file mode 100644
index 0000000..deb25b6
--- /dev/null
+++ b/fs/partitions/Kconfig
@@ -0,0 +1,228 @@
+#
+# Partition configuration
+#
+config PARTITION_ADVANCED
+	bool "Advanced partition selection"
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned under an operating system running on a different
+	  architecture than your Linux system.
+
+	  Note that the answer to this question won't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about foreign partitioning schemes.
+
+	  If unsure, say N.
+
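+# Example (hypothetical .config excerpt): a SPARC box that also needs to
+# read Mac-formatted disks might set
+#	CONFIG_PARTITION_ADVANCED=y
+#	CONFIG_MAC_PARTITION=y
+#	CONFIG_SUN_PARTITION=y
+# and leave the remaining schemes unset.
+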
+config ACORN_PARTITION
+	bool "Acorn partition support" if PARTITION_ADVANCED
+	default y if ARCH_ACORN
+	help
+	  Support hard disks partitioned under Acorn operating systems.
+
+config ACORN_PARTITION_CUMANA
+	bool "Cumana partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned using the Cumana interface on Acorn machines.
+
+config ACORN_PARTITION_EESOX
+	bool "EESOX partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+
+config ACORN_PARTITION_ICS
+	bool "ICS partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned using the ICS interface on Acorn machines.
+
+config ACORN_PARTITION_ADFS
+	bool "Native filecore partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+	help
+	  The Acorn Disc Filing System is the standard file system of the
+	  RiscOS operating system which runs on Acorn's ARM-based Risc PC
+	  systems and the Acorn Archimedes range of machines.  If you say
+	  `Y' here, Linux will support disk partitions created under ADFS.
+
+config ACORN_PARTITION_POWERTEC
+	bool "PowerTec partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+	help
+	  Support reading partition tables created on Acorn machines using
+	  the PowerTec SCSI drive.
+
+config ACORN_PARTITION_RISCIX
+	bool "RISCiX partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+	default y if ARCH_ACORN
+	help
+	  Once upon a time, there was a native Unix port for the Acorn series
+	  of machines called RISCiX.  If you say 'Y' here, Linux will be able
+	  to read disks partitioned under RISCiX.
+
+config OSF_PARTITION
+	bool "Alpha OSF partition support" if PARTITION_ADVANCED
+	default y if ALPHA
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned on an Alpha machine.
+
+config AMIGA_PARTITION
+	bool "Amiga partition table support" if PARTITION_ADVANCED
+	default y if (AMIGA || AFFS_FS=y)
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned under AmigaOS.
+
+config ATARI_PARTITION
+	bool "Atari partition table support" if PARTITION_ADVANCED
+	default y if ATARI
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned under the Atari OS.
+
+config IBM_PARTITION
+	bool "IBM disk label and partition support"
+	depends on PARTITION_ADVANCED && ARCH_S390
+	help
+	  Say Y here if you would like to be able to read the hard disk
+	  partition table format used by IBM DASD disks operating under CMS.
+	  Otherwise, say N.
+
+config MAC_PARTITION
+	bool "Macintosh partition map support" if PARTITION_ADVANCED
+	default y if MAC
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned on a Macintosh.
+
+config MSDOS_PARTITION
+	bool "PC BIOS (MSDOS partition tables) support" if PARTITION_ADVANCED
+	default y
+	help
+	  Say Y here.
+
+config BSD_DISKLABEL
+	bool "BSD disklabel (FreeBSD partition tables) support"
+	depends on PARTITION_ADVANCED && MSDOS_PARTITION
+	help
+	  FreeBSD uses its own hard disk partition scheme on your PC. It
+	  requires only one entry in the primary partition table of your disk
+	  and manages it similarly to DOS extended partitions, putting in its
+	  first sector a new partition table in BSD disklabel format. Saying Y
+	  here allows you to read these disklabels and further mount FreeBSD
+	  partitions from within Linux if you have also said Y to "UFS
+	  file system support", above. If you don't know what all this is
+	  about, say N.
+
+config MINIX_SUBPARTITION
+	bool "Minix subpartition support"
+	depends on PARTITION_ADVANCED && MSDOS_PARTITION
+	help
+	  Minix 2.0.0/2.0.2 subpartition table support for Linux.
+	  Say Y here if you want to mount and use Minix 2.0.0/2.0.2
+	  subpartitions.
+
+config SOLARIS_X86_PARTITION
+	bool "Solaris (x86) partition table support"
+	depends on PARTITION_ADVANCED && MSDOS_PARTITION
+	help
+	  Like most systems, Solaris x86 uses its own hard disk partition
+	  table format, incompatible with all others. Saying Y here allows you
+	  to read these partition tables and further mount Solaris x86
+	  partitions from within Linux if you have also said Y to "UFS
+	  file system support", above.
+
+config UNIXWARE_DISKLABEL
+	bool "Unixware slices support"
+	depends on PARTITION_ADVANCED && MSDOS_PARTITION
+	---help---
+	  Like some systems, UnixWare uses its own slice table inside a
+	  partition (VTOC - Virtual Table of Contents). Its format is
+	  incompatible with all other OSes. Saying Y here allows you to read
+	  VTOC and further mount UnixWare partitions read-only from within
+	  Linux if you have also said Y to "UFS file system support" or
+	  "System V and Coherent file system support", above.
+
+	  This is mainly used to carry data from a UnixWare box to your
+	  Linux box via a removable medium like magneto-optical, ZIP or
+	  removable IDE drives. Note, however, that a good portable way to
+	  transport files and directories between unixes (and even other
+	  operating systems) is given by the tar program ("man tar" or
+	  preferably "info tar").
+
+	  If you don't know what all this is about, say N.
+
+config LDM_PARTITION
+	bool "Windows Logical Disk Manager (Dynamic Disk) support"
+	depends on PARTITION_ADVANCED
+	---help---
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned using Windows 2000's or XP's Logical Disk Manager.
+	  They are also known as "Dynamic Disks".
+
+	  Windows 2000 introduced the concept of Dynamic Disks to get around
+	  the limitations of the PC's partitioning scheme.  The Logical Disk
+	  Manager allows the user to repartition a disk and create spanned,
+	  mirrored, striped or RAID volumes, all without the need for
+	  rebooting.
+
+	  Normal partitions are now called Basic Disks under Windows 2000 and
+	  XP.
+
+	  For a fuller description read <file:Documentation/ldm.txt>.
+
+	  If unsure, say N.
+
+config LDM_DEBUG
+	bool "Windows LDM extra logging"
+	depends on LDM_PARTITION
+	help
+	  Say Y here if you would like LDM to log verbosely.  This could be
+	  helpful if the driver doesn't work as expected and you'd like to
+	  report a bug.
+
+	  If unsure, say N.
+
+config SGI_PARTITION
+	bool "SGI partition support" if PARTITION_ADVANCED
+	default y if (SGI_IP22 || SGI_IP27 || ((MACH_JAZZ || SNI_RM200_PCI) && !CPU_LITTLE_ENDIAN))
+	help
+	  Say Y here if you would like to be able to read the hard disk
+	  partition table format used by SGI machines.
+
+config ULTRIX_PARTITION
+	bool "Ultrix partition table support" if PARTITION_ADVANCED
+	default y if MACH_DECSTATION
+	help
+	  Say Y here if you would like to be able to read the hard disk
+	  partition table format used by DEC (now Compaq) Ultrix machines.
+	  Otherwise, say N.
+
+config SUN_PARTITION
+	bool "Sun partition tables support" if PARTITION_ADVANCED
+	default y if (SPARC32 || SPARC64 || SUN3 || SUN3X)
+	---help---
+	  Like most systems, SunOS uses its own hard disk partition table
+	  format, incompatible with all others. Saying Y here allows you to
+	  read these partition tables and further mount SunOS partitions from
+	  within Linux if you have also said Y to "UFS file system support",
+	  above. This is mainly used to carry data from a SPARC under SunOS to
+	  your Linux box via a removable medium like magneto-optical or ZIP
+	  drives; note however that a good portable way to transport files and
+	  directories between unixes (and even other operating systems) is
+	  given by the tar program ("man tar" or preferably "info tar"). If
+	  you don't know what all this is about, say N.
+
+config EFI_PARTITION
+	bool "EFI GUID Partition support"
+	depends on PARTITION_ADVANCED
+	select CRC32
+	help
+	  Say Y here if you would like to use hard disks under Linux which
+	  were partitioned using EFI GPT.  Presently only useful on the
+	  IA-64 platform.
+
+#      define_bool CONFIG_ACORN_PARTITION_CUMANA y
diff --git a/fs/partitions/Makefile b/fs/partitions/Makefile
new file mode 100644
index 0000000..4c83c17
--- /dev/null
+++ b/fs/partitions/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := check.o
+
+obj-$(CONFIG_DEVFS_FS) += devfs.o
+obj-$(CONFIG_ACORN_PARTITION) += acorn.o
+obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
+obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_MAC_PARTITION) += mac.o
+obj-$(CONFIG_LDM_PARTITION) += ldm.o
+obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
+obj-$(CONFIG_OSF_PARTITION) += osf.o
+obj-$(CONFIG_SGI_PARTITION) += sgi.o
+obj-$(CONFIG_SUN_PARTITION) += sun.o
+obj-$(CONFIG_ULTRIX_PARTITION) += ultrix.o
+obj-$(CONFIG_IBM_PARTITION) += ibm.o
+obj-$(CONFIG_EFI_PARTITION) += efi.o
+obj-$(CONFIG_NEC98_PARTITION) += nec98.o msdos.o
diff --git a/fs/partitions/acorn.c b/fs/partitions/acorn.c
new file mode 100644
index 0000000..c050857
--- /dev/null
+++ b/fs/partitions/acorn.c
@@ -0,0 +1,557 @@
+/*
+ *  linux/fs/partitions/acorn.c
+ *
+ *  Copyright (c) 1996-2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Scan ADFS partitions on hard disk drives.  Unfortunately, there
+ *  isn't a standard for partitioning drives on Acorn machines, so
+ *  every single manufacturer of SCSI and IDE cards created their own
+ *  method.
+ */
+#include <linux/config.h>
+#include <linux/buffer_head.h>
+#include <linux/adfs_fs.h>
+
+#include "check.h"
+#include "acorn.h"
+
+/*
+ * Partition types. (Oh for reusability)
+ */
+#define PARTITION_RISCIX_MFM	1
+#define PARTITION_RISCIX_SCSI	2
+#define PARTITION_LINUX		9
+
+static struct adfs_discrecord *
+adfs_partition(struct parsed_partitions *state, char *name, char *data,
+	       unsigned long first_sector, int slot)
+{
+	struct adfs_discrecord *dr;
+	unsigned int nr_sects;
+
+	if (adfs_checkbblk(data))
+		return NULL;
+
+	dr = (struct adfs_discrecord *)(data + 0x1c0);
+
+	if (dr->disc_size == 0 && dr->disc_size_high == 0)
+		return NULL;
+
+	nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) |
+		   (le32_to_cpu(dr->disc_size) >> 9);
+
+	if (name)
+		printk(" [%s]", name);
+	put_partition(state, slot, first_sector, nr_sects);
+	return dr;
+}
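+
+/*
+ * Worked example (illustrative): the disc record stores the size in
+ * bytes split across disc_size (low word) and disc_size_high, so the
+ * sector count is (high << 23) | (low >> 9).  For a 512 MiB disc,
+ * disc_size = 0x20000000 and disc_size_high = 0, giving
+ * nr_sects = 0x100000 sectors of 512 bytes.
+ */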
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+
+struct riscix_part {
+	__le32	start;
+	__le32	length;
+	__le32	one;
+	char	name[16];
+};
+
+struct riscix_record {
+	__le32	magic;
+#define RISCIX_MAGIC	cpu_to_le32(0x4a657320)
+	__le32	date;
+	struct riscix_part part[8];
+};
+
+static int
+riscix_partition(struct parsed_partitions *state, struct block_device *bdev,
+		unsigned long first_sect, int slot, unsigned long nr_sects)
+{
+	Sector sect;
+	struct riscix_record *rr;
+	
+	rr = (struct riscix_record *)read_dev_sector(bdev, first_sect, &sect);
+	if (!rr)
+		return -1;
+
+	printk(" [RISCiX]");
+
+
+	if (rr->magic == RISCIX_MAGIC) {
+		unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+		int part;
+
+		printk(" <");
+
+		put_partition(state, slot++, first_sect, size);
+		for (part = 0; part < 8; part++) {
+			if (rr->part[part].one &&
+			    memcmp(rr->part[part].name, "All\0", 4)) {
+				put_partition(state, slot++,
+					le32_to_cpu(rr->part[part].start),
+					le32_to_cpu(rr->part[part].length));
+				printk("(%s)", rr->part[part].name);
+			}
+		}
+
+		printk(" >\n");
+	} else {
+		put_partition(state, slot++, first_sect, nr_sects);
+	}
+
+	put_dev_sector(sect);
+	return slot;
+}
+#endif
+
+#define LINUX_NATIVE_MAGIC 0xdeafa1de
+#define LINUX_SWAP_MAGIC   0xdeafab1e
+
+struct linux_part {
+	__le32 magic;
+	__le32 start_sect;
+	__le32 nr_sects;
+};
+
+static int
+linux_partition(struct parsed_partitions *state, struct block_device *bdev,
+		unsigned long first_sect, int slot, unsigned long nr_sects)
+{
+	Sector sect;
+	struct linux_part *linuxp;
+	unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+
+	printk(" [Linux]");
+
+	put_partition(state, slot++, first_sect, size);
+
+	linuxp = (struct linux_part *)read_dev_sector(bdev, first_sect, &sect);
+	if (!linuxp)
+		return -1;
+
+	printk(" <");
+	while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) ||
+	       linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) {
+		if (slot == state->limit)
+			break;
+		put_partition(state, slot++, first_sect +
+				 le32_to_cpu(linuxp->start_sect),
+				 le32_to_cpu(linuxp->nr_sects));
+		linuxp ++;
+	}
+	printk(" >");
+
+	put_dev_sector(sect);
+	return slot;
+}
+
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+int
+adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev)
+{
+	unsigned long first_sector = 0;
+	unsigned int start_blk = 0;
+	Sector sect;
+	unsigned char *data;
+	char *name = "CUMANA/ADFS";
+	int first = 1;
+	int slot = 1;
+
+	/*
+	 * Try Cumana style partitions - sector 6 contains ADFS boot block
+	 * with pointer to next 'drive'.
+	 *
+	 * There are unknowns in this code - is the 'cylinder number' of the
+	 * next partition relative to the start of this one - I'm assuming
+	 * it is.
+	 *
+	 * Also, which ID did Cumana use?
+	 *
+	 * This is totally unfinished, and will require more work to get it
+	 * going. Hence it is totally untested.
+	 */
+	do {
+		struct adfs_discrecord *dr;
+		unsigned int nr_sects;
+
+		data = read_dev_sector(bdev, start_blk * 2 + 6, &sect);
+		if (!data)
+			return -1;
+
+		if (slot == state->limit)
+			break;
+
+		dr = adfs_partition(state, name, data, first_sector, slot++);
+		if (!dr)
+			break;
+
+		name = NULL;
+
+		nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) *
+			   (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) *
+			   dr->secspertrack;
+
+		if (!nr_sects)
+			break;
+
+		first = 0;
+		first_sector += nr_sects;
+		start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9);
+		nr_sects = 0; /* hmm - should be partition size */
+
+		switch (data[0x1fc] & 15) {
+		case 0: /* No partition / ADFS? */
+			break;
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+		case PARTITION_RISCIX_SCSI:
+			/* RISCiX - we don't know how to find the next one. */
+			slot = riscix_partition(state, bdev, first_sector,
+						 slot, nr_sects);
+			break;
+#endif
+
+		case PARTITION_LINUX:
+			slot = linux_partition(state, bdev, first_sector,
+						slot, nr_sects);
+			break;
+		}
+		put_dev_sector(sect);
+		if (slot == -1)
+			return -1;
+	} while (1);
+	put_dev_sector(sect);
+	return first ? 0 : 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+/*
+ * Purpose: allocate ADFS partitions.
+ *
+ * Params : hd		- pointer to gendisk structure to store partition info.
+ *	    dev		- device number to access.
+ *
+ * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok.
+ *
+ * Alloc  : hda  = whole drive
+ *	    hda1 = ADFS partition on first drive.
+ *	    hda2 = non-ADFS partition.
+ */
+int
+adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev)
+{
+	unsigned long start_sect, nr_sects, sectscyl, heads;
+	Sector sect;
+	unsigned char *data;
+	struct adfs_discrecord *dr;
+	unsigned char id;
+	int slot = 1;
+
+	data = read_dev_sector(bdev, 6, &sect);
+	if (!data)
+		return -1;
+
+	dr = adfs_partition(state, "ADFS", data, 0, slot++);
+	if (!dr) {
+		put_dev_sector(sect);
+    		return 0;
+	}
+
+	heads = dr->heads + ((dr->lowsector >> 6) & 1);
+	sectscyl = dr->secspertrack * heads;
+	start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl;
+	id = data[0x1fc] & 15;
+	put_dev_sector(sect);
+
+#ifdef CONFIG_BLK_DEV_MFM
+	if (MAJOR(bdev->bd_dev) == MFM_ACORN_MAJOR) {
+		extern void xd_set_geometry(struct block_device *,
+			unsigned char, unsigned char, unsigned int);
+		xd_set_geometry(bdev, dr->secspertrack, heads, 1);
+		invalidate_bdev(bdev, 1);
+		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+	}
+#endif
+
+	/*
+	 * Work out start of non-adfs partition.
+	 */
+	nr_sects = (bdev->bd_inode->i_size >> 9) - start_sect;
+
+	if (start_sect) {
+		switch (id) {
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+		case PARTITION_RISCIX_SCSI:
+		case PARTITION_RISCIX_MFM:
+			slot = riscix_partition(state, bdev, start_sect,
+						 slot, nr_sects);
+			break;
+#endif
+
+		case PARTITION_LINUX:
+			slot = linux_partition(state, bdev, start_sect,
+						slot, nr_sects);
+			break;
+		}
+	}
+	printk("\n");
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ICS
+
+struct ics_part {
+	__le32 start;
+	__le32 size;
+};
+
+static int adfspart_check_ICSLinux(struct block_device *bdev, unsigned long block)
+{
+	Sector sect;
+	unsigned char *data = read_dev_sector(bdev, block, &sect);
+	int result = 0;
+
+	if (data) {
+		if (memcmp(data, "LinuxPart", 9) == 0)
+			result = 1;
+		put_dev_sector(sect);
+	}
+
+	return result;
+}
+
+/*
+ * Check for a valid ICS partition using the checksum.
+ */
+static inline int valid_ics_sector(const unsigned char *data)
+{
+	unsigned long sum;
+	int i;
+
+	for (i = 0, sum = 0x50617274; i < 508; i++)
+		sum += data[i];
+
+	sum -= le32_to_cpu(*(__le32 *)(&data[508]));
+
+	return sum == 0;
+}
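+
+/*
+ * Sketch (not part of the original file): the sector is valid when the
+ * constant 0x50617274 ("Part" in ASCII) plus the first 508 data bytes
+ * equals the little-endian word stored at offset 508, so a tool writing
+ * such a table would do something like:
+ *
+ *	u32 sum = 0x50617274;
+ *	for (i = 0; i < 508; i++)
+ *		sum += data[i];
+ *	*(__le32 *)&data[508] = cpu_to_le32(sum);
+ */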
+
+/*
+ * Purpose: allocate ICS partitions.
+ * Params : hd		- pointer to gendisk structure to store partition info.
+ *	    dev		- device number to access.
+ * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok.
+ * Alloc  : hda  = whole drive
+ *	    hda1 = ADFS partition 0 on first drive.
+ *	    hda2 = ADFS partition 1 on first drive.
+ *		..etc..
+ */
+int
+adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev)
+{
+	const unsigned char *data;
+	const struct ics_part *p;
+	int slot;
+	Sector sect;
+
+	/*
+	 * Try ICS style partitions - sector 0 contains partition info.
+	 */
+	data = read_dev_sector(bdev, 0, &sect);
+	if (!data)
+	    	return -1;
+
+	if (!valid_ics_sector(data)) {
+	    	put_dev_sector(sect);
+		return 0;
+	}
+
+	printk(" [ICS]");
+
+	for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
+		u32 start = le32_to_cpu(p->start);
+		s32 size = le32_to_cpu(p->size); /* yes, it's signed. */
+
+		if (slot == state->limit)
+			break;
+
+		/*
+		 * Negative sizes tell the RISC OS ICS driver to ignore
+		 * this partition - in effect it says that this does not
+		 * contain an ADFS filesystem.
+		 */
+		if (size < 0) {
+			size = -size;
+
+			/*
+			 * Our own extension - We use the first sector
+			 * of the partition to identify what type this
+			 * partition is.  We must not make this visible
+			 * to the filesystem.
+			 */
+			if (size > 1 && adfspart_check_ICSLinux(bdev, start)) {
+				start += 1;
+				size -= 1;
+			}
+		}
+
+		if (size)
+			put_partition(state, slot++, start, size);
+	}
+
+	put_dev_sector(sect);
+	printk("\n");
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+struct ptec_part {
+	__le32 unused1;
+	__le32 unused2;
+	__le32 start;
+	__le32 size;
+	__le32 unused5;
+	char type[8];
+};
+
+static inline int valid_ptec_sector(const unsigned char *data)
+{
+	unsigned char checksum = 0x2a;
+	int i;
+
+	/*
+	 * If it looks like a PC/BIOS partition, then it
+	 * probably isn't PowerTec.
+	 */
+	if (data[510] == 0x55 && data[511] == 0xaa)
+		return 0;
+
+	for (i = 0; i < 511; i++)
+		checksum += data[i];
+
+	return checksum == data[511];
+}
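+
+/*
+ * In other words: data[511] must equal 0x2a plus the sum of bytes
+ * 0..510 (mod 256); for a sector whose first 511 bytes are all zero,
+ * the stored checksum byte would simply be 0x2a.
+ */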
+
+/*
+ * Purpose: allocate PowerTec partitions.
+ * Params : hd		- pointer to gendisk structure to store partition info.
+ *	    dev		- device number to access.
+ * Returns: -1 on error, 0 for no PowerTec table, 1 for partitions ok.
+ * Alloc  : hda  = whole drive
+ *	    hda1 = ADFS partition 0 on first drive.
+ *	    hda2 = ADFS partition 1 on first drive.
+ *		..etc..
+ */
+int
+adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev)
+{
+	Sector sect;
+	const unsigned char *data;
+	const struct ptec_part *p;
+	int slot = 1;
+	int i;
+
+	data = read_dev_sector(bdev, 0, &sect);
+	if (!data)
+		return -1;
+
+	if (!valid_ptec_sector(data)) {
+		put_dev_sector(sect);
+		return 0;
+	}
+
+	printk(" [POWERTEC]");
+
+	for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
+		u32 start = le32_to_cpu(p->start);
+		u32 size = le32_to_cpu(p->size);
+
+		if (size)
+			put_partition(state, slot++, start, size);
+	}
+
+	put_dev_sector(sect);
+	printk("\n");
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+struct eesox_part {
+	char	magic[6];
+	char	name[10];
+	__le32	start;
+	__le32	unused6;
+	__le32	unused7;
+	__le32	unused8;
+};
+
+/*
+ * Guess who created this format?
+ */
+static const char eesox_name[] = {
+	'N', 'e', 'i', 'l', ' ',
+	'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' '
+};
+
+/*
+ * EESOX SCSI partition format.
+ *
+ * This is a goddamned awful partition format.  We don't seem to store
+ * the size of the partition in this table, only the start addresses.
+ *
+ * There are two possibilities where the size comes from:
+ *  1. The individual ADFS boot block entries that are placed on the disk.
+ *  2. The start address of the next entry.
+ */
+int
+adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev)
+{
+	Sector sect;
+	const unsigned char *data;
+	unsigned char buffer[256];
+	struct eesox_part *p;
+	sector_t start = 0;
+	int i, slot = 1;
+
+	data = read_dev_sector(bdev, 7, &sect);
+	if (!data)
+		return -1;
+
+	/*
+	 * "Decrypt" the partition table.  God knows why...
+	 */
+	for (i = 0; i < 256; i++)
+		buffer[i] = data[i] ^ eesox_name[i & 15];
+
+	put_dev_sector(sect);
+
+	for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) {
+		sector_t next;
+
+		if (memcmp(p->magic, "Eesox", 6))
+			break;
+
+		next = le32_to_cpu(p->start);
+		if (i)
+			put_partition(state, slot++, start, next - start);
+		start = next;
+	}
+
+	if (i != 0) {
+		sector_t size;
+
+		size = get_capacity(bdev->bd_disk);
+		put_partition(state, slot++, start, size - start);
+		printk("\n");
+	}
+
+	return i ? 1 : 0;
+}
+#endif
diff --git a/fs/partitions/acorn.h b/fs/partitions/acorn.h
new file mode 100644
index 0000000..81fd50e
--- /dev/null
+++ b/fs/partitions/acorn.h
@@ -0,0 +1,14 @@
+/*
+ * linux/fs/partitions/acorn.h
+ *
+ * Copyright (C) 1996-2001 Russell King.
+ *
+ *  I _hate_ this partitioning mess - why can't we have one defined
+ *  format, and everyone stick to it?
+ */
+
+int adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
new file mode 100644
index 0000000..3068528
--- /dev/null
+++ b/fs/partitions/amiga.c
@@ -0,0 +1,128 @@
+/*
+ *  fs/partitions/amiga.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/types.h>
+#include <linux/affs_hardblocks.h>
+
+#include "check.h"
+#include "amiga.h"
+
+static __inline__ u32
+checksum_block(__be32 *m, int size)
+{
+	u32 sum = 0;
+
+	while (size--)
+		sum += be32_to_cpu(*m++);
+	return sum;
+}
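+
+/*
+ * The RDB and partition blocks carry their own checksum longword
+ * (rdb_ChkSum / pb_ChkSum in <linux/affs_hardblocks.h>), chosen so that
+ * summing the first SummedLongs big-endian longwords of the block wraps
+ * to exactly zero, hence the "checksum_block(...) == 0" tests below.
+ */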
+
+int
+amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	Sector sect;
+	unsigned char *data;
+	struct RigidDiskBlock *rdb;
+	struct PartitionBlock *pb;
+	int start_sect, nr_sects, blk, part, res = 0;
+	int blksize = 1;	/* Multiplier for disk block size */
+	int slot = 1;
+	char b[BDEVNAME_SIZE];
+
+	for (blk = 0; ; blk++, put_dev_sector(sect)) {
+		if (blk == RDB_ALLOCATION_LIMIT)
+			goto rdb_done;
+		data = read_dev_sector(bdev, blk, &sect);
+		if (!data) {
+			if (warn_no_part)
+				printk("Dev %s: unable to read RDB block %d\n",
+				       bdevname(bdev, b), blk);
+			goto rdb_done;
+		}
+		if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
+			continue;
+
+		rdb = (struct RigidDiskBlock *)data;
+		if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0)
+			break;
+		/* Try again with 0xdc..0xdf zeroed, Windows might have
+		 * trashed it.
+		 */
+		*(__be32 *)(data+0xdc) = 0;
+		if (checksum_block((__be32 *)data,
+				be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) {
+			printk("Warning: Trashed word at 0xdc in block %d "
+				"ignored in checksum calculation\n",blk);
+			break;
+		}
+
+		printk("Dev %s: RDB in block %d has bad checksum\n",
+			       bdevname(bdev, b), blk);
+	}
+
+	/* blksize is blocks per 512 byte standard block */
+	blksize = be32_to_cpu( rdb->rdb_BlockBytes ) / 512;
+
+	printk(" RDSK (%d)", blksize * 512);	/* Be more informative */
+	blk = be32_to_cpu(rdb->rdb_PartitionList);
+	put_dev_sector(sect);
+	for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+		blk *= blksize;	/* Read in terms the partition table understands */
+		data = read_dev_sector(bdev, blk, &sect);
+		if (!data) {
+			if (warn_no_part)
+				printk("Dev %s: unable to read partition block %d\n",
+				       bdevname(bdev, b), blk);
+			goto rdb_done;
+		}
+		pb  = (struct PartitionBlock *)data;
+		blk = be32_to_cpu(pb->pb_Next);
+		if (pb->pb_ID != cpu_to_be32(IDNAME_PARTITION))
+			continue;
+		if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 )
+			continue;
+
+		/* Tell Kernel about it */
+
+		nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
+			    be32_to_cpu(pb->pb_Environment[9])) *
+			   be32_to_cpu(pb->pb_Environment[3]) *
+			   be32_to_cpu(pb->pb_Environment[5]) *
+			   blksize;
+		if (!nr_sects)
+			continue;
+		start_sect = be32_to_cpu(pb->pb_Environment[9]) *
+			     be32_to_cpu(pb->pb_Environment[3]) *
+			     be32_to_cpu(pb->pb_Environment[5]) *
+			     blksize;
+		put_partition(state,slot++,start_sect,nr_sects);
+		{
+			/* Be even more informative to aid mounting */
+			char dostype[4];
+			__be32 *dt = (__be32 *)dostype;
+			*dt = pb->pb_Environment[16];
+			if (dostype[3] < ' ')
+				printk(" (%c%c%c^%c)",
+					dostype[0], dostype[1],
+					dostype[2], dostype[3] + '@' );
+			else
+				printk(" (%c%c%c%c)",
+					dostype[0], dostype[1],
+					dostype[2], dostype[3]);
+			printk("(res %d spb %d)",
+				be32_to_cpu(pb->pb_Environment[6]),
+				be32_to_cpu(pb->pb_Environment[4]));
+		}
+		res = 1;
+	}
+	printk("\n");
+
+rdb_done:
+	return res;
+}
diff --git a/fs/partitions/amiga.h b/fs/partitions/amiga.h
new file mode 100644
index 0000000..2f3e9ce
--- /dev/null
+++ b/fs/partitions/amiga.h
@@ -0,0 +1,6 @@
+/*
+ *  fs/partitions/amiga.h
+ */
+
+int amiga_partition(struct parsed_partitions *state, struct block_device *bdev);
+
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
new file mode 100644
index 0000000..192a6ad
--- /dev/null
+++ b/fs/partitions/atari.c
@@ -0,0 +1,149 @@
+/*
+ *  fs/partitions/atari.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/ctype.h>
+#include "check.h"
+#include "atari.h"
+
+/* ++guenther: this should be settable by the user ("make config")?
+ */
+#define ICD_PARTS
+
+/* check if a partition entry looks valid -- Atari format is assumed if at
+   least one of the primary entries is ok this way */
+#define	VALID_PARTITION(pi,hdsiz)					     \
+    (((pi)->flg & 1) &&							     \
+     isalnum((pi)->id[0]) && isalnum((pi)->id[1]) && isalnum((pi)->id[2]) && \
+     be32_to_cpu((pi)->st) <= (hdsiz) &&				     \
+     be32_to_cpu((pi)->st) + be32_to_cpu((pi)->siz) <= (hdsiz))
+
+static inline int OK_id(char *s)
+{
+	return  memcmp (s, "GEM", 3) == 0 || memcmp (s, "BGM", 3) == 0 ||
+		memcmp (s, "LNX", 3) == 0 || memcmp (s, "SWP", 3) == 0 ||
+		memcmp (s, "RAW", 3) == 0 ;
+}
+
+int atari_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	Sector sect;
+	struct rootsector *rs;
+	struct partition_info *pi;
+	u32 extensect;
+	u32 hd_size;
+	int slot;
+#ifdef ICD_PARTS
+	int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
+#endif
+
+	rs = (struct rootsector *) read_dev_sector(bdev, 0, &sect);
+	if (!rs)
+		return -1;
+
+	/* Verify this is an Atari rootsector: */
+	hd_size = bdev->bd_inode->i_size >> 9;
+	if (!VALID_PARTITION(&rs->part[0], hd_size) &&
+	    !VALID_PARTITION(&rs->part[1], hd_size) &&
+	    !VALID_PARTITION(&rs->part[2], hd_size) &&
+	    !VALID_PARTITION(&rs->part[3], hd_size)) {
+		/*
+		 * If there's no valid primary partition, assume this is not an
+		 * Atari format partition table (there's no reliable magic or
+		 * the like :-()
+		 */
+		put_dev_sector(sect);
+		return 0;
+	}
+
+	pi = &rs->part[0];
+	printk (" AHDI");
+	for (slot = 1; pi < &rs->part[4] && slot < state->limit; slot++, pi++) {
+		struct rootsector *xrs;
+		Sector sect2;
+		ulong partsect;
+
+		if ( !(pi->flg & 1) )
+			continue;
+		/* active partition */
+		if (memcmp (pi->id, "XGM", 3) != 0) {
+			/* we don't care about other id's */
+			put_partition (state, slot, be32_to_cpu(pi->st),
+					be32_to_cpu(pi->siz));
+			continue;
+		}
+		/* extension partition */
+#ifdef ICD_PARTS
+		part_fmt = 1;
+#endif
+		printk(" XGM<");
+		partsect = extensect = be32_to_cpu(pi->st);
+		while (1) {
+			xrs = (struct rootsector *)read_dev_sector(bdev, partsect, &sect2);
+			if (!xrs) {
+				printk (" block %ld read failed\n", partsect);
+				put_dev_sector(sect);
+				return 0;
+			}
+
+			/* ++roman: sanity check: bit 0 of flg field must be set */
+			if (!(xrs->part[0].flg & 1)) {
+				printk( "\nFirst sub-partition in extended partition is not valid!\n" );
+				put_dev_sector(sect2);
+				break;
+			}
+
+			put_partition(state, slot,
+				   partsect + be32_to_cpu(xrs->part[0].st),
+				   be32_to_cpu(xrs->part[0].siz));
+
+			if (!(xrs->part[1].flg & 1)) {
+				/* end of linked partition list */
+				put_dev_sector(sect2);
+				break;
+			}
+			if (memcmp( xrs->part[1].id, "XGM", 3 ) != 0) {
+				printk("\nID of extended partition is not XGM!\n");
+				put_dev_sector(sect2);
+				break;
+			}
+
+			partsect = be32_to_cpu(xrs->part[1].st) + extensect;
+			put_dev_sector(sect2);
+			if (++slot == state->limit) {
+				printk( "\nMaximum number of partitions reached!\n" );
+				break;
+			}
+		}
+		printk(" >");
+	}
+#ifdef ICD_PARTS
+	if ( part_fmt!=1 ) { /* no extended partitions -> test ICD-format */
+		pi = &rs->icdpart[0];
+		/* sanity check: no ICD format if first partition invalid */
+		if (OK_id(pi->id)) {
+			printk(" ICD<");
+			for (; pi < &rs->icdpart[8] && slot < state->limit; slot++, pi++) {
+				/* accept only GEM,BGM,RAW,LNX,SWP partitions */
+				if (!((pi->flg & 1) && OK_id(pi->id)))
+					continue;
+				part_fmt = 2;
+				put_partition (state, slot,
+						be32_to_cpu(pi->st),
+						be32_to_cpu(pi->siz));
+			}
+			printk(" >");
+		}
+	}
+#endif
+	put_dev_sector(sect);
+
+	printk ("\n");
+
+	return 1;
+}
diff --git a/fs/partitions/atari.h b/fs/partitions/atari.h
new file mode 100644
index 0000000..63186b0
--- /dev/null
+++ b/fs/partitions/atari.h
@@ -0,0 +1,34 @@
+/*
+ *  fs/partitions/atari.h
+ *  Moved by Russell King from:
+ *
+ * linux/include/linux/atari_rootsec.h
+ * definitions for Atari Rootsector layout
+ * by Andreas Schwab (schwab@ls5.informatik.uni-dortmund.de)
+ *
+ * modified for ICD/Supra partitioning scheme restricted to at most 12
+ * partitions
+ * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
+ */
+
+struct partition_info
+{
+  u8 flg;			/* bit 0: active; bit 7: bootable */
+  char id[3];			/* "GEM", "BGM", "XGM", or other */
+  __be32 st;			/* start of partition */
+  __be32 siz;			/* length of partition */
+};
+
+struct rootsector
+{
+  char unused[0x156];		/* room for boot code */
+  struct partition_info icdpart[8];	/* info for ICD-partitions 5..12 */
+  char unused2[0xc];
+  u32 hd_siz;			/* size of disk in blocks */
+  struct partition_info part[4];
+  u32 bsl_st;			/* start of bad sector list */
+  u32 bsl_cnt;			/* length of bad sector list */
+  u16 checksum;			/* checksum for bootable disks */
+} __attribute__((__packed__));
+
+int atari_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
new file mode 100644
index 0000000..31cff78
--- /dev/null
+++ b/fs/partitions/check.c
@@ -0,0 +1,445 @@
+/*
+ *  fs/partitions/check.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ *
+ *  We now have independent partition support from the
+ *  block drivers, which allows all the partition code to
+ *  be grouped in one location and kept mostly self-contained.
+ *
+ *  Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kmod.h>
+#include <linux/ctype.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include "check.h"
+#include "devfs.h"
+
+#include "acorn.h"
+#include "amiga.h"
+#include "atari.h"
+#include "ldm.h"
+#include "mac.h"
+#include "msdos.h"
+#include "osf.h"
+#include "sgi.h"
+#include "sun.h"
+#include "ibm.h"
+#include "ultrix.h"
+#include "efi.h"
+
+#ifdef CONFIG_BLK_DEV_MD
+extern void md_autodetect_dev(dev_t dev);
+#endif
+
+int warn_no_part = 1; /* This is ugly: should make genhd removable media aware */
+
+static int (*check_part[])(struct parsed_partitions *, struct block_device *) = {
+	/*
+	 * Probe partition formats with tables at disk address 0
+	 * that also have an ADFS boot block at 0xdc0.
+	 */
+#ifdef CONFIG_ACORN_PARTITION_ICS
+	adfspart_check_ICS,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+	adfspart_check_POWERTEC,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+	adfspart_check_EESOX,
+#endif
+
+	/*
+	 * Now move on to formats that only have partition info at
+	 * disk address 0xdc0.  Since these may also have stale
+	 * PC/BIOS partition tables, they need to come before
+	 * the msdos entry.
+	 */
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+	adfspart_check_CUMANA,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+	adfspart_check_ADFS,
+#endif
+
+#ifdef CONFIG_EFI_PARTITION
+	efi_partition,		/* this must come before msdos */
+#endif
+#ifdef CONFIG_SGI_PARTITION
+	sgi_partition,
+#endif
+#ifdef CONFIG_LDM_PARTITION
+	ldm_partition,		/* this must come before msdos */
+#endif
+#ifdef CONFIG_NEC98_PARTITION
+	nec98_partition,	/* must come before `msdos_partition' */
+#endif
+#ifdef CONFIG_MSDOS_PARTITION
+	msdos_partition,
+#endif
+#ifdef CONFIG_OSF_PARTITION
+	osf_partition,
+#endif
+#ifdef CONFIG_SUN_PARTITION
+	sun_partition,
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+	amiga_partition,
+#endif
+#ifdef CONFIG_ATARI_PARTITION
+	atari_partition,
+#endif
+#ifdef CONFIG_MAC_PARTITION
+	mac_partition,
+#endif
+#ifdef CONFIG_ULTRIX_PARTITION
+	ultrix_partition,
+#endif
+#ifdef CONFIG_IBM_PARTITION
+	ibm_partition,
+#endif
+	NULL
+};
+ 
+/*
+ * disk_name() is used by partition check code and the genhd driver.
+ * It formats the devicename of the indicated disk into
+ * the supplied buffer (of size at least 32), and returns
+ * a pointer to that same buffer (for convenience).
+ */
+
+char *disk_name(struct gendisk *hd, int part, char *buf)
+{
+	if (!part)
+		snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
+	else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
+		snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, part);
+	else
+		snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, part);
+
+	return buf;
+}
+
+const char *bdevname(struct block_device *bdev, char *buf)
+{
+	int part = MINOR(bdev->bd_dev) - bdev->bd_disk->first_minor;
+	return disk_name(bdev->bd_disk, part, buf);
+}
+
+EXPORT_SYMBOL(bdevname);
+
+/*
+ * There's very little reason to use this, you should really
+ * have a struct block_device just about everywhere and use
+ * bdevname() instead.
+ */
+const char *__bdevname(dev_t dev, char *buffer)
+{
+	scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)",
+				MAJOR(dev), MINOR(dev));
+	return buffer;
+}
+
+EXPORT_SYMBOL(__bdevname);
+
+static struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
+{
+	struct parsed_partitions *state;
+	int i, res;
+
+	state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+#ifdef CONFIG_DEVFS_FS
+	if (hd->devfs_name[0] != '\0') {
+		printk(KERN_INFO " /dev/%s:", hd->devfs_name);
+		sprintf(state->name, "p");
+	}
+	else
+#endif
+	{
+		disk_name(hd, 0, state->name);
+		printk(KERN_INFO " %s:", state->name);
+		if (isdigit(state->name[strlen(state->name)-1]))
+			sprintf(state->name, "p");
+	}
+	state->limit = hd->minors;
+	i = res = 0;
+	while (!res && check_part[i]) {
+		memset(&state->parts, 0, sizeof(state->parts));
+		res = check_part[i++](state, bdev);
+	}
+	if (res > 0)
+		return state;
+	if (!res)
+		printk(" unknown partition table\n");
+	else if (warn_no_part)
+		printk(" unable to read partition table\n");
+	kfree(state);
+	return NULL;
+}
+
+/*
+ * sysfs bindings for partitions
+ */
+
+struct part_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct hd_struct *,char *);
+};
+
+static ssize_t 
+part_attr_show(struct kobject * kobj, struct attribute * attr, char * page)
+{
+	struct hd_struct * p = container_of(kobj,struct hd_struct,kobj);
+	struct part_attribute * part_attr = container_of(attr,struct part_attribute,attr);
+	ssize_t ret = 0;
+	if (part_attr->show)
+		ret = part_attr->show(p,page);
+	return ret;
+}
+
+static struct sysfs_ops part_sysfs_ops = {
+	.show	=	part_attr_show,
+};
+
+static ssize_t part_dev_read(struct hd_struct * p, char *page)
+{
+	struct gendisk *disk = container_of(p->kobj.parent,struct gendisk,kobj);
+	dev_t dev = MKDEV(disk->major, disk->first_minor + p->partno); 
+	return print_dev_t(page, dev);
+}
+static ssize_t part_start_read(struct hd_struct * p, char *page)
+{
+	return sprintf(page, "%llu\n",(unsigned long long)p->start_sect);
+}
+static ssize_t part_size_read(struct hd_struct * p, char *page)
+{
+	return sprintf(page, "%llu\n",(unsigned long long)p->nr_sects);
+}
+static ssize_t part_stat_read(struct hd_struct * p, char *page)
+{
+	return sprintf(page, "%8u %8llu %8u %8llu\n",
+		       p->reads, (unsigned long long)p->read_sectors,
+		       p->writes, (unsigned long long)p->write_sectors);
+}
+static struct part_attribute part_attr_dev = {
+	.attr = {.name = "dev", .mode = S_IRUGO },
+	.show	= part_dev_read
+};
+static struct part_attribute part_attr_start = {
+	.attr = {.name = "start", .mode = S_IRUGO },
+	.show	= part_start_read
+};
+static struct part_attribute part_attr_size = {
+	.attr = {.name = "size", .mode = S_IRUGO },
+	.show	= part_size_read
+};
+static struct part_attribute part_attr_stat = {
+	.attr = {.name = "stat", .mode = S_IRUGO },
+	.show	= part_stat_read
+};
+
+static struct attribute * default_attrs[] = {
+	&part_attr_dev.attr,
+	&part_attr_start.attr,
+	&part_attr_size.attr,
+	&part_attr_stat.attr,
+	NULL,
+};
+
+extern struct subsystem block_subsys;
+
+static void part_release(struct kobject *kobj)
+{
+	struct hd_struct * p = container_of(kobj,struct hd_struct,kobj);
+	kfree(p);
+}
+
+struct kobj_type ktype_part = {
+	.release	= part_release,
+	.default_attrs	= default_attrs,
+	.sysfs_ops	= &part_sysfs_ops,
+};
+
+void delete_partition(struct gendisk *disk, int part)
+{
+	struct hd_struct *p = disk->part[part-1];
+	if (!p)
+		return;
+	if (!p->nr_sects)
+		return;
+	disk->part[part-1] = NULL;
+	p->start_sect = 0;
+	p->nr_sects = 0;
+	p->reads = p->writes = p->read_sectors = p->write_sectors = 0;
+	devfs_remove("%s/part%d", disk->devfs_name, part);
+	kobject_unregister(&p->kobj);
+}
+
+void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len)
+{
+	struct hd_struct *p;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return;
+	
+	memset(p, 0, sizeof(*p));
+	p->start_sect = start;
+	p->nr_sects = len;
+	p->partno = part;
+
+	devfs_mk_bdev(MKDEV(disk->major, disk->first_minor + part),
+			S_IFBLK|S_IRUSR|S_IWUSR,
+			"%s/part%d", disk->devfs_name, part);
+
+	if (isdigit(disk->kobj.name[strlen(disk->kobj.name)-1]))
+		snprintf(p->kobj.name,KOBJ_NAME_LEN,"%sp%d",disk->kobj.name,part);
+	else
+		snprintf(p->kobj.name,KOBJ_NAME_LEN,"%s%d",disk->kobj.name,part);
+	p->kobj.parent = &disk->kobj;
+	p->kobj.ktype = &ktype_part;
+	kobject_register(&p->kobj);
+	disk->part[part-1] = p;
+}
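+
+/*
+ * Naming example (illustrative): the partition kobject inherits the
+ * parent disk's name, with a "p" inserted when that name already ends
+ * in a digit, so "hda" gets children "hda1", "hda2", ... while "loop0"
+ * gets "loop0p1", "loop0p2", ...; disk_name() above uses the same rule.
+ */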
+
+static void disk_sysfs_symlinks(struct gendisk *disk)
+{
+	struct device *target = get_device(disk->driverfs_dev);
+	if (target) {
+		sysfs_create_link(&disk->kobj,&target->kobj,"device");
+		sysfs_create_link(&target->kobj,&disk->kobj,"block");
+	}
+}
+
+/* Not exported, helper to add_disk(). */
+void register_disk(struct gendisk *disk)
+{
+	struct block_device *bdev;
+	char *s;
+	int err;
+
+	strlcpy(disk->kobj.name,disk->disk_name,KOBJ_NAME_LEN);
+	/* ewww... some of these buggers have / in name... */
+	s = strchr(disk->kobj.name, '/');
+	if (s)
+		*s = '!';
+	if ((err = kobject_add(&disk->kobj)))
+		return;
+	disk_sysfs_symlinks(disk);
+
+	/* No minors to use for partitions */
+	if (disk->minors == 1) {
+		if (disk->devfs_name[0] != '\0')
+			devfs_add_disk(disk);
+		return;
+	}
+
+	/* always add handle for the whole disk */
+	devfs_add_partitioned(disk);
+
+	/* No such device (e.g., media were just removed) */
+	if (!get_capacity(disk))
+		return;
+
+	bdev = bdget_disk(disk, 0);
+	if (!bdev)
+		return;
+
+	bdev->bd_invalidated = 1;
+	if (blkdev_get(bdev, FMODE_READ, 0) < 0)
+		return;
+	blkdev_put(bdev);
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	struct parsed_partitions *state;
+	int p, res;
+
+	if (bdev->bd_part_count)
+		return -EBUSY;
+	res = invalidate_partition(disk, 0);
+	if (res)
+		return res;
+	bdev->bd_invalidated = 0;
+	for (p = 1; p < disk->minors; p++)
+		delete_partition(disk, p);
+	if (disk->fops->revalidate_disk)
+		disk->fops->revalidate_disk(disk);
+	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+		return 0;
+	for (p = 1; p < state->limit; p++) {
+		sector_t size = state->parts[p].size;
+		sector_t from = state->parts[p].from;
+		if (!size)
+			continue;
+		add_partition(disk, p, from, size);
+#ifdef CONFIG_BLK_DEV_MD
+		if (state->parts[p].flags)
+			md_autodetect_dev(bdev->bd_dev+p);
+#endif
+	}
+	kfree(state);
+	return 0;
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
+	struct address_space *mapping = bdev->bd_inode->i_mapping;
+	struct page *page;
+
+	page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+			(filler_t *)mapping->a_ops->readpage, NULL);
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page))
+			goto fail;
+		if (PageError(page))
+			goto fail;
+		p->v = page;
+		return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+fail:
+		page_cache_release(page);
+	}
+	p->v = NULL;
+	return NULL;
+}
+
+EXPORT_SYMBOL(read_dev_sector);
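+
+/*
+ * Worked example (assuming 4 KiB pages, PAGE_CACHE_SHIFT == 12):
+ * read_dev_sector() maps 512-byte sector n to page n >> 3 of the block
+ * device's page cache, at byte offset (n & 7) << 9 within that page,
+ * so sector 11 lands in page 1 at offset 1536.
+ */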
+
+void del_gendisk(struct gendisk *disk)
+{
+	int p;
+
+	/* invalidate stuff */
+	for (p = disk->minors - 1; p > 0; p--) {
+		invalidate_partition(disk, p);
+		delete_partition(disk, p);
+	}
+	invalidate_partition(disk, 0);
+	disk->capacity = 0;
+	disk->flags &= ~GENHD_FL_UP;
+	unlink_gendisk(disk);
+	disk_stat_set_all(disk, 0);
+	disk->stamp = disk->stamp_idle = 0;
+
+	devfs_remove_disk(disk);
+
+	if (disk->driverfs_dev) {
+		sysfs_remove_link(&disk->kobj, "device");
+		sysfs_remove_link(&disk->driverfs_dev->kobj, "block");
+		put_device(disk->driverfs_dev);
+	}
+	kobject_del(&disk->kobj);
+}
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
new file mode 100644
index 0000000..43adcc6
--- /dev/null
+++ b/fs/partitions/check.h
@@ -0,0 +1,36 @@
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+
+/*
+ * add_gd_partition adds a partition's details to the device's partition
+ * description.
+ */
+enum { MAX_PART = 256 };
+
+struct parsed_partitions {
+	char name[BDEVNAME_SIZE];
+	struct {
+		sector_t from;
+		sector_t size;
+		int flags;
+	} parts[MAX_PART];
+	int next;
+	int limit;
+};
+
+static inline void
+put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
+{
+	if (n < p->limit) {
+		p->parts[n].from = from;
+		p->parts[n].size = size;
+		printk(" %s%d", p->name, n);
+	}
+}
+
+extern int warn_no_part;
+
+extern void parse_bsd(struct parsed_partitions *state,
+			struct block_device *bdev, u32 offset, u32 size,
+			int origin, char *flavour, int max_partitions);
+
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c
new file mode 100644
index 0000000..87f5044
--- /dev/null
+++ b/fs/partitions/devfs.c
@@ -0,0 +1,130 @@
+/*
+ * This tries to keep block devices away from devfs as much as possible.
+ */
+#include <linux/fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/genhd.h>
+#include <linux/bitops.h>
+#include <asm/semaphore.h>
+
+
+struct unique_numspace {
+	u32		  num_free;          /*  Num free in bits       */
+	u32		  length;            /*  Array length in bytes  */
+	unsigned long	  *bits;
+	struct semaphore  mutex;
+};
+
+static DECLARE_MUTEX(numspace_mutex);
+
+static int expand_numspace(struct unique_numspace *s)
+{
+	u32 length;
+	void *bits;
+
+	if (s->length < 16)
+		length = 16;
+	else
+		length = s->length << 1;
+
+	bits = vmalloc(length);
+	if (!bits)
+		return -ENOMEM;
+	if (s->bits) {
+		memcpy(bits, s->bits, s->length);
+		vfree(s->bits);
+	}
+		
+	s->num_free = (length - s->length) << 3;
+	s->bits = bits;
+	memset(bits + s->length, 0, length - s->length);
+	s->length = length;
+
+	return 0;
+}
+
+static int alloc_unique_number(struct unique_numspace *s)
+{
+	int rval = 0;
+
+	down(&numspace_mutex);
+	if (s->num_free < 1)
+		rval = expand_numspace(s);
+	if (!rval) {
+		rval = find_first_zero_bit(s->bits, s->length << 3);
+		--s->num_free;
+		__set_bit(rval, s->bits);
+	}
+	up(&numspace_mutex);
+
+	return rval;
+}
+
+static void dealloc_unique_number(struct unique_numspace *s, int number)
+{
+	int old_val;
+
+	if (number >= 0) {
+		down(&numspace_mutex);
+		old_val = __test_and_clear_bit(number, s->bits);
+		if (old_val)
+			++s->num_free;
+		up(&numspace_mutex);
+	}
+}
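+
+/*
+ * Illustrative behaviour: the first alloc_unique_number() on an empty
+ * numspace grows the bitmap to 16 bytes (128 bits) and returns 0, the
+ * next call returns 1, and so on; dealloc_unique_number() clears the bit
+ * so that number becomes available again.
+ */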
+
+static struct unique_numspace disc_numspace;
+static struct unique_numspace cdrom_numspace;
+
+void devfs_add_partitioned(struct gendisk *disk)
+{
+	char dirname[64], symlink[16];
+
+	devfs_mk_dir(disk->devfs_name);
+	devfs_mk_bdev(MKDEV(disk->major, disk->first_minor),
+			S_IFBLK|S_IRUSR|S_IWUSR,
+			"%s/disc", disk->devfs_name);
+
+	disk->number = alloc_unique_number(&disc_numspace);
+
+	sprintf(symlink, "discs/disc%d", disk->number);
+	sprintf(dirname, "../%s", disk->devfs_name);
+	devfs_mk_symlink(symlink, dirname);
+
+}
+
+void devfs_add_disk(struct gendisk *disk)
+{
+	devfs_mk_bdev(MKDEV(disk->major, disk->first_minor),
+			(disk->flags & GENHD_FL_CD) ?
+				S_IFBLK|S_IRUGO|S_IWUGO :
+				S_IFBLK|S_IRUSR|S_IWUSR,
+			"%s", disk->devfs_name);
+
+	if (disk->flags & GENHD_FL_CD) {
+		char dirname[64], symlink[16];
+
+		disk->number = alloc_unique_number(&cdrom_numspace);
+
+		sprintf(symlink, "cdroms/cdrom%d", disk->number);
+		sprintf(dirname, "../%s", disk->devfs_name);
+		devfs_mk_symlink(symlink, dirname);
+	}
+}
+
+void devfs_remove_disk(struct gendisk *disk)
+{
+	if (disk->minors != 1) {
+		devfs_remove("discs/disc%d", disk->number);
+		dealloc_unique_number(&disc_numspace, disk->number);
+		devfs_remove("%s/disc", disk->devfs_name);
+	}
+	if (disk->flags & GENHD_FL_CD) {
+		devfs_remove("cdroms/cdrom%d", disk->number);
+		dealloc_unique_number(&cdrom_numspace, disk->number);
+	}
+	devfs_remove(disk->devfs_name);
+}
+
+
diff --git a/fs/partitions/devfs.h b/fs/partitions/devfs.h
new file mode 100644
index 0000000..176118b
--- /dev/null
+++ b/fs/partitions/devfs.h
@@ -0,0 +1,10 @@
+
+#ifdef CONFIG_DEVFS_FS
+void devfs_add_disk(struct gendisk *dev);
+void devfs_add_partitioned(struct gendisk *dev);
+void devfs_remove_disk(struct gendisk *dev);
+#else
+# define devfs_add_disk(disk)			do { } while (0)
+# define devfs_add_partitioned(disk)		do { } while (0)
+# define devfs_remove_disk(disk)		do { } while (0)
+#endif
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
new file mode 100644
index 0000000..0f5b017
--- /dev/null
+++ b/fs/partitions/efi.c
@@ -0,0 +1,645 @@
+/************************************************************
+ * EFI GUID Partition Table handling
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
+ * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
+ *   Copyright 2000,2001,2002,2004 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ * TODO:
+ *
+ * Changelog:
+ * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
+ * - test for valid PMBR and valid PGPT before ever reading
+ *   AGPT, allow override with 'gpt' kernel command line option.
+ * - check for first/last_usable_lba outside of size of disk
+ *
+ * Tue  Mar 26 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.7-pre1 and 2.5.7-dj2
+ * - Applied patch to avoid fault in alternate header handling
+ * - cleaned up find_valid_gpt
+ * - On-disk structure and copy in memory is *always* LE now - 
+ *   swab fields as needed
+ * - remove print_gpt_header()
+ * - only use first max_p partition entries, to keep the kernel minor number
+ *   and partition numbers tied.
+ *
+ * Mon  Feb 04 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed __PRIPTR_PREFIX - not being used
+ *
+ * Mon  Jan 14 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied
+ *
+ * Thu Dec 6 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Added compare_gpts().
+ * - moved le_efi_guid_to_cpus() back into this file.  GPT is the only
+ *   thing that keeps EFI GUIDs on disk.
+ * - Changed gpt structure names and members to be simpler and more Linux-like.
+ * 
+ * Wed Oct 17 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck
+ *
+ * Wed Oct 10 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Changed function comments to DocBook style per Andreas Dilger suggestion.
+ *
+ * Mon Oct 08 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Change read_lba() to use the page cache per Al Viro's work.
+ * - print u64s properly on all architectures
+ * - fixed debug_printk(), now Dprintk()
+ *
+ * Mon Oct 01 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Style cleanups
+ * - made most functions static
+ * - Endianness addition
+ * - remove test for second alternate header, as it's not per spec,
+ *   and is unnecessary.  There's now a method to read/write the last
+ *   sector of an odd-sized disk from user space.  No tools have ever
+ *   been released which used this code, so it's effectively dead.
+ * - Per Asit Mallick of Intel, added a test for a valid PMBR.
+ * - Added kernel command line option 'gpt' to override valid PMBR test.
+ *
+ * Wed Jun  6 2001 Martin Wilck <Martin.Wilck@Fujitsu-Siemens.com>
+ * - added devfs volume UUID support (/dev/volumes/uuids) for
+ *   mounting file systems by the partition GUID. 
+ *
+ * Tue Dec  5 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Moved crc32() to linux/lib, added efi_crc32().
+ *
+ * Thu Nov 30 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Replaced Intel's CRC32 function with an equivalent
+ *   non-license-restricted version.
+ *
+ * Wed Oct 25 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Fixed the last_lba() call to return the proper last block
+ *
+ * Thu Oct 12 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Thanks to Andries Brouwer for his debugging assistance.
+ * - Code works, detects all the partitions.
+ *
+ ************************************************************/
+#include <linux/config.h>
+#include <linux/crc32.h>
+#include "check.h"
+#include "efi.h"
+
+#undef EFI_DEBUG
+#ifdef EFI_DEBUG
+#define Dprintk(x...) printk(KERN_DEBUG x)
+#else
+#define Dprintk(x...)
+#endif
+
+/* This allows a kernel command line option 'gpt' to override
+ * the test for invalid PMBR.  Not __initdata because reloading
+ * the partition tables happens after init too.
+ */
+static int force_gpt;
+static int __init
+force_gpt_fn(char *str)
+{
+	force_gpt = 1;
+	return 1;
+}
+__setup("gpt", force_gpt_fn);
+
+
+/**
+ * efi_crc32() - EFI version of crc32 function
+ * @buf: buffer to calculate crc32 of
+ * @len: length of buf
+ *
+ * Description: Returns EFI-style CRC32 value for @buf
+ * 
+ * This function uses the little endian Ethernet polynomial
+ * but seeds the function with ~0, and xor's with ~0 at the end.
+ * Note, the EFI Specification, v1.02, has a reference to
+ * Dr. Dobbs Journal, May 1994 (actually it's in May 1992).
+ */
+static inline u32
+efi_crc32(const void *buf, unsigned long len)
+{
+	return (crc32(~0L, buf, len) ^ ~0L);
+}
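+
+/*
+ * Example (see is_gpt_valid() below): to check a GPT header the stored
+ * header_crc32 is zeroed first, then
+ *
+ *	crc = efi_crc32((const unsigned char *) gpt,
+ *			le32_to_cpu(gpt->header_size));
+ *
+ * must match the value that was stored on disk.
+ */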
+
+/**
+ * last_lba(): return number of last logical block of device
+ * @bdev: block device
+ * 
+ * Description: Returns last LBA value on success, 0 on error.
+ * This is stored (by sd and ide-geometry) in
+ *  the part[0] entry for this disk, and is the number of
+ *  physical sectors available on the disk.
+ */
+static u64
+last_lba(struct block_device *bdev)
+{
+	if (!bdev || !bdev->bd_inode)
+		return 0;
+	return (bdev->bd_inode->i_size >> 9) - 1ULL;
+}
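+
+/*
+ * E.g. a 1 GiB device with 512-byte sectors has 2097152 sectors, so
+ * last_lba() returns 2097151.
+ */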
+
+static inline int
+pmbr_part_valid(struct partition *part, u64 lastlba)
+{
+        if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
+            le32_to_cpu(part->start_sect) == 1UL)
+                return 1;
+        return 0;
+}
+
+/**
+ * is_pmbr_valid(): test Protective MBR for validity
+ * @mbr: pointer to a legacy mbr structure
+ * @lastlba: last_lba for the whole device
+ *
+ * Description: Returns 1 if PMBR is valid, 0 otherwise.
+ * Validity depends on two things:
+ *  1) MSDOS signature is in the last two bytes of the MBR
+ *  2) One partition of type 0xEE is found
+ */
+static int
+is_pmbr_valid(legacy_mbr *mbr, u64 lastlba)
+{
+	int i;
+	if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
+                return 0;
+	for (i = 0; i < 4; i++)
+		if (pmbr_part_valid(&mbr->partition_record[i], lastlba))
+                        return 1;
+	return 0;
+}
+
+/**
+ * read_lba(): Read bytes from disk, starting at given LBA
+ * @bdev: block device to read from
+ * @lba: starting Logical Block Address
+ * @buffer: destination buffer
+ * @count: number of bytes to read
+ *
+ * Description:  Reads @count bytes from @bdev into @buffer.
+ * Returns number of bytes read on success, 0 on error.
+ */
+static size_t
+read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
+{
+	size_t totalreadcount = 0;
+
+	if (!bdev || !buffer || lba > last_lba(bdev))
+                return 0;
+
+	while (count) {
+		int copied = 512;
+		Sector sect;
+		unsigned char *data = read_dev_sector(bdev, lba++, &sect);
+		if (!data)
+			break;
+		if (copied > count)
+			copied = count;
+		memcpy(buffer, data, copied);
+		put_dev_sector(sect);
+		buffer += copied;
+		totalreadcount +=copied;
+		count -= copied;
+	}
+	return totalreadcount;
+}
+
+/**
+ * alloc_read_gpt_entries(): reads partition entries from disk
+ * @bdev: block device holding the GPT
+ * @gpt: GPT header describing the partition entries
+ * 
+ * Description: Returns ptes on success,  NULL on error.
+ * Allocates space for PTEs based on information found in @gpt.
+ * Notes: remember to free pte when you're done!
+ */
+static gpt_entry *
+alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
+{
+	size_t count;
+	gpt_entry *pte;
+	if (!bdev || !gpt)
+		return NULL;
+
+	count = le32_to_cpu(gpt->num_partition_entries) *
+                le32_to_cpu(gpt->sizeof_partition_entry);
+	if (!count)
+		return NULL;
+	pte = kmalloc(count, GFP_KERNEL);
+	if (!pte)
+		return NULL;
+	memset(pte, 0, count);
+
+	if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
+                     (u8 *) pte,
+		     count) < count) {
+		kfree(pte);
+                pte=NULL;
+		return NULL;
+	}
+	return pte;
+}
+
+/**
+ * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk
+ * @bdev: block device to read from
+ * @lba: the Logical Block Address of the partition table header
+ *
+ * Description: returns GPT header on success, NULL on error.  Allocates
+ * and fills a GPT header starting at @lba from @bdev.
+ * Note: remember to free gpt when finished with it.
+ */
+static gpt_header *
+alloc_read_gpt_header(struct block_device *bdev, u64 lba)
+{
+	gpt_header *gpt;
+	if (!bdev)
+		return NULL;
+
+	gpt = kmalloc(sizeof (gpt_header), GFP_KERNEL);
+	if (!gpt)
+		return NULL;
+	memset(gpt, 0, sizeof (gpt_header));
+
+	if (read_lba(bdev, lba, (u8 *) gpt,
+		     sizeof (gpt_header)) < sizeof (gpt_header)) {
+		kfree(gpt);
+                gpt=NULL;
+		return NULL;
+	}
+
+	return gpt;
+}
+
+/**
+ * is_gpt_valid() - tests one GPT header and PTEs for validity
+ * @bdev: block device to read from
+ * @lba: logical block address of the GPT header to test
+ * @gpt: GPT header ptr, filled on return.
+ * @ptes: PTEs ptr, filled on return.
+ *
+ * Description: returns 1 if valid,  0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ */
+static int
+is_gpt_valid(struct block_device *bdev, u64 lba,
+	     gpt_header **gpt, gpt_entry **ptes)
+{
+	u32 crc, origcrc;
+	u64 lastlba;
+
+	if (!bdev || !gpt || !ptes)
+		return 0;
+	if (!(*gpt = alloc_read_gpt_header(bdev, lba)))
+		return 0;
+
+	/* Check the GUID Partition Table signature */
+	if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) {
+		Dprintk("GUID Partition Table Header signature is wrong:"
+			"%lld != %lld\n",
+			(unsigned long long)le64_to_cpu((*gpt)->signature),
+			(unsigned long long)GPT_HEADER_SIGNATURE);
+		goto fail;
+	}
+
+	/* Check the GUID Partition Table CRC */
+	origcrc = le32_to_cpu((*gpt)->header_crc32);
+	(*gpt)->header_crc32 = 0;
+	crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
+
+	if (crc != origcrc) {
+		Dprintk
+		    ("GUID Partition Table Header CRC is wrong: %x != %x\n",
+		     crc, origcrc);
+		goto fail;
+	}
+	(*gpt)->header_crc32 = cpu_to_le32(origcrc);
+
+	/* Check that the my_lba entry points to the LBA that contains
+	 * the GUID Partition Table */
+	if (le64_to_cpu((*gpt)->my_lba) != lba) {
+		Dprintk("GPT my_lba incorrect: %lld != %lld\n",
+			(unsigned long long)le64_to_cpu((*gpt)->my_lba),
+			(unsigned long long)lba);
+		goto fail;
+	}
+
+	/* Check the first_usable_lba and last_usable_lba are
+	 * within the disk.
+	 */
+	lastlba = last_lba(bdev);
+	if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
+		Dprintk("GPT: first_usable_lba incorrect: %lld > %lld\n",
+			(unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
+			(unsigned long long)lastlba);
+		goto fail;
+	}
+	if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
+		Dprintk("GPT: last_usable_lba incorrect: %lld > %lld\n",
+			(unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+			(unsigned long long)lastlba);
+		goto fail;
+	}
+
+	if (!(*ptes = alloc_read_gpt_entries(bdev, *gpt)))
+		goto fail;
+
+	/* Check the GUID Partition Entry Array CRC */
+	crc = efi_crc32((const unsigned char *) (*ptes),
+			le32_to_cpu((*gpt)->num_partition_entries) *
+			le32_to_cpu((*gpt)->sizeof_partition_entry));
+
+	if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
+		Dprintk("GUID Partitition Entry Array CRC check failed.\n");
+		goto fail_ptes;
+	}
+
+	/* We're done, all's well */
+	return 1;
+
+ fail_ptes:
+	kfree(*ptes);
+	*ptes = NULL;
+ fail:
+	kfree(*gpt);
+	*gpt = NULL;
+	return 0;
+}
+
+/**
+ * is_pte_valid() - tests one PTE for validity
+ * @pte is the pte to check
+ * @lastlba is last lba of the disk
+ *
+ * Description: returns 1 if valid,  0 on error.
+ */
+static inline int
+is_pte_valid(const gpt_entry *pte, const u64 lastlba)
+{
+	if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
+	    le64_to_cpu(pte->starting_lba) > lastlba         ||
+	    le64_to_cpu(pte->ending_lba)   > lastlba)
+		return 0;
+	return 1;
+}
+
+/**
+ * compare_gpts() - Compare the primary and alternate GPT headers
+ * @pgpt: primary GPT header
+ * @agpt: alternate GPT header
+ * @lastlba: last LBA number of the disk
+ * Description: Returns nothing.  Sanity checks pgpt and agpt fields
+ * and prints warnings on discrepancies.
+ * 
+ */
+static void
+compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
+{
+	int error_found = 0;
+	if (!pgpt || !agpt)
+		return;
+	if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
+		printk(KERN_WARNING
+		       "GPT:Primary header LBA != Alt. header alternate_lba\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		       (unsigned long long)le64_to_cpu(pgpt->my_lba),
+                       (unsigned long long)le64_to_cpu(agpt->alternate_lba));
+		error_found++;
+	}
+	if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
+		printk(KERN_WARNING
+		       "GPT:Primary header alternate_lba != Alt. header my_lba\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		       (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+                       (unsigned long long)le64_to_cpu(agpt->my_lba));
+		error_found++;
+	}
+	if (le64_to_cpu(pgpt->first_usable_lba) !=
+            le64_to_cpu(agpt->first_usable_lba)) {
+		printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		       (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
+                       (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
+		error_found++;
+	}
+	if (le64_to_cpu(pgpt->last_usable_lba) !=
+            le64_to_cpu(agpt->last_usable_lba)) {
+		printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		       (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
+                       (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
+		error_found++;
+	}
+	if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
+		printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+		error_found++;
+	}
+	if (le32_to_cpu(pgpt->num_partition_entries) !=
+            le32_to_cpu(agpt->num_partition_entries)) {
+		printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+		       "0x%x != 0x%x\n",
+		       le32_to_cpu(pgpt->num_partition_entries),
+		       le32_to_cpu(agpt->num_partition_entries));
+		error_found++;
+	}
+	if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
+            le32_to_cpu(agpt->sizeof_partition_entry)) {
+		printk(KERN_WARNING
+		       "GPT:sizeof_partition_entry values don't match: "
+		       "0x%x != 0x%x\n",
+                       le32_to_cpu(pgpt->sizeof_partition_entry),
+		       le32_to_cpu(agpt->sizeof_partition_entry));
+		error_found++;
+	}
+	if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
+            le32_to_cpu(agpt->partition_entry_array_crc32)) {
+		printk(KERN_WARNING
+		       "GPT:partition_entry_array_crc32 values don't match: "
+		       "0x%x != 0x%x\n",
+                       le32_to_cpu(pgpt->partition_entry_array_crc32),
+		       le32_to_cpu(agpt->partition_entry_array_crc32));
+		error_found++;
+	}
+	if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
+		printk(KERN_WARNING
+		       "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+			(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+			(unsigned long long)lastlba);
+		error_found++;
+	}
+
+	if (le64_to_cpu(agpt->my_lba) != lastlba) {
+		printk(KERN_WARNING
+		       "GPT:Alternate GPT header not at the end of the disk.\n");
+		printk(KERN_WARNING "GPT:%lld != %lld\n",
+			(unsigned long long)le64_to_cpu(agpt->my_lba),
+			(unsigned long long)lastlba);
+		error_found++;
+	}
+
+	if (error_found)
+		printk(KERN_WARNING
+		       "GPT: Use GNU Parted to correct GPT errors.\n");
+	return;
+}
+
+/**
+ * find_valid_gpt() - Search disk for valid GPT headers and PTEs
+ * @bdev: block device to search
+ * @gpt: GPT header ptr, filled on return.
+ * @ptes: PTEs ptr, filled on return.
+ * Description: Returns 1 if valid, 0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ * Validity depends on PMBR being valid (or being overridden by the
+ * 'gpt' kernel command line option) and finding either the Primary
+ * GPT header and PTEs valid, or the Alternate GPT header and PTEs
+ * valid.  If the Primary GPT header is not valid, the Alternate GPT header
+ * is not checked unless the 'gpt' kernel command line option is passed.
+ * This protects against devices which misreport their size, and forces
+ * the user to decide to use the Alternate GPT.
+ */
+static int
+find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes)
+{
+	int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
+	gpt_header *pgpt = NULL, *agpt = NULL;
+	gpt_entry *pptes = NULL, *aptes = NULL;
+	legacy_mbr *legacymbr = NULL;
+	u64 lastlba;
+	if (!bdev || !gpt || !ptes)
+		return 0;
+
+	lastlba = last_lba(bdev);
+        if (!force_gpt) {
+                /* This will be added to the EFI Spec. per Intel after v1.02. */
+                legacymbr = kmalloc(sizeof (*legacymbr), GFP_KERNEL);
+                if (legacymbr) {
+                        memset(legacymbr, 0, sizeof (*legacymbr));
+                        read_lba(bdev, 0, (u8 *) legacymbr,
+                                 sizeof (*legacymbr));
+                        good_pmbr = is_pmbr_valid(legacymbr, lastlba);
+                        kfree(legacymbr);
+                        legacymbr=NULL;
+                }
+                if (!good_pmbr)
+                        goto fail;
+        }
+
+	good_pgpt = is_gpt_valid(bdev, GPT_PRIMARY_PARTITION_TABLE_LBA,
+				 &pgpt, &pptes);
+        if (good_pgpt)
+		good_agpt = is_gpt_valid(bdev,
+					 le64_to_cpu(pgpt->alternate_lba),
+					 &agpt, &aptes);
+        if (!good_agpt && force_gpt)
+                good_agpt = is_gpt_valid(bdev, lastlba,
+                                         &agpt, &aptes);
+
+        /* The obviously unsuccessful case */
+        if (!good_pgpt && !good_agpt)
+                goto fail;
+
+        compare_gpts(pgpt, agpt, lastlba);
+
+        /* The good cases */
+        if (good_pgpt) {
+                *gpt  = pgpt;
+                *ptes = pptes;
+                kfree(agpt);
+                kfree(aptes);
+                if (!good_agpt) {
+                        printk(KERN_WARNING 
+			       "Alternate GPT is invalid, "
+                               "using primary GPT.\n");
+                }
+                return 1;
+        }
+        else if (good_agpt) {
+                *gpt  = agpt;
+                *ptes = aptes;
+                kfree(pgpt);
+                kfree(pptes);
+                printk(KERN_WARNING 
+                       "Primary GPT is invalid, using alternate GPT.\n");
+                return 1;
+        }
+
+ fail:
+        kfree(pgpt);
+        kfree(agpt);
+        kfree(pptes);
+        kfree(aptes);
+        *gpt = NULL;
+        *ptes = NULL;
+        return 0;
+}
+
+/**
+ * efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+ * @state: parsed partition information to fill in
+ * @bdev: block device being scanned
+ *
+ * Description: called from check.c, if the disk contains GPT
+ * partitions, sets up partition entries in the kernel.
+ *
+ * If the first block on the disk is a legacy MBR,
+ * it will get handled by msdos_partition().
+ * If it's a Protective MBR, we'll handle it here.
+ *
+ * We do not create a Linux partition for GPT, but
+ * only for the actual data partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ *  0 if this isn't our partition table
+ *  1 if successful
+ *
+ */
+int
+efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	gpt_header *gpt = NULL;
+	gpt_entry *ptes = NULL;
+	u32 i;
+
+	if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
+		kfree(gpt);
+		kfree(ptes);
+		return 0;
+	}
+
+	Dprintk("GUID Partition Table is valid!  Yea!\n");
+
+	for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+		if (!is_pte_valid(&ptes[i], last_lba(bdev)))
+			continue;
+
+		put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
+				 (le64_to_cpu(ptes[i].ending_lba) -
+                                  le64_to_cpu(ptes[i].starting_lba) +
+				  1ULL));
+
+		/* If this is a RAID volume, tell md */
+		if (!efi_guidcmp(ptes[i].partition_type_guid,
+				 PARTITION_LINUX_RAID_GUID))
+			state->parts[i+1].flags = 1;
+	}
+	kfree(ptes);
+	kfree(gpt);
+	printk("\n");
+	return 1;
+}
diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h
new file mode 100644
index 0000000..c44fb05
--- /dev/null
+++ b/fs/partitions/efi.h
@@ -0,0 +1,131 @@
+/************************************************************
+ * EFI GUID Partition Table
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
+ *
+ * By Matt Domsch <Matt_Domsch@dell.com>  Fri Sep 22 22:15:56 CDT 2000  
+ *   Copyright 2000,2001 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ * 
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * 
+ ************************************************************/
+
+#ifndef FS_PART_EFI_H_INCLUDED
+#define FS_PART_EFI_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/efi.h>
+
+#define MSDOS_MBR_SIGNATURE 0xaa55
+#define EFI_PMBR_OSTYPE_EFI 0xEF
+#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+
+#define GPT_BLOCK_SIZE 512
+#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
+#define GPT_HEADER_REVISION_V1 0x00010000
+#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
+
+#define PARTITION_SYSTEM_GUID \
+    EFI_GUID( 0xC12A7328, 0xF81F, 0x11d2, \
+              0xBA, 0x4B, 0x00, 0xA0, 0xC9, 0x3E, 0xC9, 0x3B) 
+#define LEGACY_MBR_PARTITION_GUID \
+    EFI_GUID( 0x024DEE41, 0x33E7, 0x11d3, \
+              0x9D, 0x69, 0x00, 0x08, 0xC7, 0x81, 0xF3, 0x9F)
+#define PARTITION_MSFT_RESERVED_GUID \
+    EFI_GUID( 0xE3C9E316, 0x0B5C, 0x4DB8, \
+              0x81, 0x7D, 0xF9, 0x2D, 0xF0, 0x02, 0x15, 0xAE)
+#define PARTITION_BASIC_DATA_GUID \
+    EFI_GUID( 0xEBD0A0A2, 0xB9E5, 0x4433, \
+              0x87, 0xC0, 0x68, 0xB6, 0xB7, 0x26, 0x99, 0xC7)
+#define PARTITION_LINUX_RAID_GUID \
+    EFI_GUID( 0xa19d880f, 0x05fc, 0x4d3b, \
+              0xa0, 0x06, 0x74, 0x3f, 0x0f, 0x84, 0x91, 0x1e)
+#define PARTITION_LINUX_SWAP_GUID \
+    EFI_GUID( 0x0657fd6d, 0xa4ab, 0x43c4, \
+              0x84, 0xe5, 0x09, 0x33, 0xc8, 0x4b, 0x4f, 0x4f)
+#define PARTITION_LINUX_LVM_GUID \
+    EFI_GUID( 0xe6d6d379, 0xf507, 0x44c2, \
+              0xa2, 0x3c, 0x23, 0x8f, 0x2a, 0x3d, 0xf9, 0x28)
+
+typedef struct _gpt_header {
+	__le64 signature;
+	__le32 revision;
+	__le32 header_size;
+	__le32 header_crc32;
+	__le32 reserved1;
+	__le64 my_lba;
+	__le64 alternate_lba;
+	__le64 first_usable_lba;
+	__le64 last_usable_lba;
+	efi_guid_t disk_guid;
+	__le64 partition_entry_lba;
+	__le32 num_partition_entries;
+	__le32 sizeof_partition_entry;
+	__le32 partition_entry_array_crc32;
+	u8 reserved2[GPT_BLOCK_SIZE - 92];
+} __attribute__ ((packed)) gpt_header;
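+
+/*
+ * The fields above occupy 92 bytes; reserved2 pads the header out to
+ * GPT_BLOCK_SIZE, so sizeof(gpt_header) == 512.
+ */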
+
+typedef struct _gpt_entry_attributes {
+	u64 required_to_function:1;
+	u64 reserved:47;
+        u64 type_guid_specific:16;
+} __attribute__ ((packed)) gpt_entry_attributes;
+
+typedef struct _gpt_entry {
+	efi_guid_t partition_type_guid;
+	efi_guid_t unique_partition_guid;
+	__le64 starting_lba;
+	__le64 ending_lba;
+	gpt_entry_attributes attributes;
+	efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
+} __attribute__ ((packed)) gpt_entry;
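+
+/*
+ * sizeof(gpt_entry) == 128: two GUIDs (32), two LBAs (16), the 64-bit
+ * attributes (8) and the 72-byte partition_name array.
+ */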
+
+typedef struct _legacy_mbr {
+	u8 boot_code[440];
+	__le32 unique_mbr_signature;
+	__le16 unknown;
+	struct partition partition_record[4];
+	__le16 signature;
+} __attribute__ ((packed)) legacy_mbr;
+
+/* Functions */
+extern int efi_partition(struct parsed_partitions *state, struct block_device *bdev);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * --------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4 
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
new file mode 100644
index 0000000..d59dcbf
--- /dev/null
+++ b/fs/partitions/ibm.c
@@ -0,0 +1,191 @@
+/*
+ * File...........: linux/fs/partitions/ibm.c      
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *                  Volker Sameske <sameske@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+
+ * History of changes (starts July 2000)
+ * 07/10/00 Fixed detection of CMS formatted disks     
+ * 02/13/00 VTOC partition support added
+ * 12/27/01 fixed PL030593 (CMS reserved minidisk not detected on 64 bit)
+ * 07/24/03 no longer using contents of freed page for CMS label recognition (BZ3611)
+ */
+
+#include <linux/config.h>
+#include <linux/buffer_head.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+#include <asm/dasd.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/vtoc.h>
+
+#include "check.h"
+#include "ibm.h"
+
+/*
+ * compute the block number from a 
+ * cyl-cyl-head-head structure
+ */
+static inline int
+cchh2blk (cchh_t *ptr, struct hd_geometry *geo) {
+        return ptr->cc * geo->heads * geo->sectors +
+	       ptr->hh * geo->sectors;
+}
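+
+/*
+ * Worked example, assuming a geometry of geo->heads = 15 and
+ * geo->sectors = 12: a cchh_t of {cc = 2, hh = 3} maps to
+ * 2*15*12 + 3*12 = block 396.
+ */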
+
+
+/*
+ * compute the block number from a 
+ * cyl-cyl-head-head-block structure
+ */
+static inline int
+cchhb2blk (cchhb_t *ptr, struct hd_geometry *geo) {
+        return ptr->cc * geo->heads * geo->sectors +
+		ptr->hh * geo->sectors +
+		ptr->b;
+}
+
+/*
+ * Parse the partition table of an IBM DASD volume.
+ */
+int 
+ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int blocksize, offset, size;
+	dasd_information_t *info;
+	struct hd_geometry *geo;
+	char type[5] = {0,};
+	char name[7] = {0,};
+	volume_label_t *vlabel;
+	unsigned char *data;
+	Sector sect;
+
+	if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
+		goto out_noinfo;
+	if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
+		goto out_nogeo;
+	if ((vlabel = kmalloc(sizeof(volume_label_t), GFP_KERNEL)) == NULL)
+		goto out_novlab;
+	
+	if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
+	    ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
+		goto out_noioctl;
+	
+	if ((blocksize = bdev_hardsect_size(bdev)) <= 0)
+		goto out_badsect;
+
+	/*
+	 * Get volume label, extract name and type.
+	 */
+	data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect);
+	if (data == NULL)
+		goto out_readerr;
+
+	strncpy (type, data, 4);
+	if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD")))
+		strncpy(name, data + 8, 6);
+	else
+		strncpy(name, data + 4, 6);
+	memcpy (vlabel, data, sizeof(volume_label_t));
+	put_dev_sector(sect);
+
+	EBCASC(type, 4);
+	EBCASC(name, 6);
+
+	/*
+	 * Three different types: CMS1, VOL1 and LNX1/unlabeled
+	 */
+	if (strncmp(type, "CMS1", 4) == 0) {
+		/*
+		 * VM style CMS1 labeled disk
+		 */
+		int *label = (int *) vlabel;
+
+		if (label[13] != 0) {
+			printk("CMS1/%8s(MDSK):", name);
+			/* disk is reserved minidisk */
+			blocksize = label[3];
+			offset = label[13];
+			size = (label[7] - 1)*(blocksize >> 9);
+		} else {
+			printk("CMS1/%8s:", name);
+			offset = (info->label_block + 1);
+			size = bdev->bd_inode->i_size >> 9;
+		}
+		put_partition(state, 1, offset*(blocksize >> 9),
+				 size-offset*(blocksize >> 9));
+	} else if ((strncmp(type, "VOL1", 4) == 0) &&
+		(!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
+		/*
+		 * New style VOL1 labeled disk
+		 */
+		unsigned int blk;
+		int counter;
+
+		printk("VOL1/%8s:", name);
+
+		/* get block number and read then go through format1 labels */
+		blk = cchhb2blk(&vlabel->vtoc, geo) + 1;
+		counter = 0;
+		while ((data = read_dev_sector(bdev, blk*(blocksize/512),
+					       &sect)) != NULL) {
+			format1_label_t f1;
+
+			memcpy(&f1, data, sizeof(format1_label_t));
+			put_dev_sector(sect);
+
+			/* skip FMT4 / FMT5 / FMT7 labels */
+			if (f1.DS1FMTID == _ascebc['4']
+			    || f1.DS1FMTID == _ascebc['5']
+			    || f1.DS1FMTID == _ascebc['7']) {
+			        blk++;
+				continue;
+			}
+
+			/* only FMT1 valid at this point */
+			if (f1.DS1FMTID != _ascebc['1'])
+				break;
+
+			/* OK, we got valid partition data */
+		        offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
+			size  = cchh2blk(&f1.DS1EXT1.ulimit, geo) - 
+				offset + geo->sectors;
+			if (counter >= state->limit)
+				break;
+			put_partition(state, counter + 1, 
+					 offset * (blocksize >> 9),
+					 size * (blocksize >> 9));
+			counter++;
+			blk++;
+		}
+	} else {
+		/*
+		 * Old style LNX1 or unlabeled disk
+		 */
+		if (strncmp(type, "LNX1", 4) == 0)
+			printk ("LNX1/%8s:", name);
+		else
+			printk("(nonl)/%8s:", name);
+		offset = (info->label_block + 1);
+		size = (bdev->bd_inode->i_size >> 9);
+		put_partition(state, 1, offset*(blocksize >> 9),
+				 size-offset*(blocksize >> 9));
+	}
+
+	printk("\n");
+	kfree(vlabel);
+	kfree(geo);
+	kfree(info);
+	return 1;
+	
+out_readerr:
+out_badsect:
+out_noioctl:
+	kfree(vlabel);
+out_novlab:
+	kfree(geo);
+out_nogeo:
+	kfree(info);
+out_noinfo:
+	return 0;
+}
diff --git a/fs/partitions/ibm.h b/fs/partitions/ibm.h
new file mode 100644
index 0000000..31f85a6
--- /dev/null
+++ b/fs/partitions/ibm.h
@@ -0,0 +1 @@
+int ibm_partition(struct parsed_partitions *, struct block_device *);
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
new file mode 100644
index 0000000..7ab1c11
--- /dev/null
+++ b/fs/partitions/ldm.c
@@ -0,0 +1,1483 @@
+/**
+ * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://linux-ntfs.sf.net/ldm
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program (in the main directory of the source in the file COPYING); if
+ * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA  02111-1307  USA
+ */
+
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/stringify.h>
+#include "ldm.h"
+#include "check.h"
+#include "msdos.h"
+
+typedef enum {
+	FALSE = 0,
+	TRUE  = 1
+} BOOL;
+
+/**
+ * ldm_debug/info/error/crit - Output an error message
+ * @f:    A printf format string containing the message
+ * @...:  Variables to substitute into @f
+ *
+ * ldm_debug() writes a DEBUG level message to the syslog but only if the
+ * driver was compiled with debug enabled. Otherwise, the call turns into a NOP.
+ */
+#ifndef CONFIG_LDM_DEBUG
+#define ldm_debug(...)	do {} while (0)
+#else
+#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __FUNCTION__, f, ##a)
+#endif
+
+#define ldm_crit(f, a...)  _ldm_printk (KERN_CRIT,  __FUNCTION__, f, ##a)
+#define ldm_error(f, a...) _ldm_printk (KERN_ERR,   __FUNCTION__, f, ##a)
+#define ldm_info(f, a...)  _ldm_printk (KERN_INFO,  __FUNCTION__, f, ##a)
+
+__attribute__ ((format (printf, 3, 4)))
+static void _ldm_printk (const char *level, const char *function,
+			 const char *fmt, ...)
+{
+	static char buf[128];
+	va_list args;
+
+	va_start (args, fmt);
+	vsnprintf (buf, sizeof (buf), fmt, args);
+	va_end (args);
+
+	printk ("%s%s(): %s\n", level, function, buf);
+}
+
+
+/**
+ * ldm_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src:  Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return:  0-255  Success, the byte was parsed correctly
+ *          -1     Error, an invalid character was supplied
+ */
+static int ldm_parse_hexbyte (const u8 *src)
+{
+	unsigned int x;		/* For correct wrapping */
+	int h;
+
+	/* high part */
+	if      ((x = src[0] - '0') <= '9'-'0') h = x;
+	else if ((x = src[0] - 'a') <= 'f'-'a') h = x+10;
+	else if ((x = src[0] - 'A') <= 'F'-'A') h = x+10;
+	else return -1;
+	h <<= 4;
+
+	/* low part */
+	if ((x = src[1] - '0') <= '9'-'0') return h | x;
+	if ((x = src[1] - 'a') <= 'f'-'a') return h | (x+10);
+	if ((x = src[1] - 'A') <= 'F'-'A') return h | (x+10);
+	return -1;
+}
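+
+/*
+ * E.g. ldm_parse_hexbyte() on "2f" returns 0x2f, while "g0" yields -1.
+ */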
+
+/**
+ * ldm_parse_guid - Convert GUID from ASCII to binary
+ * @src:   36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest:  Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return:  TRUE   @dest contains binary GUID
+ *          FALSE  @dest contents are undefined
+ */
+static BOOL ldm_parse_guid (const u8 *src, u8 *dest)
+{
+	static const int size[] = { 4, 2, 2, 2, 6 };
+	int i, j, v;
+
+	if (src[8]  != '-' || src[13] != '-' ||
+	    src[18] != '-' || src[23] != '-')
+		return FALSE;
+
+	for (j = 0; j < 5; j++, src++)
+		for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
+			if ((v = ldm_parse_hexbyte (src)) < 0)
+				return FALSE;
+
+	return TRUE;
+}
+
+
+/**
+ * ldm_parse_privhead - Read the LDM Database PRIVHEAD structure
+ * @data:  Raw database PRIVHEAD structure loaded from the device
+ * @ph:    In-memory privhead structure in which to return parsed information
+ *
+ * This parses the LDM database PRIVHEAD structure supplied in @data and
+ * sets up the in-memory privhead structure @ph with the obtained information.
+ *
+ * Return:  TRUE   @ph contains the PRIVHEAD data
+ *          FALSE  @ph contents are undefined
+ */
+static BOOL ldm_parse_privhead (const u8 *data, struct privhead *ph)
+{
+	BUG_ON (!data || !ph);
+
+	if (MAGIC_PRIVHEAD != BE64 (data)) {
+		ldm_error ("Cannot find PRIVHEAD structure. LDM database is"
+			" corrupt. Aborting.");
+		return FALSE;
+	}
+
+	ph->ver_major          = BE16 (data + 0x000C);
+	ph->ver_minor          = BE16 (data + 0x000E);
+	ph->logical_disk_start = BE64 (data + 0x011B);
+	ph->logical_disk_size  = BE64 (data + 0x0123);
+	ph->config_start       = BE64 (data + 0x012B);
+	ph->config_size        = BE64 (data + 0x0133);
+
+	if ((ph->ver_major != 2) || (ph->ver_minor != 11)) {
+		ldm_error ("Expected PRIVHEAD version %d.%d, got %d.%d."
+			" Aborting.", 2, 11, ph->ver_major, ph->ver_minor);
+		return FALSE;
+	}
+	if (ph->config_size != LDM_DB_SIZE) {	/* 1 MiB in sectors. */
+		/* Warn the user and continue, carefully */
+		ldm_info ("Database is normally %u bytes, it claims to "
+			"be %llu bytes.", LDM_DB_SIZE,
+			(unsigned long long)ph->config_size );
+	}
+	if ((ph->logical_disk_size == 0) ||
+	    (ph->logical_disk_start + ph->logical_disk_size > ph->config_start)) {
+		ldm_error ("PRIVHEAD disk size doesn't match real disk size");
+		return FALSE;
+	}
+
+	if (!ldm_parse_guid (data + 0x0030, ph->disk_id)) {
+		ldm_error ("PRIVHEAD contains an invalid GUID.");
+		return FALSE;
+	}
+
+	ldm_debug ("Parsed PRIVHEAD successfully.");
+	return TRUE;
+}
+
+/**
+ * ldm_parse_tocblock - Read the LDM Database TOCBLOCK structure
+ * @data:  Raw database TOCBLOCK structure loaded from the device
+ * @toc:   In-memory toc structure in which to return parsed information
+ *
+ * This parses the LDM Database TOCBLOCK (table of contents) structure supplied
+ * in @data and sets up the in-memory tocblock structure @toc with the obtained
+ * information.
+ *
+ * N.B.  The *_start and *_size values returned in @toc are not range-checked.
+ *
+ * Return:  TRUE   @toc contains the TOCBLOCK data
+ *          FALSE  @toc contents are undefined
+ */
+static BOOL ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
+{
+	BUG_ON (!data || !toc);
+
+	if (MAGIC_TOCBLOCK != BE64 (data)) {
+		ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
+		return FALSE;
+	}
+	strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
+	toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+	toc->bitmap1_start = BE64 (data + 0x2E);
+	toc->bitmap1_size  = BE64 (data + 0x36);
+
+	if (strncmp (toc->bitmap1_name, TOC_BITMAP1,
+			sizeof (toc->bitmap1_name)) != 0) {
+		ldm_crit ("TOCBLOCK's first bitmap is '%s', should be '%s'.",
+				TOC_BITMAP1, toc->bitmap1_name);
+		return FALSE;
+	}
+	strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
+	toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+	toc->bitmap2_start = BE64 (data + 0x50);
+	toc->bitmap2_size  = BE64 (data + 0x58);
+	if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
+			sizeof (toc->bitmap2_name)) != 0) {
+		ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.",
+				TOC_BITMAP2, toc->bitmap2_name);
+		return FALSE;
+	}
+	ldm_debug ("Parsed TOCBLOCK successfully.");
+	return TRUE;
+}
+
+/**
+ * ldm_parse_vmdb - Read the LDM Database VMDB structure
+ * @data:  Raw database VMDB structure loaded from the device
+ * @vm:    In-memory vmdb structure in which to return parsed information
+ *
+ * This parses the LDM Database VMDB structure supplied in @data and sets up
+ * the in-memory vmdb structure @vm with the obtained information.
+ *
+ * N.B.  The *_start, *_size and *_seq values will be range-checked later.
+ *
+ * Return:  TRUE   @vm contains VMDB info
+ *          FALSE  @vm contents are undefined
+ */
+static BOOL ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
+{
+	BUG_ON (!data || !vm);
+
+	if (MAGIC_VMDB != BE32 (data)) {
+		ldm_crit ("Cannot find the VMDB, database may be corrupt.");
+		return FALSE;
+	}
+
+	vm->ver_major = BE16 (data + 0x12);
+	vm->ver_minor = BE16 (data + 0x14);
+	if ((vm->ver_major != 4) || (vm->ver_minor != 10)) {
+		ldm_error ("Expected VMDB version %d.%d, got %d.%d. "
+			"Aborting.", 4, 10, vm->ver_major, vm->ver_minor);
+		return FALSE;
+	}
+
+	vm->vblk_size     = BE32 (data + 0x08);
+	vm->vblk_offset   = BE32 (data + 0x0C);
+	vm->last_vblk_seq = BE32 (data + 0x04);
+
+	ldm_debug ("Parsed VMDB successfully.");
+	return TRUE;
+}
+
+/**
+ * ldm_compare_privheads - Compare two privhead objects
+ * @ph1:  First privhead
+ * @ph2:  Second privhead
+ *
+ * This compares the two privhead structures @ph1 and @ph2.
+ *
+ * Return:  TRUE   Identical
+ *          FALSE  Different
+ */
+static BOOL ldm_compare_privheads (const struct privhead *ph1,
+				   const struct privhead *ph2)
+{
+	BUG_ON (!ph1 || !ph2);
+
+	return ((ph1->ver_major          == ph2->ver_major)		&&
+		(ph1->ver_minor          == ph2->ver_minor)		&&
+		(ph1->logical_disk_start == ph2->logical_disk_start)	&&
+		(ph1->logical_disk_size  == ph2->logical_disk_size)	&&
+		(ph1->config_start       == ph2->config_start)		&&
+		(ph1->config_size        == ph2->config_size)		&&
+		!memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+}
+
+/**
+ * ldm_compare_tocblocks - Compare two tocblock objects
+ * @toc1:  First toc
+ * @toc2:  Second toc
+ *
+ * This compares the two tocblock structures @toc1 and @toc2.
+ *
+ * Return:  TRUE   Identical
+ *          FALSE  Different
+ */
+static BOOL ldm_compare_tocblocks (const struct tocblock *toc1,
+				   const struct tocblock *toc2)
+{
+	BUG_ON (!toc1 || !toc2);
+
+	return ((toc1->bitmap1_start == toc2->bitmap1_start)	&&
+		(toc1->bitmap1_size  == toc2->bitmap1_size)	&&
+		(toc1->bitmap2_start == toc2->bitmap2_start)	&&
+		(toc1->bitmap2_size  == toc2->bitmap2_size)	&&
+		!strncmp (toc1->bitmap1_name, toc2->bitmap1_name,
+			sizeof (toc1->bitmap1_name))		&&
+		!strncmp (toc1->bitmap2_name, toc2->bitmap2_name,
+			sizeof (toc1->bitmap2_name)));
+}
+
+/**
+ * ldm_validate_privheads - Compare the primary privhead with its backups
+ * @bdev:  Device holding the LDM Database
+ * @ph1:   Memory struct to fill with ph contents
+ *
+ * Read and compare all three privheads from disk.
+ *
+ * The privheads on disk show the size and location of the main disk area and
+ * the configuration area (the database).  The values are range-checked against
+ * @hd, which contains the real size of the disk.
+ *
+ * Return:  TRUE   Success
+ *          FALSE  Error
+ */
+static BOOL ldm_validate_privheads (struct block_device *bdev,
+				    struct privhead *ph1)
+{
+	static const int off[3] = { OFF_PRIV1, OFF_PRIV2, OFF_PRIV3 };
+	struct privhead *ph[3] = { ph1 };
+	Sector sect;
+	u8 *data;
+	BOOL result = FALSE;
+	long num_sects;
+	int i;
+
+	BUG_ON (!bdev || !ph1);
+
+	ph[1] = kmalloc (sizeof (*ph[1]), GFP_KERNEL);
+	ph[2] = kmalloc (sizeof (*ph[2]), GFP_KERNEL);
+	if (!ph[1] || !ph[2]) {
+		ldm_crit ("Out of memory.");
+		goto out;
+	}
+
+	/* off[1 & 2] are relative to ph[0]->config_start */
+	ph[0]->config_start = 0;
+
+	/* Read and parse privheads */
+	for (i = 0; i < 3; i++) {
+		data = read_dev_sector (bdev,
+			ph[0]->config_start + off[i], &sect);
+		if (!data) {
+			ldm_crit ("Disk read failed.");
+			goto out;
+		}
+		result = ldm_parse_privhead (data, ph[i]);
+		put_dev_sector (sect);
+		if (!result) {
+			ldm_error ("Cannot find PRIVHEAD %d.", i+1); /* Log again */
+			if (i < 2)
+				goto out;	/* Already logged */
+			else
+				break;	/* FIXME ignore for now, 3rd PH can fail on odd-sized disks */
+		}
+	}
+
+	num_sects = bdev->bd_inode->i_size >> 9;
+
+	if ((ph[0]->config_start > num_sects) ||
+	   ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
+		ldm_crit ("Database extends beyond the end of the disk.");
+		goto out;
+	}
+
+	if ((ph[0]->logical_disk_start > ph[0]->config_start) ||
+	   ((ph[0]->logical_disk_start + ph[0]->logical_disk_size)
+		    > ph[0]->config_start)) {
+		ldm_crit ("Disk and database overlap.");
+		goto out;
+	}
+
+	if (!ldm_compare_privheads (ph[0], ph[1])) {
+		ldm_crit ("Primary and backup PRIVHEADs don't match.");
+		goto out;
+	}
+	/* FIXME ignore this for now
+	if (!ldm_compare_privheads (ph[0], ph[2])) {
+		ldm_crit ("Primary and backup PRIVHEADs don't match.");
+		goto out;
+	}*/
+	ldm_debug ("Validated PRIVHEADs successfully.");
+	result = TRUE;
+out:
+	kfree (ph[1]);
+	kfree (ph[2]);
+	return result;
+}
+
+/**
+ * ldm_validate_tocblocks - Validate the table of contents and its backups
+ * @bdev:  Device holding the LDM Database
+ * @base:  Offset, into @bdev, of the database
+ * @ldb:   Cache of the database structures
+ *
+ * Find and compare the four tables of contents of the LDM Database stored on
+ * @bdev and return the parsed information into @toc1.
+ *
+ * The offsets and sizes of the configs are range-checked against a privhead.
+ *
+ * Return:  TRUE   @toc1 contains validated TOCBLOCK info
+ *          FALSE  @toc1 contents are undefined
+ */
+static BOOL ldm_validate_tocblocks (struct block_device *bdev,
+	unsigned long base, struct ldmdb *ldb)
+{
+	static const int off[4] = { OFF_TOCB1, OFF_TOCB2, OFF_TOCB3, OFF_TOCB4};
+	struct tocblock *tb[4];
+	struct privhead *ph;
+	Sector sect;
+	u8 *data;
+	BOOL result = FALSE;
+	int i;
+
+	BUG_ON (!bdev || !ldb);
+
+	ph    = &ldb->ph;
+	tb[0] = &ldb->toc;
+	tb[1] = kmalloc (sizeof (*tb[1]), GFP_KERNEL);
+	tb[2] = kmalloc (sizeof (*tb[2]), GFP_KERNEL);
+	tb[3] = kmalloc (sizeof (*tb[3]), GFP_KERNEL);
+	if (!tb[1] || !tb[2] || !tb[3]) {
+		ldm_crit ("Out of memory.");
+		goto out;
+	}
+
+	for (i = 0; i < 4; i++)		/* Read and parse all four toc's. */
+	{
+		data = read_dev_sector (bdev, base + off[i], &sect);
+		if (!data) {
+			ldm_crit ("Disk read failed.");
+			goto out;
+		}
+		result = ldm_parse_tocblock (data, tb[i]);
+		put_dev_sector (sect);
+		if (!result)
+			goto out;	/* Already logged */
+	}
+
+	/* Range check the toc against a privhead. */
+	if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) ||
+	    ((tb[0]->bitmap2_start + tb[0]->bitmap2_size) > ph->config_size)) {
+		ldm_crit ("The bitmaps are out of range.  Giving up.");
+		goto out;
+	}
+
+	if (!ldm_compare_tocblocks (tb[0], tb[1]) ||	/* Compare all tocs. */
+	    !ldm_compare_tocblocks (tb[0], tb[2]) ||
+	    !ldm_compare_tocblocks (tb[0], tb[3])) {
+		ldm_crit ("The TOCBLOCKs don't match.");
+		goto out;
+	}
+
+	ldm_debug ("Validated TOCBLOCKs successfully.");
+	result = TRUE;
+out:
+	kfree (tb[1]);
+	kfree (tb[2]);
+	kfree (tb[3]);
+	return result;
+}
+
+/**
+ * ldm_validate_vmdb - Read the VMDB and validate it
+ * @bdev:  Device holding the LDM Database
+ * @base:  Offset, into @bdev, of the database
+ * @ldb:   Cache of the database structures
+ *
+ * Find the vmdb of the LDM Database stored on @bdev and return the parsed
+ * information in @ldb.
+ *
+ * Return:  TRUE   @ldb contains validated VBDB info
+ *          FALSE  @ldb contents are undefined
+ */
+static BOOL ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
+			       struct ldmdb *ldb)
+{
+	Sector sect;
+	u8 *data;
+	BOOL result = FALSE;
+	struct vmdb *vm;
+	struct tocblock *toc;
+
+	BUG_ON (!bdev || !ldb);
+
+	vm  = &ldb->vm;
+	toc = &ldb->toc;
+
+	data = read_dev_sector (bdev, base + OFF_VMDB, &sect);
+	if (!data) {
+		ldm_crit ("Disk read failed.");
+		return FALSE;
+	}
+
+	if (!ldm_parse_vmdb (data, vm))
+		goto out;				/* Already logged */
+
+	/* Are there uncommitted transactions? */
+	if (BE16(data + 0x10) != 0x01) {
+		ldm_crit ("Database is not in a consistent state.  Aborting.");
+		goto out;
+	}
+
+	if (vm->vblk_offset != 512)
+		ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset);
+
+	/*
+	 * The last_vblk_seq can be before the end of the vmdb, just make sure
+	 * it is not out of bounds.
+	 */
+	if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) {
+		ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK.  "
+				"Database is corrupt.  Aborting.");
+		goto out;
+	}
+
+	result = TRUE;
+out:
+	put_dev_sector (sect);
+	return result;
+}
+
+
+/**
+ * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk
+ * @bdev:  Device holding the LDM Database
+ *
+ * This function provides a weak test to decide whether the device is a dynamic
+ * disk or not.  It looks for an MS-DOS-style partition table containing at
+ * least one partition of type 0x42 (formerly SFS, now used by Windows for
+ * dynamic disks).
+ *
+ * N.B.  The only possible error can come from read_dev_sector() and that is
+ *       only likely to happen if the underlying device is strange.  If that IS
+ *       the case we should return zero to let someone else try.
+ *
+ * Return:  TRUE   @bdev is a dynamic disk
+ *          FALSE  @bdev is not a dynamic disk, or an error occurred
+ */
+static BOOL ldm_validate_partition_table (struct block_device *bdev)
+{
+	Sector sect;
+	u8 *data;
+	struct partition *p;
+	int i;
+	BOOL result = FALSE;
+
+	BUG_ON (!bdev);
+
+	data = read_dev_sector (bdev, 0, &sect);
+	if (!data) {
+		ldm_crit ("Disk read failed.");
+		return FALSE;
+	}
+
+	if (*(__le16*) (data + 0x01FE) != cpu_to_le16 (MSDOS_LABEL_MAGIC))
+		goto out;
+
+	p = (struct partition*)(data + 0x01BE);
+	for (i = 0; i < 4; i++, p++)
+		if (SYS_IND (p) == WIN2K_DYNAMIC_PARTITION) {
+			result = TRUE;
+			break;
+		}
+
+	if (result)
+		ldm_debug ("Found W2K dynamic disk partition type.");
+
+out:
+	put_dev_sector (sect);
+	return result;
+}
+
+/**
+ * ldm_get_disk_objid - Search a linked list of vblk's for a given Disk Id
+ * @ldb:  Cache of the database structures
+ *
+ * The LDM Database contains a list of all partitions on all dynamic disks.
+ * The primary PRIVHEAD, at the beginning of the physical disk, tells us
+ * the GUID of this disk.  This function searches for the GUID in a linked
+ * list of vblk's.
+ *
+ * Return:  Pointer, A matching vblk was found
+ *          NULL,    No match, or an error
+ */
+static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
+{
+	struct list_head *item;
+
+	BUG_ON (!ldb);
+
+	list_for_each (item, &ldb->v_disk) {
+		struct vblk *v = list_entry (item, struct vblk, list);
+		if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+			return v;
+	}
+
+	return NULL;
+}
+
+/**
+ * ldm_create_data_partitions - Create data partitions for this device
+ * @pp:   List of the partitions parsed so far
+ * @ldb:  Cache of the database structures
+ *
+ * The database contains ALL the partitions for ALL disk groups, so we need to
+ * filter out this specific disk. Using the disk's object id, we can find all
+ * the partitions in the database that belong to this disk.
+ *
+ * Add each partition in our database, to the parsed_partitions structure.
+ *
+ * N.B.  This function creates the partitions in the order it finds partition
+ *       objects in the linked list.
+ *
+ * Return:  TRUE   Partition created
+ *          FALSE  Error, probably a range checking problem
+ */
+static BOOL ldm_create_data_partitions (struct parsed_partitions *pp,
+					const struct ldmdb *ldb)
+{
+	struct list_head *item;
+	struct vblk *vb;
+	struct vblk *disk;
+	struct vblk_part *part;
+	int part_num = 1;
+
+	BUG_ON (!pp || !ldb);
+
+	disk = ldm_get_disk_objid (ldb);
+	if (!disk) {
+		ldm_crit ("Can't find the ID of this disk in the database.");
+		return FALSE;
+	}
+
+	printk (" [LDM]");
+
+	/* Create the data partitions */
+	list_for_each (item, &ldb->v_part) {
+		vb = list_entry (item, struct vblk, list);
+		part = &vb->vblk.part;
+
+		if (part->disk_id != disk->obj_id)
+			continue;
+
+		put_partition (pp, part_num, ldb->ph.logical_disk_start +
+				part->start, part->size);
+		part_num++;
+	}
+
+	printk ("\n");
+	return TRUE;
+}
+
+
+/**
+ * ldm_relative - Calculate the next relative offset
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @base:    Size of the previous fixed width fields
+ * @offset:  Cumulative size of the previous variable-width fields
+ *
+ * Because many of the VBLK fields are variable-width, it's necessary
+ * to calculate each offset based on the previous one and the length
+ * of the field it pointed to.
+ *
+ * Return:  -1 Error, the calculated offset exceeded the size of the buffer
+ *           n OK, a range-checked offset into buffer
+ */
+static int ldm_relative (const u8 *buffer, int buflen, int base, int offset)
+{
+
+	base += offset;
+	if ((!buffer) || (offset < 0) || (base >= buflen))
+		return -1;
+	if ((base + buffer[base]) >= buflen)
+		return -1;
+
+	return buffer[base] + offset + 1;
+}
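+
+/*
+ * Worked example: with base 0x18 and offset 0, if buffer[0x18] == 8
+ * (a length byte followed by an 8-byte object id), ldm_relative()
+ * returns 9, i.e. the offset, relative to 0x18, of the field that
+ * follows the object id.
+ */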
+
+/**
+ * ldm_get_vnum - Convert a variable-width, big endian number, into cpu order
+ * @block:  Pointer to the variable-width number to convert
+ *
+ * Large numbers in the LDM Database are often stored in a packed format.  Each
+ * number is prefixed by a one byte width marker.  All numbers in the database
+ * are stored in big-endian byte order.  This function reads one of these
+ * numbers and returns the result
+ *
+ * N.B.  This function DOES NOT perform any range checking, though the most
+ *       it will read is eight bytes.
+ *
+ * Return:  n A number
+ *          0 Zero, or an error occurred
+ */
+static u64 ldm_get_vnum (const u8 *block)
+{
+	u64 tmp = 0;
+	u8 length;
+
+	BUG_ON (!block);
+
+	length = *block++;
+
+	if (length && length <= 8)
+		while (length--)
+			tmp = (tmp << 8) | *block++;
+	else
+		ldm_error ("Illegal length %d.", length);
+
+	return tmp;
+}
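+
+/*
+ * E.g. the byte sequence 02 12 34 decodes to 0x1234; a length byte of 0
+ * or greater than 8 is rejected and 0 is returned.
+ */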
+
+/**
+ * ldm_get_vstr - Read a length-prefixed string into a buffer
+ * @block:   Pointer to the length marker
+ * @buffer:  Location to copy string to
+ * @buflen:  Size of the output buffer
+ *
+ * Many of the strings in the LDM Database are not NULL terminated.  Instead
+ * they are prefixed by a one byte length marker.  This function copies one of
+ * these strings into a buffer.
+ *
+ * N.B.  This function DOES NOT perform any range checking on the input.
+ *       If the buffer is too small, the output will be truncated.
+ *
+ * Return:  0, Error and @buffer contents are undefined
+ *          n, String length in characters (excluding NULL)
+ *          buflen-1, String was truncated.
+ */
+static int ldm_get_vstr (const u8 *block, u8 *buffer, int buflen)
+{
+	int length;
+
+	BUG_ON (!block || !buffer);
+
+	length = block[0];
+	if (length >= buflen) {
+		ldm_error ("Truncating string %d -> %d.", length, buflen);
+		length = buflen - 1;
+	}
+	memcpy (buffer, block + 1, length);
+	buffer[length] = 0;
+	return length;
+}
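+
+/*
+ * E.g. a block of 03 'F' 'o' 'o' copies "Foo" into @buffer and returns 3,
+ * provided @buflen is at least 4.
+ */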
+
+
+/**
+ * ldm_parse_cmp3 - Read a raw VBLK Component object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Component object (version 3) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Component VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_cmp3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, r_vstate, r_child, r_parent, r_stripe, r_cols, len;
+	struct vblk_comp *comp;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
+	r_vstate = ldm_relative (buffer, buflen, 0x18, r_name);
+	r_child  = ldm_relative (buffer, buflen, 0x1D, r_vstate);
+	r_parent = ldm_relative (buffer, buflen, 0x2D, r_child);
+
+	if (buffer[0x12] & VBLK_FLAG_COMP_STRIPE) {
+		r_stripe = ldm_relative (buffer, buflen, 0x2E, r_parent);
+		r_cols   = ldm_relative (buffer, buflen, 0x2E, r_stripe);
+		len = r_cols;
+	} else {
+		r_stripe = 0;
+		r_cols   = 0;
+		len = r_parent;
+	}
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_CMP3;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	comp = &vb->vblk.comp;
+	ldm_get_vstr (buffer + 0x18 + r_name, comp->state,
+		sizeof (comp->state));
+	comp->type      = buffer[0x18 + r_vstate];
+	comp->children  = ldm_get_vnum (buffer + 0x1D + r_vstate);
+	comp->parent_id = ldm_get_vnum (buffer + 0x2D + r_child);
+	comp->chunksize = r_stripe ? ldm_get_vnum (buffer+r_parent+0x2E) : 0;
+
+	return TRUE;
+}
+
+/**
+ * ldm_parse_dgr3 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 3) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Disk Group VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_dgr3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, r_diskid, r_id1, r_id2, len;
+	struct vblk_dgrp *dgrp;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
+	r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
+
+	if (buffer[0x12] & VBLK_FLAG_DGR3_IDS) {
+		r_id1 = ldm_relative (buffer, buflen, 0x24, r_diskid);
+		r_id2 = ldm_relative (buffer, buflen, 0x24, r_id1);
+		len = r_id2;
+	} else {
+		r_id1 = 0;
+		r_id2 = 0;
+		len = r_diskid;
+	}
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_DGR3;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	dgrp = &vb->vblk.dgrp;
+	ldm_get_vstr (buffer + 0x18 + r_name, dgrp->disk_id,
+		sizeof (dgrp->disk_id));
+	return TRUE;
+}
+
+/**
+ * ldm_parse_dgr4 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 4) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Disk Group VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	char buf[64];
+	int r_objid, r_name, r_id1, r_id2, len;
+	struct vblk_dgrp *dgrp;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
+
+	if (buffer[0x12] & VBLK_FLAG_DGR4_IDS) {
+		r_id1 = ldm_relative (buffer, buflen, 0x44, r_name);
+		r_id2 = ldm_relative (buffer, buflen, 0x44, r_id1);
+		len = r_id2;
+	} else {
+		r_id1 = 0;
+		r_id2 = 0;
+		len = r_name;
+	}
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_DGR4;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	dgrp = &vb->vblk.dgrp;
+
+	ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf));
+	return TRUE;
+}
+
+/**
+ * ldm_parse_dsk3 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 3) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Disk VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, r_diskid, r_altname, len;
+	struct vblk_disk *disk;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid   = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name    = ldm_relative (buffer, buflen, 0x18, r_objid);
+	r_diskid  = ldm_relative (buffer, buflen, 0x18, r_name);
+	r_altname = ldm_relative (buffer, buflen, 0x18, r_diskid);
+	len = r_altname;
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_DSK3;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	disk = &vb->vblk.disk;
+	ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
+		sizeof (disk->alt_name));
+	if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
+		return FALSE;
+
+	return TRUE;
+}
+
+/**
+ * ldm_parse_dsk4 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 4) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Disk VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, len;
+	struct vblk_disk *disk;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name  = ldm_relative (buffer, buflen, 0x18, r_objid);
+	len     = r_name;
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_DSK4;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	disk = &vb->vblk.disk;
+	memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+	return TRUE;
+}
+
+/**
+ * ldm_parse_prt3 - Read a raw VBLK Partition object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Partition object (version 3) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Partition VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_prt3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, r_size, r_parent, r_diskid, r_index, len;
+	struct vblk_part *part;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
+	r_size   = ldm_relative (buffer, buflen, 0x34, r_name);
+	r_parent = ldm_relative (buffer, buflen, 0x34, r_size);
+	r_diskid = ldm_relative (buffer, buflen, 0x34, r_parent);
+
+	if (buffer[0x12] & VBLK_FLAG_PART_INDEX) {
+		r_index = ldm_relative (buffer, buflen, 0x34, r_diskid);
+		len = r_index;
+	} else {
+		r_index = 0;
+		len = r_diskid;
+	}
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_PRT3;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	part = &vb->vblk.part;
+	part->start         = BE64         (buffer + 0x24 + r_name);
+	part->volume_offset = BE64         (buffer + 0x2C + r_name);
+	part->size          = ldm_get_vnum (buffer + 0x34 + r_name);
+	part->parent_id     = ldm_get_vnum (buffer + 0x34 + r_size);
+	part->disk_id       = ldm_get_vnum (buffer + 0x34 + r_parent);
+	if (vb->flags & VBLK_FLAG_PART_INDEX)
+		part->partnum = buffer[0x35 + r_diskid];
+	else
+		part->partnum = 0;
+
+	return TRUE;
+}
+
+/**
+ * ldm_parse_vol5 - Read a raw VBLK Volume object into a vblk structure
+ * @buffer:  Block of data being worked on
+ * @buflen:  Size of the block of data
+ * @vb:      In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Volume object (version 5) into a vblk structure.
+ *
+ * Return:  TRUE   @vb contains a Volume VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_vol5 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+	int r_objid, r_name, r_vtype, r_child, r_size, r_id1, r_id2, r_size2;
+	int r_drive, len;
+	struct vblk_volu *volu;
+
+	BUG_ON (!buffer || !vb);
+
+	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
+	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
+	r_vtype  = ldm_relative (buffer, buflen, 0x18, r_name);
+	r_child  = ldm_relative (buffer, buflen, 0x2E, r_vtype);
+	r_size   = ldm_relative (buffer, buflen, 0x3E, r_child);
+
+	if (buffer[0x12] & VBLK_FLAG_VOLU_ID1)
+		r_id1 = ldm_relative (buffer, buflen, 0x53, r_size);
+	else
+		r_id1 = r_size;
+
+	if (buffer[0x12] & VBLK_FLAG_VOLU_ID2)
+		r_id2 = ldm_relative (buffer, buflen, 0x53, r_id1);
+	else
+		r_id2 = r_id1;
+
+	if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE)
+		r_size2 = ldm_relative (buffer, buflen, 0x53, r_id2);
+	else
+		r_size2 = r_id2;
+
+	if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE)
+		r_drive = ldm_relative (buffer, buflen, 0x53, r_size2);
+	else
+		r_drive = r_size2;
+
+	len = r_drive;
+	if (len < 0)
+		return FALSE;
+
+	len += VBLK_SIZE_VOL5;
+	if (len != BE32 (buffer + 0x14))
+		return FALSE;
+
+	volu = &vb->vblk.volu;
+
+	ldm_get_vstr (buffer + 0x18 + r_name,  volu->volume_type,
+		sizeof (volu->volume_type));
+	memcpy (volu->volume_state, buffer + 0x19 + r_vtype,
+			sizeof (volu->volume_state));
+	volu->size = ldm_get_vnum (buffer + 0x3E + r_child);
+	volu->partition_type = buffer[0x42 + r_size];
+	memcpy (volu->guid, buffer + 0x43 + r_size,  sizeof (volu->guid));
+	if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+		ldm_get_vstr (buffer + 0x53 + r_size,  volu->drive_hint,
+			sizeof (volu->drive_hint));
+	}
+	return TRUE;
+}
+
+/**
+ * ldm_parse_vblk - Read a raw VBLK object into a vblk structure
+ * @buf:  Block of data being worked on
+ * @len:  Size of the block of data
+ * @vb:   In-memory vblk in which to return information
+ *
+ * Read a raw VBLK object into a vblk structure.  This function just reads the
+ * information common to all VBLK types, then delegates the rest of the work to
+ * helper functions: ldm_parse_*.
+ *
+ * Return:  TRUE   @vb contains a VBLK
+ *          FALSE  @vb contents are not defined
+ */
+static BOOL ldm_parse_vblk (const u8 *buf, int len, struct vblk *vb)
+{
+	BOOL result = FALSE;
+	int r_objid;
+
+	BUG_ON (!buf || !vb);
+
+	r_objid = ldm_relative (buf, len, 0x18, 0);
+	if (r_objid < 0) {
+		ldm_error ("VBLK header is corrupt.");
+		return FALSE;
+	}
+
+	vb->flags  = buf[0x12];
+	vb->type   = buf[0x13];
+	vb->obj_id = ldm_get_vnum (buf + 0x18);
+	ldm_get_vstr (buf+0x18+r_objid, vb->name, sizeof (vb->name));
+
+	switch (vb->type) {
+		case VBLK_CMP3:  result = ldm_parse_cmp3 (buf, len, vb); break;
+		case VBLK_DSK3:  result = ldm_parse_dsk3 (buf, len, vb); break;
+		case VBLK_DSK4:  result = ldm_parse_dsk4 (buf, len, vb); break;
+		case VBLK_DGR3:  result = ldm_parse_dgr3 (buf, len, vb); break;
+		case VBLK_DGR4:  result = ldm_parse_dgr4 (buf, len, vb); break;
+		case VBLK_PRT3:  result = ldm_parse_prt3 (buf, len, vb); break;
+		case VBLK_VOL5:  result = ldm_parse_vol5 (buf, len, vb); break;
+	}
+
+	if (result)
+		ldm_debug ("Parsed VBLK 0x%llx (type: 0x%02x) ok.",
+			 (unsigned long long) vb->obj_id, vb->type);
+	else
+		ldm_error ("Failed to parse VBLK 0x%llx (type: 0x%02x).",
+			(unsigned long long) vb->obj_id, vb->type);
+
+	return result;
+}
+
+
+/**
+ * ldm_ldmdb_add - Adds a raw VBLK entry to the ldmdb database
+ * @data:  Raw VBLK to add to the database
+ * @len:   Size of the raw VBLK
+ * @ldb:   Cache of the database structures
+ *
+ * The VBLKs are sorted into categories.  Partitions are also sorted by offset.
+ *
+ * N.B.  This function does not check the validity of the VBLKs.
+ *
+ * Return:  TRUE   The VBLK was added
+ *          FALSE  An error occurred
+ */
+static BOOL ldm_ldmdb_add (u8 *data, int len, struct ldmdb *ldb)
+{
+	struct vblk *vb;
+	struct list_head *item;
+
+	BUG_ON (!data || !ldb);
+
+	vb = kmalloc (sizeof (*vb), GFP_KERNEL);
+	if (!vb) {
+		ldm_crit ("Out of memory.");
+		return FALSE;
+	}
+
+	if (!ldm_parse_vblk (data, len, vb)) {
+		kfree(vb);
+		return FALSE;			/* Already logged */
+	}
+
+	/* Put vblk into the correct list. */
+	switch (vb->type) {
+	case VBLK_DGR3:
+	case VBLK_DGR4:
+		list_add (&vb->list, &ldb->v_dgrp);
+		break;
+	case VBLK_DSK3:
+	case VBLK_DSK4:
+		list_add (&vb->list, &ldb->v_disk);
+		break;
+	case VBLK_VOL5:
+		list_add (&vb->list, &ldb->v_volu);
+		break;
+	case VBLK_CMP3:
+		list_add (&vb->list, &ldb->v_comp);
+		break;
+	case VBLK_PRT3:
+		/* Sort by the partition's start sector. */
+		list_for_each (item, &ldb->v_part) {
+			struct vblk *v = list_entry (item, struct vblk, list);
+			if ((v->vblk.part.disk_id == vb->vblk.part.disk_id) &&
+			    (v->vblk.part.start > vb->vblk.part.start)) {
+				list_add_tail (&vb->list, &v->list);
+				return TRUE;
+			}
+		}
+		list_add_tail (&vb->list, &ldb->v_part);
+		break;
+	}
+	return TRUE;
+}
+
+/**
+ * ldm_frag_add - Add a VBLK fragment to a list
+ * @data:   Raw fragment to be added to the list
+ * @size:   Size of the raw fragment
+ * @frags:  Linked list of VBLK fragments
+ *
+ * Fragmented VBLKs may not be consecutive in the database, so they are placed
+ * in a list so they can be pieced together later.
+ *
+ * Return:  TRUE   Success, the VBLK was added to the list
+ *          FALSE  Error, a problem occurred
+ */
+static BOOL ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+{
+	struct frag *f;
+	struct list_head *item;
+	int rec, num, group;
+
+	BUG_ON (!data || !frags);
+
+	group = BE32 (data + 0x08);
+	rec   = BE16 (data + 0x0C);
+	num   = BE16 (data + 0x0E);
+	if ((num < 1) || (num > 4)) {
+		ldm_error ("A VBLK claims to have %d parts.", num);
+		return FALSE;
+	}
+
+	list_for_each (item, frags) {
+		f = list_entry (item, struct frag, list);
+		if (f->group == group)
+			goto found;
+	}
+
+	f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
+	if (!f) {
+		ldm_crit ("Out of memory.");
+		return FALSE;
+	}
+
+	f->group = group;
+	f->num   = num;
+	f->rec   = rec;
+	f->map   = 0xFF << num;
+
+	list_add_tail (&f->list, frags);
+found:
+	if (f->map & (1 << rec)) {
+		ldm_error ("Duplicate VBLK, part %d.", rec);
+		f->map &= 0x7F;			/* Mark the group as broken */
+		return FALSE;
+	}
+
+	f->map |= (1 << rec);
+
+	if (num > 0) {
+		data += VBLK_SIZE_HEAD;
+		size -= VBLK_SIZE_HEAD;
+	}
+	memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
+
+	return TRUE;
+}
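+
+/*
+ * Bitmap note (illustrative): for a two-part VBLK (num = 2), f->map starts as
+ * 0xFF << 2 = 0xFC; receiving records 0 and 1 sets the two low bits, giving
+ * the "complete" value 0xFF that ldm_frag_commit() checks for.  A duplicate
+ * record clears bit 7, so a broken group can never reach 0xFF.
+ */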
+
+/**
+ * ldm_frag_free - Free a linked list of VBLK fragments
+ * @list:  Linked list of fragments
+ *
+ * Free a linked list of VBLK fragments
+ *
+ * Return:  none
+ */
+static void ldm_frag_free (struct list_head *list)
+{
+	struct list_head *item, *tmp;
+
+	BUG_ON (!list);
+
+	list_for_each_safe (item, tmp, list)
+		kfree (list_entry (item, struct frag, list));
+}
+
+/**
+ * ldm_frag_commit - Validate fragmented VBLKs and add them to the database
+ * @frags:  Linked list of VBLK fragments
+ * @ldb:    Cache of the database structures
+ *
+ * Now that all the fragmented VBLKs have been collected, they must be added to
+ * the database for later use.
+ *
+ * Return:  TRUE   All the fragments were added successfully
+ *          FALSE  One or more of the fragments were invalid
+ */
+static BOOL ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
+{
+	struct frag *f;
+	struct list_head *item;
+
+	BUG_ON (!frags || !ldb);
+
+	list_for_each (item, frags) {
+		f = list_entry (item, struct frag, list);
+
+		if (f->map != 0xFF) {
+			ldm_error ("VBLK group %d is incomplete (0x%02x).",
+				f->group, f->map);
+			return FALSE;
+		}
+
+		if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb))
+			return FALSE;		/* Already logged */
+	}
+	return TRUE;
+}
+
+/**
+ * ldm_get_vblks - Read the on-disk database of VBLKs into memory
+ * @bdev:  Device holding the LDM Database
+ * @base:  Offset, into @bdev, of the database
+ * @ldb:   Cache of the database structures
+ *
+ * To use the information from the VBLKs, they need to be read from the disk,
+ * unpacked and validated.  We cache them in @ldb according to their type.
+ *
+ * Return:  TRUE   All the VBLKs were read successfully
+ *          FALSE  An error occurred
+ */
+static BOOL ldm_get_vblks (struct block_device *bdev, unsigned long base,
+			   struct ldmdb *ldb)
+{
+	int size, perbuf, skip, finish, s, v, recs;
+	u8 *data = NULL;
+	Sector sect;
+	BOOL result = FALSE;
+	LIST_HEAD (frags);
+
+	BUG_ON (!bdev || !ldb);
+
+	size   = ldb->vm.vblk_size;
+	perbuf = 512 / size;
+	skip   = ldb->vm.vblk_offset >> 9;		/* Bytes to sectors */
+	finish = (size * ldb->vm.last_vblk_seq) >> 9;
+
+	for (s = skip; s < finish; s++) {		/* For each sector */
+		data = read_dev_sector (bdev, base + OFF_VMDB + s, &sect);
+		if (!data) {
+			ldm_crit ("Disk read failed.");
+			goto out;
+		}
+
+		for (v = 0; v < perbuf; v++, data+=size) {  /* For each vblk */
+			if (MAGIC_VBLK != BE32 (data)) {
+				ldm_error ("Expected to find a VBLK.");
+				goto out;
+			}
+
+			recs = BE16 (data + 0x0E);	/* Number of records */
+			if (recs == 1) {
+				if (!ldm_ldmdb_add (data, size, ldb))
+					goto out;	/* Already logged */
+			} else if (recs > 1) {
+				if (!ldm_frag_add (data, size, &frags))
+					goto out;	/* Already logged */
+			}
+			/* else Record is not in use, ignore it. */
+		}
+		put_dev_sector (sect);
+		data = NULL;
+	}
+
+	result = ldm_frag_commit (&frags, ldb);	/* Failures, already logged */
+out:
+	if (data)
+		put_dev_sector (sect);
+	ldm_frag_free (&frags);
+
+	return result;
+}
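+
+/*
+ * Sizing note (illustrative, assuming the common on-disk layout where VBLKs
+ * are 128 bytes): perbuf = 512 / 128 = 4 records per sector, skip converts
+ * the byte offset of the first record into sectors, and the loop walks
+ * last_vblk_seq records' worth of sectors, sending single-record VBLKs to
+ * ldm_ldmdb_add() and multi-record ones to the fragment list.
+ */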
+
+/**
+ * ldm_free_vblks - Free a linked list of vblks
+ * @lh:  Head of a linked list of struct vblk
+ *
+ * Free a list of vblks and free the memory used to maintain the list.
+ *
+ * Return:  none
+ */
+static void ldm_free_vblks (struct list_head *lh)
+{
+	struct list_head *item, *tmp;
+
+	BUG_ON (!lh);
+
+	list_for_each_safe (item, tmp, lh)
+		kfree (list_entry (item, struct vblk, list));
+}
+
+
+/**
+ * ldm_partition - Find out whether a device is a dynamic disk and handle it
+ * @pp:    List of the partitions parsed so far
+ * @bdev:  Device holding the LDM Database
+ *
+ * This determines whether the device @bdev is a dynamic disk and, if so,
+ * creates the necessary partitions in the parsed partition list @pp.
+ *
+ * We create a dummy device 1, which contains the LDM database, and then create
+ * each partition described by the LDM database in sequence as devices 2+. For
+ * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
+ * and so on: the actual data-containing partitions.
+ *
+ * Return:  1 Success, @bdev is a dynamic disk and we handled it
+ *          0 Success, @bdev is not a dynamic disk
+ *         -1 An error occurred before enough information had been read
+ *            Or @bdev is a dynamic disk, but it may be corrupted
+ */
+int ldm_partition (struct parsed_partitions *pp, struct block_device *bdev)
+{
+	struct ldmdb  *ldb;
+	unsigned long base;
+	int result = -1;
+
+	BUG_ON (!pp || !bdev);
+
+	/* Look for signs of a Dynamic Disk */
+	if (!ldm_validate_partition_table (bdev))
+		return 0;
+
+	ldb = kmalloc (sizeof (*ldb), GFP_KERNEL);
+	if (!ldb) {
+		ldm_crit ("Out of memory.");
+		goto out;
+	}
+
+	/* Parse and check privheads. */
+	if (!ldm_validate_privheads (bdev, &ldb->ph))
+		goto out;		/* Already logged */
+
+	/* All further references are relative to base (database start). */
+	base = ldb->ph.config_start;
+
+	/* Parse and check tocs and vmdb. */
+	if (!ldm_validate_tocblocks (bdev, base, ldb) ||
+	    !ldm_validate_vmdb      (bdev, base, ldb))
+	    	goto out;		/* Already logged */
+
+	/* Initialize vblk lists in ldmdb struct */
+	INIT_LIST_HEAD (&ldb->v_dgrp);
+	INIT_LIST_HEAD (&ldb->v_disk);
+	INIT_LIST_HEAD (&ldb->v_volu);
+	INIT_LIST_HEAD (&ldb->v_comp);
+	INIT_LIST_HEAD (&ldb->v_part);
+
+	if (!ldm_get_vblks (bdev, base, ldb)) {
+		ldm_crit ("Failed to read the VBLKs from the database.");
+		goto cleanup;
+	}
+
+	/* Finally, create the data partition devices. */
+	if (ldm_create_data_partitions (pp, ldb)) {
+		ldm_debug ("Parsed LDM database successfully.");
+		result = 1;
+	}
+	/* else Already logged */
+
+cleanup:
+	ldm_free_vblks (&ldb->v_dgrp);
+	ldm_free_vblks (&ldb->v_disk);
+	ldm_free_vblks (&ldb->v_volu);
+	ldm_free_vblks (&ldb->v_comp);
+	ldm_free_vblks (&ldb->v_part);
+out:
+	kfree (ldb);
+	return result;
+}
+
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
new file mode 100644
index 0000000..6e8d795
--- /dev/null
+++ b/fs/partitions/ldm.h
@@ -0,0 +1,220 @@
+/**
+ * ldm - Part of the Linux-NTFS project.
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (C) 2001      Anton Altaparmakov <aia21@cantab.net>
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://linux-ntfs.sf.net/ldm
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS source
+ * in the file COPYING); if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _FS_PT_LDM_H_
+#define _FS_PT_LDM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+struct parsed_partitions;
+
+/* Magic numbers in CPU format. */
+#define MAGIC_VMDB	0x564D4442		/* VMDB */
+#define MAGIC_VBLK	0x56424C4B		/* VBLK */
+#define MAGIC_PRIVHEAD	0x5052495648454144ULL	/* PRIVHEAD */
+#define MAGIC_TOCBLOCK	0x544F43424C4F434BULL	/* TOCBLOCK */
+
+/* The defined vblk types. */
+#define VBLK_VOL5		0x51		/* Volume,     version 5 */
+#define VBLK_CMP3		0x32		/* Component,  version 3 */
+#define VBLK_PRT3		0x33		/* Partition,  version 3 */
+#define VBLK_DSK3		0x34		/* Disk,       version 3 */
+#define VBLK_DSK4		0x44		/* Disk,       version 4 */
+#define VBLK_DGR3		0x35		/* Disk Group, version 3 */
+#define VBLK_DGR4		0x45		/* Disk Group, version 4 */
+
+/* vblk flags indicating extra information will be present */
+#define	VBLK_FLAG_COMP_STRIPE	0x10
+#define	VBLK_FLAG_PART_INDEX	0x08
+#define	VBLK_FLAG_DGR3_IDS	0x08
+#define	VBLK_FLAG_DGR4_IDS	0x08
+#define	VBLK_FLAG_VOLU_ID1	0x08
+#define	VBLK_FLAG_VOLU_ID2	0x20
+#define	VBLK_FLAG_VOLU_SIZE	0x80
+#define	VBLK_FLAG_VOLU_DRIVE	0x02
+
+/* size of a vblk's static parts */
+#define VBLK_SIZE_HEAD		16
+#define VBLK_SIZE_CMP3		22		/* Name and version */
+#define VBLK_SIZE_DGR3		12
+#define VBLK_SIZE_DGR4		44
+#define VBLK_SIZE_DSK3		12
+#define VBLK_SIZE_DSK4		45
+#define VBLK_SIZE_PRT3		28
+#define VBLK_SIZE_VOL5		59
+
+/* component types */
+#define COMP_STRIPE		0x01		/* Stripe-set */
+#define COMP_BASIC		0x02		/* Basic disk */
+#define COMP_RAID		0x03		/* Raid-set */
+
+/* Other constants. */
+#define LDM_DB_SIZE		2048		/* Size in sectors (= 1MiB). */
+
+#define OFF_PRIV1		6		/* Offset of the first privhead
+						   relative to the start of the
+						   device in sectors */
+
+/* Offsets to structures within the LDM Database in sectors. */
+#define OFF_PRIV2		1856		/* Backup private headers. */
+#define OFF_PRIV3		2047
+
+#define OFF_TOCB1		1		/* Tables of contents. */
+#define OFF_TOCB2		2
+#define OFF_TOCB3		2045
+#define OFF_TOCB4		2046
+
+#define OFF_VMDB		17		/* List of partitions. */
+
+#define WIN2K_DYNAMIC_PARTITION	0x42		/* Formerly SFS (Landis). */
+
+#define TOC_BITMAP1		"config"	/* Names of the two defined */
+#define TOC_BITMAP2		"log"		/* bitmaps in the TOCBLOCK. */
+
+/* Most numbers we deal with are big-endian and won't be aligned. */
+#define BE16(x)			((u16)be16_to_cpu(get_unaligned((__be16*)(x))))
+#define BE32(x)			((u32)be32_to_cpu(get_unaligned((__be32*)(x))))
+#define BE64(x)			((u64)be64_to_cpu(get_unaligned((__be64*)(x))))
+
+/* Borrowed from msdos.c */
+#define SYS_IND(p)		(get_unaligned(&(p)->sys_ind))
+
+struct frag {				/* VBLK Fragment handling */
+	struct list_head list;
+	u32		group;
+	u8		num;		/* Total number of records */
+	u8		rec;		/* This is record number n */
+	u8		map;		/* Which portions are in use */
+	u8		data[0];
+};
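+
+/*
+ * Note: data[0] is a zero-length array; ldm_frag_add() allocates
+ * sizeof(struct frag) + size * num bytes so the records of one group can be
+ * assembled contiguously after these header fields.
+ */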
+
+/* In memory LDM database structures. */
+
+#define GUID_SIZE		16
+
+struct privhead {			/* Offsets and sizes are in sectors. */
+	u16	ver_major;
+	u16	ver_minor;
+	u64	logical_disk_start;
+	u64	logical_disk_size;
+	u64	config_start;
+	u64	config_size;
+	u8	disk_id[GUID_SIZE];
+};
+
+struct tocblock {			/* We have exactly two bitmaps. */
+	u8	bitmap1_name[16];
+	u64	bitmap1_start;
+	u64	bitmap1_size;
+	u8	bitmap2_name[16];
+	u64	bitmap2_start;
+	u64	bitmap2_size;
+};
+
+struct vmdb {				/* VMDB: The database header */
+	u16	ver_major;
+	u16	ver_minor;
+	u32	vblk_size;
+	u32	vblk_offset;
+	u32	last_vblk_seq;
+};
+
+struct vblk_comp {			/* VBLK Component */
+	u8	state[16];
+	u64	parent_id;
+	u8	type;
+	u8	children;
+	u16	chunksize;
+};
+
+struct vblk_dgrp {			/* VBLK Disk Group */
+	u8	disk_id[64];
+};
+
+struct vblk_disk {			/* VBLK Disk */
+	u8	disk_id[GUID_SIZE];
+	u8	alt_name[128];
+};
+
+struct vblk_part {			/* VBLK Partition */
+	u64	start;
+	u64	size;			/* start, size and vol_off in sectors */
+	u64	volume_offset;
+	u64	parent_id;
+	u64	disk_id;
+	u8	partnum;
+};
+
+struct vblk_volu {			/* VBLK Volume */
+	u8	volume_type[16];
+	u8	volume_state[16];
+	u8	guid[16];
+	u8	drive_hint[4];
+	u64	size;
+	u8	partition_type;
+};
+
+struct vblk_head {			/* VBLK standard header */
+	u32 group;
+	u16 rec;
+	u16 nrec;
+};
+
+struct vblk {				/* Generalised VBLK */
+	u8	name[64];
+	u64	obj_id;
+	u32	sequence;
+	u8	flags;
+	u8	type;
+	union {
+		struct vblk_comp comp;
+		struct vblk_dgrp dgrp;
+		struct vblk_disk disk;
+		struct vblk_part part;
+		struct vblk_volu volu;
+	} vblk;
+	struct list_head list;
+};
+
+struct ldmdb {				/* Cache of the database */
+	struct privhead ph;
+	struct tocblock toc;
+	struct vmdb     vm;
+	struct list_head v_dgrp;
+	struct list_head v_disk;
+	struct list_head v_volu;
+	struct list_head v_comp;
+	struct list_head v_part;
+};
+
+int ldm_partition (struct parsed_partitions *state, struct block_device *bdev);
+
+#endif /* _FS_PT_LDM_H_ */
+
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
new file mode 100644
index 0000000..bb22cdd
--- /dev/null
+++ b/fs/partitions/mac.c
@@ -0,0 +1,130 @@
+/*
+ *  fs/partitions/mac.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include "check.h"
+#include "mac.h"
+
+#ifdef CONFIG_PPC_PMAC
+extern void note_bootable_part(dev_t dev, int part, int goodness);
+#endif
+
+/*
+ * Code to understand MacOS partition tables.
+ */
+
+static inline void mac_fix_string(char *stg, int len)
+{
+	int i;
+
+	for (i = len - 1; i >= 0 && stg[i] == ' '; i--)
+		stg[i] = 0;
+}
+
+int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int slot = 1;
+	Sector sect;
+	unsigned char *data;
+	int blk, blocks_in_map;
+	unsigned secsize;
+#ifdef CONFIG_PPC_PMAC
+	int found_root = 0;
+	int found_root_goodness = 0;
+#endif
+	struct mac_partition *part;
+	struct mac_driver_desc *md;
+
+	/* Get 0th block and look at the first partition map entry. */
+	md = (struct mac_driver_desc *) read_dev_sector(bdev, 0, &sect);
+	if (!md)
+		return -1;
+	if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	secsize = be16_to_cpu(md->block_size);
+	put_dev_sector(sect);
+	data = read_dev_sector(bdev, secsize/512, &sect);
+	if (!data)
+		return -1;
+	part = (struct mac_partition *) (data + secsize%512);
+	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+		put_dev_sector(sect);
+		return 0;		/* not a MacOS disk */
+	}
+	printk(" [mac]");
+	blocks_in_map = be32_to_cpu(part->map_count);
+	for (blk = 1; blk <= blocks_in_map; ++blk) {
+		int pos = blk * secsize;
+		put_dev_sector(sect);
+		data = read_dev_sector(bdev, pos/512, &sect);
+		if (!data)
+			return -1;
+		part = (struct mac_partition *) (data + pos%512);
+		if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC)
+			break;
+		put_partition(state, slot,
+			be32_to_cpu(part->start_block) * (secsize/512),
+			be32_to_cpu(part->block_count) * (secsize/512));
+
+#ifdef CONFIG_PPC_PMAC
+		/*
+		 * If this is the first bootable partition, tell the
+		 * setup code, in case it wants to make this the root.
+		 */
+		if (_machine == _MACH_Pmac) {
+			int goodness = 0;
+
+			mac_fix_string(part->processor, 16);
+			mac_fix_string(part->name, 32);
+			mac_fix_string(part->type, 32);
+
+			if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE)
+			    && strcasecmp(part->processor, "powerpc") == 0)
+				goodness++;
+
+			if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0
+			    || (strnicmp(part->type, "Linux", 5) == 0
+			        && strcasecmp(part->type, "Linux_swap") != 0)) {
+				int i, l;
+
+				goodness++;
+				l = strlen(part->name);
+				if (strcmp(part->name, "/") == 0)
+					goodness++;
+				for (i = 0; i <= l - 4; ++i) {
+					if (strnicmp(part->name + i, "root",
+						     4) == 0) {
+						goodness += 2;
+						break;
+					}
+				}
+				if (strnicmp(part->name, "swap", 4) == 0)
+					goodness--;
+			}
+
+			if (goodness > found_root_goodness) {
+				found_root = blk;
+				found_root_goodness = goodness;
+			}
+		}
+#endif /* CONFIG_PPC_PMAC */
+
+		++slot;
+	}
+#ifdef CONFIG_PPC_PMAC
+	if (found_root_goodness)
+		note_bootable_part(bdev->bd_dev, found_root, found_root_goodness);
+#endif
+
+	put_dev_sector(sect);
+	printk("\n");
+	return 1;
+}
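+
+/*
+ * Unit note (illustrative): start_block and block_count are expressed in
+ * units of the driver descriptor's block_size, so the code above scales them
+ * by secsize/512 to get 512-byte sectors.  For a block_size of 2048 that
+ * factor is 4; for the common 512 it is 1.
+ */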
diff --git a/fs/partitions/mac.h b/fs/partitions/mac.h
new file mode 100644
index 0000000..bbf26e1
--- /dev/null
+++ b/fs/partitions/mac.h
@@ -0,0 +1,44 @@
+/*
+ *  fs/partitions/mac.h
+ */
+
+#define MAC_PARTITION_MAGIC	0x504d
+
+/* type field value for A/UX or other Unix partitions */
+#define APPLE_AUX_TYPE	"Apple_UNIX_SVR2"
+
+struct mac_partition {
+	__be16	signature;	/* expected to be MAC_PARTITION_MAGIC */
+	__be16	res1;
+	__be32	map_count;	/* # blocks in partition map */
+	__be32	start_block;	/* absolute starting block # of partition */
+	__be32	block_count;	/* number of blocks in partition */
+	char	name[32];	/* partition name */
+	char	type[32];	/* string type description */
+	__be32	data_start;	/* rel block # of first data block */
+	__be32	data_count;	/* number of data blocks */
+	__be32	status;		/* partition status bits */
+	__be32	boot_start;
+	__be32	boot_size;
+	__be32	boot_load;
+	__be32	boot_load2;
+	__be32	boot_entry;
+	__be32	boot_entry2;
+	__be32	boot_cksum;
+	char	processor[16];	/* identifies ISA of boot */
+	/* there is more stuff after this that we don't need */
+};
+
+#define MAC_STATUS_BOOTABLE	8	/* partition is bootable */
+
+#define MAC_DRIVER_MAGIC	0x4552
+
+/* Driver descriptor structure, in block 0 */
+struct mac_driver_desc {
+	__be16	signature;	/* expected to be MAC_DRIVER_MAGIC */
+	__be16	block_size;
+	__be32	block_count;
+    /* ... more stuff */
+};
+
+int mac_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
new file mode 100644
index 0000000..17ee1b4
--- /dev/null
+++ b/fs/partitions/msdos.c
@@ -0,0 +1,479 @@
+/*
+ *  fs/partitions/msdos.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *
+ *  Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ *  in the early extended-partition checks and added DM partitions
+ *
+ *  Support for DiskManager v6.0x added by Mark Lord,
+ *  with information provided by OnTrack.  This now works for linux fdisk
+ *  and LILO, as well as loadlin and bootln.  Note that disks other than
+ *  /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ *  More flexible handling of extended partitions - aeb, 950831
+ *
+ *  Check partition table on IDE disks for common CHS translations
+ *
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/config.h>
+
+#include "check.h"
+#include "msdos.h"
+#include "efi.h"
+
+/*
+ * Many architectures don't like unaligned accesses, while
+ * the nr_sects and start_sect partition table entries are
+ * at a 2 (mod 4) address.
+ */
+#include <asm/unaligned.h>
+
+#define SYS_IND(p)	(get_unaligned(&p->sys_ind))
+#define NR_SECTS(p)	({ __typeof__(p->nr_sects) __a =	\
+				get_unaligned(&p->nr_sects);	\
+				le32_to_cpu(__a); \
+			})
+
+#define START_SECT(p)	({ __typeof__(p->start_sect) __a =	\
+				get_unaligned(&p->start_sect);	\
+				le32_to_cpu(__a); \
+			})
+
+static inline int is_extended_partition(struct partition *p)
+{
+	return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+		SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+		SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#define MSDOS_LABEL_MAGIC1	0x55
+#define MSDOS_LABEL_MAGIC2	0xAA
+
+static inline int
+msdos_magic_present(unsigned char *p)
+{
+	return (p[0] == MSDOS_LABEL_MAGIC1 && p[1] == MSDOS_LABEL_MAGIC2);
+}
+
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries.  The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start).  The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void
+parse_extended(struct parsed_partitions *state, struct block_device *bdev,
+			u32 first_sector, u32 first_size)
+{
+	struct partition *p;
+	Sector sect;
+	unsigned char *data;
+	u32 this_sector, this_size;
+	int sector_size = bdev_hardsect_size(bdev) / 512;
+	int loopct = 0;		/* number of links followed
+				   without finding a data partition */
+	int i;
+
+	this_sector = first_sector;
+	this_size = first_size;
+
+	while (1) {
+		if (++loopct > 100)
+			return;
+		if (state->next == state->limit)
+			return;
+		data = read_dev_sector(bdev, this_sector, &sect);
+		if (!data)
+			return;
+
+		if (!msdos_magic_present(data + 510))
+			goto done; 
+
+		p = (struct partition *) (data + 0x1be);
+
+		/*
+		 * Usually, the first entry is the real data partition,
+		 * the 2nd entry is the next extended partition, or empty,
+		 * and the 3rd and 4th entries are unused.
+		 * However, DRDOS sometimes has the extended partition as
+		 * the first entry (when the data partition is empty),
+		 * and OS/2 seems to use all four entries.
+		 */
+
+		/* 
+		 * First process the data partition(s)
+		 */
+		for (i=0; i<4; i++, p++) {
+			u32 offs, size, next;
+
+			if (SYS_IND(p) == 0)
+				continue;
+			if (!NR_SECTS(p) || is_extended_partition(p))
+				continue;
+
+			/* Check the 3rd and 4th entries -
+			   these sometimes contain random garbage */
+			offs = START_SECT(p)*sector_size;
+			size = NR_SECTS(p)*sector_size;
+			next = this_sector + offs;
+			if (i >= 2) {
+				if (offs + size > this_size)
+					continue;
+				if (next < first_sector)
+					continue;
+				if (next + size > first_sector + first_size)
+					continue;
+			}
+
+			put_partition(state, state->next, next, size);
+			if (SYS_IND(p) == LINUX_RAID_PARTITION)
+				state->parts[state->next].flags = 1;
+			loopct = 0;
+			if (++state->next == state->limit)
+				goto done;
+		}
+		/*
+		 * Next, process the (first) extended partition, if present.
+		 * (So far, there seems to be no reason to make
+		 *  parse_extended()  recursive and allow a tree
+		 *  of extended partitions.)
+		 * It should be a link to the next logical partition.
+		 */
+		p -= 4;
+		for (i=0; i<4; i++, p++)
+			if (NR_SECTS(p) && is_extended_partition(p))
+				break;
+		if (i == 4)
+			goto done;	 /* nothing left to do */
+
+		this_sector = first_sector + START_SECT(p) * sector_size;
+		this_size = NR_SECTS(p) * sector_size;
+		put_dev_sector(sect);
+	}
+done:
+	put_dev_sector(sect);
+}
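+
+/*
+ * Offset convention (illustrative): within each link sector, a data entry's
+ * start is relative to that sector (next = this_sector + offs), while a link
+ * entry's start is relative to the start of the whole extended partition
+ * (this_sector = first_sector + START_SECT(p) * sector_size).  So if the
+ * extended partition begins at sector 1000 and a link entry says 2048, the
+ * next partition table is read from sector 3048.
+ */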
+
+/* james@bpgc.com: Solaris has a nasty indicator: 0x82 which also
+   indicates linux swap.  Be careful before believing this is Solaris. */
+
+static void
+parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
+			u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_SOLARIS_X86_PARTITION
+	Sector sect;
+	struct solaris_x86_vtoc *v;
+	int i;
+
+	v = (struct solaris_x86_vtoc *)read_dev_sector(bdev, offset+1, &sect);
+	if (!v)
+		return;
+	if (le32_to_cpu(v->v_sanity) != SOLARIS_X86_VTOC_SANE) {
+		put_dev_sector(sect);
+		return;
+	}
+	printk(" %s%d: <solaris:", state->name, origin);
+	if (le32_to_cpu(v->v_version) != 1) {
+		printk("  cannot handle version %d vtoc>\n",
+			le32_to_cpu(v->v_version));
+		put_dev_sector(sect);
+		return;
+	}
+	for (i=0; i<SOLARIS_X86_NUMSLICE && state->next<state->limit; i++) {
+		struct solaris_x86_slice *s = &v->v_slice[i];
+		if (s->s_size == 0)
+			continue;
+		printk(" [s%d]", i);
+		/* solaris partitions are relative to current MS-DOS
+		 * one; must add the offset of the current partition */
+		put_partition(state, state->next++,
+				 le32_to_cpu(s->s_start)+offset,
+				 le32_to_cpu(s->s_size));
+	}
+	put_dev_sector(sect);
+	printk(" >\n");
+#endif
+}
+
+#if defined(CONFIG_BSD_DISKLABEL) || defined(CONFIG_NEC98_PARTITION)
+/* 
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+void
+parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin, char *flavour,
+		int max_partitions)
+{
+	Sector sect;
+	struct bsd_disklabel *l;
+	struct bsd_partition *p;
+
+	l = (struct bsd_disklabel *)read_dev_sector(bdev, offset+1, &sect);
+	if (!l)
+		return;
+	if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) {
+		put_dev_sector(sect);
+		return;
+	}
+	printk(" %s%d: <%s:", state->name, origin, flavour);
+
+	if (le16_to_cpu(l->d_npartitions) < max_partitions)
+		max_partitions = le16_to_cpu(l->d_npartitions);
+	for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
+		u32 bsd_start, bsd_size;
+
+		if (state->next == state->limit)
+			break;
+		if (p->p_fstype == BSD_FS_UNUSED) 
+			continue;
+		bsd_start = le32_to_cpu(p->p_offset);
+		bsd_size = le32_to_cpu(p->p_size);
+		if (offset == bsd_start && size == bsd_size)
+			/* full parent partition, we have it already */
+			continue;
+		if (offset > bsd_start || offset+size < bsd_start+bsd_size) {
+			printk("bad subpartition - ignored\n");
+			continue;
+		}
+		put_partition(state, state->next++, bsd_start, bsd_size);
+	}
+	put_dev_sector(sect);
+	if (le16_to_cpu(l->d_npartitions) > max_partitions)
+		printk(" (ignored %d more)",
+		       le16_to_cpu(l->d_npartitions) - max_partitions);
+	printk(" >\n");
+}
+#endif
+
+static void
+parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+	parse_bsd(state, bdev, offset, size, origin,
+			"bsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void
+parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+	parse_bsd(state, bdev, offset, size, origin,
+			"netbsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void
+parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+	parse_bsd(state, bdev, offset, size, origin,
+			"openbsd", OPENBSD_MAXPARTITIONS);
+#endif
+}
+
+/*
+ * Create devices for Unixware partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+static void
+parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_UNIXWARE_DISKLABEL
+	Sector sect;
+	struct unixware_disklabel *l;
+	struct unixware_slice *p;
+
+	l = (struct unixware_disklabel *)read_dev_sector(bdev, offset+29, &sect);
+	if (!l)
+		return;
+	if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC ||
+	    le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) {
+		put_dev_sector(sect);
+		return;
+	}
+	printk(" %s%d: <unixware:", state->name, origin);
+	p = &l->vtoc.v_slice[1];
+	/* I omit the 0th slice as it is the same as whole disk. */
+	while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) {
+		if (state->next == state->limit)
+			break;
+
+		if (p->s_label != UNIXWARE_FS_UNUSED)
+			put_partition(state, state->next++,
+						START_SECT(p), NR_SECTS(p));
+		p++;
+	}
+	put_dev_sector(sect);
+	printk(" >\n");
+#endif
+}
+
+/*
+ * Minix 2.0.0/2.0.2 subpartition support.
+ * Anand Krishnamurthy <anandk@wiproge.med.ge.com>
+ * Rajeev V. Pillai    <rajeevvp@yahoo.com>
+ */
+static void
+parse_minix(struct parsed_partitions *state, struct block_device *bdev,
+		u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_MINIX_SUBPARTITION
+	Sector sect;
+	unsigned char *data;
+	struct partition *p;
+	int i;
+
+	data = read_dev_sector(bdev, offset, &sect);
+	if (!data)
+		return;
+
+	p = (struct partition *)(data + 0x1be);
+
+	/* The first sector of a Minix partition can have either
+	 * a secondary MBR describing its subpartitions, or
+	 * the normal boot sector. */
+	if (msdos_magic_present (data + 510) &&
+	    SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
+
+		printk(" %s%d: <minix:", state->name, origin);
+		for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) {
+			if (state->next == state->limit)
+				break;
+			/* add each partition in use */
+			if (SYS_IND(p) == MINIX_PARTITION)
+				put_partition(state, state->next++,
+					      START_SECT(p), NR_SECTS(p));
+		}
+		printk(" >\n");
+	}
+	put_dev_sector(sect);
+#endif /* CONFIG_MINIX_SUBPARTITION */
+}
+
+static struct {
+	unsigned char id;
+	void (*parse)(struct parsed_partitions *, struct block_device *,
+			u32, u32, int);
+} subtypes[] = {
+	{FREEBSD_PARTITION, parse_freebsd},
+	{NETBSD_PARTITION, parse_netbsd},
+	{OPENBSD_PARTITION, parse_openbsd},
+	{MINIX_PARTITION, parse_minix},
+	{UNIXWARE_PARTITION, parse_unixware},
+	{SOLARIS_X86_PARTITION, parse_solaris_x86},
+	{NEW_SOLARIS_X86_PARTITION, parse_solaris_x86},
+	{0, NULL},
+};
+ 
+int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int sector_size = bdev_hardsect_size(bdev) / 512;
+	Sector sect;
+	unsigned char *data;
+	struct partition *p;
+	int slot;
+
+	data = read_dev_sector(bdev, 0, &sect);
+	if (!data)
+		return -1;
+	if (!msdos_magic_present(data + 510)) {
+		put_dev_sector(sect);
+		return 0;
+	}
+
+	/*
+	 * Now that the 55aa signature is present, this is probably
+	 * either the boot sector of a FAT filesystem or a DOS-type
+	 * partition table. Reject this in case the boot indicator
+	 * is not 0 or 0x80.
+	 */
+	p = (struct partition *) (data + 0x1be);
+	for (slot = 1; slot <= 4; slot++, p++) {
+		if (p->boot_ind != 0 && p->boot_ind != 0x80) {
+			put_dev_sector(sect);
+			return 0;
+		}
+	}
+
+#ifdef CONFIG_EFI_PARTITION
+	p = (struct partition *) (data + 0x1be);
+	for (slot = 1 ; slot <= 4 ; slot++, p++) {
+		/* If this is an EFI GPT disk, msdos should ignore it. */
+		if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
+			put_dev_sector(sect);
+			return 0;
+		}
+	}
+#endif
+	p = (struct partition *) (data + 0x1be);
+
+	/*
+	 * Look for partitions in two passes:
+	 * First find the primary and DOS-type extended partitions.
+	 * On the second pass look inside *BSD, Unixware and Solaris partitions.
+	 */
+
+	state->next = 5;
+	for (slot = 1 ; slot <= 4 ; slot++, p++) {
+		u32 start = START_SECT(p)*sector_size;
+		u32 size = NR_SECTS(p)*sector_size;
+		if (SYS_IND(p) == 0)
+			continue;
+		if (!size)
+			continue;
+		if (is_extended_partition(p)) {
+			/* prevent someone doing mkfs or mkswap on an
+			   extended partition, but leave room for LILO */
+			put_partition(state, slot, start, size == 1 ? 1 : 2);
+			printk(" <");
+			parse_extended(state, bdev, start, size);
+			printk(" >");
+			continue;
+		}
+		put_partition(state, slot, start, size);
+		if (SYS_IND(p) == LINUX_RAID_PARTITION)
+			state->parts[slot].flags = 1;
+		if (SYS_IND(p) == DM6_PARTITION)
+			printk("[DM]");
+		if (SYS_IND(p) == EZD_PARTITION)
+			printk("[EZD]");
+	}
+
+	printk("\n");
+
+	/* second pass - output for each on a separate line */
+	p = (struct partition *) (0x1be + data);
+	for (slot = 1 ; slot <= 4 ; slot++, p++) {
+		unsigned char id = SYS_IND(p);
+		int n;
+
+		if (!NR_SECTS(p))
+			continue;
+
+		for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
+			;
+
+		if (!subtypes[n].parse)
+			continue;
+		subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
+						NR_SECTS(p)*sector_size, slot);
+	}
+	put_dev_sector(sect);
+	return 1;
+}
diff --git a/fs/partitions/msdos.h b/fs/partitions/msdos.h
new file mode 100644
index 0000000..01e5e0b
--- /dev/null
+++ b/fs/partitions/msdos.h
@@ -0,0 +1,8 @@
+/*
+ *  fs/partitions/msdos.h
+ */
+
+#define MSDOS_LABEL_MAGIC		0xAA55
+
+int msdos_partition(struct parsed_partitions *state, struct block_device *bdev);
+
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
new file mode 100644
index 0000000..c05c17bc
--- /dev/null
+++ b/fs/partitions/osf.c
@@ -0,0 +1,78 @@
+/*
+ *  fs/partitions/osf.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "osf.h"
+
+int osf_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int i;
+	int slot = 1;
+	Sector sect;
+	unsigned char *data;
+	struct disklabel {
+		__le32 d_magic;
+		__le16 d_type,d_subtype;
+		u8 d_typename[16];
+		u8 d_packname[16];
+		__le32 d_secsize;
+		__le32 d_nsectors;
+		__le32 d_ntracks;
+		__le32 d_ncylinders;
+		__le32 d_secpercyl;
+		__le32 d_secprtunit;
+		__le16 d_sparespertrack;
+		__le16 d_sparespercyl;
+		__le32 d_acylinders;
+		__le16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+		__le32 d_headswitch, d_trkseek, d_flags;
+		__le32 d_drivedata[5];
+		__le32 d_spare[5];
+		__le32 d_magic2;
+		__le16 d_checksum;
+		__le16 d_npartitions;
+		__le32 d_bbsize, d_sbsize;
+		struct d_partition {
+			__le32 p_size;
+			__le32 p_offset;
+			__le32 p_fsize;
+			u8  p_fstype;
+			u8  p_frag;
+			__le16 p_cpg;
+		} d_partitions[8];
+	} * label;
+	struct d_partition * partition;
+
+	data = read_dev_sector(bdev, 0, &sect);
+	if (!data)
+		return -1;
+
+	label = (struct disklabel *) (data+64);
+	partition = label->d_partitions;
+	if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) {
+		if (slot == state->limit)
+		        break;
+		if (le32_to_cpu(partition->p_size))
+			put_partition(state, slot,
+				le32_to_cpu(partition->p_offset),
+				le32_to_cpu(partition->p_size));
+		slot++;
+	}
+	printk("\n");
+	put_dev_sector(sect);
+	return 1;
+}
diff --git a/fs/partitions/osf.h b/fs/partitions/osf.h
new file mode 100644
index 0000000..427b8ea
--- /dev/null
+++ b/fs/partitions/osf.h
@@ -0,0 +1,7 @@
+/*
+ *  fs/partitions/osf.h
+ */
+
+#define DISKLABELMAGIC (0x82564557UL)
+
+int osf_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/partitions/sgi.c b/fs/partitions/sgi.c
new file mode 100644
index 0000000..6fa4ff8
--- /dev/null
+++ b/fs/partitions/sgi.c
@@ -0,0 +1,82 @@
+/*
+ *  fs/partitions/sgi.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ */
+
+#include "check.h"
+#include "sgi.h"
+
+struct sgi_disklabel {
+	__be32 magic_mushroom;		/* Big fat spliff... */
+	__be16 root_part_num;		/* Root partition number */
+	__be16 swap_part_num;		/* Swap partition number */
+	s8 boot_file[16];		/* Name of boot file for ARCS */
+	u8 _unused0[48];		/* Device parameter useless crapola.. */
+	struct sgi_volume {
+		s8 name[8];		/* Name of volume */
+		__be32 block_num;		/* Logical block number */
+		__be32 num_bytes;		/* How big, in bytes */
+	} volume[15];
+	struct sgi_partition {
+		__be32 num_blocks;		/* Size in logical blocks */
+		__be32 first_block;	/* First logical block */
+		__be32 type;		/* Type of this partition */
+	} partitions[16];
+	__be32 csum;			/* Disk label checksum */
+	__be32 _unused1;			/* Padding */
+};
+
+int sgi_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int i, csum;
+	__be32 magic;
+	int slot = 1;
+	unsigned int start, blocks;
+	__be32 *ui, cs;
+	Sector sect;
+	struct sgi_disklabel *label;
+	struct sgi_partition *p;
+	char b[BDEVNAME_SIZE];
+
+	label = (struct sgi_disklabel *) read_dev_sector(bdev, 0, &sect);
+	if (!label)
+		return -1;
+	p = &label->partitions[0];
+	magic = label->magic_mushroom;
+	if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
+		/*printk("Dev %s SGI disklabel: bad magic %08x\n",
+		       bdevname(bdev, b), be32_to_cpu(magic));*/
+		put_dev_sector(sect);
+		return 0;
+	}
+	ui = ((__be32 *) (label + 1)) - 1;
+	for(csum = 0; ui >= ((__be32 *) label);) {
+		cs = *ui--;
+		csum += be32_to_cpu(cs);
+	}
+	if(csum) {
+		printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
+		       bdevname(bdev, b));
+		put_dev_sector(sect);
+		return 0;
+	}
+	/* All SGI disk labels have 16 partitions, disks under Linux only
+	 * have 15 minors.  Luckily there are always a few zero-length
+	 * partitions which we don't care about so we never overflow the
+	 * current_minor.
+	 */
+	for(i = 0; i < 16; i++, p++) {
+		blocks = be32_to_cpu(p->num_blocks);
+		start  = be32_to_cpu(p->first_block);
+		if (blocks) {
+			put_partition(state, slot, start, blocks);
+			if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION)
+				state->parts[slot].flags = 1;
+		}
+		slot++;
+	}
+	printk("\n");
+	put_dev_sector(sect);
+	return 1;
+}
diff --git a/fs/partitions/sgi.h b/fs/partitions/sgi.h
new file mode 100644
index 0000000..5d5595c
--- /dev/null
+++ b/fs/partitions/sgi.h
@@ -0,0 +1,8 @@
+/*
+ *  fs/partitions/sgi.h
+ */
+
+extern int sgi_partition(struct parsed_partitions *state, struct block_device *bdev);
+
+#define SGI_LABEL_MAGIC 0x0be5a941
+
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
new file mode 100644
index 0000000..abe91ca
--- /dev/null
+++ b/fs/partitions/sun.c
@@ -0,0 +1,91 @@
+/*
+ *  fs/partitions/sun.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "sun.h"
+
+int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int i;
+	__be16 csum;
+	int slot = 1;
+	__be16 *ush;
+	Sector sect;
+	struct sun_disklabel {
+		unsigned char info[128];   /* Informative text string */
+		unsigned char spare0[14];
+		struct sun_info {
+			unsigned char spare1;
+			unsigned char id;
+			unsigned char spare2;
+			unsigned char flags;
+		} infos[8];
+		unsigned char spare[246];  /* Boot information etc. */
+		__be16 rspeed;     /* Disk rotational speed */
+		__be16 pcylcount;  /* Physical cylinder count */
+		__be16 sparecyl;   /* extra sects per cylinder */
+		unsigned char spare2[4];   /* More magic... */
+		__be16 ilfact;     /* Interleave factor */
+		__be16 ncyl;       /* Data cylinder count */
+		__be16 nacyl;      /* Alt. cylinder count */
+		__be16 ntrks;      /* Tracks per cylinder */
+		__be16 nsect;      /* Sectors per track */
+		unsigned char spare3[4];   /* Even more magic... */
+		struct sun_partition {
+			__be32 start_cylinder;
+			__be32 num_sectors;
+		} partitions[8];
+		__be16 magic;      /* Magic number */
+		__be16 csum;       /* Label xor'd checksum */
+	} * label;		
+	struct sun_partition *p;
+	unsigned long spc;
+	char b[BDEVNAME_SIZE];
+
+	label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
+	if (!label)
+		return -1;
+
+	p = label->partitions;
+	if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
+/*		printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
+		       bdevname(bdev, b), be16_to_cpu(label->magic)); */
+		put_dev_sector(sect);
+		return 0;
+	}
+	/* Look at the checksum */
+	ush = ((__be16 *) (label+1)) - 1;
+	for (csum = 0; ush >= ((__be16 *) label);)
+		csum ^= *ush--;
+	if (csum) {
+		printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+		       bdevname(bdev, b));
+		put_dev_sector(sect);
+		return 0;
+	}
+
+	/* All Sun disks have 8 partition entries */
+	spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
+	for (i = 0; i < 8; i++, p++) {
+		unsigned long st_sector;
+		int num_sectors;
+
+		st_sector = be32_to_cpu(p->start_cylinder) * spc;
+		num_sectors = be32_to_cpu(p->num_sectors);
+		if (num_sectors) {
+			put_partition(state, slot, st_sector, num_sectors);
+			if (label->infos[i].id == LINUX_RAID_PARTITION)
+				state->parts[slot].flags = 1;
+		}
+		slot++;
+	}
+	printk("\n");
+	put_dev_sector(sect);
+	return 1;
+}
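+
+/*
+ * Unit note (illustrative): Sun labels record partition starts in cylinders,
+ * so the code above converts with spc = ntrks * nsect sectors per cylinder.
+ * With ntrks = 16 and nsect = 63, a partition starting at cylinder 100
+ * begins at sector 100 * 1008 = 100800.
+ */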
diff --git a/fs/partitions/sun.h b/fs/partitions/sun.h
new file mode 100644
index 0000000..b1b19fd
--- /dev/null
+++ b/fs/partitions/sun.h
@@ -0,0 +1,7 @@
+/*
+ *  fs/partitions/sun.h
+ */
+
+#define SUN_LABEL_MAGIC          0xDABE
+
+int sun_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/partitions/ultrix.c b/fs/partitions/ultrix.c
new file mode 100644
index 0000000..8a8d4d9
--- /dev/null
+++ b/fs/partitions/ultrix.c
@@ -0,0 +1,47 @@
+/*
+ *  fs/partitions/ultrix.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *
+ *  Re-organised Jul 1999 Russell King
+ */
+
+#include "check.h"
+
+int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+	int i;
+	Sector sect;
+	unsigned char *data;
+	struct ultrix_disklabel {
+		s32	pt_magic;	/* magic no. indicating part. info exists */
+		s32	pt_valid;	/* set by driver if pt is current */
+		struct  pt_info {
+			s32		pi_nblocks; /* no. of sectors */
+			u32		pi_blkoff;  /* block offset for start */
+		} pt_part[8];
+	} *label;
+
+#define PT_MAGIC	0x032957	/* Partition magic number */
+#define PT_VALID	1		/* Indicates if struct is valid */
+
+	data = read_dev_sector(bdev, (16384 - sizeof(*label))/512, &sect);
+	if (!data)
+		return -1;
+	
+	label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label));
+
+	if (label->pt_magic == PT_MAGIC && label->pt_valid == PT_VALID) {
+		for (i=0; i<8; i++)
+			if (label->pt_part[i].pi_nblocks)
+				put_partition(state, i+1, 
+					      label->pt_part[i].pi_blkoff,
+					      label->pt_part[i].pi_nblocks);
+		put_dev_sector(sect);
+		printk ("\n");
+		return 1;
+	} else {
+		put_dev_sector(sect);
+		return 0;
+	}
+}
diff --git a/fs/partitions/ultrix.h b/fs/partitions/ultrix.h
new file mode 100644
index 0000000..a74bf8e
--- /dev/null
+++ b/fs/partitions/ultrix.h
@@ -0,0 +1,5 @@
+/*
+ *  fs/partitions/ultrix.h
+ */
+
+int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/pipe.c b/fs/pipe.c
new file mode 100644
index 0000000..25aa09f
--- /dev/null
+++ b/fs/pipe.c
@@ -0,0 +1,835 @@
+/*
+ *  linux/fs/pipe.c
+ *
+ *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
+ */
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/uio.h>
+#include <linux/highmem.h>
+
+#include <asm/uaccess.h>
+#include <asm/ioctls.h>
+
+/*
+ * We use a start+len construction, which provides full use of the 
+ * allocated memory.
+ * -- Florian Coosmann (FGC)
+ * 
+ * Reads with count = 0 should always return 0.
+ * -- Julian Bradfield 1999-06-07.
+ *
+ * FIFOs and Pipes now generate SIGIO for both readers and writers.
+ * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
+ *
+ * pipe_read & write cleanup
+ * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
+ */
+
+/* Drop the inode semaphore and wait for a pipe event, atomically */
+void pipe_wait(struct inode * inode)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE);
+	up(PIPE_SEM(*inode));
+	schedule();
+	finish_wait(PIPE_WAIT(*inode), &wait);
+	down(PIPE_SEM(*inode));
+}
+
+static inline int
+pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len)
+{
+	unsigned long copy;
+
+	while (len > 0) {
+		while (!iov->iov_len)
+			iov++;
+		copy = min_t(unsigned long, len, iov->iov_len);
+
+		if (copy_from_user(to, iov->iov_base, copy))
+			return -EFAULT;
+		to += copy;
+		len -= copy;
+		iov->iov_base += copy;
+		iov->iov_len -= copy;
+	}
+	return 0;
+}
+
+static inline int
+pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len)
+{
+	unsigned long copy;
+
+	while (len > 0) {
+		while (!iov->iov_len)
+			iov++;
+		copy = min_t(unsigned long, len, iov->iov_len);
+
+		if (copy_to_user(iov->iov_base, from, copy))
+			return -EFAULT;
+		from += copy;
+		len -= copy;
+		iov->iov_base += copy;
+		iov->iov_len -= copy;
+	}
+	return 0;
+}
+
+static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buffer *buf)
+{
+	struct page *page = buf->page;
+
+	if (info->tmp_page) {
+		__free_page(page);
+		return;
+	}
+	info->tmp_page = page;
+}
+
+static void *anon_pipe_buf_map(struct file *file, struct pipe_inode_info *info, struct pipe_buffer *buf)
+{
+	return kmap(buf->page);
+}
+
+static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer *buf)
+{
+	kunmap(buf->page);
+}
+
+static struct pipe_buf_operations anon_pipe_buf_ops = {
+	.can_merge = 1,
+	.map = anon_pipe_buf_map,
+	.unmap = anon_pipe_buf_unmap,
+	.release = anon_pipe_buf_release,
+};
+
+static ssize_t
+pipe_readv(struct file *filp, const struct iovec *_iov,
+	   unsigned long nr_segs, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct pipe_inode_info *info;
+	int do_wakeup;
+	ssize_t ret;
+	struct iovec *iov = (struct iovec *)_iov;
+	size_t total_len;
+
+	total_len = iov_length(iov, nr_segs);
+	/* Null read succeeds. */
+	if (unlikely(total_len == 0))
+		return 0;
+
+	do_wakeup = 0;
+	ret = 0;
+	down(PIPE_SEM(*inode));
+	info = inode->i_pipe;
+	for (;;) {
+		int bufs = info->nrbufs;
+		if (bufs) {
+			int curbuf = info->curbuf;
+			struct pipe_buffer *buf = info->bufs + curbuf;
+			struct pipe_buf_operations *ops = buf->ops;
+			void *addr;
+			size_t chars = buf->len;
+			int error;
+
+			if (chars > total_len)
+				chars = total_len;
+
+			addr = ops->map(filp, info, buf);
+			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
+			ops->unmap(info, buf);
+			if (unlikely(error)) {
+				if (!ret) ret = -EFAULT;
+				break;
+			}
+			ret += chars;
+			buf->offset += chars;
+			buf->len -= chars;
+			if (!buf->len) {
+				buf->ops = NULL;
+				ops->release(info, buf);
+				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
+				info->curbuf = curbuf;
+				info->nrbufs = --bufs;
+				do_wakeup = 1;
+			}
+			total_len -= chars;
+			if (!total_len)
+				break;	/* common path: read succeeded */
+		}
+		if (bufs)	/* More to do? */
+			continue;
+		if (!PIPE_WRITERS(*inode))
+			break;
+		if (!PIPE_WAITING_WRITERS(*inode)) {
+			/* syscall merging: Usually we must not sleep
+			 * if O_NONBLOCK is set, or if we got some data.
+			 * But if a writer sleeps in kernel space, then
+			 * we can wait for that data without violating POSIX.
+			 */
+			if (ret)
+				break;
+			if (filp->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				break;
+			}
+		}
+		if (signal_pending(current)) {
+			if (!ret) ret = -ERESTARTSYS;
+			break;
+		}
+		if (do_wakeup) {
+			wake_up_interruptible_sync(PIPE_WAIT(*inode));
+ 			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+		}
+		pipe_wait(inode);
+	}
+	up(PIPE_SEM(*inode));
+	/* Signal writers asynchronously that there is more room.  */
+	if (do_wakeup) {
+		wake_up_interruptible(PIPE_WAIT(*inode));
+		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+	}
+	if (ret > 0)
+		file_accessed(filp);
+	return ret;
+}
+
+static ssize_t
+pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = count };
+	return pipe_readv(filp, &iov, 1, ppos);
+}
+
+static ssize_t
+pipe_writev(struct file *filp, const struct iovec *_iov,
+	    unsigned long nr_segs, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct pipe_inode_info *info;
+	ssize_t ret;
+	int do_wakeup;
+	struct iovec *iov = (struct iovec *)_iov;
+	size_t total_len;
+	ssize_t chars;
+
+	total_len = iov_length(iov, nr_segs);
+	/* Null write succeeds. */
+	if (unlikely(total_len == 0))
+		return 0;
+
+	do_wakeup = 0;
+	ret = 0;
+	down(PIPE_SEM(*inode));
+	info = inode->i_pipe;
+
+	if (!PIPE_READERS(*inode)) {
+		send_sig(SIGPIPE, current, 0);
+		ret = -EPIPE;
+		goto out;
+	}
+
+	/* We try to merge small writes */
+	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
+	if (info->nrbufs && chars != 0) {
+		int lastbuf = (info->curbuf + info->nrbufs - 1) & (PIPE_BUFFERS-1);
+		struct pipe_buffer *buf = info->bufs + lastbuf;
+		struct pipe_buf_operations *ops = buf->ops;
+		int offset = buf->offset + buf->len;
+		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
+			void *addr = ops->map(filp, info, buf);
+			int error = pipe_iov_copy_from_user(offset + addr, iov, chars);
+			ops->unmap(info, buf);
+			ret = error;
+			do_wakeup = 1;
+			if (error)
+				goto out;
+			buf->len += chars;
+			total_len -= chars;
+			ret = chars;
+			if (!total_len)
+				goto out;
+		}
+	}
+
+	for (;;) {
+		int bufs;
+		if (!PIPE_READERS(*inode)) {
+			send_sig(SIGPIPE, current, 0);
+			if (!ret) ret = -EPIPE;
+			break;
+		}
+		bufs = info->nrbufs;
+		if (bufs < PIPE_BUFFERS) {
+			int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS-1);
+			struct pipe_buffer *buf = info->bufs + newbuf;
+			struct page *page = info->tmp_page;
+			int error;
+
+			if (!page) {
+				page = alloc_page(GFP_HIGHUSER);
+				if (unlikely(!page)) {
+					ret = ret ? : -ENOMEM;
+					break;
+				}
+				info->tmp_page = page;
+			}
+			/* Always wakeup, even if the copy fails. Otherwise
+			 * we lock up (O_NONBLOCK-)readers that sleep due to
+			 * syscall merging.
+			 * FIXME! Is this really true?
+			 */
+			do_wakeup = 1;
+			chars = PAGE_SIZE;
+			if (chars > total_len)
+				chars = total_len;
+
+			error = pipe_iov_copy_from_user(kmap(page), iov, chars);
+			kunmap(page);
+			if (unlikely(error)) {
+				if (!ret) ret = -EFAULT;
+				break;
+			}
+			ret += chars;
+
+			/* Insert it into the buffer array */
+			buf->page = page;
+			buf->ops = &anon_pipe_buf_ops;
+			buf->offset = 0;
+			buf->len = chars;
+			info->nrbufs = ++bufs;
+			info->tmp_page = NULL;
+
+			total_len -= chars;
+			if (!total_len)
+				break;
+		}
+		if (bufs < PIPE_BUFFERS)
+			continue;
+		if (filp->f_flags & O_NONBLOCK) {
+			if (!ret) ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			if (!ret) ret = -ERESTARTSYS;
+			break;
+		}
+		if (do_wakeup) {
+			wake_up_interruptible_sync(PIPE_WAIT(*inode));
+			kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
+			do_wakeup = 0;
+		}
+		PIPE_WAITING_WRITERS(*inode)++;
+		pipe_wait(inode);
+		PIPE_WAITING_WRITERS(*inode)--;
+	}
+out:
+	up(PIPE_SEM(*inode));
+	if (do_wakeup) {
+		wake_up_interruptible(PIPE_WAIT(*inode));
+		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
+	}
+	if (ret > 0)
+		inode_update_time(inode, 1);	/* mtime and ctime */
+	return ret;
+}
+
+static ssize_t
+pipe_write(struct file *filp, const char __user *buf,
+	   size_t count, loff_t *ppos)
+{
+	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+	return pipe_writev(filp, &iov, 1, ppos);
+}
+
+static ssize_t
+bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	return -EBADF;
+}
+
+static ssize_t
+bad_pipe_w(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+	return -EBADF;
+}
+
+static int
+pipe_ioctl(struct inode *pino, struct file *filp,
+	   unsigned int cmd, unsigned long arg)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct pipe_inode_info *info;
+	int count, buf, nrbufs;
+
+	switch (cmd) {
+		case FIONREAD:
+			down(PIPE_SEM(*inode));
+			info =  inode->i_pipe;
+			count = 0;
+			buf = info->curbuf;
+			nrbufs = info->nrbufs;
+			while (--nrbufs >= 0) {
+				count += info->bufs[buf].len;
+				buf = (buf+1) & (PIPE_BUFFERS-1);
+			}
+			up(PIPE_SEM(*inode));
+			return put_user(count, (int __user *)arg);
+		default:
+			return -EINVAL;
+	}
+}
+
+/* No kernel lock held - fine */
+static unsigned int
+pipe_poll(struct file *filp, poll_table *wait)
+{
+	unsigned int mask;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct pipe_inode_info *info = inode->i_pipe;
+	int nrbufs;
+
+	poll_wait(filp, PIPE_WAIT(*inode), wait);
+
+	/* Reading only -- no need for acquiring the semaphore.  */
+	nrbufs = info->nrbufs;
+	mask = 0;
+	if (filp->f_mode & FMODE_READ) {
+		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+		if (!PIPE_WRITERS(*inode) && filp->f_version != PIPE_WCOUNTER(*inode))
+			mask |= POLLHUP;
+	}
+
+	if (filp->f_mode & FMODE_WRITE) {
+		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
+		if (!PIPE_READERS(*inode))
+			mask |= POLLERR;
+	}
+
+	return mask;
+}
+
+/* FIXME: most Unices do not set POLLERR for fifos */
+#define fifo_poll pipe_poll
+
+static int
+pipe_release(struct inode *inode, int decr, int decw)
+{
+	down(PIPE_SEM(*inode));
+	PIPE_READERS(*inode) -= decr;
+	PIPE_WRITERS(*inode) -= decw;
+	if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
+		free_pipe_info(inode);
+	} else {
+		wake_up_interruptible(PIPE_WAIT(*inode));
+		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
+		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+	}
+	up(PIPE_SEM(*inode));
+
+	return 0;
+}
+
+static int
+pipe_read_fasync(int fd, struct file *filp, int on)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int retval;
+
+	down(PIPE_SEM(*inode));
+	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
+	up(PIPE_SEM(*inode));
+
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+
+static int
+pipe_write_fasync(int fd, struct file *filp, int on)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int retval;
+
+	down(PIPE_SEM(*inode));
+	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
+	up(PIPE_SEM(*inode));
+
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+
+static int
+pipe_rdwr_fasync(int fd, struct file *filp, int on)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int retval;
+
+	down(PIPE_SEM(*inode));
+
+	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
+
+	if (retval >= 0)
+		retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
+
+	up(PIPE_SEM(*inode));
+
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+
+static int
+pipe_read_release(struct inode *inode, struct file *filp)
+{
+	pipe_read_fasync(-1, filp, 0);
+	return pipe_release(inode, 1, 0);
+}
+
+static int
+pipe_write_release(struct inode *inode, struct file *filp)
+{
+	pipe_write_fasync(-1, filp, 0);
+	return pipe_release(inode, 0, 1);
+}
+
+static int
+pipe_rdwr_release(struct inode *inode, struct file *filp)
+{
+	int decr, decw;
+
+	pipe_rdwr_fasync(-1, filp, 0);
+	decr = (filp->f_mode & FMODE_READ) != 0;
+	decw = (filp->f_mode & FMODE_WRITE) != 0;
+	return pipe_release(inode, decr, decw);
+}
+
+static int
+pipe_read_open(struct inode *inode, struct file *filp)
+{
+	/* We could have perhaps used atomic_t, but this and the friends
+	   below are the only places the counts are touched, so it doesn't
+	   seem worthwhile.  */
+	down(PIPE_SEM(*inode));
+	PIPE_READERS(*inode)++;
+	up(PIPE_SEM(*inode));
+
+	return 0;
+}
+
+static int
+pipe_write_open(struct inode *inode, struct file *filp)
+{
+	down(PIPE_SEM(*inode));
+	PIPE_WRITERS(*inode)++;
+	up(PIPE_SEM(*inode));
+
+	return 0;
+}
+
+static int
+pipe_rdwr_open(struct inode *inode, struct file *filp)
+{
+	down(PIPE_SEM(*inode));
+	if (filp->f_mode & FMODE_READ)
+		PIPE_READERS(*inode)++;
+	if (filp->f_mode & FMODE_WRITE)
+		PIPE_WRITERS(*inode)++;
+	up(PIPE_SEM(*inode));
+
+	return 0;
+}
+
+/*
+ * The file_operations structs are not static because they
+ * are also used in linux/fs/fifo.c to do operations on FIFOs.
+ */
+struct file_operations read_fifo_fops = {
+	.llseek		= no_llseek,
+	.read		= pipe_read,
+	.readv		= pipe_readv,
+	.write		= bad_pipe_w,
+	.poll		= fifo_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_read_open,
+	.release	= pipe_read_release,
+	.fasync		= pipe_read_fasync,
+};
+
+struct file_operations write_fifo_fops = {
+	.llseek		= no_llseek,
+	.read		= bad_pipe_r,
+	.write		= pipe_write,
+	.writev		= pipe_writev,
+	.poll		= fifo_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_write_open,
+	.release	= pipe_write_release,
+	.fasync		= pipe_write_fasync,
+};
+
+struct file_operations rdwr_fifo_fops = {
+	.llseek		= no_llseek,
+	.read		= pipe_read,
+	.readv		= pipe_readv,
+	.write		= pipe_write,
+	.writev		= pipe_writev,
+	.poll		= fifo_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_rdwr_open,
+	.release	= pipe_rdwr_release,
+	.fasync		= pipe_rdwr_fasync,
+};
+
+struct file_operations read_pipe_fops = {
+	.llseek		= no_llseek,
+	.read		= pipe_read,
+	.readv		= pipe_readv,
+	.write		= bad_pipe_w,
+	.poll		= pipe_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_read_open,
+	.release	= pipe_read_release,
+	.fasync		= pipe_read_fasync,
+};
+
+struct file_operations write_pipe_fops = {
+	.llseek		= no_llseek,
+	.read		= bad_pipe_r,
+	.write		= pipe_write,
+	.writev		= pipe_writev,
+	.poll		= pipe_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_write_open,
+	.release	= pipe_write_release,
+	.fasync		= pipe_write_fasync,
+};
+
+struct file_operations rdwr_pipe_fops = {
+	.llseek		= no_llseek,
+	.read		= pipe_read,
+	.readv		= pipe_readv,
+	.write		= pipe_write,
+	.writev		= pipe_writev,
+	.poll		= pipe_poll,
+	.ioctl		= pipe_ioctl,
+	.open		= pipe_rdwr_open,
+	.release	= pipe_rdwr_release,
+	.fasync		= pipe_rdwr_fasync,
+};
+
+void free_pipe_info(struct inode *inode)
+{
+	int i;
+	struct pipe_inode_info *info = inode->i_pipe;
+
+	inode->i_pipe = NULL;
+	for (i = 0; i < PIPE_BUFFERS; i++) {
+		struct pipe_buffer *buf = info->bufs + i;
+		if (buf->ops)
+			buf->ops->release(info, buf);
+	}
+	if (info->tmp_page)
+		__free_page(info->tmp_page);
+	kfree(info);
+}
+
+struct inode* pipe_new(struct inode* inode)
+{
+	struct pipe_inode_info *info;
+
+	info = kmalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
+	if (!info)
+		goto fail_page;
+	memset(info, 0, sizeof(*info));
+	inode->i_pipe = info;
+
+	init_waitqueue_head(PIPE_WAIT(*inode));
+	PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;
+
+	return inode;
+fail_page:
+	return NULL;
+}
+
+static struct vfsmount *pipe_mnt;
+static int pipefs_delete_dentry(struct dentry *dentry)
+{
+	return 1;
+}
+static struct dentry_operations pipefs_dentry_operations = {
+	.d_delete	= pipefs_delete_dentry,
+};
+
+static struct inode * get_pipe_inode(void)
+{
+	struct inode *inode = new_inode(pipe_mnt->mnt_sb);
+
+	if (!inode)
+		goto fail_inode;
+
+	if(!pipe_new(inode))
+		goto fail_iput;
+	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
+	inode->i_fop = &rdwr_pipe_fops;
+
+	/*
+	 * Mark the inode dirty from the very beginning,
+	 * that way it will never be moved to the dirty
+	 * list because "mark_inode_dirty()" will think
+	 * that it already _is_ on the dirty list.
+	 */
+	inode->i_state = I_DIRTY;
+	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = current->fsgid;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_blksize = PAGE_SIZE;
+	return inode;
+
+fail_iput:
+	iput(inode);
+fail_inode:
+	return NULL;
+}
+
+int do_pipe(int *fd)
+{
+	struct qstr this;
+	char name[32];
+	struct dentry *dentry;
+	struct inode * inode;
+	struct file *f1, *f2;
+	int error;
+	int i,j;
+
+	error = -ENFILE;
+	f1 = get_empty_filp();
+	if (!f1)
+		goto no_files;
+
+	f2 = get_empty_filp();
+	if (!f2)
+		goto close_f1;
+
+	inode = get_pipe_inode();
+	if (!inode)
+		goto close_f12;
+
+	error = get_unused_fd();
+	if (error < 0)
+		goto close_f12_inode;
+	i = error;
+
+	error = get_unused_fd();
+	if (error < 0)
+		goto close_f12_inode_i;
+	j = error;
+
+	error = -ENOMEM;
+	sprintf(name, "[%lu]", inode->i_ino);
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = inode->i_ino; /* will go */
+	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
+	if (!dentry)
+		goto close_f12_inode_i_j;
+	dentry->d_op = &pipefs_dentry_operations;
+	d_add(dentry, inode);
+	f1->f_vfsmnt = f2->f_vfsmnt = mntget(mntget(pipe_mnt));
+	f1->f_dentry = f2->f_dentry = dget(dentry);
+	f1->f_mapping = f2->f_mapping = inode->i_mapping;
+
+	/* read file */
+	f1->f_pos = f2->f_pos = 0;
+	f1->f_flags = O_RDONLY;
+	f1->f_op = &read_pipe_fops;
+	f1->f_mode = FMODE_READ;
+	f1->f_version = 0;
+
+	/* write file */
+	f2->f_flags = O_WRONLY;
+	f2->f_op = &write_pipe_fops;
+	f2->f_mode = FMODE_WRITE;
+	f2->f_version = 0;
+
+	fd_install(i, f1);
+	fd_install(j, f2);
+	fd[0] = i;
+	fd[1] = j;
+	return 0;
+
+close_f12_inode_i_j:
+	put_unused_fd(j);
+close_f12_inode_i:
+	put_unused_fd(i);
+close_f12_inode:
+	free_pipe_info(inode);
+	iput(inode);
+close_f12:
+	put_filp(f2);
+close_f1:
+	put_filp(f1);
+no_files:
+	return error;	
+}
+
+/*
+ * pipefs should _never_ be mounted by userland - too much of a security hassle,
+ * no real gain from having the whole whorehouse mounted. So we don't need
+ * any operations on the root directory. However, we need a non-trivial
+ * d_name - pipe: will go nicely and kill the special-casing in procfs.
+ */
+
+static struct super_block *pipefs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
+}
+
+static struct file_system_type pipe_fs_type = {
+	.name		= "pipefs",
+	.get_sb		= pipefs_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+static int __init init_pipe_fs(void)
+{
+	int err = register_filesystem(&pipe_fs_type);
+	if (!err) {
+		pipe_mnt = kern_mount(&pipe_fs_type);
+		if (IS_ERR(pipe_mnt)) {
+			err = PTR_ERR(pipe_mnt);
+			unregister_filesystem(&pipe_fs_type);
+		}
+	}
+	return err;
+}
+
+static void __exit exit_pipe_fs(void)
+{
+	unregister_filesystem(&pipe_fs_type);
+	mntput(pipe_mnt);
+}
+
+fs_initcall(init_pipe_fs);
+module_exit(exit_pipe_fs);
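
The semantics implemented above are easiest to see from user space. A minimal sketch, assuming an ordinary Linux C environment, that exercises the empty-pipe EAGAIN path in pipe_readv(), the FIONREAD count computed in pipe_ioctl(), and the SIGPIPE/EPIPE path in pipe_writev():

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd[2];
	char buf[16];
	int n = 0;

	signal(SIGPIPE, SIG_IGN);	/* let the failed write return EPIPE instead of killing us */

	if (pipe(fd) < 0)
		return 1;

	/* Empty pipe with O_NONBLOCK: pipe_readv() bails out with -EAGAIN. */
	fcntl(fd[0], F_SETFL, O_NONBLOCK);
	if (read(fd[0], buf, sizeof(buf)) < 0 && errno == EAGAIN)
		printf("empty non-blocking read: EAGAIN\n");

	/* A small write lands in one pipe_buffer; FIONREAD walks the bufs[] ring. */
	write(fd[1], "hello", 5);
	ioctl(fd[0], FIONREAD, &n);
	printf("FIONREAD reports %d buffered bytes\n", n);

	/* Drain and drop the last reader: the next write gets SIGPIPE and -EPIPE. */
	read(fd[0], buf, sizeof(buf));
	close(fd[0]);
	if (write(fd[1], "x", 1) < 0 && errno == EPIPE)
		printf("write with no readers left: EPIPE\n");

	close(fd[1]);
	return 0;
}
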
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
new file mode 100644
index 0000000..296480e
--- /dev/null
+++ b/fs/posix_acl.c
@@ -0,0 +1,381 @@
+/*
+ * linux/fs/posix_acl.c
+ *
+ *  Copyright (C) 2002 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ *
+ *  Fixes from William Schumacher incorporated on 15 March 2001.
+ *     (Reported by Charles Bertsch, <CBertsch@microtest.com>).
+ */
+
+/*
+ *  This file contains generic functions for manipulating
+ *  POSIX 1003.1e draft standard 17 ACLs.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/posix_acl.h>
+#include <linux/module.h>
+
+#include <linux/errno.h>
+
+EXPORT_SYMBOL(posix_acl_alloc);
+EXPORT_SYMBOL(posix_acl_clone);
+EXPORT_SYMBOL(posix_acl_valid);
+EXPORT_SYMBOL(posix_acl_equiv_mode);
+EXPORT_SYMBOL(posix_acl_from_mode);
+EXPORT_SYMBOL(posix_acl_create_masq);
+EXPORT_SYMBOL(posix_acl_chmod_masq);
+EXPORT_SYMBOL(posix_acl_permission);
+
+/*
+ * Allocate a new ACL with the specified number of entries.
+ */
+struct posix_acl *
+posix_acl_alloc(int count, unsigned int __nocast flags)
+{
+	const size_t size = sizeof(struct posix_acl) +
+	                    count * sizeof(struct posix_acl_entry);
+	struct posix_acl *acl = kmalloc(size, flags);
+	if (acl) {
+		atomic_set(&acl->a_refcount, 1);
+		acl->a_count = count;
+	}
+	return acl;
+}
+
+/*
+ * Clone an ACL.
+ */
+struct posix_acl *
+posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags)
+{
+	struct posix_acl *clone = NULL;
+
+	if (acl) {
+		int size = sizeof(struct posix_acl) + acl->a_count *
+		           sizeof(struct posix_acl_entry);
+		clone = kmalloc(size, flags);
+		if (clone) {
+			memcpy(clone, acl, size);
+			atomic_set(&clone->a_refcount, 1);
+		}
+	}
+	return clone;
+}
+
+/*
+ * Check if an acl is valid. Returns 0 if it is, or -E... otherwise.
+ */
+int
+posix_acl_valid(const struct posix_acl *acl)
+{
+	const struct posix_acl_entry *pa, *pe;
+	int state = ACL_USER_OBJ;
+	unsigned int id = 0;  /* keep gcc happy */
+	int needs_mask = 0;
+
+	FOREACH_ACL_ENTRY(pa, acl, pe) {
+		if (pa->e_perm & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
+			return -EINVAL;
+		switch (pa->e_tag) {
+			case ACL_USER_OBJ:
+				if (state == ACL_USER_OBJ) {
+					id = 0;
+					state = ACL_USER;
+					break;
+				}
+				return -EINVAL;
+
+			case ACL_USER:
+				if (state != ACL_USER)
+					return -EINVAL;
+				if (pa->e_id == ACL_UNDEFINED_ID ||
+				    pa->e_id < id)
+					return -EINVAL;
+				id = pa->e_id + 1;
+				needs_mask = 1;
+				break;
+
+			case ACL_GROUP_OBJ:
+				if (state == ACL_USER) {
+					id = 0;
+					state = ACL_GROUP;
+					break;
+				}
+				return -EINVAL;
+
+			case ACL_GROUP:
+				if (state != ACL_GROUP)
+					return -EINVAL;
+				if (pa->e_id == ACL_UNDEFINED_ID ||
+				    pa->e_id < id)
+					return -EINVAL;
+				id = pa->e_id + 1;
+				needs_mask = 1;
+				break;
+
+			case ACL_MASK:
+				if (state != ACL_GROUP)
+					return -EINVAL;
+				state = ACL_OTHER;
+				break;
+
+			case ACL_OTHER:
+				if (state == ACL_OTHER ||
+				    (state == ACL_GROUP && !needs_mask)) {
+					state = 0;
+					break;
+				}
+				return -EINVAL;
+
+			default:
+				return -EINVAL;
+		}
+	}
+	if (state == 0)
+		return 0;
+	return -EINVAL;
+}
+
+/*
+ * Returns 0 if the acl can be exactly represented in the traditional
+ * file mode permission bits, or else 1. Returns -E... on error.
+ */
+int
+posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
+{
+	const struct posix_acl_entry *pa, *pe;
+	mode_t mode = 0;
+	int not_equiv = 0;
+
+	FOREACH_ACL_ENTRY(pa, acl, pe) {
+		switch (pa->e_tag) {
+			case ACL_USER_OBJ:
+				mode |= (pa->e_perm & S_IRWXO) << 6;
+				break;
+			case ACL_GROUP_OBJ:
+				mode |= (pa->e_perm & S_IRWXO) << 3;
+				break;
+			case ACL_OTHER:
+				mode |= pa->e_perm & S_IRWXO;
+				break;
+			case ACL_MASK:
+				mode = (mode & ~S_IRWXG) |
+				       ((pa->e_perm & S_IRWXO) << 3);
+				not_equiv = 1;
+				break;
+			case ACL_USER:
+			case ACL_GROUP:
+				not_equiv = 1;
+				break;
+			default:
+				return -EINVAL;
+		}
+	}
+        if (mode_p)
+                *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+        return not_equiv;
+}
+
+/*
+ * Create an ACL representing the file mode permission bits of an inode.
+ */
+struct posix_acl *
+posix_acl_from_mode(mode_t mode, unsigned int __nocast flags)
+{
+	struct posix_acl *acl = posix_acl_alloc(3, flags);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+
+	acl->a_entries[0].e_tag  = ACL_USER_OBJ;
+	acl->a_entries[0].e_id   = ACL_UNDEFINED_ID;
+	acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6;
+
+	acl->a_entries[1].e_tag  = ACL_GROUP_OBJ;
+	acl->a_entries[1].e_id   = ACL_UNDEFINED_ID;
+	acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3;
+
+	acl->a_entries[2].e_tag  = ACL_OTHER;
+	acl->a_entries[2].e_id   = ACL_UNDEFINED_ID;
+	acl->a_entries[2].e_perm = (mode & S_IRWXO);
+	return acl;
+}
+
+/*
+ * Return 0 if current is granted want access to the inode
+ * by the acl. Returns -E... otherwise.
+ */
+int
+posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
+{
+	const struct posix_acl_entry *pa, *pe, *mask_obj;
+	int found = 0;
+
+	FOREACH_ACL_ENTRY(pa, acl, pe) {
+                switch(pa->e_tag) {
+                        case ACL_USER_OBJ:
+				/* (May have been checked already) */
+                                if (inode->i_uid == current->fsuid)
+                                        goto check_perm;
+                                break;
+                        case ACL_USER:
+                                if (pa->e_id == current->fsuid)
+                                        goto mask;
+				break;
+                        case ACL_GROUP_OBJ:
+                                if (in_group_p(inode->i_gid)) {
+					found = 1;
+					if ((pa->e_perm & want) == want)
+						goto mask;
+                                }
+				break;
+                        case ACL_GROUP:
+                                if (in_group_p(pa->e_id)) {
+					found = 1;
+					if ((pa->e_perm & want) == want)
+						goto mask;
+                                }
+                                break;
+                        case ACL_MASK:
+                                break;
+                        case ACL_OTHER:
+				if (found)
+					return -EACCES;
+				else
+					goto check_perm;
+			default:
+				return -EIO;
+                }
+        }
+	return -EIO;
+
+mask:
+	for (mask_obj = pa+1; mask_obj != pe; mask_obj++) {
+		if (mask_obj->e_tag == ACL_MASK) {
+			if ((pa->e_perm & mask_obj->e_perm & want) == want)
+				return 0;
+			return -EACCES;
+		}
+	}
+
+check_perm:
+	if ((pa->e_perm & want) == want)
+		return 0;
+	return -EACCES;
+}
+
+/*
+ * Modify acl when creating a new inode. The caller must ensure the acl is
+ * only referenced once.
+ *
+ * mode_p initially must contain the mode parameter to the open() / creat()
+ * system calls. All permissions that are not granted by the acl are removed.
+ * The permissions in the acl are changed to reflect the mode_p parameter.
+ */
+int
+posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p)
+{
+	struct posix_acl_entry *pa, *pe;
+	struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
+	mode_t mode = *mode_p;
+	int not_equiv = 0;
+
+	/* assert(atomic_read(acl->a_refcount) == 1); */
+
+	FOREACH_ACL_ENTRY(pa, acl, pe) {
+                switch(pa->e_tag) {
+                        case ACL_USER_OBJ:
+				pa->e_perm &= (mode >> 6) | ~S_IRWXO;
+				mode &= (pa->e_perm << 6) | ~S_IRWXU;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				not_equiv = 1;
+				break;
+
+                        case ACL_GROUP_OBJ:
+				group_obj = pa;
+                                break;
+
+                        case ACL_OTHER:
+				pa->e_perm &= mode | ~S_IRWXO;
+				mode &= pa->e_perm | ~S_IRWXO;
+                                break;
+
+                        case ACL_MASK:
+				mask_obj = pa;
+				not_equiv = 1;
+                                break;
+
+			default:
+				return -EIO;
+                }
+        }
+
+	if (mask_obj) {
+		mask_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
+		mode &= (mask_obj->e_perm << 3) | ~S_IRWXG;
+	} else {
+		if (!group_obj)
+			return -EIO;
+		group_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
+		mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
+	}
+
+	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+        return not_equiv;
+}
+
+/*
+ * Modify the ACL for the chmod syscall.
+ */
+int
+posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
+{
+	struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
+	struct posix_acl_entry *pa, *pe;
+
+	/* assert(atomic_read(acl->a_refcount) == 1); */
+
+	FOREACH_ACL_ENTRY(pa, acl, pe) {
+		switch(pa->e_tag) {
+			case ACL_USER_OBJ:
+				pa->e_perm = (mode & S_IRWXU) >> 6;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				break;
+
+			case ACL_GROUP_OBJ:
+				group_obj = pa;
+				break;
+
+			case ACL_MASK:
+				mask_obj = pa;
+				break;
+
+			case ACL_OTHER:
+				pa->e_perm = (mode & S_IRWXO);
+				break;
+
+			default:
+				return -EIO;
+		}
+	}
+
+	if (mask_obj) {
+		mask_obj->e_perm = (mode & S_IRWXG) >> 3;
+	} else {
+		if (!group_obj)
+			return -EIO;
+		group_obj->e_perm = (mode & S_IRWXG) >> 3;
+	}
+
+	return 0;
+}
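
To see how these helpers combine, here is a minimal sketch of the round trip for a plain 0644 mode; kernel context is assumed, and posix_acl_release() is the matching reference drop from <linux/posix_acl.h>:

/* A mode-only ACL validates cleanly and collapses back to the same mode bits. */
static int posix_acl_roundtrip_sketch(void)
{
	mode_t mode = 0644;
	struct posix_acl *acl = posix_acl_from_mode(mode, GFP_KERNEL);

	if (IS_ERR(acl))
		return PTR_ERR(acl);

	/* Entries are USER_OBJ(rw-), GROUP_OBJ(r--), OTHER(r--), in the
	 * order posix_acl_valid() insists on, so this returns 0. */
	BUG_ON(posix_acl_valid(acl) != 0);

	/* No ACL_USER, ACL_GROUP or ACL_MASK entries, so the ACL is exactly
	 * representable in the mode bits and posix_acl_equiv_mode() says 0. */
	BUG_ON(posix_acl_equiv_mode(acl, &mode) != 0);
	BUG_ON((mode & S_IRWXUGO) != 0644);

	posix_acl_release(acl);
	return 0;
}
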
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
new file mode 100644
index 0000000..738b9b6
--- /dev/null
+++ b/fs/proc/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux proc filesystem routines.
+#
+
+obj-$(CONFIG_PROC_FS) += proc.o
+
+proc-y			:= nommu.o task_nommu.o
+proc-$(CONFIG_MMU)	:= mmu.o task_mmu.o
+
+proc-y       += inode.o root.o base.o generic.o array.o \
+		kmsg.o proc_tty.o proc_misc.o
+
+proc-$(CONFIG_PROC_KCORE)	+= kcore.o
+proc-$(CONFIG_PROC_DEVICETREE)	+= proc_devtree.o
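
Note the kbuild idiom in the two ':=' assignments above: with CONFIG_MMU=y the second line expands to proc-y := mmu.o task_mmu.o and simply overwrites the nommu default, while with CONFIG_MMU unset it expands to the never-referenced variable proc- and the nommu objects stand. The '+=' lines that follow then append the objects shared by both configurations.
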
diff --git a/fs/proc/array.c b/fs/proc/array.c
new file mode 100644
index 0000000..37668fe
--- /dev/null
+++ b/fs/proc/array.c
@@ -0,0 +1,484 @@
+/*
+ *  linux/fs/proc/array.c
+ *
+ *  Copyright (C) 1992  by Linus Torvalds
+ *  based on ideas by Darren Senn
+ *
+ * Fixes:
+ * Michael. K. Johnson: stat,statm extensions.
+ *                      <johnsonm@stolaf.edu>
+ *
+ * Pauline Middelink :  Made cmdline,envline only break at '\0's, to
+ *                      make sure SET_PROCTITLE works. Also removed
+ *                      bad '!' which forced address recalculation for
+ *                      EVERY character on the current page.
+ *                      <middelin@polyware.iaf.nl>
+ *
+ * Danny ter Haar    :	added cpuinfo
+ *			<dth@cistron.nl>
+ *
+ * Alessandro Rubini :  profile extension.
+ *                      <rubini@ipvvis.unipv.it>
+ *
+ * Jeff Tranter      :  added BogoMips field to cpuinfo
+ *                      <Jeff_Tranter@Mitel.COM>
+ *
+ * Bruno Haible      :  remove 4K limit for the maps file
+ *			<haible@ma2s2.mathematik.uni-karlsruhe.de>
+ *
+ * Yves Arrouye      :  remove removal of trailing spaces in get_array.
+ *			<Yves.Arrouye@marin.fdn.fr>
+ *
+ * Jerome Forissier  :  added per-CPU time information to /proc/stat
+ *                      and /proc/<pid>/cpu extension
+ *                      <forissier@isia.cma.fr>
+ *			- Incorporation and non-SMP safe operation
+ *			of forissier patch in 2.1.78 by
+ *			Hans Marcus <crowbar@concepts.nl>
+ *
+ * aeb@cwi.nl        :  /proc/partitions
+ *
+ *
+ * Alan Cox	     :  security fixes.
+ *			<Alan.Cox@linux.org>
+ *
+ * Al Viro           :  safe handling of mm_struct
+ *
+ * Gerhard Wichert   :  added BIGMEM support
+ * Siemens AG           <Gerhard.Wichert@pdb.siemens.de>
+ *
+ * Al Viro & Jeff Garzik :  moved most of the thing into base.c and
+ *			 :  proc_misc.c. The rest may eventually go into
+ *			 :  base.c too.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/mman.h>
+#include <linux/proc_fs.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/highmem.h>
+#include <linux/file.h>
+#include <linux/times.h>
+#include <linux/cpuset.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include "internal.h"
+
+/* Gcc optimizes away "strlen(x)" for constant x */
+#define ADDBUF(buffer, string) \
+do { memcpy(buffer, string, strlen(string)); \
+     buffer += strlen(string); } while (0)
+
+static inline char * task_name(struct task_struct *p, char * buf)
+{
+	int i;
+	char * name;
+	char tcomm[sizeof(p->comm)];
+
+	get_task_comm(tcomm, p);
+
+	ADDBUF(buf, "Name:\t");
+	name = tcomm;
+	i = sizeof(tcomm);
+	do {
+		unsigned char c = *name;
+		name++;
+		i--;
+		*buf = c;
+		if (!c)
+			break;
+		if (c == '\\') {
+			buf[1] = c;
+			buf += 2;
+			continue;
+		}
+		if (c == '\n') {
+			buf[0] = '\\';
+			buf[1] = 'n';
+			buf += 2;
+			continue;
+		}
+		buf++;
+	} while (i);
+	*buf = '\n';
+	return buf+1;
+}
+
+/*
+ * The task state array is a strange "bitmap" of
+ * reasons to sleep. Thus "running" is zero, and
+ * you can test for combinations of others with
+ * simple bit tests.
+ */
+static const char *task_state_array[] = {
+	"R (running)",		/*  0 */
+	"S (sleeping)",		/*  1 */
+	"D (disk sleep)",	/*  2 */
+	"T (stopped)",		/*  4 */
+	"T (tracing stop)",	/*  8 */
+	"Z (zombie)",		/* 16 */
+	"X (dead)"		/* 32 */
+};
+
+static inline const char * get_task_state(struct task_struct *tsk)
+{
+	unsigned int state = (tsk->state & (TASK_RUNNING |
+					    TASK_INTERRUPTIBLE |
+					    TASK_UNINTERRUPTIBLE |
+					    TASK_STOPPED |
+					    TASK_TRACED)) |
+			(tsk->exit_state & (EXIT_ZOMBIE |
+					    EXIT_DEAD));
+	const char **p = &task_state_array[0];
+
+	while (state) {
+		p++;
+		state >>= 1;
+	}
+	return *p;
+}
+
+static inline char * task_state(struct task_struct *p, char *buffer)
+{
+	struct group_info *group_info;
+	int g;
+
+	read_lock(&tasklist_lock);
+	buffer += sprintf(buffer,
+		"State:\t%s\n"
+		"SleepAVG:\t%lu%%\n"
+		"Tgid:\t%d\n"
+		"Pid:\t%d\n"
+		"PPid:\t%d\n"
+		"TracerPid:\t%d\n"
+		"Uid:\t%d\t%d\t%d\t%d\n"
+		"Gid:\t%d\t%d\t%d\t%d\n",
+		get_task_state(p),
+		(p->sleep_avg/1024)*100/(1020000000/1024),
+	       	p->tgid,
+		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
+		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
+		p->uid, p->euid, p->suid, p->fsuid,
+		p->gid, p->egid, p->sgid, p->fsgid);
+	read_unlock(&tasklist_lock);
+	task_lock(p);
+	buffer += sprintf(buffer,
+		"FDSize:\t%d\n"
+		"Groups:\t",
+		p->files ? p->files->max_fds : 0);
+
+	group_info = p->group_info;
+	get_group_info(group_info);
+	task_unlock(p);
+
+	for (g = 0; g < min(group_info->ngroups,NGROUPS_SMALL); g++)
+		buffer += sprintf(buffer, "%d ", GROUP_AT(group_info,g));
+	put_group_info(group_info);
+
+	buffer += sprintf(buffer, "\n");
+	return buffer;
+}
+
+static char * render_sigset_t(const char *header, sigset_t *set, char *buffer)
+{
+	int i, len;
+
+	len = strlen(header);
+	memcpy(buffer, header, len);
+	buffer += len;
+
+	i = _NSIG;
+	do {
+		int x = 0;
+
+		i -= 4;
+		if (sigismember(set, i+1)) x |= 1;
+		if (sigismember(set, i+2)) x |= 2;
+		if (sigismember(set, i+3)) x |= 4;
+		if (sigismember(set, i+4)) x |= 8;
+		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
+	} while (i >= 4);
+
+	*buffer++ = '\n';
+	*buffer = 0;
+	return buffer;
+}
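
For example, with the usual _NSIG of 64, a set containing only SIGINT (signal 2) renders as the sixteen hex digits 0000000000000002, and one containing only SIGTERM (signal 15) as 0000000000004000: each digit packs four consecutive signal numbers, with signals 1-4 in the right-most digit and signals 61-64 in the left-most.
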
+
+static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
+				    sigset_t *catch)
+{
+	struct k_sigaction *k;
+	int i;
+
+	k = p->sighand->action;
+	for (i = 1; i <= _NSIG; ++i, ++k) {
+		if (k->sa.sa_handler == SIG_IGN)
+			sigaddset(ign, i);
+		else if (k->sa.sa_handler != SIG_DFL)
+			sigaddset(catch, i);
+	}
+}
+
+static inline char * task_sig(struct task_struct *p, char *buffer)
+{
+	sigset_t pending, shpending, blocked, ignored, caught;
+	int num_threads = 0;
+	unsigned long qsize = 0;
+	unsigned long qlim = 0;
+
+	sigemptyset(&pending);
+	sigemptyset(&shpending);
+	sigemptyset(&blocked);
+	sigemptyset(&ignored);
+	sigemptyset(&caught);
+
+	/* Gather all the data with the appropriate locks held */
+	read_lock(&tasklist_lock);
+	if (p->sighand) {
+		spin_lock_irq(&p->sighand->siglock);
+		pending = p->pending.signal;
+		shpending = p->signal->shared_pending.signal;
+		blocked = p->blocked;
+		collect_sigign_sigcatch(p, &ignored, &caught);
+		num_threads = atomic_read(&p->signal->count);
+		qsize = atomic_read(&p->user->sigpending);
+		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
+		spin_unlock_irq(&p->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+
+	buffer += sprintf(buffer, "Threads:\t%d\n", num_threads);
+	buffer += sprintf(buffer, "SigQ:\t%lu/%lu\n", qsize, qlim);
+
+	/* render them all */
+	buffer = render_sigset_t("SigPnd:\t", &pending, buffer);
+	buffer = render_sigset_t("ShdPnd:\t", &shpending, buffer);
+	buffer = render_sigset_t("SigBlk:\t", &blocked, buffer);
+	buffer = render_sigset_t("SigIgn:\t", &ignored, buffer);
+	buffer = render_sigset_t("SigCgt:\t", &caught, buffer);
+
+	return buffer;
+}
+
+static inline char *task_cap(struct task_struct *p, char *buffer)
+{
+    return buffer + sprintf(buffer, "CapInh:\t%016x\n"
+			    "CapPrm:\t%016x\n"
+			    "CapEff:\t%016x\n",
+			    cap_t(p->cap_inheritable),
+			    cap_t(p->cap_permitted),
+			    cap_t(p->cap_effective));
+}
+
+int proc_pid_status(struct task_struct *task, char * buffer)
+{
+	char * orig = buffer;
+	struct mm_struct *mm = get_task_mm(task);
+
+	buffer = task_name(task, buffer);
+	buffer = task_state(task, buffer);
+ 
+	if (mm) {
+		buffer = task_mem(mm, buffer);
+		mmput(mm);
+	}
+	buffer = task_sig(task, buffer);
+	buffer = task_cap(task, buffer);
+	buffer = cpuset_task_status_allowed(task, buffer);
+#if defined(CONFIG_ARCH_S390)
+	buffer = task_show_regs(task, buffer);
+#endif
+	return buffer - orig;
+}
+
+static int do_task_stat(struct task_struct *task, char * buffer, int whole)
+{
+	unsigned long vsize, eip, esp, wchan = ~0UL;
+	long priority, nice;
+	int tty_pgrp = -1, tty_nr = 0;
+	sigset_t sigign, sigcatch;
+	char state;
+	int res;
+ 	pid_t ppid, pgid = -1, sid = -1;
+	int num_threads = 0;
+	struct mm_struct *mm;
+	unsigned long long start_time;
+	unsigned long cmin_flt = 0, cmaj_flt = 0;
+	unsigned long  min_flt = 0,  maj_flt = 0;
+	cputime_t cutime, cstime, utime, stime;
+	unsigned long rsslim = 0;
+	unsigned long it_real_value = 0;
+	struct task_struct *t;
+	char tcomm[sizeof(task->comm)];
+
+	state = *get_task_state(task);
+	vsize = eip = esp = 0;
+	mm = get_task_mm(task);
+	if (mm) {
+		vsize = task_vsize(mm);
+		eip = KSTK_EIP(task);
+		esp = KSTK_ESP(task);
+	}
+
+	get_task_comm(tcomm, task);
+
+	sigemptyset(&sigign);
+	sigemptyset(&sigcatch);
+	cutime = cstime = utime = stime = cputime_zero;
+	read_lock(&tasklist_lock);
+	if (task->sighand) {
+		spin_lock_irq(&task->sighand->siglock);
+		num_threads = atomic_read(&task->signal->count);
+		collect_sigign_sigcatch(task, &sigign, &sigcatch);
+
+		/* add up live thread stats at the group level */
+		if (whole) {
+			t = task;
+			do {
+				min_flt += t->min_flt;
+				maj_flt += t->maj_flt;
+				utime = cputime_add(utime, t->utime);
+				stime = cputime_add(stime, t->stime);
+				t = next_thread(t);
+			} while (t != task);
+		}
+
+		spin_unlock_irq(&task->sighand->siglock);
+	}
+	if (task->signal) {
+		if (task->signal->tty) {
+			tty_pgrp = task->signal->tty->pgrp;
+			tty_nr = new_encode_dev(tty_devnum(task->signal->tty));
+		}
+		pgid = process_group(task);
+		sid = task->signal->session;
+		cmin_flt = task->signal->cmin_flt;
+		cmaj_flt = task->signal->cmaj_flt;
+		cutime = task->signal->cutime;
+		cstime = task->signal->cstime;
+		rsslim = task->signal->rlim[RLIMIT_RSS].rlim_cur;
+		if (whole) {
+			min_flt += task->signal->min_flt;
+			maj_flt += task->signal->maj_flt;
+			utime = cputime_add(utime, task->signal->utime);
+			stime = cputime_add(stime, task->signal->stime);
+		}
+		it_real_value = task->signal->it_real_value;
+	}
+	ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
+	read_unlock(&tasklist_lock);
+
+	if (!whole || num_threads<2)
+		wchan = get_wchan(task);
+	if (!whole) {
+		min_flt = task->min_flt;
+		maj_flt = task->maj_flt;
+		utime = task->utime;
+		stime = task->stime;
+	}
+
+	/* scale priority and nice values from timeslices to -20..20 */
+	/* to make it look like a "normal" Unix priority/nice value  */
+	priority = task_prio(task);
+	nice = task_nice(task);
+
+	/* Temporary variable needed for gcc-2.96 */
+	/* convert timespec -> nsec*/
+	start_time = (unsigned long long)task->start_time.tv_sec * NSEC_PER_SEC
+				+ task->start_time.tv_nsec;
+	/* convert nsec -> ticks */
+	start_time = nsec_to_clock_t(start_time);
+
+	res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
+%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
+%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
+		task->pid,
+		tcomm,
+		state,
+		ppid,
+		pgid,
+		sid,
+		tty_nr,
+		tty_pgrp,
+		task->flags,
+		min_flt,
+		cmin_flt,
+		maj_flt,
+		cmaj_flt,
+		cputime_to_clock_t(utime),
+		cputime_to_clock_t(stime),
+		cputime_to_clock_t(cutime),
+		cputime_to_clock_t(cstime),
+		priority,
+		nice,
+		num_threads,
+		jiffies_to_clock_t(it_real_value),
+		start_time,
+		vsize,
+		mm ? get_mm_counter(mm, rss) : 0, /* you might want to shift this left 3 */
+	        rsslim,
+		mm ? mm->start_code : 0,
+		mm ? mm->end_code : 0,
+		mm ? mm->start_stack : 0,
+		esp,
+		eip,
+		/* The signal information here is obsolete.
+		 * It must be decimal for Linux 2.0 compatibility.
+		 * Use /proc/#/status for real-time signals.
+		 */
+		task->pending.signal.sig[0] & 0x7fffffffUL,
+		task->blocked.sig[0] & 0x7fffffffUL,
+		sigign      .sig[0] & 0x7fffffffUL,
+		sigcatch    .sig[0] & 0x7fffffffUL,
+		wchan,
+		0UL,
+		0UL,
+		task->exit_signal,
+		task_cpu(task),
+		task->rt_priority,
+		task->policy);
+	if(mm)
+		mmput(mm);
+	return res;
+}
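
Because tcomm is printed inside parentheses and may itself contain spaces or ')', user-space consumers of the line formatted above usually anchor on the last closing parenthesis rather than splitting on whitespace. A minimal parsing sketch (hypothetical helper, plain C library only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pull pid, comm and state out of a /proc/<pid>/stat line. */
int parse_stat_line(const char *line, int *pid, char *comm, size_t commsz, char *state)
{
	const char *lparen = strchr(line, '(');
	const char *rparen = strrchr(line, ')');
	size_t len;

	if (!lparen || !rparen || rparen < lparen)
		return -1;

	*pid = atoi(line);			/* "%d" comes first */

	len = (size_t)(rparen - lparen) - 1;	/* comm is everything between the parens */
	if (len >= commsz)
		len = commsz - 1;
	memcpy(comm, lparen + 1, len);
	comm[len] = '\0';

	/* The single state character follows the closing paren. */
	return sscanf(rparen + 1, " %c", state) == 1 ? 0 : -1;
}
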
+
+int proc_tid_stat(struct task_struct *task, char * buffer)
+{
+	return do_task_stat(task, buffer, 0);
+}
+
+int proc_tgid_stat(struct task_struct *task, char * buffer)
+{
+	return do_task_stat(task, buffer, 1);
+}
+
+int proc_pid_statm(struct task_struct *task, char *buffer)
+{
+	int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
+	struct mm_struct *mm = get_task_mm(task);
+	
+	if (mm) {
+		size = task_statm(mm, &shared, &text, &data, &resident);
+		mmput(mm);
+	}
+
+	return sprintf(buffer,"%d %d %d %d %d %d %d\n",
+		       size, resident, shared, text, lib, data, 0);
+}
diff --git a/fs/proc/base.c b/fs/proc/base.c
new file mode 100644
index 0000000..dad8ea4
--- /dev/null
+++ b/fs/proc/base.c
@@ -0,0 +1,2056 @@
+/*
+ *  linux/fs/proc/base.c
+ *
+ *  Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *  proc base directory handling functions
+ *
+ *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
+ *  Instead of using magical inumbers to determine the kind of object,
+ *  we allocate and fill in-core inodes upon lookup. They don't even
+ *  go into icache. We cache the reference to task_struct upon lookup too.
+ *  Eventually it should become a filesystem in its own right. We don't use the
+ *  rest of procfs anymore.
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/namei.h>
+#include <linux/namespace.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/kallsyms.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/ptrace.h>
+#include <linux/seccomp.h>
+#include <linux/cpuset.h>
+#include <linux/audit.h>
+#include "internal.h"
+
+/*
+ * For hysterical raisins we keep the same inumbers as in the old procfs.
+ * Feel free to change the macro below - just keep the range distinct from
+ * inumbers of the rest of procfs (currently those are in 0x0000--0xffff).
+ * As soon as we get a separate superblock we will be able to forget
+ * about magical ranges too.
+ */
+
+#define fake_ino(pid,ino) (((pid)<<16)|(ino))
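
For example, pid 1234 with per-process entry 3 packs to fake_ino(1234, 3) == (1234 << 16) | 3 == 0x04d20003: the pid lands in the upper bits and the entry number in the low 16, which is why the entry numbers in the enum below must fit within 16 bits.
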
+
+enum pid_directory_inos {
+	PROC_TGID_INO = 2,
+	PROC_TGID_TASK,
+	PROC_TGID_STATUS,
+	PROC_TGID_MEM,
+#ifdef CONFIG_SECCOMP
+	PROC_TGID_SECCOMP,
+#endif
+	PROC_TGID_CWD,
+	PROC_TGID_ROOT,
+	PROC_TGID_EXE,
+	PROC_TGID_FD,
+	PROC_TGID_ENVIRON,
+	PROC_TGID_AUXV,
+	PROC_TGID_CMDLINE,
+	PROC_TGID_STAT,
+	PROC_TGID_STATM,
+	PROC_TGID_MAPS,
+	PROC_TGID_MOUNTS,
+	PROC_TGID_WCHAN,
+#ifdef CONFIG_SCHEDSTATS
+	PROC_TGID_SCHEDSTAT,
+#endif
+#ifdef CONFIG_CPUSETS
+	PROC_TGID_CPUSET,
+#endif
+#ifdef CONFIG_SECURITY
+	PROC_TGID_ATTR,
+	PROC_TGID_ATTR_CURRENT,
+	PROC_TGID_ATTR_PREV,
+	PROC_TGID_ATTR_EXEC,
+	PROC_TGID_ATTR_FSCREATE,
+#endif
+#ifdef CONFIG_AUDITSYSCALL
+	PROC_TGID_LOGINUID,
+#endif
+	PROC_TGID_FD_DIR,
+	PROC_TGID_OOM_SCORE,
+	PROC_TGID_OOM_ADJUST,
+	PROC_TID_INO,
+	PROC_TID_STATUS,
+	PROC_TID_MEM,
+#ifdef CONFIG_SECCOMP
+	PROC_TID_SECCOMP,
+#endif
+	PROC_TID_CWD,
+	PROC_TID_ROOT,
+	PROC_TID_EXE,
+	PROC_TID_FD,
+	PROC_TID_ENVIRON,
+	PROC_TID_AUXV,
+	PROC_TID_CMDLINE,
+	PROC_TID_STAT,
+	PROC_TID_STATM,
+	PROC_TID_MAPS,
+	PROC_TID_MOUNTS,
+	PROC_TID_WCHAN,
+#ifdef CONFIG_SCHEDSTATS
+	PROC_TID_SCHEDSTAT,
+#endif
+#ifdef CONFIG_CPUSETS
+	PROC_TID_CPUSET,
+#endif
+#ifdef CONFIG_SECURITY
+	PROC_TID_ATTR,
+	PROC_TID_ATTR_CURRENT,
+	PROC_TID_ATTR_PREV,
+	PROC_TID_ATTR_EXEC,
+	PROC_TID_ATTR_FSCREATE,
+#endif
+#ifdef CONFIG_AUDITSYSCALL
+	PROC_TID_LOGINUID,
+#endif
+	PROC_TID_FD_DIR = 0x8000,	/* 0x8000-0xffff */
+	PROC_TID_OOM_SCORE,
+	PROC_TID_OOM_ADJUST,
+};
+
+struct pid_entry {
+	int type;
+	int len;
+	char *name;
+	mode_t mode;
+};
+
+#define E(type,name,mode) {(type),sizeof(name)-1,(name),(mode)}
+
+static struct pid_entry tgid_base_stuff[] = {
+	E(PROC_TGID_TASK,      "task",    S_IFDIR|S_IRUGO|S_IXUGO),
+	E(PROC_TGID_FD,        "fd",      S_IFDIR|S_IRUSR|S_IXUSR),
+	E(PROC_TGID_ENVIRON,   "environ", S_IFREG|S_IRUSR),
+	E(PROC_TGID_AUXV,      "auxv",	  S_IFREG|S_IRUSR),
+	E(PROC_TGID_STATUS,    "status",  S_IFREG|S_IRUGO),
+	E(PROC_TGID_CMDLINE,   "cmdline", S_IFREG|S_IRUGO),
+	E(PROC_TGID_STAT,      "stat",    S_IFREG|S_IRUGO),
+	E(PROC_TGID_STATM,     "statm",   S_IFREG|S_IRUGO),
+	E(PROC_TGID_MAPS,      "maps",    S_IFREG|S_IRUGO),
+	E(PROC_TGID_MEM,       "mem",     S_IFREG|S_IRUSR|S_IWUSR),
+#ifdef CONFIG_SECCOMP
+	E(PROC_TGID_SECCOMP,   "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
+#endif
+	E(PROC_TGID_CWD,       "cwd",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_ROOT,      "root",    S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_EXE,       "exe",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TGID_MOUNTS,    "mounts",  S_IFREG|S_IRUGO),
+#ifdef CONFIG_SECURITY
+	E(PROC_TGID_ATTR,      "attr",    S_IFDIR|S_IRUGO|S_IXUGO),
+#endif
+#ifdef CONFIG_KALLSYMS
+	E(PROC_TGID_WCHAN,     "wchan",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	E(PROC_TGID_SCHEDSTAT, "schedstat", S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_CPUSETS
+	E(PROC_TGID_CPUSET,    "cpuset",  S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TGID_OOM_SCORE, "oom_score",S_IFREG|S_IRUGO),
+	E(PROC_TGID_OOM_ADJUST,"oom_adj", S_IFREG|S_IRUGO|S_IWUSR),
+#ifdef CONFIG_AUDITSYSCALL
+	E(PROC_TGID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
+#endif
+	{0,0,NULL,0}
+};
+static struct pid_entry tid_base_stuff[] = {
+	E(PROC_TID_FD,         "fd",      S_IFDIR|S_IRUSR|S_IXUSR),
+	E(PROC_TID_ENVIRON,    "environ", S_IFREG|S_IRUSR),
+	E(PROC_TID_AUXV,       "auxv",	  S_IFREG|S_IRUSR),
+	E(PROC_TID_STATUS,     "status",  S_IFREG|S_IRUGO),
+	E(PROC_TID_CMDLINE,    "cmdline", S_IFREG|S_IRUGO),
+	E(PROC_TID_STAT,       "stat",    S_IFREG|S_IRUGO),
+	E(PROC_TID_STATM,      "statm",   S_IFREG|S_IRUGO),
+	E(PROC_TID_MAPS,       "maps",    S_IFREG|S_IRUGO),
+	E(PROC_TID_MEM,        "mem",     S_IFREG|S_IRUSR|S_IWUSR),
+#ifdef CONFIG_SECCOMP
+	E(PROC_TID_SECCOMP,    "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
+#endif
+	E(PROC_TID_CWD,        "cwd",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_ROOT,       "root",    S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_EXE,        "exe",     S_IFLNK|S_IRWXUGO),
+	E(PROC_TID_MOUNTS,     "mounts",  S_IFREG|S_IRUGO),
+#ifdef CONFIG_SECURITY
+	E(PROC_TID_ATTR,       "attr",    S_IFDIR|S_IRUGO|S_IXUGO),
+#endif
+#ifdef CONFIG_KALLSYMS
+	E(PROC_TID_WCHAN,      "wchan",   S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	E(PROC_TID_SCHEDSTAT, "schedstat",S_IFREG|S_IRUGO),
+#endif
+#ifdef CONFIG_CPUSETS
+	E(PROC_TID_CPUSET,     "cpuset",  S_IFREG|S_IRUGO),
+#endif
+	E(PROC_TID_OOM_SCORE,  "oom_score",S_IFREG|S_IRUGO),
+	E(PROC_TID_OOM_ADJUST, "oom_adj", S_IFREG|S_IRUGO|S_IWUSR),
+#ifdef CONFIG_AUDITSYSCALL
+	E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
+#endif
+	{0,0,NULL,0}
+};
+
+#ifdef CONFIG_SECURITY
+static struct pid_entry tgid_attr_stuff[] = {
+	E(PROC_TGID_ATTR_CURRENT,  "current",  S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TGID_ATTR_PREV,     "prev",     S_IFREG|S_IRUGO),
+	E(PROC_TGID_ATTR_EXEC,     "exec",     S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TGID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+	{0,0,NULL,0}
+};
+static struct pid_entry tid_attr_stuff[] = {
+	E(PROC_TID_ATTR_CURRENT,   "current",  S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TID_ATTR_PREV,      "prev",     S_IFREG|S_IRUGO),
+	E(PROC_TID_ATTR_EXEC,      "exec",     S_IFREG|S_IRUGO|S_IWUGO),
+	E(PROC_TID_ATTR_FSCREATE,  "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+	{0,0,NULL,0}
+};
+#endif
+
+#undef E
+
+static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct task_struct *task = proc_task(inode);
+	struct files_struct *files;
+	struct file *file;
+	int fd = proc_type(inode) - PROC_TID_FD_DIR;
+
+	files = get_files_struct(task);
+	if (files) {
+		spin_lock(&files->file_lock);
+		file = fcheck_files(files, fd);
+		if (file) {
+			*mnt = mntget(file->f_vfsmnt);
+			*dentry = dget(file->f_dentry);
+			spin_unlock(&files->file_lock);
+			put_files_struct(files);
+			return 0;
+		}
+		spin_unlock(&files->file_lock);
+		put_files_struct(files);
+	}
+	return -ENOENT;
+}
+
+static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct fs_struct *fs;
+	int result = -ENOENT;
+	task_lock(proc_task(inode));
+	fs = proc_task(inode)->fs;
+	if(fs)
+		atomic_inc(&fs->count);
+	task_unlock(proc_task(inode));
+	if (fs) {
+		read_lock(&fs->lock);
+		*mnt = mntget(fs->pwdmnt);
+		*dentry = dget(fs->pwd);
+		read_unlock(&fs->lock);
+		result = 0;
+		put_fs_struct(fs);
+	}
+	return result;
+}
+
+static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct fs_struct *fs;
+	int result = -ENOENT;
+	task_lock(proc_task(inode));
+	fs = proc_task(inode)->fs;
+	if(fs)
+		atomic_inc(&fs->count);
+	task_unlock(proc_task(inode));
+	if (fs) {
+		read_lock(&fs->lock);
+		*mnt = mntget(fs->rootmnt);
+		*dentry = dget(fs->root);
+		read_unlock(&fs->lock);
+		result = 0;
+		put_fs_struct(fs);
+	}
+	return result;
+}
+
+#define MAY_PTRACE(task) \
+	(task == current || \
+	(task->parent == current && \
+	(task->ptrace & PT_PTRACED) && \
+	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 security_ptrace(current,task) == 0))
+
+static int may_ptrace_attach(struct task_struct *task)
+{
+	int retval = 0;
+
+	task_lock(task);
+
+	if (!task->mm)
+		goto out;
+	if (((current->uid != task->euid) ||
+	     (current->uid != task->suid) ||
+	     (current->uid != task->uid) ||
+	     (current->gid != task->egid) ||
+	     (current->gid != task->sgid) ||
+	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
+		goto out;
+	rmb();
+	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
+		goto out;
+	if (security_ptrace(current, task))
+		goto out;
+
+	retval = 1;
+out:
+	task_unlock(task);
+	return retval;
+}
+
+static int proc_pid_environ(struct task_struct *task, char * buffer)
+{
+	int res = 0;
+	struct mm_struct *mm = get_task_mm(task);
+	if (mm) {
+		unsigned int len = mm->env_end - mm->env_start;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
+		res = access_process_vm(task, mm->env_start, buffer, len, 0);
+		if (!may_ptrace_attach(task))
+			res = -ESRCH;
+		mmput(mm);
+	}
+	return res;
+}
+
+static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+{
+	int res = 0;
+	unsigned int len;
+	struct mm_struct *mm = get_task_mm(task);
+	if (!mm)
+		goto out;
+	if (!mm->arg_end)
+		goto out_mm;	/* Shh! No looking before we're done */
+
+ 	len = mm->arg_end - mm->arg_start;
+ 
+	if (len > PAGE_SIZE)
+		len = PAGE_SIZE;
+ 
+	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+	// If the nul at the end of args has been overwritten, then
+	// assume application is using setproctitle(3).
+	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
+		len = strnlen(buffer, res);
+		if (len < res) {
+		    res = len;
+		} else {
+			len = mm->env_end - mm->env_start;
+			if (len > PAGE_SIZE - res)
+				len = PAGE_SIZE - res;
+			res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
+			res = strnlen(buffer, res);
+		}
+	}
+out_mm:
+	mmput(mm);
+out:
+	return res;
+}
+
+static int proc_pid_auxv(struct task_struct *task, char *buffer)
+{
+	int res = 0;
+	struct mm_struct *mm = get_task_mm(task);
+	if (mm) {
+		unsigned int nwords = 0;
+		do
+			nwords += 2;
+		while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+		res = nwords * sizeof(mm->saved_auxv[0]);
+		if (res > PAGE_SIZE)
+			res = PAGE_SIZE;
+		memcpy(buffer, mm->saved_auxv, res);
+		mmput(mm);
+	}
+	return res;
+}
+
+
+#ifdef CONFIG_KALLSYMS
+/*
+ * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+ * Returns the resolved symbol.  If that fails, simply returns the address.
+ */
+static int proc_pid_wchan(struct task_struct *task, char *buffer)
+{
+	char *modname;
+	const char *sym_name;
+	unsigned long wchan, size, offset;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	wchan = get_wchan(task);
+
+	sym_name = kallsyms_lookup(wchan, &size, &offset, &modname, namebuf);
+	if (sym_name)
+		return sprintf(buffer, "%s", sym_name);
+	return sprintf(buffer, "%lu", wchan);
+}
+#endif /* CONFIG_KALLSYMS */
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * Provides /proc/PID/schedstat
+ */
+static int proc_pid_schedstat(struct task_struct *task, char *buffer)
+{
+	return sprintf(buffer, "%lu %lu %lu\n",
+			task->sched_info.cpu_time,
+			task->sched_info.run_delay,
+			task->sched_info.pcnt);
+}
+#endif
+
+/* The badness from the OOM killer */
+unsigned long badness(struct task_struct *p, unsigned long uptime);
+static int proc_oom_score(struct task_struct *task, char *buffer)
+{
+	unsigned long points;
+	struct timespec uptime;
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	points = badness(task, uptime.tv_sec);
+	return sprintf(buffer, "%lu\n", points);
+}
+
+/************************************************************************/
+/*                       Here the fs part begins                        */
+/************************************************************************/
+
+/* permission checks */
+
+static int proc_check_root(struct inode *inode)
+{
+	struct dentry *de, *base, *root;
+	struct vfsmount *our_vfsmnt, *vfsmnt, *mnt;
+	int res = 0;
+
+	if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */
+		return -ENOENT;
+	read_lock(&current->fs->lock);
+	our_vfsmnt = mntget(current->fs->rootmnt);
+	base = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+
+	spin_lock(&vfsmount_lock);
+	de = root;
+	mnt = vfsmnt;
+
+	while (vfsmnt != our_vfsmnt) {
+		if (vfsmnt == vfsmnt->mnt_parent)
+			goto out;
+		de = vfsmnt->mnt_mountpoint;
+		vfsmnt = vfsmnt->mnt_parent;
+	}
+
+	if (!is_subdir(de, base))
+		goto out;
+	spin_unlock(&vfsmount_lock);
+
+exit:
+	dput(base);
+	mntput(our_vfsmnt);
+	dput(root);
+	mntput(mnt);
+	return res;
+out:
+	spin_unlock(&vfsmount_lock);
+	res = -EACCES;
+	goto exit;
+}
+
+static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	if (generic_permission(inode, mask, NULL) != 0)
+		return -EACCES;
+	return proc_check_root(inode);
+}
+
+extern struct seq_operations proc_pid_maps_op;
+static int maps_open(struct inode *inode, struct file *file)
+{
+	struct task_struct *task = proc_task(inode);
+	int ret = seq_open(file, &proc_pid_maps_op);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = task;
+	}
+	return ret;
+}
+
+static struct file_operations proc_maps_operations = {
+	.open		= maps_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+extern struct seq_operations mounts_op;
+static int mounts_open(struct inode *inode, struct file *file)
+{
+	struct task_struct *task = proc_task(inode);
+	int ret = seq_open(file, &mounts_op);
+
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		struct namespace *namespace;
+		task_lock(task);
+		namespace = task->namespace;
+		if (namespace)
+			get_namespace(namespace);
+		task_unlock(task);
+
+		if (namespace)
+			m->private = namespace;
+		else {
+			seq_release(inode, file);
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static int mounts_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct namespace *namespace = m->private;
+	put_namespace(namespace);
+	return seq_release(inode, file);
+}
+
+static struct file_operations proc_mounts_operations = {
+	.open		= mounts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+};
+
+#define PROC_BLOCK_SIZE	(3*1024)		/* 4K page size but our output routines use some slack for overruns */
+
+static ssize_t proc_info_read(struct file * file, char __user * buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length;
+	struct task_struct *task = proc_task(inode);
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	length = PROC_I(inode)->op.proc_read(task, (char*)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
+	free_page(page);
+	return length;
+}
+
+static struct file_operations proc_info_file_operations = {
+	.read		= proc_info_read,
+};
+
+static int mem_open(struct inode* inode, struct file* file)
+{
+	file->private_data = (void*)((long)current->self_exec_id);
+	return 0;
+}
+
+static ssize_t mem_read(struct file * file, char __user * buf,
+			size_t count, loff_t *ppos)
+{
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	char *page;
+	unsigned long src = *ppos;
+	int ret = -ESRCH;
+	struct mm_struct *mm;
+
+	if (!MAY_PTRACE(task) || !may_ptrace_attach(task))
+		goto out;
+
+	ret = -ENOMEM;
+	page = (char *)__get_free_page(GFP_USER);
+	if (!page)
+		goto out;
+
+	ret = 0;
+ 
+	mm = get_task_mm(task);
+	if (!mm)
+		goto out_free;
+
+	ret = -EIO;
+ 
+	if (file->private_data != (void*)((long)current->self_exec_id))
+		goto out_put;
+
+	ret = 0;
+ 
+	while (count > 0) {
+		int this_len, retval;
+
+		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		retval = access_process_vm(task, src, page, this_len, 0);
+		if (!retval || !MAY_PTRACE(task) || !may_ptrace_attach(task)) {
+			if (!ret)
+				ret = -EIO;
+			break;
+		}
+
+		if (copy_to_user(buf, page, retval)) {
+			ret = -EFAULT;
+			break;
+		}
+ 
+		ret += retval;
+		src += retval;
+		buf += retval;
+		count -= retval;
+	}
+	*ppos = src;
+
+out_put:
+	mmput(mm);
+out_free:
+	free_page((unsigned long) page);
+out:
+	return ret;
+}
+
+#define mem_write NULL
+
+#ifndef mem_write
+/* This is a security hazard */
+static ssize_t mem_write(struct file * file, const char * buf,
+			 size_t count, loff_t *ppos)
+{
+	int copied = 0;
+	char *page;
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	unsigned long dst = *ppos;
+
+	if (!MAY_PTRACE(task) || !may_ptrace_attach(task))
+		return -ESRCH;
+
+	page = (char *)__get_free_page(GFP_USER);
+	if (!page)
+		return -ENOMEM;
+
+	while (count > 0) {
+		int this_len, retval;
+
+		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		if (copy_from_user(page, buf, this_len)) {
+			copied = -EFAULT;
+			break;
+		}
+		retval = access_process_vm(task, dst, page, this_len, 1);
+		if (!retval) {
+			if (!copied)
+				copied = -EIO;
+			break;
+		}
+		copied += retval;
+		buf += retval;
+		dst += retval;
+		count -= retval;			
+	}
+	*ppos = dst;
+	free_page((unsigned long) page);
+	return copied;
+}
+#endif
+
+static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+{
+	switch (orig) {
+	case 0:
+		file->f_pos = offset;
+		break;
+	case 1:
+		file->f_pos += offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+	force_successful_syscall_return();
+	return file->f_pos;
+}
+
+static struct file_operations proc_mem_operations = {
+	.llseek		= mem_lseek,
+	.read		= mem_read,
+	.write		= mem_write,
+	.open		= mem_open,
+};
+
+static ssize_t oom_adjust_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	char buffer[8];
+	size_t len;
+	int oom_adjust = task->oomkilladj;
+	loff_t __ppos = *ppos;
+
+	len = sprintf(buffer, "%i\n", oom_adjust);
+	if (__ppos >= len)
+		return 0;
+	if (count > len-__ppos)
+		count = len-__ppos;
+	if (copy_to_user(buf, buffer + __ppos, count))
+		return -EFAULT;
+	*ppos = __ppos + count;
+	return count;
+}
+
+static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	char buffer[8], *end;
+	int oom_adjust;
+
+	if (!capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+	memset(buffer, 0, 8);
+	if (count > 6)
+		count = 6;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	oom_adjust = simple_strtol(buffer, &end, 0);
+	if (oom_adjust < -16 || oom_adjust > 15)
+		return -EINVAL;
+	if (*end == '\n')
+		end++;
+	task->oomkilladj = oom_adjust;
+	if (end - buffer == 0)
+		return -EIO;
+	return end - buffer;
+}
+
+static struct file_operations proc_oom_adjust_operations = {
+	.read		= oom_adjust_read,
+	.write		= oom_adjust_write,
+};
+
+static struct inode_operations proc_mem_inode_operations = {
+	.permission	= proc_permission,
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+#define TMPBUFLEN 21
+static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
+				  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	struct task_struct *task = proc_task(inode);
+	ssize_t length;
+	char tmpbuf[TMPBUFLEN];
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+				audit_get_loginuid(task->audit_context));
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
+				   size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	char *page, *tmp;
+	ssize_t length;
+	struct task_struct *task = proc_task(inode);
+	uid_t loginuid;
+
+	if (!capable(CAP_AUDIT_CONTROL))
+		return -EPERM;
+
+	if (current != task)
+		return -EPERM;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+
+	if (*ppos != 0) {
+		/* No partial writes. */
+		return -EINVAL;
+	}
+	page = (char*)__get_free_page(GFP_USER);
+	if (!page)
+		return -ENOMEM;
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
+		goto out_free_page;
+
+	loginuid = simple_strtoul(page, &tmp, 10);
+	if (tmp == page) {
+		length = -EINVAL;
+		goto out_free_page;
+
+	}
+	length = audit_set_loginuid(task->audit_context, loginuid);
+	if (likely(length == 0))
+		length = count;
+
+out_free_page:
+	free_page((unsigned long) page);
+	return length;
+}
+
+static struct file_operations proc_loginuid_operations = {
+	.read		= proc_loginuid_read,
+	.write		= proc_loginuid_write,
+};
+#endif
+
+#ifdef CONFIG_SECCOMP
+static ssize_t seccomp_read(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct task_struct *tsk = proc_task(file->f_dentry->d_inode);
+	char __buf[20];
+	loff_t __ppos = *ppos;
+	size_t len;
+
+	/* no need to print the trailing zero, so use only len */
+	len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
+	if (__ppos >= len)
+		return 0;
+	if (count > len - __ppos)
+		count = len - __ppos;
+	if (copy_to_user(buf, __buf + __ppos, count))
+		return -EFAULT;
+	*ppos = __ppos + count;
+	return count;
+}
+
+static ssize_t seccomp_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct task_struct *tsk = proc_task(file->f_dentry->d_inode);
+	char __buf[20], *end;
+	unsigned int seccomp_mode;
+
+	/* can set it only once to be even more secure */
+	if (unlikely(tsk->seccomp.mode))
+		return -EPERM;
+
+	memset(__buf, 0, sizeof(__buf));
+	count = min(count, sizeof(__buf) - 1);
+	if (copy_from_user(__buf, buf, count))
+		return -EFAULT;
+	seccomp_mode = simple_strtoul(__buf, &end, 0);
+	if (*end == '\n')
+		end++;
+	if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
+		tsk->seccomp.mode = seccomp_mode;
+		set_tsk_thread_flag(tsk, TIF_SECCOMP);
+	} else
+		return -EINVAL;
+	if (unlikely(!(end - __buf)))
+		return -EIO;
+	return end - __buf;
+}
+
+static struct file_operations proc_seccomp_operations = {
+	.read		= seccomp_read,
+	.write		= seccomp_write,
+};
+#endif /* CONFIG_SECCOMP */
+
+static int proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = -EACCES;
+
+	/* We don't need a base pointer in the /proc filesystem */
+	path_release(nd);
+
+	if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE))
+		goto out;
+	error = proc_check_root(inode);
+	if (error)
+		goto out;
+
+	error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
+	nd->last_type = LAST_BIND;
+out:
+	return error;
+}
+
+static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
+			    char __user *buffer, int buflen)
+{
+	struct inode * inode;
+	char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+	int len;
+
+	if (!tmp)
+		return -ENOMEM;
+		
+	inode = dentry->d_inode;
+	path = d_path(dentry, mnt, tmp, PAGE_SIZE);
+	len = PTR_ERR(path);
+	if (IS_ERR(path))
+		goto out;
+	len = tmp + PAGE_SIZE - 1 - path;
+
+	if (len > buflen)
+		len = buflen;
+	if (copy_to_user(buffer, path, len))
+		len = -EFAULT;
+ out:
+	free_page((unsigned long)tmp);
+	return len;
+}
+
+static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
+{
+	int error = -EACCES;
+	struct inode *inode = dentry->d_inode;
+	struct dentry *de;
+	struct vfsmount *mnt = NULL;
+
+	lock_kernel();
+
+	if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE))
+		goto out;
+	error = proc_check_root(inode);
+	if (error)
+		goto out;
+
+	error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
+	if (error)
+		goto out;
+
+	error = do_proc_readlink(de, mnt, buffer, buflen);
+	dput(de);
+	mntput(mnt);
+out:
+	unlock_kernel();
+	return error;
+}
+
+static struct inode_operations proc_pid_link_inode_operations = {
+	.readlink	= proc_pid_readlink,
+	.follow_link	= proc_pid_follow_link
+};
+
+#define NUMBUF 10
+
+static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct task_struct *p = proc_task(inode);
+	unsigned int fd, tid, ino;
+	int retval;
+	char buf[NUMBUF];
+	struct files_struct * files;
+
+	retval = -ENOENT;
+	if (!pid_alive(p))
+		goto out;
+	retval = 0;
+	tid = p->pid;
+
+	fd = filp->f_pos;
+	switch (fd) {
+		case 0:
+			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+				goto out;
+			filp->f_pos++;
+		case 1:
+			ino = fake_ino(tid, PROC_TID_INO);
+			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+				goto out;
+			filp->f_pos++;
+		default:
+			files = get_files_struct(p);
+			if (!files)
+				goto out;
+			spin_lock(&files->file_lock);
+			for (fd = filp->f_pos-2;
+			     fd < files->max_fds;
+			     fd++, filp->f_pos++) {
+				unsigned int i,j;
+
+				if (!fcheck_files(files, fd))
+					continue;
+				spin_unlock(&files->file_lock);
+
+				j = NUMBUF;
+				i = fd;
+				do {
+					j--;
+					buf[j] = '0' + (i % 10);
+					i /= 10;
+				} while (i);
+
+				ino = fake_ino(tid, PROC_TID_FD_DIR + fd);
+				if (filldir(dirent, buf+j, NUMBUF-j, fd+2, ino, DT_LNK) < 0) {
+					spin_lock(&files->file_lock);
+					break;
+				}
+				spin_lock(&files->file_lock);
+			}
+			spin_unlock(&files->file_lock);
+			put_files_struct(files);
+	}
+out:
+	return retval;
+}
+
+static int proc_pident_readdir(struct file *filp,
+		void *dirent, filldir_t filldir,
+		struct pid_entry *ents, unsigned int nents)
+{
+	int i;
+	int pid;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct pid_entry *p;
+	ino_t ino;
+	int ret;
+
+	ret = -ENOENT;
+	if (!pid_alive(proc_task(inode)))
+		goto out;
+
+	ret = 0;
+	pid = proc_task(inode)->pid;
+	i = filp->f_pos;
+	switch (i) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+			goto out;
+		i++;
+		filp->f_pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+			goto out;
+		i++;
+		filp->f_pos++;
+		/* fall through */
+	default:
+		i -= 2;
+		if (i >= nents) {
+			ret = 1;
+			goto out;
+		}
+		p = ents + i;
+		while (p->name) {
+			if (filldir(dirent, p->name, p->len, filp->f_pos,
+				    fake_ino(pid, p->type), p->mode >> 12) < 0)
+				goto out;
+			filp->f_pos++;
+			p++;
+		}
+	}
+
+	ret = 1;
+out:
+	return ret;
+}
+
+static int proc_tgid_base_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
+}
+
+static int proc_tid_base_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
+}
+
+/* building an inode */
+
+static int task_dumpable(struct task_struct *task)
+{
+	int dumpable = 0;
+	struct mm_struct *mm;
+
+	task_lock(task);
+	mm = task->mm;
+	if (mm)
+		dumpable = mm->dumpable;
+	task_unlock(task);
+	return dumpable;
+}
+
+
+static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task, int ino)
+{
+	struct inode * inode;
+	struct proc_inode *ei;
+
+	/* We need a new inode */
+	
+	inode = new_inode(sb);
+	if (!inode)
+		goto out;
+
+	/* Common stuff */
+	ei = PROC_I(inode);
+	ei->task = NULL;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_ino = fake_ino(task->pid, ino);
+
+	if (!pid_alive(task))
+		goto out_unlock;
+
+	/*
+	 * grab the reference to task.
+	 */
+	get_task_struct(task);
+	ei->task = task;
+	ei->type = ino;
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	if (ino == PROC_TGID_INO || ino == PROC_TID_INO || task_dumpable(task)) {
+		inode->i_uid = task->euid;
+		inode->i_gid = task->egid;
+	}
+	security_task_to_inode(task, inode);
+
+out:
+	return inode;
+
+out_unlock:
+	ei->pde = NULL;
+	iput(inode);
+	return NULL;
+}
+
+/* dentry stuff */
+
+/*
+ *	Exceptional case: normally we are not allowed to unhash a busy
+ * directory. In this case, however, we can do it - no aliasing problems
+ * due to the way we treat inodes.
+ *
+ * Rewrite the inode's ownerships here because the owning task may have
+ * performed a setuid(), etc.
+ */
+static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *task = proc_task(inode);
+	if (pid_alive(task)) {
+		if (proc_type(inode) == PROC_TGID_INO || proc_type(inode) == PROC_TID_INO || task_dumpable(task)) {
+			inode->i_uid = task->euid;
+			inode->i_gid = task->egid;
+		} else {
+			inode->i_uid = 0;
+			inode->i_gid = 0;
+		}
+		security_task_to_inode(task, inode);
+		return 1;
+	}
+	d_drop(dentry);
+	return 0;
+}
+
+static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	struct task_struct *task = proc_task(inode);
+	int fd = proc_type(inode) - PROC_TID_FD_DIR;
+	struct files_struct *files;
+
+	files = get_files_struct(task);
+	if (files) {
+		spin_lock(&files->file_lock);
+		if (fcheck_files(files, fd)) {
+			spin_unlock(&files->file_lock);
+			put_files_struct(files);
+			if (task_dumpable(task)) {
+				inode->i_uid = task->euid;
+				inode->i_gid = task->egid;
+			} else {
+				inode->i_uid = 0;
+				inode->i_gid = 0;
+			}
+			security_task_to_inode(task, inode);
+			return 1;
+		}
+		spin_unlock(&files->file_lock);
+		put_files_struct(files);
+	}
+	d_drop(dentry);
+	return 0;
+}
+
+static void pid_base_iput(struct dentry *dentry, struct inode *inode)
+{
+	struct task_struct *task = proc_task(inode);
+	spin_lock(&task->proc_lock);
+	if (task->proc_dentry == dentry)
+		task->proc_dentry = NULL;
+	spin_unlock(&task->proc_lock);
+	iput(inode);
+}
+
+static int pid_delete_dentry(struct dentry * dentry)
+{
+	/* Is the task we represent dead?
+	 * If so, then don't put the dentry on the lru list,
+	 * kill it immediately.
+	 */
+	return !pid_alive(proc_task(dentry->d_inode));
+}
+
+static struct dentry_operations tid_fd_dentry_operations =
+{
+	.d_revalidate	= tid_fd_revalidate,
+	.d_delete	= pid_delete_dentry,
+};
+
+static struct dentry_operations pid_dentry_operations =
+{
+	.d_revalidate	= pid_revalidate,
+	.d_delete	= pid_delete_dentry,
+};
+
+static struct dentry_operations pid_base_dentry_operations =
+{
+	.d_revalidate	= pid_revalidate,
+	.d_iput		= pid_base_iput,
+	.d_delete	= pid_delete_dentry,
+};
+
+/* Lookups */
+
+static unsigned name_to_int(struct dentry *dentry)
+{
+	const char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	unsigned n = 0;
+
+	if (len > 1 && *name == '0')
+		goto out;
+	while (len-- > 0) {
+		unsigned c = *name++ - '0';
+		if (c > 9)
+			goto out;
+		if (n >= (~0U-9)/10)
+			goto out;
+		n *= 10;
+		n += c;
+	}
+	return n;
+out:
+	return ~0U;
+}
+
+/* SMP-safe */
+static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct task_struct *task = proc_task(dir);
+	unsigned fd = name_to_int(dentry);
+	struct file * file;
+	struct files_struct * files;
+	struct inode *inode;
+	struct proc_inode *ei;
+
+	if (fd == ~0U)
+		goto out;
+	if (!pid_alive(task))
+		goto out;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_FD_DIR+fd);
+	if (!inode)
+		goto out;
+	ei = PROC_I(inode);
+	files = get_files_struct(task);
+	if (!files)
+		goto out_unlock;
+	inode->i_mode = S_IFLNK;
+	spin_lock(&files->file_lock);
+	file = fcheck_files(files, fd);
+	if (!file)
+		goto out_unlock2;
+	if (file->f_mode & 1)
+		inode->i_mode |= S_IRUSR | S_IXUSR;
+	if (file->f_mode & 2)
+		inode->i_mode |= S_IWUSR | S_IXUSR;
+	spin_unlock(&files->file_lock);
+	put_files_struct(files);
+	inode->i_op = &proc_pid_link_inode_operations;
+	inode->i_size = 64;
+	ei->op.proc_get_link = proc_fd_link;
+	dentry->d_op = &tid_fd_dentry_operations;
+	d_add(dentry, inode);
+	return NULL;
+
+out_unlock2:
+	spin_unlock(&files->file_lock);
+	put_files_struct(files);
+out_unlock:
+	iput(inode);
+out:
+	return ERR_PTR(-ENOENT);
+}
+
+static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir);
+static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd);
+
+static struct file_operations proc_fd_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_readfd,
+};
+
+static struct file_operations proc_task_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_task_readdir,
+};
+
+/*
+ * proc directories can do almost nothing..
+ */
+static struct inode_operations proc_fd_inode_operations = {
+	.lookup		= proc_lookupfd,
+	.permission	= proc_permission,
+};
+
+static struct inode_operations proc_task_inode_operations = {
+	.lookup		= proc_task_lookup,
+	.permission	= proc_permission,
+};
+
+#ifdef CONFIG_SECURITY
+static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
+				  size_t count, loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length;
+	struct task_struct *task = proc_task(inode);
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	length = security_getprocattr(task, 
+				      (char*)file->f_dentry->d_name.name, 
+				      (void*)page, count);
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
+	free_page(page);
+	return length;
+}
+
+static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+				   size_t count, loff_t *ppos)
+{ 
+	struct inode * inode = file->f_dentry->d_inode;
+	char *page; 
+	ssize_t length; 
+	struct task_struct *task = proc_task(inode); 
+
+	if (count > PAGE_SIZE) 
+		count = PAGE_SIZE; 
+	if (*ppos != 0) {
+		/* No partial writes. */
+		return -EINVAL;
+	}
+	page = (char*)__get_free_page(GFP_USER); 
+	if (!page) 
+		return -ENOMEM;
+	length = -EFAULT; 
+	if (copy_from_user(page, buf, count)) 
+		goto out;
+
+	length = security_setprocattr(task, 
+				      (char*)file->f_dentry->d_name.name, 
+				      (void*)page, count);
+out:
+	free_page((unsigned long) page);
+	return length;
+} 
+
+static struct file_operations proc_pid_attr_operations = {
+	.read		= proc_pid_attr_read,
+	.write		= proc_pid_attr_write,
+};
+
+static struct file_operations proc_tid_attr_operations;
+static struct inode_operations proc_tid_attr_inode_operations;
+static struct file_operations proc_tgid_attr_operations;
+static struct inode_operations proc_tgid_attr_inode_operations;
+#endif
+
+/* SMP-safe */
+static struct dentry *proc_pident_lookup(struct inode *dir, 
+					 struct dentry *dentry,
+					 struct pid_entry *ents)
+{
+	struct inode *inode;
+	int error;
+	struct task_struct *task = proc_task(dir);
+	struct pid_entry *p;
+	struct proc_inode *ei;
+
+	error = -ENOENT;
+	inode = NULL;
+
+	if (!pid_alive(task))
+		goto out;
+
+	for (p = ents; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = -EINVAL;
+	inode = proc_pid_make_inode(dir->i_sb, task, p->type);
+	if (!inode)
+		goto out;
+
+	ei = PROC_I(inode);
+	inode->i_mode = p->mode;
+	/*
+	 * Yes, it does not scale. And it should not. Don't add
+	 * new entries into /proc/<tgid>/ without very good reasons.
+	 */
+	switch(p->type) {
+		case PROC_TGID_TASK:
+			inode->i_nlink = 3;
+			inode->i_op = &proc_task_inode_operations;
+			inode->i_fop = &proc_task_operations;
+			break;
+		case PROC_TID_FD:
+		case PROC_TGID_FD:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_fd_inode_operations;
+			inode->i_fop = &proc_fd_operations;
+			break;
+		case PROC_TID_EXE:
+		case PROC_TGID_EXE:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_exe_link;
+			break;
+		case PROC_TID_CWD:
+		case PROC_TGID_CWD:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_cwd_link;
+			break;
+		case PROC_TID_ROOT:
+		case PROC_TGID_ROOT:
+			inode->i_op = &proc_pid_link_inode_operations;
+			ei->op.proc_get_link = proc_root_link;
+			break;
+		case PROC_TID_ENVIRON:
+		case PROC_TGID_ENVIRON:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_environ;
+			break;
+		case PROC_TID_AUXV:
+		case PROC_TGID_AUXV:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_auxv;
+			break;
+		case PROC_TID_STATUS:
+		case PROC_TGID_STATUS:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_status;
+			break;
+		case PROC_TID_STAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_tid_stat;
+			break;
+		case PROC_TGID_STAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_tgid_stat;
+			break;
+		case PROC_TID_CMDLINE:
+		case PROC_TGID_CMDLINE:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_cmdline;
+			break;
+		case PROC_TID_STATM:
+		case PROC_TGID_STATM:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_statm;
+			break;
+		case PROC_TID_MAPS:
+		case PROC_TGID_MAPS:
+			inode->i_fop = &proc_maps_operations;
+			break;
+		case PROC_TID_MEM:
+		case PROC_TGID_MEM:
+			inode->i_op = &proc_mem_inode_operations;
+			inode->i_fop = &proc_mem_operations;
+			break;
+#ifdef CONFIG_SECCOMP
+		case PROC_TID_SECCOMP:
+		case PROC_TGID_SECCOMP:
+			inode->i_fop = &proc_seccomp_operations;
+			break;
+#endif /* CONFIG_SECCOMP */
+		case PROC_TID_MOUNTS:
+		case PROC_TGID_MOUNTS:
+			inode->i_fop = &proc_mounts_operations;
+			break;
+#ifdef CONFIG_SECURITY
+		case PROC_TID_ATTR:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_tid_attr_inode_operations;
+			inode->i_fop = &proc_tid_attr_operations;
+			break;
+		case PROC_TGID_ATTR:
+			inode->i_nlink = 2;
+			inode->i_op = &proc_tgid_attr_inode_operations;
+			inode->i_fop = &proc_tgid_attr_operations;
+			break;
+		case PROC_TID_ATTR_CURRENT:
+		case PROC_TGID_ATTR_CURRENT:
+		case PROC_TID_ATTR_PREV:
+		case PROC_TGID_ATTR_PREV:
+		case PROC_TID_ATTR_EXEC:
+		case PROC_TGID_ATTR_EXEC:
+		case PROC_TID_ATTR_FSCREATE:
+		case PROC_TGID_ATTR_FSCREATE:
+			inode->i_fop = &proc_pid_attr_operations;
+			break;
+#endif
+#ifdef CONFIG_KALLSYMS
+		case PROC_TID_WCHAN:
+		case PROC_TGID_WCHAN:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_wchan;
+			break;
+#endif
+#ifdef CONFIG_SCHEDSTATS
+		case PROC_TID_SCHEDSTAT:
+		case PROC_TGID_SCHEDSTAT:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_pid_schedstat;
+			break;
+#endif
+#ifdef CONFIG_CPUSETS
+		case PROC_TID_CPUSET:
+		case PROC_TGID_CPUSET:
+			inode->i_fop = &proc_cpuset_operations;
+			break;
+#endif
+		case PROC_TID_OOM_SCORE:
+		case PROC_TGID_OOM_SCORE:
+			inode->i_fop = &proc_info_file_operations;
+			ei->op.proc_read = proc_oom_score;
+			break;
+		case PROC_TID_OOM_ADJUST:
+		case PROC_TGID_OOM_ADJUST:
+			inode->i_fop = &proc_oom_adjust_operations;
+			break;
+#ifdef CONFIG_AUDITSYSCALL
+		case PROC_TID_LOGINUID:
+		case PROC_TGID_LOGINUID:
+			inode->i_fop = &proc_loginuid_operations;
+			break;
+#endif
+		default:
+			printk("procfs: impossible type (%d)",p->type);
+			iput(inode);
+			return ERR_PTR(-EINVAL);
+	}
+	dentry->d_op = &pid_dentry_operations;
+	d_add(dentry, inode);
+	return NULL;
+
+out:
+	return ERR_PTR(error);
+}
+
+static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tgid_base_stuff);
+}
+
+static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tid_base_stuff);
+}
+
+static struct file_operations proc_tgid_base_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tgid_base_readdir,
+};
+
+static struct file_operations proc_tid_base_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tid_base_readdir,
+};
+
+static struct inode_operations proc_tgid_base_inode_operations = {
+	.lookup		= proc_tgid_base_lookup,
+};
+
+static struct inode_operations proc_tid_base_inode_operations = {
+	.lookup		= proc_tid_base_lookup,
+};
+
+#ifdef CONFIG_SECURITY
+static int proc_tgid_attr_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tgid_attr_stuff,ARRAY_SIZE(tgid_attr_stuff));
+}
+
+static int proc_tid_attr_readdir(struct file * filp,
+			     void * dirent, filldir_t filldir)
+{
+	return proc_pident_readdir(filp,dirent,filldir,
+				   tid_attr_stuff,ARRAY_SIZE(tid_attr_stuff));
+}
+
+static struct file_operations proc_tgid_attr_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tgid_attr_readdir,
+};
+
+static struct file_operations proc_tid_attr_operations = {
+	.read		= generic_read_dir,
+	.readdir	= proc_tid_attr_readdir,
+};
+
+static struct dentry *proc_tgid_attr_lookup(struct inode *dir,
+				struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tgid_attr_stuff);
+}
+
+static struct dentry *proc_tid_attr_lookup(struct inode *dir,
+				struct dentry *dentry, struct nameidata *nd)
+{
+	return proc_pident_lookup(dir, dentry, tid_attr_stuff);
+}
+
+static struct inode_operations proc_tgid_attr_inode_operations = {
+	.lookup		= proc_tgid_attr_lookup,
+};
+
+static struct inode_operations proc_tid_attr_inode_operations = {
+	.lookup		= proc_tid_attr_lookup,
+};
+#endif
+
+/*
+ * /proc/self:
+ */
+static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
+			      int buflen)
+{
+	char tmp[30];
+	sprintf(tmp, "%d", current->tgid);
+	return vfs_readlink(dentry,buffer,buflen,tmp);
+}
+
+static int proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char tmp[30];
+	sprintf(tmp, "%d", current->tgid);
+	return vfs_follow_link(nd,tmp);
+}	
+
+static struct inode_operations proc_self_inode_operations = {
+	.readlink	= proc_self_readlink,
+	.follow_link	= proc_self_follow_link,
+};
+
+/**
+ * proc_pid_unhash - Unhash /proc/<pid> entry from the dcache.
+ * @p: task that should be flushed.
+ *
+ * Drops the /proc/<pid> dcache entry from the hash chains.
+ *
+ * Dropping /proc/<pid> entries and detach_pid must be synchronous,
+ * otherwise e.g. /proc/<pid>/exe might point to the wrong executable,
+ * if the pid value is immediately reused. This is enforced by
+ * - caller must acquire spin_lock(p->proc_lock)
+ * - must be called before detach_pid()
+ * - proc_pid_lookup acquires proc_lock, and checks that
+ *   the target is not dead by looking at the attach count
+ *   of PIDTYPE_PID.
+ */
+
+struct dentry *proc_pid_unhash(struct task_struct *p)
+{
+	struct dentry *proc_dentry;
+
+	proc_dentry = p->proc_dentry;
+	if (proc_dentry != NULL) {
+
+		spin_lock(&dcache_lock);
+		spin_lock(&proc_dentry->d_lock);
+		if (!d_unhashed(proc_dentry)) {
+			dget_locked(proc_dentry);
+			__d_drop(proc_dentry);
+			spin_unlock(&proc_dentry->d_lock);
+		} else {
+			spin_unlock(&proc_dentry->d_lock);
+			proc_dentry = NULL;
+		}
+		spin_unlock(&dcache_lock);
+	}
+	return proc_dentry;
+}
+
+/**
+ * proc_pid_flush - recover memory used by stale /proc/<pid>/x entries
+ * @proc_dentry: directory to prune.
+ *
+ * Shrink the /proc directory that was used by the just killed thread.
+ */
+	
+void proc_pid_flush(struct dentry *proc_dentry)
+{
+	might_sleep();
+	if(proc_dentry != NULL) {
+		shrink_dcache_parent(proc_dentry);
+		dput(proc_dentry);
+	}
+}
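+
+/*
+ * Illustrative calling sequence for the two helpers above: a sketch of
+ * what a caller on the task-exit path is expected to do, following the
+ * rules spelled out before proc_pid_unhash() (the actual caller lives
+ * outside this file):
+ *
+ *	struct dentry *proc_dentry;
+ *
+ *	spin_lock(&p->proc_lock);
+ *	proc_dentry = proc_pid_unhash(p);
+ *	... detach_pid() and unhash the task ...
+ *	spin_unlock(&p->proc_lock);
+ *	proc_pid_flush(proc_dentry);
+ *
+ * proc_pid_flush() may sleep, so it must only run once the spinlock has
+ * been dropped.
+ */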
+
+/* SMP-safe */
+struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct task_struct *task;
+	struct inode *inode;
+	struct proc_inode *ei;
+	unsigned tgid;
+	int died;
+
+	if (dentry->d_name.len == 4 && !memcmp(dentry->d_name.name,"self",4)) {
+		inode = new_inode(dir->i_sb);
+		if (!inode)
+			return ERR_PTR(-ENOMEM);
+		ei = PROC_I(inode);
+		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_ino = fake_ino(0, PROC_TGID_INO);
+		ei->pde = NULL;
+		inode->i_mode = S_IFLNK|S_IRWXUGO;
+		inode->i_uid = inode->i_gid = 0;
+		inode->i_size = 64;
+		inode->i_op = &proc_self_inode_operations;
+		d_add(dentry, inode);
+		return NULL;
+	}
+	tgid = name_to_int(dentry);
+	if (tgid == ~0U)
+		goto out;
+
+	read_lock(&tasklist_lock);
+	task = find_task_by_pid(tgid);
+	if (task)
+		get_task_struct(task);
+	read_unlock(&tasklist_lock);
+	if (!task)
+		goto out;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
+
+
+	if (!inode) {
+		put_task_struct(task);
+		goto out;
+	}
+	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+	inode->i_op = &proc_tgid_base_inode_operations;
+	inode->i_fop = &proc_tgid_base_operations;
+	inode->i_nlink = 3;
+	inode->i_flags|=S_IMMUTABLE;
+
+	dentry->d_op = &pid_base_dentry_operations;
+
+	died = 0;
+	d_add(dentry, inode);
+	spin_lock(&task->proc_lock);
+	task->proc_dentry = dentry;
+	if (!pid_alive(task)) {
+		dentry = proc_pid_unhash(task);
+		died = 1;
+	}
+	spin_unlock(&task->proc_lock);
+
+	put_task_struct(task);
+	if (died) {
+		proc_pid_flush(dentry);
+		goto out;
+	}
+	return NULL;
+out:
+	return ERR_PTR(-ENOENT);
+}
+
+/* SMP-safe */
+static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct task_struct *task;
+	struct task_struct *leader = proc_task(dir);
+	struct inode *inode;
+	unsigned tid;
+
+	tid = name_to_int(dentry);
+	if (tid == ~0U)
+		goto out;
+
+	read_lock(&tasklist_lock);
+	task = find_task_by_pid(tid);
+	if (task)
+		get_task_struct(task);
+	read_unlock(&tasklist_lock);
+	if (!task)
+		goto out;
+	if (leader->tgid != task->tgid)
+		goto out_drop_task;
+
+	inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_INO);
+
+
+	if (!inode)
+		goto out_drop_task;
+	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+	inode->i_op = &proc_tid_base_inode_operations;
+	inode->i_fop = &proc_tid_base_operations;
+	inode->i_nlink = 3;
+	inode->i_flags|=S_IMMUTABLE;
+
+	dentry->d_op = &pid_base_dentry_operations;
+
+	d_add(dentry, inode);
+
+	put_task_struct(task);
+	return NULL;
+out_drop_task:
+	put_task_struct(task);
+out:
+	return ERR_PTR(-ENOENT);
+}
+
+#define PROC_NUMBUF 10
+#define PROC_MAXPIDS 20
+
+/*
+ * Get a few tgid's to return for filldir - we need to hold the
+ * tasklist lock while doing this, and we must release it before
+ * we actually do the filldir itself, so we use a temp buffer..
+ */
+static int get_tgid_list(int index, unsigned long version, unsigned int *tgids)
+{
+	struct task_struct *p;
+	int nr_tgids = 0;
+
+	index--;
+	read_lock(&tasklist_lock);
+	p = NULL;
+	if (version) {
+		p = find_task_by_pid(version);
+		if (p && !thread_group_leader(p))
+			p = NULL;
+	}
+
+	if (p)
+		index = 0;
+	else
+		p = next_task(&init_task);
+
+	for ( ; p != &init_task; p = next_task(p)) {
+		int tgid = p->pid;
+		if (!pid_alive(p))
+			continue;
+		if (--index >= 0)
+			continue;
+		tgids[nr_tgids] = tgid;
+		nr_tgids++;
+		if (nr_tgids >= PROC_MAXPIDS)
+			break;
+	}
+	read_unlock(&tasklist_lock);
+	return nr_tgids;
+}
+
+/*
+ * Get a few tid's to return for filldir - we need to hold the
+ * tasklist lock while doing this, and we must release it before
+ * we actually do the filldir itself, so we use a temp buffer..
+ */
+static int get_tid_list(int index, unsigned int *tids, struct inode *dir)
+{
+	struct task_struct *leader_task = proc_task(dir);
+	struct task_struct *task = leader_task;
+	int nr_tids = 0;
+
+	index -= 2;
+	read_lock(&tasklist_lock);
+	/*
+	 * The starting point task (leader_task) might be an already
+	 * unlinked task, which cannot be used to access the task-list
+	 * via next_thread().
+	 */
+	if (pid_alive(task)) do {
+		int tid = task->pid;
+
+		if (--index >= 0)
+			continue;
+		tids[nr_tids] = tid;
+		nr_tids++;
+		if (nr_tids >= PROC_MAXPIDS)
+			break;
+	} while ((task = next_thread(task)) != leader_task);
+	read_unlock(&tasklist_lock);
+	return nr_tids;
+}
+
+/* for the /proc/ directory itself, after non-process stuff has been done */
+int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	unsigned int tgid_array[PROC_MAXPIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+	unsigned int nr_tgids, i;
+	int next_tgid;
+
+	if (!nr) {
+		ino_t ino = fake_ino(0,PROC_TGID_INO);
+		if (filldir(dirent, "self", 4, filp->f_pos, ino, DT_LNK) < 0)
+			return 0;
+		filp->f_pos++;
+		nr++;
+	}
+
+	/* f_version caches the tgid value that the last readdir call couldn't
+	 * return. lseek aka telldir automagically resets f_version to 0.
+	 */
+	next_tgid = filp->f_version;
+	filp->f_version = 0;
+	for (;;) {
+		nr_tgids = get_tgid_list(nr, next_tgid, tgid_array);
+		if (!nr_tgids) {
+			/* no more entries ! */
+			break;
+		}
+		next_tgid = 0;
+
+		/* do not use the last found pid, reserve it for next_tgid */
+		if (nr_tgids == PROC_MAXPIDS) {
+			nr_tgids--;
+			next_tgid = tgid_array[nr_tgids];
+		}
+
+		for (i=0;i<nr_tgids;i++) {
+			int tgid = tgid_array[i];
+			ino_t ino = fake_ino(tgid,PROC_TGID_INO);
+			unsigned long j = PROC_NUMBUF;
+
+			do
+				buf[--j] = '0' + (tgid % 10);
+			while ((tgid /= 10) != 0);
+
+			if (filldir(dirent, buf+j, PROC_NUMBUF-j, filp->f_pos, ino, DT_DIR) < 0) {
+				/* returning this tgid failed, save it as the first
+				 * pid for the next readdir call */
+				filp->f_version = tgid_array[i];
+				goto out;
+			}
+			filp->f_pos++;
+			nr++;
+		}
+	}
+out:
+	return 0;
+}
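+
+/*
+ * Concrete example of the f_version handshake above (the numbers are
+ * illustrative only): with PROC_MAXPIDS == 20, a full get_tgid_list()
+ * batch holds 20 tgids; the last one is not emitted but kept as
+ * next_tgid, so the following batch restarts at a pid known to exist.
+ * If filldir() runs out of room at, say, tgid 1234, that value is saved
+ * in filp->f_version and handed back in as "version" on the next
+ * readdir call, which resumes the scan at task 1234 (or falls back to a
+ * positional scan if that task has exited in the meantime).
+ */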
+
+/* for the /proc/TGID/task/ directories */
+static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	unsigned int tid_array[PROC_MAXPIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr_tids, i;
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	int retval = -ENOENT;
+	ino_t ino;
+	unsigned long pos = filp->f_pos;  /* avoiding "long long" filp->f_pos */
+
+	if (!pid_alive(proc_task(inode)))
+		goto out;
+	retval = 0;
+
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	}
+
+	nr_tids = get_tid_list(pos, tid_array, inode);
+
+	for (i = 0; i < nr_tids; i++) {
+		unsigned long j = PROC_NUMBUF;
+		int tid = tid_array[i];
+
+		ino = fake_ino(tid,PROC_TID_INO);
+
+		do
+			buf[--j] = '0' + (tid % 10);
+		while ((tid /= 10) != 0);
+
+		if (filldir(dirent, buf+j, PROC_NUMBUF-j, pos, ino, DT_DIR) < 0)
+			break;
+		pos++;
+	}
+out:
+	filp->f_pos = pos;
+	return retval;
+}
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
new file mode 100644
index 0000000..6c6315d
--- /dev/null
+++ b/fs/proc/generic.c
@@ -0,0 +1,705 @@
+/*
+ * fs/proc/generic.c --- generic routines for the proc-fs
+ *
+ * This file contains generic proc-fs routines for handling
+ * directories and files.
+ * 
+ * Copyright (C) 1991, 1992 Linus Torvalds.
+ * Copyright (C) 1997 Theodore Ts'o
+ */
+
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/namei.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+
+static ssize_t proc_file_read(struct file *file, char __user *buf,
+			      size_t nbytes, loff_t *ppos);
+static ssize_t proc_file_write(struct file *file, const char __user *buffer,
+			       size_t count, loff_t *ppos);
+static loff_t proc_file_lseek(struct file *, loff_t, int);
+
+int proc_match(int len, const char *name, struct proc_dir_entry *de)
+{
+	if (de->namelen != len)
+		return 0;
+	return !memcmp(name, de->name, len);
+}
+
+static struct file_operations proc_file_operations = {
+	.llseek		= proc_file_lseek,
+	.read		= proc_file_read,
+	.write		= proc_file_write,
+};
+
+/* buffer size is one page but our output routines use some slack for overruns */
+#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)
+
+static ssize_t
+proc_file_read(struct file *file, char __user *buf, size_t nbytes,
+	       loff_t *ppos)
+{
+	struct inode * inode = file->f_dentry->d_inode;
+	char 	*page;
+	ssize_t	retval=0;
+	int	eof=0;
+	ssize_t	n, count;
+	char	*start;
+	struct proc_dir_entry * dp;
+
+	dp = PDE(inode);
+	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	while ((nbytes > 0) && !eof) {
+		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);
+
+		start = NULL;
+		if (dp->get_info) {
+			/* Handle old net routines */
+			n = dp->get_info(page, &start, *ppos, count);
+			if (n < count)
+				eof = 1;
+		} else if (dp->read_proc) {
+			/*
+			 * How to be a proc read function
+			 * ------------------------------
+			 * Prototype:
+			 *    int f(char *buffer, char **start, off_t offset,
+			 *          int count, int *peof, void *dat)
+			 *
+			 * Assume that the buffer is "count" bytes in size.
+			 *
+			 * If you know you have supplied all the data you
+			 * have, set *peof.
+			 *
+			 * You have three ways to return data:
+			 * 0) Leave *start = NULL.  (This is the default.)
+			 *    Put the data of the requested offset at that
+			 *    offset within the buffer.  Return the number (n)
+			 *    of bytes there are from the beginning of the
+			 *    buffer up to the last byte of data.  If the
+			 *    number of supplied bytes (= n - offset) is 
+			 *    greater than zero and you didn't signal eof
+			 *    and the reader is prepared to take more data
+			 *    you will be called again with the requested
+			 *    offset advanced by the number of bytes 
+			 *    absorbed.  This interface is useful for files
+			 *    no larger than the buffer.
+			 * 1) Set *start = an unsigned long value less than
+			 *    the buffer address but greater than zero.
+			 *    Put the data of the requested offset at the
+			 *    beginning of the buffer.  Return the number of
+			 *    bytes of data placed there.  If this number is
+			 *    greater than zero and you didn't signal eof
+			 *    and the reader is prepared to take more data
+			 *    you will be called again with the requested
+			 *    offset advanced by *start.  This interface is
+			 *    useful when you have a large file consisting
+			 *    of a series of blocks which you want to count
+			 *    and return as wholes.
+			 *    (Hack by Paul.Russell@rustcorp.com.au)
+			 * 2) Set *start = an address within the buffer.
+			 *    Put the data of the requested offset at *start.
+			 *    Return the number of bytes of data placed there.
+			 *    If this number is greater than zero and you
+			 *    didn't signal eof and the reader is prepared to
+			 *    take more data you will be called again with the
+			 *    requested offset advanced by the number of bytes
+			 *    absorbed.
+			 */
+			n = dp->read_proc(page, &start, *ppos,
+					  count, &eof, dp->data);
+		} else
+			break;
+
+		if (n == 0)   /* end of file */
+			break;
+		if (n < 0) {  /* error */
+			if (retval == 0)
+				retval = n;
+			break;
+		}
+
+		if (start == NULL) {
+			if (n > PAGE_SIZE) {
+				printk(KERN_ERR
+				       "proc_file_read: Apparent buffer overflow!\n");
+				n = PAGE_SIZE;
+			}
+			n -= *ppos;
+			if (n <= 0)
+				break;
+			if (n > count)
+				n = count;
+			start = page + *ppos;
+		} else if (start < page) {
+			if (n > PAGE_SIZE) {
+				printk(KERN_ERR
+				       "proc_file_read: Apparent buffer overflow!\n");
+				n = PAGE_SIZE;
+			}
+			if (n > count) {
+				/*
+				 * Don't reduce n because doing so might
+				 * cut off part of a data block.
+				 */
+				printk(KERN_WARNING
+				       "proc_file_read: Read count exceeded\n");
+			}
+		} else /* start >= page */ {
+			unsigned long startoff = (unsigned long)(start - page);
+			if (n > (PAGE_SIZE - startoff)) {
+				printk(KERN_ERR
+				       "proc_file_read: Apparent buffer overflow!\n");
+				n = PAGE_SIZE - startoff;
+			}
+			if (n > count)
+				n = count;
+		}
+		
+ 		n -= copy_to_user(buf, start < page ? page : start, n);
+		if (n == 0) {
+			if (retval == 0)
+				retval = -EFAULT;
+			break;
+		}
+
+		*ppos += start < page ? (unsigned long)start : n;
+		nbytes -= n;
+		buf += n;
+		retval += n;
+	}
+	free_page((unsigned long) page);
+	return retval;
+}
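+
+/*
+ * A minimal sketch of a read_proc handler using "way 0" above (data that
+ * always fits in one buffer, *start left NULL).  The foo_* names are
+ * illustrative only and do not exist elsewhere in the tree:
+ *
+ *	static int foo_read_proc(char *page, char **start, off_t off,
+ *				 int count, int *eof, void *data)
+ *	{
+ *		int len = sprintf(page, "state: %d\n", *(int *)data);
+ *
+ *		*eof = 1;
+ *		return len;
+ *	}
+ *
+ * The handler ignores the offset, sets *eof and returns the byte count
+ * from the start of the buffer; proc_file_read() above does the offset
+ * arithmetic and the copy_to_user().
+ */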
+
+static ssize_t
+proc_file_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct proc_dir_entry * dp;
+	
+	dp = PDE(inode);
+
+	if (!dp->write_proc)
+		return -EIO;
+
+	/* FIXME: does this routine need ppos?  probably... */
+	return dp->write_proc(file, buffer, count, dp->data);
+}
+
+
+static loff_t
+proc_file_lseek(struct file *file, loff_t offset, int orig)
+{
+	lock_kernel();
+
+	switch (orig) {
+	case 0:
+		if (offset < 0)
+			goto out;
+		file->f_pos = offset;
+		unlock_kernel();
+		return file->f_pos;
+	case 1:
+		if (offset + file->f_pos < 0)
+			goto out;
+		file->f_pos += offset;
+		unlock_kernel();
+		return file->f_pos;
+	case 2:
+		goto out;
+	default:
+		goto out;
+	}
+
+out:
+	unlock_kernel();
+	return -EINVAL;
+}
+
+static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct proc_dir_entry *de = PDE(inode);
+	int error;
+
+	error = inode_change_ok(inode, iattr);
+	if (error)
+		goto out;
+
+	error = inode_setattr(inode, iattr);
+	if (error)
+		goto out;
+	
+	de->uid = inode->i_uid;
+	de->gid = inode->i_gid;
+	de->mode = inode->i_mode;
+out:
+	return error;
+}
+
+static struct inode_operations proc_file_inode_operations = {
+	.setattr	= proc_notify_change,
+};
+
+/*
+ * This function parses a name such as "tty/driver/serial", and
+ * returns the struct proc_dir_entry for "/proc/tty/driver", and
+ * returns "serial" in residual.
+ */
+static int xlate_proc_name(const char *name,
+			   struct proc_dir_entry **ret, const char **residual)
+{
+	const char     		*cp = name, *next;
+	struct proc_dir_entry	*de;
+	int			len;
+
+	de = &proc_root;
+	while (1) {
+		next = strchr(cp, '/');
+		if (!next)
+			break;
+
+		len = next - cp;
+		for (de = de->subdir; de ; de = de->next) {
+			if (proc_match(len, cp, de))
+				break;
+		}
+		if (!de)
+			return -ENOENT;
+		cp += len + 1;
+	}
+	*residual = cp;
+	*ret = de;
+	return 0;
+}
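+
+/*
+ * Because proc_create() and remove_proc_entry() below fall back to
+ * xlate_proc_name() whenever no parent is given, callers may also pass
+ * slash-separated names relative to /proc.  For example (illustrative
+ * only, assuming /proc/tty/driver already exists):
+ *
+ *	create_proc_entry("tty/driver/serial", S_IRUGO, NULL);
+ *	remove_proc_entry("tty/driver/serial", NULL);
+ */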
+
+static DEFINE_IDR(proc_inum_idr);
+static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
+
+#define PROC_DYNAMIC_FIRST 0xF0000000UL
+
+/*
+ * Return an inode number between PROC_DYNAMIC_FIRST and
+ * 0xffffffff, or zero on failure.
+ */
+static unsigned int get_inode_number(void)
+{
+	int i, inum = 0;
+	int error;
+
+retry:
+	if (idr_pre_get(&proc_inum_idr, GFP_KERNEL) == 0)
+		return 0;
+
+	spin_lock(&proc_inum_lock);
+	error = idr_get_new(&proc_inum_idr, NULL, &i);
+	spin_unlock(&proc_inum_lock);
+	if (error == -EAGAIN)
+		goto retry;
+	else if (error)
+		return 0;
+
+	inum = (i & MAX_ID_MASK) + PROC_DYNAMIC_FIRST;
+
+	/* inum will never be more than 0xf0ffffff, so no check
+	 * for overflow.
+	 */
+
+	return inum;
+}
+
+static void release_inode_number(unsigned int inum)
+{
+	int id = (inum - PROC_DYNAMIC_FIRST) | ~MAX_ID_MASK;
+
+	spin_lock(&proc_inum_lock);
+	idr_remove(&proc_inum_idr, id);
+	spin_unlock(&proc_inum_lock);
+}
+
+static int proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	nd_set_link(nd, PDE(dentry->d_inode)->data);
+	return 0;
+}
+
+static struct inode_operations proc_link_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= proc_follow_link,
+};
+
+/*
+ * As some entries in /proc are volatile, we want to 
+ * get rid of unused dentries.  This could be made 
+ * smarter: we could keep a "volatile" flag in the 
+ * inode to indicate which ones to keep.
+ */
+static int proc_delete_dentry(struct dentry * dentry)
+{
+	return 1;
+}
+
+static struct dentry_operations proc_dentry_operations =
+{
+	.d_delete	= proc_delete_dentry,
+};
+
+/*
+ * Don't create negative dentries here, return -ENOENT by hand
+ * instead.
+ */
+struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	struct proc_dir_entry * de;
+	int error = -ENOENT;
+
+	lock_kernel();
+	de = PDE(dir);
+	if (de) {
+		for (de = de->subdir; de ; de = de->next) {
+			if (de->namelen != dentry->d_name.len)
+				continue;
+			if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
+				unsigned int ino = de->low_ino;
+
+				error = -EINVAL;
+				inode = proc_get_inode(dir->i_sb, ino, de);
+				break;
+			}
+		}
+	}
+	unlock_kernel();
+
+	if (inode) {
+		dentry->d_op = &proc_dentry_operations;
+		d_add(dentry, inode);
+		return NULL;
+	}
+	return ERR_PTR(error);
+}
+
+/*
+ * This returns non-zero if at EOF, so that the /proc
+ * root directory can use this and check if it should
+ * continue with the <pid> entries..
+ *
+ * Note that the VFS-layer doesn't care about the return
+ * value of the readdir() call, as long as it's non-negative
+ * for success..
+ */
+int proc_readdir(struct file * filp,
+	void * dirent, filldir_t filldir)
+{
+	struct proc_dir_entry * de;
+	unsigned int ino;
+	int i;
+	struct inode *inode = filp->f_dentry->d_inode;
+	int ret = 0;
+
+	lock_kernel();
+
+	ino = inode->i_ino;
+	de = PDE(inode);
+	if (!de) {
+		ret = -EINVAL;
+		goto out;
+	}
+	i = filp->f_pos;
+	switch (i) {
+		case 0:
+			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+				goto out;
+			i++;
+			filp->f_pos++;
+			/* fall through */
+		case 1:
+			if (filldir(dirent, "..", 2, i,
+				    parent_ino(filp->f_dentry),
+				    DT_DIR) < 0)
+				goto out;
+			i++;
+			filp->f_pos++;
+			/* fall through */
+		default:
+			de = de->subdir;
+			i -= 2;
+			for (;;) {
+				if (!de) {
+					ret = 1;
+					goto out;
+				}
+				if (!i)
+					break;
+				de = de->next;
+				i--;
+			}
+
+			do {
+				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
+					    de->low_ino, de->mode >> 12) < 0)
+					goto out;
+				filp->f_pos++;
+				de = de->next;
+			} while (de);
+	}
+	ret = 1;
+out:	unlock_kernel();
+	return ret;	
+}
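+
+/*
+ * Sketch of the contract described above, roughly what the root
+ * directory's readdir (proc_root_readdir() in root.c) is expected to do
+ * with the return value:
+ *
+ *	if (filp->f_pos < FIRST_PROCESS_ENTRY) {
+ *		int error = proc_readdir(filp, dirent, filldir);
+ *		if (error <= 0)
+ *			return error;
+ *		filp->f_pos = FIRST_PROCESS_ENTRY;
+ *	}
+ *	return proc_pid_readdir(filp, dirent, filldir);
+ */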
+
+/*
+ * These are the generic /proc directory operations. They
+ * use the in-memory "struct proc_dir_entry" tree to parse
+ * the /proc directory.
+ */
+static struct file_operations proc_dir_operations = {
+	.read			= generic_read_dir,
+	.readdir		= proc_readdir,
+};
+
+/*
+ * proc directories can do almost nothing..
+ */
+static struct inode_operations proc_dir_inode_operations = {
+	.lookup		= proc_lookup,
+	.setattr	= proc_notify_change,
+};
+
+static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
+{
+	unsigned int i;
+	
+	i = get_inode_number();
+	if (i == 0)
+		return -EAGAIN;
+	dp->low_ino = i;
+	dp->next = dir->subdir;
+	dp->parent = dir;
+	dir->subdir = dp;
+	if (S_ISDIR(dp->mode)) {
+		if (dp->proc_iops == NULL) {
+			dp->proc_fops = &proc_dir_operations;
+			dp->proc_iops = &proc_dir_inode_operations;
+		}
+		dir->nlink++;
+	} else if (S_ISLNK(dp->mode)) {
+		if (dp->proc_iops == NULL)
+			dp->proc_iops = &proc_link_inode_operations;
+	} else if (S_ISREG(dp->mode)) {
+		if (dp->proc_fops == NULL)
+			dp->proc_fops = &proc_file_operations;
+		if (dp->proc_iops == NULL)
+			dp->proc_iops = &proc_file_inode_operations;
+	}
+	return 0;
+}
+
+/*
+ * Kill an inode that got unregistered..
+ */
+static void proc_kill_inodes(struct proc_dir_entry *de)
+{
+	struct list_head *p;
+	struct super_block *sb = proc_mnt->mnt_sb;
+
+	/*
+	 * Actually it's a partial revoke().
+	 */
+	file_list_lock();
+	list_for_each(p, &sb->s_files) {
+		struct file * filp = list_entry(p, struct file, f_list);
+		struct dentry * dentry = filp->f_dentry;
+		struct inode * inode;
+		struct file_operations *fops;
+
+		if (dentry->d_op != &proc_dentry_operations)
+			continue;
+		inode = dentry->d_inode;
+		if (PDE(inode) != de)
+			continue;
+		fops = filp->f_op;
+		filp->f_op = NULL;
+		fops_put(fops);
+	}
+	file_list_unlock();
+}
+
+static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
+					  const char *name,
+					  mode_t mode,
+					  nlink_t nlink)
+{
+	struct proc_dir_entry *ent = NULL;
+	const char *fn = name;
+	int len;
+
+	/* make sure name is valid */
+	if (!name || !strlen(name)) goto out;
+
+	if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
+		goto out;
+
+	/* At this point there must not be any '/' characters beyond *fn */
+	if (strchr(fn, '/'))
+		goto out;
+
+	len = strlen(fn);
+
+	ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
+	if (!ent) goto out;
+
+	memset(ent, 0, sizeof(struct proc_dir_entry));
+	memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
+	ent->name = ((char *) ent) + sizeof(*ent);
+	ent->namelen = len;
+	ent->mode = mode;
+	ent->nlink = nlink;
+ out:
+	return ent;
+}
+
+struct proc_dir_entry *proc_symlink(const char *name,
+		struct proc_dir_entry *parent, const char *dest)
+{
+	struct proc_dir_entry *ent;
+
+	ent = proc_create(&parent,name,
+			  (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
+
+	if (ent) {
+		ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
+		if (ent->data) {
+			strcpy((char*)ent->data,dest);
+			if (proc_register(parent, ent) < 0) {
+				kfree(ent->data);
+				kfree(ent);
+				ent = NULL;
+			}
+		} else {
+			kfree(ent);
+			ent = NULL;
+		}
+	}
+	return ent;
+}
+
+struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
+		struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry *ent;
+
+	ent = proc_create(&parent, name, S_IFDIR | mode, 2);
+	if (ent) {
+		ent->proc_fops = &proc_dir_operations;
+		ent->proc_iops = &proc_dir_inode_operations;
+
+		if (proc_register(parent, ent) < 0) {
+			kfree(ent);
+			ent = NULL;
+		}
+	}
+	return ent;
+}
+
+struct proc_dir_entry *proc_mkdir(const char *name,
+		struct proc_dir_entry *parent)
+{
+	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
+}
+
+struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+					 struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry *ent;
+	nlink_t nlink;
+
+	if (S_ISDIR(mode)) {
+		if ((mode & S_IALLUGO) == 0)
+			mode |= S_IRUGO | S_IXUGO;
+		nlink = 2;
+	} else {
+		if ((mode & S_IFMT) == 0)
+			mode |= S_IFREG;
+		if ((mode & S_IALLUGO) == 0)
+			mode |= S_IRUGO;
+		nlink = 1;
+	}
+
+	ent = proc_create(&parent,name,mode,nlink);
+	if (ent) {
+		if (S_ISDIR(mode)) {
+			ent->proc_fops = &proc_dir_operations;
+			ent->proc_iops = &proc_dir_inode_operations;
+		}
+		if (proc_register(parent, ent) < 0) {
+			kfree(ent);
+			ent = NULL;
+		}
+	}
+	return ent;
+}
+
+void free_proc_entry(struct proc_dir_entry *de)
+{
+	unsigned int ino = de->low_ino;
+
+	if (ino < PROC_DYNAMIC_FIRST)
+		return;
+
+	release_inode_number(ino);
+
+	if (S_ISLNK(de->mode) && de->data)
+		kfree(de->data);
+	kfree(de);
+}
+
+/*
+ * Remove a /proc entry and free it if it's not currently in use.
+ * If it is in use, we set the 'deleted' flag.
+ */
+void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry **p;
+	struct proc_dir_entry *de;
+	const char *fn = name;
+	int len;
+
+	if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
+		goto out;
+	len = strlen(fn);
+	for (p = &parent->subdir; *p; p=&(*p)->next ) {
+		if (!proc_match(len, fn, *p))
+			continue;
+		de = *p;
+		*p = de->next;
+		de->next = NULL;
+		if (S_ISDIR(de->mode))
+			parent->nlink--;
+		proc_kill_inodes(de);
+		de->nlink = 0;
+		WARN_ON(de->subdir);
+		if (!atomic_read(&de->count))
+			free_proc_entry(de);
+		else {
+			de->deleted = 1;
+			printk("remove_proc_entry: %s/%s busy, count=%d\n",
+				parent->name, de->name, atomic_read(&de->count));
+		}
+		break;
+	}
+out:
+	return;
+}
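+
+/*
+ * Putting the interface together: a minimal usage sketch (every
+ * "example" identifier is illustrative and not defined anywhere in the
+ * kernel).  A typical caller makes a directory plus one read-only file,
+ * points the file at a read_proc handler such as the foo_read_proc()
+ * sketch further up, and removes the entries in reverse order:
+ *
+ *	struct proc_dir_entry *dir, *ent;
+ *
+ *	dir = proc_mkdir("example", NULL);
+ *	ent = create_proc_entry("status", S_IRUGO, dir);
+ *	if (ent) {
+ *		ent->read_proc = foo_read_proc;
+ *		ent->data = &example_state;
+ *	}
+ *	...
+ *	remove_proc_entry("status", dir);
+ *	remove_proc_entry("example", NULL);
+ */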
diff --git a/fs/proc/inode-alloc.txt b/fs/proc/inode-alloc.txt
new file mode 100644
index 0000000..77212f9
--- /dev/null
+++ b/fs/proc/inode-alloc.txt
@@ -0,0 +1,14 @@
+Current inode allocations in the proc-fs (hex-numbers):
+
+  00000000		reserved
+  00000001-00000fff	static entries	(goners)
+       001		root-ino
+
+  00001000-00001fff	unused
+  0001xxxx-7fffxxxx	pid-dir entries for pid 1-7fff
+  80000000-efffffff	unused
+  f0000000-ffffffff	dynamic entries
+
+Goal:
+	a) once we split the thing into several virtual filesystems we
+	will get rid of magical ranges (and this file, BTW).
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
new file mode 100644
index 0000000..133c286
--- /dev/null
+++ b/fs/proc/inode.c
@@ -0,0 +1,218 @@
+/*
+ *  linux/fs/proc/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/file.h>
+#include <linux/limits.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+extern void free_proc_entry(struct proc_dir_entry *);
+
+static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de)
+{
+	if (de)
+		atomic_inc(&de->count);
+	return de;
+}
+
+/*
+ * Decrements the use count and checks for deferred deletion.
+ */
+static void de_put(struct proc_dir_entry *de)
+{
+	if (de) {	
+		lock_kernel();		
+		if (!atomic_read(&de->count)) {
+			printk("de_put: entry %s already free!\n", de->name);
+			unlock_kernel();
+			return;
+		}
+
+		if (atomic_dec_and_test(&de->count)) {
+			if (de->deleted) {
+				printk("de_put: deferred delete of %s\n",
+					de->name);
+				free_proc_entry(de);
+			}
+		}		
+		unlock_kernel();
+	}
+}
+
+/*
+ * Decrement the use count of the proc_dir_entry.
+ */
+static void proc_delete_inode(struct inode *inode)
+{
+	struct proc_dir_entry *de;
+	struct task_struct *tsk;
+
+	/* Let go of any associated process */
+	tsk = PROC_I(inode)->task;
+	if (tsk)
+		put_task_struct(tsk);
+
+	/* Let go of any associated proc directory entry */
+	de = PROC_I(inode)->pde;
+	if (de) {
+		if (de->owner)
+			module_put(de->owner);
+		de_put(de);
+	}
+	clear_inode(inode);
+}
+
+struct vfsmount *proc_mnt;
+
+static void proc_read_inode(struct inode * inode)
+{
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+}
+
+static kmem_cache_t * proc_inode_cachep;
+
+static struct inode *proc_alloc_inode(struct super_block *sb)
+{
+	struct proc_inode *ei;
+	struct inode *inode;
+
+	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	ei->task = NULL;
+	ei->type = 0;
+	ei->op.proc_get_link = NULL;
+	ei->pde = NULL;
+	inode = &ei->vfs_inode;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	return inode;
+}
+
+static void proc_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct proc_inode *ei = (struct proc_inode *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+int __init proc_init_inodecache(void)
+{
+	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
+					     sizeof(struct proc_inode),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (proc_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static int proc_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+static struct super_operations proc_sops = { 
+	.alloc_inode	= proc_alloc_inode,
+	.destroy_inode	= proc_destroy_inode,
+	.read_inode	= proc_read_inode,
+	.drop_inode	= generic_delete_inode,
+	.delete_inode	= proc_delete_inode,
+	.statfs		= simple_statfs,
+	.remount_fs	= proc_remount,
+};
+
+struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
+				struct proc_dir_entry *de)
+{
+	struct inode * inode;
+
+	/*
+	 * Increment the use count so the dir entry can't disappear.
+	 */
+	de_get(de);
+
+	WARN_ON(de && de->deleted);
+
+	inode = iget(sb, ino);
+	if (!inode)
+		goto out_fail;
+	
+	PROC_I(inode)->pde = de;
+	if (de) {
+		if (de->mode) {
+			inode->i_mode = de->mode;
+			inode->i_uid = de->uid;
+			inode->i_gid = de->gid;
+		}
+		if (de->size)
+			inode->i_size = de->size;
+		if (de->nlink)
+			inode->i_nlink = de->nlink;
+		if (!try_module_get(de->owner))
+			goto out_fail;
+		if (de->proc_iops)
+			inode->i_op = de->proc_iops;
+		if (de->proc_fops)
+			inode->i_fop = de->proc_fops;
+	}
+
+out:
+	return inode;
+
+out_fail:
+	de_put(de);
+	goto out;
+}			
+
+int proc_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct inode * root_inode;
+
+	s->s_flags |= MS_NODIRATIME;
+	s->s_blocksize = 1024;
+	s->s_blocksize_bits = 10;
+	s->s_magic = PROC_SUPER_MAGIC;
+	s->s_op = &proc_sops;
+	s->s_time_gran = 1;
+	
+	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
+	if (!root_inode)
+		goto out_no_root;
+	/*
+	 * Fixup the root inode's nlink value
+	 */
+	root_inode->i_nlink += nr_processes();
+	root_inode->i_uid = 0;
+	root_inode->i_gid = 0;
+	s->s_root = d_alloc_root(root_inode);
+	if (!s->s_root)
+		goto out_no_root;
+	return 0;
+
+out_no_root:
+	printk("proc_fill_super: get root inode failed\n");
+	iput(root_inode);
+	return -ENOMEM;
+}
+MODULE_LICENSE("GPL");
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
new file mode 100644
index 0000000..3e55198
--- /dev/null
+++ b/fs/proc/internal.h
@@ -0,0 +1,48 @@
+/* internal.h: internal procfs definitions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/proc_fs.h>
+
+struct vmalloc_info {
+	unsigned long	used;
+	unsigned long	largest_chunk;
+};
+
+#ifdef CONFIG_MMU
+#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+extern void get_vmalloc_info(struct vmalloc_info *vmi);
+#else
+
+#define VMALLOC_TOTAL 0UL
+#define get_vmalloc_info(vmi)			\
+do {						\
+	(vmi)->used = 0;			\
+	(vmi)->largest_chunk = 0;		\
+} while(0)
+
+#endif
+
+extern void create_seq_entry(char *name, mode_t mode, struct file_operations *f);
+extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **);
+extern int proc_tid_stat(struct task_struct *,  char *);
+extern int proc_tgid_stat(struct task_struct *, char *);
+extern int proc_pid_status(struct task_struct *, char *);
+extern int proc_pid_statm(struct task_struct *, char *);
+
+static inline struct task_struct *proc_task(struct inode *inode)
+{
+	return PROC_I(inode)->task;
+}
+
+static inline int proc_type(struct inode *inode)
+{
+	return PROC_I(inode)->type;
+}
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
new file mode 100644
index 0000000..1c7da98
--- /dev/null
+++ b/fs/proc/kcore.c
@@ -0,0 +1,404 @@
+/*
+ *	fs/proc/kcore.c kernel ELF core dumper
+ *
+ *	Modelled on fs/exec.c:aout_core_dump()
+ *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
+ *	ELF version written by David Howells <David.Howells@nexor.co.uk>
+ *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
+ *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
+ *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+
+static int open_kcore(struct inode * inode, struct file * filp)
+{
+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
+
+struct file_operations proc_kcore_operations = {
+	.read		= read_kcore,
+	.open		= open_kcore,
+};
+
+#ifndef kc_vaddr_to_offset
+#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
+#endif
+#ifndef	kc_offset_to_vaddr
+#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
+#endif
+
+#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
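+/*
+ * e.g. roundup(5, 4) == 8; used below to pad ELF note names and descriptors
+ * out to 4-byte boundaries.
+ */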
+
+/* An ELF note in memory */
+struct memelfnote
+{
+	const char *name;
+	int type;
+	unsigned int datasz;
+	void *data;
+};
+
+static struct kcore_list *kclist;
+static DEFINE_RWLOCK(kclist_lock);
+
+void
+kclist_add(struct kcore_list *new, void *addr, size_t size)
+{
+	new->addr = (unsigned long)addr;
+	new->size = size;
+
+	write_lock(&kclist_lock);
+	new->next = kclist;
+	kclist = new;
+	write_unlock(&kclist_lock);
+}
+
+static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
+{
+	size_t try, size;
+	struct kcore_list *m;
+
+	*nphdr = 1; /* PT_NOTE */
+	size = 0;
+
+	for (m=kclist; m; m=m->next) {
+		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
+		if (try > size)
+			size = try;
+		*nphdr = *nphdr + 1;
+	}
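+	/*
+	 * Worst-case size of the generated headers: the ELF header, the
+	 * program headers (with some slack), and three notes - prstatus,
+	 * prpsinfo and the task_struct - each with a 4-byte "CORE" name,
+	 * all rounded up to a full page.
+	 */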
+	*elf_buflen =	sizeof(struct elfhdr) + 
+			(*nphdr + 2)*sizeof(struct elf_phdr) + 
+			3 * (sizeof(struct elf_note) + 4) +
+			sizeof(struct elf_prstatus) +
+			sizeof(struct elf_prpsinfo) +
+			sizeof(struct task_struct);
+	*elf_buflen = PAGE_ALIGN(*elf_buflen);
+	return size + *elf_buflen;
+}
+
+
+/*****************************************************************************/
+/*
+ * determine size of ELF note
+ */
+static int notesize(struct memelfnote *en)
+{
+	int sz;
+
+	sz = sizeof(struct elf_note);
+	sz += roundup(strlen(en->name), 4);
+	sz += roundup(en->datasz, 4);
+
+	return sz;
+} /* end notesize() */
+
+/*****************************************************************************/
+/*
+ * store a note in the header buffer
+ */
+static char *storenote(struct memelfnote *men, char *bufp)
+{
+	struct elf_note en;
+
+#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)
+
+	en.n_namesz = strlen(men->name);
+	en.n_descsz = men->datasz;
+	en.n_type = men->type;
+
+	DUMP_WRITE(&en, sizeof(en));
+	DUMP_WRITE(men->name, en.n_namesz);
+
+	/* XXX - cast from long long to long to avoid need for libgcc.a */
+	bufp = (char*) roundup((unsigned long)bufp,4);
+	DUMP_WRITE(men->data, men->datasz);
+	bufp = (char*) roundup((unsigned long)bufp,4);
+
+#undef DUMP_WRITE
+
+	return bufp;
+} /* end storenote() */
+
+/*
+ * store an ELF coredump header in the supplied buffer
+ * nphdr is the number of elf_phdr to insert
+ */
+static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
+{
+	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
+	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
+	struct elf_phdr *nhdr, *phdr;
+	struct elfhdr *elf;
+	struct memelfnote notes[3];
+	off_t offset = 0;
+	struct kcore_list *m;
+
+	/* setup ELF header */
+	elf = (struct elfhdr *) bufp;
+	bufp += sizeof(struct elfhdr);
+	offset += sizeof(struct elfhdr);
+	memcpy(elf->e_ident, ELFMAG, SELFMAG);
+	elf->e_ident[EI_CLASS]	= ELF_CLASS;
+	elf->e_ident[EI_DATA]	= ELF_DATA;
+	elf->e_ident[EI_VERSION]= EV_CURRENT;
+	elf->e_ident[EI_OSABI] = ELF_OSABI;
+	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
+	elf->e_type	= ET_CORE;
+	elf->e_machine	= ELF_ARCH;
+	elf->e_version	= EV_CURRENT;
+	elf->e_entry	= 0;
+	elf->e_phoff	= sizeof(struct elfhdr);
+	elf->e_shoff	= 0;
+#if defined(CONFIG_H8300)
+	elf->e_flags	= ELF_FLAGS;
+#else
+	elf->e_flags	= 0;
+#endif
+	elf->e_ehsize	= sizeof(struct elfhdr);
+	elf->e_phentsize= sizeof(struct elf_phdr);
+	elf->e_phnum	= nphdr;
+	elf->e_shentsize= 0;
+	elf->e_shnum	= 0;
+	elf->e_shstrndx	= 0;
+
+	/* setup ELF PT_NOTE program header */
+	nhdr = (struct elf_phdr *) bufp;
+	bufp += sizeof(struct elf_phdr);
+	offset += sizeof(struct elf_phdr);
+	nhdr->p_type	= PT_NOTE;
+	nhdr->p_offset	= 0;
+	nhdr->p_vaddr	= 0;
+	nhdr->p_paddr	= 0;
+	nhdr->p_filesz	= 0;
+	nhdr->p_memsz	= 0;
+	nhdr->p_flags	= 0;
+	nhdr->p_align	= 0;
+
+	/* setup ELF PT_LOAD program header for every area */
+	for (m=kclist; m; m=m->next) {
+		phdr = (struct elf_phdr *) bufp;
+		bufp += sizeof(struct elf_phdr);
+		offset += sizeof(struct elf_phdr);
+
+		phdr->p_type	= PT_LOAD;
+		phdr->p_flags	= PF_R|PF_W|PF_X;
+		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
+		phdr->p_vaddr	= (size_t)m->addr;
+		phdr->p_paddr	= 0;
+		phdr->p_filesz	= phdr->p_memsz	= m->size;
+		phdr->p_align	= PAGE_SIZE;
+	}
+
+	/*
+	 * Set up the notes in similar form to SVR4 core dumps made
+	 * with info from their /proc.
+	 */
+	nhdr->p_offset	= offset;
+
+	/* set up the process status */
+	notes[0].name = "CORE";
+	notes[0].type = NT_PRSTATUS;
+	notes[0].datasz = sizeof(struct elf_prstatus);
+	notes[0].data = &prstatus;
+
+	memset(&prstatus, 0, sizeof(struct elf_prstatus));
+
+	nhdr->p_filesz	= notesize(&notes[0]);
+	bufp = storenote(&notes[0], bufp);
+
+	/* set up the process info */
+	notes[1].name	= "CORE";
+	notes[1].type	= NT_PRPSINFO;
+	notes[1].datasz	= sizeof(struct elf_prpsinfo);
+	notes[1].data	= &prpsinfo;
+
+	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
+	prpsinfo.pr_state	= 0;
+	prpsinfo.pr_sname	= 'R';
+	prpsinfo.pr_zomb	= 0;
+
+	strcpy(prpsinfo.pr_fname, "vmlinux");
+	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);
+
+	nhdr->p_filesz	+= notesize(&notes[1]);
+	bufp = storenote(&notes[1], bufp);
+
+	/* set up the task structure */
+	notes[2].name	= "CORE";
+	notes[2].type	= NT_TASKSTRUCT;
+	notes[2].datasz	= sizeof(struct task_struct);
+	notes[2].data	= current;
+
+	nhdr->p_filesz	+= notesize(&notes[2]);
+	bufp = storenote(&notes[2], bufp);
+
+} /* end elf_kcore_store_hdr() */
+
+/*****************************************************************************/
+/*
+ * read from the ELF header and then kernel memory
+ */
+static ssize_t
+read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+{
+	ssize_t acc = 0;
+	size_t size, tsz;
+	size_t elf_buflen;
+	int nphdr;
+	unsigned long start;
+
+	read_lock(&kclist_lock);
+	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
+	if (buflen == 0 || *fpos >= size) {
+		read_unlock(&kclist_lock);
+		return 0;
+	}
+
+	/* trim buflen to not go beyond EOF */
+	if (buflen > size - *fpos)
+		buflen = size - *fpos;
+
+	/* construct an ELF core header if we'll need some of it */
+	if (*fpos < elf_buflen) {
+		char * elf_buf;
+
+		tsz = elf_buflen - *fpos;
+		if (buflen < tsz)
+			tsz = buflen;
+		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
+		if (!elf_buf) {
+			read_unlock(&kclist_lock);
+			return -ENOMEM;
+		}
+		memset(elf_buf, 0, elf_buflen);
+		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
+		read_unlock(&kclist_lock);
+		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
+			kfree(elf_buf);
+			return -EFAULT;
+		}
+		kfree(elf_buf);
+		buflen -= tsz;
+		*fpos += tsz;
+		buffer += tsz;
+		acc += tsz;
+
+		/* leave now if filled buffer already */
+		if (buflen == 0)
+			return acc;
+	} else
+		read_unlock(&kclist_lock);
+
+	/*
+	 * Check to see if our file offset matches with any of
+	 * the addresses in the elf_phdr on our list.
+	 */
+	start = kc_offset_to_vaddr(*fpos - elf_buflen);
+	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
+		tsz = buflen;
+		
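+	/*
+	 * Copy at most one page per iteration: the first chunk stops at the
+	 * end of the page containing start, later chunks are whole pages
+	 * (tsz is re-clamped at the bottom of the loop).
+	 */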
+	while (buflen) {
+		struct kcore_list *m;
+
+		read_lock(&kclist_lock);
+		for (m=kclist; m; m=m->next) {
+			if (start >= m->addr && start < (m->addr+m->size))
+				break;
+		}
+		read_unlock(&kclist_lock);
+
+		if (m == NULL) {
+			if (clear_user(buffer, tsz))
+				return -EFAULT;
+		} else if ((start >= VMALLOC_START) && (start < VMALLOC_END)) {
+			char * elf_buf;
+			struct vm_struct *m;
+			unsigned long curstart = start;
+			unsigned long cursize = tsz;
+
+			elf_buf = kmalloc(tsz, GFP_KERNEL);
+			if (!elf_buf)
+				return -ENOMEM;
+			memset(elf_buf, 0, tsz);
+
+			read_lock(&vmlist_lock);
+			for (m=vmlist; m && cursize; m=m->next) {
+				unsigned long vmstart;
+				unsigned long vmsize;
+				unsigned long msize = m->size - PAGE_SIZE;
+
+				if (((unsigned long)m->addr + msize) < 
+								curstart)
+					continue;
+				if ((unsigned long)m->addr > (curstart + 
+								cursize))
+					break;
+				vmstart = (curstart < (unsigned long)m->addr ? 
+					(unsigned long)m->addr : curstart);
+				if (((unsigned long)m->addr + msize) > 
+							(curstart + cursize))
+					vmsize = curstart + cursize - vmstart;
+				else
+					vmsize = (unsigned long)m->addr + 
+							msize - vmstart;
+				curstart = vmstart + vmsize;
+				cursize -= vmsize;
+				/* don't dump ioremap'd stuff! (TA) */
+				if (m->flags & VM_IOREMAP)
+					continue;
+				memcpy(elf_buf + (vmstart - start),
+					(char *)vmstart, vmsize);
+			}
+			read_unlock(&vmlist_lock);
+			if (copy_to_user(buffer, elf_buf, tsz)) {
+				kfree(elf_buf);
+				return -EFAULT;
+			}
+			kfree(elf_buf);
+		} else {
+			if (kern_addr_valid(start)) {
+				unsigned long n;
+
+				n = copy_to_user(buffer, (char *)start, tsz);
+				/*
+				 * We cannot distinguish between a fault on the
+				 * source and a fault on the destination. When
+				 * this happens we clear the buffer as well and
+				 * hope it will trigger the EFAULT again.
+				 */
+				if (n) { 
+					if (clear_user(buffer + tsz - n,
+								tsz - n))
+						return -EFAULT;
+				}
+			} else {
+				if (clear_user(buffer, tsz))
+					return -EFAULT;
+			}
+		}
+		buflen -= tsz;
+		*fpos += tsz;
+		buffer += tsz;
+		acc += tsz;
+		start += tsz;
+		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
+	}
+
+	return acc;
+}
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
new file mode 100644
index 0000000..10d37bf
--- /dev/null
+++ b/fs/proc/kmsg.c
@@ -0,0 +1,55 @@
+/*
+ *  linux/fs/proc/kmsg.c
+ *
+ *  Copyright (C) 1992  by Linus Torvalds
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+extern wait_queue_head_t log_wait;
+
+extern int do_syslog(int type, char __user *buf, int count);
+
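+/*
+ * do_syslog() command numbers used here: 1 opens the log, 0 closes it,
+ * 2 blocks reading data from the log and 9 returns the number of unread
+ * characters in the log buffer.
+ */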
+static int kmsg_open(struct inode * inode, struct file * file)
+{
+	return do_syslog(1,NULL,0);
+}
+
+static int kmsg_release(struct inode * inode, struct file * file)
+{
+	(void) do_syslog(0,NULL,0);
+	return 0;
+}
+
+static ssize_t kmsg_read(struct file *file, char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
+		return -EAGAIN;
+	return do_syslog(2, buf, count);
+}
+
+static unsigned int kmsg_poll(struct file *file, poll_table *wait)
+{
+	poll_wait(file, &log_wait, wait);
+	if (do_syslog(9, NULL, 0))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+
+struct file_operations proc_kmsg_operations = {
+	.read		= kmsg_read,
+	.poll		= kmsg_poll,
+	.open		= kmsg_open,
+	.release	= kmsg_release,
+};
diff --git a/fs/proc/mmu.c b/fs/proc/mmu.c
new file mode 100644
index 0000000..a704103
--- /dev/null
+++ b/fs/proc/mmu.c
@@ -0,0 +1,67 @@
+/* mmu.c: mmu memory info files
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mman.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/div64.h>
+#include "internal.h"
+
+void get_vmalloc_info(struct vmalloc_info *vmi)
+{
+	struct vm_struct *vma;
+	unsigned long free_area_size;
+	unsigned long prev_end;
+
+	vmi->used = 0;
+
+	if (!vmlist) {
+		vmi->largest_chunk = VMALLOC_TOTAL;
+	}
+	else {
+		vmi->largest_chunk = 0;
+
+		prev_end = VMALLOC_START;
+
+		read_lock(&vmlist_lock);
+
+		for (vma = vmlist; vma; vma = vma->next) {
+			vmi->used += vma->size;
+
+			free_area_size = (unsigned long) vma->addr - prev_end;
+			if (vmi->largest_chunk < free_area_size)
+				vmi->largest_chunk = free_area_size;
+
+			prev_end = vma->size + (unsigned long) vma->addr;
+		}
+
+		if (VMALLOC_END - prev_end > vmi->largest_chunk)
+			vmi->largest_chunk = VMALLOC_END - prev_end;
+
+		read_unlock(&vmlist_lock);
+	}
+}
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
new file mode 100644
index 0000000..f3bf016
--- /dev/null
+++ b/fs/proc/nommu.c
@@ -0,0 +1,135 @@
+/* nommu.c: mmu-less memory info files
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mman.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/div64.h>
+#include "internal.h"
+
+/*
+ * display a list of all the VMAs the kernel knows about
+ * - nommu kernels have a single flat list
+ */
+static int nommu_vma_list_show(struct seq_file *m, void *v)
+{
+	struct vm_area_struct *vma;
+	unsigned long ino = 0;
+	struct file *file;
+	dev_t dev = 0;
+	int flags, len;
+
+	vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
+
+	flags = vma->vm_flags;
+	file = vma->vm_file;
+
+	if (file) {
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m,
+		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+		   vma->vm_start,
+		   vma->vm_end,
+		   flags & VM_READ ? 'r' : '-',
+		   flags & VM_WRITE ? 'w' : '-',
+		   flags & VM_EXEC ? 'x' : '-',
+		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+		   vma->vm_pgoff << PAGE_SHIFT,
+		   MAJOR(dev), MINOR(dev), ino, &len);
+
+	if (file) {
+		len = 25 + sizeof(void *) * 6 - len;
+		if (len < 1)
+			len = 1;
+		seq_printf(m, "%*c", len, ' ');
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "");
+	}
+
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
+{
+	struct rb_node *_rb;
+	loff_t pos = *_pos;
+	void *next = NULL;
+
+	down_read(&nommu_vma_sem);
+
+	for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
+		if (pos-- == 0) {
+			next = _rb;
+			break;
+		}
+	}
+
+	return next;
+}
+
+static void nommu_vma_list_stop(struct seq_file *m, void *v)
+{
+	up_read(&nommu_vma_sem);
+}
+
+static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return rb_next((struct rb_node *) v);
+}
+
+static struct seq_operations proc_nommu_vma_list_seqop = {
+	.start	= nommu_vma_list_start,
+	.next	= nommu_vma_list_next,
+	.stop	= nommu_vma_list_stop,
+	.show	= nommu_vma_list_show
+};
+
+static int proc_nommu_vma_list_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &proc_nommu_vma_list_seqop);
+}
+
+static struct file_operations proc_nommu_vma_list_operations = {
+	.open    = proc_nommu_vma_list_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+static int __init proc_nommu_init(void)
+{
+	create_seq_entry("maps", S_IRUGO, &proc_nommu_vma_list_operations);
+	return 0;
+}
+
+module_init(proc_nommu_init);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
new file mode 100644
index 0000000..67423c6
--- /dev/null
+++ b/fs/proc/proc_devtree.c
@@ -0,0 +1,165 @@
+/*
+ * proc_devtree.c - handles /proc/device-tree
+ *
+ * Copyright 1997 Paul Mackerras
+ */
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <asm/prom.h>
+#include <asm/uaccess.h>
+
+#ifndef HAVE_ARCH_DEVTREE_FIXUPS
+static inline void set_node_proc_entry(struct device_node *np, struct proc_dir_entry *de)
+{
+}
+
+static inline void set_node_name_link(struct device_node *np, struct proc_dir_entry *de)
+{
+}
+
+static inline void set_node_addr_link(struct device_node *np, struct proc_dir_entry *de)
+{
+}
+#endif
+
+static struct proc_dir_entry *proc_device_tree;
+
+/*
+ * Supply data on a read from /proc/device-tree/node/property.
+ */
+static int property_read_proc(char *page, char **start, off_t off,
+			      int count, int *eof, void *data)
+{
+	struct property *pp = data;
+	int n;
+
+	if (off >= pp->length) {
+		*eof = 1;
+		return 0;
+	}
+	n = pp->length - off;
+	if (n > count)
+		n = count;
+	else
+		*eof = 1;
+	memcpy(page, pp->value + off, n);
+	*start = page;
+	return n;
+}
+
+/*
+ * For a node with a name like "gc@10", we make symlinks called "gc"
+ * and "@10" to it.
+ */
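+/*
+ * So a child "gc@10" of /proc/device-tree/foo appears as the directory
+ * foo/gc@10 plus the symlinks foo/gc and foo/@10 pointing at it (the name
+ * link is only added for the first child with a given name).
+ */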
+
+/*
+ * Process a node, adding entries for its children and its properties.
+ */
+void proc_device_tree_add_node(struct device_node *np, struct proc_dir_entry *de)
+{
+	struct property *pp;
+	struct proc_dir_entry *ent;
+	struct device_node *child, *sib;
+	const char *p, *at;
+	int l;
+	struct proc_dir_entry *list, **lastp, *al;
+
+	set_node_proc_entry(np, de);
+	lastp = &list;
+	for (pp = np->properties; pp != 0; pp = pp->next) {
+		/*
+		 * Unfortunately proc_register puts each new entry
+		 * at the beginning of the list.  So we rearrange them.
+		 */
+		ent = create_proc_read_entry(pp->name, strncmp(pp->name, "security-", 9) ?
+					     S_IRUGO : S_IRUSR, de, property_read_proc, pp);
+		if (ent == 0)
+			break;
+		if (!strncmp(pp->name, "security-", 9))
+		     ent->size = 0; /* don't leak number of password chars */
+		else
+		     ent->size = pp->length;
+		*lastp = ent;
+		lastp = &ent->next;
+	}
+	child = NULL;
+	while ((child = of_get_next_child(np, child))) {
+		p = strrchr(child->full_name, '/');
+		if (!p)
+			p = child->full_name;
+		else
+			++p;
+		/* chop off '@0' if the name ends with that */
+		l = strlen(p);
+		if (l > 2 && p[l-2] == '@' && p[l-1] == '0')
+			l -= 2;
+		ent = proc_mkdir(p, de);
+		if (ent == 0)
+			break;
+		*lastp = ent;
+		lastp = &ent->next;
+		proc_device_tree_add_node(child, ent);
+
+		/*
+		 * If we left the address part on the name, consider
+		 * adding symlinks from the name and address parts.
+		 */
+		if (p[l] != 0 || (at = strchr(p, '@')) == 0)
+			continue;
+
+		/*
+		 * If this is the first node with a given name property,
+		 * add a symlink with the name property as its name.
+		 */
+		sib = NULL;
+		while ((sib = of_get_next_child(np, sib)) && sib != child)
+			if (sib->name && strcmp(sib->name, child->name) == 0)
+				break;
+		if (sib == child && strncmp(p, child->name, l) != 0) {
+			al = proc_symlink(child->name, de, ent->name);
+			if (al == 0) {
+				of_node_put(sib);
+				break;
+			}
+			set_node_name_link(child, al);
+			*lastp = al;
+			lastp = &al->next;
+		}
+		of_node_put(sib);
+		/*
+		 * Add a symlink with the @address part as its name.
+		 */
+		al = proc_symlink(at, de, ent->name);
+		if (al == 0)
+			break;
+		set_node_addr_link(child, al);
+		*lastp = al;
+		lastp = &al->next;
+	}
+	of_node_put(child);
+	*lastp = NULL;
+	de->subdir = list;
+}
+
+/*
+ * Called on initialization to set up the /proc/device-tree subtree
+ */
+void proc_device_tree_init(void)
+{
+	struct device_node *root;
+	if ( !have_of )
+		return;
+	proc_device_tree = proc_mkdir("device-tree", NULL);
+	if (proc_device_tree == 0)
+		return;
+	root = of_find_node_by_path("/");
+	if (root == 0) {
+		printk(KERN_ERR "/proc/device-tree: can't find root\n");
+		return;
+	}
+	proc_device_tree_add_node(root, proc_device_tree);
+	of_node_put(root);
+}
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
new file mode 100644
index 0000000..1d75d6a
--- /dev/null
+++ b/fs/proc/proc_misc.c
@@ -0,0 +1,615 @@
+/*
+ *  linux/fs/proc/proc_misc.c
+ *
+ *  linux/fs/proc/array.c
+ *  Copyright (C) 1992  by Linus Torvalds
+ *  based on ideas by Darren Senn
+ *
+ *  This used to be part of array.c. See the rest of history and credits
+ *  there. I took this into a separate file and switched the thing to generic
+ *  proc_file_inode_operations, leaving in array.c only per-process stuff.
+ *  Inumbers allocation made dynamic (via create_proc_entry()).  AV, May 1999.
+ *
+ * Changes:
+ * Fulton Green      :  Encapsulated position metric calculations.
+ *			<kernel@FultonGreen.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/mman.h>
+#include <linux/proc_fs.h>
+#include <linux/ioport.h>
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
+#include <linux/times.h>
+#include <linux/profile.h>
+#include <linux/blkdev.h>
+#include <linux/hugetlb.h>
+#include <linux/jiffies.h>
+#include <linux/sysrq.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/div64.h>
+#include "internal.h"
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
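+/*
+ * avenrun[] is fixed-point with FSHIFT fractional bits; with FSHIFT == 11
+ * a raw value of 3072 prints as "1.50": 3072 >> 11 == 1 and
+ * ((3072 & (FIXED_1-1)) * 100) >> 11 == 50.
+ */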
+/*
+ * Warning: stuff below (imported functions) assumes that its output will fit
+ * into one page. For some of those functions it may be wrong. Moreover, we
+ * have a way to deal with that gracefully. Right now I used straightforward
+ * wrappers, but this needs further analysis wrt potential overflows.
+ */
+extern int get_hardware_list(char *);
+extern int get_stram_list(char *);
+extern int get_chrdev_list(char *);
+extern int get_filesystem_list(char *);
+extern int get_exec_domain_list(char *);
+extern int get_dma_list(char *);
+extern int get_locks_status (char *, char **, off_t, int);
+
+static int proc_calc_metrics(char *page, char **start, off_t off,
+				 int count, int *eof, int len)
+{
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+	return len;
+}
+
+static int loadavg_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int a, b, c;
+	int len;
+
+	a = avenrun[0] + (FIXED_1/200);
+	b = avenrun[1] + (FIXED_1/200);
+	c = avenrun[2] + (FIXED_1/200);
+	len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		nr_running(), nr_threads, last_pid);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+static int uptime_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	struct timespec uptime;
+	struct timespec idle;
+	int len;
+	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	cputime_to_timespec(idletime, &idle);
+	len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
+			(unsigned long) uptime.tv_sec,
+			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
+			(unsigned long) idle.tv_sec,
+			(idle.tv_nsec / (NSEC_PER_SEC / 100)));
+
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+static int meminfo_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	struct sysinfo i;
+	int len;
+	struct page_state ps;
+	unsigned long inactive;
+	unsigned long active;
+	unsigned long free;
+	unsigned long committed;
+	unsigned long allowed;
+	struct vmalloc_info vmi;
+
+	get_page_state(&ps);
+	get_zone_counts(&active, &inactive, &free);
+
+/*
+ * display in kilobytes.
+ */
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+	si_meminfo(&i);
+	si_swapinfo(&i);
+	committed = atomic_read(&vm_committed_space);
+	allowed = ((totalram_pages - hugetlb_total_pages())
+		* sysctl_overcommit_ratio / 100) + total_swap_pages;
+
+	get_vmalloc_info(&vmi);
+
+	/*
+	 * Tagged format, for easy grepping and expansion.
+	 */
+	len = sprintf(page,
+		"MemTotal:     %8lu kB\n"
+		"MemFree:      %8lu kB\n"
+		"Buffers:      %8lu kB\n"
+		"Cached:       %8lu kB\n"
+		"SwapCached:   %8lu kB\n"
+		"Active:       %8lu kB\n"
+		"Inactive:     %8lu kB\n"
+		"HighTotal:    %8lu kB\n"
+		"HighFree:     %8lu kB\n"
+		"LowTotal:     %8lu kB\n"
+		"LowFree:      %8lu kB\n"
+		"SwapTotal:    %8lu kB\n"
+		"SwapFree:     %8lu kB\n"
+		"Dirty:        %8lu kB\n"
+		"Writeback:    %8lu kB\n"
+		"Mapped:       %8lu kB\n"
+		"Slab:         %8lu kB\n"
+		"CommitLimit:  %8lu kB\n"
+		"Committed_AS: %8lu kB\n"
+		"PageTables:   %8lu kB\n"
+		"VmallocTotal: %8lu kB\n"
+		"VmallocUsed:  %8lu kB\n"
+		"VmallocChunk: %8lu kB\n",
+		K(i.totalram),
+		K(i.freeram),
+		K(i.bufferram),
+		K(get_page_cache_size()-total_swapcache_pages-i.bufferram),
+		K(total_swapcache_pages),
+		K(active),
+		K(inactive),
+		K(i.totalhigh),
+		K(i.freehigh),
+		K(i.totalram-i.totalhigh),
+		K(i.freeram-i.freehigh),
+		K(i.totalswap),
+		K(i.freeswap),
+		K(ps.nr_dirty),
+		K(ps.nr_writeback),
+		K(ps.nr_mapped),
+		K(ps.nr_slab),
+		K(allowed),
+		K(committed),
+		K(ps.nr_page_table_pages),
+		(unsigned long)VMALLOC_TOTAL >> 10,
+		vmi.used >> 10,
+		vmi.largest_chunk >> 10
+		);
+
+		len += hugetlb_report_meminfo(page + len);
+
+	return proc_calc_metrics(page, start, off, count, eof, len);
+#undef K
+}
+
+extern struct seq_operations fragmentation_op;
+static int fragmentation_open(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	return seq_open(file, &fragmentation_op);
+}
+
+static struct file_operations fragmentation_file_operations = {
+	.open		= fragmentation_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int version_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	strcpy(page, linux_banner);
+	len = strlen(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+extern struct seq_operations cpuinfo_op;
+static int cpuinfo_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &cpuinfo_op);
+}
+static struct file_operations proc_cpuinfo_operations = {
+	.open		= cpuinfo_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+extern struct seq_operations vmstat_op;
+static int vmstat_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &vmstat_op);
+}
+static struct file_operations proc_vmstat_file_operations = {
+	.open		= vmstat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#ifdef CONFIG_PROC_HARDWARE
+static int hardware_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_hardware_list(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+#endif
+
+#ifdef CONFIG_STRAM_PROC
+static int stram_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_stram_list(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+#endif
+
+extern struct seq_operations partitions_op;
+static int partitions_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &partitions_op);
+}
+static struct file_operations proc_partitions_operations = {
+	.open		= partitions_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+extern struct seq_operations diskstats_op;
+static int diskstats_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &diskstats_op);
+}
+static struct file_operations proc_diskstats_operations = {
+	.open		= diskstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#ifdef CONFIG_MODULES
+extern struct seq_operations modules_op;
+static int modules_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &modules_op);
+}
+static struct file_operations proc_modules_operations = {
+	.open		= modules_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+#endif
+
+extern struct seq_operations slabinfo_op;
+extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+static int slabinfo_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &slabinfo_op);
+}
+static struct file_operations proc_slabinfo_operations = {
+	.open		= slabinfo_open,
+	.read		= seq_read,
+	.write		= slabinfo_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int show_stat(struct seq_file *p, void *v)
+{
+	int i;
+	unsigned long jif;
+	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
+	u64 sum = 0;
+
+	user = nice = system = idle = iowait =
+		irq = softirq = steal = cputime64_zero;
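+	/*
+	 * jif below is the boot time in seconds since the epoch (reported
+	 * as "btime"): the negated wall_to_monotonic offset, rounded down
+	 * when there is a fractional second.
+	 */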
+	jif = - wall_to_monotonic.tv_sec;
+	if (wall_to_monotonic.tv_nsec)
+		--jif;
+
+	for_each_cpu(i) {
+		int j;
+
+		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
+		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
+		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
+		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
+		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
+		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
+		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
+		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
+		for (j = 0 ; j < NR_IRQS ; j++)
+			sum += kstat_cpu(i).irqs[j];
+	}
+
+	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu\n",
+		(unsigned long long)cputime64_to_clock_t(user),
+		(unsigned long long)cputime64_to_clock_t(nice),
+		(unsigned long long)cputime64_to_clock_t(system),
+		(unsigned long long)cputime64_to_clock_t(idle),
+		(unsigned long long)cputime64_to_clock_t(iowait),
+		(unsigned long long)cputime64_to_clock_t(irq),
+		(unsigned long long)cputime64_to_clock_t(softirq),
+		(unsigned long long)cputime64_to_clock_t(steal));
+	for_each_online_cpu(i) {
+
+		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
+		user = kstat_cpu(i).cpustat.user;
+		nice = kstat_cpu(i).cpustat.nice;
+		system = kstat_cpu(i).cpustat.system;
+		idle = kstat_cpu(i).cpustat.idle;
+		iowait = kstat_cpu(i).cpustat.iowait;
+		irq = kstat_cpu(i).cpustat.irq;
+		softirq = kstat_cpu(i).cpustat.softirq;
+		steal = kstat_cpu(i).cpustat.steal;
+		seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			i,
+			(unsigned long long)cputime64_to_clock_t(user),
+			(unsigned long long)cputime64_to_clock_t(nice),
+			(unsigned long long)cputime64_to_clock_t(system),
+			(unsigned long long)cputime64_to_clock_t(idle),
+			(unsigned long long)cputime64_to_clock_t(iowait),
+			(unsigned long long)cputime64_to_clock_t(irq),
+			(unsigned long long)cputime64_to_clock_t(softirq),
+			(unsigned long long)cputime64_to_clock_t(steal));
+	}
+	seq_printf(p, "intr %llu", (unsigned long long)sum);
+
+#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA)
+	for (i = 0; i < NR_IRQS; i++)
+		seq_printf(p, " %u", kstat_irqs(i));
+#endif
+
+	seq_printf(p,
+		"\nctxt %llu\n"
+		"btime %lu\n"
+		"processes %lu\n"
+		"procs_running %lu\n"
+		"procs_blocked %lu\n",
+		nr_context_switches(),
+		(unsigned long)jif,
+		total_forks,
+		nr_running(),
+		nr_iowait());
+
+	return 0;
+}
+
+static int stat_open(struct inode *inode, struct file *file)
+{
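+	/*
+	 * one page, plus an extra page for every 32 possible CPUs: each
+	 * online CPU contributes its own "cpuN ..." line to the output
+	 */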
+	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
+	char *buf;
+	struct seq_file *m;
+	int res;
+
+	/* don't ask for more than the kmalloc() max size, currently 128 KB */
+	if (size > 128 * 1024)
+		size = 128 * 1024;
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	res = single_open(file, show_stat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+static struct file_operations proc_stat_operations = {
+	.open		= stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int devices_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_chrdev_list(page);
+	len += get_blkdev_list(page+len);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+/*
+ * /proc/interrupts
+ */
+static void *int_seq_start(struct seq_file *f, loff_t *pos)
+{
+	return (*pos <= NR_IRQS) ? pos : NULL;
+}
+
+static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
+{
+	(*pos)++;
+	if (*pos > NR_IRQS)
+		return NULL;
+	return pos;
+}
+
+static void int_seq_stop(struct seq_file *f, void *v)
+{
+	/* Nothing to do */
+}
+
+
+extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */
+static struct seq_operations int_seq_ops = {
+	.start = int_seq_start,
+	.next  = int_seq_next,
+	.stop  = int_seq_stop,
+	.show  = show_interrupts
+};
+
+static int interrupts_open(struct inode *inode, struct file *filp)
+{
+	return seq_open(filp, &int_seq_ops);
+}
+
+static struct file_operations proc_interrupts_operations = {
+	.open		= interrupts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int filesystems_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_filesystem_list(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+static int cmdline_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%s\n", saved_command_line);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+static int locks_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_locks_status(page, start, off, count);
+
+	if (len < count)
+		*eof = 1;
+	return len;
+}
+
+static int execdomains_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = get_exec_domain_list(page);
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+/*
+ * writing 'C' to /proc/sysrq-trigger is like sysrq-C
+ */
+static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	if (count) {
+		char c;
+
+		if (get_user(c, buf))
+			return -EFAULT;
+		__handle_sysrq(c, NULL, NULL, 0);
+	}
+	return count;
+}
+
+static struct file_operations proc_sysrq_trigger_operations = {
+	.write		= write_sysrq_trigger,
+};
+#endif
+
+struct proc_dir_entry *proc_root_kcore;
+
+void create_seq_entry(char *name, mode_t mode, struct file_operations *f)
+{
+	struct proc_dir_entry *entry;
+	entry = create_proc_entry(name, mode, NULL);
+	if (entry)
+		entry->proc_fops = f;
+}
+
+void __init proc_misc_init(void)
+{
+	struct proc_dir_entry *entry;
+	static struct {
+		char *name;
+		int (*read_proc)(char*,char**,off_t,int,int*,void*);
+	} *p, simple_ones[] = {
+		{"loadavg",     loadavg_read_proc},
+		{"uptime",	uptime_read_proc},
+		{"meminfo",	meminfo_read_proc},
+		{"version",	version_read_proc},
+#ifdef CONFIG_PROC_HARDWARE
+		{"hardware",	hardware_read_proc},
+#endif
+#ifdef CONFIG_STRAM_PROC
+		{"stram",	stram_read_proc},
+#endif
+		{"devices",	devices_read_proc},
+		{"filesystems",	filesystems_read_proc},
+		{"cmdline",	cmdline_read_proc},
+		{"locks",	locks_read_proc},
+		{"execdomains",	execdomains_read_proc},
+		{NULL,}
+	};
+	for (p = simple_ones; p->name; p++)
+		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);
+
+	proc_symlink("mounts", NULL, "self/mounts");
+
+	/* And now for trickier ones */
+	entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
+	if (entry)
+		entry->proc_fops = &proc_kmsg_operations;
+	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
+	create_seq_entry("partitions", 0, &proc_partitions_operations);
+	create_seq_entry("stat", 0, &proc_stat_operations);
+	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
+	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
+	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
+#ifdef CONFIG_MODULES
+	create_seq_entry("modules", 0, &proc_modules_operations);
+#endif
+#ifdef CONFIG_SCHEDSTATS
+	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
+#endif
+#ifdef CONFIG_PROC_KCORE
+	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
+	if (proc_root_kcore) {
+		proc_root_kcore->proc_fops = &proc_kcore_operations;
+		proc_root_kcore->size =
+				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
+	}
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+	entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
+	if (entry)
+		entry->proc_fops = &proc_sysrq_trigger_operations;
+#endif
+#ifdef CONFIG_PPC32
+	{
+		extern struct file_operations ppc_htab_operations;
+		entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
+		if (entry)
+			entry->proc_fops = &ppc_htab_operations;
+	}
+#endif
+}
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
new file mode 100644
index 0000000..15c4455
--- /dev/null
+++ b/fs/proc/proc_tty.c
@@ -0,0 +1,242 @@
+/*
+ * proc_tty.c -- handles /proc/tty
+ *
+ * Copyright 1997, Theodore Ts'o
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+
+static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
+				int count, int *eof, void *data);
+
+/*
+ * The /proc/tty directory inodes...
+ */
+static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver;
+
+/*
+ * This is the handler for /proc/tty/drivers
+ */
+static void show_tty_range(struct seq_file *m, struct tty_driver *p,
+	dev_t from, int num)
+{
+	seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
+	seq_printf(m, "/dev/%-8s ", p->name);
+	if (p->num > 1) {
+		seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from),
+			MINOR(from) + num - 1);
+	} else {
+		seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
+	}
+	switch (p->type) {
+	case TTY_DRIVER_TYPE_SYSTEM:
+		seq_printf(m, "system");
+		if (p->subtype == SYSTEM_TYPE_TTY)
+			seq_printf(m, ":/dev/tty");
+		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
+			seq_printf(m, ":console");
+		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
+			seq_printf(m, ":vtmaster");
+		break;
+	case TTY_DRIVER_TYPE_CONSOLE:
+		seq_printf(m, "console");
+		break;
+	case TTY_DRIVER_TYPE_SERIAL:
+		seq_printf(m, "serial");
+		break;
+	case TTY_DRIVER_TYPE_PTY:
+		if (p->subtype == PTY_TYPE_MASTER)
+			seq_printf(m, "pty:master");
+		else if (p->subtype == PTY_TYPE_SLAVE)
+			seq_printf(m, "pty:slave");
+		else
+			seq_printf(m, "pty");
+		break;
+	default:
+		seq_printf(m, "type:%d.%d", p->type, p->subtype);
+	}
+	seq_putc(m, '\n');
+}
+
+static int show_tty_driver(struct seq_file *m, void *v)
+{
+	struct tty_driver *p = v;
+	dev_t from = MKDEV(p->major, p->minor_start);
+	dev_t to = from + p->num;
+
+	if (&p->tty_drivers == tty_drivers.next) {
+		/* pseudo-drivers first */
+		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
+		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
+		seq_printf(m, "system:/dev/tty\n");
+		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
+		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
+		seq_printf(m, "system:console\n");
+#ifdef CONFIG_UNIX98_PTYS
+		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
+		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
+		seq_printf(m, "system\n");
+#endif
+#ifdef CONFIG_VT
+		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
+		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
+		seq_printf(m, "system:vtmaster\n");
+#endif
+	}
+
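+	/* a driver whose device range spans several majors gets one line
+	 * per major */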
+	while (MAJOR(from) < MAJOR(to)) {
+		dev_t next = MKDEV(MAJOR(from)+1, 0);
+		show_tty_range(m, p, from, next - from);
+		from = next;
+	}
+	if (from != to)
+		show_tty_range(m, p, from, to - from);
+	return 0;
+}
+
+/* iterator */
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+	struct list_head *p;
+	loff_t l = *pos;
+	list_for_each(p, &tty_drivers)
+		if (!l--)
+			return list_entry(p, struct tty_driver, tty_drivers);
+	return NULL;
+}
+
+static void *t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct list_head *p = ((struct tty_driver *)v)->tty_drivers.next;
+	(*pos)++;
+	return p==&tty_drivers ? NULL :
+			list_entry(p, struct tty_driver, tty_drivers);
+}
+
+static void t_stop(struct seq_file *m, void *v)
+{
+}
+
+static struct seq_operations tty_drivers_op = {
+	.start	= t_start,
+	.next	= t_next,
+	.stop	= t_stop,
+	.show	= show_tty_driver
+};
+
+static int tty_drivers_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tty_drivers_op);
+}
+
+static struct file_operations proc_tty_drivers_operations = {
+	.open		= tty_drivers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * This is the handler for /proc/tty/ldiscs
+ */
+static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	int	i;
+	int	len = 0;
+	off_t	begin = 0;
+	struct tty_ldisc *ld;
+	
+	for (i=0; i < NR_LDISCS; i++) {
+		ld = tty_ldisc_get(i);
+		if (ld == NULL)
+			continue;
+		len += sprintf(page+len, "%-10s %2d\n",
+			       ld->name ? ld->name : "???", i);
+		tty_ldisc_put(i);
+		if (len+begin > off+count)
+			break;
+		if (len+begin < off) {
+			begin += len;
+			len = 0;
+		}
+	}
+	if (i >= NR_LDISCS)
+		*eof = 1;
+	if (off >= len+begin)
+		return 0;
+	*start = page + (off-begin);
+	return ((count < begin+len-off) ? count : begin+len-off);
+}
+
+/*
+ * This function is called by tty_register_driver() to handle
+ * registering the driver's /proc handler into /proc/tty/driver/<foo>
+ */
+void proc_tty_register_driver(struct tty_driver *driver)
+{
+	struct proc_dir_entry *ent;
+		
+	if ((!driver->read_proc && !driver->write_proc) ||
+	    !driver->driver_name ||
+	    driver->proc_entry)
+		return;
+
+	ent = create_proc_entry(driver->driver_name, 0, proc_tty_driver);
+	if (!ent)
+		return;
+	ent->read_proc = driver->read_proc;
+	ent->write_proc = driver->write_proc;
+	ent->owner = driver->owner;
+	ent->data = driver;
+
+	driver->proc_entry = ent;
+}
+
+/*
+ * This function is called by tty_unregister_driver()
+ */
+void proc_tty_unregister_driver(struct tty_driver *driver)
+{
+	struct proc_dir_entry *ent;
+
+	ent = driver->proc_entry;
+	if (!ent)
+		return;
+		
+	remove_proc_entry(driver->driver_name, proc_tty_driver);
+	
+	driver->proc_entry = NULL;
+}
+
+/*
+ * Called by proc_root_init() to initialize the /proc/tty subtree
+ */
+void __init proc_tty_init(void)
+{
+	struct proc_dir_entry *entry;
+	if (!proc_mkdir("tty", NULL))
+		return;
+	proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
+	/*
+	 * /proc/tty/driver/serial reveals the exact character counts for
+	 * serial links which is just too easy to abuse for inferring
+	 * password lengths and inter-keystroke timings during password
+	 * entry.
+	 */
+	proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, NULL);
+
+	create_proc_read_entry("tty/ldiscs", 0, NULL, tty_ldiscs_read_proc, NULL);
+	entry = create_proc_entry("tty/drivers", 0, NULL);
+	if (entry)
+		entry->proc_fops = &proc_tty_drivers_operations;
+}
diff --git a/fs/proc/root.c b/fs/proc/root.c
new file mode 100644
index 0000000..aef148f
--- /dev/null
+++ b/fs/proc/root.c
@@ -0,0 +1,161 @@
+/*
+ *  linux/fs/proc/root.c
+ *
+ *  Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *  proc root directory handling functions
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/smp_lock.h>
+
+struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc_root_driver;
+
+#ifdef CONFIG_SYSCTL
+struct proc_dir_entry *proc_sys_root;
+#endif
+
+static struct super_block *proc_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, proc_fill_super);
+}
+
+static struct file_system_type proc_fs_type = {
+	.name		= "proc",
+	.get_sb		= proc_get_sb,
+	.kill_sb	= kill_anon_super,
+};
+
+extern int __init proc_init_inodecache(void);
+void __init proc_root_init(void)
+{
+	int err = proc_init_inodecache();
+	if (err)
+		return;
+	err = register_filesystem(&proc_fs_type);
+	if (err)
+		return;
+	proc_mnt = kern_mount(&proc_fs_type);
+	err = PTR_ERR(proc_mnt);
+	if (IS_ERR(proc_mnt)) {
+		unregister_filesystem(&proc_fs_type);
+		return;
+	}
+	proc_misc_init();
+	proc_net = proc_mkdir("net", NULL);
+	proc_net_stat = proc_mkdir("net/stat", NULL);
+
+#ifdef CONFIG_SYSVIPC
+	proc_mkdir("sysvipc", NULL);
+#endif
+#ifdef CONFIG_SYSCTL
+	proc_sys_root = proc_mkdir("sys", NULL);
+#endif
+#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
+	proc_mkdir("sys/fs", NULL);
+	proc_mkdir("sys/fs/binfmt_misc", NULL);
+#endif
+	proc_root_fs = proc_mkdir("fs", NULL);
+	proc_root_driver = proc_mkdir("driver", NULL);
+	proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
+#if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
+	/* just give it a mountpoint */
+	proc_mkdir("openprom", NULL);
+#endif
+	proc_tty_init();
+#ifdef CONFIG_PROC_DEVICETREE
+	proc_device_tree_init();
+#endif
+	proc_bus = proc_mkdir("bus", NULL);
+}
+
+static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	/*
+	 * nr_threads is actually protected by the tasklist_lock;
+	 * however, it's conventional to do reads, especially for
+	 * reporting, without any locking whatsoever.
+	 */
+	if (dir->i_ino == PROC_ROOT_INO) /* check for safety... */
+		dir->i_nlink = proc_root.nlink + nr_threads;
+
+	if (!proc_lookup(dir, dentry, nd)) {
+		return NULL;
+	}
+	
+	return proc_pid_lookup(dir, dentry, nd);
+}
+
+static int proc_root_readdir(struct file * filp,
+	void * dirent, filldir_t filldir)
+{
+	unsigned int nr = filp->f_pos;
+	int ret;
+
+	lock_kernel();
+
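+	/*
+	 * Positions below FIRST_PROCESS_ENTRY enumerate the static /proc
+	 * entries; everything from there on is handled by the <pid> code.
+	 */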
+	if (nr < FIRST_PROCESS_ENTRY) {
+		int error = proc_readdir(filp, dirent, filldir);
+		if (error <= 0) {
+			unlock_kernel();
+			return error;
+		}
+		filp->f_pos = FIRST_PROCESS_ENTRY;
+	}
+	unlock_kernel();
+
+	ret = proc_pid_readdir(filp, dirent, filldir);
+	return ret;
+}
+
+/*
+ * The root /proc directory is special, as it has the
+ * <pid> directories. Thus we don't use the generic
+ * directory handling functions for that..
+ */
+static struct file_operations proc_root_operations = {
+	.read		 = generic_read_dir,
+	.readdir	 = proc_root_readdir,
+};
+
+/*
+ * proc root can do almost nothing..
+ */
+static struct inode_operations proc_root_inode_operations = {
+	.lookup		= proc_root_lookup,
+};
+
+/*
+ * This is the root "inode" in the /proc tree..
+ */
+struct proc_dir_entry proc_root = {
+	.low_ino	= PROC_ROOT_INO, 
+	.namelen	= 5, 
+	.name		= "/proc",
+	.mode		= S_IFDIR | S_IRUGO | S_IXUGO, 
+	.nlink		= 2, 
+	.proc_iops	= &proc_root_inode_operations, 
+	.proc_fops	= &proc_root_operations,
+	.parent		= &proc_root,
+};
+
+EXPORT_SYMBOL(proc_symlink);
+EXPORT_SYMBOL(proc_mkdir);
+EXPORT_SYMBOL(create_proc_entry);
+EXPORT_SYMBOL(remove_proc_entry);
+EXPORT_SYMBOL(proc_root);
+EXPORT_SYMBOL(proc_root_fs);
+EXPORT_SYMBOL(proc_net);
+EXPORT_SYMBOL(proc_net_stat);
+EXPORT_SYMBOL(proc_bus);
+EXPORT_SYMBOL(proc_root_driver);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
new file mode 100644
index 0000000..28b4a02
--- /dev/null
+++ b/fs/proc/task_mmu.c
@@ -0,0 +1,235 @@
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include <asm/elf.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+char *task_mem(struct mm_struct *mm, char *buffer)
+{
+	unsigned long data, text, lib;
+
+	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
+	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
+	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
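+	/*
+	 * total_vm and friends count pages, so the << (PAGE_SHIFT-10) shifts
+	 * convert them to kB; text is derived from byte addresses and is
+	 * already in kB here.
+	 */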
+	buffer += sprintf(buffer,
+		"VmSize:\t%8lu kB\n"
+		"VmLck:\t%8lu kB\n"
+		"VmRSS:\t%8lu kB\n"
+		"VmData:\t%8lu kB\n"
+		"VmStk:\t%8lu kB\n"
+		"VmExe:\t%8lu kB\n"
+		"VmLib:\t%8lu kB\n"
+		"VmPTE:\t%8lu kB\n",
+		(mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+		mm->locked_vm << (PAGE_SHIFT-10),
+		get_mm_counter(mm, rss) << (PAGE_SHIFT-10),
+		data << (PAGE_SHIFT-10),
+		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
+	return buffer;
+}
+
+unsigned long task_vsize(struct mm_struct *mm)
+{
+	return PAGE_SIZE * mm->total_vm;
+}
+
+int task_statm(struct mm_struct *mm, int *shared, int *text,
+	       int *data, int *resident)
+{
+	int rss = get_mm_counter(mm, rss);
+
+	*shared = rss - get_mm_counter(mm, anon_rss);
+	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
+								>> PAGE_SHIFT;
+	*data = mm->total_vm - mm->shared_vm;
+	*resident = rss;
+	return mm->total_vm;
+}
+
+int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct vm_area_struct * vma;
+	int result = -ENOENT;
+	struct task_struct *task = proc_task(inode);
+	struct mm_struct * mm = get_task_mm(task);
+
+	if (!mm)
+		goto out;
+	down_read(&mm->mmap_sem);
+
+	vma = mm->mmap;
+	while (vma) {
+		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
+			break;
+		vma = vma->vm_next;
+	}
+
+	if (vma) {
+		*mnt = mntget(vma->vm_file->f_vfsmnt);
+		*dentry = dget(vma->vm_file->f_dentry);
+		result = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+out:
+	return result;
+}
+
+static void pad_len_spaces(struct seq_file *m, int len)
+{
+	len = 25 + sizeof(void*) * 6 - len;
+	if (len < 1)
+		len = 1;
+	seq_printf(m, "%*c", len, ' ');
+}
+
+static int show_map(struct seq_file *m, void *v)
+{
+	struct task_struct *task = m->private;
+	struct vm_area_struct *map = v;
+	struct mm_struct *mm = map->vm_mm;
+	struct file *file = map->vm_file;
+	int flags = map->vm_flags;
+	unsigned long ino = 0;
+	dev_t dev = 0;
+	int len;
+
+	if (file) {
+		struct inode *inode = map->vm_file->f_dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+			map->vm_start,
+			map->vm_end,
+			flags & VM_READ ? 'r' : '-',
+			flags & VM_WRITE ? 'w' : '-',
+			flags & VM_EXEC ? 'x' : '-',
+			flags & VM_MAYSHARE ? 's' : 'p',
+			map->vm_pgoff << PAGE_SHIFT,
+			MAJOR(dev), MINOR(dev), ino, &len);
+
+	/*
+	 * Print the dentry name for named mappings, and a
+	 * special [heap] marker for the heap:
+	 */
+	if (map->vm_file) {
+		pad_len_spaces(m, len);
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "");
+	} else {
+		if (mm) {
+			if (map->vm_start <= mm->start_brk &&
+						map->vm_end >= mm->brk) {
+				pad_len_spaces(m, len);
+				seq_puts(m, "[heap]");
+			} else {
+				if (map->vm_start <= mm->start_stack &&
+					map->vm_end >= mm->start_stack) {
+
+					pad_len_spaces(m, len);
+					seq_puts(m, "[stack]");
+				}
+			}
+		} else {
+			pad_len_spaces(m, len);
+			seq_puts(m, "[vdso]");
+		}
+	}
+	seq_putc(m, '\n');
+	if (m->count < m->size)  /* map is copied successfully */
+		m->version = (map != get_gate_vma(task))? map->vm_start: 0;
+	return 0;
+}
+
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+	struct task_struct *task = m->private;
+	unsigned long last_addr = m->version;
+	struct mm_struct *mm;
+	struct vm_area_struct *map, *tail_map;
+	loff_t l = *pos;
+
+	/*
+	 * We remember last_addr rather than next_addr to hit with
+	 * mmap_cache most of the time. We have zero last_addr at
+	 * the beginning and also after lseek. We will have -1 last_addr
+	 * after the end of the maps.
+	 */
+
+	if (last_addr == -1UL)
+		return NULL;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return NULL;
+
+	tail_map = get_gate_vma(task);
+	down_read(&mm->mmap_sem);
+
+	/* Start with last addr hint */
+	if (last_addr && (map = find_vma(mm, last_addr))) {
+		map = map->vm_next;
+		goto out;
+	}
+
+	/*
+	 * Check the map index is within the range and do
+	 * sequential scan until m_index.
+	 */
+	map = NULL;
+	if ((unsigned long)l < mm->map_count) {
+		map = mm->mmap;
+		while (l-- && map)
+			map = map->vm_next;
+		goto out;
+	}
+
+	if (l != mm->map_count)
+		tail_map = NULL; /* After gate map */
+
+out:
+	if (map)
+		return map;
+
+	/* End of maps has reached */
+	m->version = (tail_map != NULL)? 0: -1UL;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return tail_map;
+}
+
+static void m_stop(struct seq_file *m, void *v)
+{
+	struct task_struct *task = m->private;
+	struct vm_area_struct *map = v;
+	if (map && map != get_gate_vma(task)) {
+		struct mm_struct *mm = map->vm_mm;
+		up_read(&mm->mmap_sem);
+		mmput(mm);
+	}
+}
+
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct task_struct *task = m->private;
+	struct vm_area_struct *map = v;
+	struct vm_area_struct *tail_map = get_gate_vma(task);
+
+	(*pos)++;
+	if (map && (map != tail_map) && map->vm_next)
+		return map->vm_next;
+	m_stop(m, v);
+	return (map != tail_map)? tail_map: NULL;
+}
+
+struct seq_operations proc_pid_maps_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_map
+};
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
new file mode 100644
index 0000000..8f68827
--- /dev/null
+++ b/fs/proc/task_nommu.c
@@ -0,0 +1,164 @@
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * Logic: we've got two memory sums for each process: "shared" and
+ * "non-shared". Shared memory may get counted more than once, for
+ * each process that owns it. Non-shared memory is counted
+ * accurately.
+ */
+char *task_mem(struct mm_struct *mm, char *buffer)
+{
+	struct vm_list_struct *vml;
+	unsigned long bytes = 0, sbytes = 0, slack = 0;
+        
+	down_read(&mm->mmap_sem);
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		if (!vml->vma)
+			continue;
+
+		bytes += kobjsize(vml);
+		if (atomic_read(&mm->mm_count) > 1 ||
+		    atomic_read(&vml->vma->vm_usage) > 1
+		    ) {
+			sbytes += kobjsize((void *) vml->vma->vm_start);
+			sbytes += kobjsize(vml->vma);
+		} else {
+			bytes += kobjsize((void *) vml->vma->vm_start);
+			bytes += kobjsize(vml->vma);
+			slack += kobjsize((void *) vml->vma->vm_start) -
+				(vml->vma->vm_end - vml->vma->vm_start);
+		}
+	}
+
+	if (atomic_read(&mm->mm_count) > 1)
+		sbytes += kobjsize(mm);
+	else
+		bytes += kobjsize(mm);
+	
+	if (current->fs && atomic_read(&current->fs->count) > 1)
+		sbytes += kobjsize(current->fs);
+	else
+		bytes += kobjsize(current->fs);
+
+	if (current->files && atomic_read(&current->files->count) > 1)
+		sbytes += kobjsize(current->files);
+	else
+		bytes += kobjsize(current->files);
+
+	if (current->sighand && atomic_read(&current->sighand->count) > 1)
+		sbytes += kobjsize(current->sighand);
+	else
+		bytes += kobjsize(current->sighand);
+
+	bytes += kobjsize(current); /* includes kernel stack */
+
+	buffer += sprintf(buffer,
+		"Mem:\t%8lu bytes\n"
+		"Slack:\t%8lu bytes\n"
+		"Shared:\t%8lu bytes\n",
+		bytes, slack, sbytes);
+
+	up_read(&mm->mmap_sem);
+	return buffer;
+}
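+
+/*
+ * Example (illustrative, values made up): the sprintf() above emits a
+ * tab and an 8-character-wide byte count after each label, roughly
+ *
+ *   Mem:      147456 bytes
+ *   Slack:      4096 bytes
+ *   Shared:    65536 bytes
+ */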
+
+unsigned long task_vsize(struct mm_struct *mm)
+{
+	struct vm_list_struct *tbp;
+	unsigned long vsize = 0;
+
+	down_read(&mm->mmap_sem);
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
+		if (tbp->vma)
+			vsize += kobjsize((void *) tbp->vma->vm_start);
+	}
+	up_read(&mm->mmap_sem);
+	return vsize;
+}
+
+int task_statm(struct mm_struct *mm, int *shared, int *text,
+	       int *data, int *resident)
+{
+	struct vm_list_struct *tbp;
+	int size = kobjsize(mm);
+
+	down_read(&mm->mmap_sem);
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
+		size += kobjsize(tbp);
+		if (tbp->vma) {
+			size += kobjsize(tbp->vma);
+			size += kobjsize((void *) tbp->vma->vm_start);
+		}
+	}
+
+	size += (*text = mm->end_code - mm->start_code);
+	size += (*data = mm->start_stack - mm->start_data);
+	up_read(&mm->mmap_sem);
+	*resident = size;
+	return size;
+}
+
+int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
+	struct task_struct *task = proc_task(inode);
+	struct mm_struct *mm = get_task_mm(task);
+	int result = -ENOENT;
+
+	if (!mm)
+		goto out;
+	down_read(&mm->mmap_sem);
+
+	vml = mm->context.vmlist;
+	vma = NULL;
+	while (vml) {
+		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
+			vma = vml->vma;
+			break;
+		}
+		vml = vml->next;
+	}
+
+	if (vma) {
+		*mnt = mntget(vma->vm_file->f_vfsmnt);
+		*dentry = dget(vma->vm_file->f_dentry);
+		result = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+out:
+	return result;
+}
+
+/*
+ * Albert D. Cahalan suggested faking entries for the traditional
+ * sections here.  This might be worth investigating.
+ */
+static int show_map(struct seq_file *m, void *v)
+{
+	return 0;
+}
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+	return NULL;
+}
+static void m_stop(struct seq_file *m, void *v)
+{
+}
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return NULL;
+}
+struct seq_operations proc_pid_maps_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_map
+};
diff --git a/fs/qnx4/Makefile b/fs/qnx4/Makefile
new file mode 100644
index 0000000..502d7fe
--- /dev/null
+++ b/fs/qnx4/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux qnx4-filesystem routines.
+#
+
+obj-$(CONFIG_QNX4FS_FS) += qnx4.o
+
+qnx4-objs := inode.o dir.o namei.o file.o bitmap.o truncate.o fsync.o
diff --git a/fs/qnx4/README b/fs/qnx4/README
new file mode 100644
index 0000000..1f1e320
--- /dev/null
+++ b/fs/qnx4/README
@@ -0,0 +1,9 @@
+
+  This is a snapshot of the QNX4 filesystem for Linux.
+  Please send diffs and remarks to <al@alarsen.net>.
+
+Credits:
+
+Richard "Scuba" A. Frowijn     <scuba@wxs.nl>
+Frank "Jedi/Sector One" Denis  <j@pureftpd.org>
+Anders Larsen                  <al@alarsen.net> (Maintainer)
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
new file mode 100644
index 0000000..9912539
--- /dev/null
+++ b/fs/qnx4/bitmap.c
@@ -0,0 +1,165 @@
+/*
+ * QNX4 file system, Linux implementation.
+ *
+ * Version : 0.2.1
+ *
+ * Using parts of the xiafs filesystem.
+ *
+ * History :
+ *
+ * 28-05-1998 by Richard Frowijn : first release.
+ * 20-06-1998 by Frank Denis : basic optimisations.
+ * 25-06-1998 by Frank Denis : qnx4_is_free, qnx4_set_bitmap, qnx4_bmap .
+ * 28-06-1998 by Frank Denis : qnx4_free_inode (to be fixed) .
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+#include <linux/stat.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/bitops.h>
+
+int qnx4_new_block(struct super_block *sb)
+{
+	return 0;
+}
+
+static void count_bits(register const char *bmPart, register int size,
+		       int *const tf)
+{
+	char b;
+	int tot = *tf;
+
+	if (size > QNX4_BLOCK_SIZE) {
+		size = QNX4_BLOCK_SIZE;
+	}
+	do {
+		b = *bmPart++;
+		if ((b & 1) == 0)
+			tot++;
+		if ((b & 2) == 0)
+			tot++;
+		if ((b & 4) == 0)
+			tot++;
+		if ((b & 8) == 0)
+			tot++;
+		if ((b & 16) == 0)
+			tot++;
+		if ((b & 32) == 0)
+			tot++;
+		if ((b & 64) == 0)
+			tot++;
+		if ((b & 128) == 0)
+			tot++;
+		size--;
+	} while (size != 0);
+	*tf = tot;
+}
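+
+/*
+ * Example (illustrative): count_bits() counts *zero* bits, i.e. free
+ * blocks.  A bitmap byte of 0x0f (binary 00001111) therefore adds 4
+ * to the running total: bits 4..7 are clear, so four blocks are free.
+ */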
+
+unsigned long qnx4_count_free_blocks(struct super_block *sb)
+{
+	int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
+	int total = 0;
+	int total_free = 0;
+	int offset = 0;
+	int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size);
+	struct buffer_head *bh;
+
+	while (total < size) {
+		if ((bh = sb_bread(sb, start + offset)) == NULL) {
+			printk("qnx4: I/O error in counting free blocks\n");
+			break;
+		}
+		count_bits(bh->b_data, size - total, &total_free);
+		brelse(bh);
+		total += QNX4_BLOCK_SIZE;
+		offset++;
+	}
+
+	return total_free;
+}
+
+#ifdef CONFIG_QNX4FS_RW
+
+int qnx4_is_free(struct super_block *sb, long block)
+{
+	int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
+	int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size);
+	struct buffer_head *bh;
+	const char *g;
+	int ret = -EIO;
+
+	start += block / (QNX4_BLOCK_SIZE * 8);
+	QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n",
+		   (unsigned long) block, (unsigned long) start));
+	(void) size;		/* CHECKME */
+	bh = sb_bread(sb, start);
+	if (bh == NULL) {
+		return -EIO;
+	}
+	g = bh->b_data + (block % QNX4_BLOCK_SIZE);
+	if (((*g) & (1 << (block % 8))) == 0) {
+		QNX4DEBUG(("qnx4: is_free -> block is free\n"));
+		ret = 1;
+	} else {
+		QNX4DEBUG(("qnx4: is_free -> block is busy\n"));
+		ret = 0;
+	}
+	brelse(bh);
+
+	return ret;
+}
+
+int qnx4_set_bitmap(struct super_block *sb, long block, int busy)
+{
+	int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
+	int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size);
+	struct buffer_head *bh;
+	char *g;
+
+	start += block / (QNX4_BLOCK_SIZE * 8);
+	QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n",
+		   (unsigned long) block, (unsigned long) start));
+	(void) size;		/* CHECKME */
+	bh = sb_bread(sb, start);
+	if (bh == NULL) {
+		return -EIO;
+	}
+	g = bh->b_data + (block % QNX4_BLOCK_SIZE);
+	if (busy == 0) {
+		(*g) &= ~(1 << (block % 8));
+	} else {
+		(*g) |= (1 << (block % 8));
+	}
+	mark_buffer_dirty(bh);
+	brelse(bh);
+
+	return 0;
+}
+
+static void qnx4_clear_inode(struct inode *inode)
+{
+	struct qnx4_inode_entry *qnx4_ino = qnx4_raw_inode(inode);
+	/* What for? */
+	memset(qnx4_ino->di_fname, 0, sizeof qnx4_ino->di_fname);
+	qnx4_ino->di_size = 0;
+	qnx4_ino->di_num_xtnts = 0;
+	qnx4_ino->di_mode = 0;
+	qnx4_ino->di_status = 0;
+}
+
+void qnx4_free_inode(struct inode *inode)
+{
+	if (inode->i_ino < 1) {
+		printk("free_inode: inode 0 or nonexistent inode\n");
+		return;
+	}
+	qnx4_clear_inode(inode);
+	clear_inode(inode);
+}
+
+#endif
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
new file mode 100644
index 0000000..cd66147c
--- /dev/null
+++ b/fs/qnx4/dir.c
@@ -0,0 +1,99 @@
+/*
+ * QNX4 file system, Linux implementation.
+ *
+ * Version : 0.2.1
+ *
+ * Using parts of the xiafs filesystem.
+ *
+ * History :
+ *
+ * 28-05-1998 by Richard Frowijn : first release.
+ * 20-06-1998 by Frank Denis : Linux 2.1.99+ & dcache support.
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+#include <linux/stat.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+
+static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	unsigned int offset;
+	struct buffer_head *bh;
+	struct qnx4_inode_entry *de;
+	struct qnx4_link_info *le;
+	unsigned long blknum;
+	int ix, ino;
+	int size;
+
+	QNX4DEBUG(("qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
+	QNX4DEBUG(("filp->f_pos         = %ld\n", (long) filp->f_pos));
+
+	lock_kernel();
+
+	while (filp->f_pos < inode->i_size) {
+		blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
+		bh = sb_bread(inode->i_sb, blknum);
+		if(bh==NULL) {
+			printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
+			break;
+		}
+		ix = (int)(filp->f_pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
+		while (ix < QNX4_INODES_PER_BLOCK) {
+			offset = ix * QNX4_DIR_ENTRY_SIZE;
+			de = (struct qnx4_inode_entry *) (bh->b_data + offset);
+			size = strlen(de->di_fname);
+			if (size) {
+				if ( !( de->di_status & QNX4_FILE_LINK ) && size > QNX4_SHORT_NAME_MAX )
+					size = QNX4_SHORT_NAME_MAX;
+				else if ( size > QNX4_NAME_MAX )
+					size = QNX4_NAME_MAX;
+
+				if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) {
+					QNX4DEBUG(("qnx4_readdir:%.*s\n", size, de->di_fname));
+					if ( ( de->di_status & QNX4_FILE_LINK ) == 0 )
+						ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
+					else {
+						le  = (struct qnx4_link_info*)de;
+						ino = ( le->dl_inode_blk - 1 ) *
+							QNX4_INODES_PER_BLOCK +
+							le->dl_inode_ndx;
+					}
+					if (filldir(dirent, de->di_fname, size, filp->f_pos, ino, DT_UNKNOWN) < 0) {
+						brelse(bh);
+						goto out;
+					}
+				}
+			}
+			ix++;
+			filp->f_pos += QNX4_DIR_ENTRY_SIZE;
+		}
+		brelse(bh);
+	}
+out:
+	unlock_kernel();
+	return 0;
+}
+
+struct file_operations qnx4_dir_operations =
+{
+	.read		= generic_read_dir,
+	.readdir	= qnx4_readdir,
+	.fsync		= file_fsync,
+};
+
+struct inode_operations qnx4_dir_inode_operations =
+{
+	.lookup		= qnx4_lookup,
+#ifdef CONFIG_QNX4FS_RW
+	.create		= qnx4_create,
+	.unlink		= qnx4_unlink,
+	.rmdir		= qnx4_rmdir,
+#endif
+};
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c
new file mode 100644
index 0000000..b471315
--- /dev/null
+++ b/fs/qnx4/file.c
@@ -0,0 +1,42 @@
+/*
+ * QNX4 file system, Linux implementation.
+ *
+ * Version : 0.2.1
+ *
+ * Using parts of the xiafs filesystem.
+ *
+ * History :
+ *
+ * 25-05-1998 by Richard Frowijn : first release.
+ * 21-06-1998 by Frank Denis : wrote qnx4_readpage to use generic_file_read.
+ * 27-06-1998 by Frank Denis : file overwriting.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/qnx4_fs.h>
+
+/*
+ * We have mostly NULL's here: the current defaults are ok for
+ * the qnx4 filesystem.
+ */
+struct file_operations qnx4_file_operations =
+{
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.mmap		= generic_file_mmap,
+	.sendfile	= generic_file_sendfile,
+#ifdef CONFIG_QNX4FS_RW
+	.write		= generic_file_write,
+	.fsync		= qnx4_sync_file,
+#endif
+};
+
+struct inode_operations qnx4_file_inode_operations =
+{
+#ifdef CONFIG_QNX4FS_RW
+	.truncate	= qnx4_truncate,
+#endif
+};
diff --git a/fs/qnx4/fsync.c b/fs/qnx4/fsync.c
new file mode 100644
index 0000000..df5bc75
--- /dev/null
+++ b/fs/qnx4/fsync.c
@@ -0,0 +1,170 @@
+/* 
+ * QNX4 file system, Linux implementation.
+ * 
+ * Version : 0.1
+ * 
+ * Using parts of the xiafs filesystem.
+ * 
+ * History :
+ * 
+ * 24-03-1998 by Richard Frowijn : first release.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+
+#include <asm/system.h>
+
+/*
+ * The functions for qnx4 fs file synchronization.
+ */
+
+#ifdef CONFIG_QNX4FS_RW
+
+static int sync_block(struct inode *inode, unsigned short *block, int wait)
+{
+	struct buffer_head *bh;
+	unsigned short tmp;
+
+	if (!*block)
+		return 0;
+	tmp = *block;
+	bh = sb_find_get_block(inode->i_sb, *block);
+	if (!bh)
+		return 0;
+	if (*block != tmp) {
+		brelse(bh);
+		return 1;
+	}
+	if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
+		brelse(bh);
+		return -1;
+	}
+	if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
+		brelse(bh);
+		return 0;
+	}
+	ll_rw_block(WRITE, 1, &bh);
+	atomic_dec(&bh->b_count);
+	return 0;
+}
+
+#ifdef WTF
+static int sync_iblock(struct inode *inode, unsigned short *iblock,
+		       struct buffer_head **bh, int wait)
+{
+	int rc;
+	unsigned short tmp;
+
+	*bh = NULL;
+	tmp = *iblock;
+	if (!tmp)
+		return 0;
+	rc = sync_block(inode, iblock, wait);
+	if (rc)
+		return rc;
+	*bh = sb_bread(inode->i_sb, tmp);
+	if (tmp != *iblock) {
+		brelse(*bh);
+		*bh = NULL;
+		return 1;
+	}
+	if (!*bh)
+		return -1;
+	return 0;
+}
+#endif
+
+static int sync_direct(struct inode *inode, int wait)
+{
+	int i;
+	int rc, err = 0;
+
+	for (i = 0; i < 7; i++) {
+		rc = sync_block(inode,
+				(unsigned short *) qnx4_raw_inode(inode)->di_first_xtnt.xtnt_blk + i, wait);
+		if (rc > 0)
+			break;
+		if (rc)
+			err = rc;
+	}
+	return err;
+}
+
+#ifdef WTF
+static int sync_indirect(struct inode *inode, unsigned short *iblock, int wait)
+{
+	int i;
+	struct buffer_head *ind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock(inode, iblock, &ind_bh, wait);
+	if (rc || !ind_bh)
+		return rc;
+
+	for (i = 0; i < 512; i++) {
+		rc = sync_block(inode,
+				((unsigned short *) ind_bh->b_data) + i,
+				wait);
+		if (rc > 0)
+			break;
+		if (rc)
+			err = rc;
+	}
+	brelse(ind_bh);
+	return err;
+}
+
+static int sync_dindirect(struct inode *inode, unsigned short *diblock,
+			  int wait)
+{
+	int i;
+	struct buffer_head *dind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock(inode, diblock, &dind_bh, wait);
+	if (rc || !dind_bh)
+		return rc;
+
+	for (i = 0; i < 512; i++) {
+		rc = sync_indirect(inode,
+				((unsigned short *) dind_bh->b_data) + i,
+				   wait);
+		if (rc > 0)
+			break;
+		if (rc)
+			err = rc;
+	}
+	brelse(dind_bh);
+	return err;
+}
+#endif
+
+int qnx4_sync_file(struct file *file, struct dentry *dentry, int unused)
+{
+        struct inode *inode = dentry->d_inode;
+	int wait, err = 0;
+        
+        (void) file;
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return -EINVAL;
+
+	lock_kernel();
+	for (wait = 0; wait <= 1; wait++) {
+		err |= sync_direct(inode, wait);
+	}
+	err |= qnx4_sync_inode(inode);
+	unlock_kernel();
+	return (err < 0) ? -EIO : 0;
+}
+
+#endif
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
new file mode 100644
index 0000000..aa92d6b
--- /dev/null
+++ b/fs/qnx4/inode.c
@@ -0,0 +1,603 @@
+/*
+ * QNX4 file system, Linux implementation.
+ *
+ * Version : 0.2.1
+ *
+ * Using parts of the xiafs filesystem.
+ *
+ * History :
+ *
+ * 01-06-1998 by Richard Frowijn : first release.
+ * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc.
+ * 30-06-1998 by Frank Denis : first step to write inodes.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+#include <linux/init.h>
+#include <linux/highuid.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <asm/uaccess.h>
+
+#define QNX4_VERSION  4
+#define QNX4_BMNAME   ".bitmap"
+
+static struct super_operations qnx4_sops;
+
+#ifdef CONFIG_QNX4FS_RW
+
+int qnx4_sync_inode(struct inode *inode)
+{
+	int err = 0;
+# if 0
+	struct buffer_head *bh;
+
+   	bh = qnx4_update_inode(inode);
+	if (bh && buffer_dirty(bh))
+	{
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh))
+		{
+			printk ("IO error syncing qnx4 inode [%s:%08lx]\n",
+				inode->i_sb->s_id, inode->i_ino);
+			err = -1;
+		}
+	        brelse (bh);
+	} else if (!bh) {
+		err = -1;
+	}
+# endif
+
+	return err;
+}
+
+static void qnx4_delete_inode(struct inode *inode)
+{
+	QNX4DEBUG(("qnx4: deleting inode [%lu]\n", (unsigned long) inode->i_ino));
+	inode->i_size = 0;
+	qnx4_truncate(inode);
+	lock_kernel();
+	qnx4_free_inode(inode);
+	unlock_kernel();
+}
+
+static void qnx4_write_super(struct super_block *sb)
+{
+	lock_kernel();
+	QNX4DEBUG(("qnx4: write_super\n"));
+	sb->s_dirt = 0;
+	unlock_kernel();
+}
+
+static int qnx4_write_inode(struct inode *inode, int unused)
+{
+	struct qnx4_inode_entry *raw_inode;
+	int block, ino;
+	struct buffer_head *bh;
+	ino = inode->i_ino;
+
+	QNX4DEBUG(("qnx4: write inode 1.\n"));
+	if (inode->i_nlink == 0) {
+		return 0;
+	}
+	if (!ino) {
+		printk("qnx4: bad inode number on dev %s: %d is out of range\n",
+		       inode->i_sb->s_id, ino);
+		return -EIO;
+	}
+	QNX4DEBUG(("qnx4: write inode 2.\n"));
+	block = ino / QNX4_INODES_PER_BLOCK;
+	lock_kernel();
+	if (!(bh = sb_bread(inode->i_sb, block))) {
+		printk("qnx4: major problem: unable to read inode from dev "
+		       "%s\n", inode->i_sb->s_id);
+		unlock_kernel();
+		return -EIO;
+	}
+	raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
+	    (ino % QNX4_INODES_PER_BLOCK);
+	raw_inode->di_mode  = cpu_to_le16(inode->i_mode);
+	raw_inode->di_uid   = cpu_to_le16(fs_high2lowuid(inode->i_uid));
+	raw_inode->di_gid   = cpu_to_le16(fs_high2lowgid(inode->i_gid));
+	raw_inode->di_nlink = cpu_to_le16(inode->i_nlink);
+	raw_inode->di_size  = cpu_to_le32(inode->i_size);
+	raw_inode->di_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+	raw_inode->di_atime = cpu_to_le32(inode->i_atime.tv_sec);
+	raw_inode->di_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+	raw_inode->di_first_xtnt.xtnt_size = cpu_to_le32(inode->i_blocks);
+	mark_buffer_dirty(bh);
+	brelse(bh);
+	unlock_kernel();
+	return 0;
+}
+
+#endif
+
+static void qnx4_put_super(struct super_block *sb);
+static struct inode *qnx4_alloc_inode(struct super_block *sb);
+static void qnx4_destroy_inode(struct inode *inode);
+static void qnx4_read_inode(struct inode *);
+static int qnx4_remount(struct super_block *sb, int *flags, char *data);
+static int qnx4_statfs(struct super_block *, struct kstatfs *);
+
+static struct super_operations qnx4_sops =
+{
+	.alloc_inode	= qnx4_alloc_inode,
+	.destroy_inode	= qnx4_destroy_inode,
+	.read_inode	= qnx4_read_inode,
+	.put_super	= qnx4_put_super,
+	.statfs		= qnx4_statfs,
+	.remount_fs	= qnx4_remount,
+#ifdef CONFIG_QNX4FS_RW
+	.write_inode	= qnx4_write_inode,
+	.delete_inode	= qnx4_delete_inode,
+	.write_super	= qnx4_write_super,
+#endif
+};
+
+static int qnx4_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct qnx4_sb_info *qs;
+
+	qs = qnx4_sb(sb);
+	qs->Version = QNX4_VERSION;
+#ifndef CONFIG_QNX4FS_RW
+	*flags |= MS_RDONLY;
+#endif
+	if (*flags & MS_RDONLY) {
+		return 0;
+	}
+
+	mark_buffer_dirty(qs->sb_buf);
+
+	return 0;
+}
+
+static struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
+				       int create)
+{
+	struct buffer_head *result = NULL;
+
+	if ( nr >= 0 )
+		nr = qnx4_block_map( inode, nr );
+	if (nr) {
+		result = sb_getblk(inode->i_sb, nr);
+		return result;
+	}
+	if (!create) {
+		return NULL;
+	}
+#if 0
+	tmp = qnx4_new_block(inode->i_sb);
+	if (!tmp) {
+		return NULL;
+	}
+	result = sb_getblk(inode->i_sb, tmp);
+	if (tst) {
+		qnx4_free_block(inode->i_sb, tmp);
+		brelse(result);
+		goto repeat;
+	}
+	tst = tmp;
+#endif
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return result;
+}
+
+struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
+{
+	struct buffer_head *bh;
+
+	bh = qnx4_getblk(inode, block, create);
+	if (!bh || buffer_uptodate(bh)) {
+		return bh;
+	}
+	ll_rw_block(READ, 1, &bh);
+	wait_on_buffer(bh);
+	if (buffer_uptodate(bh)) {
+		return bh;
+	}
+	brelse(bh);
+
+	return NULL;
+}
+
+static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
+{
+	unsigned long phys;
+
+	QNX4DEBUG(("qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
+
+	phys = qnx4_block_map( inode, iblock );
+	if ( phys ) {
+		// logical block is before EOF
+		map_bh(bh, inode->i_sb, phys);
+	} else if ( create ) {
+		// to be done.
+	}
+	return 0;
+}
+
+unsigned long qnx4_block_map( struct inode *inode, long iblock )
+{
+	int ix;
+	long offset, i_xblk;
+	unsigned long block = 0;
+	struct buffer_head *bh = NULL;
+	struct qnx4_xblk *xblk = NULL;
+	struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
+	qnx4_nxtnt_t nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts);
+
+	if ( iblock < le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size) ) {
+		// iblock is in the first extent. This is easy.
+		block = le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_blk) + iblock - 1;
+	} else {
+		// iblock is beyond first extent. We have to follow the extent chain.
+		i_xblk = le32_to_cpu(qnx4_inode->di_xblk);
+		offset = iblock - le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size);
+		ix = 0;
+		while ( --nxtnt > 0 ) {
+			if ( ix == 0 ) {
+				// read next xtnt block.
+				bh = sb_bread(inode->i_sb, i_xblk - 1);
+				if ( !bh ) {
+					QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
+					return -EIO;
+				}
+				xblk = (struct qnx4_xblk*)bh->b_data;
+				if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
+					QNX4DEBUG(("qnx4: block at %ld is not a valid xtnt\n", i_xblk));
+					return -EIO;
+				}
+			}
+			if ( offset < le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size) ) {
+				// got it!
+				block = le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_blk) + offset - 1;
+				break;
+			}
+			offset -= le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size);
+			if ( ++ix >= xblk->xblk_num_xtnts ) {
+				i_xblk = le32_to_cpu(xblk->xblk_next_xblk);
+				ix = 0;
+				brelse( bh );
+				bh = NULL;
+			}
+		}
+		if ( bh )
+			brelse( bh );
+	}
+
+	QNX4DEBUG(("qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
+	return block;
+}
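+
+/*
+ * Example (illustrative): if di_first_xtnt is { xtnt_blk = 100,
+ * xtnt_size = 10 }, logical block 3 lies inside the first extent and
+ * maps to physical block 100 + 3 - 1 = 102.  Logical block 12 is past
+ * the first extent; the loop above walks the xblk chain with
+ * offset = 12 - 10 = 2 until it finds an extent covering that offset.
+ */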
+
+static int qnx4_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	lock_kernel();
+
+	buf->f_type    = sb->s_magic;
+	buf->f_bsize   = sb->s_blocksize;
+	buf->f_blocks  = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
+	buf->f_bfree   = qnx4_count_free_blocks(sb);
+	buf->f_bavail  = buf->f_bfree;
+	buf->f_namelen = QNX4_NAME_MAX;
+
+	unlock_kernel();
+
+	return 0;
+}
+
+/*
+ * Check the root directory of the filesystem to make sure
+ * it really _is_ a qnx4 filesystem, and to check the size
+ * of the directory entry.
+ */
+static const char *qnx4_checkroot(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct qnx4_inode_entry *rootdir;
+	int rd, rl;
+	int i, j;
+	int found = 0;
+
+	if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
+		return "no qnx4 filesystem (no root dir).";
+	} else {
+		QNX4DEBUG(("QNX4 filesystem found on dev %s.\n", sb->s_id));
+		rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
+		rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
+		for (j = 0; j < rl; j++) {
+			bh = sb_bread(sb, rd + j);	/* root dir, first block */
+			if (bh == NULL) {
+				return "unable to read root entry.";
+			}
+			for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
+				rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
+				if (rootdir->di_fname != NULL) {
+					QNX4DEBUG(("Rootdir entry found : [%s]\n", rootdir->di_fname));
+					if (!strncmp(rootdir->di_fname, QNX4_BMNAME, sizeof QNX4_BMNAME)) {
+						found = 1;
+						qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
+						if (!qnx4_sb(sb)->BitMap) {
+							brelse (bh);
+							return "not enough memory for bitmap inode";
+						}
+						memcpy( qnx4_sb(sb)->BitMap, rootdir, sizeof( struct qnx4_inode_entry ) );	/* keep bitmap inode known */
+						break;
+					}
+				}
+			}
+			brelse(bh);
+			if (found != 0) {
+				break;
+			}
+		}
+		if (found == 0) {
+			return "bitmap file not found.";
+		}
+	}
+	return NULL;
+}
+
+static int qnx4_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct buffer_head *bh;
+	struct inode *root;
+	const char *errmsg;
+	struct qnx4_sb_info *qs;
+
+	qs = kmalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
+	if (!qs)
+		return -ENOMEM;
+	s->s_fs_info = qs;
+	memset(qs, 0, sizeof(struct qnx4_sb_info));
+
+	sb_set_blocksize(s, QNX4_BLOCK_SIZE);
+
+	/* Check the superblock signature. Since the qnx4 code is
+	   dangerous, we should leave as quickly as possible
+	   if we don't belong here... */
+	bh = sb_bread(s, 1);
+	if (!bh) {
+		printk("qnx4: unable to read the superblock\n");
+		goto outnobh;
+	}
+	if ( le32_to_cpu( *(__u32*)bh->b_data ) != QNX4_SUPER_MAGIC ) {
+		if (!silent)
+			printk("qnx4: wrong fsid in superblock.\n");
+		goto out;
+	}
+	s->s_op = &qnx4_sops;
+	s->s_magic = QNX4_SUPER_MAGIC;
+#ifndef CONFIG_QNX4FS_RW
+	s->s_flags |= MS_RDONLY;	/* Yup, read-only yet */
+#endif
+	qnx4_sb(s)->sb_buf = bh;
+	qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data;
+
+
+ 	/* check before allocating dentries, inodes, .. */
+	errmsg = qnx4_checkroot(s);
+	if (errmsg != NULL) {
+ 		if (!silent)
+ 			printk("qnx4: %s\n", errmsg);
+		goto out;
+	}
+
+ 	/* does root not have inode number QNX4_ROOT_INO ?? */
+ 	root = iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
+ 	if (!root) {
+ 		printk("qnx4: get inode failed\n");
+ 		goto out;
+ 	}
+
+ 	s->s_root = d_alloc_root(root);
+ 	if (s->s_root == NULL)
+ 		goto outi;
+
+	brelse(bh);
+
+	return 0;
+
+      outi:
+	iput(root);
+      out:
+	brelse(bh);
+      outnobh:
+	kfree(qs);
+	s->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+static void qnx4_put_super(struct super_block *sb)
+{
+	struct qnx4_sb_info *qs = qnx4_sb(sb);
+	kfree( qs->BitMap );
+	kfree( qs );
+	sb->s_fs_info = NULL;
+	return;
+}
+
+static int qnx4_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page,qnx4_get_block, wbc);
+}
+static int qnx4_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,qnx4_get_block);
+}
+static int qnx4_prepare_write(struct file *file, struct page *page,
+			      unsigned from, unsigned to)
+{
+	struct qnx4_inode_info *qnx4_inode = qnx4_i(page->mapping->host);
+	return cont_prepare_write(page, from, to, qnx4_get_block,
+				  &qnx4_inode->mmu_private);
+}
+static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,qnx4_get_block);
+}
+static struct address_space_operations qnx4_aops = {
+	.readpage	= qnx4_readpage,
+	.writepage	= qnx4_writepage,
+	.sync_page	= block_sync_page,
+	.prepare_write	= qnx4_prepare_write,
+	.commit_write	= generic_commit_write,
+	.bmap		= qnx4_bmap
+};
+
+static void qnx4_read_inode(struct inode *inode)
+{
+	struct buffer_head *bh;
+	struct qnx4_inode_entry *raw_inode;
+	int block, ino;
+	struct super_block *sb = inode->i_sb;
+	struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
+
+	ino = inode->i_ino;
+	inode->i_mode = 0;
+
+	QNX4DEBUG(("Reading inode : [%d]\n", ino));
+	if (!ino) {
+		printk("qnx4: bad inode number on dev %s: %d is out of range\n",
+		       sb->s_id, ino);
+		return;
+	}
+	block = ino / QNX4_INODES_PER_BLOCK;
+
+	if (!(bh = sb_bread(sb, block))) {
+		printk("qnx4: major problem: unable to read inode from dev "
+		       "%s\n", sb->s_id);
+		return;
+	}
+	raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
+	    (ino % QNX4_INODES_PER_BLOCK);
+
+	inode->i_mode    = le16_to_cpu(raw_inode->di_mode);
+	inode->i_uid     = (uid_t)le16_to_cpu(raw_inode->di_uid);
+	inode->i_gid     = (gid_t)le16_to_cpu(raw_inode->di_gid);
+	inode->i_nlink   = le16_to_cpu(raw_inode->di_nlink);
+	inode->i_size    = le32_to_cpu(raw_inode->di_size);
+	inode->i_mtime.tv_sec   = le32_to_cpu(raw_inode->di_mtime);
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_sec   = le32_to_cpu(raw_inode->di_atime);
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_sec   = le32_to_cpu(raw_inode->di_ctime);
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks  = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
+	inode->i_blksize = QNX4_DIR_ENTRY_SIZE;
+
+	memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &qnx4_file_inode_operations;
+		inode->i_fop = &qnx4_file_operations;
+		inode->i_mapping->a_ops = &qnx4_aops;
+		qnx4_i(inode)->mmu_private = inode->i_size;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &qnx4_dir_inode_operations;
+		inode->i_fop = &qnx4_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_mapping->a_ops = &qnx4_aops;
+		qnx4_i(inode)->mmu_private = inode->i_size;
+	} else
+		printk("qnx4: bad inode %d on dev %s\n",ino,sb->s_id);
+	brelse(bh);
+}
+
+static kmem_cache_t *qnx4_inode_cachep;
+
+static struct inode *qnx4_alloc_inode(struct super_block *sb)
+{
+	struct qnx4_inode_info *ei;
+	ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void qnx4_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
+}
+
+static void init_once(void *foo, kmem_cache_t * cachep,
+		      unsigned long flags)
+{
+	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+
+static int init_inodecache(void)
+{
+	qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache",
+					     sizeof(struct qnx4_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (qnx4_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(qnx4_inode_cachep))
+		printk(KERN_INFO
+		       "qnx4_inode_cache: not all structures were freed\n");
+}
+
+static struct super_block *qnx4_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
+}
+
+static struct file_system_type qnx4_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "qnx4",
+	.get_sb		= qnx4_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_qnx4_fs(void)
+{
+	int err;
+
+	err = init_inodecache();
+	if (err)
+		return err;
+
+	err = register_filesystem(&qnx4_fs_type);
+	if (err) {
+		destroy_inodecache();
+		return err;
+	}
+
+	printk("QNX4 filesystem 0.2.3 registered.\n");
+	return 0;
+}
+
+static void __exit exit_qnx4_fs(void)
+{
+	unregister_filesystem(&qnx4_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_qnx4_fs)
+module_exit(exit_qnx4_fs)
+MODULE_LICENSE("GPL");
+
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
new file mode 100644
index 0000000..4af4951
--- /dev/null
+++ b/fs/qnx4/namei.c
@@ -0,0 +1,249 @@
+/* 
+ * QNX4 file system, Linux implementation.
+ * 
+ * Version : 0.2.1
+ * 
+ * Using parts of the xiafs filesystem.
+ * 
+ * History :
+ * 
+ * 01-06-1998 by Richard Frowijn : first release.
+ * 21-06-1998 by Frank Denis : dcache support, fixed error codes.
+ * 04-07-1998 by Frank Denis : first step for rmdir/unlink.
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/errno.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+
+/*
+ * Check if the filename is correct. For some obscure reason, QNX writes a
+ * new file twice in the directory entry: first with all possible options at 0,
+ * and a second time the way it really is, as if they did not want us to
+ * access the QNX filesystem while we are using Linux.
+ */
+static int qnx4_match(int len, const char *name,
+		      struct buffer_head *bh, unsigned long *offset)
+{
+	struct qnx4_inode_entry *de;
+	int namelen, thislen;
+
+	if (bh == NULL) {
+		printk("qnx4: matching unassigned buffer !\n");
+		return 0;
+	}
+	de = (struct qnx4_inode_entry *) (bh->b_data + *offset);
+	*offset += QNX4_DIR_ENTRY_SIZE;
+	if ((de->di_status & QNX4_FILE_LINK) != 0) {
+		namelen = QNX4_NAME_MAX;
+	} else {
+		namelen = QNX4_SHORT_NAME_MAX;
+	}
+	/* "" means "." ---> so paths like "/usr/lib//libc.a" work */
+	if (!len && (de->di_fname[0] == '.') && (de->di_fname[1] == '\0')) {
+		return 1;
+	}
+	thislen = strlen( de->di_fname );
+	if ( thislen > namelen )
+		thislen = namelen;
+	if (len != thislen) {
+		return 0;
+	}
+	if (strncmp(name, de->di_fname, len) == 0) {
+		if ((de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)) != 0) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
+	   const char *name, struct qnx4_inode_entry **res_dir, int *ino)
+{
+	unsigned long block, offset, blkofs;
+	struct buffer_head *bh;
+
+	*res_dir = NULL;
+	if (!dir->i_sb) {
+		printk("qnx4: no superblock on dir.\n");
+		return NULL;
+	}
+	bh = NULL;
+	block = offset = blkofs = 0;
+	while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) {
+		if (!bh) {
+			bh = qnx4_bread(dir, blkofs, 0);
+			if (!bh) {
+				blkofs++;
+				continue;
+			}
+		}
+		*res_dir = (struct qnx4_inode_entry *) (bh->b_data + offset);
+		if (qnx4_match(len, name, bh, &offset)) {
+			block = qnx4_block_map( dir, blkofs );
+			*ino = block * QNX4_INODES_PER_BLOCK +
+			    (offset / QNX4_DIR_ENTRY_SIZE) - 1;
+			return bh;
+		}
+		if (offset < bh->b_size) {
+			continue;
+		}
+		brelse(bh);
+		bh = NULL;
+		offset = 0;
+		blkofs++;
+	}
+	brelse(bh);
+	*res_dir = NULL;
+	return NULL;
+}
+
+struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	int ino;
+	struct qnx4_inode_entry *de;
+	struct qnx4_link_info *lnk;
+	struct buffer_head *bh;
+	const char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	struct inode *foundinode = NULL;
+
+	lock_kernel();
+	if (!(bh = qnx4_find_entry(len, dir, name, &de, &ino)))
+		goto out;
+	/* The entry is linked, let's get the real info */
+	if ((de->di_status & QNX4_FILE_LINK) == QNX4_FILE_LINK) {
+		lnk = (struct qnx4_link_info *) de;
+		ino = (le32_to_cpu(lnk->dl_inode_blk) - 1) *
+                    QNX4_INODES_PER_BLOCK +
+		    lnk->dl_inode_ndx;
+	}
+	brelse(bh);
+
+	if ((foundinode = iget(dir->i_sb, ino)) == NULL) {
+		unlock_kernel();
+		QNX4DEBUG(("qnx4: lookup->iget -> NULL\n"));
+		return ERR_PTR(-EACCES);
+	}
+out:
+	unlock_kernel();
+	d_add(dentry, foundinode);
+
+	return NULL;
+}
+
+#ifdef CONFIG_QNX4FS_RW
+int qnx4_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	QNX4DEBUG(("qnx4: qnx4_create\n"));
+	if (dir == NULL) {
+		return -ENOENT;
+	}
+	return -ENOSPC;
+}
+
+int qnx4_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct buffer_head *bh;
+	struct qnx4_inode_entry *de;
+	struct inode *inode;
+	int retval;
+	int ino;
+
+	QNX4DEBUG(("qnx4: qnx4_rmdir [%s]\n", dentry->d_name.name));
+	lock_kernel();
+	bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name,
+			     &de, &ino);
+	if (bh == NULL) {
+		unlock_kernel();
+		return -ENOENT;
+	}
+	inode = dentry->d_inode;
+	if (inode->i_ino != ino) {
+		retval = -EIO;
+		goto end_rmdir;
+	}
+#if 0
+	if (!empty_dir(inode)) {
+		retval = -ENOTEMPTY;
+		goto end_rmdir;
+	}
+#endif
+	if (inode->i_nlink != 2) {
+		QNX4DEBUG(("empty directory has nlink!=2 (%d)\n", inode->i_nlink));
+	}
+	QNX4DEBUG(("qnx4: deleting directory\n"));
+	de->di_status = 0;
+	memset(de->di_fname, 0, sizeof de->di_fname);
+	de->di_mode = 0;
+	mark_buffer_dirty(bh);
+	inode->i_nlink = 0;
+	mark_inode_dirty(inode);
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	dir->i_nlink--;
+	mark_inode_dirty(dir);
+	retval = 0;
+
+      end_rmdir:
+	brelse(bh);
+
+	unlock_kernel();
+	return retval;
+}
+
+int qnx4_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct buffer_head *bh;
+	struct qnx4_inode_entry *de;
+	struct inode *inode;
+	int retval;
+	int ino;
+
+	QNX4DEBUG(("qnx4: qnx4_unlink [%s]\n", dentry->d_name.name));
+	lock_kernel();
+	bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name,
+			     &de, &ino);
+	if (bh == NULL) {
+		unlock_kernel();
+		return -ENOENT;
+	}
+	inode = dentry->d_inode;
+	if (inode->i_ino != ino) {
+		retval = -EIO;
+		goto end_unlink;
+	}
+	retval = -EPERM;
+	if (!inode->i_nlink) {
+		QNX4DEBUG(("Deleting nonexistent file (%s:%lu), %d\n",
+			   inode->i_sb->s_id,
+			   inode->i_ino, inode->i_nlink));
+		inode->i_nlink = 1;
+	}
+	de->di_status = 0;
+	memset(de->di_fname, 0, sizeof de->di_fname);
+	de->di_mode = 0;
+	mark_buffer_dirty(bh);
+	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+	inode->i_nlink--;
+	inode->i_ctime = dir->i_ctime;
+	mark_inode_dirty(inode);
+	retval = 0;
+
+end_unlink:
+	unlock_kernel();
+	brelse(bh);
+
+	return retval;
+}
+#endif
diff --git a/fs/qnx4/truncate.c b/fs/qnx4/truncate.c
new file mode 100644
index 0000000..86563ec
--- /dev/null
+++ b/fs/qnx4/truncate.c
@@ -0,0 +1,39 @@
+/* 
+ * QNX4 file system, Linux implementation.
+ * 
+ * Version : 0.1
+ * 
+ * Using parts of the xiafs filesystem.
+ * 
+ * History :
+ * 
+ * 30-06-1998 by Frank DENIS : ugly filler.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/qnx4_fs.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_QNX4FS_RW
+
+void qnx4_truncate(struct inode *inode)
+{
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode))) {
+		return;
+	}
+	lock_kernel();
+	if (!(S_ISDIR(inode->i_mode))) {
+		/* TODO */
+	}
+	QNX4DEBUG(("qnx4: qnx4_truncate called\n"));
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	unlock_kernel();
+}
+
+#endif
diff --git a/fs/quota.c b/fs/quota.c
new file mode 100644
index 0000000..3f0333a
--- /dev/null
+++ b/fs/quota.c
@@ -0,0 +1,382 @@
+/*
+ * Quota code necessary even when VFS quota support is not compiled
+ * into the kernel.  The interesting stuff is over in dquot.c; here
+ * we have symbols for initial quotactl(2) handling, the sysctl(2)
+ * variables, etc. - things needed even when quota support is disabled.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/buffer_head.h>
+
+/* Check validity of generic quotactl commands */
+static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+{
+	if (type >= MAXQUOTAS)
+		return -EINVAL;
+	if (!sb && cmd != Q_SYNC)
+		return -ENODEV;
+	/* Is operation supported? */
+	if (sb && !sb->s_qcop)
+		return -ENOSYS;
+
+	switch (cmd) {
+		case Q_GETFMT:
+			break;
+		case Q_QUOTAON:
+			if (!sb->s_qcop->quota_on)
+				return -ENOSYS;
+			break;
+		case Q_QUOTAOFF:
+			if (!sb->s_qcop->quota_off)
+				return -ENOSYS;
+			break;
+		case Q_SETINFO:
+			if (!sb->s_qcop->set_info)
+				return -ENOSYS;
+			break;
+		case Q_GETINFO:
+			if (!sb->s_qcop->get_info)
+				return -ENOSYS;
+			break;
+		case Q_SETQUOTA:
+			if (!sb->s_qcop->set_dqblk)
+				return -ENOSYS;
+			break;
+		case Q_GETQUOTA:
+			if (!sb->s_qcop->get_dqblk)
+				return -ENOSYS;
+			break;
+		case Q_SYNC:
+			if (sb && !sb->s_qcop->quota_sync)
+				return -ENOSYS;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Is quota turned on for commands which need it? */
+	switch (cmd) {
+		case Q_GETFMT:
+		case Q_GETINFO:
+		case Q_QUOTAOFF:
+		case Q_SETINFO:
+		case Q_SETQUOTA:
+		case Q_GETQUOTA:
+			/* This is just an informative test, so we are satisfied without a lock */
+			if (!sb_has_quota_enabled(sb, type))
+				return -ESRCH;
+	}
+
+	/* Check privileges */
+	if (cmd == Q_GETQUOTA) {
+		if (((type == USRQUOTA && current->euid != id) ||
+		     (type == GRPQUOTA && !in_egroup_p(id))) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+	return 0;
+}
+
+/* Check validity of XFS Quota Manager commands */
+static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+{
+	if (type >= XQM_MAXQUOTAS)
+		return -EINVAL;
+	if (!sb)
+		return -ENODEV;
+	if (!sb->s_qcop)
+		return -ENOSYS;
+
+	switch (cmd) {
+		case Q_XQUOTAON:
+		case Q_XQUOTAOFF:
+		case Q_XQUOTARM:
+			if (!sb->s_qcop->set_xstate)
+				return -ENOSYS;
+			break;
+		case Q_XGETQSTAT:
+			if (!sb->s_qcop->get_xstate)
+				return -ENOSYS;
+			break;
+		case Q_XSETQLIM:
+			if (!sb->s_qcop->set_xquota)
+				return -ENOSYS;
+			break;
+		case Q_XGETQUOTA:
+			if (!sb->s_qcop->get_xquota)
+				return -ENOSYS;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Check privileges */
+	if (cmd == Q_XGETQUOTA) {
+		if (((type == XQM_USRQUOTA && current->euid != id) ||
+		     (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
+		     !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	} else if (cmd != Q_XGETQSTAT) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+
+	return 0;
+}
+
+static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+{
+	int error;
+
+	if (XQM_COMMAND(cmd))
+		error = xqm_quotactl_valid(sb, type, cmd, id);
+	else
+		error = generic_quotactl_valid(sb, type, cmd, id);
+	if (!error)
+		error = security_quotactl(cmd, type, id, sb);
+	return error;
+}
+
+static struct super_block *get_super_to_sync(int type)
+{
+	struct list_head *head;
+	int cnt, dirty;
+
+restart:
+	spin_lock(&sb_lock);
+	list_for_each(head, &super_blocks) {
+		struct super_block *sb = list_entry(head, struct super_block, s_list);
+
+		/* This test just improves performance so it needn't be reliable... */
+		for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++)
+			if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt)
+			    && info_any_dirty(&sb_dqopt(sb)->info[cnt]))
+				dirty = 1;
+		if (!dirty)
+			continue;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (!sb->s_root) {
+			drop_super(sb);
+			goto restart;
+		}
+		return sb;
+	}
+	spin_unlock(&sb_lock);
+	return NULL;
+}
+
+static void quota_sync_sb(struct super_block *sb, int type)
+{
+	int cnt;
+	struct inode *discard[MAXQUOTAS];
+
+	sb->s_qcop->quota_sync(sb, type);
+	/* This is not very clever (nor fast) but currently I don't know of
+	 * any other simple way of getting quota data to disk, and we must get
+	 * it there for the changes to be visible to userspace... */
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, 1);
+	sync_blockdev(sb->s_bdev);
+
+	/* Now that everything is written we can discard the pagecache so
+	 * that userspace sees the changes. We need i_sem and so we could
+	 * not do it inside dqonoff_sem. Moreover we need to be careful
+	 * about races with quotaoff() (that is the reason why we hold our
+	 * own reference to the inode). */
+	down(&sb_dqopt(sb)->dqonoff_sem);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		discard[cnt] = NULL;
+		if (type != -1 && cnt != type)
+			continue;
+		if (!sb_has_quota_enabled(sb, cnt))
+			continue;
+		discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
+	}
+	up(&sb_dqopt(sb)->dqonoff_sem);
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (discard[cnt]) {
+			down(&discard[cnt]->i_sem);
+			truncate_inode_pages(&discard[cnt]->i_data, 0);
+			up(&discard[cnt]->i_sem);
+			iput(discard[cnt]);
+		}
+	}
+}
+
+void sync_dquots(struct super_block *sb, int type)
+{
+	if (sb) {
+		if (sb->s_qcop->quota_sync)
+			quota_sync_sb(sb, type);
+	}
+	else {
+		while ((sb = get_super_to_sync(type)) != NULL) {
+			if (sb->s_qcop->quota_sync)
+				quota_sync_sb(sb, type);
+			drop_super(sb);
+		}
+	}
+}
+
+/* Copy parameters and call proper function */
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
+{
+	int ret;
+
+	switch (cmd) {
+		case Q_QUOTAON: {
+			char *pathname;
+
+			if (IS_ERR(pathname = getname(addr)))
+				return PTR_ERR(pathname);
+			ret = sb->s_qcop->quota_on(sb, type, id, pathname);
+			putname(pathname);
+			return ret;
+		}
+		case Q_QUOTAOFF:
+			return sb->s_qcop->quota_off(sb, type);
+
+		case Q_GETFMT: {
+			__u32 fmt;
+
+			down_read(&sb_dqopt(sb)->dqptr_sem);
+			if (!sb_has_quota_enabled(sb, type)) {
+				up_read(&sb_dqopt(sb)->dqptr_sem);
+				return -ESRCH;
+			}
+			fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
+			up_read(&sb_dqopt(sb)->dqptr_sem);
+			if (copy_to_user(addr, &fmt, sizeof(fmt)))
+				return -EFAULT;
+			return 0;
+		}
+		case Q_GETINFO: {
+			struct if_dqinfo info;
+
+			if ((ret = sb->s_qcop->get_info(sb, type, &info)))
+				return ret;
+			if (copy_to_user(addr, &info, sizeof(info)))
+				return -EFAULT;
+			return 0;
+		}
+		case Q_SETINFO: {
+			struct if_dqinfo info;
+
+			if (copy_from_user(&info, addr, sizeof(info)))
+				return -EFAULT;
+			return sb->s_qcop->set_info(sb, type, &info);
+		}
+		case Q_GETQUOTA: {
+			struct if_dqblk idq;
+
+			if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
+				return ret;
+			if (copy_to_user(addr, &idq, sizeof(idq)))
+				return -EFAULT;
+			return 0;
+		}
+		case Q_SETQUOTA: {
+			struct if_dqblk idq;
+
+			if (copy_from_user(&idq, addr, sizeof(idq)))
+				return -EFAULT;
+			return sb->s_qcop->set_dqblk(sb, type, id, &idq);
+		}
+		case Q_SYNC:
+			sync_dquots(sb, type);
+			return 0;
+
+		case Q_XQUOTAON:
+		case Q_XQUOTAOFF:
+		case Q_XQUOTARM: {
+			__u32 flags;
+
+			if (copy_from_user(&flags, addr, sizeof(flags)))
+				return -EFAULT;
+			return sb->s_qcop->set_xstate(sb, flags, cmd);
+		}
+		case Q_XGETQSTAT: {
+			struct fs_quota_stat fqs;
+		
+			if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
+				return ret;
+			if (copy_to_user(addr, &fqs, sizeof(fqs)))
+				return -EFAULT;
+			return 0;
+		}
+		case Q_XSETQLIM: {
+			struct fs_disk_quota fdq;
+
+			if (copy_from_user(&fdq, addr, sizeof(fdq)))
+				return -EFAULT;
+		       return sb->s_qcop->set_xquota(sb, type, id, &fdq);
+		}
+		case Q_XGETQUOTA: {
+			struct fs_disk_quota fdq;
+
+			if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
+				return ret;
+			if (copy_to_user(addr, &fdq, sizeof(fdq)))
+				return -EFAULT;
+			return 0;
+		}
+		/* We never reach here unless the validity check is broken */
+		default:
+			BUG();
+	}
+	return 0;
+}
+
+/*
+ * This is the system call interface. This communicates with
+ * the user-level programs. Currently this only supports disk quota
+ * calls. Maybe we need to add the process quotas etc. in the future,
+ * but we probably should use rlimits for that.
+ */
+asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr)
+{
+	uint cmds, type;
+	struct super_block *sb = NULL;
+	struct block_device *bdev;
+	char *tmp;
+	int ret;
+
+	cmds = cmd >> SUBCMDSHIFT;
+	type = cmd & SUBCMDMASK;
+
+	if (cmds != Q_SYNC || special) {
+		tmp = getname(special);
+		if (IS_ERR(tmp))
+			return PTR_ERR(tmp);
+		bdev = lookup_bdev(tmp);
+		putname(tmp);
+		if (IS_ERR(bdev))
+			return PTR_ERR(bdev);
+		sb = get_super(bdev);
+		bdput(bdev);
+		if (!sb)
+			return -ENODEV;
+	}
+
+	ret = check_quotactl_valid(sb, type, cmds, id);
+	if (ret >= 0)
+		ret = do_quotactl(sb, type, cmds, id, addr);
+	if (sb)
+		drop_super(sb);
+
+	return ret;
+}
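+
+/*
+ * Usage sketch (illustrative, from the userspace side): the cmd word is
+ * built with the QCMD() macro from <linux/quota.h>, which packs the
+ * subcommand and quota type exactly as they are unpacked above, e.g.
+ *
+ *	struct if_dqblk dq;
+ *	quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", uid, (caddr_t)&dq);
+ *
+ * The device path and uid here are placeholders, not values used by
+ * this file.
+ */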
diff --git a/fs/quota_v1.c b/fs/quota_v1.c
new file mode 100644
index 0000000..f3841f2
--- /dev/null
+++ b/fs/quota_v1.c
@@ -0,0 +1,200 @@
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_v1.h>
+#include <linux/quotaio_v1.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/byteorder.h>
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Old quota format support");
+MODULE_LICENSE("GPL");
+
+static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
+{
+	m->dqb_ihardlimit = d->dqb_ihardlimit;
+	m->dqb_isoftlimit = d->dqb_isoftlimit;
+	m->dqb_curinodes = d->dqb_curinodes;
+	m->dqb_bhardlimit = d->dqb_bhardlimit;
+	m->dqb_bsoftlimit = d->dqb_bsoftlimit;
+	m->dqb_curspace = ((qsize_t)d->dqb_curblocks) << QUOTABLOCK_BITS;
+	m->dqb_itime = d->dqb_itime;
+	m->dqb_btime = d->dqb_btime;
+}
+
+static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
+{
+	d->dqb_ihardlimit = m->dqb_ihardlimit;
+	d->dqb_isoftlimit = m->dqb_isoftlimit;
+	d->dqb_curinodes = m->dqb_curinodes;
+	d->dqb_bhardlimit = m->dqb_bhardlimit;
+	d->dqb_bsoftlimit = m->dqb_bsoftlimit;
+	d->dqb_curblocks = toqb(m->dqb_curspace);
+	d->dqb_itime = m->dqb_itime;
+	d->dqb_btime = m->dqb_btime;
+}
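+
+/*
+ * Example (illustrative): the v1 format stores space usage in 1 KiB
+ * quota blocks (QUOTABLOCK_BITS is 10), while the in-memory dqblk
+ * keeps bytes.  A dqb_curblocks of 5 thus becomes
+ * dqb_curspace = 5 << 10 = 5120 bytes on read, and toqb() converts it
+ * back (rounding up) on write.
+ */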
+
+static int v1_read_dqblk(struct dquot *dquot)
+{
+	int type = dquot->dq_type;
+	struct v1_disk_dqblk dqblk;
+
+	if (!sb_dqopt(dquot->dq_sb)->files[type])
+		return -EINVAL;
+
+	/* Set structure to 0s in case read fails/is after end of file */
+	memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
+	dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+
+	v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
+	if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+	    dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
+		set_bit(DQ_FAKE_B, &dquot->dq_flags);
+	dqstats.reads++;
+
+	return 0;
+}
+
+static int v1_commit_dqblk(struct dquot *dquot)
+{
+	short type = dquot->dq_type;
+	ssize_t ret;
+	struct v1_disk_dqblk dqblk;
+
+	v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
+	if (dquot->dq_id == 0) {
+		dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+		dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+	}
+	ret = 0;
+	if (sb_dqopt(dquot->dq_sb)->files[type])
+		ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
+					sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+	if (ret != sizeof(struct v1_disk_dqblk)) {
+		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
+			dquot->dq_sb->s_id);
+		if (ret >= 0)
+			ret = -EIO;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	dqstats.writes++;
+
+	return ret;
+}
+
+/* Magics of new quota format */
+#define V2_INITQMAGICS {\
+	0xd9c01f11,     /* USRQUOTA */\
+	0xd9c01927      /* GRPQUOTA */\
+}
+
+/* Header of new quota format */
+struct v2_disk_dqheader {
+	__le32 dqh_magic;        /* Magic number identifying file */
+	__le32 dqh_version;      /* File version */
+};
+
+static int v1_check_quota_file(struct super_block *sb, int type)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	ulong blocks;
+	size_t off; 
+	struct v2_disk_dqheader dqhead;
+	ssize_t size;
+	loff_t isize;
+	static const uint quota_magics[] = V2_INITQMAGICS;
+
+	isize = i_size_read(inode);
+	if (!isize)
+		return 0;
+	blocks = isize >> BLOCK_SIZE_BITS;
+	off = isize & (BLOCK_SIZE - 1);
+	if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
+		return 0;
+	/* Double-check that we didn't get a file in the new format - with the old quotactl() this could happen */
+	size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+	if (size != sizeof(struct v2_disk_dqheader))
+		return 1;	/* Probably not new format */
+	if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
+		return 1;	/* Definitely not new format */
+	printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
+        return 0;		/* Seems like a new format file -> refuse it */
+}
+
+static int v1_read_file_info(struct super_block *sb, int type)
+{
+	struct quota_info *dqopt = sb_dqopt(sb);
+	struct v1_disk_dqblk dqblk;
+	int ret;
+
+	if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+		if (ret >= 0)
+			ret = -EIO;
+		goto out;
+	}
+	ret = 0;
+	dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+	dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+out:
+	return ret;
+}
+
+static int v1_write_file_info(struct super_block *sb, int type)
+{
+	struct quota_info *dqopt = sb_dqopt(sb);
+	struct v1_disk_dqblk dqblk;
+	int ret;
+
+	dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
+	if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+	    sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+		if (ret >= 0)
+			ret = -EIO;
+		goto out;
+	}
+	dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
+	dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
+	ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
+	      sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+	if (ret == sizeof(struct v1_disk_dqblk))
+		ret = 0;
+	else if (ret > 0)
+		ret = -EIO;
+out:
+	return ret;
+}
+
+static struct quota_format_ops v1_format_ops = {
+	.check_quota_file	= v1_check_quota_file,
+	.read_file_info		= v1_read_file_info,
+	.write_file_info	= v1_write_file_info,
+	.free_file_info		= NULL,
+	.read_dqblk		= v1_read_dqblk,
+	.commit_dqblk		= v1_commit_dqblk,
+};
+
+static struct quota_format_type v1_quota_format = {
+	.qf_fmt_id	= QFMT_VFS_OLD,
+	.qf_ops		= &v1_format_ops,
+	.qf_owner	= THIS_MODULE
+};
+
+static int __init init_v1_quota_format(void)
+{
+        return register_quota_format(&v1_quota_format);
+}
+
+static void __exit exit_v1_quota_format(void)
+{
+        unregister_quota_format(&v1_quota_format);
+}
+
+module_init(init_v1_quota_format);
+module_exit(exit_v1_quota_format);
+
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
new file mode 100644
index 0000000..19bdb7b
--- /dev/null
+++ b/fs/quota_v2.c
@@ -0,0 +1,693 @@
+/*
+ *	vfsv0 quota IO operations on file
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/dqblk_v2.h>
+#include <linux/quotaio_v2.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Quota format v2 support");
+MODULE_LICENSE("GPL");
+
+#define __QUOTA_V2_PARANOIA
+
+typedef char *dqbuf_t;
+
+#define GETIDINDEX(id, depth) (((id) >> ((V2_DQTREEDEPTH-(depth)-1)*8)) & 0xff)
+#define GETENTRIES(buf) ((struct v2_disk_dqblk *)(((char *)buf)+sizeof(struct v2_disk_dqdbheader)))
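+
+/*
+ * Example (illustrative, assuming V2_DQTREEDEPTH is 4): the quota tree
+ * is a radix tree with 256-way fanout indexed by the bytes of the id,
+ * most significant first.  For id 0x00010203, GETIDINDEX(id, 0) = 0x00,
+ * GETIDINDEX(id, 1) = 0x01, GETIDINDEX(id, 2) = 0x02 and
+ * GETIDINDEX(id, 3) = 0x03.
+ */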
+
+/* Check whether given file is really vfsv0 quotafile */
+static int v2_check_quota_file(struct super_block *sb, int type)
+{
+	struct v2_disk_dqheader dqhead;
+	ssize_t size;
+	static const uint quota_magics[] = V2_INITQMAGICS;
+	static const uint quota_versions[] = V2_INITQVERSIONS;
+ 
+	size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+	if (size != sizeof(struct v2_disk_dqheader)) {
+		printk(KERN_WARNING "VFS: %s: failed to read quota file header.\n", sb->s_id);
+		return 0;
+	}
+	if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
+	    le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
+		return 0;
+	return 1;
+}
+
+/* Read information header from quota file */
+static int v2_read_file_info(struct super_block *sb, int type)
+{
+	struct v2_disk_dqinfo dinfo;
+	struct mem_dqinfo *info = sb_dqopt(sb)->info+type;
+	ssize_t size;
+
+	size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
+	       sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+	if (size != sizeof(struct v2_disk_dqinfo)) {
+		printk(KERN_WARNING "Can't read info structure on device %s.\n",
+			sb->s_id);
+		return -1;
+	}
+	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
+	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
+	info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
+	info->u.v2_i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
+	info->u.v2_i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
+	info->u.v2_i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
+	return 0;
+}
+
+/* Write information header to quota file */
+static int v2_write_file_info(struct super_block *sb, int type)
+{
+	struct v2_disk_dqinfo dinfo;
+	struct mem_dqinfo *info = sb_dqopt(sb)->info+type;
+	ssize_t size;
+
+	spin_lock(&dq_data_lock);
+	info->dqi_flags &= ~DQF_INFO_DIRTY;
+	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
+	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
+	dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+	spin_unlock(&dq_data_lock);
+	dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.dqi_blocks);
+	dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.dqi_free_blk);
+	dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.dqi_free_entry);
+	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
+	       sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+	if (size != sizeof(struct v2_disk_dqinfo)) {
+		printk(KERN_WARNING "Can't write info structure on device %s.\n",
+			sb->s_id);
+		return -1;
+	}
+	return 0;
+}
+
+static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d)
+{
+	m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
+	m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
+	m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
+	m->dqb_itime = le64_to_cpu(d->dqb_itime);
+	m->dqb_bhardlimit = le32_to_cpu(d->dqb_bhardlimit);
+	m->dqb_bsoftlimit = le32_to_cpu(d->dqb_bsoftlimit);
+	m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+	m->dqb_btime = le64_to_cpu(d->dqb_btime);
+}
+
+static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id)
+{
+	d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
+	d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
+	d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
+	d->dqb_itime = cpu_to_le64(m->dqb_itime);
+	d->dqb_bhardlimit = cpu_to_le32(m->dqb_bhardlimit);
+	d->dqb_bsoftlimit = cpu_to_le32(m->dqb_bsoftlimit);
+	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+	d->dqb_btime = cpu_to_le64(m->dqb_btime);
+	d->dqb_id = cpu_to_le32(id);
+}
+
+static dqbuf_t getdqbuf(void)
+{
+	dqbuf_t buf = kmalloc(V2_DQBLKSIZE, GFP_NOFS);
+	if (!buf)
+		printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
+	return buf;
+}
+
+static inline void freedqbuf(dqbuf_t buf)
+{
+	kfree(buf);
+}
+
+static inline ssize_t read_blk(struct super_block *sb, int type, uint blk, dqbuf_t buf)
+{
+	memset(buf, 0, V2_DQBLKSIZE);
+	return sb->s_op->quota_read(sb, type, (char *)buf,
+	       V2_DQBLKSIZE, blk << V2_DQBLKSIZE_BITS);
+}
+
+static inline ssize_t write_blk(struct super_block *sb, int type, uint blk, dqbuf_t buf)
+{
+	return sb->s_op->quota_write(sb, type, (char *)buf,
+	       V2_DQBLKSIZE, blk << V2_DQBLKSIZE_BITS);
+}
+
+/* Remove empty block from list and return it */
+static int get_free_dqblk(struct super_block *sb, int type)
+{
+	dqbuf_t buf = getdqbuf();
+	struct mem_dqinfo *info = sb_dqinfo(sb, type);
+	struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf;
+	int ret, blk;
+
+	if (!buf)
+		return -ENOMEM;
+	if (info->u.v2_i.dqi_free_blk) {
+		blk = info->u.v2_i.dqi_free_blk;
+		if ((ret = read_blk(sb, type, blk, buf)) < 0)
+			goto out_buf;
+		info->u.v2_i.dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
+	}
+	else {
+		memset(buf, 0, V2_DQBLKSIZE);
+		/* Assure block allocation... */
+		if ((ret = write_blk(sb, type, info->u.v2_i.dqi_blocks, buf)) < 0)
+			goto out_buf;
+		blk = info->u.v2_i.dqi_blocks++;
+	}
+	mark_info_dirty(sb, type);
+	ret = blk;
+out_buf:
+	freedqbuf(buf);
+	return ret;
+}
+
+/* Insert empty block to the list */
+static int put_free_dqblk(struct super_block *sb, int type, dqbuf_t buf, uint blk)
+{
+	struct mem_dqinfo *info = sb_dqinfo(sb, type);
+	struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf;
+	int err;
+
+	dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_blk);
+	dh->dqdh_prev_free = cpu_to_le32(0);
+	dh->dqdh_entries = cpu_to_le16(0);
+	info->u.v2_i.dqi_free_blk = blk;
+	mark_info_dirty(sb, type);
+	/* Some strange block. We had better leave it... */
+	if ((err = write_blk(sb, type, blk, buf)) < 0)
+		return err;
+	return 0;
+}
+
+/* Remove given block from the list of blocks with free entries */
+static int remove_free_dqentry(struct super_block *sb, int type, dqbuf_t buf, uint blk)
+{
+	dqbuf_t tmpbuf = getdqbuf();
+	struct mem_dqinfo *info = sb_dqinfo(sb, type);
+	struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf;
+	uint nextblk = le32_to_cpu(dh->dqdh_next_free), prevblk = le32_to_cpu(dh->dqdh_prev_free);
+	int err;
+
+	if (!tmpbuf)
+		return -ENOMEM;
+	if (nextblk) {
+		if ((err = read_blk(sb, type, nextblk, tmpbuf)) < 0)
+			goto out_buf;
+		((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = dh->dqdh_prev_free;
+		if ((err = write_blk(sb, type, nextblk, tmpbuf)) < 0)
+			goto out_buf;
+	}
+	if (prevblk) {
+		if ((err = read_blk(sb, type, prevblk, tmpbuf)) < 0)
+			goto out_buf;
+		((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_next_free = dh->dqdh_next_free;
+		if ((err = write_blk(sb, type, prevblk, tmpbuf)) < 0)
+			goto out_buf;
+	}
+	else {
+		info->u.v2_i.dqi_free_entry = nextblk;
+		mark_info_dirty(sb, type);
+	}
+	freedqbuf(tmpbuf);
+	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
+	/* No matter whether the write succeeds, the block is out of the list */
+	if (write_blk(sb, type, blk, buf) < 0)
+		printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
+	return 0;
+out_buf:
+	freedqbuf(tmpbuf);
+	return err;
+}
+
+/* Insert given block to the beginning of list with free entries */
+static int insert_free_dqentry(struct super_block *sb, int type, dqbuf_t buf, uint blk)
+{
+	dqbuf_t tmpbuf = getdqbuf();
+	struct mem_dqinfo *info = sb_dqinfo(sb, type);
+	struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf;
+	int err;
+
+	if (!tmpbuf)
+		return -ENOMEM;
+	dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_entry);
+	dh->dqdh_prev_free = cpu_to_le32(0);
+	if ((err = write_blk(sb, type, blk, buf)) < 0)
+		goto out_buf;
+	if (info->u.v2_i.dqi_free_entry) {
+		if ((err = read_blk(sb, type, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0)
+			goto out_buf;
+		((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = cpu_to_le32(blk);
+		if ((err = write_blk(sb, type, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0)
+			goto out_buf;
+	}
+	freedqbuf(tmpbuf);
+	info->u.v2_i.dqi_free_entry = blk;
+	mark_info_dirty(sb, type);
+	return 0;
+out_buf:
+	freedqbuf(tmpbuf);
+	return err;
+}
+
+/* Find space for dquot */
+static uint find_free_dqentry(struct dquot *dquot, int *err)
+{
+	struct super_block *sb = dquot->dq_sb;
+	struct mem_dqinfo *info = sb_dqopt(sb)->info+dquot->dq_type;
+	uint blk, i;
+	struct v2_disk_dqdbheader *dh;
+	struct v2_disk_dqblk *ddquot;
+	struct v2_disk_dqblk fakedquot;
+	dqbuf_t buf;
+
+	*err = 0;
+	if (!(buf = getdqbuf())) {
+		*err = -ENOMEM;
+		return 0;
+	}
+	dh = (struct v2_disk_dqdbheader *)buf;
+	ddquot = GETENTRIES(buf);
+	if (info->u.v2_i.dqi_free_entry) {
+		blk = info->u.v2_i.dqi_free_entry;
+		if ((*err = read_blk(sb, dquot->dq_type, blk, buf)) < 0)
+			goto out_buf;
+	}
+	else {
+		blk = get_free_dqblk(sb, dquot->dq_type);
+		if ((int)blk < 0) {
+			*err = blk;
+			freedqbuf(buf);
+			return 0;
+		}
+		memset(buf, 0, V2_DQBLKSIZE);
+		/* This is enough as block is already zeroed and entry list is empty... */
+		info->u.v2_i.dqi_free_entry = blk;
+		mark_info_dirty(sb, dquot->dq_type);
+	}
+	if (le16_to_cpu(dh->dqdh_entries)+1 >= V2_DQSTRINBLK)	/* Block will be full? */
+		if ((*err = remove_free_dqentry(sb, dquot->dq_type, buf, blk)) < 0) {
+			printk(KERN_ERR "VFS: find_free_dqentry(): Can't remove block (%u) from entry free list.\n", blk);
+			goto out_buf;
+		}
+	dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)+1);
+	memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk));
+	/* Find free structure in block */
+	for (i = 0; i < V2_DQSTRINBLK && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)); i++);
+#ifdef __QUOTA_V2_PARANOIA
+	if (i == V2_DQSTRINBLK) {
+		printk(KERN_ERR "VFS: find_free_dqentry(): Data block full but it shouldn't be.\n");
+		*err = -EIO;
+		goto out_buf;
+	}
+#endif
+	if ((*err = write_blk(sb, dquot->dq_type, blk, buf)) < 0) {
+		printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota data block %u.\n", blk);
+		goto out_buf;
+	}
+	dquot->dq_off = (blk<<V2_DQBLKSIZE_BITS)+sizeof(struct v2_disk_dqdbheader)+i*sizeof(struct v2_disk_dqblk);
+	freedqbuf(buf);
+	return blk;
+out_buf:
+	freedqbuf(buf);
+	return 0;
+}
+
+/* Insert reference to structure into the trie */
+static int do_insert_tree(struct dquot *dquot, uint *treeblk, int depth)
+{
+	struct super_block *sb = dquot->dq_sb;
+	dqbuf_t buf;
+	int ret = 0, newson = 0, newact = 0;
+	__le32 *ref;
+	uint newblk;
+
+	if (!(buf = getdqbuf()))
+		return -ENOMEM;
+	if (!*treeblk) {
+		ret = get_free_dqblk(sb, dquot->dq_type);
+		if (ret < 0)
+			goto out_buf;
+		*treeblk = ret;
+		memset(buf, 0, V2_DQBLKSIZE);
+		newact = 1;
+	}
+	else {
+		if ((ret = read_blk(sb, dquot->dq_type, *treeblk, buf)) < 0) {
+			printk(KERN_ERR "VFS: Can't read tree quota block %u.\n", *treeblk);
+			goto out_buf;
+		}
+	}
+	ref = (__le32 *)buf;
+	newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]);
+	if (!newblk)
+		newson = 1;
+	if (depth == V2_DQTREEDEPTH-1) {
+#ifdef __QUOTA_V2_PARANOIA
+		if (newblk) {
+			printk(KERN_ERR "VFS: Inserting already present quota entry (block %u).\n", le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]));
+			ret = -EIO;
+			goto out_buf;
+		}
+#endif
+		newblk = find_free_dqentry(dquot, &ret);
+	}
+	else
+		ret = do_insert_tree(dquot, &newblk, depth+1);
+	if (newson && ret >= 0) {
+		ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(newblk);
+		ret = write_blk(sb, dquot->dq_type, *treeblk, buf);
+	}
+	else if (newact && ret < 0)
+		put_free_dqblk(sb, dquot->dq_type, buf, *treeblk);
+out_buf:
+	freedqbuf(buf);
+	return ret;
+}
+
+/* Wrapper for inserting quota structure into tree */
+static inline int dq_insert_tree(struct dquot *dquot)
+{
+	int tmp = V2_DQTREEOFF;
+	return do_insert_tree(dquot, &tmp, 0);
+}
+
+/*
+ *	We don't have to be afraid of deadlocks as we never have quotas on quota files...
+ */
+static int v2_write_dquot(struct dquot *dquot)
+{
+	int type = dquot->dq_type;
+	ssize_t ret;
+	struct v2_disk_dqblk ddquot, empty;
+
+	/* dq_off is guarded by dqio_sem */
+	if (!dquot->dq_off)
+		if ((ret = dq_insert_tree(dquot)) < 0) {
+			printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret);
+			return ret;
+		}
+	spin_lock(&dq_data_lock);
+	mem2diskdqb(&ddquot, &dquot->dq_dqb, dquot->dq_id);
+	/* Argh... We may need to write structure full of zeroes but that would be
+	 * treated as an empty place by the rest of the code. Format change would
+	 * be definitely cleaner but the problems probably are not worth it */
+	memset(&empty, 0, sizeof(struct v2_disk_dqblk));
+	if (!memcmp(&empty, &ddquot, sizeof(struct v2_disk_dqblk)))
+		ddquot.dqb_itime = cpu_to_le64(1);
+	spin_unlock(&dq_data_lock);
+	ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+	      (char *)&ddquot, sizeof(struct v2_disk_dqblk), dquot->dq_off);
+	if (ret != sizeof(struct v2_disk_dqblk)) {
+		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", dquot->dq_sb->s_id);
+		if (ret >= 0)
+			ret = -ENOSPC;
+	}
+	else
+		ret = 0;
+	dqstats.writes++;
+
+	return ret;
+}
+
+/* Free dquot entry in data block */
+static int free_dqentry(struct dquot *dquot, uint blk)
+{
+	struct super_block *sb = dquot->dq_sb;
+	int type = dquot->dq_type;
+	struct v2_disk_dqdbheader *dh;
+	dqbuf_t buf = getdqbuf();
+	int ret = 0;
+
+	if (!buf)
+		return -ENOMEM;
+	if (dquot->dq_off >> V2_DQBLKSIZE_BITS != blk) {
+		printk(KERN_ERR "VFS: Quota structure has offset to other "
+		  "block (%u) than it should (%u).\n", blk,
+		  (uint)(dquot->dq_off >> V2_DQBLKSIZE_BITS));
+		goto out_buf;
+	}
+	if ((ret = read_blk(sb, type, blk, buf)) < 0) {
+		printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+		goto out_buf;
+	}
+	dh = (struct v2_disk_dqdbheader *)buf;
+	dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)-1);
+	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
+		if ((ret = remove_free_dqentry(sb, type, buf, blk)) < 0 ||
+		    (ret = put_free_dqblk(sb, type, buf, blk)) < 0) {
+			printk(KERN_ERR "VFS: Can't move quota data block (%u) "
+			  "to free list.\n", blk);
+			goto out_buf;
+		}
+	}
+	else {
+		memset(buf+(dquot->dq_off & ((1 << V2_DQBLKSIZE_BITS)-1)), 0,
+		  sizeof(struct v2_disk_dqblk));
+		if (le16_to_cpu(dh->dqdh_entries) == V2_DQSTRINBLK-1) {
+			/* Insert will write block itself */
+			if ((ret = insert_free_dqentry(sb, type, buf, blk)) < 0) {
+				printk(KERN_ERR "VFS: Can't insert quota data block (%u) to free entry list.\n", blk);
+				goto out_buf;
+			}
+		}
+		else
+			if ((ret = write_blk(sb, type, blk, buf)) < 0) {
+				printk(KERN_ERR "VFS: Can't write quota data "
+				  "block %u\n", blk);
+				goto out_buf;
+			}
+	}
+	dquot->dq_off = 0;	/* Quota is now unattached */
+out_buf:
+	freedqbuf(buf);
+	return ret;
+}
+
+/* Remove reference to dquot from tree */
+static int remove_tree(struct dquot *dquot, uint *blk, int depth)
+{
+	struct super_block *sb = dquot->dq_sb;
+	int type = dquot->dq_type;
+	dqbuf_t buf = getdqbuf();
+	int ret = 0;
+	uint newblk;
+	__le32 *ref = (__le32 *)buf;
+	
+	if (!buf)
+		return -ENOMEM;
+	if ((ret = read_blk(sb, type, *blk, buf)) < 0) {
+		printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+		goto out_buf;
+	}
+	newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]);
+	if (depth == V2_DQTREEDEPTH-1) {
+		ret = free_dqentry(dquot, newblk);
+		newblk = 0;
+	}
+	else
+		ret = remove_tree(dquot, &newblk, depth+1);
+	if (ret >= 0 && !newblk) {
+		int i;
+		ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(0);
+		for (i = 0; i < V2_DQBLKSIZE && !buf[i]; i++);	/* Block got empty? */
+		if (i == V2_DQBLKSIZE) {
+			put_free_dqblk(sb, type, buf, *blk);
+			*blk = 0;
+		}
+		else
+			if ((ret = write_blk(sb, type, *blk, buf)) < 0)
+				printk(KERN_ERR "VFS: Can't write quota tree "
+				  "block %u.\n", *blk);
+	}
+out_buf:
+	freedqbuf(buf);
+	return ret;	
+}
+
+/* Delete dquot from tree */
+static int v2_delete_dquot(struct dquot *dquot)
+{
+	uint tmp = V2_DQTREEOFF;
+
+	if (!dquot->dq_off)	/* Not even allocated? */
+		return 0;
+	return remove_tree(dquot, &tmp, 0);
+}
+
+/* Find entry in block */
+static loff_t find_block_dqentry(struct dquot *dquot, uint blk)
+{
+	dqbuf_t buf = getdqbuf();
+	loff_t ret = 0;
+	int i;
+	struct v2_disk_dqblk *ddquot = GETENTRIES(buf);
+
+	if (!buf)
+		return -ENOMEM;
+	if ((ret = read_blk(dquot->dq_sb, dquot->dq_type, blk, buf)) < 0) {
+		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		goto out_buf;
+	}
+	if (dquot->dq_id)
+		for (i = 0; i < V2_DQSTRINBLK &&
+		     le32_to_cpu(ddquot[i].dqb_id) != dquot->dq_id; i++);
+	else {	/* ID 0 needs a bit more complicated searching... */
+		struct v2_disk_dqblk fakedquot;
+
+		memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk));
+		for (i = 0; i < V2_DQSTRINBLK; i++)
+			if (!le32_to_cpu(ddquot[i].dqb_id) &&
+			    memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)))
+				break;
+	}
+	if (i == V2_DQSTRINBLK) {
+		printk(KERN_ERR "VFS: Quota for id %u referenced "
+		  "but not present.\n", dquot->dq_id);
+		ret = -EIO;
+		goto out_buf;
+	}
+	else
+		ret = (blk << V2_DQBLKSIZE_BITS) + sizeof(struct
+		  v2_disk_dqdbheader) + i * sizeof(struct v2_disk_dqblk);
+out_buf:
+	freedqbuf(buf);
+	return ret;
+}
+
+/* Find entry for given id in the tree */
+static loff_t find_tree_dqentry(struct dquot *dquot, uint blk, int depth)
+{
+	dqbuf_t buf = getdqbuf();
+	loff_t ret = 0;
+	__le32 *ref = (__le32 *)buf;
+
+	if (!buf)
+		return -ENOMEM;
+	if ((ret = read_blk(dquot->dq_sb, dquot->dq_type, blk, buf)) < 0) {
+		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		goto out_buf;
+	}
+	ret = 0;
+	blk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]);
+	if (!blk)	/* No reference? */
+		goto out_buf;
+	if (depth < V2_DQTREEDEPTH-1)
+		ret = find_tree_dqentry(dquot, blk, depth+1);
+	else
+		ret = find_block_dqentry(dquot, blk);
+out_buf:
+	freedqbuf(buf);
+	return ret;
+}
+
+/* Find entry for given id in the tree - wrapper function */
+static inline loff_t find_dqentry(struct dquot *dquot)
+{
+	return find_tree_dqentry(dquot, V2_DQTREEOFF, 0);
+}
+
+static int v2_read_dquot(struct dquot *dquot)
+{
+	int type = dquot->dq_type;
+	loff_t offset;
+	struct v2_disk_dqblk ddquot, empty;
+	int ret = 0;
+
+#ifdef __QUOTA_V2_PARANOIA
+	/* Invalidated quota? */
+	if (!dquot->dq_sb || !sb_dqopt(dquot->dq_sb)->files[type]) {
+		printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
+		return -EIO;
+	}
+#endif
+	offset = find_dqentry(dquot);
+	if (offset <= 0) {	/* Entry not present? */
+		if (offset < 0)
+			printk(KERN_ERR "VFS: Can't read quota "
+			  "structure for id %u.\n", dquot->dq_id);
+		dquot->dq_off = 0;
+		set_bit(DQ_FAKE_B, &dquot->dq_flags);
+		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
+		ret = offset;
+	}
+	else {
+		dquot->dq_off = offset;
+		if ((ret = dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type,
+		    (char *)&ddquot, sizeof(struct v2_disk_dqblk), offset))
+		    != sizeof(struct v2_disk_dqblk)) {
+			if (ret >= 0)
+				ret = -EIO;
+			printk(KERN_ERR "VFS: Error while reading quota "
+			  "structure for id %u.\n", dquot->dq_id);
+			memset(&ddquot, 0, sizeof(struct v2_disk_dqblk));
+		}
+		else {
+			ret = 0;
+			/* We need to escape back all-zero structure */
+			memset(&empty, 0, sizeof(struct v2_disk_dqblk));
+			empty.dqb_itime = cpu_to_le64(1);
+			if (!memcmp(&empty, &ddquot, sizeof(struct v2_disk_dqblk)))
+				ddquot.dqb_itime = 0;
+		}
+		disk2memdqb(&dquot->dq_dqb, &ddquot);
+		if (!dquot->dq_dqb.dqb_bhardlimit &&
+			!dquot->dq_dqb.dqb_bsoftlimit &&
+			!dquot->dq_dqb.dqb_ihardlimit &&
+			!dquot->dq_dqb.dqb_isoftlimit)
+			set_bit(DQ_FAKE_B, &dquot->dq_flags);
+	}
+	dqstats.reads++;
+
+	return ret;
+}
+
+/* Check whether dquot should be deleted: we can drop the on-disk structure
+ * when the dquot is fake and unused.  We know we are the only one operating
+ * on the dquot (thanks to dq_lock) */
+static int v2_release_dquot(struct dquot *dquot)
+{
+	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+		return v2_delete_dquot(dquot);
+	return 0;
+}
+
+static struct quota_format_ops v2_format_ops = {
+	.check_quota_file	= v2_check_quota_file,
+	.read_file_info		= v2_read_file_info,
+	.write_file_info	= v2_write_file_info,
+	.free_file_info		= NULL,
+	.read_dqblk		= v2_read_dquot,
+	.commit_dqblk		= v2_write_dquot,
+	.release_dqblk		= v2_release_dquot,
+};
+
+static struct quota_format_type v2_quota_format = {
+	.qf_fmt_id	= QFMT_VFS_V0,
+	.qf_ops		= &v2_format_ops,
+	.qf_owner	= THIS_MODULE
+};
+
+static int __init init_v2_quota_format(void)
+{
+	return register_quota_format(&v2_quota_format);
+}
+
+static void __exit exit_v2_quota_format(void)
+{
+	unregister_quota_format(&v2_quota_format);
+}
+
+module_init(init_v2_quota_format);
+module_exit(exit_v2_quota_format);
diff --git a/fs/ramfs/Makefile b/fs/ramfs/Makefile
new file mode 100644
index 0000000..f096f30
--- /dev/null
+++ b/fs/ramfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux ramfs routines.
+#
+
+obj-$(CONFIG_RAMFS) += ramfs.o
+
+ramfs-objs := inode.o
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
new file mode 100644
index 0000000..0a88917
--- /dev/null
+++ b/fs/ramfs/inode.c
@@ -0,0 +1,246 @@
+/*
+ * Resizable simple ram filesystem for Linux.
+ *
+ * Copyright (C) 2000 Linus Torvalds.
+ *               2000 Transmeta Corp.
+ *
+ * Usage limits added by David Gibson, Linuxcare Australia.
+ * This file is released under the GPL.
+ */
+
+/*
+ * NOTE! This filesystem is probably most useful
+ * not as a real filesystem, but as an example of
+ * how virtual filesystems can be written.
+ *
+ * It doesn't get much simpler than this. Consider
+ * that this file implements the full semantics of
+ * a POSIX-compliant read-write filesystem.
+ *
+ * Note in particular how the filesystem does not
+ * need to implement any data structures of its own
+ * to keep track of the virtual data: using the VFS
+ * caches is sufficient.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/ramfs.h>
+
+#include <asm/uaccess.h>
+
+/* some random number */
+#define RAMFS_MAGIC	0x858458f6
+
+static struct super_operations ramfs_ops;
+static struct address_space_operations ramfs_aops;
+static struct inode_operations ramfs_file_inode_operations;
+static struct inode_operations ramfs_dir_inode_operations;
+
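+/*
+ * ramfs pages live in the page cache and have no backing store, so dirty
+ * accounting and writeback are disabled and pages may be mapped directly
+ * for read, write and execute.
+ */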
+static struct backing_dev_info ramfs_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
+			  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
+			  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
+};
+
+struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
+{
+	struct inode * inode = new_inode(sb);
+
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = current->fsuid;
+		inode->i_gid = current->fsgid;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_mapping->a_ops = &ramfs_aops;
+		inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		switch (mode & S_IFMT) {
+		default:
+			init_special_inode(inode, mode, dev);
+			break;
+		case S_IFREG:
+			inode->i_op = &ramfs_file_inode_operations;
+			inode->i_fop = &ramfs_file_operations;
+			break;
+		case S_IFDIR:
+			inode->i_op = &ramfs_dir_inode_operations;
+			inode->i_fop = &simple_dir_operations;
+
+			/* directory inodes start off with i_nlink == 2 (for "." entry) */
+			inode->i_nlink++;
+			break;
+		case S_IFLNK:
+			inode->i_op = &page_symlink_inode_operations;
+			break;
+		}
+	}
+	return inode;
+}
+
+/*
+ * File creation. Allocate an inode, and we're done..
+ */
+/* SMP-safe */
+static int
+ramfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+	struct inode * inode = ramfs_get_inode(dir->i_sb, mode, dev);
+	int error = -ENOSPC;
+
+	if (inode) {
+		if (dir->i_mode & S_ISGID) {
+			inode->i_gid = dir->i_gid;
+			if (S_ISDIR(mode))
+				inode->i_mode |= S_ISGID;
+		}
+		d_instantiate(dentry, inode);
+		dget(dentry);	/* Extra count - pin the dentry in core */
+		error = 0;
+	}
+	return error;
+}
+
+static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	int retval = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+	if (!retval)
+		dir->i_nlink++;
+	return retval;
+}
+
+static int ramfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+	return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
+{
+	struct inode *inode;
+	int error = -ENOSPC;
+
+	inode = ramfs_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+	if (inode) {
+		int l = strlen(symname)+1;
+		error = page_symlink(inode, symname, l);
+		if (!error) {
+			if (dir->i_mode & S_ISGID)
+				inode->i_gid = dir->i_gid;
+			d_instantiate(dentry, inode);
+			dget(dentry);
+		} else
+			iput(inode);
+	}
+	return error;
+}
+
+static struct address_space_operations ramfs_aops = {
+	.readpage	= simple_readpage,
+	.prepare_write	= simple_prepare_write,
+	.commit_write	= simple_commit_write
+};
+
+struct file_operations ramfs_file_operations = {
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.fsync		= simple_sync_file,
+	.sendfile	= generic_file_sendfile,
+	.llseek		= generic_file_llseek,
+};
+
+static struct inode_operations ramfs_file_inode_operations = {
+	.getattr	= simple_getattr,
+};
+
+static struct inode_operations ramfs_dir_inode_operations = {
+	.create		= ramfs_create,
+	.lookup		= simple_lookup,
+	.link		= simple_link,
+	.unlink		= simple_unlink,
+	.symlink	= ramfs_symlink,
+	.mkdir		= ramfs_mkdir,
+	.rmdir		= simple_rmdir,
+	.mknod		= ramfs_mknod,
+	.rename		= simple_rename,
+};
+
+static struct super_operations ramfs_ops = {
+	.statfs		= simple_statfs,
+	.drop_inode	= generic_delete_inode,
+};
+
+static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
+{
+	struct inode * inode;
+	struct dentry * root;
+
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = RAMFS_MAGIC;
+	sb->s_op = &ramfs_ops;
+	sb->s_time_gran = 1;
+	inode = ramfs_get_inode(sb, S_IFDIR | 0755, 0);
+	if (!inode)
+		return -ENOMEM;
+
+	root = d_alloc_root(inode);
+	if (!root) {
+		iput(inode);
+		return -ENOMEM;
+	}
+	sb->s_root = root;
+	return 0;
+}
+
+struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, ramfs_fill_super);
+}
+
+static struct super_block *rootfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
+}
+
+static struct file_system_type ramfs_fs_type = {
+	.name		= "ramfs",
+	.get_sb		= ramfs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+static struct file_system_type rootfs_fs_type = {
+	.name		= "rootfs",
+	.get_sb		= rootfs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+static int __init init_ramfs_fs(void)
+{
+	return register_filesystem(&ramfs_fs_type);
+}
+
+static void __exit exit_ramfs_fs(void)
+{
+	unregister_filesystem(&ramfs_fs_type);
+}
+
+module_init(init_ramfs_fs)
+module_exit(exit_ramfs_fs)
+
+int __init init_rootfs(void)
+{
+	return register_filesystem(&rootfs_fs_type);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/fs/read_write.c b/fs/read_write.c
new file mode 100644
index 0000000..6be4b1a
--- /dev/null
+++ b/fs/read_write.c
@@ -0,0 +1,730 @@
+/*
+ *  linux/fs/read_write.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/slab.h> 
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/smp_lock.h>
+#include <linux/dnotify.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+struct file_operations generic_ro_fops = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.mmap		= generic_file_readonly_mmap,
+	.sendfile	= generic_file_sendfile,
+};
+
+EXPORT_SYMBOL(generic_ro_fops);
+
+loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
+{
+	long long retval;
+	struct inode *inode = file->f_mapping->host;
+
+	down(&inode->i_sem);
+	switch (origin) {
+		case 2:
+			offset += inode->i_size;
+			break;
+		case 1:
+			offset += file->f_pos;
+	}
+	retval = -EINVAL;
+	if (offset>=0 && offset<=inode->i_sb->s_maxbytes) {
+		if (offset != file->f_pos) {
+			file->f_pos = offset;
+			file->f_version = 0;
+		}
+		retval = offset;
+	}
+	up(&inode->i_sem);
+	return retval;
+}
+
+EXPORT_SYMBOL(generic_file_llseek);
+
+loff_t remote_llseek(struct file *file, loff_t offset, int origin)
+{
+	long long retval;
+
+	lock_kernel();
+	switch (origin) {
+		case 2:
+			offset += i_size_read(file->f_dentry->d_inode);
+			break;
+		case 1:
+			offset += file->f_pos;
+	}
+	retval = -EINVAL;
+	if (offset>=0 && offset<=file->f_dentry->d_inode->i_sb->s_maxbytes) {
+		if (offset != file->f_pos) {
+			file->f_pos = offset;
+			file->f_version = 0;
+		}
+		retval = offset;
+	}
+	unlock_kernel();
+	return retval;
+}
+EXPORT_SYMBOL(remote_llseek);
+
+loff_t no_llseek(struct file *file, loff_t offset, int origin)
+{
+	return -ESPIPE;
+}
+EXPORT_SYMBOL(no_llseek);
+
+loff_t default_llseek(struct file *file, loff_t offset, int origin)
+{
+	long long retval;
+
+	lock_kernel();
+	switch (origin) {
+		case 2:
+			offset += i_size_read(file->f_dentry->d_inode);
+			break;
+		case 1:
+			offset += file->f_pos;
+	}
+	retval = -EINVAL;
+	if (offset >= 0) {
+		if (offset != file->f_pos) {
+			file->f_pos = offset;
+			file->f_version = 0;
+		}
+		retval = offset;
+	}
+	unlock_kernel();
+	return retval;
+}
+EXPORT_SYMBOL(default_llseek);
+
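+/*
+ * Pick the llseek method for a file: use the driver's ->llseek if it has
+ * one, fall back to default_llseek for seekable files, and fail with
+ * -ESPIPE via no_llseek when FMODE_LSEEK is not set.
+ */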
+loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
+{
+	loff_t (*fn)(struct file *, loff_t, int);
+
+	fn = no_llseek;
+	if (file->f_mode & FMODE_LSEEK) {
+		fn = default_llseek;
+		if (file->f_op && file->f_op->llseek)
+			fn = file->f_op->llseek;
+	}
+	return fn(file, offset, origin);
+}
+EXPORT_SYMBOL(vfs_llseek);
+
+asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin)
+{
+	off_t retval;
+	struct file * file;
+	int fput_needed;
+
+	retval = -EBADF;
+	file = fget_light(fd, &fput_needed);
+	if (!file)
+		goto bad;
+
+	retval = -EINVAL;
+	if (origin <= 2) {
+		loff_t res = vfs_llseek(file, offset, origin);
+		retval = res;
+		if (res != (loff_t)retval)
+			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
+	}
+	fput_light(file, fput_needed);
+bad:
+	return retval;
+}
+
+#ifdef __ARCH_WANT_SYS_LLSEEK
+asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
+			   unsigned long offset_low, loff_t __user * result,
+			   unsigned int origin)
+{
+	int retval;
+	struct file * file;
+	loff_t offset;
+	int fput_needed;
+
+	retval = -EBADF;
+	file = fget_light(fd, &fput_needed);
+	if (!file)
+		goto bad;
+
+	retval = -EINVAL;
+	if (origin > 2)
+		goto out_putf;
+
+	offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
+			origin);
+
+	retval = (int)offset;
+	if (offset >= 0) {
+		retval = -EFAULT;
+		if (!copy_to_user(result, &offset, sizeof(offset)))
+			retval = 0;
+	}
+out_putf:
+	fput_light(file, fput_needed);
+bad:
+	return retval;
+}
+#endif
+
+
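+/*
+ * Validate a read/write request: reject counts above the per-file maximum
+ * and negative or wrapping offsets, and let mandatory locking veto the
+ * area on inodes subject to mandatory locks.
+ */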
+int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
+{
+	struct inode *inode;
+	loff_t pos;
+
+	if (unlikely(count > file->f_maxcount))
+		goto Einval;
+	pos = *ppos;
+	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
+		goto Einval;
+
+	inode = file->f_dentry->d_inode;
+	if (inode->i_flock && MANDATORY_LOCK(inode))
+		return locks_mandatory_area(read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE, inode, file, pos, count);
+	return 0;
+
+Einval:
+	return -EINVAL;
+}
+
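+/*
+ * Emulate a synchronous read on top of ->aio_read: issue the request with
+ * a synchronous kiocb and, if it gets queued, wait for completion before
+ * propagating the updated position back to *ppos.
+ */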
+ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, filp);
+	kiocb.ki_pos = *ppos;
+	ret = filp->f_op->aio_read(&kiocb, buf, len, kiocb.ki_pos);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&kiocb);
+	*ppos = kiocb.ki_pos;
+	return ret;
+}
+
+EXPORT_SYMBOL(do_sync_read);
+
+ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
+{
+	ssize_t ret;
+
+	if (!(file->f_mode & FMODE_READ))
+		return -EBADF;
+	if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+		return -EINVAL;
+	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
+		return -EFAULT;
+
+	ret = rw_verify_area(READ, file, pos, count);
+	if (!ret) {
+		ret = security_file_permission (file, MAY_READ);
+		if (!ret) {
+			if (file->f_op->read)
+				ret = file->f_op->read(file, buf, count, pos);
+			else
+				ret = do_sync_read(file, buf, count, pos);
+			if (ret > 0) {
+				dnotify_parent(file->f_dentry, DN_ACCESS);
+				current->rchar += ret;
+			}
+			current->syscr++;
+		}
+	}
+
+	return ret;
+}
+
+EXPORT_SYMBOL(vfs_read);
+
+ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, filp);
+	kiocb.ki_pos = *ppos;
+	ret = filp->f_op->aio_write(&kiocb, buf, len, kiocb.ki_pos);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&kiocb);
+	*ppos = kiocb.ki_pos;
+	return ret;
+}
+
+EXPORT_SYMBOL(do_sync_write);
+
+ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+	ssize_t ret;
+
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+		return -EINVAL;
+	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
+		return -EFAULT;
+
+	ret = rw_verify_area(WRITE, file, pos, count);
+	if (!ret) {
+		ret = security_file_permission (file, MAY_WRITE);
+		if (!ret) {
+			if (file->f_op->write)
+				ret = file->f_op->write(file, buf, count, pos);
+			else
+				ret = do_sync_write(file, buf, count, pos);
+			if (ret > 0) {
+				dnotify_parent(file->f_dentry, DN_MODIFY);
+				current->wchar += ret;
+			}
+			current->syscw++;
+		}
+	}
+
+	return ret;
+}
+
+EXPORT_SYMBOL(vfs_write);
+
+static inline loff_t file_pos_read(struct file *file)
+{
+	return file->f_pos;
+}
+
+static inline void file_pos_write(struct file *file, loff_t pos)
+{
+	file->f_pos = pos;
+}
+
+asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		loff_t pos = file_pos_read(file);
+		ret = vfs_read(file, buf, count, &pos);
+		file_pos_write(file, pos);
+		fput_light(file, fput_needed);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sys_read);
+
+asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		loff_t pos = file_pos_read(file);
+		ret = vfs_write(file, buf, count, &pos);
+		file_pos_write(file, pos);
+		fput_light(file, fput_needed);
+	}
+
+	return ret;
+}
+
+asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
+			     size_t count, loff_t pos)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	if (pos < 0)
+		return -EINVAL;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		ret = -ESPIPE;
+		if (file->f_mode & FMODE_PREAD)
+			ret = vfs_read(file, buf, count, &pos);
+		fput_light(file, fput_needed);
+	}
+
+	return ret;
+}
+
+asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
+			      size_t count, loff_t pos)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	if (pos < 0)
+		return -EINVAL;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		ret = -ESPIPE;
+		if (file->f_mode & FMODE_PWRITE)  
+			ret = vfs_write(file, buf, count, &pos);
+		fput_light(file, fput_needed);
+	}
+
+	return ret;
+}
+
+/*
+ * Reduce an iovec's length in-place.  Return the resulting number of segments
+ */
+unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
+{
+	unsigned long seg = 0;
+	size_t len = 0;
+
+	while (seg < nr_segs) {
+		seg++;
+		if (len + iov->iov_len >= to) {
+			iov->iov_len = to - len;
+			break;
+		}
+		len += iov->iov_len;
+		iov++;
+	}
+	return seg;
+}
+
+EXPORT_SYMBOL(iov_shorten);
+
+/* A write operation does a read from user space and vice versa */
+#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
+
+static ssize_t do_readv_writev(int type, struct file *file,
+			       const struct iovec __user * uvector,
+			       unsigned long nr_segs, loff_t *pos)
+{
+	typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
+	typedef ssize_t (*iov_fn_t)(struct file *, const struct iovec *, unsigned long, loff_t *);
+
+	size_t tot_len;
+	struct iovec iovstack[UIO_FASTIOV];
+	struct iovec *iov=iovstack, *vector;
+	ssize_t ret;
+	int seg;
+	io_fn_t fn;
+	iov_fn_t fnv;
+
+	/*
+	 * SuS says "The readv() function *may* fail if the iovcnt argument
+	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
+	 * traditionally returned zero for zero segments, so...
+	 */
+	ret = 0;
+	if (nr_segs == 0)
+		goto out;
+
+	/*
+	 * First get the "struct iovec" from user memory and
+	 * verify all the pointers
+	 */
+	ret = -EINVAL;
+	if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
+		goto out;
+	if (!file->f_op)
+		goto out;
+	if (nr_segs > UIO_FASTIOV) {
+		ret = -ENOMEM;
+		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+		if (!iov)
+			goto out;
+	}
+	ret = -EFAULT;
+	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector)))
+		goto out;
+
+	/*
+	 * Single unix specification:
+	 * We should return -EINVAL if an element length is negative or does not
+	 * fit in an ssize_t.  The total length must also fit in an ssize_t.
+	 *
+	 * Be careful here because iov_len is a size_t not an ssize_t
+	 */
+	tot_len = 0;
+	ret = -EINVAL;
+	for (seg = 0; seg < nr_segs; seg++) {
+		void __user *buf = iov[seg].iov_base;
+		ssize_t len = (ssize_t)iov[seg].iov_len;
+
+		if (unlikely(!access_ok(vrfy_dir(type), buf, len)))
+			goto Efault;
+		if (len < 0)	/* size_t not fitting an ssize_t .. */
+			goto out;
+		tot_len += len;
+		if ((ssize_t)tot_len < 0) /* maths overflow on the ssize_t */
+			goto out;
+	}
+	if (tot_len == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = rw_verify_area(type, file, pos, tot_len);
+	if (ret)
+		goto out;
+
+	fnv = NULL;
+	if (type == READ) {
+		fn = file->f_op->read;
+		fnv = file->f_op->readv;
+	} else {
+		fn = (io_fn_t)file->f_op->write;
+		fnv = file->f_op->writev;
+	}
+	if (fnv) {
+		ret = fnv(file, iov, nr_segs, pos);
+		goto out;
+	}
+
+	/* Do it by hand, with file-ops */
+	ret = 0;
+	vector = iov;
+	while (nr_segs > 0) {
+		void __user * base;
+		size_t len;
+		ssize_t nr;
+
+		base = vector->iov_base;
+		len = vector->iov_len;
+		vector++;
+		nr_segs--;
+
+		nr = fn(file, base, len, pos);
+
+		if (nr < 0) {
+			if (!ret) ret = nr;
+			break;
+		}
+		ret += nr;
+		if (nr != len)
+			break;
+	}
+out:
+	if (iov != iovstack)
+		kfree(iov);
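+	/* Notify on any non-error read, but only on writes that moved data */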
+	if ((ret + (type == READ)) > 0)
+		dnotify_parent(file->f_dentry,
+				(type == READ) ? DN_ACCESS : DN_MODIFY);
+	return ret;
+Efault:
+	ret = -EFAULT;
+	goto out;
+}
+
+ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
+		  unsigned long vlen, loff_t *pos)
+{
+	if (!(file->f_mode & FMODE_READ))
+		return -EBADF;
+	if (!file->f_op || (!file->f_op->readv && !file->f_op->read))
+		return -EINVAL;
+
+	return do_readv_writev(READ, file, vec, vlen, pos);
+}
+
+EXPORT_SYMBOL(vfs_readv);
+
+ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
+		   unsigned long vlen, loff_t *pos)
+{
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+	if (!file->f_op || (!file->f_op->writev && !file->f_op->write))
+		return -EINVAL;
+
+	return do_readv_writev(WRITE, file, vec, vlen, pos);
+}
+
+EXPORT_SYMBOL(vfs_writev);
+
+asmlinkage ssize_t
+sys_readv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		loff_t pos = file_pos_read(file);
+		ret = vfs_readv(file, vec, vlen, &pos);
+		file_pos_write(file, pos);
+		fput_light(file, fput_needed);
+	}
+
+	if (ret > 0)
+		current->rchar += ret;
+	current->syscr++;
+	return ret;
+}
+
+asmlinkage ssize_t
+sys_writev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
+{
+	struct file *file;
+	ssize_t ret = -EBADF;
+	int fput_needed;
+
+	file = fget_light(fd, &fput_needed);
+	if (file) {
+		loff_t pos = file_pos_read(file);
+		ret = vfs_writev(file, vec, vlen, &pos);
+		file_pos_write(file, pos);
+		fput_light(file, fput_needed);
+	}
+
+	if (ret > 0)
+		current->wchar += ret;
+	current->syscw++;
+	return ret;
+}
+
+static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+			   size_t count, loff_t max)
+{
+	struct file * in_file, * out_file;
+	struct inode * in_inode, * out_inode;
+	loff_t pos;
+	ssize_t retval;
+	int fput_needed_in, fput_needed_out;
+
+	/*
+	 * Get input file, and verify that it is ok..
+	 */
+	retval = -EBADF;
+	in_file = fget_light(in_fd, &fput_needed_in);
+	if (!in_file)
+		goto out;
+	if (!(in_file->f_mode & FMODE_READ))
+		goto fput_in;
+	retval = -EINVAL;
+	in_inode = in_file->f_dentry->d_inode;
+	if (!in_inode)
+		goto fput_in;
+	if (!in_file->f_op || !in_file->f_op->sendfile)
+		goto fput_in;
+	retval = -ESPIPE;
+	if (!ppos)
+		ppos = &in_file->f_pos;
+	else
+		if (!(in_file->f_mode & FMODE_PREAD))
+			goto fput_in;
+	retval = rw_verify_area(READ, in_file, ppos, count);
+	if (retval)
+		goto fput_in;
+
+	retval = security_file_permission (in_file, MAY_READ);
+	if (retval)
+		goto fput_in;
+
+	/*
+	 * Get output file, and verify that it is ok..
+	 */
+	retval = -EBADF;
+	out_file = fget_light(out_fd, &fput_needed_out);
+	if (!out_file)
+		goto fput_in;
+	if (!(out_file->f_mode & FMODE_WRITE))
+		goto fput_out;
+	retval = -EINVAL;
+	if (!out_file->f_op || !out_file->f_op->sendpage)
+		goto fput_out;
+	out_inode = out_file->f_dentry->d_inode;
+	retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
+	if (retval)
+		goto fput_out;
+
+	retval = security_file_permission (out_file, MAY_WRITE);
+	if (retval)
+		goto fput_out;
+
+	if (!max)
+		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
+
+	pos = *ppos;
+	retval = -EINVAL;
+	if (unlikely(pos < 0))
+		goto fput_out;
+	if (unlikely(pos + count > max)) {
+		retval = -EOVERFLOW;
+		if (pos >= max)
+			goto fput_out;
+		count = max - pos;
+	}
+
+	retval = in_file->f_op->sendfile(in_file, ppos, count, file_send_actor, out_file);
+
+	if (retval > 0) {
+		current->rchar += retval;
+		current->wchar += retval;
+	}
+	current->syscr++;
+	current->syscw++;
+
+	if (*ppos > max)
+		retval = -EOVERFLOW;
+
+fput_out:
+	fput_light(out_file, fput_needed_out);
+fput_in:
+	fput_light(in_file, fput_needed_in);
+out:
+	return retval;
+}
+
+asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t __user *offset, size_t count)
+{
+	loff_t pos;
+	off_t off;
+	ssize_t ret;
+
+	if (offset) {
+		if (unlikely(get_user(off, offset)))
+			return -EFAULT;
+		pos = off;
+		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
+		if (unlikely(put_user(pos, offset)))
+			return -EFAULT;
+		return ret;
+	}
+
+	return do_sendfile(out_fd, in_fd, NULL, count, 0);
+}
+
+asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t __user *offset, size_t count)
+{
+	loff_t pos;
+	ssize_t ret;
+
+	if (offset) {
+		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
+			return -EFAULT;
+		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
+		if (unlikely(put_user(pos, offset)))
+			return -EFAULT;
+		return ret;
+	}
+
+	return do_sendfile(out_fd, in_fd, NULL, count, 0);
+}
diff --git a/fs/readdir.c b/fs/readdir.c
new file mode 100644
index 0000000..b03579b
--- /dev/null
+++ b/fs/readdir.c
@@ -0,0 +1,300 @@
+/*
+ *  linux/fs/readdir.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/file.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/dirent.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+
+#include <asm/uaccess.h>
+
+int vfs_readdir(struct file *file, filldir_t filler, void *buf)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	int res = -ENOTDIR;
+	if (!file->f_op || !file->f_op->readdir)
+		goto out;
+
+	res = security_file_permission(file, MAY_READ);
+	if (res)
+		goto out;
+
+	down(&inode->i_sem);
+	res = -ENOENT;
+	if (!IS_DEADDIR(inode)) {
+		res = file->f_op->readdir(file, buf, filler);
+		file_accessed(file);
+	}
+	up(&inode->i_sem);
+out:
+	return res;
+}
+
+EXPORT_SYMBOL(vfs_readdir);
+
+/*
+ * Traditional linux readdir() handling..
+ *
+ * "count=1" is a special case, meaning that the buffer is one
+ * dirent-structure in size and that the code can't handle more
+ * anyway. Thus the special "fillonedir()" function for that
+ * case (the low-level handlers don't need to care about this).
+ */
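+/* NAME_OFFSET() gives the offset of the name within a dirent; each record
+ * is rounded up to a multiple of sizeof(long) so entries stay aligned. */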
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
+
+#ifdef __ARCH_WANT_OLD_READDIR
+
+struct old_linux_dirent {
+	unsigned long	d_ino;
+	unsigned long	d_offset;
+	unsigned short	d_namlen;
+	char		d_name[1];
+};
+
+struct readdir_callback {
+	struct old_linux_dirent __user * dirent;
+	int result;
+};
+
+static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
+		      ino_t ino, unsigned int d_type)
+{
+	struct readdir_callback * buf = (struct readdir_callback *) __buf;
+	struct old_linux_dirent __user * dirent;
+
+	if (buf->result)
+		return -EINVAL;
+	buf->result++;
+	dirent = buf->dirent;
+	if (!access_ok(VERIFY_WRITE, dirent,
+			(unsigned long)(dirent->d_name + namlen + 1) -
+				(unsigned long)dirent))
+		goto efault;
+	if (	__put_user(ino, &dirent->d_ino) ||
+		__put_user(offset, &dirent->d_offset) ||
+		__put_user(namlen, &dirent->d_namlen) ||
+		__copy_to_user(dirent->d_name, name, namlen) ||
+		__put_user(0, dirent->d_name + namlen))
+		goto efault;
+	return 0;
+efault:
+	buf->result = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long old_readdir(unsigned int fd, struct old_linux_dirent __user * dirent, unsigned int count)
+{
+	int error;
+	struct file * file;
+	struct readdir_callback buf;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.result = 0;
+	buf.dirent = dirent;
+
+	error = vfs_readdir(file, fillonedir, &buf);
+	if (error >= 0)
+		error = buf.result;
+
+	fput(file);
+out:
+	return error;
+}
+
+#endif /* __ARCH_WANT_OLD_READDIR */
+
+/*
+ * New, all-improved, singing, dancing, iBCS2-compliant getdents()
+ * interface. 
+ */
+struct linux_dirent {
+	unsigned long	d_ino;
+	unsigned long	d_off;
+	unsigned short	d_reclen;
+	char		d_name[1];
+};
+
+struct getdents_callback {
+	struct linux_dirent __user * current_dir;
+	struct linux_dirent __user * previous;
+	int count;
+	int error;
+};
+
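+/*
+ * Pack one directory entry into the user buffer.  The previous entry is
+ * remembered so its d_off can be filled in with the offset of the entry
+ * that follows it; the last d_off is patched up by sys_getdents().
+ */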
+static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+		   ino_t ino, unsigned int d_type)
+{
+	struct linux_dirent __user * dirent;
+	struct getdents_callback * buf = (struct getdents_callback *) __buf;
+	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	if (dirent) {
+		if (__put_user(offset, &dirent->d_off))
+			goto efault;
+	}
+	dirent = buf->current_dir;
+	if (__put_user(ino, &dirent->d_ino))
+		goto efault;
+	if (__put_user(reclen, &dirent->d_reclen))
+		goto efault;
+	if (copy_to_user(dirent->d_name, name, namlen))
+		goto efault;
+	if (__put_user(0, dirent->d_name + namlen))
+		goto efault;
+	if (__put_user(d_type, (char __user *) dirent + reclen - 1))
+		goto efault;
+	buf->previous = dirent;
+	dirent = (void __user *)dirent + reclen;
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+efault:
+	buf->error = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user * dirent, unsigned int count)
+{
+	struct file * file;
+	struct linux_dirent __user * lastdirent;
+	struct getdents_callback buf;
+	int error;
+
+	error = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, dirent, count))
+		goto out;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.current_dir = dirent;
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, filldir, &buf);
+	if (error < 0)
+		goto out_putf;
+	error = buf.error;
+	lastdirent = buf.previous;
+	if (lastdirent) {
+		if (put_user(file->f_pos, &lastdirent->d_off))
+			error = -EFAULT;
+		else
+			error = count - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
+
+struct getdents_callback64 {
+	struct linux_dirent64 __user * current_dir;
+	struct linux_dirent64 __user * previous;
+	int count;
+	int error;
+};
+
+static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+		     ino_t ino, unsigned int d_type)
+{
+	struct linux_dirent64 __user *dirent;
+	struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf;
+	int reclen = ROUND_UP64(NAME_OFFSET(dirent) + namlen + 1);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	if (dirent) {
+		if (__put_user(offset, &dirent->d_off))
+			goto efault;
+	}
+	dirent = buf->current_dir;
+	if (__put_user(ino, &dirent->d_ino))
+		goto efault;
+	if (__put_user(0, &dirent->d_off))
+		goto efault;
+	if (__put_user(reclen, &dirent->d_reclen))
+		goto efault;
+	if (__put_user(d_type, &dirent->d_type))
+		goto efault;
+	if (copy_to_user(dirent->d_name, name, namlen))
+		goto efault;
+	if (__put_user(0, dirent->d_name + namlen))
+		goto efault;
+	buf->previous = dirent;
+	dirent = (void __user *)dirent + reclen;
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+efault:
+	buf->error = -EFAULT;
+	return -EFAULT;
+}
+
+asmlinkage long sys_getdents64(unsigned int fd, struct linux_dirent64 __user * dirent, unsigned int count)
+{
+	struct file * file;
+	struct linux_dirent64 __user * lastdirent;
+	struct getdents_callback64 buf;
+	int error;
+
+	error = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, dirent, count))
+		goto out;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.current_dir = dirent;
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, filldir64, &buf);
+	if (error < 0)
+		goto out_putf;
+	error = buf.error;
+	lastdirent = buf.previous;
+	if (lastdirent) {
+		typeof(lastdirent->d_off) d_off = file->f_pos;
+		error = -EFAULT;
+		if (__put_user(d_off, &lastdirent->d_off))
+			goto out_putf;
+		error = count - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
new file mode 100644
index 0000000..3a59309
--- /dev/null
+++ b/fs/reiserfs/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the linux reiser-filesystem routines.
+#
+
+obj-$(CONFIG_REISERFS_FS) += reiserfs.o
+
+reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
+		 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
+		 hashes.o tail_conversion.o journal.o resize.o \
+		 item_ops.o ioctl.o procfs.o
+
+ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
+reiserfs-objs += xattr.o xattr_user.o xattr_trusted.o
+endif
+
+ifeq ($(CONFIG_REISERFS_FS_SECURITY),y)
+reiserfs-objs += xattr_security.o
+endif
+
+ifeq ($(CONFIG_REISERFS_FS_POSIX_ACL),y)
+reiserfs-objs += xattr_acl.o
+endif
+
+# gcc -O2 (the kernel default)  is overaggressive on ppc32 when many inline
+# functions are used.  This causes the compiler to advance the stack
+# pointer out of the available stack space, corrupting kernel space,
+# and causing a panic. Since this behavior only affects ppc32, this ifeq
+# will work around it. If any other architecture displays this behavior,
+# add it here.
+ifeq ($(CONFIG_PPC32),y)
+EXTRA_CFLAGS := -O1
+endif
+
+TAGS:
+	etags *.c
+
diff --git a/fs/reiserfs/README b/fs/reiserfs/README
new file mode 100644
index 0000000..90e1670
--- /dev/null
+++ b/fs/reiserfs/README
@@ -0,0 +1,161 @@
+[LICENSING] 
+
+ReiserFS is hereby licensed under the GNU General
+Public License version 2.
+
+Source code files that contain the phrase "licensing governed by
+reiserfs/README" are "governed files" throughout this file.  Governed
+files are licensed under the GPL.  The portions of them owned by Hans
+Reiser, or authorized to be licensed by him, have been in the past,
+and likely will be in the future, licensed to other parties under
+other licenses.  If you add your code to governed files, and don't
+want it to be owned by Hans Reiser, put your copyright label on that
+code so the poor blight and his customers can keep things straight.
+All portions of governed files not labeled otherwise are owned by Hans
+Reiser, and by adding your code to it, widely distributing it to
+others or sending us a patch, and leaving the sentence in stating that
+licensing is governed by the statement in this file, you accept this.
+It will be a kindness if you identify whether Hans Reiser is allowed
+to license code labeled as owned by you on your behalf other than
+under the GPL, because he wants to know if it is okay to do so and put
+a check in the mail to you (for non-trivial improvements) when he
+makes his next sale.  He makes no guarantees as to the amount if any,
+though he feels motivated to motivate contributors, and you can surely
+discuss this with him before or after contributing.  You have the
+right to decline to allow him to license your code contribution other
+than under the GPL.
+
+Further licensing options are available for commercial and/or other
+interests directly from Hans Reiser: hans@reiser.to.  If you interpret
+the GPL as not allowing those additional licensing options, you read
+it wrongly, and Richard Stallman agrees with me, when carefully read
+you can see that those restrictions on additional terms do not apply
+to the owner of the copyright, and my interpretation of this shall
+govern for this license.  
+
+Finally, nothing in this license shall be interpreted to allow you to
+fail to fairly credit me, or to remove my credits, without my
+permission, unless you are an end user not redistributing to others.
+If you have doubts about how to properly do that, or about what is
+fair, ask.  (Last I spoke with him Richard was contemplating how best
+to address the fair crediting issue in the next GPL version.)
+
+[END LICENSING]
+
+Reiserfs is a file system based on balanced tree algorithms, which is
+described at http://devlinux.com/namesys.
+
+Stop reading here.  Go there, then return.
+
+Send bug reports to yura@namesys.botik.ru.
+
+mkreiserfs and other utilities are in reiserfs/utils, or wherever your
+Linux provider put them.  There is some disagreement about how useful
+it is for users to get their fsck and mkreiserfs out of sync with the
+version of reiserfs that is in their kernel, with many important
+distributors wanting them out of sync. :-) Please try to remember to
+recompile and reinstall fsck and mkreiserfs with every update of
+reiserfs; this is a common source of confusion.  Note that some of the
+utilities cannot be compiled without accessing the balancing code
+which is in the kernel code, and relocating the utilities may require
+you to specify where that code can be found.
+
+Yes, if you update your reiserfs kernel module you do have to
+recompile your kernel, most of the time.  The errors you get will be
+quite cryptic if you forget to do so.
+
+Real users, as opposed to folks who want to hack and then understand
+what went wrong, will want REISERFS_CHECK off.
+
+Hideous Commercial Pitch: Spread your development costs across other OS
+vendors.  Select from the best in the world, not the best in your
+building, by buying from third party OS component suppliers.  Leverage
+the software component development power of the internet.  Be the most
+aggressive in taking advantage of the commercial possibilities of
+decentralized internet development, and add value through your branded
+integration that you sell as an operating system.  Let your competitors
+be the ones to compete against the entire internet by themselves.  Be
+hip, get with the new economic trend, before your competitors do.  Send
+email to hans@reiser.to.
+
+To understand the code, after reading the website, start reading the
+code by reading reiserfs_fs.h first.
+
+Hans Reiser was the project initiator, primary architect, source of all
+funding for the first 5.5 years, and one of the programmers.  He owns
+the copyright.
+
+Vladimir Saveljev was one of the programmers, and he worked long hours
+writing the cleanest code.  He always made the effort to be the best he
+could be, and to make his code the best that it could be.  What resulted
+was quite remarkable. I don't think that money can ever motivate someone
+to work the way he did; he is one of the most selfless men I know.
+
+Yura helps with benchmarking, coding hashes, and block pre-allocation
+code.
+
+Anatoly Pinchuk is a former member of our team who worked closely with
+Vladimir throughout the project's development.  He wrote a quite
+substantial portion of the total code.  He realized that there was a
+space problem with packing tails of files for files larger than a node
+that start on a node aligned boundary (there are reasons to want to node
+align files), and he invented and implemented indirect items and
+unformatted nodes as the solution.
+
+Konstantin Shvachko, with the help of the Russian version of a VC,
+tried to put me in a position where I was forced into giving control
+of the project to him.  (Fortunately, as the person paying the money
+for all salaries from my dayjob I owned all copyrights, and you can't
+really force takeovers of sole proprietorships.)  This was something
+curious, because he never really understood the value of our project,
+why we should do what we do, or why innovation was possible in
+general, but he was sure that he ought to be controlling it.  Every
+innovation had to be forced past him while he was with us.  He added
+two years to the time required to complete reiserfs, and was a net
+loss for me.  Mikhail Gilula was a brilliant innovator who also left
+in a destructive way that erased the value of his contributions, and
+that he was shown much generosity just makes it more painful.
+
+Grigory Zaigralin was an extremely effective system administrator for
+our group.
+
+Igor Krasheninnikov was wonderful at hardware procurement, repair, and
+network installation.
+
+Jeremy Fitzhardinge wrote the teahash.c code, and he gives credit to a
+textbook he got the algorithm from in the code.  Note that his analysis
+of how we could use the hashing code in making 32 bit NFS cookies work
+was probably more important than the actual algorithm.  Colin Plumb also
+contributed to it.
+
+Chris Mason dived right into our code, and in just a few months produced
+the journaling code that dramatically increased the value of ReiserFS.
+He is just an amazing programmer.
+
+Igor Zagorovsky is writing much of the new item handler and extent code
+for our next major release.
+
+Alexander Zarochentcev (sometimes known as zam, or sasha), wrote the
+resizer, and is hard at work on implementing allocate on flush.  SGI
+implemented allocate on flush before us for XFS, and generously took
+the time to convince me we should do it also.  They are great people,
+and a great company.
+
+Yuri Shevchuk and Nikita Danilov are doing squid cache optimization.
+
+Vitaly Fertman is doing fsck.
+
+Jeff Mahoney, of SuSE, contributed a few cleanup fixes, most notably
+the endian safe patches which allow ReiserFS to run on any platform
+supported by the Linux kernel.
+
+SuSE, IntegratedLinux.com, Ecila, MP3.com, bigstorage.com, and the
+Alpha PC Company made it possible for me to not have a day job
+anymore, and to dramatically increase our staffing.  Ecila funded
+hypertext feature development, MP3.com funded journaling, SuSE funded
+core development, IntegratedLinux.com funded squid web cache
+appliances, bigstorage.com funded HSM, and the Alpha PC Company funded
+the alpha port.  Many of these tasks were helped by sponsors other
+than the ones just named.  SuSE has helped in much more than just
+funding....
+
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
new file mode 100644
index 0000000..a4e2ed5
--- /dev/null
+++ b/fs/reiserfs/bitmap.c
@@ -0,0 +1,1169 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+/* Reiserfs block (de)allocator, bitmap-based. */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/buffer_head.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/reiserfs_fs_sb.h>
+#include <linux/reiserfs_fs_i.h>
+#include <linux/quotaops.h>
+
+#define PREALLOCATION_SIZE 9
+
+/* different reiserfs block allocator options */
+
+#define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)
+
+#define  _ALLOC_concentrating_formatted_nodes 0
+#define  _ALLOC_displacing_large_files 1
+#define  _ALLOC_displacing_new_packing_localities 2
+#define  _ALLOC_old_hashed_relocation 3
+#define  _ALLOC_new_hashed_relocation 4
+#define  _ALLOC_skip_busy 5
+#define  _ALLOC_displace_based_on_dirid 6
+#define  _ALLOC_hashed_formatted_nodes 7
+#define  _ALLOC_old_way 8
+#define  _ALLOC_hundredth_slices 9
+#define  _ALLOC_dirid_groups 10
+#define  _ALLOC_oid_groups 11
+#define  _ALLOC_packing_groups 12
+
+#define  concentrating_formatted_nodes(s)	test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
+#define  displacing_large_files(s)		test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
+#define  displacing_new_packing_localities(s)	test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
+
+#define SET_OPTION(optname) \
+   do { \
+        reiserfs_warning(s, "reiserfs: option \"%s\" is set", #optname); \
+        set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
+    } while(0)
+#define TEST_OPTION(optname, s) \
+    test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
+
+static inline void get_bit_address (struct super_block * s,
+				    b_blocknr_t block, int * bmap_nr, int * offset)
+{
+    /* The block lives in the bitmap block whose number equals the block
+     * number divided by the number of bits in a block. */
+    *bmap_nr = block / (s->s_blocksize << 3);
+    /* Within that bitmap block it is located at bit offset *offset. */
+    *offset = block & ((s->s_blocksize << 3) - 1 );
+    return;
+}
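+
+/*
+ * Worked example (illustrative only, assuming a 4096-byte block size, i.e.
+ * 32768 bits per bitmap block): block 100000 lives in bitmap block
+ * 100000 / 32768 = 3, at bit offset 100000 & 32767 = 1696.
+ */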
+
+#ifdef CONFIG_REISERFS_CHECK
+int is_reusable (struct super_block * s, b_blocknr_t block, int bit_value)
+{
+    int i, j;
+
+    if (block == 0 || block >= SB_BLOCK_COUNT (s)) {
+	reiserfs_warning (s, "vs-4010: is_reusable: block number is out of range %lu (%u)",
+			  block, SB_BLOCK_COUNT (s));
+	return 0;
+    }
+
+    /* it can't be one of the bitmap blocks */
+    for (i = 0; i < SB_BMAP_NR (s); i ++)
+	if (block == SB_AP_BITMAP (s)[i].bh->b_blocknr) {
+	    reiserfs_warning (s, "vs-4020: is_reusable: "
+			      "bitmap block %lu(%u) can't be freed or reused",
+			      block, SB_BMAP_NR (s));
+	    return 0;
+	}
+  
+    get_bit_address (s, block, &i, &j);
+
+    if (i >= SB_BMAP_NR (s)) {
+	reiserfs_warning (s, "vs-4030: is_reusable: there are not that many bitmap blocks: "
+			  "block=%lu, bitmap_nr=%d", block, i);
+	return 0;
+    }
+
+    if ((bit_value == 0 && 
+         reiserfs_test_le_bit(j, SB_AP_BITMAP(s)[i].bh->b_data)) ||
+	(bit_value == 1 && 
+	 reiserfs_test_le_bit(j, SB_AP_BITMAP (s)[i].bh->b_data) == 0)) {
+	reiserfs_warning (s, "vs-4040: is_reusable: corresponding bit of block %lu does not "
+			  "match required value (i==%d, j==%d) test_bit==%d",
+		block, i, j, reiserfs_test_le_bit (j, SB_AP_BITMAP (s)[i].bh->b_data));
+
+	return 0;
+    }
+
+    if (bit_value == 0 && block == SB_ROOT_BLOCK (s)) {
+	reiserfs_warning (s, "vs-4050: is_reusable: this is root block (%u), "
+			  "it must be busy", SB_ROOT_BLOCK (s));
+	return 0;
+    }
+
+    return 1;
+}
+#endif /* CONFIG_REISERFS_CHECK */
+
+/* Searches the journal structures for a given block number (bmap, off). If the block
+   is found in the reiserfs journal, it suggests the next free block candidate to test. */
+static inline  int is_block_in_journal (struct super_block * s, int bmap, int
+off, int *next)
+{
+    b_blocknr_t tmp;
+
+    if (reiserfs_in_journal (s, bmap, off, 1, &tmp)) {
+	if (tmp) {              /* hint supplied */
+	    *next = tmp;
+	    PROC_INFO_INC( s, scan_bitmap.in_journal_hint );
+	} else {
+	    (*next) = off + 1;          /* inc offset to avoid looping. */
+	    PROC_INFO_INC( s, scan_bitmap.in_journal_nohint );
+	}
+	PROC_INFO_INC( s, scan_bitmap.retry );
+	return 1;
+    }
+    return 0;
+}
+
+/* Searches for a window of zero bits, with given minimum and maximum lengths, in one
+ * bitmap block. */
+static int scan_bitmap_block (struct reiserfs_transaction_handle *th,
+			      int bmap_n, int *beg, int boundary, int min, int max, int unfm)
+{
+    struct super_block *s = th->t_super;
+    struct reiserfs_bitmap_info *bi=&SB_AP_BITMAP(s)[bmap_n];
+    int end, next;
+    int org = *beg;
+
+    BUG_ON (!th->t_trans_id);
+
+    RFALSE(bmap_n >= SB_BMAP_NR (s), "Bitmap %d is out of range (0..%d)",bmap_n, SB_BMAP_NR (s) - 1);
+    PROC_INFO_INC( s, scan_bitmap.bmap );
+/* this is unclear and lacks comments, explain how journal bitmaps
+   work here for the reader.  Convey a sense of the design here. What
+   is a window? */
+/* - I mean `a window of zero bits' as in description of this function - Zam. */
+  
+    if ( !bi ) {
+	reiserfs_warning (s, "NULL bitmap info pointer for bitmap %d", bmap_n);
+	return 0;
+    }
+    if (buffer_locked (bi->bh)) {
+       PROC_INFO_INC( s, scan_bitmap.wait );
+       __wait_on_buffer (bi->bh);
+    }
+
+    while (1) {
+	cont:
+	if (bi->free_count < min)
+		return 0; // No free blocks in this bitmap
+
+	/* search for the first zero bit -- beginning of a window */
+	*beg = reiserfs_find_next_zero_le_bit
+	        ((unsigned long*)(bi->bh->b_data), boundary, *beg);
+  
+	if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block
+				      * cannot contain a zero window of minimum size */
+	    return 0;
+	}
+
+	if (unfm && is_block_in_journal(s,bmap_n, *beg, beg))
+	    continue;
+	/* first zero bit found; we check next bits */
+	for (end = *beg + 1;; end ++) {
+	    if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit (end, bi->bh->b_data)) {
+		next = end;
+		break;
+	    }
+	    /* finding the other end of zero bit window requires looking into journal structures (in
+	     * case of searching for free blocks for unformatted nodes) */
+	    if (unfm && is_block_in_journal(s, bmap_n, end, &next))
+		break;
+	}
+
+	/* now (*beg) points to beginning of zero bits window,
+	 * (end) points to one bit after the window end */
+	if (end - *beg >= min) { /* it seems we have found window of proper size */
+	    int i;
+	    reiserfs_prepare_for_journal (s, bi->bh, 1);
+	    /* try to set all blocks used, checking whether they are still free */
+	    for (i = *beg; i < end; i++) {
+		/* It seems that we should not check in journal again. */
+		if (reiserfs_test_and_set_le_bit (i, bi->bh->b_data)) {
+		    /* bit was set by another process
+		     * while we slept in prepare_for_journal() */
+		    PROC_INFO_INC( s, scan_bitmap.stolen );
+		    if (i >= *beg + min)	{ /* we can continue with a smaller set of allocated blocks,
+					   * if the length of this set is greater than or equal to `min' */
+			end = i;
+			break;
+		    }
+		    /* otherwise we clear all bits that were set ... */
+		    while (--i >= *beg)
+			reiserfs_test_and_clear_le_bit (i, bi->bh->b_data);
+		    reiserfs_restore_prepared_buffer (s, bi->bh);
+		    *beg = org;
+		    /* ... and search again in current block from beginning */
+		    goto cont;	
+		}
+	    }
+	    bi->free_count -= (end - *beg);
+	    journal_mark_dirty (th, s, bi->bh);
+
+	    /* free block count calculation */
+	    reiserfs_prepare_for_journal (s, SB_BUFFER_WITH_SB(s), 1);
+	    PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
+	    journal_mark_dirty (th, s, SB_BUFFER_WITH_SB(s));
+
+	    return end - (*beg);
+	} else {
+	    *beg = next;
+	}
+    }
+}
+
+static int bmap_hash_id(struct super_block *s, u32 id) {
+    char * hash_in = NULL;
+    unsigned long hash;
+    unsigned bm;
+
+    if (id <= 2) {
+	bm = 1;
+    } else {
+        hash_in = (char *)(&id);
+        hash = keyed_hash(hash_in, 4);
+	bm = hash % SB_BMAP_NR(s);
+	if (!bm)
+	    bm = 1;
+    }
+    /* this can only be true when SB_BMAP_NR = 1 */
+    if (bm >= SB_BMAP_NR(s))
+    	bm = 0;
+    return bm;
+}
+
+/*
+ * hashes the id and then returns > 0 if the block group for the
+ * corresponding hash is full
+ */
+static inline int block_group_used(struct super_block *s, u32 id) {
+    int bm;
+    bm = bmap_hash_id(s, id);
+    if (SB_AP_BITMAP(s)[bm].free_count > ((s->s_blocksize << 3) * 60 / 100) ) {
+        return 0;
+    }
+    return 1;
+}
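+
+/*
+ * Illustrative note (assuming 4096-byte blocks, i.e. 32768 bits per bitmap
+ * block): the group counts as used once its bitmap has
+ * 32768 * 60 / 100 = 19660 or fewer free blocks, i.e. once it is roughly
+ * 40% full.
+ */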
+
+/*
+ * the packing is returned in disk byte order
+ */
+u32 reiserfs_choose_packing(struct inode *dir) {
+    u32 packing;
+    if (TEST_OPTION(packing_groups, dir->i_sb)) {
+	u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
+	/*
+	 * some versions of reiserfsck expect packing locality 1 to be
+	 * special
+	 */
+	if (parent_dir == 1 || block_group_used(dir->i_sb,parent_dir))
+            packing = INODE_PKEY(dir)->k_objectid;
+        else
+            packing = INODE_PKEY(dir)->k_dir_id;
+    } else
+        packing = INODE_PKEY(dir)->k_objectid;
+    return packing;
+}
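+
+/*
+ * Sketch of the policy above (no new behaviour): with packing_groups enabled
+ * the chosen packing locality stays the directory's k_dir_id until the block
+ * group hashed from it becomes crowded (or it is the special locality 1),
+ * after which the directory's own k_objectid is used instead.
+ */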
+  
+/* Tries to find contiguous zero bit window (given size) in given region of
+ * bitmap and place new blocks there. Returns number of allocated blocks. */
+static int scan_bitmap (struct reiserfs_transaction_handle *th,
+			b_blocknr_t *start, b_blocknr_t finish,
+			int min, int max, int unfm, unsigned long file_block)
+{
+    int nr_allocated=0;
+    struct super_block * s = th->t_super;
+    /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
+     * - Hans, it is not a block number - Zam. */
+
+    int bm, off;
+    int end_bm, end_off;
+    int off_max = s->s_blocksize << 3;
+
+    BUG_ON (!th->t_trans_id);
+
+    PROC_INFO_INC( s, scan_bitmap.call ); 
+    if ( SB_FREE_BLOCKS(s) <= 0)
+	return 0; // No point in looking for more free blocks
+
+    get_bit_address (s, *start, &bm, &off);
+    get_bit_address (s, finish, &end_bm, &end_off);
+    if (bm > SB_BMAP_NR(s))
+        return 0;
+    if (end_bm > SB_BMAP_NR(s))
+        end_bm = SB_BMAP_NR(s);
+
+    /* When the bitmap is more than 10% free, anyone can allocate.
+     * When it's less than 10% free, only files that already use the
+     * bitmap are allowed. Once less than 5% of the filesystem is free (the
+     * SB_BLOCK_COUNT(s)/20 test below), this restriction is lifted.
+     *
+     * We do this so that files that grow later still have space close to
+     * their original allocation. This improves locality, and presumably
+     * performance as a result.
+     *
+     * This is only an allocation policy and does not make up for getting a
+     * bad hint. Decent hinting must be implemented for this to work well.
+     */
+    if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
+	for (;bm < end_bm; bm++, off = 0) {
+	    if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
+		nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
+	    if (nr_allocated)
+		goto ret;
+        }
+	/* we know from above that start is a reasonable number */
+	get_bit_address (s, *start, &bm, &off);
+    }
+
+    for (;bm < end_bm; bm++, off = 0) {
+	nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
+	if (nr_allocated)
+	    goto ret;
+    }
+
+    nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);
+  
+ ret:
+    *start = bm * off_max + off;
+    return nr_allocated;
+
+}
+
+static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
+				  struct inode *inode, b_blocknr_t block,
+				  int for_unformatted)
+{
+    struct super_block * s = th->t_super;
+    struct reiserfs_super_block * rs;
+    struct buffer_head * sbh;
+    struct reiserfs_bitmap_info *apbi;
+    int nr, offset;
+
+    BUG_ON (!th->t_trans_id);
+
+    PROC_INFO_INC( s, free_block );
+
+    rs = SB_DISK_SUPER_BLOCK (s);
+    sbh = SB_BUFFER_WITH_SB (s);
+    apbi = SB_AP_BITMAP(s);
+
+    get_bit_address (s, block, &nr, &offset);
+
+    if (nr >= sb_bmap_nr (rs)) {
+	reiserfs_warning (s, "vs-4075: reiserfs_free_block: "
+			  "block %lu is out of range on %s",
+			  block, reiserfs_bdevname (s));
+	return;
+    }
+
+    reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;
+
+    /* clear bit for the given block in bit map */
+    if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
+	reiserfs_warning (s, "vs-4080: reiserfs_free_block: "
+			  "free_block (%s:%lu)[dev:blocknr]: bit already cleared",
+			  reiserfs_bdevname (s), block);
+    }
+    apbi[nr].free_count ++;
+    journal_mark_dirty (th, s, apbi[nr].bh);
+
+    reiserfs_prepare_for_journal(s, sbh, 1) ;
+    /* update super block */
+    set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );
+
+    journal_mark_dirty (th, s, sbh);
+    if (for_unformatted)
+        DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+}
+
+void reiserfs_free_block (struct reiserfs_transaction_handle *th, 
+			  struct inode *inode, b_blocknr_t block,
+			  int for_unformatted)
+{
+    struct super_block * s = th->t_super;
+
+    BUG_ON (!th->t_trans_id);
+
+    RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
+    RFALSE(is_reusable (s, block, 1) == 0, "vs-4071: can not free such block");
+    /* mark it before we clear it, just in case */
+    journal_mark_freed(th, s, block) ;
+    _reiserfs_free_block(th, inode, block, for_unformatted) ;
+}
+
+/* preallocated blocks don't need to be run through journal_mark_freed */
+static void reiserfs_free_prealloc_block (struct reiserfs_transaction_handle *th,
+			  struct inode *inode, b_blocknr_t block) {
+    RFALSE(!th->t_super, "vs-4060: trying to free block on nonexistent device");
+    RFALSE(is_reusable (th->t_super, block, 1) == 0, "vs-4070: can not free such block");
+    BUG_ON (!th->t_trans_id);
+    _reiserfs_free_block(th, inode, block, 1) ;
+}
+
+static void __discard_prealloc (struct reiserfs_transaction_handle * th,
+				struct reiserfs_inode_info *ei)
+{
+    unsigned long save = ei->i_prealloc_block ;
+    int dirty = 0;
+    struct inode *inode = &ei->vfs_inode;
+    BUG_ON (!th->t_trans_id);
+#ifdef CONFIG_REISERFS_CHECK
+    if (ei->i_prealloc_count < 0)
+	reiserfs_warning (th->t_super, "zam-4001:%s: inode has negative prealloc blocks count.", __FUNCTION__ );
+#endif
+    while (ei->i_prealloc_count > 0) {
+	reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+	ei->i_prealloc_block++;
+	ei->i_prealloc_count --;
+	dirty = 1;
+    }
+    if (dirty)
+    	reiserfs_update_sd(th, inode);
+    ei->i_prealloc_block = save;
+    list_del_init(&(ei->i_prealloc_list));
+}
+
+/* FIXME: this should be an inline function */
+void reiserfs_discard_prealloc (struct reiserfs_transaction_handle *th, 
+				struct inode *inode)
+{
+    struct reiserfs_inode_info *ei = REISERFS_I(inode);
+    BUG_ON (!th->t_trans_id);
+    if (ei->i_prealloc_count)
+	__discard_prealloc(th, ei);
+}
+
+void reiserfs_discard_all_prealloc (struct reiserfs_transaction_handle *th)
+{
+    struct list_head * plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;
+
+    BUG_ON (!th->t_trans_id);
+
+    while (!list_empty(plist)) {
+	struct reiserfs_inode_info *ei;
+	ei = list_entry(plist->next, struct reiserfs_inode_info, i_prealloc_list);
+#ifdef CONFIG_REISERFS_CHECK
+	if (!ei->i_prealloc_count) {
+	    reiserfs_warning (th->t_super, "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.", __FUNCTION__);
+	}
+#endif
+	__discard_prealloc(th, ei);
+    }
+}
+
+void reiserfs_init_alloc_options (struct super_block *s)
+{
+    set_bit (_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
+    set_bit (_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
+    set_bit (_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
+}
+
+/* block allocator related options are parsed here */
+int reiserfs_parse_alloc_options(struct super_block * s, char * options)
+{
+    char * this_char, * value;
+
+    REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */
+
+    while ( (this_char = strsep (&options, ":")) != NULL ) {
+	if ((value = strchr (this_char, '=')) != NULL)
+	    *value++ = 0;
+
+	if (!strcmp(this_char, "concentrating_formatted_nodes")) {
+	    int temp;
+	    SET_OPTION(concentrating_formatted_nodes);
+	    temp = (value && *value) ? simple_strtoul (value, &value, 0) : 10;
+	    if (temp <= 0 || temp > 100) {
+		REISERFS_SB(s)->s_alloc_options.border = 10;
+	    } else {
+		REISERFS_SB(s)->s_alloc_options.border = 100 / temp;
+	   }
+	    continue;
+	}
+	if (!strcmp(this_char, "displacing_large_files")) {
+	    SET_OPTION(displacing_large_files);
+	    REISERFS_SB(s)->s_alloc_options.large_file_size =
+		(value && *value) ? simple_strtoul (value, &value, 0) : 16;
+	    continue;
+	}
+	if (!strcmp(this_char, "displacing_new_packing_localities")) {
+	    SET_OPTION(displacing_new_packing_localities);
+	    continue;
+	};
+
+	if (!strcmp(this_char, "old_hashed_relocation")) {
+	    SET_OPTION(old_hashed_relocation);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "new_hashed_relocation")) {
+	    SET_OPTION(new_hashed_relocation);
+	    continue;
+	}
+
+        if (!strcmp(this_char, "dirid_groups")) {
+	    SET_OPTION(dirid_groups);
+	    continue;
+        }
+        if (!strcmp(this_char, "oid_groups")) {
+	    SET_OPTION(oid_groups);
+	    continue;
+        }
+        if (!strcmp(this_char, "packing_groups")) {
+	    SET_OPTION(packing_groups);
+	    continue;
+        }
+	if (!strcmp(this_char, "hashed_formatted_nodes")) {
+	    SET_OPTION(hashed_formatted_nodes);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "skip_busy")) {
+	    SET_OPTION(skip_busy);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "hundredth_slices")) {
+	    SET_OPTION(hundredth_slices);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "old_way")) {
+	    SET_OPTION(old_way);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "displace_based_on_dirid")) {
+	    SET_OPTION(displace_based_on_dirid);
+	    continue;
+	}
+
+	if (!strcmp(this_char, "preallocmin")) {
+	    REISERFS_SB(s)->s_alloc_options.preallocmin =
+		(value && *value) ? simple_strtoul (value, &value, 0) : 4;
+	    continue;
+	}
+
+	if (!strcmp(this_char, "preallocsize")) {
+	    REISERFS_SB(s)->s_alloc_options.preallocsize =
+		(value && *value) ? simple_strtoul (value, &value, 0) : PREALLOCATION_SIZE;
+	    continue;
+	}
+
+	reiserfs_warning (s, "zam-4001: %s : unknown option - %s",
+			  __FUNCTION__ , this_char);
+	return 1;
+      }
+  
+    reiserfs_warning (s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
+    return 0;
+}
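+
+/*
+ * Example (illustrative; the string parsed here is whatever the caller hands
+ * in, typically the value of an "alloc=..." mount option): parsing
+ * "skip_busy:dirid_groups:preallocsize=17" sets the skip_busy and
+ * dirid_groups bits and sets the preallocation size to 17 blocks; when no
+ * value is supplied, the parser falls back to PREALLOCATION_SIZE (9).
+ */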
+  
+static inline void new_hashed_relocation (reiserfs_blocknr_hint_t * hint)
+{
+    char * hash_in;
+    if (hint->formatted_node) {
+	    hash_in = (char*)&hint->key.k_dir_id;
+    } else {
+	if (!hint->inode) {
+	    //hint->search_start = hint->beg;
+	    hash_in = (char*)&hint->key.k_dir_id;
+	} else 
+	    if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
+		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
+	    else
+		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);
+      }
+
+    hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
+}
+
+/*
+ * Relocation based on dirid: files are hashed by directory id into a given
+ * bitmap block group. Formatted nodes are unaffected; a separate policy covers them
+ */
+static void
+dirid_groups (reiserfs_blocknr_hint_t *hint)
+{
+    unsigned long hash;
+    __u32 dirid = 0;
+    int bm = 0;
+    struct super_block *sb = hint->th->t_super;
+    if (hint->inode)
+	dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
+    else if (hint->formatted_node)
+        dirid = hint->key.k_dir_id;
+
+    if (dirid) {
+	bm = bmap_hash_id(sb, dirid);
+	hash = bm * (sb->s_blocksize << 3);
+	/* give a portion of the block group to metadata */
+	if (hint->inode)
+	    hash += sb->s_blocksize/2;
+	hint->search_start = hash;
+    }
+}
+
+/*
+ * Relocation based on oid: files are hashed by object id into a given
+ * bitmap block group. Formatted nodes are unaffected; a separate policy covers them
+ */
+static void
+oid_groups (reiserfs_blocknr_hint_t *hint)
+{
+    if (hint->inode) {
+	unsigned long hash;
+	__u32 oid;
+	__u32 dirid;
+	int bm;
+
+	dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
+
+	/* keep the root dir and its first set of subdirs close to
+	 * the start of the disk
+	 */
+	if (dirid <= 2)
+	    hash = (hint->inode->i_sb->s_blocksize << 3);
+	else {
+	    oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
+	    bm = bmap_hash_id(hint->inode->i_sb, oid);
+	    hash = bm * (hint->inode->i_sb->s_blocksize << 3);
+	}
+	hint->search_start = hash;
+    }
+}
+
+/* returns 1 if it finds an indirect item and gets valid hint info
+ * from it, otherwise 0
+ */
+static int get_left_neighbor(reiserfs_blocknr_hint_t *hint)
+{
+    struct path * path;
+    struct buffer_head * bh;
+    struct item_head * ih;
+    int pos_in_item;
+    __u32 * item;
+    int ret = 0;
+
+    if (!hint->path)		/* reiserfs code can call this function w/o pointer to path
+				 * structure supplied; then we rely on supplied search_start */
+	return 0;
+
+    path = hint->path;
+    bh = get_last_bh(path);
+    RFALSE( !bh, "green-4002: Illegal path specified to get_left_neighbor");
+    ih = get_ih(path);
+    pos_in_item = path->pos_in_item;
+    item = get_item (path);
+
+    hint->search_start = bh->b_blocknr;
+
+    if (!hint->formatted_node && is_indirect_le_ih (ih)) {
+	/* for indirect item: go to left and look for the first non-hole entry
+	   in the indirect item */
+	if (pos_in_item == I_UNFM_NUM (ih))
+	    pos_in_item--;
+//	    pos_in_item = I_UNFM_NUM (ih) - 1;
+	while (pos_in_item >= 0) {
+	    int t=get_block_num(item,pos_in_item);
+	    if (t) {
+		hint->search_start = t;
+		ret = 1;
+		break;
+	    }
+	    pos_in_item --;
+	}
+    }
+
+    /* does result value fit into specified region? */
+    return ret;
+}
+
+/* Should be: if this is a formatted node, then try to put it on the first part
+   of the device, the percentage specified with the mount option; otherwise try
+   to put it on the rest of the device.  This is not to say it is good code to
+   do so, but the effect should be measured.  */
+static inline void set_border_in_hint(struct super_block *s, reiserfs_blocknr_hint_t *hint)
+{
+    b_blocknr_t border = SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;
+
+    if (hint->formatted_node)
+	hint->end = border - 1;
+    else
+	hint->beg = border;
+}
+
+static inline void displace_large_file(reiserfs_blocknr_hint_t *hint)
+{
+    if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
+	hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id), 4) % (hint->end - hint->beg);
+    else
+	hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid), 4) % (hint->end - hint->beg);
+}
+
+static inline void hash_formatted_node(reiserfs_blocknr_hint_t *hint)
+{
+   char * hash_in;
+
+   if (!hint->inode)
+	hash_in = (char*)&hint->key.k_dir_id;
+    else if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
+	hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
+    else
+	hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);
+
+	hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
+}
+
+static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *hint)
+{
+    return hint->block == REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
+}
+
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+static inline void displace_new_packing_locality (reiserfs_blocknr_hint_t *hint)
+{
+    struct reiserfs_key * key = &hint->key;
+
+    hint->th->displace_new_blocks = 0;
+    hint->search_start = hint->beg + keyed_hash((char*)(&key->k_objectid),4) % (hint->end - hint->beg);
+}
+#endif
+
+static inline int old_hashed_relocation (reiserfs_blocknr_hint_t * hint)
+{
+    b_blocknr_t border;
+    u32 hash_in;
+    
+    if (hint->formatted_node || hint->inode == NULL) {
+	return 0;
+      }
+
+    hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
+    border = hint->beg + (u32) keyed_hash(((char *) (&hash_in)), 4) % (hint->end - hint->beg - 1);
+    if (border > hint->search_start)
+	hint->search_start = border;
+
+    return 1;
+}
+  
+static inline int old_way (reiserfs_blocknr_hint_t * hint)
+{
+    b_blocknr_t border;
+    
+    if (hint->formatted_node || hint->inode == NULL) {
+	return 0;
+    }
+  
+    border = hint->beg + le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end - hint->beg);
+    if (border > hint->search_start)
+	hint->search_start = border;
+
+    return 1;
+}
+
+static inline void hundredth_slices (reiserfs_blocknr_hint_t * hint)
+{
+    struct reiserfs_key * key = &hint->key;
+    b_blocknr_t slice_start;
+
+    slice_start = (keyed_hash((char*)(&key->k_dir_id),4) % 100) * (hint->end / 100);
+    if ( slice_start > hint->search_start || slice_start + (hint->end / 100) <= hint->search_start) {
+	hint->search_start = slice_start;
+    }
+}
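+
+/*
+ * Sketch of the policy above: the device is treated as 100 slices of
+ * (hint->end / 100) blocks each; the hash of the directory id picks one
+ * slice, and the search start is moved to that slice's beginning unless it
+ * already lies inside the slice.
+ */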
+  
+static void determine_search_start(reiserfs_blocknr_hint_t *hint,
+					  int amount_needed)
+{
+    struct super_block *s = hint->th->t_super;
+    int unfm_hint;
+
+    hint->beg = 0;
+    hint->end = SB_BLOCK_COUNT(s) - 1;
+
+    /* This is former border algorithm. Now with tunable border offset */
+    if (concentrating_formatted_nodes(s))
+	set_border_in_hint(s, hint);
+
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    /* whenever we create a new directory, we displace it.  At first we will
+       hash for location, later we might look for a moderately empty place for
+       it */
+    if (displacing_new_packing_localities(s)
+	&& hint->th->displace_new_blocks) {
+	displace_new_packing_locality(hint);
+
+	/* we do not continue determine_search_start,
+	 * if new packing locality is being displaced */
+	return;
+    }				      
+#endif
+  
+    /* all persons should feel encouraged to add more special cases here and
+     * test them */
+
+    if (displacing_large_files(s) && !hint->formatted_node
+	&& this_blocknr_allocation_would_make_it_a_large_file(hint)) {
+	displace_large_file(hint);
+	return;
+    }
+
+    /* if none of our special cases is relevant, use the left neighbor in the
+       tree order of the new node we are allocating for */
+    if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes,s)) {
+        hash_formatted_node(hint);
+	return;
+    }
+
+    unfm_hint = get_left_neighbor(hint);
+
+    /* Mimic the old block allocator behaviour: if the VFS allowed preallocation,
+       new blocks are displaced based on directory ID. Also, if the suggested search_start
+       is less than the last preallocated block, we start searching from it, assuming that
+       HDD dataflow is faster in the forward direction */
+    if ( TEST_OPTION(old_way, s)) {
+	if (!hint->formatted_node) {
+	    if ( !reiserfs_hashed_relocation(s))
+		old_way(hint);
+	    else if (!reiserfs_no_unhashed_relocation(s))
+		old_hashed_relocation(hint);
+
+	    if ( hint->inode && hint->search_start < REISERFS_I(hint->inode)->i_prealloc_block)
+		hint->search_start = REISERFS_I(hint->inode)->i_prealloc_block;
+	}
+	return;
+    }
+
+    /* This is an approach proposed by Hans */
+    if ( TEST_OPTION(hundredth_slices, s) && ! (displacing_large_files(s) && !hint->formatted_node)) {
+	hundredth_slices(hint);
+	return;
+    }
+
+    /* old_hashed_relocation only works on unformatted */
+    if (!unfm_hint && !hint->formatted_node &&
+        TEST_OPTION(old_hashed_relocation, s))
+    {
+	old_hashed_relocation(hint);
+    }
+    /* new_hashed_relocation works with both formatted/unformatted nodes */
+    if ((!unfm_hint || hint->formatted_node) &&
+        TEST_OPTION(new_hashed_relocation, s))
+    {
+	new_hashed_relocation(hint);
+    }
+    /* dirid grouping works only on unformatted nodes */
+    if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups,s))
+    {
+        dirid_groups(hint);
+    }
+
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    if (hint->formatted_node && TEST_OPTION(dirid_groups,s))
+    {
+        dirid_groups(hint);
+    }
+#endif
+
+    /* oid grouping works only on unformatted nodes */
+    if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups,s))
+    {
+        oid_groups(hint);
+    }
+    return;
+}
+
+static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
+{
+    /* make minimum size a mount option and benchmark both ways */
+    /* we preallocate blocks only for regular files above a certain size */
+    /* benchmark preallocating always and see what happens */
+
+    hint->prealloc_size = 0;
+
+    if (!hint->formatted_node && hint->preallocate) {
+	if (S_ISREG(hint->inode->i_mode)
+	    && hint->inode->i_size >= REISERFS_SB(hint->th->t_super)->s_alloc_options.preallocmin * hint->inode->i_sb->s_blocksize)
+	    hint->prealloc_size = REISERFS_SB(hint->th->t_super)->s_alloc_options.preallocsize - 1;
+    }
+    return CARRY_ON;
+}
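+
+/*
+ * Illustrative example (using the parser's fallback values above, i.e.
+ * preallocmin=4 and preallocsize=PREALLOCATION_SIZE=9): once a regular file
+ * has reached 4 blocks in size, each allocation for it asks for
+ * prealloc_size = 9 - 1 = 8 additional blocks of preallocation.
+ */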
+
+/* XXX I know it could be merged with upper-level function;
+   but maybe the resulting function would be too complex. */
+static inline int allocate_without_wrapping_disk (reiserfs_blocknr_hint_t * hint,
+					 b_blocknr_t * new_blocknrs,
+					 b_blocknr_t start, b_blocknr_t finish,
+					 int min,
+					 int amount_needed, int prealloc_size)
+{
+    int rest = amount_needed;
+    int nr_allocated;
+  
+    while (rest > 0 && start <= finish) {
+	nr_allocated = scan_bitmap (hint->th, &start, finish, min,
+				    rest + prealloc_size, !hint->formatted_node,
+				    hint->block);
+
+	if (nr_allocated == 0)	/* no new blocks allocated, return */
+	    break;
+	
+	/* fill free_blocknrs array first */
+	while (rest > 0 && nr_allocated > 0) {
+	    * new_blocknrs ++ = start ++;
+	    rest --; nr_allocated --;
+	}
+
+	/* do we have something to fill prealloc. array also ? */
+	if (nr_allocated > 0) {
+	    /* this means prealloc_size was greater than 0 and we are preallocating */
+	    list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
+		     &SB_JOURNAL(hint->th->t_super)->j_prealloc_list);
+	    REISERFS_I(hint->inode)->i_prealloc_block = start;
+	    REISERFS_I(hint->inode)->i_prealloc_count = nr_allocated;
+	    break;
+	}
+    }
+
+    return (amount_needed - rest);
+}
+
+static inline int blocknrs_and_prealloc_arrays_from_search_start
+    (reiserfs_blocknr_hint_t *hint, b_blocknr_t *new_blocknrs, int amount_needed)
+{
+    struct super_block *s = hint->th->t_super;
+    b_blocknr_t start = hint->search_start;
+    b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
+    int passno = 0;
+    int nr_allocated = 0;
+    int bigalloc = 0;
+
+    determine_prealloc_size(hint);
+    if (!hint->formatted_node) {
+        int quota_ret;
+#ifdef REISERQUOTA_DEBUG
+	reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: allocating %d blocks id=%u", amount_needed, hint->inode->i_uid);
+#endif
+	quota_ret = DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
+	if (quota_ret)    /* Quota exceeded? */
+	    return QUOTA_EXCEEDED;
+	if (hint->preallocate && hint->prealloc_size ) {
+#ifdef REISERQUOTA_DEBUG
+	    reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: allocating (prealloc) %d blocks id=%u", hint->prealloc_size, hint->inode->i_uid);
+#endif
+	    quota_ret = DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode, hint->prealloc_size);
+	    if (quota_ret)
+		hint->preallocate=hint->prealloc_size=0;
+	}
+	/* for unformatted nodes, force large allocations */
+	bigalloc = amount_needed;
+    }
+
+    do {
+	/* in bigalloc mode, nr_allocated should stay zero until
+	 * the entire allocation is filled
+	 */
+	if (unlikely(bigalloc && nr_allocated)) {
+	    reiserfs_warning(s, "bigalloc is %d, nr_allocated %d\n",
+	    bigalloc, nr_allocated);
+	    /* reset things to a sane value */
+	    bigalloc = amount_needed - nr_allocated;
+	}
+	/*
+	 * try pass 0 and pass 1 looking for a nice big
+	 * contiguous allocation.  Then reset and look
+	 * for anything you can find.
+	 */
+	if (passno == 2 && bigalloc) {
+	    passno = 0;
+	    bigalloc = 0;
+	}
+	switch (passno++) {
+        case 0: /* Search from hint->search_start to end of disk */
+	    start = hint->search_start;
+	    finish = SB_BLOCK_COUNT(s) - 1;
+	    break;
+        case 1: /* Search from hint->beg to hint->search_start */
+	    start = hint->beg;
+	    finish = hint->search_start;
+	    break;
+	case 2: /* Last chance: Search from 0 to hint->beg */
+	    start = 0;
+	    finish = hint->beg;
+	    break;
+	default: /* We've tried searching everywhere, not enough space */
+	    /* Free the blocks */
+	    if (!hint->formatted_node) {
+#ifdef REISERQUOTA_DEBUG
+		reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: freeing (nospace) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated, hint->inode->i_uid);
+#endif
+		DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated);     /* Free not allocated blocks */
+	    }
+  	    while (nr_allocated --)
+		reiserfs_free_block(hint->th, hint->inode, new_blocknrs[nr_allocated], !hint->formatted_node);
+
+	    return NO_DISK_SPACE;
+	}
+    } while ((nr_allocated += allocate_without_wrapping_disk (hint,
+			    new_blocknrs + nr_allocated, start, finish,
+			    bigalloc ? bigalloc : 1,
+			    amount_needed - nr_allocated,
+			    hint->prealloc_size))
+			< amount_needed);
+    if ( !hint->formatted_node &&
+         amount_needed + hint->prealloc_size >
+	 nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
+    /* Some of the preallocated blocks were not allocated */
+#ifdef REISERQUOTA_DEBUG
+	reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: freeing (failed prealloc) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated - REISERFS_I(hint->inode)->i_prealloc_count, hint->inode->i_uid);
+#endif
+	DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
+	                         hint->prealloc_size - nr_allocated -
+				 REISERFS_I(hint->inode)->i_prealloc_count);
+    }
+
+    return CARRY_ON;
+}
+
+/* grab new blocknrs from preallocated list */
+/* return amount still needed after using them */
+static int use_preallocated_list_if_available (reiserfs_blocknr_hint_t *hint,
+					       b_blocknr_t *new_blocknrs, int amount_needed)
+{
+    struct inode * inode = hint->inode;
+
+    if (REISERFS_I(inode)->i_prealloc_count > 0) {
+	while (amount_needed) {
+
+	    *new_blocknrs ++ = REISERFS_I(inode)->i_prealloc_block ++;
+	    REISERFS_I(inode)->i_prealloc_count --;
+
+	    amount_needed --;
+
+	    if (REISERFS_I(inode)->i_prealloc_count <= 0) {
+		list_del(&REISERFS_I(inode)->i_prealloc_list);  
+		break;
+	    }
+	}
+      }
+    /* return amount still needed after using preallocated blocks */
+    return amount_needed;
+}
+
+int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
+			       b_blocknr_t * new_blocknrs, int amount_needed,
+			       int reserved_by_us /* Amount of blocks we have
+						      already reserved */)
+{
+    int initial_amount_needed = amount_needed;
+    int ret;
+    struct super_block *s = hint->th->t_super;
+
+    /* Check if there is enough space, taking into account reserved space */
+    if ( SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
+	 amount_needed - reserved_by_us)
+        return NO_DISK_SPACE;
+    /* should this be if !hint->inode &&  hint->preallocate? */
+    /* do you mean hint->formatted_node can be removed ? - Zam */
+    /* hint->formatted_node cannot be removed because we try to access
+       inode information here, and there is often no inode associated with
+       metadata allocations - green */
+
+    if (!hint->formatted_node && hint->preallocate) {
+	amount_needed = use_preallocated_list_if_available
+	    (hint, new_blocknrs, amount_needed);
+	if (amount_needed == 0)	/* all blocknrs we need we got from
+                                   prealloc. list */
+	    return CARRY_ON;
+	new_blocknrs += (initial_amount_needed - amount_needed);
+    }
+
+    /* find search start and save it in hint structure */
+    determine_search_start(hint, amount_needed);
+    if (hint->search_start >= SB_BLOCK_COUNT(s))
+        hint->search_start = SB_BLOCK_COUNT(s) - 1;
+
+    /* allocation itself; fill new_blocknrs and preallocation arrays */
+    ret = blocknrs_and_prealloc_arrays_from_search_start
+	(hint, new_blocknrs, amount_needed);
+
+    /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
+     * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
+     * variant) */
+
+    if (ret != CARRY_ON) {
+	while (amount_needed ++ < initial_amount_needed) {
+	    reiserfs_free_block(hint->th, hint->inode, *(--new_blocknrs), 1);
+	}
+    }
+    return ret;
+}
+
+/* These 2 functions are here to provide block reservation to the rest of the kernel */
+/* Reserve @blocks blocks in the fs pointed to by @sb. The caller must make sure
+   there are actually this many blocks available on the FS */
+void reiserfs_claim_blocks_to_be_allocated( 
+				      struct super_block *sb, /* super block of
+							        filesystem where
+								blocks should be
+								reserved */
+				      int blocks /* How much to reserve */
+					  )
+{
+
+    /* Fast case, if reservation is zero - exit immediately. */
+    if ( !blocks )
+	return;
+
+    spin_lock(&REISERFS_SB(sb)->bitmap_lock);
+    REISERFS_SB(sb)->reserved_blocks += blocks;
+    spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
+}
+
+/* Unreserve @blocks blocks in the fs pointed to by @sb */
+void reiserfs_release_claimed_blocks( 
+				struct super_block *sb, /* super block of
+							  filesystem where
+							  blocks should be
+							  reserved */
+				int blocks /* How much to unreserve */
+					  )
+{
+
+    /* Fast case, if unreservation is zero - exit immediately. */
+    if ( !blocks )
+	return;
+
+    spin_lock(&REISERFS_SB(sb)->bitmap_lock);
+    REISERFS_SB(sb)->reserved_blocks -= blocks;
+    spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
+    RFALSE( REISERFS_SB(sb)->reserved_blocks < 0, "amount of blocks reserved became negative?");
+}
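+
+/*
+ * Typical usage sketch (illustrative, not quoted from any particular caller):
+ *
+ *     reiserfs_claim_blocks_to_be_allocated(sb, n);
+ *     ...
+ *     reiserfs_allocate_blocknrs(hint, blocknrs, needed, n); // n = reserved_by_us
+ *     ...
+ *     reiserfs_release_claimed_blocks(sb, n_still_unused);
+ *
+ * so that space this caller is counting on cannot be consumed by others in
+ * the meantime.
+ */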
+
+/* This function estimates how many pages we will be able to write to the FS;
+   it is used for reiserfs_file_write() purposes for now. */
+int reiserfs_can_fit_pages ( struct super_block *sb /* superblock of filesystem
+						       to estimate space */ )
+{
+	int space;
+
+	spin_lock(&REISERFS_SB(sb)->bitmap_lock);
+	space = (SB_FREE_BLOCKS(sb) - REISERFS_SB(sb)->reserved_blocks) >> ( PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
+	spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
+
+	return space>0?space:0;
+}
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
new file mode 100644
index 0000000..d1514a9
--- /dev/null
+++ b/fs/reiserfs/dir.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/stat.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <asm/uaccess.h>
+
+extern struct reiserfs_key  MIN_KEY;
+
+static int reiserfs_readdir (struct file *, void *, filldir_t);
+static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) ;
+
+struct file_operations reiserfs_dir_operations = {
+    .read	= generic_read_dir,
+    .readdir	= reiserfs_readdir,
+    .fsync	= reiserfs_dir_fsync,
+    .ioctl	= reiserfs_ioctl,
+};
+
+static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) {
+  struct inode *inode = dentry->d_inode;
+  int err;
+  reiserfs_write_lock(inode->i_sb);
+  err = reiserfs_commit_for_inode(inode) ;
+  reiserfs_write_unlock(inode->i_sb) ;
+  if (err < 0)
+      return err;
+  return 0;
+}
+
+
+#define store_ih(where,what) copy_item_head (where, what)
+
+//
+static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+    struct inode *inode = filp->f_dentry->d_inode;
+    struct cpu_key pos_key;	/* key of current position in the directory (key of directory entry) */
+    INITIALIZE_PATH (path_to_entry);
+    struct buffer_head * bh;
+    int item_num, entry_num;
+    const struct reiserfs_key * rkey;
+    struct item_head * ih, tmp_ih;
+    int search_res;
+    char * local_buf;
+    loff_t next_pos;
+    char small_buf[32] ; /* avoid kmalloc if we can */
+    struct reiserfs_dir_entry de;
+    int ret = 0;
+
+    reiserfs_write_lock(inode->i_sb);
+
+    reiserfs_check_lock_depth(inode->i_sb, "readdir") ;
+
+    /* form a key to search for the next directory entry, using the f_pos
+       field of the file structure */
+    make_cpu_key (&pos_key, inode, (filp->f_pos) ? (filp->f_pos) : DOT_OFFSET,
+		  TYPE_DIRENTRY, 3);
+    next_pos = cpu_key_k_offset (&pos_key);
+
+    /*  reiserfs_warning (inode->i_sb, "reiserfs_readdir 1: f_pos = %Ld", filp->f_pos);*/
+
+    path_to_entry.reada = PATH_READA;
+    while (1) {
+    research:
+	/* search the directory item, containing entry with specified key */
+	search_res = search_by_entry_key (inode->i_sb, &pos_key, &path_to_entry, &de);
+	if (search_res == IO_ERROR) {
+	    // FIXME: we could just skip part of directory which could
+	    // not be read
+	    ret = -EIO;
+	    goto out;
+	}
+	entry_num = de.de_entry_num;
+	bh = de.de_bh;
+	item_num = de.de_item_num;
+	ih = de.de_ih;
+	store_ih (&tmp_ih, ih);
+		
+	/* we must have found an item, that is, an item of this directory */
+	RFALSE( COMP_SHORT_KEYS (&(ih->ih_key), &pos_key),
+		"vs-9000: found item %h does not match to dir we readdir %K",
+		ih, &pos_key);
+	RFALSE( item_num > B_NR_ITEMS (bh) - 1,
+		"vs-9005 item_num == %d, item amount == %d", 
+		item_num, B_NR_ITEMS (bh));
+      
+	/* and the entry number must be no more than the number of entries in the item */
+	RFALSE( I_ENTRY_COUNT (ih) < entry_num,
+		"vs-9010: entry number is too big %d (%d)", 
+		entry_num, I_ENTRY_COUNT (ih));
+
+	if (search_res == POSITION_FOUND || entry_num < I_ENTRY_COUNT (ih)) {
+	    /* go through all entries in the directory item, beginning from the entry that has been found */
+	    struct reiserfs_de_head * deh = B_I_DEH (bh, ih) + entry_num;
+
+	    for (; entry_num < I_ENTRY_COUNT (ih); entry_num ++, deh ++) {
+		int d_reclen;
+		char * d_name;
+		off_t d_off;
+		ino_t d_ino;
+
+		if (!de_visible (deh))
+		    /* it is hidden entry */
+		    continue;
+		d_reclen = entry_length (bh, ih, entry_num);
+		d_name = B_I_DEH_ENTRY_FILE_NAME (bh, ih, deh);
+		if (!d_name[d_reclen - 1])
+		    d_reclen = strlen (d_name);
+	
+		if (d_reclen > REISERFS_MAX_NAME(inode->i_sb->s_blocksize)){
+		    /* too big to send back to VFS */
+		    continue ;
+		}
+
+                /* Ignore the .reiserfs_priv entry */
+                if (reiserfs_xattrs (inode->i_sb) &&
+                    !old_format_only(inode->i_sb) &&
+                    filp->f_dentry == inode->i_sb->s_root &&
+                    REISERFS_SB(inode->i_sb)->priv_root &&
+                    REISERFS_SB(inode->i_sb)->priv_root->d_inode &&
+                    deh_objectid(deh) == le32_to_cpu (INODE_PKEY(REISERFS_SB(inode->i_sb)->priv_root->d_inode)->k_objectid)) {
+                  continue;
+                }
+
+		d_off = deh_offset (deh);
+		filp->f_pos = d_off ;
+		d_ino = deh_objectid (deh);
+		if (d_reclen <= 32) {
+		  local_buf = small_buf ;
+		} else {
+		    local_buf = reiserfs_kmalloc(d_reclen, GFP_NOFS, inode->i_sb) ;
+		    if (!local_buf) {
+			pathrelse (&path_to_entry);
+			ret = -ENOMEM ;
+			goto out;
+		    }
+		    if (item_moved (&tmp_ih, &path_to_entry)) {
+			reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+			goto research;
+		    }
+		}
+		// Note that we copy the name to user space via a temporary
+		// buffer (local_buf), because filldir will block if the
+		// user space buffer is swapped out; in the meantime the
+		// entry can move somewhere else
+		memcpy (local_buf, d_name, d_reclen);
+		if (filldir (dirent, local_buf, d_reclen, d_off, d_ino, 
+		             DT_UNKNOWN) < 0) {
+		    if (local_buf != small_buf) {
+			reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+		    }
+		    goto end;
+		}
+		if (local_buf != small_buf) {
+		    reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+		}
+
+		// the next entry should be looked up at this offset
+		next_pos = deh_offset (deh) + 1;
+
+		if (item_moved (&tmp_ih, &path_to_entry)) {
+		    goto research;
+		}
+	    } /* for */
+	}
+
+	if (item_num != B_NR_ITEMS (bh) - 1)
+	    // end of directory has been reached
+	    goto end;
+
+	/* the item we went through is the last item of the node. Using the right
+	   delimiting key, check whether this is the end of the directory */
+	rkey = get_rkey (&path_to_entry, inode->i_sb);
+	if (! comp_le_keys (rkey, &MIN_KEY)) {
+	    /* set pos_key to the smallest key that is greater
+	       than the key of the last entry in the item */
+	    set_cpu_key_k_offset (&pos_key, next_pos);
+	    continue;
+	}
+
+	if ( COMP_SHORT_KEYS (rkey, &pos_key)) {
+	    // end of directory has been reached
+	    goto end;
+	}
+	
+	/* directory continues in the right neighboring block */
+	set_cpu_key_k_offset (&pos_key, le_key_k_offset (KEY_FORMAT_3_5, rkey));
+
+    } /* while */
+
+
+ end:
+    filp->f_pos = next_pos;
+    pathrelse (&path_to_entry);
+    reiserfs_check_path(&path_to_entry) ;
+ out:
+    reiserfs_write_unlock(inode->i_sb);
+    return ret;
+}
+
+/* compose directory item containing "." and ".." entries (entries are
+   not aligned to 4 byte boundary) */
+/* the last four params are LE */
+void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid,
+			     __u32 par_dirid, __u32 par_objid)
+{
+    struct reiserfs_de_head * deh;
+
+    memset (body, 0, EMPTY_DIR_SIZE_V1);
+    deh = (struct reiserfs_de_head *)body;
+    
+    /* direntry header of "." */
+    put_deh_offset( &(deh[0]), DOT_OFFSET );
+    /* these two are from make_le_item_head, and are LE */
+    deh[0].deh_dir_id = dirid;
+    deh[0].deh_objectid = objid;
+    deh[0].deh_state = 0; /* Endian safe if 0 */
+    put_deh_location( &(deh[0]), EMPTY_DIR_SIZE_V1 - strlen( "." ));
+    mark_de_visible(&(deh[0]));
+  
+    /* direntry header of ".." */
+    put_deh_offset( &(deh[1]), DOT_DOT_OFFSET);
+    /* key of ".." for the root directory */
+    /* these two are from the inode, and are LE */
+    deh[1].deh_dir_id = par_dirid;
+    deh[1].deh_objectid = par_objid;
+    deh[1].deh_state = 0; /* Endian safe if 0 */
+    put_deh_location( &(deh[1]), deh_location( &(deh[0]) ) - strlen( ".." ) );
+    mark_de_visible(&(deh[1]));
+
+    /* copy ".." and "." */
+    memcpy (body + deh_location( &(deh[0]) ), ".", 1);
+    memcpy (body + deh_location( &(deh[1]) ), "..", 2);
+}
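+
+/*
+ * Resulting layout (sketch of the item built above): the two
+ * struct reiserfs_de_head headers sit at the front of the body, and the
+ * names are packed at its tail -- "." occupies the last byte
+ * (offset EMPTY_DIR_SIZE_V1 - 1) and ".." the two bytes just before it.
+ */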
+
+/* compose directory item containing "." and ".." entries */
+void make_empty_dir_item (char * body, __u32 dirid, __u32 objid,
+			  __u32 par_dirid, __u32 par_objid)
+{
+    struct reiserfs_de_head * deh;
+
+    memset (body, 0, EMPTY_DIR_SIZE);
+    deh = (struct reiserfs_de_head *)body;
+    
+    /* direntry header of "." */
+    put_deh_offset( &(deh[0]), DOT_OFFSET );
+    /* these two are from make_le_item_head, and are LE */
+    deh[0].deh_dir_id = dirid;
+    deh[0].deh_objectid = objid;
+    deh[0].deh_state = 0; /* Endian safe if 0 */
+    put_deh_location( &(deh[0]), EMPTY_DIR_SIZE - ROUND_UP( strlen( "." ) ) );
+    mark_de_visible(&(deh[0]));
+  
+    /* direntry header of ".." */
+    put_deh_offset( &(deh[1]), DOT_DOT_OFFSET );
+    /* key of ".." for the root directory */
+    /* these two are from the inode, and are LE */
+    deh[1].deh_dir_id = par_dirid;
+    deh[1].deh_objectid = par_objid;
+    deh[1].deh_state = 0; /* Endian safe if 0 */
+    put_deh_location( &(deh[1]), deh_location( &(deh[0])) - ROUND_UP( strlen( ".." ) ) );
+    mark_de_visible(&(deh[1]));
+
+    /* copy ".." and "." */
+    memcpy (body + deh_location( &(deh[0]) ), ".", 1);
+    memcpy (body + deh_location( &(deh[1]) ), "..", 2);
+}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
new file mode 100644
index 0000000..2118db2
--- /dev/null
+++ b/fs/reiserfs/do_balan.c
@@ -0,0 +1,1597 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/* Now we have all the buffers that must be used in balancing of the tree.	*/
+/* Further calculations cannot cause schedule(), and thus the buffer		*/
+/* tree will be stable until the balancing is finished.			*/
+/* Balance the tree according to the analysis made before,			*/
+/* using the buffers obtained after all of the above.				*/
+
+
+/**
+ ** balance_leaf_when_delete
+ ** balance_leaf
+ ** do_balance
+ **
+ **/
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/buffer_head.h>
+
+#ifdef CONFIG_REISERFS_CHECK
+
+struct tree_balance * cur_tb = NULL; /* detects whether more than one
+                                        copy of tb exists as a means
+                                        of checking whether schedule
+                                        is interrupting do_balance */
+#endif
+
+inline void do_balance_mark_leaf_dirty (struct tree_balance * tb, 
+					struct buffer_head * bh, int flag)
+{
+    journal_mark_dirty(tb->transaction_handle,
+                       tb->transaction_handle->t_super, bh) ;
+}
+
+#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
+#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
+
+
+/* summary: 
+ if deleting something ( tb->insert_size[0] < 0 )
+   return(balance_leaf_when_delete()); (flag d handled here)
+ else
+   if lnum is larger than 0 we put items into the left node
+   if rnum is larger than 0 we put items into the right node
+   if snum1 is larger than 0 we put items into the new node s1
+   if snum2 is larger than 0 we put items into the new node s2 
+Note that all *num* count new items being created.
+
+It would be easier to read balance_leaf() if each of these summary
+lines was a separate procedure rather than being inlined.  I think
+that there are many passages here and in balance_leaf_when_delete() in
+which two calls to one procedure can replace two passages, and it
+might save cache space and reduce software maintenance costs to do so.
+
+Vladimir made the perceptive comment that we should offload most of
+the decision making in this function into fix_nodes/check_balance, and
+then create some sort of structure in tb that says what actions should
+be performed by do_balance.
+
+-Hans */
+
+
+
+/* Balance leaf node in case of delete or cut: insert_size[0] < 0
+ *
+ * lnum, rnum can have values >= -1
+ *	-1 means that the neighbor must be joined with S
+ *	 0 means that nothing should be done with the neighbor
+ *	>0 means to shift entirely or partly the specified number of items to the neighbor
+ */
+static int balance_leaf_when_delete (struct tree_balance * tb, int flag)
+{
+    struct buffer_head * tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+    int item_pos = PATH_LAST_POSITION (tb->tb_path);
+    int pos_in_item = tb->tb_path->pos_in_item;
+    struct buffer_info bi;
+    int n;
+    struct item_head * ih;
+
+    RFALSE( tb->FR[0] && B_LEVEL (tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
+	    "vs- 12000: level: wrong FR %z", tb->FR[0]);
+    RFALSE( tb->blknum[0] > 1,
+	    "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
+    RFALSE( ! tb->blknum[0] && ! PATH_H_PPARENT(tb->tb_path, 0),
+	    "PAP-12010: tree can not be empty");
+
+    ih = B_N_PITEM_HEAD (tbS0, item_pos);
+
+    /* Delete or truncate the item */
+
+    switch (flag) {
+    case M_DELETE:   /* delete item in S[0] */
+
+	RFALSE( ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
+	        "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
+ 		 -tb->insert_size [0], ih);
+
+	bi.tb = tb;
+	bi.bi_bh = tbS0;
+	bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+	leaf_delete_items (&bi, 0, item_pos, 1, -1);
+
+	if ( ! item_pos && tb->CFL[0] ) {
+	    if ( B_NR_ITEMS(tbS0) ) {
+		replace_key(tb, tb->CFL[0],tb->lkey[0],tbS0,0);
+	    }
+	    else {
+		if ( ! PATH_H_POSITION (tb->tb_path, 1) )
+		    replace_key(tb, tb->CFL[0],tb->lkey[0],PATH_H_PPARENT(tb->tb_path, 0),0);
+	    }
+	} 
+
+	RFALSE( ! item_pos && !tb->CFL[0],
+		"PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0], tb->L[0]);
+    
+	break;
+
+    case M_CUT: {  /* cut item in S[0] */
+	bi.tb = tb;
+	bi.bi_bh = tbS0;
+	bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+	if (is_direntry_le_ih (ih)) {
+
+	    /* UFS unlink semantics are such that you can only delete one directory entry at a time. */
+	    /* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
+	    tb->insert_size[0] = -1;
+	    leaf_cut_from_buffer (&bi, item_pos, pos_in_item, -tb->insert_size[0]);
+
+	    RFALSE( ! item_pos && ! pos_in_item && ! tb->CFL[0],
+		    "PAP-12030: can not change delimiting key. CFL[0]=%p", 
+		    tb->CFL[0]);
+
+	    if ( ! item_pos && ! pos_in_item && tb->CFL[0] ) {
+		replace_key(tb, tb->CFL[0],tb->lkey[0],tbS0,0);
+	    }
+	} else {
+	    leaf_cut_from_buffer (&bi, item_pos, pos_in_item, -tb->insert_size[0]);
+
+	    RFALSE( ! ih_item_len(ih),
+		"PAP-12035: cut must leave non-zero dynamic length of item");
+	}
+	break;
+    }
+
+    default:
+	print_cur_tb ("12040");
+	reiserfs_panic (tb->tb_sb, "PAP-12040: balance_leaf_when_delete: unexpected mode: %s(%d)",
+			(flag == M_PASTE) ? "PASTE" : ((flag == M_INSERT) ? "INSERT" : "UNKNOWN"), flag);
+    }
+
+    /* the rule is that no shifting occurs unless it allows a node to be freed */
+    n = B_NR_ITEMS(tbS0);
+    if ( tb->lnum[0] )     /* L[0] takes part in balancing */
+    {
+	if ( tb->lnum[0] == -1 )    /* L[0] must be joined with S[0] */
+	{
+	    if ( tb->rnum[0] == -1 )    /* R[0] must be also joined with S[0] */
+	    {			
+		if ( tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0) )
+		{
+		    /* all contents of all the 3 buffers will be in L[0] */
+		    if ( PATH_H_POSITION (tb->tb_path, 1) == 0 && 1 < B_NR_ITEMS(tb->FR[0]) )
+			replace_key(tb, tb->CFL[0],tb->lkey[0],tb->FR[0],1);
+
+		    leaf_move_items (LEAF_FROM_S_TO_L, tb, n, -1, NULL);
+		    leaf_move_items (LEAF_FROM_R_TO_L, tb, B_NR_ITEMS(tb->R[0]), -1, NULL);
+
+		    reiserfs_invalidate_buffer (tb, tbS0);
+		    reiserfs_invalidate_buffer (tb, tb->R[0]);
+
+		    return 0;
+		}
+		/* all contents of all the 3 buffers will be in R[0] */
+		leaf_move_items (LEAF_FROM_S_TO_R, tb, n, -1, NULL);
+		leaf_move_items (LEAF_FROM_L_TO_R, tb, B_NR_ITEMS(tb->L[0]), -1, NULL);
+
+		/* right_delimiting_key is correct in R[0] */
+		replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+		reiserfs_invalidate_buffer (tb, tbS0);
+		reiserfs_invalidate_buffer (tb, tb->L[0]);
+
+		return -1;
+	    }
+
+	    RFALSE( tb->rnum[0] != 0, 
+		    "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
+	    /* all contents of L[0] and S[0] will be in L[0] */
+	    leaf_shift_left(tb, n, -1);
+
+	    reiserfs_invalidate_buffer (tb, tbS0);
+
+	    return 0;
+	}
+	/* part of the contents of S[0] will be in L[0] and the rest of S[0] will be in R[0] */
+
+	RFALSE( ( tb->lnum[0] + tb->rnum[0] < n ) || 
+		( tb->lnum[0] + tb->rnum[0] > n+1 ),
+		"PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
+		tb->rnum[0], tb->lnum[0], n);
+	RFALSE( ( tb->lnum[0] + tb->rnum[0] == n ) && 
+		(tb->lbytes != -1 || tb->rbytes != -1),
+		"PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split", 
+		tb->rbytes, tb->lbytes);
+	RFALSE( ( tb->lnum[0] + tb->rnum[0] == n + 1 ) && 
+		(tb->lbytes < 1 || tb->rbytes != -1),
+		"PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split", 
+		tb->rbytes, tb->lbytes);
+
+	leaf_shift_left (tb, tb->lnum[0], tb->lbytes);
+	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+
+	reiserfs_invalidate_buffer (tb, tbS0);
+
+	return 0;
+    }
+
+    if ( tb->rnum[0] == -1 ) {
+	/* all contents of R[0] and S[0] will be in R[0] */
+	leaf_shift_right(tb, n, -1);
+	reiserfs_invalidate_buffer (tb, tbS0);
+	return 0;
+    }
+
+    RFALSE( tb->rnum[0], 
+	    "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
+    return 0;
+}
+
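+/* A compact view of how the lnum[0]/rnum[0] values chosen by fix_nodes()
+   drive the function above (a rough summary, not an exhaustive case list):
+
+	lnum[0] == -1 && rnum[0] == -1:	L[0], S[0] and R[0] are merged into a
+					single node: into L[0] when FR[0] is the
+					parent of S[0], otherwise into R[0] (the
+					only case that returns -1);
+	lnum[0] == -1 && rnum[0] ==  0:	S[0] is emptied into L[0];
+	lnum[0] ==  0 && rnum[0] == -1:	S[0] is emptied into R[0];
+	lnum[0]  >  0:			lnum[0] items go to L[0], rnum[0] items
+					go to R[0], and S[0] is freed.
+
+   The return value is added to the child position in do_balance(); see the
+   comment there. */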
+
+static int balance_leaf (struct tree_balance * tb,
+			 struct item_head * ih,		/* item header of the inserted item (stored in little-endian format) */
+			 const char * body,		/* body  of inserted item or bytes to paste */
+			 int flag,			/* i - insert, d - delete, c - cut, p - paste
+							   (see comment to do_balance) */
+			 struct item_head * insert_key,  /* in our processing of one level we sometimes determine what
+							    must be inserted into the next higher level.  This insertion
+							    consists of a key or two keys and their corresponding
+							    pointers */
+			 struct buffer_head ** insert_ptr /* inserted node-ptrs for the next level */
+    )
+{
+    struct buffer_head * tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+    int item_pos = PATH_LAST_POSITION (tb->tb_path);	/*  index into the array of item headers in S[0] 
+							    of the affected item */
+    struct buffer_info bi;
+    struct buffer_head *S_new[2];  /* new nodes allocated to hold what could not fit into S */
+    int snum[2];	    /* number of items that will be placed
+                               into S_new (includes partially shifted
+                               items) */
+    int sbytes[2];          /* if an item is partially shifted into S_new then 
+			       if it is a directory item 
+			       it is the number of entries from the item that are shifted into S_new
+			       else
+			       it is the number of bytes from the item that are shifted into S_new
+			    */
+    int n, i;
+    int ret_val;
+    int pos_in_item;
+    int zeros_num;
+
+    PROC_INFO_INC( tb -> tb_sb, balance_at[ 0 ] );
+
+    /* Make balance in case insert_size[0] < 0 */
+    if ( tb->insert_size[0] < 0 )
+	return balance_leaf_when_delete (tb, flag);
+  
+    zeros_num = 0;
+    if (flag == M_INSERT && body == 0)
+	zeros_num = ih_item_len( ih );
+
+    pos_in_item = tb->tb_path->pos_in_item;
+    /* for indirect item pos_in_item is measured in unformatted node
+       pointers. Recalculate to bytes */
+    if (flag != M_INSERT && is_indirect_le_ih (B_N_PITEM_HEAD (tbS0, item_pos)))
+	pos_in_item *= UNFM_P_SIZE;
+
+    if ( tb->lnum[0] > 0 ) {
+	/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
+	if ( item_pos < tb->lnum[0] ) {
+	    /* the new item, or part of it, falls into L[0]; shift it too */
+	    n = B_NR_ITEMS(tb->L[0]);
+
+	    switch (flag) {
+	    case M_INSERT:   /* insert item into L[0] */
+
+		if ( item_pos == tb->lnum[0] - 1 && tb->lbytes != -1 ) {
+		    /* part of new item falls into L[0] */
+		    int new_item_len;
+		    int version;
+
+		    ret_val = leaf_shift_left (tb, tb->lnum[0]-1, -1);
+
+		    /* Calculate item length to insert to S[0] */
+		    new_item_len = ih_item_len(ih) - tb->lbytes;
+		    /* Calculate and check item length to insert to L[0] */
+		    put_ih_item_len(ih, ih_item_len(ih) - new_item_len );
+
+		    RFALSE( ih_item_len(ih) <= 0,
+			    "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
+                            ih_item_len(ih));
+
+		    /* Insert new item into L[0] */
+		    bi.tb = tb;
+		    bi.bi_bh = tb->L[0];
+		    bi.bi_parent = tb->FL[0];
+		    bi.bi_position = get_left_neighbor_position (tb, 0);
+		    leaf_insert_into_buf (&bi, n + item_pos - ret_val, ih, body,
+					  zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
+
+		    version = ih_version (ih);
+
+		    /* Calculate key component, item length and body to insert into S[0] */
+                    set_le_ih_k_offset( ih, le_ih_k_offset( ih ) + (tb->lbytes << (is_indirect_le_ih(ih)?tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT:0)) );
+
+		    put_ih_item_len( ih, new_item_len );
+		    if ( tb->lbytes >  zeros_num ) {
+			body += (tb->lbytes - zeros_num);
+			zeros_num = 0;
+		    }
+		    else
+			zeros_num -= tb->lbytes;
+
+		    RFALSE( ih_item_len(ih) <= 0,
+			"PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d",
+			ih_item_len(ih));
+		} else {
+		    /* new item in whole falls into L[0] */
+		    /* Shift lnum[0]-1 items to L[0] */
+		    ret_val = leaf_shift_left(tb, tb->lnum[0]-1, tb->lbytes);
+		    /* Insert new item into L[0] */
+		    bi.tb = tb;
+		    bi.bi_bh = tb->L[0];
+		    bi.bi_parent = tb->FL[0];
+		    bi.bi_position = get_left_neighbor_position (tb, 0);
+		    leaf_insert_into_buf (&bi, n + item_pos - ret_val, ih, body, zeros_num);
+		    tb->insert_size[0] = 0;
+		    zeros_num = 0;
+		}
+		break;
+
+	    case M_PASTE:   /* append item in L[0] */
+
+		if ( item_pos == tb->lnum[0] - 1 && tb->lbytes != -1 ) {
+		    /* we must shift the part of the appended item */
+		    if ( is_direntry_le_ih (B_N_PITEM_HEAD (tbS0, item_pos))) {
+
+			RFALSE( zeros_num,
+				"PAP-12090: invalid parameter in case of a directory");
+			/* directory item */
+			if ( tb->lbytes > pos_in_item ) {
+			    /* new directory entry falls into L[0] */
+			    struct item_head * pasted;
+			    int l_pos_in_item = pos_in_item;
+							  
+			    /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
+			    ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
+			    if ( ret_val && ! item_pos ) {
+				pasted =  B_N_PITEM_HEAD(tb->L[0],B_NR_ITEMS(tb->L[0])-1);
+				l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes-1);
+			    }
+
+			    /* Append given directory entry to directory item */
+			    bi.tb = tb;
+			    bi.bi_bh = tb->L[0];
+			    bi.bi_parent = tb->FL[0];
+			    bi.bi_position = get_left_neighbor_position (tb, 0);
+			    leaf_paste_in_buffer (&bi, n + item_pos - ret_val, l_pos_in_item,
+						  tb->insert_size[0], body, zeros_num);
+
+			    /* the leaf_paste_in_buffer() call above prepared space for the new entry; leaf_paste_entries() below actually pastes it */
+
+			    /* if directory items were merged during the shift, pos_in_item has been adjusted accordingly */
+
+			    /* paste new directory entry. 1 is entry number */
+			    leaf_paste_entries (bi.bi_bh, n + item_pos - ret_val, l_pos_in_item, 1,
+						(struct reiserfs_de_head *)body, 
+						body + DEH_SIZE, tb->insert_size[0]
+				);
+			    tb->insert_size[0] = 0;
+			} else {
+			    /* new directory entry doesn't fall into L[0] */
+			    /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
+			    leaf_shift_left (tb, tb->lnum[0], tb->lbytes);
+			}
+			/* Calculate new position to append in item body */
+			pos_in_item -= tb->lbytes;
+		    }
+		    else {
+			/* regular object */
+			RFALSE( tb->lbytes <= 0,
+			        "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
+				tb->lbytes);
+			RFALSE( pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
+                                "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
+				ih_item_len(B_N_PITEM_HEAD(tbS0,item_pos)), pos_in_item);
+
+			if ( tb->lbytes >= pos_in_item ) {
+			    /* appended item will be in L[0] in whole */
+			    int l_n;
+
+			    /* this number of bytes must be appended to the last item of L[0] */
+			    l_n = tb->lbytes - pos_in_item;
+
+			    /* Calculate new insert_size[0] */
+			    tb->insert_size[0] -= l_n;
+
+			    RFALSE( tb->insert_size[0] <= 0,
+				    "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
+				    tb->insert_size[0]);
+			    ret_val =  leaf_shift_left(tb,tb->lnum[0], 
+						       ih_item_len(B_N_PITEM_HEAD(tbS0,item_pos)));
+			    /* Append to body of item in L[0] */
+			    bi.tb = tb;
+			    bi.bi_bh = tb->L[0];
+			    bi.bi_parent = tb->FL[0];
+			    bi.bi_position = get_left_neighbor_position (tb, 0);
+			    leaf_paste_in_buffer(
+				&bi,n + item_pos - ret_val,
+				ih_item_len( B_N_PITEM_HEAD(tb->L[0],n+item_pos-ret_val)),
+				l_n,body, zeros_num > l_n ? l_n : zeros_num
+				);
+			    /* 0-th item in S0 can be only of DIRECT type when l_n != 0*/
+			    {
+				int version;
+				int temp_l = l_n;
+				
+				RFALSE (ih_item_len (B_N_PITEM_HEAD (tbS0, 0)),
+					"PAP-12106: item length must be 0");
+				RFALSE (comp_short_le_keys (B_N_PKEY (tbS0, 0),
+							    B_N_PKEY (tb->L[0],
+									    n + item_pos - ret_val)),
+					"PAP-12107: items must be of the same file");
+				if (is_indirect_le_ih(B_N_PITEM_HEAD (tb->L[0],
+								      n + item_pos - ret_val)))	{
+				    temp_l = l_n << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
+				}
+				/* update key of first item in S0 */
+				version = ih_version (B_N_PITEM_HEAD (tbS0, 0));
+				set_le_key_k_offset (version, B_N_PKEY (tbS0, 0), 
+						     le_key_k_offset (version, B_N_PKEY (tbS0, 0)) + temp_l);
+				/* update left delimiting key */
+				set_le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0]),
+						     le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0])) + temp_l);
+			    }
+
+			    /* Calculate new body, position in item and insert_size[0] */
+			    if ( l_n > zeros_num ) {
+				body += (l_n - zeros_num);
+				zeros_num = 0;
+			    }
+			    else
+				zeros_num -= l_n;
+			    pos_in_item = 0;	
+
+			    RFALSE( comp_short_le_keys 
+				    (B_N_PKEY(tbS0,0),
+				     B_N_PKEY(tb->L[0],B_NR_ITEMS(tb->L[0])-1)) ||
+				
+				    !op_is_left_mergeable 
+				    (B_N_PKEY (tbS0, 0), tbS0->b_size) ||
+				    !op_is_left_mergeable
+				    (B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0]), 
+				     tbS0->b_size),
+				    "PAP-12120: item must be merge-able with left neighboring item");
+			}
+			else /* only part of the appended item will be in L[0] */
+			{
+			    /* Calculate position in item for append in S[0] */
+			    pos_in_item -= tb->lbytes;
+
+			    RFALSE( pos_in_item <= 0,
+				    "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
+
+			    /* Shift lnum[0] - 1 items in whole. Shift lbytes bytes from item number lnum[0] */
+			    leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+			}
+		    }
+		}
+		else /* appended item will be in L[0] in whole */
+		{
+		    struct item_head * pasted;
+
+			if ( ! item_pos  && op_is_left_mergeable (B_N_PKEY (tbS0, 0), tbS0->b_size) )
+			{ /* if we paste into the first item of S[0] and it is left mergeable */
+			    /* then increment pos_in_item by the size of the last item in L[0] */
+			    pasted = B_N_PITEM_HEAD(tb->L[0],n-1);
+			    if ( is_direntry_le_ih (pasted) )
+				pos_in_item += ih_entry_count(pasted);
+			    else
+				pos_in_item += ih_item_len(pasted);
+			}
+
+		    /* Shift lnum[0] - 1 items in whole. Shift lbytes bytes from item number lnum[0] */
+		    ret_val = leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+		    /* Append to body of item in L[0] */
+		    bi.tb = tb;
+		    bi.bi_bh = tb->L[0];
+		    bi.bi_parent = tb->FL[0];
+		    bi.bi_position = get_left_neighbor_position (tb, 0);
+		    leaf_paste_in_buffer (&bi, n + item_pos - ret_val, pos_in_item, tb->insert_size[0],
+					  body, zeros_num);
+
+		    /* if appended item is directory, paste entry */
+		    pasted = B_N_PITEM_HEAD (tb->L[0], n + item_pos - ret_val);
+		    if (is_direntry_le_ih (pasted))
+			leaf_paste_entries (
+			    bi.bi_bh, n + item_pos - ret_val, pos_in_item, 1, 
+			    (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+			    );
+		    /* if appended item is indirect item, put unformatted node into un list */
+		    if (is_indirect_le_ih (pasted))
+			set_ih_free_space (pasted, 0);
+		    tb->insert_size[0] = 0;
+		    zeros_num = 0;
+		}
+		break;
+	    default:    /* cases d and t */
+		reiserfs_panic (tb->tb_sb, "PAP-12130: balance_leaf: lnum > 0: unexpected mode: %s(%d)",
+				(flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+	    }
+	} else { 
+	    /* new item doesn't fall into L[0] */
+	    leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+	}
+    }	/* tb->lnum[0] > 0 */
+
+    /* Calculate new item position */
+    item_pos -= ( tb->lnum[0] - (( tb->lbytes != -1 ) ? 1 : 0));
+
+    if ( tb->rnum[0] > 0 ) {
+	/* shift rnum[0] items from S[0] to the right neighbor R[0] */
+	n = B_NR_ITEMS(tbS0);
+	switch ( flag ) {
+
+	case M_INSERT:   /* insert item */
+	    if ( n - tb->rnum[0] < item_pos )
+	    { /* new item or its part falls to R[0] */
+		if ( item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1 )
+		{ /* part of new item falls into R[0] */
+		    loff_t old_key_comp, old_len, r_zeros_number;
+		    const char * r_body;
+		    int version;
+		    loff_t offset;
+
+		    leaf_shift_right(tb,tb->rnum[0]-1,-1);
+
+		    version = ih_version(ih);
+		    /* Remember key component and item length */
+                    old_key_comp = le_ih_k_offset( ih );
+		    old_len = ih_item_len(ih);
+
+		    /* Calculate key component and item length to insert into R[0] */
+                    offset = le_ih_k_offset( ih ) + ((old_len - tb->rbytes )<<(is_indirect_le_ih(ih)?tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT:0));
+                    set_le_ih_k_offset( ih, offset );
+		    put_ih_item_len( ih, tb->rbytes);
+		    /* Insert part of the item into R[0] */
+		    bi.tb = tb;
+		    bi.bi_bh = tb->R[0];
+		    bi.bi_parent = tb->FR[0];
+		    bi.bi_position = get_right_neighbor_position (tb, 0);
+		    if ( (old_len - tb->rbytes) > zeros_num ) {
+			r_zeros_number = 0;
+			r_body = body + (old_len - tb->rbytes) - zeros_num;
+		    }
+		    else {
+			r_body = body;
+			r_zeros_number = zeros_num - (old_len - tb->rbytes);
+			zeros_num -= r_zeros_number;
+		    }
+
+		    leaf_insert_into_buf (&bi, 0, ih, r_body, r_zeros_number);
+
+		    /* Replace right delimiting key by first key in R[0] */
+		    replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+		    /* Calculate key component and item length to insert into S[0] */
+                    set_le_ih_k_offset( ih, old_key_comp );
+		    put_ih_item_len( ih, old_len - tb->rbytes );
+
+		    tb->insert_size[0] -= tb->rbytes;
+
+		}
+		else /* whole new item falls into R[0] */
+		{					  
+		    /* Shift rnum[0]-1 items to R[0] */
+		    ret_val = leaf_shift_right(tb,tb->rnum[0]-1,tb->rbytes);
+		    /* Insert new item into R[0] */
+		    bi.tb = tb;
+		    bi.bi_bh = tb->R[0];
+		    bi.bi_parent = tb->FR[0];
+		    bi.bi_position = get_right_neighbor_position (tb, 0);
+		    leaf_insert_into_buf (&bi, item_pos - n + tb->rnum[0] - 1, ih, body, zeros_num);
+
+		    if ( item_pos - n + tb->rnum[0] - 1 == 0 ) {
+			replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+		    }
+		    zeros_num = tb->insert_size[0] = 0;
+		}
+	    }
+	    else /* new item or part of it doesn't fall into R[0] */
+	    {
+		leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+	    }
+	    break;
+
+	case M_PASTE:   /* append item */
+
+	    if ( n - tb->rnum[0] <= item_pos )  /* pasted item or part of it falls to R[0] */
+	    {
+		if ( item_pos == n - tb->rnum[0] && tb->rbytes != -1 )
+		{ /* we must shift the part of the appended item */
+		    if ( is_direntry_le_ih (B_N_PITEM_HEAD(tbS0, item_pos)))
+		    { /* we append to directory item */
+			int entry_count;
+
+			RFALSE( zeros_num,
+				"PAP-12145: invalid parameter in case of a directory");
+			entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD(tbS0, item_pos));
+			if ( entry_count - tb->rbytes < pos_in_item )
+			    /* new directory entry falls into R[0] */
+			{
+			    int paste_entry_position;
+
+			    RFALSE( tb->rbytes - 1 >= entry_count || 
+				    ! tb->insert_size[0],
+				    "PAP-12150: not enough entries to shift to R[0]: rbytes=%d, entry_count=%d",
+				    tb->rbytes, entry_count);
+			    /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
+			    leaf_shift_right(tb,tb->rnum[0],tb->rbytes - 1);
+			    /* Paste given directory entry to directory item */
+			    paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
+			    bi.tb = tb;
+			    bi.bi_bh = tb->R[0];
+			    bi.bi_parent = tb->FR[0];
+			    bi.bi_position = get_right_neighbor_position (tb, 0);
+			    leaf_paste_in_buffer (&bi, 0, paste_entry_position,
+						  tb->insert_size[0],body,zeros_num);
+			    /* paste entry */
+			    leaf_paste_entries (
+				bi.bi_bh, 0, paste_entry_position, 1, (struct reiserfs_de_head *)body, 
+				body + DEH_SIZE, tb->insert_size[0]
+				);								
+						
+			    if ( paste_entry_position == 0 ) {
+				/* change delimiting keys */
+				replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+			    }
+
+			    tb->insert_size[0] = 0;
+			    pos_in_item++;
+			}
+			else /* new directory entry doesn't fall into R[0] */
+			{
+			    leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+			}
+		    }
+		    else /* regular object */
+		    {
+			int n_shift, n_rem, r_zeros_number;
+			const char * r_body;
+
+			/* Calculate number of bytes which must be shifted from appended item */
+			if ( (n_shift = tb->rbytes - tb->insert_size[0]) < 0 )
+			    n_shift = 0;
+
+			RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD (tbS0, item_pos)),
+			       "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
+                               pos_in_item, ih_item_len( B_N_PITEM_HEAD(tbS0,item_pos)));
+
+			leaf_shift_right(tb,tb->rnum[0],n_shift);
+			/* Calculate number of bytes which must remain in body after appending to R[0] */
+			if ( (n_rem = tb->insert_size[0] - tb->rbytes) < 0 )
+			    n_rem = 0;
+			
+			{
+			  int version;
+			  unsigned long temp_rem = n_rem;
+			  
+			  version = ih_version (B_N_PITEM_HEAD (tb->R[0],0));
+			  if (is_indirect_le_key(version,B_N_PKEY(tb->R[0],0))){
+			      temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits -
+					 UNFM_P_SHIFT);
+			  }
+			  set_le_key_k_offset (version, B_N_PKEY(tb->R[0],0), 
+					       le_key_k_offset (version, B_N_PKEY(tb->R[0],0)) + temp_rem);
+			  set_le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0]), 
+					       le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) + temp_rem);
+			}
+/*		  k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
+		  k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
+			do_balance_mark_internal_dirty (tb, tb->CFR[0], 0);
+
+			/* Append part of body into R[0] */
+			bi.tb = tb;
+			bi.bi_bh = tb->R[0];
+			bi.bi_parent = tb->FR[0];
+			bi.bi_position = get_right_neighbor_position (tb, 0);
+			if ( n_rem > zeros_num ) {
+			    r_zeros_number = 0;
+			    r_body = body + n_rem - zeros_num;
+			}
+			else {
+			    r_body = body;
+			    r_zeros_number = zeros_num - n_rem;
+			    zeros_num -= r_zeros_number;
+			}
+
+			leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem, r_body, r_zeros_number);
+
+			if (is_indirect_le_ih (B_N_PITEM_HEAD(tb->R[0],0))) {
+#if 0
+			    RFALSE( n_rem,
+				    "PAP-12160: paste more than one unformatted node pointer");
+#endif
+			    set_ih_free_space (B_N_PITEM_HEAD(tb->R[0],0), 0);
+			}
+			tb->insert_size[0] = n_rem;
+			if ( ! n_rem )
+			    pos_in_item ++;
+		    }
+		}
+		else /* pasted item in whole falls into R[0] */
+		{
+		    struct item_head * pasted;
+
+		    ret_val = leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+		    /* append item in R[0] */
+		    if ( pos_in_item >= 0 ) {
+			bi.tb = tb;
+			bi.bi_bh = tb->R[0];
+			bi.bi_parent = tb->FR[0];
+			bi.bi_position = get_right_neighbor_position (tb, 0);
+			leaf_paste_in_buffer(&bi,item_pos - n + tb->rnum[0], pos_in_item,
+					     tb->insert_size[0],body, zeros_num);
+		    }
+
+		    /* paste new entry, if item is directory item */
+		    pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
+		    if (is_direntry_le_ih (pasted) && pos_in_item >= 0 ) {
+			leaf_paste_entries (
+			    bi.bi_bh, item_pos - n + tb->rnum[0], pos_in_item, 1, 
+			    (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+			    );
+			if ( ! pos_in_item ) {
+
+			    RFALSE( item_pos - n + tb->rnum[0],
+				    "PAP-12165: directory item must be first item of node when pasting is in 0th position");
+
+			    /* update delimiting keys */
+			    replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+			}
+		    }
+
+		    if (is_indirect_le_ih (pasted))
+			set_ih_free_space (pasted, 0);
+		    zeros_num = tb->insert_size[0] = 0;
+		}
+	    }
+	    else /* new item doesn't fall into R[0] */
+	    {
+		leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+	    }
+	    break;
+	default:    /* cases d and t */
+	    reiserfs_panic (tb->tb_sb, "PAP-12175: balance_leaf: rnum > 0: unexpected mode: %s(%d)",
+			    (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+	}
+    
+    }	/* tb->rnum[0] > 0 */
+
+
+    RFALSE( tb->blknum[0] > 3,
+	    "PAP-12180: blknum can not be %d. It must be <= 3",  tb->blknum[0]);
+    RFALSE( tb->blknum[0] < 0,
+	    "PAP-12185: blknum can not be %d. It must be >= 0",  tb->blknum[0]);
+
+    /* It may happen that, while adding to a node, we discover that it is
+       possible to split it in two, merge the left part into the left neighbor
+       and the right part into the right neighbor, eliminating the node */
+    if ( tb->blknum[0] == 0 ) { /* node S[0] is empty now */
+
+	RFALSE( ! tb->lnum[0] || ! tb->rnum[0],
+	        "PAP-12190: lnum and rnum must not be zero");
+	/* if the insertion was done before the 0-th position in R[0], the
+	   right delimiting key of tb->L[0] and the left delimiting key are
+	   not set correctly */
+	if (tb->CFL[0]) {
+	    if (!tb->CFR[0])
+		reiserfs_panic (tb->tb_sb, "vs-12195: balance_leaf: CFR not initialized");
+	    copy_key (B_N_PDELIM_KEY (tb->CFL[0], tb->lkey[0]), B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0]));
+	    do_balance_mark_internal_dirty (tb, tb->CFL[0], 0);
+	}
+
+	reiserfs_invalidate_buffer(tb,tbS0);									
+	return 0;
+    }
+
+
+    /* Fill new nodes that appear in place of S[0] */
+
+    /* I am told that this copying is because we need an array to enable
+       the looping code. -Hans */
+    snum[0] = tb->s1num,
+	snum[1] = tb->s2num;
+    sbytes[0] = tb->s1bytes;
+    sbytes[1] = tb->s2bytes;
+    for( i = tb->blknum[0] - 2; i >= 0; i-- ) {
+
+	RFALSE( !snum[i], "PAP-12200: snum[%d] == %d. Must be > 0", i, snum[i]);
+
+	/* here we shift from S to S_new nodes */
+
+	S_new[i] = get_FEB(tb);
+
+	/* initialize block type and tree level */
+        set_blkh_level( B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL );
+
+
+	n = B_NR_ITEMS(tbS0);
+	
+	switch (flag) {
+	case M_INSERT:   /* insert item */
+
+	    if ( n - snum[i] < item_pos )
+	    { /* the new item, or part of it, falls into the first new node S_new[i] */
+		if ( item_pos == n - snum[i] + 1 && sbytes[i] != -1 )
+		{ /* part of new item falls into S_new[i] */
+		    int old_key_comp, old_len, r_zeros_number;
+		    const char * r_body;
+		    int version;
+
+		    /* Move snum[i]-1 items from S[0] to S_new[i] */
+		    leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i] - 1, -1, S_new[i]);
+		    /* Remember key component and item length */
+		    version = ih_version (ih);
+                    old_key_comp = le_ih_k_offset( ih );
+		    old_len = ih_item_len(ih);
+
+		    /* Calculate key component and item length to insert into S_new[i] */
+                    set_le_ih_k_offset( ih,
+                                le_ih_k_offset(ih) + ((old_len - sbytes[i] )<<(is_indirect_le_ih(ih)?tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT:0)) );
+
+		    put_ih_item_len( ih, sbytes[i] );
+
+		    /* Insert part of the item into S_new[i] before 0-th item */
+		    bi.tb = tb;
+		    bi.bi_bh = S_new[i];
+		    bi.bi_parent = NULL;
+		    bi.bi_position = 0;
+
+		    if ( (old_len - sbytes[i]) > zeros_num ) {
+			r_zeros_number = 0;
+			r_body = body + (old_len - sbytes[i]) - zeros_num;
+		    }
+		    else {
+			r_body = body;
+			r_zeros_number = zeros_num - (old_len - sbytes[i]);
+			zeros_num -= r_zeros_number;
+		    }
+
+		    leaf_insert_into_buf (&bi, 0, ih, r_body, r_zeros_number);
+
+		    /* Calculate key component and item length to insert into S[i] */
+                    set_le_ih_k_offset( ih, old_key_comp );
+		    put_ih_item_len( ih, old_len - sbytes[i] );
+		    tb->insert_size[0] -= sbytes[i];
+		}
+		else /* whole new item falls into S_new[i] */
+		{
+		    /* Shift snum[i] - 1 items to S_new[i] (sbytes[i] of split item) */
+		    leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i] - 1, sbytes[i], S_new[i]);
+
+		    /* Insert new item into S_new[i] */
+		    bi.tb = tb;
+		    bi.bi_bh = S_new[i];
+		    bi.bi_parent = NULL;
+		    bi.bi_position = 0;
+		    leaf_insert_into_buf (&bi, item_pos - n + snum[i] - 1, ih, body, zeros_num);
+
+		    zeros_num = tb->insert_size[0] = 0;
+		}
+	    }
+
+	    else /* new item or its part doesn't fall into S_new[i] */
+	    {
+		leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+	    }
+	    break;
+
+	case M_PASTE:   /* append item */
+
+	    if ( n - snum[i] <= item_pos )  /* pasted item or part of it falls into S_new[i] */
+	    {
+		if ( item_pos == n - snum[i] && sbytes[i] != -1 )
+		{ /* we must shift part of the appended item */
+		    struct item_head * aux_ih;
+
+		    RFALSE( ih, "PAP-12210: ih must be 0");
+
+		    if ( is_direntry_le_ih (aux_ih = B_N_PITEM_HEAD(tbS0,item_pos))) {
+			/* we append to directory item */
+
+			int entry_count;
+		
+			entry_count = ih_entry_count(aux_ih);
+
+			if ( entry_count - sbytes[i] < pos_in_item  && pos_in_item <= entry_count ) {
+			    /* new directory entry falls into S_new[i] */
+		  
+			    RFALSE( ! tb->insert_size[0],
+				    "PAP-12215: insert_size is already 0");
+			    RFALSE( sbytes[i] - 1 >= entry_count,
+				    "PAP-12220: there are not that many entries (%d), only %d",
+				    sbytes[i] - 1, entry_count);
+
+			    /* Shift snum[i]-1 items in whole. Shift sbytes[i] - 1 directory entries from directory item number snum[i] */
+			    leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i]-1, S_new[i]);
+			    /* Paste given directory entry to directory item */
+			    bi.tb = tb;
+			    bi.bi_bh = S_new[i];
+			    bi.bi_parent = NULL;
+			    bi.bi_position = 0;
+			    leaf_paste_in_buffer (&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
+						  tb->insert_size[0], body,zeros_num);
+			    /* paste new directory entry */
+			    leaf_paste_entries (
+				bi.bi_bh, 0, pos_in_item - entry_count + sbytes[i] - 1,
+				1, (struct reiserfs_de_head *)body, body + DEH_SIZE,
+				tb->insert_size[0]
+				);
+			    tb->insert_size[0] = 0;
+			    pos_in_item++;
+			} else { /* new directory entry doesn't fall into S_new[i] */
+			    leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+			}
+		    }
+		    else /* regular object */
+		    {
+			int n_shift, n_rem, r_zeros_number;
+			const char * r_body;
+
+			RFALSE( pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0,item_pos)) ||
+			        tb->insert_size[0] <= 0,
+			        "PAP-12225: item too short or insert_size <= 0");
+
+			/* Calculate number of bytes which must be shifted from appended item */
+			n_shift = sbytes[i] - tb->insert_size[0];
+			if ( n_shift < 0 )
+			    n_shift = 0;
+			leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
+
+			/* Calculate number of bytes which must remain in body after append to S_new[i] */
+			n_rem = tb->insert_size[0] - sbytes[i];
+			if ( n_rem < 0 )
+			    n_rem = 0;
+			/* Append part of body into S_new[i] */
+			bi.tb = tb;
+			bi.bi_bh = S_new[i];
+			bi.bi_parent = NULL;
+			bi.bi_position = 0;
+
+			if ( n_rem > zeros_num ) {
+			    r_zeros_number = 0;
+			    r_body = body + n_rem - zeros_num;
+			}
+			else {
+			    r_body = body;
+			    r_zeros_number = zeros_num - n_rem;
+			    zeros_num -= r_zeros_number;
+			}
+
+			leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0]-n_rem, r_body,r_zeros_number);
+			{
+			    struct item_head * tmp;
+
+			    tmp = B_N_PITEM_HEAD(S_new[i],0);
+			    if (is_indirect_le_ih (tmp)) {
+				set_ih_free_space (tmp, 0);
+				set_le_ih_k_offset( tmp, le_ih_k_offset(tmp) + 
+					            (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
+			    } else {
+				set_le_ih_k_offset( tmp, le_ih_k_offset(tmp) + 
+				                    n_rem );
+			    }
+			}
+
+			tb->insert_size[0] = n_rem;
+			if ( ! n_rem )
+			    pos_in_item++;
+		    }
+		}
+		else
+		    /* item falls wholly into S_new[i] */
+		{
+		    int ret_val;
+		    struct item_head * pasted;
+
+#ifdef CONFIG_REISERFS_CHECK
+		    struct item_head * ih = B_N_PITEM_HEAD(tbS0,item_pos);
+
+		    if ( ! is_direntry_le_ih(ih) && (pos_in_item != ih_item_len(ih) ||
+						     tb->insert_size[0] <= 0) )
+			reiserfs_panic (tb->tb_sb, "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
+#endif /* CONFIG_REISERFS_CHECK */
+
+		    ret_val = leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+
+		    RFALSE( ret_val,
+			    "PAP-12240: unexpected value returned by leaf_move_items (%d)",
+			    ret_val);
+
+		    /* paste into item */
+		    bi.tb = tb;
+		    bi.bi_bh = S_new[i];
+		    bi.bi_parent = NULL;
+		    bi.bi_position = 0;
+		    leaf_paste_in_buffer(&bi, item_pos - n + snum[i], pos_in_item, tb->insert_size[0], body, zeros_num);
+
+		    pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
+		    if (is_direntry_le_ih (pasted))
+		    {
+			leaf_paste_entries (
+			    bi.bi_bh, item_pos - n + snum[i], pos_in_item, 1, 
+			    (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+			    );
+		    }
+
+		    /* if we paste to indirect item update ih_free_space */
+		    if (is_indirect_le_ih (pasted))
+			set_ih_free_space (pasted, 0);
+		    zeros_num = tb->insert_size[0] = 0;
+		}
+	    }
+
+	    else /* pasted item doesn't fall into S_new[i] */
+	    {
+		leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+	    }
+	    break;
+	default:    /* cases d and t */
+	    reiserfs_panic (tb->tb_sb, "PAP-12245: balance_leaf: blknum > 2: unexpected mode: %s(%d)",
+			    (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+	}
+
+	memcpy (insert_key + i,B_N_PKEY(S_new[i],0),KEY_SIZE);
+	insert_ptr[i] = S_new[i];
+
+	RFALSE (!buffer_journaled (S_new [i]) || buffer_journal_dirty (S_new [i]) ||
+		buffer_dirty (S_new [i]),
+		"PAP-12247: S_new[%d] : (%b)", i, S_new[i]);
+    }
+
+    /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the
+       affected item which remains in S */
+    if ( 0 <= item_pos && item_pos < tb->s0num )
+    { /* if we must insert or append into buffer S[0] */
+
+	switch (flag)
+	{
+	case M_INSERT:   /* insert item into S[0] */
+	    bi.tb = tb;
+	    bi.bi_bh = tbS0;
+	    bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	    bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+	    leaf_insert_into_buf (&bi, item_pos, ih, body, zeros_num);
+
+	    /* If we insert the first key change the delimiting key */
+	    if( item_pos == 0 ) {
+		if (tb->CFL[0]) /* can be 0 in reiserfsck */
+		    replace_key(tb, tb->CFL[0], tb->lkey[0],tbS0,0);
+
+	    }
+	    break;
+
+	case M_PASTE: {  /* append item in S[0] */
+	    struct item_head * pasted;
+
+	    pasted = B_N_PITEM_HEAD (tbS0, item_pos);
+	    /* when this is a directory item, the new entry may already have been pasted */
+	    if (is_direntry_le_ih (pasted)) {
+		if ( pos_in_item >= 0 &&
+		    pos_in_item <= ih_entry_count(pasted) ) {
+
+		    RFALSE( ! tb->insert_size[0], 
+			    "PAP-12260: insert_size is 0 already");
+
+		    /* prepare space */
+		    bi.tb = tb;
+		    bi.bi_bh = tbS0;
+		    bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+		    bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+		    leaf_paste_in_buffer(&bi, item_pos, pos_in_item, tb->insert_size[0], body, zeros_num);
+
+		    /* paste entry */
+		    leaf_paste_entries (
+			bi.bi_bh, item_pos, pos_in_item, 1, (struct reiserfs_de_head *)body,
+			body + DEH_SIZE, tb->insert_size[0]
+			);
+		    if ( ! item_pos && ! pos_in_item ) {
+			RFALSE( !tb->CFL[0] || !tb->L[0], 
+				"PAP-12270: CFL[0]/L[0] must be specified");
+			if (tb->CFL[0]) {
+			    replace_key(tb, tb->CFL[0], tb->lkey[0],tbS0,0);
+
+			}
+		    }
+		    tb->insert_size[0] = 0;
+		}
+	    } else { /* regular object */
+		if ( pos_in_item == ih_item_len(pasted) ) {
+
+		    RFALSE( tb->insert_size[0] <= 0,
+			    "PAP-12275: insert size must not be %d",
+                            tb->insert_size[0]);
+		    bi.tb = tb;
+		    bi.bi_bh = tbS0;
+		    bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+		    bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+		    leaf_paste_in_buffer (&bi, item_pos, pos_in_item, tb->insert_size[0], body, zeros_num);
+
+		    if (is_indirect_le_ih (pasted)) {
+#if 0
+			RFALSE( tb->insert_size[0] != UNFM_P_SIZE,
+				"PAP-12280: insert_size for indirect item must be %d, not %d",
+				UNFM_P_SIZE, tb->insert_size[0]);
+#endif
+			set_ih_free_space (pasted, 0);
+		    }
+		    tb->insert_size[0] = 0;
+		}
+
+#ifdef CONFIG_REISERFS_CHECK
+		else {
+		    if ( tb->insert_size[0] ) {
+			print_cur_tb ("12285");
+			reiserfs_panic (tb->tb_sb, "PAP-12285: balance_leaf: insert_size must be 0 (%d)", tb->insert_size[0]);
+		    }
+		}
+#endif /* CONFIG_REISERFS_CHECK */
+	    
+	    }
+	} /* case M_PASTE: */
+	}
+    }
+
+#ifdef CONFIG_REISERFS_CHECK
+    if ( flag == M_PASTE && tb->insert_size[0] ) {
+	print_cur_tb ("12290");
+	reiserfs_panic (tb->tb_sb, "PAP-12290: balance_leaf: insert_size is still not 0 (%d)", tb->insert_size[0]);
+    }
+#endif /* CONFIG_REISERFS_CHECK */
+
+    return 0;
+} /* Leaf level of the tree is balanced (end of balance_leaf) */
+
+
+
+/* Make empty node */
+void make_empty_node (struct buffer_info * bi)
+{
+    struct block_head * blkh;
+
+    RFALSE( bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");
+
+    blkh = B_BLK_HEAD(bi->bi_bh);
+    set_blkh_nr_item( blkh, 0 );
+    set_blkh_free_space( blkh, MAX_CHILD_SIZE(bi->bi_bh) );
+
+    if (bi->bi_parent)
+	B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size = 0; /* Endian safe if 0 */
+}
+
+
+/* Get first empty buffer */
+struct buffer_head * get_FEB (struct tree_balance * tb)
+{
+    int i;
+    struct buffer_head * first_b;
+    struct buffer_info bi;
+
+    for (i = 0; i < MAX_FEB_SIZE; i ++)
+	if (tb->FEB[i] != 0)
+	    break;
+
+    if (i == MAX_FEB_SIZE)
+	reiserfs_panic(tb->tb_sb, "vs-12300: get_FEB: FEB list is empty");
+
+    bi.tb = tb;
+    bi.bi_bh = first_b = tb->FEB[i];
+    bi.bi_parent = NULL;
+    bi.bi_position = 0;
+    make_empty_node (&bi);
+    set_buffer_uptodate(first_b);
+    tb->FEB[i] = NULL;
+    tb->used[i] = first_b;
+
+    return(first_b);
+}
+
+
+/* This is now used because reiserfs_free_block has to be able to
+** schedule.
+*/
+static void store_thrown (struct tree_balance * tb, struct buffer_head * bh)
+{
+    int i;
+
+    if (buffer_dirty (bh))
+      reiserfs_warning (tb->tb_sb, "store_thrown deals with dirty buffer");
+    for (i = 0; i < sizeof (tb->thrown)/sizeof (tb->thrown[0]); i ++)
+	if (!tb->thrown[i]) {
+	    tb->thrown[i] = bh;
+	    get_bh(bh) ; /* free_thrown puts this */
+	    return;
+	}
+    reiserfs_warning (tb->tb_sb, "store_thrown: too many thrown buffers");
+}
+
+static void free_thrown(struct tree_balance *tb) {
+    int i ;
+    b_blocknr_t blocknr ;
+    for (i = 0; i < sizeof (tb->thrown)/sizeof (tb->thrown[0]); i++) {
+	if (tb->thrown[i]) {
+	    blocknr = tb->thrown[i]->b_blocknr ;
+	    if (buffer_dirty (tb->thrown[i]))
+	      reiserfs_warning (tb->tb_sb,
+				"free_thrown deals with dirty buffer %d",
+				blocknr);
+	    brelse(tb->thrown[i]) ; /* incremented in store_thrown */
+	    reiserfs_free_block (tb->transaction_handle, NULL, blocknr, 0);
+	}
+    }
+}
+
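+/* store_thrown()/free_thrown() implement a small deferred-free scheme:
+   reiserfs_free_block() may sleep, so a buffer discarded in the middle of
+   balancing is only queued (with an extra reference taken by get_bh()), and
+   the queued blocks are released once balancing has finished.  Roughly:
+
+	reiserfs_invalidate_buffer(tb, bh);	queues bh via store_thrown()
+	... balancing continues ...
+	do_balance_completed(tb);		calls free_thrown(tb), which
+						brelse()s the buffers and frees
+						the blocks
+
+   See do_balance_completed() near the end of this file. */
+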
+void reiserfs_invalidate_buffer (struct tree_balance * tb, struct buffer_head * bh)
+{
+    struct block_head *blkh;
+    blkh = B_BLK_HEAD(bh);
+    set_blkh_level( blkh, FREE_LEVEL );
+    set_blkh_nr_item( blkh, 0 );
+    
+    clear_buffer_dirty(bh);
+    store_thrown (tb, bh);
+}
+
+/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
+void replace_key (struct tree_balance * tb, struct buffer_head * dest, int n_dest,
+		  struct buffer_head * src, int n_src)
+{
+
+    RFALSE( dest == NULL || src == NULL,
+	    "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
+	    src, dest);
+    RFALSE( ! B_IS_KEYS_LEVEL (dest),
+	    "vs-12310: invalid level (%z) for destination buffer. dest must be an internal node",
+	    dest);
+    RFALSE( n_dest < 0 || n_src < 0,
+	    "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
+    RFALSE( n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
+	    "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
+	    n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));
+   
+    if (B_IS_ITEMS_LEVEL (src))
+	/* source buffer contains leaf node */
+	memcpy (B_N_PDELIM_KEY(dest,n_dest), B_N_PITEM_HEAD(src,n_src), KEY_SIZE);
+    else
+	memcpy (B_N_PDELIM_KEY(dest,n_dest), B_N_PDELIM_KEY(src,n_src), KEY_SIZE);
+
+    do_balance_mark_internal_dirty (tb, dest, 0);
+}
+
+
+int get_left_neighbor_position (
+				struct tree_balance * tb, 
+				int h
+				)
+{
+  int Sh_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+  RFALSE( PATH_H_PPARENT (tb->tb_path, h) == 0 || tb->FL[h] == 0,
+	  "vs-12325: FL[%d](%p) or F[%d](%p) does not exist", 
+	  h, tb->FL[h], h, PATH_H_PPARENT (tb->tb_path, h));
+
+  if (Sh_position == 0)
+    return B_NR_ITEMS (tb->FL[h]);
+  else
+    return Sh_position - 1;
+}
+
+
+int get_right_neighbor_position (struct tree_balance * tb, int h)
+{
+  int Sh_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+  RFALSE( PATH_H_PPARENT (tb->tb_path, h) == 0 || tb->FR[h] == 0,
+	  "vs-12330: F[%d](%p) or FR[%d](%p) does not exist", 
+	  h, PATH_H_PPARENT (tb->tb_path, h), h, tb->FR[h]);
+
+  if (Sh_position == B_NR_ITEMS (PATH_H_PPARENT (tb->tb_path, h)))
+    return 0;
+  else
+    return Sh_position + 1;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+int is_reusable (struct super_block * s, b_blocknr_t block, int bit_value);
+static void check_internal_node (struct super_block * s, struct buffer_head * bh, char * mes)
+{
+  struct disk_child * dc;
+  int i;
+
+  RFALSE( !bh, "PAP-12336: bh == 0");
+
+  if (!bh || !B_IS_IN_TREE (bh))
+    return;
+ 
+  RFALSE( !buffer_dirty (bh) && 
+	  !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
+	  "PAP-12337: buffer (%b) must be dirty", bh);
+  dc = B_N_CHILD (bh, 0);
+
+  for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) {
+    if (!is_reusable (s, dc_block_number(dc), 1) ) {
+      print_cur_tb (mes);
+      reiserfs_panic (s, "PAP-12338: check_internal_node: invalid child pointer %y in %b", dc, bh);
+    }
+  }
+}
+
+
+static int locked_or_not_in_tree (struct buffer_head * bh, char * which)
+{
+  if ( (!buffer_journal_prepared (bh) && buffer_locked (bh)) ||
+        !B_IS_IN_TREE (bh) ) {
+    reiserfs_warning (NULL, "vs-12339: locked_or_not_in_tree: %s (%b)",
+                      which, bh);
+    return 1;
+  } 
+  return 0;
+}
+
+
+static int check_before_balancing (struct tree_balance * tb)
+{
+  int retval = 0;	
+
+  if ( cur_tb ) {
+    reiserfs_panic (tb->tb_sb, "vs-12335: check_before_balancing: "
+		    "suspect that schedule occurred based on cur_tb not being null at this point in code. "
+		    "do_balance cannot properly handle schedule occurring while it runs.");
+  }
+  
+  /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
+     prepped all of these for us). */
+  if ( tb->lnum[0] ) {
+    retval |= locked_or_not_in_tree (tb->L[0], "L[0]");
+    retval |= locked_or_not_in_tree (tb->FL[0], "FL[0]");
+    retval |= locked_or_not_in_tree (tb->CFL[0], "CFL[0]");
+    check_leaf (tb->L[0]);
+  }
+  if ( tb->rnum[0] ) {
+    retval |= locked_or_not_in_tree (tb->R[0], "R[0]");
+    retval |= locked_or_not_in_tree (tb->FR[0], "FR[0]");
+    retval |= locked_or_not_in_tree (tb->CFR[0], "CFR[0]");
+    check_leaf (tb->R[0]);
+  }
+  retval |= locked_or_not_in_tree (PATH_PLAST_BUFFER (tb->tb_path), "S[0]");
+  check_leaf (PATH_PLAST_BUFFER (tb->tb_path));
+
+  return retval;
+}
+
+
+static void check_after_balance_leaf (struct tree_balance * tb)
+{
+    if (tb->lnum[0]) {
+	if (B_FREE_SPACE (tb->L[0]) != 
+	    MAX_CHILD_SIZE (tb->L[0]) - dc_size(B_N_CHILD (tb->FL[0], get_left_neighbor_position (tb, 0)))) {
+	    print_cur_tb ("12221");
+	    reiserfs_panic (tb->tb_sb, "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
+	}
+    }
+    if (tb->rnum[0]) {
+	if (B_FREE_SPACE (tb->R[0]) != 
+	    MAX_CHILD_SIZE (tb->R[0]) - dc_size(B_N_CHILD (tb->FR[0], get_right_neighbor_position (tb, 0)))) {
+	    print_cur_tb ("12222");
+	    reiserfs_panic (tb->tb_sb, "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
+	}
+    }
+    if (PATH_H_PBUFFER(tb->tb_path,1) &&
+	(B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) != 
+		    (MAX_CHILD_SIZE (PATH_H_PBUFFER(tb->tb_path,0)) -
+		    dc_size(B_N_CHILD (PATH_H_PBUFFER(tb->tb_path,1),
+		    PATH_H_POSITION (tb->tb_path, 1)))) )) {
+	int left = B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0));
+	int right = (MAX_CHILD_SIZE (PATH_H_PBUFFER(tb->tb_path,0)) -
+		    dc_size(B_N_CHILD (PATH_H_PBUFFER(tb->tb_path,1),
+			PATH_H_POSITION (tb->tb_path, 1))));
+	print_cur_tb ("12223");
+	reiserfs_warning (tb->tb_sb,
+	    "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
+    	    "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
+	    left,
+	    MAX_CHILD_SIZE (PATH_H_PBUFFER(tb->tb_path,0)),
+	    PATH_H_PBUFFER(tb->tb_path,1),
+	    PATH_H_POSITION (tb->tb_path, 1),
+	    dc_size(B_N_CHILD (PATH_H_PBUFFER(tb->tb_path,1), PATH_H_POSITION (tb->tb_path, 1 )) ),
+	    right );
+	reiserfs_panic (tb->tb_sb, "PAP-12365: check_after_balance_leaf: S is incorrect");
+    }
+}
+
+
+static void check_leaf_level (struct tree_balance * tb)
+{
+  check_leaf (tb->L[0]);
+  check_leaf (tb->R[0]);
+  check_leaf (PATH_PLAST_BUFFER (tb->tb_path));
+}
+
+static void check_internal_levels (struct tree_balance * tb)
+{
+  int h;
+
+  /* check all internal nodes */
+  for (h = 1; tb->insert_size[h]; h ++) {
+    check_internal_node (tb->tb_sb, PATH_H_PBUFFER (tb->tb_path, h), "BAD BUFFER ON PATH");
+    if (tb->lnum[h])
+      check_internal_node (tb->tb_sb, tb->L[h], "BAD L");
+    if (tb->rnum[h])
+      check_internal_node (tb->tb_sb, tb->R[h], "BAD R");
+  }
+
+}
+
+#endif
+
+
+
+
+
+
+/* Now we have all of the buffers that must be used in balancing of
+   the tree.  We rely on the assumption that schedule() will not occur
+   while do_balance works. ( Only interrupt handlers are acceptable.)
+   We balance the tree according to the analysis made before this,
+   using buffers already obtained.  For SMP support it will someday be
+   necessary to add ordered locking of tb. */
+
+/* Some interesting rules of balancing:
+
+   we delete a maximum of two nodes per level per balancing: we never
+   delete R, when we delete two of three nodes L, S, R then we move
+   them into R.
+
+   we only delete L if we are deleting two nodes, if we delete only
+   one node we delete S
+
+   if we shift leaves then we shift as much as we can: this is a
+   deliberate policy of extremism in node packing which results in
+   higher average utilization after repeated random balance operations
+   at the cost of more memory copies and more balancing as a result of
+   small insertions to full nodes.
+
+   if we shift internal nodes we try to evenly balance the node
+   utilization, with consequently less balancing at the cost of lower
+   utilization.
+
+   one could argue that the policy for directories in leaves should be
+   that of internal nodes, but we will wait until another day to
+   evaluate this....  It would be nice to someday measure and prove
+   these assumptions as to what is optimal....
+
+*/
+
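+/* A small illustration of the leaf policy above (the numbers are made up;
+   only the shape of the decision matters): if S[0] overflows while its left
+   neighbor L[0] has room for, say, 7 of its items, all 7 are shifted left
+   rather than just enough to make the new item fit.  Internal nodes, by
+   contrast, are shifted or split so that the resulting nodes end up roughly
+   equally full. */
+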
+static inline void do_balance_starts (struct tree_balance *tb)
+{
+    /* use print_cur_tb() to see initial state of struct
+       tree_balance */
+
+    /* store_print_tb (tb); */
+
+    /* do not delete, just comment it out */
+/*    print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb, 
+	     "check");*/
+    RFALSE( check_before_balancing (tb), "PAP-12340: locked buffers in TB");
+#ifdef CONFIG_REISERFS_CHECK
+    cur_tb = tb;
+#endif
+}
+
+
+static inline void do_balance_completed (struct tree_balance * tb)
+{
+    
+#ifdef CONFIG_REISERFS_CHECK
+    check_leaf_level (tb);
+    check_internal_levels (tb);
+    cur_tb = NULL;
+#endif
+
+    /* reiserfs_free_block is no longer schedule safe.  So, we need to
+    ** put the buffers we want freed on the thrown list during do_balance,
+    ** and then free them now
+    */
+
+    REISERFS_SB(tb->tb_sb)->s_do_balance ++;
+
+
+    /* release all nodes held to perform the balancing */
+    unfix_nodes(tb);
+
+    free_thrown(tb) ;
+}
+
+
+
+
+
+void do_balance (struct tree_balance * tb, /* tree_balance structure */
+		 struct item_head * ih,	   /* item header of inserted item */
+		 const char * body,  /* body  of inserted item or bytes to paste */
+		 int flag)  /* i - insert, d - delete
+			       c - cut, p - paste
+						      
+			       Cut means delete part of an item
+			       (includes removing an entry from a
+			       directory).
+						      
+			       Delete means delete whole item.
+						      
+			       Insert means add a new item into the
+			       tree.
+						      						      
+			       Paste means to append to the end of an
+			       existing file or to insert a directory
+			       entry.  */
+{
+    int child_pos, /* position of a child node in its parent */
+	h;	   /* level of the tree being processed */
+    struct item_head insert_key[2]; /* in our processing of one level
+				       we sometimes determine what
+				       must be inserted into the next
+				       higher level.  This insertion
+				       consists of a key or two keys
+				       and their corresponding
+				       pointers */
+    struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next
+					  level */
+
+    tb->tb_mode = flag;
+    tb->need_balance_dirty = 0;
+
+    if (FILESYSTEM_CHANGED_TB(tb)) {
+        reiserfs_panic(tb->tb_sb, "clm-6000: do_balance, fs generation has changed\n") ;
+    }
+    /* if we have no real work to do  */
+    if ( ! tb->insert_size[0] ) {
+	reiserfs_warning (tb->tb_sb,
+			  "PAP-12350: do_balance: insert_size == 0, mode == %c",
+			  flag);
+	unfix_nodes(tb);
+	return;
+    }
+
+    atomic_inc (&(fs_generation (tb->tb_sb)));
+    do_balance_starts (tb);
+    
+	/* balance leaf returns 0 except if combining L R and S into
+	   one node.  see balance_internal() for explanation of this
+	   line of code.*/
+	child_pos = PATH_H_B_ITEM_ORDER (tb->tb_path, 0) +
+	  balance_leaf (tb, ih, body, flag, insert_key, insert_ptr);
+
+#ifdef CONFIG_REISERFS_CHECK
+    check_after_balance_leaf (tb);
+#endif
+
+    /* Balance internal level of the tree. */
+    for ( h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++ )
+	child_pos = balance_internal (tb, h, child_pos, insert_key, insert_ptr);
+
+
+    do_balance_completed (tb);
+
+}
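+
+/* Typical usage, roughly as in the callers in stree.c (a sketch only; see
+   reiserfs_insert_item() and friends for the real retry and error handling):
+
+	struct tree_balance tb;
+	...
+	retval = fix_nodes(M_INSERT, &tb, ih, body);
+	if (retval == CARRY_ON)
+		do_balance(&tb, ih, body, M_INSERT);
+	else
+		unfix_nodes(&tb);
+
+   fix_nodes() analyses the tree and pins every buffer the balance will touch,
+   do_balance() then applies the change without scheduling, and unfix_nodes()
+   releases the buffers when no balance is performed. */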
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
new file mode 100644
index 0000000..2695011
--- /dev/null
+++ b/fs/reiserfs/file.c
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/quotaops.h>
+
+/*
+** We pack the tails of files on file close, not at the time they are written.
+** This implies an unnecessary copy of the tail and an unnecessary indirect item
+** insertion/balancing, for files that are written in one write.
+** It avoids unnecessary tail packings (balances) for files that are written in
+** multiple writes and are small enough to have tails.
+** 
+** file_release is called by the VFS layer when the file is closed.  If
+** this is the last open file descriptor, and the file is
+** small enough to have a tail, and the tail is currently in an
+** unformatted node, the tail is converted back into a direct item.
+** 
+** We use reiserfs_truncate_file to pack the tail, since it already has
+** all the conditions coded.  
+*/
+static int reiserfs_file_release (struct inode * inode, struct file * filp)
+{
+
+    struct reiserfs_transaction_handle th ;
+    int err;
+    int jbegin_failure = 0;
+
+    if (!S_ISREG (inode->i_mode))
+	BUG ();
+
+    /* fast out for when nothing needs to be done */
+    if ((atomic_read(&inode->i_count) > 1 ||
+	!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) || 
+         !tail_has_to_be_packed(inode))       && 
+	REISERFS_I(inode)->i_prealloc_count <= 0) {
+	return 0;
+    }    
+    
+    reiserfs_write_lock(inode->i_sb);
+    down (&inode->i_sem); 
+    /* freeing preallocation only involves relogging blocks that
+     * are already in the current transaction.  preallocation gets
+     * freed at the end of each transaction, so it is impossible for
+     * us to log any additional blocks (including quota blocks)
+     */
+    err = journal_begin(&th, inode->i_sb, 1);
+    if (err) {
+	/* uh oh, we can't allow the inode to go away while there
+	 * are still preallocation blocks pending.  Try to join the
+	 * aborted transaction
+	 */
+	jbegin_failure = err;
+	err = journal_join_abort(&th, inode->i_sb, 1);
+
+	if (err) {
+	    /* hmpf, our choices here aren't good.  We can pin the inode
+	     * which will disallow unmount from ever happening, we can
+	     * do nothing, which will corrupt random memory on unmount,
+	     * or we can forcibly remove the file from the preallocation
+	     * list, which will leak blocks on disk.  Let's pin the inode
+	     * and let the admin know what is going on.
+	     */
+	    igrab(inode);
+	    reiserfs_warning(inode->i_sb, "pinning inode %lu because the "
+	                     "preallocation can't be freed", inode->i_ino);
+	    goto out;
+	}
+    }
+    reiserfs_update_inode_transaction(inode) ;
+
+#ifdef REISERFS_PREALLOCATE
+    reiserfs_discard_prealloc (&th, inode);
+#endif
+    err = journal_end(&th, inode->i_sb, 1);
+
+    /* copy back the error code from journal_begin */
+    if (!err)
+        err = jbegin_failure;
+
+    if (!err && atomic_read(&inode->i_count) <= 1 &&
+	(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
+        tail_has_to_be_packed (inode)) {
+	/* if regular file is released by last holder and it has been
+	   appended (we append by unformatted node only) or its direct
+	   item(s) had to be converted, then it may have to be
+	   indirect2direct converted */
+	err = reiserfs_truncate_file(inode, 0) ;
+    }
+out:
+    up (&inode->i_sem); 
+    reiserfs_write_unlock(inode->i_sb);
+    return err;
+}
+
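+/* Ignoring the preallocation and error handling above, the release path
+   boils down to (a sketch):
+
+	if (last close && (i_flags & i_pack_on_close_mask) &&
+	    tail_has_to_be_packed(inode))
+		reiserfs_truncate_file(inode, 0);
+
+   i.e. a truncate to the current size, which converts a tail that lives in
+   an unformatted node back into a direct item. */
+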
+static void reiserfs_vfs_truncate_file(struct inode *inode) {
+    reiserfs_truncate_file(inode, 1) ;
+}
+
+/* Sync a reiserfs file. */
+
+/*
+ * FIXME: sync_mapping_buffers() never has anything to sync.  Can
+ * be removed...
+ */
+
+static int reiserfs_sync_file(
+			      struct file   * p_s_filp,
+			      struct dentry * p_s_dentry,
+			      int datasync
+			      ) {
+  struct inode * p_s_inode = p_s_dentry->d_inode;
+  int n_err;
+  int barrier_done;
+
+  if (!S_ISREG(p_s_inode->i_mode))
+      BUG ();
+  n_err = sync_mapping_buffers(p_s_inode->i_mapping) ;
+  reiserfs_write_lock(p_s_inode->i_sb);
+  barrier_done = reiserfs_commit_for_inode(p_s_inode);
+  reiserfs_write_unlock(p_s_inode->i_sb);
+  if (barrier_done != 1)
+      blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
+  if (barrier_done < 0)
+    return barrier_done;
+  return ( n_err < 0 ) ? -EIO : 0;
+}
+
+/* I really do not want to play with memory shortage right now, so
+   to simplify the code, we are not going to write more than this many pages at
+   a time. This still should considerably improve performance compared to 4k
+   at a time case. This is 32 pages of 4k size. */
+#define REISERFS_WRITE_PAGES_AT_A_TIME (128 * 1024) / PAGE_CACHE_SIZE
+
+/* Allocates blocks for a file to fulfil a write request.
+   Maps all unmapped but prepared pages from the list.
+   Updates metadata with newly allocated blocknumbers as needed */
+static int reiserfs_allocate_blocks_for_region(
+				struct reiserfs_transaction_handle *th,
+				struct inode *inode, /* Inode we work with */
+				loff_t pos, /* Writing position */
+				int num_pages, /* number of pages write going
+						  to touch */
+				int write_bytes, /* amount of bytes to write */
+				struct page **prepared_pages, /* array of
+							         prepared pages
+							       */
+				int blocks_to_allocate /* Amount of blocks we
+							  need to allocate to
+							  fit the data into file
+							 */
+				)
+{
+    struct cpu_key key; // cpu key of item that we are going to deal with
+    struct item_head *ih; // pointer to item head that we are going to deal with
+    struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
+    __u32 * item; // pointer to item we are going to deal with
+    INITIALIZE_PATH(path); // path to item, that we are going to deal with.
+    b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
+    reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
+    size_t res; // return value of various functions that we call.
+    int curr_block; // current block used to keep track of unmapped blocks.
+    int i; // loop counter
+    int itempos; // position in item
+    unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
+						       // first page
+    unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
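+    /* Illustrative example (assuming 4096-byte pages, values not from the
+       original source): for pos = 5000 and write_bytes = 3000,
+       from = 5000 & 4095 = 904 and to = ((5000 + 3000 - 1) & 4095) + 1 = 3904,
+       i.e. the write starts at byte 904 of the first affected page and its
+       last modified byte is byte 3903 of the last affected page. */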
+    __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created.
+    int modifying_this_item = 0; // Flag for items traversal code to keep track
+				 // of the fact that we already prepared
+				 // current block for journal
+    int will_prealloc = 0;
+    RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");
+
+    /* only preallocate if this is a small write */
+    if (REISERFS_I(inode)->i_prealloc_count ||
+       (!(write_bytes & (inode->i_sb->s_blocksize -1)) &&
+        blocks_to_allocate <
+        REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
+        will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;
+
+    allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
+    					sizeof(b_blocknr_t), GFP_NOFS);
+    if (!allocated_blocks)
+	return -ENOMEM;	/* nothing is locked yet, so a plain return is safe */
+
+    /* First we compose a key to point at the writing position; we want to do
+       that outside of any locking region. */
+    make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);
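+    /* The pos+1 above reflects that reiserfs key offsets for file bodies are
+       1-based, as the "Position one" comment further down (where the first
+       indirect item is created) also suggests; byte "pos" of the file is
+       addressed by key offset pos+1. */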
+
+    /* If we came here, it means we absolutely need to open a transaction,
+       since we need to allocate some blocks */
+    reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
+    res = journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS); // I wish I knew whether this number is enough
+    if (res)
+        goto error_exit;
+    reiserfs_update_inode_transaction(inode) ;
+
+    /* Look for the in-tree position of our write, need path for block allocator */
+    res = search_for_position_by_key(inode->i_sb, &key, &path);
+    if ( res == IO_ERROR ) {
+	res = -EIO;
+	goto error_exit;
+    }
+   
+    /* Allocate blocks */
+    /* First fill in "hint" structure for block allocator */
+    hint.th = th; // transaction handle.
+    hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
+    hint.inode = inode; // Inode is needed by block allocator too.
+    hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
+    hint.key = key.on_disk_key; // on disk key of file.
+    hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
+    hint.formatted_node = 0; // We are allocating blocks for unformatted node.
+    hint.preallocate = will_prealloc;
+
+    /* Call block allocator to allocate blocks */
+    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
+    if ( res != CARRY_ON ) {
+	if ( res == NO_DISK_SPACE ) {
+	    /* We flush the transaction in case of no space. This way some
+	       blocks might become free */
+	    SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
+	    res = restart_transaction(th, inode, &path);
+            if (res)
+                goto error_exit;
+
+	    /* We might have scheduled, so search again */
+	    res = search_for_position_by_key(inode->i_sb, &key, &path);
+	    if ( res == IO_ERROR ) {
+		res = -EIO;
+		goto error_exit;
+	    }
+
+	    /* update changed info for hint structure. */
+	    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
+	    if ( res != CARRY_ON ) {
+		res = -ENOSPC; 
+		pathrelse(&path);
+		goto error_exit;
+	    }
+	} else {
+	    res = -ENOSPC;
+	    pathrelse(&path);
+	    goto error_exit;
+	}
+    }
+
+#ifdef __BIG_ENDIAN
+        // Too bad, I have not found any way to convert a given region from
+        // cpu format to little endian format
+    {
+        int i;
+        for ( i = 0; i < blocks_to_allocate ; i++)
+            allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);
+    }
+#endif
+
+    /* Allocating blocks might well have scheduled and the tree might have
+       changed, so let's search the tree again */
+    /* find where in the tree our write should go */
+    res = search_for_position_by_key(inode->i_sb, &key, &path);
+    if ( res == IO_ERROR ) {
+	res = -EIO;
+	goto error_exit_free_blocks;
+    }
+
+    bh = get_last_bh( &path ); // Get a bufferhead for last element in path.
+    ih = get_ih( &path );      // Get a pointer to last item head in path.
+    item = get_item( &path );  // Get a pointer to last item in path
+
+    /* Let's see what we have found */
+    if ( res != POSITION_FOUND ) { /* position not found, this means that we
+				      might need to append file with holes
+				      first */
+	// Since we are writing past the file's end, we need to find out if
+	// there is a hole that needs to be inserted before our writing
+	// position, and how many blocks it is going to cover (we need to
+	//  populate pointers to file blocks representing the hole with zeros)
+
+	{
+	    int item_offset = 1;
+	    /*
+	     * if ih is stat data, its offset is 0 and we don't want to
+	     * add 1 to pos in the hole_size calculation
+	     */
+	    if (is_statdata_le_ih(ih))
+	        item_offset = 0;
+	    hole_size = (pos + item_offset -
+	            (le_key_k_offset( get_inode_item_key_version(inode),
+		    &(ih->ih_key)) +
+		    op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
+		    inode->i_sb->s_blocksize_bits;
+	}
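+	/* Illustrative example (assuming 4096-byte blocks, values not from the
+	   original source): if the last item found is an indirect item with
+	   key offset 1 covering 4 blocks (op_bytes_number = 16384, i.e. file
+	   bytes 1..16384) and we write at pos = 40960, then
+	   hole_size = (40960 + 1 - (1 + 16384)) >> 12 = 6, so six zero block
+	   pointers must be inserted before the write can proceed. */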
+
+	if ( hole_size > 0 ) {
+	    int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); // How much data to insert first time.
+	    /* Area filled with zeroes, to supply as a list of zero block
+	       numbers.  We allocate it outside of the loop just in case the
+	       loop would spin for several iterations. */
+	    char *zeros = kmalloc(to_paste*UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
+	    if ( !zeros ) {
+		res = -ENOMEM;
+		goto error_exit_free_blocks;
+	    }
+	    memset ( zeros, 0, to_paste*UNFM_P_SIZE);
+	    do {
+		to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE );
+		if ( is_indirect_le_ih(ih) ) {
+		    /* Ok, there is an existing indirect item already. Need to append to it */
+		    /* Calculate position past inserted item */
+		    make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
+		    res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)zeros, UNFM_P_SIZE*to_paste);
+		    if ( res ) {
+			kfree(zeros);
+			goto error_exit_free_blocks;
+		    }
+		} else if ( is_statdata_le_ih(ih) ) {
+		    /* No existing item, create it */
+		    /* item head for new item */
+		    struct item_head ins_ih;
+
+		    /* create a key for our new item */
+		    make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3);
+
+		    /* Create new item head for our new item */
+		    make_le_item_head (&ins_ih, &key, key.version, 1,
+				       TYPE_INDIRECT, to_paste*UNFM_P_SIZE,
+				       0 /* free space */);
+
+		    /* Find where such item should live in the tree */
+		    res = search_item (inode->i_sb, &key, &path);
+		    if ( res != ITEM_NOT_FOUND ) {
+			/* item should not exist, otherwise we have error */
+			if ( res != -ENOSPC ) {
+			    reiserfs_warning (inode->i_sb,
+				"green-9008: search_by_key (%K) returned %d",
+					      &key, res);
+			}
+			res = -EIO;
+		        kfree(zeros);
+			goto error_exit_free_blocks;
+		    }
+		    res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)zeros);
+		} else {
+		    reiserfs_panic(inode->i_sb, "green-9011: Unexpected key type %K\n", &key);
+		}
+		if ( res ) {
+		    kfree(zeros);
+		    goto error_exit_free_blocks;
+		}
+		/* Now we want to check if the transaction is too full, and if
+		   it is we restart it. This will also free the path. */
+		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
+		    res = restart_transaction(th, inode, &path);
+                    if (res) {
+                        pathrelse (&path);
+                        kfree(zeros);
+                        goto error_exit;
+                    }
+                }
+
+		/* Well, need to recalculate path and stuff */
+		set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + (to_paste << inode->i_blkbits));
+		res = search_for_position_by_key(inode->i_sb, &key, &path);
+		if ( res == IO_ERROR ) {
+		    res = -EIO;
+		    kfree(zeros);
+		    goto error_exit_free_blocks;
+		}
+		bh=get_last_bh(&path);
+		ih=get_ih(&path);
+		item = get_item(&path);
+		hole_size -= to_paste;
+	    } while ( hole_size );
+	    kfree(zeros);
+	}
+    }
+
+    // Go through existing indirect items first and
+    // replace all zeroes with block numbers from the list.
+    // Note that if no corresponding item was found by the previous search,
+    // it means there is no existing in-tree representation for the file area
+    // we are going to overwrite, so there is nothing to scan through for holes.
+    for ( curr_block = 0, itempos = path.pos_in_item ; curr_block < blocks_to_allocate && res == POSITION_FOUND ; ) {
+retry:
+
+	if ( itempos >= ih_item_len(ih)/UNFM_P_SIZE ) {
+	    /* We ran out of data in this indirect item, let's look for another
+	       one. */
+	    /* First if we are already modifying current item, log it */
+	    if ( modifying_this_item ) {
+		journal_mark_dirty (th, inode->i_sb, bh);
+		modifying_this_item = 0;
+	    }
+	    /* Then set the key to look for a new indirect item (offset of old
+	       item is added to old item length) */
+	    set_cpu_key_k_offset( &key, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize));
+	    /* Search for position of new key in the tree. */
+	    res = search_for_position_by_key(inode->i_sb, &key, &path);
+	    if ( res == IO_ERROR) {
+		res = -EIO;
+		goto error_exit_free_blocks;
+	    }
+	    bh=get_last_bh(&path);
+	    ih=get_ih(&path);
+	    item = get_item(&path);
+	    itempos = path.pos_in_item;
+	    continue; // loop to check all kinds of conditions and so on.
+	}
+	/* Ok, we have correct position in item now, so let's see if it is
+	   representing file hole (blocknumber is zero) and fill it if needed */
+	if ( !item[itempos] ) {
+	    /* Ok, a hole. Now we need to check if we already prepared this
+	       block to be journaled */
+	    while ( !modifying_this_item ) { // loop until we succeed
+		/* Well, this item is not journaled yet, so we must prepare
+		   it for journal first, before we can change it */
+		struct item_head tmp_ih; // We copy the item head of the found
+					 // item here, to detect if the fs
+					 // changed under us while we were
+					 // preparing for the journal.
+		int fs_gen; // We store the fs generation here to find out if
+			    // someone changes the fs under our feet
+
+		copy_item_head (&tmp_ih, ih); // Remember itemhead
+		fs_gen = get_generation (inode->i_sb); // remember fs generation
+		reiserfs_prepare_for_journal(inode->i_sb, bh, 1); // Prepare a buffer within which indirect item is stored for changing.
+		if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+		    // Sigh, fs was changed under us, we need to look for new
+		    // location of item we are working with
+
+		    /* unmark the prepared area as journaled and search for its
+		       new position */
+		    reiserfs_restore_prepared_buffer(inode->i_sb, bh);
+		    res = search_for_position_by_key(inode->i_sb, &key, &path);
+		    if ( res == IO_ERROR) {
+			res = -EIO;
+			goto error_exit_free_blocks;
+		    }
+		    bh=get_last_bh(&path);
+		    ih=get_ih(&path);
+		    item = get_item(&path);
+		    itempos = path.pos_in_item;
+		    goto retry;
+		}
+		modifying_this_item = 1;
+	    }
+	    item[itempos] = allocated_blocks[curr_block]; // Assign new block
+	    curr_block++;
+	}
+	itempos++;
+    }
+
+    if ( modifying_this_item ) { // We need to log last-accessed block, if it
+				 // was modified, but not logged yet.
+	journal_mark_dirty (th, inode->i_sb, bh);
+    }
+
+    if ( curr_block < blocks_to_allocate ) {
+	// Oh well, we need to append to the indirect item, or to create an
+	// indirect item if there wasn't any
+	if ( is_indirect_le_ih(ih) ) {
+	    // Existing indirect item - append. First calculate key for append
+	    // position. We do not need to recalculate path as it should
+	    // already point to correct place.
+	    make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
+	    res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)(allocated_blocks+curr_block), UNFM_P_SIZE*(blocks_to_allocate-curr_block));
+	    if ( res ) {
+		goto error_exit_free_blocks;
+	    }
+	} else if (is_statdata_le_ih(ih) ) {
+	    // Last found item was statdata. That means we need to create indirect item.
+	    struct item_head ins_ih; /* itemhead for new item */
+
+	    /* create a key for our new item */
+	    make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3); // Position one,
+							    // because that's
+							    // where first
+							    // indirect item
+							    // begins
+	    /* Create new item head for our new item */
+	    make_le_item_head (&ins_ih, &key, key.version, 1, TYPE_INDIRECT,
+			       (blocks_to_allocate-curr_block)*UNFM_P_SIZE,
+			       0 /* free space */);
+	    /* Find where such item should live in the tree */
+	    res = search_item (inode->i_sb, &key, &path);
+	    if ( res != ITEM_NOT_FOUND ) {
+		/* Well, if we have found such an item already, or some error
+		   occurred, we need to warn the user and return an error */
+		if ( res != -ENOSPC ) {
+		    reiserfs_warning (inode->i_sb,
+				      "green-9009: search_by_key (%K) "
+				      "returned %d", &key, res);
+		}
+		res = -EIO;
+		goto error_exit_free_blocks;
+	    }
+	    /* Insert item into the tree with the data as its body */
+	    res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)(allocated_blocks+curr_block));
+	} else {
+	    reiserfs_panic(inode->i_sb, "green-9010: unexpected item type for key %K\n",&key);
+	}
+    }
+
+    // the caller is responsible for closing the transaction
+    // unless we return an error; they are also responsible for logging
+    // the inode.
+    //
+    pathrelse(&path);
+    /*
+     * clean up preallocation from previous writes
+     * if this is a partial block write
+     */
+    if (write_bytes & (inode->i_sb->s_blocksize -1))
+        reiserfs_discard_prealloc(th, inode);
+    reiserfs_write_unlock(inode->i_sb);
+
+    // go through all the pages/buffers and map the buffers to newly allocated
+    // blocks (so that system knows where to write these pages later).
+    curr_block = 0;
+    for ( i = 0; i < num_pages ; i++ ) {
+	struct page *page=prepared_pages[i]; //current page
+	struct buffer_head *head = page_buffers(page);// first buffer for a page
+	int block_start, block_end; // in-page offsets for buffers.
+
+	if (!page_buffers(page))
+	    reiserfs_panic(inode->i_sb, "green-9005: No buffers for prepared page???");
+
+	/* For each buffer in page */
+	for(bh = head, block_start = 0; bh != head || !block_start;
+	    block_start=block_end, bh = bh->b_this_page) {
+	    if (!bh)
+		reiserfs_panic(inode->i_sb, "green-9006: Allocated but absent buffer for a page?");
+	    block_end = block_start+inode->i_sb->s_blocksize;
+	    if (i == 0 && block_end <= from )
+		/* if this buffer is before requested data to map, skip it */
+		continue;
+	    if (i == num_pages - 1 && block_start >= to)
+		/* If this buffer is after requested data to map, abort
+		   processing of current page */
+		break;
+
+	    if ( !buffer_mapped(bh) ) { // Ok, unmapped buffer, need to map it
+		map_bh( bh, inode->i_sb, le32_to_cpu(allocated_blocks[curr_block]));
+		curr_block++;
+		set_buffer_new(bh);
+	    }
+	}
+    }
+
+    RFALSE( curr_block > blocks_to_allocate, "green-9007: Used too many blocks? weird");
+
+    kfree(allocated_blocks);
+    return 0;
+
+// Need to deal with transaction here.
+error_exit_free_blocks:
+    pathrelse(&path);
+    // free blocks
+    for( i = 0; i < blocks_to_allocate; i++ )
+	reiserfs_free_block(th, inode, le32_to_cpu(allocated_blocks[i]), 1);
+
+error_exit:
+    if (th->t_trans_id) {
+        int err;
+        // update any changes we made to blk count
+        reiserfs_update_sd(th, inode);
+        err = journal_end(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS);
+        if (err)
+            res = err;
+    }
+    reiserfs_write_unlock(inode->i_sb);
+    kfree(allocated_blocks);
+
+    return res;
+}
+
+/* Unlock pages prepared by reiserfs_prepare_file_region_for_write */
+static void reiserfs_unprepare_pages(struct page **prepared_pages, /* list of locked pages */
+			      size_t num_pages /* amount of pages */) {
+    int i; // loop counter
+
+    for (i=0; i < num_pages ; i++) {
+	struct page *page = prepared_pages[i];
+
+	try_to_free_buffers(page);
+	unlock_page(page);
+	page_cache_release(page);
+    }
+}
+
+/* This function will copy data from userspace to specified pages within
+   supplied byte range */
+static int reiserfs_copy_from_user_to_file_region(
+				loff_t pos, /* In-file position */
+				int num_pages, /* Number of pages affected */
+				int write_bytes, /* Amount of bytes to write */
+				struct page **prepared_pages, /* pointer to 
+								 array to
+								 prepared pages
+								*/
+				const char __user *buf /* Pointer to user-supplied
+						   data*/
+				)
+{
+    long page_fault=0; // status of copy_from_user.
+    int i; // loop counter.
+    int offset; // offset in page
+
+    for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
+	size_t count = min_t(size_t,PAGE_CACHE_SIZE-offset,write_bytes); // How much of bytes to write to this page
+	struct page *page=prepared_pages[i]; // Current page we process.
+
+	fault_in_pages_readable( buf, count);
+
+	/* Copy data from userspace to the current page */
+	kmap(page);
+	page_fault = __copy_from_user(page_address(page)+offset, buf, count); // Copy the data.
+	/* Flush processor's dcache for this page */
+	flush_dcache_page(page);
+	kunmap(page);
+	buf+=count;
+	write_bytes-=count;
+
+	if (page_fault)
+	    break; // Was there a fault? abort.
+    }
+
+    return page_fault?-EFAULT:0;
+}
+
+/* taken from fs/buffer.c:__block_commit_write */
+int reiserfs_commit_page(struct inode *inode, struct page *page,
+		unsigned from, unsigned to)
+{
+    unsigned block_start, block_end;
+    int partial = 0;
+    unsigned blocksize;
+    struct buffer_head *bh, *head;
+    unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
+    int new;
+    int logit = reiserfs_file_data_log(inode);
+    struct super_block *s = inode->i_sb;
+    int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+    struct reiserfs_transaction_handle th;
+    int ret = 0;
+
+    th.t_trans_id = 0;
+    blocksize = 1 << inode->i_blkbits;
+
+    if (logit) {
+	reiserfs_write_lock(s);
+	ret = journal_begin(&th, s, bh_per_page + 1);
+	if (ret)
+	    goto drop_write_lock;
+	reiserfs_update_inode_transaction(inode);
+    }
+    for(bh = head = page_buffers(page), block_start = 0;
+        bh != head || !block_start;
+	block_start=block_end, bh = bh->b_this_page)
+    {
+
+	new = buffer_new(bh);
+	clear_buffer_new(bh);
+	block_end = block_start + blocksize;
+	if (block_end <= from || block_start >= to) {
+	    if (!buffer_uptodate(bh))
+		    partial = 1;
+	} else {
+	    set_buffer_uptodate(bh);
+	    if (logit) {
+		reiserfs_prepare_for_journal(s, bh, 1);
+		journal_mark_dirty(&th, s, bh);
+	    } else if (!buffer_dirty(bh)) {
+		mark_buffer_dirty(bh);
+		/* do data=ordered on any page past the end
+		 * of file and any buffer marked BH_New.
+		 */
+		if (reiserfs_data_ordered(inode->i_sb) &&
+		    (new || page->index >= i_size_index)) {
+		    reiserfs_add_ordered_list(inode, bh);
+	        }
+	    }
+	}
+    }
+    if (logit) {
+	ret = journal_end(&th, s, bh_per_page + 1);
+drop_write_lock:
+	reiserfs_write_unlock(s);
+    }
+    /*
+     * If this is a partial write which happened to make all buffers
+     * uptodate then we can optimize away a bogus readpage() for
+     * the next read(). Here we 'discover' whether the page went
+     * uptodate as a result of this (potentially partial) write.
+     */
+    if (!partial)
+	SetPageUptodate(page);
+    return ret;
+}
+
+
+/* Submit pages for write. This was separated from the actual file copying
+   because we might want to allocate block numbers in-between.
+   This function assumes that the caller will adjust the file size to the correct value. */
+static int reiserfs_submit_file_region_for_write(
+				struct reiserfs_transaction_handle *th,
+				struct inode *inode,
+				loff_t pos, /* Writing position offset */
+				size_t num_pages, /* Number of pages to write */
+				size_t write_bytes, /* number of bytes to write */
+				struct page **prepared_pages /* list of pages */
+				)
+{
+    int status; // return status of block_commit_write.
+    int retval = 0; // Return value we are going to return.
+    int i; // loop counter
+    int offset; // Writing offset in page.
+    int orig_write_bytes = write_bytes;
+    int sd_update = 0;
+
+    for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
+	int count = min_t(int,PAGE_CACHE_SIZE-offset,write_bytes); // How much of bytes to write to this page
+	struct page *page=prepared_pages[i]; // Current page we process.
+
+	status = reiserfs_commit_page(inode, page, offset, offset+count);
+	if ( status )
+	    retval = status; // To not overcomplicate matters we are going to
+			     // submit all the pages even if there was an error.
+			     // We only remember the error status to report it
+			     // on exit.
+	write_bytes-=count;
+    }
+    /* now that we've gotten all the ordered buffers marked dirty,
+     * we can safely update i_size and close any running transaction
+     */
+    if ( pos + orig_write_bytes > inode->i_size) {
+	inode->i_size = pos + orig_write_bytes; // Set new size
+	/* If the file has grown so much that tail packing is no
+	 * longer possible, reset the "need to pack" flag */
+	if ( (have_large_tails (inode->i_sb) &&
+	      inode->i_size > i_block_size (inode)*4) ||
+	     (have_small_tails (inode->i_sb) &&
+	     inode->i_size > i_block_size(inode)) )
+	    REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask ;
+        else if ( (have_large_tails (inode->i_sb) &&
+	          inode->i_size < i_block_size (inode)*4) ||
+	          (have_small_tails (inode->i_sb) &&
+		  inode->i_size < i_block_size(inode)) )
+	    REISERFS_I(inode)->i_flags |= i_pack_on_close_mask ;
+
+	if (th->t_trans_id) {
+	    reiserfs_write_lock(inode->i_sb);
+	    reiserfs_update_sd(th, inode); // And update on-disk metadata
+	    reiserfs_write_unlock(inode->i_sb);
+	} else
+	    inode->i_sb->s_op->dirty_inode(inode);
+
+        sd_update = 1;
+    }
+    if (th->t_trans_id) {
+	reiserfs_write_lock(inode->i_sb);
+	if (!sd_update)
+	    reiserfs_update_sd(th, inode);
+	status = journal_end(th, th->t_super, th->t_blocks_allocated);
+        if (status)
+            retval = status;
+	reiserfs_write_unlock(inode->i_sb);
+    }
+    th->t_trans_id = 0;
+
+    /* 
+     * we have to unlock the pages after updating i_size, otherwise
+     * we race with writepage
+     */
+    for ( i = 0; i < num_pages ; i++) {
+	struct page *page=prepared_pages[i];
+	unlock_page(page); 
+	mark_page_accessed(page);
+	page_cache_release(page);
+    }
+    return retval;
+}
+
+/* Check whether the writing region passed in is going to touch the file's tail
+   (if it is present), and if it is, convert the tail to an unformatted node */
+static int reiserfs_check_for_tail_and_convert( struct inode *inode, /* inode to deal with */
+					 loff_t pos, /* Writing position */
+					 int write_bytes /* amount of bytes to write */
+				        )
+{
+    INITIALIZE_PATH(path); // needed for search_for_position
+    struct cpu_key key; // Key that would represent last touched writing byte.
+    struct item_head *ih; // item header of found block;
+    int res; // Return value of various functions we call.
+    int cont_expand_offset; // We will put offset for generic_cont_expand here
+			    // This can be int just because tails are created
+			    // only for small files.
+ 
+/* this embodies a dependency on a particular tail policy */
+    if ( inode->i_size >= inode->i_sb->s_blocksize*4 ) {
+	/* such big files do not have tails, so we won't bother ourselves
+	   to look for tails; simply return */
+	return 0;
+    }
+
+    reiserfs_write_lock(inode->i_sb);
+    /* find the item containing the last byte to be written, or if
+     * writing past the end of the file then the last item of the
+     * file (and then we check its type). */
+    make_cpu_key (&key, inode, pos+write_bytes+1, TYPE_ANY, 3/*key length*/);
+    res = search_for_position_by_key(inode->i_sb, &key, &path);
+    if ( res == IO_ERROR ) {
+        reiserfs_write_unlock(inode->i_sb);
+	return -EIO;
+    }
+    ih = get_ih(&path);
+    res = 0;
+    if ( is_direct_le_ih(ih) ) {
+	/* Ok, closest item is file tail (tails are stored in "direct"
+	 * items), so we need to unpack it. */
+	/* To not overcomplicate matters, we just call generic_cont_expand,
+	   which will in turn call other stuff and finally boil down to
+	   reiserfs_get_block(), which does the necessary conversion. */
+	cont_expand_offset = le_key_k_offset(get_inode_item_key_version(inode), &(ih->ih_key));
+	pathrelse(&path);
+	res = generic_cont_expand( inode, cont_expand_offset);
+    } else
+	pathrelse(&path);
+
+    reiserfs_write_unlock(inode->i_sb);
+    return res;
+}
+
+/* This function locks pages starting from @pos for @inode.
+   @num_pages pages are locked and stored in the
+   @prepared_pages array. Buffers are also allocated for these pages.
+   The first and last page of the region are read if they are only partially
+   overwritten. If the last page did not exist before the write (file hole or
+   file append), it is zeroed instead.
+   Returns the number of unallocated blocks that should be allocated to cover
+   the new file data.*/
+static int reiserfs_prepare_file_region_for_write(
+				struct inode *inode /* Inode of the file */,
+				loff_t pos, /* position in the file */
+				size_t num_pages, /* number of pages to
+					          prepare */
+				size_t write_bytes, /* Amount of bytes to be
+						    overwritten from
+						    @pos */
+				struct page **prepared_pages /* pointer to array
+							       where to store
+							       prepared pages */
+					   )
+{
+    int res=0; // Return values of different functions we call.
+    unsigned long index = pos >> PAGE_CACHE_SHIFT; // Offset in file in pages.
+    int from = (pos & (PAGE_CACHE_SIZE - 1)); // Writing offset in first page
+    int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;
+					 /* offset of last modified byte in last
+				            page */
+    struct address_space *mapping = inode->i_mapping; // Pages are mapped here.
+    int i; // Simple counter
+    int blocks = 0; /* Return value (blocks that should be allocated) */
+    struct buffer_head *bh, *head; // Current bufferhead and first bufferhead
+				   // of a page.
+    unsigned block_start, block_end; // Starting and ending offsets of current
+				     // buffer in the page.
+    struct buffer_head *wait[2], **wait_bh=wait; // Buffers for page, if
+						 // page appeared to be not up
+						 // to date. Note how we have
+						 // at most 2 buffers, this is
+						 // because we may at most
+						 // partially overwrite two
+						 // buffers for one page: one at
+						 // the beginning of the write
+						 // area and one at the end.
+						 // Everything in the middle
+						 // gets overwritten totally.
+
+    struct cpu_key key; // cpu key of item that we are going to deal with
+    struct item_head *ih = NULL; // pointer to item head that we are going to deal with
+    struct buffer_head *itembuf=NULL; // Buffer head that contains items that we are going to deal with
+    INITIALIZE_PATH(path); // path to item, that we are going to deal with.
+    __u32 * item=NULL; // pointer to item we are going to deal with
+    int item_pos=-1; /* Position in indirect item */
+
+
+    if ( num_pages < 1 ) {
+	reiserfs_warning (inode->i_sb,
+			  "green-9001: reiserfs_prepare_file_region_for_write "
+			  "called with zero number of pages to process");
+	return -EFAULT;
+    }
+
+    /* We have 2 loops for pages. In the first loop we grab and lock the pages,
+       so that nobody would touch these until we release the pages. Then
+       we start to deal with mapping buffers to blocks. */
+    for ( i = 0; i < num_pages; i++) {
+	prepared_pages[i] = grab_cache_page(mapping, index + i); // locks the page
+	if ( !prepared_pages[i]) {
+	    res = -ENOMEM;
+	    goto failed_page_grabbing;
+	}
+	if (!page_has_buffers(prepared_pages[i]))
+	    create_empty_buffers(prepared_pages[i], inode->i_sb->s_blocksize, 0);
+    }
+
+    /* Let's count the number of blocks for the case where all the blocks
+       overwritten are new (we will subtract already allocated blocks later)*/
+    if ( num_pages > 2 )
+	/* These are fully overwritten pages, so all the blocks in
+	   these pages are counted as needing to be allocated */
+	blocks = (num_pages - 2) << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+    /* count blocks needed for first page (possibly partially written) */
+    blocks += ((PAGE_CACHE_SIZE - from) >> inode->i_blkbits) +
+	   !!(from & (inode->i_sb->s_blocksize-1)); /* roundup */
+
+    /* Now we account for the last page. If the last page == first page (we
+       overwrite only one page), we subtract all the blocks past the
+       last writing position in a page out of the already calculated number
+       of blocks */
+    blocks += ((num_pages > 1) << (PAGE_CACHE_SHIFT-inode->i_blkbits)) -
+	   ((PAGE_CACHE_SIZE - to) >> inode->i_blkbits);
+	   /* Note how we do not round up here, since partial blocks still
+	      should be allocated */
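+    /* Worked example of the arithmetic above (illustrative only, assuming
+       PAGE_CACHE_SIZE = 4096 and a 1024-byte blocksize): with num_pages = 3,
+       from = 1500 and to = 2500, the middle page contributes
+       (3 - 2) << (12 - 10) = 4 blocks, the first page
+       ((4096 - 1500) >> 10) + 1 = 3 blocks (the partial block at "from" is
+       rounded up), and the last page (1 << 2) - ((4096 - 2500) >> 10) = 3
+       blocks, giving 10 blocks in total before any already mapped blocks are
+       subtracted below. */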
+
+    /* Now if all the write area lies past the file end, there is no point in
+       mapping blocks, since there are none, so we just zero out the remaining
+       parts of the first and last pages in the write area (if needed) */
+    if ( (pos & ~((loff_t)PAGE_CACHE_SIZE - 1)) > inode->i_size ) {
+	if ( from != 0 ) {/* First page needs to be partially zeroed */
+	    char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
+	    memset(kaddr, 0, from);
+	    kunmap_atomic( kaddr, KM_USER0);
+	}
+	if ( to != PAGE_CACHE_SIZE ) { /* Last page needs to be partially zeroed */
+	    char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
+	    memset(kaddr+to, 0, PAGE_CACHE_SIZE - to);
+	    kunmap_atomic( kaddr, KM_USER0);
+	}
+
+	/* Since all blocks are new - use already calculated value */
+	return blocks;
+    }
+
+    /* Well, since we write somewhere into the middle of a file, there is a
+       possibility we are writing over some already allocated blocks, so
+       let's map these blocks and subtract the number of such blocks out of the
+       blocks we need to allocate (calculated above) */
+    /* Mask write position to start on blocksize, we do it out of the
+       loop for performance reasons */
+    pos &= ~((loff_t) inode->i_sb->s_blocksize - 1);
+    /* Set cpu key to the starting position in a file (on left block boundary)*/
+    make_cpu_key (&key, inode, 1 + ((pos) & ~((loff_t) inode->i_sb->s_blocksize - 1)), TYPE_ANY, 3/*key length*/);
+
+    reiserfs_write_lock(inode->i_sb); // We need that for at least search_by_key()
+    for ( i = 0; i < num_pages ; i++ ) { 
+
+	head = page_buffers(prepared_pages[i]);
+	/* For each buffer in the page */
+	for(bh = head, block_start = 0; bh != head || !block_start;
+	    block_start=block_end, bh = bh->b_this_page) {
+		if (!bh)
+		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
+		/* Find where this buffer ends */
+		block_end = block_start+inode->i_sb->s_blocksize;
+		if (i == 0 && block_end <= from )
+		    /* if this buffer is before requested data to map, skip it*/
+		    continue;
+
+		if (i == num_pages - 1 && block_start >= to) {
+		    /* If this buffer is after requested data to map, abort
+		       processing of current page */
+		    break;
+		}
+
+		if ( buffer_mapped(bh) && bh->b_blocknr !=0 ) {
+		    /* This is an optimisation for the case where the buffer is
+		       mapped and has a block number assigned. If a significant
+		       number of such buffers are present, we may avoid some
+		       search_by_key calls.
+		       It would probably be possible to move parts of this code
+		       out of the BKL, but I am afraid that would overcomplicate
+		       the code without any noticeable benefit.
+		    */
+		    item_pos++;
+		    /* Update the key */
+		    set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);
+		    blocks--; // Decrease the amount of blocks that need to be
+			      // allocated
+		    continue; // Go to the next buffer
+		}
+
+		if ( !itembuf || /* if first iteration */
+		     item_pos >= ih_item_len(ih)/UNFM_P_SIZE)
+					     { /* or if we progressed past the
+						  current unformatted_item */
+			/* Try to find next item */
+			res = search_for_position_by_key(inode->i_sb, &key, &path);
+			/* Abort if no more items */
+			if ( res != POSITION_FOUND ) {
+			    /* make sure later loops don't use this item */
+			    itembuf = NULL;
+			    item = NULL;
+			    break;
+			}
+
+			/* Update information about current indirect item */
+			itembuf = get_last_bh( &path );
+			ih = get_ih( &path );
+			item = get_item( &path );
+			item_pos = path.pos_in_item;
+
+			RFALSE( !is_indirect_le_ih (ih), "green-9003: indirect item expected");
+		}
+
+		/* See if there is some block associated with the file
+		   at that position, map the buffer to this block */
+		if ( get_block_num(item,item_pos) ) {
+		    map_bh(bh, inode->i_sb, get_block_num(item,item_pos));
+		    blocks--; // Decrease the amount of blocks that need to be
+			      // allocated
+		}
+		item_pos++;
+		/* Update the key */
+		set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);
+	}
+    }
+    pathrelse(&path); // Free the path
+    reiserfs_write_unlock(inode->i_sb);
+
+	/* Now zero out unmapped buffers for the first and last pages of the
+	   write area, or issue read requests if the page is mapped. */
+	/* First page, see if it is not uptodate */
+	if ( !PageUptodate(prepared_pages[0]) ) {
+	    head = page_buffers(prepared_pages[0]);
+
+	    /* For each buffer in page */
+	    for(bh = head, block_start = 0; bh != head || !block_start;
+		block_start=block_end, bh = bh->b_this_page) {
+
+		if (!bh)
+		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
+		/* Find where this buffer ends */
+		block_end = block_start+inode->i_sb->s_blocksize;
+		if ( block_end <= from )
+		    /* if this buffer is before requested data to map, skip it*/
+		    continue;
+		if ( block_start < from ) { /* Aha, our partial buffer */
+		    if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
+						  issue a READ request for it so
+						  as not to lose data */
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++=bh;
+		    } else { /* Not mapped, zero it */
+			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
+			memset(kaddr+block_start, 0, from-block_start);
+			kunmap_atomic( kaddr, KM_USER0);
+			set_buffer_uptodate(bh);
+		    }
+		}
+	    }
+	}
+
+	/* Last page, see if it is not uptodate, or if the last page is past the end of the file. */
+	if ( !PageUptodate(prepared_pages[num_pages-1]) || 
+	    ((pos+write_bytes)>>PAGE_CACHE_SHIFT) > (inode->i_size>>PAGE_CACHE_SHIFT) ) {
+	    head = page_buffers(prepared_pages[num_pages-1]);
+
+	    /* for each buffer in page */
+	    for(bh = head, block_start = 0; bh != head || !block_start;
+		block_start=block_end, bh = bh->b_this_page) {
+
+		if (!bh)
+		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
+		/* Find where this buffer ends */
+		block_end = block_start+inode->i_sb->s_blocksize;
+		if ( block_start >= to )
+		    /* if this buffer is after requested data to map, skip it*/
+		    break;
+		if ( block_end > to ) { /* Aha, our partial buffer */
+		    if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
+						  issue a READ request for it so
+						  as not to lose data */
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++=bh;
+		    } else { /* Not mapped, zero it */
+			char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
+			memset(kaddr+to, 0, block_end-to);
+			kunmap_atomic( kaddr, KM_USER0);
+			set_buffer_uptodate(bh);
+		    }
+		}
+	    }
+	}
+
+    /* Wait for read requests we made to happen, if necessary */
+    while(wait_bh > wait) {
+	wait_on_buffer(*--wait_bh);
+	if (!buffer_uptodate(*wait_bh)) {
+	    res = -EIO;
+	    goto failed_read;
+	}
+    }
+
+    return blocks;
+failed_page_grabbing:
+    num_pages = i;
+failed_read:
+    reiserfs_unprepare_pages(prepared_pages, num_pages);
+    return res;
+}
+
+/* Write @count bytes at position @ppos in a file indicated by @file
+   from the buffer @buf.  
+
+   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
+   something simple that works.  It is not for serious use by general purpose filesystems, excepting the one that it was
+   written for (ext2/3).  This is for several reasons:
+
+   * It has no understanding of any filesystem specific optimizations.
+
+   * It enters the filesystem repeatedly for each page that is written.
+
+   * It depends on the reiserfs_get_block() function which, as implemented by reiserfs, performs a costly search_by_key
+     operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
+     to reiserfs, which allows for fewer tree traversals.
+
+   * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.
+
+   * Asking the block allocation code for blocks one at a time is slightly less efficient.
+
+   All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
+   use it, but we were in a hurry to make code freeze, and so it couldn't be revised then.  This new code should make
+   things right finally.
+
+   Future Features: providing search_by_key with hints.
+
+*/
+static ssize_t reiserfs_file_write( struct file *file, /* the file we are going to write into */
+                             const char __user *buf, /*  pointer to user supplied data
+(in userspace) */
+                             size_t count, /* amount of bytes to write */
+                             loff_t *ppos /* pointer to position in file that we start writing at. Should be updated to
+                                           * new current position before returning. */ )
+{
+    size_t already_written = 0; // Number of bytes already written to the file.
+    loff_t pos; // Current position in the file.
+    ssize_t res; // return value of various functions that we call.
+    int err = 0;
+    struct inode *inode = file->f_dentry->d_inode; // Inode of the file that we are writing to.
+				/* To simplify coding at this time, we store
+				   locked pages in array for now */
+    struct page * prepared_pages[REISERFS_WRITE_PAGES_AT_A_TIME];
+    struct reiserfs_transaction_handle th;
+    th.t_trans_id = 0;
+
+    if ( file->f_flags & O_DIRECT) { // Direct IO needs treatment
+	ssize_t result, after_file_end = 0;
+	if ( (*ppos + count >= inode->i_size) || (file->f_flags & O_APPEND) ) {
+	    /* If we are appending a file, we need to put this savelink in here.
+	       If we crash while doing direct io, finish_unfinished will
+	       cut the garbage from the file end. */
+	    reiserfs_write_lock(inode->i_sb);
+	    err = journal_begin(&th, inode->i_sb,  JOURNAL_PER_BALANCE_CNT );
+            if (err) {
+		reiserfs_write_unlock (inode->i_sb);
+		return err;
+	    }
+	    reiserfs_update_inode_transaction(inode);
+	    add_save_link (&th, inode, 1 /* Truncate */);
+	    after_file_end = 1;
+	    err = journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT );
+            reiserfs_write_unlock(inode->i_sb);
+	    if (err)
+		return err;
+	}
+	result = generic_file_write(file, buf, count, ppos);
+
+	if ( after_file_end ) { /* Now update i_size and remove the savelink */
+	    struct reiserfs_transaction_handle th;
+	    reiserfs_write_lock(inode->i_sb);
+	    err = journal_begin(&th, inode->i_sb, 1);
+            if (err) {
+                reiserfs_write_unlock (inode->i_sb);
+                return err;
+            }
+	    reiserfs_update_inode_transaction(inode);
+	    reiserfs_update_sd(&th, inode);
+	    err = journal_end(&th, inode->i_sb, 1);
+            if (err) {
+                reiserfs_write_unlock (inode->i_sb);
+                return err;
+            }
+	    err = remove_save_link (inode, 1/* truncate */);
+	    reiserfs_write_unlock(inode->i_sb);
+            if (err)
+                return err;
+	}
+
+	return result;
+    }
+
+    if ( unlikely((ssize_t) count < 0 ))
+        return -EINVAL;
+
+    if (unlikely(!access_ok(VERIFY_READ, buf, count)))
+        return -EFAULT;
+
+    down(&inode->i_sem); // locks the entire file for just us
+
+    pos = *ppos;
+
+    /* Check if we can write to specified region of file, file
+       is not overly big and this kind of stuff. Adjust pos and
+       count, if needed */
+    res = generic_write_checks(file, &pos, &count, 0);
+    if (res)
+	goto out;
+
+    if ( count == 0 )
+	goto out;
+
+    res = remove_suid(file->f_dentry);
+    if (res)
+	goto out;
+
+    inode_update_time(inode, 1); /* Both mtime and ctime */
+
+    // Ok, we are done with all the checks.
+
+    // Now we should start real work
+
+    /* If we are going to write past the file's packed tail or if we are going
+       to overwrite part of the tail, we need that tail to be converted into
+       unformatted node */
+    res = reiserfs_check_for_tail_and_convert( inode, pos, count);
+    if (res)
+	goto out;
+
+    while ( count > 0) {
+	/* This is the main loop, in which we run until some error occurs
+	   or until we write all of the data. */
+	size_t num_pages;/* amount of pages we are going to write this iteration */
+	size_t write_bytes; /* amount of bytes to write during this iteration */
+	int blocks_to_allocate; /* how many blocks we need to allocate for this
+				   iteration; may also hold a negative error
+				   code returned by the prepare step below */
+        
+        /*  (pos & (PAGE_CACHE_SIZE-1)) is an idiom for offset into a page of pos*/
+	num_pages = !!((pos+count) & (PAGE_CACHE_SIZE - 1)) + /* round up partial
+							  pages */
+		    ((count + (pos & (PAGE_CACHE_SIZE-1))) >> PAGE_CACHE_SHIFT); 
+						/* convert size to amount of
+						   pages */
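+	/* Illustrative example (assuming 4096-byte pages, values not from the
+	   original source): for pos = 5000 and count = 10000,
+	   (pos + count) & 4095 = 2712 so the first term is 1, and
+	   (10000 + (5000 & 4095)) >> 12 = 2, giving num_pages = 3, which
+	   matches the three pages spanned by bytes 5000..14999. */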
+	reiserfs_write_lock(inode->i_sb);
+	if ( num_pages > REISERFS_WRITE_PAGES_AT_A_TIME 
+		|| num_pages > reiserfs_can_fit_pages(inode->i_sb) ) {
+	    /* If we were asked to write more data than we want to, or if there
+	       is not that much space, then we shorten the amount of data to
+	       write for this iteration. */
+	    num_pages = min_t(size_t, REISERFS_WRITE_PAGES_AT_A_TIME, reiserfs_can_fit_pages(inode->i_sb));
+	    /* Also we should not forget to set size in bytes accordingly */
+	    write_bytes = (num_pages << PAGE_CACHE_SHIFT) - 
+			    (pos & (PAGE_CACHE_SIZE-1));
+					 /* If the position is not on the
+					    start of the page, we need
+					    to subtract the offset
+					    within the page */
+	} else
+	    write_bytes = count;
+
+	/* reserve the blocks to be allocated later, so that later on
+	   we still have the space to write the blocks to */
+	reiserfs_claim_blocks_to_be_allocated(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));
+	reiserfs_write_unlock(inode->i_sb);
+
+	if ( !num_pages ) { /* If we do not have enough space even for */
+	    res = -ENOSPC;  /* single page, return -ENOSPC */
+	    if ( pos > (inode->i_size & (inode->i_sb->s_blocksize-1)))
+		break; // In case we are writing past the file end, break.
+	    // Otherwise we are possibly overwriting the file, so
+	    // let's set the write size to be equal to or less than blocksize.
+	    // This way we get it correct for file holes.
+	    // But overwriting files on absolutely full volumes would not
+	    // be very efficient. Well, people are not supposed to fill
+	    // 100% of disk space anyway.
+	    write_bytes = min_t(size_t, count, inode->i_sb->s_blocksize - (pos & (inode->i_sb->s_blocksize - 1)));
+	    num_pages = 1;
+	    // No blocks were claimed before, so do it now.
+	    reiserfs_claim_blocks_to_be_allocated(inode->i_sb, 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits));
+	}
+
+	/* Prepare for writing into the region, read in all the
+	   partially overwritten pages, if needed. And lock the pages,
+	   so that nobody else can access these until we are done.
+	   We get number of actual blocks needed as a result.*/
+	blocks_to_allocate = reiserfs_prepare_file_region_for_write(inode, pos, num_pages, write_bytes, prepared_pages);
+	if ( blocks_to_allocate < 0 ) {
+	    res = blocks_to_allocate;
+	    reiserfs_release_claimed_blocks(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));
+	    break;
+	}
+
+	/* First we correct our estimate of how many blocks we need */
+	reiserfs_release_claimed_blocks(inode->i_sb, (num_pages << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits)) - blocks_to_allocate );
+
+	if ( blocks_to_allocate > 0) {/*We only allocate blocks if we need to*/
+	    /* Fill in all the possible holes and append the file if needed */
+	    res = reiserfs_allocate_blocks_for_region(&th, inode, pos, num_pages, write_bytes, prepared_pages, blocks_to_allocate);
+	}
+
+	/* well, we have allocated the blocks, so it is time to free
+	   the reservation we made earlier. */
+	reiserfs_release_claimed_blocks(inode->i_sb, blocks_to_allocate);
+	if ( res ) {
+	    reiserfs_unprepare_pages(prepared_pages, num_pages);
+	    break;
+	}
+
+/* NOTE that allocating blocks and filling blocks can be done in reverse order
+   and probably we would do that just to get rid of garbage in files after a
+   crash */
+
+	/* Copy data from user-supplied buffer to file's pages */
+	res = reiserfs_copy_from_user_to_file_region(pos, num_pages, write_bytes, prepared_pages, buf);
+	if ( res ) {
+	    reiserfs_unprepare_pages(prepared_pages, num_pages);
+	    break;
+	}
+
+	/* Send the pages to disk and unlock them. */
+	res = reiserfs_submit_file_region_for_write(&th, inode, pos, num_pages,
+	                                            write_bytes,prepared_pages);
+	if ( res )
+	    break;
+
+	already_written += write_bytes;
+	buf += write_bytes;
+	*ppos = pos += write_bytes;
+	count -= write_bytes;
+	balance_dirty_pages_ratelimited(inode->i_mapping);
+    }
+
+    /* this is only true on error */
+    if (th.t_trans_id) {
+        reiserfs_write_lock(inode->i_sb);
+        err = journal_end(&th, th.t_super, th.t_blocks_allocated);
+        reiserfs_write_unlock(inode->i_sb);
+        if (err) {
+            res = err;
+            goto out;
+        }
+    }
+
+    if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
+	res = generic_osync_inode(inode, file->f_mapping, OSYNC_METADATA|OSYNC_DATA);
+
+    up(&inode->i_sem);
+    reiserfs_async_progress_wait(inode->i_sb);
+    return (already_written != 0)?already_written:res;
+
+out:
+    up(&inode->i_sem); // unlock the file on exit.
+    return res;
+}
+
+static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user *buf,
+			       size_t count, loff_t pos)
+{
+    return generic_file_aio_write(iocb, buf, count, pos);
+}
+
+
+
+struct file_operations reiserfs_file_operations = {
+    .read	= generic_file_read,
+    .write	= reiserfs_file_write,
+    .ioctl	= reiserfs_ioctl,
+    .mmap	= generic_file_mmap,
+    .release	= reiserfs_file_release,
+    .fsync	= reiserfs_sync_file,
+    .sendfile	= generic_file_sendfile,
+    .aio_read   = generic_file_aio_read,
+    .aio_write  = reiserfs_aio_write,
+};
+
+
+struct  inode_operations reiserfs_file_inode_operations = {
+    .truncate	= reiserfs_vfs_truncate_file,
+    .setattr    = reiserfs_setattr,
+    .setxattr   = reiserfs_setxattr,
+    .getxattr   = reiserfs_getxattr,
+    .listxattr  = reiserfs_listxattr,
+    .removexattr = reiserfs_removexattr,
+    .permission = reiserfs_permission,
+};
+
+
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
new file mode 100644
index 0000000..e4f64be
--- /dev/null
+++ b/fs/reiserfs/fix_node.c
@@ -0,0 +1,2518 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/**
+ ** old_item_num
+ ** old_entry_num
+ ** set_entry_sizes
+ ** create_virtual_node
+ ** check_left
+ ** check_right
+ ** directory_part_size
+ ** get_num_ver
+ ** set_parameters
+ ** is_leaf_removable
+ ** are_leaves_removable
+ ** get_empty_nodes
+ ** get_lfree
+ ** get_rfree
+ ** is_left_neighbor_in_cache
+ ** decrement_key
+ ** get_far_parent
+ ** get_parents
+ ** can_node_be_removed
+ ** ip_check_balance
+ ** dc_check_balance_internal
+ ** dc_check_balance_leaf
+ ** dc_check_balance
+ ** check_balance
+ ** get_direct_parent
+ ** get_neighbors
+ ** fix_nodes
+ ** 
+ ** 
+ **/
+
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/buffer_head.h>
+
+
+/* To make any changes in the tree we find a node that contains the item
+   to be changed/deleted, or the position in the node we insert a new item
+   into. We call this node S. To do balancing we need to decide what we
+   will shift to the left/right neighbor, or to a new node, where the new
+   item will go, etc. To make this analysis simpler we build a virtual
+   node. The virtual node is an array of items that will replace the items
+   of node S. (For instance if we are going to delete an item, the virtual
+   node does not contain it). The virtual node keeps information about
+   item sizes and types, mergeability of the first and last items, and sizes
+   of all entries in a directory item. We use this array of items when
+   calculating what we can shift to the neighbors and how many nodes we
+   have to have if we do not do any shifting, if we shift to the left/right
+   neighbor, or to both. */
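+/* Summary of the virtual node fields used below (a reading aid paraphrasing
+   the code, not a complete description of the structure): vn_size and
+   vn_nr_item describe the would-be node, vn_mode / vn_affected_item_num /
+   vn_ins_ih / vn_data describe the pending operation, vn_vi points to the
+   array of virtual items and vn_free_ptr to the scratch space behind it;
+   each virtual item carries vi_item_len, vi_ih, vi_item, vi_uarea and the
+   VI_TYPE_*_MERGEABLE flags in vi_type. */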
+
+
+/* taking the item number in the virtual node, returns the number that the item has in the source buffer */
+static inline int old_item_num (int new_num, int affected_item_num, int mode)
+{
+  if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
+    return new_num;
+
+  if (mode == M_INSERT) {
+
+    RFALSE( new_num == 0, 
+	    "vs-8005: for INSERT mode and item number of inserted item");
+
+    return new_num - 1;
+  }
+
+  RFALSE( mode != M_DELETE,
+	  "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'", mode);
+  /* delete mode */
+  return new_num + 1;
+}
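+/* Illustrative mapping (not from the original source): with
+   affected_item_num == 2, M_INSERT maps virtual items 3, 4, ... back to
+   source items 2, 3, ... (the inserted item itself has no source and is
+   skipped by the caller), M_DELETE maps virtual items 2, 3, ... to source
+   items 3, 4, ..., and M_PASTE/M_CUT leave the numbering unchanged; items
+   before the affected one always keep their numbers. */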
+
+static void create_virtual_node (struct tree_balance * tb, int h)
+{
+    struct item_head * ih;
+    struct virtual_node * vn = tb->tb_vn;
+    int new_num;
+    struct buffer_head * Sh;	/* this comes from tb->S[h] */
+
+    Sh = PATH_H_PBUFFER (tb->tb_path, h);
+
+    /* size of changed node */
+    vn->vn_size = MAX_CHILD_SIZE (Sh) - B_FREE_SPACE (Sh) + tb->insert_size[h];
+
+    /* for internal nodes the array of virtual items is not created */
+    if (h) {
+	vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
+	return;
+    }
+
+    /* number of items in virtual node  */
+    vn->vn_nr_item = B_NR_ITEMS (Sh) + ((vn->vn_mode == M_INSERT)? 1 : 0) - ((vn->vn_mode == M_DELETE)? 1 : 0);
+
+    /* first virtual item */
+    vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
+    memset (vn->vn_vi, 0, vn->vn_nr_item * sizeof (struct virtual_item));
+    vn->vn_free_ptr += vn->vn_nr_item * sizeof (struct virtual_item);
+
+
+    /* first item in the node */
+    ih = B_N_PITEM_HEAD (Sh, 0);
+
+    /* define the mergeability for 0-th item (if it is not being deleted) */
+    if (op_is_left_mergeable (&(ih->ih_key), Sh->b_size) && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
+	    vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
+
+    /* go through all items that remain in the virtual node (except for the new (inserted) one) */
+    for (new_num = 0; new_num < vn->vn_nr_item; new_num ++) {
+	int j;
+	struct virtual_item * vi = vn->vn_vi + new_num;
+	int is_affected = ((new_num != vn->vn_affected_item_num) ? 0 : 1);
+    
+
+	if (is_affected && vn->vn_mode == M_INSERT)
+	    continue;
+    
+	/* get item number in source node */
+	j = old_item_num (new_num, vn->vn_affected_item_num, vn->vn_mode);
+    
+	vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
+	vi->vi_ih = ih + j;
+	vi->vi_item = B_I_PITEM (Sh, ih + j);
+	vi->vi_uarea = vn->vn_free_ptr;
+
+	// FIXME: there is no check that the item operation did not
+	// consume too much memory
+	vn->vn_free_ptr += op_create_vi (vn, vi, is_affected, tb->insert_size [0]);
+	if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
+	    reiserfs_panic (tb->tb_sb, "vs-8030: create_virtual_node: "
+			    "virtual node space consumed");
+
+	if (!is_affected)
+	    /* this is not being changed */
+	    continue;
+    
+	if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
+	    vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
+	    vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted
+	}
+    }
+
+  
+    /* virtual inserted item is not defined yet */
+    if (vn->vn_mode == M_INSERT) {
+	struct virtual_item * vi = vn->vn_vi + vn->vn_affected_item_num;
+      
+	RFALSE( vn->vn_ins_ih == 0,
+		"vs-8040: item header of inserted item is not specified");
+	vi->vi_item_len = tb->insert_size[0];
+	vi->vi_ih = vn->vn_ins_ih;
+	vi->vi_item = vn->vn_data;
+	vi->vi_uarea = vn->vn_free_ptr;
+	
+	op_create_vi (vn, vi, 0/*not pasted or cut*/, tb->insert_size [0]);
+    }
+  
+    /* to set the right merge flag we take the right delimiting key and check whether it is a mergeable item */
+    if (tb->CFR[0]) {
+	struct reiserfs_key * key;
+
+	key = B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0]);
+	if (op_is_left_mergeable (key, Sh->b_size) && (vn->vn_mode != M_DELETE ||
+						       vn->vn_affected_item_num != B_NR_ITEMS (Sh) - 1))
+		vn->vn_vi[vn->vn_nr_item-1].vi_type |= VI_TYPE_RIGHT_MERGEABLE;
+
+#ifdef CONFIG_REISERFS_CHECK
+	if (op_is_left_mergeable (key, Sh->b_size) &&
+	    !(vn->vn_mode != M_DELETE || vn->vn_affected_item_num != B_NR_ITEMS (Sh) - 1) ) {
+	    /* we delete last item and it could be merged with right neighbor's first item */
+	    if (!(B_NR_ITEMS (Sh) == 1 && is_direntry_le_ih (B_N_PITEM_HEAD (Sh, 0)) &&
+		  I_ENTRY_COUNT (B_N_PITEM_HEAD (Sh, 0)) == 1)) {
+		/* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
+		print_block (Sh, 0, -1, -1);
+		reiserfs_panic (tb->tb_sb, "vs-8045: create_virtual_node: rdkey %k, affected item==%d (mode==%c) Must be %c", 
+				key, vn->vn_affected_item_num, vn->vn_mode, M_DELETE);
+	    } else
+		/* we can delete directory item, that has only one directory entry in it */
+		;
+	}
+#endif
+    
+    }
+}
+
+
+/* using the virtual node, check how many items can be shifted to the left
+   neighbor */
+static void check_left (struct tree_balance * tb, int h, int cur_free)
+{
+    int i;
+    struct virtual_node * vn = tb->tb_vn;
+    struct virtual_item * vi;
+    int d_size, ih_size;
+
+    RFALSE( cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free);
+
+    /* internal level */
+    if (h > 0) {	
+	tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
+	return;
+    }
+
+    /* leaf level */
+
+    if (!cur_free || !vn->vn_nr_item) {
+	/* no free space or nothing to move */
+	tb->lnum[h] = 0;
+	tb->lbytes = -1;
+	return;
+    }
+
+    RFALSE( !PATH_H_PPARENT (tb->tb_path, 0),
+	    "vs-8055: parent does not exist or invalid");
+
+    vi = vn->vn_vi;
+    if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
+	/* all contents of S[0] fits into L[0] */
+
+	RFALSE( vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
+		"vs-8055: invalid mode or balance condition failed");
+
+	tb->lnum[0] = vn->vn_nr_item;
+	tb->lbytes = -1;
+	return;
+    }
+  
+
+    d_size = 0, ih_size = IH_SIZE;
+
+    /* first item may be merged with the last item in the left neighbor */
+    if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
+	d_size = -((int)IH_SIZE), ih_size = 0;
+
+    tb->lnum[0] = 0;
+    for (i = 0; i < vn->vn_nr_item; i ++, ih_size = IH_SIZE, d_size = 0, vi ++) {
+	d_size += vi->vi_item_len;
+	if (cur_free >= d_size) {	
+	    /* the item can be shifted entirely */
+	    cur_free -= d_size;
+	    tb->lnum[0] ++;
+	    continue;
+	}
+      
+	/* the item cannot be shifted entirely, try to split it */
+	/* check whether L[0] can hold ih and at least one byte of the item body */
+	if (cur_free <= ih_size) {
+	    /* cannot shift even a part of the current item */
+	    tb->lbytes = -1;
+	    return;
+	}
+	cur_free -= ih_size;
+    
+	tb->lbytes = op_check_left (vi, cur_free, 0, 0);
+	if (tb->lbytes != -1)
+	    /* count partially shifted item */
+	    tb->lnum[0] ++;
+    
+	break;
+    }
+  
+    return;
+}
+
+
+/* using the virtual node, check how many items can be shifted to the right
+   neighbor */
+static void check_right (struct tree_balance * tb, int h, int cur_free)
+{
+    int i;
+    struct virtual_node * vn = tb->tb_vn;
+    struct virtual_item * vi;
+    int d_size, ih_size;
+
+    RFALSE( cur_free < 0, "vs-8070: cur_free < 0");
+    
+    /* internal level */
+    if (h > 0) {
+	tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
+	return;
+    }
+    
+    /* leaf level */
+    
+    if (!cur_free || !vn->vn_nr_item) {
+	/* no free space or nothing to move */
+	tb->rnum[h] = 0;
+	tb->rbytes = -1;
+	return;
+    }
+  
+    RFALSE( !PATH_H_PPARENT (tb->tb_path, 0),
+	    "vs-8075: parent does not exist or invalid");
+  
+    vi = vn->vn_vi + vn->vn_nr_item - 1;
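+    /* mirror of check_left: walk from the rightmost item towards the left,
+       since items leave S[0] for R[0] from its right end */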
+    if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
+	/* all contents of S[0] fits into R[0] */
+	
+	RFALSE( vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
+		"vs-8080: invalid mode or balance condition failed");
+
+	tb->rnum[h] = vn->vn_nr_item;
+	tb->rbytes = -1;
+	return;
+    }
+    
+    d_size = 0, ih_size = IH_SIZE;
+    
+    /* the last item may be merged with the first item in the right neighbor */
+    if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
+	d_size = -(int)IH_SIZE, ih_size = 0;
+
+    tb->rnum[0] = 0;
+    for (i = vn->vn_nr_item - 1; i >= 0; i --, d_size = 0, ih_size = IH_SIZE, vi --) {
+	d_size += vi->vi_item_len;
+	if (cur_free >= d_size) {	
+	    /* the item can be shifted entirely */
+	    cur_free -= d_size;
+	    tb->rnum[0] ++;
+	    continue;
+	}
+	
+	/* check whether R[0] can hold ih and at least one byte of the item body */
+	if ( cur_free <= ih_size ) {    /* cannot shift even a part of the current item */
+	    tb->rbytes = -1;
+	    return;
+	}
+	
+	/* R[0] can hold the header of the item and at least one byte of its body */
+	cur_free -= ih_size;	/* cur_free is still > 0 */
+
+	tb->rbytes = op_check_right (vi, cur_free);
+	if (tb->rbytes != -1)
+	    /* count partially shifted item */
+	    tb->rnum[0] ++;
+    
+	break;
+    }
+	
+  return;
+}
+
+
+/*
+ * from - number of items which are shifted to the left neighbor entirely
+ * to - number of items which are shifted to the right neighbor entirely
+ * from_bytes - number of bytes of the boundary item (or directory entries) which are shifted to the left neighbor
+ * to_bytes - number of bytes of the boundary item (or directory entries) which are shifted to the right neighbor */
+static int get_num_ver (int mode, struct tree_balance * tb, int h,
+			int from, int from_bytes,
+			int to,   int to_bytes,
+			short * snum012, int flow
+    )
+{
+    int i;
+    int cur_free;
+    //    int bytes;
+    int units;
+    struct virtual_node * vn = tb->tb_vn;
+    //    struct virtual_item * vi;
+
+    int total_node_size, max_node_size, current_item_size;
+    int needed_nodes;
+    int start_item, 	/* position of item we start filling node from */
+	end_item,	/* position of item we finish filling node by */
+	start_bytes,/* number of first bytes (entries for a directory) of the start_item-th item 
+		       we do not include into the node that is being filled */
+	end_bytes;	/* number of last bytes (entries for a directory) of the end_item-th item 
+			   we do not include into the node that is being filled */
+    int split_item_positions[2]; /* these are positions in virtual item of
+				    items, that are split between S[0] and
+				    S1new and S1new and S2new */
+
+    split_item_positions[0] = -1;
+    split_item_positions[1] = -1;
+
+    /* We only create additional nodes if we are in insert or paste mode
+       or we are in replace mode at the internal level. If h is 0 and
+       the mode is M_REPLACE then in fix_nodes we change the mode to
+       paste or insert before we get here in the code.  */
+    RFALSE( tb->insert_size[h] < 0  || (mode != M_INSERT && mode != M_PASTE),
+	    "vs-8100: insert_size < 0 in overflow");
+
+    max_node_size = MAX_CHILD_SIZE (PATH_H_PBUFFER (tb->tb_path, h));
+
+    /* snum012 [0-2] - number of items, that lay
+       to S[0], first new node and second new node */
+    snum012[3] = -1;	/* s1bytes */
+    snum012[4] = -1;	/* s2bytes */
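+    /* the five snum012 slots passed in: [0..2] are the item counts for S[0],
+       the first and the second new node, [3] and [4] (s1bytes, s2bytes) are
+       the unit counts of items split between them, -1 meaning "no split" */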
+
+    /* internal level */
+    if (h > 0) {
+	i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
+	if (i == max_node_size)
+	    return 1;
+	return (i / max_node_size + 1);
+    }
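+    /* Example with assumed sizes (KEY_SIZE 16, DC_SIZE 8, max_node_size
+       4072): moving 200 pointers gives i = 200 * 24 + 8 = 4808, i.e.
+       4808 / 4072 + 1 = 2 nodes. */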
+
+    /* leaf level */
+    needed_nodes = 1;
+    total_node_size = 0;
+    cur_free = max_node_size;
+
+    // start from 'from'-th item
+    start_item = from;
+    // skip its first 'start_bytes' units
+    start_bytes = ((from_bytes != -1) ? from_bytes : 0);
+
+    // last included item is the 'end_item'-th one
+    end_item = vn->vn_nr_item - to - 1;
+    // do not count last 'end_bytes' units of 'end_item'-th item
+    end_bytes = (to_bytes != -1) ? to_bytes : 0;
+
+    /* go through all item beginning from the start_item-th item and ending by
+       the end_item-th item. Do not count first 'start_bytes' units of
+       'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
+    
+    for (i = start_item; i <= end_item; i ++) {
+	struct virtual_item * vi = vn->vn_vi + i;
+	int skip_from_end = ((i == end_item) ? end_bytes : 0);
+
+	RFALSE( needed_nodes > 3, "vs-8105: too many nodes are needed");
+
+	/* get size of current item */
+	current_item_size = vi->vi_item_len;
+
+	/* do not take in calculation head part (from_bytes) of from-th item */
+	current_item_size -= op_part_size (vi, 0/*from start*/, start_bytes);
+
+	/* do not take in calculation tail part of last item */
+	current_item_size -= op_part_size (vi, 1/*from end*/, skip_from_end);
+
+	/* if the item fits into the current node entirely */
+	if (total_node_size + current_item_size <= max_node_size) {
+	    snum012[needed_nodes - 1] ++;
+	    total_node_size += current_item_size;
+	    start_bytes = 0;
+	    continue;
+	}
+
+	if (current_item_size > max_node_size) {
+	    /* virtual item length is longer than the max size of an item in
+               a node. This is impossible for a direct item */
+	    RFALSE( is_direct_le_ih (vi->vi_ih),
+		    "vs-8110: "
+		    "direct item length is %d. It can not be longer than %d",
+		    current_item_size, max_node_size);
+	    /* we will try to split it */
+	    flow = 1;
+	}
+
+	if (!flow) {
+	    /* as we do not split items, take new node and continue */
+	    needed_nodes ++; i --; total_node_size = 0;
+	    continue;
+	}
+
+	// calculate number of item units which fit into node being
+	// filled
+	{
+	    int free_space;
+
+	    free_space = max_node_size - total_node_size - IH_SIZE;
+	    units = op_check_left (vi, free_space, start_bytes, skip_from_end);
+	    if (units == -1) {
+		/* nothing fits into current node, take new node and continue */
+		needed_nodes ++, i--, total_node_size = 0;
+		continue;
+	    }
+	}
+
+	/* something fits into the current node */
+	//if (snum012[3] != -1 || needed_nodes != 1)
+	//  reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
+	//snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
+	start_bytes += units;
+	snum012[needed_nodes - 1 + 3] = units;
+
+	if (needed_nodes > 2)
+	    reiserfs_warning (tb->tb_sb, "vs-8111: get_num_ver: "
+			      "split_item_position is out of boundary");
+	snum012[needed_nodes - 1] ++;
+	split_item_positions[needed_nodes - 1] = i;
+	needed_nodes ++;
+	/* continue from the same item with start_bytes != -1 */
+	start_item = i;
+	i --;
+	total_node_size = 0;
+    }
+
+    // snum012[4] (if it is not -1) contains the number of units that
+    // are to be in S1new, snum012[3] - the number to be in S0. They are supposed
+    // to be S1bytes and S2bytes correspondingly, so recalculate
+    if (snum012[4] > 0) {
+	int split_item_num;
+	int bytes_to_r, bytes_to_l;
+	int bytes_to_S1new;
+    
+	split_item_num = split_item_positions[1];
+	bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0);
+	bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0);
+	bytes_to_S1new = ((split_item_positions[0] == split_item_positions[1]) ? snum012[3] : 0);
+
+	// s2bytes
+	snum012[4] = op_unit_num (&vn->vn_vi[split_item_num]) - snum012[4] - bytes_to_r - bytes_to_l - bytes_to_S1new;
+
+	if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
+	    vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
+	    reiserfs_warning (tb->tb_sb, "vs-8115: get_num_ver: not "
+			      "directory or indirect item");
+    }
+
+    /* now we know S2bytes, calculate S1bytes */
+    if (snum012[3] > 0) {
+	int split_item_num;
+	int bytes_to_r, bytes_to_l;
+	int bytes_to_S2new;
+    
+	split_item_num = split_item_positions[0];
+	bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0);
+	bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0);
+	bytes_to_S2new = ((split_item_positions[0] == split_item_positions[1] && snum012[4] != -1) ? snum012[4] : 0);
+
+	// s1bytes
+	snum012[3] = op_unit_num (&vn->vn_vi[split_item_num]) - snum012[3] - bytes_to_r - bytes_to_l - bytes_to_S2new;
+    }
+    
+    return needed_nodes;
+}
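+
+/* Note: ip_check_balance() below calls get_num_ver() up to eight times, once
+   per shifting variant (nothing/left/right/both directions, each with and
+   without flowing), and keeps the variant that needs the fewest nodes. */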
+
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+
+/* Set parameters for balancing.
+ * Performs write of results of analysis of balancing into structure tb,
+ * where it will later be used by the functions that actually do the balancing. 
+ * Parameters:
+ *	tb	tree_balance structure;
+ *	h	current level of the node;
+ *	lnum	number of items from S[h] that must be shifted to L[h];
+ *	rnum	number of items from S[h] that must be shifted to R[h];
+ *	blk_num	number of blocks that S[h] will be split into;
+ *	s012	number of items that fall into the split nodes.
+ *	lbytes	number of bytes which flow to the left neighbor from the item that is
+ *		not shifted entirely
+ *	rbytes	number of bytes which flow to the right neighbor from the item that is
+ *		not shifted entirely
+ *	s1bytes	number of bytes which flow to the first  new node when S[0] splits (this number is contained in s012 array)
+ */
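+/* A typical call from this file is set_parameters (tb, h, 0, 0, 1, NULL, -1, -1),
+   which records "shift nothing and keep S[h] as a single block". */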
+
+static void set_parameters (struct tree_balance * tb, int h, int lnum,
+			    int rnum, int blk_num, short * s012, int lb, int rb)
+{
+
+  tb->lnum[h] = lnum;
+  tb->rnum[h] = rnum;
+  tb->blknum[h] = blk_num;
+
+  if (h == 0)
+    {  /* only for leaf level */
+      if (s012 != NULL)
+	{
+	  tb->s0num = * s012 ++,
+	  tb->s1num = * s012 ++,
+	  tb->s2num = * s012 ++;
+	  tb->s1bytes = * s012 ++;
+	  tb->s2bytes = * s012;
+	}
+      tb->lbytes = lb;
+      tb->rbytes = rb;
+    }
+  PROC_INFO_ADD( tb -> tb_sb, lnum[ h ], lnum );
+  PROC_INFO_ADD( tb -> tb_sb, rnum[ h ], rnum );
+
+  PROC_INFO_ADD( tb -> tb_sb, lbytes[ h ], lb );
+  PROC_INFO_ADD( tb -> tb_sb, rbytes[ h ], rb );
+}
+
+
+
+/* check whether the node disappears if we shift tb->lnum[0] items to the left
+   neighbor and tb->rnum[0] to the right one. */
+static int is_leaf_removable (struct tree_balance * tb)
+{
+  struct virtual_node * vn = tb->tb_vn;
+  int to_left, to_right;
+  int size;
+  int remain_items;
+
+  /* number of items that will be shifted to the left (right) neighbor
+     entirely */
+  to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
+  to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
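+  /* lbytes/rbytes != -1 means the last counted item would only be shifted
+     partially, so it is not counted as entirely shifted here */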
+  remain_items = vn->vn_nr_item;
+
+  /* how many items remain in S[0] after shifting to the neighbors */
+  remain_items -= (to_left + to_right);
+
+  if (remain_items < 1) {
+    /* all content of node can be shifted to neighbors */
+    set_parameters (tb, 0, to_left, vn->vn_nr_item - to_left, 0, NULL, -1, -1);    
+    return 1;
+  }
+  
+  if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
+    /* S[0] is not removable */
+    return 0;
+
+  /* check whether we can divide the single remaining item between the neighbors */
+
+  /* get size of remaining item (in item units) */
+  size = op_unit_num (&(vn->vn_vi[to_left]));
+
+  if (tb->lbytes + tb->rbytes >= size) {
+    set_parameters (tb, 0, to_left + 1, to_right + 1, 0, NULL, tb->lbytes, -1);
+    return 1;
+  }
+
+  return 0;
+}
+
+
+/* check whether L, S, R can be joined in one node */
+static int are_leaves_removable (struct tree_balance * tb, int lfree, int rfree)
+{
+  struct virtual_node * vn = tb->tb_vn;
+  int ih_size;
+  struct buffer_head *S0;
+
+  S0 = PATH_H_PBUFFER (tb->tb_path, 0);
+
+  ih_size = 0;
+  if (vn->vn_nr_item) {
+    if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
+      ih_size += IH_SIZE;
+
+    if (vn->vn_vi[vn->vn_nr_item-1].vi_type & VI_TYPE_RIGHT_MERGEABLE)
+      ih_size += IH_SIZE;
+  } else {
+    /* there was only one item and it will be deleted */
+    struct item_head * ih;
+
+    RFALSE( B_NR_ITEMS (S0) != 1,
+	    "vs-8125: item number must be 1: it is %d", B_NR_ITEMS(S0));
+
+    ih = B_N_PITEM_HEAD (S0, 0);
+    if (tb->CFR[0] && !comp_short_le_keys (&(ih->ih_key), B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0])))
+      if (is_direntry_le_ih (ih)) {
+	/* The directory must be in a correct state here: somewhere to the
+	   left the first directory item must exist.  The item being deleted
+	   cannot be that first one, because its right neighbor is an item of
+	   the same directory (the first item is always the last to be
+	   deleted).  So the neighbors of the deleted item can be merged, and
+	   we can save ih_size */
+	ih_size = IH_SIZE;
+
+	/* we might check that the left neighbor exists and belongs to the
+	   same directory */
+	RFALSE(le_ih_k_offset (ih) == DOT_OFFSET,
+	       "vs-8130: the first directory item can not be removed until the directory is empty");
+      }
+  }
+
+  if (MAX_CHILD_SIZE (S0) + vn->vn_size <= rfree + lfree + ih_size) {
+    set_parameters (tb, 0, -1, -1, -1, NULL, -1, -1);
+    PROC_INFO_INC( tb -> tb_sb, leaves_removable );
+    return 1;  
+  }
+  return 0;
+  
+}
+
+
+
+/* when we do not split item, lnum and rnum are numbers of entire items */
+#define SET_PAR_SHIFT_LEFT \
+if (h)\
+{\
+   int to_l;\
+   \
+   to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
+	      (MAX_NR_KEY(Sh) + 1 - lpar);\
+	      \
+	      set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
+}\
+else \
+{\
+   if (lset==LEFT_SHIFT_FLOW)\
+     set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
+		     tb->lbytes, -1);\
+   else\
+     set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
+		     -1, -1);\
+}
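+/* On internal levels the macro above (and SET_PAR_SHIFT_RIGHT below) evens
+   out the child-pointer counts of S[h] and the chosen neighbor; on the leaf
+   level it passes lbytes/rbytes only when the flowing (item-splitting)
+   variant gave the better packing. */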
+
+
+#define SET_PAR_SHIFT_RIGHT \
+if (h)\
+{\
+   int to_r;\
+   \
+   to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
+   \
+   set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
+}\
+else \
+{\
+   if (rset==RIGHT_SHIFT_FLOW)\
+     set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
+		  -1, tb->rbytes);\
+   else\
+     set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
+		  -1, -1);\
+}
+
+
+static void free_buffers_in_tb (
+		       struct tree_balance * p_s_tb
+		       ) {
+  int n_counter;
+
+  decrement_counters_in_path(p_s_tb->tb_path);
+  
+  for ( n_counter = 0; n_counter < MAX_HEIGHT; n_counter++ ) {
+    decrement_bcount(p_s_tb->L[n_counter]);
+    p_s_tb->L[n_counter] = NULL;
+    decrement_bcount(p_s_tb->R[n_counter]);
+    p_s_tb->R[n_counter] = NULL;
+    decrement_bcount(p_s_tb->FL[n_counter]);
+    p_s_tb->FL[n_counter] = NULL;
+    decrement_bcount(p_s_tb->FR[n_counter]);
+    p_s_tb->FR[n_counter] = NULL;
+    decrement_bcount(p_s_tb->CFL[n_counter]);
+    p_s_tb->CFL[n_counter] = NULL;
+    decrement_bcount(p_s_tb->CFR[n_counter]);
+    p_s_tb->CFR[n_counter] = NULL;
+  }
+}
+
+
+/* Get new buffers for storing new nodes that are created while balancing.
+ * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ *	        CARRY_ON - schedule didn't occur while the function worked;
+ *	        NO_DISK_SPACE - no disk space.
+ */
+/* The function is NOT SCHEDULE-SAFE! */
+static int  get_empty_nodes(
+              struct tree_balance * p_s_tb,
+              int n_h
+            ) {
+  struct buffer_head  * p_s_new_bh,
+    		      *	p_s_Sh = PATH_H_PBUFFER (p_s_tb->tb_path, n_h);
+  b_blocknr_t	      *	p_n_blocknr,
+    			a_n_blocknrs[MAX_AMOUNT_NEEDED] = {0, };
+  int       		n_counter,
+   			n_number_of_freeblk,
+                	n_amount_needed,/* number of needed empty blocks */
+			n_retval = CARRY_ON;
+  struct super_block *	p_s_sb = p_s_tb->tb_sb;
+
+
+  /* number_of_freeblk is the number of empty blocks which have been
+     acquired for use by the balancing algorithm minus the number of
+     empty blocks used in the previous levels of the analysis,
+     number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
+     after empty blocks are acquired, and the balancing analysis is
+     then restarted, amount_needed is the number needed by this level
+     (n_h) of the balancing analysis.
+			    
+     Note that for systems with many processes writing, it would be
+     more layout optimal to calculate the total number needed by all
+     levels and then to run reiserfs_new_blocks to get all of them at once.  */
+
+  /* Initialize number_of_freeblk to the amount acquired prior to the restart of
+     the analysis or 0 if not restarted, then subtract the amount needed
+     by all of the levels of the tree below n_h. */
+  /* blknum includes S[n_h], so we subtract 1 in this calculation */
+  for ( n_counter = 0, n_number_of_freeblk = p_s_tb->cur_blknum; n_counter < n_h; n_counter++ )
+    n_number_of_freeblk -= ( p_s_tb->blknum[n_counter] ) ? (p_s_tb->blknum[n_counter] - 1) : 0;
+
+  /* Allocate missing empty blocks. */
+  /* if p_s_Sh == 0  then we are getting a new root */
+  n_amount_needed = ( p_s_Sh ) ? (p_s_tb->blknum[n_h] - 1) : 1;
+  /*  Amount_needed = the amount that we need more than the amount that we have. */
+  if ( n_amount_needed > n_number_of_freeblk )
+    n_amount_needed -= n_number_of_freeblk;
+  else /* If we have enough already then there is nothing to do. */
+    return CARRY_ON;
+
+  /* No need to check quota - quota is not allocated for blocks used for formatted nodes */
+  if (reiserfs_new_form_blocknrs (p_s_tb, a_n_blocknrs,
+                                   n_amount_needed) == NO_DISK_SPACE)
+    return NO_DISK_SPACE;
+
+  /* for each blocknumber we just got, get a buffer and stick it on FEB */
+  for ( p_n_blocknr = a_n_blocknrs, n_counter = 0; n_counter < n_amount_needed;
+	p_n_blocknr++, n_counter++ ) { 
+
+    RFALSE( ! *p_n_blocknr,
+	    "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
+
+    p_s_new_bh = sb_getblk(p_s_sb, *p_n_blocknr);
+    RFALSE (buffer_dirty (p_s_new_bh) ||
+	    buffer_journaled (p_s_new_bh) ||
+	    buffer_journal_dirty (p_s_new_bh),
+	    "PAP-8140: journaled or dirty buffer %b for the new block", 
+	    p_s_new_bh);
+    
+    /* Put empty buffers into the array. */
+    RFALSE (p_s_tb->FEB[p_s_tb->cur_blknum],
+	    "PAP-8141: busy slot for new buffer");
+
+    set_buffer_journal_new (p_s_new_bh);
+    p_s_tb->FEB[p_s_tb->cur_blknum++] = p_s_new_bh;
+  }
+
+  if ( n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB (p_s_tb) )
+    n_retval = REPEAT_SEARCH ;
+
+  return n_retval;
+}
+
+
+/* Get free space of the left neighbor, which is stored in the parent
+ * node of the left neighbor.  */
+static int get_lfree (struct tree_balance * tb, int h)
+{
+    struct buffer_head * l, * f;
+    int order;
+
+    if ((f = PATH_H_PPARENT (tb->tb_path, h)) == 0 || (l = tb->FL[h]) == 0)
+	return 0;
+
+    if (f == l)
+	order = PATH_H_B_ITEM_ORDER (tb->tb_path, h) - 1;
+    else {
+	order = B_NR_ITEMS (l);
+	f = l;
+    }
+
+    return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f,order)));
+}
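+
+/* The free space of a formatted node is not read from the node itself here:
+   its parent records the child's used size in the matching disk_child entry,
+   so free = MAX_CHILD_SIZE - dc_size (with 4 KiB blocks MAX_CHILD_SIZE is the
+   block size minus the block head, roughly 4072 bytes; illustrative figure). */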
+
+
+/* Get free space of the right neighbor,
+ * which is stored in the parent node of the right neighbor.
+ */
+static int get_rfree (struct tree_balance * tb, int h)
+{
+  struct buffer_head * r, * f;
+  int order;
+
+  if ((f = PATH_H_PPARENT (tb->tb_path, h)) == 0 || (r = tb->FR[h]) == 0)
+    return 0;
+
+  if (f == r)
+      order = PATH_H_B_ITEM_ORDER (tb->tb_path, h) + 1;
+  else {
+      order = 0;
+      f = r;
+  }
+
+  return (MAX_CHILD_SIZE(f) - dc_size( B_N_CHILD(f,order)));
+
+}
+
+
+/* Check whether left neighbor is in memory. */
+static int  is_left_neighbor_in_cache(
+              struct tree_balance * p_s_tb,
+              int                   n_h
+            ) {
+  struct buffer_head  * p_s_father, * left;
+  struct super_block  * p_s_sb = p_s_tb->tb_sb;
+  b_blocknr_t		n_left_neighbor_blocknr;
+  int                   n_left_neighbor_position;
+
+  if ( ! p_s_tb->FL[n_h] ) /* Father of the left neighbor does not exist. */
+    return 0;
+
+  /* Calculate father of the node to be balanced. */
+  p_s_father = PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1);
+
+  RFALSE( ! p_s_father || 
+	  ! B_IS_IN_TREE (p_s_father) || 
+	  ! B_IS_IN_TREE (p_s_tb->FL[n_h]) ||
+	  ! buffer_uptodate (p_s_father) || 
+	  ! buffer_uptodate (p_s_tb->FL[n_h]),
+	  "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", 
+	  p_s_father, p_s_tb->FL[n_h]);
+
+
+  /* Get position of the pointer to the left neighbor into the left father. */
+  n_left_neighbor_position = ( p_s_father == p_s_tb->FL[n_h] ) ?
+                      p_s_tb->lkey[n_h] : B_NR_ITEMS (p_s_tb->FL[n_h]);
+  /* Get left neighbor block number. */
+  n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
+  /* Look for the left neighbor in the cache. */
+  if ( (left = sb_find_get_block(p_s_sb, n_left_neighbor_blocknr)) ) {
+
+    RFALSE( buffer_uptodate (left) && ! B_IS_IN_TREE(left),
+	    "vs-8170: left neighbor (%b %z) is not in the tree", left, left);
+    put_bh(left) ;
+    return 1;
+  }
+
+  return 0;
+}
+
+
+#define LEFT_PARENTS  'l'
+#define RIGHT_PARENTS 'r'
+
+
+static void decrement_key (struct cpu_key * p_s_key)
+{
+    // call item specific function for this key
+    item_ops[cpu_key_k_type (p_s_key)]->decrement_key (p_s_key);
+}
+
+
+
+
+/* Calculate the far left/right parent of the left/right neighbor of the current node, that
+ * is, calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
+ * Calculate the left/right common parent of the current node and L[h]/R[h].
+ * Calculate the left/right delimiting key position.
+ * Returns:	PATH_INCORRECT    - path in the tree is not correct;
+ *		SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ *	        CARRY_ON          - schedule didn't occur while the function worked;
+ */
+static int  get_far_parent (struct tree_balance *   p_s_tb,
+			    int                     n_h,
+			    struct buffer_head  **  pp_s_father,
+			    struct buffer_head  **  pp_s_com_father,
+			    char                    c_lr_par) 
+{
+    struct buffer_head  * p_s_parent;
+    INITIALIZE_PATH (s_path_to_neighbor_father);
+    struct path * p_s_path = p_s_tb->tb_path;
+    struct cpu_key	s_lr_father_key;
+    int                   n_counter,
+	n_position = INT_MAX,
+	n_first_last_position = 0,
+	n_path_offset = PATH_H_PATH_OFFSET(p_s_path, n_h);
+
+    /* Starting from F[n_h], go upwards in the tree and look for the common
+       ancestor of F[n_h] and its left/right neighbor, which should be obtained. */
+
+    n_counter = n_path_offset;
+
+    RFALSE( n_counter < FIRST_PATH_ELEMENT_OFFSET,
+	    "PAP-8180: invalid path length");
+
+  
+    for ( ; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--  )  {
+	/* Check whether parent of the current buffer in the path is really parent in the tree. */
+	if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_path, n_counter - 1)) )
+	    return REPEAT_SEARCH;
+	/* Check whether position in the parent is correct. */
+	if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_counter - 1)) > B_NR_ITEMS(p_s_parent) )
+	    return REPEAT_SEARCH;
+	/* Check whether parent at the path really points to the child. */
+	if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
+	     PATH_OFFSET_PBUFFER(p_s_path, n_counter)->b_blocknr )
+	    return REPEAT_SEARCH;
+	/* Return delimiting key if position in the parent is not equal to first/last one. */
+	if ( c_lr_par == RIGHT_PARENTS )
+	    n_first_last_position = B_NR_ITEMS (p_s_parent);
+	if ( n_position != n_first_last_position ) {
+	    *pp_s_com_father = p_s_parent;
+	    get_bh(*pp_s_com_father) ;
+	    /*(*pp_s_com_father = p_s_parent)->b_count++;*/
+	    break;
+	}
+    }
+
+    /* if we are in the root of the tree, then there is no common father */
+    if ( n_counter == FIRST_PATH_ELEMENT_OFFSET ) {
+	/* Check whether first buffer in the path is the root of the tree. */
+	if ( PATH_OFFSET_PBUFFER(p_s_tb->tb_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+	     SB_ROOT_BLOCK (p_s_tb->tb_sb) ) {
+	    *pp_s_father = *pp_s_com_father = NULL;
+	    return CARRY_ON;
+	}
+	return REPEAT_SEARCH;
+    }
+
+    RFALSE( B_LEVEL (*pp_s_com_father) <= DISK_LEAF_NODE_LEVEL,
+	    "PAP-8185: (%b %z) level too small", 
+	    *pp_s_com_father, *pp_s_com_father);
+
+    /* Check whether the common parent is locked. */
+
+    if ( buffer_locked (*pp_s_com_father) ) {
+	__wait_on_buffer(*pp_s_com_father);
+	if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+	    decrement_bcount(*pp_s_com_father);
+	    return REPEAT_SEARCH;
+	}
+    }
+
+    /* So, we got the common parent of the current node and its left/right neighbor.
+       Now we are getting the parent of the left/right neighbor. */
+
+    /* Form key to get parent of the left/right neighbor. */
+    le_key2cpu_key (&s_lr_father_key, B_N_PDELIM_KEY(*pp_s_com_father, ( c_lr_par == LEFT_PARENTS ) ?
+						     (p_s_tb->lkey[n_h - 1] = n_position - 1) : (p_s_tb->rkey[n_h - 1] = n_position)));
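+
+    /* For LEFT_PARENTS the delimiting key to the left of n_position is taken
+       (and remembered in lkey[n_h - 1]); decrementing it below makes the
+       following search_by_key descend into the left neighbor's subtree
+       instead of our own.  RIGHT_PARENTS records rkey[n_h - 1] analogously. */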
+
+
+    if ( c_lr_par == LEFT_PARENTS )
+	decrement_key(&s_lr_father_key);
+
+    if (search_by_key(p_s_tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, n_h + 1) == IO_ERROR)
+	// path is released
+	return IO_ERROR;
+
+    if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+	decrement_counters_in_path(&s_path_to_neighbor_father);
+	decrement_bcount(*pp_s_com_father);
+	return REPEAT_SEARCH;
+    }
+
+    *pp_s_father = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
+
+    RFALSE( B_LEVEL (*pp_s_father) != n_h + 1,
+	    "PAP-8190: (%b %z) level too small", *pp_s_father, *pp_s_father);
+    RFALSE( s_path_to_neighbor_father.path_length < FIRST_PATH_ELEMENT_OFFSET,
+	    "PAP-8192: path length is too small");
+
+    s_path_to_neighbor_father.path_length--;
+    decrement_counters_in_path(&s_path_to_neighbor_father);
+    return CARRY_ON;
+}
+
+
+/* Get parents of neighbors of node in the path(S[n_path_offset]) and common parents of
+ * S[n_path_offset] and L[n_path_offset]/R[n_path_offset]: F[n_path_offset], FL[n_path_offset],
+ * FR[n_path_offset], CFL[n_path_offset], CFR[n_path_offset].
+ * Calculate numbers of left and right delimiting keys position: lkey[n_path_offset], rkey[n_path_offset].
+ * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ *	        CARRY_ON - schedule didn't occur while the function worked;
+ */
+static int  get_parents (struct tree_balance * p_s_tb, int n_h)
+{
+    struct path         * p_s_path = p_s_tb->tb_path;
+    int                   n_position,
+	n_ret_value,
+	n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
+    struct buffer_head  * p_s_curf,
+	* p_s_curcf;
+
+    /* Current node is the root of the tree or will be root of the tree */
+    if ( n_path_offset <= FIRST_PATH_ELEMENT_OFFSET ) {
+	/* The root can not have parents.
+	   Release nodes which previously were obtained as parents of the current node neighbors. */
+	decrement_bcount(p_s_tb->FL[n_h]);
+	decrement_bcount(p_s_tb->CFL[n_h]);
+	decrement_bcount(p_s_tb->FR[n_h]);
+	decrement_bcount(p_s_tb->CFR[n_h]);
+	p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] = p_s_tb->CFR[n_h] = NULL;
+	return CARRY_ON;
+    }
+  
+    /* Get parent FL[n_path_offset] of L[n_path_offset]. */
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1)) )  {
+	/* Current node is not the first child of its parent. */
+	/*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2;*/
+	p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+	get_bh(p_s_curf) ;
+	get_bh(p_s_curf) ;
+	p_s_tb->lkey[n_h] = n_position - 1;
+    }
+    else  {
+	/* Calculate the current parent of L[n_path_offset], which is the left neighbor of the current node.
+	   Calculate the current common parent of L[n_path_offset] and the current node. Note that
+	   CFL[n_path_offset] is not equal to FL[n_path_offset] and CFL[n_path_offset] is not equal to F[n_path_offset].
+	   Calculate lkey[n_path_offset]. */
+	if ( (n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf,
+					   &p_s_curcf, LEFT_PARENTS)) != CARRY_ON )
+	    return n_ret_value;
+    }
+
+    decrement_bcount(p_s_tb->FL[n_h]);
+    p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
+    decrement_bcount(p_s_tb->CFL[n_h]);
+    p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */
+
+    RFALSE( (p_s_curf && !B_IS_IN_TREE (p_s_curf)) || 
+	    (p_s_curcf && !B_IS_IN_TREE (p_s_curcf)),
+	    "PAP-8195: FL (%b) or CFL (%b) is invalid", p_s_curf, p_s_curcf);
+
+/* Get parent FR[n_h] of R[n_h]. */
+
+/* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */
+    if ( n_position == B_NR_ITEMS (PATH_H_PBUFFER(p_s_path, n_h + 1)) ) {
+/* Calculate the current parent of R[n_h], which is the right neighbor of F[n_h].
+   Calculate the current common parent of R[n_h] and the current node. Note that CFR[n_h]
+   is not equal to FR[n_path_offset] and CFR[n_h] is not equal to F[n_h]. */
+	if ( (n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf,  &p_s_curcf, RIGHT_PARENTS)) != CARRY_ON )
+	    return n_ret_value;
+    }
+    else {
+/* Current node is not the last child of its parent F[n_h]. */
+	/*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2;*/
+	p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+	get_bh(p_s_curf) ;
+	get_bh(p_s_curf) ;
+	p_s_tb->rkey[n_h] = n_position;
+    }	
+
+    decrement_bcount(p_s_tb->FR[n_h]);
+    p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. */
+    
+    decrement_bcount(p_s_tb->CFR[n_h]);
+    p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */
+
+    RFALSE( (p_s_curf && !B_IS_IN_TREE (p_s_curf)) ||
+            (p_s_curcf && !B_IS_IN_TREE (p_s_curcf)),
+	    "PAP-8205: FR (%b) or CFR (%b) is invalid", p_s_curf, p_s_curcf);
+
+    return CARRY_ON;
+}
+
+
+/* it is possible to remove the node as a result of shifting to the
+   neighbors even when we insert or paste an item. */
+static inline int can_node_be_removed (int mode, int lfree, int sfree, int rfree, struct tree_balance * tb, int h)
+{
+    struct buffer_head * Sh = PATH_H_PBUFFER (tb->tb_path, h);
+    int levbytes = tb->insert_size[h];
+    struct item_head * ih;
+    struct reiserfs_key * r_key = NULL;
+
+    ih = B_N_PITEM_HEAD (Sh, 0);
+    if ( tb->CFR[h] )
+	r_key = B_N_PDELIM_KEY(tb->CFR[h],tb->rkey[h]);
+  
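+    /* The node can disappear only if L, S and R together can absorb its
+       current contents plus the inserted bytes; merging the boundary items
+       with the neighbors saves one item header (IH_SIZE) per mergeable end
+       on the leaf level, while on internal levels an extra KEY_SIZE is
+       required. */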
+    if (
+	lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
+	/* shifting may merge items which might save space */
+	- (( ! h && op_is_left_mergeable (&(ih->ih_key), Sh->b_size) ) ? IH_SIZE : 0)
+	- (( ! h && r_key && op_is_left_mergeable (r_key, Sh->b_size) ) ? IH_SIZE : 0)
+	+ (( h ) ? KEY_SIZE : 0))
+    {
+	/* node can not be removed */
+	if (sfree >= levbytes ) { /* new item fits into node S[h] without any shifting */
+	    if ( ! h )
+		tb->s0num = B_NR_ITEMS(Sh) + ((mode == M_INSERT ) ? 1 : 0);
+	    set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+	    return NO_BALANCING_NEEDED;
+	}
+    }
+    PROC_INFO_INC( tb -> tb_sb, can_node_be_removed[ h ] );
+    return !NO_BALANCING_NEEDED;
+}
+
+
+
+/* Check whether current node S[h] is balanced when increasing its size by
+ * Inserting or Pasting.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *	tb	tree_balance structure;
+ *	h	current level of the node;
+ *	inum	item number in S[h];
+ *	mode	i - insert, p - paste;
+ * Returns:	1 - schedule occurred; 
+ *	        0 - balancing for higher levels needed;
+ *	       -1 - no balancing for higher levels needed;
+ *	       -2 - no disk space.
+ */
+/* ip means Inserting or Pasting */
+static int ip_check_balance (struct tree_balance * tb, int h)
+{
+    struct virtual_node * vn = tb->tb_vn;
+    int levbytes,  /* Number of bytes that must be inserted into (value
+		      is negative if bytes are deleted) buffer which
+		      contains node being balanced.  The mnemonic is
+		      that the attempted change in node space used level
+		      is levbytes bytes. */
+	n_ret_value;
+
+    int lfree, sfree, rfree /* free space in L, S and R */;
+
+    /* nver is short for number of vertices, and lnver is the number if
+       we shift to the left, rnver is the number if we shift to the
+       right, and lrnver is the number if we shift in both directions.
+       The goal is to minimize first the number of vertices, and second,
+       the number of vertices whose contents are changed by shifting,
+       and third the number of uncached vertices whose contents are
+       changed by shifting and must be read from disk.  */
+    int nver, lnver, rnver, lrnver;
+
+    /* used at leaf level only, S0 = S[0] is the node being balanced,
+       sInum [ I = 0,1,2 ] is the number of items that will
+       remain in node SI after balancing.  S1 and S2 are new
+       nodes that might be created. */
+  
+    /* we perform 8 calls to get_num_ver().  For each call we calculate five parameters,
+       where the 4th parameter is s1bytes and the 5th - s2bytes
+    */
+    short snum012[40] = {0,};	/* s0num, s1num, s2num for 8 cases 
+				   0,1 - do not shift and do not shift but bottle
+				   2 - shift only whole items to the left
+				   3 - shift to the left and bottle as much as possible
+				   4,5 - shift to the right (whole items and as much as possible)
+				   6,7 - shift in both directions (whole items and as much as possible)
+				*/
+
+    /* Sh is the node whose balance is currently being checked */
+    struct buffer_head * Sh;
+  
+    Sh = PATH_H_PBUFFER (tb->tb_path, h);
+    levbytes = tb->insert_size[h];
+  
+    /* Calculate balance parameters for creating new root. */
+    if ( ! Sh )  {
+	if ( ! h )
+	    reiserfs_panic (tb->tb_sb, "vs-8210: ip_check_balance: S[0] can not be 0");
+	switch ( n_ret_value = get_empty_nodes (tb, h) )  {
+	case CARRY_ON:
+	    set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+	    return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+
+	case NO_DISK_SPACE:
+	case REPEAT_SEARCH:
+	    return n_ret_value;
+	default:   
+	    reiserfs_panic(tb->tb_sb, "vs-8215: ip_check_balance: incorrect return value of get_empty_nodes");
+	}
+    }
+  
+    if ( (n_ret_value = get_parents (tb, h)) != CARRY_ON ) /* get parents of S[h] neighbors. */
+	return n_ret_value;
+  
+    sfree = B_FREE_SPACE (Sh);
+
+    /* get free space of neighbors */
+    rfree = get_rfree (tb, h);
+    lfree = get_lfree (tb, h);
+
+    if (can_node_be_removed (vn->vn_mode, lfree, sfree, rfree, tb, h) == NO_BALANCING_NEEDED)
+	/* and new item fits into node S[h] without any shifting */
+	return NO_BALANCING_NEEDED;
+     
+    create_virtual_node (tb, h);
+
+    /*	
+	determine maximal number of items we can shift to the left neighbor (in tb structure)
+	and the maximal number of bytes that can flow to the left neighbor
+	from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
+    */
+    check_left (tb, h, lfree);
+
+    /*
+      determine maximal number of items we can shift to the right neighbor (in tb structure)
+      and the maximal number of bytes that can flow to the right neighbor
+      from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
+    */
+    check_right (tb, h, rfree);
+
+
+    /* all contents of internal node S[h] can be moved into its
+       neighbors, S[h] will be removed after balancing */
+    if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
+	int to_r; 
+       
+	/* Since we are working on internal nodes, and our internal
+	   nodes have fixed size entries, then we can balance by the
+	   number of items rather than the space they consume.  In this
+	   routine we set the left node equal to the right node,
+	   allowing a difference of less than or equal to 1 child
+	   pointer. */
+	to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 - 
+	    (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
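+	/* Worked example (hypothetical numbers): MAX_NR_KEY(Sh) == 3,
+	   lnum[h] == 2, rnum[h] == 3 and vn_nr_item == 3 give
+	   to_r = ((3<<1)+2-2-3+3+1)/2 - (3+1-3) = 3 - 1 = 2, so two child
+	   pointers go right and vn_nr_item + 1 - to_r == 2 go left. */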
+	set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+	return CARRY_ON;
+    }
+
+    /* this checks the balance condition: any two neighboring nodes cannot fit in one node */
+    RFALSE( h && 
+	    ( tb->lnum[h] >= vn->vn_nr_item + 1 || 
+	      tb->rnum[h] >= vn->vn_nr_item + 1),
+	    "vs-8220: tree is not balanced on internal level");
+    RFALSE( ! h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
+		    (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1)) ),
+	    "vs-8225: tree is not balanced on leaf level");
+
+    /* all contents of S[0] can be moved into its neighbors
+       S[0] will be removed after balancing. */
+    if (!h && is_leaf_removable (tb))
+	return CARRY_ON;
+
+
+    /* why do we perform this check here rather than earlier?
+       Answer: we can win one node in some of the cases above. Moreover, we
+       already checked this above, when we checked that S[0] is not removable
+       in principle */
+    if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
+	if ( ! h )
+	    tb->s0num = vn->vn_nr_item;
+	set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+	return NO_BALANCING_NEEDED;
+    }
+
+
+    {
+	int lpar, rpar, nset, lset, rset, lrset;
+	/* 
+	 * regular overflowing of the node
+	 */
+
+	/* get_num_ver works in 2 modes (FLOW & NO_FLOW) 
+	   lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
+	   nset, lset, rset, lrset - shows, whether flowing items give better packing 
+	*/
+#define FLOW 1
+#define NO_FLOW 0	/* do not do any splitting */
+
+	/* we choose one of the following */
+#define NOTHING_SHIFT_NO_FLOW	0
+#define NOTHING_SHIFT_FLOW	5
+#define LEFT_SHIFT_NO_FLOW	10
+#define LEFT_SHIFT_FLOW		15
+#define RIGHT_SHIFT_NO_FLOW	20
+#define RIGHT_SHIFT_FLOW	25
+#define LR_SHIFT_NO_FLOW	30
+#define LR_SHIFT_FLOW		35
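+/* each variant above owns five consecutive snum012[] slots (s0num, s1num,
+   s2num, s1bytes, s2bytes), hence the offsets step by 5 */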
+
+
+	lpar = tb->lnum[h];
+	rpar = tb->rnum[h];
+
+
+	/* calculate number of blocks S[h] must be split into when
+	   nothing is shifted to the neighbors,
+	   as well as number of items in each part of the split node (s012 numbers),
+	   and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
+	nset = NOTHING_SHIFT_NO_FLOW;
+	nver = get_num_ver (vn->vn_mode, tb, h,
+			    0, -1, h?vn->vn_nr_item:0, -1, 
+			    snum012, NO_FLOW);
+
+	if (!h)
+	{
+	    int nver1;
+
+	    /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
+	    nver1 = get_num_ver (vn->vn_mode, tb, h, 
+				 0, -1, 0, -1, 
+				 snum012 + NOTHING_SHIFT_FLOW, FLOW);
+	    if (nver > nver1)
+		nset = NOTHING_SHIFT_FLOW, nver = nver1;
+	}
+       
+ 
+	/* calculate number of blocks S[h] must be split into when
+	   l_shift_num first items and l_shift_bytes of the right most
+	   liquid item to be shifted are shifted to the left neighbor,
+	   as well as number of items in each part of the split node (s012 numbers),
+	   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+	*/
+	lset = LEFT_SHIFT_NO_FLOW;
+	lnver = get_num_ver (vn->vn_mode, tb, h, 
+			     lpar - (( h || tb->lbytes == -1 ) ? 0 : 1), -1, h ? vn->vn_nr_item:0, -1,
+			     snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
+	if (!h)
+	{
+	    int lnver1;
+
+	    lnver1 = get_num_ver (vn->vn_mode, tb, h, 
+				  lpar - ((tb->lbytes != -1) ? 1 : 0), tb->lbytes, 0, -1,
+				  snum012 + LEFT_SHIFT_FLOW, FLOW);
+	    if (lnver > lnver1)
+		lset = LEFT_SHIFT_FLOW, lnver = lnver1;
+	}
+
+
+	/* calculate number of blocks S[h] must be split into when
+	   r_shift_num first items and r_shift_bytes of the left most
+	   liquid item to be shifted are shifted to the right neighbor,
+	   as well as number of items in each part of the split node (s012 numbers),
+	   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+	*/
+	rset = RIGHT_SHIFT_NO_FLOW;
+	rnver = get_num_ver (vn->vn_mode, tb, h, 
+			     0, -1, h ? (vn->vn_nr_item-rpar) : (rpar - (( tb->rbytes != -1 ) ? 1 : 0)), -1, 
+			     snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
+	if (!h)
+	{
+	    int rnver1;
+
+	    rnver1 = get_num_ver (vn->vn_mode, tb, h, 
+				  0, -1, (rpar - ((tb->rbytes != -1) ? 1 : 0)), tb->rbytes, 
+				  snum012 + RIGHT_SHIFT_FLOW, FLOW);
+
+	    if (rnver > rnver1)
+		rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
+	}
+
+
+	/* calculate number of blocks S[h] must be split into when
+	   items are shifted in both directions,
+	   as well as number of items in each part of the split node (s012 numbers),
+	   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+	*/
+	lrset = LR_SHIFT_NO_FLOW;
+	lrnver = get_num_ver (vn->vn_mode, tb, h, 
+			      lpar - ((h || tb->lbytes == -1) ? 0 : 1), -1, h ? (vn->vn_nr_item-rpar):(rpar - ((tb->rbytes != -1) ? 1 : 0)), -1,
+			      snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
+	if (!h)
+	{
+	    int lrnver1;
+
+	    lrnver1 = get_num_ver (vn->vn_mode, tb, h, 
+				   lpar - ((tb->lbytes != -1) ? 1 : 0), tb->lbytes, (rpar - ((tb->rbytes != -1) ? 1 : 0)), tb->rbytes,
+				   snum012 + LR_SHIFT_FLOW, FLOW);
+	    if (lrnver > lrnver1)
+		lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
+	}
+
+
+
+	/* Our general shifting strategy is:
+	   1) to minimize the number of new nodes;
+	   2) to minimize the number of neighbors involved in shifting;
+	   3) to minimize the number of disk reads; */
+
+	/* we can win TWO or ONE nodes by shifting in both directions */
+	if (lrnver < lnver && lrnver < rnver)
+	{
+	    RFALSE( h && 
+		    (tb->lnum[h] != 1 || 
+		     tb->rnum[h] != 1 || 
+		     lrnver != 1 || rnver != 2 || lnver != 2 || h != 1),
+		    "vs-8230: bad h");
+	    if (lrset == LR_SHIFT_FLOW)
+		set_parameters (tb, h, tb->lnum[h], tb->rnum[h], lrnver, snum012 + lrset,
+				tb->lbytes, tb->rbytes);
+	    else
+		set_parameters (tb, h, tb->lnum[h] - ((tb->lbytes == -1) ? 0 : 1), 
+				tb->rnum[h] - ((tb->rbytes == -1) ? 0 : 1), lrnver, snum012 + lrset, -1, -1);
+
+	    return CARRY_ON;
+	}
+
+	/* if shifting doesn't lead to better packing then don't shift */
+	if (nver == lrnver)
+	{
+	    set_parameters (tb, h, 0, 0, nver, snum012 + nset, -1, -1);
+	    return CARRY_ON;
+	}
+
+
+	/* now we know that for better packing shifting in only one
+	   direction either to the left or to the right is required */
+
+	/*  if shifting to the left is better than shifting to the right */
+	if (lnver < rnver)
+	{
+	    SET_PAR_SHIFT_LEFT;
+	    return CARRY_ON;
+	}
+
+	/* if shifting to the right is better than shifting to the left */
+	if (lnver > rnver)
+	{
+	    SET_PAR_SHIFT_RIGHT;
+	    return CARRY_ON;
+	}
+
+
+	/* now shifting in either direction gives the same number
+	   of nodes and we can make use of the cached neighbors */
+	if (is_left_neighbor_in_cache (tb,h))
+	{
+	    SET_PAR_SHIFT_LEFT;
+	    return CARRY_ON;
+	}
+
+	/* shift to the right regardless of whether the right neighbor is in the cache or not */
+	SET_PAR_SHIFT_RIGHT;
+	return CARRY_ON;
+    }
+}
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Cutting for INTERNAL node of S+tree.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *	tb	tree_balance structure;
+ *	h	current level of the node;
+ *	inum	item number in S[h];
+ *	mode	d - delete, c - cut;
+ * Returns:	1 - schedule occurred; 
+ *	        0 - balancing for higher levels needed;
+ *	       -1 - no balancing for higher levels needed;
+ *	       -2 - no disk space.
+ *
+ * Note: Items of internal nodes have fixed size, so the balance condition for
+ * the internal part of S+tree is as for the B-trees.
+ */
+static int dc_check_balance_internal (struct tree_balance * tb, int h)
+{
+  struct virtual_node * vn = tb->tb_vn;
+
+  /* Sh is the node whose balance is currently being checked,
+     and Fh is its father.  */
+  struct buffer_head * Sh, * Fh;
+  int maxsize,
+      n_ret_value;
+  int lfree, rfree /* free space in L and R */;
+
+  Sh = PATH_H_PBUFFER (tb->tb_path, h); 
+  Fh = PATH_H_PPARENT (tb->tb_path, h); 
+
+  maxsize = MAX_CHILD_SIZE(Sh); 
+
+  /* Using tb->insert_size[h], which is negative in this case,
+     create_virtual_node calculates new_nr_item: the number of items the
+     node would have if the operation were performed without balancing. */
+  create_virtual_node (tb, h);
+
+  if ( ! Fh )
+    {   /* S[h] is the root. */
+      if ( vn->vn_nr_item > 0 )
+	{
+	  set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+	  return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+	}
+      /* new_nr_item == 0.
+       * Current root will be deleted resulting in
+       * decrementing the tree height. */
+      set_parameters (tb, h, 0, 0, 0, NULL, -1, -1);
+      return CARRY_ON;
+    }
+
+  if ( (n_ret_value = get_parents(tb,h)) != CARRY_ON )
+    return n_ret_value;
+
+
+  /* get free space of neighbors */
+  rfree = get_rfree (tb, h);
+  lfree = get_lfree (tb, h);
+		
+  /* determine maximal number of items we can fit into neighbors */
+  check_left (tb, h, lfree);
+  check_right (tb, h, rfree);
+
+
+  if ( vn->vn_nr_item >= MIN_NR_KEY(Sh) )
+    { /* Balance condition for the internal node is valid.
+       * In this case we balance only if it leads to better packing. */ 
+      if ( vn->vn_nr_item == MIN_NR_KEY(Sh) )
+	{ /* Here we join S[h] with one of its neighbors,
+	   * which is impossible with greater values of new_nr_item. */
+	  if ( tb->lnum[h] >= vn->vn_nr_item + 1 )
+	    {
+	      /* All contents of S[h] can be moved to L[h]. */
+	      int n;
+	      int order_L;
+	      
+	      order_L = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
+	      n = dc_size(B_N_CHILD(tb->FL[h],order_L)) / (DC_SIZE + KEY_SIZE);
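+	      /* n is the item count of L[h], recovered from the dc_size its
+	         parent keeps for it (an internal node with n items uses
+	         roughly n * (DC_SIZE + KEY_SIZE) bytes, so the division
+	         gives n back) */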
+	      set_parameters (tb, h, -n-1, 0, 0, NULL, -1, -1);
+	      return CARRY_ON;
+	    }
+
+	  if ( tb->rnum[h] >= vn->vn_nr_item + 1 )
+	    {
+	      /* All contents of S[h] can be moved to R[h]. */
+	      int n;
+	      int order_R;
+	    
+	      order_R = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==B_NR_ITEMS(Fh)) ? 0 : n + 1;
+	      n = dc_size(B_N_CHILD(tb->FR[h],order_R)) / (DC_SIZE + KEY_SIZE);
+	      set_parameters (tb, h, 0, -n-1, 0, NULL, -1, -1);
+	      return CARRY_ON;   
+	    }
+	}
+
+      if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)
+	{
+	  /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
+	  int to_r;
+
+	  to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 - 
+	    (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
+	  set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+	  return CARRY_ON;
+	}
+
+      /* Balancing does not lead to better packing. */
+      set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+      return NO_BALANCING_NEEDED;
+    }
+
+  /* The current node contains an insufficient number of items. Balancing is required. */
+  /* Check whether we can merge S[h] with left neighbor. */
+  if (tb->lnum[h] >= vn->vn_nr_item + 1)
+    if (is_left_neighbor_in_cache (tb,h) || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h])
+      {
+	int n;
+	int order_L;
+	      
+	order_L = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
+	n = dc_size(B_N_CHILD(tb->FL[h],order_L)) / (DC_SIZE + KEY_SIZE);
+	set_parameters (tb, h, -n-1, 0, 0, NULL, -1, -1);
+	return CARRY_ON;
+      }
+
+  /* Check whether we can merge S[h] with right neighbor. */
+  if (tb->rnum[h] >= vn->vn_nr_item + 1)
+    {
+      int n;
+      int order_R;
+	    
+      order_R = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==B_NR_ITEMS(Fh)) ? 0 : (n + 1);
+      n = dc_size(B_N_CHILD(tb->FR[h],order_R)) / (DC_SIZE + KEY_SIZE);
+      set_parameters (tb, h, 0, -n-1, 0, NULL, -1, -1);
+      return CARRY_ON;   
+    }
+
+  /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
+  if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)
+    {
+      int to_r;
+	    
+      to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 - 
+	(MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
+      set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+      return CARRY_ON;
+    }
+
+  /* For internal nodes try to borrow item from a neighbor */
+  RFALSE( !tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
+
+  /* Borrow one or two items from the cached neighbor */
+  if (is_left_neighbor_in_cache (tb,h) || !tb->FR[h])
+    {
+      int from_l;
+		
+      from_l = (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item + 1) / 2 -  (vn->vn_nr_item + 1);
+      set_parameters (tb, h, -from_l, 0, 1, NULL, -1, -1);
+      return CARRY_ON;
+    }
+
+  set_parameters (tb, h, 0, -((MAX_NR_KEY(Sh)+1-tb->rnum[h]+vn->vn_nr_item+1)/2-(vn->vn_nr_item+1)), 1, 
+		  NULL, -1, -1);
+  return CARRY_ON;
+}
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Truncating for LEAF node of S+tree.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *	tb	tree_balance structure;
+ *	h	current level of the node;
+ *	inum	item number in S[h];
+ *	mode	d - delete, c - cut;
+ * Returns:	1 - schedule occurred; 
+ *	        0 - balancing for higher levels needed;
+ *	       -1 - no balancing for higher levels needed;
+ *	       -2 - no disk space.
+ */
+static int dc_check_balance_leaf (struct tree_balance * tb, int h)
+{
+  struct virtual_node * vn = tb->tb_vn;
+
+  /* Number of bytes that must be deleted from
+     (value is negative if bytes are deleted) buffer which
+     contains node being balanced.  The mnemonic is that the
+     attempted change in node space used level is levbytes bytes. */
+  int levbytes;
+  /* the maximal item size */
+  int maxsize,
+      n_ret_value;
+  /* S0 is the node whose balance is currently being checked,
+     and F0 is its father.  */
+  struct buffer_head * S0, * F0;
+  int lfree, rfree /* free space in L and R */;
+
+  S0 = PATH_H_PBUFFER (tb->tb_path, 0);
+  F0 = PATH_H_PPARENT (tb->tb_path, 0);
+
+  levbytes = tb->insert_size[h];
+
+  maxsize = MAX_CHILD_SIZE(S0); 	/* maximal possible size of an item */
+
+  if ( ! F0 )
+    {  /* S[0] is the root now. */
+
+      RFALSE( -levbytes >= maxsize - B_FREE_SPACE (S0),
+	      "vs-8240: attempt to create empty buffer tree");
+
+      set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+      return NO_BALANCING_NEEDED;
+    }
+
+  if ( (n_ret_value = get_parents(tb,h)) != CARRY_ON )
+    return n_ret_value;
+
+  /* get free space of neighbors */
+  rfree = get_rfree (tb, h);
+  lfree = get_lfree (tb, h);		
+
+  create_virtual_node (tb, h);
+
+  /* if the 3 leaves can be merged into one, set parameters and return */
+  if (are_leaves_removable (tb, lfree, rfree))
+    return CARRY_ON;
+
+  /* determine maximal number of items we can shift to the left/right  neighbor
+     and the maximal number of bytes that can flow to the left/right neighbor
+     from the left/right most liquid item that cannot be shifted from S[0] entirely
+     */
+  check_left (tb, h, lfree);
+  check_right (tb, h, rfree);   
+
+  /* check whether we can merge S with left neighbor. */
+  if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
+    if (is_left_neighbor_in_cache (tb,h) ||
+	((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
+	!tb->FR[h]) {
+      
+      RFALSE( !tb->FL[h], "vs-8245: dc_check_balance_leaf: FL[h] must exist");
+
+      /* set parameter to merge S[0] with its left neighbor */
+      set_parameters (tb, h, -1, 0, 0, NULL, -1, -1);
+      return CARRY_ON;
+    }
+
+  /* check whether we can merge S[0] with right neighbor. */
+  if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
+    set_parameters (tb, h, 0, -1, 0, NULL, -1, -1);
+    return CARRY_ON;
+  }
+  
+  /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
+  if (is_leaf_removable (tb))
+    return CARRY_ON;
+  
+  /* Balancing is not required. */
+  tb->s0num = vn->vn_nr_item;
+  set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+  return NO_BALANCING_NEEDED;
+}
+
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Cutting.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *	tb	tree_balance structure;
+ *	h	current level of the node;
+ *	inum	item number in S[h];
+ *	mode	d - delete, c - cut.
+ * Returns:	1 - schedule occurred; 
+ *	        0 - balancing for higher levels needed;
+ *	       -1 - no balancing for higher levels needed;
+ *	       -2 - no disk space.
+ */
+static int dc_check_balance (struct tree_balance * tb, int h)
+{
+ RFALSE( ! (PATH_H_PBUFFER (tb->tb_path, h)), "vs-8250: S is not initialized");
+
+ if ( h )
+   return dc_check_balance_internal (tb, h);
+ else
+   return dc_check_balance_leaf (tb, h);
+}
+
+
+
+/* Check whether current node S[h] is balanced.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *
+ *	tb	tree_balance structure:
+ *
+ *              tb is a large structure that must be read about in the header file
+ *              at the same time as this procedure if the reader is to successfully
+ *              understand this procedure
+ *
+ *	h	current level of the node;
+ *	inum	item number in S[h];
+ *	mode	i - insert, p - paste, d - delete, c - cut.
+ * Returns:	1 - schedule occurred; 
+ *	        0 - balancing for higher levels needed;
+ *	       -1 - no balancing for higher levels needed;
+ *	       -2 - no disk space.
+ */
+static int check_balance (int mode, 
+			  struct tree_balance * tb,
+			  int h, 
+			  int inum,
+			  int pos_in_item,
+			  struct item_head * ins_ih,
+			  const void * data
+			  )
+{
+  struct virtual_node * vn;
+
+  vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
+  vn->vn_free_ptr = (char *)(tb->tb_vn + 1);
+  vn->vn_mode = mode;
+  vn->vn_affected_item_num = inum;
+  vn->vn_pos_in_item = pos_in_item;
+  vn->vn_ins_ih = ins_ih;
+  vn->vn_data = data;
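+  /* the virtual node and its per-item array live in the scratch buffer
+     tb->vn_buf; vn_free_ptr marks the first free byte after the
+     virtual_node header for create_virtual_node() to use */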
+
+  RFALSE( mode == M_INSERT && !vn->vn_ins_ih,
+	  "vs-8255: ins_ih can not be 0 in insert mode");
+
+ if ( tb->insert_size[h] > 0 )
+   /* Calculate balance parameters when size of node is increasing. */
+   return ip_check_balance (tb, h);
+
+ /* Calculate balance parameters when  size of node is decreasing. */
+ return dc_check_balance (tb, h);
+}
+
+
+
+/* Check whether the parent in the path is really the parent of the current node. */
+static int  get_direct_parent(
+              struct tree_balance * p_s_tb,
+              int                   n_h
+            ) {
+    struct buffer_head  * p_s_bh;
+    struct path         * p_s_path      = p_s_tb->tb_path;
+    int                   n_position,
+	n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
+    
+    /* We are in the root or in the new root. */
+    if ( n_path_offset <= FIRST_PATH_ELEMENT_OFFSET ) {
+	
+	RFALSE( n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
+		"PAP-8260: invalid offset in the path");
+
+	if ( PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+	     SB_ROOT_BLOCK (p_s_tb->tb_sb) ) {
+	    /* Root is not changed. */
+	    PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL;
+	    PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0;
+	    return CARRY_ON;
+	}
+	return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
+    }
+
+    if ( ! B_IS_IN_TREE(p_s_bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1)) )
+	return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1)) > B_NR_ITEMS(p_s_bh) )
+	return REPEAT_SEARCH;
+    
+    if ( B_N_CHILD_NUM(p_s_bh, n_position) != PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr )
+	/* Parent in the path is not parent of the current node in the tree. */
+	return REPEAT_SEARCH;
+
+    if ( buffer_locked(p_s_bh) ) {
+	__wait_on_buffer(p_s_bh);
+	if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+	    return REPEAT_SEARCH;
+    }
+
+    return CARRY_ON; /* Parent in the path is unlocked and really the parent of the current node. */
+}
+
+
+/* Using lnum[n_h] and rnum[n_h] we should determine what neighbors
+ * of S[n_h] we need in order to balance S[n_h], and get them if
+ * necessary.
+ * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ *	        CARRY_ON - schedule didn't occur while the function worked;
+ */
+static int  get_neighbors(
+	            struct tree_balance * p_s_tb,
+	            int 		  n_h
+	          ) {
+    int		 	n_child_position,
+	n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1);
+    unsigned long		n_son_number;
+    struct super_block  *	p_s_sb = p_s_tb->tb_sb;
+    struct buffer_head  * p_s_bh;
+
+
+    PROC_INFO_INC( p_s_sb, get_neighbors[ n_h ] );
+
+    if ( p_s_tb->lnum[n_h] ) {
+	/* We need left neighbor to balance S[n_h]. */
+	PROC_INFO_INC( p_s_sb, need_l_neighbor[ n_h ] );
+	p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+	
+	RFALSE( p_s_bh == p_s_tb->FL[n_h] && 
+		! PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset),
+		"PAP-8270: invalid position in the parent");
+
+	n_child_position = ( p_s_bh == p_s_tb->FL[n_h] ) ? p_s_tb->lkey[n_h] : B_NR_ITEMS (p_s_tb->FL[n_h]);
+	n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position);
+	p_s_bh = sb_bread(p_s_sb, n_son_number);
+	if (!p_s_bh)
+	    return IO_ERROR;
+	if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+	    decrement_bcount(p_s_bh);
+	    PROC_INFO_INC( p_s_sb, get_neighbors_restart[ n_h ] );
+	    return REPEAT_SEARCH;
+	}
+	
+	RFALSE( ! B_IS_IN_TREE(p_s_tb->FL[n_h]) ||
+                n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) ||
+	        B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) !=
+                p_s_bh->b_blocknr, "PAP-8275: invalid parent");
+	RFALSE( ! B_IS_IN_TREE(p_s_bh), "PAP-8280: invalid child");
+	RFALSE( ! n_h &&
+                B_FREE_SPACE (p_s_bh) != MAX_CHILD_SIZE (p_s_bh) - dc_size(B_N_CHILD (p_s_tb->FL[0],n_child_position)),
+                "PAP-8290: invalid child size of left neighbor");
+
+	decrement_bcount(p_s_tb->L[n_h]);
+	p_s_tb->L[n_h] = p_s_bh;
+    }
+
+
+    if ( p_s_tb->rnum[n_h] ) { /* We need right neighbor to balance S[n_path_offset]. */
+	PROC_INFO_INC( p_s_sb, need_r_neighbor[ n_h ] );
+	p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+	
+	RFALSE( p_s_bh == p_s_tb->FR[n_h] && 
+		PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset) >= B_NR_ITEMS(p_s_bh),
+		"PAP-8295: invalid position in the parent");
+
+	n_child_position = ( p_s_bh == p_s_tb->FR[n_h] ) ? p_s_tb->rkey[n_h] + 1 : 0;
+	n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position);
+	p_s_bh = sb_bread(p_s_sb, n_son_number);
+	if (!p_s_bh)
+	    return IO_ERROR;
+	if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+	    decrement_bcount(p_s_bh);
+	    PROC_INFO_INC( p_s_sb, get_neighbors_restart[ n_h ] );
+	    return REPEAT_SEARCH;
+	}
+	decrement_bcount(p_s_tb->R[n_h]);
+	p_s_tb->R[n_h] = p_s_bh;
+
+	RFALSE( ! n_h && B_FREE_SPACE (p_s_bh) != MAX_CHILD_SIZE (p_s_bh) - dc_size(B_N_CHILD (p_s_tb->FR[0],n_child_position)),
+                "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
+                B_FREE_SPACE (p_s_bh), MAX_CHILD_SIZE (p_s_bh),
+                dc_size(B_N_CHILD (p_s_tb->FR[0],n_child_position)));
+	
+    }
+    return CARRY_ON;
+}
+
+#ifdef CONFIG_REISERFS_CHECK
+void * reiserfs_kmalloc (size_t size, int flags, struct super_block * s)
+{
+    void * vp;
+    static size_t malloced;
+
+
+    vp = kmalloc (size, flags);
+    if (vp) {
+	REISERFS_SB(s)->s_kmallocs += size;
+	if (REISERFS_SB(s)->s_kmallocs > malloced + 200000) {
+	    reiserfs_warning (s,
+			      "vs-8301: reiserfs_kmalloc: allocated memory %d",
+			      REISERFS_SB(s)->s_kmallocs);
+	    malloced = REISERFS_SB(s)->s_kmallocs;
+	}
+    }
+    return vp;
+}
+
+void reiserfs_kfree (const void * vp, size_t size, struct super_block * s)
+{
+    kfree (vp);
+  
+    REISERFS_SB(s)->s_kmallocs -= size;
+    if (REISERFS_SB(s)->s_kmallocs < 0)
+	reiserfs_warning (s, "vs-8302: reiserfs_kfree: allocated memory %d",
+			  REISERFS_SB(s)->s_kmallocs);
+
+}
+#endif
+
+
+static int get_virtual_node_size (struct super_block * sb, struct buffer_head * bh)
+{
+    int max_num_of_items;
+    int max_num_of_entries;
+    unsigned long blocksize = sb->s_blocksize;
+
+#define MIN_NAME_LEN 1
+
+    max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN);
+    max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) / 
+                         (DEH_SIZE + MIN_NAME_LEN);
+
+    return sizeof(struct virtual_node) + 
+           max(max_num_of_items * sizeof (struct virtual_item),
+	       sizeof (struct virtual_item) + sizeof(struct direntry_uarea) + 
+               (max_num_of_entries - 1) * sizeof (__u16));
+}
+
+
+
+/* Maybe we should fail the balancing we are going to perform if kmalloc
+   fails several times.  For now it loops until kmalloc returns the
+   required memory. */
+static int get_mem_for_virtual_node (struct tree_balance * tb)
+{
+    int check_fs = 0;
+    int size;
+    char * buf;
+
+    size = get_virtual_node_size (tb->tb_sb, PATH_PLAST_BUFFER (tb->tb_path));
+
+    if (size > tb->vn_buf_size) {
+	/* we have to allocate more memory for virtual node */
+	if (tb->vn_buf) {
+	    /* free memory allocated before */
+	    reiserfs_kfree (tb->vn_buf, tb->vn_buf_size, tb->tb_sb);
+	    /* this is not needed if kfree is atomic */
+            check_fs = 1;
+	}
+
+	/* the virtual node now requires more memory */
+	tb->vn_buf_size = size;
+
+	/* get memory for virtual item */
+	buf = reiserfs_kmalloc(size, GFP_ATOMIC | __GFP_NOWARN, tb->tb_sb);
+	if ( ! buf ) {
+	    /* getting memory with GFP_KERNEL priority may involve
+               balancing now (due to indirect_to_direct conversion on
+               dcache shrinking). So, release path and collected
+               resources here */
+	    free_buffers_in_tb (tb);
+	    buf = reiserfs_kmalloc(size, GFP_NOFS, tb->tb_sb);
+	    if ( !buf ) {
+#ifdef CONFIG_REISERFS_CHECK
+		reiserfs_warning (tb->tb_sb,
+				  "vs-8345: get_mem_for_virtual_node: "
+				  "kmalloc failed. reiserfs kmalloced %d bytes",
+				  REISERFS_SB(tb->tb_sb)->s_kmallocs);
+#endif
+		tb->vn_buf_size = 0;
+	    }
+	    tb->vn_buf = buf;
+	    schedule() ;
+	    return REPEAT_SEARCH;
+	}
+
+	tb->vn_buf = buf;
+    }
+
+    if ( check_fs && FILESYSTEM_CHANGED_TB (tb) )
+        return REPEAT_SEARCH;
+
+    return CARRY_ON;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+static void tb_buffer_sanity_check (struct super_block * p_s_sb,
+				    struct buffer_head * p_s_bh, 
+				    const char *descr, int level) {
+  if (p_s_bh) {
+    if (atomic_read (&(p_s_bh->b_count)) <= 0) {
+
+      reiserfs_panic (p_s_sb, "jmacd-1: tb_buffer_sanity_check(): negative or zero reference counter for buffer %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+
+    if ( ! buffer_uptodate (p_s_bh) ) {
+      reiserfs_panic (p_s_sb, "jmacd-2: tb_buffer_sanity_check(): buffer is not up to date %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+
+    if ( ! B_IS_IN_TREE (p_s_bh) ) {
+      reiserfs_panic (p_s_sb, "jmacd-3: tb_buffer_sanity_check(): buffer is not in tree %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+
+    if (p_s_bh->b_bdev != p_s_sb->s_bdev) {
+	reiserfs_panic (p_s_sb, "jmacd-4: tb_buffer_sanity_check(): buffer has wrong device %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+
+    if (p_s_bh->b_size != p_s_sb->s_blocksize) {
+	reiserfs_panic (p_s_sb, "jmacd-5: tb_buffer_sanity_check(): buffer has wrong blocksize %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+
+    if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
+	reiserfs_panic (p_s_sb, "jmacd-6: tb_buffer_sanity_check(): buffer block number too high %s[%d] (%b)\n", descr, level, p_s_bh);
+    }
+  }
+}
+#else
+static void tb_buffer_sanity_check (struct super_block * p_s_sb,
+				    struct buffer_head * p_s_bh, 
+				    const char *descr, int level)
+{;}
+#endif
+
+static int clear_all_dirty_bits(struct super_block *s,
+                                 struct buffer_head *bh) {
+  return reiserfs_prepare_for_journal(s, bh, 0) ;
+}
+
+static int wait_tb_buffers_until_unlocked (struct tree_balance * p_s_tb)
+{
+    struct buffer_head * locked;
+#ifdef CONFIG_REISERFS_CHECK
+    int repeat_counter = 0;
+#endif
+    int i;
+
+    do {
+
+	locked = NULL;
+
+	for ( i = p_s_tb->tb_path->path_length; !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i-- ) {
+	    if ( PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i) ) {
+		/* if I understand correctly, we can only be sure the last buffer
+		** in the path is in the tree --clm
+		*/
+#ifdef CONFIG_REISERFS_CHECK
+		if (PATH_PLAST_BUFFER(p_s_tb->tb_path) ==
+		    PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, 
+					    PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i), 
+					    "S", 
+					    p_s_tb->tb_path->path_length - i);
+		}
+#endif
+		if (!clear_all_dirty_bits(p_s_tb->tb_sb,
+				     PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i)))
+		{
+		    locked = PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i);
+		}
+	    }
+	}
+
+	for ( i = 0; !locked && i < MAX_HEIGHT && p_s_tb->insert_size[i]; i++ ) { 
+
+	    if (p_s_tb->lnum[i] ) {
+
+		if ( p_s_tb->L[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->L[i], "L", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->L[i]))
+			locked = p_s_tb->L[i];
+		}
+
+		if ( !locked && p_s_tb->FL[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->FL[i], "FL", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FL[i]))
+			locked = p_s_tb->FL[i];
+		}
+
+		if ( !locked && p_s_tb->CFL[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->CFL[i], "CFL", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->CFL[i]))
+			locked = p_s_tb->CFL[i];
+		}
+
+	    }
+
+	    if ( !locked && (p_s_tb->rnum[i]) ) {
+
+		if ( p_s_tb->R[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->R[i], "R", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->R[i]))
+			locked = p_s_tb->R[i];
+		}
+
+       
+		if ( !locked && p_s_tb->FR[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->FR[i], "FR", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FR[i]))
+			locked = p_s_tb->FR[i];
+		}
+
+		if ( !locked && p_s_tb->CFR[i] ) {
+		    tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->CFR[i], "CFR", i);
+		    if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->CFR[i]))
+			locked = p_s_tb->CFR[i];
+		}
+	    }
+	}
+	/* as far as I can tell, this is not required.  The FEB list seems
+	** to be full of newly allocated nodes, which will never be locked,
+	** dirty, or anything else.
+	** To be safe, I'm putting the checks and waits in.  For the moment,
+	** they are needed to keep the code in journal.c from complaining
+	** about the buffer.  That code is inside CONFIG_REISERFS_CHECK as well.
+	** --clm
+	*/
+	for ( i = 0; !locked && i < MAX_FEB_SIZE; i++ ) { 
+	    if ( p_s_tb->FEB[i] ) {
+		if (!clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FEB[i]))
+		    locked = p_s_tb->FEB[i] ;
+	    }
+	}
+
+	if (locked) {
+#ifdef CONFIG_REISERFS_CHECK
+	    repeat_counter++;
+	    if ( (repeat_counter % 10000) == 0) {
+		reiserfs_warning (p_s_tb->tb_sb,
+				  "wait_tb_buffers_until_unlocked(): too many "
+				  "iterations waiting for buffer to unlock "
+				  "(%b)", locked);
+
+		/* Don't loop forever.  Try to recover from possible error. */
+
+		return ( FILESYSTEM_CHANGED_TB (p_s_tb) ) ? REPEAT_SEARCH : CARRY_ON;
+	    }
+#endif
+	    __wait_on_buffer (locked);
+	    if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+		return REPEAT_SEARCH;
+	    }
+	}
+
+    } while (locked);
+
+    return CARRY_ON;
+}
+
+
+/* Prepare for balancing, that is
+ *	get all necessary parents and neighbors;
+ *	analyze what should be moved and where;
+ *	get a sufficient number of new nodes;
+ * Balancing will start only after all the needed resources have been collected.
+ * 
+ * When ported to SMP kernels, only at the last moment after all needed nodes
+ * are collected in cache, will the resources be locked using the usual
+ * textbook ordered lock acquisition algorithms.  Note that ensuring that
+ * this code neither write locks what it does not need to write lock nor locks out of order
+ * will be a pain in the butt that could have been avoided.  Grumble grumble. -Hans
+ * 
+ * "fix" is meant in the sense of "render unchanging".
+ * 
+ * Latency might be improved by first gathering a list of what buffers are needed
+ * and then getting as many of them in parallel as possible? -Hans
+ *
+ * Parameters:
+ *	op_mode	i - insert, d - delete, c - cut (truncate), p - paste (append)
+ *	tb	tree_balance structure;
+ *	inum	item number in S[h];
+ *      pos_in_item - comment this if you can
+ *      ins_ih & ins_sd are used when inserting
+ * Returns:	1 - schedule occurred while the function worked;
+ *	        0 - schedule didn't occur while the function worked;
+ *             -1 - if no_disk_space 
+ */
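+/* A minimal, simplified sketch of how a caller is expected to drive this
+ * (the real call sites live in stree.c and differ in details; the names below
+ * follow the rest of reiserfs):
+ *
+ *	restart:
+ *		if (fix_nodes (mode, &tb, ih, body) == REPEAT_SEARCH) {
+ *			// tree changed under us: redo the search, then retry
+ *			goto restart;
+ *		}
+ *		do_balance (&tb, ih, body, mode);  // all resources are pinned now
+ *		// on failure paths the caller calls unfix_nodes (&tb) instead
+ */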
+
+
+int fix_nodes (int n_op_mode,
+	       struct tree_balance * 	p_s_tb,
+	       struct item_head * p_s_ins_ih, // item head of item being inserted
+	       const void * data // inserted item or data to be pasted
+    ) {
+    int	n_ret_value,
+    	n_h,
+    	n_item_num = PATH_LAST_POSITION(p_s_tb->tb_path);
+    int n_pos_in_item;
+
+    /* we set wait_tb_buffers_run when we have to restore any dirty bits
+    ** cleared while waiting for the tree balance buffers to unlock
+    */
+    int wait_tb_buffers_run = 0 ; 
+    struct buffer_head  * p_s_tbS0 = PATH_PLAST_BUFFER(p_s_tb->tb_path);
+
+    ++ REISERFS_SB(p_s_tb -> tb_sb) -> s_fix_nodes;
+
+    n_pos_in_item = p_s_tb->tb_path->pos_in_item;
+
+
+    p_s_tb->fs_gen = get_generation (p_s_tb->tb_sb);
+
+    /* we prepare and log the super here so it will already be in the
+    ** transaction when do_balance needs to change it.
+    ** This way do_balance won't have to schedule when trying to prepare
+    ** the super for logging
+    */
+    reiserfs_prepare_for_journal(p_s_tb->tb_sb, 
+                                 SB_BUFFER_WITH_SB(p_s_tb->tb_sb), 1) ;
+    journal_mark_dirty(p_s_tb->transaction_handle, p_s_tb->tb_sb, 
+                       SB_BUFFER_WITH_SB(p_s_tb->tb_sb)) ;
+    if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+	return REPEAT_SEARCH;
+
+    /* this is possible in the case of an indirect_to_direct conversion */
+    if (buffer_locked (p_s_tbS0)) {
+        __wait_on_buffer (p_s_tbS0);
+        if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+            return REPEAT_SEARCH;
+    }
+
+#ifdef CONFIG_REISERFS_CHECK
+    if ( cur_tb ) {
+	print_cur_tb ("fix_nodes");
+	reiserfs_panic(p_s_tb->tb_sb,"PAP-8305: fix_nodes:  there is pending do_balance");
+    }
+
+    if (!buffer_uptodate (p_s_tbS0) || !B_IS_IN_TREE (p_s_tbS0)) {
+	reiserfs_panic (p_s_tb->tb_sb, "PAP-8320: fix_nodes: S[0] (%b %z) is not uptodate "
+			"at the beginning of fix_nodes or not in tree (mode %c)", p_s_tbS0, p_s_tbS0, n_op_mode);
+    }
+
+    /* Check parameters. */
+    switch (n_op_mode) {
+    case M_INSERT:
+	if ( n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0) )
+	    reiserfs_panic(p_s_tb->tb_sb,"PAP-8330: fix_nodes: Incorrect item number %d (in S0 - %d) in case of insert",
+			   n_item_num, B_NR_ITEMS(p_s_tbS0));
+	break;
+    case M_PASTE:
+    case M_DELETE:
+    case M_CUT:
+	if ( n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0) ) {
+	    print_block (p_s_tbS0, 0, -1, -1);
+	    reiserfs_panic(p_s_tb->tb_sb,"PAP-8335: fix_nodes: Incorrect item number(%d); mode = %c insert_size = %d\n", n_item_num, n_op_mode, p_s_tb->insert_size[0]);
+	}
+	break;
+    default:
+	reiserfs_panic(p_s_tb->tb_sb,"PAP-8340: fix_nodes: Incorrect mode of operation");
+    }
+#endif
+
+    if (get_mem_for_virtual_node (p_s_tb) == REPEAT_SEARCH)
+	// FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+	return REPEAT_SEARCH;
+
+
+    /* Starting from the leaf level; for all levels n_h of the tree. */
+    for ( n_h = 0; n_h < MAX_HEIGHT && p_s_tb->insert_size[n_h]; n_h++ ) { 
+	if ( (n_ret_value = get_direct_parent(p_s_tb, n_h)) != CARRY_ON ) {
+	    goto repeat;
+	}
+
+	if ( (n_ret_value = check_balance (n_op_mode, p_s_tb, n_h, n_item_num,
+					   n_pos_in_item, p_s_ins_ih, data)) != CARRY_ON ) {
+	    if ( n_ret_value == NO_BALANCING_NEEDED ) {
+		/* No balancing for higher levels needed. */
+		if ( (n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON ) {
+		    goto repeat;
+		}
+		if ( n_h != MAX_HEIGHT - 1 )  
+		    p_s_tb->insert_size[n_h + 1] = 0;
+		/* ok, analysis and resource gathering are complete */
+		break;
+	    }
+	    goto repeat;
+	}
+
+	if ( (n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON ) {
+	    goto repeat;
+	}
+
+	if ( (n_ret_value = get_empty_nodes(p_s_tb, n_h)) != CARRY_ON ) {
+	    goto repeat;        /* No disk space, or schedule occurred and
+				   analysis may be invalid and needs to be redone. */
+	}
+    
+	if ( ! PATH_H_PBUFFER(p_s_tb->tb_path, n_h) ) {
+	    /* We have a positive insert size but no nodes exist on this
+	       level; this means that we are creating a new root. */
+
+	    RFALSE( p_s_tb->blknum[n_h] != 1,
+		    "PAP-8350: creating new empty root");
+
+	    if ( n_h < MAX_HEIGHT - 1 )
+		p_s_tb->insert_size[n_h + 1] = 0;
+	}
+	else
+	    if ( ! PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1) ) {
+		if ( p_s_tb->blknum[n_h] > 1 ) {
+		    /* The tree needs to be grown, so this node S[n_h]
+		       which is the root node is split into two nodes,
+		       and a new node (S[n_h+1]) will be created to
+		       become the root node.  */
+	  
+		    RFALSE( n_h == MAX_HEIGHT - 1,
+			    "PAP-8355: attempt to create too high of a tree");
+
+		    p_s_tb->insert_size[n_h + 1] = (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1) + DC_SIZE;
+		}
+		else
+		    if ( n_h < MAX_HEIGHT - 1 )
+			p_s_tb->insert_size[n_h + 1] = 0;
+	    }
+	    else
+		p_s_tb->insert_size[n_h + 1] = (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1);
+    }
+
+    if ((n_ret_value = wait_tb_buffers_until_unlocked (p_s_tb)) == CARRY_ON) {
+	if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+	    wait_tb_buffers_run = 1 ;
+	    n_ret_value = REPEAT_SEARCH ;
+	    goto repeat; 
+	} else {
+	    return CARRY_ON;
+	}
+    } else {
+	wait_tb_buffers_run = 1 ;
+	goto repeat; 
+    }
+
+ repeat:
+    // fix_nodes was unable to perform its calculation because the
+    // filesystem changed under us, because of a lack of free disk space,
+    // or because of an i/o failure.  In the first case the search will be
+    // repeated.  For now, free all resources acquired so far except for
+    // the newly allocated nodes.
+    {
+	int i;
+
+	/* Release path buffers. */
+	if (wait_tb_buffers_run) {
+	    pathrelse_and_restore(p_s_tb->tb_sb, p_s_tb->tb_path) ;
+	} else {
+	    pathrelse (p_s_tb->tb_path);
+        }	
+	/* brelse all resources collected for balancing */
+	for ( i = 0; i < MAX_HEIGHT; i++ ) {
+	    if (wait_tb_buffers_run) {
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->L[i]);
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->R[i]);
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->FL[i]);
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->FR[i]);
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->CFL[i]);
+		reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->CFR[i]);
+	    }
+
+	    brelse (p_s_tb->L[i]);p_s_tb->L[i] = NULL;
+	    brelse (p_s_tb->R[i]);p_s_tb->R[i] = NULL;
+	    brelse (p_s_tb->FL[i]);p_s_tb->FL[i] = NULL;
+	    brelse (p_s_tb->FR[i]);p_s_tb->FR[i] = NULL;
+	    brelse (p_s_tb->CFL[i]);p_s_tb->CFL[i] = NULL;
+	    brelse (p_s_tb->CFR[i]);p_s_tb->CFR[i] = NULL;
+	}
+
+	if (wait_tb_buffers_run) {
+	    for ( i = 0; i < MAX_FEB_SIZE; i++ ) { 
+		if ( p_s_tb->FEB[i] ) {
+		    reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, 
+						     p_s_tb->FEB[i]) ;
+		}
+	    }
+	}
+	return n_ret_value;
+    }
+
+}
+
+
+/* Anatoly will probably forgive me renaming p_s_tb to tb. I just
+   wanted to make lines shorter */
+void unfix_nodes (struct tree_balance * tb)
+{
+    int	i;
+
+    /* Release path buffers. */
+    pathrelse_and_restore (tb->tb_sb, tb->tb_path);
+
+    /* brelse all resources collected for balancing */
+    for ( i = 0; i < MAX_HEIGHT; i++ ) {
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->L[i]);
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->R[i]);
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->FL[i]);
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->FR[i]);
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->CFL[i]);
+	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->CFR[i]);
+
+	brelse (tb->L[i]);
+	brelse (tb->R[i]);
+	brelse (tb->FL[i]);
+	brelse (tb->FR[i]);
+	brelse (tb->CFL[i]);
+	brelse (tb->CFR[i]);
+    }
+
+    /* deal with list of allocated (used and unused) nodes */
+    for ( i = 0; i < MAX_FEB_SIZE; i++ ) {
+	if ( tb->FEB[i] ) {
+	    b_blocknr_t blocknr  = tb->FEB[i]->b_blocknr ;
+	    /* de-allocate the block that was not used by balancing and
+               forget about its buffer */
+	    brelse (tb->FEB[i]);
+	    reiserfs_free_block (tb->transaction_handle, NULL, blocknr, 0);
+	}
+	if (tb->used[i]) {
+	    /* release used as new nodes including a new root */
+	    brelse (tb->used[i]);
+	}
+    }
+
+    if (tb->vn_buf)
+	reiserfs_kfree (tb->vn_buf, tb->vn_buf_size, tb->tb_sb);
+
+} 
+
+
+
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
new file mode 100644
index 0000000..08d0508
--- /dev/null
+++ b/fs/reiserfs/hashes.c
@@ -0,0 +1,209 @@
+
+/*
+ * Keyed 32-bit hash function using TEA in a Davis-Meyer function
+ *   H0 = Key
+ *   Hi = E Mi(Hi-1) + Hi-1
+ *
+ * (see Applied Cryptography, 2nd edition, p448).
+ *
+ * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
+ * 
+ * Jeremy has agreed to the contents of reiserfs/README. -Hans
+ * Yura's function is added (04/07/2000)
+ */
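+/* Illustrative usage sketch (simplified; the exact key packing is done by the
+ * directory code elsewhere): a name is hashed to 32 bits and that value ends
+ * up in the offset component of the directory entry key, so the entry can be
+ * found again by hashing the name:
+ *
+ *	u32 h = keyed_hash ((const signed char *)"foo", 3);
+ */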
+
+//
+// keyed_hash
+// yura_hash
+// r5_hash
+//
+
+#include <linux/kernel.h>
+#include <asm/types.h>
+#include <asm/bug.h>
+
+
+#define DELTA 0x9E3779B9
+#define FULLROUNDS 10		/* 32 is overkill, 16 is strong crypto */
+#define PARTROUNDS 6		/* 6 gets complete mixing */
+
+/* a, b, c, d - data; h0, h1 - accumulated hash */
+#define TEACORE(rounds)							\
+	do {								\
+		u32 sum = 0;						\
+		int n = rounds;						\
+		u32 b0, b1;						\
+									\
+		b0 = h0;						\
+		b1 = h1;						\
+									\
+		do							\
+		{							\
+			sum += DELTA;					\
+			b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);	\
+			b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);	\
+		} while(--n);						\
+									\
+		h0 += b0;						\
+		h1 += b1;						\
+	} while(0)
+
+
+u32 keyed_hash(const signed char *msg, int len)
+{
+	u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3}; 
+
+	u32 h0 = k[0], h1 = k[1];
+	u32 a, b, c, d;
+	u32 pad;
+	int i;
+ 
+	//	assert(len >= 0 && len < 256);
+
+	pad = (u32)len | ((u32)len << 8);
+	pad |= pad << 16;
+
+	while(len >= 16)
+	{
+		a = (u32)msg[ 0]      |
+		    (u32)msg[ 1] << 8 |
+		    (u32)msg[ 2] << 16|
+		    (u32)msg[ 3] << 24;
+		b = (u32)msg[ 4]      |
+		    (u32)msg[ 5] << 8 |
+		    (u32)msg[ 6] << 16|
+		    (u32)msg[ 7] << 24;
+		c = (u32)msg[ 8]      |
+		    (u32)msg[ 9] << 8 |
+		    (u32)msg[10] << 16|
+		    (u32)msg[11] << 24;
+		d = (u32)msg[12]      |
+		    (u32)msg[13] << 8 |
+		    (u32)msg[14] << 16|
+		    (u32)msg[15] << 24;
+		
+		TEACORE(PARTROUNDS);
+
+		len -= 16;
+		msg += 16;
+	}
+
+	if (len >= 12)
+	{
+		a = (u32)msg[ 0]      |
+		    (u32)msg[ 1] << 8 |
+		    (u32)msg[ 2] << 16|
+		    (u32)msg[ 3] << 24;
+		b = (u32)msg[ 4]      |
+		    (u32)msg[ 5] << 8 |
+		    (u32)msg[ 6] << 16|
+		    (u32)msg[ 7] << 24;
+		c = (u32)msg[ 8]      |
+		    (u32)msg[ 9] << 8 |
+		    (u32)msg[10] << 16|
+		    (u32)msg[11] << 24;
+
+		d = pad;
+		for(i = 12; i < len; i++)
+		{
+			d <<= 8;
+			d |= msg[i];
+		}
+	}
+	else if (len >= 8)
+	{
+		a = (u32)msg[ 0]      |
+		    (u32)msg[ 1] << 8 |
+		    (u32)msg[ 2] << 16|
+		    (u32)msg[ 3] << 24;
+		b = (u32)msg[ 4]      |
+		    (u32)msg[ 5] << 8 |
+		    (u32)msg[ 6] << 16|
+		    (u32)msg[ 7] << 24;
+
+		c = d = pad;
+		for(i = 8; i < len; i++)
+		{
+			c <<= 8;
+			c |= msg[i];
+		}
+	}
+	else if (len >= 4)
+	{
+		a = (u32)msg[ 0]      |
+		    (u32)msg[ 1] << 8 |
+		    (u32)msg[ 2] << 16|
+		    (u32)msg[ 3] << 24;
+
+		b = c = d = pad;
+		for(i = 4; i < len; i++)
+		{
+			b <<= 8;
+			b |= msg[i];
+		}
+	}
+	else
+	{
+		a = b = c = d = pad;
+		for(i = 0; i < len; i++)
+		{
+			a <<= 8;
+			a |= msg[i];
+		}
+	}
+
+	TEACORE(FULLROUNDS);
+
+/*	return 0;*/
+	return h0^h1;
+}
+
+/* What follows in this file is copyright 2000 by Hans Reiser, and the
+ * licensing of what follows is governed by reiserfs/README */
+
+u32 yura_hash (const signed char *msg, int len)
+{
+    int j, pow;
+    u32 a, c;
+    int i;
+    
+    for (pow=1,i=1; i < len; i++) pow = pow * 10; 
+    
+    if (len == 1) 
+	a = msg[0]-48;
+    else
+	a = (msg[0] - 48) * pow;
+    
+    for (i=1; i < len; i++) {
+	c = msg[i] - 48; 
+	for (pow=1,j=i; j < len-1; j++) pow = pow * 10; 
+	a = a + c * pow;
+    }
+    
+    for (; i < 40; i++) {
+	c = '0' - 48; 
+	for (pow=1,j=i; j < len-1; j++) pow = pow * 10; 
+	a = a + c * pow;
+    }
+    
+    for (; i < 256; i++) {
+	c = i; 
+	for (pow=1,j=i; j < len-1; j++) pow = pow * 10; 
+	a = a + c * pow;
+    }
+    
+    a = a << 7;
+    return a;
+}
+
+u32 r5_hash (const signed char *msg, int len)
+{
+  u32 a=0;
+  while(*msg) { 
+    a += *msg << 4;
+    a += *msg >> 4;
+    a *= 11;
+    msg++;
+   } 
+  return a;
+}
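+/* For reference: these three functions back the hash={tea,rupasov,r5} mount
+ * options respectively; the hash a filesystem uses is recorded in its super
+ * block and picked at mount time.  A small usage sketch:
+ *
+ *	u32 h = r5_hash ((const signed char *)"linux", 5);
+ *	// note that r5_hash walks the string up to its NUL and ignores len
+ */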
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
new file mode 100644
index 0000000..a362125
--- /dev/null
+++ b/fs/reiserfs/ibalance.c
@@ -0,0 +1,1058 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/buffer_head.h>
+
+/* this is the one and only function that is used outside (do_balance.c) */
+int	balance_internal (
+			  struct tree_balance * ,
+			  int,
+			  int,
+			  struct item_head * ,
+			  struct buffer_head ** 
+			  );
+
+/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */
+#define INTERNAL_SHIFT_FROM_S_TO_L 0
+#define INTERNAL_SHIFT_FROM_R_TO_S 1
+#define INTERNAL_SHIFT_FROM_L_TO_S 2
+#define INTERNAL_SHIFT_FROM_S_TO_R 3
+#define INTERNAL_INSERT_TO_S 4
+#define INTERNAL_INSERT_TO_L 5
+#define INTERNAL_INSERT_TO_R 6
+
+static void	internal_define_dest_src_infos (
+						int shift_mode,
+						struct tree_balance * tb,
+						int h,
+						struct buffer_info * dest_bi,
+						struct buffer_info * src_bi,
+						int * d_key,
+						struct buffer_head ** cf
+						)
+{
+    memset (dest_bi, 0, sizeof (struct buffer_info));
+    memset (src_bi, 0, sizeof (struct buffer_info));
+    /* define dest, src, dest parent, dest position */
+    switch (shift_mode) {
+    case INTERNAL_SHIFT_FROM_S_TO_L:	/* used in internal_shift_left */
+	src_bi->tb = tb;
+	src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
+	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->L[h];
+	dest_bi->bi_parent = tb->FL[h];
+	dest_bi->bi_position = get_left_neighbor_position (tb, h);
+	*d_key = tb->lkey[h];
+	*cf = tb->CFL[h];
+	break;
+    case INTERNAL_SHIFT_FROM_L_TO_S:
+	src_bi->tb = tb;
+	src_bi->bi_bh = tb->L[h];
+	src_bi->bi_parent = tb->FL[h];
+	src_bi->bi_position = get_left_neighbor_position (tb, h);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
+	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); /* dest position is the analogue of dest->b_item_order */
+	*d_key = tb->lkey[h];
+	*cf = tb->CFL[h];
+	break;
+      
+    case INTERNAL_SHIFT_FROM_R_TO_S:	/* used in internal_shift_left */
+	src_bi->tb = tb;
+	src_bi->bi_bh = tb->R[h];
+	src_bi->bi_parent = tb->FR[h];
+	src_bi->bi_position = get_right_neighbor_position (tb, h);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
+	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+	*d_key = tb->rkey[h];
+	*cf = tb->CFR[h];
+	break;
+
+    case INTERNAL_SHIFT_FROM_S_TO_R:
+	src_bi->tb = tb;
+	src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
+	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->R[h];
+	dest_bi->bi_parent = tb->FR[h];
+	dest_bi->bi_position = get_right_neighbor_position (tb, h);
+	*d_key = tb->rkey[h];
+	*cf = tb->CFR[h];
+	break;
+
+    case INTERNAL_INSERT_TO_L:
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->L[h];
+	dest_bi->bi_parent = tb->FL[h];
+	dest_bi->bi_position = get_left_neighbor_position (tb, h);
+	break;
+	
+    case INTERNAL_INSERT_TO_S:
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
+	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+	break;
+
+    case INTERNAL_INSERT_TO_R:
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->R[h];
+	dest_bi->bi_parent = tb->FR[h];
+	dest_bi->bi_position = get_right_neighbor_position (tb, h);
+	break;
+
+    default:
+	reiserfs_panic (tb->tb_sb, "internal_define_dest_src_infos: shift type is unknown (%d)", shift_mode);
+    }
+}
+
+
+
+/* Insert count node pointers into buffer cur before position to + 1.
+ * Insert count items into buffer cur before position to.
+ * Items and node pointers are specified by inserted and bh respectively.
+ */ 
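+/* The memmoves below assume the internal node layout implied by B_N_CHILD and
+ * B_N_PDELIM_KEY, roughly:
+ *
+ *	[ block_head | key[0] .. key[nr-1] | dc[0] .. dc[nr] | free space ]
+ *
+ * so shifting the key region by `count' must also carry the disk_child array
+ * that sits behind it.
+ */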
+static void internal_insert_childs (struct buffer_info * cur_bi,
+				    int to, int count,
+				    struct item_head * inserted,
+				    struct buffer_head ** bh
+    )
+{
+    struct buffer_head * cur = cur_bi->bi_bh;
+    struct block_head * blkh;
+    int nr;
+    struct reiserfs_key * ih;
+    struct disk_child new_dc[2];
+    struct disk_child * dc;
+    int i;
+
+    if (count <= 0)
+	return;
+
+    blkh = B_BLK_HEAD(cur);
+    nr = blkh_nr_item(blkh);
+
+    RFALSE( count > 2,
+	    "too many children (%d) are to be inserted", count);
+    RFALSE( B_FREE_SPACE (cur) < count * (KEY_SIZE + DC_SIZE),
+	    "no enough free space (%d), needed %d bytes", 
+	    B_FREE_SPACE (cur), count * (KEY_SIZE + DC_SIZE));
+
+    /* prepare space for count disk_child */
+    dc = B_N_CHILD(cur,to+1);
+
+    memmove (dc + count, dc, (nr+1-(to+1)) * DC_SIZE);
+
+    /* copy to_be_insert disk children */
+    /* copy the to-be-inserted disk children */
+	put_dc_size( &(new_dc[i]), MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i]));
+	put_dc_block_number( &(new_dc[i]), bh[i]->b_blocknr );
+    }
+    memcpy (dc, new_dc, DC_SIZE * count);
+
+  
+    /* prepare space for count items  */
+    ih = B_N_PDELIM_KEY (cur, ((to == -1) ? 0 : to));
+
+    memmove (ih + count, ih, (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);
+
+    /* copy item headers (keys) */
+    memcpy (ih, inserted, KEY_SIZE);
+    if ( count > 1 )
+	memcpy (ih + 1, inserted + 1, KEY_SIZE);
+
+    /* sizes, item number */
+    set_blkh_nr_item( blkh, blkh_nr_item(blkh) + count );
+    set_blkh_free_space( blkh,
+                        blkh_free_space(blkh) - count * (DC_SIZE + KEY_SIZE ) );
+
+    do_balance_mark_internal_dirty (cur_bi->tb, cur,0);
+
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+    check_internal (cur);
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+    if (cur_bi->bi_parent) {
+	struct disk_child *t_dc = B_N_CHILD (cur_bi->bi_parent,cur_bi->bi_position);
+	put_dc_size( t_dc, dc_size(t_dc) + (count * (DC_SIZE + KEY_SIZE)));
+	do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0);
+
+	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
+	check_internal (cur_bi->bi_parent);
+	/*&&&&&&&&&&&&&&&&&&&&&&&&*/   
+    }
+
+}
+
+
+/* Delete del_num items and node pointers from buffer cur starting from
+ * the first_i'th item and first_p'th pointers respectively. */
+static void	internal_delete_pointers_items (
+						struct buffer_info * cur_bi,
+						int first_p, 
+						int first_i, 
+						int del_num
+						)
+{
+  struct buffer_head * cur = cur_bi->bi_bh;
+  int nr;
+  struct block_head * blkh;
+  struct reiserfs_key * key;
+  struct disk_child * dc;
+
+  RFALSE( cur == NULL, "buffer is 0");
+  RFALSE( del_num < 0,
+          "negative number of items (%d) can not be deleted", del_num);
+  RFALSE( first_p < 0 || first_p + del_num > B_NR_ITEMS (cur) + 1 || first_i < 0,
+          "first pointer order (%d) < 0 or "
+          "no so many pointers (%d), only (%d) or "
+          "first key order %d < 0", first_p, 
+          first_p + del_num, B_NR_ITEMS (cur) + 1, first_i);
+  if ( del_num == 0 )
+    return;
+
+  blkh = B_BLK_HEAD(cur);
+  nr = blkh_nr_item(blkh);
+
+  if ( first_p == 0 && del_num == nr + 1 ) {
+    RFALSE( first_i != 0, "1st deleted key must have order 0, not %d", first_i);
+    make_empty_node (cur_bi);
+    return;
+  }
+
+  RFALSE( first_i + del_num > B_NR_ITEMS (cur),
+          "first_i = %d del_num = %d "
+          "no so many keys (%d) in the node (%b)(%z)",
+          first_i, del_num, first_i + del_num, cur, cur);
+
+
+  /* deleting */
+  dc = B_N_CHILD (cur, first_p);
+
+  memmove (dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
+  key = B_N_PDELIM_KEY (cur, first_i);
+  memmove (key, key + del_num, (nr - first_i - del_num) * KEY_SIZE + (nr + 1 - del_num) * DC_SIZE);
+
+
+  /* sizes, item number */
+  set_blkh_nr_item( blkh, blkh_nr_item(blkh) - del_num );
+  set_blkh_free_space( blkh,
+                    blkh_free_space(blkh) + (del_num * (KEY_SIZE + DC_SIZE) ) );
+
+  do_balance_mark_internal_dirty (cur_bi->tb, cur, 0);
+  /*&&&&&&&&&&&&&&&&&&&&&&&*/
+  check_internal (cur);
+  /*&&&&&&&&&&&&&&&&&&&&&&&*/
+ 
+  if (cur_bi->bi_parent) {
+    struct disk_child *t_dc;
+    t_dc = B_N_CHILD (cur_bi->bi_parent, cur_bi->bi_position);
+    put_dc_size( t_dc, dc_size(t_dc) - (del_num * (KEY_SIZE + DC_SIZE) ) );
+
+    do_balance_mark_internal_dirty (cur_bi->tb, cur_bi->bi_parent,0);
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+    check_internal (cur_bi->bi_parent);
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/   
+  }
+}
+
+
+/* delete n node pointers and items starting from given position */
+static void  internal_delete_childs (struct buffer_info * cur_bi, 
+				     int from, int n)
+{
+  int i_from;
+
+  i_from = (from == 0) ? from : from - 1;
+
+  /* delete n pointers starting from `from' position in CUR;
+     delete n keys starting from 'i_from' position in CUR;
+     */
+  internal_delete_pointers_items (cur_bi, from, i_from, n);
+}
+
+
+/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means that we copy the first items from src to the tail of dest
+ * last_first == LAST_TO_FIRST means that we copy the last items from src to the head of dest
+ */
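+/* A small illustrative example: with src keys = [k0 k1 k2] and src pointers =
+ * [p0 p1 p2 p3], cpy_num == 2 copies p2 p3 and k2 to the head of dest for
+ * LAST_TO_FIRST, or p0 p1 and k0 to the tail of dest for FIRST_TO_LAST; the
+ * delimiting key between them is expected to have been inserted into dest by
+ * the caller beforehand (see the ATTENTION note below).
+ */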
+static void internal_copy_pointers_items (
+					  struct buffer_info * dest_bi,
+					  struct buffer_head * src,
+					  int last_first, int cpy_num
+					  )
+{
+  /* ATTENTION! The number of node pointers in DEST is equal to the number of
+   * items in DEST, as the delimiting key has already been inserted into buffer dest. */
+  struct buffer_head * dest = dest_bi->bi_bh;
+  int nr_dest, nr_src;
+  int dest_order, src_order;
+  struct block_head * blkh;
+  struct reiserfs_key * key;
+  struct disk_child * dc;
+
+  nr_src = B_NR_ITEMS (src);
+
+  RFALSE( dest == NULL || src == NULL, 
+	  "src (%p) or dest (%p) buffer is 0", src, dest);
+  RFALSE( last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST,
+	  "invalid last_first parameter (%d)", last_first);
+  RFALSE( nr_src < cpy_num - 1, 
+	  "no so many items (%d) in src (%d)", cpy_num, nr_src);
+  RFALSE( cpy_num < 0, "cpy_num less than 0 (%d)", cpy_num);
+  RFALSE( cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest),
+	  "cpy_num (%d) + item number in dest (%d) can not be > MAX_NR_KEY(%d)",
+	  cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest));
+
+  if ( cpy_num == 0 )
+    return;
+
+	/* copying */
+  blkh = B_BLK_HEAD(dest);
+  nr_dest = blkh_nr_item(blkh);
+
+  /*dest_order = (last_first == LAST_TO_FIRST) ? 0 : nr_dest;*/
+  /*src_order = (last_first == LAST_TO_FIRST) ? (nr_src - cpy_num + 1) : 0;*/
+  (last_first == LAST_TO_FIRST) ?	(dest_order = 0, src_order = nr_src - cpy_num + 1) :
+    (dest_order = nr_dest, src_order = 0);
+
+  /* prepare space for cpy_num pointers */
+  dc = B_N_CHILD (dest, dest_order);
+
+  memmove (dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE);
+
+	/* insert pointers */
+  memcpy (dc, B_N_CHILD (src, src_order), DC_SIZE * cpy_num);
+
+
+  /* prepare space for cpy_num - 1 item headers */
+  key = B_N_PDELIM_KEY(dest, dest_order);
+  memmove (key + cpy_num - 1, key,
+	   KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest + cpy_num));
+
+
+  /* insert headers */
+  memcpy (key, B_N_PDELIM_KEY (src, src_order), KEY_SIZE * (cpy_num - 1));
+
+  /* sizes, item number */
+  set_blkh_nr_item( blkh, blkh_nr_item(blkh) + (cpy_num - 1 ) );
+  set_blkh_free_space( blkh,
+      blkh_free_space(blkh) - (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num ) );
+
+  do_balance_mark_internal_dirty (dest_bi->tb, dest, 0);
+
+  /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+  check_internal (dest);
+  /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+  if (dest_bi->bi_parent) {
+    struct disk_child *t_dc;
+    t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position);
+    put_dc_size( t_dc, dc_size(t_dc) + (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num) );
+
+    do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0);
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+    check_internal (dest_bi->bi_parent);
+    /*&&&&&&&&&&&&&&&&&&&&&&&&*/   
+  }
+
+}
+
+
+/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest.
+ * Delete cpy_num - del_par items and node pointers from buffer src.
+ * last_first == FIRST_TO_LAST means that we copy/delete the first items from src.
+ * last_first == LAST_TO_FIRST means that we copy/delete the last items from src.
+ */
+static void internal_move_pointers_items (struct buffer_info * dest_bi, 
+					  struct buffer_info * src_bi, 
+					  int last_first, int cpy_num, int del_par)
+{
+    int first_pointer;
+    int first_item;
+    
+    internal_copy_pointers_items (dest_bi, src_bi->bi_bh, last_first, cpy_num);
+
+    if (last_first == FIRST_TO_LAST) {	/* shift_left occurs */
+	first_pointer = 0;
+	first_item = 0;
+	/* delete cpy_num - del_par pointers and keys, starting with
+	   first_pointer for pointers and with first_item for keys */
+	internal_delete_pointers_items (src_bi, first_pointer, first_item, cpy_num - del_par);
+    } else {			/* shift_right occurs */
+	int i, j;
+
+	i = ( cpy_num - del_par == ( j = B_NR_ITEMS(src_bi->bi_bh)) + 1 ) ? 0 : j - cpy_num + del_par;
+
+	internal_delete_pointers_items (src_bi, j + 1 - cpy_num + del_par, i, cpy_num - del_par);
+    }
+}
+
+/* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */
+static void internal_insert_key (struct buffer_info * dest_bi, 
+				 int dest_position_before,                 /* insert key before key with n_dest number */
+				 struct buffer_head * src, 
+				 int src_position)
+{
+    struct buffer_head * dest = dest_bi->bi_bh;
+    int nr;
+    struct block_head * blkh;
+    struct reiserfs_key * key;
+
+    RFALSE( dest == NULL || src == NULL,
+	    "source(%p) or dest(%p) buffer is 0", src, dest);
+    RFALSE( dest_position_before < 0 || src_position < 0,
+	    "source(%d) or dest(%d) key number less than 0", 
+	    src_position, dest_position_before);
+    RFALSE( dest_position_before > B_NR_ITEMS (dest) || 
+	    src_position >= B_NR_ITEMS(src),
+	    "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))",
+	    dest_position_before, B_NR_ITEMS (dest), 
+	    src_position, B_NR_ITEMS(src));
+    RFALSE( B_FREE_SPACE (dest) < KEY_SIZE,
+	    "no enough free space (%d) in dest buffer", B_FREE_SPACE (dest));
+
+    blkh = B_BLK_HEAD(dest);
+    nr = blkh_nr_item(blkh);
+
+    /* prepare space for inserting key */
+    key = B_N_PDELIM_KEY (dest, dest_position_before);
+    memmove (key + 1, key, (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE);
+
+    /* insert key */
+    memcpy (key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE);
+
+    /* Update the dirty state, free space and item number fields. */
+
+    set_blkh_nr_item( blkh, blkh_nr_item(blkh) + 1 );
+    set_blkh_free_space( blkh, blkh_free_space(blkh) - KEY_SIZE );
+
+    do_balance_mark_internal_dirty (dest_bi->tb, dest, 0);
+
+    if (dest_bi->bi_parent) {
+	struct disk_child *t_dc;
+	t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position);
+	put_dc_size( t_dc, dc_size(t_dc) + KEY_SIZE );
+
+	do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0);
+    }
+}
+
+
+
+/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest. 
+ * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
+ * Replace  d_key'th key in buffer cfl.
+ * Delete pointer_amount items and node pointers from buffer src.
+ */
+/* this can be invoked both to shift from S to L and from R to S */
+static void	internal_shift_left (
+				     int mode,	/* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */
+				     struct tree_balance * tb,
+				     int h,
+				     int pointer_amount
+				     )
+{
+  struct buffer_info dest_bi, src_bi;
+  struct buffer_head * cf;
+  int d_key_position;
+
+  internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+  /*printk("pointer_amount = %d\n",pointer_amount);*/
+
+  if (pointer_amount) {
+    /* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */
+    internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position);
+
+    if (B_NR_ITEMS(src_bi.bi_bh) == pointer_amount - 1) {
+      if (src_bi.bi_position/*src->b_item_order*/ == 0)
+	replace_key (tb, cf, d_key_position, src_bi.bi_parent/*src->b_parent*/, 0);
+    } else
+      replace_key (tb, cf, d_key_position, src_bi.bi_bh, pointer_amount - 1);
+  }
+  /* last parameter is del_parameter */
+  internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 0);
+
+}
+
+/* Insert delimiting key to L[h].
+ * Copy n node pointers and n - 1 items from buffer S[h] to L[h].
+ * Delete n - 1 items and node pointers from buffer S[h].
+ */
+/* it always shifts from S[h] to L[h] */
+static void	internal_shift1_left (
+				      struct tree_balance * tb, 
+				      int h, 
+				      int pointer_amount
+				      )
+{
+  struct buffer_info dest_bi, src_bi;
+  struct buffer_head * cf;
+  int d_key_position;
+
+  internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+  if ( pointer_amount > 0 ) /* insert lkey[h]-th key  from CFL[h] to left neighbor L[h] */
+    internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position);
+  /*		internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]);*/
+
+  /* last parameter is del_parameter */
+  internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 1);
+  /*	internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1);*/
+}
+
+
+/* Insert d_key'th (delimiting) key from buffer cfr to head of dest. 
+ * Copy n node pointers and n - 1 items from buffer src to buffer dest.
+ * Replace  d_key'th key in buffer cfr.
+ * Delete n items and node pointers from buffer src.
+ */
+static void internal_shift_right (
+				  int mode,	/* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */
+				  struct tree_balance * tb,
+				  int h,
+				  int pointer_amount
+				  )
+{
+  struct buffer_info dest_bi, src_bi;
+  struct buffer_head * cf;
+  int d_key_position;
+  int nr;
+
+
+  internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+  nr = B_NR_ITEMS (src_bi.bi_bh);
+
+  if (pointer_amount > 0) {
+    /* insert delimiting key from common father of dest and src to dest node into position 0 */
+    internal_insert_key (&dest_bi, 0, cf, d_key_position);
+    if (nr == pointer_amount - 1) {
+	 RFALSE( src_bi.bi_bh != PATH_H_PBUFFER (tb->tb_path, h)/*tb->S[h]*/ || 
+		 dest_bi.bi_bh != tb->R[h],
+		 "src (%p) must be == tb->S[h](%p) when it disappears",
+		 src_bi.bi_bh, PATH_H_PBUFFER (tb->tb_path, h));
+      /* when S[h] disappears, replace the left delimiting key as well */
+      if (tb->CFL[h])
+	replace_key (tb, cf, d_key_position, tb->CFL[h], tb->lkey[h]);
+    } else
+      replace_key (tb, cf, d_key_position, src_bi.bi_bh, nr - pointer_amount);
+  }      
+
+  /* last parameter is del_parameter */
+  internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 0);
+}
+
+/* Insert delimiting key to R[h].
+ * Copy n node pointers and n - 1 items from buffer S[h] to R[h].
+ * Delete n - 1 items and node pointers from buffer S[h].
+ */
+/* it always shifts from S[h] to R[h] */
+static void	internal_shift1_right (
+				       struct tree_balance * tb, 
+				       int h, 
+				       int pointer_amount
+				       )
+{
+  struct buffer_info dest_bi, src_bi;
+  struct buffer_head * cf;
+  int d_key_position;
+
+  internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+  if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */
+    internal_insert_key (&dest_bi, 0, cf, d_key_position);
+  /*		internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]);*/
+	
+  /* last parameter is del_parameter */
+  internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 1);
+  /*	internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1);*/
+}
+
+
+/* Delete insert_num node pointers together with their left items
+ * and balance current node.*/
+static void balance_internal_when_delete (struct tree_balance * tb, 
+					  int h, int child_pos)
+{
+    int insert_num;
+    int n;
+    struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+    struct buffer_info bi;
+
+    insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));
+  
+    /* delete child-node-pointer(s) together with their left item(s) */
+    bi.tb = tb;
+    bi.bi_bh = tbSh;
+    bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+    bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+    internal_delete_childs (&bi, child_pos, -insert_num);
+
+    RFALSE( tb->blknum[h] > 1,
+	    "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]);
+
+    n = B_NR_ITEMS(tbSh);
+
+    if ( tb->lnum[h] == 0 && tb->rnum[h] == 0 ) {
+	if ( tb->blknum[h] == 0 ) {
+	    /* node S[h] (root of the tree) is empty now */
+	    struct buffer_head *new_root;
+
+	    RFALSE( n || B_FREE_SPACE (tbSh) != MAX_CHILD_SIZE(tbSh) - DC_SIZE,
+		    "buffer must have only 0 keys (%d)", n);
+	    RFALSE( bi.bi_parent, "root has parent (%p)", bi.bi_parent);
+		
+	    /* choose a new root */
+	    if ( ! tb->L[h-1] || ! B_NR_ITEMS(tb->L[h-1]) )
+		new_root = tb->R[h-1];
+	    else
+		new_root = tb->L[h-1];
+	    /* switch super block's tree root block number to the new value */
+            PUT_SB_ROOT_BLOCK( tb->tb_sb, new_root->b_blocknr );
+	    //REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --;
+            PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) - 1 );
+
+	    do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
+	    /*&&&&&&&&&&&&&&&&&&&&&&*/
+	    if (h > 1)
+		/* use check_internal if new root is an internal node */
+		check_internal (new_root);
+	    /*&&&&&&&&&&&&&&&&&&&&&&*/
+
+	    /* do what is needed for buffer thrown from tree */
+	    reiserfs_invalidate_buffer(tb, tbSh);
+	    return;
+	}
+	return;
+    }
+
+    if ( tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1 ) { /* join S[h] with L[h] */
+
+	RFALSE( tb->rnum[h] != 0,
+		"invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
+		h, tb->rnum[h]);
+
+	internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
+	reiserfs_invalidate_buffer(tb, tbSh);
+
+	return;
+    }
+
+    if ( tb->R[h] &&  tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1 ) { /* join S[h] with R[h] */
+	RFALSE( tb->lnum[h] != 0,
+		"invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
+		h, tb->lnum[h]);
+
+	internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
+
+	reiserfs_invalidate_buffer(tb,tbSh);
+	return;
+    }
+
+    if ( tb->lnum[h] < 0 ) { /* borrow from left neighbor L[h] */
+	RFALSE( tb->rnum[h] != 0,
+		"wrong tb->rnum[%d]==%d when borrow from L[h]", h, tb->rnum[h]);
+	/*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]);*/
+	internal_shift_right (INTERNAL_SHIFT_FROM_L_TO_S, tb, h, -tb->lnum[h]);
+	return;
+    }
+
+    if ( tb->rnum[h] < 0 ) { /* borrow from right neighbor R[h] */
+	 RFALSE( tb->lnum[h] != 0,
+		 "invalid tb->lnum[%d]==%d when borrow from R[h]", 
+		 h, tb->lnum[h]);
+	internal_shift_left (INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]);*/
+	return;
+    }
+
+    if ( tb->lnum[h] > 0 ) { /* split S[h] into two parts and put them into neighbors */
+	RFALSE( tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
+		"invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
+		h, tb->lnum[h], h, tb->rnum[h], n);
+
+	internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]);*/
+	internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
+
+	reiserfs_invalidate_buffer (tb, tbSh);
+
+	return;
+    }
+    reiserfs_panic (tb->tb_sb, "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
+		    h, tb->lnum[h], h, tb->rnum[h]);
+}
+
+
+/* Replace delimiting key of buffers L[h] and S[h] by the given key.*/
+static void replace_lkey (
+		      struct tree_balance * tb,
+		      int h,
+		      struct item_head * key
+		      )
+{
+   RFALSE( tb->L[h] == NULL || tb->CFL[h] == NULL,
+	   "L[h](%p) and CFL[h](%p) must exist in replace_lkey", 
+	   tb->L[h], tb->CFL[h]);
+
+  if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
+    return;
+
+  memcpy (B_N_PDELIM_KEY(tb->CFL[h],tb->lkey[h]), key, KEY_SIZE);
+
+  do_balance_mark_internal_dirty (tb, tb->CFL[h],0);
+}
+
+
+/* Replace delimiting key of buffers S[h] and R[h] by the given key.*/
+static void replace_rkey (
+		      struct tree_balance * tb,
+		      int h,
+		      struct item_head * key
+		      )
+{
+  RFALSE( tb->R[h] == NULL || tb->CFR[h] == NULL,
+	  "R[h](%p) and CFR[h](%p) must exist in replace_rkey", 
+	  tb->R[h], tb->CFR[h]);
+  RFALSE( B_NR_ITEMS(tb->R[h]) == 0,
+	  "R[h] can not be empty if it exists (item number=%d)", 
+	  B_NR_ITEMS(tb->R[h]));
+
+  memcpy (B_N_PDELIM_KEY(tb->CFR[h],tb->rkey[h]), key, KEY_SIZE);
+
+  do_balance_mark_internal_dirty (tb, tb->CFR[h], 0);
+}
+
+
+int balance_internal (struct tree_balance * tb,			/* tree_balance structure 		*/
+		      int h,					/* level of the tree 			*/
+		      int child_pos,
+		      struct item_head * insert_key,		/* key for insertion on higher level   	*/
+		      struct buffer_head ** insert_ptr	/* node for insertion on higher level*/
+    )
+    /* if inserting/pasting:
+     *   child_pos is the position of the node-pointer in S[h] that
+     *   pointed to S[h-1] before balancing of the h-1 level; this means
+     *   that new pointers and items must be inserted AFTER child_pos
+     * otherwise:
+     *   it is the position of the leftmost pointer that must be deleted
+     *   (together with its corresponding key to the left of the pointer)
+     *   as a result of the previous level's balancing.
+     */
+{
+    struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+    struct buffer_info bi;
+    int order;		/* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
+    int insert_num, n, k;
+    struct buffer_head * S_new;
+    struct item_head new_insert_key;
+    struct buffer_head * new_insert_ptr = NULL;
+    struct item_head * new_insert_key_addr = insert_key;
+
+    RFALSE( h < 1, "h (%d) can not be < 1 on internal level", h);
+
+    PROC_INFO_INC( tb -> tb_sb, balance_at[ h ] );
+
+    order = ( tbSh ) ? PATH_H_POSITION (tb->tb_path, h + 1)/*tb->S[h]->b_item_order*/ : 0;
+
+  /* Using insert_size[h] calculate the number insert_num of items
+     that must be inserted to or deleted from S[h]. */
+    insert_num = tb->insert_size[h]/((int)(KEY_SIZE + DC_SIZE));
+
+    /* Check whether insert_num is proper */
+    RFALSE( insert_num < -2  ||  insert_num > 2,
+	    "incorrect number of items inserted to the internal node (%d)", 
+	    insert_num);
+    RFALSE( h > 1  && (insert_num > 1 || insert_num < -1),
+	    "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level", 
+	    insert_num, h);
+
+    /* Make balance in case insert_num < 0 */
+    if ( insert_num < 0 ) {
+	balance_internal_when_delete (tb, h, child_pos);
+	return order;
+    }
+ 
+    k = 0;
+    if ( tb->lnum[h] > 0 ) {
+	/* shift lnum[h] items from S[h] to the left neighbor L[h].
+	   check how many of new items fall into L[h] or CFL[h] after
+	   shifting */
+	n = B_NR_ITEMS (tb->L[h]); /* number of items in L[h] */
+	if ( tb->lnum[h] <= child_pos ) {
+	    /* new items don't fall into L[h] or CFL[h] */
+	    internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);
+	    /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]);*/
+	    child_pos -= tb->lnum[h];
+	} else if ( tb->lnum[h] > child_pos + insert_num ) {
+	    /* all new items fall into L[h] */
+	    internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h] - insert_num);
+	    /*			internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
+				tb->lnum[h]-insert_num);
+	    */
+	    /* insert insert_num keys and node-pointers into L[h] */
+	    bi.tb = tb;
+	    bi.bi_bh = tb->L[h];
+	    bi.bi_parent = tb->FL[h];
+	    bi.bi_position = get_left_neighbor_position (tb, h);
+	    internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next*/ n + child_pos + 1,
+				    insert_num,insert_key,insert_ptr);
+
+	    insert_num = 0; 
+	} else {
+	    struct disk_child * dc;
+
+	    /* some items fall into L[h] or CFL[h], but some don't fall */
+	    internal_shift1_left(tb,h,child_pos+1);
+	    /* calculate number of new items that fall into L[h] */
+	    k = tb->lnum[h] - child_pos - 1;
+	    bi.tb = tb;
+	    bi.bi_bh = tb->L[h];
+	    bi.bi_parent = tb->FL[h];
+	    bi.bi_position = get_left_neighbor_position (tb, h);
+	    internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next,*/ n + child_pos + 1,k,
+				    insert_key,insert_ptr);
+
+	    replace_lkey(tb,h,insert_key + k);
+
+	    /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
+	    dc = B_N_CHILD(tbSh, 0);
+	    put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[k]) - B_FREE_SPACE (insert_ptr[k]));
+	    put_dc_block_number( dc, insert_ptr[k]->b_blocknr );
+
+	    do_balance_mark_internal_dirty (tb, tbSh, 0);
+
+	    k++;
+	    insert_key += k;
+	    insert_ptr += k;
+	    insert_num -= k;
+	    child_pos = 0;
+	}
+    }	/* tb->lnum[h] > 0 */
+
+    if ( tb->rnum[h] > 0 ) {
+	/*shift rnum[h] items from S[h] to the right neighbor R[h]*/
+	/* check how many of new items fall into R or CFR after shifting */
+	n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
+	if ( n - tb->rnum[h] >= child_pos )
+	    /* new items fall into S[h] */
+	    /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]);*/
+	    internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
+	else
+	    if ( n + insert_num - tb->rnum[h] < child_pos )
+	    {
+		/* all new items fall into R[h] */
+		/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
+	    tb->rnum[h] - insert_num);*/
+		internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h] - insert_num);
+
+		/* insert insert_num keys and node-pointers into R[h] */
+		bi.tb = tb;
+		bi.bi_bh = tb->R[h];
+		bi.bi_parent = tb->FR[h];
+		bi.bi_position = get_right_neighbor_position (tb, h);
+		internal_insert_childs (&bi, /*tb->R[h],tb->S[h-1]->b_next*/ child_pos - n - insert_num + tb->rnum[h] - 1,
+					insert_num,insert_key,insert_ptr);
+		insert_num = 0;
+	    }
+	    else
+	    {
+		struct disk_child * dc;
+
+		/* one of the items falls into CFR[h] */
+		internal_shift1_right(tb,h,n - child_pos + 1);
+		/* calculate number of new items that fall into R[h] */
+		k = tb->rnum[h] - n + child_pos - 1;
+		bi.tb = tb;
+		bi.bi_bh = tb->R[h];
+		bi.bi_parent = tb->FR[h];
+		bi.bi_position = get_right_neighbor_position (tb, h);
+		internal_insert_childs (&bi, /*tb->R[h], tb->R[h]->b_child,*/ 0, k, insert_key + 1, insert_ptr + 1);
+
+		replace_rkey(tb,h,insert_key + insert_num - k - 1);
+
+		/* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1]*/
+		dc = B_N_CHILD(tb->R[h], 0);
+		put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) -
+    				    B_FREE_SPACE (insert_ptr[insert_num-k-1]));
+		put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr );
+
+		do_balance_mark_internal_dirty (tb, tb->R[h],0);
+
+		insert_num -= (k + 1);
+	    }
+    }
+
+    /** Fill new node that appears instead of S[h] **/
+    RFALSE( tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
+    RFALSE( tb->blknum[h] < 0, "blknum can not be < 0");
+
+    if ( ! tb->blknum[h] )
+    { /* node S[h] is empty now */
+	RFALSE( ! tbSh, "S[h] is equal NULL");
+
+	/* do what is needed for buffer thrown from tree */
+	reiserfs_invalidate_buffer(tb,tbSh);
+	return order;
+    }
+
+    if ( ! tbSh ) {
+	/* create new root */
+	struct disk_child  * dc;
+	struct buffer_head * tbSh_1 = PATH_H_PBUFFER (tb->tb_path, h - 1);
+        struct block_head *  blkh;
+
+
+	if ( tb->blknum[h] != 1 )
+	    reiserfs_panic(NULL, "balance_internal: One new node required for creating the new root");
+	/* S[h] = empty buffer from the list FEB. */
+	tbSh = get_FEB (tb);
+        blkh = B_BLK_HEAD(tbSh);
+        set_blkh_level( blkh, h + 1 );
+
+	/* Put the unique node-pointer to S[h] that points to S[h-1]. */
+
+	dc = B_N_CHILD(tbSh, 0);
+	put_dc_block_number( dc, tbSh_1->b_blocknr );
+	put_dc_size( dc, (MAX_CHILD_SIZE (tbSh_1) - B_FREE_SPACE (tbSh_1)));
+
+	tb->insert_size[h] -= DC_SIZE;
+        set_blkh_free_space( blkh, blkh_free_space(blkh) - DC_SIZE );
+
+	do_balance_mark_internal_dirty (tb, tbSh, 0);
+
+	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
+	check_internal (tbSh);
+	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
+    
+    /* put new root into path structure */
+	PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) = tbSh;
+
+	/* Change root in structure super block. */
+        PUT_SB_ROOT_BLOCK( tb->tb_sb, tbSh->b_blocknr );
+        PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1 );
+	do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
+    }
+	
+    if ( tb->blknum[h] == 2 ) {
+	int snum;
+	struct buffer_info dest_bi, src_bi;
+
+
+	/* S_new = free buffer from list FEB */
+	S_new = get_FEB(tb);
+
+        set_blkh_level( B_BLK_HEAD(S_new), h + 1 );
+
+	dest_bi.tb = tb;
+	dest_bi.bi_bh = S_new;
+	dest_bi.bi_parent = NULL;
+	dest_bi.bi_position = 0;
+	src_bi.tb = tb;
+	src_bi.bi_bh = tbSh;
+	src_bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	src_bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+		
+	n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
+	snum = (insert_num + n + 1)/2;
+	if ( n - snum >= child_pos ) {
+	    /* new items don't fall into S_new */
+	    /*	store the delimiting key for the next level */
+	    /* new_insert_key = (n - snum)'th key in S[h] */
+	    memcpy (&new_insert_key,B_N_PDELIM_KEY(tbSh,n - snum),
+		    KEY_SIZE);
+	    /* last parameter is del_par */
+	    internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum, 0);
+	    /*            internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0);*/
+	} else if ( n + insert_num - snum < child_pos ) {
+	    /* all new items fall into S_new */
+	    /*	store the delimiting key for the next level */
+	    /* new_insert_key = (n + insert_item - snum)'th key in S[h] */
+	    memcpy(&new_insert_key,B_N_PDELIM_KEY(tbSh,n + insert_num - snum),
+		   KEY_SIZE);
+	    /* last parameter is del_par */
+	    internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum - insert_num, 0);
+	    /*			internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0);*/
+
+	    /* insert insert_num keys and node-pointers into S_new */
+	    internal_insert_childs (&dest_bi, /*S_new,tb->S[h-1]->b_next,*/child_pos - n - insert_num + snum - 1,
+				    insert_num,insert_key,insert_ptr);
+
+	    insert_num = 0;
+	} else {
+	    struct disk_child * dc;
+
+	    /* some items fall into S_new, but some don't fall */
+	    /* last parameter is del_par */
+	    internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, n - child_pos + 1, 1);
+	    /*			internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1);*/
+	    /* calculate number of new items that fall into S_new */
+	    k = snum - n + child_pos - 1;
+
+	    internal_insert_childs (&dest_bi, /*S_new,*/ 0, k, insert_key + 1, insert_ptr+1);
+
+	    /* new_insert_key = insert_key[insert_num - k - 1] */
+	    memcpy(&new_insert_key,insert_key + insert_num - k - 1,
+		   KEY_SIZE);
+	    /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
+
+	    dc = B_N_CHILD(S_new,0);
+	    put_dc_size( dc, (MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) -
+				B_FREE_SPACE(insert_ptr[insert_num-k-1])) );
+	    put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr );
+
+	    do_balance_mark_internal_dirty (tb, S_new,0);
+			
+	    insert_num -= (k + 1);
+	}
+	/* new_insert_ptr = node_pointer to S_new */
+	new_insert_ptr = S_new;
+
+	RFALSE (!buffer_journaled(S_new) || buffer_journal_dirty(S_new) ||
+		buffer_dirty (S_new),
+		"cm-00001: bad S_new (%b)", S_new);
+
+	// S_new is released in unfix_nodes
+    }
+
+    n = B_NR_ITEMS (tbSh); /*number of items in S[h] */
+
+	if ( 0 <= child_pos && child_pos <= n && insert_num > 0 ) {
+	    bi.tb = tb;
+	    bi.bi_bh = tbSh;
+	    bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+	    bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+		internal_insert_childs (
+		    &bi,/*tbSh,*/
+		    /*		( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next :  tb->S[h]->b_child->b_next,*/
+		    child_pos,insert_num,insert_key,insert_ptr
+		    );
+	}
+
+
+	memcpy (new_insert_key_addr,&new_insert_key,KEY_SIZE);
+	insert_ptr[0] = new_insert_ptr;
+
+	return order;
+    }
+
+  
+    
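/*
 * Editorial aside (not part of the patch): balance_internal() above decides
 * where the insert_num new children land when lnum[h] items are shifted into
 * the left neighbor by comparing lnum[h] against child_pos and
 * child_pos + insert_num.  A minimal, standalone sketch of just that
 * three-way split, using hypothetical plain-int names:
 */
#include <stdio.h>

enum left_case { NEW_STAY_IN_S, NEW_ALL_IN_L, NEW_SPLIT };

static enum left_case classify_left_shift(int lnum, int child_pos, int insert_num)
{
	if (lnum <= child_pos)
		return NEW_STAY_IN_S;	/* shifted items all precede the insertion point */
	if (lnum > child_pos + insert_num)
		return NEW_ALL_IN_L;	/* insertion point and all new items move left */
	return NEW_SPLIT;		/* k = lnum - child_pos - 1 new items go left */
}

int main(void)
{
	/* e.g. lnum=4, child_pos=2, insert_num=3: the split case, so k = 1 */
	printf("%d\n", classify_left_shift(4, 2, 3) == NEW_SPLIT);
	return 0;
}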
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
new file mode 100644
index 0000000..7543031
--- /dev/null
+++ b/fs/reiserfs/inode.c
@@ -0,0 +1,2846 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/quotaops.h>
+
+extern int reiserfs_default_io_size; /* default io size defined in super.c */
+
+static int reiserfs_commit_write(struct file *f, struct page *page,
+                                 unsigned from, unsigned to);
+static int reiserfs_prepare_write(struct file *f, struct page *page,
+				  unsigned from, unsigned to);
+
+void reiserfs_delete_inode (struct inode * inode)
+{
+    /* We need blocks for transaction + (user+group) quota update (possibly delete) */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 * REISERFS_QUOTA_INIT_BLOCKS;
+    struct reiserfs_transaction_handle th ;
+  
+    reiserfs_write_lock(inode->i_sb);
+
+    /* A zero objectid happens when we abort creating a new inode for some reason, such as lack of space. */
+    if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
+	down (&inode->i_sem); 
+
+	reiserfs_delete_xattrs (inode);
+
+	if (journal_begin(&th, inode->i_sb, jbegin_count)) {
+	    up (&inode->i_sem);
+	    goto out;
+	}
+	reiserfs_update_inode_transaction(inode) ;
+
+	if (reiserfs_delete_object (&th, inode)) {
+	    up (&inode->i_sem);
+	    goto out;
+	}
+
+	/* Do quota update inside a transaction for journaled quotas. We must do that
+	 * after delete_object so that quota updates go into the same transaction as
+	 * stat data deletion */
+	DQUOT_FREE_INODE(inode);
+
+	if (journal_end(&th, inode->i_sb, jbegin_count)) {
+	    up (&inode->i_sem);
+	    goto out;
+	}
+
+        up (&inode->i_sem);
+
+        /* all items of file are deleted, so we can remove "save" link */
+	remove_save_link (inode, 0/* not truncate */); /* we can't do anything
+                                                        * about an error here */
+    } else {
+	/* no object items are in the tree */
+	;
+    }
+out:
+    clear_inode (inode); /* note this must go after the journal_end to prevent deadlock */
+    inode->i_blocks = 0;
+    reiserfs_write_unlock(inode->i_sb);
+}
+
+static void _make_cpu_key (struct cpu_key * key, int version, __u32 dirid, __u32 objectid, 
+	       loff_t offset, int type, int length )
+{
+    key->version = version;
+
+    key->on_disk_key.k_dir_id = dirid;
+    key->on_disk_key.k_objectid = objectid;
+    set_cpu_key_k_offset (key, offset);
+    set_cpu_key_k_type (key, type);  
+    key->key_length = length;
+}
+
+
+/* take the base of the key (dirid, objectid) and the version from the inode (they always
+   come from the inode), then set the offset and type of the key */
+void make_cpu_key (struct cpu_key * key, struct inode * inode, loff_t offset,
+	      int type, int length )
+{
+  _make_cpu_key (key, get_inode_item_key_version (inode), le32_to_cpu (INODE_PKEY (inode)->k_dir_id),
+		 le32_to_cpu (INODE_PKEY (inode)->k_objectid), 
+		 offset, type, length);
+}
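/*
 * Editorial aside (not part of the patch): reiserfs key offsets are 1-based
 * byte offsets, so the key of the 'block'-th logical block of a file is built
 * from block * blocksize + 1 (this is exactly how _get_block_create_0() below
 * calls make_cpu_key()).  Standalone arithmetic check:
 */
#include <assert.h>

int main(void)
{
	unsigned long blocksize = 4096;			/* assumed block size */
	unsigned long block = 3;			/* 4th logical block of the file */
	unsigned long long offset = (unsigned long long)block * blocksize + 1;

	assert(offset == 12289);			/* first byte of that block, 1-based */
	return 0;
}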
+
+
+//
+// when key is 0, do not set version and short key
+//
+inline void make_le_item_head (struct item_head * ih, const struct cpu_key * key,
+			       int version,
+			       loff_t offset, int type, int length, 
+			       int entry_count/*or ih_free_space*/)
+{
+    if (key) {
+	ih->ih_key.k_dir_id = cpu_to_le32 (key->on_disk_key.k_dir_id);
+	ih->ih_key.k_objectid = cpu_to_le32 (key->on_disk_key.k_objectid);
+    }
+    put_ih_version( ih, version );
+    set_le_ih_k_offset (ih, offset);
+    set_le_ih_k_type (ih, type);
+    put_ih_item_len( ih, length );
+    /*    set_ih_free_space (ih, 0);*/
+    // for directory items this is the entry count; for direct and stat
+    // data items it is 0xffff, for indirect items 0
+    put_ih_entry_count( ih, entry_count );
+}
+
+//
+// FIXME: we might cache recently accessed indirect item
+
+// Ugh.  Not too eager for that....
+//  I cut the code until such time as I see a convincing argument (benchmark).
+// I don't want a bloated inode struct..., and I don't like code complexity....
+
+/* cutting the code is fine, since it really isn't in use yet and is easy
+** to add back in.  But, Vladimir has a really good idea here.  Think
+** about what happens for reading a file.  For each page,
+** The VFS layer calls reiserfs_readpage, who searches the tree to find
+** an indirect item.  This indirect item has X number of pointers, where
+** X is a big number if we've done the block allocation right.  But,
+** we only use one or two of these pointers during each call to readpage,
+** needlessly re-searching the tree again later on.
+**
+** The size of the cache could be dynamic based on the size of the file.
+**
+** I'd also like to see us cache the location of the stat data item, since
+** we are needlessly researching for that frequently.
+**
+** --chris
+*/
+
+/* If this page has a file tail in it, and
+** it was read in by get_block_create_0, the page data is valid,
+** but tail is still sitting in a direct item, and we can't write to
+** it.  So, look through this page, and check all the mapped buffers
+** to make sure they have valid block numbers.  Any that don't must
+** be unmapped, so that block_prepare_write will correctly call
+** reiserfs_get_block to convert the tail into an unformatted node
+*/
+static inline void fix_tail_page_for_writing(struct page *page) {
+    struct buffer_head *head, *next, *bh ;
+
+    if (page && page_has_buffers(page)) {
+	head = page_buffers(page) ;
+	bh = head ;
+	do {
+	    next = bh->b_this_page ;
+	    if (buffer_mapped(bh) && bh->b_blocknr == 0) {
+	        reiserfs_unmap_buffer(bh) ;
+	    }
+	    bh = next ;
+	} while (bh != head) ;
+    }
+}
+
+/* reiserfs_get_block needs to allocate a block unless one has already been
+   allocated or a non-hole position has been found in the indirect item */
+static inline int allocation_needed (int retval, b_blocknr_t allocated, 
+				     struct item_head * ih,
+				     __u32 * item, int pos_in_item)
+{
+  if (allocated)
+	 return 0;
+  if (retval == POSITION_FOUND && is_indirect_le_ih (ih) && 
+      get_block_num(item, pos_in_item))
+	 return 0;
+  return 1;
+}
+
+static inline int indirect_item_found (int retval, struct item_head * ih)
+{
+  return (retval == POSITION_FOUND) && is_indirect_le_ih (ih);
+}
+
+
+static inline void set_block_dev_mapped (struct buffer_head * bh, 
+					 b_blocknr_t block, struct inode * inode)
+{
+	map_bh(bh, inode->i_sb, block);
+}
+
+
+//
+// files which were created with the earlier (3.5) key format can not be
+// larger than 2 GB
+//
+static int file_capable (struct inode * inode, long block)
+{
+    if (get_inode_item_key_version (inode) != KEY_FORMAT_3_5 || // it is new file.
+	block < (1 << (31 - inode->i_sb->s_blocksize_bits))) // old file, but 'block' is inside of 2gb
+	return 1;
+
+    return 0;
+}
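/*
 * Editorial aside (not part of the patch): for a file in the old 3.5 key
 * format, file_capable() above only accepts logical blocks below
 * 1 << (31 - blocksize_bits), i.e. exactly 2 GB worth of blocks.
 * Standalone arithmetic check:
 */
#include <assert.h>

int main(void)
{
	unsigned int blocksize_bits = 12;			/* 4096-byte blocks */
	unsigned long limit = 1UL << (31 - blocksize_bits);	/* 524288 blocks */

	assert(((unsigned long long)limit << blocksize_bits) == (1ULL << 31));	/* = 2 GB */
	return 0;
}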
+
+/*static*/ int restart_transaction(struct reiserfs_transaction_handle *th,
+				struct inode *inode, struct path *path) {
+  struct super_block *s = th->t_super ;
+  int len = th->t_blocks_allocated ;
+  int err;
+
+  BUG_ON (!th->t_trans_id);
+  BUG_ON (!th->t_refcount);
+
+  /* we cannot restart while nested */
+  if (th->t_refcount > 1) {
+      return 0  ;
+  }
+  pathrelse(path) ;
+  reiserfs_update_sd(th, inode) ;
+  err = journal_end(th, s, len) ;
+  if (!err) {
+      err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6) ;
+      if (!err)
+        reiserfs_update_inode_transaction(inode) ;
+  }
+  return err;
+}
+
+// Called by reiserfs_get_block when create == 0.  Returns the block number
+// of the 'block'-th logical block of the file.  When it hits a direct item
+// it either returns 0 (when called from bmap) or reads the direct item into
+// the relevant piece of the page (bh_result).
+
+static int _get_block_create_0 (struct inode * inode, long block,
+				 struct buffer_head * bh_result,
+				 int args)
+{
+    INITIALIZE_PATH (path);
+    struct cpu_key key;
+    struct buffer_head * bh;
+    struct item_head * ih, tmp_ih;
+    int fs_gen ;
+    int blocknr;
+    char * p = NULL;
+    int chars;
+    int ret ;
+    int done = 0 ;
+    unsigned long offset ;
+
+    // prepare the key to look for the 'block'-th block of file
+    make_cpu_key (&key, inode,
+		  (loff_t)block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 3);
+
+research:
+    if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND) {
+	pathrelse (&path);
+        if (p)
+            kunmap(bh_result->b_page) ;
+	// We do not return -ENOENT if there is a hole but the page is uptodate, because that
+	// means there is some mmapped data associated with it that has yet to be written to disk.
+	if ((args & GET_BLOCK_NO_HOLE) && !PageUptodate(bh_result->b_page) ) {
+	    return -ENOENT ;
+	}
+        return 0 ;
+    }
+    
+    //
+    bh = get_last_bh (&path);
+    ih = get_ih (&path);
+    if (is_indirect_le_ih (ih)) {
+	__u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+	
+	/* FIXME: here we could cache indirect item or part of it in
+	   the inode to avoid search_by_key in case of subsequent
+	   access to file */
+	blocknr = get_block_num(ind_item, path.pos_in_item) ;
+	ret = 0 ;
+	if (blocknr) {
+	    map_bh(bh_result, inode->i_sb, blocknr);
+	    if (path.pos_in_item == ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
+		set_buffer_boundary(bh_result);
+	    }
+	} else 
+	    // We do not return -ENOENT if there is a hole but the page is uptodate, because that
+	    // means there is some mmapped data associated with it that has yet to be written to disk.
+	    if ((args & GET_BLOCK_NO_HOLE) && !PageUptodate(bh_result->b_page) ) {
+	    ret = -ENOENT ;
+	    }
+
+	pathrelse (&path);
+        if (p)
+            kunmap(bh_result->b_page) ;
+	return ret ;
+    }
+
+    // requested data are in direct item(s)
+    if (!(args & GET_BLOCK_READ_DIRECT)) {
+	// we are called by bmap. FIXME: we can not map block of file
+	// when it is stored in direct item(s)
+	pathrelse (&path);	
+        if (p)
+            kunmap(bh_result->b_page) ;
+	return -ENOENT;
+    }
+
+    /* if we've got a direct item, and the buffer or page was uptodate,
+    ** we don't want to pull data off disk again.  skip to the
+    ** end, where we map the buffer and return
+    */
+    if (buffer_uptodate(bh_result)) {
+        goto finished ;
+    } else 
+	/*
+	** grab_tail_page can trigger calls to reiserfs_get_block on up to date
+	** pages without any buffers.  If the page is up to date, we don't want
+	** pages without any buffers.  If the page is up to date, we don't want
+	** to read old data off disk.  Set the up to date bit on the buffer instead
+	*/
+	    if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
+		set_buffer_uptodate(bh_result);
+		goto finished ;
+    }
+
+    // read file tail into part of page
+    offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
+    fs_gen = get_generation(inode->i_sb) ;
+    copy_item_head (&tmp_ih, ih);
+
+    /* we only want to kmap if we are reading the tail into the page.
+    ** this is not the common case, so we don't kmap until we are
+    ** sure we need to.  But, this means the item might move if
+    ** kmap schedules
+    */
+    if (!p) {
+	p = (char *)kmap(bh_result->b_page) ;
+	if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+	    goto research;
+	}
+    }
+    p += offset ;
+    memset (p, 0, inode->i_sb->s_blocksize);
+    do {
+	if (!is_direct_le_ih (ih)) {
+	    BUG ();
+        }
+	/* make sure we don't read more bytes than actually exist in
+	** the file.  This can happen in odd cases where i_size isn't
+	** correct, and when direct item padding results in a few 
+	** extra bytes at the end of the direct item
+	*/
+        if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
+	    break ;
+	if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
+	    chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path.pos_in_item;
+	    done = 1 ;
+	} else {
+	    chars = ih_item_len(ih) - path.pos_in_item;
+	}
+	memcpy (p, B_I_PITEM (bh, ih) + path.pos_in_item, chars);
+
+	if (done) 
+	    break ;
+
+	p += chars;
+
+	if (PATH_LAST_POSITION (&path) != (B_NR_ITEMS (bh) - 1))
+	    // we are done if the direct item we read is not the last item of the
+	    // node.  FIXME: we could try to check the right delimiting key
+	    // to see whether the direct item continues in the right
+	    // neighbor, or rely on i_size
+	    break;
+
+	// update key to look for the next piece
+	set_cpu_key_k_offset (&key, cpu_key_k_offset (&key) + chars);
+	if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND)
+	    // we have already read something from the tail, so stop even if we got IO_ERROR now
+	    break;
+	bh = get_last_bh (&path);
+	ih = get_ih (&path);
+    } while (1);
+
+    flush_dcache_page(bh_result->b_page) ;
+    kunmap(bh_result->b_page) ;
+
+finished:
+    pathrelse (&path);
+    /* this buffer has valid data, but isn't valid for io.  mapping it to
+     * block #0 tells the rest of reiserfs it just has a tail in it
+     */
+    map_bh(bh_result, inode->i_sb, 0);
+    set_buffer_uptodate (bh_result);
+    return 0;
+}
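/*
 * Editorial aside (not part of the patch): the copy loop above clamps the
 * number of bytes taken from a direct item by i_size; in the clamped case it
 * uses chars = i_size - (item_offset - 1) - pos_in_item.  Standalone check
 * with hypothetical numbers:
 */
#include <assert.h>

int main(void)
{
	unsigned long long i_size = 100;	/* the file is 100 bytes long */
	unsigned long long item_offset = 1;	/* 1-based offset of the direct item */
	unsigned long item_len = 128;		/* padded item is longer than the file */
	unsigned long pos_in_item = 0;
	unsigned long chars;

	if (item_offset - 1 + item_len > i_size)
		chars = i_size - (item_offset - 1) - pos_in_item;
	else
		chars = item_len - pos_in_item;

	assert(chars == 100);			/* only the real 100 bytes get copied */
	return 0;
}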
+
+
+// this is called to create the file's block map, so _get_block_create_0 will
+// not read the direct item
+static int reiserfs_bmap (struct inode * inode, sector_t block,
+			  struct buffer_head * bh_result, int create)
+{
+    if (!file_capable (inode, block))
+	return -EFBIG;
+
+    reiserfs_write_lock(inode->i_sb);
+    /* do not read the direct item */
+    _get_block_create_0 (inode, block, bh_result, 0) ;
+    reiserfs_write_unlock(inode->i_sb);
+    return 0;
+}
+
+/* special version of get_block that is only used by grab_tail_page right
+** now.  It is sent to block_prepare_write, and when you try to get a
+** block past the end of the file (or a block from a hole) it returns
+** -ENOENT instead of a valid buffer.  block_prepare_write expects to
+** be able to do i/o on the buffers returned, unless an error value
+** is also returned.
+** 
+** So, this allows block_prepare_write to be used for reading a single block
+** in a page, where it does not produce a valid page for holes or for blocks
+** past the end of the file.  This turns out to be exactly what we need for reading
+** tails for conversion.
+**
+** The point of the wrapper is forcing a certain value for create, even
+** though the VFS layer is calling this function with create==1.  If you 
+** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block, 
+** don't use this function.
+*/
+static int reiserfs_get_block_create_0 (struct inode * inode, sector_t block,
+			struct buffer_head * bh_result, int create) {
+    return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE) ;
+}
+
+/* This is special helper for reiserfs_get_block in case we are executing
+   direct_IO request. */
+static int reiserfs_get_blocks_direct_io(struct inode *inode,
+					 sector_t iblock,
+					 unsigned long max_blocks,
+					 struct buffer_head *bh_result,
+					 int create)
+{
+    int ret ;
+
+    bh_result->b_page = NULL;
+
+    /* We set the b_size before reiserfs_get_block call since it is
+       referenced in convert_tail_for_hole() that may be called from
+       reiserfs_get_block() */
+    bh_result->b_size = (1 << inode->i_blkbits);
+
+    ret = reiserfs_get_block(inode, iblock, bh_result,
+                             create | GET_BLOCK_NO_DANGLE) ;
+    if (ret)
+        goto out;
+
+    /* don't allow direct io onto tail pages */
+    if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
+        /* make sure future calls to the direct io funcs for this offset
+        ** in the file fail by unmapping the buffer
+        */
+        clear_buffer_mapped(bh_result);
+        ret = -EINVAL ;
+    }
+    /* Possible unpacked tail. Flush the data before pages have
+       disappeared */
+    if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
+        int err;
+        lock_kernel();
+        err = reiserfs_commit_for_inode(inode);
+        REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
+        unlock_kernel();
+        if (err < 0)
+            ret = err;
+    }
+out:
+    return ret ;
+}
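/*
 * Editorial aside (not part of the patch): both _get_block_create_0() and the
 * direct-io helper above use "mapped, but block number 0" as the marker for a
 * packed tail that cannot be written through directly.  Standalone sketch of
 * that check, with hypothetical names:
 */
#include <stdio.h>

struct fake_bh { int mapped; unsigned long blocknr; };

static int is_packed_tail(const struct fake_bh *bh)
{
	return bh->mapped && bh->blocknr == 0;
}

int main(void)
{
	struct fake_bh tail = { 1, 0 }, normal = { 1, 12345 };

	printf("%d %d\n", is_packed_tail(&tail), is_packed_tail(&normal));	/* 1 0 */
	return 0;
}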
+
+
+/*
+** helper function for when reiserfs_get_block is called for a hole
+** but the file tail is still in a direct item
+** bh_result is the buffer head for the hole
+** tail_offset is the offset of the start of the tail in the file
+**
+** This calls prepare_write, which will start a new transaction
+** you should not be in a transaction, or have any paths held when you
+** call this.
+*/
+static int convert_tail_for_hole(struct inode *inode, 
+                                 struct buffer_head *bh_result,
+				 loff_t tail_offset) {
+    unsigned long index ;
+    unsigned long tail_end ; 
+    unsigned long tail_start ;
+    struct page * tail_page ;
+    struct page * hole_page = bh_result->b_page ;
+    int retval = 0 ;
+
+    if ((tail_offset & (bh_result->b_size - 1)) != 1) 
+        return -EIO ;
+
+    /* always try to read until the end of the block */
+    tail_start = tail_offset & (PAGE_CACHE_SIZE - 1) ;
+    tail_end = (tail_start | (bh_result->b_size - 1)) + 1 ;
+
+    index = tail_offset >> PAGE_CACHE_SHIFT ;
+    /* hole_page can be zero in case of direct_io, we are sure
+       that we cannot get here if we write with O_DIRECT into
+       tail page */
+    if (!hole_page || index != hole_page->index) {
+	tail_page = grab_cache_page(inode->i_mapping, index) ;
+	retval = -ENOMEM;
+	if (!tail_page) {
+	    goto out ;
+	}
+    } else {
+        tail_page = hole_page ;
+    }
+
+    /* we don't have to make sure the conversion did not happen while
+    ** we were locking the page because anyone that could convert
+    ** must first take i_sem.
+    **
+    ** We must fix the tail page for writing because it might have buffers
+    ** that are mapped, but have a block number of 0.  This indicates tail
+    ** data that has been read directly into the page, and block_prepare_write
+    ** won't trigger a get_block in this case.
+    */
+    fix_tail_page_for_writing(tail_page) ;
+    retval = reiserfs_prepare_write(NULL, tail_page, tail_start, tail_end);
+    if (retval)
+        goto unlock ;
+
+    /* tail conversion might change the data in the page */
+    flush_dcache_page(tail_page) ;
+
+    retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end) ;
+
+unlock:
+    if (tail_page != hole_page) {
+        unlock_page(tail_page) ;
+	page_cache_release(tail_page) ;
+    }
+out:
+    return retval ;
+}
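/*
 * Editorial aside (not part of the patch): tail_start/tail_end above are pure
 * mask arithmetic - the tail's offset within its page, rounded out to the end
 * of its block.  Standalone check with hypothetical numbers (4K pages, 1K
 * blocks, a tail starting at 1-based byte offset 5121):
 */
#include <assert.h>

int main(void)
{
	unsigned long page_size = 4096, b_size = 1024;
	unsigned long long tail_offset = 5121;		/* satisfies (offset & (b_size-1)) == 1 */

	unsigned long tail_start = tail_offset & (page_size - 1);	/* 1025 */
	unsigned long tail_end = (tail_start | (b_size - 1)) + 1;	/* 2048 */

	assert(tail_start == 1025 && tail_end == 2048);	/* the second 1K block of the page */
	return 0;
}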
+
+static inline int _allocate_block(struct reiserfs_transaction_handle *th,
+			   long block,
+                           struct inode *inode, 
+			   b_blocknr_t *allocated_block_nr, 
+			   struct path * path,
+			   int flags) {
+    BUG_ON (!th->t_trans_id);
+  
+#ifdef REISERFS_PREALLOCATE
+    if (!(flags & GET_BLOCK_NO_ISEM)) {
+	return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr, path, block);
+    }
+#endif
+    return reiserfs_new_unf_blocknrs (th, inode, allocated_block_nr, path, block);
+}
+
+int reiserfs_get_block (struct inode * inode, sector_t block,
+			struct buffer_head * bh_result, int create)
+{
+    int repeat, retval = 0;
+    b_blocknr_t allocated_block_nr = 0;// b_blocknr_t is (unsigned) 32 bit int
+    INITIALIZE_PATH(path);
+    int pos_in_item;
+    struct cpu_key key;
+    struct buffer_head * bh, * unbh = NULL;
+    struct item_head * ih, tmp_ih;
+    __u32 * item;
+    int done;
+    int fs_gen;
+    struct reiserfs_transaction_handle *th = NULL;
+    /* space reserved in transaction batch: 
+        . 3 balancings in direct->indirect conversion
+        . 1 block involved into reiserfs_update_sd()
+       XXX in practically impossible worst case direct2indirect()
+       can incur (much) more than 3 balancings.
+       quota update for user, group */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS;
+    int version;
+    int dangle = 1;
+    loff_t new_offset = (((loff_t)block) << inode->i_sb->s_blocksize_bits) + 1 ;
+
+				/* bad.... */
+    reiserfs_write_lock(inode->i_sb);
+    version = get_inode_item_key_version (inode);
+
+    if (block < 0) {
+	reiserfs_write_unlock(inode->i_sb);
+	return -EIO;
+    }
+
+    if (!file_capable (inode, block)) {
+	reiserfs_write_unlock(inode->i_sb);
+	return -EFBIG;
+    }
+
+    /* if !create, we aren't changing the FS, so we don't need to
+    ** log anything, so we don't need to start a transaction
+    */
+    if (!(create & GET_BLOCK_CREATE)) {
+	int ret ;
+	/* find number of block-th logical block of the file */
+	ret = _get_block_create_0 (inode, block, bh_result, 
+	                           create | GET_BLOCK_READ_DIRECT) ;
+	reiserfs_write_unlock(inode->i_sb);
+	return ret;
+    }
+    /*
+     * if we're already in a transaction, make sure to close
+     * any new transactions we start in this func
+     */
+    if ((create & GET_BLOCK_NO_DANGLE) ||
+        reiserfs_transaction_running(inode->i_sb))
+        dangle = 0;
+
+    /* If the file is of such a size that it might have a tail and tails are
+    ** enabled, we should mark it as possibly needing tail packing on close
+    */
+    if ( (have_large_tails (inode->i_sb) && inode->i_size < i_block_size (inode)*4) ||
+	 (have_small_tails (inode->i_sb) && inode->i_size < i_block_size(inode)) )
+	REISERFS_I(inode)->i_flags |= i_pack_on_close_mask ;
+
+    /* set the key of the first byte in the 'block'-th block of file */
+    make_cpu_key (&key, inode, new_offset,
+		  TYPE_ANY, 3/*key length*/);
+    if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
+start_trans:
+	th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
+	if (!th) {
+	    retval = -ENOMEM;
+	    goto failure;
+	}
+	reiserfs_update_inode_transaction(inode) ;
+    }
+ research:
+
+    retval = search_for_position_by_key (inode->i_sb, &key, &path);
+    if (retval == IO_ERROR) {
+	retval = -EIO;
+	goto failure;
+    }
+	
+    bh = get_last_bh (&path);
+    ih = get_ih (&path);
+    item = get_item (&path);
+    pos_in_item = path.pos_in_item;
+
+    fs_gen = get_generation (inode->i_sb);
+    copy_item_head (&tmp_ih, ih);
+
+    if (allocation_needed (retval, allocated_block_nr, ih, item, pos_in_item)) {
+	/* we have to allocate block for the unformatted node */
+	if (!th) {
+	    pathrelse(&path) ;
+	    goto start_trans;
+	}
+
+	repeat = _allocate_block(th, block, inode, &allocated_block_nr, &path, create);
+
+	if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
+	    /* restart the transaction to give the journal a chance to free
+	    ** some blocks.  releases the path, so we have to go back to
+	    ** research if we succeed on the second try
+	    */
+	    SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
+	    retval = restart_transaction(th, inode, &path) ;
+            if (retval)
+                goto failure;
+	    repeat = _allocate_block(th, block, inode, &allocated_block_nr, NULL, create);
+
+	    if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
+		goto research ;
+	    }
+	    if (repeat == QUOTA_EXCEEDED)
+		retval = -EDQUOT;
+	    else
+		retval = -ENOSPC;
+	    goto failure;
+	}
+
+	if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+	    goto research;
+	}
+    }
+
+    if (indirect_item_found (retval, ih)) {
+        b_blocknr_t unfm_ptr;
+	/* 'block'-th block is in the file already (there is
+	   corresponding cell in some indirect item). But it may be
+	   zero unformatted node pointer (hole) */
+        unfm_ptr = get_block_num (item, pos_in_item);
+	if (unfm_ptr == 0) {
+	    /* use allocated block to plug the hole */
+	    reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+	    if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+		reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+		goto research;
+	    }
+	    set_buffer_new(bh_result);
+	    if (buffer_dirty(bh_result) && reiserfs_data_ordered(inode->i_sb))
+	    	reiserfs_add_ordered_list(inode, bh_result);
+	    put_block_num(item, pos_in_item, allocated_block_nr) ;
+            unfm_ptr = allocated_block_nr;
+	    journal_mark_dirty (th, inode->i_sb, bh);
+	    reiserfs_update_sd(th, inode) ;
+	}
+	set_block_dev_mapped(bh_result, unfm_ptr, inode);
+	pathrelse (&path);
+        retval = 0;
+	if (!dangle && th)
+	    retval = reiserfs_end_persistent_transaction(th);
+
+	reiserfs_write_unlock(inode->i_sb);
+	 
+	/* the item was found, so new blocks were not added to the file
+	** there is no need to make sure the inode is updated with this 
+	** transaction
+	*/
+	return retval;
+    }
+
+    if (!th) {
+	pathrelse(&path) ;
+	goto start_trans;
+    }
+
+    /* desired position is not found or is in the direct item.  We have
+       to append the file with holes up to the 'block'-th block, converting
+       direct items to an indirect one if necessary */
+    done = 0;
+    do {
+	if (is_statdata_le_ih (ih)) {
+	    __u32 unp = 0;
+	    struct cpu_key tmp_key;
+
+	    /* indirect item has to be inserted */
+	    make_le_item_head (&tmp_ih, &key, version, 1, TYPE_INDIRECT, 
+			       UNFM_P_SIZE, 0/* free_space */);
+
+	    if (cpu_key_k_offset (&key) == 1) {
+		/* we are going to add 'block'-th block to the file. Use
+		   allocated block for that */
+		unp = cpu_to_le32 (allocated_block_nr);
+		set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+		set_buffer_new(bh_result);
+		done = 1;
+	    }
+	    tmp_key = key; // ;)
+	    set_cpu_key_k_offset (&tmp_key, 1);
+	    PATH_LAST_POSITION(&path) ++;
+
+	    retval = reiserfs_insert_item (th, &path, &tmp_key, &tmp_ih, inode, (char *)&unp);
+	    if (retval) {
+		reiserfs_free_block (th, inode, allocated_block_nr, 1);
+		goto failure; // retval == -ENOSPC, -EDQUOT or -EIO or -EEXIST
+	    }
+	    //mark_tail_converted (inode);
+	} else if (is_direct_le_ih (ih)) {
+	    /* direct item has to be converted */
+	    loff_t tail_offset;
+
+	    tail_offset = ((le_ih_k_offset (ih) - 1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
+	    if (tail_offset == cpu_key_k_offset (&key)) {
+		/* direct item we just found fits into block we have
+                   to map. Convert it into unformatted node: use
+                   bh_result for the conversion */
+		set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+		unbh = bh_result;
+		done = 1;
+	    } else {
+		/* we have to pad the file tail stored in direct item(s)
+		   up to the block size and convert it to an unformatted
+		   node.  FIXME: this should also get into the page cache */
+
+		pathrelse(&path) ;
+		/*
+		 * ugly, but we can only end the transaction if
+		 * we aren't nested
+		 */
+		BUG_ON (!th->t_refcount);
+		if (th->t_refcount == 1) {
+		    retval = reiserfs_end_persistent_transaction(th);
+		    th = NULL;
+		    if (retval)
+			goto failure;
+		}
+
+		retval = convert_tail_for_hole(inode, bh_result, tail_offset) ;
+		if (retval) {
+		    if ( retval != -ENOSPC )
+			reiserfs_warning (inode->i_sb, "clm-6004: convert tail failed inode %lu, error %d", inode->i_ino, retval) ;
+		    if (allocated_block_nr) {
+			/* the bitmap, the super, and the stat data == 3 */
+			if (!th)
+			    th = reiserfs_persistent_transaction(inode->i_sb,3);
+			if (th)
+			    reiserfs_free_block (th,inode,allocated_block_nr,1);
+		    }
+		    goto failure ;
+		}
+		goto research ;
+	    }
+	    retval = direct2indirect (th, inode, &path, unbh, tail_offset);
+	    if (retval) {
+		reiserfs_unmap_buffer(unbh);
+		reiserfs_free_block (th, inode, allocated_block_nr, 1);
+		goto failure;
+	    }
+	    /* it is important the set_buffer_uptodate is done after
+	    ** the direct2indirect.  The buffer might contain valid
+	    ** data newer than the data on disk (read by readpage, changed,
+	    ** and then sent here by writepage).  direct2indirect needs
+	    ** to know if unbh was already up to date, so it can decide
+	    ** if the data in unbh needs to be replaced with data from
+	    ** the disk
+	    */
+	    set_buffer_uptodate (unbh);
+
+	    /* unbh->b_page == NULL in case of a DIRECT_IO request; this means
+	       the buffer will disappear shortly, so it should not be added to
+	       the tail list */
+	    if ( unbh->b_page ) {
+		/* we've converted the tail, so we must
+		** flush unbh before the transaction commits
+		*/
+		reiserfs_add_tail_list(inode, unbh) ;
+
+		/* mark it dirty now to prevent commit_write from adding
+		** this buffer to the inode's dirty buffer list
+		*/
+		/*
+		 * AKPM: changed __mark_buffer_dirty to mark_buffer_dirty().
+		 * It's still atomic, but it sets the page dirty too,
+		 * which makes it eligible for writeback at any time by the
+		 * VM (which was also the case with __mark_buffer_dirty())
+		 */
+		mark_buffer_dirty(unbh) ;
+	    }
+	} else {
+	    /* append the indirect item with holes if needed; when appending the
+	       pointer to the 'block'-th block, use the block which is already
+	       allocated */
+	    struct cpu_key tmp_key;
+	    unp_t unf_single=0; // We use this in case we need to allocate only
+				// one block which is a fastpath
+	    unp_t *un;
+	    __u64 max_to_insert=MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE;
+	    __u64 blocks_needed;
+
+	    RFALSE( pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
+		    "vs-804: invalid position for append");
+	    /* indirect item has to be appended, set up key of that position */
+	    make_cpu_key (&tmp_key, inode,
+			  le_key_k_offset (version, &(ih->ih_key)) + op_bytes_number (ih, inode->i_sb->s_blocksize),
+			  //pos_in_item * inode->i_sb->s_blocksize,
+			  TYPE_INDIRECT, 3);// key type is unimportant
+
+	    blocks_needed = 1 + ((cpu_key_k_offset (&key) - cpu_key_k_offset (&tmp_key)) >> inode->i_sb->s_blocksize_bits);
+	    RFALSE( blocks_needed < 0, "green-805: invalid offset");
+
+	    if ( blocks_needed == 1 ) {
+		un = &unf_single;
+	    } else {
+		un=kmalloc( min(blocks_needed,max_to_insert)*UNFM_P_SIZE,
+			    GFP_ATOMIC); // We need to avoid scheduling.
+		if ( !un) {
+		    un = &unf_single;
+		    blocks_needed = 1;
+		    max_to_insert = 0;
+		} else
+		    memset(un, 0, UNFM_P_SIZE * min(blocks_needed,max_to_insert));
+	    }
+	    if ( blocks_needed <= max_to_insert) {
+		/* we are going to add target block to the file. Use allocated
+		   block for that */
+		un[blocks_needed-1] = cpu_to_le32 (allocated_block_nr);
+		set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+		set_buffer_new(bh_result);
+		done = 1;
+	    } else {
+		/* paste hole to the indirect item */
+		/* If kmalloc failed, max_to_insert becomes zero and it means we
+		   only have space for one block */
+		blocks_needed=max_to_insert?max_to_insert:1;
+	    }
+	    retval = reiserfs_paste_into_item (th, &path, &tmp_key, inode, (char *)un, UNFM_P_SIZE * blocks_needed);
+
+	    if (blocks_needed != 1)
+		kfree(un);
+
+	    if (retval) {
+		reiserfs_free_block (th, inode, allocated_block_nr, 1);
+		goto failure;
+	    }
+	    if (!done) {
+		/* We need to record the new file size in case this function is
+		   interrupted/aborted later on.  And we may do this only for
+		   holes. */
+		inode->i_size += inode->i_sb->s_blocksize * blocks_needed;
+	    }
+	}
+
+	if (done == 1)
+	    break;
+
+	/* this loop could log more blocks than we had originally asked
+	** for.  So, we have to allow the transaction to end if it is
+	** too big or too full.  Update the inode so things are 
+	** consistent if we crash before the function returns
+	**
+	** release the path so that anybody waiting on the path before
+	** ending their transaction will be able to continue.
+	*/
+	if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
+	  retval = restart_transaction(th, inode, &path) ;
+	  if (retval)
+	    goto failure;
+	}
+	/* inserting indirect pointers for a hole can take a 
+	** long time.  reschedule if needed
+	*/
+	cond_resched();
+
+	retval = search_for_position_by_key (inode->i_sb, &key, &path);
+	if (retval == IO_ERROR) {
+	    retval = -EIO;
+	    goto failure;
+	}
+	if (retval == POSITION_FOUND) {
+	    reiserfs_warning (inode->i_sb, "vs-825: reiserfs_get_block: "
+			      "%K should not be found", &key);
+	    retval = -EEXIST;
+	    if (allocated_block_nr)
+	        reiserfs_free_block (th, inode, allocated_block_nr, 1);
+	    pathrelse(&path) ;
+	    goto failure;
+	}
+	bh = get_last_bh (&path);
+	ih = get_ih (&path);
+	item = get_item (&path);
+	pos_in_item = path.pos_in_item;
+    } while (1);
+
+
+    retval = 0;
+
+ failure:
+    if (th && (!dangle || (retval && !th->t_trans_id))) {
+        int err;
+        if (th->t_trans_id)
+            reiserfs_update_sd(th, inode);
+        err = reiserfs_end_persistent_transaction(th);
+        if (err)
+            retval = err;
+    }
+
+    reiserfs_write_unlock(inode->i_sb);
+    reiserfs_check_path(&path) ;
+    return retval;
+}
+
+static int
+reiserfs_readpages(struct file *file, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+    return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
+}
+
+/* Compute the real number of bytes used by a file.
+ * The following three functions can go away when we have enough space in the stat item
+ */
+static int real_space_diff(struct inode *inode, int sd_size)
+{
+    int bytes;
+    loff_t blocksize = inode->i_sb->s_blocksize ;
+
+    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
+        return sd_size ;
+
+    /* The end of the file is also in a full block with an indirect reference,
+    ** so round up to the next block.
+    **
+    ** there is just no way to know if the tail is actually packed
+    ** on the file, so we have to assume it isn't.  When we pack the
+    ** tail, we add 4 bytes to pretend there really is an unformatted
+    ** node pointer
+    */
+    bytes = ((inode->i_size + (blocksize-1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE + sd_size;
+    return bytes ;
+}
+
+static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
+                                        int sd_size)
+{
+    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
+        return inode->i_size + (loff_t)(real_space_diff(inode, sd_size)) ;
+    }
+    return ((loff_t)real_space_diff(inode, sd_size)) + (((loff_t)blocks) << 9);
+}
+
+/* Compute number of blocks used by file in ReiserFS counting */
+static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
+{
+    loff_t bytes = inode_get_bytes(inode) ;
+    loff_t real_space = real_space_diff(inode, sd_size) ;
+
+    /* keeps fsck and non-quota versions of reiserfs happy */
+    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
+        bytes += (loff_t)511 ;
+    }
+
+    /* files from before the quota patch might have i_blocks such that
+    ** bytes < real_space.  Deal with that here to prevent it from
+    ** going negative.
+    */
+    if (bytes < real_space)
+        return 0 ;
+    return (bytes - real_space) >> 9;
+}
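/*
 * Editorial aside (not part of the patch): for a regular file the "real
 * space" overhead counted above is one unformatted-node pointer (4 bytes) per
 * block of data plus the stat data item, and to_fake_used_blocks() converts
 * the byte count back into 512-byte sectors.  Standalone arithmetic with an
 * assumed sd_size:
 */
#include <assert.h>

int main(void)
{
	unsigned long blocksize = 4096, unfm_p_size = 4, sd_size = 44;	/* sd_size is assumed */
	unsigned long long i_size = 10000;				/* needs 3 data blocks */

	unsigned long long data_blocks = (i_size + blocksize - 1) / blocksize;		/* 3 */
	unsigned long long real_space = data_blocks * unfm_p_size + sd_size;		/* 56 */

	/* the inode's byte count carries data plus overhead; strip the overhead again */
	unsigned long long i_bytes = 3 * blocksize + real_space;
	assert(((i_bytes - real_space) >> 9) == 24);	/* 3 * 4096 / 512 sectors */
	return 0;
}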
+
+//
+// BAD: new directories have stat data of new type and all other items
+// of old type. The version stored in the inode describes the body items, so
+// in update_stat_data we can not rely on the inode, but have to check the
+// item version directly
+//
+
+// called by read_locked_inode
+static void init_inode (struct inode * inode, struct path * path)
+{
+    struct buffer_head * bh;
+    struct item_head * ih;
+    __u32 rdev;
+    //int version = ITEM_VERSION_1;
+
+    bh = PATH_PLAST_BUFFER (path);
+    ih = PATH_PITEM_HEAD (path);
+
+
+    copy_key (INODE_PKEY (inode), &(ih->ih_key));
+    inode->i_blksize = reiserfs_default_io_size;
+
+    INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list ));
+    REISERFS_I(inode)->i_flags = 0;
+    REISERFS_I(inode)->i_prealloc_block = 0;
+    REISERFS_I(inode)->i_prealloc_count = 0;
+    REISERFS_I(inode)->i_trans_id = 0;
+    REISERFS_I(inode)->i_jl = NULL;
+    REISERFS_I(inode)->i_acl_access = NULL;
+    REISERFS_I(inode)->i_acl_default = NULL;
+    init_rwsem (&REISERFS_I(inode)->xattr_sem);
+
+    if (stat_data_v1 (ih)) {
+	struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
+	unsigned long blocks;
+
+	set_inode_item_key_version (inode, KEY_FORMAT_3_5);
+        set_inode_sd_version (inode, STAT_DATA_V1);
+	inode->i_mode  = sd_v1_mode(sd);
+	inode->i_nlink = sd_v1_nlink(sd);
+	inode->i_uid   = sd_v1_uid(sd);
+	inode->i_gid   = sd_v1_gid(sd);
+	inode->i_size  = sd_v1_size(sd);
+	inode->i_atime.tv_sec = sd_v1_atime(sd);
+	inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+	inode->i_ctime.tv_sec = sd_v1_ctime(sd);
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+
+	inode->i_blocks = sd_v1_blocks(sd);
+	inode->i_generation = le32_to_cpu (INODE_PKEY (inode)->k_dir_id);
+	blocks = (inode->i_size + 511) >> 9;
+	blocks = _ROUND_UP (blocks, inode->i_sb->s_blocksize >> 9);
+	if (inode->i_blocks > blocks) {
+	    // there was a bug in <=3.5.23 when i_blocks could take negative
+	    // values. Starting from 3.5.17 this value could even be stored in
+	    // stat data. For such files we set i_blocks based on the file
+	    // size. Two notes: this can be wrong for sparse files, and the on-disk
+	    // value will only be updated if the file's inode ever changes
+	    inode->i_blocks = blocks;
+	}
+
+        rdev = sd_v1_rdev(sd);
+	REISERFS_I(inode)->i_first_direct_byte = sd_v1_first_direct_byte(sd);
+	/* an early bug in the quota code can give us an odd number for the
+	** block count.  This is incorrect, fix it here.
+	*/
+	if (inode->i_blocks & 1) {
+	    inode->i_blocks++ ;
+	}
+	inode_set_bytes(inode, to_real_used_space(inode, inode->i_blocks,
+	                                          SD_V1_SIZE));
+	/* nopack is initially zero for v1 objects. For v2 objects,
+	   nopack is initialised from sd_attrs */
+	REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
+    } else {
+	// new stat data found, but object may have old items
+	// (directories and symlinks)
+	struct stat_data * sd = (struct stat_data *)B_I_PITEM (bh, ih);
+
+	inode->i_mode   = sd_v2_mode(sd);
+	inode->i_nlink  = sd_v2_nlink(sd);
+	inode->i_uid    = sd_v2_uid(sd);
+	inode->i_size   = sd_v2_size(sd);
+	inode->i_gid    = sd_v2_gid(sd);
+	inode->i_mtime.tv_sec  = sd_v2_mtime(sd);
+	inode->i_atime.tv_sec = sd_v2_atime(sd);
+	inode->i_ctime.tv_sec  = sd_v2_ctime(sd);
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_blocks = sd_v2_blocks(sd);
+        rdev            = sd_v2_rdev(sd);
+	if( S_ISCHR( inode -> i_mode ) || S_ISBLK( inode -> i_mode ) )
+	    inode->i_generation = le32_to_cpu (INODE_PKEY (inode)->k_dir_id);
+	else
+            inode->i_generation = sd_v2_generation(sd);
+
+	if (S_ISDIR (inode->i_mode) || S_ISLNK (inode->i_mode))
+	    set_inode_item_key_version (inode, KEY_FORMAT_3_5);
+	else
+	    set_inode_item_key_version (inode, KEY_FORMAT_3_6);
+	REISERFS_I(inode)->i_first_direct_byte = 0;
+	set_inode_sd_version (inode, STAT_DATA_V2);
+	inode_set_bytes(inode, to_real_used_space(inode, inode->i_blocks,
+	                                          SD_V2_SIZE));
+	/* read persistent inode attributes from sd and initialise
+	   generic inode flags from them */
+	REISERFS_I(inode)->i_attrs = sd_v2_attrs( sd );
+	sd_attrs_to_i_attrs( sd_v2_attrs( sd ), inode );
+    }
+
+    pathrelse (path);
+    if (S_ISREG (inode->i_mode)) {
+	inode->i_op = &reiserfs_file_inode_operations;
+	inode->i_fop = &reiserfs_file_operations;
+	inode->i_mapping->a_ops = &reiserfs_address_space_operations ;
+    } else if (S_ISDIR (inode->i_mode)) {
+	inode->i_op = &reiserfs_dir_inode_operations;
+	inode->i_fop = &reiserfs_dir_operations;
+    } else if (S_ISLNK (inode->i_mode)) {
+	inode->i_op = &reiserfs_symlink_inode_operations;
+	inode->i_mapping->a_ops = &reiserfs_address_space_operations;
+    } else {
+	inode->i_blocks = 0;
+	inode->i_op = &reiserfs_special_inode_operations;
+	init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+    }
+}
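/*
 * Editorial aside (not part of the patch): the v1 branch above recomputes a
 * plausible i_blocks from i_size - 512-byte sectors, rounded up to a whole
 * filesystem block - and clamps bogus on-disk values (from the old <=3.5.23
 * bug) down to it.  Standalone check with hypothetical numbers:
 */
#include <assert.h>

int main(void)
{
	unsigned long long i_size = 10000;
	unsigned long sectors_per_block = 4096 >> 9;		/* 8 sectors per 4K block */

	unsigned long blocks = (i_size + 511) >> 9;		/* 20 sectors */
	blocks = (blocks + sectors_per_block - 1) / sectors_per_block * sectors_per_block;	/* 24 */

	unsigned long bad_on_disk = 4000000000UL;		/* garbage value from the old bug */
	if (bad_on_disk > blocks)
		bad_on_disk = blocks;
	assert(bad_on_disk == 24);
	return 0;
}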
+
+
+// update new stat data with inode fields
+static void inode2sd (void * sd, struct inode * inode, loff_t size)
+{
+    struct stat_data * sd_v2 = (struct stat_data *)sd;
+    __u16 flags;
+
+    set_sd_v2_mode(sd_v2, inode->i_mode );
+    set_sd_v2_nlink(sd_v2, inode->i_nlink );
+    set_sd_v2_uid(sd_v2, inode->i_uid );
+    set_sd_v2_size(sd_v2, size );
+    set_sd_v2_gid(sd_v2, inode->i_gid );
+    set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec );
+    set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec );
+    set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec );
+    set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
+    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+	set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
+    else
+	set_sd_v2_generation(sd_v2, inode->i_generation);
+    flags = REISERFS_I(inode)->i_attrs;
+    i_attrs_to_sd_attrs( inode, &flags );
+    set_sd_v2_attrs( sd_v2, flags );
+}
+
+
+// used to copy inode's fields to old stat data
+static void inode2sd_v1 (void * sd, struct inode * inode, loff_t size)
+{
+    struct stat_data_v1 * sd_v1 = (struct stat_data_v1 *)sd;
+
+    set_sd_v1_mode(sd_v1, inode->i_mode );
+    set_sd_v1_uid(sd_v1, inode->i_uid );
+    set_sd_v1_gid(sd_v1, inode->i_gid );
+    set_sd_v1_nlink(sd_v1, inode->i_nlink );
+    set_sd_v1_size(sd_v1, size );
+    set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec );
+    set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec );
+    set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec );
+
+    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+        set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
+    else
+        set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
+
+    // Sigh. i_first_direct_byte is back
+    set_sd_v1_first_direct_byte(sd_v1, REISERFS_I(inode)->i_first_direct_byte);
+}
+
+
+/* NOTE, you must prepare the buffer head before sending it here,
+** and then log it after the call
+*/
+static void update_stat_data (struct path * path, struct inode * inode,
+                              loff_t size)
+{
+    struct buffer_head * bh;
+    struct item_head * ih;
+  
+    bh = PATH_PLAST_BUFFER (path);
+    ih = PATH_PITEM_HEAD (path);
+
+    if (!is_statdata_le_ih (ih))
+	reiserfs_panic (inode->i_sb, "vs-13065: update_stat_data: key %k, found item %h",
+			INODE_PKEY (inode), ih);
+  
+    if (stat_data_v1 (ih)) {
+	// path points to old stat data
+	inode2sd_v1 (B_I_PITEM (bh, ih), inode, size);
+    } else {
+	inode2sd (B_I_PITEM (bh, ih), inode, size);
+    }
+
+    return;
+}
+
+
+void reiserfs_update_sd_size (struct reiserfs_transaction_handle *th,
+			      struct inode * inode, loff_t size)
+{
+    struct cpu_key key;
+    INITIALIZE_PATH(path);
+    struct buffer_head *bh ;
+    int fs_gen ;
+    struct item_head *ih, tmp_ih ;
+    int retval;
+
+    BUG_ON (!th->t_trans_id);
+
+    make_cpu_key (&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);//key type is unimportant
+    
+    for(;;) {
+	int pos;
+	/* look for the object's stat data */
+	retval = search_item (inode->i_sb, &key, &path);
+	if (retval == IO_ERROR) {
+	    reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: "
+			      "i/o failure occurred trying to update %K stat data",
+			      &key);
+	    return;
+	}
+	if (retval == ITEM_NOT_FOUND) {
+	    pos = PATH_LAST_POSITION (&path);
+	    pathrelse(&path) ;
+	    if (inode->i_nlink == 0) {
+		/*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found");*/
+		return;
+	    }
+	    reiserfs_warning (inode->i_sb, "vs-13060: reiserfs_update_sd: "
+			      "stat data of object %k (nlink == %d) not found (pos %d)",
+			      INODE_PKEY (inode), inode->i_nlink, pos);
+	    reiserfs_check_path(&path) ;
+	    return;
+	}
+	
+	/* sigh, prepare_for_journal might schedule.  When it schedules the
+	** FS might change.  We have to detect that, and loop back to the
+	** search if the stat data item has moved
+	*/
+	bh = get_last_bh(&path) ;
+	ih = get_ih(&path) ;
+	copy_item_head (&tmp_ih, ih);
+	fs_gen = get_generation (inode->i_sb);
+	reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+	if (fs_changed (fs_gen, inode->i_sb) && item_moved(&tmp_ih, &path)) {
+	    reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+	    continue ;	/* Stat_data item has been moved after scheduling. */
+	}
+	break;
+    }
+    update_stat_data (&path, inode, size);
+    journal_mark_dirty(th, th->t_super, bh) ; 
+    pathrelse (&path);
+    return;
+}
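/*
 * Editorial aside (not part of the patch): the loop above is the usual
 * reiserfs "sample the generation, do the blocking step, re-check" pattern -
 * prepare_for_journal() may schedule, so if the filesystem generation changed
 * and the item moved, the search is simply redone.  Standalone sketch of the
 * retry shape with a fake generation counter:
 */
#include <stdio.h>

static unsigned long fs_generation;	/* hypothetical stand-in for the per-sb counter */

static int fs_changed(unsigned long sampled)
{
	return sampled != fs_generation;
}

int main(void)
{
	int retries = 0;

	for (;;) {
		unsigned long gen = fs_generation;	/* sample before the blocking step */
		if (retries == 0)
			fs_generation++;		/* pretend the blocking step let the tree change */
		if (fs_changed(gen)) {			/* the item may have moved: search again */
			retries++;
			continue;
		}
		break;					/* generation stable: safe to modify and log */
	}
	printf("retries=%d\n", retries);		/* prints retries=1 */
	return 0;
}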
+
+/* reiserfs_read_locked_inode is called to read the inode off disk, and it
+** does a make_bad_inode when things go wrong.  But, we need to make sure
+** to clear the key in the private portion of the inode, otherwise a
+** corresponding iput might try to delete whatever object the inode last
+** represented.
+*/
+static void reiserfs_make_bad_inode(struct inode *inode) {
+    memset(INODE_PKEY(inode), 0, KEY_SIZE);
+    make_bad_inode(inode);
+}
+
+//
+// initially this function was derived from minix or ext2's analog and
+// evolved as the prototype did
+//
+
+int reiserfs_init_locked_inode (struct inode * inode, void *p)
+{
+    struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p ;
+    inode->i_ino = args->objectid;
+    INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
+    return 0;
+}
+
+/* looks for the stat data in the tree, and fills in the fields of the
+   in-core inode from it */
+void reiserfs_read_locked_inode (struct inode * inode, struct reiserfs_iget_args *args)
+{
+    INITIALIZE_PATH (path_to_sd);
+    struct cpu_key key;
+    unsigned long dirino;
+    int retval;
+
+    dirino = args->dirid ;
+
+    /* set version 1, version 2 could be used too, because stat data
+       key is the same in both versions */
+    key.version = KEY_FORMAT_3_5;
+    key.on_disk_key.k_dir_id = dirino;
+    key.on_disk_key.k_objectid = inode->i_ino;
+    key.on_disk_key.u.k_offset_v1.k_offset = SD_OFFSET;
+    key.on_disk_key.u.k_offset_v1.k_uniqueness = SD_UNIQUENESS;
+
+    /* look for the object's stat data */
+    retval = search_item (inode->i_sb, &key, &path_to_sd);
+    if (retval == IO_ERROR) {
+	reiserfs_warning (inode->i_sb, "vs-13070: reiserfs_read_locked_inode: "
+			  "i/o failure occurred trying to find stat data of %K",
+			  &key);
+	reiserfs_make_bad_inode(inode) ;
+	return;
+    }
+    if (retval != ITEM_FOUND) {
+	/* a stale NFS handle can trigger this without it being an error */
+	pathrelse (&path_to_sd);
+	reiserfs_make_bad_inode(inode) ;
+	inode->i_nlink = 0;
+	return;
+    }
+
+    init_inode (inode, &path_to_sd);
+   
+    /* It is possible that knfsd is trying to access inode of a file
+       that is being removed from the disk by some other thread. As we
+       update sd on unlink all that is required is to check for nlink
+       here. This bug was first found by Sizif when debugging
+       SquidNG/Butterfly, forgotten, and found again after Philippe
+       Gramoulle <philippe.gramoulle@mmania.com> reproduced it. 
+
+       A more logical fix would require changes in fs/inode.c:iput() to
+       remove inode from hash-table _after_ fs cleaned disk stuff up and
+       in iget() to return NULL if I_FREEING inode is found in
+       hash-table. */
+    /* Currently there is one place where it's ok to meet inode with
+       nlink==0: processing of open-unlinked and half-truncated files
+       during mount (fs/reiserfs/super.c:finish_unfinished()). */
+    if( ( inode -> i_nlink == 0 ) && 
+	! REISERFS_SB(inode -> i_sb) -> s_is_unlinked_ok ) {
+	    reiserfs_warning (inode->i_sb,
+			      "vs-13075: reiserfs_read_locked_inode: "
+			      "dead inode read from disk %K. "
+			      "This is likely to be race with knfsd. Ignore",
+			      &key );
+	    reiserfs_make_bad_inode( inode );
+    }
+
+    reiserfs_check_path(&path_to_sd) ; /* init_inode should be releasing the path */
+
+}
+
+/**
+ * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
+ *
+ * @inode:    inode from hash table to check
+ * @opaque:   "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
+ *
+ * This function is called by iget5_locked() to distinguish reiserfs inodes
+ * having the same inode numbers. Such inodes can only exist due to some
+ * error condition. One of them should be bad. Inodes with identical
+ * inode numbers (objectids) are distinguished by parent directory ids.
+ *
+ */
+int reiserfs_find_actor( struct inode *inode, void *opaque )
+{
+    struct reiserfs_iget_args *args;
+
+    args = opaque;
+    /* args is already in CPU order */
+    return (inode->i_ino == args->objectid) &&
+	(le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
+}
+
+struct inode * reiserfs_iget (struct super_block * s, const struct cpu_key * key)
+{
+    struct inode * inode;
+    struct reiserfs_iget_args args ;
+
+    args.objectid = key->on_disk_key.k_objectid ;
+    args.dirid = key->on_disk_key.k_dir_id ;
+    inode = iget5_locked (s, key->on_disk_key.k_objectid, 
+		   reiserfs_find_actor, reiserfs_init_locked_inode, (void *)(&args));
+    if (!inode) 
+	return ERR_PTR(-ENOMEM) ;
+
+    if (inode->i_state & I_NEW) {
+	reiserfs_read_locked_inode(inode, &args);
+	unlock_new_inode(inode);
+    }
+
+    if (comp_short_keys (INODE_PKEY (inode), key) || is_bad_inode (inode)) {
+	/* either due to i/o error or a stale NFS handle */
+	iput (inode);
+	inode = NULL;
+    }
+    return inode;
+}
+
+struct dentry *reiserfs_get_dentry(struct super_block *sb, void *vobjp)
+{
+    __u32 *data = vobjp;
+    struct cpu_key key ;
+    struct dentry *result;
+    struct inode *inode;
+    
+    key.on_disk_key.k_objectid = data[0] ;
+    key.on_disk_key.k_dir_id = data[1] ;
+    reiserfs_write_lock(sb);
+    inode = reiserfs_iget(sb, &key) ;
+    if (inode && !IS_ERR(inode) && data[2] != 0 &&
+	data[2] != inode->i_generation) {
+	    iput(inode) ;
+	    inode = NULL ;
+    }
+    reiserfs_write_unlock(sb);
+    if (!inode)
+	    inode = ERR_PTR(-ESTALE);
+    if (IS_ERR(inode))
+	    return ERR_PTR(PTR_ERR(inode));
+    result = d_alloc_anon(inode);
+    if (!result) {
+	    iput(inode);
+	    return ERR_PTR(-ENOMEM);
+    }
+    return result;
+}
+
+struct dentry *reiserfs_decode_fh(struct super_block *sb, __u32 *data,
+                                     int len, int fhtype,
+				  int (*acceptable)(void *contect, struct dentry *de),
+				  void *context) {
+    __u32 obj[3], parent[3];
+
+    /* fhtype happens to reflect the number of u32s encoded.
+     * Due to a bug in earlier code, fhtype might indicate there
+     * are more u32s than were actually encoded, so if fhtype
+     * seems to be more than len, reduce fhtype.
+     * Valid types are:
+     *   2 - objectid + dir_id - legacy support
+     *   3 - objectid + dir_id + generation
+     *   4 - objectid + dir_id + objectid and dirid of parent - legacy
+     *   5 - objectid + dir_id + generation + objectid and dirid of parent
+     *   6 - as above plus generation of directory
+     * 6 does not fit in NFSv2 handles
+     */
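+    /* For illustration (layout inferred from the decode code below, not part
+     * of the original comment): a type-5 handle is laid out as
+     *   data[0] = objectid, data[1] = dir_id, data[2] = generation,
+     *   data[3] = parent objectid, data[4] = parent dir_id,
+     * and a type-6 handle additionally carries data[5] = parent generation. */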
+    if (fhtype > len) {
+	    if (fhtype != 6 || len != 5)
+		    reiserfs_warning (sb, "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+			   fhtype, len);
+	    fhtype = 5;
+    }
+
+    obj[0] = data[0];
+    obj[1] = data[1];
+    if (fhtype == 3 || fhtype >= 5)
+	    obj[2] = data[2];
+    else    obj[2] = 0; /* generation number */
+
+    if (fhtype >= 4) {
+	    parent[0] = data[fhtype>=5?3:2] ;
+	    parent[1] = data[fhtype>=5?4:3] ;
+	    if (fhtype == 6)
+		    parent[2] = data[5];
+	    else    parent[2] = 0;
+    }
+    return sb->s_export_op->find_exported_dentry(sb, obj, fhtype < 4 ? NULL : parent,
+			       acceptable, context);
+}
+
+int reiserfs_encode_fh(struct dentry *dentry, __u32 *data, int *lenp, int need_parent) {
+    struct inode *inode = dentry->d_inode ;
+    int maxlen = *lenp;
+    
+    if (maxlen < 3)
+        return 255 ;
+
+    data[0] = inode->i_ino ;
+    data[1] = le32_to_cpu(INODE_PKEY (inode)->k_dir_id) ;
+    data[2] = inode->i_generation ;
+    *lenp = 3 ;
+    /* no room for directory info? return what we've stored so far */
+    if (maxlen < 5 || ! need_parent)
+        return 3 ;
+
+    spin_lock(&dentry->d_lock);
+    inode = dentry->d_parent->d_inode ;
+    data[3] = inode->i_ino ;
+    data[4] = le32_to_cpu(INODE_PKEY (inode)->k_dir_id) ;
+    *lenp = 5 ;
+    if (maxlen >= 6) {
+	    data[5] = inode->i_generation ;
+	    *lenp = 6 ;
+    }
+    spin_unlock(&dentry->d_lock);
+    return *lenp ;
+}
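+
+/* A rough round-trip sketch (illustrative, not part of the original patch):
+ * with need_parent set and maxlen >= 6, reiserfs_encode_fh() above fills
+ *   { i_ino, dir_id, i_generation, parent i_ino, parent dir_id,
+ *     parent i_generation }
+ * and returns 6, which reiserfs_decode_fh() treats as fhtype 6; when an NFSv2
+ * handle only has room for 5 words, the fhtype > len fallback in the decoder
+ * drops it back to type 5. */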
+
+
+/* looks for stat data, then copies fields to it, marks the buffer
+   containing stat data as dirty */
+/* reiserfs inodes are never really dirty, since the dirty inode call
+** always logs them.  This call allows the VFS inode marking routines
+** to properly mark inodes for datasync and such, but only actually
+** does something when called for a synchronous update.
+*/
+int reiserfs_write_inode (struct inode * inode, int do_sync) {
+    struct reiserfs_transaction_handle th ;
+    int jbegin_count = 1 ;
+
+    if (inode->i_sb->s_flags & MS_RDONLY)
+        return -EROFS;
+    /* memory pressure can sometimes initiate write_inode calls with sync == 1;
+    ** these cases arise just when the system needs ram, not when the
+    ** inode needs to reach disk for safety, and they can safely be
+    ** ignored because the altered inode has already been logged.
+    */
+    if (do_sync && !(current->flags & PF_MEMALLOC)) {
+	reiserfs_write_lock(inode->i_sb);
+	if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
+            reiserfs_update_sd (&th, inode);
+            journal_end_sync(&th, inode->i_sb, jbegin_count) ;
+        }
+	reiserfs_write_unlock(inode->i_sb);
+    }
+    return 0;
+}
+
+/* stat data of new object is inserted already, this inserts the item
+   containing "." and ".." entries */
+static int reiserfs_new_directory (struct reiserfs_transaction_handle *th, 
+				   struct inode *inode,
+				   struct item_head * ih, struct path * path,
+				   struct inode * dir)
+{
+    struct super_block * sb = th->t_super;
+    char empty_dir [EMPTY_DIR_SIZE];
+    char * body = empty_dir;
+    struct cpu_key key;
+    int retval;
+
+    BUG_ON (!th->t_trans_id);
+    
+    _make_cpu_key (&key, KEY_FORMAT_3_5, le32_to_cpu (ih->ih_key.k_dir_id),
+		   le32_to_cpu (ih->ih_key.k_objectid), DOT_OFFSET, TYPE_DIRENTRY, 3/*key length*/);
+    
+    /* compose item head for new item. Directories consist of items of
+       old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
+       is done by reiserfs_new_inode */
+    if (old_format_only (sb)) {
+	make_le_item_head (ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
+	
+	make_empty_dir_item_v1 (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
+				INODE_PKEY (dir)->k_dir_id, 
+				INODE_PKEY (dir)->k_objectid );
+    } else {
+	make_le_item_head (ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
+	
+	make_empty_dir_item (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
+		   		INODE_PKEY (dir)->k_dir_id, 
+		   		INODE_PKEY (dir)->k_objectid );
+    }
+    
+    /* look for place in the tree for new item */
+    retval = search_item (sb, &key, path);
+    if (retval == IO_ERROR) {
+	reiserfs_warning (sb, "vs-13080: reiserfs_new_directory: "
+			  "i/o failure occurred creating new directory");
+	return -EIO;
+    }
+    if (retval == ITEM_FOUND) {
+	pathrelse (path);
+	reiserfs_warning (sb, "vs-13070: reiserfs_new_directory: "
+			  "object with this key exists (%k)", &(ih->ih_key));
+	return -EEXIST;
+    }
+
+    /* insert item, that is empty directory item */
+    return reiserfs_insert_item (th, path, &key, ih, inode, body);
+}
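+
+/* For illustration (inferred from the arguments above, not from the original
+ * comments): the empty directory item built here presumably carries the two
+ * default entries, "." keyed by the new directory's own (k_dir_id, k_objectid)
+ * and ".." keyed by the parent's key taken from INODE_PKEY(dir). */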
+
+
+/* stat data of object has been inserted, this inserts the item
+   containing the body of symlink */
+static int reiserfs_new_symlink (struct reiserfs_transaction_handle *th, 
+				 struct inode *inode,	/* Inode of symlink */
+				 struct item_head * ih,
+				 struct path * path, const char * symname, int item_len)
+{
+    struct super_block * sb = th->t_super;
+    struct cpu_key key;
+    int retval;
+
+    BUG_ON (!th->t_trans_id);
+
+    _make_cpu_key (&key, KEY_FORMAT_3_5, 
+		   le32_to_cpu (ih->ih_key.k_dir_id), 
+		   le32_to_cpu (ih->ih_key.k_objectid),
+		   1, TYPE_DIRECT, 3/*key length*/);
+
+    make_le_item_head (ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len, 0/*free_space*/);
+
+    /* look for place in the tree for new item */
+    retval = search_item (sb, &key, path);
+    if (retval == IO_ERROR) {
+	reiserfs_warning (sb, "vs-13080: reiserfs_new_symlink: "
+			  "i/o failure occurred creating new symlink");
+	return -EIO;
+    }
+    if (retval == ITEM_FOUND) {
+	pathrelse (path);
+	reiserfs_warning (sb, "vs-13080: reiserfs_new_symlink: "
+			  "object with this key exists (%k)", &(ih->ih_key));
+	return -EEXIST;
+    }
+
+    /* insert item, that is body of symlink */
+    return reiserfs_insert_item (th, path, &key, ih, inode, symname);
+}
+
+
+/* inserts the stat data into the tree, and then calls
+   reiserfs_new_directory (to insert ".", ".." item if new object is
+   directory) or reiserfs_new_symlink (to insert symlink body if new
+   object is symlink) or nothing (if new object is regular file) 
+
+   NOTE! uid and gid must already be set in the inode.  If we return
+   non-zero due to an error, we have to drop the quota previously allocated
+   for the fresh inode.  This can only be done outside a transaction, so
+   if we return non-zero, we also end the transaction.  */
+int reiserfs_new_inode (struct reiserfs_transaction_handle *th,
+			struct inode * dir, int mode, 
+			const char * symname, 
+                        /* 0 for regular files, EMPTY_DIR_SIZE for dirs,
+			   strlen (symname) for symlinks */
+		         loff_t i_size, struct dentry *dentry, 
+			 struct inode *inode)
+{
+    struct super_block * sb;
+    INITIALIZE_PATH (path_to_key);
+    struct cpu_key key;
+    struct item_head ih;
+    struct stat_data sd;
+    int retval;
+    int err;
+
+    BUG_ON (!th->t_trans_id);
+  
+    if (DQUOT_ALLOC_INODE(inode)) {
+	err = -EDQUOT;
+	goto out_end_trans;
+    }
+    if (!dir || !dir->i_nlink) {
+	err = -EPERM;
+	goto out_bad_inode;
+    }
+
+    sb = dir->i_sb;
+
+    /* item head of new item */
+    ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
+    ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th));
+    if (!ih.ih_key.k_objectid) {
+	err = -ENOMEM;
+	goto out_bad_inode ;
+    }
+    if (old_format_only (sb))
+	/* not a perfect generation count, as object ids can be reused, but 
+	** this is as good as reiserfs can do right now.
+	** note that the private part of inode isn't filled in yet, we have
+	** to use the directory.
+	*/
+	inode->i_generation = le32_to_cpu (INODE_PKEY (dir)->k_objectid);
+    else
+#if defined( USE_INODE_GENERATION_COUNTER )
+	inode->i_generation = le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
+#else
+	inode->i_generation = ++event;
+#endif
+
+    /* fill stat data */
+    inode->i_nlink = (S_ISDIR (mode) ? 2 : 1);
+
+    /* uid and gid must already be set by the caller for quota init */
+
+    /* symlink cannot be immutable or append only, right? */
+    if( S_ISLNK( inode -> i_mode ) )
+	    inode -> i_flags &= ~ ( S_IMMUTABLE | S_APPEND );
+
+    inode->i_mtime = inode->i_atime = inode->i_ctime =
+	    CURRENT_TIME_SEC;
+    inode->i_size = i_size;
+    inode->i_blocks = 0;
+    inode->i_bytes = 0;
+    REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 : 
+      U32_MAX/*NO_BYTES_IN_DIRECT_ITEM*/;
+
+    INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list ));
+    REISERFS_I(inode)->i_flags = 0;
+    REISERFS_I(inode)->i_prealloc_block = 0;
+    REISERFS_I(inode)->i_prealloc_count = 0;
+    REISERFS_I(inode)->i_trans_id = 0;
+    REISERFS_I(inode)->i_jl = NULL;
+    REISERFS_I(inode)->i_attrs =
+	REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
+    sd_attrs_to_i_attrs( REISERFS_I(inode) -> i_attrs, inode );
+    REISERFS_I(inode)->i_acl_access = NULL;
+    REISERFS_I(inode)->i_acl_default = NULL;
+    init_rwsem (&REISERFS_I(inode)->xattr_sem);
+
+    if (old_format_only (sb))
+	make_le_item_head (&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
+    else
+	make_le_item_head (&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
+
+    /* key to search for correct place for new stat data */
+    _make_cpu_key (&key, KEY_FORMAT_3_6, le32_to_cpu (ih.ih_key.k_dir_id),
+		   le32_to_cpu (ih.ih_key.k_objectid), SD_OFFSET, TYPE_STAT_DATA, 3/*key length*/);
+
+    /* find proper place for inserting of stat data */
+    retval = search_item (sb, &key, &path_to_key);
+    if (retval == IO_ERROR) {
+	err = -EIO;
+	goto out_bad_inode;
+    }
+    if (retval == ITEM_FOUND) {
+	pathrelse (&path_to_key);
+	err = -EEXIST;
+	goto out_bad_inode;
+    }
+    if (old_format_only (sb)) {
+	if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
+	    pathrelse (&path_to_key);
+	    /* i_uid or i_gid is too big to be stored in stat data v3.5 */
+	    err = -EINVAL;
+	    goto out_bad_inode;
+	}
+	inode2sd_v1 (&sd, inode, inode->i_size);
+    } else {
+	inode2sd (&sd, inode, inode->i_size);
+    }
+    // these do not go to on-disk stat data
+    inode->i_ino = le32_to_cpu (ih.ih_key.k_objectid);
+    inode->i_blksize = reiserfs_default_io_size;
+  
+    // store in the in-core inode the key of the stat data and the version
+    // all object items will have (directory items will have the old offset
+    // format, other new objects will consist of new items)
+    memcpy (INODE_PKEY (inode), &(ih.ih_key), KEY_SIZE);
+    if (old_format_only (sb) || S_ISDIR(mode) || S_ISLNK(mode))
+        set_inode_item_key_version (inode, KEY_FORMAT_3_5);
+    else
+        set_inode_item_key_version (inode, KEY_FORMAT_3_6);
+    if (old_format_only (sb))
+	set_inode_sd_version (inode, STAT_DATA_V1);
+    else
+	set_inode_sd_version (inode, STAT_DATA_V2);
+    
+    /* insert the stat data into the tree */
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    if (REISERFS_I(dir)->new_packing_locality)
+	th->displace_new_blocks = 1;
+#endif
+    retval = reiserfs_insert_item (th, &path_to_key, &key, &ih, inode, (char *)(&sd));
+    if (retval) {
+	err = retval;
+	reiserfs_check_path(&path_to_key) ;
+	goto out_bad_inode;
+    }
+
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    if (!th->displace_new_blocks)
+	REISERFS_I(dir)->new_packing_locality = 0;
+#endif
+    if (S_ISDIR(mode)) {
+	/* insert item with "." and ".." */
+	retval = reiserfs_new_directory (th, inode, &ih, &path_to_key, dir);
+    }
+
+    if (S_ISLNK(mode)) {
+	/* insert body of symlink */
+	if (!old_format_only (sb))
+	    i_size = ROUND_UP(i_size);
+	retval = reiserfs_new_symlink (th, inode, &ih, &path_to_key, symname, i_size);
+    }
+    if (retval) {
+	err = retval;
+	reiserfs_check_path(&path_to_key) ;
+	journal_end(th, th->t_super, th->t_blocks_allocated);
+	goto out_inserted_sd;
+    }
+
+    /* XXX CHECK THIS */
+    if (reiserfs_posixacl (inode->i_sb)) {
+        retval = reiserfs_inherit_default_acl (dir, dentry, inode);
+        if (retval) {
+            err = retval;
+            reiserfs_check_path(&path_to_key) ;
+            journal_end(th, th->t_super, th->t_blocks_allocated);
+            goto out_inserted_sd;
+        }
+    } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+	reiserfs_warning (inode->i_sb, "ACLs aren't enabled in the fs, "
+			  "but vfs thinks they are!");
+    } else if (is_reiserfs_priv_object (dir)) {
+	reiserfs_mark_inode_private (inode);
+    }
+
+    insert_inode_hash (inode);
+    reiserfs_update_sd(th, inode);
+    reiserfs_check_path(&path_to_key) ;
+
+    return 0;
+
+/* it looks like you can easily compress these two goto targets into
+ * one.  Keeping it like this doesn't actually hurt anything, and they
+ * are place holders for what the quota code actually needs.
+ */
+out_bad_inode:
+    /* Invalidate the object, nothing was inserted yet */
+    INODE_PKEY(inode)->k_objectid = 0;
+
+    /* Quota change must be inside a transaction for journaling */
+    DQUOT_FREE_INODE(inode);
+
+out_end_trans:
+    journal_end(th, th->t_super, th->t_blocks_allocated) ;
+    /* Drop can be outside and it needs more credits so it's better to have it outside */
+    DQUOT_DROP(inode);
+    inode->i_flags |= S_NOQUOTA;
+    make_bad_inode(inode);
+
+out_inserted_sd:
+    inode->i_nlink = 0;
+    th->t_trans_id = 0; /* so the caller can't use this handle later */
+    iput(inode);
+    return err;
+}
+
+/*
+** finds the tail page in the page cache,
+** reads the last block in.
+**
+** On success, page_result is set to a locked, pinned page, and bh_result
+** is set to an up to date buffer for the last block in the file.  returns 0.
+**
+** tail conversion is not done, so bh_result might not be valid for writing;
+** check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
+** trying to write the block.
+**
+** on failure, nonzero is returned, page_result and bh_result are untouched.
+*/
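+/* Worked example, purely illustrative (assumes 4K pages and a 4K block size):
+** with i_size == 10000, index = 9999 >> 12 = 2, offset = 10000 & 4095 = 1808
+** and start = (1808 / 4096) * 4096 = 0, so the page covering bytes 8192..12287
+** is grabbed and its single buffer is read in via block_prepare_write().
+*/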
+static int grab_tail_page(struct inode *p_s_inode, 
+			  struct page **page_result, 
+			  struct buffer_head **bh_result) {
+
+    /* we want the page with the last byte in the file,
+    ** not the page that will hold the next byte for appending
+    */
+    unsigned long index = (p_s_inode->i_size-1) >> PAGE_CACHE_SHIFT ;
+    unsigned long pos = 0 ;
+    unsigned long start = 0 ;
+    unsigned long blocksize = p_s_inode->i_sb->s_blocksize ;
+    unsigned long offset = (p_s_inode->i_size) & (PAGE_CACHE_SIZE - 1) ;
+    struct buffer_head *bh ;
+    struct buffer_head *head ;
+    struct page * page ;
+    int error ;
+    
+    /* we know that we are only called with inode->i_size > 0.
+    ** we also know that a file tail can never be as big as a block.
+    ** If i_size % blocksize == 0, our file is currently block aligned
+    ** and it won't need converting or zeroing after a truncate.
+    */
+    if ((offset & (blocksize - 1)) == 0) {
+        return -ENOENT ;
+    }
+    page = grab_cache_page(p_s_inode->i_mapping, index) ;
+    error = -ENOMEM ;
+    if (!page) {
+        goto out ;
+    }
+    /* start within the page of the last block in the file */
+    start = (offset / blocksize) * blocksize ;
+
+    error = block_prepare_write(page, start, offset, 
+				reiserfs_get_block_create_0) ;
+    if (error)
+	goto unlock ;
+
+    head = page_buffers(page) ;      
+    bh = head;
+    do {
+	if (pos >= start) {
+	    break ;
+	}
+	bh = bh->b_this_page ;
+	pos += blocksize ;
+    } while(bh != head) ;
+
+    if (!buffer_uptodate(bh)) {
+	/* note, this should never happen, prepare_write should
+	** be taking care of this for us.  If the buffer isn't up to date,
+	** I've screwed up the code to find the buffer, or the code to
+	** call prepare_write
+	*/
+	reiserfs_warning (p_s_inode->i_sb,
+			  "clm-6000: error reading block %lu on dev %s",
+			  bh->b_blocknr,
+			  reiserfs_bdevname (p_s_inode->i_sb)) ;
+	error = -EIO ;
+	goto unlock ;
+    }
+    *bh_result = bh ;
+    *page_result = page ;
+
+out:
+    return error ;
+
+unlock:
+    unlock_page(page) ;
+    page_cache_release(page) ;
+    return error ;
+}
+
+/*
+** vfs version of truncate file.  Must NOT be called with
+** a transaction already started.
+**
+** some code taken from block_truncate_page
+*/
+int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
+    struct reiserfs_transaction_handle th ;
+    /* we want the offset for the first byte after the end of the file */
+    unsigned long offset = p_s_inode->i_size & (PAGE_CACHE_SIZE - 1) ;
+    unsigned blocksize = p_s_inode->i_sb->s_blocksize ;
+    unsigned length ;
+    struct page *page = NULL ;
+    int error ;
+    struct buffer_head *bh = NULL ;
+
+    reiserfs_write_lock(p_s_inode->i_sb);
+
+    if (p_s_inode->i_size > 0) {
+        if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
+	    // -ENOENT means we truncated past the end of the file, 
+	    // and get_block_create_0 could not find a block to read in,
+	    // which is ok.
+	    if (error != -ENOENT)
+	        reiserfs_warning (p_s_inode->i_sb,
+				  "clm-6001: grab_tail_page failed %d",
+				  error);
+	    page = NULL ;
+	    bh = NULL ;
+	}
+    }
+
+    /* so, if page != NULL, we have a buffer head for the offset at 
+    ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0, 
+    ** then we have an unformatted node.  Otherwise, we have a direct item, 
+    ** and no zeroing is required on disk.  We zero after the truncate, 
+    ** because the truncate might pack the item anyway 
+    ** (it will unmap bh if it packs).
+    */
+    /* it is enough to reserve space in transaction for 2 balancings:
+       one for "save" link adding and another for the first
+       cut_from_item. 1 is for update_sd */
+    error = journal_begin (&th, p_s_inode->i_sb,
+                           JOURNAL_PER_BALANCE_CNT * 2 + 1);
+    if (error)
+        goto out;
+    reiserfs_update_inode_transaction(p_s_inode) ;
+    if (update_timestamps)
+	    /* we are doing real truncate: if the system crashes before the last
+	       transaction of truncating gets committed - on reboot the file
+	       either appears truncated properly or not truncated at all */
+	add_save_link (&th, p_s_inode, 1);
+    error = reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
+    if (error)
+        goto out;
+    error = journal_end (&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
+    if (error)
+        goto out;
+
+    if (update_timestamps) {
+	error = remove_save_link (p_s_inode, 1/* truncate */);
+        if (error)
+            goto out;
+    }
+
+    if (page) {
+        length = offset & (blocksize - 1) ;
+	/* if we are not on a block boundary */
+	if (length) {
+	    char *kaddr;
+
+	    length = blocksize - length ;
+	    kaddr = kmap_atomic(page, KM_USER0) ;
+	    memset(kaddr + offset, 0, length) ;   
+	    flush_dcache_page(page) ;
+	    kunmap_atomic(kaddr, KM_USER0) ;
+	    if (buffer_mapped(bh) && bh->b_blocknr != 0) {
+	        mark_buffer_dirty(bh) ;
+	    }
+	}
+	unlock_page(page) ;
+	page_cache_release(page) ;
+    }
+
+    reiserfs_write_unlock(p_s_inode->i_sb);
+    return 0;
+out:
+    if (page) {
+        unlock_page (page);
+        page_cache_release (page);
+    }
+    reiserfs_write_unlock(p_s_inode->i_sb);
+    return error;
+}
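+
+/* Illustrative numbers only (assuming 4K pages/blocks): truncating to
+ * i_size == 10000 gives offset = 1808, so length = 4096 - 1808 = 2288 and the
+ * memset() above zeroes page bytes 1808..4095, i.e. file offsets 10000..12287,
+ * everything in the last block beyond the new end of file. */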
+
+static int map_block_for_writepage(struct inode *inode, 
+			       struct buffer_head *bh_result, 
+                               unsigned long block) {
+    struct reiserfs_transaction_handle th ;
+    int fs_gen ;
+    struct item_head tmp_ih ;
+    struct item_head *ih ;
+    struct buffer_head *bh ;
+    __u32 *item ;
+    struct cpu_key key ;
+    INITIALIZE_PATH(path) ;
+    int pos_in_item ;
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT ;
+    loff_t byte_offset = (block << inode->i_sb->s_blocksize_bits) + 1 ;
+    int retval ;
+    int use_get_block = 0 ;
+    int bytes_copied = 0 ;
+    int copy_size ;
+    int trans_running = 0;
+
+    /* catch places below that try to log something without starting a trans */
+    th.t_trans_id = 0;
+
+    if (!buffer_uptodate(bh_result)) {
+	return -EIO;
+    }
+
+    kmap(bh_result->b_page) ;
+start_over:
+    reiserfs_write_lock(inode->i_sb);
+    make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3) ;
+
+research:
+    retval = search_for_position_by_key(inode->i_sb, &key, &path) ;
+    if (retval != POSITION_FOUND) {
+        use_get_block = 1;
+	goto out ;
+    } 
+
+    bh = get_last_bh(&path) ;
+    ih = get_ih(&path) ;
+    item = get_item(&path) ;
+    pos_in_item = path.pos_in_item ;
+
+    /* we've found an unformatted node */
+    if (indirect_item_found(retval, ih)) {
+	if (bytes_copied > 0) {
+	    reiserfs_warning (inode->i_sb, "clm-6002: bytes_copied %d",
+			      bytes_copied) ;
+	}
+        if (!get_block_num(item, pos_in_item)) {
+	    /* crap, we are writing to a hole */
+	    use_get_block = 1;
+	    goto out ;
+	}
+	set_block_dev_mapped(bh_result, get_block_num(item,pos_in_item),inode);
+    } else if (is_direct_le_ih(ih)) {
+        char *p ; 
+        p = page_address(bh_result->b_page) ;
+        p += (byte_offset -1) & (PAGE_CACHE_SIZE - 1) ;
+        copy_size = ih_item_len(ih) - pos_in_item;
+
+	fs_gen = get_generation(inode->i_sb) ;
+	copy_item_head(&tmp_ih, ih) ;
+
+	if (!trans_running) {
+	    /* vs-3050 is gone, no need to drop the path */
+	    retval = journal_begin(&th, inode->i_sb, jbegin_count) ;
+            if (retval)
+                goto out;
+	    reiserfs_update_inode_transaction(inode) ;
+	    trans_running = 1;
+	    if (fs_changed(fs_gen, inode->i_sb) && item_moved(&tmp_ih, &path)) {
+		reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+		goto research;
+	    }
+	}
+
+	reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+
+	if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+	    reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+	    goto research;
+	}
+
+	memcpy( B_I_PITEM(bh, ih) + pos_in_item, p + bytes_copied, copy_size) ;
+
+	journal_mark_dirty(&th, inode->i_sb, bh) ;
+	bytes_copied += copy_size ;
+	set_block_dev_mapped(bh_result, 0, inode);
+
+	/* are there still bytes left? */
+        if (bytes_copied < bh_result->b_size && 
+	    (byte_offset + bytes_copied) < inode->i_size) {
+	    set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + copy_size) ;
+	    goto research ;
+	}
+    } else {
+        reiserfs_warning (inode->i_sb,
+			  "clm-6003: bad item inode %lu, device %s",
+			  inode->i_ino, reiserfs_bdevname (inode->i_sb)) ;
+        retval = -EIO ;
+	goto out ;
+    }
+    retval = 0 ;
+    
+out:
+    pathrelse(&path) ;
+    if (trans_running) {
+        int err = journal_end(&th, inode->i_sb, jbegin_count) ;
+        if (err)
+            retval = err;
+	trans_running = 0;
+    }
+    reiserfs_write_unlock(inode->i_sb);
+
+    /* this is where we fill in holes in the file. */
+    if (use_get_block) {
+	retval = reiserfs_get_block(inode, block, bh_result, 
+	                            GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM |
+				    GET_BLOCK_NO_DANGLE);
+	if (!retval) {
+	    if (!buffer_mapped(bh_result) || bh_result->b_blocknr == 0) {
+	        /* get_block failed to find a mapped unformatted node. */
+		use_get_block = 0 ;
+		goto start_over ;
+	    }
+	}
+    }
+    kunmap(bh_result->b_page) ;
+
+    if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
+	/* we've copied data from the page into the direct item, so the
+	 * buffer in the page is now clean, mark it to reflect that.
+	 */
+        lock_buffer(bh_result);
+	clear_buffer_dirty(bh_result);
+	unlock_buffer(bh_result);
+    }
+    return retval ;
+}
+
+/* 
+ * mason@suse.com: updated in 2.5.54 to follow the same general io 
+ * start/recovery path as __block_write_full_page, along with special
+ * code to handle reiserfs tails.
+ */
+static int reiserfs_write_full_page(struct page *page, struct writeback_control *wbc) {
+    struct inode *inode = page->mapping->host ;
+    unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT ;
+    int error = 0;
+    unsigned long block ;
+    struct buffer_head *head, *bh;
+    int partial = 0 ;
+    int nr = 0;
+    int checked = PageChecked(page);
+    struct reiserfs_transaction_handle th;
+    struct super_block *s = inode->i_sb;
+    int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+    th.t_trans_id = 0;
+
+    /* The page dirty bit is cleared before writepage is called, which
+     * means we have to tell create_empty_buffers to make dirty buffers.
+     * The page really should be up to date at this point, so tossing
+     * in the BH_Uptodate is just a sanity check.
+     */
+    if (!page_has_buffers(page)) {
+	create_empty_buffers(page, s->s_blocksize,
+	                    (1 << BH_Dirty) | (1 << BH_Uptodate));
+    }
+    head = page_buffers(page) ;
+
+    /* last page in the file, zero out any contents past the
+    ** last byte in the file
+    */
+    if (page->index >= end_index) {
+	char *kaddr;
+	unsigned last_offset;
+
+        last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1) ;
+	/* no file contents in this page */
+	if (page->index >= end_index + 1 || !last_offset) {
+    	    unlock_page(page);
+	    return 0;
+	}
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
+	flush_dcache_page(page) ;
+	kunmap_atomic(kaddr, KM_USER0) ;
+    }
+    bh = head ;
+    block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits) ;
+    /* first map all the buffers, logging any direct items we find */
+    do {
+	if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) ||
+	   (buffer_mapped(bh) && bh->b_blocknr == 0))) {
+	    /* not mapped yet, or it points to a direct item, search
+	     * the btree for the mapping info, and log any direct
+	     * items found
+	     */
+	    if ((error = map_block_for_writepage(inode, bh, block))) {
+		goto fail ;
+	    }
+	}
+        bh = bh->b_this_page;
+	block++;
+    } while(bh != head) ;
+
+    /*
+     * we start the transaction after map_block_for_writepage,
+     * because it can create holes in the file (an unbounded operation).
+     * starting it here, we can make a reliable estimate for how many
+     * blocks we're going to log
+     */
+    if (checked) {
+	ClearPageChecked(page);
+	reiserfs_write_lock(s);
+	error = journal_begin(&th, s, bh_per_page + 1);
+	if (error) {
+	    reiserfs_write_unlock(s);
+	    goto fail;
+	}
+	reiserfs_update_inode_transaction(inode);
+    }
+    /* now go through and lock any dirty buffers on the page */
+    do {
+	get_bh(bh);
+	if (!buffer_mapped(bh))
+	    continue;
+	if (buffer_mapped(bh) && bh->b_blocknr == 0)
+	    continue;
+
+	if (checked) {
+	    reiserfs_prepare_for_journal(s, bh, 1);
+	    journal_mark_dirty(&th, s, bh);
+	    continue;
+	}
+	/* from this point on, we know the buffer is mapped to a
+	 * real block and not a direct item
+	 */
+	if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+	    lock_buffer(bh);
+	} else {
+	    if (test_set_buffer_locked(bh)) {
+		redirty_page_for_writepage(wbc, page);
+		continue;
+	    }
+	}
+	if (test_clear_buffer_dirty(bh)) {
+	    mark_buffer_async_write(bh);
+	} else {
+	    unlock_buffer(bh);
+	}
+    } while((bh = bh->b_this_page) != head);
+
+    if (checked) {
+	error = journal_end(&th, s, bh_per_page + 1);
+	reiserfs_write_unlock(s);
+	if (error)
+	    goto fail;
+    }
+    BUG_ON(PageWriteback(page));
+    set_page_writeback(page);
+    unlock_page(page);
+
+    /*
+     * since any buffer might be the only dirty buffer on the page, 
+     * the first submit_bh can bring the page out of writeback.
+     * be careful with the buffers.
+     */
+    do {
+        struct buffer_head *next = bh->b_this_page;
+	if (buffer_async_write(bh)) {
+	    submit_bh(WRITE, bh);
+	    nr++;
+	}
+	put_bh(bh);
+	bh = next;
+    } while(bh != head);
+
+    error = 0;
+done:
+    if (nr == 0) {
+        /*
+         * if this page only had a direct item, it is very possible for
+         * no io to be required without there being an error.  Or, 
+	 * someone else could have locked them and sent them down the 
+	 * pipe without locking the page
+	 */
+	bh = head ;
+	do {
+	    if (!buffer_uptodate(bh)) {
+	        partial = 1;
+		break;
+	    }
+	    bh = bh->b_this_page;
+	} while(bh != head);
+	if (!partial)
+	    SetPageUptodate(page);
+	end_page_writeback(page);
+    }
+    return error;
+
+fail:
+    /* catches various errors, we need to make sure any valid dirty blocks
+     * get to the media.  The page is currently locked and not marked for 
+     * writeback
+     */
+    ClearPageUptodate(page);
+    bh = head;
+    do {
+	get_bh(bh);
+	if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
+	    lock_buffer(bh);
+	    mark_buffer_async_write(bh);
+	} else {
+	    /*
+	     * clear any dirty bits that might have come from getting
+	     * attached to a dirty page
+	     */
+	     clear_buffer_dirty(bh);
+	}
+        bh = bh->b_this_page;
+    } while(bh != head);
+    SetPageError(page);
+    BUG_ON(PageWriteback(page));
+    set_page_writeback(page);
+    unlock_page(page);
+    do {
+        struct buffer_head *next = bh->b_this_page;
+	if (buffer_async_write(bh)) {
+	    clear_buffer_dirty(bh);
+	    submit_bh(WRITE, bh);
+	    nr++;
+	}
+	put_bh(bh);
+	bh = next;
+    } while(bh != head);
+    goto done;
+}
+
+
+static int reiserfs_readpage (struct file *f, struct page * page)
+{
+    return block_read_full_page (page, reiserfs_get_block);
+}
+
+
+static int reiserfs_writepage (struct page * page, struct writeback_control *wbc)
+{
+    struct inode *inode = page->mapping->host ;
+    reiserfs_wait_on_write_block(inode->i_sb) ;
+    return reiserfs_write_full_page(page, wbc) ;
+}
+
+static int reiserfs_prepare_write(struct file *f, struct page *page,
+			   unsigned from, unsigned to) {
+    struct inode *inode = page->mapping->host ;
+    int ret;
+    int old_ref = 0;
+
+    reiserfs_wait_on_write_block(inode->i_sb) ;
+    fix_tail_page_for_writing(page) ;
+    if (reiserfs_transaction_running(inode->i_sb)) {
+	struct reiserfs_transaction_handle *th;
+	th = (struct reiserfs_transaction_handle *)current->journal_info;
+        BUG_ON (!th->t_refcount);
+        BUG_ON (!th->t_trans_id);
+	old_ref = th->t_refcount;
+	th->t_refcount++;
+    }
+
+    ret = block_prepare_write(page, from, to, reiserfs_get_block) ;
+    if (ret && reiserfs_transaction_running(inode->i_sb)) {
+    	struct reiserfs_transaction_handle *th = current->journal_info;
+	/* this gets a little ugly.  If reiserfs_get_block returned an
+	 * error and left a transaction running, we've got to close it,
+	 * and we've got to free handle if it was a persistent transaction.
+	 *
+	 * But, if we had nested into an existing transaction, we need
+	 * to just drop the ref count on the handle.
+	 *
+	 * If old_ref == 0, the transaction is from reiserfs_get_block,
+	 * and it was a persistent trans.  Otherwise, it was nested above.
+	 */
+	if (th->t_refcount > old_ref) {
+	    if (old_ref)
+	    	th->t_refcount--;
+	    else {
+                int err;
+		reiserfs_write_lock(inode->i_sb);
+		err = reiserfs_end_persistent_transaction(th);
+		reiserfs_write_unlock(inode->i_sb);
+                if (err)
+                    ret = err;
+	    }
+	}
+    }
+    return ret;
+
+}
+
+
+static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block) {
+  return generic_block_bmap(as, block, reiserfs_bmap) ;
+}
+
+static int reiserfs_commit_write(struct file *f, struct page *page, 
+                                 unsigned from, unsigned to) {
+    struct inode *inode = page->mapping->host ;
+    loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+    int ret = 0;
+    int update_sd = 0;
+    struct reiserfs_transaction_handle *th = NULL;
+    
+    reiserfs_wait_on_write_block(inode->i_sb) ;
+    if (reiserfs_transaction_running(inode->i_sb)) {
+        th = current->journal_info;
+    }
+    reiserfs_commit_page(inode, page, from, to);
+ 
+    /* generic_commit_write does this for us, but does not update the
+    ** transaction tracking stuff when the size changes.  So, we have
+    ** to do the i_size updates here.
+    */
+    if (pos > inode->i_size) {
+	struct reiserfs_transaction_handle myth ;
+	reiserfs_write_lock(inode->i_sb);
+	/* If the file has grown beyond the point where it
+	   can have a tail, unmark it as needing tail
+	   packing */
+	if ( (have_large_tails (inode->i_sb) && inode->i_size > i_block_size (inode)*4) ||
+	     (have_small_tails (inode->i_sb) && inode->i_size > i_block_size(inode)) )
+	    REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask ;
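+	/* For example (illustrative): with a 4K block size the large-tails
+	   case above clears the pack-on-close flag once i_size exceeds
+	   16384 bytes, the small-tails case once it exceeds 4096 bytes. */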
+
+	ret = journal_begin(&myth, inode->i_sb, 1) ;
+	if (ret) {
+	    reiserfs_write_unlock(inode->i_sb);
+	    goto journal_error;
+	}
+	reiserfs_update_inode_transaction(inode) ;
+	inode->i_size = pos ;
+	reiserfs_update_sd(&myth, inode) ;
+	update_sd = 1;
+	ret = journal_end(&myth, inode->i_sb, 1) ;
+	reiserfs_write_unlock(inode->i_sb);
+	if (ret)
+	    goto journal_error;
+    }
+    if (th) {
+	reiserfs_write_lock(inode->i_sb);
+	if (!update_sd)
+	    reiserfs_update_sd(th, inode) ;
+	ret = reiserfs_end_persistent_transaction(th);
+	reiserfs_write_unlock(inode->i_sb);
+	if (ret)
+	    goto out;
+    }
+ 
+    /* we test for O_SYNC here so we can commit the transaction
+    ** for any packed tails the file might have had
+    */
+    if (f && (f->f_flags & O_SYNC)) {
+	reiserfs_write_lock(inode->i_sb);
+ 	ret = reiserfs_commit_for_inode(inode) ;
+	reiserfs_write_unlock(inode->i_sb);
+    }
+out:
+    return ret ;
+
+journal_error:
+    if (th) {
+	reiserfs_write_lock(inode->i_sb);
+	if (!update_sd)
+	    reiserfs_update_sd(th, inode) ;
+        ret = reiserfs_end_persistent_transaction(th);
+	reiserfs_write_unlock(inode->i_sb);
+    }
+
+    return ret;
+}
+
+void sd_attrs_to_i_attrs( __u16 sd_attrs, struct inode *inode )
+{
+	if( reiserfs_attrs( inode -> i_sb ) ) {
+		if( sd_attrs & REISERFS_SYNC_FL )
+			inode -> i_flags |= S_SYNC;
+		else
+			inode -> i_flags &= ~S_SYNC;
+		if( sd_attrs & REISERFS_IMMUTABLE_FL )
+			inode -> i_flags |= S_IMMUTABLE;
+		else
+			inode -> i_flags &= ~S_IMMUTABLE;
+		if( sd_attrs & REISERFS_APPEND_FL )
+			inode -> i_flags |= S_APPEND;
+		else
+			inode -> i_flags &= ~S_APPEND;
+		if( sd_attrs & REISERFS_NOATIME_FL )
+			inode -> i_flags |= S_NOATIME;
+		else
+			inode -> i_flags &= ~S_NOATIME;
+		if( sd_attrs & REISERFS_NOTAIL_FL )
+			REISERFS_I(inode)->i_flags |= i_nopack_mask;
+		else
+			REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
+	}
+}
+
+void i_attrs_to_sd_attrs( struct inode *inode, __u16 *sd_attrs )
+{
+	if( reiserfs_attrs( inode -> i_sb ) ) {
+		if( inode -> i_flags & S_IMMUTABLE )
+			*sd_attrs |= REISERFS_IMMUTABLE_FL;
+		else
+			*sd_attrs &= ~REISERFS_IMMUTABLE_FL;
+		if( inode -> i_flags & S_SYNC )
+			*sd_attrs |= REISERFS_SYNC_FL;
+		else
+			*sd_attrs &= ~REISERFS_SYNC_FL;
+		if( inode -> i_flags & S_NOATIME )
+			*sd_attrs |= REISERFS_NOATIME_FL;
+		else
+			*sd_attrs &= ~REISERFS_NOATIME_FL;
+		if( REISERFS_I(inode)->i_flags & i_nopack_mask )
+			*sd_attrs |= REISERFS_NOTAIL_FL;
+		else
+			*sd_attrs &= ~REISERFS_NOTAIL_FL;
+	}
+}
+
+/* decide if this buffer needs to stay around for data logging or ordered
+** write purposes
+*/
+static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
+{
+    int ret = 1 ;
+    struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb) ;
+
+    spin_lock(&j->j_dirty_buffers_lock) ;
+    if (!buffer_mapped(bh)) {
+        goto free_jh;
+    }
+    /* the page is locked, and the only places that log a data buffer
+     * also lock the page.
+     */
+    if (reiserfs_file_data_log(inode)) {
+	/*
+	 * very conservative, leave the buffer pinned if
+	 * anyone might need it.
+	 */
+        if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
+	    ret = 0 ;
+	}
+    } else
+    if (buffer_dirty(bh) || buffer_locked(bh)) {
+	struct reiserfs_journal_list *jl;
+	struct reiserfs_jh *jh = bh->b_private;
+
+	/* why is this safe?
+	 * reiserfs_setattr updates i_size in the on disk
+	 * stat data before allowing vmtruncate to be called.
+	 *
+	 * If buffer was put onto the ordered list for this
+	 * transaction, we know for sure either this transaction
+	 * or an older one already has updated i_size on disk,
+	 * and this ordered data won't be referenced in the file
+	 * if we crash.
+	 *
+	 * if the buffer was put onto the ordered list for an older
+	 * transaction, we need to leave it around
+	 */
+	if (jh && (jl = jh->jl) && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
+	    ret = 0;
+    }
+free_jh:
+    if (ret && bh->b_private) {
+        reiserfs_free_jh(bh);
+    }
+    spin_unlock(&j->j_dirty_buffers_lock) ;
+    return ret ;
+}
+
+/* clm -- taken from fs/buffer.c:block_invalidate_page */
+static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
+{
+    struct buffer_head *head, *bh, *next;
+    struct inode *inode = page->mapping->host;
+    unsigned int curr_off = 0;
+    int ret = 1;
+
+    BUG_ON(!PageLocked(page));
+
+    if (offset == 0)
+	ClearPageChecked(page);
+
+    if (!page_has_buffers(page))
+	goto out;
+
+    head = page_buffers(page);
+    bh = head;
+    do {
+	unsigned int next_off = curr_off + bh->b_size;
+	next = bh->b_this_page;
+
+	/*
+	 * is this block fully invalidated?
+	 */
+	if (offset <= curr_off) {
+	    if (invalidatepage_can_drop(inode, bh))
+		reiserfs_unmap_buffer(bh);
+	    else
+	        ret = 0;
+	}
+	curr_off = next_off;
+	bh = next;
+    } while (bh != head);
+
+    /*
+     * We release buffers only if the entire page is being invalidated.
+     * The get_block cached value has been unconditionally invalidated,
+     * so real IO is not possible anymore.
+     */
+    if (!offset && ret)
+	ret = try_to_release_page(page, 0);
+out:
+    return ret;
+}
+
+static int reiserfs_set_page_dirty(struct page *page) {
+    struct inode *inode = page->mapping->host;
+    if (reiserfs_file_data_log(inode)) {
+	SetPageChecked(page);
+	return __set_page_dirty_nobuffers(page);
+    }
+    return __set_page_dirty_buffers(page);
+}
+
+/*
+ * Returns 1 if the page's buffers were dropped.  The page is locked.
+ *
+ * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
+ * in the buffers at page_buffers(page).
+ *
+ * even in -o notail mode, we can't be sure an old mount without -o notail
+ * didn't create files with tails.
+ */
+static int reiserfs_releasepage(struct page *page, int unused_gfp_flags)
+{
+    struct inode *inode = page->mapping->host ;
+    struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb) ;
+    struct buffer_head *head ;
+    struct buffer_head *bh ;
+    int ret = 1 ;
+
+    WARN_ON(PageChecked(page));
+    spin_lock(&j->j_dirty_buffers_lock) ;
+    head = page_buffers(page) ;
+    bh = head ;
+    do {
+	if (bh->b_private) {
+	    if (!buffer_dirty(bh) && !buffer_locked(bh)) {
+		reiserfs_free_jh(bh);
+	    } else {
+		ret = 0 ;
+		break ;
+	    }
+	}
+	bh = bh->b_this_page ;
+    } while (bh != head) ;
+    if (ret)
+	ret = try_to_free_buffers(page) ;
+    spin_unlock(&j->j_dirty_buffers_lock) ;
+    return ret ;
+}
+
+/* We thank Mingming Cao for helping us understand in great detail what
+   to do in this section of the code. */
+static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
+		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+    struct file *file = iocb->ki_filp;
+    struct inode *inode = file->f_mapping->host;
+
+    return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+			offset, nr_segs, reiserfs_get_blocks_direct_io, NULL);
+}
+
+int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) {
+    struct inode *inode = dentry->d_inode ;
+    int error ;
+    unsigned int ia_valid = attr->ia_valid;
+    reiserfs_write_lock(inode->i_sb);
+    if (attr->ia_valid & ATTR_SIZE) {
+	/* version 2 items will be caught by the s_maxbytes check
+	** done for us in vmtruncate
+	*/
+	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
+	    attr->ia_size > MAX_NON_LFS) {
+	    error = -EFBIG ;
+	    goto out;
+	}
+	/* fill in hole pointers in the expanding truncate case. */
+        if (attr->ia_size > inode->i_size) {
+	    error = generic_cont_expand(inode, attr->ia_size) ;
+	    if (REISERFS_I(inode)->i_prealloc_count > 0) {
+		int err;
+		struct reiserfs_transaction_handle th ;
+		/* we're changing at most 2 bitmaps, inode + super */
+		err = journal_begin(&th, inode->i_sb, 4) ;
+		if (!err) {
+		    reiserfs_discard_prealloc (&th, inode);
+		    err = journal_end(&th, inode->i_sb, 4) ;
+		}
+		if (err)
+		    error = err;
+	    }
+	    if (error)
+	        goto out;
+	}
+    }
+
+    if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) ||
+	 ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) &&
+	(get_inode_sd_version (inode) == STAT_DATA_V1)) {
+	    /* stat data of format v3.5 has 16 bit uid and gid */
+	    error = -EINVAL;
+	    goto out;
+    }
+
+    error = inode_change_ok(inode, attr) ;
+    if (!error) {
+	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+                error = reiserfs_chown_xattrs (inode, attr);
+
+                if (!error) {
+		    struct reiserfs_transaction_handle th;
+
+		    /* (user+group)*(old+new) quota structures to update, plus the inode write (sb, inode) */
+		    journal_begin(&th, inode->i_sb, 4*REISERFS_QUOTA_INIT_BLOCKS+2);
+                    error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+		    if (error) {
+			journal_end(&th, inode->i_sb, 4*REISERFS_QUOTA_INIT_BLOCKS+2);
+			goto out;
+		    }
+		    /* Update corresponding info in inode so that everything is in
+		     * one transaction */
+		    if (attr->ia_valid & ATTR_UID)
+			inode->i_uid = attr->ia_uid;
+		    if (attr->ia_valid & ATTR_GID)
+			inode->i_gid = attr->ia_gid;
+		    mark_inode_dirty(inode);
+		    journal_end(&th, inode->i_sb, 4*REISERFS_QUOTA_INIT_BLOCKS+2);
+		}
+        }
+        if (!error)
+            error = inode_setattr(inode, attr) ;
+    }
+
+
+    if (!error && reiserfs_posixacl (inode->i_sb)) {
+        if (attr->ia_valid & ATTR_MODE)
+            error = reiserfs_acl_chmod (inode);
+    }
+
+out:
+    reiserfs_write_unlock(inode->i_sb);
+    return error ;
+}
+
+
+
+struct address_space_operations reiserfs_address_space_operations = {
+    .writepage = reiserfs_writepage,
+    .readpage = reiserfs_readpage, 
+    .readpages = reiserfs_readpages, 
+    .releasepage = reiserfs_releasepage,
+    .invalidatepage = reiserfs_invalidatepage,
+    .sync_page = block_sync_page,
+    .prepare_write = reiserfs_prepare_write,
+    .commit_write = reiserfs_commit_write,
+    .bmap = reiserfs_aop_bmap,
+    .direct_IO = reiserfs_direct_IO,
+    .set_page_dirty = reiserfs_set_page_dirty,
+} ;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
new file mode 100644
index 0000000..94dc424
--- /dev/null
+++ b/fs/reiserfs/ioctl.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/time.h>
+#include <asm/uaccess.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+static int reiserfs_unpack (struct inode * inode, struct file * filp);
+
+/*
+** reiserfs_ioctl - ioctl handler for an inode
+** supported commands:
+**  1) REISERFS_IOC_UNPACK - try to unpack the tail from a direct item into an
+**                           indirect one and prevent further packing of the
+**                           file (argument arg has to be non-zero)
+**  2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
+**  3) That's all for a while ...
+*/
+int reiserfs_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+		unsigned long arg)
+{
+	unsigned int flags;
+
+	switch (cmd) {
+	case REISERFS_IOC_UNPACK:
+		if( S_ISREG( inode -> i_mode ) ) {
+			if (arg)
+				return reiserfs_unpack (inode, filp);
+			else
+				return 0;
+		} else
+			return -ENOTTY;
+	/* following two cases are taken from fs/ext2/ioctl.c by Remy
+	   Card (card@masi.ibp.fr) */
+	case REISERFS_IOC_GETFLAGS:
+		flags = REISERFS_I(inode) -> i_attrs;
+		i_attrs_to_sd_attrs( inode, ( __u16 * ) &flags );
+		return put_user(flags, (int __user *) arg);
+	case REISERFS_IOC_SETFLAGS: {
+		if (IS_RDONLY(inode))
+			return -EROFS;
+
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EPERM;
+
+		if (get_user(flags, (int __user *) arg))
+			return -EFAULT;
+
+		if ( ( ( flags ^ REISERFS_I(inode) -> i_attrs) & ( REISERFS_IMMUTABLE_FL | REISERFS_APPEND_FL)) &&
+		     !capable( CAP_LINUX_IMMUTABLE ) )
+			return -EPERM;
+			
+		if( ( flags & REISERFS_NOTAIL_FL ) &&
+		    S_ISREG( inode -> i_mode ) ) {
+			int result;
+
+			result = reiserfs_unpack( inode, filp );
+			if( result )
+				return result;
+		}
+		sd_attrs_to_i_attrs( flags, inode );
+		REISERFS_I(inode) -> i_attrs = flags;
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		return 0;
+	}
+	case REISERFS_IOC_GETVERSION:
+		return put_user(inode->i_generation, (int __user *) arg);
+	case REISERFS_IOC_SETVERSION:
+		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+			return -EPERM;
+		if (IS_RDONLY(inode))
+			return -EROFS;
+		if (get_user(inode->i_generation, (int __user *) arg))
+			return -EFAULT;	
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
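+
+/* Sketch of the intended user-space call (illustrative only; assumes the
+ * REISERFS_IOC_UNPACK definition from linux/reiserfs_fs.h is visible to the
+ * caller, and the path is just an example):
+ *
+ *	int fd = open("/mnt/reiser/somefile", O_RDONLY);
+ *	if (fd >= 0 && ioctl(fd, REISERFS_IOC_UNPACK, 1) == 0)
+ *		;	// tail now lives in an unformatted node, no repacking
+ */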
+
+/*
+** reiserfs_unpack
+** Tries to convert the file's tail from a direct item into an indirect one.
+** It also sets the nopack flag (i_nopack_mask) in REISERFS_I(inode)->i_flags.
+*/
+static int reiserfs_unpack (struct inode * inode, struct file * filp)
+{
+    int retval = 0;
+    int index ;
+    struct page *page ;
+    struct address_space *mapping ;
+    unsigned long write_from ;
+    unsigned long blocksize = inode->i_sb->s_blocksize ;
+    	
+    if (inode->i_size == 0) {
+        REISERFS_I(inode)->i_flags |= i_nopack_mask;
+        return 0 ;
+    }
+    /* ioctl already done */
+    if (REISERFS_I(inode)->i_flags & i_nopack_mask) {
+        return 0 ;
+    }
+    reiserfs_write_lock(inode->i_sb);
+
+    /* we need to make sure nobody is changing the file size beneath
+    ** us
+    */
+    down(&inode->i_sem) ;
+
+    write_from = inode->i_size & (blocksize - 1) ;
+    /* if we are on a block boundary, we are already unpacked.  */
+    if ( write_from == 0) {
+	REISERFS_I(inode)->i_flags |= i_nopack_mask;
+	goto out ;
+    }
+
+    /* we unpack by finding the page with the tail, and calling
+    ** reiserfs_prepare_write on that page.  This will force a 
+    ** reiserfs_get_block to unpack the tail for us.
+    */
+    index = inode->i_size >> PAGE_CACHE_SHIFT ;
+    mapping = inode->i_mapping ;
+    page = grab_cache_page(mapping, index) ;
+    retval = -ENOMEM;
+    if (!page) {
+        goto out ;
+    }
+    retval = mapping->a_ops->prepare_write(NULL, page, write_from, write_from) ;
+    if (retval)
+        goto out_unlock ;
+
+    /* conversion can change page contents, must flush */
+    flush_dcache_page(page) ;
+    retval = mapping->a_ops->commit_write(NULL, page, write_from, write_from) ;
+    REISERFS_I(inode)->i_flags |= i_nopack_mask;
+
+out_unlock:
+    unlock_page(page) ;
+    page_cache_release(page) ;
+
+out:
+    up(&inode->i_sem) ;
+    reiserfs_write_unlock(inode->i_sb);
+    return retval;
+}
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
new file mode 100644
index 0000000..9cf7c13
--- /dev/null
+++ b/fs/reiserfs/item_ops.c
@@ -0,0 +1,788 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+
+// this contains item handlers for old item types: sd, direct,
+// indirect, directory
+
+/* and where are the comments? how about saying where we can find an
+   explanation of each item handler method? -Hans */
+
+//////////////////////////////////////////////////////////////////////////////
+// stat data functions
+//
+static int sd_bytes_number (struct item_head * ih, int block_size)
+{
+  return 0;
+}
+
+static void sd_decrement_key (struct cpu_key * key)
+{
+    key->on_disk_key.k_objectid --;
+    set_cpu_key_k_type (key, TYPE_ANY);
+    set_cpu_key_k_offset(key, (loff_t)(-1));
+}
+
+static int sd_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
+{
+    return 0;
+}
+
+
+
+static char * print_time (time_t t)
+{
+    static char timebuf[256];
+
+    sprintf (timebuf, "%ld", t);
+    return timebuf;
+}
+
+
+static void sd_print_item (struct item_head * ih, char * item)
+{
+    printk ("\tmode | size | nlinks | first direct | mtime\n");
+    if (stat_data_v1 (ih)) {
+      	struct stat_data_v1 * sd = (struct stat_data_v1 *)item;
+
+	printk ("\t0%-6o | %6u | %2u | %d | %s\n", sd_v1_mode(sd),
+                sd_v1_size(sd), sd_v1_nlink(sd), sd_v1_first_direct_byte(sd),
+                print_time( sd_v1_mtime(sd) ) );
+    } else {
+	struct stat_data * sd = (struct stat_data *)item;
+
+	printk ("\t0%-6o | %6Lu | %2u | %d | %s\n", sd_v2_mode(sd),
+            (unsigned long long)sd_v2_size(sd), sd_v2_nlink(sd),
+            sd_v2_rdev(sd), print_time(sd_v2_mtime(sd)));
+    }
+}
+
+static void sd_check_item (struct item_head * ih, char * item)
+{
+    // FIXME: type something here!
+}
+
+
+static int sd_create_vi (struct virtual_node * vn,
+			 struct virtual_item * vi, 
+			 int is_affected, 
+			 int insert_size)
+{
+    vi->vi_index = TYPE_STAT_DATA;
+    //vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
+    return 0;
+}
+
+
+static int sd_check_left (struct virtual_item * vi, int free, 
+			  int start_skip, int end_skip)
+{
+    if (start_skip || end_skip)
+	BUG ();
+    return -1;
+}
+
+
+static int sd_check_right (struct virtual_item * vi, int free)
+{
+    return -1;
+}
+
+static int sd_part_size (struct virtual_item * vi, int first, int count)
+{
+    if (count)
+	BUG ();
+    return 0;
+}
+
+static int sd_unit_num (struct virtual_item * vi)
+{
+    return vi->vi_item_len - IH_SIZE;
+}
+
+
+static void sd_print_vi (struct virtual_item * vi)
+{
+    reiserfs_warning (NULL, "STATDATA, index %d, type 0x%x, %h",
+		      vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+static struct item_operations stat_data_ops = {
+	.bytes_number		= sd_bytes_number,
+	.decrement_key		= sd_decrement_key,
+	.is_left_mergeable	= sd_is_left_mergeable,
+	.print_item		= sd_print_item,
+	.check_item		= sd_check_item,
+
+	.create_vi		= sd_create_vi,
+	.check_left		= sd_check_left,
+	.check_right		= sd_check_right,
+	.part_size		= sd_part_size,
+	.unit_num		= sd_unit_num,
+	.print_vi		= sd_print_vi
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// direct item functions
+//
+static int direct_bytes_number (struct item_head * ih, int block_size)
+{
+  return ih_item_len(ih);
+}
+
+
+// FIXME: this should probably switch to indirect as well
+static void direct_decrement_key (struct cpu_key * key)
+{
+    cpu_key_k_offset_dec (key);
+    if (cpu_key_k_offset (key) == 0)
+	set_cpu_key_k_type (key, TYPE_STAT_DATA);	
+}
+
+
+static int direct_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
+{
+    int version = le_key_version (key);
+    return ((le_key_k_offset (version, key) & (bsize - 1)) != 1);
+}
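+
+/* For illustration: with a 4K block size, a direct item whose key offset is
+   1 or 4097 begins at a block boundary (offset & 4095 == 1) and is not left
+   mergeable, while one at offset 4099 returns 1 and can be merged left. */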
+
+
+static void direct_print_item (struct item_head * ih, char * item)
+{
+    int j = 0;
+
+//    return;
+    printk ("\"");
+    while (j < ih_item_len(ih))
+	printk ("%c", item[j++]);
+    printk ("\"\n");
+}
+
+
+static void direct_check_item (struct item_head * ih, char * item)
+{
+    // FIXME: type something here!
+}
+
+
+static int direct_create_vi (struct virtual_node * vn,
+			     struct virtual_item * vi, 
+			     int is_affected, 
+			     int insert_size)
+{
+    vi->vi_index = TYPE_DIRECT;
+    //vi->vi_type |= VI_TYPE_DIRECT;
+    return 0;
+}
+
+static int direct_check_left (struct virtual_item * vi, int free,
+			      int start_skip, int end_skip)
+{
+    int bytes;
+
+    bytes = free - free % 8;
+    return bytes ?: -1;    
+}
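+
+/* For illustration: free == 13 yields 13 - 13 % 8 = 8, while free == 5 yields
+   0, which the gcc "?:" extension above turns into -1 (nothing fits). */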
+
+
+static int direct_check_right (struct virtual_item * vi, int free)
+{
+    return direct_check_left (vi, free, 0, 0);
+}
+
+static int direct_part_size (struct virtual_item * vi, int first, int count)
+{
+    return count;
+}
+
+
+static int direct_unit_num (struct virtual_item * vi)
+{
+    return vi->vi_item_len - IH_SIZE;
+}
+
+
+static void direct_print_vi (struct virtual_item * vi)
+{
+    reiserfs_warning (NULL, "DIRECT, index %d, type 0x%x, %h",
+		      vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+static struct item_operations direct_ops = {
+	.bytes_number		= direct_bytes_number,
+	.decrement_key		= direct_decrement_key,
+	.is_left_mergeable	= direct_is_left_mergeable,
+	.print_item		= direct_print_item,
+	.check_item		= direct_check_item,
+
+	.create_vi		= direct_create_vi,
+	.check_left		= direct_check_left,
+	.check_right		= direct_check_right,
+	.part_size		= direct_part_size,
+	.unit_num		= direct_unit_num,
+	.print_vi		= direct_print_vi
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// indirect item functions
+//
+
+static int indirect_bytes_number (struct item_head * ih, int block_size)
+{
+  return ih_item_len(ih) / UNFM_P_SIZE * block_size; //- get_ih_free_space (ih);
+}
+
+
+// decrease offset, if it becomes 0, change type to stat data
+static void indirect_decrement_key (struct cpu_key * key)
+{
+    cpu_key_k_offset_dec (key);
+    if (cpu_key_k_offset (key) == 0)
+	set_cpu_key_k_type (key, TYPE_STAT_DATA);
+}
+
+
+// if it is not the first item of the body, then it is mergeable
+static int indirect_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
+{
+    int version = le_key_version (key);
+    return (le_key_k_offset (version, key) != 1);
+}
+
+
+// printing of indirect item
+static void start_new_sequence (__u32 * start, int * len, __u32 new)
+{
+    *start = new;
+    *len = 1;
+}
+
+
+static int sequence_finished (__u32 start, int * len, __u32 new)
+{
+    if (start == INT_MAX)
+	return 1;
+
+    if (start == 0 && new == 0) {
+	(*len) ++;
+	return 0;
+    }
+    if (start != 0 && (start + *len) == new) {
+	(*len) ++;
+	return 0;
+    }
+    return 1;
+}
+
+static void print_sequence (__u32 start, int len)
+{
+    if (start == INT_MAX)
+	return;
+
+    if (len == 1)
+	printk (" %d", start);
+    else
+	printk (" %d(%d)", start, len);
+}
+
+
+static void indirect_print_item (struct item_head * ih, char * item)
+{
+    int j;
+    __u32 * unp, prev = INT_MAX;
+    int num;
+
+    unp = (__u32 *)item;
+
+    if (ih_item_len(ih) % UNFM_P_SIZE)
+	reiserfs_warning (NULL, "indirect_print_item: invalid item len");
+
+    printk ("%d pointers\n[ ", (int)I_UNFM_NUM (ih));
+    for (j = 0; j < I_UNFM_NUM (ih); j ++) {
+	if (sequence_finished (prev, &num, get_block_num(unp, j))) {
+	    print_sequence (prev, num);
+	    start_new_sequence (&prev, &num, get_block_num(unp, j));
+	}
+    }
+    print_sequence (prev, num);
+    printk ("]\n");
+}
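+
+/* Example output (illustrative): an indirect item whose unformatted node
+   pointers are {100, 101, 102, 0, 0, 7} is printed by the helpers above as
+   "6 pointers" followed by roughly "[ 100(3) 0(2) 7]": consecutive block runs
+   collapse to start(len), and runs of zeroes (holes) collapse the same way. */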
+
+static void indirect_check_item (struct item_head * ih, char * item)
+{
+    // FIXME: type something here!
+}
+
+
+static int indirect_create_vi (struct virtual_node * vn,
+			       struct virtual_item * vi, 
+			       int is_affected, 
+			       int insert_size)
+{
+    vi->vi_index = TYPE_INDIRECT;
+    //vi->vi_type |= VI_TYPE_INDIRECT;
+    return 0;
+}
+
+static int indirect_check_left (struct virtual_item * vi, int free,
+				int start_skip, int end_skip)
+{
+    int bytes;
+
+    bytes = free - free % UNFM_P_SIZE;
+    return bytes ?: -1;    
+}
+
+
+static int indirect_check_right (struct virtual_item * vi, int free)
+{
+    return indirect_check_left (vi, free, 0, 0);
+}
+
+
+
+// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
+static int indirect_part_size (struct virtual_item * vi, int first, int units)
+{
+    // unit of indirect item is byte (yet)
+    return units;
+}
+
+static int indirect_unit_num (struct virtual_item * vi)
+{
+    // unit of indirect item is byte (yet)
+    return vi->vi_item_len - IH_SIZE;
+}
+
+static void indirect_print_vi (struct virtual_item * vi)
+{
+    reiserfs_warning (NULL, "INDIRECT, index %d, type 0x%x, %h",
+		      vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+static struct item_operations indirect_ops = {
+	.bytes_number		= indirect_bytes_number,
+	.decrement_key		= indirect_decrement_key,
+	.is_left_mergeable	= indirect_is_left_mergeable,
+	.print_item		= indirect_print_item,
+	.check_item		= indirect_check_item,
+
+	.create_vi		= indirect_create_vi,
+	.check_left		= indirect_check_left,
+	.check_right		= indirect_check_right,
+	.part_size		= indirect_part_size,
+	.unit_num		= indirect_unit_num,
+	.print_vi		= indirect_print_vi
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// direntry functions
+//
+
+
+static int direntry_bytes_number (struct item_head * ih, int block_size)
+{
+    reiserfs_warning (NULL, "vs-16090: direntry_bytes_number: "
+		      "bytes number is asked for direntry");
+    return 0;
+}
+
+static void direntry_decrement_key (struct cpu_key * key)
+{
+    cpu_key_k_offset_dec (key);
+    if (cpu_key_k_offset (key) == 0)
+	set_cpu_key_k_type (key, TYPE_STAT_DATA);	
+}
+
+
+static int direntry_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
+{
+    if (le32_to_cpu (key->u.k_offset_v1.k_offset) == DOT_OFFSET)
+	return 0;
+    return 1;
+	
+}
+
+
+static void direntry_print_item (struct item_head * ih, char * item)
+{
+    int i;
+    int namelen;
+    struct reiserfs_de_head * deh;
+    char * name;
+    static char namebuf [80];
+
+
+    printk ("\n # %-15s%-30s%-15s%-15s%-15s\n", "Name", "Key of pointed object", "Hash", "Gen number", "Status");
+
+    deh = (struct reiserfs_de_head *)item;
+
+    for (i = 0; i < I_ENTRY_COUNT (ih); i ++, deh ++) {
+	namelen = (i ? (deh_location(deh - 1)) : ih_item_len(ih)) - deh_location(deh);
+	name = item + deh_location(deh);
+	if (name[namelen-1] == 0)
+	  namelen = strlen (name);
+	namebuf[0] = '"';
+	if (namelen > sizeof (namebuf) - 3) {
+	    strncpy (namebuf + 1, name, sizeof (namebuf) - 3);
+	    namebuf[sizeof (namebuf) - 2] = '"';
+	    namebuf[sizeof (namebuf) - 1] = 0;
+	} else {
+	    memcpy (namebuf + 1, name, namelen);
+	    namebuf[namelen + 1] = '"';
+	    namebuf[namelen + 2] = 0;
+	}
+
+	printk ("%d:  %-15s%-15d%-15d%-15Ld%-15Ld(%s)\n", 
+		i, namebuf,
+		deh_dir_id(deh), deh_objectid(deh),
+		GET_HASH_VALUE (deh_offset (deh)), GET_GENERATION_NUMBER ((deh_offset (deh))),
+		(de_hidden (deh)) ? "HIDDEN" : "VISIBLE");
+    }
+}
+
+
+static void direntry_check_item (struct item_head * ih, char * item)
+{
+    int i;
+    struct reiserfs_de_head * deh;
+
+    // FIXME: type something here!
+    deh = (struct reiserfs_de_head *)item;
+    for (i = 0; i < I_ENTRY_COUNT (ih); i ++, deh ++) {
+	;
+    }
+}
+
+
+
+#define DIRENTRY_VI_FIRST_DIRENTRY_ITEM 1
+
+/*
+ * returns the old entry number in the directory item of the real node,
+ * given the new entry number in the virtual item of the virtual node */
+static inline int old_entry_num (int is_affected, int virtual_entry_num, int pos_in_item, int mode)
+{
+    if ( mode == M_INSERT || mode == M_DELETE)
+	return virtual_entry_num;
+    
+    if (!is_affected)
+	/* cut or paste is applied to another item */
+	return virtual_entry_num;
+
+    if (virtual_entry_num < pos_in_item)
+	return virtual_entry_num;
+
+    if (mode == M_CUT)
+	return virtual_entry_num + 1;
+
+    RFALSE( mode != M_PASTE || virtual_entry_num == 0,
+	    "vs-8015: old_entry_num: mode must be M_PASTE (mode = \'%c\')", mode);
+    
+    return virtual_entry_num - 1;
+}
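+/* a worked example for old_entry_num above (illustrative only): with
+ * is_affected set and pos_in_item == 2, M_CUT maps virtual entries 0,1 to
+ * real entries 0,1 and virtual entry 2 to real entry 3 (real entry 2 is the
+ * one being removed); M_PASTE maps virtual entries above 2 to the real
+ * entry number minus one, and the size of the pasted virtual entry itself
+ * is filled in later from insert_size by direntry_create_vi */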
+
+
+
+
+/* Create an array of sizes of directory entries for the virtual
+   item. Returns the space used by the item. FIXME: no control over
+   how much space this item handler consumes */
+static int direntry_create_vi (struct virtual_node * vn,
+			       struct virtual_item * vi, 
+			       int is_affected, 
+			       int insert_size)
+{
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+    int i, j;
+    int size = sizeof (struct direntry_uarea);
+    struct reiserfs_de_head * deh;
+  
+    vi->vi_index = TYPE_DIRENTRY;
+
+    if (!(vi->vi_ih) || !vi->vi_item)
+	BUG ();
+
+
+    dir_u->flags = 0;
+    if (le_ih_k_offset (vi->vi_ih) == DOT_OFFSET)
+	dir_u->flags |= DIRENTRY_VI_FIRST_DIRENTRY_ITEM;
+
+    deh = (struct reiserfs_de_head *)(vi->vi_item);
+    
+    
+    /* the virtual directory item will have this many entries after the operation */
+    dir_u->entry_count = ih_entry_count (vi->vi_ih) + 
+	((is_affected) ? ((vn->vn_mode == M_CUT) ? -1 :
+			  (vn->vn_mode == M_PASTE ? 1 : 0)) : 0);
+    
+    for (i = 0; i < dir_u->entry_count; i ++) {
+	j = old_entry_num (is_affected, i, vn->vn_pos_in_item, vn->vn_mode);
+        dir_u->entry_sizes[i] = (j ? deh_location( &(deh[j - 1]) ) :
+                                ih_item_len (vi->vi_ih)) -
+                                deh_location( &(deh[j])) + DEH_SIZE;
+    }
+
+    size += (dir_u->entry_count * sizeof (short));
+    
+    /* set size of pasted entry */
+    if (is_affected && vn->vn_mode == M_PASTE)
+	dir_u->entry_sizes[vn->vn_pos_in_item] = insert_size;
+
+
+#ifdef CONFIG_REISERFS_CHECK
+    /* compare total size of entries with item length */
+    {
+	int k, l;
+    
+	l = 0;
+	for (k = 0; k < dir_u->entry_count; k ++)
+	    l += dir_u->entry_sizes[k];
+    
+	if (l + IH_SIZE != vi->vi_item_len + 
+	    ((is_affected && (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT)) ? insert_size : 0) ) {
+	    reiserfs_panic (NULL, "vs-8025: set_entry_sizes: (mode==%c, insert_size==%d), invalid length of directory item",
+			    vn->vn_mode, insert_size);
+	}
+    }
+#endif
+
+    return size;
+
+
+}
+
+
+//
+// return number of entries which may fit into specified amount of
+// free space, or -1 if free space is not enough even for 1 entry
+//
+static int direntry_check_left (struct virtual_item * vi, int free,
+				int start_skip, int end_skip)
+{
+    int i;
+    int entries = 0;
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+
+    for (i = start_skip; i < dir_u->entry_count - end_skip; i ++) {
+	if (dir_u->entry_sizes[i] > free)
+	    /* i-th entry doesn't fit into the remaining free space */
+	    break;
+		  
+	free -= dir_u->entry_sizes[i];
+	entries ++;
+    }
+
+    if (entries == dir_u->entry_count) {
+	reiserfs_panic (NULL, "free space %d, entry_count %d\n", free, dir_u->entry_count);
+    }
+
+    /* "." and ".." can not be separated from each other */
+    if (start_skip == 0 && (dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries < 2)
+	entries = 0;
+    
+    return entries ?: -1;
+}
+
+
+static int direntry_check_right (struct virtual_item * vi, int free)
+{
+    int i;
+    int entries = 0;
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+    
+    for (i = dir_u->entry_count - 1; i >= 0; i --) {
+	if (dir_u->entry_sizes[i] > free)
+	    /* i-th entry doesn't fit into the remaining free space */
+	    break;
+	
+	free -= dir_u->entry_sizes[i];
+	entries ++;
+    }
+    if (entries == dir_u->entry_count)
+	BUG ();
+
+    /* "." and ".." can not be separated from each other */
+    if ((dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries > dir_u->entry_count - 2)
+	entries = dir_u->entry_count - 2;
+
+    return entries ?: -1;
+}
+
+
+/* sum of entry sizes between from-th and to-th entries including both edges */
+static int direntry_part_size (struct virtual_item * vi, int first, int count)
+{
+    int i, retval;
+    int from, to;
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+    
+    retval = 0;
+    if (first == 0)
+	from = 0;
+    else
+	from = dir_u->entry_count - count;
+    to = from + count - 1;
+
+    for (i = from; i <= to; i ++)
+	retval += dir_u->entry_sizes[i];
+
+    return retval;
+}
+
+static int direntry_unit_num (struct virtual_item * vi)
+{
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+    
+    return dir_u->entry_count;
+}
+
+
+
+static void direntry_print_vi (struct virtual_item * vi)
+{
+    int i;
+    struct direntry_uarea * dir_u = vi->vi_uarea;
+
+    reiserfs_warning (NULL, "DIRENTRY, index %d, type 0x%x, %h, flags 0x%x",
+		      vi->vi_index, vi->vi_type, vi->vi_ih, dir_u->flags);
+    printk ("%d entries: ", dir_u->entry_count);
+    for (i = 0; i < dir_u->entry_count; i ++)
+	printk ("%d ", dir_u->entry_sizes[i]);
+    printk ("\n");
+}
+
+static struct item_operations direntry_ops = {
+	.bytes_number		= direntry_bytes_number,
+	.decrement_key		= direntry_decrement_key,
+	.is_left_mergeable	= direntry_is_left_mergeable,
+	.print_item		= direntry_print_item,
+	.check_item		= direntry_check_item,
+
+	.create_vi		= direntry_create_vi,
+	.check_left		= direntry_check_left,
+	.check_right		= direntry_check_right,
+	.part_size		= direntry_part_size,
+	.unit_num		= direntry_unit_num,
+	.print_vi		= direntry_print_vi
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Error catching functions to catch errors caused by incorrect item types.
+//
+static int errcatch_bytes_number (struct item_head * ih, int block_size)
+{
+    reiserfs_warning (NULL, "green-16001: Invalid item type observed, run fsck ASAP");
+    return 0;
+}
+
+static void errcatch_decrement_key (struct cpu_key * key)
+{
+    reiserfs_warning (NULL, "green-16002: Invalid item type observed, run fsck ASAP");
+}
+
+
+static int errcatch_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
+{
+    reiserfs_warning (NULL, "green-16003: Invalid item type observed, run fsck ASAP");
+    return 0;
+}
+
+
+static void errcatch_print_item (struct item_head * ih, char * item)
+{
+    reiserfs_warning (NULL, "green-16004: Invalid item type observed, run fsck ASAP");
+}
+
+
+static void errcatch_check_item (struct item_head * ih, char * item)
+{
+    reiserfs_warning (NULL, "green-16005: Invalid item type observed, run fsck ASAP");
+}
+
+static int errcatch_create_vi (struct virtual_node * vn,
+			       struct virtual_item * vi, 
+			       int is_affected, 
+			       int insert_size)
+{
+    reiserfs_warning (NULL, "green-16006: Invalid item type observed, run fsck ASAP");
+    return 0;	// We might return -1 here as well, but it won't help:
+		// create_virtual_node(), which calls this operation, returns void.
+}
+
+static int errcatch_check_left (struct virtual_item * vi, int free,
+				int start_skip, int end_skip)
+{
+    reiserfs_warning (NULL, "green-16007: Invalid item type observed, run fsck ASAP");
+    return -1;
+}
+
+
+static int errcatch_check_right (struct virtual_item * vi, int free)
+{
+    reiserfs_warning (NULL, "green-16008: Invalid item type observed, run fsck ASAP");
+    return -1;
+}
+
+static int errcatch_part_size (struct virtual_item * vi, int first, int count)
+{
+    reiserfs_warning (NULL, "green-16009: Invalid item type observed, run fsck ASAP");
+    return 0;
+}
+
+static int errcatch_unit_num (struct virtual_item * vi)
+{
+    reiserfs_warning (NULL, "green-16010: Invalid item type observed, run fsck ASAP");
+    return 0;
+}
+
+static void errcatch_print_vi (struct virtual_item * vi)
+{
+    reiserfs_warning (NULL, "green-16011: Invalid item type observed, run fsck ASAP");
+}
+
+static struct item_operations errcatch_ops = {
+    errcatch_bytes_number,
+    errcatch_decrement_key,
+    errcatch_is_left_mergeable,
+    errcatch_print_item,
+    errcatch_check_item,
+
+    errcatch_create_vi,
+    errcatch_check_left,
+    errcatch_check_right,
+    errcatch_part_size,
+    errcatch_unit_num,
+    errcatch_print_vi
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+//
+#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
+  do not compile
+#endif
+
+struct item_operations * item_ops [TYPE_ANY + 1] = {
+  &stat_data_ops,
+  &indirect_ops,
+  &direct_ops,
+  &direntry_ops,
+  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+  &errcatch_ops		/* This is to catch errors with invalid type (15th entry for TYPE_ANY) */
+};
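+
+/* Callers are expected to reach this table through the op_* dispatch macros
+ * (a sketch, assuming the macros declared in reiserfs_fs.h):
+ *
+ *   op_bytes_number(ih, bsize) -> item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
+ *
+ * so an item head whose key carries the TYPE_ANY "unknown" type lands on
+ * errcatch_ops above instead of on a real handler. */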
+
+
+
+
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
new file mode 100644
index 0000000..c9ad3a7
--- /dev/null
+++ b/fs/reiserfs/journal.c
@@ -0,0 +1,3876 @@
+/*
+** Write ahead logging implementation copyright Chris Mason 2000
+**
+** The background commits make this code very interrelated, and 
+** overly complex.  I need to rethink things a bit....The major players:
+**
+** journal_begin -- call with the number of blocks you expect to log.  
+**                  If the current transaction is too
+** 		    old, it will block until the current transaction is 
+** 		    finished, and then start a new one.
+**		    Usually, your transaction will get joined in with 
+**                  previous ones for speed.
+**
+** journal_join  -- same as journal_begin, but won't block on the current 
+**                  transaction regardless of age.  Don't ever call
+**                  this.  Ever.  There are only two places it should be 
+**                  called from, and they are both inside this file.
+**
+** journal_mark_dirty -- adds blocks into this transaction.  clears any flags 
+**                       that might make them get sent to disk
+**                       and then marks them BH_JDirty.  Puts the buffer head 
+**                       into the current transaction hash.  
+**
+** journal_end -- if the current transaction is batchable, it does nothing
+**                   otherwise, it could do an async/synchronous commit, or
+**                   a full flush of all log and real blocks in the 
+**                   transaction.
+**
+** flush_old_commits -- if the current transaction is too old, it is ended and 
+**                      commit blocks are sent to disk.  Forces commit blocks 
+**                      to disk for all backgrounded commits that have been 
+**                      around too long.
+**		     -- Note, if you call this as an immediate flush from
+**		        within kupdate, it will ignore the immediate flag
+*/
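+
+/* A minimal usage sketch of the interface described above (assuming the
+** journal_begin / journal_mark_dirty / journal_end prototypes declared in
+** reiserfs_fs.h; error handling omitted):
+**
+**	struct reiserfs_transaction_handle th ;
+**
+**	journal_begin(&th, sb, jbegin_count) ;
+**	journal_mark_dirty(&th, sb, bh) ;	// log a modified buffer
+**	journal_end(&th, sb, jbegin_count) ;	// batch or commit+flush
+*/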
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/time.h>
+#include <asm/semaphore.h>
+
+#include <linux/vmalloc.h>
+#include <linux/reiserfs_fs.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/workqueue.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+
+
+/* gets a struct reiserfs_journal_list * from a list head */
+#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
+                               j_list))
+#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
+                               j_working_list))
+
+/* the number of mounted filesystems.  This is used to decide when to
+** start and kill the commit workqueue
+*/
+static int reiserfs_mounted_fs_count;
+
+static struct workqueue_struct *commit_wq;
+
+#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
+				     structs at 4k */
+#define BUFNR 64 /*read ahead */
+
+/* cnode stat bits.  Move these into reiserfs_fs.h */
+
+#define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
+#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */
+
+#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
+#define BLOCK_DIRTIED 5
+
+
+/* journal list state bits */
+#define LIST_TOUCHED 1
+#define LIST_DIRTY   2
+#define LIST_COMMIT_PENDING  4		/* someone will commit this list */
+
+/* flags for do_journal_end */
+#define FLUSH_ALL   1		/* flush commit and real blocks */
+#define COMMIT_NOW  2		/* end and commit this transaction */
+#define WAIT        4		/* wait for the log blocks to hit the disk*/
+
+static int do_journal_end(struct reiserfs_transaction_handle *,struct super_block *,unsigned long nblocks,int flags) ;
+static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
+static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall)  ;
+static int can_dirty(struct reiserfs_journal_cnode *cn) ;
+static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
+static int release_journal_dev( struct super_block *super,
+				struct reiserfs_journal *journal );
+static int dirty_one_transaction(struct super_block *s,
+                                 struct reiserfs_journal_list *jl);
+static void flush_async_commits(void *p);
+static void queue_log_writer(struct super_block *s);
+
+/* values for join in do_journal_begin_r */
+enum {
+    JBEGIN_REG = 0, /* regular journal begin */
+    JBEGIN_JOIN = 1, /* join the running transaction if at all possible */
+    JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
+};
+
+static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
+                             struct super_block * p_s_sb,
+			     unsigned long nblocks,int join);
+
+static void init_journal_hash(struct super_block *p_s_sb) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+}
+
+/*
+** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
+** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
+** more details.
+*/
+static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
+  if (bh) {
+    clear_buffer_dirty(bh);
+    clear_buffer_journal_test(bh);
+  }
+  return 0 ;
+}
+
+static void disable_barrier(struct super_block *s)
+{
+    REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
+    printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
+}
+
+static struct reiserfs_bitmap_node *
+allocate_bitmap_node(struct super_block *p_s_sb) {
+  struct reiserfs_bitmap_node *bn ;
+  static int id;
+
+  bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
+  if (!bn) {
+    return NULL ;
+  }
+  bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
+  if (!bn->data) {
+    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
+    return NULL ;
+  }
+  bn->id = id++ ;
+  memset(bn->data, 0, p_s_sb->s_blocksize) ;
+  INIT_LIST_HEAD(&bn->list) ;
+  return bn ;
+}
+
+static struct reiserfs_bitmap_node *
+get_bitmap_node(struct super_block *p_s_sb) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_bitmap_node *bn = NULL;
+  struct list_head *entry = journal->j_bitmap_nodes.next ;
+
+  journal->j_used_bitmap_nodes++ ;
+repeat:
+
+  if(entry != &journal->j_bitmap_nodes) {
+    bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
+    list_del(entry) ;
+    memset(bn->data, 0, p_s_sb->s_blocksize) ;
+    journal->j_free_bitmap_nodes-- ;
+    return bn ;
+  }
+  bn = allocate_bitmap_node(p_s_sb) ;
+  if (!bn) {
+    yield();
+    goto repeat ;
+  }
+  return bn ;
+}
+static inline void free_bitmap_node(struct super_block *p_s_sb,
+                                    struct reiserfs_bitmap_node *bn) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  journal->j_used_bitmap_nodes-- ;
+  if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
+    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
+    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
+  } else {
+    list_add(&bn->list, &journal->j_bitmap_nodes) ;
+    journal->j_free_bitmap_nodes++ ;
+  }
+}
+
+static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
+  int i ;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_bitmap_node *bn = NULL ;
+  for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
+    bn = allocate_bitmap_node(p_s_sb) ;
+    if (bn) {
+      list_add(&bn->list, &journal->j_bitmap_nodes) ;
+      journal->j_free_bitmap_nodes++ ;
+    } else {
+      break ; // this is ok, we'll try again when more are needed 
+    }
+  }
+}
+
+static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
+                                  struct reiserfs_list_bitmap *jb) {
+  int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
+  int bit_nr = block % (p_s_sb->s_blocksize << 3) ;
+
+  if (!jb->bitmaps[bmap_nr]) {
+    jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
+  }
+  set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
+  return 0 ;
+}
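+/* e.g. for set_bit_in_list_bitmap above (illustrative): with a 4k blocksize
+** each bitmap node covers 4096 << 3 == 32768 blocks, so block 40000 maps to
+** bmap_nr 1, bit_nr 7232
+*/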
+
+static void cleanup_bitmap_list(struct super_block *p_s_sb,
+                                struct reiserfs_list_bitmap *jb) {
+  int i;
+  if (jb->bitmaps == NULL)
+    return;
+
+  for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
+    if (jb->bitmaps[i]) {
+      free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
+      jb->bitmaps[i] = NULL ;
+    }
+  }
+}
+
+/*
+** only call this on FS unmount.
+*/
+static int free_list_bitmaps(struct super_block *p_s_sb,
+                             struct reiserfs_list_bitmap *jb_array) {
+  int i ;
+  struct reiserfs_list_bitmap *jb ;
+  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+    jb = jb_array + i ;
+    jb->journal_list = NULL ;
+    cleanup_bitmap_list(p_s_sb, jb) ;
+    vfree(jb->bitmaps) ;
+    jb->bitmaps = NULL ;
+  }
+  return 0;
+}
+
+static int free_bitmap_nodes(struct super_block *p_s_sb) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct list_head *next = journal->j_bitmap_nodes.next ;
+  struct reiserfs_bitmap_node *bn ;
+
+  while(next != &journal->j_bitmap_nodes) {
+    bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
+    list_del(next) ;
+    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
+    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
+    next = journal->j_bitmap_nodes.next ;
+    journal->j_free_bitmap_nodes-- ;
+  }
+
+  return 0 ;
+}
+
+/*
+** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. 
+** jb_array is the array to be filled in.
+*/
+int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
+                                   struct reiserfs_list_bitmap *jb_array,
+				   int bmap_nr) {
+  int i ;
+  int failed = 0 ;
+  struct reiserfs_list_bitmap *jb ;
+  int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;
+
+  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+    jb = jb_array + i ;
+    jb->journal_list = NULL ;
+    jb->bitmaps = vmalloc( mem ) ;
+    if (!jb->bitmaps) {
+      reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists") ;
+      failed = 1;   
+      break ;
+    }
+    memset(jb->bitmaps, 0, mem) ;
+  }
+  if (failed) {
+    free_list_bitmaps(p_s_sb, jb_array) ;
+    return -1 ;
+  }
+  return 0 ;
+}
+
+/*
+** find an available list bitmap.  If you can't find one, flush a commit list 
+** and try again
+*/
+static struct reiserfs_list_bitmap *
+get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
+  int i,j ; 
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_list_bitmap *jb = NULL ;
+
+  for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
+    i = journal->j_list_bitmap_index ;
+    journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
+    jb = journal->j_list_bitmap + i ;
+    if (journal->j_list_bitmap[i].journal_list) {
+      flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1) ;
+      if (!journal->j_list_bitmap[i].journal_list) {
+	break ;
+      }
+    } else {
+      break ;
+    }
+  }
+  if (jb->journal_list) { /* double check to make sure it flushed correctly */
+    return NULL ;
+  }
+  jb->journal_list = jl ;
+  return jb ;
+}
+
+/* 
+** allocates a new chunk of X nodes, and links them all together as a list.
+** Uses the cnode->next and cnode->prev pointers
+** returns NULL on failure
+*/
+static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
+  struct reiserfs_journal_cnode *head ;
+  int i ;
+  if (num_cnodes <= 0) {
+    return NULL ;
+  }
+  head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
+  if (!head) {
+    return NULL ;
+  }
+  memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
+  head[0].prev = NULL ;
+  head[0].next = head + 1 ;
+  for (i = 1 ; i < num_cnodes; i++) {
+    head[i].prev = head + (i - 1) ;
+    head[i].next = head + (i + 1) ; /* the last one is fixed up after the loop */
+  }
+  head[num_cnodes -1].next = NULL ;
+  return head ;
+}
+
+/*
+** pulls a cnode off the free list, or returns NULL on failure 
+*/
+static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
+  struct reiserfs_journal_cnode *cn ;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+  reiserfs_check_lock_depth(p_s_sb, "get_cnode") ;
+
+  if (journal->j_cnode_free <= 0) {
+    return NULL ;
+  }
+  journal->j_cnode_used++ ;
+  journal->j_cnode_free-- ;
+  cn = journal->j_cnode_free_list ;
+  if (!cn) {
+    return cn ;
+  }
+  if (cn->next) {
+    cn->next->prev = NULL ;
+  }
+  journal->j_cnode_free_list = cn->next ;
+  memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
+  return cn ;
+}
+
+/*
+** returns a cnode to the free list 
+*/
+static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+  reiserfs_check_lock_depth(p_s_sb, "free_cnode") ;
+
+  journal->j_cnode_used-- ;
+  journal->j_cnode_free++ ;
+  /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
+  cn->next = journal->j_cnode_free_list ;
+  if (journal->j_cnode_free_list) {
+    journal->j_cnode_free_list->prev = cn ;
+  }
+  cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
+  journal->j_cnode_free_list = cn ;
+}
+
+static void clear_prepared_bits(struct buffer_head *bh) {
+  clear_buffer_journal_prepared (bh);
+  clear_buffer_journal_restore_dirty (bh);
+}
+
+/* utility function to force a BUG if it is called without the big
+** kernel lock held.  caller is the string printed just before calling BUG()
+*/
+void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
+#ifdef CONFIG_SMP
+  if (current->lock_depth < 0) {
+    reiserfs_panic (sb, "%s called without kernel lock held", caller) ;
+  }
+#else
+  ;
+#endif
+}
+
+/* return a cnode with same dev, block number and size in table, or null if not found */
+static inline struct reiserfs_journal_cnode *
+get_journal_hash_dev(struct super_block *sb,
+		     struct reiserfs_journal_cnode **table,
+		     long bl)
+{
+  struct reiserfs_journal_cnode *cn ;
+  cn = journal_hash(table, sb, bl) ;
+  while(cn) {
+    if (cn->blocknr == bl && cn->sb == sb)
+      return cn ;
+    cn = cn->hnext ;
+  }
+  return (struct reiserfs_journal_cnode *)0 ;
+}
+
+/*
+** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
+** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
+** being overwritten by a replay after crashing.
+**
+** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
+** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
+** sure you never write the block without logging it.
+**
+** next_zero_bit is a suggestion about the next block to try for find_forward.
+** when bl is rejected because it is set in a journal list bitmap, we search
+** for the next zero bit in the bitmap that rejected bl.  Then, we return that
+** through next_zero_bit for find_forward to try.
+**
+** Just because we return something in next_zero_bit does not mean we won't
+** reject it on the next call to reiserfs_in_journal
+**
+*/
+int reiserfs_in_journal(struct super_block *p_s_sb,
+                        int bmap_nr, int bit_nr, int search_all, 
+			b_blocknr_t *next_zero_bit) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_cnode *cn ;
+  struct reiserfs_list_bitmap *jb ;
+  int i ;
+  unsigned long bl;
+
+  *next_zero_bit = 0 ; /* always start this at zero. */
+
+  PROC_INFO_INC( p_s_sb, journal.in_journal );
+  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
+  ** if we crash before the transaction that freed it commits,  this transaction won't
+  ** have committed either, and the block will never be written
+  */
+  if (search_all) {
+    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
+      jb = journal->j_list_bitmap + i ;
+      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
+          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
+	*next_zero_bit = find_next_zero_bit((unsigned long *)
+	                             (jb->bitmaps[bmap_nr]->data),
+	                             p_s_sb->s_blocksize << 3, bit_nr+1) ; 
+	return 1 ;
+      }
+    }
+  }
+
+  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
+  /* is it in any old transactions? */
+  if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
+    return 1; 
+  }
+
+  /* is it in the current transaction?  This should never happen */
+  if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
+    BUG();
+    return 1; 
+  }
+
+  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
+  /* safe for reuse */
+  return 0 ;
+}
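+/* sketch of the expected calling pattern for reiserfs_in_journal above
+** (illustrative only, variable names are hypothetical): a bitmap scan
+** looking for a reusable block would do something like
+**
+**	if (reiserfs_in_journal(s, bmap_nr, bit_nr, 1, &next)) {
+**	    bit_nr = next ;	// jump to the suggested next zero bit
+**	    continue ;
+**	}
+**	// otherwise the block is safe to reallocate
+*/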
+
+/* insert cn into table
+*/
+static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
+  struct reiserfs_journal_cnode *cn_orig ;
+
+  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
+  cn->hnext = cn_orig ;
+  cn->hprev = NULL ;
+  if (cn_orig) {
+    cn_orig->hprev = cn ;
+  }
+  journal_hash(table, cn->sb, cn->blocknr) =  cn ;
+}
+
+/* lock the current transaction */
+inline static void lock_journal(struct super_block *p_s_sb) {
+    PROC_INFO_INC( p_s_sb, journal.lock_journal );
+    down(&SB_JOURNAL(p_s_sb)->j_lock);
+}
+
+/* unlock the current transaction */
+inline static void unlock_journal(struct super_block *p_s_sb) {
+    up(&SB_JOURNAL(p_s_sb)->j_lock);
+}
+
+static inline void get_journal_list(struct reiserfs_journal_list *jl)
+{
+    jl->j_refcount++;
+}
+
+static inline void put_journal_list(struct super_block *s,
+                                   struct reiserfs_journal_list *jl)
+{
+    if (jl->j_refcount < 1) {
+        reiserfs_panic (s, "trans id %lu, refcount at %d", jl->j_trans_id,
+	                                         jl->j_refcount);
+    }
+    if (--jl->j_refcount == 0)
+        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
+}
+
+/*
+** this used to be much more involved, and I'm keeping it just in case things get ugly again.
+** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
+** transaction.
+*/
+static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
+
+  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
+  if (jb) {
+    cleanup_bitmap_list(p_s_sb, jb) ;
+  }
+  jl->j_list_bitmap->journal_list = NULL ;
+  jl->j_list_bitmap = NULL ;
+}
+
+static int journal_list_still_alive(struct super_block *s,
+                                    unsigned long trans_id)
+{
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    struct list_head *entry = &journal->j_journal_list;
+    struct reiserfs_journal_list *jl;
+
+    if (!list_empty(entry)) {
+        jl = JOURNAL_LIST_ENTRY(entry->next);
+	if (jl->j_trans_id <= trans_id) {
+	    return 1;
+	}
+    }
+    return 0;
+}
+
+static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
+    char b[BDEVNAME_SIZE];
+
+    if (buffer_journaled(bh)) {
+        reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
+	                 bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
+    }
+    if (uptodate)
+    	set_buffer_uptodate(bh) ;
+    else
+    	clear_buffer_uptodate(bh) ;
+    unlock_buffer(bh) ;
+    put_bh(bh) ;
+}
+
+static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
+    if (uptodate)
+    	set_buffer_uptodate(bh) ;
+    else
+    	clear_buffer_uptodate(bh) ;
+    unlock_buffer(bh) ;
+    put_bh(bh) ;
+}
+
+static void submit_logged_buffer(struct buffer_head *bh) {
+    get_bh(bh) ;
+    bh->b_end_io = reiserfs_end_buffer_io_sync ;
+    clear_buffer_journal_new (bh);
+    clear_buffer_dirty(bh) ;
+    if (!test_clear_buffer_journal_test (bh))
+        BUG();
+    if (!buffer_uptodate(bh))
+        BUG();
+    submit_bh(WRITE, bh) ;
+}
+
+static void submit_ordered_buffer(struct buffer_head *bh) {
+    get_bh(bh) ;
+    bh->b_end_io = reiserfs_end_ordered_io;
+    clear_buffer_dirty(bh) ;
+    if (!buffer_uptodate(bh))
+        BUG();
+    submit_bh(WRITE, bh) ;
+}
+
+static int submit_barrier_buffer(struct buffer_head *bh) {
+    get_bh(bh) ;
+    bh->b_end_io = reiserfs_end_ordered_io;
+    clear_buffer_dirty(bh) ;
+    if (!buffer_uptodate(bh))
+        BUG();
+    return submit_bh(WRITE_BARRIER, bh) ;
+}
+
+static void check_barrier_completion(struct super_block *s,
+                                     struct buffer_head *bh) {
+    if (buffer_eopnotsupp(bh)) {
+	clear_buffer_eopnotsupp(bh);
+	disable_barrier(s);
+	set_buffer_uptodate(bh);
+	set_buffer_dirty(bh);
+	sync_dirty_buffer(bh);
+    }
+}
+
+#define CHUNK_SIZE 32
+struct buffer_chunk {
+    struct buffer_head *bh[CHUNK_SIZE];
+    int nr;
+};
+
+static void write_chunk(struct buffer_chunk *chunk) {
+    int i;
+    for (i = 0; i < chunk->nr ; i++) {
+	submit_logged_buffer(chunk->bh[i]) ;
+    }
+    chunk->nr = 0;
+}
+
+static void write_ordered_chunk(struct buffer_chunk *chunk) {
+    int i;
+    for (i = 0; i < chunk->nr ; i++) {
+	submit_ordered_buffer(chunk->bh[i]) ;
+    }
+    chunk->nr = 0;
+}
+
+static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
+			 spinlock_t *lock,
+			 void (fn)(struct buffer_chunk *))
+{
+    int ret = 0;
+    if (chunk->nr >= CHUNK_SIZE)
+        BUG();
+    chunk->bh[chunk->nr++] = bh;
+    if (chunk->nr >= CHUNK_SIZE) {
+	ret = 1;
+        if (lock)
+	    spin_unlock(lock);
+        fn(chunk);
+        if (lock)
+	    spin_lock(lock);
+    }
+    return ret;
+}
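+/* typical batching pattern (a sketch; write_ordered_buffers below is the
+** real user): start with chunk.nr == 0 and feed buffers through
+** add_to_chunk, which calls fn automatically when CHUNK_SIZE is reached
+** (dropping the lock around the submits); any partial chunk left at the end
+** is flushed by hand:
+**
+**	if (chunk.nr)
+**	    write_ordered_chunk(&chunk) ;
+*/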
+
+
+static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
+static struct reiserfs_jh *alloc_jh(void) {
+    struct reiserfs_jh *jh;
+    while(1) {
+	jh = kmalloc(sizeof(*jh), GFP_NOFS);
+	if (jh) {
+	    atomic_inc(&nr_reiserfs_jh);
+	    return jh;
+	}
+        yield();
+    }
+}
+
+/*
+ * we want to free the jh when the buffer has been written
+ * and waited on
+ */
+void reiserfs_free_jh(struct buffer_head *bh) {
+    struct reiserfs_jh *jh;
+
+    jh = bh->b_private;
+    if (jh) {
+	bh->b_private = NULL;
+	jh->bh = NULL;
+	list_del_init(&jh->list);
+	kfree(jh);
+	if (atomic_read(&nr_reiserfs_jh) <= 0)
+	    BUG();
+	atomic_dec(&nr_reiserfs_jh);
+	put_bh(bh);
+    }
+}
+
+static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
+                           int tail)
+{
+    struct reiserfs_jh *jh;
+
+    if (bh->b_private) {
+	spin_lock(&j->j_dirty_buffers_lock);
+	if (!bh->b_private) {
+	    spin_unlock(&j->j_dirty_buffers_lock);
+	    goto no_jh;
+	}
+        jh = bh->b_private;
+	list_del_init(&jh->list);
+    } else {
+no_jh:
+	get_bh(bh);
+	jh = alloc_jh();
+	spin_lock(&j->j_dirty_buffers_lock);
+	/* buffer must be locked for __add_jh, so we should not be able
+	 * to have two adds at the same time
+	 */
+	if (bh->b_private)
+	    BUG();
+	jh->bh = bh;
+	bh->b_private = jh;
+    }
+    jh->jl = j->j_current_jl;
+    if (tail)
+	list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
+    else {
+	list_add_tail(&jh->list, &jh->jl->j_bh_list);
+    }
+    spin_unlock(&j->j_dirty_buffers_lock);
+    return 0;
+}
+
+int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
+    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
+}
+int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
+    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
+}
+
+#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
+static int write_ordered_buffers(spinlock_t *lock,
+				 struct reiserfs_journal *j,
+                                 struct reiserfs_journal_list *jl,
+				 struct list_head *list)
+{
+    struct buffer_head *bh;
+    struct reiserfs_jh *jh;
+    int ret = j->j_errno;
+    struct buffer_chunk chunk;
+    struct list_head tmp;
+    INIT_LIST_HEAD(&tmp);
+
+    chunk.nr = 0;
+    spin_lock(lock);
+    while(!list_empty(list)) {
+        jh = JH_ENTRY(list->next);
+	bh = jh->bh;
+	get_bh(bh);
+	if (test_set_buffer_locked(bh)) {
+	    if (!buffer_dirty(bh)) {
+		list_del_init(&jh->list);
+		list_add(&jh->list, &tmp);
+		goto loop_next;
+	    }
+	    spin_unlock(lock);
+	    if (chunk.nr)
+		write_ordered_chunk(&chunk);
+	    wait_on_buffer(bh);
+	    cond_resched();
+	    spin_lock(lock);
+	    goto loop_next;
+        }
+	if (buffer_dirty(bh)) {
+	    list_del_init(&jh->list);
+	    list_add(&jh->list, &tmp);
+            add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
+	} else {
+	    reiserfs_free_jh(bh);
+	    unlock_buffer(bh);
+	}
+loop_next:
+	put_bh(bh);
+	cond_resched_lock(lock);
+    }
+    if (chunk.nr) {
+	spin_unlock(lock);
+        write_ordered_chunk(&chunk);
+	spin_lock(lock);
+    }
+    while(!list_empty(&tmp)) {
+        jh = JH_ENTRY(tmp.prev);
+	bh = jh->bh;
+	get_bh(bh);
+	reiserfs_free_jh(bh);
+
+	if (buffer_locked(bh)) {
+	    spin_unlock(lock);
+	    wait_on_buffer(bh);
+	    spin_lock(lock);
+	}
+	if (!buffer_uptodate(bh)) {
+	    ret = -EIO;
+        }
+	put_bh(bh);
+	cond_resched_lock(lock);
+    }
+    spin_unlock(lock);
+    return ret;
+}
+
+static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    struct reiserfs_journal_list *other_jl;
+    struct reiserfs_journal_list *first_jl;
+    struct list_head *entry;
+    unsigned long trans_id = jl->j_trans_id;
+    unsigned long other_trans_id;
+    unsigned long first_trans_id;
+
+find_first:
+    /*
+     * first we walk backwards to find the oldest uncommitted transaction
+     */
+    first_jl = jl;
+    entry = jl->j_list.prev;
+    while(1) {
+	other_jl = JOURNAL_LIST_ENTRY(entry);
+	if (entry == &journal->j_journal_list ||
+	    atomic_read(&other_jl->j_older_commits_done))
+	    break;
+
+        first_jl = other_jl;
+	entry = other_jl->j_list.prev;
+    }
+
+    /* if we didn't find any older uncommitted transactions, return now */
+    if (first_jl == jl) {
+        return 0;
+    }
+
+    first_trans_id = first_jl->j_trans_id;
+
+    entry = &first_jl->j_list;
+    while(1) {
+	other_jl = JOURNAL_LIST_ENTRY(entry);
+	other_trans_id = other_jl->j_trans_id;
+
+	if (other_trans_id < trans_id) {
+	    if (atomic_read(&other_jl->j_commit_left) != 0) {
+		flush_commit_list(s, other_jl, 0);
+
+		/* list we were called with is gone, return */
+		if (!journal_list_still_alive(s, trans_id))
+		    return 1;
+
+		/* the one we just flushed is gone, this means all
+		 * older lists are also gone, so first_jl is no longer
+		 * valid either.  Go back to the beginning.
+		 */
+		if (!journal_list_still_alive(s, other_trans_id)) {
+		    goto find_first;
+		}
+	    }
+	    entry = entry->next;
+	    if (entry == &journal->j_journal_list)
+		return 0;
+	} else {
+	    return 0;
+	}
+    }
+    return 0;
+}
+int reiserfs_async_progress_wait(struct super_block *s) {
+    DEFINE_WAIT(wait);
+    struct reiserfs_journal *j = SB_JOURNAL(s);
+    if (atomic_read(&j->j_async_throttle))
+    	blk_congestion_wait(WRITE, HZ/10);
+    return 0;
+}
+
+/*
+** if this journal list still has commit blocks unflushed, send them to disk.
+**
+** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
+** Before the commit block can be written, every other log block must be safely on disk
+**
+*/
+static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
+  int i;
+  int bn ;
+  struct buffer_head *tbh = NULL ;
+  unsigned long trans_id = jl->j_trans_id;
+  struct reiserfs_journal *journal = SB_JOURNAL (s);
+  int barrier = 0;
+  int retval = 0;
+
+  reiserfs_check_lock_depth(s, "flush_commit_list") ;
+
+  if (atomic_read(&jl->j_older_commits_done)) {
+    return 0 ;
+  }
+
+  /* before we can put our commit blocks on disk, we have to make sure everyone older than
+  ** us is on disk too
+  */
+  BUG_ON (jl->j_len <= 0);
+  BUG_ON (trans_id == journal->j_trans_id);
+
+  get_journal_list(jl);
+  if (flushall) {
+    if (flush_older_commits(s, jl) == 1) {
+      /* list disappeared during flush_older_commits.  return */
+      goto put_jl;
+    }
+  }
+
+  /* make sure nobody is trying to flush this one at the same time */
+  down(&jl->j_commit_lock);
+  if (!journal_list_still_alive(s, trans_id)) {
+    up(&jl->j_commit_lock);
+    goto put_jl;
+  }
+  BUG_ON (jl->j_trans_id == 0);
+
+  /* this commit is done, exit */
+  if (atomic_read(&(jl->j_commit_left)) <= 0) {
+    if (flushall) {
+      atomic_set(&(jl->j_older_commits_done), 1) ;
+    }
+    up(&jl->j_commit_lock);
+    goto put_jl;
+  }
+
+  if (!list_empty(&jl->j_bh_list)) {
+      unlock_kernel();
+      write_ordered_buffers(&journal->j_dirty_buffers_lock,
+                            journal, jl, &jl->j_bh_list);
+      lock_kernel();
+  }
+  BUG_ON (!list_empty(&jl->j_bh_list));
+  /*
+   * for the description block and all the log blocks, submit any buffers
+   * that haven't already reached the disk
+   */
+  atomic_inc(&journal->j_async_throttle);
+  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
+    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %
+         SB_ONDISK_JOURNAL_SIZE(s);
+    tbh = journal_find_get_block(s, bn) ;
+    if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
+	ll_rw_block(WRITE, 1, &tbh) ;
+    put_bh(tbh) ;
+  }
+  atomic_dec(&journal->j_async_throttle);
+
+  /* wait on everything written so far before writing the commit
+   * if we are in barrier mode, send the commit down now
+   */
+  barrier = reiserfs_barrier_flush(s);
+  if (barrier) {
+      int ret;
+      lock_buffer(jl->j_commit_bh);
+      ret = submit_barrier_buffer(jl->j_commit_bh);
+      if (ret == -EOPNOTSUPP) {
+	  set_buffer_uptodate(jl->j_commit_bh);
+          disable_barrier(s);
+	  barrier = 0;
+      }
+  }
+  for (i = 0 ;  i < (jl->j_len + 1) ; i++) {
+    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
+	 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
+    tbh = journal_find_get_block(s, bn) ;
+    wait_on_buffer(tbh) ;
+    // since we're using ll_rw_block above, it might have skipped over
+    // a locked buffer.  Double check here
+    //
+    if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
+      sync_dirty_buffer(tbh);
+    if (unlikely (!buffer_uptodate(tbh))) {
+#ifdef CONFIG_REISERFS_CHECK
+      reiserfs_warning(s, "journal-601, buffer write failed") ;
+#endif
+      retval = -EIO;
+    }
+    put_bh(tbh) ; /* once for journal_find_get_block */
+    put_bh(tbh) ;    /* once due to original getblk in do_journal_end */
+    atomic_dec(&(jl->j_commit_left)) ;
+  }
+
+  BUG_ON (atomic_read(&(jl->j_commit_left)) != 1);
+
+  if (!barrier) {
+      if (buffer_dirty(jl->j_commit_bh))
+	BUG();
+      mark_buffer_dirty(jl->j_commit_bh) ;
+      sync_dirty_buffer(jl->j_commit_bh) ;
+  } else
+      wait_on_buffer(jl->j_commit_bh);
+
+  check_barrier_completion(s, jl->j_commit_bh);
+
+  /* If there was a write error in the journal - we can't commit this
+   * transaction - it will be invalid and, if successful, will just end
+   * up propagating the write error out to the filesystem. */
+  if (unlikely (!buffer_uptodate(jl->j_commit_bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+    reiserfs_warning(s, "journal-615: buffer write failed") ;
+#endif
+    retval = -EIO;
+  }
+  bforget(jl->j_commit_bh) ;
+  if (journal->j_last_commit_id != 0 &&
+     (jl->j_trans_id - journal->j_last_commit_id) != 1) {
+      reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
+                       journal->j_last_commit_id,
+		       jl->j_trans_id);
+  }
+  journal->j_last_commit_id = jl->j_trans_id;
+
+  /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
+  cleanup_freed_for_journal_list(s, jl) ;
+
+  retval = retval ? retval : journal->j_errno;
+
+  /* mark the metadata dirty */
+  if (!retval)
+    dirty_one_transaction(s, jl);
+  atomic_dec(&(jl->j_commit_left)) ;
+
+  if (flushall) {
+    atomic_set(&(jl->j_older_commits_done), 1) ;
+  }
+  up(&jl->j_commit_lock);
+put_jl:
+  put_journal_list(s, jl);
+
+  if (retval)
+    reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
+  return retval;
+}
+
+/*
+** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or 
+** returns NULL if it can't find anything 
+*/
+static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
+  struct super_block *sb = cn->sb;
+  b_blocknr_t blocknr = cn->blocknr ;
+
+  cn = cn->hprev ;
+  while(cn) {
+    if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
+      return cn->jlist ;
+    }
+    cn = cn->hprev ;
+  }
+  return NULL ;
+}
+
+static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
+struct reiserfs_journal_list *, unsigned long, int);
+
+/*
+** once all the real blocks have been flushed, it is safe to remove them from the
+** journal list for this transaction.  Aside from freeing the cnode, this also allows the
+** block to be reallocated for data blocks if it had been deleted.
+*/
+static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_cnode *cn, *last ;
+  cn = jl->j_realblock ;
+
+  /* which is better, to lock once around the whole loop, or
+  ** to lock for each call to remove_journal_hash?
+  */
+  while(cn) {
+    if (cn->blocknr != 0) {
+      if (debug) {
+       reiserfs_warning (p_s_sb, "block %u, bh is %d, state %ld", cn->blocknr,
+                         cn->bh ? 1: 0, cn->state) ;
+      }
+      cn->state = 0 ;
+      remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1) ;
+    }
+    last = cn ;
+    cn = cn->next ;
+    free_cnode(p_s_sb, last) ;
+  }
+  jl->j_realblock = NULL ;
+}
+
+/*
+** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
+** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
+** releasing blocks in this transaction for reuse as data blocks.
+** called by flush_journal_list, before it calls remove_all_from_journal_list
+**
+*/
+static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
+  struct reiserfs_journal_header *jh ;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+  if (reiserfs_is_journal_aborted (journal))
+    return -EIO;
+
+  if (trans_id >= journal->j_last_flush_trans_id) {
+    if (buffer_locked((journal->j_header_bh)))  {
+      wait_on_buffer((journal->j_header_bh)) ;
+      if (unlikely (!buffer_uptodate(journal->j_header_bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+        reiserfs_warning (p_s_sb, "journal-699: buffer write failed") ;
+#endif
+        return -EIO;
+      }
+    }
+    journal->j_last_flush_trans_id = trans_id ;
+    journal->j_first_unflushed_offset = offset ;
+    jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
+    jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
+    jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
+    jh->j_mount_id = cpu_to_le32(journal->j_mount_id) ;
+
+    if (reiserfs_barrier_flush(p_s_sb)) {
+	int ret;
+	lock_buffer(journal->j_header_bh);
+	ret = submit_barrier_buffer(journal->j_header_bh);
+	if (ret == -EOPNOTSUPP) {
+	    set_buffer_uptodate(journal->j_header_bh);
+	    disable_barrier(p_s_sb);
+	    goto sync;
+	}
+	wait_on_buffer(journal->j_header_bh);
+	check_barrier_completion(p_s_sb, journal->j_header_bh);
+    } else {
+sync:
+	set_buffer_dirty(journal->j_header_bh) ;
+	sync_dirty_buffer(journal->j_header_bh) ;
+    }
+    if (!buffer_uptodate(journal->j_header_bh)) {
+      reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
+      return -EIO ;
+    }
+  }
+  return 0 ;
+}
+
+static int update_journal_header_block(struct super_block *p_s_sb, 
+                                       unsigned long offset, 
+				       unsigned long trans_id) {
+    return _update_journal_header_block(p_s_sb, offset, trans_id);
+}
+/* 
+** flush any and all journal lists older than you are 
+** can only be called from flush_journal_list
+*/
+static int flush_older_journal_lists(struct super_block *p_s_sb,
+                                     struct reiserfs_journal_list *jl)
+{
+    struct list_head *entry;
+    struct reiserfs_journal_list *other_jl ;
+    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+    unsigned long trans_id = jl->j_trans_id;
+
+    /* we know we are the only ones flushing things, no extra race
+     * protection is required.
+     */
+restart:
+    entry = journal->j_journal_list.next;
+    /* Did we wrap? */
+    if (entry == &journal->j_journal_list)
+        return 0;
+    other_jl = JOURNAL_LIST_ENTRY(entry);
+    if (other_jl->j_trans_id < trans_id) {
+        BUG_ON (other_jl->j_refcount <= 0);
+	/* do not flush all */
+	flush_journal_list(p_s_sb, other_jl, 0) ;
+
+	/* other_jl is now deleted from the list */
+	goto restart;
+    }
+    return 0 ;
+}
+
+static void del_from_work_list(struct super_block *s,
+                               struct reiserfs_journal_list *jl) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    if (!list_empty(&jl->j_working_list)) {
+	list_del_init(&jl->j_working_list);
+	journal->j_num_work_lists--;
+    }
+}
+
+/* flush a journal list, both commit and real blocks
+**
+** always set flushall to 1, unless you are calling from inside
+** flush_journal_list
+**
+** IMPORTANT.  This can only be called while there are no journal writers, 
+** and the journal is locked.  That means it can only be called from 
+** do_journal_end, or by journal_release
+*/
+static int flush_journal_list(struct super_block *s, 
+                              struct reiserfs_journal_list *jl, int flushall) {
+  struct reiserfs_journal_list *pjl ;
+  struct reiserfs_journal_cnode *cn, *last ;
+  int count ;
+  int was_jwait = 0 ;
+  int was_dirty = 0 ;
+  struct buffer_head *saved_bh ; 
+  unsigned long j_len_saved = jl->j_len ;
+  struct reiserfs_journal *journal = SB_JOURNAL (s);
+  int err = 0;
+
+  BUG_ON (j_len_saved <= 0);
+
+  if (atomic_read(&journal->j_wcount) != 0) {
+    reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
+                      atomic_read(&journal->j_wcount)) ;
+  }
+  BUG_ON (jl->j_trans_id == 0);
+
+  /* if flushall == 0, the lock is already held */
+  if (flushall) {
+      down(&journal->j_flush_sem);
+  } else if (!down_trylock(&journal->j_flush_sem)) {
+      BUG();
+  }
+
+  count = 0 ;
+  if (j_len_saved > journal->j_trans_max) {
+    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
+    return 0 ;
+  }
+
+  /* if all the work is already done, get out of here */
+  if (atomic_read(&(jl->j_nonzerolen)) <= 0 && 
+      atomic_read(&(jl->j_commit_left)) <= 0) {
+    goto flush_older_and_return ;
+  } 
+
+  /* start by putting the commit list on disk.  This will also flush 
+  ** the commit lists of any older transactions
+  */
+  flush_commit_list(s, jl, 1) ;
+
+  if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
+      BUG();
+
+  /* are we done now? */
+  if (atomic_read(&(jl->j_nonzerolen)) <= 0 && 
+      atomic_read(&(jl->j_commit_left)) <= 0) {
+    goto flush_older_and_return ;
+  }
+
+  /* loop through each cnode, see if we need to write it, 
+  ** or wait on a more recent transaction, or just ignore it 
+  */
+  if (atomic_read(&(journal->j_wcount)) != 0) {
+    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
+  }
+  cn = jl->j_realblock ;
+  while(cn) {
+    was_jwait = 0 ;
+    was_dirty = 0 ;
+    saved_bh = NULL ;
+    /* blocknr of 0 is no longer in the hash, ignore it */
+    if (cn->blocknr == 0) {
+      goto free_cnode ;
+    }
+
+    /* This transaction failed commit. Don't write out to the disk */
+    if (!(jl->j_state & LIST_DIRTY))
+        goto free_cnode;
+
+    pjl = find_newer_jl_for_cn(cn) ;
+    /* the order is important here.  We check pjl to make sure we
+    ** don't clear BH_JDirty_wait if we aren't the one writing this
+    ** block to disk
+    */
+    if (!pjl && cn->bh) {
+      saved_bh = cn->bh ;
+
+      /* we do this to make sure nobody releases the buffer while 
+      ** we are working with it 
+      */
+      get_bh(saved_bh) ;
+
+      if (buffer_journal_dirty(saved_bh)) {
+        BUG_ON (!can_dirty (cn));
+        was_jwait = 1 ;
+        was_dirty = 1 ;
+      } else if (can_dirty(cn)) {
+        /* everything with !pjl && jwait should be writable */
+	BUG();
+      }
+    }
+
+    /* if someone has this block in a newer transaction, just make
+    ** sure they are committed, and don't try writing it to disk
+    */
+    if (pjl) {
+      if (atomic_read(&pjl->j_commit_left))
+        flush_commit_list(s, pjl, 1) ;
+      goto free_cnode ;
+    }
+
+    /* bh == NULL when the block got to disk on its own, OR, 
+    ** the block got freed in a future transaction 
+    */
+    if (saved_bh == NULL) {
+      goto free_cnode ;
+    }
+
+    /* this should never happen.  kupdate_one_transaction has this list
+    ** locked while it works, so we should never see a buffer here that
+    ** is not marked JDirty_wait
+    */
+    if ((!was_jwait) && !buffer_locked(saved_bh)) {
+	reiserfs_warning (s, "journal-813: BAD! buffer %llu %cdirty %cjwait, "
+			  "not in a newer transaction",
+			  (unsigned long long)saved_bh->b_blocknr,
+			  was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
+    }
+    if (was_dirty) { 
+      /* we inc again because saved_bh gets decremented at free_cnode */
+      get_bh(saved_bh) ;
+      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
+      lock_buffer(saved_bh);
+      BUG_ON (cn->blocknr != saved_bh->b_blocknr);
+      if (buffer_dirty(saved_bh))
+        submit_logged_buffer(saved_bh) ;
+      else
+        unlock_buffer(saved_bh);
+      count++ ;
+    } else {
+      reiserfs_warning (s, "clm-2082: Unable to flush buffer %llu in %s",
+                        (unsigned long long)saved_bh->b_blocknr, __FUNCTION__);
+    }
+free_cnode:
+    last = cn ;
+    cn = cn->next ;
+    if (saved_bh) {
+      /* we incremented this to keep others from taking the buffer head away */
+      put_bh(saved_bh) ;
+      if (atomic_read(&(saved_bh->b_count)) < 0) {
+        reiserfs_warning (s, "journal-945: saved_bh->b_count < 0");
+      }
+    }
+  }
+  if (count > 0) {
+    cn = jl->j_realblock ;
+    while(cn) {
+      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
+	if (!cn->bh) {
+	  reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
+	}
+	wait_on_buffer(cn->bh) ;
+	if (!cn->bh) {
+	  reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
+	}
+	if (unlikely (!buffer_uptodate(cn->bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+	  reiserfs_warning(s, "journal-949: buffer write failed\n") ;
+#endif
+          err = -EIO;
+  	}
+	/* note, we must clear the JDirty_wait bit after the up to date
+	** check, otherwise we race against our flushpage routine
+	*/
+        BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));
+
+        /* undo the inc from journal_mark_dirty */
+	put_bh(cn->bh) ;
+        brelse(cn->bh) ;
+      }
+      cn = cn->next ;
+    }
+  }
+
+  if (err)
+    reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
+flush_older_and_return:
+
+
+  /* before we can update the journal header block, we _must_ flush all 
+  ** real blocks from all older transactions to disk.  This is because
+  ** once the header block is updated, this transaction will not be
+  ** replayed after a crash
+  */
+  if (flushall) {
+    flush_older_journal_lists(s, jl);
+  } 
+  
+  err = journal->j_errno;
+  /* before we can remove everything from the hash tables for this 
+  ** transaction, we must make sure it can never be replayed
+  **
+  ** since we are only called from do_journal_end, we know for sure there
+  ** are no allocations going on while we are flushing journal lists.  So,
+  ** we only need to update the journal header block for the last list
+  ** being flushed
+  */
+  if (!err && flushall) {
+    err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
+    if (err)
+        reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
+  }
+  remove_all_from_journal_list(s, jl, 0) ;
+  list_del_init(&jl->j_list);
+  journal->j_num_lists--;
+  del_from_work_list(s, jl);
+
+  if (journal->j_last_flush_id != 0 &&
+     (jl->j_trans_id - journal->j_last_flush_id) != 1) {
+      reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
+                       journal->j_last_flush_id,
+		       jl->j_trans_id);
+  }
+  journal->j_last_flush_id = jl->j_trans_id;
+
+  /* not strictly required since we are freeing the list, but it should
+   * help find code using dead lists later on
+   */
+  jl->j_len = 0 ;
+  atomic_set(&(jl->j_nonzerolen), 0) ;
+  jl->j_start = 0 ;
+  jl->j_realblock = NULL ;
+  jl->j_commit_bh = NULL ;
+  jl->j_trans_id = 0 ;
+  jl->j_state = 0;
+  put_journal_list(s, jl);
+  if (flushall)
+    up(&journal->j_flush_sem);
+  return err ;
+} 
+
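+/* walk the real blocks of a single journal list and queue any buffers that
+** are still dirty and safe to write into the supplied chunk.  Returns the
+** number of buffers queued for IO.
+*/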
+static int write_one_transaction(struct super_block *s,
+                                 struct reiserfs_journal_list *jl,
+				 struct buffer_chunk *chunk)
+{
+    struct reiserfs_journal_cnode *cn;
+    int ret = 0 ;
+
+    jl->j_state |= LIST_TOUCHED;
+    del_from_work_list(s, jl);
+    if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
+        return 0;
+    }
+
+    cn = jl->j_realblock ;
+    while(cn) {
+        /* if the blocknr == 0, this has been cleared from the hash,
+        ** skip it
+        */
+        if (cn->blocknr == 0) {
+            goto next ;
+        }
+        if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
+	    struct buffer_head *tmp_bh;
+	    /* we can race against journal_mark_freed when we try
+	     * to lock_buffer(cn->bh), so we have to inc the buffer
+	     * count, and recheck things after locking
+	     */
+	    tmp_bh = cn->bh;
+	    get_bh(tmp_bh);
+	    lock_buffer(tmp_bh);
+	    if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
+		if (!buffer_journal_dirty(tmp_bh) ||
+		    buffer_journal_prepared(tmp_bh))
+		    BUG();
+		add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
+		ret++;
+	    } else {
+		/* note, cn->bh might be null now */
+		unlock_buffer(tmp_bh);
+	    }
+	    put_bh(tmp_bh);
+        }
+next:
+        cn = cn->next ;
+	cond_resched();
+    }
+    return ret ;
+}
+
+/* used by flush_commit_list */
+static int dirty_one_transaction(struct super_block *s,
+                                 struct reiserfs_journal_list *jl)
+{
+    struct reiserfs_journal_cnode *cn;
+    struct reiserfs_journal_list *pjl;
+    int ret = 0 ;
+
+    jl->j_state |= LIST_DIRTY;
+    cn = jl->j_realblock ;
+    while(cn) {
+        /* look for a more recent transaction that logged this
+        ** buffer.  Only the most recent transaction with a buffer in
+        ** it is allowed to send that buffer to disk
+        */
+	pjl = find_newer_jl_for_cn(cn) ;
+        if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
+	{
+	    BUG_ON (!can_dirty(cn));
+	    /* if the buffer is prepared, it will either be logged
+	     * or restored.  If restored, we need to make sure
+	     * it actually gets marked dirty
+	     */
+            clear_buffer_journal_new (cn->bh);
+            if (buffer_journal_prepared (cn->bh)) {
+                set_buffer_journal_restore_dirty (cn->bh);
+	    } else {
+                set_buffer_journal_test (cn->bh);
+	        mark_buffer_dirty(cn->bh);
+	    }
+        } 
+        cn = cn->next ;
+    }
+    return ret ;
+}
+
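+/* write back dirty journaled blocks from a run of journal lists, starting at
+** jl, until either num_trans transactions have been handled or num_blocks
+** buffers have been queued.  Stops early at lists that are empty, already
+** touched, not yet dirty, or still committing.  Runs with j_flush_sem held
+** so the lists cannot be freed underneath us.
+*/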
+static int kupdate_transactions(struct super_block *s,
+                                   struct reiserfs_journal_list *jl,
+				   struct reiserfs_journal_list **next_jl,
+				   unsigned long *next_trans_id,
+				   int num_blocks,
+				   int num_trans) {
+    int ret = 0;
+    int written = 0 ;
+    int transactions_flushed = 0;
+    unsigned long orig_trans_id = jl->j_trans_id;
+    struct buffer_chunk chunk;
+    struct list_head *entry;
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    chunk.nr = 0;
+
+    down(&journal->j_flush_sem);
+    if (!journal_list_still_alive(s, orig_trans_id)) {
+	goto done;
+    }
+
+    /* we've got j_flush_sem held, nobody is going to delete any
+     * of these lists out from underneath us
+     */
+    while((num_trans && transactions_flushed < num_trans) ||
+          (!num_trans && written < num_blocks)) {
+
+	if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
+	    atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
+	{
+	    del_from_work_list(s, jl);
+	    break;
+	}
+	ret = write_one_transaction(s, jl, &chunk);
+
+	if (ret < 0)
+	    goto done;
+	transactions_flushed++;
+	written += ret;
+	entry = jl->j_list.next;
+
+	/* did we wrap? */
+	if (entry == &journal->j_journal_list) {
+	    break;
+        }
+	jl = JOURNAL_LIST_ENTRY(entry);
+
+	/* don't bother with older transactions */
+	if (jl->j_trans_id <= orig_trans_id)
+	    break;
+    }
+    if (chunk.nr) {
+        write_chunk(&chunk);
+    }
+
+done:
+    up(&journal->j_flush_sem);
+    return ret;
+}
+
+/* o_sync and fsync heavy applications tend to use up
+** all the journal list slots with tiny transactions.  These
+** trigger lots and lots of calls to update the header block, which
+** adds seeks and slows things down.
+**
+** This function tries to clear out a large chunk of the journal lists
+** at once, which makes everything faster since only the newest journal
+** list updates the header block
+*/
+static int flush_used_journal_lists(struct super_block *s,
+                                    struct reiserfs_journal_list *jl) {
+    unsigned long len = 0;
+    unsigned long cur_len;
+    int ret;
+    int i;
+    int limit = 256;
+    struct reiserfs_journal_list *tjl;
+    struct reiserfs_journal_list *flush_jl;
+    unsigned long trans_id;
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+
+    flush_jl = tjl = jl;
+
+    /* in data logging mode, try harder to flush a lot of blocks */
+    if (reiserfs_data_log(s))
+	limit = 1024;
+    /* flush for 256 transactions or limit blocks, whichever comes first */
+    for(i = 0 ; i < 256 && len < limit ; i++) {
+	if (atomic_read(&tjl->j_commit_left) ||
+	    tjl->j_trans_id < jl->j_trans_id) {
+	    break;
+	}
+	cur_len = atomic_read(&tjl->j_nonzerolen);
+	if (cur_len > 0) {
+	    tjl->j_state &= ~LIST_TOUCHED;
+	}
+	len += cur_len;
+	flush_jl = tjl;
+	if (tjl->j_list.next == &journal->j_journal_list)
+	    break;
+	tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
+    }
+    /* try to find a group of blocks we can flush across all the
+    ** transactions, but only bother if we've actually spanned
+    ** across multiple lists
+    */
+    if (flush_jl != jl) {
+        ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
+    }
+    flush_journal_list(s, flush_jl, 1);
+    return 0;
+}
+
+/*
+** removes any nodes in table whose block number and super block match
+** the ones given.  Only touches the hnext and hprev pointers.
+*/
+void remove_journal_hash(struct super_block *sb,
+			struct reiserfs_journal_cnode **table,
+			struct reiserfs_journal_list *jl,
+			unsigned long block, int remove_freed)
+{
+  struct reiserfs_journal_cnode *cur ;
+  struct reiserfs_journal_cnode **head ;
+
+  head= &(journal_hash(table, sb, block)) ;
+  if (!head) {
+    return ;
+  }
+  cur = *head ;
+  while(cur) {
+    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) && 
+        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
+      if (cur->hnext) {
+        cur->hnext->hprev = cur->hprev ;
+      }
+      if (cur->hprev) {
+	cur->hprev->hnext = cur->hnext ;
+      } else {
+	*head = cur->hnext ;
+      }
+      cur->blocknr = 0 ;
+      cur->sb = NULL ;
+      cur->state = 0 ;
+      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
+	atomic_dec(&(cur->jlist->j_nonzerolen)) ;
+      cur->bh = NULL ;
+      cur->jlist = NULL ;
+    } 
+    cur = cur->hnext ;
+  }
+}
+
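+/* release everything allocated for the in-memory journal: the current
+** journal list, the cnode pool, the list bitmaps and bitmap nodes, the
+** journal header buffer, and finally the journal device itself.
+*/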
+static void free_journal_ram(struct super_block *p_s_sb) {
+  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+  reiserfs_kfree(journal->j_current_jl,
+                 sizeof(struct reiserfs_journal_list), p_s_sb);
+  journal->j_num_lists--;
+
+  vfree(journal->j_cnode_free_orig) ;
+  free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
+  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
+  if (journal->j_header_bh) {
+    brelse(journal->j_header_bh) ;
+  }
+  /* j_header_bh is on the journal dev, make sure not to release the journal
+   * dev until we brelse j_header_bh
+   */
+  release_journal_dev(p_s_sb, journal);
+  vfree(journal) ;
+}
+
+/*
+** call on unmount.  Only set error to 1 if you haven't made your way out
+** of read_super() yet.  Any other caller must keep error at 0.
+*/
+static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
+  struct reiserfs_transaction_handle myth ;
+  int flushed = 0;
+  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+
+  /* we only want to flush out transactions if we were called with error == 0
+  */
+  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
+    /* end the current trans */
+    BUG_ON (!th->t_trans_id);
+    do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;
+
+    /* make sure something gets logged to force our way into the flush code */
+    if (!journal_join(&myth, p_s_sb, 1)) {
+        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+        journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+        do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
+        flushed = 1;
+    }
+  }
+
+  /* this also catches errors during the do_journal_end above */
+  if (!error && reiserfs_is_journal_aborted(journal)) {
+      memset(&myth, 0, sizeof(myth));
+      if (!journal_join_abort(&myth, p_s_sb, 1)) {
+	  reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+	  journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+          do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
+      }
+  }
+
+  reiserfs_mounted_fs_count-- ;
+  /* wait for all commits to finish */
+  cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
+  flush_workqueue(commit_wq);
+  if (!reiserfs_mounted_fs_count) {
+    destroy_workqueue(commit_wq);
+    commit_wq = NULL;
+  }
+
+  free_journal_ram(p_s_sb) ;
+
+  return 0 ;
+}
+
+/*
+** call on unmount.  Flushes all journal transactions and releases all allocated RAM.
+*/
+int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
+  return do_journal_release(th, p_s_sb, 0) ;
+}
+/*
+** only call from an error condition inside reiserfs_read_super!
+*/
+int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
+  return do_journal_release(th, p_s_sb, 1) ;
+}
+
+/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
+static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc, 
+			               struct reiserfs_journal_commit *commit) {
+  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) || 
+      get_commit_trans_len (commit) != get_desc_trans_len (desc) || 
+      get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
+      get_commit_trans_len (commit) <= 0 
+  ) {
+    return 1 ;
+  }
+  return 0 ;
+}
+/* returns 0 if it did not find a description block  
+** returns -1 if it found a corrupt commit block
+** returns 1 if both desc and commit were valid 
+*/
+static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
+  struct reiserfs_journal_desc *desc ;
+  struct reiserfs_journal_commit *commit ;
+  struct buffer_head *c_bh ;
+  unsigned long offset ;
+
+  if (!d_bh)
+      return 0 ;
+
+  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
+    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
+	              "is valid returning because trans_id %d is greater than "
+		      "oldest_invalid %lu", get_desc_trans_id(desc),
+		       *oldest_invalid_trans_id);
+      return 0 ;
+    }
+    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
+                     "is valid returning because mount_id %d is less than "
+		     "newest_mount_id %lu", get_desc_mount_id (desc),
+		     *newest_mount_id) ;
+      return -1 ;
+    }
+    if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
+      reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
+      return -1 ;
+    }
+    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+
+    /* ok, we have a journal description block, let's see if the transaction was valid */
+    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+		 ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
+    if (!c_bh)
+      return 0 ;
+    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, 
+                     "journal_transaction_is_valid, commit offset %ld had bad "
+		     "time %d or length %d",
+		     c_bh->b_blocknr -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
+		     get_commit_trans_id (commit), 
+		     get_commit_trans_len(commit));
+      brelse(c_bh) ;
+      if (oldest_invalid_trans_id) {
+	*oldest_invalid_trans_id = get_desc_trans_id(desc) ;
+	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
+		       "transaction_is_valid setting oldest invalid trans_id "
+		       "to %d", get_desc_trans_id(desc)) ;
+      }
+      return -1; 
+    }
+    brelse(c_bh) ;
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
+                   "transaction start offset %llu, len %d id %d",
+		   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+		   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
+    return 1 ;
+  } else {
+    return 0 ;
+  }
+}
+
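+/* release an array of buffer heads obtained during replay */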
+static void brelse_array(struct buffer_head **heads, int num) {
+  int i ;
+  for (i = 0 ; i < num ; i++) {
+    brelse(heads[i]) ;
+  }
+}
+
+/*
+** given the start, and values for the oldest acceptable transactions,
+** this either reads in and replays a transaction, or returns because the transaction
+** is invalid, or too old.
+*/
+static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start, 
+				    unsigned long oldest_trans_id, unsigned long newest_mount_id) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_desc *desc ;
+  struct reiserfs_journal_commit *commit ;
+  unsigned long trans_id = 0 ;
+  struct buffer_head *c_bh ;
+  struct buffer_head *d_bh ;
+  struct buffer_head **log_blocks = NULL ;
+  struct buffer_head **real_blocks = NULL ;
+  unsigned long trans_offset ;
+  int i;
+  int trans_half;
+
+  d_bh = journal_bread(p_s_sb, cur_dblock) ;
+  if (!d_bh)
+    return 1 ;
+  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+  trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
+                 "journal_read_transaction, offset %llu, len %d mount_id %d",
+		 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+		 get_desc_trans_len(desc), get_desc_mount_id(desc)) ;
+  if (get_desc_trans_id(desc) < oldest_trans_id) {
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
+                   "journal_read_trans skipping because %lu is too old",
+		   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
+    brelse(d_bh) ;
+    return 1 ;
+  }
+  if (get_desc_mount_id(desc) != newest_mount_id) {
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
+                   "journal_read_trans skipping because %d is != "
+		   "newest_mount_id %lu", get_desc_mount_id(desc),
+		    newest_mount_id) ;
+    brelse(d_bh) ;
+    return 1 ;
+  }
+  c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+		((trans_offset + get_desc_trans_len(desc) + 1) % 
+		 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
+  if (!c_bh) {
+    brelse(d_bh) ;
+    return 1 ;
+  }
+  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
+                   "commit offset %llu had bad time %d or length %d",
+		   c_bh->b_blocknr -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+		   get_commit_trans_id(commit), get_commit_trans_len(commit));
+    brelse(c_bh) ;
+    brelse(d_bh) ;
+    return 1; 
+  }
+  trans_id = get_desc_trans_id(desc) ;
+  /* now we know we've got a good transaction, and it was inside the valid time ranges */
+  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
+  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
+  if (!log_blocks  || !real_blocks) {
+    brelse(c_bh) ;
+    brelse(d_bh) ;
+    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+    reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS") ;
+    return -1 ;
+  }
+  /* get all the buffer heads */
+  trans_half = journal_trans_half (p_s_sb->s_blocksize) ;
+  for(i = 0 ; i < get_desc_trans_len(desc) ; i++) {
+    log_blocks[i] =  journal_getblk(p_s_sb,  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
+    if (i < trans_half) {
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
+    } else {
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
+    }
+    if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
+      reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
+      goto abort_replay;
+    }
+    /* make sure we don't try to replay onto log or reserved area */
+    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
+      reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block") ;
+abort_replay:
+      brelse_array(log_blocks, i) ;
+      brelse_array(real_blocks, i) ;
+      brelse(c_bh) ;
+      brelse(d_bh) ;
+      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      return -1 ;
+    }
+  }
+  /* read in the log blocks, memcpy to the corresponding real block */
+  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
+  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
+    wait_on_buffer(log_blocks[i]) ;
+    if (!buffer_uptodate(log_blocks[i])) {
+      reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed") ;
+      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
+      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
+      brelse(c_bh) ;
+      brelse(d_bh) ;
+      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      return -1 ;
+    }
+    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
+    set_buffer_uptodate(real_blocks[i]) ;
+    brelse(log_blocks[i]) ;
+  }
+  /* flush out the real blocks */
+  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
+    set_buffer_dirty(real_blocks[i]) ;
+    ll_rw_block(WRITE, 1, real_blocks + i) ;
+  }
+  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
+    wait_on_buffer(real_blocks[i]) ; 
+    if (!buffer_uptodate(real_blocks[i])) {
+      reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed") ;
+      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
+      brelse(c_bh) ;
+      brelse(d_bh) ;
+      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
+      return -1 ;
+    }
+    brelse(real_blocks[i]) ;
+  }
+  cur_dblock =  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
+  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
+                 "start to offset %ld",
+		 cur_dblock -  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
+  
+  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
+  journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+  journal->j_last_flush_trans_id = trans_id ;
+  journal->j_trans_id = trans_id + 1;
+  brelse(c_bh) ;
+  brelse(d_bh) ;
+  reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
+  reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
+  return 0 ;
+}
+
+/* This function reads blocks starting from block and to max_block of bufsize
+   size (but no more than BUFNR blocks at a time). This proved to improve
+   mounting speed on self-rebuilding raid5 arrays at least.
+   Right now it is only used from journal code. But later we might use it
+   from other places.
+   Note: Do not use journal_getblk/sb_getblk functions here! */
+static struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
+			    unsigned int max_block)
+{
+	struct buffer_head * bhlist[BUFNR];
+	unsigned int blocks = BUFNR;
+	struct buffer_head * bh;
+	int i, j;
+	
+	bh = __getblk (dev, block, bufsize );
+	if (buffer_uptodate (bh))
+		return (bh);   
+		
+	if (block + BUFNR > max_block) {
+		blocks = max_block - block;
+	}
+	bhlist[0] = bh;
+	j = 1;
+	for (i = 1; i < blocks; i++) {
+		bh = __getblk (dev, block + i, bufsize);
+		if (buffer_uptodate (bh)) {
+			brelse (bh);
+			break;
+		}
+		else bhlist[j++] = bh;
+	}
+	ll_rw_block (READ, j, bhlist);
+	for(i = 1; i < j; i++) 
+		brelse (bhlist[i]);
+	bh = bhlist[0];
+	wait_on_buffer (bh);
+	if (buffer_uptodate (bh))
+		return bh;
+	brelse (bh);
+	return NULL;
+}
+
+/*
+** read and replay the log
+** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
+** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
+**
+** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
+**
+** On exit, it sets things up so the first transaction will work correctly.
+*/
+static int journal_read(struct super_block *p_s_sb) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_desc *desc ;
+  unsigned long oldest_trans_id = 0;
+  unsigned long oldest_invalid_trans_id = 0 ;
+  time_t start ;
+  unsigned long oldest_start = 0;
+  unsigned long cur_dblock = 0 ;
+  unsigned long newest_mount_id = 9 ;
+  struct buffer_head *d_bh ;
+  struct reiserfs_journal_header *jh ;
+  int valid_journal_header = 0 ;
+  int replay_count = 0 ;
+  int continue_replay = 1 ;
+  int ret ;
+  char b[BDEVNAME_SIZE];
+
+  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+  reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
+	 bdevname(journal->j_dev_bd, b));
+  start = get_seconds();
+
+  /* step 1, read in the journal header block.  Check the transaction it says 
+  ** is the first unflushed, and if that transaction is not valid, 
+  ** replay is done
+  */
+  journal->j_header_bh = journal_bread(p_s_sb,
+					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
+					   SB_ONDISK_JOURNAL_SIZE(p_s_sb));
+  if (!journal->j_header_bh) {
+    return 1 ;
+  }
+  jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
+  if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 && 
+      le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) && 
+      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
+    oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
+                       le32_to_cpu(jh->j_first_unflushed_offset) ;
+    oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
+    newest_mount_id = le32_to_cpu(jh->j_mount_id);
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
+                   "header: first_unflushed_offset %d, last_flushed_trans_id "
+		   "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
+		   le32_to_cpu(jh->j_last_flush_trans_id)) ;
+    valid_journal_header = 1 ;
+
+    /* now, we try to read the first unflushed offset.  If it is not valid, 
+    ** there is nothing more we can do, and it makes no sense to read 
+    ** through the whole log.
+    */
+    d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
+    ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
+    if (!ret) {
+      continue_replay = 0 ;
+    }
+    brelse(d_bh) ;
+    goto start_log_replay;
+  }
+
+  if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
+    reiserfs_warning (p_s_sb,
+		      "clm-2076: device is readonly, unable to replay log") ;
+    return -1 ;
+  }
+
+  /* ok, there are transactions that need to be replayed.  start with the first log block, find
+  ** all the valid transactions, and pick out the oldest.
+  */
+  while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
+    /* Note that the block size of the primary fs device and the journal
+       device must be the same */
+    d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
+			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
+    ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
+    if (ret == 1) {
+      desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+      if (oldest_start == 0) { /* init all oldest_ values */
+        oldest_trans_id = get_desc_trans_id(desc) ;
+	oldest_start = d_bh->b_blocknr ;
+	newest_mount_id = get_desc_mount_id(desc) ;
+	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
+	               "oldest_start to offset %llu, trans_id %lu",
+		       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+		       oldest_trans_id) ;
+      } else if (oldest_trans_id > get_desc_trans_id(desc)) { 
+        /* one we just read was older */
+        oldest_trans_id = get_desc_trans_id(desc) ;
+	oldest_start = d_bh->b_blocknr ;
+	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
+	               "oldest_start to offset %lu, trans_id %lu",
+			oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+			oldest_trans_id) ;
+      }
+      if (newest_mount_id < get_desc_mount_id(desc)) {
+        newest_mount_id = get_desc_mount_id(desc) ;
+	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
+	              "newest_mount_id to %d", get_desc_mount_id(desc));
+      }
+      cur_dblock += get_desc_trans_len(desc) + 2 ;
+    } else {
+      cur_dblock++ ;
+    }
+    brelse(d_bh) ;
+  }
+
+start_log_replay:
+  cur_dblock = oldest_start ;
+  if (oldest_trans_id)  {
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
+                   "from offset %llu, trans_id %lu",
+		   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb), 
+		   oldest_trans_id) ;
+
+  }
+  replay_count = 0 ;
+  while(continue_replay && oldest_trans_id > 0) {
+    ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
+    if (ret < 0) {
+      return ret ;
+    } else if (ret != 0) {
+      break ;
+    }
+    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
+    replay_count++ ;
+   if (cur_dblock == oldest_start)
+        break;
+  }
+
+  if (oldest_trans_id == 0) {
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
+                   "transactions found") ;
+  }
+  /* j_start does not get set correctly if we don't replay any transactions.
+  ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
+  ** copy the trans_id from the header
+  */
+  if (valid_journal_header && replay_count == 0) { 
+    journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
+    journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
+    journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
+    journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
+  } else {
+    journal->j_mount_id = newest_mount_id + 1 ;
+  }
+  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
+                 "newest_mount_id to %lu", journal->j_mount_id) ;
+  journal->j_first_unflushed_offset = journal->j_start ;
+  if (replay_count > 0) {
+    reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
+		   replay_count, get_seconds() - start) ;
+  }
+  if (!bdev_read_only(p_s_sb->s_bdev) && 
+       _update_journal_header_block(p_s_sb, journal->j_start,
+                                   journal->j_last_flush_trans_id))
+  {
+      /* replay failed, caller must call free_journal_ram and abort
+      ** the mount
+      */
+      return -1 ;
+  }
+  return 0 ;
+}
+
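+/* allocate and initialize a new journal list, retrying the allocation until
+** it succeeds, and take a reference on it for the caller.
+*/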
+static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
+{
+    struct reiserfs_journal_list *jl;
+retry:
+    jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, s);
+    if (!jl) {
+	yield();
+	goto retry;
+    }
+    memset(jl, 0, sizeof(*jl));
+    INIT_LIST_HEAD(&jl->j_list);
+    INIT_LIST_HEAD(&jl->j_working_list);
+    INIT_LIST_HEAD(&jl->j_tail_bh_list);
+    INIT_LIST_HEAD(&jl->j_bh_list);
+    sema_init(&jl->j_commit_lock, 1);
+    SB_JOURNAL(s)->j_num_lists++;
+    get_journal_list(jl);
+    return jl;
+}
+
+static void journal_list_init(struct super_block *p_s_sb) {
+    SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
+}
+
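+/* close the journal device, whether it was opened through a file pointer
+** (the "jdev" mount option case) or directly as a block device.
+*/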
+static int release_journal_dev( struct super_block *super,
+				struct reiserfs_journal *journal )
+{
+    int result;
+    
+    result = 0;
+
+    if( journal -> j_dev_file != NULL ) {
+	result = filp_close( journal -> j_dev_file, NULL );
+	journal -> j_dev_file = NULL;
+	journal -> j_dev_bd = NULL;
+    } else if( journal -> j_dev_bd != NULL ) {
+	result = blkdev_put( journal -> j_dev_bd );
+	journal -> j_dev_bd = NULL;
+    }
+
+    if( result != 0 ) {
+	reiserfs_warning(super, "sh-457: release_journal_dev: Cannot release journal device: %i", result );
+    }
+    return result;
+}
+
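+/* open the block device holding the journal: either the device named by the
+** "jdev" mount option, or the one recorded in the super block.
+*/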
+static int journal_init_dev( struct super_block *super, 
+			     struct reiserfs_journal *journal, 
+			     const char *jdev_name )
+{
+	int result;
+	dev_t jdev;
+	int blkdev_mode = FMODE_READ | FMODE_WRITE;
+	char b[BDEVNAME_SIZE];
+
+	result = 0;
+
+	journal -> j_dev_bd = NULL;
+	journal -> j_dev_file = NULL;
+	jdev = SB_ONDISK_JOURNAL_DEVICE( super ) ?
+		new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;	
+
+	if (bdev_read_only(super->s_bdev))
+	    blkdev_mode = FMODE_READ;
+
+	/* no "jdev" option was given; open the journal device (possibly a
+	   separate device recorded in the super block) by device number */
+	if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
+		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
+		if (IS_ERR(journal->j_dev_bd)) {
+			result = PTR_ERR(journal->j_dev_bd);
+			journal->j_dev_bd = NULL;
+			reiserfs_warning (super, "sh-458: journal_init_dev: "
+					  "cannot init journal device '%s': %i",
+					  __bdevname(jdev, b), result );
+			return result;
+		} else if (jdev != super->s_dev)
+			set_blocksize(journal->j_dev_bd, super->s_blocksize);
+		return 0;
+	}
+
+	journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
+	if( !IS_ERR( journal -> j_dev_file ) ) {
+		struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
+		if( !S_ISBLK( jdev_inode -> i_mode ) ) {
+			reiserfs_warning  (super, "journal_init_dev: '%s' is "
+					   "not a block device", jdev_name );
+			result = -ENOTBLK;
+		} else  {
+			/* ok */
+			journal->j_dev_bd = I_BDEV(jdev_inode);
+			set_blocksize(journal->j_dev_bd, super->s_blocksize);
+		}
+	} else {
+		result = PTR_ERR( journal -> j_dev_file );
+		journal -> j_dev_file = NULL;
+		reiserfs_warning (super,
+				  "journal_init_dev: Cannot open '%s': %i",
+				  jdev_name, result );
+	}
+	if( result != 0 ) {
+		release_journal_dev( super, journal );
+	}
+	reiserfs_info(super, "journal_init_dev: journal device: %s\n",
+		bdevname(journal->j_dev_bd, b));
+	return result;
+}
+
+/*
+** must be called once on fs mount.  calls journal_read for you
+*/
+int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format, unsigned int commit_max_age) {
+    int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
+    struct buffer_head *bhjh;
+    struct reiserfs_super_block * rs;
+    struct reiserfs_journal_header *jh;
+    struct reiserfs_journal *journal;
+    struct reiserfs_journal_list *jl;
+    char b[BDEVNAME_SIZE];
+
+    journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
+    if (!journal) {
+	reiserfs_warning (p_s_sb, "journal-1256: unable to get memory for journal structure") ;
+	return 1 ;
+    }
+    memset(journal, 0, sizeof(struct reiserfs_journal)) ;
+    INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
+    INIT_LIST_HEAD (&journal->j_prealloc_list);
+    INIT_LIST_HEAD(&journal->j_working_list);
+    INIT_LIST_HEAD(&journal->j_journal_list);
+    journal->j_persistent_trans = 0;
+    if (reiserfs_allocate_list_bitmaps(p_s_sb,
+				       journal->j_list_bitmap,
+ 				       SB_BMAP_NR(p_s_sb)))
+	goto free_and_return ;
+    allocate_bitmap_nodes(p_s_sb) ;
+
+    /* reserved for journal area support */
+    SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
+					     REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
+					     SB_BMAP_NR(p_s_sb) + 1 :
+					     REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2); 
+    
+    /* Sanity check to see if the standard journal fits within the first
+       bitmap block (relevant for small block sizes) */
+    if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
+         (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
+	reiserfs_warning (p_s_sb, "journal-1393: journal does not fit for area "
+			  "addressed by first of bitmap blocks. It starts at "
+			  "%u and its size is %u. Block size %ld",
+			  SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
+			  SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
+	goto free_and_return;
+    }
+
+    if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
+      reiserfs_warning (p_s_sb, "sh-462: unable to initialize jornal device");
+      goto free_and_return;
+    }
+
+     rs = SB_DISK_SUPER_BLOCK(p_s_sb);
+     
+     /* read journal header */
+     bhjh = journal_bread(p_s_sb,
+		   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
+     if (!bhjh) {
+	 reiserfs_warning (p_s_sb, "sh-459: unable to read journal header");
+	 goto free_and_return;
+     }
+     jh = (struct reiserfs_journal_header *)(bhjh->b_data);
+     
+     /* make sure that the journal matches the super block */
+     if (is_reiserfs_jr(rs) && (jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs))) {
+	 reiserfs_warning (p_s_sb, "sh-460: journal header magic %x "
+			   "(device %s) does not match to magic found in super "
+			   "block %x",
+			   jh->jh_journal.jp_journal_magic,
+			   bdevname( journal->j_dev_bd, b),
+			   sb_jp_journal_magic(rs));
+	 brelse (bhjh);
+	 goto free_and_return;
+  }
+     
+  journal->j_trans_max      = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
+  journal->j_max_batch      = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
+  journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
+  journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
+
+  if (journal->j_trans_max) {
+    /* make sure these parameters are sane, and adjust them if they are not */
+    __u32 initial = journal->j_trans_max;
+    __u32 ratio = 1;
+    
+    if (p_s_sb->s_blocksize < 4096)
+      ratio = 4096 / p_s_sb->s_blocksize;
+    
+    if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
+      journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
+    if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
+      journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
+    if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
+      journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;
+    
+    if (journal->j_trans_max != initial)
+      reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
+	      initial, journal->j_trans_max);
+
+    journal->j_max_batch = journal->j_trans_max*
+      JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
+  }  
+  
+  if (!journal->j_trans_max) {
+    /* the file system was created by an old version of mkreiserfs,
+       so this field contains a zero value */
+    journal->j_trans_max      = JOURNAL_TRANS_MAX_DEFAULT ;
+    journal->j_max_batch      = JOURNAL_MAX_BATCH_DEFAULT ;
+    journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;
+    
+    /* for blocksize >= 4096 the max transaction size is 1024.  For block sizes
+       < 4096, the max transaction size is decreased proportionally */
+    if (p_s_sb->s_blocksize < 4096) {
+      journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
+      journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
+    }
+  }
+
+  journal->j_default_max_commit_age = journal->j_max_commit_age;
+
+  if (commit_max_age != 0) {
+      journal->j_max_commit_age = commit_max_age;
+      journal->j_max_trans_age = commit_max_age;
+  }
+
+  reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
+		 "journal first block %u, max trans len %u, max batch %u, "
+		 "max commit age %u, max trans age %u\n",
+		 bdevname( journal->j_dev_bd, b),
+		 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+		 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
+		 journal->j_trans_max,
+		 journal->j_max_batch,
+		 journal->j_max_commit_age,
+		 journal->j_max_trans_age);
+
+  brelse (bhjh);
+     
+  journal->j_list_bitmap_index = 0 ;
+  journal_list_init(p_s_sb) ;
+
+  memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+
+  INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
+  spin_lock_init(&journal->j_dirty_buffers_lock) ;
+
+  journal->j_start = 0 ;
+  journal->j_len = 0 ;
+  journal->j_len_alloc = 0 ;
+  atomic_set(&(journal->j_wcount), 0) ;
+  atomic_set(&(journal->j_async_throttle), 0) ;
+  journal->j_bcount = 0 ;
+  journal->j_trans_start_time = 0 ;
+  journal->j_last = NULL ;
+  journal->j_first = NULL ;
+  init_waitqueue_head(&(journal->j_join_wait)) ;
+  sema_init(&journal->j_lock, 1);
+  sema_init(&journal->j_flush_sem, 1);
+
+  journal->j_trans_id = 10 ;
+  journal->j_mount_id = 10 ;
+  journal->j_state = 0 ;
+  atomic_set(&(journal->j_jlock), 0) ;
+  journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
+  journal->j_cnode_free_orig = journal->j_cnode_free_list ;
+  journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
+  journal->j_cnode_used = 0 ;
+  journal->j_must_wait = 0 ;
+
+  init_journal_hash(p_s_sb) ;
+  jl = journal->j_current_jl;
+  jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
+  if (!jl->j_list_bitmap) {
+    reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
+    goto free_and_return;
+  }
+  if (journal_read(p_s_sb) < 0) {
+    reiserfs_warning(p_s_sb, "Replay Failure, unable to mount") ;
+    goto free_and_return;
+  }
+
+  reiserfs_mounted_fs_count++ ;
+  if (reiserfs_mounted_fs_count <= 1)
+    commit_wq = create_workqueue("reiserfs");
+
+  INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+  return 0 ;
+free_and_return:
+  free_journal_ram(p_s_sb);
+  return 1;
+}
+
+/*
+** test for a polite end of the current transaction.  Used by file_write, and should
+** be used by delete to make sure they don't write more than can fit inside a single
+** transaction
+*/
+int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
+  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
+  time_t now = get_seconds() ;
+  /* cannot restart while nested */
+  BUG_ON (!th->t_trans_id);
+  if (th->t_refcount > 1)
+    return 0 ;
+  if ( journal->j_must_wait > 0 ||
+       (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
+       atomic_read(&(journal->j_jlock)) ||
+      (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
+       journal->j_cnode_free < (journal->j_trans_max * 3)) {
+    return 1 ;
+  }
+  return 0 ;
+}
+
+/* this must be called inside a transaction, and requires the 
+** kernel_lock to be held
+*/
+void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
+    struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
+    BUG_ON (!th->t_trans_id);
+    journal->j_must_wait = 1 ;
+    set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
+    return ;
+}
+
+/* this must be called without a transaction started, and does not
+** require BKL
+*/
+void reiserfs_allow_writes(struct super_block *s) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
+    wake_up(&journal->j_join_wait) ;
+}
+
+/* this must be called without a transaction started, and does not
+** require BKL
+*/
+void reiserfs_wait_on_write_block(struct super_block *s) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    wait_event(journal->j_join_wait,
+               !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
+}
+
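+/* put the calling writer to sleep on j_join_wait until another process
+** wakes the queue; we deliberately sleep at most once.
+*/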
+static void queue_log_writer(struct super_block *s) {
+    wait_queue_t wait;
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    set_bit(J_WRITERS_QUEUED, &journal->j_state);
+
+    /*
+     * we don't want to use wait_event here because
+     * we only want to wait once.
+     */
+    init_waitqueue_entry(&wait, current);
+    add_wait_queue(&journal->j_join_wait, &wait);
+    set_current_state(TASK_UNINTERRUPTIBLE);
+    if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
+        schedule();
+    current->state = TASK_RUNNING;
+    remove_wait_queue(&journal->j_join_wait, &wait);
+}
+
+static void wake_queued_writers(struct super_block *s) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
+        wake_up(&journal->j_join_wait);
+}
+
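+/* sleep and let the running transaction continue to attract writers; returns
+** once the transaction id changes or j_bcount stops changing between sleeps.
+*/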
+static void let_transaction_grow(struct super_block *sb,
+                                 unsigned long trans_id)
+{
+    struct reiserfs_journal *journal = SB_JOURNAL (sb);
+    unsigned long bcount = journal->j_bcount;
+    while(1) {
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(1);
+	journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
+        while ((atomic_read(&journal->j_wcount) > 0 ||
+	        atomic_read(&journal->j_jlock)) &&
+	       journal->j_trans_id == trans_id) {
+	    queue_log_writer(sb);
+	}
+	if (journal->j_trans_id != trans_id)
+	    break;
+	if (bcount == journal->j_bcount)
+	    break;
+	bcount = journal->j_bcount;
+    }
+}
+
+/* join == true if you must join an existing transaction.
+** join == false if you can deal with waiting for others to finish
+**
+** this will block until the transaction is joinable.  send the number of blocks you
+** expect to use in nblocks.
+*/
+static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
+  time_t now = get_seconds() ;
+  int old_trans_id  ;
+  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+  struct reiserfs_transaction_handle myth;
+  int sched_count = 0;
+  int retval;
+
+  reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;
+
+  PROC_INFO_INC( p_s_sb, journal.journal_being );
+  /* set here for journal_join */
+  th->t_refcount = 1;
+  th->t_super = p_s_sb ;
+
+relock:
+  lock_journal(p_s_sb) ;
+  if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
+    unlock_journal (p_s_sb);
+    retval = journal->j_errno;
+    goto out_fail;
+  }
+  journal->j_bcount++;
+
+  if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
+    unlock_journal(p_s_sb) ;
+    reiserfs_wait_on_write_block(p_s_sb) ;
+    PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
+    goto relock ;
+  }
+  now = get_seconds();
+
+  /* if there is no room in the journal OR
+  ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning.
+  ** We don't sleep if there aren't other writers.
+  */
+
+  if ( (!join && journal->j_must_wait > 0) ||
+     ( !join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
+     (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
+      (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
+     (!join && atomic_read(&journal->j_jlock)) ||
+     (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
+
+    old_trans_id = journal->j_trans_id;
+    unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
+
+    if (!join && (journal->j_len_alloc + nblocks + 2) >=
+        journal->j_max_batch &&
+	((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
+    {
+	if (atomic_read(&journal->j_wcount) > 10) {
+	    sched_count++;
+	    queue_log_writer(p_s_sb);
+	    goto relock;
+	}
+    }
+    /* don't mess with joining the transaction if all we have to do is
+     * wait for someone else to do a commit
+     */
+    if (atomic_read(&journal->j_jlock)) {
+	while (journal->j_trans_id == old_trans_id &&
+	       atomic_read(&journal->j_jlock)) {
+	    queue_log_writer(p_s_sb);
+        }
+	goto relock;
+    }
+    retval = journal_join(&myth, p_s_sb, 1) ;
+    if (retval)
+        goto out_fail;
+
+    /* someone might have ended the transaction while we joined */
+    if (old_trans_id != journal->j_trans_id) {
+        retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
+    } else {
+        retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
+    }
+
+    if (retval)
+        goto out_fail;
+
+    PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
+    goto relock ;
+  }
+  /* we are the first writer, set trans_id */
+  if (journal->j_trans_start_time == 0) {
+    journal->j_trans_start_time = get_seconds();
+  }
+  atomic_inc(&(journal->j_wcount)) ;
+  journal->j_len_alloc += nblocks ;
+  th->t_blocks_logged = 0 ;
+  th->t_blocks_allocated = nblocks ;
+  th->t_trans_id = journal->j_trans_id ;
+  unlock_journal(p_s_sb) ;
+  INIT_LIST_HEAD (&th->t_list);
+  return 0 ;
+
+out_fail:
+  memset (th, 0, sizeof (*th));
+  /* Re-set th->t_super, so we can properly keep track of how many
+   * persistent transactions there are. We need to do this so if this
+   * call is part of a failed restart_transaction, we can free it later */
+  th->t_super = p_s_sb;
+  return retval;
+}
+
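+/* return a transaction handle usable by code that has no handle on its
+** stack: nest into the running transaction if there is one, otherwise
+** allocate a new handle and begin a transaction with it.  Returns NULL on
+** failure.
+*/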
+struct reiserfs_transaction_handle *
+reiserfs_persistent_transaction(struct super_block *s, int nblocks) {
+    int ret ;
+    struct reiserfs_transaction_handle *th ;
+
+    /* if we're nesting into an existing transaction, it will be
+    ** persistent on its own
+    */
+    if (reiserfs_transaction_running(s)) {
+        th = current->journal_info ;
+	th->t_refcount++ ;
+	if (th->t_refcount < 2) {
+	    BUG() ;
+	}
+	return th ;
+    }
+    th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS, s) ;
+    if (!th)
+       return NULL;
+    ret = journal_begin(th, s, nblocks) ;
+    if (ret) {
+	reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
+        return NULL;
+    }
+
+    SB_JOURNAL(s)->j_persistent_trans++;
+    return th ;
+}
+
+int
+reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
+    struct super_block *s = th->t_super;
+    int ret = 0;
+    if (th->t_trans_id)
+        ret = journal_end(th, th->t_super, th->t_blocks_allocated);
+    else
+        ret = -EIO;
+    if (th->t_refcount == 0) {
+        SB_JOURNAL(s)->j_persistent_trans--;
+	reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
+    }
+    return ret;
+}
+
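+/* begin a handle that joins the currently running transaction rather than
+** waiting for it to end (JBEGIN_JOIN).
+*/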
+static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+  struct reiserfs_transaction_handle *cur_th = current->journal_info;
+
+  /* this keeps do_journal_end from NULLing out the current->journal_info
+  ** pointer
+  */
+  th->t_handle_save = cur_th ;
+  if (cur_th && cur_th->t_refcount > 1) {
+      BUG() ;
+  }
+  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
+}
+
+int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+  struct reiserfs_transaction_handle *cur_th = current->journal_info;
+
+  /* this keeps do_journal_end from NULLing out the current->journal_info
+  ** pointer
+  */
+  th->t_handle_save = cur_th ;
+  if (cur_th && cur_th->t_refcount > 1) {
+      BUG() ;
+  }
+  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
+}
+
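+/* start a transaction handle: nest into the transaction already attached to
+** current->journal_info if there is one, otherwise begin a new one.
+*/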
+int journal_begin(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, unsigned long nblocks) {
+    struct reiserfs_transaction_handle *cur_th = current->journal_info ;
+    int ret ;
+
+    th->t_handle_save = NULL ;
+    if (cur_th) {
+	/* we are nesting into the current transaction */
+	if (cur_th->t_super == p_s_sb) {
+              BUG_ON (!cur_th->t_refcount);
+	      cur_th->t_refcount++ ;
+	      memcpy(th, cur_th, sizeof(*th));
+	      if (th->t_refcount <= 1)
+		      reiserfs_warning (p_s_sb, "BAD: refcount <= 1, but journal_info != 0");
+	      return 0;
+	} else {
+	    /* we've ended up with a handle from a different filesystem.
+	    ** save it and restore on journal_end.  This should never
+	    ** really happen...
+	    */
+	    reiserfs_warning(p_s_sb, "clm-2100: nesting into a different FS") ;
+	    th->t_handle_save = current->journal_info ;
+	    current->journal_info = th;
+	}
+    } else {
+	current->journal_info = th;
+    }
+    ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
+    if (current->journal_info != th)
+        BUG() ;
+
+    /* I guess this boils down to being the reciprocal of clm-2100 above.
+     * If do_journal_begin_r fails, we need to put it back, since journal_end
+     * won't be called to do it. */
+    if (ret)
+        current->journal_info = th->t_handle_save;
+    else
+        BUG_ON (!th->t_refcount);
+
+    return ret ;
+}
+
+/*
+** puts bh into the current transaction.  If it was already there, this removes the
+** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
+**
+** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
+** transaction is committed.
+** 
+** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+*/
+int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_cnode *cn = NULL;
+  int count_already_incd = 0 ;
+  int prepared = 0 ;
+  BUG_ON (!th->t_trans_id);
+
+  PROC_INFO_INC( p_s_sb, journal.mark_dirty );
+  if (th->t_trans_id != journal->j_trans_id) {
+    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n", 
+                   th->t_trans_id, journal->j_trans_id);
+  }
+
+  p_s_sb->s_dirt = 1;
+
+  prepared = test_clear_buffer_journal_prepared (bh);
+  clear_buffer_journal_restore_dirty (bh);
+  /* already in this transaction, we are done */
+  if (buffer_journaled(bh)) {
+    PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
+    return 0 ;
+  }
+
+  /* this must be turned into a panic instead of a warning.  We can't allow
+  ** a dirty or journal_dirty or locked buffer to be logged, as some changes
+  ** could get to disk too early.  NOT GOOD.
+  */
+  if (!prepared || buffer_dirty(bh)) {
+    reiserfs_warning (p_s_sb, "journal-1777: buffer %llu bad state "
+		      "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
+		      (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
+			    buffer_locked(bh) ? ' ' : '!',
+			    buffer_dirty(bh) ? ' ' : '!',
+			    buffer_journal_dirty(bh) ? ' ' : '!') ;
+  }
+
+  if (atomic_read(&(journal->j_wcount)) <= 0) {
+    reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
+    return 1 ;
+  }
+  /* this error means I've screwed up, and we've overflowed the transaction.  
+  ** Nothing can be done here, except make the FS readonly or panic.
+  */ 
+  if (journal->j_len >= journal->j_trans_max) {
+    reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
+  }
+
+  if (buffer_journal_dirty(bh)) {
+    count_already_incd = 1 ;
+    PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
+    clear_buffer_journal_dirty (bh);
+  }
+
+  if (journal->j_len > journal->j_len_alloc) {
+    journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
+  }
+
+  set_buffer_journaled (bh);
+
+  /* now put this guy on the end */
+  if (!cn) {
+    cn = get_cnode(p_s_sb) ;
+    if (!cn) {
+      reiserfs_panic(p_s_sb, "get_cnode failed!\n"); 
+    }
+
+    if (th->t_blocks_logged == th->t_blocks_allocated) {
+      th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
+      journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
+    }
+    th->t_blocks_logged++ ;
+    journal->j_len++ ;
+
+    cn->bh = bh ;
+    cn->blocknr = bh->b_blocknr ;
+    cn->sb = p_s_sb;
+    cn->jlist = NULL ;
+    insert_journal_hash(journal->j_hash_table, cn) ;
+    if (!count_already_incd) {
+      get_bh(bh) ;
+    }
+  }
+  cn->next = NULL ;
+  cn->prev = journal->j_last ;
+  cn->bh = bh ;
+  if (journal->j_last) {
+    journal->j_last->next = cn ;
+    journal->j_last = cn ;
+  } else {
+    journal->j_first = cn ;
+    journal->j_last = cn ;
+  }
+  return 0 ;
+}
+
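+/* drop one reference on the transaction handle; only the final reference
+** actually ends the transaction via do_journal_end.
+*/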
+int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+  if (!current->journal_info && th->t_refcount > 1)
+    reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
+                      th->t_refcount);
+
+  if (!th->t_trans_id) {
+    WARN_ON (1);
+    return -EIO;
+  }
+
+  th->t_refcount--;
+  if (th->t_refcount > 0) {
+    struct reiserfs_transaction_handle *cur_th = current->journal_info ;
+
+    /* we aren't allowed to close a nested transaction on a different
+    ** filesystem from the one in the task struct
+    */
+    if (cur_th->t_super != th->t_super)
+      BUG() ;
+
+    if (th != cur_th) {
+      memcpy(current->journal_info, th, sizeof(*th));
+      th->t_trans_id = 0;
+    }
+    return 0;
+  } else {
+    return do_journal_end(th, p_s_sb, nblocks, 0) ;
+  }
+}
+
+/* removes from the current transaction, releasing and decrementing any counters.
+** also files the removed buffer directly onto the clean list
+**
+** called by journal_mark_freed when a block has been deleted
+**
+** returns 1 if it cleaned and released the buffer. 0 otherwise
+*/
+static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
+  struct buffer_head *bh ;
+  struct reiserfs_journal_cnode *cn ;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  int ret = 0;
+
+  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
+  if (!cn || !cn->bh) {
+    return ret ;
+  }
+  bh = cn->bh ;
+  if (cn->prev) {
+    cn->prev->next = cn->next ;
+  }
+  if (cn->next) {
+    cn->next->prev = cn->prev ;
+  }
+  if (cn == journal->j_first) {
+    journal->j_first = cn->next ;
+  }
+  if (cn == journal->j_last) {
+    journal->j_last = cn->prev ;
+  }
+  if (bh)
+	remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
+  clear_buffer_journaled  (bh); /* don't log this one */
+
+  if (!already_cleaned) {
+    clear_buffer_journal_dirty (bh);
+    clear_buffer_dirty(bh);
+    clear_buffer_journal_test (bh);
+    put_bh(bh) ;
+    if (atomic_read(&(bh->b_count)) < 0) {
+      reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
+    }
+    ret = 1 ;
+  }
+  journal->j_len-- ;
+  journal->j_len_alloc-- ;
+  free_cnode(p_s_sb, cn) ;
+  return ret ;
+}
+
+/*
+** for any cnode in a journal list, it can only be dirtied if all the
+** transactions that include it are committed to disk.
+** this checks through each transaction, and returns 1 if you are allowed to dirty,
+** and 0 if you aren't
+**
+** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
+** blocks for a given transaction on disk
+**
+*/
+static int can_dirty(struct reiserfs_journal_cnode *cn) {
+  struct super_block *sb = cn->sb;
+  b_blocknr_t blocknr = cn->blocknr  ;
+  struct reiserfs_journal_cnode *cur = cn->hprev ;
+  int can_dirty = 1 ;
+  
+  /* first test hprev.  These are all newer than cn, so any node here
+  ** with the same block number and dev means this node can't be sent
+  ** to disk right now.
+  */
+  while(cur && can_dirty) {
+    if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb && 
+        cur->blocknr == blocknr) {
+      can_dirty = 0 ;
+    }
+    cur = cur->hprev ;
+  }
+  /* then test hnext.  These are all older than cn.  As long as they
+  ** are committed to the log, it is safe to write cn to disk
+  */
+  cur = cn->hnext ;
+  while(cur && can_dirty) {
+    if (cur->jlist && cur->jlist->j_len > 0 && 
+        atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh && 
+        cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
+      can_dirty = 0 ;
+    }
+    cur = cur->hnext ;
+  }
+  return can_dirty ;
+}
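+
+/* Illustrative picture of the hash chain ordering can_dirty relies on
+** (hprev points at newer cnodes, hnext at older ones):
+**
+**	newer copies  <--hprev--  cn  --hnext-->  older copies
+**
+** roughly, cn may be dirtied only if no newer copy of the same block is still
+** attached to a journal list and every older copy has finished committing
+** (its jlist has j_commit_left == 0).
+*/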
+
+/* syncs the commit blocks, but does not force the real buffers to disk
+** will wait until the current transaction is done/committed before returning
+*/
+int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+  BUG_ON (!th->t_trans_id);
+  /* you can sync while nested, very, very bad */
+  if (th->t_refcount > 1) {
+    BUG() ;
+  }
+  if (journal->j_len == 0) {
+    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+    journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+  }
+  return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
+}
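+
+/* Hedged usage sketch: a caller that needs its metadata in the log before
+** returning (an fsync-style path) ends its handle with journal_end_sync
+** instead of journal_end:
+**
+**	struct reiserfs_transaction_handle th ;
+**	if (!journal_begin(&th, p_s_sb, 1)) {
+**		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+**		journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+**		journal_end_sync(&th, p_s_sb, 1) ;
+**	}
+*/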
+
+/*
+** writeback the pending async commits to disk
+*/
+static void flush_async_commits(void *p) {
+  struct super_block *p_s_sb = p;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_list *jl;
+  struct list_head *entry;
+
+  lock_kernel();
+  if (!list_empty(&journal->j_journal_list)) {
+      /* last entry is the youngest, commit it and you get everything */
+      entry = journal->j_journal_list.prev;
+      jl = JOURNAL_LIST_ENTRY(entry);
+      flush_commit_list(p_s_sb, jl, 1);
+  }
+  unlock_kernel();
+  /*
+   * this is a little racy, but there's no harm in missing
+   * the filemap_fdata_write
+   */
+  if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
+      atomic_inc(&journal->j_async_throttle);
+      filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
+      atomic_dec(&journal->j_async_throttle);
+  }
+}
+
+/*
+** flushes any old transactions to disk
+** ends the current transaction if it is too old
+*/
+int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
+    time_t now ;
+    struct reiserfs_transaction_handle th ;
+    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+    now = get_seconds();
+    /* safety check so we don't flush while we are replaying the log during
+     * mount
+     */
+    if (list_empty(&journal->j_journal_list)) {
+	return 0  ;
+    }
+
+    /* check the current transaction.  If there are no writers, and it is
+     * too old, finish it, and force the commit blocks to disk
+     */
+    if (atomic_read(&journal->j_wcount) <= 0 &&
+        journal->j_trans_start_time > 0 &&
+        journal->j_len > 0 &&
+        (now - journal->j_trans_start_time) > journal->j_max_trans_age)
+    {
+	if (!journal_join(&th, p_s_sb, 1)) {
+            reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+            journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+
+            /* we're only being called from kreiserfsd, it makes no sense to do
+            ** an async commit so that kreiserfsd can do it later
+            */
+            do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
+        }
+    }
+    return p_s_sb->s_dirt;
+}
+
+/*
+** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
+** 
+** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all 
+** the writers are done.  By the time it wakes up, the transaction it was called with has already ended, so it just
+** flushes the commit list and returns 0.
+**
+** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
+** 
+** Note, we can't allow the journal_end to proceed while there are still writers in the log.
+*/
+static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, 
+                             unsigned long nblocks, int flags) {
+
+  time_t now ;
+  int flush = flags & FLUSH_ALL ;
+  int commit_now = flags & COMMIT_NOW ;
+  int wait_on_commit = flags & WAIT ;
+  struct reiserfs_journal_list *jl;
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
+  BUG_ON (!th->t_trans_id);
+
+  if (th->t_trans_id != journal->j_trans_id) {
+    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n", 
+                   th->t_trans_id, journal->j_trans_id);
+  }
+
+  journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
+  if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed.  unmounting might not call begin */
+    atomic_dec(&(journal->j_wcount)) ;
+  }
+
+  /* BUG: deal with the case where j_len is 0, but blocks previously freed still need to be released.
+  ** This will be dealt with by the next transaction that actually writes something, but should be taken
+  ** care of in this trans
+  */
+  if (journal->j_len == 0) {
+    BUG();
+  }
+  /* if wcount > 0, and we are called with flush or commit_now,
+  ** we wait on j_join_wait.  We will wake up when the last writer has
+  ** finished the transaction, and started it on its way to the disk.
+  ** Then, we flush the commit or journal list, and just return 0 
+  ** because the rest of journal end was already done for this transaction.
+  */
+  if (atomic_read(&(journal->j_wcount)) > 0) {
+    if (flush || commit_now) {
+      unsigned trans_id ;
+
+      jl = journal->j_current_jl;
+      trans_id = jl->j_trans_id;
+      if (wait_on_commit)
+        jl->j_state |= LIST_COMMIT_PENDING;
+      atomic_set(&(journal->j_jlock), 1) ;
+      if (flush) {
+        journal->j_next_full_flush = 1 ;
+      }
+      unlock_journal(p_s_sb) ;
+
+      /* sleep while the current transaction is still j_jlocked */
+      while(journal->j_trans_id == trans_id) {
+	if (atomic_read(&journal->j_jlock)) {
+	    queue_log_writer(p_s_sb);
+        } else {
+	    lock_journal(p_s_sb);
+	    if (journal->j_trans_id == trans_id) {
+	        atomic_set(&(journal->j_jlock), 1) ;
+	    }
+	    unlock_journal(p_s_sb);
+	}
+      }
+      if (journal->j_trans_id == trans_id) {
+          BUG();
+      }
+      if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
+          wait_on_commit)
+      {
+	  flush_commit_list(p_s_sb, jl, 1) ;
+      }
+      return 0 ;
+    } 
+    unlock_journal(p_s_sb) ;
+    return 0 ;
+  }
+
+  /* deal with old transactions where we are the last writers */
+  now = get_seconds();
+  if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
+    commit_now = 1 ;
+    journal->j_next_async_flush = 1 ;
+  }
+  /* don't batch when someone is waiting on j_join_wait */
+  /* don't batch when syncing the commit or flushing the whole trans */
+  if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
+      (journal->j_len < journal->j_max_batch)  &&
+      journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
+    journal->j_bcount++ ;
+    unlock_journal(p_s_sb) ;
+    return 0 ;
+  }
+
+  if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
+    reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
+  }
+  return 1 ;
+}
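+
+/* The batching test above reduces to, roughly:
+**
+**	if (!j_must_wait && !j_jlock && !flush && !commit_now &&
+**	    j_len < j_max_batch && j_len_alloc < j_max_batch &&
+**	    j_cnode_free > 3 * j_trans_max)
+**		keep the transaction open and return 0 ;
+**
+** so small back to back transactions are merged until a size, age, or
+** explicit flush/commit condition forces the journal_end to proceed.
+*/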
+
+/*
+** Does all the work that makes deleting blocks safe.
+** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
+** 
+** otherwise:
+** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
+** before this transaction has finished.
+**
+** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
+** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
+** the block can't be reallocated yet.
+**
+** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
+*/
+int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_cnode *cn = NULL ;
+  struct buffer_head *bh = NULL ;
+  struct reiserfs_list_bitmap *jb = NULL ;
+  int cleaned = 0 ;
+  BUG_ON (!th->t_trans_id);
+
+  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
+  if (cn && cn->bh) {
+      bh = cn->bh ;
+      get_bh(bh) ;
+  }
+  /* if it is journal new, we just remove it from this transaction */
+  if (bh && buffer_journal_new(bh)) {
+    clear_buffer_journal_new (bh);
+    clear_prepared_bits(bh) ;
+    reiserfs_clean_and_file_buffer(bh) ;
+    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
+  } else {
+    /* set the bit for this block in the journal bitmap for this transaction */
+    jb = journal->j_current_jl->j_list_bitmap;
+    if (!jb) {
+      reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
+    }
+    set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;
+
+    /* Note, the entire while loop is not allowed to schedule.  */
+
+    if (bh) {
+      clear_prepared_bits(bh) ;
+      reiserfs_clean_and_file_buffer(bh) ;
+    }
+    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
+
+    /* find all older transactions with this block, make sure they don't try to write it out */
+    cn = get_journal_hash_dev(p_s_sb,journal->j_list_hash_table,  blocknr) ;
+    while (cn) {
+      if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
+	set_bit(BLOCK_FREED, &cn->state) ;
+	if (cn->bh) {
+	  if (!cleaned) {
+	    /* remove_from_transaction will brelse the buffer if it was 
+	    ** in the current trans
+	    */
+            clear_buffer_journal_dirty (cn->bh);
+	    clear_buffer_dirty(cn->bh);
+	    clear_buffer_journal_test(cn->bh);
+	    cleaned = 1 ;
+	    put_bh(cn->bh) ;
+	    if (atomic_read(&(cn->bh->b_count)) < 0) {
+	      reiserfs_warning (p_s_sb, "journal-2138: cn->bh->b_count < 0");
+	    }
+	  }
+	  if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
+	    atomic_dec(&(cn->jlist->j_nonzerolen)) ;
+	  }
+	  cn->bh = NULL ; 
+	} 
+      }
+      cn = cn->hnext ;
+    }
+  }
+
+  if (bh) {
+    put_bh(bh) ; /* get_hash grabs the buffer */
+    if (atomic_read(&(bh->b_count)) < 0) {
+      reiserfs_warning (p_s_sb, "journal-2165: bh->b_count < 0");
+    }
+  }
+  return 0 ;
+}
+
+void reiserfs_update_inode_transaction(struct inode *inode) {
+  struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);
+  REISERFS_I(inode)->i_jl = journal->j_current_jl;
+  REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
+}
+
+/*
+ * returns -1 on error, 0 if no commits/barriers were done and 1
+ * if a transaction was actually committed and the barrier was done
+ */
+static int __commit_trans_jl(struct inode *inode, unsigned long id,
+                                 struct reiserfs_journal_list *jl)
+{
+    struct reiserfs_transaction_handle th ;
+    struct super_block *sb = inode->i_sb ;
+    struct reiserfs_journal *journal = SB_JOURNAL (sb);
+    int ret = 0;
+
+    /* is it from the current transaction, or from an unknown transaction? */
+    if (id == journal->j_trans_id) {
+	jl = journal->j_current_jl;
+	/* try to let other writers come in and grow this transaction */
+	let_transaction_grow(sb, id);
+	if (journal->j_trans_id != id) {
+	    goto flush_commit_only;
+	}
+
+	ret = journal_begin(&th, sb, 1) ;
+	if (ret)
+	    return ret;
+
+	/* someone might have ended this transaction while we joined */
+	if (journal->j_trans_id != id) {
+	    reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
+	    journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
+	    ret = journal_end(&th, sb, 1) ;
+	    goto flush_commit_only;
+	}
+
+	ret = journal_end_sync(&th, sb, 1) ;
+	if (!ret)
+	    ret = 1;
+
+    } else {
+	/* this gets tricky, we have to make sure the journal list in
+	 * the inode still exists.  We know the list is still around
+	 * if we've got a larger transaction id than the oldest list
+	 */
+flush_commit_only:
+	if (journal_list_still_alive(inode->i_sb, id)) {
+	    /*
+	     * we only set ret to 1 when we know for sure
+	     * the barrier hasn't been started yet on the commit
+	     * block.
+	     */
+	    if (atomic_read(&jl->j_commit_left) > 1)
+	        ret = 1;
+	    flush_commit_list(sb, jl, 1) ;
+	    if (journal->j_errno)
+		ret = journal->j_errno;
+	}
+    }
+    /* otherwise the list is gone, and long since committed */
+    return ret;
+}
+
+int reiserfs_commit_for_inode(struct inode *inode) {
+    unsigned long id = REISERFS_I(inode)->i_trans_id;
+    struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
+
+    /* for the whole inode, assume unset id means it was
+     * changed in the current transaction.  More conservative
+     */
+    if (!id || !jl) {
+	reiserfs_update_inode_transaction(inode) ;
+	id = REISERFS_I(inode)->i_trans_id;
+	/* jl will be updated in __commit_trans_jl */
+    }
+
+   return __commit_trans_jl(inode, id, jl);
+}
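+
+/* Hedged usage sketch: an fsync-style caller commits whatever transaction
+** last touched the inode, using the (i_trans_id, i_jl) pair recorded by
+** reiserfs_update_inode_transaction:
+**
+**	reiserfs_update_inode_transaction(inode) ;    <- when the inode is dirtied
+**	...
+**	ret = reiserfs_commit_for_inode(inode) ;      <- at fsync time
+**	if (ret < 0)
+**		err = ret ;                           <- journal error
+*/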
+
+void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb, 
+                                      struct buffer_head *bh) {
+    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+    PROC_INFO_INC( p_s_sb, journal.restore_prepared );
+    if (!bh) {
+	return ;
+    }
+    if (test_clear_buffer_journal_restore_dirty (bh) &&
+	buffer_journal_dirty(bh)) {
+	struct reiserfs_journal_cnode *cn;
+	cn = get_journal_hash_dev(p_s_sb,
+	                          journal->j_list_hash_table,
+				  bh->b_blocknr);
+	if (cn && can_dirty(cn)) {
+            set_buffer_journal_test (bh);
+	    mark_buffer_dirty(bh);
+        }
+    }
+    clear_buffer_journal_prepared (bh);
+}
+
+extern struct tree_balance *cur_tb ;
+/*
+** before we can change a metadata block, we have to make sure it won't
+** be written to disk while we are altering it.  So, we must:
+** clean it
+** wait on it.
+** 
+*/
+int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
+                                  struct buffer_head *bh, int wait) {
+  PROC_INFO_INC( p_s_sb, journal.prepare );
+
+    if (test_set_buffer_locked(bh)) {
+	if (!wait)
+	    return 0;
+	lock_buffer(bh);
+    }
+    set_buffer_journal_prepared (bh);
+    if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh))  {
+        clear_buffer_journal_test (bh);
+        set_buffer_journal_restore_dirty (bh);
+    }
+    unlock_buffer(bh);
+    return 1;
+}
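+
+/* Hedged sketch of the prepare/restore pairing: a caller that prepares a
+** buffer but then decides not to log it should undo the prepared state so the
+** saved dirty bit is not lost (still_want_to_log is a hypothetical predicate,
+** only for illustration):
+**
+**	if (reiserfs_prepare_for_journal(p_s_sb, bh, 1)) {
+**		if (still_want_to_log(bh))
+**			journal_mark_dirty(&th, p_s_sb, bh) ;
+**		else
+**			reiserfs_restore_prepared_buffer(p_s_sb, bh) ;
+**	}
+*/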
+
+static void flush_old_journal_lists(struct super_block *s) {
+    struct reiserfs_journal *journal = SB_JOURNAL (s);
+    struct reiserfs_journal_list *jl;
+    struct list_head *entry;
+    time_t now = get_seconds();
+
+    while(!list_empty(&journal->j_journal_list)) {
+        entry = journal->j_journal_list.next;
+	jl = JOURNAL_LIST_ENTRY(entry);
+	/* this check should always be run, to send old lists to disk */
+	if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
+	    flush_used_journal_lists(s, jl);
+	} else {
+	    break;
+	}
+    }
+}
+
+/* 
+** long and ugly.  If flush, will not return until all commit
+** blocks and all real buffers in the trans are on disk.
+** If no_async, won't return until all commit blocks are on disk.
+**
+** keep reading, there are comments as you go along
+**
+** If the journal is aborted, we just clean up. Things like flushing
+** journal lists, etc just won't happen.
+*/
+static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block  * p_s_sb, unsigned long nblocks, 
+		          int flags) {
+  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+  struct reiserfs_journal_cnode *cn, *next, *jl_cn; 
+  struct reiserfs_journal_cnode *last_cn = NULL;
+  struct reiserfs_journal_desc *desc ; 
+  struct reiserfs_journal_commit *commit ; 
+  struct buffer_head *c_bh ; /* commit bh */
+  struct buffer_head *d_bh ; /* desc bh */
+  int cur_write_start = 0 ; /* start index of current log write */
+  int old_start ;
+  int i ;
+  int flush = flags & FLUSH_ALL ;
+  int wait_on_commit = flags & WAIT ;
+  struct reiserfs_journal_list *jl, *temp_jl;
+  struct list_head *entry, *safe;
+  unsigned long jindex;
+  unsigned long commit_trans_id;
+  int trans_half;
+
+  BUG_ON (th->t_refcount > 1);
+  BUG_ON (!th->t_trans_id);
+
+  current->journal_info = th->t_handle_save;
+  reiserfs_check_lock_depth(p_s_sb, "journal end");
+  if (journal->j_len == 0) {
+      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+      journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+  }
+
+  lock_journal(p_s_sb) ;
+  if (journal->j_next_full_flush) {
+    flags |= FLUSH_ALL ;
+    flush = 1 ;
+  }
+  if (journal->j_next_async_flush) {
+    flags |= COMMIT_NOW | WAIT;
+    wait_on_commit = 1;
+  }
+
+  /* check_journal_end locks the journal, and unlocks if it does not return 1 
+  ** it tells us if we should continue with the journal_end, or just return
+  */
+  if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
+    p_s_sb->s_dirt = 1;
+    wake_queued_writers(p_s_sb);
+    reiserfs_async_progress_wait(p_s_sb);
+    goto out ;
+  }
+
+  /* check_journal_end might set these, check again */
+  if (journal->j_next_full_flush) {
+    flush = 1 ;
+  }
+
+  /*
+  ** j must wait means we have to flush the log blocks, and the real blocks for
+  ** this transaction
+  */
+  if (journal->j_must_wait > 0) {
+    flush = 1 ;
+  }
+
+#ifdef REISERFS_PREALLOCATE
+  /* quota ops might need to nest, setup the journal_info pointer for them */
+  current->journal_info = th ;
+  reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
+				      * the transaction */
+  current->journal_info = th->t_handle_save ;
+#endif
+  
+  /* setup description block */
+  d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start) ;
+  set_buffer_uptodate(d_bh);
+  desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
+  memset(d_bh->b_data, 0, d_bh->b_size) ;
+  memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
+  set_desc_trans_id(desc, journal->j_trans_id) ;
+
+  /* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
+  c_bh =  journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
+		 ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
+  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+  memset(c_bh->b_data, 0, c_bh->b_size) ;
+  set_commit_trans_id(commit, journal->j_trans_id) ;
+  set_buffer_uptodate(c_bh) ;
+
+  /* init this journal list */
+  jl = journal->j_current_jl;
+
+  /* we lock the commit before doing anything because
+   * we want to make sure nobody tries to run flush_commit_list until
+   * the new transaction is fully setup, and we've already flushed the
+   * ordered bh list
+   */
+  down(&jl->j_commit_lock);
+
+  /* save the transaction id in case we need to commit it later */
+  commit_trans_id = jl->j_trans_id;
+
+  atomic_set(&jl->j_older_commits_done, 0) ;
+  jl->j_trans_id = journal->j_trans_id ;
+  jl->j_timestamp = journal->j_trans_start_time ;
+  jl->j_commit_bh = c_bh ;
+  jl->j_start = journal->j_start ;
+  jl->j_len = journal->j_len ;
+  atomic_set(&jl->j_nonzerolen, journal->j_len) ;
+  atomic_set(&jl->j_commit_left, journal->j_len + 2);
+  jl->j_realblock = NULL ;
+
+  /* The ENTIRE FOR LOOP MUST NOT cause schedule to occur.
+  **  for each real block, add it to the journal list hash and
+  ** copy its block number into the real block index array in the commit or desc block
+  */
+  trans_half = journal_trans_half(p_s_sb->s_blocksize);
+  for (i = 0, cn = journal->j_first ; cn ; cn = cn->next, i++) {
+    if (buffer_journaled (cn->bh)) {
+      jl_cn = get_cnode(p_s_sb) ;
+      if (!jl_cn) {
+        reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
+      }
+      if (i == 0) {
+        jl->j_realblock = jl_cn ;
+      }
+      jl_cn->prev = last_cn ;
+      jl_cn->next = NULL ;
+      if (last_cn) {
+        last_cn->next = jl_cn ;
+      }
+      last_cn = jl_cn ;
+      /* make sure the block we are trying to log is not a block 
+         of journal or reserved area */
+
+      if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
+        reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
+      }
+      jl_cn->blocknr = cn->bh->b_blocknr ; 
+      jl_cn->state = 0 ;
+      jl_cn->sb = p_s_sb;
+      jl_cn->bh = cn->bh ;
+      jl_cn->jlist = jl;
+      insert_journal_hash(journal->j_list_hash_table, jl_cn) ;
+      if (i < trans_half) {
+	desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
+      } else {
+	commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr) ;
+      }
+    } else {
+      i-- ;
+    }
+  }
+  set_desc_trans_len(desc, journal->j_len) ;
+  set_desc_mount_id(desc, journal->j_mount_id) ;
+  set_desc_trans_id(desc, journal->j_trans_id) ;
+  set_commit_trans_len(commit, journal->j_len);
+
+  /* special check in case all buffers in the journal were marked for not logging */
+  if (journal->j_len == 0) {
+    BUG();
+  }
+
+  /* we're about to dirty all the log blocks, mark the description block
+   * dirty now too.  Don't mark the commit block dirty until all the
+   * others are on disk
+   */
+  mark_buffer_dirty(d_bh);
+
+  /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
+  cur_write_start = journal->j_start ;
+  cn = journal->j_first ;
+  jindex = 1 ; /* start at one so we don't get the desc again */
+  while(cn) {
+    clear_buffer_journal_new (cn->bh);
+    /* copy all the real blocks into log area.  dirty log blocks */
+    if (buffer_journaled (cn->bh)) {
+      struct buffer_head *tmp_bh ;
+      char *addr;
+      struct page *page;
+      tmp_bh =  journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + 
+		       ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
+      set_buffer_uptodate(tmp_bh);
+      page = cn->bh->b_page;
+      addr = kmap(page);
+      memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data),
+             cn->bh->b_size);
+      kunmap(page);
+      mark_buffer_dirty(tmp_bh);
+      jindex++ ;
+      set_buffer_journal_dirty (cn->bh);
+      clear_buffer_journaled (cn->bh);
+    } else {
+      /* JDirty cleared sometime during transaction.  don't log this one */
+      reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
+      brelse(cn->bh) ;
+    }
+    next = cn->next ;
+    free_cnode(p_s_sb, cn) ;
+    cn = next ;
+    cond_resched();
+  }
+
+  /* we are done  with both the c_bh and d_bh, but
+  ** c_bh must be written after all other commit blocks,
+  ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
+  */
+
+  journal->j_current_jl = alloc_journal_list(p_s_sb);
+
+  /* now it is safe to insert this transaction on the main list */
+  list_add_tail(&jl->j_list, &journal->j_journal_list);
+  list_add_tail(&jl->j_working_list, &journal->j_working_list);
+  journal->j_num_work_lists++;
+
+  /* reset journal values for the next transaction */
+  old_start = journal->j_start ;
+  journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
+  atomic_set(&(journal->j_wcount), 0) ;
+  journal->j_bcount = 0 ;
+  journal->j_last = NULL ;
+  journal->j_first = NULL ;
+  journal->j_len = 0 ;
+  journal->j_trans_start_time = 0 ;
+  journal->j_trans_id++ ;
+  journal->j_current_jl->j_trans_id = journal->j_trans_id;
+  journal->j_must_wait = 0 ;
+  journal->j_len_alloc = 0 ;
+  journal->j_next_full_flush = 0 ;
+  journal->j_next_async_flush = 0 ;
+  init_journal_hash(p_s_sb) ; 
+
+  // make sure reiserfs_add_jh sees the new current_jl before we
+  // write out the tails
+  smp_mb();
+
+  /* tail conversion targets have to hit the disk before we end the
+   * transaction.  Otherwise a later transaction might repack the tail
+   * before this transaction commits, leaving the data block unflushed and
+   * clean; if we crash before the later transaction commits, the data block
+   * is lost.
+   */
+  if (!list_empty(&jl->j_tail_bh_list)) {
+      unlock_kernel();
+      write_ordered_buffers(&journal->j_dirty_buffers_lock,
+			    journal, jl, &jl->j_tail_bh_list);
+      lock_kernel();
+  }
+  if (!list_empty(&jl->j_tail_bh_list))
+      BUG();
+  up(&jl->j_commit_lock);
+
+  /* honor the flush wishes from the caller, simple commits can
+  ** be done outside the journal lock, they are done below
+  **
+  ** if we don't flush the commit list right now, we put it into
+  ** the work queue so the people waiting on the async progress work
+  ** queue don't wait for this proc to flush journal lists and such.
+  */
+  if (flush) {
+    flush_commit_list(p_s_sb, jl, 1) ;
+    flush_journal_list(p_s_sb, jl, 1) ;
+  } else if (!(jl->j_state & LIST_COMMIT_PENDING))
+    queue_delayed_work(commit_wq, &journal->j_work, HZ/10);
+
+
+  /* if the next transaction has any chance of wrapping, flush 
+  ** transactions that might get overwritten.  If any journal lists are very 
+  ** old flush them as well.  
+  */
+first_jl:
+  list_for_each_safe(entry, safe, &journal->j_journal_list) {
+    temp_jl = JOURNAL_LIST_ENTRY(entry);
+    if (journal->j_start <= temp_jl->j_start) {
+      if ((journal->j_start + journal->j_trans_max + 1) >=
+          temp_jl->j_start)
+      {
+	flush_used_journal_lists(p_s_sb, temp_jl);
+	goto first_jl;
+      } else if ((journal->j_start +
+                  journal->j_trans_max + 1) <
+		  SB_ONDISK_JOURNAL_SIZE(p_s_sb))
+      {
+          /* if we don't cross into the next transaction and we don't
+	   * wrap, there is no way we can overlap any later transactions
+	   * break now
+	   */
+	  break;
+      }
+    } else if ((journal->j_start +
+                journal->j_trans_max + 1) >
+		SB_ONDISK_JOURNAL_SIZE(p_s_sb))
+    {
+      if (((journal->j_start + journal->j_trans_max + 1) %
+            SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
+      {
+	flush_used_journal_lists(p_s_sb, temp_jl);
+	goto first_jl;
+      } else {
+	  /* we don't overlap anything from our start to the end of the
+	   * log, and our wrapped portion doesn't overlap anything at
+	   * the start of the log.  We can break
+	   */
+	  break;
+      }
+    }
+  }
+  flush_old_journal_lists(p_s_sb);
+
+  journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;
+
+  if (!(journal->j_current_jl->j_list_bitmap)) {
+    reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
+  }
+
+  atomic_set(&(journal->j_jlock), 0) ;
+  unlock_journal(p_s_sb) ;
+  /* wake up anybody waiting to join. */
+  clear_bit(J_WRITERS_QUEUED, &journal->j_state);
+  wake_up(&(journal->j_join_wait)) ;
+
+  if (!flush && wait_on_commit &&
+      journal_list_still_alive(p_s_sb, commit_trans_id)) {
+	  flush_commit_list(p_s_sb, jl, 1) ;
+  }
+out:
+  reiserfs_check_lock_depth(p_s_sb, "journal end2");
+
+  memset (th, 0, sizeof (*th));
+  /* Re-set th->t_super, so we can properly keep track of how many
+   * persistent transactions there are. We need to do this so if this
+   * call is part of a failed restart_transaction, we can free it later */
+  th->t_super = p_s_sb;
+
+  return journal->j_errno;
+}
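+
+/* Rough picture of what do_journal_end lays out in the log area (derived from
+** the code above): a transaction of j_len blocks occupies j_len + 2 log blocks
+** starting at j_start, all modulo SB_ONDISK_JOURNAL_SIZE(p_s_sb):
+**
+**	j_start                        description block (magic, trans id, mount id)
+**	j_start + 1 .. j_start + j_len copies of the real metadata blocks
+**	j_start + j_len + 1            commit block, dirtied last by flush_commit_list
+**
+** which is why the next transaction begins at
+** (j_start + j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb).
+*/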
+
+static void
+__reiserfs_journal_abort_hard (struct super_block *sb)
+{
+    struct reiserfs_journal *journal = SB_JOURNAL (sb);
+    if (test_bit (J_ABORTED, &journal->j_state))
+        return;
+
+    printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
+                      reiserfs_bdevname (sb));
+
+    sb->s_flags |= MS_RDONLY;
+    set_bit (J_ABORTED, &journal->j_state);
+
+#ifdef CONFIG_REISERFS_CHECK
+    dump_stack();
+#endif
+}
+
+static void
+__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
+{
+    struct reiserfs_journal *journal = SB_JOURNAL (sb);
+    if (test_bit (J_ABORTED, &journal->j_state))
+        return;
+
+    if (!journal->j_errno)
+        journal->j_errno = errno;
+
+    __reiserfs_journal_abort_hard (sb);
+}
+
+void
+reiserfs_journal_abort (struct super_block *sb, int errno)
+{
+    return __reiserfs_journal_abort_soft (sb, errno);
+}
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
new file mode 100644
index 0000000..2406608
--- /dev/null
+++ b/fs/reiserfs/lbalance.c
@@ -0,0 +1,1222 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/buffer_head.h>
+
+/* these are used in do_balance.c */
+
+/* leaf_move_items
+   leaf_shift_left
+   leaf_shift_right
+   leaf_delete_items
+   leaf_insert_into_buf
+   leaf_paste_in_buffer
+   leaf_cut_from_buffer
+   leaf_paste_entries
+   */
+
+
+/* copy copy_count entries from source directory item to dest buffer (creating new item if needed) */
+static void leaf_copy_dir_entries (struct buffer_info * dest_bi, struct buffer_head * source, 
+				   int last_first, int item_num, int from, int copy_count)
+{
+    struct buffer_head * dest = dest_bi->bi_bh;
+    int item_num_in_dest;		/* either the number of target item,
+					   or if we must create a new item,
+					   the number of the item we will
+					   create it next to */
+    struct item_head * ih;
+    struct reiserfs_de_head * deh;
+    int copy_records_len;			/* length of all records in item to be copied */
+    char * records;
+
+    ih = B_N_PITEM_HEAD (source, item_num);
+
+    RFALSE( !is_direntry_le_ih (ih), "vs-10000: item must be directory item");
+
+    /* length of all records to be copied and first byte of the last of them */
+    deh = B_I_DEH (source, ih);
+    if (copy_count) {
+	copy_records_len = (from ? deh_location( &(deh[from - 1]) ) :
+            ih_item_len(ih)) - deh_location( &(deh[from + copy_count - 1]));
+	records = source->b_data + ih_location(ih) +
+                                deh_location( &(deh[from + copy_count - 1]));
+    } else {
+	copy_records_len = 0;
+	records = NULL;
+    }
+
+    /* when copying last to first, dest buffer can contain 0 items */
+    item_num_in_dest = (last_first == LAST_TO_FIRST) ? (( B_NR_ITEMS(dest) ) ? 0 : -1) : (B_NR_ITEMS(dest) - 1);
+
+    /* if there are no items in dest or the first/last item in dest is not an item of the same directory */
+    if ( (item_num_in_dest == - 1) ||
+	(last_first == FIRST_TO_LAST && le_ih_k_offset (ih) == DOT_OFFSET) ||
+	    (last_first == LAST_TO_FIRST && comp_short_le_keys/*COMP_SHORT_KEYS*/ (&ih->ih_key, B_N_PKEY (dest, item_num_in_dest)))) {
+	/* create new item in dest */
+	struct item_head new_ih;
+
+	/* form item header */
+	memcpy (&new_ih.ih_key, &ih->ih_key, KEY_SIZE);
+	put_ih_version( &new_ih, KEY_FORMAT_3_5 );
+	/* calculate item len */
+	put_ih_item_len( &new_ih, DEH_SIZE * copy_count + copy_records_len );
+	put_ih_entry_count( &new_ih, 0 );
+    
+	if (last_first == LAST_TO_FIRST) {
+	    /* form key by the following way */
+	    if (from < I_ENTRY_COUNT(ih)) {
+		set_le_ih_k_offset( &new_ih, deh_offset( &(deh[from]) ) );
+		/*memcpy (&new_ih.ih_key.k_offset, &deh[from].deh_offset, SHORT_KEY_SIZE);*/
+	    } else {
+		/* no entries will be copied to this item in this function */
+		set_le_ih_k_offset (&new_ih, U32_MAX);
+		/* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we -1 */
+	    }
+	    set_le_key_k_type (KEY_FORMAT_3_5, &(new_ih.ih_key), TYPE_DIRENTRY);
+	}
+    
+	/* insert item into dest buffer */
+	leaf_insert_into_buf (dest_bi, (last_first == LAST_TO_FIRST) ? 0 : B_NR_ITEMS(dest), &new_ih, NULL, 0);
+    } else {
+	/* prepare space for entries */
+	leaf_paste_in_buffer (dest_bi, (last_first==FIRST_TO_LAST) ? (B_NR_ITEMS(dest) - 1) : 0, MAX_US_INT,
+			      DEH_SIZE * copy_count + copy_records_len, records, 0
+	    );
+    }
+  
+    item_num_in_dest = (last_first == FIRST_TO_LAST) ? (B_NR_ITEMS(dest)-1) : 0;
+    
+    leaf_paste_entries (dest_bi->bi_bh, item_num_in_dest,
+			(last_first == FIRST_TO_LAST) ? I_ENTRY_COUNT(B_N_PITEM_HEAD (dest, item_num_in_dest)) : 0,
+			copy_count, deh + from, records,
+			DEH_SIZE * copy_count + copy_records_len
+	);
+}
+
+
+/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or 
+   part of it or nothing (see the return 0 below) from SOURCE to the end 
+   (if last_first) or beginning (!last_first) of the DEST */
+/* returns 1 if anything was copied, else 0 */
+static int leaf_copy_boundary_item (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+				    int bytes_or_entries)
+{
+  struct buffer_head * dest = dest_bi->bi_bh;
+  int dest_nr_item, src_nr_item; /* number of items in the source and destination buffers */
+  struct item_head * ih;
+  struct item_head * dih;
+  
+  dest_nr_item = B_NR_ITEMS(dest);
+  
+  if ( last_first == FIRST_TO_LAST ) {
+    /* if ( DEST is empty or first item of SOURCE and last item of DEST are the items of different objects
+       or of different types ) then there is no need to treat this item differently from the other items
+       that we copy, so we return */
+    ih = B_N_PITEM_HEAD (src, 0);
+    dih = B_N_PITEM_HEAD (dest, dest_nr_item - 1);
+    if (!dest_nr_item || (!op_is_left_mergeable (&(ih->ih_key), src->b_size)))
+      /* there is nothing to merge */
+      return 0;
+      
+    RFALSE( ! ih_item_len(ih), "vs-10010: item can not have empty length");
+      
+    if ( is_direntry_le_ih (ih) ) {
+      if ( bytes_or_entries == -1 )
+	/* copy all entries to dest */
+	bytes_or_entries = ih_entry_count(ih);
+      leaf_copy_dir_entries (dest_bi, src, FIRST_TO_LAST, 0, 0, bytes_or_entries);
+      return 1;
+    }
+      
+    /* copy part of the body of the first item of SOURCE to the end of the body of the last item of the DEST
+       part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; don't create new item header
+       */
+    if ( bytes_or_entries == -1 )
+      bytes_or_entries = ih_item_len(ih);
+
+#ifdef CONFIG_REISERFS_CHECK
+    else {
+      if (bytes_or_entries == ih_item_len(ih) && is_indirect_le_ih(ih))
+	if (get_ih_free_space (ih))
+	  reiserfs_panic (NULL, "vs-10020: leaf_copy_boundary_item: "
+			  "last unformatted node must be filled entirely (%h)",
+			  ih);
+    }
+#endif
+      
+    /* merge first item (or its part) of src buffer with the last
+       item of dest buffer. Both are of the same file */
+    leaf_paste_in_buffer (dest_bi,
+			  dest_nr_item - 1, ih_item_len(dih), bytes_or_entries, B_I_PITEM(src,ih), 0
+			  );
+      
+    if (is_indirect_le_ih (dih)) {
+      RFALSE( get_ih_free_space (dih),
+              "vs-10030: merge to left: last unformatted node of non-last indirect item %h must have zerto free space",
+              ih);
+      if (bytes_or_entries == ih_item_len(ih))
+	set_ih_free_space (dih, get_ih_free_space (ih));
+    }
+    
+    return 1;
+  }
+  
+
+  /* copy boundary item to right (last_first == LAST_TO_FIRST) */
+
+  /* ( DEST is empty or last item of SOURCE and first item of DEST
+     are the items of different object or of different types )
+     */
+  src_nr_item = B_NR_ITEMS (src);
+  ih = B_N_PITEM_HEAD (src, src_nr_item - 1);
+  dih = B_N_PITEM_HEAD (dest, 0);
+
+  if (!dest_nr_item || !op_is_left_mergeable (&(dih->ih_key), src->b_size))
+    return 0;
+  
+  if ( is_direntry_le_ih (ih)) {
+    if ( bytes_or_entries == -1 )
+      /* bytes_or_entries = entries number in last item body of SOURCE */
+      bytes_or_entries = ih_entry_count(ih);
+    
+    leaf_copy_dir_entries (dest_bi, src, LAST_TO_FIRST, src_nr_item - 1, ih_entry_count(ih) - bytes_or_entries, bytes_or_entries);
+    return 1;
+  }
+
+  /* copy part of the body of the last item of SOURCE to the beginning of the body of the first item of the DEST;
+     part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; change first item key of the DEST;
+     don't create new item header
+     */
+  
+  RFALSE( is_indirect_le_ih(ih) && get_ih_free_space (ih),
+          "vs-10040: merge to right: last unformatted node of non-last indirect item must be filled entirely (%h)",
+		    ih);
+
+  if ( bytes_or_entries == -1 ) {
+    /* bytes_or_entries = length of last item body of SOURCE */
+    bytes_or_entries = ih_item_len(ih);
+
+    RFALSE( le_ih_k_offset (dih) !=
+            le_ih_k_offset (ih) + op_bytes_number (ih, src->b_size),
+            "vs-10050: items %h and %h do not match", ih, dih);
+
+    /* change first item key of the DEST */
+    set_le_ih_k_offset (dih, le_ih_k_offset (ih));
+
+    /* item becomes non-mergeable */
+    /* or mergeable if left item was */
+    set_le_ih_k_type (dih, le_ih_k_type (ih));
+  } else {
+    /* merge to right only part of item */
+    RFALSE( ih_item_len(ih) <= bytes_or_entries,
+            "vs-10060: no so much bytes %lu (needed %lu)",
+            ( unsigned long )ih_item_len(ih), ( unsigned long )bytes_or_entries);
+    
+    /* change first item key of the DEST */
+    if ( is_direct_le_ih (dih) ) {
+      RFALSE( le_ih_k_offset (dih) <= (unsigned long)bytes_or_entries,
+	      "vs-10070: dih %h, bytes_or_entries(%d)", dih, bytes_or_entries);
+      set_le_ih_k_offset (dih, le_ih_k_offset (dih) - bytes_or_entries);
+    } else {
+      RFALSE( le_ih_k_offset (dih) <=
+              (bytes_or_entries / UNFM_P_SIZE) * dest->b_size,
+              "vs-10080: dih %h, bytes_or_entries(%d)",
+              dih, (bytes_or_entries/UNFM_P_SIZE)*dest->b_size);
+      set_le_ih_k_offset (dih, le_ih_k_offset (dih) - ((bytes_or_entries / UNFM_P_SIZE) * dest->b_size));
+    }
+  }
+  
+  leaf_paste_in_buffer (dest_bi, 0, 0, bytes_or_entries, B_I_PITEM(src,ih) + ih_item_len(ih) - bytes_or_entries, 0);
+  return 1;
+}
+
+
+/* copy cpy_num items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means that we copy cpy_num items beginning from the first-th item in src to the tail of dest
+ * last_first == LAST_TO_FIRST means that we copy cpy_num items beginning from the first-th item in src to the head of dest
+ */
+static void leaf_copy_items_entirely (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+				      int first, int cpy_num)
+{
+    struct buffer_head * dest;
+    int nr, free_space;
+    int dest_before;
+    int last_loc, last_inserted_loc, location;
+    int i, j;
+    struct block_head * blkh;
+    struct item_head * ih;
+
+    RFALSE( last_first != LAST_TO_FIRST  && last_first != FIRST_TO_LAST,
+	    "vs-10090: bad last_first parameter %d", last_first);
+    RFALSE( B_NR_ITEMS (src) - first < cpy_num,
+	    "vs-10100: too few items in source %d, required %d from %d",
+	    B_NR_ITEMS(src), cpy_num, first);
+    RFALSE( cpy_num < 0, "vs-10110: can not copy negative amount of items");
+    RFALSE( ! dest_bi, "vs-10120: can not copy negative amount of items");
+
+    dest = dest_bi->bi_bh;
+
+    RFALSE( ! dest, "vs-10130: can not copy negative amount of items");
+
+    if (cpy_num == 0)
+	return;
+
+    blkh = B_BLK_HEAD(dest);
+    nr = blkh_nr_item( blkh );
+    free_space = blkh_free_space(blkh);
+  
+    /* we will insert items before the 0-th or nr-th item in the dest buffer.  It depends on the last_first parameter */
+    dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;
+
+    /* location of head of first new item */
+    ih = B_N_PITEM_HEAD (dest, dest_before);
+
+    RFALSE( blkh_free_space(blkh) < cpy_num * IH_SIZE,
+            "vs-10140: not enough free space for headers %d (needed %d)",
+            B_FREE_SPACE (dest), cpy_num * IH_SIZE);
+
+    /* prepare space for headers */
+    memmove (ih + cpy_num, ih, (nr-dest_before) * IH_SIZE);
+
+    /* copy item headers */
+    memcpy (ih, B_N_PITEM_HEAD (src, first), cpy_num * IH_SIZE);
+
+    free_space -= (IH_SIZE * cpy_num);
+    set_blkh_free_space( blkh, free_space );
+
+    /* location of unmovable item */
+    j = location = (dest_before == 0) ? dest->b_size : ih_location(ih-1);
+    for (i = dest_before; i < nr + cpy_num; i ++) {
+        location -= ih_item_len( ih + i - dest_before );
+        put_ih_location( ih + i - dest_before, location );
+    }
+
+    /* prepare space for items */
+    last_loc = ih_location( &(ih[nr+cpy_num-1-dest_before]) );
+    last_inserted_loc = ih_location( &(ih[cpy_num-1]) );
+
+    /* check free space */
+    RFALSE( free_space < j - last_inserted_loc,
+	    "vs-10150: not enough free space for items %d (needed %d)",
+            free_space, j - last_inserted_loc);
+
+    memmove (dest->b_data + last_loc,
+	     dest->b_data + last_loc + j - last_inserted_loc,
+	     last_inserted_loc - last_loc);
+
+    /* copy items */
+    memcpy (dest->b_data + last_inserted_loc, B_N_PITEM(src,(first + cpy_num - 1)),
+	    j - last_inserted_loc);
+
+    /* sizes, item number */
+    set_blkh_nr_item( blkh, nr + cpy_num );
+    set_blkh_free_space( blkh, free_space - (j - last_inserted_loc) );
+
+    do_balance_mark_leaf_dirty (dest_bi->tb, dest, 0);
+
+    if (dest_bi->bi_parent) {
+	struct disk_child *t_dc;
+	t_dc = B_N_CHILD (dest_bi->bi_parent, dest_bi->bi_position);
+	RFALSE( dc_block_number(t_dc) != dest->b_blocknr,
+	        "vs-10160: block number in bh does not match to field in disk_child structure %lu and %lu",
+                ( long unsigned ) dest->b_blocknr, 
+		( long unsigned ) dc_block_number(t_dc));
+	put_dc_size( t_dc, dc_size(t_dc) + (j - last_inserted_loc + IH_SIZE * cpy_num ) );
+    
+	do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent, 0);
+    }
+}
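+
+/* Illustrative layout behind the location arithmetic above: item headers grow
+** forward from the block head, item bodies are packed backward from the end
+** of the block, and ih_location() holds the offset of each body.  For a
+** 4096 byte block holding two items of 100 and 200 bytes:
+**
+**	ih_location(item 0) = 4096 - 100  = 3996
+**	ih_location(item 1) = 3996 - 200  = 3796
+**
+** so copying cpy_num headers costs IH_SIZE * cpy_num bytes of free space and
+** the bodies cost (j - last_inserted_loc) bytes, exactly as accounted above.
+*/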
+
+
+/* This function splits the (liquid) item into two items (useful when
+   shifting part of an item into another node.) */
+static void leaf_item_bottle (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+			      int item_num, int cpy_bytes)
+{
+    struct buffer_head * dest = dest_bi->bi_bh;
+    struct item_head * ih;
+  
+    RFALSE( cpy_bytes == -1, "vs-10170: bytes == - 1 means: do not split item");
+
+    if ( last_first == FIRST_TO_LAST ) {
+	/* if ( if item in position item_num in buffer SOURCE is directory item ) */
+	if (is_direntry_le_ih (ih = B_N_PITEM_HEAD(src,item_num)))
+	    leaf_copy_dir_entries (dest_bi, src, FIRST_TO_LAST, item_num, 0, cpy_bytes);
+	else {
+	    struct item_head n_ih;
+      
+	    /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST 
+	       part defined by 'cpy_bytes'; create new item header; change old item_header (????);
+	       n_ih = new item_header;
+	    */
+	    memcpy (&n_ih, ih, IH_SIZE);
+	    put_ih_item_len( &n_ih, cpy_bytes );
+	    if (is_indirect_le_ih (ih)) {
+		RFALSE( cpy_bytes == ih_item_len(ih) && get_ih_free_space(ih),
+		        "vs-10180: when whole indirect item is bottle to left neighbor, it must have free_space==0 (not %lu)",
+                        ( long unsigned ) get_ih_free_space (ih));
+		set_ih_free_space (&n_ih, 0);
+	    }
+
+	    RFALSE( op_is_left_mergeable (&(ih->ih_key), src->b_size),
+		    "vs-10190: bad mergeability of item %h", ih);
+	    n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
+	    leaf_insert_into_buf (dest_bi, B_NR_ITEMS(dest), &n_ih, B_N_PITEM (src, item_num), 0);
+	}
+    } else {
+	/*  if ( if item in position item_num in buffer SOURCE is directory item ) */
+	if (is_direntry_le_ih(ih = B_N_PITEM_HEAD (src, item_num)))
+	    leaf_copy_dir_entries (dest_bi, src, LAST_TO_FIRST, item_num, I_ENTRY_COUNT(ih) - cpy_bytes, cpy_bytes);
+	else {
+	    struct item_head n_ih;
+      
+	    /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST 
+	       part defined by 'cpy_bytes'; create new item header;
+	       n_ih = new item_header;
+	    */
+	    memcpy (&n_ih, ih, SHORT_KEY_SIZE);
+
+	    n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
+
+	    if (is_direct_le_ih (ih)) {
+		set_le_ih_k_offset (&n_ih, le_ih_k_offset (ih) + ih_item_len(ih) - cpy_bytes);
+		set_le_ih_k_type (&n_ih, TYPE_DIRECT);
+		set_ih_free_space (&n_ih, MAX_US_INT);
+	    } else {
+		/* indirect item */
+		RFALSE( !cpy_bytes && get_ih_free_space (ih),
+		        "vs-10200: ih->ih_free_space must be 0 when indirect item will be appended");
+		set_le_ih_k_offset (&n_ih, le_ih_k_offset (ih) + (ih_item_len(ih) - cpy_bytes) / UNFM_P_SIZE * dest->b_size);
+		set_le_ih_k_type (&n_ih, TYPE_INDIRECT);
+		set_ih_free_space (&n_ih, get_ih_free_space (ih));
+	    }
+      
+	    /* set item length */
+	    put_ih_item_len( &n_ih, cpy_bytes );
+
+	    n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
+
+	    leaf_insert_into_buf (dest_bi, 0, &n_ih, B_N_PITEM(src,item_num) + ih_item_len(ih) - cpy_bytes, 0);
+	}
+    }
+}
+
+
+/* If cpy_bytes equals minus one, then copy cpy_num whole items from SOURCE to DEST.
+   If cpy_bytes is not equal to minus one, then copy cpy_num-1 whole items from SOURCE to DEST.
+   From the last item copy cpy_bytes bytes for a regular item and cpy_bytes directory entries for
+   a directory item. */
+static int leaf_copy_items (struct buffer_info * dest_bi, struct buffer_head * src, int last_first, int cpy_num,
+			    int cpy_bytes)
+{
+  struct buffer_head * dest;
+  int pos, i, src_nr_item, bytes;
+
+  dest = dest_bi->bi_bh;
+  RFALSE( !dest || !src, "vs-10210: !dest || !src");
+  RFALSE( last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST,
+	  "vs-10220:last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST");
+  RFALSE( B_NR_ITEMS(src) < cpy_num,
+	  "vs-10230: No enough items: %d, req. %d", B_NR_ITEMS(src), cpy_num);
+  RFALSE( cpy_num < 0,"vs-10240: cpy_num < 0 (%d)", cpy_num);
+
+ if ( cpy_num == 0 )
+   return 0;
+ 
+ if ( last_first == FIRST_TO_LAST ) {
+   /* copy items to left */
+   pos = 0;
+   if ( cpy_num == 1 )
+     bytes = cpy_bytes;
+   else
+     bytes = -1;
+   
+   /* copy the first item or part of it or nothing to the end of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes)) */
+   i = leaf_copy_boundary_item (dest_bi, src, FIRST_TO_LAST, bytes);
+   cpy_num -= i;
+   if ( cpy_num == 0 )
+     return i;
+   pos += i;
+   if ( cpy_bytes == -1 )
+     /* copy first cpy_num items starting from position 'pos' of SOURCE to end of DEST */
+     leaf_copy_items_entirely (dest_bi, src, FIRST_TO_LAST, pos, cpy_num);
+   else {
+     /* copy first cpy_num-1 items starting from position 'pos-1' of the SOURCE to the end of the DEST */
+     leaf_copy_items_entirely (dest_bi, src, FIRST_TO_LAST, pos, cpy_num-1);
+	     
+     /* copy part of the item which number is cpy_num+pos-1 to the end of the DEST */
+     leaf_item_bottle (dest_bi, src, FIRST_TO_LAST, cpy_num+pos-1, cpy_bytes);
+   } 
+ } else {
+   /* copy items to right */
+   src_nr_item = B_NR_ITEMS (src);
+   if ( cpy_num == 1 )
+     bytes = cpy_bytes;
+   else
+     bytes = -1;
+   
+   /* copy the last item or part of it or nothing to the beginning of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes)); */
+   i = leaf_copy_boundary_item (dest_bi, src, LAST_TO_FIRST, bytes);
+   
+   cpy_num -= i;
+   if ( cpy_num == 0 )
+     return i;
+   
+   pos = src_nr_item - cpy_num - i;
+   if ( cpy_bytes == -1 ) {
+     /* starting from position 'pos' copy last cpy_num items of SOURCE to begin of DEST */
+     leaf_copy_items_entirely (dest_bi, src, LAST_TO_FIRST, pos, cpy_num);
+   } else {
+     /* copy last cpy_num-1 items starting from position 'pos+1' of the SOURCE to the begin of the DEST; */
+     leaf_copy_items_entirely (dest_bi, src, LAST_TO_FIRST, pos+1, cpy_num-1);
+
+     /* copy part of the item which number is pos to the begin of the DEST */
+     leaf_item_bottle (dest_bi, src, LAST_TO_FIRST, pos, cpy_bytes);
+   }
+ }
+ return i;
+}
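+
+/* Worked example of the FIRST_TO_LAST path (illustrative): asking for
+** cpy_num == 3 items with cpy_bytes == 40 first tries to merge the boundary
+** item into DEST, then copies the remaining whole items, then splits 40 bytes
+** off the following item:
+**
+**	i = leaf_copy_boundary_item(...) ;                   0 or 1 items merged
+**	leaf_copy_items_entirely(..., pos, 3 - i - 1) ;      whole items
+**	leaf_item_bottle(..., (3 - i) + pos - 1, 40) ;       partial last item
+*/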
+
+
+/* there are several types of copying: from S[0] to L[0], from S[0] to R[0],
+   from R[0] to L[0]. for each of these we have to define the parent and the
+   positions of the destination and source buffers */
+static void leaf_define_dest_src_infos (int shift_mode, struct tree_balance * tb, struct buffer_info * dest_bi,
+					struct buffer_info * src_bi, int * first_last,
+					struct buffer_head * Snew)
+{
+    memset (dest_bi, 0, sizeof (struct buffer_info));
+    memset (src_bi, 0, sizeof (struct buffer_info));
+
+    /* define dest, src, dest parent, dest position */
+    switch (shift_mode) {
+    case LEAF_FROM_S_TO_L:    /* it is used in leaf_shift_left */
+	src_bi->tb = tb;
+	src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);	/* src->b_item_order */
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->L[0];
+	dest_bi->bi_parent = tb->FL[0];
+	dest_bi->bi_position = get_left_neighbor_position (tb, 0);
+	*first_last = FIRST_TO_LAST;
+	break;
+
+    case LEAF_FROM_S_TO_R:  /* it is used in leaf_shift_right */
+	src_bi->tb = tb;
+	src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->R[0];
+	dest_bi->bi_parent = tb->FR[0];
+	dest_bi->bi_position = get_right_neighbor_position (tb, 0);
+	*first_last = LAST_TO_FIRST;
+	break;
+
+    case LEAF_FROM_R_TO_L:  /* it is used in balance_leaf_when_delete */
+	src_bi->tb = tb;
+	src_bi->bi_bh = tb->R[0];
+	src_bi->bi_parent = tb->FR[0];
+	src_bi->bi_position = get_right_neighbor_position (tb, 0);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->L[0];
+	dest_bi->bi_parent = tb->FL[0];
+	dest_bi->bi_position = get_left_neighbor_position (tb, 0);
+	*first_last = FIRST_TO_LAST;
+	break;
+    
+    case LEAF_FROM_L_TO_R:  /* it is used in balance_leaf_when_delete */
+	src_bi->tb = tb;
+	src_bi->bi_bh = tb->L[0];
+	src_bi->bi_parent = tb->FL[0];
+	src_bi->bi_position = get_left_neighbor_position (tb, 0);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = tb->R[0];
+	dest_bi->bi_parent = tb->FR[0];
+	dest_bi->bi_position = get_right_neighbor_position (tb, 0);
+	*first_last = LAST_TO_FIRST;
+	break;
+
+    case LEAF_FROM_S_TO_SNEW:
+	src_bi->tb = tb;
+	src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+	src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);
+	dest_bi->tb = tb;
+	dest_bi->bi_bh = Snew;
+	dest_bi->bi_parent = NULL;
+	dest_bi->bi_position = 0;
+	*first_last = LAST_TO_FIRST;
+	break;
+    
+    default:
+	reiserfs_panic (NULL, "vs-10250: leaf_define_dest_src_infos: shift type is unknown (%d)", shift_mode);
+    }
+    RFALSE( src_bi->bi_bh == 0 || dest_bi->bi_bh == 0,
+	    "vs-10260: mode==%d, source (%p) or dest (%p) buffer is initialized incorrectly",
+	    shift_mode, src_bi->bi_bh, dest_bi->bi_bh);
+}
+
+
+
+
+/* copy mov_num items and mov_bytes of the (mov_num-1)th item to
+   neighbor. Delete them from source */
+int leaf_move_items (int shift_mode, struct tree_balance * tb, int mov_num, int mov_bytes, struct buffer_head * Snew)
+{
+  int ret_value;
+  struct buffer_info dest_bi, src_bi;
+  int first_last;
+
+  leaf_define_dest_src_infos (shift_mode, tb, &dest_bi, &src_bi, &first_last, Snew);
+
+  ret_value = leaf_copy_items (&dest_bi, src_bi.bi_bh, first_last, mov_num, mov_bytes);
+
+  leaf_delete_items (&src_bi, first_last, (first_last == FIRST_TO_LAST) ? 0 : (B_NR_ITEMS(src_bi.bi_bh) - mov_num), mov_num, mov_bytes);
+
+  
+  return ret_value;
+}
+
+
+/* Shift shift_num items (and shift_bytes of last shifted item if shift_bytes != -1)
+   from S[0] to L[0] and replace the delimiting key */
+int leaf_shift_left (struct tree_balance * tb, int shift_num, int shift_bytes)
+{
+  struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
+  int i;
+
+  /* move shift_num (and shift_bytes bytes) items from S[0] to left neighbor L[0] */
+  i = leaf_move_items (LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL);
+
+  if ( shift_num ) {
+    if (B_NR_ITEMS (S0) == 0) { /* number of items in S[0] == 0 */
+
+      RFALSE( shift_bytes != -1,
+	      "vs-10270: S0 is empty now, but shift_bytes != -1 (%d)", 
+	      shift_bytes);
+#ifdef CONFIG_REISERFS_CHECK
+      if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) {
+	print_cur_tb ("vs-10275");
+	reiserfs_panic (tb->tb_sb, "vs-10275: leaf_shift_left: balance condition corrupted (%c)", tb->tb_mode);
+      }
+#endif
+
+      if (PATH_H_POSITION (tb->tb_path, 1) == 0)
+	replace_key (tb, tb->CFL[0], tb->lkey[0], PATH_H_PPARENT (tb->tb_path, 0), 0);
+
+    } else {     
+      /* replace lkey in CFL[0] by 0-th key from S[0]; */
+      replace_key (tb, tb->CFL[0], tb->lkey[0], S0, 0);
+      
+      RFALSE( (shift_bytes != -1 &&
+              !(is_direntry_le_ih (B_N_PITEM_HEAD (S0, 0))
+                && !I_ENTRY_COUNT (B_N_PITEM_HEAD (S0, 0)))) &&
+	      (!op_is_left_mergeable (B_N_PKEY (S0, 0), S0->b_size)),
+	      "vs-10280: item must be mergeable");
+    }
+  }
+  
+  return i;
+}
+
+
+
+
+
+/* CLEANING STOPPED HERE */
+
+
+
+
+/* Shift shift_num (shift_bytes) items from S[0] to the right neighbor, and replace the delimiting key */
+int	leaf_shift_right(
+		struct tree_balance * tb, 
+		int shift_num,
+		int shift_bytes
+	)
+{
+  //  struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
+  int ret_value;
+
+  /* move shift_num (and shift_bytes) items from S[0] to right neighbor R[0] */
+  ret_value = leaf_move_items (LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL);
+
+  /* replace rkey in CFR[0] by the 0-th key from R[0] */
+  if (shift_num) {
+    replace_key (tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+  }
+
+  return ret_value;
+}
+
+
+
+static void leaf_delete_items_entirely (struct buffer_info * bi,
+					int first, int del_num);
+/*  If del_bytes == -1, then starting from position 'first' delete del_num whole items in buffer CUR.
+    Otherwise:
+    If last_first == 0, then starting from position 'first' delete del_num-1 whole items and delete the part of the body of
+    the first item that is defined by del_bytes.  Don't delete the first item header.
+    If last_first == 1, then starting from position 'first+1' delete del_num-1 whole items and delete the part of the body of
+    the last item that is defined by del_bytes.  Don't delete the last item header.
+*/
+void leaf_delete_items (struct buffer_info * cur_bi, int last_first, 
+			int first, int del_num, int del_bytes)
+{
+    struct buffer_head * bh;
+    int item_amount = B_NR_ITEMS (bh = cur_bi->bi_bh);
+
+    RFALSE( !bh, "10155: bh is not defined");
+    RFALSE( del_num < 0, "10160: del_num can not be < 0. del_num==%d", del_num);
+    RFALSE( first < 0 || first + del_num > item_amount,
+	    "10165: invalid number of first item to be deleted (%d) or "
+	    "not so many items (%d) to delete (only %d)", 
+	    first, first + del_num, item_amount);
+
+    if ( del_num == 0 )
+	return;
+
+    if ( first == 0 && del_num == item_amount && del_bytes == -1 ) {
+	make_empty_node (cur_bi);
+	do_balance_mark_leaf_dirty (cur_bi->tb, bh, 0);
+	return;
+    }
+
+    if ( del_bytes == -1 )
+	/* delete del_num items beginning from item in position first */
+	leaf_delete_items_entirely (cur_bi, first, del_num);
+    else {
+	if ( last_first == FIRST_TO_LAST ) {
+	    /* delete del_num-1 items beginning from item in position first  */
+	    leaf_delete_items_entirely (cur_bi, first, del_num-1);
+
+	    /* delete the part of the first item of the bh
+	       do not delete item header
+	    */
+	    leaf_cut_from_buffer (cur_bi, 0, 0, del_bytes);
+	} else  {
+	    struct item_head * ih;
+	    int len;
+
+	    /* delete del_num-1 items beginning from item in position first+1  */
+	    leaf_delete_items_entirely (cur_bi, first+1, del_num-1);
+
+	    if (is_direntry_le_ih (ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh)-1))) 	/* the last item is directory  */
+	        /* len = numbers of directory entries in this item */
+		len = ih_entry_count(ih);
+	    else
+	        /* len = body len of item */
+		len = ih_item_len(ih);
+
+	    /* delete the part of the last item of the bh 
+	       do not delete item header
+	    */
+	    leaf_cut_from_buffer (cur_bi, B_NR_ITEMS(bh)-1, len - del_bytes, del_bytes);
+	}
+    }
+}
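+
+/* A worked example: with del_num == 3 and del_bytes == 100, FIRST_TO_LAST
+   (used when items have just been moved to the left neighbor, so first == 0)
+   removes items 0 and 1 entirely and then cuts the first 100 bytes off the
+   item that becomes item 0; LAST_TO_FIRST (used for moves to the right
+   neighbor) removes the two items after position 'first' and trims the last
+   100 bytes of the item left at the tail. */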
+
+
+/* insert item into the leaf node at position 'before' */
+void leaf_insert_into_buf (struct buffer_info * bi, int before,
+			   struct item_head * inserted_item_ih,
+			   const char * inserted_item_body,
+			   int zeros_number)
+{
+    struct buffer_head * bh = bi->bi_bh;
+    int nr, free_space;
+    struct block_head * blkh;
+    struct item_head * ih;
+    int i;
+    int last_loc, unmoved_loc;
+    char * to;
+
+
+    blkh = B_BLK_HEAD(bh);
+    nr = blkh_nr_item(blkh);
+    free_space = blkh_free_space( blkh );
+
+    /* check free space */
+    RFALSE( free_space < ih_item_len(inserted_item_ih) + IH_SIZE,
+            "vs-10170: not enough free space in block %z, new item %h",
+            bh, inserted_item_ih);
+    RFALSE( zeros_number > ih_item_len(inserted_item_ih),
+	    "vs-10172: zero number == %d, item length == %d",
+            zeros_number, ih_item_len(inserted_item_ih));
+
+
+    /* get the item that the new item must be inserted before */
+    ih = B_N_PITEM_HEAD (bh, before);
+
+    /* prepare space for the body of new item */
+    last_loc = nr ? ih_location( &(ih[nr - before - 1]) ) : bh->b_size;
+    unmoved_loc = before ? ih_location( ih-1 ) : bh->b_size;
+
+
+    memmove (bh->b_data + last_loc - ih_item_len(inserted_item_ih), 
+	     bh->b_data + last_loc, unmoved_loc - last_loc);
+
+    to = bh->b_data + unmoved_loc - ih_item_len(inserted_item_ih);
+    memset (to, 0, zeros_number);
+    to += zeros_number;
+
+    /* copy body to prepared space */
+    if (inserted_item_body)
+	memmove (to, inserted_item_body, ih_item_len(inserted_item_ih) - zeros_number);
+    else
+	memset(to, '\0', ih_item_len(inserted_item_ih) - zeros_number);
+  
+    /* insert item header */
+    memmove (ih + 1, ih, IH_SIZE * (nr - before));
+    memmove (ih, inserted_item_ih, IH_SIZE);
+  
+    /* change locations */
+    for (i = before; i < nr + 1; i ++)
+    {
+        unmoved_loc -= ih_item_len( &(ih[i-before]));
+	put_ih_location( &(ih[i-before]), unmoved_loc );
+    }
+  
+    /* sizes, free space, item number */
+    set_blkh_nr_item( blkh, blkh_nr_item(blkh) + 1 );
+    set_blkh_free_space( blkh,
+                    free_space - (IH_SIZE + ih_item_len(inserted_item_ih ) ) );
+    do_balance_mark_leaf_dirty (bi->tb, bh, 1);
+
+    if (bi->bi_parent) { 
+	struct disk_child *t_dc;
+	t_dc = B_N_CHILD (bi->bi_parent, bi->bi_position);
+	put_dc_size( t_dc, dc_size(t_dc) + (IH_SIZE + ih_item_len(inserted_item_ih)));
+	do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+    }
+}
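+
+/* Leaf block layout, roughly: the block head and the array of item headers
+   sit at the start of the block, item bodies are packed from the end toward
+   the middle, and the free space lies in between:
+
+	| blk_head | ih[0] ih[1] .. ih[nr-1] | free space | body[nr-1] .. body[1] body[0] |
+
+   ih_location() of each header records where its body starts, which is why
+   inserting an item above first slides the bodies of items at and after
+   'before' toward the block start and then patches ih_location for each of
+   them. */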
+
+
+/* paste paste_size bytes into the affected_item_num-th item. 
+   When the item is a directory item, this only prepares space for the new entries */
+void leaf_paste_in_buffer (struct buffer_info * bi, int affected_item_num,
+			   int pos_in_item, int paste_size,
+			   const char * body,
+			   int zeros_number)
+{
+    struct buffer_head * bh = bi->bi_bh;
+    int nr, free_space;
+    struct block_head * blkh;
+    struct item_head * ih;
+    int i;
+    int last_loc, unmoved_loc;
+
+    blkh = B_BLK_HEAD(bh);
+    nr = blkh_nr_item(blkh);
+    free_space = blkh_free_space(blkh);
+
+
+    /* check free space */
+    RFALSE( free_space < paste_size,
+            "vs-10175: not enough free space: needed %d, available %d",
+            paste_size, free_space);
+
+#ifdef CONFIG_REISERFS_CHECK
+    if (zeros_number > paste_size) {
+	print_cur_tb ("10177");
+	reiserfs_panic ( NULL, "vs-10177: leaf_paste_in_buffer: zeros_number == %d, paste_size == %d",
+                         zeros_number, paste_size);
+    }
+#endif /* CONFIG_REISERFS_CHECK */
+
+
+    /* item to be appended */
+    ih = B_N_PITEM_HEAD(bh, affected_item_num);
+
+    last_loc = ih_location( &(ih[nr - affected_item_num - 1]) );
+    unmoved_loc = affected_item_num ? ih_location( ih-1 ) : bh->b_size;
+
+    /* prepare space */
+    memmove (bh->b_data + last_loc - paste_size, bh->b_data + last_loc,
+	     unmoved_loc - last_loc);
+
+
+    /* change locations */
+    for (i = affected_item_num; i < nr; i ++)
+	put_ih_location( &(ih[i-affected_item_num]),
+                    ih_location( &(ih[i-affected_item_num])) - paste_size );
+
+    if ( body ) {
+	if (!is_direntry_le_ih (ih)) {
+	    if (!pos_in_item) {
+		/* shift data to right */
+		memmove (bh->b_data + ih_location(ih) + paste_size, 
+			 bh->b_data + ih_location(ih), ih_item_len(ih));
+		/* paste data in the head of item */
+		memset (bh->b_data + ih_location(ih), 0, zeros_number);
+		memcpy (bh->b_data + ih_location(ih) + zeros_number, body, paste_size - zeros_number);
+	    } else {
+		memset (bh->b_data + unmoved_loc - paste_size, 0, zeros_number);
+		memcpy (bh->b_data + unmoved_loc - paste_size + zeros_number, body, paste_size - zeros_number);
+	    }
+	}
+    }
+    else
+	memset(bh->b_data + unmoved_loc - paste_size, '\0', paste_size);
+
+    put_ih_item_len( ih, ih_item_len(ih) + paste_size );
+
+    /* change free space */
+    set_blkh_free_space( blkh, free_space - paste_size );
+
+    do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+
+    if (bi->bi_parent) { 
+	struct disk_child *t_dc = B_N_CHILD (bi->bi_parent, bi->bi_position);
+	put_dc_size( t_dc, dc_size(t_dc) + paste_size );
+	do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+    }
+}
+
+
+/* cuts DEL_COUNT entries beginning from the FROM-th entry. A directory item
+   has no internal free space, so it moves DEHs and the remaining records as
+   necessary. The return value is the size of the removed part of the
+   directory item in bytes. */
+static int	leaf_cut_entries (
+				struct buffer_head * bh,
+				struct item_head * ih, 
+				int from, 
+				int del_count
+			)
+{
+  char * item;
+  struct reiserfs_de_head * deh;
+  int prev_record_offset;	/* offset of the record that is (from-1)th */
+  char * prev_record;		/* pointer to that record */
+  int cut_records_len;		/* length of all removed records */
+  int i;
+
+
+  /* make sure, that item is directory and there are enough entries to
+     remove */
+  RFALSE( !is_direntry_le_ih (ih), "10180: item is not directory item");
+  RFALSE( I_ENTRY_COUNT(ih) < from + del_count,
+	  "10185: item contains not enough entries: entry_count = %d, from = %d, to delete = %d",
+	  I_ENTRY_COUNT(ih), from, del_count);
+
+  if (del_count == 0)
+    return 0;
+
+  /* first byte of item */
+  item = bh->b_data + ih_location(ih);
+
+  /* entry head array */
+  deh = B_I_DEH (bh, ih);
+
+  /* first byte of remaining entries, those are BEFORE cut entries
+     (prev_record) and length of all removed records (cut_records_len) */
+  prev_record_offset = (from ? deh_location( &(deh[from - 1])) : ih_item_len(ih));
+  cut_records_len = prev_record_offset/*from_record*/ -
+                                deh_location( &(deh[from + del_count - 1]));
+  prev_record = item + prev_record_offset;
+
+
+  /* adjust locations of remaining entries */
+  for (i = I_ENTRY_COUNT(ih) - 1; i > from + del_count - 1; i --)
+    put_deh_location( &(deh[i]),
+                        deh_location( &deh[i] ) - (DEH_SIZE * del_count ) );
+
+  for (i = 0; i < from; i ++)
+    put_deh_location( &(deh[i]),
+        deh_location( &deh[i] ) - (DEH_SIZE * del_count + cut_records_len) );
+
+  put_ih_entry_count( ih, ih_entry_count(ih) - del_count );
+
+  /* shift entry head array and entries those are AFTER removed entries */
+  memmove ((char *)(deh + from),
+	   deh + from + del_count, 
+	   prev_record - cut_records_len - (char *)(deh + from + del_count));
+  
+  /* shift records, those are BEFORE removed entries */
+  memmove (prev_record - cut_records_len - DEH_SIZE * del_count,
+	   prev_record, item + ih_item_len(ih) - prev_record);
+
+  return DEH_SIZE * del_count + cut_records_len;
+}
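+
+/* Directory item layout, roughly: the array of entry heads (deh) sits at the
+   front of the item body and the entry records (the names) are packed from
+   the item end back toward that array, with deh_location() giving each
+   record's offset from the start of the item:
+
+	| deh[0] deh[1] .. deh[n-1] | record[n-1] .. record[1] record[0] |
+
+   so cutting entries, as above, removes a slice of the deh array plus the
+   matching records and re-bases deh_location of the surviving entries. */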
+
+
+/*  when cut item is part of regular file
+        pos_in_item - first byte that must be cut
+        cut_size - number of bytes to be cut beginning from pos_in_item
+ 
+   when cut item is part of directory
+        pos_in_item - number of first deleted entry
+        cut_size - count of deleted entries
+    */
+void leaf_cut_from_buffer (struct buffer_info * bi, int cut_item_num,
+			   int pos_in_item, int cut_size)
+{
+    int nr;
+    struct buffer_head * bh = bi->bi_bh;
+    struct block_head * blkh;
+    struct item_head * ih;
+    int last_loc, unmoved_loc;
+    int i;
+
+    blkh = B_BLK_HEAD(bh);
+    nr = blkh_nr_item(blkh);
+
+    /* item head of truncated item */
+    ih = B_N_PITEM_HEAD (bh, cut_item_num);
+
+    if (is_direntry_le_ih (ih)) {
+        /* cut the entries first */
+        cut_size = leaf_cut_entries (bh, ih, pos_in_item, cut_size);
+        if (pos_in_item == 0) {
+	        /* change key */
+            RFALSE( cut_item_num,
+                    "when 0-th enrty of item is cut, that item must be first in the node, not %d-th", cut_item_num);
+            /* change item key by key of first entry in the item */
+	    set_le_ih_k_offset (ih, deh_offset(B_I_DEH (bh, ih)));
+            /*memcpy (&ih->ih_key.k_offset, &(B_I_DEH (bh, ih)->deh_offset), SHORT_KEY_SIZE);*/
+	    }
+    } else {
+        /* item is direct or indirect */
+        RFALSE( is_statdata_le_ih (ih), "10195: item is stat data");
+        RFALSE( pos_in_item && pos_in_item + cut_size != ih_item_len(ih),
+                "10200: invalid offset (%lu) or trunc_size (%lu) or ih_item_len (%lu)",
+                ( long unsigned ) pos_in_item, ( long unsigned ) cut_size, 
+		( long unsigned ) ih_item_len (ih));
+
+        /* shift item body to left if cut is from the head of item */
+        if (pos_in_item == 0) {
+            memmove( bh->b_data + ih_location(ih),
+		     bh->b_data + ih_location(ih) + cut_size,
+		     ih_item_len(ih) - cut_size);
+	    
+            /* change key of item */
+            if (is_direct_le_ih (ih))
+		set_le_ih_k_offset (ih, le_ih_k_offset (ih) + cut_size);
+            else {
+		set_le_ih_k_offset (ih, le_ih_k_offset (ih) + (cut_size / UNFM_P_SIZE) * bh->b_size);
+                RFALSE( ih_item_len(ih) == cut_size && get_ih_free_space (ih),
+                        "10205: invalid ih_free_space (%h)", ih);
+	        }
+	    }
+    }
+  
+
+    /* location of the last item */
+    last_loc = ih_location( &(ih[nr - cut_item_num - 1]) );
+
+    /* location of the item, which is remaining at the same place */
+    unmoved_loc = cut_item_num ? ih_location(ih-1) : bh->b_size;
+
+
+    /* shift */
+    memmove (bh->b_data + last_loc + cut_size, bh->b_data + last_loc,
+	       unmoved_loc - last_loc - cut_size);
+
+    /* change item length */
+    put_ih_item_len( ih, ih_item_len(ih) - cut_size );
+  
+    if (is_indirect_le_ih (ih)) {
+        if (pos_in_item)
+            set_ih_free_space (ih, 0);
+    }
+
+    /* change locations */
+    for (i = cut_item_num; i < nr; i ++)
+        put_ih_location( &(ih[i-cut_item_num]), ih_location( &ih[i-cut_item_num]) + cut_size );
+
+    /* size, free space */
+    set_blkh_free_space( blkh, blkh_free_space(blkh) + cut_size );
+
+    do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+    
+    if (bi->bi_parent) { 
+      struct disk_child *t_dc;
+      t_dc = B_N_CHILD (bi->bi_parent, bi->bi_position);
+      put_dc_size( t_dc, dc_size(t_dc) - cut_size );
+      do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+    }
+}
+
+
+/* delete del_num items from the buffer, starting from the item at position 'first' */
+static void leaf_delete_items_entirely (struct buffer_info * bi,
+					int first, int del_num)
+{
+    struct buffer_head * bh = bi->bi_bh;
+    int nr;
+    int i, j;
+    int last_loc, last_removed_loc;
+    struct block_head * blkh;
+    struct item_head * ih;
+
+  RFALSE( bh == NULL, "10210: buffer is 0");
+  RFALSE( del_num < 0, "10215: del_num less than 0 (%d)", del_num);
+
+  if (del_num == 0)
+    return;
+
+  blkh = B_BLK_HEAD(bh);
+  nr = blkh_nr_item(blkh);
+
+  RFALSE( first < 0 || first + del_num > nr,
+          "10220: first=%d, number=%d, there is %d items", first, del_num, nr);
+
+  if (first == 0 && del_num == nr) {
+    /* this does not work */
+    make_empty_node (bi);
+    
+    do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+    return;
+  }
+
+  ih = B_N_PITEM_HEAD (bh, first);
+  
+  /* location of unmovable item */
+  j = (first == 0) ? bh->b_size : ih_location(ih-1);
+      
+  /* delete items */
+  last_loc = ih_location( &(ih[nr-1-first]) );
+  last_removed_loc = ih_location( &(ih[del_num-1]) );
+
+  memmove (bh->b_data + last_loc + j - last_removed_loc,
+	   bh->b_data + last_loc, last_removed_loc - last_loc);
+  
+  /* delete item headers */
+  memmove (ih, ih + del_num, (nr - first - del_num) * IH_SIZE);
+  
+  /* change item location */
+  for (i = first; i < nr - del_num; i ++)
+    put_ih_location( &(ih[i-first]), ih_location( &(ih[i-first]) ) + (j - last_removed_loc) );
+
+  /* sizes, item number */
+  set_blkh_nr_item( blkh, blkh_nr_item(blkh) - del_num );
+  set_blkh_free_space( blkh, blkh_free_space(blkh) + (j - last_removed_loc + IH_SIZE * del_num) );
+
+  do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+  
+  if (bi->bi_parent) {
+    struct disk_child *t_dc = B_N_CHILD (bi->bi_parent, bi->bi_position);
+    put_dc_size( t_dc, dc_size(t_dc) -
+				(j - last_removed_loc + IH_SIZE * del_num));
+    do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+  }
+}
+
+
+
+
+
+/* paste new_entry_count entries (new_dehs, records) into position before to item_num-th item */
+void    leaf_paste_entries (
+			struct buffer_head * bh,
+			int item_num,
+			int before,
+			int new_entry_count,
+			struct reiserfs_de_head * new_dehs,
+			const char * records,
+			int paste_size
+		)
+{
+    struct item_head * ih;
+    char * item;
+    struct reiserfs_de_head * deh;
+    char * insert_point;
+    int i, old_entry_num;
+
+    if (new_entry_count == 0)
+        return;
+
+    ih = B_N_PITEM_HEAD(bh, item_num);
+
+  /* make sure, that item is directory, and there are enough records in it */
+  RFALSE( !is_direntry_le_ih (ih), "10225: item is not directory item");
+  RFALSE( I_ENTRY_COUNT (ih) < before,
+	  "10230: there is no entry to paste entries before. entry_count = %d, before = %d",
+	  I_ENTRY_COUNT (ih), before);
+
+
+  /* first byte of dest item */
+  item = bh->b_data + ih_location(ih);
+
+  /* entry head array */
+  deh = B_I_DEH (bh, ih);
+
+  /* new records will be pasted at this point */
+  insert_point = item + (before ? deh_location( &(deh[before - 1])) : (ih_item_len(ih) - paste_size));
+
+  /* adjust locations of records that will be AFTER new records */
+  for (i = I_ENTRY_COUNT(ih) - 1; i >= before; i --)
+    put_deh_location( &(deh[i]),
+                deh_location(&(deh[i])) + (DEH_SIZE * new_entry_count )); 
+
+  /* adjust locations of records that will be BEFORE new records */
+  for (i = 0; i < before; i ++)
+    put_deh_location( &(deh[i]), deh_location(&(deh[i])) + paste_size );
+
+  old_entry_num = I_ENTRY_COUNT(ih);
+  put_ih_entry_count( ih, ih_entry_count(ih) + new_entry_count );
+
+  /* prepare space for pasted records */
+  memmove (insert_point + paste_size, insert_point, item + (ih_item_len(ih) - paste_size) - insert_point);
+
+  /* copy new records */
+  memcpy (insert_point + DEH_SIZE * new_entry_count, records,
+		   paste_size - DEH_SIZE * new_entry_count);
+  
+  /* prepare space for new entry heads */
+  deh += before;
+  memmove ((char *)(deh + new_entry_count), deh, insert_point - (char *)deh);
+
+  /* copy new entry heads */
+  deh = (struct reiserfs_de_head *)((char *)deh);
+  memcpy (deh, new_dehs, DEH_SIZE * new_entry_count);
+
+  /* set locations of new records */
+  for (i = 0; i < new_entry_count; i ++)
+  {
+    put_deh_location( &(deh[i]),
+        deh_location( &(deh[i] )) +
+        (- deh_location( &(new_dehs[new_entry_count - 1])) +
+        insert_point + DEH_SIZE * new_entry_count - item));
+  }
+
+
+  /* change item key if necessary (when we paste before the 0-th entry) */
+  if (!before)
+    {
+	set_le_ih_k_offset (ih, deh_offset(new_dehs));
+/*      memcpy (&ih->ih_key.k_offset, 
+		       &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
+    }
+
+#ifdef CONFIG_REISERFS_CHECK
+  {
+    int prev, next;
+    /* check record locations */
+    deh = B_I_DEH (bh, ih);
+    for (i = 0; i < I_ENTRY_COUNT(ih); i ++) {
+      next = (i < I_ENTRY_COUNT(ih) - 1) ? deh_location( &(deh[i + 1])) : 0;
+      prev = (i != 0) ? deh_location( &(deh[i - 1]) ) : 0;
+      
+      if (prev && prev <= deh_location( &(deh[i])))
+	reiserfs_warning (NULL, "vs-10240: leaf_paste_entries: directory item (%h) corrupted (prev %a, cur(%d) %a)",
+			  ih, deh + i - 1, i, deh + i);
+      if (next && next >= deh_location( &(deh[i])))
+	reiserfs_warning (NULL, "vs-10250: leaf_paste_entries: directory item (%h) corrupted (cur(%d) %a, next %a)",
+			  ih, i, deh + i, deh + i + 1);
+    }
+  }
+#endif
+
+}
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
new file mode 100644
index 0000000..80e92d9
--- /dev/null
+++ b/fs/reiserfs/namei.c
@@ -0,0 +1,1491 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ *
+ * Trivial changes by Alan Cox to remove EHASHCOLLISION for compatibility
+ *
+ * Trivial Changes:
+ * Rights granted to Hans Reiser to redistribute under other terms providing
+ * he accepts all liability including but not limited to patent, fitness
+ * for purpose, and direct or indirect claims arising from failure to perform.
+ *
+ * NO WARRANTY
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/smp_lock.h>
+#include <linux/quotaops.h>
+
+#define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { i->i_nlink++; if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
+#define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) i->i_nlink--;
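+
+/* Once a directory's link count would reach REISERFS_LINK_MAX it is pinned at
+   1, and the macros above then leave it alone: i_nlink == 1 on a directory
+   effectively means "link count is not being tracked". */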
+
+// directory item contains array of entry headers. This performs
+// binary search through that array
+static int bin_search_in_dir_item (struct reiserfs_dir_entry * de, loff_t off)
+{
+    struct item_head * ih = de->de_ih;
+    struct reiserfs_de_head * deh = de->de_deh;
+    int rbound, lbound, j;
+
+    lbound = 0;
+    rbound = I_ENTRY_COUNT (ih) - 1;
+
+    for (j = (rbound + lbound) / 2; lbound <= rbound; j = (rbound + lbound) / 2) {
+	if (off < deh_offset (deh + j)) {
+	    rbound = j - 1;
+	    continue;
+	}
+	if (off > deh_offset (deh + j)) {
+	    lbound = j + 1;
+	    continue;
+	}
+	// this is not a full name match yet, only the third key component matched
+	de->de_entry_num = j;
+	return NAME_FOUND;
+    }
+
+    de->de_entry_num = lbound;
+    return NAME_NOT_FOUND;
+}
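+
+/* On NAME_NOT_FOUND the search above leaves de->de_entry_num at the position
+   where an entry with offset 'off' would have to be inserted. */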
+
+
+// set de to point to the directory item (and its entry head array) that the path points to
+static inline void set_de_item_location (struct reiserfs_dir_entry * de, struct path * path)
+{
+    de->de_bh = get_last_bh (path);
+    de->de_ih = get_ih (path);
+    de->de_deh = B_I_DEH (de->de_bh, de->de_ih);
+    de->de_item_num = PATH_LAST_POSITION (path);
+} 
+
+
+// de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
+inline void set_de_name_and_namelen (struct reiserfs_dir_entry * de)
+{
+    struct reiserfs_de_head * deh = de->de_deh + de->de_entry_num;
+
+    if (de->de_entry_num >= ih_entry_count (de->de_ih))
+	BUG ();
+
+    de->de_entrylen = entry_length (de->de_bh, de->de_ih, de->de_entry_num);
+    de->de_namelen = de->de_entrylen - (de_with_sd (deh) ? SD_SIZE : 0);
+    de->de_name = B_I_PITEM (de->de_bh, de->de_ih) + deh_location(deh);
+    if (de->de_name[de->de_namelen - 1] == 0)
+	de->de_namelen = strlen (de->de_name);
+}
+
+
+// set the key of the object the entry points to
+static inline void set_de_object_key (struct reiserfs_dir_entry * de)
+{
+    if (de->de_entry_num >= ih_entry_count (de->de_ih))
+	BUG ();
+    de->de_dir_id = deh_dir_id( &(de->de_deh[de->de_entry_num]));
+    de->de_objectid = deh_objectid( &(de->de_deh[de->de_entry_num]));
+}
+
+
+static inline void store_de_entry_key (struct reiserfs_dir_entry * de)
+{
+    struct reiserfs_de_head * deh = de->de_deh + de->de_entry_num;
+
+    if (de->de_entry_num >= ih_entry_count (de->de_ih))
+	BUG ();
+
+    /* store key of the found entry */
+    de->de_entry_key.version = KEY_FORMAT_3_5;
+    de->de_entry_key.on_disk_key.k_dir_id = le32_to_cpu (de->de_ih->ih_key.k_dir_id);
+    de->de_entry_key.on_disk_key.k_objectid = le32_to_cpu (de->de_ih->ih_key.k_objectid);
+    set_cpu_key_k_offset (&(de->de_entry_key), deh_offset (deh));
+    set_cpu_key_k_type (&(de->de_entry_key), TYPE_DIRENTRY);
+}
+
+
+/* We assign a key to each directory item, and place multiple entries
+in a single directory item.  A directory item has a key equal to the
+key of the first directory entry in it.
+
+This function first calls search_by_key; then, if an item whose first
+entry matches is not found, it looks for the entry inside the directory
+item found by search_by_key. It fills in the path to the entry and the
+entry position in the item.
+
+*/
+
+/* The function is NOT SCHEDULE-SAFE! */
+int search_by_entry_key (struct super_block * sb, const struct cpu_key * key,
+			 struct path * path, struct reiserfs_dir_entry * de)
+{
+    int retval;
+
+    retval = search_item (sb, key, path);
+    switch (retval) {
+    case ITEM_NOT_FOUND:
+	if (!PATH_LAST_POSITION (path)) {
+	    reiserfs_warning (sb, "vs-7000: search_by_entry_key: search_by_key returned item position == 0");
+	    pathrelse(path) ;
+	    return IO_ERROR ;
+	}
+	PATH_LAST_POSITION (path) --;
+
+    case ITEM_FOUND:
+	break;
+
+    case IO_ERROR:
+	return retval;
+
+    default:
+	pathrelse (path);
+	reiserfs_warning (sb, "vs-7002: search_by_entry_key: no path to here");
+	return IO_ERROR;
+    }
+
+    set_de_item_location (de, path);
+
+#ifdef CONFIG_REISERFS_CHECK
+    if (!is_direntry_le_ih (de->de_ih) || 
+	COMP_SHORT_KEYS (&(de->de_ih->ih_key), key)) {
+	print_block (de->de_bh, 0, -1, -1);
+	reiserfs_panic (sb, "vs-7005: search_by_entry_key: found item %h is not directory item or "
+                        "does not belong to the same directory as key %K", de->de_ih, key);
+    }
+#endif /* CONFIG_REISERFS_CHECK */
+
+    /* binary search in the directory item by the third component of the
+       key. sets de->de_entry_num of de */
+    retval = bin_search_in_dir_item (de, cpu_key_k_offset (key));
+    path->pos_in_item = de->de_entry_num;
+    if (retval != NAME_NOT_FOUND) {
+	// ugly, but rename needs de_bh, de_deh, de_name, de_namelen, de_objectid set
+	set_de_name_and_namelen (de);
+	set_de_object_key (de);
+    }
+    return retval;
+}
+
+
+
+/* Keyed 32-bit hash function using TEA in a Davis-Meyer function */
+
+/* The third component is hashed, and you can choose from more than
+   one hash function.  Per directory hashes are not yet implemented
+   but are thought about. This function should be moved to hashes.c
+   Jedi, please do so.  -Hans */
+
+static __u32 get_third_component (struct super_block * s, 
+				  const char * name, int len)
+{
+    __u32 res;
+
+    if (!len || (len == 1 && name[0] == '.'))
+	return DOT_OFFSET;
+    if (len == 2 && name[0] == '.' && name[1] == '.')
+	return DOT_DOT_OFFSET;
+
+    res = REISERFS_SB(s)->s_hash_function (name, len);
+
+    // take bits from 7-th to 30-th including both bounds
+    res = GET_HASH_VALUE(res);
+    if (res == 0)
+	// needed to have no names before "." and "..", which have hash
+	// value == 0 and generation counters 1 and 2 respectively
+	res = 128;
+    return res + MAX_GENERATION_NUMBER;
+}
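+
+/* The low 7 bits of a directory offset hold a generation counter
+   (0..MAX_GENERATION_NUMBER) that disambiguates names hashing to the same
+   value, so the offset returned above ends at the largest possible generation
+   for that hash; linear_search_in_dir_item() below then walks backwards over
+   the entries sharing that hash value. */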
+
+
+static int reiserfs_match (struct reiserfs_dir_entry * de, 
+			   const char * name, int namelen)
+{
+    int retval = NAME_NOT_FOUND;
+
+    if ((namelen == de->de_namelen) &&
+	!memcmp(de->de_name, name, de->de_namelen))
+	retval = (de_visible (de->de_deh + de->de_entry_num) ? NAME_FOUND : NAME_FOUND_INVISIBLE);
+
+    return retval;
+}
+
+
+/* de's de_bh, de_ih, de_deh, de_item_num, de_entry_num are set already */
+
+				/* used when hash collisions exist */
+
+
+static int linear_search_in_dir_item (struct cpu_key * key, struct reiserfs_dir_entry * de,
+				      const char * name, int namelen)
+{
+    struct reiserfs_de_head * deh = de->de_deh;
+    int retval;
+    int i;
+
+    i = de->de_entry_num;
+
+    if (i == I_ENTRY_COUNT (de->de_ih) ||
+	GET_HASH_VALUE (deh_offset (deh + i)) != GET_HASH_VALUE (cpu_key_k_offset (key))) {
+	i --;
+    }
+
+    RFALSE( de->de_deh != B_I_DEH (de->de_bh, de->de_ih),
+	    "vs-7010: array of entry headers not found");
+
+    deh += i;
+
+    for (; i >= 0; i --, deh --) {
+	if (GET_HASH_VALUE (deh_offset (deh)) !=
+	    GET_HASH_VALUE (cpu_key_k_offset (key))) {
+	    // hash value does not match, no need to check whole name
+	    return NAME_NOT_FOUND;
+	}
+   
+	/* mark, that this generation number is used */
+	if (de->de_gen_number_bit_string)
+	    set_bit (GET_GENERATION_NUMBER (deh_offset (deh)), (unsigned long *)de->de_gen_number_bit_string);
+
+	// calculate pointer to name and namelen
+	de->de_entry_num = i;
+	set_de_name_and_namelen (de);
+
+	if ((retval = reiserfs_match (de, name, namelen)) != NAME_NOT_FOUND) {
+	    // de's de_name, de_namelen, de_recordlen are set. Fill the rest:
+
+	    // key of pointed object
+	    set_de_object_key (de);
+
+	    store_de_entry_key (de);
+
+	    // retval can be NAME_FOUND or NAME_FOUND_INVISIBLE
+	    return retval;
+	}
+    }
+
+    if (GET_GENERATION_NUMBER (le_ih_k_offset (de->de_ih)) == 0)
+	/* we have reached the leftmost entry in the node. In general we
+           would have to go to the left neighbor, but if the generation
+           counter is already 0, we know for sure that there is no name
+           with the same hash value */
+	// FIXME: this works correctly only because the hash value can not
+	// be 0. Btw, in case of Yura's hash it is probably possible,
+	// so this is a bug
+	return NAME_NOT_FOUND;
+
+    RFALSE( de->de_item_num,
+	    "vs-7015: two diritems of the same directory in one node?");
+
+    return GOTO_PREVIOUS_ITEM;
+}
+
+
+// may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
+// FIXME: should add something like IOERROR
+static int reiserfs_find_entry (struct inode * dir, const char * name, int namelen, 
+				struct path * path_to_entry, struct reiserfs_dir_entry * de)
+{
+    struct cpu_key key_to_search;
+    int retval;
+
+
+    if (namelen > REISERFS_MAX_NAME (dir->i_sb->s_blocksize))
+	return NAME_NOT_FOUND;
+
+    /* we will search for this key in the tree */
+    make_cpu_key (&key_to_search, dir, 
+		  get_third_component (dir->i_sb, name, namelen), TYPE_DIRENTRY, 3);
+
+    while (1) {
+	retval = search_by_entry_key (dir->i_sb, &key_to_search, path_to_entry, de);
+	if (retval == IO_ERROR) {
+	    reiserfs_warning (dir->i_sb, "zam-7001: io error in %s",
+			      __FUNCTION__);
+	    return IO_ERROR;
+	}
+
+	/* compare names for all entries having given hash value */
+	retval = linear_search_in_dir_item (&key_to_search, de, name, namelen);
+	if (retval != GOTO_PREVIOUS_ITEM) {
+	    /* no need to scan the directory any further: the entry was either found or does not exist */
+	    path_to_entry->pos_in_item = de->de_entry_num;
+	    return retval;
+	}
+
+	/* the left neighboring item of this directory exists and the given entry may be there */
+	set_cpu_key_k_offset (&key_to_search, le_ih_k_offset (de->de_ih) - 1);
+	pathrelse (path_to_entry);
+
+    } /* while (1) */
+}
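+
+/* Entries sharing one hash value can spill over into the left neighboring
+   item; when linear_search_in_dir_item() returns GOTO_PREVIOUS_ITEM, the loop
+   above retries with a key just below the current item's offset so that the
+   scan continues in that neighbor. */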
+
+
+static struct dentry * reiserfs_lookup (struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+    int retval;
+    struct inode * inode = NULL;
+    struct reiserfs_dir_entry de;
+    INITIALIZE_PATH (path_to_entry);
+
+    if (REISERFS_MAX_NAME (dir->i_sb->s_blocksize) < dentry->d_name.len)
+	return ERR_PTR(-ENAMETOOLONG);
+
+    reiserfs_write_lock(dir->i_sb);
+    de.de_gen_number_bit_string = NULL;
+    retval = reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path_to_entry, &de);
+    pathrelse (&path_to_entry);
+    if (retval == NAME_FOUND) {
+        /* Hide the .reiserfs_priv directory */
+	if (reiserfs_xattrs (dir->i_sb) &&
+	    !old_format_only(dir->i_sb) &&
+            REISERFS_SB(dir->i_sb)->priv_root &&
+            REISERFS_SB(dir->i_sb)->priv_root->d_inode &&
+	    de.de_objectid == le32_to_cpu (INODE_PKEY(REISERFS_SB(dir->i_sb)->priv_root->d_inode)->k_objectid)) {
+	  reiserfs_write_unlock (dir->i_sb);
+	  return ERR_PTR (-EACCES);
+	}
+
+	inode = reiserfs_iget (dir->i_sb, (struct cpu_key *)&(de.de_dir_id));
+	if (!inode || IS_ERR(inode)) {
+	    reiserfs_write_unlock(dir->i_sb);
+	    return ERR_PTR(-EACCES);
+        }
+
+	/* Propagate the priv_object flag so we know we're in the priv tree */
+	if (is_reiserfs_priv_object (dir))
+	    reiserfs_mark_inode_private (inode);
+    }
+    reiserfs_write_unlock(dir->i_sb);
+    if ( retval == IO_ERROR ) {
+	return ERR_PTR(-EIO);
+    }
+
+    if (inode)
+	    return d_splice_alias(inode, dentry);
+    
+    d_add(dentry, inode);
+    return NULL;
+}
+
+
+/* 
+** looks up the dentry of the parent directory for child.
+** taken from ext2_get_parent
+*/
+struct dentry *reiserfs_get_parent(struct dentry *child)
+{
+    int retval;
+    struct inode * inode = NULL;
+    struct reiserfs_dir_entry de;
+    INITIALIZE_PATH (path_to_entry);
+    struct dentry *parent;
+    struct inode *dir = child->d_inode ;
+
+
+    if (dir->i_nlink == 0) {
+	return ERR_PTR(-ENOENT);
+    }
+    de.de_gen_number_bit_string = NULL;
+
+    reiserfs_write_lock(dir->i_sb);
+    retval = reiserfs_find_entry (dir, "..", 2, &path_to_entry, &de);
+    pathrelse (&path_to_entry);
+    if (retval != NAME_FOUND) {
+	reiserfs_write_unlock(dir->i_sb);
+	return ERR_PTR(-ENOENT);
+    }
+    inode = reiserfs_iget (dir->i_sb, (struct cpu_key *)&(de.de_dir_id));
+    reiserfs_write_unlock(dir->i_sb);
+
+    if (!inode || IS_ERR(inode)) {
+	return ERR_PTR(-EACCES);
+    }
+    parent = d_alloc_anon(inode);
+    if (!parent) {
+	iput(inode);
+	parent = ERR_PTR(-ENOMEM);
+    }
+    return parent;
+}
+
+
+/* add entry to the directory (entry can be hidden). 
+
+insert definition of when hidden directories are used here -Hans
+
+ Does not mark the dir inode dirty; do it after a successful call to it */
+
+static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct inode * dir,
+                               const char * name, int namelen, struct inode * inode,
+			       int visible)
+{
+    struct cpu_key entry_key;
+    struct reiserfs_de_head * deh;
+    INITIALIZE_PATH (path);
+    struct reiserfs_dir_entry de;
+    int bit_string [MAX_GENERATION_NUMBER / (sizeof(int) * 8) + 1];
+    int gen_number;
+    char small_buf[32+DEH_SIZE] ; /* 48 bytes now; we avoid kmalloc
+                                     if we create a file with a short name */
+    char * buffer;
+    int buflen, paste_size;
+    int retval;
+
+    BUG_ON (!th->t_trans_id);
+
+    /* cannot allow items to be added into a busy deleted directory */
+    if (!namelen)
+	return -EINVAL;
+
+    if (namelen > REISERFS_MAX_NAME (dir->i_sb->s_blocksize))
+	return -ENAMETOOLONG;
+
+    /* each entry has unique key. compose it */
+    make_cpu_key (&entry_key, dir, 
+		  get_third_component (dir->i_sb, name, namelen), TYPE_DIRENTRY, 3);
+
+    /* get memory for composing the entry */
+    buflen = DEH_SIZE + ROUND_UP (namelen);
+    if (buflen > sizeof (small_buf)) {
+	buffer = reiserfs_kmalloc (buflen, GFP_NOFS, dir->i_sb);
+	if (buffer == 0)
+	    return -ENOMEM;
+    } else
+	buffer = small_buf;
+
+    paste_size = (get_inode_sd_version (dir) == STAT_DATA_V1) ? (DEH_SIZE + namelen) : buflen;
+
+    /* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */
+    deh = (struct reiserfs_de_head *)buffer;
+    deh->deh_location = 0; /* JDM Endian safe if 0 */
+    put_deh_offset( deh, cpu_key_k_offset( &entry_key ) );
+    deh->deh_state = 0; /* JDM Endian safe if 0 */
+    /* put key (ino analog) to de */
+    deh->deh_dir_id = INODE_PKEY (inode)->k_dir_id; /* safe: k_dir_id is le */
+    deh->deh_objectid = INODE_PKEY (inode)->k_objectid; /* safe: k_objectid is le */
+
+    /* copy name */
+    memcpy ((char *)(deh + 1), name, namelen);
+    /* pad with 0s to the 4 byte boundary */
+    padd_item ((char *)(deh + 1), ROUND_UP (namelen), namelen);
+
+    /* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */
+    mark_de_without_sd (deh);
+    visible ? mark_de_visible (deh) : mark_de_hidden (deh);
+
+    /* find the proper place for the new entry */
+    memset (bit_string, 0, sizeof (bit_string));
+    de.de_gen_number_bit_string = (char *)bit_string;
+    retval = reiserfs_find_entry (dir, name, namelen, &path, &de);
+    if( retval != NAME_NOT_FOUND ) {
+	if (buffer != small_buf)
+	    reiserfs_kfree (buffer, buflen, dir->i_sb);
+	pathrelse (&path);
+
+	if ( retval == IO_ERROR ) {
+	    return -EIO;
+	}
+
+        if (retval != NAME_FOUND) {
+	    reiserfs_warning (dir->i_sb, "zam-7002:%s: \"reiserfs_find_entry\" "
+			      "has returned unexpected value (%d)",
+			      __FUNCTION__, retval);
+       }
+
+	return -EEXIST;
+    }
+
+    gen_number = find_first_zero_bit ((unsigned long *)bit_string, MAX_GENERATION_NUMBER + 1);
+    if (gen_number > MAX_GENERATION_NUMBER) {
+      /* there is no free generation number */
+      reiserfs_warning (dir->i_sb, "reiserfs_add_entry: Congratulations! we have got hash function screwed up");
+      if (buffer != small_buf)
+          reiserfs_kfree (buffer, buflen, dir->i_sb);
+      pathrelse (&path);
+      return -EBUSY;
+    }
+    /* adjust offset of directory entry */
+    put_deh_offset(deh, SET_GENERATION_NUMBER(deh_offset(deh), gen_number));
+    set_cpu_key_k_offset (&entry_key, deh_offset(deh));
+ 
+    /* update max-hash-collisions counter in reiserfs_sb_info */
+    PROC_INFO_MAX( th -> t_super, max_hash_collisions, gen_number );
+ 		  
+    if (gen_number != 0) {	/* we need to re-search for the insertion point */
+      if (search_by_entry_key (dir->i_sb, &entry_key, &path, &de) != NAME_NOT_FOUND) {
+            reiserfs_warning (dir->i_sb, "vs-7032: reiserfs_add_entry: "
+                              "entry with this key (%K) already exists",
+                              &entry_key);
+
+	    if (buffer != small_buf)
+		reiserfs_kfree (buffer, buflen, dir->i_sb);
+	    pathrelse (&path);
+	    return -EBUSY;
+	}
+    }
+  
+    /* perform the insertion of the entry that we have prepared */
+    retval = reiserfs_paste_into_item (th, &path, &entry_key, dir, buffer, paste_size);
+    if (buffer != small_buf)
+	reiserfs_kfree (buffer, buflen, dir->i_sb);
+    if (retval) {
+	reiserfs_check_path(&path) ;
+	return retval;
+    }
+
+    dir->i_size += paste_size;
+    dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+    if (!S_ISDIR (inode->i_mode) && visible)
+	// reiserfs_mkdir or reiserfs_rename will do that by itself
+	reiserfs_update_sd (th, dir);
+
+    reiserfs_check_path(&path) ;
+    return 0;
+}
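+
+/* Hash-collision handling above, in short: reiserfs_find_entry fills
+   bit_string with the generation numbers already used for this hash value,
+   find_first_zero_bit picks the first free one, and that number is folded
+   into the entry offset with SET_GENERATION_NUMBER before the entry is
+   pasted; if all MAX_GENERATION_NUMBER + 1 numbers are taken the insert fails
+   with -EBUSY. */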
+
+/* quota utility function, call if you've had to abort after calling
+** new_inode_init, and have not called reiserfs_new_inode yet.
+** This should only be called on inodes that do not have stat data
+** inserted into the tree yet.
+*/
+static int drop_new_inode(struct inode *inode) {
+    DQUOT_DROP(inode);
+    make_bad_inode(inode) ;
+    inode->i_flags |= S_NOQUOTA;
+    iput(inode) ;
+    return 0 ;
+}
+
+/* utility function that does setup for reiserfs_new_inode.  
+** DQUOT_INIT needs lots of credits so it's better to have it
+** outside of a transaction, so we had to pull some bits of
+** reiserfs_new_inode out into this func.
+*/
+static int new_inode_init(struct inode *inode, struct inode *dir, int mode) {
+
+    /* the quota init calls have to know who to charge the quota to, so
+    ** we have to set uid and gid here
+    */
+    inode->i_uid = current->fsuid;
+    inode->i_mode = mode;
+
+    if (dir->i_mode & S_ISGID) {
+        inode->i_gid = dir->i_gid;
+        if (S_ISDIR(mode))
+            inode->i_mode |= S_ISGID;
+    } else {
+        inode->i_gid = current->fsgid;
+    }
+    DQUOT_INIT(inode);
+    return 0 ;
+}
+
+static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+    int retval;
+    struct inode * inode;
+    /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+    struct reiserfs_transaction_handle th ;
+    int locked;
+
+    if (!(inode = new_inode(dir->i_sb))) {
+	return -ENOMEM ;
+    }
+    new_inode_init(inode, dir, mode);
+
+    locked = reiserfs_cache_default_acl (dir);
+
+    reiserfs_write_lock(dir->i_sb);
+
+    if (locked)
+        reiserfs_write_lock_xattrs (dir->i_sb);
+
+    retval = journal_begin(&th, dir->i_sb, jbegin_count);
+    if (retval) {
+        drop_new_inode (inode);
+        goto out_failed;
+    }
+
+    retval = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode);
+    if (retval)
+        goto out_failed;
+	
+    if (locked) {
+        reiserfs_write_unlock_xattrs (dir->i_sb);
+        locked = 0;
+    }
+
+    inode->i_op = &reiserfs_file_inode_operations;
+    inode->i_fop = &reiserfs_file_operations;
+    inode->i_mapping->a_ops = &reiserfs_address_space_operations ;
+
+    retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len, 
+				inode, 1/*visible*/);
+    if (retval) {
+        int err;
+	inode->i_nlink--;
+	reiserfs_update_sd (&th, inode);
+	err = journal_end(&th, dir->i_sb, jbegin_count) ;
+        if (err)
+            retval = err;
+	iput (inode);
+	goto out_failed;
+    }
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    d_instantiate(dentry, inode);
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+
+out_failed:
+    if (locked)
+        reiserfs_write_unlock_xattrs (dir->i_sb);
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+}
+
+
+static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+    int retval;
+    struct inode * inode;
+    struct reiserfs_transaction_handle th ;
+    /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+    int locked;
+
+    if (!new_valid_dev(rdev))
+	return -EINVAL;
+
+    if (!(inode = new_inode(dir->i_sb))) {
+	return -ENOMEM ;
+    }
+    new_inode_init(inode, dir, mode);
+
+    locked = reiserfs_cache_default_acl (dir);
+
+    reiserfs_write_lock(dir->i_sb);
+
+    if (locked)
+        reiserfs_write_lock_xattrs (dir->i_sb);
+
+    retval = journal_begin(&th, dir->i_sb, jbegin_count) ;
+    if (retval) {
+        drop_new_inode (inode);
+        goto out_failed;
+    }
+
+    retval = reiserfs_new_inode (&th, dir, mode, NULL, 0/*i_size*/, dentry, inode);
+    if (retval) {
+        goto out_failed;
+    }
+
+    if (locked) {
+        reiserfs_write_unlock_xattrs (dir->i_sb);
+        locked = 0;
+    }
+
+
+    inode->i_op = &reiserfs_special_inode_operations;
+    init_special_inode(inode, inode->i_mode, rdev) ;
+
+    //FIXME: needed for block and char devices only
+    reiserfs_update_sd (&th, inode);
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len, 
+				 inode, 1/*visible*/);
+    if (retval) {
+        int err;
+	inode->i_nlink--;
+	reiserfs_update_sd (&th, inode);
+	err = journal_end(&th, dir->i_sb, jbegin_count) ;
+        if (err)
+	    retval = err;
+	iput (inode);
+	goto out_failed;
+    }
+
+    d_instantiate(dentry, inode);
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+
+out_failed:
+    if (locked)
+        reiserfs_write_unlock_xattrs (dir->i_sb);
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+}
+
+
+static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
+{
+    int retval;
+    struct inode * inode;
+    struct reiserfs_transaction_handle th ;
+    /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+    int locked;
+
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    /* set flag that a new packing locality was created and new blocks for the content
+     * of that directory are not displaced yet */
+    REISERFS_I(dir)->new_packing_locality = 1;
+#endif
+    mode = S_IFDIR | mode;
+    if (!(inode = new_inode(dir->i_sb))) {
+	return -ENOMEM ;
+    }
+    new_inode_init(inode, dir, mode);
+
+    locked = reiserfs_cache_default_acl (dir);
+
+    reiserfs_write_lock(dir->i_sb);
+    if (locked)
+        reiserfs_write_lock_xattrs (dir->i_sb);
+
+    retval = journal_begin(&th, dir->i_sb, jbegin_count) ;
+    if (retval) {
+        drop_new_inode (inode);
+        goto out_failed;
+    }
+
+
+    /* inc the link count now, so another writer doesn't overflow it while
+    ** we sleep later on.
+    */
+    INC_DIR_INODE_NLINK(dir)
+
+    retval = reiserfs_new_inode (&th, dir, mode, NULL/*symlink*/,
+				old_format_only (dir->i_sb) ? 
+				EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
+				dentry, inode);
+    if (retval) {
+	dir->i_nlink-- ;
+	goto out_failed;
+    }
+
+    if (locked) {
+	reiserfs_write_unlock_xattrs (dir->i_sb);
+	locked = 0;
+    }
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    inode->i_op = &reiserfs_dir_inode_operations;
+    inode->i_fop = &reiserfs_dir_operations;
+
+    // note, _this_ add_entry will not update dir's stat data
+    retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len, 
+				inode, 1/*visible*/);
+    if (retval) {
+	int err;
+	inode->i_nlink = 0;
+	DEC_DIR_INODE_NLINK(dir);
+	reiserfs_update_sd (&th, inode);
+	err = journal_end(&th, dir->i_sb, jbegin_count) ;
+	if (err)
+	    retval = err;
+	iput (inode);
+	goto out_failed;
+    }
+
+    // the above add_entry did not update dir's stat data
+    reiserfs_update_sd (&th, dir);
+
+    d_instantiate(dentry, inode);
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+out_failed:
+    if (locked)
+        reiserfs_write_unlock_xattrs (dir->i_sb);
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+}
+
+static inline int reiserfs_empty_dir(struct inode *inode) {
+    /* we can cheat because an old format dir cannot have
+    ** EMPTY_DIR_SIZE, and a new format dir cannot have
+    ** EMPTY_DIR_SIZE_V1.  So, if the inode is either size, 
+    ** regardless of disk format version, the directory is empty.
+    */
+    if (inode->i_size != EMPTY_DIR_SIZE &&
+        inode->i_size != EMPTY_DIR_SIZE_V1) {
+        return 0 ;
+    }
+    return 1 ;
+}
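+
+/* EMPTY_DIR_SIZE and EMPTY_DIR_SIZE_V1 are the sizes of a directory holding
+   only "." and ".." in the new and old disk formats respectively, so the size
+   check above amounts to "nothing but . and .. is left". */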
+
+static int reiserfs_rmdir (struct inode * dir, struct dentry *dentry)
+{
+    int retval, err;
+    struct inode * inode;
+    struct reiserfs_transaction_handle th ;
+    int jbegin_count; 
+    INITIALIZE_PATH (path);
+    struct reiserfs_dir_entry de;
+
+
+    /* we will be doing 2 balancings and 2 stat data updates; we also change the
+     * quotas of the owner of the directory and of the owner of the parent directory */
+    jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+
+    reiserfs_write_lock(dir->i_sb);
+    retval = journal_begin(&th, dir->i_sb, jbegin_count) ;
+    if (retval)
+        goto out_rmdir;
+
+    de.de_gen_number_bit_string = NULL;
+    if ( (retval = reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path, &de)) == NAME_NOT_FOUND) {
+	retval = -ENOENT;
+	goto end_rmdir;
+    } else if ( retval == IO_ERROR) {
+	retval = -EIO;
+	goto end_rmdir;
+    }
+
+    inode = dentry->d_inode;
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    if (de.de_objectid != inode->i_ino) {
+	// FIXME: compare key of an object and a key found in the
+	// entry
+	retval = -EIO;
+	goto end_rmdir;
+    }
+    if (!reiserfs_empty_dir(inode)) {
+	retval = -ENOTEMPTY;
+	goto end_rmdir;
+    }
+
+    /* cut the entry from directory dir */
+    retval = reiserfs_cut_from_item (&th, &path, &(de.de_entry_key), dir, 
+                                     NULL, /* page */ 
+				     0/*new file size - not used here*/);
+    if (retval < 0)
+	goto end_rmdir;
+
+    if ( inode->i_nlink != 2 && inode->i_nlink != 1 )
+	reiserfs_warning (inode->i_sb, "%s: empty directory has nlink "
+			  "!= 2 (%d)", __FUNCTION__, inode->i_nlink);
+
+    inode->i_nlink = 0;
+    inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+    reiserfs_update_sd (&th, inode);
+
+    DEC_DIR_INODE_NLINK(dir)
+    dir->i_size -= (DEH_SIZE + de.de_entrylen);
+    reiserfs_update_sd (&th, dir);
+
+    /* prevent empty directory from getting lost */
+    add_save_link (&th, inode, 0/* not truncate */);
+
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+    reiserfs_check_path(&path) ;
+out_rmdir:
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+	
+ end_rmdir:
+    /* we must release the path, because either we did not call
+       reiserfs_cut_from_item at all, or reiserfs_cut_from_item did not
+       release the path because the operation was not completed */
+    pathrelse (&path);
+    err = journal_end(&th, dir->i_sb, jbegin_count) ;
+    reiserfs_write_unlock(dir->i_sb);
+    return err ? err : retval;
+}
+
+static int reiserfs_unlink (struct inode * dir, struct dentry *dentry)
+{
+    int retval, err;
+    struct inode * inode;
+    struct reiserfs_dir_entry de;
+    INITIALIZE_PATH (path);
+    struct reiserfs_transaction_handle th ;
+    int jbegin_count;
+    unsigned long savelink;
+
+    inode = dentry->d_inode;
+
+    /* in this transaction we can be doing at most two balancings and two
+       stat data updates; we also change the quotas of the owner of the
+       directory and of the owner of the parent directory */
+    jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+
+    reiserfs_write_lock(dir->i_sb);
+    retval = journal_begin(&th, dir->i_sb, jbegin_count) ;
+    if (retval)
+        goto out_unlink;
+	
+    de.de_gen_number_bit_string = NULL;
+    if ( (retval = reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path, &de)) == NAME_NOT_FOUND) {
+	retval = -ENOENT;
+	goto end_unlink;
+    } else if (retval == IO_ERROR) {
+	retval = -EIO;
+	goto end_unlink;
+    }
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    if (de.de_objectid != inode->i_ino) {
+	// FIXME: compare key of an object and a key found in the
+	// entry
+	retval = -EIO;
+	goto end_unlink;
+    }
+  
+    if (!inode->i_nlink) {
+	reiserfs_warning (inode->i_sb, "%s: deleting nonexistent file "
+			  "(%s:%lu), %d", __FUNCTION__,
+			  reiserfs_bdevname (inode->i_sb), inode->i_ino,
+			  inode->i_nlink);
+	inode->i_nlink = 1;
+    }
+
+    inode->i_nlink--;
+
+    /*
+     * we may schedule before doing the add_save_link call, so save the
+     * link count now to avoid racing with it
+     */
+    savelink = inode->i_nlink;
+
+
+    retval = reiserfs_cut_from_item (&th, &path, &(de.de_entry_key), dir, NULL, 0);
+    if (retval < 0) {
+	inode->i_nlink++;
+	goto end_unlink;
+    }
+    inode->i_ctime = CURRENT_TIME_SEC;
+    reiserfs_update_sd (&th, inode);
+
+    dir->i_size -= (de.de_entrylen + DEH_SIZE);
+    dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+    reiserfs_update_sd (&th, dir);
+
+    if (!savelink)
+       /* prevent file from getting lost */
+       add_save_link (&th, inode, 0/* not truncate */);
+
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+    reiserfs_check_path(&path) ;
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+
+ end_unlink:
+    pathrelse (&path);
+    err = journal_end(&th, dir->i_sb, jbegin_count) ;
+    reiserfs_check_path(&path) ;
+    if (err)
+        retval = err;
+out_unlink:
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+}
+
+static int reiserfs_symlink (struct inode * parent_dir, 
+                            struct dentry * dentry, const char * symname)
+{
+    int retval;
+    struct inode * inode;
+    char * name;
+    int item_len;
+    struct reiserfs_transaction_handle th ;
+    int mode = S_IFLNK | S_IRWXUGO;
+    /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS+REISERFS_QUOTA_TRANS_BLOCKS);
+
+    if (!(inode = new_inode(parent_dir->i_sb))) {
+	return -ENOMEM ;
+    }
+    new_inode_init(inode, parent_dir, mode);
+
+    reiserfs_write_lock(parent_dir->i_sb);
+    item_len = ROUND_UP (strlen (symname));
+    if (item_len > MAX_DIRECT_ITEM_LEN (parent_dir->i_sb->s_blocksize)) {
+	retval =  -ENAMETOOLONG;
+	drop_new_inode(inode);
+	goto out_failed;
+    }
+  
+    name = reiserfs_kmalloc (item_len, GFP_NOFS, parent_dir->i_sb);
+    if (!name) {
+	drop_new_inode(inode);
+	retval =  -ENOMEM;
+	goto out_failed;
+    }
+    memcpy (name, symname, strlen (symname));
+    padd_item (name, item_len, strlen (symname));
+
+    /* We would inherit the default ACL here, but symlinks don't get ACLs */
+
+    retval = journal_begin(&th, parent_dir->i_sb, jbegin_count) ;
+    if (retval) {
+        drop_new_inode (inode);
+        reiserfs_kfree (name, item_len, parent_dir->i_sb);
+        goto out_failed;
+    }
+
+    retval = reiserfs_new_inode (&th, parent_dir, mode, name, strlen (symname), 
+                                 dentry, inode);
+    reiserfs_kfree (name, item_len, parent_dir->i_sb);
+    if (retval) { /* reiserfs_new_inode iputs for us */
+	goto out_failed;
+    }
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(parent_dir) ;
+
+    inode->i_op = &reiserfs_symlink_inode_operations;
+    inode->i_mapping->a_ops = &reiserfs_address_space_operations;
+
+    // must be sure this inode is written with this transaction
+    //
+    //reiserfs_update_sd (&th, inode, READ_BLOCKS);
+
+    retval = reiserfs_add_entry (&th, parent_dir, dentry->d_name.name, 
+                                 dentry->d_name.len, inode, 1/*visible*/);
+    if (retval) {
+	int err;
+	inode->i_nlink--;
+	reiserfs_update_sd (&th, inode);
+	err = journal_end(&th, parent_dir->i_sb, jbegin_count) ;
+	if (err)
+	    retval = err;
+	iput (inode);
+	goto out_failed;
+    }
+
+    d_instantiate(dentry, inode);
+    retval = journal_end(&th, parent_dir->i_sb, jbegin_count) ;
+out_failed:
+    reiserfs_write_unlock(parent_dir->i_sb);
+    return retval;
+}
+
+static int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry * dentry)
+{
+    int retval;
+    struct inode *inode = old_dentry->d_inode;
+    struct reiserfs_transaction_handle th ;
+    /* We need blocks for transaction + update of quotas for the owners of the directory */
+    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * REISERFS_QUOTA_TRANS_BLOCKS;
+
+    reiserfs_write_lock(dir->i_sb);
+    if (inode->i_nlink >= REISERFS_LINK_MAX) {
+	//FIXME: sd_nlink is 32 bit for new files
+	reiserfs_write_unlock(dir->i_sb);
+	return -EMLINK;
+    }
+    if (inode->i_nlink == 0) {
+        reiserfs_write_unlock(dir->i_sb);
+        return -ENOENT;
+    }
+
+    /* inc before scheduling so reiserfs_unlink knows we are here */
+    inode->i_nlink++;
+
+    retval = journal_begin(&th, dir->i_sb, jbegin_count) ;
+    if (retval) {
+        inode->i_nlink--;
+        reiserfs_write_unlock (dir->i_sb);
+        return retval;
+    }
+
+    /* create new entry */
+    retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+				 inode, 1/*visible*/);
+
+    reiserfs_update_inode_transaction(inode) ;
+    reiserfs_update_inode_transaction(dir) ;
+
+    if (retval) {
+	int err;
+	inode->i_nlink--;
+	err = journal_end(&th, dir->i_sb, jbegin_count) ;
+	reiserfs_write_unlock(dir->i_sb);
+	return err ? err : retval;
+    }
+
+    inode->i_ctime = CURRENT_TIME_SEC;
+    reiserfs_update_sd (&th, inode);
+
+    atomic_inc(&inode->i_count) ;
+    d_instantiate(dentry, inode);
+    retval = journal_end(&th, dir->i_sb, jbegin_count) ;
+    reiserfs_write_unlock(dir->i_sb);
+    return retval;
+}
+
+
+// de contains information pointing to an entry; check that the entry still
+// matches the given name and length
+static int de_still_valid (const char * name, int len, struct reiserfs_dir_entry * de)
+{
+    struct reiserfs_dir_entry tmp = *de;
+    
+    // recalculate pointer to name and name length
+    set_de_name_and_namelen (&tmp);
+    // FIXME: could check more
+    if (tmp.de_namelen != len || memcmp (name, de->de_name, len))
+	return 0;
+    return 1;
+}
+
+
+static int entry_points_to_object (const char * name, int len, struct reiserfs_dir_entry * de, struct inode * inode)
+{
+    if (!de_still_valid (name, len, de))
+	return 0;
+
+    if (inode) {
+	if (!de_visible (de->de_deh + de->de_entry_num))
+	    reiserfs_panic (NULL, "vs-7042: entry_points_to_object: entry must be visible");
+	return (de->de_objectid == inode->i_ino) ? 1 : 0;
+    }
+
+    /* this must be a newly added, still hidden entry */
+    if (de_visible (de->de_deh + de->de_entry_num))
+	reiserfs_panic (NULL, "vs-7043: entry_points_to_object: entry must be visible");
+
+    return 1;
+}
+
+
+/* sets key of objectid the entry has to point to */
+static void set_ino_in_dir_entry (struct reiserfs_dir_entry * de, struct reiserfs_key * key)
+{
+    /* JDM These operations are endian safe - both are le */
+    de->de_deh[de->de_entry_num].deh_dir_id = key->k_dir_id;
+    de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
+}
+
+
+/* 
+ * process, that is going to call fix_nodes/do_balance must hold only
+ * one path. If it holds 2 or more, it can get into endless waiting in
+ * get_empty_nodes or its clones 
+ */
+static int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry,
+			    struct inode * new_dir, struct dentry *new_dentry)
+{
+    int retval;
+    INITIALIZE_PATH (old_entry_path);
+    INITIALIZE_PATH (new_entry_path);
+    INITIALIZE_PATH (dot_dot_entry_path);
+    struct item_head new_entry_ih, old_entry_ih, dot_dot_ih ;
+    struct reiserfs_dir_entry old_de, new_de, dot_dot_de;
+    struct inode * old_inode, * new_dentry_inode;
+    struct reiserfs_transaction_handle th ;
+    int jbegin_count ; 
+    umode_t old_inode_mode;
+    unsigned long savelink = 1;
+    struct timespec ctime;
+
+    /* three balancings: (1) old name removal, (2) new name insertion
+       and (3) maybe "save" link insertion
+       stat data updates: (1) old directory,
+       (2) new directory and (3) maybe old object stat data (when it is
+       directory) and (4) maybe stat data of object to which new entry
+       pointed initially and (5) maybe block containing ".." of
+       renamed directory
+       quota updates: two parent directories */
+    jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 5 + 4 * REISERFS_QUOTA_TRANS_BLOCKS;
+
+    old_inode = old_dentry->d_inode;
+    new_dentry_inode = new_dentry->d_inode;
+
+    // make sure, that oldname still exists and points to an object we
+    // are going to rename
+    old_de.de_gen_number_bit_string = NULL;
+    reiserfs_write_lock(old_dir->i_sb);
+    retval = reiserfs_find_entry (old_dir, old_dentry->d_name.name, old_dentry->d_name.len,
+				  &old_entry_path, &old_de);
+    pathrelse (&old_entry_path);
+    if (retval == IO_ERROR) {
+	reiserfs_write_unlock(old_dir->i_sb);
+	return -EIO;
+    }
+
+    if (retval != NAME_FOUND || old_de.de_objectid != old_inode->i_ino) {
+	reiserfs_write_unlock(old_dir->i_sb);
+	return -ENOENT;
+    }
+
+    old_inode_mode = old_inode->i_mode;
+    if (S_ISDIR(old_inode_mode)) {
+	// make sure, that directory being renamed has correct ".." 
+	// and that its new parent directory has not too many links
+	// already
+
+	if (new_dentry_inode) {
+	    if (!reiserfs_empty_dir(new_dentry_inode)) {
+		reiserfs_write_unlock(old_dir->i_sb);
+		return -ENOTEMPTY;
+	    }
+	}
+	
+	/* directory is renamed, its parent directory will be changed, 
+	** so find ".." entry 
+	*/
+	dot_dot_de.de_gen_number_bit_string = NULL;
+	retval = reiserfs_find_entry (old_inode, "..", 2, &dot_dot_entry_path, &dot_dot_de);
+	pathrelse (&dot_dot_entry_path);
+	if (retval != NAME_FOUND) {
+	    reiserfs_write_unlock(old_dir->i_sb);
+	    return -EIO;
+	}
+
+	/* inode number of .. must equal old_dir->i_ino */
+	if (dot_dot_de.de_objectid != old_dir->i_ino) {
+	    reiserfs_write_unlock(old_dir->i_sb);
+	    return -EIO;
+	}
+    }
+
+    retval = journal_begin(&th, old_dir->i_sb, jbegin_count) ;
+    if (retval) {
+        reiserfs_write_unlock (old_dir->i_sb);
+        return retval;
+    }
+
+    /* add new entry (or find the existing one) */
+    retval = reiserfs_add_entry (&th, new_dir, new_dentry->d_name.name, new_dentry->d_name.len, 
+				 old_inode, 0);
+    if (retval == -EEXIST) {
+	if (!new_dentry_inode) {
+	    reiserfs_panic (old_dir->i_sb,
+			    "vs-7050: new entry is found, new inode == 0\n");
+	}
+    } else if (retval) {
+	int err = journal_end(&th, old_dir->i_sb, jbegin_count) ;
+	reiserfs_write_unlock(old_dir->i_sb);
+	return err ? err : retval;
+    }
+
+    reiserfs_update_inode_transaction(old_dir) ;
+    reiserfs_update_inode_transaction(new_dir) ;
+
+    /* this makes it so an fsync on an open fd for the old name will
+    ** commit the rename operation
+    */
+    reiserfs_update_inode_transaction(old_inode) ;
+
+    if (new_dentry_inode) 
+	reiserfs_update_inode_transaction(new_dentry_inode) ;
+
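+    /* Retry loop: re-find the old name, the new name and (for a
+    ** directory) its ".." entry, pull their buffers into the
+    ** transaction, and then check that none of the items moved while we
+    ** blocked.  If anything shifted, the prepared buffers are restored
+    ** and the lookups are repeated.
+    */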
+    while (1) {
+	// look for old name using corresponding entry key (found by reiserfs_find_entry)
+	if ((retval = search_by_entry_key (new_dir->i_sb, &old_de.de_entry_key,
+					   &old_entry_path, &old_de)) != NAME_FOUND) {
+	    pathrelse(&old_entry_path);
+	    journal_end(&th, old_dir->i_sb, jbegin_count);
+	    reiserfs_write_unlock(old_dir->i_sb);
+	    return -EIO;
+	}
+
+	copy_item_head(&old_entry_ih, get_ih(&old_entry_path)) ;
+
+	reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1) ;
+
+	// look for new name by reiserfs_find_entry
+	new_de.de_gen_number_bit_string = NULL;
+	retval = reiserfs_find_entry (new_dir, new_dentry->d_name.name, new_dentry->d_name.len, 
+				      &new_entry_path, &new_de);
+	// reiserfs_find_entry should not return IO_ERROR, because it is called with essentially the same
+	// parameters as reiserfs_add_entry above, and we'll catch any i/o errors before we get here.
+	if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) {
+	    pathrelse(&new_entry_path);
+	    pathrelse(&old_entry_path);
+	    journal_end(&th, old_dir->i_sb, jbegin_count);
+	    reiserfs_write_unlock(old_dir->i_sb);
+	    return -EIO;
+	}
+
+	copy_item_head(&new_entry_ih, get_ih(&new_entry_path)) ;
+
+	reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1) ;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+	    if ((retval = search_by_entry_key (new_dir->i_sb, &dot_dot_de.de_entry_key,
+					       &dot_dot_entry_path, &dot_dot_de)) != NAME_FOUND) {
+		pathrelse(&dot_dot_entry_path);
+		pathrelse(&new_entry_path);
+		pathrelse(&old_entry_path);
+		journal_end(&th, old_dir->i_sb, jbegin_count);
+		reiserfs_write_unlock(old_dir->i_sb);
+		return -EIO;
+	    }
+	    copy_item_head(&dot_dot_ih, get_ih(&dot_dot_entry_path)) ;
+	    // node containing ".." gets into transaction
+	    reiserfs_prepare_for_journal(old_inode->i_sb, dot_dot_de.de_bh, 1) ;
+	}
+				/* we should check seals here, not do
+                                   this stuff, yes? Then, having
+                                   gathered everything into RAM we
+                                   should lock the buffers, yes?  -Hans */
+				/* probably.  our rename needs to hold more 
+				** than one path at once.  The seals would 
+				** have to be written to deal with multi-path 
+				** issues -chris
+				*/
+	/* sanity checking before doing the rename - avoid races many
+	** of the above checks could have scheduled.  We have to be
+	** sure our items haven't been shifted by another process.
+	*/
+	if (item_moved(&new_entry_ih, &new_entry_path) ||
+	    !entry_points_to_object(new_dentry->d_name.name, 
+	                            new_dentry->d_name.len,
+				    &new_de, new_dentry_inode) ||
+	    item_moved(&old_entry_ih, &old_entry_path) || 
+	    !entry_points_to_object (old_dentry->d_name.name, 
+	                             old_dentry->d_name.len,
+				     &old_de, old_inode)) {
+	    reiserfs_restore_prepared_buffer (old_inode->i_sb, new_de.de_bh);
+	    reiserfs_restore_prepared_buffer (old_inode->i_sb, old_de.de_bh);
+	    if (S_ISDIR(old_inode_mode))
+		reiserfs_restore_prepared_buffer (old_inode->i_sb, dot_dot_de.de_bh);
+	    continue;
+	}
+	if (S_ISDIR(old_inode_mode)) {
+	    if ( item_moved(&dot_dot_ih, &dot_dot_entry_path) ||
+		!entry_points_to_object ( "..", 2, &dot_dot_de, old_dir) ) {
+		reiserfs_restore_prepared_buffer (old_inode->i_sb, old_de.de_bh);
+		reiserfs_restore_prepared_buffer (old_inode->i_sb, new_de.de_bh);
+		reiserfs_restore_prepared_buffer (old_inode->i_sb, dot_dot_de.de_bh);
+		continue;
+	    }
+	}
+
+	RFALSE( S_ISDIR(old_inode_mode) && 
+		 !buffer_journal_prepared(dot_dot_de.de_bh), "" );
+
+	break;
+    }
+
+    /* ok, all the changes can be done in one fell swoop when we
+       have claimed all the buffers needed.*/
+    
+    mark_de_visible (new_de.de_deh + new_de.de_entry_num);
+    set_ino_in_dir_entry (&new_de, INODE_PKEY (old_inode));
+    journal_mark_dirty (&th, old_dir->i_sb, new_de.de_bh);
+
+    mark_de_hidden (old_de.de_deh + old_de.de_entry_num);
+    journal_mark_dirty (&th, old_dir->i_sb, old_de.de_bh);
+    ctime = CURRENT_TIME_SEC;
+    old_dir->i_ctime = old_dir->i_mtime = ctime;
+    new_dir->i_ctime = new_dir->i_mtime = ctime;
+    /* thanks to Alex Adriaanse <alex_a@caltech.edu> for patch which adds ctime update of
+       renamed object */
+    old_inode->i_ctime = ctime;
+
+    if (new_dentry_inode) {
+	// adjust link number of the victim
+	if (S_ISDIR(new_dentry_inode->i_mode)) {
+	    new_dentry_inode->i_nlink  = 0;
+	} else {
+	    new_dentry_inode->i_nlink--;
+	}
+	new_dentry_inode->i_ctime = ctime;
+	savelink = new_dentry_inode->i_nlink;
+    }
+
+    if (S_ISDIR(old_inode_mode)) {
+	// adjust ".." of renamed directory 
+	set_ino_in_dir_entry (&dot_dot_de, INODE_PKEY (new_dir));
+	journal_mark_dirty (&th, new_dir->i_sb, dot_dot_de.de_bh);
+	
+        if (!new_dentry_inode)
+	    /* there (in new_dir) was no directory, so it got new link
+	       (".."  of renamed directory) */
+	    INC_DIR_INODE_NLINK(new_dir);
+		
+	/* old directory lost one link - ".. " of renamed directory */
+	DEC_DIR_INODE_NLINK(old_dir);
+    }
+
+    // looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
+    pathrelse (&new_entry_path);
+    pathrelse (&dot_dot_entry_path);
+
+    // FIXME: reiserfs_cut_from_item's return value may screw anybody
+    // up, but it will panic if it is not able to find the
+    // entry. This needs one more cleanup
+    if (reiserfs_cut_from_item (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL, 0) < 0)
+	reiserfs_warning (old_dir->i_sb, "vs-7060: reiserfs_rename: couldn't cut old name. Fsck later?");
+
+    old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;
+
+    reiserfs_update_sd (&th, old_dir);
+    reiserfs_update_sd (&th, new_dir);
+    reiserfs_update_sd (&th, old_inode);
+
+    if (new_dentry_inode) {
+	if (savelink == 0)
+	    add_save_link (&th, new_dentry_inode, 0/* not truncate */);
+	reiserfs_update_sd (&th, new_dentry_inode);
+    }
+
+    retval = journal_end(&th, old_dir->i_sb, jbegin_count) ;
+    reiserfs_write_unlock(old_dir->i_sb);
+    return retval;
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations reiserfs_dir_inode_operations = {
+  //&reiserfs_dir_operations,	/* default_file_ops */
+    .create	= reiserfs_create,
+    .lookup	= reiserfs_lookup,
+    .link	= reiserfs_link,
+    .unlink	= reiserfs_unlink,
+    .symlink	= reiserfs_symlink,
+    .mkdir	= reiserfs_mkdir,
+    .rmdir	= reiserfs_rmdir,
+    .mknod	= reiserfs_mknod,
+    .rename	= reiserfs_rename,
+    .setattr    = reiserfs_setattr,
+    .setxattr   = reiserfs_setxattr,
+    .getxattr   = reiserfs_getxattr,
+    .listxattr  = reiserfs_listxattr,
+    .removexattr = reiserfs_removexattr,
+    .permission     = reiserfs_permission,
+};
+
+/*
+ * symlink operations.. same as page_symlink_inode_operations, with xattr
+ * stuff added
+ */
+struct inode_operations reiserfs_symlink_inode_operations = {
+    .readlink       = generic_readlink,
+    .follow_link    = page_follow_link_light,
+    .put_link       = page_put_link,
+    .setattr        = reiserfs_setattr,
+    .setxattr       = reiserfs_setxattr,
+    .getxattr       = reiserfs_getxattr,
+    .listxattr      = reiserfs_listxattr,
+    .removexattr    = reiserfs_removexattr,
+    .permission     = reiserfs_permission,
+
+};
+
+
+/*
+ * special file operations.. just xattr/acl stuff
+ */
+struct inode_operations reiserfs_special_inode_operations = {
+    .setattr        = reiserfs_setattr,
+    .setxattr       = reiserfs_setxattr,
+    .getxattr       = reiserfs_getxattr,
+    .listxattr      = reiserfs_listxattr,
+    .removexattr    = reiserfs_removexattr,
+    .permission     = reiserfs_permission,
+
+};
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
new file mode 100644
index 0000000..0785c43
--- /dev/null
+++ b/fs/reiserfs/objectid.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/time.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_fs_sb.h>
+
+// find where objectid map starts
+#define objectid_map(s,rs) (old_format_only (s) ? \
+                         (__u32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
+			 (__u32 *)((rs) + 1))
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+static void check_objectid_map (struct super_block * s, __u32 * map)
+{
+    if (le32_to_cpu (map[0]) != 1)
+	reiserfs_panic (s, "vs-15010: check_objectid_map: map corrupted: %lx",
+			( long unsigned int ) le32_to_cpu (map[0]));
+
+    // FIXME: add something else here
+}
+
+#else
+static void check_objectid_map (struct super_block * s, __u32 * map)
+{;}
+#endif
+
+
+/* When we allocate objectids we allocate the first unused objectid.
+   Each sequence of objectids in use (the odd sequences) is followed
+   by a sequence of objectids not in use (the even sequences).  We
+   only need to record the last objectid in each of these sequences
+   (both the odd and even sequences) in order to fully define the
+   boundaries of the sequences.  A consequence of allocating the first
+   objectid not in use is that under most conditions this scheme is
+   extremely compact.  The exception is immediately after a sequence
+   of operations which deletes a large number of objects of
+   non-sequential objectids, and even then it will become compact
+   again as soon as more objects are created.  Note that many
+   interesting optimizations of layout could result from complicating
+   objectid assignment, but we have deferred making them for now. */
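+
+/* For illustration (the numbers are made up): with cur_size == 4 and
+   map == { 1, 5, 8, 12 }, objectids 1..4 are in use, 5..7 are free,
+   8..11 are in use, and everything from 12 up is free.  Allocating an
+   objectid hands out map[1] == 5 and bumps map[1] to 6; once map[1]
+   reaches map[2] the now-empty free run is dropped and the two used
+   runs merge, shrinking the map by two entries.  Releasing an objectid
+   from the middle of a used run splits that run, growing the map by
+   two entries (if there is room). */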
+
+
+/* get unique object identifier */
+__u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th)
+{
+    struct super_block * s = th->t_super;
+    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+    __u32 * map = objectid_map (s, rs);
+    __u32 unused_objectid;
+
+    BUG_ON (!th->t_trans_id);
+
+    check_objectid_map (s, map);
+
+    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+                                /* map[1] is the first objectid that is
+                                   not yet in use */
+    unused_objectid = le32_to_cpu (map[1]);
+    if (unused_objectid == U32_MAX) {
+	reiserfs_warning (s, "%s: no more object ids", __FUNCTION__);
+	reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s)) ;
+	return 0;
+    }
+
+    /* This incrementation allocates the first unused objectid. That
+       is to say, the first entry on the objectid map is the first
+       unused objectid, and by incrementing it we use it.  See below
+       where we check to see if we eliminated a sequence of unused
+       objectids.... */
+    map[1] = cpu_to_le32 (unused_objectid + 1);
+
+    /* Now we check to see if we eliminated the last remaining member of
+       the first even sequence (and can eliminate the sequence by
+       eliminating its last objectid from oids), and can collapse the
+       first two odd sequences into one sequence.  If so, then the net
+       result is to eliminate a pair of objectids from oids.  We do this
+       by shifting the entire map to the left. */
+    if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
+	memmove (map + 1, map + 3, (sb_oid_cursize(rs) - 3) * sizeof(__u32));
+        set_sb_oid_cursize( rs, sb_oid_cursize(rs) - 2 );
+    }
+
+    journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s));
+    return unused_objectid;
+}
+
+
+/* makes object identifier unused */
+void reiserfs_release_objectid (struct reiserfs_transaction_handle *th, 
+				__u32 objectid_to_release)
+{
+    struct super_block * s = th->t_super;
+    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+    __u32 * map = objectid_map (s, rs);
+    int i = 0;
+
+    BUG_ON (!th->t_trans_id);
+    //return;
+    check_objectid_map (s, map);
+
+    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+    journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s)); 
+
+    /* start at the beginning of the objectid map (i = 0) and go to
+       the end of it (i = disk_sb->s_oid_cursize).  Linear search is
+       what we use, though it is possible that binary search would be
+       more efficient after performing lots of deletions (which is
+       when oids is large.)  We only check even i's. */
+    while (i < sb_oid_cursize(rs)) {
+	if (objectid_to_release == le32_to_cpu (map[i])) {
+	    /* This incrementation unallocates the objectid. */
+	    //map[i]++;
+	    map[i] = cpu_to_le32 (le32_to_cpu (map[i]) + 1);
+
+	    /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
+	    if (map[i] == map[i+1]) {
+		/* shrink objectid map */
+		memmove (map + i, map + i + 2, 
+			 (sb_oid_cursize(rs) - i - 2) * sizeof (__u32));
+		//disk_sb->s_oid_cursize -= 2;
+                set_sb_oid_cursize( rs, sb_oid_cursize(rs) - 2 );
+
+		RFALSE( sb_oid_cursize(rs) < 2 || 
+		        sb_oid_cursize(rs) > sb_oid_maxsize(rs),
+		        "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
+                        sb_oid_cursize(rs), sb_oid_maxsize(rs));
+	    }
+	    return;
+	}
+
+	if (objectid_to_release > le32_to_cpu (map[i]) && 
+	    objectid_to_release < le32_to_cpu (map[i + 1])) {
+	    /* size of objectid map is not changed */
+	    if (objectid_to_release + 1 == le32_to_cpu (map[i + 1])) {
+		//objectid_map[i+1]--;
+		map[i + 1] = cpu_to_le32 (le32_to_cpu (map[i + 1]) - 1);
+		return;
+	    }
+
+	    /* JDM comparing two little-endian values for equality -- safe */
+	    if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
+		/* objectid map must be expanded, but there is no space */
+		PROC_INFO_INC( s, leaked_oid );
+		return;
+	    }
+
+	    /* expand the objectid map*/
+	    memmove (map + i + 3, map + i + 1, 
+		     (sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
+	    map[i + 1] = cpu_to_le32 (objectid_to_release);
+	    map[i + 2] = cpu_to_le32 (objectid_to_release + 1);
+            set_sb_oid_cursize( rs, sb_oid_cursize(rs) + 2 );
+	    return;
+	}
+	i += 2;
+    }
+
+    reiserfs_warning (s, "vs-15011: reiserfs_release_objectid: tried to free free object id (%lu)",
+		      ( long unsigned ) objectid_to_release);
+}
+
+
+int reiserfs_convert_objectid_map_v1(struct super_block *s) {
+    struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK (s);
+    int cur_size = sb_oid_cursize(disk_sb);
+    int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2 ;
+    int old_max = sb_oid_maxsize(disk_sb);
+    struct reiserfs_super_block_v1 *disk_sb_v1 ;
+    __u32 *objectid_map, *new_objectid_map ;
+    int i ;
+
+    disk_sb_v1=(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
+    objectid_map = (__u32 *)(disk_sb_v1 + 1) ;
+    new_objectid_map = (__u32 *)(disk_sb + 1) ;
+
+    if (cur_size > new_size) {
+	/* mark everyone used that was listed as free at the end of the objectid
+	** map 
+	*/
+	objectid_map[new_size - 1] = objectid_map[cur_size - 1] ;
+	set_sb_oid_cursize(disk_sb,new_size) ;
+    }
+    /* move the smaller objectid map past the end of the new super */
+    for (i = new_size - 1 ; i >= 0 ; i--) {
+        objectid_map[i + (old_max - new_size)] = objectid_map[i] ; 
+    }
+
+
+    /* set the max size so we don't overflow later */
+    set_sb_oid_maxsize(disk_sb,new_size) ;
+
+    /* Zero out label and generate random UUID */
+    memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label)) ;
+    generate_random_uuid(disk_sb->s_uuid);
+
+    /* finally, zero out the unused chunk of the new super */
+    memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused)) ;
+    return 0 ;
+}
+
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
new file mode 100644
index 0000000..16fdca1
--- /dev/null
+++ b/fs/reiserfs/prints.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+
+#include <stdarg.h>
+
+static char error_buf[1024];
+static char fmt_buf[1024];
+static char off_buf[80];
+
+
+static char * reiserfs_cpu_offset (struct cpu_key * key)
+{
+  if (cpu_key_k_type(key) == TYPE_DIRENTRY)
+    sprintf (off_buf, "%Lu(%Lu)", 
+	     (unsigned long long)GET_HASH_VALUE (cpu_key_k_offset (key)),
+	     (unsigned long long)GET_GENERATION_NUMBER (cpu_key_k_offset (key)));
+  else
+    sprintf (off_buf, "0x%Lx", (unsigned long long)cpu_key_k_offset (key));
+  return off_buf;
+}
+
+
+static char * le_offset (struct reiserfs_key * key)
+{
+  int version;
+
+  version = le_key_version (key);
+  if (le_key_k_type (version, key) == TYPE_DIRENTRY)
+    sprintf (off_buf, "%Lu(%Lu)", 
+	     (unsigned long long)GET_HASH_VALUE (le_key_k_offset (version, key)),
+	     (unsigned long long)GET_GENERATION_NUMBER (le_key_k_offset (version, key)));
+  else
+    sprintf (off_buf, "0x%Lx", (unsigned long long)le_key_k_offset (version, key));
+  return off_buf;
+}
+
+
+static char * cpu_type (struct cpu_key * key)
+{
+    if (cpu_key_k_type (key) == TYPE_STAT_DATA)
+	return "SD";
+    if (cpu_key_k_type (key) == TYPE_DIRENTRY)
+	return "DIR";
+    if (cpu_key_k_type (key) == TYPE_DIRECT)
+	return "DIRECT";
+    if (cpu_key_k_type (key) == TYPE_INDIRECT)
+	return "IND";
+    return "UNKNOWN";
+}
+
+
+static char * le_type (struct reiserfs_key * key)
+{
+    int version;
+    
+    version = le_key_version (key);
+
+    if (le_key_k_type (version, key) == TYPE_STAT_DATA)
+	return "SD";
+    if (le_key_k_type (version, key) == TYPE_DIRENTRY)
+	return "DIR";
+    if (le_key_k_type (version, key) == TYPE_DIRECT)
+	return "DIRECT";
+    if (le_key_k_type (version, key) == TYPE_INDIRECT)
+	return "IND";
+    return "UNKNOWN";
+}
+
+
+/* %k */
+static void sprintf_le_key (char * buf, struct reiserfs_key * key)
+{
+  if (key)
+    sprintf (buf, "[%d %d %s %s]", le32_to_cpu (key->k_dir_id),
+	     le32_to_cpu (key->k_objectid), le_offset (key), le_type (key));
+  else
+    sprintf (buf, "[NULL]");
+}
+
+
+/* %K */
+static void sprintf_cpu_key (char * buf, struct cpu_key * key)
+{
+  if (key)
+    sprintf (buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
+	     key->on_disk_key.k_objectid, reiserfs_cpu_offset (key),
+             cpu_type (key));
+  else
+    sprintf (buf, "[NULL]");
+}
+
+static void sprintf_de_head( char *buf, struct reiserfs_de_head *deh )
+{
+    if( deh )
+        sprintf( buf, "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", deh_offset(deh), deh_dir_id(deh),
+                 deh_objectid(deh), deh_location(deh), deh_state(deh) );
+    else
+        sprintf( buf, "[NULL]" );
+
+}
+
+static void sprintf_item_head (char * buf, struct item_head * ih)
+{
+    if (ih) {
+	strcpy (buf, (ih_version (ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5* ");
+	sprintf_le_key (buf + strlen (buf), &(ih->ih_key));
+	sprintf (buf + strlen (buf), ", item_len %d, item_location %d, "
+		 "free_space(entry_count) %d",
+		 ih_item_len(ih), ih_location(ih), ih_free_space (ih));
+    } else
+	sprintf (buf, "[NULL]");
+}
+
+
+static void sprintf_direntry (char * buf, struct reiserfs_dir_entry * de)
+{
+  char name[20];
+
+  memcpy (name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
+  name [de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
+  sprintf (buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+}
+
+
+static void sprintf_block_head (char * buf, struct buffer_head * bh)
+{
+  sprintf (buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
+	   B_LEVEL (bh), B_NR_ITEMS (bh), B_FREE_SPACE (bh));
+}
+
+
+static void sprintf_buffer_head (char * buf, struct buffer_head * bh) 
+{
+  char b[BDEVNAME_SIZE];
+
+  sprintf (buf, "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+	   bdevname (bh->b_bdev, b), bh->b_size,
+	   (unsigned long long)bh->b_blocknr,
+	   atomic_read (&(bh->b_count)),
+	   bh->b_state, bh->b_page,
+	   buffer_uptodate (bh) ? "UPTODATE" : "!UPTODATE",
+	   buffer_dirty (bh) ? "DIRTY" : "CLEAN",
+	   buffer_locked (bh) ? "LOCKED" : "UNLOCKED");
+}
+
+
+static void sprintf_disk_child (char * buf, struct disk_child * dc)
+{
+  sprintf (buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc), dc_size(dc));
+}
+
+
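+/* Find the first reiserfs-specific conversion specifier (%k, %K, %h, %t,
+   %z, %b, %y or %a) in fmt.  The specifier character is returned in
+   *what and *skip receives the number of '%' signs seen before it, so
+   that prepare_error_buf() can step the va_list past the ordinary
+   arguments (assumed int-sized) before fetching the reiserfs pointer. */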
+static char * is_there_reiserfs_struct (char * fmt, int * what, int * skip)
+{
+  char * k = fmt;
+
+  *skip = 0;
+  
+  while ((k = strchr (k, '%')) != NULL)
+  {
+    if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' ||
+	      k[1] == 'z' || k[1] == 'b' || k[1] == 'y' || k[1] == 'a' ) {
+      *what = k[1];
+      break;
+    }
+    (*skip) ++;
+    k ++;
+  }
+  return k;
+}
+
+
+/* When debugging reiserfs we used to print out a lot of different
+   variables, like keys, item headers, buffer heads etc.  The values of
+   most fields matter, so it took a long time just to write an
+   appropriate printk.  With this reiserfs_warning you can use format
+   specifications for complex structures just as you would use printf
+   for integers, doubles and pointers.  For instance, to print out a
+   key structure you only have to write:
+   reiserfs_warning ("bad key %k", key); 
+   instead of 
+   printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid, 
+           key->k_offset, key->k_uniqueness); 
+*/
+
+
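+/* prepare_error_buf() formats fmt into error_buf: the format string is
+   copied into fmt_buf and split at each reiserfs-specific specifier,
+   the plain prefix is handed to vsprintf(), and the reiserfs structure
+   argument is then rendered by the matching sprintf_* helper; this
+   repeats until no special specifiers remain. */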
+static void
+prepare_error_buf( const char *fmt, va_list args )
+{
+    char * fmt1 = fmt_buf;
+    char * k;
+    char * p = error_buf;
+    int i, j, what, skip;
+
+    strcpy (fmt1, fmt);
+
+    while( (k = is_there_reiserfs_struct( fmt1, &what, &skip )) != NULL )
+    {
+        *k = 0;
+
+        p += vsprintf (p, fmt1, args);
+
+        for (i = 0; i < skip; i ++)
+            j = va_arg (args, int);
+
+        switch (what) {
+        case 'k':
+            sprintf_le_key (p, va_arg(args, struct reiserfs_key *));
+            break;
+        case 'K':
+            sprintf_cpu_key (p, va_arg(args, struct cpu_key *));
+            break;
+        case 'h':
+            sprintf_item_head (p, va_arg(args, struct item_head *));
+            break;
+        case 't':
+            sprintf_direntry (p, va_arg(args, struct reiserfs_dir_entry *));
+            break;
+        case 'y':
+            sprintf_disk_child (p, va_arg(args, struct disk_child *));
+            break;
+        case 'z':
+            sprintf_block_head (p, va_arg(args, struct buffer_head *));
+            break;
+        case 'b':
+            sprintf_buffer_head (p, va_arg(args, struct buffer_head *));
+            break;
+        case 'a':
+            sprintf_de_head (p, va_arg(args, struct reiserfs_de_head *));
+            break;
+        }
+
+        p += strlen (p);
+        fmt1 = k + 2;
+    }
+    vsprintf (p, fmt1, args);
+
+}
+
+
+/* in addition to the usual conversion specifiers this accepts reiserfs
+   specific conversion specifiers:
+   %k to print a little endian key,
+   %K to print a cpu key,
+   %h to print an item_head,
+   %t to print a directory entry,
+   %y to print a disk_child,
+   %a to print a directory entry head (struct reiserfs_de_head *),
+   %z to print a block head (arg must be a struct buffer_head *),
+   %b to print a buffer_head
+*/
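+/* An illustrative call (the error id here is arbitrary):
+   reiserfs_warning (sb, "vs-0000: bad item %h in block %b", ih, bh); */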
+
+#define do_reiserfs_warning(fmt)\
+{\
+    va_list args;\
+    va_start( args, fmt );\
+    prepare_error_buf( fmt, args );\
+    va_end( args );\
+}
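+
+/* Note that error_buf and fmt_buf above are single static buffers and
+   nothing here serializes access to them, so messages from concurrent
+   callers may end up interleaved. */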
+
+void reiserfs_warning (struct super_block *sb, const char * fmt, ...)
+{
+  do_reiserfs_warning(fmt);
+  if (sb)
+      printk (KERN_WARNING "ReiserFS: %s: warning: %s\n",
+             reiserfs_bdevname (sb), error_buf);
+  else
+      printk (KERN_WARNING "ReiserFS: warning: %s\n", error_buf);
+}
+
+/* No newline.. reiserfs_info calls can be followed by printk's */
+void reiserfs_info (struct super_block *sb, const char * fmt, ...)
+{
+  do_reiserfs_warning(fmt);
+  if (sb)
+      printk (KERN_NOTICE "ReiserFS: %s: %s",
+             reiserfs_bdevname (sb), error_buf);
+  else
+      printk (KERN_NOTICE "ReiserFS: %s", error_buf);
+}
+
+/* No newline.. reiserfs_printk calls can be followed by printk's */
+static void reiserfs_printk (const char * fmt, ...)
+{
+  do_reiserfs_warning(fmt);
+  printk (error_buf);
+}
+
+void reiserfs_debug (struct super_block *s, int level, const char * fmt, ...)
+{
+#ifdef CONFIG_REISERFS_CHECK
+  do_reiserfs_warning(fmt);
+  if (s)
+      printk (KERN_DEBUG "ReiserFS: %s: %s\n",
+             reiserfs_bdevname (s), error_buf);
+  else
+      printk (KERN_DEBUG "ReiserFS: %s\n", error_buf);
+#endif
+}
+
+/* The format:
+
+           maintainer-errorid: [function-name:] message
+
+    where errorid is unique to the maintainer and function-name is
+    optional, is recommended, so that anyone can easily find the bug
+    with a simple grep for the short, easy-to-type string
+    maintainer-errorid.  Don't bother with reusing errorids, there are
+    lots of numbers out there.
+
+    Example: 
+    
+    reiserfs_panic(
+	p_sb, "reiser-29: reiserfs_new_blocknrs: "
+	"one of search_start or rn(%d) is equal to MAX_B_NUM,"
+	"which means that we are optimizing location based on the bogus location of a temp buffer (%p).", 
+	rn, bh
+    );
+
+    Regular panic()s sometimes clear the screen before the message can
+    be read, thus the need for the while loop.  
+
+    Numbering scheme for panic used by Vladimir and Anatoly (Hans completely
+    ignores this scheme, and considers it pointless complexity):
+
+    panics in reiserfs_fs.h have numbers from 1000 to 1999
+    super.c                                 2000 to 2999
+    preserve.c (unused)                     3000 to 3999
+    bitmap.c                                4000 to 4999
+    stree.c                                 5000 to 5999
+    prints.c                                6000 to 6999
+    namei.c                                 7000 to 7999
+    fix_nodes.c                             8000 to 8999
+    dir.c                                   9000 to 9999
+    lbalance.c                             10000 to 10999
+    ibalance.c                             11000 to 11999 not ready
+    do_balan.c                             12000 to 12999
+    inode.c                                13000 to 13999
+    file.c                                 14000 to 14999
+    objectid.c                             15000 to 15999
+    buffer.c                               16000 to 16999
+    symlink.c                              17000 to 17999
+
+*/
+
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+void reiserfs_panic (struct super_block * sb, const char * fmt, ...)
+{
+  do_reiserfs_warning(fmt);
+  printk (KERN_EMERG "REISERFS: panic (device %s): %s\n",
+          reiserfs_bdevname (sb), error_buf);
+  BUG ();
+
+  /* this is not actually called, but makes reiserfs_panic() "noreturn" */
+  panic ("REISERFS: panic (device %s): %s\n",
+	 reiserfs_bdevname (sb), error_buf);
+}
+
+void
+reiserfs_abort (struct super_block *sb, int errno, const char *fmt, ...)
+{
+    do_reiserfs_warning (fmt);
+
+    if (reiserfs_error_panic (sb)) {
+        panic (KERN_CRIT "REISERFS: panic (device %s): %s\n",
+               reiserfs_bdevname (sb), error_buf);
+    }
+
+    if (sb->s_flags & MS_RDONLY)
+        return;
+
+    printk (KERN_CRIT "REISERFS: abort (device %s): %s\n",
+            reiserfs_bdevname (sb), error_buf);
+
+    sb->s_flags |= MS_RDONLY;
+    reiserfs_journal_abort (sb, errno);
+}
+
+/* this prints internal nodes (4 keys/items in line) (dc_number,
+   dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
+   dc_size)...*/
+static int print_internal (struct buffer_head * bh, int first, int last)
+{
+    struct reiserfs_key * key;
+    struct disk_child * dc;
+    int i;
+    int from, to;
+    
+    if (!B_IS_KEYS_LEVEL (bh))
+	return 1;
+
+    check_internal (bh);
+    
+    if (first == -1) {
+	from = 0;
+	to = B_NR_ITEMS (bh);
+    } else {
+	from = first;
+	to = last < B_NR_ITEMS (bh) ? last : B_NR_ITEMS (bh);
+    }
+
+    reiserfs_printk ("INTERNAL NODE (%ld) contains %z\n",  bh->b_blocknr, bh);
+    
+    dc = B_N_CHILD (bh, from);
+    reiserfs_printk ("PTR %d: %y ", from, dc);
+    
+    for (i = from, key = B_N_PDELIM_KEY (bh, from), dc ++; i < to; i ++, key ++, dc ++) {
+	reiserfs_printk ("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
+	if (i && i % 4 == 0)
+	    printk ("\n");
+    }
+    printk ("\n");
+    return 0;
+}
+
+
+
+
+
+static int print_leaf (struct buffer_head * bh, int print_mode, int first, int last)
+{
+    struct block_head * blkh;
+    struct item_head * ih;
+    int i, nr;
+    int from, to;
+
+    if (!B_IS_ITEMS_LEVEL (bh))
+	return 1;
+
+    check_leaf (bh);
+
+    blkh = B_BLK_HEAD (bh);
+    ih = B_N_PITEM_HEAD (bh,0);
+    nr = blkh_nr_item(blkh);
+
+    printk ("\n===================================================================\n");
+    reiserfs_printk ("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);
+
+    if (!(print_mode & PRINT_LEAF_ITEMS)) {
+	reiserfs_printk ("FIRST ITEM_KEY: %k, LAST ITEM KEY: %k\n",
+			  &(ih->ih_key), &((ih + nr - 1)->ih_key));
+	return 0;
+    }
+
+    if (first < 0 || first > nr - 1) 
+	from = 0;
+    else 
+	from = first;
+
+    if (last < 0 || last > nr )
+	to = nr;
+    else
+	to = last;
+
+    ih += from;
+    printk ("-------------------------------------------------------------------------------\n");
+    printk ("|##|   type    |           key           | ilen | free_space | version | loc  |\n");
+    for (i = from; i < to; i++, ih ++) {
+	printk ("-------------------------------------------------------------------------------\n");
+	reiserfs_printk ("|%2d| %h |\n", i, ih);
+	if (print_mode & PRINT_LEAF_ITEMS)
+	    op_print_item (ih, B_I_PITEM (bh, ih));
+    }
+
+    printk ("===================================================================\n");
+
+    return 0;
+}
+
+char * reiserfs_hashname(int code)
+{
+    if ( code == YURA_HASH)
+	return "rupasov";
+    if ( code == TEA_HASH)
+	return "tea";
+    if ( code == R5_HASH)
+	return "r5";
+
+    return "unknown";
+}
+
+/* return 1 if this is not super block */
+static int print_super_block (struct buffer_head * bh)
+{
+    struct reiserfs_super_block * rs = (struct reiserfs_super_block *)(bh->b_data);
+    int skipped, data_blocks;
+    char *version;
+    char b[BDEVNAME_SIZE];
+
+    if (is_reiserfs_3_5(rs)) {
+        version = "3.5";
+    } else if (is_reiserfs_3_6(rs)) {
+        version = "3.6";
+    } else if (is_reiserfs_jr(rs)) {
+        version = ((sb_version(rs) == REISERFS_VERSION_2) ?
+                   "3.6" : "3.5");
+    } else {
+	return 1;
+    }
+
+    printk ("%s\'s super block is in block %llu\n", bdevname (bh->b_bdev, b),
+            (unsigned long long)bh->b_blocknr);
+    printk ("Reiserfs version %s\n", version );
+    printk ("Block count %u\n", sb_block_count(rs));
+    printk ("Blocksize %d\n", sb_blocksize(rs));
+    printk ("Free blocks %u\n", sb_free_blocks(rs));
+    // FIXME: this would be confusing if
+    // someone stores reiserfs super block in some data block ;)
+//    skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
+    skipped = bh->b_blocknr;
+    data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
+	    (!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) + 1 : sb_reserved_for_journal(rs)) -	    
+	    sb_free_blocks(rs);
+    printk ("Busy blocks (skipped %d, bitmaps - %d, journal (or reserved) blocks - %d)\n"
+	    "1 super block, %d data blocks\n", 
+	    skipped, sb_bmap_nr(rs), (!is_reiserfs_jr(rs) ? (sb_jp_journal_size(rs) + 1) :
+				      sb_reserved_for_journal(rs)) , data_blocks);
+    printk ("Root block %u\n", sb_root_block(rs));
+    printk ("Journal block (first) %d\n", sb_jp_journal_1st_block(rs));
+    printk ("Journal dev %d\n", sb_jp_journal_dev(rs));
+    printk ("Journal orig size %d\n", sb_jp_journal_size(rs));
+    printk ("FS state %d\n", sb_fs_state(rs));
+    printk ("Hash function \"%s\"\n",
+	    reiserfs_hashname(sb_hash_function_code(rs)));
+    
+    printk ("Tree height %d\n", sb_tree_height(rs));
+    return 0;
+}
+
+static int print_desc_block (struct buffer_head * bh)
+{
+    struct reiserfs_journal_desc * desc;
+
+    if (memcmp(get_journal_desc_magic (bh), JOURNAL_DESC_MAGIC, 8))
+	return 1;
+
+    desc = (struct reiserfs_journal_desc *)(bh->b_data);
+    printk ("Desc block %llu (j_trans_id %d, j_mount_id %d, j_len %d)",
+	    (unsigned long long)bh->b_blocknr, get_desc_trans_id (desc), get_desc_mount_id (desc),
+	    get_desc_trans_len (desc));
+
+    return 0;
+}
+
+
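+/* Each print_* helper above returns 1 when the buffer is not a block of
+   its kind, so print_block() below just tries the leaf, internal, super
+   block and journal descriptor formats in turn and falls back to
+   reporting unformatted data. */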
+void print_block (struct buffer_head * bh, ...)//int print_mode, int first, int last)
+{
+    va_list args;
+    int mode, first, last;
+
+    va_start (args, bh);
+
+    if ( ! bh ) {
+	printk("print_block: buffer is NULL\n");
+	return;
+    }
+
+    mode = va_arg (args, int);
+    first = va_arg (args, int);
+    last = va_arg (args, int);
+    if (print_leaf (bh, mode, first, last))
+	if (print_internal (bh, first, last))
+	    if (print_super_block (bh))
+		if (print_desc_block (bh))
+		    printk ("Block %llu contains unformatted data\n", (unsigned long long)bh->b_blocknr);
+}
+
+
+
+static char print_tb_buf[2048];
+
+/* this stores initial state of tree balance in the print_tb_buf */
+void store_print_tb (struct tree_balance * tb)
+{
+    int h = 0;
+    int i;
+    struct buffer_head * tbSh, * tbFh;
+
+    if (!tb)
+	return;
+
+    sprintf (print_tb_buf, "\n"
+	     "BALANCING %d\n"
+	     "MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n" 
+	     "=====================================================================\n"
+	     "* h *    S    *    L    *    R    *   F   *   FL  *   FR  *  CFL  *  CFR  *\n",
+	     REISERFS_SB(tb->tb_sb)->s_do_balance,
+	     tb->tb_mode, PATH_LAST_POSITION (tb->tb_path), tb->tb_path->pos_in_item);
+  
+    for (h = 0; h < sizeof(tb->insert_size) / sizeof (tb->insert_size[0]); h ++) {
+	if (PATH_H_PATH_OFFSET (tb->tb_path, h) <= tb->tb_path->path_length && 
+	    PATH_H_PATH_OFFSET (tb->tb_path, h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
+	    tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+	    tbFh = PATH_H_PPARENT (tb->tb_path, h);
+	} else {
+	    tbSh = NULL;
+	    tbFh = NULL;
+	}
+	sprintf (print_tb_buf + strlen (print_tb_buf),
+		 "* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
+		 h, 
+		 (tbSh) ? (long long)(tbSh->b_blocknr):(-1LL),
+		 (tbSh) ? atomic_read (&(tbSh->b_count)) : -1,
+		 (tb->L[h]) ? (long long)(tb->L[h]->b_blocknr):(-1LL),
+		 (tb->L[h]) ? atomic_read (&(tb->L[h]->b_count)) : -1,
+		 (tb->R[h]) ? (long long)(tb->R[h]->b_blocknr):(-1LL),
+		 (tb->R[h]) ? atomic_read (&(tb->R[h]->b_count)) : -1,
+		 (tbFh) ? (long long)(tbFh->b_blocknr):(-1LL),
+		 (tb->FL[h]) ? (long long)(tb->FL[h]->b_blocknr):(-1LL),
+		 (tb->FR[h]) ? (long long)(tb->FR[h]->b_blocknr):(-1LL),
+		 (tb->CFL[h]) ? (long long)(tb->CFL[h]->b_blocknr):(-1LL),
+		 (tb->CFR[h]) ? (long long)(tb->CFR[h]->b_blocknr):(-1LL));
+    }
+
+    sprintf (print_tb_buf + strlen (print_tb_buf), 
+	     "=====================================================================\n"
+	     "* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
+	     "* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
+	     tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],tb->rbytes, tb->blknum[0], 
+	     tb->s0num, tb->s1num,tb->s1bytes,  tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0], tb->rkey[0]);
+
+    /* this prints balance parameters for non-leaf levels */
+    h = 0;
+    do {
+	h++;
+	sprintf (print_tb_buf + strlen (print_tb_buf),
+		 "* %d * %4d * %2d *    * %2d *    * %2d *\n",
+		h, tb->insert_size[h], tb->lnum[h], tb->rnum[h], tb->blknum[h]);
+    } while (tb->insert_size[h]);
+
+    sprintf (print_tb_buf + strlen (print_tb_buf), 
+	     "=====================================================================\n"
+	     "FEB list: ");
+
+    /* print FEB list (list of buffers in form (bh (b_blocknr, b_count), that will be used for new nodes) */
+    h = 0;
+    for (i = 0; i < sizeof (tb->FEB) / sizeof (tb->FEB[0]); i ++)
+	sprintf (print_tb_buf + strlen (print_tb_buf),
+		 "%p (%llu %d)%s", tb->FEB[i], tb->FEB[i] ? (unsigned long long)tb->FEB[i]->b_blocknr : 0ULL,
+		 tb->FEB[i] ? atomic_read (&(tb->FEB[i]->b_count)) : 0, 
+		 (i == sizeof (tb->FEB) / sizeof (tb->FEB[0]) - 1) ? "\n" : ", ");
+
+    sprintf (print_tb_buf + strlen (print_tb_buf), 
+	     "======================== the end ====================================\n");
+}
+
+void print_cur_tb (char * mes)
+{
+    printk ("%s\n%s", mes, print_tb_buf);
+}
+
+static void check_leaf_block_head (struct buffer_head * bh)
+{
+  struct block_head * blkh;
+  int nr;
+
+  blkh = B_BLK_HEAD (bh);
+  nr = blkh_nr_item(blkh);
+  if ( nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+    reiserfs_panic (NULL, "vs-6010: check_leaf_block_head: invalid item number %z", bh);
+  if ( blkh_free_space(blkh) > 
+      bh->b_size - BLKH_SIZE - IH_SIZE * nr )
+    reiserfs_panic (NULL, "vs-6020: check_leaf_block_head: invalid free space %z", bh);
+    
+}
+
+static void check_internal_block_head (struct buffer_head * bh)
+{
+    struct block_head * blkh;
+    
+    blkh = B_BLK_HEAD (bh);
+    if (!(B_LEVEL (bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL (bh) <= MAX_HEIGHT))
+	reiserfs_panic (NULL, "vs-6025: check_internal_block_head: invalid level %z", bh);
+
+    if (B_NR_ITEMS (bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+	reiserfs_panic (NULL, "vs-6030: check_internal_block_head: invalid item number %z", bh);
+
+    if (B_FREE_SPACE (bh) != 
+	bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS (bh) - DC_SIZE * (B_NR_ITEMS (bh) + 1))
+	reiserfs_panic (NULL, "vs-6040: check_internal_block_head: invalid free space %z", bh);
+
+}
+
+
+void check_leaf (struct buffer_head * bh)
+{
+    int i;
+    struct item_head * ih;
+
+    if (!bh)
+	return;
+    check_leaf_block_head (bh);
+    for (i = 0, ih = B_N_PITEM_HEAD (bh, 0); i < B_NR_ITEMS (bh); i ++, ih ++)
+	op_check_item (ih, B_I_PITEM (bh, ih));
+}
+
+
+void check_internal (struct buffer_head * bh)
+{
+  if (!bh)
+    return;
+  check_internal_block_head (bh);
+}
+
+
+void print_statistics (struct super_block * s)
+{
+
+  /*
+  printk ("reiserfs_put_super: session statistics: balances %d, fix_nodes %d, \
+bmap with search %d, without %d, dir2ind %d, ind2dir %d\n",
+	  REISERFS_SB(s)->s_do_balance, REISERFS_SB(s)->s_fix_nodes,
+	  REISERFS_SB(s)->s_bmaps, REISERFS_SB(s)->s_bmaps_without_search,
+	  REISERFS_SB(s)->s_direct2indirect, REISERFS_SB(s)->s_indirect2direct);
+  */
+
+}
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
new file mode 100644
index 0000000..f4ea81a
--- /dev/null
+++ b/fs/reiserfs/procfs.c
@@ -0,0 +1,664 @@
+/* -*- linux-c -*- */
+
+/* fs/reiserfs/procfs.c */
+
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/* proc info support a la one created by Sizif@Botik.RU for PGC */
+
+/* $Id: procfs.c,v 1.1.8.2 2001/07/15 17:08:42 god Exp $ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_fs_sb.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#if defined( REISERFS_PROC_INFO )
+
+/*
+ * LOCKING:
+ *
+ * We rely on Alexander Viro's new super-block locking.
+ *
+ */
+
+static int show_version(struct seq_file *m, struct super_block *sb)
+{
+	char *format;
+    
+	if ( REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6) ) {
+		format = "3.6";
+	} else if ( REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_5) ) {
+		format = "3.5";
+	} else {
+		format = "unknown";
+	}
+
+	seq_printf(m, "%s format\twith checks %s\n",
+			format,
+#if defined( CONFIG_REISERFS_CHECK )
+			"on"
+#else
+			"off"
+#endif
+		);
+	return 0;
+}
+
+int reiserfs_global_version_in_proc( char *buffer, char **start, off_t offset,
+				     int count, int *eof, void *data )
+{
+	*start = buffer;
+	*eof = 1;
+	return 0;
+}
+
+#define SF( x ) ( r -> x )
+#define SFP( x ) SF( s_proc_info_data.x )
+#define SFPL( x ) SFP( x[ level ] )
+#define SFPF( x ) SFP( scan_bitmap.x )
+#define SFPJ( x ) SFP( journal.x )
+
+#define D2C( x ) le16_to_cpu( x )
+#define D4C( x ) le32_to_cpu( x )
+#define DF( x ) D2C( rs -> s_v1.x )
+#define DFL( x ) D4C( rs -> s_v1.x )
+
+#define objectid_map( s, rs ) (old_format_only (s) ?				\
+                         (__u32 *)((struct reiserfs_super_block_v1 *)rs + 1) :	\
+			 (__u32 *)(rs + 1))
+#define MAP( i ) D4C( objectid_map( sb, rs )[ i ] )
+
+#define DJF( x ) le32_to_cpu( rs -> x )
+#define DJV( x ) le32_to_cpu( s_v1 -> x )
+#define DJP( x ) le32_to_cpu( jp -> x ) 
+#define JF( x ) ( r -> s_journal -> x )
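+
+/* e.g. SFPL( balance_at ) expands to
+   r -> s_proc_info_data.balance_at[ level ], and DJP( jp_journal_size )
+   expands to le32_to_cpu( jp -> jp_journal_size ). */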
+
+static int show_super(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *r = REISERFS_SB(sb);
+    
+	seq_printf(m,	"state: \t%s\n"
+			"mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n"
+			"gen. counter: \t%i\n"
+			"s_kmallocs: \t%i\n"
+			"s_disk_reads: \t%i\n"
+			"s_disk_writes: \t%i\n"
+			"s_fix_nodes: \t%i\n"
+			"s_do_balance: \t%i\n"
+			"s_unneeded_left_neighbor: \t%i\n"
+			"s_good_search_by_key_reada: \t%i\n"
+			"s_bmaps: \t%i\n"
+			"s_bmaps_without_search: \t%i\n"
+			"s_direct2indirect: \t%i\n"
+			"s_indirect2direct: \t%i\n"
+			"\n"
+			"max_hash_collisions: \t%i\n"
+
+			"breads: \t%lu\n"
+			"bread_misses: \t%lu\n"
+
+			"search_by_key: \t%lu\n"
+			"search_by_key_fs_changed: \t%lu\n"
+			"search_by_key_restarted: \t%lu\n"
+			
+			"insert_item_restarted: \t%lu\n"
+			"paste_into_item_restarted: \t%lu\n"
+			"cut_from_item_restarted: \t%lu\n"
+			"delete_solid_item_restarted: \t%lu\n"
+			"delete_item_restarted: \t%lu\n"
+
+			"leaked_oid: \t%lu\n"
+			"leaves_removable: \t%lu\n",
+
+			SF( s_mount_state ) == REISERFS_VALID_FS ?
+			"REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
+			reiserfs_r5_hash( sb ) ? "FORCE_R5 " : "",
+			reiserfs_rupasov_hash( sb ) ? "FORCE_RUPASOV " : "",
+			reiserfs_tea_hash( sb ) ? "FORCE_TEA " : "",
+			reiserfs_hash_detect( sb ) ? "DETECT_HASH " : "",
+			reiserfs_no_border( sb ) ? "NO_BORDER " : "BORDER ",
+			reiserfs_no_unhashed_relocation( sb ) ? "NO_UNHASHED_RELOCATION " : "",
+			reiserfs_hashed_relocation( sb ) ? "UNHASHED_RELOCATION " : "",
+			reiserfs_test4( sb ) ? "TEST4 " : "",
+			have_large_tails( sb ) ? "TAILS " : have_small_tails(sb)?"SMALL_TAILS ":"NO_TAILS ",
+			replay_only( sb ) ? "REPLAY_ONLY " : "",
+			convert_reiserfs( sb ) ? "CONV " : "",
+
+			atomic_read( &r -> s_generation_counter ),
+			SF( s_kmallocs ),
+			SF( s_disk_reads ),
+			SF( s_disk_writes ),
+			SF( s_fix_nodes ),
+			SF( s_do_balance ),
+			SF( s_unneeded_left_neighbor ),
+			SF( s_good_search_by_key_reada ),
+			SF( s_bmaps ),
+			SF( s_bmaps_without_search ),
+			SF( s_direct2indirect ),
+			SF( s_indirect2direct ),
+			SFP( max_hash_collisions ),
+			SFP( breads ),
+			SFP( bread_miss ),
+			SFP( search_by_key ),
+			SFP( search_by_key_fs_changed ),
+			SFP( search_by_key_restarted ),
+
+			SFP( insert_item_restarted ),
+			SFP( paste_into_item_restarted ),
+			SFP( cut_from_item_restarted ),
+			SFP( delete_solid_item_restarted ),
+			SFP( delete_item_restarted ),
+
+			SFP( leaked_oid ),
+			SFP( leaves_removable ) );
+
+	return 0;
+}
+
+static int show_per_level(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *r = REISERFS_SB(sb);
+	int level;
+
+	seq_printf(m,	"level\t"
+			"     balances"
+			" [sbk:  reads"
+			"   fs_changed"
+			"   restarted]"
+			"   free space"
+			"        items"
+			"   can_remove"
+			"         lnum"
+			"         rnum"
+			"       lbytes"
+			"       rbytes"
+			"     get_neig"
+			" get_neig_res"
+			"  need_l_neig"
+			"  need_r_neig"
+			"\n"
+			
+		);
+
+	for( level = 0 ; level < MAX_HEIGHT ; ++ level ) {
+		seq_printf(m,	"%i\t"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12li"
+				" %12li"
+				" %12li"
+				" %12li"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				" %12lu"
+				"\n",
+				level, 
+				SFPL( balance_at ),
+				SFPL( sbk_read_at ),
+				SFPL( sbk_fs_changed ),
+				SFPL( sbk_restarted ),
+				SFPL( free_at ),
+				SFPL( items_at ),
+				SFPL( can_node_be_removed ),
+				SFPL( lnum ),
+				SFPL( rnum ),
+				SFPL( lbytes ),
+				SFPL( rbytes ),
+				SFPL( get_neighbors ),
+				SFPL( get_neighbors_restart ),
+				SFPL( need_l_neighbor ),
+				SFPL( need_r_neighbor )
+			);
+	}
+	return 0;
+}
+
+static int show_bitmap(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *r = REISERFS_SB(sb);
+
+	seq_printf(m,	"free_block: %lu\n"
+			"  scan_bitmap:"
+			"          wait"
+			"          bmap"
+			"         retry"
+			"        stolen"
+			"  journal_hint"
+			"journal_nohint"
+			"\n"
+			" %14lu"
+			" %14lu"
+			" %14lu"
+			" %14lu"
+			" %14lu"
+			" %14lu"
+			" %14lu"
+			"\n",
+			SFP( free_block ),
+			SFPF( call ), 
+			SFPF( wait ), 
+			SFPF( bmap ),
+			SFPF( retry ),
+			SFPF( stolen ),
+			SFPF( in_journal_hint ),
+			SFPF( in_journal_nohint ) );
+
+	return 0;
+}
+
+static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
+	struct reiserfs_super_block *rs = sb_info -> s_rs;
+	int hash_code = DFL( s_hash_function_code );
+	__u32 flags = DJF( s_flags );
+
+	seq_printf(m,	"block_count: \t%i\n"
+			"free_blocks: \t%i\n"
+			"root_block: \t%i\n"
+			"blocksize: \t%i\n"
+			"oid_maxsize: \t%i\n"
+			"oid_cursize: \t%i\n"
+			"umount_state: \t%i\n"
+			"magic: \t%10.10s\n"
+			"fs_state: \t%i\n"
+			"hash: \t%s\n"
+			"tree_height: \t%i\n"
+			"bmap_nr: \t%i\n"
+			"version: \t%i\n"
+			"flags: \t%x[%s]\n"
+			"reserved_for_journal: \t%i\n",
+
+			DFL( s_block_count ),
+			DFL( s_free_blocks ),
+			DFL( s_root_block ),
+			DF( s_blocksize ),
+			DF( s_oid_maxsize ),
+			DF( s_oid_cursize ),
+			DF( s_umount_state ),
+			rs -> s_v1.s_magic,
+			DF( s_fs_state ),
+			hash_code == TEA_HASH ? "tea" :
+			( hash_code == YURA_HASH ) ? "rupasov" :
+			( hash_code == R5_HASH ) ? "r5" :
+			( hash_code == UNSET_HASH ) ? "unset" : "unknown",
+			DF( s_tree_height ),
+			DF( s_bmap_nr ),
+			DF( s_version ),
+			flags,
+			( flags & reiserfs_attrs_cleared )
+			? "attrs_cleared" : "",
+			DF (s_reserved_for_journal));
+
+	return 0;
+}
+
+static int show_oidmap(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
+	struct reiserfs_super_block *rs = sb_info -> s_rs;
+	unsigned int mapsize = le16_to_cpu( rs -> s_v1.s_oid_cursize );
+	unsigned long total_used = 0;
+	int i;
+
+	for( i = 0 ; i < mapsize ; ++i ) {
+		__u32 right;
+
+		right = ( i == mapsize - 1 ) ? MAX_KEY_OBJECTID : MAP( i + 1 );
+		seq_printf(m, "%s: [ %x .. %x )\n",
+				( i & 1 ) ? "free" : "used", MAP( i ), right );
+		if( ! ( i & 1 ) ) {
+			total_used += right - MAP( i );
+		}
+	}
+#if defined( REISERFS_USE_OIDMAPF )
+	if( sb_info -> oidmap.use_file && ( sb_info -> oidmap.mapf != NULL ) ) {
+		loff_t size = sb_info->oidmap.mapf->f_dentry->d_inode->i_size;
+		total_used += size / sizeof( reiserfs_oidinterval_d_t );
+	}
+#endif
+	seq_printf(m, "total: \t%i [%i/%i] used: %lu [exact]\n", 
+			mapsize, 
+			mapsize, le16_to_cpu( rs -> s_v1.s_oid_maxsize ),
+			total_used);
+	return 0;
+}
+
+static int show_journal(struct seq_file *m, struct super_block *sb)
+{
+	struct reiserfs_sb_info *r = REISERFS_SB(sb);
+	struct reiserfs_super_block *rs = r -> s_rs;
+	struct journal_params *jp = &rs->s_v1.s_journal;
+	char b[BDEVNAME_SIZE];
+    
+
+	seq_printf(m,	 /* on-disk fields */
+ 			"jp_journal_1st_block: \t%i\n"
+ 			"jp_journal_dev: \t%s[%x]\n"
+ 			"jp_journal_size: \t%i\n"
+ 			"jp_journal_trans_max: \t%i\n"
+ 			"jp_journal_magic: \t%i\n"
+ 			"jp_journal_max_batch: \t%i\n"
+ 			"jp_journal_max_commit_age: \t%i\n"
+ 			"jp_journal_max_trans_age: \t%i\n"
+			/* incore fields */
+			"j_1st_reserved_block: \t%i\n"	  
+			"j_state: \t%li\n"			
+			"j_trans_id: \t%lu\n"
+			"j_mount_id: \t%lu\n"
+			"j_start: \t%lu\n"
+			"j_len: \t%lu\n"
+			"j_len_alloc: \t%lu\n"
+			"j_wcount: \t%i\n"
+			"j_bcount: \t%lu\n"
+			"j_first_unflushed_offset: \t%lu\n"
+			"j_last_flush_trans_id: \t%lu\n"
+			"j_trans_start_time: \t%li\n"
+			"j_list_bitmap_index: \t%i\n"
+			"j_must_wait: \t%i\n"
+			"j_next_full_flush: \t%i\n"
+			"j_next_async_flush: \t%i\n"
+			"j_cnode_used: \t%i\n"
+			"j_cnode_free: \t%i\n"
+			"\n"
+			/* reiserfs_proc_info_data_t.journal fields */
+			"in_journal: \t%12lu\n"
+			"in_journal_bitmap: \t%12lu\n"
+			"in_journal_reusable: \t%12lu\n"
+			"lock_journal: \t%12lu\n"
+			"lock_journal_wait: \t%12lu\n"
+			"journal_begin: \t%12lu\n"
+			"journal_relock_writers: \t%12lu\n"
+			"journal_relock_wcount: \t%12lu\n"
+			"mark_dirty: \t%12lu\n"
+			"mark_dirty_already: \t%12lu\n"
+			"mark_dirty_notjournal: \t%12lu\n"
+			"restore_prepared: \t%12lu\n"
+			"prepare: \t%12lu\n"
+			"prepare_retry: \t%12lu\n",
+
+                        DJP( jp_journal_1st_block ),
+                        bdevname(SB_JOURNAL(sb)->j_dev_bd, b),
+                        DJP( jp_journal_dev ),
+                        DJP( jp_journal_size ),
+                        DJP( jp_journal_trans_max ),
+                        DJP( jp_journal_magic ),
+                        DJP( jp_journal_max_batch ),
+			SB_JOURNAL(sb)->j_max_commit_age,
+                        DJP( jp_journal_max_trans_age ),
+
+			JF( j_1st_reserved_block ),			
+			JF( j_state ),			
+			JF( j_trans_id ),
+			JF( j_mount_id ),
+			JF( j_start ),
+			JF( j_len ),
+			JF( j_len_alloc ),
+			atomic_read( & r -> s_journal -> j_wcount ),
+			JF( j_bcount ),
+			JF( j_first_unflushed_offset ),
+			JF( j_last_flush_trans_id ),
+			JF( j_trans_start_time ),
+			JF( j_list_bitmap_index ),
+			JF( j_must_wait ),
+			JF( j_next_full_flush ),
+			JF( j_next_async_flush ),
+			JF( j_cnode_used ),
+			JF( j_cnode_free ),
+
+			SFPJ( in_journal ),
+			SFPJ( in_journal_bitmap ),
+			SFPJ( in_journal_reusable ),
+			SFPJ( lock_journal ),
+			SFPJ( lock_journal_wait ),
+			SFPJ( journal_being ),
+			SFPJ( journal_relock_writers ),
+			SFPJ( journal_relock_wcount ),
+			SFPJ( mark_dirty ),
+			SFPJ( mark_dirty_already ),
+			SFPJ( mark_dirty_notjournal ),
+			SFPJ( restore_prepared ),
+			SFPJ( prepare ),
+			SFPJ( prepare_retry )
+		);
+	return 0;
+}
+
+/* iterator */
+static int test_sb(struct super_block *sb, void *data)
+{
+	return data == sb;
+}
+
+static int set_sb(struct super_block *sb, void *data)
+{
+	return -ENOENT;
+}
+
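+/*
+ * Each per-superblock proc file below is a seq_file with exactly one
+ * record: r_start() pins the superblock with an active reference taken
+ * through sget() (and drops the s_umount lock sget() returns with),
+ * r_show() calls the show function stored in the proc entry, and the
+ * reference is released again in r_next()/r_stop().
+ */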
+static void *r_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_dir_entry *de = m->private;
+	struct super_block *s = de->parent->data;
+	loff_t l = *pos;
+
+	if (l)
+		return NULL;
+
+	if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, s)))
+		return NULL;
+
+	up_write(&s->s_umount);
+
+	if (de->deleted) {
+		deactivate_super(s);
+		return NULL;
+	}
+
+	return s;
+}
+
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	if (v)
+		deactivate_super(v);
+	return NULL;
+}
+
+static void r_stop(struct seq_file *m, void *v)
+{
+	if (v)
+		deactivate_super(v);
+}
+
+static int r_show(struct seq_file *m, void *v)
+{
+	struct proc_dir_entry *de = m->private;
+	int (*show)(struct seq_file *, struct super_block *) = de->data;
+	return show(m, v);
+}
+
+static struct seq_operations r_ops = {
+	.start = r_start,
+	.next = r_next,
+	.stop = r_stop,
+	.show = r_show,
+};
+
+static int r_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &r_ops);
+
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = PDE(inode);
+	}
+	return ret;
+}
+
+static struct file_operations r_file_operations = {
+	.open		= r_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static struct proc_dir_entry *proc_info_root = NULL;
+static const char proc_info_root_name[] = "fs/reiserfs";
+
+static void add_file(struct super_block *sb, char *name,
+	int (*func)(struct seq_file *, struct super_block *))
+{
+	struct proc_dir_entry *de;
+	de = create_proc_entry(name, 0, REISERFS_SB(sb)->procdir);
+	if (de) {
+		de->data = func;
+		de->proc_fops = &r_file_operations;
+	}
+}
+
+int reiserfs_proc_info_init( struct super_block *sb )
+{
+	spin_lock_init( & __PINFO( sb ).lock );
+	REISERFS_SB(sb)->procdir = proc_mkdir(reiserfs_bdevname (sb), proc_info_root);
+	if( REISERFS_SB(sb)->procdir ) {
+		REISERFS_SB(sb)->procdir->owner = THIS_MODULE;
+		REISERFS_SB(sb)->procdir->data = sb;
+		add_file(sb, "version", show_version);
+		add_file(sb, "super", show_super);
+		add_file(sb, "per-level", show_per_level);
+		add_file(sb, "bitmap", show_bitmap);
+		add_file(sb, "on-disk-super", show_on_disk_super);
+		add_file(sb, "oidmap", show_oidmap);
+		add_file(sb, "journal", show_journal);
+		return 0;
+	}
+	reiserfs_warning(sb, "reiserfs: cannot create /proc/%s/%s",
+			 proc_info_root_name, reiserfs_bdevname (sb) );
+	return 1;
+}
+
+int reiserfs_proc_info_done( struct super_block *sb )
+{
+	struct proc_dir_entry *de = REISERFS_SB(sb)->procdir;
+	if (de) {
+		remove_proc_entry("journal", de);
+		remove_proc_entry("oidmap", de);
+		remove_proc_entry("on-disk-super", de);
+		remove_proc_entry("bitmap", de);
+		remove_proc_entry("per-level", de);
+		remove_proc_entry("super", de);
+		remove_proc_entry("version", de);
+	}
+	spin_lock( & __PINFO( sb ).lock );
+	__PINFO( sb ).exiting = 1;
+	spin_unlock( & __PINFO( sb ).lock );
+	if ( proc_info_root ) {
+		remove_proc_entry( reiserfs_bdevname (sb), proc_info_root );
+		REISERFS_SB(sb)->procdir = NULL;
+	}
+	return 0;
+}
+
+struct proc_dir_entry *reiserfs_proc_register_global( char *name, 
+						      read_proc_t *func )
+{
+	return ( proc_info_root ) ? create_proc_read_entry( name, 0, 
+							    proc_info_root, 
+							    func, NULL ) : NULL;
+}
+
+void reiserfs_proc_unregister_global( const char *name )
+{
+	remove_proc_entry( name, proc_info_root );
+}
+
+int reiserfs_proc_info_global_init( void )
+{
+	if( proc_info_root == NULL ) {
+		proc_info_root = proc_mkdir(proc_info_root_name, NULL);
+		if( proc_info_root ) {
+			proc_info_root -> owner = THIS_MODULE;
+		} else {
+			reiserfs_warning (NULL,
+					  "reiserfs: cannot create /proc/%s",
+					  proc_info_root_name );
+			return 1;
+		}
+	}
+	return 0;
+}
+
+int reiserfs_proc_info_global_done( void )
+{
+	if ( proc_info_root != NULL ) {
+		proc_info_root = NULL;
+		remove_proc_entry(proc_info_root_name, NULL);
+	}
+	return 0;
+}
+
+/* REISERFS_PROC_INFO */
+#else
+
+int reiserfs_proc_info_init( struct super_block *sb ) { return 0; }
+int reiserfs_proc_info_done( struct super_block *sb ) { return 0; }
+
+struct proc_dir_entry *reiserfs_proc_register_global( char *name, 
+						      read_proc_t *func )
+{ return NULL; }
+
+void reiserfs_proc_unregister_global( const char *name ) {;}
+
+int reiserfs_proc_info_global_init( void ) { return 0; }
+int reiserfs_proc_info_global_done( void ) { return 0; }
+
+int reiserfs_global_version_in_proc( char *buffer, char **start, 
+				     off_t offset,
+				     int count, int *eof, void *data )
+{ return 0; }
+
+/* REISERFS_PROC_INFO */
+#endif
+
+/*
+ * $Log: procfs.c,v $
+ * Revision 1.1.8.2  2001/07/15 17:08:42  god
+ *  . use get_super() in procfs.c
+ *  . remove remove_save_link() from reiserfs_do_truncate()
+ *
+ * I accept terms and conditions stated in the Legal Agreement
+ * (available at http://www.namesys.com/legalese.html)
+ *
+ * Revision 1.1.8.1  2001/07/11 16:48:50  god
+ * proc info support
+ *
+ * I accept terms and conditions stated in the Legal Agreement
+ * (available at http://www.namesys.com/legalese.html)
+ *
+ */
+
+/* 
+ * Make Linus happy.
+ * Local variables:
+ * c-indentation-style: "K&R"
+ * mode-name: "LC"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
new file mode 100644
index 0000000..1700120
--- /dev/null
+++ b/fs/reiserfs/resize.c
@@ -0,0 +1,182 @@
+/* 
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+ 
+/* 
+ * Written by Alexander Zarochentcev.
+ *
+ * The kernel part of the (on-line) reiserfs resizer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_fs_sb.h>
+#include <linux/buffer_head.h>
+
+int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
+{
+        int err = 0;
+	struct reiserfs_super_block * sb;
+        struct reiserfs_bitmap_info *bitmap;
+	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
+	struct buffer_head * bh;
+	struct reiserfs_transaction_handle th;
+	unsigned int bmap_nr_new, bmap_nr;
+	unsigned int block_r_new, block_r;
+	
+	struct reiserfs_list_bitmap * jb;
+	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
+	
+	unsigned long int block_count, free_blocks;
+	int i;
+	int copy_size ;
+
+	sb = SB_DISK_SUPER_BLOCK(s);
+
+	if (SB_BLOCK_COUNT(s) >= block_count_new) {
+		printk("can\'t shrink filesystem on-line\n");
+		return -EINVAL;
+	}
+
+	/* check the device size */
+	bh = sb_bread(s, block_count_new - 1);
+	if (!bh) {
+		printk("reiserfs_resize: can\'t read last block\n");
+		return -EINVAL;
+	}	
+	bforget(bh);
+
+	/* old disk layout detection; those partitions can be mounted, but
+	 * cannot be resized */
+	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
+		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
+		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
+		return -ENOTSUPP;
+	}
+       
+	/* count used bits in last bitmap block */
+	block_r = SB_BLOCK_COUNT(s) -
+	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
+	
+	/* count bitmap blocks in new fs */
+	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
+	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
+	if (block_r_new) 
+		bmap_nr_new++;
+	else
+		block_r_new = s->s_blocksize * 8;
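+
+	/* Worked example with illustrative numbers: with a 4KB block size
+	 * each bitmap block maps 32768 blocks, so growing to 100000 blocks
+	 * gives bmap_nr_new = 4, with block_r_new = 1696 bits used in the
+	 * last bitmap block. */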
+
+	/* save old values */
+	block_count = SB_BLOCK_COUNT(s);
+	bmap_nr     = SB_BMAP_NR(s);
+
+	/* resizing of reiserfs bitmaps (journal and real), if needed */
+	if (bmap_nr_new > bmap_nr) {	    
+	    /* reallocate journal bitmaps */
+	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
+		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
+		unlock_super(s) ;
+		return -ENOMEM ;
+	    }
+	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
+	    ** node pointers from the old journal bitmap structs, and then
+	    ** transfer the new data structures into the journal struct.
+	    **
+	    ** using the copy_size var below allows this code to work for
+	    ** both shrinking and expanding the FS.
+	    */
+	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
+	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
+	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+		struct reiserfs_bitmap_node **node_tmp ;
+		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
+		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;
+
+		/* just in case vfree schedules on us, copy the new
+		** pointer into the journal struct before freeing the 
+		** old one
+		*/
+		node_tmp = jb->bitmaps ;
+		jb->bitmaps = jbitmap[i].bitmaps ;
+		vfree(node_tmp) ;
+	    }	
+	
+	    /* allocate additional bitmap blocks, reallocate array of bitmap
+	     * block pointers */
+	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
+	    if (!bitmap) {
+		/* Journal bitmaps are still oversized, but the memory isn't
+		 * leaked, so this is acceptable */
+		printk("reiserfs_resize: unable to allocate memory.\n");
+		return -ENOMEM;
+	    }
+	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
+	    for (i = 0; i < bmap_nr; i++)
+		bitmap[i] = old_bitmap[i];
+
+	    /* This doesn't go through the journal, but it doesn't have to.
+	     * The changes are still atomic: We're synced up when the journal
+	     * transaction begins, and the new bitmaps don't matter if the
+	     * transaction fails. */
+	    for (i = bmap_nr; i < bmap_nr_new; i++) {
+		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
+		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
+		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);
+
+		set_buffer_uptodate(bitmap[i].bh);
+		mark_buffer_dirty(bitmap[i].bh) ;
+		sync_dirty_buffer(bitmap[i].bh);
+		// update bitmap_info stuff
+		bitmap[i].first_zero_hint=1;
+		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
+	    }	
+	    /* free old bitmap blocks array */
+	    SB_AP_BITMAP(s) = bitmap;
+	    vfree (old_bitmap);
+	}
+	
+	/* begin the transaction; if journal_begin fails that is fine - we do
+	 * have incorrect in-memory bitmaps at this point, but none of them
+	 * is ever going to touch the disk anyway. */
+	err = journal_begin(&th, s, 10);
+	if (err)
+	    return err;
+
+	/* correct last bitmap blocks in old and new disk layout */
+	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
+	for (i = block_r; i < s->s_blocksize * 8; i++)
+	    reiserfs_test_and_clear_le_bit(i, 
+					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
+	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
+	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
+	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;
+
+	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);
+
+	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
+	for (i = block_r_new; i < s->s_blocksize * 8; i++)
+	    reiserfs_test_and_set_le_bit(i,
+					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
+	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
+ 
+	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
+	/* Extreme case: the new last bitmap block maps only itself
+	 * (block_r_new == 1), so it has no free bits and no zero hint. */
+	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
+	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
+ 	/* update super */
+	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+	free_blocks = SB_FREE_BLOCKS(s);
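+	/* e.g. (illustrative numbers): growing from 80000 to 100000 blocks
+	 * while adding one new bitmap block gains 100000 - 80000 - 1 = 19999
+	 * free blocks */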
+	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
+	PUT_SB_BLOCK_COUNT(s, block_count_new);
+	PUT_SB_BMAP_NR(s, bmap_nr_new);
+	s->s_dirt = 1;
+
+	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+	
+	SB_JOURNAL(s)->j_must_wait = 1;
+	return journal_end(&th, s, 10);
+}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
new file mode 100644
index 0000000..73ec521
--- /dev/null
+++ b/fs/reiserfs/stree.c
@@ -0,0 +1,2073 @@
+/*
+ *  Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/*
+ *  Written by Anatoly P. Pinchuk pap@namesys.botik.ru
+ *  Program Systems Institute
+ *  Pereslavl-Zalessky, Russia
+ */
+
+/*
+ *  This file contains functions dealing with S+tree
+ *
+ * B_IS_IN_TREE
+ * copy_item_head
+ * comp_short_keys
+ * comp_keys
+ * comp_short_le_keys
+ * le_key2cpu_key
+ * comp_le_keys
+ * bin_search
+ * get_lkey
+ * get_rkey
+ * key_in_buffer
+ * decrement_bcount
+ * decrement_counters_in_path
+ * reiserfs_check_path
+ * pathrelse_and_restore
+ * pathrelse
+ * search_by_key_reada
+ * search_by_key
+ * search_for_position_by_key
+ * comp_items
+ * prepare_for_direct_item
+ * prepare_for_direntry_item
+ * prepare_for_delete_or_cut
+ * calc_deleted_bytes_number
+ * init_tb_struct
+ * padd_item
+ * reiserfs_delete_item
+ * reiserfs_delete_solid_item
+ * reiserfs_delete_object
+ * maybe_indirect_to_direct
+ * indirect_to_direct_roll_back
+ * reiserfs_cut_from_item
+ * truncate_directory
+ * reiserfs_do_truncate
+ * reiserfs_paste_into_item
+ * reiserfs_insert_item
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/quotaops.h>
+
+/* Does the buffer contain a disk block which is in the tree? */
+inline int B_IS_IN_TREE (const struct buffer_head * p_s_bh)
+{
+
+  RFALSE( B_LEVEL (p_s_bh) > MAX_HEIGHT,
+	  "PAP-1010: block (%b) has too big level (%z)", p_s_bh, p_s_bh);
+
+  return ( B_LEVEL (p_s_bh) != FREE_LEVEL );
+}
+
+//
+// copy an item head, which is stored on disk in little-endian form
+//
+inline void copy_item_head(struct item_head * p_v_to, 
+			   const struct item_head * p_v_from)
+{
+  memcpy (p_v_to, p_v_from, IH_SIZE);
+}
+
+
+/* le_key points to an on-disk structure, which is stored in little-endian
+   form.  cpu_key points to a CPU-order variable.  For keys of items of the
+   same object this returns 0.
+   Returns: -1 if key1 < key2,
+             0 if key1 == key2,
+             1 if key1 > key2 */
+inline int  comp_short_keys (const struct reiserfs_key * le_key,
+			     const struct cpu_key * cpu_key)
+{
+  __u32 * p_s_le_u32, * p_s_cpu_u32;
+  int n_key_length = REISERFS_SHORT_KEY_LEN;
+
+  p_s_le_u32 = (__u32 *)le_key;
+  p_s_cpu_u32 = (__u32 *)&cpu_key->on_disk_key;
+  for( ; n_key_length--; ++p_s_le_u32, ++p_s_cpu_u32 ) {
+    if ( le32_to_cpu (*p_s_le_u32) < *p_s_cpu_u32 )
+      return -1;
+    if ( le32_to_cpu (*p_s_le_u32) > *p_s_cpu_u32 )
+      return 1;
+  }
+
+  return 0;
+}
+
+
+/* le_key points to an on-disk structure, which is stored in little-endian
+   form.  cpu_key points to a CPU-order variable.
+   Compare keys using all 4 key fields.
+   Returns: -1 if key1 < key2,
+             0 if key1 == key2,
+             1 if key1 > key2 */
+static inline int  comp_keys (const struct reiserfs_key * le_key, const struct cpu_key * cpu_key)
+{
+  int retval;
+
+  retval = comp_short_keys (le_key, cpu_key);
+  if (retval)
+      return retval;
+  if (le_key_k_offset (le_key_version(le_key), le_key) < cpu_key_k_offset (cpu_key))
+      return -1;
+  if (le_key_k_offset (le_key_version(le_key), le_key) > cpu_key_k_offset (cpu_key))
+      return 1;
+
+  if (cpu_key->key_length == 3)
+      return 0;
+
+  /* this part is needed only when tail conversion is in progress */
+  if (le_key_k_type (le_key_version(le_key), le_key) < cpu_key_k_type (cpu_key))
+    return -1;
+
+  if (le_key_k_type (le_key_version(le_key), le_key) > cpu_key_k_type (cpu_key))
+    return 1;
+
+  return 0;
+}
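+
+/* Taken together, comp_short_keys() and comp_keys() order keys by
+   (k_dir_id, k_objectid, k_offset, k_type); this is the ordering that
+   bin_search() and the tree search below rely on. */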
+
+
+inline int comp_short_le_keys (const struct reiserfs_key * key1, const struct reiserfs_key * key2)
+{
+  __u32 * p_s_1_u32, * p_s_2_u32;
+  int n_key_length = REISERFS_SHORT_KEY_LEN;
+
+  p_s_1_u32 = (__u32 *)key1;
+  p_s_2_u32 = (__u32 *)key2;
+  for( ; n_key_length--; ++p_s_1_u32, ++p_s_2_u32 ) {
+    if ( le32_to_cpu (*p_s_1_u32) < le32_to_cpu (*p_s_2_u32) )
+      return -1;
+    if ( le32_to_cpu (*p_s_1_u32) > le32_to_cpu (*p_s_2_u32) )
+      return 1;
+  }
+  return 0;
+}
+
+inline void le_key2cpu_key (struct cpu_key * to, const struct reiserfs_key * from)
+{
+    to->on_disk_key.k_dir_id = le32_to_cpu (from->k_dir_id);
+    to->on_disk_key.k_objectid = le32_to_cpu (from->k_objectid);
+    
+    // find out version of the key
+    to->version = le_key_version (from);
+    if (to->version == KEY_FORMAT_3_5) {
+	to->on_disk_key.u.k_offset_v1.k_offset = le32_to_cpu (from->u.k_offset_v1.k_offset);
+	to->on_disk_key.u.k_offset_v1.k_uniqueness = le32_to_cpu (from->u.k_offset_v1.k_uniqueness);
+    } else {
+	to->on_disk_key.u.k_offset_v2.k_offset = offset_v2_k_offset(&from->u.k_offset_v2);
+	to->on_disk_key.u.k_offset_v2.k_type = offset_v2_k_type(&from->u.k_offset_v2);
+    } 
+}
+
+
+
+// this does not say which one is bigger, it only returns non-zero if the
+// keys are not equal, 0 otherwise
+inline int comp_le_keys (const struct reiserfs_key * k1, const struct reiserfs_key * k2)
+{
+    return memcmp (k1, k2, sizeof (struct reiserfs_key));
+}
+
+/**************************************************************************
+ *  Binary search toolkit function                                        *
+ *  Search for an item in the array by the item key                       *
+ *  Returns:    1 if found,  0 if not found;                              *
+ *        *p_n_pos = number of the searched element if found, else the    *
+ *        number of the first element that is larger than p_v_key.        *
+ **************************************************************************/
+/* For those not familiar with binary search: n_lbound is the leftmost item that it
+ could be, n_rbound the rightmost item that it could be.  We examine the item
+ halfway between n_lbound and n_rbound, and that tells us either that we can increase
+ n_lbound, or decrease n_rbound, or that we have found it; once n_lbound exceeds
+ n_rbound, there are no possible items left and we have not found it. With each examination we
+ cut the number of possible items it could be by one more than half rounded down,
+ or we find it. */
+static inline	int bin_search (
+              const void * p_v_key, /* Key to search for.                   */
+	      const void * p_v_base,/* First item in the array.             */
+	      int       p_n_num,    /* Number of items in the array.        */
+	      int       p_n_width,  /* Item size in the array.  Lest the
+				       reader be confused, note that this
+				       is crafted as a general function,
+				       and when it is applied specifically
+				       to the array of item headers in a
+				       node, p_n_width is actually the
+				       item header size, not the item
+				       size.                             */
+	      int     * p_n_pos     /* Number of the searched for element. */
+            ) {
+    int   n_rbound, n_lbound, n_j;
+
+    for ( n_j = ((n_rbound = p_n_num - 1) + (n_lbound = 0))/2; n_lbound <= n_rbound; n_j = (n_rbound + n_lbound)/2 )
+	switch( comp_keys((struct reiserfs_key *)((char *)p_v_base + n_j * p_n_width), (struct cpu_key *)p_v_key) )  {
+	case -1: n_lbound = n_j + 1; continue;
+	case  1: n_rbound = n_j - 1; continue;
+	case  0: *p_n_pos = n_j;     return ITEM_FOUND; /* Key found in the array.  */
+	}
+
+    /* bin_search did not find the given key; it returns the position of
+       the smallest key that is greater than the given one. */
+    *p_n_pos = n_lbound;
+    return ITEM_NOT_FOUND;
+}
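+
+/* Example call (the position variable name is illustrative), as used by
+   search_by_key() below when looking through a leaf node's array of item
+   headers:
+
+	bin_search(p_s_key, B_N_PITEM_HEAD(p_s_bh, 0), B_NR_ITEMS(p_s_bh),
+		   IH_SIZE, &pos);
+*/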
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+
+
+/* Minimal possible key. It is never in the tree. */
+const struct reiserfs_key  MIN_KEY = {0, 0, {{0, 0},}};
+
+/* Maximal possible key. It is never in the tree. */
+const struct reiserfs_key  MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}};
+
+
+/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
+   of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
+   the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
+   case we return a special key, either MIN_KEY or MAX_KEY. */
+static inline	const struct  reiserfs_key * get_lkey  (
+	                const struct path         * p_s_chk_path,
+                        const struct super_block  * p_s_sb
+                      ) {
+  int                   n_position, n_path_offset = p_s_chk_path->path_length;
+  struct buffer_head  * p_s_parent;
+  
+  RFALSE( n_path_offset < FIRST_PATH_ELEMENT_OFFSET, 
+	  "PAP-5010: invalid offset in the path");
+
+  /* While not higher in path than first element. */
+  while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {
+
+    RFALSE( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
+	    "PAP-5020: parent is not uptodate");
+
+    /* Parent at the path is not in the tree now. */
+    if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      return &MAX_KEY;
+    /* Check whether position in the parent is correct. */
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
+       return &MAX_KEY;
+    /* Check whether parent at the path really points to the child. */
+    if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
+	 PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
+      return &MAX_KEY;
+    /* Return delimiting key if position in the parent is not equal to zero. */
+    if ( n_position )
+      return B_N_PDELIM_KEY(p_s_parent, n_position - 1);
+  }
+  /* Return MIN_KEY if we are in the root of the buffer tree. */
+  if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+       SB_ROOT_BLOCK (p_s_sb) )
+    return &MIN_KEY;
+  return  &MAX_KEY;
+}
+
+
+/* Get delimiting key of the buffer at the path and its right neighbor. */
+inline	const struct  reiserfs_key * get_rkey  (
+	                const struct path         * p_s_chk_path,
+                        const struct super_block  * p_s_sb
+                      ) {
+  int                   n_position,
+    			n_path_offset = p_s_chk_path->path_length;
+  struct buffer_head  * p_s_parent;
+
+  RFALSE( n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
+	  "PAP-5030: invalid offset in the path");
+
+  while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {
+
+    RFALSE( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
+	    "PAP-5040: parent is not uptodate");
+
+    /* Parent at the path is not in the tree now. */
+    if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      return &MIN_KEY;
+    /* Check whether position in the parent is correct. */
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
+      return &MIN_KEY;
+    /* Check whether parent at the path really points to the child. */
+    if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
+                                        PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
+      return &MIN_KEY;
+    /* Return delimiting key if position in the parent is not the last one. */
+    if ( n_position != B_NR_ITEMS(p_s_parent) )
+      return B_N_PDELIM_KEY(p_s_parent, n_position);
+  }
+  /* Return MAX_KEY if we are in the root of the buffer tree. */
+  if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+       SB_ROOT_BLOCK (p_s_sb) )
+    return &MAX_KEY;
+  return  &MIN_KEY;
+}
+
+
+/* Check whether a key is contained in the tree rooted from a buffer at a path. */
+/* This works by looking at the left and right delimiting keys for the buffer in the last path_element in
+   the path.  These delimiting keys are stored at least one level above that buffer in the tree. If the
+   buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in
+   this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
+static  inline  int key_in_buffer (
+                      struct path         * p_s_chk_path, /* Path which should be checked.  */
+                      const struct cpu_key      * p_s_key,      /* Key which should be checked.   */
+                      struct super_block  * p_s_sb        /* Super block pointer.           */
+		      ) {
+
+  RFALSE( ! p_s_key || p_s_chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET ||
+	  p_s_chk_path->path_length > MAX_HEIGHT,
+	  "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
+	  p_s_key, p_s_chk_path->path_length);
+  RFALSE( !PATH_PLAST_BUFFER(p_s_chk_path)->b_bdev,
+	  "PAP-5060: device must not be NODEV");
+
+  if ( comp_keys(get_lkey(p_s_chk_path, p_s_sb), p_s_key) == 1 )
+    /* left delimiting key is bigger than the key we look for */
+    return 0;
+  //  if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, p_s_sb)) != -1 )
+  if ( comp_keys(get_rkey(p_s_chk_path, p_s_sb), p_s_key) != 1 )
+    /* p_s_key must be less than the right delimiting key */
+    return 0;
+  return 1;
+}
+
+
+inline void decrement_bcount(
+              struct buffer_head  * p_s_bh
+            ) { 
+  if ( p_s_bh ) {
+    if ( atomic_read (&(p_s_bh->b_count)) ) {
+      put_bh(p_s_bh) ;
+      return;
+    }
+    reiserfs_panic(NULL, "PAP-5070: decrement_bcount: trying to free free buffer %b", p_s_bh);
+  }
+}
+
+
+/* Decrement b_count field of the all buffers in the path. */
+void decrement_counters_in_path (
+              struct path * p_s_search_path
+            ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+  RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET ||
+	  n_path_offset > EXTENDED_MAX_HEIGHT - 1,
+	  "PAP-5080: invalid path offset of %d", n_path_offset);
+
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET ) {
+    struct buffer_head * bh;
+
+    bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
+    decrement_bcount (bh);
+  }
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+
+int reiserfs_check_path(struct path *p) {
+  RFALSE( p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
+	  "path not properly relsed") ;
+  return 0 ;
+}
+
+
+/* Release all buffers in the path, and restore the dirty bits that were
+** cleared when the buffers were prepared for the log.
+**
+** only called from fix_nodes()
+*/
+void  pathrelse_and_restore (
+	struct super_block *s, 
+        struct path * p_s_search_path
+      ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+  RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, 
+	  "clm-4000: invalid path offset");
+  
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET )  {
+    reiserfs_restore_prepared_buffer(s, PATH_OFFSET_PBUFFER(p_s_search_path, 
+                                     n_path_offset));
+    brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
+  }
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+/* Release all buffers in the path. */
+void  pathrelse (
+        struct path * p_s_search_path
+      ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+  RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
+	  "PAP-5090: invalid path offset");
+  
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET )  
+    brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
+
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+
+
+static int is_leaf (char * buf, int blocksize, struct buffer_head * bh)
+{
+    struct block_head * blkh;
+    struct item_head * ih;
+    int used_space;
+    int prev_location;
+    int i;
+    int nr;
+
+    blkh = (struct block_head *)buf;
+    if ( blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
+	reiserfs_warning (NULL, "is_leaf: this should be caught earlier");
+	return 0;
+    }
+
+    nr = blkh_nr_item(blkh);
+    if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
+	/* item number is too big or too small */
+	reiserfs_warning (NULL, "is_leaf: nr_item seems wrong: %z", bh);
+	return 0;
+    }
+    ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
+    used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location (ih));
+    if (used_space != blocksize - blkh_free_space(blkh)) {
+	/* free space does not match the calculated amount of used space */
+	reiserfs_warning (NULL, "is_leaf: free space seems wrong: %z", bh);
+	return 0;
+    }
+
+    // FIXME: this full is_leaf check may hurt performance too much - we
+    // may want to just return 1 here
+
+    /* check tables of item heads */
+    ih = (struct item_head *)(buf + BLKH_SIZE);
+    prev_location = blocksize;
+    for (i = 0; i < nr; i ++, ih ++) {
+	if ( le_ih_k_type(ih) == TYPE_ANY) {
+	    reiserfs_warning (NULL, "is_leaf: wrong item type for item %h",ih);
+	    return 0;
+	}
+	if (ih_location (ih) >= blocksize || ih_location (ih) < IH_SIZE * nr) {
+	    reiserfs_warning (NULL, "is_leaf: item location seems wrong: %h", ih);
+	    return 0;
+	}
+	if (ih_item_len (ih) < 1 || ih_item_len (ih) > MAX_ITEM_LEN (blocksize)) {
+	    reiserfs_warning (NULL, "is_leaf: item length seems wrong: %h", ih);
+	    return 0;
+	}
+	if (prev_location - ih_location (ih) != ih_item_len (ih)) {
+	    reiserfs_warning (NULL, "is_leaf: item location seems wrong (second one): %h", ih);
+	    return 0;
+	}
+	prev_location = ih_location (ih);
+    }
+
+    // one may imagine many more checks
+    return 1;
+}
+
+
+/* returns 1 if buf looks like an internal node, 0 otherwise */
+static int is_internal (char * buf, int blocksize, struct buffer_head * bh)
+{
+    struct block_head * blkh;
+    int nr;
+    int used_space;
+
+    blkh = (struct block_head *)buf;
+    nr = blkh_level(blkh);
+    if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
+	/* this level is not possible for internal nodes */
+	reiserfs_warning (NULL, "is_internal: this should be caught earlier");
+	return 0;
+    }
+    
+    nr = blkh_nr_item(blkh);
+    if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
+	/* for an internal node which is not the root we might also check the minimum number of keys */
+	reiserfs_warning (NULL, "is_internal: number of keys seems wrong: %z", bh);
+	return 0;
+    }
+
+    used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
+    if (used_space != blocksize - blkh_free_space(blkh)) {
+	reiserfs_warning (NULL, "is_internal: free space seems wrong: %z", bh);
+	return 0;
+    }
+
+    // one may imagine many more checks
+    return 1;
+}
+
+
+// make sure that bh contains a formatted node of the reiserfs tree at the
+// expected 'level'
+static int is_tree_node (struct buffer_head * bh, int level)
+{
+    if (B_LEVEL (bh) != level) {
+	reiserfs_warning (NULL, "is_tree_node: node level %d does not match to the expected one %d",
+		B_LEVEL (bh), level);
+	return 0;
+    }
+    if (level == DISK_LEAF_NODE_LEVEL)
+	return is_leaf (bh->b_data, bh->b_size, bh);
+
+    return is_internal (bh->b_data, bh->b_size, bh);
+}
+
+
+
+#define SEARCH_BY_KEY_READA 16
+
+/* The function is NOT SCHEDULE-SAFE! */
+static void search_by_key_reada (struct super_block * s,
+                                 struct buffer_head **bh,
+				 unsigned long *b, int num)
+{
+    int i,j;
+  
+    for (i = 0 ; i < num ; i++) {
+	bh[i] = sb_getblk (s, b[i]);
+    }
+    for (j = 0 ; j < i ; j++) {
+	/*
+	 * note, this needs attention if we are getting rid of the BKL
+	 * you have to make sure the prepared bit isn't set on this buffer
+	 */
+	if (!buffer_uptodate(bh[j]))
+	    ll_rw_block(READA, 1, bh + j);
+    	brelse(bh[j]);
+    }
+}
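+
+/* The readahead block numbers are collected by search_by_key() below while
+   it walks the level just above the leaves, and are only submitted (via the
+   routine above) when the target block itself misses the buffer cache. */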
+
+/**************************************************************************
+ * Algorithm   SearchByKey                                                *
+ *             look for item in the Disk S+Tree by its key                *
+ * Input:  p_s_sb   -  super block                                        *
+ *         p_s_key  - pointer to the key to search                        *
+ * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR                         *
+ *         p_s_search_path - path from the root to the needed leaf        *
+ **************************************************************************/
+
+/* This function fills up the path from the root to the leaf as it
+   descends the tree looking for the key.  It uses reiserfs_bread to
+   try to find buffers in the cache given their block number.  If it
+   does not find them in the cache it reads them from disk.  For each
+   node that search_by_key finds this way, it then uses bin_search to
+   look through that node.  bin_search will find the position of the
+   block number of the next node if it is looking through an internal
+   node.  If it is looking through a leaf node, bin_search will find
+   the position of the item whose key is either equal to the given key
+   or the maximal key less than the given key.  search_by_key returns
+   a path that must be checked for the correctness of the top of the
+   path but need not be checked for the correctness of the bottom of
+   the path */
+/* The function is NOT SCHEDULE-SAFE! */
+int search_by_key (struct super_block * p_s_sb,
+		   const struct cpu_key * p_s_key, /* Key to search. */
+		   struct path * p_s_search_path, /* This structure was
+						     allocated and initialized
+						     by the calling
+						     function. It is filled up
+						     by this function.  */
+		   int n_stop_level /* How far down the tree to search. To
+                                       stop at leaf level - set to
+                                       DISK_LEAF_NODE_LEVEL */
+    ) {
+    int  n_block_number;
+    int  expected_level;
+    struct buffer_head  *       p_s_bh;
+    struct path_element *       p_s_last_element;
+    int				n_node_level, n_retval;
+    int 			right_neighbor_of_leaf_node;
+    int				fs_gen;
+    struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
+    unsigned long      reada_blocks[SEARCH_BY_KEY_READA];
+    int reada_count = 0;
+
+#ifdef CONFIG_REISERFS_CHECK
+    int n_repeat_counter = 0;
+#endif
+    
+    PROC_INFO_INC( p_s_sb, search_by_key );
+    
+    /* As we add each node to a path we increase its count.  This means that
+       we must be careful to release all nodes in a path before we either
+       discard the path struct or re-use the path struct, as we do here. */
+
+    decrement_counters_in_path(p_s_search_path);
+
+    right_neighbor_of_leaf_node = 0;
+
+    /* With each iteration of this loop we search through the items in the
+       current node, and calculate the next current node (next path element)
+       for the next iteration of this loop. */
+    n_block_number = SB_ROOT_BLOCK (p_s_sb);
+    expected_level = -1;
+    while ( 1 ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+	if ( !(++n_repeat_counter % 50000) )
+	    reiserfs_warning (p_s_sb, "PAP-5100: search_by_key: %s:"
+			      "there were %d iterations of while loop "
+			      "looking for key %K",
+			      current->comm, n_repeat_counter, p_s_key);
+#endif
+
+	/* prep path to have another element added to it. */
+	p_s_last_element = PATH_OFFSET_PELEMENT(p_s_search_path, ++p_s_search_path->path_length);
+	fs_gen = get_generation (p_s_sb);
+
+	/* Read the next tree node, and set the last element in the path to
+           have a pointer to it. */
+	if ((p_s_bh = p_s_last_element->pe_buffer =
+	     sb_getblk(p_s_sb, n_block_number)) ) {
+	    if (!buffer_uptodate(p_s_bh) && reada_count > 1) {
+		search_by_key_reada (p_s_sb, reada_bh,
+		                     reada_blocks, reada_count);
+	    }
+	    ll_rw_block(READ, 1, &p_s_bh);
+	    wait_on_buffer(p_s_bh);
+	    if (!buffer_uptodate(p_s_bh))
+	        goto io_error;
+	} else {
+io_error:
+	    p_s_search_path->path_length --;
+	    pathrelse(p_s_search_path);
+	    return IO_ERROR;
+	}
+	reada_count = 0;
+	if (expected_level == -1)
+		expected_level = SB_TREE_HEIGHT (p_s_sb);
+	expected_level --;
+
+	/* It is possible that a schedule occurred. We must check whether the
+	   key to search for is still in the tree rooted at the current buffer.
+	   If not, then repeat the search from the root. */
+	if ( fs_changed (fs_gen, p_s_sb) && 
+	    (!B_IS_IN_TREE (p_s_bh) ||
+	     B_LEVEL(p_s_bh) != expected_level ||
+	     !key_in_buffer(p_s_search_path, p_s_key, p_s_sb))) {
+	    PROC_INFO_INC( p_s_sb, search_by_key_fs_changed );
+	    PROC_INFO_INC( p_s_sb, search_by_key_restarted );
+	    PROC_INFO_INC( p_s_sb, sbk_restarted[ expected_level - 1 ] );
+	    decrement_counters_in_path(p_s_search_path);
+	    
+	    /* Get the root block number so that we can repeat the search
+	       starting from the root. */
+	    n_block_number = SB_ROOT_BLOCK (p_s_sb);
+	    expected_level = -1;
+	    right_neighbor_of_leaf_node = 0;
+	    
+	    /* repeat search from the root */
+	    continue;
+	}
+
+        /* only check that the key is in the buffer if p_s_key is not
+           equal to MAX_KEY. The latter case is only possible in
+           "finish_unfinished()" processing during mount. */
+        RFALSE( comp_keys( &MAX_KEY, p_s_key ) &&
+                ! key_in_buffer(p_s_search_path, p_s_key, p_s_sb),
+		"PAP-5130: key is not in the buffer");
+#ifdef CONFIG_REISERFS_CHECK
+	if ( cur_tb ) {
+	    print_cur_tb ("5140");
+	    reiserfs_panic(p_s_sb, "PAP-5140: search_by_key: schedule occurred in do_balance!");
+	}
+#endif
+
+	// make sure that the node contents look like a node of
+	// the expected level
+	if (!is_tree_node (p_s_bh, expected_level)) {
+	    reiserfs_warning (p_s_sb, "vs-5150: search_by_key: "
+			      "invalid format found in block %ld. Fsck?",
+			      p_s_bh->b_blocknr);
+	    pathrelse (p_s_search_path);
+	    return IO_ERROR;
+	}
+	
+	/* ok, we have acquired next formatted node in the tree */
+	n_node_level = B_LEVEL (p_s_bh);
+
+	PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level - 1 );
+
+	RFALSE( n_node_level < n_stop_level,
+		"vs-5152: tree level (%d) is less than stop level (%d)",
+		n_node_level, n_stop_level);
+
+	n_retval = bin_search( p_s_key, B_N_PITEM_HEAD(p_s_bh, 0),
+                B_NR_ITEMS(p_s_bh),
+                ( n_node_level == DISK_LEAF_NODE_LEVEL ) ? IH_SIZE : KEY_SIZE,
+                &(p_s_last_element->pe_position));
+	if (n_node_level == n_stop_level) {
+	    return n_retval;
+	}
+
+	/* we are not in the stop level */
+	if (n_retval == ITEM_FOUND)
+	    /* item has been found, so we choose the pointer which is to the right of the found one */
+	    p_s_last_element->pe_position++;
+
+	/* if item was not found we choose the position which is to
+	   the left of the found item. This requires no code,
+	   bin_search did it already.*/
+
+	/* So we have chosen a position in the current node which is
+	   an internal node.  Now we calculate child block number by
+	   position in the node. */
+	n_block_number = B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position);
+
+	/* if we are going to read leaf nodes, try for read ahead as well */
+	if ((p_s_search_path->reada & PATH_READA) &&
+	    n_node_level == DISK_LEAF_NODE_LEVEL + 1)
+	{
+	    int pos = p_s_last_element->pe_position;
+	    int limit = B_NR_ITEMS(p_s_bh);
+	    struct reiserfs_key *le_key;
+
+	    if (p_s_search_path->reada & PATH_READA_BACK)
+		limit = 0;
+	    while(reada_count < SEARCH_BY_KEY_READA) {
+		if (pos == limit)
+		    break;
+	        reada_blocks[reada_count++] = B_N_CHILD_NUM(p_s_bh, pos);
+		if (p_s_search_path->reada & PATH_READA_BACK)
+		    pos--;
+		else
+		    pos++;
+
+		/*
+		 * check to make sure we're in the same object
+		 */
+		le_key = B_N_PDELIM_KEY(p_s_bh, pos);
+		if (le32_to_cpu(le_key->k_objectid) !=
+		    p_s_key->on_disk_key.k_objectid)
+		{
+		    break;
+		}
+	    }
+        }
+    }
+}
+
+
+/* Form the path to an item and position in this item which contains
+   file byte defined by p_s_key. If there is no such item
+   corresponding to the key, we point the path to the item with
+   maximal key less than p_s_key, and *p_n_pos_in_item is set to one
+   past the last entry/byte in the item.  If searching for entry in a
+   directory item, and it is not found, *p_n_pos_in_item is set to one
+   entry more than the entry with maximal key which is less than the
+   sought key.
+
+   Note that if there is no entry in this same node which is one more,
+   then we point to an imaginary entry.  For direct items the
+   position is in units of bytes, for indirect items the position is
+   in units of blocknr entries, and for directory items the position
+   is in units of directory entries.  */
+
+/* The function is NOT SCHEDULE-SAFE! */
+int search_for_position_by_key (struct super_block  * p_s_sb,         /* Pointer to the super block.          */
+				const struct cpu_key  * p_cpu_key,      /* Key to search (cpu variable)         */
+				struct path         * p_s_search_path /* Filled up by this function.          */
+    ) {
+    struct item_head    * p_le_ih; /* pointer to on-disk structure */
+    int                   n_blk_size;
+    loff_t item_offset, offset;
+    struct reiserfs_dir_entry de;
+    int retval;
+
+    /* If searching for directory entry. */
+    if ( is_direntry_cpu_key (p_cpu_key) )
+	return  search_by_entry_key (p_s_sb, p_cpu_key, p_s_search_path, &de);
+
+    /* If not searching for directory entry. */
+    
+    /* If item is found. */
+    retval = search_item (p_s_sb, p_cpu_key, p_s_search_path);
+    if (retval == IO_ERROR)
+	return retval;
+    if ( retval == ITEM_FOUND )  {
+
+	RFALSE( ! ih_item_len(
+                B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path),
+			       PATH_LAST_POSITION(p_s_search_path))),
+	        "PAP-5165: item length equals zero");
+
+	pos_in_item(p_s_search_path) = 0;
+	return POSITION_FOUND;
+    }
+
+    RFALSE( ! PATH_LAST_POSITION(p_s_search_path),
+	    "PAP-5170: position equals zero");
+
+    /* Item is not found. Set path to the previous item. */
+    p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path), --PATH_LAST_POSITION(p_s_search_path));
+    n_blk_size = p_s_sb->s_blocksize;
+
+    if (comp_short_keys (&(p_le_ih->ih_key), p_cpu_key)) {
+	return FILE_NOT_FOUND;
+    }
+
+    // FIXME: quite ugly so far
+
+    item_offset = le_ih_k_offset (p_le_ih);
+    offset = cpu_key_k_offset (p_cpu_key);
+
+    /* Needed byte is contained in the item pointed to by the path.*/
+    if (item_offset <= offset &&
+	item_offset + op_bytes_number (p_le_ih, n_blk_size) > offset) {
+	pos_in_item (p_s_search_path) = offset - item_offset;
+	if ( is_indirect_le_ih(p_le_ih) ) {
+	    pos_in_item (p_s_search_path) /= n_blk_size;
+	}
+	return POSITION_FOUND;
+    }
+
+    /* Needed byte is not contained in the item pointed to by the
+     path. Set pos_in_item out of the item. */
+    if ( is_indirect_le_ih (p_le_ih) )
+	pos_in_item (p_s_search_path) = ih_item_len(p_le_ih) / UNFM_P_SIZE;
+    else
+        pos_in_item (p_s_search_path) = ih_item_len( p_le_ih );
+  
+    return POSITION_NOT_FOUND;
+}
+
+
+/* Compare given item and item pointed to by the path. */
+int comp_items (const struct item_head * stored_ih, const struct path * p_s_path)
+{
+    struct buffer_head  * p_s_bh;
+    struct item_head    * ih;
+
+    /* Last buffer at the path is not in the tree. */
+    if ( ! B_IS_IN_TREE(p_s_bh = PATH_PLAST_BUFFER(p_s_path)) )
+	return 1;
+
+    /* Last path position is invalid. */
+    if ( PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(p_s_bh) )
+	return 1;
+
+    /* we only need to know whether it is the same item */
+    ih = get_ih (p_s_path);
+    return memcmp (stored_ih, ih, IH_SIZE);
+}
+
+
+/* unformatted nodes are not logged anymore, ever.  This is safe
+** now
+*/
+#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
+
+// the block cannot be forgotten as it is under I/O or held by someone
+#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
+
+
+
+// prepare for delete or cut of direct item
+static inline int prepare_for_direct_item (struct path * path,
+					   struct item_head * le_ih,
+					   struct inode * inode,
+					   loff_t new_file_length,
+					   int * cut_size)
+{
+    loff_t round_len;
+
+
+    if ( new_file_length == max_reiserfs_offset (inode) ) {
+	/* item has to be deleted */
+	*cut_size = -(IH_SIZE + ih_item_len(le_ih));
+	return M_DELETE;
+    }
+	
+    // new file gets truncated
+    if (get_inode_item_key_version (inode) == KEY_FORMAT_3_6) {
+	// 
+	round_len = ROUND_UP (new_file_length); 
+	/* this was n_new_file_length < le_ih ... */
+	if ( round_len < le_ih_k_offset (le_ih) )  {
+	    *cut_size = -(IH_SIZE + ih_item_len(le_ih));
+	    return M_DELETE; /* Delete this item. */
+	}
+	/* Calculate first position and size for cutting from item. */
+	pos_in_item (path) = round_len - (le_ih_k_offset (le_ih) - 1);
+	*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));
+	
+	return M_CUT; /* Cut from this item. */
+    }
+
+
+    // old file: items may have any length
+
+    if ( new_file_length < le_ih_k_offset (le_ih) )  {
+	*cut_size = -(IH_SIZE + ih_item_len(le_ih));
+	return M_DELETE; /* Delete this item. */
+    }
+    /* Calculate first position and size for cutting from item. */
+    *cut_size = -(ih_item_len(le_ih) -
+		      (pos_in_item (path) = new_file_length + 1 - le_ih_k_offset (le_ih)));
+    return M_CUT; /* Cut from this item. */
+}
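+
+/* Illustrative numbers for the old-format branch above: a direct item at
+   key offset 10 with length 100, truncated to a new file length of 49,
+   gives pos_in_item = 49 + 1 - 10 = 40 and *cut_size = -(100 - 40) = -60,
+   i.e. the last 60 bytes of the item are cut. */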
+
+
+static inline int prepare_for_direntry_item (struct path * path,
+					     struct item_head * le_ih,
+					     struct inode * inode,
+					     loff_t new_file_length,
+					     int * cut_size)
+{
+    if (le_ih_k_offset (le_ih) == DOT_OFFSET && 
+	new_file_length == max_reiserfs_offset (inode)) {
+	RFALSE( ih_entry_count (le_ih) != 2,
+	        "PAP-5220: incorrect empty directory item (%h)", le_ih);
+	*cut_size = -(IH_SIZE + ih_item_len(le_ih));
+	return M_DELETE; /* Delete the directory item containing "." and ".." entry. */
+    }
+    
+    if ( ih_entry_count (le_ih) == 1 )  {
+	/* Delete the directory item, since there is only one record
+	   in this item */
+	*cut_size = -(IH_SIZE + ih_item_len(le_ih));
+	return M_DELETE;
+    }
+    
+    /* Cut one record from the directory item. */
+    *cut_size = -(DEH_SIZE + entry_length (get_last_bh (path), le_ih, pos_in_item (path)));
+    return M_CUT; 
+}
+
+
+/*  If the path points to a directory or direct item, calculate the balance mode and the size to cut.
+    If the path points to an indirect item, remove some number of its unformatted nodes.
+    In case of a file truncate, determine whether this item must be deleted/truncated or whether the
+    last unformatted node of this item will be converted to a direct item.
+    This function returns a determination of what balance mode the calling function should employ. */
+static char  prepare_for_delete_or_cut(
+				       struct reiserfs_transaction_handle *th, 
+				       struct inode * inode,
+				       struct path         * p_s_path,
+				       const struct cpu_key      * p_s_item_key,
+				       int                 * p_n_removed,      /* Number of unformatted nodes which were removed
+										  from end of the file. */
+				       int                 * p_n_cut_size,
+				       unsigned long long    n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
+    ) {
+    struct super_block  * p_s_sb = inode->i_sb;
+    struct item_head    * p_le_ih = PATH_PITEM_HEAD(p_s_path);
+    struct buffer_head  * p_s_bh = PATH_PLAST_BUFFER(p_s_path);
+
+    BUG_ON (!th->t_trans_id);
+
+    /* Stat_data item. */
+    if ( is_statdata_le_ih (p_le_ih) ) {
+
+	RFALSE( n_new_file_length != max_reiserfs_offset (inode),
+		"PAP-5210: mode must be M_DELETE");
+
+	*p_n_cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
+	return M_DELETE;
+    }
+
+
+    /* Directory item. */
+    if ( is_direntry_le_ih (p_le_ih) )
+	return prepare_for_direntry_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);
+
+    /* Direct item. */
+    if ( is_direct_le_ih (p_le_ih) )
+	return prepare_for_direct_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);
+
+
+    /* Case of an indirect item. */
+    {
+	int                   n_unfm_number,    /* Number of the item unformatted nodes. */
+	    n_counter,
+	    n_blk_size;
+	__u32               * p_n_unfm_pointer; /* Pointer to the unformatted node number. */
+	__u32 tmp;
+	struct item_head      s_ih;           /* Item header. */
+	char                  c_mode;           /* Returned mode of the balance. */
+	int need_research;
+
+
+	n_blk_size = p_s_sb->s_blocksize;
+
+	/* Search for the indirect item of the needed object until there are no more unformatted nodes to be removed. */
+	do  {
+	    need_research = 0;
+            p_s_bh = PATH_PLAST_BUFFER(p_s_path);
+	    /* Copy indirect item header to a temp variable. */
+	    copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+	    /* Calculate number of unformatted nodes in this item. */
+	    n_unfm_number = I_UNFM_NUM(&s_ih);
+
+	    RFALSE( ! is_indirect_le_ih(&s_ih) || ! n_unfm_number ||
+		    pos_in_item (p_s_path) + 1 !=  n_unfm_number,
+		    "PAP-5240: invalid item %h "
+		    "n_unfm_number = %d *p_n_pos_in_item = %d", 
+		    &s_ih, n_unfm_number, pos_in_item (p_s_path));
+
+	    /* Calculate balance mode and position in the item to remove unformatted nodes. */
+	    if ( n_new_file_length == max_reiserfs_offset (inode) ) {/* Case of delete. */
+		pos_in_item (p_s_path) = 0;
+		*p_n_cut_size = -(IH_SIZE + ih_item_len(&s_ih));
+		c_mode = M_DELETE;
+	    }
+	    else  { /* Case of truncate. */
+		if ( n_new_file_length < le_ih_k_offset (&s_ih) )  {
+		    pos_in_item (p_s_path) = 0;
+		    *p_n_cut_size = -(IH_SIZE + ih_item_len(&s_ih));
+		    c_mode = M_DELETE; /* Delete this item. */
+		}
+		else  {
+		    /* indirect item must be truncated starting from *p_n_pos_in_item-th position */
+		    pos_in_item (p_s_path) = (n_new_file_length + n_blk_size - le_ih_k_offset (&s_ih) ) >> p_s_sb->s_blocksize_bits;
+
+		    RFALSE( pos_in_item (p_s_path) > n_unfm_number,
+			    "PAP-5250: invalid position in the item");
+
+		    /* Either convert last unformatted node of indirect item to direct item or increase
+		       its free space.  */
+		    if ( pos_in_item (p_s_path) == n_unfm_number )  {
+			*p_n_cut_size = 0; /* Nothing to cut. */
+			return M_CONVERT; /* Maybe convert last unformatted node to the direct item. */
+		    }
+		    /* Calculate size to cut. */
+		    *p_n_cut_size = -(ih_item_len(&s_ih) - pos_in_item(p_s_path) * UNFM_P_SIZE);
+
+		    c_mode = M_CUT;     /* Cut from this indirect item. */
+		}
+	    }
+
+	    RFALSE( n_unfm_number <= pos_in_item (p_s_path),
+		    "PAP-5260: invalid position in the indirect item");
+
+	    /* pointers to be cut */
+	    n_unfm_number -= pos_in_item (p_s_path);
+	    /* Set pointer to the last unformatted node pointer that is to be cut. */
+	    p_n_unfm_pointer = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1 - *p_n_removed;
+
+
+	    /* We go through the unformatted node pointers of the indirect
+	       item and look for the unformatted nodes in the cache. If we
+	       find some of them, we free them, zero the corresponding
+	       indirect item entries and log the buffer containing that
+	       indirect item. For this we need to prepare the last path
+	       element for logging. If some unformatted node has b_count > 1
+	       we must not free this unformatted node since it is in use. */
+	    reiserfs_prepare_for_journal(p_s_sb, p_s_bh, 1);
+	    // note: path could be changed, first line in for loop takes care
+	    // of it
+
+	    for (n_counter = *p_n_removed;
+		 n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {
+
+		cond_resched();
+		if (item_moved (&s_ih, p_s_path)) {
+		    need_research = 1 ;
+		    break;
+		}
+		RFALSE( p_n_unfm_pointer < (__u32 *)B_I_PITEM(p_s_bh, &s_ih) ||
+			p_n_unfm_pointer > (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1,
+			"vs-5265: pointer out of range");
+
+		/* Hole, nothing to remove. */
+		if ( ! get_block_num(p_n_unfm_pointer,0) )  {
+			(*p_n_removed)++;
+			continue;
+		}
+
+		(*p_n_removed)++;
+
+		tmp = get_block_num(p_n_unfm_pointer,0);
+		put_block_num(p_n_unfm_pointer, 0, 0);
+		journal_mark_dirty (th, p_s_sb, p_s_bh);
+		reiserfs_free_block(th, inode, tmp, 1);
+		if ( item_moved (&s_ih, p_s_path) )  {
+			need_research = 1;
+			break ;
+		}
+	    }
+
+	    /* a trick.  If the buffer has been logged, this
+	    ** will do nothing.  If we've broken the loop without
+	    ** logging it, it will restore the buffer
+	    **
+	    */
+	    reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh);
+
+	    /* This loop can be optimized. */
+	} while ( (*p_n_removed < n_unfm_number || need_research) &&
+		  search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND );
+
+	RFALSE( *p_n_removed < n_unfm_number, 
+		"PAP-5310: indirect item is not found");
+	RFALSE( item_moved (&s_ih, p_s_path), 
+		"after while, comp failed, retry") ;
+
+	if (c_mode == M_CUT)
+	    pos_in_item (p_s_path) *= UNFM_P_SIZE;
+	return c_mode;
+    }
+}
+
+/* Calculate number of bytes which will be deleted or cut during balance */
+static int calc_deleted_bytes_number(
+    struct  tree_balance  * p_s_tb,
+    char                    c_mode
+    ) {
+    int                     n_del_size;
+    struct  item_head     * p_le_ih = PATH_PITEM_HEAD(p_s_tb->tb_path);
+
+    if ( is_statdata_le_ih (p_le_ih) )
+	return 0;
+
+    n_del_size = ( c_mode == M_DELETE ) ? ih_item_len(p_le_ih) : -p_s_tb->insert_size[0];
+    if ( is_direntry_le_ih (p_le_ih) ) {
+	// return EMPTY_DIR_SIZE; /* We delete empty directories only. */
+	// we can't use EMPTY_DIR_SIZE, as old format dirs have a different
+	// empty size.  ick. FIXME, is this right?
+	//
+	return n_del_size ;
+    }
+
+    if ( is_indirect_le_ih (p_le_ih) )
+	n_del_size = (n_del_size/UNFM_P_SIZE)*
+	  (PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_size);// - get_ih_free_space (p_le_ih);
+    return n_del_size;
+}
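+
+/* Illustrative numbers for the indirect case above: deleting an indirect
+   item of length 48 bytes (twelve 4-byte unformatted node pointers) on a
+   4KB-block filesystem counts 12 * 4096 = 49152 deleted bytes. */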
+
+static void init_tb_struct(
+    struct reiserfs_transaction_handle *th,
+    struct tree_balance * p_s_tb,
+    struct super_block  * p_s_sb,
+    struct path         * p_s_path,
+    int                   n_size
+    ) {
+
+    BUG_ON (!th->t_trans_id);
+
+    memset (p_s_tb,'\0',sizeof(struct tree_balance));
+    p_s_tb->transaction_handle = th ;
+    p_s_tb->tb_sb = p_s_sb;
+    p_s_tb->tb_path = p_s_path;
+    PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
+    PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
+    p_s_tb->insert_size[0] = n_size;
+}
+
+
+
+void padd_item (char * item, int total_length, int length)
+{
+    int i;
+
+    for (i = total_length; i > length; )
+	item [--i] = 0;
+}
+
+#ifdef REISERQUOTA_DEBUG
+char key2type(struct reiserfs_key *ih)
+{
+  if (is_direntry_le_key(2, ih))
+    return 'd';
+  if (is_direct_le_key(2, ih))
+    return 'D';
+  if (is_indirect_le_key(2, ih))
+    return 'i';
+  if (is_statdata_le_key(2, ih))
+    return 's';
+  return 'u';
+}
+
+char head2type(struct item_head *ih)
+{
+  if (is_direntry_le_ih(ih))
+    return 'd';
+  if (is_direct_le_ih(ih))
+    return 'D';
+  if (is_indirect_le_ih(ih))
+    return 'i';
+  if (is_statdata_le_ih(ih))
+    return 's';
+  return 'u';
+}
+#endif
+
+/* Delete object item. */
+int reiserfs_delete_item (struct reiserfs_transaction_handle *th, 
+			  struct path * p_s_path, /* Path to the deleted item. */
+			  const struct cpu_key * p_s_item_key, /* Key to search for the deleted item.  */
+			  struct inode * p_s_inode,/* inode is here just to update i_blocks and quotas */
+			  struct buffer_head  * p_s_un_bh)    /* NULL or unformatted node pointer.    */
+{
+    struct super_block * p_s_sb = p_s_inode->i_sb;
+    struct tree_balance   s_del_balance;
+    struct item_head      s_ih;
+    struct item_head      *q_ih;
+    int			  quota_cut_bytes;
+    int                   n_ret_value,
+	n_del_size,
+	n_removed;
+
+#ifdef CONFIG_REISERFS_CHECK
+    char                  c_mode;
+    int			n_iter = 0;
+#endif
+
+    BUG_ON (!th->t_trans_id);
+
+    init_tb_struct(th, &s_del_balance, p_s_sb, p_s_path, 0/*size is unknown*/);
+
+    while ( 1 ) {
+	n_removed = 0;
+
+#ifdef CONFIG_REISERFS_CHECK
+	n_iter++;
+	c_mode =
+#endif
+	    prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed, &n_del_size, max_reiserfs_offset (p_s_inode));
+
+	RFALSE( c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
+
+	copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+	s_del_balance.insert_size[0] = n_del_size;
+
+	n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
+	if ( n_ret_value != REPEAT_SEARCH )
+	    break;
+
+	PROC_INFO_INC( p_s_sb, delete_item_restarted );
+
+	// file system changed, repeat search
+	n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
+	if (n_ret_value == IO_ERROR)
+	    break;
+	if (n_ret_value == FILE_NOT_FOUND) {
+	    reiserfs_warning (p_s_sb, "vs-5340: reiserfs_delete_item: "
+			      "no items of the file %K found", p_s_item_key);
+	    break;
+	}
+    } /* while (1) */
+
+    if ( n_ret_value != CARRY_ON ) {
+	unfix_nodes(&s_del_balance);
+	return 0;
+    }
+
+    // reiserfs_delete_item returns the item length on success
+    n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
+    q_ih = get_ih(p_s_path) ;
+    quota_cut_bytes = ih_item_len(q_ih) ;
+
+    /* hack so the quota code doesn't have to guess if the file
+    ** has a tail.  On tail insert, we allocate quota for 1 unformatted node.
+    ** We test the offset because the tail might have been
+    ** split into multiple items, and we only want to decrement for
+    ** the unfm node once
+    */
+    if (!S_ISLNK (p_s_inode->i_mode) && is_direct_le_ih(q_ih)) {
+        if ((le_ih_k_offset(q_ih) & (p_s_sb->s_blocksize - 1)) == 1) {
+            quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE;
+        } else {
+	    quota_cut_bytes = 0 ;
+	}
+    }
+
+    if ( p_s_un_bh )  {
+	int off;
+        char *data ;
+
+	/* We are in direct2indirect conversion, so move tail contents
+           to the unformatted node */
+	/* note, we do the copy before preparing the buffer because we
+	** don't care about the contents of the unformatted node yet.
+	** the only thing we really care about is the direct item's data
+	** is in the unformatted node.
+	**
+	** Otherwise, we would have to call reiserfs_prepare_for_journal on
+	** the unformatted node, which might schedule, meaning we'd have to
+	** loop all the way back up to the start of the while loop.
+	**
+	** The unformatted node must be dirtied later on.  We can't be
+	** sure here if the entire tail has been deleted yet.
+        **
+        ** p_s_un_bh is from the page cache (all unformatted nodes are
+        ** from the page cache) and might be a highmem page.  So, we
+        ** can't use p_s_un_bh->b_data.
+	** -clm
+	*/
+
+        data = kmap_atomic(p_s_un_bh->b_page, KM_USER0);
+	off = ((le_ih_k_offset (&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+	memcpy(data + off,
+	       B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);
+	kunmap_atomic(data, KM_USER0);
+    }
+    /* Perform balancing after all resources have been collected at once. */ 
+    do_balance(&s_del_balance, NULL, NULL, M_DELETE);
+
+#ifdef REISERQUOTA_DEBUG
+    reiserfs_debug (p_s_sb, REISERFS_DEBUG_CODE, "reiserquota delete_item(): freeing %u, id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
+#endif
+    DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+
+    /* Return deleted body length */
+    return n_ret_value;
+}
+
+
+/* Summary Of Mechanisms For Handling Collisions Between Processes:
+
+ deletion of the body of the object is performed by iput(), with the
+ result that if multiple processes are operating on a file, the
+ deletion of the body of the file is deferred until the last process
+ that has an open inode performs its iput().
+
+ writes and truncates are protected from collisions by use of
+ semaphores.
+
+ creates, linking, and mknod are protected from collisions with other
+ processes by making the reiserfs_add_entry() the last step in the
+ creation, and then rolling back all changes if there was a collision.
+ - Hans
+*/
+
+
+/* this deletes item which never gets split */
+void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
+				 struct inode *inode,
+				 struct reiserfs_key * key)
+{
+    struct tree_balance tb;
+    INITIALIZE_PATH (path);
+    int item_len = 0;
+    int tb_init = 0 ;
+    struct cpu_key cpu_key;
+    int retval;
+    int quota_cut_bytes = 0;
+
+    BUG_ON (!th->t_trans_id);
+    
+    le_key2cpu_key (&cpu_key, key);
+    
+    while (1) {
+	retval = search_item (th->t_super, &cpu_key, &path);
+	if (retval == IO_ERROR) {
+	    reiserfs_warning (th->t_super,
+			      "vs-5350: reiserfs_delete_solid_item: "
+			      "i/o failure occurred trying to delete %K",
+			      &cpu_key);
+	    break;
+	}
+	if (retval != ITEM_FOUND) {
+	    pathrelse (&path);
+	    // No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
+	    if ( !( (unsigned long long) GET_HASH_VALUE (le_key_k_offset (le_key_version (key), key)) == 0 && \
+		 (unsigned long long) GET_GENERATION_NUMBER (le_key_k_offset (le_key_version (key), key)) == 1 ) )
+		reiserfs_warning (th->t_super, "vs-5355: reiserfs_delete_solid_item: %k not found", key);
+	    break;
+	}
+	if (!tb_init) {
+	    tb_init = 1 ;
+	    item_len = ih_item_len( PATH_PITEM_HEAD(&path) );
+	    init_tb_struct (th, &tb, th->t_super, &path, - (IH_SIZE + item_len));
+	}
+	quota_cut_bytes = ih_item_len(PATH_PITEM_HEAD(&path)) ;
+
+	retval = fix_nodes (M_DELETE, &tb, NULL, NULL);
+	if (retval == REPEAT_SEARCH) {
+	    PROC_INFO_INC( th -> t_super, delete_solid_item_restarted );
+	    continue;
+	}
+
+	if (retval == CARRY_ON) {
+	    do_balance (&tb, NULL, NULL, M_DELETE);
+	    if (inode) {	/* Should we count quota for item? (we don't count quotas for save-links) */
+#ifdef REISERQUOTA_DEBUG
+		reiserfs_debug (th->t_super, REISERFS_DEBUG_CODE, "reiserquota delete_solid_item(): freeing %u id=%u type=%c", quota_cut_bytes, inode->i_uid, key2type(key));
+#endif
+		DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes);
+	    }
+	    break;
+	}
+
+	// IO_ERROR, NO_DISK_SPACE, etc
+	reiserfs_warning (th->t_super, "vs-5360: reiserfs_delete_solid_item: "
+			  "could not delete %K due to fix_nodes failure", &cpu_key);
+	unfix_nodes (&tb);
+	break;
+    }
+
+    reiserfs_check_path(&path) ;
+}
+
+
+int reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * inode)
+{
+    int err;
+    inode->i_size = 0;
+    BUG_ON (!th->t_trans_id);
+
+    /* for directory this deletes item containing "." and ".." */
+    err = reiserfs_do_truncate (th, inode, NULL, 0/*no timestamp updates*/);
+    if (err)
+        return err;
+    
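+    /* bump the filesystem-wide inode generation counter so that inodes
+       created later (possibly reusing this objectid) are handed a
+       different generation number */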
+#if defined( USE_INODE_GENERATION_COUNTER )
+    if( !old_format_only ( th -> t_super ) )
+      {
+       __u32 *inode_generation;
+       
+       inode_generation = 
+         &REISERFS_SB(th -> t_super) -> s_rs -> s_inode_generation;
+       *inode_generation = cpu_to_le32( le32_to_cpu( *inode_generation ) + 1 );
+      }
+/* USE_INODE_GENERATION_COUNTER */
+#endif
+    reiserfs_delete_solid_item (th, inode, INODE_PKEY (inode));
+
+    return err;
+}
+
+static void
+unmap_buffers(struct page *page, loff_t pos) {
+    struct buffer_head *bh ;
+    struct buffer_head *head ;
+    struct buffer_head *next ;
+    unsigned long tail_index ;
+    unsigned long cur_index ;
+
+    if (page) {
+	if (page_has_buffers(page)) {
+	    tail_index = pos & (PAGE_CACHE_SIZE - 1) ;
+	    cur_index = 0 ;
+	    head = page_buffers(page) ;
+	    bh = head ;
+	    do {
+		next = bh->b_this_page ;
+
+		/* we want to unmap the buffers that contain the tail, and
+		** all the buffers after it (since the tail must be at the
+		** end of the file).  We don't want to unmap file data
+		** before the tail, since it might be dirty and waiting to
+		** reach disk
+		*/
+		cur_index += bh->b_size ;
+		if (cur_index > tail_index) {
+		    reiserfs_unmap_buffer(bh) ;
+		}
+		bh = next ;
+	    } while (bh != head) ;
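+	    /* block size == page size: the single buffer covering this page
+	    ** was just unmapped above, so nothing on the page still has to
+	    ** reach disk and the page can be marked clean
+	    */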
+	    if ( PAGE_SIZE == bh->b_size ) {
+		clear_page_dirty(page);
+	    }
+	}
+    }
+}
+
+static int maybe_indirect_to_direct (struct reiserfs_transaction_handle *th, 
+			      struct inode * p_s_inode,
+			      struct page *page, 
+			      struct path         * p_s_path,
+			      const struct cpu_key      * p_s_item_key,
+			      loff_t         n_new_file_size,
+			      char                * p_c_mode
+			      ) {
+    struct super_block * p_s_sb = p_s_inode->i_sb;
+    int n_block_size = p_s_sb->s_blocksize;
+    int cut_bytes;
+    BUG_ON (!th->t_trans_id);
+
+    if (n_new_file_size != p_s_inode->i_size)
+	BUG ();
+
+    /* the page being sent in could be NULL if there was an i/o error
+    ** reading in the last block.  The user will hit problems trying to
+    ** read the file, but for now we just skip the indirect2direct
+    */
+    if (atomic_read(&p_s_inode->i_count) > 1 || 
+        !tail_has_to_be_packed (p_s_inode) || 
+	!page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
+	// leave tail in an unformatted node	
+	*p_c_mode = M_SKIP_BALANCING;
+	cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1));
+	pathrelse(p_s_path);
+	return cut_bytes;
+    }
+    /* Perform the conversion to a direct_item. */
+    /*return indirect_to_direct (p_s_inode, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);*/
+    return indirect2direct (th, p_s_inode, page, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);
+}
+
+
+/* We did an indirect_to_direct conversion and inserted the direct
+   item successfully, but there was no disk space to cut the unfm
+   pointer being converted. Therefore we have to delete the inserted
+   direct item(s) */
+static void indirect_to_direct_roll_back (struct reiserfs_transaction_handle *th, struct inode * inode, struct path * path)
+{
+    struct cpu_key tail_key;
+    int tail_len;
+    int removed;
+    BUG_ON (!th->t_trans_id);
+
+    make_cpu_key (&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);// !!!!
+    tail_key.key_length = 4;
+
+    tail_len = (cpu_key_k_offset (&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
+    while (tail_len) {
+	/* look for the last byte of the tail */
+	if (search_for_position_by_key (inode->i_sb, &tail_key, path) == POSITION_NOT_FOUND)
+	    reiserfs_panic (inode->i_sb, "vs-5615: indirect_to_direct_roll_back: found invalid item");
+	RFALSE( path->pos_in_item != ih_item_len(PATH_PITEM_HEAD (path)) - 1,
+	        "vs-5616: appended bytes found");
+	PATH_LAST_POSITION (path) --;
+	
+	removed = reiserfs_delete_item (th, path, &tail_key, inode, NULL/*unbh not needed*/);
+	RFALSE( removed <= 0 || removed > tail_len,
+	        "vs-5617: there was tail %d bytes, removed item length %d bytes",
+                tail_len, removed);
+	tail_len -= removed;
+	set_cpu_key_k_offset (&tail_key, cpu_key_k_offset (&tail_key) - removed);
+    }
+    reiserfs_warning (inode->i_sb, "indirect_to_direct_roll_back: indirect_to_direct conversion has been rolled back due to lack of disk space");
+    //mark_file_without_tail (inode);
+    mark_inode_dirty (inode);
+}
+
+
+/* (Truncate or cut entry) or delete object item. Returns < 0 on failure */
+int reiserfs_cut_from_item (struct reiserfs_transaction_handle *th, 
+			    struct path * p_s_path,
+			    struct cpu_key * p_s_item_key,
+			    struct inode * p_s_inode,
+			    struct page *page, 
+			    loff_t n_new_file_size)
+{
+    struct super_block * p_s_sb = p_s_inode->i_sb;
+    /* Every function which is going to call do_balance must first
+       create a tree_balance structure.  Then it must fill up this
+       structure by using the init_tb_struct and fix_nodes functions.
+       After that we can make tree balancing. */
+    struct tree_balance s_cut_balance;
+    struct item_head *p_le_ih;
+    int n_cut_size = 0,        /* Amount to be cut. */
+	n_ret_value = CARRY_ON,
+	n_removed = 0,     /* Number of the removed unformatted nodes. */
+	n_is_inode_locked = 0;
+    char                c_mode;            /* Mode of the balance. */
+    int retval2 = -1;
+    int quota_cut_bytes;
+    loff_t tail_pos = 0;
+
+    BUG_ON (!th->t_trans_id);
+    
+    init_tb_struct(th, &s_cut_balance, p_s_inode->i_sb, p_s_path, n_cut_size);
+
+
+    /* Repeat this loop until we either cut the item without needing
+       to balance, or fix_nodes completes without a schedule occurring */
+    while ( 1 ) {
+	/* Determine the balance mode, position of the first byte to
+	   be cut, and size to be cut.  In case of the indirect item
+	   free unformatted nodes which are pointed to by the cut
+	   pointers. */
+      
+	c_mode = prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed, 
+					   &n_cut_size, n_new_file_size);
+	if ( c_mode == M_CONVERT )  {
+	    /* convert last unformatted node to direct item or leave
+               tail in the unformatted node */
+	    RFALSE( n_ret_value != CARRY_ON, "PAP-5570: can not convert twice");
+
+	    n_ret_value = maybe_indirect_to_direct (th, p_s_inode, page, p_s_path, p_s_item_key,
+						    n_new_file_size, &c_mode);
+	    if ( c_mode == M_SKIP_BALANCING )
+		/* tail has been left in the unformatted node */
+		return n_ret_value;
+
+	    n_is_inode_locked = 1;
+	  
+	    /* Removing the last unformatted node will change the value we
+               have to return to truncate. Save it */
+	    retval2 = n_ret_value;
+	    /*retval2 = p_s_sb->s_blocksize - (n_new_file_size & (p_s_sb->s_blocksize - 1));*/
+	  
+	    /* So, we have performed the first part of the conversion:
+	       inserting the new direct item.  Now we are removing the
+	       last unformatted node pointer. Set key to search for
+	       it. */
+      	    set_cpu_key_k_type (p_s_item_key, TYPE_INDIRECT);
+	    p_s_item_key->key_length = 4;
+	    n_new_file_size -= (n_new_file_size & (p_s_sb->s_blocksize - 1));
+	    tail_pos = n_new_file_size;
+	    set_cpu_key_k_offset (p_s_item_key, n_new_file_size + 1);
+	    if ( search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND ){
+		print_block (PATH_PLAST_BUFFER (p_s_path), 3, PATH_LAST_POSITION (p_s_path) - 1, PATH_LAST_POSITION (p_s_path) + 1);
+		reiserfs_panic(p_s_sb, "PAP-5580: reiserfs_cut_from_item: item to convert does not exist (%K)", p_s_item_key);
+	    }
+	    continue;
+	}
+	if (n_cut_size == 0) {
+	    pathrelse (p_s_path);
+	    return 0;
+	}
+
+	s_cut_balance.insert_size[0] = n_cut_size;
+	
+	n_ret_value = fix_nodes(c_mode, &s_cut_balance, NULL, NULL);
+      	if ( n_ret_value != REPEAT_SEARCH )
+	    break;
+	
+	PROC_INFO_INC( p_s_sb, cut_from_item_restarted );
+
+	n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
+	if (n_ret_value == POSITION_FOUND)
+	    continue;
+
+	reiserfs_warning (p_s_sb, "PAP-5610: reiserfs_cut_from_item: item %K not found", p_s_item_key);
+	unfix_nodes (&s_cut_balance);
+	return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
+    } /* while */
+  
+    // check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
+    if ( n_ret_value != CARRY_ON ) {
+	if ( n_is_inode_locked ) {
+	    // FIXME: this seems to be not needed: we are always able
+	    // to cut item
+	    indirect_to_direct_roll_back (th, p_s_inode, p_s_path);
+	}
+	if (n_ret_value == NO_DISK_SPACE)
+	    reiserfs_warning (p_s_sb, "NO_DISK_SPACE");
+	unfix_nodes (&s_cut_balance);
+	return -EIO;
+    }
+
+    /* go ahead and perform balancing */
+    
+    RFALSE( c_mode == M_PASTE || c_mode == M_INSERT, "invalid mode");
+
+    /* Calculate number of bytes that need to be cut from the item. */
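+    /* M_DELETE removes the whole item, so its full length is freed; for a
+       cut, insert_size[0] is negative (the number of bytes being cut), so
+       negate it. */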
+    quota_cut_bytes = ( c_mode == M_DELETE ) ? ih_item_len(get_ih(p_s_path)) : -s_cut_balance.insert_size[0];
+    if (retval2 == -1)
+	n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode);
+    else
+	n_ret_value = retval2;
+
+
+    /* For direct items, we only change the quota when deleting the last
+    ** item.
+    */
+    p_le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);
+    if (!S_ISLNK (p_s_inode->i_mode) && is_direct_le_ih(p_le_ih)) {
+        if (c_mode == M_DELETE &&
+	   (le_ih_k_offset (p_le_ih) & (p_s_sb->s_blocksize - 1)) == 1 ) {
+	    // FIXME: this is to keep 3.5 happy
+	    REISERFS_I(p_s_inode)->i_first_direct_byte = U32_MAX;
+	    quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE ;
+        } else {
+	    quota_cut_bytes = 0 ;
+	}
+    }
+#ifdef CONFIG_REISERFS_CHECK
+    if (n_is_inode_locked) {
+	struct item_head * le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);
+	/* we are going to complete the indirect2direct conversion. Make
+           sure that we remove exactly the last unformatted node pointer
+           of the item */
+	if (!is_indirect_le_ih (le_ih))
+	    reiserfs_panic (p_s_sb, "vs-5652: reiserfs_cut_from_item: "
+			    "item must be indirect %h", le_ih);
+
+	if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
+	    reiserfs_panic (p_s_sb, "vs-5653: reiserfs_cut_from_item: "
+			    "completing indirect2direct conversion indirect item %h "
+			    "being deleted must be 4 bytes long", le_ih);
+
+	if (c_mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
+	    reiserfs_panic (p_s_sb, "vs-5654: reiserfs_cut_from_item: "
+			    "can not complete indirect2direct conversion of %h (CUT, insert_size==%d)",
+			    le_ih, s_cut_balance.insert_size[0]);
+	}
+	/* it would be useful to make sure that the right neighboring
+           item is a direct item of this file */
+    }
+#endif
+    
+    do_balance(&s_cut_balance, NULL, NULL, c_mode);
+    if ( n_is_inode_locked ) {
+	/* we've done an indirect->direct conversion.  when the data block
+	** was freed, it was removed from the list of blocks that must
+	** be flushed before the transaction commits, so make sure to
+	** unmap and invalidate it
+	*/
+	unmap_buffers(page, tail_pos);
+	REISERFS_I(p_s_inode)->i_flags &= ~i_pack_on_close_mask ;
+    }
+#ifdef REISERQUOTA_DEBUG
+    reiserfs_debug (p_s_inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota cut_from_item(): freeing %u id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, '?');
+#endif
+    DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+    return n_ret_value;
+}
+
+static void truncate_directory (struct reiserfs_transaction_handle *th, struct inode * inode)
+{
+    BUG_ON (!th->t_trans_id);
+    if (inode->i_nlink)
+	reiserfs_warning (inode->i_sb,
+			  "vs-5655: truncate_directory: link count != 0");
+
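+    /* point the inode key at the directory item so that
+       reiserfs_delete_solid_item removes the item holding "." and "..",
+       then restore the key to its stat-data form below */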
+    set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), DOT_OFFSET);
+    set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_DIRENTRY);
+    reiserfs_delete_solid_item (th, inode, INODE_PKEY (inode));
+    reiserfs_update_sd(th, inode) ;
+    set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), SD_OFFSET);
+    set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_STAT_DATA);    
+}
+
+
+
+
+/* Truncate file to the new size. Note, this must be called with a transaction
+   already started */
+int reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
+			   struct  inode * p_s_inode, /* ->i_size contains new
+                                                         size */
+			   struct page *page, /* up to date for last block */
+			   int update_timestamps  /* when it is called by
+						     file_release to convert
+						     the tail - no timestamps
+						     should be updated */
+    ) {
+    INITIALIZE_PATH (s_search_path);       /* Path to the current object item. */
+    struct item_head    * p_le_ih;         /* Pointer to an item header. */
+    struct cpu_key      s_item_key;     /* Key to search for a previous file item. */
+    loff_t         n_file_size,    /* Old file size. */
+	n_new_file_size;/* New file size. */
+    int                   n_deleted;      /* Number of deleted or truncated bytes. */
+    int retval;
+    int err = 0;
+
+    BUG_ON (!th->t_trans_id);
+    if ( ! (S_ISREG(p_s_inode->i_mode) || S_ISDIR(p_s_inode->i_mode) || S_ISLNK(p_s_inode->i_mode)) )
+	return 0;
+
+    if (S_ISDIR(p_s_inode->i_mode)) {
+	// deletion of directory - no need to update timestamps
+	truncate_directory (th, p_s_inode);
+	return 0;
+    }
+
+    /* Get new file size. */
+    n_new_file_size = p_s_inode->i_size;
+
+    // FIXME: note, that key type is unimportant here
+    make_cpu_key (&s_item_key, p_s_inode, max_reiserfs_offset (p_s_inode), TYPE_DIRECT, 3);
+
+    retval = search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path);
+    if (retval == IO_ERROR) {
+	reiserfs_warning (p_s_inode->i_sb, "vs-5657: reiserfs_do_truncate: "
+			  "i/o failure occurred trying to truncate %K", &s_item_key);
+        err = -EIO;
+        goto out;
+    }
+    if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
+	reiserfs_warning (p_s_inode->i_sb, "PAP-5660: reiserfs_do_truncate: "
+			  "wrong result %d of search for %K", retval, &s_item_key);
+
+        err = -EIO;
+        goto out;
+    }
+
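+    /* the search used the maximal possible offset, so it positioned us just
+       past the last unit of the file's last item; step back onto that unit */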
+    s_search_path.pos_in_item --;
+
+    /* Get real file size (total length of all file items) */
+    p_le_ih = PATH_PITEM_HEAD(&s_search_path);
+    if ( is_statdata_le_ih (p_le_ih) )
+	n_file_size = 0;
+    else {
+	loff_t offset = le_ih_k_offset (p_le_ih);
+	int bytes = op_bytes_number (p_le_ih,p_s_inode->i_sb->s_blocksize);
+
+	/* this may differ from the real file size: if the last direct item
+           had no padding zeros and the last unformatted node had no free
+           space, the file would have exactly this size */
+	n_file_size = offset + bytes - 1;
+    }
+    /*
+     * are we doing a full truncate or delete, if so
+     * kick in the reada code
+     */
+    if (n_new_file_size == 0)
+        s_search_path.reada = PATH_READA | PATH_READA_BACK;
+
+    if ( n_file_size == 0 || n_file_size < n_new_file_size ) {
+	goto update_and_out ;
+    }
+
+    /* Update key to search for the last file item. */
+    set_cpu_key_k_offset (&s_item_key, n_file_size);
+
+    do  {
+	/* Cut or delete file item. */
+	n_deleted = reiserfs_cut_from_item(th, &s_search_path, &s_item_key, p_s_inode,  page, n_new_file_size);
+	if (n_deleted < 0) {
+	    reiserfs_warning (p_s_inode->i_sb, "vs-5665: reiserfs_do_truncate: reiserfs_cut_from_item failed");
+	    reiserfs_check_path(&s_search_path) ;
+	    return 0;
+	}
+
+	RFALSE( n_deleted > n_file_size,
+		"PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K",
+		n_deleted, n_file_size, &s_item_key);
+
+	/* Change key to search the last file item. */
+	n_file_size -= n_deleted;
+
+	set_cpu_key_k_offset (&s_item_key, n_file_size);
+
+	/* Loop while there are bytes to truncate and the previous file item is present in the tree. */
+
+	/*
+	** This loop could take a really long time, and could log 
+	** many more blocks than a transaction can hold.  So, we do a polite
+	** journal end here, and if the transaction needs ending, we make
+	** sure the file is consistent before ending the current trans
+	** and starting a new one
+	*/
+        if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
+	  int orig_len_alloc = th->t_blocks_allocated ;
+	  decrement_counters_in_path(&s_search_path) ;
+
+	  if (update_timestamps) {
+	      p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME_SEC;
+	  } 
+	  reiserfs_update_sd(th, p_s_inode) ;
+
+	  err = journal_end(th, p_s_inode->i_sb, orig_len_alloc) ;
+	  if (err)
+	    goto out;
+	  err = journal_begin (th, p_s_inode->i_sb,
+                               JOURNAL_PER_BALANCE_CNT * 6);
+	  if (err)
+	    goto out;
+	  reiserfs_update_inode_transaction(p_s_inode) ;
+	}
+    } while ( n_file_size > ROUND_UP (n_new_file_size) &&
+	      search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path) == POSITION_FOUND )  ;
+
+    RFALSE( n_file_size > ROUND_UP (n_new_file_size),
+	    "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d",
+	    n_new_file_size, n_file_size, s_item_key.on_disk_key.k_objectid);
+
+update_and_out:
+    if (update_timestamps) {
+	// this is truncate, not file closing
+	    p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME_SEC;
+    }
+    reiserfs_update_sd (th, p_s_inode);
+
+out:
+    pathrelse(&s_search_path) ;
+    return err;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+// this makes sure that we __append__, and do not overwrite or add holes
+static void check_research_for_paste (struct path * path, 
+				      const struct cpu_key * p_s_key)
+{
+    struct item_head * found_ih = get_ih (path);
+    
+    if (is_direct_le_ih (found_ih)) {
+	if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_last_bh (path)->b_size) !=
+	    cpu_key_k_offset (p_s_key) ||
+	    op_bytes_number (found_ih, get_last_bh (path)->b_size) != pos_in_item (path))
+	    reiserfs_panic (NULL, "PAP-5720: check_research_for_paste: "
+			    "found direct item %h or position (%d) does not match to key %K",
+			    found_ih, pos_in_item (path), p_s_key);
+    }
+    if (is_indirect_le_ih (found_ih)) {
+	if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_last_bh (path)->b_size) != cpu_key_k_offset (p_s_key) || 
+	    I_UNFM_NUM (found_ih) != pos_in_item (path) ||
+	    get_ih_free_space (found_ih) != 0)
+	    reiserfs_panic (NULL, "PAP-5730: check_research_for_paste: "
+			    "found indirect item (%h) or position (%d) does not match to key (%K)",
+			    found_ih, pos_in_item (path), p_s_key);
+    }
+}
+#endif /* CONFIG_REISERFS_CHECK */
+
+
+/* Paste bytes into an existing item. Returns the number of bytes pasted into the item. */
+int reiserfs_paste_into_item (struct reiserfs_transaction_handle *th, 
+			      struct path         * p_s_search_path,	/* Path to the pasted item.          */
+			      const struct cpu_key      * p_s_key,        	/* Key to search for the needed item.*/
+			      struct inode	  * inode,		/* Inode item belongs to */
+			      const char          * p_c_body,       	/* Pointer to the bytes to paste.    */
+			      int                   n_pasted_size)  	/* Size of pasted bytes.             */
+{
+    struct tree_balance s_paste_balance;
+    int                 retval;
+    int			fs_gen;
+
+    BUG_ON (!th->t_trans_id);
+
+    fs_gen = get_generation(inode->i_sb) ;
+
+#ifdef REISERQUOTA_DEBUG
+    reiserfs_debug (inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): allocating %u id=%u type=%c", n_pasted_size, inode->i_uid, key2type(&(p_s_key->on_disk_key)));
+#endif
+
+    if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+	pathrelse(p_s_search_path);
+	return -EDQUOT;
+    }
+    init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path, n_pasted_size);
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    s_paste_balance.key = p_s_key->on_disk_key;
+#endif
+
+    /* DQUOT_* can schedule, must check before the fix_nodes */
+    if (fs_changed(fs_gen, inode->i_sb)) {
+	goto search_again;
+    }
+
+    while ((retval = fix_nodes(M_PASTE, &s_paste_balance, NULL, p_c_body)) ==
+REPEAT_SEARCH ) {
+search_again:
+	/* file system changed while we were in the fix_nodes */
+	PROC_INFO_INC( th -> t_super, paste_into_item_restarted );
+	retval = search_for_position_by_key (th->t_super, p_s_key, p_s_search_path);
+	if (retval == IO_ERROR) {
+	    retval = -EIO ;
+	    goto error_out ;
+	}
+	if (retval == POSITION_FOUND) {
+	    reiserfs_warning (inode->i_sb, "PAP-5710: reiserfs_paste_into_item: entry or pasted byte (%K) exists", p_s_key);
+	    retval = -EEXIST ;
+	    goto error_out ;
+	}
+	
+#ifdef CONFIG_REISERFS_CHECK
+	check_research_for_paste (p_s_search_path, p_s_key);
+#endif
+    }
+
+    /* Perform balancing after all resources are collected by fix_nodes, and
+       accessing them will not risk triggering schedule. */
+    if ( retval == CARRY_ON ) {
+	do_balance(&s_paste_balance, NULL/*ih*/, p_c_body, M_PASTE);
+	return 0;
+    }
+    retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
+error_out:
+    /* this also releases the path */
+    unfix_nodes(&s_paste_balance);
+#ifdef REISERQUOTA_DEBUG
+    reiserfs_debug (inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): freeing %u id=%u type=%c", n_pasted_size, inode->i_uid, key2type(&(p_s_key->on_disk_key)));
+#endif
+    DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+    return retval ;
+}
+
+
+/* Insert new item into the buffer at the path. */
+int reiserfs_insert_item(struct reiserfs_transaction_handle *th, 
+			 struct path         * 	p_s_path,         /* Path to the inserted item.           */
+			 const struct cpu_key      * key,
+			 struct item_head    * 	p_s_ih,           /* Pointer to the item header to insert.*/
+			 struct inode        * inode,
+			 const char          * 	p_c_body)         /* Pointer to the bytes to insert.      */
+{
+    struct tree_balance s_ins_balance;
+    int                 retval;
+    int fs_gen = 0 ;
+    int quota_bytes = 0 ;
+
+    BUG_ON (!th->t_trans_id);
+
+    if (inode) {      /* Do we count quotas for item? */
+	fs_gen = get_generation(inode->i_sb);
+	quota_bytes = ih_item_len(p_s_ih);
+
+	/* hack so the quota code doesn't have to guess if the file has
+	 ** a tail; links are always tails, so there's no guessing needed
+	 */
+	if (!S_ISLNK (inode->i_mode) && is_direct_le_ih(p_s_ih)) {
+	    quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE ;
+	}
+#ifdef REISERQUOTA_DEBUG
+	reiserfs_debug (inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota insert_item(): allocating %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(p_s_ih));
+#endif
+	/* We can't dirty inode here. It would be immediately written but
+	 * appropriate stat item isn't inserted yet... */
+	if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+	    pathrelse(p_s_path);
+	    return -EDQUOT;
+	}
+    }
+    init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path, IH_SIZE + ih_item_len(p_s_ih));
+#ifdef DISPLACE_NEW_PACKING_LOCALITIES
+    s_ins_balance.key = key->on_disk_key;
+#endif
+    /* DQUOT_* can schedule, must check to be sure calling fix_nodes is safe */
+    if (inode && fs_changed(fs_gen, inode->i_sb)) {
+	goto search_again;
+    }
+
+    while ( (retval = fix_nodes(M_INSERT, &s_ins_balance, p_s_ih, p_c_body)) == REPEAT_SEARCH) {
+search_again:
+	/* file system changed while we were in the fix_nodes */
+	PROC_INFO_INC( th -> t_super, insert_item_restarted );
+	retval = search_item (th->t_super, key, p_s_path);
+	if (retval == IO_ERROR) {
+	    retval = -EIO;
+	    goto error_out ;
+	}
+	if (retval == ITEM_FOUND) {
+	    reiserfs_warning (th->t_super, "PAP-5760: reiserfs_insert_item: "
+			      "key %K already exists in the tree", key);
+	    retval = -EEXIST ;
+	    goto error_out; 
+	}
+    }
+
+    /* Perform balancing after all resources have been collected at once. */
+    if ( retval == CARRY_ON ) {
+	do_balance (&s_ins_balance, p_s_ih, p_c_body, M_INSERT);
+	return 0;
+    }
+
+    retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
+error_out:
+    /* also releases the path */
+    unfix_nodes(&s_ins_balance);
+#ifdef REISERQUOTA_DEBUG
+    reiserfs_debug (th->t_super, REISERFS_DEBUG_CODE, "reiserquota insert_item(): freeing %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(p_s_ih));
+#endif
+    if (inode)
+	DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes) ;
+    return retval; 
+}
+
+
+
+
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
new file mode 100644
index 0000000..bcdf243
--- /dev/null
+++ b/fs/reiserfs/super.c
@@ -0,0 +1,2148 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ *
+ * Trivial changes by Alan Cox to add the LFS fixes
+ *
+ * Trivial Changes:
+ * Rights granted to Hans Reiser to redistribute under other terms providing
+ * he accepts all liability including but not limited to patent, fitness
+ * for purpose, and direct or indirect claims arising from failure to perform.
+ *
+ * NO WARRANTY
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <asm/uaccess.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/namespace.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+
+struct file_system_type reiserfs_fs_type;
+
+static const char reiserfs_3_5_magic_string[] = REISERFS_SUPER_MAGIC_STRING;
+static const char reiserfs_3_6_magic_string[] = REISER2FS_SUPER_MAGIC_STRING;
+static const char reiserfs_jr_magic_string[] = REISER2FS_JR_SUPER_MAGIC_STRING;
+
+int is_reiserfs_3_5 (struct reiserfs_super_block * rs)
+{
+  return !strncmp (rs->s_v1.s_magic, reiserfs_3_5_magic_string,
+		   strlen (reiserfs_3_5_magic_string));
+}
+
+
+int is_reiserfs_3_6 (struct reiserfs_super_block * rs)
+{
+  return !strncmp (rs->s_v1.s_magic, reiserfs_3_6_magic_string,
+ 		   strlen (reiserfs_3_6_magic_string));
+}
+
+
+int is_reiserfs_jr (struct reiserfs_super_block * rs)
+{
+  return !strncmp (rs->s_v1.s_magic, reiserfs_jr_magic_string,
+ 		   strlen (reiserfs_jr_magic_string));
+}
+
+
+static int is_any_reiserfs_magic_string (struct reiserfs_super_block * rs)
+{
+  return (is_reiserfs_3_5 (rs) || is_reiserfs_3_6 (rs) ||
+	  is_reiserfs_jr (rs));
+}
+
+static int reiserfs_remount (struct super_block * s, int * flags, char * data);
+static int reiserfs_statfs (struct super_block * s, struct kstatfs * buf);
+
+static int reiserfs_sync_fs (struct super_block * s, int wait)
+{
+    if (!(s->s_flags & MS_RDONLY)) {
+        struct reiserfs_transaction_handle th;
+	reiserfs_write_lock(s);
+	if (!journal_begin(&th, s, 1))
+            if (!journal_end_sync(&th, s, 1))
+                reiserfs_flush_old_commits(s);
+	s->s_dirt = 0; /* Even if it's not true.
+                        * We'll loop forever in sync_supers otherwise */
+	reiserfs_write_unlock(s);
+    } else {
+        s->s_dirt = 0;
+    }
+    return 0;
+}
+
+static void reiserfs_write_super(struct super_block *s)
+{
+    reiserfs_sync_fs(s, 1);
+}
+
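+/* called when the filesystem is frozen (lockfs): flush a transaction that
+   logs the superblock buffer and block further writes until unlockfs */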
+static void reiserfs_write_super_lockfs (struct super_block * s)
+{
+  struct reiserfs_transaction_handle th ;
+  reiserfs_write_lock(s);
+  if (!(s->s_flags & MS_RDONLY)) {
+    int err = journal_begin(&th, s, 1) ;
+    if (err) {
+        reiserfs_block_writes(&th) ;
+    } else {
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+        reiserfs_block_writes(&th) ;
+        journal_end_sync(&th, s, 1) ;
+    }
+  }
+  s->s_dirt = 0;
+  reiserfs_write_unlock(s);
+}
+
+static void reiserfs_unlockfs(struct super_block *s) {
+  reiserfs_allow_writes(s) ;
+}
+
+extern const struct reiserfs_key  MAX_KEY;
+
+
+/* this is used to delete a "save link" when there are no items of the
+   file it points to. This can happen either if the unlink completed but
+   the "save link" removal did not, or if the file has both an unlink and
+   a truncate pending and the unlink completes first (because the key of
+   the "save link" protecting the unlink is bigger than the key of the
+   "save link" protecting the truncate), so there are no items left on
+   which to complete the truncate */
+static int remove_save_link_only (struct super_block * s, struct reiserfs_key * key, int oid_free)
+{
+    struct reiserfs_transaction_handle th;
+    int err;
+
+     /* we are going to do one balancing */
+     err = journal_begin (&th, s, JOURNAL_PER_BALANCE_CNT);
+     if (err)
+        return err;
+ 
+     reiserfs_delete_solid_item (&th, NULL, key);
+     if (oid_free)
+        /* removals are protected by direct items */
+        reiserfs_release_objectid (&th, le32_to_cpu (key->k_objectid));
+
+     return journal_end (&th, s, JOURNAL_PER_BALANCE_CNT);
+}
+ 
+#ifdef CONFIG_QUOTA
+static int reiserfs_quota_on_mount(struct super_block *, int);
+#endif
+ 
+/* look for uncompleted unlinks and truncates and complete them */
+static int finish_unfinished (struct super_block * s)
+{
+    INITIALIZE_PATH (path);
+    struct cpu_key max_cpu_key, obj_key;
+    struct reiserfs_key save_link_key;
+    int retval = 0;
+    struct item_head * ih;
+    struct buffer_head * bh;
+    int item_pos;
+    char * item;
+    int done;
+    struct inode * inode;
+    int truncate;
+#ifdef CONFIG_QUOTA
+    int i;
+    int ms_active_set;
+#endif
+ 
+ 
+    /* compose key to look for "save" links */
+    max_cpu_key.version = KEY_FORMAT_3_5;
+    max_cpu_key.on_disk_key = MAX_KEY;
+    max_cpu_key.key_length = 3;
+
+#ifdef CONFIG_QUOTA
+    /* Needed for iput() to work correctly and not trash data */
+    if (s->s_flags & MS_ACTIVE) {
+	    ms_active_set = 0;
+    } else {
+	    ms_active_set = 1;
+	    s->s_flags |= MS_ACTIVE;
+    }
+    /* Turn on quotas so that they are updated correctly */
+    for (i = 0; i < MAXQUOTAS; i++) {
+	if (REISERFS_SB(s)->s_qf_names[i]) {
+	    int ret = reiserfs_quota_on_mount(s, i);
+	    if (ret < 0)
+		reiserfs_warning(s, "reiserfs: cannot turn on journalled quota: error %d", ret);
+	}
+    }
+#endif
+ 
+    done = 0;
+    REISERFS_SB(s)->s_is_unlinked_ok = 1;
+    while (!retval) {
+        retval = search_item (s, &max_cpu_key, &path);
+        if (retval != ITEM_NOT_FOUND) {
+            reiserfs_warning (s, "vs-2140: finish_unfinished: search_by_key returned %d",
+                              retval);
+            break;
+        }
+        
+        bh = get_last_bh (&path);
+        item_pos = get_item_pos (&path);
+        if (item_pos != B_NR_ITEMS (bh)) {
+            reiserfs_warning (s, "vs-2060: finish_unfinished: wrong position found");
+            break;
+        }
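+        /* the search for the maximal key positions us just past the last
+           item in the leaf; step back to look at that item, which is the
+           "save" link with the highest key */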
+        item_pos --;
+        ih = B_N_PITEM_HEAD (bh, item_pos);
+ 
+        if (le32_to_cpu (ih->ih_key.k_dir_id) != MAX_KEY_OBJECTID)
+            /* there are no "save" links anymore */
+            break;
+ 
+        save_link_key = ih->ih_key;
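+        /* add_save_link() stores truncate links as indirect items and
+           unlink links as direct items, so the item type tells which kind
+           of unfinished operation we found */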
+        if (is_indirect_le_ih (ih))
+            truncate = 1;
+        else
+            truncate = 0;
+ 
+        /* reiserfs_iget needs k_dirid and k_objectid only */
+        item = B_I_PITEM (bh, ih);
+        obj_key.on_disk_key.k_dir_id = le32_to_cpu (*(__u32 *)item);
+        obj_key.on_disk_key.k_objectid = le32_to_cpu (ih->ih_key.k_objectid);
+	obj_key.on_disk_key.u.k_offset_v1.k_offset = 0;
+	obj_key.on_disk_key.u.k_offset_v1.k_uniqueness = 0;
+	
+        pathrelse (&path);
+ 
+        inode = reiserfs_iget (s, &obj_key);
+        if (!inode) {
+            /* the unlink almost completed; it just did not manage to remove
+	       the "save" link and release the objectid */
+            reiserfs_warning (s, "vs-2180: finish_unfinished: iget failed for %K",
+                              &obj_key);
+            retval = remove_save_link_only (s, &save_link_key, 1);
+            continue;
+        }
+
+	if (!truncate && inode->i_nlink) {
+	    /* file is not unlinked */
+            reiserfs_warning (s, "vs-2185: finish_unfinished: file %K is not unlinked",
+                              &obj_key);
+            retval = remove_save_link_only (s, &save_link_key, 0);
+            continue;
+	}
+	DQUOT_INIT(inode);
+
+	if (truncate && S_ISDIR (inode->i_mode) ) {
+	    /* We got a truncate request for a dir, which is impossible.
+	       The only imaginable way is to execute an unfinished truncate
+	       request, then boot into an old kernel, remove the file and
+	       create a dir with the same key. */
+	    reiserfs_warning(s, "green-2101: impossible truncate on a directory %k. Please report", INODE_PKEY (inode));
+	    retval = remove_save_link_only (s, &save_link_key, 0);
+	    truncate = 0;
+	    iput (inode); 
+	    continue;
+	}
+ 
+        if (truncate) {
+            REISERFS_I(inode) -> i_flags |= i_link_saved_truncate_mask;
+            /* an uncompleted truncate was found. The new size was committed
+	       together with the "save" link */
+            reiserfs_info (s, "Truncating %k to %Ld ..",
+                              INODE_PKEY (inode), inode->i_size);
+            reiserfs_truncate_file (inode, 0/*don't update modification time*/);
+            retval = remove_save_link (inode, truncate);
+        } else {
+            REISERFS_I(inode) -> i_flags |= i_link_saved_unlink_mask;
+            /* an uncompleted unlink (rmdir) was found */
+            reiserfs_info (s, "Removing %k..", INODE_PKEY (inode));
+            /* removal gets completed in iput */
+            retval = 0;
+        }
+ 
+        iput (inode);
+        printk ("done\n");
+        done ++;
+    }
+    REISERFS_SB(s)->s_is_unlinked_ok = 0;
+     
+#ifdef CONFIG_QUOTA
+    /* Turn quotas off */
+    for (i = 0; i < MAXQUOTAS; i++) {
+            if (sb_dqopt(s)->files[i])
+                    vfs_quota_off_mount(s, i);
+    }
+    if (ms_active_set)
+	    /* Restore the flag back */
+	    s->s_flags &= ~MS_ACTIVE;
+#endif
+    pathrelse (&path);
+    if (done)
+        reiserfs_info (s, "There were %d uncompleted unlinks/truncates. "
+                          "Completed\n", done);
+    return retval;
+}
+ 
+/* to protect a file being unlinked from getting lost we "save"-link files
+   being unlinked. This link is deleted in the same transaction as the
+   last item of the file. When mounting the filesystem we scan all these
+   links and remove files which almost got lost */
+void add_save_link (struct reiserfs_transaction_handle * th,
+		    struct inode * inode, int truncate)
+{
+    INITIALIZE_PATH (path);
+    int retval;
+    struct cpu_key key;
+    struct item_head ih;
+    __u32 link;
+
+    BUG_ON (!th->t_trans_id);
+
+    /* file can only get one "save link" of each kind */
+    RFALSE( truncate && 
+	    ( REISERFS_I(inode) -> i_flags & i_link_saved_truncate_mask ),
+	    "saved link already exists for truncated inode %lx",
+	    ( long ) inode -> i_ino );
+    RFALSE( !truncate && 
+	    ( REISERFS_I(inode) -> i_flags & i_link_saved_unlink_mask ),
+	    "saved link already exists for unlinked inode %lx",
+	    ( long ) inode -> i_ino );
+
+    /* setup key of "save" link */
+    key.version = KEY_FORMAT_3_5;
+    key.on_disk_key.k_dir_id = MAX_KEY_OBJECTID;
+    key.on_disk_key.k_objectid = inode->i_ino;
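+    /* the offset and type set below distinguish the two kinds of save link:
+       unlink links use offset blocksize + 1 and a direct item, truncate
+       links use offset 1 and an indirect item, which also makes the unlink
+       link's key greater than the truncate link's key for the same object */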
+    if (!truncate) {
+	/* unlink, rmdir, rename */
+	set_cpu_key_k_offset (&key, 1 + inode->i_sb->s_blocksize);
+	set_cpu_key_k_type (&key, TYPE_DIRECT);
+
+	/* item head of "safe" link */
+	make_le_item_head (&ih, &key, key.version, 1 + inode->i_sb->s_blocksize, TYPE_DIRECT,
+			   4/*length*/, 0xffff/*free space*/);
+    } else {
+	/* truncate */
+	if (S_ISDIR (inode->i_mode))
+	    reiserfs_warning(inode->i_sb, "green-2102: Adding a truncate savelink for a directory %k! Please report", INODE_PKEY(inode));
+	set_cpu_key_k_offset (&key, 1);
+	set_cpu_key_k_type (&key, TYPE_INDIRECT);
+
+	/* item head of "safe" link */
+	make_le_item_head (&ih, &key, key.version, 1, TYPE_INDIRECT,
+			   4/*length*/, 0/*free space*/);
+    }
+    key.key_length = 3;
+
+    /* look for its place in the tree */
+    retval = search_item (inode->i_sb, &key, &path);
+    if (retval != ITEM_NOT_FOUND) {
+	if ( retval != -ENOSPC )
+	    reiserfs_warning (inode->i_sb, "vs-2100: add_save_link:"
+			  "search_by_key (%K) returned %d", &key, retval);
+	pathrelse (&path);
+	return;
+    }
+
+    /* body of "save" link */
+    link = INODE_PKEY (inode)->k_dir_id;
+
+    /* put the "save" link into the tree, don't charge quota to anyone */
+    retval = reiserfs_insert_item (th, &path, &key, &ih, NULL, (char *)&link);
+    if (retval) {
+	if (retval != -ENOSPC)
+	    reiserfs_warning (inode->i_sb, "vs-2120: add_save_link: insert_item returned %d",
+			  retval);
+    } else {
+	if( truncate )
+	    REISERFS_I(inode) -> i_flags |= i_link_saved_truncate_mask;
+	else
+	    REISERFS_I(inode) -> i_flags |= i_link_saved_unlink_mask;
+    }
+}
+
+
+/* this opens a transaction, unlike add_save_link */
+int remove_save_link (struct inode * inode, int truncate)
+{
+    struct reiserfs_transaction_handle th;
+    struct reiserfs_key key;
+    int err;
+ 
+    /* we are going to do one balancing only */
+    err = journal_begin (&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
+    if (err)
+        return err;
+ 
+    /* setup key of "save" link */
+    key.k_dir_id = cpu_to_le32 (MAX_KEY_OBJECTID);
+    key.k_objectid = INODE_PKEY (inode)->k_objectid;
+    if (!truncate) {
+        /* unlink, rmdir, rename */
+        set_le_key_k_offset (KEY_FORMAT_3_5, &key,
+			     1 + inode->i_sb->s_blocksize);
+        set_le_key_k_type (KEY_FORMAT_3_5, &key, TYPE_DIRECT);
+    } else {
+        /* truncate */
+        set_le_key_k_offset (KEY_FORMAT_3_5, &key, 1);
+        set_le_key_k_type (KEY_FORMAT_3_5, &key, TYPE_INDIRECT);
+    }
+ 
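+    /* only delete the on-disk "save" link if the in-core inode flags say
+       one was actually added for this kind of operation */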
+    if( ( truncate && 
+          ( REISERFS_I(inode) -> i_flags & i_link_saved_truncate_mask ) ) ||
+        ( !truncate && 
+          ( REISERFS_I(inode) -> i_flags & i_link_saved_unlink_mask ) ) )
+	/* don't take quota bytes from anywhere */
+	reiserfs_delete_solid_item (&th, NULL, &key);
+    if (!truncate) {
+	reiserfs_release_objectid (&th, inode->i_ino);
+	REISERFS_I(inode) -> i_flags &= ~i_link_saved_unlink_mask;
+    } else
+	REISERFS_I(inode) -> i_flags &= ~i_link_saved_truncate_mask;
+ 
+    return journal_end (&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
+}
+
+
+static void reiserfs_put_super (struct super_block * s)
+{
+  int i;
+  struct reiserfs_transaction_handle th ;
+  th.t_trans_id = 0;
+
+  if (REISERFS_SB(s)->xattr_root) {
+    d_invalidate (REISERFS_SB(s)->xattr_root);
+    dput (REISERFS_SB(s)->xattr_root);
+  }
+  
+  if (REISERFS_SB(s)->priv_root) {
+    d_invalidate (REISERFS_SB(s)->priv_root);
+    dput (REISERFS_SB(s)->priv_root);
+  }
+
+  /* change file system state to current state if it was mounted with read-write permissions */
+  if (!(s->s_flags & MS_RDONLY)) {
+    if (!journal_begin(&th, s, 10)) {
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+        set_sb_umount_state( SB_DISK_SUPER_BLOCK(s), REISERFS_SB(s)->s_mount_state );
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+    }
+  }
+
+  /* note, journal_release checks for readonly mount, and can decide not
+  ** to do a journal_end
+  */
+  journal_release(&th, s) ;
+
+  for (i = 0; i < SB_BMAP_NR (s); i ++)
+    brelse (SB_AP_BITMAP (s)[i].bh);
+
+  vfree (SB_AP_BITMAP (s));
+
+  brelse (SB_BUFFER_WITH_SB (s));
+
+  print_statistics (s);
+
+  if (REISERFS_SB(s)->s_kmallocs != 0) {
+    reiserfs_warning (s, "vs-2004: reiserfs_put_super: allocated memory left %d",
+		      REISERFS_SB(s)->s_kmallocs);
+  }
+
+  if (REISERFS_SB(s)->reserved_blocks != 0) {
+    reiserfs_warning (s, "green-2005: reiserfs_put_super: reserved blocks left %d",
+		      REISERFS_SB(s)->reserved_blocks);
+  }
+
+  reiserfs_proc_info_done( s );
+
+  kfree(s->s_fs_info);
+  s->s_fs_info = NULL;
+
+  return;
+}
+
+static kmem_cache_t * reiserfs_inode_cachep;
+
+static struct inode *reiserfs_alloc_inode(struct super_block *sb)
+{
+	struct reiserfs_inode_info *ei;
+	ei = (struct reiserfs_inode_info *)kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void reiserfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		INIT_LIST_HEAD(&ei->i_prealloc_list) ;
+		inode_init_once(&ei->vfs_inode);
+		ei->i_acl_access = NULL;
+		ei->i_acl_default = NULL;
+	}
+}
+ 
+static int init_inodecache(void)
+{
+	reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache",
+					     sizeof(struct reiserfs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (reiserfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(reiserfs_inode_cachep))
+		reiserfs_warning (NULL, "reiserfs_inode_cache: not all structures were freed");
+}
+
+/* we don't mark inodes dirty, we just log them */
+static void reiserfs_dirty_inode (struct inode * inode) {
+    struct reiserfs_transaction_handle th ;
+
+    int err = 0;
+    if (inode->i_sb->s_flags & MS_RDONLY) {
+        reiserfs_warning(inode->i_sb, "clm-6006: writing inode %lu on readonly FS",
+	                  inode->i_ino) ;
+        return ;
+    }
+    reiserfs_write_lock(inode->i_sb);
+
+    /* this is really only used for atime updates, so they don't have
+    ** to be included in O_SYNC or fsync
+    */
+    err = journal_begin(&th, inode->i_sb, 1) ;
+    if (err) {
+        reiserfs_write_unlock (inode->i_sb);
+        return;
+    }
+    reiserfs_update_sd (&th, inode);
+    journal_end(&th, inode->i_sb, 1) ;
+    reiserfs_write_unlock(inode->i_sb);
+}
+
+static void reiserfs_clear_inode (struct inode *inode)
+{
+    struct posix_acl *acl;
+
+    acl = REISERFS_I(inode)->i_acl_access;
+    if (acl && !IS_ERR (acl))
+        posix_acl_release (acl);
+    REISERFS_I(inode)->i_acl_access = NULL;
+
+    acl = REISERFS_I(inode)->i_acl_default;
+    if (acl && !IS_ERR (acl))
+        posix_acl_release (acl);
+    REISERFS_I(inode)->i_acl_default = NULL;
+}
+
+#ifdef CONFIG_QUOTA
+static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
+static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t, loff_t);
+#endif
+
+static struct super_operations reiserfs_sops =
+{
+  .alloc_inode = reiserfs_alloc_inode,
+  .destroy_inode = reiserfs_destroy_inode,
+  .write_inode = reiserfs_write_inode,
+  .dirty_inode = reiserfs_dirty_inode,
+  .delete_inode = reiserfs_delete_inode,
+  .clear_inode  = reiserfs_clear_inode,
+  .put_super = reiserfs_put_super,
+  .write_super = reiserfs_write_super,
+  .sync_fs = reiserfs_sync_fs,
+  .write_super_lockfs = reiserfs_write_super_lockfs,
+  .unlockfs = reiserfs_unlockfs,
+  .statfs = reiserfs_statfs,
+  .remount_fs = reiserfs_remount,
+#ifdef CONFIG_QUOTA
+  .quota_read = reiserfs_quota_read,
+  .quota_write = reiserfs_quota_write,
+#endif
+};
+
+#ifdef CONFIG_QUOTA
+#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
+
+static int reiserfs_dquot_initialize(struct inode *, int);
+static int reiserfs_dquot_drop(struct inode *);
+static int reiserfs_write_dquot(struct dquot *);
+static int reiserfs_acquire_dquot(struct dquot *);
+static int reiserfs_release_dquot(struct dquot *);
+static int reiserfs_mark_dquot_dirty(struct dquot *);
+static int reiserfs_write_info(struct super_block *, int);
+static int reiserfs_quota_on(struct super_block *, int, int, char *);
+
+static struct dquot_operations reiserfs_quota_operations =
+{
+  .initialize = reiserfs_dquot_initialize,
+  .drop = reiserfs_dquot_drop,
+  .alloc_space = dquot_alloc_space,
+  .alloc_inode = dquot_alloc_inode,
+  .free_space = dquot_free_space,
+  .free_inode = dquot_free_inode,
+  .transfer = dquot_transfer,
+  .write_dquot = reiserfs_write_dquot,
+  .acquire_dquot = reiserfs_acquire_dquot,
+  .release_dquot = reiserfs_release_dquot,
+  .mark_dirty = reiserfs_mark_dquot_dirty,
+  .write_info = reiserfs_write_info,
+};
+
+static struct quotactl_ops reiserfs_qctl_operations =
+{
+  .quota_on = reiserfs_quota_on,
+  .quota_off = vfs_quota_off,
+  .quota_sync = vfs_quota_sync,
+  .get_info = vfs_get_dqinfo,
+  .set_info = vfs_set_dqinfo,
+  .get_dqblk = vfs_get_dqblk,
+  .set_dqblk = vfs_set_dqblk,
+};
+#endif
+
+static struct export_operations reiserfs_export_ops = {
+  .encode_fh = reiserfs_encode_fh,
+  .decode_fh = reiserfs_decode_fh,
+  .get_parent = reiserfs_get_parent,
+  .get_dentry = reiserfs_get_dentry,
+} ;
+
+/* this struct is used in reiserfs_getopt() to hold the value of those
+   mount options that take a value rather than being simple toggles. */
+typedef struct {
+    char * value;
+    int setmask; /* bitmask to set in the mount_options bitmask when this
+                    value is found; 0 if no bits are to be changed. */
+    int clrmask; /* bitmask to clear in the mount_options bitmask when this
+		    value is found; 0 if no bits are to be changed. This is
+		    applied BEFORE setmask */
+} arg_desc_t;
+
+/* Set this bit in arg_required to allow empty arguments */
+#define REISERFS_OPT_ALLOWEMPTY 31
+
+/* this struct is used in reiserfs_getopt() for describing the set of reiserfs
+   mount options */
+typedef struct {
+    char * option_name;
+    int arg_required; /* 0 if argument is not required, not 0 otherwise */
+    const arg_desc_t * values; /* list of values accepted by an option */
+    int setmask; /* bitmask to set in the mount_options bitmask when this
+                    value is found; 0 if no bits are to be changed. */
+    int clrmask; /* bitmask to clear in the mount_options bitmask when this
+		    value is found; 0 if no bits are to be changed. This is
+		    applied BEFORE setmask */
+} opt_desc_t;
+
+/* possible values for -o data= */
+static const arg_desc_t logging_mode[] = {
+    {"ordered", 1<<REISERFS_DATA_ORDERED, (1<<REISERFS_DATA_LOG|1<<REISERFS_DATA_WRITEBACK)},
+    {"journal", 1<<REISERFS_DATA_LOG, (1<<REISERFS_DATA_ORDERED|1<<REISERFS_DATA_WRITEBACK)},
+    {"writeback", 1<<REISERFS_DATA_WRITEBACK, (1<<REISERFS_DATA_ORDERED|1<<REISERFS_DATA_LOG)},
+    {NULL, 0}
+};
+
+/* possible values for -o barrier= */
+static const arg_desc_t barrier_mode[] = {
+    {"none", 1<<REISERFS_BARRIER_NONE, 1<<REISERFS_BARRIER_FLUSH},
+    {"flush", 1<<REISERFS_BARRIER_FLUSH, 1<<REISERFS_BARRIER_NONE},
+    {NULL, 0}
+};
+
+/* possible values for "-o block-allocator=" and bits which are to be set in
+   s_mount_opt of reiserfs specific part of in-core super block */
+static const arg_desc_t balloc[] = {
+    {"noborder", 1<<REISERFS_NO_BORDER, 0},
+    {"border", 0, 1<<REISERFS_NO_BORDER},
+    {"no_unhashed_relocation", 1<<REISERFS_NO_UNHASHED_RELOCATION, 0},
+    {"hashed_relocation", 1<<REISERFS_HASHED_RELOCATION, 0},
+    {"test4", 1<<REISERFS_TEST4, 0},
+    {"notest4", 0, 1<<REISERFS_TEST4},
+    {NULL, 0, 0}
+};
+
+static const arg_desc_t tails[] = {
+    {"on", 1<<REISERFS_LARGETAIL, 1<<REISERFS_SMALLTAIL},
+    {"off", 0, (1<<REISERFS_LARGETAIL)|(1<<REISERFS_SMALLTAIL)},
+    {"small", 1<<REISERFS_SMALLTAIL, 1<<REISERFS_LARGETAIL},
+    {NULL, 0, 0}
+};
+
+static const arg_desc_t error_actions[] = {
+    {"panic", 1 << REISERFS_ERROR_PANIC,
+              (1 << REISERFS_ERROR_RO | 1 << REISERFS_ERROR_CONTINUE)},
+    {"ro-remount", 1 << REISERFS_ERROR_RO,
+              (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_CONTINUE)},
+#ifdef REISERFS_JOURNAL_ERROR_ALLOWS_NO_LOG
+    {"continue", 1 << REISERFS_ERROR_CONTINUE,
+              (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_RO)},
+#endif
+    {NULL, 0, 0},
+};
+
+int reiserfs_default_io_size = 128 * 1024; /* Default recommended I/O size is 128k.
+					      There might be broken applications that are
+					      confused by this. Use the nolargeio mount
+					      option to get the usual i/o size, PAGE_SIZE.
+					    */
+
+/* process exactly one option from the list
+   *cur - string containing the mount options
+   opts - array of options which are accepted
+   opt_arg - if the option is found, requires an argument and the argument is
+   specified in the input, a pointer to the argument is stored here
+   bit_flags - if the option requires a certain bit to be set, it is set here
+   return -1 if an unknown option is found, opt->arg_required otherwise */
+static int reiserfs_getopt ( struct super_block * s, char ** cur, opt_desc_t * opts, char ** opt_arg,
+			    unsigned long * bit_flags)
+{
+    char * p;
+    /* foo=bar, 
+       ^   ^  ^
+       |   |  +-- option_end
+       |   +-- arg_start
+       +-- option_start
+    */
+    const opt_desc_t * opt;
+    const arg_desc_t * arg;
+    
+    
+    p = *cur;
+    
+    /* assume argument cannot contain commas */
+    *cur = strchr (p, ',');
+    if (*cur) {
+	*(*cur) = '\0';
+	(*cur) ++;
+    }
+
+    if ( !strncmp (p, "alloc=", 6) ) {
+	/* Ugly special case; probably we should redo the options parser so
+	   that it can understand several arguments for some options and
+	   fill several bitfields with option values. */
+	if ( reiserfs_parse_alloc_options( s, p + 6) ) {
+	    return -1;
+	} else {
+	    return 0;
+	}
+    }
+
+ 
+    /* for every option in the list */
+    for (opt = opts; opt->option_name; opt ++) {
+	if (!strncmp (p, opt->option_name, strlen (opt->option_name))) {
+	    if (bit_flags) {
+                if (opt->clrmask == (1 << REISERFS_UNSUPPORTED_OPT))
+                    reiserfs_warning (s, "%s not supported.", p);
+                else
+                    *bit_flags &= ~opt->clrmask;
+                if (opt->setmask == (1 << REISERFS_UNSUPPORTED_OPT))
+                    reiserfs_warning (s, "%s not supported.", p);
+                else
+                    *bit_flags |= opt->setmask;
+	    }
+	    break;
+	}
+    }
+    if (!opt->option_name) {
+	reiserfs_warning (s, "unknown mount option \"%s\"", p);
+	return -1;
+    }
+    
+    p += strlen (opt->option_name);
+    switch (*p) {
+    case '=':
+	if (!opt->arg_required) {
+	    reiserfs_warning (s, "the option \"%s\" does not require an argument",
+		    opt->option_name);
+	    return -1;
+	}
+	break;
+	
+    case 0:
+	if (opt->arg_required) {
+	    reiserfs_warning (s, "the option \"%s\" requires an argument", opt->option_name);
+	    return -1;
+	}
+	break;
+    default:
+	reiserfs_warning (s, "unexpected trailing characters after option \"%s\"", opt->option_name);
+	return -1;
+    }
+
+    /* move to the argument, or to next option if argument is not required */
+    p ++;
+    
+    if ( opt->arg_required && !(opt->arg_required & (1<<REISERFS_OPT_ALLOWEMPTY)) && !strlen (p) ) {
+	/* this catches "option=," if not allowed */
+	reiserfs_warning (s, "empty argument for \"%s\"", opt->option_name);
+	return -1;
+    }
+    
+    if (!opt->values) {
+	/* opt->values == NULL: *opt_arg gets a pointer to the argument string */
+	*opt_arg = p;
+	return opt->arg_required & ~(1<<REISERFS_OPT_ALLOWEMPTY);
+    }
+    
+    /* values possible for this option are listed in opt->values */
+    for (arg = opt->values; arg->value; arg ++) {
+	if (!strcmp (p, arg->value)) {
+	    if (bit_flags) {
+		*bit_flags &= ~arg->clrmask;
+		*bit_flags |= arg->setmask;
+	    }
+	    return opt->arg_required;
+	}
+    }
+    
+    reiserfs_warning (s, "bad value \"%s\" for option \"%s\"", p, opt->option_name);
+    return -1;
+}
+
+/* returns 0 if something is wrong in the option string, 1 otherwise */
+static int reiserfs_parse_options (struct super_block * s, char * options, /* string given via mount's -o */
+				   unsigned long * mount_options,
+				   /* after the parsing phase, contains the
+				      collection of bitflags defining what
+				      mount options were selected. */
+				   unsigned long * blocks, /* strtol-ed from NNN of resize=NNN */
+				   char ** jdev_name,
+				   unsigned int * commit_max_age)
+{
+    int c;
+    char * arg = NULL;
+    char * pos;
+    opt_desc_t opts[] = {
+	/* Compatibility stuff, so that -o notail for old setups still works */
+	{"tails",	.arg_required = 't', .values = tails},
+	{"notail",	.clrmask = (1<<REISERFS_LARGETAIL)|(1<<REISERFS_SMALLTAIL)},
+	{"conv",	.setmask = 1<<REISERFS_CONVERT},
+	{"attrs",	.setmask = 1<<REISERFS_ATTRS},
+	{"noattrs",	.clrmask = 1<<REISERFS_ATTRS},
+#ifdef CONFIG_REISERFS_FS_XATTR
+	{"user_xattr",	.setmask = 1<<REISERFS_XATTRS_USER},
+	{"nouser_xattr",.clrmask = 1<<REISERFS_XATTRS_USER},
+#else
+	{"user_xattr",	.setmask = 1<<REISERFS_UNSUPPORTED_OPT},
+	{"nouser_xattr",.clrmask = 1<<REISERFS_UNSUPPORTED_OPT},
+#endif
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+	{"acl",		.setmask = 1<<REISERFS_POSIXACL},
+	{"noacl",	.clrmask = 1<<REISERFS_POSIXACL},
+#else
+	{"acl",		.setmask = 1<<REISERFS_UNSUPPORTED_OPT},
+	{"noacl",	.clrmask = 1<<REISERFS_UNSUPPORTED_OPT},
+#endif
+	{"nolog",},	 /* This is unsupported */
+	{"replayonly",	.setmask = 1<<REPLAYONLY},
+	{"block-allocator", .arg_required = 'a', .values = balloc},
+	{"data",	.arg_required = 'd', .values = logging_mode},
+	{"barrier",	.arg_required = 'b', .values = barrier_mode},
+	{"resize",	.arg_required = 'r', .values = NULL},
+	{"jdev",	.arg_required = 'j', .values = NULL},
+	{"nolargeio",	.arg_required = 'w', .values = NULL},
+	{"commit",	.arg_required = 'c', .values = NULL},
+	{"usrquota",},
+	{"grpquota",},
+	{"errors", 	.arg_required = 'e', .values = error_actions},
+	{"usrjquota",	.arg_required = 'u'|(1<<REISERFS_OPT_ALLOWEMPTY), .values = NULL},
+	{"grpjquota",	.arg_required = 'g'|(1<<REISERFS_OPT_ALLOWEMPTY), .values = NULL},
+	{"jqfmt",	.arg_required = 'f', .values = NULL},
+	{NULL,}
+    };
+	
+    *blocks = 0;
+    if (!options || !*options)
+	/* use default configuration: create tails, journaling on, no
+	   conversion to newest format */
+	return 1;
+    
+    for (pos = options; pos; ) {
+	c = reiserfs_getopt (s, &pos, opts, &arg, mount_options);
+	if (c == -1)
+	    /* wrong option is given */
+	    return 0;
+	
+	if (c == 'r') {
+	    char * p;
+	    
+	    p = NULL;
+	    /* "resize=NNN" */
+	    *blocks = simple_strtoul (arg, &p, 0);
+	    if (*p != '\0') {
+		/* NNN does not look like a number */
+		reiserfs_warning (s, "reiserfs_parse_options: bad value %s", arg);
+		return 0;
+	    }
+	}
+
+	if ( c == 'c' ) {
+		char *p = NULL;
+		unsigned long val = simple_strtoul (arg, &p, 0);
+		/* commit=NNN (time in seconds) */
+		if ( *p != '\0' || val >= (unsigned int)-1) {
+			reiserfs_warning (s, "reiserfs_parse_options: bad value %s", arg);
+			return 0;
+		}
+		*commit_max_age = (unsigned int)val;
+	}
+
+	if ( c == 'w' ) {
+		char *p=NULL;
+		int val = simple_strtoul (arg, &p, 0);
+
+		if ( *p != '\0') {
+		    reiserfs_warning (s, "reiserfs_parse_options: non-numeric value %s for nolargeio option", arg);
+		    return 0;
+		}
+		if ( val ) 
+		    reiserfs_default_io_size = PAGE_SIZE;
+		else
+		    reiserfs_default_io_size = 128 * 1024;
+	}
+
+	if (c == 'j') {
+	    if (arg && *arg && jdev_name) {
+		if ( *jdev_name ) { //Hm, already assigned?
+		    reiserfs_warning (s, "reiserfs_parse_options: journal device was already specified to be %s", *jdev_name);
+		    return 0;
+		}
+		*jdev_name = arg;
+	    }
+	}
+
+#ifdef CONFIG_QUOTA
+	if (c == 'u' || c == 'g') {
+	    int qtype = c == 'u' ? USRQUOTA : GRPQUOTA;
+
+	    if (sb_any_quota_enabled(s)) {
+		reiserfs_warning(s, "reiserfs_parse_options: cannot change journalled quota options when quota turned on.");
+		return 0;
+	    }
+	    if (*arg) {	/* Some filename specified? */
+	        if (REISERFS_SB(s)->s_qf_names[qtype] && strcmp(REISERFS_SB(s)->s_qf_names[qtype], arg)) {
+		    reiserfs_warning(s, "reiserfs_parse_options: %s quota file already specified.", QTYPE2NAME(qtype));
+		    return 0;
+		}
+		if (strchr(arg, '/')) {
+		    reiserfs_warning(s, "reiserfs_parse_options: quotafile must be on filesystem root.");
+		    return 0;
+		}
+	    	REISERFS_SB(s)->s_qf_names[qtype] = kmalloc(strlen(arg)+1, GFP_KERNEL);
+		if (!REISERFS_SB(s)->s_qf_names[qtype]) {
+		    reiserfs_warning(s, "reiserfs_parse_options: not enough memory for storing quotafile name.");
+		    return 0;
+		}
+		strcpy(REISERFS_SB(s)->s_qf_names[qtype], arg);
+	    }
+	    else {
+		if (REISERFS_SB(s)->s_qf_names[qtype]) {
+		    kfree(REISERFS_SB(s)->s_qf_names[qtype]);
+		    REISERFS_SB(s)->s_qf_names[qtype] = NULL;
+		}
+	    }
+	}
+	if (c == 'f') {
+	    if (!strcmp(arg, "vfsold"))
+		REISERFS_SB(s)->s_jquota_fmt = QFMT_VFS_OLD;
+	    else if (!strcmp(arg, "vfsv0"))
+		REISERFS_SB(s)->s_jquota_fmt = QFMT_VFS_V0;
+	    else {
+		reiserfs_warning(s, "reiserfs_parse_options: unknown quota format specified.");
+		return 0;
+	    }
+	}
+#else
+	if (c == 'u' || c == 'g' || c == 'f') {
+	    reiserfs_warning(s, "reiserfs_parse_options: journalled quota options not supported.");
+	    return 0;
+	}
+#endif
+    }
+    
+#ifdef CONFIG_QUOTA
+    if (!REISERFS_SB(s)->s_jquota_fmt && (REISERFS_SB(s)->s_qf_names[USRQUOTA] || REISERFS_SB(s)->s_qf_names[GRPQUOTA])) {
+	reiserfs_warning(s, "reiserfs_parse_options: journalled quota format not specified.");
+	return 0;
+    }
+#endif
+    return 1;
+}
+
+static void switch_data_mode(struct super_block *s, unsigned long mode) {
+    REISERFS_SB(s)->s_mount_opt &= ~((1 << REISERFS_DATA_LOG) |
+                                       (1 << REISERFS_DATA_ORDERED) |
+				       (1 << REISERFS_DATA_WRITEBACK));
+    REISERFS_SB(s)->s_mount_opt |= (1 << mode);
+}
+
+static void handle_data_mode(struct super_block *s, unsigned long mount_options)
+{
+    if (mount_options & (1 << REISERFS_DATA_LOG)) {
+        if (!reiserfs_data_log(s)) {
+	    switch_data_mode(s, REISERFS_DATA_LOG);
+	    reiserfs_info (s, "switching to journaled data mode\n");
+	}
+    } else if (mount_options & (1 << REISERFS_DATA_ORDERED)) {
+        if (!reiserfs_data_ordered(s)) {
+	    switch_data_mode(s, REISERFS_DATA_ORDERED);
+	    reiserfs_info (s, "switching to ordered data mode\n");
+	}
+    } else if (mount_options & (1 << REISERFS_DATA_WRITEBACK)) {
+        if (!reiserfs_data_writeback(s)) {
+	    switch_data_mode(s, REISERFS_DATA_WRITEBACK);
+	    reiserfs_info (s, "switching to writeback data mode\n");
+	}
+    }
+}
+
+static void handle_barrier_mode(struct super_block *s, unsigned long bits) {
+    int flush = (1 << REISERFS_BARRIER_FLUSH);
+    int none = (1 << REISERFS_BARRIER_NONE);
+    int all_barrier = flush | none;
+
+    if (bits & all_barrier) {
+        REISERFS_SB(s)->s_mount_opt &= ~all_barrier;
+	if (bits & flush) {
+	    REISERFS_SB(s)->s_mount_opt |= flush;
+	    printk("reiserfs: enabling write barrier flush mode\n");
+	} else if (bits & none) {
+	    REISERFS_SB(s)->s_mount_opt |= none;
+	    printk("reiserfs: write barriers turned off\n");
+	}
+   }
+}
+
+static void handle_attrs( struct super_block *s )
+{
+	struct reiserfs_super_block * rs;
+
+	if( reiserfs_attrs( s ) ) {
+		rs = SB_DISK_SUPER_BLOCK (s);
+		if( old_format_only(s) ) {
+			reiserfs_warning(s, "reiserfs: cannot support attributes on 3.5.x disk format" );
+			REISERFS_SB(s) -> s_mount_opt &= ~ ( 1 << REISERFS_ATTRS );
+			return;
+		}
+		if( !( le32_to_cpu( rs -> s_flags ) & reiserfs_attrs_cleared ) ) {
+				reiserfs_warning(s, "reiserfs: cannot support attributes until flag is set in super-block" );
+				REISERFS_SB(s) -> s_mount_opt &= ~ ( 1 << REISERFS_ATTRS );
+		}
+	}
+}
+
+static int reiserfs_remount (struct super_block * s, int * mount_flags, char * arg)
+{
+  struct reiserfs_super_block * rs;
+  struct reiserfs_transaction_handle th ;
+  unsigned long blocks;
+  unsigned long mount_options = REISERFS_SB(s)->s_mount_opt;
+  unsigned long safe_mask = 0;
+  unsigned int commit_max_age = (unsigned int)-1;
+  struct reiserfs_journal *journal = SB_JOURNAL(s);
+  int err;
+#ifdef CONFIG_QUOTA
+  int i;
+#endif
+
+  rs = SB_DISK_SUPER_BLOCK (s);
+
+  if (!reiserfs_parse_options(s, arg, &mount_options, &blocks, NULL, &commit_max_age)) {
+#ifdef CONFIG_QUOTA
+    for (i = 0; i < MAXQUOTAS; i++)
+	if (REISERFS_SB(s)->s_qf_names[i]) {
+	    kfree(REISERFS_SB(s)->s_qf_names[i]);
+	    REISERFS_SB(s)->s_qf_names[i] = NULL;
+	}
+#endif
+    return -EINVAL;
+  }
+  
+  handle_attrs(s);
+
+  /* Add options that are safe here */
+  safe_mask |= 1 << REISERFS_SMALLTAIL;
+  safe_mask |= 1 << REISERFS_LARGETAIL;
+  safe_mask |= 1 << REISERFS_NO_BORDER;
+  safe_mask |= 1 << REISERFS_NO_UNHASHED_RELOCATION;
+  safe_mask |= 1 << REISERFS_HASHED_RELOCATION;
+  safe_mask |= 1 << REISERFS_TEST4;
+  safe_mask |= 1 << REISERFS_ATTRS;
+  safe_mask |= 1 << REISERFS_XATTRS_USER;
+  safe_mask |= 1 << REISERFS_POSIXACL;
+  safe_mask |= 1 << REISERFS_BARRIER_FLUSH;
+  safe_mask |= 1 << REISERFS_BARRIER_NONE;
+  safe_mask |= 1 << REISERFS_ERROR_RO;
+  safe_mask |= 1 << REISERFS_ERROR_CONTINUE;
+  safe_mask |= 1 << REISERFS_ERROR_PANIC;
+
+  /* Update the bitmask, taking care to keep
+   * the bits we're not allowed to change here */
+  REISERFS_SB(s)->s_mount_opt = (REISERFS_SB(s)->s_mount_opt & ~safe_mask) |  (mount_options & safe_mask);
+
+  if(commit_max_age != 0 && commit_max_age != (unsigned int)-1) {
+    journal->j_max_commit_age = commit_max_age;
+    journal->j_max_trans_age = commit_max_age;
+  }
+  else if(commit_max_age == 0)
+  {
+    /* 0 means restore defaults. */
+    journal->j_max_commit_age = journal->j_default_max_commit_age;
+    journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
+  }
+
+  if(blocks) {
+    int rc = reiserfs_resize(s, blocks);
+    if (rc != 0)
+      return rc;
+  }
+
+  if (*mount_flags & MS_RDONLY) {
+    reiserfs_xattr_init (s, *mount_flags);
+    /* remount read-only */
+    if (s->s_flags & MS_RDONLY)
+      /* it is read-only already */
+      return 0;
+    /* try to remount file system with read-only permissions */
+    if (sb_umount_state(rs) == REISERFS_VALID_FS || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
+      return 0;
+    }
+
+    err = journal_begin(&th, s, 10) ;
+    if (err)
+        return err;
+
+    /* Mounting a rw partition read-only. */
+    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+    set_sb_umount_state( rs, REISERFS_SB(s)->s_mount_state );
+    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+  } else {
+    /* remount read-write */
+    if (!(s->s_flags & MS_RDONLY)) {
+	reiserfs_xattr_init (s, *mount_flags);
+	return 0; /* We are read-write already */
+    }
+
+    if (reiserfs_is_journal_aborted (journal))
+	return journal->j_errno;
+
+    handle_data_mode(s, mount_options);
+    handle_barrier_mode(s, mount_options);
+    REISERFS_SB(s)->s_mount_state = sb_umount_state(rs) ;
+    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
+    err = journal_begin(&th, s, 10) ;
+    if (err)
+	return err;
+    
+    /* Remount a read-only partition read-write. */
+    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+    REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
+    s->s_flags &= ~MS_RDONLY;
+    set_sb_umount_state( rs, REISERFS_ERROR_FS );
+    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
+    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+    REISERFS_SB(s)->s_mount_state = REISERFS_VALID_FS ;
+  }
+  /* this will force a full flush of all journal lists */
+  SB_JOURNAL(s)->j_must_wait = 1 ;
+  err = journal_end(&th, s, 10) ;
+  if (err)
+    return err;
+  s->s_dirt = 0;
+
+  if (!( *mount_flags & MS_RDONLY ) ) {
+    finish_unfinished( s );
+    reiserfs_xattr_init (s, *mount_flags);
+  }
+
+  return 0;
+}
+
+/* load_bitmap_info_data - Sets up the reiserfs_bitmap_info structure from disk.
+ * @sb - superblock for this filesystem
+ * @bi - the bitmap info to be loaded. Requires that bi->bh is valid.
+ *
+ * This routine counts how many free bits there are, finding the first zero
+ * as a side effect. Could also be implemented as a loop of test_bit() calls, or
+ * a loop of find_first_zero_bit() calls. This implementation is similar to
+ * find_first_zero_bit(), but doesn't return after it finds the first bit.
+ * Should only be called on fs mount, but should be fairly efficient anyway.
+ *
+ * bi->first_zero_hint is considered unset if it == 0, since the bitmap itself
+ * will invariably occupy block 0 represented in the bitmap. The only
+ * exception to this is when free_count also == 0, since there will be no
+ * free blocks at all.
+ */
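+/* Worked example (illustrative, not from the original source): with a 4k
+ * block size each bitmap block covers 32768 blocks.  If the very first
+ * 32-bit word of the bitmap reads 0x00000003 on a 32-bit machine, bits 0
+ * and 1 are in use, so the loop below adds 30 to free_count and sets
+ * first_zero_hint to 2. */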
+
+static void load_bitmap_info_data (struct super_block *sb,
+                                   struct reiserfs_bitmap_info *bi)
+{
+    unsigned long *cur = (unsigned long *)bi->bh->b_data;
+
+    while ((char *)cur < (bi->bh->b_data + sb->s_blocksize)) {
+
+	/* No need to scan if all 0's or all 1's.
+	 * Since we're only counting 0's, we can simply ignore all 1's */
+	if (*cur == 0) {
+	    if (bi->first_zero_hint == 0) {
+		bi->first_zero_hint = ((char *)cur - bi->bh->b_data) << 3;
+	    }
+	    bi->free_count += sizeof(unsigned long)*8;
+	} else if (*cur != ~0L) {
+	    int b;
+	    for (b = 0; b < sizeof(unsigned long)*8; b++) {
+		if (!reiserfs_test_le_bit (b, cur)) {
+		    bi->free_count ++;
+		    if (bi->first_zero_hint == 0)
+			bi->first_zero_hint =
+					(((char *)cur - bi->bh->b_data) << 3) + b;
+		}
+	    }
+	}
+	cur ++;
+    }
+
+#ifdef CONFIG_REISERFS_CHECK
+// This outputs a lot of unneeded info on big FSes
+//    reiserfs_warning ("bitmap loaded from block %d: %d free blocks",
+//		      bi->bh->b_blocknr, bi->free_count);
+#endif
+}
+  
+static int read_bitmaps (struct super_block * s)
+{
+    int i, bmap_nr;
+
+    SB_AP_BITMAP (s) = vmalloc (sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
+    if (SB_AP_BITMAP (s) == 0)
+	return 1;
+    memset (SB_AP_BITMAP (s), 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
+    for (i = 0, bmap_nr = REISERFS_DISK_OFFSET_IN_BYTES / s->s_blocksize + 1;
+	 i < SB_BMAP_NR(s); i++, bmap_nr = s->s_blocksize * 8 * i) {
+	SB_AP_BITMAP (s)[i].bh = sb_getblk(s, bmap_nr);
+	if (!buffer_uptodate(SB_AP_BITMAP(s)[i].bh))
+	    ll_rw_block(READ, 1, &SB_AP_BITMAP(s)[i].bh);
+    }
+    for (i = 0; i < SB_BMAP_NR(s); i++) {
+	wait_on_buffer(SB_AP_BITMAP (s)[i].bh);
+	if (!buffer_uptodate(SB_AP_BITMAP(s)[i].bh)) {
+	    reiserfs_warning(s,"sh-2029: reiserfs read_bitmaps: "
+			 "bitmap block (#%lu) reading failed",
+			 SB_AP_BITMAP(s)[i].bh->b_blocknr);
+	    for (i = 0; i < SB_BMAP_NR(s); i++)
+		brelse(SB_AP_BITMAP(s)[i].bh);
+	    vfree(SB_AP_BITMAP(s));
+	    SB_AP_BITMAP(s) = NULL;
+	    return 1;
+	}
+	load_bitmap_info_data (s, SB_AP_BITMAP (s) + i);
+    }
+    return 0;
+}
+
+static int read_old_bitmaps (struct super_block * s)
+{
+  int i ;
+  struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK(s);
+  int bmp1 = (REISERFS_OLD_DISK_OFFSET_IN_BYTES / s->s_blocksize) + 1;  /* first of bitmap blocks */
+
+  /* read true bitmap */
+  SB_AP_BITMAP (s) = vmalloc (sizeof (struct reiserfs_buffer_info *) * sb_bmap_nr(rs));
+  if (SB_AP_BITMAP (s) == 0)
+    return 1;
+
+  memset (SB_AP_BITMAP (s), 0, sizeof (struct reiserfs_buffer_info *) * sb_bmap_nr(rs));
+
+  for (i = 0; i < sb_bmap_nr(rs); i ++) {
+    SB_AP_BITMAP (s)[i].bh = sb_bread (s, bmp1 + i);
+    if (!SB_AP_BITMAP (s)[i].bh)
+      return 1;
+    load_bitmap_info_data (s, SB_AP_BITMAP (s) + i);
+  }
+
+  return 0;
+}
+
+static int read_super_block (struct super_block * s, int offset)
+{
+    struct buffer_head * bh;
+    struct reiserfs_super_block * rs;
+    int fs_blocksize;
+ 
+
+    bh = sb_bread (s, offset / s->s_blocksize);
+    if (!bh) {
+      reiserfs_warning (s, "sh-2006: read_super_block: "
+              "bread failed (dev %s, block %lu, size %lu)",
+              reiserfs_bdevname (s), offset / s->s_blocksize, s->s_blocksize);
+      return 1;
+    }
+ 
+    rs = (struct reiserfs_super_block *)bh->b_data;
+    if (!is_any_reiserfs_magic_string (rs)) {
+      brelse (bh);
+      return 1;
+    }
+ 
+    //
+    // ok, reiserfs signature (old or new) found at the given offset
+    //    
+    fs_blocksize = sb_blocksize(rs);
+    brelse (bh);
+    sb_set_blocksize (s, fs_blocksize);
+    
+    bh = sb_bread (s, offset / s->s_blocksize);
+    if (!bh) {
+	reiserfs_warning (s, "sh-2007: read_super_block: "
+                "bread failed (dev %s, block %lu, size %lu)\n",
+                reiserfs_bdevname (s), offset / s->s_blocksize, s->s_blocksize);
+	return 1;
+    }
+    
+    rs = (struct reiserfs_super_block *)bh->b_data;
+    if (sb_blocksize(rs) != s->s_blocksize) {
+	reiserfs_warning (s, "sh-2011: read_super_block: "
+		"can't find a reiserfs filesystem on (dev %s, block %Lu, size %lu)\n",
+		reiserfs_bdevname (s), (unsigned long long)bh->b_blocknr, s->s_blocksize);
+	brelse (bh);
+	return 1;
+    }
+
+    if ( rs->s_v1.s_root_block == -1 ) {
+       brelse(bh) ;
+       reiserfs_warning (s, "Unfinished reiserfsck --rebuild-tree run detected. Please run\n"
+              "reiserfsck --rebuild-tree and wait for it to complete. If that fails,\n"
+              "get a newer reiserfsprogs package");
+       return 1;
+    }
+
+    SB_BUFFER_WITH_SB (s) = bh;
+    SB_DISK_SUPER_BLOCK (s) = rs;
+
+    if (is_reiserfs_jr (rs)) {
+	/* magic is of non-standard journal filesystem, look at s_version to
+	   find which format is in use */
+	if (sb_version(rs) == REISERFS_VERSION_2)
+	  reiserfs_warning (s, "read_super_block: found reiserfs format \"3.6\""
+		  " with non-standard journal");
+	else if (sb_version(rs) == REISERFS_VERSION_1)
+	  reiserfs_warning (s, "read_super_block: found reiserfs format \"3.5\""
+		  " with non-standard journal");
+	else {
+	  reiserfs_warning (s, "sh-2012: read_super_block: found unknown "
+			    "format \"%u\" of reiserfs with non-standard magic",
+			    sb_version(rs));
+	return 1;
+	}
+    }
+    else
+      /* s_version of standard format may contain incorrect information,
+	 so we just look at the magic string */
+      reiserfs_info (s, "found reiserfs format \"%s\" with standard journal\n",
+	      is_reiserfs_3_5 (rs) ? "3.5" : "3.6");
+
+    s->s_op = &reiserfs_sops;
+    s->s_export_op = &reiserfs_export_ops;
+#ifdef CONFIG_QUOTA
+    s->s_qcop = &reiserfs_qctl_operations;
+    s->dq_op = &reiserfs_quota_operations;
+#endif
+
+    /* The new format is limited by the 32 bit wide i_blocks field; we want
+    ** to be one full block below that.
+    */
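+    /* Illustrative arithmetic: i_blocks counts 512-byte sectors, so the
+     * limit below works out to 512 * 2^32 = 2 TiB, minus one block. */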
+    s->s_maxbytes = (512LL << 32) - s->s_blocksize ;
+    return 0;
+}
+
+
+
+/* after journal replay, reread all bitmap and super blocks */
+static int reread_meta_blocks(struct super_block *s) {
+  int i ;
+  ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s))) ;
+  wait_on_buffer(SB_BUFFER_WITH_SB(s)) ;
+  if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+    reiserfs_warning (s, "reread_meta_blocks, error reading the super") ;
+    return 1 ;
+  }
+
+  for (i = 0; i < SB_BMAP_NR(s) ; i++) {
+    ll_rw_block(READ, 1, &(SB_AP_BITMAP(s)[i].bh)) ;
+    wait_on_buffer(SB_AP_BITMAP(s)[i].bh) ;
+    if (!buffer_uptodate(SB_AP_BITMAP(s)[i].bh)) {
+      reiserfs_warning (s, "reread_meta_blocks, error reading bitmap block number %d at %llu",
+        i, (unsigned long long)SB_AP_BITMAP(s)[i].bh->b_blocknr) ;
+      return 1 ;
+    }
+  }
+  return 0 ;
+
+}
+
+
+/////////////////////////////////////////////////////
+// hash detection stuff
+
+
+// If the root directory is empty, we set the default (Yura's) hash and
+// warn about it.
+// FIXME: we look at only one name in the directory. If tea and yura
+// both produce the same value, we ask the user to send a report to the
+// mailing list.
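+// Rough sketch of the detection below (illustrative summary): the last
+// directory entry of the root directory is looked up, its name is hashed
+// with r5, tea and yura, and whichever hash value matches the offset stored
+// in the on-disk entry header wins.  If two candidates collide, the hash is
+// left unset and the user is asked to mount with an explicit -o hash=...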
+static __u32 find_hash_out (struct super_block * s)
+{
+    int retval;
+    struct inode * inode;
+    struct cpu_key key;
+    INITIALIZE_PATH (path);
+    struct reiserfs_dir_entry de;
+    __u32 hash = DEFAULT_HASH;
+
+    inode = s->s_root->d_inode;
+
+    do { // Some serious "goto"-hater was there ;)
+	u32 teahash, r5hash, yurahash;
+
+	make_cpu_key (&key, inode, ~0, TYPE_DIRENTRY, 3);
+	retval = search_by_entry_key (s, &key, &path, &de);
+	if (retval == IO_ERROR) {
+	    pathrelse (&path);
+	    return UNSET_HASH ;
+	}
+	if (retval == NAME_NOT_FOUND)
+	    de.de_entry_num --;
+	set_de_name_and_namelen (&de);
+	if (deh_offset( &(de.de_deh[de.de_entry_num]) ) == DOT_DOT_OFFSET) {
+	    /* allow override in this case */
+	    if (reiserfs_rupasov_hash(s)) {
+		hash = YURA_HASH ;
+	    }
+	    reiserfs_warning(s,"FS seems to be empty, autodetect "
+	                     "is using the default hash");
+	    break;
+	}
+	r5hash=GET_HASH_VALUE (r5_hash (de.de_name, de.de_namelen));
+	teahash=GET_HASH_VALUE (keyed_hash (de.de_name, de.de_namelen));
+	yurahash=GET_HASH_VALUE (yura_hash (de.de_name, de.de_namelen));
+	if ( ( (teahash == r5hash) && (GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num]))) == r5hash) ) ||
+	     ( (teahash == yurahash) && (yurahash == GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num])))) ) ||
+	     ( (r5hash == yurahash) && (yurahash == GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num])))) ) ) {
+	    reiserfs_warning(s,"Unable to automatically detect hash function. "
+			     "Please mount with -o hash={tea,rupasov,r5}");
+	    hash = UNSET_HASH;
+	    break;
+	}
+	if (GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num])) ) == yurahash)
+	    hash = YURA_HASH;
+	else if (GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num])) ) == teahash)
+	    hash = TEA_HASH;
+	else if (GET_HASH_VALUE( deh_offset(&(de.de_deh[de.de_entry_num])) ) == r5hash)
+	    hash = R5_HASH;
+	else {
+	    reiserfs_warning (s,"Unrecognised hash function");
+	    hash = UNSET_HASH;
+	}
+    } while (0);
+
+    pathrelse (&path);
+    return hash;
+}
+
+// find out which hash function names on this filesystem are sorted with
+static int what_hash (struct super_block * s)
+{
+    __u32 code;
+
+    code = sb_hash_function_code(SB_DISK_SUPER_BLOCK(s));
+
+    /* reiserfs_hash_detect() == true if any of the hash mount options
+    ** were used.  We must check them to make sure the user isn't
+    ** using a bad hash value
+    */
+    if (code == UNSET_HASH || reiserfs_hash_detect(s))
+	code = find_hash_out (s);
+
+    if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
+	/* detection has found the hash, and we must check against the 
+	** mount options 
+	*/
+	if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
+	    reiserfs_warning (s, "Error, %s hash detected, "
+		   "unable to force rupasov hash", reiserfs_hashname(code)) ;
+	    code = UNSET_HASH ;
+	} else if (reiserfs_tea_hash(s) && code != TEA_HASH) {
+	    reiserfs_warning (s, "Error, %s hash detected, "
+		   "unable to force tea hash", reiserfs_hashname(code)) ;
+	    code = UNSET_HASH ;
+	} else if (reiserfs_r5_hash(s) && code != R5_HASH) {
+	    reiserfs_warning (s, "Error, %s hash detected, "
+		   "unable to force r5 hash", reiserfs_hashname(code)) ;
+	    code = UNSET_HASH ;
+	} 
+    } else { 
+        /* find_hash_out was not called or could not determine the hash */
+	if (reiserfs_rupasov_hash(s)) {
+	    code = YURA_HASH ;
+	} else if (reiserfs_tea_hash(s)) {
+	    code = TEA_HASH ;
+	} else if (reiserfs_r5_hash(s)) {
+	    code = R5_HASH ;
+	} 
+    }
+
+    /* if we are mounted RW, and we have a new valid hash code, update 
+    ** the super
+    */
+    if (code != UNSET_HASH && 
+	!(s->s_flags & MS_RDONLY) && 
+        code != sb_hash_function_code(SB_DISK_SUPER_BLOCK(s))) {
+        set_sb_hash_function_code(SB_DISK_SUPER_BLOCK(s), code);
+    }
+    return code;
+}
+
+// return pointer to appropriate function
+static hashf_t hash_function (struct super_block * s)
+{
+    switch (what_hash (s)) {
+    case TEA_HASH:
+	reiserfs_info (s, "Using tea hash to sort names\n");
+	return keyed_hash;
+    case YURA_HASH:
+	reiserfs_info (s, "Using rupasov hash to sort names\n");
+	return yura_hash;
+    case R5_HASH:
+	reiserfs_info (s, "Using r5 hash to sort names\n");
+	return r5_hash;
+    }
+    return NULL;
+}
+
+// this is used to set up correct value for old partitions
+static int function2code (hashf_t func)
+{
+    if (func == keyed_hash)
+	return TEA_HASH;
+    if (func == yura_hash)
+	return YURA_HASH;
+    if (func == r5_hash)
+	return R5_HASH;
+
+    BUG() ; // should never happen
+
+    return 0;
+}
+
+#define SWARN(silent, s, ...)			\
+	if (!(silent))				\
+		reiserfs_warning (s, __VA_ARGS__)
+
+static int reiserfs_fill_super (struct super_block * s, void * data, int silent)
+{
+    struct inode *root_inode;
+    int j;
+    struct reiserfs_transaction_handle th ;
+    int old_format = 0;
+    unsigned long blocks;
+    unsigned int commit_max_age = 0;
+    int jinit_done = 0 ;
+    struct reiserfs_iget_args args ;
+    struct reiserfs_super_block * rs;
+    char *jdev_name;
+    struct reiserfs_sb_info *sbi;
+    int errval = -EINVAL;
+
+    sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
+    if (!sbi) {
+	errval = -ENOMEM;
+	goto error;
+    }
+    s->s_fs_info = sbi;
+    memset (sbi, 0, sizeof (struct reiserfs_sb_info));
+    /* Set default values for options: non-aggressive tails, RO on errors */
+    REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+    REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+    /* no preallocation minimum, be smart in
+       reiserfs_file_write instead */
+    REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+    /* Preallocate by 16 blocks (17-1) at once */
+    REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
+    /* Initialize the rwsem for xattr dir */
+    init_rwsem(&REISERFS_SB(s)->xattr_dir_sem);
+
+    /* setup default block allocator options */
+    reiserfs_init_alloc_options(s);
+
+    jdev_name = NULL;
+    if (reiserfs_parse_options (s, (char *) data, &(sbi->s_mount_opt), &blocks, &jdev_name, &commit_max_age) == 0) {
+	goto error;
+    }
+
+    if (blocks) {
+	SWARN (silent, s, "jmacd-7: reiserfs_fill_super: resize option "
+	       "for remount only");
+	goto error;
+    }	
+
+    /* try the old format (undistributed bitmap, super block in the 8th 1k block of the device) */
+    if (!read_super_block (s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
+      old_format = 1;
+    /* try the new format (64th 1k block), which can contain the reiserfs super block */
+    else if (read_super_block (s, REISERFS_DISK_OFFSET_IN_BYTES)) {
+      SWARN(silent, s, "sh-2021: reiserfs_fill_super: can not find reiserfs on %s", reiserfs_bdevname (s));
+      goto error;
+    }
+
+    rs = SB_DISK_SUPER_BLOCK (s);
+    /* Let's do a basic sanity check to verify that the underlying device is
+       not smaller than the filesystem. If the check fails then abort and
+       scream, because bad stuff will happen otherwise. */
+    if ( s->s_bdev && s->s_bdev->bd_inode && i_size_read(s->s_bdev->bd_inode) < sb_block_count(rs)*sb_blocksize(rs)) {
+	SWARN (silent, s, "Filesystem on %s cannot be mounted because it is bigger than the device", reiserfs_bdevname(s));
+	SWARN(silent, s, "You may need to run fsck or increase the size of your LVM partition");
+	SWARN(silent, s, "Or maybe you forgot to reboot after fdisk when it told you to");
+	goto error;
+    }
+
+    sbi->s_mount_state = SB_REISERFS_STATE(s);
+    sbi->s_mount_state = REISERFS_VALID_FS ;
+
+    if (old_format ? read_old_bitmaps(s) : read_bitmaps(s)) {
+	SWARN(silent, s, "jmacd-8: reiserfs_fill_super: unable to read bitmap");
+	goto error;
+    }
+#ifdef CONFIG_REISERFS_CHECK
+    SWARN (silent, s, "CONFIG_REISERFS_CHECK is set ON");
+    SWARN (silent, s, "- it is slow mode for debugging.");
+#endif
+
+    /* make data=ordered the default */
+    if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
+        !reiserfs_data_writeback(s))
+    {
+         REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
+    }
+
+    if (reiserfs_data_log(s)) {
+        reiserfs_info (s, "using journaled data mode\n");
+    } else if (reiserfs_data_ordered(s)) {
+        reiserfs_info (s, "using ordered data mode\n");
+    } else {
+        reiserfs_info (s, "using writeback data mode\n");
+    }
+    if (reiserfs_barrier_flush(s)) {
+    	printk("reiserfs: using flush barriers\n");
+    }
+
+    // set_device_ro(s->s_dev, 1) ;
+    if( journal_init(s, jdev_name, old_format, commit_max_age) ) {
+	SWARN(silent, s, "sh-2022: reiserfs_fill_super: unable to initialize journal space") ;
+	goto error ;
+    } else {
+	jinit_done = 1 ; /* once this is set, journal_release must be called
+			 ** if we error out of the mount
+			 */
+    }
+    if (reread_meta_blocks(s)) {
+	SWARN(silent, s, "jmacd-9: reiserfs_fill_super: unable to reread meta blocks after journal init") ;
+	goto error ;
+    }
+
+    if (replay_only (s))
+	goto error;
+
+    if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
+        SWARN(silent, s, "clm-7000: Detected readonly device, marking FS readonly") ;
+	s->s_flags |= MS_RDONLY ;
+    }
+    args.objectid = REISERFS_ROOT_OBJECTID ;
+    args.dirid = REISERFS_ROOT_PARENT_OBJECTID ;
+    root_inode = iget5_locked (s, REISERFS_ROOT_OBJECTID, reiserfs_find_actor, reiserfs_init_locked_inode, (void *)(&args));
+    if (!root_inode) {
+	SWARN(silent, s, "jmacd-10: reiserfs_fill_super: get root inode failed");
+	goto error;
+    }
+
+    if (root_inode->i_state & I_NEW) {
+	reiserfs_read_locked_inode(root_inode, &args);
+	unlock_new_inode(root_inode);
+    }
+
+    s->s_root = d_alloc_root(root_inode);  
+    if (!s->s_root) {
+	iput(root_inode);
+	goto error;
+    }
+
+    // define and initialize hash function
+    sbi->s_hash_function = hash_function (s);
+    if (sbi->s_hash_function == NULL) {
+      dput(s->s_root) ;
+      s->s_root = NULL ;
+      goto error ;
+    }
+
+    if (is_reiserfs_3_5 (rs) || (is_reiserfs_jr (rs) && SB_VERSION (s) == REISERFS_VERSION_1))
+	set_bit(REISERFS_3_5, &(sbi->s_properties));
+    else
+	set_bit(REISERFS_3_6, &(sbi->s_properties));
+    
+    if (!(s->s_flags & MS_RDONLY)) {
+
+	errval = journal_begin(&th, s, 1) ;
+        if (errval) {
+	    dput (s->s_root);
+	    s->s_root = NULL;
+	    goto error;
+        }
+	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+
+        set_sb_umount_state( rs, REISERFS_ERROR_FS );
+	set_sb_fs_state (rs, 0);
+	
+	if (old_format_only(s)) {
+	  /* filesystem of format 3.5 either with standard or non-standard
+	     journal */
+	  if (convert_reiserfs (s)) {
+	    /* and -o conv is given */
+	    if(!silent)
+	      reiserfs_info (s,"converting 3.5 filesystem to the 3.6 format") ;
+
+	    if (is_reiserfs_3_5 (rs))
+	      /* put magic string of 3.6 format. 2.2 will not be able to
+		 mount this filesystem anymore */
+	      memcpy (rs->s_v1.s_magic, reiserfs_3_6_magic_string,
+		      sizeof (reiserfs_3_6_magic_string));
+
+	    set_sb_version(rs,REISERFS_VERSION_2);
+	    reiserfs_convert_objectid_map_v1(s) ;
+	    set_bit(REISERFS_3_6, &(sbi->s_properties));
+	    clear_bit(REISERFS_3_5, &(sbi->s_properties));
+	  } else if (!silent){
+	    reiserfs_info (s, "using 3.5.x disk format\n") ;
+	  }
+	}
+
+	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+	errval = journal_end(&th, s, 1) ;
+	if (errval) {
+	    dput (s->s_root);
+	    s->s_root = NULL;
+	    goto error;
+	}
+
+	if ((errval = reiserfs_xattr_init (s, s->s_flags))) {
+	    dput (s->s_root);
+	    s->s_root = NULL;
+	    goto error;
+	}
+
+	/* look for files which were to be removed in previous session */
+	finish_unfinished (s);
+    } else {
+	if ( old_format_only(s) && !silent) {
+	    reiserfs_info (s, "using 3.5.x disk format\n") ;
+	}
+
+	if ((errval = reiserfs_xattr_init (s, s->s_flags))) {
+	    dput (s->s_root);
+	    s->s_root = NULL;
+	    goto error;
+	}
+    }
+    // mark hash in super block: it could be unset. overwrite should be ok
+    set_sb_hash_function_code( rs, function2code(sbi->s_hash_function ) );
+
+    handle_attrs( s );
+
+    reiserfs_proc_info_init( s );
+
+    init_waitqueue_head (&(sbi->s_wait));
+    spin_lock_init(&sbi->bitmap_lock);
+
+    return (0);
+
+ error:
+    if (jinit_done) { /* kill the commit thread, free journal ram */
+	journal_release_error(NULL, s) ;
+    }
+    if (SB_DISK_SUPER_BLOCK (s)) {
+	for (j = 0; j < SB_BMAP_NR (s); j ++) {
+	    if (SB_AP_BITMAP (s))
+		brelse (SB_AP_BITMAP (s)[j].bh);
+	}
+	if (SB_AP_BITMAP (s))
+	    vfree (SB_AP_BITMAP (s));
+    }
+    if (SB_BUFFER_WITH_SB (s))
+	brelse(SB_BUFFER_WITH_SB (s));
+#ifdef CONFIG_QUOTA
+    for (j = 0; j < MAXQUOTAS; j++) {
+	if (sbi->s_qf_names[j])
+	    kfree(sbi->s_qf_names[j]);
+    }
+#endif
+    if (sbi != NULL) {
+	kfree(sbi);
+    }
+
+    s->s_fs_info = NULL;
+    return errval;
+}
+
+
+static int reiserfs_statfs (struct super_block * s, struct kstatfs * buf)
+{
+  struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+  
+  buf->f_namelen = (REISERFS_MAX_NAME (s->s_blocksize));
+  buf->f_bfree   = sb_free_blocks(rs);
+  buf->f_bavail  = buf->f_bfree;
+  buf->f_blocks  = sb_block_count(rs) - sb_bmap_nr(rs) - 1;
+  buf->f_bsize   = s->s_blocksize;
+  /* changed to accommodate gcc folks.*/
+  buf->f_type    =  REISERFS_SUPER_MAGIC;
+  return 0;
+}
+
+#ifdef CONFIG_QUOTA
+static int reiserfs_dquot_initialize(struct inode *inode, int type)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    /* We may create quota structure so we need to reserve enough blocks */
+    reiserfs_write_lock(inode->i_sb);
+    journal_begin(&th, inode->i_sb, 2*REISERFS_QUOTA_INIT_BLOCKS);
+    ret = dquot_initialize(inode, type);
+    journal_end(&th, inode->i_sb, 2*REISERFS_QUOTA_INIT_BLOCKS);
+    reiserfs_write_unlock(inode->i_sb);
+    return ret;
+}
+
+static int reiserfs_dquot_drop(struct inode *inode)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    /* We may delete quota structure so we need to reserve enough blocks */
+    reiserfs_write_lock(inode->i_sb);
+    journal_begin(&th, inode->i_sb, 2*REISERFS_QUOTA_INIT_BLOCKS);
+    ret = dquot_drop(inode);
+    journal_end(&th, inode->i_sb, 2*REISERFS_QUOTA_INIT_BLOCKS);
+    reiserfs_write_unlock(inode->i_sb);
+    return ret;
+}
+
+static int reiserfs_write_dquot(struct dquot *dquot)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    reiserfs_write_lock(dquot->dq_sb);
+    journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS);
+    ret = dquot_commit(dquot);
+    journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS);
+    reiserfs_write_unlock(dquot->dq_sb);
+    return ret;
+}
+
+static int reiserfs_acquire_dquot(struct dquot *dquot)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    reiserfs_write_lock(dquot->dq_sb);
+    journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS);
+    ret = dquot_acquire(dquot);
+    journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS);
+    reiserfs_write_unlock(dquot->dq_sb);
+    return ret;
+}
+
+static int reiserfs_release_dquot(struct dquot *dquot)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    reiserfs_write_lock(dquot->dq_sb);
+    journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS);
+    ret = dquot_release(dquot);
+    journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS);
+    reiserfs_write_unlock(dquot->dq_sb);
+    return ret;
+}
+
+static int reiserfs_mark_dquot_dirty(struct dquot *dquot)
+{
+    /* Are we journalling quotas? */
+    if (REISERFS_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
+        REISERFS_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+	dquot_mark_dquot_dirty(dquot);
+	return reiserfs_write_dquot(dquot);
+    }
+    else
+	return dquot_mark_dquot_dirty(dquot);
+}
+
+static int reiserfs_write_info(struct super_block *sb, int type)
+{
+    struct reiserfs_transaction_handle th;
+    int ret;
+
+    /* Data block + inode block */
+    reiserfs_write_lock(sb);
+    journal_begin(&th, sb, 2);
+    ret = dquot_commit_info(sb, type);
+    journal_end(&th, sb, 2);
+    reiserfs_write_unlock(sb);
+    return ret;
+}
+
+/*
+ * Turn on quotas during mount time - we need to find
+ * the quota file and such...
+ */
+static int reiserfs_quota_on_mount(struct super_block *sb, int type)
+{
+    int err;
+    struct dentry *dentry;
+    struct qstr name = { .name = REISERFS_SB(sb)->s_qf_names[type],
+                         .hash = 0,
+                         .len = strlen(REISERFS_SB(sb)->s_qf_names[type])};
+
+    dentry = lookup_hash(&name, sb->s_root);
+    if (IS_ERR(dentry))
+            return PTR_ERR(dentry);
+    err = vfs_quota_on_mount(type, REISERFS_SB(sb)->s_jquota_fmt, dentry);
+    /* Now invalidate and put the dentry - the quota code got its own
+     * reference to the inode, and the dentry has at least a wrong hash, so
+     * we had better throw it away */
+    d_invalidate(dentry);
+    dput(dentry);
+    return err;
+}
+
+/*
+ * Standard function to be called on quota_on
+ */
+static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
+{
+    int err;
+    struct nameidata nd;
+
+    err = path_lookup(path, LOOKUP_FOLLOW, &nd);
+    if (err)
+        return err;
+    /* Quotafile not on the same filesystem? */
+    if (nd.mnt->mnt_sb != sb) {
+	path_release(&nd);
+        return -EXDEV;
+    }
+    /* We must not pack tails for quota files on reiserfs for quota IO to work */
+    if (!(REISERFS_I(nd.dentry->d_inode)->i_flags & i_nopack_mask)) {
+	reiserfs_warning(sb, "reiserfs: Quota file must have tail packing disabled.");
+	path_release(&nd);
+	return -EINVAL;
+    }
+    /* Not journalling quota? No more tests needed... */
+    if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] &&
+        !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) {
+	path_release(&nd);
+        return vfs_quota_on(sb, type, format_id, path);
+    }
+    /* Quotafile not on fs root? */
+    if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+	reiserfs_warning(sb, "reiserfs: Quota file not on filesystem root. "
+                             "Journalled quota will not work.");
+    path_release(&nd);
+    return vfs_quota_on(sb, type, format_id, path);
+}
+
+/* Read data from quotafile - avoid pagecache and such because we cannot afford
+ * acquiring the locks... As quota files are never truncated and quota code
+ * itself serializes the operations (and no one else should touch the files)
+ * we don't have to be afraid of races */
+static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
+				   size_t len, loff_t off)
+{
+    struct inode *inode = sb_dqopt(sb)->files[type];
+    unsigned long blk = off >> sb->s_blocksize_bits;
+    int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
+    size_t toread;
+    struct buffer_head tmp_bh, *bh;
+    loff_t i_size = i_size_read(inode);
+
+    if (off > i_size)
+	return 0;
+    if (off+len > i_size)
+	len = i_size-off;
+    toread = len;
+    while (toread > 0) {
+	tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread;
+	tmp_bh.b_state = 0;
+	/* Quota files are without tails so we can safely use this function */
+	reiserfs_write_lock(sb);
+	err = reiserfs_get_block(inode, blk, &tmp_bh, 0);
+	reiserfs_write_unlock(sb);
+	if (err)
+	    return err;
+	if (!buffer_mapped(&tmp_bh))    /* A hole? */
+	    memset(data, 0, tocopy);
+	else {
+	    bh = sb_bread(sb, tmp_bh.b_blocknr);
+	    if (!bh)
+		return -EIO;
+	    memcpy(data, bh->b_data+offset, tocopy);
+	    brelse(bh);
+	}
+	offset = 0;
+	toread -= tocopy;
+	data += tocopy;
+	blk++;
+    }
+    return len;
+}
+
+/* Write to quotafile (we know the transaction is already started and has
+ * enough credits) */
+static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+				    const char *data, size_t len, loff_t off)
+{
+    struct inode *inode = sb_dqopt(sb)->files[type];
+    unsigned long blk = off >> sb->s_blocksize_bits;
+    int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
+    int journal_quota = REISERFS_SB(sb)->s_qf_names[type] != NULL;
+    size_t towrite = len;
+    struct buffer_head tmp_bh, *bh;
+
+    down(&inode->i_sem);
+    while (towrite > 0) {
+	tocopy = sb->s_blocksize - offset < towrite ?
+	         sb->s_blocksize - offset : towrite;
+	tmp_bh.b_state = 0;
+	err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+	if (err)
+	    goto out;
+	if (offset || tocopy != sb->s_blocksize)
+	    bh = sb_bread(sb, tmp_bh.b_blocknr);
+	else
+	    bh = sb_getblk(sb, tmp_bh.b_blocknr);
+	if (!bh) {
+	    err = -EIO;
+	    goto out;
+	}
+	lock_buffer(bh);
+	memcpy(bh->b_data+offset, data, tocopy);
+	flush_dcache_page(bh->b_page);
+	set_buffer_uptodate(bh);
+	unlock_buffer(bh);
+	reiserfs_prepare_for_journal(sb, bh, 1);
+	journal_mark_dirty(current->journal_info, sb, bh);
+	if (!journal_quota)
+		reiserfs_add_ordered_list(inode, bh);
+	brelse(bh);
+	offset = 0;
+	towrite -= tocopy;
+	data += tocopy;
+	blk++;
+    }
+out:
+    if (len == towrite)
+	return err;
+    if (inode->i_size < off+len-towrite)
+	i_size_write(inode, off+len-towrite);
+    inode->i_version++;
+    inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+    mark_inode_dirty(inode);
+    up(&inode->i_sem);
+    return len - towrite;
+}
+
+#endif
+
+static struct super_block*
+get_super_block (struct file_system_type *fs_type, int flags,
+		 const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super);
+}
+
+static int __init
+init_reiserfs_fs ( void )
+{
+	int ret;
+
+	if ((ret = init_inodecache ())) {
+		return ret;
+	}
+
+        if ((ret = reiserfs_xattr_register_handlers ()))
+            goto failed_reiserfs_xattr_register_handlers;
+
+	reiserfs_proc_info_global_init ();
+	reiserfs_proc_register_global ("version", reiserfs_global_version_in_proc);
+
+        ret = register_filesystem (& reiserfs_fs_type);
+
+	if (ret == 0) {
+		return 0;
+	}
+
+        reiserfs_xattr_unregister_handlers ();
+
+failed_reiserfs_xattr_register_handlers:
+	reiserfs_proc_unregister_global ("version");
+	reiserfs_proc_info_global_done ();
+	destroy_inodecache ();
+
+	return ret;
+}
+
+static void __exit
+exit_reiserfs_fs ( void )
+{
+        reiserfs_xattr_unregister_handlers ();
+	reiserfs_proc_unregister_global ("version");
+	reiserfs_proc_info_global_done ();
+        unregister_filesystem (& reiserfs_fs_type);
+	destroy_inodecache ();
+}
+
+struct file_system_type reiserfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "reiserfs",
+	.get_sb		= get_super_block,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+MODULE_DESCRIPTION ("ReiserFS journaled filesystem");
+MODULE_AUTHOR      ("Hans Reiser <reiser@namesys.com>");
+MODULE_LICENSE     ("GPL");
+
+module_init (init_reiserfs_fs);
+module_exit (exit_reiserfs_fs);
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
new file mode 100644
index 0000000..6191909
--- /dev/null
+++ b/fs/reiserfs/tail_conversion.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
+ */
+
+#include <linux/config.h>
+#include <linux/time.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/reiserfs_fs.h>
+
+/* access to tail: when one is going to read the tail it must make sure that a
+   conversion is not running; direct2indirect and indirect2direct cannot run
+   concurrently */
+
+
+/* Converts direct items to an unformatted node. Panics if the file has no
+   tail. Returns -ENOSPC if there is no disk space for the conversion. */
+/* path points to the first direct item of the file, regardless of how many
+   of them there are */
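+/* Illustration (an assumed typical case, not from the original source): a
+ * 5000-byte file on a 4k-block filesystem keeps bytes 0-4095 in an
+ * unformatted node and the last 904 bytes in a direct item (the tail).
+ * Converting it here copies those 904 bytes into the new unformatted node
+ * (unbh), pastes a pointer to it into the indirect item, and deletes the
+ * direct items. */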
+int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inode, 
+		     struct path * path, struct buffer_head * unbh,
+		     loff_t tail_offset)
+{
+    struct super_block * sb = inode->i_sb;
+    struct buffer_head *up_to_date_bh ;
+    struct item_head * p_le_ih = PATH_PITEM_HEAD (path);
+    unsigned long total_tail = 0 ;
+    struct cpu_key end_key;  /* Key to search for the last byte of the
+				converted item. */
+    struct item_head ind_ih; /* new indirect item to be inserted or
+                                key of unfm pointer to be pasted */
+    int	n_blk_size,
+      n_retval;	  /* returned value for reiserfs_insert_item and clones */
+    unp_t unfm_ptr;  /* Handle on an unformatted node
+				       that will be inserted in the
+				       tree. */
+
+    BUG_ON (!th->t_trans_id);
+
+    REISERFS_SB(sb)->s_direct2indirect ++;
+
+    n_blk_size = sb->s_blocksize;
+
+    /* and key to search for append or insert pointer to the new
+       unformatted node. */
+    copy_item_head (&ind_ih, p_le_ih);
+    set_le_ih_k_offset (&ind_ih, tail_offset);
+    set_le_ih_k_type (&ind_ih, TYPE_INDIRECT);
+
+    /* Set the key to search for the place for new unfm pointer */
+    make_cpu_key (&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
+
+    // FIXME: we could avoid this 
+    if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND ) {
+	reiserfs_warning (sb, "PAP-14030: direct2indirect: "
+			"pasted or inserted byte exists in the tree %K. "
+			"Use fsck to repair.", &end_key);
+	pathrelse(path);
+	return -EIO;
+    }
+    
+    p_le_ih = PATH_PITEM_HEAD (path);
+
+    unfm_ptr = cpu_to_le32 (unbh->b_blocknr);
+    
+    if ( is_statdata_le_ih (p_le_ih) )  {
+	/* Insert new indirect item. */
+	set_ih_free_space (&ind_ih, 0); /* to be deleted in the near future */
+        put_ih_item_len( &ind_ih, UNFM_P_SIZE );
+	PATH_LAST_POSITION (path)++;
+	n_retval = reiserfs_insert_item (th, path, &end_key, &ind_ih, inode,
+					 (char *)&unfm_ptr);
+    } else {
+	/* Paste into last indirect item of an object. */
+	n_retval = reiserfs_paste_into_item(th, path, &end_key, inode,
+					    (char *)&unfm_ptr, UNFM_P_SIZE);
+    }
+    if ( n_retval ) {
+	return n_retval;
+    }
+
+    // note: from here there are two keys which have matching first
+    // three key components. They only differ by the fourth one.
+
+
+    /* Set the key to search for the direct items of the file */
+    make_cpu_key (&end_key, inode, max_reiserfs_offset (inode), TYPE_DIRECT, 4);
+
+    /* Move bytes from the direct items to the new unformatted node
+       and delete them. */
+    while (1)  {
+	int tail_size;
+
+	/* end_key.k_offset is set so that we will always find the
+           last item of the file */
+	if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND )
+	    reiserfs_panic (sb, "PAP-14050: direct2indirect: "
+			    "direct item (%K) not found", &end_key);
+	p_le_ih = PATH_PITEM_HEAD (path);
+	RFALSE( !is_direct_le_ih (p_le_ih),
+	        "vs-14055: direct item expected(%K), found %h",
+                &end_key, p_le_ih);
+        tail_size = (le_ih_k_offset (p_le_ih) & (n_blk_size - 1))
+            + ih_item_len(p_le_ih) - 1;
+
+	/* we only send the unbh pointer if the buffer is not up to date.
+	** this avoids overwriting good data from writepage() with old data
+	** from the disk or buffer cache
+	** Special case: unbh->b_page will be NULL if we are coming through
+	** DIRECT_IO handler here.
+	*/
+	if (!unbh->b_page || buffer_uptodate(unbh) || PageUptodate(unbh->b_page)) {
+	    up_to_date_bh = NULL ;
+	} else {
+	    up_to_date_bh = unbh ;
+	}
+	n_retval = reiserfs_delete_item (th, path, &end_key, inode, 
+	                                 up_to_date_bh) ;
+
+	total_tail += n_retval ;
+	if (tail_size == n_retval)
+	    // done: file does not have direct items anymore
+	    break;
+
+    }
+    /* if we've copied bytes from disk into the page, we need to zero
+    ** out the unused part of the block (it was not up to date before)
+    */
+    if (up_to_date_bh) {
+        unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
+	char *kaddr=kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+	memset(kaddr + pgoff, 0, n_blk_size - total_tail) ;
+	kunmap_atomic(kaddr, KM_USER0);
+    }
+
+    REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
+
+    return 0;
+}
+
+
+/* stolen from fs/buffer.c */
+void reiserfs_unmap_buffer(struct buffer_head *bh) {
+    lock_buffer(bh) ;
+    if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
+      BUG() ;
+    }
+    clear_buffer_dirty(bh) ;
+    /* Remove the buffer from whatever list it belongs to. We are mostly
+       interested in removing it from the per-sb j_dirty_buffers list, to
+       avoid a BUG() on an attempt to write an unmapped buffer */
+    if ( (!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
+	struct inode *inode = bh->b_page->mapping->host;
+	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
+	spin_lock(&j->j_dirty_buffers_lock);
+	list_del_init(&bh->b_assoc_buffers);
+	reiserfs_free_jh(bh);
+	spin_unlock(&j->j_dirty_buffers_lock);
+    }
+    clear_buffer_mapped(bh) ;
+    clear_buffer_req(bh) ;
+    clear_buffer_new(bh);
+    bh->b_bdev = NULL;
+    unlock_buffer(bh) ;
+}
+
+/* This first locks the inode (neither reads nor sync are permitted),
+   reads the tail through the page cache, and inserts a direct item. When
+   the direct item has been inserted successfully the inode is left locked.
+   The return value is always what we expect from it (the number of cut
+   bytes). But when the tail remains in the unformatted node, we set the
+   mode to SKIP_BALANCING and unlock the inode */
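+/* Illustration (an assumed typical case, not from the original source):
+ * truncating the 5000-byte file from the direct2indirect example down to
+ * 4200 bytes leaves a 104-byte tail in the last unformatted node; this
+ * routine copies those bytes out of the page cache into a new direct item
+ * and then asks the caller (via M_CUT) to remove the now-unneeded
+ * unformatted node. */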
+int indirect2direct (struct reiserfs_transaction_handle *th, 
+		     struct inode * p_s_inode,
+		     struct page *page, 
+		     struct path * p_s_path, /* path to the indirect item. */
+		     const struct cpu_key * p_s_item_key, /* Key to look for unformatted node pointer to be cut. */
+		     loff_t n_new_file_size, /* New file size. */
+		     char * p_c_mode)
+{
+    struct super_block * p_s_sb = p_s_inode->i_sb;
+    struct item_head      s_ih;
+    unsigned long n_block_size = p_s_sb->s_blocksize;
+    char * tail;
+    int tail_len, round_tail_len;
+    loff_t pos, pos1; /* position of first byte of the tail */
+    struct cpu_key key;
+
+    BUG_ON (!th->t_trans_id);
+
+    REISERFS_SB(p_s_sb)->s_indirect2direct ++;
+
+    *p_c_mode = M_SKIP_BALANCING;
+
+    /* store item head path points to. */
+    copy_item_head (&s_ih, PATH_PITEM_HEAD(p_s_path));
+
+    tail_len = (n_new_file_size & (n_block_size - 1));
+    if (get_inode_sd_version (p_s_inode) == STAT_DATA_V2)
+	round_tail_len = ROUND_UP (tail_len);
+    else
+	round_tail_len = tail_len;
+
+    pos = le_ih_k_offset (&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
+    pos1 = pos;
+
+    // we are protected by i_sem. The tail cannot disappear, nor can an
+    // append be done either;
+    // we are in truncate or packing the tail in file_release
+
+    tail = (char *)kmap(page) ; /* this can schedule */
+
+    if (path_changed (&s_ih, p_s_path)) {
+	/* re-search indirect item */
+	if ( search_for_position_by_key (p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND )
+	    reiserfs_panic(p_s_sb, "PAP-5520: indirect2direct: "
+			   "item to be converted %K does not exist", p_s_item_key);
+	copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+#ifdef CONFIG_REISERFS_CHECK
+	pos = le_ih_k_offset (&s_ih) - 1 + 
+	    (ih_item_len(&s_ih) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
+	if (pos != pos1)
+	    reiserfs_panic (p_s_sb, "vs-5530: indirect2direct: "
+			    "tail position changed while we were reading it");
+#endif
+    }
+
+
+    /* Set direct item header to insert. */
+    make_le_item_head (&s_ih, NULL, get_inode_item_key_version (p_s_inode), pos1 + 1,
+		       TYPE_DIRECT, round_tail_len, 0xffff/*ih_free_space*/);
+
+    /* we want a pointer to the first byte of the tail in the page.
+    ** the page was locked and this part of the page was up to date when
+    ** indirect2direct was called, so we know the bytes are still valid
+    */
+    tail = tail + (pos & (PAGE_CACHE_SIZE - 1)) ;
+
+    PATH_LAST_POSITION(p_s_path)++;
+
+    key = *p_s_item_key;
+    set_cpu_key_k_type (&key, TYPE_DIRECT);
+    key.key_length = 4;
+    /* Insert tail as new direct item in the tree */
+    if ( reiserfs_insert_item(th, p_s_path, &key, &s_ih, p_s_inode,
+			      tail ? tail : NULL) < 0 ) {
+	/* No disk space. So we cannot convert the last unformatted node
+	   to a direct item.  In this case we used to adjust the
+	   indirect item's ih_free_space. Now ih_free_space is not
+	   used, and it would be ideal to write zeros to the corresponding
+	   unformatted node. For now i_size is considered the guard against
+	   going beyond the file size */
+	kunmap(page) ;
+	return n_block_size - round_tail_len;
+    }
+    kunmap(page) ;
+
+    /* make sure to get the i_blocks changes from reiserfs_insert_item */
+    reiserfs_update_sd(th, p_s_inode);
+
+    // note: we now have the same situation as in the direct2indirect
+    // conversion above: there are two keys which have matching first three
+    // key components. They only differ by the fourth one.
+
+    /* We have inserted new direct item and must remove last
+       unformatted node. */
+    *p_c_mode = M_CUT;
+
+    /* we store position of first direct item in the in-core inode */
+    //mark_file_with_tail (p_s_inode, pos1 + 1);
+    REISERFS_I(p_s_inode)->i_first_direct_byte = pos1 + 1;
+
+    return n_block_size - round_tail_len;
+}
+
+
+
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
new file mode 100644
index 0000000..45582fe
--- /dev/null
+++ b/fs/reiserfs/xattr.c
@@ -0,0 +1,1450 @@
+/*
+ * linux/fs/reiserfs/xattr.c
+ *
+ * Copyright (c) 2002 by Jeff Mahoney, <jeffm@suse.com>
+ *
+ */
+
+/*
+ * In order to implement EA/ACLs in a clean, backwards compatible manner,
+ * they are implemented as files in a "private" directory.
+ * Each EA is in its own file, with the directory layout like so (/ is assumed
+ * to be relative to fs root). Inside the /.reiserfs_priv/xattrs directory,
+ * directories named using the capital-hex form of the objectid and
+ * generation number are used. Inside each directory are individual files
+ * named with the name of the extended attribute.
+ *
+ * So, for objectid 12648430, we could have:
+ * /.reiserfs_priv/xattrs/C0FFEE.0/system.posix_acl_access
+ * /.reiserfs_priv/xattrs/C0FFEE.0/system.posix_acl_default
+ * /.reiserfs_priv/xattrs/C0FFEE.0/user.Content-Type
+ * .. or similar.
+ *
+ * The file contents are the text of the EA. The size is known based on the
+ * stat data describing the file.
+ *
+ * In the case of system.posix_acl_access and system.posix_acl_default, since
+ * these are special cases for filesystem ACLs, they are interpreted by the
+ * kernel, in addition, they are negatively and positively cached and attached
+ * to the inode so that unnecessary lookups are avoided.
+ */
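+/* Illustrative example (an assumption, not part of the original comment):
+ * running "setfattr -n user.Content-Type -v text/plain /mnt/foo" on a file
+ * whose key has objectid 0xC0FFEE and generation 0 stores the value
+ * "text/plain" as the contents of
+ * /.reiserfs_priv/xattrs/C0FFEE.0/user.Content-Type. */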
+
+#include <linux/reiserfs_fs.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/mbcache.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <linux/smp_lock.h>
+#include <linux/stat.h>
+#include <asm/semaphore.h>
+
+#define FL_READONLY 128
+#define FL_DIR_SEM_HELD 256
+#define PRIVROOT_NAME ".reiserfs_priv"
+#define XAROOT_NAME   "xattrs"
+
+static struct reiserfs_xattr_handler *find_xattr_handler_prefix (const char *prefix);
+
+static struct dentry *
+create_xa_root (struct super_block *sb)
+{
+    struct dentry *privroot = dget (REISERFS_SB(sb)->priv_root);
+    struct dentry *xaroot;
+
+    /* This needs to be created at mount-time */
+    if (!privroot)
+        return ERR_PTR(-EOPNOTSUPP);
+
+    xaroot = lookup_one_len (XAROOT_NAME, privroot, strlen (XAROOT_NAME));
+    if (IS_ERR (xaroot)) {
+        goto out;
+    } else if (!xaroot->d_inode) {
+        int err;
+        down (&privroot->d_inode->i_sem);
+        err = privroot->d_inode->i_op->mkdir (privroot->d_inode, xaroot, 0700);
+        up (&privroot->d_inode->i_sem);
+
+        if (err) {
+            dput (xaroot);
+            dput (privroot);
+            return ERR_PTR (err);
+        }
+        REISERFS_SB(sb)->xattr_root = dget (xaroot);
+    }
+
+out:
+    dput (privroot);
+    return xaroot;
+}
+
+/* This will return a dentry, or error, referring to the xa root directory.
+ * If the xa root doesn't exist yet, the dentry will be returned without
+ * an associated inode. This dentry can be used with ->mkdir to create
+ * the xa directory. */
+static struct dentry *
+__get_xa_root (struct super_block *s)
+{
+    struct dentry *privroot = dget (REISERFS_SB(s)->priv_root);
+    struct dentry *xaroot = NULL;
+
+    if (IS_ERR (privroot) || !privroot)
+        return privroot;
+
+    xaroot = lookup_one_len (XAROOT_NAME, privroot, strlen (XAROOT_NAME));
+    if (IS_ERR (xaroot)) {
+        goto out;
+    } else if (!xaroot->d_inode) {
+        dput (xaroot);
+        xaroot = NULL;
+        goto out;
+    }
+
+    REISERFS_SB(s)->xattr_root = dget (xaroot);
+
+out:
+    dput (privroot);
+    return xaroot;
+}
+
+/* Returns the dentry (or NULL) referring to the root of the extended
+ * attribute directory tree. If it has already been retrieved, it is used.
+ * Otherwise, we attempt to retrieve it from disk. It may also return
+ * a pointer-encoded error.
+ */
+static inline struct dentry *
+get_xa_root (struct super_block *s)
+{
+    struct dentry *dentry = dget (REISERFS_SB(s)->xattr_root);
+
+    if (!dentry)
+        dentry = __get_xa_root (s);
+
+    return dentry;
+}
+
+/* Opens the directory corresponding to the inode's extended attribute store.
+ * If flags allow, the tree to the directory may be created. If creation is
+ * prohibited, -ENODATA is returned. */
+static struct dentry *
+open_xa_dir (const struct inode *inode, int flags)
+{
+    struct dentry *xaroot, *xadir;
+    char namebuf[17];
+
+    xaroot = get_xa_root (inode->i_sb);
+    if (IS_ERR (xaroot)) {
+        return xaroot;
+    } else if (!xaroot) {
+        if (flags == 0 || flags & XATTR_CREATE) {
+            xaroot = create_xa_root (inode->i_sb);
+            if (IS_ERR (xaroot))
+                return xaroot;
+        }
+        if (!xaroot)
+            return ERR_PTR (-ENODATA);
+    }
+
+    /* ok, we have xaroot open */
+
+    snprintf (namebuf, sizeof (namebuf), "%X.%X",
+              le32_to_cpu (INODE_PKEY (inode)->k_objectid),
+              inode->i_generation);
+    xadir = lookup_one_len (namebuf, xaroot, strlen (namebuf));
+    if (IS_ERR (xadir)) {
+        dput (xaroot);
+        return xadir;
+    }
+
+    if (!xadir->d_inode) {
+        int err;
+        if (flags == 0 || flags & XATTR_CREATE) {
+            /* Although there is nothing else trying to create this directory,
+             * another directory with the same hash may be created, so we need
+             * to protect against that */
+            err = xaroot->d_inode->i_op->mkdir (xaroot->d_inode, xadir, 0700);
+            if (err) {
+                dput (xaroot);
+                dput (xadir);
+                return ERR_PTR (err);
+            }
+        }
+        if (!xadir->d_inode) {
+            dput (xaroot);
+            dput (xadir);
+            return ERR_PTR (-ENODATA);
+        }
+    }
+
+    dput (xaroot);
+    return xadir;
+}
+
+/* Returns a dentry corresponding to a specific extended attribute file
+ * for the inode. If flags allow, the file is created. Otherwise, a
+ * valid or negative dentry, or an error is returned. */
+static struct dentry *
+get_xa_file_dentry (const struct inode *inode, const char *name, int flags)
+{
+    struct dentry *xadir, *xafile;
+    int err = 0;
+
+    xadir = open_xa_dir (inode, flags);
+    if (IS_ERR (xadir)) {
+        return ERR_PTR (PTR_ERR (xadir));
+    } else if (xadir && !xadir->d_inode) {
+        dput (xadir);
+        return ERR_PTR (-ENODATA);
+    }
+
+    xafile = lookup_one_len (name, xadir, strlen (name));
+    if (IS_ERR (xafile)) {
+        dput (xadir);
+        return ERR_PTR (PTR_ERR (xafile));
+    }
+
+    if (xafile->d_inode) { /* file exists */
+        if (flags & XATTR_CREATE) {
+            err = -EEXIST;
+            dput (xafile);
+            goto out;
+        }
+    } else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
+        goto out;
+    } else {
+        /* inode->i_sem is down, so nothing else can try to create
+         * the same xattr */
+        err = xadir->d_inode->i_op->create (xadir->d_inode, xafile,
+                                            0700|S_IFREG, NULL);
+
+        if (err) {
+            dput (xafile);
+            goto out;
+        }
+    }
+
+out:
+    dput (xadir);
+    if (err)
+        xafile = ERR_PTR (err);
+    return xafile;
+}
+
+
+/* Opens a file pointer to the attribute associated with the inode */
+static struct file *
+open_xa_file (const struct inode *inode, const char *name, int flags)
+{
+    struct dentry *xafile;
+    struct file *fp;
+
+    xafile = get_xa_file_dentry (inode, name, flags);
+    if (IS_ERR (xafile))
+        return ERR_PTR (PTR_ERR (xafile));
+    else if (!xafile->d_inode) {
+        dput (xafile);
+        return ERR_PTR (-ENODATA);
+    }
+
+    fp = dentry_open (xafile, NULL, O_RDWR);
+    /* dentry_open dputs the dentry if it fails */
+
+    return fp;
+}
+
+
+/*
+ * this is very similar to fs/reiserfs/dir.c:reiserfs_readdir, but
+ * we need to drop the path before calling the filldir callback.  That
+ * would be a big performance hit to the non-xattr case, so I've copied
+ * the whole thing for now. --clm
+ *
+ * the big difference is that I go backwards through the directory,
+ * and don't mess with f->f_pos, but the idea is the same.  Do some
+ * action on each and every entry in the directory.
+ *
+ * we're called with i_sem held, so there are no worries about the directory
+ * changing underneath us.
+ */
+static int __xattr_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+    struct inode *inode = filp->f_dentry->d_inode;
+    struct cpu_key pos_key;	/* key of current position in the directory (key of directory entry) */
+    INITIALIZE_PATH (path_to_entry);
+    struct buffer_head * bh;
+    int entry_num;
+    struct item_head * ih, tmp_ih;
+    int search_res;
+    char * local_buf;
+    loff_t next_pos;
+    char small_buf[32] ; /* avoid kmalloc if we can */
+    struct reiserfs_de_head *deh;
+    int d_reclen;
+    char * d_name;
+    off_t d_off;
+    ino_t d_ino;
+    struct reiserfs_dir_entry de;
+
+
+    /* form the key used to search for the next directory entry; we walk
+       the directory backwards, starting from its maximum offset */
+    next_pos = max_reiserfs_offset(inode);
+
+    while (1) {
+research:
+	if (next_pos <= DOT_DOT_OFFSET)
+	    break;
+	make_cpu_key (&pos_key, inode, next_pos, TYPE_DIRENTRY, 3);
+
+	search_res = search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry, &de);
+	if (search_res == IO_ERROR) {
+	    // FIXME: we could just skip the part of the directory which
+	    // could not be read
+	    pathrelse(&path_to_entry);
+	    return -EIO;
+	}
+
+	if (search_res == NAME_NOT_FOUND)
+	    de.de_entry_num--;
+
+	set_de_name_and_namelen(&de);
+	entry_num = de.de_entry_num;
+	deh = &(de.de_deh[entry_num]);
+
+	bh = de.de_bh;
+	ih = de.de_ih;
+
+	if (!is_direntry_le_ih(ih)) {
+            reiserfs_warning(inode->i_sb, "not direntry %h", ih);
+	    break;
+        }
+	copy_item_head(&tmp_ih, ih);
+
+	/* we must have found an item, that is, an item of this directory */
+	RFALSE( COMP_SHORT_KEYS (&(ih->ih_key), &pos_key),
+		"vs-9000: found item %h does not match to dir we readdir %K",
+		ih, &pos_key);
+
+	if (deh_offset(deh) <= DOT_DOT_OFFSET) {
+	    break;
+	}
+
+	/* look for the previous entry in the directory */
+	next_pos = deh_offset (deh) - 1;
+
+	if (!de_visible (deh))
+	    /* it is a hidden entry */
+	    continue;
+
+	d_reclen = entry_length(bh, ih, entry_num);
+	d_name = B_I_DEH_ENTRY_FILE_NAME (bh, ih, deh);
+	d_off = deh_offset (deh);
+	d_ino = deh_objectid (deh);
+
+	if (!d_name[d_reclen - 1])
+	    d_reclen = strlen (d_name);
+
+	if (d_reclen > REISERFS_MAX_NAME(inode->i_sb->s_blocksize)){
+	    /* too big to send back to VFS */
+	    continue ;
+	}
+
+        /* Ignore the .reiserfs_priv entry */
+        if (reiserfs_xattrs (inode->i_sb) &&
+            !old_format_only(inode->i_sb) &&
+            deh_objectid (deh) == le32_to_cpu (INODE_PKEY(REISERFS_SB(inode->i_sb)->priv_root->d_inode)->k_objectid))
+          continue;
+
+	if (d_reclen <= 32) {
+	    local_buf = small_buf ;
+	} else {
+	    local_buf = reiserfs_kmalloc(d_reclen, GFP_NOFS, inode->i_sb) ;
+	    if (!local_buf) {
+		pathrelse (&path_to_entry);
+		return -ENOMEM ;
+	    }
+	    if (item_moved (&tmp_ih, &path_to_entry)) {
+		reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+
+		/* sigh, must retry.  Do this same offset again */
+		next_pos = d_off;
+		goto research;
+	    }
+	}
+
+	// Note that we copy the name to user space via a temporary
+	// buffer (local_buf) because filldir may block if the
+	// user space buffer is swapped out; while it blocks, the
+	// entry can move somewhere else
+	memcpy (local_buf, d_name, d_reclen);
+
+	/* the filldir function might need to start transactions,
+	 * or do who knows what.  Release the path now that we've
+	 * copied all the important stuff out of the deh
+	 */
+	pathrelse (&path_to_entry);
+
+	if (filldir (dirent, local_buf, d_reclen, d_off, d_ino,
+		     DT_UNKNOWN) < 0) {
+	    if (local_buf != small_buf) {
+		reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+	    }
+	    goto end;
+	}
+	if (local_buf != small_buf) {
+	    reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+	}
+    } /* while */
+
+end:
+    pathrelse (&path_to_entry);
+    return 0;
+}
+
+/*
+ * this could be done with dedicated readdir ops for the xattr files,
+ * but I want to get something working asap
+ * this is stolen from vfs_readdir
+ *
+ */
+static
+int xattr_readdir(struct file *file, filldir_t filler, void *buf)
+{
+        struct inode *inode = file->f_dentry->d_inode;
+        int res = -ENOTDIR;
+        if (!file->f_op || !file->f_op->readdir)
+                goto out;
+        down(&inode->i_sem);
+//        down(&inode->i_zombie);
+        res = -ENOENT;
+        if (!IS_DEADDIR(inode)) {
+                lock_kernel();
+                res = __xattr_readdir(file, buf, filler);
+                unlock_kernel();
+        }
+//        up(&inode->i_zombie);
+        up(&inode->i_sem);
+out:
+        return res;
+}
+
+
+/* Internal operations on file data */
+static inline void
+reiserfs_put_page(struct page *page)
+{
+        kunmap(page);
+        page_cache_release(page);
+}
+
+static struct page *
+reiserfs_get_page(struct inode *dir, unsigned long n)
+{
+        struct address_space *mapping = dir->i_mapping;
+        struct page *page;
+        /* We can deadlock if we try to free dentries,
+           and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
+        mapping->flags = (mapping->flags & ~__GFP_BITS_MASK) | GFP_NOFS;
+        page = read_cache_page (mapping, n,
+                                (filler_t*)mapping->a_ops->readpage, NULL);
+        if (!IS_ERR(page)) {
+                wait_on_page_locked(page);
+                kmap(page);
+                if (!PageUptodate(page))
+                        goto fail;
+
+                if (PageError(page))
+                        goto fail;
+        }
+        return page;
+
+fail:
+        reiserfs_put_page(page);
+        return ERR_PTR(-EIO);
+}
+
+static inline __u32
+xattr_hash (const char *msg, int len)
+{
+    return csum_partial (msg, len, 0);
+}
+
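+/* Editor's sketch (illustrative only; not used by the code in this file):
+ * reiserfs_xattr_set()/reiserfs_xattr_get() below store each attribute as a
+ * regular file whose body is a struct reiserfs_xattr_header (magic plus the
+ * xattr_hash() checksum of the value bytes) immediately followed by the raw
+ * value, so the backing file size reiserfs_xattr_get() expects is: */
+static inline size_t
+example_xattr_file_size (size_t value_len)
+{
+    return sizeof (struct reiserfs_xattr_header) + value_len;
+}
+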
+/* Generic extended attribute operations that can be used by xa plugins */
+
+/*
+ * inode->i_sem: down
+ */
+int
+reiserfs_xattr_set (struct inode *inode, const char *name, const void *buffer,
+                    size_t buffer_size, int flags)
+{
+    int err = 0;
+    struct file *fp;
+    struct page *page;
+    char *data;
+    struct address_space *mapping;
+    size_t file_pos = 0;
+    size_t buffer_pos = 0;
+    struct inode *xinode;
+    struct iattr newattrs;
+    __u32 xahash = 0;
+
+    if (IS_RDONLY (inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (inode) || IS_APPEND (inode))
+        return -EPERM;
+
+    if (get_inode_sd_version (inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    /* Empty xattrs are ok, they're just empty files, no hash */
+    if (buffer && buffer_size)
+        xahash = xattr_hash (buffer, buffer_size);
+
+open_file:
+    fp = open_xa_file (inode, name, flags);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        goto out;
+    }
+
+    xinode = fp->f_dentry->d_inode;
+    REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+    /* we need to copy it off.. */
+    if (xinode->i_nlink > 1) {
+	fput(fp);
+        err = reiserfs_xattr_del (inode, name);
+        if (err < 0)
+            goto out;
+        /* We just killed the old one, we're not replacing anymore */
+        if (flags & XATTR_REPLACE)
+            flags &= ~XATTR_REPLACE;
+        goto open_file;
+    }
+
+    /* Resize it so we're ok to write there */
+    newattrs.ia_size = buffer_size;
+    newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+    down (&xinode->i_sem);
+    err = notify_change(fp->f_dentry, &newattrs);
+    if (err)
+        goto out_filp;
+
+    mapping = xinode->i_mapping;
+    while (buffer_pos < buffer_size || buffer_pos == 0) {
+        size_t chunk;
+        size_t skip = 0;
+        size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
+        if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
+            chunk = PAGE_CACHE_SIZE;
+        else
+            chunk = buffer_size - buffer_pos;
+
+        page = reiserfs_get_page (xinode, file_pos >> PAGE_CACHE_SHIFT);
+        if (IS_ERR (page)) {
+            err = PTR_ERR (page);
+            goto out_filp;
+        }
+
+        lock_page (page);
+        data = page_address (page);
+
+        if (file_pos == 0) {
+            struct reiserfs_xattr_header *rxh;
+            skip = file_pos = sizeof (struct reiserfs_xattr_header);
+            if (chunk + skip > PAGE_CACHE_SIZE)
+                chunk = PAGE_CACHE_SIZE - skip;
+            rxh = (struct reiserfs_xattr_header *)data;
+            rxh->h_magic = cpu_to_le32 (REISERFS_XATTR_MAGIC);
+            rxh->h_hash = cpu_to_le32 (xahash);
+        }
+
+        err = mapping->a_ops->prepare_write (fp, page, page_offset,
+                                             page_offset + chunk + skip);
+        if (!err) {
+	    if (buffer)
+		memcpy (data + skip, buffer + buffer_pos, chunk);
+            err = mapping->a_ops->commit_write (fp, page, page_offset,
+                                                page_offset + chunk + skip);
+	}
+        unlock_page (page);
+        reiserfs_put_page (page);
+        buffer_pos += chunk;
+        file_pos += chunk;
+        skip = 0;
+        if (err || buffer_size == 0 || !buffer)
+            break;
+    }
+
+    /* We can't mark the inode dirty if it's not hashed. This is the case
+     * when we're inheriting the default ACL. If we dirty it, the inode
+     * gets marked dirty, but won't (ever) make it onto the dirty list until
+     * it's synced explicitly to clear I_DIRTY. This is bad. */
+    if (!hlist_unhashed(&inode->i_hash)) {
+        inode->i_ctime = CURRENT_TIME_SEC;
+        mark_inode_dirty (inode);
+    }
+
+out_filp:
+    up (&xinode->i_sem);
+    fput(fp);
+
+out:
+    return err;
+}
+
+/*
+ * inode->i_sem: down
+ */
+int
+reiserfs_xattr_get (const struct inode *inode, const char *name, void *buffer,
+                    size_t buffer_size)
+{
+    ssize_t err = 0;
+    struct file *fp;
+    size_t isize;
+    size_t file_pos = 0;
+    size_t buffer_pos = 0;
+    struct page *page;
+    struct inode *xinode;
+    __u32 hash = 0;
+
+    if (name == NULL)
+        return -EINVAL;
+
+    /* We can't have xattrs attached to v1 items since they don't have
+     * generation numbers */
+    if (get_inode_sd_version (inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    fp = open_xa_file (inode, name, FL_READONLY);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        goto out;
+    }
+
+    xinode = fp->f_dentry->d_inode;
+    isize = xinode->i_size;
+    REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+    /* Just return the size needed */
+    if (buffer == NULL) {
+        err = isize - sizeof (struct reiserfs_xattr_header);
+        goto out_dput;
+    }
+
+    if (buffer_size < isize - sizeof (struct reiserfs_xattr_header)) {
+        err = -ERANGE;
+        goto out_dput;
+    }
+
+    while (file_pos < isize) {
+        size_t chunk;
+        char *data;
+        size_t skip = 0;
+        if (isize - file_pos > PAGE_CACHE_SIZE)
+            chunk = PAGE_CACHE_SIZE;
+        else
+            chunk = isize - file_pos;
+
+        page = reiserfs_get_page (xinode, file_pos >> PAGE_CACHE_SHIFT);
+        if (IS_ERR (page)) {
+            err = PTR_ERR (page);
+            goto out_dput;
+        }
+
+        lock_page (page);
+        data = page_address (page);
+        if (file_pos == 0) {
+            struct reiserfs_xattr_header *rxh =
+                                        (struct reiserfs_xattr_header *)data;
+            skip = file_pos = sizeof (struct reiserfs_xattr_header);
+            chunk -= skip;
+            /* Magic doesn't match up.. */
+            if (rxh->h_magic != cpu_to_le32 (REISERFS_XATTR_MAGIC)) {
+                unlock_page (page);
+                reiserfs_put_page (page);
+                reiserfs_warning (inode->i_sb, "Invalid magic for xattr (%s) "
+                                  "associated with %k", name,
+                                  INODE_PKEY (inode));
+                err = -EIO;
+                goto out_dput;
+            }
+            hash = le32_to_cpu (rxh->h_hash);
+        }
+        memcpy (buffer + buffer_pos, data + skip, chunk);
+        unlock_page (page);
+        reiserfs_put_page (page);
+        file_pos += chunk;
+        buffer_pos += chunk;
+        skip = 0;
+    }
+    err = isize - sizeof (struct reiserfs_xattr_header);
+
+    if (xattr_hash (buffer, isize - sizeof (struct reiserfs_xattr_header)) != hash) {
+        reiserfs_warning (inode->i_sb, "Invalid hash for xattr (%s) associated "
+                          "with %k", name, INODE_PKEY (inode));
+        err = -EIO;
+    }
+
+out_dput:
+    fput(fp);
+
+out:
+    return err;
+}
+
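+/* Editor's example (illustrative only): callers such as reiserfs_get_acl()
+ * in xattr_acl.c use the usual two-step pattern with this routine -- call
+ * it with a NULL buffer to learn the value size, allocate, then call it
+ * again to fetch the value:
+ *
+ *     size = reiserfs_xattr_get (inode, name, NULL, 0);
+ *     value = kmalloc (size, GFP_NOFS);
+ *     reiserfs_xattr_get (inode, name, value, size);
+ */
+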
+static int
+__reiserfs_xattr_del (struct dentry *xadir, const char *name, int namelen)
+{
+    struct dentry *dentry;
+    struct inode *dir = xadir->d_inode;
+    int err = 0;
+
+    dentry = lookup_one_len (name, xadir, namelen);
+    if (IS_ERR (dentry)) {
+        err = PTR_ERR (dentry);
+        goto out;
+    } else if (!dentry->d_inode) {
+        err = -ENODATA;
+        goto out_file;
+    }
+
+    /* Skip directories.. */
+    if (S_ISDIR (dentry->d_inode->i_mode))
+        goto out_file;
+
+    if (!is_reiserfs_priv_object (dentry->d_inode)) {
+        reiserfs_warning (dir->i_sb, "OID %08x [%.*s/%.*s] doesn't have "
+                                     "priv flag set [parent is %sset].",
+                        le32_to_cpu (INODE_PKEY (dentry->d_inode)->k_objectid),
+                        xadir->d_name.len, xadir->d_name.name, namelen, name,
+                        is_reiserfs_priv_object (xadir->d_inode) ? "" : "not ");
+        dput (dentry);
+        return -EIO;
+    }
+
+    err = dir->i_op->unlink (dir, dentry);
+    if (!err)
+        d_delete (dentry);
+
+out_file:
+    dput (dentry);
+
+out:
+    return err;
+}
+
+
+int
+reiserfs_xattr_del (struct inode *inode, const char *name)
+{
+    struct dentry *dir;
+    int err;
+
+    if (IS_RDONLY (inode))
+        return -EROFS;
+
+    dir = open_xa_dir (inode, FL_READONLY);
+    if (IS_ERR (dir)) {
+        err = PTR_ERR (dir);
+        goto out;
+    }
+
+    err = __reiserfs_xattr_del (dir, name, strlen (name));
+    dput (dir);
+
+    if (!err) {
+        inode->i_ctime = CURRENT_TIME_SEC;
+        mark_inode_dirty (inode);
+    }
+
+out:
+    return err;
+}
+
+/* The following are side effects of other operations that aren't explicitly
+ * modifying extended attributes. This includes operations such as permissions
+ * or ownership changes, object deletions, etc. */
+
+static int
+reiserfs_delete_xattrs_filler (void *buf, const char *name, int namelen,
+                               loff_t offset, ino_t ino, unsigned int d_type)
+{
+    struct dentry *xadir = (struct dentry *)buf;
+
+    return __reiserfs_xattr_del (xadir, name, namelen);
+
+}
+
+/* This is called w/ inode->i_sem downed */
+int
+reiserfs_delete_xattrs (struct inode *inode)
+{
+    struct file *fp;
+    struct dentry *dir, *root;
+    int err = 0;
+
+    /* Skip out, an xattr has no xattrs associated with it */
+    if (is_reiserfs_priv_object (inode) ||
+        get_inode_sd_version (inode) == STAT_DATA_V1 ||
+        !reiserfs_xattrs(inode->i_sb))
+    {
+        return 0;
+    }
+    reiserfs_read_lock_xattrs (inode->i_sb);
+    dir = open_xa_dir (inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (inode->i_sb);
+    if (IS_ERR (dir)) {
+        err = PTR_ERR (dir);
+        goto out;
+    } else if (!dir->d_inode) {
+        dput (dir);
+        return 0;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    lock_kernel ();
+    err = xattr_readdir (fp, reiserfs_delete_xattrs_filler, dir);
+    if (err) {
+        unlock_kernel ();
+        goto out_dir;
+    }
+
+    /* If only . and .. are left, the directory is empty and can be
+     * removed; anything else means some xattrs could not be deleted. */
+    if (dir->d_inode->i_nlink <= 2) {
+        root = get_xa_root (inode->i_sb);
+        reiserfs_write_lock_xattrs (inode->i_sb);
+        err = vfs_rmdir (root->d_inode, dir);
+        reiserfs_write_unlock_xattrs (inode->i_sb);
+        dput (root);
+    } else {
+        reiserfs_warning (inode->i_sb,
+                          "Couldn't remove all entries in directory");
+    }
+    unlock_kernel ();
+
+out_dir:
+    fput(fp);
+
+out:
+    if (!err)
+        REISERFS_I(inode)->i_flags = REISERFS_I(inode)->i_flags & ~i_has_xattr_dir;
+    return err;
+}
+
+struct reiserfs_chown_buf {
+    struct inode *inode;
+    struct dentry *xadir;
+    struct iattr *attrs;
+};
+
+/* XXX: If there is a better way to do this, I'd love to hear about it */
+static int
+reiserfs_chown_xattrs_filler (void *buf, const char *name, int namelen,
+                               loff_t offset, ino_t ino, unsigned int d_type)
+{
+    struct reiserfs_chown_buf *chown_buf = (struct reiserfs_chown_buf *)buf;
+    struct dentry *xafile, *xadir = chown_buf->xadir;
+    struct iattr *attrs = chown_buf->attrs;
+    int err = 0;
+
+    xafile = lookup_one_len (name, xadir, namelen);
+    if (IS_ERR (xafile))
+        return PTR_ERR (xafile);
+    else if (!xafile->d_inode) {
+        dput (xafile);
+        return -ENODATA;
+    }
+
+    if (!S_ISDIR (xafile->d_inode->i_mode))
+        err = notify_change (xafile, attrs);
+    dput (xafile);
+
+    return err;
+}
+
+int
+reiserfs_chown_xattrs (struct inode *inode, struct iattr *attrs)
+{
+    struct file *fp;
+    struct dentry *dir;
+    int err = 0;
+    struct reiserfs_chown_buf buf;
+    unsigned int ia_valid = attrs->ia_valid;
+
+    /* Skip out, an xattr has no xattrs associated with it */
+    if (is_reiserfs_priv_object (inode) ||
+        get_inode_sd_version (inode) == STAT_DATA_V1 ||
+        !reiserfs_xattrs(inode->i_sb))
+    {
+        return 0;
+    }
+    reiserfs_read_lock_xattrs (inode->i_sb);
+    dir = open_xa_dir (inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (inode->i_sb);
+    if (IS_ERR (dir)) {
+        if (PTR_ERR (dir) != -ENODATA)
+            err = PTR_ERR (dir);
+        goto out;
+    } else if (!dir->d_inode) {
+        dput (dir);
+        goto out;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    lock_kernel ();
+
+    attrs->ia_valid &= (ATTR_UID | ATTR_GID | ATTR_CTIME);
+    buf.xadir = dir;
+    buf.attrs = attrs;
+    buf.inode = inode;
+
+    err = xattr_readdir (fp, reiserfs_chown_xattrs_filler, &buf);
+    if (err) {
+        unlock_kernel ();
+        goto out_dir;
+    }
+
+    err = notify_change (dir, attrs);
+    unlock_kernel ();
+
+out_dir:
+    fput(fp);
+
+out:
+    attrs->ia_valid = ia_valid;
+    return err;
+}
+
+
+/* Actual operations that are exported to VFS-land */
+
+/*
+ * Inode operation getxattr()
+ * Preliminary locking: we down dentry->d_inode->i_sem
+ */
+ssize_t
+reiserfs_getxattr (struct dentry *dentry, const char *name, void *buffer,
+                   size_t size)
+{
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+    int err;
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    reiserfs_read_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+    err = xah->get (dentry->d_inode, name, buffer, size);
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_read_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+
+/*
+ * Inode operation setxattr()
+ *
+ * dentry->d_inode->i_sem down
+ */
+int
+reiserfs_setxattr (struct dentry *dentry, const char *name, const void *value,
+                   size_t size, int flags)
+{
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+    int err;
+    int lock;
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    if (IS_RDONLY (dentry->d_inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (dentry->d_inode) || IS_APPEND (dentry->d_inode))
+        return -EROFS;
+
+    reiserfs_write_lock_xattr_i (dentry->d_inode);
+    lock = !has_xattr_dir (dentry->d_inode);
+    if (lock)
+        reiserfs_write_lock_xattrs (dentry->d_sb);
+    else
+        reiserfs_read_lock_xattrs (dentry->d_sb);
+    err = xah->set (dentry->d_inode, name, value, size, flags);
+    if (lock)
+        reiserfs_write_unlock_xattrs (dentry->d_sb);
+    else
+        reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_write_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+/*
+ * Inode operation removexattr()
+ *
+ * dentry->d_inode->i_sem down
+ */
+int
+reiserfs_removexattr (struct dentry *dentry, const char *name)
+{
+    int err;
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    if (IS_RDONLY (dentry->d_inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (dentry->d_inode) || IS_APPEND (dentry->d_inode))
+        return -EPERM;
+
+    reiserfs_write_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+
+    /* Deletion pre-operation */
+    if (xah->del) {
+        err = xah->del (dentry->d_inode, name);
+        if (err)
+            goto out;
+    }
+
+    err = reiserfs_xattr_del (dentry->d_inode, name);
+
+    dentry->d_inode->i_ctime = CURRENT_TIME_SEC;
+    mark_inode_dirty (dentry->d_inode);
+
+out:
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_write_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+
+/* This is what filldir will use:
+ * r_pos will always contain the amount of space required for the entire
+ * list. If r_pos ends up larger than r_size, the caller's buffer was too
+ * small and we return an error indicating this. Otherwise the buffer was
+ * filled successfully and we return the length used. */
+struct reiserfs_listxattr_buf {
+    int r_pos;
+    int r_size;
+    char *r_buf;
+    struct inode *r_inode;
+};
+
+static int
+reiserfs_listxattr_filler (void *buf, const char *name, int namelen,
+                           loff_t offset, ino_t ino, unsigned int d_type)
+{
+    struct reiserfs_listxattr_buf *b = (struct reiserfs_listxattr_buf *)buf;
+    int len = 0;
+    if (name[0] != '.' || (namelen != 1 && (name[1] != '.' || namelen != 2))) {
+        struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+        if (!xah) return 0; /* Unsupported xattr name, skip it */
+
+        /* We call ->list() twice because the operation isn't required to just
+         * return the name back - we want to make sure we have enough space */
+        len += xah->list (b->r_inode, name, namelen, NULL);
+
+        if (len) {
+            if (b->r_pos + len + 1 <= b->r_size) {
+                char *p = b->r_buf + b->r_pos;
+                p += xah->list (b->r_inode, name, namelen, p);
+                *p++ = '\0';
+            }
+            b->r_pos += len + 1;
+        }
+    }
+
+    return 0;
+}
+/*
+ * Inode operation listxattr()
+ *
+ * Preliminary locking: we down dentry->d_inode->i_sem
+ */
+ssize_t
+reiserfs_listxattr (struct dentry *dentry, char *buffer, size_t size)
+{
+    struct file *fp;
+    struct dentry *dir;
+    int err = 0;
+    struct reiserfs_listxattr_buf buf;
+
+    if (!dentry->d_inode)
+        return -EINVAL;
+
+    if (!reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    reiserfs_read_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+    dir = open_xa_dir (dentry->d_inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    if (IS_ERR (dir)) {
+        err = PTR_ERR (dir);
+        if (err == -ENODATA)
+            err = 0; /* Not an error if there aren't any xattrs */
+        goto out;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    buf.r_buf = buffer;
+    buf.r_size = buffer ? size : 0;
+    buf.r_pos = 0;
+    buf.r_inode = dentry->d_inode;
+
+    REISERFS_I(dentry->d_inode)->i_flags |= i_has_xattr_dir;
+
+    err = xattr_readdir (fp, reiserfs_listxattr_filler, &buf);
+    if (err)
+        goto out_dir;
+
+    if (buf.r_pos > buf.r_size && buffer != NULL)
+        err = -ERANGE;
+    else
+        err = buf.r_pos;
+
+out_dir:
+    fput(fp);
+
+out:
+    reiserfs_read_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
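+/* Editor's sketch (illustrative only; not used by the code in this file):
+ * reiserfs_listxattr() above returns the names as a sequence of
+ * NUL-terminated strings, and the filler adds one byte per name beyond the
+ * length reported by ->list(), so the buffer space needed for a given set
+ * of names is simply the sum of their lengths plus one byte each. */
+static inline size_t
+example_listxattr_buffer_size (const char * const *names, int count)
+{
+    size_t total = 0;
+    int i;
+
+    for (i = 0; i < count; i++)
+        total += strlen (names[i]) + 1;    /* name plus trailing '\0' */
+    return total;
+}
+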
+/* This is the implementation for the xattr plugin infrastructure */
+static struct list_head xattr_handlers = LIST_HEAD_INIT (xattr_handlers);
+static DEFINE_RWLOCK(handler_lock);
+
+static struct reiserfs_xattr_handler *
+find_xattr_handler_prefix (const char *prefix)
+{
+    struct reiserfs_xattr_handler *xah = NULL;
+    struct list_head *p;
+
+    read_lock (&handler_lock);
+    list_for_each (p, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        if (strncmp (xah->prefix, prefix, strlen (xah->prefix)) == 0)
+            break;
+        xah = NULL;
+    }
+
+    read_unlock (&handler_lock);
+    return xah;
+}
+
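+/* Editor's example (illustrative only): with the handlers registered in
+ * reiserfs_xattr_register_handlers() below, the prefix match resolves as
+ *
+ *     find_xattr_handler_prefix ("user.comment")  -> &user_handler
+ *     find_xattr_handler_prefix ("trusted.foo")   -> &trusted_handler
+ *     find_xattr_handler_prefix ("unknown.bar")   -> NULL
+ *
+ * since only the handler's own prefix length is compared. */
+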
+static void
+__unregister_handlers (void)
+{
+    struct reiserfs_xattr_handler *xah;
+    struct list_head *p, *tmp;
+
+    list_for_each_safe (p, tmp, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        if (xah->exit)
+            xah->exit();
+
+        list_del_init (p);
+    }
+    INIT_LIST_HEAD (&xattr_handlers);
+}
+
+int __init
+reiserfs_xattr_register_handlers (void)
+{
+    int err = 0;
+    struct reiserfs_xattr_handler *xah;
+    struct list_head *p;
+
+    write_lock (&handler_lock);
+
+    /* If we're already initialized, nothing to do */
+    if (!list_empty (&xattr_handlers)) {
+        write_unlock (&handler_lock);
+        return 0;
+    }
+
+    /* Add the handlers */
+    list_add_tail (&user_handler.handlers, &xattr_handlers);
+    list_add_tail (&trusted_handler.handlers, &xattr_handlers);
+#ifdef CONFIG_REISERFS_FS_SECURITY
+    list_add_tail (&security_handler.handlers, &xattr_handlers);
+#endif
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+    list_add_tail (&posix_acl_access_handler.handlers, &xattr_handlers);
+    list_add_tail (&posix_acl_default_handler.handlers, &xattr_handlers);
+#endif
+
+    /* Run initializers, if available */
+    list_for_each (p, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        if (xah->init) {
+            err = xah->init ();
+            if (err) {
+                list_del_init (p);
+                break;
+            }
+        }
+    }
+
+    /* Clean up other handlers, if any failed */
+    if (err)
+        __unregister_handlers ();
+
+    write_unlock (&handler_lock);
+    return err;
+}
+
+void
+reiserfs_xattr_unregister_handlers (void)
+{
+    write_lock (&handler_lock);
+    __unregister_handlers ();
+    write_unlock (&handler_lock);
+}
+
+/* This will catch lookups from the fs root to .reiserfs_priv */
+static int
+xattr_lookup_poison (struct dentry *dentry, struct qstr *q1, struct qstr *name)
+{
+    struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
+    if (name->len == priv_root->d_name.len &&
+        name->hash == priv_root->d_name.hash &&
+        !memcmp (name->name, priv_root->d_name.name, name->len)) {
+            return -ENOENT;
+    } else if (q1->len == name->len &&
+               !memcmp(q1->name, name->name, name->len))
+        return 0;
+    return 1;
+}
+
+static struct dentry_operations xattr_lookup_poison_ops = {
+    .d_compare = xattr_lookup_poison,
+};
+
+
+/* We need to take a copy of the mount flags since things like
+ * MS_RDONLY don't get set until *after* we're called.
+ * mount_flags != mount_options */
+int
+reiserfs_xattr_init (struct super_block *s, int mount_flags)
+{
+  int err = 0;
+
+  /* We need generation numbers to ensure that the oid mapping is correct;
+   * v3.5 filesystems don't have them. */
+  if (!old_format_only (s)) {
+    set_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+  } else if (reiserfs_xattrs_optional (s)) {
+    /* Old format filesystem, but optional xattrs have been enabled
+     * at mount time. Error out. */
+    reiserfs_warning (s, "xattrs/ACLs not supported on pre v3.6 "
+                      "format filesystem. Failing mount.");
+    err = -EOPNOTSUPP;
+    goto error;
+  } else {
+    /* Old format filesystem, but no optional xattrs have been enabled. This
+     * means we silently disable xattrs on the filesystem. */
+    clear_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+  }
+
+  /* If we don't have the privroot located yet - go find it */
+  if (reiserfs_xattrs (s) && !REISERFS_SB(s)->priv_root) {
+      struct dentry *dentry;
+      dentry = lookup_one_len (PRIVROOT_NAME, s->s_root,
+                               strlen (PRIVROOT_NAME));
+      if (!IS_ERR (dentry)) {
+        if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) {
+            struct inode *inode = dentry->d_parent->d_inode;
+            down (&inode->i_sem);
+            err = inode->i_op->mkdir (inode, dentry, 0700);
+            up (&inode->i_sem);
+            if (err) {
+                dput (dentry);
+                dentry = NULL;
+            }
+
+            if (dentry && dentry->d_inode)
+                reiserfs_warning (s, "Created %s on %s - reserved for "
+                                  "xattr storage.", PRIVROOT_NAME,
+                                  reiserfs_bdevname (inode->i_sb));
+        } else if (!dentry->d_inode) {
+            dput (dentry);
+            dentry = NULL;
+        }
+      } else
+        err = PTR_ERR (dentry);
+
+      if (!err && dentry) {
+          s->s_root->d_op = &xattr_lookup_poison_ops;
+          reiserfs_mark_inode_private (dentry->d_inode);
+          REISERFS_SB(s)->priv_root = dentry;
+      } else if (!(mount_flags & MS_RDONLY)) { /* xattrs are unavailable */
+          /* On a read-only mount a missing dir just means there are no
+           * xattrs on the fs yet; we'll check again if we go read-write.
+           * Getting here on a read-write mount is a real error. */
+          reiserfs_warning (s, "xattrs/ACLs enabled and couldn't "
+                            "find/create .reiserfs_priv. Failing mount.");
+          err = -EOPNOTSUPP;
+      }
+  }
+
+error:
+   /* This is only nonzero if there was an error initializing the xattr
+    * directory or if there is a condition where we don't support them. */
+    if (err) {
+          clear_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+          clear_bit (REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
+          clear_bit (REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
+    }
+
+    /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+    s->s_flags = s->s_flags & ~MS_POSIXACL;
+    if (reiserfs_posixacl (s))
+	s->s_flags |= MS_POSIXACL;
+
+    return err;
+}
+
+static int
+__reiserfs_permission (struct inode *inode, int mask, struct nameidata *nd,
+                       int need_lock)
+{
+	umode_t			mode = inode->i_mode;
+
+	if (mask & MAY_WRITE) {
+		/*
+		 * Nobody gets write access to a read-only fs.
+		 */
+		if (IS_RDONLY(inode) &&
+		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
+			return -EROFS;
+
+		/*
+		 * Nobody gets write access to an immutable file.
+		 */
+		if (IS_IMMUTABLE(inode))
+			return -EACCES;
+	}
+
+	/* We don't do permission checks on the internal objects.
+	* Permissions are determined by the "owning" object. */
+        if (is_reiserfs_priv_object (inode))
+		return 0;
+
+	if (current->fsuid == inode->i_uid) {
+		mode >>= 6;
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+	} else if (reiserfs_posixacl(inode->i_sb) &&
+                   get_inode_sd_version (inode) != STAT_DATA_V1) {
+                struct posix_acl *acl;
+
+		/* ACL can't contain additional permissions if
+		   the ACL_MASK entry is 0 */
+		if (!(mode & S_IRWXG))
+			goto check_groups;
+
+                if (need_lock) {
+		    reiserfs_read_lock_xattr_i (inode);
+                    reiserfs_read_lock_xattrs (inode->i_sb);
+		}
+                acl = reiserfs_get_acl (inode, ACL_TYPE_ACCESS);
+                if (need_lock) {
+                    reiserfs_read_unlock_xattrs (inode->i_sb);
+		    reiserfs_read_unlock_xattr_i (inode);
+		}
+                if (IS_ERR (acl)) {
+                    if (PTR_ERR (acl) == -ENODATA)
+                        goto check_groups;
+                    return PTR_ERR (acl);
+                }
+
+                if (acl) {
+                    int err = posix_acl_permission (inode, acl, mask);
+                    posix_acl_release (acl);
+                    if (err == -EACCES) {
+                        goto check_capabilities;
+                    }
+                    return err;
+		} else {
+			goto check_groups;
+                }
+#endif
+	} else {
+check_groups:
+		if (in_group_p(inode->i_gid))
+			mode >>= 3;
+	}
+
+	/*
+	 * If the DACs are ok we don't need any capability check.
+	 */
+	if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
+		return 0;
+
+check_capabilities:
+	/*
+	 * Read/write DACs are always overridable.
+	 * Executable DACs are overridable if at least one exec bit is set.
+	 */
+	if (!(mask & MAY_EXEC) ||
+	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
+		if (capable(CAP_DAC_OVERRIDE))
+			return 0;
+
+	/*
+	 * Searching includes executable on directories, else just read.
+	 */
+	if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+		if (capable(CAP_DAC_READ_SEARCH))
+			return 0;
+
+	return -EACCES;
+}
+
+int
+reiserfs_permission (struct inode *inode, int mask, struct nameidata *nd)
+{
+    return __reiserfs_permission (inode, mask, nd, 1);
+}
+
+int
+reiserfs_permission_locked (struct inode *inode, int mask, struct nameidata *nd)
+{
+    return __reiserfs_permission (inode, mask, nd, 0);
+}
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
new file mode 100644
index 0000000..e302071
--- /dev/null
+++ b/fs/reiserfs/xattr_acl.c
@@ -0,0 +1,571 @@
+#include <linux/fs.h>
+#include <linux/posix_acl.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/xattr_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/reiserfs_acl.h>
+#include <asm/uaccess.h>
+
+static int reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl);
+
+static int
+xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
+{
+	struct posix_acl *acl;
+	int error;
+
+	if (!reiserfs_posixacl(inode->i_sb))
+		return -EOPNOTSUPP;
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		return -EPERM;
+
+	if (value) {
+		acl = posix_acl_from_xattr(value, size);
+		if (IS_ERR(acl)) {
+			return PTR_ERR(acl);
+		} else if (acl) {
+			error = posix_acl_valid(acl);
+			if (error)
+				goto release_and_out;
+		}
+	} else
+		acl = NULL;
+
+	error = reiserfs_set_acl (inode, type, acl);
+
+release_and_out:
+	posix_acl_release(acl);
+	return error;
+}
+
+
+static int
+xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+{
+	struct posix_acl *acl;
+	int error;
+
+	if (!reiserfs_posixacl(inode->i_sb))
+		return -EOPNOTSUPP;
+
+	acl = reiserfs_get_acl (inode, type);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+	error = posix_acl_to_xattr(acl, buffer, size);
+	posix_acl_release(acl);
+
+	return error;
+}
+
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+static struct posix_acl *
+posix_acl_from_disk(const void *value, size_t size)
+{
+	const char *end = (char *)value + size;
+	int n, count;
+	struct posix_acl *acl;
+
+	if (!value)
+		return NULL;
+	if (size < sizeof(reiserfs_acl_header))
+		 return ERR_PTR(-EINVAL);
+	if (((reiserfs_acl_header *)value)->a_version !=
+	    cpu_to_le32(REISERFS_ACL_VERSION))
+		return ERR_PTR(-EINVAL);
+	value = (char *)value + sizeof(reiserfs_acl_header);
+	count = reiserfs_acl_count(size);
+	if (count < 0)
+		return ERR_PTR(-EINVAL);
+	if (count == 0)
+		return NULL;
+	acl = posix_acl_alloc(count, GFP_NOFS);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+	for (n=0; n < count; n++) {
+		reiserfs_acl_entry *entry =
+			(reiserfs_acl_entry *)value;
+		if ((char *)value + sizeof(reiserfs_acl_entry_short) > end)
+			goto fail;
+		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
+		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				value = (char *)value +
+					sizeof(reiserfs_acl_entry_short);
+				acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				value = (char *)value + sizeof(reiserfs_acl_entry);
+				if ((char *)value > end)
+					goto fail;
+				acl->a_entries[n].e_id =
+					le32_to_cpu(entry->e_id);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	if (value != end)
+		goto fail;
+	return acl;
+
+fail:
+	posix_acl_release(acl);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+static void *
+posix_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+	reiserfs_acl_header *ext_acl;
+	char *e;
+	int n;
+
+	*size = reiserfs_acl_size(acl->a_count);
+	ext_acl = (reiserfs_acl_header *)kmalloc(sizeof(reiserfs_acl_header) +
+		acl->a_count * sizeof(reiserfs_acl_entry), GFP_NOFS);
+	if (!ext_acl)
+		return ERR_PTR(-ENOMEM);
+	ext_acl->a_version = cpu_to_le32(REISERFS_ACL_VERSION);
+	e = (char *)ext_acl + sizeof(reiserfs_acl_header);
+	for (n=0; n < acl->a_count; n++) {
+		reiserfs_acl_entry *entry = (reiserfs_acl_entry *)e;
+		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+		switch(acl->a_entries[n].e_tag) {
+			case ACL_USER:
+			case ACL_GROUP:
+				entry->e_id =
+					cpu_to_le32(acl->a_entries[n].e_id);
+				e += sizeof(reiserfs_acl_entry);
+				break;
+
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				e += sizeof(reiserfs_acl_entry_short);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	return (char *)ext_acl;
+
+fail:
+	kfree(ext_acl);
+	return ERR_PTR(-EINVAL);
+}
+
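+/* Editor's sketch (illustrative only; not used by the code in this file):
+ * posix_acl_to_disk() above writes one header, a short entry (tag + perm)
+ * for ACL_USER_OBJ/ACL_GROUP_OBJ/ACL_MASK/ACL_OTHER, and a full entry
+ * (tag + perm + id) for ACL_USER/ACL_GROUP, so the on-disk size of an ACL
+ * with n_short short entries and n_full full entries works out to: */
+static inline size_t
+example_acl_disk_size (int n_short, int n_full)
+{
+    return sizeof (reiserfs_acl_header) +
+           n_short * sizeof (reiserfs_acl_entry_short) +
+           n_full * sizeof (reiserfs_acl_entry);
+}
+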
+/*
+ * Inode operation get_posix_acl().
+ *
+ * inode->i_sem: down
+ * BKL held [before 2.5.x]
+ */
+struct posix_acl *
+reiserfs_get_acl(struct inode *inode, int type)
+{
+	char *name, *value;
+	struct posix_acl *acl, **p_acl;
+	size_t size;
+	int retval;
+        struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+        switch (type) {
+            case ACL_TYPE_ACCESS:
+                name = XATTR_NAME_ACL_ACCESS;
+                p_acl = &reiserfs_i->i_acl_access;
+                break;
+            case ACL_TYPE_DEFAULT:
+                name = XATTR_NAME_ACL_DEFAULT;
+                p_acl = &reiserfs_i->i_acl_default;
+                break;
+            default:
+                return ERR_PTR (-EINVAL);
+        }
+
+        if (IS_ERR (*p_acl)) {
+            if (PTR_ERR (*p_acl) == -ENODATA)
+                return NULL;
+        } else if (*p_acl != NULL)
+            return posix_acl_dup (*p_acl);
+
+        size = reiserfs_xattr_get (inode, name, NULL, 0);
+        if ((int)size < 0) {
+            if (size == -ENODATA || size == -ENOSYS) {
+		*p_acl = ERR_PTR (-ENODATA);
+		return NULL;
+            }
+            return ERR_PTR (size);
+        }
+
+        value = kmalloc (size, GFP_NOFS);
+        if (!value)
+            return ERR_PTR (-ENOMEM);
+
+	retval = reiserfs_xattr_get(inode, name, value, size);
+	if (retval == -ENODATA || retval == -ENOSYS) {
+		/* This shouldn't actually happen as it should have
+		   been caught above.. but just in case */
+		acl = NULL;
+		*p_acl = ERR_PTR (-ENODATA);
+        } else if (retval < 0) {
+		acl = ERR_PTR(retval);
+	} else {
+		acl = posix_acl_from_disk(value, retval);
+		*p_acl = posix_acl_dup (acl);
+        }
+
+	kfree(value);
+	return acl;
+}
+
+/*
+ * Inode operation set_posix_acl().
+ *
+ * inode->i_sem: down
+ * BKL held [before 2.5.x]
+ */
+static int
+reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+        char *name;
+	void *value = NULL;
+	struct posix_acl **p_acl;
+	size_t size;
+	int error;
+        struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+        switch (type) {
+            case ACL_TYPE_ACCESS:
+                name = XATTR_NAME_ACL_ACCESS;
+                p_acl = &reiserfs_i->i_acl_access;
+                if (acl) {
+                    mode_t mode = inode->i_mode;
+                    error = posix_acl_equiv_mode (acl, &mode);
+                    if (error < 0)
+                        return error;
+                    else {
+                        inode->i_mode = mode;
+                        if (error == 0)
+                            acl = NULL;
+                    }
+                }
+                break;
+            case ACL_TYPE_DEFAULT:
+                name = XATTR_NAME_ACL_DEFAULT;
+                p_acl = &reiserfs_i->i_acl_default;
+                if (!S_ISDIR (inode->i_mode))
+                    return acl ? -EACCES : 0;
+                break;
+            default:
+                return -EINVAL;
+        }
+
+ 	if (acl) {
+            value = posix_acl_to_disk(acl, &size);
+            if (IS_ERR(value))
+                return (int)PTR_ERR(value);
+            error = reiserfs_xattr_set(inode, name, value, size, 0);
+	} else {
+            error = reiserfs_xattr_del (inode, name);
+            if (error == -ENODATA) {
+                /* This may seem odd here, but it means that the ACL was set
+                 * with a value representable with mode bits. If there was
+                 * an ACL before, reiserfs_xattr_del already dirtied the inode.
+                 */
+                mark_inode_dirty (inode);
+                error = 0;
+            }
+        }
+
+	if (value)
+		kfree(value);
+
+        if (!error) {
+            /* Release the old one */
+            if (!IS_ERR (*p_acl) && *p_acl)
+                posix_acl_release (*p_acl);
+
+            if (acl == NULL)
+                *p_acl = ERR_PTR (-ENODATA);
+            else
+                *p_acl = posix_acl_dup (acl);
+        }
+
+	return error;
+}
+
+/* dir->i_sem: down,
+ * inode is new and not released into the wild yet */
+int
+reiserfs_inherit_default_acl (struct inode *dir, struct dentry *dentry, struct inode *inode)
+{
+    struct posix_acl *acl;
+    int err = 0;
+
+    /* ACLs only get applied to files and directories */
+    if (S_ISLNK (inode->i_mode))
+        return 0;
+
+    /* ACLs can only be used on "new" objects, so if it's an old object
+     * there is nothing to inherit from */
+    if (get_inode_sd_version (dir) == STAT_DATA_V1)
+        goto apply_umask;
+
+    /* Don't apply ACLs to objects in the .reiserfs_priv tree. This
+     * would be useless since permissions are ignored, and a pain because
+     * it introduces locking cycles */
+    if (is_reiserfs_priv_object (dir)) {
+        reiserfs_mark_inode_private (inode);
+        goto apply_umask;
+    }
+
+    acl = reiserfs_get_acl (dir, ACL_TYPE_DEFAULT);
+    if (IS_ERR (acl)) {
+        if (PTR_ERR (acl) == -ENODATA)
+            goto apply_umask;
+        return PTR_ERR (acl);
+    }
+
+    if (acl) {
+        struct posix_acl *acl_copy;
+        mode_t mode = inode->i_mode;
+        int need_acl;
+
+        /* Copy the default ACL to the default ACL of a new directory */
+        if (S_ISDIR (inode->i_mode)) {
+            err = reiserfs_set_acl (inode, ACL_TYPE_DEFAULT, acl);
+            if (err)
+                goto cleanup;
+        }
+
+        /* Now we reconcile the new ACL and the mode,
+           potentially modifying both */
+        acl_copy = posix_acl_clone (acl, GFP_NOFS);
+        if (!acl_copy) {
+            err = -ENOMEM;
+            goto cleanup;
+        }
+
+
+        need_acl = posix_acl_create_masq (acl_copy, &mode);
+        if (need_acl >= 0) {
+            if (mode != inode->i_mode) {
+                inode->i_mode = mode;
+            }
+
+            /* If we need an ACL.. */
+            if (need_acl > 0) {
+                err = reiserfs_set_acl (inode, ACL_TYPE_ACCESS, acl_copy);
+                if (err)
+                    goto cleanup_copy;
+            }
+        }
+cleanup_copy:
+        posix_acl_release (acl_copy);
+cleanup:
+        posix_acl_release (acl);
+    } else {
+apply_umask:
+        /* no ACL, apply umask */
+        inode->i_mode &= ~current->fs->umask;
+    }
+
+    return err;
+}
+
+/* Looks up and caches the result of the default ACL.
+ * We do this so that we don't need to carry the xattr_sem into
+ * reiserfs_new_inode if we don't need to */
+int
+reiserfs_cache_default_acl (struct inode *inode)
+{
+    int ret = 0;
+    if (reiserfs_posixacl (inode->i_sb) &&
+        !is_reiserfs_priv_object (inode)) {
+        struct posix_acl *acl;
+        reiserfs_read_lock_xattr_i (inode);
+        reiserfs_read_lock_xattrs (inode->i_sb);
+        acl = reiserfs_get_acl (inode, ACL_TYPE_DEFAULT);
+        reiserfs_read_unlock_xattrs (inode->i_sb);
+        reiserfs_read_unlock_xattr_i (inode);
+        ret = acl ? 1 : 0;
+        posix_acl_release (acl);
+    }
+
+    return ret;
+}
+
+int
+reiserfs_acl_chmod (struct inode *inode)
+{
+        struct posix_acl *acl, *clone;
+        int error;
+
+        if (S_ISLNK(inode->i_mode))
+                return -EOPNOTSUPP;
+
+	if (get_inode_sd_version (inode) == STAT_DATA_V1 ||
+	    !reiserfs_posixacl(inode->i_sb))
+        {
+	    return 0;
+	}
+
+        reiserfs_read_lock_xattrs (inode->i_sb);
+        acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+        reiserfs_read_unlock_xattrs (inode->i_sb);
+        if (!acl)
+                return 0;
+        if (IS_ERR(acl))
+                return PTR_ERR(acl);
+        clone = posix_acl_clone(acl, GFP_NOFS);
+        posix_acl_release(acl);
+        if (!clone)
+                return -ENOMEM;
+        error = posix_acl_chmod_masq(clone, inode->i_mode);
+        if (!error) {
+                int lock = !has_xattr_dir (inode);
+                reiserfs_write_lock_xattr_i (inode);
+                if (lock)
+                    reiserfs_write_lock_xattrs (inode->i_sb);
+                else
+                    reiserfs_read_lock_xattrs (inode->i_sb);
+                error = reiserfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+                if (lock)
+                    reiserfs_write_unlock_xattrs (inode->i_sb);
+                else
+                    reiserfs_read_unlock_xattrs (inode->i_sb);
+                reiserfs_write_unlock_xattr_i (inode);
+        }
+        posix_acl_release(clone);
+        return error;
+}
+
+static int
+posix_acl_access_get(struct inode *inode, const char *name,
+			  void *buffer, size_t size)
+{
+	if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+		return -EINVAL;
+	return xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int
+posix_acl_access_set(struct inode *inode, const char *name,
+			  const void *value, size_t size, int flags)
+{
+	if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+		return -EINVAL;
+	return xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int
+posix_acl_access_del (struct inode *inode, const char *name)
+{
+    struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+    struct posix_acl **acl = &reiserfs_i->i_acl_access;
+    if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+	return -EINVAL;
+    if (!IS_ERR (*acl) && *acl) {
+        posix_acl_release (*acl);
+        *acl = ERR_PTR (-ENODATA);
+    }
+
+    return 0;
+}
+
+static int
+posix_acl_access_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+    if (!reiserfs_posixacl (inode->i_sb))
+        return 0;
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+struct reiserfs_xattr_handler posix_acl_access_handler = {
+	.prefix = XATTR_NAME_ACL_ACCESS,
+	.get = posix_acl_access_get,
+	.set = posix_acl_access_set,
+	.del = posix_acl_access_del,
+	.list = posix_acl_access_list,
+};
+
+static int
+posix_acl_default_get (struct inode *inode, const char *name,
+			   void *buffer, size_t size)
+{
+	if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+		return -EINVAL;
+	return xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+static int
+posix_acl_default_set(struct inode *inode, const char *name,
+			   const void *value, size_t size, int flags)
+{
+	if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+		return -EINVAL;
+	return xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+static int
+posix_acl_default_del (struct inode *inode, const char *name)
+{
+    struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+    struct posix_acl **acl = &reiserfs_i->i_acl_default;
+    if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+	return -EINVAL;
+    if (!IS_ERR (*acl) && *acl) {
+        posix_acl_release (*acl);
+        *acl = ERR_PTR (-ENODATA);
+    }
+
+    return 0;
+}
+
+static int
+posix_acl_default_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+    if (!reiserfs_posixacl (inode->i_sb))
+        return 0;
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+struct reiserfs_xattr_handler posix_acl_default_handler = {
+	.prefix = XATTR_NAME_ACL_DEFAULT,
+	.get = posix_acl_default_get,
+	.set = posix_acl_default_set,
+	.del = posix_acl_default_del,
+	.list = posix_acl_default_list,
+};
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
new file mode 100644
index 0000000..e044d51
--- /dev/null
+++ b/fs/reiserfs/xattr_security.c
@@ -0,0 +1,69 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/reiserfs_xattr.h>
+#include <asm/uaccess.h>
+
+#define XATTR_SECURITY_PREFIX "security."
+
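+/* Editor's note (illustrative only): sizeof (XATTR_SECURITY_PREFIX) counts
+ * the trailing NUL, e.g. sizeof ("security.") == 10 while the prefix itself
+ * is 9 characters, so the strlen() checks below reject a bare "security."
+ * name with nothing after the prefix. */
+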
+static int
+security_get (struct inode *inode, const char *name, void *buffer, size_t size)
+{
+    if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
+        return -EINVAL;
+
+    if (is_reiserfs_priv_object(inode))
+        return -EPERM;
+
+    return reiserfs_xattr_get (inode, name, buffer, size);
+}
+
+static int
+security_set (struct inode *inode, const char *name, const void *buffer,
+          size_t size, int flags)
+{
+    if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
+        return -EINVAL;
+
+    if (is_reiserfs_priv_object(inode))
+        return -EPERM;
+
+    return reiserfs_xattr_set (inode, name, buffer, size, flags);
+}
+
+static int
+security_del (struct inode *inode, const char *name)
+{
+    if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
+        return -EINVAL;
+
+    if (is_reiserfs_priv_object(inode))
+        return -EPERM;
+
+    return 0;
+}
+
+static int
+security_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+
+    if (is_reiserfs_priv_object(inode))
+        return 0;
+
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+
+struct reiserfs_xattr_handler security_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.get = security_get,
+	.set = security_set,
+	.del = security_del,
+	.list = security_list,
+};
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
new file mode 100644
index 0000000..4376219
--- /dev/null
+++ b/fs/reiserfs/xattr_trusted.c
@@ -0,0 +1,81 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/reiserfs_xattr.h>
+#include <asm/uaccess.h>
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+
+static int
+trusted_get (struct inode *inode, const char *name, void *buffer, size_t size)
+{
+    if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    if (!(capable(CAP_SYS_ADMIN) || is_reiserfs_priv_object(inode)))
+        return -EPERM;
+
+    return reiserfs_xattr_get (inode, name, buffer, size);
+}
+
+static int
+trusted_set (struct inode *inode, const char *name, const void *buffer,
+          size_t size, int flags)
+{
+    if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    if (!(capable(CAP_SYS_ADMIN) || is_reiserfs_priv_object(inode)))
+        return -EPERM;
+
+    return reiserfs_xattr_set (inode, name, buffer, size, flags);
+}
+
+static int
+trusted_del (struct inode *inode, const char *name)
+{
+    if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    if (!(capable(CAP_SYS_ADMIN) || is_reiserfs_priv_object(inode)))
+        return -EPERM;
+
+    return 0;
+}
+
+static int
+trusted_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+
+    if (!reiserfs_xattrs (inode->i_sb))
+        return 0;
+
+    if (!(capable(CAP_SYS_ADMIN) || is_reiserfs_priv_object(inode)))
+        return 0;
+
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+
+struct reiserfs_xattr_handler trusted_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.get = trusted_get,
+	.set = trusted_set,
+	.del = trusted_del,
+	.list = trusted_list,
+};
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
new file mode 100644
index 0000000..0772806
--- /dev/null
+++ b/fs/reiserfs/xattr_user.c
@@ -0,0 +1,99 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/reiserfs_xattr.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+# include <linux/reiserfs_acl.h>
+#endif
+
+#define XATTR_USER_PREFIX "user."
+
+static int
+user_get (struct inode *inode, const char *name, void *buffer, size_t size)
+{
+
+    int error;
+
+    if (strlen(name) < sizeof(XATTR_USER_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs_user (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    error = reiserfs_permission_locked (inode, MAY_READ, NULL);
+    if (error)
+        return error;
+
+    return reiserfs_xattr_get (inode, name, buffer, size);
+}
+
+static int
+user_set (struct inode *inode, const char *name, const void *buffer,
+          size_t size, int flags)
+{
+
+    int error;
+
+    if (strlen(name) < sizeof(XATTR_USER_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs_user (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    if (!S_ISREG (inode->i_mode) &&
+        (!S_ISDIR (inode->i_mode) || inode->i_mode & S_ISVTX))
+        return -EPERM;
+
+    error = reiserfs_permission_locked (inode, MAY_WRITE, NULL);
+    if (error)
+        return error;
+
+    return reiserfs_xattr_set (inode, name, buffer, size, flags);
+}
+
+static int
+user_del (struct inode *inode, const char *name)
+{
+    int error;
+
+    if (strlen(name) < sizeof(XATTR_USER_PREFIX))
+        return -EINVAL;
+
+    if (!reiserfs_xattrs_user (inode->i_sb))
+        return -EOPNOTSUPP;
+
+    if (!S_ISREG (inode->i_mode) &&
+        (!S_ISDIR (inode->i_mode) || inode->i_mode & S_ISVTX))
+        return -EPERM;
+
+    error = reiserfs_permission_locked (inode, MAY_WRITE, NULL);
+    if (error)
+        return error;
+
+    return 0;
+}
+
+static int
+user_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+    if (!reiserfs_xattrs_user (inode->i_sb))
+        return 0;
+
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+struct reiserfs_xattr_handler user_handler = {
+	.prefix = XATTR_USER_PREFIX,
+	.get = user_get,
+	.set = user_set,
+	.del = user_del,
+	.list = user_list,
+};
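The three files above follow the same shape: each reiserfs_xattr_handler claims one attribute namespace by prefix ("security.", "trusted.", "user.") and supplies get/set/del/list callbacks that enforce that namespace's permission rules before falling through to the common reiserfs_xattr_* helpers. A minimal sketch of prefix-based dispatch over such a table is shown below; the NULL-terminated handler array and the helper name are assumptions for illustration, not the actual reiserfs registration code.

/*
 * Sketch only: dispatch an xattr name to the handler owning its prefix.
 * The handler array and this helper are hypothetical; the real code keeps
 * its own registration list.
 */
static struct reiserfs_xattr_handler *
find_handler_for(struct reiserfs_xattr_handler **handlers, const char *name)
{
	int i;

	for (i = 0; handlers[i] != NULL; i++) {
		size_t plen = strlen(handlers[i]->prefix);

		/* e.g. "user.foo" matches the handler with prefix "user." */
		if (strncmp(name, handlers[i]->prefix, plen) == 0)
			return handlers[i];
	}
	return NULL;		/* caller reports -EOPNOTSUPP */
}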
diff --git a/fs/romfs/Makefile b/fs/romfs/Makefile
new file mode 100644
index 0000000..c95b21c
--- /dev/null
+++ b/fs/romfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux romfs filesystem routines.
+#
+
+obj-$(CONFIG_ROMFS_FS) += romfs.o
+
+romfs-objs := inode.o
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
new file mode 100644
index 0000000..c74f382
--- /dev/null
+++ b/fs/romfs/inode.c
@@ -0,0 +1,648 @@
+/*
+ * ROMFS file system, Linux implementation
+ *
+ * Copyright (C) 1997-1999  Janos Farkas <chexum@shadow.banki.hu>
+ *
+ * Using parts of the minix filesystem
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * and parts of the affs filesystem additionally
+ * Copyright (C) 1993  Ray Burr
+ * Copyright (C) 1996  Hans-Joachim Widmaier
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes
+ *					Changed for 2.1.19 modules
+ *	Jan 1997			Initial release
+ *	Jun 1997			2.1.43+ changes
+ *					Proper page locking in readpage
+ *					Changed to work with 2.1.45+ fs
+ *	Jul 1997			Fixed follow_link
+ *			2.1.47
+ *					lookup shouldn't return -ENOENT
+ *					from Horst von Brand:
+ *					  fail on wrong checksum
+ *					  double unlock_super was possible
+ *					  correct namelen for statfs
+ *					spotted by Bill Hawes:
+ *					  readlink shouldn't iput()
+ *	Jun 1998	2.1.106		from Avery Pennarun: glibc scandir()
+ *					  exposed a problem in readdir
+ *			2.1.107		code-freeze spellchecker run
+ *	Aug 1998			2.1.118+ VFS changes
+ *	Sep 1998	2.1.122		another VFS change (follow_link)
+ *	Apr 1999	2.2.7		no more EBADF checking in
+ *					  lookup/readdir, use ERR_PTR
+ *	Jun 1999	2.3.6		d_alloc_root use changed
+ *			2.3.9		clean up usage of ENOENT/negative
+ *					  dentries in lookup
+ *					clean up page flags setting
+ *					  (error, uptodate, locking)
+ *					  in readpage
+ *					use init_special_inode for
+ *					  fifos/sockets (and streamline) in
+ *					  read_inode, fix _ops table order
+ *	Aug 1999	2.3.16		__initfunc() => __init change
+ *	Oct 1999	2.3.24		page->owner hack obsoleted
+ *	Nov 1999	2.3.27		2.3.25+ page->offset => index change
+ */
+
+/* todo:
+ *	- see Documentation/filesystems/romfs.txt
+ *	- use allocated, not stack memory for file names?
+ *	- considering write access...
+ *	- network (tftp) files?
+ *	- merge back some _op tables
+ */
+
+/*
+ * Sorry about some optimizations and for some goto's.  I just wanted
+ * to squeeze some more bytes out of this code.. :)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/romfs_fs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+
+#include <asm/uaccess.h>
+
+struct romfs_inode_info {
+	unsigned long i_metasize;	/* size of non-data area */
+	unsigned long i_dataoffset;	/* from the start of fs */
+	struct inode vfs_inode;
+};
+
+/* instead of private superblock data */
+static inline unsigned long romfs_maxsize(struct super_block *sb)
+{
+	return (unsigned long)sb->s_fs_info;
+}
+
+static inline struct romfs_inode_info *ROMFS_I(struct inode *inode)
+{
+	return list_entry(inode, struct romfs_inode_info, vfs_inode);
+}
+
+static __u32
+romfs_checksum(void *data, int size)
+{
+	__u32 sum;
+	__be32 *ptr;
+
+	sum = 0; ptr = data;
+	size>>=2;
+	while (size>0) {
+		sum += be32_to_cpu(*ptr++);
+		size--;
+	}
+	return sum;
+}
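romfs_checksum() simply sums the big-endian 32-bit words of a region; a romfs image is generated so that the words of its first 512 bytes (or of the whole image, if smaller) sum to zero, which is what romfs_fill_super() checks below. A userspace sketch of the same verification, assuming a complete image already read into memory:

/* Userspace sketch (assumption: the image is already in memory). */
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>		/* ntohl() */

static int romfs_image_checksum_ok(const void *image, size_t len)
{
	const uint32_t *p = image;
	uint32_t sum = 0;
	size_t words = (len < 512 ? len : 512) / 4;

	while (words--)
		sum += ntohl(*p++);	/* big-endian words, as in romfs_checksum() */
	return sum == 0;
}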
+
+static struct super_operations romfs_ops;
+
+static int romfs_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct buffer_head *bh;
+	struct romfs_super_block *rsb;
+	struct inode *root;
+	int sz;
+
+	/* I would parse the options here, but there are none.. :) */
+
+	sb_set_blocksize(s, ROMBSIZE);
+	s->s_maxbytes = 0xFFFFFFFF;
+
+	bh = sb_bread(s, 0);
+	if (!bh) {
+		/* XXX merge with other printk? */
+		printk ("romfs: unable to read superblock\n");
+		goto outnobh;
+	}
+
+	rsb = (struct romfs_super_block *)bh->b_data;
+	sz = be32_to_cpu(rsb->size);
+	if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1
+	   || sz < ROMFH_SIZE) {
+		if (!silent)
+			printk ("VFS: Can't find a romfs filesystem on dev "
+				"%s.\n", s->s_id);
+		goto out;
+	}
+	if (romfs_checksum(rsb, min_t(int, sz, 512))) {
+		printk ("romfs: bad initial checksum on dev "
+			"%s.\n", s->s_id);
+		goto out;
+	}
+
+	s->s_magic = ROMFS_MAGIC;
+	s->s_fs_info = (void *)(long)sz;
+
+	s->s_flags |= MS_RDONLY;
+
+	/* Find the start of the fs */
+	sz = (ROMFH_SIZE +
+	      strnlen(rsb->name, ROMFS_MAXFN) + 1 + ROMFH_PAD)
+	     & ROMFH_MASK;
+
+	s->s_op	= &romfs_ops;
+	root = iget(s, sz);
+	if (!root)
+		goto out;
+
+	s->s_root = d_alloc_root(root);
+	if (!s->s_root)
+		goto outiput;
+
+	brelse(bh);
+	return 0;
+
+outiput:
+	iput(root);
+out:
+	brelse(bh);
+outnobh:
+	return -EINVAL;
+}
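The "Find the start of the fs" computation pads the fixed 16-byte header plus the NUL-terminated volume name up to the next 16-byte boundary; assuming the usual constants from linux/romfs_fs.h (ROMFH_SIZE = 16, ROMFH_PAD = 15, ROMFH_MASK = ~15), a worked instance looks like this:

/*
 * Worked example (constants assumed from linux/romfs_fs.h):
 * volume name "boot" -> strnlen() == 4, so
 *   16 + 4 + 1 + 15 = 36, and 36 & ~15 = 32,
 * i.e. the root directory's first file header sits at offset 32, the first
 * 16-byte boundary after the superblock header and its name.
 */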
+
+/* That's simple too. */
+
+static int
+romfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = ROMFS_MAGIC;
+	buf->f_bsize = ROMBSIZE;
+	buf->f_bfree = buf->f_bavail = buf->f_ffree;
+	buf->f_blocks = (romfs_maxsize(sb)+ROMBSIZE-1)>>ROMBSBITS;
+	buf->f_namelen = ROMFS_MAXFN;
+	return 0;
+}
+
+/* some helper routines */
+
+static int
+romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
+{
+	struct buffer_head *bh;
+	unsigned long avail, maxsize, res;
+
+	maxsize = romfs_maxsize(i->i_sb);
+	if (offset >= maxsize)
+		return -1;
+
+	/* strnlen is almost always valid */
+	if (count > maxsize || offset+count > maxsize)
+		count = maxsize-offset;
+
+	bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
+	if (!bh)
+		return -1;		/* error */
+
+	avail = ROMBSIZE - (offset & ROMBMASK);
+	maxsize = min_t(unsigned long, count, avail);
+	res = strnlen(((char *)bh->b_data)+(offset&ROMBMASK), maxsize);
+	brelse(bh);
+
+	if (res < maxsize)
+		return res;		/* found all of it */
+
+	while (res < count) {
+		offset += maxsize;
+
+		bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
+		if (!bh)
+			return -1;
+		maxsize = min_t(unsigned long, count - res, ROMBSIZE);
+		avail = strnlen(bh->b_data, maxsize);
+		res += avail;
+		brelse(bh);
+		if (avail < maxsize)
+			return res;
+	}
+	return res;
+}
+
+static int
+romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long count)
+{
+	struct buffer_head *bh;
+	unsigned long avail, maxsize, res;
+
+	maxsize = romfs_maxsize(i->i_sb);
+	if (offset >= maxsize || count > maxsize || offset+count>maxsize)
+		return -1;
+
+	bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
+	if (!bh)
+		return -1;		/* error */
+
+	avail = ROMBSIZE - (offset & ROMBMASK);
+	maxsize = min_t(unsigned long, count, avail);
+	memcpy(dest, ((char *)bh->b_data) + (offset & ROMBMASK), maxsize);
+	brelse(bh);
+
+	res = maxsize;			/* all of it */
+
+	while (res < count) {
+		offset += maxsize;
+		dest += maxsize;
+
+		bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
+		if (!bh)
+			return -1;
+		maxsize = min_t(unsigned long, count - res, ROMBSIZE);
+		memcpy(dest, bh->b_data, maxsize);
+		brelse(bh);
+		res += maxsize;
+	}
+	return res;
+}
+
+static unsigned char romfs_dtype_table[] = {
+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_SOCK, DT_FIFO
+};
+
+static int
+romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *i = filp->f_dentry->d_inode;
+	struct romfs_inode ri;
+	unsigned long offset, maxoff;
+	int j, ino, nextfh;
+	int stored = 0;
+	char fsname[ROMFS_MAXFN];	/* XXX dynamic? */
+
+	lock_kernel();
+
+	maxoff = romfs_maxsize(i->i_sb);
+
+	offset = filp->f_pos;
+	if (!offset) {
+		offset = i->i_ino & ROMFH_MASK;
+		if (romfs_copyfrom(i, &ri, offset, ROMFH_SIZE) <= 0)
+			goto out;
+		offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
+	}
+
+	/* Not really failsafe, but we are read-only... */
+	for(;;) {
+		if (!offset || offset >= maxoff) {
+			offset = maxoff;
+			filp->f_pos = offset;
+			goto out;
+		}
+		filp->f_pos = offset;
+
+		/* Fetch inode info */
+		if (romfs_copyfrom(i, &ri, offset, ROMFH_SIZE) <= 0)
+			goto out;
+
+		j = romfs_strnlen(i, offset+ROMFH_SIZE, sizeof(fsname)-1);
+		if (j < 0)
+			goto out;
+
+		fsname[j]=0;
+		romfs_copyfrom(i, fsname, offset+ROMFH_SIZE, j);
+
+		ino = offset;
+		nextfh = be32_to_cpu(ri.next);
+		if ((nextfh & ROMFH_TYPE) == ROMFH_HRD)
+			ino = be32_to_cpu(ri.spec);
+		if (filldir(dirent, fsname, j, offset, ino,
+			    romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0) {
+			goto out;
+		}
+		stored++;
+		offset = nextfh & ROMFH_MASK;
+	}
+out:
+	unlock_kernel();
+	return stored;
+}
+
+static struct dentry *
+romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	unsigned long offset, maxoff;
+	int fslen, res;
+	struct inode *inode;
+	char fsname[ROMFS_MAXFN];	/* XXX dynamic? */
+	struct romfs_inode ri;
+	const char *name;		/* got from dentry */
+	int len;
+
+	res = -EACCES;			/* placeholder for "no data here" */
+	offset = dir->i_ino & ROMFH_MASK;
+	lock_kernel();
+	if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0)
+		goto out;
+
+	maxoff = romfs_maxsize(dir->i_sb);
+	offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
+
+	/* OK, now find the file whose name is in "dentry" in the
+	 * directory specified by "dir".  */
+
+	name = dentry->d_name.name;
+	len = dentry->d_name.len;
+
+	for(;;) {
+		if (!offset || offset >= maxoff)
+			goto out0;
+		if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0)
+			goto out;
+
+		/* try to match the first 16 bytes of name */
+		fslen = romfs_strnlen(dir, offset+ROMFH_SIZE, ROMFH_SIZE);
+		if (len < ROMFH_SIZE) {
+			if (len == fslen) {
+				/* both are shorter, and same size */
+				romfs_copyfrom(dir, fsname, offset+ROMFH_SIZE, len+1);
+				if (strncmp (name, fsname, len) == 0)
+					break;
+			}
+		} else if (fslen >= ROMFH_SIZE) {
+			/* both are longer; XXX optimize max size */
+			fslen = romfs_strnlen(dir, offset+ROMFH_SIZE, sizeof(fsname)-1);
+			if (len == fslen) {
+				romfs_copyfrom(dir, fsname, offset+ROMFH_SIZE, len+1);
+				if (strncmp(name, fsname, len) == 0)
+					break;
+			}
+		}
+		/* next entry */
+		offset = be32_to_cpu(ri.next) & ROMFH_MASK;
+	}
+
+	/* Hard link handling */
+	if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD)
+		offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
+
+	if ((inode = iget(dir->i_sb, offset)))
+		goto outi;
+
+	/*
+	 * It's a bit funky: _lookup needs to return an error code
+	 * (negative) or NULL, both encoded in the returned dentry pointer.
+	 * ENOENT should not be returned; instead we create a negative
+	 * dentry with d_add(dentry, NULL) and return 0 for "no error".
+	 * (As far as I can see, this only matters on writable file
+	 * systems.)
+	 */
+
+out0:	inode = NULL;
+outi:	res = 0;
+	d_add (dentry, inode);
+
+out:	unlock_kernel();
+	return ERR_PTR(res);
+}
+
+/*
+ * Ok, we do readpage, to be able to execute programs.  Unfortunately,
+ * we can't use bmap, since we may have looser alignments.
+ */
+
+static int
+romfs_readpage(struct file *file, struct page * page)
+{
+	struct inode *inode = page->mapping->host;
+	unsigned long offset, avail, readlen;
+	void *buf;
+	int result = -EIO;
+
+	page_cache_get(page);
+	lock_kernel();
+	buf = kmap(page);
+	if (!buf)
+		goto err_out;
+
+	/* 32 bit warning -- but not for us :) */
+	offset = page->index << PAGE_CACHE_SHIFT;
+	if (offset < inode->i_size) {
+		avail = inode->i_size-offset;
+		readlen = min_t(unsigned long, avail, PAGE_SIZE);
+		if (romfs_copyfrom(inode, buf, ROMFS_I(inode)->i_dataoffset+offset, readlen) == readlen) {
+			if (readlen < PAGE_SIZE) {
+				memset(buf + readlen,0,PAGE_SIZE-readlen);
+			}
+			SetPageUptodate(page);
+			result = 0;
+		}
+	}
+	if (result) {
+		memset(buf, 0, PAGE_SIZE);
+		SetPageError(page);
+	}
+	flush_dcache_page(page);
+
+	unlock_page(page);
+
+	kunmap(page);
+err_out:
+	page_cache_release(page);
+	unlock_kernel();
+
+	return result;
+}
+
+/* Mapping from our types to the kernel */
+
+static struct address_space_operations romfs_aops = {
+	.readpage = romfs_readpage
+};
+
+static struct file_operations romfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= romfs_readdir,
+};
+
+static struct inode_operations romfs_dir_inode_operations = {
+	.lookup		= romfs_lookup,
+};
+
+static mode_t romfs_modemap[] =
+{
+	0, S_IFDIR+0644, S_IFREG+0644, S_IFLNK+0777,
+	S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644
+};
+
+static void
+romfs_read_inode(struct inode *i)
+{
+	int nextfh, ino;
+	struct romfs_inode ri;
+
+	ino = i->i_ino & ROMFH_MASK;
+	i->i_mode = 0;
+
+	/* Loop for finding the real hard link */
+	for(;;) {
+		if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) {
+			printk("romfs: read error for inode 0x%x\n", ino);
+			return;
+		}
+		/* XXX: do romfs_checksum here too (with name) */
+
+		nextfh = be32_to_cpu(ri.next);
+		if ((nextfh & ROMFH_TYPE) != ROMFH_HRD)
+			break;
+
+		ino = be32_to_cpu(ri.spec) & ROMFH_MASK;
+	}
+
+	i->i_nlink = 1;		/* Hard to decide.. */
+	i->i_size = be32_to_cpu(ri.size);
+	i->i_mtime.tv_sec = i->i_atime.tv_sec = i->i_ctime.tv_sec = 0;
+	i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;
+	i->i_uid = i->i_gid = 0;
+
+	/* Precalculate the data offset */
+	ino = romfs_strnlen(i, ino+ROMFH_SIZE, ROMFS_MAXFN);
+	if (ino >= 0)
+		ino = ((ROMFH_SIZE+ino+1+ROMFH_PAD)&ROMFH_MASK);
+	else
+		ino = 0;
+
+	ROMFS_I(i)->i_metasize = ino;
+	ROMFS_I(i)->i_dataoffset = ino+(i->i_ino&ROMFH_MASK);
+
+	/* Compute permissions */
+	ino = romfs_modemap[nextfh & ROMFH_TYPE];
+	/* only "normal" files have ops */
+	switch (nextfh & ROMFH_TYPE) {
+		case 1:
+			i->i_size = ROMFS_I(i)->i_metasize;
+			i->i_op = &romfs_dir_inode_operations;
+			i->i_fop = &romfs_dir_operations;
+			if (nextfh & ROMFH_EXEC)
+				ino |= S_IXUGO;
+			i->i_mode = ino;
+			break;
+		case 2:
+			i->i_fop = &generic_ro_fops;
+			i->i_data.a_ops = &romfs_aops;
+			if (nextfh & ROMFH_EXEC)
+				ino |= S_IXUGO;
+			i->i_mode = ino;
+			break;
+		case 3:
+			i->i_op = &page_symlink_inode_operations;
+			i->i_data.a_ops = &romfs_aops;
+			i->i_mode = ino | S_IRWXUGO;
+			break;
+		default:
+			/* depending on MBZ for sock/fifos */
+			nextfh = be32_to_cpu(ri.spec);
+			init_special_inode(i, ino,
+					MKDEV(nextfh>>16,nextfh&0xffff));
+	}
+}
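romfs_read_inode() repeatedly picks apart the packed "next" word of the on-disk header: the 16-byte-aligned high bits give the offset of the next header, the low three bits give the file type indexed into romfs_modemap[] and romfs_dtype_table[], and bit 3 is the executable flag. A small sketch of that decoding, assuming the ROMFH_* constants from linux/romfs_fs.h; the helper itself is hypothetical:

/* Sketch only: decode the packed "next" word of a romfs file header. */
static void romfs_decode_next(__u32 next, unsigned long *next_hdr,
			      int *type, int *exec)
{
	*next_hdr = next & ROMFH_MASK;	/* offset of the next header (16-byte aligned) */
	*type     = next & ROMFH_TYPE;	/* 0 hard link, 1 dir, 2 file, 3 symlink, ... */
	*exec     = next & ROMFH_EXEC;	/* maps to S_IXUGO in i_mode above */
}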
+
+static kmem_cache_t * romfs_inode_cachep;
+
+static struct inode *romfs_alloc_inode(struct super_block *sb)
+{
+	struct romfs_inode_info *ei;
+	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void romfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	romfs_inode_cachep = kmem_cache_create("romfs_inode_cache",
+					     sizeof(struct romfs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (romfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(romfs_inode_cachep))
+		printk(KERN_INFO "romfs_inode_cache: not all structures were freed\n");
+}
+
+static int romfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+static struct super_operations romfs_ops = {
+	.alloc_inode	= romfs_alloc_inode,
+	.destroy_inode	= romfs_destroy_inode,
+	.read_inode	= romfs_read_inode,
+	.statfs		= romfs_statfs,
+	.remount_fs	= romfs_remount,
+};
+
+static struct super_block *romfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, romfs_fill_super);
+}
+
+static struct file_system_type romfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "romfs",
+	.get_sb		= romfs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_romfs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&romfs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_romfs_fs(void)
+{
+	unregister_filesystem(&romfs_fs_type);
+	destroy_inodecache();
+}
+
+/* Yes, works even as a module... :) */
+
+module_init(init_romfs_fs)
+module_exit(exit_romfs_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/select.c b/fs/select.c
new file mode 100644
index 0000000..25b1cca
--- /dev/null
+++ b/fs/select.c
@@ -0,0 +1,534 @@
+/*
+ * This file contains the procedures for the handling of select and poll
+ *
+ * Created for Linux based loosely upon Mathius Lattner's minix
+ * patches by Peter MacDonald. Heavily edited by Linus.
+ *
+ *  4 February 1994
+ *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
+ *     flag set in its personality we do *not* modify the given timeout
+ *     parameter to reflect time remaining.
+ *
+ *  24 January 2000
+ *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation 
+ *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
+ */
+
+#include <linux/syscalls.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/poll.h>
+#include <linux/personality.h> /* for STICKY_TIMEOUTS */
+#include <linux/file.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+
+#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
+#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+
+struct poll_table_entry {
+	struct file * filp;
+	wait_queue_t wait;
+	wait_queue_head_t * wait_address;
+};
+
+struct poll_table_page {
+	struct poll_table_page * next;
+	struct poll_table_entry * entry;
+	struct poll_table_entry entries[0];
+};
+
+#define POLL_TABLE_FULL(table) \
+	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
+
+/*
+ * Ok, Peter made a complicated, but straightforward multiple_wait() function.
+ * I have rewritten this, taking some shortcuts: This code may not be easy to
+ * follow, but it should be free of race-conditions, and it's practical. If you
+ * understand what I'm doing here, then you understand how the linux
+ * sleep/wakeup mechanism works.
+ *
+ * Two very simple procedures, poll_wait() and poll_freewait() make all the
+ * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
+ * as all select/poll functions have to call it to add an entry to the
+ * poll table.
+ */
+void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p);
+
+void poll_initwait(struct poll_wqueues *pwq)
+{
+	init_poll_funcptr(&pwq->pt, __pollwait);
+	pwq->error = 0;
+	pwq->table = NULL;
+}
+
+EXPORT_SYMBOL(poll_initwait);
+
+void poll_freewait(struct poll_wqueues *pwq)
+{
+	struct poll_table_page * p = pwq->table;
+	while (p) {
+		struct poll_table_entry * entry;
+		struct poll_table_page *old;
+
+		entry = p->entry;
+		do {
+			entry--;
+			remove_wait_queue(entry->wait_address,&entry->wait);
+			fput(entry->filp);
+		} while (entry > p->entries);
+		old = p;
+		p = p->next;
+		free_page((unsigned long) old);
+	}
+}
+
+EXPORT_SYMBOL(poll_freewait);
+
+void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *_p)
+{
+	struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
+	struct poll_table_page *table = p->table;
+
+	if (!table || POLL_TABLE_FULL(table)) {
+		struct poll_table_page *new_table;
+
+		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
+		if (!new_table) {
+			p->error = -ENOMEM;
+			__set_current_state(TASK_RUNNING);
+			return;
+		}
+		new_table->entry = new_table->entries;
+		new_table->next = table;
+		p->table = new_table;
+		table = new_table;
+	}
+
+	/* Add a new entry */
+	{
+		struct poll_table_entry * entry = table->entry;
+		table->entry = entry+1;
+	 	get_file(filp);
+	 	entry->filp = filp;
+		entry->wait_address = wait_address;
+		init_waitqueue_entry(&entry->wait, current);
+		add_wait_queue(wait_address,&entry->wait);
+	}
+}
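__pollwait() is what a driver's poll_wait() call ends up invoking: it queues the current task on the driver's wait queue and pins the file. The other side of that contract is an ->poll() method that calls poll_wait() and returns a readiness mask; a minimal sketch for a hypothetical character device ("struct mydev" and its predicate are assumptions, not real kernel API):

/* Sketch of a driver ->poll() method using poll_wait() (hypothetical device). */
struct mydev {
	wait_queue_head_t read_wait;
	/* ... device state ... */
};

static int mydev_data_ready(struct mydev *dev);	/* hypothetical predicate */

static unsigned int mydev_poll(struct file *file, poll_table *wait)
{
	struct mydev *dev = file->private_data;
	unsigned int mask = 0;

	/* registers current on dev->read_wait via __pollwait() above */
	poll_wait(file, &dev->read_wait, wait);

	if (mydev_data_ready(dev))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}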
+
+#define FDS_IN(fds, n)		(fds->in + n)
+#define FDS_OUT(fds, n)		(fds->out + n)
+#define FDS_EX(fds, n)		(fds->ex + n)
+
+#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
+
+static int max_select_fd(unsigned long n, fd_set_bits *fds)
+{
+	unsigned long *open_fds;
+	unsigned long set;
+	int max;
+
+	/* handle the last incomplete long-word first */
+	set = ~(~0UL << (n & (__NFDBITS-1)));
+	n /= __NFDBITS;
+	open_fds = current->files->open_fds->fds_bits+n;
+	max = 0;
+	if (set) {
+		set &= BITS(fds, n);
+		if (set) {
+			if (!(set & ~*open_fds))
+				goto get_max;
+			return -EBADF;
+		}
+	}
+	while (n) {
+		open_fds--;
+		n--;
+		set = BITS(fds, n);
+		if (!set)
+			continue;
+		if (set & ~*open_fds)
+			return -EBADF;
+		if (max)
+			continue;
+get_max:
+		do {
+			max++;
+			set >>= 1;
+		} while (set);
+		max += n * __NFDBITS;
+	}
+
+	return max;
+}
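max_select_fd() (and the BIT()/MEM() macros just below) treat each fd_set as an array of long-words, __NFDBITS descriptors per word. For illustration, a tiny helper computing the word and bit a descriptor lands on; on a 64-bit machine fd 70 is word 1, bit 6. This helper is not kernel code, just a restatement of the macros:

/* Illustration only: which long-word and bit an fd occupies in an fd_set. */
static inline void fd_to_word_bit(unsigned int fd,
				  unsigned long *word, unsigned long *bit)
{
	*word = fd / __NFDBITS;			/* index of the long-word */
	*bit  = 1UL << (fd & (__NFDBITS - 1));	/* mask within that word */
}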
+
+#define BIT(i)		(1UL << ((i)&(__NFDBITS-1)))
+#define MEM(i,m)	((m)+(unsigned)(i)/__NFDBITS)
+#define ISSET(i,m)	(((i)&*(m)) != 0)
+#define SET(i,m)	(*(m) |= (i))
+
+#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
+#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
+#define POLLEX_SET (POLLPRI)
+
+int do_select(int n, fd_set_bits *fds, long *timeout)
+{
+	struct poll_wqueues table;
+	poll_table *wait;
+	int retval, i;
+	long __timeout = *timeout;
+
+ 	spin_lock(&current->files->file_lock);
+	retval = max_select_fd(n, fds);
+	spin_unlock(&current->files->file_lock);
+
+	if (retval < 0)
+		return retval;
+	n = retval;
+
+	poll_initwait(&table);
+	wait = &table.pt;
+	if (!__timeout)
+		wait = NULL;
+	retval = 0;
+	for (;;) {
+		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		inp = fds->in; outp = fds->out; exp = fds->ex;
+		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
+
+		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
+			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
+			unsigned long res_in = 0, res_out = 0, res_ex = 0;
+			struct file_operations *f_op = NULL;
+			struct file *file = NULL;
+
+			in = *inp++; out = *outp++; ex = *exp++;
+			all_bits = in | out | ex;
+			if (all_bits == 0) {
+				i += __NFDBITS;
+				continue;
+			}
+
+			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
+				if (i >= n)
+					break;
+				if (!(bit & all_bits))
+					continue;
+				file = fget(i);
+				if (file) {
+					f_op = file->f_op;
+					mask = DEFAULT_POLLMASK;
+					if (f_op && f_op->poll)
+						mask = (*f_op->poll)(file, retval ? NULL : wait);
+					fput(file);
+					if ((mask & POLLIN_SET) && (in & bit)) {
+						res_in |= bit;
+						retval++;
+					}
+					if ((mask & POLLOUT_SET) && (out & bit)) {
+						res_out |= bit;
+						retval++;
+					}
+					if ((mask & POLLEX_SET) && (ex & bit)) {
+						res_ex |= bit;
+						retval++;
+					}
+				}
+				cond_resched();
+			}
+			if (res_in)
+				*rinp = res_in;
+			if (res_out)
+				*routp = res_out;
+			if (res_ex)
+				*rexp = res_ex;
+		}
+		wait = NULL;
+		if (retval || !__timeout || signal_pending(current))
+			break;
+		if(table.error) {
+			retval = table.error;
+			break;
+		}
+		__timeout = schedule_timeout(__timeout);
+	}
+	__set_current_state(TASK_RUNNING);
+
+	poll_freewait(&table);
+
+	/*
+	 * Update the caller's timeout.
+	 */
+	*timeout = __timeout;
+	return retval;
+}
+
+static void *select_bits_alloc(int size)
+{
+	return kmalloc(6 * size, GFP_KERNEL);
+}
+
+static void select_bits_free(void *bits, int size)
+{
+	kfree(bits);
+}
+
+/*
+ * We can actually return ERESTARTSYS instead of EINTR, but I'd
+ * like to be certain this leads to no problems. So I return
+ * EINTR just for safety.
+ *
+ * Update: ERESTARTSYS breaks at least the xview clock binary, so
+ * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
+ */
+#define MAX_SELECT_SECONDS \
+	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
+
+asmlinkage long
+sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+{
+	fd_set_bits fds;
+	char *bits;
+	long timeout;
+	int ret, size, max_fdset;
+
+	timeout = MAX_SCHEDULE_TIMEOUT;
+	if (tvp) {
+		time_t sec, usec;
+
+		if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
+		    || __get_user(sec, &tvp->tv_sec)
+		    || __get_user(usec, &tvp->tv_usec)) {
+			ret = -EFAULT;
+			goto out_nofds;
+		}
+
+		ret = -EINVAL;
+		if (sec < 0 || usec < 0)
+			goto out_nofds;
+
+		if ((unsigned long) sec < MAX_SELECT_SECONDS) {
+			timeout = ROUND_UP(usec, 1000000/HZ);
+			timeout += sec * (unsigned long) HZ;
+		}
+	}
+
+	ret = -EINVAL;
+	if (n < 0)
+		goto out_nofds;
+
+	/* max_fdset can increase, so grab it once to avoid race */
+	max_fdset = current->files->max_fdset;
+	if (n > max_fdset)
+		n = max_fdset;
+
+	/*
+	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
+	 * and since we use fd sets we need to allocate memory in units
+	 * of long-words.
+	 */
+	ret = -ENOMEM;
+	size = FDS_BYTES(n);
+	bits = select_bits_alloc(size);
+	if (!bits)
+		goto out_nofds;
+	fds.in      = (unsigned long *)  bits;
+	fds.out     = (unsigned long *) (bits +   size);
+	fds.ex      = (unsigned long *) (bits + 2*size);
+	fds.res_in  = (unsigned long *) (bits + 3*size);
+	fds.res_out = (unsigned long *) (bits + 4*size);
+	fds.res_ex  = (unsigned long *) (bits + 5*size);
+
+	if ((ret = get_fd_set(n, inp, fds.in)) ||
+	    (ret = get_fd_set(n, outp, fds.out)) ||
+	    (ret = get_fd_set(n, exp, fds.ex)))
+		goto out;
+	zero_fd_set(n, fds.res_in);
+	zero_fd_set(n, fds.res_out);
+	zero_fd_set(n, fds.res_ex);
+
+	ret = do_select(n, &fds, &timeout);
+
+	if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
+		time_t sec = 0, usec = 0;
+		if (timeout) {
+			sec = timeout / HZ;
+			usec = timeout % HZ;
+			usec *= (1000000/HZ);
+		}
+		put_user(sec, &tvp->tv_sec);
+		put_user(usec, &tvp->tv_usec);
+	}
+
+	if (ret < 0)
+		goto out;
+	if (!ret) {
+		ret = -ERESTARTNOHAND;
+		if (signal_pending(current))
+			goto out;
+		ret = 0;
+	}
+
+	if (set_fd_set(n, inp, fds.res_in) ||
+	    set_fd_set(n, outp, fds.res_out) ||
+	    set_fd_set(n, exp, fds.res_ex))
+		ret = -EFAULT;
+
+out:
+	select_bits_free(bits, size);
+out_nofds:
+	return ret;
+}
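For reference, the userspace view of sys_select() above (standard POSIX usage, not kernel code): wait up to five seconds for one descriptor to become readable. Matching the copy-out at the end of sys_select(), Linux rewrites the timeval with the time remaining unless the caller's personality has STICKY_TIMEOUTS set.

#include <sys/select.h>

/* Returns >0 if sockfd became readable, 0 on timeout, -1 on error. */
int wait_readable(int sockfd)
{
	fd_set rfds;
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	FD_ZERO(&rfds);
	FD_SET(sockfd, &rfds);
	return select(sockfd + 1, &rfds, NULL, NULL, &tv);
}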
+
+struct poll_list {
+	struct poll_list *next;
+	int len;
+	struct pollfd entries[0];
+};
+
+#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
+
+static void do_pollfd(unsigned int num, struct pollfd * fdpage,
+	poll_table ** pwait, int *count)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		int fd;
+		unsigned int mask;
+		struct pollfd *fdp;
+
+		mask = 0;
+		fdp = fdpage+i;
+		fd = fdp->fd;
+		if (fd >= 0) {
+			struct file * file = fget(fd);
+			mask = POLLNVAL;
+			if (file != NULL) {
+				mask = DEFAULT_POLLMASK;
+				if (file->f_op && file->f_op->poll)
+					mask = file->f_op->poll(file, *pwait);
+				mask &= fdp->events | POLLERR | POLLHUP;
+				fput(file);
+			}
+			if (mask) {
+				*pwait = NULL;
+				(*count)++;
+			}
+		}
+		fdp->revents = mask;
+	}
+}
+
+static int do_poll(unsigned int nfds,  struct poll_list *list,
+			struct poll_wqueues *wait, long timeout)
+{
+	int count = 0;
+	poll_table* pt = &wait->pt;
+
+	if (!timeout)
+		pt = NULL;
+ 
+	for (;;) {
+		struct poll_list *walk;
+		set_current_state(TASK_INTERRUPTIBLE);
+		walk = list;
+		while(walk != NULL) {
+			do_pollfd( walk->len, walk->entries, &pt, &count);
+			walk = walk->next;
+		}
+		pt = NULL;
+		if (count || !timeout || signal_pending(current))
+			break;
+		count = wait->error;
+		if (count)
+			break;
+		timeout = schedule_timeout(timeout);
+	}
+	__set_current_state(TASK_RUNNING);
+	return count;
+}
+
+asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long timeout)
+{
+	struct poll_wqueues table;
+ 	int fdcount, err;
+ 	unsigned int i;
+	struct poll_list *head;
+ 	struct poll_list *walk;
+
+	/* Do a sanity check on nfds ... */
+	if (nfds > current->files->max_fdset && nfds > OPEN_MAX)
+		return -EINVAL;
+
+	if (timeout) {
+		/* Careful about overflow in the intermediate values */
+		if ((unsigned long) timeout < MAX_SCHEDULE_TIMEOUT / HZ)
+			timeout = (unsigned long)(timeout*HZ+999)/1000+1;
+		else /* Negative or overflow */
+			timeout = MAX_SCHEDULE_TIMEOUT;
+	}
+
+	poll_initwait(&table);
+
+	head = NULL;
+	walk = NULL;
+	i = nfds;
+	err = -ENOMEM;
+	while(i!=0) {
+		struct poll_list *pp;
+		pp = kmalloc(sizeof(struct poll_list)+
+				sizeof(struct pollfd)*
+				(i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i),
+					GFP_KERNEL);
+		if(pp==NULL)
+			goto out_fds;
+		pp->next=NULL;
+		pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i);
+		if (head == NULL)
+			head = pp;
+		else
+			walk->next = pp;
+
+		walk = pp;
+		if (copy_from_user(pp->entries, ufds + nfds-i, 
+				sizeof(struct pollfd)*pp->len)) {
+			err = -EFAULT;
+			goto out_fds;
+		}
+		i -= pp->len;
+	}
+	fdcount = do_poll(nfds, head, &table, timeout);
+
+	/* OK, now copy the revents fields back to user space. */
+	walk = head;
+	err = -EFAULT;
+	while(walk != NULL) {
+		struct pollfd *fds = walk->entries;
+		int j;
+
+		for (j=0; j < walk->len; j++, ufds++) {
+			if(__put_user(fds[j].revents, &ufds->revents))
+				goto out_fds;
+		}
+		walk = walk->next;
+  	}
+	err = fdcount;
+	if (!fdcount && signal_pending(current))
+		err = -EINTR;
+out_fds:
+	walk = head;
+	while(walk!=NULL) {
+		struct poll_list *pp = walk->next;
+		kfree(walk);
+		walk = pp;
+	}
+	poll_freewait(&table);
+	return err;
+}
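And the userspace counterpart of sys_poll(): one struct pollfd per descriptor, requested events in, results back in revents, timeout in milliseconds (which the code above converts to jiffies).

#include <poll.h>

/* Returns 0 if fd became readable within 5 seconds, -1 otherwise. */
int wait_pollin(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, 5000);	/* timeout in milliseconds */

	return (n == 1 && (pfd.revents & POLLIN)) ? 0 : -1;
}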
diff --git a/fs/seq_file.c b/fs/seq_file.c
new file mode 100644
index 0000000..650c43b
--- /dev/null
+++ b/fs/seq_file.c
@@ -0,0 +1,440 @@
+/*
+ * linux/fs/seq_file.c
+ *
+ * helper functions for making synthetic files from sequences of records.
+ * initial implementation -- AV, Oct 2001.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+/**
+ *	seq_open -	initialize sequential file
+ *	@file: file we initialize
+ *	@op: method table describing the sequence
+ *
+ *	seq_open() sets @file, associating it with a sequence described
+ *	by @op.  @op->start() sets the iterator up and returns the first
+ *	element of sequence. @op->stop() shuts it down.  @op->next()
+ *	returns the next element of sequence.  @op->show() prints element
+ *	into the buffer.  In case of error ->start() and ->next() return
+ *	ERR_PTR(error).  At the end of the sequence they return %NULL. ->show()
+ *	returns 0 in case of success and negative number in case of error.
+ */
+int seq_open(struct file *file, struct seq_operations *op)
+{
+	struct seq_file *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	memset(p, 0, sizeof(*p));
+	sema_init(&p->sem, 1);
+	p->op = op;
+	file->private_data = p;
+
+	/*
+	 * Wrappers around seq_open(e.g. swaps_open) need to be
+	 * aware of this. If they set f_version themselves, they
+	 * should call seq_open first and then set f_version.
+	 */
+	file->f_version = 0;
+
+	/* SEQ files support lseek, but not pread/pwrite */
+	file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+EXPORT_SYMBOL(seq_open);
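The kernel-doc above spells out the iterator contract (start/next/stop/show); a minimal sketch of a seq_operations table iterating over a fixed array might look like the following. The array and all names are hypothetical, for illustration only.

/* Sketch only: a trivial seq_file iterator over a static array. */
static int example_values[] = { 1, 2, 3 };

static void *example_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < ARRAY_SIZE(example_values)) ? &example_values[*pos] : NULL;
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < ARRAY_SIZE(example_values)) ? &example_values[*pos] : NULL;
}

static void example_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static struct seq_operations example_seq_ops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};

A file would then call seq_open(file, &example_seq_ops) from its ->open() method and point ->read, ->llseek and ->release at seq_read, seq_lseek and seq_release below.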
+
+/**
+ *	seq_read -	->read() method for sequential files.
+ *	@file, @buf, @size, @ppos: see file_operations method
+ *
+ *	Ready-made ->f_op->read()
+ */
+ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+{
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	size_t copied = 0;
+	loff_t pos;
+	size_t n;
+	void *p;
+	int err = 0;
+
+	down(&m->sem);
+	/*
+	 * seq_file->op->..m_start/m_stop/m_next may do special actions
+	 * or optimisations based on the file->f_version, so we want to
+	 * pass the file->f_version to those methods.
+	 *
+	 * seq_file->version is just copy of f_version, and seq_file
+	 * methods can treat it simply as file version.
+	 * It is copied in first and copied out after all operations.
+	 * It is convenient to have it as part of the structure to avoid the
+	 * need to pass another argument to all the seq_file methods.
+	 */
+	m->version = file->f_version;
+	/* grab buffer if we didn't have one */
+	if (!m->buf) {
+		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		if (!m->buf)
+			goto Enomem;
+	}
+	/* if not empty - flush it first */
+	if (m->count) {
+		n = min(m->count, size);
+		err = copy_to_user(buf, m->buf + m->from, n);
+		if (err)
+			goto Efault;
+		m->count -= n;
+		m->from += n;
+		size -= n;
+		buf += n;
+		copied += n;
+		if (!m->count)
+			m->index++;
+		if (!size)
+			goto Done;
+	}
+	/* we need at least one record in buffer */
+	while (1) {
+		pos = m->index;
+		p = m->op->start(m, &pos);
+		err = PTR_ERR(p);
+		if (!p || IS_ERR(p))
+			break;
+		err = m->op->show(m, p);
+		if (err)
+			break;
+		if (m->count < m->size)
+			goto Fill;
+		m->op->stop(m, p);
+		kfree(m->buf);
+		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+		if (!m->buf)
+			goto Enomem;
+		m->count = 0;
+		m->version = 0;
+	}
+	m->op->stop(m, p);
+	m->count = 0;
+	goto Done;
+Fill:
+	/* they want more? let's try to get some more */
+	while (m->count < size) {
+		size_t offs = m->count;
+		loff_t next = pos;
+		p = m->op->next(m, p, &next);
+		if (!p || IS_ERR(p)) {
+			err = PTR_ERR(p);
+			break;
+		}
+		err = m->op->show(m, p);
+		if (err || m->count == m->size) {
+			m->count = offs;
+			break;
+		}
+		pos = next;
+	}
+	m->op->stop(m, p);
+	n = min(m->count, size);
+	err = copy_to_user(buf, m->buf, n);
+	if (err)
+		goto Efault;
+	copied += n;
+	m->count -= n;
+	if (m->count)
+		m->from = n;
+	else
+		pos++;
+	m->index = pos;
+Done:
+	if (!copied)
+		copied = err;
+	else
+		*ppos += copied;
+	file->f_version = m->version;
+	up(&m->sem);
+	return copied;
+Enomem:
+	err = -ENOMEM;
+	goto Done;
+Efault:
+	err = -EFAULT;
+	goto Done;
+}
+EXPORT_SYMBOL(seq_read);
+
+static int traverse(struct seq_file *m, loff_t offset)
+{
+	loff_t pos = 0;
+	int error = 0;
+	void *p;
+
+	m->version = 0;
+	m->index = 0;
+	m->count = m->from = 0;
+	if (!offset)
+		return 0;
+	if (!m->buf) {
+		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		if (!m->buf)
+			return -ENOMEM;
+	}
+	p = m->op->start(m, &m->index);
+	while (p) {
+		error = PTR_ERR(p);
+		if (IS_ERR(p))
+			break;
+		error = m->op->show(m, p);
+		if (error)
+			break;
+		if (m->count == m->size)
+			goto Eoverflow;
+		if (pos + m->count > offset) {
+			m->from = offset - pos;
+			m->count -= m->from;
+			break;
+		}
+		pos += m->count;
+		m->count = 0;
+		if (pos == offset) {
+			m->index++;
+			break;
+		}
+		p = m->op->next(m, p, &m->index);
+	}
+	m->op->stop(m, p);
+	return error;
+
+Eoverflow:
+	m->op->stop(m, p);
+	kfree(m->buf);
+	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+	return !m->buf ? -ENOMEM : -EAGAIN;
+}
+
+/**
+ *	seq_lseek -	->llseek() method for sequential files.
+ *	@file, @offset, @origin: see file_operations method
+ *
+ *	Ready-made ->f_op->llseek()
+ */
+loff_t seq_lseek(struct file *file, loff_t offset, int origin)
+{
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	long long retval = -EINVAL;
+
+	down(&m->sem);
+	m->version = file->f_version;
+	switch (origin) {
+		case 1:
+			offset += file->f_pos;
+		case 0:
+			if (offset < 0)
+				break;
+			retval = offset;
+			if (offset != file->f_pos) {
+				while ((retval=traverse(m, offset)) == -EAGAIN)
+					;
+				if (retval) {
+					/* with extreme prejudice... */
+					file->f_pos = 0;
+					m->version = 0;
+					m->index = 0;
+					m->count = 0;
+				} else {
+					retval = file->f_pos = offset;
+				}
+			}
+	}
+	up(&m->sem);
+	file->f_version = m->version;
+	return retval;
+}
+EXPORT_SYMBOL(seq_lseek);
+
+/**
+ *	seq_release -	free the structures associated with sequential file.
+ *	@file: file in question
+ *	@inode: file->f_dentry->d_inode
+ *
+ *	Frees the structures associated with sequential file; can be used
+ *	as ->f_op->release() if you don't have private data to destroy.
+ */
+int seq_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	kfree(m->buf);
+	kfree(m);
+	return 0;
+}
+EXPORT_SYMBOL(seq_release);
+
+/**
+ *	seq_escape -	print string into buffer, escaping some characters
+ *	@m:	target buffer
+ *	@s:	string
+ *	@esc:	set of characters that need escaping
+ *
+ *	Puts string into buffer, replacing each occurrence of character from
+ *	@esc with the usual octal escape.  Returns 0 in case of success,
+ *	-1 in case of overflow.
+ */
+int seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+	char *end = m->buf + m->size;
+        char *p;
+	char c;
+
+        for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
+		if (!strchr(esc, c)) {
+			*p++ = c;
+			continue;
+		}
+		if (p + 3 < end) {
+			*p++ = '\\';
+			*p++ = '0' + ((c & 0300) >> 6);
+			*p++ = '0' + ((c & 070) >> 3);
+			*p++ = '0' + (c & 07);
+			continue;
+		}
+		m->count = m->size;
+		return -1;
+        }
+	m->count = p - m->buf;
+        return 0;
+}
+EXPORT_SYMBOL(seq_escape);
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+	va_list args;
+	int len;
+
+	if (m->count < m->size) {
+		va_start(args, f);
+		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
+		va_end(args);
+		if (m->count + len < m->size) {
+			m->count += len;
+			return 0;
+		}
+	}
+	m->count = m->size;
+	return -1;
+}
+EXPORT_SYMBOL(seq_printf);
+
+int seq_path(struct seq_file *m,
+	     struct vfsmount *mnt, struct dentry *dentry,
+	     char *esc)
+{
+	if (m->count < m->size) {
+		char *s = m->buf + m->count;
+		char *p = d_path(dentry, mnt, s, m->size - m->count);
+		if (!IS_ERR(p)) {
+			while (s <= p) {
+				char c = *p++;
+				if (!c) {
+					p = m->buf + m->count;
+					m->count = s - m->buf;
+					return s - p;
+				} else if (!strchr(esc, c)) {
+					*s++ = c;
+				} else if (s + 4 > p) {
+					break;
+				} else {
+					*s++ = '\\';
+					*s++ = '0' + ((c & 0300) >> 6);
+					*s++ = '0' + ((c & 070) >> 3);
+					*s++ = '0' + (c & 07);
+				}
+			}
+		}
+	}
+	m->count = m->size;
+	return -1;
+}
+EXPORT_SYMBOL(seq_path);
+
+static void *single_start(struct seq_file *p, loff_t *pos)
+{
+	return NULL + (*pos == 0);
+}
+
+static void *single_next(struct seq_file *p, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+
+static void single_stop(struct seq_file *p, void *v)
+{
+}
+
+int single_open(struct file *file, int (*show)(struct seq_file *, void *),
+		void *data)
+{
+	struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
+	int res = -ENOMEM;
+
+	if (op) {
+		op->start = single_start;
+		op->next = single_next;
+		op->stop = single_stop;
+		op->show = show;
+		res = seq_open(file, op);
+		if (!res)
+			((struct seq_file *)file->private_data)->private = data;
+		else
+			kfree(op);
+	}
+	return res;
+}
+EXPORT_SYMBOL(single_open);
+
+int single_release(struct inode *inode, struct file *file)
+{
+	struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+	int res = seq_release(inode, file);
+	kfree(op);
+	return res;
+}
+EXPORT_SYMBOL(single_release);
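For a file that has exactly one record, single_open()/single_release() above remove the need to write an iterator at all. A typical pairing with a /proc-style file_operations table (the names and the payload are hypothetical):

/* Sketch only: a one-record seq_file, e.g. for a /proc entry. */
static int example_show_one(struct seq_file *m, void *v)
{
	seq_printf(m, "answer: %d\n", 42);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show_one, NULL);
}

static struct file_operations example_proc_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};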
+
+int seq_release_private(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	kfree(seq->private);
+	seq->private = NULL;
+	return seq_release(inode, file);
+}
+EXPORT_SYMBOL(seq_release_private);
+
+int seq_putc(struct seq_file *m, char c)
+{
+	if (m->count < m->size) {
+		m->buf[m->count++] = c;
+		return 0;
+	}
+	return -1;
+}
+EXPORT_SYMBOL(seq_putc);
+
+int seq_puts(struct seq_file *m, const char *s)
+{
+	int len = strlen(s);
+	if (m->count + len < m->size) {
+		memcpy(m->buf + m->count, s, len);
+		m->count += len;
+		return 0;
+	}
+	m->count = m->size;
+	return -1;
+}
+EXPORT_SYMBOL(seq_puts);
diff --git a/fs/smbfs/Makefile b/fs/smbfs/Makefile
new file mode 100644
index 0000000..93246b7
--- /dev/null
+++ b/fs/smbfs/Makefile
@@ -0,0 +1,39 @@
+#
+# Makefile for the linux smb-filesystem routines.
+#
+
+obj-$(CONFIG_SMB_FS) += smbfs.o
+
+smbfs-objs := proc.o dir.o cache.o sock.o inode.o file.o ioctl.o getopt.o \
+		symlink.o smbiod.o request.o
+
+# If you want debugging output, you may add these flags to EXTRA_CFLAGS.
+# SMBFS_PARANOIA should normally be enabled.
+
+EXTRA_CFLAGS += -DSMBFS_PARANOIA
+#EXTRA_CFLAGS += -DSMBFS_DEBUG
+#EXTRA_CFLAGS += -DSMBFS_DEBUG_VERBOSE
+#EXTRA_CFLAGS += -DDEBUG_SMB_MALLOC
+#EXTRA_CFLAGS += -DDEBUG_SMB_TIMESTAMP
+#EXTRA_CFLAGS += -Werror
+
+#
+# Maintainer rules
+#
+
+# getopt.c not included. It is intentionally separate
+SRC = proc.c dir.c cache.c sock.c inode.c file.c ioctl.c smbiod.c request.c \
+	symlink.c
+
+proto:
+	-rm -f proto.h
+	@echo >  proto2.h "/*"
+	@echo >> proto2.h " *  Autogenerated with cproto on: " `date`
+	@echo >> proto2.h " */"
+	@echo >> proto2.h ""
+	@echo >> proto2.h "struct smb_request;"
+	@echo >> proto2.h "struct sock;"
+	@echo >> proto2.h "struct statfs;"
+	@echo >> proto2.h ""
+	cproto -E "gcc -E" -e -v -I $(TOPDIR)/include -DMAKING_PROTO -D__KERNEL__ $(SRC) >> proto2.h
+	mv proto2.h proto.h
diff --git a/fs/smbfs/cache.c b/fs/smbfs/cache.c
new file mode 100644
index 0000000..f3e6b81
--- /dev/null
+++ b/fs/smbfs/cache.c
@@ -0,0 +1,209 @@
+/*
+ *  cache.c
+ *
+ * Copyright (C) 1997 by Bill Hawes
+ *
+ * Routines to support directory caching using the page cache.
+ * This cache code is almost directly taken from ncpfs.
+ *
+ * Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/dirent.h>
+#include <linux/smb_fs.h>
+#include <linux/pagemap.h>
+#include <linux/net.h>
+
+#include <asm/page.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+
+/*
+ * Force the next attempt to use the cache to treat it as timed out.
+ * If we can't find the page, that's fine; it will just cause a refresh.
+ */
+void
+smb_invalid_dir_cache(struct inode * dir)
+{
+	struct smb_sb_info *server = server_from_inode(dir);
+	union  smb_dir_cache *cache = NULL;
+	struct page *page = NULL;
+
+	page = grab_cache_page(&dir->i_data, 0);
+	if (!page)
+		goto out;
+
+	if (!PageUptodate(page))
+		goto out_unlock;
+
+	cache = kmap(page);
+	cache->head.time = jiffies - SMB_MAX_AGE(server);
+
+	kunmap(page);
+	SetPageUptodate(page);
+out_unlock:
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return;
+}
+
+/*
+ * Mark all dentries for 'parent' as invalid, forcing them to be re-read
+ */
+void
+smb_invalidate_dircache_entries(struct dentry *parent)
+{
+	struct smb_sb_info *server = server_from_dentry(parent);
+	struct list_head *next;
+	struct dentry *dentry;
+
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dentry = list_entry(next, struct dentry, d_child);
+		dentry->d_fsdata = NULL;
+		smb_age_dentry(server, dentry);
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
+}
+
+/*
+ * dget, but require that fpos and parent matches what the dentry contains.
+ * dentry is not known to be a valid pointer at entry.
+ */
+struct dentry *
+smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
+{
+	struct dentry *dent = dentry;
+	struct list_head *next;
+
+	if (d_validate(dent, parent)) {
+		if (dent->d_name.len <= SMB_MAXNAMELEN &&
+		    (unsigned long)dent->d_fsdata == fpos) {
+			if (!dent->d_inode) {
+				dput(dent);
+				dent = NULL;
+			}
+			return dent;
+		}
+		dput(dent);
+	}
+
+	/* If a pointer is invalid, we search the dentry. */
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dent = list_entry(next, struct dentry, d_child);
+		if ((unsigned long)dent->d_fsdata == fpos) {
+			if (dent->d_inode)
+				dget_locked(dent);
+			else
+				dent = NULL;
+			goto out_unlock;
+		}
+		next = next->next;
+	}
+	dent = NULL;
+out_unlock:
+	spin_unlock(&dcache_lock);
+	return dent;
+}
+
+
+/*
+ * Create dentry/inode for this file and add it to the dircache.
+ */
+int
+smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+	       struct smb_cache_control *ctrl, struct qstr *qname,
+	       struct smb_fattr *entry)
+{
+	struct dentry *newdent, *dentry = filp->f_dentry;
+	struct inode *newino, *inode = dentry->d_inode;
+	struct smb_cache_control ctl = *ctrl;
+	int valid = 0;
+	int hashed = 0;
+	ino_t ino = 0;
+
+	qname->hash = full_name_hash(qname->name, qname->len);
+
+	if (dentry->d_op && dentry->d_op->d_hash)
+		if (dentry->d_op->d_hash(dentry, qname) != 0)
+			goto end_advance;
+
+	newdent = d_lookup(dentry, qname);
+
+	if (!newdent) {
+		newdent = d_alloc(dentry, qname);
+		if (!newdent)
+			goto end_advance;
+	} else {
+		hashed = 1;
+		memcpy((char *) newdent->d_name.name, qname->name,
+		       newdent->d_name.len);
+	}
+
+	if (!newdent->d_inode) {
+		smb_renew_times(newdent);
+		entry->f_ino = iunique(inode->i_sb, 2);
+		newino = smb_iget(inode->i_sb, entry);
+		if (newino) {
+			smb_new_dentry(newdent);
+			d_instantiate(newdent, newino);
+			if (!hashed)
+				d_rehash(newdent);
+		}
+	} else
+		smb_set_inode_attr(newdent->d_inode, entry);
+
+	if (newdent->d_inode) {
+		ino = newdent->d_inode->i_ino;
+		newdent->d_fsdata = (void *) ctl.fpos;
+		smb_new_dentry(newdent);
+	}
+
+	if (ctl.idx >= SMB_DIRCACHE_SIZE) {
+		if (ctl.page) {
+			kunmap(ctl.page);
+			SetPageUptodate(ctl.page);
+			unlock_page(ctl.page);
+			page_cache_release(ctl.page);
+		}
+		ctl.cache = NULL;
+		ctl.idx  -= SMB_DIRCACHE_SIZE;
+		ctl.ofs  += 1;
+		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
+		if (ctl.page)
+			ctl.cache = kmap(ctl.page);
+	}
+	if (ctl.cache) {
+		ctl.cache->dentry[ctl.idx] = newdent;
+		valid = 1;
+	}
+	dput(newdent);
+
+end_advance:
+	if (!valid)
+		ctl.valid = 0;
+	if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
+		if (!ino)
+			ino = find_inode_number(dentry, qname);
+		if (!ino)
+			ino = iunique(inode->i_sb, 2);
+		ctl.filled = filldir(dirent, qname->name, qname->len,
+				     filp->f_pos, ino, DT_UNKNOWN);
+		if (!ctl.filled)
+			filp->f_pos += 1;
+	}
+	ctl.fpos += 1;
+	ctl.idx  += 1;
+	*ctrl = ctl;
+	return (ctl.valid || !ctl.filled);
+}
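smb_fill_cache() stores one dentry pointer per directory position in page-cache pages of the directory's own mapping; page 0 starts with the smb_cache_head, which is why the first SMB_DIRCACHE_START slots are skipped. A small sketch of the position-to-slot arithmetic, mirroring what smb_readdir() in dir.c below computes; the helper itself is not smbfs code:

/* Sketch only: map a readdir position onto (cache page, slot). */
static void dircache_locate(unsigned long f_pos,	/* >= 2, past "." and ".." */
			    unsigned long *page_index, unsigned long *slot)
{
	unsigned long fpos = f_pos + (SMB_DIRCACHE_START - 2);

	*page_index = fpos / SMB_DIRCACHE_SIZE;	/* which page of dir->i_data */
	*slot       = fpos % SMB_DIRCACHE_SIZE;	/* dentry slot within that page */
}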
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
new file mode 100644
index 0000000..c6c33e1
--- /dev/null
+++ b/fs/smbfs/dir.c
@@ -0,0 +1,693 @@
+/*
+ *  dir.c
+ *
+ *  Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/ctype.h>
+#include <linux/net.h>
+
+#include <linux/smb_fs.h>
+#include <linux/smb_mount.h>
+#include <linux/smbno.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+
+static int smb_readdir(struct file *, void *, filldir_t);
+static int smb_dir_open(struct inode *, struct file *);
+
+static struct dentry *smb_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int smb_create(struct inode *, struct dentry *, int, struct nameidata *);
+static int smb_mkdir(struct inode *, struct dentry *, int);
+static int smb_rmdir(struct inode *, struct dentry *);
+static int smb_unlink(struct inode *, struct dentry *);
+static int smb_rename(struct inode *, struct dentry *,
+		      struct inode *, struct dentry *);
+static int smb_make_node(struct inode *,struct dentry *,int,dev_t);
+static int smb_link(struct dentry *, struct inode *, struct dentry *);
+
+struct file_operations smb_dir_operations =
+{
+	.read		= generic_read_dir,
+	.readdir	= smb_readdir,
+	.ioctl		= smb_ioctl,
+	.open		= smb_dir_open,
+};
+
+struct inode_operations smb_dir_inode_operations =
+{
+	.create		= smb_create,
+	.lookup		= smb_lookup,
+	.unlink		= smb_unlink,
+	.mkdir		= smb_mkdir,
+	.rmdir		= smb_rmdir,
+	.rename		= smb_rename,
+	.getattr	= smb_getattr,
+	.setattr	= smb_notify_change,
+};
+
+struct inode_operations smb_dir_inode_operations_unix =
+{
+	.create		= smb_create,
+	.lookup		= smb_lookup,
+	.unlink		= smb_unlink,
+	.mkdir		= smb_mkdir,
+	.rmdir		= smb_rmdir,
+	.rename		= smb_rename,
+	.getattr	= smb_getattr,
+	.setattr	= smb_notify_change,
+	.symlink	= smb_symlink,
+	.mknod		= smb_make_node,
+	.link		= smb_link,
+};
+
+/*
+ * Read a directory, using filldir to fill the dirent memory.
+ * smb_proc_readdir does the actual reading from the smb server.
+ *
+ * The cache code is almost directly taken from ncpfs
+ */
+static int 
+smb_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *dir = dentry->d_inode;
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	union  smb_dir_cache *cache = NULL;
+	struct smb_cache_control ctl;
+	struct page *page = NULL;
+	int result;
+
+	ctl.page  = NULL;
+	ctl.cache = NULL;
+
+	VERBOSE("reading %s/%s, f_pos=%d\n",
+		DENTRY_PATH(dentry),  (int) filp->f_pos);
+
+	result = 0;
+
+	lock_kernel();
+
+	switch ((unsigned int) filp->f_pos) {
+	case 0:
+		if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
+			goto out;
+		filp->f_pos = 1;
+		/* fallthrough */
+	case 1:
+		if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR) < 0)
+			goto out;
+		filp->f_pos = 2;
+	}
+
+	/*
+	 * Make sure our inode is up-to-date.
+	 */
+	result = smb_revalidate_inode(dentry);
+	if (result)
+		goto out;
+
+
+	page = grab_cache_page(&dir->i_data, 0);
+	if (!page)
+		goto read_really;
+
+	ctl.cache = cache = kmap(page);
+	ctl.head  = cache->head;
+
+	if (!PageUptodate(page) || !ctl.head.eof) {
+		VERBOSE("%s/%s, page uptodate=%d, eof=%d\n",
+			 DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
+		goto init_cache;
+	}
+
+	if (filp->f_pos == 2) {
+		if (jiffies - ctl.head.time >= SMB_MAX_AGE(server))
+			goto init_cache;
+
+		/*
+		 * N.B. ncpfs checks mtime of dentry too here, we don't.
+		 *   1. common smb servers do not update mtime on dir changes
+		 *   2. it requires an extra smb request
+		 *      (revalidate has the same timeout as ctl.head.time)
+		 *
+		 * Instead smbfs invalidates its own cache on local changes
+		 * and remote changes are not seen until timeout.
+		 */
+	}
+
+	if (filp->f_pos > ctl.head.end)
+		goto finished;
+
+	ctl.fpos = filp->f_pos + (SMB_DIRCACHE_START - 2);
+	ctl.ofs  = ctl.fpos / SMB_DIRCACHE_SIZE;
+	ctl.idx  = ctl.fpos % SMB_DIRCACHE_SIZE;
+
+	for (;;) {
+		if (ctl.ofs != 0) {
+			ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
+			if (!ctl.page)
+				goto invalid_cache;
+			ctl.cache = kmap(ctl.page);
+			if (!PageUptodate(ctl.page))
+				goto invalid_cache;
+		}
+		while (ctl.idx < SMB_DIRCACHE_SIZE) {
+			struct dentry *dent;
+			int res;
+
+			dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx],
+					     dentry, filp->f_pos);
+			if (!dent)
+				goto invalid_cache;
+
+			res = filldir(dirent, dent->d_name.name,
+				      dent->d_name.len, filp->f_pos,
+				      dent->d_inode->i_ino, DT_UNKNOWN);
+			dput(dent);
+			if (res)
+				goto finished;
+			filp->f_pos += 1;
+			ctl.idx += 1;
+			if (filp->f_pos > ctl.head.end)
+				goto finished;
+		}
+		if (ctl.page) {
+			kunmap(ctl.page);
+			SetPageUptodate(ctl.page);
+			unlock_page(ctl.page);
+			page_cache_release(ctl.page);
+			ctl.page = NULL;
+		}
+		ctl.idx  = 0;
+		ctl.ofs += 1;
+	}
+invalid_cache:
+	if (ctl.page) {
+		kunmap(ctl.page);
+		unlock_page(ctl.page);
+		page_cache_release(ctl.page);
+		ctl.page = NULL;
+	}
+	ctl.cache = cache;
+init_cache:
+	smb_invalidate_dircache_entries(dentry);
+	ctl.head.time = jiffies;
+	ctl.head.eof = 0;
+	ctl.fpos = 2;
+	ctl.ofs = 0;
+	ctl.idx = SMB_DIRCACHE_START;
+	ctl.filled = 0;
+	ctl.valid  = 1;
+read_really:
+	result = server->ops->readdir(filp, dirent, filldir, &ctl);
+	if (ctl.idx == -1)
+		goto invalid_cache;	/* retry */
+	ctl.head.end = ctl.fpos - 1;
+	ctl.head.eof = ctl.valid;
+finished:
+	if (page) {
+		cache->head = ctl.head;
+		kunmap(page);
+		SetPageUptodate(page);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	if (ctl.page) {
+		kunmap(ctl.page);
+		SetPageUptodate(ctl.page);
+		unlock_page(ctl.page);
+		page_cache_release(ctl.page);
+	}
+out:
+	unlock_kernel();
+	return result;
+}
+
+static int
+smb_dir_open(struct inode *dir, struct file *file)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct smb_sb_info *server;
+	int error = 0;
+
+	VERBOSE("(%s/%s)\n", dentry->d_parent->d_name.name,
+		file->f_dentry->d_name.name);
+
+	/*
+	 * Directory timestamps in the core protocol aren't updated
+	 * when a file is added, so we give them a very short TTL.
+	 */
+	lock_kernel();
+	server = server_from_dentry(dentry);
+	if (server->opt.protocol < SMB_PROTOCOL_LANMAN2) {
+		unsigned long age = jiffies - SMB_I(dir)->oldmtime;
+		if (age > 2*HZ)
+			smb_invalid_dir_cache(dir);
+	}
+
+	/*
+	 * Note: in order to allow the smbmount process to open the
+	 * mount point, we only revalidate if the connection is valid or
+	 * if the process is trying to access something other than the root.
+	 */
+	if (server->state == CONN_VALID || !IS_ROOT(dentry))
+		error = smb_revalidate_inode(dentry);
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * Dentry operations routines
+ */
+static int smb_lookup_validate(struct dentry *, struct nameidata *);
+static int smb_hash_dentry(struct dentry *, struct qstr *);
+static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+static int smb_delete_dentry(struct dentry *);
+
+static struct dentry_operations smbfs_dentry_operations =
+{
+	.d_revalidate	= smb_lookup_validate,
+	.d_hash		= smb_hash_dentry,
+	.d_compare	= smb_compare_dentry,
+	.d_delete	= smb_delete_dentry,
+};
+
+static struct dentry_operations smbfs_dentry_operations_case =
+{
+	.d_revalidate	= smb_lookup_validate,
+	.d_delete	= smb_delete_dentry,
+};
+
+
+/*
+ * This is the callback when the dcache has a lookup hit.
+ */
+static int
+smb_lookup_validate(struct dentry * dentry, struct nameidata *nd)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	struct inode * inode = dentry->d_inode;
+	unsigned long age = jiffies - dentry->d_time;
+	int valid;
+
+	/*
+	 * The default validation is based on dentry age:
+	 * we believe in dentries for a few seconds.  (But each
+	 * successful server lookup renews the timestamp.)
+	 */
+	valid = (age <= SMB_MAX_AGE(server));
+#ifdef SMBFS_DEBUG_VERBOSE
+	if (!valid)
+		VERBOSE("%s/%s not valid, age=%lu\n", 
+			DENTRY_PATH(dentry), age);
+#endif
+
+	if (inode) {
+		lock_kernel();
+		if (is_bad_inode(inode)) {
+			PARANOIA("%s/%s has dud inode\n", DENTRY_PATH(dentry));
+			valid = 0;
+		} else if (!valid)
+			valid = (smb_revalidate_inode(dentry) == 0);
+		unlock_kernel();
+	} else {
+		/*
+		 * What should we do for negative dentries?
+		 */
+	}
+	return valid;
+}
+
+static int 
+smb_hash_dentry(struct dentry *dir, struct qstr *this)
+{
+	unsigned long hash;
+	int i;
+
+	hash = init_name_hash();
+	for (i=0; i < this->len ; i++)
+		hash = partial_name_hash(tolower(this->name[i]), hash);
+	this->hash = end_name_hash(hash);
+  
+	return 0;
+}
+
+static int
+smb_compare_dentry(struct dentry *dir, struct qstr *a, struct qstr *b)
+{
+	int i, result = 1;
+
+	if (a->len != b->len)
+		goto out;
+	for (i=0; i < a->len; i++) {
+		if (tolower(a->name[i]) != tolower(b->name[i]))
+			goto out;
+	}
+	result = 0;
+out:
+	return result;
+}
+
+/*
+ * This is the callback from dput() when d_count is going to 0.
+ * We use this to unhash dentries with bad inodes.
+ */
+static int
+smb_delete_dentry(struct dentry * dentry)
+{
+	if (dentry->d_inode) {
+		if (is_bad_inode(dentry->d_inode)) {
+			PARANOIA("bad inode, unhashing %s/%s\n",
+				 DENTRY_PATH(dentry));
+			return 1;
+		}
+	} else {
+		/* N.B. Unhash negative dentries? */
+	}
+	return 0;
+}
+
+/*
+ * Initialize a new dentry
+ */
+void
+smb_new_dentry(struct dentry *dentry)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+
+	if (server->mnt->flags & SMB_MOUNT_CASE)
+		dentry->d_op = &smbfs_dentry_operations_case;
+	else
+		dentry->d_op = &smbfs_dentry_operations;
+	dentry->d_time = jiffies;
+}
+
+
+/*
+ * Whenever a lookup succeeds, we know the parent directories
+ * are all valid, so we want to update the dentry timestamps.
+ * N.B. Move this to dcache?
+ */
+void
+smb_renew_times(struct dentry * dentry)
+{
+	dget(dentry);
+	spin_lock(&dentry->d_lock);
+	for (;;) {
+		struct dentry *parent;
+
+		dentry->d_time = jiffies;
+		if (IS_ROOT(dentry))
+			break;
+		parent = dentry->d_parent;
+		dget(parent);
+		spin_unlock(&dentry->d_lock);
+		dput(dentry);
+		dentry = parent;
+		spin_lock(&dentry->d_lock);
+	}
+	spin_unlock(&dentry->d_lock);
+	dput(dentry);
+}
+
+static struct dentry *
+smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct smb_fattr finfo;
+	struct inode *inode;
+	int error;
+	struct smb_sb_info *server;
+
+	error = -ENAMETOOLONG;
+	if (dentry->d_name.len > SMB_MAXNAMELEN)
+		goto out;
+
+	lock_kernel();
+	error = smb_proc_getattr(dentry, &finfo);
+#ifdef SMBFS_PARANOIA
+	if (error && error != -ENOENT)
+		PARANOIA("find %s/%s failed, error=%d\n",
+			 DENTRY_PATH(dentry), error);
+#endif
+
+	inode = NULL;
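+	/*
+	 * A lookup miss is still worth caching: jump to add_entry with
+	 * inode left NULL so that d_add() installs a negative dentry.
+	 */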
+	if (error == -ENOENT)
+		goto add_entry;
+	if (!error) {
+		error = -EACCES;
+		finfo.f_ino = iunique(dentry->d_sb, 2);
+		inode = smb_iget(dir->i_sb, &finfo);
+		if (inode) {
+	add_entry:
+			server = server_from_dentry(dentry);
+			if (server->mnt->flags & SMB_MOUNT_CASE)
+				dentry->d_op = &smbfs_dentry_operations_case;
+			else
+				dentry->d_op = &smbfs_dentry_operations;
+
+			d_add(dentry, inode);
+			smb_renew_times(dentry);
+			error = 0;
+		}
+	}
+	unlock_kernel();
+out:
+	return ERR_PTR(error);
+}
+
+/*
+ * This code is common to all routines creating a new inode.
+ */
+static int
+smb_instantiate(struct dentry *dentry, __u16 fileid, int have_id)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	struct inode *inode;
+	int error;
+	struct smb_fattr fattr;
+
+	VERBOSE("file %s/%s, fileid=%u\n", DENTRY_PATH(dentry), fileid);
+
+	error = smb_proc_getattr(dentry, &fattr);
+	if (error)
+		goto out_close;
+
+	smb_renew_times(dentry);
+	fattr.f_ino = iunique(dentry->d_sb, 2);
+	inode = smb_iget(dentry->d_sb, &fattr);
+	if (!inode)
+		goto out_no_inode;
+
+	if (have_id) {
+		struct smb_inode_info *ei = SMB_I(inode);
+		ei->fileid = fileid;
+		ei->access = SMB_O_RDWR;
+		ei->open = server->generation;
+	}
+	d_instantiate(dentry, inode);
+out:
+	return error;
+
+out_no_inode:
+	error = -EACCES;
+out_close:
+	if (have_id) {
+		PARANOIA("%s/%s failed, error=%d, closing %u\n",
+			 DENTRY_PATH(dentry), error, fileid);
+		smb_close_fileid(dentry, fileid);
+	}
+	goto out;
+}
+
+/* N.B. How should the mode argument be used? */
+static int
+smb_create(struct inode *dir, struct dentry *dentry, int mode,
+		struct nameidata *nd)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	__u16 fileid;
+	int error;
+	struct iattr attr;
+
+	VERBOSE("creating %s/%s, mode=%d\n", DENTRY_PATH(dentry), mode);
+
+	lock_kernel();
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_create(dentry, 0, get_seconds(), &fileid);
+	if (!error) {
+		if (server->opt.capabilities & SMB_CAP_UNIX) {
+			/* Set attributes for new file */
+			attr.ia_valid = ATTR_MODE;
+			attr.ia_mode = mode;
+			error = smb_proc_setattr_unix(dentry, &attr, 0, 0);
+		}
+		error = smb_instantiate(dentry, fileid, 1);
+	} else {
+		PARANOIA("%s/%s failed, error=%d\n",
+			 DENTRY_PATH(dentry), error);
+	}
+	unlock_kernel();
+	return error;
+}
+
+/* N.B. How should the mode argument be used? */
+static int
+smb_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	int error;
+	struct iattr attr;
+
+	lock_kernel();
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_mkdir(dentry);
+	if (!error) {
+		if (server->opt.capabilities & SMB_CAP_UNIX) {
+			/* Set attributes for new directory */
+			attr.ia_valid = ATTR_MODE;
+			attr.ia_mode = mode;
+			error = smb_proc_setattr_unix(dentry, &attr, 0, 0);
+		}
+		error = smb_instantiate(dentry, 0, 0);
+	}
+	unlock_kernel();
+	return error;
+}
+
+static int
+smb_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	/*
+	 * Close the directory if it's open.
+	 */
+	lock_kernel();
+	smb_close(inode);
+
+	/*
+	 * Check that nobody else is using the directory..
+	 */
+	error = -EBUSY;
+	if (!d_unhashed(dentry))
+		goto out;
+
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_rmdir(dentry);
+
+out:
+	unlock_kernel();
+	return error;
+}
+
+static int
+smb_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int error;
+
+	/*
+	 * Close the file if it's open.
+	 */
+	lock_kernel();
+	smb_close(dentry->d_inode);
+
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_unlink(dentry);
+	if (!error)
+		smb_renew_times(dentry);
+	unlock_kernel();
+	return error;
+}
+
+static int
+smb_rename(struct inode *old_dir, struct dentry *old_dentry,
+	   struct inode *new_dir, struct dentry *new_dentry)
+{
+	int error;
+
+	/*
+	 * Close any open files, and check whether to delete the
+	 * target before attempting the rename.
+	 */
+	lock_kernel();
+	if (old_dentry->d_inode)
+		smb_close(old_dentry->d_inode);
+	if (new_dentry->d_inode) {
+		smb_close(new_dentry->d_inode);
+		error = smb_proc_unlink(new_dentry);
+		if (error) {
+			VERBOSE("unlink %s/%s, error=%d\n",
+				DENTRY_PATH(new_dentry), error);
+			goto out;
+		}
+		/* FIXME */
+		d_delete(new_dentry);
+	}
+
+	smb_invalid_dir_cache(old_dir);
+	smb_invalid_dir_cache(new_dir);
+	error = smb_proc_mv(old_dentry, new_dentry);
+	if (!error) {
+		smb_renew_times(old_dentry);
+		smb_renew_times(new_dentry);
+	}
+out:
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * FIXME: samba servers won't let you create device nodes unless uid/gid
+ * matches the connection credentials (and we don't know which those are ...)
+ */
+static int
+smb_make_node(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+	int error;
+	struct iattr attr;
+
+	attr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID;
+	attr.ia_mode = mode;
+	attr.ia_uid = current->euid;
+	attr.ia_gid = current->egid;
+
+	if (!new_valid_dev(dev))
+		return -EINVAL;
+
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_setattr_unix(dentry, &attr, MAJOR(dev), MINOR(dev));
+	if (!error) {
+		error = smb_instantiate(dentry, 0, 0);
+	}
+	return error;
+}
+
+/*
+ * dentry = existing file
+ * new_dentry = new file
+ */
+static int
+smb_link(struct dentry *dentry, struct inode *dir, struct dentry *new_dentry)
+{
+	int error;
+
+	DEBUG1("smb_link old=%s/%s new=%s/%s\n",
+	       DENTRY_PATH(dentry), DENTRY_PATH(new_dentry));
+	smb_invalid_dir_cache(dir);
+	error = smb_proc_link(server_from_dentry(dentry), dentry, new_dentry);
+	if (!error) {
+		smb_renew_times(dentry);
+		error = smb_instantiate(new_dentry, 0, 0);
+	}
+	return error;
+}
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
new file mode 100644
index 0000000..b4fcfa8
--- /dev/null
+++ b/fs/smbfs/file.c
@@ -0,0 +1,423 @@
+/*
+ *  file.c
+ *
+ *  Copyright (C) 1995, 1996, 1997 by Paal-Kr. Engstad and Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/net.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/smbno.h>
+#include <linux/smb_fs.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+
+static int
+smb_fsync(struct file *file, struct dentry * dentry, int datasync)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	int result;
+
+	VERBOSE("sync file %s/%s\n", DENTRY_PATH(dentry));
+
+	/*
+	 * The VFS will writepage() all dirty pages for us, but we
+	 * should send a SMBflush to the server, letting it know that
+	 * we want things synchronized with actual storage.
+	 *
+	 * Note: this function requires all pages to have been written already
+	 *       (should be ok with writepage_sync)
+	 */
+	result = smb_proc_flush(server, SMB_I(dentry->d_inode)->fileid);
+	return result;
+}
+
+/*
+ * Read a page synchronously.
+ */
+static int
+smb_readpage_sync(struct dentry *dentry, struct page *page)
+{
+	char *buffer = kmap(page);
+	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	unsigned int rsize = smb_get_rsize(server);
+	int count = PAGE_SIZE;
+	int result;
+
+	VERBOSE("file %s/%s, count=%d@%Ld, rsize=%d\n",
+		DENTRY_PATH(dentry), count, offset, rsize);
+
+	result = smb_open(dentry, SMB_O_RDONLY);
+	if (result < 0)
+		goto io_error;
+
+	do {
+		if (count < rsize)
+			rsize = count;
+
+		result = server->ops->read(dentry->d_inode,offset,rsize,buffer);
+		if (result < 0)
+			goto io_error;
+
+		count -= result;
+		offset += result;
+		buffer += result;
+		dentry->d_inode->i_atime =
+			current_fs_time(dentry->d_inode->i_sb);
+		if (result < rsize)
+			break;
+	} while (count);
+
+	memset(buffer, 0, count);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	result = 0;
+
+io_error:
+	kunmap(page);
+	unlock_page(page);
+	return result;
+}
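+
+/*
+ * Note that the loop above fills the page in rsize-sized chunks and a
+ * short read ends it early; the final memset() zero-fills whatever
+ * part of the page the server did not return.
+ */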
+
+/*
+ * We are called with the page locked and we unlock it when done.
+ */
+static int
+smb_readpage(struct file *file, struct page *page)
+{
+	int		error;
+	struct dentry  *dentry = file->f_dentry;
+
+	page_cache_get(page);
+	error = smb_readpage_sync(dentry, page);
+	page_cache_release(page);
+	return error;
+}
+
+/*
+ * Write a page synchronously.
+ * Offset is the data offset within the page.
+ */
+static int
+smb_writepage_sync(struct inode *inode, struct page *page,
+		   unsigned long pageoffset, unsigned int count)
+{
+	loff_t offset;
+	char *buffer = kmap(page) + pageoffset;
+	struct smb_sb_info *server = server_from_inode(inode);
+	unsigned int wsize = smb_get_wsize(server);
+	int ret = 0;
+
+	offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) + pageoffset;
+	VERBOSE("file ino=%ld, fileid=%d, count=%d@%Ld, wsize=%d\n",
+		inode->i_ino, SMB_I(inode)->fileid, count, offset, wsize);
+
+	do {
+		int write_ret;
+
+		if (count < wsize)
+			wsize = count;
+
+		write_ret = server->ops->write(inode, offset, wsize, buffer);
+		if (write_ret < 0) {
+			PARANOIA("failed write, wsize=%d, write_ret=%d\n",
+				 wsize, write_ret);
+			ret = write_ret;
+			break;
+		}
+		/* N.B. what if result < wsize?? */
+#ifdef SMBFS_PARANOIA
+		if (write_ret < wsize)
+			PARANOIA("short write, wsize=%d, write_ret=%d\n",
+				 wsize, write_ret);
+#endif
+		buffer += wsize;
+		offset += wsize;
+		count -= wsize;
+		/*
+		 * Update the inode now rather than waiting for a refresh.
+		 */
+		inode->i_mtime = inode->i_atime = current_fs_time(inode->i_sb);
+		SMB_I(inode)->flags |= SMB_F_LOCALWRITE;
+		if (offset > inode->i_size)
+			inode->i_size = offset;
+	} while (count);
+
+	kunmap(page);
+	return ret;
+}
+
+/*
+ * Write a page to the server. This will be used for NFS swapping only
+ * (for now), and we currently do this synchronously only.
+ *
+ * We are called with the page locked and we unlock it when done.
+ */
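+/*
+ * Example (assuming 4 KB pages): with i_size == 10000, end_index is 2;
+ * pages 0 and 1 are written in full, page 2 only up to
+ * 10000 & 4095 == 1808 bytes, and any later page is treated as
+ * truncated and skipped.
+ */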
+static int
+smb_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode;
+	unsigned long end_index;
+	unsigned offset = PAGE_CACHE_SIZE;
+	int err;
+
+	if (!mapping)
+		BUG();
+	inode = mapping->host;
+	if (!inode)
+		BUG();
+
+	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+	/* easy case */
+	if (page->index < end_index)
+		goto do_it;
+	/* things got complicated... */
+	offset = inode->i_size & (PAGE_CACHE_SIZE-1);
+	/* OK, are we completely out? */
+	if (page->index >= end_index+1 || !offset)
+		return 0; /* truncated - don't care */
+do_it:
+	page_cache_get(page);
+	err = smb_writepage_sync(inode, page, 0, offset);
+	SetPageUptodate(page);
+	unlock_page(page);
+	page_cache_release(page);
+	return err;
+}
+
+static int
+smb_updatepage(struct file *file, struct page *page, unsigned long offset,
+	       unsigned int count)
+{
+	struct dentry *dentry = file->f_dentry;
+
+	DEBUG1("(%s/%s %d@%ld)\n", DENTRY_PATH(dentry), 
+	       count, (page->index << PAGE_CACHE_SHIFT)+offset);
+
+	return smb_writepage_sync(dentry->d_inode, page, offset, count);
+}
+
+static ssize_t
+smb_file_read(struct file * file, char __user * buf, size_t count, loff_t *ppos)
+{
+	struct dentry * dentry = file->f_dentry;
+	ssize_t	status;
+
+	VERBOSE("file %s/%s, count=%lu@%lu\n", DENTRY_PATH(dentry),
+		(unsigned long) count, (unsigned long) *ppos);
+
+	status = smb_revalidate_inode(dentry);
+	if (status) {
+		PARANOIA("%s/%s validation failed, error=%Zd\n",
+			 DENTRY_PATH(dentry), status);
+		goto out;
+	}
+
+	VERBOSE("before read, size=%ld, flags=%x, atime=%ld\n",
+		(long)dentry->d_inode->i_size,
+		dentry->d_inode->i_flags, dentry->d_inode->i_atime);
+
+	status = generic_file_read(file, buf, count, ppos);
+out:
+	return status;
+}
+
+static int
+smb_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	struct dentry * dentry = file->f_dentry;
+	int	status;
+
+	VERBOSE("file %s/%s, address %lu - %lu\n",
+		DENTRY_PATH(dentry), vma->vm_start, vma->vm_end);
+
+	status = smb_revalidate_inode(dentry);
+	if (status) {
+		PARANOIA("%s/%s validation failed, error=%d\n",
+			 DENTRY_PATH(dentry), status);
+		goto out;
+	}
+	status = generic_file_mmap(file, vma);
+out:
+	return status;
+}
+
+static ssize_t
+smb_file_sendfile(struct file *file, loff_t *ppos,
+		  size_t count, read_actor_t actor, void *target)
+{
+	struct dentry *dentry = file->f_dentry;
+	ssize_t status;
+
+	VERBOSE("file %s/%s, pos=%Ld, count=%d\n",
+		DENTRY_PATH(dentry), *ppos, count);
+
+	status = smb_revalidate_inode(dentry);
+	if (status) {
+		PARANOIA("%s/%s validation failed, error=%Zd\n",
+			 DENTRY_PATH(dentry), status);
+		goto out;
+	}
+	status = generic_file_sendfile(file, ppos, count, actor, target);
+out:
+	return status;
+}
+
+/*
+ * This does the "real" work of the write. The generic routine has
+ * allocated the page, locked it, done all the page alignment stuff
+ * calculations etc. Now we should just copy the data from user
+ * space and write it back to the real medium..
+ *
+ * If the writer ends up delaying the write, the writer needs to
+ * increment the page use counts until he is done with the page.
+ */
+static int smb_prepare_write(struct file *file, struct page *page, 
+			     unsigned offset, unsigned to)
+{
+	return 0;
+}
+
+static int smb_commit_write(struct file *file, struct page *page,
+			    unsigned offset, unsigned to)
+{
+	int status;
+
+	status = -EFAULT;
+	lock_kernel();
+	status = smb_updatepage(file, page, offset, to-offset);
+	unlock_kernel();
+	return status;
+}
+
+struct address_space_operations smb_file_aops = {
+	.readpage = smb_readpage,
+	.writepage = smb_writepage,
+	.prepare_write = smb_prepare_write,
+	.commit_write = smb_commit_write
+};
+
+/* 
+ * Write to a file (through the page cache).
+ */
+static ssize_t
+smb_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct dentry * dentry = file->f_dentry;
+	ssize_t	result;
+
+	VERBOSE("file %s/%s, count=%lu@%lu\n",
+		DENTRY_PATH(dentry),
+		(unsigned long) count, (unsigned long) *ppos);
+
+	result = smb_revalidate_inode(dentry);
+	if (result) {
+		PARANOIA("%s/%s validation failed, error=%Zd\n",
+			 DENTRY_PATH(dentry), result);
+		goto out;
+	}
+
+	result = smb_open(dentry, SMB_O_WRONLY);
+	if (result)
+		goto out;
+
+	if (count > 0) {
+		result = generic_file_write(file, buf, count, ppos);
+		VERBOSE("pos=%ld, size=%ld, mtime=%ld, atime=%ld\n",
+			(long) file->f_pos, (long) dentry->d_inode->i_size,
+			dentry->d_inode->i_mtime, dentry->d_inode->i_atime);
+	}
+out:
+	return result;
+}
+
+static int
+smb_file_open(struct inode *inode, struct file * file)
+{
+	int result;
+	struct dentry *dentry = file->f_dentry;
+	int smb_mode = (file->f_mode & O_ACCMODE) - 1;
+
+	lock_kernel();
+	result = smb_open(dentry, smb_mode);
+	if (result)
+		goto out;
+	SMB_I(inode)->openers++;
+out:
+	unlock_kernel();
+	return result;
+}
+
+static int
+smb_file_release(struct inode *inode, struct file * file)
+{
+	lock_kernel();
+	if (!--SMB_I(inode)->openers) {
+		/* We must flush any dirty pages now as we won't be able to
+		   write anything after close. mmap can trigger this.
+		   "openers" should perhaps include mmap'ers ... */
+		filemap_fdatawrite(inode->i_mapping);
+		filemap_fdatawait(inode->i_mapping);
+		smb_close(inode);
+	}
+	unlock_kernel();
+	return 0;
+}
+
+/*
+ * Check whether the required access is compatible with
+ * an inode's permission. SMB doesn't recognize superuser
+ * privileges, so we need our own check for this.
+ */
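+/*
+ * Example: with i_mode 0644 the owner bits are rw-, so a mask of
+ * MAY_READ|MAY_WRITE passes while any mask containing MAY_EXEC is
+ * refused with -EACCES, even for root.
+ */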
+static int
+smb_file_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	int mode = inode->i_mode;
+	int error = 0;
+
+	VERBOSE("mode=%x, mask=%x\n", mode, mask);
+
+	/* Look at user permissions */
+	mode >>= 6;
+	if ((mode & 7 & mask) != mask)
+		error = -EACCES;
+	return error;
+}
+
+struct file_operations smb_file_operations =
+{
+	.llseek		= remote_llseek,
+	.read		= smb_file_read,
+	.write		= smb_file_write,
+	.ioctl		= smb_ioctl,
+	.mmap		= smb_file_mmap,
+	.open		= smb_file_open,
+	.release	= smb_file_release,
+	.fsync		= smb_fsync,
+	.sendfile	= smb_file_sendfile,
+};
+
+struct inode_operations smb_file_inode_operations =
+{
+	.permission	= smb_file_permission,
+	.getattr	= smb_getattr,
+	.setattr	= smb_notify_change,
+};
diff --git a/fs/smbfs/getopt.c b/fs/smbfs/getopt.c
new file mode 100644
index 0000000..7ae0f52
--- /dev/null
+++ b/fs/smbfs/getopt.c
@@ -0,0 +1,64 @@
+/*
+ * getopt.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/net.h>
+
+#include "getopt.h"
+
+/**
+ *	smb_getopt - option parser
+ *	@caller: name of the caller, for error messages
+ *	@options: the options string
+ *	@opts: an array of &struct option entries controlling parser operations
+ *	@optopt: output; will contain the current option
+ *	@optarg: output; will contain the value (if one exists)
+ *	@flag: output; may be NULL; should point to a long for or'ing flags
+ *	@value: output; may be NULL; will be overwritten with the integer value
+ *		of the current argument.
+ *
+ *	Helper to parse options in the format used by mount ("a=b,c=d,e,f").

+ *	Returns opts->val if a matching entry in the 'opts' array is found,
+ *	0 when no more tokens are found, -1 if an error is encountered.
+ */
+int smb_getopt(char *caller, char **options, struct option *opts,
+	       char **optopt, char **optarg, unsigned long *flag,
+	       unsigned long *value)
+{
+	char *token;
+	char *val;
+	int i;
+
+	do {
+		if ((token = strsep(options, ",")) == NULL)
+			return 0;
+	} while (*token == '\0');
+	*optopt = token;
+
+	*optarg = NULL;
+	if ((val = strchr (token, '=')) != NULL) {
+		*val++ = 0;
+		if (value)
+			*value = simple_strtoul(val, NULL, 0);
+		*optarg = val;
+	}
+
+	for (i = 0; opts[i].name != NULL; i++) {
+		if (!strcmp(opts[i].name, token)) {
+			if (!opts[i].flag && (!val || !*val)) {
+				printk("%s: the %s option requires an argument\n",
+				       caller, token);
+				return -1;
+			}
+
+			if (flag && opts[i].flag)
+				*flag |= opts[i].flag;
+
+			return opts[i].val;
+		}
+	}
+	printk("%s: Unrecognized mount option %s\n", caller, token);
+	return -1;
+}
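+
+#if 0
+/*
+ * Illustrative caller sketch only (not part of smbfs; parse_options()
+ * in inode.c is the real user).  For an option string such as
+ * "uid=500,win95" the loop returns 'u' with value == 500, then 1
+ * after OR-ing SMB_MOUNT_WIN95 into flags, and finally 0 at the end
+ * of the string.
+ */
+static int example_parse(char *options)
+{
+	static struct option example_opts[] = {
+		{ "uid",	0,		 'u' },
+		{ "win95",	SMB_MOUNT_WIN95, 1 },
+		{ NULL,		0,		 0 },
+	};
+	char *optopt, *optarg;
+	unsigned long flags = 0, value = 0;
+	unsigned long uid = 0;
+	int c;
+
+	while ((c = smb_getopt("example", &options, example_opts,
+			       &optopt, &optarg, &flags, &value)) > 0) {
+		if (c == 'u')
+			uid = value;	/* "uid=500" parsed into value */
+	}
+	return c;	/* 0 when done, -1 on a parse error */
+}
+#endif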
diff --git a/fs/smbfs/getopt.h b/fs/smbfs/getopt.h
new file mode 100644
index 0000000..146219a
--- /dev/null
+++ b/fs/smbfs/getopt.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_GETOPT_H
+#define _LINUX_GETOPT_H
+
+struct option {
+	const char *name;
+	unsigned long flag;
+	int val;
+};
+
+extern int smb_getopt(char *caller, char **options, struct option *opts,
+		      char **optopt, char **optarg, unsigned long *flag,
+		      unsigned long *value);
+
+#endif /* _LINUX_GETOPT_H */
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
new file mode 100644
index 0000000..4765aaa
--- /dev/null
+++ b/fs/smbfs/inode.c
@@ -0,0 +1,849 @@
+/*
+ *  inode.c
+ *
+ *  Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/dcache.h>
+#include <linux/smp_lock.h>
+#include <linux/nls.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/net.h>
+#include <linux/vfs.h>
+#include <linux/highuid.h>
+#include <linux/smb_fs.h>
+#include <linux/smbno.h>
+#include <linux/smb_mount.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "smb_debug.h"
+#include "getopt.h"
+#include "proto.h"
+
+/* Always pick a default string */
+#ifdef CONFIG_SMB_NLS_REMOTE
+#define SMB_NLS_REMOTE CONFIG_SMB_NLS_REMOTE
+#else
+#define SMB_NLS_REMOTE ""
+#endif
+
+#define SMB_TTL_DEFAULT 1000
+
+static void smb_delete_inode(struct inode *);
+static void smb_put_super(struct super_block *);
+static int  smb_statfs(struct super_block *, struct kstatfs *);
+static int  smb_show_options(struct seq_file *, struct vfsmount *);
+
+static kmem_cache_t *smb_inode_cachep;
+
+static struct inode *smb_alloc_inode(struct super_block *sb)
+{
+	struct smb_inode_info *ei;
+	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void smb_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(smb_inode_cachep, SMB_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct smb_inode_info *ei = (struct smb_inode_info *) foo;
+	unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
+
+	if ((flags & flagmask) == SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	smb_inode_cachep = kmem_cache_create("smb_inode_cache",
+					     sizeof(struct smb_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (smb_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(smb_inode_cachep))
+		printk(KERN_INFO "smb_inode_cache: not all structures were freed\n");
+}
+
+static int smb_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_NODIRATIME;
+	return 0;
+}
+
+static struct super_operations smb_sops =
+{
+	.alloc_inode	= smb_alloc_inode,
+	.destroy_inode	= smb_destroy_inode,
+	.drop_inode	= generic_delete_inode,
+	.delete_inode	= smb_delete_inode,
+	.put_super	= smb_put_super,
+	.statfs		= smb_statfs,
+	.show_options	= smb_show_options,
+	.remount_fs	= smb_remount,
+};
+
+
+/* We are always generating a new inode here */
+struct inode *
+smb_iget(struct super_block *sb, struct smb_fattr *fattr)
+{
+	struct smb_sb_info *server = SMB_SB(sb);
+	struct inode *result;
+
+	DEBUG1("smb_iget: %p\n", fattr);
+
+	result = new_inode(sb);
+	if (!result)
+		return result;
+	result->i_ino = fattr->f_ino;
+	SMB_I(result)->open = 0;
+	SMB_I(result)->fileid = 0;
+	SMB_I(result)->access = 0;
+	SMB_I(result)->flags = 0;
+	SMB_I(result)->closed = 0;
+	SMB_I(result)->openers = 0;
+	smb_set_inode_attr(result, fattr);
+	if (S_ISREG(result->i_mode)) {
+		result->i_op = &smb_file_inode_operations;
+		result->i_fop = &smb_file_operations;
+		result->i_data.a_ops = &smb_file_aops;
+	} else if (S_ISDIR(result->i_mode)) {
+		if (server->opt.capabilities & SMB_CAP_UNIX)
+			result->i_op = &smb_dir_inode_operations_unix;
+		else
+			result->i_op = &smb_dir_inode_operations;
+		result->i_fop = &smb_dir_operations;
+	} else if (S_ISLNK(result->i_mode)) {
+		result->i_op = &smb_link_inode_operations;
+	} else {
+		init_special_inode(result, result->i_mode, fattr->f_rdev);
+	}
+	insert_inode_hash(result);
+	return result;
+}
+
+/*
+ * Copy the inode data to a smb_fattr structure.
+ */
+void
+smb_get_inode_attr(struct inode *inode, struct smb_fattr *fattr)
+{
+	memset(fattr, 0, sizeof(struct smb_fattr));
+	fattr->f_mode	= inode->i_mode;
+	fattr->f_nlink	= inode->i_nlink;
+	fattr->f_ino	= inode->i_ino;
+	fattr->f_uid	= inode->i_uid;
+	fattr->f_gid	= inode->i_gid;
+	fattr->f_size	= inode->i_size;
+	fattr->f_mtime	= inode->i_mtime;
+	fattr->f_ctime	= inode->i_ctime;
+	fattr->f_atime	= inode->i_atime;
+	fattr->f_blksize= inode->i_blksize;
+	fattr->f_blocks	= inode->i_blocks;
+
+	fattr->attr	= SMB_I(inode)->attr;
+	/*
+	 * Keep the attributes in sync with the inode permissions.
+	 */
+	if (fattr->f_mode & S_IWUSR)
+		fattr->attr &= ~aRONLY;
+	else
+		fattr->attr |= aRONLY;
+}
+
+/*
+ * Update the inode, possibly causing it to invalidate its pages if mtime/size
+ * is different from last time.
+ */
+void
+smb_set_inode_attr(struct inode *inode, struct smb_fattr *fattr)
+{
+	struct smb_inode_info *ei = SMB_I(inode);
+
+	/*
+	 * A size change should have a different mtime, or same mtime
+	 * but different size.
+	 */
+	time_t last_time = inode->i_mtime.tv_sec;
+	loff_t last_sz = inode->i_size;
+
+	inode->i_mode	= fattr->f_mode;
+	inode->i_nlink	= fattr->f_nlink;
+	inode->i_uid	= fattr->f_uid;
+	inode->i_gid	= fattr->f_gid;
+	inode->i_ctime	= fattr->f_ctime;
+	inode->i_blksize= fattr->f_blksize;
+	inode->i_blocks = fattr->f_blocks;
+	inode->i_size	= fattr->f_size;
+	inode->i_mtime	= fattr->f_mtime;
+	inode->i_atime	= fattr->f_atime;
+	ei->attr = fattr->attr;
+
+	/*
+	 * Update the "last time refreshed" field for revalidation.
+	 */
+	ei->oldmtime = jiffies;
+
+	if (inode->i_mtime.tv_sec != last_time || inode->i_size != last_sz) {
+		VERBOSE("%ld changed, old=%ld, new=%ld, oz=%ld, nz=%ld\n",
+			inode->i_ino,
+			(long) last_time, (long) inode->i_mtime,
+			(long) last_sz, (long) inode->i_size);
+
+		if (!S_ISDIR(inode->i_mode))
+			invalidate_remote_inode(inode);
+	}
+}
+
+/*
+ * This is called if the connection has gone bad ...
+ * try to kill off all the current inodes.
+ */
+void
+smb_invalidate_inodes(struct smb_sb_info *server)
+{
+	VERBOSE("\n");
+	shrink_dcache_sb(SB_of(server));
+	invalidate_inodes(SB_of(server));
+}
+
+/*
+ * This is called to update the inode attributes after
+ * we've made changes to a file or directory.
+ */
+static int
+smb_refresh_inode(struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+	struct smb_fattr fattr;
+
+	error = smb_proc_getattr(dentry, &fattr);
+	if (!error) {
+		smb_renew_times(dentry);
+		/*
+		 * Check whether the type part of the mode changed,
+		 * and don't update the attributes if it did.
+		 *
+		 * And don't dick with the root inode
+		 */
+		if (inode->i_ino == 2)
+			return error;
+		if (S_ISLNK(inode->i_mode))
+			return error;	/* VFS will deal with it */
+
+		if ((inode->i_mode & S_IFMT) == (fattr.f_mode & S_IFMT)) {
+			smb_set_inode_attr(inode, &fattr);
+		} else {
+			/*
+			 * Big trouble! The inode has become a new object,
+			 * so any operations attempted on it are invalid.
+			 *
+			 * To limit damage, mark the inode as bad so that
+			 * subsequent lookup validations will fail.
+			 */
+			PARANOIA("%s/%s changed mode, %07o to %07o\n",
+				 DENTRY_PATH(dentry),
+				 inode->i_mode, fattr.f_mode);
+
+			fattr.f_mode = inode->i_mode; /* save mode */
+			make_bad_inode(inode);
+			inode->i_mode = fattr.f_mode; /* restore mode */
+			/*
+			 * No need to worry about unhashing the dentry: the
+			 * lookup validation will see that the inode is bad.
+			 * But we do want to invalidate the caches ...
+			 */
+			if (!S_ISDIR(inode->i_mode))
+				invalidate_remote_inode(inode);
+			else
+				smb_invalid_dir_cache(inode);
+			error = -EIO;
+		}
+	}
+	return error;
+}
+
+/*
+ * This is called when we want to check whether the inode
+ * has changed on the server.  If it has changed, we must
+ * invalidate our local caches.
+ */
+int
+smb_revalidate_inode(struct dentry *dentry)
+{
+	struct smb_sb_info *s = server_from_dentry(dentry);
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+
+	DEBUG1("smb_revalidate_inode\n");
+	lock_kernel();
+
+	/*
+	 * Check whether we've recently refreshed the inode.
+	 */
+	if (time_before(jiffies, SMB_I(inode)->oldmtime + SMB_MAX_AGE(s))) {
+		VERBOSE("up-to-date, ino=%ld, jiffies=%lu, oldtime=%lu\n",
+			inode->i_ino, jiffies, SMB_I(inode)->oldmtime);
+		goto out;
+	}
+
+	error = smb_refresh_inode(dentry);
+out:
+	unlock_kernel();
+	return error;
+}
+
+/*
+ * This routine is called when i_nlink == 0 and i_count goes to 0.
+ * All blocking cleanup operations need to go here to avoid races.
+ */
+static void
+smb_delete_inode(struct inode *ino)
+{
+	DEBUG1("ino=%ld\n", ino->i_ino);
+	lock_kernel();
+	if (smb_close(ino))
+		PARANOIA("could not close inode %ld\n", ino->i_ino);
+	unlock_kernel();
+	clear_inode(ino);
+}
+
+static struct option opts[] = {
+	{ "version",	0, 'v' },
+	{ "win95",	SMB_MOUNT_WIN95, 1 },
+	{ "oldattr",	SMB_MOUNT_OLDATTR, 1 },
+	{ "dirattr",	SMB_MOUNT_DIRATTR, 1 },
+	{ "case",	SMB_MOUNT_CASE, 1 },
+	{ "uid",	0, 'u' },
+	{ "gid",	0, 'g' },
+	{ "file_mode",	0, 'f' },
+	{ "dir_mode",	0, 'd' },
+	{ "iocharset",	0, 'i' },
+	{ "codepage",	0, 'c' },
+	{ "ttl",	0, 't' },
+	{ NULL,		0, 0}
+};
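+
+/*
+ * A typical option string handed to parse_options() might look like
+ * "uid=500,gid=100,file_mode=0644,dir_mode=0755,iocharset=iso8859-1"
+ * (the numeric values are examples only).
+ */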
+
+static int
+parse_options(struct smb_mount_data_kernel *mnt, char *options)
+{
+	int c;
+	unsigned long flags;
+	unsigned long value;
+	char *optarg;
+	char *optopt;
+
+	flags = 0;
+	while ( (c = smb_getopt("smbfs", &options, opts,
+				&optopt, &optarg, &flags, &value)) > 0) {
+
+		VERBOSE("'%s' -> '%s'\n", optopt, optarg ? optarg : "<none>");
+		switch (c) {
+		case 1:
+			/* got a "flag" option */
+			break;
+		case 'v':
+			if (value != SMB_MOUNT_VERSION) {
+				printk("smbfs: Bad mount version %ld, expected %d\n",
+				       value, SMB_MOUNT_VERSION);
+				return 0;
+			}
+			mnt->version = value;
+			break;
+		case 'u':
+			mnt->uid = value;
+			flags |= SMB_MOUNT_UID;
+			break;
+		case 'g':
+			mnt->gid = value;
+			flags |= SMB_MOUNT_GID;
+			break;
+		case 'f':
+			mnt->file_mode = (value & S_IRWXUGO) | S_IFREG;
+			flags |= SMB_MOUNT_FMODE;
+			break;
+		case 'd':
+			mnt->dir_mode = (value & S_IRWXUGO) | S_IFDIR;
+			flags |= SMB_MOUNT_DMODE;
+			break;
+		case 'i':
+			strlcpy(mnt->codepage.local_name, optarg, 
+				SMB_NLS_MAXNAMELEN);
+			break;
+		case 'c':
+			strlcpy(mnt->codepage.remote_name, optarg,
+				SMB_NLS_MAXNAMELEN);
+			break;
+		case 't':
+			mnt->ttl = value;
+			break;
+		default:
+			printk ("smbfs: Unrecognized mount option %s\n",
+				optopt);
+			return -1;
+		}
+	}
+	mnt->flags = flags;
+	return c;
+}
+
+/*
+ * smb_show_options() is for displaying mount options in /proc/mounts.
+ * It tries to avoid showing settings that were not changed from their
+ * defaults.
+ */
+static int
+smb_show_options(struct seq_file *s, struct vfsmount *m)
+{
+	struct smb_mount_data_kernel *mnt = SMB_SB(m->mnt_sb)->mnt;
+	int i;
+
+	for (i = 0; opts[i].name != NULL; i++)
+		if (mnt->flags & opts[i].flag)
+			seq_printf(s, ",%s", opts[i].name);
+
+	if (mnt->flags & SMB_MOUNT_UID)
+		seq_printf(s, ",uid=%d", mnt->uid);
+	if (mnt->flags & SMB_MOUNT_GID)
+		seq_printf(s, ",gid=%d", mnt->gid);
+	if (mnt->mounted_uid != 0)
+		seq_printf(s, ",mounted_uid=%d", mnt->mounted_uid);
+
+	/* 
+	 * Defaults for file_mode and dir_mode are unknown to us; they
+	 * depend on the current umask of the user doing the mount.
+	 */
+	if (mnt->flags & SMB_MOUNT_FMODE)
+		seq_printf(s, ",file_mode=%04o", mnt->file_mode & S_IRWXUGO);
+	if (mnt->flags & SMB_MOUNT_DMODE)
+		seq_printf(s, ",dir_mode=%04o", mnt->dir_mode & S_IRWXUGO);
+
+	if (strcmp(mnt->codepage.local_name, CONFIG_NLS_DEFAULT))
+		seq_printf(s, ",iocharset=%s", mnt->codepage.local_name);
+	if (strcmp(mnt->codepage.remote_name, SMB_NLS_REMOTE))
+		seq_printf(s, ",codepage=%s", mnt->codepage.remote_name);
+
+	if (mnt->ttl != SMB_TTL_DEFAULT)
+		seq_printf(s, ",ttl=%d", mnt->ttl);
+
+	return 0;
+}
+
+static void
+smb_unload_nls(struct smb_sb_info *server)
+{
+	if (server->remote_nls) {
+		unload_nls(server->remote_nls);
+		server->remote_nls = NULL;
+	}
+	if (server->local_nls) {
+		unload_nls(server->local_nls);
+		server->local_nls = NULL;
+	}
+}
+
+static void
+smb_put_super(struct super_block *sb)
+{
+	struct smb_sb_info *server = SMB_SB(sb);
+
+	smb_lock_server(server);
+	server->state = CONN_INVALID;
+	smbiod_unregister_server(server);
+
+	smb_close_socket(server);
+
+	if (server->conn_pid)
+		kill_proc(server->conn_pid, SIGTERM, 1);
+
+	smb_kfree(server->ops);
+	smb_unload_nls(server);
+	sb->s_fs_info = NULL;
+	smb_unlock_server(server);
+	smb_kfree(server);
+}
+
+static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
+{
+	struct smb_sb_info *server;
+	struct smb_mount_data_kernel *mnt;
+	struct smb_mount_data *oldmnt;
+	struct inode *root_inode;
+	struct smb_fattr root;
+	int ver;
+	void *mem;
+
+	if (!raw_data)
+		goto out_no_data;
+
+	oldmnt = (struct smb_mount_data *) raw_data;
+	ver = oldmnt->version;
+	if (ver != SMB_MOUNT_OLDVERSION && cpu_to_be32(ver) != SMB_MOUNT_ASCII)
+		goto out_wrong_data;
+
+	sb->s_flags |= MS_NODIRATIME;
+	sb->s_blocksize = 1024;	/* Eh...  Is this correct? */
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = SMB_SUPER_MAGIC;
+	sb->s_op = &smb_sops;
+	sb->s_time_gran = 100;
+
+	server = smb_kmalloc(sizeof(struct smb_sb_info), GFP_KERNEL);
+	if (!server)
+		goto out_no_server;
+	sb->s_fs_info = server;
+	memset(server, 0, sizeof(struct smb_sb_info));
+
+	server->super_block = sb;
+	server->mnt = NULL;
+	server->sock_file = NULL;
+	init_waitqueue_head(&server->conn_wq);
+	init_MUTEX(&server->sem);
+	INIT_LIST_HEAD(&server->entry);
+	INIT_LIST_HEAD(&server->xmitq);
+	INIT_LIST_HEAD(&server->recvq);
+	server->conn_error = 0;
+	server->conn_pid = 0;
+	server->state = CONN_INVALID; /* no connection yet */
+	server->generation = 0;
+
+	/* Allocate the global temp buffer and some superblock helper structs */
+	/* FIXME: move these to the smb_sb_info struct */
+	VERBOSE("alloc chunk = %d\n", sizeof(struct smb_ops) +
+		sizeof(struct smb_mount_data_kernel));
+	mem = smb_kmalloc(sizeof(struct smb_ops) +
+			  sizeof(struct smb_mount_data_kernel), GFP_KERNEL);
+	if (!mem)
+		goto out_no_mem;
+
+	server->ops = mem;
+	smb_install_null_ops(server->ops);
+	server->mnt = mem + sizeof(struct smb_ops);
+
+	/* Setup NLS stuff */
+	server->remote_nls = NULL;
+	server->local_nls = NULL;
+
+	mnt = server->mnt;
+
+	memset(mnt, 0, sizeof(struct smb_mount_data_kernel));
+	strlcpy(mnt->codepage.local_name, CONFIG_NLS_DEFAULT,
+		SMB_NLS_MAXNAMELEN);
+	strlcpy(mnt->codepage.remote_name, SMB_NLS_REMOTE,
+		SMB_NLS_MAXNAMELEN);
+
+	mnt->ttl = SMB_TTL_DEFAULT;
+	if (ver == SMB_MOUNT_OLDVERSION) {
+		mnt->version = oldmnt->version;
+
+		SET_UID(mnt->uid, oldmnt->uid);
+		SET_GID(mnt->gid, oldmnt->gid);
+
+		mnt->file_mode = (oldmnt->file_mode & S_IRWXUGO) | S_IFREG;
+		mnt->dir_mode = (oldmnt->dir_mode & S_IRWXUGO) | S_IFDIR;
+
+		mnt->flags = (oldmnt->file_mode >> 9) | SMB_MOUNT_UID |
+			SMB_MOUNT_GID | SMB_MOUNT_FMODE | SMB_MOUNT_DMODE;
+	} else {
+		mnt->file_mode = S_IRWXU | S_IRGRP | S_IXGRP |
+				S_IROTH | S_IXOTH | S_IFREG;
+		mnt->dir_mode = S_IRWXU | S_IRGRP | S_IXGRP |
+				S_IROTH | S_IXOTH | S_IFDIR;
+		if (parse_options(mnt, raw_data))
+			goto out_bad_option;
+	}
+	mnt->mounted_uid = current->uid;
+	smb_setcodepage(server, &mnt->codepage);
+
+	/*
+	 * Display the enabled options
+	 * Note: smb_proc_getattr uses these in 2.4 (but was changed in 2.2)
+	 */
+	if (mnt->flags & SMB_MOUNT_OLDATTR)
+		printk("SMBFS: Using core getattr (Win 95 speedup)\n");
+	else if (mnt->flags & SMB_MOUNT_DIRATTR)
+		printk("SMBFS: Using dir ff getattr\n");
+
+	if (smbiod_register_server(server) < 0) {
+		printk(KERN_ERR "smbfs: failed to start smbiod\n");
+		goto out_no_smbiod;
+	}
+
+	/*
+	 * Keep the super block locked while we get the root inode.
+	 */
+	smb_init_root_dirent(server, &root, sb);
+	root_inode = smb_iget(sb, &root);
+	if (!root_inode)
+		goto out_no_root;
+
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root)
+		goto out_no_root;
+
+	smb_new_dentry(sb->s_root);
+
+	return 0;
+
+out_no_root:
+	iput(root_inode);
+out_no_smbiod:
+	smb_unload_nls(server);
+out_bad_option:
+	smb_kfree(mem);
+out_no_mem:
+	if (!server->mnt)
+		printk(KERN_ERR "smb_fill_super: allocation failure\n");
+	sb->s_fs_info = NULL;
+	smb_kfree(server);
+	goto out_fail;
+out_wrong_data:
+	printk(KERN_ERR "smbfs: mount_data version %d is not supported\n", ver);
+	goto out_fail;
+out_no_data:
+	printk(KERN_ERR "smb_fill_super: missing data argument\n");
+out_fail:
+	return -EINVAL;
+out_no_server:
+	printk(KERN_ERR "smb_fill_super: cannot allocate struct smb_sb_info\n");
+	return -ENOMEM;
+}
+
+static int
+smb_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	int result;
+	
+	lock_kernel();
+
+	result = smb_proc_dskattr(sb, buf);
+
+	unlock_kernel();
+
+	buf->f_type = SMB_SUPER_MAGIC;
+	buf->f_namelen = SMB_MAXPATHLEN;
+	return result;
+}
+
+int smb_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	int err = smb_revalidate_inode(dentry);
+	if (!err)
+		generic_fillattr(dentry->d_inode, stat);
+	return err;
+}
+
+int
+smb_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	unsigned int mask = (S_IFREG | S_IFDIR | S_IRWXUGO);
+	int error, changed, refresh = 0;
+	struct smb_fattr fattr;
+
+	lock_kernel();
+
+	error = smb_revalidate_inode(dentry);
+	if (error)
+		goto out;
+
+	if ((error = inode_change_ok(inode, attr)) < 0)
+		goto out;
+
+	error = -EPERM;
+	if ((attr->ia_valid & ATTR_UID) && (attr->ia_uid != server->mnt->uid))
+		goto out;
+
+	if ((attr->ia_valid & ATTR_GID) && (attr->ia_gid != server->mnt->gid))
+		goto out;
+
+	if ((attr->ia_valid & ATTR_MODE) && (attr->ia_mode & ~mask))
+		goto out;
+
+	if ((attr->ia_valid & ATTR_SIZE) != 0) {
+		VERBOSE("changing %s/%s, old size=%ld, new size=%ld\n",
+			DENTRY_PATH(dentry),
+			(long) inode->i_size, (long) attr->ia_size);
+
+		filemap_fdatawrite(inode->i_mapping);
+		filemap_fdatawait(inode->i_mapping);
+
+		error = smb_open(dentry, O_WRONLY);
+		if (error)
+			goto out;
+		error = server->ops->truncate(inode, attr->ia_size);
+		if (error)
+			goto out;
+		error = vmtruncate(inode, attr->ia_size);
+		if (error)
+			goto out;
+		refresh = 1;
+	}
+
+	if (server->opt.capabilities & SMB_CAP_UNIX) {
+		/* For now we don't want to set the size with setattr_unix */
+		attr->ia_valid &= ~ATTR_SIZE;
+		/* FIXME: only call if we actually want to set something? */
+		error = smb_proc_setattr_unix(dentry, attr, 0, 0);
+		if (!error)
+			refresh = 1;
+
+		goto out;
+	}
+
+	/*
+	 * Initialize the fattr and check for changed fields.
+	 * Note: CTIME under SMB is creation time rather than
+	 * change time, so we don't attempt to change it.
+	 */
+	smb_get_inode_attr(inode, &fattr);
+
+	changed = 0;
+	if ((attr->ia_valid & ATTR_MTIME) != 0) {
+		fattr.f_mtime = attr->ia_mtime;
+		changed = 1;
+	}
+	if ((attr->ia_valid & ATTR_ATIME) != 0) {
+		fattr.f_atime = attr->ia_atime;
+		/* Earlier protocols don't have an access time */
+		if (server->opt.protocol >= SMB_PROTOCOL_LANMAN2)
+			changed = 1;
+	}
+	if (changed) {
+		error = smb_proc_settime(dentry, &fattr);
+		if (error)
+			goto out;
+		refresh = 1;
+	}
+
+	/*
+	 * Check for mode changes ... we're extremely limited in
+	 * what can be set for SMB servers: just the read-only bit.
+	 */
+	if ((attr->ia_valid & ATTR_MODE) != 0) {
+		VERBOSE("%s/%s mode change, old=%x, new=%x\n",
+			DENTRY_PATH(dentry), fattr.f_mode, attr->ia_mode);
+		changed = 0;
+		if (attr->ia_mode & S_IWUSR) {
+			if (fattr.attr & aRONLY) {
+				fattr.attr &= ~aRONLY;
+				changed = 1;
+			}
+		} else {
+			if (!(fattr.attr & aRONLY)) {
+				fattr.attr |= aRONLY;
+				changed = 1;
+			}
+		}
+		if (changed) {
+			error = smb_proc_setattr(dentry, &fattr);
+			if (error)
+				goto out;
+			refresh = 1;
+		}
+	}
+	error = 0;
+
+out:
+	if (refresh)
+		smb_refresh_inode(dentry);
+	unlock_kernel();
+	return error;
+}
+
+#ifdef DEBUG_SMB_MALLOC
+int smb_malloced;
+int smb_current_kmalloced;
+int smb_current_vmalloced;
+#endif
+
+static struct super_block *smb_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_nodev(fs_type, flags, data, smb_fill_super);
+}
+
+static struct file_system_type smb_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "smbfs",
+	.get_sb		= smb_get_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags	= FS_BINARY_MOUNTDATA,
+};
+
+static int __init init_smb_fs(void)
+{
+	int err;
+	DEBUG1("registering ...\n");
+
+#ifdef DEBUG_SMB_MALLOC
+	smb_malloced = 0;
+	smb_current_kmalloced = 0;
+	smb_current_vmalloced = 0;
+#endif
+
+	err = init_inodecache();
+	if (err)
+		goto out_inode;
+	err = smb_init_request_cache();
+	if (err)
+		goto out_request;
+	err = register_filesystem(&smb_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	smb_destroy_request_cache();
+out_request:
+	destroy_inodecache();
+out_inode:
+	return err;
+}
+
+static void __exit exit_smb_fs(void)
+{
+	DEBUG1("unregistering ...\n");
+	unregister_filesystem(&smb_fs_type);
+	smb_destroy_request_cache();
+	destroy_inodecache();
+#ifdef DEBUG_SMB_MALLOC
+	printk(KERN_DEBUG "smb_malloced: %d\n", smb_malloced);
+	printk(KERN_DEBUG "smb_current_kmalloced: %d\n",smb_current_kmalloced);
+	printk(KERN_DEBUG "smb_current_vmalloced: %d\n",smb_current_vmalloced);
+#endif
+}
+
+module_init(init_smb_fs)
+module_exit(exit_smb_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/smbfs/ioctl.c b/fs/smbfs/ioctl.c
new file mode 100644
index 0000000..dbae1f8
--- /dev/null
+++ b/fs/smbfs/ioctl.c
@@ -0,0 +1,67 @@
+/*
+ *  ioctl.c
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/highuid.h>
+#include <linux/net.h>
+
+#include <linux/smb_fs.h>
+#include <linux/smb_mount.h>
+
+#include <asm/uaccess.h>
+
+#include "proto.h"
+
+int
+smb_ioctl(struct inode *inode, struct file *filp,
+	  unsigned int cmd, unsigned long arg)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	struct smb_conn_opt opt;
+	int result = -EINVAL;
+
+	switch (cmd) {
+		uid16_t uid16;
+		uid_t uid32;
+	case SMB_IOC_GETMOUNTUID:
+		SET_UID(uid16, server->mnt->mounted_uid);
+		result = put_user(uid16, (uid16_t __user *) arg);
+		break;
+	case SMB_IOC_GETMOUNTUID32:
+		SET_UID(uid32, server->mnt->mounted_uid);
+		result = put_user(uid32, (uid_t __user *) arg);
+		break;
+
+	case SMB_IOC_NEWCONN:
+		/* arg is smb_conn_opt, or NULL if no connection was made */
+		if (!arg) {
+			result = 0;
+			smb_lock_server(server);
+			server->state = CONN_RETRIED;
+			printk(KERN_ERR "Connection attempt failed!  [%d]\n",
+			       server->conn_error);
+			smbiod_flush(server);
+			smb_unlock_server(server);
+			break;
+		}
+
+		result = -EFAULT;
+		if (!copy_from_user(&opt, (void __user *)arg, sizeof(opt)))
+			result = smb_newconn(server, &opt);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
new file mode 100644
index 0000000..220babe
--- /dev/null
+++ b/fs/smbfs/proc.c
@@ -0,0 +1,3509 @@
+/*
+ *  proc.c
+ *
+ *  Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/dcache.h>
+#include <linux/dirent.h>
+#include <linux/nls.h>
+#include <linux/smp_lock.h>
+#include <linux/net.h>
+#include <linux/vfs.h>
+#include <linux/smb_fs.h>
+#include <linux/smbno.h>
+#include <linux/smb_mount.h>
+
+#include <net/sock.h>
+
+#include <asm/string.h>
+#include <asm/div64.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+#include "request.h"
+
+
+/* Features. Undefine them if they cause problems; this should perhaps be
+   a config option. */
+#define SMBFS_POSIX_UNLINK 1
+
+/* Allow smb_retry to be interrupted. */
+#define SMB_RETRY_INTR
+
+#define SMB_VWV(packet)  ((packet) + SMB_HEADER_LEN)
+#define SMB_CMD(packet)  (*(packet+8))
+#define SMB_WCT(packet)  (*(packet+SMB_HEADER_LEN - 1))
+
+#define SMB_DIRINFO_SIZE 43
+#define SMB_STATUS_SIZE  21
+
+#define SMB_ST_BLKSIZE	(PAGE_SIZE)
+#define SMB_ST_BLKSHIFT	(PAGE_SHIFT)
+
+static struct smb_ops smb_ops_core;
+static struct smb_ops smb_ops_os2;
+static struct smb_ops smb_ops_win95;
+static struct smb_ops smb_ops_winNT;
+static struct smb_ops smb_ops_unix;
+static struct smb_ops smb_ops_null;
+
+static void
+smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
+static void
+smb_finish_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
+static int
+smb_proc_getattr_core(struct smb_sb_info *server, struct dentry *dir,
+		      struct smb_fattr *fattr);
+static int
+smb_proc_getattr_ff(struct smb_sb_info *server, struct dentry *dentry,
+		    struct smb_fattr *fattr);
+static int
+smb_proc_setattr_core(struct smb_sb_info *server, struct dentry *dentry,
+		      u16 attr);
+static int
+smb_proc_setattr_ext(struct smb_sb_info *server,
+		     struct inode *inode, struct smb_fattr *fattr);
+static int
+smb_proc_query_cifsunix(struct smb_sb_info *server);
+static void
+install_ops(struct smb_ops *dst, struct smb_ops *src);
+
+
+static void
+str_upper(char *name, int len)
+{
+	while (len--)
+	{
+		if (*name >= 'a' && *name <= 'z')
+			*name -= ('a' - 'A');
+		name++;
+	}
+}
+
+#if 0
+static void
+str_lower(char *name, int len)
+{
+	while (len--)
+	{
+		if (*name >= 'A' && *name <= 'Z')
+			*name += ('a' - 'A');
+		name++;
+	}
+}
+#endif
+
+/* reverse a string inline. This is used by the dircache walking routines */
+static void reverse_string(char *buf, int len)
+{
+	char c;
+	char *end = buf+len-1;
+
+	while(buf < end) {
+		c = *buf;
+		*(buf++) = *end;
+		*(end--) = c;
+	}
+}
+
+/* no conversion, just a wrapper for memcpy. */
+static int convert_memcpy(unsigned char *output, int olen,
+			  const unsigned char *input, int ilen,
+			  struct nls_table *nls_from,
+			  struct nls_table *nls_to)
+{
+	if (olen < ilen)
+		return -ENAMETOOLONG;
+	memcpy(output, input, ilen);
+	return ilen;
+}
+
+static inline int write_char(unsigned char ch, char *output, int olen)
+{
+	if (olen < 4)
+		return -ENAMETOOLONG;
+	sprintf(output, ":x%02x", ch);
+	return 4;
+}
+
+static inline int write_unichar(wchar_t ch, char *output, int olen)
+{
+	if (olen < 5)
+		return -ENAMETOOLONG;
+	sprintf(output, ":%04x", ch);
+	return 5;
+}
+
+/* convert from one "codepage" to another (possibly being utf8). */
+static int convert_cp(unsigned char *output, int olen,
+		      const unsigned char *input, int ilen,
+		      struct nls_table *nls_from,
+		      struct nls_table *nls_to)
+{
+	int len = 0;
+	int n;
+	wchar_t ch;
+
+	while (ilen > 0) {
+		/* convert by changing to unicode and back to the new cp */
+		n = nls_from->char2uni(input, ilen, &ch);
+		if (n == -EINVAL) {
+			ilen--;
+			n = write_char(*input++, output, olen);
+			if (n < 0)
+				goto fail;
+			output += n;
+			olen -= n;
+			len += n;
+			continue;
+		} else if (n < 0)
+			goto fail;
+		input += n;
+		ilen -= n;
+
+		n = nls_to->uni2char(ch, output, olen);
+		if (n == -EINVAL)
+			n = write_unichar(ch, output, olen);
+		if (n < 0)
+			goto fail;
+		output += n;
+		olen -= n;
+
+		len += n;
+	}
+	return len;
+fail:
+	return n;
+}
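+
+/*
+ * Bytes the source table cannot decode are escaped as ":xNN", and
+ * characters the destination table cannot encode as ":NNNN"; an
+ * undecodable 0xe5 byte, for example, becomes the four characters
+ * ":xe5" in the output.
+ */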
+
+/* ----------------------------------------------------------- */
+
+/*
+ * nls_unicode
+ *
+ * This encodes/decodes little endian unicode format
+ */
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+	if (boundlen < 2)
+		return -EINVAL;
+	*out++ = uni & 0xff;
+	*out++ = uni >> 8;
+	return 2;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+	if (boundlen < 2)
+		return -EINVAL;
+	*uni = (rawstring[1] << 8) | rawstring[0];
+	return 2;
+}
+
+static struct nls_table unicode_table = {
+	.charset	= "unicode",
+	.uni2char	= uni2char,
+	.char2uni	= char2uni,
+};
+
+/* ----------------------------------------------------------- */
+
+static int setcodepage(struct nls_table **p, char *name)
+{
+	struct nls_table *nls;
+
+	if (!name || !*name) {
+		nls = NULL;
+	} else if ( (nls = load_nls(name)) == NULL) {
+		printk (KERN_ERR "smbfs: failed to load nls '%s'\n", name);
+		return -EINVAL;
+	}
+
+	/* if already set, unload the previous one. */
+	if (*p && *p != &unicode_table)
+		unload_nls(*p);
+	*p = nls;
+
+	return 0;
+}
+
+/* Handles all changes to codepage settings. */
+int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
+{
+	int n = 0;
+
+	smb_lock_server(server);
+
+	/* Don't load any nls_* at all, if no remote is requested */
+	if (!*cp->remote_name)
+		goto out;
+
+	/* local */
+	n = setcodepage(&server->local_nls, cp->local_name);
+	if (n != 0)
+		goto out;
+
+	/* remote */
+	if (!strcmp(cp->remote_name, "unicode")) {
+		server->remote_nls = &unicode_table;
+	} else {
+		n = setcodepage(&server->remote_nls, cp->remote_name);
+		if (n != 0)
+			setcodepage(&server->local_nls, NULL);
+	}
+
+out:
+	if (server->local_nls != NULL && server->remote_nls != NULL)
+		server->ops->convert = convert_cp;
+	else
+		server->ops->convert = convert_memcpy;
+
+	smb_unlock_server(server);
+	return n;
+}
+
+
+/*****************************************************************************/
+/*                                                                           */
+/*  Encoding/Decoding section                                                */
+/*                                                                           */
+/*****************************************************************************/
+
+static __u8 *
+smb_encode_smb_length(__u8 * p, __u32 len)
+{
+	*p = 0;
+	*(p+1) = 0;
+	*(p+2) = (len & 0xFF00) >> 8;
+	*(p+3) = (len & 0xFF);
+	if (len > 0xFFFF)
+	{
+		*(p+1) = 1;
+	}
+	return p + 4;
+}
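+
+/*
+ * Example: a length of 0x1234 is encoded as the four bytes
+ * 00 00 12 34; lengths above 0xFFFF set bit 0 of the second byte, so
+ * 0x10000 becomes 00 01 00 00.  smb_len() below decodes this.
+ */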
+
+/*
+ * smb_build_path: build the path to entry and name, storing it in buf.
+ * The returned path includes the trailing '\0'.
+ */
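+/*
+ * Example: for a dentry path dir/file the walk below first produces
+ * "elif\rid\" (each component converted, reversed in place and
+ * terminated with a '\\' separator), and the final reverse_string()
+ * turns the whole buffer into "\dir\file".
+ */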
+static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
+			  int maxlen,
+			  struct dentry *entry, struct qstr *name)
+{
+	unsigned char *path = buf;
+	int len;
+	int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE) != 0;
+
+	if (maxlen < (2<<unicode))
+		return -ENAMETOOLONG;
+
+	if (maxlen > SMB_MAXPATHLEN + 1)
+		maxlen = SMB_MAXPATHLEN + 1;
+
+	if (entry == NULL)
+		goto test_name_and_out;
+
+	/*
+	 * If IS_ROOT, we have to do no walking at all.
+	 */
+	if (IS_ROOT(entry) && !name) {
+		*path++ = '\\';
+		if (unicode) *path++ = '\0';
+		*path++ = '\0';
+		if (unicode) *path++ = '\0';
+		return path-buf;
+	}
+
+	/*
+	 * Build the path string walking the tree backward from end to ROOT
+	 * and store it in reversed order [see reverse_string()]
+	 */
+	dget(entry);
+	spin_lock(&entry->d_lock);
+	while (!IS_ROOT(entry)) {
+		struct dentry *parent;
+
+		if (maxlen < (3<<unicode)) {
+			spin_unlock(&entry->d_lock);
+			dput(entry);
+			return -ENAMETOOLONG;
+		}
+
+		len = server->ops->convert(path, maxlen-2, 
+				      entry->d_name.name, entry->d_name.len,
+				      server->local_nls, server->remote_nls);
+		if (len < 0) {
+			spin_unlock(&entry->d_lock);
+			dput(entry);
+			return len;
+		}
+		reverse_string(path, len);
+		path += len;
+		if (unicode) {
+			/* Note: reverse order */
+			*path++ = '\0';
+			maxlen--;
+		}
+		*path++ = '\\';
+		maxlen -= len+1;
+
+		parent = entry->d_parent;
+		dget(parent);
+		spin_unlock(&entry->d_lock);
+		dput(entry);
+		entry = parent;
+		spin_lock(&entry->d_lock);
+	}
+	spin_unlock(&entry->d_lock);
+	dput(entry);
+	reverse_string(buf, path-buf);
+
+	/* maxlen has space for at least one char */
+test_name_and_out:
+	if (name) {
+		if (maxlen < (3<<unicode))
+			return -ENAMETOOLONG;
+		*path++ = '\\';
+		if (unicode) {
+			*path++ = '\0';
+			maxlen--;
+		}
+		len = server->ops->convert(path, maxlen-2, 
+				      name->name, name->len,
+				      server->local_nls, server->remote_nls);
+		if (len < 0)
+			return len;
+		path += len;
+		maxlen -= len+1;
+	}
+	/* maxlen has space for at least one char */
+	*path++ = '\0';
+	if (unicode) *path++ = '\0';
+	return path-buf;
+}
+
+static int smb_encode_path(struct smb_sb_info *server, char *buf, int maxlen,
+			   struct dentry *dir, struct qstr *name)
+{
+	int result;
+
+	result = smb_build_path(server, buf, maxlen, dir, name);
+	if (result < 0)
+		goto out;
+	if (server->opt.protocol <= SMB_PROTOCOL_COREPLUS)
+		str_upper(buf, result);
+out:
+	return result;
+}
+
+/* encode_path for non-trans2 request SMBs */
+static int smb_simple_encode_path(struct smb_request *req, char **p,
+				  struct dentry * entry, struct qstr * name)
+{
+	struct smb_sb_info *server = req->rq_server;
+	char *s = *p;
+	int res;
+	int maxlen = ((char *)req->rq_buffer + req->rq_bufsize) - s;
+	int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE);
+
+	if (!maxlen)
+		return -ENAMETOOLONG;
+	*s++ = 4;	/* ASCII data format */
+
+	/*
+	 * SMB Unicode strings must be 16bit aligned relative the start of the
+	 * packet. If they are not they must be padded with 0.
+	 */
+	if (unicode) {
+		int align = s - (char *)req->rq_buffer;
+		if (!(align & 1)) {
+			*s++ = '\0';
+			maxlen--;
+		}
+	}
+
+	res = smb_encode_path(server, s, maxlen-1, entry, name);
+	if (res < 0)
+		return res;
+	*p = s + res;
+	return 0;
+}
+
+/* The following are taken directly from msdos-fs */
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+
+static int day_n[] =
+{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0};
+		  /* JanFebMarApr May Jun Jul Aug Sep Oct Nov Dec */
+
+
+static time_t
+utc2local(struct smb_sb_info *server, time_t time)
+{
+	return time - server->opt.serverzone*60;
+}
+
+static time_t
+local2utc(struct smb_sb_info *server, time_t time)
+{
+	return time + server->opt.serverzone*60;
+}
+
+/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
+
+static time_t
+date_dos2unix(struct smb_sb_info *server, __u16 date, __u16 time)
+{
+	int month, year;
+	time_t secs;
+
+	/* first subtract and mask after that... Otherwise, if
+	   date == 0, bad things happen */
+	month = ((date >> 5) - 1) & 15;
+	year = date >> 9;
+	secs = (time & 31) * 2 + 60 * ((time >> 5) & 63) + (time >> 11) * 3600 + 86400 *
+	    ((date & 31) - 1 + day_n[month] + (year / 4) + year * 365 - ((year & 3) == 0 &&
+						   month < 2 ? 1 : 0) + 3653);
+	/* days since 1.1.70 plus 80's leap day */
+	return local2utc(server, secs);
+}
+
+
+/* Convert linear UNIX date to a MS-DOS time/date pair. */
+
+static void
+date_unix2dos(struct smb_sb_info *server,
+	      int unix_date, __u16 *date, __u16 *time)
+{
+	int day, year, nl_day, month;
+
+	unix_date = utc2local(server, unix_date);
+	if (unix_date < 315532800)
+		unix_date = 315532800;
+
+	*time = (unix_date % 60) / 2 +
+		(((unix_date / 60) % 60) << 5) +
+		(((unix_date / 3600) % 24) << 11);
+
+	day = unix_date / 86400 - 3652;
+	year = day / 365;
+	if ((year + 3) / 4 + 365 * year > day)
+		year--;
+	day -= (year + 3) / 4 + 365 * year;
+	if (day == 59 && !(year & 3)) {
+		nl_day = day;
+		month = 2;
+	} else {
+		nl_day = (year & 3) || day <= 59 ? day : day - 1;
+		for (month = 0; month < 12; month++)
+			if (day_n[month] > nl_day)
+				break;
+	}
+	*date = nl_day - day_n[month - 1] + 1 + (month << 5) + (year << 9);
+}
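+
+/*
+ * Example: the DOS pair date=0x32A5, time=0x4800 decodes as
+ * 2005-05-05 09:00:00 (year 1980+25, month 5, day 5) in server local
+ * time, which date_dos2unix() then shifts to UTC via local2utc().
+ */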
+
+/* The following are taken from fs/ntfs/util.c */
+
+#define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000)
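+/*
+ * 369 years, containing 89 leap days, separate 1601-01-01 from
+ * 1970-01-01; NTFS_TIME_OFFSET is that span in 100 ns ticks, so
+ * converting exactly NTFS_TIME_OFFSET below yields the Unix epoch.
+ */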
+
+/*
+ * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
+ * into Unix UTC (based 1970-01-01, in seconds).
+ */
+static struct timespec
+smb_ntutc2unixutc(u64 ntutc)
+{
+	struct timespec ts;
+	/* FIXME: what about the timezone difference? */
+	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+	u64 t = ntutc - NTFS_TIME_OFFSET;
+	ts.tv_nsec = do_div(t, 10000000) * 100;
+	ts.tv_sec = t; 
+	return ts;
+}
+
+/* Convert the Unix UTC into NT time */
+static u64
+smb_unixutc2ntutc(struct timespec ts)
+{
+	/* Note: timezone conversion is probably wrong. */
+	/* return ((u64)utc2local(server, t)) * 10000000 + NTFS_TIME_OFFSET; */
+	return ((u64)ts.tv_sec) * 10000000 + ts.tv_nsec/100 + NTFS_TIME_OFFSET;
+}
+
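+/*
+ * Map the UNIX_TYPE_* file types used by the CIFS unix extensions to the
+ * corresponding S_IF* mode bits; see smb_filetype_from_mode() below.
+ */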
+#define MAX_FILE_MODE	6
+static mode_t file_mode[] = {
+	S_IFREG, S_IFDIR, S_IFLNK, S_IFCHR, S_IFBLK, S_IFIFO, S_IFSOCK
+};
+
+static int smb_filetype_to_mode(u32 filetype)
+{
+	if (filetype > MAX_FILE_MODE) {
+		PARANOIA("Filetype out of range: %d\n", filetype);
+		return S_IFREG;
+	}
+	return file_mode[filetype];
+}
+
+static u32 smb_filetype_from_mode(int mode)
+{
+	if (S_ISREG(mode))
+		return UNIX_TYPE_FILE;
+	if (S_ISDIR(mode))
+		return UNIX_TYPE_DIR;
+	if (S_ISLNK(mode))
+		return UNIX_TYPE_SYMLINK;
+	if (S_ISCHR(mode))
+		return UNIX_TYPE_CHARDEV;
+	if (S_ISBLK(mode))
+		return UNIX_TYPE_BLKDEV;
+	if (S_ISFIFO(mode))
+		return UNIX_TYPE_FIFO;
+	if (S_ISSOCK(mode))
+		return UNIX_TYPE_SOCKET;
+	return UNIX_TYPE_UNKNOWN;
+}
+
+
+/*****************************************************************************/
+/*                                                                           */
+/*  Support section.                                                         */
+/*                                                                           */
+/*****************************************************************************/
+
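+/*
+ * smb_len extracts the 17-bit message length from the 4-byte NetBIOS
+ * session header: a type byte, a flags byte whose low bit is bit 16 of
+ * the length, and a big-endian 16-bit length.
+ */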
+__u32
+smb_len(__u8 * p)
+{
+	return ((*(p+1) & 0x1) << 16L) | (*(p+2) << 8L) | *(p+3);
+}
+
+static __u16
+smb_bcc(__u8 * packet)
+{
+	int pos = SMB_HEADER_LEN + SMB_WCT(packet) * sizeof(__u16);
+	return WVAL(packet, pos);
+}
+
+/* smb_valid_packet: We check whether a packet fulfills the basic
+   requirements of an SMB packet */
+
+static int
+smb_valid_packet(__u8 * packet)
+{
+	return (packet[4] == 0xff
+		&& packet[5] == 'S'
+		&& packet[6] == 'M'
+		&& packet[7] == 'B'
+		&& (smb_len(packet) + 4 == SMB_HEADER_LEN
+		    + SMB_WCT(packet) * 2 + smb_bcc(packet)));
+}
+
+/* smb_verify: We check if we got the answer we expected, and if we
+   got enough data. If bcc == -1, we don't care. */
+
+static int
+smb_verify(__u8 * packet, int command, int wct, int bcc)
+{
+	if (SMB_CMD(packet) != command)
+		goto bad_command;
+	if (SMB_WCT(packet) < wct)
+		goto bad_wct;
+	if (bcc != -1 && smb_bcc(packet) < bcc)
+		goto bad_bcc;
+	return 0;
+
+bad_command:
+	printk(KERN_ERR "smb_verify: command=%x, SMB_CMD=%x??\n",
+	       command, SMB_CMD(packet));
+	goto fail;
+bad_wct:
+	printk(KERN_ERR "smb_verify: command=%x, wct=%d, SMB_WCT=%d??\n",
+	       command, wct, SMB_WCT(packet));
+	goto fail;
+bad_bcc:
+	printk(KERN_ERR "smb_verify: command=%x, bcc=%d, SMB_BCC=%d??\n",
+	       command, bcc, smb_bcc(packet));
+fail:
+	return -EIO;
+}
+
+/*
+ * Returns the maximum read or write size for the "payload", keeping the
+ * whole packet within the negotiated max_xmit size.
+ *
+ * N.B. Since this value is usually computed before locking the server,
+ * the server's packet size must never be decreased!
+ */
+static inline int
+smb_get_xmitsize(struct smb_sb_info *server, int overhead)
+{
+	return server->opt.max_xmit - overhead;
+}
+
+/*
+ * Calculate the maximum read size
+ */
+int
+smb_get_rsize(struct smb_sb_info *server)
+{
+	/* readX has 12 parameters, read has 5 */
+	int overhead = SMB_HEADER_LEN + 12 * sizeof(__u16) + 2 + 1 + 2;
+	int size = smb_get_xmitsize(server, overhead);
+
+	VERBOSE("xmit=%d, size=%d\n", server->opt.max_xmit, size);
+
+	return size;
+}
+
+/*
+ * Calculate the maximum write size
+ */
+int
+smb_get_wsize(struct smb_sb_info *server)
+{
+	/* writeX has 14 parameters, write has 5 */
+	int overhead = SMB_HEADER_LEN + 14 * sizeof(__u16) + 2 + 1 + 2;
+	int size = smb_get_xmitsize(server, overhead);
+
+	VERBOSE("xmit=%d, size=%d\n", server->opt.max_xmit, size);
+
+	return size;
+}
+
+/*
+ * Convert SMB error codes to -E... errno values.
+ */
+int
+smb_errno(struct smb_request *req)
+{
+	int errcls = req->rq_rcls;
+	int error  = req->rq_err;
+	char *class = "Unknown";
+
+	VERBOSE("errcls %d  code %d  from command 0x%x\n",
+		errcls, error, SMB_CMD(req->rq_header));
+
+	if (errcls == ERRDOS) {
+		switch (error) {
+		case ERRbadfunc:
+			return -EINVAL;
+		case ERRbadfile:
+		case ERRbadpath:
+			return -ENOENT;
+		case ERRnofids:
+			return -EMFILE;
+		case ERRnoaccess:
+			return -EACCES;
+		case ERRbadfid:
+			return -EBADF;
+		case ERRbadmcb:
+			return -EREMOTEIO;
+		case ERRnomem:
+			return -ENOMEM;
+		case ERRbadmem:
+			return -EFAULT;
+		case ERRbadenv:
+		case ERRbadformat:
+			return -EREMOTEIO;
+		case ERRbadaccess:
+			return -EACCES;
+		case ERRbaddata:
+			return -E2BIG;
+		case ERRbaddrive:
+			return -ENXIO;
+		case ERRremcd:
+			return -EREMOTEIO;
+		case ERRdiffdevice:
+			return -EXDEV;
+		case ERRnofiles:
+			return -ENOENT;
+		case ERRbadshare:
+			return -ETXTBSY;
+		case ERRlock:
+			return -EDEADLK;
+		case ERRfilexists:
+			return -EEXIST;
+		case ERROR_INVALID_PARAMETER:
+			return -EINVAL;
+		case ERROR_DISK_FULL:
+			return -ENOSPC;
+		case ERROR_INVALID_NAME:
+			return -ENOENT;
+		case ERROR_DIR_NOT_EMPTY:
+			return -ENOTEMPTY;
+		case ERROR_NOT_LOCKED:
+			return -ENOLCK;
+		case ERROR_ALREADY_EXISTS:
+			return -EEXIST;
+		default:
+			class = "ERRDOS";
+			goto err_unknown;
+		}
+	} else if (errcls == ERRSRV) {
+		switch (error) {
+		/* N.B. This is wrong ... EIO ? */
+		case ERRerror:
+			return -ENFILE;
+		case ERRbadpw:
+			return -EINVAL;
+		case ERRbadtype:
+		case ERRtimeout:
+			return -EIO;
+		case ERRaccess:
+			return -EACCES;
+		/*
+		 * This is a fatal error, as it means the "tree ID"
+		 * for this connection is no longer valid. We map
+		 * to a special error code and get a new connection.
+		 */
+		case ERRinvnid:
+			return -EBADSLT;
+		default:
+			class = "ERRSRV";
+			goto err_unknown;
+		}
+	} else if (errcls == ERRHRD) {
+		switch (error) {
+		case ERRnowrite:
+			return -EROFS;
+		case ERRbadunit:
+			return -ENODEV;
+		case ERRnotready:
+			return -EUCLEAN;
+		case ERRbadcmd:
+		case ERRdata:
+			return -EIO;
+		case ERRbadreq:
+			return -ERANGE;
+		case ERRbadshare:
+			return -ETXTBSY;
+		case ERRlock:
+			return -EDEADLK;
+		case ERRdiskfull:
+			return -ENOSPC;
+		default:
+			class = "ERRHRD";
+			goto err_unknown;
+		}
+	} else if (errcls == ERRCMD) {
+		class = "ERRCMD";
+	} else if (errcls == SUCCESS) {
+		return 0;	/* This is the only valid 0 return */
+	}
+
+err_unknown:
+	printk(KERN_ERR "smb_errno: class %s, code %d from command 0x%x\n",
+	       class, error, SMB_CMD(req->rq_header));
+	return -EIO;
+}
+
+/* smb_request_ok: We expect the server to be locked. Then we do the
+   request and check the answer completely. When smb_request_ok
+   returns 0, you can be quite sure that everything went well. When
+   the return value is negative, it is a valid unix errno. */
+
+static int
+smb_request_ok(struct smb_request *req, int command, int wct, int bcc)
+{
+	int result;
+
+	req->rq_resp_wct = wct;
+	req->rq_resp_bcc = bcc;
+
+	result = smb_add_request(req);
+	if (result != 0) {
+		DEBUG1("smb_request failed\n");
+		goto out;
+	}
+
+	if (!smb_valid_packet(req->rq_header)) {
+		PARANOIA("invalid packet!\n");
+		result = -EIO;
+		goto out;
+	}
+
+	result = smb_verify(req->rq_header, command, wct, bcc);
+
+out:
+	return result;
+}
+
+/*
+ * This implements the NEWCONN ioctl. It installs the server pid,
+ * sets server->state to CONN_VALID, and wakes up the waiting process.
+ */
+int
+smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
+{
+	struct file *filp;
+	struct sock *sk;
+	int error;
+
+	VERBOSE("fd=%d, pid=%d\n", opt->fd, current->pid);
+
+	smb_lock_server(server);
+
+	/*
+	 * Make sure we don't already have a valid connection ...
+	 */
+	error = -EINVAL;
+	if (server->state == CONN_VALID)
+		goto out;
+
+	error = -EACCES;
+	if (current->uid != server->mnt->mounted_uid && 
+	    !capable(CAP_SYS_ADMIN))
+		goto out;
+
+	error = -EBADF;
+	filp = fget(opt->fd);
+	if (!filp)
+		goto out;
+	if (!smb_valid_socket(filp->f_dentry->d_inode))
+		goto out_putf;
+
+	server->sock_file = filp;
+	server->conn_pid = current->pid;
+	server->opt = *opt;
+	server->generation += 1;
+	server->state = CONN_VALID;
+	error = 0;
+
+	if (server->conn_error) {
+		/*
+		 * conn_error is the return code we originally decided to
+		 * drop the old connection on. This message should sound
+		 * positive, so it does not make people wonder why smbfs
+		 * is printing error messages ...
+		 */
+		printk(KERN_INFO "SMB connection re-established (%d)\n",
+		       server->conn_error);
+		server->conn_error = 0;
+	}
+
+	/*
+	 * Store the server in sock user_data (Only used by sunrpc)
+	 */
+	sk = SOCKET_I(filp->f_dentry->d_inode)->sk;
+	sk->sk_user_data = server;
+
+	/* chain into the data_ready callback */
+	server->data_ready = xchg(&sk->sk_data_ready, smb_data_ready);
+
+	/* check if we have an old smbmount that uses seconds for the 
+	   serverzone */
+	if (server->opt.serverzone > 12*60 || server->opt.serverzone < -12*60)
+		server->opt.serverzone /= 60;
+
+	/* now that we have an established connection we can detect the server
+	   type and enable bug workarounds */
+	if (server->opt.protocol < SMB_PROTOCOL_LANMAN2)
+		install_ops(server->ops, &smb_ops_core);
+	else if (server->opt.protocol == SMB_PROTOCOL_LANMAN2)
+		install_ops(server->ops, &smb_ops_os2);
+	else if (server->opt.protocol == SMB_PROTOCOL_NT1 &&
+		 (server->opt.max_xmit < 0x1000) &&
+		 !(server->opt.capabilities & SMB_CAP_NT_SMBS)) {
+		/* FIXME: can we kill the WIN95 flag now? */
+		server->mnt->flags |= SMB_MOUNT_WIN95;
+		VERBOSE("detected WIN95 server\n");
+		install_ops(server->ops, &smb_ops_win95);
+	} else {
+		/*
+		 * Samba has max_xmit 65535
+		 * NT4spX has max_xmit 4536 (or something like that)
+		 * win2k has ...
+		 */
+		VERBOSE("detected NT1 (Samba, NT4/5) server\n");
+		install_ops(server->ops, &smb_ops_winNT);
+	}
+
+	/* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
+	if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
+		server->ops->getattr = smb_proc_getattr_core;
+	} else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
+		server->ops->getattr = smb_proc_getattr_ff;
+	}
+
+	/* Decode server capabilities */
+	if (server->opt.capabilities & SMB_CAP_LARGE_FILES) {
+		/* Should be ok to set this now, as no one can access the
+		   mount until the connection has been established. */
+		SB_of(server)->s_maxbytes = ~0ULL >> 1;
+		VERBOSE("LFS enabled\n");
+	}
+	if (server->opt.capabilities & SMB_CAP_UNICODE) {
+		server->mnt->flags |= SMB_MOUNT_UNICODE;
+		VERBOSE("Unicode enabled\n");
+	} else {
+		server->mnt->flags &= ~SMB_MOUNT_UNICODE;
+	}
+#if 0
+	/* flags we may test for other patches ... */
+	if (server->opt.capabilities & SMB_CAP_LARGE_READX) {
+		VERBOSE("Large reads enabled\n");
+	}
+	if (server->opt.capabilities & SMB_CAP_LARGE_WRITEX) {
+		VERBOSE("Large writes enabled\n");
+	}
+#endif
+	if (server->opt.capabilities & SMB_CAP_UNIX) {
+		struct inode *inode;
+		VERBOSE("Using UNIX CIFS extensions\n");
+		install_ops(server->ops, &smb_ops_unix);
+		inode = SB_of(server)->s_root->d_inode;
+		if (inode)
+			inode->i_op = &smb_dir_inode_operations_unix;
+	}
+
+	VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n",
+		server->opt.protocol, server->opt.max_xmit, server->conn_pid,
+		server->opt.capabilities);
+
+	/* FIXME: this really should be done by smbmount. */
+	if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) {
+		server->opt.max_xmit = SMB_MAX_PACKET_SIZE;
+	}
+
+	smb_unlock_server(server);
+	smbiod_wake_up();
+	if (server->opt.capabilities & SMB_CAP_UNIX)
+		smb_proc_query_cifsunix(server);
+
+	server->conn_complete++;
+	wake_up_interruptible_all(&server->conn_wq);
+	return error;
+
+out:
+	smb_unlock_server(server);
+	smbiod_wake_up();
+	return error;
+
+out_putf:
+	fput(filp);
+	goto out;
+}
+
+/* smb_setup_header: We completely set up the packet. You only have to
+   insert the command-specific fields */
+
+__u8 *
+smb_setup_header(struct smb_request *req, __u8 command, __u16 wct, __u16 bcc)
+{
+	__u32 xmit_len = SMB_HEADER_LEN + wct * sizeof(__u16) + bcc + 2;
+	__u8 *p = req->rq_header;
+	struct smb_sb_info *server = req->rq_server;
+
+	p = smb_encode_smb_length(p, xmit_len - 4);
+
+	*p++ = 0xff;
+	*p++ = 'S';
+	*p++ = 'M';
+	*p++ = 'B';
+	*p++ = command;
+
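+	/*
+	 * Clear the remaining fixed header fields: status (4), flags (1),
+	 * flags2 (2), PID-high (2), signature (8) and reserved (2) bytes,
+	 * then step over the TID/PID/UID/MID words.
+	 */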
+	memset(p, '\0', 19);
+	p += 19;
+	p += 8;
+
+	if (server->opt.protocol > SMB_PROTOCOL_CORE) {
+		int flags = SMB_FLAGS_CASELESS_PATHNAMES;
+		int flags2 = SMB_FLAGS2_LONG_PATH_COMPONENTS |
+			SMB_FLAGS2_EXTENDED_ATTRIBUTES;	/* EA? not really ... */
+
+		*(req->rq_header + smb_flg) = flags;
+		if (server->mnt->flags & SMB_MOUNT_UNICODE)
+			flags2 |= SMB_FLAGS2_UNICODE_STRINGS;
+		WSET(req->rq_header, smb_flg2, flags2);
+	}
+	*p++ = wct;		/* wct */
+	p += 2 * wct;
+	WSET(p, 0, bcc);
+
+	/* Include the header in the data to send */
+	req->rq_iovlen = 1;
+	req->rq_iov[0].iov_base = req->rq_header;
+	req->rq_iov[0].iov_len  = xmit_len - bcc;
+
+	return req->rq_buffer;
+}
+
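+/*
+ * Called once the variable-length "bytes" section has been appended to
+ * rq_buffer: patch the byte count into the header, recompute the NetBIOS
+ * length to match, and add the bytes to the outgoing iovec.
+ */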
+static void
+smb_setup_bcc(struct smb_request *req, __u8 *p)
+{
+	u16 bcc = p - req->rq_buffer;
+	u8 *pbcc = req->rq_header + SMB_HEADER_LEN + 2*SMB_WCT(req->rq_header);
+
+	WSET(pbcc, 0, bcc);
+
+	smb_encode_smb_length(req->rq_header, SMB_HEADER_LEN + 
+			      2*SMB_WCT(req->rq_header) - 2 + bcc);
+
+	/* Include the "bytes" in the data to send */
+	req->rq_iovlen = 2;
+	req->rq_iov[1].iov_base = req->rq_buffer;
+	req->rq_iov[1].iov_len  = bcc;
+}
+
+static int
+smb_proc_seek(struct smb_sb_info *server, __u16 fileid,
+	      __u16 mode, off_t offset)
+{
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBlseek, 4, 0);
+	WSET(req->rq_header, smb_vwv0, fileid);
+	WSET(req->rq_header, smb_vwv1, mode);
+	DSET(req->rq_header, smb_vwv2, offset);
+	req->rq_flags |= SMB_REQ_NORETRY;
+
+	result = smb_request_ok(req, SMBlseek, 2, 0);
+	if (result < 0) {
+		result = 0;
+		goto out_free;
+	}
+
+	result = DVAL(req->rq_header, smb_vwv0);
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_open(struct smb_sb_info *server, struct dentry *dentry, int wish)
+{
+	struct inode *ino = dentry->d_inode;
+	struct smb_inode_info *ei = SMB_I(ino);
+	int mode, read_write = 0x42, read_only = 0x40;
+	int res;
+	char *p;
+	struct smb_request *req;
+
+	/*
+	 * Attempt to open r/w, unless there are no write privileges.
+	 */
+	mode = read_write;
+	if (!(ino->i_mode & (S_IWUSR | S_IWGRP | S_IWOTH)))
+		mode = read_only;
+#if 0
+	/* FIXME: why is this code not in? below we fix it so that a caller
+	   wanting RO doesn't get RW. smb_revalidate_inode does some 
+	   optimization based on access mode. tail -f needs it to be correct.
+
+	   We must open rw since we don't do the open if called a second time
+	   with different 'wish'. Is that not supported by smb servers? */
+	if (!(wish & (O_WRONLY | O_RDWR)))
+		mode = read_only;
+#endif
+
+	res = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+      retry:
+	p = smb_setup_header(req, SMBopen, 2, 0);
+	WSET(req->rq_header, smb_vwv0, mode);
+	WSET(req->rq_header, smb_vwv1, aSYSTEM | aHIDDEN | aDIR);
+	res = smb_simple_encode_path(req, &p, dentry, NULL);
+	if (res < 0)
+		goto out_free;
+	smb_setup_bcc(req, p);
+
+	res = smb_request_ok(req, SMBopen, 7, 0);
+	if (res != 0) {
+		if (mode == read_write &&
+		    (res == -EACCES || res == -ETXTBSY || res == -EROFS))
+		{
+			VERBOSE("%s/%s R/W failed, error=%d, retrying R/O\n",
+				DENTRY_PATH(dentry), res);
+			mode = read_only;
+			req->rq_flags = 0;
+			goto retry;
+		}
+		goto out_free;
+	}
+	/* We should now have data in vwv[0..6]. */
+
+	ei->fileid = WVAL(req->rq_header, smb_vwv0);
+	ei->attr   = WVAL(req->rq_header, smb_vwv1);
+	/* smb_vwv2 has mtime */
+	/* smb_vwv4 has size  */
+	ei->access = (WVAL(req->rq_header, smb_vwv6) & SMB_ACCMASK);
+	ei->open = server->generation;
+
+out_free:
+	smb_rput(req);
+out:
+	return res;
+}
+
+/*
+ * Make sure the file is open, and check that the access
+ * is compatible with the desired access.
+ */
+int
+smb_open(struct dentry *dentry, int wish)
+{
+	struct inode *inode = dentry->d_inode;
+	int result;
+	__u16 access;
+
+	result = -ENOENT;
+	if (!inode) {
+		printk(KERN_ERR "smb_open: no inode for dentry %s/%s\n",
+		       DENTRY_PATH(dentry));
+		goto out;
+	}
+
+	if (!smb_is_open(inode)) {
+		struct smb_sb_info *server = server_from_inode(inode);
+		result = 0;
+		if (!smb_is_open(inode))
+			result = smb_proc_open(server, dentry, wish);
+		if (result)
+			goto out;
+		/*
+		 * A successful open means the path is still valid ...
+		 */
+		smb_renew_times(dentry);
+	}
+
+	/*
+	 * Check whether the access is compatible with the desired mode.
+	 */
+	result = 0;
+	access = SMB_I(inode)->access;
+	if (access != wish && access != SMB_O_RDWR) {
+		PARANOIA("%s/%s access denied, access=%x, wish=%x\n",
+			 DENTRY_PATH(dentry), access, wish);
+		result = -EACCES;
+	}
+out:
+	return result;
+}
+
+static int 
+smb_proc_close(struct smb_sb_info *server, __u16 fileid, __u32 mtime)
+{
+	struct smb_request *req;
+	int result = -ENOMEM;
+
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBclose, 3, 0);
+	WSET(req->rq_header, smb_vwv0, fileid);
+	DSET(req->rq_header, smb_vwv1, utc2local(server, mtime));
+	req->rq_flags |= SMB_REQ_NORETRY;
+	result = smb_request_ok(req, SMBclose, 0, 0);
+
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Win NT 4.0 has an apparent bug in that it fails to update the
+ * modify time when writing to a file. As a workaround, we update
+ * both modify and access time locally, and post the times to the
+ * server when closing the file.
+ */
+static int 
+smb_proc_close_inode(struct smb_sb_info *server, struct inode * ino)
+{
+	struct smb_inode_info *ei = SMB_I(ino);
+	int result = 0;
+	if (smb_is_open(ino))
+	{
+		/*
+		 * We clear the open flag in advance, in case another
+ 		 * process observes the value while we block below.
+		 */
+		ei->open = 0;
+
+		/*
+		 * Kludge alert: SMB timestamps are accurate only to
+		 * two seconds ... round the times to avoid needless
+		 * cache invalidations!
+		 */
+		if (ino->i_mtime.tv_sec & 1) { 
+			ino->i_mtime.tv_sec--;
+			ino->i_mtime.tv_nsec = 0; 
+		}
+		if (ino->i_atime.tv_sec & 1) {
+			ino->i_atime.tv_sec--;
+			ino->i_atime.tv_nsec = 0;
+		}
+		/*
+		 * If the file is open with write permissions,
+		 * update the time stamps to sync mtime and atime.
+		 */
+		if ((server->opt.capabilities & SMB_CAP_UNIX) == 0 &&
+		    (server->opt.protocol >= SMB_PROTOCOL_LANMAN2) &&
+		    !(ei->access == SMB_O_RDONLY))
+		{
+			struct smb_fattr fattr;
+			smb_get_inode_attr(ino, &fattr);
+			smb_proc_setattr_ext(server, ino, &fattr);
+		}
+
+		result = smb_proc_close(server, ei->fileid, ino->i_mtime.tv_sec);
+		/*
+		 * Force a revalidation after closing ... some servers
+		 * don't post the size until the file has been closed.
+		 */
+		if (server->opt.protocol < SMB_PROTOCOL_NT1)
+			ei->oldmtime = 0;
+		ei->closed = jiffies;
+	}
+	return result;
+}
+
+int
+smb_close(struct inode *ino)
+{
+	int result = 0;
+
+	if (smb_is_open(ino)) {
+		struct smb_sb_info *server = server_from_inode(ino);
+		result = smb_proc_close_inode(server, ino);
+	}
+	return result;
+}
+
+/*
+ * This is used to close a file following a failed instantiate.
+ * Since we don't have an inode, we can't use any of the above.
+ */
+int
+smb_close_fileid(struct dentry *dentry, __u16 fileid)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	int result;
+
+	result = smb_proc_close(server, fileid, get_seconds());
+	return result;
+}
+
+/* In smb_proc_read and smb_proc_write we do not retry, because the
+   file-id would not be valid after a reconnection. */
+
+static void
+smb_proc_read_data(struct smb_request *req)
+{
+	req->rq_iov[0].iov_base = req->rq_buffer;
+	req->rq_iov[0].iov_len  = 3;
+
+	req->rq_iov[1].iov_base = req->rq_page;
+	req->rq_iov[1].iov_len  = req->rq_rsize;
+	req->rq_iovlen = 2;
+
+	req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
+}
+
+static int
+smb_proc_read(struct inode *inode, loff_t offset, int count, char *data)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	__u16 returned_count, data_len;
+	unsigned char *buf;
+	int result;
+	struct smb_request *req;
+	u8 rbuf[4];
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBread, 5, 0);
+	buf = req->rq_header;
+	WSET(buf, smb_vwv0, SMB_I(inode)->fileid);
+	WSET(buf, smb_vwv1, count);
+	DSET(buf, smb_vwv2, offset);
+	WSET(buf, smb_vwv4, 0);
+
+	req->rq_page = data;
+	req->rq_rsize = count;
+	req->rq_callback = smb_proc_read_data;
+	req->rq_buffer = rbuf;
+	req->rq_flags |= SMB_REQ_NORETRY | SMB_REQ_STATIC;
+
+	result = smb_request_ok(req, SMBread, 5, -1);
+	if (result < 0)
+		goto out_free;
+	returned_count = WVAL(req->rq_header, smb_vwv0);
+
+	data_len = WVAL(rbuf, 1);
+
+	if (returned_count != data_len) {
+		printk(KERN_NOTICE "smb_proc_read: returned != data_len\n");
+		printk(KERN_NOTICE "smb_proc_read: ret_c=%d, data_len=%d\n",
+		       returned_count, data_len);
+	}
+	result = data_len;
+
+out_free:
+	smb_rput(req);
+out:
+	VERBOSE("ino=%ld, fileid=%d, count=%d, result=%d\n",
+		inode->i_ino, SMB_I(inode)->fileid, count, result);
+	return result;
+}
+
+static int
+smb_proc_write(struct inode *inode, loff_t offset, int count, const char *data)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	int result;
+	u16 fileid = SMB_I(inode)->fileid;
+	u8 buf[4];
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	VERBOSE("ino=%ld, fileid=%d, count=%d@%Ld\n",
+		inode->i_ino, fileid, count, offset);
+
+	smb_setup_header(req, SMBwrite, 5, count + 3);
+	WSET(req->rq_header, smb_vwv0, fileid);
+	WSET(req->rq_header, smb_vwv1, count);
+	DSET(req->rq_header, smb_vwv2, offset);
+	WSET(req->rq_header, smb_vwv4, 0);
+
+	buf[0] = 1;
+	WSET(buf, 1, count);	/* yes, again ... */
+	req->rq_iov[1].iov_base = buf;
+	req->rq_iov[1].iov_len = 3;
+	req->rq_iov[2].iov_base = (char *) data;
+	req->rq_iov[2].iov_len = count;
+	req->rq_iovlen = 3;
+	req->rq_flags |= SMB_REQ_NORETRY;
+
+	result = smb_request_ok(req, SMBwrite, 1, 0);
+	if (result >= 0)
+		result = WVAL(req->rq_header, smb_vwv0);
+
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * In smb_proc_readX and smb_proc_writeX we do not retry, because the
+ * file-id would not be valid after a reconnection.
+ */
+
+#define SMB_READX_MAX_PAD      64
+static void
+smb_proc_readX_data(struct smb_request *req)
+{
+	/* header length, excluding the netbios length (-4) */
+	int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;
+	int data_off = WVAL(req->rq_header, smb_vwv6);
+
+	/*
+	 * Some genius made the padding to the data bytes arbitrary.
+	 * So we must first calculate the amount of padding used by the server.
+	 */
+	data_off -= hdrlen;
+	if (data_off > SMB_READX_MAX_PAD || data_off < 0) {
+		PARANOIA("offset is larger than SMB_READX_MAX_PAD or negative!\n");
+		PARANOIA("%d > %d || %d < 0\n", data_off, SMB_READX_MAX_PAD, data_off);
+		req->rq_rlen = req->rq_bufsize + 1;
+		return;
+	}
+	req->rq_iov[0].iov_base = req->rq_buffer;
+	req->rq_iov[0].iov_len  = data_off;
+
+	req->rq_iov[1].iov_base = req->rq_page;
+	req->rq_iov[1].iov_len  = req->rq_rsize;
+	req->rq_iovlen = 2;
+
+	req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
+}
+
+static int
+smb_proc_readX(struct inode *inode, loff_t offset, int count, char *data)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	unsigned char *buf;
+	int result;
+	struct smb_request *req;
+	static char pad[SMB_READX_MAX_PAD];
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBreadX, 12, 0);
+	buf = req->rq_header;
+	WSET(buf, smb_vwv0, 0x00ff);
+	WSET(buf, smb_vwv1, 0);
+	WSET(buf, smb_vwv2, SMB_I(inode)->fileid);
+	DSET(buf, smb_vwv3, (u32)offset);               /* low 32 bits */
+	WSET(buf, smb_vwv5, count);
+	WSET(buf, smb_vwv6, 0);
+	DSET(buf, smb_vwv7, 0);
+	WSET(buf, smb_vwv9, 0);
+	DSET(buf, smb_vwv10, (u32)(offset >> 32));      /* high 32 bits */
+	WSET(buf, smb_vwv11, 0);
+
+	req->rq_page = data;
+	req->rq_rsize = count;
+	req->rq_callback = smb_proc_readX_data;
+	req->rq_buffer = pad;
+	req->rq_bufsize = SMB_READX_MAX_PAD;
+	req->rq_flags |= SMB_REQ_STATIC | SMB_REQ_NORETRY;
+
+	result = smb_request_ok(req, SMBreadX, 12, -1);
+	if (result < 0)
+		goto out_free;
+	result = WVAL(req->rq_header, smb_vwv5);
+
+out_free:
+	smb_rput(req);
+out:
+	VERBOSE("ino=%ld, fileid=%d, count=%d, result=%d\n",
+		inode->i_ino, SMB_I(inode)->fileid, count, result);
+	return result;
+}
+
+static int
+smb_proc_writeX(struct inode *inode, loff_t offset, int count, const char *data)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	int result;
+	u8 *p;
+	static u8 pad[4];
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	VERBOSE("ino=%ld, fileid=%d, count=%d@%Ld\n",
+		inode->i_ino, SMB_I(inode)->fileid, count, offset);
+
+	p = smb_setup_header(req, SMBwriteX, 14, count + 1);
+	WSET(req->rq_header, smb_vwv0, 0x00ff);
+	WSET(req->rq_header, smb_vwv1, 0);
+	WSET(req->rq_header, smb_vwv2, SMB_I(inode)->fileid);
+	DSET(req->rq_header, smb_vwv3, (u32)offset);	/* low 32 bits */
+	DSET(req->rq_header, smb_vwv5, 0);
+	WSET(req->rq_header, smb_vwv7, 0);		/* write mode */
+	WSET(req->rq_header, smb_vwv8, 0);
+	WSET(req->rq_header, smb_vwv9, 0);
+	WSET(req->rq_header, smb_vwv10, count);		/* data length */
+	WSET(req->rq_header, smb_vwv11, smb_vwv12 + 2 + 1);
+	DSET(req->rq_header, smb_vwv12, (u32)(offset >> 32));
+
+	req->rq_iov[1].iov_base = pad;
+	req->rq_iov[1].iov_len = 1;
+	req->rq_iov[2].iov_base = (char *) data;
+	req->rq_iov[2].iov_len = count;
+	req->rq_iovlen = 3;
+	req->rq_flags |= SMB_REQ_NORETRY;
+
+	result = smb_request_ok(req, SMBwriteX, 6, 0);
+ 	if (result >= 0)
+		result = WVAL(req->rq_header, smb_vwv2);
+
+	smb_rput(req);
+out:
+	return result;
+}
+
+int
+smb_proc_create(struct dentry *dentry, __u16 attr, time_t ctime, __u16 *fileid)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	char *p;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	p = smb_setup_header(req, SMBcreate, 3, 0);
+	WSET(req->rq_header, smb_vwv0, attr);
+	DSET(req->rq_header, smb_vwv1, utc2local(server, ctime));
+	result = smb_simple_encode_path(req, &p, dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	smb_setup_bcc(req, p);
+
+	result = smb_request_ok(req, SMBcreate, 1, 0);
+	if (result < 0)
+		goto out_free;
+
+	*fileid = WVAL(req->rq_header, smb_vwv0);
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+int
+smb_proc_mv(struct dentry *old_dentry, struct dentry *new_dentry)
+{
+	struct smb_sb_info *server = server_from_dentry(old_dentry);
+	char *p;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	p = smb_setup_header(req, SMBmv, 1, 0);
+	WSET(req->rq_header, smb_vwv0, aSYSTEM | aHIDDEN | aDIR);
+	result = smb_simple_encode_path(req, &p, old_dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	result = smb_simple_encode_path(req, &p, new_dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	smb_setup_bcc(req, p);
+
+	if ((result = smb_request_ok(req, SMBmv, 0, 0)) < 0)
+		goto out_free;
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Code common to mkdir and rmdir.
+ */
+static int
+smb_proc_generic_command(struct dentry *dentry, __u8 command)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	char *p;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	p = smb_setup_header(req, command, 0, 0);
+	result = smb_simple_encode_path(req, &p, dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	smb_setup_bcc(req, p);
+
+	result = smb_request_ok(req, command, 0, 0);
+	if (result < 0)
+		goto out_free;
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+int
+smb_proc_mkdir(struct dentry *dentry)
+{
+	return smb_proc_generic_command(dentry, SMBmkdir);
+}
+
+int
+smb_proc_rmdir(struct dentry *dentry)
+{
+	return smb_proc_generic_command(dentry, SMBrmdir);
+}
+
+#if SMBFS_POSIX_UNLINK
+/*
+ * Removes readonly attribute from a file. Used by unlink to give posix
+ * semantics.
+ */
+static int
+smb_set_rw(struct dentry *dentry,struct smb_sb_info *server)
+{
+	int result;
+	struct smb_fattr fattr;
+
+	/* FIXME: cifsUE should allow removing a readonly file. */
+
+	/* first get current attribute */
+	smb_init_dirent(server, &fattr);
+	result = server->ops->getattr(server, dentry, &fattr);
+	smb_finish_dirent(server, &fattr);
+	if (result < 0)
+		return result;
+
+	/* if RONLY attribute is set, remove it */
+	if (fattr.attr & aRONLY) {  /* read only attribute is set */
+		fattr.attr &= ~aRONLY;
+		result = smb_proc_setattr_core(server, dentry, fattr.attr);
+	}
+	return result;
+}
+#endif
+
+int
+smb_proc_unlink(struct dentry *dentry)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	int flag = 0;
+	char *p;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+      retry:
+	p = smb_setup_header(req, SMBunlink, 1, 0);
+	WSET(req->rq_header, smb_vwv0, aSYSTEM | aHIDDEN);
+	result = smb_simple_encode_path(req, &p, dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	smb_setup_bcc(req, p);
+
+	if ((result = smb_request_ok(req, SMBunlink, 0, 0)) < 0) {
+#if SMBFS_POSIX_UNLINK
+		if (result == -EACCES && !flag) {
+			/* Posix semantics require the read-only state
+			   of a file to be ignored in unlink(). In the
+			   SMB world an unlink() is refused on a
+			   read-only file. To make things easier for
+			   unix users we try to override the file's
+			   permissions if the unlink fails with the
+			   corresponding error.
+			   This introduces a race condition that could
+			   lead to a file being written by someone who
+			   shouldn't have access, but as far as I can
+			   tell that is unavoidable. */
+
+			/* remove RONLY attribute and try again */
+			result = smb_set_rw(dentry,server);
+			if (result == 0) {
+				flag = 1;
+				req->rq_flags = 0;
+				goto retry;
+			}
+		}
+#endif
+		goto out_free;
+	}
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+int
+smb_proc_flush(struct smb_sb_info *server, __u16 fileid)
+{
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBflush, 1, 0);
+	WSET(req->rq_header, smb_vwv0, fileid);
+	req->rq_flags |= SMB_REQ_NORETRY;
+	result = smb_request_ok(req, SMBflush, 0, 0);
+
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_trunc32(struct inode *inode, loff_t length)
+{
+	/*
+	 * Writing 0 bytes is old-SMB magic for truncating files.
+	 * MAX_NON_LFS should prevent this from being called with too
+	 * large an offset.
+	 */
+	return smb_proc_write(inode, length, 0, NULL);
+}
+
+static int
+smb_proc_trunc64(struct inode *inode, loff_t length)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	int result;
+	char *param;
+	char *data;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 14)))
+		goto out;
+
+	param = req->rq_buffer;
+	data = req->rq_buffer + 6;
+
+	/* FIXME: must we also set allocation size? winNT seems to do that */
+	WSET(param, 0, SMB_I(inode)->fileid);
+	WSET(param, 2, SMB_SET_FILE_END_OF_FILE_INFO);
+	WSET(param, 4, 0);
+	LSET(data, 0, length);
+
+	req->rq_trans2_command = TRANSACT2_SETFILEINFO;
+	req->rq_ldata = 8;
+	req->rq_data  = data;
+	req->rq_lparm = 6;
+	req->rq_parm  = param;
+	req->rq_flags |= SMB_REQ_NORETRY;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+
+	result = 0;
+	if (req->rq_rcls != 0)
+		result = smb_errno(req);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_trunc95(struct inode *inode, loff_t length)
+{
+	struct smb_sb_info *server = server_from_inode(inode);
+	int result = smb_proc_trunc32(inode, length);
+ 
+	/*
+	 * win9x doesn't appear to update the size immediately.
+	 * It will return the old file size after the truncate,
+	 * confusing smbfs. So we force an update.
+	 *
+	 * FIXME: is this still necessary?
+	 */
+	smb_proc_flush(server, SMB_I(inode)->fileid);
+	return result;
+}
+
+static void
+smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
+{
+	memset(fattr, 0, sizeof(*fattr));
+
+	fattr->f_nlink = 1;
+	fattr->f_uid = server->mnt->uid;
+	fattr->f_gid = server->mnt->gid;
+	fattr->f_blksize = SMB_ST_BLKSIZE;
+	fattr->f_unix = 0;
+}
+
+static void
+smb_finish_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
+{
+	if (fattr->f_unix)
+		return;
+
+	fattr->f_mode = server->mnt->file_mode;
+	if (fattr->attr & aDIR) {
+		fattr->f_mode = server->mnt->dir_mode;
+		fattr->f_size = SMB_ST_BLKSIZE;
+	}
+	/* Check the read-only flag */
+	if (fattr->attr & aRONLY)
+		fattr->f_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
+
+	/* How many 512 byte blocks do we need for this file? */
+	fattr->f_blocks = 0;
+	if (fattr->f_size != 0)
+		fattr->f_blocks = 1 + ((fattr->f_size-1) >> 9);
+	return;
+}
+
+void
+smb_init_root_dirent(struct smb_sb_info *server, struct smb_fattr *fattr,
+		     struct super_block *sb)
+{
+	smb_init_dirent(server, fattr);
+	fattr->attr = aDIR;
+	fattr->f_ino = 2; /* traditional root inode number */
+	fattr->f_mtime = current_fs_time(sb);
+	smb_finish_dirent(server, fattr);
+}
+
+/*
+ * Decode a dirent for old protocols
+ *
+ * qname is filled with the decoded, and possibly translated, name.
+ * fattr receives decoded attributes
+ *
+ * Bugs Noted:
+ * (1) Pathworks servers may pad the name with extra spaces.
+ */
+static char *
+smb_decode_short_dirent(struct smb_sb_info *server, char *p,
+			struct qstr *qname, struct smb_fattr *fattr,
+			unsigned char *name_buf)
+{
+	int len;
+
+	/*
+	 * SMB doesn't have a concept of inode numbers ...
+	 */
+	smb_init_dirent(server, fattr);
+	fattr->f_ino = 0;	/* FIXME: do we need this? */
+
+	p += SMB_STATUS_SIZE;	/* reserved (search_status) */
+	fattr->attr = *p;
+	fattr->f_mtime.tv_sec = date_dos2unix(server, WVAL(p, 3), WVAL(p, 1));
+	fattr->f_mtime.tv_nsec = 0;
+	fattr->f_size = DVAL(p, 5);
+	fattr->f_ctime = fattr->f_mtime;
+	fattr->f_atime = fattr->f_mtime;
+	qname->name = p + 9;
+	len = strnlen(qname->name, 12);
+
+	/*
+	 * Trim trailing blanks for Pathworks servers
+	 */
+	while (len > 2 && qname->name[len-1] == ' ')
+		len--;
+
+	smb_finish_dirent(server, fattr);
+
+#if 0
+	/* FIXME: These only work for ascii chars, and recent smbmount doesn't
+	   allow the flag to be set anyway. It kills const. Remove? */
+	switch (server->opt.case_handling) {
+	case SMB_CASE_UPPER:
+		str_upper(entry->name, len);
+		break;
+	case SMB_CASE_LOWER:
+		str_lower(entry->name, len);
+		break;
+	default:
+		break;
+	}
+#endif
+
+	qname->len = 0;
+	len = server->ops->convert(name_buf, SMB_MAXNAMELEN,
+				   qname->name, len,
+				   server->remote_nls, server->local_nls);
+	if (len > 0) {
+		qname->len = len;
+		qname->name = name_buf;
+		DEBUG1("len=%d, name=%.*s\n",qname->len,qname->len,qname->name);
+	}
+
+	return p + 22;
+}
+
+/*
+ * This routine is used to read in directory entries from the network.
+ * Note that it is for short directory name seeks, i.e.: protocol <
+ * SMB_PROTOCOL_LANMAN2
+ */
+static int
+smb_proc_readdir_short(struct file *filp, void *dirent, filldir_t filldir,
+		       struct smb_cache_control *ctl)
+{
+	struct dentry *dir = filp->f_dentry;
+	struct smb_sb_info *server = server_from_dentry(dir);
+	struct qstr qname;
+	struct smb_fattr fattr;
+	char *p;
+	int result;
+	int i, first, entries_seen, entries;
+	int entries_asked = (server->opt.max_xmit - 100) / SMB_DIRINFO_SIZE;
+	__u16 bcc;
+	__u16 count;
+	char status[SMB_STATUS_SIZE];
+	static struct qstr mask = {
+		.name	= "*.*",
+		.len	= 3,
+	};
+	unsigned char *last_status;
+	struct smb_request *req;
+	unsigned char *name_buf;
+
+	VERBOSE("%s/%s\n", DENTRY_PATH(dir));
+
+	lock_kernel();
+
+	result = -ENOMEM;
+	if (! (name_buf = kmalloc(SMB_MAXNAMELEN, GFP_KERNEL)))
+		goto out;
+
+	first = 1;
+	entries = 0;
+	entries_seen = 2; /* implicit . and .. */
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, server->opt.max_xmit)))
+		goto out_name;
+
+	while (1) {
+		p = smb_setup_header(req, SMBsearch, 2, 0);
+		WSET(req->rq_header, smb_vwv0, entries_asked);
+		WSET(req->rq_header, smb_vwv1, aDIR);
+		if (first == 1) {
+			result = smb_simple_encode_path(req, &p, dir, &mask);
+			if (result < 0)
+				goto out_free;
+			if (p + 3 > (char *)req->rq_buffer + req->rq_bufsize) {
+				result = -ENAMETOOLONG;
+				goto out_free;
+			}
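+			/* append an empty resume key: buffer format 5
+			   followed by a zero length word */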
+			*p++ = 5;
+			WSET(p, 0, 0);
+			p += 2;
+			first = 0;
+		} else {
+			if (p + 5 + SMB_STATUS_SIZE >
+			    (char *)req->rq_buffer + req->rq_bufsize) {
+				result = -ENAMETOOLONG;
+				goto out_free;
+			}
+				
+			*p++ = 4;
+			*p++ = 0;
+			*p++ = 5;
+			WSET(p, 0, SMB_STATUS_SIZE);
+			p += 2;
+			memcpy(p, status, SMB_STATUS_SIZE);
+			p += SMB_STATUS_SIZE;
+		}
+
+		smb_setup_bcc(req, p);
+
+		result = smb_request_ok(req, SMBsearch, 1, -1);
+		if (result < 0) {
+			if ((req->rq_rcls == ERRDOS) && 
+			    (req->rq_err  == ERRnofiles))
+				break;
+			goto out_free;
+		}
+		count = WVAL(req->rq_header, smb_vwv0);
+		if (count <= 0)
+			break;
+
+		result = -EIO;
+		bcc = smb_bcc(req->rq_header);
+		if (bcc != count * SMB_DIRINFO_SIZE + 3)
+			goto out_free;
+		p = req->rq_buffer + 3;
+
+
+		/* Make sure the response fits in the buffer. Fixed-size
+		   entries mean we don't have to check in the decode loop. */
+
+		last_status = req->rq_buffer + 3 + (count-1) * SMB_DIRINFO_SIZE;
+
+		if (last_status + SMB_DIRINFO_SIZE >=
+		    req->rq_buffer + req->rq_bufsize) {
+			printk(KERN_ERR "smb_proc_readdir_short: "
+			       "last dir entry outside buffer! "
+			       "%d@%p  %d@%p\n", SMB_DIRINFO_SIZE, last_status,
+			       req->rq_bufsize, req->rq_buffer);
+			goto out_free;
+		}
+
+		/* Read the last entry into the status field. */
+		memcpy(status, last_status, SMB_STATUS_SIZE);
+
+
+		/* Now we are ready to parse smb directory entries. */
+
+		for (i = 0; i < count; i++) {
+			p = smb_decode_short_dirent(server, p, 
+						    &qname, &fattr, name_buf);
+			if (qname.len == 0)
+				continue;
+
+			if (entries_seen == 2 && qname.name[0] == '.') {
+				if (qname.len == 1)
+					continue;
+				if (qname.name[1] == '.' && qname.len == 2)
+					continue;
+			}
+			if (!smb_fill_cache(filp, dirent, filldir, ctl, 
+					    &qname, &fattr))
+				;	/* stop reading? */
+			entries_seen++;
+		}
+	}
+	result = entries;
+
+out_free:
+	smb_rput(req);
+out_name:
+	kfree(name_buf);
+out:
+	unlock_kernel();
+	return result;
+}
+
+static void smb_decode_unix_basic(struct smb_fattr *fattr, struct smb_sb_info *server, char *p)
+{
+	u64 size, disk_bytes;
+
+	/* FIXME: verify nls support. all is sent as utf8? */
+
+	fattr->f_unix = 1;
+	fattr->f_mode = 0;
+
+	/* FIXME: use the uniqueID from the remote instead? */
+	/* 0 L file size in bytes */
+	/* 8 L file size on disk in bytes (block count) */
+	/* 40 L uid */
+	/* 48 L gid */
+	/* 56 W file type */
+	/* 60 L devmajor */
+	/* 68 L devminor */
+	/* 76 L unique ID (inode) */
+	/* 84 L permissions */
+	/* 92 L link count */
+
+	size = LVAL(p, 0);
+	disk_bytes = LVAL(p, 8);
+
+	/*
+	 * Some samba versions round up on-disk byte usage
+	 * to 1MB boundaries, making it useless. When seeing
+	 * that, use the size instead.
+	 */
+	if (!(disk_bytes & 0xfffff))
+		disk_bytes = size+511;
+
+	fattr->f_size = size;
+	fattr->f_blocks = disk_bytes >> 9;
+	fattr->f_ctime = smb_ntutc2unixutc(LVAL(p, 16));
+	fattr->f_atime = smb_ntutc2unixutc(LVAL(p, 24));
+	fattr->f_mtime = smb_ntutc2unixutc(LVAL(p, 32));
+
+	if (server->mnt->flags & SMB_MOUNT_UID)
+		fattr->f_uid = server->mnt->uid;
+	else
+		fattr->f_uid = LVAL(p, 40);
+
+	if (server->mnt->flags & SMB_MOUNT_GID)
+		fattr->f_gid = server->mnt->gid;
+	else
+		fattr->f_gid = LVAL(p, 48);
+
+	fattr->f_mode |= smb_filetype_to_mode(WVAL(p, 56));
+
+	if (S_ISBLK(fattr->f_mode) || S_ISCHR(fattr->f_mode)) {
+		__u64 major = LVAL(p, 60);
+		__u64 minor = LVAL(p, 68);
+
+		fattr->f_rdev = MKDEV(major & 0xffffffff, minor & 0xffffffff);
+		if (MAJOR(fattr->f_rdev) != (major & 0xffffffff) ||
+		    MINOR(fattr->f_rdev) != (minor & 0xffffffff))
+			fattr->f_rdev = 0;
+	}
+
+	fattr->f_mode |= LVAL(p, 84);
+
+	if ( (server->mnt->flags & SMB_MOUNT_DMODE) &&
+	     (S_ISDIR(fattr->f_mode)) )
+		fattr->f_mode = (server->mnt->dir_mode & S_IRWXUGO) | S_IFDIR;
+	else if ( (server->mnt->flags & SMB_MOUNT_FMODE) &&
+	          !(S_ISDIR(fattr->f_mode)) )
+		fattr->f_mode = (server->mnt->file_mode & S_IRWXUGO) |
+				(fattr->f_mode & S_IFMT);
+
+}
+
+/*
+ * Interpret a long filename structure using the specified info level:
+ *   level 1 for anything below NT1 protocol
+ *   level 260 for NT1 protocol
+ *
+ * qname is filled with the decoded, and possibly translated, name
+ * fattr receives decoded attributes.
+ *
+ * Bugs Noted:
+ * (1) Win NT 4.0 appends a null byte to names and counts it in the length!
+ */
+static char *
+smb_decode_long_dirent(struct smb_sb_info *server, char *p, int level,
+		       struct qstr *qname, struct smb_fattr *fattr,
+		       unsigned char *name_buf)
+{
+	char *result;
+	unsigned int len = 0;
+	int n;
+	__u16 date, time;
+	int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE);
+
+	/*
+	 * SMB doesn't have a concept of inode numbers ...
+	 */
+	smb_init_dirent(server, fattr);
+	fattr->f_ino = 0;	/* FIXME: do we need this? */
+
+	switch (level) {
+	case 1:
+		len = *((unsigned char *) p + 22);
+		qname->name = p + 23;
+		result = p + 24 + len;
+
+		date = WVAL(p, 0);
+		time = WVAL(p, 2);
+		fattr->f_ctime.tv_sec = date_dos2unix(server, date, time);
+		fattr->f_ctime.tv_nsec = 0;
+
+		date = WVAL(p, 4);
+		time = WVAL(p, 6);
+		fattr->f_atime.tv_sec = date_dos2unix(server, date, time);
+		fattr->f_atime.tv_nsec = 0;
+
+		date = WVAL(p, 8);
+		time = WVAL(p, 10);
+		fattr->f_mtime.tv_sec = date_dos2unix(server, date, time);
+		fattr->f_mtime.tv_nsec = 0;
+		fattr->f_size = DVAL(p, 12);
+		/* ULONG allocation size */
+		fattr->attr = WVAL(p, 20);
+
+		VERBOSE("info 1 at %p, len=%d, name=%.*s\n",
+			p, len, len, qname->name);
+		break;
+	case 260:
+		result = p + WVAL(p, 0);
+		len = DVAL(p, 60);
+		if (len > 255) len = 255;
+		/* NT4 null terminates, unless we are using unicode ... */
+		qname->name = p + 94;
+		if (!unicode && len && qname->name[len-1] == '\0')
+			len--;
+
+		fattr->f_ctime = smb_ntutc2unixutc(LVAL(p, 8));
+		fattr->f_atime = smb_ntutc2unixutc(LVAL(p, 16));
+		fattr->f_mtime = smb_ntutc2unixutc(LVAL(p, 24));
+		/* change time (32) */
+		fattr->f_size = LVAL(p, 40);
+		/* alloc size (48) */
+		fattr->attr = DVAL(p, 56);
+
+		VERBOSE("info 260 at %p, len=%d, name=%.*s\n",
+			p, len, len, qname->name);
+		break;
+	case SMB_FIND_FILE_UNIX:
+		result = p + WVAL(p, 0);
+		qname->name = p + 108;
+
+		len = strlen(qname->name);
+		/* FIXME: should we check the length?? */
+
+		p += 8;
+		smb_decode_unix_basic(fattr, server, p);
+		VERBOSE("info SMB_FIND_FILE_UNIX at %p, len=%d, name=%.*s\n",
+			p, len, len, qname->name);
+		break;
+	default:
+		PARANOIA("Unknown info level %d\n", level);
+		result = p + WVAL(p, 0);
+		goto out;
+	}
+
+	smb_finish_dirent(server, fattr);
+
+#if 0
+	/* FIXME: These only work for ascii chars, and recent smbmount doesn't
+	   allow the flag to be set anyway. Remove? */
+	switch (server->opt.case_handling) {
+	case SMB_CASE_UPPER:
+		str_upper(qname->name, len);
+		break;
+	case SMB_CASE_LOWER:
+		str_lower(qname->name, len);
+		break;
+	default:
+		break;
+	}
+#endif
+
+	qname->len = 0;
+	n = server->ops->convert(name_buf, SMB_MAXNAMELEN,
+				 qname->name, len,
+				 server->remote_nls, server->local_nls);
+	if (n > 0) {
+		qname->len = n;
+		qname->name = name_buf;
+	}
+
+out:
+	return result;
+}
+
+/* findfirst/findnext flags */
+#define SMB_CLOSE_AFTER_FIRST (1<<0)
+#define SMB_CLOSE_IF_END (1<<1)
+#define SMB_REQUIRE_RESUME_KEY (1<<2)
+#define SMB_CONTINUE_BIT (1<<3)
+
+/*
+ * Note: samba-2.0.7 (at least) has a very similar routine, cli_list, in
+ * source/libsmb/clilist.c. When looking for smb bugs in the readdir code,
+ * go there for advice.
+ *
+ * Bugs Noted:
+ * (1) When using Info Level 1 Win NT 4.0 truncates directory listings 
+ * for certain patterns of names and/or lengths. The breakage pattern
+ * is completely reproducible and can be toggled by the creation of a
+ * single file. (E.g. echo hi >foo breaks, rm -f foo works.)
+ */
+static int
+smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir,
+		      struct smb_cache_control *ctl)
+{
+	struct dentry *dir = filp->f_dentry;
+	struct smb_sb_info *server = server_from_dentry(dir);
+	struct qstr qname;
+	struct smb_fattr fattr;
+
+	unsigned char *p, *lastname;
+	char *mask, *param;
+	__u16 command;
+	int first, entries_seen;
+
+	/* Both NT and OS/2 accept info level 1 (but see note below). */
+	int info_level = 260;
+	const int max_matches = 512;
+
+	unsigned int ff_searchcount = 0;
+	unsigned int ff_eos = 0;
+	unsigned int ff_lastname = 0;
+	unsigned int ff_dir_handle = 0;
+	unsigned int loop_count = 0;
+	unsigned int mask_len, i;
+	int result;
+	struct smb_request *req;
+	unsigned char *name_buf;
+	static struct qstr star = {
+		.name	= "*",
+		.len	= 1,
+	};
+
+	lock_kernel();
+
+	/*
+	 * We always prefer unix style. Use info level 1 for older
+	 * servers that don't do 260.
+	 */
+	if (server->opt.capabilities & SMB_CAP_UNIX)
+		info_level = SMB_FIND_FILE_UNIX;
+	else if (server->opt.protocol < SMB_PROTOCOL_NT1)
+		info_level = 1;
+
+	result = -ENOMEM;
+	if (! (name_buf = kmalloc(SMB_MAXNAMELEN+2, GFP_KERNEL)))
+		goto out;
+	if (! (req = smb_alloc_request(server, server->opt.max_xmit)))
+		goto out_name;
+	param = req->rq_buffer;
+
+	/*
+	 * Encode the initial path
+	 */
+	mask = param + 12;
+
+	result = smb_encode_path(server, mask, SMB_MAXPATHLEN+1, dir, &star);
+	if (result <= 0)
+		goto out_free;
+	mask_len = result - 1;	/* mask_len is strlen, not #bytes */
+	result = 0;
+	first = 1;
+	VERBOSE("starting mask_len=%d, mask=%s\n", mask_len, mask);
+
+	entries_seen = 2;
+	ff_eos = 0;
+
+	while (ff_eos == 0) {
+		loop_count += 1;
+		if (loop_count > 10) {
+			printk(KERN_WARNING "smb_proc_readdir_long: "
+			       "Looping in FIND_NEXT??\n");
+			result = -EIO;
+			break;
+		}
+
+		if (first != 0) {
+			command = TRANSACT2_FINDFIRST;
+			WSET(param, 0, aSYSTEM | aHIDDEN | aDIR);
+			WSET(param, 2, max_matches);	/* max count */
+			WSET(param, 4, SMB_CLOSE_IF_END);
+			WSET(param, 6, info_level);
+			DSET(param, 8, 0);
+		} else {
+			command = TRANSACT2_FINDNEXT;
+
+			VERBOSE("handle=0x%X, lastname=%d, mask=%.*s\n",
+				ff_dir_handle, ff_lastname, mask_len, mask);
+
+			WSET(param, 0, ff_dir_handle);	/* search handle */
+			WSET(param, 2, max_matches);	/* max count */
+			WSET(param, 4, info_level);
+			DSET(param, 6, 0);
+			WSET(param, 10, SMB_CONTINUE_BIT|SMB_CLOSE_IF_END);
+		}
+
+		req->rq_trans2_command = command;
+		req->rq_ldata = 0;
+		req->rq_data  = NULL;
+		req->rq_lparm = 12 + mask_len + 1;
+		req->rq_parm  = param;
+		req->rq_flags = 0;
+		result = smb_add_request(req);
+		if (result < 0) {
+			PARANOIA("error=%d, breaking\n", result);
+			break;
+		}
+
+		if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) {
+			/* a damn Win95 bug - sometimes it clags if you 
+			   ask it too fast */
+			current->state = TASK_INTERRUPTIBLE;
+			schedule_timeout(HZ/5);
+			continue;
+		}
+
+		if (req->rq_rcls != 0) {
+			result = smb_errno(req);
+			PARANOIA("name=%s, result=%d, rcls=%d, err=%d\n",
+				 mask, result, req->rq_rcls, req->rq_err);
+			break;
+		}
+
+		/* parse out some important return info */
+		if (first != 0) {
+			ff_dir_handle = WVAL(req->rq_parm, 0);
+			ff_searchcount = WVAL(req->rq_parm, 2);
+			ff_eos = WVAL(req->rq_parm, 4);
+			ff_lastname = WVAL(req->rq_parm, 8);
+		} else {
+			ff_searchcount = WVAL(req->rq_parm, 0);
+			ff_eos = WVAL(req->rq_parm, 2);
+			ff_lastname = WVAL(req->rq_parm, 6);
+		}
+
+		if (ff_searchcount == 0)
+			break;
+
+		/* Now we are ready to parse smb directory entries. */
+
+		/* point to the data bytes */
+		p = req->rq_data;
+		for (i = 0; i < ff_searchcount; i++) {
+			/* make sure we stay within the buffer */
+			if (p >= req->rq_data + req->rq_ldata) {
+				printk(KERN_ERR "smb_proc_readdir_long: "
+				       "dirent pointer outside buffer! "
+				       "%p  %d@%p\n",
+				       p, req->rq_ldata, req->rq_data);
+				result = -EIO; /* always a comm. error? */
+				goto out_free;
+			}
+
+			p = smb_decode_long_dirent(server, p, info_level,
+						   &qname, &fattr, name_buf);
+
+			/* ignore . and .. from the server */
+			if (entries_seen == 2 && qname.name[0] == '.') {
+				if (qname.len == 1)
+					continue;
+				if (qname.name[1] == '.' && qname.len == 2)
+					continue;
+			}
+
+			if (!smb_fill_cache(filp, dirent, filldir, ctl, 
+					    &qname, &fattr))
+				;	/* stop reading? */
+			entries_seen++;
+		}
+
+		VERBOSE("received %d entries, eos=%d\n", ff_searchcount,ff_eos);
+
+		/*
+		 * We might need the lastname for continuations.
+		 *
+		 * Note that some servers (win95?) point to the filename and
+		 * others (NT4, Samba using NT1) to the dir entry. We assume
+		 * here that those who do not point to a filename do not need
+		 * this info to continue the listing.
+		 *
+		 * OS/2 needs this and talks infolevel 1.
+		 * NetApps want lastname with infolevel 260.
+		 * win2k wants lastname with infolevel 260, and points to
+		 *       the record, not to the name.
+		 * Samba+CifsUnixExt doesn't need lastname.
+		 *
+		 * Both are happy if we return the data they point to. So we do.
+		 * (FIXME: above is not true with win2k)
+		 */
+		mask_len = 0;
+		if (info_level != SMB_FIND_FILE_UNIX &&
+		    ff_lastname > 0 && ff_lastname < req->rq_ldata) {
+			lastname = req->rq_data + ff_lastname;
+
+			switch (info_level) {
+			case 260:
+				mask_len = req->rq_ldata - ff_lastname;
+				break;
+			case 1:
+				/* lastname points to a length byte */
+				mask_len = *lastname++;
+				if (ff_lastname + 1 + mask_len > req->rq_ldata)
+					mask_len = req->rq_ldata - ff_lastname - 1;
+				break;
+			}
+
+			/*
+			 * Update the mask string for the next message.
+			 */
+			if (mask_len > 255)
+				mask_len = 255;
+			if (mask_len)
+				strncpy(mask, lastname, mask_len);
+		}
+		mask_len = strnlen(mask, mask_len);
+		VERBOSE("new mask, len=%d@%d of %d, mask=%.*s\n",
+			mask_len, ff_lastname, req->rq_ldata, mask_len, mask);
+
+		first = 0;
+		loop_count = 0;
+	}
+
+out_free:
+	smb_rput(req);
+out_name:
+	kfree(name_buf);
+out:
+	unlock_kernel();
+	return result;
+}
+
+/*
+ * This version uses the trans2 TRANSACT2_FINDFIRST message 
+ * to get the attribute data.
+ *
+ * Bugs Noted:
+ */
+static int
+smb_proc_getattr_ff(struct smb_sb_info *server, struct dentry *dentry,
+			struct smb_fattr *fattr)
+{
+	char *param, *mask;
+	__u16 date, time;
+	int mask_len, result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+	mask = param + 12;
+
+	mask_len = smb_encode_path(server, mask, SMB_MAXPATHLEN+1, dentry,NULL);
+	if (mask_len < 0) {
+		result = mask_len;
+		goto out_free;
+	}
+	VERBOSE("name=%s, len=%d\n", mask, mask_len);
+	WSET(param, 0, aSYSTEM | aHIDDEN | aDIR);
+	WSET(param, 2, 1);	/* max count */
+	WSET(param, 4, 1);	/* close after this call */
+	WSET(param, 6, 1);	/* info_level */
+	DSET(param, 8, 0);
+
+	req->rq_trans2_command = TRANSACT2_FINDFIRST;
+	req->rq_ldata = 0;
+	req->rq_data  = NULL;
+	req->rq_lparm = 12 + mask_len;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+	if (req->rq_rcls != 0) {
+		result = smb_errno(req);
+#ifdef SMBFS_PARANOIA
+		if (result != -ENOENT)
+			PARANOIA("error for %s, rcls=%d, err=%d\n",
+				 mask, req->rq_rcls, req->rq_err);
+#endif
+		goto out_free;
+	}
+	/* Make sure we got enough data ... */
+	result = -EINVAL;
+	if (req->rq_ldata < 22 || WVAL(req->rq_parm, 2) != 1) {
+		PARANOIA("bad result for %s, len=%d, count=%d\n",
+			 mask, req->rq_ldata, WVAL(req->rq_parm, 2));
+		goto out_free;
+	}
+
+	/*
+	 * Decode the response into the fattr ...
+	 */
+	date = WVAL(req->rq_data, 0);
+	time = WVAL(req->rq_data, 2);
+	fattr->f_ctime.tv_sec = date_dos2unix(server, date, time);
+	fattr->f_ctime.tv_nsec = 0;
+
+	date = WVAL(req->rq_data, 4);
+	time = WVAL(req->rq_data, 6);
+	fattr->f_atime.tv_sec = date_dos2unix(server, date, time);
+	fattr->f_atime.tv_nsec = 0;
+
+	date = WVAL(req->rq_data, 8);
+	time = WVAL(req->rq_data, 10);
+	fattr->f_mtime.tv_sec = date_dos2unix(server, date, time);
+	fattr->f_mtime.tv_nsec = 0;
+	VERBOSE("name=%s, date=%x, time=%x, mtime=%ld\n",
+		mask, date, time, fattr->f_mtime);
+	fattr->f_size = DVAL(req->rq_data, 12);
+	/* ULONG allocation size */
+	fattr->attr = WVAL(req->rq_data, 20);
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_getattr_core(struct smb_sb_info *server, struct dentry *dir,
+		      struct smb_fattr *fattr)
+{
+	int result;
+	char *p;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	p = smb_setup_header(req, SMBgetatr, 0, 0);
+	result = smb_simple_encode_path(req, &p, dir, NULL);
+	if (result < 0)
+ 		goto out_free;
+	smb_setup_bcc(req, p);
+
+	if ((result = smb_request_ok(req, SMBgetatr, 10, 0)) < 0)
+		goto out_free;
+	fattr->attr    = WVAL(req->rq_header, smb_vwv0);
+	fattr->f_mtime.tv_sec = local2utc(server, DVAL(req->rq_header, smb_vwv1));
+	fattr->f_mtime.tv_nsec = 0;
+	fattr->f_size  = DVAL(req->rq_header, smb_vwv3);
+	fattr->f_ctime = fattr->f_mtime; 
+	fattr->f_atime = fattr->f_mtime; 
+#ifdef SMBFS_DEBUG_TIMESTAMP
+	printk("getattr_core: %s/%s, mtime=%ld\n",
+	       DENTRY_PATH(dir), fattr->f_mtime);
+#endif
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Bugs Noted:
+ * (1) Win 95 swaps the date and time fields in the standard info level.
+ */
+static int
+smb_proc_getattr_trans2(struct smb_sb_info *server, struct dentry *dir,
+			struct smb_request *req, int infolevel)
+{
+	char *p, *param;
+	int result;
+
+	param = req->rq_buffer;
+	WSET(param, 0, infolevel);
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, dir, NULL);
+	if (result < 0)
+		goto out;
+	p = param + 6 + result;
+
+	req->rq_trans2_command = TRANSACT2_QPATHINFO;
+	req->rq_ldata = 0;
+	req->rq_data  = NULL;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out;
+	if (req->rq_rcls != 0) {
+		VERBOSE("for %s: result=%d, rcls=%d, err=%d\n",
+			&param[6], result, req->rq_rcls, req->rq_err);
+		result = smb_errno(req);
+		goto out;
+	}
+	result = -ENOENT;
+	if (req->rq_ldata < 22) {
+		PARANOIA("not enough data for %s, len=%d\n",
+			 &param[6], req->rq_ldata);
+		goto out;
+	}
+
+	result = 0;
+out:
+	return result;
+}
+
+static int
+smb_proc_getattr_trans2_std(struct smb_sb_info *server, struct dentry *dir,
+			    struct smb_fattr *attr)
+{
+	u16 date, time;
+	int off_date = 0, off_time = 2;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	result = smb_proc_getattr_trans2(server, dir, req, SMB_INFO_STANDARD);
+	if (result < 0)
+		goto out_free;
+
+	/*
+	 * Kludge alert: Win 95 swaps the date and time field,
+	 * contrary to the CIFS docs and Win NT practice.
+	 */
+	if (server->mnt->flags & SMB_MOUNT_WIN95) {
+		off_date = 2;
+		off_time = 0;
+	}
+	date = WVAL(req->rq_data, off_date);
+	time = WVAL(req->rq_data, off_time);
+	attr->f_ctime.tv_sec = date_dos2unix(server, date, time);
+	attr->f_ctime.tv_nsec = 0;
+
+	date = WVAL(req->rq_data, 4 + off_date);
+	time = WVAL(req->rq_data, 4 + off_time);
+	attr->f_atime.tv_sec = date_dos2unix(server, date, time);
+	attr->f_atime.tv_nsec = 0;
+
+	date = WVAL(req->rq_data, 8 + off_date);
+	time = WVAL(req->rq_data, 8 + off_time);
+	attr->f_mtime.tv_sec = date_dos2unix(server, date, time);
+	attr->f_mtime.tv_nsec = 0;
+#ifdef SMBFS_DEBUG_TIMESTAMP
+	printk(KERN_DEBUG "getattr_trans2: %s/%s, date=%x, time=%x, mtime=%ld\n",
+	       DENTRY_PATH(dir), date, time, attr->f_mtime.tv_sec);
+#endif
+	attr->f_size = DVAL(req->rq_data, 12);
+	attr->attr = WVAL(req->rq_data, 20);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_getattr_trans2_all(struct smb_sb_info *server, struct dentry *dir,
+			    struct smb_fattr *attr)
+{
+	struct smb_request *req;
+	int result;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	result = smb_proc_getattr_trans2(server, dir, req,
+					 SMB_QUERY_FILE_ALL_INFO);
+	if (result < 0)
+		goto out_free;
+
+	attr->f_ctime = smb_ntutc2unixutc(LVAL(req->rq_data, 0));
+	attr->f_atime = smb_ntutc2unixutc(LVAL(req->rq_data, 8));
+	attr->f_mtime = smb_ntutc2unixutc(LVAL(req->rq_data, 16));
+	/* change (24) */
+	attr->attr = WVAL(req->rq_data, 32);
+	/* pad? (34) */
+	/* allocated size (40) */
+	attr->f_size = LVAL(req->rq_data, 48);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_getattr_unix(struct smb_sb_info *server, struct dentry *dir,
+		      struct smb_fattr *attr)
+{
+	struct smb_request *req;
+	int result;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	result = smb_proc_getattr_trans2(server, dir, req,
+					 SMB_QUERY_FILE_UNIX_BASIC);
+	if (result < 0)
+		goto out_free;
+
+	smb_decode_unix_basic(attr, server, req->rq_data);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_getattr_95(struct smb_sb_info *server, struct dentry *dir,
+		    struct smb_fattr *attr)
+{
+	struct inode *inode = dir->d_inode;
+	int result;
+
+	/* FIXME: why not use the "all" version? */
+	result = smb_proc_getattr_trans2_std(server, dir, attr);
+	if (result < 0)
+		goto out;
+
+	/*
+	 * None of the getattr versions here can make win9x return the right
+	 * filesize if there are changes made to an open file.
+	 * A seek-to-end does return the right size, but we only need to do
+	 * that on files we have written.
+	 */
+	if (inode && SMB_I(inode)->flags & SMB_F_LOCALWRITE &&
+	    smb_is_open(inode))
+	{
+		__u16 fileid = SMB_I(inode)->fileid;
+		attr->f_size = smb_proc_seek(server, fileid, 2, 0);
+	}
+
+out:
+	return result;
+}
+
+static int
+smb_proc_ops_wait(struct smb_sb_info *server)
+{
+	int result;
+
+	result = wait_event_interruptible_timeout(server->conn_wq,
+				server->conn_complete, 30*HZ);
+
+	if (!result || signal_pending(current))
+		return -EIO;
+
+	return 0;
+}
+
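+/*
+ * The *_null operations below are installed while a connection is still
+ * being established (see smb_ops_null at the end of this file).  They wait
+ * in smb_proc_ops_wait() for the connection to complete, after which the
+ * real protocol-specific ops are in place, and then forward the call
+ * through server->ops.
+ */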
+static int
+smb_proc_getattr_null(struct smb_sb_info *server, struct dentry *dir,
+			  struct smb_fattr *fattr)
+{
+	int result;
+
+	if (smb_proc_ops_wait(server) < 0)
+		return -EIO;
+
+	smb_init_dirent(server, fattr);
+	result = server->ops->getattr(server, dir, fattr);
+	smb_finish_dirent(server, fattr);
+
+	return result;
+}
+
+static int
+smb_proc_readdir_null(struct file *filp, void *dirent, filldir_t filldir,
+		      struct smb_cache_control *ctl)
+{
+	struct smb_sb_info *server = server_from_dentry(filp->f_dentry);
+
+	if (smb_proc_ops_wait(server) < 0)
+		return -EIO;
+
+	return server->ops->readdir(filp, dirent, filldir, ctl);
+}
+
+int
+smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr)
+{
+	struct smb_sb_info *server = server_from_dentry(dir);
+	int result;
+
+	smb_init_dirent(server, fattr);
+	result = server->ops->getattr(server, dir, fattr);
+	smb_finish_dirent(server, fattr);
+
+	return result;
+}
+
+
+/*
+ * Because of bugs in the core protocol, we use this only to set
+ * attributes. See smb_proc_settime() below for timestamp handling.
+ *
+ * Bugs Noted:
+ * (1) If mtime is non-zero, both Win 3.1 and Win 95 fail
+ * with an undocumented error (ERRDOS code 50). Setting
+ * mtime to 0 allows the attributes to be set.
+ * (2) The extra parameters following the name string aren't
+ * in the CIFS docs, but seem to be necessary for operation.
+ */
+static int
+smb_proc_setattr_core(struct smb_sb_info *server, struct dentry *dentry,
+		      __u16 attr)
+{
+	char *p;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+
+	p = smb_setup_header(req, SMBsetatr, 8, 0);
+	WSET(req->rq_header, smb_vwv0, attr);
+	DSET(req->rq_header, smb_vwv1, 0); /* mtime */
+	WSET(req->rq_header, smb_vwv3, 0); /* reserved values */
+	WSET(req->rq_header, smb_vwv4, 0);
+	WSET(req->rq_header, smb_vwv5, 0);
+	WSET(req->rq_header, smb_vwv6, 0);
+	WSET(req->rq_header, smb_vwv7, 0);
+	result = smb_simple_encode_path(req, &p, dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	if (p + 2 > (char *)req->rq_buffer + req->rq_bufsize) {
+		result = -ENAMETOOLONG;
+		goto out_free;
+	}
+	*p++ = 4;
+	*p++ = 0;
+	smb_setup_bcc(req, p);
+
+	result = smb_request_ok(req, SMBsetatr, 0, 0);
+	if (result < 0)
+		goto out_free;
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Because of bugs in the trans2 setattr messages, we must set
+ * attributes and timestamps separately. The core SMBsetatr
+ * message seems to be the only reliable way to set attributes.
+ */
+int
+smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr)
+{
+	struct smb_sb_info *server = server_from_dentry(dir);
+	int result;
+
+	VERBOSE("setting %s/%s, open=%d\n", 
+		DENTRY_PATH(dir), smb_is_open(dir->d_inode));
+	result = smb_proc_setattr_core(server, dir, fattr->attr);
+	return result;
+}
+
+/*
+ * Set the timestamps for a file opened with write permissions.
+ */
+static int
+smb_proc_setattr_ext(struct smb_sb_info *server,
+		      struct inode *inode, struct smb_fattr *fattr)
+{
+	__u16 date, time;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBsetattrE, 7, 0);
+	WSET(req->rq_header, smb_vwv0, SMB_I(inode)->fileid);
+	/* We don't change the creation time */
+	WSET(req->rq_header, smb_vwv1, 0);
+	WSET(req->rq_header, smb_vwv2, 0);
+	date_unix2dos(server, fattr->f_atime.tv_sec, &date, &time);
+	WSET(req->rq_header, smb_vwv3, date);
+	WSET(req->rq_header, smb_vwv4, time);
+	date_unix2dos(server, fattr->f_mtime.tv_sec, &date, &time);
+	WSET(req->rq_header, smb_vwv5, date);
+	WSET(req->rq_header, smb_vwv6, time);
+#ifdef SMBFS_DEBUG_TIMESTAMP
+	printk(KERN_DEBUG "smb_proc_setattr_ext: date=%d, time=%d, mtime=%ld\n",
+	       date, time, fattr->f_mtime.tv_sec);
+#endif
+
+	req->rq_flags |= SMB_REQ_NORETRY;
+	result = smb_request_ok(req, SMBsetattrE, 0, 0);
+	if (result < 0)
+		goto out_free;
+	result = 0;
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Bugs Noted:
+ * (1) The TRANSACT2_SETPATHINFO message under Win NT 4.0 doesn't
+ * set the file's attribute flags.
+ */
+static int
+smb_proc_setattr_trans2(struct smb_sb_info *server,
+			struct dentry *dir, struct smb_fattr *fattr)
+{
+	__u16 date, time;
+	char *p, *param;
+	int result;
+	char data[26];
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+
+	WSET(param, 0, 1);	/* Info level SMB_INFO_STANDARD */
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, dir, NULL);
+	if (result < 0)
+		goto out_free;
+	p = param + 6 + result;
+
+	WSET(data, 0, 0); /* creation time */
+	WSET(data, 2, 0);
+	date_unix2dos(server, fattr->f_atime.tv_sec, &date, &time);
+	WSET(data, 4, date);
+	WSET(data, 6, time);
+	date_unix2dos(server, fattr->f_mtime.tv_sec, &date, &time);
+	WSET(data, 8, date);
+	WSET(data, 10, time);
+#ifdef SMBFS_DEBUG_TIMESTAMP
+	printk(KERN_DEBUG "setattr_trans2: %s/%s, date=%x, time=%x, mtime=%ld\n", 
+	       DENTRY_PATH(dir), date, time, fattr->f_mtime);
+#endif
+	DSET(data, 12, 0); /* size */
+	DSET(data, 16, 0); /* blksize */
+	WSET(data, 20, 0); /* attr */
+	DSET(data, 22, 0); /* ULONG EA size */
+
+	req->rq_trans2_command = TRANSACT2_SETPATHINFO;
+	req->rq_ldata = 26;
+	req->rq_data  = data;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+	result = 0;
+	if (req->rq_rcls != 0)
+		result = smb_errno(req);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * ATTR_MODE      0x001
+ * ATTR_UID       0x002
+ * ATTR_GID       0x004
+ * ATTR_SIZE      0x008
+ * ATTR_ATIME     0x010
+ * ATTR_MTIME     0x020
+ * ATTR_CTIME     0x040
+ * ATTR_ATIME_SET 0x080
+ * ATTR_MTIME_SET 0x100
+ * ATTR_FORCE     0x200	
+ * ATTR_ATTR_FLAG 0x400
+ *
+ * major/minor should only be set by mknod.
+ */
+int
+smb_proc_setattr_unix(struct dentry *d, struct iattr *attr,
+		      unsigned int major, unsigned int minor)
+{
+	struct smb_sb_info *server = server_from_dentry(d);
+	u64 nttime;
+	char *p, *param;
+	int result;
+	char data[100];
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+
+	DEBUG1("valid flags = 0x%04x\n", attr->ia_valid);
+
+	WSET(param, 0, SMB_SET_FILE_UNIX_BASIC);
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, d, NULL);
+	if (result < 0)
+		goto out_free;
+	p = param + 6 + result;
+
+	/* 0 L file size in bytes */
+	/* 8 L file size on disk in bytes (block count) */
+	/* 16 L last status change time (ctime) */
+	/* 24 L last access time (atime) */
+	/* 32 L last modification time (mtime) */
+	/* 40 L uid */
+	/* 48 L gid */
+	/* 56 W file type enum */
+	/* 60 L devmajor */
+	/* 68 L devminor */
+	/* 76 L unique ID (inode) */
+	/* 84 L permissions */
+	/* 92 L link count */
+	LSET(data, 0, SMB_SIZE_NO_CHANGE);
+	LSET(data, 8, SMB_SIZE_NO_CHANGE);
+	LSET(data, 16, SMB_TIME_NO_CHANGE);
+	LSET(data, 24, SMB_TIME_NO_CHANGE);
+	LSET(data, 32, SMB_TIME_NO_CHANGE);
+	LSET(data, 40, SMB_UID_NO_CHANGE);
+	LSET(data, 48, SMB_GID_NO_CHANGE);
+	LSET(data, 56, smb_filetype_from_mode(attr->ia_mode));
+	LSET(data, 60, major);
+	LSET(data, 68, minor);
+	LSET(data, 76, 0);
+	LSET(data, 84, SMB_MODE_NO_CHANGE);
+	LSET(data, 92, 0);
+
+	if (attr->ia_valid & ATTR_SIZE) {
+		LSET(data, 0, attr->ia_size);
+		LSET(data, 8, 0); /* can't set anyway */
+	}
+
+	/*
+	 * FIXME: check that the conversion function is the correct one.
+	 *
+	 * We can't set ctime, but we might as well pass it to the server
+	 * and let it ignore it.
+	 */
+	if (attr->ia_valid & ATTR_CTIME) {
+		nttime = smb_unixutc2ntutc(attr->ia_ctime);
+		LSET(data, 16, nttime);
+	}
+	if (attr->ia_valid & ATTR_ATIME) {
+		nttime = smb_unixutc2ntutc(attr->ia_atime);
+		LSET(data, 24, nttime);
+	}
+	if (attr->ia_valid & ATTR_MTIME) {
+		nttime = smb_unixutc2ntutc(attr->ia_mtime);
+		LSET(data, 32, nttime);
+	}
+	
+	if (attr->ia_valid & ATTR_UID) {
+		LSET(data, 40, attr->ia_uid);
+	}
+	if (attr->ia_valid & ATTR_GID) {
+		LSET(data, 48, attr->ia_gid); 
+	}
+	
+	if (attr->ia_valid & ATTR_MODE) {
+		LSET(data, 84, attr->ia_mode);
+	}
+
+	req->rq_trans2_command = TRANSACT2_SETPATHINFO;
+	req->rq_ldata = 100;
+	req->rq_data  = data;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+
+/*
+ * Set the modify and access timestamps for a file.
+ *
+ * Incredibly enough, in all of SMB there is no message to allow
+ * setting both attributes and timestamps at once. 
+ *
+ * Bugs Noted:
+ * (1) Win 95 doesn't support the TRANSACT2_SETFILEINFO message 
+ * with info level 1 (INFO_STANDARD).
+ * (2) Win 95 seems not to support setting directory timestamps.
+ * (3) Under the core protocol apparently the only way to set the
+ * timestamp is to open and close the file.
+ */
+int
+smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr)
+{
+	struct smb_sb_info *server = server_from_dentry(dentry);
+	struct inode *inode = dentry->d_inode;
+	int result;
+
+	VERBOSE("setting %s/%s, open=%d\n",
+		DENTRY_PATH(dentry), smb_is_open(inode));
+
+	/* setting the time on a Win95 server fails (tridge) */
+	if (server->opt.protocol >= SMB_PROTOCOL_LANMAN2 && 
+	    !(server->mnt->flags & SMB_MOUNT_WIN95)) {
+		if (smb_is_open(inode) && SMB_I(inode)->access != SMB_O_RDONLY)
+			result = smb_proc_setattr_ext(server, inode, fattr);
+		else
+			result = smb_proc_setattr_trans2(server, dentry, fattr);
+	} else {
+		/*
+		 * Fail silently on directories ... timestamp can't be set?
+		 */
+		result = 0;
+		if (S_ISREG(inode->i_mode)) {
+			/*
+			 * Set the mtime by opening and closing the file.
+			 * Note that the file is opened read-only, but this
+			 * still allows us to set the date (tridge)
+			 */
+			result = -EACCES;
+			if (!smb_is_open(inode))
+				smb_proc_open(server, dentry, SMB_O_RDONLY);
+			if (smb_is_open(inode)) {
+				inode->i_mtime = fattr->f_mtime;
+				result = smb_proc_close_inode(server, inode);
+			}
+		}
+	}
+
+	return result;
+}
+
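+/*
+ * Fill in statfs data from the core SMBdskattr reply.  Of the five reply
+ * words we use the first four: total allocation units, blocks per unit,
+ * bytes per block and free units; the unit size is scaled down to
+ * SMB_ST_BLKSIZE blocks via SMB_ST_BLKSHIFT before multiplying.
+ */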
+int
+smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr)
+{
+	struct smb_sb_info *server = SMB_SB(sb);
+	int result;
+	char *p;
+	long unit;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 0)))
+		goto out;
+
+	smb_setup_header(req, SMBdskattr, 0, 0);
+	if ((result = smb_request_ok(req, SMBdskattr, 5, 0)) < 0)
+		goto out_free;
+	p = SMB_VWV(req->rq_header);
+	unit = (WVAL(p, 2) * WVAL(p, 4)) >> SMB_ST_BLKSHIFT;
+	attr->f_blocks = WVAL(p, 0) * unit;
+	attr->f_bsize  = SMB_ST_BLKSIZE;
+	attr->f_bavail = attr->f_bfree = WVAL(p, 6) * unit;
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+int
+smb_proc_read_link(struct smb_sb_info *server, struct dentry *d,
+		   char *buffer, int len)
+{
+	char *p, *param;
+	int result;
+	struct smb_request *req;
+
+	DEBUG1("readlink of %s/%s\n", DENTRY_PATH(d));
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+
+	WSET(param, 0, SMB_QUERY_FILE_UNIX_LINK);
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, d, NULL);
+	if (result < 0)
+		goto out_free;
+	p = param + 6 + result;
+
+	req->rq_trans2_command = TRANSACT2_QPATHINFO;
+	req->rq_ldata = 0;
+	req->rq_data  = NULL;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+	DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
+		&param[6], result, req->rq_rcls, req->rq_err);
+
+	/* copy data up to the \0 or buffer length */
+	result = len;
+	if (req->rq_ldata < len)
+		result = req->rq_ldata;
+	strncpy(buffer, req->rq_data, result);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+
+/*
+ * Create a symlink object called dentry which points to oldpath.
+ * Samba does not permit dangling links but returns a suitable error message.
+ */
+int
+smb_proc_symlink(struct smb_sb_info *server, struct dentry *d,
+		 const char *oldpath)
+{
+	char *p, *param;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+
+	WSET(param, 0, SMB_SET_FILE_UNIX_LINK);
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param + 6, SMB_MAXPATHLEN+1, d, NULL);
+	if (result < 0)
+		goto out_free;
+	p = param + 6 + result;
+
+	req->rq_trans2_command = TRANSACT2_SETPATHINFO;
+	req->rq_ldata = strlen(oldpath) + 1;
+	req->rq_data  = (char *) oldpath;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+
+	DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
+		&param[6], result, req->rq_rcls, req->rq_err);
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+/*
+ * Create a hard link object called new_dentry which points to dentry.
+ */
+int
+smb_proc_link(struct smb_sb_info *server, struct dentry *dentry,
+	      struct dentry *new_dentry)
+{
+	char *p, *param;
+	int result;
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, PAGE_SIZE)))
+		goto out;
+	param = req->rq_buffer;
+
+	WSET(param, 0, SMB_SET_FILE_UNIX_HLINK);
+	DSET(param, 2, 0);
+	result = smb_encode_path(server, param + 6, SMB_MAXPATHLEN+1,
+				 new_dentry, NULL);
+	if (result < 0)
+		goto out_free;
+	p = param + 6 + result;
+
+	/* Grr, pointless separation of parameters and data ... */
+	req->rq_data = p;
+	req->rq_ldata = smb_encode_path(server, p, SMB_MAXPATHLEN+1,
+					dentry, NULL);
+
+	req->rq_trans2_command = TRANSACT2_SETPATHINFO;
+	req->rq_lparm = p - param;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+
+	DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
+	       &param[6], result, req->rq_rcls, req->rq_err);
+	result = 0;
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+static int
+smb_proc_query_cifsunix(struct smb_sb_info *server)
+{
+	int result;
+	int major, minor;
+	u64 caps;
+	char param[2];
+	struct smb_request *req;
+
+	result = -ENOMEM;
+	if (! (req = smb_alloc_request(server, 100)))
+		goto out;
+
+	WSET(param, 0, SMB_QUERY_CIFS_UNIX_INFO);
+
+	req->rq_trans2_command = TRANSACT2_QFSINFO;
+	req->rq_ldata = 0;
+	req->rq_data  = NULL;
+	req->rq_lparm = 2;
+	req->rq_parm  = param;
+	req->rq_flags = 0;
+	result = smb_add_request(req);
+	if (result < 0)
+		goto out_free;
+
+	if (req->rq_ldata < 12) {
+		PARANOIA("Not enough data\n");
+		goto out_free;
+	}
+	major = WVAL(req->rq_data, 0);
+	minor = WVAL(req->rq_data, 2);
+
+	DEBUG1("Server implements CIFS Extensions for UNIX systems v%d.%d\n",
+	       major, minor);
+	/* FIXME: verify that we are ok with this major/minor? */
+
+	caps = LVAL(req->rq_data, 4);
+	DEBUG1("Server capabilities 0x%016llx\n", caps);
+
+out_free:
+	smb_rput(req);
+out:
+	return result;
+}
+
+
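+/*
+ * Copy only the first SMB_OPS_NUM_STATIC entries (function pointers) of an
+ * ops table into the per-mount ops structure; anything beyond the static
+ * part of struct smb_ops (if present) is left alone.
+ */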
+static void
+install_ops(struct smb_ops *dst, struct smb_ops *src)
+{
+	memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
+}
+
+/* < LANMAN2 */
+static struct smb_ops smb_ops_core =
+{
+	.read		= smb_proc_read,
+	.write		= smb_proc_write,
+	.readdir	= smb_proc_readdir_short,
+	.getattr	= smb_proc_getattr_core,
+	.truncate	= smb_proc_trunc32,
+};
+
+/* LANMAN2, OS/2, others? */
+static struct smb_ops smb_ops_os2 =
+{
+	.read		= smb_proc_read,
+	.write		= smb_proc_write,
+	.readdir	= smb_proc_readdir_long,
+	.getattr	= smb_proc_getattr_trans2_std,
+	.truncate	= smb_proc_trunc32,
+};
+
+/* Win95, and possibly some NetApp versions too */
+static struct smb_ops smb_ops_win95 =
+{
+	.read		= smb_proc_read,    /* does not support 12word readX */
+	.write		= smb_proc_write,
+	.readdir	= smb_proc_readdir_long,
+	.getattr	= smb_proc_getattr_95,
+	.truncate	= smb_proc_trunc95,
+};
+
+/* Samba, NT4 and NT5 */
+static struct smb_ops smb_ops_winNT =
+{
+	.read		= smb_proc_readX,
+	.write		= smb_proc_writeX,
+	.readdir	= smb_proc_readdir_long,
+	.getattr	= smb_proc_getattr_trans2_all,
+	.truncate	= smb_proc_trunc64,
+};
+
+/* Samba w/ unix extensions. Others? */
+static struct smb_ops smb_ops_unix =
+{
+	.read		= smb_proc_readX,
+	.write		= smb_proc_writeX,
+	.readdir	= smb_proc_readdir_long,
+	.getattr	= smb_proc_getattr_unix,
+	/* FIXME: core/ext/time setattr needs to be cleaned up! */
+	/* .setattr	= smb_proc_setattr_unix, */
+	.truncate	= smb_proc_trunc64,
+};
+
+/* Place holder until real ops are in place */
+static struct smb_ops smb_ops_null =
+{
+	.readdir	= smb_proc_readdir_null,
+	.getattr	= smb_proc_getattr_null,
+};
+
+void smb_install_null_ops(struct smb_ops *ops)
+{
+	install_ops(ops, &smb_ops_null);
+}
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
new file mode 100644
index 0000000..e866ec8
--- /dev/null
+++ b/fs/smbfs/proto.h
@@ -0,0 +1,87 @@
+/*
+ *  Autogenerated with cproto on:  Sat Sep 13 17:18:51 CEST 2003
+ */
+
+struct smb_request;
+struct sock;
+struct statfs;
+
+/* proc.c */
+extern int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp);
+extern __u32 smb_len(__u8 *p);
+extern int smb_get_rsize(struct smb_sb_info *server);
+extern int smb_get_wsize(struct smb_sb_info *server);
+extern int smb_errno(struct smb_request *req);
+extern int smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt);
+extern __u8 *smb_setup_header(struct smb_request *req, __u8 command, __u16 wct, __u16 bcc);
+extern int smb_open(struct dentry *dentry, int wish);
+extern int smb_close(struct inode *ino);
+extern int smb_close_fileid(struct dentry *dentry, __u16 fileid);
+extern int smb_proc_create(struct dentry *dentry, __u16 attr, time_t ctime, __u16 *fileid);
+extern int smb_proc_mv(struct dentry *old_dentry, struct dentry *new_dentry);
+extern int smb_proc_mkdir(struct dentry *dentry);
+extern int smb_proc_rmdir(struct dentry *dentry);
+extern int smb_proc_unlink(struct dentry *dentry);
+extern int smb_proc_flush(struct smb_sb_info *server, __u16 fileid);
+extern void smb_init_root_dirent(struct smb_sb_info *server, struct smb_fattr *fattr,
+				 struct super_block *sb);
+extern int smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr);
+extern int smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr);
+extern int smb_proc_setattr_unix(struct dentry *d, struct iattr *attr, unsigned int major, unsigned int minor);
+extern int smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr);
+extern int smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr);
+extern int smb_proc_read_link(struct smb_sb_info *server, struct dentry *d, char *buffer, int len);
+extern int smb_proc_symlink(struct smb_sb_info *server, struct dentry *d, const char *oldpath);
+extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry);
+extern void smb_install_null_ops(struct smb_ops *ops);
+/* dir.c */
+extern struct file_operations smb_dir_operations;
+extern struct inode_operations smb_dir_inode_operations;
+extern struct inode_operations smb_dir_inode_operations_unix;
+extern void smb_new_dentry(struct dentry *dentry);
+extern void smb_renew_times(struct dentry *dentry);
+/* cache.c */
+extern void smb_invalid_dir_cache(struct inode *dir);
+extern void smb_invalidate_dircache_entries(struct dentry *parent);
+extern struct dentry *smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos);
+extern int smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir, struct smb_cache_control *ctrl, struct qstr *qname, struct smb_fattr *entry);
+/* sock.c */
+extern void smb_data_ready(struct sock *sk, int len);
+extern int smb_valid_socket(struct inode *inode);
+extern void smb_close_socket(struct smb_sb_info *server);
+extern int smb_recv_available(struct smb_sb_info *server);
+extern int smb_receive_header(struct smb_sb_info *server);
+extern int smb_receive_drop(struct smb_sb_info *server);
+extern int smb_receive(struct smb_sb_info *server, struct smb_request *req);
+extern int smb_send_request(struct smb_request *req);
+/* inode.c */
+extern struct inode *smb_iget(struct super_block *sb, struct smb_fattr *fattr);
+extern void smb_get_inode_attr(struct inode *inode, struct smb_fattr *fattr);
+extern void smb_set_inode_attr(struct inode *inode, struct smb_fattr *fattr);
+extern void smb_invalidate_inodes(struct smb_sb_info *server);
+extern int smb_revalidate_inode(struct dentry *dentry);
+extern int smb_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
+extern int smb_notify_change(struct dentry *dentry, struct iattr *attr);
+/* file.c */
+extern struct address_space_operations smb_file_aops;
+extern struct file_operations smb_file_operations;
+extern struct inode_operations smb_file_inode_operations;
+/* ioctl.c */
+extern int smb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+/* smbiod.c */
+extern void smbiod_wake_up(void);
+extern int smbiod_register_server(struct smb_sb_info *server);
+extern void smbiod_unregister_server(struct smb_sb_info *server);
+extern void smbiod_flush(struct smb_sb_info *server);
+extern int smbiod_retry(struct smb_sb_info *server);
+/* request.c */
+extern int smb_init_request_cache(void);
+extern void smb_destroy_request_cache(void);
+extern struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize);
+extern void smb_rput(struct smb_request *req);
+extern int smb_add_request(struct smb_request *req);
+extern int smb_request_send_server(struct smb_sb_info *server);
+extern int smb_request_recv(struct smb_sb_info *server);
+/* symlink.c */
+extern int smb_symlink(struct inode *inode, struct dentry *dentry, const char *oldname);
+extern struct inode_operations smb_link_inode_operations;
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
new file mode 100644
index 0000000..2d85dd7
--- /dev/null
+++ b/fs/smbfs/request.c
@@ -0,0 +1,823 @@
+/*
+ *  request.c
+ *
+ *  Copyright (C) 2001 by Urban Widmark
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/net.h>
+
+#include <linux/smb_fs.h>
+#include <linux/smbno.h>
+#include <linux/smb_mount.h>
+
+#include "smb_debug.h"
+#include "request.h"
+#include "proto.h"
+
+/* #define SMB_SLAB_DEBUG	(SLAB_RED_ZONE | SLAB_POISON) */
+#define SMB_SLAB_DEBUG	0
+
+#define ROUND_UP(x) (((x)+3) & ~3)
+
+/* cache for request structures */
+static kmem_cache_t *req_cachep;
+
+static int smb_request_send_req(struct smb_request *req);
+
+/*
+  /proc/slabinfo:
+  name, active, num, objsize, active_slabs, num_slabs, #pages
+*/
+
+
+int smb_init_request_cache(void)
+{
+	req_cachep = kmem_cache_create("smb_request",
+				       sizeof(struct smb_request), 0,
+				       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
+				       NULL, NULL);
+	if (req_cachep == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void smb_destroy_request_cache(void)
+{
+	if (kmem_cache_destroy(req_cachep))
+		printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
+}
+
+/*
+ * Allocate and initialise a request structure
+ */
+static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
+						int bufsize)
+{
+	struct smb_request *req;
+	unsigned char *buf = NULL;
+
+	req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
+	VERBOSE("allocating request: %p\n", req);
+	if (!req)
+		goto out;
+
+	if (bufsize > 0) {
+		buf = smb_kmalloc(bufsize, GFP_NOFS);
+		if (!buf) {
+			kmem_cache_free(req_cachep, req);
+			return NULL;
+		}
+	}
+
+	memset(req, 0, sizeof(struct smb_request));
+	req->rq_buffer = buf;
+	req->rq_bufsize = bufsize;
+	req->rq_server = server;
+	init_waitqueue_head(&req->rq_wait);
+	INIT_LIST_HEAD(&req->rq_queue);
+	atomic_set(&req->rq_count, 1);
+
+out:
+	return req;
+}
+
+struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
+{
+	struct smb_request *req = NULL;
+
+	for (;;) {
+		atomic_inc(&server->nr_requests);
+		if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
+			req = smb_do_alloc_request(server, bufsize);
+			if (req != NULL)
+				break;
+		}
+
+#if 0
+		/*
+		 * Try to free up at least one request in order to stay
+		 * below the hard limit
+		 */
+                if (nfs_try_to_free_pages(server))
+			continue;
+
+		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+			return ERR_PTR(-ERESTARTSYS);
+		current->policy = SCHED_YIELD;
+		schedule();
+#else
+		/* FIXME: we want something like nfs does above, but that
+		   requires changes to all callers and can wait. */
+		break;
+#endif
+	}
+	return req;
+}
+
+static void smb_free_request(struct smb_request *req)
+{
+	atomic_dec(&req->rq_server->nr_requests);
+	if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
+		smb_kfree(req->rq_buffer);
+	if (req->rq_trans2buffer)
+		smb_kfree(req->rq_trans2buffer);
+	kmem_cache_free(req_cachep, req);
+}
+
+/*
+ * What prevents an rget from racing with an rput? The count must never drop
+ * to zero while the request is in use; only rput it when it is OK for it to
+ * be freed.
+ */
+static void smb_rget(struct smb_request *req)
+{
+	atomic_inc(&req->rq_count);
+}
+void smb_rput(struct smb_request *req)
+{
+	if (atomic_dec_and_test(&req->rq_count)) {
+		list_del_init(&req->rq_queue);
+		smb_free_request(req);
+	}
+}
+
+/* setup to receive the data part of the SMB */
+static int smb_setup_bcc(struct smb_request *req)
+{
+	int result = 0;
+	req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
+
+	if (req->rq_rlen > req->rq_bufsize) {
+		PARANOIA("Packet too large %d > %d\n",
+			 req->rq_rlen, req->rq_bufsize);
+		return -ENOBUFS;
+	}
+
+	req->rq_iov[0].iov_base = req->rq_buffer;
+	req->rq_iov[0].iov_len  = req->rq_rlen;
+	req->rq_iovlen = 1;
+
+	return result;
+}
+
+/*
+ * Prepare a "normal" request structure.
+ */
+static int smb_setup_request(struct smb_request *req)
+{
+	int len = smb_len(req->rq_header) + 4;
+	req->rq_slen = len;
+
+	/* if we expect a data part in the reply we set the iov's to read it */
+	if (req->rq_resp_bcc)
+		req->rq_setup_read = smb_setup_bcc;
+
+	/* This tries to support re-using the same request */
+	req->rq_bytes_sent = 0;
+	req->rq_rcls = 0;
+	req->rq_err = 0;
+	req->rq_errno = 0;
+	req->rq_fragment = 0;
+	if (req->rq_trans2buffer)
+		smb_kfree(req->rq_trans2buffer);
+
+	return 0;
+}
+
+/*
+ * Prepare a transaction2 request structure
+ */
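+/*
+ * Wire layout as assembled here: the SMB header plus 15 parameter words
+ * and the byte-count word (== 'header' bytes), then padding up to
+ * oparam = ROUND_UP(header + 3) where the parameter bytes go, then
+ * padding up to odata = ROUND_UP(oparam + rq_lparm) where the data bytes
+ * (if any) go.  The pieces are sent from separate iovecs, with the small
+ * static padding buffer filling the alignment gaps.
+ */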
+static int smb_setup_trans2request(struct smb_request *req)
+{
+	struct smb_sb_info *server = req->rq_server;
+	int mparam, mdata;
+	static unsigned char padding[4];
+
+	/* I know the following is very ugly, but I want to build the
+	   smb packet as efficiently as possible. */
+
+	const int smb_parameters = 15;
+	const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
+	const int oparam = ROUND_UP(header + 3);
+	const int odata  = ROUND_UP(oparam + req->rq_lparm);
+	const int bcc = (req->rq_data ? odata + req->rq_ldata :
+					oparam + req->rq_lparm) - header;
+
+	if ((bcc + oparam) > server->opt.max_xmit)
+		return -ENOMEM;
+	smb_setup_header(req, SMBtrans2, smb_parameters, bcc);
+
+	/*
+	 * max parameters + max data + max setup == bufsize to make NT4 happy
+	 * and not abort the transfer or split into multiple responses. It also
+	 * makes smbfs happy as handling packets larger than the buffer size
+	 * is extra work.
+	 *
+	 * OS/2 is probably going to hate me for this ...
+	 */
+	mparam = SMB_TRANS2_MAX_PARAM;
+	mdata = req->rq_bufsize - mparam;
+
+	mdata = server->opt.max_xmit - mparam - 100;
+	if (mdata < 1024) {
+		mdata = 1024;
+		mparam = 20;
+	}
+
+#if 0
+	/* NT/win2k has ~4k max_xmit, so with this we request more than it wants
+	   to return as one SMB. Useful for testing the fragmented trans2
+	   handling. */
+	mdata = 8192;
+#endif
+
+	WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
+	WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
+	WSET(req->rq_header, smb_mprcnt, mparam);
+	WSET(req->rq_header, smb_mdrcnt, mdata);
+	WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
+	WSET(req->rq_header, smb_flags, 0);
+	DSET(req->rq_header, smb_timeout, 0);
+	WSET(req->rq_header, smb_pscnt, req->rq_lparm);
+	WSET(req->rq_header, smb_psoff, oparam - 4);
+	WSET(req->rq_header, smb_dscnt, req->rq_ldata);
+	WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
+	*(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
+	*(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
+	WSET(req->rq_header, smb_setup0, req->rq_trans2_command);
+
+	req->rq_iovlen = 2;
+	req->rq_iov[0].iov_base = (void *) req->rq_header;
+	req->rq_iov[0].iov_len = oparam;
+	req->rq_iov[1].iov_base = (req->rq_parm==NULL) ? padding : req->rq_parm;
+	req->rq_iov[1].iov_len = req->rq_lparm;
+	req->rq_slen = oparam + req->rq_lparm;
+
+	if (req->rq_data) {
+		req->rq_iovlen += 2;
+		req->rq_iov[2].iov_base = padding;
+		req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
+		req->rq_iov[3].iov_base = req->rq_data;
+		req->rq_iov[3].iov_len = req->rq_ldata;
+		req->rq_slen = odata + req->rq_ldata;
+	}
+
+	/* always a data part for trans2 replies */
+	req->rq_setup_read = smb_setup_bcc;
+
+	return 0;
+}
+
+/*
+ * Add a request and tell smbiod to process it
+ */
+int smb_add_request(struct smb_request *req)
+{
+	long timeleft;
+	struct smb_sb_info *server = req->rq_server;
+	int result = 0;
+
+	smb_setup_request(req);
+	if (req->rq_trans2_command) {
+		if (req->rq_buffer == NULL) {
+			PARANOIA("trans2 attempted without response buffer!\n");
+			return -EIO;
+		}
+		result = smb_setup_trans2request(req);
+	}
+	if (result < 0)
+		return result;
+
+#ifdef SMB_DEBUG_PACKET_SIZE
+	add_xmit_stats(req);
+#endif
+
+	/* add 'req' to the queue of requests */
+	if (smb_lock_server_interruptible(server))
+		return -EINTR;
+
+	/*
+	 * Try to send the request as the process. If that fails we queue the
+	 * request and let smbiod send it later.
+	 */
+
+	/* FIXME: each server has a limit on the maximum number of parallel
+	   requests (10, 50 or so). We should not allow more requests than
+	   that to be active. */
+	if (server->mid > 0xf000)
+		server->mid = 0;
+	req->rq_mid = server->mid++;
+	WSET(req->rq_header, smb_mid, req->rq_mid);
+
+	result = 0;
+	if (server->state == CONN_VALID) {
+		if (list_empty(&server->xmitq))
+			result = smb_request_send_req(req);
+		if (result < 0) {
+			/* Connection lost? */
+			server->conn_error = result;
+			server->state = CONN_INVALID;
+		}
+	}
+	if (result != 1)
+		list_add_tail(&req->rq_queue, &server->xmitq);
+	smb_rget(req);
+
+	if (server->state != CONN_VALID)
+		smbiod_retry(server);
+
+	smb_unlock_server(server);
+
+	smbiod_wake_up();
+
+	timeleft = wait_event_interruptible_timeout(req->rq_wait,
+				    req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
+	if (!timeleft || signal_pending(current)) {
+		/*
+		 * On timeout or on interrupt we want to try and remove the
+		 * request from the recvq/xmitq.
+		 */
+		smb_lock_server(server);
+		if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
+			list_del_init(&req->rq_queue);
+			smb_rput(req);
+		}
+		smb_unlock_server(server);
+	}
+
+	if (!timeleft) {
+		PARANOIA("request [%p, mid=%d] timed out!\n",
+			 req, req->rq_mid);
+		VERBOSE("smb_com:  %02x\n", *(req->rq_header + smb_com));
+		VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
+		VERBOSE("smb_flg:  %02x\n", *(req->rq_header + smb_flg));
+		VERBOSE("smb_tid:  %04x\n", WVAL(req->rq_header, smb_tid));
+		VERBOSE("smb_pid:  %04x\n", WVAL(req->rq_header, smb_pid));
+		VERBOSE("smb_uid:  %04x\n", WVAL(req->rq_header, smb_uid));
+		VERBOSE("smb_mid:  %04x\n", WVAL(req->rq_header, smb_mid));
+		VERBOSE("smb_wct:  %02x\n", *(req->rq_header + smb_wct));
+
+		req->rq_rcls = ERRSRV;
+		req->rq_err  = ERRtimeout;
+
+		/* Just in case it was "stuck" */
+		smbiod_wake_up();
+	}
+	VERBOSE("woke up, rcls=%d\n", req->rq_rcls);
+
+	if (req->rq_rcls != 0)
+		req->rq_errno = smb_errno(req);
+	if (signal_pending(current))
+		req->rq_errno = -ERESTARTSYS;
+	return req->rq_errno;
+}
+
+/*
+ * Send a request and place it on the recvq if successfully sent.
+ * Must be called with the server lock held.
+ */
+static int smb_request_send_req(struct smb_request *req)
+{
+	struct smb_sb_info *server = req->rq_server;
+	int result;
+
+	if (req->rq_bytes_sent == 0) {
+		WSET(req->rq_header, smb_tid, server->opt.tid);
+		WSET(req->rq_header, smb_pid, 1);
+		WSET(req->rq_header, smb_uid, server->opt.server_uid);
+	}
+
+	result = smb_send_request(req);
+	if (result < 0 && result != -EAGAIN)
+		goto out;
+
+	result = 0;
+	if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
+		goto out;
+
+	list_del_init(&req->rq_queue);
+	list_add_tail(&req->rq_queue, &server->recvq);
+	result = 1;
+out:
+	return result;
+}
+
+/*
+ * Sends one request for this server. (smbiod)
+ * Must be called with the server lock held.
+ * Returns: <0 on error
+ *           0 if no request could be completely sent
+ *           1 if all data for one request was sent
+ */
+int smb_request_send_server(struct smb_sb_info *server)
+{
+	struct list_head *head;
+	struct smb_request *req;
+	int result;
+
+	if (server->state != CONN_VALID)
+		return 0;
+
+	/* dequeue first request, if any */
+	req = NULL;
+	head = server->xmitq.next;
+	if (head != &server->xmitq) {
+		req = list_entry(head, struct smb_request, rq_queue);
+	}
+	if (!req)
+		return 0;
+
+	result = smb_request_send_req(req);
+	if (result < 0) {
+		server->conn_error = result;
+		list_del_init(&req->rq_queue);
+		list_add(&req->rq_queue, &server->xmitq);
+		result = -EIO;
+		goto out;
+	}
+
+out:
+	return result;
+}
+
+/*
+ * Try to find a request matching this "mid". Typically the first entry will
+ * be the matching one.
+ */
+static struct smb_request *find_request(struct smb_sb_info *server, int mid)
+{
+	struct list_head *tmp;
+	struct smb_request *req = NULL;
+
+	list_for_each(tmp, &server->recvq) {
+		req = list_entry(tmp, struct smb_request, rq_queue);
+		if (req->rq_mid == mid) {
+			break;
+		}
+		req = NULL;
+	}
+
+	if (!req) {
+		VERBOSE("received reply with mid %d but no request!\n",
+			WVAL(server->header, smb_mid));
+		server->rstate = SMB_RECV_DROP;
+	}
+
+	return req;
+}
+
+/*
+ * Called when we have read the smb header and believe this is a response.
+ */
+static int smb_init_request(struct smb_sb_info *server, struct smb_request *req)
+{
+	int hdrlen, wct;
+
+	memcpy(req->rq_header, server->header, SMB_HEADER_LEN);
+
+	wct = *(req->rq_header + smb_wct);
+	if (wct > 20) {	
+		PARANOIA("wct too large, %d > 20\n", wct);
+		server->rstate = SMB_RECV_DROP;
+		return 0;
+	}
+
+	req->rq_resp_wct = wct;
+	hdrlen = SMB_HEADER_LEN + wct*2 + 2;
+	VERBOSE("header length: %d   smb_wct: %2d\n", hdrlen, wct);
+
+	req->rq_bytes_recvd = SMB_HEADER_LEN;
+	req->rq_rlen = hdrlen;
+	req->rq_iov[0].iov_base = req->rq_header;
+	req->rq_iov[0].iov_len  = hdrlen;
+	req->rq_iovlen = 1;
+	server->rstate = SMB_RECV_PARAM;
+
+#ifdef SMB_DEBUG_PACKET_SIZE
+	add_recv_stats(smb_len(server->header));
+#endif
+	return 0;
+}
+
+/*
+ * Reads the SMB parameters
+ */
+static int smb_recv_param(struct smb_sb_info *server, struct smb_request *req)
+{
+	int result;
+
+	result = smb_receive(server, req);
+	if (result < 0)
+		return result;
+	if (req->rq_bytes_recvd < req->rq_rlen)
+		return 0;
+
+	VERBOSE("result: %d   smb_bcc:  %04x\n", result,
+		WVAL(req->rq_header, SMB_HEADER_LEN +
+		     (*(req->rq_header + smb_wct) * 2)));
+
+	result = 0;
+	req->rq_iov[0].iov_base = NULL;
+	req->rq_rlen = 0;
+	if (req->rq_callback)
+		req->rq_callback(req);
+	else if (req->rq_setup_read)
+		result = req->rq_setup_read(req);
+	if (result < 0) {
+		server->rstate = SMB_RECV_DROP;
+		return result;
+	}
+
+	server->rstate = req->rq_rlen > 0 ? SMB_RECV_DATA : SMB_RECV_END;
+
+	req->rq_bytes_recvd = 0;	/* recvd out of the iov */
+
+	VERBOSE("rlen: %d\n", req->rq_rlen);
+	if (req->rq_rlen < 0) {
+		PARANOIA("Parameters read beyond end of packet!\n");
+		server->rstate = SMB_RECV_END;
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Reads the SMB data
+ */
+static int smb_recv_data(struct smb_sb_info *server, struct smb_request *req)
+{
+	int result;
+
+	result = smb_receive(server, req);
+	if (result < 0)
+		goto out;
+	if (req->rq_bytes_recvd < req->rq_rlen)
+		goto out;
+	server->rstate = SMB_RECV_END;
+out:
+	VERBOSE("result: %d\n", result);
+	return result;
+}
+
+/*
+ * Receive a transaction2 response
+ * Return: 0 if the response has been fully read
+ *         1 if there are further "fragments" to read
+ *        <0 if there is an error
+ */
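+/*
+ * A server may split a trans2 response over several SMBs.  The single-SMB
+ * case is handled in place by pointing rq_parm/rq_data into rq_buffer;
+ * otherwise a separate rq_trans2buffer of parm_tot + data_tot bytes is
+ * allocated on the first fragment, and each fragment's parameter and data
+ * bytes are copied in at their displacements until the totals are reached.
+ */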
+static int smb_recv_trans2(struct smb_sb_info *server, struct smb_request *req)
+{
+	unsigned char *inbuf;
+	unsigned int parm_disp, parm_offset, parm_count, parm_tot;
+	unsigned int data_disp, data_offset, data_count, data_tot;
+	int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;
+
+	VERBOSE("handling trans2\n");
+
+	inbuf = req->rq_header;
+	data_tot    = WVAL(inbuf, smb_tdrcnt);
+	parm_tot    = WVAL(inbuf, smb_tprcnt);
+	parm_disp   = WVAL(inbuf, smb_prdisp);
+	parm_offset = WVAL(inbuf, smb_proff);
+	parm_count  = WVAL(inbuf, smb_prcnt);
+	data_disp   = WVAL(inbuf, smb_drdisp);
+	data_offset = WVAL(inbuf, smb_droff);
+	data_count  = WVAL(inbuf, smb_drcnt);
+
+	/* Modify offset for the split header/buffer we use */
+	if (data_count || data_offset) {
+		if (unlikely(data_offset < hdrlen))
+			goto out_bad_data;
+		else
+			data_offset -= hdrlen;
+	}
+	if (parm_count || parm_offset) {
+		if (unlikely(parm_offset < hdrlen))
+			goto out_bad_parm;
+		else
+			parm_offset -= hdrlen;
+	}
+
+	if (parm_count == parm_tot && data_count == data_tot) {
+		/*
+		 * This packet has all the trans2 data.
+		 *
+		 * We setup the request so that this will be the common
+		 * case. It may be a server error to not return a
+		 * response that fits.
+		 */
+		VERBOSE("single trans2 response  "
+			"dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
+			data_count, parm_count,
+			data_offset, parm_offset);
+		req->rq_ldata = data_count;
+		req->rq_lparm = parm_count;
+		req->rq_data = req->rq_buffer + data_offset;
+		req->rq_parm = req->rq_buffer + parm_offset;
+		if (unlikely(parm_offset + parm_count > req->rq_rlen))
+			goto out_bad_parm;
+		if (unlikely(data_offset + data_count > req->rq_rlen))
+			goto out_bad_data;
+		return 0;
+	}
+
+	VERBOSE("multi trans2 response  "
+		"frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
+		req->rq_fragment,
+		data_count, parm_count,
+		data_offset, parm_offset);
+
+	if (!req->rq_fragment) {
+		int buf_len;
+
+		/* We got the first trans2 fragment */
+		req->rq_fragment = 1;
+		req->rq_total_data = data_tot;
+		req->rq_total_parm = parm_tot;
+		req->rq_ldata = 0;
+		req->rq_lparm = 0;
+
+		buf_len = data_tot + parm_tot;
+		if (buf_len > SMB_MAX_PACKET_SIZE)
+			goto out_too_long;
+
+		req->rq_trans2bufsize = buf_len;
+		req->rq_trans2buffer = smb_kmalloc(buf_len, GFP_NOFS);
+		if (!req->rq_trans2buffer)
+			goto out_no_mem;
+		memset(req->rq_trans2buffer, 0, buf_len);
+
+		req->rq_parm = req->rq_trans2buffer;
+		req->rq_data = req->rq_trans2buffer + parm_tot;
+	} else if (unlikely(req->rq_total_data < data_tot ||
+			    req->rq_total_parm < parm_tot))
+		goto out_data_grew;
+
+	if (unlikely(parm_disp + parm_count > req->rq_total_parm ||
+		     parm_offset + parm_count > req->rq_rlen))
+		goto out_bad_parm;
+	if (unlikely(data_disp + data_count > req->rq_total_data ||
+		     data_offset + data_count > req->rq_rlen))
+		goto out_bad_data;
+
+	inbuf = req->rq_buffer;
+	memcpy(req->rq_parm + parm_disp, inbuf + parm_offset, parm_count);
+	memcpy(req->rq_data + data_disp, inbuf + data_offset, data_count);
+
+	req->rq_ldata += data_count;
+	req->rq_lparm += parm_count;
+
+	/*
+	 * Check whether we've received all of the data. Note that
+	 * we use the packet totals -- total lengths might shrink!
+	 */
+	if (req->rq_ldata >= data_tot && req->rq_lparm >= parm_tot) {
+		req->rq_ldata = data_tot;
+		req->rq_lparm = parm_tot;
+		return 0;
+	}
+	return 1;
+
+out_too_long:
+	printk(KERN_ERR "smb_trans2: data/param too long, data=%u, parm=%u\n",
+		data_tot, parm_tot);
+	goto out_EIO;
+out_no_mem:
+	printk(KERN_ERR "smb_trans2: couldn't allocate data area of %d bytes\n",
+	       req->rq_trans2bufsize);
+	req->rq_errno = -ENOMEM;
+	goto out;
+out_data_grew:
+	printk(KERN_ERR "smb_trans2: data/params grew!\n");
+	goto out_EIO;
+out_bad_parm:
+	printk(KERN_ERR "smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
+	       parm_disp, parm_count, parm_tot, parm_offset);
+	goto out_EIO;
+out_bad_data:
+	printk(KERN_ERR "smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
+	       data_disp, data_count, data_tot, data_offset);
+out_EIO:
+	req->rq_errno = -EIO;
+out:
+	return req->rq_errno;
+}
+
+/*
+ * State machine for receiving responses. We handle the fact that we can't
+ * read the full response in one try by having states telling us how much we
+ * have read.
+ *
+ * Must be called with the server lock held (only called from smbiod).
+ *
+ * Return: <0 on error
+ */
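+/*
+ * Normal progression of the receive states handled below:
+ *
+ *   SMB_RECV_START -> SMB_RECV_HEADER -> SMB_RECV_HCOMPLETE
+ *     -> SMB_RECV_PARAM -> SMB_RECV_DATA -> SMB_RECV_END
+ *
+ * SMB_RECV_DROP discards a response we cannot match or parse, and
+ * SMB_RECV_REQUEST marks an unsolicited packet from the server, which is
+ * left for smbiod to deal with.
+ */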
+int smb_request_recv(struct smb_sb_info *server)
+{
+	struct smb_request *req = NULL;
+	int result = 0;
+
+	if (smb_recv_available(server) <= 0)
+		return 0;
+
+	VERBOSE("state: %d\n", server->rstate);
+	switch (server->rstate) {
+	case SMB_RECV_DROP:
+		result = smb_receive_drop(server);
+		if (result < 0)
+			break;
+		if (server->rstate == SMB_RECV_DROP)
+			break;
+		server->rstate = SMB_RECV_START;
+		/* fallthrough */
+	case SMB_RECV_START:
+		server->smb_read = 0;
+		server->rstate = SMB_RECV_HEADER;
+		/* fallthrough */
+	case SMB_RECV_HEADER:
+		result = smb_receive_header(server);
+		if (result < 0)
+			break;
+		if (server->rstate == SMB_RECV_HEADER)
+			break;
+		if (! (*(server->header + smb_flg) & SMB_FLAGS_REPLY) ) {
+			server->rstate = SMB_RECV_REQUEST;
+			break;
+		}
+		if (server->rstate != SMB_RECV_HCOMPLETE)
+			break;
+		/* fallthrough */
+	case SMB_RECV_HCOMPLETE:
+		req = find_request(server, WVAL(server->header, smb_mid));
+		if (!req)
+			break;
+		smb_init_request(server, req);
+		req->rq_rcls = *(req->rq_header + smb_rcls);
+		req->rq_err  = WVAL(req->rq_header, smb_err);
+		if (server->rstate != SMB_RECV_PARAM)
+			break;
+		/* fallthrough */
+	case SMB_RECV_PARAM:
+		if (!req)
+			req = find_request(server,WVAL(server->header,smb_mid));
+		if (!req)
+			break;
+		result = smb_recv_param(server, req);
+		if (result < 0)
+			break;
+		if (server->rstate != SMB_RECV_DATA)
+			break;
+		/* fallthrough */
+	case SMB_RECV_DATA:
+		if (!req)
+			req = find_request(server,WVAL(server->header,smb_mid));
+		if (!req)
+			break;
+		result = smb_recv_data(server, req);
+		if (result < 0)
+			break;
+		break;
+
+		/* We should never be called with any of these states */
+	case SMB_RECV_END:
+	case SMB_RECV_REQUEST:
+		server->rstate = SMB_RECV_END;
+		break;
+	}
+
+	if (result < 0) {
+		/* We saw an error */
+		return result;
+	}
+
+	if (server->rstate != SMB_RECV_END)
+		return 0;
+
+	result = 0;
+	if (req->rq_trans2_command && req->rq_rcls == SUCCESS)
+		result = smb_recv_trans2(server, req);
+
+	/*
+	 * Response completely read. Drop any extra bytes sent by the server.
+	 * (Yes, servers sometimes add extra bytes to responses)
+	 */
+	VERBOSE("smb_len: %d   smb_read: %d\n",
+		server->smb_len, server->smb_read);
+	if (server->smb_read < server->smb_len)
+		smb_receive_drop(server);
+
+	server->rstate = SMB_RECV_START;
+
+	if (!result) {
+		list_del_init(&req->rq_queue);
+		req->rq_flags |= SMB_REQ_RECEIVED;
+		smb_rput(req);
+		wake_up_interruptible(&req->rq_wait);
+	}
+	return 0;
+}
diff --git a/fs/smbfs/request.h b/fs/smbfs/request.h
new file mode 100644
index 0000000..efb2145
--- /dev/null
+++ b/fs/smbfs/request.h
@@ -0,0 +1,70 @@
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/wait.h>
+
+struct smb_request {
+	struct list_head rq_queue;	/* recvq or xmitq for the server */
+
+	atomic_t rq_count;
+
+	wait_queue_head_t rq_wait;
+	int rq_flags;
+	int rq_mid;	/* multiplex ID, set by request.c */
+
+	struct smb_sb_info *rq_server;
+
+	/* header + word count + parameter words + byte count */
+	unsigned char rq_header[SMB_HEADER_LEN + 20*2 + 2];
+
+	int rq_bufsize;
+	unsigned char *rq_buffer;
+
+	/* FIXME: this is not good enough for merging IO requests. */
+	unsigned char *rq_page;
+	int rq_rsize;
+
+	int rq_resp_wct;
+	int rq_resp_bcc;
+
+	int rq_rlen;
+	int rq_bytes_recvd;
+
+	int rq_slen;
+	int rq_bytes_sent;
+
+	int rq_iovlen;
+	struct kvec rq_iov[4];
+
+	int (*rq_setup_read) (struct smb_request *);
+	void (*rq_callback) (struct smb_request *);
+
+	/* ------ trans2 stuff ------ */
+
+	u16 rq_trans2_command;	/* 0 if not a trans2 request */
+	unsigned int rq_ldata;
+	unsigned char *rq_data;
+	unsigned int rq_lparm;
+	unsigned char *rq_parm;
+
+	int rq_fragment;
+	u32 rq_total_data;
+	u32 rq_total_parm;
+	int rq_trans2bufsize;
+	unsigned char *rq_trans2buffer;
+
+	/* ------ response ------ */
+
+	unsigned short rq_rcls;
+	unsigned short rq_err;
+	int rq_errno;
+};
+
+#define SMB_REQ_STATIC		0x0001	/* rq_buffer is static */
+#define SMB_REQ_NORETRY		0x0002	/* request is invalid after retry */
+
+#define SMB_REQ_TRANSMITTED	0x4000	/* all data has been sent */
+#define SMB_REQ_RECEIVED	0x8000	/* reply received, smbiod is done */
+
+#define xSMB_REQ_NOREPLY	0x0004	/* we don't want the reply (if any) */
+#define xSMB_REQ_NORECEIVER	0x0008	/* caller doesn't wait for response */
diff --git a/fs/smbfs/smb_debug.h b/fs/smbfs/smb_debug.h
new file mode 100644
index 0000000..734972b
--- /dev/null
+++ b/fs/smbfs/smb_debug.h
@@ -0,0 +1,34 @@
+/*
+ * Defines some debug macros for smbfs.
+ */
+
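+/*
+ * All three macro families are compile-time switches: they only produce
+ * output when SMBFS_PARANOIA, SMBFS_DEBUG_VERBOSE or SMBFS_DEBUG is
+ * defined at build time (e.g. via -D on the compiler command line);
+ * otherwise they expand to empty statements.
+ */
+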
+/* This makes a dentry parent/child name pair. Useful for debugging printk's */
+#define DENTRY_PATH(dentry) \
+	(dentry)->d_parent->d_name.name,(dentry)->d_name.name
+
+/*
+ * Safety checks for conditions that should never happen.
+ * These are normally enabled.
+ */
+#ifdef SMBFS_PARANOIA
+# define PARANOIA(f, a...) printk(KERN_NOTICE "%s: " f, __FUNCTION__ , ## a)
+#else
+# define PARANOIA(f, a...) do { ; } while(0)
+#endif
+
+/* lots of debug messages */
+#ifdef SMBFS_DEBUG_VERBOSE
+# define VERBOSE(f, a...) printk(KERN_DEBUG "%s: " f, __FUNCTION__ , ## a)
+#else
+# define VERBOSE(f, a...) do { ; } while(0)
+#endif
+
+/*
+ * "normal" debug messages, but not with a normal DEBUG define ... way
+ * too common name.
+ */
+#ifdef SMBFS_DEBUG
+#define DEBUG1(f, a...) printk(KERN_DEBUG "%s: " f, __FUNCTION__ , ## a)
+#else
+#define DEBUG1(f, a...) do { ; } while(0)
+#endif
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
new file mode 100644
index 0000000..481a97a42
--- /dev/null
+++ b/fs/smbfs/smbiod.c
@@ -0,0 +1,341 @@
+/*
+ *  smbiod.c
+ *
+ *  Copyright (C) 2000, Charles Loep / Corel Corp.
+ *  Copyright (C) 2001, Urban Widmark
+ */
+
+#include <linux/config.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/dcache.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/ip.h>
+
+#include <linux/smb_fs.h>
+#include <linux/smbno.h>
+#include <linux/smb_mount.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "smb_debug.h"
+#include "request.h"
+#include "proto.h"
+
+enum smbiod_state {
+	SMBIOD_DEAD,
+	SMBIOD_STARTING,
+	SMBIOD_RUNNING,
+};
+
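+/*
+ * SMBIOD_DEAD means no smbiod thread exists, SMBIOD_STARTING that one is
+ * being created by smbiod_start(), and SMBIOD_RUNNING that the thread is
+ * up and serving the smb_servers list.  Transitions are made under
+ * servers_lock.
+ */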
+static enum smbiod_state smbiod_state = SMBIOD_DEAD;
+static pid_t smbiod_pid;
+static DECLARE_WAIT_QUEUE_HEAD(smbiod_wait);
+static LIST_HEAD(smb_servers);
+static DEFINE_SPINLOCK(servers_lock);
+
+#define SMBIOD_DATA_READY	(1<<0)
+static long smbiod_flags;
+
+static int smbiod(void *);
+static int smbiod_start(void);
+
+/*
+ * called when there's work for us to do
+ */
+void smbiod_wake_up(void)
+{
+	if (smbiod_state == SMBIOD_DEAD)
+		return;
+	set_bit(SMBIOD_DATA_READY, &smbiod_flags);
+	wake_up_interruptible(&smbiod_wait);
+}
+
+/*
+ * start smbiod if none is running
+ */
+static int smbiod_start(void)
+{
+	pid_t pid;
+	if (smbiod_state != SMBIOD_DEAD)
+		return 0;
+	smbiod_state = SMBIOD_STARTING;
+	__module_get(THIS_MODULE);
+	spin_unlock(&servers_lock);
+	pid = kernel_thread(smbiod, NULL, 0);
+	if (pid < 0)
+		module_put(THIS_MODULE);
+
+	spin_lock(&servers_lock);
+	smbiod_state = pid < 0 ? SMBIOD_DEAD : SMBIOD_RUNNING;
+	smbiod_pid = pid;
+	return pid;
+}
+
+/*
+ * register a server & start smbiod if necessary
+ */
+int smbiod_register_server(struct smb_sb_info *server)
+{
+	int ret;
+	spin_lock(&servers_lock);
+	list_add(&server->entry, &smb_servers);
+	VERBOSE("%p\n", server);
+	ret = smbiod_start();
+	spin_unlock(&servers_lock);
+	return ret;
+}
+
+/*
+ * Unregister a server
+ * Must be called with the server lock held.
+ */
+void smbiod_unregister_server(struct smb_sb_info *server)
+{
+	spin_lock(&servers_lock);
+	list_del_init(&server->entry);
+	VERBOSE("%p\n", server);
+	spin_unlock(&servers_lock);
+
+	smbiod_wake_up();
+	smbiod_flush(server);
+}
+
+void smbiod_flush(struct smb_sb_info *server)
+{
+	struct list_head *tmp, *n;
+	struct smb_request *req;
+
+	list_for_each_safe(tmp, n, &server->xmitq) {
+		req = list_entry(tmp, struct smb_request, rq_queue);
+		req->rq_errno = -EIO;
+		list_del_init(&req->rq_queue);
+		smb_rput(req);
+		wake_up_interruptible(&req->rq_wait);
+	}
+	list_for_each_safe(tmp, n, &server->recvq) {
+		req = list_entry(tmp, struct smb_request, rq_queue);
+		req->rq_errno = -EIO;
+		list_del_init(&req->rq_queue);
+		smb_rput(req);
+		wake_up_interruptible(&req->rq_wait);
+	}
+}
+
+/*
+ * Wake up smbmount and make it reconnect to the server.
+ * This must be called with the server locked.
+ *
+ * FIXME: add smbconnect version to this
+ */
+int smbiod_retry(struct smb_sb_info *server)
+{
+	struct list_head *head;
+	struct smb_request *req;
+	pid_t pid = server->conn_pid;
+	int result = 0;
+
+	VERBOSE("state: %d\n", server->state);
+	if (server->state == CONN_VALID || server->state == CONN_RETRYING)
+		goto out;
+
+	smb_invalidate_inodes(server);
+
+	/*
+	 * Some requests are meaningless after a retry, so we abort them.
+	 * One example is any request using a 'fileid', since the files are
+	 * closed on retry.
+	 */
+	head = server->xmitq.next;
+	while (head != &server->xmitq) {
+		req = list_entry(head, struct smb_request, rq_queue);
+		head = head->next;
+
+		req->rq_bytes_sent = 0;
+		if (req->rq_flags & SMB_REQ_NORETRY) {
+			VERBOSE("aborting request %p on xmitq\n", req);
+			req->rq_errno = -EIO;
+			list_del_init(&req->rq_queue);
+			smb_rput(req);
+			wake_up_interruptible(&req->rq_wait);
+		}
+	}
+
+	/*
+	 * FIXME: test the code for retrying requests we already sent
+	 */
+	head = server->recvq.next;
+	while (head != &server->recvq) {
+		req = list_entry(head, struct smb_request, rq_queue);
+		head = head->next;
+#if 0
+		if (req->rq_flags & SMB_REQ_RETRY) {
+			/* must move the request to the xmitq */
+			VERBOSE("retrying request %p on recvq\n", req);
+			list_del(&req->rq_queue);
+			list_add(&req->rq_queue, &server->xmitq);
+			continue;
+		}
+#endif
+
+		VERBOSE("aborting request %p on recvq\n", req);
+		/* req->rq_rcls = ???; */ /* FIXME: set smb error code too? */
+		req->rq_errno = -EIO;
+		list_del_init(&req->rq_queue);
+		smb_rput(req);
+		wake_up_interruptible(&req->rq_wait);
+	}
+
+	smb_close_socket(server);
+
+	if (pid == 0) {
+		/* FIXME: this is fatal, umount? */
+		printk(KERN_ERR "smb_retry: no connection process\n");
+		server->state = CONN_RETRIED;
+		goto out;
+	}
+
+	/*
+	 * Change state so that only one retry per server will be started.
+	 */
+	server->state = CONN_RETRYING;
+
+	/*
+	 * Note: use the "priv" flag, as a user process may need to reconnect.
+	 */
+	result = kill_proc(pid, SIGUSR1, 1);
+	if (result) {
+		/* FIXME: this is most likely fatal, umount? */
+		printk(KERN_ERR "smb_retry: signal failed [%d]\n", result);
+		goto out;
+	}
+	VERBOSE("signalled pid %d\n", pid);
+
+	/* FIXME: The retried requests should perhaps get a "time boost". */
+
+out:
+	return result;
+}
+
+/*
+ * Currently "handles" server-initiated packets (such as lockingX oplock
+ * breaks) by simply dropping them; smbfs does not implement oplocks.
+ */
+static void smbiod_handle_request(struct smb_sb_info *server)
+{
+	PARANOIA("smbiod got a request ... and we don't implement oplocks!\n");
+	server->rstate = SMB_RECV_DROP;
+}
+
+/*
+ * Do some IO for one server.
+ */
+static void smbiod_doio(struct smb_sb_info *server)
+{
+	int result;
+	int maxwork = 7;
+
+	if (server->state != CONN_VALID)
+		goto out;
+
+	do {
+		result = smb_request_recv(server);
+		if (result < 0) {
+			server->state = CONN_INVALID;
+			smbiod_retry(server);
+			goto out;	/* reconnecting is slow */
+		} else if (server->rstate == SMB_RECV_REQUEST)
+			smbiod_handle_request(server);
+	} while (result > 0 && maxwork-- > 0);
+
+	/*
+	 * If there is more to read, we want to be sure to wake up again.
+	 */
+	if (server->state != CONN_VALID)
+		goto out;
+	if (smb_recv_available(server) > 0)
+		set_bit(SMBIOD_DATA_READY, &smbiod_flags);
+
+	do {
+		result = smb_request_send_server(server);
+		if (result < 0) {
+			server->state = CONN_INVALID;
+			smbiod_retry(server);
+			goto out;	/* reconnecting is slow */
+		}
+	} while (result > 0);
+
+	/*
+	 * If the last request was not sent out we want to wake up again.
+	 */
+	if (!list_empty(&server->xmitq))
+		set_bit(SMBIOD_DATA_READY, &smbiod_flags);
+
+out:
+	return;
+}
+
+/*
+ * smbiod kernel thread
+ */
+static int smbiod(void *unused)
+{
+	daemonize("smbiod");
+
+	allow_signal(SIGKILL);
+
+	VERBOSE("SMB Kernel thread starting (%d) ...\n", current->pid);
+
+	for (;;) {
+		struct smb_sb_info *server;
+		struct list_head *pos, *n;
+
+		/* FIXME: Use poll? */
+		wait_event_interruptible(smbiod_wait,
+			 test_bit(SMBIOD_DATA_READY, &smbiod_flags));
+		if (signal_pending(current)) {
+			spin_lock(&servers_lock);
+			smbiod_state = SMBIOD_DEAD;
+			spin_unlock(&servers_lock);
+			break;
+		}
+
+		clear_bit(SMBIOD_DATA_READY, &smbiod_flags);
+
+		spin_lock(&servers_lock);
+		if (list_empty(&smb_servers)) {
+			smbiod_state = SMBIOD_DEAD;
+			spin_unlock(&servers_lock);
+			break;
+		}
+
+		list_for_each_safe(pos, n, &smb_servers) {
+			server = list_entry(pos, struct smb_sb_info, entry);
+			VERBOSE("checking server %p\n", server);
+
+			if (server->state == CONN_VALID) {
+				spin_unlock(&servers_lock);
+
+				smb_lock_server(server);
+				smbiod_doio(server);
+				smb_unlock_server(server);
+
+				spin_lock(&servers_lock);
+			}
+		}
+		spin_unlock(&servers_lock);
+	}
+
+	VERBOSE("SMB Kernel thread exiting (%d) ...\n", current->pid);
+	module_put_and_exit(0);
+}
diff --git a/fs/smbfs/sock.c b/fs/smbfs/sock.c
new file mode 100644
index 0000000..93f3cd2
--- /dev/null
+++ b/fs/smbfs/sock.c
@@ -0,0 +1,388 @@
+/*
+ *  sock.c
+ *
+ *  Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke
+ *  Copyright (C) 1997 by Volker Lendecke
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/tcp.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/smp_lock.h>
+#include <linux/workqueue.h>
+#include <net/scm.h>
+#include <net/ip.h>
+
+#include <linux/smb_fs.h>
+#include <linux/smb.h>
+#include <linux/smbno.h>
+
+#include <asm/uaccess.h>
+#include <asm/ioctls.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+#include "request.h"
+
+
+static int
+_recvfrom(struct socket *socket, unsigned char *ubuf, int size, unsigned flags)
+{
+	struct kvec iov = {ubuf, size};
+	struct msghdr msg = {.msg_flags = flags};
+	msg.msg_flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
+	return kernel_recvmsg(socket, &msg, &iov, 1, size, msg.msg_flags);
+}
+
+/*
+ * Return the server this socket belongs to
+ */
+static struct smb_sb_info *
+server_from_socket(struct socket *socket)
+{
+	return socket->sk->sk_user_data;
+}
+
+/*
+ * Called when there is data on the socket.
+ */
+void
+smb_data_ready(struct sock *sk, int len)
+{
+	struct smb_sb_info *server = server_from_socket(sk->sk_socket);
+	void (*data_ready)(struct sock *, int) = server->data_ready;
+
+	data_ready(sk, len);
+	VERBOSE("(%p, %d)\n", sk, len);
+	smbiod_wake_up();
+}
+
+int
+smb_valid_socket(struct inode * inode)
+{
+	return (inode && S_ISSOCK(inode->i_mode) && 
+		SOCKET_I(inode)->type == SOCK_STREAM);
+}
+
+static struct socket *
+server_sock(struct smb_sb_info *server)
+{
+	struct file *file;
+
+	if (server && (file = server->sock_file))
+	{
+#ifdef SMBFS_PARANOIA
+		if (!smb_valid_socket(file->f_dentry->d_inode))
+			PARANOIA("bad socket!\n");
+#endif
+		return SOCKET_I(file->f_dentry->d_inode);
+	}
+	return NULL;
+}
+
+void
+smb_close_socket(struct smb_sb_info *server)
+{
+	struct file * file = server->sock_file;
+
+	if (file) {
+		struct socket *sock = server_sock(server);
+
+		VERBOSE("closing socket %p\n", sock);
+		sock->sk->sk_data_ready = server->data_ready;
+		server->sock_file = NULL;
+		fput(file);
+	}
+}
+
+static int
+smb_get_length(struct socket *socket, unsigned char *header)
+{
+	int result;
+
+	result = _recvfrom(socket, header, 4, MSG_PEEK);
+	if (result == -EAGAIN)
+		return -ENODATA;
+	if (result < 0) {
+		PARANOIA("recv error = %d\n", -result);
+		return result;
+	}
+	if (result < 4)
+		return -ENODATA;
+
+	switch (header[0]) {
+	case 0x00:
+	case 0x82:
+		break;
+
+	case 0x85:
+		DEBUG1("Got SESSION KEEP ALIVE\n");
+		_recvfrom(socket, header, 4, 0);	/* read away */
+		return -ENODATA;
+
+	default:
+		PARANOIA("Invalid NBT packet, code=%x\n", header[0]);
+		return -EIO;
+	}
+
+	/* The length in the RFC NB header is the raw data length */
+	return smb_len(header);
+}
+
+int
+smb_recv_available(struct smb_sb_info *server)
+{
+	mm_segment_t oldfs;
+	int avail, err;
+	struct socket *sock = server_sock(server);
+
+	oldfs = get_fs();
+	set_fs(get_ds());
+	err = sock->ops->ioctl(sock, SIOCINQ, (unsigned long) &avail);
+	set_fs(oldfs);
+	return (err >= 0) ? avail : err;
+}
+
+/*
+ * Adjust the kvec array to skip 'amount' bytes that have already been transferred (adapted from nfs/sunrpc)
+ */
+static int
+smb_move_iov(struct kvec **data, size_t *num, struct kvec *vec, unsigned amount)
+{
+	struct kvec *iv = *data;
+	int i;
+	int len;
+
+	/*
+	 *	Eat any sent kvecs
+	 */
+	while (iv->iov_len <= amount) {
+		amount -= iv->iov_len;
+		iv++;
+		(*num)--;
+	}
+
+	/*
+	 *	And chew down the partial one
+	 */
+	vec[0].iov_len = iv->iov_len-amount;
+	vec[0].iov_base =((unsigned char *)iv->iov_base)+amount;
+	iv++;
+
+	len = vec[0].iov_len;
+
+	/*
+	 *	And copy any others
+	 */
+	for (i = 1; i < *num; i++) {
+		vec[i] = *iv++;
+		len += vec[i].iov_len;
+	}
+
+	*data = vec;
+	return len;
+}
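+
+/*
+ * Illustrative example of the helper above (not part of the interface):
+ * with two kvecs of 4 and 100 bytes and amount == 10, the first kvec is
+ * consumed entirely, the second is advanced by the remaining 6 bytes,
+ * and the function returns 94 -- the number of bytes still to transfer.
+ */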
+
+/*
+ * smb_receive_header
+ * Only called by the smbiod thread.
+ */
+int
+smb_receive_header(struct smb_sb_info *server)
+{
+	struct socket *sock;
+	int result = 0;
+	unsigned char peek_buf[4];
+
+	result = -EIO; 
+	sock = server_sock(server);
+	if (!sock)
+		goto out;
+	if (sock->sk->sk_state != TCP_ESTABLISHED)
+		goto out;
+
+	if (!server->smb_read) {
+		result = smb_get_length(sock, peek_buf);
+		if (result < 0) {
+			if (result == -ENODATA)
+				result = 0;
+			goto out;
+		}
+		server->smb_len = result + 4;
+
+		if (server->smb_len < SMB_HEADER_LEN) {
+			PARANOIA("short packet: %d\n", result);
+			server->rstate = SMB_RECV_DROP;
+			result = -EIO;
+			goto out;
+		}
+		if (server->smb_len > SMB_MAX_PACKET_SIZE) {
+			PARANOIA("long packet: %d\n", result);
+			server->rstate = SMB_RECV_DROP;
+			result = -EIO;
+			goto out;
+		}
+	}
+
+	result = _recvfrom(sock, server->header + server->smb_read,
+			   SMB_HEADER_LEN - server->smb_read, 0);
+	VERBOSE("_recvfrom: %d\n", result);
+	if (result < 0) {
+		VERBOSE("receive error: %d\n", result);
+		goto out;
+	}
+	server->smb_read += result;
+
+	if (server->smb_read == SMB_HEADER_LEN)
+		server->rstate = SMB_RECV_HCOMPLETE;
+out:
+	return result;
+}
+
+static char drop_buffer[PAGE_SIZE];
+
+/*
+ * smb_receive_drop - read and throw away the data
+ * Only called by the smbiod thread.
+ *
+ * FIXME: we are in the kernel, could we just tell the socket that we want
+ * to drop stuff from the buffer?
+ */
+int
+smb_receive_drop(struct smb_sb_info *server)
+{
+	struct socket *sock;
+	unsigned int flags;
+	struct kvec iov;
+	struct msghdr msg;
+	int rlen = smb_len(server->header) - server->smb_read + 4;
+	int result = -EIO;
+
+	if (rlen > PAGE_SIZE)
+		rlen = PAGE_SIZE;
+
+	sock = server_sock(server);
+	if (!sock)
+		goto out;
+	if (sock->sk->sk_state != TCP_ESTABLISHED)
+		goto out;
+
+	flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+	iov.iov_base = drop_buffer;
+	iov.iov_len = PAGE_SIZE;
+	msg.msg_flags = flags;
+	msg.msg_name = NULL;
+	msg.msg_namelen = 0;
+	msg.msg_control = NULL;
+
+	result = kernel_recvmsg(sock, &msg, &iov, 1, rlen, flags);
+
+	VERBOSE("read: %d\n", result);
+	if (result < 0) {
+		VERBOSE("receive error: %d\n", result);
+		goto out;
+	}
+	server->smb_read += result;
+
+	if (server->smb_read >= server->smb_len)
+		server->rstate = SMB_RECV_END;
+
+out:
+	return result;
+}
+
+/*
+ * smb_receive
+ * Only called by the smbiod thread.
+ */
+int
+smb_receive(struct smb_sb_info *server, struct smb_request *req)
+{
+	struct socket *sock;
+	unsigned int flags;
+	struct kvec iov[4];
+	struct kvec *p = req->rq_iov;
+	size_t num = req->rq_iovlen;
+	struct msghdr msg;
+	int rlen;
+	int result = -EIO;
+
+	sock = server_sock(server);
+	if (!sock)
+		goto out;
+	if (sock->sk->sk_state != TCP_ESTABLISHED)
+		goto out;
+
+	flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+	msg.msg_flags = flags;
+	msg.msg_name = NULL;
+	msg.msg_namelen = 0;
+	msg.msg_control = NULL;
+
+	/* Don't repeat bytes and count the available buffer space */
+	rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd);
+	if (req->rq_rlen < rlen)
+		rlen = req->rq_rlen;
+
+	result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
+
+	VERBOSE("read: %d\n", result);
+	if (result < 0) {
+		VERBOSE("receive error: %d\n", result);
+		goto out;
+	}
+	req->rq_bytes_recvd += result;
+	server->smb_read += result;
+
+out:
+	return result;
+}
+
+/*
+ * Try to send an SMB request.  This may return after sending only part of
+ * the request.  SMB_REQ_TRANSMITTED will be set if the request was fully sent.
+ *
+ * Parts of this were taken from xprt_sendmsg() in net/sunrpc/xprt.c
+ */
+int
+smb_send_request(struct smb_request *req)
+{
+	struct smb_sb_info *server = req->rq_server;
+	struct socket *sock;
+	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
+	int slen = req->rq_slen - req->rq_bytes_sent;
+	int result = -EIO;
+	struct kvec iov[4];
+	struct kvec *p = req->rq_iov;
+	size_t num = req->rq_iovlen;
+
+	sock = server_sock(server);
+	if (!sock)
+		goto out;
+	if (sock->sk->sk_state != TCP_ESTABLISHED)
+		goto out;
+
+	/* Don't repeat bytes that were already sent */
+	if (req->rq_bytes_sent)
+		smb_move_iov(&p, &num, iov, req->rq_bytes_sent);
+
+	result = kernel_sendmsg(sock, &msg, p, num, slen);
+
+	if (result >= 0) {
+		req->rq_bytes_sent += result;
+		if (req->rq_bytes_sent >= req->rq_slen)
+			req->rq_flags |= SMB_REQ_TRANSMITTED;
+	}
+out:
+	return result;
+}
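+
+/*
+ * Illustrative sequence for the partial-send handling above: if rq_slen is
+ * 100 and the first call manages to send only 60 bytes, rq_bytes_sent
+ * becomes 60 and SMB_REQ_TRANSMITTED stays clear; the next call skips those
+ * 60 bytes via smb_move_iov() and, once the remaining 40 are sent,
+ * rq_bytes_sent reaches rq_slen and SMB_REQ_TRANSMITTED is set.
+ */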
diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
new file mode 100644
index 0000000..8b069e0
--- /dev/null
+++ b/fs/smbfs/symlink.c
@@ -0,0 +1,70 @@
+/*
+ *  symlink.c
+ *
+ *  Copyright (C) 2002 by John Newbigin
+ *
+ *  Please add a note about your changes to smbfs in the ChangeLog file.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/net.h>
+#include <linux/namei.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/smbno.h>
+#include <linux/smb_fs.h>
+
+#include "smb_debug.h"
+#include "proto.h"
+
+int smb_symlink(struct inode *inode, struct dentry *dentry, const char *oldname)
+{
+	DEBUG1("create symlink %s -> %s/%s\n", oldname, DENTRY_PATH(dentry));
+
+	return smb_proc_symlink(server_from_dentry(dentry), dentry, oldname);
+}
+
+static int smb_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *link = __getname();
+	DEBUG1("followlink of %s/%s\n", DENTRY_PATH(dentry));
+
+	if (!link) {
+		link = ERR_PTR(-ENOMEM);
+	} else {
+		int len = smb_proc_read_link(server_from_dentry(dentry),
+						dentry, link, PATH_MAX - 1);
+		if (len < 0) {
+			putname(link);
+			link = ERR_PTR(len);
+		} else {
+			link[len] = 0;
+		}
+	}
+	nd_set_link(nd, link);
+	return 0;
+}
+
+static void smb_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s))
+		putname(s);
+}
+
+struct inode_operations smb_link_inode_operations =
+{
+	.readlink	= generic_readlink,
+	.follow_link	= smb_follow_link,
+	.put_link	= smb_put_link,
+};
diff --git a/fs/stat.c b/fs/stat.c
new file mode 100644
index 0000000..b8a0e51
--- /dev/null
+++ b/fs/stat.c
@@ -0,0 +1,410 @@
+/*
+ *  linux/fs/stat.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+void generic_fillattr(struct inode *inode, struct kstat *stat)
+{
+	stat->dev = inode->i_sb->s_dev;
+	stat->ino = inode->i_ino;
+	stat->mode = inode->i_mode;
+	stat->nlink = inode->i_nlink;
+	stat->uid = inode->i_uid;
+	stat->gid = inode->i_gid;
+	stat->rdev = inode->i_rdev;
+	stat->atime = inode->i_atime;
+	stat->mtime = inode->i_mtime;
+	stat->ctime = inode->i_ctime;
+	stat->size = i_size_read(inode);
+	stat->blocks = inode->i_blocks;
+	stat->blksize = inode->i_blksize;
+}
+
+EXPORT_SYMBOL(generic_fillattr);
+
+int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	int retval;
+
+	retval = security_inode_getattr(mnt, dentry);
+	if (retval)
+		return retval;
+
+	if (inode->i_op->getattr)
+		return inode->i_op->getattr(mnt, dentry, stat);
+
+	generic_fillattr(inode, stat);
+	if (!stat->blksize) {
+		struct super_block *s = inode->i_sb;
+		unsigned blocks;
+		blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
+		stat->blocks = (s->s_blocksize / 512) * blocks;
+		stat->blksize = s->s_blocksize;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(vfs_getattr);
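+
+/*
+ * Worked example for the blksize fallback above (illustrative only): for a
+ * 5000 byte file on a filesystem with a 4096 byte block size, blocks is
+ * (5000 + 4095) >> 12 == 2, so stat->blocks becomes (4096 / 512) * 2 == 16
+ * sectors of 512 bytes and stat->blksize is reported as 4096.
+ */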
+
+int vfs_stat(char __user *name, struct kstat *stat)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(name, &nd);
+	if (!error) {
+		error = vfs_getattr(nd.mnt, nd.dentry, stat);
+		path_release(&nd);
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(vfs_stat);
+
+int vfs_lstat(char __user *name, struct kstat *stat)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk_link(name, &nd);
+	if (!error) {
+		error = vfs_getattr(nd.mnt, nd.dentry, stat);
+		path_release(&nd);
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(vfs_lstat);
+
+int vfs_fstat(unsigned int fd, struct kstat *stat)
+{
+	struct file *f = fget(fd);
+	int error = -EBADF;
+
+	if (f) {
+		error = vfs_getattr(f->f_vfsmnt, f->f_dentry, stat);
+		fput(f);
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(vfs_fstat);
+
+#ifdef __ARCH_WANT_OLD_STAT
+
+/*
+ * For backward compatibility?  Maybe this should be moved
+ * into arch/i386 instead?
+ */
+static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
+{
+	static int warncount = 5;
+	struct __old_kernel_stat tmp;
+	
+	if (warncount > 0) {
+		warncount--;
+		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
+			current->comm);
+	} else if (warncount < 0) {
+		/* it's laughable, but... */
+		warncount = 0;
+	}
+
+	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
+	tmp.st_dev = old_encode_dev(stat->dev);
+	tmp.st_ino = stat->ino;
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = stat->nlink;
+	if (tmp.st_nlink != stat->nlink)
+		return -EOVERFLOW;
+	SET_UID(tmp.st_uid, stat->uid);
+	SET_GID(tmp.st_gid, stat->gid);
+	tmp.st_rdev = old_encode_dev(stat->rdev);
+#if BITS_PER_LONG == 32
+	if (stat->size > MAX_NON_LFS)
+		return -EOVERFLOW;
+#endif	
+	tmp.st_size = stat->size;
+	tmp.st_atime = stat->atime.tv_sec;
+	tmp.st_mtime = stat->mtime.tv_sec;
+	tmp.st_ctime = stat->ctime.tv_sec;
+	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_stat(filename, &stat);
+
+	if (!error)
+		error = cp_old_stat(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_lstat(filename, &stat);
+
+	if (!error)
+		error = cp_old_stat(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_fstat(unsigned int fd, struct __old_kernel_stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_fstat(fd, &stat);
+
+	if (!error)
+		error = cp_old_stat(&stat, statbuf);
+
+	return error;
+}
+
+#endif /* __ARCH_WANT_OLD_STAT */
+
+static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
+{
+	struct stat tmp;
+
+#if BITS_PER_LONG == 32
+	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+#else
+	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+#endif
+
+	memset(&tmp, 0, sizeof(tmp));
+#if BITS_PER_LONG == 32
+	tmp.st_dev = old_encode_dev(stat->dev);
+#else
+	tmp.st_dev = new_encode_dev(stat->dev);
+#endif
+	tmp.st_ino = stat->ino;
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = stat->nlink;
+	if (tmp.st_nlink != stat->nlink)
+		return -EOVERFLOW;
+	SET_UID(tmp.st_uid, stat->uid);
+	SET_GID(tmp.st_gid, stat->gid);
+#if BITS_PER_LONG == 32
+	tmp.st_rdev = old_encode_dev(stat->rdev);
+#else
+	tmp.st_rdev = new_encode_dev(stat->rdev);
+#endif
+#if BITS_PER_LONG == 32
+	if (stat->size > MAX_NON_LFS)
+		return -EOVERFLOW;
+#endif	
+	tmp.st_size = stat->size;
+	tmp.st_atime = stat->atime.tv_sec;
+	tmp.st_mtime = stat->mtime.tv_sec;
+	tmp.st_ctime = stat->ctime.tv_sec;
+#ifdef STAT_HAVE_NSEC
+	tmp.st_atime_nsec = stat->atime.tv_nsec;
+	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
+	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
+#endif
+	tmp.st_blocks = stat->blocks;
+	tmp.st_blksize = stat->blksize;
+	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+asmlinkage long sys_newstat(char __user * filename, struct stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_stat(filename, &stat);
+
+	if (!error)
+		error = cp_new_stat(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_newlstat(char __user * filename, struct stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_lstat(filename, &stat);
+
+	if (!error)
+		error = cp_new_stat(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_newfstat(unsigned int fd, struct stat __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_fstat(fd, &stat);
+
+	if (!error)
+		error = cp_new_stat(&stat, statbuf);
+
+	return error;
+}
+
+asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bufsiz)
+{
+	struct nameidata nd;
+	int error;
+
+	if (bufsiz <= 0)
+		return -EINVAL;
+
+	error = user_path_walk_link(path, &nd);
+	if (!error) {
+		struct inode * inode = nd.dentry->d_inode;
+
+		error = -EINVAL;
+		if (inode->i_op && inode->i_op->readlink) {
+			error = security_inode_readlink(nd.dentry);
+			if (!error) {
+				touch_atime(nd.mnt, nd.dentry);
+				error = inode->i_op->readlink(nd.dentry, buf, bufsiz);
+			}
+		}
+		path_release(&nd);
+	}
+	return error;
+}
+
+
+/* ---------- LFS-64 ----------- */
+#ifdef __ARCH_WANT_STAT64
+
+static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
+{
+	struct stat64 tmp;
+
+	memset(&tmp, 0, sizeof(struct stat64));
+#ifdef CONFIG_MIPS
+	/* mips has weird padding, so we don't get 64 bits there */
+	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+	tmp.st_dev = new_encode_dev(stat->dev);
+	tmp.st_rdev = new_encode_dev(stat->rdev);
+#else
+	tmp.st_dev = huge_encode_dev(stat->dev);
+	tmp.st_rdev = huge_encode_dev(stat->rdev);
+#endif
+	tmp.st_ino = stat->ino;
+#ifdef STAT64_HAS_BROKEN_ST_INO
+	tmp.__st_ino = stat->ino;
+#endif
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = stat->nlink;
+	tmp.st_uid = stat->uid;
+	tmp.st_gid = stat->gid;
+	tmp.st_atime = stat->atime.tv_sec;
+	tmp.st_atime_nsec = stat->atime.tv_nsec;
+	tmp.st_mtime = stat->mtime.tv_sec;
+	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
+	tmp.st_ctime = stat->ctime.tv_sec;
+	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
+	tmp.st_size = stat->size;
+	tmp.st_blocks = stat->blocks;
+	tmp.st_blksize = stat->blksize;
+	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+asmlinkage long sys_stat64(char __user * filename, struct stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_stat(filename, &stat);
+
+	if (!error)
+		error = cp_new_stat64(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_lstat64(char __user * filename, struct stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_lstat(filename, &stat);
+
+	if (!error)
+		error = cp_new_stat64(&stat, statbuf);
+
+	return error;
+}
+asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_fstat(fd, &stat);
+
+	if (!error)
+		error = cp_new_stat64(&stat, statbuf);
+
+	return error;
+}
+
+#endif /* __ARCH_WANT_STAT64 */
+
+void inode_add_bytes(struct inode *inode, loff_t bytes)
+{
+	spin_lock(&inode->i_lock);
+	inode->i_blocks += bytes >> 9;
+	bytes &= 511;
+	inode->i_bytes += bytes;
+	if (inode->i_bytes >= 512) {
+		inode->i_blocks++;
+		inode->i_bytes -= 512;
+	}
+	spin_unlock(&inode->i_lock);
+}
+
+EXPORT_SYMBOL(inode_add_bytes);
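+
+/*
+ * Worked example (illustrative): adding 1300 bytes to an inode that starts
+ * at i_blocks == 0 and i_bytes == 0 gives i_blocks == 2 (1300 >> 9) and
+ * i_bytes == 276 (1300 & 511); adding a further 300 bytes pushes i_bytes
+ * to 576, which carries over to i_blocks == 3 and i_bytes == 64, i.e.
+ * 3 * 512 + 64 == 1600 bytes in total.
+ */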
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+	spin_lock(&inode->i_lock);
+	inode->i_blocks -= bytes >> 9;
+	bytes &= 511;
+	if (inode->i_bytes < bytes) {
+		inode->i_blocks--;
+		inode->i_bytes += 512;
+	}
+	inode->i_bytes -= bytes;
+	spin_unlock(&inode->i_lock);
+}
+
+EXPORT_SYMBOL(inode_sub_bytes);
+
+loff_t inode_get_bytes(struct inode *inode)
+{
+	loff_t ret;
+
+	spin_lock(&inode->i_lock);
+	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+	spin_unlock(&inode->i_lock);
+	return ret;
+}
+
+EXPORT_SYMBOL(inode_get_bytes);
+
+void inode_set_bytes(struct inode *inode, loff_t bytes)
+{
+	/* Caller is here responsible for sufficient locking
+	 * (ie. inode->i_lock) */
+	inode->i_blocks = bytes >> 9;
+	inode->i_bytes = bytes & 511;
+}
+
+EXPORT_SYMBOL(inode_set_bytes);
diff --git a/fs/super.c b/fs/super.c
new file mode 100644
index 0000000..3a1b8ca
--- /dev/null
+++ b/fs/super.c
@@ -0,0 +1,860 @@
+/*
+ *  linux/fs/super.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  super.c contains code to handle: - mount structures
+ *                                   - super-block tables
+ *                                   - filesystem drivers list
+ *                                   - mount system call
+ *                                   - umount system call
+ *                                   - ustat system call
+ *
+ * GK 2/5/95  -  Changed to support mounting the root fs via NFS
+ *
+ *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
+ *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
+ *  Added options to /proc/mounts:
+ *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
+ *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
+ *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/acct.h>
+#include <linux/blkdev.h>
+#include <linux/quotaops.h>
+#include <linux/namei.h>
+#include <linux/buffer_head.h>		/* for fsync_super() */
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/vfs.h>
+#include <linux/writeback.h>		/* for the emergency remount stuff */
+#include <linux/idr.h>
+#include <linux/kobject.h>
+#include <asm/uaccess.h>
+
+
+void get_filesystem(struct file_system_type *fs);
+void put_filesystem(struct file_system_type *fs);
+struct file_system_type *get_fs_type(const char *name);
+
+LIST_HEAD(super_blocks);
+DEFINE_SPINLOCK(sb_lock);
+
+/**
+ *	alloc_super	-	create new superblock
+ *
+ *	Allocates and initializes a new &struct super_block.  alloc_super()
+ *	returns a pointer to the new superblock or %NULL if the allocation failed.
+ */
+static struct super_block *alloc_super(void)
+{
+	struct super_block *s = kmalloc(sizeof(struct super_block),  GFP_USER);
+	static struct super_operations default_op;
+
+	if (s) {
+		memset(s, 0, sizeof(struct super_block));
+		if (security_sb_alloc(s)) {
+			kfree(s);
+			s = NULL;
+			goto out;
+		}
+		INIT_LIST_HEAD(&s->s_dirty);
+		INIT_LIST_HEAD(&s->s_io);
+		INIT_LIST_HEAD(&s->s_files);
+		INIT_LIST_HEAD(&s->s_instances);
+		INIT_HLIST_HEAD(&s->s_anon);
+		INIT_LIST_HEAD(&s->s_inodes);
+		init_rwsem(&s->s_umount);
+		sema_init(&s->s_lock, 1);
+		down_write(&s->s_umount);
+		s->s_count = S_BIAS;
+		atomic_set(&s->s_active, 1);
+		sema_init(&s->s_vfs_rename_sem,1);
+		sema_init(&s->s_dquot.dqio_sem, 1);
+		sema_init(&s->s_dquot.dqonoff_sem, 1);
+		init_rwsem(&s->s_dquot.dqptr_sem);
+		init_waitqueue_head(&s->s_wait_unfrozen);
+		s->s_maxbytes = MAX_NON_LFS;
+		s->dq_op = sb_dquot_ops;
+		s->s_qcop = sb_quotactl_ops;
+		s->s_op = &default_op;
+		s->s_time_gran = 1000000000;
+	}
+out:
+	return s;
+}
+
+/**
+ *	destroy_super	-	frees a superblock
+ *	@s: superblock to free
+ *
+ *	Frees a superblock.
+ */
+static inline void destroy_super(struct super_block *s)
+{
+	security_sb_free(s);
+	kfree(s);
+}
+
+/* Superblock refcounting  */
+
+/*
+ * Drop a superblock's refcount.  Returns non-zero if the superblock was
+ * destroyed.  The caller must hold sb_lock.
+ */
+int __put_super(struct super_block *sb)
+{
+	int ret = 0;
+
+	if (!--sb->s_count) {
+		destroy_super(sb);
+		ret = 1;
+	}
+	return ret;
+}
+
+/*
+ * Drop a superblock's refcount.
+ * Returns non-zero if the superblock is about to be destroyed and has at
+ * least already been removed from the super_blocks list, so callers that
+ * are looping over super blocks need to restart the scan.
+ * The caller must hold sb_lock.
+ */
+int __put_super_and_need_restart(struct super_block *sb)
+{
+	/* check for race with generic_shutdown_super() */
+	if (list_empty(&sb->s_list)) {
+		/* super block is removed, need to restart... */
+		__put_super(sb);
+		return 1;
+	}
+	/* can't be the last, since s_list is still in use */
+	sb->s_count--;
+	BUG_ON(sb->s_count == 0);
+	return 0;
+}
+
+/**
+ *	put_super	-	drop a temporary reference to superblock
+ *	@sb: superblock in question
+ *
+ *	Drops a temporary reference, frees superblock if there's no
+ *	references left.
+ */
+static void put_super(struct super_block *sb)
+{
+	spin_lock(&sb_lock);
+	__put_super(sb);
+	spin_unlock(&sb_lock);
+}
+
+
+/**
+ *	deactivate_super	-	drop an active reference to superblock
+ *	@s: superblock to deactivate
+ *
+ *	Drops an active reference to the superblock, acquiring a temporary one if
+ *	there are no active references left.  In that case we lock the superblock,
+ *	tell the fs driver to shut it down and drop the temporary reference we
+ *	had just acquired.
+ */
+void deactivate_super(struct super_block *s)
+{
+	struct file_system_type *fs = s->s_type;
+	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
+		s->s_count -= S_BIAS-1;
+		spin_unlock(&sb_lock);
+		down_write(&s->s_umount);
+		fs->kill_sb(s);
+		put_filesystem(fs);
+		put_super(s);
+	}
+}
+
+EXPORT_SYMBOL(deactivate_super);
+
+/**
+ *	grab_super - acquire an active reference
+ *	@s: reference we are trying to make active
+ *
+ *	Tries to acquire an active reference.  grab_super() is used when we
+ * 	had just found a superblock in super_blocks or fs_type->fs_supers
+ *	and want to turn it into a full-blown active reference.  grab_super()
+ *	is called with sb_lock held and drops it.  Returns 1 in case of
+ *	success, 0 on failure (the superblock was already dead or dying
+ *	when grab_super() was called).
+ */
+static int grab_super(struct super_block *s)
+{
+	s->s_count++;
+	spin_unlock(&sb_lock);
+	down_write(&s->s_umount);
+	if (s->s_root) {
+		spin_lock(&sb_lock);
+		if (s->s_count > S_BIAS) {
+			atomic_inc(&s->s_active);
+			s->s_count--;
+			spin_unlock(&sb_lock);
+			return 1;
+		}
+		spin_unlock(&sb_lock);
+	}
+	up_write(&s->s_umount);
+	put_super(s);
+	yield();
+	return 0;
+}
+
+/**
+ *	generic_shutdown_super	-	common helper for ->kill_sb()
+ *	@sb: superblock to kill
+ *
+ *	generic_shutdown_super() does all fs-independent work on superblock
+ *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
+ *	that need destruction out of superblock, call generic_shutdown_super()
+ *	and release aforementioned objects.  Note: dentries and inodes _are_
+ *	taken care of and do not need specific handling.
+ */
+void generic_shutdown_super(struct super_block *sb)
+{
+	struct dentry *root = sb->s_root;
+	struct super_operations *sop = sb->s_op;
+
+	if (root) {
+		sb->s_root = NULL;
+		shrink_dcache_parent(root);
+		shrink_dcache_anon(&sb->s_anon);
+		dput(root);
+		fsync_super(sb);
+		lock_super(sb);
+		sb->s_flags &= ~MS_ACTIVE;
+		/* bad name - it should be evict_inodes() */
+		invalidate_inodes(sb);
+		lock_kernel();
+
+		if (sop->write_super && sb->s_dirt)
+			sop->write_super(sb);
+		if (sop->put_super)
+			sop->put_super(sb);
+
+		/* Forget any remaining inodes */
+		if (invalidate_inodes(sb)) {
+			printk("VFS: Busy inodes after unmount. "
+			   "Self-destruct in 5 seconds.  Have a nice day...\n");
+		}
+
+		unlock_kernel();
+		unlock_super(sb);
+	}
+	spin_lock(&sb_lock);
+	/* should be initialized for __put_super_and_need_restart() */
+	list_del_init(&sb->s_list);
+	list_del(&sb->s_instances);
+	spin_unlock(&sb_lock);
+	up_write(&sb->s_umount);
+}
+
+EXPORT_SYMBOL(generic_shutdown_super);
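+
+/*
+ * Sketch of the ->kill_sb() convention described above (hypothetical
+ * filesystem that keeps its private data in sb->s_fs_info; illustration
+ * only, not part of this patch's interface):
+ *
+ *	static void examplefs_kill_sb(struct super_block *sb)
+ *	{
+ *		struct examplefs_sb_info *sbi = sb->s_fs_info;
+ *
+ *		generic_shutdown_super(sb);
+ *		kfree(sbi);
+ *	}
+ */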
+
+/**
+ *	sget	-	find or create a superblock
+ *	@type:	filesystem type superblock should belong to
+ *	@test:	comparison callback
+ *	@set:	setup callback
+ *	@data:	argument to each of them
+ */
+struct super_block *sget(struct file_system_type *type,
+			int (*test)(struct super_block *,void *),
+			int (*set)(struct super_block *,void *),
+			void *data)
+{
+	struct super_block *s = NULL;
+	struct list_head *p;
+	int err;
+
+retry:
+	spin_lock(&sb_lock);
+	if (test) list_for_each(p, &type->fs_supers) {
+		struct super_block *old;
+		old = list_entry(p, struct super_block, s_instances);
+		if (!test(old, data))
+			continue;
+		if (!grab_super(old))
+			goto retry;
+		if (s)
+			destroy_super(s);
+		return old;
+	}
+	if (!s) {
+		spin_unlock(&sb_lock);
+		s = alloc_super();
+		if (!s)
+			return ERR_PTR(-ENOMEM);
+		goto retry;
+	}
+		
+	err = set(s, data);
+	if (err) {
+		spin_unlock(&sb_lock);
+		destroy_super(s);
+		return ERR_PTR(err);
+	}
+	s->s_type = type;
+	strlcpy(s->s_id, type->name, sizeof(s->s_id));
+	list_add_tail(&s->s_list, &super_blocks);
+	list_add(&s->s_instances, &type->fs_supers);
+	spin_unlock(&sb_lock);
+	get_filesystem(type);
+	return s;
+}
+
+EXPORT_SYMBOL(sget);
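+
+/*
+ * Usage note (see get_sb_bdev() and get_sb_nodev() below): block-based
+ * filesystems pass test_bdev_super()/set_bdev_super() so that mounting the
+ * same device twice returns the existing superblock, while get_sb_nodev()
+ * passes a NULL @test and therefore always allocates a fresh one.
+ */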
+
+void drop_super(struct super_block *sb)
+{
+	up_read(&sb->s_umount);
+	put_super(sb);
+}
+
+EXPORT_SYMBOL(drop_super);
+
+static inline void write_super(struct super_block *sb)
+{
+	lock_super(sb);
+	if (sb->s_root && sb->s_dirt)
+		if (sb->s_op->write_super)
+			sb->s_op->write_super(sb);
+	unlock_super(sb);
+}
+
+/*
+ * Note: check the dirty flag before waiting, so we don't
+ * hold up the sync while mounting a device. (The newly
+ * mounted device won't need syncing.)
+ */
+void sync_supers(void)
+{
+	struct super_block * sb;
+restart:
+	spin_lock(&sb_lock);
+	sb = sb_entry(super_blocks.next);
+	while (sb != sb_entry(&super_blocks))
+		if (sb->s_dirt) {
+			sb->s_count++;
+			spin_unlock(&sb_lock);
+			down_read(&sb->s_umount);
+			write_super(sb);
+			drop_super(sb);
+			goto restart;
+		} else
+			sb = sb_entry(sb->s_list.next);
+	spin_unlock(&sb_lock);
+}
+
+/*
+ * Call the ->sync_fs super_op against all filesystems which are r/w and
+ * which implement it.
+ *
+ * This operation is careful to avoid the livelock which could easily happen
+ * if two or more filesystems are being continuously dirtied.  s_need_sync_fs
+ * is used only here.  We set it against all filesystems and then clear it as
+ * we sync them.  So redirtied filesystems are skipped.
+ *
+ * But if process A is currently running sync_filesystems and then process B
+ * calls sync_filesystems as well, process B will set all the s_need_sync_fs
+ * flags again, which will cause process A to resync everything.  Fix that with
+ * a local mutex.
+ *
+ * (Fabian) Avoid sync_fs with clean fs & wait mode 0
+ */
+void sync_filesystems(int wait)
+{
+	struct super_block *sb;
+	static DECLARE_MUTEX(mutex);
+
+	down(&mutex);		/* Could be down_interruptible */
+	spin_lock(&sb_lock);
+	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
+			sb = sb_entry(sb->s_list.next)) {
+		if (!sb->s_op->sync_fs)
+			continue;
+		if (sb->s_flags & MS_RDONLY)
+			continue;
+		sb->s_need_sync_fs = 1;
+	}
+	spin_unlock(&sb_lock);
+
+restart:
+	spin_lock(&sb_lock);
+	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
+			sb = sb_entry(sb->s_list.next)) {
+		if (!sb->s_need_sync_fs)
+			continue;
+		sb->s_need_sync_fs = 0;
+		if (sb->s_flags & MS_RDONLY)
+			continue;	/* hm.  Was remounted r/o meanwhile */
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root && (wait || sb->s_dirt))
+			sb->s_op->sync_fs(sb, wait);
+		drop_super(sb);
+		goto restart;
+	}
+	spin_unlock(&sb_lock);
+	up(&mutex);
+}
+
+/**
+ *	get_super - get the superblock of a device
+ *	@bdev: device to get the superblock for
+ *	
+ *	Scans the superblock list and finds the superblock of the file system
+ *	mounted on the device given. %NULL is returned if no match is found.
+ */
+
+struct super_block * get_super(struct block_device *bdev)
+{
+	struct list_head *p;
+	if (!bdev)
+		return NULL;
+rescan:
+	spin_lock(&sb_lock);
+	list_for_each(p, &super_blocks) {
+		struct super_block *s = sb_entry(p);
+		if (s->s_bdev == bdev) {
+			s->s_count++;
+			spin_unlock(&sb_lock);
+			down_read(&s->s_umount);
+			if (s->s_root)
+				return s;
+			drop_super(s);
+			goto rescan;
+		}
+	}
+	spin_unlock(&sb_lock);
+	return NULL;
+}
+
+EXPORT_SYMBOL(get_super);
+ 
+struct super_block * user_get_super(dev_t dev)
+{
+	struct list_head *p;
+
+rescan:
+	spin_lock(&sb_lock);
+	list_for_each(p, &super_blocks) {
+		struct super_block *s = sb_entry(p);
+		if (s->s_dev ==  dev) {
+			s->s_count++;
+			spin_unlock(&sb_lock);
+			down_read(&s->s_umount);
+			if (s->s_root)
+				return s;
+			drop_super(s);
+			goto rescan;
+		}
+	}
+	spin_unlock(&sb_lock);
+	return NULL;
+}
+
+EXPORT_SYMBOL(user_get_super);
+
+asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
+{
+	struct super_block *s;
+	struct ustat tmp;
+	struct kstatfs sbuf;
+	int err = -EINVAL;
+
+	s = user_get_super(new_decode_dev(dev));
+	if (s == NULL)
+		goto out;
+	err = vfs_statfs(s, &sbuf);
+	drop_super(s);
+	if (err)
+		goto out;
+
+	memset(&tmp, 0, sizeof(struct ustat));
+	tmp.f_tfree = sbuf.f_bfree;
+	tmp.f_tinode = sbuf.f_ffree;
+
+	err = copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
+out:
+	return err;
+}
+
+/**
+ *	mark_files_ro
+ *	@sb: superblock in question
+ *
+ *	All files are marked read-only.  We don't care about pending
+ *	deletes, so this should only be used in 'force' mode.
+ */
+
+static void mark_files_ro(struct super_block *sb)
+{
+	struct file *f;
+
+	file_list_lock();
+	list_for_each_entry(f, &sb->s_files, f_list) {
+		if (S_ISREG(f->f_dentry->d_inode->i_mode) && file_count(f))
+			f->f_mode &= ~FMODE_WRITE;
+	}
+	file_list_unlock();
+}
+
+/**
+ *	do_remount_sb - asks filesystem to change mount options.
+ *	@sb:	superblock in question
+ *	@flags:	numeric part of options
+ *	@data:	the rest of options
+ *	@force:	whether or not to force the change
+ *
+ *	Alters the mount options of a mounted file system.
+ */
+int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+{
+	int retval;
+	
+	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
+		return -EACCES;
+	if (flags & MS_RDONLY)
+		acct_auto_close(sb);
+	shrink_dcache_sb(sb);
+	fsync_super(sb);
+
+	/* If we are remounting RDONLY and current sb is read/write,
+	   make sure there are no rw files opened */
+	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
+		if (force)
+			mark_files_ro(sb);
+		else if (!fs_may_remount_ro(sb))
+			return -EBUSY;
+	}
+
+	if (sb->s_op->remount_fs) {
+		lock_super(sb);
+		retval = sb->s_op->remount_fs(sb, &flags, data);
+		unlock_super(sb);
+		if (retval)
+			return retval;
+	}
+	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+	return 0;
+}
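+
+/*
+ * For example, the emergency remount path below calls
+ * do_remount_sb(sb, MS_RDONLY, NULL, 1) to force every block-backed,
+ * currently writable filesystem read-only.
+ */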
+
+static void do_emergency_remount(unsigned long foo)
+{
+	struct super_block *sb;
+
+	spin_lock(&sb_lock);
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
+			/*
+			 * ->remount_fs needs lock_kernel().
+			 *
+			 * What lock protects sb->s_flags??
+			 */
+			lock_kernel();
+			do_remount_sb(sb, MS_RDONLY, NULL, 1);
+			unlock_kernel();
+		}
+		drop_super(sb);
+		spin_lock(&sb_lock);
+	}
+	spin_unlock(&sb_lock);
+	printk("Emergency Remount complete\n");
+}
+
+void emergency_remount(void)
+{
+	pdflush_operation(do_emergency_remount, 0);
+}
+
+/*
+ * Unnamed block devices are dummy devices used by virtual
+ * filesystems which don't use real block-devices.  -- jrs
+ */
+
+static struct idr unnamed_dev_idr;
+static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+
+int set_anon_super(struct super_block *s, void *data)
+{
+	int dev;
+	int error;
+
+ retry:
+	if (idr_pre_get(&unnamed_dev_idr, GFP_ATOMIC) == 0)
+		return -ENOMEM;
+	spin_lock(&unnamed_dev_lock);
+	error = idr_get_new(&unnamed_dev_idr, NULL, &dev);
+	spin_unlock(&unnamed_dev_lock);
+	if (error == -EAGAIN)
+		/* We raced and lost with another CPU. */
+		goto retry;
+	else if (error)
+		return -EAGAIN;
+
+	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
+		spin_lock(&unnamed_dev_lock);
+		idr_remove(&unnamed_dev_idr, dev);
+		spin_unlock(&unnamed_dev_lock);
+		return -EMFILE;
+	}
+	s->s_dev = MKDEV(0, dev & MINORMASK);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_anon_super);
+
+void kill_anon_super(struct super_block *sb)
+{
+	int slot = MINOR(sb->s_dev);
+
+	generic_shutdown_super(sb);
+	spin_lock(&unnamed_dev_lock);
+	idr_remove(&unnamed_dev_idr, slot);
+	spin_unlock(&unnamed_dev_lock);
+}
+
+EXPORT_SYMBOL(kill_anon_super);
+
+void __init unnamed_dev_init(void)
+{
+	idr_init(&unnamed_dev_idr);
+}
+
+void kill_litter_super(struct super_block *sb)
+{
+	if (sb->s_root)
+		d_genocide(sb->s_root);
+	kill_anon_super(sb);
+}
+
+EXPORT_SYMBOL(kill_litter_super);
+
+static int set_bdev_super(struct super_block *s, void *data)
+{
+	s->s_bdev = data;
+	s->s_dev = s->s_bdev->bd_dev;
+	return 0;
+}
+
+static int test_bdev_super(struct super_block *s, void *data)
+{
+	return (void *)s->s_bdev == data;
+}
+
+static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
+{
+	if (bdev->bd_disk) {
+		if (bdev->bd_part)
+			kobject_uevent(&bdev->bd_part->kobj, action, NULL);
+		else
+			kobject_uevent(&bdev->bd_disk->kobj, action, NULL);
+	}
+}
+
+struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data,
+	int (*fill_super)(struct super_block *, void *, int))
+{
+	struct block_device *bdev;
+	struct super_block *s;
+	int error = 0;
+
+	bdev = open_bdev_excl(dev_name, flags, fs_type);
+	if (IS_ERR(bdev))
+		return (struct super_block *)bdev;
+
+	/*
+	 * once the super is inserted into the list by sget, s_umount
+	 * will protect the lockfs code from trying to start a snapshot
+	 * while we are mounting
+	 */
+	down(&bdev->bd_mount_sem);
+	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
+	up(&bdev->bd_mount_sem);
+	if (IS_ERR(s))
+		goto out;
+
+	if (s->s_root) {
+		if ((flags ^ s->s_flags) & MS_RDONLY) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			s = ERR_PTR(-EBUSY);
+		}
+		goto out;
+	} else {
+		char b[BDEVNAME_SIZE];
+
+		s->s_flags = flags;
+		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
+		s->s_old_blocksize = block_size(bdev);
+		sb_set_blocksize(s, s->s_old_blocksize);
+		error = fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+		if (error) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			s = ERR_PTR(error);
+		} else {
+			s->s_flags |= MS_ACTIVE;
+			bdev_uevent(bdev, KOBJ_MOUNT);
+		}
+	}
+
+	return s;
+
+out:
+	close_bdev_excl(bdev);
+	return s;
+}
+
+EXPORT_SYMBOL(get_sb_bdev);
+
+void kill_block_super(struct super_block *sb)
+{
+	struct block_device *bdev = sb->s_bdev;
+
+	bdev_uevent(bdev, KOBJ_UMOUNT);
+	generic_shutdown_super(sb);
+	sync_blockdev(bdev);
+	close_bdev_excl(bdev);
+}
+
+EXPORT_SYMBOL(kill_block_super);
+
+struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+	int flags, void *data,
+	int (*fill_super)(struct super_block *, void *, int))
+{
+	int error;
+	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+
+	if (IS_ERR(s))
+		return s;
+
+	s->s_flags = flags;
+
+	error = fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+	if (error) {
+		up_write(&s->s_umount);
+		deactivate_super(s);
+		return ERR_PTR(error);
+	}
+	s->s_flags |= MS_ACTIVE;
+	return s;
+}
+
+EXPORT_SYMBOL(get_sb_nodev);
+
+static int compare_single(struct super_block *s, void *p)
+{
+	return 1;
+}
+
+struct super_block *get_sb_single(struct file_system_type *fs_type,
+	int flags, void *data,
+	int (*fill_super)(struct super_block *, void *, int))
+{
+	struct super_block *s;
+	int error;
+
+	s = sget(fs_type, compare_single, set_anon_super, NULL);
+	if (IS_ERR(s))
+		return s;
+	if (!s->s_root) {
+		s->s_flags = flags;
+		error = fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+		if (error) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			return ERR_PTR(error);
+		}
+		s->s_flags |= MS_ACTIVE;
+	}
+	do_remount_sb(s, flags, data, 0);
+	return s;
+}
+
+EXPORT_SYMBOL(get_sb_single);
+
+struct vfsmount *
+do_kern_mount(const char *fstype, int flags, const char *name, void *data)
+{
+	struct file_system_type *type = get_fs_type(fstype);
+	struct super_block *sb = ERR_PTR(-ENOMEM);
+	struct vfsmount *mnt;
+	int error;
+	char *secdata = NULL;
+
+	if (!type)
+		return ERR_PTR(-ENODEV);
+
+	mnt = alloc_vfsmnt(name);
+	if (!mnt)
+		goto out;
+
+	if (data) {
+		secdata = alloc_secdata();
+		if (!secdata) {
+			sb = ERR_PTR(-ENOMEM);
+			goto out_mnt;
+		}
+
+		error = security_sb_copy_data(type, data, secdata);
+		if (error) {
+			sb = ERR_PTR(error);
+			goto out_free_secdata;
+		}
+	}
+
+	sb = type->get_sb(type, flags, name, data);
+	if (IS_ERR(sb))
+		goto out_free_secdata;
+ 	error = security_sb_kern_mount(sb, secdata);
+ 	if (error)
+ 		goto out_sb;
+	mnt->mnt_sb = sb;
+	mnt->mnt_root = dget(sb->s_root);
+	mnt->mnt_mountpoint = sb->s_root;
+	mnt->mnt_parent = mnt;
+	mnt->mnt_namespace = current->namespace;
+	up_write(&sb->s_umount);
+	put_filesystem(type);
+	return mnt;
+out_sb:
+	up_write(&sb->s_umount);
+	deactivate_super(sb);
+	sb = ERR_PTR(error);
+out_free_secdata:
+	free_secdata(secdata);
+out_mnt:
+	free_vfsmnt(mnt);
+out:
+	put_filesystem(type);
+	return (struct vfsmount *)sb;
+}
+
+EXPORT_SYMBOL_GPL(do_kern_mount);
+
+struct vfsmount *kern_mount(struct file_system_type *type)
+{
+	return do_kern_mount(type->name, 0, type->name, NULL);
+}
+
+EXPORT_SYMBOL(kern_mount);
diff --git a/fs/sysfs/Makefile b/fs/sysfs/Makefile
new file mode 100644
index 0000000..7a1ceb9
--- /dev/null
+++ b/fs/sysfs/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the sysfs virtual filesystem
+#
+
+obj-y		:= inode.o file.o dir.o symlink.o mount.o bin.o \
+		   group.o
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
new file mode 100644
index 0000000..d4aaa88
--- /dev/null
+++ b/fs/sysfs/bin.c
@@ -0,0 +1,204 @@
+/*
+ * bin.c - binary file operations for sysfs.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Matthew Wilcox
+ * Copyright (c) 2004 Silicon Graphics, Inc.
+ */
+
+#undef DEBUG
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+
+#include "sysfs.h"
+
+static int
+fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
+{
+	struct bin_attribute * attr = to_bin_attr(dentry);
+	struct kobject * kobj = to_kobj(dentry->d_parent);
+
+	if (!attr->read)
+		return -EINVAL;
+
+	return attr->read(kobj, buffer, off, count);
+}
+
+static ssize_t
+read(struct file * file, char __user * userbuf, size_t count, loff_t * off)
+{
+	char *buffer = file->private_data;
+	struct dentry *dentry = file->f_dentry;
+	int size = dentry->d_inode->i_size;
+	loff_t offs = *off;
+	int ret;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+
+	if (size) {
+		if (offs > size)
+			return 0;
+		if (offs + count > size)
+			count = size - offs;
+	}
+
+	ret = fill_read(dentry, buffer, offs, count);
+	if (ret < 0) 
+		return ret;
+	count = ret;
+
+	if (copy_to_user(userbuf, buffer, count))
+		return -EFAULT;
+
+	pr_debug("offs = %lld, *off = %lld, count = %zd\n", offs, *off, count);
+
+	*off = offs + count;
+
+	return count;
+}
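+
+/*
+ * Illustrative clamping behaviour of read() above: with i_size == 64 and
+ * *off == 60, a 4096 byte read is trimmed to 4 bytes; a read starting at
+ * *off == 70 (past the end) simply returns 0.
+ */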
+
+static int
+flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
+{
+	struct bin_attribute *attr = to_bin_attr(dentry);
+	struct kobject *kobj = to_kobj(dentry->d_parent);
+
+	if (!attr->write)
+		return -EINVAL;
+
+	return attr->write(kobj, buffer, offset, count);
+}
+
+static ssize_t write(struct file * file, const char __user * userbuf,
+		     size_t count, loff_t * off)
+{
+	char *buffer = file->private_data;
+	struct dentry *dentry = file->f_dentry;
+	int size = dentry->d_inode->i_size;
+	loff_t offs = *off;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+	if (size) {
+		if (offs > size)
+			return 0;
+		if (offs + count > size)
+			count = size - offs;
+	}
+
+	if (copy_from_user(buffer, userbuf, count))
+		return -EFAULT;
+
+	count = flush_write(dentry, buffer, offs, count);
+	if (count > 0)
+		*off = offs + count;
+	return count;
+}
+
+static int mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct bin_attribute *attr = to_bin_attr(dentry);
+	struct kobject *kobj = to_kobj(dentry->d_parent);
+
+	if (!attr->mmap)
+		return -EINVAL;
+
+	return attr->mmap(kobj, attr, vma);
+}
+
+static int open(struct inode * inode, struct file * file)
+{
+	struct kobject *kobj = sysfs_get_kobject(file->f_dentry->d_parent);
+	struct bin_attribute * attr = to_bin_attr(file->f_dentry);
+	int error = -EINVAL;
+
+	if (!kobj || !attr)
+		goto Done;
+
+	/* Grab the module reference for this attribute if we have one */
+	error = -ENODEV;
+	if (!try_module_get(attr->attr.owner)) 
+		goto Done;
+
+	error = -EACCES;
+	if ((file->f_mode & FMODE_WRITE) && !(attr->write || attr->mmap))
+		goto Error;
+	if ((file->f_mode & FMODE_READ) && !(attr->read || attr->mmap))
+		goto Error;
+
+	error = -ENOMEM;
+	file->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!file->private_data)
+		goto Error;
+
+	error = 0;
+	goto Done;
+
+ Error:
+	module_put(attr->attr.owner);
+ Done:
+	if (error && kobj)
+		kobject_put(kobj);
+	return error;
+}
+
+static int release(struct inode * inode, struct file * file)
+{
+	struct kobject * kobj = to_kobj(file->f_dentry->d_parent);
+	struct bin_attribute * attr = to_bin_attr(file->f_dentry);
+	u8 * buffer = file->private_data;
+
+	if (kobj) 
+		kobject_put(kobj);
+	module_put(attr->attr.owner);
+	kfree(buffer);
+	return 0;
+}
+
+struct file_operations bin_fops = {
+	.read		= read,
+	.write		= write,
+	.mmap		= mmap,
+	.llseek		= generic_file_llseek,
+	.open		= open,
+	.release	= release,
+};
+
+/**
+ *	sysfs_create_bin_file - create binary file for object.
+ *	@kobj:	object.
+ *	@attr:	attribute descriptor.
+ *
+ */
+
+int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
+{
+	BUG_ON(!kobj || !kobj->dentry || !attr);
+
+	return sysfs_add_file(kobj->dentry, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
+}
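+
+/*
+ * Minimal usage sketch (hypothetical caller, illustration only): the caller
+ * fills in a struct bin_attribute with a name, a size and read/write
+ * callbacks, then registers it against its already-registered kobject:
+ *
+ *	static struct bin_attribute eeprom_attr = {
+ *		.attr = { .name = "eeprom", .mode = S_IRUGO, .owner = THIS_MODULE },
+ *		.size = 256,
+ *		.read = eeprom_read,
+ *	};
+ *
+ *	error = sysfs_create_bin_file(kobj, &eeprom_attr);
+ */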
+
+
+/**
+ *	sysfs_remove_bin_file - remove binary file for object.
+ *	@kobj:	object.
+ *	@attr:	attribute descriptor.
+ *
+ */
+
+int sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
+{
+	sysfs_hash_and_remove(kobj->dentry,attr->attr.name);
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
+EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
new file mode 100644
index 0000000..fe19821
--- /dev/null
+++ b/fs/sysfs/dir.c
@@ -0,0 +1,475 @@
+/*
+ * dir.c - Operations for sysfs directories.
+ */
+
+#undef DEBUG
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include "sysfs.h"
+
+DECLARE_RWSEM(sysfs_rename_sem);
+
+static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
+{
+	struct sysfs_dirent * sd = dentry->d_fsdata;
+
+	if (sd) {
+		BUG_ON(sd->s_dentry != dentry);
+		sd->s_dentry = NULL;
+		sysfs_put(sd);
+	}
+	iput(inode);
+}
+
+static struct dentry_operations sysfs_dentry_ops = {
+	.d_iput		= sysfs_d_iput,
+};
+
+/*
+ * Allocates a new sysfs_dirent and links it to the parent sysfs_dirent
+ */
+static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
+						void * element)
+{
+	struct sysfs_dirent * sd;
+
+	sd = kmem_cache_alloc(sysfs_dir_cachep, GFP_KERNEL);
+	if (!sd)
+		return NULL;
+
+	memset(sd, 0, sizeof(*sd));
+	atomic_set(&sd->s_count, 1);
+	INIT_LIST_HEAD(&sd->s_children);
+	list_add(&sd->s_sibling, &parent_sd->s_children);
+	sd->s_element = element;
+
+	return sd;
+}
+
+int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry,
+			void * element, umode_t mode, int type)
+{
+	struct sysfs_dirent * sd;
+
+	sd = sysfs_new_dirent(parent_sd, element);
+	if (!sd)
+		return -ENOMEM;
+
+	sd->s_mode = mode;
+	sd->s_type = type;
+	sd->s_dentry = dentry;
+	if (dentry) {
+		dentry->d_fsdata = sysfs_get(sd);
+		dentry->d_op = &sysfs_dentry_ops;
+	}
+
+	return 0;
+}
+
+static int init_dir(struct inode * inode)
+{
+	inode->i_op = &sysfs_dir_inode_operations;
+	inode->i_fop = &sysfs_dir_operations;
+
+	/* directory inodes start off with i_nlink == 2 (for "." entry) */
+	inode->i_nlink++;
+	return 0;
+}
+
+static int init_file(struct inode * inode)
+{
+	inode->i_size = PAGE_SIZE;
+	inode->i_fop = &sysfs_file_operations;
+	return 0;
+}
+
+static int init_symlink(struct inode * inode)
+{
+	inode->i_op = &sysfs_symlink_inode_operations;
+	return 0;
+}
+
+static int create_dir(struct kobject * k, struct dentry * p,
+		      const char * n, struct dentry ** d)
+{
+	int error;
+	umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+
+	down(&p->d_inode->i_sem);
+	*d = sysfs_get_dentry(p,n);
+	if (!IS_ERR(*d)) {
+		error = sysfs_create(*d, mode, init_dir);
+		if (!error) {
+			error = sysfs_make_dirent(p->d_fsdata, *d, k, mode,
+						SYSFS_DIR);
+			if (!error) {
+				p->d_inode->i_nlink++;
+				(*d)->d_op = &sysfs_dentry_ops;
+				d_rehash(*d);
+			}
+		}
+		if (error && (error != -EEXIST))
+			d_drop(*d);
+		dput(*d);
+	} else
+		error = PTR_ERR(*d);
+	up(&p->d_inode->i_sem);
+	return error;
+}
+
+
+int sysfs_create_subdir(struct kobject * k, const char * n, struct dentry ** d)
+{
+	return create_dir(k,k->dentry,n,d);
+}
+
+/**
+ *	sysfs_create_dir - create a directory for an object.
+ *	@kobj:		object we're creating directory for.
+ */
+
+int sysfs_create_dir(struct kobject * kobj)
+{
+	struct dentry * dentry = NULL;
+	struct dentry * parent;
+	int error = 0;
+
+	BUG_ON(!kobj);
+
+	if (kobj->parent)
+		parent = kobj->parent->dentry;
+	else if (sysfs_mount && sysfs_mount->mnt_sb)
+		parent = sysfs_mount->mnt_sb->s_root;
+	else
+		return -EFAULT;
+
+	error = create_dir(kobj,parent,kobject_name(kobj),&dentry);
+	if (!error)
+		kobj->dentry = dentry;
+	return error;
+}
+
+/* attaches attribute's sysfs_dirent to the dentry corresponding to the
+ * attribute file
+ */
+static int sysfs_attach_attr(struct sysfs_dirent * sd, struct dentry * dentry)
+{
+	struct attribute * attr = NULL;
+	struct bin_attribute * bin_attr = NULL;
+	int (* init) (struct inode *) = NULL;
+	int error = 0;
+
+	if (sd->s_type & SYSFS_KOBJ_BIN_ATTR) {
+		bin_attr = sd->s_element;
+		attr = &bin_attr->attr;
+	} else {
+		attr = sd->s_element;
+		init = init_file;
+	}
+
+	error = sysfs_create(dentry, (attr->mode & S_IALLUGO) | S_IFREG, init);
+	if (error)
+		return error;
+
+	if (bin_attr) {
+		dentry->d_inode->i_size = bin_attr->size;
+		dentry->d_inode->i_fop = &bin_fops;
+	}
+	dentry->d_op = &sysfs_dentry_ops;
+	dentry->d_fsdata = sysfs_get(sd);
+	sd->s_dentry = dentry;
+	d_rehash(dentry);
+
+	return 0;
+}
+
+static int sysfs_attach_link(struct sysfs_dirent * sd, struct dentry * dentry)
+{
+	int err = 0;
+
+	err = sysfs_create(dentry, S_IFLNK|S_IRWXUGO, init_symlink);
+	if (!err) {
+		dentry->d_op = &sysfs_dentry_ops;
+		dentry->d_fsdata = sysfs_get(sd);
+		sd->s_dentry = dentry;
+		d_rehash(dentry);
+	}
+	return err;
+}
+
+static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
+				struct nameidata *nd)
+{
+	struct sysfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
+	struct sysfs_dirent * sd;
+	int err = 0;
+
+	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+		if (sd->s_type & SYSFS_NOT_PINNED) {
+			const unsigned char * name = sysfs_get_name(sd);
+
+			if (strcmp(name, dentry->d_name.name))
+				continue;
+
+			if (sd->s_type & SYSFS_KOBJ_LINK)
+				err = sysfs_attach_link(sd, dentry);
+			else
+				err = sysfs_attach_attr(sd, dentry);
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+
+struct inode_operations sysfs_dir_inode_operations = {
+	.lookup		= sysfs_lookup,
+};
+
+static void remove_dir(struct dentry * d)
+{
+	struct dentry * parent = dget(d->d_parent);
+	struct sysfs_dirent * sd;
+
+	down(&parent->d_inode->i_sem);
+	d_delete(d);
+	sd = d->d_fsdata;
+ 	list_del_init(&sd->s_sibling);
+	sysfs_put(sd);
+	if (d->d_inode)
+		simple_rmdir(parent->d_inode,d);
+
+	pr_debug(" o %s removing done (%d)\n",d->d_name.name,
+		 atomic_read(&d->d_count));
+
+	up(&parent->d_inode->i_sem);
+	dput(parent);
+}
+
+void sysfs_remove_subdir(struct dentry * d)
+{
+	remove_dir(d);
+}
+
+
+/**
+ *	sysfs_remove_dir - remove an object's directory.
+ *	@kobj:	object. 
+ *
+ *	The only thing special about this is that we remove any files in 
+ *	the directory before we remove the directory, and we've inlined
+ *	what used to be sysfs_rmdir() below, instead of calling it separately.
+ */
+
+void sysfs_remove_dir(struct kobject * kobj)
+{
+	struct dentry * dentry = dget(kobj->dentry);
+	struct sysfs_dirent * parent_sd;
+	struct sysfs_dirent * sd, * tmp;
+
+	if (!dentry)
+		return;
+
+	pr_debug("sysfs %s: removing dir\n",dentry->d_name.name);
+	down(&dentry->d_inode->i_sem);
+	parent_sd = dentry->d_fsdata;
+	list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
+		if (!sd->s_element || !(sd->s_type & SYSFS_NOT_PINNED))
+			continue;
+		list_del_init(&sd->s_sibling);
+		sysfs_drop_dentry(sd, dentry);
+		sysfs_put(sd);
+	}
+	up(&dentry->d_inode->i_sem);
+
+	remove_dir(dentry);
+	/**
+	 * Drop reference from dget() on entrance.
+	 */
+	dput(dentry);
+}
+
+int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
+{
+	int error = 0;
+	struct dentry * new_dentry, * parent;
+
+	if (!strcmp(kobject_name(kobj), new_name))
+		return -EINVAL;
+
+	if (!kobj->parent)
+		return -EINVAL;
+
+	down_write(&sysfs_rename_sem);
+	parent = kobj->parent->dentry;
+
+	down(&parent->d_inode->i_sem);
+
+	new_dentry = sysfs_get_dentry(parent, new_name);
+	if (!IS_ERR(new_dentry)) {
+  		if (!new_dentry->d_inode) {
+			error = kobject_set_name(kobj, "%s", new_name);
+			if (!error) {
+				d_add(new_dentry, NULL);
+				d_move(kobj->dentry, new_dentry);
+			}
+			else
+				d_drop(new_dentry);
+		} else
+			error = -EEXIST;
+		dput(new_dentry);
+	}
+	up(&parent->d_inode->i_sem);	
+	up_write(&sysfs_rename_sem);
+
+	return error;
+}
+
+static int sysfs_dir_open(struct inode *inode, struct file *file)
+{
+	struct dentry * dentry = file->f_dentry;
+	struct sysfs_dirent * parent_sd = dentry->d_fsdata;
+
+	down(&dentry->d_inode->i_sem);
+	file->private_data = sysfs_new_dirent(parent_sd, NULL);
+	up(&dentry->d_inode->i_sem);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int sysfs_dir_close(struct inode *inode, struct file *file)
+{
+	struct dentry * dentry = file->f_dentry;
+	struct sysfs_dirent * cursor = file->private_data;
+
+	down(&dentry->d_inode->i_sem);
+	list_del_init(&cursor->s_sibling);
+	up(&dentry->d_inode->i_sem);
+
+	release_sysfs_dirent(cursor);
+
+	return 0;
+}
+
+/* Relationship between s_mode and the DT_xxx types */
+static inline unsigned char dt_type(struct sysfs_dirent *sd)
+{
+	return (sd->s_mode >> 12) & 15;
+}
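+
+/*
+ * For instance, S_IFDIR (0040000) >> 12 yields 4 == DT_DIR and
+ * S_IFREG (0100000) >> 12 yields 8 == DT_REG, matching the readdir
+ * d_type values without a lookup table.
+ */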
+
+static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct sysfs_dirent * parent_sd = dentry->d_fsdata;
+	struct sysfs_dirent *cursor = filp->private_data;
+	struct list_head *p, *q = &cursor->s_sibling;
+	ino_t ino;
+	int i = filp->f_pos;
+
+	switch (i) {
+		case 0:
+			ino = dentry->d_inode->i_ino;
+			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+				break;
+			filp->f_pos++;
+			i++;
+			/* fallthrough */
+		case 1:
+			ino = parent_ino(dentry);
+			if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+				break;
+			filp->f_pos++;
+			i++;
+			/* fallthrough */
+		default:
+			if (filp->f_pos == 2) {
+				list_del(q);
+				list_add(q, &parent_sd->s_children);
+			}
+			for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+				struct sysfs_dirent *next;
+				const char * name;
+				int len;
+
+				next = list_entry(p, struct sysfs_dirent,
+						   s_sibling);
+				if (!next->s_element)
+					continue;
+
+				name = sysfs_get_name(next);
+				len = strlen(name);
+				if (next->s_dentry)
+					ino = next->s_dentry->d_inode->i_ino;
+				else
+					ino = iunique(sysfs_sb, 2);
+
+				if (filldir(dirent, name, len, filp->f_pos, ino,
+						 dt_type(next)) < 0)
+					return 0;
+
+				list_del(q);
+				list_add(q, p);
+				p = q;
+				filp->f_pos++;
+			}
+	}
+	return 0;
+}
+
+static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
+{
+	struct dentry * dentry = file->f_dentry;
+
+	down(&dentry->d_inode->i_sem);
+	switch (origin) {
+		case 1:
+			offset += file->f_pos;
+		case 0:
+			if (offset >= 0)
+				break;
+		default:
+			up(&file->f_dentry->d_inode->i_sem);
+			return -EINVAL;
+	}
+	if (offset != file->f_pos) {
+		file->f_pos = offset;
+		if (file->f_pos >= 2) {
+			struct sysfs_dirent *sd = dentry->d_fsdata;
+			struct sysfs_dirent *cursor = file->private_data;
+			struct list_head *p;
+			loff_t n = file->f_pos - 2;
+
+			list_del(&cursor->s_sibling);
+			p = sd->s_children.next;
+			while (n && p != &sd->s_children) {
+				struct sysfs_dirent *next;
+				next = list_entry(p, struct sysfs_dirent,
+						   s_sibling);
+				if (next->s_element)
+					n--;
+				p = p->next;
+			}
+			list_add_tail(&cursor->s_sibling, p);
+		}
+	}
+	up(&dentry->d_inode->i_sem);
+	return offset;
+}
+
+struct file_operations sysfs_dir_operations = {
+	.open		= sysfs_dir_open,
+	.release	= sysfs_dir_close,
+	.llseek		= sysfs_dir_lseek,
+	.read		= generic_read_dir,
+	.readdir	= sysfs_readdir,
+};
+
+EXPORT_SYMBOL_GPL(sysfs_create_dir);
+EXPORT_SYMBOL_GPL(sysfs_remove_dir);
+EXPORT_SYMBOL_GPL(sysfs_rename_dir);
+
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
new file mode 100644
index 0000000..352f966
--- /dev/null
+++ b/fs/sysfs/file.c
@@ -0,0 +1,447 @@
+/*
+ * file.c - operations for regular (text) files.
+ */
+
+#include <linux/module.h>
+#include <linux/dnotify.h>
+#include <linux/kobject.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "sysfs.h"
+
+#define to_subsys(k) container_of(k,struct subsystem,kset.kobj)
+#define to_sattr(a) container_of(a,struct subsys_attribute,attr)
+
+/**
+ * Subsystem file operations.
+ * These operations allow subsystems to have files that can be 
+ * read/written. 
+ */
+static ssize_t 
+subsys_attr_show(struct kobject * kobj, struct attribute * attr, char * page)
+{
+	struct subsystem * s = to_subsys(kobj);
+	struct subsys_attribute * sattr = to_sattr(attr);
+	ssize_t ret = 0;
+
+	if (sattr->show)
+		ret = sattr->show(s,page);
+	return ret;
+}
+
+static ssize_t 
+subsys_attr_store(struct kobject * kobj, struct attribute * attr, 
+		  const char * page, size_t count)
+{
+	struct subsystem * s = to_subsys(kobj);
+	struct subsys_attribute * sattr = to_sattr(attr);
+	ssize_t ret = 0;
+
+	if (sattr->store)
+		ret = sattr->store(s,page,count);
+	return ret;
+}
+
+static struct sysfs_ops subsys_sysfs_ops = {
+	.show	= subsys_attr_show,
+	.store	= subsys_attr_store,
+};
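+
+/*
+ * Example of a subsystem attribute as consumed by subsys_sysfs_ops above.
+ * This is only an illustrative sketch: the "example" names are made up, and
+ * just the show() signature is taken from the caller above.
+ *
+ *	static ssize_t example_show(struct subsystem * s, char * page)
+ *	{
+ *		return sprintf(page, "%d\n", 42);
+ *	}
+ *
+ *	static struct subsys_attribute example_attr = {
+ *		.attr	= { .name = "example", .mode = 0444 },
+ *		.show	= example_show,
+ *	};
+ */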
+
+
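+/*
+ * One sysfs_buffer is allocated per open file: check_perm() below creates it
+ * and stores it in file->private_data, and sysfs_release() frees it together
+ * with its page.
+ */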
+struct sysfs_buffer {
+	size_t			count;
+	loff_t			pos;
+	char			* page;
+	struct sysfs_ops	* ops;
+	struct semaphore	sem;
+	int			needs_read_fill;
+};
+
+
+/**
+ *	fill_read_buffer - allocate and fill buffer from object.
+ *	@dentry:	dentry pointer.
+ *	@buffer:	data buffer for file.
+ *
+ *	Allocate @buffer->page, if it hasn't been already, then call the
+ *	kobject's show() method to fill the buffer with this attribute's 
+ *	data. 
+ *	This is called on the first read of the file, and again after a write
+ *	has invalidated the buffer.
+ */
+static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
+{
+	struct attribute * attr = to_attr(dentry);
+	struct kobject * kobj = to_kobj(dentry->d_parent);
+	struct sysfs_ops * ops = buffer->ops;
+	int ret = 0;
+	ssize_t count;
+
+	if (!buffer->page)
+		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!buffer->page)
+		return -ENOMEM;
+
+	count = ops->show(kobj,attr,buffer->page);
+	buffer->needs_read_fill = 0;
+	BUG_ON(count > (ssize_t)PAGE_SIZE);
+	if (count >= 0)
+		buffer->count = count;
+	else
+		ret = count;
+	return ret;
+}
+
+
+/**
+ *	flush_read_buffer - push buffer to userspace.
+ *	@buffer:	data buffer for file.
+ *	@userbuf:	user-passed buffer.
+ *	@count:		number of bytes requested.
+ *	@ppos:		file position.
+ *
+ *	Copy the buffer we filled in fill_read_buffer() to userspace.
+ *	This is done at the reader's leisure, copying and advancing 
+ *	the amount they specify each time.
+ *	This may be called continuously until the buffer is empty.
+ */
+static int flush_read_buffer(struct sysfs_buffer * buffer, char __user * buf,
+			     size_t count, loff_t * ppos)
+{
+	int error;
+
+	if (*ppos > buffer->count)
+		return 0;
+
+	if (count > (buffer->count - *ppos))
+		count = buffer->count - *ppos;
+
+	error = copy_to_user(buf,buffer->page + *ppos,count);
+	if (!error)
+		*ppos += count;
+	return error ? -EFAULT : count;
+}
+
+/**
+ *	sysfs_read_file - read an attribute. 
+ *	@file:	file pointer.
+ *	@buf:	buffer to fill.
+ *	@count:	number of bytes to read.
+ *	@ppos:	starting offset in file.
+ *
+ *	Userspace wants to read an attribute file. The attribute descriptor
+ *	is in the file's ->d_fsdata. The target object is in the directory's
+ *	->d_fsdata.
+ *
+ *	We call fill_read_buffer() to allocate and fill the buffer from the
+ *	object's show() method the first time the file is read (and again
+ *	after a write has invalidated the buffer). That should fill the
+ *	entire buffer with all the data the object has to offer for that
+ *	attribute.
+ *	We then call flush_read_buffer() to copy the buffer to userspace
+ *	in the increments specified.
+ */
+
+static ssize_t
+sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct sysfs_buffer * buffer = file->private_data;
+	ssize_t retval = 0;
+
+	down(&buffer->sem);
+	if (buffer->needs_read_fill) {
+		if ((retval = fill_read_buffer(file->f_dentry,buffer)))
+			goto out;
+	}
+	pr_debug("%s: count = %d, ppos = %lld, buf = %s\n",
+		 __FUNCTION__,count,*ppos,buffer->page);
+	retval = flush_read_buffer(buffer,buf,count,ppos);
+out:
+	up(&buffer->sem);
+	return retval;
+}
+
+
+/**
+ *	fill_write_buffer - copy buffer from userspace.
+ *	@buffer:	data buffer for file.
+ *	@userbuf:	data from user.
+ *	@count:		number of bytes in @userbuf.
+ *
+ *	Allocate @buffer->page if it hasn't been already, then
+ *	copy the user-supplied buffer into it.
+ */
+
+static int 
+fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t count)
+{
+	int error;
+
+	if (!buffer->page)
+		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!buffer->page)
+		return -ENOMEM;
+
+	if (count >= PAGE_SIZE)
+		count = PAGE_SIZE - 1;
+	error = copy_from_user(buffer->page,buf,count);
+	buffer->needs_read_fill = 1;
+	return error ? -EFAULT : count;
+}
+
+
+/**
+ *	flush_write_buffer - push buffer to kobject.
+ *	@file:		file pointer.
+ *	@buffer:	data buffer for file.
+ *
+ *	Get the correct pointers for the kobject and the attribute we're
+ *	dealing with, then call the store() method for the attribute, 
+ *	passing the buffer that we acquired in fill_write_buffer().
+ */
+
+static int 
+flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
+{
+	struct attribute * attr = to_attr(dentry);
+	struct kobject * kobj = to_kobj(dentry->d_parent);
+	struct sysfs_ops * ops = buffer->ops;
+
+	return ops->store(kobj,attr,buffer->page,count);
+}
+
+
+/**
+ *	sysfs_write_file - write an attribute.
+ *	@file:	file pointer
+ *	@buf:	data to write
+ *	@count:	number of bytes
+ *	@ppos:	starting offset
+ *
+ *	Similar to sysfs_read_file(), though working in the opposite direction.
+ *	We allocate and fill the data from the user in fill_write_buffer(),
+ *	then push it to the kobject in flush_write_buffer().
+ *	There is no easy way for us to know if userspace is only doing a partial
+ *	write, so we don't support partial writes. We expect the entire buffer
+ *	to come on the first write.
+ *	Hint: if you're writing a value, first read the file, modify only the
+ *	value you're changing, then write the entire buffer back (see the
+ *	example after this function).
+ */
+
+static ssize_t
+sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct sysfs_buffer * buffer = file->private_data;
+	ssize_t len;
+
+	down(&buffer->sem);
+	len = fill_write_buffer(buffer, buf, count);
+	if (len > 0)
+		len = flush_write_buffer(file->f_dentry, buffer, len);
+	if (len > 0)
+		*ppos += len;
+	up(&buffer->sem);
+	return len;
+}
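+
+/*
+ * The read-modify-write pattern suggested above, as seen from userspace.
+ * This is only an illustrative sketch (error handling omitted, and the
+ * attribute path is made up):
+ *
+ *	char buf[4096];
+ *	int fd = open("/sys/devices/example/parameters", O_RDWR);
+ *	ssize_t len = read(fd, buf, sizeof(buf) - 1);
+ *
+ *	buf[len] = '\0';
+ *	... modify only the value being changed in buf ...
+ *	lseek(fd, 0, SEEK_SET);
+ *	write(fd, buf, strlen(buf));
+ *	close(fd);
+ */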
+
+static int check_perm(struct inode * inode, struct file * file)
+{
+	struct kobject *kobj = sysfs_get_kobject(file->f_dentry->d_parent);
+	struct attribute * attr = to_attr(file->f_dentry);
+	struct sysfs_buffer * buffer;
+	struct sysfs_ops * ops = NULL;
+	int error = 0;
+
+	if (!kobj || !attr)
+		goto Einval;
+
+	/* Grab the module reference for this attribute if we have one */
+	if (!try_module_get(attr->owner)) {
+		error = -ENODEV;
+		goto Done;
+	}
+
+	/* if the kobject has no ktype (neither via its kset nor its own),
+	 * then we assume that it is a subsystem itself, and use the
+	 * subsystem ops for it.
+	 */
+	if (kobj->kset && kobj->kset->ktype)
+		ops = kobj->kset->ktype->sysfs_ops;
+	else if (kobj->ktype)
+		ops = kobj->ktype->sysfs_ops;
+	else
+		ops = &subsys_sysfs_ops;
+
+	/* No sysfs operations, either from having no subsystem
+	 * or from the subsystem having no operations.
+	 */
+	if (!ops)
+		goto Eaccess;
+
+	/* File needs write support.
+	 * The inode's perms must say it's ok, 
+	 * and we must have a store method.
+	 */
+	if (file->f_mode & FMODE_WRITE) {
+
+		if (!(inode->i_mode & S_IWUGO) || !ops->store)
+			goto Eaccess;
+
+	}
+
+	/* File needs read support.
+	 * The inode's perms must say it's ok, and there
+	 * must be a show method for it.
+	 */
+	if (file->f_mode & FMODE_READ) {
+		if (!(inode->i_mode & S_IRUGO) || !ops->show)
+			goto Eaccess;
+	}
+
+	/* No error? Great, allocate a buffer for the file, and store
+	 * it in file->private_data for easy access.
+	 */
+	buffer = kmalloc(sizeof(struct sysfs_buffer),GFP_KERNEL);
+	if (buffer) {
+		memset(buffer,0,sizeof(struct sysfs_buffer));
+		init_MUTEX(&buffer->sem);
+		buffer->needs_read_fill = 1;
+		buffer->ops = ops;
+		file->private_data = buffer;
+	} else
+		error = -ENOMEM;
+	goto Done;
+
+ Einval:
+	error = -EINVAL;
+	goto Done;
+ Eaccess:
+	error = -EACCES;
+	module_put(attr->owner);
+ Done:
+	if (error && kobj)
+		kobject_put(kobj);
+	return error;
+}
+
+static int sysfs_open_file(struct inode * inode, struct file * filp)
+{
+	return check_perm(inode,filp);
+}
+
+static int sysfs_release(struct inode * inode, struct file * filp)
+{
+	struct kobject * kobj = to_kobj(filp->f_dentry->d_parent);
+	struct attribute * attr = to_attr(filp->f_dentry);
+	struct module * owner = attr->owner;
+	struct sysfs_buffer * buffer = filp->private_data;
+
+	if (kobj) 
+		kobject_put(kobj);
+	/* After this point, attr should not be accessed. */
+	module_put(owner);
+
+	if (buffer) {
+		if (buffer->page)
+			free_page((unsigned long)buffer->page);
+		kfree(buffer);
+	}
+	return 0;
+}
+
+struct file_operations sysfs_file_operations = {
+	.read		= sysfs_read_file,
+	.write		= sysfs_write_file,
+	.llseek		= generic_file_llseek,
+	.open		= sysfs_open_file,
+	.release	= sysfs_release,
+};
+
+
+int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type)
+{
+	struct sysfs_dirent * parent_sd = dir->d_fsdata;
+	umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
+	int error = 0;
+
+	down(&dir->d_inode->i_sem);
+	error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
+	up(&dir->d_inode->i_sem);
+
+	return error;
+}
+
+
+/**
+ *	sysfs_create_file - create an attribute file for an object.
+ *	@kobj:	object we're creating for. 
+ *	@attr:	attribute descriptor.
+ */
+
+int sysfs_create_file(struct kobject * kobj, const struct attribute * attr)
+{
+	BUG_ON(!kobj || !kobj->dentry || !attr);
+
+	return sysfs_add_file(kobj->dentry, attr, SYSFS_KOBJ_ATTR);
+
+}
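+
+/*
+ * Example usage (an illustrative sketch; "foo" and the kobject involved are
+ * hypothetical):
+ *
+ *	static struct attribute foo_attr = {
+ *		.name	= "foo",
+ *		.mode	= S_IRUGO | S_IWUSR,
+ *		.owner	= THIS_MODULE,
+ *	};
+ *
+ *	error = sysfs_create_file(kobj, &foo_attr);
+ *
+ * The kobject must already be registered (so kobj->dentry is set up), and
+ * reads and writes of "foo" are served by the show()/store() methods of the
+ * kobject's sysfs_ops, as resolved in check_perm() above.
+ */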
+
+
+/**
+ * sysfs_update_file - update the modified timestamp on an object attribute.
+ * @kobj: object we're acting for.
+ * @attr: attribute descriptor.
+ *
+ * Also call dnotify for the dentry, which lots of userspace programs
+ * use.
+ */
+int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
+{
+	struct dentry * dir = kobj->dentry;
+	struct dentry * victim;
+	int res = -ENOENT;
+
+	down(&dir->d_inode->i_sem);
+	victim = sysfs_get_dentry(dir, attr->name);
+	if (!IS_ERR(victim)) {
+		/* make sure dentry is really there */
+		if (victim->d_inode && 
+		    (victim->d_parent->d_inode == dir->d_inode)) {
+			victim->d_inode->i_mtime = CURRENT_TIME;
+			dnotify_parent(victim, DN_MODIFY);
+
+			/**
+			 * Drop reference from initial sysfs_get_dentry().
+			 */
+			dput(victim);
+			res = 0;
+		} else
+			d_drop(victim);
+		
+		/**
+		 * Drop the reference acquired from sysfs_get_dentry() above.
+		 */
+		dput(victim);
+	}
+	up(&dir->d_inode->i_sem);
+
+	return res;
+}
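+
+/*
+ * Example (illustrative): a driver that has just changed the value behind an
+ * attribute can poke watchers with
+ *
+ *	sysfs_update_file(kobj, &foo_attr);
+ *
+ * which updates the file's mtime and sends DN_MODIFY to dnotify watchers of
+ * the directory.
+ */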
+
+
+/**
+ *	sysfs_remove_file - remove an object attribute.
+ *	@kobj:	object we're acting for.
+ *	@attr:	attribute descriptor.
+ *
+ *	Hash the attribute name and kill the victim.
+ */
+
+void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
+{
+	sysfs_hash_and_remove(kobj->dentry,attr->name);
+}
+
+
+EXPORT_SYMBOL_GPL(sysfs_create_file);
+EXPORT_SYMBOL_GPL(sysfs_remove_file);
+EXPORT_SYMBOL_GPL(sysfs_update_file);
+
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
new file mode 100644
index 0000000..f11ac5e
--- /dev/null
+++ b/fs/sysfs/group.c
@@ -0,0 +1,84 @@
+/*
+ * fs/sysfs/group.c - Operations for adding/removing multiple files at once.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPL v2.
+ *
+ */
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+#include "sysfs.h"
+
+
+static void remove_files(struct dentry * dir, 
+			 const struct attribute_group * grp)
+{
+	struct attribute *const* attr;
+
+	for (attr = grp->attrs; *attr; attr++)
+		sysfs_hash_and_remove(dir,(*attr)->name);
+}
+
+static int create_files(struct dentry * dir,
+			const struct attribute_group * grp)
+{
+	struct attribute *const* attr;
+	int error = 0;
+
+	for (attr = grp->attrs; *attr && !error; attr++) {
+		error = sysfs_add_file(dir, *attr, SYSFS_KOBJ_ATTR);
+	}
+	if (error)
+		remove_files(dir,grp);
+	return error;
+}
+
+
+int sysfs_create_group(struct kobject * kobj, 
+		       const struct attribute_group * grp)
+{
+	struct dentry * dir;
+	int error;
+
+	BUG_ON(!kobj || !kobj->dentry);
+
+	if (grp->name) {
+		error = sysfs_create_subdir(kobj,grp->name,&dir);
+		if (error)
+			return error;
+	} else
+		dir = kobj->dentry;
+	dir = dget(dir);
+	if ((error = create_files(dir,grp))) {
+		if (grp->name)
+			sysfs_remove_subdir(dir);
+	}
+	dput(dir);
+	return error;
+}
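+
+/*
+ * Example (an illustrative sketch; the attribute objects are hypothetical).
+ * The attrs list must be NULL terminated, and a NULL .name means the files
+ * are created directly in kobj's own directory instead of a subdirectory:
+ *
+ *	static struct attribute * foo_attrs[] = {
+ *		&foo_attr,
+ *		&bar_attr,
+ *		NULL,
+ *	};
+ *
+ *	static struct attribute_group foo_group = {
+ *		.name	= "foo",
+ *		.attrs	= foo_attrs,
+ *	};
+ *
+ *	error = sysfs_create_group(kobj, &foo_group);
+ */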
+
+void sysfs_remove_group(struct kobject * kobj, 
+			const struct attribute_group * grp)
+{
+	struct dentry * dir;
+
+	if (grp->name)
+		dir = sysfs_get_dentry(kobj->dentry,grp->name);
+	else
+		dir = dget(kobj->dentry);
+
+	remove_files(dir,grp);
+	if (grp->name)
+		sysfs_remove_subdir(dir);
+	/* release the ref. taken in this routine */
+	dput(dir);
+}
+
+
+EXPORT_SYMBOL_GPL(sysfs_create_group);
+EXPORT_SYMBOL_GPL(sysfs_remove_group);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
new file mode 100644
index 0000000..aff7b2d
--- /dev/null
+++ b/fs/sysfs/inode.c
@@ -0,0 +1,165 @@
+/*
+ * inode.c - basic inode and dentry operations.
+ *
+ * sysfs is Copyright (c) 2001-3 Patrick Mochel
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
+ */
+
+#undef DEBUG 
+
+#include <linux/pagemap.h>
+#include <linux/namei.h>
+#include <linux/backing-dev.h>
+#include "sysfs.h"
+
+extern struct super_block * sysfs_sb;
+
+static struct address_space_operations sysfs_aops = {
+	.readpage	= simple_readpage,
+	.prepare_write	= simple_prepare_write,
+	.commit_write	= simple_commit_write
+};
+
+static struct backing_dev_info sysfs_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+struct inode * sysfs_new_inode(mode_t mode)
+{
+	struct inode * inode = new_inode(sysfs_sb);
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = 0;
+		inode->i_gid = 0;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mapping->a_ops = &sysfs_aops;
+		inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
+	}
+	return inode;
+}
+
+int sysfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *))
+{
+	int error = 0;
+	struct inode * inode = NULL;
+	if (dentry) {
+		if (!dentry->d_inode) {
+			if ((inode = sysfs_new_inode(mode))) {
+				if (dentry->d_parent && dentry->d_parent->d_inode) {
+					struct inode *p_inode = dentry->d_parent->d_inode;
+					p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
+				}
+				goto Proceed;
+			}
+			else 
+				error = -ENOMEM;
+		} else
+			error = -EEXIST;
+	} else 
+		error = -ENOENT;
+	goto Done;
+
+ Proceed:
+	if (init)
+		error = init(inode);
+	if (!error) {
+		d_instantiate(dentry, inode);
+		if (S_ISDIR(mode))
+			dget(dentry);  /* pin only directory dentry in core */
+	} else
+		iput(inode);
+ Done:
+	return error;
+}
+
+struct dentry * sysfs_get_dentry(struct dentry * parent, const char * name)
+{
+	struct qstr qstr;
+
+	qstr.name = name;
+	qstr.len = strlen(name);
+	qstr.hash = full_name_hash(name,qstr.len);
+	return lookup_hash(&qstr,parent);
+}
+
+/*
+ * Get the name of the element represented by the given sysfs_dirent
+ */
+const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
+{
+	struct attribute * attr;
+	struct bin_attribute * bin_attr;
+	struct sysfs_symlink  * sl;
+
+	if (!sd || !sd->s_element)
+		BUG();
+
+	switch (sd->s_type) {
+		case SYSFS_DIR:
+			/* Always have a dentry so use that */
+			return sd->s_dentry->d_name.name;
+
+		case SYSFS_KOBJ_ATTR:
+			attr = sd->s_element;
+			return attr->name;
+
+		case SYSFS_KOBJ_BIN_ATTR:
+			bin_attr = sd->s_element;
+			return bin_attr->attr.name;
+
+		case SYSFS_KOBJ_LINK:
+			sl = sd->s_element;
+			return sl->link_name;
+	}
+	return NULL;
+}
+
+
+/*
+ * Unhashes the dentry corresponding to the given sysfs_dirent.
+ * Called with the parent inode's i_sem held.
+ */
+void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
+{
+	struct dentry * dentry = sd->s_dentry;
+
+	if (dentry) {
+		spin_lock(&dcache_lock);
+		spin_lock(&dentry->d_lock);
+		if (!(d_unhashed(dentry) && dentry->d_inode)) {
+			dget_locked(dentry);
+			__d_drop(dentry);
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&dcache_lock);
+			simple_unlink(parent->d_inode, dentry);
+		} else {
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&dcache_lock);
+		}
+	}
+}
+
+void sysfs_hash_and_remove(struct dentry * dir, const char * name)
+{
+	struct sysfs_dirent * sd;
+	struct sysfs_dirent * parent_sd = dir->d_fsdata;
+
+	down(&dir->d_inode->i_sem);
+	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+		if (!sd->s_element)
+			continue;
+		if (!strcmp(sysfs_get_name(sd), name)) {
+			list_del_init(&sd->s_sibling);
+			sysfs_drop_dentry(sd, dir);
+			sysfs_put(sd);
+			break;
+		}
+	}
+	up(&dir->d_inode->i_sem);
+}
+
+
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
new file mode 100644
index 0000000..5c805bb
--- /dev/null
+++ b/fs/sysfs/mount.c
@@ -0,0 +1,107 @@
+/*
+ * mount.c - operations for initializing and mounting sysfs.
+ */
+
+#define DEBUG 
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+
+#include "sysfs.h"
+
+/* Random magic number */
+#define SYSFS_MAGIC 0x62656572
+
+struct vfsmount *sysfs_mount;
+struct super_block * sysfs_sb = NULL;
+kmem_cache_t *sysfs_dir_cachep;
+
+static struct super_operations sysfs_ops = {
+	.statfs		= simple_statfs,
+	.drop_inode	= generic_delete_inode,
+};
+
+static struct sysfs_dirent sysfs_root = {
+	.s_sibling	= LIST_HEAD_INIT(sysfs_root.s_sibling),
+	.s_children	= LIST_HEAD_INIT(sysfs_root.s_children),
+	.s_element	= NULL,
+	.s_type		= SYSFS_ROOT,
+};
+
+static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode *inode;
+	struct dentry *root;
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = SYSFS_MAGIC;
+	sb->s_op = &sysfs_ops;
+	sb->s_time_gran = 1;
+	sysfs_sb = sb;
+
+	inode = sysfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
+	if (inode) {
+		inode->i_op = &sysfs_dir_inode_operations;
+		inode->i_fop = &sysfs_dir_operations;
+		/* directory inodes start off with i_nlink == 2 (for "." entry) */
+		inode->i_nlink++;	
+	} else {
+		pr_debug("sysfs: could not get root inode\n");
+		return -ENOMEM;
+	}
+
+	root = d_alloc_root(inode);
+	if (!root) {
+		pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
+		iput(inode);
+		return -ENOMEM;
+	}
+	root->d_fsdata = &sysfs_root;
+	sb->s_root = root;
+	return 0;
+}
+
+static struct super_block *sysfs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, sysfs_fill_super);
+}
+
+static struct file_system_type sysfs_fs_type = {
+	.name		= "sysfs",
+	.get_sb		= sysfs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+int __init sysfs_init(void)
+{
+	int err = -ENOMEM;
+
+	sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
+					      sizeof(struct sysfs_dirent),
+					      0, 0, NULL, NULL);
+	if (!sysfs_dir_cachep)
+		goto out;
+
+	err = register_filesystem(&sysfs_fs_type);
+	if (!err) {
+		sysfs_mount = kern_mount(&sysfs_fs_type);
+		if (IS_ERR(sysfs_mount)) {
+			printk(KERN_ERR "sysfs: could not mount!\n");
+			err = PTR_ERR(sysfs_mount);
+			sysfs_mount = NULL;
+			unregister_filesystem(&sysfs_fs_type);
+			goto out_err;
+		}
+	} else
+		goto out_err;
+out:
+	return err;
+out_err:
+	kmem_cache_destroy(sysfs_dir_cachep);
+	sysfs_dir_cachep = NULL;
+	goto out;
+}
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
new file mode 100644
index 0000000..dfdf701
--- /dev/null
+++ b/fs/sysfs/symlink.c
@@ -0,0 +1,180 @@
+/*
+ * symlink.c - operations for sysfs symlinks.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/namei.h>
+
+#include "sysfs.h"
+
+static int object_depth(struct kobject * kobj)
+{
+	struct kobject * p = kobj;
+	int depth = 0;
+	do { depth++; } while ((p = p->parent));
+	return depth;
+}
+
+static int object_path_length(struct kobject * kobj)
+{
+	struct kobject * p = kobj;
+	int length = 1;
+	do {
+		length += strlen(kobject_name(p)) + 1;
+		p = p->parent;
+	} while (p);
+	return length;
+}
+
+static void fill_object_path(struct kobject * kobj, char * buffer, int length)
+{
+	struct kobject * p;
+
+	--length;
+	for (p = kobj; p; p = p->parent) {
+		int cur = strlen(kobject_name(p));
+
+		/* back up enough to print this bus id with '/' */
+		length -= cur;
+		strncpy(buffer + length,kobject_name(p),cur);
+		*(buffer + --length) = '/';
+	}
+}
+
+static int sysfs_add_link(struct dentry * parent, char * name, struct kobject * target)
+{
+	struct sysfs_dirent * parent_sd = parent->d_fsdata;
+	struct sysfs_symlink * sl;
+	int error = 0;
+
+	error = -ENOMEM;
+	sl = kmalloc(sizeof(*sl), GFP_KERNEL);
+	if (!sl)
+		goto exit1;
+
+	sl->link_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!sl->link_name)
+		goto exit2;
+
+	strcpy(sl->link_name, name);
+	sl->target_kobj = kobject_get(target);
+
+	error = sysfs_make_dirent(parent_sd, NULL, sl, S_IFLNK|S_IRWXUGO,
+				SYSFS_KOBJ_LINK);
+	if (!error)
+		return 0;
+
+	kfree(sl->link_name);
+exit2:
+	kfree(sl);
+exit1:
+	return error;
+}
+
+/**
+ *	sysfs_create_link - create symlink between two objects.
+ *	@kobj:	object whose directory we're creating the link in.
+ *	@target:	object we're pointing to.
+ *	@name:		name of the symlink.
+ */
+int sysfs_create_link(struct kobject * kobj, struct kobject * target, char * name)
+{
+	struct dentry * dentry = kobj->dentry;
+	int error = 0;
+
+	BUG_ON(!kobj || !kobj->dentry || !name);
+
+	down(&dentry->d_inode->i_sem);
+	error = sysfs_add_link(dentry, name, target);
+	up(&dentry->d_inode->i_sem);
+	return error;
+}
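+
+/*
+ * Example (illustrative; the kobjects and the link name are hypothetical):
+ *
+ *	error = sysfs_create_link(&dev->kobj, &target->kobj, "peer");
+ *
+ * creates the symlink "peer" inside dev's sysfs directory, pointing at
+ * target's directory; sysfs_remove_link(&dev->kobj, "peer") undoes it.
+ */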
+
+
+/**
+ *	sysfs_remove_link - remove symlink in object's directory.
+ *	@kobj:	object we're acting for.
+ *	@name:	name of the symlink to remove.
+ */
+
+void sysfs_remove_link(struct kobject * kobj, char * name)
+{
+	sysfs_hash_and_remove(kobj->dentry,name);
+}
+
+static int sysfs_get_target_path(struct kobject * kobj, struct kobject * target,
+				   char *path)
+{
+	char * s;
+	int depth, size;
+
+	depth = object_depth(kobj);
+	size = object_path_length(target) + depth * 3 - 1;
+	if (size > PATH_MAX)
+		return -ENAMETOOLONG;
+
+	pr_debug("%s: depth = %d, size = %d\n", __FUNCTION__, depth, size);
+
+	for (s = path; depth--; s += 3)
+		strcpy(s,"../");
+
+	fill_object_path(target, path, size);
+	pr_debug("%s: path = '%s'\n", __FUNCTION__, path);
+
+	return 0;
+}
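+
+/*
+ * Example (hypothetical layout): for a kobject three levels below the sysfs
+ * root and a target whose absolute object path is /devices/pci0/usb1, the
+ * buffer ends up holding "../../../devices/pci0/usb1".
+ */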
+
+static int sysfs_getlink(struct dentry *dentry, char * path)
+{
+	struct kobject *kobj, *target_kobj;
+	int error = 0;
+
+	kobj = sysfs_get_kobject(dentry->d_parent);
+	if (!kobj)
+		return -EINVAL;
+
+	target_kobj = sysfs_get_kobject(dentry);
+	if (!target_kobj) {
+		kobject_put(kobj);
+		return -EINVAL;
+	}
+
+	down_read(&sysfs_rename_sem);
+	error = sysfs_get_target_path(kobj, target_kobj, path);
+	up_read(&sysfs_rename_sem);
+	
+	kobject_put(kobj);
+	kobject_put(target_kobj);
+	return error;
+
+}
+
+static int sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int error = -ENOMEM;
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+	if (page)
+		error = sysfs_getlink(dentry, (char *) page); 
+	nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
+	return 0;
+}
+
+static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *page = nd_get_link(nd);
+	if (!IS_ERR(page))
+		free_page((unsigned long)page);
+}
+
+struct inode_operations sysfs_symlink_inode_operations = {
+	.readlink = generic_readlink,
+	.follow_link = sysfs_follow_link,
+	.put_link = sysfs_put_link,
+};
+
+
+EXPORT_SYMBOL_GPL(sysfs_create_link);
+EXPORT_SYMBOL_GPL(sysfs_remove_link);
+
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
new file mode 100644
index 0000000..a8a24a0
--- /dev/null
+++ b/fs/sysfs/sysfs.h
@@ -0,0 +1,95 @@
+
+extern struct vfsmount * sysfs_mount;
+extern kmem_cache_t *sysfs_dir_cachep;
+
+extern struct inode * sysfs_new_inode(mode_t mode);
+extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+
+extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *,
+				umode_t, int);
+extern struct dentry * sysfs_get_dentry(struct dentry *, const char *);
+
+extern int sysfs_add_file(struct dentry *, const struct attribute *, int);
+extern void sysfs_hash_and_remove(struct dentry * dir, const char * name);
+
+extern int sysfs_create_subdir(struct kobject *, const char *, struct dentry **);
+extern void sysfs_remove_subdir(struct dentry *);
+
+extern const unsigned char * sysfs_get_name(struct sysfs_dirent *sd);
+extern void sysfs_drop_dentry(struct sysfs_dirent *sd, struct dentry *parent);
+
+extern struct rw_semaphore sysfs_rename_sem;
+extern struct super_block * sysfs_sb;
+extern struct file_operations sysfs_dir_operations;
+extern struct file_operations sysfs_file_operations;
+extern struct file_operations bin_fops;
+extern struct inode_operations sysfs_dir_inode_operations;
+extern struct inode_operations sysfs_symlink_inode_operations;
+
+struct sysfs_symlink {
+	char * link_name;
+	struct kobject * target_kobj;
+};
+
+static inline struct kobject * to_kobj(struct dentry * dentry)
+{
+	struct sysfs_dirent * sd = dentry->d_fsdata;
+	return ((struct kobject *) sd->s_element);
+}
+
+static inline struct attribute * to_attr(struct dentry * dentry)
+{
+	struct sysfs_dirent * sd = dentry->d_fsdata;
+	return ((struct attribute *) sd->s_element);
+}
+
+static inline struct bin_attribute * to_bin_attr(struct dentry * dentry)
+{
+	struct sysfs_dirent * sd = dentry->d_fsdata;
+	return ((struct bin_attribute *) sd->s_element);
+}
+
+static inline struct kobject *sysfs_get_kobject(struct dentry *dentry)
+{
+	struct kobject * kobj = NULL;
+
+	spin_lock(&dcache_lock);
+	if (!d_unhashed(dentry)) {
+		struct sysfs_dirent * sd = dentry->d_fsdata;
+		if (sd->s_type & SYSFS_KOBJ_LINK) {
+			struct sysfs_symlink * sl = sd->s_element;
+			kobj = kobject_get(sl->target_kobj);
+		} else
+			kobj = kobject_get(sd->s_element);
+	}
+	spin_unlock(&dcache_lock);
+
+	return kobj;
+}
+
+static inline void release_sysfs_dirent(struct sysfs_dirent * sd)
+{
+	if (sd->s_type & SYSFS_KOBJ_LINK) {
+		struct sysfs_symlink * sl = sd->s_element;
+		kfree(sl->link_name);
+		kobject_put(sl->target_kobj);
+		kfree(sl);
+	}
+	kmem_cache_free(sysfs_dir_cachep, sd);
+}
+
+static inline struct sysfs_dirent * sysfs_get(struct sysfs_dirent * sd)
+{
+	if (sd) {
+		WARN_ON(!atomic_read(&sd->s_count));
+		atomic_inc(&sd->s_count);
+	}
+	return sd;
+}
+
+static inline void sysfs_put(struct sysfs_dirent * sd)
+{
+	if (atomic_dec_and_test(&sd->s_count))
+		release_sysfs_dirent(sd);
+}
+
diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES
new file mode 100644
index 0000000..66ea6e9
--- /dev/null
+++ b/fs/sysv/CHANGES
@@ -0,0 +1,60 @@
+Mon, 15 Dec 1997	  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+	*    namei.c: struct sysv_dir_inode_operations updated to use dentries.
+
+Fri, 23 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+	*    inode.c: corrected 1 track offset setting (in sb->sv_block_base).
+		      Originally it was overridden (by setting to zero)
+		      in detected_[xenix,sysv4,sysv2,coherent]. Thanks
+		      to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
+		      for identifying the problem.
+
+Tue, 27 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+        *    inode.c: added 2048-byte block support to SystemV FS.
+		      Merged detected_bs[512,1024,2048]() into one function:
+		      void detected_bs (u_char type, struct super_block *sb).
+		      Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
+		      for the patch.
+
+Wed, 4 Feb 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+	*    namei.c: removed static subdir(); is_subdir() from dcache.c
+		      is used instead. Cosmetic changes.
+
+Thu, 3 Dec 1998   Al Viro (viro@parcelfarce.linux.theplanet.co.uk)
+	*    namei.c (sysv_rmdir):
+		      Bugectomy: old check for victim being busy
+		      (inode->i_count) wasn't replaced (with checking
+		      dentry->d_count) and escaped Linus in the last round
+		      of changes. Shot and buried.
+
+Wed, 9 Dec 1998   AV
+	*    namei.c (do_sysv_rename):
+		       Fixed incorrect check for other owners + race.
+		       Removed checks that went to VFS.
+	*    namei.c (sysv_unlink):
+		       Removed checks that went to VFS.
+
+Thu, 10 Dec 1998   AV
+	*    namei.c (do_mknod):
+			Removed dead code - mknod is never asked to
+			create a symlink or directory. Incidentially,
+			it wouldn't do it right if it would be called.
+
+Sat, 26 Dec 1998   KGB
+	*    inode.c (detect_sysv4):
+			Added detection of expanded s_type field (0x10,
+			0x20 and 0x30).  Forced read-only access in this case.
+
+Sun, 21 Mar 1999   AV
+	*    namei.c (sysv_link):
+			Fixed i_count usage that resulted in dcache corruption.
+	*    inode.c:
+			Filled ->delete_inode() method with sysv_delete_inode().
+			sysv_put_inode() is gone, as it tried to do ->delete_
+			_inode()'s job.
+	*    ialloc.c: (sysv_free_inode):
+			Fixed race.
+
+Sun, 30 Apr 1999   AV
+	*    namei.c (sysv_mknod):
+			Removed dead code (S_IFREG case is now passed to
+			->create() by VFS).
diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog
new file mode 100644
index 0000000..18e3487
--- /dev/null
+++ b/fs/sysv/ChangeLog
@@ -0,0 +1,106 @@
+Thu Feb 14 2002  Andrew Morton  <akpm@zip.com.au>
+
+	* dir_commit_chunk(): call writeout_one_page() as well as
+	  waitfor_one_page() for IS_SYNC directories, so that we
+	  actually do sync the directory. (forward-port from 2.4).
+
+Thu Feb  7 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+
+	* super.c: switched to ->get_sb()
+	* ChangeLog: fixed dates ;-)
+
+2002-01-24  David S. Miller  <davem@redhat.com>
+
+	* inode.c: Include linux/init.h
+
+Mon Jan 21 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+	* ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out.
+	* i_vnode renamed to vfs_inode.  Sorry, but let's keep that
+	  consistent.
+
+Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
+
+	* include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using
+		list_entry() instead of inode->u.
+	* include/linux/sysv_fs_i.h: Add 'struct inode  i_vnode' field to
+		sysv_inode_info structure.
+	* inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode
+		sop methods, add infrastructure for per-fs inode slab cache.
+	* super.c (init_sysv_fs): Initialize inode cache, recover properly
+		in the case of failed register_filesystem for V7.
+	(exit_sysv_fs): Destroy inode cache.
+
+Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
+
+	* include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I().
+	* dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to
+		access fs-private inode data.
+	* ialloc.c (sysv_new_inode): Likewise.
+	* inode.c (sysv_read_inode): Likewise.
+	(sysv_update_inode): Likewise.
+	* itree.c (get_branch): Likewise.
+	(sysv_truncate): Likewise.
+	* symlink.c (sysv_readlink): Likewise.
+	(sysv_follow_link): Likewise.
+
+Fri Jan  4 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+
+	* ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname().
+	* inode.c (sysv_read_inode): Likewise.
+	  (sysv_update_inode): Likewise.
+	  (sysv_sync_inode): Likewise.
+	* super.c (detect_sysv): Likewise.
+	  (complete_read_super): Likewise.
+	  (sysv_read_super): Likewise.
+	  (v7_read_super): Likewise.
+
+Sun Dec 30 2001  Manfred Spraul  <manfreds@colorfullife.com>
+
+	* dir.c (dir_commit_chunk): Do not set dir->i_version.
+	(sysv_readdir): Likewise.
+
+Thu Dec 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+
+	* itree.c (get_block): Use map_bh() to fill out bh_result.
+
+Tue Dec 25 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+
+	* super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize.
+	  (v7_read_super): Likewise.
+
+Tue Nov 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
+
+	* itree.c (get_block): Change type for iblock argument to sector_t.
+	* super.c (sysv_read_super): Set s_blocksize early.
+	  (v7_read_super): Likewise.
+	* balloc.c (sysv_new_block): Use sb_bread(). instead of bread().
+	  (sysv_count_free_blocks): Likewise.
+	* ialloc.c (sysv_raw_inode): Likewise.
+	* itree.c (get_branch): Likewise.
+	  (free_branches): Likewise.
+	* super.c (sysv_read_super): Likewise.
+	  (v7_read_super): Likewise.
+
+Sat Dec 15 2001  Christoph Hellwig  <hch@infradead.org>
+
+	* inode.c (sysv_read_inode): Mark inode as bad in case of failure.
+	* super.c (complete_read_super): Check for bad root inode.
+
+Wed Nov 21 2001  Andrew Morton  <andrewm@uow.edu.au>
+
+	* file.c (sysv_sync_file): Call fsync_inode_data_buffers.
+
+Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
+
+	* dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h:
+	Implement per-Inode lookup offset cache.
+	Modelled after Ted's ext2 patch.
+
+Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
+
+	* inode.c, super.c, include/linux/sysv_fs.h,
+	  include/linux/sysv_fs_sb.h:
+	Remove symlink faking.	Noone really wants to use these as
+	linux filesystems and native OSes don't support it anyway.
+
+
diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO
new file mode 100644
index 0000000..de4e4d1
--- /dev/null
+++ b/fs/sysv/INTRO
@@ -0,0 +1,182 @@
+This is the implementation of the SystemV/Coherent filesystem for Linux.
+It grew out of separate filesystem implementations
+
+    Xenix FS      Doug Evans <dje@cygnus.com>  June 1992
+    SystemV FS    Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993
+    Coherent FS   B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993
+
+and was merged together in July 1993.
+
+These filesystems are rather similar. Here is a comparison with Minix FS:
+
+* Linux fdisk reports on partitions
+  - Minix FS     0x81 Linux/Minix
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  0x08 AIX bootable
+
+* Size of a block or zone (data allocation unit on disk)
+  - Minix FS     1024
+  - Xenix FS     1024 (also 512 ??)
+  - SystemV FS   1024 (also 512 and 2048)
+  - Coherent FS   512
+
+* General layout: all have one boot block, one super block and
+  separate areas for inodes and for directories/data.
+  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
+  all the block numbers (including the super block) are offset by one track.
+
+* Byte ordering of "short" (16 bit entities) on disk:
+  - Minix FS     little endian  0 1
+  - Xenix FS     little endian  0 1
+  - SystemV FS   little endian  0 1
+  - Coherent FS  little endian  0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Byte ordering of "long" (32 bit entities) on disk:
+  - Minix FS     little endian  0 1 2 3
+  - Xenix FS     little endian  0 1 2 3
+  - SystemV FS   little endian  0 1 2 3
+  - Coherent FS  PDP-11         2 3 0 1
+  Of course, this affects only the file system, not the data of files on it!
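+  For example, a 32-bit value stored in the Coherent (PDP-11) order can be
+  decoded as two little-endian 16-bit halves with the high half stored first.
+  An illustrative sketch (not code taken from this driver):
+
+                    unsigned long pdp_to_cpu32(const unsigned char *p)
+                    {
+                        return ((unsigned long)(p[0] | (p[1] << 8)) << 16)
+                               | (p[2] | (p[3] << 8));
+                    }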
+
+* Inode on disk: "short", 0 means non-existent, the root dir ino is:
+  - Minix FS                            1
+  - Xenix FS, SystemV FS, Coherent FS   2
+
+* Maximum number of hard links to a file:
+  - Minix FS     250
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  >=10000
+
+* Free inode management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      There is a cache of a certain number of free inodes in the super-block.
+      When it is exhausted, new free inodes are found using a linear search.
+
+* Free block management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      Free blocks are organized in a "free list". Maybe a misleading term,
+      since it is not true that every free block contains a pointer to
+      the next free block. Rather, the free blocks are organized in chunks
+      of limited size, and every now and then a free block contains pointers
+      to the free blocks pertaining to the next chunk; the first of these
+      contains pointers and so on. The list terminates with a "block number"
+      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
+
+* Super-block location:
+  - Minix FS     block 1 = bytes 1024..2047
+  - Xenix FS     block 1 = bytes 1024..2047
+  - SystemV FS   bytes 512..1023
+  - Coherent FS  block 1 = bytes 512..1023
+
+* Super-block layout:
+  - Minix FS
+                    unsigned short s_ninodes;
+                    unsigned short s_nzones;
+                    unsigned short s_imap_blocks;
+                    unsigned short s_zmap_blocks;
+                    unsigned short s_firstdatazone;
+                    unsigned short s_log_zone_size;
+                    unsigned long s_max_size;
+                    unsigned short s_magic;
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short s_firstdatazone;
+                    unsigned long  s_nzones;
+                    unsigned short s_fzone_count;
+                    unsigned long  s_fzones[NICFREE];
+                    unsigned short s_finode_count;
+                    unsigned short s_finodes[NICINOD];
+                    char           s_flock;
+                    char           s_ilock;
+                    char           s_modified;
+                    char           s_rdonly;
+                    unsigned long  s_time;
+                    short          s_dinfo[4]; -- SystemV FS only
+                    unsigned long  s_free_zones;
+                    unsigned short s_free_inodes;
+                    short          s_dinfo[4]; -- Xenix FS only
+                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
+                    char           s_fname[6];
+                    char           s_fpack[6];
+    then they differ considerably:
+        Xenix FS
+                    char           s_clean;
+                    char           s_fill[371];
+                    long           s_magic;
+                    long           s_type;
+        SystemV FS
+                    long           s_fill[12 or 14];
+                    long           s_state;
+                    long           s_magic;
+                    long           s_type;
+        Coherent FS
+                    unsigned long  s_unique;
+    Note that Coherent FS has no magic.
+
+* Inode layout:
+  - Minix FS
+                    unsigned short i_mode;
+                    unsigned short i_uid;
+                    unsigned long  i_size;
+                    unsigned long  i_time;
+                    unsigned char  i_gid;
+                    unsigned char  i_nlinks;
+                    unsigned short i_zone[7+1+1];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short i_mode;
+                    unsigned short i_nlink;
+                    unsigned short i_uid;
+                    unsigned short i_gid;
+                    unsigned long  i_size;
+                    unsigned char  i_zone[3*(10+1+1+1)];
+                    unsigned long  i_atime;
+                    unsigned long  i_mtime;
+                    unsigned long  i_ctime;
+
+* Regular file data blocks are organized as
+  - Minix FS
+               7 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+  - Xenix FS, SystemV FS, Coherent FS
+              10 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+               1 triple-indirect block (pointer to pointers to pointers to blocks)
+
+* Inode size, inodes per block
+  - Minix FS        32   32
+  - Xenix FS        64   16
+  - SystemV FS      64   16
+  - Coherent FS     64    8
+
+* Directory entry on disk
+  - Minix FS
+                    unsigned short inode;
+                    char name[14/30];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short inode;
+                    char name[14];
+
+* Dir entry size, dir entries per block
+  - Minix FS     16/32    64/32
+  - Xenix FS     16       64
+  - SystemV FS   16       64
+  - Coherent FS  16       32
+
+* How to implement symbolic links such that the host fsck doesn't scream:
+  - Minix FS     normal
+  - Xenix FS     kludge: as regular files with  chmod 1000
+  - SystemV FS   ??
+  - Coherent FS  kludge: as regular files with  chmod 1000
+
+
+Notation: We often speak of a "block" but mean a zone (the allocation unit)
+and not the disk driver's notion of "block".
+
+
+Bruno Haible  <haible@ma2s2.mathematik.uni-karlsruhe.de>
diff --git a/fs/sysv/Makefile b/fs/sysv/Makefile
new file mode 100644
index 0000000..3591f9d
--- /dev/null
+++ b/fs/sysv/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux SystemV/Coherent filesystem routines.
+#
+
+obj-$(CONFIG_SYSV_FS) += sysv.o
+
+sysv-objs := ialloc.o balloc.o inode.o itree.o file.o dir.o \
+	     namei.o super.o symlink.o
diff --git a/fs/sysv/balloc.c b/fs/sysv/balloc.c
new file mode 100644
index 0000000..9a6ad96a
--- /dev/null
+++ b/fs/sysv/balloc.c
@@ -0,0 +1,239 @@
+/*
+ *  linux/fs/sysv/balloc.c
+ *
+ *  minix/bitmap.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext/freelists.c
+ *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
+ *
+ *  xenix/alloc.c
+ *  Copyright (C) 1992  Doug Evans
+ *
+ *  coh/alloc.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/balloc.c
+ *  Copyright (C) 1993  Bruno Haible
+ *
+ *  This file contains code for allocating/freeing blocks.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+#include "sysv.h"
+
+/* We don't trust the value of
+   sb->sv_sbd2->s_tfree = *sb->sv_free_blocks
+   but we nevertheless keep it up to date. */
+
+static inline sysv_zone_t *get_chunk(struct super_block *sb, struct buffer_head *bh)
+{
+	char *bh_data = bh->b_data;
+
+	if (SYSV_SB(sb)->s_type == FSTYPE_SYSV4)
+		return (sysv_zone_t*)(bh_data+4);
+	else
+		return (sysv_zone_t*)(bh_data+2);
+}
+
+/* NOTE NOTE NOTE: nr is a block number _as_ _stored_ _on_ _disk_ */
+
+void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
+{
+	struct sysv_sb_info * sbi = SYSV_SB(sb);
+	struct buffer_head * bh;
+	sysv_zone_t *blocks = sbi->s_bcache;
+	unsigned count;
+	unsigned block = fs32_to_cpu(sbi, nr);
+
+	/*
+	 * This code does not work at all for AFS (it has a bitmap
+	 * free list).  As AFS is supposed to be read-only no one
+	 * should call this for an AFS filesystem anyway...
+	 */
+	if (sbi->s_type == FSTYPE_AFS)
+		return;
+
+	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
+		printk("sysv_free_block: trying to free block not in datazone\n");
+		return;
+	}
+
+	lock_super(sb);
+	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
+
+	if (count > sbi->s_flc_size) {
+		printk("sysv_free_block: flc_count > flc_size\n");
+		unlock_super(sb);
+		return;
+	}
+	/* If the free list head in super-block is full, it is copied
+	 * into this block being freed, ditto if it's completely empty
+	 * (applies only on Coherent).
+	 */
+	if (count == sbi->s_flc_size || count == 0) {
+		block += sbi->s_block_base;
+		bh = sb_getblk(sb, block);
+		if (!bh) {
+			printk("sysv_free_block: getblk() failed\n");
+			unlock_super(sb);
+			return;
+		}
+		memset(bh->b_data, 0, sb->s_blocksize);
+		*(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
+		memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
+		mark_buffer_dirty(bh);
+		set_buffer_uptodate(bh);
+		brelse(bh);
+		count = 0;
+	}
+	sbi->s_bcache[count++] = nr;
+
+	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
+	fs32_add(sbi, sbi->s_free_blocks, 1);
+	dirty_sb(sb);
+	unlock_super(sb);
+}
+
+sysv_zone_t sysv_new_block(struct super_block * sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	unsigned int block;
+	sysv_zone_t nr;
+	struct buffer_head * bh;
+	unsigned count;
+
+	lock_super(sb);
+	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
+
+	if (count == 0) /* Applies only to Coherent FS */
+		goto Enospc;
+	nr = sbi->s_bcache[--count];
+	if (nr == 0)  /* Applies only to Xenix FS, SystemV FS */
+		goto Enospc;
+
+	block = fs32_to_cpu(sbi, nr);
+
+	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
+
+	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
+		printk("sysv_new_block: new block %d is not in data zone\n",
+			block);
+		goto Enospc;
+	}
+
+	if (count == 0) { /* the last block continues the free list */
+		unsigned count;
+
+		block += sbi->s_block_base;
+		if (!(bh = sb_bread(sb, block))) {
+			printk("sysv_new_block: cannot read free-list block\n");
+			/* retry this same block next time */
+			*sbi->s_bcache_count = cpu_to_fs16(sbi, 1);
+			goto Enospc;
+		}
+		count = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
+		if (count > sbi->s_flc_size) {
+			printk("sysv_new_block: free-list block with >flc_size entries\n");
+			brelse(bh);
+			goto Enospc;
+		}
+		*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
+		memcpy(sbi->s_bcache, get_chunk(sb, bh),
+				count * sizeof(sysv_zone_t));
+		brelse(bh);
+	}
+	/* Now the free list head in the superblock is valid again. */
+	fs32_add(sbi, sbi->s_free_blocks, -1);
+	dirty_sb(sb);
+	unlock_super(sb);
+	return nr;
+
+Enospc:
+	unlock_super(sb);
+	return 0;
+}
+
+unsigned long sysv_count_free_blocks(struct super_block * sb)
+{
+	struct sysv_sb_info * sbi = SYSV_SB(sb);
+	int sb_count;
+	int count;
+	struct buffer_head * bh = NULL;
+	sysv_zone_t *blocks;
+	unsigned block;
+	int n;
+
+	/*
+	 * This code does not work at all for AFS (it has a bitmap
+	 * free list).  As AFS is supposed to be read-only we just
+	 * lie and say it has no free block at all.
+	 */
+	if (sbi->s_type == FSTYPE_AFS)
+		return 0;
+
+	lock_super(sb);
+	sb_count = fs32_to_cpu(sbi, *sbi->s_free_blocks);
+
+	if (0)
+		goto trust_sb;
+
+	/* this causes a lot of disk traffic ... */
+	count = 0;
+	n = fs16_to_cpu(sbi, *sbi->s_bcache_count);
+	blocks = sbi->s_bcache;
+	while (1) {
+		sysv_zone_t zone;
+		if (n > sbi->s_flc_size)
+			goto E2big;
+		zone = 0;
+		while (n && (zone = blocks[--n]) != 0)
+			count++;
+		if (zone == 0)
+			break;
+
+		block = fs32_to_cpu(sbi, zone);
+		if (bh)
+			brelse(bh);
+
+		if (block < sbi->s_firstdatazone || block >= sbi->s_nzones)
+			goto Einval;
+		block += sbi->s_block_base;
+		bh = sb_bread(sb, block);
+		if (!bh)
+			goto Eio;
+		n = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
+		blocks = get_chunk(sb, bh);
+	}
+	if (bh)
+		brelse(bh);
+	if (count != sb_count)
+		goto Ecount;
+done:
+	unlock_super(sb);
+	return count;
+
+Einval:
+	printk("sysv_count_free_blocks: new block %d is not in data zone\n",
+		block);
+	goto trust_sb;
+Eio:
+	printk("sysv_count_free_blocks: cannot read free-list block\n");
+	goto trust_sb;
+E2big:
+	printk("sysv_count_free_blocks: >flc_size entries in free-list block\n");
+	if (bh)
+		brelse(bh);
+trust_sb:
+	count = sb_count;
+	goto done;
+Ecount:
+	printk("sysv_count_free_blocks: free block count was %d, "
+		"correcting to %d\n", sb_count, count);
+	if (!(sb->s_flags & MS_RDONLY)) {
+		*sbi->s_free_blocks = cpu_to_fs32(sbi, count);
+		dirty_sb(sb);
+	}
+	goto done;
+}
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
new file mode 100644
index 0000000..69a085a
--- /dev/null
+++ b/fs/sysv/dir.c
@@ -0,0 +1,388 @@
+/*
+ *  linux/fs/sysv/dir.c
+ *
+ *  minix/dir.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  coh/dir.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/dir.c
+ *  Copyright (C) 1993  Bruno Haible
+ *
+ *  SystemV/Coherent directory handling functions
+ */
+
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+#include "sysv.h"
+
+static int sysv_readdir(struct file *, void *, filldir_t);
+
+struct file_operations sysv_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= sysv_readdir,
+	.fsync		= sysv_sync_file,
+};
+
+static inline void dir_put_page(struct page *page)
+{
+	kunmap(page);
+	page_cache_release(page);
+}
+
+static inline unsigned long dir_pages(struct inode *inode)
+{
+	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
+}
+
+static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
+{
+	struct inode *dir = (struct inode *)page->mapping->host;
+	int err = 0;
+
+	page->mapping->a_ops->commit_write(NULL, page, from, to);
+	if (IS_DIRSYNC(dir))
+		err = write_one_page(page, 1);
+	else
+		unlock_page(page);
+	return err;
+}
+
+static struct page * dir_get_page(struct inode *dir, unsigned long n)
+{
+	struct address_space *mapping = dir->i_mapping;
+	struct page *page = read_cache_page(mapping, n,
+				(filler_t*)mapping->a_ops->readpage, NULL);
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		kmap(page);
+		if (!PageUptodate(page))
+			goto fail;
+	}
+	return page;
+
+fail:
+	dir_put_page(page);
+	return ERR_PTR(-EIO);
+}
+
+static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	unsigned long pos = filp->f_pos;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	unsigned offset = pos & ~PAGE_CACHE_MASK;
+	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned long npages = dir_pages(inode);
+
+	lock_kernel();
+
+	pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
+	if (pos >= inode->i_size)
+		goto done;
+
+	for ( ; n < npages; n++, offset = 0) {
+		char *kaddr, *limit;
+		struct sysv_dir_entry *de;
+		struct page *page = dir_get_page(inode, n);
+
+		if (IS_ERR(page))
+			continue;
+		kaddr = (char *)page_address(page);
+		de = (struct sysv_dir_entry *)(kaddr+offset);
+		limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		for ( ;(char*)de <= limit; de++) {
+			char *name = de->name;
+			int over;
+
+			if (!de->inode)
+				continue;
+
+			offset = (char *)de - kaddr;
+
+			over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN),
+					(n<<PAGE_CACHE_SHIFT) | offset,
+					fs16_to_cpu(SYSV_SB(sb), de->inode),
+					DT_UNKNOWN);
+			if (over) {
+				dir_put_page(page);
+				goto done;
+			}
+		}
+		dir_put_page(page);
+	}
+
+done:
+	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
+	unlock_kernel();
+	return 0;
+}
+
+/* compare strings: name[0..len-1] (not zero-terminated) and
+ * buffer[0..] (filled with zeroes up to buffer[0..maxlen-1])
+ */
+static inline int namecompare(int len, int maxlen,
+	const char * name, const char * buffer)
+{
+	if (len < maxlen && buffer[len])
+		return 0;
+	return !memcmp(name, buffer, len);
+}
+
+/*
+ *	sysv_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name.
+ * It returns the directory entry found, and the page it was found in
+ * via the res_page parameter. It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ */
+struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page)
+{
+	const char * name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	struct inode * dir = dentry->d_parent->d_inode;
+	unsigned long start, n;
+	unsigned long npages = dir_pages(dir);
+	struct page *page = NULL;
+	struct sysv_dir_entry *de;
+
+	*res_page = NULL;
+
+	start = SYSV_I(dir)->i_dir_start_lookup;
+	if (start >= npages)
+		start = 0;
+	n = start;
+
+	do {
+		char *kaddr;
+		page = dir_get_page(dir, n);
+		if (!IS_ERR(page)) {
+			kaddr = (char*)page_address(page);
+			de = (struct sysv_dir_entry *) kaddr;
+			kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+			for ( ; (char *) de <= kaddr ; de++) {
+				if (!de->inode)
+					continue;
+				if (namecompare(namelen, SYSV_NAMELEN,
+							name, de->name))
+					goto found;
+			}
+		}
+		dir_put_page(page);
+
+		if (++n >= npages)
+			n = 0;
+	} while (n != start);
+
+	return NULL;
+
+found:
+	SYSV_I(dir)->i_dir_start_lookup = n;
+	*res_page = page;
+	return de;
+}
+
+int sysv_add_link(struct dentry *dentry, struct inode *inode)
+{
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char * name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	struct page *page = NULL;
+	struct sysv_dir_entry * de;
+	unsigned long npages = dir_pages(dir);
+	unsigned long n;
+	char *kaddr;
+	unsigned from, to;
+	int err;
+
+	/* We take care of directory expansion in the same loop */
+	for (n = 0; n <= npages; n++) {
+		page = dir_get_page(dir, n);
+		err = PTR_ERR(page);
+		if (IS_ERR(page))
+			goto out;
+		kaddr = (char*)page_address(page);
+		de = (struct sysv_dir_entry *)kaddr;
+		kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		while ((char *)de <= kaddr) {
+			if (!de->inode)
+				goto got_it;
+			err = -EEXIST;
+			if (namecompare(namelen, SYSV_NAMELEN, name, de->name)) 
+				goto out_page;
+			de++;
+		}
+		dir_put_page(page);
+	}
+	BUG();
+	return -EINVAL;
+
+got_it:
+	from = (char*)de - (char*)page_address(page);
+	to = from + SYSV_DIRSIZE;
+	lock_page(page);
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		goto out_unlock;
+	memcpy (de->name, name, namelen);
+	memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
+	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
+	err = dir_commit_chunk(page, from, to);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+out_page:
+	dir_put_page(page);
+out:
+	return err;
+out_unlock:
+	unlock_page(page);
+	goto out_page;
+}
+
+int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = (struct inode*)mapping->host;
+	char *kaddr = (char*)page_address(page);
+	unsigned from = (char*)de - kaddr;
+	unsigned to = from + SYSV_DIRSIZE;
+	int err;
+
+	lock_page(page);
+	err = mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		BUG();
+	de->inode = 0;
+	err = dir_commit_chunk(page, from, to);
+	dir_put_page(page);
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return err;
+}
+
+int sysv_make_empty(struct inode *inode, struct inode *dir)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page = grab_cache_page(mapping, 0);
+	struct sysv_dir_entry * de;
+	char *base;
+	int err;
+
+	if (!page)
+		return -ENOMEM;
+	kmap(page);
+	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * SYSV_DIRSIZE);
+	if (err) {
+		unlock_page(page);
+		goto fail;
+	}
+
+	base = (char*)page_address(page);
+	memset(base, 0, PAGE_CACHE_SIZE);
+
+	de = (struct sysv_dir_entry *) base;
+	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
+	strcpy(de->name,".");
+	de++;
+	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
+	strcpy(de->name,"..");
+
+	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
+fail:
+	kunmap(page);
+	page_cache_release(page);
+	return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int sysv_empty_dir(struct inode * inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct page *page = NULL;
+	unsigned long i, npages = dir_pages(inode);
+
+	for (i = 0; i < npages; i++) {
+		char *kaddr;
+		struct sysv_dir_entry * de;
+		page = dir_get_page(inode, i);
+
+		if (IS_ERR(page))
+			continue;
+
+		kaddr = (char *)page_address(page);
+		de = (struct sysv_dir_entry *)kaddr;
+		kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
+
+		for ( ;(char *)de <= kaddr; de++) {
+			if (!de->inode)
+				continue;
+			/* check for . and .. */
+			if (de->name[0] != '.')
+				goto not_empty;
+			if (!de->name[1]) {
+				if (de->inode == cpu_to_fs16(SYSV_SB(sb),
+							inode->i_ino))
+					continue;
+				goto not_empty;
+			}
+			if (de->name[1] != '.' || de->name[2])
+				goto not_empty;
+		}
+		dir_put_page(page);
+	}
+	return 1;
+
+not_empty:
+	dir_put_page(page);
+	return 0;
+}
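
An in-use entry only leaves the directory "empty" if its name is exactly "."
or ".."; anything else bails out through not_empty. A tiny user-space sketch
of that name test (the kernel additionally checks that "." points back at the
directory's own inode, which this stub ignores):

#include <stdio.h>

static int is_dot_or_dotdot(const char *name)
{
	if (name[0] != '.')
		return 0;
	if (name[1] == '\0')
		return 1;                               /* "." */
	return name[1] == '.' && name[2] == '\0';       /* ".." */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       is_dot_or_dotdot("."),
	       is_dot_or_dotdot(".."),
	       is_dot_or_dotdot(".hidden"),
	       is_dot_or_dotdot("file"));               /* prints 1 1 0 0 */
	return 0;
}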
+
+/* Releases the page */
+void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
+	struct inode *inode)
+{
+	struct inode *dir = (struct inode*)page->mapping->host;
+	unsigned from = (char *)de-(char*)page_address(page);
+	unsigned to = from + SYSV_DIRSIZE;
+	int err;
+
+	lock_page(page);
+	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+	if (err)
+		BUG();
+	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
+	err = dir_commit_chunk(page, from, to);
+	dir_put_page(page);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(dir);
+}
+
+struct sysv_dir_entry * sysv_dotdot (struct inode *dir, struct page **p)
+{
+	struct page *page = dir_get_page(dir, 0);
+	struct sysv_dir_entry *de = NULL;
+
+	if (!IS_ERR(page)) {
+		de = (struct sysv_dir_entry*) page_address(page) + 1;
+		*p = page;
+	}
+	return de;
+}
+
+ino_t sysv_inode_by_name(struct dentry *dentry)
+{
+	struct page *page;
+	struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
+	ino_t res = 0;
+	
+	if (de) {
+		res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
+		dir_put_page(page);
+	}
+	return res;
+}
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
new file mode 100644
index 0000000..da69abc
--- /dev/null
+++ b/fs/sysv/file.c
@@ -0,0 +1,49 @@
+/*
+ *  linux/fs/sysv/file.c
+ *
+ *  minix/file.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  coh/file.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/file.c
+ *  Copyright (C) 1993  Bruno Haible
+ *
+ *  SystemV/Coherent regular file handling primitives
+ */
+
+#include "sysv.h"
+
+/*
+ * We have mostly NULLs here: the current defaults are OK for
+ * the coh filesystem.
+ */
+struct file_operations sysv_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.fsync		= sysv_sync_file,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations sysv_file_inode_operations = {
+	.truncate	= sysv_truncate,
+	.getattr	= sysv_getattr,
+};
+
+int sysv_sync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	int err;
+
+	err = sync_mapping_buffers(inode->i_mapping);
+	if (!(inode->i_state & I_DIRTY))
+		return err;
+	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+		return err;
+	
+	err |= sysv_sync_inode(inode);
+	return err ? -EIO : 0;
+}
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
new file mode 100644
index 0000000..9b585d1
--- /dev/null
+++ b/fs/sysv/ialloc.c
@@ -0,0 +1,240 @@
+/*
+ *  linux/fs/sysv/ialloc.c
+ *
+ *  minix/bitmap.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext/freelists.c
+ *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
+ *
+ *  xenix/alloc.c
+ *  Copyright (C) 1992  Doug Evans
+ *
+ *  coh/alloc.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/ialloc.c
+ *  Copyright (C) 1993  Bruno Haible
+ *
+ *  This file contains code for allocating/freeing inodes.
+ */
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include "sysv.h"
+
+/* We don't trust the value of
+   sbd2->s_tinode (i.e. *sbi->s_sb_total_free_inodes),
+   but we nevertheless keep it up to date. */
+
+/* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */
+
+/* return &sbi->s_sb_fic_inodes[i] = &sbd->s_inode[i]; */
+static inline sysv_ino_t *
+sv_sb_fic_inode(struct super_block * sb, unsigned int i)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+
+	if (sbi->s_bh1 == sbi->s_bh2)
+		return &sbi->s_sb_fic_inodes[i];
+	else {
+		/* 512 byte Xenix FS */
+		unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]);
+		if (offset < 512)
+			return (sysv_ino_t*)(sbi->s_sbd1 + offset);
+		else
+			return (sysv_ino_t*)(sbi->s_sbd2 + offset);
+	}
+}
+
+struct sysv_inode *
+sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	struct sysv_inode *res;
+	int block = sbi->s_firstinodezone + sbi->s_block_base;
+
+	block += (ino-1) >> sbi->s_inodes_per_block_bits;
+	*bh = sb_bread(sb, block);
+	if (!*bh)
+		return NULL;
+	res = (struct sysv_inode *)(*bh)->b_data;
+	return res + ((ino-1) & sbi->s_inodes_per_block_1);
+}
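
The inode number is turned into a disk block plus a slot inside that block by
one shift and one mask. A worked user-space example, assuming 512-byte blocks
holding eight 64-byte inodes (s_inodes_per_block_bits = 3), a first inode zone
at block 2 and a zero block base; all of those values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned firstinodezone = 2;    /* assumed */
	unsigned block_base     = 0;    /* assumed */
	unsigned per_block_bits = 3;    /* 8 inodes per 512-byte block */
	unsigned per_block_1    = 7;    /* inodes_per_block - 1 */
	unsigned ino            = 100;  /* example inode number */

	unsigned block = firstinodezone + block_base + ((ino - 1) >> per_block_bits);
	unsigned slot  = (ino - 1) & per_block_1;

	/* inode 100 lives in block 2 + 12 = 14, slot 3 (byte offset 3 * 64) */
	printf("ino %u -> block %u, slot %u\n", ino, block, slot);
	return 0;
}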
+
+static int refill_free_cache(struct super_block *sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	struct buffer_head * bh;
+	struct sysv_inode * raw_inode;
+	int i = 0, ino;
+
+	ino = SYSV_ROOT_INO+1;
+	raw_inode = sysv_raw_inode(sb, ino, &bh);
+	if (!raw_inode)
+		goto out;
+	while (ino <= sbi->s_ninodes) {
+		if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) {
+			*sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino);
+			if (i == sbi->s_fic_size)
+				break;
+		}
+		if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
+			brelse(bh);
+			raw_inode = sysv_raw_inode(sb, ino, &bh);
+			if (!raw_inode)
+				goto out;
+		} else
+			raw_inode++;
+	}
+	brelse(bh);
+out:
+	return i;
+}
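
refill_free_cache() repopulates a small stack of free inode numbers kept in
the superblock; sysv_new_inode() below pops from the top of that stack and
only rescans the inode table when it runs dry. A minimal user-space model of
the same pop-and-refill scheme; FIC_SIZE, NINODES and the in_use[] array are
illustrative stand-ins, not the on-disk format:

#include <stdio.h>

#define FIC_SIZE 100                 /* like NICINOD */
#define NINODES  1024                /* inodes in this toy filesystem */
#define ROOT_INO 2                   /* like SYSV_ROOT_INO */

static unsigned short cache[FIC_SIZE];
static unsigned count;
static unsigned char in_use[NINODES + 1];     /* 1 = allocated on "disk" */

static unsigned refill(void)
{
	unsigned ino;

	count = 0;
	/* scan upward from the first ordinary inode, as the kernel does */
	for (ino = ROOT_INO + 1; ino <= NINODES && count < FIC_SIZE; ino++)
		if (!in_use[ino])
			cache[count++] = (unsigned short)ino;
	return count;
}

static unsigned alloc_ino(void)
{
	if (count == 0 && refill() == 0)
		return 0;                /* the kernel returns -ENOSPC here */
	in_use[cache[--count]] = 1;      /* pop from the top, like sysv_new_inode() */
	return cache[count];
}

int main(void)
{
	unsigned a = alloc_ino();
	unsigned b = alloc_ino();
	unsigned c = alloc_ino();

	/* the cache filled with inodes 3..102, so pops come back highest-first */
	printf("%u %u %u\n", a, b, c);   /* prints 102 101 100 */
	return 0;
}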
+
+void sysv_free_inode(struct inode * inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	unsigned int ino;
+	struct buffer_head * bh;
+	struct sysv_inode * raw_inode;
+	unsigned count;
+
+	sb = inode->i_sb;
+	ino = inode->i_ino;
+	if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) {
+		printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n");
+		return;
+	}
+	raw_inode = sysv_raw_inode(sb, ino, &bh);
+	clear_inode(inode);
+	if (!raw_inode) {
+		printk("sysv_free_inode: unable to read inode block on device "
+		       "%s\n", inode->i_sb->s_id);
+		return;
+	}
+	lock_super(sb);
+	count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
+	if (count < sbi->s_fic_size) {
+		*sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino);
+		*sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
+	}
+	fs16_add(sbi, sbi->s_sb_total_free_inodes, 1);
+	dirty_sb(sb);
+	memset(raw_inode, 0, sizeof(struct sysv_inode));
+	mark_buffer_dirty(bh);
+	unlock_super(sb);
+	brelse(bh);
+}
+
+struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
+{
+	struct super_block *sb = dir->i_sb;
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	struct inode *inode;
+	sysv_ino_t ino;
+	unsigned count;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	lock_super(sb);
+	count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
+	if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) {
+		count = refill_free_cache(sb);
+		if (count == 0) {
+			iput(inode);
+			unlock_super(sb);
+			return ERR_PTR(-ENOSPC);
+		}
+	}
+	/* Now count > 0. */
+	ino = *sv_sb_fic_inode(sb,--count);
+	*sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
+	fs16_add(sbi, sbi->s_sb_total_free_inodes, -1);
+	dirty_sb(sb);
+	
+	if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+
+	inode->i_uid = current->fsuid;
+	inode->i_ino = fs16_to_cpu(sbi, ino);
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_blocks = inode->i_blksize = 0;
+	memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
+	SYSV_I(inode)->i_dir_start_lookup = 0;
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+
+	inode->i_mode = mode;		/* for sysv_write_inode() */
+	sysv_write_inode(inode, 0);	/* ensure inode not allocated again */
+	mark_inode_dirty(inode);	/* cleared by sysv_write_inode() */
+	/* That's it. */
+	unlock_super(sb);
+	return inode;
+}
+
+unsigned long sysv_count_free_inodes(struct super_block * sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	struct buffer_head * bh;
+	struct sysv_inode * raw_inode;
+	int ino, count, sb_count;
+
+	lock_super(sb);
+
+	sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes);
+
+	if (0)
+		goto trust_sb;
+
+	/* this causes a lot of disk traffic ... */
+	count = 0;
+	ino = SYSV_ROOT_INO+1;
+	raw_inode = sysv_raw_inode(sb, ino, &bh);
+	if (!raw_inode)
+		goto Eio;
+	while (ino <= sbi->s_ninodes) {
+		if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0)
+			count++;
+		if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
+			brelse(bh);
+			raw_inode = sysv_raw_inode(sb, ino, &bh);
+			if (!raw_inode)
+				goto Eio;
+		} else
+			raw_inode++;
+	}
+	brelse(bh);
+	if (count != sb_count)
+		goto Einval;
+out:
+	unlock_super(sb);
+	return count;
+
+Einval:
+	printk("sysv_count_free_inodes: "
+		"free inode count was %d, correcting to %d\n",
+		sb_count, count);
+	if (!(sb->s_flags & MS_RDONLY)) {
+		*sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count);
+		dirty_sb(sb);
+	}
+	goto out;
+
+Eio:
+	printk("sysv_count_free_inodes: unable to read inode table\n");
+trust_sb:
+	count = sb_count;
+	goto out;
+}
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
new file mode 100644
index 0000000..0530077
--- /dev/null
+++ b/fs/sysv/inode.c
@@ -0,0 +1,354 @@
+/*
+ *  linux/fs/sysv/inode.c
+ *
+ *  minix/inode.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  xenix/inode.c
+ *  Copyright (C) 1992  Doug Evans
+ *
+ *  coh/inode.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/inode.c
+ *  Copyright (C) 1993  Paul B. Monday
+ *
+ *  sysv/inode.c
+ *  Copyright (C) 1993  Bruno Haible
+ *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
+ *
+ *  This file contains code for allocating/freeing inodes and for read/writing
+ *  the superblock.
+ */
+
+#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <asm/byteorder.h>
+#include "sysv.h"
+
+/* This is only called on sync() and umount(), when s_dirt=1. */
+static void sysv_write_super(struct super_block *sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	unsigned long time = get_seconds(), old_time;
+
+	lock_kernel();
+	if (sb->s_flags & MS_RDONLY)
+		goto clean;
+
+	/*
+	 * If we are going to write out the super block,
+	 * then attach current time stamp.
+	 * But if the filesystem was marked clean, keep it clean.
+	 */
+	old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
+	if (sbi->s_type == FSTYPE_SYSV4) {
+		if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
+			*sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
+		*sbi->s_sb_time = cpu_to_fs32(sbi, time);
+		mark_buffer_dirty(sbi->s_bh2);
+	}
+clean:
+	sb->s_dirt = 0;
+	unlock_kernel();
+}
+
+static int sysv_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	if (sbi->s_forced_ro)
+		*flags |= MS_RDONLY;
+	if (!(*flags & MS_RDONLY))
+		sb->s_dirt = 1;
+	return 0;
+}
+
+static void sysv_put_super(struct super_block *sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		/* XXX ext2 also updates the state here */
+		mark_buffer_dirty(sbi->s_bh1);
+		if (sbi->s_bh1 != sbi->s_bh2)
+			mark_buffer_dirty(sbi->s_bh2);
+	}
+
+	brelse(sbi->s_bh1);
+	if (sbi->s_bh1 != sbi->s_bh2)
+		brelse(sbi->s_bh2);
+
+	kfree(sbi);
+}
+
+static int sysv_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+
+	buf->f_type = sb->s_magic;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = sbi->s_ndatazones;
+	buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
+	buf->f_files = sbi->s_ninodes;
+	buf->f_ffree = sysv_count_free_inodes(sb);
+	buf->f_namelen = SYSV_NAMELEN;
+	return 0;
+}
+
+/* 
+ * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
+ */
+static inline void read3byte(struct sysv_sb_info *sbi,
+	unsigned char * from, unsigned char * to)
+{
+	if (sbi->s_bytesex == BYTESEX_PDP) {
+		to[0] = from[0];
+		to[1] = 0;
+		to[2] = from[1];
+		to[3] = from[2];
+	} else if (sbi->s_bytesex == BYTESEX_LE) {
+		to[0] = from[0];
+		to[1] = from[1];
+		to[2] = from[2];
+		to[3] = 0;
+	} else {
+		to[0] = 0;
+		to[1] = from[0];
+		to[2] = from[1];
+		to[3] = from[2];
+	}
+}
+
+static inline void write3byte(struct sysv_sb_info *sbi,
+	unsigned char * from, unsigned char * to)
+{
+	if (sbi->s_bytesex == BYTESEX_PDP) {
+		to[0] = from[0];
+		to[1] = from[2];
+		to[2] = from[3];
+	} else if (sbi->s_bytesex == BYTESEX_LE) {
+		to[0] = from[0];
+		to[1] = from[1];
+		to[2] = from[2];
+	} else {
+		to[0] = from[1];
+		to[1] = from[2];
+		to[2] = from[3];
+	}
+}
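
On disk a block pointer occupies only three bytes; read3byte()/write3byte()
widen it to a native 32-bit value and back, permuting bytes according to the
filesystem's byte order. A user-space sketch of the little-endian case only
(the PDP-11 and big-endian branches permute differently, as shown above); the
memcpy into a uint32_t assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char disk[3] = { 0x56, 0x34, 0x12 };  /* LE encoding of block 0x123456 */
	unsigned char incore[4];
	unsigned char packed[3];
	uint32_t val;

	/* read3byte(), BYTESEX_LE branch: append a zero high byte */
	incore[0] = disk[0];
	incore[1] = disk[1];
	incore[2] = disk[2];
	incore[3] = 0;
	memcpy(&val, incore, 4);
	printf("in-core value: 0x%08x\n", (unsigned)val);  /* 0x00123456 on an LE host */

	/* write3byte(), BYTESEX_LE branch: drop the high byte again */
	packed[0] = incore[0];
	packed[1] = incore[1];
	packed[2] = incore[2];
	printf("packed bytes:  %02x %02x %02x\n", packed[0], packed[1], packed[2]);
	return 0;
}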
+
+static struct inode_operations sysv_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.getattr	= sysv_getattr,
+};
+
+void sysv_set_inode(struct inode *inode, dev_t rdev)
+{
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &sysv_file_inode_operations;
+		inode->i_fop = &sysv_file_operations;
+		inode->i_mapping->a_ops = &sysv_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &sysv_dir_inode_operations;
+		inode->i_fop = &sysv_dir_operations;
+		inode->i_mapping->a_ops = &sysv_aops;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (inode->i_blocks) {
+			inode->i_op = &sysv_symlink_inode_operations;
+			inode->i_mapping->a_ops = &sysv_aops;
+		} else
+			inode->i_op = &sysv_fast_symlink_inode_operations;
+	} else
+		init_special_inode(inode, inode->i_mode, rdev);
+}
+
+static void sysv_read_inode(struct inode *inode)
+{
+	struct super_block * sb = inode->i_sb;
+	struct sysv_sb_info * sbi = SYSV_SB(sb);
+	struct buffer_head * bh;
+	struct sysv_inode * raw_inode;
+	struct sysv_inode_info * si;
+	unsigned int block, ino = inode->i_ino;
+
+	if (!ino || ino > sbi->s_ninodes) {
+		printk("Bad inode number on dev %s: %d is out of range\n",
+		       inode->i_sb->s_id, ino);
+		goto bad_inode;
+	}
+	raw_inode = sysv_raw_inode(sb, ino, &bh);
+	if (!raw_inode) {
+		printk("Major problem: unable to read inode from dev %s\n",
+		       inode->i_sb->s_id);
+		goto bad_inode;
+	}
+	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
+	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
+	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
+	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
+	inode->i_nlink = fs16_to_cpu(sbi, raw_inode->i_nlink);
+	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
+	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
+	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
+	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_blocks = inode->i_blksize = 0;
+
+	si = SYSV_I(inode);
+	for (block = 0; block < 10+1+1+1; block++)
+		read3byte(sbi, &raw_inode->i_data[3*block],
+				(u8 *)&si->i_data[block]);
+	brelse(bh);
+	si->i_dir_start_lookup = 0;
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+		sysv_set_inode(inode,
+			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
+	else
+		sysv_set_inode(inode, 0);
+	return;
+
+bad_inode:
+	make_bad_inode(inode);
+	return;
+}
+
+static struct buffer_head * sysv_update_inode(struct inode * inode)
+{
+	struct super_block * sb = inode->i_sb;
+	struct sysv_sb_info * sbi = SYSV_SB(sb);
+	struct buffer_head * bh;
+	struct sysv_inode * raw_inode;
+	struct sysv_inode_info * si;
+	unsigned int ino, block;
+
+	ino = inode->i_ino;
+	if (!ino || ino > sbi->s_ninodes) {
+		printk("Bad inode number on dev %s: %d is out of range\n",
+		       inode->i_sb->s_id, ino);
+		return NULL;
+	}
+	raw_inode = sysv_raw_inode(sb, ino, &bh);
+	if (!raw_inode) {
+		printk("unable to read i-node block\n");
+		return NULL;
+	}
+
+	raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
+	raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
+	raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
+	raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
+	raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
+	raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
+	raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
+	raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);
+
+	si = SYSV_I(inode);
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+		si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
+	for (block = 0; block < 10+1+1+1; block++)
+		write3byte(sbi, (u8 *)&si->i_data[block],
+			&raw_inode->i_data[3*block]);
+	mark_buffer_dirty(bh);
+	return bh;
+}
+
+int sysv_write_inode(struct inode * inode, int wait)
+{
+	struct buffer_head *bh;
+	lock_kernel();
+	bh = sysv_update_inode(inode);
+	brelse(bh);
+	unlock_kernel();
+	return 0;
+}
+
+int sysv_sync_inode(struct inode * inode)
+{
+	int err = 0;
+	struct buffer_head *bh;
+
+	bh = sysv_update_inode(inode);
+	if (bh && buffer_dirty(bh)) {
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh)) {
+			printk("IO error syncing sysv inode [%s:%08lx]\n",
+				inode->i_sb->s_id, inode->i_ino);
+			err = -1;
+		}
+	}
+	else if (!bh)
+		err = -1;
+	brelse(bh);
+	return err;
+}
+
+static void sysv_delete_inode(struct inode *inode)
+{
+	inode->i_size = 0;
+	sysv_truncate(inode);
+	lock_kernel();
+	sysv_free_inode(inode);
+	unlock_kernel();
+}
+
+static kmem_cache_t *sysv_inode_cachep;
+
+static struct inode *sysv_alloc_inode(struct super_block *sb)
+{
+	struct sysv_inode_info *si;
+
+	si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL);
+	if (!si)
+		return NULL;
+	return &si->vfs_inode;
+}
+
+static void sysv_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
+}
+
+static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+			SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&si->vfs_inode);
+}
+
+struct super_operations sysv_sops = {
+	.alloc_inode	= sysv_alloc_inode,
+	.destroy_inode	= sysv_destroy_inode,
+	.read_inode	= sysv_read_inode,
+	.write_inode	= sysv_write_inode,
+	.delete_inode	= sysv_delete_inode,
+	.put_super	= sysv_put_super,
+	.write_super	= sysv_write_super,
+	.remount_fs	= sysv_remount,
+	.statfs		= sysv_statfs,
+};
+
+int __init sysv_init_icache(void)
+{
+	sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
+			sizeof(struct sysv_inode_info), 0,
+			SLAB_RECLAIM_ACCOUNT,
+			init_once, NULL);
+	if (!sysv_inode_cachep)
+		return -ENOMEM;
+	return 0;
+}
+
+void sysv_destroy_icache(void)
+{
+	kmem_cache_destroy(sysv_inode_cachep);
+}
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
new file mode 100644
index 0000000..86f5f8d
--- /dev/null
+++ b/fs/sysv/itree.c
@@ -0,0 +1,475 @@
+/*
+ *  linux/fs/sysv/itree.c
+ *
+ *  Handling of indirect blocks' trees.
+ *  AV, Sep--Dec 2000
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/mount.h>
+#include <linux/string.h>
+#include "sysv.h"
+
+enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */
+
+static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
+{
+	mark_buffer_dirty_inode(bh, inode);
+	if (IS_SYNC(inode))
+		sync_dirty_buffer(bh);
+}
+
+static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
+{
+	struct super_block *sb = inode->i_sb;
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	int ptrs_bits = sbi->s_ind_per_block_bits;
+	unsigned long	indirect_blocks = sbi->s_ind_per_block,
+			double_blocks = sbi->s_ind_per_block_2;
+	int n = 0;
+
+	if (block < 0) {
+		printk("sysv_block_map: block < 0\n");
+	} else if (block < DIRECT) {
+		offsets[n++] = block;
+	} else if ( (block -= DIRECT) < indirect_blocks) {
+		offsets[n++] = DIRECT;
+		offsets[n++] = block;
+	} else if ((block -= indirect_blocks) < double_blocks) {
+		offsets[n++] = DIRECT+1;
+		offsets[n++] = block >> ptrs_bits;
+		offsets[n++] = block & (indirect_blocks - 1);
+	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
+		offsets[n++] = DIRECT+2;
+		offsets[n++] = block >> (ptrs_bits * 2);
+		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
+		offsets[n++] = block & (indirect_blocks - 1);
+	} else {
+		/* nothing */;
+	}
+	return n;
+}
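
For a given file block number, block_to_path() yields the chain of offsets to
follow: a direct slot in the inode, or one, two or three indirection levels.
Below is a standalone replica for one assumed geometry, 512-byte blocks and
therefore 128 pointers per indirect block (ptrs_bits = 7); it prints the
offset chain for a few boundary cases (10 and 137 are the first and last
single-indirect blocks, 138..16521 the double-indirect range, 16522 the first
triple-indirect block):

#include <stdio.h>

#define DIRECT 10
#define DEPTH  4

static int path_for(long block, int ptrs_bits, int offsets[DEPTH])
{
	long ptrs = 1L << ptrs_bits;    /* pointers per indirect block */
	long dbl  = ptrs * ptrs;        /* blocks behind one double indirect */
	int n = 0;

	if (block < 0)
		return 0;
	if (block < DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DIRECT) < ptrs) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= ptrs) < dbl) {
		offsets[n++] = DIRECT + 1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (ptrs - 1);
	} else if (((block -= dbl) >> (2 * ptrs_bits)) < ptrs) {
		offsets[n++] = DIRECT + 2;
		offsets[n++] = block >> (2 * ptrs_bits);
		offsets[n++] = (block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = block & (ptrs - 1);
	}
	return n;                       /* 0 means "block too big" */
}

int main(void)
{
	long samples[] = { 3, 10, 137, 138, 16521, 16522 };
	unsigned i;
	int j;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int off[DEPTH];
		int n = path_for(samples[i], 7, off);

		printf("block %6ld -> depth %d:", samples[i], n);
		for (j = 0; j < n; j++)
			printf(" %d", off[j]);
		printf("\n");
	}
	return 0;
}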
+
+static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
+{
+	return sbi->s_block_base + fs32_to_cpu(sbi, nr);
+}
+
+typedef struct {
+	sysv_zone_t     *p;
+	sysv_zone_t     key;
+	struct buffer_head *bh;
+} Indirect;
+
+static DEFINE_RWLOCK(pointers_lock);
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
+{
+	p->key = *(p->p = v);
+	p->bh = bh;
+}
+
+static inline int verify_chain(Indirect *from, Indirect *to)
+{
+	while (from <= to && from->key == *from->p)
+		from++;
+	return (from > to);
+}
+
+static inline sysv_zone_t *block_end(struct buffer_head *bh)
+{
+	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
+}
+
+/*
+ * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
+ */
+static Indirect *get_branch(struct inode *inode,
+			    int depth,
+			    int offsets[],
+			    Indirect chain[],
+			    int *err)
+{
+	struct super_block *sb = inode->i_sb;
+	Indirect *p = chain;
+	struct buffer_head *bh;
+
+	*err = 0;
+	add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
+	if (!p->key)
+		goto no_block;
+	while (--depth) {
+		int block = block_to_cpu(SYSV_SB(sb), p->key);
+		bh = sb_bread(sb, block);
+		if (!bh)
+			goto failure;
+		if (!verify_chain(chain, p))
+			goto changed;
+		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
+		if (!p->key)
+			goto no_block;
+	}
+	return NULL;
+
+changed:
+	brelse(bh);
+	*err = -EAGAIN;
+	goto no_block;
+failure:
+	*err = -EIO;
+no_block:
+	return p;
+}
+
+static int alloc_branch(struct inode *inode,
+			int num,
+			int *offsets,
+			Indirect *branch)
+{
+	int blocksize = inode->i_sb->s_blocksize;
+	int n = 0;
+	int i;
+
+	branch[0].key = sysv_new_block(inode->i_sb);
+	if (branch[0].key) for (n = 1; n < num; n++) {
+		struct buffer_head *bh;
+		int parent;
+		/* Allocate the next block */
+		branch[n].key = sysv_new_block(inode->i_sb);
+		if (!branch[n].key)
+			break;
+		/*
+		 * Get buffer_head for parent block, zero it out and set 
+		 * the pointer to new one, then send parent to disk.
+		 */
+		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
+		bh = sb_getblk(inode->i_sb, parent);
+		lock_buffer(bh);
+		memset(bh->b_data, 0, blocksize);
+		branch[n].bh = bh;
+		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
+		*branch[n].p = branch[n].key;
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		dirty_indirect(bh, inode);
+	}
+	if (n == num)
+		return 0;
+
+	/* Allocation failed, free what we already allocated */
+	for (i = 1; i < n; i++)
+		bforget(branch[i].bh);
+	for (i = 0; i < n; i++)
+		sysv_free_block(inode->i_sb, branch[i].key);
+	return -ENOSPC;
+}
+
+static inline int splice_branch(struct inode *inode,
+				Indirect chain[],
+				Indirect *where,
+				int num)
+{
+	int i;
+
+	/* Verify that place we are splicing to is still there and vacant */
+	write_lock(&pointers_lock);
+	if (!verify_chain(chain, where-1) || *where->p)
+		goto changed;
+	*where->p = where->key;
+	write_unlock(&pointers_lock);
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+
+	/* had we spliced it onto indirect block? */
+	if (where->bh)
+		dirty_indirect(where->bh, inode);
+
+	if (IS_SYNC(inode))
+		sysv_sync_inode(inode);
+	else
+		mark_inode_dirty(inode);
+	return 0;
+
+changed:
+	write_unlock(&pointers_lock);
+	for (i = 1; i < num; i++)
+		bforget(where[i].bh);
+	for (i = 0; i < num; i++)
+		sysv_free_block(inode->i_sb, where[i].key);
+	return -EAGAIN;
+}
+
+static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+{
+	int err = -EIO;
+	int offsets[DEPTH];
+	Indirect chain[DEPTH];
+	struct super_block *sb = inode->i_sb;
+	Indirect *partial;
+	int left;
+	int depth = block_to_path(inode, iblock, offsets);
+
+	if (depth == 0)
+		goto out;
+
+reread:
+	read_lock(&pointers_lock);
+	partial = get_branch(inode, depth, offsets, chain, &err);
+	read_unlock(&pointers_lock);
+
+	/* Simplest case - block found, no allocation needed */
+	if (!partial) {
+got_it:
+		map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
+					chain[depth-1].key));
+		/* Clean up and exit */
+		partial = chain+depth-1; /* the whole chain */
+		goto cleanup;
+	}
+
+	/* Next simple case - plain lookup or failed read of indirect block */
+	if (!create || err == -EIO) {
+cleanup:
+		while (partial > chain) {
+			brelse(partial->bh);
+			partial--;
+		}
+out:
+		return err;
+	}
+
+	/*
+	 * Indirect block might be removed by truncate while we were
+	 * reading it. Handling of that case (forget what we've got and
+	 * reread) is taken out of the main path.
+	 */
+	if (err == -EAGAIN)
+		goto changed;
+
+	left = (chain + depth) - partial;
+	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
+	if (err)
+		goto cleanup;
+
+	if (splice_branch(inode, chain, partial, left) < 0)
+		goto changed;
+
+	set_buffer_new(bh_result);
+	goto got_it;
+
+changed:
+	while (partial > chain) {
+		brelse(partial->bh);
+		partial--;
+	}
+	goto reread;
+}
+
+static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
+{
+	while (p < q)
+		if (*p++)
+			return 0;
+	return 1;
+}
+
+static Indirect *find_shared(struct inode *inode,
+				int depth,
+				int offsets[],
+				Indirect chain[],
+				sysv_zone_t *top)
+{
+	Indirect *partial, *p;
+	int k, err;
+
+	*top = 0;
+	for (k = depth; k > 1 && !offsets[k-1]; k--)
+		;
+
+	write_lock(&pointers_lock);
+	partial = get_branch(inode, k, offsets, chain, &err);
+	if (!partial)
+		partial = chain + k-1;
+	/*
+	 * If the branch acquired continuation since we've looked at it -
+	 * fine, it should all survive and (new) top doesn't belong to us.
+	 */
+	if (!partial->key && *partial->p) {
+		write_unlock(&pointers_lock);
+		goto no_top;
+	}
+	for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
+		;
+	/*
+	 * OK, we've found the last block that must survive. The rest of our
+	 * branch should be detached before unlocking. However, if that rest
+	 * of branch is all ours and does not grow immediately from the inode
+	 * it's easier to cheat and just decrement partial->p.
+	 */
+	if (p == chain + k - 1 && p > chain) {
+		p->p--;
+	} else {
+		*top = *p->p;
+		*p->p = 0;
+	}
+	write_unlock(&pointers_lock);
+
+	while (partial > p) {
+		brelse(partial->bh);
+		partial--;
+	}
+no_top:
+	return partial;
+}
+
+static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
+{
+	for ( ; p < q ; p++) {
+		sysv_zone_t nr = *p;
+		if (nr) {
+			*p = 0;
+			sysv_free_block(inode->i_sb, nr);
+			mark_inode_dirty(inode);
+		}
+	}
+}
+
+static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
+{
+	struct buffer_head * bh;
+	struct super_block *sb = inode->i_sb;
+
+	if (depth--) {
+		for ( ; p < q ; p++) {
+			int block;
+			sysv_zone_t nr = *p;
+			if (!nr)
+				continue;
+			*p = 0;
+			block = block_to_cpu(SYSV_SB(sb), nr);
+			bh = sb_bread(sb, block);
+			if (!bh)
+				continue;
+			free_branches(inode, (sysv_zone_t*)bh->b_data,
+					block_end(bh), depth);
+			bforget(bh);
+			sysv_free_block(sb, nr);
+			mark_inode_dirty(inode);
+		}
+	} else
+		free_data(inode, p, q);
+}
+
+void sysv_truncate (struct inode * inode)
+{
+	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
+	int offsets[DEPTH];
+	Indirect chain[DEPTH];
+	Indirect *partial;
+	sysv_zone_t nr = 0;
+	int n;
+	long iblock;
+	unsigned blocksize;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	    S_ISLNK(inode->i_mode)))
+		return;
+
+	blocksize = inode->i_sb->s_blocksize;
+	iblock = (inode->i_size + blocksize-1)
+					>> inode->i_sb->s_blocksize_bits;
+
+	block_truncate_page(inode->i_mapping, inode->i_size, get_block);
+
+	n = block_to_path(inode, iblock, offsets);
+	if (n == 0)
+		return;
+
+	if (n == 1) {
+		free_data(inode, i_data+offsets[0], i_data + DIRECT);
+		goto do_indirects;
+	}
+
+	partial = find_shared(inode, n, offsets, chain, &nr);
+	/* Kill the top of shared branch (already detached) */
+	if (nr) {
+		if (partial == chain)
+			mark_inode_dirty(inode);
+		else
+			dirty_indirect(partial->bh, inode);
+		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
+	}
+	/* Clear the ends of indirect blocks on the shared branch */
+	while (partial > chain) {
+		free_branches(inode, partial->p + 1, block_end(partial->bh),
+				(chain+n-1) - partial);
+		dirty_indirect(partial->bh, inode);
+		brelse (partial->bh);
+		partial--;
+	}
+do_indirects:
+	/* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
+	while (n < DEPTH) {
+		nr = i_data[DIRECT + n - 1];
+		if (nr) {
+			i_data[DIRECT + n - 1] = 0;
+			mark_inode_dirty(inode);
+			free_branches(inode, &nr, &nr+1, n);
+		}
+		n++;
+	}
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	if (IS_SYNC(inode))
+		sysv_sync_inode (inode);
+	else
+		mark_inode_dirty(inode);
+}
+
+static unsigned sysv_nblocks(struct super_block *s, loff_t size)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(s);
+	int ptrs_bits = sbi->s_ind_per_block_bits;
+	unsigned blocks, res, direct = DIRECT, i = DEPTH;
+	blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
+	res = blocks;
+	while (--i && blocks > direct) {
+		blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
+		res += blocks;
+		direct = 1;
+	}
+	return res;
+}
+
+int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	struct super_block *s = mnt->mnt_sb;
+	generic_fillattr(dentry->d_inode, stat);
+	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
+	stat->blksize = s->s_blocksize;
+	return 0;
+}
+
+static int sysv_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page,get_block,wbc);
+}
+static int sysv_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,get_block);
+}
+static int sysv_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page,from,to,get_block);
+}
+static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,get_block);
+}
+struct address_space_operations sysv_aops = {
+	.readpage = sysv_readpage,
+	.writepage = sysv_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = sysv_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = sysv_bmap
+};
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
new file mode 100644
index 0000000..7f0e4b5
--- /dev/null
+++ b/fs/sysv/namei.c
@@ -0,0 +1,318 @@
+/*
+ *  linux/fs/sysv/namei.c
+ *
+ *  minix/namei.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  coh/namei.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/namei.c
+ *  Copyright (C) 1993  Bruno Haible
+ *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
+ */
+
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include "sysv.h"
+
+static inline void inc_count(struct inode *inode)
+{
+	inode->i_nlink++;
+	mark_inode_dirty(inode);
+}
+
+static inline void dec_count(struct inode *inode)
+{
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+}
+
+static int add_nondir(struct dentry *dentry, struct inode *inode)
+{
+	int err = sysv_add_link(dentry, inode);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	dec_count(inode);
+	iput(inode);
+	return err;
+}
+
+static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
+{
+	/* Truncate the name in place, avoids having to define a compare
+	   function. */
+	if (qstr->len > SYSV_NAMELEN) {
+		qstr->len = SYSV_NAMELEN;
+		qstr->hash = full_name_hash(qstr->name, qstr->len);
+	}
+	return 0;
+}
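
Instead of supplying a custom d_compare, the hash clamps over-long names to
the on-disk limit, so any two names that agree in their first 14 characters
hash, and therefore look up, identically. A user-space sketch of the idea;
full_name_hash() is stood in for by a trivial hash, purely for illustration:

#include <stdio.h>
#include <string.h>

#define NAMELEN 14

static unsigned long toy_hash(const char *name, unsigned int len)
{
	unsigned long h = 5381;

	while (len--)
		h = h * 33 + (unsigned char)*name++;
	return h;
}

int main(void)
{
	const char *a = "averylongfilename_one";
	const char *b = "averylongfilename_two";
	unsigned int la = strlen(a), lb = strlen(b);

	/* what sysv_hash() does: clamp the length before hashing */
	if (la > NAMELEN)
		la = NAMELEN;
	if (lb > NAMELEN)
		lb = NAMELEN;

	/* both names now hash the same, matching the on-disk 14-byte limit */
	printf("%lu %lu\n", toy_hash(a, la), toy_hash(b, lb));
	return 0;
}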
+
+struct dentry_operations sysv_dentry_operations = {
+	.d_hash		= sysv_hash,
+};
+
+static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+{
+	struct inode * inode = NULL;
+	ino_t ino;
+
+	dentry->d_op = dir->i_sb->s_root->d_op;
+	if (dentry->d_name.len > SYSV_NAMELEN)
+		return ERR_PTR(-ENAMETOOLONG);
+	ino = sysv_inode_by_name(dentry);
+
+	if (ino) {
+		inode = iget(dir->i_sb, ino);
+		if (!inode)
+			return ERR_PTR(-EACCES);
+	}
+	d_add(dentry, inode);
+	return NULL;
+}
+
+static int sysv_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+{
+	struct inode * inode;
+	int err;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+
+	inode = sysv_new_inode(dir, mode);
+	err = PTR_ERR(inode);
+
+	if (!IS_ERR(inode)) {
+		sysv_set_inode(inode, rdev);
+		mark_inode_dirty(inode);
+		err = add_nondir(dentry, inode);
+	}
+	return err;
+}
+
+static int sysv_create(struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+{
+	return sysv_mknod(dir, dentry, mode, 0);
+}
+
+static int sysv_symlink(struct inode * dir, struct dentry * dentry, 
+	const char * symname)
+{
+	int err = -ENAMETOOLONG;
+	int l = strlen(symname)+1;
+	struct inode * inode;
+
+	if (l > dir->i_sb->s_blocksize)
+		goto out;
+
+	inode = sysv_new_inode(dir, S_IFLNK|0777);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+	
+	sysv_set_inode(inode, 0);
+	err = page_symlink(inode, symname, l);
+	if (err)
+		goto out_fail;
+
+	mark_inode_dirty(inode);
+	err = add_nondir(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	dec_count(inode);
+	iput(inode);
+	goto out;
+}
+
+static int sysv_link(struct dentry * old_dentry, struct inode * dir, 
+	struct dentry * dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+
+	if (inode->i_nlink >= SYSV_SB(inode->i_sb)->s_link_max)
+		return -EMLINK;
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	inc_count(inode);
+	atomic_inc(&inode->i_count);
+
+	return add_nondir(dentry, inode);
+}
+
+static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+{
+	struct inode * inode;
+	int err = -EMLINK;
+
+	if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) 
+		goto out;
+	inc_count(dir);
+
+	inode = sysv_new_inode(dir, S_IFDIR|mode);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_dir;
+
+	sysv_set_inode(inode, 0);
+
+	inc_count(inode);
+
+	err = sysv_make_empty(inode, dir);
+	if (err)
+		goto out_fail;
+
+	err = sysv_add_link(dentry, inode);
+	if (err)
+		goto out_fail;
+
+	d_instantiate(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	dec_count(inode);
+	dec_count(inode);
+	iput(inode);
+out_dir:
+	dec_count(dir);
+	goto out;
+}
+
+static int sysv_unlink(struct inode * dir, struct dentry * dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	struct page * page;
+	struct sysv_dir_entry * de;
+	int err = -ENOENT;
+
+	de = sysv_find_entry(dentry, &page);
+	if (!de)
+		goto out;
+
+	err = sysv_delete_entry (de, page);
+	if (err)
+		goto out;
+
+	inode->i_ctime = dir->i_ctime;
+	dec_count(inode);
+out:
+	return err;
+}
+
+static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	int err = -ENOTEMPTY;
+
+	if (sysv_empty_dir(inode)) {
+		err = sysv_unlink(dir, dentry);
+		if (!err) {
+			inode->i_size = 0;
+			dec_count(inode);
+			dec_count(dir);
+		}
+	}
+	return err;
+}
+
+/*
+ * Anybody can rename anything with this: the permission checks are left to the
+ * higher-level routines.
+ */
+static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
+		  struct inode * new_dir, struct dentry * new_dentry)
+{
+	struct inode * old_inode = old_dentry->d_inode;
+	struct inode * new_inode = new_dentry->d_inode;
+	struct page * dir_page = NULL;
+	struct sysv_dir_entry * dir_de = NULL;
+	struct page * old_page;
+	struct sysv_dir_entry * old_de;
+	int err = -ENOENT;
+
+	old_de = sysv_find_entry(old_dentry, &old_page);
+	if (!old_de)
+		goto out;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+		err = -EIO;
+		dir_de = sysv_dotdot(old_inode, &dir_page);
+		if (!dir_de)
+			goto out_old;
+	}
+
+	if (new_inode) {
+		struct page * new_page;
+		struct sysv_dir_entry * new_de;
+
+		err = -ENOTEMPTY;
+		if (dir_de && !sysv_empty_dir(new_inode))
+			goto out_dir;
+
+		err = -ENOENT;
+		new_de = sysv_find_entry(new_dentry, &new_page);
+		if (!new_de)
+			goto out_dir;
+		inc_count(old_inode);
+		sysv_set_link(new_de, new_page, old_inode);
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		if (dir_de)
+			new_inode->i_nlink--;
+		dec_count(new_inode);
+	} else {
+		if (dir_de) {
+			err = -EMLINK;
+			if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
+				goto out_dir;
+		}
+		inc_count(old_inode);
+		err = sysv_add_link(new_dentry, old_inode);
+		if (err) {
+			dec_count(old_inode);
+			goto out_dir;
+		}
+		if (dir_de)
+			inc_count(new_dir);
+	}
+
+	sysv_delete_entry(old_de, old_page);
+	dec_count(old_inode);
+
+	if (dir_de) {
+		sysv_set_link(dir_de, dir_page, new_dir);
+		dec_count(old_dir);
+	}
+	return 0;
+
+out_dir:
+	if (dir_de) {
+		kunmap(dir_page);
+		page_cache_release(dir_page);
+	}
+out_old:
+	kunmap(old_page);
+	page_cache_release(old_page);
+out:
+	return err;
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations sysv_dir_inode_operations = {
+	.create		= sysv_create,
+	.lookup		= sysv_lookup,
+	.link		= sysv_link,
+	.unlink		= sysv_unlink,
+	.symlink	= sysv_symlink,
+	.mkdir		= sysv_mkdir,
+	.rmdir		= sysv_rmdir,
+	.mknod		= sysv_mknod,
+	.rename		= sysv_rename,
+	.getattr	= sysv_getattr,
+};
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
new file mode 100644
index 0000000..59e76b5
--- /dev/null
+++ b/fs/sysv/super.c
@@ -0,0 +1,572 @@
+/*
+ *  linux/fs/sysv/inode.c
+ *
+ *  minix/inode.c
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  xenix/inode.c
+ *  Copyright (C) 1992  Doug Evans
+ *
+ *  coh/inode.c
+ *  Copyright (C) 1993  Pascal Haible, Bruno Haible
+ *
+ *  sysv/inode.c
+ *  Copyright (C) 1993  Paul B. Monday
+ *
+ *  sysv/inode.c
+ *  Copyright (C) 1993  Bruno Haible
+ *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
+ *
+ *  This file contains code for read/parsing the superblock.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include "sysv.h"
+
+/*
+ * The following functions try to recognize specific filesystems.
+ *
+ * We recognize:
+ * - Xenix FS by its magic number.
+ * - SystemV FS by its magic number.
+ * - Coherent FS by its funny fname/fpack field.
+ * - SCO AFS by s_nfree == 0xffff
+ * - V7 FS has no distinguishing features.
+ *
+ * We discriminate among SystemV4 and SystemV2 FS by the assumption that
+ * the time stamp is not < 01-01-1980.
+ */
+
+enum {
+	JAN_1_1980 = (10*365 + 2) * 24 * 60 * 60
+};
+
+static void detected_xenix(struct sysv_sb_info *sbi)
+{
+	struct buffer_head *bh1 = sbi->s_bh1;
+	struct buffer_head *bh2 = sbi->s_bh2;
+	struct xenix_super_block * sbd1;
+	struct xenix_super_block * sbd2;
+
+	if (bh1 != bh2)
+		sbd1 = sbd2 = (struct xenix_super_block *) bh1->b_data;
+	else {
+		/* block size = 512, so bh1 != bh2 */
+		sbd1 = (struct xenix_super_block *) bh1->b_data;
+		sbd2 = (struct xenix_super_block *) (bh2->b_data - 512);
+	}
+
+	sbi->s_link_max = XENIX_LINK_MAX;
+	sbi->s_fic_size = XENIX_NICINOD;
+	sbi->s_flc_size = XENIX_NICFREE;
+	sbi->s_sbd1 = (char *)sbd1;
+	sbi->s_sbd2 = (char *)sbd2;
+	sbi->s_sb_fic_count = &sbd1->s_ninode;
+	sbi->s_sb_fic_inodes = &sbd1->s_inode[0];
+	sbi->s_sb_total_free_inodes = &sbd2->s_tinode;
+	sbi->s_bcache_count = &sbd1->s_nfree;
+	sbi->s_bcache = &sbd1->s_free[0];
+	sbi->s_free_blocks = &sbd2->s_tfree;
+	sbi->s_sb_time = &sbd2->s_time;
+	sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd1->s_isize);
+	sbi->s_nzones = fs32_to_cpu(sbi, sbd1->s_fsize);
+}
+
+static void detected_sysv4(struct sysv_sb_info *sbi)
+{
+	struct sysv4_super_block * sbd;
+	struct buffer_head *bh1 = sbi->s_bh1;
+	struct buffer_head *bh2 = sbi->s_bh2;
+
+	if (bh1 == bh2)
+		sbd = (struct sysv4_super_block *) (bh1->b_data + BLOCK_SIZE/2);
+	else
+		sbd = (struct sysv4_super_block *) bh2->b_data;
+
+	sbi->s_link_max = SYSV_LINK_MAX;
+	sbi->s_fic_size = SYSV_NICINOD;
+	sbi->s_flc_size = SYSV_NICFREE;
+	sbi->s_sbd1 = (char *)sbd;
+	sbi->s_sbd2 = (char *)sbd;
+	sbi->s_sb_fic_count = &sbd->s_ninode;
+	sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+	sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+	sbi->s_bcache_count = &sbd->s_nfree;
+	sbi->s_bcache = &sbd->s_free[0];
+	sbi->s_free_blocks = &sbd->s_tfree;
+	sbi->s_sb_time = &sbd->s_time;
+	sbi->s_sb_state = &sbd->s_state;
+	sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+	sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_sysv2(struct sysv_sb_info *sbi)
+{
+	struct sysv2_super_block *sbd;
+	struct buffer_head *bh1 = sbi->s_bh1;
+	struct buffer_head *bh2 = sbi->s_bh2;
+
+	if (bh1 == bh2)
+		sbd = (struct sysv2_super_block *) (bh1->b_data + BLOCK_SIZE/2);
+	else
+		sbd = (struct sysv2_super_block *) bh2->b_data;
+
+	sbi->s_link_max = SYSV_LINK_MAX;
+	sbi->s_fic_size = SYSV_NICINOD;
+	sbi->s_flc_size = SYSV_NICFREE;
+	sbi->s_sbd1 = (char *)sbd;
+	sbi->s_sbd2 = (char *)sbd;
+	sbi->s_sb_fic_count = &sbd->s_ninode;
+	sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+	sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+	sbi->s_bcache_count = &sbd->s_nfree;
+	sbi->s_bcache = &sbd->s_free[0];
+	sbi->s_free_blocks = &sbd->s_tfree;
+	sbi->s_sb_time = &sbd->s_time;
+	sbi->s_sb_state = &sbd->s_state;
+	sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+	sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_coherent(struct sysv_sb_info *sbi)
+{
+	struct coh_super_block * sbd;
+	struct buffer_head *bh1 = sbi->s_bh1;
+
+	sbd = (struct coh_super_block *) bh1->b_data;
+
+	sbi->s_link_max = COH_LINK_MAX;
+	sbi->s_fic_size = COH_NICINOD;
+	sbi->s_flc_size = COH_NICFREE;
+	sbi->s_sbd1 = (char *)sbd;
+	sbi->s_sbd2 = (char *)sbd;
+	sbi->s_sb_fic_count = &sbd->s_ninode;
+	sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+	sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+	sbi->s_bcache_count = &sbd->s_nfree;
+	sbi->s_bcache = &sbd->s_free[0];
+	sbi->s_free_blocks = &sbd->s_tfree;
+	sbi->s_sb_time = &sbd->s_time;
+	sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+	sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_v7(struct sysv_sb_info *sbi)
+{
+	struct buffer_head *bh2 = sbi->s_bh2;
+	struct v7_super_block *sbd = (struct v7_super_block *)bh2->b_data;
+
+	sbi->s_link_max = V7_LINK_MAX;
+	sbi->s_fic_size = V7_NICINOD;
+	sbi->s_flc_size = V7_NICFREE;
+	sbi->s_sbd1 = (char *)sbd;
+	sbi->s_sbd2 = (char *)sbd;
+	sbi->s_sb_fic_count = &sbd->s_ninode;
+	sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+	sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+	sbi->s_bcache_count = &sbd->s_nfree;
+	sbi->s_bcache = &sbd->s_free[0];
+	sbi->s_free_blocks = &sbd->s_tfree;
+	sbi->s_sb_time = &sbd->s_time;
+	sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+	sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static int detect_xenix(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+	struct xenix_super_block *sbd = (struct xenix_super_block *)bh->b_data;
+	if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0x2b5544))
+		sbi->s_bytesex = BYTESEX_LE;
+	else if (*(__be32 *)&sbd->s_magic == cpu_to_be32(0x2b5544))
+		sbi->s_bytesex = BYTESEX_BE;
+	else
+		return 0;
+	switch (fs32_to_cpu(sbi, sbd->s_type)) {
+	case 1:
+		sbi->s_type = FSTYPE_XENIX;
+		return 1;
+	case 2:
+		sbi->s_type = FSTYPE_XENIX;
+		return 2;
+	default:
+		return 0;
+	}
+}
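
Both magic-number probes read the raw 32-bit magic and test it against the
little- and big-endian encodings of the expected value, which fixes
sbi->s_bytesex as a side effect. A user-space sketch of that probe; the buffer
contents and the helper name are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

enum bytesex { SEX_NONE, SEX_LE, SEX_BE };

static enum bytesex probe_magic(const unsigned char raw[4], uint32_t expect)
{
	/* build both interpretations explicitly, independent of host order */
	uint32_t le = raw[0] | raw[1] << 8 | raw[2] << 16 | (uint32_t)raw[3] << 24;
	uint32_t be = (uint32_t)raw[0] << 24 | raw[1] << 16 | raw[2] << 8 | raw[3];

	if (le == expect)
		return SEX_LE;
	if (be == expect)
		return SEX_BE;
	return SEX_NONE;
}

int main(void)
{
	unsigned char on_disk[4] = { 0x44, 0x55, 0x2b, 0x00 };  /* 0x002b5544, LE */

	/* prints 1, i.e. SEX_LE, just as detect_xenix() would conclude */
	printf("bytesex = %d\n", probe_magic(on_disk, 0x2b5544));
	return 0;
}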
+
+static int detect_sysv(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+	struct super_block *sb = sbi->s_sb;
+	/* All relevant fields are at the same offsets in R2 and R4 */
+	struct sysv4_super_block * sbd;
+	u32 type;
+
+	sbd = (struct sysv4_super_block *) (bh->b_data + BLOCK_SIZE/2);
+	if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0xfd187e20))
+		sbi->s_bytesex = BYTESEX_LE;
+	else if (*(__be32 *)&sbd->s_magic == cpu_to_be32(0xfd187e20))
+		sbi->s_bytesex = BYTESEX_BE;
+	else
+		return 0;
+
+	type = fs32_to_cpu(sbi, sbd->s_type);
+
+	if (fs16_to_cpu(sbi, sbd->s_nfree) == 0xffff) {
+		sbi->s_type = FSTYPE_AFS;
+		sbi->s_forced_ro = 1;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			printk("SysV FS: SCO EAFS on %s detected, "
+				"forcing read-only mode.\n",
+				sb->s_id);
+		}
+		return type;
+	}
+
+	if (fs32_to_cpu(sbi, sbd->s_time) < JAN_1_1980) {
+		/* this is likely to happen on SystemV2 FS */
+		if (type > 3 || type < 1)
+			return 0;
+		sbi->s_type = FSTYPE_SYSV2;
+		return type;
+	}
+	if ((type > 3 || type < 1) && (type > 0x30 || type < 0x10))
+		return 0;
+
+	/* On Interactive Unix (ISC) Version 4.0/3.x an s_type field of 0x10,
+	   0x20 or 0x30 indicates that the filesystem has symbolic links and
+	   that the 14-character filename limit is gone. Due to the lack of
+	   information about this feature, read-only mode seems reasonable. -KGB */
+
+	if (type >= 0x10) {
+		printk("SysV FS: can't handle long file names on %s, "
+		       "forcing read-only mode.\n", sb->s_id);
+		sbi->s_forced_ro = 1;
+	}
+
+	sbi->s_type = FSTYPE_SYSV4;
+	return type >= 0x10 ? type >> 4 : type;
+}
+
+static int detect_coherent(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+	struct coh_super_block * sbd;
+
+	sbd = (struct coh_super_block *) (bh->b_data + BLOCK_SIZE/2);
+	if ((memcmp(sbd->s_fname,"noname",6) && memcmp(sbd->s_fname,"xxxxx ",6))
+	    || (memcmp(sbd->s_fpack,"nopack",6) && memcmp(sbd->s_fpack,"xxxxx\n",6)))
+		return 0;
+	sbi->s_bytesex = BYTESEX_PDP;
+	sbi->s_type = FSTYPE_COH;
+	return 1;
+}
+
+static int detect_sysv_odd(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+	int size = detect_sysv(sbi, bh);
+
+	return size>2 ? 0 : size;
+}
+
+static struct {
+	int block;
+	int (*test)(struct sysv_sb_info *, struct buffer_head *);
+} flavours[] = {
+	{1, detect_xenix},
+	{0, detect_sysv},
+	{0, detect_coherent},
+	{9, detect_sysv_odd},
+	{15,detect_sysv_odd},
+	{18,detect_sysv},
+};
+
+static char *flavour_names[] = {
+	[FSTYPE_XENIX]	= "Xenix",
+	[FSTYPE_SYSV4]	= "SystemV",
+	[FSTYPE_SYSV2]	= "SystemV Release 2",
+	[FSTYPE_COH]	= "Coherent",
+	[FSTYPE_V7]	= "V7",
+	[FSTYPE_AFS]	= "AFS",
+};
+
+static void (*flavour_setup[])(struct sysv_sb_info *) = {
+	[FSTYPE_XENIX]	= detected_xenix,
+	[FSTYPE_SYSV4]	= detected_sysv4,
+	[FSTYPE_SYSV2]	= detected_sysv2,
+	[FSTYPE_COH]	= detected_coherent,
+	[FSTYPE_V7]	= detected_v7,
+	[FSTYPE_AFS]	= detected_sysv4,
+};
+
+static int complete_read_super(struct super_block *sb, int silent, int size)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+	struct inode *root_inode;
+	char *found = flavour_names[sbi->s_type];
+	u_char n_bits = size+8;
+	int bsize = 1 << n_bits;
+	int bsize_4 = bsize >> 2;
+
+	sbi->s_firstinodezone = 2;
+
+	flavour_setup[sbi->s_type](sbi);
+	
+	sbi->s_truncate = 1;
+	sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone;
+	sbi->s_inodes_per_block = bsize >> 6;
+	sbi->s_inodes_per_block_1 = (bsize >> 6)-1;
+	sbi->s_inodes_per_block_bits = n_bits-6;
+	sbi->s_ind_per_block = bsize_4;
+	sbi->s_ind_per_block_2 = bsize_4*bsize_4;
+	sbi->s_toobig_block = 10 + bsize_4 * (1 + bsize_4 * (1 + bsize_4));
+	sbi->s_ind_per_block_bits = n_bits-2;
+
+	sbi->s_ninodes = (sbi->s_firstdatazone - sbi->s_firstinodezone)
+		<< sbi->s_inodes_per_block_bits;
+
+	if (!silent)
+		printk("VFS: Found a %s FS (block size = %ld) on device %s\n",
+		       found, sb->s_blocksize, sb->s_id);
+
+	sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
+	/* set up enough so that it can read an inode */
+	sb->s_op = &sysv_sops;
+	root_inode = iget(sb,SYSV_ROOT_INO);
+	if (!root_inode || is_bad_inode(root_inode)) {
+		printk("SysV FS: get root inode failed\n");
+		return 0;
+	}
+	sb->s_root = d_alloc_root(root_inode);
+	if (!sb->s_root) {
+		iput(root_inode);
+		printk("SysV FS: get root dentry failed\n");
+		return 0;
+	}
+	if (sbi->s_forced_ro)
+		sb->s_flags |= MS_RDONLY;
+	if (sbi->s_truncate)
+		sb->s_root->d_op = &sysv_dentry_operations;
+	sb->s_dirt = 1;
+	return 1;
+}
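
The "size" value handed back by the detection routines encodes the block size
(1, 2, 3 for 512, 1024, 2048 bytes), and the rest of the geometry follows from
it: 64-byte inodes give bsize >> 6 inodes per block, 4-byte zone pointers give
bsize >> 2 pointers per indirect block, and s_toobig_block is the first file
block that even triple indirection cannot reach. A worked user-space example
of those derivations:

#include <stdio.h>

int main(void)
{
	int size;

	for (size = 1; size <= 3; size++) {
		unsigned n_bits = size + 8;
		unsigned bsize  = 1u << n_bits;
		unsigned ipb    = bsize >> 6;   /* inodes per block */
		unsigned ppb    = bsize >> 2;   /* pointers per indirect block */
		unsigned long toobig = 10ul + ppb * (1ul + ppb * (1ul + ppb));

		printf("size=%d: block %u bytes, %u inodes/block, %u ptrs/block, "
		       "first unmappable block %lu\n",
		       size, bsize, ipb, ppb, toobig);
	}
	return 0;
}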
+
+static int sysv_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct buffer_head *bh1, *bh = NULL;
+	struct sysv_sb_info *sbi;
+	unsigned long blocknr;
+	int size = 0, i;
+	
+	if (1024 != sizeof (struct xenix_super_block))
+		panic("Xenix FS: bad superblock size");
+	if (512 != sizeof (struct sysv4_super_block))
+		panic("SystemV FS: bad superblock size");
+	if (512 != sizeof (struct sysv2_super_block))
+		panic("SystemV FS: bad superblock size");
+	if (500 != sizeof (struct coh_super_block))
+		panic("Coherent FS: bad superblock size");
+	if (64 != sizeof (struct sysv_inode))
+		panic("sysv fs: bad inode size");
+
+	sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	memset(sbi, 0, sizeof(struct sysv_sb_info));
+
+	sbi->s_sb = sb;
+	sbi->s_block_base = 0;
+	sb->s_fs_info = sbi;
+	
+	sb_set_blocksize(sb, BLOCK_SIZE);
+
+	for (i = 0; i < sizeof(flavours)/sizeof(flavours[0]) && !size; i++) {
+		brelse(bh);
+		bh = sb_bread(sb, flavours[i].block);
+		if (!bh)
+			continue;
+		size = flavours[i].test(SYSV_SB(sb), bh);
+	}
+
+	if (!size)
+		goto Eunknown;
+
+	switch (size) {
+		case 1:
+			blocknr = bh->b_blocknr << 1;
+			brelse(bh);
+			sb_set_blocksize(sb, 512);
+			bh1 = sb_bread(sb, blocknr);
+			bh = sb_bread(sb, blocknr + 1);
+			break;
+		case 2:
+			bh1 = bh;
+			break;
+		case 3:
+			blocknr = bh->b_blocknr >> 1;
+			brelse(bh);
+			sb_set_blocksize(sb, 2048);
+			bh1 = bh = sb_bread(sb, blocknr);
+			break;
+		default:
+			goto Ebadsize;
+	}
+
+	if (bh && bh1) {
+		sbi->s_bh1 = bh1;
+		sbi->s_bh2 = bh;
+		if (complete_read_super(sb, silent, size))
+			return 0;
+	}
+
+	brelse(bh1);
+	brelse(bh);
+	sb_set_blocksize(sb, BLOCK_SIZE);
+	printk("oldfs: cannot read superblock\n");
+failed:
+	kfree(sbi);
+	return -EINVAL;
+
+Eunknown:
+	brelse(bh);
+	if (!silent)
+		printk("VFS: unable to find oldfs superblock on device %s\n",
+			sb->s_id);
+	goto failed;
+Ebadsize:
+	brelse(bh);
+	if (!silent)
+		printk("VFS: oldfs: unsupported block size (%dKb)\n",
+			1<<(size-2));
+	goto failed;
+}
+
+static int v7_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct sysv_sb_info *sbi;
+	struct buffer_head *bh, *bh2 = NULL;
+	struct v7_super_block *v7sb;
+	struct sysv_inode *v7i;
+
+	if (440 != sizeof (struct v7_super_block))
+		panic("V7 FS: bad super-block size");
+	if (64 != sizeof (struct sysv_inode))
+		panic("sysv fs: bad i-node size");
+
+	sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	memset(sbi, 0, sizeof(struct sysv_sb_info));
+
+	sbi->s_sb = sb;
+	sbi->s_block_base = 0;
+	sbi->s_type = FSTYPE_V7;
+	sbi->s_bytesex = BYTESEX_PDP;
+	sb->s_fs_info = sbi;
+	
+	sb_set_blocksize(sb, 512);
+
+	if ((bh = sb_bread(sb, 1)) == NULL) {
+		if (!silent)
+			printk("VFS: unable to read V7 FS superblock on "
+			       "device %s.\n", sb->s_id);
+		goto failed;
+	}
+
+	/* plausibility check on superblock */
+	v7sb = (struct v7_super_block *) bh->b_data;
+	if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE ||
+	    fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD ||
+	    fs32_to_cpu(sbi, v7sb->s_time) == 0)
+		goto failed;
+
+	/* plausibility check on root inode: it is a directory,
+	   with a nonzero size that is a multiple of 16 */
+	if ((bh2 = sb_bread(sb, 2)) == NULL)
+		goto failed;
+	v7i = (struct sysv_inode *)(bh2->b_data + 64);
+	if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR ||
+	    (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
+	    (fs32_to_cpu(sbi, v7i->i_size) & 017) != 0)
+		goto failed;
+	brelse(bh2);
+	bh2 = NULL;
+
+	sbi->s_bh1 = bh;
+	sbi->s_bh2 = bh;
+	if (complete_read_super(sb, silent, 1))
+		return 0;
+
+failed:
+	brelse(bh2);
+	brelse(bh);
+	kfree(sbi);
+	return -EINVAL;
+}
+
+/* Every kernel module contains stuff like this. */
+
+static struct super_block *sysv_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super);
+}
+
+static struct super_block *v7_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super);
+}
+
+static struct file_system_type sysv_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "sysv",
+	.get_sb		= sysv_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static struct file_system_type v7_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "v7",
+	.get_sb		= v7_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+extern int sysv_init_icache(void) __init;
+extern void sysv_destroy_icache(void);
+
+static int __init init_sysv_fs(void)
+{
+	int error;
+
+	error = sysv_init_icache();
+	if (error)
+		goto out;
+	error = register_filesystem(&sysv_fs_type);
+	if (error)
+		goto destroy_icache;
+	error = register_filesystem(&v7_fs_type);
+	if (error)
+		goto unregister;
+	return 0;
+
+unregister:
+	unregister_filesystem(&sysv_fs_type);
+destroy_icache:
+	sysv_destroy_icache();
+out:
+	return error;
+}
+
+static void __exit exit_sysv_fs(void)
+{
+	unregister_filesystem(&sysv_fs_type);
+	unregister_filesystem(&v7_fs_type);
+	sysv_destroy_icache();
+}
+
+module_init(init_sysv_fs)
+module_exit(exit_sysv_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/sysv/symlink.c b/fs/sysv/symlink.c
new file mode 100644
index 0000000..ed637db
--- /dev/null
+++ b/fs/sysv/symlink.c
@@ -0,0 +1,20 @@
+/*
+ *  linux/fs/sysv/symlink.c
+ *
+ *  Handling of System V filesystem fast symlinks extensions.
+ *  Aug 2001, Christoph Hellwig (hch@infradead.org)
+ */
+
+#include "sysv.h"
+#include <linux/namei.h>
+
+static int sysv_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	nd_set_link(nd, (char *)SYSV_I(dentry->d_inode)->i_data);
+	return 0;
+}
+
+struct inode_operations sysv_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= sysv_follow_link,
+};
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
new file mode 100644
index 0000000..b7f9b4a
--- /dev/null
+++ b/fs/sysv/sysv.h
@@ -0,0 +1,244 @@
+#ifndef _SYSV_H
+#define _SYSV_H
+
+#include <linux/buffer_head.h>
+
+typedef __u16 __bitwise __fs16;
+typedef __u32 __bitwise __fs32;
+
+#include <linux/sysv_fs.h>
+
+/*
+ * SystemV/V7/Coherent super-block data in memory
+ *
+ * The SystemV/V7/Coherent superblock contains dynamic data (it gets modified
+ * while the system is running). This is in contrast to the Minix and Berkeley
+ * filesystems (where the superblock is never modified). This affects the
+ * sync() operation: we must keep the superblock in a disk buffer and use this
+ * one as our "working copy".
+ */
+
+struct sysv_sb_info {
+	struct super_block *s_sb;	/* VFS superblock */
+	int	       s_type;		/* file system type: FSTYPE_{XENIX|SYSV|COH} */
+	char	       s_bytesex;	/* bytesex (le/be/pdp) */
+	char	       s_truncate;	/* if 1: names > SYSV_NAMELEN chars are truncated */
+					/* if 0: they are disallowed (ENAMETOOLONG) */
+	nlink_t        s_link_max;	/* max number of hard links to a file */
+	unsigned int   s_inodes_per_block;	/* number of inodes per block */
+	unsigned int   s_inodes_per_block_1;	/* inodes_per_block - 1 */
+	unsigned int   s_inodes_per_block_bits;	/* log2(inodes_per_block) */
+	unsigned int   s_ind_per_block;		/* number of indirections per block */
+	unsigned int   s_ind_per_block_bits;	/* log2(ind_per_block) */
+	unsigned int   s_ind_per_block_2;	/* ind_per_block ^ 2 */
+	unsigned int   s_toobig_block;		/* 10 + ipb + ipb^2 + ipb^3 */
+	unsigned int   s_block_base;	/* physical block number of block 0 */
+	unsigned short s_fic_size;	/* free inode cache size, NICINOD */
+	unsigned short s_flc_size;	/* free block list chunk size, NICFREE */
+	/* The superblock is kept in one or two disk buffers: */
+	struct buffer_head *s_bh1;
+	struct buffer_head *s_bh2;
+	/* These are pointers into the disk buffer, to compensate for
+	   different superblock layout. */
+	char *         s_sbd1;		/* entire superblock data, for part 1 */
+	char *         s_sbd2;		/* entire superblock data, for part 2 */
+	__fs16         *s_sb_fic_count;	/* pointer to s_sbd->s_ninode */
+        sysv_ino_t     *s_sb_fic_inodes; /* pointer to s_sbd->s_inode */
+	__fs16         *s_sb_total_free_inodes; /* pointer to s_sbd->s_tinode */
+	__fs16         *s_bcache_count;	/* pointer to s_sbd->s_nfree */
+	sysv_zone_t    *s_bcache;	/* pointer to s_sbd->s_free */
+	__fs32         *s_free_blocks;	/* pointer to s_sbd->s_tfree */
+	__fs32         *s_sb_time;	/* pointer to s_sbd->s_time */
+	__fs32         *s_sb_state;	/* pointer to s_sbd->s_state, only FSTYPE_SYSV */
+	/* We keep those superblock entities that don't change here;
+	   this saves us an indirection and perhaps a conversion. */
+	u32            s_firstinodezone; /* index of first inode zone */
+	u32            s_firstdatazone;	/* same as s_sbd->s_isize */
+	u32            s_ninodes;	/* total number of inodes */
+	u32            s_ndatazones;	/* total number of data zones */
+	u32            s_nzones;	/* same as s_sbd->s_fsize */
+	u16	       s_namelen;       /* max length of dir entry */
+	int	       s_forced_ro;
+};
+
+/*
+ * SystemV/V7/Coherent FS inode data in memory
+ */
+struct sysv_inode_info {
+	__fs32		i_data[13];
+	u32		i_dir_start_lookup;
+	struct inode	vfs_inode;
+};
+
+
+static inline struct sysv_inode_info *SYSV_I(struct inode *inode)
+{
+	return list_entry(inode, struct sysv_inode_info, vfs_inode);
+}
+
+static inline struct sysv_sb_info *SYSV_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+
+/* identify the FS in memory */
+enum {
+	FSTYPE_NONE = 0,
+	FSTYPE_XENIX,
+	FSTYPE_SYSV4,
+	FSTYPE_SYSV2,
+	FSTYPE_COH,
+	FSTYPE_V7,
+	FSTYPE_AFS,
+	FSTYPE_END,
+};
+
+#define SYSV_MAGIC_BASE		0x012FF7B3
+
+#define XENIX_SUPER_MAGIC	(SYSV_MAGIC_BASE+FSTYPE_XENIX)
+#define SYSV4_SUPER_MAGIC	(SYSV_MAGIC_BASE+FSTYPE_SYSV4)
+#define SYSV2_SUPER_MAGIC	(SYSV_MAGIC_BASE+FSTYPE_SYSV2)
+#define COH_SUPER_MAGIC		(SYSV_MAGIC_BASE+FSTYPE_COH)
+
+
+/* Admissible values for i_nlink: 0.._LINK_MAX */
+enum {
+	XENIX_LINK_MAX	=	126,	/* ?? */
+	SYSV_LINK_MAX	=	126,	/* 127? 251? */
+	V7_LINK_MAX     =	126,	/* ?? */
+	COH_LINK_MAX	=	10000,
+};
+
+
+static inline void dirty_sb(struct super_block *sb)
+{
+	struct sysv_sb_info *sbi = SYSV_SB(sb);
+
+	mark_buffer_dirty(sbi->s_bh1);
+	if (sbi->s_bh1 != sbi->s_bh2)
+		mark_buffer_dirty(sbi->s_bh2);
+	sb->s_dirt = 1;
+}
+
+
+/* ialloc.c */
+extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned,
+			struct buffer_head **);
+extern struct inode * sysv_new_inode(const struct inode *, mode_t);
+extern void sysv_free_inode(struct inode *);
+extern unsigned long sysv_count_free_inodes(struct super_block *);
+
+/* balloc.c */
+extern sysv_zone_t sysv_new_block(struct super_block *);
+extern void sysv_free_block(struct super_block *, sysv_zone_t);
+extern unsigned long sysv_count_free_blocks(struct super_block *);
+
+/* itree.c */
+extern void sysv_truncate(struct inode *);
+
+/* inode.c */
+extern int sysv_write_inode(struct inode *, int);
+extern int sysv_sync_inode(struct inode *);
+extern int sysv_sync_file(struct file *, struct dentry *, int);
+extern void sysv_set_inode(struct inode *, dev_t);
+extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+
+/* dir.c */
+extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **);
+extern int sysv_add_link(struct dentry *, struct inode *);
+extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *);
+extern int sysv_make_empty(struct inode *, struct inode *);
+extern int sysv_empty_dir(struct inode *);
+extern void sysv_set_link(struct sysv_dir_entry *, struct page *,
+			struct inode *);
+extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **);
+extern ino_t sysv_inode_by_name(struct dentry *);
+
+
+extern struct inode_operations sysv_file_inode_operations;
+extern struct inode_operations sysv_dir_inode_operations;
+extern struct inode_operations sysv_fast_symlink_inode_operations;
+extern struct file_operations sysv_file_operations;
+extern struct file_operations sysv_dir_operations;
+extern struct address_space_operations sysv_aops;
+extern struct super_operations sysv_sops;
+extern struct dentry_operations sysv_dentry_operations;
+
+
+enum {
+	BYTESEX_LE,
+	BYTESEX_PDP,
+	BYTESEX_BE,
+};
+
+static inline u32 PDP_swab(u32 x)
+{
+#ifdef __LITTLE_ENDIAN
+	return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
+#else
+#ifdef __BIG_ENDIAN
+	return ((x & 0xff00ff) << 8) | ((x & 0xff00ff00) >> 8);
+#else
+#error BYTESEX
+#endif
+#endif
+}
+
+static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+{
+	if (sbi->s_bytesex == BYTESEX_PDP)
+		return PDP_swab((__force __u32)n);
+	else if (sbi->s_bytesex == BYTESEX_LE)
+		return le32_to_cpu((__force __le32)n);
+	else
+		return be32_to_cpu((__force __be32)n);
+}
+
+static inline __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n)
+{
+	if (sbi->s_bytesex == BYTESEX_PDP)
+		return (__force __fs32)PDP_swab(n);
+	else if (sbi->s_bytesex == BYTESEX_LE)
+		return (__force __fs32)cpu_to_le32(n);
+	else
+		return (__force __fs32)cpu_to_be32(n);
+}
+
+static inline __fs32 fs32_add(struct sysv_sb_info *sbi, __fs32 *n, int d)
+{
+	if (sbi->s_bytesex == BYTESEX_PDP)
+		*(__u32*)n = PDP_swab(PDP_swab(*(__u32*)n)+d);
+	else if (sbi->s_bytesex == BYTESEX_LE)
+		*(__le32*)n = cpu_to_le32(le32_to_cpu(*(__le32*)n)+d);
+	else
+		*(__be32*)n = cpu_to_be32(be32_to_cpu(*(__be32*)n)+d);
+	return *n;
+}
+
+static inline __u16 fs16_to_cpu(struct sysv_sb_info *sbi, __fs16 n)
+{
+	if (sbi->s_bytesex != BYTESEX_BE)
+		return le16_to_cpu((__force __le16)n);
+	else
+		return be16_to_cpu((__force __be16)n);
+}
+
+static inline __fs16 cpu_to_fs16(struct sysv_sb_info *sbi, __u16 n)
+{
+	if (sbi->s_bytesex != BYTESEX_BE)
+		return (__force __fs16)cpu_to_le16(n);
+	else
+		return (__force __fs16)cpu_to_be16(n);
+}
+
+static inline __fs16 fs16_add(struct sysv_sb_info *sbi, __fs16 *n, int d)
+{
+	if (sbi->s_bytesex != BYTESEX_BE)
+		*(__le16*)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+	else
+		*(__be16*)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+	return *n;
+}
+
+#endif /* _SYSV_H */
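
The fs32/fs16 helpers above hide three on-disk byte orders behind one interface; the least familiar is the PDP-11 "middle-endian" layout, in which a 32-bit word is stored as two little-endian 16-bit halves with the high half first. A small standalone sketch, mirroring only the little-endian-host branch of PDP_swab(), shows what the conversion actually does:

	#include <stdio.h>
	#include <stdint.h>

	/* Same operation as PDP_swab() on a little-endian host: swap the 16-bit halves. */
	static uint32_t pdp_swab(uint32_t x)
	{
		return ((x & 0x0000ffffu) << 16) | ((x & 0xffff0000u) >> 16);
	}

	int main(void)
	{
		/*
		 * The value 0x0A0B0C0D in PDP order occupies the bytes 0B 0A 0D 0C
		 * on disk.  Loaded verbatim by a little-endian host those bytes read
		 * back as 0x0C0D0A0B; swapping the halves recovers the value.
		 */
		uint32_t raw = 0x0C0D0A0Bu;

		printf("decoded: 0x%08X\n", (unsigned int)pdp_swab(raw));	/* 0x0A0B0C0D */
		return 0;
	}
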
diff --git a/fs/udf/Makefile b/fs/udf/Makefile
new file mode 100644
index 0000000..be845e7
--- /dev/null
+++ b/fs/udf/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the linux udf-filesystem routines.
+#
+
+obj-$(CONFIG_UDF_FS) += udf.o
+
+udf-objs     := balloc.o dir.o file.o ialloc.o inode.o lowlevel.o namei.o \
+		partition.o super.o truncate.o symlink.o fsync.o \
+		crc.o directory.o misc.o udftime.o unicode.o
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
new file mode 100644
index 0000000..b9ded26
--- /dev/null
+++ b/fs/udf/balloc.c
@@ -0,0 +1,959 @@
+/*
+ * balloc.c
+ *
+ * PURPOSE
+ *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1999-2001 Ben Fennema
+ *  (C) 1999 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  02/24/99 blf  Created.
+ *
+ */
+
+#include "udfdecl.h"
+
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/bitops.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
+#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
+#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
+#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
+#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
+#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)
+
+#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
+#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
+#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
+#define uintBPL_t uint(BITS_PER_LONG)
+#define uint(x) xuint(x)
+#define xuint(x) __le ## x
+
+extern inline int find_next_one_bit (void * addr, int size, int offset)
+{
+	uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
+	int result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= (BITS_PER_LONG-1);
+	if (offset)
+	{
+		tmp = leBPL_to_cpup(p++);
+		tmp &= ~0UL << offset;
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1))
+	{
+		if ((tmp = leBPL_to_cpup(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = leBPL_to_cpup(p);
+found_first:
+	tmp &= ~0UL >> (BITS_PER_LONG-size);
+found_middle:
+	return result + ffz(~tmp);
+}
+
+#define find_first_one_bit(addr, size)\
+	find_next_one_bit((addr), (size), 0)
+
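
find_next_one_bit() above scans a little-endian bitmap one machine word at a time, and the word juggling can obscure what it returns. A naive byte-wise reference with the same contract (a hypothetical helper, not part of this patch) may help:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Return the index of the first set bit at or after 'offset' in a
	 * little-endian bitmap of 'size' bits, or 'size' if there is none -
	 * the same contract as find_next_one_bit(), minus the word tricks.
	 */
	static int naive_find_next_one_bit(const uint8_t *bits, int size, int offset)
	{
		int i;

		for (i = offset; i < size; i++)
			if (bits[i >> 3] & (1u << (i & 7)))
				return i;
		return size;
	}

	int main(void)
	{
		uint8_t map[2] = { 0x00, 0x10 };	/* only bit 12 is set */

		printf("%d\n", naive_find_next_one_bit(map, 16, 0));	/* prints 12 */
		return 0;
	}
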
+static int read_block_bitmap(struct super_block * sb,
+	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
+{
+	struct buffer_head *bh = NULL;
+	int retval = 0;
+	kernel_lb_addr loc;
+
+	loc.logicalBlockNum = bitmap->s_extPosition;
+	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
+
+	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
+	if (!bh)
+	{
+		retval = -EIO;
+	}
+	bitmap->s_block_bitmap[bitmap_nr] = bh;
+	return retval;
+}
+
+static int __load_block_bitmap(struct super_block * sb,
+	struct udf_bitmap *bitmap, unsigned int block_group)
+{
+	int retval = 0;
+	int nr_groups = bitmap->s_nr_groups;
+
+	if (block_group >= nr_groups)
+	{
+		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups);
+	}
+
+	if (bitmap->s_block_bitmap[block_group])
+		return block_group;
+	else
+	{
+		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+		if (retval < 0)
+			return retval;
+		return block_group;
+	}
+}
+
+static inline int load_block_bitmap(struct super_block * sb,
+	struct udf_bitmap *bitmap, unsigned int block_group)
+{
+	int slot;
+
+	slot = __load_block_bitmap(sb, bitmap, block_group);
+
+	if (slot < 0)
+		return slot;
+
+	if (!bitmap->s_block_bitmap[slot])
+		return -EIO;
+
+	return slot;
+}
+
+static void udf_bitmap_free_blocks(struct super_block * sb,
+	struct inode * inode,
+	struct udf_bitmap *bitmap,
+	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct buffer_head * bh = NULL;
+	unsigned long block;
+	unsigned long block_group;
+	unsigned long bit;
+	unsigned long i;
+	int bitmap_nr;
+	unsigned long overflow;
+
+	down(&sbi->s_alloc_sem);
+	if (bloc.logicalBlockNum < 0 ||
+		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
+	{
+		udf_debug("%d < %d || %d + %d > %d\n",
+			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+		goto error_return;
+	}
+
+	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);
+
+do_more:
+	overflow = 0;
+	block_group = block >> (sb->s_blocksize_bits + 3);
+	bit = block % (sb->s_blocksize << 3);
+
+	/*
+	 * Check to see if we are freeing blocks across a group boundary.
+	 */
+	if (bit + count > (sb->s_blocksize << 3))
+	{
+		overflow = bit + count - (sb->s_blocksize << 3);
+		count -= overflow;
+	}
+	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
+	if (bitmap_nr < 0)
+		goto error_return;
+
+	bh = bitmap->s_block_bitmap[bitmap_nr];
+	for (i=0; i < count; i++)
+	{
+		if (udf_set_bit(bit + i, bh->b_data))
+		{
+			udf_debug("bit %ld already set\n", bit + i);
+			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
+		}
+		else
+		{
+			if (inode)
+				DQUOT_FREE_BLOCK(inode, 1);
+			if (UDF_SB_LVIDBH(sb))
+			{
+				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
+					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
+			}
+		}
+	}
+	mark_buffer_dirty(bh);
+	if (overflow)
+	{
+		block += count;
+		count = overflow;
+		goto do_more;
+	}
+error_return:
+	sb->s_dirt = 1;
+	if (UDF_SB_LVIDBH(sb))
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	up(&sbi->s_alloc_sem);
+	return;
+}
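
The block_group/bit split used above packs one bitmap block's worth of bits per group, i.e. s_blocksize * 8 bits. A quick worked example of the same arithmetic as block >> (s_blocksize_bits + 3) and block % (s_blocksize << 3), with an assumed 2048-byte block size (the numbers are illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long blocksize = 2048;			/* assumed block size in bytes */
		unsigned long bits_per_group = blocksize << 3;	/* 16384 bits per bitmap block */
		unsigned long block = 20000;			/* block number within the bitmap space */

		printf("group %lu, bit %lu\n", block / bits_per_group, block % bits_per_group);
		/* prints "group 1, bit 3616" */
		return 0;
	}
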
+
+static int udf_bitmap_prealloc_blocks(struct super_block * sb,
+	struct inode * inode,
+	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
+	uint32_t block_count)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	int alloc_count = 0;
+	int bit, block, block_group, group_start;
+	int nr_groups, bitmap_nr;
+	struct buffer_head *bh;
+
+	down(&sbi->s_alloc_sem);
+	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+		goto out;
+
+	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
+		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
+
+repeat:
+	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
+		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
+	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
+	block_group = block >> (sb->s_blocksize_bits + 3);
+	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
+
+	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
+	if (bitmap_nr < 0)
+		goto out;
+	bh = bitmap->s_block_bitmap[bitmap_nr];
+
+	bit = block % (sb->s_blocksize << 3);
+
+	while (bit < (sb->s_blocksize << 3) && block_count > 0)
+	{
+		if (!udf_test_bit(bit, bh->b_data))
+			goto out;
+		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+			goto out;
+		else if (!udf_clear_bit(bit, bh->b_data))
+		{
+			udf_debug("bit already cleared for block %d\n", bit);
+			DQUOT_FREE_BLOCK(inode, 1);
+			goto out;
+		}
+		block_count --;
+		alloc_count ++;
+		bit ++;
+		block ++;
+	}
+	mark_buffer_dirty(bh);
+	if (block_count > 0)
+		goto repeat;
+out:
+	if (UDF_SB_LVIDBH(sb))
+	{
+		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+	sb->s_dirt = 1;
+	up(&sbi->s_alloc_sem);
+	return alloc_count;
+}
+
+static int udf_bitmap_new_block(struct super_block * sb,
+	struct inode * inode,
+	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	int newbit, bit=0, block, block_group, group_start;
+	int end_goal, nr_groups, bitmap_nr, i;
+	struct buffer_head *bh = NULL;
+	char *ptr;
+	int newblock = 0;
+
+	*err = -ENOSPC;
+	down(&sbi->s_alloc_sem);
+
+repeat:
+	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+		goal = 0;
+
+	nr_groups = bitmap->s_nr_groups;
+	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
+	block_group = block >> (sb->s_blocksize_bits + 3);
+	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
+
+	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
+	if (bitmap_nr < 0)
+		goto error_return;
+	bh = bitmap->s_block_bitmap[bitmap_nr];
+	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
+
+	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
+	{
+		bit = block % (sb->s_blocksize << 3);
+
+		if (udf_test_bit(bit, bh->b_data))
+		{
+			goto got_block;
+		}
+		end_goal = (bit + 63) & ~63;
+		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
+		if (bit < end_goal)
+			goto got_block;
+		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
+		newbit = (ptr - ((char *)bh->b_data)) << 3;
+		if (newbit < sb->s_blocksize << 3)
+		{
+			bit = newbit;
+			goto search_back;
+		}
+		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
+		if (newbit < sb->s_blocksize << 3)
+		{
+			bit = newbit;
+			goto got_block;
+		}
+	}
+
+	for (i=0; i<(nr_groups*2); i++)
+	{
+		block_group ++;
+		if (block_group >= nr_groups)
+			block_group = 0;
+		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
+
+		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
+		if (bitmap_nr < 0)
+			goto error_return;
+		bh = bitmap->s_block_bitmap[bitmap_nr];
+		if (i < nr_groups)
+		{
+			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
+			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
+			{
+				bit = (ptr - ((char *)bh->b_data)) << 3;
+				break;
+			}
+		}
+		else
+		{
+			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
+			if (bit < sb->s_blocksize << 3)
+				break;
+		}
+	}
+	if (i >= (nr_groups*2))
+	{
+		up(&sbi->s_alloc_sem);
+		return newblock;
+	}
+	if (bit < sb->s_blocksize << 3)
+		goto search_back;
+	else
+		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
+	if (bit >= sb->s_blocksize << 3)
+	{
+		up(&sbi->s_alloc_sem);
+		return 0;
+	}
+
+search_back:
+	for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);
+
+got_block:
+
+	/*
+	 * Check quota for allocation of this block.
+	 */
+	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
+	{
+		up(&sbi->s_alloc_sem);
+		*err = -EDQUOT;
+		return 0;
+	}
+
+	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
+		(sizeof(struct spaceBitmapDesc) << 3);
+
+	if (!udf_clear_bit(bit, bh->b_data))
+	{
+		udf_debug("bit already cleared for block %d\n", bit);
+		goto repeat;
+	}
+
+	mark_buffer_dirty(bh);
+
+	if (UDF_SB_LVIDBH(sb))
+	{
+		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+	sb->s_dirt = 1;
+	up(&sbi->s_alloc_sem);
+	*err = 0;
+	return newblock;
+
+error_return:
+	*err = -EIO;
+	up(&sbi->s_alloc_sem);
+	return 0;
+}
+
+static void udf_table_free_blocks(struct super_block * sb,
+	struct inode * inode,
+	struct inode * table,
+	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	uint32_t start, end;
+	uint32_t nextoffset, oextoffset, elen;
+	kernel_lb_addr nbloc, obloc, eloc;
+	struct buffer_head *obh, *nbh;
+	int8_t etype;
+	int i;
+
+	down(&sbi->s_alloc_sem);
+	if (bloc.logicalBlockNum < 0 ||
+		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
+	{
+		udf_debug("%d < %d || %d + %d > %d\n",
+			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+		goto error_return;
+	}
+
+	/* We do this up front - there are some error conditions that could occur,
+	   but... oh well */
+	if (inode)
+		DQUOT_FREE_BLOCK(inode, count);
+	if (UDF_SB_LVIDBH(sb))
+	{
+		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+
+	start = bloc.logicalBlockNum + offset;
+	end = bloc.logicalBlockNum + offset + count - 1;
+
+	oextoffset = nextoffset = sizeof(struct unallocSpaceEntry);
+	elen = 0;
+	obloc = nbloc = UDF_I_LOCATION(table);
+
+	obh = nbh = NULL;
+
+	while (count && (etype =
+		udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
+	{
+		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
+			start))
+		{
+			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
+			{
+				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
+			}
+			else
+			{
+				elen = (etype << 30) |
+					(elen + (count << sb->s_blocksize_bits));
+				start += count;
+				count = 0;
+			}
+			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
+		}
+		else if (eloc.logicalBlockNum == (end + 1))
+		{
+			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
+			{
+				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				eloc.logicalBlockNum -=
+					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
+			}
+			else
+			{
+				eloc.logicalBlockNum = start;
+				elen = (etype << 30) |
+					(elen + (count << sb->s_blocksize_bits));
+				end -= count;
+				count = 0;
+			}
+			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
+		}
+
+		if (nbh != obh)
+		{
+			i = -1;
+			obloc = nbloc;
+			udf_release_data(obh);
+			atomic_inc(&nbh->b_count);
+			obh = nbh;
+			oextoffset = 0;
+		}
+		else
+			oextoffset = nextoffset;
+	}
+
+	if (count)
+	{
+		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
+				 a new block, and since we hold the super block lock already
+				 very bad things would happen :)
+
+				 We copy the behavior of udf_add_aext, but instead of
+				 trying to allocate a new block close to the existing one,
+				 we just steal a block from the extent we are trying to add.
+
+				 It would be nice if the blocks were close together, but it
+				 isn't required.
+		*/
+
+		int adsize;
+		short_ad *sad = NULL;
+		long_ad *lad = NULL;
+		struct allocExtDesc *aed;
+
+		eloc.logicalBlockNum = start;
+		elen = EXT_RECORDED_ALLOCATED |
+			(count << sb->s_blocksize_bits);
+
+		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
+			adsize = sizeof(short_ad);
+		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
+			adsize = sizeof(long_ad);
+		else
+		{
+			udf_release_data(obh);
+			udf_release_data(nbh);
+			goto error_return;
+		}
+
+		if (nextoffset + (2 * adsize) > sb->s_blocksize)
+		{
+			char *sptr, *dptr;
+			int loffset;
+	
+			udf_release_data(obh);
+			obh = nbh;
+			obloc = nbloc;
+			oextoffset = nextoffset;
+
+			/* Steal a block from the extent being freed */
+			nbloc.logicalBlockNum = eloc.logicalBlockNum;
+			eloc.logicalBlockNum ++;
+			elen -= sb->s_blocksize;
+
+			if (!(nbh = udf_tread(sb,
+				udf_get_lb_pblock(sb, nbloc, 0))))
+			{
+				udf_release_data(obh);
+				goto error_return;
+			}
+			aed = (struct allocExtDesc *)(nbh->b_data);
+			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
+			if (nextoffset + adsize > sb->s_blocksize)
+			{
+				loffset = nextoffset;
+				aed->lengthAllocDescs = cpu_to_le32(adsize);
+				if (!obh)
+					sptr = UDF_I_DATA(table) + nextoffset - udf_file_entry_alloc_offset(table) + UDF_I_LENEATTR(table) - adsize;
+				else
+					sptr = obh->b_data + nextoffset - adsize;
+				dptr = nbh->b_data + sizeof(struct allocExtDesc);
+				memcpy(dptr, sptr, adsize);
+				nextoffset = sizeof(struct allocExtDesc) + adsize;
+			}
+			else
+			{
+				loffset = nextoffset + adsize;
+				aed->lengthAllocDescs = cpu_to_le32(0);
+				sptr = (obh)->b_data + nextoffset;
+				nextoffset = sizeof(struct allocExtDesc);
+
+				if (obh)
+				{
+					aed = (struct allocExtDesc *)(obh)->b_data;
+					aed->lengthAllocDescs =
+						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+				}
+				else
+				{
+					UDF_I_LENALLOC(table) += adsize;
+					mark_inode_dirty(table);
+				}
+			}
+			if (UDF_SB_UDFREV(sb) >= 0x0200)
+				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
+					nbloc.logicalBlockNum, sizeof(tag));
+			else
+				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
+					nbloc.logicalBlockNum, sizeof(tag));
+			switch (UDF_I_ALLOCTYPE(table))
+			{
+				case ICBTAG_FLAG_AD_SHORT:
+				{
+					sad = (short_ad *)sptr;
+					sad->extLength = cpu_to_le32(
+						EXT_NEXT_EXTENT_ALLOCDECS |
+						sb->s_blocksize);
+					sad->extPosition = cpu_to_le32(nbloc.logicalBlockNum);
+					break;
+				}
+				case ICBTAG_FLAG_AD_LONG:
+				{
+					lad = (long_ad *)sptr;
+					lad->extLength = cpu_to_le32(
+						EXT_NEXT_EXTENT_ALLOCDECS |
+						sb->s_blocksize);
+					lad->extLocation = cpu_to_lelb(nbloc);
+					break;
+				}
+			}
+			if (obh)
+			{
+				udf_update_tag(obh->b_data, loffset);
+				mark_buffer_dirty(obh);
+			}
+			else
+				mark_inode_dirty(table);
+		}
+
+		if (elen) /* It's possible that stealing the block emptied the extent */
+		{
+			udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);
+
+			if (!nbh)
+			{
+				UDF_I_LENALLOC(table) += adsize;
+				mark_inode_dirty(table);
+			}
+			else
+			{
+				aed = (struct allocExtDesc *)nbh->b_data;
+				aed->lengthAllocDescs =
+					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+				udf_update_tag(nbh->b_data, nextoffset);
+				mark_buffer_dirty(nbh);
+			}
+		}
+	}
+
+	udf_release_data(nbh);
+	udf_release_data(obh);
+
+error_return:
+	sb->s_dirt = 1;
+	up(&sbi->s_alloc_sem);
+	return;
+}
+
+static int udf_table_prealloc_blocks(struct super_block * sb,
+	struct inode * inode,
+	struct inode *table, uint16_t partition, uint32_t first_block,
+	uint32_t block_count)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	int alloc_count = 0;
+	uint32_t extoffset, elen, adsize;
+	kernel_lb_addr bloc, eloc;
+	struct buffer_head *bh;
+	int8_t etype = -1;
+
+	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+		return 0;
+
+	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		return 0;
+
+	down(&sbi->s_alloc_sem);
+	extoffset = sizeof(struct unallocSpaceEntry);
+	bloc = UDF_I_LOCATION(table);
+
+	bh = NULL;
+	eloc.logicalBlockNum = 0xFFFFFFFF;
+
+	while (first_block != eloc.logicalBlockNum && (etype =
+		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
+	{
+		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
+			eloc.logicalBlockNum, elen, first_block);
+		; /* nothing to do here beyond the debug output */
+	}
+
+	if (first_block == eloc.logicalBlockNum)
+	{
+		extoffset -= adsize;
+
+		alloc_count = (elen >> sb->s_blocksize_bits);
+		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
+			alloc_count = 0;
+		else if (alloc_count > block_count)
+		{
+			alloc_count = block_count;
+			eloc.logicalBlockNum += alloc_count;
+			elen -= (alloc_count << sb->s_blocksize_bits);
+			udf_write_aext(table, bloc, &extoffset, eloc, (etype << 30) | elen, bh, 1);
+		}
+		else
+			udf_delete_aext(table, bloc, extoffset, eloc, (etype << 30) | elen, bh);
+	}
+	else
+		alloc_count = 0;
+
+	udf_release_data(bh);
+
+	if (alloc_count && UDF_SB_LVIDBH(sb))
+	{
+		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+		sb->s_dirt = 1;
+	}
+	up(&sbi->s_alloc_sem);
+	return alloc_count;
+}
+
+static int udf_table_new_block(struct super_block * sb,
+	struct inode * inode,
+	struct inode *table, uint16_t partition, uint32_t goal, int *err)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
+	uint32_t newblock = 0, adsize;
+	uint32_t extoffset, goal_extoffset, elen, goal_elen = 0;
+	kernel_lb_addr bloc, goal_bloc, eloc, goal_eloc;
+	struct buffer_head *bh, *goal_bh;
+	int8_t etype;
+
+	*err = -ENOSPC;
+
+	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		return newblock;
+
+	down(&sbi->s_alloc_sem);
+	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+		goal = 0;
+
+	/* We search for the closest matching block to goal. If we find an exact hit,
+	   we stop. Otherwise we keep going till we run out of extents.
+	   We store the buffer_head, bloc, and extoffset of the current closest
+	   match and use that when we are done.
+	*/
+
+	extoffset = sizeof(struct unallocSpaceEntry);
+	bloc = UDF_I_LOCATION(table);
+
+	goal_bh = bh = NULL;
+
+	while (spread && (etype =
+		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
+	{
+		if (goal >= eloc.logicalBlockNum)
+		{
+			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
+				nspread = 0;
+			else
+				nspread = goal - eloc.logicalBlockNum -
+					(elen >> sb->s_blocksize_bits);
+		}
+		else
+			nspread = eloc.logicalBlockNum - goal;
+
+		if (nspread < spread)
+		{
+			spread = nspread;
+			if (goal_bh != bh)
+			{
+				udf_release_data(goal_bh);
+				goal_bh = bh;
+				atomic_inc(&goal_bh->b_count);
+			}
+			goal_bloc = bloc;
+			goal_extoffset = extoffset - adsize;
+			goal_eloc = eloc;
+			goal_elen = (etype << 30) | elen;
+		}
+	}
+
+	udf_release_data(bh);
+
+	if (spread == 0xFFFFFFFF)
+	{
+		udf_release_data(goal_bh);
+		up(&sbi->s_alloc_sem);
+		return 0;
+	}
+
+	/* Only allocate blocks from the beginning of the extent.
+	   That way, we only delete (empty) extents, and never have to insert an
+	   extent because of splitting. */
+	/* This works, but very poorly.... */
+
+	newblock = goal_eloc.logicalBlockNum;
+	goal_eloc.logicalBlockNum ++;
+	goal_elen -= sb->s_blocksize;
+
+	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
+	{
+		udf_release_data(goal_bh);
+		up(&sbi->s_alloc_sem);
+		*err = -EDQUOT;
+		return 0;
+	}
+
+	if (goal_elen)
+		udf_write_aext(table, goal_bloc, &goal_extoffset, goal_eloc, goal_elen, goal_bh, 1);
+	else
+		udf_delete_aext(table, goal_bloc, goal_extoffset, goal_eloc, goal_elen, goal_bh);
+	udf_release_data(goal_bh);
+
+	if (UDF_SB_LVIDBH(sb))
+	{
+		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+
+	sb->s_dirt = 1;
+	up(&sbi->s_alloc_sem);
+	*err = 0;
+	return newblock;
+}
+
+inline void udf_free_blocks(struct super_block * sb,
+	struct inode * inode,
+	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+{
+	uint16_t partition = bloc.partitionReferenceNum;
+
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
+	{
+		return udf_bitmap_free_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+			bloc, offset, count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
+	{
+		return udf_table_free_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+			bloc, offset, count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
+	{
+		return udf_bitmap_free_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+			bloc, offset, count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
+	{
+		return udf_table_free_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+			bloc, offset, count);
+	}
+	else
+		return;
+}
+
+inline int udf_prealloc_blocks(struct super_block * sb,
+	struct inode * inode,
+	uint16_t partition, uint32_t first_block, uint32_t block_count)
+{
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
+	{
+		return udf_bitmap_prealloc_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+			partition, first_block, block_count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
+	{
+		return udf_table_prealloc_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+			partition, first_block, block_count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
+	{
+		return udf_bitmap_prealloc_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+			partition, first_block, block_count);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
+	{
+		return udf_table_prealloc_blocks(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+			partition, first_block, block_count);
+	}
+	else
+		return 0;
+}
+
+inline int udf_new_block(struct super_block * sb,
+	struct inode * inode,
+	uint16_t partition, uint32_t goal, int *err)
+{
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
+	{
+		return udf_bitmap_new_block(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+			partition, goal, err);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
+	{
+		return udf_table_new_block(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+			partition, goal, err);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
+	{
+		return udf_bitmap_new_block(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+			partition, goal, err);
+	}
+	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
+	{
+		return udf_table_new_block(sb, inode,
+			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+			partition, goal, err);
+	}
+	else
+	{
+		*err = -EIO;
+		return 0;
+	}
+}
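
A pattern that recurs throughout the table-based allocator above is packing the extent type into the top two bits of the 32-bit length, which is why lengths are capped at 0x3FFFFFFF bytes and trimmed to 0x40000000 - blocksize. A minimal sketch of the encoding; the example type value is an assumption for illustration only:

	#include <stdio.h>
	#include <stdint.h>

	#define EXT_LEN_MASK	0x3FFFFFFFu	/* low 30 bits carry the byte length */

	int main(void)
	{
		uint32_t etype = 1;		/* illustrative extent type */
		uint32_t bytes = 8 * 2048;	/* eight 2048-byte blocks */
		uint32_t elen = (etype << 30) | bytes;

		printf("type %u, length %u bytes\n", elen >> 30, elen & EXT_LEN_MASK);
		/* prints "type 1, length 16384 bytes" */
		return 0;
	}
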
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
new file mode 100644
index 0000000..d95c6e3
--- /dev/null
+++ b/fs/udf/crc.c
@@ -0,0 +1,178 @@
+/*
+ * crc.c
+ *
+ * PURPOSE
+ *	Routines to generate, calculate, and test a 16-bit CRC.
+ *
+ * DESCRIPTION
+ *	The CRC code was devised by Don P. Mitchell of AT&T Bell Laboratories
+ *	and Ned W. Rhodes of Software Systems Group. It has been published in
+ *	"Design and Validation of Computer Protocols", Prentice Hall,
+ *	Englewood Cliffs, NJ, 1991, Chapter 3, ISBN 0-13-539925-4.
+ *
+ *	Copyright is held by AT&T.
+ *
+ *	AT&T gives permission for the free use of the CRC source code.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ */
+
+#include "udfdecl.h"
+
+static uint16_t crc_table[256] = {
+	0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50a5U, 0x60c6U, 0x70e7U,
+	0x8108U, 0x9129U, 0xa14aU, 0xb16bU, 0xc18cU, 0xd1adU, 0xe1ceU, 0xf1efU,
+	0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52b5U, 0x4294U, 0x72f7U, 0x62d6U,
+	0x9339U, 0x8318U, 0xb37bU, 0xa35aU, 0xd3bdU, 0xc39cU, 0xf3ffU, 0xe3deU,
+	0x2462U, 0x3443U, 0x0420U, 0x1401U, 0x64e6U, 0x74c7U, 0x44a4U, 0x5485U,
+	0xa56aU, 0xb54bU, 0x8528U, 0x9509U, 0xe5eeU, 0xf5cfU, 0xc5acU, 0xd58dU,
+	0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76d7U, 0x66f6U, 0x5695U, 0x46b4U,
+	0xb75bU, 0xa77aU, 0x9719U, 0x8738U, 0xf7dfU, 0xe7feU, 0xd79dU, 0xc7bcU,
+	0x48c4U, 0x58e5U, 0x6886U, 0x78a7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U,
+	0xc9ccU, 0xd9edU, 0xe98eU, 0xf9afU, 0x8948U, 0x9969U, 0xa90aU, 0xb92bU,
+	0x5af5U, 0x4ad4U, 0x7ab7U, 0x6a96U, 0x1a71U, 0x0a50U, 0x3a33U, 0x2a12U,
+	0xdbfdU, 0xcbdcU, 0xfbbfU, 0xeb9eU, 0x9b79U, 0x8b58U, 0xbb3bU, 0xab1aU,
+	0x6ca6U, 0x7c87U, 0x4ce4U, 0x5cc5U, 0x2c22U, 0x3c03U, 0x0c60U, 0x1c41U,
+	0xedaeU, 0xfd8fU, 0xcdecU, 0xddcdU, 0xad2aU, 0xbd0bU, 0x8d68U, 0x9d49U,
+	0x7e97U, 0x6eb6U, 0x5ed5U, 0x4ef4U, 0x3e13U, 0x2e32U, 0x1e51U, 0x0e70U,
+	0xff9fU, 0xefbeU, 0xdfddU, 0xcffcU, 0xbf1bU, 0xaf3aU, 0x9f59U, 0x8f78U,
+	0x9188U, 0x81a9U, 0xb1caU, 0xa1ebU, 0xd10cU, 0xc12dU, 0xf14eU, 0xe16fU,
+	0x1080U, 0x00a1U, 0x30c2U, 0x20e3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U,
+	0x83b9U, 0x9398U, 0xa3fbU, 0xb3daU, 0xc33dU, 0xd31cU, 0xe37fU, 0xf35eU,
+	0x02b1U, 0x1290U, 0x22f3U, 0x32d2U, 0x4235U, 0x5214U, 0x6277U, 0x7256U,
+	0xb5eaU, 0xa5cbU, 0x95a8U, 0x8589U, 0xf56eU, 0xe54fU, 0xd52cU, 0xc50dU,
+	0x34e2U, 0x24c3U, 0x14a0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U,
+	0xa7dbU, 0xb7faU, 0x8799U, 0x97b8U, 0xe75fU, 0xf77eU, 0xc71dU, 0xd73cU,
+	0x26d3U, 0x36f2U, 0x0691U, 0x16b0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U,
+	0xd94cU, 0xc96dU, 0xf90eU, 0xe92fU, 0x99c8U, 0x89e9U, 0xb98aU, 0xa9abU,
+	0x5844U, 0x4865U, 0x7806U, 0x6827U, 0x18c0U, 0x08e1U, 0x3882U, 0x28a3U,
+	0xcb7dU, 0xdb5cU, 0xeb3fU, 0xfb1eU, 0x8bf9U, 0x9bd8U, 0xabbbU, 0xbb9aU,
+	0x4a75U, 0x5a54U, 0x6a37U, 0x7a16U, 0x0af1U, 0x1ad0U, 0x2ab3U, 0x3a92U,
+	0xfd2eU, 0xed0fU, 0xdd6cU, 0xcd4dU, 0xbdaaU, 0xad8bU, 0x9de8U, 0x8dc9U,
+	0x7c26U, 0x6c07U, 0x5c64U, 0x4c45U, 0x3ca2U, 0x2c83U, 0x1ce0U, 0x0cc1U,
+	0xef1fU, 0xff3eU, 0xcf5dU, 0xdf7cU, 0xaf9bU, 0xbfbaU, 0x8fd9U, 0x9ff8U,
+	0x6e17U, 0x7e36U, 0x4e55U, 0x5e74U, 0x2e93U, 0x3eb2U, 0x0ed1U, 0x1ef0U
+};
+
+/*
+ * udf_crc
+ *
+ * PURPOSE
+ *	Calculate a 16-bit CRC checksum using ITU-T V.41 polynomial.
+ *
+ * DESCRIPTION
+ *	The OSTA-UDF(tm) 1.50 standard states that using CRCs is mandatory.
+ *	The polynomial used is:	x^16 + x^12 + x^5 + 1
+ *
+ * PRE-CONDITIONS
+ *	data		Pointer to the data block.
+ *	size		Size of the data block.
+ *
+ * POST-CONDITIONS
+ *	<return>	CRC of the data block.
+ *
+ * HISTORY
+ *	July 21, 1997 - Andrew E. Mileski
+ *	Adapted from OSTA-UDF(tm) 1.50 standard.
+ */
+uint16_t
+udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
+{
+	while (size--)
+		crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
+
+	return crc;
+}
+
+/****************************************************************************/
+#if defined(TEST)
+
+/*
+ * PURPOSE
+ *	Test udf_crc()
+ *
+ * HISTORY
+ *	July 21, 1997 - Andrew E. Mileski
+ *	Adapted from OSTA-UDF(tm) 1.50 standard.
+ */
+
+unsigned char bytes[] = { 0x70U, 0x6AU, 0x77U };
+
+int main(void)
+{
+	unsigned short x;
+
+	x = udf_crc(bytes, sizeof bytes, 0);
+	printf("udf_crc: calculated = %4.4x, correct = %4.4x\n", x, 0x3299U);
+
+	return 0;
+}
+
+#endif /* defined(TEST) */
+
+/****************************************************************************/
+#if defined(GENERATE)
+
+/*
+ * PURPOSE
+ *	Generate a table for fast 16-bit CRC calculations (any polynomial).
+ *
+ * DESCRIPTION
+ *	The ITU-T V.41 polynomial is 010041.
+ *
+ * HISTORY
+ *	July 21, 1997 - Andrew E. Mileski
+ *	Adapted from OSTA-UDF(tm) 1.50 standard.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv)
+{
+	unsigned long crc, poly;
+	int n, i;
+
+	/* Get the polynomial */
+	sscanf(argv[1], "%lo", &poly);
+	if (poly & 0xffff0000U){
+		fprintf(stderr, "polynomial is too large\n");
+		exit(1);
+	}
+
+	printf("/* CRC 0%o */\n", poly);
+
+	/* Create a table */
+	printf("static unsigned short crc_table[256] = {\n");
+	for (n = 0; n < 256; n++){
+		if (n % 8 == 0)
+			printf("\t");
+		crc = n << 8;
+		for (i = 0; i < 8; i++){
+			if(crc & 0x8000U)
+				crc = (crc << 1) ^ poly;
+			else
+				crc <<= 1;
+			crc &= 0xFFFFU;
+		}
+		if (n == 255)
+			printf("0x%04xU ", crc);
+		else
+			printf("0x%04xU, ", crc);
+		if(n % 8 == 7)
+			printf("\n");
+	}
+	printf("};\n");
+
+	return 0;
+}
+
+#endif /* defined(GENERATE) */
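
Since the table above is easy to mistype, a bit-at-a-time implementation of the same CRC (polynomial 0x1021, MSB first, no reflection, initial value 0) makes a handy cross-check; it reproduces the 0x3299 result for the OSTA test bytes used in the TEST block. This is a standalone sketch, not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t crc_ccitt_bitwise(const uint8_t *data, uint32_t size, uint16_t crc)
	{
		while (size--) {
			int i;

			crc ^= (uint16_t)(*data++ << 8);
			for (i = 0; i < 8; i++)
				crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
						     : (uint16_t)(crc << 1);
		}
		return crc;
	}

	int main(void)
	{
		const uint8_t bytes[] = { 0x70, 0x6A, 0x77 };	/* OSTA-UDF example data */

		printf("0x%04X\n", crc_ccitt_bitwise(bytes, sizeof bytes, 0));	/* 0x3299 */
		return 0;
	}
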
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
new file mode 100644
index 0000000..82440b7
--- /dev/null
+++ b/fs/udf/dir.c
@@ -0,0 +1,268 @@
+/*
+ * dir.c
+ *
+ * PURPOSE
+ *  Directory handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-2004 Ben Fennema
+ *
+ * HISTORY
+ *
+ *  10/05/98 dgb  Split directory operations into their own file
+ *                Implemented directory reads via do_udf_readdir
+ *  10/06/98      Made directory operations work!
+ *  11/17/98      Rewrote directory to support ICBTAG_FLAG_AD_LONG
+ *  11/25/98 blf  Rewrote directory handling (readdir+lookup) to support reading
+ *                across blocks.
+ *  12/12/98      Split out the lookup code to namei.c. bulk of directory
+ *                code now in directory.c:udf_fileident_read.
+ */
+
+#include "udfdecl.h"
+
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
+/* Prototypes for file operations */
+static int udf_readdir(struct file *, void *, filldir_t);
+static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *);
+
+/* readdir and lookup functions */
+
+struct file_operations udf_dir_operations = {
+	.read			= generic_read_dir,
+	.readdir		= udf_readdir,
+	.ioctl			= udf_ioctl,
+	.fsync			= udf_fsync_file,
+};
+
+/*
+ * udf_readdir
+ *
+ * PURPOSE
+ *	Read a directory entry.
+ *
+ * DESCRIPTION
+ *	Optional - sys_getdents() will return -ENOTDIR if this routine is not
+ *	available.
+ *
+ *	Refer to sys_getdents() in fs/readdir.c
+ *	sys_getdents() -> vfs_readdir() -> udf_readdir().
+ *
+ * PRE-CONDITIONS
+ *	filp			Pointer to directory file.
+ *	dirent			Pointer to directory entry buffer.
+ *	filldir			Pointer to filldir function.
+ *
+ * POST-CONDITIONS
+ *	<return>		>=0 on success.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+
+int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *dir = filp->f_dentry->d_inode;
+	int result;
+
+	lock_kernel();
+
+	if ( filp->f_pos == 0 ) 
+	{
+		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0)
+		{
+			unlock_kernel();
+			return 0;
+		}
+		filp->f_pos ++;
+	}
+
+	result = do_udf_readdir(dir, filp, filldir, dirent);
+	unlock_kernel();
+ 	return result;
+}
+
+static int 
+do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *dirent)
+{
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc *fi=NULL;
+	struct fileIdentDesc cfi;
+	int block, iblock;
+	loff_t nf_pos = filp->f_pos - 1;
+	int flen;
+	char fname[UDF_NAME_LEN];
+	char *nameptr;
+	uint16_t liu;
+	uint8_t lfi;
+	loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
+	struct buffer_head * bh = NULL, * tmp, * bha[16];
+	kernel_lb_addr bloc, eloc;
+	uint32_t extoffset, elen, offset;
+	int i, num;
+	unsigned int dt_type;
+
+	if (nf_pos >= size)
+		return 0;
+
+	if (nf_pos == 0)
+		nf_pos = (udf_ext0_offset(dir) >> 2);
+
+	fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh.sbh = fibh.ebh = NULL;
+	else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
+		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
+	{
+		offset >>= dir->i_sb->s_blocksize_bits;
+		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
+		{
+			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
+				extoffset -= sizeof(short_ad);
+			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
+				extoffset -= sizeof(long_ad);
+		}
+		else
+			offset = 0;
+
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			return -EIO;
+		}
+	
+		if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
+		{
+			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
+			if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
+				i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
+			for (num=0; i>0; i--)
+			{
+				block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
+				tmp = udf_tgetblk(dir->i_sb, block);
+				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
+					bha[num++] = tmp;
+				else
+					brelse(tmp);
+			}
+			if (num)
+			{
+				ll_rw_block(READA, num, bha);
+				for (i=0; i<num; i++)
+					brelse(bha[i]);
+			}
+		}
+	}
+	else
+	{
+		udf_release_data(bh);
+		return -ENOENT;
+	}
+
+	while ( nf_pos < size )
+	{
+		filp->f_pos = nf_pos + 1;
+
+		fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+
+		if (!fi)
+		{
+			if (fibh.sbh != fibh.ebh)
+				udf_release_data(fibh.ebh);
+			udf_release_data(fibh.sbh);
+			udf_release_data(bh);
+			return 0;
+		}
+
+		liu = le16_to_cpu(cfi.lengthOfImpUse);
+		lfi = cfi.lengthFileIdent;
+
+		if (fibh.sbh == fibh.ebh)
+			nameptr = fi->fileIdent + liu;
+		else
+		{
+			int poffset;	/* Unpadded ending offset */
+
+			poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+
+			if (poffset >= lfi)
+				nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
+			else
+			{
+				nameptr = fname;
+				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
+				memcpy(nameptr + lfi - poffset, fibh.ebh->b_data, poffset);
+			}
+		}
+
+		if ( (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
+		{
+			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
+				continue;
+		}
+		
+		if ( (cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
+		{
+			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+				continue;
+		}
+
+		if ( cfi.fileCharacteristics & FID_FILE_CHAR_PARENT )
+		{
+			iblock = parent_ino(filp->f_dentry);
+			flen = 2;
+			memcpy(fname, "..", flen);
+			dt_type = DT_DIR;
+		}
+		else
+		{
+			kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
+
+			iblock = udf_get_lb_pblock(dir->i_sb, tloc, 0);
+			flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+			dt_type = DT_UNKNOWN;
+		}
+
+		if (flen)
+		{
+			if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0)
+			{
+				if (fibh.sbh != fibh.ebh)
+					udf_release_data(fibh.ebh);
+				udf_release_data(fibh.sbh);
+				udf_release_data(bh);
+	 			return 0;
+			}
+		}
+	} /* end while */
+
+	filp->f_pos = nf_pos + 1;
+
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	udf_release_data(bh);
+
+	return 0;
+}
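
The read-ahead batch in do_udf_readdir() above (and again in udf_fileident_read() below) is sized by the expression 16 >> (s_blocksize_bits - 9), i.e. sixteen 512-byte sectors (8 KiB) converted into filesystem blocks. The arithmetic for the usual block sizes, as a quick standalone check:

	#include <stdio.h>

	int main(void)
	{
		int bits;

		/* 16 sectors of 512 bytes = 8 KiB of read-ahead, expressed in fs blocks. */
		for (bits = 9; bits <= 12; bits++)
			printf("blocksize %4d -> read-ahead %2d blocks\n",
			       1 << bits, 16 >> (bits - 9));
		return 0;
	}
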
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
new file mode 100644
index 0000000..9a61ecc
--- /dev/null
+++ b/fs/udf/directory.c
@@ -0,0 +1,343 @@
+/*
+ * directory.c
+ *
+ * PURPOSE
+ *	Directory related functions
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ */
+
+#include "udfdecl.h"
+#include "udf_i.h"
+
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+
+#if 0
+static uint8_t *
+udf_filead_read(struct inode *dir, uint8_t *tmpad, uint8_t ad_size,
+		kernel_lb_addr fe_loc, int *pos, int *offset,
+		struct buffer_head **bh, int *error)
+{
+	int loffset = *offset;
+	int block;
+	uint8_t *ad;
+	int remainder;
+
+	*error = 0;
+
+	ad = (uint8_t *)(*bh)->b_data + *offset;
+	*offset += ad_size;
+
+	if (!ad)
+	{
+		udf_release_data(*bh);
+		*error = 1;
+		return NULL;
+	}
+
+	if (*offset == dir->i_sb->s_blocksize)
+	{
+		udf_release_data(*bh);
+		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
+		if (!block)
+			return NULL;
+		if (!(*bh = udf_tread(dir->i_sb, block)))
+			return NULL;
+	}
+	else if (*offset > dir->i_sb->s_blocksize)
+	{
+		ad = tmpad;
+
+		remainder = dir->i_sb->s_blocksize - loffset;
+		memcpy((uint8_t *)ad, (*bh)->b_data + loffset, remainder);
+
+		udf_release_data(*bh);
+		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
+		if (!block)
+			return NULL;
+		if (!((*bh) = udf_tread(dir->i_sb, block)))
+			return NULL;
+
+		memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder);
+		*offset = ad_size - remainder;
+	}
+	return ad;
+}
+#endif
+
+struct fileIdentDesc *
+udf_fileident_read(struct inode *dir, loff_t *nf_pos,
+	struct udf_fileident_bh *fibh,
+	struct fileIdentDesc *cfi,
+	kernel_lb_addr *bloc, uint32_t *extoffset, 
+	kernel_lb_addr *eloc, uint32_t *elen,
+	uint32_t *offset, struct buffer_head **bh)
+{
+	struct fileIdentDesc *fi;
+	int i, num, block;
+	struct buffer_head * tmp, * bha[16];
+
+	fibh->soffset = fibh->eoffset;
+
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		fi = udf_get_fileident(UDF_I_DATA(dir) -
+			(UDF_I_EFE(dir) ?
+				sizeof(struct extendedFileEntry) :
+				sizeof(struct fileEntry)),
+			dir->i_sb->s_blocksize, &(fibh->eoffset));
+
+		if (!fi)
+			return NULL;
+
+		*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
+
+		memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+
+		return fi;
+	}
+
+	if (fibh->eoffset == dir->i_sb->s_blocksize)
+	{
+		int lextoffset = *extoffset;
+
+		if (udf_next_aext(dir, bloc, extoffset, eloc, elen, bh, 1) !=
+			(EXT_RECORDED_ALLOCATED >> 30))
+		{
+			return NULL;
+		}
+
+		block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
+
+		(*offset) ++;
+
+		if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
+			*offset = 0;
+		else
+			*extoffset = lextoffset;
+
+		udf_release_data(fibh->sbh);
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
+			return NULL;
+		fibh->soffset = fibh->eoffset = 0;
+
+		if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
+		{
+			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
+			if (i+*offset > (*elen >> dir->i_sb->s_blocksize_bits))
+				i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset;
+			for (num=0; i>0; i--)
+			{
+				block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
+				tmp = udf_tgetblk(dir->i_sb, block);
+				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
+					bha[num++] = tmp;
+				else
+					brelse(tmp);
+			}
+			if (num)
+			{
+				ll_rw_block(READA, num, bha);
+				for (i=0; i<num; i++)
+					brelse(bha[i]);
+			}
+		}
+	}
+	else if (fibh->sbh != fibh->ebh)
+	{
+		udf_release_data(fibh->sbh);
+		fibh->sbh = fibh->ebh;
+	}
+
+	fi = udf_get_fileident(fibh->sbh->b_data, dir->i_sb->s_blocksize,
+		&(fibh->eoffset));
+
+	if (!fi)
+		return NULL;
+
+	*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
+
+	if (fibh->eoffset <= dir->i_sb->s_blocksize)
+	{
+		memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+	}
+	else if (fibh->eoffset > dir->i_sb->s_blocksize)
+	{
+		int lextoffset = *extoffset;
+
+		if (udf_next_aext(dir, bloc, extoffset, eloc, elen, bh, 1) !=
+			(EXT_RECORDED_ALLOCATED >> 30))
+		{
+			return NULL;
+		}
+
+		block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
+
+		(*offset) ++;
+
+		if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
+			*offset = 0;
+		else
+			*extoffset = lextoffset;
+
+		fibh->soffset -= dir->i_sb->s_blocksize;
+		fibh->eoffset -= dir->i_sb->s_blocksize;
+
+		if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
+			return NULL;
+
+		if (sizeof(struct fileIdentDesc) > - fibh->soffset)
+		{
+			int fi_len;
+
+			memcpy((uint8_t *)cfi, (uint8_t *)fi, - fibh->soffset);
+			memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data,
+				sizeof(struct fileIdentDesc) + fibh->soffset);
+
+			fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent +
+				le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
+
+			*nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
+			fibh->eoffset = fibh->soffset + fi_len;
+		}
+		else
+		{
+			memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+		}
+	}
+	return fi;
+}
+
+struct fileIdentDesc * 
+udf_get_fileident(void * buffer, int bufsize, int * offset)
+{
+	struct fileIdentDesc *fi;
+	int lengthThisIdent;
+	uint8_t * ptr;
+	int padlen;
+
+	if ( (!buffer) || (!offset) ) {
+		udf_debug("invalidparms\n, buffer=%p, offset=%p\n", buffer, offset);
+		return NULL;
+	}
+
+	ptr = buffer;
+
+	if ( (*offset > 0) && (*offset < bufsize) ) {
+		ptr += *offset;
+	}
+	fi=(struct fileIdentDesc *)ptr;
+	if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID)
+	{
+		udf_debug("0x%x != TAG_IDENT_FID\n",
+			le16_to_cpu(fi->descTag.tagIdent));
+		udf_debug("offset: %u sizeof: %lu bufsize: %u\n",
+			*offset, (unsigned long)sizeof(struct fileIdentDesc), bufsize);
+		return NULL;
+	}
+	if ( (*offset + sizeof(struct fileIdentDesc)) > bufsize )
+	{
+		lengthThisIdent = sizeof(struct fileIdentDesc);
+	}
+	else
+		lengthThisIdent = sizeof(struct fileIdentDesc) +
+			fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
+
+	/* we need to figure padding, too! */
+	padlen = lengthThisIdent % UDF_NAME_PAD;
+	if (padlen)
+		lengthThisIdent += (UDF_NAME_PAD - padlen);
+	*offset = *offset + lengthThisIdent;
+
+	return fi;
+}
+
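
The padding step above rounds each file identifier descriptor up to the next UDF_NAME_PAD boundary (4 bytes, per the UDF_NAME_PAD definition elsewhere in this patch). A worked example of the rounding, using a hypothetical helper:

	#include <stdio.h>

	#define UDF_NAME_PAD 4	/* descriptors are 4-byte aligned */

	static int pad_to_fid_boundary(int len)
	{
		int padlen = len % UDF_NAME_PAD;

		return padlen ? len + (UDF_NAME_PAD - padlen) : len;
	}

	int main(void)
	{
		/* A 38-byte FID header plus a 5-byte name (43 bytes) rounds up to 44. */
		printf("%d\n", pad_to_fid_boundary(43));	/* prints 44 */
		return 0;
	}
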
+#if 0
+static extent_ad *
+udf_get_fileextent(void * buffer, int bufsize, int * offset)
+{
+	extent_ad * ext;
+	struct fileEntry *fe;
+	uint8_t * ptr;
+
+	if ( (!buffer) || (!offset) )
+	{
+		printk(KERN_ERR "udf: udf_get_fileextent() invalidparms\n");
+		return NULL;
+	}
+
+	fe = (struct fileEntry *)buffer;
+
+	if ( le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE )
+	{
+		udf_debug("0x%x != TAG_IDENT_FE\n",
+			le16_to_cpu(fe->descTag.tagIdent));
+		return NULL;
+	}
+
+	ptr=(uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr);
+
+	if ( (*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs)) )
+	{
+		ptr += *offset;
+	}
+
+	ext = (extent_ad *)ptr;
+
+	*offset = *offset + sizeof(extent_ad);
+	return ext;
+}
+#endif
+
+short_ad *
+udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
+{
+	short_ad *sa;
+
+	if ( (!ptr) || (!offset) )
+	{
+		printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
+		return NULL;
+	}
+
+	if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
+		return NULL;
+	else if ((sa = (short_ad *)ptr)->extLength == 0)
+		return NULL;
+
+	if (inc)
+		*offset += sizeof(short_ad);
+	return sa;
+}
+
+long_ad *
+udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
+{
+	long_ad *la;
+
+	if ( (!ptr) || (!offset) ) 
+	{
+		printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
+		return NULL;
+	}
+
+	if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
+		return NULL;
+	else if ((la = (long_ad *)ptr)->extLength == 0)
+		return NULL;
+
+	if (inc)
+		*offset += sizeof(long_ad);
+	return la;
+}
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
new file mode 100644
index 0000000..f81f2eb
--- /dev/null
+++ b/fs/udf/ecma_167.h
@@ -0,0 +1,864 @@
+/*
+ * ecma_167.h
+ *
+ * This file is based on ECMA-167 3rd edition (June 1997)
+ * http://www.ecma.ch
+ *
+ * Copyright (c) 2001-2002  Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+
+#ifndef _ECMA_167_H
+#define _ECMA_167_H 1
+
+/* Character set specification (ECMA 167r3 1/7.2.1) */
+typedef struct
+{
+	uint8_t		charSetType;
+	uint8_t		charSetInfo[63];
+} __attribute__ ((packed)) charspec;
+
+/* Character Set Type (ECMA 167r3 1/7.2.1.1) */
+#define CHARSPEC_TYPE_CS0		0x00	/* (1/7.2.2) */
+#define CHARSPEC_TYPE_CS1		0x01	/* (1/7.2.3) */
+#define CHARSPEC_TYPE_CS2		0x02	/* (1/7.2.4) */
+#define CHARSPEC_TYPE_CS3		0x03	/* (1/7.2.5) */
+#define CHARSPEC_TYPE_CS4		0x04	/* (1/7.2.6) */
+#define CHARSPEC_TYPE_CS5		0x05	/* (1/7.2.7) */
+#define CHARSPEC_TYPE_CS6		0x06	/* (1/7.2.8) */
+#define CHARSPEC_TYPE_CS7		0x07	/* (1/7.2.9) */
+#define CHARSPEC_TYPE_CS8		0x08	/* (1/7.2.10) */
+
+typedef uint8_t		dstring;
+
+/* Timestamp (ECMA 167r3 1/7.3) */
+typedef struct
+{
+	__le16		typeAndTimezone;
+	__le16		year;
+	uint8_t		month;
+	uint8_t		day;
+	uint8_t		hour;
+	uint8_t		minute;
+	uint8_t		second;
+	uint8_t		centiseconds;
+	uint8_t		hundredsOfMicroseconds;
+	uint8_t		microseconds;
+} __attribute__ ((packed)) timestamp;
+
+typedef struct
+{
+	uint16_t	typeAndTimezone;
+	int16_t		year;
+	uint8_t		month;
+	uint8_t		day;
+	uint8_t		hour;
+	uint8_t		minute;
+	uint8_t		second;
+	uint8_t		centiseconds;
+	uint8_t		hundredsOfMicroseconds;
+	uint8_t		microseconds;
+} __attribute__ ((packed)) kernel_timestamp;
+
+/* Type and Time Zone (ECMA 167r3 1/7.3.1) */
+#define TIMESTAMP_TYPE_MASK		0xF000
+#define TIMESTAMP_TYPE_CUT		0x0000
+#define TIMESTAMP_TYPE_LOCAL		0x1000
+#define TIMESTAMP_TYPE_AGREEMENT	0x2000
+#define TIMESTAMP_TIMEZONE_MASK		0x0FFF
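The typeAndTimezone field above packs the timestamp type into the top four bits and a signed, 12-bit time-zone offset (in minutes from UTC) into the low twelve bits. A minimal decoding sketch, written as standalone userspace C rather than the driver's own helpers, under that reading of the masks:

#include <stdint.h>

#define TIMESTAMP_TYPE_MASK	0xF000
#define TIMESTAMP_TIMEZONE_MASK	0x0FFF

/* Top four bits: timestamp interpretation (CUT / local / by agreement). */
static unsigned ts_type(uint16_t typeAndTimezone)
{
	return (typeAndTimezone & TIMESTAMP_TYPE_MASK) >> 12;
}

/* Low twelve bits: time-zone offset in minutes, a signed 12-bit value. */
static int ts_tz_minutes(uint16_t typeAndTimezone)
{
	int tz = typeAndTimezone & TIMESTAMP_TIMEZONE_MASK;

	if (tz & 0x0800)	/* sign-extend the 12-bit field */
		tz -= 0x1000;
	return tz;
}

int main(void)
{
	/* local time, one hour behind UTC */
	uint16_t t = 0x1000 | (0x0FFF & (uint16_t)-60);

	return !(ts_type(t) == 1 && ts_tz_minutes(t) == -60);
}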
+
+/* Entity identifier (ECMA 167r3 1/7.4) */
+typedef struct
+{
+	uint8_t		flags;
+	uint8_t		ident[23];
+	uint8_t		identSuffix[8];
+} __attribute__ ((packed)) regid;
+
+/* Flags (ECMA 167r3 1/7.4.1) */
+#define ENTITYID_FLAGS_DIRTY		0x00
+#define ENTITYID_FLAGS_PROTECTED	0x01
+
+/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
+#define VSD_STD_ID_LEN			5
+struct volStructDesc
+{
+	uint8_t		structType;
+	uint8_t		stdIdent[VSD_STD_ID_LEN];
+	uint8_t		structVersion;
+	uint8_t		structData[2041];
+} __attribute__ ((packed));
+
+/* Standard Identifier (ECMA 167r2 2/9.1.2) */
+#define VSD_STD_ID_NSR02		"NSR02"	/* (3/9.1) */
+
+/* Standard Identifier (ECMA 167r3 2/9.1.2) */
+#define VSD_STD_ID_BEA01		"BEA01"	/* (2/9.2) */
+#define VSD_STD_ID_BOOT2		"BOOT2"	/* (2/9.4) */
+#define VSD_STD_ID_CD001		"CD001"	/* (ECMA-119) */
+#define VSD_STD_ID_CDW02		"CDW02"	/* (ECMA-168) */
+#define VSD_STD_ID_NSR03		"NSR03"	/* (3/9.1) */
+#define VSD_STD_ID_TEA01		"TEA01"	/* (2/9.3) */
+
+/* Beginning Extended Area Descriptor (ECMA 167r3 2/9.2) */
+struct beginningExtendedAreaDesc
+{
+	uint8_t		structType;
+	uint8_t		stdIdent[VSD_STD_ID_LEN];
+	uint8_t		structVersion;
+	uint8_t		structData[2041];
+} __attribute__ ((packed));
+
+/* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */
+struct terminatingExtendedAreaDesc
+{
+	uint8_t		structType;
+	uint8_t		stdIdent[VSD_STD_ID_LEN];
+	uint8_t		structVersion;
+	uint8_t		structData[2041];
+} __attribute__ ((packed));
+
+/* Boot Descriptor (ECMA 167r3 2/9.4) */
+struct bootDesc
+{
+	uint8_t		structType;
+	uint8_t		stdIdent[VSD_STD_ID_LEN];
+	uint8_t		structVersion;
+	uint8_t		reserved1;
+	regid		archType;
+	regid		bootIdent;
+	__le32		bootExtLocation;
+	__le32		bootExtLength;
+	__le64		loadAddress;
+	__le64		startAddress;
+	timestamp	descCreationDateAndTime;
+	__le16		flags;
+	uint8_t		reserved2[32];
+	uint8_t		bootUse[1906];
+} __attribute__ ((packed));
+
+/* Flags (ECMA 167r3 2/9.4.12) */
+#define BOOT_FLAGS_ERASE		0x01
+
+/* Extent Descriptor (ECMA 167r3 3/7.1) */
+typedef struct
+{
+	__le32		extLength;
+	__le32		extLocation;
+} __attribute__ ((packed)) extent_ad;
+
+typedef struct
+{
+	uint32_t	extLength;
+	uint32_t	extLocation;
+} kernel_extent_ad;
+
+/* Descriptor Tag (ECMA 167r3 3/7.2) */
+typedef struct
+{
+	__le16		tagIdent;
+	__le16		descVersion;
+	uint8_t		tagChecksum;
+	uint8_t		reserved;
+	__le16		tagSerialNum;
+	__le16		descCRC;
+	__le16		descCRCLength;
+	__le32		tagLocation;
+} __attribute__ ((packed)) tag;
+
+/* Tag Identifier (ECMA 167r3 3/7.2.1) */
+#define TAG_IDENT_PVD			0x0001
+#define TAG_IDENT_AVDP			0x0002
+#define TAG_IDENT_VDP			0x0003
+#define TAG_IDENT_IUVD			0x0004
+#define TAG_IDENT_PD			0x0005
+#define TAG_IDENT_LVD			0x0006
+#define TAG_IDENT_USD			0x0007
+#define TAG_IDENT_TD			0x0008
+#define TAG_IDENT_LVID			0x0009
+
+/* NSR Descriptor (ECMA 167r3 3/9.1) */
+struct NSRDesc
+{
+	uint8_t		structType;
+	uint8_t		stdIdent[VSD_STD_ID_LEN];
+	uint8_t		structVersion;
+	uint8_t		reserved;
+	uint8_t		structData[2040];
+} __attribute__ ((packed));
+	
+/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
+struct primaryVolDesc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	__le32		primaryVolDescNum;
+	dstring		volIdent[32];
+	__le16		volSeqNum;
+	__le16		maxVolSeqNum;
+	__le16		interchangeLvl;
+	__le16		maxInterchangeLvl;
+	__le32		charSetList;
+	__le32		maxCharSetList;
+	dstring		volSetIdent[128];
+	charspec	descCharSet;
+	charspec	explanatoryCharSet;
+	extent_ad	volAbstract;
+	extent_ad	volCopyright;
+	regid		appIdent;
+	timestamp	recordingDateAndTime;
+	regid		impIdent;
+	uint8_t		impUse[64];
+	__le32		predecessorVolDescSeqLocation;
+	__le16		flags;
+	uint8_t		reserved[22];
+} __attribute__ ((packed));
+
+/* Flags (ECMA 167r3 3/10.1.21) */
+#define PVD_FLAGS_VSID_COMMON		0x0001
+
+/* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */
+struct anchorVolDescPtr
+{
+	tag		descTag;
+	extent_ad	mainVolDescSeqExt;
+	extent_ad	reserveVolDescSeqExt;
+	uint8_t	 	reserved[480];
+} __attribute__ ((packed));
+
+/* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */
+struct volDescPtr
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	extent_ad	nextVolDescSeqExt;
+	uint8_t		reserved[484];
+} __attribute__ ((packed));
+
+/* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */
+struct impUseVolDesc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	regid		impIdent;
+	uint8_t		impUse[460];
+} __attribute__ ((packed));
+
+/* Partition Descriptor (ECMA 167r3 3/10.5) */
+struct partitionDesc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	__le16		partitionFlags;
+	__le16		partitionNumber;
+	regid		partitionContents;
+	uint8_t		partitionContentsUse[128];
+	__le32		accessType;
+	__le32		partitionStartingLocation;
+	__le32		partitionLength;
+	regid		impIdent;
+	uint8_t		impUse[128];
+	uint8_t		reserved[156];
+} __attribute__ ((packed));
+
+/* Partition Flags (ECMA 167r3 3/10.5.3) */
+#define PD_PARTITION_FLAGS_ALLOC	0x0001
+
+/* Partition Contents (ECMA 167r2 3/10.5.3) */
+#define PD_PARTITION_CONTENTS_NSR02	"+NSR02"
+
+/* Partition Contents (ECMA 167r3 3/10.5.5) */
+#define PD_PARTITION_CONTENTS_FDC01	"+FDC01"
+#define PD_PARTITION_CONTENTS_CD001	"+CD001"
+#define PD_PARTITION_CONTENTS_CDW02	"+CDW02"
+#define PD_PARTITION_CONTENTS_NSR03	"+NSR03"
+
+/* Access Type (ECMA 167r3 3/10.5.7) */
+#define PD_ACCESS_TYPE_NONE		0x00000000
+#define PD_ACCESS_TYPE_READ_ONLY	0x00000001
+#define PD_ACCESS_TYPE_WRITE_ONCE	0x00000002
+#define PD_ACCESS_TYPE_REWRITABLE	0x00000003
+#define PD_ACCESS_TYPE_OVERWRITABLE	0x00000004
+
+/* Logical Volume Descriptor (ECMA 167r3 3/10.6) */
+struct logicalVolDesc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	charspec	descCharSet;
+	dstring		logicalVolIdent[128];
+	__le32		logicalBlockSize;
+	regid		domainIdent;
+	uint8_t		logicalVolContentsUse[16];
+	__le32		mapTableLength;
+	__le32		numPartitionMaps;
+	regid		impIdent;
+	uint8_t		impUse[128];
+	extent_ad	integritySeqExt;
+	uint8_t		partitionMaps[0];
+} __attribute__ ((packed));
+
+/* Generic Partition Map (ECMA 167r3 3/10.7.1) */
+struct genericPartitionMap
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	uint8_t		partitionMapping[0];
+} __attribute__ ((packed));
+
+/* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
+#define GP_PARTITION_MAP_TYPE_UNDEF	0x00
+#define GP_PARTIITON_MAP_TYPE_1		0x01
+#define GP_PARTITION_MAP_TYPE_2		0x02
+
+/* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
+struct genericPartitionMap1
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	__le16		volSeqNum;
+	__le16		partitionNum;
+} __attribute__ ((packed));
+
+/* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */
+struct genericPartitionMap2
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength; 
+	uint8_t		partitionIdent[62];
+} __attribute__ ((packed));
+
+/* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */
+struct unallocSpaceDesc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+	__le32		numAllocDescs;
+	extent_ad	allocDescs[0];
+} __attribute__ ((packed));
+
+/* Terminating Descriptor (ECMA 167r3 3/10.9) */
+struct terminatingDesc
+{
+	tag		descTag;
+	uint8_t		reserved[496];
+} __attribute__ ((packed));
+
+/* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */
+struct logicalVolIntegrityDesc
+{
+	tag		descTag;
+	timestamp	recordingDateAndTime;
+	__le32		integrityType;
+	extent_ad	nextIntegrityExt;
+	uint8_t		logicalVolContentsUse[32];
+	__le32		numOfPartitions;
+	__le32		lengthOfImpUse;
+	__le32		freeSpaceTable[0];
+	__le32		sizeTable[0];
+	uint8_t		impUse[0];
+} __attribute__ ((packed));
+
+/* Integrity Type (ECMA 167r3 3/10.10.3) */
+#define LVID_INTEGRITY_TYPE_OPEN	0x00000000
+#define LVID_INTEGRITY_TYPE_CLOSE	0x00000001
+
+/* Recorded Address (ECMA 167r3 4/7.1) */
+typedef struct 
+{
+	__le32		logicalBlockNum;
+	__le16	 	partitionReferenceNum;
+} __attribute__ ((packed)) lb_addr;
+
+/* ... and its in-core analog */
+typedef struct 
+{
+	uint32_t		logicalBlockNum;
+	uint16_t	 	partitionReferenceNum;
+} kernel_lb_addr;
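As the comment notes, each packed little-endian on-disk structure is paired with a CPU-endian in-core analog; the driver converts fields with le16_to_cpu()/le32_to_cpu() when it reads descriptors off the medium. A small userspace-flavoured sketch of that conversion for the recorded address, with le32toh/le16toh from <endian.h> standing in for the kernel helpers and the hypothetical lb_to_cpu() mirroring the struct layouts above:

#include <stdint.h>
#include <endian.h>

/* On-disk form: little-endian fields, packed (mirrors lb_addr above). */
struct disk_lb_addr {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
} __attribute__ ((packed));

/* In-core form: native byte order (mirrors kernel_lb_addr above). */
struct cpu_lb_addr {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
};

/* Convert a recorded address as read from the medium into CPU byte order. */
static struct cpu_lb_addr lb_to_cpu(struct disk_lb_addr in)
{
	struct cpu_lb_addr out;

	out.logicalBlockNum = le32toh(in.logicalBlockNum);
	out.partitionReferenceNum = le16toh(in.partitionReferenceNum);
	return out;
}

int main(void)
{
	struct disk_lb_addr raw = { htole32(42), htole16(1) };
	struct cpu_lb_addr lb = lb_to_cpu(raw);

	return !(lb.logicalBlockNum == 42 && lb.partitionReferenceNum == 1);
}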
+
+/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
+typedef struct
+{
+        __le32		extLength;
+        __le32		extPosition;
+} __attribute__ ((packed)) short_ad;
+
+/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
+typedef struct
+{
+	__le32		extLength;
+	lb_addr		extLocation;
+	uint8_t		impUse[6];
+} __attribute__ ((packed)) long_ad;
+
+typedef struct
+{
+	uint32_t	extLength;
+	kernel_lb_addr	extLocation;
+	uint8_t		impUse[6];
+} kernel_long_ad;
+
+/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
+typedef struct
+{
+	__le32		extLength;
+	__le32		recordedLength;
+	__le32		informationLength;
+	lb_addr		extLocation;
+} __attribute__ ((packed)) ext_ad;
+
+typedef struct
+{
+	uint32_t	extLength;
+	uint32_t	recordedLength;
+	uint32_t	informationLength;
+	kernel_lb_addr	extLocation;
+} kernel_ext_ad;
+
+/* Descriptor Tag (ECMA 167r3 4/7.2 - See 3/7.2) */
+
+/* Tag Identifier (ECMA 167r3 4/7.2.1) */
+#define TAG_IDENT_FSD			0x0100
+#define TAG_IDENT_FID			0x0101
+#define TAG_IDENT_AED			0x0102
+#define TAG_IDENT_IE			0x0103
+#define TAG_IDENT_TE			0x0104
+#define TAG_IDENT_FE			0x0105
+#define TAG_IDENT_EAHD			0x0106
+#define TAG_IDENT_USE			0x0107
+#define TAG_IDENT_SBD			0x0108
+#define TAG_IDENT_PIE			0x0109
+#define TAG_IDENT_EFE			0x010A
+
+/* File Set Descriptor (ECMA 167r3 4/14.1) */
+struct fileSetDesc
+{
+	tag		descTag;
+	timestamp	recordingDateAndTime;
+	__le16		interchangeLvl;
+	__le16		maxInterchangeLvl;
+	__le32		charSetList;
+	__le32		maxCharSetList;
+	__le32		fileSetNum;
+	__le32		fileSetDescNum;
+	charspec	logicalVolIdentCharSet;
+	dstring		logicalVolIdent[128];
+	charspec	fileSetCharSet;
+	dstring		fileSetIdent[32];
+	dstring		copyrightFileIdent[32];
+	dstring		abstractFileIdent[32];
+	long_ad		rootDirectoryICB;
+	regid		domainIdent;
+	long_ad		nextExt;
+	long_ad		streamDirectoryICB;
+	uint8_t		reserved[32];
+} __attribute__ ((packed));
+
+/* Partition Header Descriptor (ECMA 167r3 4/14.3) */
+struct partitionHeaderDesc
+{
+	short_ad	unallocSpaceTable;
+	short_ad	unallocSpaceBitmap;
+	short_ad	partitionIntegrityTable;
+	short_ad	freedSpaceTable;
+	short_ad	freedSpaceBitmap;
+	uint8_t		reserved[88];
+} __attribute__ ((packed));
+
+/* File Identifier Descriptor (ECMA 167r3 4/14.4) */
+struct fileIdentDesc
+{
+	tag		descTag;
+	__le16		fileVersionNum;
+	uint8_t		fileCharacteristics;
+	uint8_t		lengthFileIdent;
+	long_ad		icb;
+	__le16		lengthOfImpUse;
+	uint8_t		impUse[0];
+	uint8_t		fileIdent[0];
+	uint8_t		padding[0];
+} __attribute__ ((packed));
+
+/* File Characteristics (ECMA 167r3 4/14.4.3) */
+#define FID_FILE_CHAR_HIDDEN		0x01
+#define FID_FILE_CHAR_DIRECTORY		0x02
+#define FID_FILE_CHAR_DELETED		0x04
+#define FID_FILE_CHAR_PARENT		0x08
+#define FID_FILE_CHAR_METADATA		0x10
+
+/* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */
+struct allocExtDesc
+{
+	tag		descTag;
+	__le32		previousAllocExtLocation;
+	__le32		lengthAllocDescs;
+} __attribute__ ((packed));
+
+/* ICB Tag (ECMA 167r3 4/14.6) */
+typedef struct
+{
+	__le32		priorRecordedNumDirectEntries;
+	__le16		strategyType;
+	__le16		strategyParameter;
+	__le16		numEntries;
+	uint8_t		reserved;
+	uint8_t		fileType;
+	lb_addr		parentICBLocation;
+	__le16		flags;
+} __attribute__ ((packed)) icbtag;
+
+/* Strategy Type (ECMA 167r3 4/14.6.2) */
+#define ICBTAG_STRATEGY_TYPE_UNDEF	0x0000
+#define ICBTAG_STRATEGY_TYPE_1		0x0001
+#define ICBTAG_STRATEGY_TYPE_2		0x0002
+#define ICBTAG_STRATEGY_TYPE_3		0x0003
+#define ICBTAG_STRATEGY_TYPE_4		0x0004
+
+/* File Type (ECMA 167r3 4/14.6.6) */
+#define ICBTAG_FILE_TYPE_UNDEF		0x00
+#define ICBTAG_FILE_TYPE_USE		0x01
+#define ICBTAG_FILE_TYPE_PIE		0x02
+#define ICBTAG_FILE_TYPE_IE		0x03
+#define ICBTAG_FILE_TYPE_DIRECTORY	0x04
+#define ICBTAG_FILE_TYPE_REGULAR	0x05
+#define ICBTAG_FILE_TYPE_BLOCK		0x06
+#define ICBTAG_FILE_TYPE_CHAR		0x07
+#define ICBTAG_FILE_TYPE_EA		0x08
+#define ICBTAG_FILE_TYPE_FIFO		0x09
+#define ICBTAG_FILE_TYPE_SOCKET		0x0A
+#define ICBTAG_FILE_TYPE_TE		0x0B
+#define ICBTAG_FILE_TYPE_SYMLINK	0x0C
+#define ICBTAG_FILE_TYPE_STREAMDIR	0x0D
+
+/* Flags (ECMA 167r3 4/14.6.8) */
+#define ICBTAG_FLAG_AD_MASK		0x0007
+#define ICBTAG_FLAG_AD_SHORT		0x0000
+#define ICBTAG_FLAG_AD_LONG		0x0001
+#define ICBTAG_FLAG_AD_EXTENDED		0x0002
+#define ICBTAG_FLAG_AD_IN_ICB		0x0003
+#define ICBTAG_FLAG_SORTED		0x0008
+#define ICBTAG_FLAG_NONRELOCATABLE	0x0010
+#define ICBTAG_FLAG_ARCHIVE		0x0020
+#define ICBTAG_FLAG_SETUID		0x0040
+#define ICBTAG_FLAG_SETGID		0x0080
+#define ICBTAG_FLAG_STICKY		0x0100
+#define ICBTAG_FLAG_CONTIGUOUS		0x0200
+#define ICBTAG_FLAG_SYSTEM		0x0400
+#define ICBTAG_FLAG_TRANSFORMED		0x0800
+#define ICBTAG_FLAG_MULTIVERSIONS	0x1000
+#define ICBTAG_FLAG_STREAM		0x2000
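The low three bits of the ICB tag flags (ICBTAG_FLAG_AD_MASK) select how a file's allocation descriptors are stored: short, long, extended, or embedded directly in the ICB, which is the ICBTAG_FLAG_AD_IN_ICB case that file.c and inode.c below special-case. A tiny illustrative sketch of extracting that value, with the two constants copied locally so it stands alone:

#include <stdint.h>

/* Local copies of the two constants above, so the sketch compiles on its own. */
#define ICBTAG_FLAG_AD_MASK	0x0007
#define ICBTAG_FLAG_AD_IN_ICB	0x0003

/* Allocation-descriptor type encoded in an ICB tag's flags field. */
static unsigned icb_alloc_type(uint16_t flags)
{
	return flags & ICBTAG_FLAG_AD_MASK;
}

/* Files whose data lives inside the ICB need the adinicb paths in file.c. */
static int icb_data_in_icb(uint16_t flags)
{
	return icb_alloc_type(flags) == ICBTAG_FLAG_AD_IN_ICB;
}

int main(void)
{
	/* AD_IN_ICB plus an unrelated flag bit (0x0020) still decodes to AD_IN_ICB */
	return !icb_data_in_icb(ICBTAG_FLAG_AD_IN_ICB | 0x0020);
}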
+
+/* Indirect Entry (ECMA 167r3 4/14.7) */
+struct indirectEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+	long_ad		indirectICB;
+} __attribute__ ((packed));
+
+/* Terminal Entry (ECMA 167r3 4/14.8) */
+struct terminalEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+} __attribute__ ((packed));
+
+/* File Entry (ECMA 167r3 4/14.9) */
+struct fileEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+	__le32		uid;
+	__le32		gid;
+	__le32		permissions;
+	__le16		fileLinkCount;
+	uint8_t		recordFormat;
+	uint8_t		recordDisplayAttr;
+	__le32		recordLength;
+	__le64		informationLength;
+	__le64		logicalBlocksRecorded;
+	timestamp	accessTime;
+	timestamp	modificationTime;
+	timestamp	attrTime;
+	__le32		checkpoint;
+	long_ad		extendedAttrICB;
+	regid		impIdent;
+	__le64		uniqueID;
+	__le32		lengthExtendedAttr;
+	__le32		lengthAllocDescs;
+	uint8_t		extendedAttr[0];
+	uint8_t		allocDescs[0];
+} __attribute__ ((packed));
+
+/* Permissions (ECMA 167r3 4/14.9.5) */
+#define FE_PERM_O_EXEC			0x00000001U
+#define FE_PERM_O_WRITE			0x00000002U
+#define FE_PERM_O_READ			0x00000004U
+#define FE_PERM_O_CHATTR		0x00000008U
+#define FE_PERM_O_DELETE		0x00000010U
+#define FE_PERM_G_EXEC			0x00000020U
+#define FE_PERM_G_WRITE			0x00000040U
+#define FE_PERM_G_READ			0x00000080U
+#define FE_PERM_G_CHATTR		0x00000100U
+#define FE_PERM_G_DELETE		0x00000200U
+#define FE_PERM_U_EXEC			0x00000400U
+#define FE_PERM_U_WRITE			0x00000800U
+#define FE_PERM_U_READ			0x00001000U
+#define FE_PERM_U_CHATTR		0x00002000U
+#define FE_PERM_U_DELETE		0x00004000U
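These permission bits carry read/write/execute per class plus chattr and delete bits that have no direct POSIX counterpart; the udf_convert_permissions() declared in inode.c below presumably folds the rwx subset into a Unix mode. A rough userspace sketch of that kind of folding, using locally mirrored copies of the bit values above (illustrative only, not the driver's code):

#include <stdint.h>
#include <sys/stat.h>

/* Local mirrors of the FE_PERM_* read/write/execute bits defined above. */
#define P_O_EXEC  0x00000001U
#define P_O_WRITE 0x00000002U
#define P_O_READ  0x00000004U
#define P_G_EXEC  0x00000020U
#define P_G_WRITE 0x00000040U
#define P_G_READ  0x00000080U
#define P_U_EXEC  0x00000400U
#define P_U_WRITE 0x00000800U
#define P_U_READ  0x00001000U

/* Fold UDF file-entry permissions into the rwx part of a POSIX mode. */
static mode_t udf_perms_to_posix(uint32_t p)
{
	mode_t mode = 0;

	if (p & P_U_READ)  mode |= S_IRUSR;
	if (p & P_U_WRITE) mode |= S_IWUSR;
	if (p & P_U_EXEC)  mode |= S_IXUSR;
	if (p & P_G_READ)  mode |= S_IRGRP;
	if (p & P_G_WRITE) mode |= S_IWGRP;
	if (p & P_G_EXEC)  mode |= S_IXGRP;
	if (p & P_O_READ)  mode |= S_IROTH;
	if (p & P_O_WRITE) mode |= S_IWOTH;
	if (p & P_O_EXEC)  mode |= S_IXOTH;
	return mode;
}

int main(void)
{
	/* user rw, group r, other r  ->  0644 */
	uint32_t p = P_U_READ | P_U_WRITE | P_G_READ | P_O_READ;

	return udf_perms_to_posix(p) != 0644;
}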
+
+/* Record Format (ECMA 167r3 4/14.9.7) */
+#define FE_RECORD_FMT_UNDEF		0x00
+#define FE_RECORD_FMT_FIXED_PAD		0x01
+#define FE_RECORD_FMT_FIXED		0x02
+#define FE_RECORD_FMT_VARIABLE8		0x03
+#define FE_RECORD_FMT_VARIABLE16	0x04
+#define FE_RECORD_FMT_VARIABLE16_MSB	0x05
+#define FE_RECORD_FMT_VARIABLE32	0x06
+#define FE_RECORD_FMT_PRINT		0x07
+#define FE_RECORD_FMT_LF		0x08
+#define FE_RECORD_FMT_CR		0x09
+#define FE_RECORD_FMT_CRLF		0x0A
+#define FE_RECORD_FMT_LFCR		0x0B
+
+/* Record Display Attributes (ECMA 167r3 4/14.9.8) */
+#define FE_RECORD_DISPLAY_ATTR_UNDEF	0x00
+#define FE_RECORD_DISPLAY_ATTR_1	0x01
+#define FE_RECORD_DISPLAY_ATTR_2	0x02
+#define FE_RECORD_DISPLAY_ATTR_3	0x03
+
+/* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */
+struct extendedAttrHeaderDesc
+{
+	tag		descTag;
+	__le32		impAttrLocation;
+	__le32		appAttrLocation;
+} __attribute__ ((packed));
+
+/* Generic Format (ECMA 167r3 4/14.10.2) */
+struct genericFormat
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	uint8_t		attrData[0];
+} __attribute__ ((packed));
+
+/* Character Set Information (ECMA 167r3 4/14.10.3) */
+struct charSetInfo
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		escapeSeqLength;
+	uint8_t		charSetType;
+	uint8_t		escapeSeq[0];
+} __attribute__ ((packed));
+
+/* Alternate Permissions (ECMA 167r3 4/14.10.4) */
+struct altPerms
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le16		ownerIdent;
+	__le16		groupIdent;
+	__le16		permission;
+} __attribute__ ((packed));
+
+/* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */
+struct fileTimesExtAttr
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		dataLength;
+	__le32		fileTimeExistence;
+	uint8_t		fileTimes;
+} __attribute__ ((packed));
+
+/* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */
+#define FTE_CREATION			0x00000001
+#define FTE_DELETION			0x00000004
+#define FTE_EFFECTIVE			0x00000008
+#define FTE_BACKUP			0x00000002
+
+/* Information Times Extended Attribute (ECMA 167r3 4/14.10.6) */
+struct infoTimesExtAttr
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		dataLength;
+	__le32		infoTimeExistence;
+	uint8_t		infoTimes[0];
+} __attribute__ ((packed));
+
+/* Device Specification (ECMA 167r3 4/14.10.7) */
+struct deviceSpec
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		impUseLength;
+	__le32		majorDeviceIdent;
+	__le32		minorDeviceIdent;
+	uint8_t		impUse[0];
+} __attribute__ ((packed));
+
+/* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
+struct impUseExtAttr
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		impUseLength;
+	regid		impIdent;
+	uint8_t		impUse[0];
+} __attribute__ ((packed));
+
+/* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
+struct appUseExtAttr
+{
+	__le32		attrType;
+	uint8_t		attrSubtype;
+	uint8_t		reserved[3];
+	__le32		attrLength;
+	__le32		appUseLength;
+	regid		appIdent;
+	uint8_t		appUse[0];
+} __attribute__ ((packed));
+
+#define EXTATTR_CHAR_SET		1
+#define EXTATTR_ALT_PERMS		3
+#define EXTATTR_FILE_TIMES		5
+#define EXTATTR_INFO_TIMES		6
+#define EXTATTR_DEV_SPEC		12
+#define EXTATTR_IMP_USE			2048
+#define EXTATTR_APP_USE			65536
+
+
+/* Unallocated Space Entry (ECMA 167r3 4/14.11) */
+struct unallocSpaceEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+	__le32		lengthAllocDescs;
+	uint8_t		allocDescs[0];
+} __attribute__ ((packed));
+
+/* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
+struct spaceBitmapDesc
+{
+	tag		descTag;
+	__le32		numOfBits;
+	__le32		numOfBytes;
+	uint8_t		bitmap[0];
+} __attribute__ ((packed));
+
+/* Partition Integrity Entry (ECMA 167r3 4/14.13) */
+struct partitionIntegrityEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+	timestamp	recordingDateAndTime;
+	uint8_t		integrityType;
+	uint8_t		reserved[175];
+	regid		impIdent;
+	uint8_t		impUse[256];
+} __attribute__ ((packed));
+
+/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
+
+/* Extent Length (ECMA 167r3 4/14.14.1.1) */
+#define EXT_RECORDED_ALLOCATED		0x00000000
+#define EXT_NOT_RECORDED_ALLOCATED	0x40000000
+#define EXT_NOT_RECORDED_NOT_ALLOCATED	0x80000000
+#define EXT_NEXT_EXTENT_ALLOCDECS	0xC0000000
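An extent length value does double duty: the top two bits hold the extent type shown above and the low 30 bits hold the byte length, which is why inode.c below masks with UDF_EXTENT_LENGTH_MASK/UDF_EXTENT_FLAG_MASK and compares (extLength >> 30) against these constants shifted right by 30. A short standalone sketch of packing and unpacking that encoding, with local mask names assumed to match the driver's:

#include <stdint.h>
#include <assert.h>

/* Local names; assumed to match the driver's UDF_EXTENT_FLAG_MASK /
 * UDF_EXTENT_LENGTH_MASK split used in inode.c. */
#define EXT_TYPE_MASK   0xC0000000U	/* top two bits: extent type */
#define EXT_LENGTH_MASK 0x3FFFFFFFU	/* low 30 bits: length in bytes */

static uint32_t ext_make(uint32_t type_bits, uint32_t len_bytes)
{
	return (type_bits & EXT_TYPE_MASK) | (len_bytes & EXT_LENGTH_MASK);
}

static unsigned ext_type(uint32_t extLength)
{
	return extLength >> 30;		/* compares against EXT_* >> 30 */
}

static uint32_t ext_len(uint32_t extLength)
{
	return extLength & EXT_LENGTH_MASK;
}

int main(void)
{
	/* a 2048-byte extent that is allocated but not yet recorded */
	uint32_t e = ext_make(0x40000000U, 2048);

	assert(ext_type(e) == (0x40000000U >> 30));
	assert(ext_len(e) == 2048);
	return 0;
}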
+
+/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
+
+/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
+
+/* Logical Volume Header Descriptor (ECMA 167r3 4/14.15) */
+struct logicalVolHeaderDesc
+{
+	__le64		uniqueID;
+	uint8_t		reserved[24];
+} __attribute__ ((packed));
+
+/* Path Component (ECMA 167r3 4/14.16.1) */
+struct pathComponent
+{
+	uint8_t		componentType;
+	uint8_t		lengthComponentIdent;
+	__le16		componentFileVersionNum;
+	dstring		componentIdent[0];
+} __attribute__ ((packed));
+
+/* File Entry (ECMA 167r3 4/14.17) */
+struct extendedFileEntry
+{
+	tag		descTag;
+	icbtag		icbTag;
+	__le32		uid;
+	__le32		gid;
+	__le32		permissions;
+	__le16		fileLinkCount;
+	uint8_t		recordFormat;
+	uint8_t		recordDisplayAttr;
+	__le32		recordLength;
+	__le64		informationLength;
+	__le64		objectSize;
+	__le64		logicalBlocksRecorded;
+	timestamp	accessTime;
+	timestamp	modificationTime;
+	timestamp	createTime;
+	timestamp	attrTime;
+	__le32		checkpoint;
+	__le32		reserved;
+	long_ad		extendedAttrICB;
+	long_ad		streamDirectoryICB;
+	regid		impIdent;
+	__le64		uniqueID;
+	__le32		lengthExtendedAttr;
+	__le32		lengthAllocDescs;
+	uint8_t		extendedAttr[0];
+	uint8_t		allocDescs[0];
+} __attribute__ ((packed));
+
+#endif /* _ECMA_167_H */
diff --git a/fs/udf/file.c b/fs/udf/file.c
new file mode 100644
index 0000000..2faa417
--- /dev/null
+++ b/fs/udf/file.c
@@ -0,0 +1,270 @@
+/*
+ * file.c
+ *
+ * PURPOSE
+ *  File handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *  E-mail regarding any portion of the Linux UDF file system should be
+ *  directed to the development team mailing list (run by majordomo):
+ *    linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *  This file is distributed under the terms of the GNU General Public
+ *  License (GPL). Copies of the GPL can be obtained from:
+ *    ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *  Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-1999 Dave Boynton
+ *  (C) 1998-2004 Ben Fennema
+ *  (C) 1999-2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  10/02/98 dgb  Attempt to integrate into udf.o
+ *  10/07/98      Switched to using generic_readpage, etc., like isofs
+ *                And it works!
+ *  12/06/98 blf  Added udf_file_read. uses generic_file_read for all cases but
+ *                ICBTAG_FLAG_AD_IN_ICB.
+ *  04/06/99      64 bit file handling on 32 bit systems taken from ext2 file.c
+ *  05/12/99      Preliminary file write support
+ */
+
+#include "udfdecl.h"
+#include <linux/fs.h>
+#include <linux/udf_fs.h>
+#include <asm/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memset */
+#include <linux/errno.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
+static int udf_adinicb_readpage(struct file *file, struct page * page)
+{
+	struct inode *inode = page->mapping->host;
+	char *kaddr;
+
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+
+	kaddr = kmap(page);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+}
+
+static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct inode *inode = page->mapping->host;
+	char *kaddr;
+
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+
+	kaddr = kmap(page);
+	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size);
+	mark_inode_dirty(inode);
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+}
+
+static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+	kmap(page);
+	return 0;
+}
+
+static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+	struct inode *inode = page->mapping->host;
+	char *kaddr = page_address(page);
+
+	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
+		kaddr + offset, to - offset);
+	mark_inode_dirty(inode);
+	SetPageUptodate(page);
+	kunmap(page);
+	/* only one page here */
+	if (to > inode->i_size)
+		inode->i_size = to;
+	return 0;
+}
+
+struct address_space_operations udf_adinicb_aops = {
+	.readpage		= udf_adinicb_readpage,
+	.writepage		= udf_adinicb_writepage,
+	.sync_page		= block_sync_page,
+	.prepare_write		= udf_adinicb_prepare_write,
+	.commit_write		= udf_adinicb_commit_write,
+};
+
+static ssize_t udf_file_write(struct file * file, const char __user * buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t retval;
+	struct inode *inode = file->f_dentry->d_inode;
+	int err, pos;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		if (file->f_flags & O_APPEND)
+			pos = inode->i_size;
+		else
+			pos = *ppos;
+
+		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
+			pos + count))
+		{
+			udf_expand_file_adinicb(inode, pos + count, &err);
+			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+			{
+				udf_debug("udf_expand_file_adinicb: err=%d\n", err);
+				return err;
+			}
+		}
+		else
+		{
+			if (pos + count > inode->i_size)
+				UDF_I_LENALLOC(inode) = pos + count;
+			else
+				UDF_I_LENALLOC(inode) = inode->i_size;
+		}
+	}
+
+	retval = generic_file_write(file, buf, count, ppos);
+
+	if (retval > 0)
+		mark_inode_dirty(inode);
+	return retval;
+}
+
+/*
+ * udf_ioctl
+ *
+ * PURPOSE
+ *	Issue an ioctl.
+ *
+ * DESCRIPTION
+ *	Optional - sys_ioctl() will return -ENOTTY if this routine is not
+ *	available, and the ioctl cannot be handled without filesystem help.
+ *
+ *	sys_ioctl() handles these ioctls that apply only to regular files:
+ *		FIBMAP [requires udf_block_map()], FIGETBSZ, FIONREAD
+ *	These ioctls are also handled by sys_ioctl():
+ *		FIOCLEX, FIONCLEX, FIONBIO, FIOASYNC
+ *	All other ioctls are passed to the filesystem.
+ *
+ *	Refer to sys_ioctl() in fs/ioctl.c.
+ *
+ * PRE-CONDITIONS
+ *	inode			Pointer to inode that ioctl was issued on.
+ *	filp			Pointer to file that ioctl was issued on.
+ *	cmd			The ioctl command.
+ *	arg			The ioctl argument [can be interpreted as a
+ *				user-space pointer if desired].
+ *
+ * POST-CONDITIONS
+ *	<return>		Success (>=0) or an error code (<=0) that
+ *				sys_ioctl() will return.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+	unsigned long arg)
+{
+	int result = -EINVAL;
+
+	if ( permission(inode, MAY_READ, NULL) != 0 )
+	{
+		udf_debug("no permission to access inode %lu\n",
+						inode->i_ino);
+		return -EPERM;
+	}
+
+	if ( !arg )
+	{
+		udf_debug("invalid argument to udf_ioctl\n");
+		return -EINVAL;
+	}
+
+	switch (cmd)
+	{
+		case UDF_GETVOLIDENT:
+			return copy_to_user((char __user *)arg,
+				UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
+		case UDF_RELOCATE_BLOCKS:
+		{
+			long old, new;
+
+			if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+			if (get_user(old, (long __user *)arg)) return -EFAULT;
+			if ((result = udf_relocate_blocks(inode->i_sb,
+					old, &new)) == 0)
+				result = put_user(new, (long __user *)arg);
+
+			return result;
+		}
+		case UDF_GETEASIZE:
+			result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
+			break;
+
+		case UDF_GETEABLOCK:
+			result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
+				UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
+			break;
+	}
+
+	return result;
+}
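From user space these ioctls are reached with a plain ioctl(2) on any file descriptor opened on the mounted UDF volume. A hedged sketch of querying the 32-byte volume identifier; it assumes the request code UDF_GETVOLIDENT is available from <linux/udf_fs_i.h>, where kernels of this vintage keep the UDF ioctl definitions (otherwise the constant would have to be copied locally):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/udf_fs_i.h>	/* assumed home of UDF_GETVOLIDENT */

int main(int argc, char **argv)
{
	char volident[32];
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-udf-volume>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(volident, 0, sizeof(volident));
	/* udf_ioctl() copies 32 bytes of UDF_SB_VOLIDENT(sb) back to us. */
	if (ioctl(fd, UDF_GETVOLIDENT, volident) < 0) {
		perror("ioctl(UDF_GETVOLIDENT)");
		close(fd);
		return 1;
	}
	printf("volume identifier: %.*s\n", (int)sizeof(volident), volident);
	close(fd);
	return 0;
}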
+
+/*
+ * udf_release_file
+ *
+ * PURPOSE
+ *  Called when all references to the file are closed
+ *
+ * DESCRIPTION
+ *  Discard preallocated blocks
+ *
+ * HISTORY
+ *
+ */
+static int udf_release_file(struct inode * inode, struct file * filp)
+{
+	if (filp->f_mode & FMODE_WRITE)
+	{
+		lock_kernel();
+		udf_discard_prealloc(inode);
+		unlock_kernel();
+	}
+	return 0;
+}
+
+struct file_operations udf_file_operations = {
+	.read			= generic_file_read,
+	.ioctl			= udf_ioctl,
+	.open			= generic_file_open,
+	.mmap			= generic_file_mmap,
+	.write			= udf_file_write,
+	.release		= udf_release_file,
+	.fsync			= udf_fsync_file,
+	.sendfile		= generic_file_sendfile,
+};
+
+struct inode_operations udf_file_inode_operations = {
+	.truncate		= udf_truncate,
+};
diff --git a/fs/udf/fsync.c b/fs/udf/fsync.c
new file mode 100644
index 0000000..2dde6b8
--- /dev/null
+++ b/fs/udf/fsync.c
@@ -0,0 +1,56 @@
+/*
+ * fsync.c
+ *
+ * PURPOSE
+ *  Fsync handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *  E-mail regarding any portion of the Linux UDF file system should be
+ *  directed to the development team mailing list (run by majordomo):
+ *      linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *  This file is distributed under the terms of the GNU General Public
+ *  License (GPL). Copies of the GPL can be obtained from:
+ *      ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *  Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1999-2001 Ben Fennema
+ *  (C) 1999-2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  05/22/99 blf  Created.
+ */
+
+#include "udfdecl.h"
+
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+
+static int udf_fsync_inode(struct inode *, int);
+
+/*
+ *	File may be NULL when we are called. Perhaps we shouldn't
+ *	even pass file to fsync ?
+ *	even pass file to fsync?
+
+int udf_fsync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	return udf_fsync_inode(inode, datasync);
+}
+
+static int udf_fsync_inode(struct inode *inode, int datasync)
+{
+	int err;
+
+	err = sync_mapping_buffers(inode->i_mapping);
+	if (!(inode->i_state & I_DIRTY))
+		return err;
+	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+		return err;
+
+	err |= udf_sync_inode (inode);
+	return err ? -EIO : 0;
+}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
new file mode 100644
index 0000000..a7e5d40
--- /dev/null
+++ b/fs/udf/ialloc.c
@@ -0,0 +1,170 @@
+/*
+ * ialloc.c
+ *
+ * PURPOSE
+ *	Inode allocation handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-2001 Ben Fennema
+ *
+ * HISTORY
+ *
+ *  02/24/99 blf  Created.
+ *
+ */
+
+#include "udfdecl.h"
+#include <linux/fs.h>
+#include <linux/quotaops.h>
+#include <linux/udf_fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
+void udf_free_inode(struct inode * inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+
+	/*
+	 * Note: we must free any quota before locking the superblock,
+	 * as writing the quota to disk may need the lock as well.
+	 */
+	DQUOT_FREE_INODE(inode);
+	DQUOT_DROP(inode);
+
+	clear_inode(inode);
+
+	down(&sbi->s_alloc_sem);
+	if (sbi->s_lvidbh) {
+		if (S_ISDIR(inode->i_mode))
+			UDF_SB_LVIDIU(sb)->numDirs =
+				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
+		else
+			UDF_SB_LVIDIU(sb)->numFiles =
+				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
+		
+		mark_buffer_dirty(sbi->s_lvidbh);
+	}
+	up(&sbi->s_alloc_sem);
+
+	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
+}
+
+struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
+{
+	struct super_block *sb = dir->i_sb;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct inode * inode;
+	int block;
+	uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum;
+
+	inode = new_inode(sb);
+
+	if (!inode)
+	{
+		*err = -ENOMEM;
+		return NULL;
+	}
+	*err = -ENOSPC;
+
+	block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
+		start, err);
+	if (*err)
+	{
+		iput(inode);
+		return NULL;
+	}
+
+	down(&sbi->s_alloc_sem);
+	UDF_I_UNIQUE(inode) = 0;
+	UDF_I_LENEXTENTS(inode) = 0;
+	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
+	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
+	UDF_I_STRAT4096(inode) = 0;
+	if (UDF_SB_LVIDBH(sb))
+	{
+		struct logicalVolHeaderDesc *lvhd;
+		uint64_t uniqueID;
+		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
+		if (S_ISDIR(mode))
+			UDF_SB_LVIDIU(sb)->numDirs =
+				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
+		else
+			UDF_SB_LVIDIU(sb)->numFiles =
+				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
+		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
+		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
+			uniqueID += 16;
+		lvhd->uniqueID = cpu_to_le64(uniqueID);
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+	inode->i_mode = mode;
+	inode->i_uid = current->fsuid;
+	if (dir->i_mode & S_ISGID)
+	{
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	}
+	else
+		inode->i_gid = current->fsgid;
+
+	UDF_I_LOCATION(inode).logicalBlockNum = block;
+	UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
+	inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = 0;
+	UDF_I_LENEATTR(inode) = 0;
+	UDF_I_LENALLOC(inode) = 0;
+	UDF_I_USE(inode) = 0;
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
+	{
+		UDF_I_EFE(inode) = 1;
+		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
+		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+	}
+	else
+	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
+		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+	}
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
+	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
+	else
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
+	inode->i_mtime = inode->i_atime = inode->i_ctime =
+		UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+	up(&sbi->s_alloc_sem);
+
+	if (DQUOT_ALLOC_INODE(inode))
+	{
+		DQUOT_DROP(inode);
+		inode->i_flags |= S_NOQUOTA;
+		inode->i_nlink = 0;
+		iput(inode);
+		*err = -EDQUOT;
+		return NULL;
+	}
+
+	*err = 0;
+	return inode;
+}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
new file mode 100644
index 0000000..0506e11
--- /dev/null
+++ b/fs/udf/inode.c
@@ -0,0 +1,2010 @@
+/*
+ * inode.c
+ *
+ * PURPOSE
+ *  Inode handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *  E-mail regarding any portion of the Linux UDF file system should be
+ *  directed to the development team mailing list (run by majordomo):
+ *    linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *  This file is distributed under the terms of the GNU General Public
+ *  License (GPL). Copies of the GPL can be obtained from:
+ *    ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *  Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998 Dave Boynton
+ *  (C) 1998-2004 Ben Fennema
+ *  (C) 1999-2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  10/04/98 dgb  Added rudimentary directory functions
+ *  10/07/98      Fully working udf_block_map! It works!
+ *  11/25/98      bmap altered to better support extents
+ *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
+ *  12/12/98      rewrote udf_block_map to handle next extents and descs across
+ *                block boundaries (which is not actually allowed)
+ *  12/20/98      added support for strategy 4096
+ *  03/07/99      rewrote udf_block_map (again)
+ *                New funcs, inode_bmap, udf_next_aext
+ *  04/19/99      Support for writing device EA's for major/minor #
+ */
+
+#include "udfdecl.h"
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/slab.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
+MODULE_AUTHOR("Ben Fennema");
+MODULE_DESCRIPTION("Universal Disk Format Filesystem");
+MODULE_LICENSE("GPL");
+
+#define EXTENT_MERGE_SIZE 5
+
+static mode_t udf_convert_permissions(struct fileEntry *);
+static int udf_update_inode(struct inode *, int);
+static void udf_fill_inode(struct inode *, struct buffer_head *);
+static struct buffer_head *inode_getblk(struct inode *, long, int *,
+	long *, int *);
+static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
+	kernel_lb_addr, uint32_t, struct buffer_head *);
+static void udf_split_extents(struct inode *, int *, int, int,
+	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+static void udf_prealloc_extents(struct inode *, int, int,
+	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+static void udf_merge_extents(struct inode *,
+	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+static void udf_update_extents(struct inode *,
+	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
+	kernel_lb_addr, uint32_t, struct buffer_head **);
+static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
+
+/*
+ * udf_delete_inode
+ *
+ * PURPOSE
+ *	Clean-up before the specified inode is destroyed.
+ *
+ * DESCRIPTION
+ *	This routine is called when the kernel destroys an inode structure
+ *	ie. when iput() finds i_count == 0.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ *
+ *  Called at the last iput() if i_nlink is zero.
+ */
+void udf_delete_inode(struct inode * inode)
+{
+	if (is_bad_inode(inode))
+		goto no_delete;
+
+	inode->i_size = 0;
+	udf_truncate(inode);
+	lock_kernel();
+
+	udf_update_inode(inode, IS_SYNC(inode));
+	udf_free_inode(inode);
+
+	unlock_kernel();
+	return;
+no_delete:
+	clear_inode(inode);
+}
+
+void udf_clear_inode(struct inode *inode)
+{
+	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
+		lock_kernel();
+		udf_discard_prealloc(inode);
+		unlock_kernel();
+	}
+
+	kfree(UDF_I_DATA(inode));
+	UDF_I_DATA(inode) = NULL;
+}
+
+static int udf_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page, udf_get_block, wbc);
+}
+
+static int udf_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page, udf_get_block);
+}
+
+static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page, from, to, udf_get_block);
+}
+
+static sector_t udf_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,udf_get_block);
+}
+
+struct address_space_operations udf_aops = {
+	.readpage		= udf_readpage,
+	.writepage		= udf_writepage,
+	.sync_page		= block_sync_page,
+	.prepare_write		= udf_prepare_write,
+	.commit_write		= generic_commit_write,
+	.bmap			= udf_bmap,
+};
+
+void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
+{
+	struct page *page;
+	char *kaddr;
+	struct writeback_control udf_wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = 1,
+	};
+
+	/* from now on we have normal address_space methods */
+	inode->i_data.a_ops = &udf_aops;
+
+	if (!UDF_I_LENALLOC(inode))
+	{
+		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
+			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
+		else
+			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
+		mark_inode_dirty(inode);
+		return;
+	}
+
+	page = grab_cache_page(inode->i_mapping, 0);
+	if (!PageLocked(page))
+		PAGE_BUG(page);
+	if (!PageUptodate(page))
+	{
+		kaddr = kmap(page);
+		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
+			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
+		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
+			UDF_I_LENALLOC(inode));
+		flush_dcache_page(page);
+		SetPageUptodate(page);
+		kunmap(page);
+	}
+	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
+		UDF_I_LENALLOC(inode));
+	UDF_I_LENALLOC(inode) = 0;
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
+	else
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
+
+	inode->i_data.a_ops->writepage(page, &udf_wbc);
+	page_cache_release(page);
+
+	mark_inode_dirty(inode);
+}
+
+struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
+{
+	int newblock;
+	struct buffer_head *sbh = NULL, *dbh = NULL;
+	kernel_lb_addr bloc, eloc;
+	uint32_t elen, extoffset;
+	uint8_t alloctype;
+
+	struct udf_fileident_bh sfibh, dfibh;
+	loff_t f_pos = udf_ext0_offset(inode) >> 2;
+	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
+	struct fileIdentDesc cfi, *sfi, *dfi;
+
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
+		alloctype = ICBTAG_FLAG_AD_SHORT;
+	else
+		alloctype = ICBTAG_FLAG_AD_LONG;
+
+	if (!inode->i_size)
+	{
+		UDF_I_ALLOCTYPE(inode) = alloctype;
+		mark_inode_dirty(inode);
+		return NULL;
+	}
+
+	/* alloc block, and copy data to it */
+	*block = udf_new_block(inode->i_sb, inode,
+		UDF_I_LOCATION(inode).partitionReferenceNum,
+		UDF_I_LOCATION(inode).logicalBlockNum, err);
+
+	if (!(*block))
+		return NULL;
+	newblock = udf_get_pblock(inode->i_sb, *block,
+		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+	if (!newblock)
+		return NULL;
+	dbh = udf_tgetblk(inode->i_sb, newblock);
+	if (!dbh)
+		return NULL;
+	lock_buffer(dbh);
+	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
+	set_buffer_uptodate(dbh);
+	unlock_buffer(dbh);
+	mark_buffer_dirty_inode(dbh, inode);
+
+	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
+	sbh = sfibh.sbh = sfibh.ebh = NULL;
+	dfibh.soffset = dfibh.eoffset = 0;
+	dfibh.sbh = dfibh.ebh = dbh;
+	while ( (f_pos < size) )
+	{
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
+		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
+		if (!sfi)
+		{
+			udf_release_data(dbh);
+			return NULL;
+		}
+		UDF_I_ALLOCTYPE(inode) = alloctype;
+		sfi->descTag.tagLocation = cpu_to_le32(*block);
+		dfibh.soffset = dfibh.eoffset;
+		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
+		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
+		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
+			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
+		{
+			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
+			udf_release_data(dbh);
+			return NULL;
+		}
+	}
+	mark_buffer_dirty_inode(dbh, inode);
+
+	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
+	UDF_I_LENALLOC(inode) = 0;
+	bloc = UDF_I_LOCATION(inode);
+	eloc.logicalBlockNum = *block;
+	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+	elen = inode->i_size;
+	UDF_I_LENEXTENTS(inode) = elen;
+	extoffset = udf_file_entry_alloc_offset(inode);
+	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
+	/* UniqueID stuff */
+
+	udf_release_data(sbh);
+	mark_inode_dirty(inode);
+	return dbh;
+}
+
+static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
+{
+	int err, new;
+	struct buffer_head *bh;
+	unsigned long phys;
+
+	if (!create)
+	{
+		phys = udf_block_map(inode, block);
+		if (phys)
+			map_bh(bh_result, inode->i_sb, phys);
+		return 0;
+	}
+
+	err = -EIO;
+	new = 0;
+	bh = NULL;
+
+	lock_kernel();
+
+	if (block < 0)
+		goto abort_negative;
+
+	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
+	{
+		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
+		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
+	}
+
+	err = 0;
+
+	bh = inode_getblk(inode, block, &err, &phys, &new);
+	if (bh)
+		BUG();
+	if (err)
+		goto abort;
+	if (!phys)
+		BUG();
+
+	if (new)
+		set_buffer_new(bh_result);
+	map_bh(bh_result, inode->i_sb, phys);
+abort:
+	unlock_kernel();
+	return err;
+
+abort_negative:
+	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
+	goto abort;
+}
+
+static struct buffer_head *
+udf_getblk(struct inode *inode, long block, int create, int *err)
+{
+	struct buffer_head dummy;
+
+	dummy.b_state = 0;
+	dummy.b_blocknr = -1000;
+	*err = udf_get_block(inode, block, &dummy, create);
+	if (!*err && buffer_mapped(&dummy))
+	{
+		struct buffer_head *bh;
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+		if (buffer_new(&dummy))
+		{
+			lock_buffer(bh);
+			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
+			set_buffer_uptodate(bh);
+			unlock_buffer(bh);
+			mark_buffer_dirty_inode(bh, inode);
+		}
+		return bh;
+	}
+	return NULL;
+}
+
+static struct buffer_head * inode_getblk(struct inode * inode, long block,
+	int *err, long *phys, int *new)
+{
+	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
+	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
+	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
+	int count = 0, startnum = 0, endnum = 0;
+	uint32_t elen = 0;
+	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
+	int c = 1;
+	uint64_t lbcount = 0, b_off = 0;
+	uint32_t newblocknum, newblock, offset = 0;
+	int8_t etype;
+	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
+	char lastblock = 0;
+
+	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
+	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
+	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
+
+	/* find the extent which contains the block we are looking for.
+       alternate between laarr[0] and laarr[1] for locations of the
+       current extent, and the previous extent */
+	do
+	{
+		if (pbh != cbh)
+		{
+			udf_release_data(pbh);
+			atomic_inc(&cbh->b_count);
+			pbh = cbh;
+		}
+		if (cbh != nbh)
+		{
+			udf_release_data(cbh);
+			atomic_inc(&nbh->b_count);
+			cbh = nbh;
+		}
+
+		lbcount += elen;
+
+		pbloc = cbloc;
+		cbloc = nbloc;
+
+		pextoffset = cextoffset;
+		cextoffset = nextoffset;
+
+		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
+			break;
+
+		c = !c;
+
+		laarr[c].extLength = (etype << 30) | elen;
+		laarr[c].extLocation = eloc;
+
+		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			pgoal = eloc.logicalBlockNum +
+				((elen + inode->i_sb->s_blocksize - 1) >>
+				inode->i_sb->s_blocksize_bits);
+
+		count ++;
+	} while (lbcount + elen <= b_off);
+
+	b_off -= lbcount;
+	offset = b_off >> inode->i_sb->s_blocksize_bits;
+
+	/* if the extent is allocated and recorded, return the block;
+	   if the extent length is not a multiple of the blocksize, round it up */
+
+	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
+	{
+		if (elen & (inode->i_sb->s_blocksize - 1))
+		{
+			elen = EXT_RECORDED_ALLOCATED |
+				((elen + inode->i_sb->s_blocksize - 1) &
+				~(inode->i_sb->s_blocksize - 1));
+			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
+		}
+		udf_release_data(pbh);
+		udf_release_data(cbh);
+		udf_release_data(nbh);
+		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
+		*phys = newblock;
+		return NULL;
+	}
+
+	if (etype == -1)
+	{
+		endnum = startnum = ((count > 1) ? 1 : count);
+		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
+		{
+			laarr[c].extLength =
+				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
+				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
+					inode->i_sb->s_blocksize - 1) &
+				~(inode->i_sb->s_blocksize - 1));
+			UDF_I_LENEXTENTS(inode) =
+				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
+					~(inode->i_sb->s_blocksize - 1);
+		}
+		c = !c;
+		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+			((offset + 1) << inode->i_sb->s_blocksize_bits);
+		memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
+		count ++;
+		endnum ++;
+		lastblock = 1;
+	}
+	else
+		endnum = startnum = ((count > 2) ? 2 : count);
+
+	/* if the current extent is in position 0, swap it with the previous */
+	if (!c && count != 1)
+	{
+		laarr[2] = laarr[0];
+		laarr[0] = laarr[1];
+		laarr[1] = laarr[2];
+		c = 1;
+	}
+
+	/* if the current block is located in an extent, read the next extent */
+	if (etype != -1)
+	{
+		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
+		{
+			laarr[c+1].extLength = (etype << 30) | elen;
+			laarr[c+1].extLocation = eloc;
+			count ++;
+			startnum ++;
+			endnum ++;
+		}
+		else
+			lastblock = 1;
+	}
+	udf_release_data(cbh);
+	udf_release_data(nbh);
+
+	/* if the current extent is not recorded but allocated, get the
+		block in the extent corresponding to the requested block */
+	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
+	else /* otherwise, allocate a new block */
+	{
+		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
+			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
+
+		if (!goal)
+		{
+			if (!(goal = pgoal))
+				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
+		}
+
+		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
+			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
+		{
+			udf_release_data(pbh);
+			*err = -ENOSPC;
+			return NULL;
+		}
+		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
+	}
+
+	/* if the extent the requested block is located in contains multiple blocks,
+	   split the extent into at most three extents: blocks prior to the requested
+	   block, the requested block, and blocks after the requested block */
+	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
+
+#ifdef UDF_PREALLOCATE
+	/* preallocate blocks */
+	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
+#endif
+
+	/* merge any continuous blocks in laarr */
+	udf_merge_extents(inode, laarr, &endnum);
+
+	/* write back the new extents, inserting new extents if the new number
+       of extents is greater than the old number, and deleting extents if
+       the new number of extents is less than the old number */
+	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
+
+	udf_release_data(pbh);
+
+	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
+		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
+	{
+		return NULL;
+	}
+	*phys = newblock;
+	*err = 0;
+	*new = 1;
+	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
+	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
+	inode->i_ctime = current_fs_time(inode->i_sb);
+
+	if (IS_SYNC(inode))
+		udf_sync_inode(inode);
+	else
+		mark_inode_dirty(inode);
+	return result;
+}
+
+static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
+	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+{
+	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
+		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+	{
+		int curr = *c;
+		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
+			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+		int8_t etype = (laarr[curr].extLength >> 30);
+
+		if (blen == 1)
+			;
+		else if (!offset || blen == offset + 1)
+		{
+			laarr[curr+2] = laarr[curr+1];
+			laarr[curr+1] = laarr[curr];
+		}
+		else
+		{
+			laarr[curr+3] = laarr[curr+1];
+			laarr[curr+2] = laarr[curr+1] = laarr[curr];
+		}
+
+		if (offset)
+		{
+			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+			{
+				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
+				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+					(offset << inode->i_sb->s_blocksize_bits);
+				laarr[curr].extLocation.logicalBlockNum = 0;
+				laarr[curr].extLocation.partitionReferenceNum = 0;
+			}
+			else
+				laarr[curr].extLength = (etype << 30) |
+					(offset << inode->i_sb->s_blocksize_bits);
+			curr ++;
+			(*c) ++;
+			(*endnum) ++;
+		}
+		
+		laarr[curr].extLocation.logicalBlockNum = newblocknum;
+		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			laarr[curr].extLocation.partitionReferenceNum =
+				UDF_I_LOCATION(inode).partitionReferenceNum;
+		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
+			inode->i_sb->s_blocksize;
+		curr ++;
+
+		if (blen != offset + 1)
+		{
+			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
+			laarr[curr].extLength = (etype << 30) |
+				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
+			curr ++;
+			(*endnum) ++;
+		}
+	}
+}
+
+static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
+	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+{
+	int start, length = 0, currlength = 0, i;
+
+	if (*endnum >= (c+1))
+	{
+		if (!lastblock)
+			return;
+		else
+			start = c;
+	}
+	else
+	{
+		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+		{
+			start = c+1;
+			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
+				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		}
+		else
+			start = c;
+	}
+
+	for (i=start+1; i<=*endnum; i++)
+	{
+		if (i == *endnum)
+		{
+			if (lastblock)
+				length += UDF_DEFAULT_PREALLOC_BLOCKS;
+		}
+		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		else
+			break;
+	}
+
+	if (length)
+	{
+		int next = laarr[start].extLocation.logicalBlockNum +
+			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
+			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
+			laarr[start].extLocation.partitionReferenceNum,
+			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
+				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
+
+		if (numalloc)
+		{
+			if (start == (c+1))
+				laarr[start].extLength +=
+					(numalloc << inode->i_sb->s_blocksize_bits);
+			else
+			{
+				memmove(&laarr[c+2], &laarr[c+1],
+					sizeof(long_ad) * (*endnum - (c+1)));
+				(*endnum) ++;
+				laarr[c+1].extLocation.logicalBlockNum = next;
+				laarr[c+1].extLocation.partitionReferenceNum =
+					laarr[c].extLocation.partitionReferenceNum;
+				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
+					(numalloc << inode->i_sb->s_blocksize_bits);
+				start = c+1;
+			}
+
+			for (i=start+1; numalloc && i<*endnum; i++)
+			{
+				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+
+				if (elen > numalloc)
+				{
+					laarr[i].extLength -=
+						(numalloc << inode->i_sb->s_blocksize_bits);
+					numalloc = 0;
+				}
+				else
+				{
+					numalloc -= elen;
+					if (*endnum > (i+1))
+						memmove(&laarr[i], &laarr[i+1], 
+							sizeof(long_ad) * (*endnum - (i+1)));
+					i --;
+					(*endnum) --;
+				}
+			}
+			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
+		}
+	}
+}
+
+static void udf_merge_extents(struct inode *inode,
+	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+{
+	int i;
+
+	for (i=0; i<(*endnum-1); i++)
+	{
+		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
+		{
+			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
+				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
+				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
+			{
+				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
+					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
+				{
+					laarr[i+1].extLength = (laarr[i+1].extLength -
+						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
+					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
+						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
+					laarr[i+1].extLocation.logicalBlockNum =
+						laarr[i].extLocation.logicalBlockNum +
+						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
+							inode->i_sb->s_blocksize_bits);
+				}
+				else
+				{
+					laarr[i].extLength = laarr[i+1].extLength +
+						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
+					if (*endnum > (i+2))
+						memmove(&laarr[i+1], &laarr[i+2],
+							sizeof(long_ad) * (*endnum - (i+2)));
+					i --;
+					(*endnum) --;
+				}
+			}
+		}
+		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
+			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
+		{
+			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
+				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+			laarr[i].extLocation.logicalBlockNum = 0;
+			laarr[i].extLocation.partitionReferenceNum = 0;
+
+			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
+				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
+			{
+				laarr[i+1].extLength = (laarr[i+1].extLength -
+					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
+				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
+					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
+			}
+			else
+			{
+				laarr[i].extLength = laarr[i+1].extLength +
+					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
+				if (*endnum > (i+2))
+					memmove(&laarr[i+1], &laarr[i+2],
+						sizeof(long_ad) * (*endnum - (i+2)));
+				i --;
+				(*endnum) --;
+			}
+		}
+		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+		{
+			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
+				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+			       inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+			laarr[i].extLocation.logicalBlockNum = 0;
+			laarr[i].extLocation.partitionReferenceNum = 0;
+			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
+				EXT_NOT_RECORDED_NOT_ALLOCATED;
+		}
+	}
+}
+
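+/*
+ * udf_update_extents
+ *
+ * PURPOSE
+ *	Write the merged extent array back to the allocation descriptors on
+ *	disk, first deleting or inserting descriptors so that the on-disk
+ *	count matches the new number of extents.
+ */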
+static void udf_update_extents(struct inode *inode,
+	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
+	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
+{
+	int start = 0, i;
+	kernel_lb_addr tmploc;
+	uint32_t tmplen;
+
+	if (startnum > endnum)
+	{
+		for (i=0; i<(startnum-endnum); i++)
+		{
+			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
+				laarr[i].extLength, *pbh);
+		}
+	}
+	else if (startnum < endnum)
+	{
+		for (i=0; i<(endnum-startnum); i++)
+		{
+			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
+				laarr[i].extLength, *pbh);
+			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
+				&laarr[i].extLength, pbh, 1);
+			start ++;
+		}
+	}
+
+	for (i=start; i<endnum; i++)
+	{
+		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
+		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
+			laarr[i].extLength, *pbh, 1);
+	}
+}
+
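+/*
+ * udf_bread
+ *
+ * PURPOSE
+ *	Return an up-to-date buffer for the given block of the inode,
+ *	allocating the block first if 'create' is set; returns NULL and
+ *	sets *err on failure.
+ */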
+struct buffer_head * udf_bread(struct inode * inode, int block,
+	int create, int * err)
+{
+	struct buffer_head * bh = NULL;
+
+	bh = udf_getblk(inode, block, create, err);
+	if (!bh)
+		return NULL;
+
+	if (buffer_uptodate(bh))
+		return bh;
+	ll_rw_block(READ, 1, &bh);
+	wait_on_buffer(bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	brelse(bh);
+	*err = -EIO;
+	return NULL;
+}
+
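+/*
+ * udf_truncate
+ *
+ * PURPOSE
+ *	Truncate the inode to i_size.  Data embedded in the ICB is expanded
+ *	to a real block first when it no longer fits, otherwise its tail is
+ *	simply zeroed; for normal files the partial last block is zeroed and
+ *	the extents beyond the new size are released.
+ */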
+void udf_truncate(struct inode * inode)
+{
+	int offset;
+	int err;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+			S_ISLNK(inode->i_mode)))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+
+	lock_kernel();
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
+			inode->i_size))
+		{
+			udf_expand_file_adinicb(inode, inode->i_size, &err);
+			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+			{
+				inode->i_size = UDF_I_LENALLOC(inode);
+				unlock_kernel();
+				return;
+			}
+			else
+				udf_truncate_extents(inode);
+		}
+		else
+		{
+			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
+			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
+			UDF_I_LENALLOC(inode) = inode->i_size;
+		}
+	}
+	else
+	{
+		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
+		udf_truncate_extents(inode);
+	}	
+
+	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+	if (IS_SYNC(inode))
+		udf_sync_inode (inode);
+	else
+		mark_inode_dirty(inode);
+	unlock_kernel();
+}
+
+static void
+__udf_read_inode(struct inode *inode)
+{
+	struct buffer_head *bh = NULL;
+	struct fileEntry *fe;
+	uint16_t ident;
+
+	/*
+	 * Set defaults, but the inode is still incomplete!
+	 * Note: get_new_inode() sets the following on a new inode:
+	 *      i_sb = sb
+	 *      i_ino = ino
+	 *      i_flags = sb->s_flags
+	 *      i_state = 0
+	 * clean_inode(): zero fills and sets
+	 *      i_count = 1
+	 *      i_nlink = 1
+	 *      i_op = NULL;
+	 */
+	inode->i_blksize = PAGE_SIZE;
+
+	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
+
+	if (!bh)
+	{
+		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
+			inode->i_ino);
+		make_bad_inode(inode);
+		return;
+	}
+
+	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
+		ident != TAG_IDENT_USE)
+	{
+		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
+			inode->i_ino, ident);
+		udf_release_data(bh);
+		make_bad_inode(inode);
+		return;
+	}
+
+	fe = (struct fileEntry *)bh->b_data;
+
+	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
+	{
+		struct buffer_head *ibh = NULL, *nbh = NULL;
+		struct indirectEntry *ie;
+
+		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
+		if (ident == TAG_IDENT_IE)
+		{
+			if (ibh)
+			{
+				kernel_lb_addr loc;
+				ie = (struct indirectEntry *)ibh->b_data;
+	
+				loc = lelb_to_cpu(ie->indirectICB.extLocation);
+	
+				if (ie->indirectICB.extLength && 
+					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
+				{
+					if (ident == TAG_IDENT_FE ||
+						ident == TAG_IDENT_EFE)
+					{
+						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
+						udf_release_data(bh);
+						udf_release_data(ibh);
+						udf_release_data(nbh);
+						__udf_read_inode(inode);
+						return;
+					}
+					else
+					{
+						udf_release_data(nbh);
+						udf_release_data(ibh);
+					}
+				}
+				else
+					udf_release_data(ibh);
+			}
+		}
+		else
+			udf_release_data(ibh);
+	}
+	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
+	{
+		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
+			le16_to_cpu(fe->icbTag.strategyType));
+		udf_release_data(bh);
+		make_bad_inode(inode);
+		return;
+	}
+	udf_fill_inode(inode, bh);
+	udf_release_data(bh);
+}
+
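+/*
+ * udf_fill_inode
+ *
+ * PURPOSE
+ *	Copy the on-disk (extended) file entry into the in-core inode:
+ *	ownership, permissions, link count, size, timestamps and allocation
+ *	data, and pick the inode, file and address-space operations that
+ *	match the ICB file type.
+ */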
+static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+{
+	struct fileEntry *fe;
+	struct extendedFileEntry *efe;
+	time_t convtime;
+	long convtime_usec;
+	int offset;
+
+	fe = (struct fileEntry *)bh->b_data;
+	efe = (struct extendedFileEntry *)bh->b_data;
+
+	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
+		UDF_I_STRAT4096(inode) = 0;
+	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
+		UDF_I_STRAT4096(inode) = 1;
+
+	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
+	UDF_I_UNIQUE(inode) = 0;
+	UDF_I_LENEATTR(inode) = 0;
+	UDF_I_LENEXTENTS(inode) = 0;
+	UDF_I_LENALLOC(inode) = 0;
+	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
+	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
+	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
+	{
+		UDF_I_EFE(inode) = 1;
+		UDF_I_USE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+	}
+	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
+	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_USE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+	}
+	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
+	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_USE(inode) = 1;
+		UDF_I_LENALLOC(inode) =
+			le32_to_cpu(
+				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
+		return;
+	}
+
+	inode->i_uid = le32_to_cpu(fe->uid);
+	if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
+
+	inode->i_gid = le32_to_cpu(fe->gid);
+	if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
+
+	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+	if (!inode->i_nlink)
+		inode->i_nlink = 1;
+	
+	inode->i_size = le64_to_cpu(fe->informationLength);
+	UDF_I_LENEXTENTS(inode) = inode->i_size;
+
+	inode->i_mode = udf_convert_permissions(fe);
+	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
+
+	if (UDF_I_EFE(inode) == 0)
+	{
+		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
+			(inode->i_sb->s_blocksize_bits - 9);
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(fe->accessTime)) )
+		{
+			inode->i_atime.tv_sec = convtime;
+			inode->i_atime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(fe->modificationTime)) )
+		{
+			inode->i_mtime.tv_sec = convtime;
+			inode->i_mtime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(fe->attrTime)) )
+		{
+			inode->i_ctime.tv_sec = convtime;
+			inode->i_ctime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
+		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
+		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
+		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
+	}
+	else
+	{
+		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << 
+			(inode->i_sb->s_blocksize_bits - 9);
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(efe->accessTime)) )
+		{
+			inode->i_atime.tv_sec = convtime;
+			inode->i_atime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(efe->modificationTime)) )
+		{
+			inode->i_mtime.tv_sec = convtime;
+			inode->i_mtime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(efe->createTime)) )
+		{
+			UDF_I_CRTIME(inode).tv_sec = convtime;
+			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		if ( udf_stamp_to_time(&convtime, &convtime_usec,
+			lets_to_cpu(efe->attrTime)) )
+		{
+			inode->i_ctime.tv_sec = convtime;
+			inode->i_ctime.tv_nsec = convtime_usec * 1000;
+		}
+		else
+		{
+			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
+		}
+
+		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
+		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
+		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
+		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
+	}
+
+	switch (fe->icbTag.fileType)
+	{
+		case ICBTAG_FILE_TYPE_DIRECTORY:
+		{
+			inode->i_op = &udf_dir_inode_operations;
+			inode->i_fop = &udf_dir_operations;
+			inode->i_mode |= S_IFDIR;
+			inode->i_nlink ++;
+			break;
+		}
+		case ICBTAG_FILE_TYPE_REALTIME:
+		case ICBTAG_FILE_TYPE_REGULAR:
+		case ICBTAG_FILE_TYPE_UNDEF:
+		{
+			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+				inode->i_data.a_ops = &udf_adinicb_aops;
+			else
+				inode->i_data.a_ops = &udf_aops;
+			inode->i_op = &udf_file_inode_operations;
+			inode->i_fop = &udf_file_operations;
+			inode->i_mode |= S_IFREG;
+			break;
+		}
+		case ICBTAG_FILE_TYPE_BLOCK:
+		{
+			inode->i_mode |= S_IFBLK;
+			break;
+		}
+		case ICBTAG_FILE_TYPE_CHAR:
+		{
+			inode->i_mode |= S_IFCHR;
+			break;
+		}
+		case ICBTAG_FILE_TYPE_FIFO:
+		{
+			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
+			break;
+		}
+		case ICBTAG_FILE_TYPE_SOCKET:
+		{
+			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
+			break;
+		}
+		case ICBTAG_FILE_TYPE_SYMLINK:
+		{
+			inode->i_data.a_ops = &udf_symlink_aops;
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mode = S_IFLNK|S_IRWXUGO;
+			break;
+		}
+		default:
+		{
+			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
+				inode->i_ino, fe->icbTag.fileType);
+			make_bad_inode(inode);
+			return;
+		}
+	}
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+	{
+		struct deviceSpec *dsea =
+			(struct deviceSpec *)
+				udf_get_extendedattr(inode, 12, 1);
+
+		if (dsea)
+		{
+			init_special_inode(inode, inode->i_mode, MKDEV(
+				le32_to_cpu(dsea->majorDeviceIdent),
+				le32_to_cpu(dsea->minorDeviceIdent)));
+			/* Developer ID ??? */
+		}
+		else
+		{
+			make_bad_inode(inode);
+		}
+	}
+}
+
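+/*
+ * udf_convert_permissions
+ *
+ * PURPOSE
+ *	Map the UDF permission bits and the setuid/setgid/sticky ICB flags
+ *	of a file entry onto a POSIX mode_t.
+ */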
+static mode_t
+udf_convert_permissions(struct fileEntry *fe)
+{
+	mode_t mode;
+	uint32_t permissions;
+	uint32_t flags;
+
+	permissions = le32_to_cpu(fe->permissions);
+	flags = le16_to_cpu(fe->icbTag.flags);
+
+	mode =	(( permissions      ) & S_IRWXO) |
+		(( permissions >> 2 ) & S_IRWXG) |
+		(( permissions >> 4 ) & S_IRWXU) |
+		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
+		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
+		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
+
+	return mode;
+}
+
+/*
+ * udf_write_inode
+ *
+ * PURPOSE
+ *	Write out the specified inode.
+ *
+ * DESCRIPTION
+ *	This routine is called whenever an inode is synced.
+ *	It takes the big kernel lock and hands the work to udf_update_inode().
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+
+int udf_write_inode(struct inode * inode, int sync)
+{
+	int ret;
+	lock_kernel();
+	ret = udf_update_inode(inode, sync);
+	unlock_kernel();
+	return ret;
+}
+
+int udf_sync_inode(struct inode * inode)
+{
+	return udf_update_inode(inode, 1);
+}
+
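+/*
+ * udf_update_inode
+ *
+ * PURPOSE
+ *	Write the in-core inode back to its file entry, extended file entry
+ *	or unallocated space entry on disk, recomputing the descriptor CRC
+ *	and tag checksum, and optionally wait for the buffer to reach disk.
+ */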
+static int
+udf_update_inode(struct inode *inode, int do_sync)
+{
+	struct buffer_head *bh = NULL;
+	struct fileEntry *fe;
+	struct extendedFileEntry *efe;
+	uint32_t udfperms;
+	uint16_t icbflags;
+	uint16_t crclen;
+	int i;
+	kernel_timestamp cpu_time;
+	int err = 0;
+
+	bh = udf_tread(inode->i_sb,
+		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
+
+	if (!bh)
+	{
+		udf_debug("bread failure\n");
+		return -EIO;
+	}
+
+	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
+
+	fe = (struct fileEntry *)bh->b_data;
+	efe = (struct extendedFileEntry *)bh->b_data;
+
+	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
+	{
+		struct unallocSpaceEntry *use =
+			(struct unallocSpaceEntry *)bh->b_data;
+
+		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
+		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
+		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
+			sizeof(tag);
+		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+		use->descTag.descCRCLength = cpu_to_le16(crclen);
+		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
+
+		use->descTag.tagChecksum = 0;
+		for (i=0; i<16; i++)
+			if (i != 4)
+				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
+
+		mark_buffer_dirty(bh);
+		udf_release_data(bh);
+		return err;
+	}
+
+	if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
+		fe->uid = cpu_to_le32(inode->i_uid);
+
+	if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
+		fe->gid = cpu_to_le32(inode->i_gid);
+
+	udfperms =	((inode->i_mode & S_IRWXO)     ) |
+			((inode->i_mode & S_IRWXG) << 2) |
+			((inode->i_mode & S_IRWXU) << 4);
+
+	udfperms |=	(le32_to_cpu(fe->permissions) &
+			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
+			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
+			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
+	fe->permissions = cpu_to_le32(udfperms);
+
+	if (S_ISDIR(inode->i_mode))
+		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
+	else
+		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
+
+	fe->informationLength = cpu_to_le64(inode->i_size);
+
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+	{
+		regid *eid;
+		struct deviceSpec *dsea =
+			(struct deviceSpec *)
+				udf_get_extendedattr(inode, 12, 1);
+
+		if (!dsea)
+		{
+			dsea = (struct deviceSpec *)
+				udf_add_extendedattr(inode,
+					sizeof(struct deviceSpec) +
+					sizeof(regid), 12, 0x3);
+			dsea->attrType = cpu_to_le32(12);
+			dsea->attrSubtype = 1;
+			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
+				sizeof(regid));
+			dsea->impUseLength = cpu_to_le32(sizeof(regid));
+		}
+		eid = (regid *)dsea->impUse;
+		memset(eid, 0, sizeof(regid));
+		strcpy(eid->ident, UDF_ID_DEVELOPER);
+		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
+		eid->identSuffix[1] = UDF_OS_ID_LINUX;
+		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
+		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
+	}
+
+	if (UDF_I_EFE(inode) == 0)
+	{
+		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+		fe->logicalBlocksRecorded = cpu_to_le64(
+			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
+			(inode->i_sb->s_blocksize_bits - 9));
+
+		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
+			fe->accessTime = cpu_to_lets(cpu_time);
+		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
+			fe->modificationTime = cpu_to_lets(cpu_time);
+		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
+			fe->attrTime = cpu_to_lets(cpu_time);
+		memset(&(fe->impIdent), 0, sizeof(regid));
+		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
+		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
+		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
+		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
+		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
+		crclen = sizeof(struct fileEntry);
+	}
+	else
+	{
+		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+		efe->objectSize = cpu_to_le64(inode->i_size);
+		efe->logicalBlocksRecorded = cpu_to_le64(
+			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
+			(inode->i_sb->s_blocksize_bits - 9));
+
+		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
+			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
+			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
+		{
+			UDF_I_CRTIME(inode) = inode->i_atime;
+		}
+		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
+			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
+			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
+		{
+			UDF_I_CRTIME(inode) = inode->i_mtime;
+		}
+		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
+			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
+			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
+		{
+			UDF_I_CRTIME(inode) = inode->i_ctime;
+		}
+
+		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
+			efe->accessTime = cpu_to_lets(cpu_time);
+		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
+			efe->modificationTime = cpu_to_lets(cpu_time);
+		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
+			efe->createTime = cpu_to_lets(cpu_time);
+		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
+			efe->attrTime = cpu_to_lets(cpu_time);
+
+		memset(&(efe->impIdent), 0, sizeof(regid));
+		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
+		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
+		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
+		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
+		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
+		crclen = sizeof(struct extendedFileEntry);
+	}
+	if (UDF_I_STRAT4096(inode))
+	{
+		fe->icbTag.strategyType = cpu_to_le16(4096);
+		fe->icbTag.strategyParameter = cpu_to_le16(1);
+		fe->icbTag.numEntries = cpu_to_le16(2);
+	}
+	else
+	{
+		fe->icbTag.strategyType = cpu_to_le16(4);
+		fe->icbTag.numEntries = cpu_to_le16(1);
+	}
+
+	if (S_ISDIR(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
+	else if (S_ISREG(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
+	else if (S_ISLNK(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
+	else if (S_ISBLK(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
+	else if (S_ISCHR(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
+	else if (S_ISFIFO(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
+	else if (S_ISSOCK(inode->i_mode))
+		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
+
+	icbflags =	UDF_I_ALLOCTYPE(inode) |
+			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
+			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
+			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
+			(le16_to_cpu(fe->icbTag.flags) &
+				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
+				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
+
+	fe->icbTag.flags = cpu_to_le16(icbflags);
+	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+		fe->descTag.descVersion = cpu_to_le16(3);
+	else
+		fe->descTag.descVersion = cpu_to_le16(2);
+	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
+	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
+	fe->descTag.descCRCLength = cpu_to_le16(crclen);
+	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
+
+	fe->descTag.tagChecksum = 0;
+	for (i=0; i<16; i++)
+		if (i != 4)
+			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
+
+	/* write the data blocks */
+	mark_buffer_dirty(bh);
+	if (do_sync)
+	{
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh))
+		{
+			printk(KERN_ERR "IO error syncing udf inode [%s:%08lx]\n",
+				inode->i_sb->s_id, inode->i_ino);
+			err = -EIO;
+		}
+	}
+	udf_release_data(bh);
+	return err;
+}
+
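+/*
+ * udf_iget
+ *
+ * PURPOSE
+ *	Return the inode that lives at the given logical block address,
+ *	reading it in if it is not already cached; returns NULL if the inode
+ *	is bad or lies outside its partition.
+ */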
+struct inode *
+udf_iget(struct super_block *sb, kernel_lb_addr ino)
+{
+	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
+	struct inode *inode = iget_locked(sb, block);
+
+	if (!inode)
+		return NULL;
+
+	if (inode->i_state & I_NEW) {
+		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
+		__udf_read_inode(inode);
+		unlock_new_inode(inode);
+	}
+
+	if (is_bad_inode(inode))
+		goto out_iput;
+
+	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
+		udf_debug("block=%d, partition=%d out of range\n",
+			ino.logicalBlockNum, ino.partitionReferenceNum);
+		make_bad_inode(inode);
+		goto out_iput;
+	}
+
+	return inode;
+
+ out_iput:
+	iput(inode);
+	return NULL;
+}
+
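+/*
+ * udf_add_aext
+ *
+ * PURPOSE
+ *	Append an allocation descriptor for (eloc, elen) at *extoffset,
+ *	chaining in a newly allocated allocation extent descriptor block
+ *	when the current descriptor area is about to overflow.
+ */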
+int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
+	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
+{
+	int adsize;
+	short_ad *sad = NULL;
+	long_ad *lad = NULL;
+	struct allocExtDesc *aed;
+	int8_t etype;
+	uint8_t *ptr;
+
+	if (!*bh)
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+	else
+		ptr = (*bh)->b_data + *extoffset;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		return -1;
+
+	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
+	{
+		char *sptr, *dptr;
+		struct buffer_head *nbh;
+		int err, loffset;
+		kernel_lb_addr obloc = *bloc;
+
+		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
+			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
+		{
+			return -1;
+		}
+		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
+			*bloc, 0))))
+		{
+			return -1;
+		}
+		lock_buffer(nbh);
+		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
+		set_buffer_uptodate(nbh);
+		unlock_buffer(nbh);
+		mark_buffer_dirty_inode(nbh, inode);
+
+		aed = (struct allocExtDesc *)(nbh->b_data);
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
+			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
+		if (*extoffset + adsize > inode->i_sb->s_blocksize)
+		{
+			loffset = *extoffset;
+			aed->lengthAllocDescs = cpu_to_le32(adsize);
+			sptr = ptr - adsize;
+			dptr = nbh->b_data + sizeof(struct allocExtDesc);
+			memcpy(dptr, sptr, adsize);
+			*extoffset = sizeof(struct allocExtDesc) + adsize;
+		}
+		else
+		{
+			loffset = *extoffset + adsize;
+			aed->lengthAllocDescs = cpu_to_le32(0);
+			sptr = ptr;
+			*extoffset = sizeof(struct allocExtDesc);
+
+			if (*bh)
+			{
+				aed = (struct allocExtDesc *)(*bh)->b_data;
+				aed->lengthAllocDescs =
+					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+			}
+			else
+			{
+				UDF_I_LENALLOC(inode) += adsize;
+				mark_inode_dirty(inode);
+			}
+		}
+		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
+				bloc->logicalBlockNum, sizeof(tag));
+		else
+			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
+				bloc->logicalBlockNum, sizeof(tag));
+		switch (UDF_I_ALLOCTYPE(inode))
+		{
+			case ICBTAG_FLAG_AD_SHORT:
+			{
+				sad = (short_ad *)sptr;
+				sad->extLength = cpu_to_le32(
+					EXT_NEXT_EXTENT_ALLOCDECS |
+					inode->i_sb->s_blocksize);
+				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
+				break;
+			}
+			case ICBTAG_FLAG_AD_LONG:
+			{
+				lad = (long_ad *)sptr;
+				lad->extLength = cpu_to_le32(
+					EXT_NEXT_EXTENT_ALLOCDECS |
+					inode->i_sb->s_blocksize);
+				lad->extLocation = cpu_to_lelb(*bloc);
+				memset(lad->impUse, 0x00, sizeof(lad->impUse));
+				break;
+			}
+		}
+		if (*bh)
+		{
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag((*bh)->b_data, loffset);
+			else
+				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
+			mark_buffer_dirty_inode(*bh, inode);
+			udf_release_data(*bh);
+		}
+		else
+			mark_inode_dirty(inode);
+		*bh = nbh;
+	}
+
+	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
+
+	if (!*bh)
+	{
+		UDF_I_LENALLOC(inode) += adsize;
+		mark_inode_dirty(inode);
+	}
+	else
+	{
+		aed = (struct allocExtDesc *)(*bh)->b_data;
+		aed->lengthAllocDescs =
+			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
+		else
+			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
+		mark_buffer_dirty_inode(*bh, inode);
+	}
+
+	return etype;
+}
+
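+/*
+ * udf_write_aext
+ *
+ * PURPOSE
+ *	Store a single short_ad/long_ad allocation descriptor at *extoffset,
+ *	advancing the offset if 'inc' is set; returns the extent type (the
+ *	top two bits of elen).
+ */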
+int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
+    kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
+{
+	int adsize;
+	uint8_t *ptr;
+
+	if (!bh)
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+	else
+	{
+		ptr = bh->b_data + *extoffset;
+		atomic_inc(&bh->b_count);
+	}
+
+	switch (UDF_I_ALLOCTYPE(inode))
+	{
+		case ICBTAG_FLAG_AD_SHORT:
+		{
+			short_ad *sad = (short_ad *)ptr;
+			sad->extLength = cpu_to_le32(elen);
+			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
+			adsize = sizeof(short_ad);
+			break;
+		}
+		case ICBTAG_FLAG_AD_LONG:
+		{
+			long_ad *lad = (long_ad *)ptr;
+			lad->extLength = cpu_to_le32(elen);
+			lad->extLocation = cpu_to_lelb(eloc);
+			memset(lad->impUse, 0x00, sizeof(lad->impUse));
+			adsize = sizeof(long_ad);
+			break;
+		}
+		default:
+			return -1;
+	}
+
+	if (bh)
+	{
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+		{
+			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
+			udf_update_tag((bh)->b_data,
+				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
+		}
+		mark_buffer_dirty_inode(bh, inode);
+		udf_release_data(bh);
+	}
+	else
+		mark_inode_dirty(inode);
+
+	if (inc)
+		*extoffset += adsize;
+	return (elen >> 30);
+}
+
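+/*
+ * udf_next_aext
+ *
+ * PURPOSE
+ *	Return the next allocation descriptor, transparently following
+ *	"next allocation extent" pointers into continuation blocks.
+ */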
+int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
+	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
+{
+	int8_t etype;
+
+	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
+		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
+	{
+		*bloc = *eloc;
+		*extoffset = sizeof(struct allocExtDesc);
+		udf_release_data(*bh);
+		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
+		{
+			udf_debug("reading block %d failed!\n",
+				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
+			return -1;
+		}
+	}
+
+	return etype;
+}
+
+int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
+	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
+{
+	int alen;
+	int8_t etype;
+	uint8_t *ptr;
+
+	if (!*bh)
+	{
+		if (!(*extoffset))
+			*extoffset = udf_file_entry_alloc_offset(inode);
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
+	}
+	else
+	{
+		if (!(*extoffset))
+			*extoffset = sizeof(struct allocExtDesc);
+		ptr = (*bh)->b_data + *extoffset;
+		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
+	}
+
+	switch (UDF_I_ALLOCTYPE(inode))
+	{
+		case ICBTAG_FLAG_AD_SHORT:
+		{
+			short_ad *sad;
+
+			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
+				return -1;
+
+			etype = le32_to_cpu(sad->extLength) >> 30;
+			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
+			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
+			break;
+		}
+		case ICBTAG_FLAG_AD_LONG:
+		{
+			long_ad *lad;
+
+			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
+				return -1;
+
+			etype = le32_to_cpu(lad->extLength) >> 30;
+			*eloc = lelb_to_cpu(lad->extLocation);
+			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
+			break;
+		}
+		default:
+		{
+			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
+			return -1;
+		}
+	}
+
+	return etype;
+}
+
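+/*
+ * udf_insert_aext
+ *
+ * PURPOSE
+ *	Insert an allocation descriptor at the given position, shifting every
+ *	following descriptor one slot forward and appending the last one with
+ *	udf_add_aext().
+ */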
+static int8_t
+udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
+		kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
+{
+	kernel_lb_addr oeloc;
+	uint32_t oelen;
+	int8_t etype;
+
+	if (bh)
+		atomic_inc(&bh->b_count);
+
+	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
+	{
+		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
+
+		neloc = oeloc;
+		nelen = (etype << 30) | oelen;
+	}
+	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
+	udf_release_data(bh);
+	return (nelen >> 30);
+}
+
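+/*
+ * udf_delete_aext
+ *
+ * PURPOSE
+ *	Delete the allocation descriptor at the given position by copying
+ *	each following descriptor one slot back, shrinking the descriptor
+ *	area and freeing a continuation block that becomes unused.
+ */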
+int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
+	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
+{
+	struct buffer_head *obh;
+	kernel_lb_addr obloc;
+	int oextoffset, adsize;
+	int8_t etype;
+	struct allocExtDesc *aed;
+
+	if (nbh)
+	{
+		atomic_inc(&nbh->b_count);
+		atomic_inc(&nbh->b_count);
+	}
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		adsize = 0;
+
+	obh = nbh;
+	obloc = nbloc;
+	oextoffset = nextoffset;
+
+	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
+		return -1;
+
+	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
+	{
+		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
+		if (obh != nbh)
+		{
+			obloc = nbloc;
+			udf_release_data(obh);
+			atomic_inc(&nbh->b_count);
+			obh = nbh;
+			oextoffset = nextoffset - adsize;
+		}
+	}
+	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
+	elen = 0;
+
+	if (nbh != obh)
+	{
+		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
+		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
+		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
+		if (!obh)
+		{
+			UDF_I_LENALLOC(inode) -= (adsize * 2);
+			mark_inode_dirty(inode);
+		}
+		else
+		{
+			aed = (struct allocExtDesc *)(obh)->b_data;
+			aed->lengthAllocDescs =
+				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
+			else
+				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
+			mark_buffer_dirty_inode(obh, inode);
+		}
+	}
+	else
+	{
+		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
+		if (!obh)
+		{
+			UDF_I_LENALLOC(inode) -= adsize;
+			mark_inode_dirty(inode);
+		}
+		else
+		{
+			aed = (struct allocExtDesc *)(obh)->b_data;
+			aed->lengthAllocDescs =
+				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag((obh)->b_data, oextoffset - adsize);
+			else
+				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
+			mark_buffer_dirty_inode(obh, inode);
+		}
+	}
+	
+	udf_release_data(nbh);
+	udf_release_data(obh);
+	return (elen >> 30);
+}
+
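+/*
+ * inode_bmap
+ *
+ * PURPOSE
+ *	Walk the allocation descriptors to find the extent that contains the
+ *	given file block; returns the extent type and, via *offset, the byte
+ *	offset of the block within that extent, or -1 when the block lies
+ *	beyond the last extent.
+ */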
+int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
+	kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
+{
+	uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
+	int8_t etype;
+
+	if (block < 0)
+	{
+		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
+		return -1;
+	}
+	if (!inode)
+	{
+		printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
+		return -1;
+	}
+
+	*extoffset = 0;
+	*elen = 0;
+	*bloc = UDF_I_LOCATION(inode);
+
+	do
+	{
+		if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
+		{
+			*offset = bcount - lbcount;
+			UDF_I_LENEXTENTS(inode) = lbcount;
+			return -1;
+		}
+		lbcount += *elen;
+	} while (lbcount <= bcount);
+
+	*offset = bcount + *elen - lbcount;
+
+	return etype;
+}
+
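+/*
+ * udf_block_map
+ *
+ * PURPOSE
+ *	Map a file block to its block number on the device, or return 0 if
+ *	the block is not recorded; applies the fixed-to-variable address
+ *	conversion when the UDF_FLAG_VARCONV flag is set.
+ */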
+long udf_block_map(struct inode *inode, long block)
+{
+	kernel_lb_addr eloc, bloc;
+	uint32_t offset, extoffset, elen;
+	struct buffer_head *bh = NULL;
+	int ret;
+
+	lock_kernel();
+
+	if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
+		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
+	else
+		ret = 0;
+
+	unlock_kernel();
+	udf_release_data(bh);
+
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
+		return udf_fixed_to_variable(ret);
+	else
+		return ret;
+}
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
new file mode 100644
index 0000000..2da5087
--- /dev/null
+++ b/fs/udf/lowlevel.c
@@ -0,0 +1,77 @@
+/*
+ * lowlevel.c
+ *
+ * PURPOSE
+ *  Low Level Device Routines for the UDF filesystem
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1999-2001 Ben Fennema
+ *
+ * HISTORY
+ *
+ *  03/26/99 blf  Created.
+ */
+
+#include "udfdecl.h"
+
+#include <linux/blkdev.h>
+#include <linux/cdrom.h>
+#include <asm/uaccess.h>
+
+#include <linux/udf_fs.h>
+#include "udf_sb.h"
+
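+/*
+ * udf_get_last_session
+ *
+ * PURPOSE
+ *	Query the block device with the CDROMMULTISESSION ioctl and return
+ *	the start of the last session; returns 0 when the ioctl fails or the
+ *	disc is not an XA disc.
+ */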
+unsigned int 
+udf_get_last_session(struct super_block *sb)
+{
+	struct cdrom_multisession ms_info;
+	unsigned int vol_desc_start;
+	struct block_device *bdev = sb->s_bdev;
+	int i;
+
+	vol_desc_start=0;
+	ms_info.addr_format=CDROM_LBA;
+	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
+
+#define WE_OBEY_THE_WRITTEN_STANDARDS 1
+
+	if (i == 0)
+	{
+		udf_debug("XA disk: %s, vol_desc_start=%d\n",
+			(ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
+#if WE_OBEY_THE_WRITTEN_STANDARDS
+		if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
+#endif
+			vol_desc_start = ms_info.addr.lba;
+	}
+	else
+	{
+		udf_debug("CDROMMULTISESSION not supported: rc=%d\n", i);
+	}
+	return vol_desc_start;
+}
+
+unsigned long
+udf_get_last_block(struct super_block *sb)
+{
+	struct block_device *bdev = sb->s_bdev;
+	unsigned long lblock = 0;
+
+	if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock))
+		lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+
+	if (lblock)
+		return lblock - 1;
+	else
+		return 0;
+}
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
new file mode 100644
index 0000000..fd321f9
--- /dev/null
+++ b/fs/udf/misc.c
@@ -0,0 +1,313 @@
+/*
+ * misc.c
+ *
+ * PURPOSE
+ *	Miscellaneous routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998 Dave Boynton
+ *  (C) 1998-2004 Ben Fennema
+ *  (C) 1999-2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  04/19/99 blf  partial support for reading/writing specific EA's
+ */
+
+#include "udfdecl.h"
+
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/udf_fs.h>
+#include <linux/buffer_head.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
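+/*
+ * udf_tgetblk / udf_tread
+ *
+ * PURPOSE
+ *	getblk/bread wrappers that remap block numbers through
+ *	udf_fixed_to_variable() when the UDF_FLAG_VARCONV flag is set.
+ */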
+struct buffer_head *
+udf_tgetblk(struct super_block *sb, int block)
+{
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
+		return sb_getblk(sb, udf_fixed_to_variable(block));
+	else
+		return sb_getblk(sb, block);
+}
+
+struct buffer_head *
+udf_tread(struct super_block *sb, int block)
+{
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
+		return sb_bread(sb, udf_fixed_to_variable(block));
+	else
+		return sb_bread(sb, block);
+}
+
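+/*
+ * udf_add_extendedattr
+ *
+ * PURPOSE
+ *	Reserve room for a new extended attribute of the given type inside
+ *	the file entry's EA area, creating the EA header descriptor if it is
+ *	not there yet; returns a pointer to the new attribute or NULL.
+ */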
+struct genericFormat *
+udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
+	uint8_t loc)
+{
+	uint8_t *ea = NULL, *ad = NULL;
+	int offset;
+	uint16_t crclen;
+	int i;
+
+	ea = UDF_I_DATA(inode);
+	if (UDF_I_LENEATTR(inode))
+		ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
+	else
+	{
+		ad = ea;
+		size += sizeof(struct extendedAttrHeaderDesc);
+	}
+
+	offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
+		UDF_I_LENALLOC(inode);
+
+	/* TODO - Check for FreeEASpace */
+
+	if (loc & 0x01 && offset >= size)
+	{
+		struct extendedAttrHeaderDesc *eahd;
+		eahd = (struct extendedAttrHeaderDesc *)ea;
+
+		if (UDF_I_LENALLOC(inode))
+		{
+			memmove(&ad[size], ad, UDF_I_LENALLOC(inode));
+		}
+
+		if (UDF_I_LENEATTR(inode))
+		{
+			/* check checksum/crc */
+			if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
+				le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
+			{
+				return NULL;
+			}
+		}
+		else
+		{
+			size -= sizeof(struct extendedAttrHeaderDesc);
+			UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc);
+			eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
+			if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
+				eahd->descTag.descVersion = cpu_to_le16(3);
+			else
+				eahd->descTag.descVersion = cpu_to_le16(2);
+			eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
+			eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+			eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
+			eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
+		}
+
+		offset = UDF_I_LENEATTR(inode);
+		if (type < 2048)
+		{
+			if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
+			{
+				uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
+				memmove(&ea[offset - aal + size],
+					&ea[aal], offset - aal);
+				offset -= aal;
+				eahd->appAttrLocation = cpu_to_le32(aal + size);
+			}
+			if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode))
+			{
+				uint32_t ial = le32_to_cpu(eahd->impAttrLocation);
+				memmove(&ea[offset - ial + size],
+					&ea[ial], offset - ial);
+				offset -= ial;
+				eahd->impAttrLocation = cpu_to_le32(ial + size);
+			}
+		}
+		else if (type < 65536)
+		{
+			if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
+			{
+				uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
+				memmove(&ea[offset - aal + size],
+					&ea[aal], offset - aal);
+				offset -= aal;
+				eahd->appAttrLocation = cpu_to_le32(aal + size);
+			}
+		}
+		/* rewrite CRC + checksum of eahd */
+		crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
+		eahd->descTag.descCRCLength = cpu_to_le16(crclen);
+		eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + sizeof(tag), crclen, 0));
+		eahd->descTag.tagChecksum = 0;
+		for (i=0; i<16; i++)
+			if (i != 4)
+				eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i];
+		UDF_I_LENEATTR(inode) += size;
+		return (struct genericFormat *)&ea[offset];
+	}
+	if (loc & 0x02)
+	{
+		/* Other EA locations (loc & 0x02) are not handled here. */
+	}
+	return NULL;
+}
+
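+/*
+ * udf_get_extendedattr
+ *
+ * PURPOSE
+ *	Search the file entry's extended attribute area for an attribute with
+ *	the given type and subtype; returns NULL if it is not present or the
+ *	EA header descriptor is invalid.
+ */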
+struct genericFormat *
+udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
+{
+	struct genericFormat *gaf;
+	uint8_t *ea = NULL;
+	uint32_t offset;
+
+	ea = UDF_I_DATA(inode);
+
+	if (UDF_I_LENEATTR(inode))
+	{
+		struct extendedAttrHeaderDesc *eahd;
+		eahd = (struct extendedAttrHeaderDesc *)ea;
+
+		/* check checksum/crc */
+		if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
+			le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
+		{
+			return NULL;
+		}
+	
+		if (type < 2048)
+			offset = sizeof(struct extendedAttrHeaderDesc);
+		else if (type < 65536)
+			offset = le32_to_cpu(eahd->impAttrLocation);
+		else
+			offset = le32_to_cpu(eahd->appAttrLocation);
+
+		while (offset < UDF_I_LENEATTR(inode))
+		{
+			gaf = (struct genericFormat *)&ea[offset];
+			if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype)
+				return gaf;
+			else
+				offset += le32_to_cpu(gaf->attrLength);
+		}
+	}
+	return NULL;
+}
+
+/*
+ * udf_read_tagged
+ *
+ * PURPOSE
+ *	Read the first block of a tagged descriptor.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+struct buffer_head *
+udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint16_t *ident)
+{
+	tag *tag_p;
+	struct buffer_head *bh = NULL;
+	register uint8_t checksum;
+	register int i;
+
+	/* Read the block */
+	if (block == 0xFFFFFFFF)
+		return NULL;
+
+	bh = udf_tread(sb, block + UDF_SB_SESSION(sb));
+	if (!bh)
+	{
+		udf_debug("block=%d, location=%d: read failed\n", block + UDF_SB_SESSION(sb), location);
+		return NULL;
+	}
+
+	tag_p = (tag *)(bh->b_data);
+
+	*ident = le16_to_cpu(tag_p->tagIdent);
+
+	if ( location != le32_to_cpu(tag_p->tagLocation) )
+	{
+		udf_debug("location mismatch block %u, tag %u != %u\n",
+			block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
+		goto error_out;
+	}
+	
+	/* Verify the tag checksum */
+	checksum = 0U;
+	for (i = 0; i < 4; i++)
+		checksum += (uint8_t)(bh->b_data[i]);
+	for (i = 5; i < 16; i++)
+		checksum += (uint8_t)(bh->b_data[i]);
+	if (checksum != tag_p->tagChecksum) {
+		printk(KERN_ERR "udf: tag checksum failed block %d\n", block);
+		goto error_out;
+	}
+
+	/* Verify the tag version */
+	if (le16_to_cpu(tag_p->descVersion) != 0x0002U &&
+		le16_to_cpu(tag_p->descVersion) != 0x0003U)
+	{
+		udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n",
+			le16_to_cpu(tag_p->descVersion), block);
+		goto error_out;
+	}
+
+	/* Verify the descriptor CRC */
+	if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
+		le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
+			le16_to_cpu(tag_p->descCRCLength), 0))
+	{
+		return bh;
+	}
+	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
+		block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
+
+error_out:
+	brelse(bh);
+	return NULL;
+}
+
+struct buffer_head *
+udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, uint32_t offset, uint16_t *ident)
+{
+	return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
+		loc.logicalBlockNum + offset, ident);
+}
+
+void udf_release_data(struct buffer_head *bh)
+{
+	if (bh)
+		brelse(bh);
+}
+
+void udf_update_tag(char *data, int length)
+{
+	tag *tptr = (tag *)data;
+	int i;
+
+	length -= sizeof(tag);
+
+	tptr->tagChecksum = 0;
+	tptr->descCRCLength = cpu_to_le16(length);
+	tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
+
+	for (i=0; i<16; i++)
+		if (i != 4)
+			tptr->tagChecksum += (uint8_t)(data[i]);
+}
+
+void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
+	uint32_t loc, int length)
+{
+	tag *tptr = (tag *)data;
+	tptr->tagIdent = cpu_to_le16(ident);
+	tptr->descVersion = cpu_to_le16(version);
+	tptr->tagSerialNum = cpu_to_le16(snum);
+	tptr->tagLocation = cpu_to_le32(loc);
+	udf_update_tag(data, length);
+}
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
new file mode 100644
index 0000000..3f6dc71
--- /dev/null
+++ b/fs/udf/namei.c
@@ -0,0 +1,1334 @@
+/*
+ * namei.c
+ *
+ * PURPOSE
+ *      Inode name handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *      E-mail regarding any portion of the Linux UDF file system should be
+ *      directed to the development team mailing list (run by majordomo):
+ *              linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *      This file is distributed under the terms of the GNU General Public
+ *      License (GPL). Copies of the GPL can be obtained from:
+ *              ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *      Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-2004 Ben Fennema
+ *  (C) 1999-2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  12/12/98 blf  Created. Split out the lookup code from dir.c
+ *  04/19/99 blf  link, mknod, symlink support
+ */
+
+#include "udfdecl.h"
+
+#include "udf_i.h"
+#include "udf_sb.h"
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/quotaops.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+static inline int udf_match(int len1, const char *name1, int len2, const char *name2)
+{
+	if (len1 != len2)
+		return 0;
+	return !memcmp(name1, name2, len1);
+}
+
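+/*
+ * udf_write_fi
+ *
+ * PURPOSE
+ *	Write a file identifier descriptor into the directory, handling
+ *	entries that straddle two directory blocks, and recompute its CRC
+ *	and tag checksum.
+ */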
+int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
+	struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
+	uint8_t *impuse, uint8_t *fileident)
+{
+	uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag);
+	uint16_t crc;
+	uint8_t checksum = 0;
+	int i;
+	int offset;
+	uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
+	uint8_t lfi = cfi->lengthFileIdent;
+	int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
+		sizeof(struct fileIdentDesc);
+	int adinicb = 0;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+		adinicb = 1;
+
+	offset = fibh->soffset + sizeof(struct fileIdentDesc);
+
+	if (impuse)
+	{
+		if (adinicb || (offset + liu < 0))
+			memcpy((uint8_t *)sfi->impUse, impuse, liu);
+		else if (offset >= 0)
+			memcpy(fibh->ebh->b_data + offset, impuse, liu);
+		else
+		{
+			memcpy((uint8_t *)sfi->impUse, impuse, -offset);
+			memcpy(fibh->ebh->b_data, impuse - offset, liu + offset);
+		}
+	}
+
+	offset += liu;
+
+	if (fileident)
+	{
+		if (adinicb || (offset + lfi < 0))
+			memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
+		else if (offset >= 0)
+			memcpy(fibh->ebh->b_data + offset, fileident, lfi);
+		else
+		{
+			memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset);
+			memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset);
+		}
+	}
+
+	offset += lfi;
+
+	if (adinicb || (offset + padlen < 0))
+		memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
+	else if (offset >= 0)
+		memset(fibh->ebh->b_data + offset, 0x00, padlen);
+	else
+	{
+		memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
+		memset(fibh->ebh->b_data, 0x00, padlen + offset);
+	}
+
+	crc = udf_crc((uint8_t *)cfi + sizeof(tag), sizeof(struct fileIdentDesc) -
+		sizeof(tag), 0);
+
+	if (fibh->sbh == fibh->ebh)
+		crc = udf_crc((uint8_t *)sfi->impUse,
+			crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
+	else if (sizeof(struct fileIdentDesc) >= -fibh->soffset)
+		crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset,
+			crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
+	else
+	{
+		crc = udf_crc((uint8_t *)sfi->impUse,
+			-fibh->soffset - sizeof(struct fileIdentDesc), crc);
+		crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
+	}
+
+	cfi->descTag.descCRC = cpu_to_le16(crc);
+	cfi->descTag.descCRCLength = cpu_to_le16(crclen);
+
+	for (i=0; i<16; i++)
+		if (i != 4)
+			checksum += ((uint8_t *)&cfi->descTag)[i];
+
+	cfi->descTag.tagChecksum = checksum;
+	if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
+		memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
+	else
+	{
+		memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
+		memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
+			sizeof(struct fileIdentDesc) + fibh->soffset);
+	}
+
+	if (adinicb)
+		mark_inode_dirty(inode);
+	else
+	{
+		if (fibh->sbh != fibh->ebh)
+			mark_buffer_dirty_inode(fibh->ebh, inode);
+		mark_buffer_dirty_inode(fibh->sbh, inode);
+	}
+	return 0;
+}
+
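+/*
+ * udf_find_entry
+ *
+ * PURPOSE
+ *	Scan the directory for an entry matching the dentry's name, skipping
+ *	deleted and hidden entries unless the undelete/unhide mount options
+ *	are set; returns the file identifier descriptor or NULL.
+ */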
+static struct fileIdentDesc *
+udf_find_entry(struct inode *dir, struct dentry *dentry,
+	struct udf_fileident_bh *fibh,
+	struct fileIdentDesc *cfi)
+{
+	struct fileIdentDesc *fi=NULL;
+	loff_t f_pos;
+	int block, flen;
+	char fname[UDF_NAME_LEN];
+	char *nameptr;
+	uint8_t lfi;
+	uint16_t liu;
+	loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
+	kernel_lb_addr bloc, eloc;
+	uint32_t extoffset, elen, offset;
+	struct buffer_head *bh = NULL;
+
+	if (!dir)
+		return NULL;
+
+	f_pos = (udf_ext0_offset(dir) >> 2);
+
+	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh->sbh = fibh->ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
+	{
+		offset >>= dir->i_sb->s_blocksize_bits;
+		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
+		{
+			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
+				extoffset -= sizeof(short_ad);
+			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
+				extoffset -= sizeof(long_ad);
+		}
+		else
+			offset = 0;
+
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			return NULL;
+		}
+	}
+	else
+	{
+		udf_release_data(bh);
+		return NULL;
+	}
+
+	while ( (f_pos < size) )
+	{
+		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+
+		if (!fi)
+		{
+			if (fibh->sbh != fibh->ebh)
+				udf_release_data(fibh->ebh);
+			udf_release_data(fibh->sbh);
+			udf_release_data(bh);
+			return NULL;
+		}
+
+		liu = le16_to_cpu(cfi->lengthOfImpUse);
+		lfi = cfi->lengthFileIdent;
+
+		if (fibh->sbh == fibh->ebh)
+		{
+			nameptr = fi->fileIdent + liu;
+		}
+		else
+		{
+			int poffset;	/* Unpadded ending offset */
+
+			poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+
+			if (poffset >= lfi)
+				nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi);
+			else
+			{
+				nameptr = fname;
+				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
+				memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
+			}
+		}
+
+		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
+		{
+			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
+				continue;
+		}
+	    
+		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
+		{
+			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+				continue;
+		}
+
+		if (!lfi)
+			continue;
+
+		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)))
+		{
+			if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
+			{
+				udf_release_data(bh);
+				return fi;
+			}
+		}
+	}
+	if (fibh->sbh != fibh->ebh)
+		udf_release_data(fibh->ebh);
+	udf_release_data(fibh->sbh);
+	udf_release_data(bh);
+	return NULL;
+}
+
+/*
+ * udf_lookup
+ *
+ * PURPOSE
+ *	Look-up the inode for a given name.
+ *
+ * DESCRIPTION
+ *	Required - lookup_dentry() will return -ENOTDIR if this routine is not
+ *	available for a directory. The filesystem is useless if this routine is
+ *	not available for at least the filesystem's root directory.
+ *
+ *	This routine is passed an incomplete dentry - it must be completed by
+ *	calling d_add(dentry, inode). If the name does not exist, then the
+ *	specified inode must be set to null. An error should only be returned
+ *	when the lookup fails for a reason other than the name not existing.
+ *	Note that the directory inode semaphore is held during the call.
+ *
+ *	Refer to lookup_dentry() in fs/namei.c
+ *	lookup_dentry() -> lookup() -> real_lookup() -> .
+ *
+ * PRE-CONDITIONS
+ *	dir			Pointer to inode of parent directory.
+ *	dentry			Pointer to dentry to complete.
+ *	nd			Pointer to lookup nameidata
+ *
+ * POST-CONDITIONS
+ *	<return>		NULL on success (dentry completed) or ERR_PTR on failure.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+
+static struct dentry *
+udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = NULL;
+	struct fileIdentDesc cfi, *fi;
+	struct udf_fileident_bh fibh;
+
+	if (dentry->d_name.len > UDF_NAME_LEN-2)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	lock_kernel();
+#ifdef UDF_RECOVERY
+	/* temporary shorthand for specifying files by inode number */
+	if (!strncmp(dentry->d_name.name, ".B=", 3) )
+	{
+		kernel_lb_addr lb = { 0, simple_strtoul(dentry->d_name.name+3, NULL, 0) };
+		inode = udf_iget(dir->i_sb, lb);
+		if (!inode)
+		{
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	else
+#endif /* UDF_RECOVERY */
+
+	if ((fi = udf_find_entry(dir, dentry, &fibh, &cfi)))
+	{
+		if (fibh.sbh != fibh.ebh)
+			udf_release_data(fibh.ebh);
+		udf_release_data(fibh.sbh);
+
+		inode = udf_iget(dir->i_sb, lelb_to_cpu(cfi.icb.extLocation));
+		if ( !inode )
+		{
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	d_add(dentry, inode);
+	return NULL;
+}
+
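+/*
+ * udf_add_entry
+ *
+ * PURPOSE
+ *	Find room for a new file identifier descriptor of the required size,
+ *	reusing a deleted entry of the same length when possible, expanding
+ *	an in-ICB directory or allocating a new directory block as needed;
+ *	fails with -EEXIST if the name is already present.
+ */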
+static struct fileIdentDesc *
+udf_add_entry(struct inode *dir, struct dentry *dentry,
+	struct udf_fileident_bh *fibh,
+	struct fileIdentDesc *cfi, int *err)
+{
+	struct super_block *sb;
+	struct fileIdentDesc *fi=NULL;
+	char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
+	int namelen;
+	loff_t f_pos;
+	int flen;
+	char *nameptr;
+	loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
+	int nfidlen;
+	uint8_t lfi;
+	uint16_t liu;
+	int block;
+	kernel_lb_addr bloc, eloc;
+	uint32_t extoffset, elen, offset;
+	struct buffer_head *bh = NULL;
+
+	sb = dir->i_sb;
+
+	if (dentry)
+	{
+		if (!dentry->d_name.len)
+		{
+			*err = -EINVAL;
+			return NULL;
+		}
+
+		if ( !(namelen = udf_put_filename(sb, dentry->d_name.name, name, dentry->d_name.len)))
+		{
+			*err = -ENAMETOOLONG;
+			return NULL;
+		}
+	}
+	else
+		namelen = 0;
+
+	nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+
+	f_pos = (udf_ext0_offset(dir) >> 2);
+
+	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh->sbh = fibh->ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
+	{
+		offset >>= dir->i_sb->s_blocksize_bits;
+		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
+		{
+			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
+				extoffset -= sizeof(short_ad);
+			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
+				extoffset -= sizeof(long_ad);
+		}
+		else
+			offset = 0;
+
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			*err = -EIO;
+			return NULL;
+		}
+
+		block = UDF_I_LOCATION(dir).logicalBlockNum;
+
+	}
+	else
+	{
+		block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
+		fibh->sbh = fibh->ebh = NULL;
+		fibh->soffset = fibh->eoffset = sb->s_blocksize;
+		goto add;
+	}
+
+	while ( (f_pos < size) )
+	{
+		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+
+		if (!fi)
+		{
+			if (fibh->sbh != fibh->ebh)
+				udf_release_data(fibh->ebh);
+			udf_release_data(fibh->sbh);
+			udf_release_data(bh);
+			*err = -EIO;
+			return NULL;
+		}
+
+		liu = le16_to_cpu(cfi->lengthOfImpUse);
+		lfi = cfi->lengthFileIdent;
+
+		if (fibh->sbh == fibh->ebh)
+			nameptr = fi->fileIdent + liu;
+		else
+		{
+			int poffset;	/* Unpadded ending offset */
+
+			poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+
+			if (poffset >= lfi)
+				nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
+			else
+			{
+				nameptr = fname;
+				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
+				memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
+			}
+		}
+
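+		/* A deleted entry whose padded size matches the new entry
+		 * exactly can be reused in place instead of growing the
+		 * directory. */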
+		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
+		{
+			if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
+			{
+				udf_release_data(bh);
+				cfi->descTag.tagSerialNum = cpu_to_le16(1);
+				cfi->fileVersionNum = cpu_to_le16(1);
+				cfi->fileCharacteristics = 0;
+				cfi->lengthFileIdent = namelen;
+				cfi->lengthOfImpUse = cpu_to_le16(0);
+				if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
+					return fi;
+				else
+				{
+					*err = -EIO;
+					return NULL;
+				}
+			}
+		}
+
+		if (!lfi || !dentry)
+			continue;
+
+		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
+			udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
+		{
+			if (fibh->sbh != fibh->ebh)
+				udf_release_data(fibh->ebh);
+			udf_release_data(fibh->sbh);
+			udf_release_data(bh);
+			*err = -EEXIST;
+			return NULL;
+		}
+	}
+
+add:
+	f_pos += nfidlen;
+
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
+		sb->s_blocksize - fibh->eoffset < nfidlen)
+	{
+		udf_release_data(bh);
+		bh = NULL;
+		fibh->soffset -= udf_ext0_offset(dir);
+		fibh->eoffset -= udf_ext0_offset(dir);
+		f_pos -= (udf_ext0_offset(dir) >> 2);
+		if (fibh->sbh != fibh->ebh)
+			udf_release_data(fibh->ebh);
+		udf_release_data(fibh->sbh);
+		if (!(fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err)))
+			return NULL;
+		bloc = UDF_I_LOCATION(dir);
+		eloc.logicalBlockNum = block;
+		eloc.partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
+		elen = dir->i_sb->s_blocksize;
+		extoffset = udf_file_entry_alloc_offset(dir);
+		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
+			extoffset += sizeof(short_ad);
+		else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
+			extoffset += sizeof(long_ad);
+	}
+
+	if (sb->s_blocksize - fibh->eoffset >= nfidlen)
+	{
+		fibh->soffset = fibh->eoffset;
+		fibh->eoffset += nfidlen;
+		if (fibh->sbh != fibh->ebh)
+		{
+			udf_release_data(fibh->sbh);
+			fibh->sbh = fibh->ebh;
+		}
+
+		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		{
+			block = UDF_I_LOCATION(dir).logicalBlockNum;
+			fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir) + UDF_I_LENEATTR(dir));
+		}
+		else
+		{
+			block = eloc.logicalBlockNum + ((elen - 1) >>
+				dir->i_sb->s_blocksize_bits);
+			fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
+		}
+	}
+	else
+	{
+		fibh->soffset = fibh->eoffset - sb->s_blocksize;
+		fibh->eoffset += nfidlen - sb->s_blocksize;
+		if (fibh->sbh != fibh->ebh)
+		{
+			udf_release_data(fibh->sbh);
+			fibh->sbh = fibh->ebh;
+		}
+
+		block = eloc.logicalBlockNum + ((elen - 1) >>
+			dir->i_sb->s_blocksize_bits);
+
+		if (!(fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err)))
+		{
+			udf_release_data(bh);
+			udf_release_data(fibh->sbh);
+			return NULL;
+		}
+
+		if (!(fibh->soffset))
+		{
+			if (udf_next_aext(dir, &bloc, &extoffset, &eloc, &elen, &bh, 1) ==
+				(EXT_RECORDED_ALLOCATED >> 30))
+			{
+				block = eloc.logicalBlockNum + ((elen - 1) >>
+					dir->i_sb->s_blocksize_bits);
+			}
+			else
+				block ++;
+
+			udf_release_data(fibh->sbh);
+			fibh->sbh = fibh->ebh;
+			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
+		}
+		else
+		{
+			fi = (struct fileIdentDesc *)
+				(fibh->sbh->b_data + sb->s_blocksize + fibh->soffset);
+		}
+	}
+
+	memset(cfi, 0, sizeof(struct fileIdentDesc));
+	if (UDF_SB_UDFREV(sb) >= 0x0200)
+		udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag));
+	else
+		udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag));
+	cfi->fileVersionNum = cpu_to_le16(1);
+	cfi->lengthFileIdent = namelen;
+	cfi->lengthOfImpUse = cpu_to_le16(0);
+	if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
+	{
+		udf_release_data(bh);
+		dir->i_size += nfidlen;
+		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+			UDF_I_LENALLOC(dir) += nfidlen;
+		mark_inode_dirty(dir);
+		return fi;
+	}
+	else
+	{
+		udf_release_data(bh);
+		if (fibh->sbh != fibh->ebh)
+			udf_release_data(fibh->ebh);
+		udf_release_data(fibh->sbh);
+		*err = -EIO;
+		return NULL;
+	}
+}
+
+static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
+	struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi)
+{
+	cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
+		memset(&(cfi->icb), 0x00, sizeof(long_ad));
+	return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
+}
+
+static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+	struct udf_fileident_bh fibh;
+	struct inode *inode;
+	struct fileIdentDesc cfi, *fi;
+	int err;
+
+	lock_kernel();
+	inode = udf_new_inode(dir, mode, &err);
+	if (!inode)
+	{
+		unlock_kernel();
+		return err;
+	}
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+		inode->i_data.a_ops = &udf_adinicb_aops;
+	else
+		inode->i_data.a_ops = &udf_aops;
+	inode->i_op = &udf_file_inode_operations;
+	inode->i_fop = &udf_file_operations;
+	inode->i_mode = mode;
+	mark_inode_dirty(inode);
+
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
+	{
+		inode->i_nlink --;
+		mark_inode_dirty(inode);
+		iput(inode);
+		unlock_kernel();
+		return err;
+	}
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
+	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		mark_inode_dirty(dir);
+	}
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	unlock_kernel();
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+{
+	struct inode * inode;
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc cfi, *fi;
+	int err;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+
+	lock_kernel();
+	err = -EIO;
+	inode = udf_new_inode(dir, mode, &err);
+	if (!inode)
+		goto out;
+
+	inode->i_uid = current->fsuid;
+	init_special_inode(inode, mode, rdev);
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
+	{
+		inode->i_nlink --;
+		mark_inode_dirty(inode);
+		iput(inode);
+		unlock_kernel();
+		return err;
+	}
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
+	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		mark_inode_dirty(dir);
+	}
+	mark_inode_dirty(inode);
+
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	d_instantiate(dentry, inode);
+	err = 0;
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	struct inode * inode;
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc cfi, *fi;
+	int err;
+
+	lock_kernel();
+	err = -EMLINK;
+	if (dir->i_nlink >= (256<<sizeof(dir->i_nlink))-1)
+		goto out;
+
+	err = -EIO;
+	inode = udf_new_inode(dir, S_IFDIR, &err);
+	if (!inode)
+		goto out;
+
+	inode->i_op = &udf_dir_inode_operations;
+	inode->i_fop = &udf_dir_operations;
+	if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err)))
+	{
+		inode->i_nlink--;
+		mark_inode_dirty(inode);
+		iput(inode);
+		goto out;
+	}
+	inode->i_nlink = 2;
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir));
+	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL);
+	cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
+	udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL);
+	udf_release_data(fibh.sbh);
+	inode->i_mode = S_IFDIR | mode;
+	if (dir->i_mode & S_ISGID)
+		inode->i_mode |= S_ISGID;
+	mark_inode_dirty(inode);
+
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
+	{
+		inode->i_nlink = 0;
+		mark_inode_dirty(inode);
+		iput(inode);
+		goto out;
+	}
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
+	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
+	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
+	dir->i_nlink++;
+	mark_inode_dirty(dir);
+	d_instantiate(dentry, inode);
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	err = 0;
+out:
+	unlock_kernel();
+	return err;
+}
+
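+/*
+ * Return 1 if the directory contains no live entries apart from its
+ * parent link (whose file identifier length is zero), and 0 otherwise
+ * or on a read error.
+ */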
+static int empty_dir(struct inode *dir)
+{
+	struct fileIdentDesc *fi, cfi;
+	struct udf_fileident_bh fibh;
+	loff_t f_pos;
+	loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
+	int block;
+	kernel_lb_addr bloc, eloc;
+	uint32_t extoffset, elen, offset;
+	struct buffer_head *bh = NULL;
+
+	f_pos = (udf_ext0_offset(dir) >> 2);
+
+	fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh.sbh = fibh.ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
+	{
+		offset >>= dir->i_sb->s_blocksize_bits;
+		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
+		{
+			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
+				extoffset -= sizeof(short_ad);
+			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
+				extoffset -= sizeof(long_ad);
+		}
+		else
+			offset = 0;
+
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			return 0;
+		}
+	}
+	else
+	{
+		udf_release_data(bh);
+		return 0;
+	}
+
+
+	while ( (f_pos < size) )
+	{
+		fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+
+		if (!fi)
+		{
+			if (fibh.sbh != fibh.ebh)
+				udf_release_data(fibh.ebh);
+			udf_release_data(fibh.sbh);
+			udf_release_data(bh);
+			return 0;
+		}
+
+		if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
+		{
+			if (fibh.sbh != fibh.ebh)
+				udf_release_data(fibh.ebh);
+			udf_release_data(fibh.sbh);
+			udf_release_data(bh);
+			return 0;
+		}
+	}
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	udf_release_data(bh);
+	return 1;
+}
+
+static int udf_rmdir(struct inode * dir, struct dentry * dentry)
+{
+	int retval;
+	struct inode * inode = dentry->d_inode;
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc *fi, cfi;
+	kernel_lb_addr tloc;
+
+	retval = -ENOENT;
+	lock_kernel();
+	fi = udf_find_entry(dir, dentry, &fibh, &cfi);
+	if (!fi)
+		goto out;
+
+	retval = -EIO;
+	tloc = lelb_to_cpu(cfi.icb.extLocation);
+	if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
+		goto end_rmdir;
+	retval = -ENOTEMPTY;
+	if (!empty_dir(inode))
+		goto end_rmdir;
+	retval = udf_delete_entry(dir, fi, &fibh, &cfi);
+	if (retval)
+		goto end_rmdir;
+	if (inode->i_nlink != 2)
+		udf_warning(inode->i_sb, "udf_rmdir",
+			"empty directory has nlink != 2 (%d)",
+			inode->i_nlink);
+	inode->i_nlink = 0;
+	inode->i_size = 0;
+	mark_inode_dirty(inode);
+	dir->i_nlink --;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	mark_inode_dirty(dir);
+
+end_rmdir:
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+out:
+	unlock_kernel();
+	return retval;
+}
+
+static int udf_unlink(struct inode * dir, struct dentry * dentry)
+{
+	int retval;
+	struct inode * inode = dentry->d_inode;
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc *fi;
+	struct fileIdentDesc cfi;
+	kernel_lb_addr tloc;
+
+	retval = -ENOENT;
+	lock_kernel();
+	fi = udf_find_entry(dir, dentry, &fibh, &cfi);
+	if (!fi)
+		goto out;
+
+	retval = -EIO;
+	tloc = lelb_to_cpu(cfi.icb.extLocation);
+	if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
+		goto end_unlink;
+
+	if (!inode->i_nlink)
+	{
+		udf_debug("Deleting nonexistent file (%lu), %d\n",
+			inode->i_ino, inode->i_nlink);
+		inode->i_nlink = 1;
+	}
+	retval = udf_delete_entry(dir, fi, &fibh, &cfi);
+	if (retval)
+		goto end_unlink;
+	dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	mark_inode_dirty(dir);
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+	inode->i_ctime = dir->i_ctime;
+	retval = 0;
+
+end_unlink:
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+out:
+	unlock_kernel();
+	return retval;
+}
+
+static int udf_symlink(struct inode * dir, struct dentry * dentry, const char * symname)
+{
+	struct inode * inode;
+	struct pathComponent *pc;
+	char *compstart;
+	struct udf_fileident_bh fibh;
+	struct buffer_head *bh = NULL;
+	int eoffset, elen = 0;
+	struct fileIdentDesc *fi;
+	struct fileIdentDesc cfi;
+	char *ea;
+	int err;
+	int block;
+	char name[UDF_NAME_LEN];
+	int namelen;
+
+	lock_kernel();
+	if (!(inode = udf_new_inode(dir, S_IFLNK, &err)))
+		goto out;
+
+	inode->i_mode = S_IFLNK | S_IRWXUGO;
+	inode->i_data.a_ops = &udf_symlink_aops;
+	inode->i_op = &page_symlink_inode_operations;
+
+	if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB)
+	{
+		struct buffer_head *bh = NULL;
+		kernel_lb_addr bloc, eloc;
+		uint32_t elen, extoffset;
+
+		block = udf_new_block(inode->i_sb, inode,
+			UDF_I_LOCATION(inode).partitionReferenceNum,
+			UDF_I_LOCATION(inode).logicalBlockNum, &err);
+		if (!block)
+			goto out_no_entry;
+		bloc = UDF_I_LOCATION(inode);
+		eloc.logicalBlockNum = block;
+		eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+		elen = inode->i_sb->s_blocksize;
+		UDF_I_LENEXTENTS(inode) = elen;
+		extoffset = udf_file_entry_alloc_offset(inode);
+		udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 0);
+		udf_release_data(bh);
+
+		block = udf_get_pblock(inode->i_sb, block,
+			UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+		bh = udf_tread(inode->i_sb, block);
+		lock_buffer(bh);
+		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		mark_buffer_dirty_inode(bh, inode);
+		ea = bh->b_data + udf_ext0_offset(inode);
+	}
+	else
+		ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
+
+	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+	pc = (struct pathComponent *)ea;
+
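+	/* The symlink body is a sequence of pathComponents: type 1 with a
+	 * zero-length identifier marks an absolute path, type 3 is "..",
+	 * type 4 is "." and type 5 carries a translated name. */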
+	if (*symname == '/')
+	{
+		do
+		{
+			symname++;
+		} while (*symname == '/');
+
+		pc->componentType = 1;
+		pc->lengthComponentIdent = 0;
+		pc->componentFileVersionNum = 0;
+		pc += sizeof(struct pathComponent);
+		elen += sizeof(struct pathComponent);
+	}
+
+	err = -ENAMETOOLONG;
+
+	while (*symname)
+	{
+		if (elen + sizeof(struct pathComponent) > eoffset)
+			goto out_no_entry;
+
+		pc = (struct pathComponent *)(ea + elen);
+
+		compstart = (char *)symname;
+
+		do
+		{
+			symname++;
+		} while (*symname && *symname != '/');
+
+		pc->componentType = 5;
+		pc->lengthComponentIdent = 0;
+		pc->componentFileVersionNum = 0;
+		if (compstart[0] == '.')
+		{
+			if ((symname-compstart) == 1)
+				pc->componentType = 4;
+			else if ((symname-compstart) == 2 && compstart[1] == '.')
+				pc->componentType = 3;
+		}
+
+		if (pc->componentType == 5)
+		{
+			if ( !(namelen = udf_put_filename(inode->i_sb, compstart, name, symname-compstart)))
+				goto out_no_entry;
+
+			if (elen + sizeof(struct pathComponent) + namelen > eoffset)
+				goto out_no_entry;
+			else
+				pc->lengthComponentIdent = namelen;
+
+			memcpy(pc->componentIdent, name, namelen);
+		}
+
+		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
+
+		if (*symname)
+		{
+			do
+			{
+				symname++;
+			} while (*symname == '/');
+		}
+	}
+
+	udf_release_data(bh);
+	inode->i_size = elen;
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+		UDF_I_LENALLOC(inode) = inode->i_size;
+	mark_inode_dirty(inode);
+
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
+		goto out_no_entry;
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
+	if (UDF_SB_LVIDBH(inode->i_sb))
+	{
+		struct logicalVolHeaderDesc *lvhd;
+		uint64_t uniqueID;
+		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		uniqueID = le64_to_cpu(lvhd->uniqueID);
+		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
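+		/* The FID stores only the low 32 bits of the volume UniqueID;
+		 * when that part wraps, skip the first 16 values, which the
+		 * UDF specification reserves. */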
+		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
+			uniqueID += 16;
+		lvhd->uniqueID = cpu_to_le64(uniqueID);
+		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
+	}
+	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		mark_inode_dirty(dir);
+	}
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	d_instantiate(dentry, inode);
+	err = 0;
+
+out:
+	unlock_kernel();
+	return err;
+
+out_no_entry:
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+	iput(inode);
+	goto out;
+}
+
+static int udf_link(struct dentry * old_dentry, struct inode * dir,
+	 struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	struct udf_fileident_bh fibh;
+	struct fileIdentDesc cfi, *fi;
+	int err;
+
+	lock_kernel();
+	if (inode->i_nlink >= (256<<sizeof(inode->i_nlink))-1)
+	{
+		unlock_kernel();
+		return -EMLINK;
+	}
+
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
+	{
+		unlock_kernel();
+		return err;
+	}
+	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
+	if (UDF_SB_LVIDBH(inode->i_sb))
+	{
+		struct logicalVolHeaderDesc *lvhd;
+		uint64_t uniqueID;
+		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		uniqueID = le64_to_cpu(lvhd->uniqueID);
+		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
+		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
+			uniqueID += 16;
+		lvhd->uniqueID = cpu_to_le64(uniqueID);
+		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
+	}
+	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		mark_inode_dirty(dir);
+	}
+	if (fibh.sbh != fibh.ebh)
+		udf_release_data(fibh.ebh);
+	udf_release_data(fibh.sbh);
+	inode->i_nlink ++;
+	inode->i_ctime = current_fs_time(inode->i_sb);
+	mark_inode_dirty(inode);
+	atomic_inc(&inode->i_count);
+	d_instantiate(dentry, inode);
+	unlock_kernel();
+	return 0;
+}
+
+/* Anybody can rename anything with this: the permission checks are left to the
+ * higher-level routines.
+ */
+static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
+	struct inode * new_dir, struct dentry * new_dentry)
+{
+	struct inode * old_inode = old_dentry->d_inode;
+	struct inode * new_inode = new_dentry->d_inode;
+	struct udf_fileident_bh ofibh, nfibh;
+	struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi;
+	struct buffer_head *dir_bh = NULL;
+	int retval = -ENOENT;
+	kernel_lb_addr tloc;
+
+	lock_kernel();
+	if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi)))
+	{
+		if (ofibh.sbh != ofibh.ebh)
+			udf_release_data(ofibh.ebh);
+		udf_release_data(ofibh.sbh);
+	}
+	tloc = lelb_to_cpu(ocfi.icb.extLocation);
+	if (!ofi || udf_get_lb_pblock(old_dir->i_sb, tloc, 0)
+					!= old_inode->i_ino)
+		goto end_rename;
+
+	nfi = udf_find_entry(new_dir, new_dentry, &nfibh, &ncfi);
+	if (nfi)
+	{
+		if (!new_inode)
+		{
+			if (nfibh.sbh != nfibh.ebh)
+				udf_release_data(nfibh.ebh);
+			udf_release_data(nfibh.sbh);
+			nfi = NULL;
+		}
+	}
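+	/* When moving a directory, find its parent ("..") file identifier
+	 * so it can be repointed at the new parent directory below. */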
+	if (S_ISDIR(old_inode->i_mode))
+	{
+		uint32_t offset = udf_ext0_offset(old_inode);
+
+		if (new_inode)
+		{
+			retval = -ENOTEMPTY;
+			if (!empty_dir(new_inode))
+				goto end_rename;
+		}
+		retval = -EIO;
+		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
+		{
+			dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) -
+				(UDF_I_EFE(old_inode) ?
+					sizeof(struct extendedFileEntry) :
+					sizeof(struct fileEntry)),
+				old_inode->i_sb->s_blocksize, &offset);
+		}
+		else
+		{
+			dir_bh = udf_bread(old_inode, 0, 0, &retval);
+			if (!dir_bh)
+				goto end_rename;
+			dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset);
+		}
+		if (!dir_fi)
+			goto end_rename;
+		tloc = lelb_to_cpu(dir_fi->icb.extLocation);
+		if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0)
+					!= old_dir->i_ino)
+			goto end_rename;
+
+		retval = -EMLINK;
+		if (!new_inode && new_dir->i_nlink >= (256<<sizeof(new_dir->i_nlink))-1)
+			goto end_rename;
+	}
+	if (!nfi)
+	{
+		nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
+		if (!nfi)
+			goto end_rename;
+	}
+
+	/*
+	 * Like most other Unix systems, set the ctime for inodes on a
+	 * rename.
+	 */
+	old_inode->i_ctime = current_fs_time(old_inode->i_sb);
+	mark_inode_dirty(old_inode);
+
+	/*
+	 * ok, that's it
+	 */
+	ncfi.fileVersionNum = ocfi.fileVersionNum;
+	ncfi.fileCharacteristics = ocfi.fileCharacteristics;
+	memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(long_ad));
+	udf_write_fi(new_dir, &ncfi, nfi, &nfibh, NULL, NULL);
+
+	/* The old fid may have moved - find it again */
+	ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi);
+	udf_delete_entry(old_dir, ofi, &ofibh, &ocfi);
+
+	if (new_inode)
+	{
+		new_inode->i_nlink--;
+		new_inode->i_ctime = current_fs_time(new_inode->i_sb);
+		mark_inode_dirty(new_inode);
+	}
+	old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
+	mark_inode_dirty(old_dir);
+
+	if (dir_fi)
+	{
+		dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir));
+		udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) +
+			le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
+		{
+			mark_inode_dirty(old_inode);
+		}
+		else
+			mark_buffer_dirty_inode(dir_bh, old_inode);
+		old_dir->i_nlink --;
+		mark_inode_dirty(old_dir);
+		if (new_inode)
+		{
+			new_inode->i_nlink --;
+			mark_inode_dirty(new_inode);
+		}
+		else
+		{
+			new_dir->i_nlink ++;
+			mark_inode_dirty(new_dir);
+		}
+	}
+
+	if (ofi)
+	{
+		if (ofibh.sbh != ofibh.ebh)
+			udf_release_data(ofibh.ebh);
+		udf_release_data(ofibh.sbh);
+	}
+
+	retval = 0;
+
+end_rename:
+	udf_release_data(dir_bh);
+	if (nfi)
+	{
+		if (nfibh.sbh != nfibh.ebh)
+			udf_release_data(nfibh.ebh);
+		udf_release_data(nfibh.sbh);
+	}
+	unlock_kernel();
+	return retval;
+}
+
+struct inode_operations udf_dir_inode_operations = {
+	.lookup				= udf_lookup,
+	.create				= udf_create,
+	.link				= udf_link,
+	.unlink				= udf_unlink,
+	.symlink			= udf_symlink,
+	.mkdir				= udf_mkdir,
+	.rmdir				= udf_rmdir,
+	.mknod				= udf_mknod,
+	.rename				= udf_rename,
+};
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
new file mode 100644
index 0000000..e82aae6
--- /dev/null
+++ b/fs/udf/osta_udf.h
@@ -0,0 +1,296 @@
+/*
+ * osta_udf.h
+ *
+ * This file is based on OSTA UDF(tm) 2.50 (April 30, 2003)
+ * http://www.osta.org
+ *
+ * Copyright (c) 2001-2004  Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "ecma_167.h"
+
+#ifndef _OSTA_UDF_H
+#define _OSTA_UDF_H 1
+
+/* OSTA CS0 Charspec (UDF 2.50 2.1.2) */
+#define UDF_CHAR_SET_TYPE		0
+#define UDF_CHAR_SET_INFO		"OSTA Compressed Unicode"
+
+/* Entity Identifier (UDF 2.50 2.1.5) */
+/* Identifiers (UDF 2.50 2.1.5.2) */
+#define UDF_ID_DEVELOPER		"*Linux UDFFS"
+#define	UDF_ID_COMPLIANT		"*OSTA UDF Compliant"
+#define UDF_ID_LV_INFO			"*UDF LV Info"
+#define UDF_ID_FREE_EA			"*UDF FreeEASpace"
+#define UDF_ID_FREE_APP_EA		"*UDF FreeAppEASpace"
+#define UDF_ID_DVD_CGMS			"*UDF DVD CGMS Info"
+#define UDF_ID_OS2_EA			"*UDF OS/2 EA"
+#define UDF_ID_OS2_EA_LENGTH		"*UDF OS/2 EALength"
+#define UDF_ID_MAC_VOLUME		"*UDF Mac VolumeInfo"
+#define UDF_ID_MAC_FINDER		"*UDF Mac FinderInfo"
+#define UDF_ID_MAC_UNIQUE		"*UDF Mac UniqueIDTable"
+#define UDF_ID_MAC_RESOURCE		"*UDF Mac ResourceFork"
+#define UDF_ID_VIRTUAL			"*UDF Virtual Partition"
+#define UDF_ID_SPARABLE			"*UDF Sparable Partition"
+#define UDF_ID_ALLOC			"*UDF Virtual Alloc Tbl"
+#define UDF_ID_SPARING			"*UDF Sparing Table"
+#define UDF_ID_METADATA			"*UDF Metadata Partition"
+
+/* Identifier Suffix (UDF 2.50 2.1.5.3) */
+#define IS_DF_HARD_WRITE_PROTECT	0x01
+#define IS_DF_SOFT_WRITE_PROTECT	0x02
+
+struct UDFIdentSuffix
+{
+	__le16		UDFRevision;
+	uint8_t		OSClass;
+	uint8_t		OSIdentifier;
+	uint8_t		reserved[4];
+} __attribute__ ((packed));
+
+struct impIdentSuffix
+{
+	uint8_t		OSClass;
+	uint8_t		OSIdentifier;
+	uint8_t		reserved[6];
+} __attribute__ ((packed));
+
+struct appIdentSuffix
+{
+	uint8_t		impUse[8];
+} __attribute__ ((packed));
+
+/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
+/* Implementation Use (UDF 2.50 2.2.6.4) */
+struct logicalVolIntegrityDescImpUse
+{
+	regid		impIdent;
+	__le32		numFiles;
+	__le32		numDirs;
+	__le16		minUDFReadRev;
+	__le16		minUDFWriteRev;
+	__le16		maxUDFWriteRev;
+	uint8_t		impUse[0];
+} __attribute__ ((packed));
+
+/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
+/* Implementation Use (UDF 2.50 2.2.7.2) */
+struct impUseVolDescImpUse
+{
+	charspec	LVICharset;
+	dstring		logicalVolIdent[128];
+	dstring		LVInfo1[36];
+	dstring		LVInfo2[36];
+	dstring		LVInfo3[36];
+	regid		impIdent;
+	uint8_t		impUse[128];
+} __attribute__ ((packed));
+
+struct udfPartitionMap2
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	uint8_t		reserved1[2];
+	regid		partIdent;
+	__le16		volSeqNum;
+	__le16		partitionNum;
+} __attribute__ ((packed));
+
+/* Virtual Partition Map (UDF 2.50 2.2.8) */
+struct virtualPartitionMap
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	uint8_t		reserved1[2];
+	regid		partIdent;
+	__le16		volSeqNum;
+	__le16		partitionNum;
+	uint8_t		reserved2[24];
+} __attribute__ ((packed));
+
+/* Sparable Partition Map (UDF 2.50 2.2.9) */
+struct sparablePartitionMap
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	uint8_t		reserved1[2];
+	regid		partIdent;
+	__le16		volSeqNum;
+	__le16		partitionNum;
+	__le16		packetLength;
+	uint8_t		numSparingTables;
+	uint8_t		reserved2[1];
+	__le32		sizeSparingTable;
+	__le32		locSparingTable[4];
+} __attribute__ ((packed));
+
+/* Metadata Partition Map (UDF 2.4.0 2.2.10) */
+struct metadataPartitionMap
+{
+	uint8_t		partitionMapType;
+	uint8_t		partitionMapLength;
+	uint8_t		reserved1[2];
+	regid		partIdent;
+	__le16		volSeqNum;
+	__le16		partitionNum;
+	__le32		metadataFileLoc;
+	__le32		metadataMirrorFileLoc;
+	__le32		metadataBitmapFileLoc;
+	__le32		allocUnitSize;
+	__le16		alignUnitSize;
+	uint8_t		flags;
+	uint8_t		reserved2[5];
+} __attribute__ ((packed));
+
+/* Virtual Allocation Table (UDF 1.5 2.2.10) */
+struct virtualAllocationTable15
+{
+	__le32		VirtualSector[0];
+	regid		vatIdent;
+	__le32		previousVATICBLoc;
+} __attribute__ ((packed));  
+
+#define ICBTAG_FILE_TYPE_VAT15		0x00U
+
+/* Virtual Allocation Table (UDF 2.50 2.2.11) */
+struct virtualAllocationTable20
+{
+	__le16		lengthHeader;
+	__le16		lengthImpUse;
+	dstring		logicalVolIdent[128];
+	__le32		previousVATICBLoc;
+	__le32		numFiles;
+	__le32		numDirs;
+	__le16		minReadRevision;
+	__le16		minWriteRevision;
+	__le16		maxWriteRevision;
+	__le16		reserved;
+	uint8_t		impUse[0];
+	__le32		vatEntry[0];
+} __attribute__ ((packed));
+
+#define ICBTAG_FILE_TYPE_VAT20		0xF8U
+
+/* Sparing Table (UDF 2.50 2.2.12) */
+struct sparingEntry
+{
+	__le32		origLocation;
+	__le32		mappedLocation;
+} __attribute__ ((packed));
+
+struct sparingTable
+{
+	tag 		descTag;
+	regid		sparingIdent;
+	__le16		reallocationTableLen;
+	__le16		reserved;
+	__le32		sequenceNum;
+	struct sparingEntry
+			mapEntry[0];
+} __attribute__ ((packed));
+
+/* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */
+#define ICBTAG_FILE_TYPE_MAIN		0xFA
+#define ICBTAG_FILE_TYPE_MIRROR		0xFB
+#define ICBTAG_FILE_TYPE_BITMAP		0xFC
+
+/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
+struct allocDescImpUse
+{
+	__le16		flags;
+	uint8_t		impUse[4];
+} __attribute__ ((packed));
+
+#define AD_IU_EXT_ERASED		0x0001
+
+/* Real-Time Files (UDF 2.50 6.11) */
+#define ICBTAG_FILE_TYPE_REALTIME	0xF9U
+
+/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
+/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
+struct freeEaSpace
+{
+	__le16		headerChecksum;
+	uint8_t		freeEASpace[0];
+} __attribute__ ((packed));
+
+/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
+struct DVDCopyrightImpUse 
+{
+	__le16		headerChecksum;
+	uint8_t		CGMSInfo;
+	uint8_t		dataType;
+	uint8_t		protectionSystemInfo[4];
+} __attribute__ ((packed));
+
+/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
+/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
+struct freeAppEASpace
+{
+	__le16		headerChecksum;
+	uint8_t		freeEASpace[0];
+} __attribute__ ((packed));
+
+/* UDF Defined System Stream (UDF 2.50 3.3.7) */
+#define UDF_ID_UNIQUE_ID		"*UDF Unique ID Mapping Data"
+#define UDF_ID_NON_ALLOC		"*UDF Non-Allocatable Space"
+#define UDF_ID_POWER_CAL		"*UDF Power Cal Table"
+#define UDF_ID_BACKUP			"*UDF Backup"
+
+/* Operating System Identifiers (UDF 2.50 6.3) */
+#define UDF_OS_CLASS_UNDEF		0x00U
+#define UDF_OS_CLASS_DOS		0x01U
+#define UDF_OS_CLASS_OS2		0x02U
+#define UDF_OS_CLASS_MAC		0x03U
+#define UDF_OS_CLASS_UNIX		0x04U
+#define UDF_OS_CLASS_WIN9X		0x05U
+#define UDF_OS_CLASS_WINNT		0x06U
+#define UDF_OS_CLASS_OS400		0x07U
+#define UDF_OS_CLASS_BEOS		0x08U
+#define UDF_OS_CLASS_WINCE		0x09U
+
+#define UDF_OS_ID_UNDEF			0x00U
+#define UDF_OS_ID_DOS			0x00U
+#define UDF_OS_ID_OS2			0x00U
+#define UDF_OS_ID_MAC			0x00U
+#define UDF_OS_ID_MAX_OSX		0x01U
+#define UDF_OS_ID_UNIX			0x00U
+#define UDF_OS_ID_AIX			0x01U
+#define UDF_OS_ID_SOLARIS		0x02U
+#define UDF_OS_ID_HPUX			0x03U
+#define UDF_OS_ID_IRIX			0x04U
+#define UDF_OS_ID_LINUX			0x05U
+#define UDF_OS_ID_MKLINUX		0x06U
+#define UDF_OS_ID_FREEBSD		0x07U
+#define UDF_OS_ID_WIN9X			0x00U
+#define UDF_OS_ID_WINNT			0x00U
+#define UDF_OS_ID_OS400			0x00U
+#define UDF_OS_ID_BEOS			0x00U
+#define UDF_OS_ID_WINCE			0x00U
+
+#endif /* _OSTA_UDF_H */
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
new file mode 100644
index 0000000..4d36f26
--- /dev/null
+++ b/fs/udf/partition.c
@@ -0,0 +1,226 @@
+/*
+ * partition.c
+ *
+ * PURPOSE
+ *      Partition handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *      E-mail regarding any portion of the Linux UDF file system should be
+ *      directed to the development team mailing list (run by majordomo):
+ *              linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *      This file is distributed under the terms of the GNU General Public
+ *      License (GPL). Copies of the GPL can be obtained from:
+ *              ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *      Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-2001 Ben Fennema
+ *
+ * HISTORY
+ *
+ * 12/06/98 blf  Created file. 
+ *
+ */
+
+#include "udfdecl.h"
+#include "udf_sb.h"
+#include "udf_i.h"
+
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/udf_fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
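+/*
+ * Map a (block, partition, offset) triple to a physical block number.
+ * Partition types that need translation (virtual, sparable) supply a
+ * mapping function in the superblock; otherwise the physical block is
+ * simply the partition root plus block and offset.  Returns 0xFFFFFFFF
+ * for an invalid partition number.
+ */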
+inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+{
+	if (partition >= UDF_SB_NUMPARTS(sb))
+	{
+		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
+			block, partition, offset);
+		return 0xFFFFFFFF;
+	}
+	if (UDF_SB_PARTFUNC(sb, partition))
+		return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
+	else
+		return UDF_SB_PARTROOT(sb, partition) + block + offset;
+}
+
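+/*
+ * Translate a virtual block number through the Virtual Allocation Table.
+ * The first VAT block only holds entries after s_start_offset bytes of
+ * header, so work out which VAT block and which index within it the
+ * entry lives at before reading it.
+ */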
+uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+{
+	struct buffer_head *bh = NULL;
+	uint32_t newblock;
+	uint32_t index;
+	uint32_t loc;
+
+	index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
+
+	if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries)
+	{
+		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
+			block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
+		return 0xFFFFFFFF;
+	}
+
+	if (block >= index)
+	{
+		block -= index;
+		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
+		index = block % (sb->s_blocksize / sizeof(uint32_t));
+	}
+	else
+	{
+		newblock = 0;
+		index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
+	}
+
+	loc = udf_block_map(UDF_SB_VAT(sb), newblock);
+
+	if (!(bh = sb_bread(sb, loc)))
+	{
+		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
+			sb, block, partition, loc, index);
+		return 0xFFFFFFFF;
+	}
+
+	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
+
+	udf_release_data(bh);
+
+	if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition)
+	{
+		udf_debug("recursive call to udf_get_pblock!\n");
+		return 0xFFFFFFFF;
+	}
+
+	return udf_get_pblock(sb, loc, UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, offset);
+}
+
+inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+{
+	return udf_get_pblock_virt15(sb, block, partition, offset);
+}
+
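+/*
+ * Sparable partitions remap whole packets of s_packet_len blocks.  Look
+ * up the packet containing this block in the sparing table; if it has
+ * been remapped, return the mapped location plus the offset within the
+ * packet, otherwise fall back to the plain partition mapping.
+ */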
+uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+{
+	int i;
+	struct sparingTable *st = NULL;
+	uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
+
+	for (i=0; i<4; i++)
+	{
+		if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL)
+		{
+			st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
+			break;
+		}
+	}
+
+	if (st)
+	{
+		for (i=0; i<le16_to_cpu(st->reallocationTableLen); i++)
+		{
+			if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0)
+				break;
+			else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet)
+			{
+				return le32_to_cpu(st->mapEntry[i].mappedLocation) +
+					((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
+			}
+			else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet)
+				break;
+		}
+	}
+	return UDF_SB_PARTROOT(sb,partition) + block + offset;
+}
+
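+/*
+ * Remap a defective block on a sparable partition: reuse the sparing
+ * entry already covering its packet, or claim a free entry (origLocation
+ * of 0xFFFFFFFF), keeping the table sorted by original location.
+ * Returns 0 on success and 1 on failure.
+ */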
+int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
+{
+	struct udf_sparing_data *sdata;
+	struct sparingTable *st = NULL;
+	struct sparingEntry mapEntry;
+	uint32_t packet;
+	int i, j, k, l;
+
+	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
+	{
+		if (old_block > UDF_SB_PARTROOT(sb,i) &&
+		    old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i))
+		{
+			sdata = &UDF_SB_TYPESPAR(sb,i);
+			packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
+
+			for (j=0; j<4; j++)
+			{
+				if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
+				{
+					st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
+					break;
+				}
+			}
+
+			if (!st)
+				return 1;
+
+			for (k=0; k<le16_to_cpu(st->reallocationTableLen); k++)
+			{
+				if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF)
+				{
+					for (; j<4; j++)
+					{
+						if (sdata->s_spar_map[j])
+						{
+							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
+							st->mapEntry[k].origLocation = cpu_to_le32(packet);
+							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
+							mark_buffer_dirty(sdata->s_spar_map[j]);
+						}
+					}
+					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
+						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					return 0;
+				}
+				else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet)
+				{
+					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
+						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					return 0;
+				}
+				else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet)
+					break;
+			}
+			for (l=k; l<le16_to_cpu(st->reallocationTableLen); l++)
+			{
+				if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF)
+				{
+					for (; j<4; j++)
+					{
+						if (sdata->s_spar_map[j])
+						{
+							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
+							mapEntry = st->mapEntry[l];
+							mapEntry.origLocation = cpu_to_le32(packet);
+							memmove(&st->mapEntry[k+1], &st->mapEntry[k], (l-k)*sizeof(struct sparingEntry));
+							st->mapEntry[k] = mapEntry;
+							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
+							mark_buffer_dirty(sdata->s_spar_map[j]);
+						}
+					}
+					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
+						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					return 0;
+				}
+			}
+			return 1;
+		}
+	}
+	if (i == UDF_SB_NUMPARTS(sb))
+	{
+		/* outside of partitions */
+		/* for now, fail =) */
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/fs/udf/super.c b/fs/udf/super.c
new file mode 100644
index 0000000..15bd4f2
--- /dev/null
+++ b/fs/udf/super.c
@@ -0,0 +1,1934 @@
+/*
+ * super.c
+ *
+ * PURPOSE
+ *  Super block routines for the OSTA-UDF(tm) filesystem.
+ *
+ * DESCRIPTION
+ *  OSTA-UDF(tm) = Optical Storage Technology Association
+ *  Universal Disk Format.
+ *
+ *  This code is based on version 2.00 of the UDF specification,
+ *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
+ *    http://www.osta.org/
+ *    http://www.ecma.ch/
+ *    http://www.iso.org/
+ *
+ * CONTACTS
+ *  E-mail regarding any portion of the Linux UDF file system should be
+ *  directed to the development team mailing list (run by majordomo):
+ *	  linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *  This file is distributed under the terms of the GNU General Public
+ *  License (GPL). Copies of the GPL can be obtained from:
+ *    ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *  Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998 Dave Boynton
+ *  (C) 1998-2004 Ben Fennema
+ *  (C) 2000 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  09/24/98 dgb  changed to allow compiling outside of kernel, and
+ *                added some debugging.
+ *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
+ *  10/16/98      attempting some multi-session support
+ *  10/17/98      added freespace count for "df"
+ *  11/11/98 gr   added novrs option
+ *  11/26/98 dgb  added fileset,anchor mount options
+ *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced vol descs
+ *                rewrote option handling based on isofs
+ *  12/20/98      find the free space bitmap (if it exists)
+ */
+
+#include "udfdecl.h"    
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/stat.h>
+#include <linux/cdrom.h>
+#include <linux/nls.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include <linux/udf_fs.h>
+#include "udf_sb.h"
+#include "udf_i.h"
+
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
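+/* Slots used to remember the latest copy of each descriptor type while
+ * walking the Volume Descriptor Sequence. */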
+#define VDS_POS_PRIMARY_VOL_DESC	0
+#define VDS_POS_UNALLOC_SPACE_DESC	1
+#define VDS_POS_LOGICAL_VOL_DESC	2
+#define VDS_POS_PARTITION_DESC		3
+#define VDS_POS_IMP_USE_VOL_DESC	4
+#define VDS_POS_VOL_DESC_PTR		5
+#define VDS_POS_TERMINATING_DESC	6
+#define VDS_POS_LENGTH			7
+
+static char error_buf[1024];
+
+/* These are the "meat" - everything else is stuffing */
+static int udf_fill_super(struct super_block *, void *, int);
+static void udf_put_super(struct super_block *);
+static void udf_write_super(struct super_block *);
+static int udf_remount_fs(struct super_block *, int *, char *);
+static int udf_check_valid(struct super_block *, int, int);
+static int udf_vrs(struct super_block *sb, int silent);
+static int udf_load_partition(struct super_block *, kernel_lb_addr *);
+static int udf_load_logicalvol(struct super_block *, struct buffer_head *, kernel_lb_addr *);
+static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
+static void udf_find_anchor(struct super_block *);
+static int udf_find_fileset(struct super_block *, kernel_lb_addr *, kernel_lb_addr *);
+static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
+static void udf_load_fileset(struct super_block *, struct buffer_head *, kernel_lb_addr *);
+static void udf_load_partdesc(struct super_block *, struct buffer_head *);
+static void udf_open_lvid(struct super_block *);
+static void udf_close_lvid(struct super_block *);
+static unsigned int udf_count_free(struct super_block *);
+static int udf_statfs(struct super_block *, struct kstatfs *);
+
+/* UDF filesystem type */
+static struct super_block *udf_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super);
+}
+
+static struct file_system_type udf_fstype = {
+	.owner		= THIS_MODULE,
+	.name		= "udf",
+	.get_sb		= udf_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static kmem_cache_t * udf_inode_cachep;
+
+static struct inode *udf_alloc_inode(struct super_block *sb)
+{
+	struct udf_inode_info *ei;
+	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void udf_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+	{
+		ei->i_ext.i_data = NULL;
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static int init_inodecache(void)
+{
+	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
+					     sizeof(struct udf_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (udf_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(udf_inode_cachep))
+		printk(KERN_INFO "udf_inode_cache: not all structures were freed\n");
+}
+
+/* Superblock operations */
+static struct super_operations udf_sb_ops = {
+	.alloc_inode		= udf_alloc_inode,
+	.destroy_inode		= udf_destroy_inode,
+	.write_inode		= udf_write_inode,
+	.delete_inode		= udf_delete_inode,
+	.clear_inode		= udf_clear_inode,
+	.put_super		= udf_put_super,
+	.write_super		= udf_write_super,
+	.statfs			= udf_statfs,
+	.remount_fs		= udf_remount_fs,
+};
+
+struct udf_options
+{
+	unsigned char novrs;
+	unsigned int blocksize;
+	unsigned int session;
+	unsigned int lastblock;
+	unsigned int anchor;
+	unsigned int volume;
+	unsigned short partition;
+	unsigned int fileset;
+	unsigned int rootdir;
+	unsigned int flags;
+	mode_t umask;
+	gid_t gid;
+	uid_t uid;
+	struct nls_table *nls_map;
+};
+
+static int __init init_udf_fs(void)
+{
+	int err;
+	err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&udf_fstype);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_udf_fs(void)
+{
+	unregister_filesystem(&udf_fstype);
+	destroy_inodecache();
+}
+
+module_init(init_udf_fs)
+module_exit(exit_udf_fs)
+
+/*
+ * udf_parse_options
+ *
+ * PURPOSE
+ *	Parse mount options.
+ *
+ * DESCRIPTION
+ *	The following mount options are supported:
+ *
+ *	gid=		Set the default group.
+ *	umask=		Set the default umask.
+ *	uid=		Set the default user.
+ *	bs=		Set the block size.
+ *	unhide		Show otherwise hidden files.
+ *	undelete	Show deleted files in lists.
+ *	adinicb		Embed data in the inode (default)
+ *	noadinicb	Don't embed data in the inode
+ *	shortad		Use short ad's
+ *	longad		Use long ad's (default)
+ *	nostrict	Unset strict conformance
+ *	iocharset=	Set the NLS character set
+ *
+ *	The remaining options are for debugging and disaster recovery:
+ *
+ *	novrs		Skip the volume recognition sequence
+ *
+ *	The following expect an offset from 0.
+ *
+ *	session=	Set the CDROM session (default= last session)
+ *	anchor=		Override standard anchor location. (default= 256)
+ *	volume=		Override the VolumeDesc location. (unused)
+ *	partition=	Override the PartitionDesc location. (unused)
+ *	lastblock=	Set the last block of the filesystem.
+ *
+ *	The following expect an offset from the partition root.
+ *
+ *	fileset=	Override the fileset block location. (unused)
+ *	rootdir=	Override the root directory location. (unused)
+ *		WARNING: overriding the rootdir to a non-directory may
+ *		yield highly unpredictable results.
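+ *
+ *	A typical invocation (illustrative only; the device and mount point
+ *	here are just examples) might be:
+ *
+ *	  mount -t udf -o umask=0022,uid=500,gid=100 /dev/cdrom /mnt/cdrom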
+ *
+ * PRE-CONDITIONS
+ *	options		Pointer to mount options string.
+ *	uopts		Pointer to mount options variable.
+ *
+ * POST-CONDITIONS
+ *	<return>	1	Mount options parsed okay.
+ *	<return>	0	Error parsing mount options.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+
+enum {
+	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
+	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
+	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
+	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
+	Opt_rootdir, Opt_utf8, Opt_iocharset,
+	Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_novrs, "novrs"},
+	{Opt_nostrict, "nostrict"},
+	{Opt_bs, "bs=%u"},
+	{Opt_unhide, "unhide"},
+	{Opt_undelete, "undelete"},
+	{Opt_noadinicb, "noadinicb"},
+	{Opt_adinicb, "adinicb"},
+	{Opt_shortad, "shortad"},
+	{Opt_longad, "longad"},
+	{Opt_gid, "gid=%u"},
+	{Opt_uid, "uid=%u"},
+	{Opt_umask, "umask=%o"},
+	{Opt_session, "session=%u"},
+	{Opt_lastblock, "lastblock=%u"},
+	{Opt_anchor, "anchor=%u"},
+	{Opt_volume, "volume=%u"},
+	{Opt_partition, "partition=%u"},
+	{Opt_fileset, "fileset=%u"},
+	{Opt_rootdir, "rootdir=%u"},
+	{Opt_utf8, "utf8"},
+	{Opt_iocharset, "iocharset=%s"},
+	{Opt_err, NULL}
+};
+
+static int
+udf_parse_options(char *options, struct udf_options *uopt)
+{
+	char *p;
+	int option;
+
+	uopt->novrs = 0;
+	uopt->blocksize = 2048;
+	uopt->partition = 0xFFFF;
+	uopt->session = 0xFFFFFFFF;
+	uopt->lastblock = 0;
+	uopt->anchor = 0;
+	uopt->volume = 0xFFFFFFFF;
+	uopt->rootdir = 0xFFFFFFFF;
+	uopt->fileset = 0xFFFFFFFF;
+	uopt->nls_map = NULL;
+
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL)
+	{
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token)
+		{
+			case Opt_novrs:
+				uopt->novrs = 1;
+				break;
+			case Opt_bs:
+				if (match_int(&args[0], &option))
+					return 0;
+				uopt->blocksize = option;
+				break;
+			case Opt_unhide:
+				uopt->flags |= (1 << UDF_FLAG_UNHIDE);
+				break;
+			case Opt_undelete:
+				uopt->flags |= (1 << UDF_FLAG_UNDELETE);
+				break;
+			case Opt_noadinicb:
+				uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
+				break;
+			case Opt_adinicb:
+				uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
+				break;
+			case Opt_shortad:
+				uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
+				break;
+			case Opt_longad:
+				uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
+				break;
+			case Opt_gid:
+				if (match_int(args, &option))
+					return 0;
+				uopt->gid = option;
+				break;
+			case Opt_uid:
+				if (match_int(args, &option))
+					return 0;
+				uopt->uid = option;
+				break;
+			case Opt_umask:
+				if (match_octal(args, &option))
+					return 0;
+				uopt->umask = option;
+				break;
+			case Opt_nostrict:
+				uopt->flags &= ~(1 << UDF_FLAG_STRICT);
+				break;
+			case Opt_session:
+				if (match_int(args, &option))
+					return 0;
+				uopt->session = option;
+				break;
+			case Opt_lastblock:
+				if (match_int(args, &option))
+					return 0;
+				uopt->lastblock = option;
+				break;
+			case Opt_anchor:
+				if (match_int(args, &option))
+					return 0;
+				uopt->anchor = option;
+				break;
+			case Opt_volume:
+				if (match_int(args, &option))
+					return 0;
+				uopt->volume = option;
+				break;
+			case Opt_partition:
+				if (match_int(args, &option))
+					return 0;
+				uopt->partition = option;
+				break;
+			case Opt_fileset:
+				if (match_int(args, &option))
+					return 0;
+				uopt->fileset = option;
+				break;
+			case Opt_rootdir:
+				if (match_int(args, &option))
+					return 0;
+				uopt->rootdir = option;
+				break;
+			case Opt_utf8:
+				uopt->flags |= (1 << UDF_FLAG_UTF8);
+				break;
+#ifdef CONFIG_UDF_NLS
+			case Opt_iocharset:
+				uopt->nls_map = load_nls(args[0].from);
+				uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+				break;
+#endif
+			default:
+				printk(KERN_ERR "udf: bad mount option \"%s\" "
+						"or missing value\n", p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void
+udf_write_super(struct super_block *sb)
+{
+	lock_kernel();
+	if (!(sb->s_flags & MS_RDONLY))
+		udf_open_lvid(sb);
+	sb->s_dirt = 0;
+	unlock_kernel();
+}
+
+static int
+udf_remount_fs(struct super_block *sb, int *flags, char *options)
+{
+	struct udf_options uopt;
+
+	uopt.flags = UDF_SB(sb)->s_flags;
+	uopt.uid   = UDF_SB(sb)->s_uid;
+	uopt.gid   = UDF_SB(sb)->s_gid;
+	uopt.umask = UDF_SB(sb)->s_umask;
+
+	if ( !udf_parse_options(options, &uopt) )
+		return -EINVAL;
+
+	UDF_SB(sb)->s_flags = uopt.flags;
+	UDF_SB(sb)->s_uid   = uopt.uid;
+	UDF_SB(sb)->s_gid   = uopt.gid;
+	UDF_SB(sb)->s_umask = uopt.umask;
+
+	if (UDF_SB_LVIDBH(sb)) {
+		int write_rev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
+		if (write_rev > UDF_MAX_WRITE_VERSION)
+			*flags |= MS_RDONLY;
+	}
+
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+	if (*flags & MS_RDONLY)
+		udf_close_lvid(sb);
+	else
+		udf_open_lvid(sb);
+
+	return 0;
+}
+
+/*
+ * udf_set_blocksize
+ *
+ * PURPOSE
+ *	Set the block size to be used in all transfers.
+ *
+ * DESCRIPTION
+ *	To allow room for a DMA transfer, it is best to guess big when unsure.
+ *	This routine picks 2048 bytes as the blocksize when guessing. This
+ *	should be adequate until devices with larger block sizes become common.
+ *
+ *	Note that the Linux kernel can currently only deal with blocksizes of
+ *	512, 1024, 2048, 4096, and 8192 bytes.
+ *
+ * PRE-CONDITIONS
+ *	sb			Pointer to _locked_ superblock.
+ *
+ * POST-CONDITIONS
+ *	sb->s_blocksize		Blocksize.
+ *	sb->s_blocksize_bits	log2 of blocksize.
+ *	<return>	sb->s_blocksize	Blocksize is valid.
+ *	<return>	0		Blocksize is invalid.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static int
+udf_set_blocksize(struct super_block *sb, int bsize)
+{
+	if (!sb_min_blocksize(sb, bsize)) {
+		udf_debug("Bad block size (%d)\n", bsize);
+		printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
+		return 0;
+	}
+	return sb->s_blocksize;
+}
+
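+/*
+ * Scan the Volume Recognition Sequence, which starts 32768 bytes into
+ * the session.  Returns the sector of an NSR02 or NSR03 descriptor if
+ * one is found, -1 if the sequence could not be read at all, and 0
+ * otherwise.
+ */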
+static int
+udf_vrs(struct super_block *sb, int silent)
+{
+	struct volStructDesc *vsd = NULL;
+	int sector = 32768;
+	int sectorsize;
+	struct buffer_head *bh = NULL;
+	int iso9660=0;
+	int nsr02=0;
+	int nsr03=0;
+
+	/* Block size must be a multiple of 512 */
+	if (sb->s_blocksize & 511)
+		return 0;
+
+	if (sb->s_blocksize < sizeof(struct volStructDesc))
+		sectorsize = sizeof(struct volStructDesc);
+	else
+		sectorsize = sb->s_blocksize;
+
+	sector += (UDF_SB_SESSION(sb) << sb->s_blocksize_bits);
+
+	udf_debug("Starting at sector %u (%ld byte sectors)\n",
+		(sector >> sb->s_blocksize_bits), sb->s_blocksize);
+	/* Process the sequence (if applicable) */
+	for (;!nsr02 && !nsr03; sector += sectorsize)
+	{
+		/* Read a block */
+		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
+		if (!bh)
+			break;
+
+		/* Look for ISO  descriptors */
+		vsd = (struct volStructDesc *)(bh->b_data +
+			(sector & (sb->s_blocksize - 1)));
+
+		if (vsd->stdIdent[0] == 0)
+		{
+			udf_release_data(bh);
+			break;
+		}
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN))
+		{
+			iso9660 = sector;
+			switch (vsd->structType)
+			{
+				case 0: 
+					udf_debug("ISO9660 Boot Record found\n");
+					break;
+				case 1: 
+					udf_debug("ISO9660 Primary Volume Descriptor found\n");
+					break;
+				case 2: 
+					udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
+					break;
+				case 3: 
+					udf_debug("ISO9660 Volume Partition Descriptor found\n");
+					break;
+				case 255: 
+					udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
+					break;
+				default: 
+					udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
+					break;
+			}
+		}
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
+		{
+		}
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN))
+		{
+			udf_release_data(bh);
+			break;
+		}
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
+		{
+			nsr02 = sector;
+		}
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
+		{
+			nsr03 = sector;
+		}
+		udf_release_data(bh);
+	}
+
+	if (nsr03)
+		return nsr03;
+	else if (nsr02)
+		return nsr02;
+	else if (sector - (UDF_SB_SESSION(sb) << sb->s_blocksize_bits) == 32768)
+		return -1;
+	else
+		return 0;
+}
+
+/*
+ * udf_find_anchor
+ *
+ * PURPOSE
+ *	Find an anchor volume descriptor.
+ *
+ * PRE-CONDITIONS
+ *	sb			Pointer to _locked_ superblock.
+ *	lastblock		Last block on media.
+ *
+ * POST-CONDITIONS
+ *	UDF_SB_ANCHOR(sb) and UDF_SB_LASTBLOCK(sb) are updated with the
+ *	anchor locations (if any) that were found.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static void
+udf_find_anchor(struct super_block *sb)
+{
+	int lastblock = UDF_SB_LASTBLOCK(sb);
+	struct buffer_head *bh = NULL;
+	uint16_t ident;
+	uint32_t location;
+	int i;
+
+	if (lastblock)
+	{
+		int varlastblock = udf_variable_to_fixed(lastblock);
+		int last[] =  { lastblock, lastblock - 2,
+				lastblock - 150, lastblock - 152,
+				varlastblock, varlastblock - 2,
+				varlastblock - 150, varlastblock - 152 };
+
+		lastblock = 0;
+
+		/* Search for an anchor volume descriptor pointer */
+
+		/*  according to spec, anchor is in either:
+		 *     block 256
+		 *     lastblock-256
+		 *     lastblock
+		 *  however, if the disc isn't closed, it could be 512 */
+
+		for (i=0; (!lastblock && i<sizeof(last)/sizeof(int)); i++)
+		{
+			if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
+			{
+				ident = location = 0;
+			}
+			else
+			{
+				ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
+				location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+				udf_release_data(bh);
+			}
+	
+			if (ident == TAG_IDENT_AVDP)
+			{
+				if (location == last[i] - UDF_SB_SESSION(sb))
+				{
+					lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb);
+					UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb);
+				}
+				else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb))
+				{
+					UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+					lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb);
+					UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb);
+				}
+				else
+					udf_debug("Anchor found at block %d, location mismatch %d.\n",
+						last[i], location);
+			}
+			else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE)
+			{
+				lastblock = last[i];
+				UDF_SB_ANCHOR(sb)[3] = 512;
+			}
+			else
+			{
+				if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
+				{
+					ident = location = 0;
+				}
+				else
+				{
+					ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
+					location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+					udf_release_data(bh);
+				}
+	
+				if (ident == TAG_IDENT_AVDP &&
+					location == last[i] - 256 - UDF_SB_SESSION(sb))
+				{
+					lastblock = last[i];
+					UDF_SB_ANCHOR(sb)[1] = last[i] - 256;
+				}
+				else
+				{
+					if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
+					{
+						ident = location = 0;
+					}
+					else
+					{
+						ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
+						location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+						udf_release_data(bh);
+					}
+	
+					if (ident == TAG_IDENT_AVDP &&
+						location == udf_variable_to_fixed(last[i]) - 256)
+					{
+						UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+						lastblock = udf_variable_to_fixed(last[i]);
+						UDF_SB_ANCHOR(sb)[1] = lastblock - 256;
+					}
+				}
+			}
+		}
+	}
+
+	if (!lastblock)
+	{
+		/* We haven't found the lastblock. Check block 312 */
+		if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
+		{
+			ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
+			location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+			udf_release_data(bh);
+
+			if (ident == TAG_IDENT_AVDP && location == 256)
+				UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+		}
+	}
+
+	for (i=0; i<sizeof(UDF_SB_ANCHOR(sb))/sizeof(int); i++)
+	{
+		if (UDF_SB_ANCHOR(sb)[i])
+		{
+			if (!(bh = udf_read_tagged(sb,
+				UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
+			{
+				UDF_SB_ANCHOR(sb)[i] = 0;
+			}
+			else
+			{
+				udf_release_data(bh);
+				if ((ident != TAG_IDENT_AVDP) && (i ||
+					(ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE)))
+				{
+					UDF_SB_ANCHOR(sb)[i] = 0;
+				}
+			}
+		}
+	}
+
+	UDF_SB_LASTBLOCK(sb) = lastblock;
+}
+
+static int 
+udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root)
+{
+	struct buffer_head *bh = NULL;
+	long lastblock;
+	uint16_t ident;
+
+	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
+		fileset->partitionReferenceNum != 0xFFFF)
+	{
+		bh = udf_read_ptagged(sb, *fileset, 0, &ident);
+
+		if (!bh)
+			return 1;
+		else if (ident != TAG_IDENT_FSD)
+		{
+			udf_release_data(bh);
+			return 1;
+		}
+			
+	}
+
+	if (!bh) /* Search backwards through the partitions */
+	{
+		kernel_lb_addr newfileset;
+
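+		/* NOTE: the backward search below is currently short-circuited
+		 * by this unconditional return. */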
+		return 1;
+		
+		for (newfileset.partitionReferenceNum=UDF_SB_NUMPARTS(sb)-1;
+			(newfileset.partitionReferenceNum != 0xFFFF &&
+				fileset->logicalBlockNum == 0xFFFFFFFF &&
+				fileset->partitionReferenceNum == 0xFFFF);
+			newfileset.partitionReferenceNum--)
+		{
+			lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum);
+			newfileset.logicalBlockNum = 0;
+
+			do
+			{
+				bh = udf_read_ptagged(sb, newfileset, 0, &ident);
+				if (!bh)
+				{
+					newfileset.logicalBlockNum ++;
+					continue;
+				}
+
+				switch (ident)
+				{
+					case TAG_IDENT_SBD:
+					{
+						struct spaceBitmapDesc *sp;
+						sp = (struct spaceBitmapDesc *)bh->b_data;
+						newfileset.logicalBlockNum += 1 +
+							((le32_to_cpu(sp->numOfBytes) + sizeof(struct spaceBitmapDesc) - 1)
+								>> sb->s_blocksize_bits);
+						udf_release_data(bh);
+						break;
+					}
+					case TAG_IDENT_FSD:
+					{
+						*fileset = newfileset;
+						break;
+					}
+					default:
+					{
+						newfileset.logicalBlockNum ++;
+						udf_release_data(bh);
+						bh = NULL;
+						break;
+					}
+				}
+			}
+			while (newfileset.logicalBlockNum < lastblock &&
+				fileset->logicalBlockNum == 0xFFFFFFFF &&
+				fileset->partitionReferenceNum == 0xFFFF);
+		}
+	}
+
+	if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
+		fileset->partitionReferenceNum != 0xFFFF) && bh)
+	{
+		udf_debug("Fileset at block=%d, partition=%d\n",
+			fileset->logicalBlockNum, fileset->partitionReferenceNum);
+
+		UDF_SB_PARTITION(sb) = fileset->partitionReferenceNum;
+		udf_load_fileset(sb, bh, root);
+		udf_release_data(bh);
+		return 0;
+	}
+	return 1;
+}
+
+static void 
+udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
+{
+	struct primaryVolDesc *pvoldesc;
+	time_t recording;
+	long recording_usec;
+	struct ustr instr;
+	struct ustr outstr;
+
+	pvoldesc = (struct primaryVolDesc *)bh->b_data;
+
+	if ( udf_stamp_to_time(&recording, &recording_usec,
+		lets_to_cpu(pvoldesc->recordingDateAndTime)) )
+	{
+		kernel_timestamp ts;
+		ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
+		udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n",
+			recording, recording_usec,
+			ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.typeAndTimezone);
+		UDF_SB_RECORDTIME(sb).tv_sec = recording;
+		UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000;
+	}
+
+	if ( !udf_build_ustr(&instr, pvoldesc->volIdent, 32) )
+	{
+		if (udf_CS0toUTF8(&outstr, &instr))
+		{
+			strncpy( UDF_SB_VOLIDENT(sb), outstr.u_name,
+				outstr.u_len > 31 ? 31 : outstr.u_len);
+			udf_debug("volIdent[] = '%s'\n", UDF_SB_VOLIDENT(sb));
+		}
+	}
+
+	if ( !udf_build_ustr(&instr, pvoldesc->volSetIdent, 128) )
+	{
+		if (udf_CS0toUTF8(&outstr, &instr))
+			udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
+	}
+}
+
+static void 
+udf_load_fileset(struct super_block *sb, struct buffer_head *bh, kernel_lb_addr *root)
+{
+	struct fileSetDesc *fset;
+
+	fset = (struct fileSetDesc *)bh->b_data;
+
+	*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
+
+	UDF_SB_SERIALNUM(sb) = le16_to_cpu(fset->descTag.tagSerialNum);
+
+	udf_debug("Rootdir at block=%d, partition=%d\n", 
+		root->logicalBlockNum, root->partitionReferenceNum);
+}
+
+static void 
+udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+{
+	struct partitionDesc *p;
+	int i;
+
+	p = (struct partitionDesc *)bh->b_data;
+
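+	/* Match this Partition Descriptor against the partition maps in the
+	 * superblock by partition number, then pick up its space tables and
+	 * bitmaps from the partition header. */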
+	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
+	{
+		udf_debug("Searching map: (%d == %d)\n", 
+			UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber));
+		if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber))
+		{
+			UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */
+			UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation);
+			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY)
+				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_READ_ONLY;
+			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_WRITE_ONCE)
+				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_WRITE_ONCE;
+			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_REWRITABLE)
+				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_REWRITABLE;
+			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_OVERWRITABLE)
+				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE;
+
+			if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) ||
+				!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
+			{
+				struct partitionHeaderDesc *phd;
+
+				phd = (struct partitionHeaderDesc *)(p->partitionContentsUse);
+				if (phd->unallocSpaceTable.extLength)
+				{
+					kernel_lb_addr loc = { le32_to_cpu(phd->unallocSpaceTable.extPosition), i };
+
+					UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
+						udf_iget(sb, loc);
+					UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
+					udf_debug("unallocSpaceTable (part %d) @ %ld\n",
+						i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
+				}
+				if (phd->unallocSpaceBitmap.extLength)
+				{
+					UDF_SB_ALLOC_BITMAP(sb, i, s_uspace);
+					if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL)
+					{
+						UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength =
+							le32_to_cpu(phd->unallocSpaceBitmap.extLength);
+						UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition =
+							le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
+						UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP;
+						udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
+							i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition);
+					}
+				}
+				if (phd->partitionIntegrityTable.extLength)
+					udf_debug("partitionIntegrityTable (part %d)\n", i);
+				if (phd->freedSpaceTable.extLength)
+				{
+					kernel_lb_addr loc = { le32_to_cpu(phd->freedSpaceTable.extPosition), i };
+
+					UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
+						udf_iget(sb, loc);
+					UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
+					udf_debug("freedSpaceTable (part %d) @ %ld\n",
+						i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
+				}
+				if (phd->freedSpaceBitmap.extLength)
+				{
+					UDF_SB_ALLOC_BITMAP(sb, i, s_fspace);
+					if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL)
+					{
+						UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength =
+							le32_to_cpu(phd->freedSpaceBitmap.extLength);
+						UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition =
+							le32_to_cpu(phd->freedSpaceBitmap.extPosition);
+						UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP;
+						udf_debug("freedSpaceBitmap (part %d) @ %d\n",
+							i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition);
+					}
+				}
+			}
+			break;
+		}
+	}
+	if (i == UDF_SB_NUMPARTS(sb))
+	{
+		udf_debug("Partition (%d) not found in partition map\n", le16_to_cpu(p->partitionNumber));
+	}
+	else
+	{
+		udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n",
+			le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
+			UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
+	}
+}
+
+static int 
+udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_addr *fileset)
+{
+	struct logicalVolDesc *lvd;
+	int i, j, offset;
+	uint8_t type;
+
+	lvd = (struct logicalVolDesc *)bh->b_data;
+
+	UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps));
+
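+	/* Walk the partition map table: type 1 maps describe plain physical
+	 * partitions; type 2 maps are identified by their entity identifier
+	 * as either virtual (VAT-based) or sparable partitions. */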
+	for (i=0,offset=0;
+		 i<UDF_SB_NUMPARTS(sb) && offset<le32_to_cpu(lvd->mapTableLength);
+		 i++,offset+=((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength)
+	{
+		type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType;
+		if (type == 1)
+		{
+			struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]);
+			UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15;
+			UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum);
+			UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum);
+			UDF_SB_PARTFUNC(sb,i) = NULL;
+		}
+		else if (type == 2)
+		{
+			struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]);
+			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL)))
+			{
+				if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150)
+				{
+					UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15;
+					UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15;
+				}
+				else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200)
+				{
+					UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20;
+					UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20;
+				}
+			}
+			else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE)))
+			{
+				uint32_t loc;
+				uint16_t ident;
+				struct sparingTable *st;
+				struct sparablePartitionMap *spm = (struct sparablePartitionMap *)&(lvd->partitionMaps[offset]);
+
+				UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15;
+				UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength);
+				for (j=0; j<spm->numSparingTables; j++)
+				{
+					loc = le32_to_cpu(spm->locSparingTable[j]);
+					UDF_SB_TYPESPAR(sb,i).s_spar_map[j] =
+						udf_read_tagged(sb, loc, loc, &ident);
+					if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
+					{
+						st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data;
+						if (ident != 0 ||
+							strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING)))
+						{
+							udf_release_data(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]);
+							UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL;
+						}
+					}
+				}
+				UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15;
+			}
+			else
+			{
+				udf_debug("Unknown ident: %s\n", upm2->partIdent.ident);
+				continue;
+			}
+			UDF_SB_PARTVSN(sb,i) = le16_to_cpu(upm2->volSeqNum);
+			UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum);
+		}
+		udf_debug("Partition (%d:%d) type %d on volume %d\n",
+			i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i));
+	}
+
+	if (fileset)
+	{
+		long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
+
+		*fileset = lelb_to_cpu(la->extLocation);
+		udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
+			fileset->logicalBlockNum,
+			fileset->partitionReferenceNum);
+	}
+	if (lvd->integritySeqExt.extLength)
+		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
+	return 0;
+}
+
+/*
+ * udf_load_logicalvolint
+ *
+ */
+static void
+udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
+{
+	struct buffer_head *bh = NULL;
+	uint16_t ident;
+
+	while (loc.extLength > 0 &&
+		(bh = udf_read_tagged(sb, loc.extLocation,
+			loc.extLocation, &ident)) &&
+		ident == TAG_IDENT_LVID)
+	{
+		UDF_SB_LVIDBH(sb) = bh;
+		
+		if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength)
+			udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt));
+		
+		if (UDF_SB_LVIDBH(sb) != bh)
+			udf_release_data(bh);
+		loc.extLength -= sb->s_blocksize;
+		loc.extLocation ++;
+	}
+	if (UDF_SB_LVIDBH(sb) != bh)
+		udf_release_data(bh);
+}
+
+/*
+ * udf_process_sequence
+ *
+ * PURPOSE
+ *	Process a main/reserve volume descriptor sequence.
+ *
+ * PRE-CONDITIONS
+ *	sb			Pointer to _locked_ superblock.
+ *	block			First block of first extent of the sequence.
+ *	lastblock		Last block of first extent of the sequence.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static  int
+udf_process_sequence(struct super_block *sb, long block, long lastblock, kernel_lb_addr *fileset)
+{
+	struct buffer_head *bh = NULL;
+	struct udf_vds_record vds[VDS_POS_LENGTH];
+	struct generic_desc *gd;
+	struct volDescPtr *vdp;
+	int done=0;
+	int i,j;
+	uint32_t vdsn;
+	uint16_t ident;
+	long next_s = 0, next_e = 0;
+
+	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
+
+	/* Read the main descriptor sequence */
+	for (;(!done && block <= lastblock); block++)
+	{
+
+		bh = udf_read_tagged(sb, block, block, &ident);
+		if (!bh) 
+			break;
+
+		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
+		gd = (struct generic_desc *)bh->b_data;
+		vdsn = le32_to_cpu(gd->volDescSeqNum);
+		switch (ident)
+		{
+			case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
+				if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum)
+				{
+					vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn;
+					vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
+				}
+				break;
+			case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
+				if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum)
+				{
+					vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
+					vds[VDS_POS_VOL_DESC_PTR].block = block;
+
+					vdp = (struct volDescPtr *)bh->b_data;
+					next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
+					next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength);
+					next_e = next_e >> sb->s_blocksize_bits;
+					next_e += next_s;
+				}
+				break;
+			case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
+				if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum)
+				{
+					vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn;
+					vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
+				}
+				break;
+			case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
+				if (!vds[VDS_POS_PARTITION_DESC].block)
+					vds[VDS_POS_PARTITION_DESC].block = block;
+				break;
+			case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
+				if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum)
+				{
+					vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn;
+					vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
+				}
+				break;
+			case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
+				if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum)
+				{
+					vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn;
+					vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
+				}
+				break;
+			case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
+				vds[VDS_POS_TERMINATING_DESC].block = block;
+				if (next_e)
+				{
+					block = next_s;
+					lastblock = next_e;
+					next_s = next_e = 0;
+				}
+				else
+					done = 1;
+				break;
+		}
+		udf_release_data(bh);
+	}
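+	/* Second pass: read back the blocks recorded above and load the
+	 * primary volume, logical volume and partition descriptors. */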
+	for (i=0; i<VDS_POS_LENGTH; i++)
+	{
+		if (vds[i].block)
+		{
+			bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident);
+
+			if (i == VDS_POS_PRIMARY_VOL_DESC)
+				udf_load_pvoldesc(sb, bh);
+			else if (i == VDS_POS_LOGICAL_VOL_DESC)
+				udf_load_logicalvol(sb, bh, fileset);
+			else if (i == VDS_POS_PARTITION_DESC)
+			{
+				struct buffer_head *bh2 = NULL;
+				udf_load_partdesc(sb, bh);
+				for (j=vds[i].block+1; j<vds[VDS_POS_TERMINATING_DESC].block; j++)
+				{
+					bh2 = udf_read_tagged(sb, j, j, &ident);
+					gd = (struct generic_desc *)bh2->b_data;
+					if (ident == TAG_IDENT_PD)
+						udf_load_partdesc(sb, bh2);
+					udf_release_data(bh2);
+				}
+			}
+			udf_release_data(bh);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * udf_check_valid()
+ */
+static int
+udf_check_valid(struct super_block *sb, int novrs, int silent)
+{
+	long block;
+
+	if (novrs)
+	{
+		udf_debug("Validity check skipped because of novrs option\n");
+		return 0;
+	}
+	/* Check that it is NSR02 compliant */
+	/* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
+	else if ((block = udf_vrs(sb, silent)) == -1)
+	{
+		udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+		if (!UDF_SB_LASTBLOCK(sb))
+			UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
+		return 0;
+	}
+	else 
+		return !block;
+}
+
+static int
+udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
+{
+	struct anchorVolDescPtr *anchor;
+	uint16_t ident;
+	struct buffer_head *bh;
+	long main_s, main_e, reserve_s, reserve_e;
+	int i, j;
+
+	if (!sb)
+		return 1;
+
+	for (i=0; i<sizeof(UDF_SB_ANCHOR(sb))/sizeof(int); i++)
+	{
+		if (UDF_SB_ANCHOR(sb)[i] && (bh = udf_read_tagged(sb,
+			UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
+		{
+			anchor = (struct anchorVolDescPtr *)bh->b_data;
+
+			/* Locate the main sequence */
+			main_s = le32_to_cpu( anchor->mainVolDescSeqExt.extLocation );
+			main_e = le32_to_cpu( anchor->mainVolDescSeqExt.extLength );
+			main_e = main_e >> sb->s_blocksize_bits;
+			main_e += main_s;
+	
+			/* Locate the reserve sequence */
+			reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
+			reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
+			reserve_e = reserve_e >> sb->s_blocksize_bits;
+			reserve_e += reserve_s;
+
+			udf_release_data(bh);
+
+			/* Process the main & reserve sequences */
+			/* responsible for finding the PartitionDesc(s) */
+			if (!(udf_process_sequence(sb, main_s, main_e, fileset) &&
+				udf_process_sequence(sb, reserve_s, reserve_e, fileset)))
+			{
+				break;
+			}
+		}
+	}
+
+	if (i == sizeof(UDF_SB_ANCHOR(sb))/sizeof(int))
+	{
+		udf_debug("No Anchor block found\n");
+		return 1;
+	}
+	else
+		udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]);
+
+	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
+	{
+		switch (UDF_SB_PARTTYPE(sb, i))
+		{
+			case UDF_VIRTUAL_MAP15:
+			case UDF_VIRTUAL_MAP20:
+			{
+				kernel_lb_addr ino;
+
+				if (!UDF_SB_LASTBLOCK(sb))
+				{
+					UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
+					udf_find_anchor(sb);
+				}
+
+				if (!UDF_SB_LASTBLOCK(sb))
+				{
+					udf_debug("Unable to determine Lastblock (For Virtual Partition)\n");
+					return 1;
+				}
+
+				for (j=0; j<UDF_SB_NUMPARTS(sb); j++)
+				{
+					if (j != i &&
+						UDF_SB_PARTVSN(sb,i) == UDF_SB_PARTVSN(sb,j) &&
+						UDF_SB_PARTNUM(sb,i) == UDF_SB_PARTNUM(sb,j))
+					{
+						ino.partitionReferenceNum = j;
+						ino.logicalBlockNum = UDF_SB_LASTBLOCK(sb) -
+							UDF_SB_PARTROOT(sb,j);
+						break;
+					}
+				}
+
+				if (j == UDF_SB_NUMPARTS(sb))
+					return 1;
+
+				if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino)))
+					return 1;
+
+				if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP15)
+				{
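+					/* UDF 1.50 VAT: 32-bit entries from the start of
+					 * the file data up to a 36-byte trailer (regid plus
+					 * the previous VAT ICB address). */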
+					UDF_SB_TYPEVIRT(sb,i).s_start_offset = udf_ext0_offset(UDF_SB_VAT(sb));
+					UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size - 36) >> 2;
+				}
+				else if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP20)
+				{
+					struct buffer_head *bh = NULL;
+					uint32_t pos;
+
+					pos = udf_block_map(UDF_SB_VAT(sb), 0);
+					bh = sb_bread(sb, pos);
+					UDF_SB_TYPEVIRT(sb,i).s_start_offset =
+						le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
+							udf_ext0_offset(UDF_SB_VAT(sb));
+					UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size -
+						UDF_SB_TYPEVIRT(sb,i).s_start_offset) >> 2;
+					udf_release_data(bh);
+				}
+				UDF_SB_PARTROOT(sb,i) = udf_get_pblock(sb, 0, i, 0);
+				UDF_SB_PARTLEN(sb,i) = UDF_SB_PARTLEN(sb,ino.partitionReferenceNum);
+			}
+		}
+	}
+	return 0;
+}
+
+static void udf_open_lvid(struct super_block *sb)
+{
+	if (UDF_SB_LVIDBH(sb))
+	{
+		int i;
+		kernel_timestamp cpu_time;
+
+		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
+			UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
+		UDF_SB_LVID(sb)->integrityType = LVID_INTEGRITY_TYPE_OPEN;
+
+		UDF_SB_LVID(sb)->descTag.descCRC =
+			cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
+			le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+
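+		/* Recompute the tag checksum over the 16 tag bytes, skipping
+		 * byte 4, which holds the checksum itself. */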
+		UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
+		for (i=0; i<16; i++)
+			if (i != 4)
+				UDF_SB_LVID(sb)->descTag.tagChecksum +=
+					((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
+
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+}
+
+static void udf_close_lvid(struct super_block *sb)
+{
+	if (UDF_SB_LVIDBH(sb) &&
+		UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN)
+	{
+		int i;
+		kernel_timestamp cpu_time;
+
+		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
+			UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
+		if (UDF_MAX_WRITE_VERSION > le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev))
+			UDF_SB_LVIDIU(sb)->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
+		if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev))
+			UDF_SB_LVIDIU(sb)->minUDFReadRev = cpu_to_le16(UDF_SB_UDFREV(sb));
+		if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev))
+			UDF_SB_LVIDIU(sb)->minUDFWriteRev = cpu_to_le16(UDF_SB_UDFREV(sb));
+		UDF_SB_LVID(sb)->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
+
+		UDF_SB_LVID(sb)->descTag.descCRC =
+			cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
+			le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+
+		UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
+		for (i=0; i<16; i++)
+			if (i != 4)
+				UDF_SB_LVID(sb)->descTag.tagChecksum +=
+					((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
+
+		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	}
+}
+
+/*
+ * udf_fill_super
+ *
+ * PURPOSE
+ *	Complete the specified super block.
+ *
+ * PRE-CONDITIONS
+ *	sb			Pointer to superblock to complete - never NULL.
+ *	sb->s_dev		Device to read superblock from.
+ *	options			Pointer to mount options.
+ *	silent			Silent flag.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static int udf_fill_super(struct super_block *sb, void *options, int silent)
+{
+	int i;
+	struct inode *inode=NULL;
+	struct udf_options uopt;
+	kernel_lb_addr rootdir, fileset;
+	struct udf_sb_info *sbi;
+
+	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
+	uopt.uid = -1;
+	uopt.gid = -1;
+	uopt.umask = 0;
+
+	sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+	sb->s_fs_info = sbi;
+	memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
+
+	init_MUTEX(&sbi->s_alloc_sem);
+
+	if (!udf_parse_options((char *)options, &uopt))
+		goto error_out;
+
+	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
+	    uopt.flags & (1 << UDF_FLAG_NLS_MAP))
+	{
+		udf_error(sb, "udf_read_super",
+			"utf8 cannot be combined with iocharset\n");
+		goto error_out;
+	}
+#ifdef CONFIG_UDF_NLS
+	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map)
+	{
+		uopt.nls_map = load_nls_default();
+		if (!uopt.nls_map)
+			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
+		else
+			udf_debug("Using default NLS map\n");
+	}
+#endif
+	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
+		uopt.flags |= (1 << UDF_FLAG_UTF8);
+
+	fileset.logicalBlockNum = 0xFFFFFFFF;
+	fileset.partitionReferenceNum = 0xFFFF;
+
+	UDF_SB(sb)->s_flags = uopt.flags;
+	UDF_SB(sb)->s_uid = uopt.uid;
+	UDF_SB(sb)->s_gid = uopt.gid;
+	UDF_SB(sb)->s_umask = uopt.umask;
+	UDF_SB(sb)->s_nls_map = uopt.nls_map;
+
+	/* Set the block size for all transfers */
+	if (!udf_set_blocksize(sb, uopt.blocksize))
+		goto error_out;
+
+	if ( uopt.session == 0xFFFFFFFF )
+		UDF_SB_SESSION(sb) = udf_get_last_session(sb);
+	else
+		UDF_SB_SESSION(sb) = uopt.session;
+
+	udf_debug("Multi-session=%d\n", UDF_SB_SESSION(sb));
+
+	UDF_SB_LASTBLOCK(sb) = uopt.lastblock;
+	UDF_SB_ANCHOR(sb)[0] = UDF_SB_ANCHOR(sb)[1] = 0;
+	UDF_SB_ANCHOR(sb)[2] = uopt.anchor;
+	UDF_SB_ANCHOR(sb)[3] = 256;
+
+	if (udf_check_valid(sb, uopt.novrs, silent)) /* read volume recognition sequences */
+	{
+		printk("UDF-fs: No VRS found\n");
+ 		goto error_out;
+	}
+
+	udf_find_anchor(sb);
+
+	/* Fill in the rest of the superblock */
+	sb->s_op = &udf_sb_ops;
+	sb->dq_op = NULL;
+	sb->s_dirt = 0;
+	sb->s_magic = UDF_SUPER_MAGIC;
+	sb->s_time_gran = 1000;
+
+	if (udf_load_partition(sb, &fileset))
+	{
+		printk("UDF-fs: No partition found (1)\n");
+		goto error_out;
+	}
+
+	udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb));
+
+	if ( UDF_SB_LVIDBH(sb) )
+	{
+		uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev);
+		uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
+		/* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */
+
+		if (minUDFReadRev > UDF_MAX_READ_VERSION)
+		{
+			printk("UDF-fs: minUDFReadRev=%x (max is %x)\n",
+				le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev),
+				UDF_MAX_READ_VERSION);
+			goto error_out;
+		}
+		else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
+		{
+			sb->s_flags |= MS_RDONLY;
+		}
+
+		UDF_SB_UDFREV(sb) = minUDFWriteRev;
+
+		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
+			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
+		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
+			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
+	}
+
+	if ( !UDF_SB_NUMPARTS(sb) )
+	{
+		printk("UDF-fs: No partition found (2)\n");
+		goto error_out;
+	}
+
+	if ( udf_find_fileset(sb, &fileset, &rootdir) )
+	{
+		printk("UDF-fs: No fileset found\n");
+		goto error_out;
+	}
+
+	if (!silent)
+	{
+		kernel_timestamp ts;
+		udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb));
+		udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
+			UDFFS_VERSION, UDFFS_DATE,
+			UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
+			ts.typeAndTimezone);
+	}
+	if (!(sb->s_flags & MS_RDONLY))
+		udf_open_lvid(sb);
+
+	/* Assign the root inode */
+	/* assign inodes by physical block number */
+	/* perhaps it's not extensible enough, but for now ... */
+	inode = udf_iget(sb, rootdir); 
+	if (!inode)
+	{
+		printk("UDF-fs: Error in udf_iget, block=%d, partition=%d\n",
+			rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+		goto error_out;
+	}
+
+	/* Allocate a dentry for the root inode */
+	sb->s_root = d_alloc_root(inode);
+	if (!sb->s_root)
+	{
+		printk("UDF-fs: Couldn't allocate root dentry\n");
+		iput(inode);
+		goto error_out;
+	}
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	return 0;
+
+error_out:
+	if (UDF_SB_VAT(sb))
+		iput(UDF_SB_VAT(sb));
+	if (UDF_SB_NUMPARTS(sb))
+	{
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
+			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
+			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
+		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
+		{
+			for (i=0; i<4; i++)
+				udf_release_data(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
+		}
+	}
+#ifdef CONFIG_UDF_NLS
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+		unload_nls(UDF_SB(sb)->s_nls_map);
+#endif
+	if (!(sb->s_flags & MS_RDONLY))
+		udf_close_lvid(sb);
+	udf_release_data(UDF_SB_LVIDBH(sb));
+	UDF_SB_FREE(sb);
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return -EINVAL;
+}
+
+void udf_error(struct super_block *sb, const char *function,
+	const char *fmt, ...)
+{
+	va_list args;
+
+	if (!(sb->s_flags & MS_RDONLY))
+	{
+		/* mark sb error */
+		sb->s_dirt = 1;
+	}
+	va_start(args, fmt);
+	vsprintf(error_buf, fmt, args);
+	va_end(args);
+	printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
+void udf_warning(struct super_block *sb, const char *function,
+	const char *fmt, ...)
+{
+	va_list args;
+
+	va_start (args, fmt);
+	vsprintf(error_buf, fmt, args);
+	va_end(args);
+	printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
+/*
+ * udf_put_super
+ *
+ * PURPOSE
+ *	Prepare for destruction of the superblock.
+ *
+ * DESCRIPTION
+ *	Called before the filesystem is unmounted.
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static void
+udf_put_super(struct super_block *sb)
+{
+	int i;
+
+	if (UDF_SB_VAT(sb))
+		iput(UDF_SB_VAT(sb));
+	if (UDF_SB_NUMPARTS(sb))
+	{
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
+			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
+			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
+		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
+		{
+			for (i=0; i<4; i++)
+				udf_release_data(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
+		}
+	}
+#ifdef CONFIG_UDF_NLS
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+		unload_nls(UDF_SB(sb)->s_nls_map);
+#endif
+	if (!(sb->s_flags & MS_RDONLY))
+		udf_close_lvid(sb);
+	udf_release_data(UDF_SB_LVIDBH(sb));
+	UDF_SB_FREE(sb);
+	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
+}
+
+/*
+ * udf_statfs
+ *
+ * PURPOSE
+ *	Return info about the filesystem.
+ *
+ * DESCRIPTION
+ *	Called by sys_statfs()
+ *
+ * HISTORY
+ *	July 1, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static int
+udf_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	buf->f_type = UDF_SUPER_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_blocks = UDF_SB_PARTLEN(sb, UDF_SB_PARTITION(sb));
+	buf->f_bfree = udf_count_free(sb);
+	buf->f_bavail = buf->f_bfree;
+	buf->f_files = (UDF_SB_LVIDBH(sb) ?
+		(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
+		le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree;
+	buf->f_ffree = buf->f_bfree;
+	/* __kernel_fsid_t f_fsid */
+	buf->f_namelen = UDF_NAME_LEN-2;
+
+	return 0;
+}
+
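+/* Number of set bits in each 4-bit value, used to count free blocks in a
+ * space bitmap one nibble at a time. */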
+static unsigned char udf_bitmap_lookup[16] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
+};
+
+static unsigned int
+udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
+{
+	struct buffer_head *bh = NULL;
+	unsigned int accum = 0;
+	int index;
+	int block = 0, newblock;
+	kernel_lb_addr loc;
+	uint32_t bytes;
+	uint8_t value;
+	uint8_t *ptr;
+	uint16_t ident;
+	struct spaceBitmapDesc *bm;
+
+	lock_kernel();
+
+	loc.logicalBlockNum = bitmap->s_extPosition;
+	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
+	bh = udf_read_ptagged(sb, loc, 0, &ident);
+
+	if (!bh)
+	{
+		printk(KERN_ERR "udf: udf_count_free failed\n");
+		goto out;
+	}
+	else if (ident != TAG_IDENT_SBD)
+	{
+		udf_release_data(bh);
+		printk(KERN_ERR "udf: udf_count_free failed\n");
+		goto out;
+	}
+
+	bm = (struct spaceBitmapDesc *)bh->b_data;
+	bytes = le32_to_cpu(bm->numOfBytes);
+	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
+	ptr = (uint8_t *)bh->b_data;
+
+	while ( bytes > 0 )
+	{
+		while ((bytes > 0) && (index < sb->s_blocksize))
+		{
+			value = ptr[index];
+			accum += udf_bitmap_lookup[ value & 0x0f ];
+			accum += udf_bitmap_lookup[ value >> 4 ];
+			index++;
+			bytes--;
+		}
+		if ( bytes )
+		{
+			udf_release_data(bh);
+			newblock = udf_get_lb_pblock(sb, loc, ++block);
+			bh = udf_tread(sb, newblock);
+			if (!bh)
+			{
+				udf_debug("read failed\n");
+				goto out;
+			}
+			index = 0;
+			ptr = (uint8_t *)bh->b_data;
+		}
+	}
+	udf_release_data(bh);
+
+out:
+	unlock_kernel();
+
+	return accum;
+}
+
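+/*
+ * Count the free blocks recorded in an unallocated/freed space table by
+ * walking its extents and summing their lengths in blocks.
+ */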
+static unsigned int
+udf_count_free_table(struct super_block *sb, struct inode * table)
+{
+	unsigned int accum = 0;
+	uint32_t extoffset, elen;
+	kernel_lb_addr bloc, eloc;
+	int8_t etype;
+	struct buffer_head *bh = NULL;
+
+	lock_kernel();
+
+	bloc = UDF_I_LOCATION(table);
+	extoffset = sizeof(struct unallocSpaceEntry);
+
+	while ((etype = udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
+	{
+		accum += (elen >> table->i_sb->s_blocksize_bits);
+	}
+	udf_release_data(bh);
+
+	unlock_kernel();
+
+	return accum;
+}
+	
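+/*
+ * udf_count_free
+ *
+ * Prefer the free-space count cached in the Logical Volume Integrity
+ * Descriptor; if that yields nothing (no LVID, 0xFFFFFFFF, or zero), count
+ * the space bitmaps, and finally fall back to the space tables.
+ */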
+static unsigned int
+udf_count_free(struct super_block *sb)
+{
+	unsigned int accum = 0;
+
+	if (UDF_SB_LVIDBH(sb))
+	{
+		if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb))
+		{
+			accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]);
+
+			if (accum == 0xFFFFFFFF)
+				accum = 0;
+		}
+	}
+
+	if (accum)
+		return accum;
+
+	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
+	{
+		accum += udf_count_free_bitmap(sb,
+			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap);
+	}
+	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
+	{
+		accum += udf_count_free_bitmap(sb,
+			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap);
+	}
+	if (accum)
+		return accum;
+
+	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
+	{
+		accum += udf_count_free_table(sb,
+			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
+	}
+	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
+	{
+		accum += udf_count_free_table(sb,
+			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
+	}
+
+	return accum;
+}
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
new file mode 100644
index 0000000..43f3051
--- /dev/null
+++ b/fs/udf/symlink.c
@@ -0,0 +1,123 @@
+/*
+ * symlink.c
+ *
+ * PURPOSE
+ *	Symlink handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1998-2001 Ben Fennema
+ *  (C) 1999 Stelias Computing Inc 
+ *
+ * HISTORY
+ *
+ *  04/16/99 blf  Created.
+ *
+ */
+
+#include "udfdecl.h"
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/udf_fs.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include "udf_i.h"
+
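+/*
+ * Expand an on-disk sequence of UDF pathComponents into a Unix-style path
+ * string: type 1 with an empty identifier restarts the path at "/", type 3
+ * becomes "../", type 4 "./", and type 5 a file name translated through
+ * udf_get_filename(), with "/" separators between components.
+ */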
+static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char *to)
+{
+	struct pathComponent *pc;
+	int elen = 0;
+	char *p = to;
+
+	while (elen < fromlen)
+	{
+		pc = (struct pathComponent *)(from + elen);
+		switch (pc->componentType)
+		{
+			case 1:
+				if (pc->lengthComponentIdent == 0)
+				{
+					p = to;
+					*p++ = '/';
+				}
+				break;
+			case 3:
+				memcpy(p, "../", 3);
+				p += 3;
+				break;
+			case 4:
+				/* "." - the current directory; copied through as "./" */
+				memcpy(p, "./", 2);
+				p += 2;
+				break;
+			case 5:
+				p += udf_get_filename(sb, pc->componentIdent, p, pc->lengthComponentIdent);
+				*p++ = '/';
+				break;
+		}
+		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
+	}
+	if (p > to+1)
+		p[-1] = '\0';
+	else
+		p[0] = '\0';
+}
+
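+/*
+ * readpage() for symlinks: the link body is a pathComponent sequence stored
+ * either inline in the ICB or in the file's first data block; read it and
+ * expand it into the page as an ordinary path string.
+ */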
+static int udf_symlink_filler(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct buffer_head *bh = NULL;
+	char *symlink;
+	int err = -EIO;
+	char *p = kmap(page);
+
+	lock_kernel();
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+		symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
+	else
+	{
+		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+
+		if (!bh)
+			goto out;
+
+		symlink = bh->b_data;
+	}
+
+	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+	udf_release_data(bh);
+
+	unlock_kernel();
+	SetPageUptodate(page);
+	kunmap(page);
+	unlock_page(page);
+	return 0;
+out:
+	unlock_kernel();
+	SetPageError(page);
+	kunmap(page);
+	unlock_page(page);
+	return err;
+}
+
+/*
+ * symlinks can't do much...
+ */
+struct address_space_operations udf_symlink_aops = {
+	.readpage		= udf_symlink_filler,
+};
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
new file mode 100644
index 0000000..7dc8a55
--- /dev/null
+++ b/fs/udf/truncate.c
@@ -0,0 +1,284 @@
+/*
+ * truncate.c
+ *
+ * PURPOSE
+ *	Truncate handling routines for the OSTA-UDF(tm) filesystem.
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ *
+ *  (C) 1999-2004 Ben Fennema
+ *  (C) 1999 Stelias Computing Inc
+ *
+ * HISTORY
+ *
+ *  02/24/99 blf  Created.
+ *
+ */
+
+#include "udfdecl.h"
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/udf_fs.h>
+#include <linux/buffer_head.h>
+
+#include "udf_i.h"
+#include "udf_sb.h"
+
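+/*
+ * Truncate a single extent to 'nelen' bytes (0 removes it entirely): the
+ * allocation descriptor is rewritten and any allocated blocks beyond the
+ * new length are freed.
+ */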
+static void extent_trunc(struct inode * inode, kernel_lb_addr bloc, int extoffset,
+	kernel_lb_addr eloc, int8_t etype, uint32_t elen, struct buffer_head *bh, uint32_t nelen)
+{
+	kernel_lb_addr neloc = { 0, 0 };
+	int last_block = (elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+	int first_block = (nelen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+
+	if (nelen)
+	{
+		if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+		{
+			udf_free_blocks(inode->i_sb, inode, eloc, 0, last_block);
+			etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
+		}
+		else
+			neloc = eloc;
+		nelen = (etype << 30) | nelen;
+	}
+
+	if (elen != nelen)
+	{
+		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 0);
+		if (last_block - first_block > 0)
+		{
+			if (etype == (EXT_RECORDED_ALLOCATED >> 30))
+				mark_inode_dirty(inode);
+
+			if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+				udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
+		}
+	}
+}
+
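+/*
+ * Drop blocks preallocated beyond i_size: trim the extent that overshoots
+ * i_size by less than a block, and release a trailing allocated-but-
+ * unrecorded extent together with its allocation descriptor space.
+ */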
+void udf_discard_prealloc(struct inode * inode)
+{
+	kernel_lb_addr bloc, eloc;
+	uint32_t extoffset = 0, elen, nelen;
+	uint64_t lbcount = 0;
+	int8_t etype = -1, netype;
+	struct buffer_head *bh = NULL;
+	int adsize;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ||
+		inode->i_size == UDF_I_LENEXTENTS(inode))
+	{
+		return;
+	}
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		adsize = 0;
+
+	bloc = UDF_I_LOCATION(inode);
+
+	while ((netype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
+	{
+		etype = netype;
+		lbcount += elen;
+		if (lbcount > inode->i_size && lbcount - inode->i_size < inode->i_sb->s_blocksize)
+		{
+			nelen = elen - (lbcount - inode->i_size);
+			extent_trunc(inode, bloc, extoffset-adsize, eloc, etype, elen, bh, nelen);
+			lbcount = inode->i_size;
+		}
+	}
+	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+	{
+		extoffset -= adsize;
+		lbcount -= elen;
+		extent_trunc(inode, bloc, extoffset, eloc, etype, elen, bh, 0);
+		if (!bh)
+		{
+			UDF_I_LENALLOC(inode) = extoffset - udf_file_entry_alloc_offset(inode);
+			mark_inode_dirty(inode);
+		}
+		else
+		{
+			struct allocExtDesc *aed = (struct allocExtDesc *)(bh->b_data);
+			aed->lengthAllocDescs = cpu_to_le32(extoffset - sizeof(struct allocExtDesc));
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag(bh->b_data, extoffset);
+			else
+				udf_update_tag(bh->b_data, sizeof(struct allocExtDesc));
+			mark_buffer_dirty_inode(bh, inode);
+		}
+	}
+	UDF_I_LENEXTENTS(inode) = lbcount;
+
+	udf_release_data(bh);
+}
+
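+/*
+ * Truncate the extent list down to i_size: the extent containing the new
+ * size is shortened, everything beyond it is freed (including allocation
+ * extent blocks that become empty), and if i_size lies past the recorded
+ * extents the tail is padded out with a not-recorded, not-allocated extent.
+ */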
+void udf_truncate_extents(struct inode * inode)
+{
+	kernel_lb_addr bloc, eloc, neloc = { 0, 0 };
+	uint32_t extoffset, elen, offset, nelen = 0, lelen = 0, lenalloc;
+	int8_t etype;
+	int first_block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+	struct buffer_head *bh = NULL;
+	int adsize;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		adsize = 0;
+
+	etype = inode_bmap(inode, first_block, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+	offset += (inode->i_size & (inode->i_sb->s_blocksize - 1));
+	if (etype != -1)
+	{
+		extoffset -= adsize;
+		extent_trunc(inode, bloc, extoffset, eloc, etype, elen, bh, offset);
+		extoffset += adsize;
+
+		if (offset)
+			lenalloc = extoffset;
+		else
+			lenalloc = extoffset - adsize;
+
+		if (!bh)
+			lenalloc -= udf_file_entry_alloc_offset(inode);
+		else
+			lenalloc -= sizeof(struct allocExtDesc);
+
+		while ((etype = udf_current_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 0)) != -1)
+		{
+			if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
+			{
+				udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 0);
+				extoffset = 0;
+				if (lelen)
+				{
+					if (!bh)
+						BUG();
+					else
+						memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
+					udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
+				}
+				else
+				{
+					if (!bh)
+					{
+						UDF_I_LENALLOC(inode) = lenalloc;
+						mark_inode_dirty(inode);
+					}
+					else
+					{
+						struct allocExtDesc *aed = (struct allocExtDesc *)(bh->b_data);
+						aed->lengthAllocDescs = cpu_to_le32(lenalloc);
+						if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+							udf_update_tag(bh->b_data, lenalloc +
+								sizeof(struct allocExtDesc));
+						else
+							udf_update_tag(bh->b_data, sizeof(struct allocExtDesc));
+						mark_buffer_dirty_inode(bh, inode);
+					}
+				}
+
+				udf_release_data(bh);
+				extoffset = sizeof(struct allocExtDesc);
+				bloc = eloc;
+				bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, bloc, 0));
+				if (elen)
+					lelen = (elen + inode->i_sb->s_blocksize - 1) >>
+						inode->i_sb->s_blocksize_bits;
+				else
+					lelen = 1;
+			}
+			else
+			{
+				extent_trunc(inode, bloc, extoffset, eloc, etype, elen, bh, 0);
+				extoffset += adsize;
+			}
+		}
+
+		if (lelen)
+		{
+			if (!bh)
+				BUG();
+			else
+				memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
+			udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
+		}
+		else
+		{
+			if (!bh)
+			{
+				UDF_I_LENALLOC(inode) = lenalloc;
+				mark_inode_dirty(inode);
+			}
+			else
+			{
+				struct allocExtDesc *aed = (struct allocExtDesc *)(bh->b_data);
+				aed->lengthAllocDescs = cpu_to_le32(lenalloc);
+				if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+					udf_update_tag(bh->b_data, lenalloc +
+						sizeof(struct allocExtDesc));
+				else
+					udf_update_tag(bh->b_data, sizeof(struct allocExtDesc));
+				mark_buffer_dirty_inode(bh, inode);
+			}
+		}
+	}
+	else if (inode->i_size)
+	{
+		if (offset)
+		{
+			extoffset -= adsize;
+			etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1);
+			if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			{
+				extoffset -= adsize;
+				elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset);
+				udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0);
+			}
+			else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+			{
+				kernel_lb_addr neloc = { 0, 0 };
+				extoffset -= adsize;
+				nelen = EXT_NOT_RECORDED_NOT_ALLOCATED |
+					((elen + offset + inode->i_sb->s_blocksize - 1) &
+					~(inode->i_sb->s_blocksize - 1));
+				udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
+				udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1);
+			}
+			else
+			{
+				if (elen & (inode->i_sb->s_blocksize - 1))
+				{
+					extoffset -= adsize;
+					elen = EXT_RECORDED_ALLOCATED |
+						((elen + inode->i_sb->s_blocksize - 1) &
+						~(inode->i_sb->s_blocksize - 1));
+					udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1);
+				}
+				memset(&eloc, 0x00, sizeof(kernel_lb_addr));
+				elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
+				udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
+			}
+		}
+	}
+	UDF_I_LENEXTENTS(inode) = inode->i_size;
+
+	udf_release_data(bh);
+}
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
new file mode 100644
index 0000000..d7dbe6f
--- /dev/null
+++ b/fs/udf/udf_i.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_UDF_I_H
+#define __LINUX_UDF_I_H
+
+#include <linux/udf_fs_i.h>
+static inline struct udf_inode_info *UDF_I(struct inode *inode)
+{
+	return list_entry(inode, struct udf_inode_info, vfs_inode);
+}
+
+#define UDF_I_LOCATION(X)	( UDF_I(X)->i_location )
+#define UDF_I_LENEATTR(X)	( UDF_I(X)->i_lenEAttr )
+#define UDF_I_LENALLOC(X)	( UDF_I(X)->i_lenAlloc )
+#define UDF_I_LENEXTENTS(X)	( UDF_I(X)->i_lenExtents )
+#define UDF_I_UNIQUE(X)		( UDF_I(X)->i_unique )
+#define UDF_I_ALLOCTYPE(X)	( UDF_I(X)->i_alloc_type )
+#define UDF_I_EFE(X)		( UDF_I(X)->i_efe )
+#define UDF_I_USE(X)		( UDF_I(X)->i_use )
+#define UDF_I_STRAT4096(X)	( UDF_I(X)->i_strat4096 )
+#define UDF_I_NEXT_ALLOC_BLOCK(X)	( UDF_I(X)->i_next_alloc_block )
+#define UDF_I_NEXT_ALLOC_GOAL(X)	( UDF_I(X)->i_next_alloc_goal )
+#define UDF_I_CRTIME(X)		( UDF_I(X)->i_crtime )
+#define UDF_I_SAD(X)		( UDF_I(X)->i_ext.i_sad )
+#define UDF_I_LAD(X)		( UDF_I(X)->i_ext.i_lad )
+#define UDF_I_DATA(X)		( UDF_I(X)->i_ext.i_data )
+
+#endif /* !defined(_LINUX_UDF_I_H) */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
new file mode 100644
index 0000000..0e54922
--- /dev/null
+++ b/fs/udf/udf_sb.h
@@ -0,0 +1,139 @@
+#ifndef __LINUX_UDF_SB_H
+#define __LINUX_UDF_SB_H
+
+/* Since UDF 2.01 is ISO 13346 based... */
+#define UDF_SUPER_MAGIC			0x15013346
+
+#define UDF_MAX_READ_VERSION		0x0201
+#define UDF_MAX_WRITE_VERSION		0x0201
+
+#define UDF_FLAG_USE_EXTENDED_FE	0
+#define UDF_VERS_USE_EXTENDED_FE	0x0200
+#define UDF_FLAG_USE_STREAMS		1
+#define UDF_VERS_USE_STREAMS		0x0200
+#define UDF_FLAG_USE_SHORT_AD		2
+#define UDF_FLAG_USE_AD_IN_ICB		3
+#define UDF_FLAG_USE_FILE_CTIME_EA	4
+#define UDF_FLAG_STRICT			5
+#define UDF_FLAG_UNDELETE		6
+#define UDF_FLAG_UNHIDE			7
+#define UDF_FLAG_VARCONV		8
+#define UDF_FLAG_NLS_MAP		9
+#define UDF_FLAG_UTF8			10
+
+#define UDF_PART_FLAG_UNALLOC_BITMAP	0x0001
+#define UDF_PART_FLAG_UNALLOC_TABLE	0x0002
+#define UDF_PART_FLAG_FREED_BITMAP	0x0004
+#define UDF_PART_FLAG_FREED_TABLE	0x0008
+#define UDF_PART_FLAG_READ_ONLY		0x0010
+#define UDF_PART_FLAG_WRITE_ONCE	0x0020
+#define UDF_PART_FLAG_REWRITABLE	0x0040
+#define UDF_PART_FLAG_OVERWRITABLE	0x0080
+
+static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+#define UDF_SB_FREE(X)\
+{\
+	if (UDF_SB(X))\
+	{\
+		if (UDF_SB_PARTMAPS(X))\
+			kfree(UDF_SB_PARTMAPS(X));\
+		UDF_SB_PARTMAPS(X) = NULL;\
+	}\
+}
+
+#define UDF_SB_ALLOC_PARTMAPS(X,Y)\
+{\
+	UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
+	if (UDF_SB_PARTMAPS(X) != NULL)\
+	{\
+		UDF_SB_NUMPARTS(X) = Y;\
+		memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\
+	}\
+	else\
+	{\
+		UDF_SB_NUMPARTS(X) = 0;\
+		udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\
+	}\
+}
+
+#define UDF_SB_ALLOC_BITMAP(X,Y,Z)\
+{\
+	int nr_groups = ((UDF_SB_PARTLEN((X),(Y)) + (sizeof(struct spaceBitmapDesc) << 3) +\
+		((X)->s_blocksize * 8) - 1) / ((X)->s_blocksize * 8));\
+	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
+	if (size <= PAGE_SIZE)\
+		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
+	else\
+		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\
+	if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL)\
+	{\
+		memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\
+		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\
+			(struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\
+		UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
+	}\
+	else\
+	{\
+		udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\
+	}\
+}
+
+#define UDF_SB_FREE_BITMAP(X,Y,Z)\
+{\
+	int i;\
+	int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\
+	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
+	for (i=0; i<nr_groups; i++)\
+	{\
+		if (UDF_SB_BITMAP(X,Y,Z,i))\
+			udf_release_data(UDF_SB_BITMAP(X,Y,Z,i));\
+	}\
+	if (size <= PAGE_SIZE)\
+		kfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
+	else\
+		vfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
+}
+
+#define UDF_QUERY_FLAG(X,Y)			( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
+#define UDF_SET_FLAG(X,Y)			( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
+#define UDF_CLEAR_FLAG(X,Y)			( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+
+#define UDF_UPDATE_UDFREV(X,Y)			( ((Y) > UDF_SB_UDFREV(X)) ? UDF_SB_UDFREV(X) = (Y) : UDF_SB_UDFREV(X) )
+
+#define UDF_SB_PARTMAPS(X)			( UDF_SB(X)->s_partmaps )
+#define UDF_SB_PARTTYPE(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_type )
+#define UDF_SB_PARTROOT(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_root )
+#define UDF_SB_PARTLEN(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_len )
+#define UDF_SB_PARTVSN(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_volumeseqnum )
+#define UDF_SB_PARTNUM(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_num )
+#define UDF_SB_TYPESPAR(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_sparing )
+#define UDF_SB_TYPEVIRT(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_virtual )
+#define UDF_SB_PARTFUNC(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_func )
+#define UDF_SB_PARTFLAGS(X,Y)			( UDF_SB_PARTMAPS(X)[(Y)].s_partition_flags )
+#define UDF_SB_BITMAP(X,Y,Z,I)			( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap[I] )
+#define UDF_SB_BITMAP_NR_GROUPS(X,Y,Z)		( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups )
+
+#define UDF_SB_VOLIDENT(X)			( UDF_SB(X)->s_volident )
+#define UDF_SB_NUMPARTS(X)			( UDF_SB(X)->s_partitions )
+#define UDF_SB_PARTITION(X)			( UDF_SB(X)->s_partition )
+#define UDF_SB_SESSION(X)			( UDF_SB(X)->s_session )
+#define UDF_SB_ANCHOR(X)			( UDF_SB(X)->s_anchor )
+#define UDF_SB_LASTBLOCK(X)			( UDF_SB(X)->s_lastblock )
+#define UDF_SB_LVIDBH(X)			( UDF_SB(X)->s_lvidbh )
+#define UDF_SB_LVID(X)				( (struct logicalVolIntegrityDesc *)UDF_SB_LVIDBH(X)->b_data )
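+/* The implementation-use area of the LVID follows the freeSpaceTable[] and
+ * sizeTable[] arrays (numOfPartitions 32-bit entries each), hence the offset. */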
+#define UDF_SB_LVIDIU(X)			( (struct logicalVolIntegrityDescImpUse *)&(UDF_SB_LVID(X)->impUse[le32_to_cpu(UDF_SB_LVID(X)->numOfPartitions) * 2 * sizeof(uint32_t)/sizeof(uint8_t)]) )
+
+#define UDF_SB_UMASK(X)				( UDF_SB(X)->s_umask )
+#define UDF_SB_GID(X)				( UDF_SB(X)->s_gid )
+#define UDF_SB_UID(X)				( UDF_SB(X)->s_uid )
+#define UDF_SB_RECORDTIME(X)			( UDF_SB(X)->s_recordtime )
+#define UDF_SB_SERIALNUM(X)			( UDF_SB(X)->s_serialnum )
+#define UDF_SB_UDFREV(X)			( UDF_SB(X)->s_udfrev )
+#define UDF_SB_FLAGS(X)				( UDF_SB(X)->s_flags )
+#define UDF_SB_VAT(X)				( UDF_SB(X)->s_vat )
+
+#endif /* __LINUX_UDF_SB_H */
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
new file mode 100644
index 0000000..1d5800e
--- /dev/null
+++ b/fs/udf/udfdecl.h
@@ -0,0 +1,167 @@
+#ifndef __UDF_DECL_H
+#define __UDF_DECL_H
+
+#include <linux/udf_fs.h>
+#include "ecma_167.h"
+#include "osta_udf.h"
+
+#include <linux/fs.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/udf_fs_i.h>
+#include <linux/udf_fs_sb.h>
+#include <linux/buffer_head.h>
+
+#include "udfend.h"
+
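+/* Convert between block addresses that count only the 32 data blocks in each
+ * packet ("fixed") and addresses that also count the 7 intervening link
+ * blocks, i.e. 39 blocks per packet ("variable"). */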
+#define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) )
+#define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) )
+
+#define UDF_EXTENT_LENGTH_MASK	0x3FFFFFFF
+#define UDF_EXTENT_FLAG_MASK	0xC0000000
+
+#define UDF_NAME_PAD		4
+#define UDF_NAME_LEN		256
+#define UDF_PATH_LEN		1023
+
+#define udf_file_entry_alloc_offset(inode)\
+	(UDF_I_USE(inode) ?\
+		sizeof(struct unallocSpaceEntry) :\
+		((UDF_I_EFE(inode) ?\
+			sizeof(struct extendedFileEntry) :\
+			sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode)))
+
+#define udf_ext0_offset(inode)\
+	(UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\
+		udf_file_entry_alloc_offset(inode) : 0)
+
+#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
+
+struct dentry;
+struct inode;
+struct task_struct;
+struct buffer_head;
+struct super_block;
+
+extern struct inode_operations udf_dir_inode_operations;
+extern struct file_operations udf_dir_operations;
+extern struct inode_operations udf_file_inode_operations;
+extern struct file_operations udf_file_operations;
+extern struct address_space_operations udf_aops;
+extern struct address_space_operations udf_adinicb_aops;
+extern struct address_space_operations udf_symlink_aops;
+
+struct udf_fileident_bh
+{
+	struct buffer_head *sbh;
+	struct buffer_head *ebh;
+	int soffset;
+	int eoffset;
+};
+
+struct udf_vds_record
+{
+	uint32_t block;
+	uint32_t volDescSeqNum;
+};
+
+struct generic_desc
+{
+	tag		descTag;
+	__le32		volDescSeqNum;
+};
+
+struct ustr
+{
+	uint8_t u_cmpID;
+	uint8_t u_name[UDF_NAME_LEN-2];
+	uint8_t u_len;
+};
+
+/* super.c */
+extern void udf_error(struct super_block *, const char *, const char *, ...);
+extern void udf_warning(struct super_block *, const char *, const char *, ...);
+
+/* namei.c */
+extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, struct fileIdentDesc *, struct udf_fileident_bh *, uint8_t *, uint8_t *);
+
+/* file.c */
+extern int udf_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+/* inode.c */
+extern struct inode *udf_iget(struct super_block *, kernel_lb_addr);
+extern int udf_sync_inode(struct inode *);
+extern void udf_expand_file_adinicb(struct inode *, int, int *);
+extern struct buffer_head * udf_expand_dir_adinicb(struct inode *, int *, int *);
+extern struct buffer_head * udf_bread(struct inode *, int, int, int *);
+extern void udf_truncate(struct inode *);
+extern void udf_read_inode(struct inode *);
+extern void udf_delete_inode(struct inode *);
+extern void udf_clear_inode(struct inode *);
+extern int udf_write_inode(struct inode *, int);
+extern long udf_block_map(struct inode *, long);
+extern int8_t inode_bmap(struct inode *, int, kernel_lb_addr *, uint32_t *, kernel_lb_addr *, uint32_t *, uint32_t *, struct buffer_head **);
+extern int8_t udf_add_aext(struct inode *, kernel_lb_addr *, int *, kernel_lb_addr, uint32_t, struct buffer_head **, int);
+extern int8_t udf_write_aext(struct inode *, kernel_lb_addr, int *, kernel_lb_addr, uint32_t, struct buffer_head *, int);
+extern int8_t udf_delete_aext(struct inode *, kernel_lb_addr, int, kernel_lb_addr, uint32_t, struct buffer_head *);
+extern int8_t udf_next_aext(struct inode *, kernel_lb_addr *, int *, kernel_lb_addr *, uint32_t *, struct buffer_head **, int);
+extern int8_t udf_current_aext(struct inode *, kernel_lb_addr *, int *, kernel_lb_addr *, uint32_t *, struct buffer_head **, int);
+
+/* misc.c */
+extern struct buffer_head *udf_tgetblk(struct super_block *, int);
+extern struct buffer_head *udf_tread(struct super_block *, int);
+extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t, uint32_t, uint8_t);
+extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t, uint8_t);
+extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t, uint32_t, uint16_t *);
+extern struct buffer_head *udf_read_ptagged(struct super_block *, kernel_lb_addr, uint32_t, uint16_t *);
+extern void udf_release_data(struct buffer_head *);
+extern void udf_update_tag(char *, int);
+extern void udf_new_tag(char *, uint16_t, uint16_t, uint16_t, uint32_t, int);
+
+/* lowlevel.c */
+extern unsigned int udf_get_last_session(struct super_block *);
+extern unsigned long udf_get_last_block(struct super_block *);
+
+/* partition.c */
+extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern int udf_relocate_blocks(struct super_block *, long, long *);
+
+/* unicode.c */
+extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, int);
+extern int udf_build_ustr(struct ustr *, dstring *, int);
+extern int udf_CS0toUTF8(struct ustr *, struct ustr *);
+
+/* ialloc.c */
+extern void udf_free_inode(struct inode *);
+extern struct inode * udf_new_inode (struct inode *, int, int *);
+
+/* truncate.c */
+extern void udf_discard_prealloc(struct inode *);
+extern void udf_truncate_extents(struct inode *);
+
+/* balloc.c */
+extern void udf_free_blocks(struct super_block *, struct inode *, kernel_lb_addr, uint32_t, uint32_t);
+extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t, uint32_t, uint32_t);
+extern int udf_new_block(struct super_block *, struct inode *, uint16_t, uint32_t, int *);
+
+/* fsync.c */
+extern int udf_fsync_file(struct file *, struct dentry *, int);
+
+/* directory.c */
+extern struct fileIdentDesc * udf_fileident_read(struct inode *, loff_t *, struct udf_fileident_bh *, struct fileIdentDesc *, kernel_lb_addr *, uint32_t *, kernel_lb_addr *, uint32_t *, uint32_t *, struct buffer_head **);
+extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
+extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
+extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
+
+/* crc.c */
+extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
+
+/* udftime.c */
+extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp);
+extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec);
+
+#endif /* __UDF_DECL_H */
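The udf_fixed_to_variable()/udf_variable_to_fixed() macros near the top of this header translate block addresses between fixed-packet and variable-packet layouts; the constants 32 and 39 presumably reflect CD-RW packet geometry (32 user sectors plus 7 link/run-in/run-out sectors per packet). A stand-alone sketch, not part of the driver, that exercises the round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define udf_fixed_to_variable(x) ((((x) >> 5) * 39) + ((x) & 0x0000001F))
#define udf_variable_to_fixed(x) ((((x) / 39) << 5) + ((x) % 39))

int main(void)
{
	uint32_t lba;

	/* The translation is exact in both directions for any address. */
	for (lba = 0; lba < 1000; lba++)
		assert(udf_variable_to_fixed(udf_fixed_to_variable(lba)) == lba);

	printf("fixed 100 -> variable %u\n", udf_fixed_to_variable(100)); /* 121 */
	return 0;
}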
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
new file mode 100644
index 0000000..17d3788
--- /dev/null
+++ b/fs/udf/udfend.h
@@ -0,0 +1,81 @@
+#ifndef __UDF_ENDIAN_H
+#define __UDF_ENDIAN_H
+
+#include <asm/byteorder.h>
+#include <linux/string.h>
+
+static inline kernel_lb_addr lelb_to_cpu(lb_addr in)
+{
+	kernel_lb_addr out;
+	out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum);
+	out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum);
+	return out;
+}
+
+static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
+{
+	lb_addr out;
+	out.logicalBlockNum = cpu_to_le32(in.logicalBlockNum);
+	out.partitionReferenceNum = cpu_to_le16(in.partitionReferenceNum);
+	return out;
+}
+
+static inline kernel_timestamp lets_to_cpu(timestamp in)
+{
+	kernel_timestamp out;
+	memcpy(&out, &in, sizeof(timestamp));
+	out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
+	out.year = le16_to_cpu(in.year);
+	return out;
+}
+
+static inline short_ad lesa_to_cpu(short_ad in)
+{
+	short_ad out;
+	out.extLength = le32_to_cpu(in.extLength);
+	out.extPosition = le32_to_cpu(in.extPosition);
+	return out;
+}
+
+static inline short_ad cpu_to_lesa(short_ad in)
+{
+	short_ad out;
+	out.extLength = cpu_to_le32(in.extLength);
+	out.extPosition = cpu_to_le32(in.extPosition);
+	return out;
+}
+
+static inline kernel_long_ad lela_to_cpu(long_ad in)
+{
+	kernel_long_ad out;
+	out.extLength = le32_to_cpu(in.extLength);
+	out.extLocation = lelb_to_cpu(in.extLocation);
+	return out;
+}
+
+static inline long_ad cpu_to_lela(kernel_long_ad in)
+{
+	long_ad out;
+	out.extLength = cpu_to_le32(in.extLength);
+	out.extLocation = cpu_to_lelb(in.extLocation);
+	return out;
+}
+
+static inline kernel_extent_ad leea_to_cpu(extent_ad in)
+{
+	kernel_extent_ad out;
+	out.extLength = le32_to_cpu(in.extLength);
+	out.extLocation = le32_to_cpu(in.extLocation);
+	return out;
+}
+
+static inline timestamp cpu_to_lets(kernel_timestamp in)
+{
+	timestamp out;
+	memcpy(&out, &in, sizeof(timestamp));
+	out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
+	out.year = cpu_to_le16(in.year);
+	return out;
+}
+
+#endif /* __UDF_ENDIAN_H */
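The helpers above all follow the same shape: copy a little-endian on-disc structure into its in-core counterpart field by field, swapping each integer into host order. A stand-alone user-space sketch of that pattern, using a hypothetical two-field address and glibc's le32toh()/le16toh() in place of the kernel's le32_to_cpu()/le16_to_cpu():

#include <endian.h>
#include <stdint.h>

struct disk_lb_addr {			/* as stored on disc (little-endian) */
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
} __attribute__((packed));

struct cpu_lb_addr {			/* as used in memory (host order) */
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
};

static inline struct cpu_lb_addr disk_to_cpu(struct disk_lb_addr in)
{
	struct cpu_lb_addr out;

	/* Swap each integer individually; structure padding may differ. */
	out.logicalBlockNum = le32toh(in.logicalBlockNum);
	out.partitionReferenceNum = le16toh(in.partitionReferenceNum);
	return out;
}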
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
new file mode 100644
index 0000000..c2634bd
--- /dev/null
+++ b/fs/udf/udftime.c
@@ -0,0 +1,174 @@
+/* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Eggert (eggert@twinsun.com).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/*
+ * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time 
+ *     10/04/98: added new table-based lookup after seeing how ugly the gnu code is
+ * blf 09/27/99: ripped out all the old code and inserted new table from
+ *					John Brockmeyer (without leap second corrections)
+ *				 rewrote udf_stamp_to_time and fixed timezone accounting in
+ *				 udf_time_to_stamp.
+ */
+
+/*
+ * We don't take into account leap seconds. This may be correct or incorrect.
+ * For more NIST information (especially dealing with leap seconds), see:
+ *  http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "udfdecl.h"
+
+#define EPOCH_YEAR 1970
+
+#ifndef __isleap
+/* Nonzero if YEAR is a leap year (every 4 years,
+   except every 100th isn't, and every 400th is).  */
+#define	__isleap(year)	\
+  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+#endif
+
+/* How many days come before each month (0-12).  */
+const unsigned short int __mon_yday[2][13] =
+{
+	/* Normal years.  */
+	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+	/* Leap years.  */
+	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+};
+
+#define MAX_YEAR_SECONDS	69
+#define SPD 0x15180 /*3600*24*/
+#define SPY(y,l,s) (SPD * (365*y+l)+s)
+
+static time_t year_seconds[MAX_YEAR_SECONDS]= {
+/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0), 
+/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0), 
+/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0), 
+/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0), 
+/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0), 
+/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0), 
+/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0), 
+/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0), 
+/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0), 
+/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0), 
+/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0), 
+/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0), 
+/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0), 
+/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0), 
+/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0), 
+/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0), 
+/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0), 
+/*2038*/ SPY(68,17,0)
+};
+
+extern struct timezone sys_tz;
+
+#define SECS_PER_HOUR	(60 * 60)
+#define SECS_PER_DAY	(SECS_PER_HOUR * 24)
+
+time_t *
+udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
+{
+	int yday;
+	uint8_t type = src.typeAndTimezone >> 12;
+	int16_t offset;
+
+	if (type == 1)
+	{
+		offset = src.typeAndTimezone << 4;
+		/* sign extend the 12-bit offset */
+		offset = (offset >> 4);
+		if (offset == -2047) /* unspecified offset */
+			offset = 0;
+	}
+	else
+		offset = 0;
+
+	if ((src.year < EPOCH_YEAR) ||
+		(src.year > EPOCH_YEAR+MAX_YEAR_SECONDS))
+	{
+		*dest = -1;
+		*dest_usec = -1;
+		return NULL;
+	}
+	*dest = year_seconds[src.year - EPOCH_YEAR];
+	*dest -= offset * 60;
+
+	yday = ((__mon_yday[__isleap (src.year)]
+		[src.month-1]) + (src.day-1));
+	*dest += ( ( (yday* 24) + src.hour ) * 60 + src.minute ) * 60 + src.second;
+	*dest_usec = src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 + src.microseconds;
+	return dest;
+}
+
+
+kernel_timestamp *
+udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
+{
+	long int days, rem, y;
+	const unsigned short int *ip;
+	int16_t offset;
+
+	offset = -sys_tz.tz_minuteswest;
+
+	if (!dest)
+		return NULL;
+
+	dest->typeAndTimezone = 0x1000 | (offset & 0x0FFF);
+
+	ts.tv_sec += offset * 60;
+	days = ts.tv_sec / SECS_PER_DAY;
+	rem = ts.tv_sec % SECS_PER_DAY;
+	dest->hour = rem / SECS_PER_HOUR;
+	rem %= SECS_PER_HOUR;
+	dest->minute = rem / 60;
+	dest->second = rem % 60;
+	y = 1970;
+
+#define DIV(a,b) ((a) / (b) - ((a) % (b) < 0))
+#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
+
+	while (days < 0 || days >= (__isleap(y) ? 366 : 365))
+	{
+		long int yg = y + days / 365 - (days % 365 < 0);
+
+		/* Adjust DAYS and Y to match the guessed year.  */
+		days -= ((yg - y) * 365
+			+ LEAPS_THRU_END_OF (yg - 1)
+			- LEAPS_THRU_END_OF (y - 1));
+		y = yg;
+	}
+	dest->year = y;
+	ip = __mon_yday[__isleap(y)];
+	for (y = 11; days < (long int) ip[y]; --y)
+		continue;
+	days -= ip[y];
+	dest->month = y + 1;
+	dest->day = days + 1;
+
+	dest->centiseconds = ts.tv_nsec / 10000000;
+	dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100;
+	dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
+		dest->hundredsOfMicroseconds * 100);
+	return dest;
+}
+
+/* EOF */
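The trickiest part of udf_stamp_to_time() above is decoding typeAndTimezone: the top four bits carry a type, the low twelve bits are a signed UTC offset in minutes, and -2047 means the offset is unspecified. A stand-alone sketch of just that decoding; the encoded value in main() is an illustrative example, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

static int16_t udf_tz_offset(uint16_t typeAndTimezone)
{
	uint8_t type = typeAndTimezone >> 12;
	int16_t offset;

	if (type != 1)
		return 0;
	/* shift left then arithmetic-shift right to sign extend 12 bits */
	offset = (int16_t)(typeAndTimezone << 4) >> 4;
	if (offset == -2047)		/* unspecified offset */
		offset = 0;
	return offset;
}

int main(void)
{
	/* 0x1000 | (-60 & 0xFFF) encodes type 1, UTC-1:00 */
	printf("%d\n", udf_tz_offset(0x1000 | (-60 & 0xFFF)));	/* prints -60 */
	return 0;
}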
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
new file mode 100644
index 0000000..5a80efd
--- /dev/null
+++ b/fs/udf/unicode.c
@@ -0,0 +1,516 @@
+/*
+ * unicode.c
+ *
+ * PURPOSE
+ *	Routines for converting between UTF-8 and OSTA Compressed Unicode.
+ *      Also handles filename mangling
+ *
+ * DESCRIPTION
+ *	OSTA Compressed Unicode is explained in the OSTA UDF specification.
+ *		http://www.osta.org/
+ *	UTF-8 is explained in the IETF RFC XXXX.
+ *		ftp://ftp.internic.net/rfc/rfcxxxx.txt
+ *
+ * CONTACTS
+ *	E-mail regarding any portion of the Linux UDF file system should be
+ *	directed to the development team's mailing list (run by majordomo):
+ *		linux_udf@hpesjro.fc.hp.com
+ *
+ * COPYRIGHT
+ *	This file is distributed under the terms of the GNU General Public
+ *	License (GPL). Copies of the GPL can be obtained from:
+ *		ftp://prep.ai.mit.edu/pub/gnu/GPL
+ *	Each contributing author retains all rights to their own work.
+ */
+
+#include "udfdecl.h"
+
+#include <linux/kernel.h>
+#include <linux/string.h>	/* for memset */
+#include <linux/nls.h>
+#include <linux/udf_fs.h>
+
+#include "udf_sb.h"
+
+static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+
+static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
+{
+	if ( (!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN-2) )
+		return 0;
+	memset(dest, 0, sizeof(struct ustr));
+	memcpy(dest->u_name, src, strlen);
+	dest->u_cmpID = 0x08;
+	dest->u_len = strlen;
+	return strlen;
+}
+
+/*
+ * udf_build_ustr
+ */
+int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
+{
+	int usesize;
+
+	if ( (!dest) || (!ptr) || (!size) )
+		return -1;
+
+	memset(dest, 0, sizeof(struct ustr));
+	usesize= (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
+	dest->u_cmpID=ptr[0];
+	dest->u_len=ptr[size-1];
+	memcpy(dest->u_name, ptr+1, usesize-1);
+	return 0;
+}
+
+/*
+ * udf_build_ustr_exact
+ */
+static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
+{
+	if ( (!dest) || (!ptr) || (!exactsize) )
+		return -1;
+
+	memset(dest, 0, sizeof(struct ustr));
+	dest->u_cmpID=ptr[0];
+	dest->u_len=exactsize-1;
+	memcpy(dest->u_name, ptr+1, exactsize-1);
+	return 0;
+}
+
+/*
+ * udf_ocu_to_utf8
+ *
+ * PURPOSE
+ *	Convert OSTA Compressed Unicode to the UTF-8 equivalent.
+ *
+ * DESCRIPTION
+ *	This routine is only called by udf_filldir().
+ *
+ * PRE-CONDITIONS
+ *	utf			Pointer to UTF-8 output buffer.
+ *	ocu			Pointer to OSTA Compressed Unicode input buffer
+ *				of size UDF_NAME_LEN bytes.
+ * 				both of type "struct ustr *"
+ *
+ * POST-CONDITIONS
+ *	<return>		Zero on success.
+ *
+ * HISTORY
+ *	November 12, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
+{
+	uint8_t *ocu;
+	uint32_t c;
+	uint8_t cmp_id, ocu_len;
+	int i;
+
+	ocu = ocu_i->u_name;
+
+	ocu_len = ocu_i->u_len;
+	cmp_id = ocu_i->u_cmpID;
+	utf_o->u_len = 0;
+
+	if (ocu_len == 0)
+	{
+		memset(utf_o, 0, sizeof(struct ustr));
+		utf_o->u_cmpID = 0;
+		utf_o->u_len = 0;
+		return 0;
+	}
+
+	if ((cmp_id != 8) && (cmp_id != 16))
+	{
+		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+		return 0;
+	}
+
+	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
+	{
+
+		/* Expand OSTA compressed Unicode to Unicode */
+		c = ocu[i++];
+		if (cmp_id == 16)
+			c = (c << 8) | ocu[i++];
+
+		/* Compress Unicode to UTF-8 */
+		if (c < 0x80U)
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
+		else if (c < 0x800U)
+		{
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6));
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
+		}
+		else
+		{
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12));
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
+			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
+		}
+	}
+	utf_o->u_cmpID=8;
+
+	return utf_o->u_len;
+}
+
+/*
+ *
+ * udf_utf8_to_ocu
+ *
+ * PURPOSE
+ *	Convert UTF-8 to the OSTA Compressed Unicode equivalent.
+ *
+ * DESCRIPTION
+ *	This routine is only called by udf_lookup().
+ *
+ * PRE-CONDITIONS
+ *	ocu			Pointer to OSTA Compressed Unicode output
+ *				buffer of size UDF_NAME_LEN bytes.
+ *	utf			Pointer to UTF-8 input buffer.
+ *	utf_len			Length of UTF-8 input buffer in bytes.
+ *
+ * POST-CONDITIONS
+ *	<return>		Zero on success.
+ *
+ * HISTORY
+ *	November 12, 1997 - Andrew E. Mileski
+ *	Written, tested, and released.
+ */
+static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+{
+	unsigned c, i, max_val, utf_char;
+	int utf_cnt, u_len;
+
+	memset(ocu, 0, sizeof(dstring) * length);
+	ocu[0] = 8;
+	max_val = 0xffU;
+
+try_again:
+	u_len = 0U;
+	utf_char = 0U;
+	utf_cnt = 0U;
+	for (i = 0U; i < utf->u_len; i++)
+	{
+		c = (uint8_t)utf->u_name[i];
+
+		/* Complete a multi-byte UTF-8 character */
+		if (utf_cnt)
+		{
+			utf_char = (utf_char << 6) | (c & 0x3fU);
+			if (--utf_cnt)
+				continue;
+		}
+		else
+		{
+			/* Check for a multi-byte UTF-8 character */
+			if (c & 0x80U)
+			{
+				/* Start a multi-byte UTF-8 character */
+				if ((c & 0xe0U) == 0xc0U)
+				{
+					utf_char = c & 0x1fU;
+					utf_cnt = 1;
+				}
+				else if ((c & 0xf0U) == 0xe0U)
+				{
+					utf_char = c & 0x0fU;
+					utf_cnt = 2;
+				}
+				else if ((c & 0xf8U) == 0xf0U)
+				{
+					utf_char = c & 0x07U;
+					utf_cnt = 3;
+				}
+				else if ((c & 0xfcU) == 0xf8U)
+				{
+					utf_char = c & 0x03U;
+					utf_cnt = 4;
+				}
+				else if ((c & 0xfeU) == 0xfcU)
+				{
+					utf_char = c & 0x01U;
+					utf_cnt = 5;
+				}
+				else
+					goto error_out;
+				continue;
+			} else
+				/* Single byte UTF-8 character (most common) */
+				utf_char = c;
+		}
+
+		/* Choose no compression if necessary */
+		if (utf_char > max_val)
+		{
+			if ( 0xffU == max_val )
+			{
+				max_val = 0xffffU;
+				ocu[0] = (uint8_t)0x10U;
+				goto try_again;
+			}
+			goto error_out;
+		}
+
+		if (max_val == 0xffffU)
+		{
+			ocu[++u_len] = (uint8_t)(utf_char >> 8);
+		}
+		ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
+	}
+
+
+	if (utf_cnt)
+	{
+error_out:
+		ocu[++u_len] = '?';
+		printk(KERN_DEBUG "udf: bad UTF-8 character\n");
+	}
+
+	ocu[length - 1] = (uint8_t)u_len + 1;
+	return u_len + 1;
+}
+
+static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, struct ustr *ocu_i)
+{
+	uint8_t *ocu;
+	uint32_t c;
+	uint8_t cmp_id, ocu_len;
+	int i;
+
+	ocu = ocu_i->u_name;
+
+	ocu_len = ocu_i->u_len;
+	cmp_id = ocu_i->u_cmpID;
+	utf_o->u_len = 0;
+
+	if (ocu_len == 0)
+	{
+		memset(utf_o, 0, sizeof(struct ustr));
+		utf_o->u_cmpID = 0;
+		utf_o->u_len = 0;
+		return 0;
+	}
+
+	if ((cmp_id != 8) && (cmp_id != 16))
+	{
+		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+		return 0;
+	}
+
+	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
+	{
+		/* Expand OSTA compressed Unicode to Unicode */
+		c = ocu[i++];
+		if (cmp_id == 16)
+			c = (c << 8) | ocu[i++];
+
+		utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len], 
+			UDF_NAME_LEN - utf_o->u_len);
+	}
+	utf_o->u_cmpID=8;
+
+	return utf_o->u_len;
+}
+
+static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, int length)
+{
+	unsigned len, i, max_val;
+	uint16_t uni_char;
+	int u_len;
+
+	memset(ocu, 0, sizeof(dstring) * length);
+	ocu[0] = 8;
+	max_val = 0xffU;
+
+try_again:
+	u_len = 0U;
+	for (i = 0U; i < uni->u_len; i++)
+	{
+		len = nls->char2uni(&uni->u_name[i], uni->u_len-i, &uni_char);
+		if (len <= 0)
+			continue;
+
+		if (uni_char > max_val)
+		{
+			max_val = 0xffffU;
+			ocu[0] = (uint8_t)0x10U;
+			goto try_again;
+		}
+		
+		if (max_val == 0xffffU)
+			ocu[++u_len] = (uint8_t)(uni_char >> 8);
+		ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
+		i += len - 1;
+	}
+
+	ocu[length - 1] = (uint8_t)u_len + 1;
+	return u_len + 1;
+}
+
+int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, int flen)
+{
+	struct ustr filename, unifilename;
+	int len;
+
+	if (udf_build_ustr_exact(&unifilename, sname, flen))
+	{
+		return 0;
+	}
+
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
+	{
+		if (!udf_CS0toUTF8(&filename, &unifilename) )
+		{
+			udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
+			return 0;
+		}
+	}
+	else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+	{
+		if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename) )
+		{
+			udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
+			return 0;
+		}
+	}
+	else
+		return 0;
+
+	if ((len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
+		unifilename.u_name, unifilename.u_len)))
+	{
+		return len;
+	}
+	return 0;
+}
+
+int udf_put_filename(struct super_block *sb, const uint8_t *sname, uint8_t *dname, int flen)
+{
+	struct ustr unifilename;
+	int namelen;
+
+	if ( !(udf_char_to_ustr(&unifilename, sname, flen)) )
+	{
+		return 0;
+	}
+
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
+	{
+		if ( !(namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN)) )
+		{
+			return 0;
+		}
+	}
+	else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+	{
+		if ( !(namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN)) )
+		{
+			return 0;
+		}
+	}
+	else
+		return 0;
+
+	return namelen;
+}
+
+#define ILLEGAL_CHAR_MARK	'_'
+#define EXT_MARK			'.'
+#define CRC_MARK			'#'
+#define EXT_SIZE			5
+
+static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen, uint8_t *fidName, int fidNameLen)
+{
+	int index, newIndex = 0, needsCRC = 0;	
+	int extIndex = 0, newExtIndex = 0, hasExt = 0;
+	unsigned short valueCRC;
+	uint8_t curr;
+	const uint8_t hexChar[] = "0123456789ABCDEF";
+
+	if (udfName[0] == '.' && (udfLen == 1 ||
+		(udfLen == 2 && udfName[1] == '.')))
+	{
+		needsCRC = 1;
+		newIndex = udfLen;
+		memcpy(newName, udfName, udfLen);
+	}
+	else
+	{	
+		for (index = 0; index < udfLen; index++)
+		{
+			curr = udfName[index];
+			if (curr == '/' || curr == 0)
+			{
+				needsCRC = 1;
+				curr = ILLEGAL_CHAR_MARK;
+				while (index+1 < udfLen && (udfName[index+1] == '/' ||
+					udfName[index+1] == 0))
+					index++;
+			}
+			if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE)
+			{
+				if (udfLen == index + 1)
+					hasExt = 0;
+				else
+				{
+					hasExt = 1;
+					extIndex = index;
+					newExtIndex = newIndex;
+				}
+			}
+			if (newIndex < 256)
+				newName[newIndex++] = curr;
+			else
+				needsCRC = 1;
+		}
+	}
+	if (needsCRC)
+	{
+		uint8_t ext[EXT_SIZE];
+		int localExtIndex = 0;
+
+		if (hasExt)
+		{
+			int maxFilenameLen;
+			for(index = 0; index<EXT_SIZE && extIndex + index +1 < udfLen;
+				index++ )
+			{
+				curr = udfName[extIndex + index + 1];
+
+				if (curr == '/' || curr == 0)
+				{
+					needsCRC = 1;
+					curr = ILLEGAL_CHAR_MARK;
+					while(extIndex + index + 2 < udfLen && (index + 1 < EXT_SIZE
+						&& (udfName[extIndex + index + 2] == '/' ||
+							udfName[extIndex + index + 2] == 0)))
+						index++;
+				}
+				ext[localExtIndex++] = curr;
+			}
+			maxFilenameLen = 250 - localExtIndex;
+			if (newIndex > maxFilenameLen)
+				newIndex = maxFilenameLen;
+			else
+				newIndex = newExtIndex;
+		}
+		else if (newIndex > 250)
+			newIndex = 250;
+		newName[newIndex++] = CRC_MARK;
+		valueCRC = udf_crc(fidName, fidNameLen, 0);
+		newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
+		newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
+		newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
+		newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
+
+		if (hasExt)
+		{
+			newName[newIndex++] = EXT_MARK;
+			for (index = 0;index < localExtIndex ;index++ )
+				newName[newIndex++] = ext[index];
+		}
+	}
+	return newIndex;
+}
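When udf_translate_to_linux() has to mangle a name (illegal characters or an over-long name), it truncates it and appends CRC_MARK plus four hex digits of a 16-bit CRC computed over the raw file identifier, then re-attaches any preserved extension. A stand-alone sketch of the suffix construction; the crc16 value here is an arbitrary placeholder standing in for udf_crc():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void append_crc_mark(char *name, size_t len, uint16_t crc16)
{
	static const char hex[] = "0123456789ABCDEF";

	/* '#' followed by the CRC, most significant nibble first */
	name[len++] = '#';
	name[len++] = hex[(crc16 >> 12) & 0xF];
	name[len++] = hex[(crc16 >> 8) & 0xF];
	name[len++] = hex[(crc16 >> 4) & 0xF];
	name[len++] = hex[crc16 & 0xF];
	name[len] = '\0';
}

int main(void)
{
	char name[64] = "LONGNAME";

	append_crc_mark(name, strlen(name), 0xBEEF);
	printf("%s\n", name);	/* LONGNAME#BEEF */
	return 0;
}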
diff --git a/fs/ufs/Makefile b/fs/ufs/Makefile
new file mode 100644
index 0000000..dd39980
--- /dev/null
+++ b/fs/ufs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux ufs filesystem routines.
+#
+
+obj-$(CONFIG_UFS_FS) += ufs.o
+
+ufs-objs := balloc.o cylinder.o dir.o file.o ialloc.o inode.o \
+	    namei.o super.o symlink.o truncate.o util.o
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
new file mode 100644
index 0000000..997640c
--- /dev/null
+++ b/fs/ufs/balloc.c
@@ -0,0 +1,818 @@
+/*
+ *  linux/fs/ufs/balloc.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_BALLOC_DEBUG
+
+#ifdef UFS_BALLOC_DEBUG
+#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
+static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
+static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
+static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
+static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
+static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
+
+/*
+ * Free 'count' fragments from fragment number 'fragment'
+ */
+void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
+	
+	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
+		ufs_error (sb, "ufs_free_fragments", "internal error");
+	
+	lock_super(sb);
+	
+	cgno = ufs_dtog(fragment);
+	bit = ufs_dtogd(fragment);
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
+		goto failed;
+	}
+		
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi) 
+		goto failed;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
+		goto failed;
+	}
+
+	end_bit = bit + count;
+	bbase = ufs_blknum (bit);
+	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
+	for (i = bit; i < end_bit; i++) {
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
+			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
+		else ufs_error (sb, "ufs_free_fragments",
+			"bit already cleared for fragment %u", i);
+	}
+	
+	DQUOT_FREE_BLOCK (inode, count);
+
+	
+	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
+	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
+
+	/*
+	 * Trying to reassemble free fragments into block
+	 */
+	blkno = ufs_fragstoblks (bbase);
+	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
+		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
+		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
+		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+			ufs_clusteracct (sb, ucpi, blkno, 1);
+		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
+		cylno = ufs_cbtocylno (bbase);
+		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
+		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	}
+	
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+	
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+	return;
+
+failed:
+	unlock_super (sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return;
+}
+
+/*
+ * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
+ */
+void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+
+	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
+	
+	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
+		ufs_error (sb, "ufs_free_blocks", "internal error, "
+			"fragment %u, count %u\n", fragment, count);
+		goto failed;
+	}
+
+	lock_super(sb);
+	
+do_more:
+	overflow = 0;
+	cgno = ufs_dtog (fragment);
+	bit = ufs_dtogd (fragment);
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
+		goto failed;
+	}
+	end_bit = bit + count;
+	if (end_bit > uspi->s_fpg) {
+		overflow = bit + count - uspi->s_fpg;
+		count -= overflow;
+		end_bit -= overflow;
+	}
+
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi) 
+		goto failed;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
+		goto failed;
+	}
+
+	for (i = bit; i < end_bit; i += uspi->s_fpb) {
+		blkno = ufs_fragstoblks(i);
+		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
+		}
+		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
+		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+			ufs_clusteracct (sb, ucpi, blkno, 1);
+		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+
+		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
+		cylno = ufs_cbtocylno(i);
+		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
+		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+
+	if (overflow) {
+		fragment += count;
+		count = overflow;
+		goto do_more;
+	}
+
+	sb->s_dirt = 1;
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+	return;
+
+failed:
+	unlock_super (sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return;
+}
+
+
+
+#define NULLIFY_FRAGMENTS \
+	for (i = oldcount; i < newcount; i++) { \
+		bh = sb_getblk(sb, result + i); \
+		memset (bh->b_data, 0, sb->s_blocksize); \
+		set_buffer_uptodate(bh); \
+		mark_buffer_dirty (bh); \
+		if (IS_SYNC(inode)) \
+			sync_dirty_buffer(bh); \
+		brelse (bh); \
+	}
+
+unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
+	unsigned goal, unsigned count, int * err )
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct buffer_head * bh;
+	unsigned cgno, oldcount, newcount, tmp, request, i, result;
+	
+	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	*err = -ENOSPC;
+
+	lock_super (sb);
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
+		ufs_warning (sb, "ufs_new_fragments", "internal warning"
+			" fragment %u, count %u", fragment, count);
+		count = uspi->s_fpb - ufs_fragnum(fragment); 
+	}
+	oldcount = ufs_fragnum (fragment);
+	newcount = oldcount + count;
+
+	/*
+	 * Somebody else has just allocated our fragments
+	 */
+	if (oldcount) {
+		if (!tmp) {
+			ufs_error (sb, "ufs_new_fragments", "internal error, "
+				"fragment %u, tmp %u\n", fragment, tmp);
+			unlock_super (sb);
+			return (unsigned)-1;
+		}
+		if (fragment < UFS_I(inode)->i_lastfrag) {
+			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
+			unlock_super (sb);
+			return 0;
+		}
+	}
+	else {
+		if (tmp) {
+			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
+			unlock_super(sb);
+			return 0;
+		}
+	}
+
+	/*
+	 * Not enough free space is left for an unprivileged user on the device
+	 */
+	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
+		unlock_super (sb);
+		UFSD(("EXIT (FAILED)\n"))
+		return 0;
+	}
+
+	if (goal >= uspi->s_size) 
+		goal = 0;
+	if (goal == 0) 
+		cgno = ufs_inotocg (inode->i_ino);
+	else
+		cgno = ufs_dtog (goal);
+	 
+	/*
+	 * allocate new fragment
+	 */
+	if (oldcount == 0) {
+		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
+		if (result) {
+			*p = cpu_to_fs32(sb, result);
+			*err = 0;
+			inode->i_blocks += count << uspi->s_nspfshift;
+			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+			NULLIFY_FRAGMENTS
+		}
+		unlock_super(sb);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	/*
+	 * resize block
+	 */
+	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
+	if (result) {
+		*err = 0;
+		inode->i_blocks += count << uspi->s_nspfshift;
+		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+		NULLIFY_FRAGMENTS
+		unlock_super(sb);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	/*
+	 * allocate new block and move data
+	 */
+	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
+	    case UFS_OPTSPACE:
+		request = newcount;
+		if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) 
+		    > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
+			break;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+		break;
+	    default:
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+	
+	    case UFS_OPTTIME:
+		request = uspi->s_fpb;
+		if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
+		    (uspi->s_minfree - 2) / 100)
+			break;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+		break;
+	}
+	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
+	if (result) {
+		for (i = 0; i < oldcount; i++) {
+			bh = sb_bread(sb, tmp + i);
+			if(bh)
+			{
+				clear_buffer_dirty(bh);
+				bh->b_blocknr = result + i;
+				mark_buffer_dirty (bh);
+				if (IS_SYNC(inode))
+					sync_dirty_buffer(bh);
+				brelse (bh);
+			}
+			else
+			{
+				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
+				unlock_super(sb);
+				return 0;
+			}
+		}
+		*p = cpu_to_fs32(sb, result);
+		*err = 0;
+		inode->i_blocks += count << uspi->s_nspfshift;
+		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+		NULLIFY_FRAGMENTS
+		unlock_super(sb);
+		if (newcount < request)
+			ufs_free_fragments (inode, result + newcount, request - newcount);
+		ufs_free_fragments (inode, tmp, oldcount);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	unlock_super(sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+}		
+
+static unsigned
+ufs_add_fragments (struct inode * inode, unsigned fragment,
+		   unsigned oldcount, unsigned newcount, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned cgno, fragno, fragoff, count, fragsize, i;
+	
+	UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	count = newcount - oldcount;
+	
+	cgno = ufs_dtog(fragment);
+	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
+		return 0;
+	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
+		return 0;
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi)
+		return 0;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_add_fragments",
+			"internal error, bad magic number on cg %u", cgno);
+		return 0;
+	}
+
+	fragno = ufs_dtogd (fragment);
+	fragoff = ufs_fragnum (fragno);
+	for (i = oldcount; i < newcount; i++)
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+			return 0;
+	/*
+	 * Block can be extended
+	 */
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+			break;
+	fragsize = i - oldcount;
+	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
+		ufs_panic (sb, "ufs_add_fragments",
+			"internal error or corrupted bitmap on cg %u", cgno);
+	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
+	if (fragsize != count)
+		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
+	for (i = oldcount; i < newcount; i++)
+		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
+	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+		*err = -EDQUOT;
+		return 0;
+	}
+
+	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
+	
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	UFSD(("EXIT, fragment %u\n", fragment))
+	
+	return fragment;
+}
+
+#define UFS_TEST_FREE_SPACE_CG \
+	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
+	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
+		goto cg_found; \
+	for (k = count; k < uspi->s_fpb; k++) \
+		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
+			goto cg_found; 
+
+static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
+	unsigned goal, unsigned count, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned oldcg, i, j, k, result, allocsize;
+	
+	UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	oldcg = cgno;
+	
+	/*
+	 * 1. searching on preferred cylinder group
+	 */
+	UFS_TEST_FREE_SPACE_CG
+
+	/*
+	 * 2. quadratic rehash
+	 */
+	for (j = 1; j < uspi->s_ncg; j *= 2) {
+		cgno += j;
+		if (cgno >= uspi->s_ncg) 
+			cgno -= uspi->s_ncg;
+		UFS_TEST_FREE_SPACE_CG
+	}
+
+	/*
+	 * 3. brute force search
+	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
+	 */
+	cgno = (oldcg + 1) % uspi->s_ncg;
+	for (j = 2; j < uspi->s_ncg; j++) {
+		cgno++;
+		if (cgno >= uspi->s_ncg)
+			cgno = 0;
+		UFS_TEST_FREE_SPACE_CG
+	}
+	
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+
+cg_found:
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi)
+		return 0;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) 
+		ufs_panic (sb, "ufs_alloc_fragments",
+			"internal error, bad magic number on cg %u", cgno);
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+
+	if (count == uspi->s_fpb) {
+		result = ufs_alloccg_block (inode, ucpi, goal, err);
+		if (result == (unsigned)-1)
+			return 0;
+		goto succeed;
+	}
+
+	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
+		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
+			break;
+	
+	if (allocsize == uspi->s_fpb) {
+		result = ufs_alloccg_block (inode, ucpi, goal, err);
+		if (result == (unsigned)-1)
+			return 0;
+		goal = ufs_dtogd (result);
+		for (i = count; i < uspi->s_fpb; i++)
+			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
+		i = uspi->s_fpb - count;
+		DQUOT_FREE_BLOCK(inode, i);
+
+		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
+		fs32_add(sb, &ucg->cg_frsum[i], 1);
+		goto succeed;
+	}
+
+	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
+	if (result == (unsigned)-1)
+		return 0;
+	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+		*err = -EDQUOT;
+		return 0;
+	}
+	for (i = 0; i < count; i++)
+		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
+	
+	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
+
+	if (count != allocsize)
+		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
+
+succeed:
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	result += cgno * uspi->s_fpg;
+	UFSD(("EXIT3, result %u\n", result))
+	return result;
+}
+
+static unsigned ufs_alloccg_block (struct inode * inode,
+	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cylinder_group * ucg;
+	unsigned result, cylno, blkno;
+
+	UFSD(("ENTER, goal %u\n", goal))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (goal == 0) {
+		goal = ucpi->c_rotor;
+		goto norot;
+	}
+	goal = ufs_blknum (goal);
+	goal = ufs_dtogd (goal);
+	
+	/*
+	 * If the requested block is available, use it.
+	 */
+	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
+		result = goal;
+		goto gotit;
+	}
+	
+norot:	
+	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
+	if (result == (unsigned)-1)
+		return (unsigned)-1;
+	ucpi->c_rotor = result;
+gotit:
+	blkno = ufs_fragstoblks(result);
+	ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
+	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+		ufs_clusteracct (sb, ucpi, blkno, -1);
+	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+		*err = -EDQUOT;
+		return (unsigned)-1;
+	}
+
+	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
+	cylno = ufs_cbtocylno(result);
+	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
+	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	
+	UFSD(("EXIT, result %u\n", result))
+
+	return result;
+}
+
+static unsigned ufs_bitmap_search (struct super_block * sb,
+	struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cylinder_group * ucg;
+	unsigned start, length, location, result;
+	unsigned position, fragsize, blockmap, mask;
+	
+	UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (goal)
+		start = ufs_dtogd(goal) >> 3;
+	else
+		start = ucpi->c_frotor >> 3;
+		
+	length = ((uspi->s_fpg + 7) >> 3) - start;
+	location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
+		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
+		1 << (count - 1 + (uspi->s_fpb & 7))); 
+	if (location == 0) {
+		length = start + 1;
+		location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length, 
+			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
+			1 << (count - 1 + (uspi->s_fpb & 7)));
+		if (location == 0) {
+			ufs_error (sb, "ufs_bitmap_search",
+			"bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
+			ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
+			return (unsigned)-1;
+		}
+		start = 0;
+	}
+	result = (start + length - location) << 3;
+	ucpi->c_frotor = result;
+
+	/*
+	 * found the byte in the map
+	 */
+	blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
+	fragsize = 0;
+	for (position = 0, mask = 1; position < 8; position++, mask <<= 1) {
+		if (blockmap & mask) {
+			if (!(position & uspi->s_fpbmask))
+				fragsize = 1;
+			else 
+				fragsize++;
+		}
+		else {
+			if (fragsize == count) {
+				result += position - count;
+				UFSD(("EXIT, result %u\n", result))
+				return result;
+			}
+			fragsize = 0;
+		}
+	}
+	if (fragsize == count) {
+		result += position - count;
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+	ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
+	UFSD(("EXIT (FAILED)\n"))
+	return (unsigned)-1;
+}
+
+static void ufs_clusteracct(struct super_block * sb,
+	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
+{
+	struct ufs_sb_private_info * uspi;
+	int i, start, end, forw, back;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	if (uspi->s_contigsumsize <= 0)
+		return;
+
+	if (cnt > 0)
+		ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+	else
+		ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+
+	/*
+	 * Find the size of the cluster going forward.
+	 */
+	start = blkno + 1;
+	end = start + uspi->s_contigsumsize;
+	if ( end >= ucpi->c_nclusterblks)
+		end = ucpi->c_nclusterblks;
+	i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
+	if (i > end)
+		i = end;
+	forw = i - start;
+	
+	/*
+	 * Find the size of the cluster going backward.
+	 */
+	start = blkno - 1;
+	end = start - uspi->s_contigsumsize;
+	if (end < 0 ) 
+		end = -1;
+	i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
+	if ( i < end) 
+		i = end;
+	back = start - i;
+	
+	/*
+	 * Account for old cluster and the possibly new forward and
+	 * back clusters.
+	 */
+	i = back + forw + 1;
+	if (i > uspi->s_contigsumsize)
+		i = uspi->s_contigsumsize;
+	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
+	if (back > 0)
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
+	if (forw > 0)
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
+}
+
+
+static unsigned char ufs_fragtable_8fpb[] = {
+	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
+	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
+	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
+	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
+	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
+};
+
+static unsigned char ufs_fragtable_other[] = {
+	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
+	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
+	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
+	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
+	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
+	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
+};
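The last stage of ufs_bitmap_search() earlier in this file walks the bits of a candidate bitmap byte looking for a run of exactly `count` free fragments. A simplified stand-alone sketch of that inner loop; it omits the block-boundary reset the kernel code performs with s_fpbmask:

#include <stdio.h>

/* Return the bit offset where a run of exactly `count` set (free) bits
 * starts within the 8-bit map, or -1 if no such run exists. */
static int find_free_run(unsigned blockmap, unsigned count)
{
	unsigned pos, mask, fragsize = 0;

	for (pos = 0, mask = 1; pos < 8; pos++, mask <<= 1) {
		if (blockmap & mask) {
			fragsize++;
		} else {
			if (fragsize == count)
				return pos - count;
			fragsize = 0;
		}
	}
	return fragsize == count ? (int)(pos - count) : -1;
}

int main(void)
{
	/* bits 2..4 set: a run of three free fragments starting at bit 2 */
	printf("%d\n", find_free_run(0x1C, 3));	/* prints 2 */
	return 0;
}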
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
new file mode 100644
index 0000000..14abb8b
--- /dev/null
+++ b/fs/ufs/cylinder.c
@@ -0,0 +1,209 @@
+/*
+ *  linux/fs/ufs/cylinder.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  ext2 - inode (block) bitmap caching inspired
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_CYLINDER_DEBUG
+
+#ifdef UFS_CYLINDER_DEBUG
+#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+
+/*
+ * Read cylinder group into cache. The memory space for ufs_cg_private_info
+ * structure is already allocated during ufs_read_super.
+ */
+static void ufs_read_cylinder (struct super_block * sb,
+	unsigned cgno, unsigned bitmap_nr)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned i, j;
+
+	UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
+	uspi = sbi->s_uspi;
+	ucpi = sbi->s_ucpi[bitmap_nr];
+	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
+
+	UCPI_UBH->fragment = ufs_cgcmin(cgno);
+	UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
+	/*
+	 * We already have the first fragment of the cylinder group block in the buffer
+	 */
+	UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
+	for (i = 1; i < UCPI_UBH->count; i++)
+		if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
+			goto failed;
+	sbi->s_cgno[bitmap_nr] = cgno;
+			
+	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
+	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
+	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
+	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
+	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
+	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
+	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
+	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
+	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
+	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
+	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
+	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
+	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
+	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
+	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
+	UFSD(("EXIT\n"))
+	return;	
+	
+failed:
+	for (j = 1; j < i; j++)
+		brelse (sbi->s_ucg[j]);
+	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
+	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
+}
+
+/*
+ * Remove a cylinder group from the cache.  This does not release the
+ * memory allocated for it (that is done only at ufs_put_super).
+ */
+void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi; 
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned i;
+
+	UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))
+
+	uspi = sbi->s_uspi;
+	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
+		UFSD(("EXIT\n"))
+		return;
+	}
+	ucpi = sbi->s_ucpi[bitmap_nr];
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
+		ufs_panic (sb, "ufs_put_cylinder", "internal error");
+		return;
+	}
+	/*
+	 * The rotor values are not critical data, so we only write them back
+	 * to disk when we are done working with the cylinder group
+	 */
+	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
+	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
+	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	for (i = 1; i < UCPI_UBH->count; i++) {
+		brelse (UCPI_UBH->bh[i]);
+	}
+
+	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
+	UFSD(("EXIT\n"))
+}
+
+/*
+ * Find a cylinder group in the cache and return a pointer to it.
+ * If the cylinder group is not in the cache, load it from disk.
+ *
+ * The cache is managed with an LRU algorithm.
+ */
+struct ufs_cg_private_info * ufs_load_cylinder (
+	struct super_block * sb, unsigned cgno)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_cg_private_info * ucpi;
+	unsigned cg, i, j;
+
+	UFSD(("ENTER, cgno %u\n", cgno))
+
+	uspi = sbi->s_uspi;
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
+		return NULL;
+	}
+	/*
+	 * Cylinder group number cg is in cache and was the last one used
+	 */
+	if (sbi->s_cgno[0] == cgno) {
+		UFSD(("EXIT\n"))
+		return sbi->s_ucpi[0];
+	}
+	/*
+	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
+	 */
+	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
+		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
+			if (sbi->s_cgno[cgno] != cgno) {
+				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
+				UFSD(("EXIT (FAILED)\n"))
+				return NULL;
+			}
+			else {
+				UFSD(("EXIT\n"))
+				return sbi->s_ucpi[cgno];
+			}
+		} else {
+			ufs_read_cylinder (sb, cgno, cgno);
+			UFSD(("EXIT\n"))
+			return sbi->s_ucpi[cgno];
+		}
+	}
+	/*
+	 * Cylinder group number cg is in cache but was not the last one used,
+	 * so we move it to the first position
+	 */
+	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
+	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
+		cg = sbi->s_cgno[i];
+		ucpi = sbi->s_ucpi[i];
+		for (j = i; j > 0; j--) {
+			sbi->s_cgno[j] = sbi->s_cgno[j-1];
+			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
+		}
+		sbi->s_cgno[0] = cg;
+		sbi->s_ucpi[0] = ucpi;
+	/*
+	 * Cylinder group number cg is not in cache, so we read it from disk
+	 * and put it in the first position
+	 */
+	} else {
+		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
+			sbi->s_cg_loaded++;
+		else
+			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
+		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
+		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
+			sbi->s_cgno[j] = sbi->s_cgno[j-1];
+			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
+		}
+		sbi->s_ucpi[0] = ucpi;
+		ufs_read_cylinder (sb, cgno, 0);
+	}
+	UFSD(("EXIT\n"))
+	return sbi->s_ucpi[0];
+}
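ufs_load_cylinder() keeps its small per-superblock cache ordered by recency: on a hit, the entry is rotated to slot 0 and the entries in front of it slide down one place. A stand-alone sketch of that move-to-front step with plain arrays (UFS_MAX_GROUP_LOADED replaced by a local constant):

#include <stdio.h>

#define LOADED 8	/* stands in for UFS_MAX_GROUP_LOADED */

static void move_to_front(unsigned cgno[LOADED], void *ucpi[LOADED], unsigned i)
{
	unsigned cg = cgno[i];
	void *u = ucpi[i];
	unsigned j;

	/* Slide entries 0..i-1 down by one, then put the hit entry first. */
	for (j = i; j > 0; j--) {
		cgno[j] = cgno[j - 1];
		ucpi[j] = ucpi[j - 1];
	}
	cgno[0] = cg;
	ucpi[0] = u;
}

int main(void)
{
	unsigned cgno[LOADED] = { 4, 7, 2, 9 };
	void *ucpi[LOADED] = { 0 };

	move_to_front(cgno, ucpi, 2);	/* hit on cylinder group 2 */
	printf("%u %u %u %u\n", cgno[0], cgno[1], cgno[2], cgno[3]);	/* 2 4 7 9 */
	return 0;
}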
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
new file mode 100644
index 0000000..d0915fb
--- /dev/null
+++ b/fs/ufs/dir.c
@@ -0,0 +1,627 @@
+/*
+ *  linux/fs/ufs/ufs_dir.c
+ *
+ * Copyright (C) 1996
+ * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
+ * Laboratory for Computer Science Research Computing Facility
+ * Rutgers, The State University of New Jersey
+ *
+ * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
+ *
+ * 4.4BSD (FreeBSD) support added on February 1st 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
+ * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_DIR_DEBUG
+
+#ifdef UFS_DIR_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static int
+ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_entry *,
+		     struct buffer_head *, unsigned long);
+
+
+/*
+ * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
+ *
+ * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
+ */
+static inline int ufs_match(struct super_block *sb, int len,
+		const char * const name, struct ufs_dir_entry * de)
+{
+	if (len != ufs_get_de_namlen(sb, de))
+		return 0;
+	if (!de->d_ino)
+		return 0;
+	return !memcmp(name, de->d_name, len);
+}
+
+/*
+ * This is blatantly stolen from ext2fs
+ */
+static int
+ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int error = 0;
+	unsigned long offset, lblk;
+	int i, stored;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de;
+	struct super_block * sb;
+	int de_reclen;
+	unsigned flags;
+	u64     blk= 0L;
+
+	lock_kernel();
+
+	sb = inode->i_sb;
+	flags = UFS_SB(sb)->s_flags;
+
+	UFSD(("ENTER, ino %lu  f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
+
+	stored = 0;
+	bh = NULL;
+	offset = filp->f_pos & (sb->s_blocksize - 1);
+
+	while (!error && !stored && filp->f_pos < inode->i_size) {
+		lblk = (filp->f_pos) >> sb->s_blocksize_bits;
+		blk = ufs_frag_map(inode, lblk);
+		if (!blk || !(bh = sb_bread(sb, blk))) {
+			/* XXX - error - skip to the next block */
+			printk("ufs_readdir: "
+			       "dir inode %lu has a hole at offset %lu\n",
+			       inode->i_ino, (unsigned long int)filp->f_pos);
+			filp->f_pos += sb->s_blocksize - offset;
+			continue;
+		}
+
+revalidate:
+		/* If the dir block has changed since the last call to
+		 * readdir(2), then we might be pointing to an invalid
+		 * dirent right now.  Scan from the start of the block
+		 * to make sure. */
+		if (filp->f_version != inode->i_version) {
+			for (i = 0; i < sb->s_blocksize && i < offset; ) {
+				de = (struct ufs_dir_entry *)(bh->b_data + i);
+				/* It's too expensive to do a full
+				 * dirent test each time round this
+				 * loop, but we do have to test at
+				 * least that it is non-zero.  A
+				 * failure will be detected in the
+				 * dirent test below. */
+				de_reclen = fs16_to_cpu(sb, de->d_reclen);
+				if (de_reclen < 1)
+					break;
+				i += de_reclen;
+			}
+			offset = i;
+			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+				| offset;
+			filp->f_version = inode->i_version;
+		}
+
+		while (!error && filp->f_pos < inode->i_size
+		       && offset < sb->s_blocksize) {
+			de = (struct ufs_dir_entry *) (bh->b_data + offset);
+			/* XXX - put in a real ufs_check_dir_entry() */
+			if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) {
+				filp->f_pos = (filp->f_pos &
+				              (sb->s_blocksize - 1)) +
+				               sb->s_blocksize;
+				brelse(bh);
+				unlock_kernel();
+				return stored;
+			}
+			if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
+						   bh, offset)) {
+				/* On error, skip the f_pos to the
+				   next block. */
+				filp->f_pos = (filp->f_pos |
+				              (sb->s_blocksize - 1)) +
+					       1;
+				brelse (bh);
+				unlock_kernel();
+				return stored;
+			}
+			offset += fs16_to_cpu(sb, de->d_reclen);
+			if (de->d_ino) {
+				/* We might block in the next section
+				 * if the data destination is
+				 * currently swapped out.  So, use a
+				 * version stamp to detect whether or
+				 * not the directory has been modified
+				 * during the copy operation. */
+				unsigned long version = filp->f_version;
+				unsigned char d_type = DT_UNKNOWN;
+
+				UFSD(("filldir(%s,%u)\n", de->d_name,
+							fs32_to_cpu(sb, de->d_ino)))
+				UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)))
+
+				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
+					d_type = de->d_u.d_44.d_type;
+				error = filldir(dirent, de->d_name,
+						ufs_get_de_namlen(sb, de), filp->f_pos,
+						fs32_to_cpu(sb, de->d_ino), d_type);
+				if (error)
+					break;
+				if (version != filp->f_version)
+					goto revalidate;
+				stored ++;
+			}
+			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
+		}
+		offset = 0;
+		brelse (bh);
+	}
+	unlock_kernel();
+	return 0;
+}
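
ufs_readdir() protects itself against concurrent directory changes with a version stamp rather than a lock around the copy-out: it snapshots f_version before calling filldir(), and if the directory's i_version moved in the meantime it rescans the block from the start. A minimal sketch of that check-after-copy pattern, with made-up reader_emit()/writer_touch() helpers standing in for filldir() and a concurrent writer:

#include <stdio.h>

struct dir_state {
	unsigned long version;	/* bumped on every directory modification */
	int entries;
};

/* Pretend the directory is modified while the "copy" is in flight. */
static void writer_touch(struct dir_state *d)
{
	d->version++;
	d->entries++;
}

/* Returns 1 if the cached position is still valid, 0 if a rescan is needed. */
static int reader_emit(struct dir_state *d, int simulate_race)
{
	unsigned long stamp = d->version;	/* like filp->f_version */

	if (simulate_race)
		writer_touch(d);		/* directory changed under us */

	/* analogue of "if (version != filp->f_version) goto revalidate" */
	return stamp == d->version;
}

int main(void)
{
	struct dir_state d = { .version = 1, .entries = 2 };

	printf("no race:   %s\n", reader_emit(&d, 0) ? "continue" : "revalidate");
	printf("with race: %s\n", reader_emit(&d, 1) ? "continue" : "revalidate");
	return 0;
}
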
+
+/*
+ * define how far ahead to read directories while searching them.
+ */
+#define NAMEI_RA_CHUNKS  2
+#define NAMEI_RA_BLOCKS  4
+#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
+
+/*
+ *	ufs_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the cache buffer in which the entry was found, and the entry
+ * itself (as a parameter - res_bh). It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ */
+struct ufs_dir_entry * ufs_find_entry (struct dentry *dentry,
+	struct buffer_head ** res_bh)
+{
+	struct super_block * sb;
+	struct buffer_head * bh_use[NAMEI_RA_SIZE];
+	struct buffer_head * bh_read[NAMEI_RA_SIZE];
+	unsigned long offset;
+	int block, toread, i, err;
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+
+	UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen))
+	
+	*res_bh = NULL;
+	
+	sb = dir->i_sb;
+	
+	if (namelen > UFS_MAXNAMLEN)
+		return NULL;
+
+	memset (bh_use, 0, sizeof (bh_use));
+	toread = 0;
+	for (block = 0; block < NAMEI_RA_SIZE; ++block) {
+		struct buffer_head * bh;
+
+		if ((block << sb->s_blocksize_bits) >= dir->i_size)
+			break;
+		bh = ufs_getfrag (dir, block, 0, &err);
+		bh_use[block] = bh;
+		if (bh && !buffer_uptodate(bh))
+			bh_read[toread++] = bh;
+	}
+
+	for (block = 0, offset = 0; offset < dir->i_size; block++) {
+		struct buffer_head * bh;
+		struct ufs_dir_entry * de;
+		char * dlimit;
+
+		if ((block % NAMEI_RA_BLOCKS) == 0 && toread) {
+			ll_rw_block (READ, toread, bh_read);
+			toread = 0;
+		}
+		bh = bh_use[block % NAMEI_RA_SIZE];
+		if (!bh) {
+			ufs_error (sb, "ufs_find_entry", 
+				"directory #%lu contains a hole at offset %lu",
+				dir->i_ino, offset);
+			offset += sb->s_blocksize;
+			continue;
+		}
+		wait_on_buffer (bh);
+		if (!buffer_uptodate(bh)) {
+			/*
+			 * read error: all bets are off
+			 */
+			break;
+		}
+
+		de = (struct ufs_dir_entry *) bh->b_data;
+		dlimit = bh->b_data + sb->s_blocksize;
+		while ((char *) de < dlimit && offset < dir->i_size) {
+			/* this code is executed quadratically often */
+			/* do minimal checking by hand */
+			int de_len;
+
+			if ((char *) de + namelen <= dlimit &&
+			    ufs_match(sb, namelen, name, de)) {
+				/* found a match -
+				just to be sure, do a full check */
+				if (!ufs_check_dir_entry("ufs_find_entry",
+				    dir, de, bh, offset))
+					goto failed;
+				for (i = 0; i < NAMEI_RA_SIZE; ++i) {
+					if (bh_use[i] != bh)
+						brelse (bh_use[i]);
+				}
+				*res_bh = bh;
+				return de;
+			}
+                        /* prevent looping on a bad block */
+			de_len = fs16_to_cpu(sb, de->d_reclen);
+			if (de_len <= 0)
+				goto failed;
+			offset += de_len;
+			de = (struct ufs_dir_entry *) ((char *) de + de_len);
+		}
+
+		brelse (bh);
+		if (((block + NAMEI_RA_SIZE) << sb->s_blocksize_bits ) >=
+		    dir->i_size)
+			bh = NULL;
+		else
+			bh = ufs_getfrag (dir, block + NAMEI_RA_SIZE, 0, &err);
+		bh_use[block % NAMEI_RA_SIZE] = bh;
+		if (bh && !buffer_uptodate(bh))
+			bh_read[toread++] = bh;
+	}
+
+failed:
+	for (i = 0; i < NAMEI_RA_SIZE; ++i) brelse (bh_use[i]);
+	UFSD(("EXIT\n"))
+	return NULL;
+}
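
ufs_find_entry() keeps a circular window of NAMEI_RA_SIZE buffers, indexed by block % NAMEI_RA_SIZE, and pushes the queued reads to ll_rw_block() every NAMEI_RA_BLOCKS blocks. The window arithmetic on its own (no buffer heads, just the indices) looks like this:

#include <stdio.h>

#define RA_CHUNKS 2
#define RA_BLOCKS 4
#define RA_SIZE   (RA_CHUNKS * RA_BLOCKS)	/* 8 cached blocks */

int main(void)
{
	/* For each directory block, show which cache slot it occupies and
	 * whether a new batch of reads would be submitted before it. */
	for (int block = 0; block < 12; block++) {
		int slot = block % RA_SIZE;
		int submit = (block % RA_BLOCKS) == 0;

		printf("block %2d -> slot %d%s\n", block, slot,
		       submit ? "  (read batch issued here)" : "");
	}
	return 0;
}
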
+
+static int
+ufs_check_dir_entry (const char *function, struct inode *dir,
+		     struct ufs_dir_entry *de, struct buffer_head *bh,
+		     unsigned long offset)
+{
+	struct super_block *sb = dir->i_sb;
+	const char *error_msg = NULL;
+	int rlen = fs16_to_cpu(sb, de->d_reclen);
+
+	if (rlen < UFS_DIR_REC_LEN(1))
+		error_msg = "reclen is smaller than minimal";
+	else if (rlen % 4 != 0)
+		error_msg = "reclen % 4 != 0";
+	else if (rlen < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)))
+		error_msg = "reclen is too small for namlen";
+	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+		error_msg = "directory entry across blocks";
+	else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
+				      UFS_SB(sb)->s_uspi->s_ncg))
+		error_msg = "inode out of bounds";
+
+	if (error_msg != NULL)
+		ufs_error (sb, function, "bad entry in directory #%lu, size %Lu: %s - "
+			    "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
+			    dir->i_ino, dir->i_size, error_msg, offset,
+			    (unsigned long)fs32_to_cpu(sb, de->d_ino),
+			    rlen, ufs_get_de_namlen(sb, de));
+	
+	return (error_msg == NULL ? 1 : 0);
+}
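
All of the checks above revolve around d_reclen: a record must at least cover its header and name, be a multiple of 4, and stay inside one block. Assuming the usual FFS-style entry layout (a 4-byte inode number, 2-byte record length, 2-byte namlen/type field, then the NUL-terminated name padded to 4 bytes), the minimal record length for a given name can be computed as below; rec_len_for() is an illustrative helper meant to mirror what UFS_DIR_REC_LEN is expected to produce, not the kernel macro itself.

#include <stdio.h>

/* 8-byte header + name + NUL, rounded up to a multiple of 4 (assumed layout). */
static unsigned rec_len_for(unsigned namlen)
{
	return (namlen + 1 + 8 + 3) & ~3u;
}

int main(void)
{
	const char *names[] = { ".", "..", "lost+found", "a_longer_file_name" };

	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		unsigned len = 0;

		while (names[i][len])
			len++;
		printf("%-20s namlen %2u  minimal reclen %2u\n",
		       names[i], len, rec_len_for(len));
	}
	return 0;
}
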
+
+struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct buffer_head **p)
+{
+	int err;
+	struct buffer_head *bh = ufs_bread (dir, 0, 0, &err);
+	struct ufs_dir_entry *res = NULL;
+
+	if (bh) {
+		res = (struct ufs_dir_entry *) bh->b_data;
+		res = (struct ufs_dir_entry *)((char *)res +
+			fs16_to_cpu(dir->i_sb, res->d_reclen));
+	}
+	*p = bh;
+	return res;
+}
+ino_t ufs_inode_by_name(struct inode * dir, struct dentry *dentry)
+{
+	ino_t res = 0;
+	struct ufs_dir_entry * de;
+	struct buffer_head *bh;
+
+	de = ufs_find_entry (dentry, &bh);
+	if (de) {
+		res = fs32_to_cpu(dir->i_sb, de->d_ino);
+		brelse(bh);
+	}
+	return res;
+}
+
+void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
+		struct buffer_head *bh, struct inode *inode)
+{
+	dir->i_version++;
+	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
+	mark_buffer_dirty(bh);
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
+	brelse (bh);
+}
+
+/*
+ *	ufs_add_entry()
+ *
+ * adds a file entry to the specified directory, using the same
+ * semantics as ufs_find_entry(). It returns 0 on success or a
+ * negative error code on failure.
+ */
+int ufs_add_link(struct dentry *dentry, struct inode *inode)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	unsigned long offset;
+	unsigned fragoff;
+	unsigned short rec_len;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de, * de1;
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	int err;
+
+	UFSD(("ENTER, name %s, namelen %u\n", name, namelen))
+	
+	sb = dir->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	if (!namelen)
+		return -EINVAL;
+	bh = ufs_bread (dir, 0, 0, &err);
+	if (!bh)
+		return err;
+	rec_len = UFS_DIR_REC_LEN(namelen);
+	offset = 0;
+	de = (struct ufs_dir_entry *) bh->b_data;
+	while (1) {
+		if ((char *)de >= UFS_SECTOR_SIZE + bh->b_data) {
+			fragoff = offset & ~uspi->s_fmask;
+			if (fragoff != 0 && fragoff != UFS_SECTOR_SIZE)
+				ufs_error (sb, "ufs_add_entry", "internal error"
+					" fragoff %u", fragoff);
+			if (!fragoff) {
+				brelse (bh);
+				bh = ufs_bread (dir, offset >> sb->s_blocksize_bits, 1, &err);
+				if (!bh)
+					return err;
+			}
+			if (dir->i_size <= offset) {
+				if (dir->i_size == 0) {
+					brelse(bh);
+					return -ENOENT;
+				}
+				de = (struct ufs_dir_entry *) (bh->b_data + fragoff);
+				de->d_ino = 0;
+				de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
+				ufs_set_de_namlen(sb, de, 0);
+				dir->i_size = offset + UFS_SECTOR_SIZE;
+				mark_inode_dirty(dir);
+			} else {
+				de = (struct ufs_dir_entry *) bh->b_data;
+			}
+		}
+		if (!ufs_check_dir_entry ("ufs_add_entry", dir, de, bh, offset)) {
+			brelse (bh);
+			return -ENOENT;
+		}
+		if (ufs_match(sb, namelen, name, de)) {
+			brelse (bh);
+			return -EEXIST;
+		}
+		if (de->d_ino == 0 && fs16_to_cpu(sb, de->d_reclen) >= rec_len)
+			break;
+			
+		if (fs16_to_cpu(sb, de->d_reclen) >=
+		     UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)) + rec_len)
+			break;
+		offset += fs16_to_cpu(sb, de->d_reclen);
+		de = (struct ufs_dir_entry *) ((char *) de + fs16_to_cpu(sb, de->d_reclen));
+	}
+
+	if (de->d_ino) {
+		de1 = (struct ufs_dir_entry *) ((char *) de +
+			UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de1->d_reclen =
+			cpu_to_fs16(sb, fs16_to_cpu(sb, de->d_reclen) -
+				UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de->d_reclen =
+			cpu_to_fs16(sb, UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de = de1;
+	}
+	de->d_ino = 0;
+	ufs_set_de_namlen(sb, de, namelen);
+	memcpy (de->d_name, name, namelen + 1);
+	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
+	ufs_set_de_type(sb, de, inode->i_mode);
+	mark_buffer_dirty(bh);
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
+	brelse (bh);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_version++;
+	mark_inode_dirty(dir);
+
+	UFSD(("EXIT\n"))
+	return 0;
+}
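
When ufs_add_link() stops at a live entry with enough slack, it splits the slot: the existing entry is shrunk to its minimal record length and the leftover bytes become the new entry's record (the de/de1 manipulation above). The bookkeeping is a single subtraction; a worked example with assumed sizes (512-byte directory sector, 12- and 20-byte minimal records):

#include <stdio.h>

int main(void)
{
	unsigned sector = 512;		/* plays the role of UFS_SECTOR_SIZE */
	unsigned used_min = 12;		/* minimal reclen of the entry being split */
	unsigned old_reclen = sector;	/* entry currently owning the whole sector */
	unsigned need = 20;		/* minimal reclen of the name being added */

	if (old_reclen >= used_min + need) {
		unsigned new_reclen = old_reclen - used_min;	/* becomes de1->d_reclen */

		printf("existing entry shrinks to %u bytes\n", used_min);
		printf("new entry gets the remaining %u bytes of the slot\n", new_reclen);
	} else {
		printf("slot too small, keep scanning\n");
	}
	return 0;
}
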
+
+/*
+ * ufs_delete_entry deletes a directory entry by merging it with the
+ * previous entry.
+ */
+int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
+	struct buffer_head * bh )
+	
+{
+	struct super_block * sb;
+	struct ufs_dir_entry * de, * pde;
+	unsigned i;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	i = 0;
+	pde = NULL;
+	de = (struct ufs_dir_entry *) bh->b_data;
+	
+	UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
+		fs32_to_cpu(sb, de->d_ino),
+		fs16_to_cpu(sb, de->d_reclen),
+		ufs_get_de_namlen(sb, de), de->d_name))
+
+	while (i < bh->b_size) {
+		if (!ufs_check_dir_entry ("ufs_delete_entry", inode, de, bh, i)) {
+			brelse(bh);
+			return -EIO;
+		}
+		if (de == dir)  {
+			if (pde)
+				fs16_add(sb, &pde->d_reclen,
+					fs16_to_cpu(sb, dir->d_reclen));
+			dir->d_ino = 0;
+			inode->i_version++;
+			inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+			mark_inode_dirty(inode);
+			mark_buffer_dirty(bh);
+			if (IS_DIRSYNC(inode))
+				sync_dirty_buffer(bh);
+			brelse(bh);
+			UFSD(("EXIT\n"))
+			return 0;
+		}
+		i += fs16_to_cpu(sb, de->d_reclen);
+		if (i == UFS_SECTOR_SIZE) pde = NULL;
+		else pde = de;
+		de = (struct ufs_dir_entry *)
+		    ((char *) de + fs16_to_cpu(sb, de->d_reclen));
+		if (i == UFS_SECTOR_SIZE && de->d_reclen == 0)
+			break;
+	}
+	UFSD(("EXIT\n"))
+	brelse(bh);
+	return -ENOENT;
+}
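
Deletion never compacts the directory block: the previous entry simply absorbs the victim's record length and the victim's inode number is cleared, which is exactly what the fs16_add()/d_ino = 0 pair above does. In plain host-endian numbers (the 12- and 20-byte lengths are illustrative):

#include <stdio.h>

struct fake_de {			/* host-endian stand-in for ufs_dir_entry */
	unsigned ino;
	unsigned short reclen;
};

int main(void)
{
	struct fake_de prev   = { .ino = 42, .reclen = 12 };
	struct fake_de victim = { .ino = 99, .reclen = 20 };

	/* equivalent of fs16_add(sb, &pde->d_reclen, ...) plus dir->d_ino = 0 */
	prev.reclen += victim.reclen;
	victim.ino = 0;

	printf("prev now spans %hu bytes; victim ino=%u (reusable space)\n",
	       prev.reclen, victim.ino);
	return 0;
}
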
+
+int ufs_make_empty(struct inode * inode, struct inode *dir)
+{
+	struct super_block * sb = dir->i_sb;
+	struct buffer_head * dir_block;
+	struct ufs_dir_entry * de;
+	int err;
+
+	dir_block = ufs_bread (inode, 0, 1, &err);
+	if (!dir_block)
+		return err;
+
+	inode->i_blocks = sb->s_blocksize / UFS_SECTOR_SIZE;
+	de = (struct ufs_dir_entry *) dir_block->b_data;
+	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
+	ufs_set_de_type(sb, de, inode->i_mode);
+	ufs_set_de_namlen(sb, de, 1);
+	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
+	strcpy (de->d_name, ".");
+	de = (struct ufs_dir_entry *)
+		((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
+	ufs_set_de_type(sb, de, dir->i_mode);
+	de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
+	ufs_set_de_namlen(sb, de, 2);
+	strcpy (de->d_name, "..");
+	mark_buffer_dirty(dir_block);
+	brelse (dir_block);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int ufs_empty_dir (struct inode * inode)
+{
+	struct super_block * sb;
+	unsigned long offset;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de, * de1;
+	int err;
+	
+	sb = inode->i_sb;
+
+	if (inode->i_size < UFS_DIR_REC_LEN(1) + UFS_DIR_REC_LEN(2) ||
+	    !(bh = ufs_bread (inode, 0, 0, &err))) {
+	    	ufs_warning (inode->i_sb, "empty_dir",
+			      "bad directory (dir #%lu) - no data block",
+			      inode->i_ino);
+		return 1;
+	}
+	de = (struct ufs_dir_entry *) bh->b_data;
+	de1 = (struct ufs_dir_entry *)
+		((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	if (fs32_to_cpu(sb, de->d_ino) != inode->i_ino || de1->d_ino == 0 ||
+	     strcmp (".", de->d_name) || strcmp ("..", de1->d_name)) {
+	    	ufs_warning (inode->i_sb, "empty_dir",
+			      "bad directory (dir #%lu) - no `.' or `..'",
+			      inode->i_ino);
+		return 1;
+	}
+	offset = fs16_to_cpu(sb, de->d_reclen) + fs16_to_cpu(sb, de1->d_reclen);
+	de = (struct ufs_dir_entry *)
+		((char *)de1 + fs16_to_cpu(sb, de1->d_reclen));
+	while (offset < inode->i_size ) {
+		if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
+			brelse (bh);
+			bh = ufs_bread (inode, offset >> sb->s_blocksize_bits, 1, &err);
+	 		if (!bh) {
+				ufs_error (sb, "empty_dir",
+					    "directory #%lu contains a hole at offset %lu",
+					    inode->i_ino, offset);
+				offset += sb->s_blocksize;
+				continue;
+			}
+			de = (struct ufs_dir_entry *) bh->b_data;
+		}
+		if (!ufs_check_dir_entry ("empty_dir", inode, de, bh, offset)) {
+			brelse (bh);
+			return 1;
+		}
+		if (de->d_ino) {
+			brelse (bh);
+			return 0;
+		}
+		offset += fs16_to_cpu(sb, de->d_reclen);
+		de = (struct ufs_dir_entry *)
+			((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	}
+	brelse (bh);
+	return 1;
+}
+
+struct file_operations ufs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= ufs_readdir,
+	.fsync		= file_fsync,
+};
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
new file mode 100644
index 0000000..ed69d7fe
--- /dev/null
+++ b/fs/ufs/file.c
@@ -0,0 +1,55 @@
+/*
+ *  linux/fs/ufs/file.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 fs regular file handling primitives
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/fcntl.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+/*
+ * We have mostly NULL's here: the current defaults are ok for
+ * the ufs filesystem.
+ */
+ 
+struct file_operations ufs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.open           = generic_file_open,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations ufs_file_inode_operations = {
+	.truncate	= ufs_truncate,
+};
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
new file mode 100644
index 0000000..61a6b15
--- /dev/null
+++ b/fs/ufs/ialloc.c
@@ -0,0 +1,302 @@
+/*
+ *  linux/fs/ufs/ialloc.c
+ *
+ * Copyright (c) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/ialloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  BSD ufs-inspired inode and directory allocation by 
+ *  Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_IALLOC_DEBUG
+
+#ifdef UFS_IALLOC_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+/*
+ * NOTE! When we get the inode, we're the only people
+ * that have access to it, and as such there are no
+ * race conditions we have to worry about. The inode
+ * is not on the hash-lists, and it cannot be reached
+ * through the filesystem because the directory entry
+ * has been deleted earlier.
+ *
+ * HOWEVER: we must make sure that we get no aliases,
+ * which means that we have to call "clear_inode()"
+ * _before_ we mark the inode not in use in the inode
+ * bitmaps. Otherwise a newly created file might use
+ * the same inode number (not actually the same pointer
+ * though), and then we'd have two inodes sharing the
+ * same inode number and space on the harddisk.
+ */
+void ufs_free_inode (struct inode * inode)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	int is_directory;
+	unsigned ino, cg, bit;
+	
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	ino = inode->i_ino;
+
+	lock_super (sb);
+
+	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
+		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
+		unlock_super (sb);
+		return;
+	}
+	
+	cg = ufs_inotocg (ino);
+	bit = ufs_inotocgoff (ino);
+	ucpi = ufs_load_cylinder (sb, cg);
+	if (!ucpi) {
+		unlock_super (sb);
+		return;
+	}
+	ucg = ubh_get_ucg(UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg))
+		ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");
+
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+
+	is_directory = S_ISDIR(inode->i_mode);
+
+	DQUOT_FREE_INODE(inode);
+	DQUOT_DROP(inode);
+
+	clear_inode (inode);
+
+	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
+		ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
+	else {
+		ubh_clrbit (UCPI_UBH, ucpi->c_iusedoff, bit);
+		if (ino < ucpi->c_irotor)
+			ucpi->c_irotor = ino;
+		fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
+
+		if (is_directory) {
+			fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
+			fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1);
+			fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
+		}
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	
+	sb->s_dirt = 1;
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, the
+ * group with the fewest directories among those with above-average
+ * free space is chosen.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+struct inode * ufs_new_inode(struct inode * dir, int mode)
+{
+	struct super_block * sb;
+	struct ufs_sb_info * sbi;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	struct inode * inode;
+	unsigned cg, bit, i, j, start;
+	struct ufs_inode_info *ufsi;
+
+	UFSD(("ENTER\n"))
+	
+	/* Cannot create files in a deleted directory */
+	if (!dir || !dir->i_nlink)
+		return ERR_PTR(-EPERM);
+	sb = dir->i_sb;
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	ufsi = UFS_I(inode);
+	sbi = UFS_SB(sb);
+	uspi = sbi->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+
+	lock_super (sb);
+
+	/*
+	 * Try to place the inode in its parent directory
+	 */
+	i = ufs_inotocg(dir->i_ino);
+	if (sbi->fs_cs(i).cs_nifree) {
+		cg = i;
+		goto cg_found;
+	}
+
+	/*
+	 * Use a quadratic hash to find a group with a free inode
+	 */
+	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
+		i += j;
+		if (i >= uspi->s_ncg)
+			i -= uspi->s_ncg;
+		if (sbi->fs_cs(i).cs_nifree) {
+			cg = i;
+			goto cg_found;
+		}
+	}
+
+	/*
+	 * That failed: try linear search for a free inode
+	 */
+	i = ufs_inotocg(dir->i_ino) + 1;
+	for (j = 2; j < uspi->s_ncg; j++) {
+		i++;
+		if (i >= uspi->s_ncg)
+			i = 0;
+		if (sbi->fs_cs(i).cs_nifree) {
+			cg = i;
+			goto cg_found;
+		}
+	}
+	
+	goto failed;
+
+cg_found:
+	ucpi = ufs_load_cylinder (sb, cg);
+	if (!ucpi)
+		goto failed;
+	ucg = ubh_get_ucg(UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) 
+		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");
+
+	start = ucpi->c_irotor;
+	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
+	if (!(bit < uspi->s_ipg)) {
+		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
+		if (!(bit < start)) {
+			ufs_error (sb, "ufs_new_inode",
+			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
+			goto failed;
+		}
+	}
+	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
+	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
+		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
+	else {
+		ufs_panic (sb, "ufs_new_inode", "internal error");
+		goto failed;
+	}
+	
+	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
+	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
+	
+	if (S_ISDIR(mode)) {
+		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
+		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	inode->i_mode = mode;
+	inode->i_uid = current->fsuid;
+	if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			inode->i_mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+
+	inode->i_ino = cg * uspi->s_ipg + bit;
+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
+	inode->i_blocks = 0;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	ufsi->i_flags = UFS_I(dir)->i_flags;
+	ufsi->i_lastfrag = 0;
+	ufsi->i_gen = 0;
+	ufsi->i_shadow = 0;
+	ufsi->i_osync = 0;
+	ufsi->i_oeftflag = 0;
+	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
+
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+
+	unlock_super (sb);
+
+	if (DQUOT_ALLOC_INODE(inode)) {
+		DQUOT_DROP(inode);
+		inode->i_flags |= S_NOQUOTA;
+		inode->i_nlink = 0;
+		iput(inode);
+		return ERR_PTR(-EDQUOT);
+	}
+
+	UFSD(("allocating inode %lu\n", inode->i_ino))
+	UFSD(("EXIT\n"))
+	return inode;
+
+failed:
+	unlock_super (sb);
+	make_bad_inode(inode);
+	iput (inode);
+	UFSD(("EXIT (FAILED)\n"))
+	return ERR_PTR(-ENOSPC);
+}
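
The cylinder-group search in ufs_new_inode() runs in three stages: the parent directory's own group, then a "quadratic hash" whose stride doubles on every probe, then a linear scan starting just past the parent's group. The probe sequence generated by the doubling stride is easy to see in isolation; 16 groups and a starting group of 5 are arbitrary example values.

#include <stdio.h>

int main(void)
{
	unsigned ncg = 16;	/* number of cylinder groups (example value) */
	unsigned start = 5;	/* parent directory's group (example value)  */
	unsigned i = start;

	printf("start at group %u\n", i);
	/* stride doubles: 1, 2, 4, 8, ... exactly like the "j <<= 1" loop */
	for (unsigned j = 1; j < ncg; j <<= 1) {
		i += j;
		if (i >= ncg)
			i -= ncg;	/* wrap around, as in the code above */
		printf("probe group %u (stride %u)\n", i, j);
	}
	/* if every probe was full, the code falls back to a linear walk */
	return 0;
}
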
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
new file mode 100644
index 0000000..718627c
--- /dev/null
+++ b/fs/ufs/inode.c
@@ -0,0 +1,816 @@
+/*
+ *  linux/fs/ufs/inode.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_INODE_DEBUG
+#undef UFS_INODE_DEBUG_MORE
+
+#ifdef UFS_INODE_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
+	int ptrs = uspi->s_apb;
+	int ptrs_bits = uspi->s_apbshift;
+	const long direct_blocks = UFS_NDADDR,
+		indirect_blocks = ptrs,
+		double_blocks = (1 << (ptrs_bits * 2));
+	int n = 0;
+
+
+	UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%d \n",ptrs,double_blocks));
+	if (i_block < 0) {
+		ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
+	} else if (i_block < direct_blocks) {
+		offsets[n++] = i_block;
+	} else if ((i_block -= direct_blocks) < indirect_blocks) {
+		offsets[n++] = UFS_IND_BLOCK;
+		offsets[n++] = i_block;
+	} else if ((i_block -= indirect_blocks) < double_blocks) {
+		offsets[n++] = UFS_DIND_BLOCK;
+		offsets[n++] = i_block >> ptrs_bits;
+		offsets[n++] = i_block & (ptrs - 1);
+	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+		offsets[n++] = UFS_TIND_BLOCK;
+		offsets[n++] = i_block >> (ptrs_bits * 2);
+		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+		offsets[n++] = i_block & (ptrs - 1);
+	} else {
+		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
+	}
+	return n;
+}
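
ufs_block_to_path() turns a logical block number into at most four array indices: a direct slot, or a single/double/triple indirect chain with ptrs = 2^ptrs_bits pointers per indirect block. Here is the same arithmetic as a host program, using UFS_NDADDR = 12 direct blocks and an assumed 2048 pointers per indirect block; the example block numbers are arbitrary.

#include <stdio.h>

#define NDADDR   12		/* direct blocks (UFS_NDADDR) */
#define PTR_BITS 11		/* example: 2048 pointers per indirect block */
#define PTRS     (1L << PTR_BITS)

static int block_to_path(long b, long off[4])
{
	int n = 0;

	if (b < NDADDR) {				/* direct */
		off[n++] = b;
	} else if ((b -= NDADDR) < PTRS) {		/* single indirect */
		off[n++] = NDADDR;			/* UFS_IND_BLOCK */
		off[n++] = b;
	} else if ((b -= PTRS) < PTRS * PTRS) {		/* double indirect */
		off[n++] = NDADDR + 1;			/* UFS_DIND_BLOCK */
		off[n++] = b >> PTR_BITS;
		off[n++] = b & (PTRS - 1);
	} else {		/* triple indirect (upper bound check omitted) */
		b -= PTRS * PTRS;
		off[n++] = NDADDR + 2;			/* UFS_TIND_BLOCK */
		off[n++] = b >> (2 * PTR_BITS);
		off[n++] = (b >> PTR_BITS) & (PTRS - 1);
		off[n++] = b & (PTRS - 1);
	}
	return n;
}

int main(void)
{
	long blocks[] = { 0, 11, 12, 2060, 5000000 };

	for (unsigned k = 0; k < sizeof(blocks) / sizeof(blocks[0]); k++) {
		long off[4];
		int depth = block_to_path(blocks[k], off);

		printf("block %8ld -> depth %d:", blocks[k], depth);
		for (int d = 0; d < depth; d++)
			printf(" %ld", off[d]);
		printf("\n");
	}
	return 0;
}
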
+
+/*
+ * Returns the location of the fragment from
+ * the beginning of the filesystem.
+ */
+
+u64  ufs_frag_map(struct inode *inode, sector_t frag)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
+	int shift = uspi->s_apbshift-uspi->s_fpbshift;
+	sector_t offsets[4], *p;
+	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
+	u64  ret = 0L;
+	__fs32 block;
+	__fs64 u2_block = 0L;
+	unsigned flags = UFS_SB(sb)->s_flags;
+	u64 temp = 0L;
+
+	UFSD((": frag = %lu  depth = %d\n",frag,depth));
+	UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask));
+
+	if (depth == 0)
+		return 0;
+
+	p = offsets;
+
+	lock_kernel();
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		goto ufs2;
+
+	block = ufsi->i_u1.i_data[*p++];
+	if (!block)
+		goto out;
+	while (--depth) {
+		struct buffer_head *bh;
+		sector_t n = *p++;
+
+		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
+		if (!bh)
+			goto out;
+		block = ((__fs32 *) bh->b_data)[n & mask];
+		brelse (bh);
+		if (!block)
+			goto out;
+	}
+	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
+	goto out;
+ufs2:
+	u2_block = ufsi->i_u1.u2_i_data[*p++];
+	if (!u2_block)
+		goto out;
+
+
+	while (--depth) {
+		struct buffer_head *bh;
+		sector_t n = *p++;
+
+
+		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
+		bh = sb_bread(sb, temp +(u64) (n>>shift));
+		if (!bh)
+			goto out;
+		u2_block = ((__fs64 *)bh->b_data)[n & mask];
+		brelse(bh);
+		if (!u2_block)
+			goto out;
+	}
+	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
+	ret = temp + (u64) (frag & uspi->s_fpbmask);
+
+out:
+	unlock_kernel();
+	return ret;
+}
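
ufs_frag_map() works in fragment units: the fragment index is shifted right by s_fpbshift to get the logical block that drives the path walk, and the low bits (frag & s_fpbmask) are added back at the end to select the fragment inside the mapped block. With an assumed geometry of 8 fragments per block (fpbshift = 3) the split looks like this; none of the numbers come from a real superblock.

#include <stdio.h>

int main(void)
{
	unsigned fpbshift = 3;				/* 8 fragments per block (assumed) */
	unsigned long fpbmask = (1u << fpbshift) - 1;
	unsigned long frags[] = { 0, 7, 8, 100 };

	for (unsigned i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
		unsigned long frag = frags[i];
		unsigned long block = frag >> fpbshift;	/* fed to ufs_block_to_path() */
		unsigned long within = frag & fpbmask;	/* added back to the mapped block */

		printf("frag %3lu -> logical block %2lu, fragment %lu within it\n",
		       frag, block, within);
	}
	return 0;
}
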
+
+static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
+	unsigned int fragment, unsigned int new_fragment,
+	unsigned int required, int *err, int metadata, long *phys, int *new)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * result;
+	unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
+	unsigned tmp, goal;
+	__fs32 * p, * p2;
+	unsigned flags = 0;
+
+	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
+		inode->i_ino, fragment, new_fragment, required))         
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	flags = UFS_SB(sb)->s_flags;
+        /* TODO : to be done for write support
+        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+             goto ufs2;
+         */
+
+	block = ufs_fragstoblks (fragment);
+	blockoff = ufs_fragnum (fragment);
+	p = ufsi->i_u1.i_data + block;
+	goal = 0;
+
+repeat:
+	tmp = fs32_to_cpu(sb, *p);
+	lastfrag = ufsi->i_lastfrag;
+	if (tmp && fragment < lastfrag) {
+		if (metadata) {
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
+			if (tmp == fs32_to_cpu(sb, *p)) {
+				UFSD(("EXIT, result %u\n", tmp + blockoff))
+				return result;
+			}
+			brelse (result);
+			goto repeat;
+		} else {
+			*phys = tmp;
+			return NULL;
+		}
+	}
+
+	lastblock = ufs_fragstoblks (lastfrag);
+	lastblockoff = ufs_fragnum (lastfrag);
+	/*
+	 * We will extend file into new block beyond last allocated block
+	 */
+	if (lastblock < block) {
+		/*
+		 * We must reallocate last allocated block
+		 */
+		if (lastblockoff) {
+			p2 = ufsi->i_u1.i_data + lastblock;
+			tmp = ufs_new_fragments (inode, p2, lastfrag, 
+				fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err);
+			if (!tmp) {
+				if (lastfrag != ufsi->i_lastfrag)
+					goto repeat;
+				else
+					return NULL;
+			}
+			lastfrag = ufsi->i_lastfrag;
+			
+		}
+		goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
+		tmp = ufs_new_fragments (inode, p, fragment - blockoff, 
+			goal, required + blockoff, err);
+	}
+	/*
+	 * We will extend last allocated block
+	 */
+	else if (lastblock == block) {
+		tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
+			fs32_to_cpu(sb, *p), required +  (blockoff - lastblockoff), err);
+	}
+	/*
+	 * We will allocate new block before last allocated block
+	 */
+	else /* (lastblock > block) */ {
+		if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
+			goal = tmp + uspi->s_fpb;
+		tmp = ufs_new_fragments (inode, p, fragment - blockoff, 
+			goal, uspi->s_fpb, err);
+	}
+	if (!tmp) {
+		if ((!blockoff && *p) || 
+		    (blockoff && lastfrag != ufsi->i_lastfrag))
+			goto repeat;
+		*err = -ENOSPC;
+		return NULL;
+	}
+
+	/* The nullification of fragments done in ufs/balloc.c is
+	 * something I don't have the stomach to move into here right
+	 * now. -DaveM
+	 */
+	if (metadata) {
+		result = sb_getblk(inode->i_sb, tmp + blockoff);
+	} else {
+		*phys = tmp;
+		result = NULL;
+		*err = 0;
+		*new = 1;
+	}
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	if (IS_SYNC(inode))
+		ufs_sync_inode (inode);
+	mark_inode_dirty(inode);
+	UFSD(("EXIT, result %u\n", tmp + blockoff))
+	return result;
+
+     /* This part : To be implemented ....
+        Required only for writing, not required for READ-ONLY.
+ufs2:
+
+	u2_block = ufs_fragstoblks(fragment);
+	u2_blockoff = ufs_fragnum(fragment);
+	p = ufsi->i_u1.u2_i_data + block;
+	goal = 0;
+
+repeat2:
+	tmp = fs32_to_cpu(sb, *p);
+	lastfrag = ufsi->i_lastfrag;
+
+     */
+}
+
+static struct buffer_head * ufs_block_getfrag (struct inode *inode,
+	struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment, 
+	unsigned int blocksize, int * err, int metadata, long *phys, int *new)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * result;
+	unsigned tmp, goal, block, blockoff;
+	__fs32 * p;
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	block = ufs_fragstoblks (fragment);
+	blockoff = ufs_fragnum (fragment);
+
+	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment))	
+
+	result = NULL;
+	if (!bh)
+		goto out;
+	if (!buffer_uptodate(bh)) {
+		ll_rw_block (READ, 1, &bh);
+		wait_on_buffer (bh);
+		if (!buffer_uptodate(bh))
+			goto out;
+	}
+
+	p = (__fs32 *) bh->b_data + block;
+repeat:
+	tmp = fs32_to_cpu(sb, *p);
+	if (tmp) {
+		if (metadata) {
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
+			if (tmp == fs32_to_cpu(sb, *p))
+				goto out;
+			brelse (result);
+			goto repeat;
+		} else {
+			*phys = tmp;
+			goto out;
+		}
+	}
+
+	if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb))
+		goal = tmp + uspi->s_fpb;
+	else
+		goal = bh->b_blocknr + uspi->s_fpb;
+	tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
+	if (!tmp) {
+		if (fs32_to_cpu(sb, *p))
+			goto repeat;
+		goto out;
+	}		
+
+	/* The nullification of fragments done in ufs/balloc.c is
+	 * something I don't have the stomach to move into here right
+	 * now. -DaveM
+	 */
+	if (metadata) {
+		result = sb_getblk(sb, tmp + blockoff);
+	} else {
+		*phys = tmp;
+		*new = 1;
+	}
+
+	mark_buffer_dirty(bh);
+	if (IS_SYNC(inode))
+		sync_dirty_buffer(bh);
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+out:
+	brelse (bh);
+	UFSD(("EXIT, result %u\n", tmp + blockoff))
+	return result;
+}
+
+/*
+ * This function gets the block which contains the fragment.
+ */
+
+static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
+{
+	struct super_block * sb = inode->i_sb;
+	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
+	struct buffer_head * bh;
+	int ret, err, new;
+	unsigned long ptr,phys;
+	u64 phys64 = 0;
+	
+	if (!create) {
+		phys64 = ufs_frag_map(inode, fragment);
+		UFSD(("phys64 = %lu \n",phys64));
+		if (phys64)
+			map_bh(bh_result, sb, phys64);
+		return 0;
+	}
+
+        /* This code entered only while writing ....? */
+
+	err = -EIO;
+	new = 0;
+	ret = 0;
+	bh = NULL;
+
+	lock_kernel();
+
+	UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
+	if (fragment < 0)
+		goto abort_negative;
+	if (fragment >
+	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
+	     << uspi->s_fpbshift))
+		goto abort_too_big;
+
+	err = 0;
+	ptr = fragment;
+	  
+	/*
+	 * ok, these macros clean the logic up a bit and make
+	 * it much more readable:
+	 */
+#define GET_INODE_DATABLOCK(x) \
+		ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
+#define GET_INODE_PTR(x) \
+		ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
+#define GET_INDIRECT_DATABLOCK(x) \
+		ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
+				  &err, 0, &phys, &new);
+#define GET_INDIRECT_PTR(x) \
+		ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
+				  &err, 1, NULL, NULL);
+
+	if (ptr < UFS_NDIR_FRAGMENT) {
+		bh = GET_INODE_DATABLOCK(ptr);
+		goto out;
+	}
+	ptr -= UFS_NDIR_FRAGMENT;
+	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
+		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
+		goto get_indirect;
+	}
+	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
+	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
+		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
+		goto get_double;
+	}
+	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
+	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
+	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
+get_double:
+	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
+get_indirect:
+	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
+
+#undef GET_INODE_DATABLOCK
+#undef GET_INODE_PTR
+#undef GET_INDIRECT_DATABLOCK
+#undef GET_INDIRECT_PTR
+
+out:
+	if (err)
+		goto abort;
+	if (new)
+		set_buffer_new(bh_result);
+	map_bh(bh_result, sb, phys);
+abort:
+	unlock_kernel();
+	return err;
+
+abort_negative:
+	ufs_warning(sb, "ufs_get_block", "block < 0");
+	goto abort;
+
+abort_too_big:
+	ufs_warning(sb, "ufs_get_block", "block > big");
+	goto abort;
+}
+
+struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
+				int create, int *err)
+{
+	struct buffer_head dummy;
+	int error;
+
+	dummy.b_state = 0;
+	dummy.b_blocknr = -1000;
+	error = ufs_getfrag_block(inode, fragment, &dummy, create);
+	*err = error;
+	if (!error && buffer_mapped(&dummy)) {
+		struct buffer_head *bh;
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+		if (buffer_new(&dummy)) {
+			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+		}
+		return bh;
+	}
+	return NULL;
+}
+
+struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
+	int create, int * err)
+{
+	struct buffer_head * bh;
+
+	UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
+	bh = ufs_getfrag (inode, fragment, create, err);
+	if (!bh || buffer_uptodate(bh)) 		
+		return bh;
+	ll_rw_block (READ, 1, &bh);
+	wait_on_buffer (bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	brelse (bh);
+	*err = -EIO;
+	return NULL;
+}
+
+static int ufs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page,ufs_getfrag_block,wbc);
+}
+static int ufs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,ufs_getfrag_block);
+}
+static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page,from,to,ufs_getfrag_block);
+}
+static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,ufs_getfrag_block);
+}
+struct address_space_operations ufs_aops = {
+	.readpage = ufs_readpage,
+	.writepage = ufs_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = ufs_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = ufs_bmap
+};
+
+void ufs_read_inode (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_inode * ufs_inode;	
+	struct ufs2_inode *ufs2_inode;
+	struct buffer_head * bh;
+	mode_t mode;
+	unsigned i;
+	unsigned flags;
+	
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+
+	if (inode->i_ino < UFS_ROOTINO || 
+	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
+		goto bad_inode;
+	}
+	
+	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
+	if (!bh) {
+		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
+		goto bad_inode;
+	}
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		goto ufs2_inode;
+
+	ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
+
+	/*
+	 * Copy data to the in-core inode.
+	 */
+	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
+	inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
+	if (inode->i_nlink == 0)
+		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+	
+	/*
+	 * Linux now has 32-bit uid and gid, so we can support EFT.
+	 */
+	inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
+	inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);
+
+	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
+	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
+	inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size (for stat) */
+	inode->i_version++;
+	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
+	ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
+	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
+	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
+	ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	
+	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
+	}
+	ufsi->i_osync = 0;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ufs_dir_inode_operations;
+		inode->i_fop = &ufs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (!inode->i_blocks)
+			inode->i_op = &ufs_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &ufs_aops;
+		}
+	} else
+		init_special_inode(inode, inode->i_mode,
+			ufs_get_inode_dev(sb, ufsi));
+
+	brelse (bh);
+
+	UFSD(("EXIT\n"))
+	return;
+
+bad_inode:
+	make_bad_inode(inode);
+	return;
+
+ufs2_inode :
+	UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino))
+
+	ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
+
+	/*
+	 * Copy data to the in-core inode.
+	 */
+	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
+	inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
+	if (inode->i_nlink == 0)
+		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+
+        /*
+         * Linux now has 32-bit uid and gid, so we can support EFT.
+         */
+	inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
+	inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
+
+	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
+	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
+	inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
+
+	inode->i_version++;
+	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
+	ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
+	/*
+	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
+	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
+	*/
+	ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
+
+	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufsi->i_u1.u2_i_data[i] =
+				ufs2_inode->ui_u2.ui_addr.ui_db[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
+	}
+	ufsi->i_osync = 0;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ufs_dir_inode_operations;
+		inode->i_fop = &ufs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (!inode->i_blocks)
+			inode->i_op = &ufs_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &ufs_aops;
+		}
+	} else   /* TODO  : here ...*/
+		init_special_inode(inode, inode->i_mode,
+			ufs_get_inode_dev(sb, ufsi));
+
+	brelse(bh);
+
+	UFSD(("EXIT\n"))
+	return;
+}
+
+static int ufs_update_inode(struct inode * inode, int do_sync)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	struct ufs_inode * ufs_inode;
+	unsigned i;
+	unsigned flags;
+
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+
+	if (inode->i_ino < UFS_ROOTINO || 
+	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
+		return -1;
+	}
+
+	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
+	if (!bh) {
+		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
+		return -1;
+	}
+	ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));
+
+	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
+	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
+
+	ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
+	ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);
+		
+	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
+	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
+	ufs_inode->ui_atime.tv_usec = 0;
+	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
+	ufs_inode->ui_ctime.tv_usec = 0;
+	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
+	ufs_inode->ui_mtime.tv_usec = 0;
+	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
+	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
+	ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen);
+
+	if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
+		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
+		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
+	}
+
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
+		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
+	} else if (inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+	}
+
+	if (!inode->i_nlink)
+		memset (ufs_inode, 0, sizeof(struct ufs_inode));
+		
+	mark_buffer_dirty(bh);
+	if (do_sync)
+		sync_dirty_buffer(bh);
+	brelse (bh);
+	
+	UFSD(("EXIT\n"))
+	return 0;
+}
+
+int ufs_write_inode (struct inode * inode, int wait)
+{
+	int ret;
+	lock_kernel();
+	ret = ufs_update_inode (inode, wait);
+	unlock_kernel();
+	return ret;
+}
+
+int ufs_sync_inode (struct inode *inode)
+{
+	return ufs_update_inode (inode, 1);
+}
+
+void ufs_delete_inode (struct inode * inode)
+{
+	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
+	lock_kernel();
+	mark_inode_dirty(inode);
+	ufs_update_inode(inode, IS_SYNC(inode));
+	inode->i_size = 0;
+	if (inode->i_blocks)
+		ufs_truncate (inode);
+	ufs_free_inode (inode);
+	unlock_kernel();
+}
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
new file mode 100644
index 0000000..2958cde
--- /dev/null
+++ b/fs/ufs/namei.c
@@ -0,0 +1,375 @@
+/*
+ * linux/fs/ufs/namei.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/namei.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include "swab.h"	/* will go away - see comment in mknod() */
+#include "util.h"
+
+#undef UFS_NAMEI_DEBUG
+
+#ifdef UFS_NAMEI_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static inline void ufs_inc_count(struct inode *inode)
+{
+	inode->i_nlink++;
+	mark_inode_dirty(inode);
+}
+
+static inline void ufs_dec_count(struct inode *inode)
+{
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+}
+
+static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
+{
+	int err = ufs_add_link(dentry, inode);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	ufs_dec_count(inode);
+	iput(inode);
+	return err;
+}
+
+static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode * inode = NULL;
+	ino_t ino;
+	
+	if (dentry->d_name.len > UFS_MAXNAMLEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	lock_kernel();
+	ino = ufs_inode_by_name(dir, dentry);
+	if (ino) {
+		inode = iget(dir->i_sb, ino);
+		if (!inode) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	d_add(dentry, inode);
+	return NULL;
+}
+
+/*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate(). 
+ */
+static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
+		struct nameidata *nd)
+{
+	struct inode * inode = ufs_new_inode(dir, mode);
+	int err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+		mark_inode_dirty(inode);
+		lock_kernel();
+		err = ufs_add_nondir(dentry, inode);
+		unlock_kernel();
+	}
+	return err;
+}
+
+static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	struct inode *inode;
+	int err;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+	inode = ufs_new_inode(dir, mode);
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		init_special_inode(inode, mode, rdev);
+		/* NOTE: that'll go when we get wide dev_t */
+		ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev);
+		mark_inode_dirty(inode);
+		lock_kernel();
+		err = ufs_add_nondir(dentry, inode);
+		unlock_kernel();
+	}
+	return err;
+}
+
+static int ufs_symlink (struct inode * dir, struct dentry * dentry,
+	const char * symname)
+{
+	struct super_block * sb = dir->i_sb;
+	int err = -ENAMETOOLONG;
+	unsigned l = strlen(symname)+1;
+	struct inode * inode;
+
+	if (l > sb->s_blocksize)
+		goto out;
+
+	lock_kernel();
+	inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
+		/* slow symlink */
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+		err = page_symlink(inode, symname, l);
+		if (err)
+			goto out_fail;
+	} else {
+		/* fast symlink */
+		inode->i_op = &ufs_fast_symlink_inode_operations;
+		memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
+		inode->i_size = l-1;
+	}
+	mark_inode_dirty(inode);
+
+	err = ufs_add_nondir(dentry, inode);
+out:
+	unlock_kernel();
+	return err;
+
+out_fail:
+	ufs_dec_count(inode);
+	iput(inode);
+	goto out;
+}
+
+static int ufs_link (struct dentry * old_dentry, struct inode * dir,
+	struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	int error;
+
+	lock_kernel();
+	if (inode->i_nlink >= UFS_LINK_MAX) {
+		unlock_kernel();
+		return -EMLINK;
+	}
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	ufs_inc_count(inode);
+	atomic_inc(&inode->i_count);
+
+	error = ufs_add_nondir(dentry, inode);
+	unlock_kernel();
+	return error;
+}
+
+static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	struct inode * inode;
+	int err = -EMLINK;
+
+	if (dir->i_nlink >= UFS_LINK_MAX)
+		goto out;
+
+	lock_kernel();
+	ufs_inc_count(dir);
+
+	inode = ufs_new_inode(dir, S_IFDIR|mode);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_dir;
+
+	inode->i_op = &ufs_dir_inode_operations;
+	inode->i_fop = &ufs_dir_operations;
+
+	ufs_inc_count(inode);
+
+	err = ufs_make_empty(inode, dir);
+	if (err)
+		goto out_fail;
+
+	err = ufs_add_link(dentry, inode);
+	if (err)
+		goto out_fail;
+	unlock_kernel();
+
+	d_instantiate(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	ufs_dec_count(inode);
+	ufs_dec_count(inode);
+	iput (inode);
+out_dir:
+	ufs_dec_count(dir);
+	unlock_kernel();
+	goto out;
+}
+
+static int ufs_unlink(struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de;
+	int err = -ENOENT;
+
+	lock_kernel();
+	de = ufs_find_entry (dentry, &bh);
+	if (!de)
+		goto out;
+
+	err = ufs_delete_entry (dir, de, bh);
+	if (err)
+		goto out;
+
+	inode->i_ctime = dir->i_ctime;
+	ufs_dec_count(inode);
+	err = 0;
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	int err= -ENOTEMPTY;
+
+	lock_kernel();
+	if (ufs_empty_dir (inode)) {
+		err = ufs_unlink(dir, dentry);
+		if (!err) {
+			inode->i_size = 0;
+			ufs_dec_count(inode);
+			ufs_dec_count(dir);
+		}
+	}
+	unlock_kernel();
+	return err;
+}
+
+static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
+	struct inode * new_dir,	struct dentry * new_dentry )
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	struct buffer_head *dir_bh = NULL;
+	struct ufs_dir_entry *dir_de = NULL;
+	struct buffer_head *old_bh;
+	struct ufs_dir_entry *old_de;
+	int err = -ENOENT;
+
+	lock_kernel();
+	old_de = ufs_find_entry (old_dentry, &old_bh);
+	if (!old_de)
+		goto out;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+		err = -EIO;
+		dir_de = ufs_dotdot(old_inode, &dir_bh);
+		if (!dir_de)
+			goto out_old;
+	}
+
+	if (new_inode) {
+		struct buffer_head *new_bh;
+		struct ufs_dir_entry *new_de;
+
+		err = -ENOTEMPTY;
+		if (dir_de && !ufs_empty_dir (new_inode))
+			goto out_dir;
+		err = -ENOENT;
+		new_de = ufs_find_entry (new_dentry, &new_bh);
+		if (!new_de)
+			goto out_dir;
+		ufs_inc_count(old_inode);
+		ufs_set_link(new_dir, new_de, new_bh, old_inode);
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		if (dir_de)
+			new_inode->i_nlink--;
+		ufs_dec_count(new_inode);
+	} else {
+		if (dir_de) {
+			err = -EMLINK;
+			if (new_dir->i_nlink >= UFS_LINK_MAX)
+				goto out_dir;
+		}
+		ufs_inc_count(old_inode);
+		err = ufs_add_link(new_dentry, old_inode);
+		if (err) {
+			ufs_dec_count(old_inode);
+			goto out_dir;
+		}
+		if (dir_de)
+			ufs_inc_count(new_dir);
+	}
+
+	ufs_delete_entry (old_dir, old_de, old_bh);
+
+	ufs_dec_count(old_inode);
+
+	if (dir_de) {
+		ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
+		ufs_dec_count(old_dir);
+	}
+	unlock_kernel();
+	return 0;
+
+out_dir:
+	if (dir_de)
+		brelse(dir_bh);
+out_old:
+	brelse (old_bh);
+out:
+	unlock_kernel();
+	return err;
+}
+
+struct inode_operations ufs_dir_inode_operations = {
+	.create		= ufs_create,
+	.lookup		= ufs_lookup,
+	.link		= ufs_link,
+	.unlink		= ufs_unlink,
+	.symlink	= ufs_symlink,
+	.mkdir		= ufs_mkdir,
+	.rmdir		= ufs_rmdir,
+	.mknod		= ufs_mknod,
+	.rename		= ufs_rename,
+};
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
new file mode 100644
index 0000000..f036d69
--- /dev/null
+++ b/fs/ufs/super.c
@@ -0,0 +1,1347 @@
+/*
+ *  linux/fs/ufs/super.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+/* Derived from
+ *
+ *  linux/fs/ext2/super.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+ 
+/*
+ * Inspired by
+ *
+ *  linux/fs/ufs/super.c
+ *
+ * Copyright (C) 1996
+ * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
+ * Laboratory for Computer Science Research Computing Facility
+ * Rutgers, The State University of New Jersey
+ *
+ * Copyright (C) 1996  Eddie C. Dost  (ecd@skynet.be)
+ *
+ * Kernel module support added on 96/04/26 by
+ * Stefan Reinauer <stepan@home.culture.mipt.ru>
+ *
+ * Module usage counts added on 96/04/29 by
+ * Gertjan van Wingerde <gertjan@cs.vu.nl>
+ *
+ * Clean swab support on 19970406 by
+ * Francois-Rene Rideau <fare@tunes.org>
+ *
+ * 4.4BSD (FreeBSD) support added on February 1st 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
+ * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
+ *
+ * NeXTstep support added on February 5th 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk>.
+ *
+ * write support Daniel Pirkl <daniel.pirkl@email.cz> 1998
+ * 
+ * HP/UX hfs filesystem support added by
+ * Martin K. Petersen <mkp@mkp.net>, August 1999
+ *
+ * UFS2 (of FreeBSD 5.x) support added by
+ * Niraj Kumar <niraj17@iitbombay.org>, Jan 2004
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <stdarg.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/parser.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_SUPER_DEBUG
+#undef UFS_SUPER_DEBUG_MORE
+
+#ifdef UFS_SUPER_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+#ifdef UFS_SUPER_DEBUG_MORE
+/*
+ * Print contents of ufs_super_block, useful for debugging
+ */
+void ufs_print_super_stuff(struct super_block *sb,
+	struct ufs_super_block_first * usb1,
+	struct ufs_super_block_second * usb2, 
+	struct ufs_super_block_third * usb3)
+{
+	printk("ufs_print_super_stuff\n");
+	printk("size of usb:     %zu\n", sizeof(struct ufs_super_block));
+	printk("  magic:         0x%x\n", fs32_to_cpu(sb, usb3->fs_magic));
+	printk("  sblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_sblkno));
+	printk("  cblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_cblkno));
+	printk("  iblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_iblkno));
+	printk("  dblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_dblkno));
+	printk("  cgoffset:      %u\n", fs32_to_cpu(sb, usb1->fs_cgoffset));
+	printk("  ~cgmask:       0x%x\n", ~fs32_to_cpu(sb, usb1->fs_cgmask));
+	printk("  size:          %u\n", fs32_to_cpu(sb, usb1->fs_size));
+	printk("  dsize:         %u\n", fs32_to_cpu(sb, usb1->fs_dsize));
+	printk("  ncg:           %u\n", fs32_to_cpu(sb, usb1->fs_ncg));
+	printk("  bsize:         %u\n", fs32_to_cpu(sb, usb1->fs_bsize));
+	printk("  fsize:         %u\n", fs32_to_cpu(sb, usb1->fs_fsize));
+	printk("  frag:          %u\n", fs32_to_cpu(sb, usb1->fs_frag));
+	printk("  fragshift:     %u\n", fs32_to_cpu(sb, usb1->fs_fragshift));
+	printk("  ~fmask:        %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask));
+	printk("  fshift:        %u\n", fs32_to_cpu(sb, usb1->fs_fshift));
+	printk("  sbsize:        %u\n", fs32_to_cpu(sb, usb1->fs_sbsize));
+	printk("  spc:           %u\n", fs32_to_cpu(sb, usb1->fs_spc));
+	printk("  cpg:           %u\n", fs32_to_cpu(sb, usb1->fs_cpg));
+	printk("  ipg:           %u\n", fs32_to_cpu(sb, usb1->fs_ipg));
+	printk("  fpg:           %u\n", fs32_to_cpu(sb, usb1->fs_fpg));
+	printk("  csaddr:        %u\n", fs32_to_cpu(sb, usb1->fs_csaddr));
+	printk("  cssize:        %u\n", fs32_to_cpu(sb, usb1->fs_cssize));
+	printk("  cgsize:        %u\n", fs32_to_cpu(sb, usb1->fs_cgsize));
+	printk("  fstodb:        %u\n", fs32_to_cpu(sb, usb1->fs_fsbtodb));
+	printk("  contigsumsize: %d\n", fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize));
+	printk("  postblformat:  %u\n", fs32_to_cpu(sb, usb3->fs_postblformat));
+	printk("  nrpos:         %u\n", fs32_to_cpu(sb, usb3->fs_nrpos));
+	printk("  ndir           %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir));
+	printk("  nifree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree));
+	printk("  nbfree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree));
+	printk("  nffree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree));
+	printk("\n");
+}
+
+/*
+ * Print contents of ufs2 ufs_super_block, useful for debugging
+ */
+void ufs2_print_super_stuff(struct super_block *sb,
+	struct ufs_super_block *usb)
+{
+	printk("ufs2_print_super_stuff\n");
+	printk("size of usb:     %zu\n", sizeof(struct ufs_super_block));
+	printk("  magic:         0x%x\n", fs32_to_cpu(sb, usb->fs_magic));
+	printk("  fs_size:   %llu\n",
+		(unsigned long long)fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size));
+	printk("  fs_dsize:  %llu\n",
+		(unsigned long long)fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize));
+	printk("  bsize:         %u\n", fs32_to_cpu(sb, usb->fs_bsize));
+	printk("  fsize:         %u\n", fs32_to_cpu(sb, usb->fs_fsize));
+	printk("  fs_volname:  %s\n", usb->fs_u11.fs_u2.fs_volname);
+	printk("  fs_fsmnt:  %s\n", usb->fs_u11.fs_u2.fs_fsmnt);
+	printk("  fs_sblockloc: %llu\n", (unsigned long long)fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_sblockloc));
+	printk("  cs_ndir(No of dirs):  %llu\n", (unsigned long long)fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_cstotal.cs_ndir));
+	printk("  cs_nbfree(No of free blocks):  %llu\n", (unsigned long long)fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree));
+	printk("\n");
+}
+
+/*
+ * Print contents of ufs_cylinder_group, useful for debugging
+ */
+void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group *cg)
+{
+	printk("\nufs_print_cylinder_stuff\n");
+	printk("size of ucg: %zu\n", sizeof(struct ufs_cylinder_group));
+	printk("  magic:        %x\n", fs32_to_cpu(sb, cg->cg_magic));
+	printk("  time:         %u\n", fs32_to_cpu(sb, cg->cg_time));
+	printk("  cgx:          %u\n", fs32_to_cpu(sb, cg->cg_cgx));
+	printk("  ncyl:         %u\n", fs16_to_cpu(sb, cg->cg_ncyl));
+	printk("  niblk:        %u\n", fs16_to_cpu(sb, cg->cg_niblk));
+	printk("  ndblk:        %u\n", fs32_to_cpu(sb, cg->cg_ndblk));
+	printk("  cs_ndir:      %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_ndir));
+	printk("  cs_nbfree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nbfree));
+	printk("  cs_nifree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nifree));
+	printk("  cs_nffree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nffree));
+	printk("  rotor:        %u\n", fs32_to_cpu(sb, cg->cg_rotor));
+	printk("  frotor:       %u\n", fs32_to_cpu(sb, cg->cg_frotor));
+	printk("  irotor:       %u\n", fs32_to_cpu(sb, cg->cg_irotor));
+	printk("  frsum:        %u, %u, %u, %u, %u, %u, %u, %u\n",
+	    fs32_to_cpu(sb, cg->cg_frsum[0]), fs32_to_cpu(sb, cg->cg_frsum[1]),
+	    fs32_to_cpu(sb, cg->cg_frsum[2]), fs32_to_cpu(sb, cg->cg_frsum[3]),
+	    fs32_to_cpu(sb, cg->cg_frsum[4]), fs32_to_cpu(sb, cg->cg_frsum[5]),
+	    fs32_to_cpu(sb, cg->cg_frsum[6]), fs32_to_cpu(sb, cg->cg_frsum[7]));
+	printk("  btotoff:      %u\n", fs32_to_cpu(sb, cg->cg_btotoff));
+	printk("  boff:         %u\n", fs32_to_cpu(sb, cg->cg_boff));
+	printk("  iuseoff:      %u\n", fs32_to_cpu(sb, cg->cg_iusedoff));
+	printk("  freeoff:      %u\n", fs32_to_cpu(sb, cg->cg_freeoff));
+	printk("  nextfreeoff:  %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff));
+	printk("  clustersumoff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff));
+	printk("  clusteroff    %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff));
+	printk("  nclusterblks  %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks));
+	printk("\n");
+}
+#endif /* UFS_SUPER_DEBUG_MORE */
+
+static struct super_operations ufs_super_ops;
+
+static char error_buf[1024];
+
+void ufs_error (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	va_list args;
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_clean = UFS_FSBAD;
+		ubh_mark_buffer_dirty(USPI_UBH);
+		sb->s_dirt = 1;
+		sb->s_flags |= MS_RDONLY;
+	}
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
+	case UFS_MOUNT_ONERROR_PANIC:
+		panic ("UFS-fs panic (device %s): %s: %s\n", 
+			sb->s_id, function, error_buf);
+
+	case UFS_MOUNT_ONERROR_LOCK:
+	case UFS_MOUNT_ONERROR_UMOUNT:
+	case UFS_MOUNT_ONERROR_REPAIR:
+		printk (KERN_CRIT "UFS-fs error (device %s): %s: %s\n",
+			sb->s_id, function, error_buf);
+	}		
+}
+
+void ufs_panic (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	va_list args;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_clean = UFS_FSBAD;
+		ubh_mark_buffer_dirty(USPI_UBH);
+		sb->s_dirt = 1;
+	}
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	sb->s_flags |= MS_RDONLY;
+	printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
+void ufs_warning (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	va_list args;
+
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
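+/*
+ * Mount options: ufstype= selects the on-disk flavour of UFS
+ * (superblock location/size and per-variant quirks), onerror= selects
+ * what to do when a metadata error is detected.
+ */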
+enum {
+	Opt_type_old, Opt_type_sunx86, Opt_type_sun, Opt_type_44bsd,
+	Opt_type_ufs2, Opt_type_hp, Opt_type_nextstepcd, Opt_type_nextstep,
+	Opt_type_openstep, Opt_onerror_panic, Opt_onerror_lock,
+	Opt_onerror_umount, Opt_onerror_repair, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_type_old, "ufstype=old"},
+	{Opt_type_sunx86, "ufstype=sunx86"},
+	{Opt_type_sun, "ufstype=sun"},
+	{Opt_type_44bsd, "ufstype=44bsd"},
+	{Opt_type_ufs2, "ufstype=ufs2"},
+	{Opt_type_ufs2, "ufstype=5xbsd"},
+	{Opt_type_hp, "ufstype=hp"},
+	{Opt_type_nextstepcd, "ufstype=nextstep-cd"},
+	{Opt_type_nextstep, "ufstype=nextstep"},
+	{Opt_type_openstep, "ufstype=openstep"},
+	{Opt_onerror_panic, "onerror=panic"},
+	{Opt_onerror_lock, "onerror=lock"},
+	{Opt_onerror_umount, "onerror=umount"},
+	{Opt_onerror_repair, "onerror=repair"},
+	{Opt_err, NULL}
+};
+
+static int ufs_parse_options (char * options, unsigned * mount_options)
+{
+	char * p;
+	
+	UFSD(("ENTER\n"))
+	
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_type_old:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_OLD);
+			break;
+		case Opt_type_sunx86:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_SUNx86);
+			break;
+		case Opt_type_sun:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_SUN);
+			break;
+		case Opt_type_44bsd:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_44BSD);
+			break;
+		case Opt_type_ufs2:
+			ufs_clear_opt(*mount_options, UFSTYPE);
+			ufs_set_opt(*mount_options, UFSTYPE_UFS2);
+			break;
+		case Opt_type_hp:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_HP);
+			break;
+		case Opt_type_nextstepcd:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP_CD);
+			break;
+		case Opt_type_nextstep:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP);
+			break;
+		case Opt_type_openstep:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_OPENSTEP);
+			break;
+		case Opt_onerror_panic:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_PANIC);
+			break;
+		case Opt_onerror_lock:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_LOCK);
+			break;
+		case Opt_onerror_umount:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_UMOUNT);
+			break;
+		case Opt_onerror_repair:
+			printk("UFS-fs: Unable to do repair on error, "
+				"will lock instead\n");
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_REPAIR);
+			break;
+		default:
+			printk("UFS-fs: Invalid option: \"%s\" "
+					"or missing value\n", p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Read on-disk structures associated with cylinder groups
+ */
+static int ufs_read_cylinder_structures (struct super_block *sb) {
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block *usb;
+	struct ufs_buffer_head * ubh;
+	unsigned char * base, * space;
+	unsigned size, blks, i;
+	unsigned flags = 0;
+	
+	UFSD(("ENTER\n"))
+	
+	uspi = sbi->s_uspi;
+
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
+
+        flags = UFS_SB(sb)->s_flags;
+	
+	/*
+	 * Read cs structures from (usually) the first data block
+	 * on the device. 
+	 */
+	size = uspi->s_cssize;
+	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	base = space = kmalloc(size, GFP_KERNEL);
+	if (!base)
+		goto failed; 
+	for (i = 0; i < blks; i += uspi->s_fpb) {
+		size = uspi->s_bsize;
+		if (i + uspi->s_fpb > blks)
+			size = (blks - i) * uspi->s_fsize;
+
+		if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+			ubh = ubh_bread(sb,
+				fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size);
+			if (!ubh)
+				goto failed;
+			ubh_ubhcpymem (space, ubh, size);
+			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
+		}
+		else {
+			ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
+			if (!ubh)
+				goto failed;
+			ubh_ubhcpymem(space, ubh, size);
+			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
+		}
+		space += size;
+		ubh_brelse (ubh);
+		ubh = NULL;
+	}
+
+	/*
+	 * Read cylinder groups (only the first fragment of each block is
+	 * read at this time) and prepare internal data structures for cg caching.
+	 */
+	if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL)))
+		goto failed;
+	for (i = 0; i < uspi->s_ncg; i++) 
+		sbi->s_ucg[i] = NULL;
+	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
+		sbi->s_ucpi[i] = NULL;
+		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
+	}
+	for (i = 0; i < uspi->s_ncg; i++) {
+		UFSD(("read cg %u\n", i))
+		if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
+			goto failed;
+		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
+			goto failed;
+#ifdef UFS_SUPER_DEBUG_MORE
+		ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
+#endif
+	}
+	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
+		if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL)))
+			goto failed;
+		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
+	}
+	sbi->s_cg_loaded = 0;
+	UFSD(("EXIT\n"))
+	return 1;
+
+failed:
+	if (base) kfree (base);
+	if (sbi->s_ucg) {
+		for (i = 0; i < uspi->s_ncg; i++)
+			if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]);
+		kfree (sbi->s_ucg);
+		for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
+			if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]);
+	}
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+}
+
+/*
+ * Put on-disk structures associated with cylinder groups and 
+ * write them back to disk
+ */
+static void ufs_put_cylinder_structures (struct super_block *sb) {
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * ubh;
+	unsigned char * base, * space;
+	unsigned blks, size, i;
+	
+	UFSD(("ENTER\n"))
+	
+	uspi = sbi->s_uspi;
+
+	size = uspi->s_cssize;
+	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	base = space = (char*) sbi->s_csp[0];
+	for (i = 0; i < blks; i += uspi->s_fpb) {
+		size = uspi->s_bsize;
+		if (i + uspi->s_fpb > blks)
+			size = (blks - i) * uspi->s_fsize;
+		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
+		ubh_memcpyubh (ubh, space, size);
+		space += size;
+		ubh_mark_buffer_uptodate (ubh, 1);
+		ubh_mark_buffer_dirty (ubh);
+		ubh_brelse (ubh);
+	}
+	for (i = 0; i < sbi->s_cg_loaded; i++) {
+		ufs_put_cylinder (sb, i);
+		kfree (sbi->s_ucpi[i]);
+	}
+	for (; i < UFS_MAX_GROUP_LOADED; i++) 
+		kfree (sbi->s_ucpi[i]);
+	for (i = 0; i < uspi->s_ncg; i++) 
+		brelse (sbi->s_ucg[i]);
+	kfree (sbi->s_ucg);
+	kfree (base);
+	UFSD(("EXIT\n"))
+}
+
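+/*
+ * Fill the in-core superblock: parse options, pick per-flavour geometry
+ * defaults, probe the on-disk superblock (trying both byte orders and
+ * all known magics), validate block/fragment sizes, copy the relevant
+ * fields into ufs_sb_private_info, look up the root inode and, for
+ * read-write mounts, load the cylinder-group summaries.
+ */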
+static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct ufs_sb_info * sbi;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_second * usb2;
+	struct ufs_super_block_third * usb3;
+	struct ufs_super_block *usb;
+	struct ufs_buffer_head * ubh;	
+	struct inode *inode;
+	unsigned block_size, super_block_size;
+	unsigned flags;
+
+	uspi = NULL;
+	ubh = NULL;
+	flags = 0;
+	
+	UFSD(("ENTER\n"))
+		
+	sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		goto failed_nomem;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct ufs_sb_info));
+
+	UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY)))
+	
+#ifndef CONFIG_UFS_FS_WRITE
+	if (!(sb->s_flags & MS_RDONLY)) {
+		printk("ufs was compiled with read-only support, "
+		"can't be mounted as read-write\n");
+		goto failed;
+	}
+#endif
+	/*
+	 * Set default mount options
+	 * Parse mount options
+	 */
+	sbi->s_mount_opt = 0;
+	ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK);
+	if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) {
+		printk("wrong mount options\n");
+		goto failed;
+	}
+	if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) {
+		if (!silent)
+			printk("You didn't specify the type of your ufs filesystem\n\n"
+			"mount -t ufs -o ufstype="
+			"sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|nextstep-cd|openstep ...\n\n"
+			">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
+			"default is ufstype=old\n");
+		ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD);
+	}
+
+	sbi->s_uspi = uspi =
+		kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL);
+	if (!uspi)
+		goto failed;
+
+	/* Keep 2Gig file limit. Some UFS variants need to override 
+	   this but as I don't know which I'll let those in the know loosen
+	   the rules */
+	   
+	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
+	case UFS_MOUNT_UFSTYPE_44BSD:
+		UFSD(("ufstype=44bsd\n"))
+		uspi->s_fsize = block_size = 512;
+		uspi->s_fmask = ~(512 - 1);
+		uspi->s_fshift = 9;
+		uspi->s_sbsize = super_block_size = 1536;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		break;
+	case UFS_MOUNT_UFSTYPE_UFS2:
+		UFSD(("ufstype=ufs2\n"))
+		uspi->s_fsize = block_size = 512;
+		uspi->s_fmask = ~(512 - 1);
+		uspi->s_fshift = 9;
+		uspi->s_sbsize = super_block_size = 1536;
+		uspi->s_sbbase =  0;
+		flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			printk(KERN_INFO "ufstype=ufs2 is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+ 		}
+		break;
+		
+	case UFS_MOUNT_UFSTYPE_SUN:
+		UFSD(("ufstype=sun\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		uspi->s_maxsymlinklen = 56;
+		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN;
+		break;
+
+	case UFS_MOUNT_UFSTYPE_SUNx86:
+		UFSD(("ufstype=sunx86\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		uspi->s_maxsymlinklen = 56;
+		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN;
+		break;
+
+	case UFS_MOUNT_UFSTYPE_OLD:
+		UFSD(("ufstype=old\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=old is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_NEXTSTEP:
+		UFSD(("ufstype=nextstep\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=nextstep is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
+		UFSD(("ufstype=nextstep-cd\n"))
+		uspi->s_fsize = block_size = 2048;
+		uspi->s_fmask = ~(2048 - 1);
+		uspi->s_fshift = 11;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_OPENSTEP:
+		UFSD(("ufstype=openstep\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=openstep is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_HP:
+		UFSD(("ufstype=hp\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=hp is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+ 		}
+ 		break;
+	default:
+		if (!silent)
+			printk("unknown ufstype\n");
+		goto failed;
+	}
+	
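+	/*
+	 * We may pass through here more than once: first with the
+	 * flavour's default fragment/superblock sizes, again with the
+	 * values actually found on disk, and for NeXTstep-style media
+	 * with the superblock base shifted (see below).
+	 */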
+again:	
+	if (!sb_set_blocksize(sb, block_size)) {
+		printk(KERN_ERR "UFS: failed to set blocksize\n");
+		goto failed;
+	}
+
+	/*
+	 * read ufs super block from device
+	 */
+	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + SBLOCK_UFS2/block_size, super_block_size);
+	}
+	else {
+		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
+	}
+	if (!ubh)
+		goto failed;
+
+	
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb2 = ubh_get_usb_second(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
+
+	/*
+	 * Check ufs magic number
+	 */
+	sbi->s_bytesex = BYTESEX_LE;
+	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
+		case UFS_MAGIC:
+		case UFS2_MAGIC:
+		case UFS_MAGIC_LFN:
+	        case UFS_MAGIC_FEA:
+	        case UFS_MAGIC_4GB:
+			goto magic_found;
+	}
+	sbi->s_bytesex = BYTESEX_BE;
+	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
+		case UFS_MAGIC:
+		case UFS2_MAGIC:
+		case UFS_MAGIC_LFN:
+	        case UFS_MAGIC_FEA:
+	        case UFS_MAGIC_4GB:
+			goto magic_found;
+	}
+
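+	/*
+	 * No magic found in either byte order: on NeXTstep/OpenStep
+	 * media the superblock may sit further into the device, so slide
+	 * the base forward by 8 fragments (up to 256) and try again.
+	 */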
+	if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) 
+	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) 
+	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) 
+	  && uspi->s_sbbase < 256) {
+		ubh_brelse_uspi(uspi);
+		ubh = NULL;
+		uspi->s_sbbase += 8;
+		goto again;
+	}
+	if (!silent)
+		printk("ufs_read_super: bad magic number\n");
+	goto failed;
+
+magic_found:
+	/*
+	 * Check block and fragment sizes
+	 */
+	uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize);
+	uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize);
+	uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize);
+	uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
+	uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
+
+	if (uspi->s_fsize & (uspi->s_fsize - 1)) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n",
+			uspi->s_fsize);
+			goto failed;
+	}
+	if (uspi->s_fsize < 512) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n",
+			uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_fsize > 4096) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n",
+			uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_bsize & (uspi->s_bsize - 1)) {
+		printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n",
+			uspi->s_bsize);
+		goto failed;
+	}
+	if (uspi->s_bsize < 4096) {
+		printk(KERN_ERR "ufs_read_super: block size %u is too small\n",
+			uspi->s_bsize);
+		goto failed;
+	}
+	if (uspi->s_bsize / uspi->s_fsize > 8) {
+		printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n",
+			uspi->s_bsize / uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
+		ubh_brelse_uspi(uspi);
+		ubh = NULL;
+		block_size = uspi->s_fsize;
+		super_block_size = uspi->s_sbsize;
+		UFSD(("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size))
+		goto again;
+	}
+
+#ifdef UFS_SUPER_DEBUG_MORE
+        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		ufs2_print_super_stuff(sb,usb);
+        else
+		ufs_print_super_stuff(sb, usb1, usb2, usb3);
+#endif
+
+	/*
+	 * Check whether the file system was cleanly unmounted.
+	 * If not, make it read-only.
+	 */
+	if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
+	  ((flags & UFS_ST_MASK) == UFS_ST_OLD) ||
+	  (((flags & UFS_ST_MASK) == UFS_ST_SUN || 
+	  (flags & UFS_ST_MASK) == UFS_ST_SUNx86) && 
+	  (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
+		switch(usb1->fs_clean) {
+		case UFS_FSCLEAN:
+			UFSD(("fs is clean\n"))
+			break;
+		case UFS_FSSTABLE:
+			UFSD(("fs is stable\n"))
+			break;
+		case UFS_FSOSF1:
+			UFSD(("fs is DEC OSF/1\n"))
+			break;
+		case UFS_FSACTIVE:
+			printk("ufs_read_super: fs is active\n");
+			sb->s_flags |= MS_RDONLY;
+			break;
+		case UFS_FSBAD:
+			printk("ufs_read_super: fs is bad\n");
+			sb->s_flags |= MS_RDONLY;
+			break;
+		default:
+			printk("ufs_read_super: can't grok fs_clean 0x%x\n", usb1->fs_clean);
+			sb->s_flags |= MS_RDONLY;
+			break;
+		}
+	}
+	else {
+		printk("ufs_read_super: fs needs fsck\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	/*
+	 * Read ufs_super_block into internal data structures
+	 */
+	sb->s_op = &ufs_super_ops;
+	sb->dq_op = NULL; /***/
+	sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
+
+	uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno);
+	uspi->s_cblkno = fs32_to_cpu(sb, usb1->fs_cblkno);
+	uspi->s_iblkno = fs32_to_cpu(sb, usb1->fs_iblkno);
+	uspi->s_dblkno = fs32_to_cpu(sb, usb1->fs_dblkno);
+	uspi->s_cgoffset = fs32_to_cpu(sb, usb1->fs_cgoffset);
+	uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
+
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		uspi->s_u2_size  = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size);
+		uspi->s_u2_dsize = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
+	}
+	else {
+		uspi->s_size  =  fs32_to_cpu(sb, usb1->fs_size);
+		uspi->s_dsize =  fs32_to_cpu(sb, usb1->fs_dsize);
+	}
+
+	uspi->s_ncg = fs32_to_cpu(sb, usb1->fs_ncg);
+	/* s_bsize already set */
+	/* s_fsize already set */
+	uspi->s_fpb = fs32_to_cpu(sb, usb1->fs_frag);
+	uspi->s_minfree = fs32_to_cpu(sb, usb1->fs_minfree);
+	uspi->s_bmask = fs32_to_cpu(sb, usb1->fs_bmask);
+	uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
+	uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift);
+	uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
+	UFSD(("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift,
+		uspi->s_fshift));
+	uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift);
+	uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb);
+	/* s_sbsize already set */
+	uspi->s_csmask = fs32_to_cpu(sb, usb1->fs_csmask);
+	uspi->s_csshift = fs32_to_cpu(sb, usb1->fs_csshift);
+	uspi->s_nindir = fs32_to_cpu(sb, usb1->fs_nindir);
+	uspi->s_inopb = fs32_to_cpu(sb, usb1->fs_inopb);
+	uspi->s_nspf = fs32_to_cpu(sb, usb1->fs_nspf);
+	uspi->s_npsect = ufs_get_fs_npsect(sb, usb1, usb3);
+	uspi->s_interleave = fs32_to_cpu(sb, usb1->fs_interleave);
+	uspi->s_trackskew = fs32_to_cpu(sb, usb1->fs_trackskew);
+	uspi->s_csaddr = fs32_to_cpu(sb, usb1->fs_csaddr);
+	uspi->s_cssize = fs32_to_cpu(sb, usb1->fs_cssize);
+	uspi->s_cgsize = fs32_to_cpu(sb, usb1->fs_cgsize);
+	uspi->s_ntrak = fs32_to_cpu(sb, usb1->fs_ntrak);
+	uspi->s_nsect = fs32_to_cpu(sb, usb1->fs_nsect);
+	uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc);
+	uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg);
+	uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg);
+	uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_cpc);
+	uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize);
+	uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3);
+	uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3);
+	uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat);
+	uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos);
+	uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
+	uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
+
+	/*
+	 * Compute other frequently used values
+	 */
+	uspi->s_fpbmask = uspi->s_fpb - 1;
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		uspi->s_apbshift = uspi->s_bshift - 3;
+	}
+	else {
+		uspi->s_apbshift = uspi->s_bshift - 2;
+	}
+	uspi->s_2apbshift = uspi->s_apbshift * 2;
+	uspi->s_3apbshift = uspi->s_apbshift * 3;
+	uspi->s_apb = 1 << uspi->s_apbshift;
+	uspi->s_2apb = 1 << uspi->s_2apbshift;
+	uspi->s_3apb = 1 << uspi->s_3apbshift;
+	uspi->s_apbmask = uspi->s_apb - 1;
+	uspi->s_nspfshift = uspi->s_fshift - UFS_SECTOR_BITS;
+	uspi->s_nspb = uspi->s_nspf << uspi->s_fpbshift;
+	uspi->s_inopf = uspi->s_inopb >> uspi->s_fpbshift;
+	uspi->s_bpf = uspi->s_fsize << 3;
+	uspi->s_bpfshift = uspi->s_fshift + 3;
+	uspi->s_bpfmask = uspi->s_bpf - 1;
+	if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) ==
+	    UFS_MOUNT_UFSTYPE_44BSD)
+		uspi->s_maxsymlinklen =
+		    fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen);
+	
+	sbi->s_flags = flags;
+
+	inode = iget(sb, UFS_ROOTINO);
+	if (!inode || is_bad_inode(inode))
+		goto failed;
+	sb->s_root = d_alloc_root(inode);
+	if (!sb->s_root)
+		goto dalloc_failed;
+
+
+	/*
+	 * Read cylinder group structures
+	 */
+	if (!(sb->s_flags & MS_RDONLY))
+		if (!ufs_read_cylinder_structures(sb))
+			goto failed;
+
+	UFSD(("EXIT\n"))
+	return 0;
+
+dalloc_failed:
+	iput(inode);
+failed:
+	if (ubh) ubh_brelse_uspi (uspi);
+	if (uspi) kfree (uspi);
+	if (sbi) kfree(sbi);
+	sb->s_fs_info = NULL;
+	UFSD(("EXIT (FAILED)\n"))
+	return -EINVAL;
+
+failed_nomem:
+	UFSD(("EXIT (NOMEM)\n"))
+	return -ENOMEM;
+}
+
+static void ufs_write_super (struct super_block *sb) {
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_third * usb3;
+	unsigned flags;
+
+	lock_kernel();
+
+	UFSD(("ENTER\n"))
+	flags = UFS_SB(sb)->s_flags;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+		if ((flags & UFS_ST_MASK) == UFS_ST_SUN 
+		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+			ufs_set_fs_state(sb, usb1, usb3,
+					UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
+		ubh_mark_buffer_dirty (USPI_UBH);
+	}
+	sb->s_dirt = 0;
+	UFSD(("EXIT\n"))
+	unlock_kernel();
+}
+
+static void ufs_put_super (struct super_block *sb)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+		
+	UFSD(("ENTER\n"))
+
+	if (!(sb->s_flags & MS_RDONLY))
+		ufs_put_cylinder_structures (sb);
+	
+	ubh_brelse_uspi (sbi->s_uspi);
+	kfree (sbi->s_uspi);
+	kfree (sbi);
+	sb->s_fs_info = NULL;
+	return;
+}
+
+
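+/*
+ * Remount: the ufstype cannot change.  Going rw->ro flushes the
+ * cylinder-group summaries and updates the on-disk timestamp/state;
+ * going ro->rw reloads them and is only allowed for the variants with
+ * write support (sun, sunx86, 44bsd).
+ */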
+static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_third * usb3;
+	unsigned new_mount_opt, ufstype;
+	unsigned flags;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+	
+	/*
+	 * Allow the "check" option to be passed as a remount option.
+	 * It is not possible to change the ufstype option during remount.
+	 */
+	ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
+	new_mount_opt = 0;
+	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
+	if (!ufs_parse_options (data, &new_mount_opt))
+		return -EINVAL;
+	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
+		new_mount_opt |= ufstype;
+	}
+	else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
+		printk("ufstype can't be changed during remount\n");
+		return -EINVAL;
+	}
+
+	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
+		UFS_SB(sb)->s_mount_opt = new_mount_opt;
+		return 0;
+	}
+	
+	/*
+	 * fs was mounted read-write, remounting read-only
+	 */
+	if (*mount_flags & MS_RDONLY) {
+		ufs_put_cylinder_structures(sb);
+		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+		if ((flags & UFS_ST_MASK) == UFS_ST_SUN
+		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) 
+			ufs_set_fs_state(sb, usb1, usb3,
+				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
+		ubh_mark_buffer_dirty (USPI_UBH);
+		sb->s_dirt = 0;
+		sb->s_flags |= MS_RDONLY;
+	}
+	/*
+	 * fs was mounted as ro, remounting rw
+	 */
+	else {
+#ifndef CONFIG_UFS_FS_WRITE
+		printk("ufs was compiled with read-only support, "
+		"can't be mounted as read-write\n");
+		return -EINVAL;
+#else
+		if (ufstype != UFS_MOUNT_UFSTYPE_SUN && 
+		    ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
+		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86) {
+			printk("this ufstype is supported read-only\n");
+			return -EINVAL;
+		}
+		if (!ufs_read_cylinder_structures (sb)) {
+			printk("failed during remounting\n");
+			return -EPERM;
+		}
+		sb->s_flags &= ~MS_RDONLY;
+#endif
+	}
+	UFS_SB(sb)->s_mount_opt = new_mount_opt;
+	return 0;
+}
+
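+/*
+ * statfs: free block/inode counts come from the superblock's cylinder
+ * summary totals; f_bavail additionally holds back the minfree
+ * percentage reserved for the allocator.
+ */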
+static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block * usb;
+	unsigned  flags = 0;
+
+	lock_kernel();
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
+	
+	flags = UFS_SB(sb)->s_flags;
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		buf->f_type = UFS2_MAGIC;
+		buf->f_blocks = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
+		buf->f_bfree = ufs_blkstofrags(fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree)) +
+			fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nffree);
+		buf->f_ffree = fs64_to_cpu(sb,
+        		usb->fs_u11.fs_u2.fs_cstotal.cs_nifree);
+	}
+	else {
+		buf->f_type = UFS_MAGIC;
+		buf->f_blocks = uspi->s_dsize;
+		buf->f_bfree = ufs_blkstofrags(fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)) +
+			fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
+		buf->f_ffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
+	}
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
+		? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+	buf->f_files = uspi->s_ncg * uspi->s_ipg;
+	buf->f_namelen = UFS_MAXNAMLEN;
+
+	unlock_kernel();
+
+	return 0;
+}
+
+static kmem_cache_t * ufs_inode_cachep;
+
+static struct inode *ufs_alloc_inode(struct super_block *sb)
+{
+	struct ufs_inode_info *ei;
+	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void ufs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
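+/*
+ * The inode cache uses a slab constructor so the embedded VFS inode is
+ * initialized once per slab object instead of on every allocation.
+ */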
+static int init_inodecache(void)
+{
+	ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
+					     sizeof(struct ufs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (ufs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(ufs_inode_cachep))
+		printk(KERN_INFO "ufs_inode_cache: not all structures were freed\n");
+}
+
+#ifdef CONFIG_QUOTA
+static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t);
+static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
+#endif
+
+static struct super_operations ufs_super_ops = {
+	.alloc_inode	= ufs_alloc_inode,
+	.destroy_inode	= ufs_destroy_inode,
+	.read_inode	= ufs_read_inode,
+	.write_inode	= ufs_write_inode,
+	.delete_inode	= ufs_delete_inode,
+	.put_super	= ufs_put_super,
+	.write_super	= ufs_write_super,
+	.statfs		= ufs_statfs,
+	.remount_fs	= ufs_remount,
+#ifdef CONFIG_QUOTA
+	.quota_read	= ufs_quota_read,
+	.quota_write	= ufs_quota_write,
+#endif
+};
+
+#ifdef CONFIG_QUOTA
+
+/* Read data from quotafile - avoid the pagecache and such because we cannot
+ * afford acquiring the locks... As quota files are never truncated and the
+ * quota code itself serializes the operations (and no one else should touch
+ * the files) we don't have to be afraid of races */
+static ssize_t ufs_quota_read(struct super_block *sb, int type, char *data,
+			       size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> sb->s_blocksize_bits;
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t toread;
+	struct buffer_head *bh;
+	loff_t i_size = i_size_read(inode);
+
+	if (off > i_size)
+		return 0;
+	if (off+len > i_size)
+		len = i_size-off;
+	toread = len;
+	while (toread > 0) {
+		tocopy = sb->s_blocksize - offset < toread ?
+				sb->s_blocksize - offset : toread;
+
+		bh = ufs_bread(inode, blk, 0, &err);
+		if (err)
+			return err;
+		if (!bh)	/* A hole? */
+			memset(data, 0, tocopy);
+		else {
+			memcpy(data, bh->b_data+offset, tocopy);
+			brelse(bh);
+		}
+		offset = 0;
+		toread -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+	return len;
+}
+
+/* Write to quotafile */
+static ssize_t ufs_quota_write(struct super_block *sb, int type,
+				const char *data, size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> sb->s_blocksize_bits;
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t towrite = len;
+	struct buffer_head *bh;
+
+	down(&inode->i_sem);
+	while (towrite > 0) {
+		tocopy = sb->s_blocksize - offset < towrite ?
+				sb->s_blocksize - offset : towrite;
+
+		bh = ufs_bread(inode, blk, 1, &err);
+		if (!bh)
+			goto out;
+		lock_buffer(bh);
+		memcpy(bh->b_data+offset, data, tocopy);
+		flush_dcache_page(bh->b_page);
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+		unlock_buffer(bh);
+		brelse(bh);
+		offset = 0;
+		towrite -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+out:
+	if (len == towrite)
+		return err;
+	if (inode->i_size < off+len-towrite)
+		i_size_write(inode, off+len-towrite);
+	inode->i_version++;
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	up(&inode->i_sem);
+	return len - towrite;
+}
+
+#endif
+
+static struct super_block *ufs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
+}
+
+static struct file_system_type ufs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ufs",
+	.get_sb		= ufs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_ufs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&ufs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_ufs_fs(void)
+{
+	unregister_filesystem(&ufs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_ufs_fs)
+module_exit(exit_ufs_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
new file mode 100644
index 0000000..1683d2b
--- /dev/null
+++ b/fs/ufs/swab.h
@@ -0,0 +1,133 @@
+/*
+ *  linux/fs/ufs/swab.h
+ *
+ * Copyright (C) 1997, 1998 Francois-Rene Rideau <fare@tunes.org>
+ * Copyright (C) 1998 Jakub Jelinek <jj@ultra.linux.cz>
+ * Copyright (C) 2001 Christoph Hellwig <hch@infradead.org>
+ */
+
+#ifndef _UFS_SWAB_H
+#define _UFS_SWAB_H
+
+/*
+ * Notes:
+ *    HERE WE ASSUME EITHER BIG OR LITTLE ENDIAN UFSes
+ *    in case there are ufs implementations that have strange bytesexes,
+ *    you'll need to modify code here as well as in ufs_super.c and ufs_fs.h
+ *    to support them.
+ */
+
+enum {
+	BYTESEX_LE,
+	BYTESEX_BE
+};
+
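+/*
+ * The helpers below convert between on-disk and CPU byte order based on
+ * the byte-sex recorded in the in-core superblock at mount time; the
+ * __fs{16,32,64} types carry the raw on-disk representation, hence the
+ * __force casts.
+ */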
+static inline u64
+fs64_to_cpu(struct super_block *sbp, __fs64 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le64_to_cpu((__force __le64)n);
+	else
+		return be64_to_cpu((__force __be64)n);
+}
+
+static inline __fs64
+cpu_to_fs64(struct super_block *sbp, u64 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs64)cpu_to_le64(n);
+	else
+		return (__force __fs64)cpu_to_be64(n);
+}
+
+static inline void
+fs64_add(struct super_block *sbp, __fs64 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)+d);
+	else
+		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)+d);
+}
+
+static inline void
+fs64_sub(struct super_block *sbp, __fs64 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)-d);
+	else
+		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)-d);
+}
+
+static inline u32
+fs32_to_cpu(struct super_block *sbp, __fs32 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le32_to_cpu((__force __le32)n);
+	else
+		return be32_to_cpu((__force __be32)n);
+}
+
+static inline __fs32
+cpu_to_fs32(struct super_block *sbp, u32 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs32)cpu_to_le32(n);
+	else
+		return (__force __fs32)cpu_to_be32(n);
+}
+
+static inline void
+fs32_add(struct super_block *sbp, __fs32 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
+	else
+		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
+}
+
+static inline void
+fs32_sub(struct super_block *sbp, __fs32 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
+	else
+		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
+}
+
+static inline u16
+fs16_to_cpu(struct super_block *sbp, __fs16 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le16_to_cpu((__force __le16)n);
+	else
+		return be16_to_cpu((__force __be16)n);
+}
+
+static inline __fs16
+cpu_to_fs16(struct super_block *sbp, u16 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs16)cpu_to_le16(n);
+	else
+		return (__force __fs16)cpu_to_be16(n);
+}
+
+static inline void
+fs16_add(struct super_block *sbp, __fs16 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+	else
+		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+}
+
+static inline void
+fs16_sub(struct super_block *sbp, __fs16 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
+	else
+		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
+}
+
+#endif /* _UFS_SWAB_H */
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
new file mode 100644
index 0000000..a0e4914
--- /dev/null
+++ b/fs/ufs/symlink.c
@@ -0,0 +1,42 @@
+/*
+ *  linux/fs/ufs/symlink.c
+ *
+ * Only fast symlinks left here - the rest is done by generic code. AV, 1999
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@emai.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/symlink.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/symlink.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 symlink handling code
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/ufs_fs.h>
+
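+/*
+ * Fast symlinks keep the target path directly in the on-disk inode's
+ * data area, so following one is just handing that buffer to the VFS;
+ * longer symlinks go through the generic page-based code set up at
+ * creation time.
+ */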
+static int ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct ufs_inode_info *p = UFS_I(dentry->d_inode);
+	nd_set_link(nd, (char*)p->i_u1.i_symlink);
+	return 0;
+}
+
+struct inode_operations ufs_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= ufs_follow_link,
+};
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
new file mode 100644
index 0000000..e312bf8
--- /dev/null
+++ b/fs/ufs/truncate.c
@@ -0,0 +1,477 @@
+/*
+ *  linux/fs/ufs/truncate.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/truncate.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/truncate.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+/*
+ * Real random numbers for secure rm added 94/02/18
+ * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/fcntl.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_TRUNCATE_DEBUG
+
+#ifdef UFS_TRUNCATE_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+ 
+/*
+ * Secure deletion currently doesn't work. It interacts very badly
+ * with buffers shared with memory mappings, and for that reason
+ * can't be done in the truncate() routines. It should instead be
+ * done separately in "release()" before calling the truncate routines
+ * that will release the actual file blocks.
+ *
+ *		Linus
+ */
+
+#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
+#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
+
+#define DATA_BUFFER_USED(bh) \
+	(atomic_read(&bh->b_count)>1 || buffer_locked(bh))
+
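+/*
+ * Direct blocks are trimmed in three pieces: the partial block at the
+ * new EOF (frag1..frag2), whole blocks in the middle (block1..block2),
+ * and the leftover fragments of the old last block (frag3..frag4,
+ * bounded by i_lastfrag).  A pass bails out with retry set whenever a
+ * data buffer is still in use elsewhere.
+ */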
+static int ufs_trunc_direct (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	__fs32 * p;
+	unsigned frag1, frag2, frag3, frag4, block1, block2;
+	unsigned frag_to_free, free_count;
+	unsigned i, j, tmp;
+	int retry;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	
+	frag_to_free = 0;
+	free_count = 0;
+	retry = 0;
+	
+	frag1 = DIRECT_FRAGMENT;
+	frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
+	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
+	frag3 = frag4 & ~uspi->s_fpbmask;
+	block1 = block2 = 0;
+	if (frag2 > frag3) {
+		frag2 = frag4;
+		frag3 = frag4 = 0;
+	}
+	else if (frag2 < frag3) {
+		block1 = ufs_fragstoblks (frag2);
+		block2 = ufs_fragstoblks (frag3);
+	}
+
+	UFSD(("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4))
+
+	if (frag1 >= frag2)
+		goto next1;		
+
+	/*
+	 * Free first free fragments
+	 */
+	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag1);
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp )
+		ufs_panic (sb, "ufs_trunc_direct", "internal error");
+	frag1 = ufs_fragnum (frag1);
+	frag2 = ufs_fragnum (frag2);
+	for (j = frag1; j < frag2; j++) {
+		bh = sb_find_get_block (sb, tmp + j);
+		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+			retry = 1;
+			brelse (bh);
+			goto next1;
+		}
+		bforget (bh);
+	}
+	inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
+	mark_inode_dirty(inode);
+	ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
+	frag_to_free = tmp + frag1;
+
+next1:
+	/*
+	 * Free whole blocks
+	 */
+	for (i = block1 ; i < block2; i++) {
+		p = ufsi->i_u1.i_data + i;
+		tmp = fs32_to_cpu(sb, *p);
+		if (!tmp)
+			continue;
+		for (j = 0; j < uspi->s_fpb; j++) {
+			bh = sb_find_get_block(sb, tmp + j);
+			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+				retry = 1;
+				brelse (bh);
+				goto next2;
+			}
+			bforget (bh);
+		}
+		*p = 0;
+		inode->i_blocks -= uspi->s_nspb;
+		mark_inode_dirty(inode);
+		if (free_count == 0) {
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		} else if (free_count > 0 && frag_to_free == tmp - free_count)
+			free_count += uspi->s_fpb;
+		else {
+			ufs_free_blocks (inode, frag_to_free, free_count);
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		}
+next2:;
+	}
+	
+	if (free_count > 0)
+		ufs_free_blocks (inode, frag_to_free, free_count);
+
+	if (frag3 >= frag4)
+		goto next3;
+
+	/*
+	 * Free last free fragments
+	 */
+	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag3);
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp )
+		ufs_panic(sb, "ufs_truncate_direct", "internal error");
+	frag4 = ufs_fragnum (frag4);
+	for (j = 0; j < frag4; j++) {
+		bh = sb_find_get_block (sb, tmp + j);
+		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+			retry = 1;
+			brelse (bh);
+			goto next1;
+		}
+		bforget (bh);
+	}
+	*p = 0;
+	inode->i_blocks -= frag4 << uspi->s_nspfshift;
+	mark_inode_dirty(inode);
+	ufs_free_fragments (inode, tmp, frag4);
+ next3:
+
+	UFSD(("EXIT\n"))
+	return retry;
+}
+
+
+static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * ind_ubh;
+	struct buffer_head * bh;
+	__fs32 * ind;
+	unsigned indirect_block, i, j, tmp;
+	unsigned frag_to_free, free_count;
+	int retry;
+
+	UFSD(("ENTER\n"))
+		
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	frag_to_free = 0;
+	free_count = 0;
+	retry = 0;
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp)
+		return 0;
+	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (ind_ubh);
+		return 1;
+	}
+	if (!ind_ubh) {
+		*p = 0;
+		return 0;
+	}
+
+	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
+	for (i = indirect_block; i < uspi->s_apb; i++) {
+		ind = ubh_get_addr32 (ind_ubh, i);
+		tmp = fs32_to_cpu(sb, *ind);
+		if (!tmp)
+			continue;
+		for (j = 0; j < uspi->s_fpb; j++) {
+			bh = sb_find_get_block(sb, tmp + j);
+			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
+				retry = 1;
+				brelse (bh);
+				goto next;
+			}
+			bforget (bh);
+		}	
+		*ind = 0;
+		ubh_mark_buffer_dirty(ind_ubh);
+		if (free_count == 0) {
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		} else if (free_count > 0 && frag_to_free == tmp - free_count)
+			free_count += uspi->s_fpb;
+		else {
+			ufs_free_blocks (inode, frag_to_free, free_count);
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		}
+		inode->i_blocks -= uspi->s_nspb;
+		mark_inode_dirty(inode);
+next:;
+	}
+
+	if (free_count > 0) {
+		ufs_free_blocks (inode, frag_to_free, free_count);
+	}
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32(ind_ubh,i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(ind_ubh) != 1) {
+			retry = 1;
+		}
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(ind_ubh);
+			ind_ubh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
+		ubh_wait_on_buffer (ind_ubh);
+		ubh_ll_rw_block (WRITE, 1, &ind_ubh);
+		ubh_wait_on_buffer (ind_ubh);
+	}
+	ubh_brelse (ind_ubh);
+	
+	UFSD(("EXIT\n"))
+	
+	return retry;
+}
+
+static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * dind_bh;
+	unsigned i, tmp, dindirect_block;
+	__fs32 * dind;
+	int retry = 0;
+	
+	UFSD(("ENTER\n"))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	dindirect_block = (DIRECT_BLOCK > offset) 
+		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
+	retry = 0;
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp)
+		return 0;
+	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (dind_bh);
+		return 1;
+	}
+	if (!dind_bh) {
+		*p = 0;
+		return 0;
+	}
+
+	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
+		dind = ubh_get_addr32 (dind_bh, i);
+		tmp = fs32_to_cpu(sb, *dind);
+		if (!tmp)
+			continue;
+		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
+		ubh_mark_buffer_dirty(dind_bh);
+	}
+
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32 (dind_bh, i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(dind_bh) != 1)
+			retry = 1;
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(dind_bh);
+			dind_bh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
+		ubh_wait_on_buffer (dind_bh);
+		ubh_ll_rw_block (WRITE, 1, &dind_bh);
+		ubh_wait_on_buffer (dind_bh);
+	}
+	ubh_brelse (dind_bh);
+	
+	UFSD(("EXIT\n"))
+	
+	return retry;
+}
+
+static int ufs_trunc_tindirect (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * tind_bh;
+	unsigned tindirect_block, tmp, i;
+	__fs32 * tind, * p;
+	int retry;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	retry = 0;
+	
+	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
+		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
+	p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
+	if (!(tmp = fs32_to_cpu(sb, *p)))
+		return 0;
+	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (tind_bh);
+		return 1;
+	}
+	if (!tind_bh) {
+		*p = 0;
+		return 0;
+	}
+
+	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
+		tind = ubh_get_addr32 (tind_bh, i);
+		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR + 
+			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
+		ubh_mark_buffer_dirty(tind_bh);
+	}
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32 (tind_bh, i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(tind_bh) != 1)
+			retry = 1;
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(tind_bh);
+			tind_bh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
+		ubh_wait_on_buffer (tind_bh);
+		ubh_ll_rw_block (WRITE, 1, &tind_bh);
+		ubh_wait_on_buffer (tind_bh);
+	}
+	ubh_brelse (tind_bh);
+	
+	UFSD(("EXIT\n"))
+	return retry;
+}
+		
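+/*
+ * ufs_truncate() repeatedly frees the direct, indirect, double indirect
+ * and triple indirect blocks beyond the new size.  Each pass reports
+ * "retry" when a block could not be released yet (for example because
+ * its buffer is still referenced elsewhere); in that case the inode is
+ * synced if needed and the whole sequence is run again after yielding.
+ * Finally, the tail of the last fragment is zeroed beyond i_size.
+ */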
+void ufs_truncate (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	unsigned offset;
+	int err, retry;
+	
+	UFSD(("ENTER\n"))
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+	lock_kernel();
+	while (1) {
+		retry = ufs_trunc_direct(inode);
+		retry |= ufs_trunc_indirect (inode, UFS_IND_BLOCK,
+			(__fs32 *) &ufsi->i_u1.i_data[UFS_IND_BLOCK]);
+		retry |= ufs_trunc_dindirect (inode, UFS_IND_BLOCK + uspi->s_apb,
+			(__fs32 *) &ufsi->i_u1.i_data[UFS_DIND_BLOCK]);
+		retry |= ufs_trunc_tindirect (inode);
+		if (!retry)
+			break;
+		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
+			ufs_sync_inode (inode);
+		blk_run_address_space(inode->i_mapping);
+		yield();
+	}
+	offset = inode->i_size & ~uspi->s_fmask;	/* offset within the last fragment */
+	if (offset) {
+		bh = ufs_bread (inode, inode->i_size >> uspi->s_fshift, 0, &err);
+		if (bh) {
+			memset (bh->b_data + offset, 0, uspi->s_fsize - offset);
+			mark_buffer_dirty (bh);
+			brelse (bh);
+		}
+	}
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	unlock_kernel();
+	mark_inode_dirty(inode);
+	UFSD(("EXIT\n"))
+}
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
new file mode 100644
index 0000000..59acc8f
--- /dev/null
+++ b/fs/ufs/util.c
@@ -0,0 +1,257 @@
+/*
+ *  linux/fs/ufs/util.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+ 
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ufs_fs.h>
+#include <linux/buffer_head.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_UTILS_DEBUG
+
+#ifdef UFS_UTILS_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+
+struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
+	struct super_block *sb, u64 fragment, u64 size)
+{
+	struct ufs_buffer_head * ubh;
+	unsigned i, j ;
+	u64  count = 0;
+	if (size & ~uspi->s_fmask)
+		return NULL;
+	count = size >> uspi->s_fshift;
+	if (count > UFS_MAXFRAG)
+		return NULL;
+	ubh = (struct ufs_buffer_head *)
+		kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
+	if (!ubh)
+		return NULL;
+	ubh->fragment = fragment;
+	ubh->count = count;
+	for (i = 0; i < count; i++)
+		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
+			goto failed;
+	for (; i < UFS_MAXFRAG; i++)
+		ubh->bh[i] = NULL;
+	return ubh;
+failed:
+	for (j = 0; j < i; j++)
+		brelse (ubh->bh[j]);
+	kfree(ubh);
+	return NULL;
+}
+
+struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
+	struct super_block *sb, u64 fragment, u64 size)
+{
+	unsigned i, j;
+	u64 count = 0;
+	if (size & ~uspi->s_fmask)
+		return NULL;
+	count = size >> uspi->s_fshift;
+	if (count <= 0 || count > UFS_MAXFRAG)
+		return NULL;
+	USPI_UBH->fragment = fragment;
+	USPI_UBH->count = count;
+	for (i = 0; i < count; i++)
+		if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
+			goto failed;
+	for (; i < UFS_MAXFRAG; i++)
+		USPI_UBH->bh[i] = NULL;
+	return USPI_UBH;
+failed:
+	for (j = 0; j < i; j++)
+		brelse (USPI_UBH->bh[j]);
+	return NULL;
+}
+
+void ubh_brelse (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for (i = 0; i < ubh->count; i++)
+		brelse (ubh->bh[i]);
+	kfree (ubh);
+}
+
+void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
+{
+	unsigned i;
+	if (!USPI_UBH)
+		return;
+	for ( i = 0; i < USPI_UBH->count; i++ ) {
+		brelse (USPI_UBH->bh[i]);
+		USPI_UBH->bh[i] = NULL;
+	}
+}
+
+void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		mark_buffer_dirty (ubh->bh[i]);
+}
+
+void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	if (flag) {
+		for ( i = 0; i < ubh->count; i++ )
+			set_buffer_uptodate (ubh->bh[i]);
+	} else {
+		for ( i = 0; i < ubh->count; i++ )
+			clear_buffer_uptodate (ubh->bh[i]);
+	}
+}
+
+void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[])
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < nr; i++ )
+		ll_rw_block (rw, ubh[i]->count, ubh[i]->bh);
+}
+
+void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		wait_on_buffer (ubh->bh[i]);
+}
+
+unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	unsigned max = 0;
+	if (!ubh)
+		return 0;
+	for ( i = 0; i < ubh->count; i++ ) 
+		if ( atomic_read(&ubh->bh[i]->b_count) > max )
+			max = atomic_read(&ubh->bh[i]->b_count);
+	return max;
+}
+
+void ubh_bforget (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh) 
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		if ( ubh->bh[i] )
+			bforget (ubh->bh[i]);
+}
+ 
+int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	unsigned result = 0;
+	if (!ubh)
+		return 0;
+	for ( i = 0; i < ubh->count; i++ )
+		result |= buffer_dirty(ubh->bh[i]);
+	return result;
+}
+
+void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi, 
+	unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
+{
+	unsigned len, bhno;
+	if (size > (ubh->count << uspi->s_fshift))
+		size = ubh->count << uspi->s_fshift;
+	bhno = 0;
+	while (size) {
+		len = min_t(unsigned int, size, uspi->s_fsize);
+		memcpy (mem, ubh->bh[bhno]->b_data, len);
+		mem += uspi->s_fsize;
+		size -= len;
+		bhno++;
+	}
+}
+
+void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi, 
+	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
+{
+	unsigned len, bhno;
+	if (size > (ubh->count << uspi->s_fshift))
+		size = ubh->count << uspi->s_fshift;
+	bhno = 0;
+	while (size) {
+		len = min_t(unsigned int, size, uspi->s_fsize);
+		memcpy (ubh->bh[bhno]->b_data, mem, len);
+		mem += uspi->s_fsize;
+		size -= len;
+		bhno++;
+	}
+}
+
+dev_t
+ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
+{
+	__fs32 fs32;
+	dev_t dev;
+
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		fs32 = ufsi->i_u1.i_data[1];
+	else
+		fs32 = ufsi->i_u1.i_data[0];
+	fs32 = fs32_to_cpu(sb, fs32);
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUNx86:
+	case UFS_ST_SUN:
+		if ((fs32 & 0xffff0000) == 0 ||
+		    (fs32 & 0xffff0000) == 0xffff0000)
+			dev = old_decode_dev(fs32 & 0x7fff);
+		else
+			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
+		break;
+
+	default:
+		dev = old_decode_dev(fs32);
+		break;
+	}
+	return dev;
+}
+
+void
+ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
+{
+	__fs32 fs32;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUNx86:
+	case UFS_ST_SUN:
+		fs32 = sysv_encode_dev(dev);
+		if ((fs32 & 0xffff8000) == 0) {
+			fs32 = old_encode_dev(dev);
+		}
+		break;
+
+	default:
+		fs32 = old_encode_dev(dev);
+		break;
+	}
+	fs32 = cpu_to_fs32(sb, fs32);
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		ufsi->i_u1.i_data[1] = fs32;
+	else
+		ufsi->i_u1.i_data[0] = fs32;
+}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
new file mode 100644
index 0000000..b264007
--- /dev/null
+++ b/fs/ufs/util.h
@@ -0,0 +1,526 @@
+/*
+ *  linux/fs/ufs/util.h
+ *
+ * Copyright (C) 1998 
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include "swab.h"
+
+
+/*
+ * some useful macros
+ */
+#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))
+
+/*
+ * macros used for retyping
+ */
+#define UCPI_UBH ((struct ufs_buffer_head *)ucpi)
+#define USPI_UBH ((struct ufs_buffer_head *)uspi)
+
+
+
+/*
+ * macros used for accessing structures
+ */
+static inline s32
+ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
+		 struct ufs_super_block_third *usb3)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state);
+	case UFS_ST_SUNx86:
+		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
+	case UFS_ST_44BSD:
+	default:
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_state);
+	}
+}
+
+static inline void
+ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
+		 struct ufs_super_block_third *usb3, s32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value);
+		break;
+	case UFS_ST_SUNx86:
+		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
+		break;
+	case UFS_ST_44BSD:
+		usb3->fs_u2.fs_44.fs_state = cpu_to_fs32(sb, value);
+		break;
+	}
+}
+
+static inline u32
+ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
+		  struct ufs_super_block_third *usb3)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect);
+	else
+		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
+}
+
+static inline u64
+ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
+{
+	__fs64 tmp;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1];
+		break;
+	case UFS_ST_SUNx86:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qbmask[1];
+		break;
+	case UFS_ST_44BSD:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qbmask[1];
+		break;
+	}
+
+	return fs64_to_cpu(sb, tmp);
+}
+
+static inline u64
+ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
+{
+	__fs64 tmp;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1];
+		break;
+	case UFS_ST_SUNx86:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qfmask[1];
+		break;
+	case UFS_ST_44BSD:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qfmask[1];
+		break;
+	}
+
+	return fs64_to_cpu(sb, tmp);
+}
+
+static inline u16
+ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
+		return fs16_to_cpu(sb, de->d_u.d_namlen);
+	else
+		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
+}
+
+static inline void
+ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
+		de->d_u.d_namlen = cpu_to_fs16(sb, value);
+	else
+		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
+}
+
+static inline void
+ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
+		return;
+
+	/*
+	 * TODO turn this into a table lookup
+	 */
+	switch (mode & S_IFMT) {
+	case S_IFSOCK:
+		de->d_u.d_44.d_type = DT_SOCK;
+		break;
+	case S_IFLNK:
+		de->d_u.d_44.d_type = DT_LNK;
+		break;
+	case S_IFREG:
+		de->d_u.d_44.d_type = DT_REG;
+		break;
+	case S_IFBLK:
+		de->d_u.d_44.d_type = DT_BLK;
+		break;
+	case S_IFDIR:
+		de->d_u.d_44.d_type = DT_DIR;
+		break;
+	case S_IFCHR:
+		de->d_u.d_44.d_type = DT_CHR;
+		break;
+	case S_IFIFO:
+		de->d_u.d_44.d_type = DT_FIFO;
+		break;
+	default:
+		de->d_u.d_44.d_type = DT_UNKNOWN;
+	}
+}
+
+static inline u32
+ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
+	case UFS_UID_44BSD:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
+	default:
+		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
+	}
+}
+
+static inline void
+ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
+		break;
+	case UFS_UID_44BSD:
+		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
+		break;
+	}
+	inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value); 
+}
+
+static inline u32
+ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
+	case UFS_UID_44BSD:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
+	default:
+		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
+	}
+}
+
+static inline void
+ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
+		break;
+	case UFS_UID_44BSD:
+		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
+		break;
+	}
+	inode->ui_u1.oldids.ui_sgid =  cpu_to_fs16(sb, value);
+}
+
+extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
+extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
+
+/*
+ * These functions manipulate ufs buffers
+ */
+#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)  
+extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64 , u64);
+extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
+extern void ubh_brelse (struct ufs_buffer_head *);
+extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
+extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
+extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
+extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **);
+extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
+extern void ubh_bforget (struct ufs_buffer_head *);
+extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
+#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
+extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
+#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
+extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
+
+
+
+/*
+ * macros to get important structures from ufs_buffer_head
+ */
+#define ubh_get_usb_first(ubh) \
+	((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
+
+#define ubh_get_usb_second(ubh) \
+	((struct ufs_super_block_second *)(ubh)-> \
+	bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask))
+
+#define ubh_get_usb_third(ubh) \
+	((struct ufs_super_block_third *)((ubh)-> \
+	bh[UFS_SECTOR_SIZE*2 >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE*2 & ~uspi->s_fmask)))
+
+#define ubh_get_ucg(ubh) \
+	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))
+
+
+/*
+ * Extract byte from ufs_buffer_head
+ * Extract the bits for a block from a map inside ufs_buffer_head
+ */
+#define ubh_get_addr8(ubh,begin) \
+	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
+	((begin) & ~uspi->s_fmask))
+
+#define ubh_get_addr16(ubh,begin) \
+	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
+	((begin) & ((uspi->s_fsize>>1) - 1)))
+
+#define ubh_get_addr32(ubh,begin) \
+	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
+	((begin) & ((uspi->s_fsize>>2) - 1)))
+
+#define ubh_get_addr ubh_get_addr8
+
+#define ubh_blkmap(ubh,begin,bit) \
+	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
+
+
+/*
+ * Macros for access to superblock array structures
+ */
+#define ubh_postbl(ubh,cylno,i) \
+	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
+	? (*(__s16*)(ubh_get_addr(ubh, \
+	(unsigned)(&((struct ufs_super_block *)0)->fs_opostbl) \
+	+ (((cylno) * 16 + (i)) << 1) ) )) \
+	: (*(__s16*)(ubh_get_addr(ubh, \
+	uspi->s_postbloff + (((cylno) * uspi->s_nrpos + (i)) << 1) ))))
+
+#define ubh_rotbl(ubh,i) \
+	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
+	? (*(__u8*)(ubh_get_addr(ubh, \
+	(unsigned)(&((struct ufs_super_block *)0)->fs_space) + (i)))) \
+	: (*(__u8*)(ubh_get_addr(ubh, uspi->s_rotbloff + (i)))))
+
+/*
+ * Determine the number of available frags given a
+ * percentage to hold in reserve.
+ */
+#define ufs_freespace(usb, percentreserved) \
+	(ufs_blkstofrags(fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nbfree)) + \
+	fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nffree) - (uspi->s_dsize * (percentreserved) / 100))
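+/*
+ * Worked example with illustrative figures: assuming 8 fragments per
+ * block, 5000 free blocks, 120 free fragments, s_dsize == 100000 and a
+ * 10% reserve, ufs_freespace() gives 8*5000 + 120 - 100000*10/100 = 30120
+ * available fragments.
+ */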
+
+/*
+ * Macros to access cylinder group array structures
+ */
+#define ubh_cg_blktot(ucpi,cylno) \
+	(*((__fs32*)ubh_get_addr(UCPI_UBH, (ucpi)->c_btotoff + ((cylno) << 2))))
+
+#define ubh_cg_blks(ucpi,cylno,rpos) \
+	(*((__fs16*)ubh_get_addr(UCPI_UBH, \
+	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))
+
+/*
+ * Bitmap operations
+ * These functions work like classical bitmap operations.
+ * The difference is that we don't have the whole bitmap
+ * in one contiguous chunk of memory, but in several buffers.
+ * The parameters of each function are super_block, ufs_buffer_head and
+ * position of the beginning of the bitmap.
+ */
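+/*
+ * For illustration, assume a 1024-byte fragment size (s_fshift == 10,
+ * ~s_fmask == 1023): ubh_setbit(ubh, begin, bit) addresses the byte at
+ * offset begin + (bit >> 3), which ubh_get_addr8() resolves to buffer
+ * bh[(begin + (bit >> 3)) >> 10] at byte (begin + (bit >> 3)) & 1023,
+ * and then sets bit (bit & 7) within that byte.
+ */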
+#define ubh_setbit(ubh,begin,bit) \
+	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))
+
+#define ubh_clrbit(ubh,begin,bit) \
+	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))
+
+#define ubh_isset(ubh,begin,bit) \
+	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))
+
+#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))
+
+#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)
+
+#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
+static inline unsigned _ubh_find_next_zero_bit_(
+	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
+	unsigned begin, unsigned size, unsigned offset)
+{
+	unsigned base, count, pos;
+
+	size -= offset;
+	begin <<= 3;
+	offset += begin;
+	base = offset >> uspi->s_bpfshift;
+	offset &= uspi->s_bpfmask;
+	for (;;) {
+		count = min_t(unsigned int, size + offset, uspi->s_bpf);
+		size -= count - offset;
+		pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
+		if (pos < count || !size)
+			break;
+		base++;
+		offset = 0;
+	}
+	return (base << uspi->s_bpfshift) + pos - begin;
+} 	
+
+static inline unsigned find_last_zero_bit (unsigned char * bitmap,
+	unsigned size, unsigned offset)
+{
+	unsigned bit, i;
+	unsigned char * mapp;
+	unsigned char map;
+
+	mapp = bitmap + (size >> 3);
+	map = *mapp--;
+	bit = 1 << (size & 7);
+	for (i = size; i > offset; i--) {
+		if ((map & bit) == 0)
+			break;
+		if ((i & 7) != 0) {
+			bit >>= 1;
+		} else {
+			map = *mapp--;
+			bit = 1 << 7;
+		}
+	}
+	return i;
+}
+
+#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
+static inline unsigned _ubh_find_last_zero_bit_(
+	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
+	unsigned begin, unsigned start, unsigned end)
+{
+	unsigned base, count, pos, size;
+
+	size = start - end;
+	begin <<= 3;
+	start += begin;
+	base = start >> uspi->s_bpfshift;
+	start &= uspi->s_bpfmask;
+	for (;;) {
+		count = min_t(unsigned int,
+			    size + (uspi->s_bpf - start), uspi->s_bpf)
+			- (uspi->s_bpf - start);
+		size -= count;
+		pos = find_last_zero_bit (ubh->bh[base]->b_data,
+			start, start - count);
+		if (pos > start - count || !size)
+			break;
+		base--;
+		start = uspi->s_bpf;
+	}
+	return (base << uspi->s_bpfshift) + pos - begin;
+} 	
+
+#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))
+
+#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
+static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	return (*ubh_get_addr (ubh, begin + block) == 0xff);
+	case 4:
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+	case 2:
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+	case 1:
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+	}
+	return 0;	
+}
+
+#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
+static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	*ubh_get_addr (ubh, begin + block) = 0x00;
+	    	return; 
+	case 4:
+		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
+		return;
+	case 2:
+		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
+		return;
+	case 1:
+		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
+		return;
+	}
+}
+
+#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
+static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	*ubh_get_addr(ubh, begin + block) = 0xff;
+	    	return;
+	case 4:
+		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
+		return;
+	case 2:
+		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
+		return;
+	case 1:
+		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
+		return;
+	}
+}
+
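+/*
+ * Tally the runs of set bits in 'blockmap' (one bit per fragment of a
+ * block): for each maximal run of n set bits, except a run covering the
+ * whole block, add 'cnt' to fraglist[n].  This is used to keep the
+ * cylinder group fragment summary counts up to date.
+ */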
+static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
+	__fs32 * fraglist, int cnt)
+{
+	struct ufs_sb_private_info * uspi;
+	unsigned fragsize, pos;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	
+	fragsize = 0;
+	for (pos = 0; pos < uspi->s_fpb; pos++) {
+		if (blockmap & (1 << pos)) {
+			fragsize++;
+		}
+		else if (fragsize > 0) {
+			fs32_add(sb, &fraglist[fragsize], cnt);
+			fragsize = 0;
+		}
+	}
+	if (fragsize > 0 && fragsize < uspi->s_fpb)
+		fs32_add(sb, &fraglist[fragsize], cnt);
+}
+
+#define ubh_scanc(ubh,begin,size,table,mask) _ubh_scanc_(uspi,ubh,begin,size,table,mask)
+static inline unsigned _ubh_scanc_(struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh, 
+	unsigned begin, unsigned size, unsigned char * table, unsigned char mask)
+{
+	unsigned rest, offset;
+	unsigned char * cp;
+	
+
+	offset = begin & ~uspi->s_fmask;
+	begin >>= uspi->s_fshift;
+	for (;;) {
+		if ((offset + size) < uspi->s_fsize)
+			rest = size;
+		else
+			rest = uspi->s_fsize - offset;
+		size -= rest;
+		cp = ubh->bh[begin]->b_data + offset;
+		while ((table[*cp++] & mask) == 0 && --rest);
+		if (rest || !size)
+			break;
+		begin++;
+		offset = 0;
+	}
+	return (size + rest);
+}
diff --git a/fs/umsdos/notes b/fs/umsdos/notes
new file mode 100644
index 0000000..3c47d1f
--- /dev/null
+++ b/fs/umsdos/notes
@@ -0,0 +1,17 @@
+This file contains ideas and things I don't want to forget
+
+Possible bug in fs/read_write.c
+Function sys_readdir()
+
+	There is a call to verify_area that does not take into account
+	the count parameter. I guess it should read
+
+	error = verify_area(VERIFY_WRITE, dirent, count*sizeof (*dirent));
+
+	instead of
+
+	error = verify_area(VERIFY_WRITE, dirent, sizeof (*dirent));
+
+	Of course, now, count is always 1
+
+
diff --git a/fs/vfat/Makefile b/fs/vfat/Makefile
new file mode 100644
index 0000000..40f2798
--- /dev/null
+++ b/fs/vfat/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux vfat-filesystem routines.
+#
+
+obj-$(CONFIG_VFAT_FS) += vfat.o
+
+vfat-y := namei.o
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
new file mode 100644
index 0000000..1c6f6b5
--- /dev/null
+++ b/fs/vfat/namei.c
@@ -0,0 +1,1082 @@
+/*
+ *  linux/fs/vfat/namei.c
+ *
+ *  Written 1992,1993 by Werner Almesberger
+ *
+ *  Windows95/Windows NT compatible extended MSDOS filesystem
+ *    by Gordon Chaffee Copyright (C) 1995.  Send bug reports for the
+ *    VFAT filesystem to <chaffee@cs.berkeley.edu>.  Specify
+ *    what file operation caused you trouble and if you can duplicate
+ *    the problem, send a script that demonstrates it.
+ *
+ *  Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de>
+ *
+ *  Support Multibyte characters and cleanup by
+ *				OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+ */
+
+#include <linux/module.h>
+
+#include <linux/jiffies.h>
+#include <linux/msdos_fs.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/namei.h>
+
+static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	int ret = 1;
+
+	if (!dentry->d_inode &&
+	    nd && !(nd->flags & LOOKUP_CONTINUE) && (nd->flags & LOOKUP_CREATE))
+		/*
+		 * The negative dentry is dropped in order to make
+		 * sure that the name the user desires is used if
+		 * this is the create path.
+		 */
+		ret = 0;
+	else {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_time != dentry->d_parent->d_inode->i_version)
+			ret = 0;
+		spin_unlock(&dentry->d_lock);
+	}
+	return ret;
+}
+
+/* returns the length of a struct qstr, ignoring trailing dots */
+static unsigned int vfat_striptail_len(struct qstr *qstr)
+{
+	unsigned int len = qstr->len;
+
+	while (len && qstr->name[len - 1] == '.')
+		len--;
+	return len;
+}
+
+/*
+ * Compute the hash for the vfat name corresponding to the dentry.
+ * Note: if the name is invalid, we leave the hash code unchanged so
+ * that the existing dentry can be used. The vfat fs routines will
+ * return ENOENT or EINVAL as appropriate.
+ */
+static int vfat_hash(struct dentry *dentry, struct qstr *qstr)
+{
+	qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr));
+	return 0;
+}
+
+/*
+ * Compute the case-insensitive hash for the vfat name of the dentry.
+ * Note: if the name is invalid, we leave the hash code unchanged so
+ * that the existing dentry can be used. The vfat fs routines will
+ * return ENOENT or EINVAL as appropriate.
+ */
+static int vfat_hashi(struct dentry *dentry, struct qstr *qstr)
+{
+	struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io;
+	const unsigned char *name;
+	unsigned int len;
+	unsigned long hash;
+
+	name = qstr->name;
+	len = vfat_striptail_len(qstr);
+
+	hash = init_name_hash();
+	while (len--)
+		hash = partial_name_hash(nls_tolower(t, *name++), hash);
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+/*
+ * Case insensitive compare of two vfat names.
+ */
+static int vfat_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io;
+	unsigned int alen, blen;
+
+	/* A filename cannot end in '.' or we treat it like it has none */
+	alen = vfat_striptail_len(a);
+	blen = vfat_striptail_len(b);
+	if (alen == blen) {
+		if (nls_strnicmp(t, a->name, b->name, alen) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * Case sensitive compare of two vfat names.
+ */
+static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
+{
+	unsigned int alen, blen;
+
+	/* A filename cannot end in '.' or we treat it like it has none */
+	alen = vfat_striptail_len(a);
+	blen = vfat_striptail_len(b);
+	if (alen == blen) {
+		if (strncmp(a->name, b->name, alen) == 0)
+			return 0;
+	}
+	return 1;
+}
+
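+/*
+ * Four variants: entries 0 and 1 use the case-insensitive hash/compare,
+ * entries 2 and 3 the case-sensitive ones (options.name_check == 's');
+ * the odd-numbered entries additionally install d_revalidate, which
+ * vfat_lookup() selects for negative dentries.
+ */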
+static struct dentry_operations vfat_dentry_ops[4] = {
+	{
+		.d_hash		= vfat_hashi,
+		.d_compare	= vfat_cmpi,
+	},
+	{
+		.d_revalidate	= vfat_revalidate,
+		.d_hash		= vfat_hashi,
+		.d_compare	= vfat_cmpi,
+	},
+	{
+		.d_hash		= vfat_hash,
+		.d_compare	= vfat_cmp,
+	},
+	{
+		.d_revalidate	= vfat_revalidate,
+		.d_hash		= vfat_hash,
+		.d_compare	= vfat_cmp,
+	}
+};
+
+/* Characters that are undesirable in an MS-DOS file name */
+
+static inline wchar_t vfat_bad_char(wchar_t w)
+{
+	return (w < 0x0020)
+	    || (w == '*') || (w == '?') || (w == '<') || (w == '>')
+	    || (w == '|') || (w == '"') || (w == ':') || (w == '/')
+	    || (w == '\\');
+}
+
+static inline wchar_t vfat_replace_char(wchar_t w)
+{
+	return (w == '[') || (w == ']') || (w == ';') || (w == ',')
+	    || (w == '+') || (w == '=');
+}
+
+static wchar_t vfat_skip_char(wchar_t w)
+{
+	return (w == '.') || (w == ' ');
+}
+
+static inline int vfat_is_used_badchars(const wchar_t *s, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (vfat_bad_char(s[i]))
+			return -EINVAL;
+	return 0;
+}
+
+static int vfat_valid_longname(const unsigned char *name, unsigned int len)
+{
+	if (name[len - 1] == ' ')
+		return -EINVAL;
+	if (len >= 256)
+		return -ENAMETOOLONG;
+
+	/* MS-DOS "device special files" */
+	if (len == 3 || (len > 3 && name[3] == '.')) {	/* basename == 3 */
+		if (!strnicmp(name, "aux", 3) ||
+		    !strnicmp(name, "con", 3) ||
+		    !strnicmp(name, "nul", 3) ||
+		    !strnicmp(name, "prn", 3))
+			return -EINVAL;
+	}
+	if (len == 4 || (len > 4 && name[4] == '.')) {	/* basename == 4 */
+		/* "com1", "com2", ... */
+		if ('1' <= name[3] && name[3] <= '9') {
+			if (!strnicmp(name, "com", 3) ||
+			    !strnicmp(name, "lpt", 3))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int vfat_find_form(struct inode *dir, unsigned char *name)
+{
+	struct fat_slot_info sinfo;
+	int err = fat_scan(dir, name, &sinfo);
+	if (err)
+		return -ENOENT;
+	brelse(sinfo.bh);
+	return 0;
+}
+
+/*
+ * 1) Valid characters for an 8.3 format alias are any combination of
+ * uppercase letters, digits, and the following special characters:
+ *     $ % ' ` - @ { } ~ ! # ( ) & _ ^
+ * In this case the long filename is not stored on disk.
+ *
+ * WinNT's extension:
+ * The file name and the extension each contain only uppercase or only
+ * lowercase characters; this is expressed by CASE_LOWER_BASE and
+ * CASE_LOWER_EXT.
+ *
+ * 2) The file name is in 8.3 format, but it contains mixed uppercase
+ * and lowercase characters, multibyte characters, etc.  In this case
+ * no numeric tail is added, but the long filename is stored.
+ *
+ * 3) In any case other than the above, or when one of the following
+ * special characters is contained:
+ *        .   [ ] ; , + =
+ * a numeric tail is added, and the long filename must be stored on disk.
+ */
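+/*
+ * Illustrative examples (assuming no collision with existing aliases):
+ *   "README"      - case 1: stored as the plain 8.3 alias, no long name.
+ *   "Readme.txt"  - case 2: alias "README  TXT" plus long name entries,
+ *                   no numeric tail.
+ *   "read+me.txt" - case 3: '+' is replaced by '_' and a numeric tail is
+ *                   added, giving an alias like READ_M~1.TXT plus long
+ *                   name entries.
+ */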
+struct shortname_info {
+	unsigned char lower:1,
+		      upper:1,
+		      valid:1;
+};
+#define INIT_SHORTNAME_INFO(x)	do {		\
+	(x)->lower = 1;				\
+	(x)->upper = 1;				\
+	(x)->valid = 1;				\
+} while (0)
+
+static inline int to_shortname_char(struct nls_table *nls,
+				    unsigned char *buf, int buf_size,
+				    wchar_t *src, struct shortname_info *info)
+{
+	int len;
+
+	if (vfat_skip_char(*src)) {
+		info->valid = 0;
+		return 0;
+	}
+	if (vfat_replace_char(*src)) {
+		info->valid = 0;
+		buf[0] = '_';
+		return 1;
+	}
+
+	len = nls->uni2char(*src, buf, buf_size);
+	if (len <= 0) {
+		info->valid = 0;
+		buf[0] = '_';
+		len = 1;
+	} else if (len == 1) {
+		unsigned char prev = buf[0];
+
+		if (buf[0] >= 0x7F) {
+			info->lower = 0;
+			info->upper = 0;
+		}
+
+		buf[0] = nls_toupper(nls, buf[0]);
+		if (isalpha(buf[0])) {
+			if (buf[0] == prev)
+				info->lower = 0;
+			else
+				info->upper = 0;
+		}
+	} else {
+		info->lower = 0;
+		info->upper = 0;
+	}
+
+	return len;
+}
+
+/*
+ * Given a valid longname, create a unique shortname.  Make sure the
+ * shortname does not already exist.
+ * Returns a negative number on error, 0 for a normal return, and
+ * 1 when the name already is a valid shortname.
+ */
+static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
+				 wchar_t *uname, int ulen,
+				 unsigned char *name_res, unsigned char *lcase)
+{
+	struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
+	wchar_t *ip, *ext_start, *end, *name_start;
+	unsigned char base[9], ext[4], buf[8], *p;
+	unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
+	int chl, chi;
+	int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
+	int is_shortname;
+	struct shortname_info base_info, ext_info;
+
+	is_shortname = 1;
+	INIT_SHORTNAME_INFO(&base_info);
+	INIT_SHORTNAME_INFO(&ext_info);
+
+	/* Now, we need to create a shortname from the long name */
+	ext_start = end = &uname[ulen];
+	while (--ext_start >= uname) {
+		if (*ext_start == 0x002E) {	/* is `.' */
+			if (ext_start == end - 1) {
+				sz = ulen;
+				ext_start = NULL;
+			}
+			break;
+		}
+	}
+
+	if (ext_start == uname - 1) {
+		sz = ulen;
+		ext_start = NULL;
+	} else if (ext_start) {
+		/*
+		 * Names which start with a dot could be just
+		 * an extension, e.g. "...test".  In this case Win95
+		 * uses the extension as the name and sets no extension.
+		 */
+		name_start = &uname[0];
+		while (name_start < ext_start) {
+			if (!vfat_skip_char(*name_start))
+				break;
+			name_start++;
+		}
+		if (name_start != ext_start) {
+			sz = ext_start - uname;
+			ext_start++;
+		} else {
+			sz = ulen;
+			ext_start = NULL;
+		}
+	}
+
+	numtail_baselen = 6;
+	numtail2_baselen = 2;
+	for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
+		chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
+					ip, &base_info);
+		if (chl == 0)
+			continue;
+
+		if (baselen < 2 && (baselen + chl) > 2)
+			numtail2_baselen = baselen;
+		if (baselen < 6 && (baselen + chl) > 6)
+			numtail_baselen = baselen;
+		for (chi = 0; chi < chl; chi++) {
+			*p++ = charbuf[chi];
+			baselen++;
+			if (baselen >= 8)
+				break;
+		}
+		if (baselen >= 8) {
+			if ((chi < chl - 1) || (ip + 1) - uname < sz)
+				is_shortname = 0;
+			break;
+		}
+	}
+	if (baselen == 0) {
+		return -EINVAL;
+	}
+
+	extlen = 0;
+	if (ext_start) {
+		for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
+			chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
+						ip, &ext_info);
+			if (chl == 0)
+				continue;
+
+			if ((extlen + chl) > 3) {
+				is_shortname = 0;
+				break;
+			}
+			for (chi = 0; chi < chl; chi++) {
+				*p++ = charbuf[chi];
+				extlen++;
+			}
+			if (extlen >= 3) {
+				if (ip + 1 != end)
+					is_shortname = 0;
+				break;
+			}
+		}
+	}
+	ext[extlen] = '\0';
+	base[baselen] = '\0';
+
+	/* Yes, it can happen. ".\xe5" would do it. */
+	if (base[0] == DELETED_FLAG)
+		base[0] = 0x05;
+
+	/* OK, at this point we know that base is not longer than 8 symbols,
+	 * ext is not longer than 3, base is nonempty, and neither contains
+	 * any bad symbols (lowercase has been transformed to uppercase).
+	 */
+
+	memset(name_res, ' ', MSDOS_NAME);
+	memcpy(name_res, base, baselen);
+	memcpy(name_res + 8, ext, extlen);
+	*lcase = 0;
+	if (is_shortname && base_info.valid && ext_info.valid) {
+		if (vfat_find_form(dir, name_res) == 0)
+			return -EEXIST;
+
+		if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
+			return (base_info.upper && ext_info.upper);
+		} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
+			if ((base_info.upper || base_info.lower) &&
+			    (ext_info.upper || ext_info.lower)) {
+				if (!base_info.upper && base_info.lower)
+					*lcase |= CASE_LOWER_BASE;
+				if (!ext_info.upper && ext_info.lower)
+					*lcase |= CASE_LOWER_EXT;
+				return 1;
+			}
+			return 0;
+		} else {
+			BUG();
+		}
+	}
+
+	if (opts->numtail == 0)
+		if (vfat_find_form(dir, name_res) < 0)
+			return 0;
+
+	/*
+	 * Try to find a unique extension.  This used to
+	 * iterate through all possibilities sequentially,
+	 * but that gave extremely bad performance.  Windows
+	 * only tries a few cases before using random
+	 * values for part of the base.
+	 */
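+	/*
+	 * E.g. for a base of "README" this tries README~1 .. README~9 and
+	 * then falls back to a jiffies-derived form such as RE1A2B~3
+	 * (two base characters, four hex digits, '~' and a digit).
+	 */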
+
+	if (baselen > 6) {
+		baselen = numtail_baselen;
+		name_res[7] = ' ';
+	}
+	name_res[baselen] = '~';
+	for (i = 1; i < 10; i++) {
+		name_res[baselen + 1] = i + '0';
+		if (vfat_find_form(dir, name_res) < 0)
+			return 0;
+	}
+
+	i = jiffies & 0xffff;
+	sz = (jiffies >> 16) & 0x7;
+	if (baselen > 2) {
+		baselen = numtail2_baselen;
+		name_res[7] = ' ';
+	}
+	name_res[baselen + 4] = '~';
+	name_res[baselen + 5] = '1' + sz;
+	while (1) {
+		sprintf(buf, "%04X", i);
+		memcpy(&name_res[baselen], buf, 4);
+		if (vfat_find_form(dir, name_res) < 0)
+			break;
+		i -= 11;
+	}
+	return 0;
+}
+
+/* Translate a string, including coded sequences into Unicode */
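+/*
+ * With the 'escape' flag (set from opts->unicode_xlate) a ':' followed by
+ * four hex digits is decoded into one 16-bit character, e.g. ":263a"
+ * becomes the single character 0x263a.
+ */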
+static int
+xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
+	     int *longlen, int *outlen, int escape, int utf8,
+	     struct nls_table *nls)
+{
+	const unsigned char *ip;
+	unsigned char nc;
+	unsigned char *op;
+	unsigned int ec;
+	int i, k, fill;
+	int charlen;
+
+	if (utf8) {
+		int name_len = strlen(name);
+
+		*outlen = utf8_mbstowcs((wchar_t *)outname, name, PAGE_SIZE);
+
+		/*
+		 * We stripped '.'s before and set len appropriately,
+		 * but utf8_mbstowcs doesn't care about len
+		 */
+		*outlen -= (name_len - len);
+
+		op = &outname[*outlen * sizeof(wchar_t)];
+	} else {
+		if (nls) {
+			for (i = 0, ip = name, op = outname, *outlen = 0;
+			     i < len && *outlen <= 260;
+			     *outlen += 1)
+			{
+				if (escape && (*ip == ':')) {
+					if (i > len - 5)
+						return -EINVAL;
+					ec = 0;
+					for (k = 1; k < 5; k++) {
+						nc = ip[k];
+						ec <<= 4;
+						if (nc >= '0' && nc <= '9') {
+							ec |= nc - '0';
+							continue;
+						}
+						if (nc >= 'a' && nc <= 'f') {
+							ec |= nc - ('a' - 10);
+							continue;
+						}
+						if (nc >= 'A' && nc <= 'F') {
+							ec |= nc - ('A' - 10);
+							continue;
+						}
+						return -EINVAL;
+					}
+					*op++ = ec & 0xFF;
+					*op++ = ec >> 8;
+					ip += 5;
+					i += 5;
+				} else {
+					if ((charlen = nls->char2uni(ip, len - i, (wchar_t *)op)) < 0)
+						return -EINVAL;
+					ip += charlen;
+					i += charlen;
+					op += 2;
+				}
+			}
+		} else {
+			for (i = 0, ip = name, op = outname, *outlen = 0;
+			     i < len && *outlen <= 260;
+			     i++, *outlen += 1)
+			{
+				*op++ = *ip++;
+				*op++ = 0;
+			}
+		}
+	}
+	if (*outlen > 260)
+		return -ENAMETOOLONG;
+
+	*longlen = *outlen;
+	if (*outlen % 13) {
+		*op++ = 0;
+		*op++ = 0;
+		*outlen += 1;
+		if (*outlen % 13) {
+			fill = 13 - (*outlen % 13);
+			for (i = 0; i < fill; i++) {
+				*op++ = 0xff;
+				*op++ = 0xff;
+			}
+			*outlen += fill;
+		}
+	}
+
+	return 0;
+}
+
+static int vfat_build_slots(struct inode *dir, const unsigned char *name,
+			    int len, int is_dir, int cluster,
+			    struct timespec *ts,
+			    struct msdos_dir_slot *slots, int *nr_slots)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
+	struct fat_mount_options *opts = &sbi->options;
+	struct msdos_dir_slot *ps;
+	struct msdos_dir_entry *de;
+	unsigned long page;
+	unsigned char cksum, lcase;
+	unsigned char msdos_name[MSDOS_NAME];
+	wchar_t *uname;
+	__le16 time, date;
+	int err, ulen, usize, i;
+	loff_t offset;
+
+	*nr_slots = 0;
+	err = vfat_valid_longname(name, len);
+	if (err)
+		return err;
+
+	page = __get_free_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	uname = (wchar_t *)page;
+	err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
+			   opts->unicode_xlate, opts->utf8, sbi->nls_io);
+	if (err)
+		goto out_free;
+
+	err = vfat_is_used_badchars(uname, ulen);
+	if (err)
+		goto out_free;
+
+	err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
+				    msdos_name, &lcase);
+	if (err < 0)
+		goto out_free;
+	else if (err == 1) {
+		de = (struct msdos_dir_entry *)slots;
+		err = 0;
+		goto shortname;
+	}
+
+	/* build the entry of long file name */
+	for (cksum = i = 0; i < 11; i++)
+		cksum = (((cksum&1)<<7)|((cksum&0xfe)>>1)) + msdos_name[i];
+
+	*nr_slots = usize / 13;
+	for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
+		ps->id = i;
+		ps->attr = ATTR_EXT;
+		ps->reserved = 0;
+		ps->alias_checksum = cksum;
+		ps->start = 0;
+		offset = (i - 1) * 13;
+		fatwchar_to16(ps->name0_4, uname + offset, 5);
+		fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
+		fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
+	}
+	slots[0].id |= 0x40;
+	de = (struct msdos_dir_entry *)ps;
+
+shortname:
+	/* build the entry of 8.3 alias name */
+	(*nr_slots)++;
+	memcpy(de->name, msdos_name, MSDOS_NAME);
+	de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
+	de->lcase = lcase;
+	fat_date_unix2dos(ts->tv_sec, &time, &date);
+	de->time = de->ctime = time;
+	de->date = de->cdate = de->adate = date;
+	de->ctime_cs = 0;
+	de->start = cpu_to_le16(cluster);
+	de->starthi = cpu_to_le16(cluster >> 16);
+	de->size = 0;
+out_free:
+	free_page(page);
+	return err;
+}
+
+static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
+			  int cluster, struct timespec *ts,
+			  struct fat_slot_info *sinfo)
+{
+	struct msdos_dir_slot *slots;
+	unsigned int len;
+	int err, nr_slots;
+
+	len = vfat_striptail_len(qname);
+	if (len == 0)
+		return -ENOENT;
+
+	slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_KERNEL);
+	if (slots == NULL)
+		return -ENOMEM;
+
+	err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
+			       slots, &nr_slots);
+	if (err)
+		goto cleanup;
+
+	err = fat_add_entries(dir, slots, nr_slots, sinfo);
+	if (err)
+		goto cleanup;
+
+	/* update timestamp */
+	dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
+	if (IS_DIRSYNC(dir))
+		(void)fat_sync_inode(dir);
+	else
+		mark_inode_dirty(dir);
+cleanup:
+	kfree(slots);
+	return err;
+}
+
+static int vfat_find(struct inode *dir, struct qstr *qname,
+		     struct fat_slot_info *sinfo)
+{
+	unsigned int len = vfat_striptail_len(qname);
+	if (len == 0)
+		return -ENOENT;
+	return fat_search_long(dir, qname->name, len, sinfo);
+}
+
+static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
+				  struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct fat_slot_info sinfo;
+	struct inode *inode = NULL;
+	struct dentry *alias;
+	int err, table;
+
+	lock_kernel();
+	table = (MSDOS_SB(sb)->options.name_check == 's') ? 2 : 0;
+	dentry->d_op = &vfat_dentry_ops[table];
+
+	err = vfat_find(dir, &dentry->d_name, &sinfo);
+	if (err) {
+		table++;
+		goto error;
+	}
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		unlock_kernel();
+		return ERR_PTR(PTR_ERR(inode));
+	}
+	alias = d_find_alias(inode);
+	if (alias) {
+		if (d_invalidate(alias) == 0)
+			dput(alias);
+		else {
+			iput(inode);
+			unlock_kernel();
+			return alias;
+		}
+
+	}
+error:
+	unlock_kernel();
+	dentry->d_op = &vfat_dentry_ops[table];
+	dentry->d_time = dentry->d_parent->d_inode->i_version;
+	dentry = d_splice_alias(inode, dentry);
+	if (dentry) {
+		dentry->d_op = &vfat_dentry_ops[table];
+		dentry->d_time = dentry->d_parent->d_inode->i_version;
+	}
+	return dentry;
+}
+
+static int vfat_create(struct inode *dir, struct dentry *dentry, int mode,
+		       struct nameidata *nd)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode;
+	struct fat_slot_info sinfo;
+	struct timespec ts;
+	int err;
+
+	lock_kernel();
+
+	ts = CURRENT_TIME_SEC;
+	err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
+	if (err)
+		goto out;
+	dir->i_version++;
+
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out;
+	}
+	inode->i_version++;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+	dentry->d_time = dentry->d_parent->d_inode->i_version;
+	d_instantiate(dentry, inode);
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct fat_slot_info sinfo;
+	int err;
+
+	lock_kernel();
+
+	err = fat_dir_empty(inode);
+	if (err)
+		goto out;
+	err = vfat_find(dir, &dentry->d_name, &sinfo);
+	if (err)
+		goto out;
+
+	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
+	if (err)
+		goto out;
+	dir->i_nlink--;
+
+	inode->i_nlink = 0;
+	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+	fat_detach(inode);
+out:
+	unlock_kernel();
+
+	return err;
+}
+
+static int vfat_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct fat_slot_info sinfo;
+	int err;
+
+	lock_kernel();
+
+	err = vfat_find(dir, &dentry->d_name, &sinfo);
+	if (err)
+		goto out;
+
+	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
+	if (err)
+		goto out;
+	inode->i_nlink = 0;
+	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+	fat_detach(inode);
+out:
+	unlock_kernel();
+
+	return err;
+}
+
+static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct super_block *sb = dir->i_sb;
+	struct inode *inode;
+	struct fat_slot_info sinfo;
+	struct timespec ts;
+	int err, cluster;
+
+	lock_kernel();
+
+	ts = CURRENT_TIME_SEC;
+	cluster = fat_alloc_new_dir(dir, &ts);
+	if (cluster < 0) {
+		err = cluster;
+		goto out;
+	}
+	err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
+	if (err)
+		goto out_free;
+	dir->i_version++;
+	dir->i_nlink++;
+
+	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
+	brelse(sinfo.bh);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		/* the directory was completed, just return an error */
+		goto out;
+	}
+	inode->i_version++;
+	inode->i_nlink = 2;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+	dentry->d_time = dentry->d_parent->d_inode->i_version;
+	d_instantiate(dentry, inode);
+
+	unlock_kernel();
+	return 0;
+
+out_free:
+	fat_free_clusters(dir, cluster);
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+		       struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct buffer_head *dotdot_bh;
+	struct msdos_dir_entry *dotdot_de;
+	loff_t dotdot_i_pos;
+	struct inode *old_inode, *new_inode;
+	struct fat_slot_info old_sinfo, sinfo;
+	struct timespec ts;
+	int err, is_dir, update_dotdot, corrupt = 0;
+
+	old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
+	old_inode = old_dentry->d_inode;
+	new_inode = new_dentry->d_inode;
+	lock_kernel();
+	err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
+	if (err)
+		goto out;
+
+	is_dir = S_ISDIR(old_inode->i_mode);
+	update_dotdot = (is_dir && old_dir != new_dir);
+	if (update_dotdot) {
+		if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
+					 &dotdot_i_pos) < 0) {
+			err = -EIO;
+			goto out;
+		}
+	}
+
+	ts = CURRENT_TIME_SEC;
+	if (new_inode) {
+		err = vfat_find(new_dir, &new_dentry->d_name, &sinfo);
+		if (err)
+			goto out;
+		if (MSDOS_I(new_inode)->i_pos != sinfo.i_pos) {
+			/* WTF??? Cry and fail. */
+			printk(KERN_WARNING "vfat_rename: fs corrupted\n");
+			goto out;
+		}
+
+		if (is_dir) {
+			err = fat_dir_empty(new_inode);
+			if (err)
+				goto out;
+		}
+		fat_detach(new_inode);
+	} else {
+		err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
+				     &ts, &sinfo);
+		if (err)
+			goto out;
+	}
+	new_dir->i_version++;
+
+	fat_detach(old_inode);
+	fat_attach(old_inode, sinfo.i_pos);
+	if (IS_DIRSYNC(new_dir)) {
+		err = fat_sync_inode(old_inode);
+		if (err)
+			goto error_inode;
+	} else
+		mark_inode_dirty(old_inode);
+
+	if (update_dotdot) {
+		int start = MSDOS_I(new_dir)->i_logstart;
+		dotdot_de->start = cpu_to_le16(start);
+		dotdot_de->starthi = cpu_to_le16(start >> 16);
+		mark_buffer_dirty(dotdot_bh);
+		if (IS_DIRSYNC(new_dir)) {
+			err = sync_dirty_buffer(dotdot_bh);
+			if (err)
+				goto error_dotdot;
+		}
+		old_dir->i_nlink--;
+		if (!new_inode)
+ 			new_dir->i_nlink++;
+	}
+
+	err = fat_remove_entries(old_dir, &old_sinfo);	/* and releases bh */
+	old_sinfo.bh = NULL;
+	if (err)
+		goto error_dotdot;
+	old_dir->i_version++;
+	old_dir->i_ctime = old_dir->i_mtime = ts;
+	if (IS_DIRSYNC(old_dir))
+		(void)fat_sync_inode(old_dir);
+	else
+		mark_inode_dirty(old_dir);
+
+	if (new_inode) {
+		if (is_dir)
+			new_inode->i_nlink -= 2;
+		else
+			new_inode->i_nlink--;
+		new_inode->i_ctime = ts;
+	}
+out:
+	brelse(sinfo.bh);
+	brelse(dotdot_bh);
+	brelse(old_sinfo.bh);
+	unlock_kernel();
+
+	return err;
+
+error_dotdot:
+	/* data cluster is shared, serious corruption */
+	corrupt = 1;
+
+	if (update_dotdot) {
+		int start = MSDOS_I(old_dir)->i_logstart;
+		dotdot_de->start = cpu_to_le16(start);
+		dotdot_de->starthi = cpu_to_le16(start >> 16);
+		mark_buffer_dirty(dotdot_bh);
+		corrupt |= sync_dirty_buffer(dotdot_bh);
+	}
+error_inode:
+	fat_detach(old_inode);
+	fat_attach(old_inode, old_sinfo.i_pos);
+	if (new_inode) {
+		fat_attach(new_inode, sinfo.i_pos);
+		if (corrupt)
+			corrupt |= fat_sync_inode(new_inode);
+	} else {
+		/*
+		 * If the new entry was not sharing the data cluster, it
+		 * shouldn't be serious corruption.
+		 */
+		int err2 = fat_remove_entries(new_dir, &sinfo);
+		if (corrupt)
+			corrupt |= err2;
+		sinfo.bh = NULL;
+	}
+	if (corrupt < 0) {
+		fat_fs_panic(new_dir->i_sb,
+			     "%s: Filesystem corrupted (i_pos %lld)",
+			     __FUNCTION__, sinfo.i_pos);
+	}
+	goto out;
+}
+
+static struct inode_operations vfat_dir_inode_operations = {
+	.create		= vfat_create,
+	.lookup		= vfat_lookup,
+	.unlink		= vfat_unlink,
+	.mkdir		= vfat_mkdir,
+	.rmdir		= vfat_rmdir,
+	.rename		= vfat_rename,
+	.setattr	= fat_notify_change,
+};
+
+static int vfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int res;
+
+	res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1);
+	if (res)
+		return res;
+
+	if (MSDOS_SB(sb)->options.name_check != 's')
+		sb->s_root->d_op = &vfat_dentry_ops[0];
+	else
+		sb->s_root->d_op = &vfat_dentry_ops[2];
+
+	return 0;
+}
+
+static struct super_block *vfat_get_sb(struct file_system_type *fs_type,
+				       int flags, const char *dev_name,
+				       void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
+}
+
+static struct file_system_type vfat_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "vfat",
+	.get_sb		= vfat_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_vfat_fs(void)
+{
+	return register_filesystem(&vfat_fs_type);
+}
+
+static void __exit exit_vfat_fs(void)
+{
+	unregister_filesystem(&vfat_fs_type);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VFAT filesystem support");
+MODULE_AUTHOR("Gordon Chaffee");
+
+module_init(init_vfat_fs)
+module_exit(exit_vfat_fs)
diff --git a/fs/xattr.c b/fs/xattr.c
new file mode 100644
index 0000000..93dee70
--- /dev/null
+++ b/fs/xattr.c
@@ -0,0 +1,480 @@
+/*
+  File: fs/xattr.c
+
+  Extended attribute handling.
+
+  Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+  Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+  Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/file.h>
+#include <linux/xattr.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+/*
+ * Extended attribute SET operations
+ */
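+/*
+ * These are reached from userspace through the setxattr(2), lsetxattr(2)
+ * and fsetxattr(2) system calls, for example (illustrative):
+ *
+ *	setxattr("/mnt/file", "user.comment", "hello", 5, XATTR_CREATE);
+ */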
+static long
+setxattr(struct dentry *d, char __user *name, void __user *value,
+	 size_t size, int flags)
+{
+	int error;
+	void *kvalue = NULL;
+	char kname[XATTR_NAME_MAX + 1];
+
+	if (flags & ~(XATTR_CREATE|XATTR_REPLACE))
+		return -EINVAL;
+
+	error = strncpy_from_user(kname, name, sizeof(kname));
+	if (error == 0 || error == sizeof(kname))
+		error = -ERANGE;
+	if (error < 0)
+		return error;
+
+	if (size) {
+		if (size > XATTR_SIZE_MAX)
+			return -E2BIG;
+		kvalue = kmalloc(size, GFP_KERNEL);
+		if (!kvalue)
+			return -ENOMEM;
+		if (copy_from_user(kvalue, value, size)) {
+			kfree(kvalue);
+			return -EFAULT;
+		}
+	}
+
+	error = -EOPNOTSUPP;
+	if (d->d_inode->i_op && d->d_inode->i_op->setxattr) {
+		down(&d->d_inode->i_sem);
+		error = security_inode_setxattr(d, kname, kvalue, size, flags);
+		if (error)
+			goto out;
+		error = d->d_inode->i_op->setxattr(d, kname, kvalue, size, flags);
+		if (!error)
+			security_inode_post_setxattr(d, kname, kvalue, size, flags);
+out:
+		up(&d->d_inode->i_sem);
+	}
+	if (kvalue)
+		kfree(kvalue);
+	return error;
+}
+
+asmlinkage long
+sys_setxattr(char __user *path, char __user *name, void __user *value,
+	     size_t size, int flags)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(path, &nd);
+	if (error)
+		return error;
+	error = setxattr(nd.dentry, name, value, size, flags);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage long
+sys_lsetxattr(char __user *path, char __user *name, void __user *value,
+	      size_t size, int flags)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk_link(path, &nd);
+	if (error)
+		return error;
+	error = setxattr(nd.dentry, name, value, size, flags);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage long
+sys_fsetxattr(int fd, char __user *name, void __user *value,
+	      size_t size, int flags)
+{
+	struct file *f;
+	int error = -EBADF;
+
+	f = fget(fd);
+	if (!f)
+		return error;
+	error = setxattr(f->f_dentry, name, value, size, flags);
+	fput(f);
+	return error;
+}
+
+/*
+ * Extended attribute GET operations
+ */
+static ssize_t
+getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
+{
+	ssize_t error;
+	void *kvalue = NULL;
+	char kname[XATTR_NAME_MAX + 1];
+
+	error = strncpy_from_user(kname, name, sizeof(kname));
+	if (error == 0 || error == sizeof(kname))
+		error = -ERANGE;
+	if (error < 0)
+		return error;
+
+	if (size) {
+		if (size > XATTR_SIZE_MAX)
+			size = XATTR_SIZE_MAX;
+		kvalue = kmalloc(size, GFP_KERNEL);
+		if (!kvalue)
+			return -ENOMEM;
+	}
+
+	error = -EOPNOTSUPP;
+	if (d->d_inode->i_op && d->d_inode->i_op->getxattr) {
+		error = security_inode_getxattr(d, kname);
+		if (error)
+			goto out;
+		error = d->d_inode->i_op->getxattr(d, kname, kvalue, size);
+		if (error > 0) {
+			if (size && copy_to_user(value, kvalue, error))
+				error = -EFAULT;
+		} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
+			/* The file system tried to return a value bigger
+			   than XATTR_SIZE_MAX bytes. Not possible. */
+			error = -E2BIG;
+		}
+	}
+out:
+	if (kvalue)
+		kfree(kvalue);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_getxattr(char __user *path, char __user *name, void __user *value,
+	     size_t size)
+{
+	struct nameidata nd;
+	ssize_t error;
+
+	error = user_path_walk(path, &nd);
+	if (error)
+		return error;
+	error = getxattr(nd.dentry, name, value, size);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_lgetxattr(char __user *path, char __user *name, void __user *value,
+	      size_t size)
+{
+	struct nameidata nd;
+	ssize_t error;
+
+	error = user_path_walk_link(path, &nd);
+	if (error)
+		return error;
+	error = getxattr(nd.dentry, name, value, size);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_fgetxattr(int fd, char __user *name, void __user *value, size_t size)
+{
+	struct file *f;
+	ssize_t error = -EBADF;
+
+	f = fget(fd);
+	if (!f)
+		return error;
+	error = getxattr(f->f_dentry, name, value, size);
+	fput(f);
+	return error;
+}
+
+/*
+ * Extended attribute LIST operations
+ */
+static ssize_t
+listxattr(struct dentry *d, char __user *list, size_t size)
+{
+	ssize_t error;
+	char *klist = NULL;
+
+	if (size) {
+		if (size > XATTR_LIST_MAX)
+			size = XATTR_LIST_MAX;
+		klist = kmalloc(size, GFP_KERNEL);
+		if (!klist)
+			return -ENOMEM;
+	}
+
+	error = -EOPNOTSUPP;
+	if (d->d_inode->i_op && d->d_inode->i_op->listxattr) {
+		error = security_inode_listxattr(d);
+		if (error)
+			goto out;
+		error = d->d_inode->i_op->listxattr(d, klist, size);
+		if (error > 0) {
+			if (size && copy_to_user(list, klist, error))
+				error = -EFAULT;
+		} else if (error == -ERANGE && size >= XATTR_LIST_MAX) {
+			/* The file system tried to return a list bigger
+			   than XATTR_LIST_MAX bytes. Not possible. */
+			error = -E2BIG;
+		}
+	}
+out:
+	if (klist)
+		kfree(klist);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_listxattr(char __user *path, char __user *list, size_t size)
+{
+	struct nameidata nd;
+	ssize_t error;
+
+	error = user_path_walk(path, &nd);
+	if (error)
+		return error;
+	error = listxattr(nd.dentry, list, size);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_llistxattr(char __user *path, char __user *list, size_t size)
+{
+	struct nameidata nd;
+	ssize_t error;
+
+	error = user_path_walk_link(path, &nd);
+	if (error)
+		return error;
+	error = listxattr(nd.dentry, list, size);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage ssize_t
+sys_flistxattr(int fd, char __user *list, size_t size)
+{
+	struct file *f;
+	ssize_t error = -EBADF;
+
+	f = fget(fd);
+	if (!f)
+		return error;
+	error = listxattr(f->f_dentry, list, size);
+	fput(f);
+	return error;
+}
+
+/*
+ * Extended attribute REMOVE operations
+ */
+static long
+removexattr(struct dentry *d, char __user *name)
+{
+	int error;
+	char kname[XATTR_NAME_MAX + 1];
+
+	error = strncpy_from_user(kname, name, sizeof(kname));
+	if (error == 0 || error == sizeof(kname))
+		error = -ERANGE;
+	if (error < 0)
+		return error;
+
+	error = -EOPNOTSUPP;
+	if (d->d_inode->i_op && d->d_inode->i_op->removexattr) {
+		error = security_inode_removexattr(d, kname);
+		if (error)
+			goto out;
+		down(&d->d_inode->i_sem);
+		error = d->d_inode->i_op->removexattr(d, kname);
+		up(&d->d_inode->i_sem);
+	}
+out:
+	return error;
+}
+
+asmlinkage long
+sys_removexattr(char __user *path, char __user *name)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(path, &nd);
+	if (error)
+		return error;
+	error = removexattr(nd.dentry, name);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage long
+sys_lremovexattr(char __user *path, char __user *name)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk_link(path, &nd);
+	if (error)
+		return error;
+	error = removexattr(nd.dentry, name);
+	path_release(&nd);
+	return error;
+}
+
+asmlinkage long
+sys_fremovexattr(int fd, char __user *name)
+{
+	struct file *f;
+	int error = -EBADF;
+
+	f = fget(fd);
+	if (!f)
+		return error;
+	error = removexattr(f->f_dentry, name);
+	fput(f);
+	return error;
+}
+
+
+static const char *
+strcmp_prefix(const char *a, const char *a_prefix)
+{
+	while (*a_prefix && *a == *a_prefix) {
+		a++;
+		a_prefix++;
+	}
+	return *a_prefix ? NULL : a;
+}
+
+/*
+ * In order to implement different sets of xattr operations for each xattr
+ * prefix with the generic xattr API, a filesystem should create a
+ * null-terminated array of struct xattr_handler (one for each prefix) and
+ * hang a pointer to it off of the s_xattr field of the superblock.
+ *
+ * The generic_fooxattr() functions will use this list to dispatch xattr
+ * operations to the correct xattr_handler.
+ */
+#define for_each_xattr_handler(handlers, handler)		\
+		for ((handler) = *(handlers)++;			\
+			(handler) != NULL;			\
+			(handler) = *(handlers)++)
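+
+/*
+ * A minimal registration sketch for the convention described above
+ * (illustrative only -- "example_user_handler", "example_handlers" and the
+ * callbacks are hypothetical names; the callbacks are invoked exactly as
+ * the generic_*xattr() dispatchers below call them):
+ *
+ *	static struct xattr_handler example_user_handler = {
+ *		.prefix	= "user.",
+ *		.get	= example_user_get,	(get(inode, name, buffer, size))
+ *		.set	= example_user_set,	(set(inode, name, value, size, flags))
+ *		.list	= example_user_list,	(list(inode, buf, size, NULL, 0))
+ *	};
+ *
+ *	static struct xattr_handler *example_handlers[] = {
+ *		&example_user_handler,
+ *		NULL,				(array is null-terminated)
+ *	};
+ *
+ * and, at mount time:	sb->s_xattr = example_handlers;
+ */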
+
+/*
+ * Find the xattr_handler with the matching prefix.
+ */
+static struct xattr_handler *
+xattr_resolve_name(struct xattr_handler **handlers, const char **name)
+{
+	struct xattr_handler *handler;
+
+	if (!*name)
+		return NULL;
+
+	for_each_xattr_handler(handlers, handler) {
+		const char *n = strcmp_prefix(*name, handler->prefix);
+		if (n) {
+			*name = n;
+			break;
+		}
+	}
+	return handler;
+}
+
+/*
+ * Find the handler for the prefix and dispatch its get() operation.
+ */
+ssize_t
+generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size)
+{
+	struct xattr_handler *handler;
+	struct inode *inode = dentry->d_inode;
+
+	handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+	if (!handler)
+		return -EOPNOTSUPP;
+	return handler->get(inode, name, buffer, size);
+}
+
+/*
+ * Combine the results of the list() operation from every xattr_handler in the
+ * list.
+ */
+ssize_t
+generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+	struct inode *inode = dentry->d_inode;
+	struct xattr_handler *handler, **handlers = inode->i_sb->s_xattr;
+	unsigned int size = 0;
+
+	if (!buffer) {
+		for_each_xattr_handler(handlers, handler)
+			size += handler->list(inode, NULL, 0, NULL, 0);
+	} else {
+		char *buf = buffer;
+
+		for_each_xattr_handler(handlers, handler) {
+			size = handler->list(inode, buf, buffer_size, NULL, 0);
+			if (size > buffer_size)
+				return -ERANGE;
+			buf += size;
+			buffer_size -= size;
+		}
+		size = buf - buffer;
+	}
+	return size;
+}
+
+/*
+ * Find the handler for the prefix and dispatch its set() operation.
+ */
+int
+generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+{
+	struct xattr_handler *handler;
+	struct inode *inode = dentry->d_inode;
+
+	if (size == 0)
+		value = "";  /* empty EA, do not remove */
+	handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+	if (!handler)
+		return -EOPNOTSUPP;
+	return handler->set(inode, name, value, size, flags);
+}
+
+/*
+ * Find the handler for the prefix and dispatch its set() operation to remove
+ * any associated extended attribute.
+ */
+int
+generic_removexattr(struct dentry *dentry, const char *name)
+{
+	struct xattr_handler *handler;
+	struct inode *inode = dentry->d_inode;
+
+	handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+	if (!handler)
+		return -EOPNOTSUPP;
+	return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
+}
+
+EXPORT_SYMBOL(generic_getxattr);
+EXPORT_SYMBOL(generic_listxattr);
+EXPORT_SYMBOL(generic_setxattr);
+EXPORT_SYMBOL(generic_removexattr);
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
new file mode 100644
index 0000000..789a255
--- /dev/null
+++ b/fs/xattr_acl.c
@@ -0,0 +1,99 @@
+/*
+ * linux/fs/xattr_acl.c
+ *
+ * Almost all from linux/fs/ext2/acl.c:
+ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+
+
+/*
+ * Convert from extended attribute to in-memory representation.
+ */
+struct posix_acl *
+posix_acl_from_xattr(const void *value, size_t size)
+{
+	posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+	posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+	int count;
+	struct posix_acl *acl;
+	struct posix_acl_entry *acl_e;
+
+	if (!value)
+		return NULL;
+	if (size < sizeof(posix_acl_xattr_header))
+		 return ERR_PTR(-EINVAL);
+	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	count = posix_acl_xattr_count(size);
+	if (count < 0)
+		return ERR_PTR(-EINVAL);
+	if (count == 0)
+		return NULL;
+	
+	acl = posix_acl_alloc(count, GFP_KERNEL);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+	acl_e = acl->a_entries;
+	
+	for (end = entry + count; entry != end; acl_e++, entry++) {
+		acl_e->e_tag  = le16_to_cpu(entry->e_tag);
+		acl_e->e_perm = le16_to_cpu(entry->e_perm);
+
+		switch(acl_e->e_tag) {
+			case ACL_USER_OBJ:
+			case ACL_GROUP_OBJ:
+			case ACL_MASK:
+			case ACL_OTHER:
+				acl_e->e_id = ACL_UNDEFINED_ID;
+				break;
+
+			case ACL_USER:
+			case ACL_GROUP:
+				acl_e->e_id = le32_to_cpu(entry->e_id);
+				break;
+
+			default:
+				goto fail;
+		}
+	}
+	return acl;
+
+fail:
+	posix_acl_release(acl);
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL (posix_acl_from_xattr);
+
+/*
+ * Convert from in-memory to extended attribute representation.
+ */
+int
+posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size)
+{
+	posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
+	posix_acl_xattr_entry *ext_entry = ext_acl->a_entries;
+	int real_size, n;
+
+	real_size = posix_acl_xattr_size(acl->a_count);
+	if (!buffer)
+		return real_size;
+	if (real_size > size)
+		return -ERANGE;
+	
+	ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+
+	for (n=0; n < acl->a_count; n++, ext_entry++) {
+		ext_entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+		ext_entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+		ext_entry->e_id   = cpu_to_le32(acl->a_entries[n].e_id);
+	}
+	return real_size;
+}
+EXPORT_SYMBOL (posix_acl_to_xattr);
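+
+/*
+ * A short usage sketch for the pair above (illustrative only; "acl" stands
+ * for any attached struct posix_acl).  posix_acl_to_xattr() reports the
+ * required size when handed a NULL buffer, giving the usual two-step
+ * pattern:
+ *
+ *	int size = posix_acl_to_xattr(acl, NULL, 0);
+ *	void *value = kmalloc(size, GFP_KERNEL);
+ *
+ *	if (value && posix_acl_to_xattr(acl, value, size) == size) {
+ *		struct posix_acl *copy = posix_acl_from_xattr(value, size);
+ *		if (!IS_ERR(copy))
+ *			posix_acl_release(copy);
+ *	}
+ *	kfree(value);
+ */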
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
new file mode 100644
index 0000000..c92306f
--- /dev/null
+++ b/fs/xfs/Kconfig
@@ -0,0 +1,85 @@
+menu "XFS support"
+
+config XFS_FS
+	tristate "XFS filesystem support"
+	select EXPORTFS if NFSD!=n
+	help
+	  XFS is a high performance journaling filesystem which originated
+	  on the SGI IRIX platform.  It is completely multi-threaded, can
+	  support large files and large filesystems, extended attributes,
+	  variable block sizes, is extent based, and makes extensive use of
+	  Btrees (directories, extents, free space) to aid both performance
+	  and scalability.
+
+	  Refer to the documentation at <http://oss.sgi.com/projects/xfs/>
+	  for complete details.  This implementation is on-disk compatible
+	  with the IRIX version of XFS.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called xfs.  Be aware, however, that if the file
+	  system of your root partition is compiled as a module, you'll need
+	  to use an initial ramdisk (initrd) to boot.
+
+config XFS_EXPORT
+	bool
+	default y if XFS_FS && EXPORTFS
+
+config XFS_RT
+	bool "Realtime support (EXPERIMENTAL)"
+	depends on XFS_FS && EXPERIMENTAL
+	help
+	  If you say Y here you will be able to mount and use XFS filesystems
+	  which contain a realtime subvolume. The realtime subvolume is a
+	  separate area of disk space where only file data is stored. The
+	  realtime subvolume is designed to provide very deterministic
+	  data rates suitable for media streaming applications.
+
+	  See the xfs man page in section 5 for a bit more information.
+
+	  This feature is unsupported at this time, is not yet fully
+	  functional, and may cause serious problems.
+
+	  If unsure, say N.
+
+config XFS_QUOTA
+	bool "Quota support"
+	depends on XFS_FS
+	help
+	  If you say Y here, you will be able to set limits for disk usage on
+	  a per user and/or a per group basis under XFS.  XFS considers quota
+	  information as filesystem metadata and uses journaling to provide a
+	  higher level guarantee of consistency.  The on-disk data format for
+	  quota is also compatible with the IRIX version of XFS, allowing a
+	  filesystem to be migrated between Linux and IRIX without any need
+	  for conversion.
+
+	  If unsure, say N.  More comprehensive documentation can be found in
+	  README.quota in the xfsprogs package.  XFS quota can be used either
+	  with or without the generic quota support enabled (CONFIG_QUOTA) -
+	  they are completely independent subsystems.
+
+config XFS_SECURITY
+	bool "Security Label support"
+	depends on XFS_FS
+	help
+	  Security labels support alternative access control models
+	  implemented by security modules like SELinux.  This option
+	  enables an extended attribute namespace for inode security
+	  labels in the XFS filesystem.
+
+	  If you are not using a security module that requires using
+	  extended attributes for inode security labels, say N.
+
+config XFS_POSIX_ACL
+	bool "POSIX ACL support"
+	depends on XFS_FS
+	help
+	  POSIX Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+endmenu
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
new file mode 100644
index 0000000..554e4a1
--- /dev/null
+++ b/fs/xfs/Makefile
@@ -0,0 +1,150 @@
+#
+# Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.  Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+# Mountain View, CA  94043, or:
+#
+# http://www.sgi.com
+#
+# For further information regarding this notice, see:
+#
+# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+#
+
+EXTRA_CFLAGS +=	 -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
+
+ifeq ($(CONFIG_XFS_DEBUG),y)
+	EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
+	EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
+endif
+ifeq ($(CONFIG_XFS_TRACE),y)
+	EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
+	EXTRA_CFLAGS += -DXFS_ATTR_TRACE
+	EXTRA_CFLAGS += -DXFS_BLI_TRACE
+	EXTRA_CFLAGS += -DXFS_BMAP_TRACE
+	EXTRA_CFLAGS += -DXFS_BMBT_TRACE
+	EXTRA_CFLAGS += -DXFS_DIR_TRACE
+	EXTRA_CFLAGS += -DXFS_DIR2_TRACE
+	EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
+	EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
+	EXTRA_CFLAGS += -DXFS_LOG_TRACE
+	EXTRA_CFLAGS += -DXFS_RW_TRACE
+	EXTRA_CFLAGS += -DPAGEBUF_TRACE
+	# EXTRA_CFLAGS += -DXFS_VNODE_TRACE
+endif
+
+obj-$(CONFIG_XFS_FS)		+= xfs.o
+
+xfs-$(CONFIG_XFS_QUOTA)		+= $(addprefix quota/, \
+				   xfs_dquot.o \
+				   xfs_dquot_item.o \
+				   xfs_trans_dquot.o \
+				   xfs_qm_syscalls.o \
+				   xfs_qm_bhv.o \
+				   xfs_qm.o)
+ifeq ($(CONFIG_XFS_QUOTA),y)
+xfs-$(CONFIG_PROC_FS)		+= quota/xfs_qm_stats.o
+endif
+
+xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o
+xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
+xfs-$(CONFIG_PROC_FS)		+= linux-2.6/xfs_stats.o
+xfs-$(CONFIG_SYSCTL)		+= linux-2.6/xfs_sysctl.o
+xfs-$(CONFIG_COMPAT)		+= linux-2.6/xfs_ioctl32.o
+xfs-$(CONFIG_XFS_EXPORT)	+= linux-2.6/xfs_export.o
+
+
+xfs-y				+= xfs_alloc.o \
+				   xfs_alloc_btree.o \
+				   xfs_attr.o \
+				   xfs_attr_leaf.o \
+				   xfs_behavior.o \
+				   xfs_bit.o \
+				   xfs_bmap.o \
+				   xfs_bmap_btree.o \
+				   xfs_btree.o \
+				   xfs_buf_item.o \
+				   xfs_da_btree.o \
+				   xfs_dir.o \
+				   xfs_dir2.o \
+				   xfs_dir2_block.o \
+				   xfs_dir2_data.o \
+				   xfs_dir2_leaf.o \
+				   xfs_dir2_node.o \
+				   xfs_dir2_sf.o \
+				   xfs_dir_leaf.o \
+				   xfs_error.o \
+				   xfs_extfree_item.o \
+				   xfs_fsops.o \
+				   xfs_ialloc.o \
+				   xfs_ialloc_btree.o \
+				   xfs_iget.o \
+				   xfs_inode.o \
+				   xfs_inode_item.o \
+				   xfs_iocore.o \
+				   xfs_iomap.o \
+				   xfs_itable.o \
+				   xfs_dfrag.o \
+				   xfs_log.o \
+				   xfs_log_recover.o \
+				   xfs_macros.o \
+				   xfs_mount.o \
+				   xfs_rename.o \
+				   xfs_trans.o \
+				   xfs_trans_ail.o \
+				   xfs_trans_buf.o \
+				   xfs_trans_extfree.o \
+				   xfs_trans_inode.o \
+				   xfs_trans_item.o \
+				   xfs_utils.o \
+				   xfs_vfsops.o \
+				   xfs_vnodeops.o \
+				   xfs_rw.o \
+				   xfs_dmops.o \
+				   xfs_qmops.o
+
+xfs-$(CONFIG_XFS_TRACE)		+= xfs_dir2_trace.o
+
+# Objects in linux-2.6/
+xfs-y				+= $(addprefix linux-2.6/, \
+				   kmem.o \
+				   xfs_aops.o \
+				   xfs_buf.o \
+				   xfs_file.o \
+				   xfs_fs_subr.o \
+				   xfs_globals.o \
+				   xfs_ioctl.o \
+				   xfs_iops.o \
+				   xfs_lrw.o \
+				   xfs_super.o \
+				   xfs_vfs.o \
+				   xfs_vnode.o)
+
+# Objects in support/
+xfs-y				+= $(addprefix support/, \
+				   debug.o \
+				   move.o \
+				   qsort.o \
+				   uuid.o)
+
+xfs-$(CONFIG_XFS_TRACE)		+= support/ktrace.o
+
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
new file mode 100644
index 0000000..364ea8c
--- /dev/null
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/blkdev.h>
+
+#include "time.h"
+#include "kmem.h"
+
+#define MAX_VMALLOCS	6
+#define MAX_SLAB_SIZE	0x20000
+
+
+void *
+kmem_alloc(size_t size, int flags)
+{
+	int	retries = 0;
+	int	lflags = kmem_flags_convert(flags);
+	void	*ptr;
+
+	do {
+		if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
+			ptr = kmalloc(size, lflags);
+		else
+			ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+			return ptr;
+		if (!(++retries % 100))
+			printk(KERN_ERR "XFS: possible memory allocation "
+					"deadlock in %s (mode:0x%x)\n",
+					__FUNCTION__, lflags);
+		blk_congestion_wait(WRITE, HZ/50);
+	} while (1);
+}
+
+void *
+kmem_zalloc(size_t size, int flags)
+{
+	void	*ptr;
+
+	ptr = kmem_alloc(size, flags);
+	if (ptr)
+		memset((char *)ptr, 0, (int)size);
+	return ptr;
+}
+
+void
+kmem_free(void *ptr, size_t size)
+{
+	if (((unsigned long)ptr < VMALLOC_START) ||
+	    ((unsigned long)ptr >= VMALLOC_END)) {
+		kfree(ptr);
+	} else {
+		vfree(ptr);
+	}
+}
+
+void *
+kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
+{
+	void	*new;
+
+	new = kmem_alloc(newsize, flags);
+	if (ptr) {
+		if (new)
+			memcpy(new, ptr,
+				((oldsize < newsize) ? oldsize : newsize));
+		kmem_free(ptr, oldsize);
+	}
+	return new;
+}
+
+void *
+kmem_zone_alloc(kmem_zone_t *zone, int flags)
+{
+	int	retries = 0;
+	int	lflags = kmem_flags_convert(flags);
+	void	*ptr;
+
+	do {
+		ptr = kmem_cache_alloc(zone, lflags);
+		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+			return ptr;
+		if (!(++retries % 100))
+			printk(KERN_ERR "XFS: possible memory allocation "
+					"deadlock in %s (mode:0x%x)\n",
+					__FUNCTION__, lflags);
+		blk_congestion_wait(WRITE, HZ/50);
+	} while (1);
+}
+
+void *
+kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+{
+	void	*ptr;
+
+	ptr = kmem_zone_alloc(zone, flags);
+	if (ptr)
+		memset((char *)ptr, 0, kmem_cache_size(zone));
+	return ptr;
+}
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
new file mode 100644
index 0000000..1397b66
--- /dev/null
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_KMEM_H__
+#define __XFS_SUPPORT_KMEM_H__
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+/*
+ * memory management routines
+ */
+#define KM_SLEEP	0x0001
+#define KM_NOSLEEP	0x0002
+#define KM_NOFS		0x0004
+#define KM_MAYFAIL	0x0008
+
+#define	kmem_zone	kmem_cache_s
+#define kmem_zone_t	kmem_cache_t
+
+typedef unsigned long xfs_pflags_t;
+
+#define PFLAGS_TEST_NOIO()              (current->flags & PF_NOIO)
+#define PFLAGS_TEST_FSTRANS()           (current->flags & PF_FSTRANS)
+
+#define PFLAGS_SET_NOIO() do {		\
+	current->flags |= PF_NOIO;	\
+} while (0)
+
+#define PFLAGS_CLEAR_NOIO() do {	\
+	current->flags &= ~PF_NOIO;	\
+} while (0)
+
+/* these could be nested, so we save state */
+#define PFLAGS_SET_FSTRANS(STATEP) do {	\
+	*(STATEP) = current->flags;	\
+	current->flags |= PF_FSTRANS;	\
+} while (0)
+
+#define PFLAGS_CLEAR_FSTRANS(STATEP) do { \
+	*(STATEP) = current->flags;	\
+	current->flags &= ~PF_FSTRANS;	\
+} while (0)
+
+/* Restore the PF_FSTRANS state to what was saved in STATEP */
+#define PFLAGS_RESTORE_FSTRANS(STATEP) do {     		\
+	current->flags = ((current->flags & ~PF_FSTRANS) |	\
+			  (*(STATEP) & PF_FSTRANS));		\
+} while (0)
+
+#define PFLAGS_DUP(OSTATEP, NSTATEP) do { \
+	*(NSTATEP) = *(OSTATEP);	\
+} while (0)
+
+static __inline unsigned int kmem_flags_convert(int flags)
+{
+	int	lflags = __GFP_NOWARN;	/* we'll report problems, if need be */
+
+#ifdef DEBUG
+	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
+		printk(KERN_WARNING
+		    "XFS: memory allocation with wrong flags (%x)\n", flags);
+		BUG();
+	}
+#endif
+
+	if (flags & KM_NOSLEEP) {
+		lflags |= GFP_ATOMIC;
+	} else {
+		lflags |= GFP_KERNEL;
+
+		/* avoid recursive callbacks to filesystem during transactions */
+		if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
+			lflags &= ~__GFP_FS;
+	}
+
+	return lflags;
+}
+
+static __inline kmem_zone_t *
+kmem_zone_init(int size, char *zone_name)
+{
+	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}
+
+static __inline void
+kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+	kmem_cache_free(zone, ptr);
+}
+
+static __inline void
+kmem_zone_destroy(kmem_zone_t *zone)
+{
+	if (zone && kmem_cache_destroy(zone))
+		BUG();
+}
+
+extern void	    *kmem_zone_zalloc(kmem_zone_t *, int);
+extern void	    *kmem_zone_alloc(kmem_zone_t *, int);
+
+extern void	    *kmem_alloc(size_t, int);
+extern void	    *kmem_realloc(void *, size_t, size_t, int);
+extern void	    *kmem_zalloc(size_t, int);
+extern void         kmem_free(void *, size_t);
+
+typedef struct shrinker *kmem_shaker_t;
+typedef int (*kmem_shake_func_t)(int, unsigned int);
+
+static __inline kmem_shaker_t
+kmem_shake_register(kmem_shake_func_t sfunc)
+{
+	return set_shrinker(DEFAULT_SEEKS, sfunc);
+}
+
+static __inline void
+kmem_shake_deregister(kmem_shaker_t shrinker)
+{
+	remove_shrinker(shrinker);
+}
+
+static __inline int
+kmem_shake_allow(unsigned int gfp_mask)
+{
+	return (gfp_mask & __GFP_WAIT);
+}
+
+#endif /* __XFS_SUPPORT_KMEM_H__ */
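+
+/*
+ * Typical use of the zone wrappers above (illustrative only; the zone name
+ * and struct xfs_foo are hypothetical):
+ *
+ *	static kmem_zone_t *xfs_foo_zone;
+ *
+ *	xfs_foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
+ *	foo = kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);
+ *	...
+ *	kmem_zone_free(xfs_foo_zone, foo);
+ *	kmem_zone_destroy(xfs_foo_zone);
+ *
+ * KM_SLEEP allocations retry until they succeed (see kmem.c), so only
+ * KM_NOSLEEP/KM_MAYFAIL callers need to handle a NULL return.
+ */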
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h
new file mode 100644
index 0000000..d2c11a0
--- /dev/null
+++ b/fs/xfs/linux-2.6/mrlock.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_MRLOCK_H__
+#define __XFS_SUPPORT_MRLOCK_H__
+
+#include <linux/rwsem.h>
+
+enum { MR_NONE, MR_ACCESS, MR_UPDATE };
+
+typedef struct {
+	struct rw_semaphore	mr_lock;
+	int			mr_writer;
+} mrlock_t;
+
+#define mrinit(mrp, name)	\
+	( (mrp)->mr_writer = 0, init_rwsem(&(mrp)->mr_lock) )
+#define mrlock_init(mrp, t,n,s)	mrinit(mrp, n)
+#define mrfree(mrp)		do { } while (0)
+#define mraccess(mrp)		mraccessf(mrp, 0)
+#define mrupdate(mrp)		mrupdatef(mrp, 0)
+
+static inline void mraccessf(mrlock_t *mrp, int flags)
+{
+	down_read(&mrp->mr_lock);
+}
+
+static inline void mrupdatef(mrlock_t *mrp, int flags)
+{
+	down_write(&mrp->mr_lock);
+	mrp->mr_writer = 1;
+}
+
+static inline int mrtryaccess(mrlock_t *mrp)
+{
+	return down_read_trylock(&mrp->mr_lock);
+}
+
+static inline int mrtryupdate(mrlock_t *mrp)
+{
+	if (!down_write_trylock(&mrp->mr_lock))
+		return 0;
+	mrp->mr_writer = 1;
+	return 1;
+}
+
+static inline void mrunlock(mrlock_t *mrp)
+{
+	if (mrp->mr_writer) {
+		mrp->mr_writer = 0;
+		up_write(&mrp->mr_lock);
+	} else {
+		up_read(&mrp->mr_lock);
+	}
+}
+
+static inline void mrdemote(mrlock_t *mrp)
+{
+	mrp->mr_writer = 0;
+	downgrade_write(&mrp->mr_lock);
+}
+
+#ifdef DEBUG
+/*
+ * Debug-only routine: without some platform-specific asm code, we can
+ * now only answer requests regarding whether we hold the lock for write
+ * (reader state is outside our visibility; we only track writer state).
+ * Note: this means !ismrlocked would give false positives, so don't do that.
+ */
+static inline int ismrlocked(mrlock_t *mrp, int type)
+{
+	if (mrp && type == MR_UPDATE)
+		return mrp->mr_writer;
+	return 1;
+}
+#endif
+
+#endif /* __XFS_SUPPORT_MRLOCK_H__ */
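+
+/*
+ * Usage mirrors the IRIX multi-reader lock this replaces (illustrative
+ * only; "ilock" is a hypothetical mrlock_t).  A single mrunlock() works
+ * for either side because mr_writer records whether the lock was taken
+ * for update:
+ *
+ *	mrinit(&ilock, "ilock");
+ *
+ *	mraccess(&ilock);	(shared)
+ *	...
+ *	mrunlock(&ilock);
+ *
+ *	mrupdate(&ilock);	(exclusive)
+ *	...
+ *	mrdemote(&ilock);	(drop back to shared without releasing)
+ *	mrunlock(&ilock);
+ */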
diff --git a/fs/xfs/linux-2.6/mutex.h b/fs/xfs/linux-2.6/mutex.h
new file mode 100644
index 0000000..0b296bb
--- /dev/null
+++ b/fs/xfs/linux-2.6/mutex.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_MUTEX_H__
+#define __XFS_SUPPORT_MUTEX_H__
+
+#include <linux/spinlock.h>
+#include <asm/semaphore.h>
+
+/*
+ * Map the mutexes from IRIX to Linux semaphores.
+ *
+ * Destroy simply initializes the semaphore to -99, which should block
+ * all other callers.
+ */
+#define MUTEX_DEFAULT		0x0
+typedef struct semaphore	mutex_t;
+
+#define mutex_init(lock, type, name)		sema_init(lock, 1)
+#define mutex_destroy(lock)			sema_init(lock, -99)
+#define mutex_lock(lock, num)			down(lock)
+#define mutex_trylock(lock)			(down_trylock(lock) ? 0 : 1)
+#define mutex_unlock(lock)			up(lock)
+
+#endif /* __XFS_SUPPORT_MUTEX_H__ */
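+
+/*
+ * Callers keep the IRIX-style calling convention; on Linux everything
+ * reduces to the semaphore operations above (illustrative only; "qlock"
+ * is a hypothetical mutex_t, and the extra mutex_lock() argument is
+ * ignored here):
+ *
+ *	mutex_t qlock;
+ *
+ *	mutex_init(&qlock, MUTEX_DEFAULT, "qlock");
+ *	mutex_lock(&qlock, 0);
+ *	...
+ *	mutex_unlock(&qlock);
+ *	mutex_destroy(&qlock);
+ */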
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
new file mode 100644
index 0000000..30b67b4
--- /dev/null
+++ b/fs/xfs/linux-2.6/sema.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SEMA_H__
+#define __XFS_SUPPORT_SEMA_H__
+
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+/*
+ * sema_t structure just maps to struct semaphore in Linux kernel.
+ */
+
+typedef struct semaphore sema_t;
+
+#define init_sema(sp, val, c, d)	sema_init(sp, val)
+#define initsema(sp, val)		sema_init(sp, val)
+#define initnsema(sp, val, name)	sema_init(sp, val)
+#define psema(sp, b)			down(sp)
+#define vsema(sp)			up(sp)
+#define valusema(sp)			(atomic_read(&(sp)->count))
+#define freesema(sema)
+
+/*
+ * Map cpsema (try to get the sema) to down_trylock. We need to switch
+ * the return values since cpsema returns 1 (acquired) 0 (failed) and
+ * down_trylock returns the reverse 0 (acquired) 1 (failed).
+ */
+
+#define cpsema(sp)			(down_trylock(sp) ? 0 : 1)
+
+/*
+ * Didn't do cvsema(sp). Not sure how to map this to up/down/...
+ * It does a vsema if the value is < 0, otherwise nothing.
+ */
+
+#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --git a/fs/xfs/linux-2.6/spin.h b/fs/xfs/linux-2.6/spin.h
new file mode 100644
index 0000000..bcf60a0
--- /dev/null
+++ b/fs/xfs/linux-2.6/spin.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SPIN_H__
+#define __XFS_SUPPORT_SPIN_H__
+
+#include <linux/sched.h>	/* preempt needs this */
+#include <linux/spinlock.h>
+
+/*
+ * Map lock_t from IRIX to Linux spinlocks.
+ *
+ * We do not make use of lock_t from interrupt context, so we do not
+ * have to worry about disabling interrupts at all (unlike IRIX).
+ */
+
+typedef spinlock_t lock_t;
+
+#define SPLDECL(s)			unsigned long s
+
+#define spinlock_init(lock, name)	spin_lock_init(lock)
+#define	spinlock_destroy(lock)
+#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
+#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); (void)s; } while (0)
+#define nested_spinlock(lock)		spin_lock(lock)
+#define nested_spinunlock(lock)		spin_unlock(lock)
+
+#endif /* __XFS_SUPPORT_SPIN_H__ */
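+
+/*
+ * Usage keeps the IRIX pattern these wrappers emulate (illustrative only;
+ * "xlock" is a hypothetical lock_t).  The saved value "s" is unused on
+ * Linux since interrupts are never disabled here, but carrying it lets
+ * callers stay identical to the IRIX code:
+ *
+ *	lock_t xlock;
+ *	SPLDECL(s);
+ *
+ *	spinlock_init(&xlock, "xlock");
+ *	s = mutex_spinlock(&xlock);
+ *	...
+ *	mutex_spinunlock(&xlock, s);
+ */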
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
new file mode 100644
index 0000000..821d316
--- /dev/null
+++ b/fs/xfs/linux-2.6/sv.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SV_H__
+#define __XFS_SUPPORT_SV_H__
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+/*
+ * Synchronisation variables.
+ *
+ * (Parameters "pri", "svf" and "rts" are not implemented)
+ */
+
+typedef struct sv_s {
+	wait_queue_head_t waiters;
+} sv_t;
+
+#define SV_FIFO		0x0		/* sv_t is FIFO type */
+#define SV_LIFO		0x2		/* sv_t is LIFO type */
+#define SV_PRIO		0x4		/* sv_t is PRIO type */
+#define SV_KEYED	0x6		/* sv_t is KEYED type */
+#define SV_DEFAULT      SV_FIFO
+
+
+static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
+			     unsigned long timeout)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	add_wait_queue_exclusive(&sv->waiters, &wait);
+	__set_current_state(state);
+	spin_unlock(lock);
+
+	schedule_timeout(timeout);
+
+	remove_wait_queue(&sv->waiters, &wait);
+}
+
+#define init_sv(sv,type,name,flag) \
+	init_waitqueue_head(&(sv)->waiters)
+#define sv_init(sv,flag,name) \
+	init_waitqueue_head(&(sv)->waiters)
+#define sv_destroy(sv) \
+	/*NOTHING*/
+#define sv_wait(sv, pri, lock, s) \
+	_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_wait_sig(sv, pri, lock, s)   \
+	_sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
+	_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
+	_sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_signal(sv) \
+	wake_up(&(sv)->waiters)
+#define sv_broadcast(sv) \
+	wake_up_all(&(sv)->waiters)
+
+#endif /* __XFS_SUPPORT_SV_H__ */
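+
+/*
+ * A minimal sleep/wakeup sketch using the primitives above (illustrative
+ * only; "lock", "sv" and "cond" are hypothetical).  Note that _sv_wait()
+ * releases the spinlock and does not retake it, so waiters must relock
+ * and recheck the condition themselves:
+ *
+ *	spin_lock(&lock);
+ *	while (!cond) {
+ *		sv_wait(&sv, 0, &lock, s);	(returns with lock dropped)
+ *		spin_lock(&lock);
+ *	}
+ *	... cond is now true, lock held ...
+ *	spin_unlock(&lock);
+ *
+ * and on the waking side:
+ *
+ *	spin_lock(&lock);
+ *	cond = 1;
+ *	sv_signal(&sv);
+ *	spin_unlock(&lock);
+ */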
diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/linux-2.6/time.h
new file mode 100644
index 0000000..6c6fd0f
--- /dev/null
+++ b/fs/xfs/linux-2.6/time.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_TIME_H__
+#define __XFS_SUPPORT_TIME_H__
+
+#include <linux/sched.h>
+#include <linux/time.h>
+
+typedef struct timespec timespec_t;
+
+static inline void delay(long ticks)
+{
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(ticks);
+}
+
+static inline void nanotime(struct timespec *tvp)
+{
+	*tvp = CURRENT_TIME;
+}
+
+#endif /* __XFS_SUPPORT_TIME_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
new file mode 100644
index 0000000..76a8475
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_trans.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+#include "xfs_iomap.h"
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+
+STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
+STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
+		struct writeback_control *wbc, void *, int, int);
+
+#if defined(XFS_RW_TRACE)
+void
+xfs_page_trace(
+	int		tag,
+	struct inode	*inode,
+	struct page	*page,
+	int		mask)
+{
+	xfs_inode_t	*ip;
+	bhv_desc_t	*bdp;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	loff_t		isize = i_size_read(inode);
+	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	int		delalloc = -1, unmapped = -1, unwritten = -1;
+
+	if (page_has_buffers(page))
+		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+
+	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
+	ip = XFS_BHVTOI(bdp);
+	if (!ip->i_rwtrace)
+		return;
+
+	ktrace_enter(ip->i_rwtrace,
+		(void *)((unsigned long)tag),
+		(void *)ip,
+		(void *)inode,
+		(void *)page,
+		(void *)((unsigned long)mask),
+		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(isize & 0xffffffff)),
+		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(offset & 0xffffffff)),
+		(void *)((unsigned long)delalloc),
+		(void *)((unsigned long)unmapped),
+		(void *)((unsigned long)unwritten),
+		(void *)NULL,
+		(void *)NULL);
+}
+#else
+#define xfs_page_trace(tag, inode, page, mask)
+#endif
+
+void
+linvfs_unwritten_done(
+	struct buffer_head	*bh,
+	int			uptodate)
+{
+	xfs_buf_t		*pb = (xfs_buf_t *)bh->b_private;
+
+	ASSERT(buffer_unwritten(bh));
+	bh->b_end_io = NULL;
+	clear_buffer_unwritten(bh);
+	if (!uptodate)
+		pagebuf_ioerror(pb, EIO);
+	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
+		pagebuf_iodone(pb, 1, 1);
+	}
+	end_buffer_async_write(bh, uptodate);
+}
+
+/*
+ * Issue transactions to convert a buffer range from unwritten
+ * to written extents (buffered IO).
+ */
+STATIC void
+linvfs_unwritten_convert(
+	xfs_buf_t	*bp)
+{
+	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
+	int		error;
+
+	BUG_ON(atomic_read(&bp->pb_hold) < 1);
+	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
+			BMAPI_UNWRITTEN, NULL, NULL, error);
+	XFS_BUF_SET_FSPRIVATE(bp, NULL);
+	XFS_BUF_CLR_IODONE_FUNC(bp);
+	XFS_BUF_UNDATAIO(bp);
+	iput(LINVFS_GET_IP(vp));
+	pagebuf_iodone(bp, 0, 0);
+}
+
+/*
+ * Issue transactions to convert a buffer range from unwritten
+ * to written extents (direct IO).
+ */
+STATIC void
+linvfs_unwritten_convert_direct(
+	struct inode	*inode,
+	loff_t		offset,
+	ssize_t		size,
+	void		*private)
+{
+	ASSERT(!private || inode == (struct inode *)private);
+
+	/* private indicates an unwritten extent lay beneath this IO */
+	if (private && size > 0) {
+		vnode_t	*vp = LINVFS_GET_VP(inode);
+		int	error;
+
+		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+	}
+}
+
+STATIC int
+xfs_map_blocks(
+	struct inode		*inode,
+	loff_t			offset,
+	ssize_t			count,
+	xfs_iomap_t		*mapp,
+	int			flags)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+	int			error, nmaps = 1;
+
+	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
+		VMODIFY(vp);
+	return -error;
+}
+
+/*
+ * Finds the mapping in the block map array @iomapp that corresponds
+ * to the given @offset within a @page.
+ */
+STATIC xfs_iomap_t *
+xfs_offset_to_map(
+	struct page		*page,
+	xfs_iomap_t		*iomapp,
+	unsigned long		offset)
+{
+	loff_t			full_offset;	/* offset from start of file */
+
+	ASSERT(offset < PAGE_CACHE_SIZE);
+
+	full_offset = page->index;		/* NB: using 64bit number */
+	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
+	full_offset += offset;			/* offset from page start */
+
+	if (full_offset < iomapp->iomap_offset)
+		return NULL;
+	if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
+		return iomapp;
+	return NULL;
+}
+
+STATIC void
+xfs_map_at_offset(
+	struct page		*page,
+	struct buffer_head	*bh,
+	unsigned long		offset,
+	int			block_bits,
+	xfs_iomap_t		*iomapp)
+{
+	xfs_daddr_t		bn;
+	loff_t			delta;
+	int			sector_shift;
+
+	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
+	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
+	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
+
+	delta = page->index;
+	delta <<= PAGE_CACHE_SHIFT;
+	delta += offset;
+	delta -= iomapp->iomap_offset;
+	delta >>= block_bits;
+
+	sector_shift = block_bits - BBSHIFT;
+	bn = iomapp->iomap_bn >> sector_shift;
+	bn += delta;
+	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
+
+	lock_buffer(bh);
+	bh->b_blocknr = bn;
+	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+	set_buffer_mapped(bh);
+	clear_buffer_delay(bh);
+}
+
+/*
+ * Look for a page at index which is unlocked and contains our
+ * unwritten extent flagged buffers at its head.  Returns page
+ * locked and with an extra reference count, and length of the
+ * unwritten extent component on this page that we can write,
+ * in units of filesystem blocks.
+ */
+STATIC struct page *
+xfs_probe_unwritten_page(
+	struct address_space	*mapping,
+	pgoff_t			index,
+	xfs_iomap_t		*iomapp,
+	xfs_buf_t		*pb,
+	unsigned long		max_offset,
+	unsigned long		*fsbs,
+	unsigned int            bbits)
+{
+	struct page		*page;
+
+	page = find_trylock_page(mapping, index);
+	if (!page)
+		return NULL;
+	if (PageWriteback(page))
+		goto out;
+
+	if (page->mapping && page_has_buffers(page)) {
+		struct buffer_head	*bh, *head;
+		unsigned long		p_offset = 0;
+
+		*fsbs = 0;
+		bh = head = page_buffers(page);
+		do {
+			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
+				break;
+			if (!xfs_offset_to_map(page, iomapp, p_offset))
+				break;
+			if (p_offset >= max_offset)
+				break;
+			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
+			set_buffer_unwritten_io(bh);
+			bh->b_private = pb;
+			p_offset += bh->b_size;
+			(*fsbs)++;
+		} while ((bh = bh->b_this_page) != head);
+
+		if (p_offset)
+			return page;
+	}
+
+out:
+	unlock_page(page);
+	return NULL;
+}
+
+/*
+ * Look for a page at index which is unlocked and not mapped
+ * yet - clustering for mmap write case.
+ */
+STATIC unsigned int
+xfs_probe_unmapped_page(
+	struct address_space	*mapping,
+	pgoff_t			index,
+	unsigned int		pg_offset)
+{
+	struct page		*page;
+	int			ret = 0;
+
+	page = find_trylock_page(mapping, index);
+	if (!page)
+		return 0;
+	if (PageWriteback(page))
+		goto out;
+
+	if (page->mapping && PageDirty(page)) {
+		if (page_has_buffers(page)) {
+			struct buffer_head	*bh, *head;
+
+			bh = head = page_buffers(page);
+			do {
+				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+					break;
+				ret += bh->b_size;
+				if (ret >= pg_offset)
+					break;
+			} while ((bh = bh->b_this_page) != head);
+		} else
+			ret = PAGE_CACHE_SIZE;
+	}
+
+out:
+	unlock_page(page);
+	return ret;
+}
+
+STATIC unsigned int
+xfs_probe_unmapped_cluster(
+	struct inode		*inode,
+	struct page		*startpage,
+	struct buffer_head	*bh,
+	struct buffer_head	*head)
+{
+	pgoff_t			tindex, tlast, tloff;
+	unsigned int		pg_offset, len, total = 0;
+	struct address_space	*mapping = inode->i_mapping;
+
+	/* First sum forwards in this page */
+	do {
+		if (buffer_mapped(bh))
+			break;
+		total += bh->b_size;
+	} while ((bh = bh->b_this_page) != head);
+
+	/* If we reached the end of the page, sum forwards in
+	 * following pages.
+	 */
+	if (bh == head) {
+		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+		/* Prune this back to avoid pathological behavior */
+		tloff = min(tlast, startpage->index + 64);
+		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
+			len = xfs_probe_unmapped_page(mapping, tindex,
+							PAGE_CACHE_SIZE);
+			if (!len)
+				return total;
+			total += len;
+		}
+		if (tindex == tlast &&
+		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
+			total += xfs_probe_unmapped_page(mapping,
+							tindex, pg_offset);
+		}
+	}
+	return total;
+}
+
+/*
+ * Probe for a given page (index) in the inode and test if it is delayed
+ * and without unwritten buffers.  Returns page locked and with an extra
+ * reference count.
+ */
+STATIC struct page *
+xfs_probe_delalloc_page(
+	struct inode		*inode,
+	pgoff_t			index)
+{
+	struct page		*page;
+
+	page = find_trylock_page(inode->i_mapping, index);
+	if (!page)
+		return NULL;
+	if (PageWriteback(page))
+		goto out;
+
+	if (page->mapping && page_has_buffers(page)) {
+		struct buffer_head	*bh, *head;
+		int			acceptable = 0;
+
+		bh = head = page_buffers(page);
+		do {
+			if (buffer_unwritten(bh)) {
+				acceptable = 0;
+				break;
+			} else if (buffer_delay(bh)) {
+				acceptable = 1;
+			}
+		} while ((bh = bh->b_this_page) != head);
+
+		if (acceptable)
+			return page;
+	}
+
+out:
+	unlock_page(page);
+	return NULL;
+}
+
+STATIC int
+xfs_map_unwritten(
+	struct inode		*inode,
+	struct page		*start_page,
+	struct buffer_head	*head,
+	struct buffer_head	*curr,
+	unsigned long		p_offset,
+	int			block_bits,
+	xfs_iomap_t		*iomapp,
+	struct writeback_control *wbc,
+	int			startio,
+	int			all_bh)
+{
+	struct buffer_head	*bh = curr;
+	xfs_iomap_t		*tmp;
+	xfs_buf_t		*pb;
+	loff_t			offset, size;
+	unsigned long		nblocks = 0;
+
+	offset = start_page->index;
+	offset <<= PAGE_CACHE_SHIFT;
+	offset += p_offset;
+
+	/* get an "empty" pagebuf to manage IO completion
+	 * Proper values will be set before returning */
+	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
+	if (!pb)
+		return -EAGAIN;
+
+	/* Take a reference to the inode to prevent it from
+	 * being reclaimed while we have outstanding unwritten
+	 * extent IO on it.
+	 */
+	if ((igrab(inode)) != inode) {
+		pagebuf_free(pb);
+		return -EAGAIN;
+	}
+
+	/* Set the count to 1 initially, this will stop an I/O
+	 * completion callout which happens before we have started
+	 * all the I/O from calling pagebuf_iodone too early.
+	 */
+	atomic_set(&pb->pb_io_remaining, 1);
+
+	/* First map forwards in the page consecutive buffers
+	 * covering this unwritten extent
+	 */
+	do {
+		if (!buffer_unwritten(bh))
+			break;
+		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
+		if (!tmp)
+			break;
+		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
+		set_buffer_unwritten_io(bh);
+		bh->b_private = pb;
+		p_offset += bh->b_size;
+		nblocks++;
+	} while ((bh = bh->b_this_page) != head);
+
+	atomic_add(nblocks, &pb->pb_io_remaining);
+
+	/* If we reached the end of the page, map forwards in any
+	 * following pages which are also covered by this extent.
+	 */
+	if (bh == head) {
+		struct address_space	*mapping = inode->i_mapping;
+		pgoff_t			tindex, tloff, tlast;
+		unsigned long		bs;
+		unsigned int		pg_offset, bbits = inode->i_blkbits;
+		struct page		*page;
+
+		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
+		tloff = min(tlast, tloff);
+		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
+			page = xfs_probe_unwritten_page(mapping,
+						tindex, iomapp, pb,
+						PAGE_CACHE_SIZE, &bs, bbits);
+			if (!page)
+				break;
+			nblocks += bs;
+			atomic_add(bs, &pb->pb_io_remaining);
+			xfs_convert_page(inode, page, iomapp, wbc, pb,
+							startio, all_bh);
+			/* stop if converting the next page might add
+			 * enough blocks that the corresponding byte
+			 * count won't fit in our ulong page buf length */
+			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+				goto enough;
+		}
+
+		if (tindex == tlast &&
+		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
+			page = xfs_probe_unwritten_page(mapping,
+							tindex, iomapp, pb,
+							pg_offset, &bs, bbits);
+			if (page) {
+				nblocks += bs;
+				atomic_add(bs, &pb->pb_io_remaining);
+				xfs_convert_page(inode, page, iomapp, wbc, pb,
+							startio, all_bh);
+				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+					goto enough;
+			}
+		}
+	}
+
+enough:
+	size = nblocks;		/* NB: using 64bit number here */
+	size <<= block_bits;	/* convert fsb's to byte range */
+
+	XFS_BUF_DATAIO(pb);
+	XFS_BUF_ASYNC(pb);
+	XFS_BUF_SET_SIZE(pb, size);
+	XFS_BUF_SET_COUNT(pb, size);
+	XFS_BUF_SET_OFFSET(pb, offset);
+	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
+	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
+
+	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
+		pagebuf_iodone(pb, 1, 1);
+	}
+
+	return 0;
+}
+
+STATIC void
+xfs_submit_page(
+	struct page		*page,
+	struct writeback_control *wbc,
+	struct buffer_head	*bh_arr[],
+	int			bh_count,
+	int			probed_page,
+	int			clear_dirty)
+{
+	struct buffer_head	*bh;
+	int			i;
+
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+	if (clear_dirty)
+		clear_page_dirty(page);
+	unlock_page(page);
+
+	if (bh_count) {
+		for (i = 0; i < bh_count; i++) {
+			bh = bh_arr[i];
+			mark_buffer_async_write(bh);
+			if (buffer_unwritten(bh))
+				set_buffer_unwritten_io(bh);
+			set_buffer_uptodate(bh);
+			clear_buffer_dirty(bh);
+		}
+
+		for (i = 0; i < bh_count; i++)
+			submit_bh(WRITE, bh_arr[i]);
+
+		if (probed_page && clear_dirty)
+			wbc->nr_to_write--;	/* Wrote an "extra" page */
+	} else {
+		end_page_writeback(page);
+		wbc->pages_skipped++;	/* We didn't write this page */
+	}
+}
+
+/*
+ * Allocate & map buffers for page given the extent map. Write it out.
+ * Except for the original page of a writepage, this is called on
+ * delalloc/unwritten pages only; for the original page it is possible
+ * that the page has no mapping at all.
+ */
+STATIC void
+xfs_convert_page(
+	struct inode		*inode,
+	struct page		*page,
+	xfs_iomap_t		*iomapp,
+	struct writeback_control *wbc,
+	void			*private,
+	int			startio,
+	int			all_bh)
+{
+	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
+	xfs_iomap_t		*mp = iomapp, *tmp;
+	unsigned long		end, offset;
+	pgoff_t			end_index;
+	int			i = 0, index = 0;
+	int			bbits = inode->i_blkbits;
+
+	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+	if (page->index < end_index) {
+		end = PAGE_CACHE_SIZE;
+	} else {
+		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
+	}
+	bh = head = page_buffers(page);
+	do {
+		offset = i << bbits;
+		if (offset >= end)
+			break;
+		if (!(PageUptodate(page) || buffer_uptodate(bh)))
+			continue;
+		if (buffer_mapped(bh) && all_bh &&
+		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
+			if (startio) {
+				lock_buffer(bh);
+				bh_arr[index++] = bh;
+			}
+			continue;
+		}
+		tmp = xfs_offset_to_map(page, mp, offset);
+		if (!tmp)
+			continue;
+		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
+		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
+
+		/* If this is a new unwritten extent buffer (i.e. one
+		 * that we haven't passed in private data for), we must
+		 * now map this buffer too.
+		 */
+		if (buffer_unwritten(bh) && !bh->b_end_io) {
+			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
+			xfs_map_unwritten(inode, page, head, bh, offset,
+					bbits, tmp, wbc, startio, all_bh);
+		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
+			xfs_map_at_offset(page, bh, offset, bbits, tmp);
+			if (buffer_unwritten(bh)) {
+				set_buffer_unwritten_io(bh);
+				bh->b_private = private;
+				ASSERT(private);
+			}
+		}
+		if (startio) {
+			bh_arr[index++] = bh;
+		} else {
+			set_buffer_dirty(bh);
+			unlock_buffer(bh);
+			mark_buffer_dirty(bh);
+		}
+	} while (i++, (bh = bh->b_this_page) != head);
+
+	if (startio) {
+		xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
+	} else {
+		unlock_page(page);
+	}
+}
+
+/*
+ * Convert & write out a cluster of pages in the same extent as defined
+ * by mp and following the start page.
+ */
+STATIC void
+xfs_cluster_write(
+	struct inode		*inode,
+	pgoff_t			tindex,
+	xfs_iomap_t		*iomapp,
+	struct writeback_control *wbc,
+	int			startio,
+	int			all_bh,
+	pgoff_t			tlast)
+{
+	struct page		*page;
+
+	for (; tindex <= tlast; tindex++) {
+		page = xfs_probe_delalloc_page(inode, tindex);
+		if (!page)
+			break;
+		xfs_convert_page(inode, page, iomapp, wbc, NULL,
+				startio, all_bh);
+	}
+}
+
+/*
+ * Calling this without startio set means we are being asked to make a dirty
+ * page ready for freeing its buffers.  When called with startio set then
+ * we are coming from writepage.
+ *
+ * When called with startio set it is important that we write the WHOLE
+ * page if possible.
+ * The bh->b_state's cannot know if any of the blocks or which block for
+ * that matter are dirty due to mmap writes, and therefore bh uptodate is
+ * only valid if the page itself isn't completely uptodate.  Some layers
+ * may clear the page dirty flag prior to calling write page, under the
+ * assumption the entire page will be written out; by not writing out the
+ * whole page the page can be reused before all valid dirty data is
+ * written out.  Note: in the case of a page that has been dirtied by an
+ * mmap write but only partially set up by block_prepare_write, the
+ * bh->b_state's will not agree and only the ones set up by BPW/BCW will
+ * have valid state; thus the whole page must be written out.
+ */
+
+STATIC int
+xfs_page_state_convert(
+	struct inode	*inode,
+	struct page	*page,
+	struct writeback_control *wbc,
+	int		startio,
+	int		unmapped) /* also implies page uptodate */
+{
+	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
+	xfs_iomap_t		*iomp, iomap;
+	loff_t			offset;
+	unsigned long           p_offset = 0;
+	__uint64_t              end_offset;
+	pgoff_t                 end_index, last_index, tlast;
+	int			len, err, i, cnt = 0, uptodate = 1;
+	int			flags = startio ? 0 : BMAPI_TRYLOCK;
+	int			page_dirty, delalloc = 0;
+
+	/* Is this page beyond the end of the file? */
+	offset = i_size_read(inode);
+	end_index = offset >> PAGE_CACHE_SHIFT;
+	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
+	if (page->index >= end_index) {
+		if ((page->index >= end_index + 1) ||
+		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
+			err = -EIO;
+			goto error;
+		}
+	}
+
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	end_offset = min_t(unsigned long long,
+			offset + PAGE_CACHE_SIZE, i_size_read(inode));
+
+	bh = head = page_buffers(page);
+	iomp = NULL;
+
+	/*
+	 * page_dirty is initially a count of buffers on the page and
+	 * is decremented as we move each into a cleanable state.
+	 */
+	len = bh->b_size;
+	page_dirty = PAGE_CACHE_SIZE / len;
+
+	do {
+		if (offset >= end_offset)
+			break;
+		if (!buffer_uptodate(bh))
+			uptodate = 0;
+		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+			continue;
+
+		if (iomp) {
+			iomp = xfs_offset_to_map(page, &iomap, p_offset);
+		}
+
+		/*
+		 * First case, map an unwritten extent and prepare for
+		 * extent state conversion transaction on completion.
+		 */
+		if (buffer_unwritten(bh)) {
+			if (!startio)
+				continue;
+			if (!iomp) {
+				err = xfs_map_blocks(inode, offset, len, &iomap,
+						BMAPI_READ|BMAPI_IGNSTATE);
+				if (err) {
+					goto error;
+				}
+				iomp = xfs_offset_to_map(page, &iomap,
+								p_offset);
+			}
+			if (iomp) {
+				if (!bh->b_end_io) {
+					err = xfs_map_unwritten(inode, page,
+							head, bh, p_offset,
+							inode->i_blkbits, iomp,
+							wbc, startio, unmapped);
+					if (err) {
+						goto error;
+					}
+				} else {
+					set_bit(BH_Lock, &bh->b_state);
+				}
+				BUG_ON(!buffer_locked(bh));
+				bh_arr[cnt++] = bh;
+				page_dirty--;
+			}
+		/*
+		 * Second case, allocate space for a delalloc buffer.
+		 * We can return EAGAIN here in the release page case.
+		 */
+		} else if (buffer_delay(bh)) {
+			if (!iomp) {
+				delalloc = 1;
+				err = xfs_map_blocks(inode, offset, len, &iomap,
+						BMAPI_ALLOCATE | flags);
+				if (err) {
+					goto error;
+				}
+				iomp = xfs_offset_to_map(page, &iomap,
+								p_offset);
+			}
+			if (iomp) {
+				xfs_map_at_offset(page, bh, p_offset,
+						inode->i_blkbits, iomp);
+				if (startio) {
+					bh_arr[cnt++] = bh;
+				} else {
+					set_buffer_dirty(bh);
+					unlock_buffer(bh);
+					mark_buffer_dirty(bh);
+				}
+				page_dirty--;
+			}
+		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+			   (unmapped || startio)) {
+
+			if (!buffer_mapped(bh)) {
+				int	size;
+
+				/*
+				 * Getting here implies an unmapped buffer
+				 * was found, and we are in a path where we
+				 * need to write the whole page out.
+				 */
+				if (!iomp) {
+					size = xfs_probe_unmapped_cluster(
+							inode, page, bh, head);
+					err = xfs_map_blocks(inode, offset,
+							size, &iomap,
+							BMAPI_WRITE|BMAPI_MMAP);
+					if (err) {
+						goto error;
+					}
+					iomp = xfs_offset_to_map(page, &iomap,
+								     p_offset);
+				}
+				if (iomp) {
+					xfs_map_at_offset(page,
+							bh, p_offset,
+							inode->i_blkbits, iomp);
+					if (startio) {
+						bh_arr[cnt++] = bh;
+					} else {
+						set_buffer_dirty(bh);
+						unlock_buffer(bh);
+						mark_buffer_dirty(bh);
+					}
+					page_dirty--;
+				}
+			} else if (startio) {
+				if (buffer_uptodate(bh) &&
+				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
+					bh_arr[cnt++] = bh;
+					page_dirty--;
+				}
+			}
+		}
+	} while (offset += len, p_offset += len,
+		((bh = bh->b_this_page) != head));
+
+	if (uptodate && bh == head)
+		SetPageUptodate(page);
+
+	if (startio)
+		xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
+
+	if (iomp) {
+		tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+					PAGE_CACHE_SHIFT;
+		if (delalloc && (tlast > last_index))
+			tlast = last_index;
+		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
+					startio, unmapped, tlast);
+	}
+
+	return page_dirty;
+
+error:
+	for (i = 0; i < cnt; i++) {
+		unlock_buffer(bh_arr[i]);
+	}
+
+	/*
+	 * If it's delalloc and we have nowhere to put it,
+	 * throw it away, unless the lower layers told
+	 * us to try again.
+	 */
+	if (err != -EAGAIN) {
+		if (!unmapped) {
+			block_invalidatepage(page, 0);
+		}
+		ClearPageUptodate(page);
+	}
+	return err;
+}
+
+STATIC int
+__linvfs_get_block(
+	struct inode		*inode,
+	sector_t		iblock,
+	unsigned long		blocks,
+	struct buffer_head	*bh_result,
+	int			create,
+	int			direct,
+	bmapi_flags_t		flags)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+	xfs_iomap_t		iomap;
+	int			retpbbm = 1;
+	int			error;
+	ssize_t			size;
+	loff_t			offset = (loff_t)iblock << inode->i_blkbits;
+
+	if (blocks)
+		size = blocks << inode->i_blkbits;
+	else
+		size = 1 << inode->i_blkbits;
+
+	VOP_BMAP(vp, offset, size,
+		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+	if (error)
+		return -error;
+
+	if (retpbbm == 0)
+		return 0;
+
+	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+		xfs_daddr_t		bn;
+		loff_t			delta;
+
+		/* For unwritten extents do not report a disk address on
+		 * the read case (treat as if we're reading into a hole).
+		 */
+		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+			delta = offset - iomap.iomap_offset;
+			delta >>= inode->i_blkbits;
+
+			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
+			bn += delta;
+			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
+			bh_result->b_blocknr = bn;
+			set_buffer_mapped(bh_result);
+		}
+		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+			if (direct)
+				bh_result->b_private = inode;
+			set_buffer_unwritten(bh_result);
+			set_buffer_delay(bh_result);
+		}
+	}
+
+	/* If this is a realtime file, data might be on a new device */
+	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+
+	/* If we previously allocated a block out beyond eof and
+	 * we are now coming back to use it then we will need to
+	 * flag it as new even if it has a disk address.
+	 */
+	if (create &&
+	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
+	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
+		set_buffer_new(bh_result);
+	}
+
+	if (iomap.iomap_flags & IOMAP_DELAY) {
+		BUG_ON(direct);
+		if (create) {
+			set_buffer_uptodate(bh_result);
+			set_buffer_mapped(bh_result);
+			set_buffer_delay(bh_result);
+		}
+	}
+
+	if (blocks) {
+		bh_result->b_size = (ssize_t)min(
+			(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
+			(loff_t)(blocks << inode->i_blkbits));
+	}
+
+	return 0;
+}
+
+int
+linvfs_get_block(
+	struct inode		*inode,
+	sector_t		iblock,
+	struct buffer_head	*bh_result,
+	int			create)
+{
+	return __linvfs_get_block(inode, iblock, 0, bh_result,
+					create, 0, BMAPI_WRITE);
+}
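+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * the generic block layer (block_prepare_write, mpage_readpage, ...) calls
+ * linvfs_get_block through the get_block_t interface one file block at a
+ * time, roughly as below; the inode and block number are hypothetical and
+ * read_block() merely stands in for whatever the caller does with the
+ * resulting mapping.
+ *
+ *	struct buffer_head	map_bh;
+ *
+ *	memset(&map_bh, 0, sizeof(map_bh));
+ *	if (!linvfs_get_block(inode, 3, &map_bh, 0) && buffer_mapped(&map_bh))
+ *		read_block(map_bh.b_bdev, map_bh.b_blocknr);
+ */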
+
+STATIC int
+linvfs_get_blocks_direct(
+	struct inode		*inode,
+	sector_t		iblock,
+	unsigned long		max_blocks,
+	struct buffer_head	*bh_result,
+	int			create)
+{
+	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
+					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+}
+
+STATIC ssize_t
+linvfs_direct_IO(
+	int			rw,
+	struct kiocb		*iocb,
+	const struct iovec	*iov,
+	loff_t			offset,
+	unsigned long		nr_segs)
+{
+	struct file	*file = iocb->ki_filp;
+	struct inode	*inode = file->f_mapping->host;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	xfs_iomap_t	iomap;
+	int		maps = 1;
+	int		error;
+
+	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
+	if (error)
+		return -error;
+
+	return blockdev_direct_IO_own_locking(rw, iocb, inode,
+		iomap.iomap_target->pbr_bdev,
+		iov, offset, nr_segs,
+		linvfs_get_blocks_direct,
+		linvfs_unwritten_convert_direct);
+}
+
+
+STATIC sector_t
+linvfs_bmap(
+	struct address_space	*mapping,
+	sector_t		block)
+{
+	struct inode		*inode = (struct inode *)mapping->host;
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+	int			error;
+
+	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
+
+	VOP_RWLOCK(vp, VRWLOCK_READ);
+	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
+	VOP_RWUNLOCK(vp, VRWLOCK_READ);
+	return generic_block_bmap(mapping, block, linvfs_get_block);
+}
+
+STATIC int
+linvfs_readpage(
+	struct file		*unused,
+	struct page		*page)
+{
+	return mpage_readpage(page, linvfs_get_block);
+}
+
+STATIC int
+linvfs_readpages(
+	struct file		*unused,
+	struct address_space	*mapping,
+	struct list_head	*pages,
+	unsigned		nr_pages)
+{
+	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
+}
+
+STATIC void
+xfs_count_page_state(
+	struct page		*page,
+	int			*delalloc,
+	int			*unmapped,
+	int			*unwritten)
+{
+	struct buffer_head	*bh, *head;
+
+	*delalloc = *unmapped = *unwritten = 0;
+
+	bh = head = page_buffers(page);
+	do {
+		if (buffer_uptodate(bh) && !buffer_mapped(bh))
+			(*unmapped) = 1;
+		else if (buffer_unwritten(bh) && !buffer_delay(bh))
+			clear_buffer_unwritten(bh);
+		else if (buffer_unwritten(bh))
+			(*unwritten) = 1;
+		else if (buffer_delay(bh))
+			(*delalloc) = 1;
+	} while ((bh = bh->b_this_page) != head);
+}
+
+
+/*
+ * writepage: Called from one of two places:
+ *
+ * 1. we are flushing a delalloc buffer head.
+ *
+ * 2. we are writing out a dirty page. Typically the page dirty
+ *    state is cleared before we get here. In this case it is
+ *    conceivable we have no buffer heads.
+ *
+ * For delalloc space on the page we need to allocate space and
+ * flush it. For unmapped buffer heads on the page we should
+ * allocate space if the page is uptodate. For any other dirty
+ * buffer heads on the page we should flush them.
+ *
+ * If we detect that a transaction would be required to flush
+ * the page, we have to check the process flags first, if we
+ * are already in a transaction or disk I/O during allocations
+ * is off, we need to fail the writepage and redirty the page.
+ */
+
+STATIC int
+linvfs_writepage(
+	struct page		*page,
+	struct writeback_control *wbc)
+{
+	int			error;
+	int			need_trans;
+	int			delalloc, unmapped, unwritten;
+	struct inode		*inode = page->mapping->host;
+
+	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+
+	/*
+	 * We need a transaction if:
+	 *  1. There are delalloc buffers on the page
+	 *  2. The page is uptodate and we have unmapped buffers
+	 *  3. The page is uptodate and we have no buffers
+	 *  4. There are unwritten buffers on the page
+	 */
+
+	if (!page_has_buffers(page)) {
+		unmapped = 1;
+		need_trans = 1;
+	} else {
+		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+		if (!PageUptodate(page))
+			unmapped = 0;
+		need_trans = delalloc + unmapped + unwritten;
+	}
+
+	/*
+	 * If we need a transaction and the process flags say
+	 * we are already in a transaction, or no IO is allowed
+	 * then mark the page dirty again and leave the page
+	 * as is.
+	 */
+	if (PFLAGS_TEST_FSTRANS() && need_trans)
+		goto out_fail;
+
+	/*
+	 * Delay hooking up buffer heads until we have
+	 * made our go/no-go decision.
+	 */
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+	/*
+	 * Convert delayed allocate, unwritten or unmapped space
+	 * to real space and flush out to disk.
+	 */
+	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
+	if (error == -EAGAIN)
+		goto out_fail;
+	if (unlikely(error < 0))
+		goto out_unlock;
+
+	return 0;
+
+out_fail:
+	redirty_page_for_writepage(wbc, page);
+	unlock_page(page);
+	return 0;
+out_unlock:
+	unlock_page(page);
+	return error;
+}
+
+/*
+ * Called to move a page into cleanable state - and from there
+ * to be released. Possibly the page is already clean. We always
+ * have buffer heads in this call.
+ *
+ * Returns non-zero if the page is ok to release, 0 otherwise.
+ *
+ * Possible scenarios are:
+ *
+ * 1. We are being called to release a page which has been written
+ *    to via regular I/O. buffer heads will be dirty and possibly
+ *    delalloc. If no delalloc buffer heads in this case then we
+ *    can just return zero.
+ *
+ * 2. We are called to release a page which has been written via
+ *    mmap, all we need to do is ensure there is no delalloc
+ *    state in the buffer heads, if not we can let the caller
+ *    free them and we should come back later via writepage.
+ */
+STATIC int
+linvfs_release_page(
+	struct page		*page,
+	int			gfp_mask)
+{
+	struct inode		*inode = page->mapping->host;
+	int			dirty, delalloc, unmapped, unwritten;
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = 1,
+	};
+
+	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
+
+	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+	if (!delalloc && !unwritten)
+		goto free_buffers;
+
+	if (!(gfp_mask & __GFP_FS))
+		return 0;
+
+	/* If we are already inside a transaction or the thread cannot
+	 * do I/O, we cannot release this page.
+	 */
+	if (PFLAGS_TEST_FSTRANS())
+		return 0;
+
+	/*
+	 * Convert delalloc space to real space, do not flush the
+	 * data out to disk, that will be done by the caller.
+	 * Never need to allocate space here - we will always
+	 * come back to writepage in that case.
+	 */
+	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
+	if (dirty == 0 && !unwritten)
+		goto free_buffers;
+	return 0;
+
+free_buffers:
+	return try_to_free_buffers(page);
+}
+
+STATIC int
+linvfs_prepare_write(
+	struct file		*file,
+	struct page		*page,
+	unsigned int		from,
+	unsigned int		to)
+{
+	return block_prepare_write(page, from, to, linvfs_get_block);
+}
+
+struct address_space_operations linvfs_aops = {
+	.readpage		= linvfs_readpage,
+	.readpages		= linvfs_readpages,
+	.writepage		= linvfs_writepage,
+	.sync_page		= block_sync_page,
+	.releasepage		= linvfs_release_page,
+	.prepare_write		= linvfs_prepare_write,
+	.commit_write		= generic_commit_write,
+	.bmap			= linvfs_bmap,
+	.direct_IO		= linvfs_direct_IO,
+};
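+
+/*
+ * Editor's note -- not part of the original source: the inode setup path
+ * elsewhere in the Linux glue code (not in this file) wires each regular
+ * file to this table, roughly:
+ *
+ *	inode->i_mapping->a_ops = &linvfs_aops;
+ *
+ * after which the VFS and VM call back into the routines above for all
+ * page cache I/O on the file.
+ */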
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
new file mode 100644
index 0000000..23e0eb6
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -0,0 +1,1980 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ *	The xfs_buf.c code provides an abstract buffer cache model on top
+ *	of the Linux page cache.  Cached metadata blocks for a file system
+ *	are hashed to the inode for the block device.  xfs_buf.c assembles
+ *	buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
+ *
+ *      Written by Steve Lord, Jim Mostek, Russell Cattelan
+ *		    and Rajagopal Ananthanarayanan ("ananth") at SGI.
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bio.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/percpu.h>
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+
+#include "xfs_linux.h"
+
+/*
+ * File wide globals
+ */
+
+STATIC kmem_cache_t *pagebuf_cache;
+STATIC kmem_shaker_t pagebuf_shake;
+STATIC int pagebuf_daemon_wakeup(int, unsigned int);
+STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+STATIC struct workqueue_struct *pagebuf_logio_workqueue;
+STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
+
+/*
+ * Pagebuf debugging
+ */
+
+#ifdef PAGEBUF_TRACE
+void
+pagebuf_trace(
+	xfs_buf_t	*pb,
+	char		*id,
+	void		*data,
+	void		*ra)
+{
+	ktrace_enter(pagebuf_trace_buf,
+		pb, id,
+		(void *)(unsigned long)pb->pb_flags,
+		(void *)(unsigned long)pb->pb_hold.counter,
+		(void *)(unsigned long)pb->pb_sema.count.counter,
+		(void *)current,
+		data, ra,
+		(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
+		(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
+		(void *)(unsigned long)pb->pb_buffer_length,
+		NULL, NULL, NULL, NULL, NULL);
+}
+ktrace_t *pagebuf_trace_buf;
+#define PAGEBUF_TRACE_SIZE	4096
+#define PB_TRACE(pb, id, data)	\
+	pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+#else
+#define PB_TRACE(pb, id, data)	do { } while (0)
+#endif
+
+#ifdef PAGEBUF_LOCK_TRACKING
+# define PB_SET_OWNER(pb)	((pb)->pb_last_holder = current->pid)
+# define PB_CLEAR_OWNER(pb)	((pb)->pb_last_holder = -1)
+# define PB_GET_OWNER(pb)	((pb)->pb_last_holder)
+#else
+# define PB_SET_OWNER(pb)	do { } while (0)
+# define PB_CLEAR_OWNER(pb)	do { } while (0)
+# define PB_GET_OWNER(pb)	do { } while (0)
+#endif
+
+/*
+ * Pagebuf allocation / freeing.
+ */
+
+#define pb_to_gfp(flags) \
+	((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
+	  ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+
+#define pb_to_km(flags) \
+	 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+
+
+#define pagebuf_allocate(flags) \
+	kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
+#define pagebuf_deallocate(pb) \
+	kmem_zone_free(pagebuf_cache, (pb));
+
+/*
+ * Page Region interfaces.
+ *
+ * For pages in filesystems where the blocksize is smaller than the
+ * pagesize, we use the page->private field (long) to hold a bitmap
+ * of uptodate regions within the page.
+ *
+ * Each such region is "bytes per page / bits per long" bytes long.
+ *
+ * NBPPR == number-of-bytes-per-page-region
+ * BTOPR == bytes-to-page-region (rounded up)
+ * BTOPRT == bytes-to-page-region-truncated (rounded down)
+ */
+#if (BITS_PER_LONG == 32)
+#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
+#elif (BITS_PER_LONG == 64)
+#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
+#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
+#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
+
+STATIC unsigned long
+page_region_mask(
+	size_t		offset,
+	size_t		length)
+{
+	unsigned long	mask;
+	int		first, final;
+
+	first = BTOPR(offset);
+	final = BTOPRT(offset + length - 1);
+	first = min(first, final);
+
+	mask = ~0UL;
+	mask <<= BITS_PER_LONG - (final - first + 1);
+	mask >>= BITS_PER_LONG - (final + 1);
+
+	ASSERT(offset + length <= PAGE_CACHE_SIZE);
+	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
+
+	return mask;
+}
+
+STATIC inline void
+set_page_region(
+	struct page	*page,
+	size_t		offset,
+	size_t		length)
+{
+	page->private |= page_region_mask(offset, length);
+	if (page->private == ~0UL)
+		SetPageUptodate(page);
+}
+
+STATIC inline int
+test_page_region(
+	struct page	*page,
+	size_t		offset,
+	size_t		length)
+{
+	unsigned long	mask = page_region_mask(offset, length);
+
+	return (mask && (page->private & mask) == mask);
+}
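+
+/*
+ * Editor's note -- worked example, not part of the original source: with
+ * 4096-byte pages and 64-bit longs, PRSHIFT is 6 and NBPPR is 64, so each
+ * bit of page->private covers 64 bytes.  Marking bytes 512..1535 uptodate
+ * on a page with no regions set yet gives regions 8..23:
+ *
+ *	set_page_region(page, 512, 1024);
+ *	ASSERT(page->private == 0x0000000000ffff00UL);
+ *	ASSERT(test_page_region(page, 512, 1024));
+ *
+ * Only once every region of the page has been set does page->private
+ * reach ~0UL and the page itself become PageUptodate.
+ */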
+
+/*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+	void		*vm_addr;
+	struct a_list	*next;
+} a_list_t;
+
+STATIC a_list_t		*as_free_head;
+STATIC int		as_list_len;
+STATIC DEFINE_SPINLOCK(as_lock);
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+	void		*addr)
+{
+	a_list_t	*aentry;
+
+	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
+	if (likely(aentry)) {
+		spin_lock(&as_lock);
+		aentry->next = as_free_head;
+		aentry->vm_addr = addr;
+		as_free_head = aentry;
+		as_list_len++;
+		spin_unlock(&as_lock);
+	} else {
+		vunmap(addr);
+	}
+}
+
+STATIC void
+purge_addresses(void)
+{
+	a_list_t	*aentry, *old;
+
+	if (as_free_head == NULL)
+		return;
+
+	spin_lock(&as_lock);
+	aentry = as_free_head;
+	as_free_head = NULL;
+	as_list_len = 0;
+	spin_unlock(&as_lock);
+
+	while ((old = aentry) != NULL) {
+		vunmap(aentry->vm_addr);
+		aentry = aentry->next;
+		kfree(old);
+	}
+}
+
+/*
+ *	Internal pagebuf object manipulation
+ */
+
+STATIC void
+_pagebuf_initialize(
+	xfs_buf_t		*pb,
+	xfs_buftarg_t		*target,
+	loff_t			range_base,
+	size_t			range_length,
+	page_buf_flags_t	flags)
+{
+	/*
+	 * We don't want certain flags to appear in pb->pb_flags.
+	 */
+	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
+
+	memset(pb, 0, sizeof(xfs_buf_t));
+	atomic_set(&pb->pb_hold, 1);
+	init_MUTEX_LOCKED(&pb->pb_iodonesema);
+	INIT_LIST_HEAD(&pb->pb_list);
+	INIT_LIST_HEAD(&pb->pb_hash_list);
+	init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
+	PB_SET_OWNER(pb);
+	pb->pb_target = target;
+	pb->pb_file_offset = range_base;
+	/*
+	 * Set buffer_length and count_desired to the same value initially.
+	 * I/O routines should use count_desired, which will be the same in
+	 * most cases but may be reset (e.g. XFS recovery).
+	 */
+	pb->pb_buffer_length = pb->pb_count_desired = range_length;
+	pb->pb_flags = flags | PBF_NONE;
+	pb->pb_bn = XFS_BUF_DADDR_NULL;
+	atomic_set(&pb->pb_pin_count, 0);
+	init_waitqueue_head(&pb->pb_waiters);
+
+	XFS_STATS_INC(pb_create);
+	PB_TRACE(pb, "initialize", target);
+}
+
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
+ */
+STATIC int
+_pagebuf_get_pages(
+	xfs_buf_t		*pb,
+	int			page_count,
+	page_buf_flags_t	flags)
+{
+	/* Make sure that we have a page list */
+	if (pb->pb_pages == NULL) {
+		pb->pb_offset = page_buf_poff(pb->pb_file_offset);
+		pb->pb_page_count = page_count;
+		if (page_count <= PB_PAGES) {
+			pb->pb_pages = pb->pb_page_array;
+		} else {
+			pb->pb_pages = kmem_alloc(sizeof(struct page *) *
+					page_count, pb_to_km(flags));
+			if (pb->pb_pages == NULL)
+				return -ENOMEM;
+		}
+		memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+	}
+	return 0;
+}
+
+/*
+ *	Frees pb_pages if it was malloced.
+ */
+STATIC void
+_pagebuf_free_pages(
+	xfs_buf_t	*bp)
+{
+	if (bp->pb_pages != bp->pb_page_array) {
+		kmem_free(bp->pb_pages,
+			  bp->pb_page_count * sizeof(struct page *));
+	}
+}
+
+/*
+ *	Releases the specified buffer.
+ *
+ * 	The modification state of any associated pages is left unchanged.
+ * 	The buffer must not be on any hash - use pagebuf_rele instead for
+ * 	hashed and refcounted buffers.
+ */
+void
+pagebuf_free(
+	xfs_buf_t		*bp)
+{
+	PB_TRACE(bp, "free", 0);
+
+	ASSERT(list_empty(&bp->pb_hash_list));
+
+	if (bp->pb_flags & _PBF_PAGE_CACHE) {
+		uint		i;
+
+		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
+			free_address(bp->pb_addr - bp->pb_offset);
+
+		for (i = 0; i < bp->pb_page_count; i++)
+			page_cache_release(bp->pb_pages[i]);
+		_pagebuf_free_pages(bp);
+	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+		 /*
+		  * XXX(hch): bp->pb_count_desired might be incorrect (see
+		  * pagebuf_associate_memory for details), but fortunately
+		  * the Linux version of kmem_free ignores the len argument..
+		  */
+		kmem_free(bp->pb_addr, bp->pb_count_desired);
+		_pagebuf_free_pages(bp);
+	}
+
+	pagebuf_deallocate(bp);
+}
+
+/*
+ *	Finds all pages for buffer in question and builds it's page list.
+ */
+STATIC int
+_pagebuf_lookup_pages(
+	xfs_buf_t		*bp,
+	uint			flags)
+{
+	struct address_space	*mapping = bp->pb_target->pbr_mapping;
+	size_t			blocksize = bp->pb_target->pbr_bsize;
+	size_t			size = bp->pb_count_desired;
+	size_t			nbytes, offset;
+	int			gfp_mask = pb_to_gfp(flags);
+	unsigned short		page_count, i;
+	pgoff_t			first;
+	loff_t			end;
+	int			error;
+
+	end = bp->pb_file_offset + bp->pb_buffer_length;
+	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+
+	error = _pagebuf_get_pages(bp, page_count, flags);
+	if (unlikely(error))
+		return error;
+	bp->pb_flags |= _PBF_PAGE_CACHE;
+
+	offset = bp->pb_offset;
+	first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+
+	for (i = 0; i < bp->pb_page_count; i++) {
+		struct page	*page;
+		uint		retries = 0;
+
+	      retry:
+		page = find_or_create_page(mapping, first + i, gfp_mask);
+		if (unlikely(page == NULL)) {
+			if (flags & PBF_READ_AHEAD) {
+				bp->pb_page_count = i;
+				for (i = 0; i < bp->pb_page_count; i++)
+					unlock_page(bp->pb_pages[i]);
+				return -ENOMEM;
+			}
+
+			/*
+			 * This could deadlock.
+			 *
+			 * But until all the XFS lowlevel code is revamped to
+			 * handle buffer allocation failures we can't do much.
+			 */
+			if (!(++retries % 100))
+				printk(KERN_ERR
+					"XFS: possible memory allocation "
+					"deadlock in %s (mode:0x%x)\n",
+					__FUNCTION__, gfp_mask);
+
+			XFS_STATS_INC(pb_page_retries);
+			pagebuf_daemon_wakeup(0, gfp_mask);
+			blk_congestion_wait(WRITE, HZ/50);
+			goto retry;
+		}
+
+		XFS_STATS_INC(pb_page_found);
+
+		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+		size -= nbytes;
+
+		if (!PageUptodate(page)) {
+			page_count--;
+			if (blocksize >= PAGE_CACHE_SIZE) {
+				if (flags & PBF_READ)
+					bp->pb_locked = 1;
+			} else if (!PagePrivate(page)) {
+				if (test_page_region(page, offset, nbytes))
+					page_count++;
+			}
+		}
+
+		bp->pb_pages[i] = page;
+		offset = 0;
+	}
+
+	if (!bp->pb_locked) {
+		for (i = 0; i < bp->pb_page_count; i++)
+			unlock_page(bp->pb_pages[i]);
+	}
+
+	if (page_count) {
+		/* if we have any uptodate pages, mark that in the buffer */
+		bp->pb_flags &= ~PBF_NONE;
+
+		/* if some pages aren't uptodate, mark that in the buffer */
+		if (page_count != bp->pb_page_count)
+			bp->pb_flags |= PBF_PARTIAL;
+	}
+
+	PB_TRACE(bp, "lookup_pages", (long)page_count);
+	return error;
+}
+
+/*
+ *	Map buffer into kernel address-space if necessary.
+ */
+STATIC int
+_pagebuf_map_pages(
+	xfs_buf_t		*bp,
+	uint			flags)
+{
+	/* A single page buffer is always mappable */
+	if (bp->pb_page_count == 1) {
+		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED;
+	} else if (flags & PBF_MAPPED) {
+		if (as_list_len > 64)
+			purge_addresses();
+		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
+				VM_MAP, PAGE_KERNEL);
+		if (unlikely(bp->pb_addr == NULL))
+			return -ENOMEM;
+		bp->pb_addr += bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED;
+	}
+
+	return 0;
+}
+
+/*
+ *	Finding and Reading Buffers
+ */
+
+/*
+ *	_pagebuf_find
+ *
+ *	Looks up, and creates if absent, a lockable buffer for
+ *	a given range of an inode.  The buffer is returned
+ *	locked.	 If other overlapping buffers exist, they are
+ *	released before the new buffer is created and locked,
+ *	which may imply that this call will block until those buffers
+ *	are unlocked.  No I/O is implied by this call.
+ */
+xfs_buf_t *
+_pagebuf_find(
+	xfs_buftarg_t		*btp,	/* block device target		*/
+	loff_t			ioff,	/* starting offset of range	*/
+	size_t			isize,	/* length of range		*/
+	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
+	xfs_buf_t		*new_pb)/* newly allocated buffer	*/
+{
+	loff_t			range_base;
+	size_t			range_length;
+	xfs_bufhash_t		*hash;
+	xfs_buf_t		*pb, *n;
+
+	range_base = (ioff << BBSHIFT);
+	range_length = (isize << BBSHIFT);
+
+	/* Check for IOs smaller than the sector size / not sector aligned */
+	ASSERT(!(range_length < (1 << btp->pbr_sshift)));
+	ASSERT(!(range_base & (loff_t)btp->pbr_smask));
+
+	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
+
+	spin_lock(&hash->bh_lock);
+
+	list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
+		ASSERT(btp == pb->pb_target);
+		if (pb->pb_file_offset == range_base &&
+		    pb->pb_buffer_length == range_length) {
+			/*
+			 * If we look at something bring it to the
+			 * front of the list for next time.
+			 */
+			atomic_inc(&pb->pb_hold);
+			list_move(&pb->pb_hash_list, &hash->bh_list);
+			goto found;
+		}
+	}
+
+	/* No match found */
+	if (new_pb) {
+		_pagebuf_initialize(new_pb, btp, range_base,
+				range_length, flags);
+		new_pb->pb_hash = hash;
+		list_add(&new_pb->pb_hash_list, &hash->bh_list);
+	} else {
+		XFS_STATS_INC(pb_miss_locked);
+	}
+
+	spin_unlock(&hash->bh_lock);
+	return new_pb;
+
+found:
+	spin_unlock(&hash->bh_lock);
+
+	/* Attempt to get the semaphore without sleeping,
+	 * if this does not work then we need to drop the
+	 * spinlock and do a hard attempt on the semaphore.
+	 */
+	if (down_trylock(&pb->pb_sema)) {
+		if (!(flags & PBF_TRYLOCK)) {
+			/* wait for buffer ownership */
+			PB_TRACE(pb, "get_lock", 0);
+			pagebuf_lock(pb);
+			XFS_STATS_INC(pb_get_locked_waited);
+		} else {
+			/* We asked for a trylock and failed, no need
+			 * to look at file offset and length here, we
+			 * know that this pagebuf at least overlaps our
+			 * pagebuf and is locked, therefore our buffer
+			 * either does not exist, or is this buffer
+			 */
+
+			pagebuf_rele(pb);
+			XFS_STATS_INC(pb_busy_locked);
+			return (NULL);
+		}
+	} else {
+		/* trylock worked */
+		PB_SET_OWNER(pb);
+	}
+
+	if (pb->pb_flags & PBF_STALE)
+		pb->pb_flags &= PBF_MAPPED;
+	PB_TRACE(pb, "got_lock", 0);
+	XFS_STATS_INC(pb_get_locked);
+	return (pb);
+}
+
+/*
+ *	xfs_buf_get_flags assembles a buffer covering the specified range.
+ *
+ *	Storage in memory for all portions of the buffer will be allocated,
+ *	although backing storage may not be.
+ */
+xfs_buf_t *
+xfs_buf_get_flags(			/* allocate a buffer		*/
+	xfs_buftarg_t		*target,/* target for buffer		*/
+	loff_t			ioff,	/* starting offset of range	*/
+	size_t			isize,	/* length of range		*/
+	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
+{
+	xfs_buf_t		*pb, *new_pb;
+	int			error = 0, i;
+
+	new_pb = pagebuf_allocate(flags);
+	if (unlikely(!new_pb))
+		return NULL;
+
+	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
+	if (pb == new_pb) {
+		error = _pagebuf_lookup_pages(pb, flags);
+		if (error)
+			goto no_buffer;
+	} else {
+		pagebuf_deallocate(new_pb);
+		if (unlikely(pb == NULL))
+			return NULL;
+	}
+
+	for (i = 0; i < pb->pb_page_count; i++)
+		mark_page_accessed(pb->pb_pages[i]);
+
+	if (!(pb->pb_flags & PBF_MAPPED)) {
+		error = _pagebuf_map_pages(pb, flags);
+		if (unlikely(error)) {
+			printk(KERN_WARNING "%s: failed to map pages\n",
+					__FUNCTION__);
+			goto no_buffer;
+		}
+	}
+
+	XFS_STATS_INC(pb_get);
+
+	/*
+	 * Always fill in the block number now, the mapped cases can do
+	 * their own overlay of this later.
+	 */
+	pb->pb_bn = ioff;
+	pb->pb_count_desired = pb->pb_buffer_length;
+
+	PB_TRACE(pb, "get", (unsigned long)flags);
+	return pb;
+
+ no_buffer:
+	if (flags & (PBF_LOCK | PBF_TRYLOCK))
+		pagebuf_unlock(pb);
+	pagebuf_rele(pb);
+	return NULL;
+}
+
+xfs_buf_t *
+xfs_buf_read_flags(
+	xfs_buftarg_t		*target,
+	loff_t			ioff,
+	size_t			isize,
+	page_buf_flags_t	flags)
+{
+	xfs_buf_t		*pb;
+
+	flags |= PBF_READ;
+
+	pb = xfs_buf_get_flags(target, ioff, isize, flags);
+	if (pb) {
+		if (PBF_NOT_DONE(pb)) {
+			PB_TRACE(pb, "read", (unsigned long)flags);
+			XFS_STATS_INC(pb_get_read);
+			pagebuf_iostart(pb, flags);
+		} else if (flags & PBF_ASYNC) {
+			PB_TRACE(pb, "read_async", (unsigned long)flags);
+			/*
+			 * Read ahead call which is already satisfied,
+			 * drop the buffer
+			 */
+			goto no_buffer;
+		} else {
+			PB_TRACE(pb, "read_done", (unsigned long)flags);
+			/* We do not want read in the flags */
+			pb->pb_flags &= ~PBF_READ;
+		}
+	}
+
+	return pb;
+
+ no_buffer:
+	if (flags & (PBF_LOCK | PBF_TRYLOCK))
+		pagebuf_unlock(pb);
+	pagebuf_rele(pb);
+	return NULL;
+}
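+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * a typical synchronous metadata read through this interface; "target",
+ * "blkno" and "numblks" are hypothetical caller-supplied values in
+ * basic blocks.
+ *
+ *	bp = xfs_buf_read_flags(target, blkno, numblks, PBF_LOCK | PBF_MAPPED);
+ *	if (bp) {
+ *		error = bp->pb_error;
+ *		if (!error) {
+ *			... access the data via pagebuf_offset(bp, 0) ...
+ *		}
+ *		pagebuf_unlock(bp);
+ *		pagebuf_rele(bp);
+ *	}
+ */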
+
+/*
+ * Create a skeletal pagebuf (no pages associated with it).
+ */
+xfs_buf_t *
+pagebuf_lookup(
+	xfs_buftarg_t		*target,
+	loff_t			ioff,
+	size_t			isize,
+	page_buf_flags_t	flags)
+{
+	xfs_buf_t		*pb;
+
+	pb = pagebuf_allocate(flags);
+	if (pb) {
+		_pagebuf_initialize(pb, target, ioff, isize, flags);
+	}
+	return pb;
+}
+
+/*
+ * If we are not low on memory then do the readahead in a deadlock
+ * safe manner.
+ */
+void
+pagebuf_readahead(
+	xfs_buftarg_t		*target,
+	loff_t			ioff,
+	size_t			isize,
+	page_buf_flags_t	flags)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = target->pbr_mapping->backing_dev_info;
+	if (bdi_read_congested(bdi))
+		return;
+
+	flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
+	xfs_buf_read_flags(target, ioff, isize, flags);
+}
+
+xfs_buf_t *
+pagebuf_get_empty(
+	size_t			len,
+	xfs_buftarg_t		*target)
+{
+	xfs_buf_t		*pb;
+
+	pb = pagebuf_allocate(0);
+	if (pb)
+		_pagebuf_initialize(pb, target, 0, len, 0);
+	return pb;
+}
+
+static inline struct page *
+mem_to_page(
+	void			*addr)
+{
+	if (((unsigned long)addr < VMALLOC_START) ||
+	    ((unsigned long)addr >= VMALLOC_END)) {
+		return virt_to_page(addr);
+	} else {
+		return vmalloc_to_page(addr);
+	}
+}
+
+int
+pagebuf_associate_memory(
+	xfs_buf_t		*pb,
+	void			*mem,
+	size_t			len)
+{
+	int			rval;
+	int			i = 0;
+	size_t			ptr;
+	size_t			end, end_cur;
+	off_t			offset;
+	int			page_count;
+
+	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
+	if (offset && (len > PAGE_CACHE_SIZE))
+		page_count++;
+
+	/* Free any previous set of page pointers */
+	if (pb->pb_pages)
+		_pagebuf_free_pages(pb);
+
+	pb->pb_pages = NULL;
+	pb->pb_addr = mem;
+
+	rval = _pagebuf_get_pages(pb, page_count, 0);
+	if (rval)
+		return rval;
+
+	pb->pb_offset = offset;
+	ptr = (size_t) mem & PAGE_CACHE_MASK;
+	end = PAGE_CACHE_ALIGN((size_t) mem + len);
+	end_cur = end;
+	/* set up first page */
+	pb->pb_pages[0] = mem_to_page(mem);
+
+	ptr += PAGE_CACHE_SIZE;
+	pb->pb_page_count = ++i;
+	while (ptr < end) {
+		pb->pb_pages[i] = mem_to_page((void *)ptr);
+		pb->pb_page_count = ++i;
+		ptr += PAGE_CACHE_SIZE;
+	}
+	pb->pb_locked = 0;
+
+	pb->pb_count_desired = pb->pb_buffer_length = len;
+	pb->pb_flags |= PBF_MAPPED;
+
+	return 0;
+}
+
+xfs_buf_t *
+pagebuf_get_no_daddr(
+	size_t			len,
+	xfs_buftarg_t		*target)
+{
+	size_t			malloc_len = len;
+	xfs_buf_t		*bp;
+	void			*data;
+	int			error;
+
+	bp = pagebuf_allocate(0);
+	if (unlikely(bp == NULL))
+		goto fail;
+	_pagebuf_initialize(bp, target, 0, len, PBF_FORCEIO);
+
+ try_again:
+	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+	if (unlikely(data == NULL))
+		goto fail_free_buf;
+
+	/* check whether alignment matches.. */
+	if ((__psunsigned_t)data !=
+	    ((__psunsigned_t)data & ~target->pbr_smask)) {
+		/* .. else double the size and try again */
+		kmem_free(data, malloc_len);
+		malloc_len <<= 1;
+		goto try_again;
+	}
+
+	error = pagebuf_associate_memory(bp, data, len);
+	if (error)
+		goto fail_free_mem;
+	bp->pb_flags |= _PBF_KMEM_ALLOC;
+
+	pagebuf_unlock(bp);
+
+	PB_TRACE(bp, "no_daddr", data);
+	return bp;
+ fail_free_mem:
+	kmem_free(data, malloc_len);
+ fail_free_buf:
+	pagebuf_free(bp);
+ fail:
+	return NULL;
+}
+
+/*
+ *	pagebuf_hold
+ *
+ *	Increment reference count on buffer, to hold the buffer concurrently
+ *	with another thread which may release (free) the buffer asynchronously.
+ *
+ *	Must hold the buffer already to call this function.
+ */
+void
+pagebuf_hold(
+	xfs_buf_t		*pb)
+{
+	atomic_inc(&pb->pb_hold);
+	PB_TRACE(pb, "hold", 0);
+}
+
+/*
+ *	pagebuf_rele
+ *
+ *	pagebuf_rele releases a hold on the specified buffer.  If the
+ *	hold count is 1, pagebuf_rele calls pagebuf_free.
+ */
+void
+pagebuf_rele(
+	xfs_buf_t		*pb)
+{
+	xfs_bufhash_t		*hash = pb->pb_hash;
+
+	PB_TRACE(pb, "rele", pb->pb_relse);
+
+	/*
+	 * pagebuf_lookup buffers are not hashed, not delayed write,
+	 * and don't have their own release routines.  Special case.
+	 */
+	if (unlikely(!hash)) {
+		ASSERT(!pb->pb_relse);
+		if (atomic_dec_and_test(&pb->pb_hold))
+			xfs_buf_free(pb);
+		return;
+	}
+
+	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
+		int		do_free = 1;
+
+		if (pb->pb_relse) {
+			atomic_inc(&pb->pb_hold);
+			spin_unlock(&hash->bh_lock);
+			(*(pb->pb_relse)) (pb);
+			spin_lock(&hash->bh_lock);
+			do_free = 0;
+		}
+
+		if (pb->pb_flags & PBF_DELWRI) {
+			pb->pb_flags |= PBF_ASYNC;
+			atomic_inc(&pb->pb_hold);
+			pagebuf_delwri_queue(pb, 0);
+			do_free = 0;
+		} else if (pb->pb_flags & PBF_FS_MANAGED) {
+			do_free = 0;
+		}
+
+		if (do_free) {
+			list_del_init(&pb->pb_hash_list);
+			spin_unlock(&hash->bh_lock);
+			pagebuf_free(pb);
+		} else {
+			spin_unlock(&hash->bh_lock);
+		}
+	}
+}
+
+
+/*
+ *	Mutual exclusion on buffers.  Locking model:
+ *
+ *	Buffers associated with inodes for which buffer locking
+ *	is not enabled are not protected by semaphores, and are
+ *	assumed to be exclusively owned by the caller.  There is a
+ *	spinlock in the buffer, used by the caller when concurrent
+ *	access is possible.
+ */
+
+/*
+ *	pagebuf_cond_lock
+ *
+ *	pagebuf_cond_lock locks a buffer object, if it is not already locked.
+ *	Note that this in no way locks the underlying pages, so it is only
+ *	useful for synchronizing concurrent use of page buffer objects, not
+ *	for synchronizing independent access to the underlying pages.
+ */
+int
+pagebuf_cond_lock(			/* lock buffer, if not locked	*/
+					/* returns -EBUSY if locked)	*/
+	xfs_buf_t		*pb)
+{
+	int			locked;
+
+	locked = down_trylock(&pb->pb_sema) == 0;
+	if (locked) {
+		PB_SET_OWNER(pb);
+	}
+	PB_TRACE(pb, "cond_lock", (long)locked);
+	return(locked ? 0 : -EBUSY);
+}
+
+#if defined(DEBUG) || defined(XFS_BLI_TRACE)
+/*
+ *	pagebuf_lock_value
+ *
+ *	Return lock value for a pagebuf
+ */
+int
+pagebuf_lock_value(
+	xfs_buf_t		*pb)
+{
+	return(atomic_read(&pb->pb_sema.count));
+}
+#endif
+
+/*
+ *	pagebuf_lock
+ *
+ *	pagebuf_lock locks a buffer object.  Note that this in no way
+ *	locks the underlying pages, so it is only useful for synchronizing
+ *	concurrent use of page buffer objects, not for synchronizing independent
+ *	access to the underlying pages.
+ */
+int
+pagebuf_lock(
+	xfs_buf_t		*pb)
+{
+	PB_TRACE(pb, "lock", 0);
+	if (atomic_read(&pb->pb_io_remaining))
+		blk_run_address_space(pb->pb_target->pbr_mapping);
+	down(&pb->pb_sema);
+	PB_SET_OWNER(pb);
+	PB_TRACE(pb, "locked", 0);
+	return 0;
+}
+
+/*
+ *	pagebuf_unlock
+ *
+ *	pagebuf_unlock releases the lock on the buffer object created by
+ *	pagebuf_lock or pagebuf_cond_lock (not any
+ *	pinning of underlying pages created by pagebuf_pin).
+ */
+void
+pagebuf_unlock(				/* unlock buffer		*/
+	xfs_buf_t		*pb)	/* buffer to unlock		*/
+{
+	PB_CLEAR_OWNER(pb);
+	up(&pb->pb_sema);
+	PB_TRACE(pb, "unlock", 0);
+}
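+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * the two locking forms pair with pagebuf_unlock as follows.
+ *
+ *	if (pagebuf_cond_lock(pb) == 0) {
+ *		... got the lock without sleeping ...
+ *		pagebuf_unlock(pb);
+ *	} else {
+ *		pagebuf_lock(pb);	... may sleep until the holder drops it ...
+ *		pagebuf_unlock(pb);
+ *	}
+ */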
+
+
+/*
+ *	Pinning Buffer Storage in Memory
+ */
+
+/*
+ *	pagebuf_pin
+ *
+ *	pagebuf_pin locks all of the memory represented by a buffer in
+ *	memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
+ *	the same or different buffers affecting a given page, will
+ *	properly count the number of outstanding "pin" requests.  The
+ *	buffer may be released after the pagebuf_pin and a different
+ *	buffer used when calling pagebuf_unpin, if desired.
+ *	pagebuf_pin should be used by the file system when it wants to be
+ *	assured that no attempt will be made to force the affected
+ *	memory to disk.	 It does not assure that a given logical page
+ *	will not be moved to a different physical page.
+ */
+void
+pagebuf_pin(
+	xfs_buf_t		*pb)
+{
+	atomic_inc(&pb->pb_pin_count);
+	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+}
+
+/*
+ *	pagebuf_unpin
+ *
+ *	pagebuf_unpin reverses the locking of memory performed by
+ *	pagebuf_pin.  Note that both functions affect the logical
+ *	pages associated with the buffer, not the buffer itself.
+ */
+void
+pagebuf_unpin(
+	xfs_buf_t		*pb)
+{
+	if (atomic_dec_and_test(&pb->pb_pin_count)) {
+		wake_up_all(&pb->pb_waiters);
+	}
+	PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+}
+
+int
+pagebuf_ispin(
+	xfs_buf_t		*pb)
+{
+	return atomic_read(&pb->pb_pin_count);
+}
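+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * a caller that must prevent the buffer contents from being forced to
+ * disk (for example while a log item still covers them) brackets that
+ * window with a pin/unpin pair:
+ *
+ *	pagebuf_pin(bp);
+ *	... window during which writeback must not push this buffer ...
+ *	pagebuf_unpin(bp);
+ *
+ * A writer blocked in _pagebuf_wait_unpin below is woken by the final
+ * unpin.
+ */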
+
+/*
+ *	pagebuf_wait_unpin
+ *
+ *	pagebuf_wait_unpin waits until all of the memory associated
+ *	with the buffer is no longer locked in memory.  It returns
+ *	immediately if none of the affected pages are locked.
+ */
+static inline void
+_pagebuf_wait_unpin(
+	xfs_buf_t		*pb)
+{
+	DECLARE_WAITQUEUE	(wait, current);
+
+	if (atomic_read(&pb->pb_pin_count) == 0)
+		return;
+
+	add_wait_queue(&pb->pb_waiters, &wait);
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&pb->pb_pin_count) == 0)
+			break;
+		if (atomic_read(&pb->pb_io_remaining))
+			blk_run_address_space(pb->pb_target->pbr_mapping);
+		schedule();
+	}
+	remove_wait_queue(&pb->pb_waiters, &wait);
+	set_current_state(TASK_RUNNING);
+}
+
+/*
+ *	Buffer Utility Routines
+ */
+
+/*
+ *	pagebuf_iodone
+ *
+ *	pagebuf_iodone marks a buffer for which I/O is in progress
+ *	done with respect to that I/O.	The pb_iodone routine, if
+ *	present, will be called as a side-effect.
+ */
+STATIC void
+pagebuf_iodone_work(
+	void			*v)
+{
+	xfs_buf_t		*bp = (xfs_buf_t *)v;
+
+	if (bp->pb_iodone)
+		(*(bp->pb_iodone))(bp);
+	else if (bp->pb_flags & PBF_ASYNC)
+		xfs_buf_relse(bp);
+}
+
+void
+pagebuf_iodone(
+	xfs_buf_t		*pb,
+	int			dataio,
+	int			schedule)
+{
+	pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
+	if (pb->pb_error == 0) {
+		pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
+	}
+
+	PB_TRACE(pb, "iodone", pb->pb_iodone);
+
+	if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+		if (schedule) {
+			INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
+			queue_work(dataio ? pagebuf_dataio_workqueue :
+				pagebuf_logio_workqueue, &pb->pb_iodone_work);
+		} else {
+			pagebuf_iodone_work(pb);
+		}
+	} else {
+		up(&pb->pb_iodonesema);
+	}
+}
+
+/*
+ *	pagebuf_ioerror
+ *
+ *	pagebuf_ioerror sets the error code for a buffer.
+ */
+void
+pagebuf_ioerror(			/* mark/clear buffer error flag */
+	xfs_buf_t		*pb,	/* buffer to mark		*/
+	int			error)	/* error to store (0 if none)	*/
+{
+	ASSERT(error >= 0 && error <= 0xffff);
+	pb->pb_error = (unsigned short)error;
+	PB_TRACE(pb, "ioerror", (unsigned long)error);
+}
+
+/*
+ *	pagebuf_iostart
+ *
+ *	pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
+ *	If necessary, it will arrange for any disk space allocation required,
+ *	and it will break up the request if the block mappings require it.
+ *	The pb_iodone routine in the buffer supplied will only be called
+ *	when all of the subsidiary I/O requests, if any, have been completed.
+ *	pagebuf_iostart calls the pagebuf_ioinitiate routine or
+ *	pagebuf_iorequest, if the former routine is not defined, to start
+ *	the I/O on a given low-level request.
+ */
+int
+pagebuf_iostart(			/* start I/O on a buffer	  */
+	xfs_buf_t		*pb,	/* buffer to start		  */
+	page_buf_flags_t	flags)	/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
+					/* PBF_WRITE, PBF_DELWRI,	  */
+					/* PBF_DONT_BLOCK		  */
+{
+	int			status = 0;
+
+	PB_TRACE(pb, "iostart", (unsigned long)flags);
+
+	if (flags & PBF_DELWRI) {
+		pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
+		pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
+		pagebuf_delwri_queue(pb, 1);
+		return status;
+	}
+
+	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
+			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
+			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+
+	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+
+	/* For writes allow an alternate strategy routine to precede
+	 * the actual I/O request (which may not be issued at all in
+	 * a shutdown situation, for example).
+	 */
+	status = (flags & PBF_WRITE) ?
+		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+
+	/* Wait for I/O if we are not an async request.
+	 * Note: async I/O request completion will release the buffer,
+	 * and that can already be done by this point.  So using the
+	 * buffer pointer from here on, after async I/O, is invalid.
+	 */
+	if (!status && !(flags & PBF_ASYNC))
+		status = pagebuf_iowait(pb);
+
+	return status;
+}
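+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * a synchronous write of a locked, mapped buffer through this entry point
+ *
+ *	error = pagebuf_iostart(bp, PBF_WRITE);
+ *
+ * returns only once the I/O has completed (via pagebuf_iowait), with any
+ * error taken from bp->pb_error.  Adding PBF_ASYNC instead fires the I/O
+ * and lets the completion path release the buffer.
+ */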
+
+/*
+ * Helper routine for pagebuf_iorequest
+ */
+
+STATIC __inline__ int
+_pagebuf_iolocked(
+	xfs_buf_t		*pb)
+{
+	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
+	if (pb->pb_flags & PBF_READ)
+		return pb->pb_locked;
+	return 0;
+}
+
+STATIC __inline__ void
+_pagebuf_iodone(
+	xfs_buf_t		*pb,
+	int			schedule)
+{
+	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
+		pb->pb_locked = 0;
+		pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule);
+	}
+}
+
+STATIC int
+bio_end_io_pagebuf(
+	struct bio		*bio,
+	unsigned int		bytes_done,
+	int			error)
+{
+	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
+	unsigned int		i, blocksize = pb->pb_target->pbr_bsize;
+	struct bio_vec		*bvec = bio->bi_io_vec;
+
+	if (bio->bi_size)
+		return 1;
+
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		pb->pb_error = EIO;
+
+	for (i = 0; i < bio->bi_vcnt; i++, bvec++) {
+		struct page	*page = bvec->bv_page;
+
+		if (pb->pb_error) {
+			SetPageError(page);
+		} else if (blocksize == PAGE_CACHE_SIZE) {
+			SetPageUptodate(page);
+		} else if (!PagePrivate(page) &&
+				(pb->pb_flags & _PBF_PAGE_CACHE)) {
+			set_page_region(page, bvec->bv_offset, bvec->bv_len);
+		}
+
+		if (_pagebuf_iolocked(pb)) {
+			unlock_page(page);
+		}
+	}
+
+	_pagebuf_iodone(pb, 1);
+	bio_put(bio);
+	return 0;
+}
+
+STATIC void
+_pagebuf_ioapply(
+	xfs_buf_t		*pb)
+{
+	int			i, rw, map_i, total_nr_pages, nr_pages;
+	struct bio		*bio;
+	int			offset = pb->pb_offset;
+	int			size = pb->pb_count_desired;
+	sector_t		sector = pb->pb_bn;
+	unsigned int		blocksize = pb->pb_target->pbr_bsize;
+	int			locking = _pagebuf_iolocked(pb);
+
+	total_nr_pages = pb->pb_page_count;
+	map_i = 0;
+
+	if (pb->pb_flags & _PBF_RUN_QUEUES) {
+		pb->pb_flags &= ~_PBF_RUN_QUEUES;
+		rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
+	} else {
+		rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
+	}
+
+	/* Special code path for reading a sub-page-size pagebuf in --
+	 * we populate the whole page, and hence the other metadata
+	 * in the same page.  This optimization is only valid when the
+	 * filesystem block size and the page size are equal.
+	 */
+	if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
+	    (pb->pb_flags & PBF_READ) && locking &&
+	    (blocksize == PAGE_CACHE_SIZE)) {
+		bio = bio_alloc(GFP_NOIO, 1);
+
+		bio->bi_bdev = pb->pb_target->pbr_bdev;
+		bio->bi_sector = sector - (offset >> BBSHIFT);
+		bio->bi_end_io = bio_end_io_pagebuf;
+		bio->bi_private = pb;
+
+		bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+		size = 0;
+
+		atomic_inc(&pb->pb_io_remaining);
+
+		goto submit_io;
+	}
+
+	/* Lock down the pages which we need to for the request */
+	if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+		for (i = 0; size; i++) {
+			int		nbytes = PAGE_CACHE_SIZE - offset;
+			struct page	*page = pb->pb_pages[i];
+
+			if (nbytes > size)
+				nbytes = size;
+
+			lock_page(page);
+
+			size -= nbytes;
+			offset = 0;
+		}
+		offset = pb->pb_offset;
+		size = pb->pb_count_desired;
+	}
+
+next_chunk:
+	atomic_inc(&pb->pb_io_remaining);
+	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
+	if (nr_pages > total_nr_pages)
+		nr_pages = total_nr_pages;
+
+	bio = bio_alloc(GFP_NOIO, nr_pages);
+	bio->bi_bdev = pb->pb_target->pbr_bdev;
+	bio->bi_sector = sector;
+	bio->bi_end_io = bio_end_io_pagebuf;
+	bio->bi_private = pb;
+
+	for (; size && nr_pages; nr_pages--, map_i++) {
+		int	nbytes = PAGE_CACHE_SIZE - offset;
+
+		if (nbytes > size)
+			nbytes = size;
+
+		if (bio_add_page(bio, pb->pb_pages[map_i],
+					nbytes, offset) < nbytes)
+			break;
+
+		offset = 0;
+		sector += nbytes >> BBSHIFT;
+		size -= nbytes;
+		total_nr_pages--;
+	}
+
+submit_io:
+	if (likely(bio->bi_size)) {
+		submit_bio(rw, bio);
+		if (size)
+			goto next_chunk;
+	} else {
+		bio_put(bio);
+		pagebuf_ioerror(pb, EIO);
+	}
+}
+
+/*
+ *	pagebuf_iorequest -- the core I/O request routine.
+ */
+int
+pagebuf_iorequest(			/* start real I/O		*/
+	xfs_buf_t		*pb)	/* buffer to convey to device	*/
+{
+	PB_TRACE(pb, "iorequest", 0);
+
+	if (pb->pb_flags & PBF_DELWRI) {
+		pagebuf_delwri_queue(pb, 1);
+		return 0;
+	}
+
+	if (pb->pb_flags & PBF_WRITE) {
+		_pagebuf_wait_unpin(pb);
+	}
+
+	pagebuf_hold(pb);
+
+	/* Set the count to 1 initially; this prevents an I/O completion
+	 * callout that happens before we have started all the I/O from
+	 * calling pagebuf_iodone too early.
+	 */
+	atomic_set(&pb->pb_io_remaining, 1);
+	_pagebuf_ioapply(pb);
+	_pagebuf_iodone(pb, 0);
+
+	pagebuf_rele(pb);
+	return 0;
+}
+
+/*
+ *	pagebuf_iowait
+ *
+ *	pagebuf_iowait waits for I/O to complete on the buffer supplied.
+ *	It returns immediately if no I/O is pending.  In any case, it returns
+ *	the error code, if any, or 0 if there is no error.
+ */
+int
+pagebuf_iowait(
+	xfs_buf_t		*pb)
+{
+	PB_TRACE(pb, "iowait", 0);
+	if (atomic_read(&pb->pb_io_remaining))
+		blk_run_address_space(pb->pb_target->pbr_mapping);
+	down(&pb->pb_iodonesema);
+	PB_TRACE(pb, "iowaited", (long)pb->pb_error);
+	return pb->pb_error;
+}
+
+caddr_t
+pagebuf_offset(
+	xfs_buf_t		*pb,
+	size_t			offset)
+{
+	struct page		*page;
+
+	offset += pb->pb_offset;
+
+	page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
+	return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
+}
+
+/*
+ *	pagebuf_iomove
+ *
+ *	Move data into or out of a buffer.
+ */
+void
+pagebuf_iomove(
+	xfs_buf_t		*pb,	/* buffer to process		*/
+	size_t			boff,	/* starting buffer offset	*/
+	size_t			bsize,	/* length to copy		*/
+	caddr_t			data,	/* data address			*/
+	page_buf_rw_t		mode)	/* read/write flag		*/
+{
+	size_t			bend, cpoff, csize;
+	struct page		*page;
+
+	bend = boff + bsize;
+	while (boff < bend) {
+		page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
+		cpoff = page_buf_poff(boff + pb->pb_offset);
+		csize = min_t(size_t,
+			      PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
+
+		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+
+		switch (mode) {
+		case PBRW_ZERO:
+			memset(page_address(page) + cpoff, 0, csize);
+			break;
+		case PBRW_READ:
+			memcpy(data, page_address(page) + cpoff, csize);
+			break;
+		case PBRW_WRITE:
+			memcpy(page_address(page) + cpoff, data, csize);
+		}
+
+		boff += csize;
+		data += csize;
+	}
+}
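+
+/*
+ * Editor's note -- illustrative sketch, not part of the original source:
+ * filling a buffer with a caller-supplied record and zeroing the tail;
+ * "data" and "len" are hypothetical.
+ *
+ *	pagebuf_iomove(bp, 0, len, data, PBRW_WRITE);
+ *	pagebuf_iomove(bp, len, bp->pb_count_desired - len, data, PBRW_ZERO);
+ *
+ * The PBRW_ZERO case only memsets the buffer pages; the data pointer is
+ * never dereferenced for it.
+ */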
+
+/*
+ *	Handling of buftargs.
+ */
+
+/*
+ * Wait for any bufs with callbacks that have been submitted but
+ * have not yet returned... walk the hash list for the target.
+ */
+void
+xfs_wait_buftarg(
+	xfs_buftarg_t	*btp)
+{
+	xfs_buf_t	*bp, *n;
+	xfs_bufhash_t	*hash;
+	uint		i;
+
+	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
+		hash = &btp->bt_hash[i];
+again:
+		spin_lock(&hash->bh_lock);
+		list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
+			ASSERT(btp == bp->pb_target);
+			if (!(bp->pb_flags & PBF_FS_MANAGED)) {
+				spin_unlock(&hash->bh_lock);
+				delay(100);
+				goto again;
+			}
+		}
+		spin_unlock(&hash->bh_lock);
+	}
+}
+
+/*
+ * Allocate buffer hash table for a given target.
+ * For devices containing metadata (i.e. not the log/realtime devices)
+ * we need to allocate a much larger hash table.
+ */
+STATIC void
+xfs_alloc_bufhash(
+	xfs_buftarg_t		*btp,
+	int			external)
+{
+	unsigned int		i;
+
+	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
+	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
+	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
+					sizeof(xfs_bufhash_t), KM_SLEEP);
+	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
+		spin_lock_init(&btp->bt_hash[i].bh_lock);
+		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
+	}
+}
+
+STATIC void
+xfs_free_bufhash(
+	xfs_buftarg_t		*btp)
+{
+	kmem_free(btp->bt_hash,
+			(1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
+	btp->bt_hash = NULL;
+}
+
+void
+xfs_free_buftarg(
+	xfs_buftarg_t		*btp,
+	int			external)
+{
+	xfs_flush_buftarg(btp, 1);
+	if (external)
+		xfs_blkdev_put(btp->pbr_bdev);
+	xfs_free_bufhash(btp);
+	iput(btp->pbr_mapping->host);
+	kmem_free(btp, sizeof(*btp));
+}
+
+void
+xfs_incore_relse(
+	xfs_buftarg_t		*btp,
+	int			delwri_only,
+	int			wait)
+{
+	invalidate_bdev(btp->pbr_bdev, 1);
+	truncate_inode_pages(btp->pbr_mapping, 0LL);
+}
+
+STATIC int
+xfs_setsize_buftarg_flags(
+	xfs_buftarg_t		*btp,
+	unsigned int		blocksize,
+	unsigned int		sectorsize,
+	int			verbose)
+{
+	btp->pbr_bsize = blocksize;
+	btp->pbr_sshift = ffs(sectorsize) - 1;
+	btp->pbr_smask = sectorsize - 1;
+
+	if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+		printk(KERN_WARNING
+			"XFS: Cannot set_blocksize to %u on device %s\n",
+			sectorsize, XFS_BUFTARG_NAME(btp));
+		return EINVAL;
+	}
+
+	if (verbose &&
+	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
+		printk(KERN_WARNING
+			"XFS: %u byte sectors in use on device %s.  "
+			"This is suboptimal; %u or greater is ideal.\n",
+			sectorsize, XFS_BUFTARG_NAME(btp),
+			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
+	}
+
+	return 0;
+}
+
+/*
+ * When allocating the initial buffer target we have not yet
+ * read in the superblock, so we don't know what size sectors
+ * are being used at this early stage.  Play safe.
+ */
+STATIC int
+xfs_setsize_buftarg_early(
+	xfs_buftarg_t		*btp,
+	struct block_device	*bdev)
+{
+	return xfs_setsize_buftarg_flags(btp,
+			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
+}
+
+int
+xfs_setsize_buftarg(
+	xfs_buftarg_t		*btp,
+	unsigned int		blocksize,
+	unsigned int		sectorsize)
+{
+	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
+}
+
+STATIC int
+xfs_mapping_buftarg(
+	xfs_buftarg_t		*btp,
+	struct block_device	*bdev)
+{
+	struct backing_dev_info	*bdi;
+	struct inode		*inode;
+	struct address_space	*mapping;
+	static struct address_space_operations mapping_aops = {
+		.sync_page = block_sync_page,
+	};
+
+	inode = new_inode(bdev->bd_inode->i_sb);
+	if (!inode) {
+		printk(KERN_WARNING
+			"XFS: Cannot allocate mapping inode for device %s\n",
+			XFS_BUFTARG_NAME(btp));
+		return ENOMEM;
+	}
+	inode->i_mode = S_IFBLK;
+	inode->i_bdev = bdev;
+	inode->i_rdev = bdev->bd_dev;
+	bdi = blk_get_backing_dev_info(bdev);
+	if (!bdi)
+		bdi = &default_backing_dev_info;
+	mapping = &inode->i_data;
+	mapping->a_ops = &mapping_aops;
+	mapping->backing_dev_info = bdi;
+	mapping_set_gfp_mask(mapping, GFP_NOFS);
+	btp->pbr_mapping = mapping;
+	return 0;
+}
+
+xfs_buftarg_t *
+xfs_alloc_buftarg(
+	struct block_device	*bdev,
+	int			external)
+{
+	xfs_buftarg_t		*btp;
+
+	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+
+	btp->pbr_dev =  bdev->bd_dev;
+	btp->pbr_bdev = bdev;
+	if (xfs_setsize_buftarg_early(btp, bdev))
+		goto error;
+	if (xfs_mapping_buftarg(btp, bdev))
+		goto error;
+	xfs_alloc_bufhash(btp, external);
+	return btp;
+
+error:
+	kmem_free(btp, sizeof(*btp));
+	return NULL;
+}
+
+
+/*
+ * Pagebuf delayed write buffer handling
+ */
+
+STATIC LIST_HEAD(pbd_delwrite_queue);
+STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
+
+STATIC void
+pagebuf_delwri_queue(
+	xfs_buf_t		*pb,
+	int			unlock)
+{
+	PB_TRACE(pb, "delwri_q", (long)unlock);
+	ASSERT(pb->pb_flags & PBF_DELWRI);
+
+	spin_lock(&pbd_delwrite_lock);
+	/* If already in the queue, dequeue and place at tail */
+	if (!list_empty(&pb->pb_list)) {
+		if (unlock) {
+			atomic_dec(&pb->pb_hold);
+		}
+		list_del(&pb->pb_list);
+	}
+
+	list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
+	pb->pb_queuetime = jiffies;
+	spin_unlock(&pbd_delwrite_lock);
+
+	if (unlock)
+		pagebuf_unlock(pb);
+}
+
+void
+pagebuf_delwri_dequeue(
+	xfs_buf_t		*pb)
+{
+	int			dequeued = 0;
+
+	spin_lock(&pbd_delwrite_lock);
+	if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
+		list_del_init(&pb->pb_list);
+		dequeued = 1;
+	}
+	pb->pb_flags &= ~PBF_DELWRI;
+	spin_unlock(&pbd_delwrite_lock);
+
+	if (dequeued)
+		pagebuf_rele(pb);
+
+	PB_TRACE(pb, "delwri_dq", (long)dequeued);
+}
+
+STATIC void
+pagebuf_runall_queues(
+	struct workqueue_struct	*queue)
+{
+	flush_workqueue(queue);
+}
+
+/* Defines for pagebuf daemon */
+STATIC DECLARE_COMPLETION(pagebuf_daemon_done);
+STATIC struct task_struct *pagebuf_daemon_task;
+STATIC int pagebuf_daemon_active;
+STATIC int force_flush;
+
+
+STATIC int
+pagebuf_daemon_wakeup(
+	int			priority,
+	unsigned int		mask)
+{
+	force_flush = 1;
+	barrier();
+	wake_up_process(pagebuf_daemon_task);
+	return 0;
+}
+
+STATIC int
+pagebuf_daemon(
+	void			*data)
+{
+	struct list_head	tmp;
+	unsigned long		age;
+	xfs_buftarg_t		*target;
+	xfs_buf_t		*pb, *n;
+
+	/*  Set up the thread  */
+	daemonize("xfsbufd");
+	current->flags |= PF_MEMALLOC;
+
+	pagebuf_daemon_task = current;
+	pagebuf_daemon_active = 1;
+	barrier();
+
+	INIT_LIST_HEAD(&tmp);
+	do {
+		try_to_freeze(PF_FREEZE);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
+
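+		/*
+		 * Write out delayed-write buffers that have been queued
+		 * longer than xfs_buf_age_centisecs (or all eligible
+		 * buffers when a flush has been forced): move them onto
+		 * a private list under the queue lock, then issue the
+		 * I/O once the lock has been dropped.
+		 */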
+		age = (xfs_buf_age_centisecs * HZ) / 100;
+		spin_lock(&pbd_delwrite_lock);
+		list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
+			PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
+			ASSERT(pb->pb_flags & PBF_DELWRI);
+
+			if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
+				if (!force_flush &&
+				    time_before(jiffies,
+						pb->pb_queuetime + age)) {
+					pagebuf_unlock(pb);
+					break;
+				}
+
+				pb->pb_flags &= ~PBF_DELWRI;
+				pb->pb_flags |= PBF_WRITE;
+				list_move(&pb->pb_list, &tmp);
+			}
+		}
+		spin_unlock(&pbd_delwrite_lock);
+
+		while (!list_empty(&tmp)) {
+			pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+			target = pb->pb_target;
+
+			list_del_init(&pb->pb_list);
+			pagebuf_iostrategy(pb);
+
+			blk_run_address_space(target->pbr_mapping);
+		}
+
+		if (as_list_len > 0)
+			purge_addresses();
+
+		force_flush = 0;
+	} while (pagebuf_daemon_active);
+
+	complete_and_exit(&pagebuf_daemon_done, 0);
+}
+
+/*
+ * Write out all incore delayed-write buffers belonging to the given
+ * target.  This is used in filesystem error handling to preserve the
+ * consistency of its metadata.
+ */
+int
+xfs_flush_buftarg(
+	xfs_buftarg_t		*target,
+	int			wait)
+{
+	struct list_head	tmp;
+	xfs_buf_t		*pb, *n;
+	int			pincount = 0;
+
+	pagebuf_runall_queues(pagebuf_dataio_workqueue);
+	pagebuf_runall_queues(pagebuf_logio_workqueue);
+
+	INIT_LIST_HEAD(&tmp);
+	spin_lock(&pbd_delwrite_lock);
+	list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
+
+		if (pb->pb_target != target)
+			continue;
+
+		ASSERT(pb->pb_flags & PBF_DELWRI);
+		PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
+		if (pagebuf_ispin(pb)) {
+			pincount++;
+			continue;
+		}
+
+		pb->pb_flags &= ~PBF_DELWRI;
+		pb->pb_flags |= PBF_WRITE;
+		list_move(&pb->pb_list, &tmp);
+	}
+	spin_unlock(&pbd_delwrite_lock);
+
+	/*
+	 * Dropped the delayed write list lock, now walk the temporary list
+	 */
+	list_for_each_entry_safe(pb, n, &tmp, pb_list) {
+		if (wait)
+			pb->pb_flags &= ~PBF_ASYNC;
+		else
+			list_del_init(&pb->pb_list);
+
+		pagebuf_lock(pb);
+		pagebuf_iostrategy(pb);
+	}
+
+	/*
+	 * Remaining list items must be flushed before returning
+	 */
+	while (!list_empty(&tmp)) {
+		pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+
+		list_del_init(&pb->pb_list);
+		xfs_iowait(pb);
+		xfs_buf_relse(pb);
+	}
+
+	if (wait)
+		blk_run_address_space(target->pbr_mapping);
+
+	return pincount;
+}
+
+STATIC int
+pagebuf_daemon_start(void)
+{
+	int		rval;
+
+	pagebuf_logio_workqueue = create_workqueue("xfslogd");
+	if (!pagebuf_logio_workqueue)
+		return -ENOMEM;
+
+	pagebuf_dataio_workqueue = create_workqueue("xfsdatad");
+	if (!pagebuf_dataio_workqueue) {
+		destroy_workqueue(pagebuf_logio_workqueue);
+		return -ENOMEM;
+	}
+
+	rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
+	if (rval < 0) {
+		destroy_workqueue(pagebuf_logio_workqueue);
+		destroy_workqueue(pagebuf_dataio_workqueue);
+	}
+
+	return rval;
+}
+
+/*
+ * pagebuf_daemon_stop
+ *
+ * Note: do not mark as __exit, it is called from pagebuf_terminate.
+ */
+STATIC void
+pagebuf_daemon_stop(void)
+{
+	pagebuf_daemon_active = 0;
+	barrier();
+	wait_for_completion(&pagebuf_daemon_done);
+
+	destroy_workqueue(pagebuf_logio_workqueue);
+	destroy_workqueue(pagebuf_dataio_workqueue);
+}
+
+/*
+ *	Initialization and Termination
+ */
+
+int __init
+pagebuf_init(void)
+{
+	pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (pagebuf_cache == NULL) {
+		printk("XFS: couldn't init xfs_buf_t cache\n");
+		pagebuf_terminate();
+		return -ENOMEM;
+	}
+
+#ifdef PAGEBUF_TRACE
+	pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#endif
+
+	pagebuf_daemon_start();
+
+	pagebuf_shake = kmem_shake_register(pagebuf_daemon_wakeup);
+	if (pagebuf_shake == NULL) {
+		pagebuf_terminate();
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+
+/*
+ *	pagebuf_terminate.
+ *
+ *	Note: do not mark as __exit, this is also called from the __init code.
+ */
+void
+pagebuf_terminate(void)
+{
+	pagebuf_daemon_stop();
+
+#ifdef PAGEBUF_TRACE
+	ktrace_free(pagebuf_trace_buf);
+#endif
+
+	kmem_zone_destroy(pagebuf_cache);
+	kmem_shake_deregister(pagebuf_shake);
+}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
new file mode 100644
index 0000000..74deed8
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Written by Steve Lord, Jim Mostek, Russell Cattelan at SGI
+ */
+
+#ifndef __XFS_BUF_H__
+#define __XFS_BUF_H__
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/uio.h>
+
+/*
+ *	Base types
+ */
+
+#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
+
+#define page_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
+#define page_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
+#define page_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
+#define page_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)
+
+typedef enum page_buf_rw_e {
+	PBRW_READ = 1,			/* transfer into target memory */
+	PBRW_WRITE = 2,			/* transfer from target memory */
+	PBRW_ZERO = 3			/* Zero target memory */
+} page_buf_rw_t;
+
+
+typedef enum page_buf_flags_e {		/* pb_flags values */
+	PBF_READ = (1 << 0),	/* buffer intended for reading from device */
+	PBF_WRITE = (1 << 1),	/* buffer intended for writing to device   */
+	PBF_MAPPED = (1 << 2),  /* buffer mapped (pb_addr valid)           */
+	PBF_PARTIAL = (1 << 3), /* buffer partially read                   */
+	PBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
+	PBF_NONE = (1 << 5),    /* buffer not read at all                  */
+	PBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
+	PBF_STALE = (1 << 7),	/* buffer has been staled, do not find it  */
+	PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
+	PBF_FS_DATAIOD = (1 << 9),  /* schedule IO completion on fs datad  */
+	PBF_FORCEIO = (1 << 10),    /* ignore any cache state		   */
+	PBF_FLUSH = (1 << 11),	    /* flush disk write cache		   */
+	PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead		   */
+
+	/* flags used only as arguments to access routines */
+	PBF_LOCK = (1 << 14),       /* lock requested			   */
+	PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait	   */
+	PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread	   */
+
+	/* flags used only internally */
+	_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
+	_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()		   */
+	_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
+} page_buf_flags_t;
+
+#define PBF_UPDATE (PBF_READ | PBF_WRITE)
+#define PBF_NOT_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0)
+#define PBF_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)
+
+typedef struct xfs_bufhash {
+	struct list_head	bh_list;
+	spinlock_t		bh_lock;
+} xfs_bufhash_t;
+
+typedef struct xfs_buftarg {
+	dev_t			pbr_dev;
+	struct block_device	*pbr_bdev;
+	struct address_space	*pbr_mapping;
+	unsigned int		pbr_bsize;
+	unsigned int		pbr_sshift;
+	size_t			pbr_smask;
+
+	/* per-device buffer hash table */
+	uint			bt_hashmask;
+	uint			bt_hashshift;
+	xfs_bufhash_t		*bt_hash;
+} xfs_buftarg_t;
+
+/*
+ *	xfs_buf_t:  Buffer structure for page cache-based buffers
+ *
+ * This buffer structure is used by the page cache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer.  The actual I/O
+ * is performed with buffer_head structures, as required by drivers.
+ *
+ * The buffer structure is used on a temporary basis only, and discarded when
+ * released.  The real data storage is recorded in the page cache.  Metadata is
+ * hashed to the block device on which the file system resides.
+ */
+
+struct xfs_buf;
+
+/* call-back function on I/O completion */
+typedef void (*page_buf_iodone_t)(struct xfs_buf *);
+/* call-back function on buffer release */
+typedef void (*page_buf_relse_t)(struct xfs_buf *);
+/* pre-write function */
+typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
+
+#define PB_PAGES	2
+
+typedef struct xfs_buf {
+	struct semaphore	pb_sema;	/* semaphore for lockables  */
+	unsigned long		pb_queuetime;	/* time buffer was queued   */
+	atomic_t		pb_pin_count;	/* pin count		    */
+	wait_queue_head_t	pb_waiters;	/* unpin waiters	    */
+	struct list_head	pb_list;
+	page_buf_flags_t	pb_flags;	/* status flags */
+	struct list_head	pb_hash_list;	/* hash table list */
+	xfs_bufhash_t		*pb_hash;	/* hash table list start */
+	xfs_buftarg_t		*pb_target;	/* buffer target (device) */
+	atomic_t		pb_hold;	/* reference count */
+	xfs_daddr_t		pb_bn;		/* block number for I/O */
+	loff_t			pb_file_offset;	/* offset in file */
+	size_t			pb_buffer_length; /* size of buffer in bytes */
+	size_t			pb_count_desired; /* desired transfer size */
+	void			*pb_addr;	/* virtual address of buffer */
+	struct work_struct	pb_iodone_work;
+	atomic_t		pb_io_remaining;/* #outstanding I/O requests */
+	page_buf_iodone_t	pb_iodone;	/* I/O completion function */
+	page_buf_relse_t	pb_relse;	/* releasing function */
+	page_buf_bdstrat_t	pb_strat;	/* pre-write function */
+	struct semaphore	pb_iodonesema;	/* Semaphore for I/O waiters */
+	void			*pb_fspriv;
+	void			*pb_fspriv2;
+	void			*pb_fspriv3;
+	unsigned short		pb_error;	/* error code on I/O */
+ 	unsigned short		pb_locked;	/* page array is locked */
+ 	unsigned int		pb_page_count;	/* size of page array */
+	unsigned int		pb_offset;	/* page offset in first page */
+	struct page		**pb_pages;	/* array of page pointers */
+	struct page		*pb_page_array[PB_PAGES]; /* inline pages */
+#ifdef PAGEBUF_LOCK_TRACKING
+	int			pb_last_holder;
+#endif
+} xfs_buf_t;
+
+
+/* Finding and Reading Buffers */
+
+extern xfs_buf_t *_pagebuf_find(	/* find buffer for block if	*/
+					/* the block is in memory	*/
+		xfs_buftarg_t *,	/* inode for block		*/
+		loff_t,			/* starting offset of range	*/
+		size_t,			/* length of range		*/
+		page_buf_flags_t,	/* PBF_LOCK			*/
+	        xfs_buf_t *);		/* newly allocated buffer	*/
+
+#define xfs_incore(buftarg,blkno,len,lockit) \
+	_pagebuf_find(buftarg, blkno ,len, lockit, NULL)
+
+extern xfs_buf_t *xfs_buf_get_flags(	/* allocate a buffer		*/
+		xfs_buftarg_t *,	/* inode for buffer		*/
+		loff_t,			/* starting offset of range     */
+		size_t,			/* length of range              */
+		page_buf_flags_t);	/* PBF_LOCK, PBF_READ,		*/
+					/* PBF_ASYNC			*/
+
+#define xfs_buf_get(target, blkno, len, flags) \
+	xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
+
+extern xfs_buf_t *xfs_buf_read_flags(	/* allocate and read a buffer	*/
+		xfs_buftarg_t *,	/* inode for buffer		*/
+		loff_t,			/* starting offset of range	*/
+		size_t,			/* length of range		*/
+		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC		*/
+
+#define xfs_buf_read(target, blkno, len, flags) \
+	xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
+
+extern xfs_buf_t *pagebuf_lookup(
+		xfs_buftarg_t *,
+		loff_t,			/* starting offset of range	*/
+		size_t,			/* length of range		*/
+		page_buf_flags_t);	/* PBF_READ, PBF_WRITE,		*/
+					/* PBF_FORCEIO, 		*/
+
+extern xfs_buf_t *pagebuf_get_empty(	/* allocate pagebuf struct with	*/
+					/*  no memory or disk address	*/
+		size_t len,
+		xfs_buftarg_t *);	/* mount point "fake" inode	*/
+
+extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct	*/
+					/* without disk address		*/
+		size_t len,
+		xfs_buftarg_t *);	/* mount point "fake" inode	*/
+
+extern int pagebuf_associate_memory(
+		xfs_buf_t *,
+		void *,
+		size_t);
+
+extern void pagebuf_hold(		/* increment reference count	*/
+		xfs_buf_t *);		/* buffer to hold		*/
+
+extern void pagebuf_readahead(		/* read ahead into cache	*/
+		xfs_buftarg_t  *,	/* target for buffer (or NULL)	*/
+		loff_t,			/* starting offset of range     */
+		size_t,			/* length of range              */
+		page_buf_flags_t);	/* additional read flags	*/
+
+/* Releasing Buffers */
+
+extern void pagebuf_free(		/* deallocate a buffer		*/
+		xfs_buf_t *);		/* buffer to deallocate		*/
+
+extern void pagebuf_rele(		/* release hold on a buffer	*/
+		xfs_buf_t *);		/* buffer to release		*/
+
+/* Locking and Unlocking Buffers */
+
+extern int pagebuf_cond_lock(		/* lock buffer, if not locked	*/
+					/* (returns -EBUSY if locked)	*/
+		xfs_buf_t *);		/* buffer to lock		*/
+
+extern int pagebuf_lock_value(		/* return count on lock		*/
+		xfs_buf_t *);          /* buffer to check              */
+
+extern int pagebuf_lock(		/* lock buffer                  */
+		xfs_buf_t *);          /* buffer to lock               */
+
+extern void pagebuf_unlock(		/* unlock buffer		*/
+		xfs_buf_t *);		/* buffer to unlock		*/
+
+/* Buffer Read and Write Routines */
+
+extern void pagebuf_iodone(		/* mark buffer I/O complete	*/
+		xfs_buf_t *,		/* buffer to mark		*/
+		int,			/* use data/log helper thread.	*/
+		int);			/* run completion locally, or in
+					 * a helper thread.		*/
+
+extern void pagebuf_ioerror(		/* mark buffer in error	(or not) */
+		xfs_buf_t *,		/* buffer to mark		*/
+		int);			/* error to store (0 if none)	*/
+
+extern int pagebuf_iostart(		/* start I/O on a buffer	*/
+		xfs_buf_t *,		/* buffer to start		*/
+		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC,		*/
+					/* PBF_READ, PBF_WRITE,		*/
+					/* PBF_DELWRI			*/
+
+extern int pagebuf_iorequest(		/* start real I/O		*/
+		xfs_buf_t *);		/* buffer to convey to device	*/
+
+extern int pagebuf_iowait(		/* wait for buffer I/O done	*/
+		xfs_buf_t *);		/* buffer to wait on		*/
+
+extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
+		xfs_buf_t *,		/* buffer to manipulate		*/
+		size_t,			/* starting buffer offset	*/
+		size_t,			/* length in buffer		*/
+		caddr_t,		/* data pointer			*/
+		page_buf_rw_t);		/* direction			*/
+
+static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+{
+	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+}
+
+static inline int pagebuf_geterror(xfs_buf_t *pb)
+{
+	return pb ? pb->pb_error : ENOMEM;
+}
+
+/* Buffer Utility Routines */
+
+extern caddr_t pagebuf_offset(		/* pointer at offset in buffer	*/
+		xfs_buf_t *,		/* buffer to offset into	*/
+		size_t);		/* offset			*/
+
+/* Pinning Buffer Storage in Memory */
+
+extern void pagebuf_pin(		/* pin buffer in memory		*/
+		xfs_buf_t *);		/* buffer to pin		*/
+
+extern void pagebuf_unpin(		/* unpin buffered data		*/
+		xfs_buf_t *);		/* buffer to unpin		*/
+
+extern int pagebuf_ispin(		/* check if buffer is pinned	*/
+		xfs_buf_t *);		/* buffer to check		*/
+
+/* Delayed Write Buffer Routines */
+
+extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+
+/* Buffer Daemon Setup Routines */
+
+extern int pagebuf_init(void);
+extern void pagebuf_terminate(void);
+
+
+#ifdef PAGEBUF_TRACE
+extern ktrace_t *pagebuf_trace_buf;
+extern void pagebuf_trace(
+		xfs_buf_t *,		/* buffer being traced		*/
+		char *,			/* description of operation	*/
+		void *,			/* arbitrary diagnostic value	*/
+		void *);		/* return address		*/
+#else
+# define pagebuf_trace(pb, id, ptr, ra)	do { } while (0)
+#endif
+
+#define pagebuf_target_name(target)	\
+	({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+
+
+
+
+
+/* These are just for xfs_syncsub... it sets an internal variable
+ * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
+ */
+#define XFS_B_ASYNC		PBF_ASYNC
+#define XFS_B_DELWRI		PBF_DELWRI
+#define XFS_B_READ		PBF_READ
+#define XFS_B_WRITE		PBF_WRITE
+#define XFS_B_STALE		PBF_STALE
+
+#define XFS_BUF_TRYLOCK		PBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
+#define XFS_BUF_LOCK		PBF_LOCK
+#define XFS_BUF_MAPPED		PBF_MAPPED
+
+#define BUF_BUSY		PBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
+#define XFS_BUF_ZEROFLAGS(x)	\
+	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
+
+#define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(x)	do {				\
+					XFS_BUF_STALE(x);	\
+					pagebuf_delwri_dequeue(x);	\
+					XFS_BUF_DONE(x);	\
+				} while (0)
+
+#define XFS_BUF_MANAGE		PBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)
+
+#define XFS_BUF_DELAYWRITE(x)	 ((x)->pb_flags |= PBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(x)	 pagebuf_delwri_dequeue(x)
+#define XFS_BUF_ISDELAYWRITE(x)	 ((x)->pb_flags & PBF_DELWRI)
+
+#define XFS_BUF_ERROR(x,no)	 pagebuf_ioerror(x,no)
+#define XFS_BUF_GETERROR(x)	 pagebuf_geterror(x)
+#define XFS_BUF_ISERROR(x)	 (pagebuf_geterror(x)?1:0)
+
+#define XFS_BUF_DONE(x)		 ((x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE))
+#define XFS_BUF_UNDONE(x)	 ((x)->pb_flags |= PBF_PARTIAL|PBF_NONE)
+#define XFS_BUF_ISDONE(x)	 (!(PBF_NOT_DONE(x)))
+
+#define XFS_BUF_BUSY(x)		 ((x)->pb_flags |= PBF_FORCEIO)
+#define XFS_BUF_UNBUSY(x)	 ((x)->pb_flags &= ~PBF_FORCEIO)
+#define XFS_BUF_ISBUSY(x)	 (1)
+
+#define XFS_BUF_ASYNC(x)	 ((x)->pb_flags |= PBF_ASYNC)
+#define XFS_BUF_UNASYNC(x)	 ((x)->pb_flags &= ~PBF_ASYNC)
+#define XFS_BUF_ISASYNC(x)	 ((x)->pb_flags & PBF_ASYNC)
+
+#define XFS_BUF_FLUSH(x)	 ((x)->pb_flags |= PBF_FLUSH)
+#define XFS_BUF_UNFLUSH(x)	 ((x)->pb_flags &= ~PBF_FLUSH)
+#define XFS_BUF_ISFLUSH(x)	 ((x)->pb_flags & PBF_FLUSH)
+
+#define XFS_BUF_SHUT(x)		 printk("XFS_BUF_SHUT not implemented yet\n")
+#define XFS_BUF_UNSHUT(x)	 printk("XFS_BUF_UNSHUT not implemented yet\n")
+#define XFS_BUF_ISSHUT(x)	 (0)
+
+#define XFS_BUF_HOLD(x)		pagebuf_hold(x)
+#define XFS_BUF_READ(x)		((x)->pb_flags |= PBF_READ)
+#define XFS_BUF_UNREAD(x)	((x)->pb_flags &= ~PBF_READ)
+#define XFS_BUF_ISREAD(x)	((x)->pb_flags & PBF_READ)
+
+#define XFS_BUF_WRITE(x)	((x)->pb_flags |= PBF_WRITE)
+#define XFS_BUF_UNWRITE(x)	((x)->pb_flags &= ~PBF_WRITE)
+#define XFS_BUF_ISWRITE(x)	((x)->pb_flags & PBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(x)	 (0)
+#define XFS_BUF_UNUNINITIAL(x)	 (0)
+
+#define XFS_BUF_BP_ISMAPPED(bp)	 1
+
+#define XFS_BUF_DATAIO(x)	((x)->pb_flags |= PBF_FS_DATAIOD)
+#define XFS_BUF_UNDATAIO(x)	((x)->pb_flags &= ~PBF_FS_DATAIOD)
+
+#define XFS_BUF_IODONE_FUNC(buf)	(buf)->pb_iodone
+#define XFS_BUF_SET_IODONE_FUNC(buf, func)	\
+			(buf)->pb_iodone = (func)
+#define XFS_BUF_CLR_IODONE_FUNC(buf)		\
+			(buf)->pb_iodone = NULL
+#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)	\
+			(buf)->pb_strat = (func)
+#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)		\
+			(buf)->pb_strat = NULL
+
+#define XFS_BUF_FSPRIVATE(buf, type)		\
+			((type)(buf)->pb_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(buf, value)	\
+			(buf)->pb_fspriv = (void *)(value)
+#define XFS_BUF_FSPRIVATE2(buf, type)		\
+			((type)(buf)->pb_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(buf, value)	\
+			(buf)->pb_fspriv2 = (void *)(value)
+#define XFS_BUF_FSPRIVATE3(buf, type)		\
+			((type)(buf)->pb_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(buf, value)	\
+			(buf)->pb_fspriv3  = (void *)(value)
+#define XFS_BUF_SET_START(buf)
+
+#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
+			(buf)->pb_relse = (value)
+
+#define XFS_BUF_PTR(bp)		(xfs_caddr_t)((bp)->pb_addr)
+
+extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
+{
+	if (bp->pb_flags & PBF_MAPPED)
+		return XFS_BUF_PTR(bp) + offset;
+	return (xfs_caddr_t) pagebuf_offset(bp, offset);
+}
+
+#define XFS_BUF_SET_PTR(bp, val, count)		\
+				pagebuf_associate_memory(bp, val, count)
+#define XFS_BUF_ADDR(bp)	((bp)->pb_bn)
+#define XFS_BUF_SET_ADDR(bp, blk)		\
+			((bp)->pb_bn = (xfs_daddr_t)(blk))
+#define XFS_BUF_OFFSET(bp)	((bp)->pb_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off)		\
+			((bp)->pb_file_offset = (off))
+#define XFS_BUF_COUNT(bp)	((bp)->pb_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt)		\
+			((bp)->pb_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp)	((bp)->pb_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt)		\
+			((bp)->pb_buffer_length = (cnt))
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
+#define XFS_BUF_SET_VTYPE(bp, type)
+#define XFS_BUF_SET_REF(bp, ref)
+
+#define XFS_BUF_ISPINNED(bp)	pagebuf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp)	pagebuf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp)	(pagebuf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp)	pagebuf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x)	pagebuf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
+
+/* setup the buffer target from a buftarg structure */
+#define XFS_BUF_SET_TARGET(bp, target)	\
+		(bp)->pb_target = (target)
+#define XFS_BUF_TARGET(bp)	((bp)->pb_target)
+#define XFS_BUFTARG_NAME(target)	\
+		pagebuf_target_name(target)
+
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
+#define XFS_BUF_SET_VTYPE(bp, type)
+#define XFS_BUF_SET_REF(bp, ref)
+
+static inline int	xfs_bawrite(void *mp, xfs_buf_t *bp)
+{
+	bp->pb_fspriv3 = mp;
+	bp->pb_strat = xfs_bdstrat_cb;
+	pagebuf_delwri_dequeue(bp);
+	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+}
+
+static inline void	xfs_buf_relse(xfs_buf_t *bp)
+{
+	if (!bp->pb_relse)
+		pagebuf_unlock(bp);
+	pagebuf_rele(bp);
+}
+
+#define xfs_bpin(bp)		pagebuf_pin(bp)
+#define xfs_bunpin(bp)		pagebuf_unpin(bp)
+
+#define xfs_buftrace(id, bp)	\
+	    pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+
+#define xfs_biodone(pb)		    \
+	    pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)
+
+#define xfs_biomove(pb, off, len, data, rw) \
+	    pagebuf_iomove((pb), (off), (len), (data), \
+		((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+
+#define xfs_biozero(pb, off, len) \
+	    pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+
+
+static inline int	XFS_bwrite(xfs_buf_t *pb)
+{
+	int	iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+	int	error = 0;
+
+	if (!iowait)
+		pb->pb_flags |= _PBF_RUN_QUEUES;
+
+	pagebuf_delwri_dequeue(pb);
+	pagebuf_iostrategy(pb);
+	if (iowait) {
+		error = pagebuf_iowait(pb);
+		xfs_buf_relse(pb);
+	}
+	return error;
+}
+
+#define XFS_bdwrite(pb)		     \
+	    pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+
+static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
+{
+	bp->pb_strat = xfs_bdstrat_cb;
+	bp->pb_fspriv3 = mp;
+
+	return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+}
+
+#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
+
+#define xfs_iowait(pb)	pagebuf_iowait(pb)
+
+#define xfs_baread(target, rablkno, ralen)  \
+	pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
+
+#define xfs_buf_get_empty(len, target)	pagebuf_get_empty((len), (target))
+#define xfs_buf_get_noaddr(len, target)	pagebuf_get_no_daddr((len), (target))
+#define xfs_buf_free(bp)		pagebuf_free(bp)
+
+
+/*
+ *	Handling of buftargs.
+ */
+
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
+extern void xfs_free_buftarg(xfs_buftarg_t *, int);
+extern void xfs_wait_buftarg(xfs_buftarg_t *);
+extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
+extern void xfs_incore_relse(xfs_buftarg_t *, int, int);
+extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+
+#define xfs_getsize_buftarg(buftarg) \
+	block_size((buftarg)->pbr_bdev)
+#define xfs_readonly_buftarg(buftarg) \
+	bdev_read_only((buftarg)->pbr_bdev)
+#define xfs_binval(buftarg) \
+	xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg) \
+	xfs_flush_buftarg(buftarg, 1)
+
+#endif	/* __XFS_BUF_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
new file mode 100644
index 0000000..00c4584
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_cred.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_CRED_H__
+#define __XFS_CRED_H__
+
+/*
+ * Credentials
+ */
+typedef struct cred {
+	/* EMPTY */
+} cred_t;
+
+extern struct cred *sys_cred;
+
+/* This is a hack (it assumes sys_cred is the only cred_t in the system). */
+static __inline int capable_cred(cred_t *cr, int cid)
+{
+	return (cr == sys_cred) ? 1 : capable(cid);
+}
+
+#endif  /* __XFS_CRED_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
new file mode 100644
index 0000000..f372a1a
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_types.h"
+#include "xfs_dmapi.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_mount.h"
+#include "xfs_export.h"
+
+/*
+ * XFS encodes and decodes the fileid portion of NFS filehandles
+ * itself instead of letting the generic NFS code do it.  This
+ * allows filesystems with 64 bit inode numbers to be exported.
+ *
+ * Note that a side effect is that xfs_vget() won't be passed a
+ * zero inode/generation pair under normal circumstances.  However,
+ * as a malicious client could send us such data, the check
+ * remains in that code.
+ */
+
+
+STATIC struct dentry *
+linvfs_decode_fh(
+	struct super_block	*sb,
+	__u32			*fh,
+	int			fh_len,
+	int			fileid_type,
+	int (*acceptable)(
+		void		*context,
+		struct dentry	*de),
+	void			*context)
+{
+	xfs_fid2_t		ifid;
+	xfs_fid2_t		pfid;
+	void			*parent = NULL;
+	int			is64 = 0;
+	__u32			*p = fh;
+
+#if XFS_BIG_INUMS
+	is64 = (fileid_type & XFS_FILEID_TYPE_64FLAG);
+	fileid_type &= ~XFS_FILEID_TYPE_64FLAG;
+#endif
+
+	/*
+	 * Note that we only accept fileids which are long enough
+	 * rather than allow the parent generation number to default
+	 * to zero.  XFS considers zero a valid generation number not
+	 * an invalid/wildcard value.  There's little point printk'ing
+	 * a warning here as we don't have the client information
+	 * which would make such a warning useful.
+	 */
+	if (fileid_type > 2 ||
+	    fh_len < xfs_fileid_length((fileid_type == 2), is64))
+		return NULL;
+
+	p = xfs_fileid_decode_fid2(p, &ifid, is64);
+
+	if (fileid_type == 2) {
+		p = xfs_fileid_decode_fid2(p, &pfid, is64);
+		parent = &pfid;
+	}
+	
+	fh = (__u32 *)&ifid;
+	return find_exported_dentry(sb, fh, parent, acceptable, context);
+}
+
+
+STATIC int
+linvfs_encode_fh(
+	struct dentry		*dentry,
+	__u32			*fh,
+	int			*max_len,
+	int			connectable)
+{
+	struct inode		*inode = dentry->d_inode;
+	int			type = 1;
+	__u32			*p = fh;
+	int			len;
+	int			is64 = 0;
+#if XFS_BIG_INUMS
+	vfs_t			*vfs = LINVFS_GET_VFS(inode->i_sb);
+	xfs_mount_t		*mp = XFS_VFSTOM(vfs);
+	
+	if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT)) {
+		/* filesystem may contain 64bit inode numbers */
+		is64 = XFS_FILEID_TYPE_64FLAG;
+	}
+#endif
+
+	/* Directories don't need their parent encoded, they have ".." */
+	if (S_ISDIR(inode->i_mode))
+	    connectable = 0;
+
+	/*
+	 * Only encode if there is enough space given.  In practice
+	 * this means we can't export a filesystem with 64bit inodes
+	 * over NFSv2 with the subtree_check export option; the other
+	 * seven combinations work.  The real answer is "don't use v2".
+	 */
+	len = xfs_fileid_length(connectable, is64);
+	if (*max_len < len)
+		return 255;
+	*max_len = len;
+
+	p = xfs_fileid_encode_inode(p, inode, is64);
+	if (connectable) {
+		spin_lock(&dentry->d_lock);
+		p = xfs_fileid_encode_inode(p, dentry->d_parent->d_inode, is64);
+		spin_unlock(&dentry->d_lock);
+		type = 2;
+	}
+	BUG_ON((p - fh) != len);
+	return type | is64;
+}
+
+STATIC struct dentry *
+linvfs_get_dentry(
+	struct super_block	*sb,
+	void			*data)
+{
+	vnode_t			*vp;
+	struct inode		*inode;
+	struct dentry		*result;
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	VFS_VGET(vfsp, &vp, (fid_t *)data, error);
+	if (error || vp == NULL)
+		return ERR_PTR(-ESTALE);
+
+	inode = LINVFS_GET_IP(vp);
+	result = d_alloc_anon(inode);
+	if (!result) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	return result;
+}
+
+STATIC struct dentry *
+linvfs_get_parent(
+	struct dentry		*child)
+{
+	int			error;
+	vnode_t			*vp, *cvp;
+	struct dentry		*parent;
+	struct dentry		dotdot;
+
+	dotdot.d_name.name = "..";
+	dotdot.d_name.len = 2;
+	dotdot.d_inode = NULL;
+
+	cvp = NULL;
+	vp = LINVFS_GET_VP(child->d_inode);
+	VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);
+	if (unlikely(error))
+		return ERR_PTR(-error);
+
+	parent = d_alloc_anon(LINVFS_GET_IP(cvp));
+	if (unlikely(!parent)) {
+		VN_RELE(cvp);
+		return ERR_PTR(-ENOMEM);
+	}
+	return parent;
+}
+
+struct export_operations linvfs_export_ops = {
+	.decode_fh		= linvfs_decode_fh,
+	.encode_fh		= linvfs_encode_fh,
+	.get_parent		= linvfs_get_parent,
+	.get_dentry		= linvfs_get_dentry,
+};
diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/linux-2.6/xfs_export.h
new file mode 100644
index 0000000..60b2aba
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_export.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_EXPORT_H__
+#define __XFS_EXPORT_H__
+
+/*
+ * Common defines for code related to exporting XFS filesystems over NFS.
+ *
+ * The NFS fileid goes out on the wire as an array of
+ * 32bit unsigned ints in host order.  There are 5 possible
+ * formats.
+ *
+ * (1)	fileid_type=0x00
+ *	(no fileid data; handled by the generic code)
+ *
+ * (2)	fileid_type=0x01
+ *	inode-num
+ *	generation
+ *
+ * (3)	fileid_type=0x02
+ *	inode-num
+ *	generation
+ *	parent-inode-num
+ *	parent-generation
+ *
+ * (4)	fileid_type=0x81
+ *	inode-num-lo32
+ *	inode-num-hi32
+ *	generation
+ *
+ * (5)	fileid_type=0x82
+ *	inode-num-lo32
+ *	inode-num-hi32
+ *	generation
+ *	parent-inode-num-lo32
+ *	parent-inode-num-hi32
+ *	parent-generation
+ *
+ * Note, the NFS filehandle also includes an fsid portion which
+ * may have an inode number in it.  That number is hardcoded to
+ * 32bits and there is no way for XFS to intercept it.  In
+ * practice this means when exporting an XFS filesystem with 64bit
+ * inodes you should either export the mountpoint (rather than
+ * a subdirectory) or use the "fsid" export option.
+ */
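+
+/*
+ * The fileid data lengths implied by the formats above, in 32bit
+ * words, are: type 0x01 -> 2, 0x02 -> 4, 0x81 -> 3 and 0x82 -> 6;
+ * xfs_fileid_length() below computes exactly these values.
+ */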
+
+/* This flag goes on the wire.  Don't play with it. */
+#define XFS_FILEID_TYPE_64FLAG	0x80	/* NFS fileid has 64bit inodes */
+
+/* Calculate the length in u32 units of the fileid data */
+static inline int
+xfs_fileid_length(int hasparent, int is64)
+{
+	return hasparent ? (is64 ? 6 : 4) : (is64 ? 3 : 2);
+}
+
+/*
+ * Decode encoded inode information (either for the inode itself
+ * or the parent) into an xfs_fid2_t structure.  Advances and
+ * returns the new data pointer.
+ */
+static inline __u32 *
+xfs_fileid_decode_fid2(__u32 *p, xfs_fid2_t *fid, int is64)
+{
+	fid->fid_len = sizeof(xfs_fid2_t) - sizeof(fid->fid_len);
+	fid->fid_pad = 0;
+	fid->fid_ino = *p++;
+#if XFS_BIG_INUMS
+	if (is64)
+		fid->fid_ino |= (((__u64)(*p++)) << 32);
+#endif
+	fid->fid_gen = *p++;
+	return p;
+}
+
+/*
+ * Encode inode information (either for the inode itself or the
+ * parent) into a fileid buffer.  Advances and returns the new
+ * data pointer.
+ */
+static inline __u32 *
+xfs_fileid_encode_inode(__u32 *p, struct inode *inode, int is64)
+{
+	*p++ = (__u32)inode->i_ino;
+#if XFS_BIG_INUMS
+	if (is64)
+		*p++ = (__u32)(inode->i_ino >> 32);
+#endif
+	*p++ = inode->i_generation;
+	return p;
+}
+
+#endif	/* __XFS_EXPORT_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
new file mode 100644
index 0000000..9f057a4
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_trans.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+#include "xfs_ioctl32.h"
+
+#include <linux/dcache.h>
+#include <linux/smp_lock.h>
+
+static struct vm_operations_struct linvfs_file_vm_ops;
+
+
+STATIC inline ssize_t
+__linvfs_read(
+	struct kiocb		*iocb,
+	char			__user *buf,
+	int			ioflags,
+	size_t			count,
+	loff_t			pos)
+{
+	struct iovec		iov = {buf, count};
+	struct file		*file = iocb->ki_filp;
+	vnode_t			*vp = LINVFS_GET_VP(file->f_dentry->d_inode);
+	ssize_t			rval;
+
+	BUG_ON(iocb->ki_pos != pos);
+
+	if (unlikely(file->f_flags & O_DIRECT))
+		ioflags |= IO_ISDIRECT;
+	VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
+	return rval;
+}
+
+
+STATIC ssize_t
+linvfs_aio_read(
+	struct kiocb		*iocb,
+	char			__user *buf,
+	size_t			count,
+	loff_t			pos)
+{
+	return __linvfs_read(iocb, buf, IO_ISAIO, count, pos);
+}
+
+STATIC ssize_t
+linvfs_aio_read_invis(
+	struct kiocb		*iocb,
+	char			__user *buf,
+	size_t			count,
+	loff_t			pos)
+{
+	return __linvfs_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
+}
+
+
+STATIC inline ssize_t
+__linvfs_write(
+	struct kiocb	*iocb,
+	const char	__user *buf,
+	int		ioflags,
+	size_t		count,
+	loff_t		pos)
+{
+	struct iovec	iov = {(void __user *)buf, count};
+	struct file	*file = iocb->ki_filp;
+	struct inode	*inode = file->f_mapping->host;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	ssize_t		rval;
+
+	BUG_ON(iocb->ki_pos != pos);
+	if (unlikely(file->f_flags & O_DIRECT))
+		ioflags |= IO_ISDIRECT;
+
+	VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
+	return rval;
+}
+
+
+STATIC ssize_t
+linvfs_aio_write(
+	struct kiocb		*iocb,
+	const char		__user *buf,
+	size_t			count,
+	loff_t			pos)
+{
+	return __linvfs_write(iocb, buf, IO_ISAIO, count, pos);
+}
+
+STATIC ssize_t
+linvfs_aio_write_invis(
+	struct kiocb		*iocb,
+	const char		__user *buf,
+	size_t			count,
+	loff_t			pos)
+{
+	return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
+}
+
+
+STATIC inline ssize_t
+__linvfs_readv(
+	struct file		*file,
+	const struct iovec 	*iov,
+	int			ioflags,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	struct inode	*inode = file->f_mapping->host;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	struct		kiocb kiocb;
+	ssize_t		rval;
+
+	init_sync_kiocb(&kiocb, file);
+	kiocb.ki_pos = *ppos;
+
+	if (unlikely(file->f_flags & O_DIRECT))
+		ioflags |= IO_ISDIRECT;
+	VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);
+
+	*ppos = kiocb.ki_pos;
+	return rval;
+}
+
+STATIC ssize_t
+linvfs_readv(
+	struct file		*file,
+	const struct iovec 	*iov,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	return __linvfs_readv(file, iov, 0, nr_segs, ppos);
+}
+
+STATIC ssize_t
+linvfs_readv_invis(
+	struct file		*file,
+	const struct iovec 	*iov,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos);
+}
+
+
+STATIC inline ssize_t
+__linvfs_writev(
+	struct file		*file,
+	const struct iovec 	*iov,
+	int			ioflags,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	struct inode	*inode = file->f_mapping->host;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	struct		kiocb kiocb;
+	ssize_t		rval;
+
+	init_sync_kiocb(&kiocb, file);
+	kiocb.ki_pos = *ppos;
+	if (unlikely(file->f_flags & O_DIRECT))
+		ioflags |= IO_ISDIRECT;
+
+	VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);
+
+	*ppos = kiocb.ki_pos;
+	return rval;
+}
+
+
+STATIC ssize_t
+linvfs_writev(
+	struct file		*file,
+	const struct iovec 	*iov,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	return __linvfs_writev(file, iov, 0, nr_segs, ppos);
+}
+
+STATIC ssize_t
+linvfs_writev_invis(
+	struct file		*file,
+	const struct iovec 	*iov,
+	unsigned long		nr_segs,
+	loff_t			*ppos)
+{
+	return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos);
+}
+
+STATIC ssize_t
+linvfs_sendfile(
+	struct file		*filp,
+	loff_t			*ppos,
+	size_t			count,
+	read_actor_t		actor,
+	void			*target)
+{
+	vnode_t			*vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
+	ssize_t			rval;
+
+	VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval);
+	return rval;
+}
+
+
+STATIC int
+linvfs_open(
+	struct inode	*inode,
+	struct file	*filp)
+{
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	int		error;
+
+	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
+		return -EFBIG;
+
+	ASSERT(vp);
+	VOP_OPEN(vp, NULL, error);
+	return -error;
+}
+
+
+STATIC int
+linvfs_release(
+	struct inode	*inode,
+	struct file	*filp)
+{
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	int		error = 0;
+
+	if (vp)
+		VOP_RELEASE(vp, error);
+	return -error;
+}
+
+
+STATIC int
+linvfs_fsync(
+	struct file	*filp,
+	struct dentry	*dentry,
+	int		datasync)
+{
+	struct inode	*inode = dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	int		error;
+	int		flags = FSYNC_WAIT;
+
+	if (datasync)
+		flags |= FSYNC_DATA;
+
+	ASSERT(vp);
+	VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
+	return -error;
+}
+
+/*
+ * linvfs_readdir maps to VOP_READDIR().
+ * We need to build a uio, cred, ...
+ */
+
+#define nextdp(dp)      ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
+
+STATIC int
+linvfs_readdir(
+	struct file	*filp,
+	void		*dirent,
+	filldir_t	filldir)
+{
+	int		error = 0;
+	vnode_t		*vp;
+	uio_t		uio;
+	iovec_t		iov;
+	int		eof = 0;
+	caddr_t		read_buf;
+	int		namelen, size = 0;
+	size_t		rlen = PAGE_CACHE_SIZE;
+	xfs_off_t	start_offset, curr_offset;
+	xfs_dirent_t	*dbp = NULL;
+
+	vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
+	ASSERT(vp);
+
+	/* Try fairly hard to get memory */
+	do {
+		if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
+			break;
+		rlen >>= 1;
+	} while (rlen >= 1024);
+
+	if (read_buf == NULL)
+		return -ENOMEM;
+
+	uio.uio_iov = &iov;
+	uio.uio_segflg = UIO_SYSSPACE;
+	curr_offset = filp->f_pos;
+	if (filp->f_pos != 0x7fffffff)
+		uio.uio_offset = filp->f_pos;
+	else
+		uio.uio_offset = 0xffffffff;
+
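+	/*
+	 * Pull batches of dirents from VOP_READDIR() into read_buf and
+	 * feed each entry to the filldir callback, until we hit end of
+	 * directory or the callback signals that its buffer is full.
+	 */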
+	while (!eof) {
+		uio.uio_resid = iov.iov_len = rlen;
+		iov.iov_base = read_buf;
+		uio.uio_iovcnt = 1;
+
+		start_offset = uio.uio_offset;
+
+		VOP_READDIR(vp, &uio, NULL, &eof, error);
+		if ((uio.uio_offset == start_offset) || error) {
+			size = 0;
+			break;
+		}
+
+		size = rlen - uio.uio_resid;
+		dbp = (xfs_dirent_t *)read_buf;
+		while (size > 0) {
+			namelen = strlen(dbp->d_name);
+
+			if (filldir(dirent, dbp->d_name, namelen,
+					(loff_t) curr_offset & 0x7fffffff,
+					(ino_t) dbp->d_ino,
+					DT_UNKNOWN)) {
+				goto done;
+			}
+			size -= dbp->d_reclen;
+			curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
+			dbp = nextdp(dbp);
+		}
+	}
+done:
+	if (!error) {
+		if (size == 0)
+			filp->f_pos = uio.uio_offset & 0x7fffffff;
+		else if (dbp)
+			filp->f_pos = curr_offset;
+	}
+
+	kfree(read_buf);
+	return -error;
+}
+
+
+STATIC int
+linvfs_file_mmap(
+	struct file	*filp,
+	struct vm_area_struct *vma)
+{
+	struct inode	*ip = filp->f_dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(ip);
+	vattr_t		va = { .va_mask = XFS_AT_UPDATIME };
+	int		error;
+
+	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
+		xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
+
+		error = -XFS_SEND_MMAP(mp, vma, 0);
+		if (error)
+			return error;
+	}
+
+	vma->vm_ops = &linvfs_file_vm_ops;
+
+	VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
+	if (!error)
+		vn_revalidate(vp);	/* update Linux inode flags */
+	return 0;
+}
+
+
+STATIC long
+linvfs_ioctl(
+	struct file	*filp,
+	unsigned int	cmd,
+	unsigned long	arg)
+{
+	int		error;
+	struct inode *inode = filp->f_dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+
+	VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);
+	VMODIFY(vp);
+
+	/* NOTE:  some of the ioctls return positive numbers as a
+	 *	  byte count indicating success, such as
+	 *	  readlink_by_handle.  So we don't "sign flip"
+	 *	  like most other routines.  This means true
+	 *	  errors need to be returned as a negative value.
+	 */
+	return error;
+}
+
+STATIC long
+linvfs_ioctl_invis(
+	struct file	*filp,
+	unsigned int	cmd,
+	unsigned long	arg)
+{
+	int		error;
+	struct inode *inode = filp->f_dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+
+	ASSERT(vp);
+	VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
+	VMODIFY(vp);
+
+	/* NOTE:  some of the ioctls return positive numbers as a
+	 *	  byte count indicating success, such as
+	 *	  readlink_by_handle.  So we don't "sign flip"
+	 *	  like most other routines.  This means true
+	 *	  errors need to be returned as a negative value.
+	 */
+	return error;
+}
+
+#ifdef HAVE_VMOP_MPROTECT
+STATIC int
+linvfs_mprotect(
+	struct vm_area_struct *vma,
+	unsigned int	newflags)
+{
+	vnode_t		*vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode);
+	int		error = 0;
+
+	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
+		if ((vma->vm_flags & VM_MAYSHARE) &&
+		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
+			xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
+
+			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
+		    }
+	}
+	return error;
+}
+#endif /* HAVE_VMOP_MPROTECT */
+
+#ifdef HAVE_FOP_OPEN_EXEC
+/* If the user is attempting to execute a file that is offline, then
+ * we have to trigger a DMAPI READ event before the file is marked as busy;
+ * otherwise the invisible I/O will not be able to write to the file to bring
+ * it back online.
+ */
+STATIC int
+linvfs_open_exec(
+	struct inode	*inode)
+{
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
+	int		error = 0;
+	bhv_desc_t	*bdp;
+	xfs_inode_t	*ip;
+
+	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
+		bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
+		if (!bdp) {
+			error = -EINVAL;
+			goto open_exec_out;
+		}
+		ip = XFS_BHVTOI(bdp);
+		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
+			error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
+					       0, 0, 0, NULL);
+		}
+	}
+open_exec_out:
+	return error;
+}
+#endif /* HAVE_FOP_OPEN_EXEC */
+
+struct file_operations linvfs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
+	.readv		= linvfs_readv,
+	.writev		= linvfs_writev,
+	.aio_read	= linvfs_aio_read,
+	.aio_write	= linvfs_aio_write,
+	.sendfile	= linvfs_sendfile,
+	.unlocked_ioctl	= linvfs_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = xfs_compat_ioctl,
+#endif
+	.mmap		= linvfs_file_mmap,
+	.open		= linvfs_open,
+	.release	= linvfs_release,
+	.fsync		= linvfs_fsync,
+#ifdef HAVE_FOP_OPEN_EXEC
+	.open_exec	= linvfs_open_exec,
+#endif
+};
+
+struct file_operations linvfs_invis_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
+	.readv		= linvfs_readv_invis,
+	.writev		= linvfs_writev_invis,
+	.aio_read	= linvfs_aio_read_invis,
+	.aio_write	= linvfs_aio_write_invis,
+	.sendfile	= linvfs_sendfile,
+	.unlocked_ioctl	= linvfs_ioctl_invis,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = xfs_compat_invis_ioctl,
+#endif
+	.mmap		= linvfs_file_mmap,
+	.open		= linvfs_open,
+	.release	= linvfs_release,
+	.fsync		= linvfs_fsync,
+};
+
+
+struct file_operations linvfs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= linvfs_readdir,
+	.unlocked_ioctl	= linvfs_ioctl,
+	.fsync		= linvfs_fsync,
+};
+
+static struct vm_operations_struct linvfs_file_vm_ops = {
+	.nopage		= filemap_nopage,
+	.populate	= filemap_populate,
+#ifdef HAVE_VMOP_MPROTECT
+	.mprotect	= linvfs_mprotect,
+#endif
+};
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
new file mode 100644
index 0000000..05ebd30
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+/*
+ * Stub for no-op vnode operations that return error status.
+ */
+int
+fs_noerr(void)
+{
+	return 0;
+}
+
+/*
+ * Operation unsupported under this file system.
+ */
+int
+fs_nosys(void)
+{
+	return ENOSYS;
+}
+
+/*
+ * Stub for inactive, strategy, and read/write lock/unlock.  Does nothing.
+ */
+/* ARGSUSED */
+void
+fs_noval(void)
+{
+}
+
+/*
+ * vnode pcache layer for vnode_tosspages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+void
+fs_tosspages(
+	bhv_desc_t	*bdp,
+	xfs_off_t	first,
+	xfs_off_t	last,
+	int		fiopt)
+{
+	vnode_t		*vp = BHV_TO_VNODE(bdp);
+	struct inode	*ip = LINVFS_GET_IP(vp);
+
+	if (VN_CACHED(vp))
+		truncate_inode_pages(ip->i_mapping, first);
+}
+
+
+/*
+ * vnode pcache layer for vnode_flushinval_pages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+void
+fs_flushinval_pages(
+	bhv_desc_t	*bdp,
+	xfs_off_t	first,
+	xfs_off_t	last,
+	int		fiopt)
+{
+	vnode_t		*vp = BHV_TO_VNODE(bdp);
+	struct inode	*ip = LINVFS_GET_IP(vp);
+
+	if (VN_CACHED(vp)) {
+		filemap_fdatawrite(ip->i_mapping);
+		filemap_fdatawait(ip->i_mapping);
+
+		truncate_inode_pages(ip->i_mapping, first);
+	}
+}
+
+/*
+ * vnode pcache layer for vnode_flush_pages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+int
+fs_flush_pages(
+	bhv_desc_t	*bdp,
+	xfs_off_t	first,
+	xfs_off_t	last,
+	uint64_t	flags,
+	int		fiopt)
+{
+	vnode_t		*vp = BHV_TO_VNODE(bdp);
+	struct inode	*ip = LINVFS_GET_IP(vp);
+
+	if (VN_CACHED(vp)) {
+		filemap_fdatawrite(ip->i_mapping);
+		filemap_fdatawait(ip->i_mapping);
+	}
+
+	return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h
new file mode 100644
index 0000000..2db9ddb
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_SUBR_H__
+#define __XFS_SUBR_H__
+
+/*
+ * Utilities shared among file system implementations.
+ */
+
+struct cred;
+
+extern int	fs_noerr(void);
+extern int	fs_nosys(void);
+extern void	fs_noval(void);
+extern void	fs_tosspages(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+extern void	fs_flushinval_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+extern int	fs_flush_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, uint64_t, int);
+
+#endif	/* __XFS_SUBR_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
new file mode 100644
index 0000000..a6da5b4
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains globals needed by XFS that were normally defined
+ * somewhere else in IRIX.
+ */
+
+#include "xfs.h"
+#include "xfs_cred.h"
+#include "xfs_sysctl.h"
+
+/*
+ * System memory size - used to scale certain data structures in XFS.
+ */
+unsigned long xfs_physmem;
+
+/*
+ * Tunable XFS parameters.  xfs_params is required even when CONFIG_SYSCTL=n
+ * because other XFS code uses these values.  Times are measured in centisecs
+ * (i.e. 100ths of a second).
+ */
+xfs_param_t xfs_params = {
+			  /*	MIN		DFLT		MAX	*/
+	.restrict_chown	= {	0,		1,		1	},
+	.sgid_inherit	= {	0,		0,		1	},
+	.symlink_mode	= {	0,		0,		1	},
+	.panic_mask	= {	0,		0,		127	},
+	.error_level	= {	0,		3,		11	},
+	.syncd_timer	= {	1*100,		30*100,		7200*100},
+	.stats_clear	= {	0,		0,		1	},
+	.inherit_sync	= {	0,		1,		1	},
+	.inherit_nodump	= {	0,		1,		1	},
+	.inherit_noatim = {	0,		1,		1	},
+	.xfs_buf_timer	= {	100/2,		1*100,		30*100	},
+	.xfs_buf_age	= {	1*100,		15*100,		7200*100},
+	.inherit_nosym	= {	0,		0,		1	},
+	.rotorstep	= {	1,		1,		255	},
+};
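+
+/*
+ * Reading the table above: each tunable is a (min, default, max) triple.
+ * For example, .syncd_timer defaults to 30*100 centiseconds, i.e. the
+ * periodic sync daemon runs roughly every 30 seconds and can be tuned
+ * between 1 second and 2 hours; the range checking itself is done by the
+ * sysctl handlers (see xfs_sysctl.c), not here.
+ */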
+
+/*
+ * Global system credential structure.
+ */
+cred_t sys_cred_val, *sys_cred = &sys_cred_val;
+
diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h
new file mode 100644
index 0000000..e81e2f3
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_globals.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_GLOBALS_H__
+#define __XFS_GLOBALS_H__
+
+/*
+ * This file declares globals needed by XFS that were normally defined
+ * somewhere else in IRIX.
+ */
+
+extern uint64_t	xfs_panic_mask;		/* set to cause more panics */
+extern unsigned long xfs_physmem;
+extern struct cred *sys_cred;
+
+#endif	/* __XFS_GLOBALS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
new file mode 100644
index 0000000..69809ee
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -0,0 +1,1336 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_dfrag.h"
+#include "xfs_fsops.h"
+
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+
+/*
+ * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
+ * a file or fs handle.
+ *
+ * XFS_IOC_PATH_TO_FSHANDLE
+ *    returns fs handle for a mount point or path within that mount point
+ * XFS_IOC_FD_TO_HANDLE
+ *    returns full handle for a FD opened in user space
+ * XFS_IOC_PATH_TO_HANDLE
+ *    returns full handle for a path
+ */
+STATIC int
+xfs_find_handle(
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	int			hsize;
+	xfs_handle_t		handle;
+	xfs_fsop_handlereq_t	hreq;
+	struct inode		*inode;
+	struct vnode		*vp;
+
+	if (copy_from_user(&hreq, arg, sizeof(hreq)))
+		return -XFS_ERROR(EFAULT);
+
+	memset((char *)&handle, 0, sizeof(handle));
+
+	switch (cmd) {
+	case XFS_IOC_PATH_TO_FSHANDLE:
+	case XFS_IOC_PATH_TO_HANDLE: {
+		struct nameidata	nd;
+		int			error;
+
+		error = user_path_walk_link((const char __user *)hreq.path, &nd);
+		if (error)
+			return error;
+
+		ASSERT(nd.dentry);
+		ASSERT(nd.dentry->d_inode);
+		inode = igrab(nd.dentry->d_inode);
+		path_release(&nd);
+		break;
+	}
+
+	case XFS_IOC_FD_TO_HANDLE: {
+		struct file	*file;
+
+		file = fget(hreq.fd);
+		if (!file)
+		    return -EBADF;
+
+		ASSERT(file->f_dentry);
+		ASSERT(file->f_dentry->d_inode);
+		inode = igrab(file->f_dentry->d_inode);
+		fput(file);
+		break;
+	}
+
+	default:
+		ASSERT(0);
+		return -XFS_ERROR(EINVAL);
+	}
+
+	if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
+		/* we're not in XFS anymore, Toto */
+		iput(inode);
+		return -XFS_ERROR(EINVAL);
+	}
+
+	/* we need the vnode */
+	vp = LINVFS_GET_VP(inode);
+	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
+		iput(inode);
+		return -XFS_ERROR(EBADF);
+	}
+
+	/* now we can grab the fsid */
+	memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
+	hsize = sizeof(xfs_fsid_t);
+
+	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
+		xfs_inode_t	*ip;
+		bhv_desc_t	*bhv;
+		int		lock_mode;
+
+		/* need to get access to the xfs_inode to read the generation */
+		bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
+		ASSERT(bhv);
+		ip = XFS_BHVTOI(bhv);
+		ASSERT(ip);
+		lock_mode = xfs_ilock_map_shared(ip);
+
+		/* fill in fid section of handle from inode */
+		handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
+					    sizeof(handle.ha_fid.xfs_fid_len);
+		handle.ha_fid.xfs_fid_pad = 0;
+		handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
+		handle.ha_fid.xfs_fid_ino = ip->i_ino;
+
+		xfs_iunlock_map_shared(ip, lock_mode);
+
+		hsize = XFS_HSIZE(handle);
+	}
+
+	/* now copy our handle into the user buffer & write out the size */
+	if (copy_to_user(hreq.ohandle, &handle, hsize) ||
+	    copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
+		iput(inode);
+		return -XFS_ERROR(EFAULT);
+	}
+
+	iput(inode);
+	return 0;
+}
+
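+/*
+ * Illustrative userspace-side sketch (not part of the kernel build) of how
+ * the handle ioctls above are typically driven; real applications normally
+ * go through libhandle, but the raw contract implemented by
+ * xfs_find_handle() is roughly:
+ *
+ *	xfs_fsop_handlereq_t	hreq = { 0 };
+ *	xfs_handle_t		handle;
+ *	__u32			hlen;
+ *
+ *	hreq.path     = "/mnt/xfs/some/file";	// input path
+ *	hreq.ohandle  = &handle;		// kernel writes the handle here
+ *	hreq.ohandlen = &hlen;			// kernel writes its size here
+ *	ioctl(fd_on_the_xfs_fs, XFS_IOC_PATH_TO_HANDLE, &hreq);
+ *
+ * XFS_IOC_FD_TO_HANDLE fills in hreq.fd instead of hreq.path, and
+ * XFS_IOC_PATH_TO_FSHANDLE returns only the fsid portion of the handle.
+ */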
+
+/*
+ * Convert userspace handle data into vnode (and inode).
+ * We [ab]use the fact that all the fsop_handlereq ioctl calls
+ * have a data structure argument whose first component is always
+ * an xfs_fsop_handlereq_t, so we can cast to and from this type.
+ * This allows us to optimise the copy_from_user calls and gives
+ * a handy, shared routine.
+ *
+ * If no error, caller must always VN_RELE the returned vp.
+ */
+STATIC int
+xfs_vget_fsop_handlereq(
+	xfs_mount_t		*mp,
+	struct inode		*parinode,	/* parent inode pointer    */
+	xfs_fsop_handlereq_t	*hreq,
+	vnode_t			**vp,
+	struct inode		**inode)
+{
+	void			__user *hanp;
+	size_t			hlen;
+	xfs_fid_t		*xfid;
+	xfs_handle_t		*handlep;
+	xfs_handle_t		handle;
+	xfs_inode_t		*ip;
+	struct inode		*inodep;
+	vnode_t			*vpp;
+	xfs_ino_t		ino;
+	__u32			igen;
+	int			error;
+
+	/*
+	 * Only allow handle opens under a directory.
+	 */
+	if (!S_ISDIR(parinode->i_mode))
+		return XFS_ERROR(ENOTDIR);
+
+	hanp = hreq->ihandle;
+	hlen = hreq->ihandlen;
+	handlep = &handle;
+
+	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
+		return XFS_ERROR(EINVAL);
+	if (copy_from_user(handlep, hanp, hlen))
+		return XFS_ERROR(EFAULT);
+	if (hlen < sizeof(*handlep))
+		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
+	if (hlen > sizeof(handlep->ha_fsid)) {
+		if (handlep->ha_fid.xfs_fid_len !=
+				(hlen - sizeof(handlep->ha_fsid)
+					- sizeof(handlep->ha_fid.xfs_fid_len))
+		    || handlep->ha_fid.xfs_fid_pad)
+			return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * Crack the handle, obtain the inode # & generation #
+	 */
+	xfid = (struct xfs_fid *)&handlep->ha_fid;
+	if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
+		ino  = xfid->xfs_fid_ino;
+		igen = xfid->xfs_fid_gen;
+	} else {
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * Get the XFS inode, building a vnode to go with it.
+	 */
+	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
+	if (error)
+		return error;
+	if (ip == NULL)
+		return XFS_ERROR(EIO);
+	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
+		xfs_iput_new(ip, XFS_ILOCK_SHARED);
+		return XFS_ERROR(ENOENT);
+	}
+
+	vpp = XFS_ITOV(ip);
+	inodep = LINVFS_GET_IP(vpp);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	*vp = vpp;
+	*inode = inodep;
+	return 0;
+}
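+
+/*
+ * Handle layout note: a handle is the filesystem's ha_fsid followed by an
+ * xfs_fid carrying length, pad, inode generation and inode number.  The
+ * checks above zero-fill short handles and then insist on a correctly
+ * sized, zero-padded xfs_fid, so only full file handles (not bare fsid
+ * handles) can be resolved back to an inode here.
+ */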
+
+STATIC int
+xfs_open_by_handle(
+	xfs_mount_t		*mp,
+	void			__user *arg,
+	struct file		*parfilp,
+	struct inode		*parinode)
+{
+	int			error;
+	int			new_fd;
+	int			permflag;
+	struct file		*filp;
+	struct inode		*inode;
+	struct dentry		*dentry;
+	vnode_t			*vp;
+	xfs_fsop_handlereq_t	hreq;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
+		return -XFS_ERROR(EFAULT);
+
+	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
+	if (error)
+		return -error;
+
+	/* Restrict xfs_open_by_handle to directories & regular files. */
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
+		iput(inode);
+		return -XFS_ERROR(EINVAL);
+	}
+
+#if BITS_PER_LONG != 32
+	hreq.oflags |= O_LARGEFILE;
+#endif
+	/* Put open permission in namei format. */
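+	/*
+	 * The "+1" trick below maps the O_ACCMODE encoding onto FMODE_-style
+	 * bits: O_RDONLY(0)/O_WRONLY(1)/O_RDWR(2) become 1/2/3, i.e.
+	 * FMODE_READ and/or FMODE_WRITE, and O_TRUNC additionally implies
+	 * write permission.
+	 */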
+	permflag = hreq.oflags;
+	if ((permflag+1) & O_ACCMODE)
+		permflag++;
+	if (permflag & O_TRUNC)
+		permflag |= 2;
+
+	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
+	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
+		iput(inode);
+		return -XFS_ERROR(EPERM);
+	}
+
+	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
+		iput(inode);
+		return -XFS_ERROR(EACCES);
+	}
+
+	/* Can't write directories. */
+	if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
+		iput(inode);
+		return -XFS_ERROR(EISDIR);
+	}
+
+	if ((new_fd = get_unused_fd()) < 0) {
+		iput(inode);
+		return new_fd;
+	}
+
+	dentry = d_alloc_anon(inode);
+	if (dentry == NULL) {
+		iput(inode);
+		put_unused_fd(new_fd);
+		return -XFS_ERROR(ENOMEM);
+	}
+
+	/* Ensure umount returns EBUSY on umounts while this file is open. */
+	mntget(parfilp->f_vfsmnt);
+
+	/* Create file pointer. */
+	filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
+	if (IS_ERR(filp)) {
+		put_unused_fd(new_fd);
+		return -XFS_ERROR(-PTR_ERR(filp));
+	}
+	if (inode->i_mode & S_IFREG)
+		filp->f_op = &linvfs_invis_file_operations;
+
+	fd_install(new_fd, filp);
+	return new_fd;
+}
+
+STATIC int
+xfs_readlink_by_handle(
+	xfs_mount_t		*mp,
+	void			__user *arg,
+	struct file		*parfilp,
+	struct inode		*parinode)
+{
+	int			error;
+	struct iovec		aiov;
+	struct uio		auio;
+	struct inode		*inode;
+	xfs_fsop_handlereq_t	hreq;
+	vnode_t			*vp;
+	__u32			olen;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
+		return -XFS_ERROR(EFAULT);
+
+	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
+	if (error)
+		return -error;
+
+	/* Restrict this handle operation to symlinks only. */
+	if (vp->v_type != VLNK) {
+		VN_RELE(vp);
+		return -XFS_ERROR(EINVAL);
+	}
+
+	if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
+		VN_RELE(vp);
+		return -XFS_ERROR(EFAULT);
+	}
+	aiov.iov_len	= olen;
+	aiov.iov_base	= hreq.ohandle;
+
+	auio.uio_iov	= &aiov;
+	auio.uio_iovcnt	= 1;
+	auio.uio_offset	= 0;
+	auio.uio_segflg	= UIO_USERSPACE;
+	auio.uio_resid	= olen;
+
+	VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
+
+	VN_RELE(vp);
+	return (olen - auio.uio_resid);
+}
+
+STATIC int
+xfs_fssetdm_by_handle(
+	xfs_mount_t		*mp,
+	void			__user *arg,
+	struct file		*parfilp,
+	struct inode		*parinode)
+{
+	int			error;
+	struct fsdmidata	fsd;
+	xfs_fsop_setdm_handlereq_t dmhreq;
+	struct inode		*inode;
+	bhv_desc_t		*bdp;
+	vnode_t			*vp;
+
+	if (!capable(CAP_MKNOD))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
+		return -XFS_ERROR(EFAULT);
+
+	error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode);
+	if (error)
+		return -error;
+
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+		VN_RELE(vp);
+		return -XFS_ERROR(EPERM);
+	}
+
+	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
+		VN_RELE(vp);
+		return -XFS_ERROR(EFAULT);
+	}
+
+	bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
+	error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
+
+	VN_RELE(vp);
+	if (error)
+		return -error;
+	return 0;
+}
+
+STATIC int
+xfs_attrlist_by_handle(
+	xfs_mount_t		*mp,
+	void			__user *arg,
+	struct file		*parfilp,
+	struct inode		*parinode)
+{
+	int			error;
+	attrlist_cursor_kern_t	*cursor;
+	xfs_fsop_attrlist_handlereq_t al_hreq;
+	struct inode		*inode;
+	vnode_t			*vp;
+	char			*kbuf;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
+		return -XFS_ERROR(EFAULT);
+	if (al_hreq.buflen > XATTR_LIST_MAX)
+		return -XFS_ERROR(EINVAL);
+
+	error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq,
+			&vp, &inode);
+	if (error)
+		goto out;
+
+	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
+	if (!kbuf)
+		goto out_vn_rele;
+
+	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
+	VOP_ATTR_LIST(vp, kbuf, al_hreq.buflen, al_hreq.flags,
+			cursor, NULL, error);
+	if (error)
+		goto out_kfree;
+
+	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
+		error = -EFAULT;
+
+ out_kfree:
+	kfree(kbuf);
+ out_vn_rele:
+	VN_RELE(vp);
+ out:
+	return -error;
+}
+
+STATIC int
+xfs_attrmulti_attr_get(
+	struct vnode		*vp,
+	char			*name,
+	char			__user *ubuf,
+	__uint32_t		*len,
+	__uint32_t		flags)
+{
+	char			*kbuf;
+	int			error = EFAULT;
+
+	if (*len > XATTR_SIZE_MAX)
+		return EINVAL;
+	kbuf = kmalloc(*len, GFP_KERNEL);
+	if (!kbuf)
+		return ENOMEM;
+
+	VOP_ATTR_GET(vp, name, kbuf, len, flags, NULL, error);
+	if (error)
+		goto out_kfree;
+
+	if (copy_to_user(ubuf, kbuf, *len))
+		error = EFAULT;
+
+ out_kfree:
+	kfree(kbuf);
+	return error;
+}
+
+STATIC int
+xfs_attrmulti_attr_set(
+	struct vnode		*vp,
+	char			*name,
+	const char		__user *ubuf,
+	__uint32_t		len,
+	__uint32_t		flags)
+{
+	char			*kbuf;
+	int			error = EFAULT;
+
+	if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
+		return EPERM;
+	if (len > XATTR_SIZE_MAX)
+		return EINVAL;
+
+	kbuf = kmalloc(len, GFP_KERNEL);
+	if (!kbuf)
+		return ENOMEM;
+
+	if (copy_from_user(kbuf, ubuf, len))
+		goto out_kfree;
+
+	VOP_ATTR_SET(vp, name, kbuf, len, flags, NULL, error);
+
+ out_kfree:
+	kfree(kbuf);
+	return error;
+}
+
+STATIC int
+xfs_attrmulti_attr_remove(
+	struct vnode		*vp,
+	char			*name,
+	__uint32_t		flags)
+{
+	int			error;
+
+	if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
+		return EPERM;
+
+	VOP_ATTR_REMOVE(vp, name, flags, NULL, error);
+	return error;
+}
+
+STATIC int
+xfs_attrmulti_by_handle(
+	xfs_mount_t		*mp,
+	void			__user *arg,
+	struct file		*parfilp,
+	struct inode		*parinode)
+{
+	int			error;
+	xfs_attr_multiop_t	*ops;
+	xfs_fsop_attrmulti_handlereq_t am_hreq;
+	struct inode		*inode;
+	vnode_t			*vp;
+	unsigned int		i, size;
+	char			*attr_name;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
+		return -XFS_ERROR(EFAULT);
+
+	error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode);
+	if (error)
+		goto out;
+
+	error = E2BIG;
+	size = am_hreq.opcount * sizeof(attr_multiop_t);
+	if (!size || size > 16 * PAGE_SIZE)
+		goto out_vn_rele;
+
+	error = ENOMEM;
+	ops = kmalloc(size, GFP_KERNEL);
+	if (!ops)
+		goto out_vn_rele;
+
+	error = EFAULT;
+	if (copy_from_user(ops, am_hreq.ops, size))
+		goto out_kfree_ops;
+
+	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
+	if (!attr_name)
+		goto out_kfree_ops;
+
+	error = 0;
+	for (i = 0; i < am_hreq.opcount; i++) {
+		ops[i].am_error = strncpy_from_user(attr_name,
+				ops[i].am_attrname, MAXNAMELEN);
+		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
+			error = -ERANGE;
+		if (ops[i].am_error < 0)
+			break;
+
+		switch (ops[i].am_opcode) {
+		case ATTR_OP_GET:
+			ops[i].am_error = xfs_attrmulti_attr_get(vp,
+					attr_name, ops[i].am_attrvalue,
+					&ops[i].am_length, ops[i].am_flags);
+			break;
+		case ATTR_OP_SET:
+			ops[i].am_error = xfs_attrmulti_attr_set(vp,
+					attr_name, ops[i].am_attrvalue,
+					ops[i].am_length, ops[i].am_flags);
+			break;
+		case ATTR_OP_REMOVE:
+			ops[i].am_error = xfs_attrmulti_attr_remove(vp,
+					attr_name, ops[i].am_flags);
+			break;
+		default:
+			ops[i].am_error = EINVAL;
+		}
+	}
+
+	if (copy_to_user(am_hreq.ops, ops, size))
+		error = XFS_ERROR(EFAULT);
+
+	kfree(attr_name);
+ out_kfree_ops:
+	kfree(ops);
+ out_vn_rele:
+	VN_RELE(vp);
+ out:
+	return -error;
+}
+
+/*
+ * Prototypes for a few of the stack-hungry cases that have their own
+ * functions.  The functions are defined after their use so gcc doesn't
+ * get fancy and inline them with -O3.
+ */
+
+STATIC int
+xfs_ioc_space(
+	bhv_desc_t		*bdp,
+	vnode_t			*vp,
+	struct file		*filp,
+	int			flags,
+	unsigned int		cmd,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_bulkstat(
+	xfs_mount_t		*mp,
+	unsigned int		cmd,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_fsgeometry_v1(
+	xfs_mount_t		*mp,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_fsgeometry(
+	xfs_mount_t		*mp,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_xattr(
+	vnode_t			*vp,
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	unsigned int		cmd,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_getbmap(
+	bhv_desc_t		*bdp,
+	struct file		*filp,
+	int			flags,
+	unsigned int		cmd,
+	void			__user *arg);
+
+STATIC int
+xfs_ioc_getbmapx(
+	bhv_desc_t		*bdp,
+	void			__user *arg);
+
+int
+xfs_ioctl(
+	bhv_desc_t		*bdp,
+	struct inode		*inode,
+	struct file		*filp,
+	int			ioflags,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	int			error;
+	vnode_t			*vp;
+	xfs_inode_t		*ip;
+	xfs_mount_t		*mp;
+
+	vp = LINVFS_GET_VP(inode);
+
+	vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
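+	/*
+	 * Error convention: the core XFS routines called below return
+	 * positive errno values, so each case negates the result
+	 * ("return -error") to produce the negative errnos that the
+	 * Linux ioctl path expects.
+	 */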
+	switch (cmd) {
+
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP64:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP64:
+		/*
+		 * Only allow the sys admin to reserve space unless
+		 * unwritten extents are enabled.
+		 */
+		if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
+
+	case XFS_IOC_DIOINFO: {
+		struct dioattr	da;
+		xfs_buftarg_t	*target =
+			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+			mp->m_rtdev_targp : mp->m_ddev_targp;
+
+		da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
+		/* The size dio will do in one go */
+		da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+
+		if (copy_to_user(arg, &da, sizeof(da)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_FSBULKSTAT_SINGLE:
+	case XFS_IOC_FSBULKSTAT:
+	case XFS_IOC_FSINUMBERS:
+		return xfs_ioc_bulkstat(mp, cmd, arg);
+
+	case XFS_IOC_FSGEOMETRY_V1:
+		return xfs_ioc_fsgeometry_v1(mp, arg);
+
+	case XFS_IOC_FSGEOMETRY:
+		return xfs_ioc_fsgeometry(mp, arg);
+
+	case XFS_IOC_GETVERSION:
+	case XFS_IOC_GETXFLAGS:
+	case XFS_IOC_SETXFLAGS:
+	case XFS_IOC_FSGETXATTR:
+	case XFS_IOC_FSSETXATTR:
+	case XFS_IOC_FSGETXATTRA:
+		return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
+
+	case XFS_IOC_FSSETDM: {
+		struct fsdmidata	dmi;
+
+		if (copy_from_user(&dmi, arg, sizeof(dmi)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
+							NULL);
+		return -error;
+	}
+
+	case XFS_IOC_GETBMAP:
+	case XFS_IOC_GETBMAPA:
+		return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
+
+	case XFS_IOC_GETBMAPX:
+		return xfs_ioc_getbmapx(bdp, arg);
+
+	case XFS_IOC_FD_TO_HANDLE:
+	case XFS_IOC_PATH_TO_HANDLE:
+	case XFS_IOC_PATH_TO_FSHANDLE:
+		return xfs_find_handle(cmd, arg);
+
+	case XFS_IOC_OPEN_BY_HANDLE:
+		return xfs_open_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_FSSETDM_BY_HANDLE:
+		return xfs_fssetdm_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_READLINK_BY_HANDLE:
+		return xfs_readlink_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_ATTRLIST_BY_HANDLE:
+		return xfs_attrlist_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_ATTRMULTI_BY_HANDLE:
+		return xfs_attrmulti_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_SWAPEXT: {
+		error = xfs_swapext((struct xfs_swapext __user *)arg);
+		return -error;
+	}
+
+	case XFS_IOC_FSCOUNTS: {
+		xfs_fsop_counts_t out;
+
+		error = xfs_fs_counts(mp, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_SET_RESBLKS: {
+		xfs_fsop_resblks_t inout;
+		__uint64_t	   in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&inout, arg, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+
+		/* input parameter is passed in resblks field of structure */
+		in = inout.resblks;
+		error = xfs_reserve_blocks(mp, &in, &inout);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &inout, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_GET_RESBLKS: {
+		xfs_fsop_resblks_t out;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_reserve_blocks(mp, NULL, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+
+		return 0;
+	}
+
+	case XFS_IOC_FSGROWFSDATA: {
+		xfs_growfs_data_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_data(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSLOG: {
+		xfs_growfs_log_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_log(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSRT: {
+		xfs_growfs_rt_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_rt(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FREEZE:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (inode->i_sb->s_frozen == SB_UNFROZEN)
+			freeze_bdev(inode->i_sb->s_bdev);
+		return 0;
+
+	case XFS_IOC_THAW:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (inode->i_sb->s_frozen != SB_UNFROZEN)
+			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
+		return 0;
+
+	case XFS_IOC_GOINGDOWN: {
+		__uint32_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (get_user(in, (__uint32_t __user *)arg))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_fs_goingdown(mp, in);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_INJECTION: {
+		xfs_error_injection_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_errortag_add(in.errtag, mp);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_CLEARALL:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_errortag_clearall(mp);
+		return -error;
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+STATIC int
+xfs_ioc_space(
+	bhv_desc_t		*bdp,
+	vnode_t			*vp,
+	struct file		*filp,
+	int			ioflags,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	xfs_flock64_t		bf;
+	int			attr_flags = 0;
+	int			error;
+
+	if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
+		return -XFS_ERROR(EPERM);
+
+	if (!(filp->f_flags & FMODE_WRITE))
+		return -XFS_ERROR(EBADF);
+
+	if (vp->v_type != VREG)
+		return -XFS_ERROR(EINVAL);
+
+	if (copy_from_user(&bf, arg, sizeof(bf)))
+		return -XFS_ERROR(EFAULT);
+
+	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+		attr_flags |= ATTR_NONBLOCK;
+	if (ioflags & IO_INVIS)
+		attr_flags |= ATTR_DMI;
+
+	error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
+					      NULL, attr_flags);
+	return -error;
+}
+
+STATIC int
+xfs_ioc_bulkstat(
+	xfs_mount_t		*mp,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	xfs_fsop_bulkreq_t	bulkreq;
+	int			count;	/* # of records returned */
+	xfs_ino_t		inlast;	/* last inode number */
+	int			done;
+	int			error;
+
+	/*
+	 * done = 1 if there are more stats to get and if bulkstat
+	 * should be called again (unused here, but used in dmapi)
+	 */
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
+		return -XFS_ERROR(EFAULT);
+
+	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
+		return -XFS_ERROR(EFAULT);
+
+	if ((count = bulkreq.icount) <= 0)
+		return -XFS_ERROR(EINVAL);
+
+	if (cmd == XFS_IOC_FSINUMBERS)
+		error = xfs_inumbers(mp, &inlast, &count,
+						bulkreq.ubuffer);
+	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
+		error = xfs_bulkstat_single(mp, &inlast,
+						bulkreq.ubuffer, &done);
+	else {	/* XFS_IOC_FSBULKSTAT */
+		if (count == 1 && inlast != 0) {
+			inlast++;
+			error = xfs_bulkstat_single(mp, &inlast,
+					bulkreq.ubuffer, &done);
+		} else {
+			error = xfs_bulkstat(mp, &inlast, &count,
+				(bulkstat_one_pf)xfs_bulkstat_one, NULL,
+				sizeof(xfs_bstat_t), bulkreq.ubuffer,
+				BULKSTAT_FG_QUICK, &done);
+		}
+	}
+
+	if (error)
+		return -error;
+
+	if (bulkreq.ocount != NULL) {
+		if (copy_to_user(bulkreq.lastip, &inlast,
+						sizeof(xfs_ino_t)))
+			return -XFS_ERROR(EFAULT);
+
+		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
+			return -XFS_ERROR(EFAULT);
+	}
+
+	return 0;
+}
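+
+/*
+ * Bulkstat interface note: bulkreq.lastip is an in/out cursor (the last
+ * inode number processed), bulkreq.icount caps how many records fit in
+ * bulkreq.ubuffer, and on success the updated cursor and the actual record
+ * count are copied back out through lastip and ocount (when ocount is
+ * supplied) for the next call.
+ */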
+
+STATIC int
+xfs_ioc_fsgeometry_v1(
+	xfs_mount_t		*mp,
+	void			__user *arg)
+{
+	xfs_fsop_geom_v1_t	fsgeo;
+	int			error;
+
+	error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+	if (error)
+		return -error;
+
+	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_ioc_fsgeometry(
+	xfs_mount_t		*mp,
+	void			__user *arg)
+{
+	xfs_fsop_geom_t		fsgeo;
+	int			error;
+
+	error = xfs_fs_geometry(mp, &fsgeo, 4);
+	if (error)
+		return -error;
+
+	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+/*
+ * Linux extended inode flags interface.
+ */
+#define LINUX_XFLAG_SYNC	0x00000008 /* Synchronous updates */
+#define LINUX_XFLAG_IMMUTABLE	0x00000010 /* Immutable file */
+#define LINUX_XFLAG_APPEND	0x00000020 /* writes to file may only append */
+#define LINUX_XFLAG_NODUMP	0x00000040 /* do not dump file */
+#define LINUX_XFLAG_NOATIME	0x00000080 /* do not update atime */
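+
+/*
+ * These flag values match the ext2-style inode flags (EXT2_*_FL) that
+ * chattr(1)/lsattr(1) understand, so the GETXFLAGS/SETXFLAGS ioctls below
+ * can present XFS inode flags in that familiar format.
+ */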
+
+STATIC unsigned int
+xfs_merge_ioc_xflags(
+	unsigned int	flags,
+	unsigned int	start)
+{
+	unsigned int	xflags = start;
+
+	if (flags & LINUX_XFLAG_IMMUTABLE)
+		xflags |= XFS_XFLAG_IMMUTABLE;
+	else
+		xflags &= ~XFS_XFLAG_IMMUTABLE;
+	if (flags & LINUX_XFLAG_APPEND)
+		xflags |= XFS_XFLAG_APPEND;
+	else
+		xflags &= ~XFS_XFLAG_APPEND;
+	if (flags & LINUX_XFLAG_SYNC)
+		xflags |= XFS_XFLAG_SYNC;
+	else
+		xflags &= ~XFS_XFLAG_SYNC;
+	if (flags & LINUX_XFLAG_NOATIME)
+		xflags |= XFS_XFLAG_NOATIME;
+	else
+		xflags &= ~XFS_XFLAG_NOATIME;
+	if (flags & LINUX_XFLAG_NODUMP)
+		xflags |= XFS_XFLAG_NODUMP;
+	else
+		xflags &= ~XFS_XFLAG_NODUMP;
+
+	return xflags;
+}
+
+STATIC unsigned int
+xfs_di2lxflags(
+	__uint16_t	di_flags)
+{
+	unsigned int	flags = 0;
+
+	if (di_flags & XFS_DIFLAG_IMMUTABLE)
+		flags |= LINUX_XFLAG_IMMUTABLE;
+	if (di_flags & XFS_DIFLAG_APPEND)
+		flags |= LINUX_XFLAG_APPEND;
+	if (di_flags & XFS_DIFLAG_SYNC)
+		flags |= LINUX_XFLAG_SYNC;
+	if (di_flags & XFS_DIFLAG_NOATIME)
+		flags |= LINUX_XFLAG_NOATIME;
+	if (di_flags & XFS_DIFLAG_NODUMP)
+		flags |= LINUX_XFLAG_NODUMP;
+	return flags;
+}
+
+STATIC int
+xfs_ioc_xattr(
+	vnode_t			*vp,
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	struct fsxattr		fa;
+	vattr_t			va;
+	int			error;
+	int			attr_flags;
+	unsigned int		flags;
+
+	switch (cmd) {
+	case XFS_IOC_FSGETXATTR: {
+		va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
+		VOP_GETATTR(vp, &va, 0, NULL, error);
+		if (error)
+			return -error;
+
+		fa.fsx_xflags	= va.va_xflags;
+		fa.fsx_extsize	= va.va_extsize;
+		fa.fsx_nextents = va.va_nextents;
+
+		if (copy_to_user(arg, &fa, sizeof(fa)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_FSSETXATTR: {
+		if (copy_from_user(&fa, arg, sizeof(fa)))
+			return -XFS_ERROR(EFAULT);
+
+		attr_flags = 0;
+		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+			attr_flags |= ATTR_NONBLOCK;
+
+		va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
+		va.va_xflags  = fa.fsx_xflags;
+		va.va_extsize = fa.fsx_extsize;
+
+		VOP_SETATTR(vp, &va, attr_flags, NULL, error);
+		if (!error)
+			vn_revalidate(vp);	/* update Linux inode flags */
+		return -error;
+	}
+
+	case XFS_IOC_FSGETXATTRA: {
+		va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
+		VOP_GETATTR(vp, &va, 0, NULL, error);
+		if (error)
+			return -error;
+
+		fa.fsx_xflags	= va.va_xflags;
+		fa.fsx_extsize	= va.va_extsize;
+		fa.fsx_nextents = va.va_anextents;
+
+		if (copy_to_user(arg, &fa, sizeof(fa)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_GETXFLAGS: {
+		flags = xfs_di2lxflags(ip->i_d.di_flags);
+		if (copy_to_user(arg, &flags, sizeof(flags)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_SETXFLAGS: {
+		if (copy_from_user(&flags, arg, sizeof(flags)))
+			return -XFS_ERROR(EFAULT);
+
+		if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
+			      LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
+			      LINUX_XFLAG_SYNC))
+			return -XFS_ERROR(EOPNOTSUPP);
+
+		attr_flags = 0;
+		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+			attr_flags |= ATTR_NONBLOCK;
+
+		va.va_mask = XFS_AT_XFLAGS;
+		va.va_xflags = xfs_merge_ioc_xflags(flags,
+				xfs_ip2xflags(ip));
+
+		VOP_SETATTR(vp, &va, attr_flags, NULL, error);
+		if (!error)
+			vn_revalidate(vp);	/* update Linux inode flags */
+		return -error;
+	}
+
+	case XFS_IOC_GETVERSION: {
+		flags = LINVFS_GET_IP(vp)->i_generation;
+		if (copy_to_user(arg, &flags, sizeof(flags)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+STATIC int
+xfs_ioc_getbmap(
+	bhv_desc_t		*bdp,
+	struct file		*filp,
+	int			ioflags,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	struct getbmap		bm;
+	int			iflags;
+	int			error;
+
+	if (copy_from_user(&bm, arg, sizeof(bm)))
+		return -XFS_ERROR(EFAULT);
+
+	if (bm.bmv_count < 2)
+		return -XFS_ERROR(EINVAL);
+
+	iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
+	if (ioflags & IO_INVIS)
+		iflags |= BMV_IF_NO_DMAPI_READ;
+
+	error = xfs_getbmap(bdp, &bm, (struct getbmap __user *)arg+1, iflags);
+	if (error)
+		return -error;
+
+	if (copy_to_user(arg, &bm, sizeof(bm)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_ioc_getbmapx(
+	bhv_desc_t		*bdp,
+	void			__user *arg)
+{
+	struct getbmapx		bmx;
+	struct getbmap		bm;
+	int			iflags;
+	int			error;
+
+	if (copy_from_user(&bmx, arg, sizeof(bmx)))
+		return -XFS_ERROR(EFAULT);
+
+	if (bmx.bmv_count < 2)
+		return -XFS_ERROR(EINVAL);
+
+	/*
+	 * Map input getbmapx structure to a getbmap
+	 * structure for xfs_getbmap.
+	 */
+	GETBMAP_CONVERT(bmx, bm);
+
+	iflags = bmx.bmv_iflags;
+
+	if (iflags & (~BMV_IF_VALID))
+		return -XFS_ERROR(EINVAL);
+
+	iflags |= BMV_IF_EXTENDED;
+
+	error = xfs_getbmap(bdp, &bm, (struct getbmapx __user *)arg+1, iflags);
+	if (error)
+		return -error;
+
+	GETBMAP_CONVERT(bm, bmx);
+
+	if (copy_to_user(arg, &bmx, sizeof(bmx)))
+		return -XFS_ERROR(EFAULT);
+
+	return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
new file mode 100644
index 0000000..7a12c83
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/ioctl32.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+#include "xfs.h"
+#include "xfs_types.h"
+#include "xfs_fs.h"
+#include "xfs_vfs.h"
+#include "xfs_vnode.h"
+#include "xfs_dfrag.h"
+
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+#define BROKEN_X86_ALIGNMENT
+#else
+
+typedef struct xfs_fsop_bulkreq32 {
+	compat_uptr_t	lastip;		/* last inode # pointer		*/
+	__s32		icount;		/* count of entries in buffer	*/
+	compat_uptr_t	ubuffer;	/* user buffer for inode desc.	*/
+	__s32		ocount;		/* output count pointer		*/
+} xfs_fsop_bulkreq32_t;
+
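+/*
+ * Thunk a 32-bit xfs_fsop_bulkreq into a native one: the pointer-valued
+ * members are 32-bit values in the compat layout, so each is widened with
+ * compat_ptr() into a scratch native structure obtained from
+ * compat_alloc_user_space(), and the address of that scratch structure is
+ * what gets passed on to the normal ioctl path below.
+ */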
+static unsigned long
+xfs_ioctl32_bulkstat(unsigned long arg)
+{
+	xfs_fsop_bulkreq32_t	__user *p32 = (void __user *)arg;
+	xfs_fsop_bulkreq_t	__user *p = compat_alloc_user_space(sizeof(*p));
+	u32			addr;
+
+	if (get_user(addr, &p32->lastip) ||
+	    put_user(compat_ptr(addr), &p->lastip) ||
+	    copy_in_user(&p->icount, &p32->icount, sizeof(s32)) ||
+	    get_user(addr, &p32->ubuffer) ||
+	    put_user(compat_ptr(addr), &p->ubuffer) ||
+	    get_user(addr, &p32->ocount) ||
+	    put_user(compat_ptr(addr), &p->ocount))
+		return -EFAULT;
+
+	return (unsigned long)p;
+}
+#endif
+
+static long
+__xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
+{
+	int		error;
+	struct inode *inode = f->f_dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+
+	switch (cmd) {
+	case XFS_IOC_DIOINFO:
+	case XFS_IOC_FSGEOMETRY_V1:
+	case XFS_IOC_FSGEOMETRY:
+	case XFS_IOC_GETVERSION:
+	case XFS_IOC_GETXFLAGS:
+	case XFS_IOC_SETXFLAGS:
+	case XFS_IOC_FSGETXATTR:
+	case XFS_IOC_FSSETXATTR:
+	case XFS_IOC_FSGETXATTRA:
+	case XFS_IOC_FSSETDM:
+	case XFS_IOC_GETBMAP:
+	case XFS_IOC_GETBMAPA:
+	case XFS_IOC_GETBMAPX:
+/* not handled
+	case XFS_IOC_FD_TO_HANDLE:
+	case XFS_IOC_PATH_TO_HANDLE:
+	case XFS_IOC_PATH_TO_FSHANDLE:
+	case XFS_IOC_OPEN_BY_HANDLE:
+	case XFS_IOC_FSSETDM_BY_HANDLE:
+	case XFS_IOC_READLINK_BY_HANDLE:
+	case XFS_IOC_ATTRLIST_BY_HANDLE:
+	case XFS_IOC_ATTRMULTI_BY_HANDLE:
+*/
+	case XFS_IOC_FSCOUNTS:
+	case XFS_IOC_SET_RESBLKS:
+	case XFS_IOC_GET_RESBLKS:
+	case XFS_IOC_FSGROWFSDATA:
+	case XFS_IOC_FSGROWFSLOG:
+	case XFS_IOC_FSGROWFSRT:
+	case XFS_IOC_FREEZE:
+	case XFS_IOC_THAW:
+	case XFS_IOC_GOINGDOWN:
+	case XFS_IOC_ERROR_INJECTION:
+	case XFS_IOC_ERROR_CLEARALL:
+		break;
+
+#ifndef BROKEN_X86_ALIGNMENT
+	/* xfs_flock_t and xfs_bstat_t have wrong u32 vs u64 alignment */
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP64:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP64:
+	case XFS_IOC_SWAPEXT:
+		break;
+
+	case XFS_IOC_FSBULKSTAT_SINGLE:
+	case XFS_IOC_FSBULKSTAT:
+	case XFS_IOC_FSINUMBERS:
+		arg = xfs_ioctl32_bulkstat(arg);
+		break;
+#endif
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	VOP_IOCTL(vp, inode, f, mode, cmd, (void __user *)arg, error);
+	VMODIFY(vp);
+
+	return error;
+}
+
+long xfs_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+{
+	return __xfs_compat_ioctl(0, f, cmd, arg);
+}
+
+long xfs_compat_invis_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+{
+	return __xfs_compat_ioctl(IO_INVIS, f, cmd, arg);
+}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h
new file mode 100644
index 0000000..779f69a
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+long xfs_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+long xfs_compat_invis_ioctl(struct file *f, unsigned cmd, unsigned long arg);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
new file mode 100644
index 0000000..407e993
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+
+#include <linux/xattr.h>
+#include <linux/namei.h>
+
+
+/*
+ * Pull the link count and size up from the xfs inode to the linux inode
+ */
+STATIC void
+validate_fields(
+	struct inode	*ip)
+{
+	vnode_t		*vp = LINVFS_GET_VP(ip);
+	vattr_t		va;
+	int		error;
+
+	va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS;
+	VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error);
+	if (likely(!error)) {
+		ip->i_nlink = va.va_nlink;
+		ip->i_blocks = va.va_nblocks;
+
+		/* we're under i_sem so i_size can't change under us */
+		if (i_size_read(ip) != va.va_size)
+			i_size_write(ip, va.va_size);
+	}
+}
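+
+/*
+ * validate_fields() is called after the namespace operations below (create,
+ * link, unlink, rename, ...) so that the Linux inode's cached i_nlink,
+ * i_blocks and i_size stay in step with the authoritative XFS inode.
+ */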
+
+/*
+ * Determine whether a process has a valid fs_struct (kernel daemons
+ * like knfsd don't have an fs_struct).
+ *
+ * XXX(hch):  nfsd is broken, better fix it instead.
+ */
+STATIC inline int
+has_fs_struct(struct task_struct *task)
+{
+	return (task->fs != init_task.fs);
+}
+
+STATIC int
+linvfs_mknod(
+	struct inode	*dir,
+	struct dentry	*dentry,
+	int		mode,
+	dev_t		rdev)
+{
+	struct inode	*ip;
+	vattr_t		va;
+	vnode_t		*vp = NULL, *dvp = LINVFS_GET_VP(dir);
+	xfs_acl_t	*default_acl = NULL;
+	attrexists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
+	int		error;
+
+	/*
+	 * Irix uses Missed'em'V split, but doesn't want to see
+	 * the upper 5 bits of (14bit) major.
+	 */
+	if (!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)
+		return -EINVAL;
+
+	if (test_default_acl && test_default_acl(dvp)) {
+		if (!_ACL_ALLOC(default_acl))
+			return -ENOMEM;
+		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
+			_ACL_FREE(default_acl);
+			default_acl = NULL;
+		}
+	}
+
+	if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current))
+		mode &= ~current->fs->umask;
+
+	memset(&va, 0, sizeof(va));
+	va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
+	va.va_type = IFTOVT(mode);
+	va.va_mode = mode;
+
+	switch (mode & S_IFMT) {
+	case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
+		va.va_rdev = sysv_encode_dev(rdev);
+		va.va_mask |= XFS_AT_RDEV;
+		/*FALLTHROUGH*/
+	case S_IFREG:
+		VOP_CREATE(dvp, dentry, &va, &vp, NULL, error);
+		break;
+	case S_IFDIR:
+		VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error);
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	if (default_acl) {
+		if (!error) {
+			error = _ACL_INHERIT(vp, &va, default_acl);
+			if (!error) {
+				VMODIFY(vp);
+			} else {
+				struct dentry	teardown = {};
+				int		err2;
+
+				/* Oh, the horror.
+				 * If we can't add the ACL we must back out.
+				 * ENOSPC can hit here, among other things.
+				 */
+				teardown.d_inode = ip = LINVFS_GET_IP(vp);
+				teardown.d_name = dentry->d_name;
+
+				vn_mark_bad(vp);
+
+				if (S_ISDIR(mode))
+					VOP_RMDIR(dvp, &teardown, NULL, err2);
+				else
+					VOP_REMOVE(dvp, &teardown, NULL, err2);
+				VN_RELE(vp);
+			}
+		}
+		_ACL_FREE(default_acl);
+	}
+
+	if (!error) {
+		ASSERT(vp);
+		ip = LINVFS_GET_IP(vp);
+
+		if (S_ISCHR(mode) || S_ISBLK(mode))
+			ip->i_rdev = rdev;
+		else if (S_ISDIR(mode))
+			validate_fields(ip);
+		d_instantiate(dentry, ip);
+		validate_fields(dir);
+	}
+	return -error;
+}
+
+STATIC int
+linvfs_create(
+	struct inode	*dir,
+	struct dentry	*dentry,
+	int		mode,
+	struct nameidata *nd)
+{
+	return linvfs_mknod(dir, dentry, mode, 0);
+}
+
+STATIC int
+linvfs_mkdir(
+	struct inode	*dir,
+	struct dentry	*dentry,
+	int		mode)
+{
+	return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0);
+}
+
+STATIC struct dentry *
+linvfs_lookup(
+	struct inode	*dir,
+	struct dentry	*dentry,
+	struct nameidata *nd)
+{
+	struct vnode	*vp = LINVFS_GET_VP(dir), *cvp;
+	int		error;
+
+	if (dentry->d_name.len >= MAXNAMELEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	VOP_LOOKUP(vp, dentry, &cvp, 0, NULL, NULL, error);
+	if (error) {
+		if (unlikely(error != ENOENT))
+			return ERR_PTR(-error);
+		d_add(dentry, NULL);
+		return NULL;
+	}
+
+	return d_splice_alias(LINVFS_GET_IP(cvp), dentry);
+}
+
+STATIC int
+linvfs_link(
+	struct dentry	*old_dentry,
+	struct inode	*dir,
+	struct dentry	*dentry)
+{
+	struct inode	*ip;	/* inode of guy being linked to */
+	vnode_t		*tdvp;	/* target directory for new name/link */
+	vnode_t		*vp;	/* vp of name being linked */
+	int		error;
+
+	ip = old_dentry->d_inode;	/* inode being linked to */
+	if (S_ISDIR(ip->i_mode))
+		return -EPERM;
+
+	tdvp = LINVFS_GET_VP(dir);
+	vp = LINVFS_GET_VP(ip);
+
+	VOP_LINK(tdvp, vp, dentry, NULL, error);
+	if (!error) {
+		VMODIFY(tdvp);
+		VN_HOLD(vp);
+		validate_fields(ip);
+		d_instantiate(dentry, ip);
+	}
+	return -error;
+}
+
+STATIC int
+linvfs_unlink(
+	struct inode	*dir,
+	struct dentry	*dentry)
+{
+	struct inode	*inode;
+	vnode_t		*dvp;	/* directory containing name to remove */
+	int		error;
+
+	inode = dentry->d_inode;
+	dvp = LINVFS_GET_VP(dir);
+
+	VOP_REMOVE(dvp, dentry, NULL, error);
+	if (!error) {
+		validate_fields(dir);	/* For size only */
+		validate_fields(inode);
+	}
+
+	return -error;
+}
+
+STATIC int
+linvfs_symlink(
+	struct inode	*dir,
+	struct dentry	*dentry,
+	const char	*symname)
+{
+	struct inode	*ip;
+	vattr_t		va;
+	vnode_t		*dvp;	/* directory containing name of symlink */
+	vnode_t		*cvp;	/* used to lookup symlink to put in dentry */
+	int		error;
+
+	dvp = LINVFS_GET_VP(dir);
+	cvp = NULL;
+
+	memset(&va, 0, sizeof(va));
+	va.va_type = VLNK;
+	va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO;
+	va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
+
+	error = 0;
+	VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
+	if (!error && cvp) {
+		ASSERT(cvp->v_type == VLNK);
+		ip = LINVFS_GET_IP(cvp);
+		d_instantiate(dentry, ip);
+		validate_fields(dir);
+		validate_fields(ip); /* size needs update */
+	}
+	return -error;
+}
+
+STATIC int
+linvfs_rmdir(
+	struct inode	*dir,
+	struct dentry	*dentry)
+{
+	struct inode	*inode = dentry->d_inode;
+	vnode_t		*dvp = LINVFS_GET_VP(dir);
+	int		error;
+
+	VOP_RMDIR(dvp, dentry, NULL, error);
+	if (!error) {
+		validate_fields(inode);
+		validate_fields(dir);
+	}
+	return -error;
+}
+
+STATIC int
+linvfs_rename(
+	struct inode	*odir,
+	struct dentry	*odentry,
+	struct inode	*ndir,
+	struct dentry	*ndentry)
+{
+	struct inode	*new_inode = ndentry->d_inode;
+	vnode_t		*fvp;	/* from directory */
+	vnode_t		*tvp;	/* target directory */
+	int		error;
+
+	fvp = LINVFS_GET_VP(odir);
+	tvp = LINVFS_GET_VP(ndir);
+
+	VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error);
+	if (error)
+		return -error;
+
+	if (new_inode)
+		validate_fields(new_inode);
+
+	validate_fields(odir);
+	if (ndir != odir)
+		validate_fields(ndir);
+	return 0;
+}
+
+/*
+ * Careful here - this function can get called recursively, so we need
+ * to keep the stack footprint small; the uio is kmalloc'ed for exactly
+ * that reason.
+ */
+STATIC int
+linvfs_follow_link(
+	struct dentry		*dentry,
+	struct nameidata	*nd)
+{
+	vnode_t			*vp;
+	uio_t			*uio;
+	iovec_t			iov;
+	int			error;
+	char			*link;
+
+	ASSERT(dentry);
+	ASSERT(nd);
+
+	link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
+	if (!link) {
+		nd_set_link(nd, ERR_PTR(-ENOMEM));
+		return 0;
+	}
+
+	uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL);
+	if (!uio) {
+		kfree(link);
+		nd_set_link(nd, ERR_PTR(-ENOMEM));
+		return 0;
+	}
+
+	vp = LINVFS_GET_VP(dentry->d_inode);
+
+	iov.iov_base = link;
+	iov.iov_len = MAXNAMELEN;
+
+	uio->uio_iov = &iov;
+	uio->uio_offset = 0;
+	uio->uio_segflg = UIO_SYSSPACE;
+	uio->uio_resid = MAXNAMELEN;
+	uio->uio_iovcnt = 1;
+
+	VOP_READLINK(vp, uio, 0, NULL, error);
+	if (error) {
+		kfree(link);
+		link = ERR_PTR(-error);
+	} else {
+		link[MAXNAMELEN - uio->uio_resid] = '\0';
+	}
+	kfree(uio);
+
+	nd_set_link(nd, link);
+	return 0;
+}
+
+static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s))
+		kfree(s);
+}
+
+#ifdef CONFIG_XFS_POSIX_ACL
+STATIC int
+linvfs_permission(
+	struct inode	*inode,
+	int		mode,
+	struct nameidata *nd)
+{
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	int		error;
+
+	mode <<= 6;		/* convert from linux to vnode access bits */
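+	/*
+	 * MAY_EXEC/MAY_WRITE/MAY_READ are 1/2/4; shifting left by six turns
+	 * them into the owner rwx bits (S_IXUSR/S_IWUSR/S_IRUSR) that the
+	 * vnode access interface expects.
+	 */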
+	VOP_ACCESS(vp, mode, NULL, error);
+	return -error;
+}
+#else
+#define linvfs_permission NULL
+#endif
+
+STATIC int
+linvfs_getattr(
+	struct vfsmount	*mnt,
+	struct dentry	*dentry,
+	struct kstat	*stat)
+{
+	struct inode	*inode = dentry->d_inode;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	int		error = 0;
+
+	if (unlikely(vp->v_flag & VMODIFIED))
+		error = vn_revalidate(vp);
+	if (!error)
+		generic_fillattr(inode, stat);
+	return 0;
+}
+
+STATIC int
+linvfs_setattr(
+	struct dentry	*dentry,
+	struct iattr	*attr)
+{
+	struct inode	*inode = dentry->d_inode;
+	unsigned int	ia_valid = attr->ia_valid;
+	vnode_t		*vp = LINVFS_GET_VP(inode);
+	vattr_t		vattr;
+	int		flags = 0;
+	int		error;
+
+	memset(&vattr, 0, sizeof(vattr_t));
+	if (ia_valid & ATTR_UID) {
+		vattr.va_mask |= XFS_AT_UID;
+		vattr.va_uid = attr->ia_uid;
+	}
+	if (ia_valid & ATTR_GID) {
+		vattr.va_mask |= XFS_AT_GID;
+		vattr.va_gid = attr->ia_gid;
+	}
+	if (ia_valid & ATTR_SIZE) {
+		vattr.va_mask |= XFS_AT_SIZE;
+		vattr.va_size = attr->ia_size;
+	}
+	if (ia_valid & ATTR_ATIME) {
+		vattr.va_mask |= XFS_AT_ATIME;
+		vattr.va_atime = attr->ia_atime;
+	}
+	if (ia_valid & ATTR_MTIME) {
+		vattr.va_mask |= XFS_AT_MTIME;
+		vattr.va_mtime = attr->ia_mtime;
+	}
+	if (ia_valid & ATTR_CTIME) {
+		vattr.va_mask |= XFS_AT_CTIME;
+		vattr.va_ctime = attr->ia_ctime;
+	}
+	if (ia_valid & ATTR_MODE) {
+		vattr.va_mask |= XFS_AT_MODE;
+		vattr.va_mode = attr->ia_mode;
+		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+			inode->i_mode &= ~S_ISGID;
+	}
+
+	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET))
+		flags |= ATTR_UTIME;
+#ifdef ATTR_NO_BLOCK
+	if ((ia_valid & ATTR_NO_BLOCK))
+		flags |= ATTR_NONBLOCK;
+#endif
+
+	VOP_SETATTR(vp, &vattr, flags, NULL, error);
+	if (error)
+		return -error;
+	vn_revalidate(vp);
+	return error;
+}
+
+STATIC void
+linvfs_truncate(
+	struct inode	*inode)
+{
+	block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block);
+}
+
+STATIC int
+linvfs_setxattr(
+	struct dentry	*dentry,
+	const char	*name,
+	const void	*data,
+	size_t		size,
+	int		flags)
+{
+	vnode_t		*vp = LINVFS_GET_VP(dentry->d_inode);
+	char		*attr = (char *)name;
+	attrnames_t	*namesp;
+	int		xflags = 0;
+	int		error;
+
+	namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	attr += namesp->attr_namelen;
+	error = namesp->attr_capable(vp, NULL);
+	if (error)
+		return error;
+
+	/* Convert Linux syscall to XFS internal ATTR flags */
+	if (flags & XATTR_CREATE)
+		xflags |= ATTR_CREATE;
+	if (flags & XATTR_REPLACE)
+		xflags |= ATTR_REPLACE;
+	xflags |= namesp->attr_flag;
+	return namesp->attr_set(vp, attr, (void *)data, size, xflags);
+}
+
+STATIC ssize_t
+linvfs_getxattr(
+	struct dentry	*dentry,
+	const char	*name,
+	void		*data,
+	size_t		size)
+{
+	vnode_t		*vp = LINVFS_GET_VP(dentry->d_inode);
+	char		*attr = (char *)name;
+	attrnames_t	*namesp;
+	int		xflags = 0;
+	ssize_t		error;
+
+	namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	attr += namesp->attr_namelen;
+	error = namesp->attr_capable(vp, NULL);
+	if (error)
+		return error;
+
+	/* Convert Linux syscall to XFS internal ATTR flags */
+	if (!size) {
+		xflags |= ATTR_KERNOVAL;
+		data = NULL;
+	}
+	xflags |= namesp->attr_flag;
+	return namesp->attr_get(vp, attr, (void *)data, size, xflags);
+}
+
+STATIC ssize_t
+linvfs_listxattr(
+	struct dentry		*dentry,
+	char			*data,
+	size_t			size)
+{
+	vnode_t			*vp = LINVFS_GET_VP(dentry->d_inode);
+	int			error, xflags = ATTR_KERNAMELS;
+	ssize_t			result;
+
+	if (!size)
+		xflags |= ATTR_KERNOVAL;
+	xflags |= capable(CAP_SYS_ADMIN) ? ATTR_KERNFULLS : ATTR_KERNORMALS;
+
+	error = attr_generic_list(vp, data, size, xflags, &result);
+	if (error < 0)
+		return error;
+	return result;
+}
+
+STATIC int
+linvfs_removexattr(
+	struct dentry	*dentry,
+	const char	*name)
+{
+	vnode_t		*vp = LINVFS_GET_VP(dentry->d_inode);
+	char		*attr = (char *)name;
+	attrnames_t	*namesp;
+	int		xflags = 0;
+	int		error;
+
+	namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	attr += namesp->attr_namelen;
+	error = namesp->attr_capable(vp, NULL);
+	if (error)
+		return error;
+	xflags |= namesp->attr_flag;
+	return namesp->attr_remove(vp, attr, xflags);
+}
+
+
+struct inode_operations linvfs_file_inode_operations = {
+	.permission		= linvfs_permission,
+	.truncate		= linvfs_truncate,
+	.getattr		= linvfs_getattr,
+	.setattr		= linvfs_setattr,
+	.setxattr		= linvfs_setxattr,
+	.getxattr		= linvfs_getxattr,
+	.listxattr		= linvfs_listxattr,
+	.removexattr		= linvfs_removexattr,
+};
+
+struct inode_operations linvfs_dir_inode_operations = {
+	.create			= linvfs_create,
+	.lookup			= linvfs_lookup,
+	.link			= linvfs_link,
+	.unlink			= linvfs_unlink,
+	.symlink		= linvfs_symlink,
+	.mkdir			= linvfs_mkdir,
+	.rmdir			= linvfs_rmdir,
+	.mknod			= linvfs_mknod,
+	.rename			= linvfs_rename,
+	.permission		= linvfs_permission,
+	.getattr		= linvfs_getattr,
+	.setattr		= linvfs_setattr,
+	.setxattr		= linvfs_setxattr,
+	.getxattr		= linvfs_getxattr,
+	.listxattr		= linvfs_listxattr,
+	.removexattr		= linvfs_removexattr,
+};
+
+struct inode_operations linvfs_symlink_inode_operations = {
+	.readlink		= generic_readlink,
+	.follow_link		= linvfs_follow_link,
+	.put_link		= linvfs_put_link,
+	.permission		= linvfs_permission,
+	.getattr		= linvfs_getattr,
+	.setattr		= linvfs_setattr,
+	.setxattr		= linvfs_setxattr,
+	.getxattr		= linvfs_getxattr,
+	.listxattr		= linvfs_listxattr,
+	.removexattr		= linvfs_removexattr,
+};
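The xattr entry points above all dispatch on the attribute name's namespace prefix (attr_lookup_namespace() strips "user.", "trusted.", and so on) before handing the remainder to the XFS attr code. For reference, a small userspace sketch (not part of this patch; the path and value are made up) of the prefixed names these handlers receive via the setxattr(2) and getxattr(2) system calls:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/xfs/somefile";	/* hypothetical file */
	char buf[64];
	ssize_t len;

	/* arrives in linvfs_setxattr() with name "user.comment" */
	if (setxattr(path, "user.comment", "hello", 5, XATTR_CREATE) < 0)
		perror("setxattr");

	/* arrives in linvfs_getxattr(); passing size 0 would instead set
	 * ATTR_KERNOVAL and query only the attribute length */
	len = getxattr(path, "user.comment", buf, sizeof(buf));
	if (len >= 0)
		printf("user.comment = %.*s\n", (int)len, buf);
	return 0;
}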
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
new file mode 100644
index 0000000..6a69a62
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_IOPS_H__
+#define __XFS_IOPS_H__
+
+extern struct inode_operations linvfs_file_inode_operations;
+extern struct inode_operations linvfs_dir_inode_operations;
+extern struct inode_operations linvfs_symlink_inode_operations;
+
+extern struct file_operations linvfs_file_operations;
+extern struct file_operations linvfs_invis_file_operations;
+extern struct file_operations linvfs_dir_operations;
+
+extern struct address_space_operations linvfs_aops;
+
+extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+extern void linvfs_unwritten_done(struct buffer_head *, int);
+
+extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
+                        int, unsigned int, void __user *);
+
+#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
new file mode 100644
index 0000000..71bb410
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_LINUX__
+#define __XFS_LINUX__
+
+#include <linux/types.h>
+#include <linux/config.h>
+
+/*
+ * Some types are conditional depending on the target system.
+ * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits.
+ * XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well
+ * as requiring XFS_BIG_BLKNOS to be set.
+ */
+#if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
+# define XFS_BIG_BLKNOS	1
+# if BITS_PER_LONG == 64
+#  define XFS_BIG_INUMS	1
+# else
+#  define XFS_BIG_INUMS	0
+# endif
+#else
+# define XFS_BIG_BLKNOS	0
+# define XFS_BIG_INUMS	0
+#endif
+
+#include <xfs_types.h>
+#include <xfs_arch.h>
+
+#include <kmem.h>
+#include <mrlock.h>
+#include <spin.h>
+#include <sv.h>
+#include <mutex.h>
+#include <sema.h>
+#include <time.h>
+
+#include <support/qsort.h>
+#include <support/ktrace.h>
+#include <support/debug.h>
+#include <support/move.h>
+#include <support/uuid.h>
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/major.h>
+#include <linux/pagemap.h>
+#include <linux/vfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/sort.h>
+
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <asm/param.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <xfs_behavior.h>
+#include <xfs_vfs.h>
+#include <xfs_cred.h>
+#include <xfs_vnode.h>
+#include <xfs_stats.h>
+#include <xfs_sysctl.h>
+#include <xfs_iops.h>
+#include <xfs_super.h>
+#include <xfs_globals.h>
+#include <xfs_fs_subr.h>
+#include <xfs_lrw.h>
+#include <xfs_buf.h>
+
+/*
+ * Feature macros (disable/enable)
+ */
+#undef  HAVE_REFCACHE	/* reference cache not needed for NFS in 2.6 */
+#define HAVE_SENDFILE	/* sendfile(2) exists in 2.6, but not in 2.4 */
+
+/*
+ * State flag for unwritten extent buffers.
+ *
+ * We need to be able to distinguish between these and delayed
+ * allocate buffers within XFS.  The generic IO path code does
+ * not need to distinguish - we use the BH_Delay flag for both
+ * delalloc and these ondisk-uninitialised buffers.
+ */
+BUFFER_FNS(PrivateStart, unwritten);
+static inline void set_buffer_unwritten_io(struct buffer_head *bh)
+{
+	bh->b_end_io = linvfs_unwritten_done;
+}
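BUFFER_FNS() is the generic accessor generator from <linux/buffer_head.h>; the instantiation above hangs the unwritten-extent state off BH_PrivateStart, the first filesystem-private bit in b_state. Roughly, and only as a sketch of the expansion (not part of this patch), it produces:

static inline void set_buffer_unwritten(struct buffer_head *bh)
{
	set_bit(BH_PrivateStart, &bh->b_state);
}
static inline void clear_buffer_unwritten(struct buffer_head *bh)
{
	clear_bit(BH_PrivateStart, &bh->b_state);
}
static inline int buffer_unwritten(const struct buffer_head *bh)
{
	return test_bit(BH_PrivateStart, &bh->b_state);
}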
+
+#define restricted_chown	xfs_params.restrict_chown.val
+#define irix_sgid_inherit	xfs_params.sgid_inherit.val
+#define irix_symlink_mode	xfs_params.symlink_mode.val
+#define xfs_panic_mask		xfs_params.panic_mask.val
+#define xfs_error_level		xfs_params.error_level.val
+#define xfs_syncd_centisecs	xfs_params.syncd_timer.val
+#define xfs_stats_clear		xfs_params.stats_clear.val
+#define xfs_inherit_sync	xfs_params.inherit_sync.val
+#define xfs_inherit_nodump	xfs_params.inherit_nodump.val
+#define xfs_inherit_noatime	xfs_params.inherit_noatim.val
+#define xfs_buf_timer_centisecs	xfs_params.xfs_buf_timer.val
+#define xfs_buf_age_centisecs	xfs_params.xfs_buf_age.val
+#define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
+#define xfs_rotorstep		xfs_params.rotorstep.val
+
+#ifndef __smp_processor_id
+#define __smp_processor_id()	smp_processor_id()
+#endif
+#define current_cpu()		__smp_processor_id()
+#define current_pid()		(current->pid)
+#define current_fsuid(cred)	(current->fsuid)
+#define current_fsgid(cred)	(current->fsgid)
+
+#define NBPP		PAGE_SIZE
+#define DPPSHFT		(PAGE_SHIFT - 9)
+#define NDPP		(1 << (PAGE_SHIFT - 9))
+#define dtop(DD)	(((DD) + NDPP - 1) >> DPPSHFT)
+#define dtopt(DD)	((DD) >> DPPSHFT)
+#define dpoff(DD)	((DD) & (NDPP-1))
+
+#define NBBY		8		/* number of bits per byte */
+#define	NBPC		PAGE_SIZE	/* Number of bytes per click */
+#define	BPCSHIFT	PAGE_SHIFT	/* LOG2(NBPC) if exact */
+
+/*
+ * Size of block device i/o is parameterized here.
+ * Currently the system supports page-sized i/o.
+ */
+#define	BLKDEV_IOSHIFT		BPCSHIFT
+#define	BLKDEV_IOSIZE		(1<<BLKDEV_IOSHIFT)
+/* number of BB's per block device block */
+#define	BLKDEV_BB		BTOBB(BLKDEV_IOSIZE)
+
+/* bytes to clicks */
+#define	btoc(x)		(((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define	btoct(x)	((__psunsigned_t)(x)>>BPCSHIFT)
+#define	btoc64(x)	(((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define	btoct64(x)	((__uint64_t)(x)>>BPCSHIFT)
+#define	io_btoc(x)	(((__psunsigned_t)(x)+(IO_NBPC-1))>>IO_BPCSHIFT)
+#define	io_btoct(x)	((__psunsigned_t)(x)>>IO_BPCSHIFT)
+
+/* off_t bytes to clicks */
+#define offtoc(x)       (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define offtoct(x)      ((xfs_off_t)(x)>>BPCSHIFT)
+
+/* clicks to off_t bytes */
+#define	ctooff(x)	((xfs_off_t)(x)<<BPCSHIFT)
+
+/* clicks to bytes */
+#define	ctob(x)		((__psunsigned_t)(x)<<BPCSHIFT)
+#define btoct(x)        ((__psunsigned_t)(x)>>BPCSHIFT)
+#define	ctob64(x)	((__uint64_t)(x)<<BPCSHIFT)
+#define	io_ctob(x)	((__psunsigned_t)(x)<<IO_BPCSHIFT)
+
+/* bytes to clicks */
+#define btoc(x)         (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
+
+#ifndef CELL_CAPABLE
+#define FSC_NOTIFY_NAME_CHANGED(vp)
+#endif
+
+#ifndef ENOATTR
+#define ENOATTR		ENODATA		/* Attribute not found */
+#endif
+
+/* Note: EWRONGFS never visible outside the kernel */
+#define	EWRONGFS	EINVAL		/* Mount with wrong filesystem type */
+
+/*
+ * XXX EFSCORRUPTED needs a real value in errno.h. asm-i386/errno.h won't
+ *     return codes out of its known range in errno.
+ * XXX Also note: needs to be < 1000 and fairly unique on Linux (mustn't
+ *     conflict with any code we use already or any code a driver may use)
+ * XXX Some options (currently we do #2):
+ *	1/ New error code ["Filesystem is corrupted", _after_ glibc updated]
+ *	2/ 990 ["Unknown error 990"]
+ *	3/ EUCLEAN ["Structure needs cleaning"]
+ *	4/ Convert EFSCORRUPTED to EIO [just prior to return into userspace]
+ */
+#define EFSCORRUPTED    990		/* Filesystem is corrupted */
+
+#define SYNCHRONIZE()	barrier()
+#define __return_address __builtin_return_address(0)
+
+/*
+ * IRIX (BSD) quotactl makes use of separate commands for user/group,
+ * whereas on Linux the syscall encodes this information into the cmd
+ * field (see the QCMD macro in quota.h).  These macros help keep the
+ * code portable - they are not visible from the syscall interface.
+ */
+#define Q_XSETGQLIM	XQM_CMD(0x8)	/* set groups disk limits */
+#define Q_XGETGQUOTA	XQM_CMD(0x9)	/* get groups disk limits */
+
+/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */
+/* we may well need to fine-tune this if it ever becomes an issue.  */
+#define DQUOT_MAX_HEURISTIC	1024	/* NR_DQUOTS */
+#define ndquot			DQUOT_MAX_HEURISTIC
+
+/* IRIX uses the current size of the name cache to guess a good value */
+/* - this isn't the same but is a good enough starting point for now. */
+#define DQUOT_HASH_HEURISTIC	files_stat.nr_files
+
+/* IRIX inodes maintain the project ID also, zero this field on Linux */
+#define DEFAULT_PROJID	0
+#define dfltprid	DEFAULT_PROJID
+
+#define MAXPATHLEN	1024
+
+#define MIN(a,b)	(min(a,b))
+#define MAX(a,b)	(max(a,b))
+#define howmany(x, y)	(((x)+((y)-1))/(y))
+#define roundup(x, y)	((((x)+((y)-1))/(y))*(y))
+
+#define xfs_stack_trace()	dump_stack()
+
+#define xfs_itruncate_data(ip, off)	\
+	(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
+
+
+/* Move the kernel do_div definition off to one side */
+
+#if defined __i386__
+/* For ia32 we need to pull some tricks to get past various versions
+ * of the compiler which do not like us using do_div in the middle
+ * of large functions.
+ */
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+	__u32	mod;
+
+	switch (n) {
+		case 4:
+			mod = *(__u32 *)a % b;
+			*(__u32 *)a = *(__u32 *)a / b;
+			return mod;
+		case 8:
+			{
+			unsigned long __upper, __low, __high, __mod;
+			__u64	c = *(__u64 *)a;
+			__upper = __high = c >> 32;
+			__low = c;
+			if (__high) {
+				__upper = __high % (b);
+				__high = __high / (b);
+			}
+			asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+			asm("":"=A" (c):"a" (__low),"d" (__high));
+			*(__u64 *)a = c;
+			return __mod;
+			}
+	}
+
+	/* NOTREACHED */
+	return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+	switch (n) {
+		case 4:
+			return *(__u32 *)a % b;
+		case 8:
+			{
+			unsigned long __upper, __low, __high, __mod;
+			__u64	c = *(__u64 *)a;
+			__upper = __high = c >> 32;
+			__low = c;
+			if (__high) {
+				__upper = __high % (b);
+				__high = __high / (b);
+			}
+			asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+			asm("":"=A" (c):"a" (__low),"d" (__high));
+			return __mod;
+			}
+	}
+
+	/* NOTREACHED */
+	return 0;
+}
+#else
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+	__u32	mod;
+
+	switch (n) {
+		case 4:
+			mod = *(__u32 *)a % b;
+			*(__u32 *)a = *(__u32 *)a / b;
+			return mod;
+		case 8:
+			mod = do_div(*(__u64 *)a, b);
+			return mod;
+	}
+
+	/* NOTREACHED */
+	return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+	switch (n) {
+		case 4:
+			return *(__u32 *)a % b;
+		case 8:
+			{
+			__u64	c = *(__u64 *)a;
+			return do_div(c, b);
+			}
+	}
+
+	/* NOTREACHED */
+	return 0;
+}
+#endif
+
+#undef do_div
+#define do_div(a, b)	xfs_do_div(&(a), (b), sizeof(a))
+#define do_mod(a, b)	xfs_do_mod(&(a), (b), sizeof(a))
+
+static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
+{
+	x += y - 1;
+	do_div(x, y);
+	return(x * y);
+}
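With the redefinition above, do_div() divides its 64-bit first argument in place and returns the 32-bit remainder (which is what lets roundup_64() multiply the quotient back), while do_mod() leaves its argument untouched. A usage sketch with a hypothetical helper name, for illustration only:

static inline __uint64_t example_rounddown_64(__uint64_t x, __uint32_t y)
{
	__uint32_t	rem = do_mod(x, y);	/* x is not modified */

	return x - rem;		/* largest multiple of y that is <= x */
}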
+
+#define qsort(a, n, s, cmp) sort(a, n, s, cmp, NULL)
+
+#endif /* __XFS_LINUX__ */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
new file mode 100644
index 0000000..ff145fd
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -0,0 +1,1082 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+/*
+ *  fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
+ *
+ */
+
+#include "xfs.h"
+
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_inode_item.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_iomap.h"
+
+#include <linux/capability.h>
+#include <linux/writeback.h>
+
+
+#if defined(XFS_RW_TRACE)
+void
+xfs_rw_enter_trace(
+	int			tag,
+	xfs_iocore_t		*io,
+	void			*data,
+	size_t			segs,
+	loff_t			offset,
+	int			ioflags)
+{
+	xfs_inode_t	*ip = XFS_IO_INODE(io);
+
+	if (ip->i_rwtrace == NULL)
+		return;
+	ktrace_enter(ip->i_rwtrace,
+		(void *)(unsigned long)tag,
+		(void *)ip,
+		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+		(void *)data,
+		(void *)((unsigned long)segs),
+		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(offset & 0xffffffff)),
+		(void *)((unsigned long)ioflags),
+		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL);
+}
+
+void
+xfs_inval_cached_trace(
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	xfs_off_t	len,
+	xfs_off_t	first,
+	xfs_off_t	last)
+{
+	xfs_inode_t	*ip = XFS_IO_INODE(io);
+
+	if (ip->i_rwtrace == NULL)
+		return;
+	ktrace_enter(ip->i_rwtrace,
+		(void *)(__psint_t)XFS_INVAL_CACHED,
+		(void *)ip,
+		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(offset & 0xffffffff)),
+		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(len & 0xffffffff)),
+		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(first & 0xffffffff)),
+		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(last & 0xffffffff)),
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL);
+}
+#endif
+
+/*
+ *	xfs_iozero
+ *
+ *	xfs_iozero clears the specified range of the buffer supplied,
+ *	and marks all the affected blocks as valid and modified.  If
+ *	an affected block is not allocated, it will be allocated.  If
+ *	an affected block is not completely overwritten, and is not
+ *	valid before the operation, it will be read from disk before
+ *	being partially zeroed.
+ */
+STATIC int
+xfs_iozero(
+	struct inode		*ip,	/* inode			*/
+	loff_t			pos,	/* offset in file		*/
+	size_t			count,	/* size of data to zero		*/
+	loff_t			end_size)	/* max file size to set */
+{
+	unsigned		bytes;
+	struct page		*page;
+	struct address_space	*mapping;
+	char			*kaddr;
+	int			status;
+
+	mapping = ip->i_mapping;
+	do {
+		unsigned long index, offset;
+
+		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+		index = pos >> PAGE_CACHE_SHIFT;
+		bytes = PAGE_CACHE_SIZE - offset;
+		if (bytes > count)
+			bytes = count;
+
+		status = -ENOMEM;
+		page = grab_cache_page(mapping, index);
+		if (!page)
+			break;
+
+		kaddr = kmap(page);
+		status = mapping->a_ops->prepare_write(NULL, page, offset,
+							offset + bytes);
+		if (status) {
+			goto unlock;
+		}
+
+		memset((void *) (kaddr + offset), 0, bytes);
+		flush_dcache_page(page);
+		status = mapping->a_ops->commit_write(NULL, page, offset,
+							offset + bytes);
+		if (!status) {
+			pos += bytes;
+			count -= bytes;
+			if (pos > i_size_read(ip))
+				i_size_write(ip, pos < end_size ? pos : end_size);
+		}
+
+unlock:
+		kunmap(page);
+		unlock_page(page);
+		page_cache_release(page);
+		if (status)
+			break;
+	} while (count);
+
+	return (-status);
+}
+
+/*
+ * xfs_inval_cached_pages
+ * 
+ * This routine is responsible for keeping direct I/O and buffered I/O
+ * somewhat coherent.  From here we make sure that we're at least
+ * temporarily holding the inode I/O lock exclusively and then call
+ * the page cache to flush and invalidate any cached pages.  If there
+ * are no cached pages this routine will be very quick.
+ */
+void
+xfs_inval_cached_pages(
+	vnode_t		*vp,
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	int		write,
+	int		relock)
+{
+	if (VN_CACHED(vp)) {
+		xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
+		VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
+	}
+
+}
+
+ssize_t			/* bytes read, or (-)  error */
+xfs_read(
+	bhv_desc_t		*bdp,
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned int		segs,
+	loff_t			*offset,
+	int			ioflags,
+	cred_t			*credp)
+{
+	struct file		*file = iocb->ki_filp;
+	struct inode		*inode = file->f_mapping->host;
+	size_t			size = 0;
+	ssize_t			ret;
+	xfs_fsize_t		n;
+	xfs_inode_t		*ip;
+	xfs_mount_t		*mp;
+	vnode_t			*vp;
+	unsigned long		seg;
+
+	ip = XFS_BHVTOI(bdp);
+	vp = BHV_TO_VNODE(bdp);
+	mp = ip->i_mount;
+
+	XFS_STATS_INC(xs_read_calls);
+
+	/* START copy & waste from filemap.c */
+	for (seg = 0; seg < segs; seg++) {
+		const struct iovec *iv = &iovp[seg];
+
+		/*
+		 * If any segment has a negative length, or the cumulative
+		 * length ever wraps negative then return -EINVAL.
+		 */
+		size += iv->iov_len;
+		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
+			return XFS_ERROR(-EINVAL);
+	}
+	/* END copy & waste from filemap.c */
+
+	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_buftarg_t	*target =
+			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+				mp->m_rtdev_targp : mp->m_ddev_targp;
+		if ((*offset & target->pbr_smask) ||
+		    (size & target->pbr_smask)) {
+			if (*offset == ip->i_d.di_size) {
+				return (0);
+			}
+			return -XFS_ERROR(EINVAL);
+		}
+	}
+
+	n = XFS_MAXIOFFSET(mp) - *offset;
+	if ((n <= 0) || (size == 0))
+		return 0;
+
+	if (n < size)
+		size = n;
+
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		return -EIO;
+	}
+
+	if (unlikely(ioflags & IO_ISDIRECT))
+		down(&inode->i_sem);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
+	    !(ioflags & IO_INVIS)) {
+		vrwlock_t locktype = VRWLOCK_READ;
+
+		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
+					BHV_TO_VNODE(bdp), *offset, size,
+					FILP_DELAY_FLAG(file), &locktype);
+		if (ret) {
+			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+			goto unlock_isem;
+		}
+	}
+
+	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
+				(void *)iovp, segs, *offset, ioflags);
+	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
+	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
+		ret = wait_on_sync_kiocb(iocb);
+	if (ret > 0)
+		XFS_STATS_ADD(xs_read_bytes, ret);
+
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	if (likely(!(ioflags & IO_INVIS)))
+		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+
+unlock_isem:
+	if (unlikely(ioflags & IO_ISDIRECT))
+		up(&inode->i_sem);
+	return ret;
+}
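The segment-validation loop above (borrowed from filemap.c) relies on a sign-bit trick: OR-ing each iov_len into the running total and casting to ssize_t goes negative if any single length has its top bit set or if the cumulative sum wraps past SSIZE_MAX. A standalone illustration (not part of this patch):

#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

static int segments_ok(const struct iovec *iov, unsigned int segs)
{
	size_t		size = 0;
	unsigned int	i;

	for (i = 0; i < segs; i++) {
		size += iov[i].iov_len;
		if ((ssize_t)(size | iov[i].iov_len) < 0)
			return 0;	/* the kernel code returns -EINVAL here */
	}
	return 1;
}

int main(void)
{
	char		buf[8];
	struct iovec	good[2] = { { buf, sizeof(buf) }, { buf, sizeof(buf) } };
	struct iovec	bad[1]  = { { buf, (size_t)-1 } };	/* "negative" length */

	printf("good: %d, bad: %d\n", segments_ok(good, 2), segments_ok(bad, 1));
	return 0;
}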
+
+ssize_t
+xfs_sendfile(
+	bhv_desc_t		*bdp,
+	struct file		*filp,
+	loff_t			*offset,
+	int			ioflags,
+	size_t			count,
+	read_actor_t		actor,
+	void			*target,
+	cred_t			*credp)
+{
+	ssize_t			ret;
+	xfs_fsize_t		n;
+	xfs_inode_t		*ip;
+	xfs_mount_t		*mp;
+	vnode_t			*vp;
+
+	ip = XFS_BHVTOI(bdp);
+	vp = BHV_TO_VNODE(bdp);
+	mp = ip->i_mount;
+
+	XFS_STATS_INC(xs_read_calls);
+
+	n = XFS_MAXIOFFSET(mp) - *offset;
+	if ((n <= 0) || (count == 0))
+		return 0;
+
+	if (n < count)
+		count = n;
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return -EIO;
+
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
+	    (!(ioflags & IO_INVIS))) {
+		vrwlock_t locktype = VRWLOCK_READ;
+		int error;
+
+		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
+				      FILP_DELAY_FLAG(filp), &locktype);
+		if (error) {
+			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+			return -error;
+		}
+	}
+	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
+		   (void *)(unsigned long)target, count, *offset, ioflags);
+	ret = generic_file_sendfile(filp, offset, count, actor, target);
+
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	if (ret > 0)
+		XFS_STATS_ADD(xs_read_bytes, ret);
+
+	if (likely(!(ioflags & IO_INVIS)))
+		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+
+	return ret;
+}
+
+/*
+ * This routine is called to handle zeroing any space in the last
+ * block of the file that is beyond the EOF.  We do this since the
+ * size is being increased without writing anything to that block
+ * and we don't want anyone to read the garbage on the disk.
+ */
+STATIC int				/* error (positive) */
+xfs_zero_last_block(
+	struct inode	*ip,
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	xfs_fsize_t	isize,
+	xfs_fsize_t	end_size)
+{
+	xfs_fileoff_t	last_fsb;
+	xfs_mount_t	*mp;
+	int		nimaps;
+	int		zero_offset;
+	int		zero_len;
+	int		isize_fsb_offset;
+	int		error = 0;
+	xfs_bmbt_irec_t	imap;
+	loff_t		loff;
+	size_t		lsize;
+
+	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+	ASSERT(offset > isize);
+
+	mp = io->io_mount;
+
+	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
+	if (isize_fsb_offset == 0) {
+		/*
+		 * There are no extra bytes in the last block on disk to
+		 * zero, so return.
+		 */
+		return 0;
+	}
+
+	last_fsb = XFS_B_TO_FSBT(mp, isize);
+	nimaps = 1;
+	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
+			  &nimaps, NULL);
+	if (error) {
+		return error;
+	}
+	ASSERT(nimaps > 0);
+	/*
+	 * If the block underlying isize is just a hole, then there
+	 * is nothing to zero.
+	 */
+	if (imap.br_startblock == HOLESTARTBLOCK) {
+		return 0;
+	}
+	/*
+	 * Zero the part of the last block beyond the EOF, and write it
+	 * out sync.  We need to drop the ilock while we do this so we
+	 * don't deadlock when the buffer cache calls back to us.
+	 */
+	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
+	loff = XFS_FSB_TO_B(mp, last_fsb);
+	lsize = XFS_FSB_TO_B(mp, 1);
+
+	zero_offset = isize_fsb_offset;
+	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+
+	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
+
+	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	ASSERT(error >= 0);
+	return error;
+}
+
+/*
+ * Zero any on disk space between the current EOF and the new,
+ * larger EOF.  This handles the normal case of zeroing the remainder
+ * of the last block in the file and the unusual case of zeroing blocks
+ * out beyond the size of the file.  This second case only happens
+ * with fixed size extents and when the system crashes before the inode
+ * size was updated but after blocks were allocated.  If fill is set,
+ * then any holes in the range are filled and zeroed.  If not, the holes
+ * are left alone as holes.
+ */
+
+int					/* error (positive) */
+xfs_zero_eof(
+	vnode_t		*vp,
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,		/* starting I/O offset */
+	xfs_fsize_t	isize,		/* current inode size */
+	xfs_fsize_t	end_size)	/* terminal inode size */
+{
+	struct inode	*ip = LINVFS_GET_IP(vp);
+	xfs_fileoff_t	start_zero_fsb;
+	xfs_fileoff_t	end_zero_fsb;
+	xfs_fileoff_t	prev_zero_fsb;
+	xfs_fileoff_t	zero_count_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_extlen_t	buf_len_fsb;
+	xfs_extlen_t	prev_zero_count;
+	xfs_mount_t	*mp;
+	int		nimaps;
+	int		error = 0;
+	xfs_bmbt_irec_t	imap;
+	loff_t		loff;
+	size_t		lsize;
+
+	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+
+	mp = io->io_mount;
+
+	/*
+	 * First handle zeroing the block on which isize resides.
+	 * We only zero a part of that block so it is handled specially.
+	 */
+	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+	if (error) {
+		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+		return error;
+	}
+
+	/*
+	 * Calculate the range between the new size and the old
+	 * where blocks needing to be zeroed may exist.  To get the
+	 * block where the last byte in the file currently resides,
+	 * we need to subtract one from the size and truncate back
+	 * to a block boundary.  We subtract 1 in case the size is
+	 * exactly on a block boundary.
+	 */
+	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
+	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
+	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
+	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
+	if (last_fsb == end_zero_fsb) {
+		/*
+		 * The size was only incremented on its last block.
+		 * We took care of that above, so just return.
+		 */
+		return 0;
+	}
+
+	ASSERT(start_zero_fsb <= end_zero_fsb);
+	prev_zero_fsb = NULLFILEOFF;
+	prev_zero_count = 0;
+	while (start_zero_fsb <= end_zero_fsb) {
+		nimaps = 1;
+		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
+		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
+				  0, NULL, 0, &imap, &nimaps, NULL);
+		if (error) {
+			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+			return error;
+		}
+		ASSERT(nimaps > 0);
+
+		if (imap.br_state == XFS_EXT_UNWRITTEN ||
+		    imap.br_startblock == HOLESTARTBLOCK) {
+			/*
+			 * This loop handles initializing pages that were
+			 * partially initialized by the code below this
+			 * loop. It basically zeroes the part of the page
+			 * that sits on a hole and sets the page as P_HOLE
+			 * and calls remapf if it is a mapped file.
+			 */
+			prev_zero_fsb = NULLFILEOFF;
+			prev_zero_count = 0;
+			start_zero_fsb = imap.br_startoff +
+					 imap.br_blockcount;
+			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+			continue;
+		}
+
+		/*
+		 * There are blocks in the range requested.
+		 * Zero them a single write at a time.  We actually
+		 * don't zero the entire range returned if it is
+		 * too big and simply loop around to get the rest.
+		 * That is not the most efficient thing to do, but it
+		 * is simple and this path should not be exercised often.
+		 */
+		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
+					      mp->m_writeio_blocks << 8);
+		/*
+		 * Drop the inode lock while we're doing the I/O.
+		 * We'll still have the iolock to protect us.
+		 */
+		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+
+		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
+		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
+
+		error = xfs_iozero(ip, loff, lsize, end_size);
+
+		if (error) {
+			goto out_lock;
+		}
+
+		prev_zero_fsb = start_zero_fsb;
+		prev_zero_count = buf_len_fsb;
+		start_zero_fsb = imap.br_startoff + buf_len_fsb;
+		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+
+		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	}
+
+	return 0;
+
+out_lock:
+
+	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	ASSERT(error >= 0);
+	return error;
+}
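A worked example of the range arithmetic above, assuming a hypothetical 4096-byte filesystem block, an old size of 6000 bytes and a new write starting at offset 20000 (illustration only; XFS_B_TO_FSB rounds up to the next block while XFS_B_TO_FSBT truncates):

#include <stdio.h>

int main(void)
{
	unsigned long long	blocksize = 4096;
	unsigned long long	isize = 6000;	/* current EOF */
	unsigned long long	offset = 20000;	/* start of the new write */

	/* xfs_zero_last_block(): partial tail of the block holding EOF */
	unsigned long long	zero_offset = isize % blocksize;		  /* 1904 */
	unsigned long long	zero_len = blocksize - zero_offset;		  /* 2192 */

	/* xfs_zero_eof(): whole blocks between old EOF and the new offset */
	unsigned long long	start_zero = (isize + blocksize - 1) / blocksize; /* 2 */
	unsigned long long	end_zero = (offset - 1) / blocksize;		  /* 4 */

	printf("zero %llu bytes at offset %llu, then blocks %llu..%llu\n",
	       zero_len, isize, start_zero, end_zero);
	return 0;
}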
+
+ssize_t				/* bytes written, or (-) error */
+xfs_write(
+	bhv_desc_t		*bdp,
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned int		nsegs,
+	loff_t			*offset,
+	int			ioflags,
+	cred_t			*credp)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	unsigned long		segs = nsegs;
+	xfs_inode_t		*xip;
+	xfs_mount_t		*mp;
+	ssize_t			ret = 0, error = 0;
+	xfs_fsize_t		isize, new_size;
+	xfs_iocore_t		*io;
+	vnode_t			*vp;
+	unsigned long		seg;
+	int			iolock;
+	int			eventsent = 0;
+	vrwlock_t		locktype;
+	size_t			ocount = 0, count;
+	loff_t			pos;
+	int			need_isem = 1, need_flush = 0;
+
+	XFS_STATS_INC(xs_write_calls);
+
+	vp = BHV_TO_VNODE(bdp);
+	xip = XFS_BHVTOI(bdp);
+
+	for (seg = 0; seg < segs; seg++) {
+		const struct iovec *iv = &iovp[seg];
+
+		/*
+		 * If any segment has a negative length, or the cumulative
+		 * length ever wraps negative then return -EINVAL.
+		 */
+		ocount += iv->iov_len;
+		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
+			return -EINVAL;
+		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+			continue;
+		if (seg == 0)
+			return -EFAULT;
+		segs = seg;
+		ocount -= iv->iov_len;  /* This segment is no good */
+		break;
+	}
+
+	count = ocount;
+	pos = *offset;
+
+	if (count == 0)
+		return 0;
+
+	io = &xip->i_iocore;
+	mp = io->io_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);
+
+	if (ioflags & IO_ISDIRECT) {
+		xfs_buftarg_t	*target =
+			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+				mp->m_rtdev_targp : mp->m_ddev_targp;
+
+		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
+			return XFS_ERROR(-EINVAL);
+
+		if (!VN_CACHED(vp) && pos < i_size_read(inode))
+			need_isem = 0;
+
+		if (VN_CACHED(vp))
+			need_flush = 1;
+	}
+
+relock:
+	if (need_isem) {
+		iolock = XFS_IOLOCK_EXCL;
+		locktype = VRWLOCK_WRITE;
+
+		down(&inode->i_sem);
+	} else {
+		iolock = XFS_IOLOCK_SHARED;
+		locktype = VRWLOCK_WRITE_DIRECT;
+	}
+
+	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
+
+	isize = i_size_read(inode);
+
+	if (file->f_flags & O_APPEND)
+		*offset = isize;
+
+start:
+	error = -generic_write_checks(file, &pos, &count,
+					S_ISBLK(inode->i_mode));
+	if (error) {
+		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
+		goto out_unlock_isem;
+	}
+
+	new_size = pos + count;
+	if (new_size > isize)
+		io->io_new_size = new_size;
+
+	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
+	    !(ioflags & IO_INVIS) && !eventsent)) {
+		loff_t		savedsize = pos;
+		int		dmflags = FILP_DELAY_FLAG(file);
+
+		if (need_isem)
+			dmflags |= DM_FLAGS_ISEM;
+
+		xfs_iunlock(xip, XFS_ILOCK_EXCL);
+		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
+				      pos, count,
+				      dmflags, &locktype);
+		if (error) {
+			xfs_iunlock(xip, iolock);
+			goto out_unlock_isem;
+		}
+		xfs_ilock(xip, XFS_ILOCK_EXCL);
+		eventsent = 1;
+
+		/*
+		 * The iolock was dropped and reacquired in XFS_SEND_DATA
+		 * so we have to recheck the size when appending.
+		 * We will only "goto start;" once, since having sent the
+		 * event prevents another call to XFS_SEND_DATA, which is
+		 * what allows the size to change in the first place.
+		 */
+		if ((file->f_flags & O_APPEND) && savedsize != isize) {
+			pos = isize = xip->i_d.di_size;
+			goto start;
+		}
+	}
+
+	/*
+	 * On Linux, generic_file_write updates the times even if
+	 * no data is copied in so long as the write had a size.
+	 *
+	 * We must update xfs' times since revalidate will overcopy xfs.
+	 */
+	if (!(ioflags & IO_INVIS)) {
+		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+		inode_update_time(inode, 1);
+	}
+
+	/*
+	 * If the offset is beyond the size of the file, we have a couple
+	 * of things to do. First, if there is already space allocated
+	 * we need to either create holes or zero the disk or ...
+	 *
+	 * If there is a page where the previous size lands, we need
+	 * to zero it out up to the new size.
+	 */
+
+	if (pos > isize) {
+		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
+					isize, pos + count);
+		if (error) {
+			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
+			goto out_unlock_isem;
+		}
+	}
+	xfs_iunlock(xip, XFS_ILOCK_EXCL);
+
+	/*
+	 * If we're writing the file then make sure to clear the
+	 * setuid and setgid bits if the process is not being run
+	 * by root.  This keeps people from modifying setuid and
+	 * setgid binaries.
+	 */
+
+	if (((xip->i_d.di_mode & S_ISUID) ||
+	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
+		(S_ISGID | S_IXGRP))) &&
+	     !capable(CAP_FSETID)) {
+		error = xfs_write_clear_setuid(xip);
+		if (likely(!error))
+			error = -remove_suid(file->f_dentry);
+		if (unlikely(error)) {
+			xfs_iunlock(xip, iolock);
+			goto out_unlock_isem;
+		}
+	}
+
+retry:
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+	if ((ioflags & IO_ISDIRECT)) {
+		if (need_flush) {
+			xfs_inval_cached_trace(io, pos, -1,
+					ctooff(offtoct(pos)), -1);
+			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
+					-1, FI_REMAPF_LOCKED);
+		}
+
+		if (need_isem) {
+			/* demote the lock now the cached pages are gone */
+			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
+			up(&inode->i_sem);
+
+			iolock = XFS_IOLOCK_SHARED;
+			locktype = VRWLOCK_WRITE_DIRECT;
+			need_isem = 0;
+		}
+
+ 		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
+				*offset, ioflags);
+		ret = generic_file_direct_write(iocb, iovp,
+				&segs, pos, offset, count, ocount);
+
+		/*
+		 * direct-io write to a hole: fall through to buffered I/O
+		 * for completing the rest of the request.
+		 */
+		if (ret >= 0 && ret != count) {
+			XFS_STATS_ADD(xs_write_bytes, ret);
+
+			pos += ret;
+			count -= ret;
+
+			need_isem = 1;
+			ioflags &= ~IO_ISDIRECT;
+			xfs_iunlock(xip, iolock);
+			goto relock;
+		}
+	} else {
+		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
+				*offset, ioflags);
+		ret = generic_file_buffered_write(iocb, iovp, segs,
+				pos, offset, count, ret);
+	}
+
+	current->backing_dev_info = NULL;
+
+	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
+		ret = wait_on_sync_kiocb(iocb);
+
+	if ((ret == -ENOSPC) &&
+	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
+	    !(ioflags & IO_INVIS)) {
+
+		xfs_rwunlock(bdp, locktype);
+		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
+				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
+				0, 0, 0); /* Delay flag intentionally  unused */
+		if (error)
+			goto out_unlock_isem;
+		xfs_rwlock(bdp, locktype);
+		pos = xip->i_d.di_size;
+		ret = 0;
+		goto retry;
+	}
+
+	if (*offset > xip->i_d.di_size) {
+		xfs_ilock(xip, XFS_ILOCK_EXCL);
+		if (*offset > xip->i_d.di_size) {
+			xip->i_d.di_size = *offset;
+			i_size_write(inode, *offset);
+			xip->i_update_core = 1;
+			xip->i_update_size = 1;
+		}
+		xfs_iunlock(xip, XFS_ILOCK_EXCL);
+	}
+
+	error = -ret;
+	if (ret <= 0)
+		goto out_unlock_internal;
+
+	XFS_STATS_ADD(xs_write_bytes, ret);
+
+	/* Handle various SYNC-type writes */
+	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
+		/*
+		 * If we're treating this as O_DSYNC and we have not updated the
+		 * size, force the log.
+		 */
+		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
+		    !(xip->i_update_size)) {
+			xfs_inode_log_item_t	*iip = xip->i_itemp;
+
+			/*
+			 * If an allocation transaction occurred
+			 * without extending the size, then we have to force
+			 * the log up the proper point to ensure that the
+			 * allocation is permanent.  We can't count on
+			 * the fact that buffered writes lock out direct I/O
+			 * writes - the direct I/O write could have extended
+			 * the size nontransactionally, then finished before
+			 * we started.  xfs_write_file will think that the file
+			 * didn't grow but the update isn't safe unless the
+			 * size change is logged.
+			 *
+			 * Force the log if we've committed a transaction
+			 * against the inode or if someone else has and
+			 * the commit record hasn't gone to disk (e.g.
+			 * the inode is pinned).  This guarantees that
+			 * all changes affecting the inode are permanent
+			 * when we return.
+			 */
+			if (iip && iip->ili_last_lsn) {
+				xfs_log_force(mp, iip->ili_last_lsn,
+						XFS_LOG_FORCE | XFS_LOG_SYNC);
+			} else if (xfs_ipincount(xip) > 0) {
+				xfs_log_force(mp, (xfs_lsn_t)0,
+						XFS_LOG_FORCE | XFS_LOG_SYNC);
+			}
+
+		} else {
+			xfs_trans_t	*tp;
+
+			/*
+			 * O_SYNC or O_DSYNC _with_ a size update are handled
+			 * the same way.
+			 *
+			 * If the write was synchronous then we need to make
+			 * sure that the inode modification time is permanent.
+			 * We'll have updated the timestamp above, so here
+			 * we use a synchronous transaction to log the inode.
+			 * It's not fast, but it's necessary.
+			 *
+			 * If this is a dsync write and the size got changed
+			 * non-transactionally, then we need to ensure that
+			 * the size change gets logged in a synchronous
+			 * transaction.
+			 */
+
+			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+			if ((error = xfs_trans_reserve(tp, 0,
+						      XFS_SWRITE_LOG_RES(mp),
+						      0, 0, 0))) {
+				/* Transaction reserve failed */
+				xfs_trans_cancel(tp, 0);
+			} else {
+				/* Transaction reserve successful */
+				xfs_ilock(xip, XFS_ILOCK_EXCL);
+				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(tp, xip);
+				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
+				xfs_trans_set_sync(tp);
+				error = xfs_trans_commit(tp, 0, NULL);
+				xfs_iunlock(xip, XFS_ILOCK_EXCL);
+			}
+			if (error)
+				goto out_unlock_internal;
+		}
+	
+		xfs_rwunlock(bdp, locktype);
+		if (need_isem)
+			up(&inode->i_sem);
+
+		error = sync_page_range(inode, mapping, pos, ret);
+		if (!error)
+			error = ret;
+		return error;
+	}
+
+ out_unlock_internal:
+	xfs_rwunlock(bdp, locktype);
+ out_unlock_isem:
+	if (need_isem)
+		up(&inode->i_sem);
+	return -error;
+}
+
+/*
+ * All xfs metadata buffers except log state machine buffers
+ * get this attached as their b_bdstrat callback function.
+ * This is so that we can catch a buffer
+ * after prematurely unpinning it to forcibly shutdown the filesystem.
+ */
+int
+xfs_bdstrat_cb(struct xfs_buf *bp)
+{
+	xfs_mount_t	*mp;
+
+	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
+	if (!XFS_FORCED_SHUTDOWN(mp)) {
+		pagebuf_iorequest(bp);
+		return 0;
+	} else {
+		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
+		/*
+		 * Metadata write that didn't get logged but
+		 * written delayed anyway. These aren't associated
+		 * with a transaction, and can be ignored.
+		 */
+		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
+		    (XFS_BUF_ISREAD(bp)) == 0)
+			return (xfs_bioerror_relse(bp));
+		else
+			return (xfs_bioerror(bp));
+	}
+}
+
+
+int
+xfs_bmap(bhv_desc_t	*bdp,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	xfs_iomap_t	*iomapp,
+	int		*niomaps)
+{
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	xfs_iocore_t	*io = &ip->i_iocore;
+
+	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
+}
+
+/*
+ * Wrapper around bdstrat so that we can stop data
+ * from going to disk in case we are shutting down the filesystem.
+ * Typically user data goes thru this path; one of the exceptions
+ * is the superblock.
+ */
+int
+xfsbdstrat(
+	struct xfs_mount	*mp,
+	struct xfs_buf		*bp)
+{
+	ASSERT(mp);
+	if (!XFS_FORCED_SHUTDOWN(mp)) {
+		/* Grio redirection would go here
+		 * if (XFS_BUF_IS_GRIO(bp)) {
+		 */
+
+		pagebuf_iorequest(bp);
+		return 0;
+	}
+
+	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
+	return (xfs_bioerror_relse(bp));
+}
+
+/*
+ * If the underlying (data/log/rt) device is readonly, there are some
+ * operations that cannot proceed.
+ */
+int
+xfs_dev_is_read_only(
+	xfs_mount_t		*mp,
+	char			*message)
+{
+	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
+	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
+	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
+		cmn_err(CE_NOTE,
+			"XFS: %s required on read-only device.", message);
+		cmn_err(CE_NOTE,
+			"XFS: write access unavailable, cannot proceed.");
+		return EROFS;
+	}
+	return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
new file mode 100644
index 0000000..d723e35
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_LRW_H__
+#define __XFS_LRW_H__
+
+struct vnode;
+struct bhv_desc;
+struct xfs_mount;
+struct xfs_iocore;
+struct xfs_inode;
+struct xfs_bmbt_irec;
+struct xfs_buf;
+struct xfs_iomap;
+
+#if defined(XFS_RW_TRACE)
+/*
+ * Defines for the trace mechanisms in xfs_lrw.c.
+ */
+#define	XFS_RW_KTRACE_SIZE	128
+
+#define	XFS_READ_ENTER		1
+#define	XFS_WRITE_ENTER		2
+#define XFS_IOMAP_READ_ENTER	3
+#define	XFS_IOMAP_WRITE_ENTER	4
+#define	XFS_IOMAP_READ_MAP	5
+#define	XFS_IOMAP_WRITE_MAP	6
+#define	XFS_IOMAP_WRITE_NOSPACE	7
+#define	XFS_ITRUNC_START	8
+#define	XFS_ITRUNC_FINISH1	9
+#define	XFS_ITRUNC_FINISH2	10
+#define	XFS_CTRUNC1		11
+#define	XFS_CTRUNC2		12
+#define	XFS_CTRUNC3		13
+#define	XFS_CTRUNC4		14
+#define	XFS_CTRUNC5		15
+#define	XFS_CTRUNC6		16
+#define	XFS_BUNMAPI		17
+#define	XFS_INVAL_CACHED	18
+#define	XFS_DIORD_ENTER		19
+#define	XFS_DIOWR_ENTER		20
+#define	XFS_SENDFILE_ENTER	21
+#define	XFS_WRITEPAGE_ENTER	22
+#define	XFS_RELEASEPAGE_ENTER	23
+#define	XFS_IOMAP_ALLOC_ENTER	24
+#define	XFS_IOMAP_ALLOC_MAP	25
+#define	XFS_IOMAP_UNWRITTEN	26
+extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
+				void *, size_t, loff_t, int);
+extern void xfs_inval_cached_trace(struct xfs_iocore *,
+				xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
+#else
+#define xfs_rw_enter_trace(tag, io, data, size, offset, ioflags)
+#define xfs_inval_cached_trace(io, offset, len, first, last)
+#endif
+
+/*
+ * Maximum count of bmaps used by read and write paths.
+ */
+#define	XFS_MAX_RW_NBMAPS	4
+
+extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
+			struct xfs_iomap *, int *);
+extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+extern int xfs_bdstrat_cb(struct xfs_buf *);
+
+extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
+				xfs_fsize_t, xfs_fsize_t);
+extern void xfs_inval_cached_pages(struct vnode	*, struct xfs_iocore *,
+				xfs_off_t, int, int);
+extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
+				const struct iovec *, unsigned int,
+				loff_t *, int, struct cred *);
+extern ssize_t xfs_write(struct bhv_desc *, struct kiocb *,
+				const struct iovec *, unsigned int,
+				loff_t *, int, struct cred *);
+extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
+				loff_t *, int, size_t, read_actor_t,
+				void *, struct cred *);
+
+extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
+
+#define XFS_FSB_TO_DB_IO(io,fsb) \
+		(((io)->io_flags & XFS_IOCORE_RT) ? \
+		 XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \
+		 XFS_FSB_TO_DADDR((io)->io_mount, (fsb)))
+
+#endif	/* __XFS_LRW_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
new file mode 100644
index 0000000..aaf5ddb
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include <linux/proc_fs.h>
+
+DEFINE_PER_CPU(struct xfsstats, xfsstats);
+
+STATIC int
+xfs_read_xfsstats(
+	char		*buffer,
+	char		**start,
+	off_t		offset,
+	int		count,
+	int		*eof,
+	void		*data)
+{
+	int		c, i, j, len, val;
+	__uint64_t	xs_xstrat_bytes = 0;
+	__uint64_t	xs_write_bytes = 0;
+	__uint64_t	xs_read_bytes = 0;
+
+	static struct xstats_entry {
+		char	*desc;
+		int	endpoint;
+	} xstats[] = {
+		{ "extent_alloc",	XFSSTAT_END_EXTENT_ALLOC	},
+		{ "abt",		XFSSTAT_END_ALLOC_BTREE		},
+		{ "blk_map",		XFSSTAT_END_BLOCK_MAPPING	},
+		{ "bmbt",		XFSSTAT_END_BLOCK_MAP_BTREE	},
+		{ "dir",		XFSSTAT_END_DIRECTORY_OPS	},
+		{ "trans",		XFSSTAT_END_TRANSACTIONS	},
+		{ "ig",			XFSSTAT_END_INODE_OPS		},
+		{ "log",		XFSSTAT_END_LOG_OPS		},
+		{ "push_ail",		XFSSTAT_END_TAIL_PUSHING	},
+		{ "xstrat",		XFSSTAT_END_WRITE_CONVERT	},
+		{ "rw",			XFSSTAT_END_READ_WRITE_OPS	},
+		{ "attr",		XFSSTAT_END_ATTRIBUTE_OPS	},
+		{ "icluster",		XFSSTAT_END_INODE_CLUSTER	},
+		{ "vnodes",		XFSSTAT_END_VNODE_OPS		},
+		{ "buf",		XFSSTAT_END_BUF			},
+	};
+
+	/* Loop over all stats groups */
+	for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
+		len += sprintf(buffer + len, xstats[i].desc);
+		/* inner loop does each group */
+		while (j < xstats[i].endpoint) {
+			val = 0;
+			/* sum over all cpus */
+			for (c = 0; c < NR_CPUS; c++) {
+				if (!cpu_possible(c)) continue;
+				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+			}
+			len += sprintf(buffer + len, " %u", val);
+			j++;
+		}
+		buffer[len++] = '\n';
+	}
+	/* extra precision counters */
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i)) continue;
+		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+	}
+
+	len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
+			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
+	len += sprintf(buffer + len, "debug %u\n",
+#if defined(DEBUG)
+		1);
+#else
+		0);
+#endif
+
+	if (offset >= len) {
+		*start = buffer;
+		*eof = 1;
+		return 0;
+	}
+	*start = buffer + offset;
+	if ((len -= offset) > count)
+		return count;
+	*eof = 1;
+
+	return len;
+}
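xfs_read_xfsstats() follows the legacy read_proc protocol used by create_proc_read_entry(): fill the buffer, then use *start, the offset/count pair and *eof to tell the caller how much it may consume. A minimal handler following the same convention (sketch only; the function name and output are made up):

static int example_read_proc(char *buffer, char **start, off_t offset,
			     int count, int *eof, void *data)
{
	int	len = sprintf(buffer, "example 42\n");

	if (offset >= len) {		/* everything already consumed */
		*start = buffer;
		*eof = 1;
		return 0;
	}
	*start = buffer + offset;	/* hand back the unread tail */
	if ((len -= offset) > count)
		return count;		/* more remains, *eof stays clear */
	*eof = 1;
	return len;
}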
+
+void
+xfs_init_procfs(void)
+{
+	if (!proc_mkdir("fs/xfs", NULL))
+		return;
+	create_proc_read_entry("fs/xfs/stat", 0, NULL, xfs_read_xfsstats, NULL);
+}
+
+void
+xfs_cleanup_procfs(void)
+{
+	remove_proc_entry("fs/xfs/stat", NULL);
+	remove_proc_entry("fs/xfs", NULL);
+}
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h
new file mode 100644
index 0000000..3f756a6
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_stats.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_STATS_H__
+#define __XFS_STATS_H__
+
+
+#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
+
+#include <linux/percpu.h>
+
+/*
+ * XFS global statistics
+ */
+struct xfsstats {
+# define XFSSTAT_END_EXTENT_ALLOC	4
+	__uint32_t		xs_allocx;
+	__uint32_t		xs_allocb;
+	__uint32_t		xs_freex;
+	__uint32_t		xs_freeb;
+# define XFSSTAT_END_ALLOC_BTREE	(XFSSTAT_END_EXTENT_ALLOC+4)
+	__uint32_t		xs_abt_lookup;
+	__uint32_t		xs_abt_compare;
+	__uint32_t		xs_abt_insrec;
+	__uint32_t		xs_abt_delrec;
+# define XFSSTAT_END_BLOCK_MAPPING	(XFSSTAT_END_ALLOC_BTREE+7)
+	__uint32_t		xs_blk_mapr;
+	__uint32_t		xs_blk_mapw;
+	__uint32_t		xs_blk_unmap;
+	__uint32_t		xs_add_exlist;
+	__uint32_t		xs_del_exlist;
+	__uint32_t		xs_look_exlist;
+	__uint32_t		xs_cmp_exlist;
+# define XFSSTAT_END_BLOCK_MAP_BTREE	(XFSSTAT_END_BLOCK_MAPPING+4)
+	__uint32_t		xs_bmbt_lookup;
+	__uint32_t		xs_bmbt_compare;
+	__uint32_t		xs_bmbt_insrec;
+	__uint32_t		xs_bmbt_delrec;
+# define XFSSTAT_END_DIRECTORY_OPS	(XFSSTAT_END_BLOCK_MAP_BTREE+4)
+	__uint32_t		xs_dir_lookup;
+	__uint32_t		xs_dir_create;
+	__uint32_t		xs_dir_remove;
+	__uint32_t		xs_dir_getdents;
+# define XFSSTAT_END_TRANSACTIONS	(XFSSTAT_END_DIRECTORY_OPS+3)
+	__uint32_t		xs_trans_sync;
+	__uint32_t		xs_trans_async;
+	__uint32_t		xs_trans_empty;
+# define XFSSTAT_END_INODE_OPS		(XFSSTAT_END_TRANSACTIONS+7)
+	__uint32_t		xs_ig_attempts;
+	__uint32_t		xs_ig_found;
+	__uint32_t		xs_ig_frecycle;
+	__uint32_t		xs_ig_missed;
+	__uint32_t		xs_ig_dup;
+	__uint32_t		xs_ig_reclaims;
+	__uint32_t		xs_ig_attrchg;
+# define XFSSTAT_END_LOG_OPS		(XFSSTAT_END_INODE_OPS+5)
+	__uint32_t		xs_log_writes;
+	__uint32_t		xs_log_blocks;
+	__uint32_t		xs_log_noiclogs;
+	__uint32_t		xs_log_force;
+	__uint32_t		xs_log_force_sleep;
+# define XFSSTAT_END_TAIL_PUSHING	(XFSSTAT_END_LOG_OPS+10)
+	__uint32_t		xs_try_logspace;
+	__uint32_t		xs_sleep_logspace;
+	__uint32_t		xs_push_ail;
+	__uint32_t		xs_push_ail_success;
+	__uint32_t		xs_push_ail_pushbuf;
+	__uint32_t		xs_push_ail_pinned;
+	__uint32_t		xs_push_ail_locked;
+	__uint32_t		xs_push_ail_flushing;
+	__uint32_t		xs_push_ail_restarts;
+	__uint32_t		xs_push_ail_flush;
+# define XFSSTAT_END_WRITE_CONVERT	(XFSSTAT_END_TAIL_PUSHING+2)
+	__uint32_t		xs_xstrat_quick;
+	__uint32_t		xs_xstrat_split;
+# define XFSSTAT_END_READ_WRITE_OPS	(XFSSTAT_END_WRITE_CONVERT+2)
+	__uint32_t		xs_write_calls;
+	__uint32_t		xs_read_calls;
+# define XFSSTAT_END_ATTRIBUTE_OPS	(XFSSTAT_END_READ_WRITE_OPS+4)
+	__uint32_t		xs_attr_get;
+	__uint32_t		xs_attr_set;
+	__uint32_t		xs_attr_remove;
+	__uint32_t		xs_attr_list;
+# define XFSSTAT_END_INODE_CLUSTER	(XFSSTAT_END_ATTRIBUTE_OPS+3)
+	__uint32_t		xs_iflush_count;
+	__uint32_t		xs_icluster_flushcnt;
+	__uint32_t		xs_icluster_flushinode;
+# define XFSSTAT_END_VNODE_OPS		(XFSSTAT_END_INODE_CLUSTER+8)
+	__uint32_t		vn_active;	/* # vnodes not on free lists */
+	__uint32_t		vn_alloc;	/* # times vn_alloc called */
+	__uint32_t		vn_get;		/* # times vn_get called */
+	__uint32_t		vn_hold;	/* # times vn_hold called */
+	__uint32_t		vn_rele;	/* # times vn_rele called */
+	__uint32_t		vn_reclaim;	/* # times vn_reclaim called */
+	__uint32_t		vn_remove;	/* # times vn_remove called */
+	__uint32_t		vn_free;	/* # times vn_free called */
+#define XFSSTAT_END_BUF			(XFSSTAT_END_VNODE_OPS+9)
+	__uint32_t		pb_get;
+	__uint32_t		pb_create;
+	__uint32_t		pb_get_locked;
+	__uint32_t		pb_get_locked_waited;
+	__uint32_t		pb_busy_locked;
+	__uint32_t		pb_miss_locked;
+	__uint32_t		pb_page_retries;
+	__uint32_t		pb_page_found;
+	__uint32_t		pb_get_read;
+/* Extra precision counters */
+	__uint64_t		xs_xstrat_bytes;
+	__uint64_t		xs_write_bytes;
+	__uint64_t		xs_read_bytes;
+};
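+
+/*
+ * The XFSSTAT_END_* values above are running totals of the __uint32_t
+ * counters, marking where each group ends (e.g. XFSSTAT_END_EXTENT_ALLOC
+ * is 4, covering xs_allocx through xs_freeb); presumably this lets the
+ * procfs reporting code walk the structure group by group.  The
+ * __uint64_t byte counters at the end are not covered by these markers.
+ */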
+
+DECLARE_PER_CPU(struct xfsstats, xfsstats);
+
+/*
+ * We don't disable preemption; we are not too worried about poking the
+ * wrong CPU's stat for now (the counters are aggregated before reporting).
+ */
+#define XFS_STATS_INC(v)	(per_cpu(xfsstats, current_cpu()).v++)
+#define XFS_STATS_DEC(v)	(per_cpu(xfsstats, current_cpu()).v--)
+#define XFS_STATS_ADD(v, inc)	(per_cpu(xfsstats, current_cpu()).v += (inc))
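+
+/*
+ * Example usage, for illustration (see e.g. XFS_STATS_INC(vn_reclaim) in
+ * xfs_vnode.c):
+ *
+ *	XFS_STATS_INC(vn_reclaim);
+ *	XFS_STATS_ADD(xs_read_bytes, size);
+ *
+ * Each call bumps the current CPU's copy of struct xfsstats; the per-cpu
+ * copies are summed when the statistics are reported through procfs.
+ */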
+
+extern void xfs_init_procfs(void);
+extern void xfs_cleanup_procfs(void);
+
+
+#else	/* !CONFIG_PROC_FS */
+
+# define XFS_STATS_INC(count)
+# define XFS_STATS_DEC(count)
+# define XFS_STATS_ADD(count, inc)
+
+static __inline void xfs_init_procfs(void) { };
+static __inline void xfs_cleanup_procfs(void) { };
+
+#endif	/* !CONFIG_PROC_FS */
+
+#endif /* __XFS_STATS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
new file mode 100644
index 0000000..53dc658
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_version.h"
+#include "xfs_ioctl32.h"
+
+#include <linux/namei.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/writeback.h>
+
+STATIC struct quotactl_ops linvfs_qops;
+STATIC struct super_operations linvfs_sops;
+STATIC kmem_zone_t *linvfs_inode_zone;
+
+STATIC struct xfs_mount_args *
+xfs_args_allocate(
+	struct super_block	*sb)
+{
+	struct xfs_mount_args	*args;
+
+	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
+	args->logbufs = args->logbufsize = -1;
+	strncpy(args->fsname, sb->s_id, MAXNAMELEN);
+
+	/* Copy the already-parsed mount(2) flags we're interested in */
+	if (sb->s_flags & MS_NOATIME)
+		args->flags |= XFSMNT_NOATIME;
+	if (sb->s_flags & MS_DIRSYNC)
+		args->flags |= XFSMNT_DIRSYNC;
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		args->flags |= XFSMNT_WSYNC;
+
+	/* Default to 32 bit inodes on Linux all the time */
+	args->flags |= XFSMNT_32BITINODES;
+
+	return args;
+}
+
+__uint64_t
+xfs_max_file_offset(
+	unsigned int		blockshift)
+{
+	unsigned int		pagefactor = 1;
+	unsigned int		bitshift = BITS_PER_LONG - 1;
+
+	/* Figure out the maximum filesize; on Linux this can depend on
+	 * the filesystem blocksize (on 32 bit platforms).
+	 * __block_prepare_write does this in an [unsigned] long...
+	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
+	 * So, for page sized blocks (4K on 32 bit platforms),
+	 * this wraps at around 8TB (hence MAX_LFS_FILESIZE which is
+	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+	 * but for smaller blocksizes it is less (bbits = log2 bsize).
+	 * Note1: get_block_t takes a long (implicit cast from above)
+	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
+	 * can optionally convert the [unsigned] long from above into
+	 * an [unsigned] long long.
+	 */
+
+#if BITS_PER_LONG == 32
+# if defined(CONFIG_LBD)
+	ASSERT(sizeof(sector_t) == 8);
+	pagefactor = PAGE_CACHE_SIZE;
+	bitshift = BITS_PER_LONG;
+# else
+	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+# endif
+#endif
+
+	return (((__uint64_t)pagefactor) << bitshift) - 1;
+}
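+
+/*
+ * Worked example, for illustration: on a 32-bit kernel without CONFIG_LBD
+ * and 4K blocks (blockshift = 12, PAGE_CACHE_SHIFT = 12), pagefactor is
+ * 4096 and bitshift is 31, so the result is ((u64)4096 << 31) - 1, i.e.
+ * 2^43 - 1, just under 8TB as noted above.  With CONFIG_LBD, pagefactor
+ * is PAGE_CACHE_SIZE and bitshift is 32, giving 2^44 - 1 (16TB) for 4K
+ * pages.  On 64-bit kernels the #if block is skipped entirely, leaving
+ * pagefactor = 1 and bitshift = 63, i.e. 2^63 - 1.
+ */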
+
+STATIC __inline__ void
+xfs_set_inodeops(
+	struct inode		*inode)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+
+	if (vp->v_type == VNON) {
+		vn_mark_bad(vp);
+	} else if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &linvfs_file_inode_operations;
+		inode->i_fop = &linvfs_file_operations;
+		inode->i_mapping->a_ops = &linvfs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &linvfs_dir_inode_operations;
+		inode->i_fop = &linvfs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &linvfs_symlink_inode_operations;
+		if (inode->i_blocks)
+			inode->i_mapping->a_ops = &linvfs_aops;
+	} else {
+		inode->i_op = &linvfs_file_inode_operations;
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+	}
+}
+
+STATIC __inline__ void
+xfs_revalidate_inode(
+	xfs_mount_t		*mp,
+	vnode_t			*vp,
+	xfs_inode_t		*ip)
+{
+	struct inode		*inode = LINVFS_GET_IP(vp);
+
+	inode->i_mode	= (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
+	inode->i_nlink	= ip->i_d.di_nlink;
+	inode->i_uid	= ip->i_d.di_uid;
+	inode->i_gid	= ip->i_d.di_gid;
+	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+		inode->i_rdev = 0;
+	} else {
+		xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
+		inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+	}
+	inode->i_blksize = PAGE_CACHE_SIZE;
+	inode->i_generation = ip->i_d.di_gen;
+	i_size_write(inode, ip->i_d.di_size);
+	inode->i_blocks =
+		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
+	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
+	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
+	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
+	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
+	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
+	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+		inode->i_flags |= S_IMMUTABLE;
+	else
+		inode->i_flags &= ~S_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+		inode->i_flags |= S_APPEND;
+	else
+		inode->i_flags &= ~S_APPEND;
+	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+		inode->i_flags |= S_SYNC;
+	else
+		inode->i_flags &= ~S_SYNC;
+	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+		inode->i_flags |= S_NOATIME;
+	else
+		inode->i_flags &= ~S_NOATIME;
+	vp->v_flag &= ~VMODIFIED;
+}
+
+void
+xfs_initialize_vnode(
+	bhv_desc_t		*bdp,
+	vnode_t			*vp,
+	bhv_desc_t		*inode_bhv,
+	int			unlock)
+{
+	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
+	struct inode		*inode = LINVFS_GET_IP(vp);
+
+	if (!inode_bhv->bd_vobj) {
+		vp->v_vfsp = bhvtovfs(bdp);
+		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
+		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
+	}
+
+	/*
+	 * We need to set the ops vectors, and unlock the inode, but if
+	 * we have been called during the new inode create process, it is
+	 * too early to fill in the Linux inode.  We will get called a
+	 * second time once the inode is properly set up, and then we can
+	 * finish our work.
+	 */
+	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
+		vp->v_type = IFTOVT(ip->i_d.di_mode);
+		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
+		xfs_set_inodeops(inode);
+
+		ip->i_flags &= ~XFS_INEW;
+		barrier();
+
+		unlock_new_inode(inode);
+	}
+}
+
+int
+xfs_blkdev_get(
+	xfs_mount_t		*mp,
+	const char		*name,
+	struct block_device	**bdevp)
+{
+	int			error = 0;
+
+	*bdevp = open_bdev_excl(name, 0, mp);
+	if (IS_ERR(*bdevp)) {
+		error = PTR_ERR(*bdevp);
+		printk("XFS: Invalid device [%s], error=%d\n", name, error);
+	}
+
+	return -error;
+}
+
+void
+xfs_blkdev_put(
+	struct block_device	*bdev)
+{
+	if (bdev)
+		close_bdev_excl(bdev);
+}
+
+
+STATIC struct inode *
+linvfs_alloc_inode(
+	struct super_block	*sb)
+{
+	vnode_t			*vp;
+
+	vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone,
+				kmem_flags_convert(KM_SLEEP));
+	if (!vp)
+		return NULL;
+	return LINVFS_GET_IP(vp);
+}
+
+STATIC void
+linvfs_destroy_inode(
+	struct inode		*inode)
+{
+	kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode));
+}
+
+STATIC void
+init_once(
+	void			*data,
+	kmem_cache_t		*cachep,
+	unsigned long		flags)
+{
+	vnode_t			*vp = (vnode_t *)data;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(LINVFS_GET_IP(vp));
+}
+
+STATIC int
+init_inodecache( void )
+{
+	linvfs_inode_zone = kmem_cache_create("linvfs_icache",
+				sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
+				init_once, NULL);
+	if (linvfs_inode_zone == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+STATIC void
+destroy_inodecache( void )
+{
+	if (kmem_cache_destroy(linvfs_inode_zone))
+		printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__);
+}
+
+/*
+ * Attempt to flush the inode.  This will actually fail
+ * if the inode is pinned, but we dirty the inode again
+ * at the point when it is unpinned after a log write,
+ * since this is when the inode itself becomes flushable.
+ */
+STATIC int
+linvfs_write_inode(
+	struct inode		*inode,
+	int			sync)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+	int			error = 0, flags = FLUSH_INODE;
+
+	if (vp) {
+		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+		if (sync)
+			flags |= FLUSH_SYNC;
+		VOP_IFLUSH(vp, flags, error);
+		if (error == EAGAIN) {
+			if (sync)
+				VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
+			else
+				error = 0;
+		}
+	}
+
+	return -error;
+}
+
+STATIC void
+linvfs_clear_inode(
+	struct inode		*inode)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+
+	if (vp) {
+		vn_rele(vp);
+		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+		/*
+		 * Do all our cleanup, and remove this vnode.
+		 */
+		vn_remove(vp);
+	}
+}
+
+
+/*
+ * Enqueue a work item to be picked up by the vfs xfssyncd thread.
+ * Doing this has two advantages:
+ * - It saves on stack space, which is tight in certain situations
+ * - It can be used (with care) as a mechanism to avoid deadlocks.
+ * Flushing while allocating in a full filesystem requires both.
+ */
+STATIC void
+xfs_syncd_queue_work(
+	struct vfs	*vfs,
+	void		*data,
+	void		(*syncer)(vfs_t *, void *))
+{
+	vfs_sync_work_t	*work;
+
+	work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
+	INIT_LIST_HEAD(&work->w_list);
+	work->w_syncer = syncer;
+	work->w_data = data;
+	work->w_vfs = vfs;
+	spin_lock(&vfs->vfs_sync_lock);
+	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
+	spin_unlock(&vfs->vfs_sync_lock);
+	wake_up_process(vfs->vfs_sync_task);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room...
+ */
+STATIC void
+xfs_flush_inode_work(
+	vfs_t		*vfs,
+	void		*inode)
+{
+	filemap_flush(((struct inode *)inode)->i_mapping);
+	iput((struct inode *)inode);
+}
+
+void
+xfs_flush_inode(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
+	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
+
+	igrab(inode);
+	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
+	delay(HZ/2);
+}
+
+/*
+ * This is the "bigger hammer" version of xfs_flush_inode_work...
+ * (IOW, "If at first you don't succeed, use a Bigger Hammer").
+ */
+STATIC void
+xfs_flush_device_work(
+	vfs_t		*vfs,
+	void		*inode)
+{
+	sync_blockdev(vfs->vfs_super->s_bdev);
+	iput((struct inode *)inode);
+}
+
+void
+xfs_flush_device(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
+	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
+
+	igrab(inode);
+	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
+	delay(HZ/2);
+	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+}
+
+#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
+STATIC void
+vfs_sync_worker(
+	vfs_t		*vfsp,
+	void		*unused)
+{
+	int		error;
+
+	if (!(vfsp->vfs_flag & VFS_RDONLY))
+		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
+	vfsp->vfs_sync_seq++;
+	wmb();
+	wake_up(&vfsp->vfs_wait_single_sync_task);
+}
+
+STATIC int
+xfssyncd(
+	void			*arg)
+{
+	long			timeleft;
+	vfs_t			*vfsp = (vfs_t *) arg;
+	struct list_head	tmp;
+	struct vfs_sync_work	*work, *n;
+
+	daemonize("xfssyncd");
+
+	vfsp->vfs_sync_work.w_vfs = vfsp;
+	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
+	vfsp->vfs_sync_task = current;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+
+	INIT_LIST_HEAD(&tmp);
+	timeleft = (xfs_syncd_centisecs * HZ) / 100;
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		timeleft = schedule_timeout(timeleft);
+		/* swsusp */
+		try_to_freeze(PF_FREEZE);
+		if (vfsp->vfs_flag & VFS_UMOUNT)
+			break;
+
+		spin_lock(&vfsp->vfs_sync_lock);
+		/*
+		 * We can get woken by laptop mode, to do a sync -
+		 * that's the (only!) case where the list would be
+		 * empty with time remaining.
+		 */
+		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
+			if (!timeleft)
+				timeleft = (xfs_syncd_centisecs * HZ) / 100;
+			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
+			list_add_tail(&vfsp->vfs_sync_work.w_list,
+					&vfsp->vfs_sync_list);
+		}
+		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
+			list_move(&work->w_list, &tmp);
+		spin_unlock(&vfsp->vfs_sync_lock);
+
+		list_for_each_entry_safe(work, n, &tmp, w_list) {
+			(*work->w_syncer)(vfsp, work->w_data);
+			list_del(&work->w_list);
+			if (work == &vfsp->vfs_sync_work)
+				continue;
+			kmem_free(work, sizeof(struct vfs_sync_work));
+		}
+	}
+
+	vfsp->vfs_sync_task = NULL;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+
+	return 0;
+}
+
+STATIC int
+linvfs_start_syncd(
+	vfs_t			*vfsp)
+{
+	int			pid;
+
+	pid = kernel_thread(xfssyncd, (void *) vfsp,
+			CLONE_VM | CLONE_FS | CLONE_FILES);
+	if (pid < 0)
+		return -pid;
+	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
+	return 0;
+}
+
+STATIC void
+linvfs_stop_syncd(
+	vfs_t			*vfsp)
+{
+	vfsp->vfs_flag |= VFS_UMOUNT;
+	wmb();
+
+	wake_up_process(vfsp->vfs_sync_task);
+	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
+}
+
+STATIC void
+linvfs_put_super(
+	struct super_block	*sb)
+{
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	linvfs_stop_syncd(vfsp);
+	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
+	if (!error)
+		VFS_UNMOUNT(vfsp, 0, NULL, error);
+	if (error) {
+		printk("XFS unmount got error %d\n", error);
+		printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
+		return;
+	}
+
+	vfs_deallocate(vfsp);
+}
+
+STATIC void
+linvfs_write_super(
+	struct super_block	*sb)
+{
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	if (sb->s_flags & MS_RDONLY) {
+		sb->s_dirt = 0; /* paranoia */
+		return;
+	}
+	/* Push the log and superblock a little */
+	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
+	sb->s_dirt = 0;
+}
+
+STATIC int
+linvfs_sync_super(
+	struct super_block	*sb,
+	int			wait)
+{
+	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
+	int		error;
+	int		flags = SYNC_FSDATA;
+
+	if (wait)
+		flags |= SYNC_WAIT;
+
+	VFS_SYNC(vfsp, flags, NULL, error);
+	sb->s_dirt = 0;
+
+	if (unlikely(laptop_mode)) {
+		int	prev_sync_seq = vfsp->vfs_sync_seq;
+
+		/*
+		 * The disk must be active because we're syncing.
+		 * We schedule xfssyncd now (now that the disk is
+		 * active) instead of later (when it might not be).
+		 */
+		wake_up_process(vfsp->vfs_sync_task);
+		/*
+		 * We have to wait for the sync iteration to complete.
+		 * If we don't, the disk activity caused by the sync
+		 * will come after the sync is completed, and that
+		 * triggers another sync from laptop mode.
+		 */
+		wait_event(vfsp->vfs_wait_single_sync_task,
+				vfsp->vfs_sync_seq != prev_sync_seq);
+	}
+
+	return -error;
+}
+
+STATIC int
+linvfs_statfs(
+	struct super_block	*sb,
+	struct kstatfs		*statp)
+{
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	VFS_STATVFS(vfsp, statp, NULL, error);
+	return -error;
+}
+
+STATIC int
+linvfs_remount(
+	struct super_block	*sb,
+	int			*flags,
+	char			*options)
+{
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	struct xfs_mount_args	*args = xfs_args_allocate(sb);
+	int			error;
+
+	VFS_PARSEARGS(vfsp, options, args, 1, error);
+	if (!error)
+		VFS_MNTUPDATE(vfsp, flags, args, error);
+	kmem_free(args, sizeof(*args));
+	return -error;
+}
+
+STATIC void
+linvfs_freeze_fs(
+	struct super_block	*sb)
+{
+	VFS_FREEZE(LINVFS_GET_VFS(sb));
+}
+
+STATIC int
+linvfs_show_options(
+	struct seq_file		*m,
+	struct vfsmount		*mnt)
+{
+	struct vfs		*vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
+	int			error;
+
+	VFS_SHOWARGS(vfsp, m, error);
+	return error;
+}
+
+STATIC int
+linvfs_getxstate(
+	struct super_block	*sb,
+	struct fs_quota_stat	*fqs)
+{
+	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
+	return -error;
+}
+
+STATIC int
+linvfs_setxstate(
+	struct super_block	*sb,
+	unsigned int		flags,
+	int			op)
+{
+	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	int			error;
+
+	VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
+	return -error;
+}
+
+STATIC int
+linvfs_getxquota(
+	struct super_block	*sb,
+	int			type,
+	qid_t			id,
+	struct fs_disk_quota	*fdq)
+{
+	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	int			error, getmode;
+
+	getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA;
+	VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
+	return -error;
+}
+
+STATIC int
+linvfs_setxquota(
+	struct super_block	*sb,
+	int			type,
+	qid_t			id,
+	struct fs_disk_quota	*fdq)
+{
+	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	int			error, setmode;
+
+	setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM;
+	VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
+	return -error;
+}
+
+STATIC int
+linvfs_fill_super(
+	struct super_block	*sb,
+	void			*data,
+	int			silent)
+{
+	vnode_t			*rootvp;
+	struct vfs		*vfsp = vfs_allocate();
+	struct xfs_mount_args	*args = xfs_args_allocate(sb);
+	struct kstatfs		statvfs;
+	int			error, error2;
+
+	vfsp->vfs_super = sb;
+	LINVFS_SET_VFS(sb, vfsp);
+	if (sb->s_flags & MS_RDONLY)
+		vfsp->vfs_flag |= VFS_RDONLY;
+	bhv_insert_all_vfsops(vfsp);
+
+	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
+	if (error) {
+		bhv_remove_all_vfsops(vfsp, 1);
+		goto fail_vfsop;
+	}
+
+	sb_min_blocksize(sb, BBSIZE);
+#ifdef CONFIG_XFS_EXPORT
+	sb->s_export_op = &linvfs_export_ops;
+#endif
+	sb->s_qcop = &linvfs_qops;
+	sb->s_op = &linvfs_sops;
+
+	VFS_MOUNT(vfsp, args, NULL, error);
+	if (error) {
+		bhv_remove_all_vfsops(vfsp, 1);
+		goto fail_vfsop;
+	}
+
+	VFS_STATVFS(vfsp, &statvfs, NULL, error);
+	if (error)
+		goto fail_unmount;
+
+	sb->s_dirt = 1;
+	sb->s_magic = statvfs.f_type;
+	sb->s_blocksize = statvfs.f_bsize;
+	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
+	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+	sb->s_time_gran = 1;
+	set_posix_acl_flag(sb);
+
+	VFS_ROOT(vfsp, &rootvp, error);
+	if (error)
+		goto fail_unmount;
+
+	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
+	if (!sb->s_root) {
+		error = ENOMEM;
+		goto fail_vnrele;
+	}
+	if (is_bad_inode(sb->s_root->d_inode)) {
+		error = EINVAL;
+		goto fail_vnrele;
+	}
+	if ((error = linvfs_start_syncd(vfsp)))
+		goto fail_vnrele;
+	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
+
+	kmem_free(args, sizeof(*args));
+	return 0;
+
+fail_vnrele:
+	if (sb->s_root) {
+		dput(sb->s_root);
+		sb->s_root = NULL;
+	} else {
+		VN_RELE(rootvp);
+	}
+
+fail_unmount:
+	VFS_UNMOUNT(vfsp, 0, NULL, error2);
+
+fail_vfsop:
+	vfs_deallocate(vfsp);
+	kmem_free(args, sizeof(*args));
+	return -error;
+}
+
+STATIC struct super_block *
+linvfs_get_sb(
+	struct file_system_type	*fs_type,
+	int			flags,
+	const char		*dev_name,
+	void			*data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super);
+}
+
+STATIC struct super_operations linvfs_sops = {
+	.alloc_inode		= linvfs_alloc_inode,
+	.destroy_inode		= linvfs_destroy_inode,
+	.write_inode		= linvfs_write_inode,
+	.clear_inode		= linvfs_clear_inode,
+	.put_super		= linvfs_put_super,
+	.write_super		= linvfs_write_super,
+	.sync_fs		= linvfs_sync_super,
+	.write_super_lockfs	= linvfs_freeze_fs,
+	.statfs			= linvfs_statfs,
+	.remount_fs		= linvfs_remount,
+	.show_options		= linvfs_show_options,
+};
+
+STATIC struct quotactl_ops linvfs_qops = {
+	.get_xstate		= linvfs_getxstate,
+	.set_xstate		= linvfs_setxstate,
+	.get_xquota		= linvfs_getxquota,
+	.set_xquota		= linvfs_setxquota,
+};
+
+STATIC struct file_system_type xfs_fs_type = {
+	.owner			= THIS_MODULE,
+	.name			= "xfs",
+	.get_sb			= linvfs_get_sb,
+	.kill_sb		= kill_block_super,
+	.fs_flags		= FS_REQUIRES_DEV,
+};
+
+
+STATIC int __init
+init_xfs_fs( void )
+{
+	int			error;
+	struct sysinfo		si;
+	static char		message[] __initdata = KERN_INFO \
+		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
+
+	printk(message);
+
+	si_meminfo(&si);
+	xfs_physmem = si.totalram;
+
+	ktrace_init(64);
+
+	error = init_inodecache();
+	if (error < 0)
+		goto undo_inodecache;
+
+	error = pagebuf_init();
+	if (error < 0)
+		goto undo_pagebuf;
+
+	vn_init();
+	xfs_init();
+	uuid_init();
+	vfs_initquota();
+
+	error = register_filesystem(&xfs_fs_type);
+	if (error)
+		goto undo_register;
+	XFS_DM_INIT(&xfs_fs_type);
+	return 0;
+
+undo_register:
+	pagebuf_terminate();
+
+undo_pagebuf:
+	destroy_inodecache();
+
+undo_inodecache:
+	return error;
+}
+
+STATIC void __exit
+exit_xfs_fs( void )
+{
+	vfs_exitquota();
+	XFS_DM_EXIT(&xfs_fs_type);
+	unregister_filesystem(&xfs_fs_type);
+	xfs_cleanup();
+	pagebuf_terminate();
+	destroy_inodecache();
+	ktrace_uninit();
+}
+
+module_init(init_xfs_fs);
+module_exit(exit_xfs_fs);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
+MODULE_LICENSE("GPL");
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
new file mode 100644
index 0000000..ec7e003
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPER_H__
+#define __XFS_SUPER_H__
+
+#ifdef CONFIG_XFS_DMAPI
+# define vfs_insertdmapi(vfs)	vfs_insertops(vfsp, &xfs_dmops)
+# define vfs_initdmapi()	dmapi_init()
+# define vfs_exitdmapi()	dmapi_uninit()
+#else
+# define vfs_insertdmapi(vfs)	do { } while (0)
+# define vfs_initdmapi()	do { } while (0)
+# define vfs_exitdmapi()	do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_QUOTA
+# define vfs_insertquota(vfs)	vfs_insertops(vfsp, &xfs_qmops)
+extern void xfs_qm_init(void);
+extern void xfs_qm_exit(void);
+# define vfs_initquota()	xfs_qm_init()
+# define vfs_exitquota()	xfs_qm_exit()
+#else
+# define vfs_insertquota(vfs)	do { } while (0)
+# define vfs_initquota()	do { } while (0)
+# define vfs_exitquota()	do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_POSIX_ACL
+# define XFS_ACL_STRING		"ACLs, "
+# define set_posix_acl_flag(sb)	((sb)->s_flags |= MS_POSIXACL)
+#else
+# define XFS_ACL_STRING
+# define set_posix_acl_flag(sb)	do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_SECURITY
+# define XFS_SECURITY_STRING	"security attributes, "
+# define ENOSECURITY		0
+#else
+# define XFS_SECURITY_STRING
+# define ENOSECURITY		EOPNOTSUPP
+#endif
+
+#ifdef CONFIG_XFS_RT
+# define XFS_REALTIME_STRING	"realtime, "
+#else
+# define XFS_REALTIME_STRING
+#endif
+
+#if XFS_BIG_BLKNOS
+# if XFS_BIG_INUMS
+#  define XFS_BIGFS_STRING	"large block/inode numbers, "
+# else
+#  define XFS_BIGFS_STRING	"large block numbers, "
+# endif
+#else
+# define XFS_BIGFS_STRING
+#endif
+
+#ifdef CONFIG_XFS_TRACE
+# define XFS_TRACE_STRING	"tracing, "
+#else
+# define XFS_TRACE_STRING
+#endif
+
+#ifdef CONFIG_XFS_DMAPI
+# define XFS_DMAPI_STRING	"dmapi support, "
+#else
+# define XFS_DMAPI_STRING
+#endif
+
+#ifdef DEBUG
+# define XFS_DBG_STRING		"debug"
+#else
+# define XFS_DBG_STRING		"no debug"
+#endif
+
+#define XFS_BUILD_OPTIONS	XFS_ACL_STRING \
+				XFS_SECURITY_STRING \
+				XFS_REALTIME_STRING \
+				XFS_BIGFS_STRING \
+				XFS_TRACE_STRING \
+				XFS_DMAPI_STRING \
+				XFS_DBG_STRING /* DBG must be last */
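+
+/*
+ * For illustration: with only CONFIG_XFS_POSIX_ACL of the options above
+ * enabled and DEBUG unset, XFS_BUILD_OPTIONS expands to "ACLs, no debug",
+ * so init_xfs_fs() prints "SGI XFS with ACLs, no debug enabled".
+ */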
+
+#define LINVFS_GET_VFS(s) \
+	(vfs_t *)((s)->s_fs_info)
+#define LINVFS_SET_VFS(s, vfsp) \
+	((s)->s_fs_info = vfsp)
+
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_buftarg;
+struct block_device;
+
+extern __uint64_t xfs_max_file_offset(unsigned int);
+
+extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
+
+extern void xfs_flush_inode(struct xfs_inode *);
+extern void xfs_flush_device(struct xfs_inode *);
+
+extern int  xfs_blkdev_get(struct xfs_mount *, const char *,
+				struct block_device **);
+extern void xfs_blkdev_put(struct block_device *);
+
+extern struct export_operations linvfs_export_ops;
+
+#endif	/* __XFS_SUPER_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
new file mode 100644
index 0000000..0dc0103
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_rw.h"
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+
+
+static struct ctl_table_header *xfs_table_header;
+
+
+#ifdef CONFIG_PROC_FS
+STATIC int
+xfs_stats_clear_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	struct file	*filp,
+	void		__user *buffer,
+	size_t		*lenp,
+	loff_t		*ppos)
+{
+	int		c, ret, *valp = ctl->data;
+	__uint32_t	vn_active;
+
+	ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos);
+
+	if (!ret && write && *valp) {
+		printk("XFS Clearing xfsstats\n");
+		for (c = 0; c < NR_CPUS; c++) {
+			if (!cpu_possible(c)) continue;
+			preempt_disable();
+			/* save vn_active, it's a universal truth! */
+			vn_active = per_cpu(xfsstats, c).vn_active;
+			memset(&per_cpu(xfsstats, c), 0,
+			       sizeof(struct xfsstats));
+			per_cpu(xfsstats, c).vn_active = vn_active;
+			preempt_enable();
+		}
+		xfs_stats_clear = 0;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_PROC_FS */
+
+STATIC ctl_table xfs_table[] = {
+	{XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.restrict_chown.min, &xfs_params.restrict_chown.max},
+
+	{XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.sgid_inherit.min, &xfs_params.sgid_inherit.max},
+
+	{XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.symlink_mode.min, &xfs_params.symlink_mode.max},
+
+	{XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.panic_mask.min, &xfs_params.panic_mask.max},
+
+	{XFS_ERRLEVEL, "error_level", &xfs_params.error_level.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.error_level.min, &xfs_params.error_level.max},
+
+	{XFS_SYNCD_TIMER, "xfssyncd_centisecs", &xfs_params.syncd_timer.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.syncd_timer.min, &xfs_params.syncd_timer.max},
+
+	{XFS_INHERIT_SYNC, "inherit_sync", &xfs_params.inherit_sync.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.inherit_sync.min, &xfs_params.inherit_sync.max},
+
+	{XFS_INHERIT_NODUMP, "inherit_nodump", &xfs_params.inherit_nodump.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.inherit_nodump.min, &xfs_params.inherit_nodump.max},
+
+	{XFS_INHERIT_NOATIME, "inherit_noatime", &xfs_params.inherit_noatim.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.inherit_noatim.min, &xfs_params.inherit_noatim.max},
+
+	{XFS_BUF_TIMER, "xfsbufd_centisecs", &xfs_params.xfs_buf_timer.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.xfs_buf_timer.min, &xfs_params.xfs_buf_timer.max},
+
+	{XFS_BUF_AGE, "age_buffer_centisecs", &xfs_params.xfs_buf_age.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.xfs_buf_age.min, &xfs_params.xfs_buf_age.max},
+
+	{XFS_INHERIT_NOSYM, "inherit_nosymlinks", &xfs_params.inherit_nosym.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.inherit_nosym.min, &xfs_params.inherit_nosym.max},
+
+	{XFS_ROTORSTEP, "rotorstep", &xfs_params.rotorstep.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL, 
+	&xfs_params.rotorstep.min, &xfs_params.rotorstep.max},
+
+	/* please keep this the last entry */
+#ifdef CONFIG_PROC_FS
+	{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
+	sizeof(int), 0644, NULL, &xfs_stats_clear_proc_handler,
+	&sysctl_intvec, NULL, 
+	&xfs_params.stats_clear.min, &xfs_params.stats_clear.max},
+#endif /* CONFIG_PROC_FS */
+
+	{0}
+};
+
+STATIC ctl_table xfs_dir_table[] = {
+	{FS_XFS, "xfs", NULL, 0, 0555, xfs_table},
+	{0}
+};
+
+STATIC ctl_table xfs_root_table[] = {
+	{CTL_FS, "fs",  NULL, 0, 0555, xfs_dir_table},
+	{0}
+};
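+
+/*
+ * The table nesting above (CTL_FS/"fs" -> FS_XFS/"xfs" -> xfs_table) makes
+ * these tunables visible under /proc/sys/fs/xfs/, e.g.
+ * /proc/sys/fs/xfs/error_level, and (with CONFIG_PROC_FS)
+ * /proc/sys/fs/xfs/stats_clear.
+ */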
+
+void
+xfs_sysctl_register(void)
+{
+	xfs_table_header = register_sysctl_table(xfs_root_table, 1);
+}
+
+void
+xfs_sysctl_unregister(void)
+{
+	if (xfs_table_header)
+		unregister_sysctl_table(xfs_table_header);
+}
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h
new file mode 100644
index 0000000..a39a950
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_sysctl.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __XFS_SYSCTL_H__
+#define __XFS_SYSCTL_H__
+
+#include <linux/sysctl.h>
+
+/*
+ * Tunable xfs parameters
+ */
+
+typedef struct xfs_sysctl_val {
+	int min;
+	int val;
+	int max;
+} xfs_sysctl_val_t;
+
+typedef struct xfs_param {
+	xfs_sysctl_val_t restrict_chown;/* Root/non-root can give away files.*/
+	xfs_sysctl_val_t sgid_inherit;	/* Inherit S_ISGID if process' GID is
+					 * not a member of parent dir GID. */
+	xfs_sysctl_val_t symlink_mode;	/* Link creat mode affected by umask */
+	xfs_sysctl_val_t panic_mask;	/* bitmask to cause panic on errors. */
+	xfs_sysctl_val_t error_level;	/* Degree of reporting for problems  */
+	xfs_sysctl_val_t syncd_timer;	/* Interval between xfssyncd wakeups */
+	xfs_sysctl_val_t stats_clear;	/* Reset all XFS statistics to zero. */
+	xfs_sysctl_val_t inherit_sync;	/* Inherit the "sync" inode flag. */
+	xfs_sysctl_val_t inherit_nodump;/* Inherit the "nodump" inode flag. */
+	xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */
+	xfs_sysctl_val_t xfs_buf_timer;	/* Interval between xfsbufd wakeups. */
+	xfs_sysctl_val_t xfs_buf_age;	/* Metadata buffer age before flush. */
+	xfs_sysctl_val_t inherit_nosym;	/* Inherit the "nosymlinks" flag. */
+	xfs_sysctl_val_t rotorstep;	/* inode32 AG rotoring control knob */
+} xfs_param_t;
+
+/*
+ * xfs_error_level:
+ *
+ * How much error reporting will be done when internal problems are
+ * encountered.  These problems normally return an EFSCORRUPTED to their
+ * caller, with no other information reported.
+ *
+ * 0	No error reports
+ * 1	Report EFSCORRUPTED errors that will cause a filesystem shutdown
+ * 5	Report all EFSCORRUPTED errors (all of the above errors, plus any
+ *	additional errors that are known to not cause shutdowns)
+ *
+ * xfs_panic_mask bit 0x8 turns the error reports into panics
+ */
+
+enum {
+	/* XFS_REFCACHE_SIZE = 1 */
+	/* XFS_REFCACHE_PURGE = 2 */
+	XFS_RESTRICT_CHOWN = 3,
+	XFS_SGID_INHERIT = 4,
+	XFS_SYMLINK_MODE = 5,
+	XFS_PANIC_MASK = 6,
+	XFS_ERRLEVEL = 7,
+	XFS_SYNCD_TIMER = 8,
+	/* XFS_PROBE_DMAPI = 9 */
+	/* XFS_PROBE_IOOPS = 10 */
+	/* XFS_PROBE_QUOTA = 11 */
+	XFS_STATS_CLEAR = 12,
+	XFS_INHERIT_SYNC = 13,
+	XFS_INHERIT_NODUMP = 14,
+	XFS_INHERIT_NOATIME = 15,
+	XFS_BUF_TIMER = 16,
+	XFS_BUF_AGE = 17,
+	/* XFS_IO_BYPASS = 18 */
+	XFS_INHERIT_NOSYM = 19,
+	XFS_ROTORSTEP = 20,
+};
+
+extern xfs_param_t	xfs_params;
+
+#ifdef CONFIG_SYSCTL
+extern void xfs_sysctl_register(void);
+extern void xfs_sysctl_unregister(void);
+#else
+# define xfs_sysctl_register()		do { } while (0)
+# define xfs_sysctl_unregister()	do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
+#endif /* __XFS_SYSCTL_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_version.h b/fs/xfs/linux-2.6/xfs_version.h
new file mode 100644
index 0000000..96f9639
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_version.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2001-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Dummy file that can contain a timestamp to put into the
+ * XFS init string, to help users keep track of what they're
+ * running
+ */
+
+#ifndef __XFS_VERSION_H__
+#define __XFS_VERSION_H__
+
+#define XFS_VERSION_STRING "SGI XFS"
+
+#endif /* __XFS_VERSION_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
new file mode 100644
index 0000000..669c616
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_macros.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_imap.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+
+int
+vfs_mount(
+	struct bhv_desc		*bdp,
+	struct xfs_mount_args	*args,
+	struct cred		*cr)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_mount)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_mount)(next, args, cr));
+}
+
+int
+vfs_parseargs(
+	struct bhv_desc		*bdp,
+	char			*s,
+	struct xfs_mount_args	*args,
+	int			f)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_parseargs)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_parseargs)(next, s, args, f));
+}
+
+int
+vfs_showargs(
+	struct bhv_desc		*bdp,
+	struct seq_file		*m)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_showargs)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_showargs)(next, m));
+}
+
+int
+vfs_unmount(
+	struct bhv_desc		*bdp,
+	int			fl,
+	struct cred		*cr)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_unmount)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_unmount)(next, fl, cr));
+}
+
+int
+vfs_mntupdate(
+	struct bhv_desc		*bdp,
+	int			*fl,
+	struct xfs_mount_args	*args)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_mntupdate)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_mntupdate)(next, fl, args));
+}
+
+int
+vfs_root(
+	struct bhv_desc		*bdp,
+	struct vnode		**vpp)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_root)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_root)(next, vpp));
+}
+
+int
+vfs_statvfs(
+	struct bhv_desc		*bdp,
+	xfs_statfs_t		*sp,
+	struct vnode		*vp)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_statvfs)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_statvfs)(next, sp, vp));
+}
+
+int
+vfs_sync(
+	struct bhv_desc		*bdp,
+	int			fl,
+	struct cred		*cr)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_sync)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_sync)(next, fl, cr));
+}
+
+int
+vfs_vget(
+	struct bhv_desc		*bdp,
+	struct vnode		**vpp,
+	struct fid		*fidp)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_vget)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_vget)(next, vpp, fidp));
+}
+
+int
+vfs_dmapiops(
+	struct bhv_desc		*bdp,
+	caddr_t			addr)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_dmapiops)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_dmapiops)(next, addr));
+}
+
+int
+vfs_quotactl(
+	struct bhv_desc		*bdp,
+	int			cmd,
+	int			id,
+	caddr_t			addr)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_quotactl)
+		next = BHV_NEXT(next);
+	return ((*bhvtovfsops(next)->vfs_quotactl)(next, cmd, id, addr));
+}
+
+void
+vfs_init_vnode(
+	struct bhv_desc		*bdp,
+	struct vnode		*vp,
+	struct bhv_desc		*bp,
+	int			unlock)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_init_vnode)
+		next = BHV_NEXT(next);
+	((*bhvtovfsops(next)->vfs_init_vnode)(next, vp, bp, unlock));
+}
+
+void
+vfs_force_shutdown(
+	struct bhv_desc		*bdp,
+	int			fl,
+	char			*file,
+	int			line)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_force_shutdown)
+		next = BHV_NEXT(next);
+	((*bhvtovfsops(next)->vfs_force_shutdown)(next, fl, file, line));
+}
+
+void
+vfs_freeze(
+	struct bhv_desc		*bdp)
+{
+	struct bhv_desc		*next = bdp;
+
+	ASSERT(next);
+	while (! (bhvtovfsops(next))->vfs_freeze)
+		next = BHV_NEXT(next);
+	((*bhvtovfsops(next)->vfs_freeze)(next));
+}
+
+vfs_t *
+vfs_allocate( void )
+{
+	struct vfs		*vfsp;
+
+	vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
+	bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
+	INIT_LIST_HEAD(&vfsp->vfs_sync_list);
+	spin_lock_init(&vfsp->vfs_sync_lock);
+	init_waitqueue_head(&vfsp->vfs_wait_sync_task);
+	init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
+	return vfsp;
+}
+
+void
+vfs_deallocate(
+	struct vfs		*vfsp)
+{
+	bhv_head_destroy(VFS_BHVHEAD(vfsp));
+	kmem_free(vfsp, sizeof(vfs_t));
+}
+
+void
+vfs_insertops(
+	struct vfs		*vfsp,
+	struct bhv_vfsops	*vfsops)
+{
+	struct bhv_desc		*bdp;
+
+	bdp = kmem_alloc(sizeof(struct bhv_desc), KM_SLEEP);
+	bhv_desc_init(bdp, NULL, vfsp, vfsops);
+	bhv_insert(&vfsp->vfs_bh, bdp);
+}
+
+void
+vfs_insertbhv(
+	struct vfs		*vfsp,
+	struct bhv_desc		*bdp,
+	struct vfsops		*vfsops,
+	void			*mount)
+{
+	bhv_desc_init(bdp, mount, vfsp, vfsops);
+	bhv_insert_initial(&vfsp->vfs_bh, bdp);
+}
+
+void
+bhv_remove_vfsops(
+	struct vfs		*vfsp,
+	int			pos)
+{
+	struct bhv_desc		*bhv;
+
+	bhv = bhv_lookup_range(&vfsp->vfs_bh, pos, pos);
+	if (!bhv)
+		return;
+	bhv_remove(&vfsp->vfs_bh, bhv);
+	kmem_free(bhv, sizeof(*bhv));
+}
+
+void
+bhv_remove_all_vfsops(
+	struct vfs		*vfsp,
+	int			freebase)
+{
+	struct xfs_mount	*mp;
+
+	bhv_remove_vfsops(vfsp, VFS_POSITION_QM);
+	bhv_remove_vfsops(vfsp, VFS_POSITION_DM);
+	if (!freebase)
+		return;
+	mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops));
+	VFS_REMOVEBHV(vfsp, &mp->m_bhv);
+	xfs_mount_free(mp, 0);
+}
+
+void
+bhv_insert_all_vfsops(
+	struct vfs		*vfsp)
+{
+	struct xfs_mount	*mp;
+
+	mp = xfs_mount_init();
+	vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
+	vfs_insertdmapi(vfsp);
+	vfs_insertquota(vfsp);
+}
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
new file mode 100644
index 0000000..7649399
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_VFS_H__
+#define __XFS_VFS_H__
+
+#include <linux/vfs.h>
+#include "xfs_fs.h"
+
+struct fid;
+struct vfs;
+struct cred;
+struct vnode;
+struct kstatfs;
+struct seq_file;
+struct super_block;
+struct xfs_mount_args;
+
+typedef struct kstatfs xfs_statfs_t;
+
+typedef struct vfs_sync_work {
+	struct list_head	w_list;
+	struct vfs		*w_vfs;
+	void			*w_data;	/* syncer routine argument */
+	void			(*w_syncer)(struct vfs *, void *);
+} vfs_sync_work_t;
+
+typedef struct vfs {
+	u_int			vfs_flag;	/* flags */
+	xfs_fsid_t		vfs_fsid;	/* file system ID */
+	xfs_fsid_t		*vfs_altfsid;	/* An ID fixed for life of FS */
+	bhv_head_t		vfs_bh;		/* head of vfs behavior chain */
+	struct super_block	*vfs_super;	/* generic superblock pointer */
+	struct task_struct	*vfs_sync_task;	/* generalised sync thread */
+	vfs_sync_work_t		vfs_sync_work;	/* work item for VFS_SYNC */
+	struct list_head	vfs_sync_list;	/* sync thread work item list */
+	spinlock_t		vfs_sync_lock;	/* work item list lock */
+	int 			vfs_sync_seq;	/* sync thread generation no. */
+	wait_queue_head_t	vfs_wait_single_sync_task;
+	wait_queue_head_t	vfs_wait_sync_task;
+} vfs_t;
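+
+/*
+ * One vfs_t is allocated per mounted XFS filesystem (see vfs_allocate() in
+ * xfs_vfs.c); linvfs_fill_super() stores it in sb->s_fs_info via
+ * LINVFS_SET_VFS() so LINVFS_GET_VFS(sb) can recover it from the generic
+ * superblock later.
+ */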
+
+#define vfs_fbhv		vfs_bh.bh_first	/* 1st on vfs behavior chain */
+
+#define bhvtovfs(bdp)		( (struct vfs *)BHV_VOBJ(bdp) )
+#define bhvtovfsops(bdp)	( (struct vfsops *)BHV_OPS(bdp) )
+#define VFS_BHVHEAD(vfs)	( &(vfs)->vfs_bh )
+#define VFS_REMOVEBHV(vfs, bdp)	( bhv_remove(VFS_BHVHEAD(vfs), bdp) )
+
+#define VFS_POSITION_BASE	BHV_POSITION_BASE	/* chain bottom */
+#define VFS_POSITION_TOP	BHV_POSITION_TOP	/* chain top */
+#define VFS_POSITION_INVALID	BHV_POSITION_INVALID	/* invalid pos. num */
+
+typedef enum {
+	VFS_BHV_UNKNOWN,	/* not specified */
+	VFS_BHV_XFS,		/* xfs */
+	VFS_BHV_DM,		/* data migration */
+	VFS_BHV_QM,		/* quota manager */
+	VFS_BHV_IO,		/* IO path */
+	VFS_BHV_END		/* housekeeping end-of-range */
+} vfs_bhv_t;
+
+#define VFS_POSITION_XFS	(BHV_POSITION_BASE)
+#define VFS_POSITION_DM		(VFS_POSITION_BASE+10)
+#define VFS_POSITION_QM		(VFS_POSITION_BASE+20)
+#define VFS_POSITION_IO		(VFS_POSITION_BASE+30)
+
+#define VFS_RDONLY		0x0001	/* read-only vfs */
+#define VFS_GRPID		0x0002	/* group-ID assigned from directory */
+#define VFS_DMI			0x0004	/* filesystem has the DMI enabled */
+#define VFS_UMOUNT		0x0008	/* unmount in progress */
+#define VFS_END			0x0008	/* max flag */
+
+#define SYNC_ATTR		0x0001	/* sync attributes */
+#define SYNC_CLOSE		0x0002	/* close file system down */
+#define SYNC_DELWRI		0x0004	/* look at delayed writes */
+#define SYNC_WAIT		0x0008	/* wait for i/o to complete */
+#define SYNC_BDFLUSH		0x0010	/* BDFLUSH is calling -- don't block */
+#define SYNC_FSDATA		0x0020	/* flush fs data (e.g. superblocks) */
+#define SYNC_REFCACHE		0x0040  /* prune some of the nfs ref cache */
+#define SYNC_REMOUNT		0x0080  /* remount readonly, no dummy LRs */
+
+typedef int	(*vfs_mount_t)(bhv_desc_t *,
+				struct xfs_mount_args *, struct cred *);
+typedef int	(*vfs_parseargs_t)(bhv_desc_t *, char *,
+				struct xfs_mount_args *, int);
+typedef	int	(*vfs_showargs_t)(bhv_desc_t *, struct seq_file *);
+typedef int	(*vfs_unmount_t)(bhv_desc_t *, int, struct cred *);
+typedef int	(*vfs_mntupdate_t)(bhv_desc_t *, int *,
+				struct xfs_mount_args *);
+typedef int	(*vfs_root_t)(bhv_desc_t *, struct vnode **);
+typedef int	(*vfs_statvfs_t)(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+typedef int	(*vfs_sync_t)(bhv_desc_t *, int, struct cred *);
+typedef int	(*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *);
+typedef int	(*vfs_dmapiops_t)(bhv_desc_t *, caddr_t);
+typedef int	(*vfs_quotactl_t)(bhv_desc_t *, int, int, caddr_t);
+typedef void	(*vfs_init_vnode_t)(bhv_desc_t *,
+				struct vnode *, bhv_desc_t *, int);
+typedef void	(*vfs_force_shutdown_t)(bhv_desc_t *, int, char *, int);
+typedef void	(*vfs_freeze_t)(bhv_desc_t *);
+
+typedef struct vfsops {
+	bhv_position_t		vf_position;	/* behavior chain position */
+	vfs_mount_t		vfs_mount;	/* mount file system */
+	vfs_parseargs_t		vfs_parseargs;	/* parse mount options */
+	vfs_showargs_t		vfs_showargs;	/* unparse mount options */
+	vfs_unmount_t		vfs_unmount;	/* unmount file system */
+	vfs_mntupdate_t		vfs_mntupdate;	/* update file system options */
+	vfs_root_t		vfs_root;	/* get root vnode */
+	vfs_statvfs_t		vfs_statvfs;	/* file system statistics */
+	vfs_sync_t		vfs_sync;	/* flush files */
+	vfs_vget_t		vfs_vget;	/* get vnode from fid */
+	vfs_dmapiops_t		vfs_dmapiops;	/* data migration */
+	vfs_quotactl_t		vfs_quotactl;	/* disk quota */
+	vfs_init_vnode_t	vfs_init_vnode;	/* initialize a new vnode */
+	vfs_force_shutdown_t	vfs_force_shutdown;	/* crash and burn */
+	vfs_freeze_t		vfs_freeze;	/* freeze fs for snapshot */
+} vfsops_t;
+
+/*
+ * VFS's.  Operates on vfs structure pointers (starts at bhv head).
+ */
+#define VHEAD(v)			((v)->vfs_fbhv)
+#define VFS_MOUNT(v, ma,cr, rv)		((rv) = vfs_mount(VHEAD(v), ma,cr))
+#define VFS_PARSEARGS(v, o,ma,f, rv)	((rv) = vfs_parseargs(VHEAD(v), o,ma,f))
+#define VFS_SHOWARGS(v, m, rv)		((rv) = vfs_showargs(VHEAD(v), m))
+#define VFS_UNMOUNT(v, f, cr, rv)	((rv) = vfs_unmount(VHEAD(v), f,cr))
+#define VFS_MNTUPDATE(v, fl, args, rv)	((rv) = vfs_mntupdate(VHEAD(v), fl, args))
+#define VFS_ROOT(v, vpp, rv)		((rv) = vfs_root(VHEAD(v), vpp))
+#define VFS_STATVFS(v, sp,vp, rv)	((rv) = vfs_statvfs(VHEAD(v), sp,vp))
+#define VFS_SYNC(v, flag,cr, rv)	((rv) = vfs_sync(VHEAD(v), flag,cr))
+#define VFS_VGET(v, vpp,fidp, rv)	((rv) = vfs_vget(VHEAD(v), vpp,fidp))
+#define VFS_DMAPIOPS(v, p, rv)		((rv) = vfs_dmapiops(VHEAD(v), p))
+#define VFS_QUOTACTL(v, c,id,p, rv)	((rv) = vfs_quotactl(VHEAD(v), c,id,p))
+#define VFS_INIT_VNODE(v, vp,b,ul)	( vfs_init_vnode(VHEAD(v), vp,b,ul) )
+#define VFS_FORCE_SHUTDOWN(v, fl,f,l)	( vfs_force_shutdown(VHEAD(v), fl,f,l) )
+#define VFS_FREEZE(v)			( vfs_freeze(VHEAD(v)) )
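+
+/*
+ * For example, VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error) expands to
+ *	error = vfs_sync(vfsp->vfs_fbhv, SYNC_FSDATA, NULL);
+ * i.e. the call enters at the head of the behavior chain and vfs_sync()
+ * (in xfs_vfs.c) walks the chain with BHV_NEXT() until it finds a
+ * descriptor whose vfsops supplies a ->vfs_sync method.
+ */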
+
+/*
+ * PVFS's.  Operates on behavior descriptor pointers.
+ */
+#define PVFS_MOUNT(b, ma,cr, rv)	((rv) = vfs_mount(b, ma,cr))
+#define PVFS_PARSEARGS(b, o,ma,f, rv)	((rv) = vfs_parseargs(b, o,ma,f))
+#define PVFS_SHOWARGS(b, m, rv)		((rv) = vfs_showargs(b, m))
+#define PVFS_UNMOUNT(b, f,cr, rv)	((rv) = vfs_unmount(b, f,cr))
+#define PVFS_MNTUPDATE(b, fl, args, rv)	((rv) = vfs_mntupdate(b, fl, args))
+#define PVFS_ROOT(b, vpp, rv)		((rv) = vfs_root(b, vpp))
+#define PVFS_STATVFS(b, sp,vp, rv)	((rv) = vfs_statvfs(b, sp,vp))
+#define PVFS_SYNC(b, flag,cr, rv)	((rv) = vfs_sync(b, flag,cr))
+#define PVFS_VGET(b, vpp,fidp, rv)	((rv) = vfs_vget(b, vpp,fidp))
+#define PVFS_DMAPIOPS(b, p, rv)		((rv) = vfs_dmapiops(b, p))
+#define PVFS_QUOTACTL(b, c,id,p, rv)	((rv) = vfs_quotactl(b, c,id,p))
+#define PVFS_INIT_VNODE(b, vp,b2,ul)	( vfs_init_vnode(b, vp,b2,ul) )
+#define PVFS_FORCE_SHUTDOWN(b, fl,f,l)	( vfs_force_shutdown(b, fl,f,l) )
+#define PVFS_FREEZE(b)			( vfs_freeze(b) )
+
+extern int vfs_mount(bhv_desc_t *, struct xfs_mount_args *, struct cred *);
+extern int vfs_parseargs(bhv_desc_t *, char *, struct xfs_mount_args *, int);
+extern int vfs_showargs(bhv_desc_t *, struct seq_file *);
+extern int vfs_unmount(bhv_desc_t *, int, struct cred *);
+extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *);
+extern int vfs_root(bhv_desc_t *, struct vnode **);
+extern int vfs_statvfs(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+extern int vfs_sync(bhv_desc_t *, int, struct cred *);
+extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *);
+extern int vfs_dmapiops(bhv_desc_t *, caddr_t);
+extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t);
+extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int);
+extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int);
+extern void vfs_freeze(bhv_desc_t *);
+
+typedef struct bhv_vfsops {
+	struct vfsops		bhv_common;
+	void *			bhv_custom;
+} bhv_vfsops_t;
+
+#define vfs_bhv_lookup(v, id)	( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) )
+#define vfs_bhv_custom(b)	( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom )
+#define vfs_bhv_set_custom(b,o)	( (b)->bhv_custom = (void *)(o))
+#define vfs_bhv_clr_custom(b)	( (b)->bhv_custom = NULL )
+
+extern vfs_t *vfs_allocate(void);
+extern void vfs_deallocate(vfs_t *);
+extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
+extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
+
+extern void bhv_insert_all_vfsops(struct vfs *);
+extern void bhv_remove_all_vfsops(struct vfs *, int);
+extern void bhv_remove_vfsops(struct vfs *, int);
+
+#define fs_frozen(vfsp)		((vfsp)->vfs_super->s_frozen)
+#define fs_check_frozen(vfsp, level) \
+	vfs_check_frozen(vfsp->vfs_super, level);
+
+#endif	/* __XFS_VFS_H__ */
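
Each VFS_*() macro above expands to a call that starts at the behavior-chain head (VHEAD) and hands its result back through an rv output argument rather than a return value. Below is a minimal, self-contained userspace sketch of that dispatch pattern; all demo_* names are invented for illustration and are not the kernel API.

/*
 * Illustrative sketch only: dispatch through a chain-head pointer with the
 * result delivered via an "rv" argument, as the VFS_*() macros above do.
 * The demo_* names are invented for this example.
 */
#include <stdio.h>

typedef struct demo_ops {
	int (*op_sync)(void *head, int flags);
} demo_ops_t;

typedef struct demo_vfs {
	void		*vfs_fbhv;	/* behavior-chain head */
	demo_ops_t	*vfs_ops;
} demo_vfs_t;

#define DEMO_VHEAD(v)		((v)->vfs_fbhv)
#define DEMO_VFS_SYNC(v, f, rv)	((rv) = (v)->vfs_ops->op_sync(DEMO_VHEAD(v), (f)))

static int demo_sync(void *head, int flags)
{
	printf("sync(head=%p, flags=%d)\n", head, flags);
	return 0;
}

int main(void)
{
	demo_ops_t ops = { .op_sync = demo_sync };
	demo_vfs_t vfs = { .vfs_fbhv = &vfs, .vfs_ops = &ops };
	int rv;

	DEMO_VFS_SYNC(&vfs, 1, rv);	/* rv receives the op's return code */
	return rv;
}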
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
new file mode 100644
index 0000000..849c61c
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+
+uint64_t vn_generation;		/* vnode generation number */
+DEFINE_SPINLOCK(vnumber_lock);
+
+/*
+ * Dedicated vnode inactive/reclaim sync semaphores.
+ * Prime number of hash buckets since address is used as the key.
+ */
+#define NVSYNC                  37
+#define vptosync(v)             (&vsync[((unsigned long)v) % NVSYNC])
+sv_t vsync[NVSYNC];
+
+/*
+ * Translate stat(2) file types to vnode types and vice versa.
+ * Aware of numeric order of S_IFMT and vnode type values.
+ */
+enum vtype iftovt_tab[] = {
+	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
+	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
+};
+
+u_short vttoif_tab[] = {
+	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
+};
+
+
+void
+vn_init(void)
+{
+	register sv_t *svp;
+	register int i;
+
+	for (svp = vsync, i = 0; i < NVSYNC; i++, svp++)
+		init_sv(svp, SV_DEFAULT, "vsy", i);
+}
+
+/*
+ * Clean a vnode of filesystem-specific data and prepare it for reuse.
+ */
+STATIC int
+vn_reclaim(
+	struct vnode	*vp)
+{
+	int		error;
+
+	XFS_STATS_INC(vn_reclaim);
+	vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);
+
+	/*
+	 * Only make the VOP_RECLAIM call if there are behaviors
+	 * to call.
+	 */
+	if (vp->v_fbhv) {
+		VOP_RECLAIM(vp, error);
+		if (error)
+			return -error;
+	}
+	ASSERT(vp->v_fbhv == NULL);
+
+	VN_LOCK(vp);
+	vp->v_flag &= (VRECLM|VWAIT);
+	VN_UNLOCK(vp, 0);
+
+	vp->v_type = VNON;
+	vp->v_fbhv = NULL;
+
+#ifdef XFS_VNODE_TRACE
+	ktrace_free(vp->v_trace);
+	vp->v_trace = NULL;
+#endif
+
+	return 0;
+}
+
+STATIC void
+vn_wakeup(
+	struct vnode	*vp)
+{
+	VN_LOCK(vp);
+	if (vp->v_flag & VWAIT)
+		sv_broadcast(vptosync(vp));
+	vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
+	VN_UNLOCK(vp, 0);
+}
+
+int
+vn_wait(
+	struct vnode	*vp)
+{
+	VN_LOCK(vp);
+	if (vp->v_flag & (VINACT | VRECLM)) {
+		vp->v_flag |= VWAIT;
+		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
+		return 1;
+	}
+	VN_UNLOCK(vp, 0);
+	return 0;
+}
+
+struct vnode *
+vn_initialize(
+	struct inode	*inode)
+{
+	struct vnode	*vp = LINVFS_GET_VP(inode);
+
+	XFS_STATS_INC(vn_active);
+	XFS_STATS_INC(vn_alloc);
+
+	vp->v_flag = VMODIFIED;
+	spinlock_init(&vp->v_lock, "v_lock");
+
+	spin_lock(&vnumber_lock);
+	if (!++vn_generation)	/* v_number shouldn't be zero */
+		vn_generation++;
+	vp->v_number = vn_generation;
+	spin_unlock(&vnumber_lock);
+
+	ASSERT(VN_CACHED(vp) == 0);
+
+	/* Initialize the first behavior and the behavior chain head. */
+	vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
+
+#ifdef	XFS_VNODE_TRACE
+	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
+	printk("Allocated VNODE_TRACE at 0x%p\n", vp->v_trace);
+#endif	/* XFS_VNODE_TRACE */
+
+	vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
+	return vp;
+}
+
+/*
+ * Get a reference on a vnode.
+ */
+vnode_t *
+vn_get(
+	struct vnode	*vp,
+	vmap_t		*vmap)
+{
+	struct inode	*inode;
+
+	XFS_STATS_INC(vn_get);
+	inode = LINVFS_GET_IP(vp);
+	if (inode->i_state & I_FREEING)
+		return NULL;
+
+	inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
+	if (!inode)	/* Inode not present */
+		return NULL;
+
+	vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
+
+	return vp;
+}
+
+/*
+ * Revalidate the Linux inode from the vattr.
+ * Note: i_size is _not_ updated; we must hold the inode
+ * semaphore when doing that - the caller's responsibility.
+ */
+void
+vn_revalidate_core(
+	struct vnode	*vp,
+	vattr_t		*vap)
+{
+	struct inode	*inode = LINVFS_GET_IP(vp);
+
+	inode->i_mode	    = VTTOIF(vap->va_type) | vap->va_mode;
+	inode->i_nlink	    = vap->va_nlink;
+	inode->i_uid	    = vap->va_uid;
+	inode->i_gid	    = vap->va_gid;
+	inode->i_blocks	    = vap->va_nblocks;
+	inode->i_mtime	    = vap->va_mtime;
+	inode->i_ctime	    = vap->va_ctime;
+	inode->i_atime	    = vap->va_atime;
+	if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
+		inode->i_flags |= S_IMMUTABLE;
+	else
+		inode->i_flags &= ~S_IMMUTABLE;
+	if (vap->va_xflags & XFS_XFLAG_APPEND)
+		inode->i_flags |= S_APPEND;
+	else
+		inode->i_flags &= ~S_APPEND;
+	if (vap->va_xflags & XFS_XFLAG_SYNC)
+		inode->i_flags |= S_SYNC;
+	else
+		inode->i_flags &= ~S_SYNC;
+	if (vap->va_xflags & XFS_XFLAG_NOATIME)
+		inode->i_flags |= S_NOATIME;
+	else
+		inode->i_flags &= ~S_NOATIME;
+}
+
+/*
+ * Revalidate the Linux inode from the vnode.
+ */
+int
+vn_revalidate(
+	struct vnode	*vp)
+{
+	vattr_t		va;
+	int		error;
+
+	vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
+	ASSERT(vp->v_fbhv != NULL);
+
+	va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
+	VOP_GETATTR(vp, &va, 0, NULL, error);
+	if (!error) {
+		vn_revalidate_core(vp, &va);
+		VUNMODIFY(vp);
+	}
+	return -error;
+}
+
+/*
+ * Purge a vnode from the cache.
+ * At this point the vnode is guaranteed to have no references (vn_count == 0).
+ * The caller must ensure that no one can obtain a handle on the vnode
+ * (via vn_get); this is usually done via a mount/vfs lock.
+ */
+void
+vn_purge(
+	struct vnode	*vp,
+	vmap_t		*vmap)
+{
+	vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);
+
+again:
+	/*
+	 * Check whether vp has already been reclaimed since our caller
+	 * sampled its version while holding a filesystem cache lock that
+	 * its VOP_RECLAIM function acquires.
+	 */
+	VN_LOCK(vp);
+	if (vp->v_number != vmap->v_number) {
+		VN_UNLOCK(vp, 0);
+		return;
+	}
+
+	/*
+	 * If vp is being reclaimed or inactivated, wait until it is inert,
+	 * then proceed.  Can't assume that vnode is actually reclaimed
+	 * just because the reclaimed flag is asserted -- a vn_alloc
+	 * reclaim can fail.
+	 */
+	if (vp->v_flag & (VINACT | VRECLM)) {
+		ASSERT(vn_count(vp) == 0);
+		vp->v_flag |= VWAIT;
+		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
+		goto again;
+	}
+
+	/*
+	 * Another process could have raced in and gotten this vnode...
+	 */
+	if (vn_count(vp) > 0) {
+		VN_UNLOCK(vp, 0);
+		return;
+	}
+
+	XFS_STATS_DEC(vn_active);
+	vp->v_flag |= VRECLM;
+	VN_UNLOCK(vp, 0);
+
+	/*
+	 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
+	 * vp's filesystem to flush and invalidate all cached resources.
+	 * When vn_reclaim returns, vp should have no private data,
+	 * either in a system cache or attached to v_data.
+	 */
+	if (vn_reclaim(vp) != 0)
+		panic("vn_purge: cannot reclaim");
+
+	/*
+	 * Wakeup anyone waiting for vp to be reclaimed.
+	 */
+	vn_wakeup(vp);
+}
+
+/*
+ * Add a reference to a referenced vnode.
+ */
+struct vnode *
+vn_hold(
+	struct vnode	*vp)
+{
+	struct inode	*inode;
+
+	XFS_STATS_INC(vn_hold);
+
+	VN_LOCK(vp);
+	inode = igrab(LINVFS_GET_IP(vp));
+	ASSERT(inode);
+	VN_UNLOCK(vp, 0);
+
+	return vp;
+}
+
+/*
+ *  Call VOP_INACTIVE on last reference.
+ */
+void
+vn_rele(
+	struct vnode	*vp)
+{
+	int		vcnt;
+	int		cache;
+
+	XFS_STATS_INC(vn_rele);
+
+	VN_LOCK(vp);
+
+	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
+	vcnt = vn_count(vp);
+
+	/*
+	 * Since we always get called from put_inode we know
+	 * that i_count won't be decremented after we
+	 * return.
+	 */
+	if (!vcnt) {
+		/*
+		 * As soon as we turn this on, no one can find us in vn_get
+		 * until we turn off VINACT or VRECLM.
+		 */
+		vp->v_flag |= VINACT;
+		VN_UNLOCK(vp, 0);
+
+		/*
+		 * Do not make the VOP_INACTIVE call if there
+		 * are no behaviors attached to the vnode to call.
+		 */
+		if (vp->v_fbhv)
+			VOP_INACTIVE(vp, NULL, cache);
+
+		VN_LOCK(vp);
+		if (vp->v_flag & VWAIT)
+			sv_broadcast(vptosync(vp));
+
+		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
+	}
+
+	VN_UNLOCK(vp, 0);
+
+	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
+}
+
+/*
+ * Finish the removal of a vnode.
+ */
+void
+vn_remove(
+	struct vnode	*vp)
+{
+	vmap_t		vmap;
+
+	/* Make sure we don't do this to the same vnode twice */
+	if (!(vp->v_fbhv))
+		return;
+
+	XFS_STATS_INC(vn_remove);
+	vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
+
+	/*
+	 * After the following purge the vnode
+	 * will no longer exist.
+	 */
+	VMAP(vp, vmap);
+	vn_purge(vp, &vmap);
+}
+
+
+#ifdef	XFS_VNODE_TRACE
+
+#define KTRACE_ENTER(vp, vk, s, line, ra)			\
+	ktrace_enter(	(vp)->v_trace,				\
+/*  0 */		(void *)(__psint_t)(vk),		\
+/*  1 */		(void *)(s),				\
+/*  2 */		(void *)(__psint_t) line,		\
+/*  3 */		(void *)(vn_count(vp)), \
+/*  4 */		(void *)(ra),				\
+/*  5 */		(void *)(__psunsigned_t)(vp)->v_flag,	\
+/*  6 */		(void *)(__psint_t)current_cpu(),	\
+/*  7 */		(void *)(__psint_t)current_pid(),	\
+/*  8 */		(void *)__return_address,		\
+/*  9 */		0, 0, 0, 0, 0, 0, 0)
+
+/*
+ * Vnode tracing code.
+ */
+void
+vn_trace_entry(vnode_t *vp, char *func, inst_t *ra)
+{
+	KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
+}
+
+void
+vn_trace_exit(vnode_t *vp, char *func, inst_t *ra)
+{
+	KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
+}
+
+void
+vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+	KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
+}
+
+void
+vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+	KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
+}
+
+void
+vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+	KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
+}
+#endif	/* XFS_VNODE_TRACE */
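
vn_init() above spreads the inactive/reclaim wait channels across NVSYNC (37, a prime) buckets keyed by the vnode's address; a prime bucket count spreads pointer values, which tend to share low-order alignment bits, more evenly across buckets. A minimal, self-contained sketch of that bucket selection follows; the demo_* names are invented for illustration.

/*
 * Illustrative sketch only: select a wait-channel bucket from an object's
 * address, as vptosync() above does.
 */
#include <stdio.h>

#define DEMO_NVSYNC	37	/* prime, as NVSYNC is above */

static int demo_bucket(const void *vp)
{
	return (int)((unsigned long)vp % DEMO_NVSYNC);
}

int main(void)
{
	int dummy[4];
	int i;

	for (i = 0; i < 4; i++)
		printf("object %d -> bucket %d\n", i, demo_bucket(&dummy[i]));
	return 0;
}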
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
new file mode 100644
index 0000000..da76c1f
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -0,0 +1,666 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ *
+ * Portions Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef __XFS_VNODE_H__
+#define __XFS_VNODE_H__
+
+struct uio;
+struct file;
+struct vattr;
+struct xfs_iomap;
+struct attrlist_cursor_kern;
+
+/*
+ * Vnode types.  VNON means no type.
+ */
+enum vtype	{ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };
+
+typedef xfs_ino_t vnumber_t;
+typedef struct dentry vname_t;
+typedef bhv_head_t vn_bhv_head_t;
+
+/*
+ * MP locking protocols:
+ *	v_flag, v_vfsp				VN_LOCK/VN_UNLOCK
+ *	v_type					read-only or fs-dependent
+ */
+typedef struct vnode {
+	__u32		v_flag;			/* vnode flags (see below) */
+	enum vtype	v_type;			/* vnode type */
+	struct vfs	*v_vfsp;		/* ptr to containing VFS */
+	vnumber_t	v_number;		/* in-core vnode number */
+	vn_bhv_head_t	v_bh;			/* behavior head */
+	spinlock_t	v_lock;			/* VN_LOCK/VN_UNLOCK */
+	struct inode	v_inode;		/* Linux inode */
+#ifdef XFS_VNODE_TRACE
+	struct ktrace	*v_trace;		/* trace header structure    */
+#endif
+} vnode_t;
+
+#define v_fbhv			v_bh.bh_first	       /* first behavior */
+#define v_fops			v_bh.bh_first->bd_ops  /* first behavior ops */
+
+#define VNODE_POSITION_BASE	BHV_POSITION_BASE	/* chain bottom */
+#define VNODE_POSITION_TOP	BHV_POSITION_TOP	/* chain top */
+#define VNODE_POSITION_INVALID	BHV_POSITION_INVALID	/* invalid pos. num */
+
+typedef enum {
+	VN_BHV_UNKNOWN,		/* not specified */
+	VN_BHV_XFS,		/* xfs */
+	VN_BHV_DM,		/* data migration */
+	VN_BHV_QM,		/* quota manager */
+	VN_BHV_IO,		/* IO path */
+	VN_BHV_END		/* housekeeping end-of-range */
+} vn_bhv_t;
+
+#define VNODE_POSITION_XFS	(VNODE_POSITION_BASE)
+#define VNODE_POSITION_DM	(VNODE_POSITION_BASE+10)
+#define VNODE_POSITION_QM	(VNODE_POSITION_BASE+20)
+#define VNODE_POSITION_IO	(VNODE_POSITION_BASE+30)
+
+/*
+ * Macros for dealing with the behavior descriptor inside of the vnode.
+ */
+#define BHV_TO_VNODE(bdp)	((vnode_t *)BHV_VOBJ(bdp))
+#define BHV_TO_VNODE_NULL(bdp)	((vnode_t *)BHV_VOBJNULL(bdp))
+
+#define VN_BHV_HEAD(vp)			((bhv_head_t *)(&((vp)->v_bh)))
+#define vn_bhv_head_init(bhp,name)	bhv_head_init(bhp,name)
+#define vn_bhv_remove(bhp,bdp)		bhv_remove(bhp,bdp)
+#define vn_bhv_lookup(bhp,ops)		bhv_lookup(bhp,ops)
+#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops)
+
+/*
+ * Vnode to Linux inode mapping.
+ */
+#define LINVFS_GET_VP(inode)	((vnode_t *)list_entry(inode, vnode_t, v_inode))
+#define LINVFS_GET_IP(vp)	(&(vp)->v_inode)
+
+/*
+ * Convert between vnode types and inode formats (since POSIX.1
+ * defines mode word of stat structure in terms of inode formats).
+ */
+extern enum vtype	iftovt_tab[];
+extern u_short		vttoif_tab[];
+#define IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
+#define VTTOIF(indx)	(vttoif_tab[(int)(indx)])
+#define MAKEIMODE(indx, mode)	(int)(VTTOIF(indx) | (mode))
+
+
+/*
+ * Vnode flags.
+ */
+#define VINACT		       0x1	/* vnode is being inactivated	*/
+#define VRECLM		       0x2	/* vnode is being reclaimed	*/
+#define VWAIT		       0x4	/* waiting for VINACT/VRECLM to end */
+#define VMODIFIED	       0x8	/* XFS inode state possibly differs */
+					/* to the Linux inode state.	*/
+
+/*
+ * Values for the VOP_RWLOCK and VOP_RWUNLOCK flags parameter.
+ */
+typedef enum vrwlock {
+	VRWLOCK_NONE,
+	VRWLOCK_READ,
+	VRWLOCK_WRITE,
+	VRWLOCK_WRITE_DIRECT,
+	VRWLOCK_TRY_READ,
+	VRWLOCK_TRY_WRITE
+} vrwlock_t;
+
+/*
+ * Return values for VOP_INACTIVE.  A return value of
+ * VN_INACTIVE_NOCACHE implies that the file system behavior
+ * has disassociated its state and bhv_desc_t from the vnode.
+ */
+#define	VN_INACTIVE_CACHE	0
+#define	VN_INACTIVE_NOCACHE	1
+
+/*
+ * Values for the cmd code given to VOP_VNODE_CHANGE.
+ */
+typedef enum vchange {
+	VCHANGE_FLAGS_FRLOCKS		= 0,
+	VCHANGE_FLAGS_ENF_LOCKING	= 1,
+	VCHANGE_FLAGS_TRUNCATED		= 2,
+	VCHANGE_FLAGS_PAGE_DIRTY	= 3,
+	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
+} vchange_t;
+
+
+typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
+typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
+				const struct iovec *, unsigned int,
+				loff_t *, int, struct cred *);
+typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct kiocb *,
+				const struct iovec *, unsigned int,
+				loff_t *, int, struct cred *);
+typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
+				loff_t *, int, size_t, read_actor_t,
+				void *, struct cred *);
+typedef int	(*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,
+				int, unsigned int, void __user *);
+typedef int	(*vop_getattr_t)(bhv_desc_t *, struct vattr *, int,
+				struct cred *);
+typedef int	(*vop_setattr_t)(bhv_desc_t *, struct vattr *, int,
+				struct cred *);
+typedef int	(*vop_access_t)(bhv_desc_t *, int, struct cred *);
+typedef int	(*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **,
+				int, vnode_t *, struct cred *);
+typedef int	(*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *,
+				vnode_t **, struct cred *);
+typedef int	(*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *);
+typedef int	(*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *,
+				struct cred *);
+typedef int	(*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *,
+				struct cred *);
+typedef int	(*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *,
+				vnode_t **, struct cred *);
+typedef int	(*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *);
+typedef int	(*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *,
+				int *);
+typedef int	(*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *,
+				char *, vnode_t **, struct cred *);
+typedef int	(*vop_readlink_t)(bhv_desc_t *, struct uio *, int,
+				struct cred *);
+typedef int	(*vop_fsync_t)(bhv_desc_t *, int, struct cred *,
+				xfs_off_t, xfs_off_t);
+typedef int	(*vop_inactive_t)(bhv_desc_t *, struct cred *);
+typedef int	(*vop_fid2_t)(bhv_desc_t *, struct fid *);
+typedef int	(*vop_release_t)(bhv_desc_t *);
+typedef int	(*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
+typedef void	(*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
+typedef int	(*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int,
+				struct xfs_iomap *, int *);
+typedef int	(*vop_reclaim_t)(bhv_desc_t *);
+typedef int	(*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int,
+				struct cred *);
+typedef	int	(*vop_attr_set_t)(bhv_desc_t *, char *, char *, int, int,
+				struct cred *);
+typedef	int	(*vop_attr_remove_t)(bhv_desc_t *, char *, int, struct cred *);
+typedef	int	(*vop_attr_list_t)(bhv_desc_t *, char *, int, int,
+				struct attrlist_cursor_kern *, struct cred *);
+typedef void	(*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int);
+typedef void	(*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t);
+typedef void	(*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+typedef void	(*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+typedef int	(*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t,
+				uint64_t, int);
+typedef int	(*vop_iflush_t)(bhv_desc_t *, int);
+
+
+typedef struct vnodeops {
+	bhv_position_t  vn_position;    /* position within behavior chain */
+	vop_open_t		vop_open;
+	vop_read_t		vop_read;
+	vop_write_t		vop_write;
+	vop_sendfile_t		vop_sendfile;
+	vop_ioctl_t		vop_ioctl;
+	vop_getattr_t		vop_getattr;
+	vop_setattr_t		vop_setattr;
+	vop_access_t		vop_access;
+	vop_lookup_t		vop_lookup;
+	vop_create_t		vop_create;
+	vop_remove_t		vop_remove;
+	vop_link_t		vop_link;
+	vop_rename_t		vop_rename;
+	vop_mkdir_t		vop_mkdir;
+	vop_rmdir_t		vop_rmdir;
+	vop_readdir_t		vop_readdir;
+	vop_symlink_t		vop_symlink;
+	vop_readlink_t		vop_readlink;
+	vop_fsync_t		vop_fsync;
+	vop_inactive_t		vop_inactive;
+	vop_fid2_t		vop_fid2;
+	vop_rwlock_t		vop_rwlock;
+	vop_rwunlock_t		vop_rwunlock;
+	vop_bmap_t		vop_bmap;
+	vop_reclaim_t		vop_reclaim;
+	vop_attr_get_t		vop_attr_get;
+	vop_attr_set_t		vop_attr_set;
+	vop_attr_remove_t	vop_attr_remove;
+	vop_attr_list_t		vop_attr_list;
+	vop_link_removed_t	vop_link_removed;
+	vop_vnode_change_t	vop_vnode_change;
+	vop_ptossvp_t		vop_tosspages;
+	vop_pflushinvalvp_t	vop_flushinval_pages;
+	vop_pflushvp_t		vop_flush_pages;
+	vop_release_t		vop_release;
+	vop_iflush_t		vop_iflush;
+} vnodeops_t;
+
+/*
+ * VOP's.
+ */
+#define _VOP_(op, vp)	(*((vnodeops_t *)(vp)->v_fops)->op)
+
+#define VOP_READ(vp,file,iov,segs,offset,ioflags,cr,rv)			\
+	rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
+#define VOP_WRITE(vp,file,iov,segs,offset,ioflags,cr,rv)		\
+	rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
+#define VOP_SENDFILE(vp,f,off,ioflags,cnt,act,targ,cr,rv)		\
+	rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,off,ioflags,cnt,act,targ,cr)
+#define VOP_BMAP(vp,of,sz,rw,b,n,rv)					\
+	rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
+#define VOP_OPEN(vp, cr, rv)						\
+	rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
+#define VOP_GETATTR(vp, vap, f, cr, rv)					\
+	rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr)
+#define	VOP_SETATTR(vp, vap, f, cr, rv)					\
+	rv = _VOP_(vop_setattr, vp)((vp)->v_fbhv, vap, f, cr)
+#define	VOP_ACCESS(vp, mode, cr, rv)					\
+	rv = _VOP_(vop_access, vp)((vp)->v_fbhv, mode, cr)
+#define	VOP_LOOKUP(vp,d,vpp,f,rdir,cr,rv)				\
+	rv = _VOP_(vop_lookup, vp)((vp)->v_fbhv,d,vpp,f,rdir,cr)
+#define VOP_CREATE(dvp,d,vap,vpp,cr,rv)					\
+	rv = _VOP_(vop_create, dvp)((dvp)->v_fbhv,d,vap,vpp,cr)
+#define VOP_REMOVE(dvp,d,cr,rv)						\
+	rv = _VOP_(vop_remove, dvp)((dvp)->v_fbhv,d,cr)
+#define	VOP_LINK(tdvp,fvp,d,cr,rv)					\
+	rv = _VOP_(vop_link, tdvp)((tdvp)->v_fbhv,fvp,d,cr)
+#define	VOP_RENAME(fvp,fnm,tdvp,tnm,cr,rv)				\
+	rv = _VOP_(vop_rename, fvp)((fvp)->v_fbhv,fnm,tdvp,tnm,cr)
+#define	VOP_MKDIR(dp,d,vap,vpp,cr,rv)					\
+	rv = _VOP_(vop_mkdir, dp)((dp)->v_fbhv,d,vap,vpp,cr)
+#define	VOP_RMDIR(dp,d,cr,rv)	 					\
+	rv = _VOP_(vop_rmdir, dp)((dp)->v_fbhv,d,cr)
+#define	VOP_READDIR(vp,uiop,cr,eofp,rv)					\
+	rv = _VOP_(vop_readdir, vp)((vp)->v_fbhv,uiop,cr,eofp)
+#define	VOP_SYMLINK(dvp,d,vap,tnm,vpp,cr,rv)				\
+	rv = _VOP_(vop_symlink, dvp) ((dvp)->v_fbhv,d,vap,tnm,vpp,cr)
+#define	VOP_READLINK(vp,uiop,fl,cr,rv)					\
+	rv = _VOP_(vop_readlink, vp)((vp)->v_fbhv,uiop,fl,cr)
+#define	VOP_FSYNC(vp,f,cr,b,e,rv)					\
+	rv = _VOP_(vop_fsync, vp)((vp)->v_fbhv,f,cr,b,e)
+#define VOP_INACTIVE(vp, cr, rv)					\
+	rv = _VOP_(vop_inactive, vp)((vp)->v_fbhv, cr)
+#define VOP_RELEASE(vp, rv)						\
+	rv = _VOP_(vop_release, vp)((vp)->v_fbhv)
+#define VOP_FID2(vp, fidp, rv)						\
+	rv = _VOP_(vop_fid2, vp)((vp)->v_fbhv, fidp)
+#define VOP_RWLOCK(vp,i)						\
+	(void)_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
+#define VOP_RWLOCK_TRY(vp,i)						\
+	_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
+#define VOP_RWUNLOCK(vp,i)						\
+	(void)_VOP_(vop_rwunlock, vp)((vp)->v_fbhv, i)
+#define VOP_FRLOCK(vp,c,fl,flags,offset,fr,rv)				\
+	rv = _VOP_(vop_frlock, vp)((vp)->v_fbhv,c,fl,flags,offset,fr)
+#define VOP_RECLAIM(vp, rv)						\
+	rv = _VOP_(vop_reclaim, vp)((vp)->v_fbhv)
+#define VOP_ATTR_GET(vp, name, val, vallenp, fl, cred, rv)		\
+	rv = _VOP_(vop_attr_get, vp)((vp)->v_fbhv,name,val,vallenp,fl,cred)
+#define	VOP_ATTR_SET(vp, name, val, vallen, fl, cred, rv)		\
+	rv = _VOP_(vop_attr_set, vp)((vp)->v_fbhv,name,val,vallen,fl,cred)
+#define	VOP_ATTR_REMOVE(vp, name, flags, cred, rv)			\
+	rv = _VOP_(vop_attr_remove, vp)((vp)->v_fbhv,name,flags,cred)
+#define	VOP_ATTR_LIST(vp, buf, buflen, fl, cursor, cred, rv)		\
+	rv = _VOP_(vop_attr_list, vp)((vp)->v_fbhv,buf,buflen,fl,cursor,cred)
+#define VOP_LINK_REMOVED(vp, dvp, linkzero)				\
+	(void)_VOP_(vop_link_removed, vp)((vp)->v_fbhv, dvp, linkzero)
+#define VOP_VNODE_CHANGE(vp, cmd, val)					\
+	(void)_VOP_(vop_vnode_change, vp)((vp)->v_fbhv,cmd,val)
+/*
+ * These are page cache functions that now go through VOPs.
+ * The 'last' parameter is unused and left in for IRIX compatibility.
+ */
+#define VOP_TOSS_PAGES(vp, first, last, fiopt)				\
+	_VOP_(vop_tosspages, vp)((vp)->v_fbhv,first, last, fiopt)
+/*
+ * The 'last' parameter is unused and left in for IRIX compatibility.
+ */
+#define VOP_FLUSHINVAL_PAGES(vp, first, last, fiopt)			\
+	_VOP_(vop_flushinval_pages, vp)((vp)->v_fbhv,first,last,fiopt)
+/*
+ * The 'last' parameter is unused and left in for IRIX compatibility.
+ */
+#define VOP_FLUSH_PAGES(vp, first, last, flags, fiopt, rv)		\
+	rv = _VOP_(vop_flush_pages, vp)((vp)->v_fbhv,first,last,flags,fiopt)
+#define VOP_IOCTL(vp, inode, filp, fl, cmd, arg, rv)			\
+	rv = _VOP_(vop_ioctl, vp)((vp)->v_fbhv,inode,filp,fl,cmd,arg)
+#define VOP_IFLUSH(vp, flags, rv)					\
+	rv = _VOP_(vop_iflush, vp)((vp)->v_fbhv, flags)
+
+/*
+ * Flags for read/write calls - same values as IRIX
+ */
+#define IO_ISAIO	0x00001		/* don't wait for completion */
+#define IO_ISDIRECT	0x00004		/* bypass page cache */
+#define IO_INVIS	0x00020		/* don't update inode timestamps */
+
+/*
+ * Flags for VOP_IFLUSH call
+ */
+#define FLUSH_SYNC		1	/* wait for flush to complete	*/
+#define FLUSH_INODE		2	/* flush the inode itself	*/
+#define FLUSH_LOG		4	/* force the last log entry for
+					 * this inode out to disk	*/
+
+/*
+ * Flush/Invalidate options for VOP_TOSS_PAGES, VOP_FLUSHINVAL_PAGES and
+ *	VOP_FLUSH_PAGES.
+ */
+#define FI_NONE			0	/* none */
+#define FI_REMAPF		1	/* Do a remapf prior to the operation */
+#define FI_REMAPF_LOCKED	2	/* Do a remapf prior to the operation.
+					   Prevent VM access to the pages until
+					   the operation completes. */
+
+/*
+ * Vnode attributes.  va_mask indicates those attributes the caller
+ * wants to set or extract.
+ */
+typedef struct vattr {
+	int		va_mask;	/* bit-mask of attributes present */
+	enum vtype	va_type;	/* vnode type (for create) */
+	mode_t		va_mode;	/* file access mode and type */
+	nlink_t		va_nlink;	/* number of references to file */
+	uid_t		va_uid;		/* owner user id */
+	gid_t		va_gid;		/* owner group id */
+	xfs_ino_t	va_nodeid;	/* file id */
+	xfs_off_t	va_size;	/* file size in bytes */
+	u_long		va_blocksize;	/* blocksize preferred for i/o */
+	struct timespec	va_atime;	/* time of last access */
+	struct timespec	va_mtime;	/* time of last modification */
+	struct timespec	va_ctime;	/* time file changed */
+	u_int		va_gen;		/* generation number of file */
+	xfs_dev_t	va_rdev;	/* device the special file represents */
+	__int64_t	va_nblocks;	/* number of blocks allocated */
+	u_long		va_xflags;	/* random extended file flags */
+	u_long		va_extsize;	/* file extent size */
+	u_long		va_nextents;	/* number of extents in file */
+	u_long		va_anextents;	/* number of attr extents in file */
+	int		va_projid;	/* project id */
+} vattr_t;
+
+/*
+ * setattr or getattr attributes
+ */
+#define XFS_AT_TYPE		0x00000001
+#define XFS_AT_MODE		0x00000002
+#define XFS_AT_UID		0x00000004
+#define XFS_AT_GID		0x00000008
+#define XFS_AT_FSID		0x00000010
+#define XFS_AT_NODEID		0x00000020
+#define XFS_AT_NLINK		0x00000040
+#define XFS_AT_SIZE		0x00000080
+#define XFS_AT_ATIME		0x00000100
+#define XFS_AT_MTIME		0x00000200
+#define XFS_AT_CTIME		0x00000400
+#define XFS_AT_RDEV		0x00000800
+#define XFS_AT_BLKSIZE		0x00001000
+#define XFS_AT_NBLOCKS		0x00002000
+#define XFS_AT_VCODE		0x00004000
+#define XFS_AT_MAC		0x00008000
+#define XFS_AT_UPDATIME		0x00010000
+#define XFS_AT_UPDMTIME		0x00020000
+#define XFS_AT_UPDCTIME		0x00040000
+#define XFS_AT_ACL		0x00080000
+#define XFS_AT_CAP		0x00100000
+#define XFS_AT_INF		0x00200000
+#define XFS_AT_XFLAGS		0x00400000
+#define XFS_AT_EXTSIZE		0x00800000
+#define XFS_AT_NEXTENTS		0x01000000
+#define XFS_AT_ANEXTENTS	0x02000000
+#define XFS_AT_PROJID		0x04000000
+#define XFS_AT_SIZE_NOPERM	0x08000000
+#define XFS_AT_GENCOUNT		0x10000000
+
+#define XFS_AT_ALL	(XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
+		XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
+		XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
+		XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\
+		XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\
+		XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT)
+
+#define XFS_AT_STAT	(XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
+		XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
+		XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
+		XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID)
+
+#define XFS_AT_TIMES	(XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME)
+
+#define XFS_AT_UPDTIMES	(XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME)
+
+#define XFS_AT_NOSET	(XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\
+		XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
+		XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
+
+/*
+ *  Modes.
+ */
+#define VSUID	S_ISUID		/* set user id on execution */
+#define VSGID	S_ISGID		/* set group id on execution */
+#define VSVTX	S_ISVTX		/* save swapped text even after use */
+#define VREAD	S_IRUSR		/* read, write, execute permissions */
+#define VWRITE	S_IWUSR
+#define VEXEC	S_IXUSR
+
+#define MODEMASK S_IALLUGO	/* mode bits plus permission bits */
+
+/*
+ * Check whether mandatory file locking is enabled.
+ */
+#define MANDLOCK(vp, mode)	\
+	((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
+
+extern void	vn_init(void);
+extern int	vn_wait(struct vnode *);
+extern vnode_t	*vn_initialize(struct inode *);
+
+/*
+ * Acquiring and invalidating vnodes:
+ *
+ *	if (vn_get(vp, version, 0))
+ *		...;
+ *	vn_purge(vp, version);
+ *
+ * vn_get and vn_purge must be called with vmap_t arguments, sampled
+ * while a lock that the vnode's VOP_RECLAIM function acquires is
+ * held, to ensure that the vnode sampled with the lock held isn't
+ * recycled (VOP_RECLAIMed) or deallocated between the release of the lock
+ * and the subsequent vn_get or vn_purge.
+ */
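+
+ * The comment above describes why vn_get()/vn_purge() take a vmap_t sampled
+ * under a lock: the snapshot carries the vnode's generation number, so a
+ * later caller can tell whether the vnode was recycled in the meantime.
+ * A minimal, self-contained sketch of that version-sampling idea (the
+ * demo_* names are invented for illustration and are not XFS code):
+ *
+ *	#include <stdio.h>
+ *
+ *	struct demo_obj {
+ *		unsigned long generation;
+ *	};
+ *
+ *	struct demo_map {
+ *		unsigned long generation;	/@ snapshot taken under the lock @/
+ *	};
+ *
+ *	static int demo_still_valid(const struct demo_obj *o,
+ *				    const struct demo_map *m)
+ *	{
+ *		return o->generation == m->generation;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct demo_obj o = { .generation = 7 };
+ *		struct demo_map m = { .generation = o.generation };
+ *
+ *		o.generation++;			/@ object recycled elsewhere @/
+ *		printf("valid: %d\n", demo_still_valid(&o, &m));
+ *		return 0;
+ *	}
+ *
+ * (Inner comments use /@ ... @/ only so the sketch can live inside this
+ * comment block; replace them with normal C comments when compiling.)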
+
+/*
+ * vnode_map structures _must_ match vn_epoch and vnode structure sizes.
+ */
+typedef struct vnode_map {
+	vfs_t		*v_vfsp;
+	vnumber_t	v_number;		/* in-core vnode number */
+	xfs_ino_t	v_ino;			/* inode #	*/
+} vmap_t;
+
+#define VMAP(vp, vmap)	{(vmap).v_vfsp	 = (vp)->v_vfsp,	\
+			 (vmap).v_number = (vp)->v_number,	\
+			 (vmap).v_ino	 = (vp)->v_inode.i_ino; }
+
+extern void	vn_purge(struct vnode *, vmap_t *);
+extern vnode_t	*vn_get(struct vnode *, vmap_t *);
+extern int	vn_revalidate(struct vnode *);
+extern void	vn_revalidate_core(struct vnode *, vattr_t *);
+extern void	vn_remove(struct vnode *);
+
+static inline int vn_count(struct vnode *vp)
+{
+	return atomic_read(&LINVFS_GET_IP(vp)->i_count);
+}
+
+/*
+ * Vnode reference counting functions (and macros for compatibility).
+ */
+extern vnode_t	*vn_hold(struct vnode *);
+extern void	vn_rele(struct vnode *);
+
+#if defined(XFS_VNODE_TRACE)
+#define VN_HOLD(vp)		\
+	((void)vn_hold(vp),	\
+	  vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address))
+#define VN_RELE(vp)		\
+	  (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \
+	   iput(LINVFS_GET_IP(vp)))
+#else
+#define VN_HOLD(vp)		((void)vn_hold(vp))
+#define VN_RELE(vp)		(iput(LINVFS_GET_IP(vp)))
+#endif
+
+/*
+ * Vname handling macros.
+ */
+#define VNAME(dentry)		((char *) (dentry)->d_name.name)
+#define VNAMELEN(dentry)	((dentry)->d_name.len)
+#define VNAME_TO_VNODE(dentry)	(LINVFS_GET_VP((dentry)->d_inode))
+
+/*
+ * Vnode spinlock manipulation.
+ */
+#define VN_LOCK(vp)		mutex_spinlock(&(vp)->v_lock)
+#define VN_UNLOCK(vp, s)	mutex_spinunlock(&(vp)->v_lock, s)
+#define VN_FLAGSET(vp,b)	vn_flagset(vp,b)
+#define VN_FLAGCLR(vp,b)	vn_flagclr(vp,b)
+
+static __inline__ void vn_flagset(struct vnode *vp, uint flag)
+{
+	spin_lock(&vp->v_lock);
+	vp->v_flag |= flag;
+	spin_unlock(&vp->v_lock);
+}
+
+static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
+{
+	spin_lock(&vp->v_lock);
+	vp->v_flag &= ~flag;
+	spin_unlock(&vp->v_lock);
+}
+
+/*
+ * Update modify/access/change times on the vnode
+ */
+#define VN_MTIMESET(vp, tvp)	(LINVFS_GET_IP(vp)->i_mtime = *(tvp))
+#define VN_ATIMESET(vp, tvp)	(LINVFS_GET_IP(vp)->i_atime = *(tvp))
+#define VN_CTIMESET(vp, tvp)	(LINVFS_GET_IP(vp)->i_ctime = *(tvp))
+
+/*
+ * Dealing with bad inodes
+ */
+static inline void vn_mark_bad(struct vnode *vp)
+{
+	make_bad_inode(LINVFS_GET_IP(vp));
+}
+
+static inline int VN_BAD(struct vnode *vp)
+{
+	return is_bad_inode(LINVFS_GET_IP(vp));
+}
+
+/*
+ * Some useful predicates.
+ */
+#define VN_MAPPED(vp)	mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
+#define VN_CACHED(vp)	(LINVFS_GET_IP(vp)->i_mapping->nrpages)
+#define VN_DIRTY(vp)	mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
+					PAGECACHE_TAG_DIRTY)
+#define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
+#define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
+
+/*
+ * Flags to VOP_SETATTR/VOP_GETATTR.
+ */
+#define	ATTR_UTIME	0x01	/* non-default utime(2) request */
+#define	ATTR_DMI	0x08	/* invocation from a DMI function */
+#define	ATTR_LAZY	0x80	/* set/get attributes lazily */
+#define	ATTR_NONBLOCK	0x100	/* return EAGAIN if operation would block */
+
+/*
+ * Flags to VOP_FSYNC and VOP_RECLAIM.
+ */
+#define FSYNC_NOWAIT	0	/* asynchronous flush */
+#define FSYNC_WAIT	0x1	/* synchronous fsync or forced reclaim */
+#define FSYNC_INVAL	0x2	/* flush and invalidate cached data */
+#define FSYNC_DATA	0x4	/* synchronous fsync of data only */
+
+/*
+ * Tracking vnode activity.
+ */
+#if defined(XFS_VNODE_TRACE)
+
+#define	VNODE_TRACE_SIZE	16		/* number of trace entries */
+#define	VNODE_KTRACE_ENTRY	1
+#define	VNODE_KTRACE_EXIT	2
+#define	VNODE_KTRACE_HOLD	3
+#define	VNODE_KTRACE_REF	4
+#define	VNODE_KTRACE_RELE	5
+
+extern void vn_trace_entry(struct vnode *, char *, inst_t *);
+extern void vn_trace_exit(struct vnode *, char *, inst_t *);
+extern void vn_trace_hold(struct vnode *, char *, int, inst_t *);
+extern void vn_trace_ref(struct vnode *, char *, int, inst_t *);
+extern void vn_trace_rele(struct vnode *, char *, int, inst_t *);
+
+#define	VN_TRACE(vp)		\
+	vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address)
+#else
+#define	vn_trace_entry(a,b,c)
+#define	vn_trace_exit(a,b,c)
+#define	vn_trace_hold(a,b,c,d)
+#define	vn_trace_ref(a,b,c,d)
+#define	vn_trace_rele(a,b,c,d)
+#define	VN_TRACE(vp)
+#endif
+
+#endif	/* __XFS_VNODE_H__ */
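
The IFTOVT()/VTTOIF() macros above rely on S_IFMT occupying the top four bits of the 16-bit mode word, so (mode & S_IFMT) >> 12 indexes a 16-entry table. A minimal, self-contained sketch of that lookup follows; the demo_* names are invented for illustration and are not the kernel tables.

/*
 * Illustrative sketch only: translate an S_IF* format to a vnode-type value
 * by table lookup, as IFTOVT() above does.
 */
#include <stdio.h>
#include <sys/stat.h>

enum demo_vtype { DVNON, DVREG, DVDIR, DVBLK, DVCHR, DVLNK, DVFIFO, DVBAD, DVSOCK };

static const enum demo_vtype demo_iftovt[16] = {
	DVNON, DVFIFO, DVCHR, DVNON, DVDIR,  DVNON, DVBLK, DVNON,
	DVREG, DVNON,  DVLNK, DVNON, DVSOCK, DVNON, DVNON, DVNON
};

int main(void)
{
	mode_t mode = S_IFDIR | 0755;

	/* Same indexing as IFTOVT(): keep the format bits, shift them down. */
	printf("vtype index = %d\n", demo_iftovt[(mode & S_IFMT) >> 12]);
	return 0;
}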
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
new file mode 100644
index 0000000..740d20d
--- /dev/null
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -0,0 +1,1648 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans_priv.h"
+
+#include "xfs_qm.h"
+
+
+/*
+   LOCK ORDER
+
+   inode lock		    (ilock)
+   dquot hash-chain lock    (hashlock)
+   xqm dquot freelist lock  (freelistlock)
+   mount's dquot list lock  (mplistlock)
+   user dquot lock - lock ordering among dquots is based on the uid or gid
+   group dquot lock - similar to udquots. Between the two dquots, the udquot
+		      has to be locked first.
+   pin lock - the dquot lock must be held to take this lock.
+   flush lock - ditto.
+*/
+
+STATIC void		xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
+
+#ifdef DEBUG
+xfs_buftarg_t *xfs_dqerror_target;
+int xfs_do_dqerror;
+int xfs_dqreq_num;
+int xfs_dqerror_mod = 33;
+#endif
+
+/*
+ * Allocate and initialize a dquot. We don't always allocate fresh memory;
+ * we try to reclaim a free dquot if the number of incore dquots is above
+ * a threshold.
+ * The only field inside the core that gets initialized at this point
+ * is the d_id field. The idea is to fill in the entire q_core
+ * when we read in the on disk dquot.
+ */
+xfs_dquot_t *
+xfs_qm_dqinit(
+	xfs_mount_t  *mp,
+	xfs_dqid_t   id,
+	uint	     type)
+{
+	xfs_dquot_t	*dqp;
+	boolean_t	brandnewdquot;
+
+	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
+	dqp->dq_flags = type;
+	INT_SET(dqp->q_core.d_id, ARCH_CONVERT, id);
+	dqp->q_mount = mp;
+
+	/*
+	 * No need to re-initialize these if this is a reclaimed dquot.
+	 */
+	if (brandnewdquot) {
+		dqp->dq_flnext = dqp->dq_flprev = dqp;
+		mutex_init(&dqp->q_qlock,  MUTEX_DEFAULT, "xdq");
+		initnsema(&dqp->q_flock, 1, "fdq");
+		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
+
+#ifdef XFS_DQUOT_TRACE
+		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
+		xfs_dqtrace_entry(dqp, "DQINIT");
+#endif
+	} else {
+		/*
+		 * Only the q_core portion was zeroed in dqreclaim_one().
+		 * So, we need to reset others.
+		 */
+		 dqp->q_nrefs = 0;
+		 dqp->q_blkno = 0;
+		 dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
+		 dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
+		 dqp->q_bufoffset = 0;
+		 dqp->q_fileoffset = 0;
+		 dqp->q_transp = NULL;
+		 dqp->q_gdquot = NULL;
+		 dqp->q_res_bcount = 0;
+		 dqp->q_res_icount = 0;
+		 dqp->q_res_rtbcount = 0;
+		 dqp->q_pincount = 0;
+		 dqp->q_hash = NULL;
+		 ASSERT(dqp->dq_flnext == dqp->dq_flprev);
+
+#ifdef XFS_DQUOT_TRACE
+		 ASSERT(dqp->q_trace);
+		 xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
+#endif
+	 }
+
+	/*
+	 * log item gets initialized later
+	 */
+	return (dqp);
+}
+
+/*
+ * This is called to free all the memory associated with a dquot
+ */
+void
+xfs_qm_dqdestroy(
+	xfs_dquot_t	*dqp)
+{
+	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
+
+	mutex_destroy(&dqp->q_qlock);
+	freesema(&dqp->q_flock);
+	sv_destroy(&dqp->q_pinwait);
+
+#ifdef XFS_DQUOT_TRACE
+	if (dqp->q_trace)
+	     ktrace_free(dqp->q_trace);
+	dqp->q_trace = NULL;
+#endif
+	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
+	atomic_dec(&xfs_Gqm->qm_totaldquots);
+}
+
+/*
+ * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
+ */
+STATIC void
+xfs_qm_dqinit_core(
+	xfs_dqid_t	id,
+	uint		type,
+	xfs_dqblk_t	*d)
+{
+	/*
+	 * Caller has zeroed the entire dquot 'chunk' already.
+	 */
+	INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
+	INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
+	INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
+	INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);
+}
+
+
+#ifdef XFS_DQUOT_TRACE
+/*
+ * Dquot tracing for debugging.
+ */
+/* ARGSUSED */
+void
+__xfs_dqtrace_entry(
+	xfs_dquot_t	*dqp,
+	char		*func,
+	void		*retaddr,
+	xfs_inode_t	*ip)
+{
+	xfs_dquot_t	*udqp = NULL;
+	xfs_ino_t	ino = 0;
+
+	ASSERT(dqp->q_trace);
+	if (ip) {
+		ino = ip->i_ino;
+		udqp = ip->i_udquot;
+	}
+	ktrace_enter(dqp->q_trace,
+		     (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
+		     (void *)func,
+		     (void *)(__psint_t)dqp->q_nrefs,
+		     (void *)(__psint_t)dqp->dq_flags,
+		     (void *)(__psint_t)dqp->q_res_bcount,
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_bcount,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_icount,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit,
+						ARCH_CONVERT),
+		     (void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
+		     (void *)(__psint_t)current_pid(),
+		     (void *)(__psint_t)ino,
+		     (void *)(__psint_t)retaddr,
+		     (void *)(__psint_t)udqp);
+	return;
+}
+#endif
+
+
+/*
+ * If default limits are in force, push them into the dquot now.
+ * We overwrite the dquot limits only if they are zero and this
+ * is not the root dquot.
+ */
+void
+xfs_qm_adjust_dqlimits(
+	xfs_mount_t		*mp,
+	xfs_disk_dquot_t	*d)
+{
+	xfs_quotainfo_t		*q = mp->m_quotainfo;
+
+	ASSERT(d->d_id);
+
+	if (q->qi_bsoftlimit && !d->d_blk_softlimit)
+		INT_SET(d->d_blk_softlimit, ARCH_CONVERT, q->qi_bsoftlimit);
+	if (q->qi_bhardlimit && !d->d_blk_hardlimit)
+		INT_SET(d->d_blk_hardlimit, ARCH_CONVERT, q->qi_bhardlimit);
+	if (q->qi_isoftlimit && !d->d_ino_softlimit)
+		INT_SET(d->d_ino_softlimit, ARCH_CONVERT, q->qi_isoftlimit);
+	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
+		INT_SET(d->d_ino_hardlimit, ARCH_CONVERT, q->qi_ihardlimit);
+	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
+		INT_SET(d->d_rtb_softlimit, ARCH_CONVERT, q->qi_rtbsoftlimit);
+	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
+		INT_SET(d->d_rtb_hardlimit, ARCH_CONVERT, q->qi_rtbhardlimit);
+}
+
+/*
+ * Check the limits and timers of a dquot and start or reset timers
+ * if necessary.
+ * This gets called even when quota enforcement is OFF, which makes our
+ * life a little less complicated. (We just don't reject any quota
+ * reservations in that case, when enforcement is off).
+ * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
+ * enforcement's off.
+ * In contrast, warnings are a little different in that they don't
+ * 'automatically' get started when limits get exceeded.
+ */
+void
+xfs_qm_adjust_dqtimers(
+	xfs_mount_t		*mp,
+	xfs_disk_dquot_t	*d)
+{
+	ASSERT(d->d_id);
+
+#ifdef QUOTADEBUG
+	if (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT))
+		ASSERT(INT_GET(d->d_blk_softlimit, ARCH_CONVERT) <=
+			INT_GET(d->d_blk_hardlimit, ARCH_CONVERT));
+	if (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT))
+		ASSERT(INT_GET(d->d_ino_softlimit, ARCH_CONVERT) <=
+			INT_GET(d->d_ino_hardlimit, ARCH_CONVERT));
+	if (INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT))
+		ASSERT(INT_GET(d->d_rtb_softlimit, ARCH_CONVERT) <=
+			INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT));
+#endif
+	if (!d->d_btimer) {
+		if ((INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_bcount, ARCH_CONVERT) >=
+				INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) ||
+		    (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_bcount, ARCH_CONVERT) >=
+				INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
+			INT_SET(d->d_btimer, ARCH_CONVERT,
+				get_seconds() + XFS_QI_BTIMELIMIT(mp));
+		}
+	} else {
+		if ((!d->d_blk_softlimit ||
+		    (INT_GET(d->d_bcount, ARCH_CONVERT) <
+				INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) &&
+		    (!d->d_blk_hardlimit ||
+		    (INT_GET(d->d_bcount, ARCH_CONVERT) <
+				INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
+			d->d_btimer = 0;
+		}
+	}
+
+	if (!d->d_itimer) {
+		if ((INT_GET(d->d_ino_softlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_icount, ARCH_CONVERT) >=
+				INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) ||
+		    (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_icount, ARCH_CONVERT) >=
+				INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
+			INT_SET(d->d_itimer, ARCH_CONVERT,
+				get_seconds() + XFS_QI_ITIMELIMIT(mp));
+		}
+	} else {
+		if ((!d->d_ino_softlimit ||
+		    (INT_GET(d->d_icount, ARCH_CONVERT) <
+				INT_GET(d->d_ino_softlimit, ARCH_CONVERT)))  &&
+		    (!d->d_ino_hardlimit ||
+		    (INT_GET(d->d_icount, ARCH_CONVERT) <
+				INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
+			d->d_itimer = 0;
+		}
+	}
+
+	if (!d->d_rtbtimer) {
+		if ((INT_GET(d->d_rtb_softlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
+				INT_GET(d->d_rtb_softlimit, ARCH_CONVERT))) ||
+		    (INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT) &&
+		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
+				INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT)))) {
+			INT_SET(d->d_rtbtimer, ARCH_CONVERT,
+				get_seconds() + XFS_QI_RTBTIMELIMIT(mp));
+		}
+	} else {
+		if ((!d->d_rtb_softlimit ||
+		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) <
+				INT_GET(d->d_rtb_softlimit, ARCH_CONVERT))) &&
+		    (!d->d_rtb_hardlimit ||
+		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) <
+				INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT)))) {
+			d->d_rtbtimer = 0;
+		}
+	}
+}
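+
+/*
+ * xfs_qm_adjust_dqtimers() above applies the same start/clear rule to the
+ * block, inode, and realtime-block timers: a timer starts once usage reaches
+ * a non-zero soft (or hard) limit, and is cleared again once usage is back
+ * under both.  A minimal, self-contained sketch of that rule for a single
+ * resource (names and the fixed grace period are invented for illustration;
+ * the real values come from the quotainfo structure):
+ *
+ *	#include <stdio.h>
+ *	#include <time.h>
+ *
+ *	struct demo_limits {
+ *		unsigned long long soft, hard, count;
+ *		time_t timer;			/@ 0 means "not running" @/
+ *	};
+ *
+ *	#define DEMO_GRACE	(7 * 24 * 60 * 60)
+ *
+ *	static void demo_adjust_timer(struct demo_limits *d)
+ *	{
+ *		int over = (d->soft && d->count >= d->soft) ||
+ *			   (d->hard && d->count >= d->hard);
+ *
+ *		if (!d->timer && over)
+ *			d->timer = time(NULL) + DEMO_GRACE;
+ *		else if (d->timer && !over)
+ *			d->timer = 0;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct demo_limits d = { .soft = 100, .hard = 200, .count = 150 };
+ *
+ *		demo_adjust_timer(&d);
+ *		printf("timer %s\n", d.timer ? "running" : "stopped");
+ *		d.count = 50;
+ *		demo_adjust_timer(&d);
+ *		printf("timer %s\n", d.timer ? "running" : "stopped");
+ *		return 0;
+ *	}
+ *
+ * (Inner comments use /@ ... @/ only so the sketch can live inside this
+ * comment block; replace them with normal C comments when compiling.)
+ */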
+
+/*
+ * Increment or reset warnings of a given dquot.
+ */
+int
+xfs_qm_dqwarn(
+	xfs_disk_dquot_t	*d,
+	uint			flags)
+{
+	int	warned;
+
+	/*
+	 * root's limits are not real limits.
+	 */
+	if (!d->d_id)
+		return (0);
+
+	warned = 0;
+	if (INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
+	    (INT_GET(d->d_bcount, ARCH_CONVERT) >=
+	     INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
+		if (flags & XFS_QMOPT_DOWARN) {
+			INT_MOD(d->d_bwarns, ARCH_CONVERT, +1);
+			warned++;
+		}
+	} else {
+		if (!d->d_blk_softlimit ||
+		    (INT_GET(d->d_bcount, ARCH_CONVERT) <
+		     INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
+			d->d_bwarns = 0;
+		}
+	}
+
+	if (INT_GET(d->d_ino_softlimit, ARCH_CONVERT) > 0 &&
+	    (INT_GET(d->d_icount, ARCH_CONVERT) >=
+	     INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
+		if (flags & XFS_QMOPT_DOWARN) {
+			INT_MOD(d->d_iwarns, ARCH_CONVERT, +1);
+			warned++;
+		}
+	} else {
+		if (!d->d_ino_softlimit ||
+		    (INT_GET(d->d_icount, ARCH_CONVERT) <
+		     INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
+			d->d_iwarns = 0;
+		}
+	}
+#ifdef QUOTADEBUG
+	if (INT_GET(d->d_iwarns, ARCH_CONVERT))
+		cmn_err(CE_DEBUG,
+			"--------@@Inode warnings running : %Lu >= %Lu",
+			INT_GET(d->d_icount, ARCH_CONVERT),
+			INT_GET(d->d_ino_softlimit, ARCH_CONVERT));
+	if (INT_GET(d->d_bwarns, ARCH_CONVERT))
+		cmn_err(CE_DEBUG,
+			"--------@@Blks warnings running : %Lu >= %Lu",
+			INT_GET(d->d_bcount, ARCH_CONVERT),
+			INT_GET(d->d_blk_softlimit, ARCH_CONVERT));
+#endif
+	return (warned);
+}
+
+
+/*
+ * initialize a buffer full of dquots and log the whole thing
+ */
+STATIC void
+xfs_qm_init_dquot_blk(
+	xfs_trans_t	*tp,
+	xfs_mount_t	*mp,
+	xfs_dqid_t	id,
+	uint		type,
+	xfs_buf_t	*bp)
+{
+	xfs_dqblk_t	*d;
+	int		curid, i;
+
+	ASSERT(tp);
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+
+	d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);
+
+	/*
+	 * ID of the first dquot in the block - IDs are zero-based.
+	 */
+	curid = id - (id % XFS_QM_DQPERBLK(mp));
+	ASSERT(curid >= 0);
+	memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
+	for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
+		xfs_qm_dqinit_core(curid, type, d);
+	xfs_trans_dquot_buf(tp, bp,
+			    type & XFS_DQ_USER ?
+			    XFS_BLI_UDQUOT_BUF :
+			    XFS_BLI_GDQUOT_BUF);
+	xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
+}
+
+
+
+/*
+ * Allocate a block and fill it with dquots.
+ * This is called when the bmapi finds a hole.
+ */
+STATIC int
+xfs_qm_dqalloc(
+	xfs_trans_t	*tp,
+	xfs_mount_t	*mp,
+	xfs_dquot_t	*dqp,
+	xfs_inode_t	*quotip,
+	xfs_fileoff_t	offset_fsb,
+	xfs_buf_t	**O_bpp)
+{
+	xfs_fsblock_t	firstblock;
+	xfs_bmap_free_t flist;
+	xfs_bmbt_irec_t map;
+	int		nmaps, error, committed;
+	xfs_buf_t	*bp;
+
+	ASSERT(tp != NULL);
+	xfs_dqtrace_entry(dqp, "DQALLOC");
+
+	/*
+	 * Initialize the bmap freelist prior to calling bmapi code.
+	 */
+	XFS_BMAP_INIT(&flist, &firstblock);
+	xfs_ilock(quotip, XFS_ILOCK_EXCL);
+	/*
+	 * Return if this type of quota was turned off while we didn't
+	 * hold the inode lock.
+	 */
+	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
+		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
+		return (ESRCH);
+	}
+
+	/*
+	 * xfs_trans_commit normally decrements the vnode ref count
+	 * when it unlocks the inode. Since we want to keep the quota
+	 * inode around, we bump the vnode ref count now.
+	 */
+	VN_HOLD(XFS_ITOV(quotip));
+
+	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
+	nmaps = 1;
+	if ((error = xfs_bmapi(tp, quotip,
+			      offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
+			      XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
+			      &firstblock,
+			      XFS_QM_DQALLOC_SPACE_RES(mp),
+			      &map, &nmaps, &flist))) {
+		goto error0;
+	}
+	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
+	ASSERT(nmaps == 1);
+	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+	       (map.br_startblock != HOLESTARTBLOCK));
+
+	/*
+	 * Keep track of the blkno to save a lookup later
+	 */
+	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+
+	/* now we can just get the buffer (there's nothing to read yet) */
+	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+			       dqp->q_blkno,
+			       XFS_QI_DQCHUNKLEN(mp),
+			       0);
+	if (!bp || (error = XFS_BUF_GETERROR(bp)))
+		goto error1;
+	/*
+	 * Make a chunk of dquots out of this buffer and log
+	 * the entire thing.
+	 */
+	xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
+			      dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP),
+			      bp);
+
+	if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) {
+		goto error1;
+	}
+
+	*O_bpp = bp;
+	return 0;
+
+      error1:
+	xfs_bmap_cancel(&flist);
+      error0:
+	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
+
+	return (error);
+}
+
+/*
+ * Maps a dquot to the buffer containing its on-disk version.
+ * This returns a ptr to the buffer containing the on-disk dquot
+ * in the bpp param, and a ptr to the on-disk dquot within that buffer.
+ */
+STATIC int
+xfs_qm_dqtobp(
+	xfs_trans_t		*tp,
+	xfs_dquot_t		*dqp,
+	xfs_disk_dquot_t	**O_ddpp,
+	xfs_buf_t		**O_bpp,
+	uint			flags)
+{
+	xfs_bmbt_irec_t map;
+	int		nmaps, error;
+	xfs_buf_t	*bp;
+	xfs_inode_t	*quotip;
+	xfs_mount_t	*mp;
+	xfs_disk_dquot_t *ddq;
+	xfs_dqid_t	id;
+	boolean_t	newdquot;
+
+	mp = dqp->q_mount;
+	id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
+	nmaps = 1;
+	newdquot = B_FALSE;
+
+	/*
+	 * If we don't know where the dquot lives, find out.
+	 */
+	if (dqp->q_blkno == (xfs_daddr_t) 0) {
+		/* We use the id as an index */
+		dqp->q_fileoffset = (xfs_fileoff_t) ((uint)id /
+						     XFS_QM_DQPERBLK(mp));
+		nmaps = 1;
+		quotip = XFS_DQ_TO_QIP(dqp);
+		xfs_ilock(quotip, XFS_ILOCK_SHARED);
+		/*
+		 * Return if this type of quota was turned off while we didn't
+		 * hold the inode lock.
+		 */
+		if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
+			xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+			return (ESRCH);
+		}
+		/*
+		 * Find the block map; no allocations yet
+		 */
+		error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
+				  XFS_DQUOT_CLUSTER_SIZE_FSB,
+				  XFS_BMAPI_METADATA,
+				  NULL, 0, &map, &nmaps, NULL);
+
+		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+		if (error)
+			return (error);
+		ASSERT(nmaps == 1);
+		ASSERT(map.br_blockcount == 1);
+
+		/*
+		 * offset of dquot in the (fixed sized) dquot chunk.
+		 */
+		dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
+			sizeof(xfs_dqblk_t);
+		if (map.br_startblock == HOLESTARTBLOCK) {
+			/*
+			 * We don't allocate unless we're asked to
+			 */
+			if (!(flags & XFS_QMOPT_DQALLOC))
+				return (ENOENT);
+
+			ASSERT(tp);
+			if ((error = xfs_qm_dqalloc(tp, mp, dqp, quotip,
+						dqp->q_fileoffset, &bp)))
+				return (error);
+			newdquot = B_TRUE;
+		} else {
+			/*
+			 * store the blkno etc so that we don't have to do the
+			 * mapping all the time
+			 */
+			dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+		}
+	}
+	ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
+	ASSERT(dqp->q_blkno != HOLESTARTBLOCK);
+
+	/*
+	 * Read in the buffer, unless we've just done the allocation
+	 * (in which case we already have the buf).
+	 */
+	if (! newdquot) {
+		xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
+		if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+					       dqp->q_blkno,
+					       XFS_QI_DQCHUNKLEN(mp),
+					       0, &bp))) {
+			return (error);
+		}
+		if (error || !bp)
+			return XFS_ERROR(error);
+	}
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+
+	/*
+	 * calculate the location of the dquot inside the buffer.
+	 */
+	ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset);
+
+	/*
+	 * A simple sanity check in case we got a corrupted dquot...
+	 */
+	if (xfs_qm_dqcheck(ddq, id,
+			   dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP),
+			   flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
+			   "dqtobp")) {
+		if (!(flags & XFS_QMOPT_DQREPAIR)) {
+			xfs_trans_brelse(tp, bp);
+			return XFS_ERROR(EIO);
+		}
+		XFS_BUF_BUSY(bp); /* We dirtied this */
+	}
+
+	*O_bpp = bp;
+	*O_ddpp = ddq;
+
+	return (0);
+}
+
+
+/*
+ * Read in the on-disk dquot using dqtobp(), then copy it to an incore version,
+ * and release the buffer immediately.
+ *
+ */
+/* ARGSUSED */
+STATIC int
+xfs_qm_dqread(
+	xfs_trans_t	*tp,
+	xfs_dqid_t	id,
+	xfs_dquot_t	*dqp,	/* dquot to get filled in */
+	uint		flags)
+{
+	xfs_disk_dquot_t *ddqp;
+	xfs_buf_t	 *bp;
+	int		 error;
+
+	/*
+	 * Get a pointer to the on-disk dquot and the buffer containing it;
+	 * dqp already knows its own type (GROUP/USER).
+	 */
+	xfs_dqtrace_entry(dqp, "DQREAD");
+	if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) {
+		return (error);
+	}
+
+	/* copy everything from disk dquot to the incore dquot */
+	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
+	ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id);
+	xfs_qm_dquot_logitem_init(dqp);
+
+	/*
+	 * Reservation counters are defined as reservation plus current usage
+	 * to avoid having to add them up every time.
+	 */
+	dqp->q_res_bcount = INT_GET(ddqp->d_bcount, ARCH_CONVERT);
+	dqp->q_res_icount = INT_GET(ddqp->d_icount, ARCH_CONVERT);
+	dqp->q_res_rtbcount = INT_GET(ddqp->d_rtbcount, ARCH_CONVERT);
+
+	/* Mark the buf so that this will stay incore a little longer */
+	XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF);
+
+	/*
+	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
+	 * so we need to release it with xfs_trans_brelse().
+	 * The strategy here is identical to that of inodes; we lock
+	 * the dquot in xfs_qm_dqget() before making it accessible to
+	 * others. This is because dquots, like inodes, need a good level of
+	 * concurrency, and we don't want to take locks on the entire buffer
+	 * for dquot accesses.
+	 * Note also that the dquot buffer may even be dirty at this point, if
+	 * this particular dquot was repaired. We still aren't afraid to
+	 * brelse it because we have the changes incore.
+	 */
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+	xfs_trans_brelse(tp, bp);
+
+	return (error);
+}
+
+
+/*
+ * Allocate an incore dquot from the kernel heap and fill its core
+ * with quota information kept on disk.
+ * If XFS_QMOPT_DQALLOC is set, a dquot is also allocated on disk
+ * if one wasn't already allocated.
+ */
+STATIC int
+xfs_qm_idtodq(
+	xfs_mount_t	*mp,
+	xfs_dqid_t	id,	 /* gid or uid, depending on type */
+	uint		type,	 /* UDQUOT or GDQUOT */
+	uint		flags,	 /* DQALLOC, DQREPAIR */
+	xfs_dquot_t	**O_dqpp)/* OUT : incore dquot, not locked */
+{
+	xfs_dquot_t	*dqp;
+	int		error;
+	xfs_trans_t	*tp;
+	int		cancelflags=0;
+
+	dqp = xfs_qm_dqinit(mp, id, type);
+	tp = NULL;
+	if (flags & XFS_QMOPT_DQALLOC) {
+		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
+		if ((error = xfs_trans_reserve(tp,
+				       XFS_QM_DQALLOC_SPACE_RES(mp),
+				       XFS_WRITE_LOG_RES(mp) +
+					      BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +
+					      128,
+				       0,
+				       XFS_TRANS_PERM_LOG_RES,
+				       XFS_WRITE_LOG_COUNT))) {
+			cancelflags = 0;
+			goto error0;
+		}
+		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+	}
+
+	/*
+	 * Read it from disk; xfs_qm_dqread() takes care of
+	 * all the necessary initialization of the dquot's fields (locks, etc).
+	 */
+	if ((error = xfs_qm_dqread(tp, id, dqp, flags))) {
+		/*
+		 * This can happen if quotas got turned off (ESRCH),
+		 * or if the dquot didn't exist on disk and we ask to
+		 * allocate (ENOENT).
+		 */
+		xfs_dqtrace_entry(dqp, "DQREAD FAIL");
+		cancelflags |= XFS_TRANS_ABORT;
+		goto error0;
+	}
+	if (tp) {
+		if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
+					     NULL)))
+			goto error1;
+	}
+
+	*O_dqpp = dqp;
+	return (0);
+
+ error0:
+	ASSERT(error);
+	if (tp)
+		xfs_trans_cancel(tp, cancelflags);
+ error1:
+	xfs_qm_dqdestroy(dqp);
+	*O_dqpp = NULL;
+	return (error);
+}
+
+/*
+ * Look up a dquot in the incore dquot hashtable. We keep two separate
+ * hashtables for user and group dquots; these are global tables
+ * inside the XQM, not per-filesystem tables.
+ * The hash chain must be locked by the caller, and it is left locked
+ * on return. The returned dquot is locked.
+ */
+STATIC int
+xfs_qm_dqlookup(
+	xfs_mount_t		*mp,
+	xfs_dqid_t		id,
+	xfs_dqhash_t		*qh,
+	xfs_dquot_t		**O_dqpp)
+{
+	xfs_dquot_t		*dqp;
+	uint			flist_locked;
+	xfs_dquot_t		*d;
+
+	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
+
+	flist_locked = B_FALSE;
+
+	/*
+	 * Traverse the hashchain looking for a match
+	 */
+	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
+		/*
+		 * We already have the hashlock. We don't need the
+		 * dqlock to look at the id field of the dquot, since the
+		 * id can't be modified without the hashlock anyway.
+		 */
+		if (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id && dqp->q_mount == mp) {
+			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
+			/*
+			 * All in core dquots must be on the dqlist of mp
+			 */
+			ASSERT(dqp->MPL_PREVP != NULL);
+
+			xfs_dqlock(dqp);
+			if (dqp->q_nrefs == 0) {
+				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
+				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");
+
+					/*
+					 * We may have raced with dqreclaim_one()
+					 * (and lost). So, flag that we don't
+					 * want the dquot to be reclaimed.
+					 */
+					dqp->dq_flags |= XFS_DQ_WANT;
+					xfs_dqunlock(dqp);
+					xfs_qm_freelist_lock(xfs_Gqm);
+					xfs_dqlock(dqp);
+					dqp->dq_flags &= ~(XFS_DQ_WANT);
+				}
+				flist_locked = B_TRUE;
+			}
+
+			/*
+			 * id couldn't have changed; we had the hashlock all
+			 * along
+			 */
+			ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id);
+
+			if (flist_locked) {
+				if (dqp->q_nrefs != 0) {
+					xfs_qm_freelist_unlock(xfs_Gqm);
+					flist_locked = B_FALSE;
+				} else {
+					/*
+					 * take it off the freelist
+					 */
+					xfs_dqtrace_entry(dqp,
+							"DQLOOKUP: TAKEOFF FL");
+					XQM_FREELIST_REMOVE(dqp);
+					/* xfs_qm_freelist_print(&(xfs_Gqm->
+							qm_dqfreelist),
+							"after removal"); */
+				}
+			}
+
+			/*
+			 * grab a reference
+			 */
+			XFS_DQHOLD(dqp);
+
+			if (flist_locked)
+				xfs_qm_freelist_unlock(xfs_Gqm);
+			/*
+			 * move the dquot to the front of the hashchain
+			 */
+			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
+			if (dqp->HL_PREVP != &qh->qh_next) {
+				xfs_dqtrace_entry(dqp,
+						  "DQLOOKUP: HASH MOVETOFRONT");
+				if ((d = dqp->HL_NEXT))
+					d->HL_PREVP = dqp->HL_PREVP;
+				*(dqp->HL_PREVP) = d;
+				d = qh->qh_next;
+				d->HL_PREVP = &dqp->HL_NEXT;
+				dqp->HL_NEXT = d;
+				dqp->HL_PREVP = &qh->qh_next;
+				qh->qh_next = dqp;
+			}
+			xfs_dqtrace_entry(dqp, "LOOKUP END");
+			*O_dqpp = dqp;
+			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
+			return (0);
+		}
+	}
+
+	*O_dqpp = NULL;
+	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
+	return (1);
+}
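
The hash chains walked above link entries through a pointer to the previous element's next-pointer (HL_PREVP) rather than a back-pointer to a node, so unlinking and the move-to-front step work the same whether or not the entry is first on the chain. Below is a small userspace sketch of that list shape, with illustrative names and no locking; it is not kernel code.

#include <stdio.h>

/* Each node records where the pointer that points at it lives (prevp),
 * so unlinking does not care whether the predecessor is a node or the
 * list head. */
struct node {
	struct node	*next;
	struct node	**prevp;
	int		id;
};

struct chain {
	struct node	*head;
};

static void push_front(struct chain *qh, struct node *n)
{
	struct node *d = qh->head;

	if (d)
		d->prevp = &n->next;
	n->next = d;
	n->prevp = &qh->head;
	qh->head = n;
}

static void move_to_front(struct chain *qh, struct node *n)
{
	struct node *d;

	if (n->prevp == &qh->head)
		return;				/* already at the front */
	if ((d = n->next))
		d->prevp = n->prevp;		/* successor inherits our prevp */
	*(n->prevp) = d;			/* unlink n */
	d = qh->head;				/* non-NULL: n wasn't the head */
	d->prevp = &n->next;
	n->next = d;
	n->prevp = &qh->head;
	qh->head = n;
}

int main(void)
{
	struct chain qh = { NULL };
	struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *p;

	push_front(&qh, &a);
	push_front(&qh, &b);
	push_front(&qh, &c);		/* chain: 3 2 1 */
	move_to_front(&qh, &a);		/* chain: 1 3 2 */
	for (p = qh.head; p; p = p->next)
		printf("%d ", p->id);
	printf("\n");
	return 0;
}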
+
+/*
+ * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
+ * a locked dquot, doing an allocation (if requested) as needed.
+ * When both an inode and an id are given, the inode's id takes precedence.
+ * That is, if the id changes while we don't hold the ilock inside this
+ * function, the new dquot is returned, not necessarily the one requested
+ * in the id argument.
+ */
+int
+xfs_qm_dqget(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip,	  /* locked inode (optional) */
+	xfs_dqid_t	id,	  /* gid or uid, depending on type */
+	uint		type,	  /* UDQUOT or GDQUOT */
+	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
+	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
+{
+	xfs_dquot_t	*dqp;
+	xfs_dqhash_t	*h;
+	uint		version;
+	int		error;
+
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
+	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
+		return (ESRCH);
+	}
+	h = XFS_DQ_HASH(mp, id, type);
+
+#ifdef DEBUG
+	if (xfs_do_dqerror) {
+		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
+		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
+			cmn_err(CE_DEBUG, "Returning error in dqget");
+			return (EIO);
+		}
+	}
+#endif
+
+ again:
+
+#ifdef DEBUG
+	ASSERT(type == XFS_DQ_USER || type == XFS_DQ_GROUP);
+	if (ip) {
+		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+		if (type == XFS_DQ_USER)
+			ASSERT(ip->i_udquot == NULL);
+		else
+			ASSERT(ip->i_gdquot == NULL);
+	}
+#endif
+	XFS_DQ_HASH_LOCK(h);
+
+	/*
+	 * Look in the cache (hashtable).
+	 * The chain is kept locked during lookup.
+	 */
+	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
+		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
+		/*
+		 * The dquot was found, moved to the front of the chain,
+		 * taken off the freelist if it was on it, and locked
+		 * at this point. Just unlock the hashchain and return.
+		 */
+		ASSERT(*O_dqpp);
+		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
+		XFS_DQ_HASH_UNLOCK(h);
+		xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
+		return (0);	/* success */
+	}
+	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+
+	/*
+	 * Dquot cache miss. We don't want to keep the inode lock across
+	 * a (potential) disk read. Also we don't want to deal with the lock
+	 * ordering between quotainode and this inode. OTOH, dropping the inode
+	 * lock here means dealing with a chown that can happen before
+	 * we re-acquire the lock.
+	 */
+	if (ip)
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	/*
+	 * Save the hashchain version stamp, and unlock the chain, so that
+	 * we don't keep the lock across a disk read
+	 */
+	version = h->qh_version;
+	XFS_DQ_HASH_UNLOCK(h);
+
+	/*
+	 * Allocate the dquot on the kernel heap, and read the ondisk
+	 * portion off the disk. Also, do all the necessary initialization
+	 * portion off the disk. Also, do all the necessary initialization.
+	 * This can return ENOENT if the dquot didn't exist on disk and we
+	 * didn't ask it to allocate, or ESRCH if quotas got turned off suddenly.
+	if ((error = xfs_qm_idtodq(mp, id, type,
+				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
+					   XFS_QMOPT_DOWARN),
+				  &dqp))) {
+		if (ip)
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		return (error);
+	}
+
+	/*
+	 * See if this is mount code calling to look at the overall quota limits
+	 * which are stored in the id == 0 user or group's dquot.
+	 * Since we may not have done a quotacheck by this point, just return
+	 * the dquot without attaching it to any hashtables, lists, etc, or even
+	 * taking a reference.
+	 * The caller must dqdestroy this once done.
+	 */
+	if (flags & XFS_QMOPT_DQSUSER) {
+		ASSERT(id == 0);
+		ASSERT(! ip);
+		goto dqret;
+	}
+
+	/*
+	 * Dquot lock comes after hashlock in the lock ordering
+	 */
+	if (ip) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		if (! XFS_IS_DQTYPE_ON(mp, type)) {
+			/* inode stays locked on return */
+			xfs_qm_dqdestroy(dqp);
+			return XFS_ERROR(ESRCH);
+		}
+		/*
+		 * A dquot could be attached to this inode by now, since
+		 * we had dropped the ilock.
+		 */
+		if (type == XFS_DQ_USER) {
+			if (ip->i_udquot) {
+				xfs_qm_dqdestroy(dqp);
+				dqp = ip->i_udquot;
+				xfs_dqlock(dqp);
+				goto dqret;
+			}
+		} else {
+			if (ip->i_gdquot) {
+				xfs_qm_dqdestroy(dqp);
+				dqp = ip->i_gdquot;
+				xfs_dqlock(dqp);
+				goto dqret;
+			}
+		}
+	}
+
+	/*
+	 * Hashlock comes after ilock in lock order
+	 */
+	XFS_DQ_HASH_LOCK(h);
+	if (version != h->qh_version) {
+		xfs_dquot_t *tmpdqp;
+		/*
+		 * Now, see if somebody else put the dquot in the
+		 * hashtable before us. This can happen because we didn't
+		 * keep the hashchain lock. We don't have to worry about
+		 * lock order between the two dquots here since dqp isn't
+		 * on any findable lists yet.
+		 */
+		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
+			/*
+			 * Duplicate found. Just throw away the new dquot
+			 * and start over.
+			 */
+			xfs_qm_dqput(tmpdqp);
+			XFS_DQ_HASH_UNLOCK(h);
+			xfs_qm_dqdestroy(dqp);
+			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+			goto again;
+		}
+	}
+
+	/*
+	 * Put the dquot at the beginning of the hash-chain and mp's list
+	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
+	 */
+	ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
+	dqp->q_hash = h;
+	XQM_HASHLIST_INSERT(h, dqp);
+
+	/*
+	 * Attach this dquot to this filesystem's list of all dquots,
+	 * kept inside the mount structure in m_quotainfo field
+	 */
+	xfs_qm_mplist_lock(mp);
+
+	/*
+	 * We return a locked dquot to the caller, with a reference taken
+	 */
+	xfs_dqlock(dqp);
+	dqp->q_nrefs = 1;
+
+	XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
+
+	xfs_qm_mplist_unlock(mp);
+	XFS_DQ_HASH_UNLOCK(h);
+ dqret:
+	ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
+	xfs_dqtrace_entry(dqp, "DQGET DONE");
+	*O_dqpp = dqp;
+	return (0);
+}
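
xfs_qm_dqget() above avoids holding the hash-chain lock across the disk read by recording qh_version before unlocking, then re-checking for a racing insert afterwards. The toy cache below sketches just that version-stamp pattern; the structure and helper names are hypothetical, and the kernel code additionally juggles the inode lock, the freelist, and the mount list.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry	*next;
	int		id;
};

struct chain {
	pthread_mutex_t	lock;
	unsigned int	version;	/* bumped on every insert */
	struct entry	*head;
};

static struct entry *lookup_locked(struct chain *c, int id)
{
	struct entry *e;

	for (e = c->head; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

static struct entry *slow_build(int id)		/* stands in for the disk read */
{
	struct entry *e = calloc(1, sizeof(*e));

	e->id = id;
	return e;
}

static struct entry *get(struct chain *c, int id)
{
	struct entry *e;
	unsigned int ver;

again:
	pthread_mutex_lock(&c->lock);
	if ((e = lookup_locked(c, id))) {
		pthread_mutex_unlock(&c->lock);
		return e;			/* cache hit */
	}
	ver = c->version;			/* remember the chain version */
	pthread_mutex_unlock(&c->lock);

	e = slow_build(id);			/* no lock held across the slow part */

	pthread_mutex_lock(&c->lock);
	if (ver != c->version && lookup_locked(c, id)) {
		/* Somebody raced us and inserted the same id: discard ours. */
		pthread_mutex_unlock(&c->lock);
		free(e);
		goto again;
	}
	e->next = c->head;			/* insert and bump the version */
	c->head = e;
	c->version++;
	pthread_mutex_unlock(&c->lock);
	return e;
}

int main(void)
{
	struct chain c = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

	printf("got id %d\n", get(&c, 7)->id);
	printf("got id %d (cached)\n", get(&c, 7)->id);
	return 0;
}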
+
+
+/*
+ * Release a reference to the dquot (decrement ref-count)
+ * and unlock it. If there is a group quota attached to this
+ * dquot, carefully release that too without tripping over
+ * deadlocks'n'stuff.
+ */
+void
+xfs_qm_dqput(
+	xfs_dquot_t	*dqp)
+{
+	xfs_dquot_t	*gdqp;
+
+	ASSERT(dqp->q_nrefs > 0);
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	xfs_dqtrace_entry(dqp, "DQPUT");
+
+	if (dqp->q_nrefs != 1) {
+		dqp->q_nrefs--;
+		xfs_dqunlock(dqp);
+		return;
+	}
+
+	/*
+	 * drop the dqlock and acquire the freelist and dqlock
+	 * in the right order; but try to get it out-of-order first
+	 */
+	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
+		xfs_dqunlock(dqp);
+		xfs_qm_freelist_lock(xfs_Gqm);
+		xfs_dqlock(dqp);
+	}
+
+	while (1) {
+		gdqp = NULL;
+
+		/* We can't depend on nrefs being == 1 here */
+		if (--dqp->q_nrefs == 0) {
+			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
+			/*
+			 * insert at end of the freelist.
+			 */
+			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);
+
+			/*
+			 * If we just added a udquot to the freelist, then
+			 * we want to release the gdquot reference that
+			 * it (probably) has. Otherwise it'll keep the
+			 * gdquot from getting reclaimed.
+			 */
+			if ((gdqp = dqp->q_gdquot)) {
+				/*
+				 * Avoid a recursive dqput call
+				 */
+				xfs_dqlock(gdqp);
+				dqp->q_gdquot = NULL;
+			}
+
+			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
+			   "@@@@@++ Free list (after append) @@@@@+");
+			   */
+		}
+		xfs_dqunlock(dqp);
+
+		/*
+		 * If we had a group quota inside the user quota as a hint,
+		 * release it now.
+		 */
+		if (! gdqp)
+			break;
+		dqp = gdqp;
+	}
+	xfs_qm_freelist_unlock(xfs_Gqm);
+}
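
xfs_qm_dqput() drops the reference a user dquot holds on its group dquot with a loop rather than a recursive call: it detaches the hint and then treats it as the next object to release. A minimal sketch of that shape follows, with illustrative types and no locking.

#include <stdio.h>

/* A refcounted object that may hold a reference on another one, like the
 * udquot -> gdquot hint above. */
struct obj {
	int		nrefs;
	struct obj	*attached;
	const char	*name;
};

/* Drop one reference; when the count hits zero, also hand back the
 * reference held on the attached object.  A loop, not recursion, mirroring
 * the while (1) { ... dqp = gdqp; } shape of xfs_qm_dqput(). */
static void put(struct obj *o)
{
	while (o) {
		struct obj *next = NULL;

		if (--o->nrefs == 0) {
			printf("%s is now unreferenced\n", o->name);
			next = o->attached;	/* release its reference too */
			o->attached = NULL;
		}
		o = next;
	}
}

int main(void)
{
	struct obj g = { .nrefs = 1, .name = "group" };
	struct obj u = { .nrefs = 1, .attached = &g, .name = "user" };

	put(&u);	/* releases "user" and the hint it held on "group" */
	return 0;
}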
+
+/*
+ * Release a dquot. Flush it if dirty, then dqput() it.
+ * dquot must not be locked.
+ */
+void
+xfs_qm_dqrele(
+	xfs_dquot_t	*dqp)
+{
+	ASSERT(dqp);
+	xfs_dqtrace_entry(dqp, "DQRELE");
+
+	xfs_dqlock(dqp);
+	/*
+	 * We don't bother flushing the dquot here, even if it is dirty;
+	 * that would create stutters that we want to avoid.
+	 * Instead we do a delayed write when we try to reclaim
+	 * a dirty dquot. Also, xfs_sync will take part of the burden...
+	 */
+	xfs_qm_dqput(dqp);
+}
+
+
+/*
+ * Write a modified dquot to disk.
+ * The caller must hold both the dquot lock and the flush lock.
+ * The flush lock will not be unlocked until the dquot reaches the disk,
+ * but the dquot is free to be unlocked and modified by the caller
+ * in the interim. The dquot is still locked on return. This behavior is
+ * identical to that of inodes.
+ */
+int
+xfs_qm_dqflush(
+	xfs_dquot_t		*dqp,
+	uint			flags)
+{
+	xfs_mount_t		*mp;
+	xfs_buf_t		*bp;
+	xfs_disk_dquot_t	*ddqp;
+	int			error;
+	SPLDECL(s);
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+	xfs_dqtrace_entry(dqp, "DQFLUSH");
+
+	/*
+	 * If not dirty, nada.
+	 */
+	if (!XFS_DQ_IS_DIRTY(dqp)) {
+		xfs_dqfunlock(dqp);
+		return (0);
+	}
+
+	/*
+	 * Can't flush a pinned dquot. Wait for it.
+	 */
+	xfs_qm_dqunpin_wait(dqp);
+
+	/*
+	 * This may have been unpinned because the filesystem is shutting
+	 * down forcibly. If that's the case we must not write this dquot
+	 * to disk, because the log record didn't make it to disk!
+	 */
+	if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) {
+		dqp->dq_flags &= ~(XFS_DQ_DIRTY);
+		xfs_dqfunlock(dqp);
+		return XFS_ERROR(EIO);
+	}
+
+	/*
+	 * Get the buffer containing the on-disk dquot.
+	 * We don't need a transaction envelope because we know that the
+	 * ondisk dquot has already been allocated.
+	 */
+	if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
+		xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
+		ASSERT(error != ENOENT);
+		/*
+		 * Quotas could have gotten turned off (ESRCH)
+		 */
+		xfs_dqfunlock(dqp);
+		return (error);
+	}
+
+	if (xfs_qm_dqcheck(&dqp->q_core, INT_GET(ddqp->d_id, ARCH_CONVERT), 0, XFS_QMOPT_DOWARN,
+			   "dqflush (incore copy)")) {
+		xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE);
+		return XFS_ERROR(EIO);
+	}
+
+	/* This is the only portion of data that needs to persist */
+	memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t));
+
+	/*
+	 * Clear the dirty field and remember the flush lsn for later use.
+	 */
+	dqp->dq_flags &= ~(XFS_DQ_DIRTY);
+	mp = dqp->q_mount;
+
+	/* lsn is 64 bits */
+	AIL_LOCK(mp, s);
+	dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
+	AIL_UNLOCK(mp, s);
+
+	/*
+	 * Attach an iodone routine so that we can remove this dquot from the
+	 * AIL and release the flush lock once the dquot is synced to disk.
+	 */
+	xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
+			      xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));
+	/*
+	 * If the buffer is pinned then push on the log so we won't
+	 * get stuck waiting in the write for too long.
+	 */
+	if (XFS_BUF_ISPINNED(bp)) {
+		xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+	}
+
+	if (flags & XFS_QMOPT_DELWRI) {
+		xfs_bdwrite(mp, bp);
+	} else if (flags & XFS_QMOPT_ASYNC) {
+		xfs_bawrite(mp, bp);
+	} else {
+		error = xfs_bwrite(mp, bp);
+	}
+	xfs_dqtrace_entry(dqp, "DQFLUSH END");
+	/*
+	 * dqp is still locked, but caller is free to unlock it now.
+	 */
+	return (error);
+
+}
+
+/*
+ * This is the dquot flushing I/O completion routine.  It is called
+ * from interrupt level when the buffer containing the dquot is
+ * flushed to disk.  It is responsible for removing the dquot logitem
+ * from the AIL if it has not been re-logged, and unlocking the dquot's
+ * flush lock. This behavior is very similar to that of inodes.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_qm_dqflush_done(
+	xfs_buf_t		*bp,
+	xfs_dq_logitem_t	*qip)
+{
+	xfs_dquot_t		*dqp;
+	SPLDECL(s);
+
+	dqp = qip->qli_dquot;
+
+	/*
+	 * We only want to pull the item from the AIL if its
+	 * location in the log has not changed since we started the flush.
+	 * Thus, we only bother if the dquot's lsn has
+	 * not changed. First we check the lsn outside the lock
+	 * since it's cheaper, and then we recheck while
+	 * holding the lock before removing the dquot from the AIL.
+	 */
+	if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
+	    qip->qli_item.li_lsn == qip->qli_flush_lsn) {
+
+		AIL_LOCK(dqp->q_mount, s);
+		/*
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
+			xfs_trans_delete_ail(dqp->q_mount,
+					     (xfs_log_item_t*)qip, s);
+		else
+			AIL_UNLOCK(dqp->q_mount, s);
+	}
+
+	/*
+	 * Release the dq's flush lock since we're done with it.
+	 */
+	xfs_dqfunlock(dqp);
+}
+
+
+int
+xfs_qm_dqflock_nowait(
+	xfs_dquot_t *dqp)
+{
+	int locked;
+
+	locked = cpsema(&((dqp)->q_flock));
+
+	/* XXX ifdef these out */
+	if (locked)
+		(dqp)->dq_flags |= XFS_DQ_FLOCKED;
+	return (locked);
+}
+
+
+int
+xfs_qm_dqlock_nowait(
+	xfs_dquot_t *dqp)
+{
+	return (mutex_trylock(&((dqp)->q_qlock)));
+}
+
+void
+xfs_dqlock(
+	xfs_dquot_t *dqp)
+{
+	mutex_lock(&(dqp->q_qlock), PINOD);
+}
+
+void
+xfs_dqunlock(
+	xfs_dquot_t *dqp)
+{
+	mutex_unlock(&(dqp->q_qlock));
+	if (dqp->q_logitem.qli_dquot == dqp) {
+		/* Once was dqp->q_mount, but might just have been cleared */
+		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp,
+					(xfs_log_item_t*)&(dqp->q_logitem));
+	}
+}
+
+
+void
+xfs_dqunlock_nonotify(
+	xfs_dquot_t *dqp)
+{
+	mutex_unlock(&(dqp->q_qlock));
+}
+
+void
+xfs_dqlock2(
+	xfs_dquot_t	*d1,
+	xfs_dquot_t	*d2)
+{
+	if (d1 && d2) {
+		ASSERT(d1 != d2);
+		if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) > INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
+			xfs_dqlock(d2);
+			xfs_dqlock(d1);
+		} else {
+			xfs_dqlock(d1);
+			xfs_dqlock(d2);
+		}
+	} else {
+		if (d1) {
+			xfs_dqlock(d1);
+		} else if (d2) {
+			xfs_dqlock(d2);
+		}
+	}
+}
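
xfs_dqlock2() above prevents AB/BA deadlocks by always taking the two dquot locks in ascending id order, whatever order the caller passed them in. Here is a userspace sketch of the same rule; the types are illustrative and the kernel routine's handling of NULL arguments is omitted.

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t	lock;
	unsigned int	id;
};

/* Lock two objects together without risking an AB/BA deadlock: impose a
 * total order (lower id first), just as xfs_dqlock2() does for dquots. */
static void lock_pair(struct obj *a, struct obj *b)
{
	if (a->id > b->id) {
		struct obj *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER, 3 };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER, 8 };

	lock_pair(&y, &x);	/* argument order doesn't matter */
	printf("both locked\n");
	unlock_pair(&x, &y);
	return 0;
}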
+
+
+/*
+ * Take a dquot out of the mount's dqlist as well as the hashlist.
+ * This is called via unmount as well as quotaoff, and the purge
+ * will always succeed unless there are soft (temp) references
+ * outstanding.
+ *
+ * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
+ * that we're returning! XXXsup - not cool.
+ */
+/* ARGSUSED */
+int
+xfs_qm_dqpurge(
+	xfs_dquot_t	*dqp,
+	uint		flags)
+{
+	xfs_dqhash_t	*thishash;
+	xfs_mount_t	*mp;
+
+	mp = dqp->q_mount;
+
+	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));
+
+	xfs_dqlock(dqp);
+	/*
+	 * We really can't afford to purge a dquot that is
+	 * referenced, because these are hard refs.
+	 * It shouldn't happen in general because we went through _all_ inodes
+	 * in dqrele_all_inodes before calling this and didn't let the mountlock go.
+	 * However, it is possible that we have dquots with temporary
+	 * references that are not attached to an inode, e.g. see xfs_setattr().
+	 */
+	if (dqp->q_nrefs != 0) {
+		xfs_dqunlock(dqp);
+		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
+		return (1);
+	}
+
+	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+
+	/*
+	 * If we're turning off quotas, we have to make sure that, for
+	 * example, we don't delete quota disk blocks while dquots are
+	 * in the process of getting written to those disk blocks.
+	 * This dquot might well be on AIL, and we can't leave it there
+	 * if we're turning off quotas. Basically, we need this flush
+	 * lock, and are willing to block on it.
+	 */
+	if (! xfs_qm_dqflock_nowait(dqp)) {
+		/*
+		 * Block on the flush lock after nudging dquot buffer,
+		 * if it is incore.
+		 */
+		xfs_qm_dqflock_pushbuf_wait(dqp);
+	}
+
+	/*
+	 * XXX If we're turning this type of quota off, we don't care
+	 * about the dirty metadata sitting in this dquot. OTOH, if
+	 * we're unmounting, we do care, so we flush it and wait.
+	 */
+	if (XFS_DQ_IS_DIRTY(dqp)) {
+		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
+		/* dqflush unlocks dqflock */
+		/*
+		 * Given that dqpurge is a very rare occurrence, it is OK
+		 * that we're holding the hashlist and mplist locks
+		 * across the disk write. But, ... XXXsup
+		 *
+		 * We don't care about getting disk errors here. We need
+		 * to purge this dquot anyway, so we go ahead regardless.
+		 */
+		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+		xfs_dqflock(dqp);
+	}
+	ASSERT(dqp->q_pincount == 0);
+	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
+	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
+
+	thishash = dqp->q_hash;
+	XQM_HASHLIST_REMOVE(thishash, dqp);
+	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
+	/*
+	 * XXX Move this to the front of the freelist, if we can get the
+	 * freelist lock.
+	 */
+	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+
+	dqp->q_mount = NULL;
+	dqp->q_hash = NULL;
+	dqp->dq_flags = XFS_DQ_INACTIVE;
+	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
+	xfs_dqfunlock(dqp);
+	xfs_dqunlock(dqp);
+	XFS_DQ_HASH_UNLOCK(thishash);
+	return (0);
+}
+
+
+#ifdef QUOTADEBUG
+void
+xfs_qm_dqprint(xfs_dquot_t *dqp)
+{
+	cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
+	cmn_err(CE_DEBUG, "---- dquotID =  %d",
+		(int)INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- type    =  %s",
+		XFS_QM_ISUDQ(dqp) ? "USR" : "GRP");
+	cmn_err(CE_DEBUG, "---- fs      =  0x%p", dqp->q_mount);
+	cmn_err(CE_DEBUG, "---- blkno   =  0x%x", (int) dqp->q_blkno);
+	cmn_err(CE_DEBUG, "---- boffset =  0x%x", (int) dqp->q_bufoffset);
+	cmn_err(CE_DEBUG, "---- blkhlimit =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT),
+		(int) INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- blkslimit =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT),
+		(int)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- inohlimit =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT),
+		(int)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- inoslimit =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT),
+		(int)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- bcount  =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
+		(int)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- icount  =  %Lu (0x%x)",
+		INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
+		(int)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- btimer  =  %d",
+		(int)INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---- itimer  =  %d",
+		(int)INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT));
+	cmn_err(CE_DEBUG, "---------------------------");
+}
+#endif
+
+/*
+ * Give the buffer a little push if it is incore and
+ * wait on the flush lock.
+ */
+void
+xfs_qm_dqflock_pushbuf_wait(
+	xfs_dquot_t	*dqp)
+{
+	xfs_buf_t	*bp;
+
+	/*
+	 * Check to see if the dquot has been flushed as a delayed
+	 * write.  If so, grab its buffer and send it
+	 * out immediately.  We'll be able to acquire
+	 * the flush lock when the I/O completes.
+	 */
+	bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
+		    XFS_QI_DQCHUNKLEN(dqp->q_mount),
+		    XFS_INCORE_TRYLOCK);
+	if (bp != NULL) {
+		if (XFS_BUF_ISDELAYWRITE(bp)) {
+			if (XFS_BUF_ISPINNED(bp)) {
+				xfs_log_force(dqp->q_mount,
+					      (xfs_lsn_t)0,
+					      XFS_LOG_FORCE);
+			}
+			xfs_bawrite(dqp->q_mount, bp);
+		} else {
+			xfs_buf_relse(bp);
+		}
+	}
+	xfs_dqflock(dqp);
+}
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
new file mode 100644
index 0000000..0c3fe31
--- /dev/null
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DQUOT_H__
+#define __XFS_DQUOT_H__
+
+/*
+ * Dquots are structures that hold quota information about a user or a group,
+ * much like inodes are for files. In fact, dquots share many characteristics
+ * with inodes. However, dquots can also be a centralized resource, relative
+ * to a collection of inodes. In this respect, dquots share some characteristics
+ * of the superblock.
+ * XFS dquots exploit both of those characteristics in their algorithms. They
+ * make every attempt not to be a bottleneck when quotas are on, and to have
+ * minimal impact, if any, when quotas are off.
+ */
+
+/*
+ * The hash chain headers (hash buckets)
+ */
+typedef struct xfs_dqhash {
+	struct xfs_dquot *qh_next;
+	mutex_t		  qh_lock;
+	uint		  qh_version;	/* ever increasing version */
+	uint		  qh_nelems;	/* number of dquots on the list */
+} xfs_dqhash_t;
+
+typedef struct xfs_dqlink {
+	struct xfs_dquot  *ql_next;	/* forward link */
+	struct xfs_dquot **ql_prevp;	/* pointer to prev ql_next */
+} xfs_dqlink_t;
+
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * This is the marker which is designed to occupy the first few
+ * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers
+ * must come first.
+ * This serves as the marker ("sentinel") when we have to restart list
+ * iterations because of locking considerations.
+ */
+typedef struct xfs_dqmarker {
+	struct xfs_dquot*dqm_flnext;	/* link to freelist: must be first */
+	struct xfs_dquot*dqm_flprev;
+	xfs_dqlink_t	 dqm_mplist;	/* link to mount's list of dquots */
+	xfs_dqlink_t	 dqm_hashlist;	/* link to the hash chain */
+	uint		 dqm_flags;	/* various flags (XFS_DQ_*) */
+} xfs_dqmarker_t;
+
+/*
+ * The incore dquot structure
+ */
+typedef struct xfs_dquot {
+	xfs_dqmarker_t	 q_lists;	/* list ptrs, q_flags (marker) */
+	xfs_dqhash_t	*q_hash;	/* the hashchain header */
+	struct xfs_mount*q_mount;	/* filesystem this relates to */
+	struct xfs_trans*q_transp;	/* trans this belongs to currently */
+	uint		 q_nrefs;	/* # active refs from inodes */
+	xfs_daddr_t	 q_blkno;	/* blkno of dquot buffer */
+	int		 q_bufoffset;	/* off of dq in buffer (bytes) */
+	xfs_fileoff_t	 q_fileoffset;	/* offset in quotas file */
+
+	struct xfs_dquot*q_gdquot;	/* group dquot, hint only */
+	xfs_disk_dquot_t q_core;	/* actual usage & quotas */
+	xfs_dq_logitem_t q_logitem;	/* dquot log item */
+	xfs_qcnt_t	 q_res_bcount;	/* total regular nblks used+reserved */
+	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
+	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
+	mutex_t		 q_qlock;	/* quota lock */
+	sema_t		 q_flock;	/* flush lock */
+	uint		 q_pincount;	/* pin count for this dquot */
+	sv_t		 q_pinwait;	/* sync var for pinning */
+#ifdef XFS_DQUOT_TRACE
+	struct ktrace	*q_trace;	/* trace header structure */
+#endif
+} xfs_dquot_t;
+
+
+#define dq_flnext	q_lists.dqm_flnext
+#define dq_flprev	q_lists.dqm_flprev
+#define dq_mplist	q_lists.dqm_mplist
+#define dq_hashlist	q_lists.dqm_hashlist
+#define dq_flags	q_lists.dqm_flags
+
+#define XFS_DQHOLD(dqp)		((dqp)->q_nrefs++)
+
+/*
+ * Quota Accounting flags
+ */
+#define XFS_ALL_QUOTA_ACCT	(XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT)
+#define XFS_ALL_QUOTA_ENFD	(XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD)
+#define XFS_ALL_QUOTA_CHKD	(XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD)
+#define XFS_ALL_QUOTA_ACTV	(XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE)
+#define XFS_ALL_QUOTA_ACCT_ENFD (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
+				 XFS_GQUOTA_ACCT|XFS_GQUOTA_ENFD)
+
+#define XFS_IS_QUOTA_RUNNING(mp)  ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
+#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
+#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
+
+/*
+ * Quota Limit Enforcement flags
+ */
+#define XFS_IS_QUOTA_ENFORCED(mp)	((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
+#define XFS_IS_UQUOTA_ENFORCED(mp)	((mp)->m_qflags & XFS_UQUOTA_ENFD)
+#define XFS_IS_GQUOTA_ENFORCED(mp)	((mp)->m_qflags & XFS_GQUOTA_ENFD)
+
+#ifdef DEBUG
+static inline int
+XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
+{
+	if (mutex_trylock(&dqp->q_qlock)) {
+		mutex_unlock(&dqp->q_qlock);
+		return 0;
+	}
+	return 1;
+}
+#endif
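
The DEBUG-only XFS_DQ_IS_LOCKED() above uses the classic trylock trick: if a trylock succeeds, nobody held the lock, so undo it and report "not locked". A standalone pthread sketch of the same idiom; note that, like the kernel helper, it can only tell that someone holds the lock, not who.

#include <pthread.h>
#include <assert.h>

static int is_locked(pthread_mutex_t *m)
{
	if (pthread_mutex_trylock(m) == 0) {
		pthread_mutex_unlock(m);	/* nobody held it: undo our trylock */
		return 0;
	}
	return 1;				/* trylock failed: somebody holds it */
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	assert(!is_locked(&m));
	pthread_mutex_lock(&m);
	assert(is_locked(&m));
	pthread_mutex_unlock(&m);
	return 0;
}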
+
+
+/*
+ * The following macros simply manage the q_flock
+ * semaphore embedded in the dquot.  This semaphore synchronizes
+ * processes attempting to flush the in-core dquot back to disk.
+ */
+#define xfs_dqflock(dqp)	 { psema(&((dqp)->q_flock), PINOD | PRECALC);\
+				   (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
+#define xfs_dqfunlock(dqp)	 { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \
+				   vsema(&((dqp)->q_flock)); \
+				   (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
+
+#define XFS_DQ_PINLOCK(dqp)	   mutex_spinlock( \
+				     &(XFS_DQ_TO_QINF(dqp)->qi_pinlock))
+#define XFS_DQ_PINUNLOCK(dqp, s)   mutex_spinunlock( \
+				     &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s)
+
+#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0)
+#define XFS_DQ_IS_ON_FREELIST(dqp)  ((dqp)->dq_flnext != (dqp))
+#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
+#define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
+#define XFS_DQ_TO_QINF(dqp)	((dqp)->q_mount->m_quotainfo)
+#define XFS_DQ_TO_QIP(dqp)	(XFS_QM_ISUDQ(dqp) ? \
+				 XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
+				 XFS_DQ_TO_QINF(dqp)->qi_gquotaip)
+
+#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \
+				     (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
+				     (XFS_IS_GQUOTA_ON((d)->q_mount))))
+
+#ifdef XFS_DQUOT_TRACE
+/*
+ * Dquot Tracing stuff.
+ */
+#define DQUOT_TRACE_SIZE	64
+#define DQUOT_KTRACE_ENTRY	1
+
+extern void		__xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
+					    void *, xfs_inode_t *);
+#define xfs_dqtrace_entry_ino(a,b,ip) \
+		__xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
+#define xfs_dqtrace_entry(a,b) \
+		__xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
+#else
+#define xfs_dqtrace_entry(a,b)
+#define xfs_dqtrace_entry_ino(a,b,ip)
+#endif
+
+#ifdef QUOTADEBUG
+extern void		xfs_qm_dqprint(xfs_dquot_t *);
+#else
+#define xfs_qm_dqprint(a)
+#endif
+
+extern void		xfs_qm_dqdestroy(xfs_dquot_t *);
+extern int		xfs_qm_dqflush(xfs_dquot_t *, uint);
+extern int		xfs_qm_dqpurge(xfs_dquot_t *, uint);
+extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
+extern int		xfs_qm_dqlock_nowait(xfs_dquot_t *);
+extern int		xfs_qm_dqflock_nowait(xfs_dquot_t *);
+extern void		xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
+extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
+					xfs_disk_dquot_t *);
+extern void		xfs_qm_adjust_dqlimits(xfs_mount_t *,
+					xfs_disk_dquot_t *);
+extern int		xfs_qm_dqwarn(xfs_disk_dquot_t *, uint);
+extern int		xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
+					xfs_dqid_t, uint, uint, xfs_dquot_t **);
+extern void		xfs_qm_dqput(xfs_dquot_t *);
+extern void		xfs_qm_dqrele(xfs_dquot_t *);
+extern void		xfs_dqlock(xfs_dquot_t *);
+extern void		xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
+extern void		xfs_dqunlock(xfs_dquot_t *);
+extern void		xfs_dqunlock_nonotify(xfs_dquot_t *);
+
+#endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
new file mode 100644
index 0000000..a5425ee
--- /dev/null
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+
+#include "xfs_qm.h"
+
+
+/*
+ * returns the number of iovecs needed to log the given dquot item.
+ */
+/* ARGSUSED */
+STATIC uint
+xfs_qm_dquot_logitem_size(
+	xfs_dq_logitem_t	*logitem)
+{
+	/*
+	 * we need only two iovecs, one for the format, one for the real thing
+	 */
+	return (2);
+}
+
+/*
+ * fills in the vector of log iovecs for the given dquot log item.
+ */
+STATIC void
+xfs_qm_dquot_logitem_format(
+	xfs_dq_logitem_t	*logitem,
+	xfs_log_iovec_t		*logvec)
+{
+	ASSERT(logitem);
+	ASSERT(logitem->qli_dquot);
+
+	logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
+	logvec->i_len  = sizeof(xfs_dq_logformat_t);
+	logvec++;
+	logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
+	logvec->i_len  = sizeof(xfs_disk_dquot_t);
+
+	ASSERT(2 == logitem->qli_item.li_desc->lid_size);
+	logitem->qli_format.qlf_size = 2;
+
+}
+
+/*
+ * Increment the pin count of the given dquot.
+ * This value is protected by the pinlock spinlock in the xQM structure.
+ */
+STATIC void
+xfs_qm_dquot_logitem_pin(
+	xfs_dq_logitem_t *logitem)
+{
+	unsigned long	s;
+	xfs_dquot_t *dqp;
+
+	dqp = logitem->qli_dquot;
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	s = XFS_DQ_PINLOCK(dqp);
+	dqp->q_pincount++;
+	XFS_DQ_PINUNLOCK(dqp, s);
+}
+
+/*
+ * Decrement the pin count of the given dquot, and wake up
+ * anyone waiting in xfs_qm_dqunpin_wait() if the count goes to 0.  The
+ * dquot must have been previously pinned by xfs_qm_dquot_logitem_pin().
+ */
+/* ARGSUSED */
+STATIC void
+xfs_qm_dquot_logitem_unpin(
+	xfs_dq_logitem_t *logitem,
+	int		  stale)
+{
+	unsigned long	s;
+	xfs_dquot_t *dqp;
+
+	dqp = logitem->qli_dquot;
+	ASSERT(dqp->q_pincount > 0);
+	s = XFS_DQ_PINLOCK(dqp);
+	dqp->q_pincount--;
+	if (dqp->q_pincount == 0) {
+		sv_broadcast(&dqp->q_pinwait);
+	}
+	XFS_DQ_PINUNLOCK(dqp, s);
+}
+
+/* ARGSUSED */
+STATIC void
+xfs_qm_dquot_logitem_unpin_remove(
+	xfs_dq_logitem_t *logitem,
+	xfs_trans_t	 *tp)
+{
+	xfs_qm_dquot_logitem_unpin(logitem, 0);
+}
+
+/*
+ * Given the logitem, this writes the corresponding dquot entry to disk
+ * asynchronously. This is called with the dquot entry securely locked;
+ * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
+ * at the end.
+ */
+STATIC void
+xfs_qm_dquot_logitem_push(
+	xfs_dq_logitem_t	*logitem)
+{
+	xfs_dquot_t	*dqp;
+
+	dqp = logitem->qli_dquot;
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+
+	/*
+	 * Since we were able to lock the dquot's flush lock and
+	 * we found it on the AIL, the dquot must be dirty.  This
+	 * is because the dquot is removed from the AIL while still
+	 * holding the flush lock in xfs_dqflush_done().  Thus, if
+	 * we found it in the AIL and were able to obtain the flush
+	 * lock without sleeping, then there must not have been
+	 * anyone in the process of flushing the dquot.
+	 */
+	xfs_qm_dqflush(dqp, XFS_B_DELWRI);
+	xfs_dqunlock(dqp);
+}
+
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_qm_dquot_logitem_committed(
+	xfs_dq_logitem_t	*l,
+	xfs_lsn_t		lsn)
+{
+	/*
+	 * We always re-log the entire dquot when it becomes dirty,
+	 * so, the latest copy _is_ the only one that matters.
+	 */
+	return (lsn);
+}
+
+
+/*
+ * This is called to wait for the given dquot to be unpinned.
+ * Most of these pin/unpin routines are plagiarized from inode code.
+ */
+void
+xfs_qm_dqunpin_wait(
+	xfs_dquot_t	*dqp)
+{
+	SPLDECL(s);
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	if (dqp->q_pincount == 0) {
+		return;
+	}
+
+	/*
+	 * Give the log a push so we don't wait here too long.
+	 */
+	xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
+	s = XFS_DQ_PINLOCK(dqp);
+	if (dqp->q_pincount == 0) {
+		XFS_DQ_PINUNLOCK(dqp, s);
+		return;
+	}
+	sv_wait(&(dqp->q_pinwait), PINOD,
+		&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
+}
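
The pin count managed by the logitem pin/unpin routines and waited on in xfs_qm_dqunpin_wait() above is a counter-plus-wakeup scheme. The sketch below shows the same shape with a pthread condition variable standing in for the sv_t; the log force the kernel issues before waiting, and the lockless fast path, are omitted.

#include <pthread.h>
#include <stdio.h>

struct pinnable {
	pthread_mutex_t	lock;
	pthread_cond_t	unpinned;
	unsigned int	pincount;
};

static void pin(struct pinnable *p)
{
	pthread_mutex_lock(&p->lock);
	p->pincount++;
	pthread_mutex_unlock(&p->lock);
}

static void unpin(struct pinnable *p)
{
	pthread_mutex_lock(&p->lock);
	if (--p->pincount == 0)
		pthread_cond_broadcast(&p->unpinned);	/* wake all waiters */
	pthread_mutex_unlock(&p->lock);
}

static void unpin_wait(struct pinnable *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->pincount != 0)			/* re-check after every wakeup */
		pthread_cond_wait(&p->unpinned, &p->lock);
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pinnable p = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};

	pin(&p);
	unpin(&p);		/* count back to zero: nothing left to wait for */
	unpin_wait(&p);
	printf("unpinned\n");
	return 0;
}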
+
+/*
+ * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
+ * the dquot is locked by us, but the flush lock isn't. So, here we are
+ * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
+ * If so, we want to push it out to help us take this item off the AIL as soon
+ * as possible.
+ *
+ * We must not be holding the AIL_LOCK at this point. Calling incore() to
 * search the buffercache can be a time-consuming thing, and AIL_LOCK is a
+ * spinlock.
+ */
+STATIC void
+xfs_qm_dquot_logitem_pushbuf(
+	xfs_dq_logitem_t    *qip)
+{
+	xfs_dquot_t	*dqp;
+	xfs_mount_t	*mp;
+	xfs_buf_t	*bp;
+	uint		dopush;
+
+	dqp = qip->qli_dquot;
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	/*
+	 * The qli_pushbuf_flag keeps others from
+	 * trying to duplicate our effort.
+	 */
+	ASSERT(qip->qli_pushbuf_flag != 0);
+	ASSERT(qip->qli_push_owner == get_thread_id());
+
+	/*
+	 * If the flush lock isn't held anymore, chances are that the
+	 * dquot flush completed and the dquot was taken off the AIL.
+	 * So, just get out.
+	 */
+	if ((valusema(&(dqp->q_flock)) > 0)  ||
+	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+		qip->qli_pushbuf_flag = 0;
+		xfs_dqunlock(dqp);
+		return;
+	}
+	mp = dqp->q_mount;
+	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
+		    XFS_QI_DQCHUNKLEN(mp),
+		    XFS_INCORE_TRYLOCK);
+	if (bp != NULL) {
+		if (XFS_BUF_ISDELAYWRITE(bp)) {
+			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
+				  (valusema(&(dqp->q_flock)) <= 0));
+			qip->qli_pushbuf_flag = 0;
+			xfs_dqunlock(dqp);
+
+			if (XFS_BUF_ISPINNED(bp)) {
+				xfs_log_force(mp, (xfs_lsn_t)0,
+					      XFS_LOG_FORCE);
+			}
+			if (dopush) {
+#ifdef XFSRACEDEBUG
+				delay_for_intr();
+				delay(300);
+#endif
+				xfs_bawrite(mp, bp);
+			} else {
+				xfs_buf_relse(bp);
+			}
+		} else {
+			qip->qli_pushbuf_flag = 0;
+			xfs_dqunlock(dqp);
+			xfs_buf_relse(bp);
+		}
+		return;
+	}
+
+	qip->qli_pushbuf_flag = 0;
+	xfs_dqunlock(dqp);
+}
+
+/*
+ * This is called to attempt to lock the dquot associated with this
+ * dquot log item.  Don't sleep on the dquot lock or the flush lock.
+ * If the flush lock is already held, indicating that the dquot has
+ * been or is in the process of being flushed, then see if we can
+ * find the dquot's buffer in the buffer cache without sleeping.  If
+ * we can and it is marked delayed write, then we want to send it out.
+ * We delay doing so until the push routine, though, to avoid sleeping
+ * in any device strategy routines.
+ */
+STATIC uint
+xfs_qm_dquot_logitem_trylock(
+	xfs_dq_logitem_t	*qip)
+{
+	xfs_dquot_t		*dqp;
+	uint			retval;
+
+	dqp = qip->qli_dquot;
+	if (dqp->q_pincount > 0)
+		return (XFS_ITEM_PINNED);
+
+	if (! xfs_qm_dqlock_nowait(dqp))
+		return (XFS_ITEM_LOCKED);
+
+	retval = XFS_ITEM_SUCCESS;
+	if (! xfs_qm_dqflock_nowait(dqp)) {
+		/*
+		 * The dquot is already being flushed.	It may have been
+		 * flushed delayed write, however, and we don't want to
+		 * get stuck waiting for that to complete.  So, we want to check
+		 * to see if we can lock the dquot's buffer without sleeping.
+		 * If we can and it is marked for delayed write, then we
+		 * hold it and send it out from the push routine.  We don't
+		 * want to do that now since we might sleep in the device
+		 * strategy routine.  We also don't want to grab the buffer lock
+		 * here because we'd like not to call into the buffer cache
+		 * while holding the AIL_LOCK.
+		 * Make sure to only return PUSHBUF if we set pushbuf_flag
+		 * ourselves.  If someone else is doing it then we don't
+		 * want to go to the push routine and duplicate their efforts.
+		 */
+		if (qip->qli_pushbuf_flag == 0) {
+			qip->qli_pushbuf_flag = 1;
+			ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
+#ifdef DEBUG
+			qip->qli_push_owner = get_thread_id();
+#endif
+			/*
+			 * The dquot is left locked.
+			 */
+			retval = XFS_ITEM_PUSHBUF;
+		} else {
+			retval = XFS_ITEM_FLUSHING;
+			xfs_dqunlock_nonotify(dqp);
+		}
+	}
+
+	ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
+	return (retval);
+}
+
+
+/*
+ * Unlock the dquot associated with the log item.
+ * Clear the fields of the dquot and dquot log item that
+ * are specific to the current transaction.  If the
+ * hold flags is set, do not unlock the dquot.
+ */
+STATIC void
+xfs_qm_dquot_logitem_unlock(
+	xfs_dq_logitem_t    *ql)
+{
+	xfs_dquot_t	*dqp;
+
+	ASSERT(ql != NULL);
+	dqp = ql->qli_dquot;
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	/*
+	 * Clear the transaction pointer in the dquot
+	 */
+	dqp->q_transp = NULL;
+
+	/*
+	 * dquots are never 'held' from getting unlocked at the end of
+	 * a transaction.  Their locking and unlocking is hidden inside the
+	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
+	 * for the logitem.
+	 */
+	xfs_dqunlock(dqp);
+}
+
+
+/*
+ * The transaction with the dquot locked has aborted.  The dquot
+ * must not be dirty within the transaction.  We simply unlock just
+ * as if the transaction had been cancelled.
+ */
+STATIC void
+xfs_qm_dquot_logitem_abort(
+	xfs_dq_logitem_t    *ql)
+{
+	xfs_qm_dquot_logitem_unlock(ql);
+}
+
+/*
+ * This needs to stamp an lsn into the dquot, I think.
+ * RPCs that look at user dquots would then have to
+ * push on the dependency recorded in the dquot.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_qm_dquot_logitem_committing(
+	xfs_dq_logitem_t	*l,
+	xfs_lsn_t		lsn)
+{
+	return;
+}
+
+
+/*
+ * This is the ops vector for dquots
+ */
+struct xfs_item_ops xfs_dquot_item_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_qm_dquot_logitem_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))
+					xfs_qm_dquot_logitem_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
+					xfs_qm_dquot_logitem_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))
+					xfs_qm_dquot_logitem_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_dquot_logitem_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_abort,
+	.iop_pushbuf	= (void(*)(xfs_log_item_t*))
+					xfs_qm_dquot_logitem_pushbuf,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_dquot_logitem_committing
+};
+
+/*
+ * Initialize the dquot log item for a newly allocated dquot.
+ * The dquot isn't locked at this point, but it isn't on any of the lists
+ * either, so we don't care.
+ */
+void
+xfs_qm_dquot_logitem_init(
+	struct xfs_dquot *dqp)
+{
+	xfs_dq_logitem_t  *lp;
+	lp = &dqp->q_logitem;
+
+	lp->qli_item.li_type = XFS_LI_DQUOT;
+	lp->qli_item.li_ops = &xfs_dquot_item_ops;
+	lp->qli_item.li_mountp = dqp->q_mount;
+	lp->qli_dquot = dqp;
+	lp->qli_format.qlf_type = XFS_LI_DQUOT;
+	lp->qli_format.qlf_id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
+	lp->qli_format.qlf_blkno = dqp->q_blkno;
+	lp->qli_format.qlf_len = 1;
+	/*
+	 * This is just the offset of this dquot within its buffer
+	 * (which is currently 1 FSB and probably won't change).
+	 * Hence 32 bits for this offset should be just fine.
+	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
+	 * here, and recompute it at recovery time.
+	 */
+	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
+}
+
+/*------------------  QUOTAOFF LOG ITEMS  -------------------*/
+
+/*
+ * This returns the number of iovecs needed to log the given quotaoff item.
+ * We only need 1 iovec for a quotaoff item.  It just logs the
+ * quotaoff_log_format structure.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
+{
+	return (1);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given quotaoff log item. We use only 1 iovec, and we point that
+ * at the quotaoff_log_format structure embedded in the quotaoff item.
+ * It is at this point that we assert that the item really is a
+ * quotaoff log item.
+ */
+STATIC void
+xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t	*qf,
+			   xfs_log_iovec_t	*log_vector)
+{
+	ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);
+
+	log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
+	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
+	qf->qql_format.qf_size = 1;
+}
+
+
+/*
+ * Pinning has no meaning for a quotaoff item, so just return.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
+{
+	return;
+}
+
+
+/*
+ * Since pinning has no meaning for a quotaoff item, unpinning does
+ * not either.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
+{
+	return;
+}
+
+/*ARGSUSED*/
+STATIC void
+xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
+{
+	return;
+}
+
+/*
+ * Quotaoff items have no locking; report the item as locked so that
+ * the AIL push code leaves it alone.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
+{
+	return XFS_ITEM_LOCKED;
+}
+
+/*
+ * Quotaoff items have no locking, so there is nothing for the
+ * unlock routine to do.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
+{
+	return;
+}
+
+/*
+ * The quotaoff-start-item is logged only once and cannot be moved in the log,
+ * so simply return the lsn at which it's been logged.
+ */
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
+{
+	return (lsn);
+}
+
+/*
+ * The transaction of which this QUOTAOFF is a part has been aborted.
+ * Just clean up after ourselves.
+ * Shouldn't this never happen in the case of qoffend logitems? XXX
+ */
+STATIC void
+xfs_qm_qoff_logitem_abort(xfs_qoff_logitem_t *qf)
+{
+	kmem_free(qf, sizeof(xfs_qoff_logitem_t));
+}
+
+/*
+ * There isn't much you can do to push on a quotaoff item.  It is simply
+ * stuck waiting for the log to be flushed to disk.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
+{
+	return;
+}
+
+
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_qm_qoffend_logitem_committed(
+	xfs_qoff_logitem_t *qfe,
+	xfs_lsn_t lsn)
+{
+	xfs_qoff_logitem_t	*qfs;
+	SPLDECL(s);
+
+	qfs = qfe->qql_start_lip;
+	AIL_LOCK(qfs->qql_item.li_mountp,s);
+	/*
+	 * Delete the qoff-start logitem from the AIL.
+	 * xfs_trans_delete_ail() drops the AIL lock.
+	 */
+	xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s);
+	kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
+	kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
+	return (xfs_lsn_t)-1;
+}
+
+/*
+ * XXX rcc - don't know quite what to do with this.  I think we can
+ * just ignore it.  The only time that isn't the case is if we allow
+ * the client to somehow see that quotas have been turned off, in which
+ * case we can't allow that to get back until the quotaoff hits the disk.
+ * So how would that happen?  Also, do we need different routines for
+ * quotaoff start and quotaoff end?  I suspect the answer is yes but
+ * to be sure, I need to look at the recovery code and see how quota off
+ * recovery is handled (do we roll forward or back or do something else).
+ * If we roll forwards or backwards, then we need two separate routines,
+ * one that does nothing and one that stamps in the lsn that matters
+ * (truly makes the quotaoff irrevocable).  If we do something else,
+ * then maybe we don't need two.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
+{
+	return;
+}
+
+/* ARGSUSED */
+STATIC void
+xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
+{
+	return;
+}
+
+struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_qm_qoff_logitem_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t* ,int))
+					xfs_qm_qoff_logitem_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
+					xfs_qm_qoff_logitem_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_qoffend_logitem_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
+	.iop_pushbuf	= NULL,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_qoffend_logitem_committing
+};
+
+/*
+ * This is the ops vector shared by all quotaoff-start log items.
+ */
+struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_qm_qoff_logitem_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))
+					xfs_qm_qoff_logitem_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
+					xfs_qm_qoff_logitem_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_qoff_logitem_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
+	.iop_pushbuf	= NULL,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_qm_qoff_logitem_committing
+};
+
+/*
+ * Allocate and initialize a quotaoff item of the correct quota type(s).
+ */
+xfs_qoff_logitem_t *
+xfs_qm_qoff_logitem_init(
+	struct xfs_mount *mp,
+	xfs_qoff_logitem_t *start,
+	uint flags)
+{
+	xfs_qoff_logitem_t	*qf;
+
+	qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
+
+	qf->qql_item.li_type = XFS_LI_QUOTAOFF;
+	if (start)
+		qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
+	else
+		qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
+	qf->qql_item.li_mountp = mp;
+	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
+	qf->qql_format.qf_flags = flags;
+	qf->qql_start_lip = start;
+	return (qf);
+}
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h
new file mode 100644
index 0000000..9c6500d
--- /dev/null
+++ b/fs/xfs/quota/xfs_dquot_item.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DQUOT_ITEM_H__
+#define __XFS_DQUOT_ITEM_H__
+
+struct xfs_dquot;
+struct xfs_trans;
+struct xfs_mount;
+struct xfs_qoff_logitem;
+
+typedef struct xfs_dq_logitem {
+	xfs_log_item_t		 qli_item;	   /* common portion */
+	struct xfs_dquot	*qli_dquot;	   /* dquot ptr */
+	xfs_lsn_t		 qli_flush_lsn;	   /* lsn at last flush */
+	unsigned short		 qli_pushbuf_flag; /* 1 bit used in push_ail */
+#ifdef DEBUG
+	uint64_t		 qli_push_owner;
+#endif
+	xfs_dq_logformat_t	 qli_format;	   /* logged structure */
+} xfs_dq_logitem_t;
+
+typedef struct xfs_qoff_logitem {
+	xfs_log_item_t		 qql_item;	/* common portion */
+	struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
+	xfs_qoff_logformat_t	 qql_format;	/* logged structure */
+} xfs_qoff_logitem_t;
+
+
+extern void		   xfs_qm_dquot_logitem_init(struct xfs_dquot *);
+extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
+					struct xfs_qoff_logitem *, uint);
+extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
+					struct xfs_qoff_logitem *, uint);
+extern void		   xfs_trans_log_quotaoff_item(struct xfs_trans *,
+					struct xfs_qoff_logitem *);
+
+#endif	/* __XFS_DQUOT_ITEM_H__ */
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
new file mode 100644
index 0000000..89f2cd6
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm.c
@@ -0,0 +1,2848 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_space.h"
+#include "xfs_utils.h"
+
+#include "xfs_qm.h"
+
+/*
+ * The global quota manager. There is only one of these for the entire
+ * system, _not_ one per file system. XQM keeps track of the overall
+ * quota functionality, including maintaining the freelist and hash
+ * tables of dquots.
+ */
+mutex_t xfs_Gqm_lock;
+struct xfs_qm	*xfs_Gqm;
+
+kmem_zone_t	*qm_dqzone;
+kmem_zone_t	*qm_dqtrxzone;
+kmem_shaker_t	xfs_qm_shaker;
+
+STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
+STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);
+
+STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
+STATIC int	xfs_qm_shake(int, unsigned int);
+
+#ifdef DEBUG
+extern mutex_t	qcheck_lock;
+#endif
+
+#ifdef QUOTADEBUG
+#define XQM_LIST_PRINT(l, NXT, title) \
+{ \
+	xfs_dquot_t	*dqp; int i = 0; \
+	cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
+	for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
+		cmn_err(CE_DEBUG, "   %d.  \"%d (%s)\"   " \
+				  "bcnt = %d, icnt = %d, refs = %d", \
+			++i, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \
+			DQFLAGTO_TYPESTR(dqp),	     \
+			(int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \
+			(int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \
+			(int) dqp->q_nrefs);  } \
+}
+#else
+#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
+#endif
+
+/*
+ * Initialize the XQM structure.
+ * Note that there is not one quota manager per file system.
+ */
+STATIC struct xfs_qm *
+xfs_Gqm_init(void)
+{
+	xfs_qm_t		*xqm;
+	int			hsize, i;
+
+	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
+	ASSERT(xqm);
+
+	/*
+	 * Initialize the dquot hash tables.
+	 */
+	hsize = (DQUOT_HASH_HEURISTIC < XFS_QM_NCSIZE_THRESHOLD) ?
+		XFS_QM_HASHSIZE_LOW : XFS_QM_HASHSIZE_HIGH;
+	xqm->qm_dqhashmask = hsize - 1;
+
+	xqm->qm_usr_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize *
+						      sizeof(xfs_dqhash_t),
+						      KM_SLEEP);
+	xqm->qm_grp_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize *
+						      sizeof(xfs_dqhash_t),
+						      KM_SLEEP);
+	ASSERT(xqm->qm_usr_dqhtable != NULL);
+	ASSERT(xqm->qm_grp_dqhtable != NULL);
+
+	for (i = 0; i < hsize; i++) {
+		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
+		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
+	}
+
+	/*
+	 * Freelist of all dquots of all file systems
+	 */
+	xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
+
+	/*
+	 * dquot zone. we register our own low-memory callback.
+	 */
+	if (!qm_dqzone) {
+		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
+						"xfs_dquots");
+		qm_dqzone = xqm->qm_dqzone;
+	} else
+		xqm->qm_dqzone = qm_dqzone;
+
+	xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
+
+	/*
+	 * The t_dqinfo portion of transactions.
+	 */
+	if (!qm_dqtrxzone) {
+		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
+						   "xfs_dqtrx");
+		qm_dqtrxzone = xqm->qm_dqtrxzone;
+	} else
+		xqm->qm_dqtrxzone = qm_dqtrxzone;
+
+	atomic_set(&xqm->qm_totaldquots, 0);
+	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
+	xqm->qm_nrefs = 0;
+#ifdef DEBUG
+	mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
+#endif
+	return xqm;
+}
+
+/*
+ * Destroy the global quota manager when its reference count goes to zero.
+ */
+void
+xfs_qm_destroy(
+	struct xfs_qm	*xqm)
+{
+	int		hsize, i;
+
+	ASSERT(xqm != NULL);
+	ASSERT(xqm->qm_nrefs == 0);
+	kmem_shake_deregister(xfs_qm_shaker);
+	hsize = xqm->qm_dqhashmask + 1;
+	for (i = 0; i < hsize; i++) {
+		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
+		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
+	}
+	kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
+	kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
+	xqm->qm_usr_dqhtable = NULL;
+	xqm->qm_grp_dqhtable = NULL;
+	xqm->qm_dqhashmask = 0;
+	xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
+#ifdef DEBUG
+	mutex_destroy(&qcheck_lock);
+#endif
+	kmem_free(xqm, sizeof(xfs_qm_t));
+}
+
+/*
+ * Called at mount time to let XQM know that another file system is
+ * starting quotas. This isn't crucial information as the individual mount
+ * structures are pretty independent, but it helps the XQM keep a
+ * global view of what's going on.
+ */
+/* ARGSUSED */
+STATIC int
+xfs_qm_hold_quotafs_ref(
+	struct xfs_mount *mp)
+{
+	/*
+	 * Need to lock the xfs_Gqm structure for things like this. For example,
+	 * the structure could disappear between the entry to this routine and
+	 * a HOLD operation if not locked.
+	 */
+	XFS_QM_LOCK(xfs_Gqm);
+
+	if (xfs_Gqm == NULL)
+		xfs_Gqm = xfs_Gqm_init();
+	/*
+	 * We can keep a list of all filesystems with quotas mounted for
+	 * debugging and statistical purposes, but ...
+	 * Just take a reference and get out.
+	 */
+	XFS_QM_HOLD(xfs_Gqm);
+	XFS_QM_UNLOCK(xfs_Gqm);
+
+	return 0;
+}
+
+
+/*
+ * Release the reference that a filesystem took at mount time,
+ * so that we know when we need to destroy the entire quota manager.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_qm_rele_quotafs_ref(
+	struct xfs_mount *mp)
+{
+	xfs_dquot_t	*dqp, *nextdqp;
+
+	ASSERT(xfs_Gqm);
+	ASSERT(xfs_Gqm->qm_nrefs > 0);
+
+	/*
+	 * Go thru the freelist and destroy all inactive dquots.
+	 */
+	xfs_qm_freelist_lock(xfs_Gqm);
+
+	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
+	     dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
+		xfs_dqlock(dqp);
+		nextdqp = dqp->dq_flnext;
+		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
+			ASSERT(dqp->q_mount == NULL);
+			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
+			ASSERT(dqp->HL_PREVP == NULL);
+			ASSERT(dqp->MPL_PREVP == NULL);
+			XQM_FREELIST_REMOVE(dqp);
+			xfs_dqunlock(dqp);
+			xfs_qm_dqdestroy(dqp);
+		} else {
+			xfs_dqunlock(dqp);
+		}
+		dqp = nextdqp;
+	}
+	xfs_qm_freelist_unlock(xfs_Gqm);
+
+	/*
+	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
+	 * be restarted.
+	 */
+	XFS_QM_LOCK(xfs_Gqm);
+	XFS_QM_RELE(xfs_Gqm);
+	if (xfs_Gqm->qm_nrefs == 0) {
+		xfs_qm_destroy(xfs_Gqm);
+		xfs_Gqm = NULL;
+	}
+	XFS_QM_UNLOCK(xfs_Gqm);
+}
+
+/*
+ * This is called at mount time from xfs_mountfs to initialize the quotainfo
+ * structure and start the global quotamanager (xfs_Gqm) if it hasn't done
+ * so already.	Note that the superblock has not been read in yet.
+ */
+void
+xfs_qm_mount_quotainit(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	/*
+	 * User or group quotas have to be on.
+	 */
+	ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA));
+
+	/*
+	 * Initialize the flags in the mount structure. From this point
+	 * onwards we look at m_qflags to figure out if quotas are ON/OFF, etc.
+	 * Note that we enforce nothing if accounting is off.
+	 * i.e. XFSMNT_*QUOTA must be ON for XFSMNT_*QUOTAENF.
+	 * It isn't necessary to take the quotaoff lock to do this; this is
+	 * called from mount.
+	 */
+	if (flags & XFSMNT_UQUOTA) {
+		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+		if (flags & XFSMNT_UQUOTAENF)
+			mp->m_qflags |= XFS_UQUOTA_ENFD;
+	}
+	if (flags & XFSMNT_GQUOTA) {
+		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+		if (flags & XFSMNT_GQUOTAENF)
+			mp->m_qflags |= XFS_GQUOTA_ENFD;
+	}
+}
+
+/*
+ * Just destroy the quotainfo structure.
+ */
+void
+xfs_qm_unmount_quotadestroy(
+	xfs_mount_t	*mp)
+{
+	if (mp->m_quotainfo)
+		xfs_qm_destroy_quotainfo(mp);
+}
+
+
+/*
+ * This is called from xfs_mountfs to start quotas and initialize all
+ * necessary data structures like quotainfo.  This is also responsible for
+ * running a quotacheck as necessary.  We are guaranteed that the superblock
+ * is consistently read in at this point.
+ */
+int
+xfs_qm_mount_quotas(
+	xfs_mount_t	*mp,
+	int		mfsi_flags)
+{
+	unsigned long	s;
+	int		error = 0;
+	uint		sbf;
+
+	/*
+	 * If a file system had quotas running earlier, but decided to
+	 * mount without -o quota/uquota/gquota options, revoke the
+	 * quotachecked license, and bail out.
+	 */
+	if (! XFS_IS_QUOTA_ON(mp) &&
+	    (mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) {
+		mp->m_qflags = 0;
+		goto write_changes;
+	}
+
+	/*
+	 * Quotas on realtime volumes are not supported, so we disable
+	 * quotas immediately.
+	 */
+	if (mp->m_sb.sb_rextents) {
+		cmn_err(CE_NOTE,
+			"Cannot turn on quotas for realtime filesystem %s",
+			mp->m_fsname);
+		mp->m_qflags = 0;
+		goto write_changes;
+	}
+
+#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
+	cmn_err(CE_NOTE, "Attempting to turn on disk quotas.");
+#endif
+
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+	/*
+	 * Allocate the quotainfo structure inside the mount struct, and
+	 * create quotainode(s), and change/rev superblock if necessary.
+	 */
+	if ((error = xfs_qm_init_quotainfo(mp))) {
+		/*
+		 * We must turn off quotas.
+		 */
+		ASSERT(mp->m_quotainfo == NULL);
+		mp->m_qflags = 0;
+		goto write_changes;
+	}
+	/*
+	 * If any of the quotas are not consistent, do a quotacheck.
+	 */
+	if (XFS_QM_NEED_QUOTACHECK(mp) &&
+		!(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
+#ifdef DEBUG
+		cmn_err(CE_NOTE, "Doing a quotacheck. Please wait.");
+#endif
+		if ((error = xfs_qm_quotacheck(mp))) {
+			/* Quotacheck has failed and quotas have
+			 * been disabled.
+			 */
+			return XFS_ERROR(error);
+		}
+#ifdef DEBUG
+		cmn_err(CE_NOTE, "Done quotacheck.");
+#endif
+	}
+ write_changes:
+	/*
+	 * We actually don't have to acquire the SB_LOCK at all.
+	 * This can only be called from mount, and that's single threaded. XXX
+	 */
+	s = XFS_SB_LOCK(mp);
+	sbf = mp->m_sb.sb_qflags;
+	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
+	XFS_SB_UNLOCK(mp, s);
+
+	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
+		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
+			/*
+			 * We could only have been turning quotas off.
+			 * We aren't in very good shape, actually, because
+			 * the incore structures are convinced that quotas are
+			 * off, but the on-disk superblock doesn't know that!
+			 */
+			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"XFS mount_quotas: Superblock update failed!");
+		}
+	}
+
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp,
+			"Failed to initialize disk quotas.");
+	}
+	return XFS_ERROR(error);
+}
+
+/*
+ * Called from the vfsops layer.
+ */
+int
+xfs_qm_unmount_quotas(
+	xfs_mount_t	*mp)
+{
+	xfs_inode_t	*uqp, *gqp;
+	int		error = 0;
+
+	/*
+	 * Release the dquots that the root inode, et al. might be holding,
+	 * before we flush quotas and blow away the quotainfo structure.
+	 */
+	ASSERT(mp->m_rootip);
+	xfs_qm_dqdetach(mp->m_rootip);
+	if (mp->m_rbmip)
+		xfs_qm_dqdetach(mp->m_rbmip);
+	if (mp->m_rsumip)
+		xfs_qm_dqdetach(mp->m_rsumip);
+
+	/*
+	 * Flush out the quota inodes.
+	 */
+	uqp = gqp = NULL;
+	if (mp->m_quotainfo) {
+		if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
+			xfs_ilock(uqp, XFS_ILOCK_EXCL);
+			xfs_iflock(uqp);
+			error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
+			xfs_iunlock(uqp, XFS_ILOCK_EXCL);
+			if (unlikely(error == EFSCORRUPTED)) {
+				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
+						 XFS_ERRLEVEL_LOW, mp);
+				goto out;
+			}
+		}
+		if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
+			xfs_ilock(gqp, XFS_ILOCK_EXCL);
+			xfs_iflock(gqp);
+			error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
+			xfs_iunlock(gqp, XFS_ILOCK_EXCL);
+			if (unlikely(error == EFSCORRUPTED)) {
+				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
+						 XFS_ERRLEVEL_LOW, mp);
+				goto out;
+			}
+		}
+	}
+	if (uqp) {
+		 XFS_PURGE_INODE(uqp);
+		 mp->m_quotainfo->qi_uquotaip = NULL;
+	}
+	if (gqp) {
+		XFS_PURGE_INODE(gqp);
+		mp->m_quotainfo->qi_gquotaip = NULL;
+	}
+out:
+	return XFS_ERROR(error);
+}
+
+/*
+ * Flush all dquots of the given file system to disk. The dquots are
+ * _not_ purged from memory here, just their data written to disk.
+ */
+int
+xfs_qm_dqflush_all(
+	xfs_mount_t	*mp,
+	int		flags)
+{
+	int		recl;
+	xfs_dquot_t	*dqp;
+	int		niters;
+	int		error;
+
+	if (mp->m_quotainfo == NULL)
+		return (0);
+	niters = 0;
+again:
+	xfs_qm_mplist_lock(mp);
+	FOREACH_DQUOT_IN_MP(dqp, mp) {
+		xfs_dqlock(dqp);
+		if (! XFS_DQ_IS_DIRTY(dqp)) {
+			xfs_dqunlock(dqp);
+			continue;
+		}
+		xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
+		/* XXX a sentinel would be better */
+		recl = XFS_QI_MPLRECLAIMS(mp);
+		if (! xfs_qm_dqflock_nowait(dqp)) {
+			/*
+			 * If we can't grab the flush lock then check
+			 * to see if the dquot has been flushed delayed
+			 * write.  If so, grab its buffer and send it
+			 * out immediately.  We'll be able to acquire
+			 * the flush lock when the I/O completes.
+			 */
+			xfs_qm_dqflock_pushbuf_wait(dqp);
+		}
+		/*
+		 * Let go of the mplist lock. We don't want to hold it
+		 * across a disk write.
+		 */
+		xfs_qm_mplist_unlock(mp);
+		error = xfs_qm_dqflush(dqp, flags);
+		xfs_dqunlock(dqp);
+		if (error)
+			return (error);
+
+		xfs_qm_mplist_lock(mp);
+		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+			xfs_qm_mplist_unlock(mp);
+			/* XXX restart limit */
+			goto again;
+		}
+	}
+
+	xfs_qm_mplist_unlock(mp);
+	/* return ! busy */
+	return (0);
+}
+/*
+ * Release the group dquot pointers the user dquots may be
+ * carrying around as a hint. mplist is locked on entry and exit.
+ */
+STATIC void
+xfs_qm_detach_gdquots(
+	xfs_mount_t	*mp)
+{
+	xfs_dquot_t	*dqp, *gdqp;
+	int		nrecl;
+
+ again:
+	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+	dqp = XFS_QI_MPLNEXT(mp);
+	while (dqp) {
+		xfs_dqlock(dqp);
+		if ((gdqp = dqp->q_gdquot)) {
+			xfs_dqlock(gdqp);
+			dqp->q_gdquot = NULL;
+		}
+		xfs_dqunlock(dqp);
+
+		if (gdqp) {
+			/*
+			 * Can't hold the mplist lock across a dqput.
+			 * XXXmust convert to marker based iterations here.
+			 */
+			nrecl = XFS_QI_MPLRECLAIMS(mp);
+			xfs_qm_mplist_unlock(mp);
+			xfs_qm_dqput(gdqp);
+
+			xfs_qm_mplist_lock(mp);
+			if (nrecl != XFS_QI_MPLRECLAIMS(mp))
+				goto again;
+		}
+		dqp = dqp->MPL_NEXT;
+	}
+}
+
+/*
+ * Go through all the incore dquots of this file system and take them
+ * off the mplist and hashlist, if the dquot type matches the dqtype
+ * parameter. This is used when turning off quota accounting for
+ * users and/or groups, as well as when the filesystem is unmounting.
+ */
+STATIC int
+xfs_qm_dqpurge_int(
+	xfs_mount_t	*mp,
+	uint		flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */
+{
+	xfs_dquot_t	*dqp;
+	uint		dqtype;
+	int		nrecl;
+	xfs_dquot_t	*nextdqp;
+	int		nmisses;
+
+	if (mp->m_quotainfo == NULL)
+		return (0);
+
+	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
+	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
+
+	xfs_qm_mplist_lock(mp);
+
+	/*
+	 * In the first pass through all incore dquots of this filesystem,
+	 * we release the group dquot pointers the user dquots may be
+	 * carrying around as a hint. We need to do this irrespective of
+	 * what's being turned off.
+	 */
+	xfs_qm_detach_gdquots(mp);
+
+      again:
+	nmisses = 0;
+	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+	/*
+	 * Try to get rid of all of the unwanted dquots. The idea is to
+	 * get them off mplist and hashlist, but leave them on freelist.
+	 */
+	dqp = XFS_QI_MPLNEXT(mp);
+	while (dqp) {
+		/*
+		 * It's OK to look at the type without taking dqlock here.
+		 * We're holding the mplist lock here, and that's needed for
+		 * a dqreclaim.
+		 */
+		if ((dqp->dq_flags & dqtype) == 0) {
+			dqp = dqp->MPL_NEXT;
+			continue;
+		}
+
+		if (! xfs_qm_dqhashlock_nowait(dqp)) {
+			nrecl = XFS_QI_MPLRECLAIMS(mp);
+			xfs_qm_mplist_unlock(mp);
+			XFS_DQ_HASH_LOCK(dqp->q_hash);
+			xfs_qm_mplist_lock(mp);
+
+			/*
+			 * XXXTheoretically, we can get into a very long
+			 * ping pong game here.
+			 * No one can be adding dquots to the mplist at
+			 * this point, but somebody might be taking things off.
+			 */
+			if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
+				XFS_DQ_HASH_UNLOCK(dqp->q_hash);
+				goto again;
+			}
+		}
+
+		/*
+		 * Take the dquot off the mplist and hashlist. It may remain on
+		 * freelist in INACTIVE state.
+		 */
+		nextdqp = dqp->MPL_NEXT;
+		nmisses += xfs_qm_dqpurge(dqp, flags);
+		dqp = nextdqp;
+	}
+	xfs_qm_mplist_unlock(mp);
+	return nmisses;
+}
+
+int
+xfs_qm_dqpurge_all(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	int		ndquots;
+
+	/*
+	 * Purge the dquot cache.
+	 * None of the dquots should really be busy at this point.
+	 */
+	if (mp->m_quotainfo) {
+		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
+			delay(ndquots * 10);
+		}
+	}
+	return 0;
+}
+
+STATIC int
+xfs_qm_dqattach_one(
+	xfs_inode_t	*ip,
+	xfs_dqid_t	id,
+	uint		type,
+	uint		doalloc,
+	uint		dolock,
+	xfs_dquot_t	*udqhint, /* hint */
+	xfs_dquot_t	**IO_idqpp)
+{
+	xfs_dquot_t	*dqp;
+	int		error;
+
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	error = 0;
+	/*
+	 * See if we already have it in the inode itself. IO_idqpp is
+	 * &i_udquot or &i_gdquot. This made the code look weird, but
+	 * made the logic a lot simpler.
+	 */
+	if ((dqp = *IO_idqpp)) {
+		if (dolock)
+			xfs_dqlock(dqp);
+		xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
+		goto done;
+	}
+
+	/*
+	 * udqhint is the i_udquot field in inode, and is non-NULL only
+	 * when the type arg is XFS_DQ_GROUP. Its purpose is to save a
+	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
+	 * the user dquot.
+	 */
+	ASSERT(!udqhint || type == XFS_DQ_GROUP);
+	if (udqhint && !dolock)
+		xfs_dqlock(udqhint);
+
+	/*
+	 * No need to take dqlock to look at the id.
+	 * The ID can't change until it gets reclaimed, and it won't
+	 * be reclaimed as long as we have a ref from inode and we hold
+	 * the ilock.
+	 */
+	if (udqhint &&
+	    (dqp = udqhint->q_gdquot) &&
+	    (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id)) {
+		ASSERT(XFS_DQ_IS_LOCKED(udqhint));
+		xfs_dqlock(dqp);
+		XFS_DQHOLD(dqp);
+		ASSERT(*IO_idqpp == NULL);
+		*IO_idqpp = dqp;
+		if (!dolock) {
+			xfs_dqunlock(dqp);
+			xfs_dqunlock(udqhint);
+		}
+		goto done;
+	}
+	/*
+	 * We can't hold a dquot lock when we call the dqget code.
+	 * We'll deadlock in no time, because of (not conforming to)
+	 * lock ordering - the inode lock comes before any dquot lock,
+	 * and we may drop and reacquire the ilock in xfs_qm_dqget().
+	 */
+	if (udqhint)
+		xfs_dqunlock(udqhint);
+	/*
+	 * Find the dquot from somewhere. This bumps the
+	 * reference count of dquot and returns it locked.
+	 * This can return ENOENT if dquot didn't exist on
+	 * disk and we didn't ask it to allocate;
+	 * ESRCH if quotas got turned off suddenly.
+	 */
+	if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
+				 doalloc|XFS_QMOPT_DOWARN, &dqp))) {
+		if (udqhint && dolock)
+			xfs_dqlock(udqhint);
+		goto done;
+	}
+
+	xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
+	/*
+	 * dqget may have dropped and re-acquired the ilock, but it guarantees
+	 * that the dquot returned is the one that should go in the inode.
+	 */
+	*IO_idqpp = dqp;
+	ASSERT(dqp);
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	if (! dolock) {
+		xfs_dqunlock(dqp);
+		goto done;
+	}
+	if (! udqhint)
+		goto done;
+
+	ASSERT(udqhint);
+	ASSERT(dolock);
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	if (! xfs_qm_dqlock_nowait(udqhint)) {
+		xfs_dqunlock(dqp);
+		xfs_dqlock(udqhint);
+		xfs_dqlock(dqp);
+	}
+      done:
+#ifdef QUOTADEBUG
+	if (udqhint) {
+		if (dolock)
+			ASSERT(XFS_DQ_IS_LOCKED(udqhint));
+	}
+	if (! error) {
+		if (dolock)
+			ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	}
+#endif
+	return (error);
+}
+
+
+/*
+ * Given a udquot and gdquot, attach a ptr to the group dquot in the
+ * udquot as a hint for future lookups. The idea sounds simple, but the
+ * execution isn't, because the udquot might have a group dquot attached
+ * already and getting rid of that gets us into lock ordering constraints.
+ * The process is complicated more by the fact that the dquots may or may not
+ * be locked on entry.
+ */
+STATIC void
+xfs_qm_dqattach_grouphint(
+	xfs_dquot_t	*udq,
+	xfs_dquot_t	*gdq,
+	uint		locked)
+{
+	xfs_dquot_t	*tmp;
+
+#ifdef QUOTADEBUG
+	if (locked) {
+		ASSERT(XFS_DQ_IS_LOCKED(udq));
+		ASSERT(XFS_DQ_IS_LOCKED(gdq));
+	}
+#endif
+	if (! locked)
+		xfs_dqlock(udq);
+
+	if ((tmp = udq->q_gdquot)) {
+		if (tmp == gdq) {
+			if (! locked)
+				xfs_dqunlock(udq);
+			return;
+		}
+
+		udq->q_gdquot = NULL;
+		/*
+		 * We can't keep any dqlocks when calling dqrele,
+		 * because the freelist lock comes before dqlocks.
+		 */
+		xfs_dqunlock(udq);
+		if (locked)
+			xfs_dqunlock(gdq);
+		/*
+		 * we took a hard reference once upon a time in dqget,
+		 * so give it back when the udquot no longer points at it
+		 * dqput() does the unlocking of the dquot.
+		 */
+		xfs_qm_dqrele(tmp);
+
+		xfs_dqlock(udq);
+		xfs_dqlock(gdq);
+
+	} else {
+		ASSERT(XFS_DQ_IS_LOCKED(udq));
+		if (! locked) {
+			xfs_dqlock(gdq);
+		}
+	}
+
+	ASSERT(XFS_DQ_IS_LOCKED(udq));
+	ASSERT(XFS_DQ_IS_LOCKED(gdq));
+	/*
+	 * Somebody could have attached a gdquot here,
+	 * when we dropped the uqlock. If so, just do nothing.
+	 */
+	if (udq->q_gdquot == NULL) {
+		XFS_DQHOLD(gdq);
+		udq->q_gdquot = gdq;
+	}
+	if (! locked) {
+		xfs_dqunlock(gdq);
+		xfs_dqunlock(udq);
+	}
+}
+
+
+/*
+ * Given a locked inode, attach dquot(s) to it, taking UQUOTAON / GQUOTAON
+ * into account.
+ * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
+ * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
+ * much made this code a complete mess, but it has been pretty useful.
+ * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
+ * Inode may get unlocked and relocked in here, and the caller must deal with
+ * the consequences.
+ */
+int
+xfs_qm_dqattach(
+	xfs_inode_t	*ip,
+	uint		flags)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	uint		nquotas = 0;
+	int		error = 0;
+
+	if ((! XFS_IS_QUOTA_ON(mp)) ||
+	    (! XFS_NOT_DQATTACHED(mp, ip)) ||
+	    (ip->i_ino == mp->m_sb.sb_uquotino) ||
+	    (ip->i_ino == mp->m_sb.sb_gquotino))
+		return (0);
+
+	ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
+	       XFS_ISLOCKED_INODE_EXCL(ip));
+
+	if (! (flags & XFS_QMOPT_ILOCKED))
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
+						flags & XFS_QMOPT_DQALLOC,
+						flags & XFS_QMOPT_DQLOCK,
+						NULL, &ip->i_udquot);
+		if (error)
+			goto done;
+		nquotas++;
+	}
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
+						flags & XFS_QMOPT_DQALLOC,
+						flags & XFS_QMOPT_DQLOCK,
+						ip->i_udquot, &ip->i_gdquot);
+		/*
+		 * Don't worry about the udquot that we may have
+		 * attached above. It'll get detached, if not already.
+		 */
+		if (error)
+			goto done;
+		nquotas++;
+	}
+
+	/*
+	 * Attach this group quota to the user quota as a hint.
+	 * This WON'T, in general, result in a thrash.
+	 */
+	if (nquotas == 2) {
+		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+		ASSERT(ip->i_udquot);
+		ASSERT(ip->i_gdquot);
+
+		/*
+		 * We may or may not have the i_udquot locked at this point,
+		 * but this check is OK since we don't depend on the i_gdquot to
+		 * be accurate 100% all the time. It is just a hint, and this
+		 * will succeed in general.
+		 */
+		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
+			goto done;
+		/*
+		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
+		 */
+		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
+					 flags & XFS_QMOPT_DQLOCK);
+	}
+
+      done:
+
+#ifdef QUOTADEBUG
+	if (! error) {
+		if (ip->i_udquot) {
+			if (flags & XFS_QMOPT_DQLOCK)
+				ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
+		}
+		if (ip->i_gdquot) {
+			if (flags & XFS_QMOPT_DQLOCK)
+				ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
+		}
+		if (XFS_IS_UQUOTA_ON(mp))
+			ASSERT(ip->i_udquot);
+		if (XFS_IS_GQUOTA_ON(mp))
+			ASSERT(ip->i_gdquot);
+	}
+#endif
+
+	if (! (flags & XFS_QMOPT_ILOCKED))
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+#ifdef QUOTADEBUG
+	else
+		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+#endif
+	return (error);
+}
+
+/*
+ * Release dquots (and their references) if any.
+ * The inode should be locked EXCL except when this is called by
+ * xfs_ireclaim.
+ */
+void
+xfs_qm_dqdetach(
+	xfs_inode_t	*ip)
+{
+	if (!(ip->i_udquot || ip->i_gdquot))
+		return;
+
+	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
+	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
+	if (ip->i_udquot)
+		xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
+	if (ip->i_udquot) {
+		xfs_qm_dqrele(ip->i_udquot);
+		ip->i_udquot = NULL;
+	}
+	if (ip->i_gdquot) {
+		xfs_qm_dqrele(ip->i_gdquot);
+		ip->i_gdquot = NULL;
+	}
+}
+
+/*
+ * This is called by VFS_SYNC and the flags arg determines the caller,
+ * and its motives, as done in xfs_sync.
+ *
+ * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
+ * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25
+ * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
+ */
+
+int
+xfs_qm_sync(
+	xfs_mount_t	*mp,
+	short		flags)
+{
+	int		recl, restarts;
+	xfs_dquot_t	*dqp;
+	uint		flush_flags;
+	boolean_t	nowait;
+	int		error;
+
+	restarts = 0;
+	/*
+	 * We won't block unless we are asked to.
+	 */
+	nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
+
+  again:
+	xfs_qm_mplist_lock(mp);
+	/*
+	 * dqpurge_all() also takes the mplist lock and iterates through all dquots
+	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
+	 * when we have the mplist lock, we know that dquots will be consistent
+	 * as long as we have it locked.
+	 */
+	if (! XFS_IS_QUOTA_ON(mp)) {
+		xfs_qm_mplist_unlock(mp);
+		return (0);
+	}
+	FOREACH_DQUOT_IN_MP(dqp, mp) {
+		/*
+		 * If this is vfs_sync calling, then skip the dquots that
+		 * don't 'seem' to be dirty, i.e. don't acquire dqlock.
+		 * This is very similar to what xfs_sync does with inodes.
+		 */
+		if (flags & SYNC_BDFLUSH) {
+			if (! XFS_DQ_IS_DIRTY(dqp))
+				continue;
+		}
+
+		if (nowait) {
+			/*
+			 * Try to acquire the dquot lock. We are NOT out of
+			 * lock order, but we just don't want to wait for this
+			 * lock, unless somebody wanted us to.
+			 */
+			if (! xfs_qm_dqlock_nowait(dqp))
+				continue;
+		} else {
+			xfs_dqlock(dqp);
+		}
+
+		/*
+		 * Now, find out for sure if this dquot is dirty or not.
+		 */
+		if (! XFS_DQ_IS_DIRTY(dqp)) {
+			xfs_dqunlock(dqp);
+			continue;
+		}
+
+		/* XXX a sentinel would be better */
+		recl = XFS_QI_MPLRECLAIMS(mp);
+		if (! xfs_qm_dqflock_nowait(dqp)) {
+			if (nowait) {
+				xfs_dqunlock(dqp);
+				continue;
+			}
+			/*
+			 * If we can't grab the flush lock, and the caller
+			 * really wanted us to give this our best shot,
+			 * see if we can give a push to the buffer before we wait
+			 * on the flush lock. At this point, we know that
+			 * even though the dquot is being flushed,
+			 * it has (new) dirty data.
+			 */
+			xfs_qm_dqflock_pushbuf_wait(dqp);
+		}
+		/*
+		 * Let go of the mplist lock. We don't want to hold it
+		 * across a disk write
+		 */
+		flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
+		xfs_qm_mplist_unlock(mp);
+		xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
+		error = xfs_qm_dqflush(dqp, flush_flags);
+		xfs_dqunlock(dqp);
+		if (error && XFS_FORCED_SHUTDOWN(mp))
+			return(0);	/* Need to prevent umount failure */
+		else if (error)
+			return (error);
+
+		xfs_qm_mplist_lock(mp);
+		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
+				break;
+
+			xfs_qm_mplist_unlock(mp);
+			goto again;
+		}
+	}
+
+	xfs_qm_mplist_unlock(mp);
+	return (0);
+}
+
+
+/*
+ * This initializes all the quota information that's kept in the
+ * mount structure
+ */
+int
+xfs_qm_init_quotainfo(
+	xfs_mount_t	*mp)
+{
+	xfs_quotainfo_t *qinf;
+	int		error;
+	xfs_dquot_t	*dqp;
+
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+	/*
+	 * Tell XQM that we exist as soon as possible.
+	 */
+	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
+		return (error);
+	}
+
+	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+
+	/*
+	 * See if quotainodes are setup, and if not, allocate them,
+	 * and change the superblock accordingly.
+	 */
+	if ((error = xfs_qm_init_quotainos(mp))) {
+		kmem_free(qinf, sizeof(xfs_quotainfo_t));
+		mp->m_quotainfo = NULL;
+		return (error);
+	}
+
+	spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
+	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
+	qinf->qi_dqreclaims = 0;
+
+	/* mutex used to serialize quotaoffs */
+	mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");
+
+	/* Precalc some constants */
+	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
+	ASSERT(qinf->qi_dqchunklen);
+	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
+	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
+
+	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
+
+	/*
+	 * We try to get the limits from the superuser's limits fields.
+	 * This is quite hacky, but it is standard quota practice.
+	 * We look at the USR dquot with id == 0 first, but if user quotas
+	 * are not enabled we go to the GRP dquot with id == 0.
+	 * We don't really care to keep separate default limits for user
+	 * and group quotas, at least not at this point.
+	 */
+	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
+			     (XFS_IS_UQUOTA_RUNNING(mp)) ?
+			     XFS_DQ_USER : XFS_DQ_GROUP,
+			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
+			     &dqp);
+	if (! error) {
+		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
+
+		/*
+		 * The warnings and timers set the grace period given to
+		 * a user or group before he or she can no longer perform
+		 * any more writing. If it is zero, a default is used.
+		 */
+		qinf->qi_btimelimit =
+				INT_GET(ddqp->d_btimer, ARCH_CONVERT) ?
+				INT_GET(ddqp->d_btimer, ARCH_CONVERT) :
+				XFS_QM_BTIMELIMIT;
+		qinf->qi_itimelimit =
+				INT_GET(ddqp->d_itimer, ARCH_CONVERT) ?
+				INT_GET(ddqp->d_itimer, ARCH_CONVERT) :
+				XFS_QM_ITIMELIMIT;
+		qinf->qi_rtbtimelimit =
+				INT_GET(ddqp->d_rtbtimer, ARCH_CONVERT) ?
+				INT_GET(ddqp->d_rtbtimer, ARCH_CONVERT) :
+				XFS_QM_RTBTIMELIMIT;
+		qinf->qi_bwarnlimit =
+				INT_GET(ddqp->d_bwarns, ARCH_CONVERT) ?
+				INT_GET(ddqp->d_bwarns, ARCH_CONVERT) :
+				XFS_QM_BWARNLIMIT;
+		qinf->qi_iwarnlimit =
+				INT_GET(ddqp->d_iwarns, ARCH_CONVERT) ?
+				INT_GET(ddqp->d_iwarns, ARCH_CONVERT) :
+				XFS_QM_IWARNLIMIT;
+		qinf->qi_bhardlimit =
+				INT_GET(ddqp->d_blk_hardlimit, ARCH_CONVERT);
+		qinf->qi_bsoftlimit =
+				INT_GET(ddqp->d_blk_softlimit, ARCH_CONVERT);
+		qinf->qi_ihardlimit =
+				INT_GET(ddqp->d_ino_hardlimit, ARCH_CONVERT);
+		qinf->qi_isoftlimit =
+				INT_GET(ddqp->d_ino_softlimit, ARCH_CONVERT);
+		qinf->qi_rtbhardlimit =
+				INT_GET(ddqp->d_rtb_hardlimit, ARCH_CONVERT);
+		qinf->qi_rtbsoftlimit =
+				INT_GET(ddqp->d_rtb_softlimit, ARCH_CONVERT);
+
+		/*
+		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
+		 * we don't want this dquot cached. We haven't done a
+		 * quotacheck yet, and quotacheck doesn't like incore dquots.
+		 */
+		xfs_qm_dqdestroy(dqp);
+	} else {
+		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
+		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
+		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
+		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
+		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
+	}
+
+	return (0);
+}
+
+
+/*
+ * Gets called when unmounting a filesystem or when all quotas get
+ * turned off.
+ * This purges the quota inodes, destroys locks and frees itself.
+ */
+void
+xfs_qm_destroy_quotainfo(
+	xfs_mount_t	*mp)
+{
+	xfs_quotainfo_t *qi;
+
+	qi = mp->m_quotainfo;
+	ASSERT(qi != NULL);
+	ASSERT(xfs_Gqm != NULL);
+
+	/*
+	 * Release the reference that XQM kept, so that we know
+	 * when the XQM structure should be freed. We cannot assume
+	 * that xfs_Gqm is non-null after this point.
+	 */
+	xfs_qm_rele_quotafs_ref(mp);
+
+	spinlock_destroy(&qi->qi_pinlock);
+	xfs_qm_list_destroy(&qi->qi_dqlist);
+
+	if (qi->qi_uquotaip) {
+		XFS_PURGE_INODE(qi->qi_uquotaip);
+		qi->qi_uquotaip = NULL; /* paranoia */
+	}
+	if (qi->qi_gquotaip) {
+		XFS_PURGE_INODE(qi->qi_gquotaip);
+		qi->qi_gquotaip = NULL;
+	}
+	mutex_destroy(&qi->qi_quotaofflock);
+	kmem_free(qi, sizeof(xfs_quotainfo_t));
+	mp->m_quotainfo = NULL;
+}
+
+
+
+/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
+
+/* ARGSUSED */
+STATIC void
+xfs_qm_list_init(
+	xfs_dqlist_t	*list,
+	char		*str,
+	int		n)
+{
+	mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
+	list->qh_next = NULL;
+	list->qh_version = 0;
+	list->qh_nelems = 0;
+}
+
+STATIC void
+xfs_qm_list_destroy(
+	xfs_dqlist_t	*list)
+{
+	mutex_destroy(&(list->qh_lock));
+}
+
+
+/*
+ * Stripped down version of dqattach. This doesn't attach, or even look at the
+ * dquots attached to the inode. The rationale is that there won't be any
+ * attached at the time this is called from quotacheck.
+ */
+STATIC int
+xfs_qm_dqget_noattach(
+	xfs_inode_t	*ip,
+	xfs_dquot_t	**O_udqpp,
+	xfs_dquot_t	**O_gdqpp)
+{
+	int		error;
+	xfs_mount_t	*mp;
+	xfs_dquot_t	*udqp, *gdqp;
+
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	mp = ip->i_mount;
+	udqp = NULL;
+	gdqp = NULL;
+
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		ASSERT(ip->i_udquot == NULL);
+		/*
+		 * We want the dquot allocated if it doesn't exist.
+		 */
+		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
+					 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
+					 &udqp))) {
+			/*
+			 * Shouldn't be able to turn off quotas here.
+			 */
+			ASSERT(error != ESRCH);
+			ASSERT(error != ENOENT);
+			return (error);
+		}
+		ASSERT(udqp);
+	}
+
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		ASSERT(ip->i_gdquot == NULL);
+		if (udqp)
+			xfs_dqunlock(udqp);
+		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_gid, XFS_DQ_GROUP,
+					 XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
+					 &gdqp))) {
+			if (udqp)
+				xfs_qm_dqrele(udqp);
+			ASSERT(error != ESRCH);
+			ASSERT(error != ENOENT);
+			return (error);
+		}
+		ASSERT(gdqp);
+
+		/* Reacquire the locks in the right order */
+		if (udqp) {
+			if (! xfs_qm_dqlock_nowait(udqp)) {
+				xfs_dqunlock(gdqp);
+				xfs_dqlock(udqp);
+				xfs_dqlock(gdqp);
+			}
+		}
+	}
+
+	*O_udqpp = udqp;
+	*O_gdqpp = gdqp;
+
+#ifdef QUOTADEBUG
+	if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
+	if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
+#endif
+	return (0);
+}
+
+/*
+ * Create an inode and return with a reference already taken, but unlocked.
+ * This is how we create quota inodes.
+ */
+STATIC int
+xfs_qm_qino_alloc(
+	xfs_mount_t	*mp,
+	xfs_inode_t	**ip,
+	__int64_t	sbfields,
+	uint		flags)
+{
+	xfs_trans_t	*tp;
+	int		error;
+	unsigned long s;
+	cred_t		zerocr;
+	int		committed;
+
+	tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE);
+	if ((error = xfs_trans_reserve(tp,
+				      XFS_QM_QINOCREATE_SPACE_RES(mp),
+				      XFS_CREATE_LOG_RES(mp), 0,
+				      XFS_TRANS_PERM_LOG_RES,
+				      XFS_CREATE_LOG_COUNT))) {
+		xfs_trans_cancel(tp, 0);
+		return (error);
+	}
+	memset(&zerocr, 0, sizeof(zerocr));
+
+	if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
+				   &zerocr, 0, 1, ip, &committed))) {
+		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+				 XFS_TRANS_ABORT);
+		return (error);
+	}
+
+	/*
+	 * Keep an extra reference to this quota inode. This inode is
+	 * locked exclusively and joined to the transaction already.
+	 */
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
+	VN_HOLD(XFS_ITOV((*ip)));
+
+	/*
+	 * Make the changes in the superblock, and log those too.
+	 * sbfields arg may contain fields other than *QUOTINO;
+	 * VERSIONNUM for example.
+	 */
+	s = XFS_SB_LOCK(mp);
+	if (flags & XFS_QMOPT_SBVERSION) {
+#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
+		unsigned oldv = mp->m_sb.sb_versionnum;
+#endif
+		ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
+		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
+				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
+		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
+			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
+
+		XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
+		mp->m_sb.sb_uquotino = NULLFSINO;
+		mp->m_sb.sb_gquotino = NULLFSINO;
+
+		/* qflags will get updated _after_ quotacheck */
+		mp->m_sb.sb_qflags = 0;
+#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
+		cmn_err(CE_NOTE,
+			"Old superblock version %x, converting to %x.",
+			oldv, mp->m_sb.sb_versionnum);
+#endif
+	}
+	if (flags & XFS_QMOPT_UQUOTA)
+		mp->m_sb.sb_uquotino = (*ip)->i_ino;
+	else
+		mp->m_sb.sb_gquotino = (*ip)->i_ino;
+	XFS_SB_UNLOCK(mp, s);
+	xfs_mod_sb(tp, sbfields);
+
+	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
+				     NULL))) {
+		xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
+		return (error);
+	}
+	return (0);
+}
+
+
+STATIC int
+xfs_qm_reset_dqcounts(
+	xfs_mount_t	*mp,
+	xfs_buf_t	*bp,
+	xfs_dqid_t	id,
+	uint		type)
+{
+	xfs_disk_dquot_t	*ddq;
+	int			j;
+
+	xfs_buftrace("RESET DQUOTS", bp);
+	/*
+	 * Reset all counters and timers. They'll be
+	 * started afresh by xfs_qm_quotacheck.
+	 */
+#ifdef DEBUG
+	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
+	do_div(j, sizeof(xfs_dqblk_t));
+	ASSERT(XFS_QM_DQPERBLK(mp) == j);
+#endif
+	ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
+	for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
+		/*
+		 * Do a sanity check, and if needed, repair the dqblk. Don't
+		 * output any warnings because it's perfectly possible to
+		 * find uninitialized dquot blks. See comment in xfs_qm_dqcheck.
+		 */
+		(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+				      "xfs_quotacheck");
+		INT_SET(ddq->d_bcount, ARCH_CONVERT, 0ULL);
+		INT_SET(ddq->d_icount, ARCH_CONVERT, 0ULL);
+		INT_SET(ddq->d_rtbcount, ARCH_CONVERT, 0ULL);
+		INT_SET(ddq->d_btimer, ARCH_CONVERT, (time_t)0);
+		INT_SET(ddq->d_itimer, ARCH_CONVERT, (time_t)0);
+		INT_SET(ddq->d_bwarns, ARCH_CONVERT, 0UL);
+		INT_SET(ddq->d_iwarns, ARCH_CONVERT, 0UL);
+		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
+	}
+
+	return (0);
+}
+
+STATIC int
+xfs_qm_dqiter_bufs(
+	xfs_mount_t	*mp,
+	xfs_dqid_t	firstid,
+	xfs_fsblock_t	bno,
+	xfs_filblks_t	blkcnt,
+	uint		flags)
+{
+	xfs_buf_t	*bp;
+	int		error;
+	int		notcommitted;
+	int		incr;
+
+	ASSERT(blkcnt > 0);
+	notcommitted = 0;
+	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
+		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
+	error = 0;
+
+	/*
+	 * Blkcnt arg can be a very big number, and might even be
+	 * larger than the log itself. So, we have to break it up into
+	 * manageable-sized transactions.
+	 * Note that we don't start a permanent transaction here; we might
+	 * not be able to get a log reservation for the whole thing up front,
+	 * and we don't really care to either, because we just discard
+	 * everything if we were to crash in the middle of this loop.
+	 */
+	while (blkcnt--) {
+		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+			      XFS_FSB_TO_DADDR(mp, bno),
+			      (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
+		if (error)
+			break;
+
+		(void) xfs_qm_reset_dqcounts(mp, bp, firstid,
+					     flags & XFS_QMOPT_UQUOTA ?
+					     XFS_DQ_USER : XFS_DQ_GROUP);
+		xfs_bdwrite(mp, bp);
+		/*
+		 * Go to the next block.
+		 */
+		bno++;
+		firstid += XFS_QM_DQPERBLK(mp);
+	}
+	return (error);
+}
+
+/*
+ * Iterate over all allocated USR/GRP dquots in the system, calling a
+ * caller supplied function for every chunk of dquots that we find.
+ */
+STATIC int
+xfs_qm_dqiterate(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*qip,
+	uint		flags)
+{
+	xfs_bmbt_irec_t		*map;
+	int			i, nmaps;	/* number of map entries */
+	int			error;		/* return value */
+	xfs_fileoff_t		lblkno;
+	xfs_filblks_t		maxlblkcnt;
+	xfs_dqid_t		firstid;
+	xfs_fsblock_t		rablkno;
+	xfs_filblks_t		rablkcnt;
+
+	error = 0;
+	/*
+	 * This looks racy, but we can't keep an inode lock across a
+	 * trans_reserve. But, this gets called during quotacheck, and that
+	 * happens only at mount time which is single threaded.
+	 */
+	if (qip->i_d.di_nblocks == 0)
+		return (0);
+
+	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+
+	lblkno = 0;
+	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+	do {
+		nmaps = XFS_DQITER_MAP_SIZE;
+		/*
+		 * We aren't changing the inode itself. Just changing
+		 * some of its data. No new blocks are added here, and
+		 * the inode is never added to the transaction.
+		 */
+		xfs_ilock(qip, XFS_ILOCK_SHARED);
+		error = xfs_bmapi(NULL, qip, lblkno,
+				  maxlblkcnt - lblkno,
+				  XFS_BMAPI_METADATA,
+				  NULL,
+				  0, map, &nmaps, NULL);
+		xfs_iunlock(qip, XFS_ILOCK_SHARED);
+		if (error)
+			break;
+
+		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
+		for (i = 0; i < nmaps; i++) {
+			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
+			ASSERT(map[i].br_blockcount);
+
+
+			lblkno += map[i].br_blockcount;
+
+			if (map[i].br_startblock == HOLESTARTBLOCK)
+				continue;
+
+			firstid = (xfs_dqid_t) map[i].br_startoff *
+				XFS_QM_DQPERBLK(mp);
+			/*
+			 * Do a read-ahead on the next extent.
+			 */
+			if ((i+1 < nmaps) &&
+			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
+				rablkcnt =  map[i+1].br_blockcount;
+				rablkno = map[i+1].br_startblock;
+				while (rablkcnt--) {
+					xfs_baread(mp->m_ddev_targp,
+					       XFS_FSB_TO_DADDR(mp, rablkno),
+					       (int)XFS_QI_DQCHUNKLEN(mp));
+					rablkno++;
+				}
+			}
+			/*
+			 * Iterate thru all the blks in the extent and
+			 * reset the counters of all the dquots inside them.
+			 */
+			if ((error = xfs_qm_dqiter_bufs(mp,
+						       firstid,
+						       map[i].br_startblock,
+						       map[i].br_blockcount,
+						       flags))) {
+				break;
+			}
+		}
+
+		if (error)
+			break;
+	} while (nmaps > 0);
+
+	kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
+
+	return (error);
+}
+
+/*
+ * Called by dqusage_adjust in doing a quotacheck.
+ * Given the inode, and a dquot (either USR or GRP, doesn't matter),
+ * this updates its incore copy as well as the buffer copy. This is
+ * so that once the quotacheck is done, we can just log all the buffers,
+ * as opposed to logging numerous updates to individual dquots.
+ */
+STATIC void
+xfs_qm_quotacheck_dqadjust(
+	xfs_dquot_t		*dqp,
+	xfs_qcnt_t		nblks,
+	xfs_qcnt_t		rtblks)
+{
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
+	/*
+	 * Adjust the inode count and the block count to reflect this inode's
+	 * resource usage.
+	 */
+	INT_MOD(dqp->q_core.d_icount, ARCH_CONVERT, +1);
+	dqp->q_res_icount++;
+	if (nblks) {
+		INT_MOD(dqp->q_core.d_bcount, ARCH_CONVERT, nblks);
+		dqp->q_res_bcount += nblks;
+	}
+	if (rtblks) {
+		INT_MOD(dqp->q_core.d_rtbcount, ARCH_CONVERT, rtblks);
+		dqp->q_res_rtbcount += rtblks;
+	}
+
+	/*
+	 * Set default limits, adjust timers (since we changed usages)
+	 */
+	if (! XFS_IS_SUSER_DQUOT(dqp)) {
+		xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
+		xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
+	}
+
+	dqp->dq_flags |= XFS_DQ_DIRTY;
+}
+
+STATIC int
+xfs_qm_get_rtblks(
+	xfs_inode_t	*ip,
+	xfs_qcnt_t	*O_rtblks)
+{
+	xfs_filblks_t	rtblks;			/* total rt blks */
+	xfs_ifork_t	*ifp;			/* inode fork pointer */
+	xfs_extnum_t	nextents;		/* number of extent entries */
+	xfs_bmbt_rec_t	*base;			/* base of extent array */
+	xfs_bmbt_rec_t	*ep;			/* pointer to an extent entry */
+	int		error;
+
+	ASSERT(XFS_IS_REALTIME_INODE(ip));
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
+			return (error);
+	}
+	rtblks = 0;
+	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+	base = &ifp->if_u1.if_extents[0];
+	for (ep = base; ep < &base[nextents]; ep++)
+		rtblks += xfs_bmbt_get_blockcount(ep);
+	*O_rtblks = (xfs_qcnt_t)rtblks;
+	return (0);
+}
+
+/*
+ * callback routine supplied to bulkstat(). Given an inumber, find its
+ * dquots and update them to account for resources taken by that inode.
+ */
+/* ARGSUSED */
+STATIC int
+xfs_qm_dqusage_adjust(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	void		__user *buffer,	/* not used */
+	int		ubsize,		/* not used */
+	void		*private_data,	/* not used */
+	xfs_daddr_t	bno,		/* starting block of inode cluster */
+	int		*ubused,	/* not used */
+	void		*dip,		/* on-disk inode pointer (not used) */
+	int		*res)		/* result code value */
+{
+	xfs_inode_t	*ip;
+	xfs_dquot_t	*udqp, *gdqp;
+	xfs_qcnt_t	nblks, rtblks;
+	int		error;
+
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+	/*
+	 * rootino must have its resources accounted for, not so with the quota
+	 * inodes.
+	 */
+	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
+		*res = BULKSTAT_RV_NOTHING;
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
+	 * interface expects the inode to be exclusively locked because that's
+	 * the case in all other instances. It's OK that we do this because
+	 * quotacheck is done only at mount time.
+	 */
+	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
+		*res = BULKSTAT_RV_NOTHING;
+		return (error);
+	}
+
+	if (ip->i_d.di_mode == 0) {
+		xfs_iput_new(ip, XFS_ILOCK_EXCL);
+		*res = BULKSTAT_RV_NOTHING;
+		return XFS_ERROR(ENOENT);
+	}
+
+	/*
+	 * Obtain the locked dquots. In case of an error (e.g. allocation
+	 * fails for ENOSPC), we return the negative of the error number
+	 * to bulkstat, so that it can get propagated to quotacheck() and
+	 * make us disable quotas for the file system.
+	 */
+	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
+		xfs_iput(ip, XFS_ILOCK_EXCL);
+		*res = BULKSTAT_RV_GIVEUP;
+		return (error);
+	}
+
+	rtblks = 0;
+	if (! XFS_IS_REALTIME_INODE(ip)) {
+		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
+	} else {
+		/*
+		 * Walk thru the extent list and count the realtime blocks.
+		 */
+		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
+			xfs_iput(ip, XFS_ILOCK_EXCL);
+			if (udqp)
+				xfs_qm_dqput(udqp);
+			if (gdqp)
+				xfs_qm_dqput(gdqp);
+			*res = BULKSTAT_RV_GIVEUP;
+			return (error);
+		}
+		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
+	}
+	ASSERT(ip->i_delayed_blks == 0);
+
+	/*
+	 * We can't release the inode while holding its dquot locks.
+	 * The inode can go into inactive and might try to acquire the dquot locks.
+	 * So, just unlock here and do a vn_rele at the end.
+	 */
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	/*
+	 * Add the (disk blocks and inode) resources occupied by this
+	 * inode to its dquots. We do this adjustment in the incore dquot,
+	 * and also copy the changes to its buffer.
+	 * We don't care about putting these changes in a transaction
+	 * envelope because if we crash in the middle of a 'quotacheck'
+	 * we have to start from the beginning anyway.
+	 * Once we're done, we'll log all the dquot bufs.
+	 *
+	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
+	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
+	 */
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		ASSERT(udqp);
+		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
+		xfs_qm_dqput(udqp);
+	}
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		ASSERT(gdqp);
+		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
+		xfs_qm_dqput(gdqp);
+	}
+	/*
+	 * Now release the inode. This will send it to 'inactive', and
+	 * possibly even free blocks.
+	 */
+	VN_RELE(XFS_ITOV(ip));
+
+	/*
+	 * Go to the next inode.
+	 */
+	*res = BULKSTAT_RV_DIDONE;
+	return (0);
+}
+
+/*
+ * Walk thru all the filesystem inodes and construct a consistent view
+ * of the disk quota world. If the quotacheck fails, disable quotas.
+ */
+int
+xfs_qm_quotacheck(
+	xfs_mount_t	*mp)
+{
+	int		done, count, error;
+	xfs_ino_t	lastino;
+	size_t		structsz;
+	xfs_inode_t	*uip, *gip;
+	uint		flags;
+
+	count = INT_MAX;
+	structsz = 1;
+	lastino = 0;
+	flags = 0;
+
+	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+	/*
+	 * There should be no cached dquots. The (simplistic) quotacheck
+	 * algorithm doesn't like that.
+	 */
+	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
+
+	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
+
+	/*
+	 * First we go thru all the dquots on disk, USR and GRP, and reset
+	 * their counters to zero. We need a clean slate.
+	 * We don't log our changes till later.
+	 */
+	if ((uip = XFS_QI_UQIP(mp))) {
+		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
+			goto error_return;
+		flags |= XFS_UQUOTA_CHKD;
+	}
+
+	if ((gip = XFS_QI_GQIP(mp))) {
+		if ((error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA)))
+			goto error_return;
+		flags |= XFS_GQUOTA_CHKD;
+	}
+
+	do {
+		/*
+		 * Iterate thru all the inodes in the file system,
+		 * adjusting the corresponding dquot counters in core.
+		 */
+		if ((error = xfs_bulkstat(mp, &lastino, &count,
+				     xfs_qm_dqusage_adjust, NULL,
+				     structsz, NULL,
+				     BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED,
+				     &done)))
+			break;
+
+	} while (! done);
+
+	/*
+	 * We can get this error if we couldn't do a dquot allocation inside
+	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
+	 * dirty dquots that might be cached, we just want to get rid of them
+	 * and turn quotaoff. The dquots won't be attached to any of the inodes
+	 * at this point (because we intentionally didn't in dqget_noattach).
+	 */
+	if (error) {
+		xfs_qm_dqpurge_all(mp,
+				   XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
+				   XFS_QMOPT_QUOTAOFF);
+		goto error_return;
+	}
+	/*
+	 * We've made all the changes that we need to make incore.
+	 * Now flush them down to disk buffers.
+	 */
+	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
+
+	/*
+	 * We didn't log anything, because if we crashed, we'll have to
+	 * start the quotacheck from scratch anyway. However, we must make
+	 * sure that our dquot changes are secure before we put the
+	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
+	 * flush.
+	 */
+	XFS_bflush(mp->m_ddev_targp);
+
+	/*
+	 * If one type of quotas is off, then it will lose its
+	 * quotachecked status, since we won't be doing accounting for
+	 * that type anymore.
+	 */
+	mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD);
+	mp->m_qflags |= flags;
+
+	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
+
+ error_return:
+	if (error) {
+		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
+			"Disabling quotas.",
+			mp->m_fsname, error);
+		/*
+		 * We must turn off quotas.
+		 */
+		ASSERT(mp->m_quotainfo != NULL);
+		ASSERT(xfs_Gqm != NULL);
+		xfs_qm_destroy_quotainfo(mp);
+		xfs_mount_reset_sbqflags(mp);
+	} else {
+		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
+	}
+	return (error);
+}
+
+/*
+ * This is called after the superblock has been read in and we're ready to
+ * iget the quota inodes.
+ */
+STATIC int
+xfs_qm_init_quotainos(
+	xfs_mount_t	*mp)
+{
+	xfs_inode_t	*uip, *gip;
+	int		error;
+	__int64_t	sbflags;
+	uint		flags;
+
+	ASSERT(mp->m_quotainfo);
+	uip = gip = NULL;
+	sbflags = 0;
+	flags = 0;
+
+	/*
+	 * Get the uquota and gquota inodes
+	 */
+	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
+		if (XFS_IS_UQUOTA_ON(mp) &&
+		    mp->m_sb.sb_uquotino != NULLFSINO) {
+			ASSERT(mp->m_sb.sb_uquotino > 0);
+			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
+					     0, 0, &uip, 0)))
+				return XFS_ERROR(error);
+		}
+		if (XFS_IS_GQUOTA_ON(mp) &&
+		    mp->m_sb.sb_gquotino != NULLFSINO) {
+			ASSERT(mp->m_sb.sb_gquotino > 0);
+			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
+					     0, 0, &gip, 0))) {
+				if (uip)
+					VN_RELE(XFS_ITOV(uip));
+				return XFS_ERROR(error);
+			}
+		}
+	} else {
+		flags |= XFS_QMOPT_SBVERSION;
+		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
+			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
+	}
+
+	/*
+	 * Create the two inodes, if they don't exist already. The changes
+	 * made above will get added to a transaction and logged in one of
+	 * the qino_alloc calls below.  If the device is readonly,
+	 * temporarily switch to read-write to do this.
+	 */
+	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
+		if ((error = xfs_qm_qino_alloc(mp, &uip,
+					      sbflags | XFS_SB_UQUOTINO,
+					      flags | XFS_QMOPT_UQUOTA)))
+			return XFS_ERROR(error);
+
+		flags &= ~XFS_QMOPT_SBVERSION;
+	}
+	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
+		if ((error = xfs_qm_qino_alloc(mp, &gip,
+					      sbflags | XFS_SB_GQUOTINO,
+					      flags | XFS_QMOPT_GQUOTA))) {
+			if (uip)
+				VN_RELE(XFS_ITOV(uip));
+
+			return XFS_ERROR(error);
+		}
+	}
+
+	XFS_QI_UQIP(mp) = uip;
+	XFS_QI_GQIP(mp) = gip;
+
+	return (0);
+}
+
+
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ * XXXsup merge this with qm_reclaim_one().
+ */
+STATIC int
+xfs_qm_shake_freelist(
+	int howmany)
+{
+	int		nreclaimed;
+	xfs_dqhash_t	*hash;
+	xfs_dquot_t	*dqp, *nextdqp;
+	int		restarts;
+	int		nflushes;
+
+	if (howmany <= 0)
+		return (0);
+
+	nreclaimed = 0;
+	restarts = 0;
+	nflushes = 0;
+
+#ifdef QUOTADEBUG
+	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
+#endif
+	/* lock order is : hashchainlock, freelistlock, mplistlock */
+ tryagain:
+	xfs_qm_freelist_lock(xfs_Gqm);
+
+	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
+	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
+	      nreclaimed < howmany); ) {
+		xfs_dqlock(dqp);
+
+		/*
+		 * We are racing with dqlookup here. Naturally we don't
+		 * want to reclaim a dquot that lookup wants.
+		 */
+		if (dqp->dq_flags & XFS_DQ_WANT) {
+			xfs_dqunlock(dqp);
+			xfs_qm_freelist_unlock(xfs_Gqm);
+			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+				return (nreclaimed);
+			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
+			goto tryagain;
+		}
+
+		/*
+		 * If the dquot is inactive, we are assured that it is
+		 * not on the mplist or the hashlist, and that makes our
+		 * life easier.
+		 */
+		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
+			ASSERT(dqp->q_mount == NULL);
+			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
+			ASSERT(dqp->HL_PREVP == NULL);
+			ASSERT(dqp->MPL_PREVP == NULL);
+			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
+			nextdqp = dqp->dq_flnext;
+			goto off_freelist;
+		}
+
+		ASSERT(dqp->MPL_PREVP);
+		/*
+		 * Try to grab the flush lock. If this dquot is in the process of
+		 * getting flushed to disk, we don't want to reclaim it.
+		 */
+		if (! xfs_qm_dqflock_nowait(dqp)) {
+			xfs_dqunlock(dqp);
+			dqp = dqp->dq_flnext;
+			continue;
+		}
+
+		/*
+		 * We have the flush lock so we know that this is not in the
+		 * process of being flushed. So, if this is dirty, flush it
+		 * DELWRI so that we don't get a freelist infested with
+		 * dirty dquots.
+		 */
+		if (XFS_DQ_IS_DIRTY(dqp)) {
+			xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
+			/*
+			 * We flush it delayed write, so don't bother
+			 * releasing the mplock.
+			 */
+			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
+			dqp = dqp->dq_flnext;
+			continue;
+		}
+		/*
+		 * We're trying to get the hashlock out of order. This races
+		 * with dqlookup; so, we give up and go to the next dquot if
+		 * we couldn't get the hashlock. This way, we won't starve
+		 * a dqlookup process that holds the hashlock that is
+		 * waiting for the freelist lock.
+		 */
+		if (! xfs_qm_dqhashlock_nowait(dqp)) {
+			xfs_dqfunlock(dqp);
+			xfs_dqunlock(dqp);
+			dqp = dqp->dq_flnext;
+			continue;
+		}
+		/*
+		 * This races with dquot allocation code as well as dqflush_all
+		 * and reclaim code. So, if we failed to grab the mplist lock,
+		 * give up everything and start over.
+		 */
+		hash = dqp->q_hash;
+		ASSERT(hash);
+		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
+			/* XXX put a sentinel so that we can come back here */
+			xfs_dqfunlock(dqp);
+			xfs_dqunlock(dqp);
+			XFS_DQ_HASH_UNLOCK(hash);
+			xfs_qm_freelist_unlock(xfs_Gqm);
+			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+				return (nreclaimed);
+			goto tryagain;
+		}
+		xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
+#ifdef QUOTADEBUG
+		cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
+			dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
+#endif
+		ASSERT(dqp->q_nrefs == 0);
+		nextdqp = dqp->dq_flnext;
+		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
+		XQM_HASHLIST_REMOVE(hash, dqp);
+		xfs_dqfunlock(dqp);
+		xfs_qm_mplist_unlock(dqp->q_mount);
+		XFS_DQ_HASH_UNLOCK(hash);
+
+ off_freelist:
+		XQM_FREELIST_REMOVE(dqp);
+		xfs_dqunlock(dqp);
+		nreclaimed++;
+		XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
+		xfs_qm_dqdestroy(dqp);
+		dqp = nextdqp;
+	}
+	xfs_qm_freelist_unlock(xfs_Gqm);
+	return (nreclaimed);
+}
+
+
+/*
+ * The kmem_shake interface is invoked when memory is running low.
+ */
+/* ARGSUSED */
+STATIC int
+xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
+{
+	int	ndqused, nfree, n;
+
+	if (!kmem_shake_allow(gfp_mask))
+		return (0);
+	if (!xfs_Gqm)
+		return (0);
+
+	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
+	/* incore dquots in all f/s's */
+	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
+
+	ASSERT(ndqused >= 0);
+
+	if (nfree <= ndqused && nfree < ndquot)
+		return (0);
+
+	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
+	n = nfree - ndqused - ndquot;		/* # over target */
+
+	return xfs_qm_shake_freelist(MAX(nfree, n));
+}
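
xfs_qm_shake() above sizes its reclaim request from three numbers: the free dquots on the freelist, the dquots actually in use (total incore minus free), and the global ndquot ceiling, with qm_dqfree_ratio setting the desired free:used balance. A standalone sketch that mirrors the same arithmetic with made-up inputs (the function and variable names are illustrative, not the kernel's):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Mirror of the sizing logic in xfs_qm_shake(): given the number of
 * incore dquots, the number currently free, the free:used target ratio
 * and a global ceiling, return the 'howmany' value that would be handed
 * to the freelist shaker (0 means "leave the cache alone").
 */
static int shake_target(int total, int nfree, int ratio, int ceiling)
{
	int ndqused = total - nfree;		/* dquots in use */
	int n;

	if (nfree <= ndqused && nfree < ceiling)
		return 0;			/* cache not oversized yet */

	ndqused *= ratio;			/* desired number of free dquots */
	n = nfree - ndqused - ceiling;		/* number over that target */
	return MAX(nfree, n);
}

int main(void)
{
	/* 5000 incore, 4000 free, ratio 2, ceiling 1024 (made-up numbers) */
	printf("shake up to %d dquots\n", shake_target(5000, 4000, 2, 1024));
	return 0;
}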
+
+
+/*
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
+ */
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
+{
+	xfs_dquot_t	*dqpout;
+	xfs_dquot_t	*dqp;
+	int		restarts;
+	int		nflushes;
+
+	restarts = 0;
+	dqpout = NULL;
+	nflushes = 0;
+
+	/* lock order: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+ startagain:
+	xfs_qm_freelist_lock(xfs_Gqm);
+
+	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
+		xfs_dqlock(dqp);
+
+		/*
+		 * We are racing with dqlookup here. Naturally we don't
+		 * want to reclaim a dquot that lookup wants. We release the
+		 * freelist lock and start over, so that lookup will grab
+		 * both the dquot and the freelistlock.
+		 */
+		if (dqp->dq_flags & XFS_DQ_WANT) {
+			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
+			xfs_dqunlock(dqp);
+			xfs_qm_freelist_unlock(xfs_Gqm);
+			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+				return (NULL);
+			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
+			goto startagain;
+		}
+
+		/*
+		 * If the dquot is inactive, we are assured that it is
+		 * not on the mplist or the hashlist, and that makes our
+		 * life easier.
+		 */
+		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
+			ASSERT(dqp->q_mount == NULL);
+			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
+			ASSERT(dqp->HL_PREVP == NULL);
+			ASSERT(dqp->MPL_PREVP == NULL);
+			XQM_FREELIST_REMOVE(dqp);
+			xfs_dqunlock(dqp);
+			dqpout = dqp;
+			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
+			break;
+		}
+
+		ASSERT(dqp->q_hash);
+		ASSERT(dqp->MPL_PREVP);
+
+		/*
+		 * Try to grab the flush lock. If this dquot is in the process of
+		 * getting flushed to disk, we don't want to reclaim it.
+		 */
+		if (! xfs_qm_dqflock_nowait(dqp)) {
+			xfs_dqunlock(dqp);
+			continue;
+		}
+
+		/*
+		 * We have the flush lock so we know that this is not in the
+		 * process of being flushed. So, if this is dirty, flush it
+		 * DELWRI so that we don't get a freelist infested with
+		 * dirty dquots.
+		 */
+		if (XFS_DQ_IS_DIRTY(dqp)) {
+			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
+			/*
+			 * We flush it delayed write, so don't bother
+			 * releasing the freelist lock.
+			 */
+			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
+			continue;
+		}
+
+		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
+			xfs_dqfunlock(dqp);
+			xfs_dqunlock(dqp);
+			continue;
+		}
+
+		if (! xfs_qm_dqhashlock_nowait(dqp))
+			goto mplistunlock;
+
+		ASSERT(dqp->q_nrefs == 0);
+		xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
+		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
+		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
+		XQM_FREELIST_REMOVE(dqp);
+		dqpout = dqp;
+		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
+ mplistunlock:
+		xfs_qm_mplist_unlock(dqp->q_mount);
+		xfs_dqfunlock(dqp);
+		xfs_dqunlock(dqp);
+		if (dqpout)
+			break;
+	}
+
+	xfs_qm_freelist_unlock(xfs_Gqm);
+	return (dqpout);
+}
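
Because xfs_qm_shake_freelist() and xfs_qm_dqreclaim_one() start from the freelist, they take the hash-chain and mount-list locks against the documented lock order, and therefore only with the *_nowait trylock helpers; on failure they drop what they hold and either skip the dquot or restart. A minimal pthread sketch of that trylock-and-back-off convention (a userspace illustration under assumed names, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Documented order: lock_a before lock_b. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/*
 * A path that already holds lock_b but now needs lock_a is acquiring
 * out of order, so it may only trylock; on failure it releases what it
 * holds and tells the caller to retry instead of risking an AB-BA
 * deadlock against a thread taking the locks in the documented order.
 */
static int try_out_of_order(void)
{
	pthread_mutex_lock(&lock_b);		/* the in-order part of the walk */

	if (pthread_mutex_trylock(&lock_a) != 0) {
		pthread_mutex_unlock(&lock_b);	/* back off; caller restarts */
		return -1;
	}

	/* ... work that needs both locks would go here ... */

	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return 0;
}

int main(void)
{
	printf("%s\n", try_out_of_order() == 0 ? "got both locks" : "backed off");
	return 0;
}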
+
+
+/*------------------------------------------------------------------*/
+
+/*
+ * Return a new incore dquot. Depending on the number of
+ * dquots in the system, we either allocate a new one on the kernel heap,
+ * or reclaim a free one.
+ * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
+ * to reclaim an existing one from the freelist.
+ */
+boolean_t
+xfs_qm_dqalloc_incore(
+	xfs_dquot_t **O_dqpp)
+{
+	xfs_dquot_t	*dqp;
+
+	/*
+	 * Check against high water mark to see if we want to pop
+	 * a nincompoop dquot off the freelist.
+	 */
+	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
+		/*
+		 * Try to recycle a dquot from the freelist.
+		 */
+		if ((dqp = xfs_qm_dqreclaim_one())) {
+			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
+			/*
+			 * Just zero the core here. The rest will get
+			 * reinitialized by caller. XXX we shouldn't even
+			 * do this zero ...
+			 */
+			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
+			*O_dqpp = dqp;
+			return (B_FALSE);
+		}
+		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
+	}
+
+	/*
+	 * Allocate a brand new dquot on the kernel heap and return it
+	 * to the caller to initialize.
+	 */
+	ASSERT(xfs_Gqm->qm_dqzone != NULL);
+	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
+	atomic_inc(&xfs_Gqm->qm_totaldquots);
+
+	return (B_TRUE);
+}
+
+
+/*
+ * Start a transaction and write the incore superblock changes to
+ * disk. flags parameter indicates which fields have changed.
+ */
+int
+xfs_qm_write_sb_changes(
+	xfs_mount_t	*mp,
+	__int64_t	flags)
+{
+	xfs_trans_t	*tp;
+	int		error;
+
+#ifdef QUOTADEBUG
+	cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
+#endif
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
+	if ((error = xfs_trans_reserve(tp, 0,
+				      mp->m_sb.sb_sectsize + 128, 0,
+				      0,
+				      XFS_DEFAULT_LOG_COUNT))) {
+		xfs_trans_cancel(tp, 0);
+		return (error);
+	}
+
+	xfs_mod_sb(tp, flags);
+	(void) xfs_trans_commit(tp, 0, NULL);
+
+	return (0);
+}
+
+
+/* --------------- utility functions for vnodeops ---------------- */
+
+
+/*
+ * Given an inode, a uid and gid (from cred_t) make sure that we have
+ * allocated relevant dquot(s) on disk, and that we won't exceed inode
+ * quotas by creating this file.
+ * This also attaches dquot(s) to the given inode after locking it,
+ * and returns the dquots corresponding to the uid and/or gid.
+ *
+ * in	: inode (unlocked)
+ * out	: udquot, gdquot with references taken and unlocked
+ */
+int
+xfs_qm_vop_dqalloc(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip,
+	uid_t		uid,
+	gid_t		gid,
+	uint		flags,
+	xfs_dquot_t	**O_udqpp,
+	xfs_dquot_t	**O_gdqpp)
+{
+	int		error;
+	xfs_dquot_t	*uq, *gq;
+	uint		lockflags;
+
+	if (!XFS_IS_QUOTA_ON(mp))
+		return 0;
+
+	lockflags = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lockflags);
+
+	if ((flags & XFS_QMOPT_INHERIT) &&
+	    XFS_INHERIT_GID(ip, XFS_MTOVFS(mp)))
+		gid = ip->i_d.di_gid;
+
+	/*
+	 * Attach the dquot(s) to this inode, doing a dquot allocation
+	 * if necessary. The dquot(s) will not be locked.
+	 */
+	if (XFS_NOT_DQATTACHED(mp, ip)) {
+		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
+					    XFS_QMOPT_ILOCKED))) {
+			xfs_iunlock(ip, lockflags);
+			return (error);
+		}
+	}
+
+	uq = gq = NULL;
+	if ((flags & XFS_QMOPT_UQUOTA) &&
+	    XFS_IS_UQUOTA_ON(mp)) {
+		if (ip->i_d.di_uid != uid) {
+			/*
+			 * What we need is the dquot that has this uid, and
+			 * if we send the inode to dqget, the uid of the inode
+			 * takes priority over what's sent in the uid argument.
+			 * We must unlock inode here before calling dqget if
+			 * we're not sending the inode, because otherwise
+			 * we'll deadlock by doing trans_reserve while
+			 * holding ilock.
+			 */
+			xfs_iunlock(ip, lockflags);
+			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
+						 XFS_DQ_USER,
+						 XFS_QMOPT_DQALLOC |
+						 XFS_QMOPT_DOWARN,
+						 &uq))) {
+				ASSERT(error != ENOENT);
+				return (error);
+			}
+			/*
+			 * Get the ilock in the right order.
+			 */
+			xfs_dqunlock(uq);
+			lockflags = XFS_ILOCK_SHARED;
+			xfs_ilock(ip, lockflags);
+		} else {
+			/*
+			 * Take an extra reference, because we'll return
+			 * this to the caller.
+			 */
+			ASSERT(ip->i_udquot);
+			uq = ip->i_udquot;
+			xfs_dqlock(uq);
+			XFS_DQHOLD(uq);
+			xfs_dqunlock(uq);
+		}
+	}
+	if ((flags & XFS_QMOPT_GQUOTA) &&
+	    XFS_IS_GQUOTA_ON(mp)) {
+		if (ip->i_d.di_gid != gid) {
+			xfs_iunlock(ip, lockflags);
+			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
+						 XFS_DQ_GROUP,
+						 XFS_QMOPT_DQALLOC |
+						 XFS_QMOPT_DOWARN,
+						 &gq))) {
+				if (uq)
+					xfs_qm_dqrele(uq);
+				ASSERT(error != ENOENT);
+				return (error);
+			}
+			xfs_dqunlock(gq);
+			lockflags = XFS_ILOCK_SHARED;
+			xfs_ilock(ip, lockflags);
+		} else {
+			ASSERT(ip->i_gdquot);
+			gq = ip->i_gdquot;
+			xfs_dqlock(gq);
+			XFS_DQHOLD(gq);
+			xfs_dqunlock(gq);
+		}
+	}
+	if (uq)
+		xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
+
+	xfs_iunlock(ip, lockflags);
+	if (O_udqpp)
+		*O_udqpp = uq;
+	else if (uq)
+		xfs_qm_dqrele(uq);
+	if (O_gdqpp)
+		*O_gdqpp = gq;
+	else if (gq)
+		xfs_qm_dqrele(gq);
+	return (0);
+}
+
+/*
+ * Actually transfer ownership, and do dquot modifications.
+ * These were already reserved.
+ */
+xfs_dquot_t *
+xfs_qm_vop_chown(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_dquot_t	**IO_olddq,
+	xfs_dquot_t	*newdq)
+{
+	xfs_dquot_t	*prevdq;
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
+
+	/* old dquot */
+	prevdq = *IO_olddq;
+	ASSERT(prevdq);
+	ASSERT(prevdq != newdq);
+
+	xfs_trans_mod_dquot(tp, prevdq,
+			    XFS_TRANS_DQ_BCOUNT,
+			    -(ip->i_d.di_nblocks));
+	xfs_trans_mod_dquot(tp, prevdq,
+			    XFS_TRANS_DQ_ICOUNT,
+			    -1);
+
+	/* the sparkling new dquot */
+	xfs_trans_mod_dquot(tp, newdq,
+			    XFS_TRANS_DQ_BCOUNT,
+			    ip->i_d.di_nblocks);
+	xfs_trans_mod_dquot(tp, newdq,
+			    XFS_TRANS_DQ_ICOUNT,
+			    1);
+
+	/*
+	 * Take an extra reference, because the inode
+	 * is going to keep this dquot pointer even
+	 * after the trans_commit.
+	 */
+	xfs_dqlock(newdq);
+	XFS_DQHOLD(newdq);
+	xfs_dqunlock(newdq);
+	*IO_olddq = newdq;
+
+	return (prevdq);
+}
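
xfs_qm_vop_chown() above moves the inode's entire usage from the old dquot to the new one by applying equal and opposite deltas: minus di_nblocks and minus one inode on the previous owner, plus the same amounts on the new owner. A small standalone sketch of that transfer, with a toy counter structure standing in for the real dquots (names are illustrative):

#include <stdio.h>

/* Toy stand-in for the usage counters carried by a dquot. */
struct usage {
	long	blocks;
	long	inodes;
};

/*
 * Move one inode's accounting (nblocks data blocks plus one inode)
 * from the previous owner's counters to the new owner's.
 */
static void chown_transfer(struct usage *olddq, struct usage *newdq,
			   long nblocks)
{
	olddq->blocks -= nblocks;
	olddq->inodes -= 1;
	newdq->blocks += nblocks;
	newdq->inodes += 1;
}

int main(void)
{
	struct usage old_owner = { .blocks = 1000, .inodes = 10 };
	struct usage new_owner = { .blocks = 0, .inodes = 0 };

	chown_transfer(&old_owner, &new_owner, 128);
	printf("old: %ld blks %ld inos, new: %ld blks %ld inos\n",
	       old_owner.blocks, old_owner.inodes,
	       new_owner.blocks, new_owner.inodes);
	return 0;
}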
+
+/*
+ * Quota reservations for setattr(AT_UID|AT_GID).
+ */
+int
+xfs_qm_vop_chown_reserve(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_dquot_t	*udqp,
+	xfs_dquot_t	*gdqp,
+	uint		flags)
+{
+	int		error;
+	xfs_mount_t	*mp;
+	uint		delblks;
+	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
+
+	ASSERT(XFS_ISLOCKED_INODE(ip));
+	mp = ip->i_mount;
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+	delblks = ip->i_delayed_blks;
+	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
+
+	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
+	    ip->i_d.di_uid != (uid_t)INT_GET(udqp->q_core.d_id, ARCH_CONVERT)) {
+		delblksudq = udqp;
+		/*
+		 * If there are delayed allocation blocks, then we have to
+		 * unreserve those from the old dquot, and add them to the
+		 * new dquot.
+		 */
+		if (delblks) {
+			ASSERT(ip->i_udquot);
+			unresudq = ip->i_udquot;
+		}
+	}
+	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
+	    ip->i_d.di_gid != INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) {
+		delblksgdq = gdqp;
+		if (delblks) {
+			ASSERT(ip->i_gdquot);
+			unresgdq = ip->i_gdquot;
+		}
+	}
+
+	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
+				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
+				flags | XFS_QMOPT_RES_REGBLKS)))
+		return (error);
+
+	/*
+	 * Do the delayed blks reservations/unreservations now. Since these
+	 * are done without the help of a transaction, if a reservation fails,
+	 * its previous reservations won't be automatically undone by the
+	 * transaction code, so we have to undo them manually here.
+	 */
+	if (delblks) {
+		/*
+		 * Do the reservations first. Unreservation can't fail.
+		 */
+		ASSERT(delblksudq || delblksgdq);
+		ASSERT(unresudq || unresgdq);
+		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
+				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
+				flags | XFS_QMOPT_RES_REGBLKS)))
+			return (error);
+		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
+				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
+				XFS_QMOPT_RES_REGBLKS);
+	}
+
+	return (0);
+}
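
The comment above explains why the delayed-block work is ordered the way it is: with no transaction to roll back partial work, the step that can fail (reserving against the new dquots) runs first, and only then the step that cannot fail (unreserving from the old ones). A self-contained sketch of that ordering rule, using a toy quota structure rather than real dquots (all names here are made up):

#include <stdio.h>

struct quota {
	long	reserved;
	long	limit;
};

/* Fallible step: refuse if the reservation would exceed the limit. */
static int reserve(struct quota *q, long nblks)
{
	if (q->reserved + nblks > q->limit)
		return -1;
	q->reserved += nblks;
	return 0;
}

/* Infallible step: giving blocks back always succeeds. */
static void unreserve(struct quota *q, long nblks)
{
	q->reserved -= nblks;
}

/*
 * Move a reservation between owners without a transaction to undo
 * partial work: do the fallible step first, the infallible step second.
 */
static int move_reservation(struct quota *from, struct quota *to, long nblks)
{
	if (reserve(to, nblks))
		return -1;		/* nothing has been touched yet */
	unreserve(from, nblks);		/* cannot fail, so no undo needed */
	return 0;
}

int main(void)
{
	struct quota alice = { .reserved = 50, .limit = 100 };
	struct quota bob   = { .reserved = 90, .limit = 100 };

	printf("move 20 blocks: %s\n",
	       move_reservation(&alice, &bob, 20) ? "rejected" : "ok");
	return 0;
}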
+
+int
+xfs_qm_vop_rename_dqattach(
+	xfs_inode_t	**i_tab)
+{
+	xfs_inode_t	*ip;
+	int		i;
+	int		error;
+
+	ip = i_tab[0];
+
+	if (! XFS_IS_QUOTA_ON(ip->i_mount))
+		return (0);
+
+	if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
+		error = xfs_qm_dqattach(ip, 0);
+		if (error)
+			return (error);
+	}
+	for (i = 1; (i < 4 && i_tab[i]); i++) {
+		/*
+		 * Watch out for duplicate entries in the table.
+		 */
+		if ((ip = i_tab[i]) != i_tab[i-1]) {
+			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
+				error = xfs_qm_dqattach(ip, 0);
+				if (error)
+					return (error);
+			}
+		}
+	}
+	return (0);
+}
+
+void
+xfs_qm_vop_dqattach_and_dqmod_newinode(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_dquot_t	*udqp,
+	xfs_dquot_t	*gdqp)
+{
+	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
+		return;
+
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
+
+	if (udqp) {
+		xfs_dqlock(udqp);
+		XFS_DQHOLD(udqp);
+		xfs_dqunlock(udqp);
+		ASSERT(ip->i_udquot == NULL);
+		ip->i_udquot = udqp;
+		ASSERT(ip->i_d.di_uid == INT_GET(udqp->q_core.d_id, ARCH_CONVERT));
+		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
+	}
+	if (gdqp) {
+		xfs_dqlock(gdqp);
+		XFS_DQHOLD(gdqp);
+		xfs_dqunlock(gdqp);
+		ASSERT(ip->i_gdquot == NULL);
+		ip->i_gdquot = gdqp;
+		ASSERT(ip->i_d.di_gid == INT_GET(gdqp->q_core.d_id, ARCH_CONVERT));
+		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
+	}
+}
+
+/* ------------- list stuff -----------------*/
+void
+xfs_qm_freelist_init(xfs_frlist_t *ql)
+{
+	ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
+	mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf");
+	ql->qh_version = 0;
+	ql->qh_nelems = 0;
+}
+
+void
+xfs_qm_freelist_destroy(xfs_frlist_t *ql)
+{
+	xfs_dquot_t	*dqp, *nextdqp;
+
+	mutex_lock(&ql->qh_lock, PINOD);
+	for (dqp = ql->qh_next;
+	     dqp != (xfs_dquot_t *)ql; ) {
+		xfs_dqlock(dqp);
+		nextdqp = dqp->dq_flnext;
+#ifdef QUOTADEBUG
+		cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+#endif
+		XQM_FREELIST_REMOVE(dqp);
+		xfs_dqunlock(dqp);
+		xfs_qm_dqdestroy(dqp);
+		dqp = nextdqp;
+	}
+	/*
+	 * Don't bother about unlocking.
+	 */
+	mutex_destroy(&ql->qh_lock);
+
+	ASSERT(ql->qh_nelems == 0);
+}
+
+void
+xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
+{
+	dq->dq_flnext = ql->qh_next;
+	dq->dq_flprev = (xfs_dquot_t *)ql;
+	ql->qh_next = dq;
+	dq->dq_flnext->dq_flprev = dq;
+	xfs_Gqm->qm_dqfreelist.qh_nelems++;
+	xfs_Gqm->qm_dqfreelist.qh_version++;
+}
+
+void
+xfs_qm_freelist_unlink(xfs_dquot_t *dq)
+{
+	xfs_dquot_t *next = dq->dq_flnext;
+	xfs_dquot_t *prev = dq->dq_flprev;
+
+	next->dq_flprev = prev;
+	prev->dq_flnext = next;
+	dq->dq_flnext = dq->dq_flprev = dq;
+	xfs_Gqm->qm_dqfreelist.qh_nelems--;
+	xfs_Gqm->qm_dqfreelist.qh_version++;
+}
+
+void
+xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
+{
+	xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
+}
+
+int
+xfs_qm_dqhashlock_nowait(
+	xfs_dquot_t *dqp)
+{
+	int locked;
+
+	locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
+	return (locked);
+}
+
+int
+xfs_qm_freelist_lock_nowait(
+	xfs_qm_t *xqm)
+{
+	int locked;
+
+	locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
+	return (locked);
+}
+
+int
+xfs_qm_mplist_nowait(
+	xfs_mount_t	*mp)
+{
+	int locked;
+
+	ASSERT(mp->m_quotainfo);
+	locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
+	return (locked);
+}
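
The freelist helpers above treat the xfs_frlist_t head as a sentinel element of a circular doubly linked list: because its first two fields line up with a dquot's forward and backward links, the head can be cast to xfs_dquot_t *, and an empty list is simply one whose links point back at the head. A self-contained sketch of the same sentinel idiom, with hypothetical structure names:

#include <stdio.h>

/* Link fields come first so the list head can masquerade as a node. */
struct node {
	struct node	*next;
	struct node	*prev;
	int		id;
};

struct list_head {
	struct node	*next;	/* matches struct node's first two fields */
	struct node	*prev;
	unsigned int	nelems;
};

static void list_init(struct list_head *l)
{
	l->next = l->prev = (struct node *)l;	/* empty: points at itself */
	l->nelems = 0;
}

static void list_insert(struct list_head *l, struct node *n)
{
	n->next = l->next;
	n->prev = (struct node *)l;
	l->next = n;
	n->next->prev = n;
	l->nelems++;
}

static void list_unlink(struct list_head *l, struct node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
	n->next = n->prev = n;			/* mark as off-list */
	l->nelems--;
}

int main(void)
{
	struct list_head fl;
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *n;

	list_init(&fl);
	list_insert(&fl, &a);
	list_insert(&fl, &b);
	list_unlink(&fl, &a);

	for (n = fl.next; n != (struct node *)&fl; n = n->next)
		printf("on freelist: %d\n", n->id);
	return 0;
}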
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
new file mode 100644
index 0000000..dcf1a7a
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_QM_H__
+#define __XFS_QM_H__
+
+#include "xfs_dquot_item.h"
+#include "xfs_dquot.h"
+#include "xfs_quota_priv.h"
+#include "xfs_qm_stats.h"
+
+struct xfs_qm;
+struct xfs_inode;
+
+extern mutex_t		xfs_Gqm_lock;
+extern struct xfs_qm	*xfs_Gqm;
+extern kmem_zone_t	*qm_dqzone;
+extern kmem_zone_t	*qm_dqtrxzone;
+
+/*
+ * Used in xfs_qm_sync (called by xfs_sync) to bound the maximum number of
+ * times it can restart iteration over the mountpoint's dquot list in one call.
+ */
+#define XFS_QM_SYNC_MAX_RESTARTS	7
+
+/*
+ * Ditto, for xfs_qm_dqreclaim_one.
+ */
+#define XFS_QM_RECLAIM_MAX_RESTARTS	4
+
+/*
+ * Ideal ratio of free to in-use dquots. The quota manager attempts to
+ * maintain this balance.
+ */
+#define XFS_QM_DQFREE_RATIO		2
+
+/*
+ * Dquot hashtable constants/threshold values.
+ */
+#define XFS_QM_NCSIZE_THRESHOLD		5000
+#define XFS_QM_HASHSIZE_LOW		32
+#define XFS_QM_HASHSIZE_HIGH		64
+
+/*
+ * We output a cmn_err when quotachecking a quota file with more than
+ * this many fsbs.
+ */
+#define XFS_QM_BIG_QCHECK_NBLKS		500
+
+/*
+ * This defines the unit of allocation of dquots.
+ * Currently, it is just one file system block, and a 4K blk contains 30
+ * (136 * 30 = 4080) dquots. It's probably not worth trying to make
+ * this more dynamic.
+ * XXXsup However, if this number is changed, we have to make sure that we don't
+ * implicitly assume that we do allocations in chunks of a single filesystem
+ * block in the dquot/xqm code.
+ */
+#define XFS_DQUOT_CLUSTER_SIZE_FSB	(xfs_filblks_t)1
+/*
+ * When doing a quotacheck, we log dquot clusters of at most this many FSBs
+ * in a single transaction, so that we don't ask for an overly large log
+ * reservation.
+ */
+#define XFS_QM_MAX_DQCLUSTER_LOGSZ	3
+
+typedef xfs_dqhash_t	xfs_dqlist_t;
+/*
+ * The freelist head. The first two fields match the first two in the
+ * xfs_dquot_t structure (in xfs_dqmarker_t)
+ */
+typedef struct xfs_frlist {
+       struct xfs_dquot *qh_next;
+       struct xfs_dquot *qh_prev;
+       mutex_t		 qh_lock;
+       uint		 qh_version;
+       uint		 qh_nelems;
+} xfs_frlist_t;
+
+/*
+ * Quota Manager (global) structure. Lives only in core.
+ */
+typedef struct xfs_qm {
+	xfs_dqlist_t	*qm_usr_dqhtable;/* udquot hash table */
+	xfs_dqlist_t	*qm_grp_dqhtable;/* gdquot hash table */
+	uint		 qm_dqhashmask;	 /* # buckets in dq hashtab - 1 */
+	xfs_frlist_t	 qm_dqfreelist;	 /* freelist of dquots */
+	atomic_t	 qm_totaldquots; /* total incore dquots */
+	uint		 qm_nrefs;	 /* file systems with quota on */
+	int		 qm_dqfree_ratio;/* ratio of free to inuse dquots */
+	kmem_zone_t	*qm_dqzone;	 /* dquot mem-alloc zone */
+	kmem_zone_t	*qm_dqtrxzone;	 /* t_dqinfo of transactions */
+} xfs_qm_t;
+
+/*
+ * Various quota information for individual filesystems.
+ * The mount structure keeps a pointer to this.
+ */
+typedef struct xfs_quotainfo {
+	xfs_inode_t	*qi_uquotaip;	 /* user quota inode */
+	xfs_inode_t	*qi_gquotaip;	 /* group quota inode */
+	lock_t		 qi_pinlock;	 /* dquot pinning mutex */
+	xfs_dqlist_t	 qi_dqlist;	 /* all dquots in filesys */
+	int		 qi_dqreclaims;	 /* a change here indicates
+					    a removal in the dqlist */
+	time_t		 qi_btimelimit;	 /* limit for blks timer */
+	time_t		 qi_itimelimit;	 /* limit for inodes timer */
+	time_t		 qi_rtbtimelimit;/* limit for rt blks timer */
+	xfs_qwarncnt_t	 qi_bwarnlimit;	 /* limit for num warnings */
+	xfs_qwarncnt_t	 qi_iwarnlimit;	 /* limit for num warnings */
+	mutex_t		 qi_quotaofflock;/* to serialize quotaoff */
+	xfs_filblks_t	 qi_dqchunklen;	 /* # BBs in a chunk of dqs */
+	uint		 qi_dqperchunk;	 /* # ondisk dqs in above chunk */
+	xfs_qcnt_t	 qi_bhardlimit;	 /* default data blk hard limit */
+	xfs_qcnt_t	 qi_bsoftlimit;	 /* default data blk soft limit */
+	xfs_qcnt_t	 qi_ihardlimit;	 /* default inode count hard limit */
+	xfs_qcnt_t	 qi_isoftlimit;	 /* default inode count soft limit */
+	xfs_qcnt_t	 qi_rtbhardlimit;/* default realtime blk hard limit */
+	xfs_qcnt_t	 qi_rtbsoftlimit;/* default realtime blk soft limit */
+} xfs_quotainfo_t;
+
+
+extern xfs_dqtrxops_t	xfs_trans_dquot_ops;
+
+extern void	xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
+extern int	xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
+			xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
+extern void	xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *);
+extern void	xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *);
+
+/*
+ * We keep the usr and grp dquots separately so that locking will be easier
+ * to do at commit time. All transactions that we know of at this point
+ * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value.
+ */
+#define XFS_QM_TRANS_MAXDQS		2
+typedef struct xfs_dquot_acct {
+	xfs_dqtrx_t	dqa_usrdquots[XFS_QM_TRANS_MAXDQS];
+	xfs_dqtrx_t	dqa_grpdquots[XFS_QM_TRANS_MAXDQS];
+} xfs_dquot_acct_t;
+
+/*
+ * Users are allowed to have a usage exceeding their softlimit for
+ * a period this long.
+ */
+#define XFS_QM_BTIMELIMIT	(7 * 24*60*60)          /* 1 week */
+#define XFS_QM_RTBTIMELIMIT	(7 * 24*60*60)          /* 1 week */
+#define XFS_QM_ITIMELIMIT	(7 * 24*60*60)          /* 1 week */
+
+#define XFS_QM_BWARNLIMIT	5
+#define XFS_QM_IWARNLIMIT	5
+
+#define XFS_QM_LOCK(xqm)	(mutex_lock(&xqm##_lock, PINOD))
+#define XFS_QM_UNLOCK(xqm)	(mutex_unlock(&xqm##_lock))
+#define XFS_QM_HOLD(xqm)	((xqm)->qm_nrefs++)
+#define XFS_QM_RELE(xqm)	((xqm)->qm_nrefs--)
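
XFS_QM_LOCK()/XFS_QM_UNLOCK() above rely on preprocessor token pasting: XFS_QM_LOCK(xfs_Gqm) expands to mutex_lock(&xfs_Gqm_lock, PINOD), i.e. the macro names the companion lock variable declared earlier in this header rather than dereferencing the pointer it is given. A tiny userspace illustration of that pasting trick (the names, and the counter standing in for a mutex, are made up):

#include <stdio.h>

/* The ## operator glues the macro argument onto "_lock", so
 * GUARD(widget) touches the separate variable widget_lock. */
#define GUARD(x)	((x##_lock)++)
#define RELEASE(x)	((x##_lock)--)

static int widget;		/* the object itself */
static int widget_lock;		/* its companion "lock" (just a counter here) */

int main(void)
{
	GUARD(widget);
	printf("widget=%d widget_lock=%d\n", widget, widget_lock);
	RELEASE(widget);
	printf("widget=%d widget_lock=%d\n", widget, widget_lock);
	return 0;
}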
+
+extern void		xfs_mount_reset_sbqflags(xfs_mount_t *);
+
+extern int		xfs_qm_init_quotainfo(xfs_mount_t *);
+extern void		xfs_qm_destroy_quotainfo(xfs_mount_t *);
+extern int		xfs_qm_mount_quotas(xfs_mount_t *, int);
+extern void		xfs_qm_mount_quotainit(xfs_mount_t *, uint);
+extern int		xfs_qm_quotacheck(xfs_mount_t *);
+extern void		xfs_qm_unmount_quotadestroy(xfs_mount_t *);
+extern int		xfs_qm_unmount_quotas(xfs_mount_t *);
+extern int		xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
+extern int		xfs_qm_sync(xfs_mount_t *, short);
+
+/* dquot stuff */
+extern boolean_t	xfs_qm_dqalloc_incore(xfs_dquot_t **);
+extern int		xfs_qm_dqattach(xfs_inode_t *, uint);
+extern void		xfs_qm_dqdetach(xfs_inode_t *);
+extern int		xfs_qm_dqpurge_all(xfs_mount_t *, uint);
+extern void		xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
+
+/* vop stuff */
+extern int		xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
+					uid_t, gid_t, uint,
+					xfs_dquot_t **, xfs_dquot_t **);
+extern void		xfs_qm_vop_dqattach_and_dqmod_newinode(
+					xfs_trans_t *, xfs_inode_t *,
+					xfs_dquot_t *, xfs_dquot_t *);
+extern int		xfs_qm_vop_rename_dqattach(xfs_inode_t **);
+extern xfs_dquot_t *	xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
+					xfs_dquot_t **, xfs_dquot_t *);
+extern int		xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
+					xfs_dquot_t *, xfs_dquot_t *, uint);
+
+/* list stuff */
+extern void		xfs_qm_freelist_init(xfs_frlist_t *);
+extern void		xfs_qm_freelist_destroy(xfs_frlist_t *);
+extern void		xfs_qm_freelist_insert(xfs_frlist_t *, xfs_dquot_t *);
+extern void		xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
+extern void		xfs_qm_freelist_unlink(xfs_dquot_t *);
+extern int		xfs_qm_freelist_lock_nowait(xfs_qm_t *);
+extern int		xfs_qm_mplist_nowait(xfs_mount_t *);
+extern int		xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
+
+/* system call interface */
+extern int		xfs_qm_quotactl(bhv_desc_t *, int, int, xfs_caddr_t);
+
+#ifdef DEBUG
+extern int		xfs_qm_internalqcheck(xfs_mount_t *);
+#else
+#define xfs_qm_internalqcheck(mp)	(0)
+#endif
+
+#endif /* __XFS_QM_H__ */
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
new file mode 100644
index 0000000..be67d9c
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+
+#include "xfs_qm.h"
+
+#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
+#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
+#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
+#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
+#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
+#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
+#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota accounting, no limit enforcement */
+#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota accounting, no limit enforcement */
+#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
+
+STATIC int
+xfs_qm_parseargs(
+	struct bhv_desc		*bhv,
+	char			*options,
+	struct xfs_mount_args	*args,
+	int			update)
+{
+	size_t			length;
+	char			*local_options = options;
+	char			*this_char;
+	int			error;
+	int			referenced = update;
+
+	while ((this_char = strsep(&local_options, ",")) != NULL) {
+		length = strlen(this_char);
+		if (local_options)
+			length++;
+
+		if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
+			args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
+			args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
+			referenced = update;
+		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
+			   !strcmp(this_char, MNTOPT_UQUOTA) ||
+			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
+			args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
+			referenced = 1;
+		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
+			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
+			args->flags |= XFSMNT_UQUOTA;
+			args->flags &= ~XFSMNT_UQUOTAENF;
+			referenced = 1;
+		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
+			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
+			args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
+			referenced = 1;
+		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
+			args->flags |= XFSMNT_GQUOTA;
+			args->flags &= ~XFSMNT_GQUOTAENF;
+			referenced = 1;
+		} else {
+			if (local_options)
+				*(local_options-1) = ',';
+			continue;
+		}
+
+		while (length--)
+			*this_char++ = ',';
+	}
+
+	PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
+	if (!error && !referenced)
+		bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
+	return error;
+}
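
xfs_qm_parseargs() above walks the shared mount-option string with strsep(), blanks out the options it consumes (so later layers never see them), and restores the separator for options it does not recognize. A small userspace sketch of that strsep walk (the option names and flag bits are illustrative only):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char options[] = "uquota,noatime,gqnoenforce";
	char *rest = options;
	char *opt;
	unsigned int flags = 0;

	while ((opt = strsep(&rest, ",")) != NULL) {
		size_t len = strlen(opt);

		if (rest)		/* strsep replaced a ',' with '\0' */
			len++;

		if (strcmp(opt, "uquota") == 0) {
			flags |= 0x1;	/* made-up flag bit */
		} else if (strcmp(opt, "gqnoenforce") == 0) {
			flags |= 0x2;	/* made-up flag bit */
		} else {
			/* Not ours: restore the ',' so later parsers see it. */
			if (rest)
				*(rest - 1) = ',';
			continue;
		}

		/* Ours: blank the consumed token (and its separator) out. */
		while (len--)
			*opt++ = ',';
	}

	printf("flags=%#x leftover=\"%s\"\n", flags, options);
	return 0;
}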
+
+STATIC int
+xfs_qm_showargs(
+	struct bhv_desc		*bhv,
+	struct seq_file		*m)
+{
+	struct vfs		*vfsp = bhvtovfs(bhv);
+	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
+	int			error;
+
+	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
+		(mp->m_qflags & XFS_UQUOTA_ENFD) ?
+			seq_puts(m, "," MNTOPT_USRQUOTA) :
+			seq_puts(m, "," MNTOPT_UQUOTANOENF);
+	}
+
+	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
+		(mp->m_qflags & XFS_GQUOTA_ENFD) ?
+			seq_puts(m, "," MNTOPT_GRPQUOTA) :
+			seq_puts(m, "," MNTOPT_GQUOTANOENF);
+	}
+
+	if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT)))
+		seq_puts(m, "," MNTOPT_NOQUOTA);
+
+	PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
+	return error;
+}
+
+STATIC int
+xfs_qm_mount(
+	struct bhv_desc		*bhv,
+	struct xfs_mount_args	*args,
+	struct cred		*cr)
+{
+	struct vfs		*vfsp = bhvtovfs(bhv);
+	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
+	int			error;
+
+	if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA))
+		xfs_qm_mount_quotainit(mp, args->flags);
+	PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
+	return error;
+}
+
+STATIC int
+xfs_qm_syncall(
+	struct bhv_desc		*bhv,
+	int			flags,
+	cred_t			*credp)
+{
+	struct vfs		*vfsp = bhvtovfs(bhv);
+	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
+	int			error;
+
+	/*
+	 * Get the Quota Manager to flush the dquots.
+	 */
+	if (XFS_IS_QUOTA_ON(mp)) {
+		if ((error = xfs_qm_sync(mp, flags))) {
+			/*
+			 * If we got an IO error, we will be shutting down.
+			 * So, there's nothing more for us to do here.
+			 */
+			ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
+			if (XFS_FORCED_SHUTDOWN(mp)) {
+				return XFS_ERROR(error);
+			}
+		}
+	}
+	PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error);
+	return error;
+}
+
+/*
+ * Clear the quotaflags in memory and in the superblock.
+ */
+void
+xfs_mount_reset_sbqflags(
+	xfs_mount_t		*mp)
+{
+	xfs_trans_t		*tp;
+	unsigned long		s;
+
+	mp->m_qflags = 0;
+	/*
+	 * It is OK to look at sb_qflags here in mount path,
+	 * without SB_LOCK.
+	 */
+	if (mp->m_sb.sb_qflags == 0)
+		return;
+	s = XFS_SB_LOCK(mp);
+	mp->m_sb.sb_qflags = 0;
+	XFS_SB_UNLOCK(mp, s);
+
+	/*
+	 * if the fs is readonly, let the incore superblock run
+	 * with quotas off but don't flush the update out to disk
+	 */
+	if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
+		return;
+#ifdef QUOTADEBUG
+	xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
+#endif
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
+	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+				      XFS_DEFAULT_LOG_COUNT)) {
+		xfs_trans_cancel(tp, 0);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_mount_reset_sbqflags: Superblock update failed!");
+		return;
+	}
+	xfs_mod_sb(tp, XFS_SB_QFLAGS);
+	xfs_trans_commit(tp, 0, NULL);
+}
+
+STATIC int
+xfs_qm_newmount(
+	xfs_mount_t	*mp,
+	uint		*needquotamount,
+	uint		*quotaflags)
+{
+	uint		quotaondisk;
+	uint		uquotaondisk = 0, gquotaondisk = 0;
+
+	*quotaflags = 0;
+	*needquotamount = B_FALSE;
+
+	quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
+		mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT);
+
+	if (quotaondisk) {
+		uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
+		gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
+	}
+
+	/*
+	 * If the device itself is read-only, we can't allow
+	 * the user to change the state of quota on the mount -
+	 * this would generate a transaction on the ro device,
+	 * which would lead to an I/O error and shutdown
+	 */
+
+	if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
+	    (!uquotaondisk &&  XFS_IS_UQUOTA_ON(mp)) ||
+	     (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
+	    (!gquotaondisk &&  XFS_IS_GQUOTA_ON(mp)))  &&
+	    xfs_dev_is_read_only(mp, "changing quota state")) {
+		cmn_err(CE_WARN,
+			"XFS: please mount with%s%s%s.",
+			(!quotaondisk ? "out quota" : ""),
+			(uquotaondisk ? " usrquota" : ""),
+			(gquotaondisk ? " grpquota" : ""));
+		return XFS_ERROR(EPERM);
+	}
+
+	if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
+		/*
+		 * Call mount_quotas at this point only if we won't have to do
+		 * a quotacheck.
+		 */
+		if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
+			/*
+			 * If an error occurred, qm_mount_quotas code
+			 * has already disabled quotas. So, just finish
+			 * mounting, and get on with the boring life
+			 * without disk quotas.
+			 */
+			xfs_qm_mount_quotas(mp, 0);
+		} else {
+			/*
+			 * Clear the quota flags, but remember them. This
+			 * is so that the quota code doesn't get invoked
+			 * before we're ready. This can happen when an
+			 * inode goes inactive and wants to free blocks,
+			 * or via xfs_log_mount_finish.
+			 */
+			*needquotamount = B_TRUE;
+			*quotaflags = mp->m_qflags;
+			mp->m_qflags = 0;
+		}
+	}
+
+	return 0;
+}
+
+STATIC int
+xfs_qm_endmount(
+	xfs_mount_t	*mp,
+	uint		needquotamount,
+	uint		quotaflags,
+	int		mfsi_flags)
+{
+	if (needquotamount) {
+		ASSERT(mp->m_qflags == 0);
+		mp->m_qflags = quotaflags;
+		xfs_qm_mount_quotas(mp, mfsi_flags);
+	}
+
+#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
+	if (! (XFS_IS_QUOTA_ON(mp)))
+		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
+	else
+		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
+#endif
+
+#ifdef QUOTADEBUG
+	if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
+		cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
+#endif
+
+	return 0;
+}
+
+STATIC void
+xfs_qm_dqrele_null(
+	xfs_dquot_t	*dq)
+{
+	/*
+	 * Called from XFS, where we always check first for a NULL dquot.
+	 */
+	if (!dq)
+		return;
+	xfs_qm_dqrele(dq);
+}
+
+
+struct xfs_qmops xfs_qmcore_xfs = {
+	.xfs_qminit		= xfs_qm_newmount,
+	.xfs_qmdone		= xfs_qm_unmount_quotadestroy,
+	.xfs_qmmount		= xfs_qm_endmount,
+	.xfs_qmunmount		= xfs_qm_unmount_quotas,
+	.xfs_dqrele		= xfs_qm_dqrele_null,
+	.xfs_dqattach		= xfs_qm_dqattach,
+	.xfs_dqdetach		= xfs_qm_dqdetach,
+	.xfs_dqpurgeall		= xfs_qm_dqpurge_all,
+	.xfs_dqvopalloc		= xfs_qm_vop_dqalloc,
+	.xfs_dqvopcreate	= xfs_qm_vop_dqattach_and_dqmod_newinode,
+	.xfs_dqvoprename	= xfs_qm_vop_rename_dqattach,
+	.xfs_dqvopchown		= xfs_qm_vop_chown,
+	.xfs_dqvopchownresv	= xfs_qm_vop_chown_reserve,
+	.xfs_dqtrxops		= &xfs_trans_dquot_ops,
+};
+
+struct bhv_vfsops xfs_qmops = { {
+	BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM),
+	.vfs_parseargs		= xfs_qm_parseargs,
+	.vfs_showargs		= xfs_qm_showargs,
+	.vfs_mount		= xfs_qm_mount,
+	.vfs_sync		= xfs_qm_syncall,
+	.vfs_quotactl		= xfs_qm_quotactl, },
+};
+
+
+void __init
+xfs_qm_init(void)
+{
+	static char	message[] __initdata =
+		KERN_INFO "SGI XFS Quota Management subsystem\n";
+
+	printk(message);
+	mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
+	vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs);
+	xfs_qm_init_procfs();
+}
+
+void __exit
+xfs_qm_exit(void)
+{
+	vfs_bhv_clr_custom(&xfs_qmops);
+	xfs_qm_cleanup_procfs();
+	if (qm_dqzone)
+		kmem_cache_destroy(qm_dqzone);
+	if (qm_dqtrxzone)
+		kmem_cache_destroy(qm_dqtrxzone);
+}
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
new file mode 100644
index 0000000..29978e0
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+
+#include "xfs_qm.h"
+
+struct xqmstats xqmstats;
+
+STATIC int
+xfs_qm_read_xfsquota(
+	char		*buffer,
+	char		**start,
+	off_t		offset,
+	int		count,
+	int		*eof,
+	void		*data)
+{
+	int		len;
+
+	/* maximum; incore; ratio free to inuse; freelist */
+	len = sprintf(buffer, "%d\t%d\t%d\t%u\n",
+			ndquot,
+			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
+			xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
+			xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
+
+	if (offset >= len) {
+		*start = buffer;
+		*eof = 1;
+		return 0;
+	}
+	*start = buffer + offset;
+	if ((len -= offset) > count)
+		return count;
+	*eof = 1;
+
+	return len;
+}
+
+STATIC int
+xfs_qm_read_stats(
+	char		*buffer,
+	char		**start,
+	off_t		offset,
+	int		count,
+	int		*eof,
+	void		*data)
+{
+	int		len;
+
+	/* quota performance statistics */
+	len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n",
+			xqmstats.xs_qm_dqreclaims,
+			xqmstats.xs_qm_dqreclaim_misses,
+			xqmstats.xs_qm_dquot_dups,
+			xqmstats.xs_qm_dqcachemisses,
+			xqmstats.xs_qm_dqcachehits,
+			xqmstats.xs_qm_dqwants,
+			xqmstats.xs_qm_dqshake_reclaims,
+			xqmstats.xs_qm_dqinact_reclaims);
+
+	if (offset >= len) {
+		*start = buffer;
+		*eof = 1;
+		return 0;
+	}
+	*start = buffer + offset;
+	if ((len -= offset) > count)
+		return count;
+	*eof = 1;
+
+	return len;
+}
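
Both proc read handlers above follow the old create_proc_read_entry() calling convention: format the whole report into the supplied buffer, then trim the result against the reader's offset and count and set *eof once everything has been delivered. The trimming step in isolation, as a plain userspace function (a sketch of the convention, not a kernel interface):

#include <stdio.h>

/*
 * Given a fully formatted report of 'len' bytes, decide how much of it
 * a reader positioned at 'offset' and asking for 'count' bytes should
 * get, mirroring the offset/count/eof handling in the handlers above.
 */
static int clip_proc_read(int len, long offset, int count,
			  char **start, char *buffer, int *eof)
{
	if (offset >= len) {
		*start = buffer;
		*eof = 1;
		return 0;
	}
	*start = buffer + offset;
	if ((len -= offset) > count)
		return count;
	*eof = 1;
	return len;
}

int main(void)
{
	char report[64];
	char *start;
	int eof = 0;
	int len = snprintf(report, sizeof(report), "qm 1 2 3 4 5 6 7 8\n");

	/* A reader asking for 8 bytes at offset 0 gets a partial chunk. */
	int n = clip_proc_read(len, 0, 8, &start, report, &eof);
	printf("returned %d bytes, eof=%d: %.*s\n", n, eof, n, start);

	/* The follow-up read at offset 8 drains the rest and sets eof. */
	n = clip_proc_read(len, 8, 64, &start, report, &eof);
	printf("returned %d bytes, eof=%d: %.*s", n, eof, n, start);
	return 0;
}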
+
+void
+xfs_qm_init_procfs(void)
+{
+	create_proc_read_entry("fs/xfs/xqmstat", 0, NULL, xfs_qm_read_stats, NULL);
+	create_proc_read_entry("fs/xfs/xqm", 0, NULL, xfs_qm_read_xfsquota, NULL);
+}
+
+void
+xfs_qm_cleanup_procfs(void)
+{
+	remove_proc_entry("fs/xfs/xqm", NULL);
+	remove_proc_entry("fs/xfs/xqmstat", NULL);
+}
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h
new file mode 100644
index 0000000..8093c5c
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm_stats.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_QM_STATS_H__
+#define __XFS_QM_STATS_H__
+
+
+#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
+
+/*
+ * XQM global statistics
+ */
+struct xqmstats {
+	__uint32_t		xs_qm_dqreclaims;
+	__uint32_t		xs_qm_dqreclaim_misses;
+	__uint32_t		xs_qm_dquot_dups;
+	__uint32_t		xs_qm_dqcachemisses;
+	__uint32_t		xs_qm_dqcachehits;
+	__uint32_t		xs_qm_dqwants;
+	__uint32_t		xs_qm_dqshake_reclaims;
+	__uint32_t		xs_qm_dqinact_reclaims;
+};
+
+extern struct xqmstats xqmstats;
+
+# define XQM_STATS_INC(count)	( (count)++ )
+
+extern void xfs_qm_init_procfs(void);
+extern void xfs_qm_cleanup_procfs(void);
+
+#else
+
+# define XQM_STATS_INC(count)	do { } while (0)
+
+static __inline void xfs_qm_init_procfs(void) { };
+static __inline void xfs_qm_cleanup_procfs(void) { };
+
+#endif
+
+#endif	/* __XFS_QM_STATS_H__ */
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
new file mode 100644
index 0000000..229f5b5
--- /dev/null
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -0,0 +1,1458 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+
+#include "xfs_qm.h"
+
+#ifdef DEBUG
+# define qdprintk(s, args...)	cmn_err(CE_DEBUG, s, ## args)
+#else
+# define qdprintk(s, args...)	do { } while (0)
+#endif
+
+STATIC int	xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
+STATIC int	xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
+					fs_disk_quota_t *);
+STATIC int	xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
+STATIC int	xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
+					fs_disk_quota_t *);
+STATIC int	xfs_qm_scall_quotaon(xfs_mount_t *, uint);
+STATIC int	xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t);
+STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
+STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
+					uint);
+STATIC uint	xfs_qm_import_flags(uint);
+STATIC uint	xfs_qm_export_flags(uint);
+STATIC uint	xfs_qm_import_qtype_flags(uint);
+STATIC uint	xfs_qm_export_qtype_flags(uint);
+STATIC void	xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
+					fs_disk_quota_t *);
+
+
+/*
+ * The main distribution switch of all XFS quotactl system calls.
+ */
+int
+xfs_qm_quotactl(
+	struct bhv_desc *bdp,
+	int		cmd,
+	int		id,
+	xfs_caddr_t	addr)
+{
+	xfs_mount_t	*mp;
+	int		error;
+	struct vfs	*vfsp;
+
+	vfsp = bhvtovfs(bdp);
+	mp = XFS_VFSTOM(vfsp);
+
+	if (addr == NULL && cmd != Q_SYNC)
+		return XFS_ERROR(EINVAL);
+	if (id < 0 && cmd != Q_SYNC)
+		return XFS_ERROR(EINVAL);
+
+	/*
+	 * The following commands are valid even when quota is off.
+	 */
+	switch (cmd) {
+		/*
+		 * truncate quota files. quota must be off.
+		 */
+	      case Q_XQUOTARM:
+		if (XFS_IS_QUOTA_ON(mp) || addr == NULL)
+			return XFS_ERROR(EINVAL);
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		return (xfs_qm_scall_trunc_qfiles(mp,
+			       xfs_qm_import_qtype_flags(*(uint *)addr)));
+		/*
+		 * Get quota status information.
+		 */
+	      case Q_XGETQSTAT:
+		return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));
+
+		/*
+		 * QUOTAON for root f/s and quota enforcement on others.
+		 * Quota accounting for non-root f/s's must be turned on
+		 * at mount time.
+		 */
+	      case Q_XQUOTAON:
+		if (addr == NULL)
+			return XFS_ERROR(EINVAL);
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		return (xfs_qm_scall_quotaon(mp,
+					  xfs_qm_import_flags(*(uint *)addr)));
+	      case Q_XQUOTAOFF:
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		break;
+
+	      default:
+		break;
+	}
+
+	if (! XFS_IS_QUOTA_ON(mp))
+		return XFS_ERROR(ESRCH);
+
+	switch (cmd) {
+	      case Q_XQUOTAOFF:
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		error = xfs_qm_scall_quotaoff(mp,
+					    xfs_qm_import_flags(*(uint *)addr),
+					    B_FALSE);
+		break;
+
+		/*
+		 * Defaults to XFS_GETUQUOTA.
+		 */
+	      case Q_XGETQUOTA:
+		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
+					(fs_disk_quota_t *)addr);
+		break;
+		/*
+		 * Set limits, both hard and soft. Defaults to Q_SETUQLIM.
+		 */
+	      case Q_XSETQLIM:
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
+					     (fs_disk_quota_t *)addr);
+		break;
+
+	       case Q_XSETGQLIM:
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			return XFS_ERROR(EROFS);
+		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
+					     (fs_disk_quota_t *)addr);
+		break;
+
+
+	      case Q_XGETGQUOTA:
+		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
+					(fs_disk_quota_t *)addr);
+		break;
+
+		/*
+		 * Quota state is entirely undefined after quotaoff in XFS.
+		 * For instance, there's no way to set limits once quota is off.
+		 */
+
+	      default:
+		error = XFS_ERROR(EINVAL);
+		break;
+	}
+
+	return (error);
+}
+
+/*
+ * Turn off quota accounting and/or enforcement for all udquots and/or
+ * gdquots. Called only at unmount time.
+ *
+ * This assumes that there are no dquots of this file system cached
+ * incore, and modifies the ondisk dquot directly. Therefore, for example,
+ * it is an error to call this twice, without purging the cache.
+ */
+STATIC int
+xfs_qm_scall_quotaoff(
+	xfs_mount_t		*mp,
+	uint			flags,
+	boolean_t		force)
+{
+	uint			dqtype;
+	unsigned long	s;
+	int			error;
+	uint			inactivate_flags;
+	xfs_qoff_logitem_t	*qoffstart;
+	int			nculprits;
+
+	if (!force && !capable(CAP_SYS_ADMIN))
+		return XFS_ERROR(EPERM);
+	/*
+	 * No file system can have quotas enabled on disk but not in core.
+	 * Note that quota utilities (like quotaoff) _expect_
+	 * errno == EEXIST here.
+	 */
+	if ((mp->m_qflags & flags) == 0)
+		return XFS_ERROR(EEXIST);
+	error = 0;
+
+	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
+
+	/*
+	 * We don't want to deal with two quotaoffs messing each other up,
+	 * so we're going to serialize it. quotaoff isn't exactly a
+	 * performance-critical operation.
+	 * If quotaoff, then we must be dealing with the root filesystem.
+	 */
+	ASSERT(mp->m_quotainfo);
+	if (mp->m_quotainfo)
+		mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+
+	ASSERT(mp->m_quotainfo);
+
+	/*
+	 * If we're just turning off quota enforcement, change mp and go.
+	 */
+	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
+		mp->m_qflags &= ~(flags);
+
+		s = XFS_SB_LOCK(mp);
+		mp->m_sb.sb_qflags = mp->m_qflags;
+		XFS_SB_UNLOCK(mp, s);
+		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+
+		/* XXX what to do if error ? Revert back to old vals incore ? */
+		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
+		return (error);
+	}
+
+	dqtype = 0;
+	inactivate_flags = 0;
+	/*
+	 * If accounting is off, we must turn enforcement off and clear the
+	 * quota 'CHKD' certificate to make it known that we have to
+	 * do a quotacheck the next time this quota is turned on.
+	 */
+	if (flags & XFS_UQUOTA_ACCT) {
+		dqtype |= XFS_QMOPT_UQUOTA;
+		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
+		inactivate_flags |= XFS_UQUOTA_ACTIVE;
+	}
+	if (flags & XFS_GQUOTA_ACCT) {
+		dqtype |= XFS_QMOPT_GQUOTA;
+		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
+		inactivate_flags |= XFS_GQUOTA_ACTIVE;
+	}
+
+	/*
+	 * Nothing to do?  Don't complain. This happens when we're just
+	 * turning off quota enforcement.
+	 */
+	if ((mp->m_qflags & flags) == 0) {
+		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+		return (0);
+	}
+
+	/*
+	 * Write the LI_QUOTAOFF log record, and do the SB changes atomically
+	 * and synchronously.
+	 */
+	xfs_qm_log_quotaoff(mp, &qoffstart, flags);
+
+	/*
+	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
+	 * to take care of the race between dqget and quotaoff. We don't take
+	 * any special locks to reset these bits. All processes need to check
+	 * these bits *after* taking inode lock(s) to see if the particular
+	 * quota type is in the process of being turned off. If *ACTIVE, it is
+	 * guaranteed that all dquot structures and quotainode pointers will
+	 * stay valid as long as that inode is kept locked.
+	 *
+	 * There is no turning back after this.
+	 */
+	mp->m_qflags &= ~inactivate_flags;
+
+	/*
+	 * Give back all the dquot reference(s) held by inodes.
+	 * Here we go through every single incore inode in this file system, and
+	 * do a dqrele on the i_udquot/i_gdquot that it may have.
+	 * Essentially, as long as somebody has an inode locked, this guarantees
+	 * that quotas will not be turned off. This is handy because in a
+	 * transaction once we lock the inode(s) and check for quotaon, we can
+	 * depend on the quota inodes (and other things) being valid as long as
+	 * we keep the lock(s).
+	 */
+	xfs_qm_dqrele_all_inodes(mp, flags);
+
+	/*
+	 * Next we make the changes in the quota flag in the mount struct.
+	 * This isn't protected by a particular lock directly, because we
+	 * don't want to take an mrlock every time we depend on quotas being on.
+	 */
+	mp->m_qflags &= ~(flags);
+
+	/*
+	 * Go through all the dquots of this file system and purge them,
+	 * according to what was turned off. We may not be able to get rid
+	 * of all dquots, because dquots can have temporary references that
+	 * are not attached to inodes, e.g. xfs_setattr, xfs_create.
+	 * So, if we couldn't purge all the dquots from the filesystem,
+	 * we can't get rid of the incore data structures.
+	 */
+	while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
+		delay(10 * nculprits);
+
+	/*
+	 * Transactions that had started before the ACTIVE state bit was cleared
+	 * could have logged many dquots, so they'd have higher LSNs than
+	 * the first QUOTAOFF log record does. If we happen to crash when
+	 * the tail of the log has gone past the QUOTAOFF record, but
+	 * before the last dquot modification, those dquots __will__
+	 * recover, and that's not good.
+	 *
+	 * So, we have QUOTAOFF start and end logitems; the start
+	 * logitem won't get overwritten until the end logitem appears...
+	 */
+	xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
+
+	/*
+	 * If quota is completely disabled, close shop.
+	 */
+	if ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_ALL) {
+		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+		xfs_qm_destroy_quotainfo(mp);
+		return (0);
+	}
+
+	/*
+	 * Release our quotainode references, and vn_purge them,
+	 * if we don't need them anymore.
+	 */
+	if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) {
+		XFS_PURGE_INODE(XFS_QI_UQIP(mp));
+		XFS_QI_UQIP(mp) = NULL;
+	}
+	if ((dqtype & XFS_QMOPT_GQUOTA) && XFS_QI_GQIP(mp)) {
+		XFS_PURGE_INODE(XFS_QI_GQIP(mp));
+		XFS_QI_GQIP(mp) = NULL;
+	}
+	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+
+	return (error);
+}
+
+STATIC int
+xfs_qm_scall_trunc_qfiles(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	int		error;
+	xfs_inode_t	*qip;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return XFS_ERROR(EPERM);
+	error = 0;
+	if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb) || flags == 0) {
+		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
+		return XFS_ERROR(EINVAL);
+	}
+
+	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
+		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
+		if (! error) {
+			(void) xfs_truncate_file(mp, qip);
+			VN_RELE(XFS_ITOV(qip));
+		}
+	}
+
+	if ((flags & XFS_DQ_GROUP) && mp->m_sb.sb_gquotino != NULLFSINO) {
+		error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
+		if (! error) {
+			(void) xfs_truncate_file(mp, qip);
+			VN_RELE(XFS_ITOV(qip));
+		}
+	}
+
+	return (error);
+}
+
+
+/*
+ * Switch on (a given) quota enforcement for a filesystem.  This takes
+ * effect immediately.
+ * (Switching on quota accounting must be done at mount time.)
+ */
+STATIC int
+xfs_qm_scall_quotaon(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	int		error;
+	unsigned long s;
+	uint		qf;
+	uint		accflags;
+	__int64_t	sbflags;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return XFS_ERROR(EPERM);
+
+	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
+	/*
+	 * Switching on quota accounting must be done at mount time.
+	 */
+	accflags = flags & XFS_ALL_QUOTA_ACCT;
+	flags &= ~(XFS_ALL_QUOTA_ACCT);
+
+	sbflags = 0;
+
+	if (flags == 0) {
+		qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
+		return XFS_ERROR(EINVAL);
+	}
+
+	/* No fs can turn on quotas with a delayed effect */
+	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
+
+	/*
+	 * Can't enforce without accounting. We check the superblock
+	 * qflags here instead of m_qflags because rootfs can have
+	 * quota accounting enabled on disk without m_qflags knowing.
+	 */
+	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
+	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
+	    (flags & XFS_UQUOTA_ENFD))
+	    ||
+	    ((flags & XFS_GQUOTA_ACCT) == 0 &&
+	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
+	    (flags & XFS_GQUOTA_ENFD))) {
+		qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
+			flags, mp->m_sb.sb_qflags);
+		return XFS_ERROR(EINVAL);
+	}
+	/*
+	 * If everything's up-to-date incore, then don't waste time.
+	 */
+	if ((mp->m_qflags & flags) == flags)
+		return XFS_ERROR(EEXIST);
+
+	/*
+	 * Change sb_qflags on disk but not incore mp->qflags
+	 * if this is the root filesystem.
+	 */
+	s = XFS_SB_LOCK(mp);
+	qf = mp->m_sb.sb_qflags;
+	mp->m_sb.sb_qflags = qf | flags;
+	XFS_SB_UNLOCK(mp, s);
+
+	/*
+	 * There's nothing to change if it's the same.
+	 */
+	if ((qf & flags) == flags && sbflags == 0)
+		return XFS_ERROR(EEXIST);
+	sbflags |= XFS_SB_QFLAGS;
+
+	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
+		return (error);
+	/*
+	 * If we aren't trying to switch on quota enforcement, we are done.
+	 */
+	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
+	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
+	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
+		return (0);
+
+	if (! XFS_IS_QUOTA_RUNNING(mp))
+		return XFS_ERROR(ESRCH);
+
+	/*
+	 * Switch on quota enforcement in core.
+	 */
+	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
+	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+
+	return (0);
+}
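xfs_qm_scall_quotaon() rejects a request to enable enforcement for a quota type whose accounting is on neither in the request nor in the on-disk superblock flags. A small sketch of that validation rule follows; the flag bits are stand-ins defined locally, not the real XFS constants.

#include <stdio.h>

/* Stand-in flag bits for illustration only; not the real XFS values. */
#define Q_UACCT 0x01    /* user quota accounting   */
#define Q_UENFD 0x02    /* user quota enforcement  */
#define Q_GACCT 0x04    /* group quota accounting  */
#define Q_GENFD 0x08    /* group quota enforcement */

/* Enforcement may only be switched on if accounting is already on,
 * either in the requested flags or in the superblock flags. */
static int quotaon_flags_valid(unsigned req, unsigned sb_qflags)
{
        if ((req & Q_UENFD) && !((req | sb_qflags) & Q_UACCT))
                return 0;
        if ((req & Q_GENFD) && !((req | sb_qflags) & Q_GACCT))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", quotaon_flags_valid(Q_UENFD, 0));        /* 0: no acct  */
        printf("%d\n", quotaon_flags_valid(Q_UENFD, Q_UACCT));  /* 1: allowed  */
        return 0;
}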
+
+
+
+/*
+ * Return quota status information, such as uquota-off, enforcements, etc.
+ */
+STATIC int
+xfs_qm_scall_getqstat(
+	xfs_mount_t	*mp,
+	fs_quota_stat_t *out)
+{
+	xfs_inode_t	*uip, *gip;
+	boolean_t	tempuqip, tempgqip;
+
+	uip = gip = NULL;
+	tempuqip = tempgqip = B_FALSE;
+	memset(out, 0, sizeof(fs_quota_stat_t));
+
+	out->qs_version = FS_QSTAT_VERSION;
+	if (! XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
+		out->qs_uquota.qfs_ino = NULLFSINO;
+		out->qs_gquota.qfs_ino = NULLFSINO;
+		return (0);
+	}
+	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
+							(XFS_ALL_QUOTA_ACCT|
+							 XFS_ALL_QUOTA_ENFD));
+	out->qs_pad = 0;
+	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
+	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
+
+	if (mp->m_quotainfo) {
+		uip = mp->m_quotainfo->qi_uquotaip;
+		gip = mp->m_quotainfo->qi_gquotaip;
+	}
+	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
+		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
+					0, 0, &uip, 0) == 0)
+			tempuqip = B_TRUE;
+	}
+	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
+		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
+					0, 0, &gip, 0) == 0)
+			tempgqip = B_TRUE;
+	}
+	if (uip) {
+		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
+		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
+		if (tempuqip)
+			VN_RELE(XFS_ITOV(uip));
+	}
+	if (gip) {
+		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
+		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
+		if (tempgqip)
+			VN_RELE(XFS_ITOV(gip));
+	}
+	if (mp->m_quotainfo) {
+		out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
+		out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
+		out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
+		out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
+		out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
+		out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
+	}
+	return (0);
+}
+
+/*
+ * Adjust quota limits, and start/stop timers accordingly.
+ */
+STATIC int
+xfs_qm_scall_setqlim(
+	xfs_mount_t		*mp,
+	xfs_dqid_t		id,
+	uint			type,
+	fs_disk_quota_t		*newlim)
+{
+	xfs_disk_dquot_t	*ddq;
+	xfs_dquot_t		*dqp;
+	xfs_trans_t		*tp;
+	int			error;
+	xfs_qcnt_t		hard, soft;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return XFS_ERROR(EPERM);
+
+	if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0)
+		return (0);
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
+	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
+				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
+		xfs_trans_cancel(tp, 0);
+		return (error);
+	}
+
+	/*
+	 * We don't want to race with a quotaoff so take the quotaoff lock.
+	 * (We don't hold an inode lock, so there's nothing else to stop
+	 * a quotaoff from happening). (XXX This doesn't currently happen
+	 * because we take the vfslock before calling xfs_qm_sysent).
+	 */
+	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+
+	/*
+	 * Get the dquot (locked), and join it to the transaction.
+	 * Allocate the dquot if it doesn't exist.
+	 */
+	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
+		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+		ASSERT(error != ENOENT);
+		return (error);
+	}
+	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
+	xfs_trans_dqjoin(tp, dqp);
+	ddq = &dqp->q_core;
+
+	/*
+	 * Make sure that hard limits are >= soft limits before changing.
+	 */
+	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
+		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+			INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT);
+	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
+		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+			INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT);
+	if (hard == 0 || hard >= soft) {
+		INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard);
+		INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft);
+		if (id == 0) {
+			mp->m_quotainfo->qi_bhardlimit = hard;
+			mp->m_quotainfo->qi_bsoftlimit = soft;
+		}
+	} else {
+		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
+	}
+	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
+		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+			INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT);
+	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
+		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+			INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT);
+	if (hard == 0 || hard >= soft) {
+		INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard);
+		INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft);
+		if (id == 0) {
+			mp->m_quotainfo->qi_rtbhardlimit = hard;
+			mp->m_quotainfo->qi_rtbsoftlimit = soft;
+		}
+	} else {
+		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+	}
+
+	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+		(xfs_qcnt_t) newlim->d_ino_hardlimit :
+			INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT);
+	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+		(xfs_qcnt_t) newlim->d_ino_softlimit :
+			INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT);
+	if (hard == 0 || hard >= soft) {
+		INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard);
+		INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft);
+		if (id == 0) {
+			mp->m_quotainfo->qi_ihardlimit = hard;
+			mp->m_quotainfo->qi_isoftlimit = soft;
+		}
+	} else {
+		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
+	}
+
+	if (id == 0) {
+		/*
+		 * Timelimits for the super user set the relative time
+		 * the other users can be over quota for this file system.
+		 * If it is zero a default is used.  Ditto for the default
+		 * soft and hard limit values (already done, above).
+		 */
+		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
+			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
+			INT_SET(ddq->d_btimer, ARCH_CONVERT, newlim->d_btimer);
+		}
+		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
+			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
+			INT_SET(ddq->d_itimer, ARCH_CONVERT, newlim->d_itimer);
+		}
+		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
+			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
+			INT_SET(ddq->d_rtbtimer, ARCH_CONVERT, newlim->d_rtbtimer);
+		}
+	} else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ {
+		/*
+		 * If the user is now over quota, start the timelimit.
+		 * The user will not be 'warned'.
+		 * Note that we keep the timers ticking, whether enforcement
+		 * is on or off. We don't really want to bother with iterating
+		 * over all ondisk dquots and turning the timers on/off.
+		 */
+		xfs_qm_adjust_dqtimers(mp, ddq);
+	}
+	dqp->dq_flags |= XFS_DQ_DIRTY;
+	xfs_trans_log_dquot(tp, dqp);
+
+	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
+	xfs_trans_commit(tp, 0, NULL);
+	xfs_qm_dqprint(dqp);
+	xfs_qm_dqrele(dqp);
+	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+
+	return (0);
+}
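Each limit pair in xfs_qm_scall_setqlim() is accepted only when the hard limit is zero (meaning "no limit") or at least as large as the soft limit; otherwise the pair is rejected and the old values stand. A compact sketch of that acceptance rule, using plain integers rather than the on-disk dquot fields:

#include <stdio.h>

/* Apply a new (hard, soft) limit pair only if it is internally consistent:
 * hard == 0 means "unlimited", otherwise hard must be >= soft. */
static int set_limits(unsigned long long *hardp, unsigned long long *softp,
                      unsigned long long hard, unsigned long long soft)
{
        if (hard != 0 && hard < soft)
                return -1;      /* reject; caller keeps the old limits */
        *hardp = hard;
        *softp = soft;
        return 0;
}

int main(void)
{
        unsigned long long hard = 0, soft = 0;

        printf("%d\n", set_limits(&hard, &soft, 1000, 800));  /*  0: accepted */
        printf("%d\n", set_limits(&hard, &soft, 500, 800));   /* -1: rejected */
        printf("hard=%llu soft=%llu\n", hard, soft);          /* still 1000/800 */
        return 0;
}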
+
+STATIC int
+xfs_qm_scall_getquota(
+	xfs_mount_t	*mp,
+	xfs_dqid_t	id,
+	uint		type,
+	fs_disk_quota_t *out)
+{
+	xfs_dquot_t	*dqp;
+	int		error;
+
+	/*
+	 * Try to get the dquot. We don't want it allocated on disk, so
+	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
+	 * exist, we'll get ENOENT back.
+	 */
+	if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
+		return (error);
+	}
+
+	xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
+	/*
+	 * If everything's NULL, this dquot doesn't quite exist as far as
+	 * our utility programs are concerned.
+	 */
+	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+		xfs_qm_dqput(dqp);
+		return XFS_ERROR(ENOENT);
+	}
+	/* xfs_qm_dqprint(dqp); */
+	/*
+	 * Convert the disk dquot to the exportable format
+	 */
+	xfs_qm_export_dquot(mp, &dqp->q_core, out);
+	xfs_qm_dqput(dqp);
+	return (error ? XFS_ERROR(EFAULT) : 0);
+}
+
+
+STATIC int
+xfs_qm_log_quotaoff_end(
+	xfs_mount_t		*mp,
+	xfs_qoff_logitem_t	*startqoff,
+	uint			flags)
+{
+	xfs_trans_t	       *tp;
+	int			error;
+	xfs_qoff_logitem_t     *qoffi;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
+
+	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
+				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
+		xfs_trans_cancel(tp, 0);
+		return (error);
+	}
+
+	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
+					flags & XFS_ALL_QUOTA_ACCT);
+	xfs_trans_log_quotaoff_item(tp, qoffi);
+
+	/*
+	 * We have to make sure that the transaction is secure on disk before we
+	 * return and actually stop quota accounting. So, make it synchronous.
+	 * We don't care about quotaoff's performance.
+	 */
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0, NULL);
+	return (error);
+}
+
+
+STATIC int
+xfs_qm_log_quotaoff(
+	xfs_mount_t	       *mp,
+	xfs_qoff_logitem_t     **qoffstartp,
+	uint		       flags)
+{
+	xfs_trans_t	       *tp;
+	int			error;
+	unsigned long	s;
+	xfs_qoff_logitem_t     *qoffi=NULL;
+	uint			oldsbqflag=0;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
+	if ((error = xfs_trans_reserve(tp, 0,
+				      sizeof(xfs_qoff_logitem_t) * 2 +
+				      mp->m_sb.sb_sectsize + 128,
+				      0,
+				      0,
+				      XFS_DEFAULT_LOG_COUNT))) {
+		goto error0;
+	}
+
+	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
+	xfs_trans_log_quotaoff_item(tp, qoffi);
+
+	s = XFS_SB_LOCK(mp);
+	oldsbqflag = mp->m_sb.sb_qflags;
+	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
+	XFS_SB_UNLOCK(mp, s);
+
+	xfs_mod_sb(tp, XFS_SB_QFLAGS);
+
+	/*
+	 * We have to make sure that the transaction is secure on disk before we
+	 * return and actually stop quota accounting. So, make it synchronous.
+	 * We don't care about quotaoff's performance.
+	 */
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0, NULL);
+
+error0:
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		/*
+		 * No one else is modifying sb_qflags, so this is OK.
+		 * We still hold the quotaofflock.
+		 */
+		s = XFS_SB_LOCK(mp);
+		mp->m_sb.sb_qflags = oldsbqflag;
+		XFS_SB_UNLOCK(mp, s);
+	}
+	*qoffstartp = qoffi;
+	return (error);
+}
+
+
+/*
+ * Translate an internal style on-disk-dquot to the exportable format.
+ * The main differences are that the counters/limits are all in Basic
+ * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
+ * to be converted to the native endianness.
+ */
+STATIC void
+xfs_qm_export_dquot(
+	xfs_mount_t		*mp,
+	xfs_disk_dquot_t	*src,
+	struct fs_disk_quota	*dst)
+{
+	memset(dst, 0, sizeof(*dst));
+	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
+	dst->d_flags =
+		xfs_qm_export_qtype_flags(INT_GET(src->d_flags, ARCH_CONVERT));
+	dst->d_id = INT_GET(src->d_id, ARCH_CONVERT);
+	dst->d_blk_hardlimit = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_hardlimit, ARCH_CONVERT));
+	dst->d_blk_softlimit = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_softlimit, ARCH_CONVERT));
+	dst->d_ino_hardlimit = (__uint64_t)
+		INT_GET(src->d_ino_hardlimit, ARCH_CONVERT);
+	dst->d_ino_softlimit = (__uint64_t)
+		INT_GET(src->d_ino_softlimit, ARCH_CONVERT);
+	dst->d_bcount = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_bcount, ARCH_CONVERT));
+	dst->d_icount = (__uint64_t) INT_GET(src->d_icount, ARCH_CONVERT);
+	dst->d_btimer = (__uint32_t) INT_GET(src->d_btimer, ARCH_CONVERT);
+	dst->d_itimer = (__uint32_t) INT_GET(src->d_itimer, ARCH_CONVERT);
+	dst->d_iwarns = INT_GET(src->d_iwarns, ARCH_CONVERT);
+	dst->d_bwarns = INT_GET(src->d_bwarns, ARCH_CONVERT);
+
+	dst->d_rtb_hardlimit = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_hardlimit, ARCH_CONVERT));
+	dst->d_rtb_softlimit = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_softlimit, ARCH_CONVERT));
+	dst->d_rtbcount = (__uint64_t)
+		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtbcount, ARCH_CONVERT));
+	dst->d_rtbtimer = (__uint32_t) INT_GET(src->d_rtbtimer, ARCH_CONVERT);
+	dst->d_rtbwarns = INT_GET(src->d_rtbwarns, ARCH_CONVERT);
+
+	/*
+	 * Internally, we don't reset all the timers when quota enforcement
+	 * gets turned off. No need to confuse the userlevel code,
+	 * so return zeroes in that case.
+	 */
+	if (! XFS_IS_QUOTA_ENFORCED(mp)) {
+		dst->d_btimer = 0;
+		dst->d_itimer = 0;
+		dst->d_rtbtimer = 0;
+	}
+
+#ifdef DEBUG
+	if (XFS_IS_QUOTA_ENFORCED(mp) && dst->d_id != 0) {
+		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
+		    (dst->d_blk_softlimit > 0)) {
+			ASSERT(dst->d_btimer != 0);
+		}
+		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
+		    (dst->d_ino_softlimit > 0)) {
+			ASSERT(dst->d_itimer != 0);
+		}
+	}
+#endif
+}
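The exported counters and limits are converted from filesystem blocks (FSBs) to 512-byte basic blocks (BBs) by XFS_FSB_TO_BB(), which takes the mount so it can use the filesystem's actual block size. A worked example of that unit conversion, assuming a hypothetical 4 KiB filesystem block size:

#include <stdio.h>

#define BBSIZE          512ULL          /* basic block: 512 bytes */
#define FS_BLOCKSIZE    4096ULL         /* assumed filesystem block size */

/* Convert a count of filesystem blocks to 512-byte basic blocks. */
static unsigned long long fsb_to_bb(unsigned long long fsb)
{
        return fsb * (FS_BLOCKSIZE / BBSIZE);
}

int main(void)
{
        /* A 100-FSB limit on a 4 KiB-block filesystem exports as 800 BBs. */
        printf("%llu FSBs -> %llu BBs\n", 100ULL, fsb_to_bb(100ULL));
        return 0;
}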
+
+STATIC uint
+xfs_qm_import_qtype_flags(
+	uint uflags)
+{
+	/*
+	 * Can't be both at the same time.
+	 */
+	if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ==
+	     (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
+	    ((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == 0))
+		return (0);
+
+	return (uflags & XFS_USER_QUOTA) ?
+		XFS_DQ_USER : XFS_DQ_GROUP;
+}
+
+STATIC uint
+xfs_qm_export_qtype_flags(
+	uint flags)
+{
+	/*
+	 * Can't be both at the same time.
+	 */
+	ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) !=
+		(XFS_GROUP_QUOTA | XFS_USER_QUOTA));
+	ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != 0);
+
+	return (flags & XFS_DQ_USER) ?
+		XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+}
+
+STATIC uint
+xfs_qm_import_flags(
+	uint uflags)
+{
+	uint flags = 0;
+
+	if (uflags & XFS_QUOTA_UDQ_ACCT)
+		flags |= XFS_UQUOTA_ACCT;
+	if (uflags & XFS_QUOTA_GDQ_ACCT)
+		flags |= XFS_GQUOTA_ACCT;
+	if (uflags & XFS_QUOTA_UDQ_ENFD)
+		flags |= XFS_UQUOTA_ENFD;
+	if (uflags & XFS_QUOTA_GDQ_ENFD)
+		flags |= XFS_GQUOTA_ENFD;
+	return (flags);
+}
+
+
+STATIC uint
+xfs_qm_export_flags(
+	uint flags)
+{
+	uint uflags;
+
+	uflags = 0;
+	if (flags & XFS_UQUOTA_ACCT)
+		uflags |= XFS_QUOTA_UDQ_ACCT;
+	if (flags & XFS_GQUOTA_ACCT)
+		uflags |= XFS_QUOTA_GDQ_ACCT;
+	if (flags & XFS_UQUOTA_ENFD)
+		uflags |= XFS_QUOTA_UDQ_ENFD;
+	if (flags & XFS_GQUOTA_ENFD)
+		uflags |= XFS_QUOTA_GDQ_ENFD;
+	return (uflags);
+}
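xfs_qm_import_flags() and xfs_qm_export_flags() map the user-visible accounting/enforcement bits to the internal ones and back, one bit at a time, so a round trip returns the original set. A sketch of that round trip with locally defined stand-in bit values (not the real XFS constants):

#include <assert.h>
#include <stdio.h>

/* Stand-in bit values for illustration; the real constants differ. */
#define USER_ACCT  0x1
#define GROUP_ACCT 0x2
#define USER_ENFD  0x4
#define GROUP_ENFD 0x8

#define IN_UACCT 0x10
#define IN_GACCT 0x20
#define IN_UENFD 0x40
#define IN_GENFD 0x80

static unsigned import_flags(unsigned u)
{
        unsigned f = 0;
        if (u & USER_ACCT)  f |= IN_UACCT;
        if (u & GROUP_ACCT) f |= IN_GACCT;
        if (u & USER_ENFD)  f |= IN_UENFD;
        if (u & GROUP_ENFD) f |= IN_GENFD;
        return f;
}

static unsigned export_flags(unsigned f)
{
        unsigned u = 0;
        if (f & IN_UACCT) u |= USER_ACCT;
        if (f & IN_GACCT) u |= GROUP_ACCT;
        if (f & IN_UENFD) u |= USER_ENFD;
        if (f & IN_GENFD) u |= GROUP_ENFD;
        return u;
}

int main(void)
{
        unsigned u = USER_ACCT | USER_ENFD | GROUP_ACCT;

        /* Importing then exporting is the identity on these four bits. */
        assert(export_flags(import_flags(u)) == u);
        printf("round trip ok: 0x%x\n", u);
        return 0;
}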
+
+
+/*
+ * Go thru all the inodes in the file system, releasing their dquots.
+ * Note that the mount structure gets modified to indicate that quotas are off
+ * AFTER this, in the case of quotaoff. This also gets called from
+ * xfs_rootumount.
+ */
+void
+xfs_qm_dqrele_all_inodes(
+	struct xfs_mount *mp,
+	uint		 flags)
+{
+	vmap_t		vmap;
+	xfs_inode_t	*ip, *topino;
+	uint		ireclaims;
+	vnode_t		*vp;
+	boolean_t	vnode_refd;
+
+	ASSERT(mp->m_quotainfo);
+
+again:
+	XFS_MOUNT_ILOCK(mp);
+	ip = mp->m_inodes;
+	if (ip == NULL) {
+		XFS_MOUNT_IUNLOCK(mp);
+		return;
+	}
+	do {
+		/* Skip markers inserted by xfs_sync */
+		if (ip->i_mount == NULL) {
+			ip = ip->i_mnext;
+			continue;
+		}
+		/* Root inode, rbmip and rsumip have associated blocks */
+		if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
+			ASSERT(ip->i_udquot == NULL);
+			ASSERT(ip->i_gdquot == NULL);
+			ip = ip->i_mnext;
+			continue;
+		}
+		vp = XFS_ITOV_NULL(ip);
+		if (!vp) {
+			ASSERT(ip->i_udquot == NULL);
+			ASSERT(ip->i_gdquot == NULL);
+			ip = ip->i_mnext;
+			continue;
+		}
+		vnode_refd = B_FALSE;
+		if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
+			/*
+			 * Sample vp mapping while holding the mplock, lest
+			 * we come across a non-existent vnode.
+			 */
+			VMAP(vp, vmap);
+			ireclaims = mp->m_ireclaims;
+			topino = mp->m_inodes;
+			XFS_MOUNT_IUNLOCK(mp);
+
+			/* XXX restart limit ? */
+			if ( ! (vp = vn_get(vp, &vmap)))
+				goto again;
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			vnode_refd = B_TRUE;
+		} else {
+			ireclaims = mp->m_ireclaims;
+			topino = mp->m_inodes;
+			XFS_MOUNT_IUNLOCK(mp);
+		}
+
+		/*
+		 * We don't keep the mountlock across the dqrele() call,
+		 * since it can take a while..
+		 */
+		if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
+			xfs_qm_dqrele(ip->i_udquot);
+			ip->i_udquot = NULL;
+		}
+		if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
+			xfs_qm_dqrele(ip->i_gdquot);
+			ip->i_gdquot = NULL;
+		}
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		/*
+		 * Wait until we've dropped the ilock and mountlock to
+		 * do the vn_rele. Or be condemned to an eternity in the
+		 * inactive code in hell.
+		 */
+		if (vnode_refd)
+			VN_RELE(vp);
+		XFS_MOUNT_ILOCK(mp);
+		/*
+		 * If an inode was inserted or removed, we gotta
+		 * start over again.
+		 */
+		if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) {
+			/* XXX use a sentinel */
+			XFS_MOUNT_IUNLOCK(mp);
+			goto again;
+		}
+		ip = ip->i_mnext;
+	} while (ip != mp->m_inodes);
+
+	XFS_MOUNT_IUNLOCK(mp);
+}
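xfs_qm_dqrele_all_inodes() drops the mount lock around the slow per-inode work, keeps snapshots of the list head and the reclaim count, and restarts from the top if either changed while the lock was dropped. A small sketch of that snapshot-and-restart idiom, with a single generation counter standing in for the two snapshots and a plain loop standing in for the inode list:

#include <stdio.h>

#define NITEMS 4

static int generation;          /* bumped whenever the "list" changes */

/* Pretend the list mutates once, partway through the first pass. */
static void expensive_work(int i)
{
        if (generation == 0 && i == 2)
                generation = 1;
}

int main(void)
{
        int i, snap;

again:
        snap = generation;              /* snapshot while "locked" */
        for (i = 0; i < NITEMS; i++) {
                /* "unlock", do slow work, "relock" ... */
                expensive_work(i);
                if (generation != snap) {
                        printf("list changed at item %d, restarting\n", i);
                        goto again;     /* start over, as the XFS walk does */
                }
        }
        printf("walk completed\n");
        return 0;
}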
+
+/*------------------------------------------------------------------------*/
+#ifdef DEBUG
+/*
+ * This contains all the test functions for XFS disk quotas.
+ * Currently it does a quota accounting check, i.e. it walks through
+ * all inodes in the file system, calculating the dquot accounting fields,
+ * and prints out any inconsistencies.
+ */
+xfs_dqhash_t *qmtest_udqtab;
+xfs_dqhash_t *qmtest_gdqtab;
+int	      qmtest_hashmask;
+int	      qmtest_nfails;
+mutex_t	      qcheck_lock;
+
+#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
+				 (__psunsigned_t)(id)) & \
+				(qmtest_hashmask - 1))
+
+#define DQTEST_HASH(mp, id, type)   ((type & XFS_DQ_USER) ? \
+				     (qmtest_udqtab + \
+				      DQTEST_HASHVAL(mp, id)) : \
+				     (qmtest_gdqtab + \
+				      DQTEST_HASHVAL(mp, id)))
+
+#define DQTEST_LIST_PRINT(l, NXT, title) \
+{ \
+	  xfs_dqtest_t	*dqp; int i = 0;\
+	  cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
+	  for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
+	       dqp = (xfs_dqtest_t *)dqp->NXT) { \
+		cmn_err(CE_DEBUG, "  %d. \"%d (%s)\"  bcnt = %d, icnt = %d", \
+			 ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp),	     \
+			 dqp->d_bcount, dqp->d_icount); } \
+}
+
+typedef struct dqtest {
+	xfs_dqmarker_t	q_lists;
+	xfs_dqhash_t	*q_hash;	/* the hashchain header */
+	xfs_mount_t	*q_mount;	/* filesystem this relates to */
+	xfs_dqid_t	d_id;		/* user id or group id */
+	xfs_qcnt_t	d_bcount;	/* # disk blocks owned by the user */
+	xfs_qcnt_t	d_icount;	/* # inodes owned by the user */
+} xfs_dqtest_t;
+
+STATIC void
+xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
+{
+	xfs_dquot_t *d;
+	if (((d) = (h)->qh_next))
+		(d)->HL_PREVP = &((dqp)->HL_NEXT);
+	(dqp)->HL_NEXT = d;
+	(dqp)->HL_PREVP = &((h)->qh_next);
+	(h)->qh_next = (xfs_dquot_t *)dqp;
+	(h)->qh_version++;
+	(h)->qh_nelems++;
+}
+STATIC void
+xfs_qm_dqtest_print(
+	xfs_dqtest_t	*d)
+{
+	cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------");
+	cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id);
+	cmn_err(CE_DEBUG, "---- type     = %s", XFS_QM_ISUDQ(d)? "USR" : "GRP");
+	cmn_err(CE_DEBUG, "---- fs       = 0x%p", d->q_mount);
+	cmn_err(CE_DEBUG, "---- bcount   = %Lu (0x%x)",
+		d->d_bcount, (int)d->d_bcount);
+	cmn_err(CE_DEBUG, "---- icount   = %Lu (0x%x)",
+		d->d_icount, (int)d->d_icount);
+	cmn_err(CE_DEBUG, "---------------------------");
+}
+
+STATIC void
+xfs_qm_dqtest_failed(
+	xfs_dqtest_t	*d,
+	xfs_dquot_t	*dqp,
+	char		*reason,
+	xfs_qcnt_t	a,
+	xfs_qcnt_t	b,
+	int		error)
+{
+	qmtest_nfails++;
+	if (error)
+		cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s",
+		       INT_GET(d->d_id, ARCH_CONVERT), error, reason);
+	else
+		cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]",
+		       INT_GET(d->d_id, ARCH_CONVERT), reason, (int)a, (int)b);
+	xfs_qm_dqtest_print(d);
+	if (dqp)
+		xfs_qm_dqprint(dqp);
+}
+
+STATIC int
+xfs_dqtest_cmp2(
+	xfs_dqtest_t	*d,
+	xfs_dquot_t	*dqp)
+{
+	int err = 0;
+	if (INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) != d->d_icount) {
+		xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
+			INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
+			d->d_icount, 0);
+		err++;
+	}
+	if (INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) != d->d_bcount) {
+		xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
+			INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
+			d->d_bcount, 0);
+		err++;
+	}
+	if (INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) &&
+	    INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) >=
+	    INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT)) {
+		if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
+			cmn_err(CE_DEBUG,
+				"%d [%s] [0x%p] BLK TIMER NOT STARTED",
+				d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
+			err++;
+		}
+	}
+	if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) &&
+	    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
+	    INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) {
+		if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
+			cmn_err(CE_DEBUG,
+				"%d [%s] [0x%p] INO TIMER NOT STARTED",
+				d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
+			err++;
+		}
+	}
+#ifdef QUOTADEBUG
+	if (!err) {
+		cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
+			d->d_id, XFS_QM_ISUDQ(d) ? "USR" : "GRP", d->q_mount);
+	}
+#endif
+	return (err);
+}
+
+STATIC void
+xfs_dqtest_cmp(
+	xfs_dqtest_t	*d)
+{
+	xfs_dquot_t	*dqp;
+	int		error;
+
+	/* xfs_qm_dqtest_print(d); */
+	if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
+				 &dqp))) {
+		xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
+		return;
+	}
+	xfs_dqtest_cmp2(d, dqp);
+	xfs_qm_dqput(dqp);
+}
+
+STATIC int
+xfs_qm_internalqcheck_dqget(
+	xfs_mount_t	*mp,
+	xfs_dqid_t	id,
+	uint		type,
+	xfs_dqtest_t	**O_dq)
+{
+	xfs_dqtest_t	*d;
+	xfs_dqhash_t	*h;
+
+	h = DQTEST_HASH(mp, id, type);
+	for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
+	     d = (xfs_dqtest_t *) d->HL_NEXT) {
+		/* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */
+		if (d->d_id == id && mp == d->q_mount) {
+			*O_dq = d;
+			return (0);
+		}
+	}
+	d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
+	d->dq_flags = type;
+	d->d_id = id;
+	d->q_mount = mp;
+	d->q_hash = h;
+	xfs_qm_hashinsert(h, d);
+	*O_dq = d;
+	return (0);
+}
+
+STATIC void
+xfs_qm_internalqcheck_get_dquots(
+	xfs_mount_t	*mp,
+	xfs_dqid_t	uid,
+	xfs_dqid_t	gid,
+	xfs_dqtest_t	**ud,
+	xfs_dqtest_t	**gd)
+{
+	if (XFS_IS_UQUOTA_ON(mp))
+		xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
+	if (XFS_IS_GQUOTA_ON(mp))
+		xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
+}
+
+
+STATIC void
+xfs_qm_internalqcheck_dqadjust(
+	xfs_inode_t		*ip,
+	xfs_dqtest_t		*d)
+{
+	d->d_icount++;
+	d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
+}
+
+STATIC int
+xfs_qm_internalqcheck_adjust(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	void		__user *buffer,	/* not used */
+	int		ubsize,		/* not used */
+	void		*private_data,	/* not used */
+	xfs_daddr_t	bno,		/* starting block of inode cluster */
+	int		*ubused,	/* not used */
+	void		*dip,		/* not used */
+	int		*res)		/* bulkstat result code */
+{
+	xfs_inode_t		*ip;
+	xfs_dqtest_t		*ud, *gd;
+	uint			lock_flags;
+	boolean_t		ipreleased;
+	int			error;
+
+	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
+		*res = BULKSTAT_RV_NOTHING;
+		qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
+			(unsigned long long) ino,
+			(unsigned long long) mp->m_sb.sb_uquotino,
+			(unsigned long long) mp->m_sb.sb_gquotino);
+		return XFS_ERROR(EINVAL);
+	}
+	ipreleased = B_FALSE;
+ again:
+	lock_flags = XFS_ILOCK_SHARED;
+	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
+		*res = BULKSTAT_RV_NOTHING;
+		return (error);
+	}
+
+	if (ip->i_d.di_mode == 0) {
+		xfs_iput_new(ip, lock_flags);
+		*res = BULKSTAT_RV_NOTHING;
+		return XFS_ERROR(ENOENT);
+	}
+
+	/*
+	 * This inode can have blocks after EOF which can get released
+	 * when we send it to inactive. Since we don't check the dquot
+	 * until after all our calculations are done, we must get rid
+	 * of those now.
+	 */
+	if (! ipreleased) {
+		xfs_iput(ip, lock_flags);
+		ipreleased = B_TRUE;
+		goto again;
+	}
+	xfs_qm_internalqcheck_get_dquots(mp,
+					(xfs_dqid_t) ip->i_d.di_uid,
+					(xfs_dqid_t) ip->i_d.di_gid,
+					&ud, &gd);
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		ASSERT(ud);
+		xfs_qm_internalqcheck_dqadjust(ip, ud);
+	}
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		ASSERT(gd);
+		xfs_qm_internalqcheck_dqadjust(ip, gd);
+	}
+	xfs_iput(ip, lock_flags);
+	*res = BULKSTAT_RV_DIDONE;
+	return (0);
+}
+
+
+/* PRIVATE, debugging */
+int
+xfs_qm_internalqcheck(
+	xfs_mount_t	*mp)
+{
+	xfs_ino_t	lastino;
+	int		done, count;
+	int		i;
+	xfs_dqtest_t	*d, *e;
+	xfs_dqhash_t	*h1;
+	int		error;
+
+	lastino = 0;
+	qmtest_hashmask = 32;
+	count = 5;
+	done = 0;
+	qmtest_nfails = 0;
+
+	if (! XFS_IS_QUOTA_ON(mp))
+		return XFS_ERROR(ESRCH);
+
+	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	XFS_bflush(mp->m_ddev_targp);
+	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	XFS_bflush(mp->m_ddev_targp);
+
+	mutex_lock(&qcheck_lock, PINOD);
+	/* There should be absolutely no quota activity while this
+	   is going on. */
+	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
+				    sizeof(xfs_dqhash_t), KM_SLEEP);
+	qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
+				    sizeof(xfs_dqhash_t), KM_SLEEP);
+	do {
+		/*
+		 * Iterate thru all the inodes in the file system,
+		 * adjusting the corresponding dquot counters
+		 */
+		if ((error = xfs_bulkstat(mp, &lastino, &count,
+				 xfs_qm_internalqcheck_adjust, NULL,
+				 0, NULL, BULKSTAT_FG_IGET, &done))) {
+			break;
+		}
+	} while (! done);
+	if (error) {
+		cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
+	}
+	cmn_err(CE_DEBUG, "Checking results against system dquots");
+	for (i = 0; i < qmtest_hashmask; i++) {
+		h1 = &qmtest_udqtab[i];
+		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+			xfs_dqtest_cmp(d);
+			e = (xfs_dqtest_t *) d->HL_NEXT;
+			kmem_free(d, sizeof(xfs_dqtest_t));
+			d = e;
+		}
+		h1 = &qmtest_gdqtab[i];
+		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+			xfs_dqtest_cmp(d);
+			e = (xfs_dqtest_t *) d->HL_NEXT;
+			kmem_free(d, sizeof(xfs_dqtest_t));
+			d = e;
+		}
+	}
+
+	if (qmtest_nfails) {
+		cmn_err(CE_DEBUG, "******** quotacheck failed  ********");
+		cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
+	} else {
+		cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
+	}
+	kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
+	kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
+	mutex_unlock(&qcheck_lock);
+	return (qmtest_nfails);
+}
+
+#endif /* DEBUG */
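The DEBUG-only quotacheck above walks every inode via bulkstat, accumulates shadow icount/bcount totals per quota id, and then compares each shadow entry against the corresponding real dquot. A condensed sketch of that accumulate-then-compare structure, using a flat array keyed by id instead of the hash table; all data here is made up for illustration:

#include <stdio.h>

#define NIDS 3

struct shadow { unsigned long icount, bcount; };

/* (id, nblocks) pairs standing in for the inodes of a tiny filesystem. */
static const struct { unsigned id; unsigned long nblocks; } inodes[] = {
        { 0, 10 }, { 1, 4 }, { 1, 6 }, { 2, 0 },
};

/* Counters the "dquots" claim; the entry for id 1 is deliberately wrong. */
static const struct shadow dquots[NIDS] = {
        { 1, 10 }, { 1, 10 }, { 1, 0 },
};

int main(void)
{
        struct shadow acc[NIDS] = { { 0, 0 } };
        unsigned i;
        int nfails = 0;

        /* Pass 1: accumulate per-id usage from the inode walk. */
        for (i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++) {
                acc[inodes[i].id].icount++;
                acc[inodes[i].id].bcount += inodes[i].nblocks;
        }
        /* Pass 2: compare against what the dquots record. */
        for (i = 0; i < NIDS; i++) {
                if (acc[i].icount != dquots[i].icount ||
                    acc[i].bcount != dquots[i].bcount) {
                        printf("id %u mismatch\n", i);
                        nfails++;
                }
        }
        printf(nfails ? "quotacheck failed (%d)\n" : "quotacheck ok (%d)\n",
               nfails);
        return 0;
}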
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
new file mode 100644
index 0000000..414b600
--- /dev/null
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_QUOTA_PRIV_H__
+#define __XFS_QUOTA_PRIV_H__
+
+/*
+ * Number of bmaps that we ask from bmapi when doing a quotacheck.
+ * We make this restriction to keep the memory usage to a minimum.
+ */
+#define XFS_DQITER_MAP_SIZE	10
+
+/* Number of dquots that fit in to a dquot block */
+#define XFS_QM_DQPERBLK(mp)	((mp)->m_quotainfo->qi_dqperchunk)
+
+#define XFS_ISLOCKED_INODE(ip)		(ismrlocked(&(ip)->i_lock, \
+					    MR_UPDATE | MR_ACCESS) != 0)
+#define XFS_ISLOCKED_INODE_EXCL(ip)	(ismrlocked(&(ip)->i_lock, \
+					    MR_UPDATE) != 0)
+
+#define XFS_DQ_IS_ADDEDTO_TRX(t, d)	((d)->q_transp == (t))
+
+#define XFS_QI_MPLRECLAIMS(mp)	((mp)->m_quotainfo->qi_dqreclaims)
+#define XFS_QI_UQIP(mp)		((mp)->m_quotainfo->qi_uquotaip)
+#define XFS_QI_GQIP(mp)		((mp)->m_quotainfo->qi_gquotaip)
+#define XFS_QI_DQCHUNKLEN(mp)	((mp)->m_quotainfo->qi_dqchunklen)
+#define XFS_QI_BTIMELIMIT(mp)	((mp)->m_quotainfo->qi_btimelimit)
+#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit)
+#define XFS_QI_ITIMELIMIT(mp)	((mp)->m_quotainfo->qi_itimelimit)
+#define XFS_QI_BWARNLIMIT(mp)	((mp)->m_quotainfo->qi_bwarnlimit)
+#define XFS_QI_IWARNLIMIT(mp)	((mp)->m_quotainfo->qi_iwarnlimit)
+#define XFS_QI_QOFFLOCK(mp)	((mp)->m_quotainfo->qi_quotaofflock)
+
+#define XFS_QI_MPL_LIST(mp)	((mp)->m_quotainfo->qi_dqlist)
+#define XFS_QI_MPLLOCK(mp)	((mp)->m_quotainfo->qi_dqlist.qh_lock)
+#define XFS_QI_MPLNEXT(mp)	((mp)->m_quotainfo->qi_dqlist.qh_next)
+#define XFS_QI_MPLNDQUOTS(mp)	((mp)->m_quotainfo->qi_dqlist.qh_nelems)
+
+#define XQMLCK(h)			(mutex_lock(&((h)->qh_lock), PINOD))
+#define XQMUNLCK(h)			(mutex_unlock(&((h)->qh_lock)))
+#ifdef DEBUG
+struct xfs_dqhash;
+static inline int XQMISLCKD(struct xfs_dqhash *h)
+{
+	if (mutex_trylock(&h->qh_lock)) {
+		mutex_unlock(&h->qh_lock);
+		return 0;
+	}
+	return 1;
+}
+#endif
+
+#define XFS_DQ_HASH_LOCK(h)		XQMLCK(h)
+#define XFS_DQ_HASH_UNLOCK(h)		XQMUNLCK(h)
+#define XFS_DQ_IS_HASH_LOCKED(h)	XQMISLCKD(h)
+
+#define xfs_qm_mplist_lock(mp)		XQMLCK(&(XFS_QI_MPL_LIST(mp)))
+#define xfs_qm_mplist_unlock(mp)	XQMUNLCK(&(XFS_QI_MPL_LIST(mp)))
+#define XFS_QM_IS_MPLIST_LOCKED(mp)	XQMISLCKD(&(XFS_QI_MPL_LIST(mp)))
+
+#define xfs_qm_freelist_lock(qm)	XQMLCK(&((qm)->qm_dqfreelist))
+#define xfs_qm_freelist_unlock(qm)	XQMUNLCK(&((qm)->qm_dqfreelist))
+#define XFS_QM_IS_FREELIST_LOCKED(qm)	XQMISLCKD(&((qm)->qm_dqfreelist))
+
+/*
+ * Hash into a bucket in the dquot hash table, based on <mp, id>.
+ */
+#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
+				 (__psunsigned_t)(id)) & \
+				(xfs_Gqm->qm_dqhashmask - 1))
+#define XFS_DQ_HASH(mp, id, type)   (type == XFS_DQ_USER ? \
+				     (xfs_Gqm->qm_usr_dqhtable + \
+				      XFS_DQ_HASHVAL(mp, id)) : \
+				     (xfs_Gqm->qm_grp_dqhtable + \
+				      XFS_DQ_HASHVAL(mp, id)))
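XFS_DQ_HASHVAL() above folds the mount pointer and the quota id together and masks the sum with (table size - 1), which only spreads entries evenly when the table size is a power of two. A tiny sketch of that bucket computation, assuming a hypothetical 32-bucket table:

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 32u                   /* must be a power of two */

/* Fold a "mount pointer" and an id into a bucket index, as the
 * XFS_DQ_HASHVAL macro does with (pointer + id) & mask. */
static unsigned dq_hashval(const void *mp, uint32_t id)
{
        return (unsigned)(((uintptr_t)mp + id) & (HASH_SIZE - 1));
}

int main(void)
{
        int dummy_mount;                /* its address stands in for *mp */

        printf("id 0   -> bucket %u\n", dq_hashval(&dummy_mount, 0));
        printf("id 500 -> bucket %u\n", dq_hashval(&dummy_mount, 500));
        return 0;
}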
+#define XFS_IS_DQTYPE_ON(mp, type)   (type == XFS_DQ_USER ? \
+				      XFS_IS_UQUOTA_ON(mp):XFS_IS_GQUOTA_ON(mp))
+#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
+	!dqp->q_core.d_blk_hardlimit && \
+	!dqp->q_core.d_blk_softlimit && \
+	!dqp->q_core.d_rtb_hardlimit && \
+	!dqp->q_core.d_rtb_softlimit && \
+	!dqp->q_core.d_ino_hardlimit && \
+	!dqp->q_core.d_ino_softlimit && \
+	!dqp->q_core.d_bcount && \
+	!dqp->q_core.d_rtbcount && \
+	!dqp->q_core.d_icount)
+
+#define HL_PREVP	dq_hashlist.ql_prevp
+#define HL_NEXT		dq_hashlist.ql_next
+#define MPL_PREVP	dq_mplist.ql_prevp
+#define MPL_NEXT	dq_mplist.ql_next
+
+
+#define _LIST_REMOVE(h, dqp, PVP, NXT)				\
+	{							\
+		 xfs_dquot_t *d;				\
+		 if (((d) = (dqp)->NXT))				\
+			 (d)->PVP = (dqp)->PVP;			\
+		 *((dqp)->PVP) = d;				\
+		 (dqp)->NXT = NULL;				\
+		 (dqp)->PVP = NULL;				\
+		 (h)->qh_version++;				\
+		 (h)->qh_nelems--;				\
+	}
+
+#define _LIST_INSERT(h, dqp, PVP, NXT)				\
+	{							\
+		 xfs_dquot_t *d;				\
+		 if (((d) = (h)->qh_next))			\
+			 (d)->PVP = &((dqp)->NXT);		\
+		 (dqp)->NXT = d;				\
+		 (dqp)->PVP = &((h)->qh_next);			\
+		 (h)->qh_next = dqp;				\
+		 (h)->qh_version++;				\
+		 (h)->qh_nelems++;				\
+	 }
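_LIST_INSERT and _LIST_REMOVE implement a doubly linked list whose back link is a pointer to the previous element's next field, so removal works the same whether the node is at the head or in the middle. A standalone sketch of that idiom (the names here are invented for illustration):

#include <stdio.h>

struct node {
        int             id;
        struct node     *next;
        struct node     **prevp;        /* points at whoever points at us */
};

/* Insert at the head, as _LIST_INSERT does. */
static void list_insert(struct node **head, struct node *n)
{
        if ((n->next = *head) != NULL)
                (*head)->prevp = &n->next;
        n->prevp = head;
        *head = n;
}

/* Remove from wherever the node is; no head special case needed. */
static void list_remove(struct node *n)
{
        if (n->next)
                n->next->prevp = n->prevp;
        *n->prevp = n->next;
        n->next = NULL;
        n->prevp = NULL;
}

int main(void)
{
        struct node *head = NULL, a = { 1 }, b = { 2 }, c = { 3 }, *p;

        list_insert(&head, &a);
        list_insert(&head, &b);
        list_insert(&head, &c);
        list_remove(&b);                /* remove from the middle */
        list_remove(&c);                /* remove the current head */
        for (p = head; p; p = p->next)
                printf("node %d\n", p->id);     /* prints only node 1 */
        return 0;
}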
+
+#define FOREACH_DQUOT_IN_MP(dqp, mp) \
+	for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
+
+#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist)	\
+for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
+     (dqp) = (dqp)->dq_flnext)
+
+#define XQM_HASHLIST_INSERT(h, dqp)	\
+	 _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
+
+#define XQM_FREELIST_INSERT(h, dqp)	\
+	 xfs_qm_freelist_append(h, dqp)
+
+#define XQM_MPLIST_INSERT(h, dqp)	\
+	 _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
+
+#define XQM_HASHLIST_REMOVE(h, dqp)	\
+	 _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
+#define XQM_FREELIST_REMOVE(dqp)	\
+	 xfs_qm_freelist_unlink(dqp)
+#define XQM_MPLIST_REMOVE(h, dqp)	\
+	{ _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
+	  XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
+
+#define XFS_DQ_IS_LOGITEM_INITD(dqp)	((dqp)->q_logitem.qli_dquot == (dqp))
+
+#define XFS_QM_DQP_TO_DQACCT(tp, dqp)	(XFS_QM_ISUDQ(dqp) ? \
+					 (tp)->t_dqinfo->dqa_usrdquots : \
+					 (tp)->t_dqinfo->dqa_grpdquots)
+#define XFS_IS_SUSER_DQUOT(dqp)		\
+	(!((dqp)->q_core.d_id))
+
+#define XFS_PURGE_INODE(ip)		\
+	{				\
+	  vmap_t dqvmap;		\
+	  vnode_t *dqvp;		\
+	  dqvp = XFS_ITOV(ip);		\
+	  VMAP(dqvp, dqvmap);		\
+	  VN_RELE(dqvp);		\
+	}
+
+#define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
+				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : "???"))
+#define DQFLAGTO_DIRTYSTR(d)	(XFS_DQ_IS_DIRTY(d) ? "DIRTY" : "NOTDIRTY")
+
+#endif	/* __XFS_QUOTA_PRIV_H__ */
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
new file mode 100644
index 0000000..149b2a1
--- /dev/null
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+
+#include "xfs_qm.h"
+
+STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
+
+/*
+ * Add the locked dquot to the transaction.
+ * The dquot must be locked, and it cannot be associated with any
+ * transaction.
+ */
+void
+xfs_trans_dqjoin(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	xfs_dq_logitem_t    *lp;
+
+	ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
+	lp = &dqp->q_logitem;
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
+
+	/*
+	 * Initialize q_transp so we can later determine if this dquot is
+	 * associated with this transaction.
+	 */
+	dqp->q_transp = tp;
+}
+
+
+/*
+ * This is called to mark the dquot as needing
+ * to be logged when the transaction is committed.  The dquot must
+ * already be associated with the given transaction.
+ * Note that it marks the entire transaction as dirty. In the ordinary
+ * case, this gets called via xfs_trans_commit, after the transaction
+ * is already dirty. However, there's nothing to stop this from getting
+ * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
+ * flag.
+ */
+void
+xfs_trans_log_dquot(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	xfs_log_item_desc_t	*lidp;
+
+	ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+}
+
+/*
+ * Carry forward whatever is left of the quota blk reservation to
+ * the spanky new transaction
+ */
+STATIC void
+xfs_trans_dup_dqinfo(
+	xfs_trans_t	*otp,
+	xfs_trans_t	*ntp)
+{
+	xfs_dqtrx_t	*oq, *nq;
+	int		i,j;
+	xfs_dqtrx_t	*oqa, *nqa;
+
+	if (!otp->t_dqinfo)
+		return;
+
+	xfs_trans_alloc_dqinfo(ntp);
+	oqa = otp->t_dqinfo->dqa_usrdquots;
+	nqa = ntp->t_dqinfo->dqa_usrdquots;
+
+	/*
+	 * Because the quota blk reservation is carried forward,
+	 * it is also necessary to carry forward the DQ_DIRTY flag.
+	 */
+	if(otp->t_flags & XFS_TRANS_DQ_DIRTY)
+		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;
+
+	for (j = 0; j < 2; j++) {
+		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
+			if (oqa[i].qt_dquot == NULL)
+				break;
+			oq = &oqa[i];
+			nq = &nqa[i];
+
+			nq->qt_dquot = oq->qt_dquot;
+			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
+			nq->qt_rtbcount_delta = 0;
+
+			/*
+			 * Transfer whatever is left of the reservations.
+			 */
+			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
+			oq->qt_blk_res = oq->qt_blk_res_used;
+
+			nq->qt_rtblk_res = oq->qt_rtblk_res -
+				oq->qt_rtblk_res_used;
+			oq->qt_rtblk_res = oq->qt_rtblk_res_used;
+
+			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
+			oq->qt_ino_res = oq->qt_ino_res_used;
+
+		}
+		oqa = otp->t_dqinfo->dqa_grpdquots;
+		nqa = ntp->t_dqinfo->dqa_grpdquots;
+	}
+}
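The carry-forward arithmetic in xfs_trans_dup_dqinfo() hands the new transaction whatever the old one reserved but has not yet used, and shrinks the old reservation down to what it did use, so the combined reservation is unchanged. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* Old transaction reserved 100 blocks and has used 40 so far. */
        unsigned long old_res = 100, old_used = 40;
        unsigned long new_res;

        new_res = old_res - old_used;   /* new transaction inherits 60  */
        old_res = old_used;             /* old transaction keeps its 40 */

        /* Total reservation across both is still 100 blocks. */
        printf("old=%lu new=%lu total=%lu\n",
               old_res, new_res, old_res + new_res);
        return 0;
}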
+
+/*
+ * Wrapper around xfs_trans_mod_dquot() to account for both user and group quotas.
+ */
+void
+xfs_trans_mod_dquot_byino(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	uint		field,
+	long		delta)
+{
+	xfs_mount_t	*mp;
+
+	ASSERT(tp);
+	mp = tp->t_mountp;
+
+	if (!XFS_IS_QUOTA_ON(mp) ||
+	    ip->i_ino == mp->m_sb.sb_uquotino ||
+	    ip->i_ino == mp->m_sb.sb_gquotino)
+		return;
+
+	if (tp->t_dqinfo == NULL)
+		xfs_trans_alloc_dqinfo(tp);
+
+	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) {
+		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
+	}
+	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) {
+		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
+	}
+}
+
+STATIC xfs_dqtrx_t *
+xfs_trans_get_dqtrx(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	int		i;
+	xfs_dqtrx_t	*qa;
+
+	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
+		qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);
+
+		if (qa[i].qt_dquot == NULL ||
+		    qa[i].qt_dquot == dqp) {
+			return (&qa[i]);
+		}
+	}
+
+	return (NULL);
+}
+
+/*
+ * Make the changes in the transaction structure.
+ * The moral equivalent to xfs_trans_mod_sb().
+ * We don't touch any fields in the dquot, so we don't care
+ * if it's locked or not (most of the time it won't be).
+ */
+void
+xfs_trans_mod_dquot(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp,
+	uint		field,
+	long		delta)
+{
+	xfs_dqtrx_t	*qtrx;
+
+	ASSERT(tp);
+	qtrx = NULL;
+
+	if (tp->t_dqinfo == NULL)
+		xfs_trans_alloc_dqinfo(tp);
+	/*
+	 * Find either the first free slot or the slot that belongs
+	 * to this dquot.
+	 */
+	qtrx = xfs_trans_get_dqtrx(tp, dqp);
+	ASSERT(qtrx);
+	if (qtrx->qt_dquot == NULL)
+		qtrx->qt_dquot = dqp;
+
+	switch (field) {
+
+		/*
+		 * regular disk blk reservation
+		 */
+	      case XFS_TRANS_DQ_RES_BLKS:
+		qtrx->qt_blk_res += (ulong)delta;
+		break;
+
+		/*
+		 * inode reservation
+		 */
+	      case XFS_TRANS_DQ_RES_INOS:
+		qtrx->qt_ino_res += (ulong)delta;
+		break;
+
+		/*
+		 * disk blocks used.
+		 */
+	      case XFS_TRANS_DQ_BCOUNT:
+		if (qtrx->qt_blk_res && delta > 0) {
+			qtrx->qt_blk_res_used += (ulong)delta;
+			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
+		}
+		qtrx->qt_bcount_delta += delta;
+		break;
+
+	      case XFS_TRANS_DQ_DELBCOUNT:
+		qtrx->qt_delbcnt_delta += delta;
+		break;
+
+		/*
+		 * Inode Count
+		 */
+	      case XFS_TRANS_DQ_ICOUNT:
+		if (qtrx->qt_ino_res && delta > 0) {
+			qtrx->qt_ino_res_used += (ulong)delta;
+			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
+		}
+		qtrx->qt_icount_delta += delta;
+		break;
+
+		/*
+		 * rtblk reservation
+		 */
+	      case XFS_TRANS_DQ_RES_RTBLKS:
+		qtrx->qt_rtblk_res += (ulong)delta;
+		break;
+
+		/*
+		 * rtblk count
+		 */
+	      case XFS_TRANS_DQ_RTBCOUNT:
+		if (qtrx->qt_rtblk_res && delta > 0) {
+			qtrx->qt_rtblk_res_used += (ulong)delta;
+			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
+		}
+		qtrx->qt_rtbcount_delta += delta;
+		break;
+
+	      case XFS_TRANS_DQ_DELRTBCOUNT:
+		qtrx->qt_delrtb_delta += delta;
+		break;
+
+	      default:
+		ASSERT(0);
+	}
+	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
+}
+
+
+/*
+ * Given an array of dqtrx structures, lock all the dquots associated
+ * and join them to the transaction, provided they have been modified.
+ * We know that the highest number of dquots of one type (usr OR grp)
+ * involved in a transaction is 2, and that usr and grp combined is 3.
+ * So, we don't attempt to make this very generic.
+ */
+STATIC void
+xfs_trans_dqlockedjoin(
+	xfs_trans_t	*tp,
+	xfs_dqtrx_t	*q)
+{
+	ASSERT(q[0].qt_dquot != NULL);
+	if (q[1].qt_dquot == NULL) {
+		xfs_dqlock(q[0].qt_dquot);
+		xfs_trans_dqjoin(tp, q[0].qt_dquot);
+	} else {
+		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
+		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
+		xfs_trans_dqjoin(tp, q[0].qt_dquot);
+		xfs_trans_dqjoin(tp, q[1].qt_dquot);
+	}
+}
+
+
+/*
+ * Called by xfs_trans_commit() and similar in spirit to
+ * xfs_trans_apply_sb_deltas().
+ * Go thru all the dquots belonging to this transaction and modify the
+ * INCORE dquot to reflect the actual usages.
+ * Unreserve just the reservations done by this transaction.
+ * dquot is still left locked at exit.
+ */
+void
+xfs_trans_apply_dquot_deltas(
+	xfs_trans_t		*tp)
+{
+	int			i, j;
+	xfs_dquot_t		*dqp;
+	xfs_dqtrx_t		*qtrx, *qa;
+	xfs_disk_dquot_t	*d;
+	long			totalbdelta;
+	long			totalrtbdelta;
+
+	if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY))
+		return;
+
+	ASSERT(tp->t_dqinfo);
+	qa = tp->t_dqinfo->dqa_usrdquots;
+	for (j = 0; j < 2; j++) {
+		if (qa[0].qt_dquot == NULL) {
+			qa = tp->t_dqinfo->dqa_grpdquots;
+			continue;
+		}
+
+		/*
+		 * Lock all of the dquots and join them to the transaction.
+		 */
+		xfs_trans_dqlockedjoin(tp, qa);
+
+		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
+			qtrx = &qa[i];
+			/*
+			 * The array of dquots is filled
+			 * sequentially, not sparsely.
+			 */
+			if ((dqp = qtrx->qt_dquot) == NULL)
+				break;
+
+			ASSERT(XFS_DQ_IS_LOCKED(dqp));
+			ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+
+			/*
+			 * adjust the actual number of blocks used
+			 */
+			d = &dqp->q_core;
+
+			/*
+			 * The issue here is that sometimes we intentionally
+			 * skip the blkquota reservation, to be fair to users
+			 * (when the amount is small). On the other hand,
+			 * delayed allocs do make reservations, but that's
+			 * outside of a transaction, so we have no
+			 * idea how much was really reserved.
+			 * So, here we've accumulated delayed allocation blks and
+			 * non-delay blks. The assumption is that the
+			 * delayed ones are always reserved (outside of a
+			 * transaction), and the others may or may not have
+			 * quota reservations.
+			 */
+			totalbdelta = qtrx->qt_bcount_delta +
+				qtrx->qt_delbcnt_delta;
+			totalrtbdelta = qtrx->qt_rtbcount_delta +
+				qtrx->qt_delrtb_delta;
+#ifdef QUOTADEBUG
+			if (totalbdelta < 0)
+				ASSERT(INT_GET(d->d_bcount, ARCH_CONVERT) >=
+				       (xfs_qcnt_t) -totalbdelta);
+
+			if (totalrtbdelta < 0)
+				ASSERT(INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
+				       (xfs_qcnt_t) -totalrtbdelta);
+
+			if (qtrx->qt_icount_delta < 0)
+				ASSERT(INT_GET(d->d_icount, ARCH_CONVERT) >=
+				       (xfs_qcnt_t) -qtrx->qt_icount_delta);
+#endif
+			if (totalbdelta)
+				INT_MOD(d->d_bcount, ARCH_CONVERT, (xfs_qcnt_t)totalbdelta);
+
+			if (qtrx->qt_icount_delta)
+				INT_MOD(d->d_icount, ARCH_CONVERT, (xfs_qcnt_t)qtrx->qt_icount_delta);
+
+			if (totalrtbdelta)
+				INT_MOD(d->d_rtbcount, ARCH_CONVERT, (xfs_qcnt_t)totalrtbdelta);
+
+			/*
+			 * Get any default limits in use.
+			 * Start/reset the timer(s) if needed.
+			 */
+			if (d->d_id) {
+				xfs_qm_adjust_dqlimits(tp->t_mountp, d);
+				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
+			}
+
+			dqp->dq_flags |= XFS_DQ_DIRTY;
+			/*
+			 * add this to the list of items to get logged
+			 */
+			xfs_trans_log_dquot(tp, dqp);
+			/*
+			 * Take off what's left of the original reservation.
+			 * In case of delayed allocations, there's no
+			 * reservation that a transaction structure knows of.
+			 */
+			if (qtrx->qt_blk_res != 0) {
+				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
+					if (qtrx->qt_blk_res >
+					    qtrx->qt_blk_res_used)
+						dqp->q_res_bcount -= (xfs_qcnt_t)
+							(qtrx->qt_blk_res -
+							 qtrx->qt_blk_res_used);
+					else
+						dqp->q_res_bcount -= (xfs_qcnt_t)
+							(qtrx->qt_blk_res_used -
+							 qtrx->qt_blk_res);
+				}
+			} else {
+				/*
+				 * These blks were never reserved, either inside
+				 * a transaction or outside one (in a delayed
+				 * allocation). Also, this isn't always a
+				 * negative number since we sometimes
+				 * deliberately skip quota reservations.
+				 */
+				if (qtrx->qt_bcount_delta) {
+					dqp->q_res_bcount +=
+					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
+				}
+			}
+			/*
+			 * Adjust the RT reservation.
+			 */
+			if (qtrx->qt_rtblk_res != 0) {
+				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
+					if (qtrx->qt_rtblk_res >
+					    qtrx->qt_rtblk_res_used)
+					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
+						       (qtrx->qt_rtblk_res -
+							qtrx->qt_rtblk_res_used);
+					else
+					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
+						       (qtrx->qt_rtblk_res_used -
+							qtrx->qt_rtblk_res);
+				}
+			} else {
+				if (qtrx->qt_rtbcount_delta)
+					dqp->q_res_rtbcount +=
+					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
+			}
+
+			/*
+			 * Adjust the inode reservation.
+			 */
+			if (qtrx->qt_ino_res != 0) {
+				ASSERT(qtrx->qt_ino_res >=
+				       qtrx->qt_ino_res_used);
+				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
+					dqp->q_res_icount -= (xfs_qcnt_t)
+						(qtrx->qt_ino_res -
+						 qtrx->qt_ino_res_used);
+			} else {
+				if (qtrx->qt_icount_delta)
+					dqp->q_res_icount +=
+					    (xfs_qcnt_t)qtrx->qt_icount_delta;
+			}
+
+
+#ifdef QUOTADEBUG
+			if (qtrx->qt_rtblk_res != 0)
+				cmn_err(CE_DEBUG, "RT res %d for 0x%p\n",
+					(int) qtrx->qt_rtblk_res, dqp);
+#endif
+			ASSERT(dqp->q_res_bcount >=
+				INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
+			ASSERT(dqp->q_res_icount >=
+				INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
+			ASSERT(dqp->q_res_rtbcount >=
+				INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
+		}
+		/*
+		 * Do the group quotas next
+		 */
+		qa = tp->t_dqinfo->dqa_grpdquots;
+	}
+}
+
+/*
+ * Release the reservations, and adjust the dquots accordingly.
+ * This is called only when the transaction is being aborted. If by
+ * any chance we have done dquot modifications incore (i.e. deltas) already,
+ * we simply throw those away, since that's the expected behavior
+ * when a transaction is curtailed without a commit.
+ */
+STATIC void
+xfs_trans_unreserve_and_mod_dquots(
+	xfs_trans_t		*tp)
+{
+	int			i, j;
+	xfs_dquot_t		*dqp;
+	xfs_dqtrx_t		*qtrx, *qa;
+	boolean_t		locked;
+
+	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
+		return;
+
+	qa = tp->t_dqinfo->dqa_usrdquots;
+
+	for (j = 0; j < 2; j++) {
+		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
+			qtrx = &qa[i];
+			/*
+			 * We assume that the array of dquots is filled
+			 * sequentially, not sparsely.
+			 */
+			if ((dqp = qtrx->qt_dquot) == NULL)
+				break;
+			/*
+			 * Unreserve the original reservation. We don't care
+			 * about the number of blocks used field, or deltas.
+			 * Also we don't bother to zero the fields.
+			 */
+			locked = B_FALSE;
+			if (qtrx->qt_blk_res) {
+				xfs_dqlock(dqp);
+				locked = B_TRUE;
+				dqp->q_res_bcount -=
+					(xfs_qcnt_t)qtrx->qt_blk_res;
+			}
+			if (qtrx->qt_ino_res) {
+				if (!locked) {
+					xfs_dqlock(dqp);
+					locked = B_TRUE;
+				}
+				dqp->q_res_icount -=
+					(xfs_qcnt_t)qtrx->qt_ino_res;
+			}
+
+			if (qtrx->qt_rtblk_res) {
+				if (!locked) {
+					xfs_dqlock(dqp);
+					locked = B_TRUE;
+				}
+				dqp->q_res_rtbcount -=
+					(xfs_qcnt_t)qtrx->qt_rtblk_res;
+			}
+			if (locked)
+				xfs_dqunlock(dqp);
+
+		}
+		qa = tp->t_dqinfo->dqa_grpdquots;
+	}
+}
+
+/*
+ * This reserves disk blocks and inodes against a dquot.
+ * Flags indicate if the dquot is to be locked here and also
+ * if the blk reservation is for RT or regular blocks.
+ * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
+ * Returns EDQUOT if quota is exceeded.
+ */
+STATIC int
+xfs_trans_dqresv(
+	xfs_trans_t	*tp,
+	xfs_mount_t	*mp,
+	xfs_dquot_t	*dqp,
+	long		nblks,
+	long		ninos,
+	uint		flags)
+{
+	int		error;
+	xfs_qcnt_t	hardlimit;
+	xfs_qcnt_t	softlimit;
+	time_t		btimer;
+	xfs_qcnt_t	*resbcountp;
+	xfs_quotainfo_t	*q = mp->m_quotainfo;
+
+	if (! (flags & XFS_QMOPT_DQLOCK)) {
+		xfs_dqlock(dqp);
+	}
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	if (flags & XFS_TRANS_DQ_RES_BLKS) {
+		hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT);
+		if (!hardlimit)
+			hardlimit = q->qi_bhardlimit;
+		softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
+		if (!softlimit)
+			softlimit = q->qi_bsoftlimit;
+		btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
+		resbcountp = &dqp->q_res_bcount;
+	} else {
+		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
+		hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT);
+		if (!hardlimit)
+			hardlimit = q->qi_rtbhardlimit;
+		softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
+		if (!softlimit)
+			softlimit = q->qi_rtbsoftlimit;
+		btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
+		resbcountp = &dqp->q_res_rtbcount;
+	}
+	error = 0;
+
+	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
+	    dqp->q_core.d_id &&
+	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
+#ifdef QUOTADEBUG
+		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
+			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
+#endif
+		if (nblks > 0) {
+			/*
+			 * dquot is locked already. See if we'd go over the
+			 * hardlimit or exceed the timelimit if we allocate
+			 * nblks.
+			 */
+			if (hardlimit > 0ULL &&
+			     (hardlimit <= nblks + *resbcountp)) {
+				error = EDQUOT;
+				goto error_return;
+			}
+
+			if (softlimit > 0ULL &&
+			     (softlimit <= nblks + *resbcountp)) {
+				/*
+				 * If the timer has expired or the warning
+				 * limit has been exceeded, return EDQUOT.
+				 */
+				if ((btimer != 0 && get_seconds() > btimer) ||
+				    (dqp->q_core.d_bwarns &&
+				     INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >=
+				     XFS_QI_BWARNLIMIT(dqp->q_mount))) {
+					error = EDQUOT;
+					goto error_return;
+				}
+			}
+		}
+		if (ninos > 0) {
+			hardlimit = INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT);
+			if (!hardlimit)
+				hardlimit = q->qi_ihardlimit;
+			softlimit = INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT);
+			if (!softlimit)
+				softlimit = q->qi_isoftlimit;
+			if (hardlimit > 0ULL &&
+			    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= hardlimit) {
+				error = EDQUOT;
+				goto error_return;
+			} else if (softlimit > 0ULL &&
+				   INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= softlimit) {
+				/*
+				 * If the timer has expired or the warning
+				 * limit has been exceeded, return EDQUOT.
+				 */
+				if ((dqp->q_core.d_itimer &&
+				     get_seconds() > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) ||
+				    (dqp->q_core.d_iwarns &&
+				     INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >=
+				     XFS_QI_IWARNLIMIT(dqp->q_mount))) {
+					error = EDQUOT;
+					goto error_return;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Change the reservation, but not the actual usage.
+	 * Note that q_res_bcount = q_core.d_bcount + resv
+	 */
+	(*resbcountp) += (xfs_qcnt_t)nblks;
+	if (ninos != 0)
+		dqp->q_res_icount += (xfs_qcnt_t)ninos;
+
+	/*
+	 * note the reservation amt in the trans struct too,
+	 * so that the transaction knows how much was reserved by
+	 * it against this particular dquot.
+	 * We don't do this when we are reserving for a delayed allocation,
+	 * because we don't have the luxury of a transaction envelope then.
+	 */
+	if (tp) {
+		ASSERT(tp->t_dqinfo);
+		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
+		if (nblks != 0)
+			xfs_trans_mod_dquot(tp, dqp,
+					    flags & XFS_QMOPT_RESBLK_MASK,
+					    nblks);
+		if (ninos != 0)
+			xfs_trans_mod_dquot(tp, dqp,
+					    XFS_TRANS_DQ_RES_INOS,
+					    ninos);
+	}
+	ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
+	ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
+	ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
+
+error_return:
+	if (! (flags & XFS_QMOPT_DQLOCK)) {
+		xfs_dqunlock(dqp);
+	}
+	return (error);
+}
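xfs_trans_dqresv() grants a reservation outright while the new total stays below the soft limit, refuses it once the new total would reach the hard limit, and in between refuses it only when the grace timer has run out or the warning limit has been exhausted. A compact sketch of that three-band decision, with plain struct fields standing in for the on-disk dquot and a stand-in value for EDQUOT:

#include <stdio.h>
#include <time.h>

#define EDQUOT 122      /* stand-in error value for illustration */

struct quota {
        unsigned long long used, soft, hard;    /* 0 limit == unlimited */
        time_t timer;                           /* grace period expiry  */
        unsigned warns, warnlimit;
};

static int quota_reserve(struct quota *q, unsigned long long nblks)
{
        unsigned long long want = q->used + nblks;

        if (q->hard && want >= q->hard)
                return EDQUOT;                  /* would reach the hard limit */
        if (q->soft && want >= q->soft) {
                /* Over soft limit: only OK while grace time and warnings last. */
                if ((q->timer && time(NULL) > q->timer) ||
                    (q->warnlimit && q->warns >= q->warnlimit))
                        return EDQUOT;
        }
        q->used = want;                         /* reservation granted */
        return 0;
}

int main(void)
{
        struct quota q = { 90, 100, 120, 0, 0, 3 };

        printf("%d\n", quota_reserve(&q, 5));   /* 0: under soft limit       */
        printf("%d\n", quota_reserve(&q, 10));  /* 0: over soft, grace left  */
        printf("%d\n", quota_reserve(&q, 50));  /* EDQUOT: reaches hard limit */
        return 0;
}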
+
+
+/*
+ * Given a dquot(s), make disk block and/or inode reservations against them.
+ * The fact that this does the reservation against both the usr and
+ * grp quotas is important, because this follows a both-or-nothing
+ * approach.
+ *
+ * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked.
+ *	   XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
+ *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
+ *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
+ * dquots are unlocked on return, if they were not locked by caller.
+ */
+int
+xfs_trans_reserve_quota_bydquots(
+	xfs_trans_t	*tp,
+	xfs_mount_t	*mp,
+	xfs_dquot_t	*udqp,
+	xfs_dquot_t	*gdqp,
+	long		nblks,
+	long		ninos,
+	uint		flags)
+{
+	int		resvd;
+
+	if (! XFS_IS_QUOTA_ON(mp))
+		return (0);
+
+	if (tp && tp->t_dqinfo == NULL)
+		xfs_trans_alloc_dqinfo(tp);
+
+	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
+	resvd = 0;
+
+	if (udqp) {
+		if (xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags))
+			return (EDQUOT);
+		resvd = 1;
+	}
+
+	if (gdqp) {
+		if (xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags)) {
+			/*
+			 * can't do it, so backout previous reservation
+			 */
+			if (resvd) {
+				flags |= XFS_QMOPT_FORCE_RES;
+				xfs_trans_dqresv(tp, mp, udqp,
+						 -nblks, -ninos, flags);
+			}
+			return (EDQUOT);
+		}
+	}
+
+	/*
+	 * Didn't change anything critical, so no need to log.
+	 */
+	return (0);
+}
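xfs_trans_reserve_quota_bydquots() reserves against the user dquot first and, if the group reservation then fails, backs the user reservation out by reserving a negative amount with enforcement forced off. A small sketch of that both-or-nothing pattern over two plain counters; the force flag plays the role XFS_QMOPT_FORCE_RES plays above:

#include <stdio.h>

#define EDQUOT 122      /* stand-in error value */

/* Reserve nblks against one counter, honoring a fixed limit unless forced. */
static int reserve(long *used, long limit, long nblks, int force)
{
        if (!force && *used + nblks > limit)
                return EDQUOT;
        *used += nblks;
        return 0;
}

static int reserve_both(long *user, long *group, long limit, long nblks)
{
        if (reserve(user, limit, nblks, 0))
                return EDQUOT;
        if (reserve(group, limit, nblks, 0)) {
                reserve(user, limit, -nblks, 1);        /* back out user resv */
                return EDQUOT;
        }
        return 0;
}

int main(void)
{
        long user = 0, group = 95;

        /* Group side fails, so the user side must end up unchanged too. */
        printf("%d\n", reserve_both(&user, &group, 100, 10));
        printf("user=%ld group=%ld\n", user, group);    /* user=0 group=95 */
        return 0;
}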
+
+
+/*
+ * Lock the dquot and change the reservation if we can.
+ * This doesn't change the actual usage, just the reservation.
+ * The inode sent in is locked.
+ *
+ * Returns 0 on success, EDQUOT or other errors otherwise
+ */
+STATIC int
+xfs_trans_reserve_quota_nblks(
+	xfs_trans_t	*tp,
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip,
+	long		nblks,
+	long		ninos,
+	uint		type)
+{
+	int		error;
+
+	if (!XFS_IS_QUOTA_ON(mp))
+		return (0);
+
+	ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
+	ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
+
+	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
+	ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS ||
+	       (type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_BLKS);
+
+	/*
+	 * Reserve nblks against these dquots, with trans as the mediator.
+	 */
+	error = xfs_trans_reserve_quota_bydquots(tp, mp,
+						 ip->i_udquot, ip->i_gdquot,
+						 nblks, ninos,
+						 type);
+	return (error);
+}
+
+/*
+ * This routine is called to allocate a quotaoff log item.
+ */
+xfs_qoff_logitem_t *
+xfs_trans_get_qoff_item(
+	xfs_trans_t		*tp,
+	xfs_qoff_logitem_t	*startqoff,
+	uint			flags)
+{
+	xfs_qoff_logitem_t	*q;
+
+	ASSERT(tp != NULL);
+
+	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
+	ASSERT(q != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)q);
+
+	return (q);
+}
+
+
+/*
+ * This is called to mark the quotaoff logitem as needing
+ * to be logged when the transaction is committed.  The logitem must
+ * already be associated with the given transaction.
+ */
+void
+xfs_trans_log_quotaoff_item(
+	xfs_trans_t		*tp,
+	xfs_qoff_logitem_t	*qlp)
+{
+	xfs_log_item_desc_t	*lidp;
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+}
+
+STATIC void
+xfs_trans_alloc_dqinfo(
+	xfs_trans_t	*tp)
+{
+	(tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
+}
+
+STATIC void
+xfs_trans_free_dqinfo(
+	xfs_trans_t	*tp)
+{
+	if (!tp->t_dqinfo)
+		return;
+	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo);
+	(tp)->t_dqinfo = NULL;
+}
+
+xfs_dqtrxops_t	xfs_trans_dquot_ops = {
+	.qo_dup_dqinfo			= xfs_trans_dup_dqinfo,
+	.qo_free_dqinfo			= xfs_trans_free_dqinfo,
+	.qo_mod_dquot_byino		= xfs_trans_mod_dquot_byino,
+	.qo_apply_dquot_deltas		= xfs_trans_apply_dquot_deltas,
+	.qo_reserve_quota_nblks		= xfs_trans_reserve_quota_nblks,
+	.qo_reserve_quota_bydquots	= xfs_trans_reserve_quota_bydquots,
+	.qo_unreserve_and_mod_dquots	= xfs_trans_unreserve_and_mod_dquots,
+};
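
This ops table is what lets the core transaction code reach the quota routines indirectly, so a kernel built without quota support need not reference them at all. The exact hook-up lives outside this file; the sketch below only shows the shape of a call through such a table, and the qops pointer and NULL checks are assumptions:

/*
 * Hypothetical dispatch through an xfs_dqtrxops_t table; how the
 * pointer is obtained (and whether it can be NULL) is an assumption.
 */
STATIC void
example_free_trans_dqinfo(
	xfs_trans_t	*tp,
	xfs_dqtrxops_t	*qops)
{
	if (qops && qops->qo_free_dqinfo)
		qops->qo_free_dqinfo(tp);
}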
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
new file mode 100644
index 0000000..7d6e1f3
--- /dev/null
+++ b/fs/xfs/support/debug.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "debug.h"
+
+#include <asm/page.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+
+int			doass = 1;
+static char		message[256];	/* keep it off the stack */
+static DEFINE_SPINLOCK(xfs_err_lock);
+
+/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
+#define XFS_MAX_ERR_LEVEL	7
+#define XFS_ERR_MASK		((1 << 3) - 1)
+static char		*err_level[XFS_MAX_ERR_LEVEL+1] =
+					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
+					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
+					 KERN_INFO, KERN_DEBUG};
+
+void
+assfail(char *a, char *f, int l)
+{
+    printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l);
+    BUG();
+}
+
+#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
+
+unsigned long
+random(void)
+{
+	static unsigned long	RandomValue = 1;
+	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+	register long	rv = RandomValue;
+	register long	lo;
+	register long	hi;
+
+	hi = rv / 127773;
+	lo = rv % 127773;
+	rv = 16807 * lo - 2836 * hi;
+	if( rv <= 0 ) rv += 2147483647;
+	return( RandomValue = rv );
+}
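
random() above is the Park-Miller "minimal standard" generator, rv' = 16807 * rv mod (2^31 - 1), evaluated with Schrage's decomposition 2147483647 = 16807 * 127773 + 2836 so the product never overflows a signed 32-bit long. A freestanding user-space sketch of the same recurrence, handy for checking the constants against the published test value:

#include <stdio.h>

static long seed = 1;

/* Same recurrence as random() above: Schrage's method with
 * a = 16807, m = 2^31 - 1, q = m / a = 127773, r = m % a = 2836. */
static long lehmer_next(void)
{
	long hi = seed / 127773;
	long lo = seed % 127773;
	long rv = 16807 * lo - 2836 * hi;

	if (rv <= 0)
		rv += 2147483647;
	return seed = rv;
}

int main(void)
{
	int i;

	/* Park & Miller's published check: starting from 1, the value
	 * after 10000 steps is 1043618065. */
	for (i = 0; i < 10000; i++)
		lehmer_next();
	printf("%ld\n", seed);
	return 0;
}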
+
+int
+get_thread_id(void)
+{
+	return current->pid;
+}
+
+#endif /* (DEBUG || INDUCE_IO_ERRROR) && !NO_WANT_RANDOM */
+
+void
+cmn_err(register int level, char *fmt, ...)
+{
+	char	*fp = fmt;
+	int	len;
+	ulong	flags;
+	va_list	ap;
+
+	level &= XFS_ERR_MASK;
+	if (level > XFS_MAX_ERR_LEVEL)
+		level = XFS_MAX_ERR_LEVEL;
+	spin_lock_irqsave(&xfs_err_lock,flags);
+	va_start(ap, fmt);
+	if (*fmt == '!') fp++;
+	len = vsprintf(message, fp, ap);
+	if (message[len-1] != '\n')
+		strcat(message, "\n");
+	printk("%s%s", err_level[level], message);
+	va_end(ap);
+	spin_unlock_irqrestore(&xfs_err_lock,flags);
+
+	if (level == CE_PANIC)
+		BUG();
+}
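
For reference, the level argument is one of the CE_* constants from debug.h, which index err_level[] directly (CE_WARN maps to KERN_WARNING, CE_DEBUG to KERN_DEBUG, and CE_PANIC additionally calls BUG()), and a leading '!' in the format string is simply skipped. Illustrative call sites only:

STATIC void
example_log_messages(void)
{
	/* CE_WARN == 4, so err_level[4] == KERN_WARNING is prepended. */
	cmn_err(CE_WARN, "XFS: example warning, flags 0x%x", 0x42);

	/* The leading '!' is stripped by cmn_err() before formatting. */
	cmn_err(CE_NOTE, "!example notice");
}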
+
+
+void
+icmn_err(register int level, char *fmt, va_list ap)
+{
+	ulong	flags;
+	int	len;
+
+	level &= XFS_ERR_MASK;
+	if(level > XFS_MAX_ERR_LEVEL)
+		level = XFS_MAX_ERR_LEVEL;
+	spin_lock_irqsave(&xfs_err_lock,flags);
+	len = vsprintf(message, fmt, ap);
+	if (message[len-1] != '\n')
+		strcat(message, "\n");
+	spin_unlock_irqrestore(&xfs_err_lock,flags);
+	printk("%s%s", err_level[level], message);
+	if (level == CE_PANIC)
+		BUG();
+}
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
new file mode 100644
index 0000000..40b0f4c
--- /dev/null
+++ b/fs/xfs/support/debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_SUPPORT_DEBUG_H__
+#define	__XFS_SUPPORT_DEBUG_H__
+
+#include <stdarg.h>
+
+#define CE_DEBUG        7               /* debug        */
+#define CE_CONT         6               /* continuation */
+#define CE_NOTE         5               /* notice       */
+#define CE_WARN         4               /* warning      */
+#define CE_ALERT        1               /* alert        */
+#define CE_PANIC        0               /* panic        */
+
+extern void icmn_err(int, char *, va_list);
+/* PRINTFLIKE2 */
+extern void cmn_err(int, char *, ...);
+
+#ifndef STATIC
+# define STATIC static
+#endif
+
+#ifdef DEBUG
+# ifdef lint
+#  define ASSERT(EX)	((void)0) /* avoid "constant in conditional" babble */
+# else
+#  define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__))
+# endif	/* lint */
+#else
+# define ASSERT(x)	((void)0)
+#endif
+
+extern int doass;		/* dynamically turn off asserts */
+extern void assfail(char *, char *, int);
+#ifdef DEBUG
+extern unsigned long random(void);
+extern int get_thread_id(void);
+#endif
+
+#define ASSERT_ALWAYS(EX)  ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
+#define	debug_stop_all_cpus(param)	/* param is "cpumask_t *" */
+
+#endif  /* __XFS_SUPPORT_DEBUG_H__ */
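
To make the two assertion flavours concrete: ASSERT() compiles away unless DEBUG is defined and can also be silenced at run time by clearing the global doass, while ASSERT_ALWAYS() is checked in every build and ends in BUG() via assfail(). A small illustrative sketch:

#include "debug.h"

STATIC void
example_check_invariants(int nblocks)
{
	/* Debug-only; also skipped at run time when doass == 0. */
	ASSERT(nblocks >= 0);

	/* Checked unconditionally in all builds. */
	ASSERT_ALWAYS(nblocks < (1 << 30));
}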
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
new file mode 100644
index 0000000..3dae14c
--- /dev/null
+++ b/fs/xfs/support/ktrace.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <xfs.h>
+
+static kmem_zone_t *ktrace_hdr_zone;
+static kmem_zone_t *ktrace_ent_zone;
+static int          ktrace_zentries;
+
+void
+ktrace_init(int zentries)
+{
+	ktrace_zentries = zentries;
+
+	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
+					"ktrace_hdr");
+	ASSERT(ktrace_hdr_zone);
+
+	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
+					* sizeof(ktrace_entry_t),
+					"ktrace_ent");
+	ASSERT(ktrace_ent_zone);
+}
+
+void
+ktrace_uninit(void)
+{
+	kmem_cache_destroy(ktrace_hdr_zone);
+	kmem_cache_destroy(ktrace_ent_zone);
+}
+
+/*
+ * ktrace_alloc()
+ *
+ * Allocate a ktrace header and enough buffering for the given
+ * number of entries.
+ */
+ktrace_t *
+ktrace_alloc(int nentries, int sleep)
+{
+	ktrace_t        *ktp;
+	ktrace_entry_t  *ktep;
+
+	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
+
+	if (ktp == (ktrace_t*)NULL) {
+		/*
+		 * KM_SLEEP callers don't expect failure.
+		 */
+		if (sleep & KM_SLEEP)
+			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
+
+		return NULL;
+	}
+
+	/*
+	 * Buffers of exactly ktrace_zentries entries are allocated from
+	 * the dedicated zone; any other size is allocated directly.
+	 */
+	if (nentries == ktrace_zentries) {
+		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
+							    sleep);
+	} else {
+		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+							    sleep);
+	}
+
+	if (ktep == NULL) {
+		/*
+		 * KM_SLEEP callers don't expect failure.
+		 */
+		if (sleep & KM_SLEEP)
+			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
+
+		kmem_free(ktp, sizeof(*ktp));
+
+		return NULL;
+	}
+
+	spinlock_init(&(ktp->kt_lock), "kt_lock");
+
+	ktp->kt_entries  = ktep;
+	ktp->kt_nentries = nentries;
+	ktp->kt_index    = 0;
+	ktp->kt_rollover = 0;
+	return ktp;
+}
+
+
+/*
+ * ktrace_free()
+ *
+ * Free up the ktrace header and buffer.  It is up to the caller
+ * to ensure that no-one is referencing it.
+ */
+void
+ktrace_free(ktrace_t *ktp)
+{
+	int     entries_size;
+
+	if (ktp == (ktrace_t *)NULL)
+		return;
+
+	spinlock_destroy(&ktp->kt_lock);
+
+	/*
+	 * Special treatment for the Vnode trace buffer.
+	 */
+	if (ktp->kt_nentries == ktrace_zentries) {
+		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
+	} else {
+		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
+
+		kmem_free(ktp->kt_entries, entries_size);
+	}
+
+	kmem_zone_free(ktrace_hdr_zone, ktp);
+}
+
+
+/*
+ * Enter the given values into the "next" entry in the trace buffer.
+ * kt_index is always the index of the next entry to be filled.
+ */
+void
+ktrace_enter(
+	ktrace_t        *ktp,
+	void            *val0,
+	void            *val1,
+	void            *val2,
+	void            *val3,
+	void            *val4,
+	void            *val5,
+	void            *val6,
+	void            *val7,
+	void            *val8,
+	void            *val9,
+	void            *val10,
+	void            *val11,
+	void            *val12,
+	void            *val13,
+	void            *val14,
+	void            *val15)
+{
+	static lock_t   wrap_lock = SPIN_LOCK_UNLOCKED;
+	unsigned long	flags;
+	int             index;
+	ktrace_entry_t  *ktep;
+
+	ASSERT(ktp != NULL);
+
+	/*
+	 * Grab an entry by pushing the index up to the next one.
+	 */
+	spin_lock_irqsave(&wrap_lock, flags);
+	index = ktp->kt_index;
+	if (++ktp->kt_index == ktp->kt_nentries)
+		ktp->kt_index = 0;
+	spin_unlock_irqrestore(&wrap_lock, flags);
+
+	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
+		ktp->kt_rollover = 1;
+
+	ASSERT((index >= 0) && (index < ktp->kt_nentries));
+
+	ktep = &(ktp->kt_entries[index]);
+
+	ktep->val[0]  = val0;
+	ktep->val[1]  = val1;
+	ktep->val[2]  = val2;
+	ktep->val[3]  = val3;
+	ktep->val[4]  = val4;
+	ktep->val[5]  = val5;
+	ktep->val[6]  = val6;
+	ktep->val[7]  = val7;
+	ktep->val[8]  = val8;
+	ktep->val[9]  = val9;
+	ktep->val[10] = val10;
+	ktep->val[11] = val11;
+	ktep->val[12] = val12;
+	ktep->val[13] = val13;
+	ktep->val[14] = val14;
+	ktep->val[15] = val15;
+}
+
+/*
+ * Return the number of entries in the trace buffer.
+ */
+int
+ktrace_nentries(
+	ktrace_t        *ktp)
+{
+	if (ktp == NULL) {
+		return 0;
+	}
+
+	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+}
+
+/*
+ * ktrace_first()
+ *
+ * This is used to find the start of the trace buffer.
+ * In conjunction with ktrace_next() it can be used to
+ * iterate through the entire trace buffer.  This code does
+ * not do any locking because it is assumed that it is called
+ * from the debugger.
+ *
+ * The caller must pass in a pointer to a ktrace_snap
+ * structure in which we will keep some state used to
+ * iterate through the buffer.  This state must not be touched
+ * by any code outside of this module.
+ */
+ktrace_entry_t *
+ktrace_first(ktrace_t   *ktp, ktrace_snap_t     *ktsp)
+{
+	ktrace_entry_t  *ktep;
+	int             index;
+	int             nentries;
+
+	if (ktp->kt_rollover)
+		index = ktp->kt_index;
+	else
+		index = 0;
+
+	ktsp->ks_start = index;
+	ktep = &(ktp->kt_entries[index]);
+
+	nentries = ktrace_nentries(ktp);
+	index++;
+	if (index < nentries) {
+		ktsp->ks_index = index;
+	} else {
+		ktsp->ks_index = 0;
+		if (index > nentries)
+			ktep = NULL;
+	}
+	return ktep;
+}
+
+/*
+ * ktrace_next()
+ *
+ * This is used to iterate through the entries of the given
+ * trace buffer.  The caller must pass in the ktrace_snap_t
+ * structure initialized by ktrace_first().  The return value
+ * will be either a pointer to the next ktrace_entry or NULL
+ * if all of the entries have been traversed.
+ */
+ktrace_entry_t *
+ktrace_next(
+	ktrace_t        *ktp,
+	ktrace_snap_t   *ktsp)
+{
+	int             index;
+	ktrace_entry_t  *ktep;
+
+	index = ktsp->ks_index;
+	if (index == ktsp->ks_start) {
+		ktep = NULL;
+	} else {
+		ktep = &ktp->kt_entries[index];
+	}
+
+	index++;
+	if (index == ktrace_nentries(ktp)) {
+		ktsp->ks_index = 0;
+	} else {
+		ktsp->ks_index = index;
+	}
+
+	return ktep;
+}
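
ktrace_first()/ktrace_next() together give a simple cursor over the ring buffer, oldest entry first, and are meant to be driven with no locking (typically from a debugger). A sketch of the intended iteration pattern; the print format is illustrative:

/* Walk every recorded entry of a trace buffer, oldest first. */
STATIC void
example_dump_trace(ktrace_t *ktp)
{
	ktrace_entry_t	*ktep;
	ktrace_snap_t	snap;

	for (ktep = ktrace_first(ktp, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(ktp, &snap)) {
		/* val[0..15] are opaque; callers assign their own meaning. */
		cmn_err(CE_DEBUG, "trace: %p %p %p %p",
			ktep->val[0], ktep->val[1],
			ktep->val[2], ktep->val[3]);
	}
}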
+
+/*
+ * ktrace_skip()
+ *
+ * Skip the next "count" entries and return the entry after that.
+ * Return NULL if this causes us to iterate past the beginning again.
+ */
+ktrace_entry_t *
+ktrace_skip(
+	ktrace_t        *ktp,
+	int             count,
+	ktrace_snap_t   *ktsp)
+{
+	int             index;
+	int             new_index;
+	ktrace_entry_t  *ktep;
+	int             nentries = ktrace_nentries(ktp);
+
+	index = ktsp->ks_index;
+	new_index = index + count;
+	while (new_index >= nentries) {
+		new_index -= nentries;
+	}
+	if (index == ktsp->ks_start) {
+		/*
+		 * We've iterated around to the start, so we're done.
+		 */
+		ktep = NULL;
+	} else if ((new_index < index) && (index < ktsp->ks_index)) {
+		/*
+		 * We've skipped past the start again, so we're done.
+		 */
+		ktep = NULL;
+		ktsp->ks_index = ktsp->ks_start;
+	} else {
+		ktep = &(ktp->kt_entries[new_index]);
+		new_index++;
+		if (new_index == nentries) {
+			ktsp->ks_index = 0;
+		} else {
+			ktsp->ks_index = new_index;
+		}
+	}
+	return ktep;
+}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
new file mode 100644
index 0000000..92d1a1a
--- /dev/null
+++ b/fs/xfs/support/ktrace.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_KTRACE_H__
+#define __XFS_SUPPORT_KTRACE_H__
+
+#include <spin.h>
+
+/*
+ * Trace buffer entry structure.
+ */
+typedef struct ktrace_entry {
+	void	*val[16];
+} ktrace_entry_t;
+
+/*
+ * Trace buffer header structure.
+ */
+typedef struct ktrace {
+	lock_t		kt_lock;	/* mutex to guard counters */
+	int		kt_nentries;	/* number of entries in trace buf */
+	int		kt_index;	/* current index in entries */
+	int		kt_rollover;
+	ktrace_entry_t	*kt_entries;	/* buffer of entries */
+} ktrace_t;
+
+/*
+ * Trace buffer snapshot structure.
+ */
+typedef struct ktrace_snap {
+	int		ks_start;	/* kt_index at time of snap */
+	int		ks_index;	/* current index */
+} ktrace_snap_t;
+
+
+#ifdef CONFIG_XFS_TRACE
+
+extern void ktrace_init(int zentries);
+extern void ktrace_uninit(void);
+
+extern ktrace_t *ktrace_alloc(int, int);
+extern void ktrace_free(ktrace_t *);
+
+extern void ktrace_enter(
+	ktrace_t	*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*,
+	void		*);
+
+extern ktrace_entry_t   *ktrace_first(ktrace_t *, ktrace_snap_t *);
+extern int              ktrace_nentries(ktrace_t *);
+extern ktrace_entry_t   *ktrace_next(ktrace_t *, ktrace_snap_t *);
+extern ktrace_entry_t   *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
+
+#else
+#define ktrace_init(x)	do { } while (0)
+#define ktrace_uninit()	do { } while (0)
+#endif	/* CONFIG_XFS_TRACE */
+
+#endif	/* __XFS_SUPPORT_KTRACE_H__ */
diff --git a/fs/xfs/support/move.c b/fs/xfs/support/move.c
new file mode 100644
index 0000000..15b5194
--- /dev/null
+++ b/fs/xfs/support/move.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <xfs.h>
+
+/* Read from kernel buffer at src to user/kernel buffer defined
+ * by the uio structure. Advance the pointer in the uio struct
+ * as we go.
+ */
+int
+uio_read(caddr_t src, size_t len, struct uio *uio)
+{
+	size_t	count;
+
+	if (!len || !uio->uio_resid)
+		return 0;
+
+	count = uio->uio_iov->iov_len;
+	if (!count)
+		return 0;
+	if (count > len)
+		count = len;
+
+	if (uio->uio_segflg == UIO_USERSPACE) {
+		if (copy_to_user(uio->uio_iov->iov_base, src, count))
+			return EFAULT;
+	} else {
+		ASSERT(uio->uio_segflg == UIO_SYSSPACE);
+		memcpy(uio->uio_iov->iov_base, src, count);
+	}
+
+	uio->uio_iov->iov_base = (void*)((char*)uio->uio_iov->iov_base + count);
+	uio->uio_iov->iov_len -= count;
+	uio->uio_offset += count;
+	uio->uio_resid -= count;
+	return 0;
+}
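
uio_read() copies at most one segment's worth from the source: it only looks at the iovec currently at the head of the array and advances it in place. A sketch of a single-segment, kernel-space destination; the buffer names are illustrative:

/* Copy a small kernel buffer through a single-segment sysspace uio. */
STATIC int
example_copy_out(char *src, size_t len, char *dst, size_t dstlen)
{
	struct iovec	iov;
	struct uio	uio;

	iov.iov_base = dst;
	iov.iov_len  = dstlen;

	uio.uio_iov    = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid  = dstlen;
	uio.uio_segflg = UIO_SYSSPACE;	/* memcpy path, no copy_to_user */

	return uio_read(src, len, &uio);	/* 0 for the sysspace case */
}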
diff --git a/fs/xfs/support/move.h b/fs/xfs/support/move.h
new file mode 100644
index 0000000..3d406dc
--- /dev/null
+++ b/fs/xfs/support/move.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ * 
+ * Portions Copyright (c) 1982, 1986, 1993, 1994
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef __XFS_SUPPORT_MOVE_H__
+#define __XFS_SUPPORT_MOVE_H__
+
+#include <linux/uio.h>
+#include <asm/uaccess.h>
+
+/* Segment flag values. */
+enum uio_seg {
+	UIO_USERSPACE,          /* from user data space */
+	UIO_SYSSPACE,           /* from system space */
+};
+
+struct uio {
+	struct iovec	*uio_iov;   /* pointer to array of iovecs */
+	int		uio_iovcnt; /* number of iovecs in array */
+	xfs_off_t	uio_offset; /* offset in file this uio corresponds to */
+	int		uio_resid;  /* residual i/o count */
+	enum uio_seg	uio_segflg; /* see above */
+};
+
+typedef struct uio uio_t;
+typedef struct iovec iovec_t;
+
+extern int	uio_read (caddr_t, size_t, uio_t *);
+
+#endif  /* __XFS_SUPPORT_MOVE_H__ */
diff --git a/fs/xfs/support/qsort.c b/fs/xfs/support/qsort.c
new file mode 100644
index 0000000..1ec8241
--- /dev/null
+++ b/fs/xfs/support/qsort.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) { 		\
+	long i = (n) / sizeof (TYPE); 			\
+	register TYPE *pi = (TYPE *) (parmi); 		\
+	register TYPE *pj = (TYPE *) (parmj); 		\
+	do { 						\
+		register TYPE	t = *pi;		\
+		*pi++ = *pj;				\
+		*pj++ = t;				\
+        } while (--i > 0);				\
+}
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+	es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
+
+static __inline void
+swapfunc(char *a, char *b, int n, int swaptype)
+{
+	if (swaptype <= 1) 
+		swapcode(long, a, b, n)
+	else
+		swapcode(char, a, b, n)
+}
+
+#define swap(a, b)					\
+	if (swaptype == 0) {				\
+		long t = *(long *)(a);			\
+		*(long *)(a) = *(long *)(b);		\
+		*(long *)(b) = t;			\
+	} else						\
+		swapfunc(a, b, es, swaptype)
+
+#define vecswap(a, b, n) 	if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+static __inline char *
+med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
+{
+	return cmp(a, b) < 0 ?
+	       (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
+              :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
+}
+
+void
+qsort(void *aa, size_t n, size_t es, int (*cmp)(const void *, const void *))
+{
+	char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+	int d, r, swaptype, swap_cnt;
+	register char *a = aa;
+
+loop:	SWAPINIT(a, es);
+	swap_cnt = 0;
+	if (n < 7) {
+		for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
+			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+			     pl -= es)
+				swap(pl, pl - es);
+		return;
+	}
+	pm = (char *)a + (n / 2) * es;
+	if (n > 7) {
+		pl = (char *)a;
+		pn = (char *)a + (n - 1) * es;
+		if (n > 40) {
+			d = (n / 8) * es;
+			pl = med3(pl, pl + d, pl + 2 * d, cmp);
+			pm = med3(pm - d, pm, pm + d, cmp);
+			pn = med3(pn - 2 * d, pn - d, pn, cmp);
+		}
+		pm = med3(pl, pm, pn, cmp);
+	}
+	swap(a, pm);
+	pa = pb = (char *)a + es;
+
+	pc = pd = (char *)a + (n - 1) * es;
+	for (;;) {
+		while (pb <= pc && (r = cmp(pb, a)) <= 0) {
+			if (r == 0) {
+				swap_cnt = 1;
+				swap(pa, pb);
+				pa += es;
+			}
+			pb += es;
+		}
+		while (pb <= pc && (r = cmp(pc, a)) >= 0) {
+			if (r == 0) {
+				swap_cnt = 1;
+				swap(pc, pd);
+				pd -= es;
+			}
+			pc -= es;
+		}
+		if (pb > pc)
+			break;
+		swap(pb, pc);
+		swap_cnt = 1;
+		pb += es;
+		pc -= es;
+	}
+	if (swap_cnt == 0) {  /* Switch to insertion sort */
+		for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
+			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; 
+			     pl -= es)
+				swap(pl, pl - es);
+		return;
+	}
+
+	pn = (char *)a + n * es;
+	r = min(pa - (char *)a, pb - pa);
+	vecswap(a, pb - r, r);
+	r = min((long)(pd - pc), (long)(pn - pd - es));
+	vecswap(pb, pn - r, r);
+	if ((r = pb - pa) > es)
+		qsort(a, r / es, es, cmp);
+	if ((r = pd - pc) > es) { 
+		/* Iterate rather than recurse to save stack space */
+		a = pn - r;
+		n = r / es;
+		goto loop;
+	}
+/*		qsort(pn - r, r / es, es, cmp);*/
+}
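
This is the classic Bentley & McIlroy engineering of quicksort: median-of-three (or ninther) pivot selection, split-end partitioning of equal keys via vecswap(), insertion sort for small or already-ordered ranges, and iteration instead of recursion on the larger half. Callers use it exactly like the C library qsort(); a small sketch, assuming the prototype from qsort.h is in scope:

/* Three-way comparison over ints for the qsort() above. */
STATIC int
example_cmp_int(const void *a, const void *b)
{
	int	ia = *(const int *)a;
	int	ib = *(const int *)b;

	return (ia > ib) - (ia < ib);
}

STATIC void
example_sort(void)
{
	int	v[] = { 5, 1, 4, 2, 3 };

	qsort(v, 5, sizeof(v[0]), example_cmp_int);
	/* v is now { 1, 2, 3, 4, 5 } */
}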
diff --git a/fs/xfs/support/qsort.h b/fs/xfs/support/qsort.h
new file mode 100644
index 0000000..9426310
--- /dev/null
+++ b/fs/xfs/support/qsort.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef QSORT_H
+#define QSORT_H
+
+extern void qsort (void *const pbase,
+		    size_t total_elems,
+		    size_t size,
+		    int (*cmp)(const void *, const void *));
+
+#endif
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
new file mode 100644
index 0000000..81f40cf
--- /dev/null
+++ b/fs/xfs/support/uuid.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <xfs.h>
+
+static mutex_t	uuid_monitor;
+static int	uuid_table_size;
+static uuid_t	*uuid_table;
+
+void
+uuid_init(void)
+{
+	mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor");
+}
+
+/*
+ * uuid_getnodeuniq - obtain the node unique fields of a UUID.
+ *
+ * This is not in any way a standard or condoned UUID function;
+ * it is just something that's needed for user-level file handles.
+ */
+void
+uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
+{
+	char	*uu = (char *)uuid;
+
+	/* on IRIX, this function assumes big-endian fields within
+	 * the uuid, so we use INT_GET to get the same result on
+	 * little-endian systems
+	 */
+
+	fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
+		   INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
+	fsid[1] =  INT_GET(*(u_int32_t*)(uu  ), ARCH_CONVERT);
+}
+
+void
+uuid_create_nil(uuid_t *uuid)
+{
+	memset(uuid, 0, sizeof(*uuid));
+}
+
+int
+uuid_is_nil(uuid_t *uuid)
+{
+	int	i;
+	char	*cp = (char *)uuid;
+
+	if (uuid == NULL)
+		return 0;
+	/* implied check of version number here... */
+	for (i = 0; i < sizeof *uuid; i++)
+		if (*cp++) return 0;	/* not nil */
+	return 1;	/* is nil */
+}
+
+int
+uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
+{
+	return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
+}
+
+/*
+ * Given a 128-bit uuid, return a 64-bit value by adding the top and bottom
+ * 64-bit words.  NOTE: This function can not be changed EVER.  Although
+ * brain-dead, some applications depend on this 64-bit value remaining
+ * persistent.  Specifically, DMI vendors store the value as a persistent
+ * filehandle.
+ */
+__uint64_t
+uuid_hash64(uuid_t *uuid)
+{
+	__uint64_t	*sp = (__uint64_t *)uuid;
+
+	return sp[0] + sp[1];
+}
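
Because the result is just the wrap-around sum of the two 64-bit halves, it is easy to sanity-check by hand; a tiny illustrative sketch (the all-0x01 byte pattern is arbitrary):

STATIC __uint64_t
example_hash_demo(void)
{
	uuid_t	u;

	/* Each 64-bit half is 0x0101010101010101, so the sum is exactly
	 * twice that, with no carries between bytes. */
	memset(&u, 0x01, sizeof(u));
	return uuid_hash64(&u);		/* == 0x0202020202020202 */
}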
+
+int
+uuid_table_insert(uuid_t *uuid)
+{
+	int	i, hole;
+
+	mutex_lock(&uuid_monitor, PVFS);
+	for (i = 0, hole = -1; i < uuid_table_size; i++) {
+		if (uuid_is_nil(&uuid_table[i])) {
+			hole = i;
+			continue;
+		}
+		if (uuid_equal(uuid, &uuid_table[i])) {
+			mutex_unlock(&uuid_monitor);
+			return 0;
+		}
+	}
+	if (hole < 0) {
+		uuid_table = kmem_realloc(uuid_table,
+			(uuid_table_size + 1) * sizeof(*uuid_table),
+			uuid_table_size  * sizeof(*uuid_table),
+			KM_SLEEP);
+		hole = uuid_table_size++;
+	}
+	uuid_table[hole] = *uuid;
+	mutex_unlock(&uuid_monitor);
+	return 1;
+}
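
This table is how XFS notices two mounted filesystems carrying the same UUID: uuid_table_insert() returns 1 for a previously unseen UUID and 0 for a duplicate, and uuid_table_remove() drops the entry again at unmount. A hedged sketch of that pattern; the warning text and the EINVAL return are assumptions, not necessarily what the real mount path does:

STATIC int
example_register_fs_uuid(uuid_t *uuid)
{
	if (uuid_is_nil(uuid))
		return 0;			/* nothing to track */
	if (!uuid_table_insert(uuid)) {
		cmn_err(CE_WARN, "XFS: duplicate filesystem UUID");
		return EINVAL;			/* illustrative error choice */
	}
	/* The matching unmount path would call uuid_table_remove(uuid). */
	return 0;
}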
+
+void
+uuid_table_remove(uuid_t *uuid)
+{
+	int	i;
+
+	mutex_lock(&uuid_monitor, PVFS);
+	for (i = 0; i < uuid_table_size; i++) {
+		if (uuid_is_nil(&uuid_table[i]))
+			continue;
+		if (!uuid_equal(uuid, &uuid_table[i]))
+			continue;
+		uuid_create_nil(&uuid_table[i]);
+		break;
+	}
+	ASSERT(i < uuid_table_size);
+	mutex_unlock(&uuid_monitor);
+}
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/support/uuid.h
new file mode 100644
index 0000000..5220ea5
--- /dev/null
+++ b/fs/xfs/support/uuid.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_UUID_H__
+#define __XFS_SUPPORT_UUID_H__
+
+typedef struct {
+	unsigned char	__u_bits[16];
+} uuid_t;
+
+void uuid_init(void);
+void uuid_create_nil(uuid_t *uuid);
+int uuid_is_nil(uuid_t *uuid);
+int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
+void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
+__uint64_t uuid_hash64(uuid_t *uuid);
+int uuid_table_insert(uuid_t *uuid);
+void uuid_table_remove(uuid_t *uuid);
+
+#endif	/* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
new file mode 100644
index 0000000..7e276dc
--- /dev/null
+++ b/fs/xfs/xfs.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_H__
+#define __XFS_H__
+
+#include <linux-2.6/xfs_linux.h>
+
+#include <xfs_fs.h> 
+#include <xfs_macros.h>
+
+#endif	/* __XFS_H__ */
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
new file mode 100644
index 0000000..8d01dce
--- /dev/null
+++ b/fs/xfs/xfs_acl.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (c) 2001-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_inum.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_acl.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+
+#include <linux/posix_acl_xattr.h>
+
+STATIC int	xfs_acl_setmode(vnode_t *, xfs_acl_t *, int *);
+STATIC void     xfs_acl_filter_mode(mode_t, xfs_acl_t *);
+STATIC void	xfs_acl_get_endian(xfs_acl_t *);
+STATIC int	xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
+STATIC int	xfs_acl_invalid(xfs_acl_t *);
+STATIC void	xfs_acl_sync_mode(mode_t, xfs_acl_t *);
+STATIC void	xfs_acl_get_attr(vnode_t *, xfs_acl_t *, int, int, int *);
+STATIC void	xfs_acl_set_attr(vnode_t *, xfs_acl_t *, int, int *);
+STATIC int	xfs_acl_allow_set(vnode_t *, int);
+
+kmem_zone_t *xfs_acl_zone;
+
+
+/*
+ * Test for existence of access ACL attribute as efficiently as possible.
+ */
+int
+xfs_acl_vhasacl_access(
+	vnode_t		*vp)
+{
+	int		error;
+
+	xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error);
+	return (error == 0);
+}
+
+/*
+ * Test for existence of default ACL attribute as efficiently as possible.
+ */
+int
+xfs_acl_vhasacl_default(
+	vnode_t		*vp)
+{
+	int		error;
+
+	if (vp->v_type != VDIR)
+		return 0;
+	xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
+	return (error == 0);
+}
+
+/*
+ * Convert from extended attribute representation to in-memory for XFS.
+ */
+STATIC int
+posix_acl_xattr_to_xfs(
+	posix_acl_xattr_header	*src,
+	size_t			size,
+	xfs_acl_t		*dest)
+{
+	posix_acl_xattr_entry	*src_entry;
+	xfs_acl_entry_t		*dest_entry;
+	int			n;
+
+	if (!src || !dest)
+		return EINVAL;
+
+	if (size < sizeof(posix_acl_xattr_header))
+		return EINVAL;
+
+	if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
+		return EOPNOTSUPP;
+
+	memset(dest, 0, sizeof(xfs_acl_t));
+	dest->acl_cnt = posix_acl_xattr_count(size);
+	if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES)
+		return EINVAL;
+
+	/*
+	 * acl_set_file(3) may request that we set default ACLs with
+	 * zero length -- defend (gracefully) against that here.
+	 */
+	if (!dest->acl_cnt)
+		return 0;
+
+	src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src));
+	dest_entry = &dest->acl_entry[0];
+
+	for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) {
+		dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm);
+		if (_ACL_PERM_INVALID(dest_entry->ae_perm))
+			return EINVAL;
+		dest_entry->ae_tag  = le16_to_cpu(src_entry->e_tag);
+		switch(dest_entry->ae_tag) {
+		case ACL_USER:
+		case ACL_GROUP:
+			dest_entry->ae_id = le32_to_cpu(src_entry->e_id);
+			break;
+		case ACL_USER_OBJ:
+		case ACL_GROUP_OBJ:
+		case ACL_MASK:
+		case ACL_OTHER:
+			dest_entry->ae_id = ACL_UNDEFINED_ID;
+			break;
+		default:
+			return EINVAL;
+		}
+	}
+	if (xfs_acl_invalid(dest))
+		return EINVAL;
+
+	return 0;
+}
+
+/*
+ * Comparison function called from qsort().
+ * Primary key is ae_tag, secondary key is ae_id.
+ */
+STATIC int
+xfs_acl_entry_compare(
+	const void	*va,
+	const void	*vb)
+{
+	xfs_acl_entry_t	*a = (xfs_acl_entry_t *)va,
+			*b = (xfs_acl_entry_t *)vb;
+
+	if (a->ae_tag == b->ae_tag)
+		return (a->ae_id - b->ae_id);
+	return (a->ae_tag - b->ae_tag);
+}
+
+/*
+ * Convert from in-memory XFS to extended attribute representation.
+ */
+STATIC int
+posix_acl_xfs_to_xattr(
+	xfs_acl_t		*src,
+	posix_acl_xattr_header	*dest,
+	size_t			size)
+{
+	int			n;
+	size_t			new_size = posix_acl_xattr_size(src->acl_cnt);
+	posix_acl_xattr_entry	*dest_entry;
+	xfs_acl_entry_t		*src_entry;
+
+	if (size < new_size)
+		return -ERANGE;
+
+	/* Need to sort src XFS ACL by <ae_tag,ae_id> */
+	qsort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]),
+		xfs_acl_entry_compare);
+
+	dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+	dest_entry = &dest->a_entries[0];
+	src_entry = &src->acl_entry[0];
+	for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) {
+		dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm);
+		if (_ACL_PERM_INVALID(src_entry->ae_perm))
+			return -EINVAL;
+		dest_entry->e_tag  = cpu_to_le16(src_entry->ae_tag);
+		switch (src_entry->ae_tag) {
+		case ACL_USER:
+		case ACL_GROUP:
+			dest_entry->e_id = cpu_to_le32(src_entry->ae_id);
+				break;
+		case ACL_USER_OBJ:
+		case ACL_GROUP_OBJ:
+		case ACL_MASK:
+		case ACL_OTHER:
+			dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return new_size;
+}
+
+int
+xfs_acl_vget(
+	vnode_t		*vp,
+	void		*acl,
+	size_t		size,
+	int		kind)
+{
+	int			error;
+	xfs_acl_t		*xfs_acl = NULL;
+	posix_acl_xattr_header	*ext_acl = acl;
+	int			flags = 0;
+
+	VN_HOLD(vp);
+	if(size) {
+		if (!(_ACL_ALLOC(xfs_acl))) {
+			error = ENOMEM;
+			goto out;
+		}
+		memset(xfs_acl, 0, sizeof(xfs_acl_t));
+	} else
+		flags = ATTR_KERNOVAL;
+
+	xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error);
+	if (error)
+		goto out;
+
+	if (!size) {
+		error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES);
+	} else {
+		if (xfs_acl_invalid(xfs_acl)) {
+			error = EINVAL;
+			goto out;
+		}
+		if (kind == _ACL_TYPE_ACCESS) {
+			vattr_t	va;
+
+			va.va_mask = XFS_AT_MODE;
+			VOP_GETATTR(vp, &va, 0, sys_cred, error);
+			if (error)
+				goto out;
+			xfs_acl_sync_mode(va.va_mode, xfs_acl);
+		}
+		error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
+	}
+out:
+	VN_RELE(vp);
+	if(xfs_acl)
+		_ACL_FREE(xfs_acl);
+	return -error;
+}
+
+int
+xfs_acl_vremove(
+	vnode_t		*vp,
+	int		kind)
+{
+	int		error;
+
+	VN_HOLD(vp);
+	error = xfs_acl_allow_set(vp, kind);
+	if (!error) {
+		VOP_ATTR_REMOVE(vp, kind == _ACL_TYPE_DEFAULT?
+				SGI_ACL_DEFAULT: SGI_ACL_FILE,
+				ATTR_ROOT, sys_cred, error);
+		if (error == ENOATTR)
+			error = 0;	/* 'scool */
+	}
+	VN_RELE(vp);
+	return -error;
+}
+
+int
+xfs_acl_vset(
+	vnode_t			*vp,
+	void			*acl,
+	size_t			size,
+	int			kind)
+{
+	posix_acl_xattr_header	*ext_acl = acl;
+	xfs_acl_t		*xfs_acl;
+	int			error;
+	int			basicperms = 0; /* more than std unix perms? */
+
+	if (!acl)
+		return -EINVAL;
+
+	if (!(_ACL_ALLOC(xfs_acl)))
+		return -ENOMEM;
+
+	error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl);
+	if (error) {
+		_ACL_FREE(xfs_acl);
+		return -error;
+	}
+	if (!xfs_acl->acl_cnt) {
+		_ACL_FREE(xfs_acl);
+		return 0;
+	}
+
+	VN_HOLD(vp);
+	error = xfs_acl_allow_set(vp, kind);
+	if (error)
+		goto out;
+
+	/* Incoming ACL exists, set file mode based on its value */
+	if (kind == _ACL_TYPE_ACCESS)
+		xfs_acl_setmode(vp, xfs_acl, &basicperms);
+
+	/*
+	 * If we have more than std unix permissions, set up the actual attr.
+	 * Otherwise, delete any existing attr.  This prevents us from
+	 * having actual attrs for permissions that can be stored in the
+	 * standard permission bits.
+	 */
+	if (!basicperms) {
+		xfs_acl_set_attr(vp, xfs_acl, kind, &error);
+	} else {
+		xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
+	}
+
+out:
+	VN_RELE(vp);
+	_ACL_FREE(xfs_acl);
+	return -error;
+}
+
+int
+xfs_acl_iaccess(
+	xfs_inode_t	*ip,
+	mode_t		mode,
+	cred_t		*cr)
+{
+	xfs_acl_t	*acl;
+	int		rval;
+
+	if (!(_ACL_ALLOC(acl)))
+		return -1;
+
+	/* If the file has no ACL return -1. */
+	rval = sizeof(xfs_acl_t);
+	if (xfs_attr_fetch(ip, SGI_ACL_FILE, SGI_ACL_FILE_SIZE,
+			(char *)acl, &rval, ATTR_ROOT | ATTR_KERNACCESS, cr)) {
+		_ACL_FREE(acl);
+		return -1;
+	}
+	xfs_acl_get_endian(acl);
+
+	/* If the file has an empty ACL return -1. */
+	if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) {
+		_ACL_FREE(acl);
+		return -1;
+	}
+
+	/* Synchronize ACL with mode bits */
+	xfs_acl_sync_mode(ip->i_d.di_mode, acl);
+
+	rval = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr);
+	_ACL_FREE(acl);
+	return rval;
+}
+
+STATIC int
+xfs_acl_allow_set(
+	vnode_t		*vp,
+	int		kind)
+{
+	vattr_t		va;
+	int		error;
+
+	if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
+		return EPERM;
+	if (kind == _ACL_TYPE_DEFAULT && vp->v_type != VDIR)
+		return ENOTDIR;
+	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+		return EROFS;
+	va.va_mask = XFS_AT_UID;
+	VOP_GETATTR(vp, &va, 0, NULL, error);
+	if (error)
+		return error;
+	if (va.va_uid != current->fsuid && !capable(CAP_FOWNER))
+		return EPERM;
+	return error;
+}
+
+/*
+ * The access control process to determine the access permission:
+ *	if uid == file owner id, use the file owner bits.
+ *	if gid == file owner group id, use the file group bits.
+ *	scan ACL for a matching user or group, and use the matched entry's
+ *	permission. Use the total permissions of all matching group entries,
+ *	until all acl entries are exhausted. The final permission produced
+ *	by the matching acl entry or entries needs to be ANDed with the
+ *	group permission.
+ *	if not owner, owning group, or matching entry in ACL, use the file's
+ *	other bits.
+ */
+STATIC int
+xfs_acl_capability_check(
+	mode_t		mode,
+	cred_t		*cr)
+{
+	if ((mode & ACL_READ) && !capable_cred(cr, CAP_DAC_READ_SEARCH))
+		return EACCES;
+	if ((mode & ACL_WRITE) && !capable_cred(cr, CAP_DAC_OVERRIDE))
+		return EACCES;
+	if ((mode & ACL_EXECUTE) && !capable_cred(cr, CAP_DAC_OVERRIDE))
+		return EACCES;
+
+	return 0;
+}
+
+/*
+ * Note: cr is only used here for the capability check if the ACL test fails.
+ *       It is not used to find out the credentials uid or groups etc, as was
+ *       done in IRIX. It is assumed that the uid and groups for the current
+ *       thread are taken from "current" instead of the cr parameter.
+ */
+STATIC int
+xfs_acl_access(
+	uid_t		fuid,
+	gid_t		fgid,
+	xfs_acl_t	*fap,
+	mode_t		md,
+	cred_t		*cr)
+{
+	xfs_acl_entry_t	matched;
+	int		i, allows;
+	int		maskallows = -1;	/* true, but not 1, either */
+	int		seen_userobj = 0;
+
+	matched.ae_tag = 0;	/* Invalid type */
+	md >>= 6;	/* Normalize the bits for comparison */
+
+	for (i = 0; i < fap->acl_cnt; i++) {
+		/*
+		 * Break out if we've got a user_obj entry or
+		 * a user entry and the mask (and have processed USER_OBJ)
+		 */
+		if (matched.ae_tag == ACL_USER_OBJ)
+			break;
+		if (matched.ae_tag == ACL_USER) {
+			if (maskallows != -1 && seen_userobj)
+				break;
+			if (fap->acl_entry[i].ae_tag != ACL_MASK &&
+			    fap->acl_entry[i].ae_tag != ACL_USER_OBJ)
+				continue;
+		}
+		/* True if this entry allows the requested access */
+		allows = ((fap->acl_entry[i].ae_perm & md) == md);
+
+		switch (fap->acl_entry[i].ae_tag) {
+		case ACL_USER_OBJ:
+			seen_userobj = 1;
+			if (fuid != current->fsuid)
+				continue;
+			matched.ae_tag = ACL_USER_OBJ;
+			matched.ae_perm = allows;
+			break;
+		case ACL_USER:
+			if (fap->acl_entry[i].ae_id != current->fsuid)
+				continue;
+			matched.ae_tag = ACL_USER;
+			matched.ae_perm = allows;
+			break;
+		case ACL_GROUP_OBJ:
+			if ((matched.ae_tag == ACL_GROUP_OBJ ||
+			    matched.ae_tag == ACL_GROUP) && !allows)
+				continue;
+			if (!in_group_p(fgid))
+				continue;
+			matched.ae_tag = ACL_GROUP_OBJ;
+			matched.ae_perm = allows;
+			break;
+		case ACL_GROUP:
+			if ((matched.ae_tag == ACL_GROUP_OBJ ||
+			    matched.ae_tag == ACL_GROUP) && !allows)
+				continue;
+			if (!in_group_p(fap->acl_entry[i].ae_id))
+				continue;
+			matched.ae_tag = ACL_GROUP;
+			matched.ae_perm = allows;
+			break;
+		case ACL_MASK:
+			maskallows = allows;
+			break;
+		case ACL_OTHER:
+			if (matched.ae_tag != 0)
+				continue;
+			matched.ae_tag = ACL_OTHER;
+			matched.ae_perm = allows;
+			break;
+		}
+	}
+	/*
+	 * First possibility is that no matched entry allows access.
+	 * The capability to override DAC may exist, so check for it.
+	 */
+	switch (matched.ae_tag) {
+	case ACL_OTHER:
+	case ACL_USER_OBJ:
+		if (matched.ae_perm)
+			return 0;
+		break;
+	case ACL_USER:
+	case ACL_GROUP_OBJ:
+	case ACL_GROUP:
+		if (maskallows && matched.ae_perm)
+			return 0;
+		break;
+	case 0:
+		break;
+	}
+
+	return xfs_acl_capability_check(md, cr);
+}
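
A concrete reading of the precedence implemented above: an ACL_USER_OBJ or ACL_USER match takes priority over the group entries, ACL_OTHER only applies when nothing else matched, and any match made through ACL_USER, ACL_GROUP_OBJ or ACL_GROUP is additionally gated by the ACL_MASK result (maskallows). A worked example with illustrative values:

/*
 * Worked example (all values illustrative):
 *
 *	request:  md = ACL_READ		(already shifted by md >>= 6)
 *	ACL:	  u::rw-  u:501:rw-  g::r--  m::r--  o::---
 *	caller:   fsuid == 501, not the file owner, not in the owning group
 *
 *	- ACL_USER_OBJ is skipped (caller is not the owner), but it sets
 *	  seen_userobj.
 *	- ACL_USER id 501 matches: allows = ((rw- & r--) == r--) = 1.
 *	- ACL_MASK gives maskallows = ((r-- & r--) == r--) = 1.
 *	- Final check: matched.ae_tag == ACL_USER with maskallows and
 *	  matched.ae_perm both set, so access is granted (return 0)
 *	  without falling back to xfs_acl_capability_check().
 */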
+
+/*
+ * ACL validity checker.
+ *   This acl validation routine checks each ACL entry read in makes sense.
+ */
+STATIC int
+xfs_acl_invalid(
+	xfs_acl_t	*aclp)
+{
+	xfs_acl_entry_t	*entry, *e;
+	int		user = 0, group = 0, other = 0, mask = 0;
+	int		mask_required = 0;
+	int		i, j;
+
+	if (!aclp)
+		goto acl_invalid;
+
+	if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES)
+		goto acl_invalid;
+
+	for (i = 0; i < aclp->acl_cnt; i++) {
+		entry = &aclp->acl_entry[i];
+		switch (entry->ae_tag) {
+		case ACL_USER_OBJ:
+			if (user++)
+				goto acl_invalid;
+			break;
+		case ACL_GROUP_OBJ:
+			if (group++)
+				goto acl_invalid;
+			break;
+		case ACL_OTHER:
+			if (other++)
+				goto acl_invalid;
+			break;
+		case ACL_USER:
+		case ACL_GROUP:
+			for (j = i + 1; j < aclp->acl_cnt; j++) {
+				e = &aclp->acl_entry[j];
+				if (e->ae_id == entry->ae_id &&
+				    e->ae_tag == entry->ae_tag)
+					goto acl_invalid;
+			}
+			mask_required++;
+			break;
+		case ACL_MASK:
+			if (mask++)
+				goto acl_invalid;
+			break;
+		default:
+			goto acl_invalid;
+		}
+	}
+	if (!user || !group || !other || (mask_required && !mask))
+		goto acl_invalid;
+	else
+		return 0;
+acl_invalid:
+	return EINVAL;
+}
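
To restate the rules enforced above: exactly one ACL_USER_OBJ, one ACL_GROUP_OBJ and one ACL_OTHER entry, at most one ACL_MASK, no duplicate <tag,id> pairs among named users or groups, and a mask becomes mandatory as soon as any named ACL_USER or ACL_GROUP entry is present. A minimal in-memory ACL that passes these checks; the perm values are illustrative:

/* Build a 5-entry ACL that xfs_acl_invalid() accepts: the u::, g:: and
 * o:: base entries are always required, and the named user entry makes
 * the ACL_MASK entry mandatory as well. */
STATIC void
example_build_valid_acl(xfs_acl_t *aclp, uid_t uid)
{
	memset(aclp, 0, sizeof(*aclp));
	aclp->acl_cnt = 5;

	aclp->acl_entry[0].ae_tag  = ACL_USER_OBJ;
	aclp->acl_entry[0].ae_id   = ACL_UNDEFINED_ID;
	aclp->acl_entry[0].ae_perm = ACL_READ | ACL_WRITE;

	aclp->acl_entry[1].ae_tag  = ACL_USER;		/* named user */
	aclp->acl_entry[1].ae_id   = uid;
	aclp->acl_entry[1].ae_perm = ACL_READ;

	aclp->acl_entry[2].ae_tag  = ACL_GROUP_OBJ;
	aclp->acl_entry[2].ae_id   = ACL_UNDEFINED_ID;
	aclp->acl_entry[2].ae_perm = ACL_READ;

	aclp->acl_entry[3].ae_tag  = ACL_MASK;		/* required by [1] */
	aclp->acl_entry[3].ae_id   = ACL_UNDEFINED_ID;
	aclp->acl_entry[3].ae_perm = ACL_READ;

	aclp->acl_entry[4].ae_tag  = ACL_OTHER;
	aclp->acl_entry[4].ae_id   = ACL_UNDEFINED_ID;
	aclp->acl_entry[4].ae_perm = 0;
}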
+
+/*
+ * Do ACL endian conversion.
+ */
+STATIC void
+xfs_acl_get_endian(
+	xfs_acl_t	*aclp)
+{
+	xfs_acl_entry_t	*ace, *end;
+
+	INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
+	end = &aclp->acl_entry[0]+aclp->acl_cnt;
+	for (ace = &aclp->acl_entry[0]; ace < end; ace++) {
+		INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag);
+		INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id);
+		INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm);
+	}
+}
+
+/*
+ * Get the ACL from the EA and do endian conversion.
+ */
+STATIC void
+xfs_acl_get_attr(
+	vnode_t		*vp,
+	xfs_acl_t	*aclp,
+	int		kind,
+	int		flags,
+	int		*error)
+{
+	int		len = sizeof(xfs_acl_t);
+
+	ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
+	flags |= ATTR_ROOT;
+	VOP_ATTR_GET(vp,
+		kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT,
+		(char *)aclp, &len, flags, sys_cred, *error);
+	if (*error || (flags & ATTR_KERNOVAL))
+		return;
+	xfs_acl_get_endian(aclp);
+}
+
+/*
+ * Set the EA with the ACL and do endian conversion.
+ */
+STATIC void
+xfs_acl_set_attr(
+	vnode_t		*vp,
+	xfs_acl_t	*aclp,
+	int		kind,
+	int		*error)
+{
+	xfs_acl_entry_t	*ace, *newace, *end;
+	xfs_acl_t	*newacl;
+	int		len;
+
+	if (!(_ACL_ALLOC(newacl))) {
+		*error = ENOMEM;
+		return;
+	}
+
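+	/* Size the attribute to cover only the entries actually in use. */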
+	len = sizeof(xfs_acl_t) -
+	      (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt));
+	end = &aclp->acl_entry[0]+aclp->acl_cnt;
+	for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0];
+	     ace < end;
+	     ace++, newace++) {
+		INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag);
+		INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id);
+		INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
+	}
+	INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
+	VOP_ATTR_SET(vp,
+		kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE: SGI_ACL_DEFAULT,
+		(char *)newacl, len, ATTR_ROOT, sys_cred, *error);
+	_ACL_FREE(newacl);
+}
+
+int
+xfs_acl_vtoacl(
+	vnode_t		*vp,
+	xfs_acl_t	*access_acl,
+	xfs_acl_t	*default_acl)
+{
+	vattr_t		va;
+	int		error = 0;
+
+	if (access_acl) {
+		/*
+		 * Get the Access ACL and the mode.  If either cannot
+		 * be obtained for some reason, invalidate the access ACL.
+		 */
+		xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
+		if (!error) {
+			/* Got the ACL, need the mode... */
+			va.va_mask = XFS_AT_MODE;
+			VOP_GETATTR(vp, &va, 0, sys_cred, error);
+		}
+
+		if (error)
+			access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
+		else /* We have a good ACL and the file mode, synchronize. */
+			xfs_acl_sync_mode(va.va_mode, access_acl);
+	}
+
+	if (default_acl) {
+		xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error);
+		if (error)
+			default_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
+	}
+	return error;
+}
+
+/*
+ * This function retrieves the parent directory's acl, processes it
+ * and lets the child inherit the acl(s) that it should.
+ */
+int
+xfs_acl_inherit(
+	vnode_t		*vp,
+	vattr_t		*vap,
+	xfs_acl_t	*pdaclp)
+{
+	xfs_acl_t	*cacl;
+	int		error = 0;
+	int		basicperms = 0;
+
+	/*
+	 * If the parent does not have a default ACL, or it's an
+	 * invalid ACL, we're done.
+	 */
+	if (!vp)
+		return 0;
+	if (!pdaclp || xfs_acl_invalid(pdaclp))
+		return 0;
+
+	/*
+	 * Copy the default ACL of the containing directory to
+	 * the access ACL of the new file and use the mode that
+	 * was passed in to set up the correct initial values for
+	 * the u::, g::[m::], and o:: entries.  This is what makes
+	 * umask() "work" with ACLs.
+	 */
+
+	if (!(_ACL_ALLOC(cacl)))
+		return ENOMEM;
+
+	memcpy(cacl, pdaclp, sizeof(xfs_acl_t));
+	xfs_acl_filter_mode(vap->va_mode, cacl);
+	xfs_acl_setmode(vp, cacl, &basicperms);
+
+	/*
+	 * Set the Default and Access ACL on the file.  The mode is already
+	 * set on the file, so we don't need to worry about that.
+	 *
+	 * If the new file is a directory, its default ACL is a copy of
+	 * the containing directory's default ACL.
+	 */
+	if (vp->v_type == VDIR)
+		xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
+	if (!error && !basicperms)
+		xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
+	_ACL_FREE(cacl);
+	return error;
+}
+
+/*
+ * Set up the correct mode on the file based on the supplied ACL.  This
+ * makes sure that the mode on the file reflects the state of the
+ * u::, g::[m::], and o:: entries in the ACL.  Since the mode is where
+ * the ACL is going to get the permissions for these entries, we must
+ * synchronize the mode whenever we set the ACL on a file.
+ */
+STATIC int
+xfs_acl_setmode(
+	vnode_t		*vp,
+	xfs_acl_t	*acl,
+	int		*basicperms)
+{
+	vattr_t		va;
+	xfs_acl_entry_t	*ap;
+	xfs_acl_entry_t	*gap = NULL;
+	int		i, error, nomask = 1;
+
+	*basicperms = 1;
+
+	if (acl->acl_cnt == XFS_ACL_NOT_PRESENT)
+		return 0;
+
+	/*
+	 * Copy the u::, g::, o::, and m:: bits from the ACL into the
+	 * mode.  The m:: bits take precedence over the g:: bits.
+	 */
+	va.va_mask = XFS_AT_MODE;
+	VOP_GETATTR(vp, &va, 0, sys_cred, error);
+	if (error)
+		return error;
+
+	va.va_mask = XFS_AT_MODE;
+	va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
+	ap = acl->acl_entry;
+	for (i = 0; i < acl->acl_cnt; ++i) {
+		switch (ap->ae_tag) {
+		case ACL_USER_OBJ:
+			va.va_mode |= ap->ae_perm << 6;
+			break;
+		case ACL_GROUP_OBJ:
+			gap = ap;
+			break;
+		case ACL_MASK:	/* more than just standard modes */
+			nomask = 0;
+			va.va_mode |= ap->ae_perm << 3;
+			*basicperms = 0;
+			break;
+		case ACL_OTHER:
+			va.va_mode |= ap->ae_perm;
+			break;
+		default:	/* more than just standard modes */
+			*basicperms = 0;
+			break;
+		}
+		ap++;
+	}
+
+	/* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */
+	if (gap && nomask)
+		va.va_mode |= gap->ae_perm << 3;
+
+	VOP_SETATTR(vp, &va, 0, sys_cred, error);
+	return error;
+}
+
+/*
+ * The permissions for the special ACL entries (u::, g::[m::], o::) are
+ * actually stored in the file mode (if there is both a group and a mask,
+ * the group is stored in the ACL entry and the mask is stored on the file).
+ * This allows the mode to remain automatically in sync with the ACL without
+ * the need for a call-back to the ACL system at every point where the mode
+ * could change.  This function takes the permissions from the specified mode
+ * and places it in the supplied ACL.
+ *
+ * This implementation draws its validity from the fact that, when the ACL
+ * was assigned, the mode was copied from the ACL.
+ * If the mode did not change, therefore, the mode remains exactly what was
+ * taken from the special ACL entries at assignment.
+ * If a subsequent chmod() was done, the POSIX spec says that the change in
+ * mode must cause an update to the ACL seen at user level and used for
+ * access checks.  Before and after a mode change, therefore, the file mode
+ * most accurately reflects what the special ACL entries should permit/deny.
+ *
+ * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly,
+ *         the existing mode bits will override whatever is in the
+ *         ACL. Similarly, if there is a pre-existing ACL that was
+ *         never in sync with its mode (owing to a bug in 6.5 and
+ *         before), it will now magically (or mystically) be
+ *         synchronized.  This could cause slight astonishment, but
+ *         it is better than inconsistent permissions.
+ *
+ * The supplied ACL is a template that may contain any combination
+ * of special entries.  These are treated as place holders when we fill
+ * out the ACL.  This routine does not add or remove special entries, it
+ * simply unites each special entry with its associated set of permissions.
+ */
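+/*
+ * For example, given mode 0640: the owner bits (mode >> 6) yield rw-,
+ * the group/mask bits (mode >> 3) yield r--, and the other bits
+ * (mode & 7) yield no permissions; those are the values copied into
+ * the matching special entries below.
+ */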
+STATIC void
+xfs_acl_sync_mode(
+	mode_t		mode,
+	xfs_acl_t	*acl)
+{
+	int		i, nomask = 1;
+	xfs_acl_entry_t	*ap;
+	xfs_acl_entry_t	*gap = NULL;
+
+	/*
+	 * Set ACL entries. POSIX1003.1eD16 requires that the MASK
+	 * be set instead of the GROUP entry, if there is a MASK.
+	 */
+	for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
+		switch (ap->ae_tag) {
+		case ACL_USER_OBJ:
+			ap->ae_perm = (mode >> 6) & 0x7;
+			break;
+		case ACL_GROUP_OBJ:
+			gap = ap;
+			break;
+		case ACL_MASK:
+			nomask = 0;
+			ap->ae_perm = (mode >> 3) & 0x7;
+			break;
+		case ACL_OTHER:
+			ap->ae_perm = mode & 0x7;
+			break;
+		default:
+			break;
+		}
+	}
+	/* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
+	if (gap && nomask)
+		gap->ae_perm = (mode >> 3) & 0x7;
+}
+
+/*
+ * When inheriting an Access ACL from a directory Default ACL,
+ * the ACL bits are set to the intersection of the ACL default
+ * permission bits and the file permission bits in mode. If there
+ * are no permission bits on the file then we must not grant them
+ * in the ACL. This is what makes umask() work with ACLs.
+ */
+STATIC void
+xfs_acl_filter_mode(
+	mode_t		mode,
+	xfs_acl_t	*acl)
+{
+	int		i, nomask = 1;
+	xfs_acl_entry_t	*ap;
+	xfs_acl_entry_t	*gap = NULL;
+
+	/*
+	 * Set ACL entries. POSIX1003.1eD16 requires that the MASK
+	 * be merged with GROUP entry, if there is a MASK.
+	 */
+	for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
+		switch (ap->ae_tag) {
+		case ACL_USER_OBJ:
+			ap->ae_perm &= (mode >> 6) & 0x7;
+			break;
+		case ACL_GROUP_OBJ:
+			gap = ap;
+			break;
+		case ACL_MASK:
+			nomask = 0;
+			ap->ae_perm &= (mode >> 3) & 0x7;
+			break;
+		case ACL_OTHER:
+			ap->ae_perm &= mode & 0x7;
+			break;
+		default:
+			break;
+		}
+	}
+	/* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
+	if (gap && nomask)
+		gap->ae_perm &= (mode >> 3) & 0x7;
+}
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
new file mode 100644
index 0000000..0363eb4
--- /dev/null
+++ b/fs/xfs/xfs_acl.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ACL_H__
+#define __XFS_ACL_H__
+
+/*
+ * Access Control Lists
+ */
+typedef __uint16_t	xfs_acl_perm_t;
+typedef __int32_t	xfs_acl_type_t;
+typedef __int32_t	xfs_acl_tag_t;
+typedef __int32_t	xfs_acl_id_t;
+
+#define XFS_ACL_MAX_ENTRIES 25
+#define XFS_ACL_NOT_PRESENT (-1)
+
+typedef struct xfs_acl_entry {
+	xfs_acl_tag_t	ae_tag;
+	xfs_acl_id_t	ae_id;
+	xfs_acl_perm_t	ae_perm;
+} xfs_acl_entry_t;
+
+typedef struct xfs_acl {
+	__int32_t	acl_cnt;
+	xfs_acl_entry_t	acl_entry[XFS_ACL_MAX_ENTRIES];
+} xfs_acl_t;
+
+/* On-disk XFS extended attribute names */
+#define SGI_ACL_FILE	"SGI_ACL_FILE"
+#define SGI_ACL_DEFAULT	"SGI_ACL_DEFAULT"
+#define SGI_ACL_FILE_SIZE	(sizeof(SGI_ACL_FILE)-1)
+#define SGI_ACL_DEFAULT_SIZE	(sizeof(SGI_ACL_DEFAULT)-1)
+
+
+#ifdef CONFIG_XFS_POSIX_ACL
+
+struct vattr;
+struct vnode;
+struct xfs_inode;
+
+extern struct kmem_zone *xfs_acl_zone;
+#define xfs_acl_zone_init(zone, name)	\
+		(zone) = kmem_zone_init(sizeof(xfs_acl_t), name)
+#define xfs_acl_zone_destroy(zone)	kmem_cache_destroy(zone)
+
+extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *);
+extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
+extern int xfs_acl_vtoacl(struct vnode *, xfs_acl_t *, xfs_acl_t *);
+extern int xfs_acl_vhasacl_access(struct vnode *);
+extern int xfs_acl_vhasacl_default(struct vnode *);
+extern int xfs_acl_vset(struct vnode *, void *, size_t, int);
+extern int xfs_acl_vget(struct vnode *, void *, size_t, int);
+extern int xfs_acl_vremove(struct vnode *vp, int);
+
+#define _ACL_TYPE_ACCESS	1
+#define _ACL_TYPE_DEFAULT	2
+#define _ACL_PERM_INVALID(perm)	((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
+
+#define _ACL_INHERIT(c,v,d)	(xfs_acl_inherit(c,v,d))
+#define _ACL_GET_ACCESS(pv,pa)	(xfs_acl_vtoacl(pv,pa,NULL) == 0)
+#define _ACL_GET_DEFAULT(pv,pd)	(xfs_acl_vtoacl(pv,NULL,pd) == 0)
+#define _ACL_ACCESS_EXISTS	xfs_acl_vhasacl_access
+#define _ACL_DEFAULT_EXISTS	xfs_acl_vhasacl_default
+#define _ACL_XFS_IACCESS(i,m,c) (XFS_IFORK_Q(i) ? xfs_acl_iaccess(i,m,c) : -1)
+
+#define _ACL_ALLOC(a)		((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP))
+#define _ACL_FREE(a)		((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0)
+
+#else
+#define xfs_acl_zone_init(zone,name)
+#define xfs_acl_zone_destroy(zone)
+#define xfs_acl_vset(v,p,sz,t)	(-EOPNOTSUPP)
+#define xfs_acl_vget(v,p,sz,t)	(-EOPNOTSUPP)
+#define xfs_acl_vremove(v,t)	(-EOPNOTSUPP)
+#define xfs_acl_vhasacl_access(v)	(0)
+#define xfs_acl_vhasacl_default(v)	(0)
+#define _ACL_ALLOC(a)		(1)	/* successfully allocate nothing */
+#define _ACL_FREE(a)		((void)0)
+#define _ACL_INHERIT(c,v,d)	(0)
+#define _ACL_GET_ACCESS(pv,pa)	(0)
+#define _ACL_GET_DEFAULT(pv,pd)	(0)
+#define _ACL_ACCESS_EXISTS	(NULL)
+#define _ACL_DEFAULT_EXISTS	(NULL)
+#define _ACL_XFS_IACCESS(i,m,c) (-1)
+#endif
+
+#endif	/* __XFS_ACL_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
new file mode 100644
index 0000000..96b70f7
--- /dev/null
+++ b/fs/xfs/xfs_ag.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_AG_H__
+#define	__XFS_AG_H__
+
+/*
+ * Allocation group header
+ * This is divided into three structures, placed in sequential 512-byte
+ * buffers after a copy of the superblock (also in a 512-byte buffer).
+ */
+
+struct xfs_buf;
+struct xfs_mount;
+struct xfs_trans;
+
+#define	XFS_AGF_MAGIC	0x58414746	/* 'XAGF' */
+#define	XFS_AGI_MAGIC	0x58414749	/* 'XAGI' */
+#define	XFS_AGF_VERSION	1
+#define	XFS_AGI_VERSION	1
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_GOOD_VERSION)
+int xfs_agf_good_version(unsigned v);
+#define	XFS_AGF_GOOD_VERSION(v)	xfs_agf_good_version(v)
+#else
+#define XFS_AGF_GOOD_VERSION(v)		((v) == XFS_AGF_VERSION)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_GOOD_VERSION)
+int xfs_agi_good_version(unsigned v);
+#define	XFS_AGI_GOOD_VERSION(v)	xfs_agi_good_version(v)
+#else
+#define XFS_AGI_GOOD_VERSION(v)		((v) == XFS_AGI_VERSION)
+#endif
+
+/*
+ * Btree number 0 is bno, 1 is cnt.  This value gives the size of the
+ * arrays below.
+ */
+#define	XFS_BTNUM_AGF	((int)XFS_BTNUM_CNTi + 1)
+
+/*
+ * The second word of agf_levels in the first a.g. overlaps the EFS
+ * superblock's magic number.  Since the magic numbers valid for EFS
+ * are > 64k, our value cannot be confused for an EFS superblock's.
+ */
+
+typedef struct xfs_agf
+{
+	/*
+	 * Common allocation group header information
+	 */
+	__uint32_t	agf_magicnum;	/* magic number == XFS_AGF_MAGIC */
+	__uint32_t	agf_versionnum;	/* header version == XFS_AGF_VERSION */
+	xfs_agnumber_t	agf_seqno;	/* sequence # starting from 0 */
+	xfs_agblock_t	agf_length;	/* size in blocks of a.g. */
+	/*
+	 * Freespace information
+	 */
+	xfs_agblock_t	agf_roots[XFS_BTNUM_AGF];	/* root blocks */
+	__uint32_t	agf_spare0;	/* spare field */
+	__uint32_t	agf_levels[XFS_BTNUM_AGF];	/* btree levels */
+	__uint32_t	agf_spare1;	/* spare field */
+	__uint32_t	agf_flfirst;	/* first freelist block's index */
+	__uint32_t	agf_fllast;	/* last freelist block's index */
+	__uint32_t	agf_flcount;	/* count of blocks in freelist */
+	xfs_extlen_t	agf_freeblks;	/* total free blocks */
+	xfs_extlen_t	agf_longest;	/* longest free space */
+} xfs_agf_t;
+
+#define	XFS_AGF_MAGICNUM	0x00000001
+#define	XFS_AGF_VERSIONNUM	0x00000002
+#define	XFS_AGF_SEQNO		0x00000004
+#define	XFS_AGF_LENGTH		0x00000008
+#define	XFS_AGF_ROOTS		0x00000010
+#define	XFS_AGF_LEVELS		0x00000020
+#define	XFS_AGF_FLFIRST		0x00000040
+#define	XFS_AGF_FLLAST		0x00000080
+#define	XFS_AGF_FLCOUNT		0x00000100
+#define	XFS_AGF_FREEBLKS	0x00000200
+#define	XFS_AGF_LONGEST		0x00000400
+#define	XFS_AGF_NUM_BITS	11
+#define	XFS_AGF_ALL_BITS	((1 << XFS_AGF_NUM_BITS) - 1)
+
+/* disk block (xfs_daddr_t) in the AG */
+#define XFS_AGF_DADDR(mp)	((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_BLOCK)
+xfs_agblock_t xfs_agf_block(struct xfs_mount *mp);
+#define	XFS_AGF_BLOCK(mp)	xfs_agf_block(mp)
+#else
+#define XFS_AGF_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
+#endif
+
+/*
+ * Size of the unlinked inode hash table in the agi.
+ */
+#define	XFS_AGI_UNLINKED_BUCKETS	64
+
+typedef struct xfs_agi
+{
+	/*
+	 * Common allocation group header information
+	 */
+	__uint32_t	agi_magicnum;	/* magic number == XFS_AGI_MAGIC */
+	__uint32_t	agi_versionnum;	/* header version == XFS_AGI_VERSION */
+	xfs_agnumber_t	agi_seqno;	/* sequence # starting from 0 */
+	xfs_agblock_t	agi_length;	/* size in blocks of a.g. */
+	/*
+	 * Inode information
+	 * Inodes are mapped by interpreting the inode number, so no
+	 * mapping data is needed here.
+	 */
+	xfs_agino_t	agi_count;	/* count of allocated inodes */
+	xfs_agblock_t	agi_root;	/* root of inode btree */
+	__uint32_t	agi_level;	/* levels in inode btree */
+	xfs_agino_t	agi_freecount;	/* number of free inodes */
+	xfs_agino_t	agi_newino;	/* new inode just allocated */
+	xfs_agino_t	agi_dirino;	/* last directory inode chunk */
+	/*
+	 * Hash table of inodes which have been unlinked but are
+	 * still being referenced.
+	 */
+	xfs_agino_t	agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
+} xfs_agi_t;
+
+#define	XFS_AGI_MAGICNUM	0x00000001
+#define	XFS_AGI_VERSIONNUM	0x00000002
+#define	XFS_AGI_SEQNO		0x00000004
+#define	XFS_AGI_LENGTH		0x00000008
+#define	XFS_AGI_COUNT		0x00000010
+#define	XFS_AGI_ROOT		0x00000020
+#define	XFS_AGI_LEVEL		0x00000040
+#define	XFS_AGI_FREECOUNT	0x00000080
+#define	XFS_AGI_NEWINO		0x00000100
+#define	XFS_AGI_DIRINO		0x00000200
+#define	XFS_AGI_UNLINKED	0x00000400
+#define	XFS_AGI_NUM_BITS	11
+#define	XFS_AGI_ALL_BITS	((1 << XFS_AGI_NUM_BITS) - 1)
+
+/* disk block (xfs_daddr_t) in the AG */
+#define XFS_AGI_DADDR(mp)	((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_BLOCK)
+xfs_agblock_t xfs_agi_block(struct xfs_mount *mp);
+#define	XFS_AGI_BLOCK(mp)	xfs_agi_block(mp)
+#else
+#define XFS_AGI_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
+#endif
+
+/*
+ * The third a.g. block contains the a.g. freelist, an array
+ * of block pointers to blocks owned by the allocation btree code.
+ */
+#define XFS_AGFL_DADDR(mp)	((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGFL_BLOCK)
+xfs_agblock_t xfs_agfl_block(struct xfs_mount *mp);
+#define	XFS_AGFL_BLOCK(mp)	xfs_agfl_block(mp)
+#else
+#define XFS_AGFL_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
+#endif
+#define XFS_AGFL_SIZE(mp)	((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t))
+
+typedef struct xfs_agfl {
+	xfs_agblock_t	agfl_bno[1];	/* actually XFS_AGFL_SIZE(mp) */
+} xfs_agfl_t;
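+
+/*
+ * Taken together, the daddr macros above lay out each allocation group
+ * header in sector-sized buffers: sector 0 holds the superblock copy,
+ * sector 1 the AGF, sector 2 the AGI, and sector 3 the AGFL, with
+ * m_sectbb_log converting sector indices into basic blocks.
+ */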
+
+/*
+ * Busy block/extent entry.  Used in perag to mark blocks that have been freed
+ * but whose transactions aren't committed to disk yet.
+ */
+typedef struct xfs_perag_busy {
+	xfs_agblock_t	busy_start;
+	xfs_extlen_t	busy_length;
+	struct xfs_trans *busy_tp;	/* transaction that did the free */
+} xfs_perag_busy_t;
+
+/*
+ * Per-ag incore structure, copies of information in agf and agi,
+ * to improve the performance of allocation group selection.
+ *
+ * pick sizes which fit well into allocation buckets
+ */
+#if (BITS_PER_LONG == 32)
+#define XFS_PAGB_NUM_SLOTS	84
+#elif (BITS_PER_LONG == 64)
+#define XFS_PAGB_NUM_SLOTS	128
+#endif
+
+typedef struct xfs_perag
+{
+	char		pagf_init;	/* this agf's entry is initialized */
+	char		pagi_init;	/* this agi's entry is initialized */
+	char		pagf_metadata;	/* the agf is preferred to be metadata */
+	char		pagi_inodeok;	/* The agi is ok for inodes */
+	__uint8_t	pagf_levels[XFS_BTNUM_AGF];
+					/* # of levels in bno & cnt btree */
+	__uint32_t	pagf_flcount;	/* count of blocks in freelist */
+	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
+	xfs_extlen_t	pagf_longest;	/* longest free space */
+	xfs_agino_t	pagi_freecount;	/* number of free inodes */
+#ifdef __KERNEL__
+	lock_t		pagb_lock;	/* lock for pagb_list */
+#endif
+	int		pagb_count;	/* pagb slots in use */
+	xfs_perag_busy_t *pagb_list;	/* unstable blocks */
+} xfs_perag_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MAXLEVELS)
+int xfs_ag_maxlevels(struct xfs_mount *mp);
+#define	XFS_AG_MAXLEVELS(mp)		xfs_ag_maxlevels(mp)
+#else
+#define	XFS_AG_MAXLEVELS(mp)	((mp)->m_ag_maxlevels)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST)
+int xfs_min_freelist(xfs_agf_t *a, struct xfs_mount *mp);
+#define	XFS_MIN_FREELIST(a,mp)		xfs_min_freelist(a,mp)
+#else
+#define	XFS_MIN_FREELIST(a,mp)	\
+	XFS_MIN_FREELIST_RAW(	\
+		INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \
+		INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_PAG)
+int xfs_min_freelist_pag(xfs_perag_t *pag, struct xfs_mount *mp);
+#define	XFS_MIN_FREELIST_PAG(pag,mp)	xfs_min_freelist_pag(pag,mp)
+#else
+#define	XFS_MIN_FREELIST_PAG(pag,mp)	\
+	XFS_MIN_FREELIST_RAW((uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
+			     (uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_RAW)
+int xfs_min_freelist_raw(int bl, int cl, struct xfs_mount *mp);
+#define	XFS_MIN_FREELIST_RAW(bl,cl,mp)	xfs_min_freelist_raw(bl,cl,mp)
+#else
+#define	XFS_MIN_FREELIST_RAW(bl,cl,mp)	\
+	(MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + \
+	 MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
+#endif
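+/*
+ * For example, a 2-level bno btree and a 3-level cnt btree in a
+ * filesystem whose AGs allow at most 5 btree levels give a minimum
+ * freelist of MIN(2 + 1, 5) + MIN(3 + 1, 5) = 7 blocks.
+ */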
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_FSB)
+xfs_fsblock_t xfs_agb_to_fsb(struct xfs_mount *mp, xfs_agnumber_t agno,
+			     xfs_agblock_t agbno);
+#define XFS_AGB_TO_FSB(mp,agno,agbno)	xfs_agb_to_fsb(mp,agno,agbno)
+#else
+#define	XFS_AGB_TO_FSB(mp,agno,agbno) \
+	(((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGNO)
+xfs_agnumber_t xfs_fsb_to_agno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
+#define	XFS_FSB_TO_AGNO(mp,fsbno)	xfs_fsb_to_agno(mp,fsbno)
+#else
+#define	XFS_FSB_TO_AGNO(mp,fsbno) \
+	((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGBNO)
+xfs_agblock_t xfs_fsb_to_agbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
+#define	XFS_FSB_TO_AGBNO(mp,fsbno)	xfs_fsb_to_agbno(mp,fsbno)
+#else
+#define	XFS_FSB_TO_AGBNO(mp,fsbno) \
+	((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog)))
+#endif
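+/*
+ * The conversions above pack the AG number into the high bits of a
+ * filesystem block number and the AG-relative block into the low
+ * sb_agblklog bits: with sb_agblklog == 16, AG 3 block 0x1234 becomes
+ * fsbno 0x31234, and the reverse macros recover 3 and 0x1234.
+ */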
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_DADDR)
+xfs_daddr_t xfs_agb_to_daddr(struct xfs_mount *mp, xfs_agnumber_t agno,
+				xfs_agblock_t agbno);
+#define	XFS_AGB_TO_DADDR(mp,agno,agbno)	xfs_agb_to_daddr(mp,agno,agbno)
+#else
+#define	XFS_AGB_TO_DADDR(mp,agno,agbno) \
+	((xfs_daddr_t)(XFS_FSB_TO_BB(mp, \
+		(xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno))))
+#endif
+/*
+ * XFS_DADDR_TO_AGNO and XFS_DADDR_TO_AGBNO moved to xfs_mount.h
+ * to avoid header file ordering change
+ */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_DADDR)
+xfs_daddr_t xfs_ag_daddr(struct xfs_mount *mp, xfs_agnumber_t agno,
+				xfs_daddr_t d);
+#define	XFS_AG_DADDR(mp,agno,d)		xfs_ag_daddr(mp,agno,d)
+#else
+#define	XFS_AG_DADDR(mp,agno,d)	(XFS_AGB_TO_DADDR(mp, agno, 0) + (d))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGF)
+xfs_agf_t *xfs_buf_to_agf(struct xfs_buf *bp);
+#define	XFS_BUF_TO_AGF(bp)		xfs_buf_to_agf(bp)
+#else
+#define	XFS_BUF_TO_AGF(bp)	((xfs_agf_t *)XFS_BUF_PTR(bp))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGI)
+xfs_agi_t *xfs_buf_to_agi(struct xfs_buf *bp);
+#define	XFS_BUF_TO_AGI(bp)		xfs_buf_to_agi(bp)
+#else
+#define	XFS_BUF_TO_AGI(bp)	((xfs_agi_t *)XFS_BUF_PTR(bp))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGFL)
+xfs_agfl_t *xfs_buf_to_agfl(struct xfs_buf *bp);
+#define	XFS_BUF_TO_AGFL(bp)		xfs_buf_to_agfl(bp)
+#else
+#define	XFS_BUF_TO_AGFL(bp)	((xfs_agfl_t *)XFS_BUF_PTR(bp))
+#endif
+
+/*
+ * For checking for bad ranges of xfs_daddr_t's, covering multiple
+ * allocation groups or a single xfs_daddr_t that's a superblock copy.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_CHECK_DADDR)
+void xfs_ag_check_daddr(struct xfs_mount *mp, xfs_daddr_t d, xfs_extlen_t len);
+#define	XFS_AG_CHECK_DADDR(mp,d,len)	xfs_ag_check_daddr(mp,d,len)
+#else
+#define	XFS_AG_CHECK_DADDR(mp,d,len)	\
+	((len) == 1 ? \
+	    ASSERT((d) == XFS_SB_DADDR || \
+		   XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \
+	    ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \
+		   XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1)))
+#endif
+
+#endif	/* __XFS_AG_H__ */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
new file mode 100644
index 0000000..36603db
--- /dev/null
+++ b/fs/xfs/xfs_alloc.c
@@ -0,0 +1,2623 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Free space allocation for XFS.
+ */
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_error.h"
+
+
+#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
+
+#define	XFSA_FIXUP_BNO_OK	1
+#define	XFSA_FIXUP_CNT_OK	2
+
+int
+xfs_alloc_search_busy(xfs_trans_t *tp,
+		    xfs_agnumber_t agno,
+		    xfs_agblock_t bno,
+		    xfs_extlen_t len);
+
+#if defined(XFS_ALLOC_TRACE)
+ktrace_t *xfs_alloc_trace_buf;
+
+#define	TRACE_ALLOC(s,a)	\
+	xfs_alloc_trace_alloc(fname, s, a, __LINE__)
+#define	TRACE_FREE(s,a,b,x,f)	\
+	xfs_alloc_trace_free(fname, s, mp, a, b, x, f, __LINE__)
+#define	TRACE_MODAGF(s,a,f)	\
+	xfs_alloc_trace_modagf(fname, s, mp, a, f, __LINE__)
+#define	TRACE_BUSY(fname,s,ag,agb,l,sl,tp)	\
+	xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
+#define	TRACE_UNBUSY(fname,s,ag,sl,tp)	\
+	xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
+#define	TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp)	\
+	xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
+#else
+#define	TRACE_ALLOC(s,a)
+#define	TRACE_FREE(s,a,b,x,f)
+#define	TRACE_MODAGF(s,a,f)
+#define	TRACE_BUSY(s,a,ag,agb,l,sl,tp)
+#define	TRACE_UNBUSY(fname,s,ag,sl,tp)
+#define	TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp)
+#endif	/* XFS_ALLOC_TRACE */
+
+/*
+ * Prototypes for per-ag allocation routines
+ */
+
+STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
+STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
+STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
+STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
+	xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+
+/*
+ * Internal functions.
+ */
+
+/*
+ * Compute aligned version of the found extent.
+ * Takes alignment and min length into account.
+ */
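+/*
+ * For example, a found extent starting at block 13 with length 10 and
+ * an alignment of 4 is rounded up to block 16, leaving 7 usable blocks.
+ */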
+STATIC int				/* success (>= minlen) */
+xfs_alloc_compute_aligned(
+	xfs_agblock_t	foundbno,	/* starting block in found extent */
+	xfs_extlen_t	foundlen,	/* length in found extent */
+	xfs_extlen_t	alignment,	/* alignment for allocation */
+	xfs_extlen_t	minlen,		/* minimum length for allocation */
+	xfs_agblock_t	*resbno,	/* result block number */
+	xfs_extlen_t	*reslen)	/* result length */
+{
+	xfs_agblock_t	bno;
+	xfs_extlen_t	diff;
+	xfs_extlen_t	len;
+
+	if (alignment > 1 && foundlen >= minlen) {
+		bno = roundup(foundbno, alignment);
+		diff = bno - foundbno;
+		len = diff >= foundlen ? 0 : foundlen - diff;
+	} else {
+		bno = foundbno;
+		len = foundlen;
+	}
+	*resbno = bno;
+	*reslen = len;
+	return len >= minlen;
+}
+
+/*
+ * Compute best start block and diff for "near" allocations.
+ * freelen >= wantlen already checked by caller.
+ */
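+/*
+ * For example, wanting 10 blocks at block 100 with an alignment of 1
+ * from a free extent covering blocks 90-119 returns block 100 itself
+ * and a difference of 0, since the free space straddles the target.
+ */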
+STATIC xfs_extlen_t			/* difference value (absolute) */
+xfs_alloc_compute_diff(
+	xfs_agblock_t	wantbno,	/* target starting block */
+	xfs_extlen_t	wantlen,	/* target length */
+	xfs_extlen_t	alignment,	/* target alignment */
+	xfs_agblock_t	freebno,	/* freespace's starting block */
+	xfs_extlen_t	freelen,	/* freespace's length */
+	xfs_agblock_t	*newbnop)	/* result: best start block from free */
+{
+	xfs_agblock_t	freeend;	/* end of freespace extent */
+	xfs_agblock_t	newbno1;	/* return block number */
+	xfs_agblock_t	newbno2;	/* other new block number */
+	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
+	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
+	xfs_agblock_t	wantend;	/* end of target extent */
+
+	ASSERT(freelen >= wantlen);
+	freeend = freebno + freelen;
+	wantend = wantbno + wantlen;
+	if (freebno >= wantbno) {
+		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
+			newbno1 = NULLAGBLOCK;
+	} else if (freeend >= wantend && alignment > 1) {
+		newbno1 = roundup(wantbno, alignment);
+		newbno2 = newbno1 - alignment;
+		if (newbno1 >= freeend)
+			newbno1 = NULLAGBLOCK;
+		else
+			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
+		if (newbno2 < freebno)
+			newbno2 = NULLAGBLOCK;
+		else
+			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
+		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
+			if (newlen1 < newlen2 ||
+			    (newlen1 == newlen2 &&
+			     XFS_ABSDIFF(newbno1, wantbno) >
+			     XFS_ABSDIFF(newbno2, wantbno)))
+				newbno1 = newbno2;
+		} else if (newbno2 != NULLAGBLOCK)
+			newbno1 = newbno2;
+	} else if (freeend >= wantend) {
+		newbno1 = wantbno;
+	} else if (alignment > 1) {
+		newbno1 = roundup(freeend - wantlen, alignment);
+		if (newbno1 > freeend - wantlen &&
+		    newbno1 - alignment >= freebno)
+			newbno1 -= alignment;
+		else if (newbno1 >= freeend)
+			newbno1 = NULLAGBLOCK;
+	} else
+		newbno1 = freeend - wantlen;
+	*newbnop = newbno1;
+	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
+}
+
+/*
+ * Fix up the length, based on mod and prod.
+ * len should be k * prod + mod for some k.
+ * If len is too small it is returned unchanged.
+ * If len hits maxlen it is left alone.
+ */
+STATIC void
+xfs_alloc_fix_len(
+	xfs_alloc_arg_t	*args)		/* allocation argument structure */
+{
+	xfs_extlen_t	k;
+	xfs_extlen_t	rlen;
+
+	ASSERT(args->mod < args->prod);
+	rlen = args->len;
+	ASSERT(rlen >= args->minlen);
+	ASSERT(rlen <= args->maxlen);
+	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
+	    (args->mod == 0 && rlen < args->prod))
+		return;
+	k = rlen % args->prod;
+	if (k == args->mod)
+		return;
+	if (k > args->mod) {
+		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
+			return;
+	} else {
+		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
+		    (int)args->minlen)
+			return;
+	}
+	ASSERT(rlen >= args->minlen);
+	ASSERT(rlen <= args->maxlen);
+	args->len = rlen;
+}
+
+/*
+ * Fix up length if there is too little space left in the a.g.
+ * Return 1 if ok, 0 if too little, should give up.
+ */
+STATIC int
+xfs_alloc_fix_minleft(
+	xfs_alloc_arg_t	*args)		/* allocation argument structure */
+{
+	xfs_agf_t	*agf;		/* a.g. freelist header */
+	int		diff;		/* free space difference */
+
+	if (args->minleft == 0)
+		return 1;
+	agf = XFS_BUF_TO_AGF(args->agbp);
+	diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT)
+		+ INT_GET(agf->agf_flcount, ARCH_CONVERT)
+		- args->len - args->minleft;
+	if (diff >= 0)
+		return 1;
+	args->len += diff;		/* shrink the allocated space */
+	if (args->len >= args->minlen)
+		return 1;
+	args->agbno = NULLAGBLOCK;
+	return 0;
+}
+
+/*
+ * Update the two btrees, logically removing from freespace the extent
+ * starting at rbno, rlen blocks.  The extent is contained within the
+ * actual (current) free extent fbno for flen blocks.
+ * Flags are passed in indicating whether the cursors are set to the
+ * relevant records.
+ */
+STATIC int				/* error code */
+xfs_alloc_fixup_trees(
+	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
+	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
+	xfs_agblock_t	fbno,		/* starting block of free extent */
+	xfs_extlen_t	flen,		/* length of free extent */
+	xfs_agblock_t	rbno,		/* starting block of returned extent */
+	xfs_extlen_t	rlen,		/* length of returned extent */
+	int		flags)		/* flags, XFSA_FIXUP_... */
+{
+	int		error;		/* error code */
+	int		i;		/* operation results */
+	xfs_agblock_t	nfbno1;		/* first new free startblock */
+	xfs_agblock_t	nfbno2;		/* second new free startblock */
+	xfs_extlen_t	nflen1=0;	/* first new free length */
+	xfs_extlen_t	nflen2=0;	/* second new free length */
+
+	/*
+	 * Look up the record in the by-size tree if necessary.
+	 */
+	if (flags & XFSA_FIXUP_CNT_OK) {
+#ifdef DEBUG
+		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(
+			i == 1 && nfbno1 == fbno && nflen1 == flen);
+#endif
+	} else {
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	}
+	/*
+	 * Look up the record in the by-block tree if necessary.
+	 */
+	if (flags & XFSA_FIXUP_BNO_OK) {
+#ifdef DEBUG
+		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(
+			i == 1 && nfbno1 == fbno && nflen1 == flen);
+#endif
+	} else {
+		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	}
+#ifdef DEBUG
+	{
+		xfs_alloc_block_t	*bnoblock;
+		xfs_alloc_block_t	*cntblock;
+
+		if (bno_cur->bc_nlevels == 1 &&
+		    cnt_cur->bc_nlevels == 1) {
+			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
+			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
+			XFS_WANT_CORRUPTED_RETURN(
+				INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT));
+		}
+	}
+#endif
+	/*
+	 * Deal with all four cases: the allocated record is contained
+	 * within the freespace record, so we can have new freespace
+	 * at either (or both) end, or no freespace remaining.
+	 */
+	if (rbno == fbno && rlen == flen)
+		nfbno1 = nfbno2 = NULLAGBLOCK;
+	else if (rbno == fbno) {
+		nfbno1 = rbno + rlen;
+		nflen1 = flen - rlen;
+		nfbno2 = NULLAGBLOCK;
+	} else if (rbno + rlen == fbno + flen) {
+		nfbno1 = fbno;
+		nflen1 = flen - rlen;
+		nfbno2 = NULLAGBLOCK;
+	} else {
+		nfbno1 = fbno;
+		nflen1 = rbno - fbno;
+		nfbno2 = rbno + rlen;
+		nflen2 = (fbno + flen) - nfbno2;
+	}
+	/*
+	 * Delete the entry from the by-size btree.
+	 */
+	if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		return error;
+	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	/*
+	 * Add new by-size btree entry(s).
+	 */
+	if (nfbno1 != NULLAGBLOCK) {
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	}
+	if (nfbno2 != NULLAGBLOCK) {
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	}
+	/*
+	 * Fix up the by-block btree entry(s).
+	 */
+	if (nfbno1 == NULLAGBLOCK) {
+		/*
+		 * No remaining freespace, just delete the by-block tree entry.
+		 */
+		if ((error = xfs_alloc_delete(bno_cur, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	} else {
+		/*
+		 * Update the by-block entry to start later|be shorter.
+		 */
+		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
+			return error;
+	}
+	if (nfbno2 != NULLAGBLOCK) {
+		/*
+		 * 2 resulting free entries, need to add one.
+		 */
+		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		if ((error = xfs_alloc_insert(bno_cur, &i)))
+			return error;
+		XFS_WANT_CORRUPTED_RETURN(i == 1);
+	}
+	return 0;
+}
+
+/*
+ * Read in the allocation group free block array.
+ */
+STATIC int				/* error */
+xfs_alloc_read_agfl(
+	xfs_mount_t	*mp,		/* mount point structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
+{
+	xfs_buf_t	*bp;		/* return value */
+	int		error;
+
+	ASSERT(agno != NULLAGNUMBER);
+	error = xfs_trans_read_buf(
+			mp, tp, mp->m_ddev_targp,
+			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
+			XFS_FSS_TO_BB(mp, 1), 0, &bp);
+	if (error)
+		return error;
+	ASSERT(bp);
+	ASSERT(!XFS_BUF_GETERROR(bp));
+	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
+	*bpp = bp;
+	return 0;
+}
+
+#if defined(XFS_ALLOC_TRACE)
+/*
+ * Add an allocation trace entry for an alloc call.
+ */
+STATIC void
+xfs_alloc_trace_alloc(
+	char		*name,		/* function tag string */
+	char		*str,		/* additional string */
+	xfs_alloc_arg_t	*args,		/* allocation argument structure */
+	int		line)		/* source line number */
+{
+	ktrace_enter(xfs_alloc_trace_buf,
+		(void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
+		(void *)name,
+		(void *)str,
+		(void *)args->mp,
+		(void *)(__psunsigned_t)args->agno,
+		(void *)(__psunsigned_t)args->agbno,
+		(void *)(__psunsigned_t)args->minlen,
+		(void *)(__psunsigned_t)args->maxlen,
+		(void *)(__psunsigned_t)args->mod,
+		(void *)(__psunsigned_t)args->prod,
+		(void *)(__psunsigned_t)args->minleft,
+		(void *)(__psunsigned_t)args->total,
+		(void *)(__psunsigned_t)args->alignment,
+		(void *)(__psunsigned_t)args->len,
+		(void *)((((__psint_t)args->type) << 16) |
+			 (__psint_t)args->otype),
+		(void *)(__psint_t)((args->wasdel << 3) |
+				    (args->wasfromfl << 2) |
+				    (args->isfl << 1) |
+				    (args->userdata << 0)));
+}
+
+/*
+ * Add an allocation trace entry for a free call.
+ */
+STATIC void
+xfs_alloc_trace_free(
+	char		*name,		/* function tag string */
+	char		*str,		/* additional string */
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_agblock_t	agbno,		/* a.g. relative block number */
+	xfs_extlen_t	len,		/* length of extent */
+	int		isfl,		/* set if is freelist allocation/free */
+	int		line)		/* source line number */
+{
+	ktrace_enter(xfs_alloc_trace_buf,
+		(void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
+		(void *)name,
+		(void *)str,
+		(void *)mp,
+		(void *)(__psunsigned_t)agno,
+		(void *)(__psunsigned_t)agbno,
+		(void *)(__psunsigned_t)len,
+		(void *)(__psint_t)isfl,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * Add an allocation trace entry for modifying an agf.
+ */
+STATIC void
+xfs_alloc_trace_modagf(
+	char		*name,		/* function tag string */
+	char		*str,		/* additional string */
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_agf_t	*agf,		/* new agf value */
+	int		flags,		/* logging flags for agf */
+	int		line)		/* source line number */
+{
+	ktrace_enter(xfs_alloc_trace_buf,
+		(void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
+		(void *)name,
+		(void *)str,
+		(void *)mp,
+		(void *)(__psint_t)flags,
+		(void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
+						ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
+						ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
+						ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
+						ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT),
+		(void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT));
+}
+
+STATIC void
+xfs_alloc_trace_busy(
+	char		*name,		/* function tag string */
+	char		*str,		/* additional string */
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_agblock_t	agbno,		/* a.g. relative block number */
+	xfs_extlen_t	len,		/* length of extent */
+	int		slot,		/* perag Busy slot */
+	xfs_trans_t	*tp,
+	int		trtype,		/* type: add, delete, search */
+	int		line)		/* source line number */
+{
+	ktrace_enter(xfs_alloc_trace_buf,
+		(void *)(__psint_t)(trtype | (line << 16)),
+		(void *)name,
+		(void *)str,
+		(void *)mp,
+		(void *)(__psunsigned_t)agno,
+		(void *)(__psunsigned_t)agbno,
+		(void *)(__psunsigned_t)len,
+		(void *)(__psint_t)slot,
+		(void *)tp,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+}
+#endif	/* XFS_ALLOC_TRACE */
+
+/*
+ * Allocation group level functions.
+ */
+
+/*
+ * Allocate a variable extent in the allocation group agno.
+ * Type and bno are used to determine where in the allocation group the
+ * extent will start.
+ * Extent's length (returned in *len) will be between minlen and maxlen,
+ * and of the form k * prod + mod unless there's nothing that large.
+ * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
+ */
+STATIC int			/* error */
+xfs_alloc_ag_vextent(
+	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
+{
+	int		error=0;
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_ag_vextent";
+#endif
+
+	ASSERT(args->minlen > 0);
+	ASSERT(args->maxlen > 0);
+	ASSERT(args->minlen <= args->maxlen);
+	ASSERT(args->mod < args->prod);
+	ASSERT(args->alignment > 0);
+	/*
+	 * Branch to correct routine based on the type.
+	 */
+	args->wasfromfl = 0;
+	switch (args->type) {
+	case XFS_ALLOCTYPE_THIS_AG:
+		error = xfs_alloc_ag_vextent_size(args);
+		break;
+	case XFS_ALLOCTYPE_NEAR_BNO:
+		error = xfs_alloc_ag_vextent_near(args);
+		break;
+	case XFS_ALLOCTYPE_THIS_BNO:
+		error = xfs_alloc_ag_vextent_exact(args);
+		break;
+	default:
+		ASSERT(0);
+		/* NOTREACHED */
+	}
+	if (error)
+		return error;
+	/*
+	 * If the allocation worked, need to change the agf structure
+	 * (and log it), and the superblock.
+	 */
+	if (args->agbno != NULLAGBLOCK) {
+		xfs_agf_t	*agf;	/* allocation group freelist header */
+#ifdef XFS_ALLOC_TRACE
+		xfs_mount_t	*mp = args->mp;
+#endif
+		long		slen = (long)args->len;
+
+		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
+		ASSERT(!(args->wasfromfl) || !args->isfl);
+		ASSERT(args->agbno % args->alignment == 0);
+		if (!(args->wasfromfl)) {
+
+			agf = XFS_BUF_TO_AGF(args->agbp);
+			INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len));
+			xfs_trans_agblocks_delta(args->tp,
+						 -((long)(args->len)));
+			args->pag->pagf_freeblks -= args->len;
+			ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT)
+				<= INT_GET(agf->agf_length, ARCH_CONVERT));
+			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
+			xfs_alloc_log_agf(args->tp, args->agbp,
+						XFS_AGF_FREEBLKS);
+			/* search the busylist for these blocks */
+			xfs_alloc_search_busy(args->tp, args->agno,
+					args->agbno, args->len);
+		}
+		if (!args->isfl)
+			xfs_trans_mod_sb(args->tp,
+				args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
+					XFS_TRANS_SB_FDBLOCKS, -slen);
+		XFS_STATS_INC(xs_allocx);
+		XFS_STATS_ADD(xs_allocb, args->len);
+	}
+	return 0;
+}
+
+/*
+ * Allocate a variable extent at exactly agno/bno.
+ * Extent's length (returned in *len) will be between minlen and maxlen,
+ * and of the form k * prod + mod unless there's nothing that large.
+ * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
+ */
+STATIC int			/* error */
+xfs_alloc_ag_vextent_exact(
+	xfs_alloc_arg_t	*args)	/* allocation argument structure */
+{
+	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
+	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
+	xfs_agblock_t	end;	/* end of allocated extent */
+	int		error;
+	xfs_agblock_t	fbno;	/* start block of found extent */
+	xfs_agblock_t	fend;	/* end block of found extent */
+	xfs_extlen_t	flen;	/* length of found extent */
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_ag_vextent_exact";
+#endif
+	int		i;	/* success/failure of operation */
+	xfs_agblock_t	maxend;	/* end of maximal extent */
+	xfs_agblock_t	minend;	/* end of minimal extent */
+	xfs_extlen_t	rlen;	/* length of returned extent */
+
+	ASSERT(args->alignment == 1);
+	/*
+	 * Allocate/initialize a cursor for the by-number freespace btree.
+	 */
+	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	/*
+	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
+	 * Look for the closest free block <= bno, it must contain bno
+	 * if any free block does.
+	 */
+	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+		goto error0;
+	if (!i) {
+		/*
+		 * Didn't find it, return null.
+		 */
+		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+		args->agbno = NULLAGBLOCK;
+		return 0;
+	}
+	/*
+	 * Grab the freespace record.
+	 */
+	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+		goto error0;
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	ASSERT(fbno <= args->agbno);
+	minend = args->agbno + args->minlen;
+	maxend = args->agbno + args->maxlen;
+	fend = fbno + flen;
+	/*
+	 * Give up if the freespace isn't long enough for the minimum request.
+	 */
+	if (fend < minend) {
+		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+		args->agbno = NULLAGBLOCK;
+		return 0;
+	}
+	/*
+	 * End of extent will be smaller of the freespace end and the
+	 * maximal requested end.
+	 */
+	end = XFS_AGBLOCK_MIN(fend, maxend);
+	/*
+	 * Fix the length according to mod and prod if given.
+	 */
+	args->len = end - args->agbno;
+	xfs_alloc_fix_len(args);
+	if (!xfs_alloc_fix_minleft(args)) {
+		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+		return 0;
+	}
+	rlen = args->len;
+	ASSERT(args->agbno + rlen <= fend);
+	end = args->agbno + rlen;
+	/*
+	 * We are allocating agbno for rlen [agbno .. end]
+	 * Allocate/initialize a cursor for the by-size btree.
+	 */
+	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	ASSERT(args->agbno + args->len <=
+		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
+			ARCH_CONVERT));
+	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
+			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
+		goto error0;
+	}
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+	TRACE_ALLOC("normal", args);
+	args->wasfromfl = 0;
+	return 0;
+
+error0:
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
+	TRACE_ALLOC("error", args);
+	return error;
+}
+
+/*
+ * Allocate a variable extent near bno in the allocation group agno.
+ * Extent's length (returned in len) will be between minlen and maxlen,
+ * and of the form k * prod + mod unless there's nothing that large.
+ * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
+ */
+STATIC int				/* error */
+xfs_alloc_ag_vextent_near(
+	xfs_alloc_arg_t	*args)		/* allocation argument structure */
+{
+	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
+	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
+	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_ag_vextent_near";
+#endif
+	xfs_agblock_t	gtbno;		/* start bno of right side entry */
+	xfs_agblock_t	gtbnoa;		/* aligned ... */
+	xfs_extlen_t	gtdiff;		/* difference to right side entry */
+	xfs_extlen_t	gtlen;		/* length of right side entry */
+	xfs_extlen_t	gtlena;		/* aligned ... */
+	xfs_agblock_t	gtnew;		/* useful start bno of right side */
+	int		error;		/* error code */
+	int		i;		/* result code, temporary */
+	int		j;		/* result code, temporary */
+	xfs_agblock_t	ltbno;		/* start bno of left side entry */
+	xfs_agblock_t	ltbnoa;		/* aligned ... */
+	xfs_extlen_t	ltdiff;		/* difference to left side entry */
+	/*REFERENCED*/
+	xfs_agblock_t	ltend;		/* end bno of left side entry */
+	xfs_extlen_t	ltlen;		/* length of left side entry */
+	xfs_extlen_t	ltlena;		/* aligned ... */
+	xfs_agblock_t	ltnew;		/* useful start bno of left side */
+	xfs_extlen_t	rlen;		/* length of returned extent */
+#if defined(DEBUG) && defined(__KERNEL__)
+	/*
+	 * Randomly don't execute the first algorithm.
+	 */
+	int		dofirst;	/* set to do first algorithm */
+
+	dofirst = random() & 1;
+#endif
+	/*
+	 * Get a cursor for the by-size btree.
+	 */
+	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	ltlen = 0;
+	bno_cur_lt = bno_cur_gt = NULL;
+	/*
+	 * See if there are any free extents as big as maxlen.
+	 */
+	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
+		goto error0;
+	/*
+	 * If none, then pick up the last entry in the tree unless the
+	 * tree is empty.
+	 */
+	if (!i) {
+		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
+				&ltlen, &i)))
+			goto error0;
+		if (i == 0 || ltlen == 0) {
+			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+			return 0;
+		}
+		ASSERT(i == 1);
+	}
+	args->wasfromfl = 0;
+	/*
+	 * First algorithm.
+	 * If the requested extent is large wrt the freespaces available
+	 * in this a.g., then the cursor will be pointing to a btree entry
+	 * near the right edge of the tree.  If it's in the last btree leaf
+	 * block, then we just examine all the entries in that block
+	 * that are big enough, and pick the best one.
+	 * This is written as a while loop so we can break out of it,
+	 * but we never loop back to the top.
+	 */
+	while (xfs_btree_islastblock(cnt_cur, 0)) {
+		xfs_extlen_t	bdiff;
+		int		besti=0;
+		xfs_extlen_t	blen=0;
+		xfs_agblock_t	bnew=0;
+
+#if defined(DEBUG) && defined(__KERNEL__)
+		if (!dofirst)
+			break;
+#endif
+		/*
+		 * Start from the entry that lookup found, sequence through
+		 * all larger free blocks.  If we're actually pointing at a
+		 * record smaller than maxlen, go to the start of this block,
+		 * and skip all those smaller than minlen.
+		 */
+		if (ltlen || args->alignment > 1) {
+			cnt_cur->bc_ptrs[0] = 1;
+			do {
+				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
+						&ltlen, &i)))
+					goto error0;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+				if (ltlen >= args->minlen)
+					break;
+				if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
+					goto error0;
+			} while (i);
+			ASSERT(ltlen >= args->minlen);
+			if (!i)
+				break;
+		}
+		i = cnt_cur->bc_ptrs[0];
+		for (j = 1, blen = 0, bdiff = 0;
+		     !error && j && (blen < args->maxlen || bdiff > 0);
+		     error = xfs_alloc_increment(cnt_cur, 0, &j)) {
+			/*
+			 * For each entry, decide if it's better than
+			 * the previous best entry.
+			 */
+			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if (!xfs_alloc_compute_aligned(ltbno, ltlen,
+					args->alignment, args->minlen,
+					&ltbnoa, &ltlena))
+				continue;
+			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
+			xfs_alloc_fix_len(args);
+			ASSERT(args->len >= args->minlen);
+			if (args->len < blen)
+				continue;
+			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+				args->alignment, ltbno, ltlen, &ltnew);
+			if (ltnew != NULLAGBLOCK &&
+			    (args->len > blen || ltdiff < bdiff)) {
+				bdiff = ltdiff;
+				bnew = ltnew;
+				blen = args->len;
+				besti = cnt_cur->bc_ptrs[0];
+			}
+		}
+		/*
+		 * It didn't work.  We COULD be in a case where
+		 * there's a good record somewhere, so try again.
+		 */
+		if (blen == 0)
+			break;
+		/*
+		 * Point at the best entry, and retrieve it again.
+		 */
+		cnt_cur->bc_ptrs[0] = besti;
+		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		ltend = ltbno + ltlen;
+		ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
+				ARCH_CONVERT));
+		args->len = blen;
+		if (!xfs_alloc_fix_minleft(args)) {
+			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+			TRACE_ALLOC("nominleft", args);
+			return 0;
+		}
+		blen = args->len;
+		/*
+		 * We are allocating starting at bnew for blen blocks.
+		 */
+		args->agbno = bnew;
+		ASSERT(bnew >= ltbno);
+		ASSERT(bnew + blen <= ltend);
+		/*
+		 * Set up a cursor for the by-bno tree.
+		 */
+		bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
+			args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
+		/*
+		 * Fix up the btree entries.
+		 */
+		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
+				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
+			goto error0;
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+		TRACE_ALLOC("first", args);
+		return 0;
+	}
+	/*
+	 * Second algorithm.
+	 * Search in the by-bno tree to the left and to the right
+	 * simultaneously, until in each case we find a space big enough,
+	 * or run into the edge of the tree.  When we run into the edge,
+	 * we deallocate that cursor.
+	 * If both searches succeed, we compare the two spaces and pick
+	 * the better one.
+	 * With alignment, it's possible for both to fail; the upper
+	 * level algorithm that picks allocation groups for allocations
+	 * is not supposed to do this.
+	 */
+	/*
+	 * Allocate and initialize the cursor for the leftward search.
+	 */
+	bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	/*
+	 * Lookup <= bno to find the leftward search's starting point.
+	 */
+	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
+		goto error0;
+	if (!i) {
+		/*
+		 * Didn't find anything; use this cursor for the rightward
+		 * search.
+		 */
+		bno_cur_gt = bno_cur_lt;
+		bno_cur_lt = NULL;
+	}
+	/*
+	 * Found something.  Duplicate the cursor for the rightward search.
+	 */
+	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
+		goto error0;
+	/*
+	 * Increment the cursor, so we will point at the entry just right
+	 * of the leftward entry if any, or to the leftmost entry.
+	 */
+	if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+		goto error0;
+	if (!i) {
+		/*
+		 * It failed, there are no rightward entries.
+		 */
+		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
+		bno_cur_gt = NULL;
+	}
+	/*
+	 * Loop going left with the leftward cursor, right with the
+	 * rightward cursor, until either both directions give up or
+	 * we find an entry at least as big as minlen.
+	 */
+	do {
+		if (bno_cur_lt) {
+			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if (xfs_alloc_compute_aligned(ltbno, ltlen,
+					args->alignment, args->minlen,
+					&ltbnoa, &ltlena))
+				break;
+			if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
+				goto error0;
+			if (!i) {
+				xfs_btree_del_cursor(bno_cur_lt,
+						     XFS_BTREE_NOERROR);
+				bno_cur_lt = NULL;
+			}
+		}
+		if (bno_cur_gt) {
+			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if (xfs_alloc_compute_aligned(gtbno, gtlen,
+					args->alignment, args->minlen,
+					&gtbnoa, &gtlena))
+				break;
+			if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+				goto error0;
+			if (!i) {
+				xfs_btree_del_cursor(bno_cur_gt,
+						     XFS_BTREE_NOERROR);
+				bno_cur_gt = NULL;
+			}
+		}
+	} while (bno_cur_lt || bno_cur_gt);
+	/*
+	 * Got both cursors still active, need to find better entry.
+	 */
+	if (bno_cur_lt && bno_cur_gt) {
+		/*
+		 * Left side is long enough, look for a right side entry.
+		 */
+		if (ltlena >= args->minlen) {
+			/*
+			 * Fix up the length.
+			 */
+			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
+			xfs_alloc_fix_len(args);
+			rlen = args->len;
+			ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+				args->alignment, ltbno, ltlen, &ltnew);
+			/*
+			 * Not perfect.
+			 */
+			if (ltdiff) {
+				/*
+				 * Look until we find a better one, run out of
+				 * space, or run off the end.
+				 */
+				while (bno_cur_lt && bno_cur_gt) {
+					if ((error = xfs_alloc_get_rec(
+							bno_cur_gt, &gtbno,
+							&gtlen, &i)))
+						goto error0;
+					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+					xfs_alloc_compute_aligned(gtbno, gtlen,
+						args->alignment, args->minlen,
+						&gtbnoa, &gtlena);
+					/*
+					 * The left one is clearly better.
+					 */
+					if (gtbnoa >= args->agbno + ltdiff) {
+						xfs_btree_del_cursor(
+							bno_cur_gt,
+							XFS_BTREE_NOERROR);
+						bno_cur_gt = NULL;
+						break;
+					}
+					/*
+					 * If we reach a big enough entry,
+					 * compare the two and pick the best.
+					 */
+					if (gtlena >= args->minlen) {
+						args->len =
+							XFS_EXTLEN_MIN(gtlena,
+								args->maxlen);
+						xfs_alloc_fix_len(args);
+						rlen = args->len;
+						gtdiff = xfs_alloc_compute_diff(
+							args->agbno, rlen,
+							args->alignment,
+							gtbno, gtlen, &gtnew);
+						/*
+						 * Right side is better.
+						 */
+						if (gtdiff < ltdiff) {
+							xfs_btree_del_cursor(
+								bno_cur_lt,
+								XFS_BTREE_NOERROR);
+							bno_cur_lt = NULL;
+						}
+						/*
+						 * Left side is better.
+						 */
+						else {
+							xfs_btree_del_cursor(
+								bno_cur_gt,
+								XFS_BTREE_NOERROR);
+							bno_cur_gt = NULL;
+						}
+						break;
+					}
+					/*
+					 * Fell off the right end.
+					 */
+					if ((error = xfs_alloc_increment(
+							bno_cur_gt, 0, &i)))
+						goto error0;
+					if (!i) {
+						xfs_btree_del_cursor(
+							bno_cur_gt,
+							XFS_BTREE_NOERROR);
+						bno_cur_gt = NULL;
+						break;
+					}
+				}
+			}
+			/*
+			 * The left side is perfect, trash the right side.
+			 */
+			else {
+				xfs_btree_del_cursor(bno_cur_gt,
+						     XFS_BTREE_NOERROR);
+				bno_cur_gt = NULL;
+			}
+		}
+		/*
+		 * It's the right side that was found first, look left.
+		 */
+		else {
+			/*
+			 * Fix up the length.
+			 */
+			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
+			xfs_alloc_fix_len(args);
+			rlen = args->len;
+			gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+				args->alignment, gtbno, gtlen, &gtnew);
+			/*
+			 * Right side entry isn't perfect.
+			 */
+			if (gtdiff) {
+				/*
+				 * Look until we find a better one, run out of
+				 * space, or run off the end.
+				 */
+				while (bno_cur_lt && bno_cur_gt) {
+					if ((error = xfs_alloc_get_rec(
+							bno_cur_lt, &ltbno,
+							&ltlen, &i)))
+						goto error0;
+					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+					xfs_alloc_compute_aligned(ltbno, ltlen,
+						args->alignment, args->minlen,
+						&ltbnoa, &ltlena);
+					/*
+					 * The right one is clearly better.
+					 */
+					if (ltbnoa <= args->agbno - gtdiff) {
+						xfs_btree_del_cursor(
+							bno_cur_lt,
+							XFS_BTREE_NOERROR);
+						bno_cur_lt = NULL;
+						break;
+					}
+					/*
+					 * If we reach a big enough entry,
+					 * compare the two and pick the best.
+					 */
+					if (ltlena >= args->minlen) {
+						args->len = XFS_EXTLEN_MIN(
+							ltlena, args->maxlen);
+						xfs_alloc_fix_len(args);
+						rlen = args->len;
+						ltdiff = xfs_alloc_compute_diff(
+							args->agbno, rlen,
+							args->alignment,
+							ltbno, ltlen, &ltnew);
+						/*
+						 * Left side is better.
+						 */
+						if (ltdiff < gtdiff) {
+							xfs_btree_del_cursor(
+								bno_cur_gt,
+								XFS_BTREE_NOERROR);
+							bno_cur_gt = NULL;
+						}
+						/*
+						 * Right side is better.
+						 */
+						else {
+							xfs_btree_del_cursor(
+								bno_cur_lt,
+								XFS_BTREE_NOERROR);
+							bno_cur_lt = NULL;
+						}
+						break;
+					}
+					/*
+					 * Fell off the left end.
+					 */
+					if ((error = xfs_alloc_decrement(
+							bno_cur_lt, 0, &i)))
+						goto error0;
+					if (!i) {
+						xfs_btree_del_cursor(bno_cur_lt,
+							XFS_BTREE_NOERROR);
+						bno_cur_lt = NULL;
+						break;
+					}
+				}
+			}
+			/*
+			 * The right side is perfect, trash the left side.
+			 */
+			else {
+				xfs_btree_del_cursor(bno_cur_lt,
+					XFS_BTREE_NOERROR);
+				bno_cur_lt = NULL;
+			}
+		}
+	}
+	/*
+	 * If we couldn't get anything, give up.
+	 */
+	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+		TRACE_ALLOC("neither", args);
+		args->agbno = NULLAGBLOCK;
+		return 0;
+	}
+	/*
+	 * At this point we have selected a freespace entry, either to the
+	 * left or to the right.  If it's on the right, copy all the
+	 * useful variables to the "left" set so we only have one
+	 * copy of this code.
+	 */
+	if (bno_cur_gt) {
+		bno_cur_lt = bno_cur_gt;
+		bno_cur_gt = NULL;
+		ltbno = gtbno;
+		ltbnoa = gtbnoa;
+		ltlen = gtlen;
+		ltlena = gtlena;
+		j = 1;
+	} else
+		j = 0;
+	/*
+	 * Fix up the length and compute the useful address.
+	 */
+	ltend = ltbno + ltlen;
+	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
+	xfs_alloc_fix_len(args);
+	if (!xfs_alloc_fix_minleft(args)) {
+		TRACE_ALLOC("nominleft", args);
+		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+		return 0;
+	}
+	rlen = args->len;
+	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
+		ltlen, &ltnew);
+	ASSERT(ltnew >= ltbno);
+	ASSERT(ltnew + rlen <= ltend);
+	ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
+		ARCH_CONVERT));
+	args->agbno = ltnew;
+	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
+			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
+		goto error0;
+	TRACE_ALLOC(j ? "gt" : "lt", args);
+	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+	return 0;
+
+ error0:
+	TRACE_ALLOC("error", args);
+	if (cnt_cur != NULL)
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
+	if (bno_cur_lt != NULL)
+		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
+	if (bno_cur_gt != NULL)
+		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
+	return error;
+}
+
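The "second algorithm" above walks one cursor leftward and one cursor rightward from the wanted block and keeps whichever usable extent lands closer. A minimal sketch of that idea over a plain sorted array rather than the by-bno btree; struct ext and find_near() are illustrative names, and alignment and the exact XFS tie-breaking rules are left out.

#include <stddef.h>

struct ext { unsigned long bno; unsigned long len; };	/* free extent */

/* Index of the extent (len >= minlen) whose start is nearest to target,
 * or -1 if nothing qualifies.  exts[] must be sorted by bno. */
static int find_near(const struct ext *exts, size_t n,
		     unsigned long target, unsigned long minlen)
{
	long lt, gt;
	size_t i;

	/* Split the array at target: lt scans left, gt scans right. */
	for (i = 0; i < n && exts[i].bno <= target; i++)
		;
	lt = (long)i - 1;
	gt = (i < n) ? (long)i : -1;

	/* Walk outward until each side finds something big enough. */
	while (lt >= 0 && exts[lt].len < minlen)
		lt--;
	while (gt >= 0 && (size_t)gt < n && exts[gt].len < minlen)
		gt++;
	if (gt >= 0 && (size_t)gt >= n)
		gt = -1;

	if (lt < 0)
		return (int)gt;
	if (gt < 0)
		return (int)lt;
	/* Both sides found one: keep whichever start is closer to target. */
	return (target - exts[lt].bno <= exts[gt].bno - target) ?
		(int)lt : (int)gt;
}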
+/*
+ * Allocate a variable extent anywhere in the allocation group agno.
+ * Extent's length (returned in len) will be between minlen and maxlen,
+ * and of the form k * prod + mod unless there's nothing that large.
+ * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
+ */
+STATIC int				/* error */
+xfs_alloc_ag_vextent_size(
+	xfs_alloc_arg_t	*args)		/* allocation argument structure */
+{
+	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
+	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
+	int		error;		/* error result */
+	xfs_agblock_t	fbno;		/* start of found freespace */
+	xfs_extlen_t	flen;		/* length of found freespace */
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_ag_vextent_size";
+#endif
+	int		i;		/* temp status variable */
+	xfs_agblock_t	rbno;		/* returned block number */
+	xfs_extlen_t	rlen;		/* length of returned extent */
+
+	/*
+	 * Allocate and initialize a cursor for the by-size btree.
+	 */
+	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	bno_cur = NULL;
+	/*
+	 * Look for an entry >= maxlen+alignment-1 blocks.
+	 */
+	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
+			args->maxlen + args->alignment - 1, &i)))
+		goto error0;
+	/*
+	 * If none, then pick up the last entry in the tree unless the
+	 * tree is empty.
+	 */
+	if (!i) {
+		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
+				&flen, &i)))
+			goto error0;
+		if (i == 0 || flen == 0) {
+			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+			TRACE_ALLOC("noentry", args);
+			return 0;
+		}
+		ASSERT(i == 1);
+	}
+	/*
+	 * There's a freespace at least as big as maxlen+alignment-1; get it.
+	 */
+	else {
+		if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	}
+	/*
+	 * In the first case above, we got the last entry in the
+	 * by-size btree.  Now we check to see if the space hits maxlen
+	 * once aligned; if not, we search left for something better.
+	 * This can't happen in the second case above.
+	 */
+	xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
+		&rbno, &rlen);
+	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
+	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
+	if (rlen < args->maxlen) {
+		xfs_agblock_t	bestfbno;
+		xfs_extlen_t	bestflen;
+		xfs_agblock_t	bestrbno;
+		xfs_extlen_t	bestrlen;
+
+		bestrlen = rlen;
+		bestrbno = rbno;
+		bestflen = flen;
+		bestfbno = fbno;
+		for (;;) {
+			if ((error = xfs_alloc_decrement(cnt_cur, 0, &i)))
+				goto error0;
+			if (i == 0)
+				break;
+			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
+					&i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if (flen < bestrlen)
+				break;
+			xfs_alloc_compute_aligned(fbno, flen, args->alignment,
+				args->minlen, &rbno, &rlen);
+			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
+			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+				(rlen <= flen && rbno + rlen <= fbno + flen),
+				error0);
+			if (rlen > bestrlen) {
+				bestrlen = rlen;
+				bestrbno = rbno;
+				bestflen = flen;
+				bestfbno = fbno;
+				if (rlen == args->maxlen)
+					break;
+			}
+		}
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
+				&i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		rlen = bestrlen;
+		rbno = bestrbno;
+		flen = bestflen;
+		fbno = bestfbno;
+	}
+	args->wasfromfl = 0;
+	/*
+	 * Fix up the length.
+	 */
+	args->len = rlen;
+	xfs_alloc_fix_len(args);
+	if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+		TRACE_ALLOC("nominleft", args);
+		args->agbno = NULLAGBLOCK;
+		return 0;
+	}
+	rlen = args->len;
+	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
+	/*
+	 * Allocate and initialize a cursor for the by-block tree.
+	 */
+	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
+			rbno, rlen, XFSA_FIXUP_CNT_OK)))
+		goto error0;
+	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	cnt_cur = bno_cur = NULL;
+	args->len = rlen;
+	args->agbno = rbno;
+	XFS_WANT_CORRUPTED_GOTO(
+		args->agbno + args->len <=
+			INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
+			ARCH_CONVERT),
+		error0);
+	TRACE_ALLOC("normal", args);
+	return 0;
+
+error0:
+	TRACE_ALLOC("error", args);
+	if (cnt_cur)
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
+	if (bno_cur)
+		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
+	return error;
+}
+
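Both allocators above repeatedly ask xfs_alloc_compute_aligned() whether a free extent is still useful once its start is rounded up to the requested alignment. A toy stand-in for that calculation, assuming align is at least 1 as the callers arrange; the exact return convention of the real helper is not reproduced here.

static int aligned_extent(unsigned long bno, unsigned long len,
			  unsigned long align, unsigned long minlen,
			  unsigned long *abno, unsigned long *alen)
{
	unsigned long first = ((bno + align - 1) / align) * align; /* round up */
	unsigned long lost = first - bno;

	if (lost >= len || len - lost < minlen)
		return 0;			/* nothing usable once aligned */
	*abno = first;
	*alen = len - lost;
	return 1;
}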
+/*
+ * Deal with the case where only small freespaces remain.
+ * Either return the contents of the last freespace record,
+ * or allocate space from the freelist if there is nothing in the tree.
+ */
+STATIC int			/* error */
+xfs_alloc_ag_vextent_small(
+	xfs_alloc_arg_t	*args,	/* allocation argument structure */
+	xfs_btree_cur_t	*ccur,	/* by-size cursor */
+	xfs_agblock_t	*fbnop,	/* result block number */
+	xfs_extlen_t	*flenp,	/* result length */
+	int		*stat)	/* status: 0-freelist, 1-normal/none */
+{
+	int		error;
+	xfs_agblock_t	fbno;
+	xfs_extlen_t	flen;
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_ag_vextent_small";
+#endif
+	int		i;
+
+	if ((error = xfs_alloc_decrement(ccur, 0, &i)))
+		goto error0;
+	if (i) {
+		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	}
+	/*
+	 * Nothing in the btree, try the freelist.  Make sure
+	 * to respect minleft even when pulling from the
+	 * freelist.
+	 */
+	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
+		 (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount,
+			ARCH_CONVERT) > args->minleft)) {
+		if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno)))
+			goto error0;
+		if (fbno != NULLAGBLOCK) {
+			if (args->userdata) {
+				xfs_buf_t	*bp;
+
+				bp = xfs_btree_get_bufs(args->mp, args->tp,
+					args->agno, fbno, 0);
+				xfs_trans_binval(args->tp, bp);
+			}
+			args->len = 1;
+			args->agbno = fbno;
+			XFS_WANT_CORRUPTED_GOTO(
+				args->agbno + args->len <=
+				INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
+					ARCH_CONVERT),
+				error0);
+			args->wasfromfl = 1;
+			TRACE_ALLOC("freelist", args);
+			*stat = 0;
+			return 0;
+		}
+		/*
+		 * Nothing in the freelist.
+		 */
+		else
+			flen = 0;
+	}
+	/*
+	 * Can't allocate from the freelist for some reason.
+	 */
+	else
+		flen = 0;
+	/*
+	 * Can't do the allocation, give up.
+	 */
+	if (flen < args->minlen) {
+		args->agbno = NULLAGBLOCK;
+		TRACE_ALLOC("notenough", args);
+		flen = 0;
+	}
+	*fbnop = fbno;
+	*flenp = flen;
+	*stat = 1;
+	TRACE_ALLOC("normal", args);
+	return 0;
+
+error0:
+	TRACE_ALLOC("error", args);
+	return error;
+}
+
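The allocators above also promise a length of the form k * prod + mod (see the comment before xfs_alloc_ag_vextent_size). A sketch of that trimming step under the assumption mod < prod; fix_len() is an illustrative name, and the real xfs_alloc_fix_len() adjusts the argument structure in place rather than returning a value.

static unsigned long fix_len(unsigned long len, unsigned long minlen,
			     unsigned long prod, unsigned long mod)
{
	if (prod <= 1)
		return len;		/* no multiple/remainder constraint */
	if (len < mod)
		return 0;		/* cannot keep the remainder */
	len = ((len - mod) / prod) * prod + mod;	/* largest fit <= len */
	return len >= minlen ? len : 0;	/* 0: constraint unsatisfiable */
}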
+/*
+ * Free the extent starting at agno/bno for length.
+ */
+STATIC int			/* error */
+xfs_free_ag_extent(
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
+	xfs_agnumber_t	agno,	/* allocation group number */
+	xfs_agblock_t	bno,	/* starting block number */
+	xfs_extlen_t	len,	/* length of extent */
+	int		isfl)	/* set if is freelist blocks - no sb acctg */
+{
+	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
+	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
+	int		error;		/* error return value */
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_free_ag_extent";
+#endif
+	xfs_agblock_t	gtbno;		/* start of right neighbor block */
+	xfs_extlen_t	gtlen;		/* length of right neighbor block */
+	int		haveleft;	/* have a left neighbor block */
+	int		haveright;	/* have a right neighbor block */
+	int		i;		/* temp, result code */
+	xfs_agblock_t	ltbno;		/* start of left neighbor block */
+	xfs_extlen_t	ltlen;		/* length of left neighbor block */
+	xfs_mount_t	*mp;		/* mount point struct for filesystem */
+	xfs_agblock_t	nbno;		/* new starting block of freespace */
+	xfs_extlen_t	nlen;		/* new length of freespace */
+
+	mp = tp->t_mountp;
+	/*
+	 * Allocate and initialize a cursor for the by-block btree.
+	 */
+	bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL,
+		0);
+	cnt_cur = NULL;
+	/*
+	 * Look for a neighboring block on the left (lower block numbers)
+	 * that is contiguous with this space.
+	 */
+	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
+		goto error0;
+	if (haveleft) {
+		/*
+		 * There is a block to our left.
+		 */
+		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * It's not contiguous, though.
+		 */
+		if (ltbno + ltlen < bno)
+			haveleft = 0;
+		else {
+			/*
+			 * If this failure happens the request to free this
+			 * space was invalid, it's (partly) already free.
+			 * Very bad.
+			 */
+			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
+		}
+	}
+	/*
+	 * Look for a neighboring block on the right (higher block numbers)
+	 * that is contiguous with this space.
+	 */
+	if ((error = xfs_alloc_increment(bno_cur, 0, &haveright)))
+		goto error0;
+	if (haveright) {
+		/*
+		 * There is a block to our right.
+		 */
+		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * It's not contiguous, though.
+		 */
+		if (bno + len < gtbno)
+			haveright = 0;
+		else {
+			/*
+			 * If this failure happens the request to free this
+			 * space was invalid, it's (partly) already free.
+			 * Very bad.
+			 */
+			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
+		}
+	}
+	/*
+	 * Now allocate and initialize a cursor for the by-size tree.
+	 */
+	cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL,
+		0);
+	/*
+	 * Have both left and right contiguous neighbors.
+	 * Merge all three into a single free block.
+	 */
+	if (haveleft && haveright) {
+		/*
+		 * Delete the old by-size entry on the left.
+		 */
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Delete the old by-size entry on the right.
+		 */
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Delete the old by-block entry for the right block.
+		 */
+		if ((error = xfs_alloc_delete(bno_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Move the by-block cursor back to the left neighbor.
+		 */
+		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+#ifdef DEBUG
+		/*
+		 * Check that this is the right record: delete didn't
+		 * mangle the cursor.
+		 */
+		{
+			xfs_agblock_t	xxbno;
+			xfs_extlen_t	xxlen;
+
+			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
+					&i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(
+				i == 1 && xxbno == ltbno && xxlen == ltlen,
+				error0);
+		}
+#endif
+		/*
+		 * Update remaining by-block entry to the new, joined block.
+		 */
+		nbno = ltbno;
+		nlen = len + ltlen + gtlen;
+		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
+			goto error0;
+	}
+	/*
+	 * Have only a left contiguous neighbor.
+	 * Merge it together with the new freespace.
+	 */
+	else if (haveleft) {
+		/*
+		 * Delete the old by-size entry on the left.
+		 */
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Back up the by-block cursor to the left neighbor, and
+		 * update its length.
+		 */
+		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		nbno = ltbno;
+		nlen = len + ltlen;
+		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
+			goto error0;
+	}
+	/*
+	 * Have only a right contiguous neighbor.
+	 * Merge it together with the new freespace.
+	 */
+	else if (haveright) {
+		/*
+		 * Delete the old by-size entry on the right.
+		 */
+		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Update the starting block and length of the right
+		 * neighbor in the by-block tree.
+		 */
+		nbno = bno;
+		nlen = len + gtlen;
+		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
+			goto error0;
+	}
+	/*
+	 * No contiguous neighbors.
+	 * Insert the new freespace into the by-block tree.
+	 */
+	else {
+		nbno = bno;
+		nlen = len;
+		if ((error = xfs_alloc_insert(bno_cur, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	}
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	bno_cur = NULL;
+	/*
+	 * In all cases we need to insert the new freespace in the by-size tree.
+	 */
+	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
+		goto error0;
+	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
+	if ((error = xfs_alloc_insert(cnt_cur, &i)))
+		goto error0;
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+	cnt_cur = NULL;
+	/*
+	 * Update the freespace totals in the ag and superblock.
+	 */
+	{
+		xfs_agf_t	*agf;
+		xfs_perag_t	*pag;		/* per allocation group data */
+
+		agf = XFS_BUF_TO_AGF(agbp);
+		pag = &mp->m_perag[agno];
+		INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len);
+		xfs_trans_agblocks_delta(tp, len);
+		pag->pagf_freeblks += len;
+		XFS_WANT_CORRUPTED_GOTO(
+			INT_GET(agf->agf_freeblks, ARCH_CONVERT)
+				<= INT_GET(agf->agf_length, ARCH_CONVERT),
+			error0);
+		TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
+		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
+		if (!isfl)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
+		XFS_STATS_INC(xs_freex);
+		XFS_STATS_ADD(xs_freeb, len);
+	}
+	TRACE_FREE(haveleft ?
+			(haveright ? "both" : "left") :
+			(haveright ? "right" : "none"),
+		agno, bno, len, isfl);
+
+	/*
+	 * Since blocks move to the free list without the coordination
+	 * used in xfs_bmap_finish, we can't allow the block to be available
+	 * for reallocation and non-transaction writing (user data)
+	 * until we know that the transaction that moved it to the free
+	 * list is permanently on disk.  We track the blocks by declaring
+	 * these blocks as "busy"; the busy list is maintained on a per-ag
+	 * basis and each transaction records which entries should be removed
+	 * when the iclog commits to disk.  If a busy block is allocated,
+	 * the iclog is pushed up to the LSN that freed the block.
+	 */
+	xfs_alloc_mark_busy(tp, agno, bno, len);
+	return 0;
+
+ error0:
+	TRACE_FREE("error", agno, bno, len, isfl);
+	if (bno_cur)
+		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
+	if (cnt_cur)
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
+	return error;
+}
+
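xfs_free_ag_extent() above handles four cases: merge with a contiguous left neighbor, with a contiguous right neighbor, with both, or with neither. A sketch of the arithmetic with plain structs; the kernel expresses the same thing as deletions and updates in the two freespace btrees.

struct span { unsigned long bno; unsigned long len; };

static struct span coalesce(unsigned long bno, unsigned long len,
			    const struct span *left, const struct span *right)
{
	struct span n = { bno, len };

	if (left && left->bno + left->len == bno) {	/* merge leftward */
		n.bno = left->bno;
		n.len += left->len;
	}
	if (right && bno + len == right->bno)		/* merge rightward */
		n.len += right->len;
	return n;
}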
+/*
+ * Visible (exported) allocation/free functions.
+ * Some of these are used just by xfs_alloc_btree.c and this file.
+ */
+
+/*
+ * Compute and fill in value of m_ag_maxlevels.
+ */
+void
+xfs_alloc_compute_maxlevels(
+	xfs_mount_t	*mp)	/* file system mount structure */
+{
+	int		level;
+	uint		maxblocks;
+	uint		maxleafents;
+	int		minleafrecs;
+	int		minnoderecs;
+
+	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
+	minleafrecs = mp->m_alloc_mnr[0];
+	minnoderecs = mp->m_alloc_mnr[1];
+	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
+	for (level = 1; maxblocks > 1; level++)
+		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+	mp->m_ag_maxlevels = level;
+}
+
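A standalone restatement of the calculation above: in the worst case every other block of the AG is a separate free extent, which bounds the number of leaf records and therefore the height of the freespace btrees.

static int compute_maxlevels(unsigned int agblocks,
			     unsigned int minleafrecs,
			     unsigned int minnoderecs)
{
	unsigned int maxleafents = (agblocks + 1) / 2;	/* worst-case extents */
	unsigned int maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	int level;

	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	return level;
}

With illustrative minimums of 16 records per leaf and 8 pointers per node, a 1,000,000-block AG works out to 6 levels.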
+/*
+ * Decide whether to use this allocation group for this allocation.
+ * If so, fix up the btree freelist's size.
+ */
+STATIC int			/* error */
+xfs_alloc_fix_freelist(
+	xfs_alloc_arg_t	*args,	/* allocation argument structure */
+	int		flags)	/* XFS_ALLOC_FLAG_... */
+{
+	xfs_buf_t	*agbp;	/* agf buffer pointer */
+	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
+	xfs_buf_t	*agflbp;/* agfl buffer pointer */
+	xfs_agblock_t	bno;	/* freelist block */
+	xfs_extlen_t	delta;	/* new blocks needed in freelist */
+	int		error;	/* error result code */
+	xfs_extlen_t	longest;/* longest extent in allocation group */
+	xfs_mount_t	*mp;	/* file system mount point structure */
+	xfs_extlen_t	need;	/* total blocks needed in freelist */
+	xfs_perag_t	*pag;	/* per-ag information structure */
+	xfs_alloc_arg_t	targs;	/* local allocation arguments */
+	xfs_trans_t	*tp;	/* transaction pointer */
+
+	mp = args->mp;
+
+	pag = args->pag;
+	tp = args->tp;
+	if (!pag->pagf_init) {
+		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
+				&agbp)))
+			return error;
+		if (!pag->pagf_init) {
+			args->agbp = NULL;
+			return 0;
+		}
+	} else
+		agbp = NULL;
+
+	/*
+	 * If this is a metadata preferred pag and we are user data, then
+	 * try somewhere else if we are not being asked to try harder at
+	 * this point.
+	 */
+	if (pag->pagf_metadata && args->userdata && flags) {
+		args->agbp = NULL;
+		return 0;
+	}
+
+	need = XFS_MIN_FREELIST_PAG(pag, mp);
+	delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
+	/*
+	 * If it looks like there isn't a long enough extent, or enough
+	 * total blocks, reject it.
+	 */
+	longest = (pag->pagf_longest > delta) ?
+		(pag->pagf_longest - delta) :
+		(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
+	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
+	    (args->minleft &&
+	     (int)(pag->pagf_freeblks + pag->pagf_flcount -
+		   need - args->total) <
+	     (int)args->minleft)) {
+		if (agbp)
+			xfs_trans_brelse(tp, agbp);
+		args->agbp = NULL;
+		return 0;
+	}
+	/*
+	 * Get the a.g. freespace buffer.
+	 * Can fail if we're not blocking on locks, and it's held.
+	 */
+	if (agbp == NULL) {
+		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
+				&agbp)))
+			return error;
+		if (agbp == NULL) {
+			args->agbp = NULL;
+			return 0;
+		}
+	}
+	/*
+	 * Figure out how many blocks we should have in the freelist.
+	 */
+	agf = XFS_BUF_TO_AGF(agbp);
+	need = XFS_MIN_FREELIST(agf, mp);
+	delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ?
+		(need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0;
+	/*
+	 * If there isn't a long enough extent, or enough total blocks,
+	 * reject it.
+	 */
+	longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
+	longest = (longest > delta) ? (longest - delta) :
+		(INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0);
+	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
+	     (args->minleft &&
+		(int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) +
+		   INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) <
+	     (int)args->minleft)) {
+		xfs_trans_brelse(tp, agbp);
+		args->agbp = NULL;
+		return 0;
+	}
+	/*
+	 * Make the freelist shorter if it's too long.
+	 */
+	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) {
+		xfs_buf_t	*bp;
+
+		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
+			return error;
+		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
+			return error;
+		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+		xfs_trans_binval(tp, bp);
+	}
+	/*
+	 * Initialize the args structure.
+	 */
+	targs.tp = tp;
+	targs.mp = mp;
+	targs.agbp = agbp;
+	targs.agno = args->agno;
+	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
+		targs.minalignslop = 0;
+	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
+	targs.type = XFS_ALLOCTYPE_THIS_AG;
+	targs.pag = pag;
+	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
+		return error;
+	/*
+	 * Make the freelist longer if it's too short.
+	 */
+	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) {
+		targs.agbno = 0;
+		targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT);
+		/*
+		 * Allocate as many blocks as possible at once.
+		 */
+		if ((error = xfs_alloc_ag_vextent(&targs)))
+			return error;
+		/*
+		 * Stop if we run out.  Won't happen if callers are obeying
+		 * the restrictions correctly.  Can happen for free calls
+		 * on a completely full ag.
+		 */
+		if (targs.agbno == NULLAGBLOCK)
+			break;
+		/*
+		 * Put each allocated block on the list.
+		 */
+		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
+			if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp,
+					bno)))
+				return error;
+		}
+	}
+	args->agbp = agbp;
+	return 0;
+}
+
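The rejection test in xfs_alloc_fix_freelist() appears twice, once against the cached per-ag numbers and once against the AGF buffer itself. A sketch of that test with trimmed-down field names; the signed cast mirrors the code, since the subtraction can legitimately go negative.

struct agstate {
	unsigned long freeblks;		/* free blocks in the AG */
	unsigned long flcount;		/* blocks already on the freelist */
	unsigned long longest;		/* longest free extent in the AG */
};

static int ag_can_alloc(const struct agstate *ag, unsigned long need,
			unsigned long minlen, unsigned long align,
			unsigned long slop, unsigned long minleft,
			unsigned long total)
{
	unsigned long delta = need > ag->flcount ? need - ag->flcount : 0;
	unsigned long longest = ag->longest > delta ? ag->longest - delta :
				(ag->flcount > 0 || ag->longest > 0);

	if (minlen + align + slop - 1 > longest)
		return 0;		/* no single extent is long enough */
	if (minleft &&
	    (long)(ag->freeblks + ag->flcount - need - total) < (long)minleft)
		return 0;		/* not enough blocks would be left over */
	return 1;
}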
+/*
+ * Get a block from the freelist.
+ * Returns the block number in *bnop, or NULLAGBLOCK if the freelist is empty.
+ */
+int				/* error */
+xfs_alloc_get_freelist(
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
+	xfs_agblock_t	*bnop)	/* block address retrieved from freelist */
+{
+	xfs_agf_t	*agf;	/* a.g. freespace structure */
+	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
+	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
+	xfs_agblock_t	bno;	/* block number returned */
+	int		error;
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_get_freelist";
+#endif
+	xfs_mount_t	*mp;	/* mount structure */
+	xfs_perag_t	*pag;	/* per allocation group data */
+
+	agf = XFS_BUF_TO_AGF(agbp);
+	/*
+	 * Freelist is empty, give up.
+	 */
+	if (!agf->agf_flcount) {
+		*bnop = NULLAGBLOCK;
+		return 0;
+	}
+	/*
+	 * Read the array of free blocks.
+	 */
+	mp = tp->t_mountp;
+	if ((error = xfs_alloc_read_agfl(mp, tp,
+			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
+		return error;
+	agfl = XFS_BUF_TO_AGFL(agflbp);
+	/*
+	 * Get the block number and update the data structures.
+	 */
+	bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT);
+	INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1);
+	xfs_trans_brelse(tp, agflbp);
+	if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
+		agf->agf_flfirst = 0;
+	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
+	INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1);
+	xfs_trans_agflist_delta(tp, -1);
+	pag->pagf_flcount--;
+	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
+	*bnop = bno;
+
+	/*
+	 * As blocks are freed, they are added to the per-ag busy list
+	 * and remain there until the freeing transaction is committed to
+	 * disk.  Now that we have allocated blocks, this list must be
+	 * searched to see if a block is being reused.  If one is, then
+	 * the freeing transaction must be pushed to disk NOW by forcing
+	 * to disk all iclogs up to that transaction's LSN.
+	 */
+	xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
+	return 0;
+}
+
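The get/put pair above treats the AGFL as a fixed-size circular array indexed by a head (agf_flfirst) and tail (agf_fllast) pointer. A toy model of that ring; it derives the tail from head plus count instead of storing it, and FL_SIZE stands in for XFS_AGFL_SIZE.

#define FL_SIZE 128				/* illustrative slot count */

struct freelist {
	unsigned long bno[FL_SIZE];
	unsigned int first;			/* index of the oldest entry */
	unsigned int count;			/* entries currently held */
};

static int fl_get(struct freelist *fl, unsigned long *bnop)
{
	if (!fl->count)
		return 0;			/* freelist is empty */
	*bnop = fl->bno[fl->first];
	fl->first = (fl->first + 1) % FL_SIZE;	/* advance the head, wrapping */
	fl->count--;
	return 1;
}

static void fl_put(struct freelist *fl, unsigned long bno)
{
	fl->bno[(fl->first + fl->count) % FL_SIZE] = bno;	/* tail slot */
	fl->count++;
}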
+/*
+ * Log the given fields from the agf structure.
+ */
+void
+xfs_alloc_log_agf(
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
+	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
+{
+	int	first;		/* first byte offset */
+	int	last;		/* last byte offset */
+	static const short	offsets[] = {
+		offsetof(xfs_agf_t, agf_magicnum),
+		offsetof(xfs_agf_t, agf_versionnum),
+		offsetof(xfs_agf_t, agf_seqno),
+		offsetof(xfs_agf_t, agf_length),
+		offsetof(xfs_agf_t, agf_roots[0]),
+		offsetof(xfs_agf_t, agf_levels[0]),
+		offsetof(xfs_agf_t, agf_flfirst),
+		offsetof(xfs_agf_t, agf_fllast),
+		offsetof(xfs_agf_t, agf_flcount),
+		offsetof(xfs_agf_t, agf_freeblks),
+		offsetof(xfs_agf_t, agf_longest),
+		sizeof(xfs_agf_t)
+	};
+
+	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
+	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
+}
+
+/*
+ * Interface for inode allocation to force the pag data to be initialized.
+ */
+int					/* error */
+xfs_alloc_pagf_init(
+	xfs_mount_t		*mp,	/* file system mount structure */
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_agnumber_t		agno,	/* allocation group number */
+	int			flags)	/* XFS_ALLOC_FLAG_... */
+{
+	xfs_buf_t		*bp;
+	int			error;
+
+	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
+		return error;
+	if (bp)
+		xfs_trans_brelse(tp, bp);
+	return 0;
+}
+
+/*
+ * Put the block on the freelist for the allocation group.
+ */
+int					/* error */
+xfs_alloc_put_freelist(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
+	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
+	xfs_agblock_t		bno)	/* block being freed */
+{
+	xfs_agf_t		*agf;	/* a.g. freespace structure */
+	xfs_agfl_t		*agfl;	/* a.g. free block array */
+	xfs_agblock_t		*blockp;/* pointer to array entry */
+	int			error;
+#ifdef XFS_ALLOC_TRACE
+	static char		fname[] = "xfs_alloc_put_freelist";
+#endif
+	xfs_mount_t		*mp;	/* mount structure */
+	xfs_perag_t		*pag;	/* per allocation group data */
+
+	agf = XFS_BUF_TO_AGF(agbp);
+	mp = tp->t_mountp;
+
+	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
+			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
+		return error;
+	agfl = XFS_BUF_TO_AGFL(agflbp);
+	INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1);
+	if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
+		agf->agf_fllast = 0;
+	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
+	INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1);
+	xfs_trans_agflist_delta(tp, 1);
+	pag->pagf_flcount++;
+	ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp));
+	blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)];
+	INT_SET(*blockp, ARCH_CONVERT, bno);
+	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+	xfs_trans_log_buf(tp, agflbp,
+		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
+		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
+			sizeof(xfs_agblock_t) - 1));
+	return 0;
+}
+
+/*
+ * Read in the allocation group header (free/alloc section).
+ */
+int					/* error */
+xfs_alloc_read_agf(
+	xfs_mount_t	*mp,		/* mount point structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	int		flags,		/* XFS_ALLOC_FLAG_... */
+	xfs_buf_t	**bpp)		/* buffer for the ag freelist header */
+{
+	xfs_agf_t	*agf;		/* ag freelist header */
+	int		agf_ok;		/* set if agf is consistent */
+	xfs_buf_t	*bp;		/* return value */
+	xfs_perag_t	*pag;		/* per allocation group data */
+	int		error;
+
+	ASSERT(agno != NULLAGNUMBER);
+	error = xfs_trans_read_buf(
+			mp, tp, mp->m_ddev_targp,
+			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
+			XFS_FSS_TO_BB(mp, 1),
+			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U,
+			&bp);
+	if (error)
+		return error;
+	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
+	if (!bp) {
+		*bpp = NULL;
+		return 0;
+	}
+	/*
+	 * Validate the magic number of the agf block.
+	 */
+	agf = XFS_BUF_TO_AGF(bp);
+	agf_ok =
+		INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC &&
+		XFS_AGF_GOOD_VERSION(
+			INT_GET(agf->agf_versionnum, ARCH_CONVERT)) &&
+		INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
+				INT_GET(agf->agf_length, ARCH_CONVERT) &&
+		INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
+		INT_GET(agf->agf_fllast,  ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
+		INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp);
+	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
+			XFS_RANDOM_ALLOC_READ_AGF))) {
+		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
+				     XFS_ERRLEVEL_LOW, mp, agf);
+		xfs_trans_brelse(tp, bp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	pag = &mp->m_perag[agno];
+	if (!pag->pagf_init) {
+		pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT);
+		pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT);
+		pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
+		pag->pagf_levels[XFS_BTNUM_BNOi] =
+			INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT);
+		pag->pagf_levels[XFS_BTNUM_CNTi] =
+			INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT);
+		spinlock_init(&pag->pagb_lock, "xfspagb");
+		pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
+					sizeof(xfs_perag_busy_t), KM_SLEEP);
+		pag->pagf_init = 1;
+	}
+#ifdef DEBUG
+	else if (!XFS_FORCED_SHUTDOWN(mp)) {
+		ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT));
+		ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT));
+		ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT));
+		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
+		       INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT));
+		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
+		       INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT));
+	}
+#endif
+	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
+	*bpp = bp;
+	return 0;
+}
+
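The agf_ok test above boils down to a handful of range checks on the header just read. A sketch of the same sanity test over a trimmed-down, host-endian structure; the version check and the disk-endianness conversions are omitted, and AGFL_SLOTS stands in for XFS_AGFL_SIZE.

struct agf_hdr {
	unsigned int magic;
	unsigned int freeblks;
	unsigned int length;
	unsigned int flfirst, fllast, flcount;
};

#define AGF_MAGIC	0x58414746u		/* "XAGF" */
#define AGFL_SLOTS	128u			/* illustrative freelist size */

static int agf_sane(const struct agf_hdr *agf)
{
	return agf->magic == AGF_MAGIC &&
	       agf->freeblks <= agf->length &&
	       agf->flfirst < AGFL_SLOTS &&
	       agf->fllast < AGFL_SLOTS &&
	       agf->flcount <= AGFL_SLOTS;
}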
+/*
+ * Allocate an extent (variable-size).
+ * Depending on the allocation type, we either look in a single allocation
+ * group or loop over the allocation groups to find the result.
+ */
+int				/* error */
+xfs_alloc_vextent(
+	xfs_alloc_arg_t	*args)	/* allocation argument structure */
+{
+	xfs_agblock_t	agsize;	/* allocation group size */
+	int		error;
+	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
+#ifdef XFS_ALLOC_TRACE
+	static char	fname[] = "xfs_alloc_vextent";
+#endif
+	xfs_extlen_t	minleft;/* minimum left value, temp copy */
+	xfs_mount_t	*mp;	/* mount structure pointer */
+	xfs_agnumber_t	sagno;	/* starting allocation group number */
+	xfs_alloctype_t	type;	/* input allocation type */
+	int		bump_rotor = 0;
+	int		no_min = 0;
+	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+
+	mp = args->mp;
+	type = args->otype = args->type;
+	args->agbno = NULLAGBLOCK;
+	/*
+	 * Just fix this up, for the case where the last a.g. is shorter
+	 * (or there's only one a.g.) and the caller couldn't easily figure
+	 * that out (xfs_bmap_alloc).
+	 */
+	agsize = mp->m_sb.sb_agblocks;
+	if (args->maxlen > agsize)
+		args->maxlen = agsize;
+	if (args->alignment == 0)
+		args->alignment = 1;
+	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
+	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
+	ASSERT(args->minlen <= args->maxlen);
+	ASSERT(args->minlen <= agsize);
+	ASSERT(args->mod < args->prod);
+	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
+	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
+	    args->minlen > args->maxlen || args->minlen > agsize ||
+	    args->mod >= args->prod) {
+		args->fsbno = NULLFSBLOCK;
+		TRACE_ALLOC("badargs", args);
+		return 0;
+	}
+	minleft = args->minleft;
+
+	switch (type) {
+	case XFS_ALLOCTYPE_THIS_AG:
+	case XFS_ALLOCTYPE_NEAR_BNO:
+	case XFS_ALLOCTYPE_THIS_BNO:
+		/*
+		 * These three force us into a single a.g.
+		 */
+		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+		down_read(&mp->m_peraglock);
+		args->pag = &mp->m_perag[args->agno];
+		args->minleft = 0;
+		error = xfs_alloc_fix_freelist(args, 0);
+		args->minleft = minleft;
+		if (error) {
+			TRACE_ALLOC("nofix", args);
+			goto error0;
+		}
+		if (!args->agbp) {
+			up_read(&mp->m_peraglock);
+			TRACE_ALLOC("noagbp", args);
+			break;
+		}
+		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
+		if ((error = xfs_alloc_ag_vextent(args)))
+			goto error0;
+		up_read(&mp->m_peraglock);
+		break;
+	case XFS_ALLOCTYPE_START_BNO:
+		/*
+		 * Try near allocation first, then anywhere-in-ag after
+		 * the first a.g. fails.
+		 */
+		if ((args->userdata  == XFS_ALLOC_INITIAL_USER_DATA) &&
+		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
+			args->fsbno = XFS_AGB_TO_FSB(mp,
+					((mp->m_agfrotor / rotorstep) %
+					mp->m_sb.sb_agcount), 0);
+			bump_rotor = 1;
+		}
+		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
+		args->type = XFS_ALLOCTYPE_NEAR_BNO;
+		/* FALLTHROUGH */
+	case XFS_ALLOCTYPE_ANY_AG:
+	case XFS_ALLOCTYPE_START_AG:
+	case XFS_ALLOCTYPE_FIRST_AG:
+		/*
+		 * Rotate through the allocation groups looking for a winner.
+		 */
+		if (type == XFS_ALLOCTYPE_ANY_AG) {
+			/*
+			 * Start with the last place we left off.
+			 */
+			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
+					mp->m_sb.sb_agcount;
+			args->type = XFS_ALLOCTYPE_THIS_AG;
+			flags = XFS_ALLOC_FLAG_TRYLOCK;
+		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
+			/*
+			 * Start with allocation group given by bno.
+			 */
+			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+			args->type = XFS_ALLOCTYPE_THIS_AG;
+			sagno = 0;
+			flags = 0;
+		} else {
+			if (type == XFS_ALLOCTYPE_START_AG)
+				args->type = XFS_ALLOCTYPE_THIS_AG;
+			/*
+			 * Start with the given allocation group.
+			 */
+			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+			flags = XFS_ALLOC_FLAG_TRYLOCK;
+		}
+		/*
+		 * Loop over allocation groups twice; first time with
+		 * trylock set, second time without.
+		 */
+		down_read(&mp->m_peraglock);
+		for (;;) {
+			args->pag = &mp->m_perag[args->agno];
+			if (no_min)
+				args->minleft = 0;
+			error = xfs_alloc_fix_freelist(args, flags);
+			args->minleft = minleft;
+			if (error) {
+				TRACE_ALLOC("nofix", args);
+				goto error0;
+			}
+			/*
+			 * If we get a buffer back then the allocation will fly.
+			 */
+			if (args->agbp) {
+				if ((error = xfs_alloc_ag_vextent(args)))
+					goto error0;
+				break;
+			}
+			TRACE_ALLOC("loopfailed", args);
+			/*
+			 * Didn't work, figure out the next iteration.
+			 */
+			if (args->agno == sagno &&
+			    type == XFS_ALLOCTYPE_START_BNO)
+				args->type = XFS_ALLOCTYPE_THIS_AG;
+			if (++(args->agno) == mp->m_sb.sb_agcount)
+				args->agno = 0;
+			/*
+			 * Reached the starting a.g., must either be done
+			 * or switch to non-trylock mode.
+			 */
+			if (args->agno == sagno) {
+				if (no_min == 1) {
+					args->agbno = NULLAGBLOCK;
+					TRACE_ALLOC("allfailed", args);
+					break;
+				}
+				if (flags == 0) {
+					no_min = 1;
+				} else {
+					flags = 0;
+					if (type == XFS_ALLOCTYPE_START_BNO) {
+						args->agbno = XFS_FSB_TO_AGBNO(mp,
+							args->fsbno);
+						args->type = XFS_ALLOCTYPE_NEAR_BNO;
+					}
+				}
+			}
+		}
+		up_read(&mp->m_peraglock);
+		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
+			if (args->agno == sagno)
+				mp->m_agfrotor = (mp->m_agfrotor + 1) %
+					(mp->m_sb.sb_agcount * rotorstep);
+			else
+				mp->m_agfrotor = (args->agno * rotorstep + 1) %
+					(mp->m_sb.sb_agcount * rotorstep);
+		}
+		break;
+	default:
+		ASSERT(0);
+		/* NOTREACHED */
+	}
+	if (args->agbno == NULLAGBLOCK)
+		args->fsbno = NULLFSBLOCK;
+	else {
+		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
+#ifdef DEBUG
+		ASSERT(args->len >= args->minlen);
+		ASSERT(args->len <= args->maxlen);
+		ASSERT(args->agbno % args->alignment == 0);
+		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
+			args->len);
+#endif
+	}
+	return 0;
+error0:
+	up_read(&mp->m_peraglock);
+	return error;
+}
+
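The rotor loop in xfs_alloc_vextent() tries every AG starting from a remembered position, first with trylock semantics and then blocking. A sketch of just that control flow; try_ag() is a hypothetical callback standing in for the fix-freelist plus xfs_alloc_ag_vextent sequence, and the extra pass that relaxes minleft is omitted.

static int alloc_rotate(unsigned int agcount, unsigned int start,
			int (*try_ag)(unsigned int agno, int trylock))
{
	int trylock = 1;		/* first pass: don't block on AGF locks */
	unsigned int agno = start;

	for (;;) {
		if (try_ag(agno, trylock))
			return (int)agno;	/* this AG satisfied the request */
		agno = (agno + 1) % agcount;
		if (agno == start) {		/* wrapped all the way around */
			if (!trylock)
				return -1;	/* both passes failed */
			trylock = 0;		/* second pass: block this time */
		}
	}
}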
+/*
+ * Free an extent.
+ * Just break up the extent address and hand off to xfs_free_ag_extent
+ * after fixing up the freelist.
+ */
+int				/* error */
+xfs_free_extent(
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_fsblock_t	bno,	/* starting block number of extent */
+	xfs_extlen_t	len)	/* length of extent */
+{
+#ifdef DEBUG
+	xfs_agf_t	*agf;	/* a.g. freespace header */
+#endif
+	xfs_alloc_arg_t	args;	/* allocation argument structure */
+	int		error;
+
+	ASSERT(len != 0);
+	args.tp = tp;
+	args.mp = tp->t_mountp;
+	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
+	ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+	args.alignment = 1;
+	args.minlen = args.minleft = args.minalignslop = 0;
+	down_read(&args.mp->m_peraglock);
+	args.pag = &args.mp->m_perag[args.agno];
+	if ((error = xfs_alloc_fix_freelist(&args, 0)))
+		goto error0;
+#ifdef DEBUG
+	ASSERT(args.agbp != NULL);
+	agf = XFS_BUF_TO_AGF(args.agbp);
+	ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
+#endif
+	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
+		len, 0);
+error0:
+	up_read(&args.mp->m_peraglock);
+	return error;
+}
+
+
+/*
+ * AG Busy list management
+ * The busy list contains block ranges that have been freed but whose
+ * transactions have not yet hit disk.  If any block listed in a busy
+ * list is reused, the transaction that freed it must be forced to disk
+ * before continuing to use the block.
+ *
+ * xfs_alloc_mark_busy - add to the per-ag busy list
+ * xfs_alloc_clear_busy - remove an item from the per-ag busy list
+ */
+void
+xfs_alloc_mark_busy(xfs_trans_t *tp,
+		    xfs_agnumber_t agno,
+		    xfs_agblock_t bno,
+		    xfs_extlen_t len)
+{
+	xfs_mount_t		*mp;
+	xfs_perag_busy_t	*bsy;
+	int			n;
+	SPLDECL(s);
+
+	mp = tp->t_mountp;
+	s = mutex_spinlock(&mp->m_perag[agno].pagb_lock);
+
+	/* search pagb_list for an open slot */
+	for (bsy = mp->m_perag[agno].pagb_list, n = 0;
+	     n < XFS_PAGB_NUM_SLOTS;
+	     bsy++, n++) {
+		if (bsy->busy_tp == NULL) {
+			break;
+		}
+	}
+
+	if (n < XFS_PAGB_NUM_SLOTS) {
+		bsy = &mp->m_perag[agno].pagb_list[n];
+		mp->m_perag[agno].pagb_count++;
+		TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
+		bsy->busy_start = bno;
+		bsy->busy_length = len;
+		bsy->busy_tp = tp;
+		xfs_trans_add_busy(tp, agno, n);
+	} else {
+		TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
+		/*
+		 * The busy list is full!  Since it is no longer possible to
+		 * track the free block, make this a synchronous transaction
+		 * to ensure that the block is not reused before this
+		 * transaction commits.
+		 */
+		xfs_trans_set_sync(tp);
+	}
+
+	mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s);
+}
+
+void
+xfs_alloc_clear_busy(xfs_trans_t *tp,
+		     xfs_agnumber_t agno,
+		     int idx)
+{
+	xfs_mount_t		*mp;
+	xfs_perag_busy_t	*list;
+	SPLDECL(s);
+
+	mp = tp->t_mountp;
+
+	s = mutex_spinlock(&mp->m_perag[agno].pagb_lock);
+	list = mp->m_perag[agno].pagb_list;
+
+	ASSERT(idx < XFS_PAGB_NUM_SLOTS);
+	if (list[idx].busy_tp == tp) {
+		TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
+		list[idx].busy_tp = NULL;
+		mp->m_perag[agno].pagb_count--;
+	} else {
+		TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
+	}
+
+	mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s);
+}
+
+
+/*
+ * Returns the index of the busy-list slot containing any block of the range
+ * (agno, bno):len, or -1 if no block in the range is busy.
+ */
+int
+xfs_alloc_search_busy(xfs_trans_t *tp,
+		    xfs_agnumber_t agno,
+		    xfs_agblock_t bno,
+		    xfs_extlen_t len)
+{
+	xfs_mount_t		*mp;
+	xfs_perag_busy_t	*bsy;
+	int			n;
+	xfs_agblock_t		uend, bend;
+	xfs_lsn_t		lsn;
+	int			cnt;
+	SPLDECL(s);
+
+	mp = tp->t_mountp;
+
+	s = mutex_spinlock(&mp->m_perag[agno].pagb_lock);
+	cnt = mp->m_perag[agno].pagb_count;
+
+	uend = bno + len - 1;
+
+	/* search pagb_list for this slot, skipping open slots */
+	for (bsy = mp->m_perag[agno].pagb_list, n = 0;
+	     cnt; bsy++, n++) {
+
+		/*
+		 * Does the range (bno, len) overlap this busy extent?
+		 */
+		if (bsy->busy_tp != NULL) {
+			bend = bsy->busy_start + bsy->busy_length - 1;
+			if ((bno > bend) ||
+			    (uend < bsy->busy_start)) {
+				cnt--;
+			} else {
+				TRACE_BUSYSEARCH("xfs_alloc_search_busy",
+						 "found1", agno, bno, len, n,
+						 tp);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * If a block was found, force the log through the LSN of the
+	 * transaction that freed the block
+	 */
+	if (cnt) {
+		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp);
+		lsn = bsy->busy_tp->t_commit_lsn;
+		mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s);
+		xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
+	} else {
+		TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp);
+		n = -1;
+		mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s);
+	}
+
+	return n;
+}
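The search above reduces to a closed-interval overlap test between the candidate range and each busy entry. The same test in isolation, assuming both lengths are non-zero:

static int ranges_overlap(unsigned long bno, unsigned long len,
			  unsigned long start, unsigned long length)
{
	unsigned long uend = bno + len - 1;		/* last requested block */
	unsigned long bend = start + length - 1;	/* last busy block */

	return !(bno > bend || uend < start);
}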
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
new file mode 100644
index 0000000..72329c8
--- /dev/null
+++ b/fs/xfs/xfs_alloc.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ALLOC_H__
+#define	__XFS_ALLOC_H__
+
+struct xfs_buf;
+struct xfs_mount;
+struct xfs_perag;
+struct xfs_trans;
+
+/*
+ * Freespace allocation types.  Argument to xfs_alloc_[v]extent.
+ */
+typedef enum xfs_alloctype
+{
+	XFS_ALLOCTYPE_ANY_AG,		/* allocate anywhere, use rotor */
+	XFS_ALLOCTYPE_FIRST_AG,		/* ... start at ag 0 */
+	XFS_ALLOCTYPE_START_AG,		/* anywhere, start in this a.g. */
+	XFS_ALLOCTYPE_THIS_AG,		/* anywhere in this a.g. */
+	XFS_ALLOCTYPE_START_BNO,	/* near this block else anywhere */
+	XFS_ALLOCTYPE_NEAR_BNO,		/* in this a.g. and near this block */
+	XFS_ALLOCTYPE_THIS_BNO		/* at exactly this block */
+} xfs_alloctype_t;
+
+/*
+ * Flags for xfs_alloc_fix_freelist.
+ */
+#define	XFS_ALLOC_FLAG_TRYLOCK	0x00000001  /* use trylock for buffer locking */
+
+/*
+ * Argument structure for xfs_alloc routines.
+ * This is turned into a structure to avoid having 20 arguments passed
+ * down several levels of the stack.
+ */
+typedef struct xfs_alloc_arg {
+	struct xfs_trans *tp;		/* transaction pointer */
+	struct xfs_mount *mp;		/* file system mount point */
+	struct xfs_buf	*agbp;		/* buffer for a.g. freelist header */
+	struct xfs_perag *pag;		/* per-ag struct for this agno */
+	xfs_fsblock_t	fsbno;		/* file system block number */
+	xfs_agnumber_t	agno;		/* allocation group number */
+	xfs_agblock_t	agbno;		/* allocation group-relative block # */
+	xfs_extlen_t	minlen;		/* minimum size of extent */
+	xfs_extlen_t	maxlen;		/* maximum size of extent */
+	xfs_extlen_t	mod;		/* mod value for extent size */
+	xfs_extlen_t	prod;		/* prod value for extent size */
+	xfs_extlen_t	minleft;	/* min blocks must be left after us */
+	xfs_extlen_t	total;		/* total blocks needed in xaction */
+	xfs_extlen_t	alignment;	/* align answer to multiple of this */
+	xfs_extlen_t	minalignslop;	/* slop for minlen+alignment calcs */
+	xfs_extlen_t	len;		/* output: actual size of extent */
+	xfs_alloctype_t	type;		/* allocation type XFS_ALLOCTYPE_... */
+	xfs_alloctype_t	otype;		/* original allocation type */
+	char		wasdel;		/* set if allocation was prev delayed */
+	char		wasfromfl;	/* set if allocation is from freelist */
+	char		isfl;		/* set if is freelist blocks - !actg */
+	char		userdata;	/* set if this is user data */
+} xfs_alloc_arg_t;
+
+/*
+ * Defines for userdata
+ */
+#define XFS_ALLOC_USERDATA		1	/* allocation is for user data */
+#define XFS_ALLOC_INITIAL_USER_DATA	2	/* special case start of file */
+
+
+#ifdef __KERNEL__
+
+#if defined(XFS_ALLOC_TRACE)
+/*
+ * Allocation tracing buffer size.
+ */
+#define	XFS_ALLOC_TRACE_SIZE	4096
+extern ktrace_t *xfs_alloc_trace_buf;
+
+/*
+ * Types for alloc tracing.
+ */
+#define	XFS_ALLOC_KTRACE_ALLOC	1
+#define	XFS_ALLOC_KTRACE_FREE	2
+#define	XFS_ALLOC_KTRACE_MODAGF	3
+#define	XFS_ALLOC_KTRACE_BUSY	4
+#define	XFS_ALLOC_KTRACE_UNBUSY	5
+#define	XFS_ALLOC_KTRACE_BUSYSEARCH	6
+#endif
+
+/*
+ * Compute and fill in value of m_ag_maxlevels.
+ */
+void
+xfs_alloc_compute_maxlevels(
+	struct xfs_mount	*mp);	/* file system mount structure */
+
+/*
+ * Get a block from the freelist.
+ * Returns the block number in *bnop, or NULLAGBLOCK if the freelist is empty.
+ */
+int				/* error */
+xfs_alloc_get_freelist(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*agbp,	/* buffer containing the agf structure */
+	xfs_agblock_t	*bnop);	/* block address retrieved from freelist */
+
+/*
+ * Log the given fields from the agf structure.
+ */
+void
+xfs_alloc_log_agf(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*bp,	/* buffer for a.g. freelist header */
+	int		fields);/* mask of fields to be logged (XFS_AGF_...) */
+
+/*
+ * Interface for inode allocation to force the pag data to be initialized.
+ */
+int				/* error */
+xfs_alloc_pagf_init(
+	struct xfs_mount *mp,	/* file system mount structure */
+	struct xfs_trans *tp,	/* transaction pointer */
+	xfs_agnumber_t	agno,	/* allocation group number */
+	int		flags);	/* XFS_ALLOC_FLAG_... */
+
+/*
+ * Put the block on the freelist for the allocation group.
+ */
+int				/* error */
+xfs_alloc_put_freelist(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*agbp,	/* buffer for a.g. freelist header */
+	struct xfs_buf	*agflbp,/* buffer for a.g. free block array */
+	xfs_agblock_t	bno);	/* block being freed */
+
+/*
+ * Read in the allocation group header (free/alloc section).
+ */
+int					/* error  */
+xfs_alloc_read_agf(
+	struct xfs_mount *mp,		/* mount point structure */
+	struct xfs_trans *tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	int		flags,		/* XFS_ALLOC_FLAG_... */
+	struct xfs_buf	**bpp);		/* buffer for the ag freelist header */
+
+/*
+ * Allocate an extent (variable-size).
+ */
+int				/* error */
+xfs_alloc_vextent(
+	xfs_alloc_arg_t	*args);	/* allocation argument structure */
+
+/*
+ * Free an extent.
+ */
+int				/* error */
+xfs_free_extent(
+	struct xfs_trans *tp,	/* transaction pointer */
+	xfs_fsblock_t	bno,	/* starting block number of extent */
+	xfs_extlen_t	len);	/* length of extent */
+
+void
+xfs_alloc_mark_busy(xfs_trans_t *tp,
+		xfs_agnumber_t agno,
+		xfs_agblock_t bno,
+		xfs_extlen_t len);
+
+void
+xfs_alloc_clear_busy(xfs_trans_t *tp,
+		xfs_agnumber_t ag,
+		int idx);
+
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
new file mode 100644
index 0000000..e0355a1
--- /dev/null
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -0,0 +1,2204 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Free space allocation for XFS.
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+
+/*
+ * Prototypes for internal functions.
+ */
+
+STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
+STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);
+STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
+		xfs_alloc_key_t *, xfs_btree_cur_t **, int *);
+STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int);
+
+/*
+ * Internal functions.
+ */
+
+/*
+ * Single level of the xfs_alloc_delete record deletion routine.
+ * Delete record pointed to by cur/level.
+ * Remove the record from its block then rebalance the tree.
+ * *stat is set to 0 for failure, 1 for done, 2 to go on to the next level.
+ */
+STATIC int				/* error */
+xfs_alloc_delrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level removing record from */
+	int			*stat)	/* fail/done/go-on */
+{
+	xfs_agf_t		*agf;	/* allocation group freelist header */
+	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
+	xfs_agblock_t		bno;	/* btree block number */
+	xfs_buf_t		*bp;	/* buffer for block */
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_alloc_key_t		key;	/* kp points here if block is level 0 */
+	xfs_agblock_t		lbno;	/* left block's block number */
+	xfs_buf_t		*lbp;	/* left block's buffer pointer */
+	xfs_alloc_block_t	*left;	/* left btree block */
+	xfs_alloc_key_t		*lkp=NULL;	/* left block key pointer */
+	xfs_alloc_ptr_t		*lpp=NULL;	/* left block address pointer */
+	int			lrecs=0;	/* number of records in left block */
+	xfs_alloc_rec_t		*lrp;	/* left block record pointer */
+	xfs_mount_t		*mp;	/* mount structure */
+	int			ptr;	/* index in btree block for this rec */
+	xfs_agblock_t		rbno;	/* right block's block number */
+	xfs_buf_t		*rbp;	/* right block's buffer pointer */
+	xfs_alloc_block_t	*right;	/* right btree block */
+	xfs_alloc_key_t		*rkp;	/* right block key pointer */
+	xfs_alloc_ptr_t		*rpp;	/* right block address pointer */
+	int			rrecs=0;	/* number of records in right block */
+	xfs_alloc_rec_t		*rrp;	/* right block record pointer */
+	xfs_btree_cur_t		*tcur;	/* temporary btree cursor */
+
+	/*
+	 * Get the index of the entry being deleted, check for nothing there.
+	 */
+	ptr = cur->bc_ptrs[level];
+	if (ptr == 0) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Get the buffer & block containing the record or key/ptr.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+#endif
+	/*
+	 * Fail if we're off the end of the block.
+	 */
+	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 0;
+		return 0;
+	}
+	XFS_STATS_INC(xs_abt_delrec);
+	/*
+	 * It's a nonleaf.  Excise the key and ptr being deleted, by
+	 * sliding the entries past them down one.
+	 * Log the changed areas of the block.
+	 */
+	if (level > 0) {
+		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
+		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+			memmove(&lkp[ptr - 1], &lkp[ptr],
+				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */
+			memmove(&lpp[ptr - 1], &lpp[ptr],
+				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */
+			xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
+			xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
+		}
+	}
+	/*
+	 * It's a leaf.  Excise the record being deleted, by sliding the
+	 * entries past it down one.  Log the changed areas of the block.
+	 */
+	else {
+		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
+		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+			memmove(&lrp[ptr - 1], &lrp[ptr],
+				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp));
+			xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
+		}
+		/*
+		 * If it's the first record in the block, we'll need a key
+		 * structure to pass up to the next level (updkey).
+		 */
+		if (ptr == 1) {
+			key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */
+			key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */
+			lkp = &key;
+		}
+	}
+	/*
+	 * Decrement and log the number of entries in the block.
+	 */
+	INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
+	/*
+	 * See if the longest free extent in the allocation group was
+	 * changed by this operation.  True if it's the by-size btree, and
+	 * this is the leaf level, and there is no right sibling block,
+	 * and this was the last record.
+	 */
+	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	mp = cur->bc_mp;
+
+	if (level == 0 &&
+	    cur->bc_btnum == XFS_BTNUM_CNT &&
+	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
+	    ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1);
+		/*
+		 * There are still records in the block.  Grab the size
+		 * from the last one.
+		 */
+		if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+			rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur);
+			INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT);
+		}
+		/*
+		 * No free extents left.
+		 */
+		else
+			agf->agf_longest = 0;
+		mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest =
+			INT_GET(agf->agf_longest, ARCH_CONVERT);
+		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
+			XFS_AGF_LONGEST);
+	}
+	/*
+	 * Is this the root level?  If so, we're almost done.
+	 */
+	if (level == cur->bc_nlevels - 1) {
+		/*
+		 * If this is the root level,
+		 * and there's only one entry left,
+		 * and it's NOT the leaf level,
+		 * then we can get rid of this level.
+		 */
+		if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) {
+			/*
+			 * lpp is still set to the first pointer in the block.
+			 * Make it the new root of the btree.
+			 */
+			bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT);
+			INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT);
+			INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1);
+			mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--;
+			/*
+			 * Put this buffer/block on the ag's freelist.
+			 */
+			if ((error = xfs_alloc_put_freelist(cur->bc_tp,
+					cur->bc_private.a.agbp, NULL, bno)))
+				return error;
+			/*
+			 * Since blocks move to the free list without the
+			 * coordination used in xfs_bmap_finish, we can't allow
+			 * the block to be available for reallocation and
+			 * non-transaction writing (user data) until we know
+			 * that the transaction that moved it to the free list
+			 * is permanently on disk. We track the blocks by
+			 * declaring these blocks as "busy"; the busy list is
+			 * maintained on a per-ag basis and each transaction
+			 * records which entries should be removed when the
+			 * iclog commits to disk. If a busy block is
+			 * allocated, the iclog is pushed up to the LSN
+			 * that freed the block.
+			 */
+			xfs_alloc_mark_busy(cur->bc_tp,
+				INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
+
+			xfs_trans_agbtree_delta(cur->bc_tp, -1);
+			xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
+				XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+			/*
+			 * Update the cursor so there's one fewer level.
+			 */
+			xfs_btree_setbuf(cur, level, NULL);
+			cur->bc_nlevels--;
+		} else if (level > 0 &&
+			   (error = xfs_alloc_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * If we deleted the leftmost entry in the block, update the
+	 * key values above us in the tree.
+	 */
+	if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1)))
+		return error;
+	/*
+	 * If the number of records remaining in the block is at least
+	 * the minimum, we're done.
+	 */
+	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
+		if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * Otherwise, we have to move some records around to keep the
+	 * tree balanced.  Look at the left and right sibling blocks to
+	 * see if we can re-balance by moving only one record.
+	 */
+	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
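+	/*
+	 * bno is set below, from a sibling's back-pointer, to this block's
+	 * own block number, for use when setting up the join.
+	 */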
+	bno = NULLAGBLOCK;
+	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
+	/*
+	 * Duplicate the cursor so our btree manipulations here won't
+	 * disrupt the next level up.
+	 */
+	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
+		return error;
+	/*
+	 * If there's a right sibling, see if it's ok to shift an entry
+	 * out of it.
+	 */
+	if (rbno != NULLAGBLOCK) {
+		/*
+		 * Move the temp cursor to the last entry in the next block.
+		 * Actually any entry but the first would suffice.
+		 */
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_increment(tcur, level, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Grab a pointer to the block.
+		 */
+		rbp = tcur->bc_bufs[level];
+		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+			goto error0;
+#endif
+		/*
+		 * Grab the current block number, for future use.
+		 */
+		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		/*
+		 * If right block is full enough so that removing one entry
+		 * won't make it too empty, and left-shifting an entry out
+		 * of right to us works, we're done.
+		 */
+		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		     XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
+			if ((error = xfs_alloc_lshift(tcur, level, &i)))
+				goto error0;
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
+				xfs_btree_del_cursor(tcur,
+						     XFS_BTREE_NOERROR);
+				if (level > 0 &&
+				    (error = xfs_alloc_decrement(cur, level,
+					    &i)))
+					return error;
+				*stat = 1;
+				return 0;
+			}
+		}
+		/*
+		 * Otherwise, grab the number of records in right for
+		 * future reference, and fix up the temp cursor to point
+		 * to our block again (last record).
+		 */
+		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		if (lbno != NULLAGBLOCK) {
+			i = xfs_btree_firstrec(tcur, level);
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if ((error = xfs_alloc_decrement(tcur, level, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		}
+	}
+	/*
+	 * If there's a left sibling, see if it's ok to shift an entry
+	 * out of it.
+	 */
+	if (lbno != NULLAGBLOCK) {
+		/*
+		 * Move the temp cursor to the first entry in the
+		 * previous block.
+		 */
+		i = xfs_btree_firstrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_alloc_decrement(tcur, level, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		xfs_btree_firstrec(tcur, level);
+		/*
+		 * Grab a pointer to the block.
+		 */
+		lbp = tcur->bc_bufs[level];
+		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+			goto error0;
+#endif
+		/*
+		 * Grab the current block number, for future use.
+		 */
+		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		/*
+		 * If left block is full enough so that removing one entry
+		 * won't make it too empty, and right-shifting an entry out
+		 * of left to us works, we're done.
+		 */
+		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		     XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
+			if ((error = xfs_alloc_rshift(tcur, level, &i)))
+				goto error0;
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
+				xfs_btree_del_cursor(tcur,
+						     XFS_BTREE_NOERROR);
+				if (level == 0)
+					cur->bc_ptrs[0]++;
+				*stat = 1;
+				return 0;
+			}
+		}
+		/*
+		 * Otherwise, grab the number of records in left for
+		 * future reference.
+		 */
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	/*
+	 * Delete the temp cursor, we're done with it.
+	 */
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	/*
+	 * If here, we need to do a join to keep the tree balanced.
+	 */
+	ASSERT(bno != NULLAGBLOCK);
+	/*
+	 * See if we can join with the left neighbor block.
+	 */
+	if (lbno != NULLAGBLOCK &&
+	    lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * Set "right" to be the starting block,
+		 * "left" to be the left neighbor.
+		 */
+		rbno = bno;
+		right = block;
+		rbp = bp;
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.a.agno, lbno, 0, &lbp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+			return error;
+	}
+	/*
+	 * If that won't work, see if we can join with the right neighbor block.
+	 */
+	else if (rbno != NULLAGBLOCK &&
+		 rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		  XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * Set "left" to be the starting block,
+		 * "right" to be the right neighbor.
+		 */
+		lbno = bno;
+		left = block;
+		lbp = bp;
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.a.agno, rbno, 0, &rbp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+			return error;
+	}
+	/*
+	 * Otherwise, we can't fix the imbalance.
+	 * Just return.  This is probably a logic error, but it's not fatal.
+	 */
+	else {
+		if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * We're now going to join "left" and "right" by moving all the stuff
+	 * in "right" to "left" and deleting "right".
+	 */
+	if (level > 0) {
+		/*
+		 * It's a non-leaf.  Move keys and pointers.
+		 */
+		lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
+		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
+		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */
+		memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */
+		xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
+				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
+				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	} else {
+		/*
+		 * It's a leaf.  Move records.
+		 */
+		lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
+		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
+		memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
+		xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
+				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	}
+	/*
+	 * If we joined with the left neighbor, set the buffer in the
+	 * cursor to the left block, and fix up the index.
+	 */
+	if (bp != lbp) {
+		xfs_btree_setbuf(cur, level, lbp);
+		cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	/*
+	 * If we joined with the right neighbor and there's a level above
+	 * us, increment the cursor at that level.
+	 */
+	else if (level + 1 < cur->bc_nlevels &&
+		 (error = xfs_alloc_increment(cur, level + 1, &i)))
+		return error;
+	/*
+	 * Fix up the number of records in the surviving block.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	/*
+	 * Fix up the right block pointer in the surviving block, and log it.
+	 */
+	left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */
+	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+	/*
+	 * If there is a right sibling now, make it point to the
+	 * remaining block.
+	 */
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		xfs_alloc_block_t	*rrblock;
+		xfs_buf_t		*rrbp;
+
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+				&rrbp, XFS_ALLOC_BTREE_REF)))
+			return error;
+		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
+		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
+			return error;
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
+	}
+	/*
+	 * Free the block being deleted by putting it on the freelist.
+	 */
+	if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+			NULL, rbno)))
+		return error;
+	/*
+	 * Since blocks move to the free list without the coordination
+	 * used in xfs_bmap_finish, we can't allow the block to be available
+	 * for reallocation and non-transaction writing (user data)
+	 * until we know that the transaction that moved it to the free
+	 * list is permanently on disk. We track the blocks by declaring
+	 * these blocks as "busy"; the busy list is maintained on a
+	 * per-ag basis and each transaction records which entries
+	 * should be removed when the iclog commits to disk. If a
+	 * busy block is allocated, the iclog is pushed up to the
+	 * LSN that freed the block.
+	 */
+	xfs_alloc_mark_busy(cur->bc_tp,
+		INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
+
+	xfs_trans_agbtree_delta(cur->bc_tp, -1);
+	/*
+	 * Adjust the current level's cursor so that we're left referring
+	 * to the right node, after we're done.
+	 * If this leaves the ptr value 0 our caller will fix it up.
+	 */
+	if (level > 0)
+		cur->bc_ptrs[level]--;
+	/*
+	 * Return value means the next level up has something to do.
+	 */
+	*stat = 2;
+	return 0;
+
+error0:
+	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Insert one record/level.  Return information to the caller
+ * allowing the next level up to proceed if necessary.
+ */
+STATIC int				/* error */
+xfs_alloc_insrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to insert record at */
+	xfs_agblock_t		*bnop,	/* i/o: block number inserted */
+	xfs_alloc_rec_t		*recp,	/* i/o: record data inserted */
+	xfs_btree_cur_t		**curp,	/* output: new cursor replacing cur */
+	int			*stat)	/* output: success/failure */
+{
+	xfs_agf_t		*agf;	/* allocation group freelist header */
+	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
+	xfs_buf_t		*bp;	/* buffer for block */
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_alloc_key_t		key;	/* key value being inserted */
+	xfs_alloc_key_t		*kp;	/* pointer to btree keys */
+	xfs_agblock_t		nbno;	/* block number of allocated block */
+	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
+	xfs_alloc_key_t		nkey;	/* new key value, from split */
+	xfs_alloc_rec_t		nrec;	/* new record value, for caller */
+	int			optr;	/* old ptr value */
+	xfs_alloc_ptr_t		*pp;	/* pointer to btree addresses */
+	int			ptr;	/* index in btree block for this rec */
+	xfs_alloc_rec_t		*rp;	/* pointer to btree records */
+
+	ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0);
+	/*
+	 * If we made it to the root level, allocate a new root block
+	 * and we're done.
+	 */
+	if (level >= cur->bc_nlevels) {
+		XFS_STATS_INC(xs_abt_insrec);
+		if ((error = xfs_alloc_newroot(cur, &i)))
+			return error;
+		*bnop = NULLAGBLOCK;
+		*stat = i;
+		return 0;
+	}
+	/*
+	 * Make a key out of the record data to be inserted, and save it.
+	 */
+	key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */
+	key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */
+	optr = ptr = cur->bc_ptrs[level];
+	/*
+	 * If we're off the left edge, return failure.
+	 */
+	if (ptr == 0) {
+		*stat = 0;
+		return 0;
+	}
+	XFS_STATS_INC(xs_abt_insrec);
+	/*
+	 * Get pointers to the btree buffer and block.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+	/*
+	 * Check that the new entry is being inserted in the right place.
+	 */
+	if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		if (level == 0) {
+			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
+			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
+		} else {
+			kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
+			xfs_btree_check_key(cur->bc_btnum, &key, kp);
+		}
+	}
+#endif
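+	/*
+	 * Assume no split is needed: nbno and ncur are only changed if
+	 * xfs_alloc_split has to be called below.
+	 */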
+	nbno = NULLAGBLOCK;
+	ncur = (xfs_btree_cur_t *)0;
+	/*
+	 * If the block is full, we can't insert the new entry until we
+	 * make the block un-full.
+	 */
+	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * First, try shifting an entry to the right neighbor.
+		 */
+		if ((error = xfs_alloc_rshift(cur, level, &i)))
+			return error;
+		if (i) {
+			/* nothing */
+		}
+		/*
+		 * Next, try shifting an entry to the left neighbor.
+		 */
+		else {
+			if ((error = xfs_alloc_lshift(cur, level, &i)))
+				return error;
+			if (i)
+				optr = ptr = cur->bc_ptrs[level];
+			else {
+				/*
+				 * Next, try splitting the current block in
+				 * half. If this works we have to re-set our
+				 * variables because we could be in a
+				 * different block now.
+				 */
+				if ((error = xfs_alloc_split(cur, level, &nbno,
+						&nkey, &ncur, &i)))
+					return error;
+				if (i) {
+					bp = cur->bc_bufs[level];
+					block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+					if ((error =
+						xfs_btree_check_sblock(cur,
+							block, level, bp)))
+						return error;
+#endif
+					ptr = cur->bc_ptrs[level];
+					nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */
+					nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */
+				}
+				/*
+				 * Otherwise the insert fails.
+				 */
+				else {
+					*stat = 0;
+					return 0;
+				}
+			}
+		}
+	}
+	/*
+	 * At this point we know there's room for our new entry in the block
+	 * we're pointing at.
+	 */
+	if (level > 0) {
+		/*
+		 * It's a non-leaf entry.  Make a hole for the new data
+		 * in the key and ptr regions of the block.
+		 */
+		kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
+		pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memmove(&kp[ptr], &kp[ptr - 1],
+			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */
+		memmove(&pp[ptr], &pp[ptr - 1],
+			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
+			return error;
+#endif
+		/*
+		 * Now stuff the new data in, bump numrecs and log the new data.
+		 */
+		kp[ptr - 1] = key;
+		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
+		INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1);
+		xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
+		xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
+#ifdef DEBUG
+		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
+				kp + ptr);
+#endif
+	} else {
+		/*
+		 * It's a leaf entry.  Make a hole for the new record.
+		 */
+		rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
+		memmove(&rp[ptr], &rp[ptr - 1],
+			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp));
+		/*
+		 * Now stuff the new record in, bump numrecs
+		 * and log the new data.
+		 */
+		rp[ptr - 1] = *recp; /* INT_: struct copy */
+		INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1);
+		xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
+#ifdef DEBUG
+		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
+				rp + ptr);
+#endif
+	}
+	/*
+	 * Log the new number of records in the btree header.
+	 */
+	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
+	/*
+	 * If we inserted at the start of a block, update the parents' keys.
+	 */
+	if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1)))
+		return error;
+	/*
+	 * Look to see if the longest extent in the allocation group
+	 * needs to be updated.
+	 */
+
+	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	if (level == 0 &&
+	    cur->bc_btnum == XFS_BTNUM_CNT &&
+	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
+	    INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) {
+		/*
+		 * If this is a leaf in the by-size btree and there
+		 * is no right sibling block and this block is bigger
+		 * than the previous longest block, update it.
+		 */
+		INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT);
+		cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest
+			= INT_GET(recp->ar_blockcount, ARCH_CONVERT);
+		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
+			XFS_AGF_LONGEST);
+	}
+	/*
+	 * Return the new block number, if any.
+	 * If there is one, give back a record value and a cursor too.
+	 */
+	*bnop = nbno;
+	if (nbno != NULLAGBLOCK) {
+		*recp = nrec; /* INT_: struct copy */
+		*curp = ncur; /* INT_: struct copy */
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Log header fields from a btree block.
+ */
+STATIC void
+xfs_alloc_log_block(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			fields)	/* mask of fields: XFS_BB_... */
+{
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	static const short	offsets[] = {	/* table of offsets */
+		offsetof(xfs_alloc_block_t, bb_magic),
+		offsetof(xfs_alloc_block_t, bb_level),
+		offsetof(xfs_alloc_block_t, bb_numrecs),
+		offsetof(xfs_alloc_block_t, bb_leftsib),
+		offsetof(xfs_alloc_block_t, bb_rightsib),
+		sizeof(xfs_alloc_block_t)
+	};
+
+	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
+	xfs_trans_log_buf(tp, bp, first, last);
+}
+
+/*
+ * Log keys from a btree block (nonleaf).
+ */
+STATIC void
+xfs_alloc_log_keys(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			kfirst,	/* index of first key to log */
+	int			klast)	/* index of last key to log */
+{
+	xfs_alloc_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	xfs_alloc_key_t		*kp;	/* key pointer in btree block */
+	int			last;	/* last byte offset logged */
+
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+	kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
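+	/*
+	 * Compute the byte range in the buffer covering keys kfirst
+	 * through klast, then log that range.
+	 */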
+	first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Log block pointer fields from a btree block (nonleaf).
+ */
+STATIC void
+xfs_alloc_log_ptrs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			pfirst,	/* index of first pointer to log */
+	int			plast)	/* index of last pointer to log */
+{
+	xfs_alloc_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	xfs_alloc_ptr_t		*pp;	/* block-pointer pointer in btree blk */
+
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+	pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
+	first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Log records from a btree block (leaf).
+ */
+STATIC void
+xfs_alloc_log_recs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			rfirst,	/* index of first record to log */
+	int			rlast)	/* index of last record to log */
+{
+	xfs_alloc_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	xfs_alloc_rec_t		*rp;	/* record pointer for btree block */
+
+
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+	rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
+#ifdef DEBUG
+	{
+		xfs_agf_t	*agf;
+		xfs_alloc_rec_t	*p;
+
+		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+		for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
+			ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <=
+			       INT_GET(agf->agf_length, ARCH_CONVERT));
+	}
+#endif
+	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Lookup the record.  The cursor is made to point to it, based on dir.
+ * *stat is set to 0 if no such record is found, 1 for success.
+ */
+STATIC int				/* error */
+xfs_alloc_lookup(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_lookup_t		dir,	/* <=, ==, or >= */
+	int			*stat)	/* success/failure */
+{
+	xfs_agblock_t		agbno;	/* a.g. relative btree block number */
+	xfs_agnumber_t		agno;	/* allocation group number */
+	xfs_alloc_block_t	*block=NULL;	/* current btree block */
+	int			diff;	/* difference for the current key */
+	int			error;	/* error return value */
+	int			keyno=0;	/* current key number */
+	int			level;	/* level in the btree */
+	xfs_mount_t		*mp;	/* file system mount point */
+
+	XFS_STATS_INC(xs_abt_lookup);
+	/*
+	 * Get the allocation group header, and the root block number.
+	 */
+	mp = cur->bc_mp;
+
+	{
+		xfs_agf_t	*agf;	/* a.g. freespace header */
+
+		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+		agno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
+		agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT);
+	}
+	/*
+	 * Iterate over each level in the btree, starting at the root.
+	 * For each level above the leaves, find the key we need, based
+	 * on the lookup record, then follow the corresponding block
+	 * pointer down to the next level.
+	 */
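+	/*
+	 * diff starts non-zero so the first (root) block gets a full
+	 * binary search below.
+	 */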
+	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+		xfs_buf_t	*bp;	/* buffer pointer for btree block */
+		xfs_daddr_t	d;	/* disk address of btree block */
+
+		/*
+		 * Get the disk address we're looking for.
+		 */
+		d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+		/*
+		 * If the old buffer at this level is for a different block,
+		 * throw it away, otherwise just use it.
+		 */
+		bp = cur->bc_bufs[level];
+		if (bp && XFS_BUF_ADDR(bp) != d)
+			bp = (xfs_buf_t *)0;
+		if (!bp) {
+			/*
+			 * Need to get a new buffer.  Read it, then
+			 * set it in the cursor, releasing the old one.
+			 */
+			if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno,
+					agbno, 0, &bp, XFS_ALLOC_BTREE_REF)))
+				return error;
+			xfs_btree_setbuf(cur, level, bp);
+			/*
+			 * Point to the btree block, now that we have the buffer
+			 */
+			block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+			if ((error = xfs_btree_check_sblock(cur, block, level,
+					bp)))
+				return error;
+		} else
+			block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+		/*
+		 * If we already had a key match at a higher level, we know
+		 * we need to use the first entry in this block.
+		 */
+		if (diff == 0)
+			keyno = 1;
+		/*
+		 * Otherwise we need to search this block.  Do a binary search.
+		 */
+		else {
+			int		high;	/* high entry number */
+			xfs_alloc_key_t	*kkbase=NULL;/* base of keys in block */
+			xfs_alloc_rec_t	*krbase=NULL;/* base of records in block */
+			int		low;	/* low entry number */
+
+			/*
+			 * Get a pointer to keys or records.
+			 */
+			if (level > 0)
+				kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur);
+			else
+				krbase = XFS_ALLOC_REC_ADDR(block, 1, cur);
+			/*
+			 * Set low and high entry numbers, 1-based.
+			 */
+			low = 1;
+			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+				/*
+				 * If the block is empty, the tree must
+				 * be an empty leaf.
+				 */
+				ASSERT(level == 0 && cur->bc_nlevels == 1);
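+				/*
+				 * Position the cursor at 0 for an LE
+				 * lookup and at 1 otherwise; either way
+				 * the lookup fails since the tree is empty.
+				 */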
+				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
+				*stat = 0;
+				return 0;
+			}
+			/*
+			 * Binary search the block.
+			 */
+			while (low <= high) {
+				xfs_extlen_t	blockcount;	/* key value */
+				xfs_agblock_t	startblock;	/* key value */
+
+				XFS_STATS_INC(xs_abt_compare);
+				/*
+				 * keyno is average of low and high.
+				 */
+				keyno = (low + high) >> 1;
+				/*
+				 * Get startblock & blockcount.
+				 */
+				if (level > 0) {
+					xfs_alloc_key_t	*kkp;
+
+					kkp = kkbase + keyno - 1;
+					startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT);
+					blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT);
+				} else {
+					xfs_alloc_rec_t	*krp;
+
+					krp = krbase + keyno - 1;
+					startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT);
+					blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT);
+				}
+				/*
+				 * Compute difference to get next direction.
+				 */
+				if (cur->bc_btnum == XFS_BTNUM_BNO)
+					diff = (int)startblock -
+					       (int)cur->bc_rec.a.ar_startblock;
+				else if (!(diff = (int)blockcount -
+					    (int)cur->bc_rec.a.ar_blockcount))
+					diff = (int)startblock -
+					    (int)cur->bc_rec.a.ar_startblock;
+				/*
+				 * Less than, move right.
+				 */
+				if (diff < 0)
+					low = keyno + 1;
+				/*
+				 * Greater than, move left.
+				 */
+				else if (diff > 0)
+					high = keyno - 1;
+				/*
+				 * Equal, we're done.
+				 */
+				else
+					break;
+			}
+		}
+		/*
+		 * If there are more levels, set up for the next level
+		 * by getting the block number and filling in the cursor.
+		 */
+		if (level > 0) {
+			/*
+			 * If we moved left, need the previous key number,
+			 * unless there isn't one.
+			 */
+			if (diff > 0 && --keyno < 1)
+				keyno = 1;
+			agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT);
+#ifdef DEBUG
+			if ((error = xfs_btree_check_sptr(cur, agbno, level)))
+				return error;
+#endif
+			cur->bc_ptrs[level] = keyno;
+		}
+	}
+	/*
+	 * Done with the search.
+	 * See if we need to adjust the results.
+	 */
+	if (dir != XFS_LOOKUP_LE && diff < 0) {
+		keyno++;
+		/*
+		 * If ge search and we went off the end of the block, but it's
+		 * not the last block, we're in the wrong block.
+		 */
+		if (dir == XFS_LOOKUP_GE &&
+		    keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
+		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			int	i;
+
+			cur->bc_ptrs[0] = keyno;
+			if ((error = xfs_alloc_increment(cur, 0, &i)))
+				return error;
+			XFS_WANT_CORRUPTED_RETURN(i == 1);
+			*stat = 1;
+			return 0;
+		}
+	}
+	else if (dir == XFS_LOOKUP_LE && diff > 0)
+		keyno--;
+	cur->bc_ptrs[0] = keyno;
+	/*
+	 * Return if we succeeded or not.
+	 */
+	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		*stat = 0;
+	else
+		*stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
+	return 0;
+}
+
+/*
+ * Move 1 record left from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int				/* error */
+xfs_alloc_lshift(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to shift record on */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+#ifdef DEBUG
+	int			i;	/* loop index */
+#endif
+	xfs_alloc_key_t		key;	/* key value for leaf level upward */
+	xfs_buf_t		*lbp;	/* buffer for left neighbor block */
+	xfs_alloc_block_t	*left;	/* left neighbor btree block */
+	int			nrec;	/* new number of left block entries */
+	xfs_buf_t		*rbp;	/* buffer for right (current) block */
+	xfs_alloc_block_t	*right;	/* right (current) btree block */
+	xfs_alloc_key_t		*rkp=NULL;	/* key pointer for right block */
+	xfs_alloc_ptr_t		*rpp=NULL;	/* address pointer for right block */
+	xfs_alloc_rec_t		*rrp=NULL;	/* record pointer for right block */
+
+	/*
+	 * Set up variables for this block as "right".
+	 */
+	rbp = cur->bc_bufs[level];
+	right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+		return error;
+#endif
+	/*
+	 * If we've got no left sibling then we can't shift an entry left.
+	 */
+	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * If the cursor entry is the one that would be moved, don't
+	 * do it... it's too complicated.
+	 */
+	if (cur->bc_ptrs[level] <= 1) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Set up the left neighbor as "left".
+	 */
+	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp,
+			XFS_ALLOC_BTREE_REF)))
+		return error;
+	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+	/*
+	 * If it's full, it can't take another entry.
+	 */
+	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		*stat = 0;
+		return 0;
+	}
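+	/*
+	 * nrec is both left's new record count and the 1-based index
+	 * at which the shifted entry is placed.
+	 */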
+	nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	/*
+	 * If non-leaf, copy a key and a ptr to the left block.
+	 */
+	if (level > 0) {
+		xfs_alloc_key_t	*lkp;	/* key pointer for left block */
+		xfs_alloc_ptr_t	*lpp;	/* address pointer for left block */
+
+		lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur);
+		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
+		*lkp = *rkp;
+		xfs_alloc_log_keys(cur, lbp, nrec, nrec);
+		lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur);
+		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level)))
+			return error;
+#endif
+		*lpp = *rpp; /* INT_: copy */
+		xfs_alloc_log_ptrs(cur, lbp, nrec, nrec);
+		xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
+	}
+	/*
+	 * If leaf, copy a record to the left block.
+	 */
+	else {
+		xfs_alloc_rec_t	*lrp;	/* record pointer for left block */
+
+		lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur);
+		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
+		*lrp = *rrp;
+		xfs_alloc_log_recs(cur, lbp, nrec, nrec);
+		xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
+	}
+	/*
+	 * Bump and log left's numrecs, decrement and log right's numrecs.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1);
+	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
+	INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
+	/*
+	 * Slide the contents of right down one entry.
+	 */
+	if (level > 0) {
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT),
+					level)))
+				return error;
+		}
+#endif
+		memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	} else {
+		memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
+		key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+		rkp = &key;
+	}
+	/*
+	 * Update the parent key values of right.
+	 */
+	if ((error = xfs_alloc_updkey(cur, rkp, level + 1)))
+		return error;
+	/*
+	 * Slide the cursor value left one.
+	 */
+	cur->bc_ptrs[level]--;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Allocate a new root block, fill it in.
+ */
+STATIC int				/* error */
+xfs_alloc_newroot(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+	xfs_agblock_t		lbno;	/* left block number */
+	xfs_buf_t		*lbp;	/* left btree buffer */
+	xfs_alloc_block_t	*left;	/* left btree block */
+	xfs_mount_t		*mp;	/* mount structure */
+	xfs_agblock_t		nbno;	/* new block number */
+	xfs_buf_t		*nbp;	/* new (root) buffer */
+	xfs_alloc_block_t	*new;	/* new (root) btree block */
+	int			nptr;	/* new value for key index, 1 or 2 */
+	xfs_agblock_t		rbno;	/* right block number */
+	xfs_buf_t		*rbp;	/* right btree buffer */
+	xfs_alloc_block_t	*right;	/* right btree block */
+
+	mp = cur->bc_mp;
+
+	ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
+	/*
+	 * Get a buffer from the freelist blocks, for the new root.
+	 */
+	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+			&nbno)))
+		return error;
+	/*
+	 * None available, we fail.
+	 */
+	if (nbno == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	xfs_trans_agbtree_delta(cur->bc_tp, 1);
+	nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
+		0);
+	new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
+	/*
+	 * Set the root data in the a.g. freespace structure.
+	 */
+	{
+		xfs_agf_t	*agf;	/* a.g. freespace header */
+		xfs_agnumber_t	seqno;
+
+		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+		INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno);
+		INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1);
+		seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
+		mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
+		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
+			XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+	}
+	/*
+	 * At the previous root level there are now two blocks: the old
+	 * root, and the new block generated when it was split.
+	 * We don't know which one the cursor is pointing at, so we
+	 * set up variables "left" and "right" for each case.
+	 */
+	lbp = cur->bc_bufs[cur->bc_nlevels - 1];
+	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
+		return error;
+#endif
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		/*
+		 * Our block is left, pick up the right block.
+		 */
+		lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
+		rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.a.agno, rbno, 0, &rbp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+		if ((error = xfs_btree_check_sblock(cur, right,
+				cur->bc_nlevels - 1, rbp)))
+			return error;
+		nptr = 1;
+	} else {
+		/*
+		 * Our block is right, pick up the left block.
+		 */
+		rbp = lbp;
+		right = left;
+		rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
+		lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.a.agno, lbno, 0, &lbp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+		if ((error = xfs_btree_check_sblock(cur, left,
+				cur->bc_nlevels - 1, lbp)))
+			return error;
+		nptr = 2;
+	}
+	/*
+	 * Fill in the new block's btree header and log it.
+	 */
+	INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
+	INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels);
+	INT_SET(new->bb_numrecs, ARCH_CONVERT, 2);
+	INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
+	INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+	xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
+	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
+	/*
+	 * Fill in the key data in the new root.
+	 */
+	{
+		xfs_alloc_key_t		*kp;	/* btree key pointer */
+
+		kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
+		if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) {
+			kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */
+			kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */
+		} else {
+			xfs_alloc_rec_t	*rp;	/* btree record pointer */
+
+			rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
+			kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */
+			kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */
+			rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
+			kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */
+			kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */
+		}
+	}
+	xfs_alloc_log_keys(cur, nbp, 1, 2);
+	/*
+	 * Fill in the pointer data in the new root.
+	 */
+	{
+		xfs_alloc_ptr_t		*pp;	/* btree address pointer */
+
+		pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
+		INT_SET(pp[0], ARCH_CONVERT, lbno);
+		INT_SET(pp[1], ARCH_CONVERT, rbno);
+	}
+	xfs_alloc_log_ptrs(cur, nbp, 1, 2);
+	/*
+	 * Fix up the cursor.
+	 */
+	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
+	cur->bc_ptrs[cur->bc_nlevels] = nptr;
+	cur->bc_nlevels++;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Move 1 record right from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int				/* error */
+xfs_alloc_rshift(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to shift record on */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_alloc_key_t		key;	/* key value for leaf level upward */
+	xfs_buf_t		*lbp;	/* buffer for left (current) block */
+	xfs_alloc_block_t	*left;	/* left (current) btree block */
+	xfs_buf_t		*rbp;	/* buffer for right neighbor block */
+	xfs_alloc_block_t	*right;	/* right neighbor btree block */
+	xfs_alloc_key_t		*rkp;	/* key pointer for right block */
+	xfs_btree_cur_t		*tcur;	/* temporary cursor */
+
+	/*
+	 * Set up variables for this block as "left".
+	 */
+	lbp = cur->bc_bufs[level];
+	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+#endif
+	/*
+	 * If we've got no right sibling then we can't shift an entry right.
+	 */
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * If the cursor entry is the one that would be moved, don't
+	 * do it... it's too complicated.
+	 */
+	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Set up the right neighbor as "right".
+	 */
+	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp,
+			XFS_ALLOC_BTREE_REF)))
+		return error;
+	right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+		return error;
+	/*
+	 * If it's full, it can't take another entry.
+	 */
+	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Make a hole at the start of the right neighbor block, then
+	 * copy the last left block entry to the hole.
+	 */
+	if (level > 0) {
+		xfs_alloc_key_t	*lkp;	/* key pointer for left block */
+		xfs_alloc_ptr_t	*lpp;	/* address pointer for left block */
+		xfs_alloc_ptr_t	*rpp;	/* address pointer for right block */
+
+		lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
+		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
+			return error;
+#endif
+		*rkp = *lkp; /* INT_: copy */
+		*rpp = *lpp; /* INT_: copy */
+		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
+	} else {
+		xfs_alloc_rec_t	*lrp;	/* record pointer for left block */
+		xfs_alloc_rec_t	*rrp;	/* record pointer for right block */
+
+		lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
+		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		*rrp = *lrp;
+		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
+		key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+		rkp = &key;
+		xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
+	}
+	/*
+	 * Decrement and log left's numrecs, bump and log right's numrecs.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
+	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
+	/*
+	 * Using a temporary cursor, update the parent key values of the
+	 * block on the right.
+	 */
+	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
+		return error;
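+	/*
+	 * Point the temp cursor at the last entry of this block, so that
+	 * incrementing it moves into the right sibling whose parent key
+	 * we're updating.
+	 */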
+	i = xfs_btree_lastrec(tcur, level);
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	if ((error = xfs_alloc_increment(tcur, level, &i)) ||
+	    (error = xfs_alloc_updkey(tcur, rkp, level + 1)))
+		goto error0;
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	*stat = 1;
+	return 0;
+error0:
+	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Split cur/level block in half.
+ * Return new block number and its first record (to be inserted into parent).
+ */
+STATIC int				/* error */
+xfs_alloc_split(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to split */
+	xfs_agblock_t		*bnop,	/* output: block number allocated */
+	xfs_alloc_key_t		*keyp,	/* output: first key of new block */
+	xfs_btree_cur_t		**curp,	/* output: new cursor */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+	int			i;	/* loop index/record number */
+	xfs_agblock_t		lbno;	/* left (current) block number */
+	xfs_buf_t		*lbp;	/* buffer for left block */
+	xfs_alloc_block_t	*left;	/* left (current) btree block */
+	xfs_agblock_t		rbno;	/* right (new) block number */
+	xfs_buf_t		*rbp;	/* buffer for right block */
+	xfs_alloc_block_t	*right;	/* right (new) btree block */
+
+	/*
+	 * Allocate the new block from the freelist.
+	 * If we can't do it, we're toast.  Give up.
+	 */
+	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+			&rbno)))
+		return error;
+	if (rbno == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	xfs_trans_agbtree_delta(cur->bc_tp, 1);
+	rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno,
+		rbno, 0);
+	/*
+	 * Set up the new block as "right".
+	 */
+	right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+	/*
+	 * "Left" is the current (according to the cursor) block.
+	 */
+	lbp = cur->bc_bufs[level];
+	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+#endif
+	/*
+	 * Fill in the btree header for the new block.
+	 */
+	INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
+	right->bb_level = left->bb_level; /* INT_: direct copy */
+	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	/*
+	 * Make sure that if there's an odd number of entries now, each
+	 * new block will have the same number of entries.
+	 */
+	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
+	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
+		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
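+	/*
+	 * i is the 1-based index in the left block of the first entry
+	 * that moves to the new right block.
+	 */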
+	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	/*
+	 * For non-leaf blocks, copy keys and addresses over to the new block.
+	 */
+	if (level > 0) {
+		xfs_alloc_key_t	*lkp;	/* left btree key pointer */
+		xfs_alloc_ptr_t	*lpp;	/* left btree address pointer */
+		xfs_alloc_key_t	*rkp;	/* right btree key pointer */
+		xfs_alloc_ptr_t	*rpp;	/* right btree address pointer */
+
+		lkp = XFS_ALLOC_KEY_ADDR(left, i, cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, i, cur);
+		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
+		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */
+		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */
+		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		*keyp = *rkp;
+	}
+	/*
+	 * For leaf blocks, copy records over to the new block.
+	 */
+	else {
+		xfs_alloc_rec_t	*lrp;	/* left btree record pointer */
+		xfs_alloc_rec_t	*rrp;	/* right btree record pointer */
+
+		lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
+		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
+		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
+		keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+	}
+	/*
+	 * Find the left block number by looking in the buffer.
+	 * Adjust numrecs, sibling pointers.
+	 */
+	lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
+	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
+	INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno);
+	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
+	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+	/*
+	 * If there's a block to the new block's right, make that block
+	 * point back to right instead of to left.
+	 */
+	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		xfs_alloc_block_t	*rrblock;	/* rr btree block */
+		xfs_buf_t		*rrbp;		/* buffer for rrblock */
+
+		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+				cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0,
+				&rrbp, XFS_ALLOC_BTREE_REF)))
+			return error;
+		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
+		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
+			return error;
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno);
+		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
+	}
+	/*
+	 * If the cursor is really in the right block, move it there.
+	 * If it's just pointing past the last entry in left, then we'll
+	 * insert there, so don't change anything in that case.
+	 */
+	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+		xfs_btree_setbuf(cur, level, rbp);
+		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	/*
+	 * If there are more levels, we'll need another cursor which refers to
+	 * the right block, no matter where this cursor was.
+	 */
+	if (level + 1 < cur->bc_nlevels) {
+		if ((error = xfs_btree_dup_cursor(cur, curp)))
+			return error;
+		(*curp)->bc_ptrs[level + 1]++;
+	}
+	*bnop = rbno;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Update keys at all levels from here to the root along the cursor's path.
+ */
+STATIC int				/* error */
+xfs_alloc_updkey(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_alloc_key_t		*keyp,	/* new key value to update to */
+	int			level)	/* starting level for update */
+{
+	int			ptr;	/* index of key in block */
+
+	/*
+	 * Go up the tree from this level toward the root.
+	 * At each level, update the key value to the value input.
+	 * Stop when we reach a level where the cursor isn't pointing
+	 * at the first entry in the block.
+	 */
+	for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+		xfs_alloc_block_t	*block;	/* btree block */
+		xfs_buf_t		*bp;	/* buffer for block */
+#ifdef DEBUG
+		int			error;	/* error return value */
+#endif
+		xfs_alloc_key_t		*kp;	/* ptr to btree block keys */
+
+		bp = cur->bc_bufs[level];
+		block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+			return error;
+#endif
+		ptr = cur->bc_ptrs[level];
+		kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
+		*kp = *keyp;
+		xfs_alloc_log_keys(cur, bp, ptr, ptr);
+	}
+	return 0;
+}
+
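+/*
+ * Illustrative example (not part of the original source): if the first
+ * record in a leaf block changes from [10, 4] to [12, 2], the key for
+ * that leaf in its parent must be rewritten to [12, 2]; if the leaf is
+ * also its parent's first child, the grandparent's key is rewritten as
+ * well, and so on up the cursor's path until a non-first entry stops
+ * the propagation.  That is the loop performed by xfs_alloc_updkey().
+ */
+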
+/*
+ * Externally visible routines.
+ */
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_alloc_decrement(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat)	/* success/failure */
+{
+	xfs_alloc_block_t	*block;	/* btree block */
+	int			error;	/* error return value */
+	int			lev;	/* btree level */
+
+	ASSERT(level < cur->bc_nlevels);
+	/*
+	 * Read-ahead to the left at this level.
+	 */
+	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
+	/*
+	 * Decrement the ptr at this level.  If we're still in the block
+	 * then we're done.
+	 */
+	if (--cur->bc_ptrs[level] > 0) {
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * Get a pointer to the btree block.
+	 */
+	block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level,
+			cur->bc_bufs[level])))
+		return error;
+#endif
+	/*
+	 * If we just went off the left edge of the tree, return failure.
+	 */
+	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * March up the tree decrementing pointers.
+	 * Stop when we don't go off the left edge of a block.
+	 */
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		if (--cur->bc_ptrs[lev] > 0)
+			break;
+		/*
+		 * Read-ahead the left block, we're going to read it
+		 * in the next loop.
+		 */
+		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
+	}
+	/*
+	 * If we went off the root then we are seriously confused.
+	 */
+	ASSERT(lev < cur->bc_nlevels);
+	/*
+	 * Now walk back down the tree, fixing up the cursor's buffer
+	 * pointers and key numbers.
+	 */
+	for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) {
+		xfs_agblock_t	agbno;	/* block number of btree block */
+		xfs_buf_t	*bp;	/* buffer pointer for block */
+
+		agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+				cur->bc_private.a.agno, agbno, 0, &bp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Delete the record pointed to by cur.
+ * The cursor refers to the place where the record was (could be inserted)
+ * when the operation returns.
+ */
+int					/* error */
+xfs_alloc_delete(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		*stat)		/* success/failure */
+{
+	int		error;		/* error return value */
+	int		i;		/* result code */
+	int		level;		/* btree level */
+
+	/*
+	 * Go up the tree, starting at leaf level.
+	 * If 2 is returned then a join was done; go to the next level.
+	 * Otherwise we are done.
+	 */
+	for (level = 0, i = 2; i == 2; level++) {
+		if ((error = xfs_alloc_delrec(cur, level, &i)))
+			return error;
+	}
+	if (i == 0) {
+		for (level = 1; level < cur->bc_nlevels; level++) {
+			if (cur->bc_ptrs[level] == 0) {
+				if ((error = xfs_alloc_decrement(cur, level, &i)))
+					return error;
+				break;
+			}
+		}
+	}
+	*stat = i;
+	return 0;
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_alloc_get_rec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_agblock_t		*bno,	/* output: starting block of extent */
+	xfs_extlen_t		*len,	/* output: length of extent */
+	int			*stat)	/* output: success/failure */
+{
+	xfs_alloc_block_t	*block;	/* btree block */
+#ifdef DEBUG
+	int			error;	/* error return value */
+#endif
+	int			ptr;	/* record number */
+
+	ptr = cur->bc_ptrs[0];
+	block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
+		return error;
+#endif
+	/*
+	 * Off the right end or left end, return failure.
+	 */
+	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Point to the record and extract its data.
+	 */
+	{
+		xfs_alloc_rec_t		*rec;	/* record data */
+
+		rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
+		*bno = INT_GET(rec->ar_startblock, ARCH_CONVERT);
+		*len = INT_GET(rec->ar_blockcount, ARCH_CONVERT);
+	}
+	*stat = 1;
+	return 0;
+}
+
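+/*
+ * Illustrative sketch (not part of the original source): a typical walk
+ * over the free space records combines xfs_alloc_lookup_ge(),
+ * xfs_alloc_get_rec() and xfs_alloc_increment(); "cur" is assumed to be
+ * a cursor on one of the two freespace btrees:
+ *
+ *	xfs_agblock_t	bno;
+ *	xfs_extlen_t	len;
+ *	int		i;
+ *
+ *	error = xfs_alloc_lookup_ge(cur, 0, 0, &i);
+ *	while (!error && i) {
+ *		error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ *		if (error || !i)
+ *			break;
+ *		// ... use the free extent [bno, len] ...
+ *		error = xfs_alloc_increment(cur, 0, &i);
+ *	}
+ */
+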
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_alloc_increment(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat)	/* success/failure */
+{
+	xfs_alloc_block_t	*block;	/* btree block */
+	xfs_buf_t		*bp;	/* tree block buffer */
+	int			error;	/* error return value */
+	int			lev;	/* btree level */
+
+	ASSERT(level < cur->bc_nlevels);
+	/*
+	 * Read-ahead to the right at this level.
+	 */
+	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+	/*
+	 * Get a pointer to the btree block.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+#endif
+	/*
+	 * Increment the ptr at this level.  If we're still in the block
+	 * then we're done.
+	 */
+	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * If we just went off the right edge of the tree, return failure.
+	 */
+	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * March up the tree incrementing pointers.
+	 * Stop when we don't go off the right edge of a block.
+	 */
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		bp = cur->bc_bufs[lev];
+		block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+#endif
+		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			break;
+		/*
+		 * Read-ahead the right block, we're going to read it
+		 * in the next loop.
+		 */
+		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
+	}
+	/*
+	 * If we went off the root then we are seriously confused.
+	 */
+	ASSERT(lev < cur->bc_nlevels);
+	/*
+	 * Now walk back down the tree, fixing up the cursor's buffer
+	 * pointers and key numbers.
+	 */
+	for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+	     lev > level; ) {
+		xfs_agblock_t	agbno;	/* block number of btree block */
+
+		agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+				cur->bc_private.a.agno, agbno, 0, &bp,
+				XFS_ALLOC_BTREE_REF)))
+			return error;
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+		cur->bc_ptrs[lev] = 1;
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Insert the current record at the point referenced by cur.
+ * The cursor may be inconsistent on return if splits have been done.
+ */
+int					/* error */
+xfs_alloc_insert(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		*stat)		/* success/failure */
+{
+	int		error;		/* error return value */
+	int		i;		/* result value, 0 for failure */
+	int		level;		/* current level number in btree */
+	xfs_agblock_t	nbno;		/* new block number (split result) */
+	xfs_btree_cur_t	*ncur;		/* new cursor (split result) */
+	xfs_alloc_rec_t	nrec;		/* record being inserted this level */
+	xfs_btree_cur_t	*pcur;		/* previous level's cursor */
+
+	level = 0;
+	nbno = NULLAGBLOCK;
+	INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock);
+	INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount);
+	ncur = (xfs_btree_cur_t *)0;
+	pcur = cur;
+	/*
+	 * Loop going up the tree, starting at the leaf level.
+	 * Stop when we don't get a split block, that must mean that
+	 * the insert is finished with this level.
+	 */
+	do {
+		/*
+		 * Insert nrec/nbno into this level of the tree.
+		 * Note if we fail, nbno will be null.
+		 */
+		if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
+				&i))) {
+			if (pcur != cur)
+				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
+			return error;
+		}
+		/*
+		 * See if the cursor we just used is trash.
+		 * Can't trash the caller's cursor, but otherwise we should
+		 * if ncur is a new cursor or we're about to be done.
+		 */
+		if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
+			cur->bc_nlevels = pcur->bc_nlevels;
+			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
+		}
+		/*
+		 * If we got a new cursor, switch to it.
+		 */
+		if (ncur) {
+			pcur = ncur;
+			ncur = (xfs_btree_cur_t *)0;
+		}
+	} while (nbno != NULLAGBLOCK);
+	*stat = i;
+	return 0;
+}
+
+/*
+ * Lookup the record equal to [bno, len] in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_eq(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agblock_t	bno,		/* starting block of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.a.ar_startblock = bno;
+	cur->bc_rec.a.ar_blockcount = len;
+	return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Lookup the first record greater than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_ge(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agblock_t	bno,		/* starting block of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.a.ar_startblock = bno;
+	cur->bc_rec.a.ar_blockcount = len;
+	return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
+/*
+ * Lookup the first record less than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_le(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agblock_t	bno,		/* starting block of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.a.ar_startblock = bno;
+	cur->bc_rec.a.ar_blockcount = len;
+	return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Update the record referred to by cur, to the value given by [bno, len].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+int					/* error */
+xfs_alloc_update(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len)	/* length of extent */
+{
+	xfs_alloc_block_t	*block;	/* btree block to update */
+	int			error;	/* error return value */
+	int			ptr;	/* current record number (updating) */
+
+	ASSERT(len > 0);
+	/*
+	 * Pick up the a.g. freelist struct and the current block.
+	 */
+	block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
+		return error;
+#endif
+	/*
+	 * Get the address of the rec to be updated.
+	 */
+	ptr = cur->bc_ptrs[0];
+	{
+		xfs_alloc_rec_t		*rp;	/* pointer to updated record */
+
+		rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
+		/*
+		 * Fill in the new contents and log them.
+		 */
+		INT_SET(rp->ar_startblock, ARCH_CONVERT, bno);
+		INT_SET(rp->ar_blockcount, ARCH_CONVERT, len);
+		xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr);
+	}
+	/*
+	 * If it's the by-size btree and it's the last leaf block and
+	 * it's the last record... then update the size of the longest
+	 * extent in the a.g., which we cache in the a.g. freelist header.
+	 */
+	if (cur->bc_btnum == XFS_BTNUM_CNT &&
+	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
+	    ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		xfs_agf_t	*agf;	/* a.g. freespace header */
+		xfs_agnumber_t	seqno;
+
+		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+		seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
+		cur->bc_mp->m_perag[seqno].pagf_longest = len;
+		INT_SET(agf->agf_longest, ARCH_CONVERT, len);
+		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
+			XFS_AGF_LONGEST);
+	}
+	/*
+	 * Updating first record in leaf. Pass new key value up to our parent.
+	 */
+	if (ptr == 1) {
+		xfs_alloc_key_t	key;	/* key containing [bno, len] */
+
+		INT_SET(key.ar_startblock, ARCH_CONVERT, bno);
+		INT_SET(key.ar_blockcount, ARCH_CONVERT, len);
+		if ((error = xfs_alloc_updkey(cur, &key, 1)))
+			return error;
+	}
+	return 0;
+}
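+
+/*
+ * Illustrative sketch (not part of the original source): the lookup and
+ * update entry points above are typically used together, e.g. to change
+ * the length recorded for an existing free extent.  "bno", "len" and
+ * "newlen" stand for caller-supplied values:
+ *
+ *	int	i;
+ *
+ *	error = xfs_alloc_lookup_eq(cur, bno, len, &i);
+ *	if (!error && i)
+ *		error = xfs_alloc_update(cur, bno, newlen);
+ */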
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
new file mode 100644
index 0000000..ed5161a
--- /dev/null
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ALLOC_BTREE_H__
+#define	__XFS_ALLOC_BTREE_H__
+
+/*
+ * Freespace on-disk structures
+ */
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_btree_sblock;
+struct xfs_mount;
+
+/*
+ * There are two on-disk btrees, one sorted by blockno and one sorted
+ * by blockcount and blockno.  All blocks look the same to make the code
+ * simpler; if we have time later, we'll make the optimizations.
+ */
+#define	XFS_ABTB_MAGIC	0x41425442	/* 'ABTB' for bno tree */
+#define	XFS_ABTC_MAGIC	0x41425443	/* 'ABTC' for cnt tree */
+
+/*
+ * Data record/key structure
+ */
+typedef struct xfs_alloc_rec
+{
+	xfs_agblock_t	ar_startblock;	/* starting block number */
+	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
+} xfs_alloc_rec_t, xfs_alloc_key_t;
+
+typedef xfs_agblock_t xfs_alloc_ptr_t;	/* btree pointer type */
+					/* btree block header type */
+typedef	struct xfs_btree_sblock xfs_alloc_block_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_ALLOC_BLOCK)
+xfs_alloc_block_t *xfs_buf_to_alloc_block(struct xfs_buf *bp);
+#define	XFS_BUF_TO_ALLOC_BLOCK(bp)	xfs_buf_to_alloc_block(bp)
+#else
+#define	XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)(XFS_BUF_PTR(bp)))
+#endif
+
+/*
+ * Real block structures have a size equal to the disk block size.
+ */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_SIZE)
+int xfs_alloc_block_size(int lev, struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_BLOCK_SIZE(lev,cur)	xfs_alloc_block_size(lev,cur)
+#else
+#define	XFS_ALLOC_BLOCK_SIZE(lev,cur)	(1 << (cur)->bc_blocklog)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MAXRECS)
+int xfs_alloc_block_maxrecs(int lev, struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_BLOCK_MAXRECS(lev,cur)	xfs_alloc_block_maxrecs(lev,cur)
+#else
+#define	XFS_ALLOC_BLOCK_MAXRECS(lev,cur)	\
+	((cur)->bc_mp->m_alloc_mxr[lev != 0])
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MINRECS)
+int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_BLOCK_MINRECS(lev,cur)	xfs_alloc_block_minrecs(lev,cur)
+#else
+#define	XFS_ALLOC_BLOCK_MINRECS(lev,cur)	\
+	((cur)->bc_mp->m_alloc_mnr[lev != 0])
+#endif
+
+/*
+ * Minimum and maximum blocksize and sectorsize.
+ * The blocksize upper limit is pretty much arbitrary.
+ * The sectorsize upper limit is due to sizeof(sb_sectsize).
+ */
+#define XFS_MIN_BLOCKSIZE_LOG	9	/* i.e. 512 bytes */
+#define XFS_MAX_BLOCKSIZE_LOG	16	/* i.e. 65536 bytes */
+#define XFS_MIN_BLOCKSIZE	(1 << XFS_MIN_BLOCKSIZE_LOG)
+#define XFS_MAX_BLOCKSIZE	(1 << XFS_MAX_BLOCKSIZE_LOG)
+#define XFS_MIN_SECTORSIZE_LOG	9	/* i.e. 512 bytes */
+#define XFS_MAX_SECTORSIZE_LOG	15	/* i.e. 32768 bytes */
+#define XFS_MIN_SECTORSIZE	(1 << XFS_MIN_SECTORSIZE_LOG)
+#define XFS_MAX_SECTORSIZE	(1 << XFS_MAX_SECTORSIZE_LOG)
+
+/*
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK)
+xfs_agblock_t xfs_bno_block(struct xfs_mount *mp);
+#define	XFS_BNO_BLOCK(mp)	xfs_bno_block(mp)
+#else
+#define	XFS_BNO_BLOCK(mp)	((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CNT_BLOCK)
+xfs_agblock_t xfs_cnt_block(struct xfs_mount *mp);
+#define	XFS_CNT_BLOCK(mp)	xfs_cnt_block(mp)
+#else
+#define	XFS_CNT_BLOCK(mp)	((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
+#endif
+
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_REC_ADDR)
+xfs_alloc_rec_t *xfs_alloc_rec_addr(xfs_alloc_block_t *bb, int i,
+				    struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_REC_ADDR(bb,i,cur)	xfs_alloc_rec_addr(bb,i,cur)
+#else
+#define	XFS_ALLOC_REC_ADDR(bb,i,cur)	\
+	XFS_BTREE_REC_ADDR(XFS_ALLOC_BLOCK_SIZE(0,cur), xfs_alloc, bb, i, \
+		XFS_ALLOC_BLOCK_MAXRECS(0, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_KEY_ADDR)
+xfs_alloc_key_t *xfs_alloc_key_addr(xfs_alloc_block_t *bb, int i,
+				    struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_KEY_ADDR(bb,i,cur)	xfs_alloc_key_addr(bb,i,cur)
+#else
+#define	XFS_ALLOC_KEY_ADDR(bb,i,cur)	\
+	XFS_BTREE_KEY_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \
+		XFS_ALLOC_BLOCK_MAXRECS(1, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_PTR_ADDR)
+xfs_alloc_ptr_t *xfs_alloc_ptr_addr(xfs_alloc_block_t *bb, int i,
+				    struct xfs_btree_cur *cur);
+#define	XFS_ALLOC_PTR_ADDR(bb,i,cur)	xfs_alloc_ptr_addr(bb,i,cur)
+#else
+#define	XFS_ALLOC_PTR_ADDR(bb,i,cur)	\
+	XFS_BTREE_PTR_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \
+		XFS_ALLOC_BLOCK_MAXRECS(1, cur))
+#endif
+
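+/*
+ * Illustrative sketch (not part of the original source): leaf blocks
+ * (level 0) hold records, while higher-level blocks hold key/pointer
+ * pairs; the address macros above locate entry "i" (1-based) within a
+ * block:
+ *
+ *	xfs_alloc_rec_t	*rp = XFS_ALLOC_REC_ADDR(block, i, cur);  // leaf
+ *	xfs_alloc_key_t	*kp = XFS_ALLOC_KEY_ADDR(block, i, cur);  // non-leaf
+ *	xfs_alloc_ptr_t	*pp = XFS_ALLOC_PTR_ADDR(block, i, cur);  // non-leaf
+ */
+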
+/*
+ * Prototypes for externally visible routines.
+ */
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_alloc_decrement(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat);	/* success/failure */
+
+/*
+ * Delete the record pointed to by cur.
+ * The cursor refers to the place where the record was (could be inserted)
+ * when the operation returns.
+ */
+int					/* error */
+xfs_alloc_delete(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_alloc_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		*bno,	/* output: starting block of extent */
+	xfs_extlen_t		*len,	/* output: length of extent */
+	int			*stat);	/* output: success/failure */
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_alloc_increment(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat);	/* success/failure */
+
+/*
+ * Insert the current record at the point referenced by cur.
+ * The cursor may be inconsistent on return if splits have been done.
+ */
+int					/* error */
+xfs_alloc_insert(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the record equal to [bno, len] in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_eq(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the first record greater than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_ge(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the first record less than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_alloc_lookup_le(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+/*
+ * Update the record referred to by cur, to the value given by [bno, len].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+int					/* error */
+xfs_alloc_update(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len);	/* length of extent */
+
+#endif	/* __XFS_ALLOC_BTREE_H__ */
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
new file mode 100644
index 0000000..ae35189
--- /dev/null
+++ b/fs/xfs/xfs_arch.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ARCH_H__
+#define __XFS_ARCH_H__
+
+#ifndef XFS_BIG_INUMS
+# error XFS_BIG_INUMS must be defined true or false
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+# define __BYTE_ORDER	__LITTLE_ENDIAN
+#endif
+#ifdef __BIG_ENDIAN
+# define __BYTE_ORDER	__BIG_ENDIAN
+#endif
+
+#endif	/* __KERNEL__ */
+
+/* do we need conversion? */
+
+#define ARCH_NOCONVERT 1
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define ARCH_CONVERT	0
+#else
+# define ARCH_CONVERT	ARCH_NOCONVERT
+#endif
+
+/* generic swapping macros */
+
+#ifndef HAVE_SWABMACROS
+#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
+#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
+#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
+#endif
+
+#define INT_SWAP(type, var) \
+    ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
+    ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
+    ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
+    (var))))
+
+/*
+ * get and set integers from potentially unaligned locations
+ */
+
+#define INT_GET_UNALIGNED_16_BE(pointer) \
+   ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
+#define INT_SET_UNALIGNED_16_BE(pointer,value) \
+    { \
+	((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
+	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
+    }
+
+/* define generic INT_ macros */
+
+#define INT_GET(reference,arch) \
+    (((arch) == ARCH_NOCONVERT) \
+	? \
+	    (reference) \
+	: \
+	    INT_SWAP((reference),(reference)) \
+    )
+
+/* does not return a value */
+#define INT_SET(reference,arch,valueref) \
+    (__builtin_constant_p(valueref) ? \
+	(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
+	(void)( \
+	    ((reference) = (valueref)), \
+	    ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
+	) \
+    )
+
+/* does not return a value */
+#define INT_MOD_EXPR(reference,arch,code) \
+    (((arch) == ARCH_NOCONVERT) \
+	? \
+	    (void)((reference) code) \
+	: \
+	    (void)( \
+		(reference) = INT_GET((reference),arch) , \
+		((reference) code), \
+		INT_SET(reference, arch, reference) \
+	    ) \
+    )
+
+/* does not return a value */
+#define INT_MOD(reference,arch,delta) \
+    (void)( \
+	INT_MOD_EXPR(reference,arch,+=(delta)) \
+    )
+
+/*
+ * INT_COPY - copy a value between two locations with the
+ *	      _same architecture_ but _potentially different sizes_
+ *
+ *	    if the types of the two parameters are equal or they are
+ *		in native architecture, a simple copy is done
+ *
+ *	    otherwise, architecture conversions are done
+ *
+ */
+
+/* does not return a value */
+#define INT_COPY(dst,src,arch) \
+    ( \
+	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
+	    ? \
+		(void)((dst) = (src)) \
+	    : \
+		INT_SET(dst, arch, INT_GET(src, arch)) \
+    )
+
+/*
+ * INT_XLATE - copy a value in either direction between two locations
+ *	       with different architectures
+ *
+ *		    dir < 0	- copy from memory to buffer (native to arch)
+ *		    dir > 0	- copy from buffer to memory (arch to native)
+ */
+
+/* does not return a value */
+#define INT_XLATE(buf,mem,dir,arch) {\
+    ASSERT(dir); \
+    if (dir>0) { \
+	(mem)=INT_GET(buf, arch); \
+    } else { \
+	INT_SET(buf, arch, mem); \
+    } \
+}
+
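+/*
+ * Illustrative sketch (not part of the original source): on-disk fields
+ * are accessed through the macros above so that the same code works on
+ * both little- and big-endian hosts, e.g. for a btree block header
+ * pointer "bb" (a stand-in name):
+ *
+ *	nrecs = INT_GET(bb->bb_numrecs, ARCH_CONVERT);	// disk -> host order
+ *	INT_SET(bb->bb_numrecs, ARCH_CONVERT, nrecs);	// host -> disk order
+ *	INT_MOD(bb->bb_numrecs, ARCH_CONVERT, 1);	// read-modify-write
+ */
+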
+/*
+ * In directories, inode numbers are stored on disk as unaligned arrays
+ * of unsigned 8-bit integers.
+ *
+ * For v1 directories, or v2 directories that contain inode numbers that
+ * do not fit into 32 bits, the array has eight members, but the first
+ * member is always zero:
+ *
+ *  |unused|48-55|40-47|32-39|24-31|16-23| 8-15| 0- 7|
+ *
+ * For v2 directories that only contain entries with inode numbers that
+ * fit into 32 bits, a four-member array is used:
+ *
+ *  |24-31|16-23| 8-15| 0- 7|
+ */
+
+#define XFS_GET_DIR_INO4(di) \
+	(((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+
+#define XFS_PUT_DIR_INO4(from, di) \
+do { \
+	(di).i[0] = (((from) & 0xff000000ULL) >> 24); \
+	(di).i[1] = (((from) & 0x00ff0000ULL) >> 16); \
+	(di).i[2] = (((from) & 0x0000ff00ULL) >> 8); \
+	(di).i[3] = ((from) & 0x000000ffULL); \
+} while (0)
+
+#define XFS_DI_HI(di) \
+	(((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+#define XFS_DI_LO(di) \
+	(((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
+
+#define XFS_GET_DIR_INO8(di)        \
+	(((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
+	 ((xfs_ino_t)XFS_DI_HI(di) << 32))
+
+#define XFS_PUT_DIR_INO8(from, di) \
+do { \
+	(di).i[0] = 0; \
+	(di).i[1] = (((from) & 0x00ff000000000000ULL) >> 48); \
+	(di).i[2] = (((from) & 0x0000ff0000000000ULL) >> 40); \
+	(di).i[3] = (((from) & 0x000000ff00000000ULL) >> 32); \
+	(di).i[4] = (((from) & 0x00000000ff000000ULL) >> 24); \
+	(di).i[5] = (((from) & 0x0000000000ff0000ULL) >> 16); \
+	(di).i[6] = (((from) & 0x000000000000ff00ULL) >> 8); \
+	(di).i[7] = ((from) & 0x00000000000000ffULL); \
+} while (0)
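+
+/*
+ * Illustrative sketch (not part of the original source): the GET/PUT
+ * macros are inverses of one another; packing a native inode number and
+ * unpacking it again yields the original value.  "ino" (an xfs_ino_t)
+ * and "dir_ino" (the on-disk byte array) are stand-ins for caller
+ * variables:
+ *
+ *	XFS_PUT_DIR_INO8(ino, dir_ino);		// pack into unaligned bytes
+ *	ASSERT(XFS_GET_DIR_INO8(dir_ino) == ino);
+ */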
+
+#endif	/* __XFS_ARCH_H__ */
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
new file mode 100644
index 0000000..ee8b590
--- /dev/null
+++ b/fs/xfs/xfs_attr.c
@@ -0,0 +1,2660 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_quota.h"
+#include "xfs_rw.h"
+#include "xfs_trans_space.h"
+#include "xfs_acl.h"
+
+/*
+ * xfs_attr.c
+ *
+ * Provide the external interfaces to manage attribute lists.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Internal routines when attribute list fits inside the inode.
+ */
+STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
+
+/*
+ * Internal routines when attribute list is one block.
+ */
+STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
+STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
+STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
+
+/*
+ * Internal routines when attribute list is more than one block.
+ */
+STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
+STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
+STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
+STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
+STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
+
+/*
+ * Routines to manipulate out-of-line attribute values.
+ */
+STATIC int xfs_attr_rmtval_get(xfs_da_args_t *args);
+STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args);
+STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
+
+#define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */
+
+#if defined(XFS_ATTR_TRACE)
+ktrace_t *xfs_attr_trace_buf;
+#endif
+
+
+/*========================================================================
+ * Overall external interface routines.
+ *========================================================================*/
+
+int
+xfs_attr_fetch(xfs_inode_t *ip, char *name, int namelen,
+	       char *value, int *valuelenp, int flags, struct cred *cred)
+{
+	xfs_da_args_t   args;
+	int             error;
+
+	if ((XFS_IFORK_Q(ip) == 0) ||
+	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     ip->i_d.di_anextents == 0))
+		return(ENOATTR);
+
+	if (!(flags & (ATTR_KERNACCESS|ATTR_SECURE))) {
+		if ((error = xfs_iaccess(ip, S_IRUSR, cred)))
+			return(XFS_ERROR(error));
+	}
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.value = value;
+	args.valuelen = *valuelenp;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = ip;
+	args.whichfork = XFS_ATTR_FORK;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (XFS_IFORK_Q(ip) == 0 ||
+	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     ip->i_d.di_anextents == 0)) {
+		error = XFS_ERROR(ENOATTR);
+	} else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_attr_shortform_getvalue(&args);
+	} else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_get(&args);
+	} else {
+		error = xfs_attr_node_get(&args);
+	}
+
+	/*
+	 * Return the number of bytes in the value to the caller.
+	 */
+	*valuelenp = args.valuelen;
+
+	if (error == EEXIST)
+		error = 0;
+	return(error);
+}
+
+int
+xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
+	     int flags, struct cred *cred)
+{
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	int		error, namelen;
+
+	XFS_STATS_INC(xs_attr_get);
+
+	if (!name)
+		return(EINVAL);
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return(EFAULT);		/* match IRIX behaviour */
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return(EIO);
+
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return(error);
+}
+
+/*ARGSUSED*/
+int								/* error */
+xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
+		     struct cred *cred)
+{
+	xfs_da_args_t	args;
+	xfs_inode_t	*dp;
+	xfs_fsblock_t	firstblock;
+	xfs_bmap_free_t flist;
+	int		error, err2, committed;
+	int		local, size;
+	uint		nblks;
+	xfs_mount_t	*mp;
+	int             rsvd = (flags & ATTR_ROOT) != 0;
+	int             namelen;
+
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return EFAULT;		/* match IRIX behaviour */
+
+	XFS_STATS_INC(xs_attr_set);
+
+	dp = XFS_BHVTOI(bdp);
+	mp = dp->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return (EIO);
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if (!(flags & ATTR_SECURE) &&
+	     (error = xfs_iaccess(dp, S_IWUSR, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Attach the dquots to the inode.
+	 */
+	if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
+		return (error);
+
+	/*
+	 * If the inode doesn't have an attribute fork, add one.
+	 * (inode must not be locked when we call this routine)
+	 */
+	if (XFS_IFORK_Q(dp) == 0) {
+		error = xfs_bmap_add_attrfork(dp, rsvd);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.value = value;
+	args.valuelen = valuelen;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = dp;
+	args.firstblock = &firstblock;
+	args.flist = &flist;
+	args.whichfork = XFS_ATTR_FORK;
+	args.oknoent = 1;
+
+	/* Determine the space the new attribute will use, and whether it
+	 * will be inline or out of line.
+	 */
+	size = xfs_attr_leaf_newentsize(&args, mp->m_sb.sb_blocksize, &local);
+
+	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+	if (local) {
+		if (size > (mp->m_sb.sb_blocksize >> 1)) {
+			/* Double split possible */
+			nblks <<= 1;
+		}
+	} else {
+		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
+		/* Out of line attribute, cannot double split, but make
+		 * room for the attribute value itself.
+		 */
+		nblks += dblocks;
+		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+	}
+
+	/* Size is now blocks for attribute data */
+	args.total = nblks;
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);
+
+	/*
+	 * Root fork attributes can use reserved data blocks for this
+	 * operation if necessary
+	 */
+
+	if (rsvd)
+		args.trans->t_flags |= XFS_TRANS_RESERVE;
+
+	if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
+				      XFS_ATTRSET_LOG_RES(mp, nblks),
+				      0, XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRSET_LOG_COUNT))) {
+		xfs_trans_cancel(args.trans, 0);
+		return(error);
+	}
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
+			 rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+				XFS_QMOPT_RES_REGBLKS);
+	if (error) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
+		return (error);
+	}
+
+	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(args.trans, dp);
+
+	/*
+	 * If the attribute list is non-existent or a shortform list,
+	 * upgrade it to a single-leaf-block attribute list.
+	 */
+	if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) &&
+	     (dp->i_d.di_anextents == 0))) {
+
+		/*
+		 * Build initial attribute list (if required).
+		 */
+		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+			(void)xfs_attr_shortform_create(&args);
+
+		/*
+		 * Try to add the attr to the attribute list in
+		 * the inode.
+		 */
+		error = xfs_attr_shortform_addname(&args);
+		if (error != ENOSPC) {
+			/*
+			 * Commit the shortform mods, and we're done.
+			 * NOTE: this is also the error path (EEXIST, etc).
+			 */
+			ASSERT(args.trans != NULL);
+
+			/*
+			 * If this is a synchronous mount, make sure that
+			 * the transaction goes to disk before returning
+			 * to the user.
+			 */
+			if (mp->m_flags & XFS_MOUNT_WSYNC) {
+				xfs_trans_set_sync(args.trans);
+			}
+			err2 = xfs_trans_commit(args.trans,
+						 XFS_TRANS_RELEASE_LOG_RES,
+						 NULL);
+			xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+			/*
+			 * Hit the inode change time.
+			 */
+			if (!error && (flags & ATTR_KERNOTIME) == 0) {
+				xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+			}
+			return(error == 0 ? err2 : error);
+		}
+
+		/*
+		 * It won't fit in the shortform, transform to a leaf block.
+		 * GROT: another possible req'mt for a double-split btree op.
+		 */
+		XFS_BMAP_INIT(args.flist, args.firstblock);
+		error = xfs_attr_shortform_to_leaf(&args);
+		if (!error) {
+			error = xfs_bmap_finish(&args.trans, args.flist,
+						*args.firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args.trans = NULL;
+			xfs_bmap_cancel(&flist);
+			goto out;
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args.trans, dp);
+		}
+
+		/*
+		 * Commit the leaf transformation.  We'll need another (linked)
+		 * transaction to add the new attribute to the leaf.
+		 */
+		if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+			goto out;
+
+	}
+
+	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_addname(&args);
+	} else {
+		error = xfs_attr_node_addname(&args);
+	}
+	if (error) {
+		goto out;
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 */
+	if (mp->m_flags & XFS_MOUNT_WSYNC) {
+		xfs_trans_set_sync(args.trans);
+	}
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Hit the inode change time.
+	 */
+	if (!error && (flags & ATTR_KERNOTIME) == 0) {
+		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+	}
+
+	return(error);
+
+out:
+	if (args.trans)
+		xfs_trans_cancel(args.trans,
+			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
+
+/*
+ * Generic handler routine to remove a name from an attribute list.
+ * Transitions attribute list from Btree to shortform as necessary.
+ */
+/*ARGSUSED*/
+int								/* error */
+xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred)
+{
+	xfs_da_args_t       args;
+	xfs_inode_t         *dp;
+	xfs_fsblock_t       firstblock;
+	xfs_bmap_free_t     flist;
+	int                 error;
+	xfs_mount_t         *mp;
+	int                 namelen;
+
+	ASSERT(MAXNAMELEN - 1 <= 0xff);	/* length is stored in uint8 */
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return EFAULT;		/* match IRIX behaviour */
+
+	XFS_STATS_INC(xs_attr_remove);
+
+	dp = XFS_BHVTOI(bdp);
+	mp = dp->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return (EIO);
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if (!(flags & ATTR_SECURE) &&
+	     (error = xfs_iaccess(dp, S_IWUSR, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	} else if (XFS_IFORK_Q(dp) == 0 ||
+		   (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+		    dp->i_d.di_anextents == 0)) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(ENOATTR));
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = dp;
+	args.firstblock = &firstblock;
+	args.flist = &flist;
+	args.total = 0;
+	args.whichfork = XFS_ATTR_FORK;
+
+	/*
+	 * Attach the dquots to the inode.
+	 */
+	if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
+		return (error);
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);
+
+	/*
+	 * Root fork attributes can use reserved data blocks for this
+	 * operation if necessary
+	 */
+
+	if (flags & ATTR_ROOT)
+		args.trans->t_flags |= XFS_TRANS_RESERVE;
+
+	if ((error = xfs_trans_reserve(args.trans,
+				      XFS_ATTRRM_SPACE_RES(mp),
+				      XFS_ATTRRM_LOG_RES(mp),
+				      0, XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRRM_LOG_COUNT))) {
+		xfs_trans_cancel(args.trans, 0);
+		return(error);
+
+	}
+
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+	/*
+	 * No need to make quota reservations here. We expect to release some
+	 * blocks, not allocate, in the common case.
+	 */
+	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(args.trans, dp);
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (XFS_IFORK_Q(dp) == 0 ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = XFS_ERROR(ENOATTR);
+		goto out;
+	}
+	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+		error = xfs_attr_shortform_remove(&args);
+		if (error) {
+			goto out;
+		}
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_removename(&args);
+	} else {
+		error = xfs_attr_node_removename(&args);
+	}
+	if (error) {
+		goto out;
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 */
+	if (mp->m_flags & XFS_MOUNT_WSYNC) {
+		xfs_trans_set_sync(args.trans);
+	}
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Hit the inode change time.
+	 */
+	if (!error && (flags & ATTR_KERNOTIME) == 0) {
+		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+	}
+
+	return(error);
+
+out:
+	if (args.trans)
+		xfs_trans_cancel(args.trans,
+			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
+
+/*
+ * Generate a list of extended attribute names and optionally
+ * also value lengths.  A positive return value follows the XFS
+ * convention of being an error; a zero or negative return code
+ * is the length of the buffer returned (negated), indicating
+ * success.
+ */
+int
+xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags,
+		      attrlist_cursor_kern_t *cursor, struct cred *cred)
+{
+	xfs_attr_list_context_t context;
+	xfs_inode_t *dp;
+	int error;
+
+	XFS_STATS_INC(xs_attr_list);
+
+	/*
+	 * Validate the cursor.
+	 */
+	if (cursor->pad1 || cursor->pad2)
+		return(XFS_ERROR(EINVAL));
+	if ((cursor->initted == 0) &&
+	    (cursor->hashval || cursor->blkno || cursor->offset))
+		return(XFS_ERROR(EINVAL));
+
+	/*
+	 * Check for a properly aligned buffer.
+	 */
+	if (((long)buffer) & (sizeof(int)-1))
+		return(XFS_ERROR(EFAULT));
+	if (flags & ATTR_KERNOVAL)
+		bufsize = 0;
+
+	/*
+	 * Initialize the output buffer.
+	 */
+	context.dp = dp = XFS_BHVTOI(bdp);
+	context.cursor = cursor;
+	context.count = 0;
+	context.dupcnt = 0;
+	context.resynch = 1;
+	context.flags = flags;
+	if (!(flags & ATTR_KERNAMELS)) {
+		context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
+		context.firstu = context.bufsize;
+		context.alist = (attrlist_t *)buffer;
+		context.alist->al_count = 0;
+		context.alist->al_more = 0;
+		context.alist->al_offset[0] = context.bufsize;
+	} else {
+		context.bufsize = bufsize;
+		context.firstu = context.bufsize;
+		context.alist = (attrlist_t *)buffer;
+	}
+
+	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+		return (EIO);
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if (!(flags & ATTR_SECURE) &&
+	     (error = xfs_iaccess(dp, S_IRUSR, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	}
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	xfs_attr_trace_l_c("syscall start", &context);
+	if (XFS_IFORK_Q(dp) == 0 ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = 0;
+	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_attr_shortform_list(&context);
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_list(&context);
+	} else {
+		error = xfs_attr_node_list(&context);
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+	xfs_attr_trace_l_c("syscall end", &context);
+
+	if (!(context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS))) {
+		ASSERT(error >= 0);
+	} else {	/* must return negated buffer size or the error */
+		if (context.count < 0)
+			error = XFS_ERROR(ERANGE);
+		else
+			error = -context.count;
+	}
+
+	return(error);
+}
+
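+/*
+ * Illustrative sketch (not part of the original source): a caller using
+ * ATTR_KERNAMELS interprets the return value of xfs_attr_list() as
+ * described above -- positive is an error code, zero or negative is the
+ * negated number of bytes placed in "buffer".  "ret" and "used" are
+ * stand-ins for caller variables:
+ *
+ *	ret = xfs_attr_list(bdp, buffer, bufsize, flags, cursor, cred);
+ *	if (ret > 0)
+ *		return ret;	// error
+ *	used = -ret;		// bytes of names copied into "buffer"
+ */
+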
+int								/* error */
+xfs_attr_inactive(xfs_inode_t *dp)
+{
+	xfs_trans_t *trans;
+	xfs_mount_t *mp;
+	int error;
+
+	mp = dp->i_mount;
+	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if ((XFS_IFORK_Q(dp) == 0) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(0);
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
+	if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
+				      XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRINVAL_LOG_COUNT))) {
+		xfs_trans_cancel(trans, 0);
+		return(error);
+	}
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * No need to make quota reservations here. We expect to release some
+	 * blocks, not allocate, in the common case.
+	 */
+	xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(trans, dp);
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if ((XFS_IFORK_Q(dp) == 0) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = 0;
+		goto out;
+	}
+	error = xfs_attr_root_inactive(&trans, dp);
+	if (error)
+		goto out;
+	/*
+	 * Signal synchronous inactive transactions unless this is a
+	 * synchronous mount filesystem, in which case we know that we're
+	 * here because we've been called out of xfs_inactive, which means
+	 * that the last reference is gone and the unlink transaction has
+	 * already hit the disk, so async inactive transactions are safe.
+	 */
+	if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
+				(!(mp->m_flags & XFS_MOUNT_WSYNC)
+				 ? 1 : 0))))
+		goto out;
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	return(error);
+
+out:
+	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
+
+
+
+/*========================================================================
+ * External routines when attribute list is inside the inode
+ *========================================================================*/
+
+/*
+ * Add a name to the shortform attribute list structure
+ * This is the external routine.
+ */
+STATIC int
+xfs_attr_shortform_addname(xfs_da_args_t *args)
+{
+	int newsize, retval;
+
+	retval = xfs_attr_shortform_lookup(args);
+	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
+		return(retval);
+	} else if (retval == EEXIST) {
+		if (args->flags & ATTR_CREATE)
+			return(retval);
+		retval = xfs_attr_shortform_remove(args);
+		ASSERT(retval == 0);
+	}
+
+	newsize = XFS_ATTR_SF_TOTSIZE(args->dp);
+	newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
+	if ((newsize <= XFS_IFORK_ASIZE(args->dp)) &&
+	    (args->namelen < XFS_ATTR_SF_ENTSIZE_MAX) &&
+	    (args->valuelen < XFS_ATTR_SF_ENTSIZE_MAX)) {
+		retval = xfs_attr_shortform_add(args);
+		ASSERT(retval == 0);
+	} else {
+		return(XFS_ERROR(ENOSPC));
+	}
+	return(0);
+}
+
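+/*
+ * Illustrative summary (not part of the original source) of the flag
+ * handling in xfs_attr_shortform_addname() above:
+ *
+ *	ATTR_REPLACE set, name absent	-> ENOATTR
+ *	ATTR_CREATE set, name present	-> EEXIST
+ *	neither flag			-> any existing entry is removed,
+ *					   then the new one is added
+ */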
+
+/*========================================================================
+ * External routines when attribute list is one block
+ *========================================================================*/
+
+/*
+ * Add a name to the leaf attribute list structure
+ *
+ * This leaf block cannot have a "remote" value, we only call this routine
+ * if bmap_one_block() says there is only one block (ie: no remote blks).
+ */
+int
+xfs_attr_leaf_addname(xfs_da_args_t *args)
+{
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int retval, error, committed;
+
+	/*
+	 * Read the (only) block in the attribute list in.
+	 */
+	dp = args->dp;
+	args->blkno = 0;
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+
+	/*
+	 * Look up the given attribute in the leaf block.  Figure out if
+	 * the given flags produce an error or call for an atomic rename.
+	 */
+	retval = xfs_attr_leaf_lookup_int(bp, args);
+	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
+		xfs_da_brelse(args->trans, bp);
+		return(retval);
+	} else if (retval == EEXIST) {
+		if (args->flags & ATTR_CREATE) {	/* pure create op */
+			xfs_da_brelse(args->trans, bp);
+			return(retval);
+		}
+		args->rename = 1;			/* an atomic rename */
+		args->blkno2 = args->blkno;		/* set 2nd entry info*/
+		args->index2 = args->index;
+		args->rmtblkno2 = args->rmtblkno;
+		args->rmtblkcnt2 = args->rmtblkcnt;
+	}
+
+	/*
+	 * Add the attribute to the leaf block, transitioning to a Btree
+	 * if required.
+	 */
+	retval = xfs_attr_leaf_add(bp, args);
+	xfs_da_buf_done(bp);
+	if (retval == ENOSPC) {
+		/*
+		 * Promote the attribute list to the Btree format, then
+		 * Commit that transaction so that the node_addname() call
+		 * can manage its own transactions.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_attr_leaf_to_node(args);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			return(error);
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+
+		/*
+		 * Commit the current trans (including the inode) and start
+		 * a new one.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			return (error);
+
+		/*
+		 * Fob the whole rest of the problem off on the Btree code.
+		 */
+		error = xfs_attr_node_addname(args);
+		return(error);
+	}
+
+	/*
+	 * Commit the transaction that added the attr name so that
+	 * later routines can manage their own transactions.
+	 */
+	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		return (error);
+
+	/*
+	 * If there was an out-of-line value, allocate the blocks we
+	 * identified for its storage and copy the value.  This is done
+	 * after we create the attribute so that we don't overflow the
+	 * maximum size of a transaction and/or hit a deadlock.
+	 */
+	if (args->rmtblkno > 0) {
+		error = xfs_attr_rmtval_set(args);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * If this is an atomic rename operation, we must "flip" the
+	 * incomplete flags on the "new" and "old" attribute/value pairs
+	 * so that one disappears and one appears atomically.  Then we
+	 * must remove the "old" attribute/value pair.
+	 */
+	if (args->rename) {
+		/*
+		 * In a separate transaction, set the incomplete flag on the
+		 * "old" attr and clear the incomplete flag on the "new" attr.
+		 */
+		error = xfs_attr_leaf_flipflags(args);
+		if (error)
+			return(error);
+
+		/*
+		 * Dismantle the "old" attribute/value pair by removing
+		 * a "remote" value (if it exists).
+		 */
+		args->index = args->index2;
+		args->blkno = args->blkno2;
+		args->rmtblkno = args->rmtblkno2;
+		args->rmtblkcnt = args->rmtblkcnt2;
+		if (args->rmtblkno) {
+			error = xfs_attr_rmtval_remove(args);
+			if (error)
+				return(error);
+		}
+
+		/*
+		 * Read in the block containing the "old" attr, then
+		 * remove the "old" attr from that block (neat, huh!)
+		 */
+		error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1,
+						     &bp, XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		ASSERT(bp != NULL);
+		(void)xfs_attr_leaf_remove(bp, args);
+
+		/*
+		 * If the result is small enough, shrink it all into the inode.
+		 */
+		if (xfs_attr_shortform_allfit(bp, dp)) {
+			XFS_BMAP_INIT(args->flist, args->firstblock);
+			error = xfs_attr_leaf_to_shortform(bp, args);
+			/* bp is gone due to xfs_da_shrink_inode */
+			if (!error) {
+				error = xfs_bmap_finish(&args->trans,
+							args->flist,
+							*args->firstblock,
+							&committed);
+			}
+			if (error) {
+				ASSERT(committed);
+				args->trans = NULL;
+				xfs_bmap_cancel(args->flist);
+				return(error);
+			}
+
+			/*
+			 * bmap_finish() may have committed the last trans
+			 * and started a new one.  We need the inode to be
+			 * in all transactions.
+			 */
+			if (committed) {
+				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(args->trans, dp);
+			}
+		} else
+			xfs_da_buf_done(bp);
+
+		/*
+		 * Commit the remove and start the next trans in series.
+		 */
+		error = xfs_attr_rolltrans(&args->trans, dp);
+
+	} else if (args->rmtblkno > 0) {
+		/*
+		 * Added a "remote" value, just clear the incomplete flag.
+		 */
+		error = xfs_attr_leaf_clearflag(args);
+	}
+	return(error);
+}
+
+/*
+ * Remove a name from the leaf attribute list structure
+ *
+ * This leaf block cannot have a "remote" value; we only call this routine
+ * if bmap_one_block() says there is only one block (ie: no remote blks).
+ */
+STATIC int
+xfs_attr_leaf_removename(xfs_da_args_t *args)
+{
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int committed;
+	int error;
+
+	/*
+	 * Remove the attribute.
+	 */
+	dp = args->dp;
+	args->blkno = 0;
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error) {
+		return(error);
+	}
+
+	ASSERT(bp != NULL);
+	error = xfs_attr_leaf_lookup_int(bp, args);
+	if (error == ENOATTR) {
+		xfs_da_brelse(args->trans, bp);
+		return(error);
+	}
+
+	(void)xfs_attr_leaf_remove(bp, args);
+
+	/*
+	 * If the result is small enough, shrink it all into the inode.
+	 */
+	if (xfs_attr_shortform_allfit(bp, dp)) {
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_attr_leaf_to_shortform(bp, args);
+		/* bp is gone due to xfs_da_shrink_inode */
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			return(error);
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+	} else
+		xfs_da_buf_done(bp);
+	return(0);
+}
+
+/*
+ * Look up a name in a leaf attribute list structure.
+ *
+ * This leaf block cannot have a "remote" value; we only call this routine
+ * if bmap_one_block() says there is only one block (ie: no remote blks).
+ */
+int
+xfs_attr_leaf_get(xfs_da_args_t *args)
+{
+	xfs_dabuf_t *bp;
+	int error;
+
+	args->blkno = 0;
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+
+	error = xfs_attr_leaf_lookup_int(bp, args);
+	if (error != EEXIST) {
+		xfs_da_brelse(args->trans, bp);
+		return(error);
+	}
+	error = xfs_attr_leaf_getvalue(bp, args);
+	xfs_da_brelse(args->trans, bp);
+	if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
+		error = xfs_attr_rmtval_get(args);
+	}
+	return(error);
+}
+
+/*
+ * Copy out attribute entries for attr_list(), for leaf attribute lists.
+ */
+STATIC int
+xfs_attr_leaf_list(xfs_attr_list_context_t *context)
+{
+	xfs_attr_leafblock_t *leaf;
+	int error;
+	xfs_dabuf_t *bp;
+
+	context->cursor->blkno = 0;
+	error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						!= XFS_ATTR_LEAF_MAGIC)) {
+		XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
+				     context->dp->i_mount, leaf);
+		xfs_da_brelse(NULL, bp);
+		return(XFS_ERROR(EFSCORRUPTED));
+	}
+
+	(void)xfs_attr_leaf_list_int(bp, context);
+	xfs_da_brelse(NULL, bp);
+	return(0);
+}
+
+
+/*========================================================================
+ * External routines when attribute list size > XFS_LBSIZE(mp).
+ *========================================================================*/
+
+/*
+ * Add a name to a Btree-format attribute list.
+ *
+ * This will involve walking down the Btree, and may involve splitting
+ * leaf nodes and even splitting intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ *
+ * "Remote" attribute values confuse the issue and atomic rename operations
+ * add a whole extra layer of confusion on top of that.
+ */
+STATIC int
+xfs_attr_node_addname(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	xfs_inode_t *dp;
+	xfs_mount_t *mp;
+	int committed, retval, error;
+
+	/*
+	 * Fill in bucket of arguments/results/context to carry around.
+	 */
+	dp = args->dp;
+	mp = dp->i_mount;
+restart:
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = mp;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_attr_node_ents;
+
+	/*
+	 * Search to see if name already exists, and get back a pointer
+	 * to where it should go.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error)
+		goto out;
+	blk = &state->path.blk[ state->path.active-1 ];
+	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
+		goto out;
+	} else if (retval == EEXIST) {
+		if (args->flags & ATTR_CREATE)
+			goto out;
+		args->rename = 1;			/* atomic rename op */
+		args->blkno2 = args->blkno;		/* set 2nd entry info*/
+		args->index2 = args->index;
+		args->rmtblkno2 = args->rmtblkno;
+		args->rmtblkcnt2 = args->rmtblkcnt;
+		args->rmtblkno = 0;
+		args->rmtblkcnt = 0;
+	}
+
+	retval = xfs_attr_leaf_add(blk->bp, state->args);
+	if (retval == ENOSPC) {
+		if (state->path.active == 1) {
+			/*
+			 * It's really a single leaf node, but it had
+			 * out-of-line values so it looked like it *might*
+			 * have been a b-tree.
+			 */
+			xfs_da_state_free(state);
+			XFS_BMAP_INIT(args->flist, args->firstblock);
+			error = xfs_attr_leaf_to_node(args);
+			if (!error) {
+				error = xfs_bmap_finish(&args->trans,
+							args->flist,
+							*args->firstblock,
+							&committed);
+			}
+			if (error) {
+				ASSERT(committed);
+				args->trans = NULL;
+				xfs_bmap_cancel(args->flist);
+				goto out;
+			}
+
+			/*
+			 * bmap_finish() may have committed the last trans
+			 * and started a new one.  We need the inode to be
+			 * in all transactions.
+			 */
+			if (committed) {
+				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(args->trans, dp);
+			}
+
+			/*
+			 * Commit the node conversion and start the next
+			 * trans in the chain.
+			 */
+			if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+				goto out;
+
+			goto restart;
+		}
+
+		/*
+		 * Split as many Btree elements as required.
+		 * This code tracks the new and old attr's location
+		 * in the index/blkno/rmtblkno/rmtblkcnt fields and
+		 * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_da_split(state);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			goto out;
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+	} else {
+		/*
+		 * Addition succeeded, update Btree hashvals.
+		 */
+		xfs_da_fixhashpath(state, &state->path);
+	}
+
+	/*
+	 * Kill the state structure; we're done with it and need to
+	 * allow the buffers to come back later.
+	 */
+	xfs_da_state_free(state);
+	state = NULL;
+
+	/*
+	 * Commit the leaf addition or btree split and start the next
+	 * trans in the chain.
+	 */
+	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		goto out;
+
+	/*
+	 * If there was an out-of-line value, allocate the blocks we
+	 * identified for its storage and copy the value.  This is done
+	 * after we create the attribute so that we don't overflow the
+	 * maximum size of a transaction and/or hit a deadlock.
+	 */
+	if (args->rmtblkno > 0) {
+		error = xfs_attr_rmtval_set(args);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * If this is an atomic rename operation, we must "flip" the
+	 * incomplete flags on the "new" and "old" attribute/value pairs
+	 * so that one disappears and one appears atomically.  Then we
+	 * must remove the "old" attribute/value pair.
+	 */
+	if (args->rename) {
+		/*
+		 * In a separate transaction, set the incomplete flag on the
+		 * "old" attr and clear the incomplete flag on the "new" attr.
+		 */
+		error = xfs_attr_leaf_flipflags(args);
+		if (error)
+			goto out;
+
+		/*
+		 * Dismantle the "old" attribute/value pair by removing
+		 * a "remote" value (if it exists).
+		 */
+		args->index = args->index2;
+		args->blkno = args->blkno2;
+		args->rmtblkno = args->rmtblkno2;
+		args->rmtblkcnt = args->rmtblkcnt2;
+		if (args->rmtblkno) {
+			error = xfs_attr_rmtval_remove(args);
+			if (error)
+				return(error);
+		}
+
+		/*
+		 * Re-find the "old" attribute entry after any split ops.
+		 * The INCOMPLETE flag means that we will find the "old"
+		 * attr, not the "new" one.
+		 */
+		args->flags |= XFS_ATTR_INCOMPLETE;
+		state = xfs_da_state_alloc();
+		state->args = args;
+		state->mp = mp;
+		state->blocksize = state->mp->m_sb.sb_blocksize;
+		state->node_ents = state->mp->m_attr_node_ents;
+		state->inleaf = 0;
+		error = xfs_da_node_lookup_int(state, &retval);
+		if (error)
+			goto out;
+
+		/*
+		 * Remove the name and update the hashvals in the tree.
+		 */
+		blk = &state->path.blk[ state->path.active-1 ];
+		ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+		error = xfs_attr_leaf_remove(blk->bp, args);
+		xfs_da_fixhashpath(state, &state->path);
+
+		/*
+		 * Check to see if the tree needs to be collapsed.
+		 */
+		if (retval && (state->path.active > 1)) {
+			XFS_BMAP_INIT(args->flist, args->firstblock);
+			error = xfs_da_join(state);
+			if (!error) {
+				error = xfs_bmap_finish(&args->trans,
+							args->flist,
+							*args->firstblock,
+							&committed);
+			}
+			if (error) {
+				ASSERT(committed);
+				args->trans = NULL;
+				xfs_bmap_cancel(args->flist);
+				goto out;
+			}
+
+			/*
+			 * bmap_finish() may have committed the last trans
+			 * and started a new one.  We need the inode to be
+			 * in all transactions.
+			 */
+			if (committed) {
+				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(args->trans, dp);
+			}
+		}
+
+		/*
+		 * Commit and start the next trans in the chain.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			goto out;
+
+	} else if (args->rmtblkno > 0) {
+		/*
+		 * Added a "remote" value, just clear the incomplete flag.
+		 */
+		error = xfs_attr_leaf_clearflag(args);
+		if (error)
+			goto out;
+	}
+	retval = error = 0;
+
+out:
+	if (state)
+		xfs_da_state_free(state);
+	if (error)
+		return(error);
+	return(retval);
+}
+
+/*
+ * Remove a name from a B-tree attribute list.
+ *
+ * This will involve walking down the Btree, and may involve joining
+ * leaf nodes and even joining intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ */
+STATIC int
+xfs_attr_node_removename(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int retval, error, committed;
+
+	/*
+	 * Tie a string around our finger to remind us where we are.
+	 */
+	dp = args->dp;
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_attr_node_ents;
+
+	/*
+	 * Search to see if name exists, and get back a pointer to it.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error || (retval != EEXIST)) {
+		if (error == 0)
+			error = retval;
+		goto out;
+	}
+
+	/*
+	 * If there is an out-of-line value, de-allocate the blocks.
+	 * This is done before we remove the attribute so that we don't
+	 * overflow the maximum size of a transaction and/or hit a deadlock.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	ASSERT(blk->bp != NULL);
+	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+	if (args->rmtblkno > 0) {
+		/*
+		 * Fill in disk block numbers in the state structure
+		 * so that we can get the buffers back after we commit
+		 * several transactions in the following calls.
+		 */
+		error = xfs_attr_fillstate(state);
+		if (error)
+			goto out;
+
+		/*
+		 * Mark the attribute as INCOMPLETE, then bunmapi() the
+		 * remote value.
+		 */
+		error = xfs_attr_leaf_setflag(args);
+		if (error)
+			goto out;
+		error = xfs_attr_rmtval_remove(args);
+		if (error)
+			goto out;
+
+		/*
+		 * Refill the state structure with buffers, the prior calls
+		 * released our buffers.
+		 */
+		error = xfs_attr_refillstate(state);
+		if (error)
+			goto out;
+	}
+
+	/*
+	 * Remove the name and update the hashvals in the tree.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+	retval = xfs_attr_leaf_remove(blk->bp, args);
+	xfs_da_fixhashpath(state, &state->path);
+
+	/*
+	 * Check to see if the tree needs to be collapsed.
+	 */
+	if (retval && (state->path.active > 1)) {
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_da_join(state);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			goto out;
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+
+		/*
+		 * Commit the Btree join operation and start a new trans.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			goto out;
+	}
+
+	/*
+	 * If the result is small enough, push it all into the inode.
+	 */
+	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		/*
+		 * Have to get rid of the copy of this dabuf in the state.
+		 */
+		ASSERT(state->path.active == 1);
+		ASSERT(state->path.blk[0].bp);
+		xfs_da_buf_done(state->path.blk[0].bp);
+		state->path.blk[0].bp = NULL;
+
+		error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
+						     XFS_ATTR_FORK);
+		if (error)
+			goto out;
+		ASSERT(INT_GET(((xfs_attr_leafblock_t *)
+				      bp->data)->hdr.info.magic, ARCH_CONVERT)
+						       == XFS_ATTR_LEAF_MAGIC);
+
+		if (xfs_attr_shortform_allfit(bp, dp)) {
+			XFS_BMAP_INIT(args->flist, args->firstblock);
+			error = xfs_attr_leaf_to_shortform(bp, args);
+			/* bp is gone due to xfs_da_shrink_inode */
+			if (!error) {
+				error = xfs_bmap_finish(&args->trans,
+							args->flist,
+							*args->firstblock,
+							&committed);
+			}
+			if (error) {
+				ASSERT(committed);
+				args->trans = NULL;
+				xfs_bmap_cancel(args->flist);
+				goto out;
+			}
+
+			/*
+			 * bmap_finish() may have committed the last trans
+			 * and started a new one.  We need the inode to be
+			 * in all transactions.
+			 */
+			if (committed) {
+				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(args->trans, dp);
+			}
+		} else
+			xfs_da_brelse(args->trans, bp);
+	}
+	error = 0;
+
+out:
+	xfs_da_state_free(state);
+	return(error);
+}
+
+/*
+ * Fill in the disk block numbers in the state structure for the buffers
+ * that are attached to the state structure.
+ * This is done so that we can quickly reattach ourselves to those buffers
+ * after some set of transaction commits has released these buffers.
+ */
+STATIC int
+xfs_attr_fillstate(xfs_da_state_t *state)
+{
+	xfs_da_state_path_t *path;
+	xfs_da_state_blk_t *blk;
+	int level;
+
+	/*
+	 * Roll down the "path" in the state structure, storing the on-disk
+	 * block number for those buffers in the "path".
+	 */
+	path = &state->path;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->bp) {
+			blk->disk_blkno = xfs_da_blkno(blk->bp);
+			xfs_da_buf_done(blk->bp);
+			blk->bp = NULL;
+		} else {
+			blk->disk_blkno = 0;
+		}
+	}
+
+	/*
+	 * Roll down the "altpath" in the state structure, storing the on-disk
+	 * block number for those buffers in the "altpath".
+	 */
+	path = &state->altpath;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->bp) {
+			blk->disk_blkno = xfs_da_blkno(blk->bp);
+			xfs_da_buf_done(blk->bp);
+			blk->bp = NULL;
+		} else {
+			blk->disk_blkno = 0;
+		}
+	}
+
+	return(0);
+}
+
+/*
+ * Reattach the buffers to the state structure based on the disk block
+ * numbers stored in the state structure.
+ * This is done after some set of transaction commits has released those
+ * buffers from our grip.
+ */
+STATIC int
+xfs_attr_refillstate(xfs_da_state_t *state)
+{
+	xfs_da_state_path_t *path;
+	xfs_da_state_blk_t *blk;
+	int level, error;
+
+	/*
+	 * Roll down the "path" in the state structure, reading each buffer
+	 * back in from the on-disk block number stored in the "path".
+	 */
+	path = &state->path;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->disk_blkno) {
+			error = xfs_da_read_buf(state->args->trans,
+						state->args->dp,
+						blk->blkno, blk->disk_blkno,
+						&blk->bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+		} else {
+			blk->bp = NULL;
+		}
+	}
+
+	/*
+	 * Roll down the "altpath" in the state structure, reading each buffer
+	 * back in from the on-disk block number stored in the "altpath".
+	 */
+	path = &state->altpath;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->disk_blkno) {
+			error = xfs_da_read_buf(state->args->trans,
+						state->args->dp,
+						blk->blkno, blk->disk_blkno,
+						&blk->bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+		} else {
+			blk->bp = NULL;
+		}
+	}
+
+	return(0);
+}
+
+/*
+ * Look up a filename in a node attribute list.
+ *
+ * This routine gets called for any attribute fork that has more than one
+ * block, ie: both true Btree attr lists and single leaf blocks with
+ * "remote" values taking up more blocks.
+ */
+int
+xfs_attr_node_get(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	int error, retval;
+	int i;
+
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_attr_node_ents;
+
+	/*
+	 * Search to see if name exists, and get back a pointer to it.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error) {
+		retval = error;
+	} else if (retval == EEXIST) {
+		blk = &state->path.blk[ state->path.active-1 ];
+		ASSERT(blk->bp != NULL);
+		ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+		/*
+		 * Get the value, local or "remote"
+		 */
+		retval = xfs_attr_leaf_getvalue(blk->bp, args);
+		if (!retval && (args->rmtblkno > 0)
+		    && !(args->flags & ATTR_KERNOVAL)) {
+			retval = xfs_attr_rmtval_get(args);
+		}
+	}
+
+	/*
+	 * If not in a transaction, we have to release all the buffers.
+	 */
+	for (i = 0; i < state->path.active; i++) {
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+
+	xfs_da_state_free(state);
+	return(retval);
+}
+
+STATIC int							/* error */
+xfs_attr_node_list(xfs_attr_list_context_t *context)
+{
+	attrlist_cursor_kern_t *cursor;
+	xfs_attr_leafblock_t *leaf;
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	int error, i;
+	xfs_dabuf_t *bp;
+
+	cursor = context->cursor;
+	cursor->initted = 1;
+
+	/*
+	 * Do all sorts of validation on the passed-in cursor structure.
+	 * If anything is amiss, ignore the cursor and look up the hashval
+	 * starting from the btree root.
+	 */
+	bp = NULL;
+	if (cursor->blkno > 0) {
+		error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
+					      &bp, XFS_ATTR_FORK);
+		if ((error != 0) && (error != EFSCORRUPTED))
+			return(error);
+		if (bp) {
+			node = bp->data;
+			switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) {
+			case XFS_DA_NODE_MAGIC:
+				xfs_attr_trace_l_cn("wrong blk", context, node);
+				xfs_da_brelse(NULL, bp);
+				bp = NULL;
+				break;
+			case XFS_ATTR_LEAF_MAGIC:
+				leaf = bp->data;
+				if (cursor->hashval >
+				    INT_GET(leaf->entries[
+					 INT_GET(leaf->hdr.count,
+						ARCH_CONVERT)-1].hashval,
+							ARCH_CONVERT)) {
+					xfs_attr_trace_l_cl("wrong blk",
+							   context, leaf);
+					xfs_da_brelse(NULL, bp);
+					bp = NULL;
+				} else if (cursor->hashval <=
+					     INT_GET(leaf->entries[0].hashval,
+							ARCH_CONVERT)) {
+					xfs_attr_trace_l_cl("maybe wrong blk",
+							   context, leaf);
+					xfs_da_brelse(NULL, bp);
+					bp = NULL;
+				}
+				break;
+			default:
+				xfs_attr_trace_l_c("wrong blk - ??", context);
+				xfs_da_brelse(NULL, bp);
+				bp = NULL;
+			}
+		}
+	}
+
+	/*
+	 * We did not find what we expected given the cursor's contents,
+	 * so we start from the top and work down based on the hash value.
+	 * Note that the start of a node block is the same as a leaf block.
+	 */
+	if (bp == NULL) {
+		cursor->blkno = 0;
+		for (;;) {
+			error = xfs_da_read_buf(NULL, context->dp,
+						      cursor->blkno, -1, &bp,
+						      XFS_ATTR_FORK);
+			if (error)
+				return(error);
+			if (unlikely(bp == NULL)) {
+				XFS_ERROR_REPORT("xfs_attr_node_list(2)",
+						 XFS_ERRLEVEL_LOW,
+						 context->dp->i_mount);
+				return(XFS_ERROR(EFSCORRUPTED));
+			}
+			node = bp->data;
+			if (INT_GET(node->hdr.info.magic, ARCH_CONVERT)
+							== XFS_ATTR_LEAF_MAGIC)
+				break;
+			if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT)
+							!= XFS_DA_NODE_MAGIC)) {
+				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
+						     XFS_ERRLEVEL_LOW,
+						     context->dp->i_mount,
+						     node);
+				xfs_da_brelse(NULL, bp);
+				return(XFS_ERROR(EFSCORRUPTED));
+			}
+			btree = node->btree;
+			for (i = 0;
+				i < INT_GET(node->hdr.count, ARCH_CONVERT);
+								btree++, i++) {
+				if (cursor->hashval
+						<= INT_GET(btree->hashval,
+							    ARCH_CONVERT)) {
+					cursor->blkno = INT_GET(btree->before, ARCH_CONVERT);
+					xfs_attr_trace_l_cb("descending",
+							    context, btree);
+					break;
+				}
+			}
+			if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
+				xfs_da_brelse(NULL, bp);
+				return(0);
+			}
+			xfs_da_brelse(NULL, bp);
+		}
+	}
+	ASSERT(bp != NULL);
+
+	/*
+	 * Roll upward through the blocks, processing each leaf block in
+	 * order.  As long as there is space in the result buffer, keep
+	 * adding the information.
+	 */
+	for (;;) {
+		leaf = bp->data;
+		if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						!= XFS_ATTR_LEAF_MAGIC)) {
+			XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
+					     XFS_ERRLEVEL_LOW,
+					     context->dp->i_mount, leaf);
+			xfs_da_brelse(NULL, bp);
+			return(XFS_ERROR(EFSCORRUPTED));
+		}
+		error = xfs_attr_leaf_list_int(bp, context);
+		if (error || !leaf->hdr.info.forw)
+			break;	/* not really an error, buffer full or EOF */
+		cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT);
+		xfs_da_brelse(NULL, bp);
+		error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
+					      &bp, XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		if (unlikely(bp == NULL)) {
+			XFS_ERROR_REPORT("xfs_attr_node_list(5)",
+					 XFS_ERRLEVEL_LOW,
+					 context->dp->i_mount);
+			return(XFS_ERROR(EFSCORRUPTED));
+		}
+	}
+	xfs_da_brelse(NULL, bp);
+	return(0);
+}
+
+
+/*========================================================================
+ * External routines for manipulating out-of-line attribute values.
+ *========================================================================*/
+
+/*
+ * Read the value associated with an attribute from the out-of-line buffer
+ * that we stored it in.
+ */
+STATIC int
+xfs_attr_rmtval_get(xfs_da_args_t *args)
+{
+	xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
+	xfs_mount_t *mp;
+	xfs_daddr_t dblkno;
+	xfs_caddr_t dst;
+	xfs_buf_t *bp;
+	int nmap, error, tmp, valuelen, blkcnt, i;
+	xfs_dablk_t lblkno;
+
+	ASSERT(!(args->flags & ATTR_KERNOVAL));
+
+	mp = args->dp->i_mount;
+	dst = args->value;
+	valuelen = args->valuelen;
+	lblkno = args->rmtblkno;
+	while (valuelen > 0) {
+		nmap = ATTR_RMTVALUE_MAPSIZE;
+		error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
+				  args->rmtblkcnt,
+				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+				  NULL, 0, map, &nmap, NULL);
+		if (error)
+			return(error);
+		ASSERT(nmap >= 1);
+
+		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
+			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
+			       (map[i].br_startblock != HOLESTARTBLOCK));
+			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
+			blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+			error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
+					     blkcnt, XFS_BUF_LOCK, &bp);
+			if (error)
+				return(error);
+
+			tmp = (valuelen < XFS_BUF_SIZE(bp))
+				? valuelen : XFS_BUF_SIZE(bp);
+			xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
+			xfs_buf_relse(bp);
+			dst += tmp;
+			valuelen -= tmp;
+
+			lblkno += map[i].br_blockcount;
+		}
+	}
+	ASSERT(valuelen == 0);
+	return(0);
+}
+
+/*
+ * Write the value associated with an attribute into the out-of-line buffer
+ * that we have defined for it.
+ */
+STATIC int
+xfs_attr_rmtval_set(xfs_da_args_t *args)
+{
+	xfs_mount_t *mp;
+	xfs_fileoff_t lfileoff;
+	xfs_inode_t *dp;
+	xfs_bmbt_irec_t map;
+	xfs_daddr_t dblkno;
+	xfs_caddr_t src;
+	xfs_buf_t *bp;
+	xfs_dablk_t lblkno;
+	int blkcnt, valuelen, nmap, error, tmp, committed;
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	src = args->value;
+
+	/*
+	 * Find a "hole" in the attribute address space large enough for
+	 * us to drop the new attribute's value into.
+	 */
+	blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+	lfileoff = 0;
+	error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
+						   XFS_ATTR_FORK);
+	if (error) {
+		return(error);
+	}
+	args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
+	args->rmtblkcnt = blkcnt;
+
+	/*
+	 * Roll through the "value", allocating blocks on disk as required.
+	 */
+	while (blkcnt > 0) {
+		/*
+		 * Allocate a single extent, up to the size of the value.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		nmap = 1;
+		error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno,
+				  blkcnt,
+				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
+							XFS_BMAPI_WRITE,
+				  args->firstblock, args->total, &map, &nmap,
+				  args->flist);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			return(error);
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+
+		ASSERT(nmap == 1);
+		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+		       (map.br_startblock != HOLESTARTBLOCK));
+		lblkno += map.br_blockcount;
+		blkcnt -= map.br_blockcount;
+
+		/*
+		 * Start the next trans in the chain.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			return (error);
+	}
+
+	/*
+	 * Roll through the "value", copying the attribute value to the
+	 * already-allocated blocks.  Blocks are written synchronously
+	 * so that we can know they are all on disk before we turn off
+	 * the INCOMPLETE flag.
+	 */
+	lblkno = args->rmtblkno;
+	valuelen = args->valuelen;
+	while (valuelen > 0) {
+		/*
+		 * Try to remember where we decided to put the value.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		nmap = 1;
+		error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno,
+				  args->rmtblkcnt,
+				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+				  args->firstblock, 0, &map, &nmap, NULL);
+		if (error) {
+			return(error);
+		}
+		ASSERT(nmap == 1);
+		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+		       (map.br_startblock != HOLESTARTBLOCK));
+
+		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+
+		bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno,
+							blkcnt, XFS_BUF_LOCK);
+		ASSERT(bp);
+		ASSERT(!XFS_BUF_GETERROR(bp));
+
+		tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
+							XFS_BUF_SIZE(bp);
+		xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
+		if (tmp < XFS_BUF_SIZE(bp))
+			xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
+		if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
+			return (error);
+		}
+		src += tmp;
+		valuelen -= tmp;
+
+		lblkno += map.br_blockcount;
+	}
+	ASSERT(valuelen == 0);
+	return(0);
+}
+
+/*
+ * Remove the value associated with an attribute by deleting the
+ * out-of-line buffer that it is stored on.
+ */
+STATIC int
+xfs_attr_rmtval_remove(xfs_da_args_t *args)
+{
+	xfs_mount_t *mp;
+	xfs_bmbt_irec_t map;
+	xfs_buf_t *bp;
+	xfs_daddr_t dblkno;
+	xfs_dablk_t lblkno;
+	int valuelen, blkcnt, nmap, error, done, committed;
+
+	mp = args->dp->i_mount;
+
+	/*
+	 * Roll through the "value", invalidating the attribute value's
+	 * blocks.
+	 */
+	lblkno = args->rmtblkno;
+	valuelen = args->rmtblkcnt;
+	while (valuelen > 0) {
+		/*
+		 * Try to remember where we decided to put the value.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		nmap = 1;
+		error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno,
+					args->rmtblkcnt,
+					XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+					args->firstblock, 0, &map, &nmap,
+					args->flist);
+		if (error) {
+			return(error);
+		}
+		ASSERT(nmap == 1);
+		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+		       (map.br_startblock != HOLESTARTBLOCK));
+
+		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+
+		/*
+		 * If the "remote" value is in the cache, remove it.
+		 */
+		bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
+				XFS_INCORE_TRYLOCK);
+		if (bp) {
+			XFS_BUF_STALE(bp);
+			XFS_BUF_UNDELAYWRITE(bp);
+			xfs_buf_relse(bp);
+			bp = NULL;
+		}
+
+		valuelen -= map.br_blockcount;
+
+		lblkno += map.br_blockcount;
+	}
+
+	/*
+	 * Keep de-allocating extents until the remote-value region is gone.
+	 */
+	lblkno = args->rmtblkno;
+	blkcnt = args->rmtblkcnt;
+	done = 0;
+	while (!done) {
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
+				    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+				    1, args->firstblock, args->flist, &done);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			return(error);
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, args->dp);
+		}
+
+		/*
+		 * Close out trans and start the next one in the chain.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, args->dp)))
+			return (error);
+	}
+	return(0);
+}
+
+#if defined(XFS_ATTR_TRACE)
+/*
+ * Add a trace buffer entry for an attr_list context structure.
+ */
+void
+xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
+{
+	xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where,
+		(__psunsigned_t)context->dp,
+		(__psunsigned_t)context->cursor->hashval,
+		(__psunsigned_t)context->cursor->blkno,
+		(__psunsigned_t)context->cursor->offset,
+		(__psunsigned_t)context->alist,
+		(__psunsigned_t)context->bufsize,
+		(__psunsigned_t)context->count,
+		(__psunsigned_t)context->firstu,
+		(__psunsigned_t)
+			((context->count > 0) &&
+			!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
+				? (ATTR_ENTRY(context->alist,
+					      context->count-1)->a_valuelen)
+				: 0,
+		(__psunsigned_t)context->dupcnt,
+		(__psunsigned_t)context->flags,
+		(__psunsigned_t)NULL,
+		(__psunsigned_t)NULL,
+		(__psunsigned_t)NULL);
+}
+
+/*
+ * Add a trace buffer entry for a context structure and a Btree node.
+ */
+void
+xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
+			 struct xfs_da_intnode *node)
+{
+	xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where,
+		(__psunsigned_t)context->dp,
+		(__psunsigned_t)context->cursor->hashval,
+		(__psunsigned_t)context->cursor->blkno,
+		(__psunsigned_t)context->cursor->offset,
+		(__psunsigned_t)context->alist,
+		(__psunsigned_t)context->bufsize,
+		(__psunsigned_t)context->count,
+		(__psunsigned_t)context->firstu,
+		(__psunsigned_t)
+			((context->count > 0) &&
+			!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
+				? (ATTR_ENTRY(context->alist,
+					      context->count-1)->a_valuelen)
+				: 0,
+		(__psunsigned_t)context->dupcnt,
+		(__psunsigned_t)context->flags,
+		(__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT),
+		(__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT),
+		(__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+}
+
+/*
+ * Add a trace buffer entry for a context structure and a Btree element.
+ */
+void
+xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
+			  struct xfs_da_node_entry *btree)
+{
+	xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where,
+		(__psunsigned_t)context->dp,
+		(__psunsigned_t)context->cursor->hashval,
+		(__psunsigned_t)context->cursor->blkno,
+		(__psunsigned_t)context->cursor->offset,
+		(__psunsigned_t)context->alist,
+		(__psunsigned_t)context->bufsize,
+		(__psunsigned_t)context->count,
+		(__psunsigned_t)context->firstu,
+		(__psunsigned_t)
+			((context->count > 0) &&
+			!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
+				? (ATTR_ENTRY(context->alist,
+					      context->count-1)->a_valuelen)
+				: 0,
+		(__psunsigned_t)context->dupcnt,
+		(__psunsigned_t)context->flags,
+		(__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT),
+		(__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT),
+		(__psunsigned_t)NULL);
+}
+
+/*
+ * Add a trace buffer entry for a context structure and a leaf block.
+ */
+void
+xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
+			      struct xfs_attr_leafblock *leaf)
+{
+	xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where,
+		(__psunsigned_t)context->dp,
+		(__psunsigned_t)context->cursor->hashval,
+		(__psunsigned_t)context->cursor->blkno,
+		(__psunsigned_t)context->cursor->offset,
+		(__psunsigned_t)context->alist,
+		(__psunsigned_t)context->bufsize,
+		(__psunsigned_t)context->count,
+		(__psunsigned_t)context->firstu,
+		(__psunsigned_t)
+			((context->count > 0) &&
+			!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
+				? (ATTR_ENTRY(context->alist,
+					      context->count-1)->a_valuelen)
+				: 0,
+		(__psunsigned_t)context->dupcnt,
+		(__psunsigned_t)context->flags,
+		(__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT),
+		(__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
+		(__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+}
+
+/*
+ * Add a trace buffer entry for the arguments given to the routine,
+ * generic form.
+ */
+void
+xfs_attr_trace_enter(int type, char *where,
+			 __psunsigned_t a2, __psunsigned_t a3,
+			 __psunsigned_t a4, __psunsigned_t a5,
+			 __psunsigned_t a6, __psunsigned_t a7,
+			 __psunsigned_t a8, __psunsigned_t a9,
+			 __psunsigned_t a10, __psunsigned_t a11,
+			 __psunsigned_t a12, __psunsigned_t a13,
+			 __psunsigned_t a14, __psunsigned_t a15)
+{
+	ASSERT(xfs_attr_trace_buf);
+	ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
+					 (void *)where,
+					 (void *)a2,  (void *)a3,  (void *)a4,
+					 (void *)a5,  (void *)a6,  (void *)a7,
+					 (void *)a8,  (void *)a9,  (void *)a10,
+					 (void *)a11, (void *)a12, (void *)a13,
+					 (void *)a14, (void *)a15);
+}
+#endif	/* XFS_ATTR_TRACE */
+
+
+/*========================================================================
+ * System (pseudo) namespace attribute interface routines.
+ *========================================================================*/
+
+STATIC int
+posix_acl_access_set(
+	vnode_t	*vp, char *name, void *data, size_t size, int xflags)
+{
+	return xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
+}
+
+STATIC int
+posix_acl_access_remove(
+	struct vnode *vp, char *name, int xflags)
+{
+	return xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
+}
+
+STATIC int
+posix_acl_access_get(
+	vnode_t *vp, char *name, void *data, size_t size, int xflags)
+{
+	return xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
+}
+
+STATIC int
+posix_acl_access_exists(
+	vnode_t *vp)
+{
+	return xfs_acl_vhasacl_access(vp);
+}
+
+STATIC int
+posix_acl_default_set(
+	vnode_t	*vp, char *name, void *data, size_t size, int xflags)
+{
+	return xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
+}
+
+STATIC int
+posix_acl_default_get(
+	vnode_t *vp, char *name, void *data, size_t size, int xflags)
+{
+	return xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
+}
+
+STATIC int
+posix_acl_default_remove(
+	struct vnode *vp, char *name, int xflags)
+{
+	return xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT);
+}
+
+STATIC int
+posix_acl_default_exists(
+	vnode_t *vp)
+{
+	return xfs_acl_vhasacl_default(vp);
+}
+
+struct attrnames posix_acl_access = {
+	.attr_name	= "posix_acl_access",
+	.attr_namelen	= sizeof("posix_acl_access") - 1,
+	.attr_get	= posix_acl_access_get,
+	.attr_set	= posix_acl_access_set,
+	.attr_remove	= posix_acl_access_remove,
+	.attr_exists	= posix_acl_access_exists,
+};
+
+struct attrnames posix_acl_default = {
+	.attr_name	= "posix_acl_default",
+	.attr_namelen	= sizeof("posix_acl_default") - 1,
+	.attr_get	= posix_acl_default_get,
+	.attr_set	= posix_acl_default_set,
+	.attr_remove	= posix_acl_default_remove,
+	.attr_exists	= posix_acl_default_exists,
+};
+
+struct attrnames *attr_system_names[] =
+	{ &posix_acl_access, &posix_acl_default };
+
+
+/*========================================================================
+ * Namespace-prefix-style attribute name interface routines.
+ *========================================================================*/
+
+STATIC int
+attr_generic_set(
+	struct vnode *vp, char *name, void *data, size_t size, int xflags)
+{
+	int 	error;
+
+	VOP_ATTR_SET(vp, name, data, size, xflags, NULL, error);
+	return -error;
+}
+
+STATIC int
+attr_generic_get(
+	struct vnode *vp, char *name, void *data, size_t size, int xflags)
+{
+	int	error, asize = size;
+
+	VOP_ATTR_GET(vp, name, data, &asize, xflags, NULL, error);
+	if (!error)
+		return asize;
+	return -error;
+}
+
+STATIC int
+attr_generic_remove(
+	struct vnode *vp, char *name, int xflags)
+{
+	int	error;
+
+	VOP_ATTR_REMOVE(vp, name, xflags, NULL, error);
+	return -error;
+}
+
+STATIC int
+attr_generic_listadd(
+	attrnames_t		*prefix,
+	attrnames_t		*namesp,
+	void			*data,
+	size_t			size,
+	ssize_t			*result)
+{
+	char			*p = data + *result;
+
+	*result += prefix->attr_namelen;
+	*result += namesp->attr_namelen + 1;
+	if (!size)
+		return 0;
+	if (*result > size)
+		return -ERANGE;
+	strcpy(p, prefix->attr_name);
+	p += prefix->attr_namelen;
+	strcpy(p, namesp->attr_name);
+	p += namesp->attr_namelen + 1;
+	return 0;
+}
+
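+/*
+ * Illustrative layout of one entry appended by attr_generic_listadd() above
+ * (a sketch, not a formal format description): the namespace prefix and the
+ * attribute name are copied back to back with a single terminating NUL,
+ * e.g.
+ *
+ *	"system." "posix_acl_access" '\0'
+ *
+ * and *result advances by both attr_namelen values plus one.
+ */
+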
+STATIC int
+attr_system_list(
+	struct vnode		*vp,
+	void			*data,
+	size_t			size,
+	ssize_t			*result)
+{
+	attrnames_t		*namesp;
+	int			i, error = 0;
+
+	for (i = 0; i < ATTR_SYSCOUNT; i++) {
+		namesp = attr_system_names[i];
+		if (!namesp->attr_exists || !namesp->attr_exists(vp))
+			continue;
+		error = attr_generic_listadd(&attr_system, namesp,
+						data, size, result);
+		if (error)
+			break;
+	}
+	return error;
+}
+
+int
+attr_generic_list(
+	struct vnode *vp, void *data, size_t size, int xflags, ssize_t *result)
+{
+	attrlist_cursor_kern_t	cursor = { 0 };
+	int			error;
+
+	VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error);
+	if (error > 0)
+		return -error;
+	*result = -error;
+	return attr_system_list(vp, data, size, result);
+}
+
+attrnames_t *
+attr_lookup_namespace(
+	char			*name,
+	struct attrnames	**names,
+	int			nnames)
+{
+	int			i;
+
+	for (i = 0; i < nnames; i++)
+		if (!strncmp(name, names[i]->attr_name, names[i]->attr_namelen))
+			return names[i];
+	return NULL;
+}
+
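+/*
+ * Illustrative use of attr_lookup_namespace() (a sketch only): only the
+ * namespace prefix is compared, so a name such as "user.comment" resolves
+ * to &attr_user when matched against the attr_namespaces table:
+ *
+ *	attrnames_t	*namesp;
+ *
+ *	namesp = attr_lookup_namespace("user.comment", attr_namespaces,
+ *				       ATTR_NAMECOUNT);
+ */
+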
+/*
+ * Some checks to prevent people abusing EAs to get over quota:
+ * - Don't allow modifying user EAs on devices/symlinks;
+ * - Don't allow modifying user EAs if sticky bit set;
+ */
+STATIC int
+attr_user_capable(
+	struct vnode	*vp,
+	cred_t		*cred)
+{
+	struct inode	*inode = LINVFS_GET_IP(vp);
+
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		return -EPERM;
+	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
+	    (current_fsuid(cred) != inode->i_uid) && !capable(CAP_FOWNER))
+		return -EPERM;
+	return 0;
+}
+
+STATIC int
+attr_trusted_capable(
+	struct vnode	*vp,
+	cred_t		*cred)
+{
+	struct inode	*inode = LINVFS_GET_IP(vp);
+
+	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+		return -EPERM;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	return 0;
+}
+
+STATIC int
+attr_secure_capable(
+	struct vnode	*vp,
+	cred_t		*cred)
+{
+	return -ENOSECURITY;
+}
+
+STATIC int
+attr_system_set(
+	struct vnode *vp, char *name, void *data, size_t size, int xflags)
+{
+	attrnames_t	*namesp;
+	int		error;
+
+	if (xflags & ATTR_CREATE)
+		return -EINVAL;
+
+	namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	error = namesp->attr_set(vp, name, data, size, xflags);
+	if (!error)
+		error = vn_revalidate(vp);
+	return error;
+}
+
+STATIC int
+attr_system_get(
+	struct vnode *vp, char *name, void *data, size_t size, int xflags)
+{
+	attrnames_t	*namesp;
+
+	namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	return namesp->attr_get(vp, name, data, size, xflags);
+}
+
+STATIC int
+attr_system_remove(
+	struct vnode *vp, char *name, int xflags)
+{
+	attrnames_t	*namesp;
+
+	namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
+	if (!namesp)
+		return -EOPNOTSUPP;
+	return namesp->attr_remove(vp, name, xflags);
+}
+
+struct attrnames attr_system = {
+	.attr_name	= "system.",
+	.attr_namelen	= sizeof("system.") - 1,
+	.attr_flag	= ATTR_SYSTEM,
+	.attr_get	= attr_system_get,
+	.attr_set	= attr_system_set,
+	.attr_remove	= attr_system_remove,
+	.attr_capable	= (attrcapable_t)fs_noerr,
+};
+
+struct attrnames attr_trusted = {
+	.attr_name	= "trusted.",
+	.attr_namelen	= sizeof("trusted.") - 1,
+	.attr_flag	= ATTR_ROOT,
+	.attr_get	= attr_generic_get,
+	.attr_set	= attr_generic_set,
+	.attr_remove	= attr_generic_remove,
+	.attr_capable	= attr_trusted_capable,
+};
+
+struct attrnames attr_secure = {
+	.attr_name	= "security.",
+	.attr_namelen	= sizeof("security.") - 1,
+	.attr_flag	= ATTR_SECURE,
+	.attr_get	= attr_generic_get,
+	.attr_set	= attr_generic_set,
+	.attr_remove	= attr_generic_remove,
+	.attr_capable	= attr_secure_capable,
+};
+
+struct attrnames attr_user = {
+	.attr_name	= "user.",
+	.attr_namelen	= sizeof("user.") - 1,
+	.attr_get	= attr_generic_get,
+	.attr_set	= attr_generic_set,
+	.attr_remove	= attr_generic_remove,
+	.attr_capable	= attr_user_capable,
+};
+
+struct attrnames *attr_namespaces[] =
+	{ &attr_system, &attr_trusted, &attr_secure, &attr_user };
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
new file mode 100644
index 0000000..67cd0f5
--- /dev/null
+++ b/fs/xfs/xfs_attr.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2000, 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ATTR_H__
+#define	__XFS_ATTR_H__
+
+/*
+ * xfs_attr.h
+ *
+ * Large attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.
+ * The internal links in the Btree are logical block offsets into the file.
+ *
+ * Small attribute lists use a different format and are packed as tightly
+ * as possible so as to fit into the literal area of the inode.
+ */
+
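+/*
+ * As a rough map (descriptive only): the "small" format is handled by the
+ * xfs_attr_shortform_*() routines, a single leaf block by the
+ * xfs_attr_leaf_*() routines, and the full Btree form by the
+ * xfs_attr_node_*() routines in xfs_attr.c and xfs_attr_leaf.c.
+ */
+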
+/*========================================================================
+ * External interfaces
+ *========================================================================*/
+
+struct cred;
+struct vnode;
+
+typedef int (*attrset_t)(struct vnode *, char *, void *, size_t, int);
+typedef int (*attrget_t)(struct vnode *, char *, void *, size_t, int);
+typedef int (*attrremove_t)(struct vnode *, char *, int);
+typedef int (*attrexists_t)(struct vnode *);
+typedef int (*attrcapable_t)(struct vnode *, struct cred *);
+
+typedef struct attrnames {
+	char *		attr_name;
+	unsigned int	attr_namelen;
+	unsigned int	attr_flag;
+	attrget_t	attr_get;
+	attrset_t	attr_set;
+	attrremove_t	attr_remove;
+	attrexists_t	attr_exists;
+	attrcapable_t	attr_capable;
+} attrnames_t;
+
+#define ATTR_NAMECOUNT	4
+extern struct attrnames attr_user;
+extern struct attrnames attr_secure;
+extern struct attrnames attr_system;
+extern struct attrnames attr_trusted;
+extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT];
+
+#define ATTR_SYSCOUNT	2
+extern struct attrnames posix_acl_access;
+extern struct attrnames posix_acl_default;
+extern struct attrnames *attr_system_names[ATTR_SYSCOUNT];
+
+extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int);
+extern int attr_generic_list(struct vnode *, void *, size_t, int, ssize_t *);
+
+#define ATTR_DONTFOLLOW	0x0001	/* -- unused, from IRIX -- */
+#define ATTR_ROOT	0x0002	/* use attrs in root (trusted) namespace */
+#define ATTR_TRUST	0x0004	/* -- unused, from IRIX -- */
+#define ATTR_SECURE	0x0008	/* use attrs in security namespace */
+#define ATTR_CREATE	0x0010	/* pure create: fail if attr already exists */
+#define ATTR_REPLACE	0x0020	/* pure set: fail if attr does not exist */
+#define ATTR_SYSTEM	0x0100	/* use attrs in system (pseudo) namespace */
+
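+/*
+ * As set up in the attrnames tables in xfs_attr.c, the "trusted." namespace
+ * is tagged with ATTR_ROOT, "security." with ATTR_SECURE, "system." with
+ * ATTR_SYSTEM, and "user." with no namespace flag at all.
+ */
+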
+#define ATTR_KERNACCESS	0x0400	/* [kernel] iaccess, inode held io-locked */
+#define ATTR_KERNOTIME	0x1000	/* [kernel] don't update inode timestamps */
+#define ATTR_KERNOVAL	0x2000	/* [kernel] get attr size only, not value */
+#define ATTR_KERNAMELS	0x4000	/* [kernel] list attr names (simple list) */
+
+#define ATTR_KERNORMALS	0x0800	/* [kernel] normal attr list: user+secure */
+#define ATTR_KERNROOTLS	0x8000	/* [kernel] include root in the attr list */
+#define ATTR_KERNFULLS	(ATTR_KERNORMALS|ATTR_KERNROOTLS)
+
+/*
+ * The maximum size (into the kernel or returned from the kernel) of an
+ * attribute value or the buffer used for an attr_list() call.  Larger
+ * sizes will result in an ERANGE return code.
+ */
+#define	ATTR_MAX_VALUELEN	(64*1024)	/* max length of a value */
+
+/*
+ * Define how lists of attribute names are returned to the user from
+ * the attr_list() call.  A large, 32-bit aligned buffer is passed in
+ * along with its size.  We put an array of offsets at the top that each
+ * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
+ */
+typedef struct attrlist {
+	__s32	al_count;	/* number of entries in attrlist */
+	__s32	al_more;	/* T/F: more attrs (do call again) */
+	__s32	al_offset[1];	/* byte offsets of attrs [var-sized] */
+} attrlist_t;
+
+/*
+ * Show the interesting info about one attribute.  This is what the
+ * al_offset[i] entry points to.
+ */
+typedef struct attrlist_ent {	/* data from attr_list() */
+	__u32	a_valuelen;	/* number bytes in value of attr */
+	char	a_name[1];	/* attr name (NULL terminated) */
+} attrlist_ent_t;
+
+/*
+ * Given a pointer to the (char*) buffer containing the attr_list() result,
+ * and an index, return a pointer to the indicated attribute in the buffer.
+ */
+#define	ATTR_ENTRY(buffer, index)		\
+	((attrlist_ent_t *)			\
+	 &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
+
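+/*
+ * Illustrative walk of an attr_list() result buffer (a sketch only; "buffer"
+ * is whatever the caller handed to attr_list()):
+ *
+ *	attrlist_t	*al = (attrlist_t *)buffer;
+ *	attrlist_ent_t	*aep;
+ *	int		i;
+ *
+ *	for (i = 0; i < al->al_count; i++) {
+ *		aep = ATTR_ENTRY(buffer, i);
+ *		... use aep->a_name (NULL terminated) and aep->a_valuelen ...
+ *	}
+ *
+ * If al->al_more is set, there are more attributes than fit in the buffer
+ * and the call should be repeated with the same cursor.
+ */
+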
+/*
+ * Multi-attribute operation vector.
+ */
+typedef struct attr_multiop {
+	int	am_opcode;	/* operation to perform (ATTR_OP_GET, etc.) */
+	int	am_error;	/* [out arg] result of this sub-op (an errno) */
+	char	*am_attrname;	/* attribute name to work with */
+	char	*am_attrvalue;	/* [in/out arg] attribute value (raw bytes) */
+	int	am_length;	/* [in/out arg] length of value */
+	int	am_flags;	/* bitwise OR of attr API flags defined above */
+} attr_multiop_t;
+
+#define ATTR_OP_GET	1	/* return the indicated attr's value */
+#define ATTR_OP_SET	2	/* set/create the indicated attr/value pair */
+#define ATTR_OP_REMOVE	3	/* remove the indicated attr */
+
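+/*
+ * Illustrative setup of a single sub-operation (a sketch only; "value_buf"
+ * and "value_buf_size" are assumed caller-supplied):
+ *
+ *	attr_multiop_t	op;
+ *
+ *	op.am_opcode = ATTR_OP_GET;
+ *	op.am_attrname = "user.comment";
+ *	op.am_attrvalue = value_buf;
+ *	op.am_length = value_buf_size;
+ *	op.am_flags = 0;
+ *
+ * On return, am_error holds the result of this sub-op and am_length is
+ * updated per the field comments above.
+ */
+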
+/*
+ * Kernel-internal version of the attrlist cursor.
+ */
+typedef struct attrlist_cursor_kern {
+	__u32	hashval;	/* hash value of next entry to add */
+	__u32	blkno;		/* block containing entry (suggestion) */
+	__u32	offset;		/* offset in list of equal-hashvals */
+	__u16	pad1;		/* padding to match user-level */
+	__u8	pad2;		/* padding to match user-level */
+	__u8	initted;	/* T/F: cursor has been initialized */
+} attrlist_cursor_kern_t;
+
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+struct xfs_inode;
+struct attrlist_cursor_kern;
+struct xfs_da_args;
+
+/*
+ * Overall external interface routines.
+ */
+int xfs_attr_get(bhv_desc_t *, char *, char *, int *, int, struct cred *);
+int xfs_attr_set(bhv_desc_t *, char *, char *, int, int, struct cred *);
+int xfs_attr_remove(bhv_desc_t *, char *, int, struct cred *);
+int xfs_attr_list(bhv_desc_t *, char *, int, int,
+			 struct attrlist_cursor_kern *, struct cred *);
+int xfs_attr_inactive(struct xfs_inode *dp);
+
+int xfs_attr_node_get(struct xfs_da_args *);
+int xfs_attr_leaf_get(struct xfs_da_args *);
+int xfs_attr_shortform_getvalue(struct xfs_da_args *);
+int xfs_attr_fetch(struct xfs_inode *, char *, int,
+			char *, int *, int, struct cred *);
+
+#endif	/* __XFS_ATTR_H__ */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
new file mode 100644
index 0000000..b11256e
--- /dev/null
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -0,0 +1,3050 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+/*
+ * xfs_attr_leaf.c
+ *
+ * GROT: figure out how to recover gracefully when bmap returns ENOSPC.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+
+/*
+ * xfs_attr_leaf.c
+ *
+ * Routines to implement leaf blocks of attributes as Btrees of hashed names.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Routines used for growing the Btree.
+ */
+STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
+					      int freemap_index);
+STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer);
+STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
+						   xfs_da_state_blk_t *blk1,
+						   xfs_da_state_blk_t *blk2);
+STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
+					   xfs_da_state_blk_t *leaf_blk_1,
+					   xfs_da_state_blk_t *leaf_blk_2,
+					   int *number_entries_in_blk1,
+					   int *number_usedbytes_in_blk1);
+
+/*
+ * Utility routines.
+ */
+STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
+					 int src_start,
+					 xfs_attr_leafblock_t *dst_leaf,
+					 int dst_start, int move_count,
+					 xfs_mount_t *mp);
+
+
+/*========================================================================
+ * External routines when dirsize < XFS_LITINO(mp).
+ *========================================================================*/
+
+/*
+ * Create the initial contents of a shortform attribute list.
+ */
+int
+xfs_attr_shortform_create(xfs_da_args_t *args)
+{
+	xfs_attr_sf_hdr_t *hdr;
+	xfs_inode_t *dp;
+	xfs_ifork_t *ifp;
+
+	dp = args->dp;
+	ASSERT(dp != NULL);
+	ifp = dp->i_afp;
+	ASSERT(ifp != NULL);
+	ASSERT(ifp->if_bytes == 0);
+	if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
+		ifp->if_flags &= ~XFS_IFEXTENTS;	/* just in case */
+		dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
+		ifp->if_flags |= XFS_IFINLINE;
+	} else {
+		ASSERT(ifp->if_flags & XFS_IFINLINE);
+	}
+	xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
+	hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
+	hdr->count = 0;
+	INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr));
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
+	return(0);
+}
+
+/*
+ * Add a name/value pair to the shortform attribute list.
+ * Overflow from the inode has already been checked for.
+ */
+int
+xfs_attr_shortform_add(xfs_da_args_t *args)
+{
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	int i, offset, size;
+	xfs_inode_t *dp;
+	xfs_ifork_t *ifp;
+
+	dp = args->dp;
+	ifp = dp->i_afp;
+	ASSERT(ifp->if_flags & XFS_IFINLINE);
+	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT);
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
+		if (sfe->namelen != args->namelen)
+			continue;
+		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
+			continue;
+		if (((args->flags & ATTR_SECURE) != 0) !=
+		    ((sfe->flags & XFS_ATTR_SECURE) != 0))
+			continue;
+		if (((args->flags & ATTR_ROOT) != 0) !=
+		    ((sfe->flags & XFS_ATTR_ROOT) != 0))
+			continue;
+		return(XFS_ERROR(EEXIST));
+	}
+
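+	/*
+	 * No match was found; sfe now points just past the last entry.
+	 * Remember its byte offset, since xfs_idata_realloc() below may
+	 * move the in-core fork data and invalidate the pointers.
+	 */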
+	offset = (char *)sfe - (char *)sf;
+	size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
+	xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
+	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
+	sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);
+
+	sfe->namelen = args->namelen;
+	INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen);
+	sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE :
+			((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
+	memcpy(sfe->nameval, args->name, args->namelen);
+	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
+	INT_MOD(sf->hdr.count, ARCH_CONVERT, 1);
+	INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
+
+	return(0);
+}
+
+/*
+ * Remove a name from the shortform attribute list structure.
+ */
+int
+xfs_attr_shortform_remove(xfs_da_args_t *args)
+{
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	int base, size=0, end, totsize, i;
+	xfs_inode_t *dp;
+
+	/*
+	 * Remove the attribute.
+	 */
+	dp = args->dp;
+	base = sizeof(xfs_attr_sf_hdr_t);
+	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT);
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
+					base += size, i++) {
+		size = XFS_ATTR_SF_ENTSIZE(sfe);
+		if (sfe->namelen != args->namelen)
+			continue;
+		if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
+			continue;
+		if (((args->flags & ATTR_SECURE) != 0) !=
+		    ((sfe->flags & XFS_ATTR_SECURE) != 0))
+			continue;
+		if (((args->flags & ATTR_ROOT) != 0) !=
+		    ((sfe->flags & XFS_ATTR_ROOT) != 0))
+			continue;
+		break;
+	}
+	if (i == INT_GET(sf->hdr.count, ARCH_CONVERT))
+		return(XFS_ERROR(ENOATTR));
+
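+	/*
+	 * Found it: slide any following entries down over the removed one,
+	 * then shrink the fork and update the header bookkeeping.
+	 */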
+	end = base + size;
+	totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT);
+	if (end != totsize) {
+		memmove(&((char *)sf)[base], &((char *)sf)[end],
+							totsize - end);
+	}
+	INT_MOD(sf->hdr.count, ARCH_CONVERT, -1);
+	INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size);
+	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
+
+	return(0);
+}
+
+/*
+ * Look up a name in a shortform attribute list structure.
+ */
+/*ARGSUSED*/
+int
+xfs_attr_shortform_lookup(xfs_da_args_t *args)
+{
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	int i;
+	xfs_ifork_t *ifp;
+
+	ifp = args->dp->i_afp;
+	ASSERT(ifp->if_flags & XFS_IFINLINE);
+	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT);
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
+		if (sfe->namelen != args->namelen)
+			continue;
+		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
+			continue;
+		if (((args->flags & ATTR_SECURE) != 0) !=
+		    ((sfe->flags & XFS_ATTR_SECURE) != 0))
+			continue;
+		if (((args->flags & ATTR_ROOT) != 0) !=
+		    ((sfe->flags & XFS_ATTR_ROOT) != 0))
+			continue;
+		return(XFS_ERROR(EEXIST));
+	}
+	return(XFS_ERROR(ENOATTR));
+}
+
+/*
+ * Look up a name in a shortform attribute list structure and
+ * return its value.
+ */
+/*ARGSUSED*/
+int
+xfs_attr_shortform_getvalue(xfs_da_args_t *args)
+{
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	int i;
+
+	ASSERT(args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
+	sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT);
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
+		if (sfe->namelen != args->namelen)
+			continue;
+		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
+			continue;
+		if (((args->flags & ATTR_SECURE) != 0) !=
+		    ((sfe->flags & XFS_ATTR_SECURE) != 0))
+			continue;
+		if (((args->flags & ATTR_ROOT) != 0) !=
+		    ((sfe->flags & XFS_ATTR_ROOT) != 0))
+			continue;
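+		/*
+		 * Found the entry.  ATTR_KERNOVAL means the caller only
+		 * wants the value length, not the value itself.
+		 */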
+		if (args->flags & ATTR_KERNOVAL) {
+			args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT);
+			return(XFS_ERROR(EEXIST));
+		}
+		if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) {
+			args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT);
+			return(XFS_ERROR(ERANGE));
+		}
+		args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT);
+		memcpy(args->value, &sfe->nameval[args->namelen],
+						    args->valuelen);
+		return(XFS_ERROR(EEXIST));
+	}
+	return(XFS_ERROR(ENOATTR));
+}
+
+/*
+ * Convert from using the shortform to the leaf.
+ */
+int
+xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+{
+	xfs_inode_t *dp;
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	xfs_da_args_t nargs;
+	char *tmpbuffer;
+	int error, i, size;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+	xfs_ifork_t *ifp;
+
+	dp = args->dp;
+	ifp = dp->i_afp;
+	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
+	size = INT_GET(sf->hdr.totsize, ARCH_CONVERT);
+	tmpbuffer = kmem_alloc(size, KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
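+	/*
+	 * Copy the shortform data aside; the inline fork data is about to
+	 * be freed and replayed into a real attribute leaf block.
+	 */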
+	memcpy(tmpbuffer, ifp->if_u1.if_data, size);
+	sf = (xfs_attr_shortform_t *)tmpbuffer;
+
+	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
+	bp = NULL;
+	error = xfs_da_grow_inode(args, &blkno);
+	if (error) {
+		/*
+		 * If we hit an IO error in the middle of the transaction inside
+		 * grow_inode(), we may have inconsistent data. Bail out.
+		 */
+		if (error == EIO)
+			goto out;
+		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
+		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
+		goto out;
+	}
+
+	ASSERT(blkno == 0);
+	error = xfs_attr_leaf_create(args, blkno, &bp);
+	if (error) {
+		error = xfs_da_shrink_inode(args, 0, bp);
+		bp = NULL;
+		if (error)
+			goto out;
+		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
+		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
+		goto out;
+	}
+
+	memset((char *)&nargs, 0, sizeof(nargs));
+	nargs.dp = dp;
+	nargs.firstblock = args->firstblock;
+	nargs.flist = args->flist;
+	nargs.total = args->total;
+	nargs.whichfork = XFS_ATTR_FORK;
+	nargs.trans = args->trans;
+	nargs.oknoent = 1;
+
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+		nargs.name = (char *)sfe->nameval;
+		nargs.namelen = sfe->namelen;
+		nargs.value = (char *)&sfe->nameval[nargs.namelen];
+		nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT);
+		nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
+						sfe->namelen);
+		nargs.flags = (sfe->flags & XFS_ATTR_SECURE) ? ATTR_SECURE :
+				((sfe->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0);
+		error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
+		ASSERT(error == ENOATTR);
+		error = xfs_attr_leaf_add(bp, &nargs);
+		ASSERT(error != ENOSPC);
+		if (error)
+			goto out;
+		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+	}
+	error = 0;
+
+out:
+	if(bp)
+		xfs_da_buf_done(bp);
+	kmem_free(tmpbuffer, size);
+	return(error);
+}
+
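+/*
+ * Compare routine for sorting shortform entries: order by hashval,
+ * breaking ties with the original entry number so that equal hashes
+ * keep a stable order for the list cursor.
+ */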
+STATIC int
+xfs_attr_shortform_compare(const void *a, const void *b)
+{
+	xfs_attr_sf_sort_t *sa, *sb;
+
+	sa = (xfs_attr_sf_sort_t *)a;
+	sb = (xfs_attr_sf_sort_t *)b;
+	if (INT_GET(sa->hash, ARCH_CONVERT)
+				< INT_GET(sb->hash, ARCH_CONVERT)) {
+		return(-1);
+	} else if (INT_GET(sa->hash, ARCH_CONVERT)
+				> INT_GET(sb->hash, ARCH_CONVERT)) {
+		return(1);
+	} else {
+		return(sa->entno - sb->entno);
+	}
+}
+
+/*
+ * Copy out entries of shortform attribute lists for attr_list().
+ * Shortform attribute lists are not stored in hashval sorted order.
+ * If the output buffer is not large enough to hold them all, then
+ * we have to calculate each entry's hashval and sort them before
+ * we can begin returning them to the user.
+ */
+/*ARGSUSED*/
+int
+xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+{
+	attrlist_cursor_kern_t *cursor;
+	xfs_attr_sf_sort_t *sbuf, *sbp;
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	xfs_inode_t *dp;
+	int sbsize, nsbuf, count, i;
+
+	ASSERT(context != NULL);
+	dp = context->dp;
+	ASSERT(dp != NULL);
+	ASSERT(dp->i_afp != NULL);
+	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
+	ASSERT(sf != NULL);
+	if (!sf->hdr.count)
+		return(0);
+	cursor = context->cursor;
+	ASSERT(cursor != NULL);
+
+	xfs_attr_trace_l_c("sf start", context);
+
+	/*
+	 * If the buffer is large enough, do not bother with sorting.
+	 * Note the generous fudge factor of 16 overhead bytes per entry.
+	 */
+	if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16)
+							< context->bufsize) {
+		for (i = 0, sfe = &sf->list[0];
+				i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+			attrnames_t	*namesp;
+
+			if (((context->flags & ATTR_SECURE) != 0) !=
+			    ((sfe->flags & XFS_ATTR_SECURE) != 0) &&
+			    !(context->flags & ATTR_KERNORMALS)) {
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+				continue;
+			}
+			if (((context->flags & ATTR_ROOT) != 0) !=
+			    ((sfe->flags & XFS_ATTR_ROOT) != 0) &&
+			    !(context->flags & ATTR_KERNROOTLS)) {
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+				continue;
+			}
+			namesp = (sfe->flags & XFS_ATTR_SECURE) ? &attr_secure:
+				((sfe->flags & XFS_ATTR_ROOT) ? &attr_trusted :
+				  &attr_user);
+			if (context->flags & ATTR_KERNOVAL) {
+				ASSERT(context->flags & ATTR_KERNAMELS);
+				context->count += namesp->attr_namelen +
+					INT_GET(sfe->namelen, ARCH_CONVERT) + 1;
+			}
+			else {
+				if (xfs_attr_put_listent(context, namesp,
+						   (char *)sfe->nameval,
+						   (int)sfe->namelen,
+						   (int)INT_GET(sfe->valuelen,
+								ARCH_CONVERT)))
+					break;
+			}
+			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+		}
+		xfs_attr_trace_l_c("sf big-gulp", context);
+		return(0);
+	}
+
+	/*
+	 * It didn't all fit, so we have to sort everything on hashval.
+	 */
+	sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+
+	/*
+	 * Scan the attribute list for the rest of the entries, storing
+	 * the relevant info from only those that match into a buffer.
+	 */
+	nsbuf = 0;
+	for (i = 0, sfe = &sf->list[0];
+			i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+		if (unlikely(
+		    ((char *)sfe < (char *)sf) ||
+		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
+			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
+					     XFS_ERRLEVEL_LOW,
+					     context->dp->i_mount, sfe);
+			xfs_attr_trace_l_c("sf corrupted", context);
+			kmem_free(sbuf, sbsize);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		if (((context->flags & ATTR_SECURE) != 0) !=
+		    ((sfe->flags & XFS_ATTR_SECURE) != 0) &&
+		    !(context->flags & ATTR_KERNORMALS)) {
+			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+			continue;
+		}
+		if (((context->flags & ATTR_ROOT) != 0) !=
+		    ((sfe->flags & XFS_ATTR_ROOT) != 0) &&
+		    !(context->flags & ATTR_KERNROOTLS)) {
+			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+			continue;
+		}
+		sbp->entno = i;
+		INT_SET(sbp->hash, ARCH_CONVERT,
+			xfs_da_hashname((char *)sfe->nameval, sfe->namelen));
+		sbp->name = (char *)sfe->nameval;
+		sbp->namelen = sfe->namelen;
+		/* These are bytes, and both on-disk, don't endian-flip */
+		sbp->valuelen = sfe->valuelen;
+		sbp->flags = sfe->flags;
+		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+		sbp++;
+		nsbuf++;
+	}
+
+	/*
+	 * Sort the entries on hash then entno.
+	 */
+	qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
+
+	/*
+	 * Re-find our place IN THE SORTED LIST.
+	 */
+	count = 0;
+	cursor->initted = 1;
+	cursor->blkno = 0;
+	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
+		if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) {
+			if (cursor->offset == count) {
+				break;
+			}
+			count++;
+		} else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) {
+			break;
+		}
+	}
+	if (i == nsbuf) {
+		kmem_free(sbuf, sbsize);
+		xfs_attr_trace_l_c("blk end", context);
+		return(0);
+	}
+
+	/*
+	 * Loop putting entries into the user buffer.
+	 */
+	for ( ; i < nsbuf; i++, sbp++) {
+		attrnames_t	*namesp;
+
+		namesp = (sbp->flags & XFS_ATTR_SECURE) ? &attr_secure :
+			((sbp->flags & XFS_ATTR_ROOT) ? &attr_trusted :
+			  &attr_user);
+
+		if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) {
+			cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT);
+			cursor->offset = 0;
+		}
+		if (context->flags & ATTR_KERNOVAL) {
+			ASSERT(context->flags & ATTR_KERNAMELS);
+			context->count += namesp->attr_namelen +
+						sbp->namelen + 1;
+		} else {
+			if (xfs_attr_put_listent(context, namesp,
+					sbp->name, sbp->namelen,
+					INT_GET(sbp->valuelen, ARCH_CONVERT)))
+				break;
+		}
+		cursor->offset++;
+	}
+
+	kmem_free(sbuf, sbsize);
+	xfs_attr_trace_l_c("sf E-O-F", context);
+	return(0);
+}
+
+/*
+ * Check a leaf attribute block to see if all the entries would fit into
+ * a shortform attribute list.
+ */
+int
+xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	int bytes, i;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+
+	entry = &leaf->entries[0];
+	bytes = sizeof(struct xfs_attr_sf_hdr);
+	for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) {
+		if (entry->flags & XFS_ATTR_INCOMPLETE)
+			continue;		/* don't copy partial entries */
+		if (!(entry->flags & XFS_ATTR_LOCAL))
+			return(0);
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
+			return(0);
+		if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX)
+			return(0);
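+		/*
+		 * Count the shortform space this entry would need; subtract
+		 * one byte for the nameval[1] array already included in the
+		 * structure size.
+		 */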
+		bytes += sizeof(struct xfs_attr_sf_entry)-1
+				+ name_loc->namelen
+				+ INT_GET(name_loc->valuelen, ARCH_CONVERT);
+	}
+	return( bytes < XFS_IFORK_ASIZE(dp) );
+}
+
+/*
+ * Convert a leaf attribute list to shortform attribute list
+ */
+int
+xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_da_args_t nargs;
+	xfs_inode_t *dp;
+	char *tmpbuffer;
+	int error, i;
+
+	dp = args->dp;
+	tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+
+	ASSERT(bp != NULL);
+	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
+	leaf = (xfs_attr_leafblock_t *)tmpbuffer;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
+
+	/*
+	 * Clean out the prior contents of the attribute list.
+	 */
+	error = xfs_da_shrink_inode(args, 0, bp);
+	if (error)
+		goto out;
+	error = xfs_attr_shortform_create(args);
+	if (error)
+		goto out;
+
+	/*
+	 * Copy the attributes
+	 */
+	memset((char *)&nargs, 0, sizeof(nargs));
+	nargs.dp = dp;
+	nargs.firstblock = args->firstblock;
+	nargs.flist = args->flist;
+	nargs.total = args->total;
+	nargs.whichfork = XFS_ATTR_FORK;
+	nargs.trans = args->trans;
+	nargs.oknoent = 1;
+	entry = &leaf->entries[0];
+	for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) {
+		if (entry->flags & XFS_ATTR_INCOMPLETE)
+			continue;	/* don't copy partial entries */
+		if (!entry->nameidx)
+			continue;
+		ASSERT(entry->flags & XFS_ATTR_LOCAL);
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+		nargs.name = (char *)name_loc->nameval;
+		nargs.namelen = name_loc->namelen;
+		nargs.value = (char *)&name_loc->nameval[nargs.namelen];
+		nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT);
+		nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT);
+		nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE :
+			      ((entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0);
+		xfs_attr_shortform_add(&nargs);
+	}
+	error = 0;
+
+out:
+	kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount));
+	return(error);
+}
+
+/*
+ * Convert from using a single leaf to a root node and a leaf.
+ */
+int
+xfs_attr_leaf_to_node(xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_da_intnode_t *node;
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp1, *bp2;
+	xfs_dablk_t blkno;
+	int error;
+
+	dp = args->dp;
+	bp1 = bp2 = NULL;
+	error = xfs_da_grow_inode(args, &blkno);
+	if (error)
+		goto out;
+	error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
+					     XFS_ATTR_FORK);
+	if (error)
+		goto out;
+	ASSERT(bp1 != NULL);
+	bp2 = NULL;
+	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2,
+					    XFS_ATTR_FORK);
+	if (error)
+		goto out;
+	ASSERT(bp2 != NULL);
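+	/* Copy the existing leaf (block 0) into the newly allocated block. */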
+	memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
+	xfs_da_buf_done(bp1);
+	bp1 = NULL;
+	xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
+
+	/*
+	 * Set up the new root node.
+	 */
+	error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
+	if (error)
+		goto out;
+	node = bp1->data;
+	leaf = bp2->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	/* both on-disk, don't endian-flip twice */
+	node->btree[0].hashval =
+		leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval;
+	INT_SET(node->btree[0].before, ARCH_CONVERT, blkno);
+	INT_SET(node->hdr.count, ARCH_CONVERT, 1);
+	xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
+	error = 0;
+out:
+	if (bp1)
+		xfs_da_buf_done(bp1);
+	if (bp2)
+		xfs_da_buf_done(bp2);
+	return(error);
+}
+
+
+/*========================================================================
+ * Routines used for growing the Btree.
+ *========================================================================*/
+
+/*
+ * Create the initial contents of a leaf attribute list
+ * or a leaf in a node attribute list.
+ */
+int
+xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_hdr_t *hdr;
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int error;
+
+	dp = args->dp;
+	ASSERT(dp != NULL);
+	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
+					    XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
+	hdr = &leaf->hdr;
+	INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC);
+	INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
+	if (!hdr->firstused) {
+		INT_SET(hdr->firstused, ARCH_CONVERT,
+			XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN);
+	}
+
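+	/*
+	 * The single free region initially covers everything between the
+	 * leaf header and firstused (the start of the name/value area).
+	 */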
+	INT_SET(hdr->freemap[0].base, ARCH_CONVERT,
+						sizeof(xfs_attr_leaf_hdr_t));
+	INT_SET(hdr->freemap[0].size, ARCH_CONVERT,
+					  INT_GET(hdr->firstused, ARCH_CONVERT)
+					- INT_GET(hdr->freemap[0].base,
+								ARCH_CONVERT));
+
+	xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
+
+	*bpp = bp;
+	return(0);
+}
+
+/*
+ * Split the leaf node, rebalance, then add the new entry.
+ */
+int
+xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
+				   xfs_da_state_blk_t *newblk)
+{
+	xfs_dablk_t blkno;
+	int error;
+
+	/*
+	 * Allocate space for a new leaf node.
+	 */
+	ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
+	error = xfs_da_grow_inode(state->args, &blkno);
+	if (error)
+		return(error);
+	error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp);
+	if (error)
+		return(error);
+	newblk->blkno = blkno;
+	newblk->magic = XFS_ATTR_LEAF_MAGIC;
+
+	/*
+	 * Rebalance the entries across the two leaves.
+	 * NOTE: rebalance() currently depends on the 2nd block being empty.
+	 */
+	xfs_attr_leaf_rebalance(state, oldblk, newblk);
+	error = xfs_da_blk_link(state, oldblk, newblk);
+	if (error)
+		return(error);
+
+	/*
+	 * Save info on the "old" attribute for "atomic rename" ops; leaf_add()
+	 * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
+	 * "new" attr's info.  We will need the "old" info to remove it later.
+	 *
+	 * Insert the "new" entry in the correct block.
+	 */
+	if (state->inleaf)
+		error = xfs_attr_leaf_add(oldblk->bp, state->args);
+	else
+		error = xfs_attr_leaf_add(newblk->bp, state->args);
+
+	/*
+	 * Update last hashval in each block since we added the name.
+	 */
+	oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
+	newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
+	return(error);
+}
+
+/*
+ * Add a name to the leaf attribute list structure.
+ */
+int
+xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_hdr_t *hdr;
+	xfs_attr_leaf_map_t *map;
+	int tablesize, entsize, sum, tmp, i;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT((args->index >= 0)
+		&& (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT)));
+	hdr = &leaf->hdr;
+	entsize = xfs_attr_leaf_newentsize(args,
+			   args->trans->t_mountp->m_sb.sb_blocksize, NULL);
+
+	/*
+	 * Search through freemap for first-fit on new name length.
+	 * (may need to figure in size of entry struct too)
+	 */
+	tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1)
+					* sizeof(xfs_attr_leaf_entry_t)
+					+ sizeof(xfs_attr_leaf_hdr_t);
+	map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1];
+	for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
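+		/*
+		 * If the entry table (grown by one) would run into the
+		 * name/value area, no freemap region can be used as-is;
+		 * just total up the free space for the compaction decision
+		 * below.
+		 */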
+		if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) {
+			sum += INT_GET(map->size, ARCH_CONVERT);
+			continue;
+		}
+		if (!map->size)
+			continue;	/* no space in this map */
+		tmp = entsize;
+		if (INT_GET(map->base, ARCH_CONVERT)
+				< INT_GET(hdr->firstused, ARCH_CONVERT))
+			tmp += sizeof(xfs_attr_leaf_entry_t);
+		if (INT_GET(map->size, ARCH_CONVERT) >= tmp) {
+			tmp = xfs_attr_leaf_add_work(bp, args, i);
+			return(tmp);
+		}
+		sum += INT_GET(map->size, ARCH_CONVERT);
+	}
+
+	/*
+	 * If there are no holes in the address space of the block,
+	 * and we don't have enough freespace, then compaction will do us
+	 * no good and we should just give up.
+	 */
+	if (!hdr->holes && (sum < entsize))
+		return(XFS_ERROR(ENOSPC));
+
+	/*
+	 * Compact the entries to coalesce free space.
+	 * This may change the hdr->count via dropping INCOMPLETE entries.
+	 */
+	xfs_attr_leaf_compact(args->trans, bp);
+
+	/*
+	 * After compaction, the block is guaranteed to have only one
+	 * free region, in freemap[0].  If it is not big enough, give up.
+	 */
+	if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT)
+				< (entsize + sizeof(xfs_attr_leaf_entry_t)))
+		return(XFS_ERROR(ENOSPC));
+
+	return(xfs_attr_leaf_add_work(bp, args, 0));
+}
+
+/*
+ * Add a name to a leaf attribute list structure.
+ */
+STATIC int
+xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_hdr_t *hdr;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	xfs_attr_leaf_map_t *map;
+	xfs_mount_t *mp;
+	int tmp, i;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	hdr = &leaf->hdr;
+	ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
+	ASSERT((args->index >= 0)
+		&& (args->index <= INT_GET(hdr->count, ARCH_CONVERT)));
+
+	/*
+	 * Force open some space in the entry array and fill it in.
+	 */
+	entry = &leaf->entries[args->index];
+	if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) {
+		tmp  = INT_GET(hdr->count, ARCH_CONVERT) - args->index;
+		tmp *= sizeof(xfs_attr_leaf_entry_t);
+		memmove((char *)(entry+1), (char *)entry, tmp);
+		xfs_da_log_buf(args->trans, bp,
+		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
+	}
+	INT_MOD(hdr->count, ARCH_CONVERT, 1);
+
+	/*
+	 * Allocate space for the new string (at the end of the run).
+	 */
+	map = &hdr->freemap[mapindex];
+	mp = args->trans->t_mountp;
+	ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
+	ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0);
+	ASSERT(INT_GET(map->size, ARCH_CONVERT)
+				>= xfs_attr_leaf_newentsize(args,
+					     mp->m_sb.sb_blocksize, NULL));
+	ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
+	ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0);
+	INT_MOD(map->size, ARCH_CONVERT,
+		-xfs_attr_leaf_newentsize(args, mp->m_sb.sb_blocksize, &tmp));
+	INT_SET(entry->nameidx, ARCH_CONVERT,
+					INT_GET(map->base, ARCH_CONVERT)
+				      + INT_GET(map->size, ARCH_CONVERT));
+	INT_SET(entry->hashval, ARCH_CONVERT, args->hashval);
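+	/* "tmp" was set by newentsize() above: non-zero means a local value */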
+	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
+	entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE :
+			((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
+	if (args->rename) {
+		entry->flags |= XFS_ATTR_INCOMPLETE;
+		if ((args->blkno2 == args->blkno) &&
+		    (args->index2 <= args->index)) {
+			args->index2++;
+		}
+	}
+	xfs_da_log_buf(args->trans, bp,
+			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
+	ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT)
+						>= INT_GET((entry-1)->hashval,
+							    ARCH_CONVERT)));
+	ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) ||
+	       (INT_GET(entry->hashval, ARCH_CONVERT)
+			    <= (INT_GET((entry+1)->hashval, ARCH_CONVERT))));
+
+	/*
+	 * Copy the attribute name and value into the new space.
+	 *
+	 * For "remote" attribute values, simply note that we need to
+	 * allocate space for the "remote" value.  We can't actually
+	 * allocate the extents in this transaction, and we can't decide
+	 * which blocks they should be as we might allocate more blocks
+	 * as part of this transaction (a split operation for example).
+	 */
+	if (entry->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		name_loc->namelen = args->namelen;
+		INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen);
+		memcpy((char *)name_loc->nameval, args->name, args->namelen);
+		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
+				   INT_GET(name_loc->valuelen, ARCH_CONVERT));
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt->namelen = args->namelen;
+		memcpy((char *)name_rmt->name, args->name, args->namelen);
+		entry->flags |= XFS_ATTR_INCOMPLETE;
+		/* just in case */
+		name_rmt->valuelen = 0;
+		name_rmt->valueblk = 0;
+		args->rmtblkno = 1;
+		args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+	}
+	xfs_da_log_buf(args->trans, bp,
+	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+				   xfs_attr_leaf_entsize(leaf, args->index)));
+
+	/*
+	 * Update the control info for this leaf node
+	 */
+	if (INT_GET(entry->nameidx, ARCH_CONVERT)
+				< INT_GET(hdr->firstused, ARCH_CONVERT)) {
+		/* both on-disk, don't endian-flip twice */
+		hdr->firstused = entry->nameidx;
+	}
+	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT)
+				>= ((INT_GET(hdr->count, ARCH_CONVERT)
+					* sizeof(*entry))+sizeof(*hdr)));
+	tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1)
+					* sizeof(xfs_attr_leaf_entry_t)
+					+ sizeof(xfs_attr_leaf_hdr_t);
+	map = &hdr->freemap[0];
+	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
+		if (INT_GET(map->base, ARCH_CONVERT) == tmp) {
+			INT_MOD(map->base, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_entry_t));
+			INT_MOD(map->size, ARCH_CONVERT,
+					-sizeof(xfs_attr_leaf_entry_t));
+		}
+	}
+	INT_MOD(hdr->usedbytes, ARCH_CONVERT,
+				xfs_attr_leaf_entsize(leaf, args->index));
+	xfs_da_log_buf(args->trans, bp,
+		XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+	return(0);
+}
+
+/*
+ * Garbage collect a leaf attribute list block by copying it to a new buffer.
+ */
+STATIC void
+xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
+{
+	xfs_attr_leafblock_t *leaf_s, *leaf_d;
+	xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
+	xfs_mount_t *mp;
+	char *tmpbuffer;
+
+	mp = trans->t_mountp;
+	tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
+	memset(bp->data, 0, XFS_LBSIZE(mp));
+
+	/*
+	 * Copy basic information
+	 */
+	leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
+	leaf_d = bp->data;
+	hdr_s = &leaf_s->hdr;
+	hdr_d = &leaf_d->hdr;
+	hdr_d->info = hdr_s->info;	/* struct copy */
+	INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp));
+	/* handle truncation gracefully */
+	if (!hdr_d->firstused) {
+		INT_SET(hdr_d->firstused, ARCH_CONVERT,
+				XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
+	}
+	hdr_d->usedbytes = 0;
+	hdr_d->count = 0;
+	hdr_d->holes = 0;
+	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_hdr_t));
+	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT,
+				INT_GET(hdr_d->firstused, ARCH_CONVERT)
+			      - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate name/value pairs packed and in sequence.
+	 */
+	xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
+				(int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
+
+	xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
+
+	kmem_free(tmpbuffer, XFS_LBSIZE(mp));
+}
+
+/*
+ * Redistribute the attribute list entries between two leaf nodes,
+ * taking into account the size of the new entry.
+ *
+ * NOTE: if new block is empty, then it will get the upper half of the
+ * old block.  At present, all (one) callers pass in an empty second block.
+ *
+ * This code adjusts the args->index/blkno and args->index2/blkno2 fields
+ * to match what it is doing in splitting the attribute leaf block.  Those
+ * values are used in "atomic rename" operations on attributes.  Note that
+ * the "new" and "old" values can end up in different blocks.
+ */
+STATIC void
+xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
+				       xfs_da_state_blk_t *blk2)
+{
+	xfs_da_args_t *args;
+	xfs_da_state_blk_t *tmp_blk;
+	xfs_attr_leafblock_t *leaf1, *leaf2;
+	xfs_attr_leaf_hdr_t *hdr1, *hdr2;
+	int count, totallen, max, space, swap;
+
+	/*
+	 * Set up environment.
+	 */
+	ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
+	leaf1 = blk1->bp->data;
+	leaf2 = blk2->bp->data;
+	ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	args = state->args;
+
+	/*
+	 * Check ordering of blocks, reverse if it makes things simpler.
+	 *
+	 * NOTE: Given that all (current) callers pass in an empty
+	 * second block, this code should never set "swap".
+	 */
+	swap = 0;
+	if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) {
+		tmp_blk = blk1;
+		blk1 = blk2;
+		blk2 = tmp_blk;
+		leaf1 = blk1->bp->data;
+		leaf2 = blk2->bp->data;
+		swap = 1;
+	}
+	hdr1 = &leaf1->hdr;
+	hdr2 = &leaf2->hdr;
+
+	/*
+	 * Examine entries until we reduce the absolute difference in
+	 * byte usage between the two blocks to a minimum.  Then get
+	 * the direction to copy and the number of elements to move.
+	 *
+	 * "inleaf" is true if the new entry should be inserted into blk1.
+	 * If "swap" is also true, then reverse the sense of "inleaf".
+	 */
+	state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2,
+							    &count, &totallen);
+	if (swap)
+		state->inleaf = !state->inleaf;
+
+	/*
+	 * Move any entries required from leaf to leaf:
+	 */
+	if (count < INT_GET(hdr1->count, ARCH_CONVERT)) {
+		/*
+		 * Figure the total bytes to be added to the destination leaf.
+		 */
+		/* number of entries being moved */
+		count = INT_GET(hdr1->count, ARCH_CONVERT) - count;
+		space  = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen;
+		space += count * sizeof(xfs_attr_leaf_entry_t);
+
+		/*
+		 * leaf2 is the destination, compact it if it looks tight.
+		 */
+		max  = INT_GET(hdr2->firstused, ARCH_CONVERT)
+						- sizeof(xfs_attr_leaf_hdr_t);
+		max -= INT_GET(hdr2->count, ARCH_CONVERT)
+					* sizeof(xfs_attr_leaf_entry_t);
+		if (space > max) {
+			xfs_attr_leaf_compact(args->trans, blk2->bp);
+		}
+
+		/*
+		 * Move high entries from leaf1 to low end of leaf2.
+		 */
+		xfs_attr_leaf_moveents(leaf1,
+				INT_GET(hdr1->count, ARCH_CONVERT)-count,
+				leaf2, 0, count, state->mp);
+
+		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
+		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
+	} else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) {
+		/*
+		 * I assert that since all callers pass in an empty
+		 * second buffer, this code should never execute.
+		 */
+
+		/*
+		 * Figure the total bytes to be added to the destination leaf.
+		 */
+		/* number of entries being moved */
+		count -= INT_GET(hdr1->count, ARCH_CONVERT);
+		space  = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT);
+		space += count * sizeof(xfs_attr_leaf_entry_t);
+
+		/*
+		 * leaf1 is the destination, compact it if it looks tight.
+		 */
+		max  = INT_GET(hdr1->firstused, ARCH_CONVERT)
+						- sizeof(xfs_attr_leaf_hdr_t);
+		max -= INT_GET(hdr1->count, ARCH_CONVERT)
+					* sizeof(xfs_attr_leaf_entry_t);
+		if (space > max) {
+			xfs_attr_leaf_compact(args->trans, blk1->bp);
+		}
+
+		/*
+		 * Move low entries from leaf2 to high end of leaf1.
+		 */
+		xfs_attr_leaf_moveents(leaf2, 0, leaf1,
+				(int)INT_GET(hdr1->count, ARCH_CONVERT), count,
+				state->mp);
+
+		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
+		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
+	}
+
+	/*
+	 * Copy out last hashval in each block for B-tree code.
+	 */
+	blk1->hashval =
+	    INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count,
+				    ARCH_CONVERT)-1].hashval, ARCH_CONVERT);
+	blk2->hashval =
+	    INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count,
+				    ARCH_CONVERT)-1].hashval, ARCH_CONVERT);
+
+	/*
+	 * Adjust the expected index for insertion.
+	 * NOTE: this code depends on the (current) situation that the
+	 * second block was originally empty.
+	 *
+	 * If the insertion point moved to the 2nd block, we must adjust
+	 * the index.  We must also track the entry just following the
+	 * new entry for use in an "atomic rename" operation, that entry
+	 * is always the "old" entry and the "new" entry is what we are
+	 * inserting.  The index/blkno fields refer to the "old" entry,
+	 * while the index2/blkno2 fields refer to the "new" entry.
+	 */
+	if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) {
+		ASSERT(state->inleaf == 0);
+		blk2->index = blk1->index
+				- INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+		args->index = args->index2 = blk2->index;
+		args->blkno = args->blkno2 = blk2->blkno;
+	} else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) {
+		if (state->inleaf) {
+			args->index = blk1->index;
+			args->blkno = blk1->blkno;
+			args->index2 = 0;
+			args->blkno2 = blk2->blkno;
+		} else {
+			blk2->index = blk1->index
+				    - INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+			args->index = args->index2 = blk2->index;
+			args->blkno = args->blkno2 = blk2->blkno;
+		}
+	} else {
+		ASSERT(state->inleaf == 1);
+		args->index = args->index2 = blk1->index;
+		args->blkno = args->blkno2 = blk1->blkno;
+	}
+}
+
+/*
+ * Examine entries until we reduce the absolute difference in
+ * byte usage between the two blocks to a minimum.
+ * GROT: Is this really necessary?  With other than a 512 byte blocksize,
+ * GROT: there will always be enough room in either block for a new entry.
+ * GROT: Do a double-split for this case?
+ */
+STATIC int
+xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
+				    xfs_da_state_blk_t *blk1,
+				    xfs_da_state_blk_t *blk2,
+				    int *countarg, int *usedbytesarg)
+{
+	xfs_attr_leafblock_t *leaf1, *leaf2;
+	xfs_attr_leaf_hdr_t *hdr1, *hdr2;
+	xfs_attr_leaf_entry_t *entry;
+	int count, max, index, totallen, half;
+	int lastdelta, foundit, tmp;
+
+	/*
+	 * Set up environment.
+	 */
+	leaf1 = blk1->bp->data;
+	leaf2 = blk2->bp->data;
+	hdr1 = &leaf1->hdr;
+	hdr2 = &leaf2->hdr;
+	foundit = 0;
+	totallen = 0;
+
+	/*
+	 * Examine entries until we reduce the absolute difference in
+	 * byte usage between the two blocks to a minimum.
+	 */
+	max = INT_GET(hdr1->count, ARCH_CONVERT)
+			+ INT_GET(hdr2->count, ARCH_CONVERT);
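+	/*
+	 * "half" is the target byte count for the lower block: half of the
+	 * total space all entries, including the new one, would occupy.
+	 */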
+	half  = (max+1) * sizeof(*entry);
+	half += INT_GET(hdr1->usedbytes, ARCH_CONVERT)
+				+ INT_GET(hdr2->usedbytes, ARCH_CONVERT)
+				+ xfs_attr_leaf_newentsize(state->args,
+						     state->blocksize, NULL);
+	half /= 2;
+	lastdelta = state->blocksize;
+	entry = &leaf1->entries[0];
+	for (count = index = 0; count < max; entry++, index++, count++) {
+
+#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
+		/*
+		 * The new entry is in the first block, account for it.
+		 */
+		if (count == blk1->index) {
+			tmp = totallen + sizeof(*entry) +
+				xfs_attr_leaf_newentsize(state->args,
+							 state->blocksize,
+							 NULL);
+			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
+				break;
+			lastdelta = XFS_ATTR_ABS(half - tmp);
+			totallen = tmp;
+			foundit = 1;
+		}
+
+		/*
+		 * Wrap around into the second block if necessary.
+		 */
+		if (count == INT_GET(hdr1->count, ARCH_CONVERT)) {
+			leaf1 = leaf2;
+			entry = &leaf1->entries[0];
+			index = 0;
+		}
+
+		/*
+		 * Figure out if next leaf entry would be too much.
+		 */
+		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
+									index);
+		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
+			break;
+		lastdelta = XFS_ATTR_ABS(half - tmp);
+		totallen = tmp;
+#undef XFS_ATTR_ABS
+	}
+
+	/*
+	 * Calculate the number of usedbytes that will end up in the lower block.
+	 * If new entry not in lower block, fix up the count.
+	 */
+	totallen -= count * sizeof(*entry);
+	if (foundit) {
+		totallen -= sizeof(*entry) +
+				xfs_attr_leaf_newentsize(state->args,
+							 state->blocksize,
+							 NULL);
+	}
+
+	*countarg = count;
+	*usedbytesarg = totallen;
+	return(foundit);
+}
+
+/*========================================================================
+ * Routines used for shrinking the Btree.
+ *========================================================================*/
+
+/*
+ * Check a leaf block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor.  Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
+ *
+ * GROT: allow for INCOMPLETE entries in calculation.
+ */
+int
+xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *info;
+	int count, bytes, forward, error, retval, i;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+
+	/*
+	 * Check for the degenerate case of the block being over 50% full.
+	 * If so, it's not worth even looking to see if we might be able
+	 * to coalesce with a sibling.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	info = blk->bp->data;
+	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+	leaf = (xfs_attr_leafblock_t *)info;
+	count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	bytes = sizeof(xfs_attr_leaf_hdr_t) +
+		count * sizeof(xfs_attr_leaf_entry_t) +
+		INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+	if (bytes > (state->blocksize >> 1)) {
+		*action = 0;	/* blk over 50%, don't try to join */
+		return(0);
+	}
+
+	/*
+	 * Check for the degenerate case of the block being empty.
+	 * If the block is empty, we'll simply delete it, no need to
+	 * coalesce it with a sibling block.  We choose (arbitrarily)
+	 * to merge with the forward block unless it is NULL.
+	 */
+	if (count == 0) {
+		/*
+		 * Make altpath point to the block we want to keep and
+		 * path point to the block we want to drop (this one).
+		 */
+		forward = info->forw;
+		memcpy(&state->altpath, &state->path, sizeof(state->path));
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+		if (error)
+			return(error);
+		if (retval) {
+			*action = 0;
+		} else {
+			*action = 2;
+		}
+		return(0);
+	}
+
+	/*
+	 * Examine each sibling block to see if we can coalesce with
+	 * at least 25% free space to spare.  We need to figure out
+	 * whether to merge with the forward or the backward block.
+	 * We prefer coalescing with the lower numbered sibling so as
+	 * to shrink an attribute list over time.
+	 */
+	/* start with smaller blk num */
+	forward = (INT_GET(info->forw, ARCH_CONVERT)
+					< INT_GET(info->back, ARCH_CONVERT));
+	for (i = 0; i < 2; forward = !forward, i++) {
+		if (forward)
+			blkno = INT_GET(info->forw, ARCH_CONVERT);
+		else
+			blkno = INT_GET(info->back, ARCH_CONVERT);
+		if (blkno == 0)
+			continue;
+		error = xfs_da_read_buf(state->args->trans, state->args->dp,
+					blkno, -1, &bp, XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		ASSERT(bp != NULL);
+
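+		/*
+		 * Start from 75% of a block and subtract what this block
+		 * and the sibling actually use; if the result stays
+		 * non-negative, a merge leaves at least 25% free.
+		 */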
+		leaf = (xfs_attr_leafblock_t *)info;
+		count  = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		bytes  = state->blocksize - (state->blocksize>>2);
+		bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+		leaf = bp->data;
+		ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+		count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+		bytes -= count * sizeof(xfs_attr_leaf_entry_t);
+		bytes -= sizeof(xfs_attr_leaf_hdr_t);
+		xfs_da_brelse(state->args->trans, bp);
+		if (bytes >= 0)
+			break;	/* fits with at least 25% to spare */
+	}
+	if (i >= 2) {
+		*action = 0;
+		return(0);
+	}
+
+	/*
+	 * Make altpath point to the block we want to keep (the lower
+	 * numbered block) and path point to the block we want to drop.
+	 */
+	memcpy(&state->altpath, &state->path, sizeof(state->path));
+	if (blkno < blk->blkno) {
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+	} else {
+		error = xfs_da_path_shift(state, &state->path, forward,
+						 0, &retval);
+	}
+	if (error)
+		return(error);
+	if (retval) {
+		*action = 0;
+	} else {
+		*action = 1;
+	}
+	return(0);
+}
+
+/*
+ * Remove a name from the leaf attribute list structure.
+ *
+ * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
+ * If two leaves are 37% full, when combined they will leave 25% free.
+ */
+int
+xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_hdr_t *hdr;
+	xfs_attr_leaf_map_t *map;
+	xfs_attr_leaf_entry_t *entry;
+	int before, after, smallest, entsize;
+	int tablesize, tmp, i;
+	xfs_mount_t *mp;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	hdr = &leaf->hdr;
+	mp = args->trans->t_mountp;
+	ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0)
+		&& (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
+	ASSERT((args->index >= 0)
+		&& (args->index < INT_GET(hdr->count, ARCH_CONVERT)));
+	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT)
+				>= ((INT_GET(hdr->count, ARCH_CONVERT)
+					* sizeof(*entry))+sizeof(*hdr)));
+	entry = &leaf->entries[args->index];
+	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
+				>= INT_GET(hdr->firstused, ARCH_CONVERT));
+	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
+
+	/*
+	 * Scan through free region table:
+	 *    check for adjacency of free'd entry with an existing one,
+	 *    find smallest free region in case we need to replace it,
+	 *    adjust any map that borders the entry table.
+	 */
+	tablesize = INT_GET(hdr->count, ARCH_CONVERT)
+					* sizeof(xfs_attr_leaf_entry_t)
+					+ sizeof(xfs_attr_leaf_hdr_t);
+	map = &hdr->freemap[0];
+	tmp = INT_GET(map->size, ARCH_CONVERT);
+	before = after = -1;
+	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
+	entsize = xfs_attr_leaf_entsize(leaf, args->index);
+	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
+		ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
+		ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
+		if (INT_GET(map->base, ARCH_CONVERT) == tablesize) {
+			INT_MOD(map->base, ARCH_CONVERT,
+					-sizeof(xfs_attr_leaf_entry_t));
+			INT_MOD(map->size, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_entry_t));
+		}
+
+		if ((INT_GET(map->base, ARCH_CONVERT)
+					+ INT_GET(map->size, ARCH_CONVERT))
+				== INT_GET(entry->nameidx, ARCH_CONVERT)) {
+			before = i;
+		} else if (INT_GET(map->base, ARCH_CONVERT)
+			== (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) {
+			after = i;
+		} else if (INT_GET(map->size, ARCH_CONVERT) < tmp) {
+			tmp = INT_GET(map->size, ARCH_CONVERT);
+			smallest = i;
+		}
+	}
+
+	/*
+	 * Coalesce adjacent freemap regions,
+	 * or replace the smallest region.
+	 */
+	if ((before >= 0) || (after >= 0)) {
+		if ((before >= 0) && (after >= 0)) {
+			map = &hdr->freemap[before];
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+			INT_MOD(map->size, ARCH_CONVERT,
+				INT_GET(hdr->freemap[after].size,
+							ARCH_CONVERT));
+			hdr->freemap[after].base = 0;
+			hdr->freemap[after].size = 0;
+		} else if (before >= 0) {
+			map = &hdr->freemap[before];
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+		} else {
+			map = &hdr->freemap[after];
+			/* both on-disk, don't endian flip twice */
+			map->base = entry->nameidx;
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+		}
+	} else {
+		/*
+		 * Replace smallest region (if it is smaller than free'd entry)
+		 */
+		map = &hdr->freemap[smallest];
+		if (INT_GET(map->size, ARCH_CONVERT) < entsize) {
+			INT_SET(map->base, ARCH_CONVERT,
+					INT_GET(entry->nameidx, ARCH_CONVERT));
+			INT_SET(map->size, ARCH_CONVERT, entsize);
+		}
+	}
+
+	/*
+	 * Did we remove the first entry?
+	 */
+	if (INT_GET(entry->nameidx, ARCH_CONVERT)
+				== INT_GET(hdr->firstused, ARCH_CONVERT))
+		smallest = 1;
+	else
+		smallest = 0;
+
+	/*
+	 * Compress the remaining entries and zero out the removed stuff.
+	 */
+	memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
+	INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize);
+	xfs_da_log_buf(args->trans, bp,
+	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+				   entsize));
+
+	tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index)
+					* sizeof(xfs_attr_leaf_entry_t);
+	memmove((char *)entry, (char *)(entry+1), tmp);
+	INT_MOD(hdr->count, ARCH_CONVERT, -1);
+	xfs_da_log_buf(args->trans, bp,
+	    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
+	entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
+	memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
+
+	/*
+	 * If we removed the first entry, re-find the first used byte
+	 * in the name area.  Note that if the entry was the "firstused",
+	 * then we don't have a "hole" in our block resulting from
+	 * removing the name.
+	 */
+	if (smallest) {
+		tmp = XFS_LBSIZE(mp);
+		entry = &leaf->entries[0];
+		for (i = INT_GET(hdr->count, ARCH_CONVERT)-1;
+						i >= 0; entry++, i--) {
+			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
+				>= INT_GET(hdr->firstused, ARCH_CONVERT));
+			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
+							< XFS_LBSIZE(mp));
+			if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp)
+				tmp = INT_GET(entry->nameidx, ARCH_CONVERT);
+		}
+		INT_SET(hdr->firstused, ARCH_CONVERT, tmp);
+		if (!hdr->firstused) {
+			INT_SET(hdr->firstused, ARCH_CONVERT,
+					tmp - XFS_ATTR_LEAF_NAME_ALIGN);
+		}
+	} else {
+		hdr->holes = 1;		/* mark as needing compaction */
+	}
+	xfs_da_log_buf(args->trans, bp,
+			  XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+
+	/*
+	 * Check if leaf is less than 37% full, caller may want to
+	 * "join" the leaf with a sibling if so.
+	 */
+	tmp  = sizeof(xfs_attr_leaf_hdr_t);
+	tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT)
+					* sizeof(xfs_attr_leaf_entry_t);
+	tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+	return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */
+}
+
+/*
+ * Move all the attribute list entries from drop_leaf into save_leaf.
+ */
+void
+xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
+				       xfs_da_state_blk_t *save_blk)
+{
+	xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
+	xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
+	xfs_mount_t *mp;
+	char *tmpbuffer;
+
+	/*
+	 * Set up environment.
+	 */
+	mp = state->mp;
+	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
+	drop_leaf = drop_blk->bp->data;
+	save_leaf = save_blk->bp->data;
+	ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	drop_hdr = &drop_leaf->hdr;
+	save_hdr = &save_leaf->hdr;
+
+	/*
+	 * Save last hashval from dying block for later Btree fixup.
+	 */
+	drop_blk->hashval =
+		INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count,
+						ARCH_CONVERT)-1].hashval,
+								ARCH_CONVERT);
+
+	/*
+	 * Check if we need a temp buffer, or can we do it in place.
+	 * Note that we don't check "leaf" for holes because we will
+	 * always be dropping it; toosmall() decided that for us already.
+	 */
+	if (save_hdr->holes == 0) {
+		/*
+		 * dest leaf has no holes, so we add there.  May need
+		 * to make some room in the entry array.
+		 */
+		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
+			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0,
+			     (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+		} else {
+			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf,
+				  INT_GET(save_hdr->count, ARCH_CONVERT),
+				  (int)INT_GET(drop_hdr->count, ARCH_CONVERT),
+				  mp);
+		}
+	} else {
+		/*
+		 * Destination has holes, so we make a temporary copy
+		 * of the leaf and add them both to that.
+		 */
+		tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
+		ASSERT(tmpbuffer != NULL);
+		memset(tmpbuffer, 0, state->blocksize);
+		tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer;
+		tmp_hdr = &tmp_leaf->hdr;
+		tmp_hdr->info = save_hdr->info;	/* struct copy */
+		tmp_hdr->count = 0;
+		INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize);
+		if (!tmp_hdr->firstused) {
+			INT_SET(tmp_hdr->firstused, ARCH_CONVERT,
+				state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN);
+		}
+		tmp_hdr->usedbytes = 0;
+		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
+			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
+				(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
+				mp);
+			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf,
+				  INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
+				 (int)INT_GET(save_hdr->count, ARCH_CONVERT),
+				 mp);
+		} else {
+			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
+				(int)INT_GET(save_hdr->count, ARCH_CONVERT),
+				mp);
+			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf,
+				INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
+				(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
+				mp);
+		}
+		memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
+		kmem_free(tmpbuffer, state->blocksize);
+	}
+
+	xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
+					   state->blocksize - 1);
+
+	/*
+	 * Copy out last hashval in each block for B-tree code.
+	 */
+	save_blk->hashval =
+		INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count,
+						ARCH_CONVERT)-1].hashval,
+								ARCH_CONVERT);
+}
+
+/*========================================================================
+ * Routines used for finding things in the Btree.
+ *========================================================================*/
+
+/*
+ * Look up a name in a leaf attribute list structure.
+ * This is the internal routine, it uses the caller's buffer.
+ *
+ * Note that duplicate keys are allowed, but only check within the
+ * current leaf node.  The Btree code must check in adjacent leaf nodes.
+ *
+ * Return in args->index the index into the entry[] array of either
+ * the found entry, or where the entry should have been (insert before
+ * that entry).
+ *
+ * Don't change the args->value unless we find the attribute.
+ */
+int
+xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	int probe, span;
+	xfs_dahash_t hashval;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT)
+					< (XFS_LBSIZE(args->dp->i_mount)/8));
+
+	/*
+	 * Binary search.  (note: small blocks will skip this loop)
+	 */
+	hashval = args->hashval;
+	probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2;
+	for (entry = &leaf->entries[probe]; span > 4;
+		   entry = &leaf->entries[probe]) {
+		span /= 2;
+		if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)
+			probe += span;
+		else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval)
+			probe -= span;
+		else
+			break;
+	}
+	ASSERT((probe >= 0) &&
+	       (!leaf->hdr.count
+	       || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))));
+	ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT)
+							== hashval));
+
+	/*
+	 * Since we may have duplicate hashvals, find the first matching
+	 * hashval in the leaf.
+	 */
+	while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT)
+							>= hashval)) {
+		entry--;
+		probe--;
+	}
+	while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+		&& (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) {
+		entry++;
+		probe++;
+	}
+	if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT))
+		    || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) {
+		args->index = probe;
+		return(XFS_ERROR(ENOATTR));
+	}
+
+	/*
+	 * Duplicate keys may be present, so search all of them for a match.
+	 */
+	for (  ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+			&& (INT_GET(entry->hashval, ARCH_CONVERT) == hashval);
+			entry++, probe++) {
+/*
+ * GROT: Add code to remove incomplete entries.
+ */
+		/*
+		 * If we are looking for INCOMPLETE entries, show only those.
+		 * If we are looking for complete entries, show only those.
+		 */
+		if ((args->flags & XFS_ATTR_INCOMPLETE) !=
+		    (entry->flags & XFS_ATTR_INCOMPLETE)) {
+			continue;
+		}
+		if (entry->flags & XFS_ATTR_LOCAL) {
+			name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe);
+			if (name_loc->namelen != args->namelen)
+				continue;
+			if (memcmp(args->name, (char *)name_loc->nameval,
+					     args->namelen) != 0)
+				continue;
+			if (((args->flags & ATTR_SECURE) != 0) !=
+			    ((entry->flags & XFS_ATTR_SECURE) != 0))
+				continue;
+			if (((args->flags & ATTR_ROOT) != 0) !=
+			    ((entry->flags & XFS_ATTR_ROOT) != 0))
+				continue;
+			args->index = probe;
+			return(XFS_ERROR(EEXIST));
+		} else {
+			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe);
+			if (name_rmt->namelen != args->namelen)
+				continue;
+			if (memcmp(args->name, (char *)name_rmt->name,
+					     args->namelen) != 0)
+				continue;
+			if (((args->flags & ATTR_SECURE) != 0) !=
+			    ((entry->flags & XFS_ATTR_SECURE) != 0))
+				continue;
+			if (((args->flags & ATTR_ROOT) != 0) !=
+			    ((entry->flags & XFS_ATTR_ROOT) != 0))
+				continue;
+			args->index = probe;
+			args->rmtblkno
+				  = INT_GET(name_rmt->valueblk, ARCH_CONVERT);
+			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
+						   INT_GET(name_rmt->valuelen,
+								ARCH_CONVERT));
+			return(XFS_ERROR(EEXIST));
+		}
+	}
+	args->index = probe;
+	return(XFS_ERROR(ENOATTR));
+}
+
+/*
+ * Get the value associated with an attribute name from a leaf attribute
+ * list structure.
+ */
+int
+xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
+{
+	int valuelen;
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT)
+					< (XFS_LBSIZE(args->dp->i_mount)/8));
+	ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT)));
+
+	entry = &leaf->entries[args->index];
+	if (entry->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		ASSERT(name_loc->namelen == args->namelen);
+		ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
+		valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT);
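+		/* ATTR_KERNOVAL: the caller only wants the value's length */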
+		if (args->flags & ATTR_KERNOVAL) {
+			args->valuelen = valuelen;
+			return(0);
+		}
+		if (args->valuelen < valuelen) {
+			args->valuelen = valuelen;
+			return(XFS_ERROR(ERANGE));
+		}
+		args->valuelen = valuelen;
+		memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		ASSERT(name_rmt->namelen == args->namelen);
+		ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
+		valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT);
+		args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT);
+		args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
+		if (args->flags & ATTR_KERNOVAL) {
+			args->valuelen = valuelen;
+			return(0);
+		}
+		if (args->valuelen < valuelen) {
+			args->valuelen = valuelen;
+			return(XFS_ERROR(ERANGE));
+		}
+		args->valuelen = valuelen;
+	}
+	return(0);
+}
+
+/*========================================================================
+ * Utility routines.
+ *========================================================================*/
+
+/*
+ * Move the indicated entries from one leaf to another.
+ * NOTE: this routine modifies both source and destination leaves.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
+			xfs_attr_leafblock_t *leaf_d, int start_d,
+			int count, xfs_mount_t *mp)
+{
+	xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
+	xfs_attr_leaf_entry_t *entry_s, *entry_d;
+	int desti, tmp, i;
+
+	/*
+	 * Check for nothing to do.
+	 */
+	if (count == 0)
+		return;
+
+	/*
+	 * Set up environment.
+	 */
+	ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	hdr_s = &leaf_s->hdr;
+	hdr_d = &leaf_d->hdr;
+	ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0)
+				&& (INT_GET(hdr_s->count, ARCH_CONVERT)
+						< (XFS_LBSIZE(mp)/8)));
+	ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >=
+		((INT_GET(hdr_s->count, ARCH_CONVERT)
+					* sizeof(*entry_s))+sizeof(*hdr_s)));
+	ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8));
+	ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >=
+		((INT_GET(hdr_d->count, ARCH_CONVERT)
+					* sizeof(*entry_d))+sizeof(*hdr_d)));
+
+	ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT));
+	ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT));
+	ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT));
+
+	/*
+	 * Move the entries in the destination leaf up to make a hole?
+	 */
+	if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
+		tmp  = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
+		tmp *= sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_d->entries[start_d];
+		entry_d = &leaf_d->entries[start_d + count];
+		memmove((char *)entry_d, (char *)entry_s, tmp);
+	}
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate attribute info packed and in sequence.
+	 */
+	entry_s = &leaf_s->entries[start_s];
+	entry_d = &leaf_d->entries[start_d];
+	desti = start_d;
+	for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
+		ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT)
+				>= INT_GET(hdr_s->firstused, ARCH_CONVERT));
+		tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
+#ifdef GROT
+		/*
+		 * Code to drop INCOMPLETE entries.  Difficult to use as we
+		 * may also need to change the insertion index.  Code turned
+		 * off for 6.2, should be revisited later.
+		 */
+		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
+			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
+			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+			entry_d--;	/* to compensate for ++ in loop hdr */
+			desti--;
+			if ((start_s + i) < offset)
+				result++;	/* insertion index adjustment */
+		} else {
+#endif /* GROT */
+			INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp);
+			/* both on-disk, don't endian flip twice */
+			entry_d->hashval = entry_s->hashval;
+			/* both on-disk, don't endian flip twice */
+			entry_d->nameidx = hdr_d->firstused;
+			entry_d->flags = entry_s->flags;
+			ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp
+							<= XFS_LBSIZE(mp));
+			memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
+				XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+			ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp
+							<= XFS_LBSIZE(mp));
+			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
+			INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp);
+			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+			INT_MOD(hdr_d->count, ARCH_CONVERT, 1);
+			tmp = INT_GET(hdr_d->count, ARCH_CONVERT)
+						* sizeof(xfs_attr_leaf_entry_t)
+						+ sizeof(xfs_attr_leaf_hdr_t);
+			ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
+#ifdef GROT
+		}
+#endif /* GROT */
+	}
+
+	/*
+	 * Zero out the entries we just copied.
+	 */
+	if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
+		tmp = count * sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_s->entries[start_s];
+		ASSERT(((char *)entry_s + tmp) <=
+		       ((char *)leaf_s + XFS_LBSIZE(mp)));
+		memset((char *)entry_s, 0, tmp);
+	} else {
+		/*
+		 * Move the remaining entries down to fill the hole,
+		 * then zero the entries at the top.
+		 */
+		tmp  = INT_GET(hdr_s->count, ARCH_CONVERT) - count;
+		tmp *= sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_s->entries[start_s + count];
+		entry_d = &leaf_s->entries[start_s];
+		memmove((char *)entry_d, (char *)entry_s, tmp);
+
+		tmp = count * sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_s->entries[INT_GET(hdr_s->count,
+							ARCH_CONVERT)];
+		ASSERT(((char *)entry_s + tmp) <=
+		       ((char *)leaf_s + XFS_LBSIZE(mp)));
+		memset((char *)entry_s, 0, tmp);
+	}
+
+	/*
+	 * Fill in the freemap information
+	 */
+	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_hdr_t));
+	INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT,
+				INT_GET(hdr_d->count, ARCH_CONVERT)
+					* sizeof(xfs_attr_leaf_entry_t));
+	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT,
+				INT_GET(hdr_d->firstused, ARCH_CONVERT)
+			      - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+	hdr_d->freemap[1].base = 0;
+	hdr_d->freemap[2].base = 0;
+	hdr_d->freemap[1].size = 0;
+	hdr_d->freemap[2].size = 0;
+	hdr_s->holes = 1;	/* leaf may not be compact */
+}
+
+/*
+ * Compare two leaf blocks' "order".
+ * Return 0 unless leaf2 should go before leaf1.
+ */
+int
+xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
+{
+	xfs_attr_leafblock_t *leaf1, *leaf2;
+
+	leaf1 = leaf1_bp->data;
+	leaf2 = leaf2_bp->data;
+	ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC) &&
+	       (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC));
+	if (   (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0)
+	    && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0)
+	    && (   (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) <
+		      INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT))
+		|| (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count,
+				ARCH_CONVERT)-1].hashval, ARCH_CONVERT) <
+		      INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count,
+				ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) {
+		return(1);
+	}
+	return(0);
+}
+
+/*
+ * Pick up the last hashvalue from a leaf block.
+ */
+xfs_dahash_t
+xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
+{
+	xfs_attr_leafblock_t *leaf;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	if (count)
+		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	if (!leaf->hdr.count)
+		return(0);
+	return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count,
+				ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+}
+
+/*
+ * Calculate the number of bytes used to store the indicated attribute
+ * (whether local or remote, only count the bytes in this block).
+ */
+int
+xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
+{
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	int size;
+
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index);
+		size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen,
+						   INT_GET(name_loc->valuelen,
+								ARCH_CONVERT));
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index);
+		size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen);
+	}
+	return(size);
+}
+
+/*
+ * Calculate the number of bytes that would be required to store the new
+ * attribute (whether local or remote, only count the bytes in this block).
+ * This routine decides as a side effect whether the attribute will be
+ * a "local" or a "remote" attribute.
+ */
+int
+xfs_attr_leaf_newentsize(xfs_da_args_t *args, int blocksize, int *local)
+{
+	int size;
+
+	size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(args->namelen, args->valuelen);
+	if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) {
+		if (local) {
+			*local = 1;
+		}
+	} else {
+		size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(args->namelen);
+		if (local) {
+			*local = 0;
+		}
+	}
+	return(size);
+}
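+
+/*
+ * Worked example (illustrative numbers, assuming a 4096-byte block and
+ * the structure sizes implied by xfs_attr_leaf.h):
+ * XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(4096) is (4096 >> 1) + (4096 >> 2),
+ * i.e. 3072 bytes.  A 4-byte name with a 100-byte value needs
+ * ENTSIZE_LOCAL(4, 100) = 108 bytes (3 + 4 + 100, rounded up to 4-byte
+ * alignment), well under the limit, so it stays "local"; the same name
+ * with a 4000-byte value exceeds the limit, so only the remote form
+ * (name plus value-block pointer) is kept in the leaf and *local is
+ * set to 0.
+ */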
+
+/*
+ * Copy out attribute list entries for attr_list(), for leaf attribute lists.
+ */
+int
+xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
+{
+	attrlist_cursor_kern_t *cursor;
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_local_t *name_loc;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	int retval, i;
+
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	cursor = context->cursor;
+	cursor->initted = 1;
+
+	xfs_attr_trace_l_cl("blk start", context, leaf);
+
+	/*
+	 * Re-find our place in the leaf block if this is a new syscall.
+	 */
+	if (context->resynch) {
+		entry = &leaf->entries[0];
+		for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
+							entry++, i++) {
+			if (INT_GET(entry->hashval, ARCH_CONVERT)
+							== cursor->hashval) {
+				if (cursor->offset == context->dupcnt) {
+					context->dupcnt = 0;
+					break;
+				}
+				context->dupcnt++;
+			} else if (INT_GET(entry->hashval, ARCH_CONVERT)
+							> cursor->hashval) {
+				context->dupcnt = 0;
+				break;
+			}
+		}
+		if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
+			xfs_attr_trace_l_c("not found", context);
+			return(0);
+		}
+	} else {
+		entry = &leaf->entries[0];
+		i = 0;
+	}
+	context->resynch = 0;
+
+	/*
+	 * We have found our place, start copying out the new attributes.
+	 */
+	retval = 0;
+	for (  ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+	     && (retval == 0); entry++, i++) {
+		attrnames_t	*namesp;
+
+		if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) {
+			cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT);
+			cursor->offset = 0;
+		}
+
+		if (entry->flags & XFS_ATTR_INCOMPLETE)
+			continue;		/* skip incomplete entries */
+		if (((context->flags & ATTR_SECURE) != 0) !=
+		    ((entry->flags & XFS_ATTR_SECURE) != 0) &&
+		    !(context->flags & ATTR_KERNORMALS))
+			continue;		/* skip non-matching entries */
+		if (((context->flags & ATTR_ROOT) != 0) !=
+		    ((entry->flags & XFS_ATTR_ROOT) != 0) &&
+		    !(context->flags & ATTR_KERNROOTLS))
+			continue;		/* skip non-matching entries */
+
+		namesp = (entry->flags & XFS_ATTR_SECURE) ? &attr_secure :
+			((entry->flags & XFS_ATTR_ROOT) ? &attr_trusted :
+			  &attr_user);
+
+		if (entry->flags & XFS_ATTR_LOCAL) {
+			name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+			if (context->flags & ATTR_KERNOVAL) {
+				ASSERT(context->flags & ATTR_KERNAMELS);
+				context->count += namesp->attr_namelen +
+						(int)name_loc->namelen + 1;
+			} else {
+				retval = xfs_attr_put_listent(context, namesp,
+					(char *)name_loc->nameval,
+					(int)name_loc->namelen,
+					(int)INT_GET(name_loc->valuelen,
+								ARCH_CONVERT));
+			}
+		} else {
+			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+			if (context->flags & ATTR_KERNOVAL) {
+				ASSERT(context->flags & ATTR_KERNAMELS);
+				context->count += namesp->attr_namelen +
+						(int)name_rmt->namelen + 1;
+			} else {
+				retval = xfs_attr_put_listent(context, namesp,
+					(char *)name_rmt->name,
+					(int)name_rmt->namelen,
+					(int)INT_GET(name_rmt->valuelen,
+								ARCH_CONVERT));
+			}
+		}
+		if (retval == 0) {
+			cursor->offset++;
+		}
+	}
+	xfs_attr_trace_l_cl("blk end", context, leaf);
+	return(retval);
+}
+
+#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
+	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
+#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
+	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
+	 & ~(sizeof(u_int32_t)-1))
+
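+/*
+ * The 0-pointer arithmetic above is just offsetof() written by hand:
+ * ATTR_ENTBASESIZE is the byte offset of a_name within an attrlist_ent.
+ * As an illustration, if that offset is 4 (a 32-bit a_valuelen ahead of
+ * the name), a 5-byte attribute name costs ATTR_ENTSIZE(5) =
+ * roundup(4 + 5 + 1, sizeof(u_int32_t)) = 12 bytes of the user's
+ * buffer, the +1 being the terminating NUL that xfs_attr_put_listent()
+ * writes below.
+ */
+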
+/*
+ * Format an attribute and copy it out to the user's buffer.
+ * Take care to check values and protect against them changing later;
+ * we may be reading them directly out of a user buffer.
+ */
+/*ARGSUSED*/
+int
+xfs_attr_put_listent(xfs_attr_list_context_t *context,
+		     attrnames_t *namesp, char *name, int namelen, int valuelen)
+{
+	attrlist_ent_t *aep;
+	int arraytop;
+
+	ASSERT(!(context->flags & ATTR_KERNOVAL));
+	if (context->flags & ATTR_KERNAMELS) {
+		char *offset;
+
+		ASSERT(context->count >= 0);
+
+		arraytop = context->count + namesp->attr_namelen + namelen + 1;
+		if (arraytop > context->firstu) {
+			context->count = -1;	/* insufficient space */
+			return(1);
+		}
+		offset = (char *)context->alist + context->count;
+		strncpy(offset, namesp->attr_name, namesp->attr_namelen);
+		offset += namesp->attr_namelen;
+		strncpy(offset, name, namelen);			/* real name */
+		offset += namelen;
+		*offset = '\0';
+		context->count += namesp->attr_namelen + namelen + 1;
+		return(0);
+	}
+
+	ASSERT(context->count >= 0);
+	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
+	ASSERT(context->firstu >= sizeof(*context->alist));
+	ASSERT(context->firstu <= context->bufsize);
+
+	arraytop = sizeof(*context->alist) +
+			context->count * sizeof(context->alist->al_offset[0]);
+	context->firstu -= ATTR_ENTSIZE(namelen);
+	if (context->firstu < arraytop) {
+		xfs_attr_trace_l_c("buffer full", context);
+		context->alist->al_more = 1;
+		return(1);
+	}
+
+	aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]);
+	aep->a_valuelen = valuelen;
+	memcpy(aep->a_name, name, namelen);
+	aep->a_name[ namelen ] = 0;
+	context->alist->al_offset[ context->count++ ] = context->firstu;
+	context->alist->al_count = context->count;
+	xfs_attr_trace_l_c("add", context);
+	return(0);
+}
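+
+/*
+ * Buffer layout note (illustrative): the attrlist header and its
+ * al_offset[] array grow forward from the start of the user's buffer,
+ * while the packed attrlist_ent_t records are carved backward from the
+ * end (context->firstu drops by ATTR_ENTSIZE() for each name added).
+ * Once the two regions would meet, al_more is set and we return so the
+ * caller can come back later and resume from the cursor.
+ */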
+
+/*========================================================================
+ * Manage the INCOMPLETE flag in a leaf entry
+ *========================================================================*/
+
+/*
+ * Clear the INCOMPLETE flag on an entry in a leaf block.
+ */
+int
+xfs_attr_leaf_clearflag(xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	xfs_dabuf_t *bp;
+	int error;
+#ifdef DEBUG
+	xfs_attr_leaf_name_local_t *name_loc;
+	int namelen;
+	char *name;
+#endif /* DEBUG */
+
+	/*
+	 * Set up the operation.
+	 */
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error) {
+		return(error);
+	}
+	ASSERT(bp != NULL);
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT));
+	ASSERT(args->index >= 0);
+	entry = &leaf->entries[ args->index ];
+	ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
+
+#ifdef DEBUG
+	if (entry->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		namelen = name_loc->namelen;
+		name = (char *)name_loc->nameval;
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		namelen = name_rmt->namelen;
+		name = (char *)name_rmt->name;
+	}
+	ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval);
+	ASSERT(namelen == args->namelen);
+	ASSERT(memcmp(name, args->name, namelen) == 0);
+#endif /* DEBUG */
+
+	entry->flags &= ~XFS_ATTR_INCOMPLETE;
+	xfs_da_log_buf(args->trans, bp,
+			 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
+
+	if (args->rmtblkno) {
+		ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno);
+		INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen);
+		xfs_da_log_buf(args->trans, bp,
+			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
+	}
+	xfs_da_buf_done(bp);
+
+	/*
+	 * Commit the flag value change and start the next trans in series.
+	 */
+	error = xfs_attr_rolltrans(&args->trans, args->dp);
+
+	return(error);
+}
+
+/*
+ * Set the INCOMPLETE flag on an entry in a leaf block.
+ */
+int
+xfs_attr_leaf_setflag(xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	xfs_dabuf_t *bp;
+	int error;
+
+	/*
+	 * Set up the operation.
+	 */
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error) {
+		return(error);
+	}
+	ASSERT(bp != NULL);
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT));
+	ASSERT(args->index >= 0);
+	entry = &leaf->entries[ args->index ];
+
+	ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
+	entry->flags |= XFS_ATTR_INCOMPLETE;
+	xfs_da_log_buf(args->trans, bp,
+			XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
+	if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt->valueblk = 0;
+		name_rmt->valuelen = 0;
+		xfs_da_log_buf(args->trans, bp,
+			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
+	}
+	xfs_da_buf_done(bp);
+
+	/*
+	 * Commit the flag value change and start the next trans in series.
+	 */
+	error = xfs_attr_rolltrans(&args->trans, args->dp);
+
+	return(error);
+}
+
+/*
+ * In a single transaction, clear the INCOMPLETE flag on the leaf entry
+ * given by args->blkno/index and set the INCOMPLETE flag on the leaf
+ * entry given by args->blkno2/index2.
+ *
+ * Note that they could be in different blocks, or in the same block.
+ */
+int
+xfs_attr_leaf_flipflags(xfs_da_args_t *args)
+{
+	xfs_attr_leafblock_t *leaf1, *leaf2;
+	xfs_attr_leaf_entry_t *entry1, *entry2;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	xfs_dabuf_t *bp1, *bp2;
+	int error;
+#ifdef DEBUG
+	xfs_attr_leaf_name_local_t *name_loc;
+	int namelen1, namelen2;
+	char *name1, *name2;
+#endif /* DEBUG */
+
+	/*
+	 * Read the block containing the "old" attr
+	 */
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1,
+					     XFS_ATTR_FORK);
+	if (error) {
+		return(error);
+	}
+	ASSERT(bp1 != NULL);
+
+	/*
+	 * Read the block containing the "new" attr, if it is different
+	 */
+	if (args->blkno2 != args->blkno) {
+		error = xfs_da_read_buf(args->trans, args->dp, args->blkno2,
+					-1, &bp2, XFS_ATTR_FORK);
+		if (error) {
+			return(error);
+		}
+		ASSERT(bp2 != NULL);
+	} else {
+		bp2 = bp1;
+	}
+
+	leaf1 = bp1->data;
+	ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT));
+	ASSERT(args->index >= 0);
+	entry1 = &leaf1->entries[ args->index ];
+
+	leaf2 = bp2->data;
+	ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT));
+	ASSERT(args->index2 >= 0);
+	entry2 = &leaf2->entries[ args->index2 ];
+
+#ifdef DEBUG
+	if (entry1->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index);
+		namelen1 = name_loc->namelen;
+		name1 = (char *)name_loc->nameval;
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+		namelen1 = name_rmt->namelen;
+		name1 = (char *)name_rmt->name;
+	}
+	if (entry2->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2);
+		namelen2 = name_loc->namelen;
+		name2 = (char *)name_loc->nameval;
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+		namelen2 = name_rmt->namelen;
+		name2 = (char *)name_rmt->name;
+	}
+	ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT));
+	ASSERT(namelen1 == namelen2);
+	ASSERT(memcmp(name1, name2, namelen1) == 0);
+#endif /* DEBUG */
+
+	ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
+	ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
+
+	entry1->flags &= ~XFS_ATTR_INCOMPLETE;
+	xfs_da_log_buf(args->trans, bp1,
+			  XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
+	if (args->rmtblkno) {
+		ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+		INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno);
+		INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen);
+		xfs_da_log_buf(args->trans, bp1,
+			 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
+	}
+
+	entry2->flags |= XFS_ATTR_INCOMPLETE;
+	xfs_da_log_buf(args->trans, bp2,
+			  XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
+	if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+		name_rmt->valueblk = 0;
+		name_rmt->valuelen = 0;
+		xfs_da_log_buf(args->trans, bp2,
+			 XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
+	}
+	xfs_da_buf_done(bp1);
+	if (bp1 != bp2)
+		xfs_da_buf_done(bp2);
+
+	/*
+	 * Commit the flag value change and start the next trans in series.
+	 */
+	error = xfs_attr_rolltrans(&args->trans, args->dp);
+
+	return(error);
+}
+
+/*========================================================================
+ * Indiscriminately delete the entire attribute fork
+ *========================================================================*/
+
+/*
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+int
+xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
+{
+	xfs_da_blkinfo_t *info;
+	xfs_daddr_t blkno;
+	xfs_dabuf_t *bp;
+	int error;
+
+	/*
+	 * Read block 0 to see what we have to work with.
+	 * We only get here if we have extents; since we remove
+	 * the extents in reverse order, the extent containing
+	 * block 0 must still be there.
+	 */
+	error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	blkno = xfs_da_blkno(bp);
+
+	/*
+	 * Invalidate the tree, even if the "tree" is only a single leaf block.
+	 * This is a depth-first traversal!
+	 */
+	info = bp->data;
+	if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) {
+		error = xfs_attr_node_inactive(trans, dp, bp, 1);
+	} else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) {
+		error = xfs_attr_leaf_inactive(trans, dp, bp);
+	} else {
+		error = XFS_ERROR(EIO);
+		xfs_da_brelse(*trans, bp);
+	}
+	if (error)
+		return(error);
+
+	/*
+	 * Invalidate the incore copy of the root block.
+	 */
+	error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	xfs_da_binval(*trans, bp);	/* remove from cache */
+	/*
+	 * Commit the invalidate and start the next transaction.
+	 */
+	error = xfs_attr_rolltrans(trans, dp);
+
+	return (error);
+}
+
+/*
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+int
+xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
+				   int level)
+{
+	xfs_da_blkinfo_t *info;
+	xfs_da_intnode_t *node;
+	xfs_dablk_t child_fsb;
+	xfs_daddr_t parent_blkno, child_blkno;
+	int error, count, i;
+	xfs_dabuf_t *child_bp;
+
+	/*
+	 * Since this code is recursive (gasp!) we must protect ourselves.
+	 */
+	if (level > XFS_DA_NODE_MAXDEPTH) {
+		xfs_da_brelse(*trans, bp);	/* no locks for later trans */
+		return(XFS_ERROR(EIO));
+	}
+
+	node = bp->data;
+	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT)
+						== XFS_DA_NODE_MAGIC);
+	parent_blkno = xfs_da_blkno(bp);	/* save for re-read later */
+	count = INT_GET(node->hdr.count, ARCH_CONVERT);
+	if (!count) {
+		xfs_da_brelse(*trans, bp);
+		return(0);
+	}
+	child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT);
+	xfs_da_brelse(*trans, bp);	/* no locks for later trans */
+
+	/*
+	 * If this is the node level just above the leaves, simply loop
+	 * over the leaves removing all of them.  If this is higher up
+	 * in the tree, recurse downward.
+	 */
+	for (i = 0; i < count; i++) {
+		/*
+		 * Read the subsidiary block to see what we have to work with.
+		 * Don't do this in a transaction.  This is a depth-first
+		 * traversal of the tree so we may deal with many blocks
+		 * before we come back to this one.
+		 */
+		error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp,
+						XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		if (child_bp) {
+						/* save for re-read later */
+			child_blkno = xfs_da_blkno(child_bp);
+
+			/*
+			 * Invalidate the subtree, however we have to.
+			 */
+			info = child_bp->data;
+			if (INT_GET(info->magic, ARCH_CONVERT)
+							== XFS_DA_NODE_MAGIC) {
+				error = xfs_attr_node_inactive(trans, dp,
+						child_bp, level+1);
+			} else if (INT_GET(info->magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC) {
+				error = xfs_attr_leaf_inactive(trans, dp,
+						child_bp);
+			} else {
+				error = XFS_ERROR(EIO);
+				xfs_da_brelse(*trans, child_bp);
+			}
+			if (error)
+				return(error);
+
+			/*
+			 * Remove the subsidiary block from the cache
+			 * and from the log.
+			 */
+			error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
+				&child_bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+			xfs_da_binval(*trans, child_bp);
+		}
+
+		/*
+		 * If we're not done, re-read the parent to get the next
+		 * child block number.
+		 */
+		if ((i+1) < count) {
+			error = xfs_da_read_buf(*trans, dp, 0, parent_blkno,
+				&bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+			child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT);
+			xfs_da_brelse(*trans, bp);
+		}
+		/*
+		 * Atomically commit the whole invalidate stuff.
+		 */
+		if ((error = xfs_attr_rolltrans(trans, dp)))
+			return (error);
+	}
+
+	return(0);
+}
+
+/*
+ * Invalidate all of the "remote" value regions pointed to by a particular
+ * leaf block.
+ * Note that we must release the lock on the buffer so that we are not
+ * caught holding something that the logging code wants to flush to disk.
+ */
+int
+xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_attr_leaf_entry_t *entry;
+	xfs_attr_leaf_name_remote_t *name_rmt;
+	xfs_attr_inactive_list_t *list, *lp;
+	int error, count, size, tmp, i;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+						== XFS_ATTR_LEAF_MAGIC);
+
+	/*
+	 * Count the number of "remote" value extents.
+	 */
+	count = 0;
+	entry = &leaf->entries[0];
+	for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) {
+		if (   INT_GET(entry->nameidx, ARCH_CONVERT)
+		    && ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+			if (name_rmt->valueblk)
+				count++;
+		}
+	}
+
+	/*
+	 * If there are no "remote" values, we're done.
+	 */
+	if (count == 0) {
+		xfs_da_brelse(*trans, bp);
+		return(0);
+	}
+
+	/*
+	 * Allocate storage for a list of all the "remote" value extents.
+	 */
+	size = count * sizeof(xfs_attr_inactive_list_t);
+	list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP);
+
+	/*
+	 * Identify each of the "remote" value extents.
+	 */
+	lp = list;
+	entry = &leaf->entries[0];
+	for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) {
+		if (   INT_GET(entry->nameidx, ARCH_CONVERT)
+		    && ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+			if (name_rmt->valueblk) {
+				/* both on-disk, don't endian flip twice */
+				lp->valueblk = name_rmt->valueblk;
+				INT_SET(lp->valuelen, ARCH_CONVERT,
+						XFS_B_TO_FSB(dp->i_mount,
+						    INT_GET(name_rmt->valuelen,
+							      ARCH_CONVERT)));
+				lp++;
+			}
+		}
+	}
+	xfs_da_brelse(*trans, bp);	/* unlock for trans. in freextent() */
+
+	/*
+	 * Invalidate each of the "remote" value extents.
+	 */
+	error = 0;
+	for (lp = list, i = 0; i < count; i++, lp++) {
+		tmp = xfs_attr_leaf_freextent(trans, dp,
+						     INT_GET(lp->valueblk,
+								ARCH_CONVERT),
+						     INT_GET(lp->valuelen,
+								ARCH_CONVERT));
+		if (error == 0)
+			error = tmp;	/* save only the 1st errno */
+	}
+
+	kmem_free((xfs_caddr_t)list, size);
+	return(error);
+}
+
+/*
+ * Look at all the extents for this logical region,
+ * invalidate any buffers that are incore/in transactions.
+ */
+int
+xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
+				    xfs_dablk_t blkno, int blkcnt)
+{
+	xfs_bmbt_irec_t map;
+	xfs_dablk_t tblkno;
+	int tblkcnt, dblkcnt, nmap, error;
+	xfs_daddr_t dblkno;
+	xfs_buf_t *bp;
+
+	/*
+	 * Roll through the "value", invalidating the attribute value's
+	 * blocks.
+	 */
+	tblkno = blkno;
+	tblkcnt = blkcnt;
+	while (tblkcnt > 0) {
+		/*
+		 * Try to remember where we decided to put the value.
+		 */
+		nmap = 1;
+		error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
+					XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+					NULL, 0, &map, &nmap, NULL);
+		if (error) {
+			return(error);
+		}
+		ASSERT(nmap == 1);
+		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+
+		/*
+		 * If it's a hole, these are already unmapped
+		 * so there's nothing to invalidate.
+		 */
+		if (map.br_startblock != HOLESTARTBLOCK) {
+
+			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
+						  map.br_startblock);
+			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
+						map.br_blockcount);
+			bp = xfs_trans_get_buf(*trans,
+					dp->i_mount->m_ddev_targp,
+					dblkno, dblkcnt, XFS_BUF_LOCK);
+			xfs_trans_binval(*trans, bp);
+			/*
+			 * Roll to next transaction.
+			 */
+			if ((error = xfs_attr_rolltrans(trans, dp)))
+				return (error);
+		}
+
+		tblkno += map.br_blockcount;
+		tblkcnt -= map.br_blockcount;
+	}
+
+	return(0);
+}
+
+
+/*
+ * Roll from one trans in the sequence of PERMANENT transactions to the next.
+ */
+int
+xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
+{
+	xfs_trans_t *trans;
+	unsigned int logres, count;
+	int	error;
+
+	/*
+	 * Ensure that the inode is always logged.
+	 */
+	trans = *transp;
+	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+
+	/*
+	 * Copy the critical parameters from one trans to the next.
+	 */
+	logres = trans->t_log_res;
+	count = trans->t_log_count;
+	*transp = xfs_trans_dup(trans);
+
+	/*
+	 * Commit the current transaction.
+	 * If this commit failed, then it'd just unlock those items that
+	 * are not marked ihold. That also means that a filesystem shutdown
+	 * is in progress. The caller takes the responsibility to cancel
+	 * the duplicate transaction that gets returned.
+	 */
+	if ((error = xfs_trans_commit(trans, 0, NULL)))
+		return (error);
+
+	trans = *transp;
+
+	/*
+	 * Reserve space in the log for the next transaction.
+	 * This also pushes items in the "AIL", the list of logged items,
+	 * out to disk if they are taking up space at the tail of the log
+	 * that we want to use.  This requires that either nothing be locked
+	 * across this call, or that anything that is locked be logged in
+	 * the prior and the next transactions.
+	 */
+	error = xfs_trans_reserve(trans, 0, logres, 0,
+				  XFS_TRANS_PERM_LOG_RES, count);
+	/*
+	 *  Ensure that the inode is in the new transaction and locked.
+	 */
+	if (!error) {
+		xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(trans, dp);
+	}
+	return (error);
+
+}
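+
+/*
+ * The expected usage is one unit of permanent-log work per transaction
+ * followed by a roll, as the loops in xfs_attr_node_inactive() and
+ * xfs_attr_leaf_freextent() above do.  A caller-side sketch (with
+ * do_one_unit() standing in for the real work):
+ *
+ *	while (work_left) {
+ *		if ((error = do_one_unit(*transp, dp)))
+ *			break;
+ *		if ((error = xfs_attr_rolltrans(transp, dp)))
+ *			break;
+ *	}
+ *
+ * Since the roll re-joins and holds the inode, dp stays locked across
+ * the whole sequence.
+ */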
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
new file mode 100644
index 0000000..b1480e0
--- /dev/null
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2000, 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ATTR_LEAF_H__
+#define	__XFS_ATTR_LEAF_H__
+
+/*
+ * Attribute storage layout, internal structure, access macros, etc.
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ */
+
+struct attrlist;
+struct attrlist_cursor_kern;
+struct attrnames;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_da_state;
+struct xfs_da_state_blk;
+struct xfs_inode;
+struct xfs_trans;
+
+/*========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This is the structure of the leaf nodes in the Btree.
+ *
+ * Struct leaf_entry's are packed from the top.  Name/values grow from the
+ * bottom but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such,
+ * smaller runs are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again.  If we
+ * still don't have enough space, then we have to split the block.  The
+ * name/value structs (both local and remote versions) must be 32bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string.  The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
+ * the leaf_entry.  The namespaces are independent only because we also look
+ * at the namespace bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set.  We clear the
+ * bit when we have finished setting up the attribute.  We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define XFS_ATTR_LEAF_MAPSIZE	3	/* how many freespace slots */
+
+typedef struct xfs_attr_leafblock {
+	struct xfs_attr_leaf_hdr {	/* constant-structure header block */
+		xfs_da_blkinfo_t info;	/* block type, links, etc. */
+		__uint16_t count;	/* count of active leaf_entry's */
+		__uint16_t usedbytes;	/* num bytes of names/values stored */
+		__uint16_t firstused;	/* first used byte in name area */
+		__uint8_t  holes;	/* != 0 if blk needs compaction */
+		__uint8_t  pad1;
+		struct xfs_attr_leaf_map {	  /* RLE map of free bytes */
+			__uint16_t base;	  /* base of free region */
+			__uint16_t size;	  /* length of free region */
+		} freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */
+	} hdr;
+	struct xfs_attr_leaf_entry {	/* sorted on key, not name */
+		xfs_dahash_t hashval;	/* hash value of name */
+		__uint16_t nameidx;	/* index into buffer of name/value */
+		__uint8_t flags;	/* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+		__uint8_t pad2;		/* unused pad byte */
+	} entries[1];			/* variable sized array */
+	struct xfs_attr_leaf_name_local {
+		__uint16_t valuelen;	/* number of bytes in value */
+		__uint8_t namelen;	/* length of name bytes */
+		__uint8_t nameval[1];	/* name/value bytes */
+	} namelist;			/* grows from bottom of buf */
+	struct xfs_attr_leaf_name_remote {
+		xfs_dablk_t valueblk;	/* block number of value bytes */
+		__uint32_t valuelen;	/* number of bytes in value */
+		__uint8_t namelen;	/* length of name bytes */
+		__uint8_t name[1];	/* name bytes */
+	} valuelist;			/* grows from bottom of buf */
+} xfs_attr_leafblock_t;
+typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
+typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
+typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
+typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
+typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
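+
+/*
+ * Layout sketch (illustrative, not to scale):
+ *
+ *	offset 0:	hdr (info, count, usedbytes, firstused, holes,
+ *			freemap[3])
+ *			entries[0 .. count-1], packed, growing toward
+ *			higher offsets as entries are added
+ *			... free space, summarized in freemap[] ...
+ *	hdr.firstused:	name/value structs (local or remote), allocated
+ *			backward from the end of the block
+ *	XFS_LBSIZE(mp): end of block
+ *
+ * Each entries[i].nameidx is the byte offset of that entry's name/value
+ * struct within the block, which is what the XFS_ATTR_LEAF_NAME*()
+ * macros below decode.
+ */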
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call, they are "or"ed together for various operations.
+ */
+#define	XFS_ATTR_LOCAL_BIT	0	/* attr is stored locally */
+#define	XFS_ATTR_ROOT_BIT	1	/* limit access to trusted attrs */
+#define	XFS_ATTR_SECURE_BIT	2	/* limit access to secure attrs */
+#define	XFS_ATTR_INCOMPLETE_BIT	7	/* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL		(1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT		(1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_SECURE		(1 << XFS_ATTR_SECURE_BIT)
+#define XFS_ATTR_INCOMPLETE	(1 << XFS_ATTR_INCOMPLETE_BIT)
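+
+/*
+ * For reference, the resulting values are 0x01 (LOCAL), 0x02 (ROOT),
+ * 0x04 (SECURE) and 0x80 (INCOMPLETE); parking INCOMPLETE in bit 7
+ * keeps it clear of the flag bits described in the NOTE above.
+ */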
+
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed,
+ * there can be only one alignment value)
+ */
+#define	XFS_ATTR_LEAF_NAME_ALIGN	((uint)sizeof(xfs_dablk_t))
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_REMOTE)
+xfs_attr_leaf_name_remote_t *
+xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx);
+#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx)	\
+	xfs_attr_leaf_name_remote(leafp,idx)
+#else
+#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx)	/* remote name struct ptr */ \
+	((xfs_attr_leaf_name_remote_t *)		\
+	 &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ])
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_LOCAL)
+xfs_attr_leaf_name_local_t *
+xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx);
+#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx)	\
+	xfs_attr_leaf_name_local(leafp,idx)
+#else
+#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx)	/* local name struct ptr */ \
+	((xfs_attr_leaf_name_local_t *)		\
+	 &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ])
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME)
+char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx);
+#define XFS_ATTR_LEAF_NAME(leafp,idx)		xfs_attr_leaf_name(leafp,idx)
+#else
+#define XFS_ATTR_LEAF_NAME(leafp,idx)		/* generic name struct ptr */ \
+	(&((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ])
+#endif
+
+/*
+ * Calculate total bytes used (including trailing pad for alignment) for
+ * a "local" name/value structure, a "remote" name/value structure, and
+ * a pointer which might be either.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_REMOTE)
+int xfs_attr_leaf_entsize_remote(int nlen);
+#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen)	\
+	xfs_attr_leaf_entsize_remote(nlen)
+#else
+#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen)	/* space for remote struct */ \
+	(((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
+	  XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL)
+int xfs_attr_leaf_entsize_local(int nlen, int vlen);
+#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen)	\
+	xfs_attr_leaf_entsize_local(nlen,vlen)
+#else
+#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen)	/* space for local struct */ \
+	(((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) + \
+	  XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX)
+int xfs_attr_leaf_entsize_local_max(int bsize);
+#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize)	\
+	xfs_attr_leaf_entsize_local_max(bsize)
+#else
+#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize)	/* max local struct size */ \
+	(((bsize) >> 1) + ((bsize) >> 2))
+#endif
+
+
+/*========================================================================
+ * Structure used to pass context around among the routines.
+ *========================================================================*/
+
+typedef struct xfs_attr_list_context {
+	struct xfs_inode		*dp;	/* inode */
+	struct attrlist_cursor_kern	*cursor;/* position in list */
+	struct attrlist			*alist;	/* output buffer */
+	int				count;	/* num used entries */
+	int				dupcnt;	/* count dup hashvals seen */
+	int				bufsize;/* total buffer size */
+	int				firstu;	/* first used byte in buffer */
+	int				flags;	/* from VOP call */
+	int				resynch;/* T/F: resynch with cursor */
+} xfs_attr_list_context_t;
+
+/*
+ * Used to keep a list of "remote value" extents when unlinking an inode.
+ */
+typedef struct xfs_attr_inactive_list {
+	xfs_dablk_t	valueblk;	/* block number of value bytes */
+	int		valuelen;	/* number of bytes in value */
+} xfs_attr_inactive_list_t;
+
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Internal routines when dirsize < XFS_LITINO(mp).
+ */
+int	xfs_attr_shortform_create(struct xfs_da_args *args);
+int	xfs_attr_shortform_add(struct xfs_da_args *add);
+int	xfs_attr_shortform_lookup(struct xfs_da_args *args);
+int	xfs_attr_shortform_getvalue(struct xfs_da_args *args);
+int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
+int	xfs_attr_shortform_remove(struct xfs_da_args *remove);
+int	xfs_attr_shortform_list(struct xfs_attr_list_context *context);
+int	xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp);
+
+/*
+ * Internal routines when dirsize == XFS_LBSIZE(mp).
+ */
+int	xfs_attr_leaf_to_node(struct xfs_da_args *args);
+int	xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp,
+					  struct xfs_da_args *args);
+int	xfs_attr_leaf_clearflag(struct xfs_da_args *args);
+int	xfs_attr_leaf_setflag(struct xfs_da_args *args);
+int	xfs_attr_leaf_flipflags(xfs_da_args_t *args);
+
+/*
+ * Routines used for growing the Btree.
+ */
+int	xfs_attr_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block,
+				    struct xfs_dabuf **bpp);
+int	xfs_attr_leaf_split(struct xfs_da_state *state,
+				   struct xfs_da_state_blk *oldblk,
+				   struct xfs_da_state_blk *newblk);
+int	xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf,
+					struct xfs_da_args *args);
+int	xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args);
+int	xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer,
+				 struct xfs_da_args *args);
+int	xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer,
+				    struct xfs_da_args *args);
+int	xfs_attr_leaf_list_int(struct xfs_dabuf *bp,
+				      struct xfs_attr_list_context *context);
+
+/*
+ * Routines used for shrinking the Btree.
+ */
+int	xfs_attr_leaf_toosmall(struct xfs_da_state *state, int *retval);
+void	xfs_attr_leaf_unbalance(struct xfs_da_state *state,
+				       struct xfs_da_state_blk *drop_blk,
+				       struct xfs_da_state_blk *save_blk);
+int	xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
+int	xfs_attr_node_inactive(struct xfs_trans **trans, struct xfs_inode *dp,
+				      struct xfs_dabuf *bp, int level);
+int	xfs_attr_leaf_inactive(struct xfs_trans **trans, struct xfs_inode *dp,
+				      struct xfs_dabuf *bp);
+int	xfs_attr_leaf_freextent(struct xfs_trans **trans, struct xfs_inode *dp,
+				       xfs_dablk_t blkno, int blkcnt);
+
+/*
+ * Utility routines.
+ */
+xfs_dahash_t	xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count);
+int	xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
+				   struct xfs_dabuf *leaf2_bp);
+int	xfs_attr_leaf_newentsize(struct xfs_da_args *args, int blocksize,
+					int *local);
+int	xfs_attr_leaf_entsize(struct xfs_attr_leafblock *leaf, int index);
+int	xfs_attr_put_listent(struct xfs_attr_list_context *context,
+			     struct attrnames *, char *name, int namelen,
+			     int valuelen);
+int	xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
+
+#endif	/* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
new file mode 100644
index 0000000..ef7d294
--- /dev/null
+++ b/fs/xfs/xfs_attr_sf.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ATTR_SF_H__
+#define	__XFS_ATTR_SF_H__
+
+/*
+ * Attribute storage when stored inside the inode.
+ *
+ * Small attribute lists are packed as tightly as possible so as
+ * to fit into the literal area of the inode.
+ */
+
+struct xfs_inode;
+
+/*
+ * Entries are packed toward the top as tightly as possible.
+ */
+typedef struct xfs_attr_shortform {
+	struct xfs_attr_sf_hdr {	/* constant-structure header block */
+		__uint16_t totsize;	/* total bytes in shortform list */
+		__uint8_t count;	/* count of active entries */
+	} hdr;
+	struct xfs_attr_sf_entry {
+		__uint8_t namelen;	/* actual length of name (no NULL) */
+		__uint8_t valuelen;	/* actual length of value (no NULL) */
+		__uint8_t flags;	/* flags bits (see xfs_attr_leaf.h) */
+		__uint8_t nameval[1];	/* name & value bytes concatenated */
+	} list[1];			/* variable sized array */
+} xfs_attr_shortform_t;
+typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t;
+typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t;
+
+/*
+ * We generate this, then sort it; attr_list() must return things in hash order.
+ */
+typedef struct xfs_attr_sf_sort {
+	__uint8_t	entno;		/* entry number in original list */
+	__uint8_t	namelen;	/* length of name value (no null) */
+	__uint8_t	valuelen;	/* length of value */
+	__uint8_t	flags;		/* flags bits (see xfs_attr_leaf.h) */
+	xfs_dahash_t	hash;		/* this entry's hash value */
+	char		*name;		/* name value, pointer into buffer */
+} xfs_attr_sf_sort_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE_BYNAME)
+int xfs_attr_sf_entsize_byname(int nlen, int vlen);
+#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen)	\
+	xfs_attr_sf_entsize_byname(nlen,vlen)
+#else
+#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen)	/* space name/value uses */ \
+	((int)sizeof(xfs_attr_sf_entry_t)-1 + (nlen)+(vlen))
+#endif
+#define XFS_ATTR_SF_ENTSIZE_MAX			/* max space for name&value */ \
+	((1 << (NBBY*(int)sizeof(__uint8_t))) - 1)
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE)
+int xfs_attr_sf_entsize(xfs_attr_sf_entry_t *sfep);
+#define XFS_ATTR_SF_ENTSIZE(sfep)	xfs_attr_sf_entsize(sfep)
+#else
+#define XFS_ATTR_SF_ENTSIZE(sfep)		/* space an entry uses */ \
+	((int)sizeof(xfs_attr_sf_entry_t)-1 + (sfep)->namelen+(sfep)->valuelen)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_NEXTENTRY)
+xfs_attr_sf_entry_t *xfs_attr_sf_nextentry(xfs_attr_sf_entry_t *sfep);
+#define XFS_ATTR_SF_NEXTENTRY(sfep)	xfs_attr_sf_nextentry(sfep)
+#else
+#define XFS_ATTR_SF_NEXTENTRY(sfep)		/* next entry in struct */ \
+	((xfs_attr_sf_entry_t *) \
+		((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_TOTSIZE)
+int xfs_attr_sf_totsize(struct xfs_inode *dp);
+#define XFS_ATTR_SF_TOTSIZE(dp)		xfs_attr_sf_totsize(dp)
+#else
+#define XFS_ATTR_SF_TOTSIZE(dp)			/* total space in use */ \
+	(INT_GET(((xfs_attr_shortform_t *)((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT))
+#endif
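+
+/*
+ * Since entries are byte-packed, walking the list is a matter of
+ * stepping XFS_ATTR_SF_ENTSIZE() bytes at a time.  A sketch, with "sf"
+ * being the xfs_attr_shortform_t at dp->i_afp->if_u1.if_data as in
+ * XFS_ATTR_SF_TOTSIZE above:
+ *
+ *	sfe = &sf->list[0];
+ *	for (i = 0; i < sf->hdr.count; i++) {
+ *		... sfe->nameval[0 .. sfe->namelen-1] is the name and the
+ *		    value bytes follow immediately after it ...
+ *		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+ *	}
+ *
+ * For example, an entry with a 3-byte name and a 5-byte value occupies
+ * 3 + 3 + 5 = 11 bytes (3 bytes of fixed namelen/valuelen/flags header).
+ */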
+
+#if defined(XFS_ATTR_TRACE)
+/*
+ * Kernel tracing support for attribute lists
+ */
+struct xfs_attr_list_context;
+struct xfs_da_intnode;
+struct xfs_da_node_entry;
+struct xfs_attr_leafblock;
+
+#define	XFS_ATTR_TRACE_SIZE	4096	/* size of global trace buffer */
+extern ktrace_t	*xfs_attr_trace_buf;
+
+/*
+ * Trace record types.
+ */
+#define	XFS_ATTR_KTRACE_L_C	1	/* context */
+#define	XFS_ATTR_KTRACE_L_CN	2	/* context, node */
+#define	XFS_ATTR_KTRACE_L_CB	3	/* context, btree */
+#define	XFS_ATTR_KTRACE_L_CL	4	/* context, leaf */
+
+void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
+void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
+			      struct xfs_da_intnode *node);
+void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
+			      struct xfs_da_node_entry *btree);
+void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
+			      struct xfs_attr_leafblock *leaf);
+void xfs_attr_trace_enter(int type, char *where,
+			     __psunsigned_t a2, __psunsigned_t a3,
+			     __psunsigned_t a4, __psunsigned_t a5,
+			     __psunsigned_t a6, __psunsigned_t a7,
+			     __psunsigned_t a8, __psunsigned_t a9,
+			     __psunsigned_t a10, __psunsigned_t a11,
+			     __psunsigned_t a12, __psunsigned_t a13,
+			     __psunsigned_t a14, __psunsigned_t a15);
+#else
+#define	xfs_attr_trace_l_c(w,c)
+#define	xfs_attr_trace_l_cn(w,c,n)
+#define	xfs_attr_trace_l_cb(w,c,b)
+#define	xfs_attr_trace_l_cl(w,c,l)
+#endif /* XFS_ATTR_TRACE */
+
+#endif	/* __XFS_ATTR_SF_H__ */
diff --git a/fs/xfs/xfs_behavior.c b/fs/xfs/xfs_behavior.c
new file mode 100644
index 0000000..16088e1
--- /dev/null
+++ b/fs/xfs/xfs_behavior.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ *
+ */
+#include "xfs.h"
+
+/*
+ * Source file used to associate/disassociate behaviors with virtualized
+ * objects.  See xfs_behavior.h for more information about behaviors, etc.
+ *
+ * The implementation is split between functions in this file and macros
+ * in xfs_behavior.h.
+ */
+
+/*
+ * Insert a new behavior descriptor into a behavior chain.
+ *
+ * The behavior chain is ordered based on the 'position' number which
+ * lives in the first field of the ops vector (higher numbers first).
+ *
+ * Attempts to insert duplicate ops result in an EINVAL return code.
+ * Otherwise, return 0 to indicate success.
+ */
+int
+bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp)
+{
+	bhv_desc_t	*curdesc, *prev;
+	int		position;
+
+	/*
+	 * Validate the position value of the new behavior.
+	 */
+	position = BHV_POSITION(bdp);
+	ASSERT(position >= BHV_POSITION_BASE && position <= BHV_POSITION_TOP);
+
+	/*
+	 * Find location to insert behavior.  Check for duplicates.
+	 */
+	prev = NULL;
+	for (curdesc = bhp->bh_first;
+	     curdesc != NULL;
+	     curdesc = curdesc->bd_next) {
+
+		/* Check for duplication. */
+		if (curdesc->bd_ops == bdp->bd_ops) {
+			ASSERT(0);
+			return EINVAL;
+		}
+
+		/* Find correct position */
+		if (position >= BHV_POSITION(curdesc)) {
+			ASSERT(position != BHV_POSITION(curdesc));
+			break;		/* found it */
+		}
+
+		prev = curdesc;
+	}
+
+	if (prev == NULL) {
+		/* insert at front of chain */
+		bdp->bd_next = bhp->bh_first;
+		bhp->bh_first = bdp;
+	} else {
+		/* insert after prev */
+		bdp->bd_next = prev->bd_next;
+		prev->bd_next = bdp;
+	}
+
+	return 0;
+}
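+
+/*
+ * Building a chain typically looks like the sketch below, where
+ * obj_bhvh, base_bdp and layer_bdp are illustrative names for a chain
+ * head and two descriptors whose ops vectors have already been set up:
+ *
+ *	bhv_head_init(&obj_bhvh, "object");
+ *	bhv_insert_initial(&obj_bhvh, &base_bdp);	(base behavior)
+ *	error = bhv_insert(&obj_bhvh, &layer_bdp);	(interposing layer)
+ *
+ * The chain stays sorted by descending position, so the descriptor
+ * whose ops vector carries the larger position number sits in front
+ * and sees operations first.
+ */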
+
+/*
+ * Remove a behavior descriptor from a position in a behavior chain;
+ * the position is guaranteed not to be the first position.
+ * Should only be called by the bhv_remove() macro.
+ */
+void
+bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp)
+{
+	bhv_desc_t	*curdesc, *prev;
+
+	ASSERT(bhp->bh_first != NULL);
+	ASSERT(bhp->bh_first->bd_next != NULL);
+
+	prev = bhp->bh_first;
+	for (curdesc = bhp->bh_first->bd_next;
+	     curdesc != NULL;
+	     curdesc = curdesc->bd_next) {
+
+		if (curdesc == bdp)
+			break;		/* found it */
+		prev = curdesc;
+	}
+
+	ASSERT(curdesc == bdp);
+	prev->bd_next = bdp->bd_next;	/* remove from after prev */
+}
+
+/*
+ * Look for a specific ops vector on the specified behavior chain.
+ * Return the associated behavior descriptor.  Or NULL, if not found.
+ */
+bhv_desc_t *
+bhv_lookup(bhv_head_t *bhp, void *ops)
+{
+	bhv_desc_t	*curdesc;
+
+	for (curdesc = bhp->bh_first;
+	     curdesc != NULL;
+	     curdesc = curdesc->bd_next) {
+
+		if (curdesc->bd_ops == ops)
+			return curdesc;
+	}
+
+	return NULL;
+}
+
+/*
+ * Looks for the first behavior within a specified range of positions.
+ * Return the associated behavior descriptor.  Or NULL, if none found.
+ */
+bhv_desc_t *
+bhv_lookup_range(bhv_head_t *bhp, int low, int high)
+{
+	bhv_desc_t	*curdesc;
+
+	for (curdesc = bhp->bh_first;
+	     curdesc != NULL;
+	     curdesc = curdesc->bd_next) {
+
+		int	position = BHV_POSITION(curdesc);
+
+		if (position <= high) {
+			if (position >= low)
+				return curdesc;
+			return NULL;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Return the base behavior in the chain, or NULL if the chain
+ * is empty.
+ *
+ * The caller has not read locked the behavior chain, so acquire the
+ * lock before traversing the chain.
+ */
+bhv_desc_t *
+bhv_base(bhv_head_t *bhp)
+{
+	bhv_desc_t	*curdesc;
+
+	for (curdesc = bhp->bh_first;
+	     curdesc != NULL;
+	     curdesc = curdesc->bd_next) {
+
+		if (curdesc->bd_next == NULL) {
+			return curdesc;
+		}
+	}
+
+	return NULL;
+}
+
+void
+bhv_head_init(
+	bhv_head_t *bhp,
+	char *name)
+{
+	bhp->bh_first = NULL;
+}
+
+void
+bhv_insert_initial(
+	bhv_head_t *bhp,
+	bhv_desc_t *bdp)
+{
+	ASSERT(bhp->bh_first == NULL);
+	(bhp)->bh_first = bdp;
+}
+
+void
+bhv_head_destroy(
+	bhv_head_t *bhp)
+{
+	ASSERT(bhp->bh_first == NULL);
+}
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
new file mode 100644
index 0000000..d5ed5a8
--- /dev/null
+++ b/fs/xfs/xfs_behavior.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_BEHAVIOR_H__
+#define __XFS_BEHAVIOR_H__
+
+/*
+ * Header file used to associate behaviors with virtualized objects.
+ *
+ * A virtualized object is an internal, virtualized representation of
+ * OS entities such as persistent files, processes, or sockets.  Examples
+ * of virtualized objects include vnodes, vprocs, and vsockets.  Often
+ * a virtualized object is referred to simply as an "object."
+ *
+ * A behavior is essentially an implementation layer associated with
+ * an object.  Multiple behaviors for an object are chained together,
+ * the order of chaining determining the order of invocation.  Each
+ * behavior of a given object implements the same set of interfaces
+ * (e.g., the VOP interfaces).
+ *
+ * Behaviors may be dynamically inserted into an object's behavior chain,
+ * such that the addition is transparent to consumers that already have
+ * references to the object.  Typically, a given behavior will be inserted
+ * at a particular location in the behavior chain.  Insertion of new
+ * behaviors is synchronized with operations-in-progress (oip's) so that
+ * the oip's always see a consistent view of the chain.
+ *
+ * The term "interpostion" is used to refer to the act of inserting
+ * a behavior such that it interposes on (i.e., is inserted in front
+ * of) a particular other behavior.  A key example of this is when a
+ * system implementing distributed single system image wishes to
+ * interpose a distribution layer (providing distributed coherency)
+ * in front of an object that is otherwise only accessed locally.
+ *
+ * Note that the traditional vnode/inode combination is simply a virtualized
+ * object that has exactly one associated behavior.
+ *
+ * Behavior synchronization is the logic needed, under certain
+ * circumstances, to guarantee that there is no conflict between ongoing
+ * operations traversing the behavior chain and those dynamically
+ * modifying the behavior chain.  Because behavior synchronization adds
+ * extra overhead to virtual operation invocation, we want to restrict,
+ * as much as we can, the requirement for this extra code to those
+ * situations in which it is truly necessary.
+ *
+ * Behavior synchronization is needed whenever there's at least one class
+ * of object in the system for which:
+ * 1) multiple behaviors for a given object are supported,
+ * -- AND --
+ * 2a) insertion of a new behavior can happen dynamically at any time during
+ *     the life of an active object,
+ *	-- AND --
+ *	3a) insertion of a new behavior needs to synchronize with existing
+ *	    ops-in-progress.
+ *	-- OR --
+ *	3b) multiple different behaviors can be dynamically inserted at
+ *	    any time during the life of an active object
+ *	-- OR --
+ *	3c) removal of a behavior can occur at any time during the life of
+ *	    an active object.
+ * -- OR --
+ * 2b) removal of a behavior can occur at any time during the life of an
+ *     active object
+ *
+ */
+
+struct bhv_head_lock;
+
+/*
+ * Behavior head.  Head of the chain of behaviors.
+ * Contained within each virtualized object data structure.
+ */
+typedef struct bhv_head {
+	struct bhv_desc *bh_first;	/* first behavior in chain */
+	struct bhv_head_lock *bh_lockp;	/* pointer to lock info struct */
+} bhv_head_t;
+
+/*
+ * Behavior descriptor.	 Descriptor associated with each behavior.
+ * Contained within the behavior's private data structure.
+ */
+typedef struct bhv_desc {
+	void		*bd_pdata;	/* private data for this behavior */
+	void		*bd_vobj;	/* virtual object associated with */
+	void		*bd_ops;	/* ops for this behavior */
+	struct bhv_desc *bd_next;	/* next behavior in chain */
+} bhv_desc_t;
+
+/*
+ * Behavior identity field.  A behavior's identity determines the position
+ * where it lives within a behavior chain, and it's always the first field
+ * of the behavior's ops vector. The optional id field further identifies the
+ * subsystem responsible for the behavior.
+ */
+typedef struct bhv_identity {
+	__u16	bi_id;		/* owning subsystem id */
+	__u16	bi_position;	/* position in chain */
+} bhv_identity_t;
+
+typedef bhv_identity_t bhv_position_t;
+
+#define BHV_IDENTITY_INIT(id,pos)	{id, pos}
+#define BHV_IDENTITY_INIT_POSITION(pos) BHV_IDENTITY_INIT(0, pos)
+
+/*
+ * Define boundaries of position values.
+ */
+#define BHV_POSITION_INVALID	0	/* invalid position number */
+#define BHV_POSITION_BASE	1	/* base (last) implementation layer */
+#define BHV_POSITION_TOP	63	/* top (first) implementation layer */
+
+/*
+ * Plumbing macros.
+ */
+#define BHV_HEAD_FIRST(bhp)	(ASSERT((bhp)->bh_first), (bhp)->bh_first)
+#define BHV_NEXT(bdp)		(ASSERT((bdp)->bd_next), (bdp)->bd_next)
+#define BHV_NEXTNULL(bdp)	((bdp)->bd_next)
+#define BHV_VOBJ(bdp)		(ASSERT((bdp)->bd_vobj), (bdp)->bd_vobj)
+#define BHV_VOBJNULL(bdp)	((bdp)->bd_vobj)
+#define BHV_PDATA(bdp)		(bdp)->bd_pdata
+#define BHV_OPS(bdp)		(bdp)->bd_ops
+#define BHV_IDENTITY(bdp)	((bhv_identity_t *)(bdp)->bd_ops)
+#define BHV_POSITION(bdp)	(BHV_IDENTITY(bdp)->bi_position)
+
+extern void bhv_head_init(bhv_head_t *, char *);
+extern void bhv_head_destroy(bhv_head_t *);
+extern int  bhv_insert(bhv_head_t *, bhv_desc_t *);
+extern void bhv_insert_initial(bhv_head_t *, bhv_desc_t *);
+
+/*
+ * Initialize a new behavior descriptor.
+ * Arguments:
+ *   bdp - pointer to behavior descriptor
+ *   pdata - pointer to behavior's private data
+ *   vobj - pointer to associated virtual object
+ *   ops - pointer to ops for this behavior
+ */
+#define bhv_desc_init(bdp, pdata, vobj, ops)		\
+ {							\
+	(bdp)->bd_pdata = pdata;			\
+	(bdp)->bd_vobj = vobj;				\
+	(bdp)->bd_ops = ops;				\
+	(bdp)->bd_next = NULL;				\
+ }
+
+/*
+ * Remove a behavior descriptor from a behavior chain.
+ */
+#define bhv_remove(bhp, bdp)				\
+ {							\
+	if ((bhp)->bh_first == (bdp)) {			\
+		/*					\
+		* Remove from front of chain.		\
+		* Atomic wrt oip's.			\
+		*/					\
+	       (bhp)->bh_first = (bdp)->bd_next;	\
+	} else {					\
+	       /* remove from non-front of chain */	\
+	       bhv_remove_not_first(bhp, bdp);		\
+	}						\
+	(bdp)->bd_vobj = NULL;				\
+ }
+
+/*
+ * Behavior module prototypes.
+ */
+extern void		bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp);
+extern bhv_desc_t *	bhv_lookup(bhv_head_t *bhp, void *ops);
+extern bhv_desc_t *	bhv_lookup_range(bhv_head_t *bhp, int low, int high);
+extern bhv_desc_t *	bhv_base(bhv_head_t *bhp);
+
+/* No bhv locking on Linux */
+#define bhv_lookup_unlocked	bhv_lookup
+#define bhv_base_unlocked	bhv_base
+
+#endif /* __XFS_BEHAVIOR_H__ */
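
For illustration, a hedged sketch of how an operation is dispatched through such
a chain: every behavior's ops vector starts with its identity, BHV_OPS() recovers
the vector for a descriptor, and a layer that merely interposes forwards the call
to BHV_NEXT().  The demo_vnodeops type and functions below are hypothetical, not
from the XFS sources.

/* Hypothetical ops vector used only for illustration. */
typedef struct demo_vnodeops {
	bhv_position_t	position;	/* must be the first field */
	int		(*vop_read)(bhv_desc_t *bdp, char *buf, int len);
} demo_vnodeops_t;

/* Call vop_read on the first (top) behavior of the chain. */
static int demo_read(bhv_head_t *bhp, char *buf, int len)
{
	bhv_desc_t	*bdp = BHV_HEAD_FIRST(bhp);
	demo_vnodeops_t	*ops = BHV_OPS(bdp);

	return ops->vop_read(bdp, buf, len);
}

/* A pass-through layer forwards to the next behavior in the chain. */
static int demo_passthrough_read(bhv_desc_t *bdp, char *buf, int len)
{
	bhv_desc_t	*next = BHV_NEXT(bdp);
	demo_vnodeops_t	*ops = BHV_OPS(next);

	return ops->vop_read(next, buf, len);
}
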
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
new file mode 100644
index 0000000..a20a6c3
--- /dev/null
+++ b/fs/xfs/xfs_bit.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * XFS bit manipulation routines, used in non-realtime code.
+ */
+
+#include "xfs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+
+
+#ifndef HAVE_ARCH_HIGHBIT
+/*
+ * Index of the highest bit set in a byte, -1 for none set, 0..7 otherwise.
+ */
+const char xfs_highbit[256] = {
+       -1, 0, 1, 1, 2, 2, 2, 2,			/* 00 .. 07 */
+	3, 3, 3, 3, 3, 3, 3, 3,			/* 08 .. 0f */
+	4, 4, 4, 4, 4, 4, 4, 4,			/* 10 .. 17 */
+	4, 4, 4, 4, 4, 4, 4, 4,			/* 18 .. 1f */
+	5, 5, 5, 5, 5, 5, 5, 5,			/* 20 .. 27 */
+	5, 5, 5, 5, 5, 5, 5, 5,			/* 28 .. 2f */
+	5, 5, 5, 5, 5, 5, 5, 5,			/* 30 .. 37 */
+	5, 5, 5, 5, 5, 5, 5, 5,			/* 38 .. 3f */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 40 .. 47 */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 48 .. 4f */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 50 .. 57 */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 58 .. 5f */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 60 .. 67 */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 68 .. 6f */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 70 .. 77 */
+	6, 6, 6, 6, 6, 6, 6, 6,			/* 78 .. 7f */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* 80 .. 87 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* 88 .. 8f */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* 90 .. 97 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* 98 .. 9f */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* a0 .. a7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* a8 .. af */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* b0 .. b7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* b8 .. bf */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* c0 .. c7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* c8 .. cf */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* d0 .. d7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* d8 .. df */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* e0 .. e7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* e8 .. ef */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* f0 .. f7 */
+	7, 7, 7, 7, 7, 7, 7, 7,			/* f8 .. ff */
+};
+#endif
+
+/*
+ * Count of bits set in byte, 0..8.
+ */
+static const char xfs_countbit[256] = {
+	0, 1, 1, 2, 1, 2, 2, 3,			/* 00 .. 07 */
+	1, 2, 2, 3, 2, 3, 3, 4,			/* 08 .. 0f */
+	1, 2, 2, 3, 2, 3, 3, 4,			/* 10 .. 17 */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 18 .. 1f */
+	1, 2, 2, 3, 2, 3, 3, 4,			/* 20 .. 27 */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 28 .. 2f */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 30 .. 37 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* 38 .. 3f */
+	1, 2, 2, 3, 2, 3, 3, 4,			/* 40 .. 47 */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 48 .. 4f */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 50 .. 57 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* 58 .. 5f */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 60 .. 67 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* 68 .. 6f */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* 70 .. 77 */
+	4, 5, 5, 6, 5, 6, 6, 7,			/* 78 .. 7f */
+	1, 2, 2, 3, 2, 3, 3, 4,			/* 80 .. 87 */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 88 .. 8f */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* 90 .. 97 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* 98 .. 9f */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* a0 .. a7 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* a8 .. af */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* b0 .. b7 */
+	4, 5, 5, 6, 5, 6, 6, 7,			/* b8 .. bf */
+	2, 3, 3, 4, 3, 4, 4, 5,			/* c0 .. c7 */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* c8 .. cf */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* d0 .. d7 */
+	4, 5, 5, 6, 5, 6, 6, 7,			/* d8 .. df */
+	3, 4, 4, 5, 4, 5, 5, 6,			/* e0 .. e7 */
+	4, 5, 5, 6, 5, 6, 6, 7,			/* e8 .. ef */
+	4, 5, 5, 6, 5, 6, 6, 7,			/* f0 .. f7 */
+	5, 6, 6, 7, 6, 7, 7, 8,			/* f8 .. ff */
+};
+
+/*
+ * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
+ */
+inline int
+xfs_highbit32(
+	__uint32_t	v)
+{
+#ifdef HAVE_ARCH_HIGHBIT
+	return highbit32(v);
+#else
+	int		i;
+
+	if (v & 0xffff0000)
+		if (v & 0xff000000)
+			i = 24;
+		else
+			i = 16;
+	else if (v & 0x0000ffff)
+		if (v & 0x0000ff00)
+			i = 8;
+		else
+			i = 0;
+	else
+		return -1;
+	return i + xfs_highbit[(v >> i) & 0xff];
+#endif
+}
+
+/*
+ * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
+ */
+int
+xfs_lowbit64(
+	__uint64_t	v)
+{
+	__uint32_t	w = (__uint32_t)v;
+	int		n = 0;
+
+	if (w) {	/* lower bits */
+		n = ffs(w);
+	} else {	/* upper bits */
+		w = (__uint32_t)(v >> 32);
+		if (w && (n = ffs(w)))
+			n += 32;
+	}
+	return n - 1;
+}
+
+/*
+ * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
+ */
+int
+xfs_highbit64(
+	__uint64_t	v)
+{
+	__uint32_t	h = (__uint32_t)(v >> 32);
+
+	if (h)
+		return xfs_highbit32(h) + 32;
+	return xfs_highbit32((__uint32_t)v);
+}
+
+
+/*
+ * Count the number of bits set in the bitmap starting with bit
+ * start_bit.  Size is the size of the bitmap in words.
+ *
+ * Do the counting by mapping a byte value to the number of set
+ * bits for that value using the xfs_countbit array, i.e.
+ * xfs_countbit[0] == 0, xfs_countbit[1] == 1, xfs_countbit[2] == 1,
+ * xfs_countbit[3] == 2, etc.
+ */
+int
+xfs_count_bits(uint *map, uint size, uint start_bit)
+{
+	register int	bits;
+	register unsigned char	*bytep;
+	register unsigned char	*end_map;
+	int		byte_bit;
+
+	bits = 0;
+	end_map = (unsigned char *)(map + size);
+	bytep = (unsigned char *)(map + (start_bit & ~0x7));
+	byte_bit = start_bit & 0x7;
+
+	/*
+	 * If the caller fell off the end of the map, return 0.
+	 */
+	if (bytep >= end_map) {
+		return (0);
+	}
+
+	/*
+	 * If start_bit is not byte aligned, then process the
+	 * first byte separately.
+	 */
+	if (byte_bit != 0) {
+		/*
+		 * Shift off the bits we don't want to look at,
+		 * before indexing into xfs_countbit.
+		 */
+		bits += xfs_countbit[(*bytep >> byte_bit)];
+		bytep++;
+	}
+
+	/*
+	 * Count the bits in each byte until the end of the bitmap.
+	 */
+	while (bytep < end_map) {
+		bits += xfs_countbit[*bytep];
+		bytep++;
+	}
+
+	return (bits);
+}
+
+/*
+ * Count the number of contiguous bits set in the bitmap starting with bit
+ * start_bit.  Size is the size of the bitmap in words.
+ */
+int
+xfs_contig_bits(uint *map, uint	size, uint start_bit)
+{
+	uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
+	uint result = 0;
+	uint tmp;
+
+	size <<= BIT_TO_WORD_SHIFT;
+
+	ASSERT(start_bit < size);
+	size -= start_bit & ~(NBWORD - 1);
+	start_bit &= (NBWORD - 1);
+	if (start_bit) {
+		tmp = *p++;
+		/* force the bits below start to one so they are ignored */
+		tmp |= (~0U >> (NBWORD-start_bit));
+		if (tmp != ~0U)
+			goto found;
+		result += NBWORD;
+		size -= NBWORD;
+	}
+	while (size) {
+		if ((tmp = *p++) != ~0U)
+			goto found;
+		result += NBWORD;
+		size -= NBWORD;
+	}
+	return result - start_bit;
+found:
+	return result + ffz(tmp) - start_bit;
+}
+
+/*
+ * This takes the bit number to start looking from and
+ * returns the next set bit from there.  It returns -1
+ * if there are no more bits set or the start bit is
+ * beyond the end of the bitmap.
+ *
+ * Size is the number of words, not bytes, in the bitmap.
+ */
+int xfs_next_bit(uint *map, uint size, uint start_bit)
+{
+	uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
+	uint result = start_bit & ~(NBWORD - 1);
+	uint tmp;
+
+	size <<= BIT_TO_WORD_SHIFT;
+
+	if (start_bit >= size)
+		return -1;
+	size -= result;
+	start_bit &= (NBWORD - 1);
+	if (start_bit) {
+		tmp = *p++;
+		/* clear the bits below start so they are ignored */
+		tmp &= (~0U << start_bit);
+		if (tmp != 0U)
+			goto found;
+		result += NBWORD;
+		size -= NBWORD;
+	}
+	while (size) {
+		if ((tmp = *p++) != 0U)
+			goto found;
+		result += NBWORD;
+		size -= NBWORD;
+	}
+	return -1;
+found:
+	return result + ffs(tmp) - 1;
+}
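
A quick worked example of the routines above, as a sketch that assumes 32-bit
uint words, i.e. NBWORD == 32 and BIT_TO_WORD_SHIFT == 5 (these constants come
from other XFS headers).  demo_bits is hypothetical test code, not part of the
XFS sources.

/* Hypothetical test sketch: expected results for a two-word (64-bit) map. */
static void demo_bits(void)
{
	uint	map[2] = { 0x00000078, 0x00000100 };	/* bits 3..6 and bit 40 set */

	ASSERT(xfs_next_bit(map, 2, 0) == 3);		/* first set bit */
	ASSERT(xfs_next_bit(map, 2, 7) == 40);		/* next set bit at or after 7 */
	ASSERT(xfs_next_bit(map, 2, 41) == -1);		/* nothing further is set */
	ASSERT(xfs_contig_bits(map, 2, 3) == 4);	/* run of bits 3..6 */
	ASSERT(xfs_count_bits(map, 2, 0) == 5);		/* five bits set in total */
	ASSERT(xfs_highbit32(0x00000078) == 6);
	ASSERT(xfs_lowbit64(0x0000010000000000ULL) == 40);
	ASSERT(xfs_highbit64(0x0000010000000000ULL) == 40);
}
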
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
new file mode 100644
index 0000000..1e7f57d
--- /dev/null
+++ b/fs/xfs/xfs_bit.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_BIT_H__
+#define	__XFS_BIT_H__
+
+/*
+ * XFS bit manipulation routines.
+ */
+
+/*
+ * masks with n high/low bits set, 32-bit values & 64-bit values
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32HI)
+__uint32_t xfs_mask32hi(int n);
+#define	XFS_MASK32HI(n)		xfs_mask32hi(n)
+#else
+#define	XFS_MASK32HI(n)		((__uint32_t)-1 << (32 - (n)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64HI)
+__uint64_t xfs_mask64hi(int n);
+#define	XFS_MASK64HI(n)		xfs_mask64hi(n)
+#else
+#define	XFS_MASK64HI(n)		((__uint64_t)-1 << (64 - (n)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32LO)
+__uint32_t xfs_mask32lo(int n);
+#define	XFS_MASK32LO(n)		xfs_mask32lo(n)
+#else
+#define	XFS_MASK32LO(n)		(((__uint32_t)1 << (n)) - 1)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64LO)
+__uint64_t xfs_mask64lo(int n);
+#define	XFS_MASK64LO(n)		xfs_mask64lo(n)
+#else
+#define	XFS_MASK64LO(n)		(((__uint64_t)1 << (n)) - 1)
+#endif
+
+/* Get high bit set out of 32-bit argument, -1 if none set */
+extern int xfs_highbit32(__uint32_t v);
+
+/* Get low bit set out of 64-bit argument, -1 if none set */
+extern int xfs_lowbit64(__uint64_t v);
+
+/* Get high bit set out of 64-bit argument, -1 if none set */
+extern int xfs_highbit64(__uint64_t);
+
+/* Count set bits in map starting with start_bit */
+extern int xfs_count_bits(uint *map, uint size, uint start_bit);
+
+/* Count contiguous one bits in map starting with start_bit */
+extern int xfs_contig_bits(uint *map, uint size, uint start_bit);
+
+/* Find next set bit in map */
+extern int xfs_next_bit(uint *map, uint size, uint start_bit);
+
+#endif	/* __XFS_BIT_H__ */
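
A few worked values for the mask macros above, as a sketch; demo_masks is
hypothetical, and the out-of-line variants selected by XFS_WANT_FUNCS are
intended to yield the same values.

/* Hypothetical sketch: sample expansions of the mask macros. */
static void demo_masks(void)
{
	ASSERT(XFS_MASK32LO(4)  == 0x0000000fU);		/* low 4 bits set */
	ASSERT(XFS_MASK32HI(4)  == 0xf0000000U);		/* high 4 bits set */
	ASSERT(XFS_MASK64LO(36) == 0x0000000fffffffffULL);	/* low 36 bits set */
	ASSERT(XFS_MASK64HI(36) == 0xfffffffff0000000ULL);	/* high 36 bits set */
}
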
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
new file mode 100644
index 0000000..de31624
--- /dev/null
+++ b/fs/xfs/xfs_bmap.c
@@ -0,0 +1,6246 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_itable.h"
+#include "xfs_extfree_item.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_bit.h"
+#include "xfs_rw.h"
+#include "xfs_quota.h"
+#include "xfs_trans_space.h"
+#include "xfs_buf_item.h"
+
+
+#ifdef DEBUG
+STATIC void
+xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
+#endif
+
+kmem_zone_t		*xfs_bmap_free_item_zone;
+
+/*
+ * Prototypes for internal bmap routines.
+ */
+
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle extents format files.
+ */
+STATIC int					/* error */
+xfs_bmap_add_attrfork_extents(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first block allocated */
+	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
+	int			*flags);	/* inode logging flags */
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle local format files.
+ */
+STATIC int					/* error */
+xfs_bmap_add_attrfork_local(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first block allocated */
+	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
+	int			*flags);	/* inode logging flags */
+
+/*
+ * Called by xfs_bmapi to update extent list structure and the btree
+ * after allocating space (or doing a delayed allocation).
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork, /* data or attr fork */
+	int			rsvd);	/* OK to allocate reserved blocks */
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a delayed
+ * allocation to a real allocation.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_delay_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	xfs_filblks_t		*dnew,	/* new delayed-alloc indirect blocks */
+	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	int			*logflagsp, /* inode logging flags */
+	int			rsvd);	/* OK to allocate reserved blocks */
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a hole
+ * to a delayed allocation.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_hole_delay(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp,/* inode logging flags */
+	int			rsvd);	/* OK to allocate reserved blocks */
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a hole
+ * to a real allocation.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_hole_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork); /* data or attr fork */
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting an unwritten
+ * allocation to a real allocation or vice versa.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_unwritten_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp); /* inode logging flags */
+
+/*
+ * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
+ * It figures out where to ask the underlying allocator to put the new extent.
+ */
+STATIC int				/* error */
+xfs_bmap_alloc(
+	xfs_bmalloca_t		*ap);	/* bmap alloc argument struct */
+
+/*
+ * Transform a btree format file with only one leaf node, where the
+ * extents list will fit in the inode, into an extents format file.
+ * Since the extent list is already in-core, all we have to do is
+ * give up the space for the btree root and pitch the leaf block.
+ */
+STATIC int				/* error */
+xfs_bmap_btree_to_extents(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork); /* data or attr fork */
+
+#ifdef DEBUG
+/*
+ * Check that the extents list for the inode ip is in the right order.
+ */
+STATIC void
+xfs_bmap_check_extents(
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	int			whichfork);	/* data or attr fork */
+#endif
+
+/*
+ * Called by xfs_bmapi to update extent list structure and the btree
+ * after removing space (or undoing a delayed allocation).
+ */
+STATIC int				/* error */
+xfs_bmap_del_extent(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_trans_t		*tp,	/* current trans pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp,/* inode logging flags */
+	int			whichfork, /* data or attr fork */
+	int			rsvd);	 /* OK to allocate reserved blocks */
+
+/*
+ * Remove the entry "free" from the free item list.  Prev points to the
+ * previous entry, unless "free" is the head of the list.
+ */
+STATIC void
+xfs_bmap_del_free(
+	xfs_bmap_free_t		*flist,	/* free item list header */
+	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
+	xfs_bmap_free_item_t	*free);	/* list item to be freed */
+
+/*
+ * Remove count entries from the extents array for inode "ip", starting
+ * at index "idx".  Copies the remaining items down over the deleted ones,
+ * and gives back the excess memory.
+ */
+STATIC void
+xfs_bmap_delete_exlist(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* starting delete index */
+	xfs_extnum_t	count,		/* count of items to delete */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Convert an extents-format file into a btree-format file.
+ * The new file will have a root block (in the inode) and a single child block.
+ */
+STATIC int					/* error */
+xfs_bmap_extents_to_btree(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
+	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
+	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
+	int			wasdel,		/* converting a delayed alloc */
+	int			*logflagsp,	/* inode logging flags */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Insert new item(s) in the extent list for inode "ip".
+ * Count new items are inserted at offset idx.
+ */
+STATIC void
+xfs_bmap_insert_exlist(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* starting index of new items */
+	xfs_extnum_t	count,		/* number of inserted items */
+	xfs_bmbt_irec_t	*new,		/* items to insert */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Convert a local file to an extents file.
+ * This code is sort of bogus, since the file data needs to get
+ * logged so it won't be lost.  The bmap-level manipulations are ok, though.
+ */
+STATIC int				/* error */
+xfs_bmap_local_to_extents(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
+	xfs_extlen_t	total,		/* total blocks needed by transaction */
+	int		*logflagsp,	/* inode logging flags */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Search the extents list for the inode, for the extent containing bno.
+ * If bno lies in a hole, point to the next entry.  If bno lies past eof,
+ * *eofp will be set, and *prevp will contain the last entry (null if none).
+ * Else, *lastxp will be set to the index of the found
+ * entry; *gotp will contain the entry.
+ */
+STATIC xfs_bmbt_rec_t *			/* pointer to found extent entry */
+xfs_bmap_search_extents(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_fileoff_t	bno,		/* block number searched for */
+	int		whichfork,	/* data or attr fork */
+	int		*eofp,		/* out: end of file found */
+	xfs_extnum_t	*lastxp,	/* out: last extent index */
+	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
+	xfs_bmbt_irec_t	*prevp);	/* out: previous extent entry found */
+
+#ifdef XFS_BMAP_TRACE
+/*
+ * Add a bmap trace buffer entry.  Base routine for the others.
+ */
+STATIC void
+xfs_bmap_trace_addentry(
+	int		opcode,		/* operation */
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(ies) */
+	xfs_extnum_t	cnt,		/* count of entries, 1 or 2 */
+	xfs_bmbt_rec_t	*r1,		/* first record */
+	xfs_bmbt_rec_t	*r2,		/* second record or null */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
+ */
+STATIC void
+xfs_bmap_trace_delete(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(entries) deleted */
+	xfs_extnum_t	cnt,		/* count of entries deleted, 1 or 2 */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
+ * reading in the extents list from the disk (in the btree).
+ */
+STATIC void
+xfs_bmap_trace_insert(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(entries) inserted */
+	xfs_extnum_t	cnt,		/* count of entries inserted, 1 or 2 */
+	xfs_bmbt_irec_t	*r1,		/* inserted record 1 */
+	xfs_bmbt_irec_t	*r2,		/* inserted record 2 or null */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Add bmap trace entry after updating an extent list entry in place.
+ */
+STATIC void
+xfs_bmap_trace_post_update(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry updated */
+	int		whichfork);	/* data or attr fork */
+
+/*
+ * Add bmap trace entry prior to updating an extent list entry in place.
+ */
+STATIC void
+xfs_bmap_trace_pre_update(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry to be updated */
+	int		whichfork);	/* data or attr fork */
+
+#else
+#define	xfs_bmap_trace_delete(f,d,ip,i,c,w)
+#define	xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
+#define	xfs_bmap_trace_post_update(f,d,ip,i,w)
+#define	xfs_bmap_trace_pre_update(f,d,ip,i,w)
+#endif	/* XFS_BMAP_TRACE */
+
+/*
+ * Compute the worst-case number of indirect blocks that will be used
+ * for ip's delayed extent of length "len".
+ */
+STATIC xfs_filblks_t
+xfs_bmap_worst_indlen(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_filblks_t		len);	/* delayed extent length */
+
+#ifdef DEBUG
+/*
+ * Perform various validation checks on the values being returned
+ * from xfs_bmapi().
+ */
+STATIC void
+xfs_bmap_validate_ret(
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	xfs_bmbt_irec_t		*mval,
+	int			nmap,
+	int			ret_nmap);
+#else
+#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+#endif /* DEBUG */
+
+#if defined(XFS_RW_TRACE)
+STATIC void
+xfs_bunmap_trace(
+	xfs_inode_t		*ip,
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	inst_t			*ra);
+#else
+#define	xfs_bunmap_trace(ip, bno, len, flags, ra)
+#endif	/* XFS_RW_TRACE */
+
+STATIC int
+xfs_bmap_count_tree(
+	xfs_mount_t     *mp,
+	xfs_trans_t     *tp,
+	xfs_fsblock_t   blockno,
+	int             levelin,
+	int		*count);
+
+STATIC int
+xfs_bmap_count_leaves(
+	xfs_bmbt_rec_t		*frp,
+	int			numrecs,
+	int			*count);
+
+/*
+ * Bmap internal routines.
+ */
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle btree format files.
+ */
+STATIC int					/* error */
+xfs_bmap_add_attrfork_btree(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first block allocated */
+	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
+	int			*flags)		/* inode logging flags */
+{
+	xfs_btree_cur_t		*cur;		/* btree cursor */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* file system mount struct */
+	int			stat;		/* newroot status */
+
+	mp = ip->i_mount;
+	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
+		*flags |= XFS_ILOG_DBROOT;
+	else {
+		cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
+			XFS_DATA_FORK);
+		cur->bc_private.b.flist = flist;
+		cur->bc_private.b.firstblock = *firstblock;
+		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
+			goto error0;
+		ASSERT(stat == 1);	/* must be at least one entry */
+		if ((error = xfs_bmbt_newroot(cur, flags, &stat)))
+			goto error0;
+		if (stat == 0) {
+			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+			return XFS_ERROR(ENOSPC);
+		}
+		*firstblock = cur->bc_private.b.firstblock;
+		cur->bc_private.b.allocated = 0;
+		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	}
+	return 0;
+error0:
+	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle extents format files.
+ */
+STATIC int					/* error */
+xfs_bmap_add_attrfork_extents(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first block allocated */
+	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
+	int			*flags)		/* inode logging flags */
+{
+	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
+	int			error;		/* error return value */
+
+	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
+		return 0;
+	cur = NULL;
+	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
+		flags, XFS_DATA_FORK);
+	if (cur) {
+		cur->bc_private.b.allocated = 0;
+		xfs_btree_del_cursor(cur,
+			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	}
+	return error;
+}
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle local format files.
+ */
+STATIC int					/* error */
+xfs_bmap_add_attrfork_local(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first block allocated */
+	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
+	int			*flags)		/* inode logging flags */
+{
+	xfs_da_args_t		dargs;		/* args for dir/attr code */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* mount structure pointer */
+
+	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
+		return 0;
+	if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+		mp = ip->i_mount;
+		memset(&dargs, 0, sizeof(dargs));
+		dargs.dp = ip;
+		dargs.firstblock = firstblock;
+		dargs.flist = flist;
+		dargs.total = mp->m_dirblkfsbs;
+		dargs.whichfork = XFS_DATA_FORK;
+		dargs.trans = tp;
+		error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs);
+	} else
+		error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
+			XFS_DATA_FORK);
+	return error;
+}
+
+/*
+ * Called by xfs_bmapi to update extent list structure and the btree
+ * after allocating space (or doing a delayed allocation).
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork, /* data or attr fork */
+	int			rsvd)	/* OK to use reserved data blocks */
+{
+	xfs_btree_cur_t		*cur;	/* btree cursor or null */
+	xfs_filblks_t		da_new; /* new count del alloc blocks used */
+	xfs_filblks_t		da_old; /* old count del alloc blocks used */
+	int			error;	/* error return value */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_add_extent";
+#endif
+	xfs_ifork_t		*ifp;	/* inode fork ptr */
+	int			logflags; /* returned value */
+	xfs_extnum_t		nextents; /* number of extents in file now */
+
+	XFS_STATS_INC(xs_add_exlist);
+	cur = *curp;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	ASSERT(idx <= nextents);
+	da_old = da_new = 0;
+	error = 0;
+	/*
+	 * This is the first extent added to a new/empty file.
+	 * Special case this one, so other routines get to assume there are
+	 * already extents in the list.
+	 */
+	if (nextents == 0) {
+		xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
+			NULL, whichfork);
+		xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork);
+		ASSERT(cur == NULL);
+		ifp->if_lastex = 0;
+		if (!ISNULLSTARTBLOCK(new->br_startblock)) {
+			XFS_IFORK_NEXT_SET(ip, whichfork, 1);
+			logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+		} else
+			logflags = 0;
+	}
+	/*
+	 * Any kind of new delayed allocation goes here.
+	 */
+	else if (ISNULLSTARTBLOCK(new->br_startblock)) {
+		if (cur)
+			ASSERT((cur->bc_private.b.flags &
+				XFS_BTCUR_BPRV_WASDEL) == 0);
+		if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new,
+				&logflags, rsvd)))
+			goto done;
+	}
+	/*
+	 * Real allocation off the end of the file.
+	 */
+	else if (idx == nextents) {
+		if (cur)
+			ASSERT((cur->bc_private.b.flags &
+				XFS_BTCUR_BPRV_WASDEL) == 0);
+		if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
+				&logflags, whichfork)))
+			goto done;
+	} else {
+		xfs_bmbt_irec_t	prev;	/* old extent at offset idx */
+
+		/*
+		 * Get the record referred to by idx.
+		 */
+		xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev);
+		/*
+		 * If it's a real allocation record, and the new allocation ends
+		 * after the start of the referred to record, then we're filling
+		 * in a delayed or unwritten allocation with a real one, or
+		 * converting real back to unwritten.
+		 */
+		if (!ISNULLSTARTBLOCK(new->br_startblock) &&
+		    new->br_startoff + new->br_blockcount > prev.br_startoff) {
+			if (prev.br_state != XFS_EXT_UNWRITTEN &&
+			    ISNULLSTARTBLOCK(prev.br_startblock)) {
+				da_old = STARTBLOCKVAL(prev.br_startblock);
+				if (cur)
+					ASSERT(cur->bc_private.b.flags &
+						XFS_BTCUR_BPRV_WASDEL);
+				if ((error = xfs_bmap_add_extent_delay_real(ip,
+					idx, &cur, new, &da_new, first, flist,
+					&logflags, rsvd)))
+					goto done;
+			} else if (new->br_state == XFS_EXT_NORM) {
+				ASSERT(new->br_state == XFS_EXT_NORM);
+				if ((error = xfs_bmap_add_extent_unwritten_real(
+					ip, idx, &cur, new, &logflags)))
+					goto done;
+			} else {
+				ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
+				if ((error = xfs_bmap_add_extent_unwritten_real(
+					ip, idx, &cur, new, &logflags)))
+					goto done;
+			}
+			ASSERT(*curp == cur || *curp == NULL);
+		}
+		/*
+		 * Otherwise we're filling in a hole with an allocation.
+		 */
+		else {
+			if (cur)
+				ASSERT((cur->bc_private.b.flags &
+					XFS_BTCUR_BPRV_WASDEL) == 0);
+			if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
+					new, &logflags, whichfork)))
+				goto done;
+		}
+	}
+
+	ASSERT(*curp == cur || *curp == NULL);
+	/*
+	 * Convert to a btree if necessary.
+	 */
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
+		int	tmp_logflags;	/* partial log flag return val */
+
+		ASSERT(cur == NULL);
+		error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
+			flist, &cur, da_old > 0, &tmp_logflags, whichfork);
+		logflags |= tmp_logflags;
+		if (error)
+			goto done;
+	}
+	/*
+	 * Adjust for changes in reserved delayed indirect blocks.
+	 * Nothing to do for disk quotas here.
+	 */
+	if (da_old || da_new) {
+		xfs_filblks_t	nblks;
+
+		nblks = da_new;
+		if (cur)
+			nblks += cur->bc_private.b.allocated;
+		ASSERT(nblks <= da_old);
+		if (nblks < da_old)
+			xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
+				(int)(da_old - nblks), rsvd);
+	}
+	/*
+	 * Clear out the allocated field, done with it now in any case.
+	 */
+	if (cur) {
+		cur->bc_private.b.allocated = 0;
+		*curp = cur;
+	}
+done:
+#ifdef DEBUG
+	if (!error)
+		xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
+#endif
+	*logflagsp = logflags;
+	return error;
+}
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a delayed
+ * allocation to a real allocation.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_delay_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	xfs_filblks_t		*dnew,	/* new delayed-alloc indirect blocks */
+	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	int			*logflagsp, /* inode logging flags */
+	int			rsvd)	/* OK to use reserved data block allocation */
+{
+	xfs_bmbt_rec_t		*base;	/* base of extent entry list */
+	xfs_btree_cur_t		*cur;	/* btree cursor */
+	int			diff;	/* temp value */
+	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
+	int			error;	/* error return value */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_add_extent_delay_real";
+#endif
+	int			i;	/* temp state */
+	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
+	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
+					/* left is 0, right is 1, prev is 2 */
+	int			rval=0;	/* return value (logging flags) */
+	int			state = 0;/* state bits, accessed thru macros */
+	xfs_filblks_t		temp;	/* value for dnew calculations */
+	xfs_filblks_t		temp2;	/* value for dnew calculations */
+	int			tmp_rval;	/* partial logging flags */
+	enum {				/* bit number definitions for state */
+		LEFT_CONTIG,	RIGHT_CONTIG,
+		LEFT_FILLING,	RIGHT_FILLING,
+		LEFT_DELAY,	RIGHT_DELAY,
+		LEFT_VALID,	RIGHT_VALID
+	};
+
+#define	LEFT		r[0]
+#define	RIGHT		r[1]
+#define	PREV		r[2]
+#define	MASK(b)		(1 << (b))
+#define	MASK2(a,b)	(MASK(a) | MASK(b))
+#define	MASK3(a,b,c)	(MASK2(a,b) | MASK(c))
+#define	MASK4(a,b,c,d)	(MASK3(a,b,c) | MASK(d))
+#define	STATE_SET(b,v)	((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
+#define	STATE_TEST(b)	(state & MASK(b))
+#define	STATE_SET_TEST(b,v)	((v) ? ((state |= MASK(b)), 1) : \
+				       ((state &= ~MASK(b)), 0))
+#define	SWITCH_STATE		\
+	(state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
+
+	/*
+	 * Set up a bunch of variables to make the tests simpler.
+	 */
+	cur = *curp;
+	base = ip->i_df.if_u1.if_extents;
+	ep = &base[idx];
+	xfs_bmbt_get_all(ep, &PREV);
+	new_endoff = new->br_startoff + new->br_blockcount;
+	ASSERT(PREV.br_startoff <= new->br_startoff);
+	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+	/*
+	 * Set flags determining what part of the previous delayed allocation
+	 * extent is being replaced by a real allocation.
+	 */
+	STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
+	STATE_SET(RIGHT_FILLING,
+		PREV.br_startoff + PREV.br_blockcount == new_endoff);
+	/*
+	 * Check and set flags if this segment has a left neighbor.
+	 * Don't set contiguous if the combined extent would be too large.
+	 */
+	if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+		xfs_bmbt_get_all(ep - 1, &LEFT);
+		STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+	}
+	STATE_SET(LEFT_CONTIG,
+		STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
+		LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+		LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+		LEFT.br_state == new->br_state &&
+		LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+	/*
+	 * Check and set flags if this segment has a right neighbor.
+	 * Don't set contiguous if the combined extent would be too large.
+	 * Also check for all-three-contiguous being too large.
+	 */
+	if (STATE_SET_TEST(RIGHT_VALID,
+			idx <
+			ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+		xfs_bmbt_get_all(ep + 1, &RIGHT);
+		STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+	}
+	STATE_SET(RIGHT_CONTIG,
+		STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
+		new_endoff == RIGHT.br_startoff &&
+		new->br_startblock + new->br_blockcount ==
+		    RIGHT.br_startblock &&
+		new->br_state == RIGHT.br_state &&
+		new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+		((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
+		  MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
+		 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+		     <= MAXEXTLEN));
+	error = 0;
+	/*
+	 * Switch out based on the FILLING and CONTIG state bits.
+	 */
+	switch (SWITCH_STATE) {
+
+	case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+		/*
+		 * Filling in all of a previously delayed allocation extent.
+		 * The left and right neighbors are both contiguous with new.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + PREV.br_blockcount +
+			RIGHT.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		ip->i_d.di_nextents--;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+					RIGHT.br_startblock,
+					RIGHT.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_delete(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+					LEFT.br_startblock,
+					LEFT.br_blockcount +
+					PREV.br_blockcount +
+					RIGHT.br_blockcount, LEFT.br_state)))
+				goto done;
+		}
+		*dnew = 0;
+		break;
+
+	case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+		/*
+		 * Filling in all of a previously delayed allocation extent.
+		 * The left neighbor is contiguous, the right is not.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + PREV.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+					LEFT.br_startblock, LEFT.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+					LEFT.br_startblock,
+					LEFT.br_blockcount +
+					PREV.br_blockcount, LEFT.br_state)))
+				goto done;
+		}
+		*dnew = 0;
+		break;
+
+	case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+		/*
+		 * Filling in all of a previously delayed allocation extent.
+		 * The right neighbor is contiguous, the left is not.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_startblock(ep, new->br_startblock);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount + RIGHT.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+					RIGHT.br_startblock,
+					RIGHT.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+					new->br_startblock,
+					PREV.br_blockcount +
+					RIGHT.br_blockcount, PREV.br_state)))
+				goto done;
+		}
+		*dnew = 0;
+		break;
+
+	case MASK2(LEFT_FILLING, RIGHT_FILLING):
+		/*
+		 * Filling in all of a previously delayed allocation extent.
+		 * Neither the left nor right neighbors are contiguous with
+		 * the new one.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_startblock(ep, new->br_startblock);
+		xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 0);
+			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		*dnew = 0;
+		break;
+
+	case MASK2(LEFT_FILLING, LEFT_CONTIG):
+		/*
+		 * Filling in the first part of a previous delayed allocation.
+		 * The left neighbor is contiguous.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + new->br_blockcount);
+		xfs_bmbt_set_startoff(ep,
+			PREV.br_startoff + new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		temp = PREV.br_blockcount - new->br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep, temp);
+		ip->i_df.if_lastex = idx - 1;
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+					LEFT.br_startblock, LEFT.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+					LEFT.br_startblock,
+					LEFT.br_blockcount +
+					new->br_blockcount,
+					LEFT.br_state)))
+				goto done;
+		}
+		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+			STARTBLOCKVAL(PREV.br_startblock));
+		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
+			XFS_DATA_FORK);
+		*dnew = temp;
+		break;
+
+	case MASK(LEFT_FILLING):
+		/*
+		 * Filling in the first part of a previous delayed allocation.
+		 * The left neighbor is not contiguous.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		xfs_bmbt_set_startoff(ep, new_endoff);
+		temp = PREV.br_blockcount - new->br_blockcount;
+		xfs_bmbt_set_blockcount(ep, temp);
+		xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
+			XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 0);
+			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+					first, flist, &cur, 1, &tmp_rval,
+					XFS_DATA_FORK);
+			rval |= tmp_rval;
+			if (error)
+				goto done;
+		}
+		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+			STARTBLOCKVAL(PREV.br_startblock) -
+			(cur ? cur->bc_private.b.allocated : 0));
+		base = ip->i_df.if_u1.if_extents;
+		ep = &base[idx + 1];
+		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+		xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
+			XFS_DATA_FORK);
+		*dnew = temp;
+		break;
+
+	case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+		/*
+		 * Filling in the last part of a previous delayed allocation.
+		 * The right neighbor is contiguous with the new allocation.
+		 */
+		temp = PREV.br_blockcount - new->br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep, temp);
+		xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
+			new->br_blockcount + RIGHT.br_blockcount,
+			RIGHT.br_state);
+		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+					RIGHT.br_startblock,
+					RIGHT.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, new->br_startoff,
+					new->br_startblock,
+					new->br_blockcount +
+					RIGHT.br_blockcount,
+					RIGHT.br_state)))
+				goto done;
+		}
+		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+			STARTBLOCKVAL(PREV.br_startblock));
+		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		*dnew = temp;
+		break;
+
+	case MASK(RIGHT_FILLING):
+		/*
+		 * Filling in the last part of a previous delayed allocation.
+		 * The right neighbor is not contiguous.
+		 */
+		temp = PREV.br_blockcount - new->br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep, temp);
+		xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
+			new, NULL, XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 0);
+			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+				first, flist, &cur, 1, &tmp_rval,
+				XFS_DATA_FORK);
+			rval |= tmp_rval;
+			if (error)
+				goto done;
+		}
+		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+			STARTBLOCKVAL(PREV.br_startblock) -
+			(cur ? cur->bc_private.b.allocated : 0));
+		base = ip->i_df.if_u1.if_extents;
+		ep = &base[idx];
+		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+		xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		*dnew = temp;
+		break;
+
+	case 0:
+		/*
+		 * Filling in the middle part of a previous delayed allocation.
+		 * Contiguity is impossible here.
+		 * This case is avoided almost all the time.
+		 */
+		temp = new->br_startoff - PREV.br_startoff;
+		xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep, temp);
+		r[0] = *new;
+		r[1].br_startoff = new_endoff;
+		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
+		r[1].br_blockcount = temp2;
+		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
+			XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 0);
+			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+					first, flist, &cur, 1, &tmp_rval,
+					XFS_DATA_FORK);
+			rval |= tmp_rval;
+			if (error)
+				goto done;
+		}
+		temp = xfs_bmap_worst_indlen(ip, temp);
+		temp2 = xfs_bmap_worst_indlen(ip, temp2);
+		diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
+			(cur ? cur->bc_private.b.allocated : 0));
+		if (diff > 0 &&
+		    xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) {
+			/*
+			 * Ick gross gag me with a spoon.
+			 */
+			ASSERT(0);	/* want to see if this ever happens! */
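+			/*
+			 * The two new delayed extents need a larger worst-case
+			 * indirect block reservation than the original single
+			 * extent held, and the free block counter could not
+			 * cover the difference.  Shave the two reservations
+			 * down one block at a time, retrying the (smaller)
+			 * debit until it succeeds or there is nothing left
+			 * to give back.
+			 */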
+			while (diff > 0) {
+				if (temp) {
+					temp--;
+					diff--;
+					if (!diff ||
+					    !xfs_mod_incore_sb(ip->i_mount,
+						    XFS_SBS_FDBLOCKS, -diff, rsvd))
+						break;
+				}
+				if (temp2) {
+					temp2--;
+					diff--;
+					if (!diff ||
+					    !xfs_mod_incore_sb(ip->i_mount,
+						    XFS_SBS_FDBLOCKS, -diff, rsvd))
+						break;
+				}
+			}
+		}
+		base = ip->i_df.if_u1.if_extents;
+		ep = &base[idx];
+		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+		xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2));
+		xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
+			XFS_DATA_FORK);
+		*dnew = temp + temp2;
+		break;
+
+	case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK2(LEFT_FILLING, RIGHT_CONTIG):
+	case MASK2(RIGHT_FILLING, LEFT_CONTIG):
+	case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK(LEFT_CONTIG):
+	case MASK(RIGHT_CONTIG):
+		/*
+		 * These cases are all impossible.
+		 */
+		ASSERT(0);
+	}
+	*curp = cur;
+done:
+	*logflagsp = rval;
+	return error;
+#undef	LEFT
+#undef	RIGHT
+#undef	PREV
+#undef	MASK
+#undef	MASK2
+#undef	MASK3
+#undef	MASK4
+#undef	STATE_SET
+#undef	STATE_TEST
+#undef	STATE_SET_TEST
+#undef	SWITCH_STATE
+}
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting an unwritten
+ * allocation to a real allocation or vice versa.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_unwritten_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp) /* inode logging flags */
+{
+	xfs_bmbt_rec_t		*base;	/* base of extent entry list */
+	xfs_btree_cur_t		*cur;	/* btree cursor */
+	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
+	int			error;	/* error return value */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_add_extent_unwritten_real";
+#endif
+	int			i;	/* temp state */
+	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
+	xfs_exntst_t		newext;	/* new extent state */
+	xfs_exntst_t		oldext;	/* old extent state */
+	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
+					/* left is 0, right is 1, prev is 2 */
+	int			rval=0;	/* return value (logging flags) */
+	int			state = 0;/* state bits, accessed thru macros */
+	enum {				/* bit number definitions for state */
+		LEFT_CONTIG,	RIGHT_CONTIG,
+		LEFT_FILLING,	RIGHT_FILLING,
+		LEFT_DELAY,	RIGHT_DELAY,
+		LEFT_VALID,	RIGHT_VALID
+	};
+
+#define	LEFT		r[0]
+#define	RIGHT		r[1]
+#define	PREV		r[2]
+#define	MASK(b)		(1 << (b))
+#define	MASK2(a,b)	(MASK(a) | MASK(b))
+#define	MASK3(a,b,c)	(MASK2(a,b) | MASK(c))
+#define	MASK4(a,b,c,d)	(MASK3(a,b,c) | MASK(d))
+#define	STATE_SET(b,v)	((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
+#define	STATE_TEST(b)	(state & MASK(b))
+#define	STATE_SET_TEST(b,v)	((v) ? ((state |= MASK(b)), 1) : \
+				       ((state &= ~MASK(b)), 0))
+#define	SWITCH_STATE		\
+	(state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
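+	/*
+	 * The state bits above drive the switch below.  For example, when
+	 * the new range starts at PREV's start offset (LEFT_FILLING) and
+	 * can be merged with the left neighbor (LEFT_CONTIG), SWITCH_STATE
+	 * equals MASK2(LEFT_FILLING, LEFT_CONTIG) and that case is taken.
+	 */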
+
+	/*
+	 * Set up a bunch of variables to make the tests simpler.
+	 */
+	error = 0;
+	cur = *curp;
+	base = ip->i_df.if_u1.if_extents;
+	ep = &base[idx];
+	xfs_bmbt_get_all(ep, &PREV);
+	newext = new->br_state;
+	oldext = (newext == XFS_EXT_UNWRITTEN) ?
+		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
+	ASSERT(PREV.br_state == oldext);
+	new_endoff = new->br_startoff + new->br_blockcount;
+	ASSERT(PREV.br_startoff <= new->br_startoff);
+	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+	/*
+	 * Set flags determining what part of the previous oldext allocation
+	 * extent is being replaced by a newext allocation.
+	 */
+	STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
+	STATE_SET(RIGHT_FILLING,
+		PREV.br_startoff + PREV.br_blockcount == new_endoff);
+	/*
+	 * Check and set flags if this segment has a left neighbor.
+	 * Don't set contiguous if the combined extent would be too large.
+	 */
+	if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+		xfs_bmbt_get_all(ep - 1, &LEFT);
+		STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+	}
+	STATE_SET(LEFT_CONTIG,
+		STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
+		LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+		LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+		LEFT.br_state == newext &&
+		LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+	/*
+	 * Check and set flags if this segment has a right neighbor.
+	 * Don't set contiguous if the combined extent would be too large.
+	 * Also check for all-three-contiguous being too large.
+	 */
+	if (STATE_SET_TEST(RIGHT_VALID,
+			idx <
+			ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+		xfs_bmbt_get_all(ep + 1, &RIGHT);
+		STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+	}
+	STATE_SET(RIGHT_CONTIG,
+		STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
+		new_endoff == RIGHT.br_startoff &&
+		new->br_startblock + new->br_blockcount ==
+		    RIGHT.br_startblock &&
+		newext == RIGHT.br_state &&
+		new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+		((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
+		  MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
+		 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+		     <= MAXEXTLEN));
+	/*
+	 * Switch out based on the FILLING and CONTIG state bits.
+	 */
+	switch (SWITCH_STATE) {
+
+	case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The left and right neighbors are both contiguous with new.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + PREV.br_blockcount +
+			RIGHT.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		ip->i_d.di_nextents -= 2;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+					RIGHT.br_startblock,
+					RIGHT.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_delete(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_delete(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+				LEFT.br_startblock,
+				LEFT.br_blockcount + PREV.br_blockcount +
+				RIGHT.br_blockcount, LEFT.br_state)))
+				goto done;
+		}
+		break;
+
+	case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The left neighbor is contiguous, the right is not.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + PREV.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
+		ip->i_d.di_nextents--;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock, PREV.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_delete(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+				LEFT.br_startblock,
+				LEFT.br_blockcount + PREV.br_blockcount,
+				LEFT.br_state)))
+				goto done;
+		}
+		break;
+
+	case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The right neighbor is contiguous, the left is not.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount + RIGHT.br_blockcount);
+		xfs_bmbt_set_state(ep, newext);
+		xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
+		ip->i_d.di_nextents--;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+					RIGHT.br_startblock,
+					RIGHT.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_delete(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, new->br_startoff,
+				new->br_startblock,
+				new->br_blockcount + RIGHT.br_blockcount,
+				newext)))
+				goto done;
+		}
+		break;
+
+	case MASK2(LEFT_FILLING, RIGHT_FILLING):
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * Neither the left nor right neighbors are contiguous with
+		 * the new one.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_state(ep, newext);
+		xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, new->br_startoff,
+				new->br_startblock, new->br_blockcount,
+				newext)))
+				goto done;
+		}
+		break;
+
+	case MASK2(LEFT_FILLING, LEFT_CONTIG):
+		/*
+		 * Setting the first part of a previous oldext extent to newext.
+		 * The left neighbor is contiguous.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1,
+			LEFT.br_blockcount + new->br_blockcount);
+		xfs_bmbt_set_startoff(ep,
+			PREV.br_startoff + new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_startblock(ep,
+			new->br_startblock + new->br_blockcount);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount - new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock, PREV.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur,
+				PREV.br_startoff + new->br_blockcount,
+				PREV.br_startblock + new->br_blockcount,
+				PREV.br_blockcount - new->br_blockcount,
+				oldext)))
+				goto done;
+			if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+				goto done;
+			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+				LEFT.br_startblock,
+				LEFT.br_blockcount + new->br_blockcount,
+				LEFT.br_state)))
+				goto done;
+		}
+		break;
+
+	case MASK(LEFT_FILLING):
+		/*
+		 * Setting the first part of a previous oldext extent to newext.
+		 * The left neighbor is not contiguous.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
+		xfs_bmbt_set_startoff(ep, new_endoff);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount - new->br_blockcount);
+		xfs_bmbt_set_startblock(ep,
+			new->br_startblock + new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
+			XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock, PREV.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur,
+				PREV.br_startoff + new->br_blockcount,
+				PREV.br_startblock + new->br_blockcount,
+				PREV.br_blockcount - new->br_blockcount,
+				oldext)))
+				goto done;
+			cur->bc_rec.b = *new;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		break;
+
+	case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+		/*
+		 * Setting the last part of a previous oldext extent to newext.
+		 * The right neighbor is contiguous with the new allocation.
+		 */
+		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount - new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
+			new->br_blockcount + RIGHT.br_blockcount, newext);
+		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		if (cur == NULL)
+			rval = XFS_ILOG_DEXT;
+		else {
+			rval = 0;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock,
+					PREV.br_blockcount, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+				PREV.br_startblock,
+				PREV.br_blockcount - new->br_blockcount,
+				oldext)))
+				goto done;
+			if ((error = xfs_bmbt_increment(cur, 0, &i)))
+				goto done;
+			if ((error = xfs_bmbt_update(cur, new->br_startoff,
+				new->br_startblock,
+				new->br_blockcount + RIGHT.br_blockcount,
+				newext)))
+				goto done;
+		}
+		break;
+
+	case MASK(RIGHT_FILLING):
+		/*
+		 * Setting the last part of a previous oldext extent to newext.
+		 * The right neighbor is not contiguous.
+		 */
+		xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep,
+			PREV.br_blockcount - new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
+			new, NULL, XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		ip->i_d.di_nextents++;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock, PREV.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+				PREV.br_startblock,
+				PREV.br_blockcount - new->br_blockcount,
+				oldext)))
+				goto done;
+			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+					new->br_startblock, new->br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 0);
+			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		break;
+
+	case 0:
+		/*
+		 * Setting the middle part of a previous oldext extent to
+		 * newext.  Contiguity is impossible here.
+		 * One extent becomes three extents.
+		 */
+		xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep,
+			new->br_startoff - PREV.br_startoff);
+		xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		r[0] = *new;
+		r[1].br_startoff = new_endoff;
+		r[1].br_blockcount =
+			PREV.br_startoff + PREV.br_blockcount - new_endoff;
+		r[1].br_startblock = new->br_startblock + new->br_blockcount;
+		r[1].br_state = oldext;
+		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
+			XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx + 1;
+		ip->i_d.di_nextents += 2;
+		if (cur == NULL)
+			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+		else {
+			rval = XFS_ILOG_CORE;
+			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+					PREV.br_startblock, PREV.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+			/* new right extent - oldext */
+			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
+				r[1].br_startblock, r[1].br_blockcount,
+				r[1].br_state)))
+				goto done;
+			/* new left extent - oldext */
+			PREV.br_blockcount =
+				new->br_startoff - PREV.br_startoff;
+			cur->bc_rec.b = PREV;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+			if ((error = xfs_bmbt_increment(cur, 0, &i)))
+				goto done;
+			ASSERT(i == 1);
+			/* new middle extent - newext */
+			cur->bc_rec.b = *new;
+			if ((error = xfs_bmbt_insert(cur, &i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		break;
+
+	case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK2(LEFT_FILLING, RIGHT_CONTIG):
+	case MASK2(RIGHT_FILLING, LEFT_CONTIG):
+	case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+	case MASK(LEFT_CONTIG):
+	case MASK(RIGHT_CONTIG):
+		/*
+		 * These cases are all impossible.
+		 */
+		ASSERT(0);
+	}
+	*curp = cur;
+done:
+	*logflagsp = rval;
+	return error;
+#undef	LEFT
+#undef	RIGHT
+#undef	PREV
+#undef	MASK
+#undef	MASK2
+#undef	MASK3
+#undef	MASK4
+#undef	STATE_SET
+#undef	STATE_TEST
+#undef	STATE_SET_TEST
+#undef	SWITCH_STATE
+}
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a hole
+ * to a delayed allocation.
+ */
+/*ARGSUSED*/
+STATIC int				/* error */
+xfs_bmap_add_extent_hole_delay(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp, /* inode logging flags */
+	int			rsvd)		/* OK to allocate reserved blocks */
+{
+	xfs_bmbt_rec_t		*base;	/* base of extent entry list */
+	xfs_bmbt_rec_t		*ep;	/* extent list entry for idx */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_add_extent_hole_delay";
+#endif
+	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
+	xfs_filblks_t		newlen=0;	/* new indirect size */
+	xfs_filblks_t		oldlen=0;	/* old indirect size */
+	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
+	int			state;  /* state bits, accessed thru macros */
+	xfs_filblks_t		temp;	/* temp for indirect calculations */
+	enum {				/* bit number definitions for state */
+		LEFT_CONTIG,	RIGHT_CONTIG,
+		LEFT_DELAY,	RIGHT_DELAY,
+		LEFT_VALID,	RIGHT_VALID
+	};
+
+#define	MASK(b)			(1 << (b))
+#define	MASK2(a,b)		(MASK(a) | MASK(b))
+#define	STATE_SET(b,v)		((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
+#define	STATE_TEST(b)		(state & MASK(b))
+#define	STATE_SET_TEST(b,v)	((v) ? ((state |= MASK(b)), 1) : \
+				       ((state &= ~MASK(b)), 0))
+#define	SWITCH_STATE		(state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
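+	/*
+	 * Note that here a neighbor only counts as contiguous if it is
+	 * itself a delayed allocation (LEFT_DELAY/RIGHT_DELAY below);
+	 * merging with real allocations is handled by
+	 * xfs_bmap_add_extent_hole_real.
+	 */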
+
+	base = ip->i_df.if_u1.if_extents;
+	ep = &base[idx];
+	state = 0;
+	ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
+	/*
+	 * Check and set flags if this segment has a left neighbor
+	 */
+	if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+		xfs_bmbt_get_all(ep - 1, &left);
+		STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+	}
+	/*
+	 * Check and set flags if the current (right) segment exists.
+	 * If it doesn't exist, we're converting the hole at end-of-file.
+	 */
+	if (STATE_SET_TEST(RIGHT_VALID,
+			   idx <
+			   ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+		xfs_bmbt_get_all(ep, &right);
+		STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+	}
+	/*
+	 * Set contiguity flags on the left and right neighbors.
+	 * Don't let extents get too large, even if the pieces are contiguous.
+	 */
+	STATE_SET(LEFT_CONTIG,
+		STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
+		left.br_startoff + left.br_blockcount == new->br_startoff &&
+		left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+	STATE_SET(RIGHT_CONTIG,
+		STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
+		new->br_startoff + new->br_blockcount == right.br_startoff &&
+		new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+		(!STATE_TEST(LEFT_CONTIG) ||
+		 (left.br_blockcount + new->br_blockcount +
+		     right.br_blockcount <= MAXEXTLEN)));
+	/*
+	 * Switch out based on the contiguity flags.
+	 */
+	switch (SWITCH_STATE) {
+
+	case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+		/*
+		 * New allocation is contiguous with delayed allocations
+		 * on the left and on the right.
+		 * Merge all three into a single extent list entry.
+		 */
+		temp = left.br_blockcount + new->br_blockcount +
+			right.br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1, temp);
+		oldlen = STARTBLOCKVAL(left.br_startblock) +
+			STARTBLOCKVAL(new->br_startblock) +
+			STARTBLOCKVAL(right.br_startblock);
+		newlen = xfs_bmap_worst_indlen(ip, temp);
+		xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
+		xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
+			XFS_DATA_FORK);
+		xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		break;
+
+	case MASK(LEFT_CONTIG):
+		/*
+		 * New allocation is contiguous with a delayed allocation
+		 * on the left.
+		 * Merge the new allocation with the left neighbor.
+		 */
+		temp = left.br_blockcount + new->br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		xfs_bmbt_set_blockcount(ep - 1, temp);
+		oldlen = STARTBLOCKVAL(left.br_startblock) +
+			STARTBLOCKVAL(new->br_startblock);
+		newlen = xfs_bmap_worst_indlen(ip, temp);
+		xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
+		xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
+			XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx - 1;
+		break;
+
+	case MASK(RIGHT_CONTIG):
+		/*
+		 * New allocation is contiguous with a delayed allocation
+		 * on the right.
+		 * Merge the new allocation with the right neighbor.
+		 */
+		xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK);
+		temp = new->br_blockcount + right.br_blockcount;
+		oldlen = STARTBLOCKVAL(new->br_startblock) +
+			STARTBLOCKVAL(right.br_startblock);
+		newlen = xfs_bmap_worst_indlen(ip, temp);
+		xfs_bmbt_set_allf(ep, new->br_startoff,
+			NULLSTARTBLOCK((int)newlen), temp, right.br_state);
+		xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		break;
+
+	case 0:
+		/*
+		 * New allocation is not contiguous with another
+		 * delayed allocation.
+		 * Insert a new entry.
+		 */
+		oldlen = newlen = 0;
+		xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
+			XFS_DATA_FORK);
+		xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
+		ip->i_df.if_lastex = idx;
+		break;
+	}
+	if (oldlen != newlen) {
+		ASSERT(oldlen > newlen);
+		xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
+			(int)(oldlen - newlen), rsvd);
+		/*
+		 * Nothing to do for disk quota accounting here.
+		 */
+	}
+	*logflagsp = 0;
+	return 0;
+#undef	MASK
+#undef	MASK2
+#undef	STATE_SET
+#undef	STATE_TEST
+#undef	STATE_SET_TEST
+#undef	SWITCH_STATE
+}
+
+/*
+ * Called by xfs_bmap_add_extent to handle cases converting a hole
+ * to a real allocation.
+ */
+STATIC int				/* error */
+xfs_bmap_add_extent_hole_real(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_extnum_t		idx,	/* extent number to update/insert */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*new,	/* new data to put in extent list */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork) /* data or attr fork */
+{
+	xfs_bmbt_rec_t		*ep;	/* pointer to extent entry ins. point */
+	int			error;	/* error return value */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_add_extent_hole_real";
+#endif
+	int			i;	/* temp state */
+	xfs_ifork_t		*ifp;	/* inode fork pointer */
+	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
+	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
+	int			state;	/* state bits, accessed thru macros */
+	enum {				/* bit number definitions for state */
+		LEFT_CONTIG,	RIGHT_CONTIG,
+		LEFT_DELAY,	RIGHT_DELAY,
+		LEFT_VALID,	RIGHT_VALID
+	};
+
+#define	MASK(b)			(1 << (b))
+#define	MASK2(a,b)		(MASK(a) | MASK(b))
+#define	STATE_SET(b,v)		((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
+#define	STATE_TEST(b)		(state & MASK(b))
+#define	STATE_SET_TEST(b,v)	((v) ? ((state |= MASK(b)), 1) : \
+				       ((state &= ~MASK(b)), 0))
+#define	SWITCH_STATE		(state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
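+	/*
+	 * Here, by contrast, a neighbor is contiguous only if it is a real
+	 * (not delayed) allocation that is adjacent both in file offset and
+	 * on disk and has the same extent state as the new allocation.
+	 */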
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
+	ep = &ifp->if_u1.if_extents[idx];
+	state = 0;
+	/*
+	 * Check and set flags if this segment has a left neighbor.
+	 */
+	if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+		xfs_bmbt_get_all(ep - 1, &left);
+		STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+	}
+	/*
+	 * Check and set flags if this segment has a current value.
+	 * Not true if we're inserting into the "hole" at eof.
+	 */
+	if (STATE_SET_TEST(RIGHT_VALID,
+			   idx <
+			   ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+		xfs_bmbt_get_all(ep, &right);
+		STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+	}
+	/*
+	 * We're inserting a real allocation between "left" and "right".
+	 * Set the contiguity flags.  Don't let extents get too large.
+	 */
+	STATE_SET(LEFT_CONTIG,
+		STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
+		left.br_startoff + left.br_blockcount == new->br_startoff &&
+		left.br_startblock + left.br_blockcount == new->br_startblock &&
+		left.br_state == new->br_state &&
+		left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+	STATE_SET(RIGHT_CONTIG,
+		STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
+		new->br_startoff + new->br_blockcount == right.br_startoff &&
+		new->br_startblock + new->br_blockcount ==
+		    right.br_startblock &&
+		new->br_state == right.br_state &&
+		new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+		(!STATE_TEST(LEFT_CONTIG) ||
+		 left.br_blockcount + new->br_blockcount +
+		     right.br_blockcount <= MAXEXTLEN));
+
+	/*
+	 * Select which case we're in here, and implement it.
+	 */
+	switch (SWITCH_STATE) {
+
+	case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+		/*
+		 * New allocation is contiguous with real allocations on the
+		 * left and on the right.
+		 * Merge all three into a single extent list entry.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
+			whichfork);
+		xfs_bmbt_set_blockcount(ep - 1,
+			left.br_blockcount + new->br_blockcount +
+			right.br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
+			whichfork);
+		xfs_bmap_trace_delete(fname, "LC|RC", ip,
+			idx, 1, whichfork);
+		xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
+		ifp->if_lastex = idx - 1;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+		if (cur == NULL) {
+			*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+			return 0;
+		}
+		*logflagsp = XFS_ILOG_CORE;
+		if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
+				right.br_startblock, right.br_blockcount, &i)))
+			return error;
+		ASSERT(i == 1);
+		if ((error = xfs_bmbt_delete(cur, &i)))
+			return error;
+		ASSERT(i == 1);
+		if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+			return error;
+		ASSERT(i == 1);
+		error = xfs_bmbt_update(cur, left.br_startoff,
+				left.br_startblock,
+				left.br_blockcount + new->br_blockcount +
+				right.br_blockcount, left.br_state);
+		return error;
+
+	case MASK(LEFT_CONTIG):
+		/*
+		 * New allocation is contiguous with a real allocation
+		 * on the left.
+		 * Merge the new allocation with the left neighbor.
+		 */
+		xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
+		xfs_bmbt_set_blockcount(ep - 1,
+			left.br_blockcount + new->br_blockcount);
+		xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
+		ifp->if_lastex = idx - 1;
+		if (cur == NULL) {
+			*logflagsp = XFS_ILOG_FEXT(whichfork);
+			return 0;
+		}
+		*logflagsp = 0;
+		if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
+				left.br_startblock, left.br_blockcount, &i)))
+			return error;
+		ASSERT(i == 1);
+		error = xfs_bmbt_update(cur, left.br_startoff,
+				left.br_startblock,
+				left.br_blockcount + new->br_blockcount,
+				left.br_state);
+		return error;
+
+	case MASK(RIGHT_CONTIG):
+		/*
+		 * New allocation is contiguous with a real allocation
+		 * on the right.
+		 * Merge the new allocation with the right neighbor.
+		 */
+		xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork);
+		xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
+			new->br_blockcount + right.br_blockcount,
+			right.br_state);
+		xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
+		ifp->if_lastex = idx;
+		if (cur == NULL) {
+			*logflagsp = XFS_ILOG_FEXT(whichfork);
+			return 0;
+		}
+		*logflagsp = 0;
+		if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
+				right.br_startblock, right.br_blockcount, &i)))
+			return error;
+		ASSERT(i == 1);
+		error = xfs_bmbt_update(cur, new->br_startoff,
+				new->br_startblock,
+				new->br_blockcount + right.br_blockcount,
+				right.br_state);
+		return error;
+
+	case 0:
+		/*
+		 * New allocation is not contiguous with another
+		 * real allocation.
+		 * Insert a new entry.
+		 */
+		xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
+			whichfork);
+		xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork);
+		ifp->if_lastex = idx;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+			XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+		if (cur == NULL) {
+			*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+			return 0;
+		}
+		*logflagsp = XFS_ILOG_CORE;
+		if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+				new->br_startblock, new->br_blockcount, &i)))
+			return error;
+		ASSERT(i == 0);
+		cur->bc_rec.b.br_state = new->br_state;
+		if ((error = xfs_bmbt_insert(cur, &i)))
+			return error;
+		ASSERT(i == 1);
+		return 0;
+	}
+#undef	MASK
+#undef	MASK2
+#undef	STATE_SET
+#undef	STATE_TEST
+#undef	STATE_SET_TEST
+#undef	SWITCH_STATE
+	/* NOTREACHED */
+	ASSERT(0);
+	return 0; /* keep gcc quiet */
+}
+
+#define XFS_ALLOC_GAP_UNITS	4
+
+/*
+ * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
+ * It figures out where to ask the underlying allocator to put the new extent.
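+ * On return, ap->rval and ap->alen describe the extent actually allocated
+ * (ap->rval is NULLFSBLOCK and ap->alen is 0 if nothing could be found);
+ * realtime files are allocated through xfs_rtallocate_extent, everything
+ * else through xfs_alloc_vextent.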
+ */
+STATIC int				/* error */
+xfs_bmap_alloc(
+	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
+{
+	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
+	xfs_alloctype_t	atype=0;	/* type for allocation routines */
+	int		error;		/* error return value */
+	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
+	xfs_mount_t	*mp;		/* mount point structure */
+	int		nullfb;		/* true if ap->firstblock isn't set */
+	int		rt;		/* true if inode is realtime */
+#ifdef __KERNEL__
+	xfs_extlen_t	prod=0;		/* product factor for allocators */
+	xfs_extlen_t	ralen=0;	/* realtime allocation length */
+#endif
+
+#define	ISVALID(x,y)	\
+	(rt ? \
+		(x) < mp->m_sb.sb_rblocks : \
+		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
+		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
+		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
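+	/*
+	 * ISVALID(x, y): for realtime files, x only has to lie within the
+	 * realtime device; otherwise x must be in the same allocation group
+	 * as y and within that group's valid block range, since the
+	 * heuristics below only trust a hint block that stays in the same
+	 * AG as the block it was derived from.
+	 */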
+
+	/*
+	 * Set up variables.
+	 */
+	mp = ap->ip->i_mount;
+	nullfb = ap->firstblock == NULLFSBLOCK;
+	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
+	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
+#ifdef __KERNEL__
+	if (rt) {
+		xfs_extlen_t	extsz;		/* file extent size for rt */
+		xfs_fileoff_t	nexto;		/* next file offset */
+		xfs_extlen_t	orig_alen;	/* original ap->alen */
+		xfs_fileoff_t	orig_end;	/* original off+len */
+		xfs_fileoff_t	orig_off;	/* original ap->off */
+		xfs_extlen_t	mod_off;	/* modulus calculations */
+		xfs_fileoff_t	prevo;		/* previous file offset */
+		xfs_rtblock_t	rtx;		/* realtime extent number */
+		xfs_extlen_t	temp;		/* temp for rt calculations */
+
+		/*
+		 * Set prod to match the realtime extent size.
+		 */
+		if (!(extsz = ap->ip->i_d.di_extsize))
+			extsz = mp->m_sb.sb_rextsize;
+		prod = extsz / mp->m_sb.sb_rextsize;
+		orig_off = ap->off;
+		orig_alen = ap->alen;
+		orig_end = orig_off + orig_alen;
+		/*
+		 * If the file offset is unaligned vs. the extent size
+		 * we need to align it.  This will be possible unless
+		 * the file was previously written with a kernel that didn't
+		 * perform this alignment.
+		 */
+		mod_off = do_mod(orig_off, extsz);
+		if (mod_off) {
+			ap->alen += mod_off;
+			ap->off -= mod_off;
+		}
+		/*
+		 * Same adjustment for the end of the requested area.
+		 */
+		if ((temp = (ap->alen % extsz)))
+			ap->alen += extsz - temp;
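+		/*
+		 * For example, with extsz == 4 a request for off 6, len 3
+		 * has at this point been widened to off 4, len 8, i.e.
+		 * rounded out to extent-size boundaries at both ends.
+		 */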
+		/*
+		 * If the previous block overlaps with this proposed allocation
+		 * then move the start forward without adjusting the length.
+		 */
+		prevo =
+			ap->prevp->br_startoff == NULLFILEOFF ?
+				0 :
+				(ap->prevp->br_startoff +
+				 ap->prevp->br_blockcount);
+		if (ap->off != orig_off && ap->off < prevo)
+			ap->off = prevo;
+		/*
+		 * If the next block overlaps with this proposed allocation
+		 * then move the start back without adjusting the length,
+		 * but not before offset 0.
+		 * This may of course make the start overlap the previous block,
+		 * and if we hit the offset 0 limit then the next block
+		 * can still overlap too.
+		 */
+		nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
+			NULLFILEOFF : ap->gotp->br_startoff;
+		if (!ap->eof &&
+		    ap->off + ap->alen != orig_end &&
+		    ap->off + ap->alen > nexto)
+			ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
+		/*
+		 * If we're now overlapping the next or previous extent that
+		 * means we can't fit an extsz piece in this hole.  Just move
+		 * the start forward to the first valid spot and set
+		 * the length so we hit the end.
+		 */
+		if ((ap->off != orig_off && ap->off < prevo) ||
+		    (ap->off + ap->alen != orig_end &&
+		     ap->off + ap->alen > nexto)) {
+			ap->off = prevo;
+			ap->alen = nexto - prevo;
+		}
+		/*
+		 * If the result isn't a multiple of rtextents we need to
+		 * remove blocks until it is.
+		 */
+		if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
+			/*
+			 * We're not covering the original request, or
+			 * we won't be able to once we fix the length.
+			 */
+			if (orig_off < ap->off ||
+			    orig_end > ap->off + ap->alen ||
+			    ap->alen - temp < orig_alen)
+				return XFS_ERROR(EINVAL);
+			/*
+			 * Try to fix it by moving the start up.
+			 */
+			if (ap->off + temp <= orig_off) {
+				ap->alen -= temp;
+				ap->off += temp;
+			}
+			/*
+			 * Try to fix it by moving the end in.
+			 */
+			else if (ap->off + ap->alen - temp >= orig_end)
+				ap->alen -= temp;
+			/*
+			 * Set the start to the minimum then trim the length.
+			 */
+			else {
+				ap->alen -= orig_off - ap->off;
+				ap->off = orig_off;
+				ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
+			}
+			/*
+			 * Result doesn't cover the request, fail it.
+			 */
+			if (orig_off < ap->off || orig_end > ap->off + ap->alen)
+				return XFS_ERROR(EINVAL);
+		}
+		ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
+		/*
+		 * If the offset & length are not perfectly aligned
+		 * then kill prod, it will just get us in trouble.
+		 */
+		if (do_mod(ap->off, extsz) || ap->alen % extsz)
+			prod = 1;
+		/*
+		 * Set ralen to be the actual requested length in rtextents.
+		 */
+		ralen = ap->alen / mp->m_sb.sb_rextsize;
+		/*
+		 * If the old value was close enough to MAXEXTLEN that
+		 * we rounded up to it, cut it back so it's valid again.
+		 * Note that if it's a really large request (bigger than
+		 * MAXEXTLEN), we don't hear about that number, and can't
+		 * adjust the starting point to match it.
+		 */
+		if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
+			ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+		/*
+		 * If it's an allocation to an empty file at offset 0,
+		 * pick an extent that will space things out in the rt area.
+		 */
+		if (ap->eof && ap->off == 0) {
+			error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
+			if (error)
+				return error;
+			ap->rval = rtx * mp->m_sb.sb_rextsize;
+		} else
+			ap->rval = 0;
+	}
+#else
+	if (rt)
+		ap->rval = 0;
+#endif	/* __KERNEL__ */
+	else if (nullfb)
+		ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+	else
+		ap->rval = ap->firstblock;
+	/*
+	 * If allocating at eof, and there's a previous real block,
+	 * try to use its last block as our starting point.
+	 */
+	if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
+	    !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
+	    ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
+		    ap->prevp->br_startblock)) {
+		ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
+		/*
+		 * Adjust for the gap between prevp and us.
+		 */
+		adjust = ap->off -
+			(ap->prevp->br_startoff + ap->prevp->br_blockcount);
+		if (adjust &&
+		    ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
+			ap->rval += adjust;
+	}
+	/*
+	 * If not at eof, then compare the two neighbor blocks.
+	 * Figure out whether either one gives us a good starting point,
+	 * and pick the better one.
+	 */
+	else if (!ap->eof) {
+		xfs_fsblock_t	gotbno;		/* right side block number */
+		xfs_fsblock_t	gotdiff=0;	/* right side difference */
+		xfs_fsblock_t	prevbno;	/* left side block number */
+		xfs_fsblock_t	prevdiff=0;	/* left side difference */
+
+		/*
+		 * If there's a previous (left) block, select a requested
+		 * start block based on it.
+		 */
+		if (ap->prevp->br_startoff != NULLFILEOFF &&
+		    !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
+		    (prevbno = ap->prevp->br_startblock +
+			       ap->prevp->br_blockcount) &&
+		    ISVALID(prevbno, ap->prevp->br_startblock)) {
+			/*
+			 * Calculate gap to end of previous block.
+			 */
+			adjust = prevdiff = ap->off -
+				(ap->prevp->br_startoff +
+				 ap->prevp->br_blockcount);
+			/*
+			 * Figure the startblock based on the previous block's
+			 * end and the gap size.
+			 * Heuristic!
+			 * If the gap is large relative to the piece we're
+			 * allocating, or using it gives us an invalid block
+			 * number, then just use the end of the previous block.
+			 */
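+			/*
+			 * With XFS_ALLOC_GAP_UNITS == 4, an 8 block
+			 * allocation bridges a gap of up to 32 blocks by
+			 * targeting the end of the previous extent plus the
+			 * gap; a larger gap leaves the target at the end of
+			 * the previous extent and penalizes prevdiff in the
+			 * comparison with the right neighbor further down.
+			 */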
+			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
+			    ISVALID(prevbno + prevdiff,
+				    ap->prevp->br_startblock))
+				prevbno += adjust;
+			else
+				prevdiff += adjust;
+			/*
+			 * If the firstblock forbids it, can't use it,
+			 * must use default.
+			 */
+			if (!rt && !nullfb &&
+			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
+				prevbno = NULLFSBLOCK;
+		}
+		/*
+		 * No previous block or can't follow it, just default.
+		 */
+		else
+			prevbno = NULLFSBLOCK;
+		/*
+		 * If there's a following (right) block, select a requested
+		 * start block based on it.
+		 */
+		if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
+			/*
+			 * Calculate gap to start of next block.
+			 */
+			adjust = gotdiff = ap->gotp->br_startoff - ap->off;
+			/*
+			 * Figure the startblock based on the next block's
+			 * start and the gap size.
+			 */
+			gotbno = ap->gotp->br_startblock;
+			/*
+			 * Heuristic!
+			 * If the gap is large relative to the piece we're
+			 * allocating, or using it gives us an invalid block
+			 * number, then just use the start of the next block
+			 * offset by our length.
+			 */
+			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
+			    ISVALID(gotbno - gotdiff, gotbno))
+				gotbno -= adjust;
+			else if (ISVALID(gotbno - ap->alen, gotbno)) {
+				gotbno -= ap->alen;
+				gotdiff += adjust - ap->alen;
+			} else
+				gotdiff += adjust;
+			/*
+			 * If the firstblock forbids it, can't use it,
+			 * must use default.
+			 */
+			if (!rt && !nullfb &&
+			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
+				gotbno = NULLFSBLOCK;
+		}
+		/*
+		 * No next block, just default.
+		 */
+		else
+			gotbno = NULLFSBLOCK;
+		/*
+		 * If both valid, pick the better one, else the only good
+		 * one, else ap->rval is already set (to 0 or the inode block).
+		 */
+		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
+			ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
+		else if (prevbno != NULLFSBLOCK)
+			ap->rval = prevbno;
+		else if (gotbno != NULLFSBLOCK)
+			ap->rval = gotbno;
+	}
+	/*
+	 * If allowed, use ap->rval; otherwise must use firstblock since
+	 * it's in the right allocation group.
+	 */
+	if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
+		;
+	else
+		ap->rval = ap->firstblock;
+	/*
+	 * Realtime allocation, done through xfs_rtallocate_extent.
+	 */
+	if (rt) {
+#ifndef __KERNEL__
+		ASSERT(0);
+#else
+		xfs_rtblock_t	rtb;
+
+		atype = ap->rval == 0 ?
+			XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
+		do_div(ap->rval, mp->m_sb.sb_rextsize);
+		rtb = ap->rval;
+		ap->alen = ralen;
+		if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
+				&ralen, atype, ap->wasdel, prod, &rtb)))
+			return error;
+		if (rtb == NULLFSBLOCK && prod > 1 &&
+		    (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
+						   ap->alen, &ralen, atype,
+						   ap->wasdel, 1, &rtb)))
+			return error;
+		ap->rval = rtb;
+		if (ap->rval != NULLFSBLOCK) {
+			ap->rval *= mp->m_sb.sb_rextsize;
+			ralen *= mp->m_sb.sb_rextsize;
+			ap->alen = ralen;
+			ap->ip->i_d.di_nblocks += ralen;
+			xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+			if (ap->wasdel)
+				ap->ip->i_delayed_blks -= ralen;
+			/*
+			 * Adjust the disk quota also. This was reserved
+			 * earlier.
+			 */
+			XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
+				ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
+						XFS_TRANS_DQ_RTBCOUNT,
+				(long) ralen);
+		} else
+			ap->alen = 0;
+#endif	/* __KERNEL__ */
+	}
+	/*
+	 * Normal allocation, done through xfs_alloc_vextent.
+	 */
+	else {
+		xfs_agnumber_t	ag;
+		xfs_alloc_arg_t	args;
+		xfs_extlen_t	blen;
+		xfs_extlen_t	delta;
+		int		isaligned;
+		xfs_extlen_t	longest;
+		xfs_extlen_t	need;
+		xfs_extlen_t	nextminlen=0;
+		int		notinit;
+		xfs_perag_t	*pag;
+		xfs_agnumber_t	startag;
+		int		tryagain;
+
+		tryagain = isaligned = 0;
+		args.tp = ap->tp;
+		args.mp = mp;
+		args.fsbno = ap->rval;
+		args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
+		blen = 0;
+		if (nullfb) {
+			args.type = XFS_ALLOCTYPE_START_BNO;
+			args.total = ap->total;
+			/*
+			 * Find the longest available space.
+			 * We're going to try for the whole allocation at once.
+			 */
+			startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
+			notinit = 0;
+			down_read(&mp->m_peraglock);
+			while (blen < ap->alen) {
+				pag = &mp->m_perag[ag];
+				if (!pag->pagf_init &&
+				    (error = xfs_alloc_pagf_init(mp, args.tp,
+					    ag, XFS_ALLOC_FLAG_TRYLOCK))) {
+					up_read(&mp->m_peraglock);
+					return error;
+				}
+				/*
+				 * See xfs_alloc_fix_freelist...
+				 */
+				if (pag->pagf_init) {
+					need = XFS_MIN_FREELIST_PAG(pag, mp);
+					delta = need > pag->pagf_flcount ?
+						need - pag->pagf_flcount : 0;
+					longest = (pag->pagf_longest > delta) ?
+						(pag->pagf_longest - delta) :
+						(pag->pagf_flcount > 0 ||
+						 pag->pagf_longest > 0);
+					if (blen < longest)
+						blen = longest;
+				} else
+					notinit = 1;
+				if (++ag == mp->m_sb.sb_agcount)
+					ag = 0;
+				if (ag == startag)
+					break;
+			}
+			up_read(&mp->m_peraglock);
+			/*
+			 * Since the above loop did a BUF_TRYLOCK, some AGs
+			 * may have been skipped, so it is still possible
+			 * that there is space for this request even though
+			 * the longest extent seen came up short.
+			 */
+			if (notinit || blen < ap->minlen)
+				args.minlen = ap->minlen;
+			/*
+			 * If the best seen length is less than the request
+			 * length, use the best as the minimum.
+			 */
+			else if (blen < ap->alen)
+				args.minlen = blen;
+			/*
+			 * Otherwise we've seen an extent as big as alen,
+			 * use that as the minimum.
+			 */
+			else
+				args.minlen = ap->alen;
+		} else if (ap->low) {
+			args.type = XFS_ALLOCTYPE_FIRST_AG;
+			args.total = args.minlen = ap->minlen;
+		} else {
+			args.type = XFS_ALLOCTYPE_NEAR_BNO;
+			args.total = ap->total;
+			args.minlen = ap->minlen;
+		}
+		if (ap->ip->i_d.di_extsize) {
+			args.prod = ap->ip->i_d.di_extsize;
+			if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
+				args.mod = (xfs_extlen_t)(args.prod - args.mod);
+		} else if (mp->m_sb.sb_blocksize >= NBPP) {
+			args.prod = 1;
+			args.mod = 0;
+		} else {
+			args.prod = NBPP >> mp->m_sb.sb_blocklog;
+			if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
+				args.mod = (xfs_extlen_t)(args.prod - args.mod);
+		}
+		/*
+		 * If we are not low on available data blocks, and the
+		 * underlying logical volume manager is a stripe, and
+		 * the file offset is zero then try to allocate data
+		 * blocks on stripe unit boundary.
+		 * NOTE: ap->aeof is only set if the allocation length
+		 * is >= the stripe unit and the allocation offset is
+		 * at the end of file.
+		 */
+		if (!ap->low && ap->aeof) {
+			if (!ap->off) {
+				args.alignment = mp->m_dalign;
+				atype = args.type;
+				isaligned = 1;
+				/*
+				 * Adjust for alignment
+				 */
+				if (blen > args.alignment && blen <= ap->alen)
+					args.minlen = blen - args.alignment;
+				args.minalignslop = 0;
+			} else {
+				/*
+				 * First try an exact bno allocation.
+				 * If it fails then do a near or start bno
+				 * allocation with alignment turned on.
+				 */
+				atype = args.type;
+				tryagain = 1;
+				args.type = XFS_ALLOCTYPE_THIS_BNO;
+				args.alignment = 1;
+				/*
+				 * Compute the minlen+alignment for the
+				 * next case.  Set slop so that the value
+				 * of minlen+alignment+slop doesn't go up
+				 * between the calls.
+				 */
+				if (blen > mp->m_dalign && blen <= ap->alen)
+					nextminlen = blen - mp->m_dalign;
+				else
+					nextminlen = args.minlen;
+				if (nextminlen + mp->m_dalign > args.minlen + 1)
+					args.minalignslop =
+						nextminlen + mp->m_dalign -
+						args.minlen - 1;
+				else
+					args.minalignslop = 0;
+			}
+		} else {
+			args.alignment = 1;
+			args.minalignslop = 0;
+		}
+		args.minleft = ap->minleft;
+		args.wasdel = ap->wasdel;
+		args.isfl = 0;
+		args.userdata = ap->userdata;
+		if ((error = xfs_alloc_vextent(&args)))
+			return error;
+		if (tryagain && args.fsbno == NULLFSBLOCK) {
+			/*
+			 * Exact allocation failed. Now try with alignment
+			 * turned on.
+			 */
+			args.type = atype;
+			args.fsbno = ap->rval;
+			args.alignment = mp->m_dalign;
+			args.minlen = nextminlen;
+			args.minalignslop = 0;
+			isaligned = 1;
+			if ((error = xfs_alloc_vextent(&args)))
+				return error;
+		}
+		if (isaligned && args.fsbno == NULLFSBLOCK) {
+			/*
+			 * allocation failed, so turn off alignment and
+			 * try again.
+			 */
+			args.type = atype;
+			args.fsbno = ap->rval;
+			args.alignment = 0;
+			if ((error = xfs_alloc_vextent(&args)))
+				return error;
+		}
+		if (args.fsbno == NULLFSBLOCK && nullfb &&
+		    args.minlen > ap->minlen) {
+			args.minlen = ap->minlen;
+			args.type = XFS_ALLOCTYPE_START_BNO;
+			args.fsbno = ap->rval;
+			if ((error = xfs_alloc_vextent(&args)))
+				return error;
+		}
+		if (args.fsbno == NULLFSBLOCK && nullfb) {
+			args.fsbno = 0;
+			args.type = XFS_ALLOCTYPE_FIRST_AG;
+			args.total = ap->minlen;
+			args.minleft = 0;
+			if ((error = xfs_alloc_vextent(&args)))
+				return error;
+			ap->low = 1;
+		}
+		if (args.fsbno != NULLFSBLOCK) {
+			ap->firstblock = ap->rval = args.fsbno;
+			ASSERT(nullfb || fb_agno == args.agno ||
+			       (ap->low && fb_agno < args.agno));
+			ap->alen = args.len;
+			ap->ip->i_d.di_nblocks += args.len;
+			xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+			if (ap->wasdel)
+				ap->ip->i_delayed_blks -= args.len;
+			/*
+			 * Adjust the disk quota also. This was reserved
+			 * earlier.
+			 */
+			XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
+				ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
+						XFS_TRANS_DQ_BCOUNT,
+				(long) args.len);
+		} else {
+			ap->rval = NULLFSBLOCK;
+			ap->alen = 0;
+		}
+	}
+	return 0;
+#undef	ISVALID
+}
+
+/*
+ * Transform a btree format file with only one leaf node, where the
+ * extents list will fit in the inode, into an extents format file.
+ * Since the extent list is already in-core, all we have to do is
+ * give up the space for the btree root and pitch the leaf block.
+ */
+STATIC int				/* error */
+xfs_bmap_btree_to_extents(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork)  /* data or attr fork */
+{
+	/* REFERENCED */
+	xfs_bmbt_block_t	*cblock;/* child btree block */
+	xfs_fsblock_t		cbno;	/* child block number */
+	xfs_buf_t		*cbp;	/* child block's buffer */
+	int			error;	/* error return value */
+	xfs_ifork_t		*ifp;	/* inode fork data */
+	xfs_mount_t		*mp;	/* mount point structure */
+	xfs_bmbt_ptr_t		*pp;	/* ptr to block address */
+	xfs_bmbt_block_t	*rblock;/* root btree block */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+	rblock = ifp->if_broot;
+	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1);
+	ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1);
+	ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
+	mp = ip->i_mount;
+	pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
+	*logflagsp = 0;
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1)))
+		return error;
+#endif
+	cbno = INT_GET(*pp, ARCH_CONVERT);
+	if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
+			XFS_BMAP_BTREE_REF)))
+		return error;
+	cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
+	if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
+		return error;
+	xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
+	ip->i_d.di_nblocks--;
+	XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+	xfs_trans_binval(tp, cbp);
+	if (cur->bc_bufs[0] == cbp)
+		cur->bc_bufs[0] = NULL;
+	xfs_iroot_realloc(ip, -1, whichfork);
+	ASSERT(ifp->if_broot == NULL);
+	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
+	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+	*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+	return 0;
+}
+
+/*
+ * Called by xfs_bmapi to update extent list structure and the btree
+ * after removing space (or undoing a delayed allocation).
+ */
+STATIC int				/* error */
+xfs_bmap_del_extent(
+	xfs_inode_t		*ip,	/* incore inode pointer */
+	xfs_trans_t		*tp,	/* current transaction pointer */
+	xfs_extnum_t		idx,	/* extent number to update/delete */
+	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
+	xfs_btree_cur_t		*cur,	/* if null, not a btree */
+	xfs_bmbt_irec_t		*del,	/* data to remove from extent list */
+	int			*logflagsp, /* inode logging flags */
+	int			whichfork, /* data or attr fork */
+	int			rsvd)	/* OK to allocate reserved blocks */
+{
+	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
+	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
+	xfs_fsblock_t		del_endblock=0;	/* first block past del */
+	xfs_fileoff_t		del_endoff;	/* first offset past del */
+	int			delay;	/* current block is delayed allocated */
+	int			do_fx;	/* free extent at end of routine */
+	xfs_bmbt_rec_t		*ep;	/* current extent entry pointer */
+	int			error;	/* error return value */
+	int			flags;	/* inode logging flags */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_del_extent";
+#endif
+	xfs_bmbt_irec_t		got;	/* current extent entry */
+	xfs_fileoff_t		got_endoff;	/* first offset past got */
+	int			i;	/* temp state */
+	xfs_ifork_t		*ifp;	/* inode fork pointer */
+	xfs_mount_t		*mp;	/* mount structure */
+	xfs_filblks_t		nblks;	/* quota/sb block count */
+	xfs_bmbt_irec_t		new;	/* new record to be inserted */
+	/* REFERENCED */
+	xfs_extnum_t		nextents;	/* number of extents in list */
+	uint			qfield;	/* quota field to update */
+	xfs_filblks_t		temp;	/* for indirect length calculations */
+	xfs_filblks_t		temp2;	/* for indirect length calculations */
+
+	XFS_STATS_INC(xs_del_exlist);
+	mp = ip->i_mount;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	ASSERT(idx >= 0 && idx < nextents);
+	ASSERT(del->br_blockcount > 0);
+	ep = &ifp->if_u1.if_extents[idx];
+	xfs_bmbt_get_all(ep, &got);
+	ASSERT(got.br_startoff <= del->br_startoff);
+	del_endoff = del->br_startoff + del->br_blockcount;
+	got_endoff = got.br_startoff + got.br_blockcount;
+	ASSERT(got_endoff >= del_endoff);
+	delay = ISNULLSTARTBLOCK(got.br_startblock);
+	ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
+	flags = 0;
+	qfield = 0;
+	error = 0;
+	/*
+	 * If deleting a real allocation, must free up the disk space.
+	 */
+	if (!delay) {
+		flags = XFS_ILOG_CORE;
+		/*
+		 * Realtime allocation.  Free it and record di_nblocks update.
+		 */
+		if (whichfork == XFS_DATA_FORK &&
+		    (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+			xfs_fsblock_t	bno;
+			xfs_filblks_t	len;
+
+			ASSERT(do_mod(del->br_blockcount,
+				      mp->m_sb.sb_rextsize) == 0);
+			ASSERT(do_mod(del->br_startblock,
+				      mp->m_sb.sb_rextsize) == 0);
+			bno = del->br_startblock;
+			len = del->br_blockcount;
+			do_div(bno, mp->m_sb.sb_rextsize);
+			do_div(len, mp->m_sb.sb_rextsize);
+			if ((error = xfs_rtfree_extent(ip->i_transp, bno,
+					(xfs_extlen_t)len)))
+				goto done;
+			do_fx = 0;
+			nblks = len * mp->m_sb.sb_rextsize;
+			qfield = XFS_TRANS_DQ_RTBCOUNT;
+		}
+		/*
+		 * Ordinary allocation.
+		 */
+		else {
+			do_fx = 1;
+			nblks = del->br_blockcount;
+			qfield = XFS_TRANS_DQ_BCOUNT;
+		}
+		/*
+		 * Set up del_endblock and cur for later.
+		 */
+		del_endblock = del->br_startblock + del->br_blockcount;
+		if (cur) {
+			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+					got.br_startblock, got.br_blockcount,
+					&i)))
+				goto done;
+			ASSERT(i == 1);
+		}
+		da_old = da_new = 0;
+	} else {
+		da_old = STARTBLOCKVAL(got.br_startblock);
+		da_new = 0;
+		nblks = 0;
+		do_fx = 0;
+	}
+	/*
+	 * Set flag value to use in switch statement.
+	 * Bit 1 (value 2) is set if the deletion starts at the start of
+	 * the extent, bit 0 (value 1) if it ends at the end of the extent.
+	 */
+	switch (((got.br_startoff == del->br_startoff) << 1) |
+		(got_endoff == del_endoff)) {
+	case 3:
+		/*
+		 * Matches the whole extent.  Delete the entry.
+		 */
+		xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork);
+		xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
+		ifp->if_lastex = idx;
+		if (delay)
+			break;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+		flags |= XFS_ILOG_CORE;
+		if (!cur) {
+			flags |= XFS_ILOG_FEXT(whichfork);
+			break;
+		}
+		if ((error = xfs_bmbt_delete(cur, &i)))
+			goto done;
+		ASSERT(i == 1);
+		break;
+
+	case 2:
+		/*
+		 * Deleting the first part of the extent.
+		 */
+		xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork);
+		xfs_bmbt_set_startoff(ep, del_endoff);
+		temp = got.br_blockcount - del->br_blockcount;
+		xfs_bmbt_set_blockcount(ep, temp);
+		ifp->if_lastex = idx;
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+			xfs_bmap_trace_post_update(fname, "2", ip, idx,
+				whichfork);
+			da_new = temp;
+			break;
+		}
+		xfs_bmbt_set_startblock(ep, del_endblock);
+		xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork);
+		if (!cur) {
+			flags |= XFS_ILOG_FEXT(whichfork);
+			break;
+		}
+		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
+				got.br_blockcount - del->br_blockcount,
+				got.br_state)))
+			goto done;
+		break;
+
+	case 1:
+		/*
+		 * Deleting the last part of the extent.
+		 */
+		temp = got.br_blockcount - del->br_blockcount;
+		xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork);
+		xfs_bmbt_set_blockcount(ep, temp);
+		ifp->if_lastex = idx;
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+			xfs_bmap_trace_post_update(fname, "1", ip, idx,
+				whichfork);
+			da_new = temp;
+			break;
+		}
+		xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork);
+		if (!cur) {
+			flags |= XFS_ILOG_FEXT(whichfork);
+			break;
+		}
+		if ((error = xfs_bmbt_update(cur, got.br_startoff,
+				got.br_startblock,
+				got.br_blockcount - del->br_blockcount,
+				got.br_state)))
+			goto done;
+		break;
+
+	case 0:
+		/*
+		 * Deleting the middle of the extent.
+		 */
+		temp = del->br_startoff - got.br_startoff;
+		xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork);
+		xfs_bmbt_set_blockcount(ep, temp);
+		new.br_startoff = del_endoff;
+		temp2 = got_endoff - del_endoff;
+		new.br_blockcount = temp2;
+		new.br_state = got.br_state;
+		if (!delay) {
+			new.br_startblock = del_endblock;
+			flags |= XFS_ILOG_CORE;
+			if (cur) {
+				if ((error = xfs_bmbt_update(cur,
+						got.br_startoff,
+						got.br_startblock, temp,
+						got.br_state)))
+					goto done;
+				if ((error = xfs_bmbt_increment(cur, 0, &i)))
+					goto done;
+				cur->bc_rec.b = new;
+				error = xfs_bmbt_insert(cur, &i);
+				if (error && error != ENOSPC)
+					goto done;
+				/*
+				 * If we get no-space back from the btree
+				 * insert, it tried a split and we have a
+				 * zero block reservation.
+				 * Fix up our state and return the error.
+				 */
+				if (error == ENOSPC) {
+					/*
+					 * Reset the cursor, don't trust
+					 * it after any insert operation.
+					 */
+					if ((error = xfs_bmbt_lookup_eq(cur,
+							got.br_startoff,
+							got.br_startblock,
+							temp, &i)))
+						goto done;
+					ASSERT(i == 1);
+					/*
+					 * Update the btree record back
+					 * to the original value.
+					 */
+					if ((error = xfs_bmbt_update(cur,
+							got.br_startoff,
+							got.br_startblock,
+							got.br_blockcount,
+							got.br_state)))
+						goto done;
+					/*
+					 * Reset the extent record back
+					 * to the original value.
+					 */
+					xfs_bmbt_set_blockcount(ep,
+						got.br_blockcount);
+					flags = 0;
+					error = XFS_ERROR(ENOSPC);
+					goto done;
+				}
+				ASSERT(i == 1);
+			} else
+				flags |= XFS_ILOG_FEXT(whichfork);
+			XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+		} else {
+			ASSERT(whichfork == XFS_DATA_FORK);
+			temp = xfs_bmap_worst_indlen(ip, temp);
+			xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+			temp2 = xfs_bmap_worst_indlen(ip, temp2);
+			new.br_startblock = NULLSTARTBLOCK((int)temp2);
+			da_new = temp + temp2;
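+			/*
+			 * The two worst-case indirect reservations can add
+			 * up to more than the original delayed reservation;
+			 * trim blocks alternately from each piece until the
+			 * total fits within da_old.
+			 */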
+			while (da_new > da_old) {
+				if (temp) {
+					temp--;
+					da_new--;
+					xfs_bmbt_set_startblock(ep,
+						NULLSTARTBLOCK((int)temp));
+				}
+				if (da_new == da_old)
+					break;
+				if (temp2) {
+					temp2--;
+					da_new--;
+					new.br_startblock =
+						NULLSTARTBLOCK((int)temp2);
+				}
+			}
+		}
+		xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork);
+		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL,
+			whichfork);
+		xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork);
+		ifp->if_lastex = idx + 1;
+		break;
+	}
+	/*
+	 * If we need to, add this extent to the list of extents to be freed.
+	 */
+	if (do_fx)
+		xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
+			mp);
+	/*
+	 * Adjust inode # blocks in the file.
+	 */
+	if (nblks)
+		ip->i_d.di_nblocks -= nblks;
+	/*
+	 * Adjust quota data.
+	 */
+	if (qfield)
+		XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
+
+	/*
+	 * Account for change in delayed indirect blocks.
+	 * Nothing to do for disk quota accounting here.
+	 */
+	ASSERT(da_old >= da_new);
+	if (da_old > da_new)
+		xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
+			rsvd);
+done:
+	*logflagsp = flags;
+	return error;
+}
+
+/*
+ * Remove the entry "free" from the free item list.  Prev points to the
+ * previous entry, unless "free" is the head of the list.
+ */
+STATIC void
+xfs_bmap_del_free(
+	xfs_bmap_free_t		*flist,	/* free item list header */
+	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
+	xfs_bmap_free_item_t	*free)	/* list item to be freed */
+{
+	if (prev)
+		prev->xbfi_next = free->xbfi_next;
+	else
+		flist->xbf_first = free->xbfi_next;
+	flist->xbf_count--;
+	kmem_zone_free(xfs_bmap_free_item_zone, free);
+}
+
+/*
+ * Remove count entries from the extents array for inode "ip", starting
+ * at index "idx".  Copies the remaining items down over the deleted ones,
+ * and gives back the excess memory.
+ */
+STATIC void
+xfs_bmap_delete_exlist(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* starting delete index */
+	xfs_extnum_t	count,		/* count of items to delete */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*base;		/* base of extent list */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_extnum_t	nextents;	/* number of extents in list after */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+	base = ifp->if_u1.if_extents;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count;
+	memmove(&base[idx], &base[idx + count],
+		(nextents - idx) * sizeof(*base));
+	xfs_iext_realloc(ip, -count, whichfork);
+}
+
+/*
+ * Convert an extents-format file into a btree-format file.
+ * The new file will have a root block (in the inode) and a single child block.
+ */
+STATIC int					/* error */
+xfs_bmap_extents_to_btree(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
+	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
+	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
+	int			wasdel,		/* converting a delayed alloc */
+	int			*logflagsp,	/* inode logging flags */
+	int			whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_block_t	*ablock;	/* allocated (child) bt block */
+	xfs_buf_t		*abp;		/* buffer for ablock */
+	xfs_alloc_arg_t		args;		/* allocation arguments */
+	xfs_bmbt_rec_t		*arp;		/* child record pointer */
+	xfs_bmbt_block_t	*block;		/* btree root block */
+	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
+	xfs_bmbt_rec_t		*ep;		/* extent list pointer */
+	int			error;		/* error return value */
+	xfs_extnum_t		i, cnt;		/* extent list index */
+	xfs_ifork_t		*ifp;		/* inode fork pointer */
+	xfs_bmbt_key_t		*kp;		/* root block key pointer */
+	xfs_mount_t		*mp;		/* mount structure */
+	xfs_extnum_t		nextents;	/* extent list size */
+	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	/*
+	 * Make space in the inode incore.
+	 */
+	xfs_iroot_realloc(ip, 1, whichfork);
+	ifp->if_flags |= XFS_IFBROOT;
+	/*
+	 * Fill in the root.
+	 */
+	block = ifp->if_broot;
+	INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
+	INT_SET(block->bb_level, ARCH_CONVERT, 1);
+	INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
+	INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
+	INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	/*
+	 * Need a cursor.  Can't allocate until bb_level is filled in.
+	 */
+	mp = ip->i_mount;
+	cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
+		whichfork);
+	cur->bc_private.b.firstblock = *firstblock;
+	cur->bc_private.b.flist = flist;
+	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+	/*
+	 * Convert to a btree with two levels, one record in root.
+	 */
+	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+	args.tp = tp;
+	args.mp = mp;
+	if (*firstblock == NULLFSBLOCK) {
+		args.type = XFS_ALLOCTYPE_START_BNO;
+		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
+	} else if (flist->xbf_low) {
+		args.type = XFS_ALLOCTYPE_START_BNO;
+		args.fsbno = *firstblock;
+	} else {
+		args.type = XFS_ALLOCTYPE_NEAR_BNO;
+		args.fsbno = *firstblock;
+	}
+	args.minlen = args.maxlen = args.prod = 1;
+	args.total = args.minleft = args.alignment = args.mod = args.isfl =
+		args.minalignslop = 0;
+	args.wasdel = wasdel;
+	*logflagsp = 0;
+	if ((error = xfs_alloc_vextent(&args))) {
+		xfs_iroot_realloc(ip, -1, whichfork);
+		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+		return error;
+	}
+	/*
+	 * Allocation can't fail, the space was reserved.
+	 */
+	ASSERT(args.fsbno != NULLFSBLOCK);
+	ASSERT(*firstblock == NULLFSBLOCK ||
+	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
+	       (flist->xbf_low &&
+		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
+	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
+	cur->bc_private.b.allocated++;
+	ip->i_d.di_nblocks++;
+	XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
+	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
+	/*
+	 * Fill in the child block.
+	 */
+	ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
+	INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
+	ablock->bb_level = 0;
+	INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
+	INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
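+	/*
+	 * Copy only the real extents into the new leaf block; delayed
+	 * allocations (null startblock) have no on-disk representation.
+	 */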
+	for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
+		if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
+			arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
+			arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
+			arp++; cnt++;
+		}
+	}
+	INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt);
+	ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork));
+	/*
+	 * Fill in the root key and pointer.
+	 */
+	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
+	arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
+	INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp));
+	pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+	INT_SET(*pp, ARCH_CONVERT, args.fsbno);
+	/*
+	 * Do all this logging at the end so that
+	 * the root is at the right level.
+	 */
+	xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
+	xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT));
+	ASSERT(*curp == NULL);
+	*curp = cur;
+	*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
+	return 0;
+}
+
+/*
+ * Insert new item(s) into the extent list for inode "ip".
+ * The "count" new items are inserted at offset "idx".
+ */
+STATIC void
+xfs_bmap_insert_exlist(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* starting index of new items */
+	xfs_extnum_t	count,		/* number of inserted items */
+	xfs_bmbt_irec_t	*new,		/* items to insert */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*base;		/* extent list base */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_extnum_t	nextents;	/* extent list size */
+	xfs_extnum_t	to;		/* extent list index */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+	xfs_iext_realloc(ip, count, whichfork);
+	base = ifp->if_u1.if_extents;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	memmove(&base[idx + count], &base[idx],
+		(nextents - (idx + count)) * sizeof(*base));
+	for (to = idx; to < idx + count; to++, new++)
+		xfs_bmbt_set_all(&base[to], new);
+}
+
+/*
+ * Convert a local file to an extents file.
+ * This code cannot be used for data forks of regular files,
+ * since the file data needs to get logged so things will stay consistent.
+ * (The bmap-level manipulations are ok, though).
+ */
+STATIC int				/* error */
+xfs_bmap_local_to_extents(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
+	xfs_extlen_t	total,		/* total blocks needed by transaction */
+	int		*logflagsp,	/* inode logging flags */
+	int		whichfork)	/* data or attr fork */
+{
+	int		error;		/* error return value */
+	int		flags;		/* logging flags returned */
+#ifdef XFS_BMAP_TRACE
+	static char	fname[] = "xfs_bmap_local_to_extents";
+#endif
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+
+	/*
+	 * We don't want to deal with the case of keeping inode data inline yet.
+	 * So passing in the data fork of a regular inode is invalid.
+	 */
+	ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
+		 whichfork == XFS_DATA_FORK));
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+	flags = 0;
+	error = 0;
+	if (ifp->if_bytes) {
+		xfs_alloc_arg_t	args;	/* allocation arguments */
+		xfs_buf_t	*bp;	/* buffer for extent list block */
+		xfs_bmbt_rec_t	*ep;	/* extent list pointer */
+
+		args.tp = tp;
+		args.mp = ip->i_mount;
+		ASSERT(ifp->if_flags & XFS_IFINLINE);
+		/*
+		 * Allocate a block.  We know we need only one, since the
+		 * file currently fits in an inode.
+		 */
+		if (*firstblock == NULLFSBLOCK) {
+			args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
+			args.type = XFS_ALLOCTYPE_START_BNO;
+		} else {
+			args.fsbno = *firstblock;
+			args.type = XFS_ALLOCTYPE_NEAR_BNO;
+		}
+		args.total = total;
+		args.mod = args.minleft = args.alignment = args.wasdel =
+			args.isfl = args.minalignslop = 0;
+		args.minlen = args.maxlen = args.prod = 1;
+		if ((error = xfs_alloc_vextent(&args)))
+			goto done;
+		/*
+		 * Can't fail, the space was reserved.
+		 */
+		ASSERT(args.fsbno != NULLFSBLOCK);
+		ASSERT(args.len == 1);
+		*firstblock = args.fsbno;
+		bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+		memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
+			ifp->if_bytes);
+		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
+		xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+		xfs_iext_realloc(ip, 1, whichfork);
+		ep = ifp->if_u1.if_extents;
+		xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
+		xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
+		XFS_IFORK_NEXT_SET(ip, whichfork, 1);
+		ip->i_d.di_nblocks = 1;
+		XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
+			XFS_TRANS_DQ_BCOUNT, 1L);
+		flags |= XFS_ILOG_FEXT(whichfork);
+	} else
+		ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
+	ifp->if_flags &= ~XFS_IFINLINE;
+	ifp->if_flags |= XFS_IFEXTENTS;
+	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+	flags |= XFS_ILOG_CORE;
+done:
+	*logflagsp = flags;
+	return error;
+}
+
+xfs_bmbt_rec_t *			/* pointer to found extent entry */
+xfs_bmap_do_search_extents(
+	xfs_bmbt_rec_t	*base,		/* base of extent list */
+	xfs_extnum_t	lastx,		/* last extent index used */
+	xfs_extnum_t	nextents,	/* extent list size */
+	xfs_fileoff_t	bno,		/* block number searched for */
+	int		*eofp,		/* out: end of file found */
+	xfs_extnum_t	*lastxp,	/* out: last extent index */
+	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
+	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
+{
+	xfs_bmbt_rec_t	*ep;		/* extent list entry pointer */
+	xfs_bmbt_irec_t	got;		/* extent list entry, decoded */
+	int		high;		/* high index of binary search */
+	int		low;		/* low index of binary search */
+
+	/*
+	 * Initialize the extent entry structure to catch access to
+	 * uninitialized br_startblock field.
+	 */
+	got.br_startoff = 0xffa5a5a5a5a5a5a5LL;
+	got.br_blockcount = 0xa55a5a5a5a5a5a5aLL;
+	got.br_state = XFS_EXT_INVALID;
+
+#if XFS_BIG_BLKNOS
+	got.br_startblock = 0xffffa5a5a5a5a5a5LL;
+#else
+	got.br_startblock = 0xffffa5a5;
+#endif
+
+	if (lastx != NULLEXTNUM && lastx < nextents)
+		ep = base + lastx;
+	else
+		ep = NULL;
+	prevp->br_startoff = NULLFILEOFF;
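+	/*
+	 * Fast path: check whether bno falls within the most recently
+	 * used extent (lastx) or its immediate successor before falling
+	 * back to a binary search of the whole list.
+	 */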
+	if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) &&
+	    bno < got.br_startoff +
+		  (got.br_blockcount = xfs_bmbt_get_blockcount(ep)))
+		*eofp = 0;
+	else if (ep && lastx < nextents - 1 &&
+		 bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) &&
+		 bno < got.br_startoff +
+		       (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) {
+		lastx++;
+		ep++;
+		*eofp = 0;
+	} else if (nextents == 0)
+		*eofp = 1;
+	else if (bno == 0 &&
+		 (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) {
+		ep = base;
+		lastx = 0;
+		got.br_blockcount = xfs_bmbt_get_blockcount(ep);
+		*eofp = 0;
+	} else {
+		/* binary search the extents array */
+		low = 0;
+		high = nextents - 1;
+		while (low <= high) {
+			XFS_STATS_INC(xs_cmp_exlist);
+			lastx = (low + high) >> 1;
+			ep = base + lastx;
+			got.br_startoff = xfs_bmbt_get_startoff(ep);
+			got.br_blockcount = xfs_bmbt_get_blockcount(ep);
+			if (bno < got.br_startoff)
+				high = lastx - 1;
+			else if (bno >= got.br_startoff + got.br_blockcount)
+				low = lastx + 1;
+			else {
+				got.br_startblock = xfs_bmbt_get_startblock(ep);
+				got.br_state = xfs_bmbt_get_state(ep);
+				*eofp = 0;
+				*lastxp = lastx;
+				*gotp = got;
+				return ep;
+			}
+		}
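+		/*
+		 * Fell out of the binary search: bno lies in a hole.
+		 * Work out whether the hole is before the extent we
+		 * stopped on or past the last extent (end of file).
+		 */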
+		if (bno >= got.br_startoff + got.br_blockcount) {
+			lastx++;
+			if (lastx == nextents) {
+				*eofp = 1;
+				got.br_startblock = xfs_bmbt_get_startblock(ep);
+				got.br_state = xfs_bmbt_get_state(ep);
+				*prevp = got;
+				ep = NULL;
+			} else {
+				*eofp = 0;
+				xfs_bmbt_get_all(ep, prevp);
+				ep++;
+				got.br_startoff = xfs_bmbt_get_startoff(ep);
+				got.br_blockcount = xfs_bmbt_get_blockcount(ep);
+			}
+		} else {
+			*eofp = 0;
+			if (ep > base)
+				xfs_bmbt_get_all(ep - 1, prevp);
+		}
+	}
+	if (ep) {
+		got.br_startblock = xfs_bmbt_get_startblock(ep);
+		got.br_state = xfs_bmbt_get_state(ep);
+	}
+	*lastxp = lastx;
+	*gotp = got;
+	return ep;
+}
+
+/*
+ * Search the extents list for the inode, for the extent containing bno.
+ * If bno lies in a hole, point to the next entry.  If bno lies past eof,
+ * *eofp will be set, and *prevp will contain the last entry (null if none).
+ * Else, *lastxp will be set to the index of the found entry and *gotp
+ * will contain that entry.
+ */
+STATIC xfs_bmbt_rec_t *                 /* pointer to found extent entry */
+xfs_bmap_search_extents(
+	xfs_inode_t     *ip,            /* incore inode pointer */
+	xfs_fileoff_t   bno,            /* block number searched for */
+	int             whichfork,      /* data or attr fork */
+	int             *eofp,          /* out: end of file found */
+	xfs_extnum_t    *lastxp,        /* out: last extent index */
+	xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
+	xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
+{
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_bmbt_rec_t  *base;          /* base of extent list */
+	xfs_extnum_t    lastx;          /* last extent index used */
+	xfs_extnum_t    nextents;       /* extent list size */
+	xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
+	int		rt;		/* realtime flag    */
+
+	XFS_STATS_INC(xs_look_exlist);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	lastx = ifp->if_lastex;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	base = &ifp->if_u1.if_extents[0];
+
+	ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
+					  lastxp, gotp, prevp);
+	rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME;
+	if (!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) {
+		cmn_err(CE_PANIC, "Access to block zero: fs: <%s> inode: %lld "
+			"start_block : %llx start_off : %llx blkcnt : %llx "
+			"extent-state : %x \n",
+			(ip->i_mount)->m_fsname, (long long)ip->i_ino,
+			gotp->br_startblock, gotp->br_startoff,
+			gotp->br_blockcount, gotp->br_state);
+	}
+	return ep;
+}
+
+
+#ifdef XFS_BMAP_TRACE
+ktrace_t	*xfs_bmap_trace_buf;
+
+/*
+ * Add a bmap trace buffer entry.  Base routine for the others.
+ */
+STATIC void
+xfs_bmap_trace_addentry(
+	int		opcode,		/* operation */
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(ies) */
+	xfs_extnum_t	cnt,		/* count of entries, 1 or 2 */
+	xfs_bmbt_rec_t	*r1,		/* first record */
+	xfs_bmbt_rec_t	*r2,		/* second record or null */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	tr2;
+
+	ASSERT(cnt == 1 || cnt == 2);
+	ASSERT(r1 != NULL);
+	if (cnt == 1) {
+		ASSERT(r2 == NULL);
+		r2 = &tr2;
+		memset(&tr2, 0, sizeof(tr2));
+	} else
+		ASSERT(r2 != NULL);
+	ktrace_enter(xfs_bmap_trace_buf,
+		(void *)(__psint_t)(opcode | (whichfork << 16)),
+		(void *)fname, (void *)desc, (void *)ip,
+		(void *)(__psint_t)idx,
+		(void *)(__psint_t)cnt,
+		(void *)(__psunsigned_t)(ip->i_ino >> 32),
+		(void *)(__psunsigned_t)(unsigned)ip->i_ino,
+		(void *)(__psunsigned_t)(r1->l0 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r1->l0),
+		(void *)(__psunsigned_t)(r1->l1 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r1->l1),
+		(void *)(__psunsigned_t)(r2->l0 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r2->l0),
+		(void *)(__psunsigned_t)(r2->l1 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r2->l1)
+		);
+	ASSERT(ip->i_xtrace);
+	ktrace_enter(ip->i_xtrace,
+		(void *)(__psint_t)(opcode | (whichfork << 16)),
+		(void *)fname, (void *)desc, (void *)ip,
+		(void *)(__psint_t)idx,
+		(void *)(__psint_t)cnt,
+		(void *)(__psunsigned_t)(ip->i_ino >> 32),
+		(void *)(__psunsigned_t)(unsigned)ip->i_ino,
+		(void *)(__psunsigned_t)(r1->l0 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r1->l0),
+		(void *)(__psunsigned_t)(r1->l1 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r1->l1),
+		(void *)(__psunsigned_t)(r2->l0 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r2->l0),
+		(void *)(__psunsigned_t)(r2->l1 >> 32),
+		(void *)(__psunsigned_t)(unsigned)(r2->l1)
+		);
+}
+
+/*
+ * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
+ */
+STATIC void
+xfs_bmap_trace_delete(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(entries) deleted */
+	xfs_extnum_t	cnt,		/* count of entries deleted, 1 or 2 */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
+		cnt, &ifp->if_u1.if_extents[idx],
+		cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL,
+		whichfork);
+}
+
+/*
+ * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
+ * reading in the extents list from the disk (in the btree).
+ */
+STATIC void
+xfs_bmap_trace_insert(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry(entries) inserted */
+	xfs_extnum_t	cnt,		/* count of entries inserted, 1 or 2 */
+	xfs_bmbt_irec_t	*r1,		/* inserted record 1 */
+	xfs_bmbt_irec_t	*r2,		/* inserted record 2 or null */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	tr1;		/* compressed record 1 */
+	xfs_bmbt_rec_t	tr2;		/* compressed record 2 if needed */
+
+	xfs_bmbt_set_all(&tr1, r1);
+	if (cnt == 2) {
+		ASSERT(r2 != NULL);
+		xfs_bmbt_set_all(&tr2, r2);
+	} else {
+		ASSERT(cnt == 1);
+		ASSERT(r2 == NULL);
+	}
+	xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
+		cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
+}
+
+/*
+ * Add bmap trace entry after updating an extent list entry in place.
+ */
+STATIC void
+xfs_bmap_trace_post_update(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry updated */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
+		1, &ifp->if_u1.if_extents[idx], NULL, whichfork);
+}
+
+/*
+ * Add bmap trace entry prior to updating an extent list entry in place.
+ */
+STATIC void
+xfs_bmap_trace_pre_update(
+	char		*fname,		/* function name */
+	char		*desc,		/* operation description */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	idx,		/* index of entry to be updated */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
+		&ifp->if_u1.if_extents[idx], NULL, whichfork);
+}
+#endif	/* XFS_BMAP_TRACE */
+
+/*
+ * Compute the worst-case number of indirect blocks that will be used
+ * for ip's delayed extent of length "len".
+ */
+STATIC xfs_filblks_t
+xfs_bmap_worst_indlen(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_filblks_t	len)		/* delayed extent length */
+{
+	int		level;		/* btree level number */
+	int		maxrecs;	/* maximum record count at this level */
+	xfs_mount_t	*mp;		/* mount structure */
+	xfs_filblks_t	rval;		/* return value */
+
+	mp = ip->i_mount;
+	maxrecs = mp->m_bmap_dmxr[0];
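+	/*
+	 * Each pass computes how many blocks this level needs to map the
+	 * blocks of the level below (ceil(len / maxrecs)); once a level
+	 * fits in one block, each remaining level adds a single block.
+	 */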
+	for (level = 0, rval = 0;
+	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
+	     level++) {
+		len += maxrecs - 1;
+		do_div(len, maxrecs);
+		rval += len;
+		if (len == 1)
+			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+				level - 1;
+		if (level == 0)
+			maxrecs = mp->m_bmap_dmxr[1];
+	}
+	return rval;
+}
+
+#if defined(XFS_RW_TRACE)
+STATIC void
+xfs_bunmap_trace(
+	xfs_inode_t		*ip,
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	inst_t			*ra)
+{
+	if (ip->i_rwtrace == NULL)
+		return;
+	ktrace_enter(ip->i_rwtrace,
+		(void *)(__psint_t)XFS_BUNMAPI,
+		(void *)ip,
+		(void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
+		(void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
+		(void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
+		(void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
+		(void *)(__psint_t)len,
+		(void *)(__psint_t)flags,
+		(void *)(unsigned long)current_cpu(),
+		(void *)ra,
+		(void *)0,
+		(void *)0,
+		(void *)0,
+		(void *)0,
+		(void *)0,
+		(void *)0);
+}
+#endif
+
+/*
+ * Convert inode from non-attributed to attributed.
+ * Must not be in a transaction, ip must not be locked.
+ */
+int						/* error code */
+xfs_bmap_add_attrfork(
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	int			rsvd)		/* OK to allocate reserved blocks in trans */
+{
+	int			blks;		/* space reservation */
+	int			committed;	/* xaction was committed */
+	int			error;		/* error return value */
+	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
+	xfs_bmap_free_t		flist;		/* freed extent list */
+	int			logflags;	/* logging flags */
+	xfs_mount_t		*mp;		/* mount structure */
+	unsigned long		s;		/* spinlock spl value */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	ASSERT(ip->i_df.if_ext_max ==
+	       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
+	if (XFS_IFORK_Q(ip))
+		return 0;
+	mp = ip->i_mount;
+	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
+	tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
+	blks = XFS_ADDAFORK_SPACE_RES(mp);
+	if (rsvd)
+		tp->t_flags |= XFS_TRANS_RESERVE;
+	if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
+		goto error0;
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
+			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+			XFS_QMOPT_RES_REGBLKS);
+	if (error) {
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
+		return error;
+	}
+	if (XFS_IFORK_Q(ip))
+		goto error1;
+	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
+		/*
+		 * For inodes coming from pre-6.2 filesystems.
+		 */
+		ASSERT(ip->i_d.di_aformat == 0);
+		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+	}
+	ASSERT(ip->i_d.di_anextents == 0);
+	VN_HOLD(XFS_ITOV(ip));
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
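+	/*
+	 * Pick the attribute fork offset from the data fork format:
+	 * device and UUID inodes only need room for their fixed-size
+	 * payload, the other formats use the mount's default offset.
+	 */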
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_DEV:
+		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+		break;
+	case XFS_DINODE_FMT_UUID:
+		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
+		break;
+	case XFS_DINODE_FMT_LOCAL:
+	case XFS_DINODE_FMT_EXTENTS:
+	case XFS_DINODE_FMT_BTREE:
+		ip->i_d.di_forkoff = mp->m_attroffset >> 3;
+		break;
+	default:
+		ASSERT(0);
+		error = XFS_ERROR(EINVAL);
+		goto error1;
+	}
+	ip->i_df.if_ext_max =
+		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	ASSERT(ip->i_afp == NULL);
+	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+	ip->i_afp->if_ext_max =
+		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	ip->i_afp->if_flags = XFS_IFEXTENTS;
+	logflags = 0;
+	XFS_BMAP_INIT(&flist, &firstblock);
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_LOCAL:
+		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
+			&logflags);
+		break;
+	case XFS_DINODE_FMT_EXTENTS:
+		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
+			&flist, &logflags);
+		break;
+	case XFS_DINODE_FMT_BTREE:
+		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
+			&logflags);
+		break;
+	default:
+		error = 0;
+		break;
+	}
+	if (logflags)
+		xfs_trans_log_inode(tp, ip, logflags);
+	if (error)
+		goto error2;
+	if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
+		s = XFS_SB_LOCK(mp);
+		if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
+			XFS_SB_VERSION_ADDATTR(&mp->m_sb);
+			XFS_SB_UNLOCK(mp, s);
+			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
+		} else
+			XFS_SB_UNLOCK(mp, s);
+	}
+	if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed)))
+		goto error2;
+	error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
+	ASSERT(ip->i_df.if_ext_max ==
+	       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
+	return error;
+error2:
+	xfs_bmap_cancel(&flist);
+error1:
+	ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+error0:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	ASSERT(ip->i_df.if_ext_max ==
+	       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
+	return error;
+}
+
+/*
+ * Add the extent to the list of extents to be free at transaction end.
+ * The list is maintained sorted (by block number).
+ */
+/* ARGSUSED */
+void
+xfs_bmap_add_free(
+	xfs_fsblock_t		bno,		/* fs block number of extent */
+	xfs_filblks_t		len,		/* length of extent */
+	xfs_bmap_free_t		*flist,		/* list of extents */
+	xfs_mount_t		*mp)		/* mount point structure */
+{
+	xfs_bmap_free_item_t	*cur;		/* current (next) element */
+	xfs_bmap_free_item_t	*new;		/* new element */
+	xfs_bmap_free_item_t	*prev;		/* previous element */
+#ifdef DEBUG
+	xfs_agnumber_t		agno;
+	xfs_agblock_t		agbno;
+
+	ASSERT(bno != NULLFSBLOCK);
+	ASSERT(len > 0);
+	ASSERT(len <= MAXEXTLEN);
+	ASSERT(!ISNULLSTARTBLOCK(bno));
+	agno = XFS_FSB_TO_AGNO(mp, bno);
+	agbno = XFS_FSB_TO_AGBNO(mp, bno);
+	ASSERT(agno < mp->m_sb.sb_agcount);
+	ASSERT(agbno < mp->m_sb.sb_agblocks);
+	ASSERT(len < mp->m_sb.sb_agblocks);
+	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
+#endif
+	ASSERT(xfs_bmap_free_item_zone != NULL);
+	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+	new->xbfi_startblock = bno;
+	new->xbfi_blockcount = (xfs_extlen_t)len;
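+	/*
+	 * Find the insertion point that keeps the list sorted by
+	 * starting block number.
+	 */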
+	for (prev = NULL, cur = flist->xbf_first;
+	     cur != NULL;
+	     prev = cur, cur = cur->xbfi_next) {
+		if (cur->xbfi_startblock >= bno)
+			break;
+	}
+	if (prev)
+		prev->xbfi_next = new;
+	else
+		flist->xbf_first = new;
+	new->xbfi_next = cur;
+	flist->xbf_count++;
+}
+
+/*
+ * Compute and fill in the value of the maximum depth of a bmap btree
+ * in this filesystem.  Done once, during mount.
+ */
+void
+xfs_bmap_compute_maxlevels(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	int		whichfork)	/* data or attr fork */
+{
+	int		level;		/* btree level */
+	uint		maxblocks;	/* max blocks at this level */
+	uint		maxleafents;	/* max leaf entries possible */
+	int		maxrootrecs;	/* max records in root block */
+	int		minleafrecs;	/* min records in leaf block */
+	int		minnoderecs;	/* min records in node block */
+	int		sz;		/* root block size */
+
+	/*
+	 * The maximum number of extents in a file, hence the maximum
+	 * number of leaf entries, is controlled by the type of di_nextents
+	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
+	 * (a signed 16-bit number, xfs_aextnum_t).
+	 */
+	maxleafents = (whichfork == XFS_DATA_FORK) ? MAXEXTNUM : MAXAEXTNUM;
+	minleafrecs = mp->m_bmap_dmnr[0];
+	minnoderecs = mp->m_bmap_dmnr[1];
+	sz = (whichfork == XFS_DATA_FORK) ?
+		mp->m_attroffset :
+		mp->m_sb.sb_inodesize - mp->m_attroffset;
+	maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
+	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
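+	/*
+	 * Work up from the leaf blocks: each pass computes how many
+	 * blocks the next level needs, until everything fits in the
+	 * inode root; the number of passes is the maximum depth.
+	 */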
+	for (level = 1; maxblocks > 1; level++) {
+		if (maxblocks <= maxrootrecs)
+			maxblocks = 1;
+		else
+			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+	}
+	mp->m_bm_maxlevels[whichfork] = level;
+}
+
+/*
+ * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
+ * caller.  Frees all the extents that need freeing, which must be done
+ * last due to locking considerations.  We never free any extents in
+ * the first transaction.  This is to allow the caller to make the first
+ * transaction a synchronous one so that the pointers to the data being
+ * broken in this transaction will be permanent before the data is actually
+ * freed.  This is necessary to prevent blocks from being reallocated
+ * and written to before the free and reallocation are actually permanent.
+ * We do not just make the first transaction synchronous here, because
+ * there are more efficient ways to gain the same protection in some cases
+ * (see the file truncation code).
+ *
+ * Returns 1 in the "committed" parameter if the given transaction was
+ * committed and a new one started, and 0 otherwise.
+ */
+/*ARGSUSED*/
+int						/* error */
+xfs_bmap_finish(
+	xfs_trans_t		**tp,		/* transaction pointer addr */
+	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
+	xfs_fsblock_t		firstblock,	/* controlled ag for allocs */
+	int			*committed)	/* xact committed or not */
+{
+	xfs_efd_log_item_t	*efd;		/* extent free data */
+	xfs_efi_log_item_t	*efi;		/* extent free intention */
+	int			error;		/* error return value */
+	xfs_bmap_free_item_t	*free;		/* free extent list item */
+	unsigned int		logres;		/* new log reservation */
+	unsigned int		logcount;	/* new log count */
+	xfs_mount_t		*mp;		/* filesystem mount structure */
+	xfs_bmap_free_item_t	*next;		/* next item on free list */
+	xfs_trans_t		*ntp;		/* new transaction pointer */
+
+	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+	if (flist->xbf_count == 0) {
+		*committed = 0;
+		return 0;
+	}
+	ntp = *tp;
+	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
+	for (free = flist->xbf_first; free; free = free->xbfi_next)
+		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
+			free->xbfi_blockcount);
+	logres = ntp->t_log_res;
+	logcount = ntp->t_log_count;
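+	/*
+	 * Roll the transaction: duplicate it, commit the original (which
+	 * carries the EFI), then re-reserve log space in the new
+	 * transaction for the actual frees.
+	 */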
+	ntp = xfs_trans_dup(*tp);
+	error = xfs_trans_commit(*tp, 0, NULL);
+	*tp = ntp;
+	*committed = 1;
+	/*
+	 * We have a new transaction, so we should return committed=1,
+	 * even though we're returning an error.
+	 */
+	if (error) {
+		return error;
+	}
+	if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
+			logcount)))
+		return error;
+	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
+	for (free = flist->xbf_first; free != NULL; free = next) {
+		next = free->xbfi_next;
+		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
+				free->xbfi_blockcount))) {
+			/*
+			 * The bmap free list will be cleaned up at a
+			 * higher level.  The EFI will be canceled when
+			 * this transaction is aborted.
+			 * Need to force shutdown here to make sure it
+			 * happens, since this transaction may not be
+			 * dirty yet.
+			 */
+			mp = ntp->t_mountp;
+			if (!XFS_FORCED_SHUTDOWN(mp))
+				xfs_force_shutdown(mp,
+						   (error == EFSCORRUPTED) ?
+						   XFS_CORRUPT_INCORE :
+						   XFS_METADATA_IO_ERROR);
+			return error;
+		}
+		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
+			free->xbfi_blockcount);
+		xfs_bmap_del_free(flist, NULL, free);
+	}
+	return 0;
+}
+
+/*
+ * Free up any items left in the list.
+ */
+void
+xfs_bmap_cancel(
+	xfs_bmap_free_t		*flist)	/* list of bmap_free_items */
+{
+	xfs_bmap_free_item_t	*free;	/* free list item */
+	xfs_bmap_free_item_t	*next;
+
+	if (flist->xbf_count == 0)
+		return;
+	ASSERT(flist->xbf_first != NULL);
+	for (free = flist->xbf_first; free; free = next) {
+		next = free->xbfi_next;
+		xfs_bmap_del_free(flist, NULL, free);
+	}
+	ASSERT(flist->xbf_count == 0);
+}
+
+/*
+ * Returns the file-relative block number of the first unused block(s)
+ * in the file with at least "len" logically contiguous blocks free.
+ * This is the lowest-address hole if the file has holes, else the first block
+ * past the end of file.
+ * Return 0 if the file is currently local (in-inode).
+ */
+int						/* error */
+xfs_bmap_first_unused(
+	xfs_trans_t	*tp,			/* transaction pointer */
+	xfs_inode_t	*ip,			/* incore inode */
+	xfs_extlen_t	len,			/* size of hole to find */
+	xfs_fileoff_t	*first_unused,		/* unused block */
+	int		whichfork)		/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*base;			/* base of extent array */
+	xfs_bmbt_rec_t	*ep;			/* pointer to an extent entry */
+	int		error;			/* error return value */
+	xfs_ifork_t	*ifp;			/* inode fork pointer */
+	xfs_fileoff_t	lastaddr;		/* last block number seen */
+	xfs_fileoff_t	lowest;			/* lowest useful block */
+	xfs_fileoff_t	max;			/* starting useful block */
+	xfs_fileoff_t	off;			/* offset for this block */
+	xfs_extnum_t	nextents;		/* number of extent entries */
+
+	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
+	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
+	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		*first_unused = 0;
+		return 0;
+	}
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		return error;
+	lowest = *first_unused;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	base = &ifp->if_u1.if_extents[0];
+	for (lastaddr = 0, max = lowest, ep = base;
+	     ep < &base[nextents];
+	     ep++) {
+		off = xfs_bmbt_get_startoff(ep);
+		/*
+		 * See if the hole before this extent will work.
+		 */
+		if (off >= lowest + len && off - max >= len) {
+			*first_unused = max;
+			return 0;
+		}
+		lastaddr = off + xfs_bmbt_get_blockcount(ep);
+		max = XFS_FILEOFF_MAX(lastaddr, lowest);
+	}
+	*first_unused = max;
+	return 0;
+}
+
+/*
+ * Returns the file-relative block number of the last block + 1 before
+ * last_block (input value) in the file.
+ * This is not based on i_size, it is based on the extent list.
+ * Returns 0 for local files, as they do not have an extent list.
+ */
+int						/* error */
+xfs_bmap_last_before(
+	xfs_trans_t	*tp,			/* transaction pointer */
+	xfs_inode_t	*ip,			/* incore inode */
+	xfs_fileoff_t	*last_block,		/* last block */
+	int		whichfork)		/* data or attr fork */
+{
+	xfs_fileoff_t	bno;			/* input file offset */
+	int		eof;			/* hit end of file */
+	xfs_bmbt_rec_t	*ep;			/* pointer to last extent */
+	int		error;			/* error return value */
+	xfs_bmbt_irec_t	got;			/* current extent value */
+	xfs_ifork_t	*ifp;			/* inode fork pointer */
+	xfs_extnum_t	lastx;			/* last extent used */
+	xfs_bmbt_irec_t	prev;			/* previous extent value */
+
+	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+	       return XFS_ERROR(EIO);
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		*last_block = 0;
+		return 0;
+	}
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		return error;
+	bno = *last_block - 1;
+	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+		&prev);
+	if (eof || xfs_bmbt_get_startoff(ep) > bno) {
+		if (prev.br_startoff == NULLFILEOFF)
+			*last_block = 0;
+		else
+			*last_block = prev.br_startoff + prev.br_blockcount;
+	}
+	/*
+	 * Otherwise *last_block is already the right answer.
+	 */
+	return 0;
+}
+
+/*
+ * Returns the file-relative block number of the first block past eof in
+ * the file.  This is not based on i_size, it is based on the extent list.
+ * Returns 0 for local files, as they do not have an extent list.
+ */
+int						/* error */
+xfs_bmap_last_offset(
+	xfs_trans_t	*tp,			/* transaction pointer */
+	xfs_inode_t	*ip,			/* incore inode */
+	xfs_fileoff_t	*last_block,		/* last block */
+	int		whichfork)		/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*base;			/* base of extent array */
+	xfs_bmbt_rec_t	*ep;			/* pointer to last extent */
+	int		error;			/* error return value */
+	xfs_ifork_t	*ifp;			/* inode fork pointer */
+	xfs_extnum_t	nextents;		/* number of extent entries */
+
+	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+	       return XFS_ERROR(EIO);
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		*last_block = 0;
+		return 0;
+	}
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		return error;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	if (!nextents) {
+		*last_block = 0;
+		return 0;
+	}
+	base = &ifp->if_u1.if_extents[0];
+	ASSERT(base != NULL);
+	ep = &base[nextents - 1];
+	*last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
+	return 0;
+}
+
+/*
+ * Returns whether the selected fork of the inode has exactly one
+ * block or not.  For the data fork we check this matches di_size,
+ * implying the file's range is 0..bsize-1.
+ */
+int					/* 1=>1 block, 0=>otherwise */
+xfs_bmap_one_block(
+	xfs_inode_t	*ip,		/* incore inode */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*ep;		/* ptr to fork's extent */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	int		rval;		/* return value */
+	xfs_bmbt_irec_t	s;		/* internal version of extent */
+
+#ifndef DEBUG
+	if (whichfork == XFS_DATA_FORK)
+		return ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize;
+#endif	/* !DEBUG */
+	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
+		return 0;
+	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+		return 0;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+	ep = ifp->if_u1.if_extents;
+	xfs_bmbt_get_all(ep, &s);
+	rval = s.br_startoff == 0 && s.br_blockcount == 1;
+	if (rval && whichfork == XFS_DATA_FORK)
+		ASSERT(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
+	return rval;
+}
+
+/*
+ * Read in the extents to if_extents.
+ * All inode fields are set up by caller, we just traverse the btree
+ * and copy the records in. If the file system cannot contain unwritten
+ * extents, the records are checked to ensure no "state" flags are set.
+ */
+int					/* error */
+xfs_bmap_read_extents(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_inode_t		*ip,	/* incore inode */
+	int			whichfork) /* data or attr fork */
+{
+	xfs_bmbt_block_t	*block;	/* current btree block */
+	xfs_fsblock_t		bno;	/* block # of "block" */
+	xfs_buf_t		*bp;	/* buffer for "block" */
+	int			error;	/* error return value */
+	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_bmap_read_extents";
+#endif
+	xfs_extnum_t		i, j;	/* index into the extents list */
+	xfs_ifork_t		*ifp;	/* fork structure */
+	int			level;	/* btree level, for checking */
+	xfs_mount_t		*mp;	/* file system mount structure */
+	xfs_bmbt_ptr_t		*pp;	/* pointer to block address */
+	/* REFERENCED */
+	xfs_extnum_t		room;	/* number of entries there's room for */
+	xfs_bmbt_rec_t		*trp;	/* target record pointer */
+
+	bno = NULLFSBLOCK;
+	mp = ip->i_mount;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
+					XFS_EXTFMT_INODE(ip);
+	block = ifp->if_broot;
+	/*
+	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+	 */
+	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
+	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
+	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
+	ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
+	bno = INT_GET(*pp, ARCH_CONVERT);
+	/*
+	 * Go down the tree until leaf level is reached, following the first
+	 * pointer (leftmost) at each level.
+	 */
+	while (level-- > 0) {
+		if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+			return error;
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		XFS_WANT_CORRUPTED_GOTO(
+			XFS_BMAP_SANITY_CHECK(mp, block, level),
+			error0);
+		if (level == 0)
+			break;
+		pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
+			1, mp->m_bmap_dmxr[1]);
+		XFS_WANT_CORRUPTED_GOTO(
+			XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)),
+			error0);
+		bno = INT_GET(*pp, ARCH_CONVERT);
+		xfs_trans_brelse(tp, bp);
+	}
+	/*
+	 * Here with bp and block set to the leftmost leaf node in the tree.
+	 */
+	room = ifp->if_bytes / (uint)sizeof(*trp);
+	trp = ifp->if_u1.if_extents;
+	i = 0;
+	/*
+	 * Loop over all leaf nodes.  Copy information to the extent list.
+	 */
+	for (;;) {
+		xfs_bmbt_rec_t	*frp, *temp;
+		xfs_fsblock_t	nextbno;
+		xfs_extnum_t	num_recs;
+
+		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		if (unlikely(i + num_recs > room)) {
+			ASSERT(i + num_recs <= room);
+			xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+				"corrupt dinode %Lu, (btree extents).  Unmount and run xfs_repair.",
+				(unsigned long long) ip->i_ino);
+			XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
+					 XFS_ERRLEVEL_LOW,
+					ip->i_mount);
+			goto error0;
+		}
+		XFS_WANT_CORRUPTED_GOTO(
+			XFS_BMAP_SANITY_CHECK(mp, block, 0),
+			error0);
+		/*
+		 * Read-ahead the next leaf block, if any.
+		 */
+		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		if (nextbno != NULLFSBLOCK)
+			xfs_btree_reada_bufl(mp, nextbno, 1);
+		/*
+		 * Copy records into the extent list.
+		 */
+		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
+			block, 1, mp->m_bmap_dmxr[0]);
+		temp = trp;
+		for (j = 0; j < num_recs; j++, frp++, trp++) {
+			trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
+			trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
+		}
+		if (exntf == XFS_EXTFMT_NOSTATE) {
+			/*
+			 * Check all attribute bmap btree records and
+			 * any "older" data bmap btree records for a
+			 * set bit in the "extent flag" position.
+			 */
+			if (unlikely(xfs_check_nostate_extents(temp, num_recs))) {
+				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
+						 XFS_ERRLEVEL_LOW,
+						 ip->i_mount);
+				goto error0;
+			}
+		}
+		i += num_recs;
+		xfs_trans_brelse(tp, bp);
+		bno = nextbno;
+		/*
+		 * If we've reached the end, stop.
+		 */
+		if (bno == NULLFSBLOCK)
+			break;
+		if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+			return error;
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+	}
+	ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp));
+	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
+	xfs_bmap_trace_exlist(fname, ip, i, whichfork);
+	return 0;
+error0:
+	xfs_trans_brelse(tp, bp);
+	return XFS_ERROR(EFSCORRUPTED);
+}
+
+#ifdef XFS_BMAP_TRACE
+/*
+ * Add bmap trace insert entries for all the contents of the extent list.
+ */
+void
+xfs_bmap_trace_exlist(
+	char		*fname,		/* function name */
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_extnum_t	cnt,		/* count of entries in the list */
+	int		whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t	*base;		/* base of extent list */
+	xfs_bmbt_rec_t	*ep;		/* current entry in extent list */
+	xfs_extnum_t	idx;		/* extent list entry number */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_bmbt_irec_t	s;		/* extent list record */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base));
+	base = ifp->if_u1.if_extents;
+	for (idx = 0, ep = base; idx < cnt; idx++, ep++) {
+		xfs_bmbt_get_all(ep, &s);
+		xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL,
+			whichfork);
+	}
+}
+#endif
+
+#ifdef DEBUG
+/*
+ * Validate that the bmbt_irecs being returned from bmapi are valid
+ * given the callers original parameters.  Specifically check the
+ * ranges of the returned irecs to ensure that they only extend beyond
+ * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
+ */
+STATIC void
+xfs_bmap_validate_ret(
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	xfs_bmbt_irec_t		*mval,
+	int			nmap,
+	int			ret_nmap)
+{
+	int			i;		/* index to map values */
+
+	ASSERT(ret_nmap <= nmap);
+
+	for (i = 0; i < ret_nmap; i++) {
+		ASSERT(mval[i].br_blockcount > 0);
+		if (!(flags & XFS_BMAPI_ENTIRE)) {
+			ASSERT(mval[i].br_startoff >= bno);
+			ASSERT(mval[i].br_blockcount <= len);
+			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
+			       bno + len);
+		} else {
+			ASSERT(mval[i].br_startoff < bno + len);
+			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
+			       bno);
+		}
+		ASSERT(i == 0 ||
+		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
+		       mval[i].br_startoff);
+		if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
+			ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
+			       mval[i].br_startblock != HOLESTARTBLOCK);
+		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
+		       mval[i].br_state == XFS_EXT_UNWRITTEN);
+	}
+}
+#endif /* DEBUG */
+
+
+/*
+ * Map file blocks to filesystem blocks.
+ * File range is given by the bno/len pair.
+ * Adds blocks to the file if this is a write ("flags & XFS_BMAPI_WRITE" set)
+ * into a hole or past eof.
+ * Only allocates blocks from a single allocation group,
+ * to avoid locking problems.
+ * The returned value in "firstblock" from the first call in a transaction
+ * must be remembered and presented to subsequent calls in "firstblock".
+ * An upper bound for the number of blocks to be allocated is supplied to
+ * the first call in "total"; if no allocation group has that many free
+ * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
+ */
+int					/* error */
+xfs_bmapi(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*ip,		/* incore inode */
+	xfs_fileoff_t	bno,		/* starting file offs. mapped */
+	xfs_filblks_t	len,		/* length to map in file */
+	int		flags,		/* XFS_BMAPI_... */
+	xfs_fsblock_t	*firstblock,	/* first allocated block
+					   controls a.g. for allocs */
+	xfs_extlen_t	total,		/* total blocks needed */
+	xfs_bmbt_irec_t	*mval,		/* output: map values */
+	int		*nmap,		/* i/o: mval size/count */
+	xfs_bmap_free_t	*flist)		/* i/o: list extents to free */
+{
+	xfs_fsblock_t	abno;		/* allocated block number */
+	xfs_extlen_t	alen;		/* allocated extent length */
+	xfs_fileoff_t	aoff;		/* allocated file offset */
+	xfs_bmalloca_t	bma;		/* args for xfs_bmap_alloc */
+	char		contig;		/* allocation must be one extent */
+	xfs_btree_cur_t	*cur;		/* bmap btree cursor */
+	char		delay;		/* this request is for delayed alloc */
+	xfs_fileoff_t	end;		/* end of mapped file region */
+	int		eof;		/* we've hit the end of extent list */
+	xfs_bmbt_rec_t	*ep;		/* extent list entry pointer */
+	int		error;		/* error return */
+	char		exact;		/* don't do all of wasdelayed extent */
+	xfs_bmbt_irec_t	got;		/* current extent list record */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_extlen_t	indlen;		/* indirect blocks length */
+	char		inhole;		/* current location is hole in file */
+	xfs_extnum_t	lastx;		/* last useful extent number */
+	int		logflags;	/* flags for transaction logging */
+	xfs_extlen_t	minleft;	/* min blocks left after allocation */
+	xfs_extlen_t	minlen;		/* min allocation size */
+	xfs_mount_t	*mp;		/* xfs mount structure */
+	int		n;		/* current extent index */
+	int		nallocs;	/* number of extents alloc'd */
+	xfs_extnum_t	nextents;	/* number of extents in file */
+	xfs_fileoff_t	obno;		/* old block number (offset) */
+	xfs_bmbt_irec_t	prev;		/* previous extent list record */
+	char		stateless;	/* ignore state flag set */
+	int		tmp_logflags;	/* temp flags holder */
+	char		trim;		/* output trimmed to match range */
+	char		userdata;	/* allocating non-metadata */
+	char		wasdelay;	/* old extent was delayed */
+	int		whichfork;	/* data or attr fork */
+	char		wr;		/* this is a write request */
+	char		rsvd;		/* OK to allocate reserved blocks */
+#ifdef DEBUG
+	xfs_fileoff_t	orig_bno;	/* original block number value */
+	int		orig_flags;	/* original flags arg value */
+	xfs_filblks_t	orig_len;	/* original value of len arg */
+	xfs_bmbt_irec_t	*orig_mval;	/* original value of mval */
+	int		orig_nmap;	/* original value of *nmap */
+
+	orig_bno = bno;
+	orig_len = len;
+	orig_flags = flags;
+	orig_mval = mval;
+	orig_nmap = *nmap;
+#endif
+	ASSERT(*nmap >= 1);
+	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
+	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+		XFS_ATTR_FORK : XFS_DATA_FORK;
+	mp = ip->i_mount;
+	if (unlikely(XFS_TEST_ERROR(
+	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
+	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+		XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
+		XFS_STATS_INC(xs_blk_mapw);
+	else
+		XFS_STATS_INC(xs_blk_mapr);
+	delay = (flags & XFS_BMAPI_DELAY) != 0;
+	trim = (flags & XFS_BMAPI_ENTIRE) == 0;
+	userdata = (flags & XFS_BMAPI_METADATA) == 0;
+	exact = (flags & XFS_BMAPI_EXACT) != 0;
+	rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
+	contig = (flags & XFS_BMAPI_CONTIG) != 0;
+	/*
+	 * stateless is used to combine extents which
+	 * differ only due to the state of the extents.
+	 * This technique is used from xfs_getbmap()
+	 * when the caller does not wish to see the
+	 * separation (which is the default).
+	 *
+	 * This technique is also used when writing a
+	 * buffer which has been partially written,
+	 * (usually by being flushed during a chunkread),
+	 * to ensure one write takes place. This also
+	 * prevents a change in the xfs inode extents at
+	 * this time, intentionally. This change occurs
+	 * on completion of the write operation, in
+	 * xfs_strat_comp(), where the xfs_bmapi() call
+	 * is transactioned, and the extents combined.
+	 */
+	stateless = (flags & XFS_BMAPI_IGSTATE) != 0;
+	if (stateless && wr)	/* if writing unwritten space, no */
+		wr = 0;		/* allocations are allowed */
+	ASSERT(wr || !delay);
+	logflags = 0;
+	nallocs = 0;
+	cur = NULL;
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		ASSERT(wr && tp);
+		if ((error = xfs_bmap_local_to_extents(tp, ip,
+				firstblock, total, &logflags, whichfork)))
+			goto error0;
+	}
+	if (wr && *firstblock == NULLFSBLOCK) {
+		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
+			minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
+		else
+			minleft = 1;
+	} else
+		minleft = 0;
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		goto error0;
+	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+		&prev);
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	n = 0;
+	end = bno + len;
+	obno = bno;
+	bma.ip = NULL;
+	while (bno < end && n < *nmap) {
+		/*
+		 * Reading past eof, act as though there's a hole
+		 * up to end.
+		 */
+		if (eof && !wr)
+			got.br_startoff = end;
+		inhole = eof || got.br_startoff > bno;
+		wasdelay = wr && !inhole && !delay &&
+			ISNULLSTARTBLOCK(got.br_startblock);
+		/*
+		 * First, deal with the hole before the allocated space
+		 * that we found, if any.
+		 */
+		if (wr && (inhole || wasdelay)) {
+			/*
+			 * For the wasdelay case, we could also just
+			 * allocate the stuff asked for in this bmap call
+			 * but that wouldn't be as good.
+			 */
+			if (wasdelay && !exact) {
+				alen = (xfs_extlen_t)got.br_blockcount;
+				aoff = got.br_startoff;
+				if (lastx != NULLEXTNUM && lastx) {
+					ep = &ifp->if_u1.if_extents[lastx - 1];
+					xfs_bmbt_get_all(ep, &prev);
+				}
+			} else if (wasdelay) {
+				alen = (xfs_extlen_t)
+					XFS_FILBLKS_MIN(len,
+						(got.br_startoff +
+						 got.br_blockcount) - bno);
+				aoff = bno;
+			} else {
+				alen = (xfs_extlen_t)
+					XFS_FILBLKS_MIN(len, MAXEXTLEN);
+				if (!eof)
+					alen = (xfs_extlen_t)
+						XFS_FILBLKS_MIN(alen,
+							got.br_startoff - bno);
+				aoff = bno;
+			}
+			minlen = contig ? alen : 1;
+			if (delay) {
+				indlen = (xfs_extlen_t)
+					xfs_bmap_worst_indlen(ip, alen);
+				ASSERT(indlen > 0);
+				/*
+				 * Make a transaction-less quota reservation for
+				 * delayed allocation blocks. This number gets
+				 * adjusted later.
+				 * We return EDQUOT if we haven't allocated
+				 * blks already inside this loop;
+				 */
+				if (XFS_TRANS_RESERVE_BLKQUOTA(
+						mp, NULL, ip, (long)alen)) {
+					if (n == 0) {
+						*nmap = 0;
+						ASSERT(cur == NULL);
+						return XFS_ERROR(EDQUOT);
+					}
+					break;
+				}
+
+				/*
+				 * Split changing sb for alen and indlen since
+				 * they could be coming from different places.
+				 */
+				if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
+					xfs_extlen_t	extsz;
+					xfs_extlen_t	ralen;
+					if (!(extsz = ip->i_d.di_extsize))
+						extsz = mp->m_sb.sb_rextsize;
+					ralen = roundup(alen, extsz);
+					ralen = ralen / mp->m_sb.sb_rextsize;
+					if (xfs_mod_incore_sb(mp,
+						XFS_SBS_FREXTENTS,
+						-(ralen), rsvd)) {
+						if (XFS_IS_QUOTA_ON(ip->i_mount))
+							XFS_TRANS_UNRESERVE_BLKQUOTA(
+						     		mp, NULL, ip,
+								(long)alen);
+						break;
+					}
+				} else {
+					if (xfs_mod_incore_sb(mp,
+							      XFS_SBS_FDBLOCKS,
+							      -(alen), rsvd)) {
+						if (XFS_IS_QUOTA_ON(ip->i_mount))
+							XFS_TRANS_UNRESERVE_BLKQUOTA(
+								mp, NULL, ip,
+								(long)alen);
+						break;
+					}
+				}
+
+				if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
+						-(indlen), rsvd)) {
+					XFS_TRANS_UNRESERVE_BLKQUOTA(
+						mp, NULL, ip, (long)alen);
+					break;
+				}
+				ip->i_delayed_blks += alen;
+				abno = NULLSTARTBLOCK(indlen);
+			} else {
+				/*
+				 * If first time, allocate and fill in
+				 * once-only bma fields.
+				 */
+				if (bma.ip == NULL) {
+					bma.tp = tp;
+					bma.ip = ip;
+					bma.prevp = &prev;
+					bma.gotp = &got;
+					bma.total = total;
+					bma.userdata = 0;
+				}
+				/* Indicate if this is the first user data
+				 * in the file, or just any user data.
+				 */
+				if (userdata) {
+					bma.userdata = (aoff == 0) ?
+						XFS_ALLOC_INITIAL_USER_DATA :
+						XFS_ALLOC_USERDATA;
+				}
+				/*
+				 * Fill in changeable bma fields.
+				 */
+				bma.eof = eof;
+				bma.firstblock = *firstblock;
+				bma.alen = alen;
+				bma.off = aoff;
+				bma.wasdel = wasdelay;
+				bma.minlen = minlen;
+				bma.low = flist->xbf_low;
+				bma.minleft = minleft;
+				/*
+				 * Only want to do the alignment at the
+				 * eof if it is userdata and allocation length
+				 * is larger than a stripe unit.
+				 */
+				if (mp->m_dalign && alen >= mp->m_dalign &&
+				    userdata && whichfork == XFS_DATA_FORK) {
+					if ((error = xfs_bmap_isaeof(ip, aoff,
+							whichfork, &bma.aeof)))
+						goto error0;
+				} else
+					bma.aeof = 0;
+				/*
+				 * Call allocator.
+				 */
+				if ((error = xfs_bmap_alloc(&bma)))
+					goto error0;
+				/*
+				 * Copy out result fields.
+				 */
+				abno = bma.rval;
+				if ((flist->xbf_low = bma.low))
+					minleft = 0;
+				alen = bma.alen;
+				aoff = bma.off;
+				ASSERT(*firstblock == NULLFSBLOCK ||
+				       XFS_FSB_TO_AGNO(mp, *firstblock) ==
+				       XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
+				       (flist->xbf_low &&
+					XFS_FSB_TO_AGNO(mp, *firstblock) <
+					XFS_FSB_TO_AGNO(mp, bma.firstblock)));
+				*firstblock = bma.firstblock;
+				if (cur)
+					cur->bc_private.b.firstblock =
+						*firstblock;
+				if (abno == NULLFSBLOCK)
+					break;
+				if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
+					cur = xfs_btree_init_cursor(mp,
+						tp, NULL, 0, XFS_BTNUM_BMAP,
+						ip, whichfork);
+					cur->bc_private.b.firstblock =
+						*firstblock;
+					cur->bc_private.b.flist = flist;
+				}
+				/*
+				 * Bump the number of extents we've allocated
+				 * in this call.
+				 */
+				nallocs++;
+			}
+			if (cur)
+				cur->bc_private.b.flags =
+					wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
+			got.br_startoff = aoff;
+			got.br_startblock = abno;
+			got.br_blockcount = alen;
+			got.br_state = XFS_EXT_NORM;	/* assume normal */
+			/*
+			 * Determine state of extent, and the filesystem.
+			 * A wasdelay extent has been initialized, so
+			 * shouldn't be flagged as unwritten.
+			 */
+			if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
+				if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
+					got.br_state = XFS_EXT_UNWRITTEN;
+			}
+			error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
+				firstblock, flist, &tmp_logflags, whichfork,
+				rsvd);
+			logflags |= tmp_logflags;
+			if (error)
+				goto error0;
+			lastx = ifp->if_lastex;
+			ep = &ifp->if_u1.if_extents[lastx];
+			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+			xfs_bmbt_get_all(ep, &got);
+			ASSERT(got.br_startoff <= aoff);
+			ASSERT(got.br_startoff + got.br_blockcount >=
+				aoff + alen);
+#ifdef DEBUG
+			if (delay) {
+				ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
+				ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
+			}
+			ASSERT(got.br_state == XFS_EXT_NORM ||
+			       got.br_state == XFS_EXT_UNWRITTEN);
+#endif
+			/*
+			 * Fall down into the found allocated space case.
+			 */
+		} else if (inhole) {
+			/*
+			 * Reading in a hole.
+			 */
+			mval->br_startoff = bno;
+			mval->br_startblock = HOLESTARTBLOCK;
+			mval->br_blockcount =
+				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
+			mval->br_state = XFS_EXT_NORM;
+			bno += mval->br_blockcount;
+			len -= mval->br_blockcount;
+			mval++;
+			n++;
+			continue;
+		}
+		/*
+		 * Then deal with the allocated space we found.
+		 */
+		ASSERT(ep != NULL);
+		if (trim && (got.br_startoff + got.br_blockcount > obno)) {
+			if (obno > bno)
+				bno = obno;
+			ASSERT((bno >= obno) || (n == 0));
+			ASSERT(bno < end);
+			mval->br_startoff = bno;
+			if (ISNULLSTARTBLOCK(got.br_startblock)) {
+				ASSERT(!wr || delay);
+				mval->br_startblock = DELAYSTARTBLOCK;
+			} else
+				mval->br_startblock =
+					got.br_startblock +
+					(bno - got.br_startoff);
+			/*
+			 * Return the minimum of what we got and what we
+			 * asked for, for the length.  We can use the len
+			 * variable here because it is modified below
+			 * and we could have been there before coming
+			 * here if the first part of the allocation
+			 * didn't overlap what was asked for.
+			 */
+			mval->br_blockcount =
+				XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
+					(bno - got.br_startoff));
+			mval->br_state = got.br_state;
+			ASSERT(mval->br_blockcount <= len);
+		} else {
+			*mval = got;
+			if (ISNULLSTARTBLOCK(mval->br_startblock)) {
+				ASSERT(!wr || delay);
+				mval->br_startblock = DELAYSTARTBLOCK;
+			}
+		}
+
+		/*
+		 * Check if writing previously allocated but
+		 * unwritten extents.
+		 */
+		if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
+		    ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
+			/*
+			 * Modify (by adding) the state flag, if writing.
+			 */
+			ASSERT(mval->br_blockcount <= len);
+			if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
+				cur = xfs_btree_init_cursor(mp,
+					tp, NULL, 0, XFS_BTNUM_BMAP,
+					ip, whichfork);
+				cur->bc_private.b.firstblock =
+					*firstblock;
+				cur->bc_private.b.flist = flist;
+			}
+			mval->br_state = XFS_EXT_NORM;
+			error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
+				firstblock, flist, &tmp_logflags, whichfork,
+				rsvd);
+			logflags |= tmp_logflags;
+			if (error)
+				goto error0;
+			lastx = ifp->if_lastex;
+			ep = &ifp->if_u1.if_extents[lastx];
+			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+			xfs_bmbt_get_all(ep, &got);
+			/*
+			 * We may have combined previously unwritten
+			 * space with written space, so generate
+			 * another request.
+			 */
+			if (mval->br_blockcount < len)
+				continue;
+		}
+
+		ASSERT(!trim ||
+		       ((mval->br_startoff + mval->br_blockcount) <= end));
+		ASSERT(!trim || (mval->br_blockcount <= len) ||
+		       (mval->br_startoff < obno));
+		bno = mval->br_startoff + mval->br_blockcount;
+		len = end - bno;
+		if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
+			ASSERT(mval->br_startblock == mval[-1].br_startblock);
+			ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
+			ASSERT(mval->br_state == mval[-1].br_state);
+			mval[-1].br_blockcount = mval->br_blockcount;
+			mval[-1].br_state = mval->br_state;
+		} else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
+			   mval[-1].br_startblock != DELAYSTARTBLOCK &&
+			   mval[-1].br_startblock != HOLESTARTBLOCK &&
+			   mval->br_startblock ==
+			   mval[-1].br_startblock + mval[-1].br_blockcount &&
+			   (stateless || mval[-1].br_state == mval->br_state)) {
+			ASSERT(mval->br_startoff ==
+			       mval[-1].br_startoff + mval[-1].br_blockcount);
+			mval[-1].br_blockcount += mval->br_blockcount;
+		} else if (n > 0 &&
+			   mval->br_startblock == DELAYSTARTBLOCK &&
+			   mval[-1].br_startblock == DELAYSTARTBLOCK &&
+			   mval->br_startoff ==
+			   mval[-1].br_startoff + mval[-1].br_blockcount) {
+			mval[-1].br_blockcount += mval->br_blockcount;
+			mval[-1].br_state = mval->br_state;
+		} else if (!((n == 0) &&
+			     ((mval->br_startoff + mval->br_blockcount) <=
+			      obno))) {
+			mval++;
+			n++;
+		}
+		/*
+		 * If we're done, stop now.  Stop when we've allocated
+		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
+		 * the transaction may get too big.
+		 */
+		if (bno >= end || n >= *nmap || nallocs >= *nmap)
+			break;
+		/*
+		 * Else go on to the next record.
+		 */
+		ep++;
+		lastx++;
+		if (lastx >= nextents) {
+			eof = 1;
+			prev = got;
+		} else
+			xfs_bmbt_get_all(ep, &got);
+	}
+	ifp->if_lastex = lastx;
+	*nmap = n;
+	/*
+	 * Transform from btree to extents, give it cur.
+	 */
+	if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+	    XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+		ASSERT(wr && cur);
+		error = xfs_bmap_btree_to_extents(tp, ip, cur,
+			&tmp_logflags, whichfork);
+		logflags |= tmp_logflags;
+		if (error)
+			goto error0;
+	}
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
+	       XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
+	error = 0;
+
+error0:
+	/*
+	 * Log everything.  Do this after conversion, there's no point in
+	 * logging the extent list if we've converted to btree format.
+	 */
+	if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+		logflags &= ~XFS_ILOG_FEXT(whichfork);
+	else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
+		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+		logflags &= ~XFS_ILOG_FBROOT(whichfork);
+	/*
+	 * Log whatever the flags say, even if error.  Otherwise we might miss
+	 * detecting a case where the data is changed, there's an error,
+	 * and it's not logged, so we don't shut down when we should.
+	 */
+	if (logflags) {
+		ASSERT(tp && wr);
+		xfs_trans_log_inode(tp, ip, logflags);
+	}
+	if (cur) {
+		if (!error) {
+			ASSERT(*firstblock == NULLFSBLOCK ||
+			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
+			       XFS_FSB_TO_AGNO(mp,
+				       cur->bc_private.b.firstblock) ||
+			       (flist->xbf_low &&
+				XFS_FSB_TO_AGNO(mp, *firstblock) <
+				XFS_FSB_TO_AGNO(mp,
+					cur->bc_private.b.firstblock)));
+			*firstblock = cur->bc_private.b.firstblock;
+		}
+		xfs_btree_del_cursor(cur,
+			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	}
+	if (!error)
+		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
+			orig_nmap, *nmap);
+	return error;
+}
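The tail of the mapping loop above coalesces adjacent entries in the caller's mval array: an entry that continues the previous one in both file offset and disk block, with the same extent state, simply extends it. The following standalone sketch models that merge rule; struct irec and merge_mappings() are hypothetical stand-ins for xfs_bmbt_irec_t and the in-loop logic, and the delayed-allocation and hole encodings of the real code are ignored.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

struct irec {				/* stand-in for xfs_bmbt_irec_t */
	unsigned long long	startoff;	/* file offset, in blocks */
	unsigned long long	startblock;	/* disk block number */
	unsigned long long	blockcount;	/* length, in blocks */
	int			state;		/* 0 = normal, 1 = unwritten */
};

/* Merge entries that are contiguous both on disk and in the file. */
static int
merge_mappings(struct irec *mval, int n)
{
	int	i, out;

	for (i = 1, out = 0; i < n; i++) {
		struct irec	*prev = &mval[out];

		if (mval[i].startoff == prev->startoff + prev->blockcount &&
		    mval[i].startblock == prev->startblock + prev->blockcount &&
		    mval[i].state == prev->state)
			prev->blockcount += mval[i].blockcount;
		else
			mval[++out] = mval[i];
	}
	return out + 1;				/* entries kept */
}

int
main(void)
{
	struct irec	m[] = {
		{ 0, 100, 8, 0 }, { 8, 108, 4, 0 }, { 16, 200, 4, 0 },
	};
	int		i, n = merge_mappings(m, 3);

	for (i = 0; i < n; i++)
		printf("off %llu block %llu len %llu\n",
			m[i].startoff, m[i].startblock, m[i].blockcount);
	return 0;
}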
+
+/*
+ * Map file blocks to filesystem blocks, simple version.
+ * One block (extent) only, read-only.
+ * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
+ * For the other flag values, the effect is as if XFS_BMAPI_METADATA
+ * was set and all the others were clear.
+ */
+int						/* error */
+xfs_bmapi_single(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*ip,		/* incore inode */
+	int		whichfork,	/* data or attr fork */
+	xfs_fsblock_t	*fsb,		/* output: mapped block */
+	xfs_fileoff_t	bno)		/* starting file offs. mapped */
+{
+	int		eof;		/* we've hit the end of extent list */
+	int		error;		/* error return */
+	xfs_bmbt_irec_t	got;		/* current extent list record */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_extnum_t	lastx;		/* last useful extent number */
+	xfs_bmbt_irec_t	prev;		/* previous extent list record */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (unlikely(
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
+	       XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
+				ip->i_mount);
+	       return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return XFS_ERROR(EIO);
+	XFS_STATS_INC(xs_blk_mapr);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		return error;
+	(void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+		&prev);
+	/*
+	 * Reading past eof, act as though there's a hole
+	 * up to end.
+	 */
+	if (eof || got.br_startoff > bno) {
+		*fsb = NULLFSBLOCK;
+		return 0;
+	}
+	ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
+	ASSERT(bno < got.br_startoff + got.br_blockcount);
+	*fsb = got.br_startblock + (bno - got.br_startoff);
+	ifp->if_lastex = lastx;
+	return 0;
+}
+
+/*
+ * Unmap (remove) blocks from a file.
+ * If nexts is nonzero then the number of extents to remove is limited to
+ * that value.  *done is set when the unmap of the requested range is
+ * complete, and left clear when extents remain to be removed.
+ */
+int						/* error */
+xfs_bunmapi(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		bno,		/* starting offset to unmap */
+	xfs_filblks_t		len,		/* length to unmap in file */
+	int			flags,		/* misc flags */
+	xfs_extnum_t		nexts,		/* number of extents max */
+	xfs_fsblock_t		*firstblock,	/* first allocated block
+						   controls a.g. for allocs */
+	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
+	int			*done)		/* set if not done yet */
+{
+	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
+	xfs_bmbt_irec_t		del;		/* extent being deleted */
+	int			eof;		/* is deleting at eof */
+	xfs_bmbt_rec_t		*ep;		/* extent list entry pointer */
+	int			error;		/* error return value */
+	xfs_extnum_t		extno;		/* extent number in list */
+	xfs_bmbt_irec_t		got;		/* current extent list entry */
+	xfs_ifork_t		*ifp;		/* inode fork pointer */
+	int			isrt;		/* freeing in rt area */
+	xfs_extnum_t		lastx;		/* last extent index used */
+	int			logflags;	/* transaction logging flags */
+	xfs_extlen_t		mod;		/* rt extent offset */
+	xfs_mount_t		*mp;		/* mount structure */
+	xfs_extnum_t		nextents;	/* size of extent list */
+	xfs_bmbt_irec_t		prev;		/* previous extent list entry */
+	xfs_fileoff_t		start;		/* first file offset deleted */
+	int			tmp_logflags;	/* partial logging flags */
+	int			wasdel;		/* was a delayed alloc extent */
+	int			whichfork;	/* data or attribute fork */
+	int			rsvd;		/* OK to allocate reserved blocks */
+	xfs_fsblock_t		sum;
+
+	xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
+	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+		XFS_ATTR_FORK : XFS_DATA_FORK;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (unlikely(
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+		XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
+				 ip->i_mount);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	mp = ip->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+	rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
+	ASSERT(len > 0);
+	ASSERT(nexts >= 0);
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(tp, ip, whichfork)))
+		return error;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	if (nextents == 0) {
+		*done = 1;
+		return 0;
+	}
+	XFS_STATS_INC(xs_blk_unmap);
+	isrt = (whichfork == XFS_DATA_FORK) &&
+	       (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+	start = bno;
+	bno = start + len - 1;
+	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+		&prev);
+	/*
+	 * Check to see if the given block number is past the end of the
+	 * file, back up to the last block if so...
+	 */
+	if (eof) {
+		ep = &ifp->if_u1.if_extents[--lastx];
+		xfs_bmbt_get_all(ep, &got);
+		bno = got.br_startoff + got.br_blockcount - 1;
+	}
+	logflags = 0;
+	if (ifp->if_flags & XFS_IFBROOT) {
+		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+		cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
+			whichfork);
+		cur->bc_private.b.firstblock = *firstblock;
+		cur->bc_private.b.flist = flist;
+		cur->bc_private.b.flags = 0;
+	} else
+		cur = NULL;
+	extno = 0;
+	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
+	       (nexts == 0 || extno < nexts)) {
+		/*
+		 * Is the found extent after a hole in which bno lives?
+		 * Just back up to the previous extent, if so.
+		 */
+		if (got.br_startoff > bno) {
+			if (--lastx < 0)
+				break;
+			ep--;
+			xfs_bmbt_get_all(ep, &got);
+		}
+		/*
+		 * Is the last block of this extent before the range
+		 * we're supposed to delete?  If so, we're done.
+		 */
+		bno = XFS_FILEOFF_MIN(bno,
+			got.br_startoff + got.br_blockcount - 1);
+		if (bno < start)
+			break;
+		/*
+		 * Then deal with the (possibly delayed) allocated space
+		 * we found.
+		 */
+		ASSERT(ep != NULL);
+		del = got;
+		wasdel = ISNULLSTARTBLOCK(del.br_startblock);
+		if (got.br_startoff < start) {
+			del.br_startoff = start;
+			del.br_blockcount -= start - got.br_startoff;
+			if (!wasdel)
+				del.br_startblock += start - got.br_startoff;
+		}
+		if (del.br_startoff + del.br_blockcount > bno + 1)
+			del.br_blockcount = bno + 1 - del.br_startoff;
+		sum = del.br_startblock + del.br_blockcount;
+		if (isrt &&
+		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
+			/*
+			 * Realtime extent not lined up at the end.
+			 * The extent could have been split into written
+			 * and unwritten pieces, or we could just be
+			 * unmapping part of it.  But we can't really
+			 * get rid of part of a realtime extent.
+			 */
+			if (del.br_state == XFS_EXT_UNWRITTEN ||
+			    !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
+				/*
+				 * This piece is unwritten, or we're not
+				 * using unwritten extents.  Skip over it.
+				 */
+				ASSERT(bno >= mod);
+				bno -= mod > del.br_blockcount ?
+					del.br_blockcount : mod;
+				if (bno < got.br_startoff) {
+					if (--lastx >= 0)
+						xfs_bmbt_get_all(--ep, &got);
+				}
+				continue;
+			}
+			/*
+			 * It's written, turn it unwritten.
+			 * This is better than zeroing it.
+			 */
+			ASSERT(del.br_state == XFS_EXT_NORM);
+			ASSERT(xfs_trans_get_block_res(tp) > 0);
+			/*
+			 * If this spans a realtime extent boundary,
+			 * chop it back to the start of the one we end at.
+			 */
+			if (del.br_blockcount > mod) {
+				del.br_startoff += del.br_blockcount - mod;
+				del.br_startblock += del.br_blockcount - mod;
+				del.br_blockcount = mod;
+			}
+			del.br_state = XFS_EXT_UNWRITTEN;
+			error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
+				firstblock, flist, &logflags, XFS_DATA_FORK, 0);
+			if (error)
+				goto error0;
+			goto nodelete;
+		}
+		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
+			/*
+			 * Realtime extent is lined up at the end but not
+			 * at the front.  We'll get rid of full extents if
+			 * we can.
+			 */
+			mod = mp->m_sb.sb_rextsize - mod;
+			if (del.br_blockcount > mod) {
+				del.br_blockcount -= mod;
+				del.br_startoff += mod;
+				del.br_startblock += mod;
+			} else if ((del.br_startoff == start &&
+				    (del.br_state == XFS_EXT_UNWRITTEN ||
+				     xfs_trans_get_block_res(tp) == 0)) ||
+				   !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
+				/*
+				 * Can't make it unwritten.  There isn't
+				 * a full extent here so just skip it.
+				 */
+				ASSERT(bno >= del.br_blockcount);
+				bno -= del.br_blockcount;
+				if (bno < got.br_startoff) {
+					if (--lastx >= 0)
+						xfs_bmbt_get_all(--ep, &got);
+				}
+				continue;
+			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
+				/*
+				 * This one is already unwritten.
+				 * It must have a written left neighbor.
+				 * Unwrite the killed part of that one and
+				 * try again.
+				 */
+				ASSERT(lastx > 0);
+				xfs_bmbt_get_all(ep - 1, &prev);
+				ASSERT(prev.br_state == XFS_EXT_NORM);
+				ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
+				ASSERT(del.br_startblock ==
+				       prev.br_startblock + prev.br_blockcount);
+				if (prev.br_startoff < start) {
+					mod = start - prev.br_startoff;
+					prev.br_blockcount -= mod;
+					prev.br_startblock += mod;
+					prev.br_startoff = start;
+				}
+				prev.br_state = XFS_EXT_UNWRITTEN;
+				error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
+					&prev, firstblock, flist, &logflags,
+					XFS_DATA_FORK, 0);
+				if (error)
+					goto error0;
+				goto nodelete;
+			} else {
+				ASSERT(del.br_state == XFS_EXT_NORM);
+				del.br_state = XFS_EXT_UNWRITTEN;
+				error = xfs_bmap_add_extent(ip, lastx, &cur,
+					&del, firstblock, flist, &logflags,
+					XFS_DATA_FORK, 0);
+				if (error)
+					goto error0;
+				goto nodelete;
+			}
+		}
+		if (wasdel) {
+			ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
+			xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
+				(int)del.br_blockcount, rsvd);
+			/* Unreserve our quota space */
+			XFS_TRANS_RESERVE_QUOTA_NBLKS(
+				mp, NULL, ip, -((long)del.br_blockcount), 0,
+				isrt ?	XFS_QMOPT_RES_RTBLKS :
+					XFS_QMOPT_RES_REGBLKS);
+			ip->i_delayed_blks -= del.br_blockcount;
+			if (cur)
+				cur->bc_private.b.flags |=
+					XFS_BTCUR_BPRV_WASDEL;
+		} else if (cur)
+			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
+		/*
+		 * If it's the case where the directory code is running
+		 * with no block reservation, and the deleted block is in
+		 * the middle of its extent, and the resulting insert
+		 * of an extent would cause transformation to btree format,
+		 * then reject it.  The calling code will then swap
+		 * blocks around instead.
+		 * We have to do this now, rather than waiting for the
+		 * conversion to btree format, since the transaction
+		 * will be dirty.
+		 */
+		if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
+		    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+		    XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
+		    del.br_startoff > got.br_startoff &&
+		    del.br_startoff + del.br_blockcount <
+		    got.br_startoff + got.br_blockcount) {
+			error = XFS_ERROR(ENOSPC);
+			goto error0;
+		}
+		error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
+			&tmp_logflags, whichfork, rsvd);
+		logflags |= tmp_logflags;
+		if (error)
+			goto error0;
+		bno = del.br_startoff - 1;
+nodelete:
+		lastx = ifp->if_lastex;
+		/*
+		 * If not done go on to the next (previous) record.
+		 * Reset ep in case the extents array was re-alloced.
+		 */
+		ep = &ifp->if_u1.if_extents[lastx];
+		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
+			if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
+			    xfs_bmbt_get_startoff(ep) > bno) {
+				lastx--;
+				ep--;
+			}
+			if (lastx >= 0)
+				xfs_bmbt_get_all(ep, &got);
+			extno++;
+		}
+	}
+	ifp->if_lastex = lastx;
+	*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	/*
+	 * Convert to a btree if necessary.
+	 */
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
+		ASSERT(cur == NULL);
+		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
+			&cur, 0, &tmp_logflags, whichfork);
+		logflags |= tmp_logflags;
+		if (error)
+			goto error0;
+	}
+	/*
+	 * transform from btree to extents, give it cur
+	 */
+	else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+		 XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+		ASSERT(cur != NULL);
+		error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
+			whichfork);
+		logflags |= tmp_logflags;
+		if (error)
+			goto error0;
+	}
+	/*
+	 * transform from extents to local?
+	 */
+	ASSERT(ifp->if_ext_max ==
+	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+	error = 0;
+error0:
+	/*
+	 * Log everything.  Do this after conversion, there's no point in
+	 * logging the extent list if we've converted to btree format.
+	 */
+	if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
+	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+		logflags &= ~XFS_ILOG_FEXT(whichfork);
+	else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
+		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+		logflags &= ~XFS_ILOG_FBROOT(whichfork);
+	/*
+	 * Log the inode even in the error case: if the transaction
+	 * is dirty we'll need to shut down the filesystem.
+	 */
+	if (logflags)
+		xfs_trans_log_inode(tp, ip, logflags);
+	if (cur) {
+		if (!error) {
+			*firstblock = cur->bc_private.b.firstblock;
+			cur->bc_private.b.allocated = 0;
+		}
+		xfs_btree_del_cursor(cur,
+			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	}
+	return error;
+}
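For realtime files, xfs_bunmapi() above refuses to free a partial realtime extent: both ends of the deletion are checked with do_mod() against sb_rextsize, and misaligned pieces are skipped or converted to unwritten rather than freed. The sketch below models only the trimming half of that rule; trim_to_rtextents() is a hypothetical standalone helper, and the unwritten-extent conversions of the real code are deliberately left out.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

/*
 * Shrink [startblock, startblock + blockcount) until both ends fall on
 * a realtime extent boundary, so only whole rt extents would be freed.
 * Returns 0 if no whole extent remains.
 */
static int
trim_to_rtextents(unsigned long long *startblock,
		  unsigned long long *blockcount, unsigned int rextsize)
{
	unsigned long long	mod;

	mod = (*startblock + *blockcount) % rextsize;	/* misaligned tail */
	if (mod)
		*blockcount = (*blockcount > mod) ? *blockcount - mod : 0;

	mod = *startblock % rextsize;			/* misaligned head */
	if (mod && *blockcount) {
		unsigned long long	adj = rextsize - mod;

		if (*blockcount > adj) {
			*startblock += adj;
			*blockcount -= adj;
		} else
			*blockcount = 0;
	}
	return *blockcount != 0;
}

int
main(void)
{
	unsigned long long	bno = 10, len = 50;	/* blocks 10..59 */

	if (trim_to_rtextents(&bno, &len, 16))		/* rextsize = 16 */
		printf("whole rt extents cover blocks %llu..%llu\n",
			bno, bno + len - 1);
	return 0;
}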
+
+/*
+ * Fcntl interface to xfs_bmapi.
+ */
+int						/* error code */
+xfs_getbmap(
+	bhv_desc_t		*bdp,		/* XFS behavior descriptor */
+	struct getbmap		*bmv,		/* user bmap structure */
+	void			__user *ap,	/* pointer to user's array */
+	int			interface)	/* interface flags */
+{
+	__int64_t		bmvend;		/* last block requested */
+	int			error;		/* return value */
+	__int64_t		fixlen;		/* length for -1 case */
+	int			i;		/* extent number */
+	xfs_inode_t		*ip;		/* xfs incore inode pointer */
+	vnode_t			*vp;		/* corresponding vnode */
+	int			lock;		/* lock state */
+	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
+	xfs_mount_t		*mp;		/* file system mount point */
+	int			nex;		/* # of user extents can do */
+	int			nexleft;	/* # of user extents left */
+	int			subnex;		/* # of bmapi's can do */
+	int			nmap;		/* number of map entries */
+	struct getbmap		out;		/* output structure */
+	int			whichfork;	/* data or attr fork */
+	int			prealloced;	/* this is a file with
+						 * preallocated data space */
+	int			sh_unwritten;	/* true, if unwritten */
+						/* extents listed separately */
+	int			bmapi_flags;	/* flags for xfs_bmapi */
+	__int32_t		oflags;		/* getbmapx bmv_oflags field */
+
+	vp = BHV_TO_VNODE(bdp);
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
+	sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
+
+	/*	If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
+	 *	generate a DMAPI read event.  Otherwise, if the DM_EVENT_READ
+	 *	bit is set for the file, generate a read event in order
+	 *	that the DMAPI application may do its thing before we return
+	 *	the extents.  Usually this means restoring user file data to
+	 *	regions of the file that look like holes.
+	 *
+	 *	The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
+	 *	BMV_IF_NO_DMAPI_READ so that read events are generated.
+	 *	If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
+	 *	could misinterpret holes in a DMAPI file as true holes,
+	 *	when in fact they may represent offline user data.
+	 */
+	if (   (interface & BMV_IF_NO_DMAPI_READ) == 0
+	    && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
+	    && whichfork == XFS_DATA_FORK) {
+
+		error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
+		if (error)
+			return XFS_ERROR(error);
+	}
+
+	if (whichfork == XFS_ATTR_FORK) {
+		if (XFS_IFORK_Q(ip)) {
+			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
+			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
+			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
+				return XFS_ERROR(EINVAL);
+		} else if (unlikely(
+			   ip->i_d.di_aformat != 0 &&
+			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
+			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
+					 ip->i_mount);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+	} else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+		   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
+		   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+		return XFS_ERROR(EINVAL);
+	if (whichfork == XFS_DATA_FORK) {
+		if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
+			prealloced = 1;
+			fixlen = XFS_MAXIOFFSET(mp);
+		} else {
+			prealloced = 0;
+			fixlen = ip->i_d.di_size;
+		}
+	} else {
+		prealloced = 0;
+		fixlen = 1LL << 32;
+	}
+
+	if (bmv->bmv_length == -1) {
+		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
+		bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
+					(__int64_t)0);
+	} else if (bmv->bmv_length < 0)
+		return XFS_ERROR(EINVAL);
+	if (bmv->bmv_length == 0) {
+		bmv->bmv_entries = 0;
+		return 0;
+	}
+	nex = bmv->bmv_count - 1;
+	if (nex <= 0)
+		return XFS_ERROR(EINVAL);
+	bmvend = bmv->bmv_offset + bmv->bmv_length;
+
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+	if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
+		/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
+		VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
+	}
+
+	ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
+
+	lock = xfs_ilock_map_shared(ip);
+
+	/*
+	 * Don't let nex be bigger than the number of extents
+	 * we can have assuming alternating holes and real extents.
+	 */
+	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
+		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
+
+	bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
+			((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
+
+	/*
+	 * Allocate enough space to handle "subnex" maps at a time.
+	 */
+	subnex = 16;
+	map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
+
+	bmv->bmv_entries = 0;
+
+	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) {
+		error = 0;
+		goto unlock_and_return;
+	}
+
+	nexleft = nex;
+
+	do {
+		nmap = (nexleft > subnex) ? subnex : nexleft;
+		error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+				  XFS_BB_TO_FSB(mp, bmv->bmv_length),
+				  bmapi_flags, NULL, 0, map, &nmap, NULL);
+		if (error)
+			goto unlock_and_return;
+		ASSERT(nmap <= subnex);
+
+		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+			nexleft--;
+			oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
+					BMV_OF_PREALLOC : 0;
+			out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
+			out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
+			if (prealloced &&
+			    map[i].br_startblock == HOLESTARTBLOCK &&
+			    out.bmv_offset + out.bmv_length == bmvend) {
+				/*
+				 * came to hole at end of file
+				 */
+				goto unlock_and_return;
+			} else {
+				out.bmv_block =
+				    (map[i].br_startblock == HOLESTARTBLOCK) ?
+					-1 :
+					XFS_FSB_TO_DB(ip, map[i].br_startblock);
+
+				/* return either getbmap/getbmapx structure. */
+				if (interface & BMV_IF_EXTENDED) {
+					struct	getbmapx	outx;
+
+					GETBMAP_CONVERT(out,outx);
+					outx.bmv_oflags = oflags;
+					outx.bmv_unused1 = outx.bmv_unused2 = 0;
+					if (copy_to_user(ap, &outx,
+							sizeof(outx))) {
+						error = XFS_ERROR(EFAULT);
+						goto unlock_and_return;
+					}
+				} else {
+					if (copy_to_user(ap, &out,
+							sizeof(out))) {
+						error = XFS_ERROR(EFAULT);
+						goto unlock_and_return;
+					}
+				}
+				bmv->bmv_offset =
+					out.bmv_offset + out.bmv_length;
+				bmv->bmv_length = MAX((__int64_t)0,
+					(__int64_t)(bmvend - bmv->bmv_offset));
+				bmv->bmv_entries++;
+				ap = (interface & BMV_IF_EXTENDED) ?
+						(void __user *)
+					((struct getbmapx __user *)ap + 1) :
+						(void __user *)
+					((struct getbmap __user *)ap + 1);
+			}
+		}
+	} while (nmap && nexleft && bmv->bmv_length);
+
+unlock_and_return:
+	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	kmem_free(map, subnex * sizeof(*map));
+
+	return error;
+}
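xfs_getbmap() is the kernel half of the XFS_IOC_GETBMAP and XFS_IOC_GETBMAPX ioctls. Below is a userspace sketch of the classic getbmap variant; it assumes the xfsprogs header <xfs/xfs.h> provides struct getbmap and XFS_IOC_GETBMAP, and it relies on the conventions visible above: bmv_offset, bmv_length and bmv_block are in 512-byte basic blocks, the first array element is the request header, and holes come back with bmv_block set to -1.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* struct getbmap, XFS_IOC_GETBMAP */

int
main(int argc, char **argv)
{
	struct getbmap	bmv[17];	/* header + room for 16 extents */
	int		fd, i;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	memset(bmv, 0, sizeof(bmv));
	bmv[0].bmv_offset = 0;		/* start of file, in 512-byte blocks */
	bmv[0].bmv_length = -1;		/* -1 means "through end of file" */
	bmv[0].bmv_count = 17;		/* array size, including the header */

	if (ioctl(fd, XFS_IOC_GETBMAP, bmv) < 0) {
		perror("XFS_IOC_GETBMAP");
		close(fd);
		return 1;
	}
	for (i = 1; i <= bmv[0].bmv_entries; i++)
		printf("offset %lld length %lld block %lld%s\n",
			(long long)bmv[i].bmv_offset,
			(long long)bmv[i].bmv_length,
			(long long)bmv[i].bmv_block,
			bmv[i].bmv_block == -1 ? " (hole)" : "");
	close(fd);
	return 0;
}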
+
+/*
+ * Check the last inode extent to determine whether this allocation will result
+ * in blocks being allocated at the end of the file. When we allocate new data
+ * blocks at the end of the file which do not start at the previous data block,
+ * we will try to align the new blocks at stripe unit boundaries.
+ */
+int					/* error */
+xfs_bmap_isaeof(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_fileoff_t   off,		/* file offset in fsblocks */
+	int             whichfork,	/* data or attribute fork */
+	char		*aeof)		/* return value */
+{
+	int		error;		/* error return value */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_bmbt_rec_t	*lastrec;	/* extent list entry pointer */
+	xfs_extnum_t	nextents;	/* size of extent list */
+	xfs_bmbt_irec_t	s;		/* expanded extent list entry */
+
+	ASSERT(whichfork == XFS_DATA_FORK);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(NULL, ip, whichfork)))
+		return error;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	if (nextents == 0) {
+		*aeof = 1;
+		return 0;
+	}
+	/*
+	 * Go to the last extent
+	 */
+	lastrec = &ifp->if_u1.if_extents[nextents - 1];
+	xfs_bmbt_get_all(lastrec, &s);
+	/*
+	 * Check we are allocating in the last extent (for delayed allocations)
+	 * or past the last extent for non-delayed allocations.
+	 */
+	*aeof = (off >= s.br_startoff &&
+		 off < s.br_startoff + s.br_blockcount &&
+		 ISNULLSTARTBLOCK(s.br_startblock)) ||
+		off >= s.br_startoff + s.br_blockcount;
+	return 0;
+}
+
+/*
+ * Check if the endoff is outside the last extent. If so the caller will grow
+ * the allocation to a stripe unit boundary.
+ */
+int					/* error */
+xfs_bmap_eof(
+	xfs_inode_t	*ip,		/* incore inode pointer */
+	xfs_fileoff_t	endoff,		/* file offset in fsblocks */
+	int		whichfork,	/* data or attribute fork */
+	int		*eof)		/* result value */
+{
+	xfs_fsblock_t	blockcount;	/* extent block count */
+	int		error;		/* error return value */
+	xfs_ifork_t	*ifp;		/* inode fork pointer */
+	xfs_bmbt_rec_t	*lastrec;	/* extent list entry pointer */
+	xfs_extnum_t	nextents;	/* size of extent list */
+	xfs_fileoff_t	startoff;	/* extent starting file offset */
+
+	ASSERT(whichfork == XFS_DATA_FORK);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+	    (error = xfs_iread_extents(NULL, ip, whichfork)))
+		return error;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	if (nextents == 0) {
+		*eof = 1;
+		return 0;
+	}
+	/*
+	 * Go to the last extent
+	 */
+	lastrec = &ifp->if_u1.if_extents[nextents - 1];
+	startoff = xfs_bmbt_get_startoff(lastrec);
+	blockcount = xfs_bmbt_get_blockcount(lastrec);
+	*eof = endoff >= startoff + blockcount;
+	return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check that the extents list for the inode ip is in the right order.
+ */
+STATIC void
+xfs_bmap_check_extents(
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	int			whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_rec_t		*base;		/* base of extents list */
+	xfs_bmbt_rec_t		*ep;		/* current extent entry */
+	xfs_ifork_t		*ifp;		/* inode fork pointer */
+	xfs_extnum_t		nextents;	/* number of extents in list */
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+	base = ifp->if_u1.if_extents;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	for (ep = base; ep < &base[nextents - 1]; ep++) {
+		xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
+			(void *)(ep + 1));
+	}
+}
+
+STATIC
+xfs_buf_t *
+xfs_bmap_get_bp(
+	xfs_btree_cur_t         *cur,
+	xfs_fsblock_t		bno)
+{
+	int i;
+	xfs_buf_t *bp;
+
+	if (!cur)
+		return(NULL);
+
+	bp = NULL;
+	for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
+		bp = cur->bc_bufs[i];
+		if (!bp) break;
+		if (XFS_BUF_ADDR(bp) == bno)
+			break;	/* Found it */
+	}
+	if (i == XFS_BTREE_MAXLEVELS)
+		bp = NULL;
+
+	if (!bp) { /* Chase down all the log items to see if the bp is there */
+		xfs_log_item_chunk_t    *licp;
+		xfs_trans_t		*tp;
+
+		tp = cur->bc_tp;
+		licp = &tp->t_items;
+		while (!bp && licp != NULL) {
+			if (XFS_LIC_ARE_ALL_FREE(licp)) {
+				licp = licp->lic_next;
+				continue;
+			}
+			for (i = 0; i < licp->lic_unused; i++) {
+				xfs_log_item_desc_t	*lidp;
+				xfs_log_item_t		*lip;
+				xfs_buf_log_item_t	*bip;
+				xfs_buf_t		*lbp;
+
+				if (XFS_LIC_ISFREE(licp, i)) {
+					continue;
+				}
+
+				lidp = XFS_LIC_SLOT(licp, i);
+				lip = lidp->lid_item;
+				if (lip->li_type != XFS_LI_BUF)
+					continue;
+
+				bip = (xfs_buf_log_item_t *)lip;
+				lbp = bip->bli_buf;
+
+				if (XFS_BUF_ADDR(lbp) == bno) {
+					bp = lbp;
+					break; /* Found it */
+				}
+			}
+			licp = licp->lic_next;
+		}
+	}
+	return(bp);
+}
+
+void
+xfs_check_block(
+	xfs_bmbt_block_t        *block,
+	xfs_mount_t		*mp,
+	int			root,
+	short			sz)
+{
+	int			i, j, dmxr;
+	xfs_bmbt_ptr_t		*pp, *thispa;	/* pointer to block address */
+	xfs_bmbt_key_t		*prevp, *keyp;
+
+	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+
+	prevp = NULL;
+	for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) {
+		dmxr = mp->m_bmap_dmxr[0];
+
+		if (root) {
+			keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
+		} else {
+			keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize,
+				xfs_bmbt, block, i, dmxr);
+		}
+
+		if (prevp) {
+			xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
+		}
+		prevp = keyp;
+
+		/*
+		 * Compare the block numbers to see if there are dups.
+		 */
+
+		if (root) {
+			pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
+		} else {
+			pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
+				xfs_bmbt, block, i, dmxr);
+		}
+		for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) {
+			if (root) {
+				thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
+			} else {
+				thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
+					xfs_bmbt, block, j, dmxr);
+			}
+			if (INT_GET(*thispa, ARCH_CONVERT) ==
+			    INT_GET(*pp, ARCH_CONVERT)) {
+				cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
+					__FUNCTION__, j, i,
+					INT_GET(*thispa, ARCH_CONVERT));
+				panic("%s: ptrs are equal in node\n",
+					__FUNCTION__);
+			}
+		}
+	}
+}
+
+/*
+ * Check that the extents for the inode ip are in the right order in all
+ * btree leaves.
+ */
+
+STATIC void
+xfs_bmap_check_leaf_extents(
+	xfs_btree_cur_t		*cur,	/* btree cursor or null */
+	xfs_inode_t		*ip,		/* incore inode pointer */
+	int			whichfork)	/* data or attr fork */
+{
+	xfs_bmbt_block_t	*block;	/* current btree block */
+	xfs_fsblock_t		bno;	/* block # of "block" */
+	xfs_buf_t		*bp;	/* buffer for "block" */
+	int			error;	/* error return value */
+	xfs_extnum_t		i=0;	/* index into the extents list */
+	xfs_ifork_t		*ifp;	/* fork structure */
+	int			level;	/* btree level, for checking */
+	xfs_mount_t		*mp;	/* file system mount structure */
+	xfs_bmbt_ptr_t		*pp;	/* pointer to block address */
+	xfs_bmbt_rec_t		*ep, *lastp;	/* extent pointers in block entry */
+	int			bp_release = 0;
+
+	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
+		return;
+	}
+
+	bno = NULLFSBLOCK;
+	mp = ip->i_mount;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	block = ifp->if_broot;
+	/*
+	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+	 */
+	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
+	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
+	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
+	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
+	ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
+	bno = INT_GET(*pp, ARCH_CONVERT);
+	/*
+	 * Go down the tree until leaf level is reached, following the first
+	 * pointer (leftmost) at each level.
+	 */
+	while (level-- > 0) {
+		/* See if buf is in cur first */
+		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+		if (bp) {
+			bp_release = 0;
+		} else {
+			bp_release = 1;
+		}
+		if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+			goto error_norelse;
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		XFS_WANT_CORRUPTED_GOTO(
+			XFS_BMAP_SANITY_CHECK(mp, block, level),
+			error0);
+		if (level == 0)
+			break;
+
+		/*
+		 * Check this block for basic sanity (increasing keys and
+		 * no duplicate blocks).
+		 */
+
+		xfs_check_block(block, mp, 0, 0);
+		pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
+			1, mp->m_bmap_dmxr[1]);
+		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0);
+		bno = INT_GET(*pp, ARCH_CONVERT);
+		if (bp_release) {
+			bp_release = 0;
+			xfs_trans_brelse(NULL, bp);
+		}
+	}
+
+	/*
+	 * Here with bp and block set to the leftmost leaf node in the tree.
+	 */
+	i = 0;
+
+	/*
+	 * Loop over all leaf nodes checking that all extents are in the right order.
+	 */
+	lastp = NULL;
+	for (;;) {
+		xfs_bmbt_rec_t	*frp;
+		xfs_fsblock_t	nextbno;
+		xfs_extnum_t	num_recs;
+
+
+		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+
+		/*
+		 * Read-ahead the next leaf block, if any.
+		 */
+
+		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+
+		/*
+		 * Check all the extents to make sure they are OK.
+		 * If we had a previous block, the last entry should
+		 * conform with the first entry in this one.
+		 */
+
+		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
+			block, 1, mp->m_bmap_dmxr[0]);
+
+		for (ep = frp;ep < frp + (num_recs - 1); ep++) {
+			if (lastp) {
+				xfs_btree_check_rec(XFS_BTNUM_BMAP,
+					(void *)lastp, (void *)ep);
+			}
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
+				(void *)(ep + 1));
+		}
+		lastp = frp + num_recs - 1; /* For the next iteration */
+
+		i += num_recs;
+		if (bp_release) {
+			bp_release = 0;
+			xfs_trans_brelse(NULL, bp);
+		}
+		bno = nextbno;
+		/*
+		 * If we've reached the end, stop.
+		 */
+		if (bno == NULLFSBLOCK)
+			break;
+
+		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+		if (bp) {
+			bp_release = 0;
+		} else {
+			bp_release = 1;
+		}
+		if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+			goto error_norelse;
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+	}
+	if (bp_release) {
+		bp_release = 0;
+		xfs_trans_brelse(NULL, bp);
+	}
+	return;
+
+error0:
+	cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
+	if (bp_release)
+		xfs_trans_brelse(NULL, bp);
+error_norelse:
+	cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
+		__FUNCTION__, i);
+	panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
+	return;
+}
+#endif
+
+/*
+ * Count fsblocks of the given fork.
+ */
+int						/* error */
+xfs_bmap_count_blocks(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode */
+	int			whichfork,	/* data or attr fork */
+	int			*count)		/* out: count of blocks */
+{
+	xfs_bmbt_block_t	*block;	/* current btree block */
+	xfs_fsblock_t		bno;	/* block # of "block" */
+	xfs_ifork_t		*ifp;	/* fork structure */
+	int			level;	/* btree level, for checking */
+	xfs_mount_t		*mp;	/* file system mount structure */
+	xfs_bmbt_ptr_t		*pp;	/* pointer to block address */
+
+	bno = NULLFSBLOCK;
+	mp = ip->i_mount;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
+		if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents,
+			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
+			count) < 0)) {
+			XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
+					 XFS_ERRLEVEL_LOW, mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		return 0;
+	}
+
+	/*
+	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+	 */
+	block = ifp->if_broot;
+	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
+	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
+	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
+	ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
+	bno = INT_GET(*pp, ARCH_CONVERT);
+
+	if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) {
+		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
+				 mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	return 0;
+}
+
+/*
+ * Recursively walks each level of a btree
+ * to count total fsblocks in use.
+ */
+int                                     /* error */
+xfs_bmap_count_tree(
+	xfs_mount_t     *mp,            /* file system mount point */
+	xfs_trans_t     *tp,            /* transaction pointer */
+	xfs_fsblock_t   blockno,	/* file system block number */
+	int             levelin,	/* level in btree */
+	int		*count)		/* Count of blocks */
+{
+	int			error;
+	xfs_buf_t		*bp, *nbp;
+	int			level = levelin;
+	xfs_bmbt_ptr_t          *pp;
+	xfs_fsblock_t           bno = blockno;
+	xfs_fsblock_t		nextbno;
+	xfs_bmbt_block_t        *block, *nextblock;
+	int			numrecs;
+	xfs_bmbt_rec_t		*frp;
+
+	if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
+		return error;
+	*count += 1;
+	block = XFS_BUF_TO_BMBT_BLOCK(bp);
+
+	if (--level) {
+		/* Not yet at the level above the leaves, count this level of nodes */
+		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		while (nextbno != NULLFSBLOCK) {
+			if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
+				0, &nbp, XFS_BMAP_BTREE_REF)))
+				return error;
+			*count += 1;
+			nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
+			nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT);
+			xfs_trans_brelse(tp, nbp);
+		}
+
+		/* Dive to the next level */
+		pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
+			xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+		bno = INT_GET(*pp, ARCH_CONVERT);
+		if (unlikely((error =
+		     xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) {
+			xfs_trans_brelse(tp, bp);
+			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
+					 XFS_ERRLEVEL_LOW, mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		xfs_trans_brelse(tp, bp);
+	} else {
+		/* count all level 1 nodes and their leaves */
+		for (;;) {
+			nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+			numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+			frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
+				xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
+			if (unlikely(xfs_bmap_count_leaves(frp, numrecs, count) < 0)) {
+				xfs_trans_brelse(tp, bp);
+				XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
+						 XFS_ERRLEVEL_LOW, mp);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+			xfs_trans_brelse(tp, bp);
+			if (nextbno == NULLFSBLOCK)
+				break;
+			bno = nextbno;
+			if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+				return error;
+			*count += 1;
+			block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Count leaf blocks given a pointer to an extent list.
+ */
+int
+xfs_bmap_count_leaves(
+	xfs_bmbt_rec_t		*frp,
+	int			numrecs,
+	int			*count)
+{
+	int		b;
+
+	for ( b = 1; b <= numrecs; b++, frp++)
+		*count += xfs_bmbt_disk_get_blockcount(frp);
+	return 0;
+}
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
new file mode 100644
index 0000000..f1bc22f
--- /dev/null
+++ b/fs/xfs/xfs_bmap.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_BMAP_H__
+#define	__XFS_BMAP_H__
+
+struct getbmap;
+struct xfs_bmbt_irec;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * List of extents to be free "later".
+ * The list is kept sorted on xbf_startblock.
+ */
+typedef struct xfs_bmap_free_item
+{
+	xfs_fsblock_t		xbfi_startblock;/* starting fs block number */
+	xfs_extlen_t		xbfi_blockcount;/* number of blocks in extent */
+	struct xfs_bmap_free_item *xbfi_next;	/* link to next entry */
+} xfs_bmap_free_item_t;
+
+/*
+ * Header for free extent list.
+ */
+typedef	struct xfs_bmap_free
+{
+	xfs_bmap_free_item_t	*xbf_first;	/* list of to-be-free extents */
+	int			xbf_count;	/* count of items on list */
+	int			xbf_low;	/* kludge: alloc in low mode */
+} xfs_bmap_free_t;
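xfs_bmap_free_t is a singly linked list kept ordered by starting block; xfs_bmap_add_free(), declared further down, inserts entries so that ordering is preserved. A minimal standalone sketch of that insertion discipline follows, with hypothetical free_item and free_head types standing in for the structures above.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

struct free_item {			/* stand-in for xfs_bmap_free_item_t */
	unsigned long long	startblock;
	unsigned long long	blockcount;
	struct free_item	*next;
};

struct free_head {			/* stand-in for xfs_bmap_free_t */
	struct free_item	*first;	/* kept sorted by startblock */
	int			count;
};

static void
add_free(struct free_head *flist, unsigned long long bno,
	 unsigned long long len)
{
	struct free_item	*new, *cur, **prevp;

	new = malloc(sizeof(*new));
	new->startblock = bno;
	new->blockcount = len;

	/* walk until the next entry starts at or beyond bno */
	for (prevp = &flist->first; (cur = *prevp) != NULL; prevp = &cur->next)
		if (cur->startblock >= bno)
			break;
	new->next = cur;
	*prevp = new;
	flist->count++;
}

int
main(void)
{
	struct free_head	flist = { NULL, 0 };
	struct free_item	*fp;

	add_free(&flist, 500, 8);
	add_free(&flist, 20, 4);
	add_free(&flist, 120, 16);
	for (fp = flist.first; fp != NULL; fp = fp->next)
		printf("%llu+%llu\n", fp->startblock, fp->blockcount);
	return 0;
}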
+
+#define	XFS_BMAP_MAX_NMAP	4
+
+/*
+ * Flags for xfs_bmapi
+ */
+#define	XFS_BMAPI_WRITE		0x001	/* write operation: allocate space */
+#define XFS_BMAPI_DELAY		0x002	/* delayed write operation */
+#define XFS_BMAPI_ENTIRE	0x004	/* return entire extent, not trimmed */
+#define XFS_BMAPI_METADATA	0x008	/* mapping metadata not user data */
+#define XFS_BMAPI_EXACT		0x010	/* allocate only to spec'd bounds */
+#define XFS_BMAPI_ATTRFORK	0x020	/* use attribute fork not data */
+#define XFS_BMAPI_ASYNC		0x040	/* bunmapi xactions can be async */
+#define XFS_BMAPI_RSVBLOCKS	0x080	/* OK to alloc. reserved data blocks */
+#define	XFS_BMAPI_PREALLOC	0x100	/* preallocation op: unwritten space */
+#define	XFS_BMAPI_IGSTATE	0x200	/* Ignore state - */
+					/* combine contig. space */
+#define	XFS_BMAPI_CONTIG	0x400	/* must allocate only one extent */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAPI_AFLAG)
+int xfs_bmapi_aflag(int w);
+#define	XFS_BMAPI_AFLAG(w)	xfs_bmapi_aflag(w)
+#else
+#define	XFS_BMAPI_AFLAG(w)	((w) == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0)
+#endif
+
+/*
+ * Special values for xfs_bmbt_irec_t br_startblock field.
+ */
+#define	DELAYSTARTBLOCK		((xfs_fsblock_t)-1LL)
+#define	HOLESTARTBLOCK		((xfs_fsblock_t)-2LL)
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_INIT)
+void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp);
+#define	XFS_BMAP_INIT(flp,fbp)	xfs_bmap_init(flp,fbp)
+#else
+#define	XFS_BMAP_INIT(flp,fbp)	\
+	((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
+	 (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK)
+#endif
+
+/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+typedef struct xfs_bmalloca {
+	xfs_fsblock_t		firstblock; /* i/o first block allocated */
+	xfs_fsblock_t		rval;	/* starting block of new extent */
+	xfs_fileoff_t		off;	/* offset in file filling in */
+	struct xfs_trans	*tp;	/* transaction pointer */
+	struct xfs_inode	*ip;	/* incore inode pointer */
+	struct xfs_bmbt_irec	*prevp;	/* extent before the new one */
+	struct xfs_bmbt_irec	*gotp;	/* extent after, or delayed */
+	xfs_extlen_t		alen;	/* i/o length asked/allocated */
+	xfs_extlen_t		total;	/* total blocks needed for xaction */
+	xfs_extlen_t		minlen;	/* minimum allocation size (blocks) */
+	xfs_extlen_t		minleft; /* amount must be left after alloc */
+	char			eof;	/* set if allocating past last extent */
+	char			wasdel;	/* replacing a delayed allocation */
+	char			userdata;/* set if is user data */
+	char			low;	/* low on space, using seq'l ags */
+	char			aeof;   /* allocated space at eof */
+} xfs_bmalloca_t;
+
+#ifdef __KERNEL__
+
+#if defined(XFS_BMAP_TRACE)
+/*
+ * Trace operations for bmap extent tracing
+ */
+#define	XFS_BMAP_KTRACE_DELETE	1
+#define	XFS_BMAP_KTRACE_INSERT	2
+#define	XFS_BMAP_KTRACE_PRE_UP	3
+#define	XFS_BMAP_KTRACE_POST_UP	4
+
+#define	XFS_BMAP_TRACE_SIZE	4096	/* size of global trace buffer */
+#define	XFS_BMAP_KTRACE_SIZE	32	/* size of per-inode trace buffer */
+extern ktrace_t	*xfs_bmap_trace_buf;
+
+/*
+ * Add bmap trace insert entries for all the contents of the extent list.
+ */
+void
+xfs_bmap_trace_exlist(
+	char			*fname,		/* function name */
+	struct xfs_inode	*ip,		/* incore inode pointer */
+	xfs_extnum_t		cnt,		/* count of entries in list */
+	int			whichfork);	/* data or attr fork */
+#else
+#define	xfs_bmap_trace_exlist(f,ip,c,w)
+#endif
+
+/*
+ * Convert inode from non-attributed to attributed.
+ * Must not be in a transaction, ip must not be locked.
+ */
+int					/* error code */
+xfs_bmap_add_attrfork(
+	struct xfs_inode	*ip,	/* incore inode pointer */
+	int			rsvd);	/* flag for reserved block allocation */
+
+/*
+ * Add the extent to the list of extents to be free at transaction end.
+ * The list is maintained sorted (by block number).
+ */
+void
+xfs_bmap_add_free(
+	xfs_fsblock_t		bno,		/* fs block number of extent */
+	xfs_filblks_t		len,		/* length of extent */
+	xfs_bmap_free_t		*flist,		/* list of extents */
+	struct xfs_mount	*mp);		/* mount point structure */
+
+/*
+ * Routine to clean up the free list data structure when
+ * an error occurs during a transaction.
+ */
+void
+xfs_bmap_cancel(
+	xfs_bmap_free_t		*flist);	/* free list to clean up */
+
+/*
+ * Compute and fill in the value of the maximum depth of a bmap btree
+ * in this filesystem.  Done once, during mount.
+ */
+void
+xfs_bmap_compute_maxlevels(
+	struct xfs_mount	*mp,	/* file system mount structure */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
+ * caller.  Frees all the extents that need freeing, which must be done
+ * last due to locking considerations.
+ *
+ * Return 1 if the given transaction was committed and a new one allocated,
+ * and 0 otherwise.
+ */
+int						/* error */
+xfs_bmap_finish(
+	struct xfs_trans	**tp,		/* transaction pointer addr */
+	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
+	xfs_fsblock_t		firstblock,	/* controlled a.g. for allocs */
+	int			*committed);	/* xact committed or not */
+
+/*
+ * Returns the file-relative block number of the first unused block in the file.
+ * This is the lowest-address hole if the file has holes, else the first block
+ * past the end of file.
+ */
+int						/* error */
+xfs_bmap_first_unused(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_extlen_t		len,		/* size of hole to find */
+	xfs_fileoff_t		*unused,	/* unused block num */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Returns the file-relative block number of the last block + 1 before
+ * last_block (input value) in the file.
+ * This is not based on i_size, it is based on the extent list.
+ * Returns 0 for local files, as they do not have an extent list.
+ */
+int						/* error */
+xfs_bmap_last_before(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		*last_block,	/* last block */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Returns the file-relative block number of the first block past eof in
+ * the file.  This is not based on i_size, it is based on the extent list.
+ * Returns 0 for local files, as they do not have an extent list.
+ */
+int						/* error */
+xfs_bmap_last_offset(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		*unused,	/* last block num */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Returns whether the selected fork of the inode has exactly one
+ * block or not.  For the data fork we check this matches di_size,
+ * implying the file's range is 0..bsize-1.
+ */
+int
+xfs_bmap_one_block(
+	struct xfs_inode	*ip,		/* incore inode */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Read in the extents to iu_extents.
+ * All inode fields are set up by caller, we just traverse the btree
+ * and copy the records in.
+ */
+int						/* error */
+xfs_bmap_read_extents(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Map file blocks to filesystem blocks.
+ * File range is given by the bno/len pair.
+ * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set)
+ * into a hole or past eof.
+ * Only allocates blocks from a single allocation group,
+ * to avoid locking problems.
+ * The returned value in "firstblock" from the first call in a transaction
+ * must be remembered and presented to subsequent calls in "firstblock".
+ * An upper bound for the number of blocks to be allocated is supplied to
+ * the first call in "total"; if no allocation group has that many free
+ * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
+ */
+int						/* error */
+xfs_bmapi(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		bno,		/* starting file offs. mapped */
+	xfs_filblks_t		len,		/* length to map in file */
+	int			flags,		/* XFS_BMAPI_... */
+	xfs_fsblock_t		*firstblock,	/* first allocated block
+						   controls a.g. for allocs */
+	xfs_extlen_t		total,		/* total blocks needed */
+	struct xfs_bmbt_irec	*mval,		/* output: map values */
+	int			*nmap,		/* i/o: mval size/count */
+	xfs_bmap_free_t		*flist);	/* i/o: list extents to free */
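+
+/*
+ * A rough usage sketch of the calling convention described above (purely
+ * illustrative, not a real call site; locking, transaction setup/commit
+ * and most error handling are elided, tp/ip/bno/len/total come from the
+ * caller, and XFS_BMAP_INIT and XFS_BMAP_MAX_NMAP are assumed from their
+ * definitions elsewhere in this header):
+ *
+ *	xfs_fsblock_t		firstblock = NULLFSBLOCK;
+ *	xfs_bmap_free_t		flist;
+ *	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
+ *	int			nmap = XFS_BMAP_MAX_NMAP;
+ *	int			committed;
+ *	int			error;
+ *
+ *	XFS_BMAP_INIT(&flist, &firstblock);
+ *	error = xfs_bmapi(tp, ip, bno, len, XFS_BMAPI_WRITE, &firstblock,
+ *			total, map, &nmap, &flist);
+ *	// on success, map[0..nmap-1] describe the resulting mappings
+ *	if (!error)
+ *		error = xfs_bmap_finish(&tp, &flist, firstblock, &committed);
+ *	if (error)
+ *		xfs_bmap_cancel(&flist);
+ */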
+
+/*
+ * Map file blocks to filesystem blocks, simple version.
+ * One block only, read-only.
+ * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
+ * For the other flag values, the effect is as if XFS_BMAPI_METADATA
+ * was set and all the others were clear.
+ */
+int						/* error */
+xfs_bmapi_single(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	int			whichfork,	/* data or attr fork */
+	xfs_fsblock_t		*fsb,		/* output: mapped block */
+	xfs_fileoff_t		bno);		/* starting file offs. mapped */
+
+/*
+ * Unmap (remove) blocks from a file.
+ * If nexts is nonzero then the number of extents to remove is limited to
+ * that value.  *done is set once the whole range has been unmapped; if the
+ * operation stops early (for example because the extent limit is hit),
+ * *done is left clear so the caller can call again.
+ */
+int						/* error */
+xfs_bunmapi(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		bno,		/* starting offset to unmap */
+	xfs_filblks_t		len,		/* length to unmap in file */
+	int			flags,		/* XFS_BMAPI_... */
+	xfs_extnum_t		nexts,		/* number of extents max */
+	xfs_fsblock_t		*firstblock,	/* first allocated block
+						   controls a.g. for allocs */
+	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
+	int			*done);		/* set when the unmap is complete */
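+
+/*
+ * A minimal caller sketch (illustrative only; the transaction bookkeeping
+ * that real callers perform between calls is elided):
+ *
+ *	do {
+ *		error = xfs_bunmapi(tp, ip, bno, len, 0, nexts,
+ *				&firstblock, &flist, &done);
+ *	} while (!error && !done);
+ */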
+
+/*
+ * Fcntl interface to xfs_bmapi.
+ */
+int						/* error code */
+xfs_getbmap(
+	bhv_desc_t		*bdp,		/* XFS behavior descriptor */
+	struct getbmap		*bmv,		/* user bmap structure */
+	void			__user *ap,	/* pointer to user's array */
+	int			iflags);	/* interface flags */
+
+/*
+ * Check the last inode extent to determine whether this allocation will result
+ * in blocks being allocated at the end of the file. When we allocate new data
+ * blocks at the end of the file which do not start at the previous data block,
+ * we will try to align the new blocks at stripe unit boundaries.
+ */
+int
+xfs_bmap_isaeof(
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		off,
+	int			whichfork,
+	char			*aeof);
+
+/*
+ * Check if the endoff is outside the last extent.  If so, the caller will
+ * grow the allocation to a stripe unit boundary.
+ */
+int
+xfs_bmap_eof(
+	struct xfs_inode        *ip,
+	xfs_fileoff_t           endoff,
+	int                     whichfork,
+	int                     *eof);
+
+/*
+ * Count fsblocks of the given fork.
+ */
+int
+xfs_bmap_count_blocks(
+	xfs_trans_t		*tp,
+	struct xfs_inode	*ip,
+	int			whichfork,
+	int			*count);
+
+/*
+ * Check an extent list, which has just been read, for
+ * any bit in the extent flag field.
+ */
+int
+xfs_check_nostate_extents(
+	xfs_bmbt_rec_t		*ep,
+	xfs_extnum_t		num);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
new file mode 100644
index 0000000..163305a
--- /dev/null
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -0,0 +1,2807 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_itable.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+
+#if defined(XFS_BMBT_TRACE)
+ktrace_t	*xfs_bmbt_trace_buf;
+#endif
+
+/*
+ * Prototypes for internal btree functions.
+ */
+
+
+STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
+STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
+		xfs_bmbt_key_t *, xfs_btree_cur_t **, int *);
+STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
+
+
+#if defined(XFS_BMBT_TRACE)
+
+static char	ARGS[] = "args";
+static char	ENTRY[] = "entry";
+static char	ERROR[] = "error";
+#undef EXIT
+static char	EXIT[] = "exit";
+
+/*
+ * Add a trace buffer entry for the arguments given to the routine,
+ * generic form.
+ */
+STATIC void
+xfs_bmbt_trace_enter(
+	char		*func,
+	xfs_btree_cur_t	*cur,
+	char		*s,
+	int		type,
+	int		line,
+	__psunsigned_t	a0,
+	__psunsigned_t	a1,
+	__psunsigned_t	a2,
+	__psunsigned_t	a3,
+	__psunsigned_t	a4,
+	__psunsigned_t	a5,
+	__psunsigned_t	a6,
+	__psunsigned_t	a7,
+	__psunsigned_t	a8,
+	__psunsigned_t	a9,
+	__psunsigned_t	a10)
+{
+	xfs_inode_t	*ip;
+	int		whichfork;
+
+	ip = cur->bc_private.b.ip;
+	whichfork = cur->bc_private.b.whichfork;
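+	/*
+	 * The first trace word packs the entry type in the low byte, the
+	 * fork in bits 8-15 and the source line number in bits 16 and up.
+	 */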
+	ktrace_enter(xfs_bmbt_trace_buf,
+		(void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
+		(void *)func, (void *)s, (void *)ip, (void *)cur,
+		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
+		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
+		(void *)a8, (void *)a9, (void *)a10);
+	ASSERT(ip->i_btrace);
+	ktrace_enter(ip->i_btrace,
+		(void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
+		(void *)func, (void *)s, (void *)ip, (void *)cur,
+		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
+		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
+		(void *)a8, (void *)a9, (void *)a10);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for a buffer & 1 integer arg.
+ */
+STATIC void
+xfs_bmbt_trace_argbi(
+	char		*func,
+	xfs_btree_cur_t	*cur,
+	xfs_buf_t	*b,
+	int		i,
+	int		line)
+{
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line,
+		(__psunsigned_t)b, i, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0);
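+	/*
+	 * Descend from the root, binary searching each block for the start
+	 * offset in cur->bc_rec.b; at interior levels follow the chosen
+	 * block pointer down, recording the path in the cursor.
+	 */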
+}
+
+/*
+ * Add a trace buffer entry for arguments, for a buffer & 2 integer args.
+ */
+STATIC void
+xfs_bmbt_trace_argbii(
+	char		*func,
+	xfs_btree_cur_t	*cur,
+	xfs_buf_t	*b,
+	int		i0,
+	int		i1,
+	int		line)
+{
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line,
+		(__psunsigned_t)b, i0, i1, 0,
+		0, 0, 0, 0,
+		0, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for 3 block-length args
+ * and an integer arg.
+ */
+STATIC void
+xfs_bmbt_trace_argfffi(
+	char			*func,
+	xfs_btree_cur_t		*cur,
+	xfs_dfiloff_t		o,
+	xfs_dfsbno_t		b,
+	xfs_dfilblks_t		i,
+	int			j,
+	int			line)
+{
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line,
+		o >> 32, (int)o, b >> 32, (int)b,
+		i >> 32, (int)i, (int)j, 0,
+		0, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for one integer arg.
+ */
+STATIC void
+xfs_bmbt_trace_argi(
+	char		*func,
+	xfs_btree_cur_t	*cur,
+	int		i,
+	int		line)
+{
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line,
+		i, 0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for int, fsblock, key.
+ */
+STATIC void
+xfs_bmbt_trace_argifk(
+	char			*func,
+	xfs_btree_cur_t		*cur,
+	int			i,
+	xfs_fsblock_t		f,
+	xfs_bmbt_key_t		*k,
+	int			line)
+{
+	xfs_dfsbno_t		d;
+	xfs_dfiloff_t		o;
+
+	d = (xfs_dfsbno_t)f;
+	o = INT_GET(k->br_startoff, ARCH_CONVERT);
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
+		i, d >> 32, (int)d, o >> 32,
+		(int)o, 0, 0, 0,
+		0, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for int, fsblock, rec.
+ */
+STATIC void
+xfs_bmbt_trace_argifr(
+	char			*func,
+	xfs_btree_cur_t		*cur,
+	int			i,
+	xfs_fsblock_t		f,
+	xfs_bmbt_rec_t		*r,
+	int			line)
+{
+	xfs_dfsbno_t		b;
+	xfs_dfilblks_t		c;
+	xfs_dfsbno_t		d;
+	xfs_dfiloff_t		o;
+	xfs_bmbt_irec_t		s;
+
+	d = (xfs_dfsbno_t)f;
+	xfs_bmbt_disk_get_all(r, &s);
+	o = (xfs_dfiloff_t)s.br_startoff;
+	b = (xfs_dfsbno_t)s.br_startblock;
+	c = s.br_blockcount;
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line,
+		i, d >> 32, (int)d, o >> 32,
+		(int)o, b >> 32, (int)b, c >> 32,
+		(int)c, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for arguments, for int, key.
+ */
+STATIC void
+xfs_bmbt_trace_argik(
+	char			*func,
+	xfs_btree_cur_t		*cur,
+	int			i,
+	xfs_bmbt_key_t		*k,
+	int			line)
+{
+	xfs_dfiloff_t		o;
+
+	o = INT_GET(k->br_startoff, ARCH_CONVERT);
+	xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
+		i, o >> 32, (int)o, 0,
+		0, 0, 0, 0,
+		0, 0, 0);
+}
+
+/*
+ * Add a trace buffer entry for the cursor/operation.
+ */
+STATIC void
+xfs_bmbt_trace_cursor(
+	char		*func,
+	xfs_btree_cur_t	*cur,
+	char		*s,
+	int		line)
+{
+	xfs_bmbt_rec_t	r;
+
+	xfs_bmbt_set_all(&r, &cur->bc_rec.b);
+	xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line,
+		(cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) |
+		cur->bc_private.b.allocated,
+		INT_GET(r.l0, ARCH_CONVERT) >> 32, (int)INT_GET(r.l0, ARCH_CONVERT), INT_GET(r.l1, ARCH_CONVERT) >> 32, (int)INT_GET(r.l1, ARCH_CONVERT),
+		(unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1],
+		(unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3],
+		(cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1],
+		(cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]);
+}
+
+#define	XFS_BMBT_TRACE_ARGBI(c,b,i)	\
+	xfs_bmbt_trace_argbi(fname, c, b, i, __LINE__)
+#define	XFS_BMBT_TRACE_ARGBII(c,b,i,j)	\
+	xfs_bmbt_trace_argbii(fname, c, b, i, j, __LINE__)
+#define	XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)	\
+	xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__)
+#define	XFS_BMBT_TRACE_ARGI(c,i)	\
+	xfs_bmbt_trace_argi(fname, c, i, __LINE__)
+#define	XFS_BMBT_TRACE_ARGIFK(c,i,f,k)	\
+	xfs_bmbt_trace_argifk(fname, c, i, f, k, __LINE__)
+#define	XFS_BMBT_TRACE_ARGIFR(c,i,f,r)	\
+	xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__)
+#define	XFS_BMBT_TRACE_ARGIK(c,i,k)	\
+	xfs_bmbt_trace_argik(fname, c, i, k, __LINE__)
+#define	XFS_BMBT_TRACE_CURSOR(c,s)	\
+	xfs_bmbt_trace_cursor(fname, c, s, __LINE__)
+#else
+#define	XFS_BMBT_TRACE_ARGBI(c,b,i)
+#define	XFS_BMBT_TRACE_ARGBII(c,b,i,j)
+#define	XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)
+#define	XFS_BMBT_TRACE_ARGI(c,i)
+#define	XFS_BMBT_TRACE_ARGIFK(c,i,f,k)
+#define	XFS_BMBT_TRACE_ARGIFR(c,i,f,r)
+#define	XFS_BMBT_TRACE_ARGIK(c,i,k)
+#define	XFS_BMBT_TRACE_CURSOR(c,s)
+#endif	/* XFS_BMBT_TRACE */
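+
+/*
+ * Each routine below declares a local fname[] under XFS_BMBT_TRACE and
+ * brackets its body with XFS_BMBT_TRACE_CURSOR(cur, ENTRY/EXIT/ERROR),
+ * so entry, exit and error paths show up in both trace buffers.
+ */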
+
+
+/*
+ * Internal functions.
+ */
+
+/*
+ * Delete record pointed to by cur/level.
+ */
+STATIC int					/* error */
+xfs_bmbt_delrec(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	int			*stat)		/* success/failure */
+{
+	xfs_bmbt_block_t	*block;		/* bmap btree block */
+	xfs_fsblock_t		bno;		/* fs-relative block number */
+	xfs_buf_t		*bp;		/* buffer for block */
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_delrec";
+#endif
+	int			i;		/* loop counter */
+	int			j;		/* temp state */
+	xfs_bmbt_key_t		key;		/* bmap btree key */
+	xfs_bmbt_key_t		*kp=NULL;	/* pointer to bmap btree key */
+	xfs_fsblock_t		lbno;		/* left sibling block number */
+	xfs_buf_t		*lbp;		/* left buffer pointer */
+	xfs_bmbt_block_t	*left;		/* left btree block */
+	xfs_bmbt_key_t		*lkp;		/* left btree key */
+	xfs_bmbt_ptr_t		*lpp;		/* left address pointer */
+	int			lrecs=0;	/* left record count */
+	xfs_bmbt_rec_t		*lrp;		/* left record pointer */
+	xfs_mount_t		*mp;		/* file system mount point */
+	xfs_bmbt_ptr_t		*pp;		/* pointer to bmap block addr */
+	int			ptr;		/* key/record index */
+	xfs_fsblock_t		rbno;		/* right sibling block number */
+	xfs_buf_t		*rbp;		/* right buffer pointer */
+	xfs_bmbt_block_t	*right;		/* right btree block */
+	xfs_bmbt_key_t		*rkp;		/* right btree key */
+	xfs_bmbt_rec_t		*rp;		/* pointer to bmap btree rec */
+	xfs_bmbt_ptr_t		*rpp;		/* right address pointer */
+	xfs_bmbt_block_t	*rrblock;	/* right-right btree block */
+	xfs_buf_t		*rrbp;		/* right-right buffer pointer */
+	int			rrecs=0;	/* right record count */
+	xfs_bmbt_rec_t		*rrp;		/* right record pointer */
+	xfs_btree_cur_t		*tcur;		/* temporary btree cursor */
+	int			numrecs;	/* temporary numrec count */
+	int			numlrecs, numrrecs;	/* left/right record counts */
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, level);
+	ptr = cur->bc_ptrs[level];
+	tcur = (xfs_btree_cur_t *)0;
+	if (ptr == 0) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	block = xfs_bmbt_get_block(cur, level, &bp);
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		goto error0;
+	}
+#endif
+	if (ptr > numrecs) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	XFS_STATS_INC(xs_bmbt_delrec);
+	if (level > 0) {
+		kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
+		pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = ptr; i < numrecs; i++) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				goto error0;
+			}
+		}
+#endif
+		if (ptr < numrecs) {
+			memmove(&kp[ptr - 1], &kp[ptr],
+				(numrecs - ptr) * sizeof(*kp));
+			memmove(&pp[ptr - 1], &pp[ptr], /* INT_: direct copy */
+				(numrecs - ptr) * sizeof(*pp));
+			xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
+			xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
+		}
+	} else {
+		rp = XFS_BMAP_REC_IADDR(block, 1, cur);
+		if (ptr < numrecs) {
+			memmove(&rp[ptr - 1], &rp[ptr],
+				(numrecs - ptr) * sizeof(*rp));
+			xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
+		}
+		if (ptr == 1) {
+			INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rp));
+			kp = &key;
+		}
+	}
+	numrecs--;
+	INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+	xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
+	/*
+	 * We're at the root level.
+	 * First, shrink the root block in-memory.
+	 * Try to get rid of the next level down.
+	 * If we can't then there's nothing left to do.
+	 */
+	if (level == cur->bc_nlevels - 1) {
+		xfs_iroot_realloc(cur->bc_private.b.ip, -1,
+			cur->bc_private.b.whichfork);
+		if ((error = xfs_bmbt_killroot(cur))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		goto error0;
+	}
+	if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
+		if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
+	/*
+	 * One child of root, need to get a chance to copy its contents
+	 * into the root and delete it. Can't go up to next level,
+	 * there's nothing to delete there.
+	 */
+	if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
+	    level == cur->bc_nlevels - 2) {
+		if ((error = xfs_bmbt_killroot(cur))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
+	if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		goto error0;
+	}
+	bno = NULLFSBLOCK;
+	if (rbno != NULLFSBLOCK) {
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_bmbt_increment(tcur, level, &i))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		rbp = tcur->bc_bufs[level];
+		right = XFS_BUF_TO_BMBT_BLOCK(rbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+#endif
+		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		    XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
+			if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				goto error0;
+			}
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_BMAP_BLOCK_IMINRECS(level, tcur));
+				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+				tcur = NULL;
+				if (level > 0) {
+					if ((error = xfs_bmbt_decrement(cur,
+							level, &i))) {
+						XFS_BMBT_TRACE_CURSOR(cur,
+							ERROR);
+						goto error0;
+					}
+				}
+				XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+				*stat = 1;
+				return 0;
+			}
+		}
+		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		if (lbno != NULLFSBLOCK) {
+			i = xfs_btree_firstrec(tcur, level);
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				goto error0;
+			}
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		}
+	}
+	if (lbno != NULLFSBLOCK) {
+		i = xfs_btree_firstrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * decrement to last in block
+		 */
+		if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		i = xfs_btree_firstrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		lbp = tcur->bc_bufs[level];
+		left = XFS_BUF_TO_BMBT_BLOCK(lbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+#endif
+		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		    XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
+			if ((error = xfs_bmbt_rshift(tcur, level, &i))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				goto error0;
+			}
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_BMAP_BLOCK_IMINRECS(level, tcur));
+				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+				tcur = NULL;
+				if (level == 0)
+					cur->bc_ptrs[0]++;
+				XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+				*stat = 1;
+				return 0;
+			}
+		}
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	tcur = NULL;
+	mp = cur->bc_mp;
+	ASSERT(bno != NULLFSBLOCK);
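+	/*
+	 * We could not fix things up by shifting a record from a sibling,
+	 * so try to merge instead: if our records fit in the left sibling,
+	 * merge this block into it; otherwise, if the right sibling's
+	 * records fit here, merge it into this block; otherwise leave the
+	 * block short of records.
+	 */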
+	if (lbno != NULLFSBLOCK &&
+	    lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+		rbno = bno;
+		right = block;
+		rbp = bp;
+		if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
+				XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		left = XFS_BUF_TO_BMBT_BLOCK(lbp);
+		if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+	} else if (rbno != NULLFSBLOCK &&
+		   rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		   XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+		lbno = bno;
+		left = block;
+		lbp = bp;
+		if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
+				XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		right = XFS_BUF_TO_BMBT_BLOCK(rbp);
+		if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	} else {
+		if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+	if (level > 0) {
+		lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
+		lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
+		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
+		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < numrrecs; i++) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				goto error0;
+			}
+		}
+#endif
+		memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
+		memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
+		xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
+		xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
+	} else {
+		lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
+		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
+		memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
+		xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
+	}
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs);
+	left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */
+	xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+		if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
+				INT_GET(left->bb_rightsib, ARCH_CONVERT),
+				0, &rrbp, XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
+		if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			goto error0;
+		}
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+	}
+	xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
+		cur->bc_private.b.flist, mp);
+	cur->bc_private.b.ip->i_d.di_nblocks--;
+	xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
+	XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
+			XFS_TRANS_DQ_BCOUNT, -1L);
+	xfs_trans_binval(cur->bc_tp, rbp);
+	if (bp != lbp) {
+		cur->bc_bufs[level] = lbp;
+		cur->bc_ptrs[level] += lrecs;
+		cur->bc_ra[level] = 0;
+	} else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		goto error0;
+	}
+	if (level > 0)
+		cur->bc_ptrs[level]--;
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 2;
+	return 0;
+
+error0:
+	if (tcur)
+		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+	return error;
+}
+
+#ifdef DEBUG
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_bmbt_get_rec(
+	xfs_btree_cur_t		*cur,
+	xfs_fileoff_t		*off,
+	xfs_fsblock_t		*bno,
+	xfs_filblks_t		*len,
+	xfs_exntst_t		*state,
+	int			*stat)
+{
+	xfs_bmbt_block_t	*block;
+	xfs_buf_t		*bp;
+#ifdef DEBUG
+	int			error;
+#endif
+	int			ptr;
+	xfs_bmbt_rec_t		*rp;
+
+	block = xfs_bmbt_get_block(cur, 0, &bp);
+	ptr = cur->bc_ptrs[0];
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, 0, bp)))
+		return error;
+#endif
+	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+		*stat = 0;
+		return 0;
+	}
+	rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
+	*off = xfs_bmbt_disk_get_startoff(rp);
+	*bno = xfs_bmbt_disk_get_startblock(rp);
+	*len = xfs_bmbt_disk_get_blockcount(rp);
+	*state = xfs_bmbt_disk_get_state(rp);
+	*stat = 1;
+	return 0;
+}
+#endif
+
+/*
+ * Insert one record/level.  Return information to the caller
+ * allowing the next level up to proceed if necessary.
+ */
+STATIC int					/* error */
+xfs_bmbt_insrec(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	xfs_fsblock_t		*bnop,
+	xfs_bmbt_rec_t		*recp,
+	xfs_btree_cur_t		**curp,
+	int			*stat)		/* no-go/done/continue */
+{
+	xfs_bmbt_block_t	*block;		/* bmap btree block */
+	xfs_buf_t		*bp;		/* buffer for block */
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_insrec";
+#endif
+	int			i;		/* loop index */
+	xfs_bmbt_key_t		key;		/* bmap btree key */
+	xfs_bmbt_key_t		*kp=NULL;	/* pointer to bmap btree key */
+	int			logflags;	/* inode logging flags */
+	xfs_fsblock_t		nbno;		/* new block number */
+	struct xfs_btree_cur	*ncur;		/* new btree cursor */
+	xfs_bmbt_key_t		nkey;		/* new btree key value */
+	xfs_bmbt_rec_t		nrec;		/* new record value */
+	int			optr;		/* old key/record index */
+	xfs_bmbt_ptr_t		*pp;		/* pointer to bmap block addr */
+	int			ptr;		/* key/record index */
+	xfs_bmbt_rec_t		*rp=NULL;	/* pointer to bmap btree rec */
+	int			numrecs;
+
+	ASSERT(level < cur->bc_nlevels);
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
+	ncur = (xfs_btree_cur_t *)0;
+	INT_SET(key.br_startoff, ARCH_CONVERT,
+		xfs_bmbt_disk_get_startoff(recp));
+	optr = ptr = cur->bc_ptrs[level];
+	if (ptr == 0) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	XFS_STATS_INC(xs_bmbt_insrec);
+	block = xfs_bmbt_get_block(cur, level, &bp);
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	if (ptr <= numrecs) {
+		if (level == 0) {
+			rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
+		} else {
+			kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
+			xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
+		}
+	}
+#endif
+	nbno = NULLFSBLOCK;
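+	/*
+	 * If the block is full, make room: grow an expandable root block,
+	 * make a new root level, or shift a record into a sibling; as a
+	 * last resort split the block and continue the insert in whichever
+	 * half now holds the insertion point.
+	 */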
+	if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+		if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
+			/*
+			 * A root block, that can be made bigger.
+			 */
+			xfs_iroot_realloc(cur->bc_private.b.ip, 1,
+				cur->bc_private.b.whichfork);
+			block = xfs_bmbt_get_block(cur, level, &bp);
+		} else if (level == cur->bc_nlevels - 1) {
+			if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) ||
+			    *stat == 0) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+			xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+				logflags);
+			block = xfs_bmbt_get_block(cur, level, &bp);
+		} else {
+			if ((error = xfs_bmbt_rshift(cur, level, &i))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+			if (i) {
+				/* nothing */
+			} else {
+				if ((error = xfs_bmbt_lshift(cur, level, &i))) {
+					XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+					return error;
+				}
+				if (i) {
+					optr = ptr = cur->bc_ptrs[level];
+				} else {
+					if ((error = xfs_bmbt_split(cur, level,
+							&nbno, &nkey, &ncur,
+							&i))) {
+						XFS_BMBT_TRACE_CURSOR(cur,
+							ERROR);
+						return error;
+					}
+					if (i) {
+						block = xfs_bmbt_get_block(
+							    cur, level, &bp);
+#ifdef DEBUG
+						if ((error =
+						    xfs_btree_check_lblock(cur,
+							    block, level, bp))) {
+							XFS_BMBT_TRACE_CURSOR(
+								cur, ERROR);
+							return error;
+						}
+#endif
+						ptr = cur->bc_ptrs[level];
+						xfs_bmbt_disk_set_allf(&nrec,
+							nkey.br_startoff, 0, 0,
+							XFS_EXT_NORM);
+					} else {
+						XFS_BMBT_TRACE_CURSOR(cur,
+							EXIT);
+						*stat = 0;
+						return 0;
+					}
+				}
+			}
+		}
+	}
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	if (level > 0) {
+		kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
+		pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = numrecs; i >= ptr; i--) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT),
+					level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+		}
+#endif
+		memmove(&kp[ptr], &kp[ptr - 1],
+			(numrecs - ptr + 1) * sizeof(*kp));
+		memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */
+			(numrecs - ptr + 1) * sizeof(*pp));
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)*bnop,
+				level))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		kp[ptr - 1] = key;
+		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
+		numrecs++;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
+		xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
+	} else {
+		rp = XFS_BMAP_REC_IADDR(block, 1, cur);
+		memmove(&rp[ptr], &rp[ptr - 1],
+			(numrecs - ptr + 1) * sizeof(*rp));
+		rp[ptr - 1] = *recp;
+		numrecs++;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
+	}
+	xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
+#ifdef DEBUG
+	if (ptr < numrecs) {
+		if (level == 0)
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
+				rp + ptr);
+		else
+			xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
+				kp + ptr);
+	}
+#endif
+	if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	*bnop = nbno;
+	if (nbno != NULLFSBLOCK) {
+		*recp = nrec;
+		*curp = ncur;
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+}
+
+STATIC int
+xfs_bmbt_killroot(
+	xfs_btree_cur_t		*cur)
+{
+	xfs_bmbt_block_t	*block;
+	xfs_bmbt_block_t	*cblock;
+	xfs_buf_t		*cbp;
+	xfs_bmbt_key_t		*ckp;
+	xfs_bmbt_ptr_t		*cpp;
+#ifdef DEBUG
+	int			error;
+#endif
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_killroot";
+#endif
+	int			i;
+	xfs_bmbt_key_t		*kp;
+	xfs_inode_t		*ip;
+	xfs_ifork_t		*ifp;
+	int			level;
+	xfs_bmbt_ptr_t		*pp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	level = cur->bc_nlevels - 1;
+	ASSERT(level >= 1);
+	/*
+	 * Don't deal with the case where the root block needs to be a leaf.
+	 * We're just going to turn the thing back into extents anyway.
+	 */
+	if (level == 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		return 0;
+	}
+	block = xfs_bmbt_get_block(cur, level, &cbp);
+	/*
+	 * Give up if the root has multiple children.
+	 */
+	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) != 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		return 0;
+	}
+	/*
+	 * Only do this if the next level will fit.
+	 * Then the data must be copied up to the inode;
+	 * instead of freeing the root, you free the next level.
+	 */
+	cbp = cur->bc_bufs[level - 1];
+	cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
+	if (INT_GET(cblock->bb_numrecs, ARCH_CONVERT) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		return 0;
+	}
+	ASSERT(INT_GET(cblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO);
+	ASSERT(INT_GET(cblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO);
+	ip = cur->bc_private.b.ip;
+	ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
+	ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
+	       XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
+	i = (int)(INT_GET(cblock->bb_numrecs, ARCH_CONVERT) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
+	if (i) {
+		xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
+		block = ifp->if_broot;
+	}
+	INT_MOD(block->bb_numrecs, ARCH_CONVERT, i);
+	ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
+	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
+	ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
+	memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+	pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+	cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
+#ifdef DEBUG
+	for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) {
+		if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+	}
+#endif
+	memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+	xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
+			cur->bc_private.b.flist, cur->bc_mp);
+	ip->i_d.di_nblocks--;
+	XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
+			XFS_TRANS_DQ_BCOUNT, -1L);
+	xfs_trans_binval(cur->bc_tp, cbp);
+	cur->bc_bufs[level - 1] = NULL;
+	INT_MOD(block->bb_level, ARCH_CONVERT, -1);
+	xfs_trans_log_inode(cur->bc_tp, ip,
+		XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
+	cur->bc_nlevels--;
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	return 0;
+}
+
+/*
+ * Log key values from the btree block.
+ */
+STATIC void
+xfs_bmbt_log_keys(
+	xfs_btree_cur_t	*cur,
+	xfs_buf_t	*bp,
+	int		kfirst,
+	int		klast)
+{
+#ifdef XFS_BMBT_TRACE
+	static char	fname[] = "xfs_bmbt_log_keys";
+#endif
+	xfs_trans_t	*tp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
+	tp = cur->bc_tp;
+	if (bp) {
+		xfs_bmbt_block_t	*block;
+		int			first;
+		xfs_bmbt_key_t		*kp;
+		int			last;
+
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
+		first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
+		last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
+		xfs_trans_log_buf(tp, bp, first, last);
+	} else {
+		xfs_inode_t		 *ip;
+
+		ip = cur->bc_private.b.ip;
+		xfs_trans_log_inode(tp, ip,
+			XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+}
+
+/*
+ * Log pointer values from the btree block.
+ */
+STATIC void
+xfs_bmbt_log_ptrs(
+	xfs_btree_cur_t	*cur,
+	xfs_buf_t	*bp,
+	int		pfirst,
+	int		plast)
+{
+#ifdef XFS_BMBT_TRACE
+	static char	fname[] = "xfs_bmbt_log_ptrs";
+#endif
+	xfs_trans_t	*tp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
+	tp = cur->bc_tp;
+	if (bp) {
+		xfs_bmbt_block_t	*block;
+		int			first;
+		int			last;
+		xfs_bmbt_ptr_t		*pp;
+
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
+		first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
+		last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
+		xfs_trans_log_buf(tp, bp, first, last);
+	} else {
+		xfs_inode_t		*ip;
+
+		ip = cur->bc_private.b.ip;
+		xfs_trans_log_inode(tp, ip,
+			XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+}
+
+/*
+ * Lookup the record.  The cursor is made to point to it, based on dir.
+ */
+STATIC int				/* error */
+xfs_bmbt_lookup(
+	xfs_btree_cur_t		*cur,
+	xfs_lookup_t		dir,
+	int			*stat)		/* success/failure */
+{
+	xfs_bmbt_block_t	*block=NULL;
+	xfs_buf_t		*bp;
+	xfs_daddr_t		d;
+	xfs_sfiloff_t		diff;
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char	fname[] = "xfs_bmbt_lookup";
+#endif
+	xfs_fsblock_t		fsbno=0;
+	int			high;
+	int			i;
+	int			keyno=0;
+	xfs_bmbt_key_t		*kkbase=NULL;
+	xfs_bmbt_key_t		*kkp;
+	xfs_bmbt_rec_t		*krbase=NULL;
+	xfs_bmbt_rec_t		*krp;
+	int			level;
+	int			low;
+	xfs_mount_t		*mp;
+	xfs_bmbt_ptr_t		*pp;
+	xfs_bmbt_irec_t		*rp;
+	xfs_fileoff_t		startoff;
+	xfs_trans_t		*tp;
+
+	XFS_STATS_INC(xs_bmbt_lookup);
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, (int)dir);
+	tp = cur->bc_tp;
+	mp = cur->bc_mp;
+	rp = &cur->bc_rec.b;
+	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+		if (level < cur->bc_nlevels - 1) {
+			d = XFS_FSB_TO_DADDR(mp, fsbno);
+			bp = cur->bc_bufs[level];
+			if (bp && XFS_BUF_ADDR(bp) != d)
+				bp = (xfs_buf_t *)0;
+			if (!bp) {
+				if ((error = xfs_btree_read_bufl(mp, tp, fsbno,
+						0, &bp, XFS_BMAP_BTREE_REF))) {
+					XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+					return error;
+				}
+				xfs_btree_setbuf(cur, level, bp);
+				block = XFS_BUF_TO_BMBT_BLOCK(bp);
+				if ((error = xfs_btree_check_lblock(cur, block,
+						level, bp))) {
+					XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+					return error;
+				}
+			} else
+				block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		} else
+			block = xfs_bmbt_get_block(cur, level, &bp);
+		if (diff == 0)
+			keyno = 1;
+		else {
+			if (level > 0)
+				kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur);
+			else
+				krbase = XFS_BMAP_REC_IADDR(block, 1, cur);
+			low = 1;
+			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+				ASSERT(level == 0);
+				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
+				XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+				*stat = 0;
+				return 0;
+			}
+			while (low <= high) {
+				XFS_STATS_INC(xs_bmbt_compare);
+				keyno = (low + high) >> 1;
+				if (level > 0) {
+					kkp = kkbase + keyno - 1;
+					startoff = INT_GET(kkp->br_startoff, ARCH_CONVERT);
+				} else {
+					krp = krbase + keyno - 1;
+					startoff = xfs_bmbt_disk_get_startoff(krp);
+				}
+				diff = (xfs_sfiloff_t)
+						(startoff - rp->br_startoff);
+				if (diff < 0)
+					low = keyno + 1;
+				else if (diff > 0)
+					high = keyno - 1;
+				else
+					break;
+			}
+		}
+		if (level > 0) {
+			if (diff > 0 && --keyno < 1)
+				keyno = 1;
+			pp = XFS_BMAP_PTR_IADDR(block, keyno, cur);
+#ifdef DEBUG
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+#endif
+			fsbno = INT_GET(*pp, ARCH_CONVERT);
+			cur->bc_ptrs[level] = keyno;
+		}
+	}
+	if (dir != XFS_LOOKUP_LE && diff < 0) {
+		keyno++;
+		/*
+		 * If ge search and we went off the end of the block, but it's
+		 * not the last block, we're in the wrong block.
+		 */
+		if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
+		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+			cur->bc_ptrs[0] = keyno;
+			if ((error = xfs_bmbt_increment(cur, 0, &i))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+			XFS_WANT_CORRUPTED_RETURN(i == 1);
+			XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+			*stat = 1;
+			return 0;
+		}
+	}
+	else if (dir == XFS_LOOKUP_LE && diff > 0)
+		keyno--;
+	cur->bc_ptrs[0] = keyno;
+	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+	} else {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
+	}
+	return 0;
+}
+
+/*
+ * Move 1 record left from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int					/* error */
+xfs_bmbt_lshift(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	int			*stat)		/* success/failure */
+{
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_lshift";
+#endif
+#ifdef DEBUG
+	int			i;		/* loop counter */
+#endif
+	xfs_bmbt_key_t		key;		/* bmap btree key */
+	xfs_buf_t		*lbp;		/* left buffer pointer */
+	xfs_bmbt_block_t	*left;		/* left btree block */
+	xfs_bmbt_key_t		*lkp=NULL;	/* left btree key */
+	xfs_bmbt_ptr_t		*lpp;		/* left address pointer */
+	int			lrecs;		/* left record count */
+	xfs_bmbt_rec_t		*lrp=NULL;	/* left record pointer */
+	xfs_mount_t		*mp;		/* file system mount point */
+	xfs_buf_t		*rbp;		/* right buffer pointer */
+	xfs_bmbt_block_t	*right;		/* right btree block */
+	xfs_bmbt_key_t		*rkp=NULL;	/* right btree key */
+	xfs_bmbt_ptr_t		*rpp=NULL;	/* right address pointer */
+	xfs_bmbt_rec_t		*rrp=NULL;	/* right record pointer */
+	int			rrecs;		/* right record count */
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, level);
+	if (level == cur->bc_nlevels - 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	rbp = cur->bc_bufs[level];
+	right = XFS_BUF_TO_BMBT_BLOCK(rbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	if (cur->bc_ptrs[level] <= 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	mp = cur->bc_mp;
+	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0,
+			&lbp, XFS_BMAP_BTREE_REF))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	left = XFS_BUF_TO_BMBT_BLOCK(lbp);
+	if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	if (level > 0) {
+		lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
+		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
+		*lkp = *rkp;
+		xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs);
+		lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
+		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		*lpp = *rpp; /* INT_: direct copy */
+		xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs);
+	} else {
+		lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur);
+		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
+		*lrp = *rrp;
+		xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
+	}
+	INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs);
+	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
+#ifdef DEBUG
+	if (level > 0)
+		xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp);
+	else
+		xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
+#endif
+	rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1;
+	INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs);
+	xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
+	if (level > 0) {
+#ifdef DEBUG
+		for (i = 0; i < rrecs; i++) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT),
+					level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+		}
+#endif
+		memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
+		memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
+		xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
+		xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
+	} else {
+		memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
+		xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
+		INT_SET(key.br_startoff, ARCH_CONVERT,
+			xfs_bmbt_disk_get_startoff(rrp));
+		rkp = &key;
+	}
+	if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	cur->bc_ptrs[level]--;
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Move 1 record right from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int					/* error */
+xfs_bmbt_rshift(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	int			*stat)		/* success/failure */
+{
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_rshift";
+#endif
+	int			i;		/* loop counter */
+	xfs_bmbt_key_t		key;		/* bmap btree key */
+	xfs_buf_t		*lbp;		/* left buffer pointer */
+	xfs_bmbt_block_t	*left;		/* left btree block */
+	xfs_bmbt_key_t		*lkp;		/* left btree key */
+	xfs_bmbt_ptr_t		*lpp;		/* left address pointer */
+	xfs_bmbt_rec_t		*lrp;		/* left record pointer */
+	xfs_mount_t		*mp;		/* file system mount point */
+	xfs_buf_t		*rbp;		/* right buffer pointer */
+	xfs_bmbt_block_t	*right;		/* right btree block */
+	xfs_bmbt_key_t		*rkp;		/* right btree key */
+	xfs_bmbt_ptr_t		*rpp;		/* right address pointer */
+	xfs_bmbt_rec_t		*rrp=NULL;	/* right record pointer */
+	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, level);
+	if (level == cur->bc_nlevels - 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	lbp = cur->bc_bufs[level];
+	left = XFS_BUF_TO_BMBT_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	mp = cur->bc_mp;
+	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+			&rbp, XFS_BMAP_BTREE_REF))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	right = XFS_BUF_TO_BMBT_BLOCK(rbp);
+	if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	if (level > 0) {
+		lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lpp = XFS_BMAP_PTR_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
+		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+		}
+#endif
+		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		*rkp = *lkp;
+		*rpp = *lpp; /* INT_: direct copy */
+		xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+	} else {
+		lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
+		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		*rrp = *lrp;
+		xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		INT_SET(key.br_startoff, ARCH_CONVERT,
+			xfs_bmbt_disk_get_startoff(rrp));
+		rkp = &key;
+	}
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
+	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+#ifdef DEBUG
+	if (level > 0)
+		xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
+	else
+		xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1);
+#endif
+	xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
+	if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	i = xfs_btree_lastrec(tcur, level);
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	if ((error = xfs_bmbt_increment(tcur, level, &i))) {
+		XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
+		goto error1;
+	}
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) {
+		XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
+		goto error1;
+	}
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+error0:
+	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+error1:
+	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Determine the extent state.
+ */
+/* ARGSUSED */
+STATIC xfs_exntst_t
+xfs_extent_state(
+	xfs_filblks_t		blks,
+	int			extent_flag)
+{
+	if (extent_flag) {
+		ASSERT(blks != 0);	/* saved for DMIG */
+		return XFS_EXT_UNWRITTEN;
+	}
+	return XFS_EXT_NORM;
+}
+
+
+/*
+ * Split cur/level block in half.
+ * Return new block number and its first record (to be inserted into parent).
+ */
+STATIC int					/* error */
+xfs_bmbt_split(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	xfs_fsblock_t		*bnop,
+	xfs_bmbt_key_t		*keyp,
+	xfs_btree_cur_t		**curp,
+	int			*stat)		/* success/failure */
+{
+	xfs_alloc_arg_t		args;		/* block allocation args */
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_split";
+#endif
+	int			i;		/* loop counter */
+	xfs_fsblock_t		lbno;		/* left sibling block number */
+	xfs_buf_t		*lbp;		/* left buffer pointer */
+	xfs_bmbt_block_t	*left;		/* left btree block */
+	xfs_bmbt_key_t		*lkp;		/* left btree key */
+	xfs_bmbt_ptr_t		*lpp;		/* left address pointer */
+	xfs_bmbt_rec_t		*lrp;		/* left record pointer */
+	xfs_buf_t		*rbp;		/* right buffer pointer */
+	xfs_bmbt_block_t	*right;		/* right btree block */
+	xfs_bmbt_key_t		*rkp;		/* right btree key */
+	xfs_bmbt_ptr_t		*rpp;		/* right address pointer */
+	xfs_bmbt_block_t	*rrblock;	/* right-right btree block */
+	xfs_buf_t		*rrbp;		/* right-right buffer pointer */
+	xfs_bmbt_rec_t		*rrp;		/* right record pointer */
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, keyp);
+	args.tp = cur->bc_tp;
+	args.mp = cur->bc_mp;
+	lbp = cur->bc_bufs[level];
+	lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
+	left = XFS_BUF_TO_BMBT_BLOCK(lbp);
+	args.fsbno = cur->bc_private.b.firstblock;
+	if (args.fsbno == NULLFSBLOCK) {
+		args.fsbno = lbno;
+		args.type = XFS_ALLOCTYPE_START_BNO;
+	} else if (cur->bc_private.b.flist->xbf_low)
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
+	else
+		args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	args.mod = args.minleft = args.alignment = args.total = args.isfl =
+		args.userdata = args.minalignslop = 0;
+	args.minlen = args.maxlen = args.prod = 1;
+	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
+	if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return XFS_ERROR(ENOSPC);
+	}
+	if ((error = xfs_alloc_vextent(&args))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	if (args.fsbno == NULLFSBLOCK) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	ASSERT(args.len == 1);
+	cur->bc_private.b.firstblock = args.fsbno;
+	cur->bc_private.b.allocated++;
+	cur->bc_private.b.ip->i_d.di_nblocks++;
+	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
+	XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
+			XFS_TRANS_DQ_BCOUNT, 1L);
+	rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
+	right = XFS_BUF_TO_BMBT_BLOCK(rbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	INT_SET(right->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
+	right->bb_level = left->bb_level; /* INT_: direct copy */
+	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
+	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
+		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	if (level > 0) {
+		lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
+		lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
+		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
+		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) {
+				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+				return error;
+			}
+		}
+#endif
+		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT);
+	} else {
+		lrp = XFS_BMAP_REC_IADDR(left, i, cur);
+		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
+		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp);
+	}
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
+	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
+	INT_SET(left->bb_rightsib, ARCH_CONVERT, args.fsbno);
+	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
+	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+		if ((error = xfs_btree_read_bufl(args.mp, args.tp,
+				INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
+				XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
+		if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.fsbno);
+		xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+	}
+	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+		xfs_btree_setbuf(cur, level, rbp);
+		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	if (level + 1 < cur->bc_nlevels) {
+		if ((error = xfs_btree_dup_cursor(cur, curp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		(*curp)->bc_ptrs[level + 1]++;
+	}
+	*bnop = args.fsbno;
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+}
+
+
+/*
+ * Update keys for the record.
+ */
+STATIC int
+xfs_bmbt_updkey(
+	xfs_btree_cur_t		*cur,
+	xfs_bmbt_key_t		*keyp,	/* on-disk format */
+	int			level)
+{
+	xfs_bmbt_block_t	*block;
+	xfs_buf_t		*bp;
+#ifdef DEBUG
+	int			error;
+#endif
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_updkey";
+#endif
+	xfs_bmbt_key_t		*kp;
+	int			ptr;
+
+	ASSERT(level >= 1);
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGIK(cur, level, keyp);
+	for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+		block = xfs_bmbt_get_block(cur, level, &bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		ptr = cur->bc_ptrs[level];
+		kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
+		*kp = *keyp;
+		xfs_bmbt_log_keys(cur, bp, ptr, ptr);
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	return 0;
+}
+
+/*
+ * Convert on-disk form of btree root to in-memory form.
+ */
+void
+xfs_bmdr_to_bmbt(
+	xfs_bmdr_block_t	*dblock,
+	int			dblocklen,
+	xfs_bmbt_block_t	*rblock,
+	int			rblocklen)
+{
+	int			dmxr;
+	xfs_bmbt_key_t		*fkp;
+	xfs_bmbt_ptr_t		*fpp;
+	xfs_bmbt_key_t		*tkp;
+	xfs_bmbt_ptr_t		*tpp;
+
+	INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
+	rblock->bb_level = dblock->bb_level;	/* both in on-disk format */
+	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0);
+	rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */
+	INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
+	INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
+	fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
+	tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
+	fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
+	tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
+	dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
+	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
+	memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
+}
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int						/* error */
+xfs_bmbt_decrement(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	int			*stat)		/* success/failure */
+{
+	xfs_bmbt_block_t	*block;
+	xfs_buf_t		*bp;
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_decrement";
+#endif
+	xfs_fsblock_t		fsbno;
+	int			lev;
+	xfs_mount_t		*mp;
+	xfs_trans_t		*tp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, level);
+	ASSERT(level < cur->bc_nlevels);
+	if (level < cur->bc_nlevels - 1)
+		xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
+	if (--cur->bc_ptrs[level] > 0) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	block = xfs_bmbt_get_block(cur, level, &bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
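+	/*
+	 * Walk up the tree until we find a level where we can move to the
+	 * previous entry without going off the left edge of a block.
+	 */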
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		if (--cur->bc_ptrs[lev] > 0)
+			break;
+		if (lev < cur->bc_nlevels - 1)
+			xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
+	}
+	if (lev == cur->bc_nlevels) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	tp = cur->bc_tp;
+	mp = cur->bc_mp;
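+	/*
+	 * Walk back down the tree, reading in the rightmost path of blocks
+	 * and leaving the cursor on the last entry at each level.
+	 */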
+	for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
+		fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
+				XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Delete the record pointed to by cur.
+ */
+int					/* error */
+xfs_bmbt_delete(
+	xfs_btree_cur_t	*cur,
+	int		*stat)		/* success/failure */
+{
+	int		error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char	fname[] = "xfs_bmbt_delete";
+#endif
+	int		i;
+	int		level;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
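+	/*
+	 * xfs_bmbt_delrec returns 2 when the delete requires a join at the
+	 * next level up, so keep deleting up the tree until that stops.
+	 */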
+	for (level = 0, i = 2; i == 2; level++) {
+		if ((error = xfs_bmbt_delrec(cur, level, &i))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+	}
+	if (i == 0) {
+		for (level = 1; level < cur->bc_nlevels; level++) {
+			if (cur->bc_ptrs[level] == 0) {
+				if ((error = xfs_bmbt_decrement(cur, level,
+						&i))) {
+					XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+					return error;
+				}
+				break;
+			}
+		}
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = i;
+	return 0;
+}
+
+/*
+ * Convert a compressed bmap extent record to an uncompressed form.
+ * This code must be in sync with the routines xfs_bmbt_get_startoff,
+ * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
+ */
+
+STATIC __inline__ void
+__xfs_bmbt_get_all(
+		__uint64_t l0,
+		__uint64_t l1,
+		xfs_bmbt_irec_t *s)
+{
+	int	ext_flag;
+	xfs_exntst_t st;
+
+	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
+	s->br_startoff = ((xfs_fileoff_t)l0 &
+			   XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+#if XFS_BIG_BLKNOS
+	s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
+			   (((xfs_fsblock_t)l1) >> 21);
+#else
+#ifdef DEBUG
+	{
+		xfs_dfsbno_t	b;
+
+		b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
+		    (((xfs_dfsbno_t)l1) >> 21);
+		ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+		s->br_startblock = (xfs_fsblock_t)b;
+	}
+#else	/* !DEBUG */
+	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
+#endif	/* DEBUG */
+#endif	/* XFS_BIG_BLKNOS */
+	s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
+	/* This is xfs_extent_state() in-line */
+	if (ext_flag) {
+		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
+		st = XFS_EXT_UNWRITTEN;
+	} else
+		st = XFS_EXT_NORM;
+	s->br_state = st;
+}
+
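+/*
+ * Convert an in memory bmap extent record to the uncompressed form.
+ */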
+void
+xfs_bmbt_get_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s)
+{
+	__xfs_bmbt_get_all(r->l0, r->l1, s);
+}
+
+/*
+ * Get the block pointer for the given level of the cursor.
+ * Fill in the buffer pointer, if applicable.
+ */
+xfs_bmbt_block_t *
+xfs_bmbt_get_block(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	xfs_buf_t		**bpp)
+{
+	xfs_ifork_t		*ifp;
+	xfs_bmbt_block_t	*rval;
+
+	if (level < cur->bc_nlevels - 1) {
+		*bpp = cur->bc_bufs[level];
+		rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
+	} else {
+		*bpp = NULL;
+		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
+			cur->bc_private.b.whichfork);
+		rval = ifp->if_broot;
+	}
+	return rval;
+}
+
+/*
+ * Extract the blockcount field from an in memory bmap extent record.
+ */
+xfs_filblks_t
+xfs_bmbt_get_blockcount(
+	xfs_bmbt_rec_t	*r)
+{
+	return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
+}
+
+/*
+ * Extract the startblock field from an in memory bmap extent record.
+ */
+xfs_fsblock_t
+xfs_bmbt_get_startblock(
+	xfs_bmbt_rec_t	*r)
+{
+#if XFS_BIG_BLKNOS
+	return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	       (((xfs_fsblock_t)r->l1) >> 21);
+#else
+#ifdef DEBUG
+	xfs_dfsbno_t	b;
+
+	b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	    (((xfs_dfsbno_t)r->l1) >> 21);
+	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+	return (xfs_fsblock_t)b;
+#else	/* !DEBUG */
+	return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
+#endif	/* DEBUG */
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+/*
+ * Extract the startoff field from an in memory bmap extent record.
+ */
+xfs_fileoff_t
+xfs_bmbt_get_startoff(
+	xfs_bmbt_rec_t	*r)
+{
+	return ((xfs_fileoff_t)r->l0 &
+		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+}
+
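+/*
+ * Extract the extent state field from an in memory bmap extent record.
+ */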
+xfs_exntst_t
+xfs_bmbt_get_state(
+	xfs_bmbt_rec_t	*r)
+{
+	int	ext_flag;
+
+	ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
+	return xfs_extent_state(xfs_bmbt_get_blockcount(r),
+				ext_flag);
+}
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+/* Endian flipping versions of the bmbt extraction functions */
+void
+xfs_bmbt_disk_get_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s)
+{
+	__uint64_t	l0, l1;
+
+	l0 = INT_GET(r->l0, ARCH_CONVERT);
+	l1 = INT_GET(r->l1, ARCH_CONVERT);
+
+	__xfs_bmbt_get_all(l0, l1, s);
+}
+
+/*
+ * Extract the blockcount field from an on disk bmap extent record.
+ */
+xfs_filblks_t
+xfs_bmbt_disk_get_blockcount(
+	xfs_bmbt_rec_t	*r)
+{
+	return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21));
+}
+
+/*
+ * Extract the startblock field from an on disk bmap extent record.
+ */
+xfs_fsblock_t
+xfs_bmbt_disk_get_startblock(
+	xfs_bmbt_rec_t	*r)
+{
+#if XFS_BIG_BLKNOS
+	return (((xfs_fsblock_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) |
+	       (((xfs_fsblock_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
+#else
+#ifdef DEBUG
+	xfs_dfsbno_t	b;
+
+	b = (((xfs_dfsbno_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) |
+	    (((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
+	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+	return (xfs_fsblock_t)b;
+#else	/* !DEBUG */
+	return (xfs_fsblock_t)(((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
+#endif	/* DEBUG */
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+/*
+ * Extract the startoff field from a disk format bmap extent record.
+ */
+xfs_fileoff_t
+xfs_bmbt_disk_get_startoff(
+	xfs_bmbt_rec_t	*r)
+{
+	return ((xfs_fileoff_t)INT_GET(r->l0, ARCH_CONVERT) &
+		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+}
+
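+/*
+ * Extract the extent state field from an on disk bmap extent record.
+ */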
+xfs_exntst_t
+xfs_bmbt_disk_get_state(
+	xfs_bmbt_rec_t  *r)
+{
+	int	ext_flag;
+
+	ext_flag = (int)((INT_GET(r->l0, ARCH_CONVERT)) >> (64 - BMBT_EXNTFLAG_BITLEN));
+	return xfs_extent_state(xfs_bmbt_disk_get_blockcount(r),
+				ext_flag);
+}
+#endif
+
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int						/* error */
+xfs_bmbt_increment(
+	xfs_btree_cur_t		*cur,
+	int			level,
+	int			*stat)		/* success/failure */
+{
+	xfs_bmbt_block_t	*block;
+	xfs_buf_t		*bp;
+	int			error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_increment";
+#endif
+	xfs_fsblock_t		fsbno;
+	int			lev;
+	xfs_mount_t		*mp;
+	xfs_trans_t		*tp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGI(cur, level);
+	ASSERT(level < cur->bc_nlevels);
+	if (level < cur->bc_nlevels - 1)
+		xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+	block = xfs_bmbt_get_block(cur, level, &bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 1;
+		return 0;
+	}
+	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
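+	/*
+	 * Walk up the tree until we find a level where we can move to the
+	 * next entry without going off the right edge of a block.
+	 */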
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		block = xfs_bmbt_get_block(cur, lev, &bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			break;
+		if (lev < cur->bc_nlevels - 1)
+			xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
+	}
+	if (lev == cur->bc_nlevels) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	tp = cur->bc_tp;
+	mp = cur->bc_mp;
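+	/*
+	 * Walk back down the tree, reading in the leftmost path of blocks
+	 * and leaving the cursor on the first entry at each level.
+	 */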
+	for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
+		fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
+				XFS_BMAP_BTREE_REF))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		cur->bc_ptrs[lev] = 1;
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Insert the current record at the point referenced by cur.
+ */
+int					/* error */
+xfs_bmbt_insert(
+	xfs_btree_cur_t	*cur,
+	int		*stat)		/* success/failure */
+{
+	int		error;		/* error return value */
+#ifdef XFS_BMBT_TRACE
+	static char	fname[] = "xfs_bmbt_insert";
+#endif
+	int		i;
+	int		level;
+	xfs_fsblock_t	nbno;
+	xfs_btree_cur_t	*ncur;
+	xfs_bmbt_rec_t	nrec;
+	xfs_btree_cur_t	*pcur;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	level = 0;
+	nbno = NULLFSBLOCK;
+	xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
+	ncur = (xfs_btree_cur_t *)0;
+	pcur = cur;
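+	/*
+	 * Insert at each level going up the tree; a split in xfs_bmbt_insrec
+	 * hands back the new block number in nbno (and possibly a new cursor
+	 * in ncur), which forces another insert at the next level up.
+	 */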
+	do {
+		if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
+				&i))) {
+			if (pcur != cur)
+				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
+			cur->bc_nlevels = pcur->bc_nlevels;
+			cur->bc_private.b.allocated +=
+				pcur->bc_private.b.allocated;
+			pcur->bc_private.b.allocated = 0;
+			ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
+			       (cur->bc_private.b.ip->i_d.di_flags &
+				XFS_DIFLAG_REALTIME));
+			cur->bc_private.b.firstblock =
+				pcur->bc_private.b.firstblock;
+			ASSERT(cur->bc_private.b.flist ==
+			       pcur->bc_private.b.flist);
+			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
+		}
+		if (ncur) {
+			pcur = ncur;
+			ncur = (xfs_btree_cur_t *)0;
+		}
+	} while (nbno != NULLFSBLOCK);
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*stat = i;
+	return 0;
+error0:
+	XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+	return error;
+}
+
+/*
+ * Log fields from the btree block header.
+ */
+void
+xfs_bmbt_log_block(
+	xfs_btree_cur_t		*cur,
+	xfs_buf_t		*bp,
+	int			fields)
+{
+	int			first;
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_log_block";
+#endif
+	int			last;
+	xfs_trans_t		*tp;
+	static const short	offsets[] = {
+		offsetof(xfs_bmbt_block_t, bb_magic),
+		offsetof(xfs_bmbt_block_t, bb_level),
+		offsetof(xfs_bmbt_block_t, bb_numrecs),
+		offsetof(xfs_bmbt_block_t, bb_leftsib),
+		offsetof(xfs_bmbt_block_t, bb_rightsib),
+		sizeof(xfs_bmbt_block_t)
+	};
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
+	tp = cur->bc_tp;
+	if (bp) {
+		xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
+				  &last);
+		xfs_trans_log_buf(tp, bp, first, last);
+	} else
+		xfs_trans_log_inode(tp, cur->bc_private.b.ip,
+			XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+}
+
+/*
+ * Log record values from the btree block.
+ */
+void
+xfs_bmbt_log_recs(
+	xfs_btree_cur_t		*cur,
+	xfs_buf_t		*bp,
+	int			rfirst,
+	int			rlast)
+{
+	xfs_bmbt_block_t	*block;
+	int			first;
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_log_recs";
+#endif
+	int			last;
+	xfs_bmbt_rec_t		*rp;
+	xfs_trans_t		*tp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
+	ASSERT(bp);
+	tp = cur->bc_tp;
+	block = XFS_BUF_TO_BMBT_BLOCK(bp);
+	rp = XFS_BMAP_REC_DADDR(block, 1, cur);
+	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(tp, bp, first, last);
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+}
+
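+/*
+ * Lookup the record equal to [off, bno, len] in the btree given by cur.
+ */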
+int					/* error */
+xfs_bmbt_lookup_eq(
+	xfs_btree_cur_t	*cur,
+	xfs_fileoff_t	off,
+	xfs_fsblock_t	bno,
+	xfs_filblks_t	len,
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.b.br_startoff = off;
+	cur->bc_rec.b.br_startblock = bno;
+	cur->bc_rec.b.br_blockcount = len;
+	return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
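+/*
+ * Lookup the first record greater than or equal to [off, bno, len]
+ * in the btree given by cur.
+ */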
+int					/* error */
+xfs_bmbt_lookup_ge(
+	xfs_btree_cur_t	*cur,
+	xfs_fileoff_t	off,
+	xfs_fsblock_t	bno,
+	xfs_filblks_t	len,
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.b.br_startoff = off;
+	cur->bc_rec.b.br_startblock = bno;
+	cur->bc_rec.b.br_blockcount = len;
+	return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
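+/*
+ * Lookup the first record less than or equal to [off, bno, len]
+ * in the btree given by cur.
+ */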
+int					/* error */
+xfs_bmbt_lookup_le(
+	xfs_btree_cur_t	*cur,
+	xfs_fileoff_t	off,
+	xfs_fsblock_t	bno,
+	xfs_filblks_t	len,
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.b.br_startoff = off;
+	cur->bc_rec.b.br_startblock = bno;
+	cur->bc_rec.b.br_blockcount = len;
+	return xfs_bmbt_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Give the bmap btree a new root block.  Copy the old broot contents
+ * down into a real block and make the broot point to it.
+ */
+int						/* error */
+xfs_bmbt_newroot(
+	xfs_btree_cur_t		*cur,		/* btree cursor */
+	int			*logflags,	/* logging flags for inode */
+	int			*stat)		/* return status - 0 fail */
+{
+	xfs_alloc_arg_t		args;		/* allocation arguments */
+	xfs_bmbt_block_t	*block;		/* bmap btree block */
+	xfs_buf_t		*bp;		/* buffer for block */
+	xfs_bmbt_block_t	*cblock;	/* child btree block */
+	xfs_bmbt_key_t		*ckp;		/* child key pointer */
+	xfs_bmbt_ptr_t		*cpp;		/* child ptr pointer */
+	int			error;		/* error return code */
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_newroot";
+#endif
+#ifdef DEBUG
+	int			i;		/* loop counter */
+#endif
+	xfs_bmbt_key_t		*kp;		/* pointer to bmap btree key */
+	int			level;		/* btree level */
+	xfs_bmbt_ptr_t		*pp;		/* pointer to bmap block addr */
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	level = cur->bc_nlevels - 1;
+	block = xfs_bmbt_get_block(cur, level, &bp);
+	/*
+	 * Copy the root into a real block.
+	 */
+	args.mp = cur->bc_mp;
+	pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+	args.tp = cur->bc_tp;
+	args.fsbno = cur->bc_private.b.firstblock;
+	args.mod = args.minleft = args.alignment = args.total = args.isfl =
+		args.userdata = args.minalignslop = 0;
+	args.minlen = args.maxlen = args.prod = 1;
+	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
+	if (args.fsbno == NULLFSBLOCK) {
+#ifdef DEBUG
+		if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+#endif
+		args.fsbno = INT_GET(*pp, ARCH_CONVERT);
+		args.type = XFS_ALLOCTYPE_START_BNO;
+	} else if (args.wasdel)
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
+	else
+		args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	if ((error = xfs_alloc_vextent(&args))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	if (args.fsbno == NULLFSBLOCK) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		*stat = 0;
+		return 0;
+	}
+	ASSERT(args.len == 1);
+	cur->bc_private.b.firstblock = args.fsbno;
+	cur->bc_private.b.allocated++;
+	cur->bc_private.b.ip->i_d.di_nblocks++;
+	XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
+			  XFS_TRANS_DQ_BCOUNT, 1L);
+	bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
+	cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
+	*cblock = *block;
+	INT_MOD(block->bb_level, ARCH_CONVERT, +1);
+	INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
+	cur->bc_nlevels++;
+	cur->bc_ptrs[level + 1] = 1;
+	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
+	ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
+	memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+	cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
+#ifdef DEBUG
+	for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) {
+		if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) {
+			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+			return error;
+		}
+	}
+#endif
+	memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno,
+			level))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	INT_SET(*pp, ARCH_CONVERT, args.fsbno);
+	xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT),
+		cur->bc_private.b.whichfork);
+	xfs_btree_setbuf(cur, level, bp);
+	/*
+	 * Do all this logging at the end so that
+	 * the root is at the right level.
+	 */
+	xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
+	xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
+	xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	*logflags |=
+		XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Set all the fields in a bmap extent record from the uncompressed form.
+ */
+void
+xfs_bmbt_set_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t	*s)
+{
+	int	extent_flag;
+
+	ASSERT((s->br_state == XFS_EXT_NORM) ||
+		(s->br_state == XFS_EXT_UNWRITTEN));
+	extent_flag = (s->br_state == XFS_EXT_NORM) ? 0 : 1;
+	ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0);
+	ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0);
+#if XFS_BIG_BLKNOS
+	ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0);
+	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43);
+	r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
+		 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+#else	/* !XFS_BIG_BLKNOS */
+	if (ISNULLSTARTBLOCK(s->br_startblock)) {
+		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+			  (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l1 = XFS_MASK64HI(11) |
+			  ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+			  ((xfs_bmbt_rec_base_t)s->br_blockcount &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	} else {
+		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)s->br_startoff << 9);
+		r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+			  ((xfs_bmbt_rec_base_t)s->br_blockcount &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	}
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+/*
+ * Set all the fields in a bmap extent record from the arguments.
+ */
+void
+xfs_bmbt_set_allf(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	o,
+	xfs_fsblock_t	b,
+	xfs_filblks_t	c,
+	xfs_exntst_t	v)
+{
+	int	extent_flag;
+
+	ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN));
+	extent_flag = (v == XFS_EXT_NORM) ? 0 : 1;
+	ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+#if XFS_BIG_BLKNOS
+	ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+		((xfs_bmbt_rec_base_t)o << 9) |
+		((xfs_bmbt_rec_base_t)b >> 43);
+	r->l1 = ((xfs_bmbt_rec_base_t)b << 21) |
+		((xfs_bmbt_rec_base_t)c &
+		(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+#else	/* !XFS_BIG_BLKNOS */
+	if (ISNULLSTARTBLOCK(b)) {
+		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)o << 9) |
+			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l1 = XFS_MASK64HI(11) |
+			  ((xfs_bmbt_rec_base_t)b << 21) |
+			  ((xfs_bmbt_rec_base_t)c &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	} else {
+		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)o << 9);
+		r->l1 = ((xfs_bmbt_rec_base_t)b << 21) |
+			 ((xfs_bmbt_rec_base_t)c &
+			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	}
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+/*
+ * Set all the fields in a bmap extent record from the uncompressed form.
+ */
+void
+xfs_bmbt_disk_set_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s)
+{
+	int	extent_flag;
+
+	ASSERT((s->br_state == XFS_EXT_NORM) ||
+		(s->br_state == XFS_EXT_UNWRITTEN));
+	extent_flag = (s->br_state == XFS_EXT_NORM) ? 0 : 1;
+	ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0);
+	ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0);
+#if XFS_BIG_BLKNOS
+	ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0);
+	INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+		  ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+		  ((xfs_bmbt_rec_base_t)s->br_startblock >> 43));
+	INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+		  ((xfs_bmbt_rec_base_t)s->br_blockcount &
+		   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+#else	/* !XFS_BIG_BLKNOS */
+	if (ISNULLSTARTBLOCK(s->br_startblock)) {
+		INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+			  (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+		INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) |
+			  ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+			  ((xfs_bmbt_rec_base_t)s->br_blockcount &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+	} else {
+		INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)s->br_startoff << 9));
+		INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+			  ((xfs_bmbt_rec_base_t)s->br_blockcount &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+	}
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+/*
+ * Set all the fields in a disk format bmap extent record from the arguments.
+ */
+void
+xfs_bmbt_disk_set_allf(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	o,
+	xfs_fsblock_t	b,
+	xfs_filblks_t	c,
+	xfs_exntst_t	v)
+{
+	int	extent_flag;
+
+	ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN));
+	extent_flag = (v == XFS_EXT_NORM) ? 0 : 1;
+	ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+#if XFS_BIG_BLKNOS
+	ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+		((xfs_bmbt_rec_base_t)o << 9) |
+		((xfs_bmbt_rec_base_t)b >> 43));
+	INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) |
+		  ((xfs_bmbt_rec_base_t)c &
+		   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+#else	/* !XFS_BIG_BLKNOS */
+	if (ISNULLSTARTBLOCK(b)) {
+		INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)o << 9) |
+			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+		INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) |
+			  ((xfs_bmbt_rec_base_t)b << 21) |
+			  ((xfs_bmbt_rec_base_t)c &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+	} else {
+		INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+			((xfs_bmbt_rec_base_t)o << 9));
+		INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) |
+			  ((xfs_bmbt_rec_base_t)c &
+			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+	}
+#endif	/* XFS_BIG_BLKNOS */
+}
+#endif
+
+/*
+ * Set the blockcount field in a bmap extent record.
+ */
+void
+xfs_bmbt_set_blockcount(
+	xfs_bmbt_rec_t	*r,
+	xfs_filblks_t	v)
+{
+	ASSERT((v & XFS_MASK64HI(43)) == 0);
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
+		  (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
+}
+
+/*
+ * Set the startblock field in a bmap extent record.
+ */
+void
+xfs_bmbt_set_startblock(
+	xfs_bmbt_rec_t	*r,
+	xfs_fsblock_t	v)
+{
+#if XFS_BIG_BLKNOS
+	ASSERT((v & XFS_MASK64HI(12)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
+		  (xfs_bmbt_rec_base_t)(v >> 43);
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
+		  (xfs_bmbt_rec_base_t)(v << 21);
+#else	/* !XFS_BIG_BLKNOS */
+	if (ISNULLSTARTBLOCK(v)) {
+		r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
+			  ((xfs_bmbt_rec_base_t)v << 21) |
+			  (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	} else {
+		r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
+			  (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+	}
+#endif	/* XFS_BIG_BLKNOS */
+}
+
+/*
+ * Set the startoff field in a bmap extent record.
+ */
+void
+xfs_bmbt_set_startoff(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	v)
+{
+	ASSERT((v & XFS_MASK64HI(9)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
+		((xfs_bmbt_rec_base_t)v << 9) |
+		  (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+}
+
+/*
+ * Set the extent state field in a bmap extent record.
+ */
+void
+xfs_bmbt_set_state(
+	xfs_bmbt_rec_t	*r,
+	xfs_exntst_t	v)
+{
+	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
+	if (v == XFS_EXT_NORM)
+		r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
+	else
+		r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
+}
+
+/*
+ * Convert in-memory form of btree root to on-disk form.
+ */
+void
+xfs_bmbt_to_bmdr(
+	xfs_bmbt_block_t	*rblock,
+	int			rblocklen,
+	xfs_bmdr_block_t	*dblock,
+	int			dblocklen)
+{
+	int			dmxr;
+	xfs_bmbt_key_t		*fkp;
+	xfs_bmbt_ptr_t		*fpp;
+	xfs_bmbt_key_t		*tkp;
+	xfs_bmbt_ptr_t		*tpp;
+
+	ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC);
+	ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO);
+	ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO);
+	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0);
+	dblock->bb_level = rblock->bb_level;	/* both in on-disk format */
+	dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */
+	dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
+	fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
+	tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
+	fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
+	tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
+	dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
+	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
+	memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
+}
+
+/*
+ * Update the record to the passed values.
+ */
+int
+xfs_bmbt_update(
+	xfs_btree_cur_t		*cur,
+	xfs_fileoff_t		off,
+	xfs_fsblock_t		bno,
+	xfs_filblks_t		len,
+	xfs_exntst_t		state)
+{
+	xfs_bmbt_block_t	*block;
+	xfs_buf_t		*bp;
+	int			error;
+#ifdef XFS_BMBT_TRACE
+	static char		fname[] = "xfs_bmbt_update";
+#endif
+	xfs_bmbt_key_t		key;
+	int			ptr;
+	xfs_bmbt_rec_t		*rp;
+
+	XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
+	XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno,
+		(xfs_dfilblks_t)len, (int)state);
+	block = xfs_bmbt_get_block(cur, 0, &bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+#endif
+	ptr = cur->bc_ptrs[0];
+	rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
+	xfs_bmbt_disk_set_allf(rp, off, bno, len, state);
+	xfs_bmbt_log_recs(cur, bp, ptr, ptr);
+	if (ptr > 1) {
+		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+		return 0;
+	}
+	INT_SET(key.br_startoff, ARCH_CONVERT, off);
+	if ((error = xfs_bmbt_updkey(cur, &key, 1))) {
+		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
+		return error;
+	}
+	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
+	return 0;
+}
+
+/*
+ * Check an extent list, which has just been read, for
+ * any bit set in the extent flag field.  ASSERT on debug
+ * kernels, as this condition should not occur.
+ * Return an error condition (1) if any flags are found,
+ * otherwise return 0.
+ */
+
+int
+xfs_check_nostate_extents(
+	xfs_bmbt_rec_t		*ep,
+	xfs_extnum_t		num)
+{
+	for (; num > 0; num--, ep++) {
+		if ((ep->l0 >>
+		     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
+			ASSERT(0);
+			return 1;
+		}
+	}
+	return 0;
+}
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
new file mode 100644
index 0000000..843ff12
--- /dev/null
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -0,0 +1,701 @@
+/*
+ * Copyright (c) 2000,2002-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_BMAP_BTREE_H__
+#define __XFS_BMAP_BTREE_H__
+
+#define XFS_BMAP_MAGIC	0x424d4150	/* 'BMAP' */
+
+struct xfs_btree_cur;
+struct xfs_btree_lblock;
+struct xfs_mount;
+struct xfs_inode;
+
+/*
+ * Bmap root header, on-disk form only.
+ */
+typedef struct xfs_bmdr_block
+{
+	__uint16_t	bb_level;	/* 0 is a leaf */
+	__uint16_t	bb_numrecs;	/* current # of data records */
+} xfs_bmdr_block_t;
+
+/*
+ * Bmap btree record and extent descriptor.
+ * For 32-bit kernels,
+ *  l0:31 is an extent flag (value 1 indicates non-normal).
+ *  l0:0-30 and l1:9-31 are startoff.
+ *  l1:0-8, l2:0-31, and l3:21-31 are startblock.
+ *  l3:0-20 are blockcount.
+ * For 64-bit kernels,
+ *  l0:63 is an extent flag (value 1 indicates non-normal).
+ *  l0:9-62 are startoff.
+ *  l0:0-8 and l1:21-63 are startblock.
+ *  l1:0-20 are blockcount.
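+ *
+ * For example, in the 64-bit form a normal extent with startoff 2,
+ * startblock 5 and blockcount 3 is encoded as l0 = (2 << 9) and
+ * l1 = (5 << 21) | 3.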
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+
+#define BMBT_TOTAL_BITLEN	128	/* 128 bits, 16 bytes */
+#define BMBT_EXNTFLAG_BITOFF	0
+#define BMBT_EXNTFLAG_BITLEN	1
+#define BMBT_STARTOFF_BITOFF	(BMBT_EXNTFLAG_BITOFF + BMBT_EXNTFLAG_BITLEN)
+#define BMBT_STARTOFF_BITLEN	54
+#define BMBT_STARTBLOCK_BITOFF	(BMBT_STARTOFF_BITOFF + BMBT_STARTOFF_BITLEN)
+#define BMBT_STARTBLOCK_BITLEN	52
+#define BMBT_BLOCKCOUNT_BITOFF	\
+	(BMBT_STARTBLOCK_BITOFF + BMBT_STARTBLOCK_BITLEN)
+#define BMBT_BLOCKCOUNT_BITLEN	(BMBT_TOTAL_BITLEN - BMBT_BLOCKCOUNT_BITOFF)
+
+#else
+
+#define BMBT_TOTAL_BITLEN	128	/* 128 bits, 16 bytes */
+#define BMBT_EXNTFLAG_BITOFF	63
+#define BMBT_EXNTFLAG_BITLEN	1
+#define BMBT_STARTOFF_BITOFF	(BMBT_EXNTFLAG_BITOFF - BMBT_STARTOFF_BITLEN)
+#define BMBT_STARTOFF_BITLEN	54
+#define BMBT_STARTBLOCK_BITOFF	85 /* 128 - 43 (other 9 is in first word) */
+#define BMBT_STARTBLOCK_BITLEN	52
+#define BMBT_BLOCKCOUNT_BITOFF	64 /* Start of second 64 bit container */
+#define BMBT_BLOCKCOUNT_BITLEN	21
+
+#endif
+
+
+#define BMBT_USE_64	1
+
+typedef struct xfs_bmbt_rec_32
+{
+	__uint32_t		l0, l1, l2, l3;
+} xfs_bmbt_rec_32_t;
+typedef struct xfs_bmbt_rec_64
+{
+	__uint64_t		l0, l1;
+} xfs_bmbt_rec_64_t;
+
+typedef __uint64_t	xfs_bmbt_rec_base_t;	/* use this for casts */
+typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t;
+
+/*
+ * Values and macros for delayed-allocation startblock fields.
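+ * A delayed allocation extent keeps NULLSTARTBLOCK(k) in its startblock
+ * field until real blocks are allocated; k (STARTBLOCKVAL) is the count
+ * of indirect blocks reserved for the extent.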
+ */
+#define STARTBLOCKVALBITS	17
+#define STARTBLOCKMASKBITS	(15 + XFS_BIG_BLKNOS * 20)
+#define DSTARTBLOCKMASKBITS	(15 + 20)
+#define STARTBLOCKMASK		\
+	(((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#define DSTARTBLOCKMASK		\
+	(((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLSTARTBLOCK)
+int isnullstartblock(xfs_fsblock_t x);
+#define ISNULLSTARTBLOCK(x)	isnullstartblock(x)
+#else
+#define ISNULLSTARTBLOCK(x)	(((x) & STARTBLOCKMASK) == STARTBLOCKMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLDSTARTBLOCK)
+int isnulldstartblock(xfs_dfsbno_t x);
+#define ISNULLDSTARTBLOCK(x)	isnulldstartblock(x)
+#else
+#define ISNULLDSTARTBLOCK(x)	(((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_NULLSTARTBLOCK)
+xfs_fsblock_t nullstartblock(int k);
+#define NULLSTARTBLOCK(k)	nullstartblock(k)
+#else
+#define NULLSTARTBLOCK(k)	\
+	((ASSERT(k < (1 << STARTBLOCKVALBITS))), (STARTBLOCKMASK | (k)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_STARTBLOCKVAL)
+xfs_filblks_t startblockval(xfs_fsblock_t x);
+#define STARTBLOCKVAL(x)	startblockval(x)
+#else
+#define STARTBLOCKVAL(x)	((xfs_filblks_t)((x) & ~STARTBLOCKMASK))
+#endif
+
+/*
+ * Possible extent formats.
+ */
+typedef enum {
+	XFS_EXTFMT_NOSTATE = 0,
+	XFS_EXTFMT_HASSTATE
+} xfs_exntfmt_t;
+
+/*
+ * Possible extent states.
+ */
+typedef enum {
+	XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+	XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
+} xfs_exntst_t;
+
+/*
+ * Extent state and extent format macros.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTFMT_INODE )
+xfs_exntfmt_t xfs_extfmt_inode(struct xfs_inode *ip);
+#define XFS_EXTFMT_INODE(x)	xfs_extfmt_inode(x)
+#else
+#define XFS_EXTFMT_INODE(x) \
+  (XFS_SB_VERSION_HASEXTFLGBIT(&((x)->i_mount->m_sb)) ? \
+	XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
+#endif
+#define ISUNWRITTEN(x)	((x)->br_state == XFS_EXT_UNWRITTEN)
+
+/*
+ * Incore version of above.
+ */
+typedef struct xfs_bmbt_irec
+{
+	xfs_fileoff_t	br_startoff;	/* starting file offset */
+	xfs_fsblock_t	br_startblock;	/* starting block number */
+	xfs_filblks_t	br_blockcount;	/* number of blocks */
+	xfs_exntst_t	br_state;	/* extent state */
+} xfs_bmbt_irec_t;
+
+/*
+ * Key structure for non-leaf levels of the tree.
+ */
+typedef struct xfs_bmbt_key
+{
+	xfs_dfiloff_t	br_startoff;	/* starting file offset */
+} xfs_bmbt_key_t, xfs_bmdr_key_t;
+
+typedef xfs_dfsbno_t xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;	/* btree pointer type */
+					/* btree block header type */
+typedef struct xfs_btree_lblock xfs_bmbt_block_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BMBT_BLOCK)
+xfs_bmbt_block_t *xfs_buf_to_bmbt_block(struct xfs_buf *bp);
+#define XFS_BUF_TO_BMBT_BLOCK(bp)		xfs_buf_to_bmbt_block(bp)
+#else
+#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)(XFS_BUF_PTR(bp)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_DSIZE)
+int xfs_bmap_rblock_dsize(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_RBLOCK_DSIZE(lev,cur)		xfs_bmap_rblock_dsize(lev,cur)
+#else
+#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_ISIZE)
+int xfs_bmap_rblock_isize(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_RBLOCK_ISIZE(lev,cur)		xfs_bmap_rblock_isize(lev,cur)
+#else
+#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \
+	((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \
+			    (cur)->bc_private.b.whichfork)->if_broot_bytes)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_IBLOCK_SIZE)
+int xfs_bmap_iblock_size(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_IBLOCK_SIZE(lev,cur)		xfs_bmap_iblock_size(lev,cur)
+#else
+#define XFS_BMAP_IBLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DSIZE)
+int xfs_bmap_block_dsize(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_DSIZE(lev,cur)		xfs_bmap_block_dsize(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_DSIZE(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BMAP_RBLOCK_DSIZE(lev,cur) : \
+		XFS_BMAP_IBLOCK_SIZE(lev,cur))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_ISIZE)
+int xfs_bmap_block_isize(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_ISIZE(lev,cur)		xfs_bmap_block_isize(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_ISIZE(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BMAP_RBLOCK_ISIZE(lev,cur) : \
+		XFS_BMAP_IBLOCK_SIZE(lev,cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMAXRECS)
+int xfs_bmap_block_dmaxrecs(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur)	xfs_bmap_block_dmaxrecs(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
+			xfs_bmdr, (lev) == 0) : \
+		((cur)->bc_mp->m_bmap_dmxr[(lev) != 0]))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMAXRECS)
+int xfs_bmap_block_imaxrecs(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur)	xfs_bmap_block_imaxrecs(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \
+			xfs_bmbt, (lev) == 0) : \
+		((cur)->bc_mp->m_bmap_dmxr[(lev) != 0]))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMINRECS)
+int xfs_bmap_block_dminrecs(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_DMINRECS(lev,cur)	xfs_bmap_block_dminrecs(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
+			xfs_bmdr, (lev) == 0) : \
+		((cur)->bc_mp->m_bmap_dmnr[(lev) != 0]))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMINRECS)
+int xfs_bmap_block_iminrecs(int lev, struct xfs_btree_cur *cur);
+#define XFS_BMAP_BLOCK_IMINRECS(lev,cur)	xfs_bmap_block_iminrecs(lev,cur)
+#else
+#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \
+	((lev) == (cur)->bc_nlevels - 1 ? \
+		XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \
+			xfs_bmbt, (lev) == 0) : \
+		((cur)->bc_mp->m_bmap_dmnr[(lev) != 0]))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_DADDR)
+xfs_bmbt_rec_t *
+xfs_bmap_rec_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_REC_DADDR(bb,i,cur)		xfs_bmap_rec_daddr(bb,i,cur)
+#else
+#define XFS_BMAP_REC_DADDR(bb,i,cur) \
+	XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_IADDR)
+xfs_bmbt_rec_t *
+xfs_bmap_rec_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_REC_IADDR(bb,i,cur)		xfs_bmap_rec_iaddr(bb,i,cur)
+#else
+#define XFS_BMAP_REC_IADDR(bb,i,cur) \
+	XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_DADDR)
+xfs_bmbt_key_t *
+xfs_bmap_key_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_KEY_DADDR(bb,i,cur)		xfs_bmap_key_daddr(bb,i,cur)
+#else
+#define XFS_BMAP_KEY_DADDR(bb,i,cur) \
+	XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_IADDR)
+xfs_bmbt_key_t *
+xfs_bmap_key_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_KEY_IADDR(bb,i,cur)		xfs_bmap_key_iaddr(bb,i,cur)
+#else
+#define XFS_BMAP_KEY_IADDR(bb,i,cur) \
+	XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_DADDR)
+xfs_bmbt_ptr_t *
+xfs_bmap_ptr_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_PTR_DADDR(bb,i,cur)		xfs_bmap_ptr_daddr(bb,i,cur)
+#else
+#define XFS_BMAP_PTR_DADDR(bb,i,cur) \
+	XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_IADDR)
+xfs_bmbt_ptr_t *
+xfs_bmap_ptr_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define XFS_BMAP_PTR_IADDR(bb,i,cur)		xfs_bmap_ptr_iaddr(bb,i,cur)
+#else
+#define XFS_BMAP_PTR_IADDR(bb,i,cur) \
+	XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE(		\
+		INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+		xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
+			INT_GET((bb)->bb_level, ARCH_CONVERT), cur))
+#endif
+
+/*
+ * These are to be used when we know the size of the block and
+ * we don't have a cursor.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_REC_ADDR)
+xfs_bmbt_rec_t *xfs_bmap_broot_rec_addr(xfs_bmbt_block_t *bb, int i, int sz);
+#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz)	xfs_bmap_broot_rec_addr(bb,i,sz)
+#else
+#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \
+	XFS_BTREE_REC_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_KEY_ADDR)
+xfs_bmbt_key_t *xfs_bmap_broot_key_addr(xfs_bmbt_block_t *bb, int i, int sz);
+#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz)	xfs_bmap_broot_key_addr(bb,i,sz)
+#else
+#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \
+	XFS_BTREE_KEY_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_PTR_ADDR)
+xfs_bmbt_ptr_t *xfs_bmap_broot_ptr_addr(xfs_bmbt_block_t *bb, int i, int sz);
+#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz)	xfs_bmap_broot_ptr_addr(bb,i,sz)
+#else
+#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
+	XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_NUMRECS)
+int xfs_bmap_broot_numrecs(xfs_bmdr_block_t *bb);
+#define XFS_BMAP_BROOT_NUMRECS(bb)		xfs_bmap_broot_numrecs(bb)
+#else
+#define XFS_BMAP_BROOT_NUMRECS(bb) (INT_GET((bb)->bb_numrecs, ARCH_CONVERT))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_MAXRECS)
+int xfs_bmap_broot_maxrecs(int sz);
+#define XFS_BMAP_BROOT_MAXRECS(sz)		xfs_bmap_broot_maxrecs(sz)
+#else
+#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE_CALC)
+int xfs_bmap_broot_space_calc(int nrecs);
+#define XFS_BMAP_BROOT_SPACE_CALC(nrecs)	xfs_bmap_broot_space_calc(nrecs)
+#else
+#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \
+	((int)(sizeof(xfs_bmbt_block_t) + \
+	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE)
+int xfs_bmap_broot_space(xfs_bmdr_block_t *bb);
+#define XFS_BMAP_BROOT_SPACE(bb)		xfs_bmap_broot_space(bb)
+#else
+#define XFS_BMAP_BROOT_SPACE(bb) \
+	XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMDR_SPACE_CALC)
+int xfs_bmdr_space_calc(int nrecs);
+#define XFS_BMDR_SPACE_CALC(nrecs)		xfs_bmdr_space_calc(nrecs)
+#else
+#define XFS_BMDR_SPACE_CALC(nrecs)	\
+	((int)(sizeof(xfs_bmdr_block_t) + \
+	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))))
+#endif
+
+/*
+ * Maximum number of bmap btree levels.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BM_MAXLEVELS)
+int xfs_bm_maxlevels(struct xfs_mount *mp, int w);
+#define XFS_BM_MAXLEVELS(mp,w)			xfs_bm_maxlevels(mp,w)
+#else
+#define XFS_BM_MAXLEVELS(mp,w)		((mp)->m_bm_maxlevels[w])
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_SANITY_CHECK)
+int xfs_bmap_sanity_check(struct xfs_mount *mp, xfs_bmbt_block_t *bb,
+	int level);
+#define XFS_BMAP_SANITY_CHECK(mp,bb,level)	\
+	xfs_bmap_sanity_check(mp,bb,level)
+#else
+#define XFS_BMAP_SANITY_CHECK(mp,bb,level)	\
+	(INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \
+	 INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \
+	 INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \
+	 INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0])
+#endif
+
+
+#ifdef __KERNEL__
+
+#if defined(XFS_BMBT_TRACE)
+/*
+ * Trace buffer entry types.
+ */
+#define XFS_BMBT_KTRACE_ARGBI	1
+#define XFS_BMBT_KTRACE_ARGBII	2
+#define XFS_BMBT_KTRACE_ARGFFFI 3
+#define XFS_BMBT_KTRACE_ARGI	4
+#define XFS_BMBT_KTRACE_ARGIFK	5
+#define XFS_BMBT_KTRACE_ARGIFR	6
+#define XFS_BMBT_KTRACE_ARGIK	7
+#define XFS_BMBT_KTRACE_CUR	8
+
+#define XFS_BMBT_TRACE_SIZE	4096	/* size of global trace buffer */
+#define XFS_BMBT_KTRACE_SIZE	32	/* size of per-inode trace buffer */
+extern ktrace_t	*xfs_bmbt_trace_buf;
+#endif
+
+/*
+ * Prototypes for xfs_bmap.c to call.
+ */
+
+void
+xfs_bmdr_to_bmbt(
+	xfs_bmdr_block_t *,
+	int,
+	xfs_bmbt_block_t *,
+	int);
+
+int
+xfs_bmbt_decrement(
+	struct xfs_btree_cur *,
+	int,
+	int *);
+
+int
+xfs_bmbt_delete(
+	struct xfs_btree_cur *,
+	int *);
+
+void
+xfs_bmbt_get_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s);
+
+xfs_bmbt_block_t *
+xfs_bmbt_get_block(
+	struct xfs_btree_cur	*cur,
+	int			level,
+	struct xfs_buf		**bpp);
+
+xfs_filblks_t
+xfs_bmbt_get_blockcount(
+	xfs_bmbt_rec_t	*r);
+
+xfs_fsblock_t
+xfs_bmbt_get_startblock(
+	xfs_bmbt_rec_t	*r);
+
+xfs_fileoff_t
+xfs_bmbt_get_startoff(
+	xfs_bmbt_rec_t	*r);
+
+xfs_exntst_t
+xfs_bmbt_get_state(
+	xfs_bmbt_rec_t	*r);
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+void
+xfs_bmbt_disk_get_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s);
+
+xfs_exntst_t
+xfs_bmbt_disk_get_state(
+	xfs_bmbt_rec_t	*r);
+
+xfs_filblks_t
+xfs_bmbt_disk_get_blockcount(
+	xfs_bmbt_rec_t	*r);
+
+xfs_fsblock_t
+xfs_bmbt_disk_get_startblock(
+	xfs_bmbt_rec_t	*r);
+
+xfs_fileoff_t
+xfs_bmbt_disk_get_startoff(
+	xfs_bmbt_rec_t	*r);
+
+#else
+#define xfs_bmbt_disk_get_all(r, s) \
+	xfs_bmbt_get_all(r, s)
+#define xfs_bmbt_disk_get_state(r) \
+	xfs_bmbt_get_state(r)
+#define xfs_bmbt_disk_get_blockcount(r) \
+	xfs_bmbt_get_blockcount(r)
+#define xfs_bmbt_disk_get_startblock(r) \
+	xfs_bmbt_get_startblock(r)
+#define xfs_bmbt_disk_get_startoff(r) \
+	xfs_bmbt_get_startoff(r)
+#endif
+
+int
+xfs_bmbt_increment(
+	struct xfs_btree_cur *,
+	int,
+	int *);
+
+int
+xfs_bmbt_insert(
+	struct xfs_btree_cur *,
+	int *);
+
+void
+xfs_bmbt_log_block(
+	struct xfs_btree_cur *,
+	struct xfs_buf *,
+	int);
+
+void
+xfs_bmbt_log_recs(
+	struct xfs_btree_cur *,
+	struct xfs_buf *,
+	int,
+	int);
+
+int
+xfs_bmbt_lookup_eq(
+	struct xfs_btree_cur *,
+	xfs_fileoff_t,
+	xfs_fsblock_t,
+	xfs_filblks_t,
+	int *);
+
+int
+xfs_bmbt_lookup_ge(
+	struct xfs_btree_cur *,
+	xfs_fileoff_t,
+	xfs_fsblock_t,
+	xfs_filblks_t,
+	int *);
+
+int
+xfs_bmbt_lookup_le(
+	struct xfs_btree_cur *,
+	xfs_fileoff_t,
+	xfs_fsblock_t,
+	xfs_filblks_t,
+	int *);
+
+/*
+ * Give the bmap btree a new root block.  Copy the old broot contents
+ * down into a real block and make the broot point to it.
+ */
+int						/* error */
+xfs_bmbt_newroot(
+	struct xfs_btree_cur	*cur,		/* btree cursor */
+	int			*logflags,	/* logging flags for inode */
+	int			*stat);		/* return status - 0 fail */
+
+void
+xfs_bmbt_set_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s);
+
+void
+xfs_bmbt_set_allf(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	o,
+	xfs_fsblock_t	b,
+	xfs_filblks_t	c,
+	xfs_exntst_t	v);
+
+void
+xfs_bmbt_set_blockcount(
+	xfs_bmbt_rec_t	*r,
+	xfs_filblks_t	v);
+
+void
+xfs_bmbt_set_startblock(
+	xfs_bmbt_rec_t	*r,
+	xfs_fsblock_t	v);
+
+void
+xfs_bmbt_set_startoff(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	v);
+
+void
+xfs_bmbt_set_state(
+	xfs_bmbt_rec_t	*r,
+	xfs_exntst_t	v);
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+void
+xfs_bmbt_disk_set_all(
+	xfs_bmbt_rec_t	*r,
+	xfs_bmbt_irec_t *s);
+
+void
+xfs_bmbt_disk_set_allf(
+	xfs_bmbt_rec_t	*r,
+	xfs_fileoff_t	o,
+	xfs_fsblock_t	b,
+	xfs_filblks_t	c,
+	xfs_exntst_t	v);
+#else
+#define xfs_bmbt_disk_set_all(r, s) \
+	xfs_bmbt_set_all(r, s)
+#define xfs_bmbt_disk_set_allf(r, o, b, c, v) \
+	xfs_bmbt_set_allf(r, o, b, c, v)
+#endif
+
+void
+xfs_bmbt_to_bmdr(
+	xfs_bmbt_block_t *,
+	int,
+	xfs_bmdr_block_t *,
+	int);
+
+int
+xfs_bmbt_update(
+	struct xfs_btree_cur *,
+	xfs_fileoff_t,
+	xfs_fsblock_t,
+	xfs_filblks_t,
+	xfs_exntst_t);
+
+#ifdef DEBUG
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_bmbt_get_rec(
+	struct xfs_btree_cur *,
+	xfs_fileoff_t *,
+	xfs_fsblock_t *,
+	xfs_filblks_t *,
+	xfs_exntst_t *,
+	int *);
+#endif
+
+
+/*
+ * Search an extent list for the extent which includes block
+ * bno.
+ */
+xfs_bmbt_rec_t *
+xfs_bmap_do_search_extents(
+	xfs_bmbt_rec_t *,
+	xfs_extnum_t,
+	xfs_extnum_t,
+	xfs_fileoff_t,
+	int *,
+	xfs_extnum_t *,
+	xfs_bmbt_irec_t *,
+	xfs_bmbt_irec_t *);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_BMAP_BTREE_H__ */
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
new file mode 100644
index 0000000..9dd22dd
--- /dev/null
+++ b/fs/xfs/xfs_btree.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains common code for the space manager's btree implementations.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bit.h"
+#include "xfs_error.h"
+
+/*
+ * Cursor allocation zone.
+ */
+kmem_zone_t	*xfs_btree_cur_zone;
+
+/*
+ * Btree magic numbers.
+ */
+const __uint32_t xfs_magics[XFS_BTNUM_MAX] =
+{
+	XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
+};
+
+/*
+ * Prototypes for internal routines.
+ */
+
+/*
+ * Checking routine: return maxrecs for the block.
+ */
+STATIC int				/* number of records fitting in block */
+xfs_btree_maxrecs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_block_t	*block);/* generic btree block pointer */
+
+/*
+ * Internal routines.
+ */
+
+/*
+ * Checking routine: return maxrecs for the block.
+ */
+STATIC int				/* number of records fitting in block */
+xfs_btree_maxrecs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_block_t	*block)	/* generic btree block pointer */
+{
+	switch (cur->bc_btnum) {
+	case XFS_BTNUM_BNO:
+	case XFS_BTNUM_CNT:
+		return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+	case XFS_BTNUM_BMAP:
+		return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+	case XFS_BTNUM_INO:
+		return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+	default:
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/*
+ * External routines.
+ */
+
+#ifdef DEBUG
+/*
+ * Debug routine: check that block header is ok.
+ */
+void
+xfs_btree_check_block(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_block_t	*block,	/* generic btree block pointer */
+	int			level,	/* level of the btree block */
+	xfs_buf_t		*bp)	/* buffer containing block, if any */
+{
+	if (XFS_BTREE_LONG_PTRS(cur->bc_btnum))
+		xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level,
+			bp);
+	else
+		xfs_btree_check_sblock(cur, (xfs_btree_sblock_t *)block, level,
+			bp);
+}
+
+/*
+ * Debug routine: check that keys are in the right order.
+ */
+void
+xfs_btree_check_key(
+	xfs_btnum_t	btnum,		/* btree identifier */
+	void		*ak1,		/* pointer to left (lower) key */
+	void		*ak2)		/* pointer to right (higher) key */
+{
+	switch (btnum) {
+	case XFS_BTNUM_BNO: {
+		xfs_alloc_key_t	*k1;
+		xfs_alloc_key_t	*k2;
+
+		k1 = ak1;
+		k2 = ak2;
+		ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT));
+		break;
+	    }
+	case XFS_BTNUM_CNT: {
+		xfs_alloc_key_t	*k1;
+		xfs_alloc_key_t	*k2;
+
+		k1 = ak1;
+		k2 = ak2;
+		ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) ||
+		       (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) &&
+			INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)));
+		break;
+	    }
+	case XFS_BTNUM_BMAP: {
+		xfs_bmbt_key_t	*k1;
+		xfs_bmbt_key_t	*k2;
+
+		k1 = ak1;
+		k2 = ak2;
+		ASSERT(INT_GET(k1->br_startoff, ARCH_CONVERT) < INT_GET(k2->br_startoff, ARCH_CONVERT));
+		break;
+	    }
+	case XFS_BTNUM_INO: {
+		xfs_inobt_key_t	*k1;
+		xfs_inobt_key_t	*k2;
+
+		k1 = ak1;
+		k2 = ak2;
+		ASSERT(INT_GET(k1->ir_startino, ARCH_CONVERT) < INT_GET(k2->ir_startino, ARCH_CONVERT));
+		break;
+	    }
+	default:
+		ASSERT(0);
+	}
+}
+#endif	/* DEBUG */
+
+/*
+ * Checking routine: check that long form block header is ok.
+ */
+/* ARGSUSED */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_lblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_lblock_t	*block,	/* btree long form block pointer */
+	int			level,	/* level of the btree block */
+	xfs_buf_t		*bp)	/* buffer for block, if any */
+{
+	int			lblock_ok; /* block passes checks */
+	xfs_mount_t		*mp;	/* file system mount point */
+
+	mp = cur->bc_mp;
+	lblock_ok =
+		INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] &&
+		INT_GET(block->bb_level, ARCH_CONVERT) == level &&
+		INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+			xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
+		block->bb_leftsib &&
+		(INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO ||
+		 XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) &&
+		block->bb_rightsib &&
+		(INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO ||
+		 XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT)));
+	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK,
+			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
+		if (bp)
+			xfs_buftrace("LBTREE ERROR", bp);
+		XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
+				 mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
+/*
+ * Checking routine: check that (long) pointer is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_lptr(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_dfsbno_t	ptr,		/* btree block disk address */
+	int		level)		/* btree block level */
+{
+	xfs_mount_t	*mp;		/* file system mount point */
+
+	mp = cur->bc_mp;
+	XFS_WANT_CORRUPTED_RETURN(
+		level > 0 &&
+		ptr != NULLDFSBNO &&
+		XFS_FSB_SANITY_CHECK(mp, ptr));
+	return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Debug routine: check that records are in the right order.
+ */
+void
+xfs_btree_check_rec(
+	xfs_btnum_t	btnum,		/* btree identifier */
+	void		*ar1,		/* pointer to left (lower) record */
+	void		*ar2)		/* pointer to right (higher) record */
+{
+	switch (btnum) {
+	case XFS_BTNUM_BNO: {
+		xfs_alloc_rec_t	*r1;
+		xfs_alloc_rec_t	*r2;
+
+		r1 = ar1;
+		r2 = ar2;
+		ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <=
+		       INT_GET(r2->ar_startblock, ARCH_CONVERT));
+		break;
+	    }
+	case XFS_BTNUM_CNT: {
+		xfs_alloc_rec_t	*r1;
+		xfs_alloc_rec_t	*r2;
+
+		r1 = ar1;
+		r2 = ar2;
+		ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) ||
+		       (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) &&
+			INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT)));
+		break;
+	    }
+	case XFS_BTNUM_BMAP: {
+		xfs_bmbt_rec_t	*r1;
+		xfs_bmbt_rec_t	*r2;
+
+		r1 = ar1;
+		r2 = ar2;
+		ASSERT(xfs_bmbt_disk_get_startoff(r1) +
+		       xfs_bmbt_disk_get_blockcount(r1) <=
+		       xfs_bmbt_disk_get_startoff(r2));
+		break;
+	    }
+	case XFS_BTNUM_INO: {
+		xfs_inobt_rec_t	*r1;
+		xfs_inobt_rec_t	*r2;
+
+		r1 = ar1;
+		r2 = ar2;
+		ASSERT(INT_GET(r1->ir_startino, ARCH_CONVERT) + XFS_INODES_PER_CHUNK <=
+		       INT_GET(r2->ir_startino, ARCH_CONVERT));
+		break;
+	    }
+	default:
+		ASSERT(0);
+	}
+}
+#endif	/* DEBUG */
+
+/*
+ * Checking routine: check that short form block header is ok.
+ */
+/* ARGSUSED */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_sblock_t	*block,	/* btree short form block pointer */
+	int			level,	/* level of the btree block */
+	xfs_buf_t		*bp)	/* buffer containing block */
+{
+	xfs_buf_t		*agbp;	/* buffer for ag. freespace struct */
+	xfs_agf_t		*agf;	/* ag. freespace structure */
+	xfs_agblock_t		agflen;	/* native ag. freespace length */
+	int			sblock_ok; /* block passes checks */
+
+	agbp = cur->bc_private.a.agbp;
+	agf = XFS_BUF_TO_AGF(agbp);
+	agflen = INT_GET(agf->agf_length, ARCH_CONVERT);
+	sblock_ok =
+		INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] &&
+		INT_GET(block->bb_level, ARCH_CONVERT) == level &&
+		INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+			xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
+		(INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK ||
+		 INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) &&
+		block->bb_leftsib &&
+		(INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK ||
+		 INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) &&
+		block->bb_rightsib;
+	if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
+			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
+			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
+		if (bp)
+			xfs_buftrace("SBTREE ERROR", bp);
+		XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW,
+				 cur->bc_mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
+/*
+ * Checking routine: check that (short) pointer is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sptr(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agblock_t	ptr,		/* btree block disk address */
+	int		level)		/* btree block level */
+{
+	xfs_buf_t	*agbp;		/* buffer for ag. freespace struct */
+	xfs_agf_t	*agf;		/* ag. freespace structure */
+
+	agbp = cur->bc_private.a.agbp;
+	agf = XFS_BUF_TO_AGF(agbp);
+	XFS_WANT_CORRUPTED_RETURN(
+		level > 0 &&
+		ptr != NULLAGBLOCK && ptr != 0 &&
+		ptr < INT_GET(agf->agf_length, ARCH_CONVERT));
+	return 0;
+}
+
+/*
+ * Delete the btree cursor.
+ */
+void
+xfs_btree_del_cursor(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		error)		/* del because of error */
+{
+	int		i;		/* btree level */
+
+	/*
+	 * Clear the buffer pointers, and release the buffers.
+	 * If we're doing this in the face of an error, we
+	 * need to make sure to inspect all of the entries
+	 * in the bc_bufs array for buffers to be unlocked.
+	 * This is because some of the btree code works from
+	 * level n down to 0, and if we get an error along
+	 * the way we won't have initialized all the entries
+	 * down to 0.
+	 */
+	for (i = 0; i < cur->bc_nlevels; i++) {
+		if (cur->bc_bufs[i])
+			xfs_btree_setbuf(cur, i, NULL);
+		else if (!error)
+			break;
+	}
+	/*
+	 * Can't free a bmap cursor without having dealt with the
+	 * allocated indirect blocks' accounting.
+	 */
+	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
+	       cur->bc_private.b.allocated == 0);
+	/*
+	 * Free the cursor.
+	 */
+	kmem_zone_free(xfs_btree_cur_zone, cur);
+}
+
+/*
+ * Duplicate the btree cursor.
+ * Allocate a new one, copy the record, re-get the buffers.
+ */
+int					/* error */
+xfs_btree_dup_cursor(
+	xfs_btree_cur_t	*cur,		/* input cursor */
+	xfs_btree_cur_t	**ncur)		/* output cursor */
+{
+	xfs_buf_t	*bp;		/* btree block's buffer pointer */
+	int		error;		/* error return value */
+	int		i;		/* level number of btree block */
+	xfs_mount_t	*mp;		/* mount structure for filesystem */
+	xfs_btree_cur_t	*new;		/* new cursor value */
+	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */
+
+	tp = cur->bc_tp;
+	mp = cur->bc_mp;
+	/*
+	 * Allocate a new cursor like the old one.
+	 */
+	new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp,
+		cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip,
+		cur->bc_private.b.whichfork);
+	/*
+	 * Copy the record currently in the cursor.
+	 */
+	new->bc_rec = cur->bc_rec;
+	/*
+	 * For each level of the cursor, re-get the buffer and copy the ptr value.
+	 */
+	for (i = 0; i < new->bc_nlevels; i++) {
+		new->bc_ptrs[i] = cur->bc_ptrs[i];
+		new->bc_ra[i] = cur->bc_ra[i];
+		if ((bp = cur->bc_bufs[i])) {
+			if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+				XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) {
+				xfs_btree_del_cursor(new, error);
+				*ncur = NULL;
+				return error;
+			}
+			new->bc_bufs[i] = bp;
+			ASSERT(bp);
+			ASSERT(!XFS_BUF_GETERROR(bp));
+		} else
+			new->bc_bufs[i] = NULL;
+	}
+	/*
+	 * For bmap btrees, copy the firstblock, flist, and flags values,
+	 * since init cursor doesn't get them.
+	 */
+	if (new->bc_btnum == XFS_BTNUM_BMAP) {
+		new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
+		new->bc_private.b.flist = cur->bc_private.b.flist;
+		new->bc_private.b.flags = cur->bc_private.b.flags;
+	}
+	*ncur = new;
+	return 0;
+}
+
+/*
+ * Change the cursor to point to the first record in the current block
+ * at the given level.  Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_firstrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level)	/* level to change */
+{
+	xfs_btree_block_t	*block;	/* generic btree block pointer */
+	xfs_buf_t		*bp;	/* buffer containing block */
+
+	/*
+	 * Get the block pointer for this level.
+	 */
+	block = xfs_btree_get_block(cur, level, &bp);
+	xfs_btree_check_block(cur, block, level, bp);
+	/*
+	 * It's empty, there is no such record.
+	 */
+	if (!block->bb_h.bb_numrecs)
+		return 0;
+	/*
+	 * Set the ptr value to 1, that's the first record/key.
+	 */
+	cur->bc_ptrs[level] = 1;
+	return 1;
+}
+
+/*
+ * Retrieve the block pointer from the cursor at the given level.
+ * This may be a bmap btree root or from a buffer.
+ */
+xfs_btree_block_t *			/* generic btree block pointer */
+xfs_btree_get_block(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree */
+	xfs_buf_t		**bpp)	/* buffer containing the block */
+{
+	xfs_btree_block_t	*block;	/* return value */
+	xfs_buf_t		*bp;	/* return buffer */
+	xfs_ifork_t		*ifp;	/* inode fork pointer */
+	int			whichfork; /* data or attr fork */
+
+	if (cur->bc_btnum == XFS_BTNUM_BMAP && level == cur->bc_nlevels - 1) {
+		whichfork = cur->bc_private.b.whichfork;
+		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, whichfork);
+		block = (xfs_btree_block_t *)ifp->if_broot;
+		bp = NULL;
+	} else {
+		bp = cur->bc_bufs[level];
+		block = XFS_BUF_TO_BLOCK(bp);
+	}
+	ASSERT(block != NULL);
+	*bpp = bp;
+	return block;
+}
+
+/*
+ * Get a buffer for the block, return it with no data read.
+ * Long-form addressing.
+ */
+xfs_buf_t *				/* buffer for fsbno */
+xfs_btree_get_bufl(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_fsblock_t	fsbno,		/* file system block number */
+	uint		lock)		/* lock flags for get_buf */
+{
+	xfs_buf_t	*bp;		/* buffer pointer (return value) */
+	xfs_daddr_t		d;		/* real disk block address */
+
+	ASSERT(fsbno != NULLFSBLOCK);
+	d = XFS_FSB_TO_DADDR(mp, fsbno);
+	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
+	ASSERT(bp);
+	ASSERT(!XFS_BUF_GETERROR(bp));
+	return bp;
+}
+
+/*
+ * Get a buffer for the block, return it with no data read.
+ * Short-form addressing.
+ */
+xfs_buf_t *				/* buffer for agno/agbno */
+xfs_btree_get_bufs(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_agblock_t	agbno,		/* allocation group block number */
+	uint		lock)		/* lock flags for get_buf */
+{
+	xfs_buf_t	*bp;		/* buffer pointer (return value) */
+	xfs_daddr_t		d;		/* real disk block address */
+
+	ASSERT(agno != NULLAGNUMBER);
+	ASSERT(agbno != NULLAGBLOCK);
+	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
+	ASSERT(bp);
+	ASSERT(!XFS_BUF_GETERROR(bp));
+	return bp;
+}
+
+/*
+ * Allocate a new btree cursor.
+ * The cursor is either for allocation (A) or bmap (B) or inodes (I).
+ */
+xfs_btree_cur_t *			/* new btree cursor */
+xfs_btree_init_cursor(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_buf_t	*agbp,		/* (A only) buffer for agf structure */
+					/* (I only) buffer for agi structure */
+	xfs_agnumber_t	agno,		/* (AI only) allocation group number */
+	xfs_btnum_t	btnum,		/* btree identifier */
+	xfs_inode_t	*ip,		/* (B only) inode owning the btree */
+	int		whichfork)	/* (B only) data or attr fork */
+{
+	xfs_agf_t	*agf;		/* (A) allocation group freespace */
+	xfs_agi_t	*agi;		/* (I) allocation group inodespace */
+	xfs_btree_cur_t	*cur;		/* return value */
+	xfs_ifork_t	*ifp;		/* (I) inode fork pointer */
+	int		nlevels=0;	/* number of levels in the btree */
+
+	ASSERT(xfs_btree_cur_zone != NULL);
+	/*
+	 * Allocate a new cursor.
+	 */
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+	/*
+	 * Deduce the number of btree levels from the arguments.
+	 */
+	switch (btnum) {
+	case XFS_BTNUM_BNO:
+	case XFS_BTNUM_CNT:
+		agf = XFS_BUF_TO_AGF(agbp);
+		nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT);
+		break;
+	case XFS_BTNUM_BMAP:
+		ifp = XFS_IFORK_PTR(ip, whichfork);
+		nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
+		break;
+	case XFS_BTNUM_INO:
+		agi = XFS_BUF_TO_AGI(agbp);
+		nlevels = INT_GET(agi->agi_level, ARCH_CONVERT);
+		break;
+	default:
+		ASSERT(0);
+	}
+	/*
+	 * Fill in the common fields.
+	 */
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_nlevels = nlevels;
+	cur->bc_btnum = btnum;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+	/*
+	 * Fill in private fields.
+	 */
+	switch (btnum) {
+	case XFS_BTNUM_BNO:
+	case XFS_BTNUM_CNT:
+		/*
+		 * Allocation btree fields.
+		 */
+		cur->bc_private.a.agbp = agbp;
+		cur->bc_private.a.agno = agno;
+		break;
+	case XFS_BTNUM_BMAP:
+		/*
+		 * Bmap btree fields.
+		 */
+		cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
+		cur->bc_private.b.ip = ip;
+		cur->bc_private.b.firstblock = NULLFSBLOCK;
+		cur->bc_private.b.flist = NULL;
+		cur->bc_private.b.allocated = 0;
+		cur->bc_private.b.flags = 0;
+		cur->bc_private.b.whichfork = whichfork;
+		break;
+	case XFS_BTNUM_INO:
+		/*
+		 * Inode allocation btree fields.
+		 */
+		cur->bc_private.i.agbp = agbp;
+		cur->bc_private.i.agno = agno;
+		break;
+	default:
+		ASSERT(0);
+	}
+	return cur;
+}
+
+/*
+ * Check for the cursor referring to the last block at the given level.
+ */
+int					/* 1=is last block, 0=not last block */
+xfs_btree_islastblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level)	/* level to check */
+{
+	xfs_btree_block_t	*block;	/* generic btree block pointer */
+	xfs_buf_t		*bp;	/* buffer containing block */
+
+	block = xfs_btree_get_block(cur, level, &bp);
+	xfs_btree_check_block(cur, block, level, bp);
+	if (XFS_BTREE_LONG_PTRS(cur->bc_btnum))
+		return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO;
+	else
+		return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK;
+}
+
+/*
+ * Change the cursor to point to the last record in the current block
+ * at the given level.  Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_lastrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level)	/* level to change */
+{
+	xfs_btree_block_t	*block;	/* generic btree block pointer */
+	xfs_buf_t		*bp;	/* buffer containing block */
+
+	/*
+	 * Get the block pointer for this level.
+	 */
+	block = xfs_btree_get_block(cur, level, &bp);
+	xfs_btree_check_block(cur, block, level, bp);
+	/*
+	 * It's empty, there is no such record.
+	 */
+	if (!block->bb_h.bb_numrecs)
+		return 0;
+	/*
+	 * Set the ptr value to numrecs, that's the last record/key.
+	 */
+	cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT);
+	return 1;
+}
+
+/*
+ * Compute first and last byte offsets for the fields given.
+ * Interprets the offsets table, which contains struct field offsets.
+ */
+void
+xfs_btree_offsets(
+	__int64_t	fields,		/* bitmask of fields */
+	const short	*offsets,	/* table of field offsets */
+	int		nbits,		/* number of bits to inspect */
+	int		*first,		/* output: first byte offset */
+	int		*last)		/* output: last byte offset */
+{
+	int		i;		/* current bit number */
+	__int64_t	imask;		/* mask for current bit number */
+
+	ASSERT(fields != 0);
+	/*
+	 * Find the lowest bit, so the first byte offset.
+	 */
+	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
+		if (imask & fields) {
+			*first = offsets[i];
+			break;
+		}
+	}
+	/*
+	 * Find the highest bit, so the last byte offset.
+	 */
+	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
+		if (imask & fields) {
+			*last = offsets[i + 1] - 1;
+			break;
+		}
+	}
+}
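+
+/*
+ * Editor's illustration, not from the original source: given a hypothetical
+ * offsets table for the short form header,
+ *
+ *	static const short offs[] = {
+ *		offsetof(xfs_btree_sblock_t, bb_magic),
+ *		offsetof(xfs_btree_sblock_t, bb_level),
+ *		offsetof(xfs_btree_sblock_t, bb_numrecs),
+ *		offsetof(xfs_btree_sblock_t, bb_leftsib),
+ *		offsetof(xfs_btree_sblock_t, bb_rightsib),
+ *		sizeof(xfs_btree_sblock_t)
+ *	};
+ *
+ * a call xfs_btree_offsets(XFS_BB_LEVEL | XFS_BB_NUMRECS, offs, 5, &first,
+ * &last) sets first to offs[1] (start of bb_level) and last to offs[3] - 1
+ * (the byte just before bb_leftsib), i.e. exactly the bytes covering the
+ * two requested fields.
+ */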
+
+/*
+ * Get a buffer for the block, return it read in.
+ * Long-form addressing.
+ */
+int					/* error */
+xfs_btree_read_bufl(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_fsblock_t	fsbno,		/* file system block number */
+	uint		lock,		/* lock flags for read_buf */
+	xfs_buf_t	**bpp,		/* buffer for fsbno */
+	int		refval)		/* ref count value for buffer */
+{
+	xfs_buf_t	*bp;		/* return value */
+	xfs_daddr_t		d;		/* real disk block address */
+	int		error;
+
+	ASSERT(fsbno != NULLFSBLOCK);
+	d = XFS_FSB_TO_DADDR(mp, fsbno);
+	if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
+			mp->m_bsize, lock, &bp))) {
+		return error;
+	}
+	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
+	if (bp != NULL) {
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
+	}
+	*bpp = bp;
+	return 0;
+}
+
+/*
+ * Get a buffer for the block, return it read in.
+ * Short-form addressing.
+ */
+int					/* error */
+xfs_btree_read_bufs(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_agblock_t	agbno,		/* allocation group block number */
+	uint		lock,		/* lock flags for read_buf */
+	xfs_buf_t	**bpp,		/* buffer for agno/agbno */
+	int		refval)		/* ref count value for buffer */
+{
+	xfs_buf_t	*bp;		/* return value */
+	xfs_daddr_t	d;		/* real disk block address */
+	int		error;
+
+	ASSERT(agno != NULLAGNUMBER);
+	ASSERT(agbno != NULLAGBLOCK);
+	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+	if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
+					mp->m_bsize, lock, &bp))) {
+		return error;
+	}
+	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
+	if (bp != NULL) {
+		switch (refval) {
+		case XFS_ALLOC_BTREE_REF:
+			XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
+			break;
+		case XFS_INO_BTREE_REF:
+			XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval);
+			break;
+		}
+	}
+	*bpp = bp;
+	return 0;
+}
+
+/*
+ * Read-ahead the block, don't wait for it, don't return a buffer.
+ * Long-form addressing.
+ */
+/* ARGSUSED */
+void
+xfs_btree_reada_bufl(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_fsblock_t	fsbno,		/* file system block number */
+	xfs_extlen_t	count)		/* count of filesystem blocks */
+{
+	xfs_daddr_t		d;
+
+	ASSERT(fsbno != NULLFSBLOCK);
+	d = XFS_FSB_TO_DADDR(mp, fsbno);
+	xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+}
+
+/*
+ * Read-ahead the block, don't wait for it, don't return a buffer.
+ * Short-form addressing.
+ */
+/* ARGSUSED */
+void
+xfs_btree_reada_bufs(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_agblock_t	agbno,		/* allocation group block number */
+	xfs_extlen_t	count)		/* count of filesystem blocks */
+{
+	xfs_daddr_t		d;
+
+	ASSERT(agno != NULLAGNUMBER);
+	ASSERT(agbno != NULLAGBLOCK);
+	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+	xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+}
+
+/*
+ * Read-ahead btree blocks, at the given level.
+ * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
+ */
+int
+xfs_btree_readahead_core(
+	xfs_btree_cur_t		*cur,		/* btree cursor */
+	int			lev,		/* level in btree */
+	int			lr)		/* left/right bits */
+{
+	xfs_alloc_block_t	*a;
+	xfs_bmbt_block_t	*b;
+	xfs_inobt_block_t	*i;
+	int			rval = 0;
+
+	ASSERT(cur->bc_bufs[lev] != NULL);
+	cur->bc_ra[lev] |= lr;
+	switch (cur->bc_btnum) {
+	case XFS_BTNUM_BNO:
+	case XFS_BTNUM_CNT:
+		a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]);
+		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+				INT_GET(a->bb_leftsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+				INT_GET(a->bb_rightsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		break;
+	case XFS_BTNUM_BMAP:
+		b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]);
+		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) {
+			xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+			xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		break;
+	case XFS_BTNUM_INO:
+		i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
+		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+				INT_GET(i->bb_leftsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+				INT_GET(i->bb_rightsib, ARCH_CONVERT), 1);
+			rval++;
+		}
+		break;
+	default:
+		ASSERT(0);
+	}
+	return rval;
+}
+
+/*
+ * Set the buffer for level "lev" in the cursor to bp, releasing
+ * any previous buffer.
+ */
+void
+xfs_btree_setbuf(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			lev,	/* level in btree */
+	xfs_buf_t		*bp)	/* new buffer to set */
+{
+	xfs_btree_block_t	*b;	/* btree block */
+	xfs_buf_t		*obp;	/* old buffer pointer */
+
+	obp = cur->bc_bufs[lev];
+	if (obp)
+		xfs_trans_brelse(cur->bc_tp, obp);
+	cur->bc_bufs[lev] = bp;
+	cur->bc_ra[lev] = 0;
+	if (!bp)
+		return;
+	b = XFS_BUF_TO_BLOCK(bp);
+	if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) {
+		if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO)
+			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
+		if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO)
+			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
+	} else {
+		if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK)
+			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
+		if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK)
+			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
+	}
+}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
new file mode 100644
index 0000000..93872bb
--- /dev/null
+++ b/fs/xfs/xfs_btree.h
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_BTREE_H__
+#define	__XFS_BTREE_H__
+
+struct xfs_buf;
+struct xfs_bmap_free;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * This nonsense is to make -wlint happy.
+ */
+#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
+#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
+#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)
+
+#define	XFS_BTNUM_BNO	((xfs_btnum_t)XFS_BTNUM_BNOi)
+#define	XFS_BTNUM_CNT	((xfs_btnum_t)XFS_BTNUM_CNTi)
+#define	XFS_BTNUM_BMAP	((xfs_btnum_t)XFS_BTNUM_BMAPi)
+#define	XFS_BTNUM_INO	((xfs_btnum_t)XFS_BTNUM_INOi)
+
+/*
+ * Short form header: space allocation btrees.
+ */
+typedef struct xfs_btree_sblock
+{
+	__uint32_t	bb_magic;	/* magic number for block type */
+	__uint16_t	bb_level;	/* 0 is a leaf */
+	__uint16_t	bb_numrecs;	/* current # of data records */
+	xfs_agblock_t	bb_leftsib;	/* left sibling block or NULLAGBLOCK */
+	xfs_agblock_t	bb_rightsib;	/* right sibling block or NULLAGBLOCK */
+} xfs_btree_sblock_t;
+
+/*
+ * Long form header: bmap btrees.
+ */
+typedef struct xfs_btree_lblock
+{
+	__uint32_t	bb_magic;	/* magic number for block type */
+	__uint16_t	bb_level;	/* 0 is a leaf */
+	__uint16_t	bb_numrecs;	/* current # of data records */
+	xfs_dfsbno_t	bb_leftsib;	/* left sibling block or NULLDFSBNO */
+	xfs_dfsbno_t	bb_rightsib;	/* right sibling block or NULLDFSBNO */
+} xfs_btree_lblock_t;
+
+/*
+ * Combined header and structure, used by common code.
+ */
+typedef struct xfs_btree_hdr
+{
+	__uint32_t	bb_magic;	/* magic number for block type */
+	__uint16_t	bb_level;	/* 0 is a leaf */
+	__uint16_t	bb_numrecs;	/* current # of data records */
+} xfs_btree_hdr_t;
+
+typedef struct xfs_btree_block
+{
+	xfs_btree_hdr_t	bb_h;		/* header */
+	union		{
+		struct	{
+			xfs_agblock_t	bb_leftsib;
+			xfs_agblock_t	bb_rightsib;
+		}	s;		/* short form pointers */
+		struct	{
+			xfs_dfsbno_t	bb_leftsib;
+			xfs_dfsbno_t	bb_rightsib;
+		}	l;		/* long form pointers */
+	}		bb_u;		/* rest */
+} xfs_btree_block_t;
+
+/*
+ * For logging record fields.
+ */
+#define	XFS_BB_MAGIC		0x01
+#define	XFS_BB_LEVEL		0x02
+#define	XFS_BB_NUMRECS		0x04
+#define	XFS_BB_LEFTSIB		0x08
+#define	XFS_BB_RIGHTSIB		0x10
+#define	XFS_BB_NUM_BITS		5
+#define	XFS_BB_ALL_BITS		((1 << XFS_BB_NUM_BITS) - 1)
+
+/*
+ * Boolean to select which form of xfs_btree_block_t.bb_u to use.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BTREE_LONG_PTRS)
+int xfs_btree_long_ptrs(xfs_btnum_t btnum);
+#define	XFS_BTREE_LONG_PTRS(btnum)	((btnum) == XFS_BTNUM_BMAP)
+#else
+#define	XFS_BTREE_LONG_PTRS(btnum)	((btnum) == XFS_BTNUM_BMAP)
+#endif
+
+/*
+ * Magic numbers for btree blocks.
+ */
+extern const __uint32_t	xfs_magics[];
+
+/*
+ * Maximum and minimum records in a btree block.
+ * Given block size, type prefix, and leaf flag (0 or 1).
+ * The divisor below is equivalent to lf ? (e1) : (e2) but that produces
+ * compiler warnings.
+ */
+#define	XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf)	\
+	((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \
+	 (((lf) * (uint)sizeof(t ## _rec_t)) + \
+	  ((1 - (lf)) * \
+	   ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t))))))
+#define	XFS_BTREE_BLOCK_MINRECS(bsz,t,lf)	\
+	(XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2)
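+
+/*
+ * Editor's note (illustrative only, not part of this header): a specific
+ * btree supplies its own type prefix, so a caller might compute, say,
+ *
+ *	maxrecs = XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc,
+ *					  level == 0);
+ *
+ * leaf blocks (lf == 1) are sized by xfs_alloc_rec_t entries, interior
+ * blocks (lf == 0) by xfs_alloc_key_t/xfs_alloc_ptr_t pairs.
+ */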
+
+/*
+ * Record, key, and pointer address calculation macros.
+ * Given block size, type prefix, block pointer, and index of requested entry
+ * (first entry numbered 1).
+ */
+#define	XFS_BTREE_REC_ADDR(bsz,t,bb,i,mxr)	\
+	((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \
+	 ((i) - 1) * sizeof(t ## _rec_t)))
+#define	XFS_BTREE_KEY_ADDR(bsz,t,bb,i,mxr)	\
+	((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \
+	 ((i) - 1) * sizeof(t ## _key_t)))
+#define	XFS_BTREE_PTR_ADDR(bsz,t,bb,i,mxr)	\
+	((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \
+	 (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t)))
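+
+/*
+ * Editor's note (illustrative, not from the original header): the layout the
+ * address macros assume is
+ *
+ *	leaf block:	[ header | rec 1 | rec 2 | ... ]
+ *	interior block:	[ header | key slots 1..mxr | ptr slots 1..mxr ]
+ *
+ * which is why XFS_BTREE_PTR_ADDR() skips space for mxr keys (the maximum,
+ * not the current count) before indexing into the pointer array.
+ */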
+
+#define	XFS_BTREE_MAXLEVELS	8	/* max of all btrees */
+
+/*
+ * Btree cursor structure.
+ * This collects all information needed by the btree code in one place.
+ */
+typedef struct xfs_btree_cur
+{
+	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
+	struct xfs_mount	*bc_mp;	/* file system mount struct */
+	union {
+		xfs_alloc_rec_t		a;
+		xfs_bmbt_irec_t		b;
+		xfs_inobt_rec_t		i;
+	}		bc_rec;		/* current insert/search record value */
+	struct xfs_buf	*bc_bufs[XFS_BTREE_MAXLEVELS];	/* buf ptr per level */
+	int		bc_ptrs[XFS_BTREE_MAXLEVELS];	/* key/record # */
+	__uint8_t	bc_ra[XFS_BTREE_MAXLEVELS];	/* readahead bits */
+#define	XFS_BTCUR_LEFTRA	1	/* left sibling has been read-ahead */
+#define	XFS_BTCUR_RIGHTRA	2	/* right sibling has been read-ahead */
+	__uint8_t	bc_nlevels;	/* number of levels in the tree */
+	__uint8_t	bc_blocklog;	/* log2(blocksize) of btree blocks */
+	xfs_btnum_t	bc_btnum;	/* identifies which btree type */
+	union {
+		struct {			/* needed for BNO, CNT */
+			struct xfs_buf	*agbp;	/* agf buffer pointer */
+			xfs_agnumber_t	agno;	/* ag number */
+		} a;
+		struct {			/* needed for BMAP */
+			struct xfs_inode *ip;	/* pointer to our inode */
+			struct xfs_bmap_free *flist;	/* list to free after */
+			xfs_fsblock_t	firstblock;	/* 1st blk allocated */
+			int		allocated;	/* count of alloced */
+			short		forksize;	/* fork's inode space */
+			char		whichfork;	/* data or attr fork */
+			char		flags;		/* flags */
+#define	XFS_BTCUR_BPRV_WASDEL	1			/* was delayed */
+		} b;
+		struct {			/* needed for INO */
+			struct xfs_buf	*agbp;	/* agi buffer pointer */
+			xfs_agnumber_t	agno;	/* ag number */
+		} i;
+	}		bc_private;	/* per-btree type data */
+} xfs_btree_cur_t;
+
+#define	XFS_BTREE_NOERROR	0
+#define	XFS_BTREE_ERROR		1
+
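+/*
+ * Editor's sketch of typical cursor usage (illustrative, not from the
+ * original header); for an allocation btree the inode arguments are unused:
+ *
+ *	cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO,
+ *			NULL, 0);
+ *	... lookup/update calls against cur ...
+ *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ */
+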
+/*
+ * Convert from buffer to btree block header.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BLOCK)
+xfs_btree_block_t *xfs_buf_to_block(struct xfs_buf *bp);
+#define	XFS_BUF_TO_BLOCK(bp)	xfs_buf_to_block(bp)
+#else
+#define	XFS_BUF_TO_BLOCK(bp)	((xfs_btree_block_t *)(XFS_BUF_PTR(bp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_LBLOCK)
+xfs_btree_lblock_t *xfs_buf_to_lblock(struct xfs_buf *bp);
+#define	XFS_BUF_TO_LBLOCK(bp)	xfs_buf_to_lblock(bp)
+#else
+#define	XFS_BUF_TO_LBLOCK(bp)	((xfs_btree_lblock_t *)(XFS_BUF_PTR(bp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBLOCK)
+xfs_btree_sblock_t *xfs_buf_to_sblock(struct xfs_buf *bp);
+#define	XFS_BUF_TO_SBLOCK(bp)	xfs_buf_to_sblock(bp)
+#else
+#define	XFS_BUF_TO_SBLOCK(bp)	((xfs_btree_sblock_t *)(XFS_BUF_PTR(bp)))
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef DEBUG
+/*
+ * Debug routine: check that block header is ok.
+ */
+void
+xfs_btree_check_block(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_block_t	*block,	/* generic btree block pointer */
+	int			level,	/* level of the btree block */
+	struct xfs_buf		*bp);	/* buffer containing block, if any */
+
+/*
+ * Debug routine: check that keys are in the right order.
+ */
+void
+xfs_btree_check_key(
+	xfs_btnum_t		btnum,	/* btree identifier */
+	void			*ak1,	/* pointer to left (lower) key */
+	void			*ak2);	/* pointer to right (higher) key */
+
+/*
+ * Debug routine: check that records are in the right order.
+ */
+void
+xfs_btree_check_rec(
+	xfs_btnum_t		btnum,	/* btree identifier */
+	void			*ar1,	/* pointer to left (lower) record */
+	void			*ar2);	/* pointer to right (higher) record */
+#else
+#define	xfs_btree_check_block(a,b,c,d)
+#define	xfs_btree_check_key(a,b,c)
+#define	xfs_btree_check_rec(a,b,c)
+#endif	/* DEBUG */
+
+/*
+ * Checking routine: check that long form block header is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_lblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_lblock_t	*block,	/* btree long form block pointer */
+	int			level,	/* level of the btree block */
+	struct xfs_buf		*bp);	/* buffer containing block, if any */
+
+/*
+ * Checking routine: check that (long) pointer is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_lptr(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_dfsbno_t		ptr,	/* btree block disk address */
+	int			level);	/* btree block level */
+
+/*
+ * Checking routine: check that short form block header is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_btree_sblock_t	*block,	/* btree short form block pointer */
+	int			level,	/* level of the btree block */
+	struct xfs_buf		*bp);	/* buffer containing block */
+
+/*
+ * Checking routine: check that (short) pointer is ok.
+ */
+int					/* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sptr(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_agblock_t		ptr,	/* btree block disk address */
+	int			level);	/* btree block level */
+
+/*
+ * Delete the btree cursor.
+ */
+void
+xfs_btree_del_cursor(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			error);	/* del because of error */
+
+/*
+ * Duplicate the btree cursor.
+ * Allocate a new one, copy the record, re-get the buffers.
+ */
+int					/* error */
+xfs_btree_dup_cursor(
+	xfs_btree_cur_t		*cur,	/* input cursor */
+	xfs_btree_cur_t		**ncur);/* output cursor */
+
+/*
+ * Change the cursor to point to the first record in the current block
+ * at the given level.  Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_firstrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level);	/* level to change */
+
+/*
+ * Retrieve the block pointer from the cursor at the given level.
+ * This may be a bmap btree root or from a buffer.
+ */
+xfs_btree_block_t *			/* generic btree block pointer */
+xfs_btree_get_block(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree */
+	struct xfs_buf		**bpp);	/* buffer containing the block */
+
+/*
+ * Get a buffer for the block, return it with no data read.
+ * Long-form addressing.
+ */
+struct xfs_buf *				/* buffer for fsbno */
+xfs_btree_get_bufl(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_fsblock_t		fsbno,	/* file system block number */
+	uint			lock);	/* lock flags for get_buf */
+
+/*
+ * Get a buffer for the block, return it with no data read.
+ * Short-form addressing.
+ */
+struct xfs_buf *				/* buffer for agno/agbno */
+xfs_btree_get_bufs(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_agnumber_t		agno,	/* allocation group number */
+	xfs_agblock_t		agbno,	/* allocation group block number */
+	uint			lock);	/* lock flags for get_buf */
+
+/*
+ * Allocate a new btree cursor.
+ * The cursor is either for allocation (A) or bmap (B) or inodes (I).
+ */
+xfs_btree_cur_t *			/* new btree cursor */
+xfs_btree_init_cursor(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	struct xfs_buf		*agbp,	/* (A only) buffer for agf structure */
+					/* (I only) buffer for agi structure */
+	xfs_agnumber_t		agno,	/* (AI only) allocation group number */
+	xfs_btnum_t		btnum,	/* btree identifier */
+	struct xfs_inode	*ip,	/* (B only) inode owning the btree */
+	int			whichfork); /* (B only) data/attr fork */
+
+/*
+ * Check for the cursor referring to the last block at the given level.
+ */
+int					/* 1=is last block, 0=not last block */
+xfs_btree_islastblock(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level);	/* level to check */
+
+/*
+ * Change the cursor to point to the last record in the current block
+ * at the given level.  Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_lastrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level);	/* level to change */
+
+/*
+ * Compute first and last byte offsets for the fields given.
+ * Interprets the offsets table, which contains struct field offsets.
+ */
+void
+xfs_btree_offsets(
+	__int64_t		fields,	/* bitmask of fields */
+	const short		*offsets,/* table of field offsets */
+	int			nbits,	/* number of bits to inspect */
+	int			*first,	/* output: first byte offset */
+	int			*last);	/* output: last byte offset */
+
+/*
+ * Get a buffer for the block, return it read in.
+ * Long-form addressing.
+ */
+int					/* error */
+xfs_btree_read_bufl(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_fsblock_t		fsbno,	/* file system block number */
+	uint			lock,	/* lock flags for read_buf */
+	struct xfs_buf		**bpp,	/* buffer for fsbno */
+	int			refval);/* ref count value for buffer */
+
+/*
+ * Get a buffer for the block, return it read in.
+ * Short-form addressing.
+ */
+int					/* error */
+xfs_btree_read_bufs(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_agnumber_t		agno,	/* allocation group number */
+	xfs_agblock_t		agbno,	/* allocation group block number */
+	uint			lock,	/* lock flags for read_buf */
+	struct xfs_buf		**bpp,	/* buffer for agno/agbno */
+	int			refval);/* ref count value for buffer */
+
+/*
+ * Read-ahead the block, don't wait for it, don't return a buffer.
+ * Long-form addressing.
+ */
+void
+xfs_btree_reada_bufl(
+	struct xfs_mount	*mp,	/* file system mount point */
+	xfs_fsblock_t		fsbno,	/* file system block number */
+	xfs_extlen_t		count);	/* count of filesystem blocks */
+
+/*
+ * Read-ahead the block, don't wait for it, don't return a buffer.
+ * Short-form addressing.
+ */
+void
+xfs_btree_reada_bufs(
+	struct xfs_mount	*mp,	/* file system mount point */
+	xfs_agnumber_t		agno,	/* allocation group number */
+	xfs_agblock_t		agbno,	/* allocation group block number */
+	xfs_extlen_t		count);	/* count of filesystem blocks */
+
+/*
+ * Read-ahead btree blocks, at the given level.
+ * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
+ */
+int					/* readahead block count */
+xfs_btree_readahead_core(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			lev,	/* level in btree */
+	int			lr);	/* left/right bits */
+
+static inline int			/* readahead block count */
+xfs_btree_readahead(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			lev,	/* level in btree */
+	int			lr)	/* left/right bits */
+{
+	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
+		return 0;
+
+	return xfs_btree_readahead_core(cur, lev, lr);
+}
+
+
+/*
+ * Set the buffer for level "lev" in the cursor to bp, releasing
+ * any previous buffer.
+ */
+void
+xfs_btree_setbuf(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			lev,	/* level in btree */
+	struct xfs_buf		*bp);	/* new buffer to set */
+
+#endif	/* __KERNEL__ */
+
+
+/*
+ * Min and max functions for extlen, agblock, fileoff, and filblks types.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MIN)
+xfs_extlen_t xfs_extlen_min(xfs_extlen_t a, xfs_extlen_t b);
+#define	XFS_EXTLEN_MIN(a,b)	xfs_extlen_min(a,b)
+#else
+#define	XFS_EXTLEN_MIN(a,b)	\
+	((xfs_extlen_t)(a) < (xfs_extlen_t)(b) ? \
+	 (xfs_extlen_t)(a) : (xfs_extlen_t)(b))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MAX)
+xfs_extlen_t xfs_extlen_max(xfs_extlen_t a, xfs_extlen_t b);
+#define	XFS_EXTLEN_MAX(a,b)	xfs_extlen_max(a,b)
+#else
+#define	XFS_EXTLEN_MAX(a,b)	\
+	((xfs_extlen_t)(a) > (xfs_extlen_t)(b) ? \
+	 (xfs_extlen_t)(a) : (xfs_extlen_t)(b))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MIN)
+xfs_agblock_t xfs_agblock_min(xfs_agblock_t a, xfs_agblock_t b);
+#define	XFS_AGBLOCK_MIN(a,b)	xfs_agblock_min(a,b)
+#else
+#define	XFS_AGBLOCK_MIN(a,b)	\
+	((xfs_agblock_t)(a) < (xfs_agblock_t)(b) ? \
+	 (xfs_agblock_t)(a) : (xfs_agblock_t)(b))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MAX)
+xfs_agblock_t xfs_agblock_max(xfs_agblock_t a, xfs_agblock_t b);
+#define	XFS_AGBLOCK_MAX(a,b)	xfs_agblock_max(a,b)
+#else
+#define	XFS_AGBLOCK_MAX(a,b)	\
+	((xfs_agblock_t)(a) > (xfs_agblock_t)(b) ? \
+	 (xfs_agblock_t)(a) : (xfs_agblock_t)(b))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MIN)
+xfs_fileoff_t xfs_fileoff_min(xfs_fileoff_t a, xfs_fileoff_t b);
+#define	XFS_FILEOFF_MIN(a,b)	xfs_fileoff_min(a,b)
+#else
+#define	XFS_FILEOFF_MIN(a,b)	\
+	((xfs_fileoff_t)(a) < (xfs_fileoff_t)(b) ? \
+	 (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MAX)
+xfs_fileoff_t xfs_fileoff_max(xfs_fileoff_t a, xfs_fileoff_t b);
+#define	XFS_FILEOFF_MAX(a,b)	xfs_fileoff_max(a,b)
+#else
+#define	XFS_FILEOFF_MAX(a,b)	\
+	((xfs_fileoff_t)(a) > (xfs_fileoff_t)(b) ? \
+	 (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MIN)
+xfs_filblks_t xfs_filblks_min(xfs_filblks_t a, xfs_filblks_t b);
+#define	XFS_FILBLKS_MIN(a,b)	xfs_filblks_min(a,b)
+#else
+#define	XFS_FILBLKS_MIN(a,b)	\
+	((xfs_filblks_t)(a) < (xfs_filblks_t)(b) ? \
+	 (xfs_filblks_t)(a) : (xfs_filblks_t)(b))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MAX)
+xfs_filblks_t xfs_filblks_max(xfs_filblks_t a, xfs_filblks_t b);
+#define	XFS_FILBLKS_MAX(a,b)	xfs_filblks_max(a,b)
+#else
+#define	XFS_FILBLKS_MAX(a,b)	\
+	((xfs_filblks_t)(a) > (xfs_filblks_t)(b) ? \
+	 (xfs_filblks_t)(a) : (xfs_filblks_t)(b))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_SANITY_CHECK)
+int xfs_fsb_sanity_check(struct xfs_mount *mp, xfs_fsblock_t fsb);
+#define	XFS_FSB_SANITY_CHECK(mp,fsb)	xfs_fsb_sanity_check(mp,fsb)
+#else
+#define	XFS_FSB_SANITY_CHECK(mp,fsb)	\
+	(XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
+	 XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
+#endif
+
+/*
+ * Macros to set EFSCORRUPTED & return/branch.
+ */
+#define	XFS_WANT_CORRUPTED_GOTO(x,l)	\
+	{ \
+		int fs_is_ok = (x); \
+		ASSERT(fs_is_ok); \
+		if (unlikely(!fs_is_ok)) { \
+			XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \
+					 XFS_ERRLEVEL_LOW, NULL); \
+			error = XFS_ERROR(EFSCORRUPTED); \
+			goto l; \
+		} \
+	}
+
+#define	XFS_WANT_CORRUPTED_RETURN(x)	\
+	{ \
+		int fs_is_ok = (x); \
+		ASSERT(fs_is_ok); \
+		if (unlikely(!fs_is_ok)) { \
+			XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \
+					 XFS_ERRLEVEL_LOW, NULL); \
+			return XFS_ERROR(EFSCORRUPTED); \
+		} \
+	}
+
+#endif	/* __XFS_BTREE_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
new file mode 100644
index 0000000..9ab0039
--- /dev/null
+++ b/fs/xfs/xfs_buf_item.c
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains the implementation of the xfs_buf_log_item.
+ * It contains the item operations used to manipulate the buf log
+ * items as well as utility routines used by the buffer specific
+ * transaction routines.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_rw.h"
+#include "xfs_bit.h"
+#include "xfs_error.h"
+
+
+kmem_zone_t	*xfs_buf_item_zone;
+
+#ifdef XFS_TRANS_DEBUG
+/*
+ * This function uses an alternate strategy for tracking the bytes
+ * that the user requests to be logged.  This can then be used
+ * in conjunction with the bli_orig array in the buf log item to
+ * catch bugs in our callers' code.
+ *
+ * We also double check the bits set in xfs_buf_item_log using a
+ * simple algorithm to check that every byte is accounted for.
+ */
+STATIC void
+xfs_buf_item_log_debug(
+	xfs_buf_log_item_t	*bip,
+	uint			first,
+	uint			last)
+{
+	uint	x;
+	uint	byte;
+	uint	nbytes;
+	uint	chunk_num;
+	uint	word_num;
+	uint	bit_num;
+	uint	bit_set;
+	uint	*wordp;
+
+	ASSERT(bip->bli_logged != NULL);
+	byte = first;
+	nbytes = last - first + 1;
+	bfset(bip->bli_logged, first, nbytes);
+	for (x = 0; x < nbytes; x++) {
+		chunk_num = byte >> XFS_BLI_SHIFT;
+		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
+		bit_num = chunk_num & (NBWORD - 1);
+		wordp = &(bip->bli_format.blf_data_map[word_num]);
+		bit_set = *wordp & (1 << bit_num);
+		ASSERT(bit_set);
+		byte++;
+	}
+}
+
+/*
+ * This function is called when we flush something into a buffer without
+ * logging it.  This happens for things like inodes which are logged
+ * separately from the buffer.
+ */
+void
+xfs_buf_item_flush_log_debug(
+	xfs_buf_t	*bp,
+	uint		first,
+	uint		last)
+{
+	xfs_buf_log_item_t	*bip;
+	uint			nbytes;
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+	if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
+		return;
+	}
+
+	ASSERT(bip->bli_logged != NULL);
+	nbytes = last - first + 1;
+	bfset(bip->bli_logged, first, nbytes);
+}
+
+/*
+ * This function is called to verify that our callers have logged
+ * all the bytes that they changed.
+ *
+ * It does this by comparing the original copy of the buffer stored in
+ * the buf log item's bli_orig array to the current copy of the buffer
+ * and ensuring that all bytes which miscompare are set in the bli_logged
+ * array of the buf log item.
+ */
+STATIC void
+xfs_buf_item_log_check(
+	xfs_buf_log_item_t	*bip)
+{
+	char		*orig;
+	char		*buffer;
+	int		x;
+	xfs_buf_t	*bp;
+
+	ASSERT(bip->bli_orig != NULL);
+	ASSERT(bip->bli_logged != NULL);
+
+	bp = bip->bli_buf;
+	ASSERT(XFS_BUF_COUNT(bp) > 0);
+	ASSERT(XFS_BUF_PTR(bp) != NULL);
+	orig = bip->bli_orig;
+	buffer = XFS_BUF_PTR(bp);
+	for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
+		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x))
+			cmn_err(CE_PANIC,
+	"xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
+				bip, bp, orig, x);
+	}
+}
+#else
+#define		xfs_buf_item_log_debug(x,y,z)
+#define		xfs_buf_item_log_check(x)
+#endif
+
+STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
+STATIC void	xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
+
+/*
+ * This returns the number of log iovecs needed to log the
+ * given buf log item.
+ *
+ * It calculates this as 1 iovec for the buf log format structure
+ * and 1 for each stretch of non-contiguous chunks to be logged.
+ * Contiguous chunks are logged in a single iovec.
+ *
+ * If the XFS_BLI_STALE flag has been set, then log nothing.
+ */
+uint
+xfs_buf_item_size(
+	xfs_buf_log_item_t	*bip)
+{
+	uint		nvecs;
+	int		next_bit;
+	int		last_bit;
+	xfs_buf_t	*bp;
+
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		/*
+		 * The buffer is stale, so all we need to log
+		 * is the buf log format structure with the
+		 * cancel flag in it.
+		 */
+		xfs_buf_item_trace("SIZE STALE", bip);
+		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+		return 1;
+	}
+
+	bp = bip->bli_buf;
+	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
+	nvecs = 1;
+	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
+					 bip->bli_format.blf_map_size, 0);
+	ASSERT(last_bit != -1);
+	nvecs++;
+	while (last_bit != -1) {
+		/*
+		 * This takes the bit number to start looking from and
+		 * returns the next set bit from there.  It returns -1
+		 * if there are no more bits set or the start bit is
+		 * beyond the end of the bitmap.
+		 */
+		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
+						 bip->bli_format.blf_map_size,
+						 last_bit + 1);
+		/*
+		 * If we run out of bits, leave the loop,
+		 * else if we find a new set of bits bump the number of vecs,
+		 * else keep scanning the current set of bits.
+		 */
+		if (next_bit == -1) {
+			last_bit = -1;
+		} else if (next_bit != last_bit + 1) {
+			last_bit = next_bit;
+			nvecs++;
+		} else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) !=
+			   (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) +
+			    XFS_BLI_CHUNK)) {
+			last_bit = next_bit;
+			nvecs++;
+		} else {
+			last_bit++;
+		}
+	}
+
+	xfs_buf_item_trace("SIZE NORM", bip);
+	return nvecs;
+}
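+
+/*
+ * Editor's illustration (hypothetical bitmap, not from the original source):
+ * if blf_data_map marks chunks 0-2 and 5-6 dirty, the loop above sees two
+ * runs of contiguous set bits, so the item needs 1 iovec for the format
+ * structure plus 2 for the runs, i.e. 3 in total, assuming each run is
+ * also contiguous in the buffer's mapped memory.
+ */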
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given log buf item.  It fills the first entry with a buf log
+ * format structure, and the rest point to contiguous chunks
+ * within the buffer.
+ */
+void
+xfs_buf_item_format(
+	xfs_buf_log_item_t	*bip,
+	xfs_log_iovec_t		*log_vector)
+{
+	uint		base_size;
+	uint		nvecs;
+	xfs_log_iovec_t	*vecp;
+	xfs_buf_t	*bp;
+	int		first_bit;
+	int		last_bit;
+	int		next_bit;
+	uint		nbits;
+	uint		buffer_offset;
+
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+	       (bip->bli_flags & XFS_BLI_STALE));
+	bp = bip->bli_buf;
+	ASSERT(XFS_BUF_BP_ISMAPPED(bp));
+	vecp = log_vector;
+
+	/*
+	 * The size of the base structure is the size of the
+	 * declared structure plus the space for the extra words
+	 * of the bitmap.  We subtract one from the map size, because
+	 * the first element of the bitmap is accounted for in the
+	 * size of the base structure.
+	 */
+	base_size =
+		(uint)(sizeof(xfs_buf_log_format_t) +
+		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
+	vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
+	vecp->i_len = base_size;
+	vecp++;
+	nvecs = 1;
+
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		/*
+		 * The buffer is stale, so all we need to log
+		 * is the buf log format structure with the
+		 * cancel flag in it.
+		 */
+		xfs_buf_item_trace("FORMAT STALE", bip);
+		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+		bip->bli_format.blf_size = nvecs;
+		return;
+	}
+
+	/*
+	 * Fill in an iovec for each set of contiguous chunks.
+	 */
+	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
+					 bip->bli_format.blf_map_size, 0);
+	ASSERT(first_bit != -1);
+	last_bit = first_bit;
+	nbits = 1;
+	for (;;) {
+		/*
+		 * This takes the bit number to start looking from and
+		 * returns the next set bit from there.  It returns -1
+		 * if there are no more bits set or the start bit is
+		 * beyond the end of the bitmap.
+		 */
+		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
+						 bip->bli_format.blf_map_size,
+						 (uint)last_bit + 1);
+		/*
+		 * If we run out of bits fill in the last iovec and get
+		 * out of the loop.
+		 * Else if we start a new set of bits then fill in the
+		 * iovec for the series we were looking at and start
+		 * counting the bits in the new one.
+		 * Else we're still in the same set of bits so just
+		 * keep counting and scanning.
+		 */
+		if (next_bit == -1) {
+			buffer_offset = first_bit * XFS_BLI_CHUNK;
+			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
+			vecp->i_len = nbits * XFS_BLI_CHUNK;
+			nvecs++;
+			break;
+		} else if (next_bit != last_bit + 1) {
+			buffer_offset = first_bit * XFS_BLI_CHUNK;
+			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
+			vecp->i_len = nbits * XFS_BLI_CHUNK;
+			nvecs++;
+			vecp++;
+			first_bit = next_bit;
+			last_bit = next_bit;
+			nbits = 1;
+		} else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) !=
+			   (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) +
+			    XFS_BLI_CHUNK)) {
+			buffer_offset = first_bit * XFS_BLI_CHUNK;
+			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
+			vecp->i_len = nbits * XFS_BLI_CHUNK;
+/* You would think we need to bump nvecs here too, but we do not:
+ * this number is used by recovery, and it gets confused by the boundary
+ * split here.
+ *			nvecs++;
+ */
+			vecp++;
+			first_bit = next_bit;
+			last_bit = next_bit;
+			nbits = 1;
+		} else {
+			last_bit++;
+			nbits++;
+		}
+	}
+	bip->bli_format.blf_size = nvecs;
+
+	/*
+	 * Check to make sure everything is consistent.
+	 */
+	xfs_buf_item_trace("FORMAT NORM", bip);
+	xfs_buf_item_log_check(bip);
+}
+
+/*
+ * This is called to pin the buffer associated with the buf log
+ * item in memory so it cannot be written out.  Simply call bpin()
+ * on the buffer to do this.
+ */
+void
+xfs_buf_item_pin(
+	xfs_buf_log_item_t	*bip)
+{
+	xfs_buf_t	*bp;
+
+	bp = bip->bli_buf;
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+	       (bip->bli_flags & XFS_BLI_STALE));
+	xfs_buf_item_trace("PIN", bip);
+	xfs_buftrace("XFS_PIN", bp);
+	xfs_bpin(bp);
+}
+
+
+/*
+ * This is called to unpin the buffer associated with the buf log
+ * item which was previously pinned with a call to xfs_buf_item_pin().
+ * Just call bunpin() on the buffer to do this.
+ *
+ * Also drop the reference to the buf item for the current transaction.
+ * If the XFS_BLI_STALE flag is set and we are the last reference,
+ * then free up the buf log item and unlock the buffer.
+ */
+void
+xfs_buf_item_unpin(
+	xfs_buf_log_item_t	*bip,
+	int			stale)
+{
+	xfs_mount_t	*mp;
+	xfs_buf_t	*bp;
+	int		freed;
+	SPLDECL(s);
+
+	bp = bip->bli_buf;
+	ASSERT(bp != NULL);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	xfs_buf_item_trace("UNPIN", bip);
+	xfs_buftrace("XFS_UNPIN", bp);
+
+	freed = atomic_dec_and_test(&bip->bli_refcount);
+	mp = bip->bli_item.li_mountp;
+	xfs_bunpin(bp);
+	if (freed && stale) {
+		ASSERT(bip->bli_flags & XFS_BLI_STALE);
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
+		ASSERT(XFS_BUF_ISSTALE(bp));
+		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+		xfs_buf_item_trace("UNPIN STALE", bip);
+		xfs_buftrace("XFS_UNPIN STALE", bp);
+		/*
+		 * If we get called here because of an IO error, we may
+		 * or may not have the item on the AIL. xfs_trans_delete_ail()
+		 * will take care of that situation.
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
+			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
+			XFS_BUF_SET_FSPRIVATE(bp, NULL);
+			XFS_BUF_CLR_IODONE_FUNC(bp);
+		} else {
+			AIL_LOCK(mp,s);
+			xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
+			xfs_buf_item_relse(bp);
+			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
+		}
+		xfs_buf_relse(bp);
+	}
+}
+
+/*
+ * This is called from uncommit in the forced-shutdown path.
+ * We need to check to see if the reference count on the log item
+ * is going to drop to zero.  If so, unpin will free the log item,
+ * so we need to free the item's descriptor (that points to the item)
+ * in the transaction.
+ */
+void
+xfs_buf_item_unpin_remove(
+	xfs_buf_log_item_t	*bip,
+	xfs_trans_t		*tp)
+{
+	xfs_buf_t		*bp;
+	xfs_log_item_desc_t	*lidp;
+	int			stale = 0;
+
+	bp = bip->bli_buf;
+	/*
+	 * will xfs_buf_item_unpin() call xfs_buf_item_relse()?
+	 */
+	if ((atomic_read(&bip->bli_refcount) == 1) &&
+	    (bip->bli_flags & XFS_BLI_STALE)) {
+		ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
+		xfs_buf_item_trace("UNPIN REMOVE", bip);
+		xfs_buftrace("XFS_UNPIN_REMOVE", bp);
+		/*
+		 * yes -- clear the xaction descriptor in-use flag
+		 * and free the chunk if required.  We can safely
+		 * do some work here and then call buf_item_unpin
+		 * to do the rest because if the if is true, then
+		 * we are holding the buffer locked so no one else
+		 * will be able to bump up the refcount.
+		 */
+		lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
+		stale = lidp->lid_flags & XFS_LID_BUF_STALE;
+		xfs_trans_free_item(tp, lidp);
+		/*
+		 * Since the transaction no longer refers to the buffer,
+		 * the buffer should no longer refer to the transaction.
+		 */
+		XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+	}
+
+	xfs_buf_item_unpin(bip, stale);
+
+	return;
+}
+
+/*
+ * This is called to attempt to lock the buffer associated with this
+ * buf log item.  Don't sleep on the buffer lock.  If the buffer is
+ * pinned, return XFS_ITEM_PINNED; if we can't get the lock right away,
+ * return XFS_ITEM_LOCKED.  Otherwise take a hold on the buffer and
+ * return XFS_ITEM_SUCCESS.
+ */
+uint
+xfs_buf_item_trylock(
+	xfs_buf_log_item_t	*bip)
+{
+	xfs_buf_t	*bp;
+
+	bp = bip->bli_buf;
+
+	if (XFS_BUF_ISPINNED(bp)) {
+		return XFS_ITEM_PINNED;
+	}
+
+	if (!XFS_BUF_CPSEMA(bp)) {
+		return XFS_ITEM_LOCKED;
+	}
+
+	/*
+	 * Remove the buffer from the free list.  Only do this
+	 * if it's on the free list.  Private buffers like the
+	 * superblock buffer are not.
+	 */
+	XFS_BUF_HOLD(bp);
+
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	xfs_buf_item_trace("TRYLOCK SUCCESS", bip);
+	return XFS_ITEM_SUCCESS;
+}
+
+/*
+ * Release the buffer associated with the buf log item.
+ * If there is no dirty logged data associated with the
+ * buffer recorded in the buf log item, then free the
+ * buf log item and remove the reference to it in the
+ * buffer.
+ *
+ * This call ignores the recursion count.  It is only called
+ * when the buffer should REALLY be unlocked, regardless
+ * of the recursion count.
+ *
+ * If the XFS_BLI_HOLD flag is set in the buf log item, then
+ * free the log item if necessary but do not unlock the buffer.
+ * This is for support of xfs_trans_bhold(). Make sure the
+ * XFS_BLI_HOLD field is cleared if we don't free the item.
+ */
+void
+xfs_buf_item_unlock(
+	xfs_buf_log_item_t	*bip)
+{
+	int		aborted;
+	xfs_buf_t	*bp;
+	uint		hold;
+
+	bp = bip->bli_buf;
+	xfs_buftrace("XFS_UNLOCK", bp);
+
+	/*
+	 * Clear the buffer's association with this transaction.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+
+	/*
+	 * If this is a transaction abort, don't return early.
+	 * Instead, allow the brelse to happen.
+	 * Normally it would be done for stale (cancelled) buffers
+	 * at unpin time, but we'll never go through the pin/unpin
+	 * cycle if we abort inside commit.
+	 */
+	aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
+
+	/*
+	 * If the buf item is marked stale, then don't do anything.
+	 * We'll unlock the buffer and free the buf item when the
+	 * buffer is unpinned for the last time.
+	 */
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		bip->bli_flags &= ~XFS_BLI_LOGGED;
+		xfs_buf_item_trace("UNLOCK STALE", bip);
+		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+		if (!aborted)
+			return;
+	}
+
+	/*
+	 * Drop the transaction's reference to the log item if
+	 * it was not logged as part of the transaction.  Otherwise
+	 * we'll drop the reference in xfs_buf_item_unpin() when
+	 * the transaction is really through with the buffer.
+	 */
+	if (!(bip->bli_flags & XFS_BLI_LOGGED)) {
+		atomic_dec(&bip->bli_refcount);
+	} else {
+		/*
+		 * Clear the logged flag since this is per
+		 * transaction state.
+		 */
+		bip->bli_flags &= ~XFS_BLI_LOGGED;
+	}
+
+	/*
+	 * Before possibly freeing the buf item, determine if we should
+	 * release the buffer at the end of this routine.
+	 */
+	hold = bip->bli_flags & XFS_BLI_HOLD;
+	xfs_buf_item_trace("UNLOCK", bip);
+
+	/*
+	 * If the buf item isn't tracking any data, free it.
+	 * Otherwise, if XFS_BLI_HOLD is set clear it.
+	 */
+	if (xfs_count_bits(bip->bli_format.blf_data_map,
+			      bip->bli_format.blf_map_size, 0) == 0) {
+		xfs_buf_item_relse(bp);
+	} else if (hold) {
+		bip->bli_flags &= ~XFS_BLI_HOLD;
+	}
+
+	/*
+	 * Release the buffer if XFS_BLI_HOLD was not set.
+	 */
+	if (!hold) {
+		xfs_buf_relse(bp);
+	}
+}
+
+/*
+ * This is called to find out where the oldest active copy of the
+ * buf log item in the on disk log resides now that the last log
+ * write of it completed at the given lsn.
+ * We always re-log all the dirty data in a buffer, so usually the
+ * latest copy in the on disk log is the only one that matters.  For
+ * those cases we simply return the given lsn.
+ *
+ * The one exception to this is for buffers full of newly allocated
+ * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
+ * flag set, indicating that only the di_next_unlinked fields from the
+ * inodes in the buffers will be replayed during recovery.  If the
+ * original newly allocated inode images have not yet been flushed
+ * when the buffer is so relogged, then we need to make sure that we
+ * keep the old images in the 'active' portion of the log.  We do this
+ * by returning the original lsn of that transaction here rather than
+ * the current one.
+ */
+xfs_lsn_t
+xfs_buf_item_committed(
+	xfs_buf_log_item_t	*bip,
+	xfs_lsn_t		lsn)
+{
+	xfs_buf_item_trace("COMMITTED", bip);
+	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
+	    (bip->bli_item.li_lsn != 0)) {
+		return bip->bli_item.li_lsn;
+	}
+	return (lsn);
+}
+
+/*
+ * This is called when the transaction holding the buffer is aborted.
+ * Just behave as if the transaction had been cancelled. If we're shutting down
+ * and have aborted this transaction, we'll trap this buffer when it tries to
+ * get written out.
+ */
+void
+xfs_buf_item_abort(
+	xfs_buf_log_item_t	*bip)
+{
+	xfs_buf_t	*bp;
+
+	bp = bip->bli_buf;
+	xfs_buftrace("XFS_ABORT", bp);
+	XFS_BUF_SUPER_STALE(bp);
+	xfs_buf_item_unlock(bip);
+	return;
+}
+
+/*
+ * This is called to asynchronously write the buffer associated with this
+ * buf log item out to disk. The buffer will already have been locked by
+ * a successful call to xfs_buf_item_trylock().  If the buffer still has
+ * B_DELWRI set, then get it going out to disk with a call to bawrite().
+ * If not, then just release the buffer.
+ */
+void
+xfs_buf_item_push(
+	xfs_buf_log_item_t	*bip)
+{
+	xfs_buf_t	*bp;
+
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	xfs_buf_item_trace("PUSH", bip);
+
+	bp = bip->bli_buf;
+
+	if (XFS_BUF_ISDELAYWRITE(bp)) {
+		xfs_bawrite(bip->bli_item.li_mountp, bp);
+	} else {
+		xfs_buf_relse(bp);
+	}
+}
+
+/* ARGSUSED */
+void
+xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all buf log items.
+ */
+struct xfs_item_ops xfs_buf_item_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_buf_item_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
+					xfs_buf_item_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_buf_item_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_buf_item_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_buf_item_abort,
+	.iop_pushbuf	= NULL,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_buf_item_committing
+};
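
The ops vector above is the only interface the generic log and transaction code uses to drive a buf log item; the casts merely adapt the typed helpers to the generic xfs_log_item_t signatures. Below is a small standalone sketch of that function-pointer-table pattern. The struct and function names are invented for illustration and are not the XFS ones.

/* Sketch of the ops-vector pattern behind xfs_buf_item_ops: a table of
 * function pointers lets generic code dispatch without knowing the
 * concrete item type.  Names here are hypothetical. */
#include <stdio.h>

struct log_item;

struct item_ops {
	unsigned int	(*iop_size)(struct log_item *);
	void		(*iop_push)(struct log_item *);
};

struct log_item {
	const struct item_ops	*li_ops;
	unsigned int		li_nvecs;
};

static unsigned int buf_item_size(struct log_item *lip)
{
	return lip->li_nvecs;
}

static void buf_item_push(struct log_item *lip)
{
	printf("pushing item with %u vectors\n", lip->li_nvecs);
}

static const struct item_ops buf_item_ops = {
	.iop_size	= buf_item_size,
	.iop_push	= buf_item_push,
};

int main(void)
{
	struct log_item item = { .li_ops = &buf_item_ops, .li_nvecs = 3 };

	/* Generic code only ever goes through the ops vector. */
	printf("size = %u\n", item.li_ops->iop_size(&item));
	item.li_ops->iop_push(&item);
	return 0;
}
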
+
+
+/*
+ * Allocate a new buf log item to go with the given buffer.
+ * Set the buffer's b_fsprivate field to point to the new
+ * buf log item.  If there are other items attached to the
+ * buffer (see xfs_buf_attach_iodone() below), then put the
+ * buf log item at the front.
+ */
+void
+xfs_buf_item_init(
+	xfs_buf_t	*bp,
+	xfs_mount_t	*mp)
+{
+	xfs_log_item_t		*lip;
+	xfs_buf_log_item_t	*bip;
+	int			chunks;
+	int			map_size;
+
+	/*
+	 * Check to see if there is already a buf log item for
+	 * this buffer.  If there is, it is guaranteed to be
+	 * the first.  If we do already have one, there is
+	 * nothing to do here so return.
+	 */
+	if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp)
+		XFS_BUF_SET_FSPRIVATE3(bp, mp);
+	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
+	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
+		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+		if (lip->li_type == XFS_LI_BUF) {
+			return;
+		}
+	}
+
+	/*
+	 * chunks is the number of XFS_BLI_CHUNK size pieces
+	 * the buffer can be divided into. Make sure not to
+	 * truncate any pieces.  map_size is the size of the
+	 * bitmap needed to describe the chunks of the buffer.
+	 */
+	chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT);
+	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
+
+	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
+						    KM_SLEEP);
+	bip->bli_item.li_type = XFS_LI_BUF;
+	bip->bli_item.li_ops = &xfs_buf_item_ops;
+	bip->bli_item.li_mountp = mp;
+	bip->bli_buf = bp;
+	bip->bli_format.blf_type = XFS_LI_BUF;
+	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
+	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
+	bip->bli_format.blf_map_size = map_size;
+#ifdef XFS_BLI_TRACE
+	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
+#endif
+
+#ifdef XFS_TRANS_DEBUG
+	/*
+	 * Allocate the arrays for tracking what needs to be logged
+	 * and what our callers request to be logged.  bli_orig
+	 * holds a copy of the original, clean buffer for comparison
+	 * against, and bli_logged keeps a 1 bit flag per byte in
+	 * the buffer to indicate which bytes the callers have asked
+	 * to have logged.
+	 */
+	bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
+	memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
+	bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
+#endif
+
+	/*
+	 * Put the buf item into the list of items attached to the
+	 * buffer at the front.
+	 */
+	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
+		bip->bli_item.li_bio_list =
+				XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	}
+	XFS_BUF_SET_FSPRIVATE(bp, bip);
+}
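
For reference, the chunks/map_size arithmetic above rounds the buffer length up to whole 128-byte chunks and then sizes the dirty bitmap in 32-bit words, leaving a word of slack exactly as the kernel expression does. A minimal userspace sketch of that calculation follows; the constants mirror XFS_BLI_CHUNK and NBWORD, but the program itself is illustrative only.

/* Standalone sketch of the chunk/bitmap sizing arithmetic used by
 * xfs_buf_item_init().  Illustrative, not kernel code. */
#include <stdio.h>

#define CHUNK		128		/* bytes covered by one map bit */
#define CHUNK_SHIFT	7		/* log2(CHUNK) */
#define NBWORD		32u		/* bits per 32-bit map word */
#define WORD_SHIFT	5		/* log2(NBWORD) */

int main(void)
{
	unsigned int buf_len = 4096;	/* hypothetical buffer size */

	/* Round up so a partial trailing chunk still gets a bit. */
	unsigned int chunks = (buf_len + CHUNK - 1) >> CHUNK_SHIFT;

	/* One extra word of slack, as in the kernel expression. */
	unsigned int map_size = (chunks + NBWORD) >> WORD_SHIFT;

	printf("%u bytes -> %u chunks, %u bitmap words\n",
	       buf_len, chunks, map_size);
	return 0;
}
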
+
+
+/*
+ * Mark bytes first through last inclusive as dirty in the buf
+ * item's bitmap.
+ */
+void
+xfs_buf_item_log(
+	xfs_buf_log_item_t	*bip,
+	uint			first,
+	uint			last)
+{
+	uint		first_bit;
+	uint		last_bit;
+	uint		bits_to_set;
+	uint		bits_set;
+	uint		word_num;
+	uint		*wordp;
+	uint		bit;
+	uint		end_bit;
+	uint		mask;
+
+	/*
+	 * Mark the item as having some dirty data for
+	 * quick reference in xfs_buf_item_dirty.
+	 */
+	bip->bli_flags |= XFS_BLI_DIRTY;
+
+	/*
+	 * Convert byte offsets to bit numbers.
+	 */
+	first_bit = first >> XFS_BLI_SHIFT;
+	last_bit = last >> XFS_BLI_SHIFT;
+
+	/*
+	 * Calculate the total number of bits to be set.
+	 */
+	bits_to_set = last_bit - first_bit + 1;
+
+	/*
+	 * Get a pointer to the first word in the bitmap
+	 * to set a bit in.
+	 */
+	word_num = first_bit >> BIT_TO_WORD_SHIFT;
+	wordp = &(bip->bli_format.blf_data_map[word_num]);
+
+	/*
+	 * Calculate the starting bit in the first word.
+	 */
+	bit = first_bit & (uint)(NBWORD - 1);
+
+	/*
+	 * First set any bits in the first word of our range.
+	 * If it starts at bit 0 of the word, it will be
+	 * set below rather than here.  That is what the variable
+	 * bit tells us. The variable bits_set tracks the number
+	 * of bits that have been set so far.  End_bit is the number
+	 * of the last bit to be set in this word plus one.
+	 */
+	if (bit) {
+		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
+		mask = ((1 << (end_bit - bit)) - 1) << bit;
+		*wordp |= mask;
+		wordp++;
+		bits_set = end_bit - bit;
+	} else {
+		bits_set = 0;
+	}
+
+	/*
+	 * Now set bits a whole word at a time that are between
+	 * first_bit and last_bit.
+	 */
+	while ((bits_to_set - bits_set) >= NBWORD) {
+		*wordp |= 0xffffffff;
+		bits_set += NBWORD;
+		wordp++;
+	}
+
+	/*
+	 * Finally, set any bits left to be set in one last partial word.
+	 */
+	end_bit = bits_to_set - bits_set;
+	if (end_bit) {
+		mask = (1 << end_bit) - 1;
+		*wordp |= mask;
+	}
+
+	xfs_buf_item_log_debug(bip, first, last);
+}
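
The routine above sets a run of bits in three phases: a partial leading word, any number of whole words, then a partial trailing word. The same approach on a plain unsigned int array, as a self-contained sketch (illustrative only, not the kernel routine):

/* Three-phase range-set, as in xfs_buf_item_log(). */
#include <stdio.h>

#define NBWORD	32

static void set_bit_range(unsigned int *map, unsigned int first_bit,
			  unsigned int last_bit)
{
	unsigned int bits_to_set = last_bit - first_bit + 1;
	unsigned int *wordp = &map[first_bit / NBWORD];
	unsigned int bit = first_bit % NBWORD;
	unsigned int bits_set = 0;
	unsigned int end_bit, mask;

	/* Phase 1: bits in the first (possibly partial) word. */
	if (bit) {
		end_bit = bit + bits_to_set;
		if (end_bit > NBWORD)
			end_bit = NBWORD;
		mask = ((1u << (end_bit - bit)) - 1) << bit;
		*wordp++ |= mask;
		bits_set = end_bit - bit;
	}

	/* Phase 2: whole words between first_bit and last_bit. */
	while (bits_to_set - bits_set >= NBWORD) {
		*wordp++ |= 0xffffffffu;
		bits_set += NBWORD;
	}

	/* Phase 3: any remaining bits in one last partial word. */
	end_bit = bits_to_set - bits_set;
	if (end_bit)
		*wordp |= (1u << end_bit) - 1;
}

int main(void)
{
	unsigned int map[4] = { 0 };

	set_bit_range(map, 5, 70);	/* mark chunks 5..70 dirty */
	printf("%08x %08x %08x %08x\n", map[0], map[1], map[2], map[3]);
	return 0;
}
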
+
+
+/*
+ * Return 1 if the buffer has some data that has been logged (at any
+ * point, not just the current transaction) and 0 if not.
+ */
+uint
+xfs_buf_item_dirty(
+	xfs_buf_log_item_t	*bip)
+{
+	return (bip->bli_flags & XFS_BLI_DIRTY);
+}
+
+/*
+ * This is called when the buf log item is no longer needed.  It should
+ * free the buf log item associated with the given buffer and clear
+ * the buffer's pointer to the buf log item.  If there are no more
+ * items in the list, clear the b_iodone field of the buffer (see
+ * xfs_buf_attach_iodone() below).
+ */
+void
+xfs_buf_item_relse(
+	xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	xfs_buftrace("XFS_RELSE", bp);
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+	XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
+	if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
+	    (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
+		ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0);
+		XFS_BUF_CLR_IODONE_FUNC(bp);
+	}
+
+#ifdef XFS_TRANS_DEBUG
+	kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
+	bip->bli_orig = NULL;
+	kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
+	bip->bli_logged = NULL;
+#endif /* XFS_TRANS_DEBUG */
+
+#ifdef XFS_BLI_TRACE
+	ktrace_free(bip->bli_trace);
+#endif
+	kmem_zone_free(xfs_buf_item_zone, bip);
+}
+
+
+/*
+ * Add the given log item with its callback to the list of callbacks
+ * to be called when the buffer's I/O completes.  If it is not set
+ * already, set the buffer's b_iodone() routine to be
+ * xfs_buf_iodone_callbacks() and link the log item into the list of
+ * items rooted at b_fsprivate.  Items are always added as the second
+ * entry in the list if there is a first, because the buf item code
+ * assumes that the buf log item is first.
+ */
+void
+xfs_buf_attach_iodone(
+	xfs_buf_t	*bp,
+	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
+	xfs_log_item_t	*lip)
+{
+	xfs_log_item_t	*head_lip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+
+	lip->li_cb = cb;
+	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
+		head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+		lip->li_bio_list = head_lip->li_bio_list;
+		head_lip->li_bio_list = lip;
+	} else {
+		XFS_BUF_SET_FSPRIVATE(bp, lip);
+	}
+
+	ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
+	       (XFS_BUF_IODONE_FUNC(bp) == NULL));
+	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
+}
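
The splice above is what keeps the buf log item at the head of the b_fsprivate list while every later item becomes the second entry. A toy illustration of that insertion rule, using hypothetical stand-in types rather than the XFS structures:

/* "Always second" insertion, as in xfs_buf_attach_iodone(). */
#include <stdio.h>

struct log_item {
	struct log_item	*bio_list;
	const char	*name;
};

static void attach_item(struct log_item **headp, struct log_item *lip)
{
	if (*headp != NULL) {
		/* Keep the existing head; splice in as the second entry. */
		lip->bio_list = (*headp)->bio_list;
		(*headp)->bio_list = lip;
	} else {
		*headp = lip;
	}
}

int main(void)
{
	struct log_item buf_item = { NULL, "buf" };
	struct log_item inode1 = { NULL, "inode1" };
	struct log_item inode2 = { NULL, "inode2" };
	struct log_item *head = NULL;

	attach_item(&head, &buf_item);
	attach_item(&head, &inode1);
	attach_item(&head, &inode2);

	for (struct log_item *lip = head; lip; lip = lip->bio_list)
		printf("%s\n", lip->name);	/* buf, inode2, inode1 */
	return 0;
}
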
+
+STATIC void
+xfs_buf_do_callbacks(
+	xfs_buf_t	*bp,
+	xfs_log_item_t	*lip)
+{
+	xfs_log_item_t	*nlip;
+
+	while (lip != NULL) {
+		nlip = lip->li_bio_list;
+		ASSERT(lip->li_cb != NULL);
+		/*
+		 * Clear the next pointer so we don't have any
+		 * confusion if the item is added to another buf.
+		 * Don't touch the log item after calling its
+		 * callback, because it could have freed itself.
+		 */
+		lip->li_bio_list = NULL;
+		lip->li_cb(bp, lip);
+		lip = nlip;
+	}
+}
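
Because a callback may free its log item, the loop above caches the next pointer and severs the link before invoking the callback. The same pattern in a standalone sketch; the types and names are invented for illustration:

/* Safe callback iteration, as in xfs_buf_do_callbacks(). */
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item	*next;
	void		(*cb)(struct item *);
};

static void free_item(struct item *ip)
{
	printf("callback for %p\n", (void *)ip);
	free(ip);			/* legal: caller no longer touches ip */
}

static void run_callbacks(struct item *ip)
{
	struct item *nip;

	while (ip != NULL) {
		nip = ip->next;		/* grab next before the callback */
		ip->next = NULL;	/* avoid confusion if ip is reused */
		ip->cb(ip);		/* may free ip; don't touch it after */
		ip = nip;
	}
}

int main(void)
{
	struct item *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct item *ip = malloc(sizeof(*ip));
		ip->cb = free_item;
		ip->next = head;
		head = ip;
	}
	run_callbacks(head);
	return 0;
}
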
+
+/*
+ * This is the iodone() function for buffers which have had callbacks
+ * attached to them by xfs_buf_attach_iodone().  It should remove each
+ * log item from the buffer's list and call the callback of each in turn.
+ * When done, the buffer's fsprivate field is set to NULL and the buffer
+ * is unlocked with a call to iodone().
+ */
+void
+xfs_buf_iodone_callbacks(
+	xfs_buf_t	*bp)
+{
+	xfs_log_item_t	*lip;
+	static ulong	lasttime;
+	static xfs_buftarg_t *lasttarg;
+	xfs_mount_t	*mp;
+
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+
+	if (XFS_BUF_GETERROR(bp) != 0) {
+		/*
+		 * If we've already decided to shutdown the filesystem
+		 * because of IO errors, there's no point in giving this
+		 * a retry.
+		 */
+		mp = lip->li_mountp;
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
+			XFS_BUF_SUPER_STALE(bp);
+			xfs_buftrace("BUF_IODONE_CB", bp);
+			xfs_buf_do_callbacks(bp, lip);
+			XFS_BUF_SET_FSPRIVATE(bp, NULL);
+			XFS_BUF_CLR_IODONE_FUNC(bp);
+
+			/*
+			 * The XFS_SHUT flag gets set when we go through the
+			 * entire buffer cache and deliberately start
+			 * throwing away delayed write buffers.
+			 * Since there's no biowait done on those,
+			 * we should just brelse them.
+			 */
+			if (XFS_BUF_ISSHUT(bp)) {
+				XFS_BUF_UNSHUT(bp);
+				xfs_buf_relse(bp);
+			} else {
+				xfs_biodone(bp);
+			}
+
+			return;
+		}
+
+		if ((XFS_BUF_TARGET(bp) != lasttarg) ||
+		    (time_after(jiffies, (lasttime + 5*HZ)))) {
+			lasttime = jiffies;
+			prdev("XFS write error in file system meta-data "
+			      "block 0x%llx in %s",
+			      XFS_BUF_TARGET(bp),
+			      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
+		}
+		lasttarg = XFS_BUF_TARGET(bp);
+
+		if (XFS_BUF_ISASYNC(bp)) {
+			/*
+			 * If the write was asynchronous then no one will be
+			 * looking for the error.  Clear the error state
+			 * and write the buffer out again delayed write.
+			 *
+			 * XXXsup This is OK, so long as we catch these
+			 * before we start the umount; we don't want these
+			 * DELWRI metadata bufs to be hanging around.
+			 */
+			XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
+
+			if (!(XFS_BUF_ISSTALE(bp))) {
+				XFS_BUF_DELAYWRITE(bp);
+				XFS_BUF_DONE(bp);
+				XFS_BUF_SET_START(bp);
+			}
+			ASSERT(XFS_BUF_IODONE_FUNC(bp));
+			xfs_buftrace("BUF_IODONE ASYNC", bp);
+			xfs_buf_relse(bp);
+		} else {
+			/*
+			 * If the write of the buffer was not asynchronous,
+			 * then we want to make sure to return the error
+			 * to the caller of bwrite().  Because of this we
+			 * cannot clear the B_ERROR state at this point.
+			 * Instead we install a callback function that
+			 * will be called when the buffer is released, and
+			 * that routine will clear the error state and
+			 * set the buffer to be written out again after
+			 * some delay.
+			 */
+			/* We actually overwrite the existing brelse
+			   function at times, but we're going to be shutting
+			   down anyway. */
+			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
+			XFS_BUF_DONE(bp);
+			XFS_BUF_V_IODONESEMA(bp);
+		}
+		return;
+	}
+#ifdef XFSERRORDEBUG
+	xfs_buftrace("XFS BUFCB NOERR", bp);
+#endif
+	xfs_buf_do_callbacks(bp, lip);
+	XFS_BUF_SET_FSPRIVATE(bp, NULL);
+	XFS_BUF_CLR_IODONE_FUNC(bp);
+	xfs_biodone(bp);
+}
+
+/*
+ * This is a callback routine attached to a buffer which gets an error
+ * when being written out synchronously.
+ */
+STATIC void
+xfs_buf_error_relse(
+	xfs_buf_t	*bp)
+{
+	xfs_log_item_t	*lip;
+	xfs_mount_t	*mp;
+
+	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	mp = (xfs_mount_t *)lip->li_mountp;
+	ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
+
+	XFS_BUF_STALE(bp);
+	XFS_BUF_DONE(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+	XFS_BUF_ERROR(bp,0);
+	xfs_buftrace("BUF_ERROR_RELSE", bp);
+	if (! XFS_FORCED_SHUTDOWN(mp))
+		xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+	/*
+	 * We have to unpin the pinned buffers so do the
+	 * callbacks.
+	 */
+	xfs_buf_do_callbacks(bp, lip);
+	XFS_BUF_SET_FSPRIVATE(bp, NULL);
+	XFS_BUF_CLR_IODONE_FUNC(bp);
+	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
+	xfs_buf_relse(bp);
+}
+
+
+/*
+ * This is the iodone() function for buffers which have been
+ * logged.  It is called when they are eventually flushed out.
+ * It should remove the buf item from the AIL, and free the buf item.
+ * It is called by xfs_buf_iodone_callbacks() above which will take
+ * care of cleaning up the buffer itself.
+ */
+/* ARGSUSED */
+void
+xfs_buf_iodone(
+	xfs_buf_t		*bp,
+	xfs_buf_log_item_t	*bip)
+{
+	struct xfs_mount	*mp;
+	SPLDECL(s);
+
+	ASSERT(bip->bli_buf == bp);
+
+	mp = bip->bli_item.li_mountp;
+
+	/*
+	 * If we are forcibly shutting down, this may well be
+	 * off the AIL already. That's because we simulate the
+	 * log-committed callbacks to unpin these buffers. Or we may never
+	 * have put this item on the AIL because the transaction was
+	 * forcibly aborted. xfs_trans_delete_ail() takes care of these.
+	 *
+	 * Either way, the AIL is useless if we're forcing a shutdown.
+	 */
+	AIL_LOCK(mp,s);
+	/*
+	 * xfs_trans_delete_ail() drops the AIL lock.
+	 */
+	xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
+
+#ifdef XFS_TRANS_DEBUG
+	kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
+	bip->bli_orig = NULL;
+	kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
+	bip->bli_logged = NULL;
+#endif /* XFS_TRANS_DEBUG */
+
+#ifdef XFS_BLI_TRACE
+	ktrace_free(bip->bli_trace);
+#endif
+	kmem_zone_free(xfs_buf_item_zone, bip);
+}
+
+#if defined(XFS_BLI_TRACE)
+void
+xfs_buf_item_trace(
+	char			*id,
+	xfs_buf_log_item_t	*bip)
+{
+	xfs_buf_t		*bp;
+	ASSERT(bip->bli_trace != NULL);
+
+	bp = bip->bli_buf;
+	ktrace_enter(bip->bli_trace,
+		     (void *)id,
+		     (void *)bip->bli_buf,
+		     (void *)((unsigned long)bip->bli_flags),
+		     (void *)((unsigned long)bip->bli_recur),
+		     (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
+		     (void *)((unsigned long)
+				(0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
+		     (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
+		     (void *)((unsigned long)XFS_BUF_COUNT(bp)),
+		     (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
+		     XFS_BUF_FSPRIVATE(bp, void *),
+		     XFS_BUF_FSPRIVATE2(bp, void *),
+		     (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
+		     (void *)XFS_BUF_IODONE_FUNC(bp),
+		     (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
+		     (void *)bip->bli_item.li_desc,
+		     (void *)((unsigned long)bip->bli_item.li_flags));
+}
+#endif /* XFS_BLI_TRACE */
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
new file mode 100644
index 0000000..5f1b0c9
--- /dev/null
+++ b/fs/xfs/xfs_buf_item.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_BUF_ITEM_H__
+#define	__XFS_BUF_ITEM_H__
+
+/*
+ * This is the structure used to lay out a buf log item in the
+ * log.  The data map describes which 128 byte chunks of the buffer
+ * have been logged.  This structure works only on buffers that
+ * reside up to the first TB in the filesystem.  These buffers are
+ * generated only by pre-6.2 systems and are known as XFS_LI_6_1_BUF.
+ */
+typedef struct xfs_buf_log_format_v1 {
+	unsigned short	blf_type;	/* buf log item type indicator */
+	unsigned short	blf_size;	/* size of this item */
+	__int32_t	blf_blkno;	/* starting blkno of this buf */
+	ushort		blf_flags;	/* misc state */
+	ushort		blf_len;	/* number of blocks in this buf */
+	unsigned int	blf_map_size;	/* size of data bitmap in words */
+	unsigned int	blf_data_map[1];/* variable size bitmap of */
+					/*   regions of buffer in this item */
+} xfs_buf_log_format_v1_t;
+
+/*
+ * This is a form of the above structure with a 64 bit blkno field.
+ * For 6.2 and beyond, this is XFS_LI_BUF.  We use this to log everything.
+ */
+typedef struct xfs_buf_log_format_t {
+	unsigned short	blf_type;	/* buf log item type indicator */
+	unsigned short	blf_size;	/* size of this item */
+	ushort		blf_flags;	/* misc state */
+	ushort		blf_len;	/* number of blocks in this buf */
+	__int64_t	blf_blkno;	/* starting blkno of this buf */
+	unsigned int	blf_map_size;	/* size of data bitmap in words */
+	unsigned int	blf_data_map[1];/* variable size bitmap of */
+					/*   regions of buffer in this item */
+} xfs_buf_log_format_t;
+
+/*
+ * This flag indicates that the buffer contains on disk inodes
+ * and requires special recovery handling.
+ */
+#define	XFS_BLI_INODE_BUF	0x1
+/*
+ * This flag indicates that the buffer should not be replayed
+ * during recovery because its blocks are being freed.
+ */
+#define	XFS_BLI_CANCEL		0x2
+/*
+ * This flag indicates that the buffer contains on disk
+ * user or group dquots and may require special recovery handling.
+ */
+#define	XFS_BLI_UDQUOT_BUF	0x4
+/* #define XFS_BLI_PDQUOT_BUF	0x8 */
+#define	XFS_BLI_GDQUOT_BUF	0x10
+
+#define	XFS_BLI_CHUNK		128
+#define	XFS_BLI_SHIFT		7
+#define	BIT_TO_WORD_SHIFT	5
+#define	NBWORD			(NBBY * sizeof(unsigned int))
+
+/*
+ * buf log item flags
+ */
+#define	XFS_BLI_HOLD		0x01
+#define	XFS_BLI_DIRTY		0x02
+#define	XFS_BLI_STALE		0x04
+#define	XFS_BLI_LOGGED		0x08
+#define	XFS_BLI_INODE_ALLOC_BUF	0x10
+#define XFS_BLI_STALE_INODE	0x20
+
+
+#ifdef __KERNEL__
+
+struct xfs_buf;
+struct ktrace;
+struct xfs_mount;
+struct xfs_buf_log_item;
+
+#if defined(XFS_BLI_TRACE)
+#define	XFS_BLI_TRACE_SIZE	32
+
+void	xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
+#else
+#define	xfs_buf_item_trace(id, bip)
+#endif
+
+/*
+ * This is the in core log item structure used to track information
+ * needed to log buffers.  It tracks how many times the lock has been
+ * locked, and which 128 byte chunks of the buffer are dirty.
+ */
+typedef struct xfs_buf_log_item {
+	xfs_log_item_t		bli_item;	/* common item structure */
+	struct xfs_buf		*bli_buf;	/* real buffer pointer */
+	unsigned int		bli_flags;	/* misc flags */
+	unsigned int		bli_recur;	/* lock recursion count */
+	atomic_t		bli_refcount;	/* cnt of tp refs */
+#ifdef XFS_BLI_TRACE
+	struct ktrace		*bli_trace;	/* event trace buf */
+#endif
+#ifdef XFS_TRANS_DEBUG
+	char			*bli_orig;	/* original buffer copy */
+	char			*bli_logged;	/* bytes logged (bitmap) */
+#endif
+	xfs_buf_log_format_t	bli_format;	/* in-log header */
+} xfs_buf_log_item_t;
+
+/*
+ * This structure is used during recovery to record the buf log
+ * items which have been canceled and should not be replayed.
+ */
+typedef struct xfs_buf_cancel {
+	xfs_daddr_t		bc_blkno;
+	uint			bc_len;
+	int			bc_refcount;
+	struct xfs_buf_cancel	*bc_next;
+} xfs_buf_cancel_t;
+
+void	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
+void	xfs_buf_item_relse(struct xfs_buf *);
+void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
+uint	xfs_buf_item_dirty(xfs_buf_log_item_t *);
+void	xfs_buf_attach_iodone(struct xfs_buf *,
+			      void(*)(struct xfs_buf *, xfs_log_item_t *),
+			      xfs_log_item_t *);
+void	xfs_buf_iodone_callbacks(struct xfs_buf *);
+void	xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *);
+
+#ifdef XFS_TRANS_DEBUG
+void
+xfs_buf_item_flush_log_debug(
+	struct xfs_buf *bp,
+	uint	first,
+	uint	last);
+#else
+#define	xfs_buf_item_flush_log_debug(bp, first, last)
+#endif
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_BUF_ITEM_H__ */
diff --git a/fs/xfs/xfs_cap.h b/fs/xfs/xfs_cap.h
new file mode 100644
index 0000000..2deac73
--- /dev/null
+++ b/fs/xfs/xfs_cap.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_CAP_H__
+#define __XFS_CAP_H__
+
+/*
+ * Capabilities
+ */
+typedef __uint64_t xfs_cap_value_t;
+
+typedef struct xfs_cap_set {
+	xfs_cap_value_t	cap_effective;	/* use in capability checks */
+	xfs_cap_value_t	cap_permitted;	/* combined with file attrs */
+	xfs_cap_value_t	cap_inheritable;/* pass through exec */
+} xfs_cap_set_t;
+
+/* On-disk XFS extended attribute names */
+#define SGI_CAP_FILE	"SGI_CAP_FILE"
+#define SGI_CAP_FILE_SIZE	(sizeof(SGI_CAP_FILE)-1)
+#define SGI_CAP_LINUX	"SGI_CAP_LINUX"
+#define SGI_CAP_LINUX_SIZE	(sizeof(SGI_CAP_LINUX)-1)
+
+/*
+ * For Linux, we take the bitfields directly from capability.h
+ * and no longer attempt to keep this attribute ondisk compatible
+ * with IRIX.  Since this attribute is only set on executables,
+ * it just doesn't make much sense to try.  We do use a differently
+ * named attribute though, to avoid confusion.
+ */
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_FS_POSIX_CAP
+
+#include <linux/posix_cap_xattr.h>
+
+struct vnode;
+
+extern int xfs_cap_vhascap(struct vnode *);
+extern int xfs_cap_vset(struct vnode *, void *, size_t);
+extern int xfs_cap_vget(struct vnode *, void *, size_t);
+extern int xfs_cap_vremove(struct vnode *vp);
+
+#define _CAP_EXISTS		xfs_cap_vhascap
+
+#else
+#define xfs_cap_vset(v,p,sz)	(-EOPNOTSUPP)
+#define xfs_cap_vget(v,p,sz)	(-EOPNOTSUPP)
+#define xfs_cap_vremove(v)	(-EOPNOTSUPP)
+#define _CAP_EXISTS		(NULL)
+#endif
+
+#endif	/* __KERNEL__ */
+
+#endif  /* __XFS_CAP_H__ */
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
new file mode 100644
index 0000000..b3215ff
--- /dev/null
+++ b/fs/xfs/xfs_clnt.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_CLNT_H__
+#define __XFS_CLNT_H__
+
+/*
+ * XFS arguments structure, constructed from the arguments we
+ * are passed via the mount system call.
+ *
+ * NOTE: The mount system call is handled differently between
+ * Linux and IRIX.  In IRIX we worked with a binary data
+ * structure coming in across the syscall interface from user
+ * space (the mount userspace knows about each filesystem type
+ * and the set of valid options for it, and converts the user's
+ * argument string into a binary structure _before_ making the
+ * system call), and the ABI issues that this implies.
+ *
+ * In Linux, we are passed a comma-separated set of options,
+ * i.e. a NULL-terminated string of characters.  Userspace mount
+ * code does not have any knowledge of mount options expected by
+ * each filesystem type and so each filesystem parses its mount
+ * options in kernel space.
+ *
+ * For the Linux port, we kept this structure pretty much intact
+ * and use it internally (because the existing code groks it).
+ */
+struct xfs_mount_args {
+	int	flags;		/* flags -> see XFSMNT_... macros below */
+	int	logbufs;	/* Number of log buffers, -1 to default */
+	int	logbufsize;	/* Size of log buffers, -1 to default */
+	char	fsname[MAXNAMELEN+1];	/* data device name */
+	char	rtname[MAXNAMELEN+1];	/* realtime device filename */
+	char	logname[MAXNAMELEN+1];	/* journal device filename */
+	char	mtpt[MAXNAMELEN+1];	/* filesystem mount point */
+	int	sunit;		/* stripe unit (BBs) */
+	int	swidth;		/* stripe width (BBs), multiple of sunit */
+	uchar_t iosizelog;	/* log2 of the preferred I/O size */
+	int	ihashsize;	/* inode hash table size (buckets) */
+};
+
+/*
+ * XFS mount option flags
+ */
+#define	XFSMNT_CHKLOG		0x00000001	/* check log */
+#define	XFSMNT_WSYNC		0x00000002	/* safe mode nfs mount
+						 * compatible */
+#define	XFSMNT_INO64		0x00000004	/* move inode numbers up
+						 * past 2^32 */
+#define XFSMNT_UQUOTA		0x00000008	/* user quota accounting */
+#define XFSMNT_PQUOTA		0x00000010	/* IRIX prj quota accounting */
+#define XFSMNT_UQUOTAENF	0x00000020	/* user quota limit
+						 * enforcement */
+#define XFSMNT_PQUOTAENF	0x00000040	/* IRIX project quota limit
+						 * enforcement */
+#define XFSMNT_NOATIME		0x00000100	/* don't modify access
+						 * times on reads */
+#define XFSMNT_NOALIGN		0x00000200	/* don't allocate at
+						 * stripe boundaries*/
+#define XFSMNT_RETERR		0x00000400	/* return error to user */
+#define XFSMNT_NORECOVERY	0x00000800	/* no recovery, implies
+						 * read-only mount */
+#define XFSMNT_SHARED		0x00001000	/* shared XFS mount */
+#define XFSMNT_IOSIZE		0x00002000	/* optimize for I/O size */
+#define XFSMNT_OSYNCISOSYNC	0x00004000	/* o_sync is REALLY o_sync */
+						/* (osyncisdsync is now default) */
+#define XFSMNT_32BITINODES	0x00200000	/* restrict inodes to 32
+						 * bits of address space */
+#define XFSMNT_GQUOTA		0x00400000	/* group quota accounting */
+#define XFSMNT_GQUOTAENF	0x00800000	/* group quota limit
+						 * enforcement */
+#define XFSMNT_NOUUID		0x01000000	/* Ignore fs uuid */
+#define XFSMNT_DMAPI		0x02000000	/* enable dmapi/xdsm */
+#define XFSMNT_NOLOGFLUSH	0x04000000	/* Don't flush for log blocks */
+#define XFSMNT_IDELETE		0x08000000	/* inode cluster delete */
+#define XFSMNT_SWALLOC		0x10000000	/* turn on stripe width
+						 * allocation */
+#define XFSMNT_IHASHSIZE	0x20000000	/* inode hash table size */
+#define XFSMNT_DIRSYNC		0x40000000	/* sync creat,link,unlink,rename
+						 * symlink,mkdir,rmdir,mknod */
+
+#endif	/* __XFS_CLNT_H__ */
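
As the comment at the top of this header notes, Linux hands the filesystem a comma-separated option string and the kernel parses it itself. A rough sketch of that style of parsing follows; the option names and flag values are invented for illustration and are not the real XFSMNT_* parser.

/* Toy comma-separated mount-option parser (illustrative only). */
#include <stdio.h>
#include <string.h>

#define OPT_NOATIME	0x1
#define OPT_NOALIGN	0x2

static int parse_options(char *options, int *flags)
{
	char *opt;

	*flags = 0;
	for (opt = strtok(options, ","); opt; opt = strtok(NULL, ",")) {
		if (strcmp(opt, "noatime") == 0)
			*flags |= OPT_NOATIME;
		else if (strcmp(opt, "noalign") == 0)
			*flags |= OPT_NOALIGN;
		else
			return -1;	/* unknown option */
	}
	return 0;
}

int main(void)
{
	char opts[] = "noatime,noalign";
	int flags;

	if (parse_options(opts, &flags) == 0)
		printf("flags = 0x%x\n", flags);
	return 0;
}
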
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
new file mode 100644
index 0000000..d7fe288
--- /dev/null
+++ b/fs/xfs/xfs_da_btree.c
@@ -0,0 +1,2648 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+
+/*
+ * xfs_da_btree.c
+ *
+ * Routines to implement directories as Btrees of hashed names.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Routines used for growing the Btree.
+ */
+STATIC int xfs_da_root_split(xfs_da_state_t *state,
+					    xfs_da_state_blk_t *existing_root,
+					    xfs_da_state_blk_t *new_child);
+STATIC int xfs_da_node_split(xfs_da_state_t *state,
+					    xfs_da_state_blk_t *existing_blk,
+					    xfs_da_state_blk_t *split_blk,
+					    xfs_da_state_blk_t *blk_to_add,
+					    int treelevel,
+					    int *result);
+STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
+					 xfs_da_state_blk_t *node_blk_1,
+					 xfs_da_state_blk_t *node_blk_2);
+STATIC void xfs_da_node_add(xfs_da_state_t *state,
+				   xfs_da_state_blk_t *old_node_blk,
+				   xfs_da_state_blk_t *new_node_blk);
+
+/*
+ * Routines used for shrinking the Btree.
+ */
+STATIC int xfs_da_root_join(xfs_da_state_t *state,
+					   xfs_da_state_blk_t *root_blk);
+STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
+STATIC void xfs_da_node_remove(xfs_da_state_t *state,
+					      xfs_da_state_blk_t *drop_blk);
+STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
+					 xfs_da_state_blk_t *src_node_blk,
+					 xfs_da_state_blk_t *dst_node_blk);
+
+/*
+ * Utility routines.
+ */
+STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
+STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
+STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra);
+
+
+/*========================================================================
+ * Routines used for growing the Btree.
+ *========================================================================*/
+
+/*
+ * Create the initial contents of an intermediate node.
+ */
+int
+xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
+				 xfs_dabuf_t **bpp, int whichfork)
+{
+	xfs_da_intnode_t *node;
+	xfs_dabuf_t *bp;
+	int error;
+	xfs_trans_t *tp;
+
+	tp = args->trans;
+	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+	node = bp->data;
+	node->hdr.info.forw = 0;
+	node->hdr.info.back = 0;
+	INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC);
+	node->hdr.info.pad = 0;
+	node->hdr.count = 0;
+	INT_SET(node->hdr.level, ARCH_CONVERT, level);
+
+	xfs_da_log_buf(tp, bp,
+		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
+
+	*bpp = bp;
+	return(0);
+}
+
+/*
+ * Split a leaf node, rebalance, then possibly split
+ * intermediate nodes, rebalance, etc.
+ */
+int							/* error */
+xfs_da_split(xfs_da_state_t *state)
+{
+	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
+	xfs_da_intnode_t *node;
+	xfs_dabuf_t *bp;
+	int max, action, error, i;
+
+	/*
+	 * Walk back up the tree splitting/inserting/adjusting as necessary.
+	 * If we need to insert and there isn't room, split the node, then
+	 * decide which fragment to insert the new block from below into.
+	 * Note that we may split the root this way, but we need more fixup.
+	 */
+	max = state->path.active - 1;
+	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
+	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
+	       state->path.blk[max].magic == XFS_DIRX_LEAF_MAGIC(state->mp));
+
+	addblk = &state->path.blk[max];		/* initial dummy value */
+	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
+		oldblk = &state->path.blk[i];
+		newblk = &state->altpath.blk[i];
+
+		/*
+		 * If a leaf node then
+		 *     Allocate a new leaf node, then rebalance across them.
+		 * else if an intermediate node then
+		 *     We split on the last layer, must we split the node?
+		 */
+		switch (oldblk->magic) {
+		case XFS_ATTR_LEAF_MAGIC:
+#ifndef __KERNEL__
+			return(ENOTTY);
+#else
+			error = xfs_attr_leaf_split(state, oldblk, newblk);
+			if ((error != 0) && (error != ENOSPC)) {
+				return(error);	/* GROT: attr is inconsistent */
+			}
+			if (!error) {
+				addblk = newblk;
+				break;
+			}
+			/*
+			 * Entry wouldn't fit, split the leaf again.
+			 */
+			state->extravalid = 1;
+			if (state->inleaf) {
+				state->extraafter = 0;	/* before newblk */
+				error = xfs_attr_leaf_split(state, oldblk,
+							    &state->extrablk);
+			} else {
+				state->extraafter = 1;	/* after newblk */
+				error = xfs_attr_leaf_split(state, newblk,
+							    &state->extrablk);
+			}
+			if (error)
+				return(error);	/* GROT: attr inconsistent */
+			addblk = newblk;
+			break;
+#endif
+		case XFS_DIR_LEAF_MAGIC:
+			ASSERT(XFS_DIR_IS_V1(state->mp));
+			error = xfs_dir_leaf_split(state, oldblk, newblk);
+			if ((error != 0) && (error != ENOSPC)) {
+				return(error);	/* GROT: dir is inconsistent */
+			}
+			if (!error) {
+				addblk = newblk;
+				break;
+			}
+			/*
+			 * Entry wouldn't fit, split the leaf again.
+			 */
+			state->extravalid = 1;
+			if (state->inleaf) {
+				state->extraafter = 0;	/* before newblk */
+				error = xfs_dir_leaf_split(state, oldblk,
+							   &state->extrablk);
+				if (error)
+					return(error);	/* GROT: dir incon. */
+				addblk = newblk;
+			} else {
+				state->extraafter = 1;	/* after newblk */
+				error = xfs_dir_leaf_split(state, newblk,
+							   &state->extrablk);
+				if (error)
+					return(error);	/* GROT: dir incon. */
+				addblk = newblk;
+			}
+			break;
+		case XFS_DIR2_LEAFN_MAGIC:
+			ASSERT(XFS_DIR_IS_V2(state->mp));
+			error = xfs_dir2_leafn_split(state, oldblk, newblk);
+			if (error)
+				return error;
+			addblk = newblk;
+			break;
+		case XFS_DA_NODE_MAGIC:
+			error = xfs_da_node_split(state, oldblk, newblk, addblk,
+							 max - i, &action);
+			xfs_da_buf_done(addblk->bp);
+			addblk->bp = NULL;
+			if (error)
+				return(error);	/* GROT: dir is inconsistent */
+			/*
+			 * Record the newly split block for the next time through?
+			 */
+			if (action)
+				addblk = newblk;
+			else
+				addblk = NULL;
+			break;
+		}
+
+		/*
+		 * Update the btree to show the new hashval for this child.
+		 */
+		xfs_da_fixhashpath(state, &state->path);
+		/*
+		 * If we won't need this block again, it's getting dropped
+		 * from the active path by the loop control, so we need
+		 * to mark it done now.
+		 */
+		if (i > 0 || !addblk)
+			xfs_da_buf_done(oldblk->bp);
+	}
+	if (!addblk)
+		return(0);
+
+	/*
+	 * Split the root node.
+	 */
+	ASSERT(state->path.active == 0);
+	oldblk = &state->path.blk[0];
+	error = xfs_da_root_split(state, oldblk, addblk);
+	if (error) {
+		xfs_da_buf_done(oldblk->bp);
+		xfs_da_buf_done(addblk->bp);
+		addblk->bp = NULL;
+		return(error);	/* GROT: dir is inconsistent */
+	}
+
+	/*
+	 * Update pointers to the node which used to be block 0 and
+	 * just got bumped because of the addition of a new root node.
+	 * There might be three blocks involved if a double split occurred,
+	 * and the original block 0 could be at any position in the list.
+	 */
+
+	node = oldblk->bp->data;
+	if (node->hdr.info.forw) {
+		if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) {
+			bp = addblk->bp;
+		} else {
+			ASSERT(state->extravalid);
+			bp = state->extrablk.bp;
+		}
+		node = bp->data;
+		INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno);
+		xfs_da_log_buf(state->args->trans, bp,
+		    XFS_DA_LOGRANGE(node, &node->hdr.info,
+		    sizeof(node->hdr.info)));
+	}
+	node = oldblk->bp->data;
+	if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) {
+		if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) {
+			bp = addblk->bp;
+		} else {
+			ASSERT(state->extravalid);
+			bp = state->extrablk.bp;
+		}
+		node = bp->data;
+		INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno);
+		xfs_da_log_buf(state->args->trans, bp,
+		    XFS_DA_LOGRANGE(node, &node->hdr.info,
+		    sizeof(node->hdr.info)));
+	}
+	xfs_da_buf_done(oldblk->bp);
+	xfs_da_buf_done(addblk->bp);
+	addblk->bp = NULL;
+	return(0);
+}
+
+/*
+ * Split the root.  We have to create a new root and point to the two
+ * parts (the split old root) that we just created.  Copy block zero to
+ * the EOF, extending the inode in the process.
+ */
+STATIC int						/* error */
+xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
+				 xfs_da_state_blk_t *blk2)
+{
+	xfs_da_intnode_t *node, *oldroot;
+	xfs_da_args_t *args;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+	int error, size;
+	xfs_inode_t *dp;
+	xfs_trans_t *tp;
+	xfs_mount_t *mp;
+	xfs_dir2_leaf_t *leaf;
+
+	/*
+	 * Copy the existing (incorrect) block from the root node position
+	 * to a free space somewhere.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	error = xfs_da_grow_inode(args, &blkno);
+	if (error)
+		return(error);
+	dp = args->dp;
+	tp = args->trans;
+	mp = state->mp;
+	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+	node = bp->data;
+	oldroot = blk1->bp->data;
+	if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) {
+		size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] -
+			     (char *)oldroot);
+	} else {
+		ASSERT(XFS_DIR_IS_V2(mp));
+		ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+		leaf = (xfs_dir2_leaf_t *)oldroot;
+		size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] -
+			     (char *)leaf);
+	}
+	memcpy(node, oldroot, size);
+	xfs_da_log_buf(tp, bp, 0, size - 1);
+	xfs_da_buf_done(blk1->bp);
+	blk1->bp = bp;
+	blk1->blkno = blkno;
+
+	/*
+	 * Set up the new root node.
+	 */
+	error = xfs_da_node_create(args,
+		args->whichfork == XFS_DATA_FORK &&
+		XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0,
+		INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork);
+	if (error)
+		return(error);
+	node = bp->data;
+	INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval);
+	INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno);
+	INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval);
+	INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno);
+	INT_SET(node->hdr.count, ARCH_CONVERT, 2);
+
+#ifdef DEBUG
+	if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) {
+		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
+		       blk1->blkno < mp->m_dirfreeblk);
+		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
+		       blk2->blkno < mp->m_dirfreeblk);
+	}
+#endif
+
+	/* Header is already logged by xfs_da_node_create */
+	xfs_da_log_buf(tp, bp,
+		XFS_DA_LOGRANGE(node, node->btree,
+			sizeof(xfs_da_node_entry_t) * 2));
+	xfs_da_buf_done(bp);
+
+	return(0);
+}
+
+/*
+ * Split the node, rebalance, then add the new entry.
+ */
+STATIC int						/* error */
+xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
+				 xfs_da_state_blk_t *newblk,
+				 xfs_da_state_blk_t *addblk,
+				 int treelevel, int *result)
+{
+	xfs_da_intnode_t *node;
+	xfs_dablk_t blkno;
+	int newcount, error;
+	int useextra;
+
+	node = oldblk->bp->data;
+	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+
+	/*
+	 * With V2 the extra block is data or freespace.
+	 */
+	useextra = state->extravalid && XFS_DIR_IS_V1(state->mp);
+	newcount = 1 + useextra;
+	/*
+	 * Do we have to split the node?
+	 */
+	if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) {
+		/*
+		 * Allocate a new node, add to the doubly linked chain of
+		 * nodes, then move some of our excess entries into it.
+		 */
+		error = xfs_da_grow_inode(state->args, &blkno);
+		if (error)
+			return(error);	/* GROT: dir is inconsistent */
+
+		error = xfs_da_node_create(state->args, blkno, treelevel,
+					   &newblk->bp, state->args->whichfork);
+		if (error)
+			return(error);	/* GROT: dir is inconsistent */
+		newblk->blkno = blkno;
+		newblk->magic = XFS_DA_NODE_MAGIC;
+		xfs_da_node_rebalance(state, oldblk, newblk);
+		error = xfs_da_blk_link(state, oldblk, newblk);
+		if (error)
+			return(error);
+		*result = 1;
+	} else {
+		*result = 0;
+	}
+
+	/*
+	 * Insert the new entry(s) into the correct block
+	 * (updating last hashval in the process).
+	 *
+	 * xfs_da_node_add() inserts BEFORE the given index,
+	 * and as a result of using node_lookup_int() we always
+	 * point to a valid entry (not after one), but a split
+	 * operation always results in a new block whose hashvals
+	 * FOLLOW the current block.
+	 *
+	 * If we had double-split op below us, then add the extra block too.
+	 */
+	node = oldblk->bp->data;
+	if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) {
+		oldblk->index++;
+		xfs_da_node_add(state, oldblk, addblk);
+		if (useextra) {
+			if (state->extraafter)
+				oldblk->index++;
+			xfs_da_node_add(state, oldblk, &state->extrablk);
+			state->extravalid = 0;
+		}
+	} else {
+		newblk->index++;
+		xfs_da_node_add(state, newblk, addblk);
+		if (useextra) {
+			if (state->extraafter)
+				newblk->index++;
+			xfs_da_node_add(state, newblk, &state->extrablk);
+			state->extravalid = 0;
+		}
+	}
+
+	return(0);
+}
+
+/*
+ * Balance the btree elements between two intermediate nodes,
+ * usually one full and one empty.
+ *
+ * NOTE: if blk2 is empty, then it will get the upper half of blk1.
+ */
+STATIC void
+xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
+				     xfs_da_state_blk_t *blk2)
+{
+	xfs_da_intnode_t *node1, *node2, *tmpnode;
+	xfs_da_node_entry_t *btree_s, *btree_d;
+	int count, tmp;
+	xfs_trans_t *tp;
+
+	node1 = blk1->bp->data;
+	node2 = blk2->bp->data;
+	/*
+	 * Figure out how many entries need to move, and in which direction.
+	 * Swap the nodes around if that makes it simpler.
+	 */
+	if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
+	    ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) ||
+	     (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
+	      INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
+		tmpnode = node1;
+		node1 = node2;
+		node2 = tmpnode;
+	}
+	ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2;
+	if (count == 0)
+		return;
+	tp = state->args->trans;
+	/*
+	 * Two cases: high-to-low and low-to-high.
+	 */
+	if (count > 0) {
+		/*
+		 * Move elements in node2 up to make a hole.
+		 */
+		if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) {
+			tmp *= (uint)sizeof(xfs_da_node_entry_t);
+			btree_s = &node2->btree[0];
+			btree_d = &node2->btree[count];
+			memmove(btree_d, btree_s, tmp);
+		}
+
+		/*
+		 * Move the required B-tree elements from high in node1 to
+		 * low in node2.
+		 */
+		INT_MOD(node2->hdr.count, ARCH_CONVERT, count);
+		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
+		btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count];
+		btree_d = &node2->btree[0];
+		memcpy(btree_d, btree_s, tmp);
+		INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count));
+
+	} else {
+		/*
+		 * Move the required B-tree elements from low in node2 to
+		 * high in node1.
+		 */
+		count = -count;
+		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
+		btree_s = &node2->btree[0];
+		btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)];
+		memcpy(btree_d, btree_s, tmp);
+		INT_MOD(node1->hdr.count, ARCH_CONVERT, count);
+		xfs_da_log_buf(tp, blk1->bp,
+			XFS_DA_LOGRANGE(node1, btree_d, tmp));
+
+		/*
+		 * Move elements in node2 down to fill the hole.
+		 */
+		tmp  = INT_GET(node2->hdr.count, ARCH_CONVERT) - count;
+		tmp *= (uint)sizeof(xfs_da_node_entry_t);
+		btree_s = &node2->btree[count];
+		btree_d = &node2->btree[0];
+		memmove(btree_d, btree_s, tmp);
+		INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count));
+	}
+
+	/*
+	 * Log header of node 1 and all current bits of node 2.
+	 */
+	xfs_da_log_buf(tp, blk1->bp,
+		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
+	xfs_da_log_buf(tp, blk2->bp,
+		XFS_DA_LOGRANGE(node2, &node2->hdr,
+			sizeof(node2->hdr) +
+			sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT)));
+
+	/*
+	 * Record the last hashval from each block for upward propagation.
+	 * (note: don't use the swapped node pointers)
+	 */
+	node1 = blk1->bp->data;
+	node2 = blk2->bp->data;
+	blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+	blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+
+	/*
+	 * Adjust the expected index for insertion.
+	 */
+	if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) {
+		blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT);
+		blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1;	/* make it invalid */
+	}
+}
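
The rebalance above moves (count1 - count2) / 2 entries from the fuller node to the emptier one, after possibly swapping the node pointers so node1 always holds the lower hashvals. A cut-down sketch of the high-to-low case on plain int arrays; the low-to-high direction is analogous, and this is illustrative only, not the kernel routine.

/* Move half the surplus from the fuller array to the emptier one,
 * preserving sort order (cf. xfs_da_node_rebalance, count > 0 case). */
#include <stdio.h>
#include <string.h>

#define MAXENT	8

static void rebalance(int *n1, int *c1, int *n2, int *c2)
{
	int count = (*c1 - *c2) / 2;

	if (count <= 0)
		return;
	/* Make a hole at the front of n2, then move the tail of n1 in. */
	memmove(n2 + count, n2, *c2 * sizeof(int));
	memcpy(n2, n1 + (*c1 - count), count * sizeof(int));
	*c1 -= count;
	*c2 += count;
}

int main(void)
{
	int n1[MAXENT] = { 10, 20, 30, 40, 50, 60 }, c1 = 6;
	int n2[MAXENT] = { 70, 80 }, c2 = 2;
	int i;

	rebalance(n1, &c1, n2, &c2);
	for (i = 0; i < c1; i++)
		printf("%d ", n1[i]);
	printf("| ");
	for (i = 0; i < c2; i++)
		printf("%d ", n2[i]);
	printf("\n");	/* 10 20 30 40 | 50 60 70 80 */
	return 0;
}
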
+
+/*
+ * Add a new entry to an intermediate node.
+ */
+STATIC void
+xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
+			       xfs_da_state_blk_t *newblk)
+{
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	int tmp;
+	xfs_mount_t *mp;
+
+	node = oldblk->bp->data;
+	mp = state->mp;
+	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)));
+	ASSERT(newblk->blkno != 0);
+	if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+		ASSERT(newblk->blkno >= mp->m_dirleafblk &&
+		       newblk->blkno < mp->m_dirfreeblk);
+
+	/*
+	 * We may need to make some room before we insert the new node.
+	 */
+	tmp = 0;
+	btree = &node->btree[ oldblk->index ];
+	if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) {
+		tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree);
+		memmove(btree + 1, btree, tmp);
+	}
+	INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval);
+	INT_SET(btree->before, ARCH_CONVERT, newblk->blkno);
+	xfs_da_log_buf(state->args->trans, oldblk->bp,
+		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
+	INT_MOD(node->hdr.count, ARCH_CONVERT, +1);
+	xfs_da_log_buf(state->args->trans, oldblk->bp,
+		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
+
+	/*
+	 * Copy the last hash value from the oldblk to propagate upwards.
+	 */
+	oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+}
+
+/*========================================================================
+ * Routines used for shrinking the Btree.
+ *========================================================================*/
+
+/*
+ * Deallocate an empty leaf node, remove it from its parent,
+ * possibly deallocating that block, etc...
+ */
+int
+xfs_da_join(xfs_da_state_t *state)
+{
+	xfs_da_state_blk_t *drop_blk, *save_blk;
+	int action, error;
+
+	action = 0;
+	drop_blk = &state->path.blk[ state->path.active-1 ];
+	save_blk = &state->altpath.blk[ state->path.active-1 ];
+	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
+	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
+	       drop_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp));
+
+	/*
+	 * Walk back up the tree joining/deallocating as necessary.
+	 * When we stop dropping blocks, break out.
+	 */
+	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
+		 state->path.active--) {
+		/*
+		 * See if we can combine the block with a neighbor.
+		 *   (action == 0) => no options, just leave
+		 *   (action == 1) => coalesce, then unlink
+		 *   (action == 2) => block empty, unlink it
+		 */
+		switch (drop_blk->magic) {
+		case XFS_ATTR_LEAF_MAGIC:
+#ifndef __KERNEL__
+			error = ENOTTY;
+#else
+			error = xfs_attr_leaf_toosmall(state, &action);
+#endif
+			if (error)
+				return(error);
+			if (action == 0)
+				return(0);
+#ifdef __KERNEL__
+			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
+#endif
+			break;
+		case XFS_DIR_LEAF_MAGIC:
+			ASSERT(XFS_DIR_IS_V1(state->mp));
+			error = xfs_dir_leaf_toosmall(state, &action);
+			if (error)
+				return(error);
+			if (action == 0)
+				return(0);
+			xfs_dir_leaf_unbalance(state, drop_blk, save_blk);
+			break;
+		case XFS_DIR2_LEAFN_MAGIC:
+			ASSERT(XFS_DIR_IS_V2(state->mp));
+			error = xfs_dir2_leafn_toosmall(state, &action);
+			if (error)
+				return error;
+			if (action == 0)
+				return 0;
+			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
+			break;
+		case XFS_DA_NODE_MAGIC:
+			/*
+			 * Remove the offending node, fixup hashvals,
+			 * check for a toosmall neighbor.
+			 */
+			xfs_da_node_remove(state, drop_blk);
+			xfs_da_fixhashpath(state, &state->path);
+			error = xfs_da_node_toosmall(state, &action);
+			if (error)
+				return(error);
+			if (action == 0)
+				return 0;
+			xfs_da_node_unbalance(state, drop_blk, save_blk);
+			break;
+		}
+		xfs_da_fixhashpath(state, &state->altpath);
+		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
+		xfs_da_state_kill_altpath(state);
+		if (error)
+			return(error);
+		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
+							 drop_blk->bp);
+		drop_blk->bp = NULL;
+		if (error)
+			return(error);
+	}
+	/*
+	 * We joined all the way to the top.  If it turns out that
+	 * we only have one entry in the root, make the child block
+	 * the new root.
+	 */
+	xfs_da_node_remove(state, drop_blk);
+	xfs_da_fixhashpath(state, &state->path);
+	error = xfs_da_root_join(state, &state->path.blk[0]);
+	return(error);
+}
+
+/*
+ * We have only one entry in the root.  Copy the only remaining child of
+ * the old root to block 0 as the new root node.
+ */
+STATIC int
+xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
+{
+	xfs_da_intnode_t *oldroot;
+	/* REFERENCED */
+	xfs_da_blkinfo_t *blkinfo;
+	xfs_da_args_t *args;
+	xfs_dablk_t child;
+	xfs_dabuf_t *bp;
+	int error;
+
+	args = state->args;
+	ASSERT(args != NULL);
+	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
+	oldroot = root_blk->bp->data;
+	ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	ASSERT(!oldroot->hdr.info.forw);
+	ASSERT(!oldroot->hdr.info.back);
+
+	/*
+	 * If the root has more than one child, then don't do anything.
+	 */
+	if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1)
+		return(0);
+
+	/*
+	 * Read in the (only) child block, then copy those bytes into
+	 * the root block's buffer and free the original child block.
+	 */
+	child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT);
+	ASSERT(child != 0);
+	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
+					     args->whichfork);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+	blkinfo = bp->data;
+	if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) {
+		ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+		       INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+	} else {
+		ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	}
+	ASSERT(!blkinfo->forw);
+	ASSERT(!blkinfo->back);
+	memcpy(root_blk->bp->data, bp->data, state->blocksize);
+	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
+	error = xfs_da_shrink_inode(args, child, bp);
+	return(error);
+}
+
+/*
+ * Check a node block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor.  Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
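+ *
+ * For example, if node_ents were 64 (the real value depends on the
+ * filesystem block size), a block holding more than 32 entries is left
+ * alone, and a merge with a sibling is attempted only when the combined
+ * count fits within 64 - 16 = 48 entries, i.e. with at least 25% free.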
+ */
+STATIC int
+xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
+{
+	xfs_da_intnode_t *node;
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *info;
+	int count, forward, error, retval, i;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+
+	/*
+	 * Check for the degenerate case of the block being over 50% full.
+	 * If so, it's not worth even looking to see if we might be able
+	 * to coalesce with a sibling.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	info = blk->bp->data;
+	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	node = (xfs_da_intnode_t *)info;
+	count = INT_GET(node->hdr.count, ARCH_CONVERT);
+	if (count > (state->node_ents >> 1)) {
+		*action = 0;	/* blk over 50%, don't try to join */
+		return(0);
+	}
+
+	/*
+	 * Check for the degenerate case of the block being empty.
+	 * If the block is empty, we'll simply delete it, no need to
+	 * coalesce it with a sibling block.  We choose (arbitrarily)
+	 * to merge with the forward block unless it is NULL.
+	 */
+	if (count == 0) {
+		/*
+		 * Make altpath point to the block we want to keep and
+		 * path point to the block we want to drop (this one).
+		 */
+		forward = info->forw;
+		memcpy(&state->altpath, &state->path, sizeof(state->path));
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+		if (error)
+			return(error);
+		if (retval) {
+			*action = 0;
+		} else {
+			*action = 2;
+		}
+		return(0);
+	}
+
+	/*
+	 * Examine each sibling block to see if we can coalesce with
+	 * at least 25% free space to spare.  We need to figure out
+	 * whether to merge with the forward or the backward block.
+	 * We prefer coalescing with the lower numbered sibling so as
+	 * to shrink a directory over time.
+	 */
+	/* start with smaller blk num */
+	forward = (INT_GET(info->forw, ARCH_CONVERT)
+				< INT_GET(info->back, ARCH_CONVERT));
+	for (i = 0; i < 2; forward = !forward, i++) {
+		if (forward)
+			blkno = INT_GET(info->forw, ARCH_CONVERT);
+		else
+			blkno = INT_GET(info->back, ARCH_CONVERT);
+		if (blkno == 0)
+			continue;
+		error = xfs_da_read_buf(state->args->trans, state->args->dp,
+					blkno, -1, &bp, state->args->whichfork);
+		if (error)
+			return(error);
+		ASSERT(bp != NULL);
+
+		node = (xfs_da_intnode_t *)info;
+		count  = state->node_ents;
+		count -= state->node_ents >> 2;
+		count -= INT_GET(node->hdr.count, ARCH_CONVERT);
+		node = bp->data;
+		ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+		count -= INT_GET(node->hdr.count, ARCH_CONVERT);
+		xfs_da_brelse(state->args->trans, bp);
+		if (count >= 0)
+			break;	/* fits with at least 25% to spare */
+	}
+	if (i >= 2) {
+		*action = 0;
+		return(0);
+	}
+
+	/*
+	 * Make altpath point to the block we want to keep (the lower
+	 * numbered block) and path point to the block we want to drop.
+	 */
+	memcpy(&state->altpath, &state->path, sizeof(state->path));
+	if (blkno < blk->blkno) {
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+		if (error) {
+			return(error);
+		}
+		if (retval) {
+			*action = 0;
+			return(0);
+		}
+	} else {
+		error = xfs_da_path_shift(state, &state->path, forward,
+						 0, &retval);
+		if (error) {
+			return(error);
+		}
+		if (retval) {
+			*action = 0;
+			return(0);
+		}
+	}
+	*action = 1;
+	return(0);
+}
+
+/*
+ * Walk back up the tree adjusting hash values as necessary;
+ * when we stop making changes, return.
+ */
+void
+xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
+{
+	xfs_da_state_blk_t *blk;
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	xfs_dahash_t lasthash=0;
+	int level, count;
+
+	level = path->active-1;
+	blk = &path->blk[ level ];
+	switch (blk->magic) {
+#ifdef __KERNEL__
+	case XFS_ATTR_LEAF_MAGIC:
+		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
+		if (count == 0)
+			return;
+		break;
+#endif
+	case XFS_DIR_LEAF_MAGIC:
+		ASSERT(XFS_DIR_IS_V1(state->mp));
+		lasthash = xfs_dir_leaf_lasthash(blk->bp, &count);
+		if (count == 0)
+			return;
+		break;
+	case XFS_DIR2_LEAFN_MAGIC:
+		ASSERT(XFS_DIR_IS_V2(state->mp));
+		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
+		if (count == 0)
+			return;
+		break;
+	case XFS_DA_NODE_MAGIC:
+		lasthash = xfs_da_node_lasthash(blk->bp, &count);
+		if (count == 0)
+			return;
+		break;
+	}
+	for (blk--, level--; level >= 0; blk--, level--) {
+		node = blk->bp->data;
+		ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+		btree = &node->btree[ blk->index ];
+		if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash)
+			break;
+		blk->hashval = lasthash;
+		INT_SET(btree->hashval, ARCH_CONVERT, lasthash);
+		xfs_da_log_buf(state->args->trans, blk->bp,
+				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
+
+		lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+	}
+}
+
+/*
+ * Remove an entry from an intermediate node.
+ */
+STATIC void
+xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
+{
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	int tmp;
+
+	node = drop_blk->bp->data;
+	ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT));
+	ASSERT(drop_blk->index >= 0);
+
+	/*
+	 * Copy over the offending entry, or just zero it out.
+	 */
+	btree = &node->btree[drop_blk->index];
+	if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) {
+		tmp  = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1;
+		tmp *= (uint)sizeof(xfs_da_node_entry_t);
+		memmove(btree, btree + 1, tmp);
+		xfs_da_log_buf(state->args->trans, drop_blk->bp,
+		    XFS_DA_LOGRANGE(node, btree, tmp));
+		btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ];
+	}
+	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
+	xfs_da_log_buf(state->args->trans, drop_blk->bp,
+	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
+	INT_MOD(node->hdr.count, ARCH_CONVERT, -1);
+	xfs_da_log_buf(state->args->trans, drop_blk->bp,
+	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
+
+	/*
+	 * Copy the last hash value from the block to propagate upwards.
+	 */
+	btree--;
+	drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT);
+}
+
+/*
+ * Unbalance the btree elements between two intermediate nodes:
+ * move all Btree elements from one node into another.
+ */
+STATIC void
+xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
+				     xfs_da_state_blk_t *save_blk)
+{
+	xfs_da_intnode_t *drop_node, *save_node;
+	xfs_da_node_entry_t *btree;
+	int tmp;
+	xfs_trans_t *tp;
+
+	drop_node = drop_blk->bp->data;
+	save_node = save_blk->bp->data;
+	ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	tp = state->args->trans;
+
+	/*
+	 * If the dying block has lower hashvals, then move all the
+	 * elements in the remaining block up to make a hole.
+	 */
+	if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) ||
+	    (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
+	     INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))
+	{
+		btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ];
+		tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
+		memmove(btree, &save_node->btree[0], tmp);
+		btree = &save_node->btree[0];
+		xfs_da_log_buf(tp, save_blk->bp,
+			XFS_DA_LOGRANGE(save_node, btree,
+				(INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) *
+				sizeof(xfs_da_node_entry_t)));
+	} else {
+		btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ];
+		xfs_da_log_buf(tp, save_blk->bp,
+			XFS_DA_LOGRANGE(save_node, btree,
+				INT_GET(drop_node->hdr.count, ARCH_CONVERT) *
+				sizeof(xfs_da_node_entry_t)));
+	}
+
+	/*
+	 * Move all the B-tree elements from drop_blk to save_blk.
+	 */
+	tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
+	memcpy(btree, &drop_node->btree[0], tmp);
+	INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT));
+
+	xfs_da_log_buf(tp, save_blk->bp,
+		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
+			sizeof(save_node->hdr)));
+
+	/*
+	 * Save the last hashval in the remaining block for upward propagation.
+	 */
+	save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+}
+
+/*========================================================================
+ * Routines used for finding things in the Btree.
+ *========================================================================*/
+
+/*
+ * Walk down the Btree looking for a particular filename, filling
+ * in the state structure as we go.
+ *
+ * We will set the state structure to point to each of the elements
+ * in each of the nodes where either the hashval is or should be.
+ *
+ * We support duplicate hashvals, so for each entry in the current
+ * node that could contain the desired hashval, descend.  This is a
+ * pruned depth-first tree search.
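+ *
+ * When a leaf block ends exactly at the hashval being looked up, entries
+ * with that hashval may continue in the next sibling leaf; the loop at
+ * the bottom of this routine therefore shifts the path forward one leaf
+ * and retries the lookup there.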
+ */
+int							/* error */
+xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
+{
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *curr;
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	xfs_dablk_t blkno;
+	int probe, span, max, error, retval;
+	xfs_dahash_t hashval;
+	xfs_da_args_t *args;
+
+	args = state->args;
+
+	/*
+	 * Descend through the B-tree searching each level for the right
+	 * node to use, until the right hashval is found.
+	 */
+	if (args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(state->mp))
+		blkno = state->mp->m_dirleafblk;
+	else
+		blkno = 0;
+	for (blk = &state->path.blk[0], state->path.active = 1;
+			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
+			 blk++, state->path.active++) {
+		/*
+		 * Read the next node down in the tree.
+		 */
+		blk->blkno = blkno;
+		error = xfs_da_read_buf(args->trans, args->dp, blkno,
+					-1, &blk->bp, args->whichfork);
+		if (error) {
+			blk->blkno = 0;
+			state->path.active--;
+			return(error);
+		}
+		curr = blk->bp->data;
+		ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC ||
+		       INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+		       INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+
+		/*
+		 * Search an intermediate node for a match.
+		 */
+		blk->magic = INT_GET(curr->magic, ARCH_CONVERT);
+		if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) {
+			node = blk->bp->data;
+			blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+
+			/*
+			 * Binary search.  (note: small blocks will skip loop)
+			 */
+			max = INT_GET(node->hdr.count, ARCH_CONVERT);
+			probe = span = max / 2;
+			hashval = args->hashval;
+			for (btree = &node->btree[probe]; span > 4;
+				   btree = &node->btree[probe]) {
+				span /= 2;
+				if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)
+					probe += span;
+				else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval)
+					probe -= span;
+				else
+					break;
+			}
+			ASSERT((probe >= 0) && (probe < max));
+			ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval));
+
+			/*
+			 * Since we may have duplicate hashval's, find the first
+			 * matching hashval in the node.
+			 */
+			while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) {
+				btree--;
+				probe--;
+			}
+			while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) {
+				btree++;
+				probe++;
+			}
+
+			/*
+			 * Pick the right block to descend on.
+			 */
+			if (probe == max) {
+				blk->index = max-1;
+				blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT);
+			} else {
+				blk->index = probe;
+				blkno = INT_GET(btree->before, ARCH_CONVERT);
+			}
+		}
+#ifdef __KERNEL__
+		else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) {
+			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
+			break;
+		}
+#endif
+		else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) {
+			blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL);
+			break;
+		}
+		else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) {
+			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
+			break;
+		}
+	}
+
+	/*
+	 * A leaf block that ends in the hashval that we are interested in
+	 * (final hashval == search hashval) means that the next block may
+	 * contain more entries with the same hashval; shift forward to the
+	 * next leaf and keep searching.
+	 */
+	for (;;) {
+		if (blk->magic == XFS_DIR_LEAF_MAGIC) {
+			ASSERT(XFS_DIR_IS_V1(state->mp));
+			retval = xfs_dir_leaf_lookup_int(blk->bp, args,
+								  &blk->index);
+		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
+			ASSERT(XFS_DIR_IS_V2(state->mp));
+			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
+							&blk->index, state);
+		}
+#ifdef __KERNEL__
+		else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
+			blk->index = args->index;
+			args->blkno = blk->blkno;
+		}
+#endif
+		if (((retval == ENOENT) || (retval == ENOATTR)) &&
+		    (blk->hashval == args->hashval)) {
+			error = xfs_da_path_shift(state, &state->path, 1, 1,
+							 &retval);
+			if (error)
+				return(error);
+			if (retval == 0) {
+				continue;
+			}
+#ifdef __KERNEL__
+			else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+				/* path_shift() gives ENOENT */
+				retval = XFS_ERROR(ENOATTR);
+			}
+#endif
+		}
+		break;
+	}
+	*result = retval;
+	return(0);
+}
+
+/*========================================================================
+ * Utility routines.
+ *========================================================================*/
+
+/*
+ * Link a new block into a doubly linked list of blocks (of whatever type).
+ */
+int							/* error */
+xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
+			       xfs_da_state_blk_t *new_blk)
+{
+	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
+	xfs_da_args_t *args;
+	int before=0, error;
+	xfs_dabuf_t *bp;
+
+	/*
+	 * Set up environment.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	old_info = old_blk->bp->data;
+	new_info = new_blk->bp->data;
+	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
+	       old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT));
+	ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT));
+	ASSERT(old_blk->magic == new_blk->magic);
+
+	switch (old_blk->magic) {
+#ifdef __KERNEL__
+	case XFS_ATTR_LEAF_MAGIC:
+		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
+		break;
+#endif
+	case XFS_DIR_LEAF_MAGIC:
+		ASSERT(XFS_DIR_IS_V1(state->mp));
+		before = xfs_dir_leaf_order(old_blk->bp, new_blk->bp);
+		break;
+	case XFS_DIR2_LEAFN_MAGIC:
+		ASSERT(XFS_DIR_IS_V2(state->mp));
+		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
+		break;
+	case XFS_DA_NODE_MAGIC:
+		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
+		break;
+	}
+
+	/*
+	 * Link blocks in appropriate order.
+	 */
+	if (before) {
+		/*
+		 * Link new block in before existing block.
+		 */
+		INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno);
+		new_info->back = old_info->back; /* INT_: direct copy */
+		if (INT_GET(old_info->back, ARCH_CONVERT)) {
+			error = xfs_da_read_buf(args->trans, args->dp,
+						INT_GET(old_info->back,
+							ARCH_CONVERT), -1, &bp,
+						args->whichfork);
+			if (error)
+				return(error);
+			ASSERT(bp != NULL);
+			tmp_info = bp->data;
+			ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT));
+			ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno);
+			INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno);
+			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
+			xfs_da_buf_done(bp);
+		}
+		INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno);
+	} else {
+		/*
+		 * Link new block in after existing block.
+		 */
+		new_info->forw = old_info->forw; /* INT_: direct copy */
+		INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno);
+		if (INT_GET(old_info->forw, ARCH_CONVERT)) {
+			error = xfs_da_read_buf(args->trans, args->dp,
+						INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp,
+						args->whichfork);
+			if (error)
+				return(error);
+			ASSERT(bp != NULL);
+			tmp_info = bp->data;
+			ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT)
+				    == INT_GET(old_info->magic, ARCH_CONVERT));
+			ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT)
+				    == old_blk->blkno);
+			INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno);
+			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
+			xfs_da_buf_done(bp);
+		}
+		INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno);
+	}
+
+	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
+	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
+	return(0);
+}
+
+/*
+ * Compare two intermediate nodes for "order".
+ */
+STATIC int
+xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
+{
+	xfs_da_intnode_t *node1, *node2;
+
+	node1 = node1_bp->data;
+	node2 = node2_bp->data;
+	ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) &&
+	       (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC));
+	if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
+	    ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) <
+	      INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) ||
+	     (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
+	      INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
+		return(1);
+	}
+	return(0);
+}
+
+/*
+ * Pick up the last hashvalue from an intermediate node.
+ */
+STATIC uint
+xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
+{
+	xfs_da_intnode_t *node;
+
+	node = bp->data;
+	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	if (count)
+		*count = INT_GET(node->hdr.count, ARCH_CONVERT);
+	if (!node->hdr.count)
+		return(0);
+	return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT));
+}
+
+/*
+ * Unlink a block from a doubly linked list of blocks.
+ */
+int							/* error */
+xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
+				 xfs_da_state_blk_t *save_blk)
+{
+	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
+	xfs_da_args_t *args;
+	xfs_dabuf_t *bp;
+	int error;
+
+	/*
+	 * Set up environment.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	save_info = save_blk->bp->data;
+	drop_info = drop_blk->bp->data;
+	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
+	       save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT));
+	ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT));
+	ASSERT(save_blk->magic == drop_blk->magic);
+	ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) ||
+	       (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno));
+	ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) ||
+	       (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno));
+
+	/*
+	 * Unlink the leaf block from the doubly linked chain of leaves.
+	 */
+	if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) {
+		save_info->back = drop_info->back; /* INT_: direct copy */
+		if (INT_GET(drop_info->back, ARCH_CONVERT)) {
+			error = xfs_da_read_buf(args->trans, args->dp,
+						INT_GET(drop_info->back,
+							ARCH_CONVERT), -1, &bp,
+						args->whichfork);
+			if (error)
+				return(error);
+			ASSERT(bp != NULL);
+			tmp_info = bp->data;
+			ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT));
+			ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno);
+			INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno);
+			xfs_da_log_buf(args->trans, bp, 0,
+						    sizeof(*tmp_info) - 1);
+			xfs_da_buf_done(bp);
+		}
+	} else {
+		save_info->forw = drop_info->forw; /* INT_: direct copy */
+		if (INT_GET(drop_info->forw, ARCH_CONVERT)) {
+			error = xfs_da_read_buf(args->trans, args->dp,
+						INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp,
+						args->whichfork);
+			if (error)
+				return(error);
+			ASSERT(bp != NULL);
+			tmp_info = bp->data;
+			ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT)
+				    == INT_GET(save_info->magic, ARCH_CONVERT));
+			ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT)
+				    == drop_blk->blkno);
+			INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno);
+			xfs_da_log_buf(args->trans, bp, 0,
+						    sizeof(*tmp_info) - 1);
+			xfs_da_buf_done(bp);
+		}
+	}
+
+	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
+	return(0);
+}
+
+/*
+ * Move a path "forward" or "!forward" one block at the current level.
+ *
+ * This routine will adjust a "path" to point to the next block
+ * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
+ * Btree, including updating pointers to the intermediate nodes between
+ * the new bottom and the root.
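+ *
+ * For example, shifting "forward" from the last entry of a subtree walks
+ * up the path until some node index can be incremented, then descends
+ * that sibling along index 0 (or the last index, when moving backward)
+ * until the original depth is reached again.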
+ */
+int							/* error */
+xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
+				 int forward, int release, int *result)
+{
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *info;
+	xfs_da_intnode_t *node;
+	xfs_da_args_t *args;
+	xfs_dablk_t blkno=0;
+	int level, error;
+
+	/*
+	 * Roll up the Btree looking for the first block where our
+	 * current index is not at the edge of the block.  Note that
+	 * we skip the bottom layer because we want the sibling block.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	ASSERT(path != NULL);
+	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	level = (path->active-1) - 1;	/* skip bottom layer in path */
+	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
+		ASSERT(blk->bp != NULL);
+		node = blk->bp->data;
+		ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+		if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) {
+			blk->index++;
+			blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT);
+			break;
+		} else if (!forward && (blk->index > 0)) {
+			blk->index--;
+			blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT);
+			break;
+		}
+	}
+	if (level < 0) {
+		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
+		ASSERT(args->oknoent);
+		return(0);
+	}
+
+	/*
+	 * Roll down the edge of the subtree until we reach the
+	 * same depth we were at originally.
+	 */
+	for (blk++, level++; level < path->active; blk++, level++) {
+		/*
+		 * Release the old block.
+		 * (if it's dirty, trans won't actually let go)
+		 */
+		if (release)
+			xfs_da_brelse(args->trans, blk->bp);
+
+		/*
+		 * Read the next child block.
+		 */
+		blk->blkno = blkno;
+		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
+						     &blk->bp, args->whichfork);
+		if (error)
+			return(error);
+		ASSERT(blk->bp != NULL);
+		info = blk->bp->data;
+		ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC ||
+		       INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+		       INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+		blk->magic = INT_GET(info->magic, ARCH_CONVERT);
+		if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) {
+			node = (xfs_da_intnode_t *)info;
+			blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+			if (forward)
+				blk->index = 0;
+			else
+				blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1;
+			blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT);
+		} else {
+			ASSERT(level == path->active-1);
+			blk->index = 0;
+			switch(blk->magic) {
+#ifdef __KERNEL__
+			case XFS_ATTR_LEAF_MAGIC:
+				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
+								      NULL);
+				break;
+#endif
+			case XFS_DIR_LEAF_MAGIC:
+				ASSERT(XFS_DIR_IS_V1(state->mp));
+				blk->hashval = xfs_dir_leaf_lasthash(blk->bp,
+								     NULL);
+				break;
+			case XFS_DIR2_LEAFN_MAGIC:
+				ASSERT(XFS_DIR_IS_V2(state->mp));
+				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
+								       NULL);
+				break;
+			default:
+				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
+				       blk->magic ==
+				       XFS_DIRX_LEAF_MAGIC(state->mp));
+				break;
+			}
+		}
+	}
+	*result = 0;
+	return(0);
+}
+
+
+/*========================================================================
+ * Utility routines.
+ *========================================================================*/
+
+/*
+ * Implement a simple hash on a character string.
+ * Rotate the hash value by 7 bits, then XOR each character in.
+ * This is implemented with some source-level loop unrolling.
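+ *
+ * For example, for a 5-character name n0..n4 (ignoring 32-bit rotation
+ * wraparound) the unrolled loop leaves hash equal to
+ * (n0<<21)^(n1<<14)^(n2<<7)^n3, and the "case 1" tail then returns
+ * n4 ^ rol32(hash, 7), the same result as five iterations of the
+ * one-byte-at-a-time version below.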
+ */
+xfs_dahash_t
+xfs_da_hashname(uchar_t *name, int namelen)
+{
+	xfs_dahash_t hash;
+
+#ifdef SLOWVERSION
+	/*
+	 * This is the old one-byte-at-a-time version.
+	 */
+	for (hash = 0; namelen > 0; namelen--)
+		hash = *name++ ^ rol32(hash, 7);
+
+	return(hash);
+#else
+	/*
+	 * Do four characters at a time as long as we can.
+	 */
+	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
+		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
+		       (name[3] << 0) ^ rol32(hash, 7 * 4);
+
+	/*
+	 * Now do the rest of the characters.
+	 */
+	switch (namelen) {
+	case 3:
+		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
+		       rol32(hash, 7 * 3);
+	case 2:
+		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
+	case 1:
+		return (name[0] << 0) ^ rol32(hash, 7 * 1);
+	case 0:
+		return hash;
+	}
+	/* NOTREACHED */
+#endif
+	return 0; /* keep gcc happy */
+}
+
+/*
+ * Add a block to the btree ahead of the file.
+ * Return the new block number to the caller.
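+ *
+ * For a version 2 directory the new "block" is really mp->m_dirblkfsbs
+ * filesystem blocks.  We first try to map the space in one contiguous
+ * extent (XFS_BMAPI_CONTIG) and, if that fails, fall back to mapping it
+ * piecewise, checking afterwards that the pieces cover the whole range.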
+ */
+int
+xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
+{
+	xfs_fileoff_t bno, b;
+	xfs_bmbt_irec_t map;
+	xfs_bmbt_irec_t	*mapp;
+	xfs_inode_t *dp;
+	int nmap, error, w, count, c, got, i, mapi;
+	xfs_fsize_t size;
+	xfs_trans_t *tp;
+	xfs_mount_t *mp;
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	w = args->whichfork;
+	tp = args->trans;
+	/*
+	 * For new directories adjust the file offset and block count.
+	 */
+	if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) {
+		bno = mp->m_dirleafblk;
+		count = mp->m_dirblkfsbs;
+	} else {
+		bno = 0;
+		count = 1;
+	}
+	/*
+	 * Find a spot in the file space to put the new block.
+	 */
+	if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w))) {
+		return error;
+	}
+	if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+		ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
+	/*
+	 * Try mapping it in one filesystem block.
+	 */
+	nmap = 1;
+	ASSERT(args->firstblock != NULL);
+	if ((error = xfs_bmapi(tp, dp, bno, count,
+			XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
+			XFS_BMAPI_CONTIG,
+			args->firstblock, args->total, &map, &nmap,
+			args->flist))) {
+		return error;
+	}
+	ASSERT(nmap <= 1);
+	if (nmap == 1) {
+		mapp = &map;
+		mapi = 1;
+	}
+	/*
+	 * If we didn't get it and the block might work if fragmented,
+	 * try without the CONTIG flag.  Loop until we get it all.
+	 */
+	else if (nmap == 0 && count > 1) {
+		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+		for (b = bno, mapi = 0; b < bno + count; ) {
+			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
+			c = (int)(bno + count - b);
+			if ((error = xfs_bmapi(tp, dp, b, c,
+					XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|
+					XFS_BMAPI_METADATA,
+					args->firstblock, args->total,
+					&mapp[mapi], &nmap, args->flist))) {
+				kmem_free(mapp, sizeof(*mapp) * count);
+				return error;
+			}
+			if (nmap < 1)
+				break;
+			mapi += nmap;
+			b = mapp[mapi - 1].br_startoff +
+			    mapp[mapi - 1].br_blockcount;
+		}
+	} else {
+		mapi = 0;
+		mapp = NULL;
+	}
+	/*
+	 * Count the blocks we got, make sure it matches the total.
+	 */
+	for (i = 0, got = 0; i < mapi; i++)
+		got += mapp[i].br_blockcount;
+	if (got != count || mapp[0].br_startoff != bno ||
+	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
+	    bno + count) {
+		if (mapp != &map)
+			kmem_free(mapp, sizeof(*mapp) * count);
+		return XFS_ERROR(ENOSPC);
+	}
+	if (mapp != &map)
+		kmem_free(mapp, sizeof(*mapp) * count);
+	*new_blkno = (xfs_dablk_t)bno;
+	/*
+	 * For version 1 directories, adjust the file size if it changed.
+	 */
+	if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) {
+		ASSERT(mapi == 1);
+		if ((error = xfs_bmap_last_offset(tp, dp, &bno, w)))
+			return error;
+		size = XFS_FSB_TO_B(mp, bno);
+		if (size != dp->i_d.di_size) {
+			dp->i_d.di_size = size;
+			xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Ick.  We need to always be able to remove a btree block, even
+ * if there's no space reservation because the filesystem is full.
+ * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
+ * It swaps the target block with the last block in the file.  The
+ * last block in the file can always be removed, since removing it
+ * cannot cause a bmap btree split.
+ */
+STATIC int
+xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
+		      xfs_dabuf_t **dead_bufp)
+{
+	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
+	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
+	xfs_fileoff_t lastoff;
+	xfs_inode_t *ip;
+	xfs_trans_t *tp;
+	xfs_mount_t *mp;
+	int error, w, entno, level, dead_level;
+	xfs_da_blkinfo_t *dead_info, *sib_info;
+	xfs_da_intnode_t *par_node, *dead_node;
+	xfs_dir_leafblock_t *dead_leaf;
+	xfs_dir2_leaf_t *dead_leaf2;
+	xfs_dahash_t dead_hash;
+
+	dead_buf = *dead_bufp;
+	dead_blkno = *dead_blknop;
+	tp = args->trans;
+	ip = args->dp;
+	w = args->whichfork;
+	ASSERT(w == XFS_DATA_FORK);
+	mp = ip->i_mount;
+	if (XFS_DIR_IS_V2(mp)) {
+		lastoff = mp->m_dirfreeblk;
+		error = xfs_bmap_last_before(tp, ip, &lastoff, w);
+	} else
+		error = xfs_bmap_last_offset(tp, ip, &lastoff, w);
+	if (error)
+		return error;
+	if (unlikely(lastoff == 0)) {
+		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
+				 mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	/*
+	 * Read the last block in the btree space.
+	 */
+	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
+	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
+		return error;
+	/*
+	 * Copy the last block into the dead buffer and log it.
+	 */
+	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
+	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
+	dead_info = dead_buf->data;
+	/*
+	 * Get values from the moved block.
+	 */
+	if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) {
+		ASSERT(XFS_DIR_IS_V1(mp));
+		dead_leaf = (xfs_dir_leafblock_t *)dead_info;
+		dead_level = 0;
+		dead_hash =
+			INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+	} else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) {
+		ASSERT(XFS_DIR_IS_V2(mp));
+		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
+		dead_level = 0;
+		dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+	} else {
+		ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+		dead_node = (xfs_da_intnode_t *)dead_info;
+		dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT);
+		dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+	}
+	sib_buf = par_buf = NULL;
+	/*
+	 * If the moved block has a left sibling, fix up the pointers.
+	 */
+	if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) {
+		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
+			goto done;
+		sib_info = sib_buf->data;
+		if (unlikely(
+		    INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno ||
+		    INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno);
+		xfs_da_log_buf(tp, sib_buf,
+			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
+					sizeof(sib_info->forw)));
+		xfs_da_buf_done(sib_buf);
+		sib_buf = NULL;
+	}
+	/*
+	 * If the moved block has a right sibling, fix up the pointers.
+	 */
+	if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) {
+		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
+			goto done;
+		sib_info = sib_buf->data;
+		if (unlikely(
+		       INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno
+		    || INT_GET(sib_info->magic, ARCH_CONVERT)
+				!= INT_GET(dead_info->magic, ARCH_CONVERT))) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno);
+		xfs_da_log_buf(tp, sib_buf,
+			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
+					sizeof(sib_info->back)));
+		xfs_da_buf_done(sib_buf);
+		sib_buf = NULL;
+	}
+	par_blkno = XFS_DIR_IS_V1(mp) ? 0 : mp->m_dirleafblk;
+	level = -1;
+	/*
+	 * Walk down the tree looking for the parent of the moved block.
+	 */
+	for (;;) {
+		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
+			goto done;
+		par_node = par_buf->data;
+		if (unlikely(
+		    INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC ||
+		    (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		level = INT_GET(par_node->hdr.level, ARCH_CONVERT);
+		for (entno = 0;
+		     entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) &&
+		     INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash;
+		     entno++)
+			continue;
+		if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT);
+		if (level == dead_level + 1)
+			break;
+		xfs_da_brelse(tp, par_buf);
+		par_buf = NULL;
+	}
+	/*
+	 * We're in the right parent block.
+	 * Look for the right entry.
+	 */
+	for (;;) {
+		for (;
+		     entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) &&
+		     INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno;
+		     entno++)
+			continue;
+		if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT))
+			break;
+		par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT);
+		xfs_da_brelse(tp, par_buf);
+		par_buf = NULL;
+		if (unlikely(par_blkno == 0)) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
+			goto done;
+		par_node = par_buf->data;
+		if (unlikely(
+		    INT_GET(par_node->hdr.level, ARCH_CONVERT) != level ||
+		    INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) {
+			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
+					 XFS_ERRLEVEL_LOW, mp);
+			error = XFS_ERROR(EFSCORRUPTED);
+			goto done;
+		}
+		entno = 0;
+	}
+	/*
+	 * Update the parent entry pointing to the moved block.
+	 */
+	INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno);
+	xfs_da_log_buf(tp, par_buf,
+		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
+				sizeof(par_node->btree[entno].before)));
+	xfs_da_buf_done(par_buf);
+	xfs_da_buf_done(dead_buf);
+	*dead_blknop = last_blkno;
+	*dead_bufp = last_buf;
+	return 0;
+done:
+	if (par_buf)
+		xfs_da_brelse(tp, par_buf);
+	if (sib_buf)
+		xfs_da_brelse(tp, sib_buf);
+	xfs_da_brelse(tp, last_buf);
+	return error;
+}
+
+/*
+ * Remove a btree block from a directory or attribute.
+ */
+int
+xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
+		    xfs_dabuf_t *dead_buf)
+{
+	xfs_inode_t *dp;
+	int done, error, w, count;
+	xfs_fileoff_t bno;
+	xfs_fsize_t size;
+	xfs_trans_t *tp;
+	xfs_mount_t *mp;
+
+	dp = args->dp;
+	w = args->whichfork;
+	tp = args->trans;
+	mp = dp->i_mount;
+	if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+		count = mp->m_dirblkfsbs;
+	else
+		count = 1;
+	for (;;) {
+		/*
+		 * Remove extents.  If we get ENOSPC for a dir we have to move
+		 * the last block to the place we want to kill.
+		 */
+		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
+				XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA,
+				0, args->firstblock, args->flist,
+				&done)) == ENOSPC) {
+			if (w != XFS_DATA_FORK)
+				goto done;
+			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
+					&dead_buf)))
+				goto done;
+		} else if (error)
+			goto done;
+		else
+			break;
+	}
+	ASSERT(done);
+	xfs_da_binval(tp, dead_buf);
+	/*
+	 * Adjust the directory size for version 1.
+	 */
+	if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) {
+		if ((error = xfs_bmap_last_offset(tp, dp, &bno, w)))
+			return error;
+		size = XFS_FSB_TO_B(dp->i_mount, bno);
+		if (size != dp->i_d.di_size) {
+			dp->i_d.di_size = size;
+			xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+		}
+	}
+	return 0;
+done:
+	xfs_da_binval(tp, dead_buf);
+	return error;
+}
+
+/*
+ * See if the mapping(s) for this btree block are valid, i.e.
+ * don't contain holes, are logically contiguous, and cover the whole range.
+ */
+STATIC int
+xfs_da_map_covers_blocks(
+	int		nmap,
+	xfs_bmbt_irec_t	*mapp,
+	xfs_dablk_t	bno,
+	int		count)
+{
+	int		i;
+	xfs_fileoff_t	off;
+
+	for (i = 0, off = bno; i < nmap; i++) {
+		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
+		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
+			return 0;
+		}
+		if (off != mapp[i].br_startoff) {
+			return 0;
+		}
+		off += mapp[i].br_blockcount;
+	}
+	return off == bno + count;
+}
+
+/*
+ * Make a dabuf.
+ * Used for get_buf, read_buf, read_bufr, and reada_buf.
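+ *
+ * The caller argument selects the behaviour: 0 gets a new buffer,
+ * 1 reads the block and verifies its magic number, 3 only starts
+ * readahead; 2 is presumably the user-space read_bufr variant, which
+ * is compiled out in the kernel build.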
+ */
+STATIC int
+xfs_da_do_buf(
+	xfs_trans_t	*trans,
+	xfs_inode_t	*dp,
+	xfs_dablk_t	bno,
+	xfs_daddr_t	*mappedbnop,
+	xfs_dabuf_t	**bpp,
+	int		whichfork,
+	int		caller,
+	inst_t		*ra)
+{
+	xfs_buf_t	*bp = NULL;
+	xfs_buf_t	**bplist;
+	int		error=0;
+	int		i;
+	xfs_bmbt_irec_t	map;
+	xfs_bmbt_irec_t	*mapp;
+	xfs_daddr_t	mappedbno;
+	xfs_mount_t	*mp;
+	int		nbplist=0;
+	int		nfsb;
+	int		nmap;
+	xfs_dabuf_t	*rbp;
+
+	mp = dp->i_mount;
+	if (whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+		nfsb = mp->m_dirblkfsbs;
+	else
+		nfsb = 1;
+	mappedbno = *mappedbnop;
+	/*
+	 * Caller doesn't have a mapping.  -2 means don't complain
+	 * if we land in a hole.
+	 */
+	if (mappedbno == -1 || mappedbno == -2) {
+		/*
+		 * Optimize the one-block case.
+		 */
+		if (nfsb == 1) {
+			xfs_fsblock_t	fsb;
+
+			if ((error =
+			    xfs_bmapi_single(trans, dp, whichfork, &fsb,
+				    (xfs_fileoff_t)bno))) {
+				return error;
+			}
+			mapp = &map;
+			if (fsb == NULLFSBLOCK) {
+				nmap = 0;
+			} else {
+				map.br_startblock = fsb;
+				map.br_startoff = (xfs_fileoff_t)bno;
+				map.br_blockcount = 1;
+				nmap = 1;
+			}
+		} else {
+			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
+			nmap = nfsb;
+			if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
+					nfsb,
+					XFS_BMAPI_METADATA |
+						XFS_BMAPI_AFLAG(whichfork),
+					NULL, 0, mapp, &nmap, NULL)))
+				goto exit0;
+		}
+	} else {
+		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
+		map.br_startoff = (xfs_fileoff_t)bno;
+		map.br_blockcount = nfsb;
+		mapp = &map;
+		nmap = 1;
+	}
+	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
+		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
+		if (unlikely(error == EFSCORRUPTED)) {
+			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+				int	i;
+				cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
+					(long long)bno);
+				cmn_err(CE_ALERT, "dir: inode %lld\n",
+					(long long)dp->i_ino);
+				for (i = 0; i < nmap; i++) {
+					cmn_err(CE_ALERT,
+						"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n",
+						i,
+						(long long)mapp[i].br_startoff,
+						(long long)mapp[i].br_startblock,
+						(long long)mapp[i].br_blockcount,
+						mapp[i].br_state);
+				}
+			}
+			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
+					 XFS_ERRLEVEL_LOW, mp);
+		}
+		goto exit0;
+	}
+	if (caller != 3 && nmap > 1) {
+		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
+		nbplist = 0;
+	} else
+		bplist = NULL;
+	/*
+	 * Turn the mapping(s) into buffer(s).
+	 */
+	for (i = 0; i < nmap; i++) {
+		int	nmapped;
+
+		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
+		if (i == 0)
+			*mappedbnop = mappedbno;
+		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
+		switch (caller) {
+		case 0:
+			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
+				mappedbno, nmapped, 0);
+			error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
+			break;
+		case 1:
+#ifndef __KERNEL__
+		case 2:
+#endif
+			bp = NULL;
+			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
+				mappedbno, nmapped, 0, &bp);
+			break;
+#ifdef __KERNEL__
+		case 3:
+			xfs_baread(mp->m_ddev_targp, mappedbno, nmapped);
+			error = 0;
+			bp = NULL;
+			break;
+#endif
+		}
+		if (error) {
+			if (bp)
+				xfs_trans_brelse(trans, bp);
+			goto exit1;
+		}
+		if (!bp)
+			continue;
+		if (caller == 1) {
+			if (whichfork == XFS_ATTR_FORK) {
+				XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
+						XFS_ATTR_BTREE_REF);
+			} else {
+				XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
+						XFS_DIR_BTREE_REF);
+			}
+		}
+		if (bplist) {
+			bplist[nbplist++] = bp;
+		}
+	}
+	/*
+	 * Build a dabuf structure.
+	 */
+	if (bplist) {
+		rbp = xfs_da_buf_make(nbplist, bplist, ra);
+	} else if (bp)
+		rbp = xfs_da_buf_make(1, &bp, ra);
+	else
+		rbp = NULL;
+	/*
+	 * For read_buf, check the magic number.
+	 */
+	if (caller == 1) {
+		xfs_dir2_data_t		*data;
+		xfs_dir2_free_t		*free;
+		xfs_da_blkinfo_t	*info;
+		uint			magic, magic1;
+
+		info = rbp->data;
+		data = rbp->data;
+		free = rbp->data;
+		magic = INT_GET(info->magic, ARCH_CONVERT);
+		magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT);
+		if (unlikely(
+		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
+				   (magic != XFS_DIR_LEAF_MAGIC) &&
+				   (magic != XFS_ATTR_LEAF_MAGIC) &&
+				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
+				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
+				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
+				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
+				   (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC),
+				mp, XFS_ERRTAG_DA_READ_BUF,
+				XFS_RANDOM_DA_READ_BUF))) {
+			xfs_buftrace("DA READ ERROR", rbp->bps[0]);
+			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
+					     XFS_ERRLEVEL_LOW, mp, info);
+			error = XFS_ERROR(EFSCORRUPTED);
+			xfs_da_brelse(trans, rbp);
+			nbplist = 0;
+			goto exit1;
+		}
+	}
+	if (bplist) {
+		kmem_free(bplist, sizeof(*bplist) * nmap);
+	}
+	if (mapp != &map) {
+		kmem_free(mapp, sizeof(*mapp) * nfsb);
+	}
+	if (bpp)
+		*bpp = rbp;
+	return 0;
+exit1:
+	if (bplist) {
+		for (i = 0; i < nbplist; i++)
+			xfs_trans_brelse(trans, bplist[i]);
+		kmem_free(bplist, sizeof(*bplist) * nmap);
+	}
+exit0:
+	if (mapp != &map)
+		kmem_free(mapp, sizeof(*mapp) * nfsb);
+	if (bpp)
+		*bpp = NULL;
+	return error;
+}
+
+/*
+ * Get a buffer for the dir/attr block.
+ */
+int
+xfs_da_get_buf(
+	xfs_trans_t	*trans,
+	xfs_inode_t	*dp,
+	xfs_dablk_t	bno,
+	xfs_daddr_t		mappedbno,
+	xfs_dabuf_t	**bpp,
+	int		whichfork)
+{
+	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0,
+						 (inst_t *)__return_address);
+}
+
+/*
+ * Get a buffer for the dir/attr block, fill in the contents.
+ */
+int
+xfs_da_read_buf(
+	xfs_trans_t	*trans,
+	xfs_inode_t	*dp,
+	xfs_dablk_t	bno,
+	xfs_daddr_t		mappedbno,
+	xfs_dabuf_t	**bpp,
+	int		whichfork)
+{
+	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1,
+		(inst_t *)__return_address);
+}
+
+/*
+ * Readahead the dir/attr block.
+ */
+xfs_daddr_t
+xfs_da_reada_buf(
+	xfs_trans_t	*trans,
+	xfs_inode_t	*dp,
+	xfs_dablk_t	bno,
+	int		whichfork)
+{
+	xfs_daddr_t		rval;
+
+	rval = -1;
+	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3,
+			(inst_t *)__return_address))
+		return -1;
+	else
+		return rval;
+}
+
+/*
+ * Calculate the number of bits needed to hold i different values.
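+ * For example, xfs_da_log2_roundup(5) == 3, since 1 << 3 == 8 >= 5.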
+ */
+uint
+xfs_da_log2_roundup(uint i)
+{
+	uint rval;
+
+	for (rval = 0; rval < NBBY * sizeof(i); rval++) {
+		if ((1 << rval) >= i)
+			break;
+	}
+	return(rval);
+}
+
+kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
+kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */
+
+/*
+ * Allocate a dir-state structure.
+ * We don't put them on the stack since they're large.
+ */
+xfs_da_state_t *
+xfs_da_state_alloc(void)
+{
+	return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP);
+}
+
+/*
+ * Kill the altpath contents of a da-state structure.
+ */
+void
+xfs_da_state_kill_altpath(xfs_da_state_t *state)
+{
+	int	i;
+
+	for (i = 0; i < state->altpath.active; i++) {
+		if (state->altpath.blk[i].bp) {
+			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
+				xfs_da_buf_done(state->altpath.blk[i].bp);
+			state->altpath.blk[i].bp = NULL;
+		}
+	}
+	state->altpath.active = 0;
+}
+
+/*
+ * Free a da-state structure.
+ */
+void
+xfs_da_state_free(xfs_da_state_t *state)
+{
+	int	i;
+
+	xfs_da_state_kill_altpath(state);
+	for (i = 0; i < state->path.active; i++) {
+		if (state->path.blk[i].bp)
+			xfs_da_buf_done(state->path.blk[i].bp);
+	}
+	if (state->extravalid && state->extrablk.bp)
+		xfs_da_buf_done(state->extrablk.bp);
+#ifdef DEBUG
+	memset((char *)state, 0, sizeof(*state));
+#endif /* DEBUG */
+	kmem_zone_free(xfs_da_state_zone, state);
+}
+
+#ifdef XFS_DABUF_DEBUG
+xfs_dabuf_t	*xfs_dabuf_global_list;
+lock_t		xfs_dabuf_global_lock;
+#endif
+
+/*
+ * Create a dabuf.
+ */
+/* ARGSUSED */
+STATIC xfs_dabuf_t *
+xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
+{
+	xfs_buf_t	*bp;
+	xfs_dabuf_t	*dabuf;
+	int		i;
+	int		off;
+
+	if (nbuf == 1)
+		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP);
+	else
+		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP);
+	dabuf->dirty = 0;
+#ifdef XFS_DABUF_DEBUG
+	dabuf->ra = ra;
+	dabuf->target = XFS_BUF_TARGET(bps[0]);
+	dabuf->blkno = XFS_BUF_ADDR(bps[0]);
+#endif
+	if (nbuf == 1) {
+		dabuf->nbuf = 1;
+		bp = bps[0];
+		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
+		dabuf->data = XFS_BUF_PTR(bp);
+		dabuf->bps[0] = bp;
+	} else {
+		dabuf->nbuf = nbuf;
+		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
+			dabuf->bps[i] = bp = bps[i];
+			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
+		}
+		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
+		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
+			bp = bps[i];
+			memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
+				XFS_BUF_COUNT(bp));
+		}
+	}
+#ifdef XFS_DABUF_DEBUG
+	{
+		SPLDECL(s);
+		xfs_dabuf_t	*p;
+
+		s = mutex_spinlock(&xfs_dabuf_global_lock);
+		for (p = xfs_dabuf_global_list; p; p = p->next) {
+			ASSERT(p->blkno != dabuf->blkno ||
+			       p->target != dabuf->target);
+		}
+		dabuf->prev = NULL;
+		if (xfs_dabuf_global_list)
+			xfs_dabuf_global_list->prev = dabuf;
+		dabuf->next = xfs_dabuf_global_list;
+		xfs_dabuf_global_list = dabuf;
+		mutex_spinunlock(&xfs_dabuf_global_lock, s);
+	}
+#endif
+	return dabuf;
+}
+
+/*
+ * Un-dirty a dabuf.
+ */
+STATIC void
+xfs_da_buf_clean(xfs_dabuf_t *dabuf)
+{
+	xfs_buf_t	*bp;
+	int		i;
+	int		off;
+
+	if (dabuf->dirty) {
+		ASSERT(dabuf->nbuf > 1);
+		dabuf->dirty = 0;
+		for (i = off = 0; i < dabuf->nbuf;
+				i++, off += XFS_BUF_COUNT(bp)) {
+			bp = dabuf->bps[i];
+			memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
+				XFS_BUF_COUNT(bp));
+		}
+	}
+}
+
+/*
+ * Release a dabuf.
+ */
+void
+xfs_da_buf_done(xfs_dabuf_t *dabuf)
+{
+	ASSERT(dabuf);
+	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+	if (dabuf->dirty)
+		xfs_da_buf_clean(dabuf);
+	if (dabuf->nbuf > 1)
+		kmem_free(dabuf->data, BBTOB(dabuf->bbcount));
+#ifdef XFS_DABUF_DEBUG
+	{
+		SPLDECL(s);
+
+		s = mutex_spinlock(&xfs_dabuf_global_lock);
+		if (dabuf->prev)
+			dabuf->prev->next = dabuf->next;
+		else
+			xfs_dabuf_global_list = dabuf->next;
+		if (dabuf->next)
+			dabuf->next->prev = dabuf->prev;
+		mutex_spinunlock(&xfs_dabuf_global_lock, s);
+	}
+	memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
+#endif
+	if (dabuf->nbuf == 1)
+		kmem_zone_free(xfs_dabuf_zone, dabuf);
+	else
+		kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf));
+}
+
+/*
+ * Log transaction from a dabuf.
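+ *
+ * If the dabuf spans several underlying buffers, the byte range
+ * [first, last] is split into per-buffer sub-ranges.  For example, with
+ * two hypothetical 4096-byte buffers, logging bytes 4000..4199 logs
+ * bytes 4000..4095 of the first buffer and bytes 0..103 of the second.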
+ */
+void
+xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
+{
+	xfs_buf_t	*bp;
+	uint		f;
+	int		i;
+	uint		l;
+	int		off;
+
+	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+	if (dabuf->nbuf == 1) {
+		ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
+		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
+		return;
+	}
+	dabuf->dirty = 1;
+	ASSERT(first <= last);
+	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
+		bp = dabuf->bps[i];
+		f = off;
+		l = f + XFS_BUF_COUNT(bp) - 1;
+		if (f < first)
+			f = first;
+		if (l > last)
+			l = last;
+		if (f <= l)
+			xfs_trans_log_buf(tp, bp, f - off, l - off);
+		/*
+		 * B_DONE is set by xfs_trans_log_buf.
+		 * If we don't set it on a new buffer (get, not read) and
+		 * nothing is ever put in the buffer, it won't be set, the
+		 * buffer is released into the cache at commit, and a
+		 * subsequent read will then fail.
+		 */
+		else if (!(XFS_BUF_ISDONE(bp)))
+		  XFS_BUF_DONE(bp);
+	}
+	ASSERT(last < off);
+}
+
+/*
+ * Release dabuf from a transaction.
+ * Have to free up the dabuf before the buffers are released,
+ * since the synchronization on the dabuf is really the lock on the buffer.
+ */
+void
+xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
+{
+	xfs_buf_t	*bp;
+	xfs_buf_t	**bplist;
+	int		i;
+	int		nbuf;
+
+	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+	if ((nbuf = dabuf->nbuf) == 1) {
+		bplist = &bp;
+		bp = dabuf->bps[0];
+	} else {
+		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
+		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
+	}
+	xfs_da_buf_done(dabuf);
+	for (i = 0; i < nbuf; i++)
+		xfs_trans_brelse(tp, bplist[i]);
+	if (bplist != &bp)
+		kmem_free(bplist, nbuf * sizeof(*bplist));
+}
+
+/*
+ * Invalidate dabuf from a transaction.
+ */
+void
+xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
+{
+	xfs_buf_t	*bp;
+	xfs_buf_t	**bplist;
+	int		i;
+	int		nbuf;
+
+	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+	if ((nbuf = dabuf->nbuf) == 1) {
+		bplist = &bp;
+		bp = dabuf->bps[0];
+	} else {
+		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
+		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
+	}
+	xfs_da_buf_done(dabuf);
+	for (i = 0; i < nbuf; i++)
+		xfs_trans_binval(tp, bplist[i]);
+	if (bplist != &bp)
+		kmem_free(bplist, nbuf * sizeof(*bplist));
+}
+
+/*
+ * Get the first daddr from a dabuf.
+ */
+xfs_daddr_t
+xfs_da_blkno(xfs_dabuf_t *dabuf)
+{
+	ASSERT(dabuf->nbuf);
+	ASSERT(dabuf->data);
+	return XFS_BUF_ADDR(dabuf->bps[0]);
+}
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
new file mode 100644
index 0000000..9fc699d
--- /dev/null
+++ b/fs/xfs/xfs_da_btree.h
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DA_BTREE_H__
+#define	__XFS_DA_BTREE_H__
+
+struct xfs_buf;
+struct xfs_bmap_free;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+struct zone;
+
+/*========================================================================
+ * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
+ *
+ * It is used to manage a doubly linked list of all blocks at the same
+ * level in the Btree, and to identify which type of block this is.
+ */
+#define XFS_DA_NODE_MAGIC	0xfebe	/* magic number: non-leaf blocks */
+#define XFS_DIR_LEAF_MAGIC	0xfeeb	/* magic number: directory leaf blks */
+#define XFS_ATTR_LEAF_MAGIC	0xfbee	/* magic number: attribute leaf blks */
+#define	XFS_DIR2_LEAF1_MAGIC	0xd2f1	/* magic number: v2 dirlf single blks */
+#define	XFS_DIR2_LEAFN_MAGIC	0xd2ff	/* magic number: v2 dirlf multi blks */
+
+#define	XFS_DIRX_LEAF_MAGIC(mp)	\
+	(XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC)
+
+typedef struct xfs_da_blkinfo {
+	xfs_dablk_t forw;			/* following block in list */
+	xfs_dablk_t back;			/* previous block in list */
+	__uint16_t magic;			/* validity check on block */
+	__uint16_t pad;				/* unused */
+} xfs_da_blkinfo_t;
+
+/*
+ * This is the structure of the root and intermediate nodes in the Btree.
+ * The leaf nodes are defined above.
+ *
+ * Entries are not packed.
+ *
+ * Since we have duplicate keys, use a binary search but always follow
+ * all matches in the block, not just the first match found.
+ */
+#define	XFS_DA_NODE_MAXDEPTH	5	/* max depth of Btree */
+
+typedef struct xfs_da_intnode {
+	struct xfs_da_node_hdr {	/* constant-structure header block */
+		xfs_da_blkinfo_t info;	/* block type, links, etc. */
+		__uint16_t count;	/* count of active entries */
+		__uint16_t level;	/* level above leaves (leaf == 0) */
+	} hdr;
+	struct xfs_da_node_entry {
+		xfs_dahash_t hashval;	/* hash value for this descendant */
+		xfs_dablk_t before;	/* Btree block before this key */
+	} btree[1];			/* variable sized array of keys */
+} xfs_da_intnode_t;
+typedef struct xfs_da_node_hdr xfs_da_node_hdr_t;
+typedef struct xfs_da_node_entry xfs_da_node_entry_t;
+
+#define XFS_DA_MAXHASH	((xfs_dahash_t)-1) /* largest valid hash value */
+
+/*
+ * Macros used by directory code to interface to the filesystem.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBSIZE)
+int xfs_lbsize(struct xfs_mount *mp);
+#define	XFS_LBSIZE(mp)			xfs_lbsize(mp)
+#else
+#define	XFS_LBSIZE(mp)	((mp)->m_sb.sb_blocksize)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBLOG)
+int xfs_lblog(struct xfs_mount *mp);
+#define	XFS_LBLOG(mp)			xfs_lblog(mp)
+#else
+#define	XFS_LBLOG(mp)	((mp)->m_sb.sb_blocklog)
+#endif
+
+/*
+ * Macros used by directory code to interface to the kernel
+ */
+
+/*
+ * Macros used to manipulate directory off_t's
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_BNOENTRY)
+__uint32_t xfs_da_make_bnoentry(struct xfs_mount *mp, xfs_dablk_t bno,
+				int entry);
+#define	XFS_DA_MAKE_BNOENTRY(mp,bno,entry)	\
+	xfs_da_make_bnoentry(mp,bno,entry)
+#else
+#define	XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \
+	(((bno) << (mp)->m_dircook_elog) | (entry))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_COOKIE)
+xfs_off_t xfs_da_make_cookie(struct xfs_mount *mp, xfs_dablk_t bno, int entry,
+				xfs_dahash_t hash);
+#define	XFS_DA_MAKE_COOKIE(mp,bno,entry,hash)	\
+	xfs_da_make_cookie(mp,bno,entry,hash)
+#else
+#define	XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \
+	(((xfs_off_t)XFS_DA_MAKE_BNOENTRY(mp, bno, entry) << 32) | (hash))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_HASH)
+xfs_dahash_t xfs_da_cookie_hash(struct xfs_mount *mp, xfs_off_t cookie);
+#define	XFS_DA_COOKIE_HASH(mp,cookie)		xfs_da_cookie_hash(mp,cookie)
+#else
+#define	XFS_DA_COOKIE_HASH(mp,cookie)	((xfs_dahash_t)(cookie))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_BNO)
+xfs_dablk_t xfs_da_cookie_bno(struct xfs_mount *mp, xfs_off_t cookie);
+#define	XFS_DA_COOKIE_BNO(mp,cookie)		xfs_da_cookie_bno(mp,cookie)
+#else
+#define	XFS_DA_COOKIE_BNO(mp,cookie) \
+	(((xfs_off_t)(cookie) >> 31) == -1LL ? \
+		(xfs_dablk_t)0 : \
+		(xfs_dablk_t)((xfs_off_t)(cookie) >> ((mp)->m_dircook_elog + 32)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_ENTRY)
+int xfs_da_cookie_entry(struct xfs_mount *mp, xfs_off_t cookie);
+#define	XFS_DA_COOKIE_ENTRY(mp,cookie)		xfs_da_cookie_entry(mp,cookie)
+#else
+#define	XFS_DA_COOKIE_ENTRY(mp,cookie) \
+	(((xfs_off_t)(cookie) >> 31) == -1LL ? \
+		(xfs_dablk_t)0 : \
+		(xfs_dablk_t)(((xfs_off_t)(cookie) >> 32) & \
+			      ((1 << (mp)->m_dircook_elog) - 1)))
+#endif
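
Taken together, these macros pack a directory block number, an entry index and a hash value into one 64-bit cookie: the hash occupies the low 32 bits, and the high 32 bits hold (bno << m_dircook_elog) | entry. A minimal user-space sketch of the packing and unpacking, assuming a hypothetical m_dircook_elog of 8 and ignoring the all-ones EOF special case handled by XFS_DA_COOKIE_BNO/ENTRY:

/*
 * Illustrative sketch of the cookie layout built by XFS_DA_MAKE_COOKIE()
 * and taken apart by XFS_DA_COOKIE_HASH/_BNO/_ENTRY.
 */
#include <stdio.h>
#include <stdint.h>

#define DIRCOOK_ELOG	8	/* hypothetical mp->m_dircook_elog */

int main(void)
{
	uint32_t bno = 5, entry = 3, hash = 0xdeadbeef;

	/* XFS_DA_MAKE_BNOENTRY / XFS_DA_MAKE_COOKIE */
	uint64_t cookie = ((uint64_t)((bno << DIRCOOK_ELOG) | entry) << 32) | hash;

	/* XFS_DA_COOKIE_HASH / _BNO / _ENTRY */
	uint32_t h = (uint32_t)cookie;
	uint32_t b = (uint32_t)(cookie >> (DIRCOOK_ELOG + 32));
	uint32_t e = (uint32_t)((cookie >> 32) & ((1 << DIRCOOK_ELOG) - 1));

	printf("cookie 0x%llx -> bno %u entry %u hash 0x%x\n",
	       (unsigned long long)cookie, b, e, h);
	return 0;
}
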
+
+
+/*========================================================================
+ * Btree searching and modification structure definitions.
+ *========================================================================*/
+
+/*
+ * Structure to ease passing around component names.
+ */
+typedef struct xfs_da_args {
+	uchar_t		*name;		/* string (may not be NULL terminated) */
+	int		namelen;	/* length of string (may have no NULL) */
+	uchar_t		*value;		/* set of bytes (may contain NULLs) */
+	int		valuelen;	/* length of value */
+	int		flags;		/* argument flags (eg: ATTR_NOCREATE) */
+	xfs_dahash_t	hashval;	/* hash value of name */
+	xfs_ino_t	inumber;	/* input/output inode number */
+	struct xfs_inode *dp;		/* directory inode to manipulate */
+	xfs_fsblock_t	*firstblock;	/* ptr to firstblock for bmap calls */
+	struct xfs_bmap_free *flist;	/* ptr to freelist for bmap_finish */
+	struct xfs_trans *trans;	/* current trans (changes over time) */
+	xfs_extlen_t	total;		/* total blocks needed, for 1st bmap */
+	int		whichfork;	/* data or attribute fork */
+	xfs_dablk_t	blkno;		/* blkno of attr leaf of interest */
+	int		index;		/* index of attr of interest in blk */
+	xfs_dablk_t	rmtblkno;	/* remote attr value starting blkno */
+	int		rmtblkcnt;	/* remote attr value block count */
+	xfs_dablk_t	blkno2;		/* blkno of 2nd attr leaf of interest */
+	int		index2;		/* index of 2nd attr in blk */
+	xfs_dablk_t	rmtblkno2;	/* remote attr value starting blkno */
+	int		rmtblkcnt2;	/* remote attr value block count */
+	unsigned char	justcheck;	/* T/F: check for ok with no space */
+	unsigned char	rename;		/* T/F: this is an atomic rename op */
+	unsigned char	addname;	/* T/F: this is an add operation */
+	unsigned char	oknoent;	/* T/F: ok to return ENOENT, else die */
+} xfs_da_args_t;
+
+/*
+ * Structure to describe buffer(s) for a block.
+ * This is needed in the directory version 2 format case, when
+ * multiple non-contiguous fsblocks might be needed to cover one
+ * logical directory block.
+ * If the buffer count is 1 then the data pointer points to the
+ * same place as the b_addr field for the buffer, else to kmem_alloced memory.
+ */
+typedef struct xfs_dabuf {
+	int		nbuf;		/* number of buffer pointers present */
+	short		dirty;		/* data needs to be copied back */
+	short		bbcount;	/* how large is data in bbs */
+	void		*data;		/* pointer for buffers' data */
+#ifdef XFS_DABUF_DEBUG
+	inst_t		*ra;		/* return address of caller to make */
+	struct xfs_dabuf *next;		/* next in global chain */
+	struct xfs_dabuf *prev;		/* previous in global chain */
+	struct xfs_buftarg *target;	/* device for buffer */
+	xfs_daddr_t	blkno;		/* daddr first in bps[0] */
+#endif
+	struct xfs_buf	*bps[1];	/* actually nbuf of these */
+} xfs_dabuf_t;
+#define	XFS_DA_BUF_SIZE(n)	\
+	(sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
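
The bps[1] member is the usual one-element-array trailer idiom: the descriptor is allocated with XFS_DA_BUF_SIZE(n), which adds room for n - 1 pointers beyond the one declared. A small user-space sketch of that allocation arithmetic; the structure and names are illustrative, not the kernel types:

/*
 * Sketch of the XFS_DA_BUF_SIZE() struct-trailer idiom: bps[] is declared
 * with one element and nbuf - 1 extra pointers are tacked onto the
 * allocation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dabuf_like {
	int	nbuf;
	void	*data;
	void	*bps[1];	/* actually nbuf of these */
};

#define DABUF_SIZE(n) \
	(sizeof(struct dabuf_like) + sizeof(void *) * ((n) - 1))

int main(void)
{
	int nbuf = 4;
	struct dabuf_like *d = malloc(DABUF_SIZE(nbuf));

	if (!d)
		return 1;
	memset(d, 0, DABUF_SIZE(nbuf));
	d->nbuf = nbuf;
	d->bps[nbuf - 1] = NULL;	/* last slot is valid storage */
	printf("allocated %zu bytes for %d buffer pointers\n",
	       DABUF_SIZE(nbuf), nbuf);
	free(d);
	return 0;
}
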
+
+#ifdef XFS_DABUF_DEBUG
+extern xfs_dabuf_t	*xfs_dabuf_global_list;
+#endif
+
+/*
+ * Storage for holding state during Btree searches and split/join ops.
+ *
+ * Only need space for 5 intermediate nodes.  With a minimum of 62-way
+ * fanout to the Btree, five levels support 62^5 (over 900 million)
+ * directory blocks, which is slightly more than enough.
+ */
+typedef struct xfs_da_state_blk {
+	xfs_dabuf_t	*bp;		/* buffer containing block */
+	xfs_dablk_t	blkno;		/* filesystem blkno of buffer */
+	xfs_daddr_t	disk_blkno;	/* on-disk blkno (in BBs) of buffer */
+	int		index;		/* relevant index into block */
+	xfs_dahash_t	hashval;	/* last hash value in block */
+	int		magic;		/* blk's magic number, ie: blk type */
+} xfs_da_state_blk_t;
+
+typedef struct xfs_da_state_path {
+	int			active;		/* number of active levels */
+	xfs_da_state_blk_t	blk[XFS_DA_NODE_MAXDEPTH];
+} xfs_da_state_path_t;
+
+typedef struct xfs_da_state {
+	xfs_da_args_t		*args;		/* filename arguments */
+	struct xfs_mount	*mp;		/* filesystem mount point */
+	unsigned int		blocksize;	/* logical block size */
+	unsigned int		node_ents;	/* how many entries in danode */
+	xfs_da_state_path_t	path;		/* search/split paths */
+	xfs_da_state_path_t	altpath;	/* alternate path for join */
+	unsigned char		inleaf;		/* insert into 1->lf, 0->splf */
+	unsigned char		extravalid;	/* T/F: extrablk is in use */
+	unsigned char		extraafter;	/* T/F: extrablk is after new */
+	xfs_da_state_blk_t	extrablk;	/* for double-splits on leafs */
+						/* for dirv2 extrablk is data */
+} xfs_da_state_t;
+
+/*
+ * Utility macros to aid in logging changed structure fields.
+ */
+#define XFS_DA_LOGOFF(BASE, ADDR)	((char *)(ADDR) - (char *)(BASE))
+#define XFS_DA_LOGRANGE(BASE, ADDR, SIZE)	\
+		(uint)(XFS_DA_LOGOFF(BASE, ADDR)), \
+		(uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1)
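
XFS_DA_LOGRANGE expands to the (first, last) byte-offset pair that xfs_da_log_buf() expects, computed from a field's position within the block, so callers can log just the bytes they changed. A user-space sketch of the same computation; the structure and field below are illustrative, not the on-disk layout:

/*
 * User-space sketch of what XFS_DA_LOGRANGE() computes: the inclusive
 * (first, last) byte offsets of one field relative to the containing block.
 */
#include <stdio.h>
#include <stdint.h>

#define LOGOFF(base, addr)	((char *)(addr) - (char *)(base))
#define LOGRANGE(base, addr, size) \
	(unsigned)(LOGOFF(base, addr)), \
	(unsigned)(LOGOFF(base, addr) + (size) - 1)

struct blk {
	uint32_t magic;
	uint16_t count;
	uint16_t level;
};

static void log_buf(unsigned first, unsigned last)
{
	printf("log bytes %u..%u of the block\n", first, last);
}

int main(void)
{
	struct blk b = { 0xfebe, 7, 1 };

	/* log just the 'count' field: bytes 4..5 on most ABIs */
	log_buf(LOGRANGE(&b, &b.count, sizeof(b.count)));
	return 0;
}
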
+
+
+#ifdef __KERNEL__
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Routines used for growing the Btree.
+ */
+int	xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
+					 xfs_dabuf_t **bpp, int whichfork);
+int	xfs_da_split(xfs_da_state_t *state);
+
+/*
+ * Routines used for shrinking the Btree.
+ */
+int	xfs_da_join(xfs_da_state_t *state);
+void	xfs_da_fixhashpath(xfs_da_state_t *state,
+					  xfs_da_state_path_t *path_to_to_fix);
+
+/*
+ * Routines used for finding things in the Btree.
+ */
+int	xfs_da_node_lookup_int(xfs_da_state_t *state, int *result);
+int	xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
+					 int forward, int release, int *result);
+/*
+ * Routines used for linking and unlinking Btree blocks.
+ */
+int	xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
+					 xfs_da_state_blk_t *save_blk);
+int	xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
+				       xfs_da_state_blk_t *new_blk);
+
+/*
+ * Utility routines.
+ */
+int	xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno);
+int	xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
+			      xfs_dablk_t bno, xfs_daddr_t mappedbno,
+			      xfs_dabuf_t **bp, int whichfork);
+int	xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
+			       xfs_dablk_t bno, xfs_daddr_t mappedbno,
+			       xfs_dabuf_t **bpp, int whichfork);
+xfs_daddr_t	xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
+			xfs_dablk_t bno, int whichfork);
+int	xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
+					  xfs_dabuf_t *dead_buf);
+
+uint xfs_da_hashname(uchar_t *name_string, int name_length);
+uint xfs_da_log2_roundup(uint i);
+xfs_da_state_t *xfs_da_state_alloc(void);
+void xfs_da_state_free(xfs_da_state_t *state);
+void xfs_da_state_kill_altpath(xfs_da_state_t *state);
+
+void xfs_da_buf_done(xfs_dabuf_t *dabuf);
+void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first,
+			   uint last);
+void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
+void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
+xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
+
+extern struct kmem_zone *xfs_da_state_zone;
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_DA_BTREE_H__ */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
new file mode 100644
index 0000000..08d551a
--- /dev/null
+++ b/fs/xfs/xfs_dfrag.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ag.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_ialloc.h"
+#include "xfs_itable.h"
+#include "xfs_dfrag.h"
+#include "xfs_error.h"
+#include "xfs_mac.h"
+#include "xfs_rw.h"
+
+/*
+ * Syssgi interface for swapext
+ */
+int
+xfs_swapext(
+	xfs_swapext_t	__user *sxp)
+{
+	xfs_swapext_t	sx;
+	xfs_inode_t     *ip=NULL, *tip=NULL, *ips[2];
+	xfs_trans_t     *tp;
+	xfs_mount_t     *mp;
+	xfs_bstat_t	*sbp;
+	struct file	*fp = NULL, *tfp = NULL;
+	vnode_t		*vp, *tvp;
+	bhv_desc_t      *bdp, *tbdp;
+	vn_bhv_head_t   *bhp, *tbhp;
+	uint		lock_flags=0;
+	int		ilf_fields, tilf_fields;
+	int		error = 0;
+	xfs_ifork_t	tempif, *ifp, *tifp;
+	__uint64_t	tmp;
+	int		aforkblks = 0;
+	int		taforkblks = 0;
+	int		locked = 0;
+
+	if (copy_from_user(&sx, sxp, sizeof(sx)))
+		return XFS_ERROR(EFAULT);
+
+	/* Pull information for the target fd */
+	if (((fp = fget((int)sx.sx_fdtarget)) == NULL) ||
+	    ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL))  {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	bhp = VN_BHV_HEAD(vp);
+	bdp = vn_bhv_lookup(bhp, &xfs_vnodeops);
+	if (bdp == NULL) {
+		error = XFS_ERROR(EBADF);
+		goto error0;
+	} else {
+		ip = XFS_BHVTOI(bdp);
+	}
+
+	if (((tfp = fget((int)sx.sx_fdtmp)) == NULL) ||
+	    ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	tbhp = VN_BHV_HEAD(tvp);
+	tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops);
+	if (tbdp == NULL) {
+		error = XFS_ERROR(EBADF);
+		goto error0;
+	} else {
+		tip = XFS_BHVTOI(tbdp);
+	}
+
+	if (ip->i_mount != tip->i_mount) {
+		error =  XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	if (ip->i_ino == tip->i_ino) {
+		error =  XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	mp = ip->i_mount;
+
+	sbp = &sx.sx_stat;
+
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		error =  XFS_ERROR(EIO);
+		goto error0;
+	}
+
+	locked = 1;
+
+	/* Lock in i_ino order */
+	if (ip->i_ino < tip->i_ino) {
+		ips[0] = ip;
+		ips[1] = tip;
+	} else {
+		ips[0] = tip;
+		ips[1] = ip;
+	}
+	lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
+	xfs_lock_inodes(ips, 2, 0, lock_flags);
+
+	/* Check permissions */
+	error = xfs_iaccess(ip, S_IWUSR, NULL);
+	if (error)
+		goto error0;
+
+	error = xfs_iaccess(tip, S_IWUSR, NULL);
+	if (error)
+		goto error0;
+
+	/* Verify that both files have the same format */
+	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	/* Verify both files are either real-time or non-realtime */
+	if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
+	    (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	/* Should never get a local format */
+	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	if (VN_CACHED(tvp) != 0)
+		xfs_inval_cached_pages(XFS_ITOV(tip), &(tip->i_iocore),
+						(loff_t)0, 0, 0);
+
+	/* Verify O_DIRECT for ftmp */
+	if (VN_CACHED(tvp) != 0) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	/* Verify all data are being swapped */
+	if (sx.sx_offset != 0 ||
+	    sx.sx_length != ip->i_d.di_size ||
+	    sx.sx_length != tip->i_d.di_size) {
+		error = XFS_ERROR(EFAULT);
+		goto error0;
+	}
+
+	/*
+	 * If the target has extended attributes, the tmp file
+	 * must have them as well, to ensure the correct data
+	 * fork format.
+	 */
+	if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
+		error = XFS_ERROR(EINVAL);
+		goto error0;
+	}
+
+	/*
+	 * Compare the current change & modify times with those
+	 * passed in.  If they differ, we abort this swap.
+	 * This is the mechanism used to assure the calling
+	 * process that the file was not changed out from
+	 * under it.
+	 */
+	if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) ||
+	    (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) ||
+	    (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) ||
+	    (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) {
+		error = XFS_ERROR(EBUSY);
+		goto error0;
+	}
+
+	/* We need to fail if the file is memory mapped.  Once we have tossed
+	 * all existing pages, the page fault will have no option
+	 * but to go to the filesystem for pages. By making the page fault call
+	 * VOP_READ (or write in the case of autogrow) it blocks on the iolock
+	 * until we have switched the extents.
+	 */
+	if (VN_MAPPED(vp)) {
+		error = XFS_ERROR(EBUSY);
+		goto error0;
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	xfs_iunlock(tip, XFS_ILOCK_EXCL);
+
+	/*
+	 * There is a race condition here since we gave up the
+	 * ilock.  However, the data fork will not change since
+	 * we have the iolock (locked for truncation too) so we
+	 * are safe.  We don't really care if non-io related
+	 * fields change.
+	 */
+
+	VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
+	if ((error = xfs_trans_reserve(tp, 0,
+				     XFS_ICHANGE_LOG_RES(mp), 0,
+				     0, 0))) {
+		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
+		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+	xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+
+	/*
+	 * Count the number of extended attribute blocks
+	 */
+	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
+	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
+		if (error) {
+			xfs_iunlock(ip,  lock_flags);
+			xfs_iunlock(tip, lock_flags);
+			xfs_trans_cancel(tp, 0);
+			return error;
+		}
+	}
+	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
+	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
+			&taforkblks);
+		if (error) {
+			xfs_iunlock(ip,  lock_flags);
+			xfs_iunlock(tip, lock_flags);
+			xfs_trans_cancel(tp, 0);
+			return error;
+		}
+	}
+
+	/*
+	 * Swap the data forks of the inodes
+	 */
+	ifp = &ip->i_df;
+	tifp = &tip->i_df;
+	tempif = *ifp;	/* struct copy */
+	*ifp = *tifp;	/* struct copy */
+	*tifp = tempif;	/* struct copy */
+
+	/*
+	 * Fix the on-disk inode values
+	 */
+	tmp = (__uint64_t)ip->i_d.di_nblocks;
+	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
+	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
+
+	tmp = (__uint64_t) ip->i_d.di_nextents;
+	ip->i_d.di_nextents = tip->i_d.di_nextents;
+	tip->i_d.di_nextents = tmp;
+
+	tmp = (__uint64_t) ip->i_d.di_format;
+	ip->i_d.di_format = tip->i_d.di_format;
+	tip->i_d.di_format = tmp;
+
+	ilf_fields = XFS_ILOG_CORE;
+
+	switch(ip->i_d.di_format) {
+	case XFS_DINODE_FMT_EXTENTS:
+		/* If the extents fit in the inode, fix the
+		 * pointer.  Otherwise it's already NULL or
+		 * pointing to the extent.
+		 */
+		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+			ifp->if_u1.if_extents =
+				ifp->if_u2.if_inline_ext;
+		}
+		ilf_fields |= XFS_ILOG_DEXT;
+		break;
+	case XFS_DINODE_FMT_BTREE:
+		ilf_fields |= XFS_ILOG_DBROOT;
+		break;
+	}
+
+	tilf_fields = XFS_ILOG_CORE;
+
+	switch(tip->i_d.di_format) {
+	case XFS_DINODE_FMT_EXTENTS:
+		/* If the extents fit in the inode, fix the
+		 * pointer.  Otherwise it's already NULL or
+		 * pointing to the extent.
+		 */
+		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+			tifp->if_u1.if_extents =
+				tifp->if_u2.if_inline_ext;
+		}
+		tilf_fields |= XFS_ILOG_DEXT;
+		break;
+	case XFS_DINODE_FMT_BTREE:
+		tilf_fields |= XFS_ILOG_DBROOT;
+		break;
+	}
+
+	/*
+	 * Increment vnode ref counts since xfs_trans_commit &
+	 * xfs_trans_cancel will both unlock the inodes and
+	 * decrement the associated ref counts.
+	 */
+	VN_HOLD(vp);
+	VN_HOLD(tvp);
+
+	xfs_trans_ijoin(tp, ip, lock_flags);
+	xfs_trans_ijoin(tp, tip, lock_flags);
+
+	xfs_trans_log_inode(tp, ip,  ilf_fields);
+	xfs_trans_log_inode(tp, tip, tilf_fields);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 */
+	if (mp->m_flags & XFS_MOUNT_WSYNC) {
+		xfs_trans_set_sync(tp);
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT, NULL);
+
+	fput(fp);
+	fput(tfp);
+
+	return error;
+
+ error0:
+	if (locked) {
+		xfs_iunlock(ip,  lock_flags);
+		xfs_iunlock(tip, lock_flags);
+	}
+
+	if (fp != NULL) fput(fp);
+	if (tfp != NULL) fput(tfp);
+
+	return error;
+}
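
Only the data forks change hands above, so the di_nblocks swap subtracts the other inode's attribute-fork blocks and adds back the inode's own. A small worked example of that bookkeeping with hypothetical block counts:

/*
 * Worked example of the di_nblocks adjustment in xfs_swapext(): each inode
 * keeps its own attribute-fork blocks (aforkblks/taforkblks) and takes the
 * other inode's data-fork blocks.  Counts below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ip_nblocks = 100, aforkblks = 10;	/* target: 90 data + 10 attr */
	uint64_t tip_nblocks = 50, taforkblks = 5;	/* tmp:    45 data +  5 attr */
	uint64_t tmp;

	tmp = ip_nblocks;
	ip_nblocks = tip_nblocks - taforkblks + aforkblks;	/* 45 + 10 = 55 */
	tip_nblocks = tmp + taforkblks - aforkblks;		/* 90 +  5 = 95 */

	printf("after swap: target %llu blocks, tmp %llu blocks\n",
	       (unsigned long long)ip_nblocks,
	       (unsigned long long)tip_nblocks);
	return 0;
}
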
diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h
new file mode 100644
index 0000000..9048605
--- /dev/null
+++ b/fs/xfs/xfs_dfrag.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DFRAG_H__
+#define	__XFS_DFRAG_H__
+
+/*
+ * Structure passed to xfs_swapext
+ */
+
+typedef struct xfs_swapext
+{
+	__int64_t	sx_version;	/* version */
+	__int64_t	sx_fdtarget;	/* fd of target file */
+	__int64_t	sx_fdtmp;	/* fd of tmp file */
+	xfs_off_t	sx_offset;	/* offset into file */
+	xfs_off_t	sx_length;	/* length from offset */
+	char		sx_pad[16];	/* pad space, unused */
+	xfs_bstat_t	sx_stat;	/* stat of target before copy */
+} xfs_swapext_t;
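
A caller fills this structure with the two file descriptors, a whole-file offset/length, and a bstat of the target taken before the data copy (xfs_swapext() above rejects the request if the times no longer match, if the range is not the whole file, or if the tmp file is cached). A hedged user-space sketch of populating those fields; submit_swapext() is a hypothetical stand-in for the real submission path and the bstat handling is omitted:

/*
 * Hedged sketch of filling xfs_swapext_t.  The mirror struct omits sx_stat
 * (normally a bulkstat of the target taken before the copy), and
 * submit_swapext() is a hypothetical placeholder, not a real interface.
 */
#include <string.h>

struct my_swapext {			/* mirrors xfs_swapext_t, sx_stat omitted */
	long long	sx_version;
	long long	sx_fdtarget;
	long long	sx_fdtmp;
	long long	sx_offset;
	long long	sx_length;
	char		sx_pad[16];
};

static int submit_swapext(struct my_swapext *sx)
{
	(void)sx;	/* hypothetical: hand the request to the kernel here */
	return 0;
}

int swap_whole_file(int fd_target, int fd_tmp, long long size)
{
	struct my_swapext sx;

	memset(&sx, 0, sizeof(sx));
	sx.sx_version  = 0;		/* XFS_SX_VERSION */
	sx.sx_fdtarget = fd_target;
	sx.sx_fdtmp    = fd_tmp;	/* tmp file must be uncached (O_DIRECT) */
	sx.sx_offset   = 0;		/* whole file only ... */
	sx.sx_length   = size;		/* ... and must equal both di_sizes */
	return submit_swapext(&sx);
}
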
+
+/*
+ * Version flag
+ */
+#define XFS_SX_VERSION		0
+
+#ifdef __KERNEL__
+/*
+ * Prototypes for visible xfs_dfrag.c routines.
+ */
+
+/*
+ * Syscall interface for xfs_swapext
+ */
+int	xfs_swapext(struct xfs_swapext __user *sx);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_DFRAG_H__ */
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
new file mode 100644
index 0000000..f5c932b
--- /dev/null
+++ b/fs/xfs/xfs_dinode.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DINODE_H__
+#define	__XFS_DINODE_H__
+
+struct xfs_buf;
+struct xfs_mount;
+
+#define	XFS_DINODE_VERSION_1	1
+#define	XFS_DINODE_VERSION_2	2
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DINODE_GOOD_VERSION)
+int xfs_dinode_good_version(int v);
+#define XFS_DINODE_GOOD_VERSION(v)	xfs_dinode_good_version(v)
+#else
+#define XFS_DINODE_GOOD_VERSION(v)	(((v) == XFS_DINODE_VERSION_1) || \
+					 ((v) == XFS_DINODE_VERSION_2))
+#endif
+#define	XFS_DINODE_MAGIC	0x494e	/* 'IN' */
+
+/*
+ * Disk inode structure.
+ * This is just the header; the inode is expanded to fill a variable size
+ * with the last field expanding.  It is split into the core and "other"
+ * because we only need the core part in the in-core inode.
+ */
+typedef struct xfs_timestamp {
+	__int32_t	t_sec;		/* timestamp seconds */
+	__int32_t	t_nsec;		/* timestamp nanoseconds */
+} xfs_timestamp_t;
+
+/*
+ * Note: Coordinate changes to this structure with the XFS_DI_* #defines
+ * below and the offsets table in xfs_ialloc_log_di().
+ */
+typedef struct xfs_dinode_core
+{
+	__uint16_t	di_magic;	/* inode magic # = XFS_DINODE_MAGIC */
+	__uint16_t	di_mode;	/* mode and type of file */
+	__int8_t	di_version;	/* inode version */
+	__int8_t	di_format;	/* format of di_c data */
+	__uint16_t	di_onlink;	/* old number of links to file */
+	__uint32_t	di_uid;		/* owner's user id */
+	__uint32_t	di_gid;		/* owner's group id */
+	__uint32_t	di_nlink;	/* number of links to file */
+	__uint16_t	di_projid;	/* owner's project id */
+	__uint8_t	di_pad[8];	/* unused, zeroed space */
+	__uint16_t	di_flushiter;	/* incremented on flush */
+	xfs_timestamp_t	di_atime;	/* time last accessed */
+	xfs_timestamp_t	di_mtime;	/* time last modified */
+	xfs_timestamp_t	di_ctime;	/* time created/inode modified */
+	xfs_fsize_t	di_size;	/* number of bytes in file */
+	xfs_drfsbno_t	di_nblocks;	/* # of direct & btree blocks used */
+	xfs_extlen_t	di_extsize;	/* basic/minimum extent size for file */
+	xfs_extnum_t	di_nextents;	/* number of extents in data fork */
+	xfs_aextnum_t	di_anextents;	/* number of extents in attribute fork*/
+	__uint8_t	di_forkoff;	/* attr fork offs, <<3 for 64b align */
+	__int8_t	di_aformat;	/* format of attr fork's data */
+	__uint32_t	di_dmevmask;	/* DMIG event mask */
+	__uint16_t	di_dmstate;	/* DMIG state info */
+	__uint16_t	di_flags;	/* random flags, XFS_DIFLAG_... */
+	__uint32_t	di_gen;		/* generation number */
+} xfs_dinode_core_t;
+
+#define DI_MAX_FLUSH 0xffff
+
+typedef struct xfs_dinode
+{
+	xfs_dinode_core_t	di_core;
+	/*
+	 * In adding anything between the core and the union, be
+	 * sure to update the macros like XFS_LITINO below and
+	 * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h.
+	 */
+	xfs_agino_t		di_next_unlinked;/* agi unlinked list ptr */
+	union {
+		xfs_bmdr_block_t di_bmbt;	/* btree root block */
+		xfs_bmbt_rec_32_t di_bmx[1];	/* extent list */
+		xfs_dir_shortform_t di_dirsf;	/* shortform directory */
+		xfs_dir2_sf_t	di_dir2sf;	/* shortform directory v2 */
+		char		di_c[1];	/* local contents */
+		xfs_dev_t	di_dev;		/* device for S_IFCHR/S_IFBLK */
+		uuid_t		di_muuid;	/* mount point value */
+		char		di_symlink[1];	/* local symbolic link */
+	}		di_u;
+	union {
+		xfs_bmdr_block_t di_abmbt;	/* btree root block */
+		xfs_bmbt_rec_32_t di_abmx[1];	/* extent list */
+		xfs_attr_shortform_t di_attrsf;	/* shortform attribute list */
+	}		di_a;
+} xfs_dinode_t;
+
+/*
+ * The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
+ * Since the pathconf interface is signed, we use 2^31 - 1 instead.
+ * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
+ */
+#define	XFS_MAXLINK		((1U << 31) - 1U)
+#define	XFS_MAXLINK_1		65535U
+
+/*
+ * Bit names for logging disk inodes only
+ */
+#define	XFS_DI_MAGIC		0x0000001
+#define	XFS_DI_MODE		0x0000002
+#define	XFS_DI_VERSION		0x0000004
+#define	XFS_DI_FORMAT		0x0000008
+#define	XFS_DI_ONLINK		0x0000010
+#define	XFS_DI_UID		0x0000020
+#define	XFS_DI_GID		0x0000040
+#define	XFS_DI_NLINK		0x0000080
+#define	XFS_DI_PROJID		0x0000100
+#define	XFS_DI_PAD		0x0000200
+#define	XFS_DI_ATIME		0x0000400
+#define	XFS_DI_MTIME		0x0000800
+#define	XFS_DI_CTIME		0x0001000
+#define	XFS_DI_SIZE		0x0002000
+#define	XFS_DI_NBLOCKS		0x0004000
+#define	XFS_DI_EXTSIZE		0x0008000
+#define	XFS_DI_NEXTENTS		0x0010000
+#define	XFS_DI_NAEXTENTS	0x0020000
+#define	XFS_DI_FORKOFF		0x0040000
+#define	XFS_DI_AFORMAT		0x0080000
+#define	XFS_DI_DMEVMASK		0x0100000
+#define	XFS_DI_DMSTATE		0x0200000
+#define	XFS_DI_FLAGS		0x0400000
+#define	XFS_DI_GEN		0x0800000
+#define	XFS_DI_NEXT_UNLINKED	0x1000000
+#define	XFS_DI_U		0x2000000
+#define	XFS_DI_A		0x4000000
+#define	XFS_DI_NUM_BITS		27
+#define	XFS_DI_ALL_BITS		((1 << XFS_DI_NUM_BITS) - 1)
+#define	XFS_DI_CORE_BITS	(XFS_DI_ALL_BITS & ~(XFS_DI_U|XFS_DI_A))
+
+/*
+ * Values for di_format
+ */
+typedef enum xfs_dinode_fmt
+{
+	XFS_DINODE_FMT_DEV,		/* CHR, BLK: di_dev */
+	XFS_DINODE_FMT_LOCAL,		/* DIR, REG: di_c */
+					/* LNK: di_symlink */
+	XFS_DINODE_FMT_EXTENTS,		/* DIR, REG, LNK: di_bmx */
+	XFS_DINODE_FMT_BTREE,		/* DIR, REG, LNK: di_bmbt */
+	XFS_DINODE_FMT_UUID		/* MNT: di_uuid */
+} xfs_dinode_fmt_t;
+
+/*
+ * Inode minimum and maximum sizes.
+ */
+#define	XFS_DINODE_MIN_LOG	8
+#define	XFS_DINODE_MAX_LOG	11
+#define	XFS_DINODE_MIN_SIZE	(1 << XFS_DINODE_MIN_LOG)
+#define	XFS_DINODE_MAX_SIZE	(1 << XFS_DINODE_MAX_LOG)
+
+/*
+ * Inode size for given fs.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LITINO)
+int xfs_litino(struct xfs_mount *mp);
+#define	XFS_LITINO(mp)		xfs_litino(mp)
+#else
+#define	XFS_LITINO(mp)	((mp)->m_litino)
+#endif
+#define	XFS_BROOT_SIZE_ADJ	\
+	(sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t))
+
+/*
+ * Fork identifiers.  Here so utilities can use them without including
+ * xfs_inode.h.
+ */
+#define	XFS_DATA_FORK	0
+#define	XFS_ATTR_FORK	1
+
+/*
+ * Inode data & attribute fork sizes, per inode.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_Q)
+int xfs_cfork_q_disk(xfs_dinode_core_t *dcp);
+int xfs_cfork_q(xfs_dinode_core_t *dcp);
+#define	XFS_CFORK_Q_DISK(dcp)               xfs_cfork_q_disk(dcp)
+#define	XFS_CFORK_Q(dcp)                    xfs_cfork_q(dcp)
+#else
+#define	XFS_CFORK_Q_DISK(dcp)		    ((dcp)->di_forkoff != 0)
+#define XFS_CFORK_Q(dcp)                    ((dcp)->di_forkoff != 0)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_BOFF)
+int xfs_cfork_boff_disk(xfs_dinode_core_t *dcp);
+int xfs_cfork_boff(xfs_dinode_core_t *dcp);
+#define	XFS_CFORK_BOFF_DISK(dcp)	    xfs_cfork_boff_disk(dcp)
+#define	XFS_CFORK_BOFF(dcp)	            xfs_cfork_boff(dcp)
+#else
+#define	XFS_CFORK_BOFF_DISK(dcp)	    ((int)(INT_GET((dcp)->di_forkoff, ARCH_CONVERT) << 3))
+#define XFS_CFORK_BOFF(dcp)                 ((int)((dcp)->di_forkoff << 3))
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_DSIZE)
+int xfs_cfork_dsize_disk(xfs_dinode_core_t *dcp, struct xfs_mount *mp);
+int xfs_cfork_dsize(xfs_dinode_core_t *dcp, struct xfs_mount *mp);
+#define	XFS_CFORK_DSIZE_DISK(dcp,mp)        xfs_cfork_dsize_disk(dcp,mp)
+#define	XFS_CFORK_DSIZE(dcp,mp)             xfs_cfork_dsize(dcp,mp)
+#else
+#define	XFS_CFORK_DSIZE_DISK(dcp,mp) \
+	(XFS_CFORK_Q_DISK(dcp) ? XFS_CFORK_BOFF_DISK(dcp) : XFS_LITINO(mp))
+#define XFS_CFORK_DSIZE(dcp,mp) \
+	(XFS_CFORK_Q(dcp) ? XFS_CFORK_BOFF(dcp) : XFS_LITINO(mp))
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_ASIZE)
+int xfs_cfork_asize_disk(xfs_dinode_core_t *dcp, struct xfs_mount *mp);
+int xfs_cfork_asize(xfs_dinode_core_t *dcp, struct xfs_mount *mp);
+#define	XFS_CFORK_ASIZE_DISK(dcp,mp)        xfs_cfork_asize_disk(dcp,mp)
+#define	XFS_CFORK_ASIZE(dcp,mp)             xfs_cfork_asize(dcp,mp)
+#else
+#define	XFS_CFORK_ASIZE_DISK(dcp,mp) \
+	(XFS_CFORK_Q_DISK(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF_DISK(dcp) : 0)
+#define XFS_CFORK_ASIZE(dcp,mp) \
+	(XFS_CFORK_Q(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF(dcp) : 0)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_SIZE)
+int xfs_cfork_size_disk(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w);
+int xfs_cfork_size(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w);
+#define	XFS_CFORK_SIZE_DISK(dcp,mp,w)       xfs_cfork_size_disk(dcp,mp,w)
+#define	XFS_CFORK_SIZE(dcp,mp,w)            xfs_cfork_size(dcp,mp,w)
+#else
+#define	XFS_CFORK_SIZE_DISK(dcp,mp,w) \
+	((w) == XFS_DATA_FORK ? \
+		XFS_CFORK_DSIZE_DISK(dcp, mp) : \
+	 	XFS_CFORK_ASIZE_DISK(dcp, mp))
+#define XFS_CFORK_SIZE(dcp,mp,w) \
+	((w) == XFS_DATA_FORK ? \
+		XFS_CFORK_DSIZE(dcp, mp) : XFS_CFORK_ASIZE(dcp, mp))
+
+#endif
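
The fork-split arithmetic can be made concrete with the minimum 256-byte inode: a 96-byte core plus the 4-byte di_next_unlinked leaves a 156-byte literal area (XFS_LITINO), and a hypothetical di_forkoff of 15 puts the attribute fork 15 << 3 = 120 bytes in, giving the data fork 120 bytes and the attribute fork the remaining 36. A small sketch of that arithmetic; the sizes are illustrative, the real values come from the mount structure:

/*
 * Worked example of the XFS_CFORK_BOFF/DSIZE/ASIZE arithmetic for a
 * hypothetical 256-byte inode.  The 96-byte core and 4-byte next_unlinked
 * figures follow the structures above; di_forkoff is illustrative.
 */
#include <stdio.h>

int main(void)
{
	int inode_size = 256;			/* 1 << XFS_DINODE_MIN_LOG */
	int core_size = 96;			/* sizeof(xfs_dinode_core_t) */
	int next_unlinked = 4;			/* sizeof(xfs_agino_t) */
	int litino = inode_size - core_size - next_unlinked;	/* 156 */
	int forkoff = 15;			/* di_forkoff, in 8-byte units */

	int boff  = forkoff << 3;		/* XFS_CFORK_BOFF:  120 */
	int dsize = forkoff ? boff : litino;	/* XFS_CFORK_DSIZE: 120 */
	int asize = forkoff ? litino - boff : 0;/* XFS_CFORK_ASIZE:  36 */

	printf("litino %d, data fork %d bytes, attr fork %d bytes\n",
	       litino, dsize, asize);
	return 0;
}
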
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DSIZE)
+int xfs_dfork_dsize(xfs_dinode_t *dip, struct xfs_mount *mp);
+#define	XFS_DFORK_DSIZE(dip,mp)             xfs_dfork_dsize(dip,mp)
+#else
+#define XFS_DFORK_DSIZE(dip,mp)             XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_ASIZE)
+int xfs_dfork_asize(xfs_dinode_t *dip, struct xfs_mount *mp);
+#define	XFS_DFORK_ASIZE(dip,mp)             xfs_dfork_asize(dip,mp)
+#else
+#define XFS_DFORK_ASIZE(dip,mp)             XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_SIZE)
+int xfs_dfork_size(xfs_dinode_t *dip, struct xfs_mount *mp, int w);
+#define	XFS_DFORK_SIZE(dip,mp,w)            xfs_dfork_size(dip,mp,w)
+#else
+#define	XFS_DFORK_SIZE(dip,mp,w)	    XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
+
+#endif
+
+/*
+ * Macros for accessing per-fork disk inode information.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_Q)
+int xfs_dfork_q(xfs_dinode_t *dip);
+#define	XFS_DFORK_Q(dip)	            xfs_dfork_q(dip)
+#else
+#define	XFS_DFORK_Q(dip)                    XFS_CFORK_Q_DISK(&(dip)->di_core)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_BOFF)
+int xfs_dfork_boff(xfs_dinode_t *dip);
+#define	XFS_DFORK_BOFF(dip)		    xfs_dfork_boff(dip)
+#else
+#define	XFS_DFORK_BOFF(dip)		    XFS_CFORK_BOFF_DISK(&(dip)->di_core)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DPTR)
+char *xfs_dfork_dptr(xfs_dinode_t *dip);
+#define	XFS_DFORK_DPTR(dip)	            xfs_dfork_dptr(dip)
+#else
+#define	XFS_DFORK_DPTR(dip)		    ((dip)->di_u.di_c)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_APTR)
+char *xfs_dfork_aptr(xfs_dinode_t *dip);
+#define	XFS_DFORK_APTR(dip)                 xfs_dfork_aptr(dip)
+#else
+#define	XFS_DFORK_APTR(dip)		    ((dip)->di_u.di_c + XFS_DFORK_BOFF(dip))
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_PTR)
+char *xfs_dfork_ptr(xfs_dinode_t *dip, int w);
+#define	XFS_DFORK_PTR(dip,w)                xfs_dfork_ptr(dip,w)
+#else
+#define	XFS_DFORK_PTR(dip,w)	\
+	((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip))
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FORMAT)
+int xfs_cfork_format(xfs_dinode_core_t *dcp, int w);
+#define	XFS_CFORK_FORMAT(dcp,w)             xfs_cfork_format(dcp,w)
+#else
+#define	XFS_CFORK_FORMAT(dcp,w) \
+	((w) == XFS_DATA_FORK ? (dcp)->di_format : (dcp)->di_aformat)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FMT_SET)
+void xfs_cfork_fmt_set(xfs_dinode_core_t *dcp, int w, int n);
+#define	XFS_CFORK_FMT_SET(dcp,w,n)           xfs_cfork_fmt_set(dcp,w,n)
+#else
+#define	XFS_CFORK_FMT_SET(dcp,w,n) \
+	((w) == XFS_DATA_FORK ? \
+		((dcp)->di_format = (n)) : \
+		((dcp)->di_aformat = (n)))
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXTENTS)
+int xfs_cfork_nextents_disk(xfs_dinode_core_t *dcp, int w);
+int xfs_cfork_nextents(xfs_dinode_core_t *dcp, int w);
+#define	XFS_CFORK_NEXTENTS_DISK(dcp,w)       xfs_cfork_nextents_disk(dcp,w)
+#define	XFS_CFORK_NEXTENTS(dcp,w)            xfs_cfork_nextents(dcp,w)
+#else
+#define	XFS_CFORK_NEXTENTS_DISK(dcp,w) \
+	((w) == XFS_DATA_FORK ? \
+	 	INT_GET((dcp)->di_nextents, ARCH_CONVERT) : \
+	 	INT_GET((dcp)->di_anextents, ARCH_CONVERT))
+#define XFS_CFORK_NEXTENTS(dcp,w) \
+	((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXT_SET)
+void xfs_cfork_next_set(xfs_dinode_core_t *dcp, int w, int n);
+#define	XFS_CFORK_NEXT_SET(dcp,w,n)	        xfs_cfork_next_set(dcp,w,n)
+#else
+#define	XFS_CFORK_NEXT_SET(dcp,w,n) \
+	((w) == XFS_DATA_FORK ? \
+		((dcp)->di_nextents = (n)) : \
+		((dcp)->di_anextents = (n)))
+
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_NEXTENTS)
+int xfs_dfork_nextents(xfs_dinode_t *dip, int w);
+#define	XFS_DFORK_NEXTENTS(dip,w) xfs_dfork_nextents(dip,w)
+#else
+#define	XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_DINODE)
+xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp);
+#define	XFS_BUF_TO_DINODE(bp)	xfs_buf_to_dinode(bp)
+#else
+#define	XFS_BUF_TO_DINODE(bp)	((xfs_dinode_t *)(XFS_BUF_PTR(bp)))
+#endif
+
+/*
+ * Values for di_flags
+ * There should be a one-to-one correspondence between these flags and the
+ * XFS_XFLAG_s.
+ */
+#define XFS_DIFLAG_REALTIME_BIT  0	/* file's blocks come from rt area */
+#define XFS_DIFLAG_PREALLOC_BIT  1	/* file space has been preallocated */
+#define XFS_DIFLAG_NEWRTBM_BIT   2	/* for rtbitmap inode, new format */
+#define XFS_DIFLAG_IMMUTABLE_BIT 3	/* inode is immutable */
+#define XFS_DIFLAG_APPEND_BIT    4	/* inode is append-only */
+#define XFS_DIFLAG_SYNC_BIT      5	/* inode is written synchronously */
+#define XFS_DIFLAG_NOATIME_BIT   6	/* do not update atime */
+#define XFS_DIFLAG_NODUMP_BIT    7	/* do not dump */
+#define XFS_DIFLAG_RTINHERIT_BIT 8	/* create with realtime bit set */
+#define XFS_DIFLAG_PROJINHERIT_BIT  9	/* create with parents projid */
+#define XFS_DIFLAG_NOSYMLINKS_BIT  10	/* disallow symlink creation */
+#define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
+#define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
+#define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
+#define XFS_DIFLAG_IMMUTABLE     (1 << XFS_DIFLAG_IMMUTABLE_BIT)
+#define XFS_DIFLAG_APPEND        (1 << XFS_DIFLAG_APPEND_BIT)
+#define XFS_DIFLAG_SYNC          (1 << XFS_DIFLAG_SYNC_BIT)
+#define XFS_DIFLAG_NOATIME       (1 << XFS_DIFLAG_NOATIME_BIT)
+#define XFS_DIFLAG_NODUMP        (1 << XFS_DIFLAG_NODUMP_BIT)
+#define XFS_DIFLAG_RTINHERIT     (1 << XFS_DIFLAG_RTINHERIT_BIT)
+#define XFS_DIFLAG_PROJINHERIT   (1 << XFS_DIFLAG_PROJINHERIT_BIT)
+#define XFS_DIFLAG_NOSYMLINKS    (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
+
+#define XFS_DIFLAG_ANY \
+	(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
+	 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
+	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
+	 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
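
These are plain bit flags, so callers test and combine them with ordinary mask operations. A trivial sketch, with the flag values mirroring the defines above:

/* Minimal sketch of testing di_flags bits; values mirror the defines above. */
#include <stdio.h>

#define DIFLAG_REALTIME		(1 << 0)
#define DIFLAG_IMMUTABLE	(1 << 3)
#define DIFLAG_APPEND		(1 << 4)

int main(void)
{
	unsigned short di_flags = DIFLAG_REALTIME | DIFLAG_APPEND;

	if (di_flags & DIFLAG_REALTIME)
		printf("file data lives in the realtime section\n");
	if (di_flags & (DIFLAG_IMMUTABLE | DIFLAG_APPEND))
		printf("writes are restricted (immutable or append-only)\n");
	return 0;
}
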
+
+#endif	/* __XFS_DINODE_H__ */
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
new file mode 100644
index 0000000..ba30bc7
--- /dev/null
+++ b/fs/xfs/xfs_dir.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_error.h"
+
+/*
+ * xfs_dir.c
+ *
+ * Provide the external interfaces to manage directories.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Functions for the dirops interfaces.
+ */
+static void	xfs_dir_mount(struct xfs_mount *mp);
+
+static int	xfs_dir_isempty(struct xfs_inode *dp);
+
+static int	xfs_dir_init(struct xfs_trans *trans,
+			     struct xfs_inode *dir,
+			     struct xfs_inode *parent_dir);
+
+static int	xfs_dir_createname(struct xfs_trans *trans,
+				   struct xfs_inode *dp,
+				   char *name_string,
+				   int name_len,
+				   xfs_ino_t inode_number,
+				   xfs_fsblock_t *firstblock,
+				   xfs_bmap_free_t *flist,
+				   xfs_extlen_t total);
+
+static int	xfs_dir_lookup(struct xfs_trans *tp,
+			       struct xfs_inode *dp,
+			       char *name_string,
+			       int name_length,
+			       xfs_ino_t *inode_number);
+
+static int	xfs_dir_removename(struct xfs_trans *trans,
+				   struct xfs_inode *dp,
+				   char *name_string,
+				   int name_length,
+				   xfs_ino_t ino,
+				   xfs_fsblock_t *firstblock,
+				   xfs_bmap_free_t *flist,
+				   xfs_extlen_t total);
+
+static int	xfs_dir_getdents(struct xfs_trans *tp,
+				 struct xfs_inode *dp,
+				 struct uio *uiop,
+				 int *eofp);
+
+static int	xfs_dir_replace(struct xfs_trans *tp,
+				struct xfs_inode *dp,
+				char *name_string,
+				int name_length,
+				xfs_ino_t inode_number,
+				xfs_fsblock_t *firstblock,
+				xfs_bmap_free_t *flist,
+				xfs_extlen_t total);
+
+static int	xfs_dir_canenter(struct xfs_trans *tp,
+				 struct xfs_inode *dp,
+				 char *name_string,
+				 int name_length);
+
+static int	xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp,
+						  xfs_dinode_t *dip);
+
+xfs_dirops_t xfsv1_dirops = {
+	.xd_mount			= xfs_dir_mount,
+	.xd_isempty			= xfs_dir_isempty,
+	.xd_init			= xfs_dir_init,
+	.xd_createname			= xfs_dir_createname,
+	.xd_lookup			= xfs_dir_lookup,
+	.xd_removename			= xfs_dir_removename,
+	.xd_getdents			= xfs_dir_getdents,
+	.xd_replace			= xfs_dir_replace,
+	.xd_canenter			= xfs_dir_canenter,
+	.xd_shortform_validate_ondisk	= xfs_dir_shortform_validate_ondisk,
+	.xd_shortform_to_single		= xfs_dir_shortform_to_leaf,
+};
+
+/*
+ * Internal routines when dirsize == XFS_LBSIZE(mp).
+ */
+STATIC int xfs_dir_leaf_lookup(xfs_da_args_t *args);
+STATIC int xfs_dir_leaf_removename(xfs_da_args_t *args, int *number_entries,
+						 int *total_namebytes);
+STATIC int xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp,
+					     uio_t *uio, int *eofp,
+					     xfs_dirent_t *dbp,
+					     xfs_dir_put_t put);
+STATIC int xfs_dir_leaf_replace(xfs_da_args_t *args);
+
+/*
+ * Internal routines when dirsize > XFS_LBSIZE(mp).
+ */
+STATIC int xfs_dir_node_addname(xfs_da_args_t *args);
+STATIC int xfs_dir_node_lookup(xfs_da_args_t *args);
+STATIC int xfs_dir_node_removename(xfs_da_args_t *args);
+STATIC int xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp,
+					     uio_t *uio, int *eofp,
+					     xfs_dirent_t *dbp,
+					     xfs_dir_put_t put);
+STATIC int xfs_dir_node_replace(xfs_da_args_t *args);
+
+#if defined(XFS_DIR_TRACE)
+ktrace_t *xfs_dir_trace_buf;
+#endif
+
+
+/*========================================================================
+ * Overall external interface routines.
+ *========================================================================*/
+
+xfs_dahash_t	xfs_dir_hash_dot, xfs_dir_hash_dotdot;
+
+/*
+ * One-time startup routine called from xfs_init().
+ */
+void
+xfs_dir_startup(void)
+{
+	xfs_dir_hash_dot = xfs_da_hashname(".", 1);
+	xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
+}
+
+/*
+ * Initialize directory-related fields in the mount structure.
+ */
+static void
+xfs_dir_mount(xfs_mount_t *mp)
+{
+	uint shortcount, leafcount, count;
+
+	mp->m_dirversion = 1;
+	shortcount = (mp->m_attroffset - (uint)sizeof(xfs_dir_sf_hdr_t)) /
+		     (uint)sizeof(xfs_dir_sf_entry_t);
+	leafcount = (XFS_LBSIZE(mp) - (uint)sizeof(xfs_dir_leaf_hdr_t)) /
+		    ((uint)sizeof(xfs_dir_leaf_entry_t) +
+		     (uint)sizeof(xfs_dir_leaf_name_t));
+	count = shortcount > leafcount ? shortcount : leafcount;
+	mp->m_dircook_elog = xfs_da_log2_roundup(count + 1);
+	ASSERT(mp->m_dircook_elog <= mp->m_sb.sb_blocklog);
+	mp->m_dir_node_ents = mp->m_attr_node_ents =
+		(XFS_LBSIZE(mp) - (uint)sizeof(xfs_da_node_hdr_t)) /
+		(uint)sizeof(xfs_da_node_entry_t);
+	mp->m_dir_magicpct = (XFS_LBSIZE(mp) * 37) / 100;
+	mp->m_dirblksize = mp->m_sb.sb_blocksize;
+	mp->m_dirblkfsbs = 1;
+}
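
m_dircook_elog is sized so that any entry index a block can hold fits in the low bits of the cookie's block/entry word, which is why count + 1 is rounded up to a power of two. A hedged user-space sketch of that sizing, assuming xfs_da_log2_roundup() returns the smallest l with 2^l >= its argument and using a hypothetical per-block entry count:

/*
 * Hedged sketch of the m_dircook_elog computation in xfs_dir_mount():
 * round count + 1 up to a power of two and take its log, so entry indexes
 * fit in that many bits of the directory cookie.
 */
#include <stdio.h>

static unsigned log2_roundup(unsigned i)	/* assumed xfs_da_log2_roundup() behaviour */
{
	unsigned rval;

	for (rval = 0; rval < 32; rval++)
		if ((1U << rval) >= i)
			break;
	return rval;
}

int main(void)
{
	unsigned count = 500;			/* hypothetical max entries per block */
	unsigned elog = log2_roundup(count + 1);

	printf("count %u -> dircook_elog %u (2^%u = %u slots)\n",
	       count, elog, elog, 1U << elog);
	return 0;
}
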
+
+/*
+ * Return 1 if directory contains only "." and "..".
+ */
+static int
+xfs_dir_isempty(xfs_inode_t *dp)
+{
+	xfs_dir_sf_hdr_t *hdr;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	if (dp->i_d.di_size == 0)
+		return(1);
+	if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
+		return(0);
+	hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data;
+	return(hdr->count == 0);
+}
+
+/*
+ * Initialize a directory with its "." and ".." entries.
+ */
+static int
+xfs_dir_init(xfs_trans_t *trans, xfs_inode_t *dir, xfs_inode_t *parent_dir)
+{
+	xfs_da_args_t args;
+	int error;
+
+	memset((char *)&args, 0, sizeof(args));
+	args.dp = dir;
+	args.trans = trans;
+
+	ASSERT((dir->i_d.di_mode & S_IFMT) == S_IFDIR);
+	if ((error = xfs_dir_ino_validate(trans->t_mountp, parent_dir->i_ino)))
+		return error;
+
+	return(xfs_dir_shortform_create(&args, parent_dir->i_ino));
+}
+
+/*
+ * Generic handler routine to add a name to a directory.
+ * Transitions directory from shortform to Btree as necessary.
+ */
+static int							/* error */
+xfs_dir_createname(xfs_trans_t *trans, xfs_inode_t *dp, char *name,
+		   int namelen, xfs_ino_t inum, xfs_fsblock_t *firstblock,
+		   xfs_bmap_free_t *flist, xfs_extlen_t total)
+{
+	xfs_da_args_t args;
+	int retval, newsize, done;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum)))
+		return (retval);
+
+	XFS_STATS_INC(xs_dir_create);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = inum;
+	args.dp = dp;
+	args.firstblock = firstblock;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = trans;
+	args.justcheck = 0;
+	args.addname = args.oknoent = 1;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	done = 0;
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen);
+		if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) {
+			retval = xfs_dir_shortform_addname(&args);
+			done = 1;
+		} else {
+			if (total == 0)
+				return XFS_ERROR(ENOSPC);
+			retval = xfs_dir_shortform_to_leaf(&args);
+			done = retval != 0;
+		}
+	}
+	if (!done && xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_addname(&args);
+		done = retval != ENOSPC;
+		if (!done) {
+			if (total == 0)
+				return XFS_ERROR(ENOSPC);
+			retval = xfs_dir_leaf_to_node(&args);
+			done = retval != 0;
+		}
+	}
+	if (!done) {
+		retval = xfs_dir_node_addname(&args);
+	}
+	return(retval);
+}
+
+/*
+ * Generic handler routine to check if a name can be added to a directory,
+ * without adding any blocks to the directory.
+ */
+static int							/* error */
+xfs_dir_canenter(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen)
+{
+	xfs_da_args_t args;
+	int retval, newsize;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = 0;
+	args.dp = dp;
+	args.firstblock = NULL;
+	args.flist = NULL;
+	args.total = 0;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = trans;
+	args.justcheck = args.addname = args.oknoent = 1;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen);
+		if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp))
+			retval = 0;
+		else
+			retval = XFS_ERROR(ENOSPC);
+	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_addname(&args);
+	} else {
+		retval = xfs_dir_node_addname(&args);
+	}
+	return(retval);
+}
+
+/*
+ * Generic handler routine to remove a name from a directory.
+ * Transitions directory from Btree to shortform as necessary.
+ */
+static int							/* error */
+xfs_dir_removename(xfs_trans_t *trans, xfs_inode_t *dp, char *name,
+		   int namelen, xfs_ino_t ino, xfs_fsblock_t *firstblock,
+		   xfs_bmap_free_t *flist, xfs_extlen_t total)
+{
+	xfs_da_args_t args;
+	int count, totallen, newsize, retval;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	XFS_STATS_INC(xs_dir_remove);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = ino;
+	args.dp = dp;
+	args.firstblock = firstblock;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = trans;
+	args.justcheck = args.addname = args.oknoent = 0;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		retval = xfs_dir_shortform_removename(&args);
+	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_removename(&args, &count, &totallen);
+		if (retval == 0) {
+			newsize = XFS_DIR_SF_ALLFIT(count, totallen);
+			if (newsize <= XFS_IFORK_DSIZE(dp)) {
+				retval = xfs_dir_leaf_to_shortform(&args);
+			}
+		}
+	} else {
+		retval = xfs_dir_node_removename(&args);
+	}
+	return(retval);
+}
+
+static int							/* error */
+xfs_dir_lookup(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen,
+				   xfs_ino_t *inum)
+{
+	xfs_da_args_t args;
+	int retval;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	XFS_STATS_INC(xs_dir_lookup);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = 0;
+	args.dp = dp;
+	args.firstblock = NULL;
+	args.flist = NULL;
+	args.total = 0;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = trans;
+	args.justcheck = args.addname = 0;
+	args.oknoent = 1;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		retval = xfs_dir_shortform_lookup(&args);
+	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_lookup(&args);
+	} else {
+		retval = xfs_dir_node_lookup(&args);
+	}
+	if (retval == EEXIST)
+		retval = 0;
+	*inum = args.inumber;
+	return(retval);
+}
+
+/*
+ * Implement readdir.
+ */
+static int							/* error */
+xfs_dir_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, int *eofp)
+{
+	xfs_dirent_t *dbp;
+	int  alignment, retval;
+	xfs_dir_put_t put;
+
+	XFS_STATS_INC(xs_dir_getdents);
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	/*
+	 * If our caller has given us a single contiguous memory buffer,
+	 * just work directly within that buffer.  If it's in user memory,
+	 * lock it down first.
+	 */
+	alignment = sizeof(xfs_off_t) - 1;
+	if ((uio->uio_iovcnt == 1) &&
+	    (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) &&
+	    ((uio->uio_iov[0].iov_len & alignment) == 0)) {
+		dbp = NULL;
+		put = xfs_dir_put_dirent64_direct;
+	} else {
+		dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP);
+		put = xfs_dir_put_dirent64_uio;
+	}
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	*eofp = 0;
+
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		retval = xfs_dir_shortform_getdents(dp, uio, eofp, dbp, put);
+	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_getdents(trans, dp, uio, eofp, dbp, put);
+	} else {
+		retval = xfs_dir_node_getdents(trans, dp, uio, eofp, dbp, put);
+	}
+	if (dbp != NULL)
+		kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN);
+
+	return(retval);
+}
+
+static int							/* error */
+xfs_dir_replace(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen,
+				    xfs_ino_t inum, xfs_fsblock_t *firstblock,
+				    xfs_bmap_free_t *flist, xfs_extlen_t total)
+{
+	xfs_da_args_t args;
+	int retval;
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum)))
+		return retval;
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = inum;
+	args.dp = dp;
+	args.firstblock = firstblock;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = trans;
+	args.justcheck = args.addname = args.oknoent = 0;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+		retval = xfs_dir_shortform_replace(&args);
+	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
+		retval = xfs_dir_leaf_replace(&args);
+	} else {
+		retval = xfs_dir_node_replace(&args);
+	}
+
+	return(retval);
+}
+
+static int
+xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
+{
+	xfs_ino_t		ino;
+	int			namelen_sum;
+	int			count;
+	xfs_dir_shortform_t	*sf;
+	xfs_dir_sf_entry_t	*sfe;
+	int			i;
+
+	if ((INT_GET(dp->di_core.di_mode, ARCH_CONVERT) & S_IFMT) != S_IFDIR) {
+		return 0;
+	}
+	if (INT_GET(dp->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_LOCAL) {
+		return 0;
+	}
+	if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) {
+		xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p",
+			dp);
+		return 1;
+	}
+	sf = (xfs_dir_shortform_t *)(&dp->di_u.di_dirsf);
+	ino = XFS_GET_DIR_INO8(sf->hdr.parent);
+	if (xfs_dir_ino_validate(mp, ino))
+		return 1;
+
+	count =	sf->hdr.count;
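+	/*
+	 * Each shortform entry takes at least 10 bytes (an 8-byte inode
+	 * number, a 1-byte name length and at least one name byte), so
+	 * more than XFS_LITINO/10 entries cannot fit in the inode.
+	 */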
+	if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) {
+		xfs_fs_cmn_err(CE_WARN, mp,
+			"Invalid shortform count: dp 0x%p", dp);
+		return(1);
+	}
+
+	if (count == 0) {
+		return 0;
+	}
+
+	namelen_sum = 0;
+	sfe = &sf->list[0];
+	for (i = sf->hdr.count - 1; i >= 0; i--) {
+		ino = XFS_GET_DIR_INO8(sfe->inumber);
+		xfs_dir_ino_validate(mp, ino);
+		if (sfe->namelen >= XFS_LITINO(mp)) {
+			xfs_fs_cmn_err(CE_WARN, mp,
+				"Invalid shortform namelen: dp 0x%p", dp);
+			return 1;
+		}
+		namelen_sum += sfe->namelen;
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+	if (namelen_sum >= XFS_LITINO(mp)) {
+		xfs_fs_cmn_err(CE_WARN, mp,
+			"Invalid shortform namelen: dp 0x%p", dp);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*========================================================================
+ * External routines when dirsize == XFS_LBSIZE(dp->i_mount).
+ *========================================================================*/
+
+/*
+ * Add a name to the leaf directory structure.
+ * This is the external routine.
+ */
+int
+xfs_dir_leaf_addname(xfs_da_args_t *args)
+{
+	int index, retval;
+	xfs_dabuf_t *bp;
+
+	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
+					      XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+
+	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
+	if (retval == ENOENT)
+		retval = xfs_dir_leaf_add(bp, args, index);
+	xfs_da_buf_done(bp);
+	return(retval);
+}
+
+/*
+ * Remove a name from the leaf directory structure.
+ * This is the external routine.
+ */
+STATIC int
+xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen)
+{
+	xfs_dir_leafblock_t *leaf;
+	int index, retval;
+	xfs_dabuf_t *bp;
+
+	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
+					      XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
+	if (retval == EEXIST) {
+		(void)xfs_dir_leaf_remove(args->trans, bp, index);
+		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		*totallen = INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+		retval = 0;
+	}
+	xfs_da_buf_done(bp);
+	return(retval);
+}
+
+/*
+ * Look up a name in a leaf directory structure.
+ * This is the external routine.
+ */
+STATIC int
+xfs_dir_leaf_lookup(xfs_da_args_t *args)
+{
+	int index, retval;
+	xfs_dabuf_t *bp;
+
+	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
+					      XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
+	xfs_da_brelse(args->trans, bp);
+	return(retval);
+}
+
+/*
+ * Copy out directory entries for getdents(), for leaf directories.
+ */
+STATIC int
+xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
+				  int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put)
+{
+	xfs_dabuf_t *bp;
+	int retval, eob;
+
+	retval = xfs_da_read_buf(dp->i_transp, dp, 0, -1, &bp, XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+	retval = xfs_dir_leaf_getdents_int(bp, dp, 0, uio, &eob, dbp, put, -1);
+	xfs_da_brelse(trans, bp);
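+	/*
+	 * eob is set when the caller's buffer filled before the entries
+	 * ran out, so end-of-directory is the inverse.
+	 */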
+	*eofp = (eob == 0);
+	return(retval);
+}
+
+/*
+ * Look up a name in a leaf directory structure, replace the inode number.
+ * This is the external routine.
+ */
+STATIC int
+xfs_dir_leaf_replace(xfs_da_args_t *args)
+{
+	int index, retval;
+	xfs_dabuf_t *bp;
+	xfs_ino_t inum;
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+
+	inum = args->inumber;
+	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
+					      XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
+	if (retval == EEXIST) {
+		leaf = bp->data;
+		entry = &leaf->entries[index];
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+		/* XXX - replace assert? */
+		XFS_DIR_SF_PUT_DIRINO(&inum, &namest->inumber);
+		xfs_da_log_buf(args->trans, bp,
+		    XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
+		xfs_da_buf_done(bp);
+		retval = 0;
+	} else
+		xfs_da_brelse(args->trans, bp);
+	return(retval);
+}
+
+
+/*========================================================================
+ * External routines when dirsize > XFS_LBSIZE(mp).
+ *========================================================================*/
+
+/*
+ * Add a name to a Btree-format directory.
+ *
+ * This will involve walking down the Btree, and may involve splitting
+ * leaf nodes and even splitting intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ */
+STATIC int
+xfs_dir_node_addname(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	int retval, error;
+
+	/*
+	 * Fill in bucket of arguments/results/context to carry around.
+	 */
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+
+	/*
+	 * Search to see if name already exists, and get back a pointer
+	 * to where it should go.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error)
+		retval = error;
+	if (retval != ENOENT)
+		goto error;
+	blk = &state->path.blk[ state->path.active-1 ];
+	ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
+	retval = xfs_dir_leaf_add(blk->bp, args, blk->index);
+	if (retval == 0) {
+		/*
+		 * Addition succeeded, update Btree hashvals.
+		 */
+		if (!args->justcheck)
+			xfs_da_fixhashpath(state, &state->path);
+	} else {
+		/*
+		 * Addition failed, split as many Btree elements as required.
+		 */
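+		/*
+		 * With no block reservation (total == 0) we cannot grow
+		 * the tree; hand the ENOSPC back to the caller.
+		 */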
+		if (args->total == 0) {
+			ASSERT(retval == ENOSPC);
+			goto error;
+		}
+		retval = xfs_da_split(state);
+	}
+error:
+	xfs_da_state_free(state);
+
+	return(retval);
+}
+
+/*
+ * Remove a name from a B-tree directory.
+ *
+ * This will involve walking down the Btree, and may involve joining
+ * leaf nodes and even joining intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ */
+STATIC int
+xfs_dir_node_removename(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	int retval, error;
+
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+
+	/*
+	 * Search to see if name exists, and get back a pointer to it.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error)
+		retval = error;
+	if (retval != EEXIST) {
+		xfs_da_state_free(state);
+		return(retval);
+	}
+
+	/*
+	 * Remove the name and update the hashvals in the tree.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
+	retval = xfs_dir_leaf_remove(args->trans, blk->bp, blk->index);
+	xfs_da_fixhashpath(state, &state->path);
+
+	/*
+	 * Check to see if the tree needs to be collapsed.
+	 */
+	error = 0;
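+	/*
+	 * A nonzero return from xfs_dir_leaf_remove means the leaf is now
+	 * sparse enough for a join to be worth trying.
+	 */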
+	if (retval) {
+		error = xfs_da_join(state);
+	}
+
+	xfs_da_state_free(state);
+	if (error)
+		return(error);
+	return(0);
+}
+
+/*
+ * Look up a filename in an int directory.
+ * Use an internal routine to actually do all the work.
+ */
+STATIC int
+xfs_dir_node_lookup(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	int retval, error, i;
+
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+
+	/*
+	 * Search to see if name exists,
+	 * and get back a pointer to it.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error) {
+		retval = error;
+	}
+
+	/*
+	 * If not in a transaction, we have to release all the buffers.
+	 */
+	for (i = 0; i < state->path.active; i++) {
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+
+	xfs_da_state_free(state);
+	return(retval);
+}
+
+STATIC int
+xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
+				  int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put)
+{
+	xfs_da_intnode_t *node;
+	xfs_da_node_entry_t *btree;
+	xfs_dir_leafblock_t *leaf = NULL;
+	xfs_dablk_t bno, nextbno;
+	xfs_dahash_t cookhash;
+	xfs_mount_t *mp;
+	int error, eob, i;
+	xfs_dabuf_t *bp;
+	xfs_daddr_t nextda;
+
+	/*
+	 * Pick up our context.
+	 */
+	mp = dp->i_mount;
+	bp = NULL;
+	bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset);
+	cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
+
+	xfs_dir_trace_g_du("node: start", dp, uio);
+
+	/*
+	 * Re-find our place, even if we're confused about what our place is.
+	 *
+	 * First we check the block number from the magic cookie; it is a
+	 * cache of where we ended last time.  If we find a leaf block, and
+	 * the starting hashval in that block is less than our desired
+	 * hashval, then we run with it.
+	 */
+	if (bno > 0) {
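+		/*
+		 * Speculatively read the block named in the cookie.  If it
+		 * is missing, is not a leaf, or no longer covers our
+		 * hashval, bp is left NULL and we fall back to a full
+		 * Btree descent below.
+		 */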
+		error = xfs_da_read_buf(trans, dp, bno, -2, &bp, XFS_DATA_FORK);
+		if ((error != 0) && (error != EFSCORRUPTED))
+			return(error);
+		if (bp)
+			leaf = bp->data;
+		if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) {
+			xfs_dir_trace_g_dub("node: block not a leaf",
+						   dp, uio, bno);
+			xfs_da_brelse(trans, bp);
+			bp = NULL;
+		}
+		if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT) > cookhash) {
+			xfs_dir_trace_g_dub("node: leaf hash too large",
+						   dp, uio, bno);
+			xfs_da_brelse(trans, bp);
+			bp = NULL;
+		}
+		if (bp &&
+		    cookhash > INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)) {
+			xfs_dir_trace_g_dub("node: leaf hash too small",
+						   dp, uio, bno);
+			xfs_da_brelse(trans, bp);
+			bp = NULL;
+		}
+	}
+
+	/*
+	 * If we did not find a leaf block from the blockno in the cookie,
+	 * or there was no blockno in the cookie (e.g. first time through),
+	 * then we start at the top of the Btree and re-find our hashval.
+	 */
+	if (bp == NULL) {
+		xfs_dir_trace_g_du("node: start at root" , dp, uio);
+		bno = 0;
+		for (;;) {
+			error = xfs_da_read_buf(trans, dp, bno, -1, &bp,
+						       XFS_DATA_FORK);
+			if (error)
+				return(error);
+			if (bp == NULL)
+				return(XFS_ERROR(EFSCORRUPTED));
+			node = bp->data;
+			if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)
+				break;
+			btree = &node->btree[0];
+			xfs_dir_trace_g_dun("node: node detail", dp, uio, node);
+			for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) {
+				if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) {
+					bno = INT_GET(btree->before, ARCH_CONVERT);
+					break;
+				}
+			}
+			if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
+				xfs_da_brelse(trans, bp);
+				xfs_dir_trace_g_du("node: hash beyond EOF",
+							  dp, uio);
+				uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0,
+							     XFS_DA_MAXHASH);
+				*eofp = 1;
+				return(0);
+			}
+			xfs_dir_trace_g_dub("node: going to block",
+						   dp, uio, bno);
+			xfs_da_brelse(trans, bp);
+		}
+	}
+	ASSERT(cookhash != XFS_DA_MAXHASH);
+
+	/*
+	 * We've dropped down to the (first) leaf block that contains the
+	 * hashval we are interested in.  Continue rolling upward thru the
+	 * leaf blocks until we fill up our buffer.
+	 */
+	for (;;) {
+		leaf = bp->data;
+		if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) {
+			xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf);
+			xfs_da_brelse(trans, bp);
+			XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)",
+					     XFS_ERRLEVEL_LOW, mp, leaf);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
+		if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) {
+			nextda = xfs_da_reada_buf(trans, dp, nextbno,
+						  XFS_DATA_FORK);
+		} else
+			nextda = -1;
+		error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp,
+						  put, nextda);
+		xfs_da_brelse(trans, bp);
+		bno = nextbno;
+		if (eob) {
+			xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno);
+			*eofp = 0;
+			return(error);
+		}
+		if (bno == 0)
+			break;
+		error = xfs_da_read_buf(trans, dp, bno, nextda, &bp,
+					XFS_DATA_FORK);
+		if (error)
+			return(error);
+		if (unlikely(bp == NULL)) {
+			XFS_ERROR_REPORT("xfs_dir_node_getdents(2)",
+					 XFS_ERRLEVEL_LOW, mp);
+			return(XFS_ERROR(EFSCORRUPTED));
+		}
+	}
+	*eofp = 1;
+	xfs_dir_trace_g_du("node: E-O-F", dp, uio);
+	return(0);
+}
+
+/*
+ * Look up a filename in an int directory, replace the inode number.
+ * Use an internal routine to actually do the lookup.
+ */
+STATIC int
+xfs_dir_node_replace(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+	xfs_ino_t inum;
+	int retval, error, i;
+	xfs_dabuf_t *bp;
+
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+	inum = args->inumber;
+
+	/*
+	 * Search to see if name exists,
+	 * and get back a pointer to it.
+	 */
+	error = xfs_da_node_lookup_int(state, &retval);
+	if (error) {
+		retval = error;
+	}
+
+	if (retval == EEXIST) {
+		blk = &state->path.blk[state->path.active - 1];
+		ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
+		bp = blk->bp;
+		leaf = bp->data;
+		entry = &leaf->entries[blk->index];
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+		/* XXX - replace assert ? */
+		XFS_DIR_SF_PUT_DIRINO(&inum, &namest->inumber);
+		xfs_da_log_buf(args->trans, bp,
+		    XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
+		xfs_da_buf_done(bp);
+		blk->bp = NULL;
+		retval = 0;
+	} else {
+		i = state->path.active - 1;
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+	for (i = 0; i < state->path.active - 1; i++) {
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+
+	xfs_da_state_free(state);
+	return(retval);
+}
+
+#if defined(XFS_DIR_TRACE)
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_du(char *where, xfs_inode_t *dp, uio_t *uio)
+{
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DU, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_dub(char *where, xfs_inode_t *dp, uio_t *uio, xfs_dablk_t bno)
+{
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUB, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     (void *)(unsigned long)bno,
+		     NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio,
+			xfs_da_intnode_t *node)
+{
+	int	last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1;
+
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     (void *)(unsigned long)
+			INT_GET(node->hdr.info.forw, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(node->hdr.count, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(node->btree[0].hashval, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(node->btree[last].hashval, ARCH_CONVERT),
+		     NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio,
+			xfs_dir_leafblock_t *leaf)
+{
+	int	last = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1;
+
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUL, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     (void *)(unsigned long)
+			INT_GET(leaf->hdr.info.forw, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(leaf->hdr.count, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
+		     (void *)(unsigned long)
+			INT_GET(leaf->entries[last].hashval, ARCH_CONVERT),
+		     NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_due(char *where, xfs_inode_t *dp, uio_t *uio,
+			xfs_dir_leaf_entry_t *entry)
+{
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUE, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     (void *)(unsigned long)
+			INT_GET(entry->hashval, ARCH_CONVERT),
+		     NULL, NULL, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for an inode and a uio.
+ */
+void
+xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie)
+{
+	xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUC, where,
+		     (void *)dp, (void *)dp->i_mount,
+		     (void *)((unsigned long)(uio->uio_offset >> 32)),
+		     (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
+		     (void *)(unsigned long)uio->uio_resid,
+		     (void *)((unsigned long)(cookie >> 32)),
+		     (void *)((unsigned long)(cookie & 0xFFFFFFFF)),
+		     NULL, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * Add a trace buffer entry for the arguments given to the routine,
+ * generic form.
+ */
+void
+xfs_dir_trace_enter(int type, char *where,
+			void * a0, void * a1,
+			void * a2, void * a3,
+			void * a4, void * a5,
+			void * a6, void * a7,
+			void * a8, void * a9,
+			void * a10, void * a11)
+{
+	ASSERT(xfs_dir_trace_buf);
+	ktrace_enter(xfs_dir_trace_buf, (void *)(unsigned long)type,
+					(void *)where,
+					(void *)a0, (void *)a1, (void *)a2,
+					(void *)a3, (void *)a4, (void *)a5,
+					(void *)a6, (void *)a7, (void *)a8,
+					(void *)a9, (void *)a10, (void *)a11,
+					NULL, NULL);
+}
+#endif	/* XFS_DIR_TRACE */
diff --git a/fs/xfs/xfs_dir.h b/fs/xfs/xfs_dir.h
new file mode 100644
index 0000000..4dbc9f5
--- /dev/null
+++ b/fs/xfs/xfs_dir.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR_H__
+#define	__XFS_DIR_H__
+
+/*
+ * Large directories are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Filenames are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of a filename may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ *
+ * Small directories use a different format and are packed as tightly
+ * as possible so as to fit into the literal area of the inode.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+struct uio;
+struct xfs_bmap_free;
+struct xfs_da_args;
+struct xfs_dinode;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Directory function types.
+ * Put in structures (xfs_dirops_t) for v1 and v2 directories.
+ */
+typedef void	(*xfs_dir_mount_t)(struct xfs_mount *mp);
+typedef int	(*xfs_dir_isempty_t)(struct xfs_inode *dp);
+typedef int	(*xfs_dir_init_t)(struct xfs_trans *tp,
+				  struct xfs_inode *dp,
+				  struct xfs_inode *pdp);
+typedef int	(*xfs_dir_createname_t)(struct xfs_trans *tp,
+					struct xfs_inode *dp,
+					char *name,
+					int namelen,
+					xfs_ino_t inum,
+					xfs_fsblock_t *first,
+					struct xfs_bmap_free *flist,
+					xfs_extlen_t total);
+typedef int	(*xfs_dir_lookup_t)(struct xfs_trans *tp,
+				    struct xfs_inode *dp,
+				    char *name,
+				    int namelen,
+				    xfs_ino_t *inum);
+typedef int	(*xfs_dir_removename_t)(struct xfs_trans *tp,
+					struct xfs_inode *dp,
+					char *name,
+					int namelen,
+					xfs_ino_t ino,
+					xfs_fsblock_t *first,
+					struct xfs_bmap_free *flist,
+					xfs_extlen_t total);
+typedef int	(*xfs_dir_getdents_t)(struct xfs_trans *tp,
+				      struct xfs_inode *dp,
+				      struct uio *uio,
+				      int *eofp);
+typedef int	(*xfs_dir_replace_t)(struct xfs_trans *tp,
+				     struct xfs_inode *dp,
+				     char *name,
+				     int namelen,
+				     xfs_ino_t inum,
+				     xfs_fsblock_t *first,
+				     struct xfs_bmap_free *flist,
+				     xfs_extlen_t total);
+typedef int	(*xfs_dir_canenter_t)(struct xfs_trans *tp,
+				      struct xfs_inode *dp,
+				      char *name,
+				      int namelen);
+typedef int	(*xfs_dir_shortform_validate_ondisk_t)(struct xfs_mount *mp,
+						       struct xfs_dinode *dip);
+typedef int	(*xfs_dir_shortform_to_single_t)(struct xfs_da_args *args);
+
+typedef struct xfs_dirops {
+	xfs_dir_mount_t				xd_mount;
+	xfs_dir_isempty_t			xd_isempty;
+	xfs_dir_init_t				xd_init;
+	xfs_dir_createname_t			xd_createname;
+	xfs_dir_lookup_t			xd_lookup;
+	xfs_dir_removename_t			xd_removename;
+	xfs_dir_getdents_t			xd_getdents;
+	xfs_dir_replace_t			xd_replace;
+	xfs_dir_canenter_t			xd_canenter;
+	xfs_dir_shortform_validate_ondisk_t	xd_shortform_validate_ondisk;
+	xfs_dir_shortform_to_single_t		xd_shortform_to_single;
+} xfs_dirops_t;
+
+/*
+ * Overall external interface routines.
+ */
+void	xfs_dir_startup(void);	/* called exactly once */
+
+#define	XFS_DIR_MOUNT(mp)	\
+	((mp)->m_dirops.xd_mount(mp))
+#define	XFS_DIR_ISEMPTY(mp,dp)	\
+	((mp)->m_dirops.xd_isempty(dp))
+#define	XFS_DIR_INIT(mp,tp,dp,pdp)	\
+	((mp)->m_dirops.xd_init(tp,dp,pdp))
+#define	XFS_DIR_CREATENAME(mp,tp,dp,name,namelen,inum,first,flist,total) \
+	((mp)->m_dirops.xd_createname(tp,dp,name,namelen,inum,first,flist,\
+				      total))
+#define	XFS_DIR_LOOKUP(mp,tp,dp,name,namelen,inum)	\
+	((mp)->m_dirops.xd_lookup(tp,dp,name,namelen,inum))
+#define	XFS_DIR_REMOVENAME(mp,tp,dp,name,namelen,ino,first,flist,total)	\
+	((mp)->m_dirops.xd_removename(tp,dp,name,namelen,ino,first,flist,total))
+#define	XFS_DIR_GETDENTS(mp,tp,dp,uio,eofp)	\
+	((mp)->m_dirops.xd_getdents(tp,dp,uio,eofp))
+#define	XFS_DIR_REPLACE(mp,tp,dp,name,namelen,inum,first,flist,total)	\
+	((mp)->m_dirops.xd_replace(tp,dp,name,namelen,inum,first,flist,total))
+#define	XFS_DIR_CANENTER(mp,tp,dp,name,namelen)	\
+	((mp)->m_dirops.xd_canenter(tp,dp,name,namelen))
+#define	XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp,dip)	\
+	((mp)->m_dirops.xd_shortform_validate_ondisk(mp,dip))
+#define	XFS_DIR_SHORTFORM_TO_SINGLE(mp,args)	\
+	((mp)->m_dirops.xd_shortform_to_single(args))
+
+#define	XFS_DIR_IS_V1(mp)	((mp)->m_dirversion == 1)
+extern xfs_dirops_t xfsv1_dirops;
+
+#endif	/* __XFS_DIR_H__ */
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
new file mode 100644
index 0000000..49fc0a3
--- /dev/null
+++ b/fs/xfs/xfs_dir2.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * XFS v2 directory implementation.
+ * Top-level and utility routines.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+
+/*
+ * Declarations for interface routines.
+ */
+static void	xfs_dir2_mount(xfs_mount_t *mp);
+static int	xfs_dir2_isempty(xfs_inode_t *dp);
+static int	xfs_dir2_init(xfs_trans_t *tp, xfs_inode_t *dp,
+			      xfs_inode_t *pdp);
+static int	xfs_dir2_createname(xfs_trans_t *tp, xfs_inode_t *dp,
+				    char *name, int namelen, xfs_ino_t inum,
+				    xfs_fsblock_t *first,
+				    xfs_bmap_free_t *flist, xfs_extlen_t total);
+static int	xfs_dir2_lookup(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
+				int namelen, xfs_ino_t *inum);
+static int	xfs_dir2_removename(xfs_trans_t *tp, xfs_inode_t *dp,
+				    char *name, int namelen, xfs_ino_t ino,
+				    xfs_fsblock_t *first,
+				    xfs_bmap_free_t *flist, xfs_extlen_t total);
+static int	xfs_dir2_getdents(xfs_trans_t *tp, xfs_inode_t *dp, uio_t *uio,
+				  int *eofp);
+static int	xfs_dir2_replace(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
+				 int namelen, xfs_ino_t inum,
+				 xfs_fsblock_t *first, xfs_bmap_free_t *flist,
+				 xfs_extlen_t total);
+static int	xfs_dir2_canenter(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
+				  int namelen);
+static int	xfs_dir2_shortform_validate_ondisk(xfs_mount_t *mp,
+						   xfs_dinode_t *dip);
+
+/*
+ * Utility routine declarations.
+ */
+static int	xfs_dir2_put_dirent64_direct(xfs_dir2_put_args_t *pa);
+static int	xfs_dir2_put_dirent64_uio(xfs_dir2_put_args_t *pa);
+
+/*
+ * Directory operations vector.
+ */
+xfs_dirops_t	xfsv2_dirops = {
+	.xd_mount			= xfs_dir2_mount,
+	.xd_isempty			= xfs_dir2_isempty,
+	.xd_init			= xfs_dir2_init,
+	.xd_createname			= xfs_dir2_createname,
+	.xd_lookup			= xfs_dir2_lookup,
+	.xd_removename			= xfs_dir2_removename,
+	.xd_getdents			= xfs_dir2_getdents,
+	.xd_replace			= xfs_dir2_replace,
+	.xd_canenter			= xfs_dir2_canenter,
+	.xd_shortform_validate_ondisk	= xfs_dir2_shortform_validate_ondisk,
+	.xd_shortform_to_single		= xfs_dir2_sf_to_block,
+};
+
+/*
+ * Interface routines.
+ */
+
+/*
+ * Initialize directory-related fields in the mount structure.
+ */
+static void
+xfs_dir2_mount(
+	xfs_mount_t	*mp)		/* filesystem mount point */
+{
+	mp->m_dirversion = 2;
+	ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
+	       XFS_MAX_BLOCKSIZE);
+	mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
+	mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog;
+	mp->m_dirdatablk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_DATA_FIRSTDB(mp));
+	mp->m_dirleafblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_LEAF_FIRSTDB(mp));
+	mp->m_dirfreeblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_FREE_FIRSTDB(mp));
+	mp->m_attr_node_ents =
+		(mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) /
+		(uint)sizeof(xfs_da_node_entry_t);
+	mp->m_dir_node_ents =
+		(mp->m_dirblksize - (uint)sizeof(xfs_da_node_hdr_t)) /
+		(uint)sizeof(xfs_da_node_entry_t);
+	mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100;
+}
+
+/*
+ * Return 1 if directory contains only "." and "..".
+ */
+static int				/* return code */
+xfs_dir2_isempty(
+	xfs_inode_t	*dp)		/* incore inode structure */
+{
+	xfs_dir2_sf_t	*sfp;		/* shortform directory structure */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	/*
+	 * Might happen during shutdown.
+	 */
+	if (dp->i_d.di_size == 0) {
+		return 1;
+	}
+	if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
+		return 0;
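+	/*
+	 * The shortform count does not include "." and ".." (the parent
+	 * inode lives in the header), so a zero count means empty.
+	 */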
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	return !sfp->hdr.count;
+}
+
+/*
+ * Initialize a directory with its "." and ".." entries.
+ */
+static int				/* error */
+xfs_dir2_init(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	xfs_inode_t	*pdp)		/* incore parent directory inode */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		error;		/* error return value */
+
+	memset((char *)&args, 0, sizeof(args));
+	args.dp = dp;
+	args.trans = tp;
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino))) {
+		return error;
+	}
+	return xfs_dir2_sf_create(&args, pdp->i_ino);
+}
+
+/*
+ * Enter a name in a directory.
+ */
+static int					/* error */
+xfs_dir2_createname(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*dp,		/* incore directory inode */
+	char			*name,		/* new entry name */
+	int			namelen,	/* new entry name length */
+	xfs_ino_t		inum,		/* new entry inode number */
+	xfs_fsblock_t		*first,		/* bmap's firstblock */
+	xfs_bmap_free_t		*flist,		/* bmap's freeblock list */
+	xfs_extlen_t		total)		/* bmap's total block count */
+{
+	xfs_da_args_t		args;		/* operation arguments */
+	int			rval;		/* return value */
+	int			v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) {
+		return rval;
+	}
+	XFS_STATS_INC(xs_dir_create);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = inum;
+	args.dp = dp;
+	args.firstblock = first;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = 0;
+	args.addname = args.oknoent = 1;
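+	/*
+	 * addname is set since we are inserting; oknoent is set because
+	 * not finding an existing entry is the expected outcome.
+	 */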
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_addname(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_addname(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_addname(&args);
+	else
+		rval = xfs_dir2_node_addname(&args);
+	return rval;
+}
+
+/*
+ * Lookup a name in a directory, give back the inode number.
+ */
+static int				/* error */
+xfs_dir2_lookup(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* lookup name */
+	int		namelen,	/* lookup name length */
+	xfs_ino_t	*inum)		/* out: inode number */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	XFS_STATS_INC(xs_dir_lookup);
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = 0;
+	args.dp = dp;
+	args.firstblock = NULL;
+	args.flist = NULL;
+	args.total = 0;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = args.addname = 0;
+	args.oknoent = 1;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_lookup(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_lookup(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_lookup(&args);
+	else
+		rval = xfs_dir2_node_lookup(&args);
+	if (rval == EEXIST)
+		rval = 0;
+	if (rval == 0)
+		*inum = args.inumber;
+	return rval;
+}
+
+/*
+ * Remove an entry from a directory.
+ */
+static int				/* error */
+xfs_dir2_removename(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* name of entry to remove */
+	int		namelen,	/* name length of entry to remove */
+	xfs_ino_t	ino,		/* inode number of entry to remove */
+	xfs_fsblock_t	*first,		/* bmap's firstblock */
+	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
+	xfs_extlen_t	total)		/* bmap's total block count */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	XFS_STATS_INC(xs_dir_remove);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = ino;
+	args.dp = dp;
+	args.firstblock = first;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = args.addname = args.oknoent = 0;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_removename(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_removename(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_removename(&args);
+	else
+		rval = xfs_dir2_node_removename(&args);
+	return rval;
+}
+
+/*
+ * Read a directory.
+ */
+static int				/* error */
+xfs_dir2_getdents(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	uio_t		*uio,		/* caller's buffer control */
+	int		*eofp)		/* out: eof reached */
+{
+	int		alignment;	/* alignment required for ABI */
+	xfs_dirent_t	*dbp;		/* malloc'ed buffer */
+	xfs_dir2_put_t	put;		/* entry formatting routine */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	XFS_STATS_INC(xs_dir_getdents);
+	/*
+	 * If our caller has given us a single contiguous aligned memory buffer,
+	 * just work directly within that buffer.  If it's in user memory,
+	 * lock it down first.
+	 */
+	alignment = sizeof(xfs_off_t) - 1;
+	if ((uio->uio_iovcnt == 1) &&
+	    (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) &&
+	    ((uio->uio_iov[0].iov_len & alignment) == 0)) {
+		dbp = NULL;
+		put = xfs_dir2_put_dirent64_direct;
+	} else {
+		dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP);
+		put = xfs_dir2_put_dirent64_uio;
+	}
+
+	*eofp = 0;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_getdents(dp, uio, eofp, dbp, put);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
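+		/* on error, fall through so the bounce buffer still gets freed */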
+		;
+	} else if (v)
+		rval = xfs_dir2_block_getdents(tp, dp, uio, eofp, dbp, put);
+	else
+		rval = xfs_dir2_leaf_getdents(tp, dp, uio, eofp, dbp, put);
+	if (dbp != NULL)
+		kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN);
+	return rval;
+}
+
+/*
+ * Replace the inode number of a directory entry.
+ */
+static int				/* error */
+xfs_dir2_replace(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* name of entry to replace */
+	int		namelen,	/* name length of entry to replace */
+	xfs_ino_t	inum,		/* new inode number */
+	xfs_fsblock_t	*first,		/* bmap's firstblock */
+	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
+	xfs_extlen_t	total)		/* bmap's total block count */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) {
+		return rval;
+	}
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = inum;
+	args.dp = dp;
+	args.firstblock = first;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = args.addname = args.oknoent = 0;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_replace(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_replace(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_replace(&args);
+	else
+		rval = xfs_dir2_node_replace(&args);
+	return rval;
+}
+
+/*
+ * See if this entry can be added to the directory without allocating space.
+ */
+static int				/* error */
+xfs_dir2_canenter(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* name of entry to add */
+	int		namelen)	/* name length of entry to add */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = 0;
+	args.dp = dp;
+	args.firstblock = NULL;
+	args.flist = NULL;
+	args.total = 0;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = args.addname = args.oknoent = 1;
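+	/*
+	 * justcheck makes the addname routines stop short of modifying
+	 * anything; they only report whether the entry would fit.
+	 */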
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_addname(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_addname(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_addname(&args);
+	else
+		rval = xfs_dir2_node_addname(&args);
+	return rval;
+}
+
+/*
+ * Dummy routine for shortform inode validation.
+ * Can't really do this.
+ */
+/* ARGSUSED */
+static int				/* error */
+xfs_dir2_shortform_validate_ondisk(
+	xfs_mount_t	*mp,		/* filesystem mount point */
+	xfs_dinode_t	*dip)		/* ondisk inode */
+{
+	return 0;
+}
+
+/*
+ * Utility routines.
+ */
+
+/*
+ * Add a block to the directory.
+ * This routine is for data and free blocks, not leaf/node blocks
+ * which are handled by xfs_da_grow_inode.
+ */
+int					/* error */
+xfs_dir2_grow_inode(
+	xfs_da_args_t	*args,		/* operation arguments */
+	int		space,		/* v2 dir's space XFS_DIR2_xxx_SPACE */
+	xfs_dir2_db_t	*dbp)		/* out: block number added */
+{
+	xfs_fileoff_t	bno;		/* directory offset of new block */
+	int		count;		/* count of filesystem blocks */
+	xfs_inode_t	*dp;		/* incore directory inode */
+	int		error;		/* error return value */
+	int		got;		/* blocks actually mapped */
+	int		i;		/* temp mapping index */
+	xfs_bmbt_irec_t	map;		/* single structure for bmap */
+	int		mapi;		/* mapping index */
+	xfs_bmbt_irec_t	*mapp;		/* bmap mapping structure(s) */
+	xfs_mount_t	*mp;		/* filesystem mount point */
+	int		nmap;		/* number of bmap entries */
+	xfs_trans_t	*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_s("grow_inode", args, space);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Set lowest possible block in the space requested.
+	 */
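+	/*
+	 * Each XFS_DIR2_xxx_SPACE section (data, leaf, freeindex) occupies
+	 * its own XFS_DIR2_SPACE_SIZE-sized region of the directory file's
+	 * logical address space.
+	 */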
+	bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE);
+	count = mp->m_dirblkfsbs;
+	/*
+	 * Find the first hole for our block.
+	 */
+	if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK))) {
+		return error;
+	}
+	nmap = 1;
+	ASSERT(args->firstblock != NULL);
+	/*
+	 * Try mapping the new block contiguously (one extent).
+	 */
+	if ((error = xfs_bmapi(tp, dp, bno, count,
+			XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
+			args->firstblock, args->total, &map, &nmap,
+			args->flist))) {
+		return error;
+	}
+	ASSERT(nmap <= 1);
+	/*
+	 * Got it in 1.
+	 */
+	if (nmap == 1) {
+		mapp = &map;
+		mapi = 1;
+	}
+	/*
+	 * Didn't work and this is a multiple-fsb directory block.
+	 * Try again without the contiguous flag, a piece at a time.
+	 */
+	else if (nmap == 0 && count > 1) {
+		xfs_fileoff_t	b;	/* current file offset */
+
+		/*
+		 * Space for maximum number of mappings.
+		 */
+		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+		/*
+		 * Iterate until we get to the end of our block.
+		 */
+		for (b = bno, mapi = 0; b < bno + count; ) {
+			int	c;	/* current fsb count */
+
+			/*
+			 * Can't map more than MAX_NMAP at once.
+			 */
+			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
+			c = (int)(bno + count - b);
+			if ((error = xfs_bmapi(tp, dp, b, c,
+					XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
+					args->firstblock, args->total,
+					&mapp[mapi], &nmap, args->flist))) {
+				kmem_free(mapp, sizeof(*mapp) * count);
+				return error;
+			}
+			if (nmap < 1)
+				break;
+			/*
+			 * Add this bunch into our table, go to the next offset.
+			 */
+			mapi += nmap;
+			b = mapp[mapi - 1].br_startoff +
+			    mapp[mapi - 1].br_blockcount;
+		}
+	}
+	/*
+	 * Didn't work.
+	 */
+	else {
+		mapi = 0;
+		mapp = NULL;
+	}
+	/*
+	 * See how many fsb's we got.
+	 */
+	for (i = 0, got = 0; i < mapi; i++)
+		got += mapp[i].br_blockcount;
+	/*
+	 * Didn't get enough fsb's, or the first/last blocks are wrong.
+	 */
+	if (got != count || mapp[0].br_startoff != bno ||
+	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
+	    bno + count) {
+		if (mapp != &map)
+			kmem_free(mapp, sizeof(*mapp) * count);
+		return XFS_ERROR(ENOSPC);
+	}
+	/*
+	 * Done with the temporary mapping table.
+	 */
+	if (mapp != &map)
+		kmem_free(mapp, sizeof(*mapp) * count);
+	*dbp = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)bno);
+	/*
+	 * Update file's size if this is the data space and it grew.
+	 */
+	if (space == XFS_DIR2_DATA_SPACE) {
+		xfs_fsize_t	size;		/* directory file (data) size */
+
+		size = XFS_FSB_TO_B(mp, bno + count);
+		if (size > dp->i_d.di_size) {
+			dp->i_d.di_size = size;
+			xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+		}
+	}
+	return 0;
+}
+
+/*
+ * See if the directory is a single-block form directory.
+ */
+int					/* error */
+xfs_dir2_isblock(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	int		*vp)		/* out: 1 is block, 0 is not block */
+{
+	xfs_fileoff_t	last;		/* last file offset */
+	xfs_mount_t	*mp;		/* filesystem mount point */
+	int		rval;		/* return value */
+
+	mp = dp->i_mount;
+	if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) {
+		return rval;
+	}
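+	/*
+	 * xfs_bmap_last_offset gives the fsb offset just past the last
+	 * mapped block; block form means the data fork is exactly one
+	 * directory block long.
+	 */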
+	rval = XFS_FSB_TO_B(mp, last) == mp->m_dirblksize;
+	ASSERT(rval == 0 || dp->i_d.di_size == mp->m_dirblksize);
+	*vp = rval;
+	return 0;
+}
+
+/*
+ * See if the directory is a single-leaf form directory.
+ */
+int					/* error */
+xfs_dir2_isleaf(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	int		*vp)		/* out: 1 is leaf, 0 is not leaf */
+{
+	xfs_fileoff_t	last;		/* last file offset */
+	xfs_mount_t	*mp;		/* filesystem mount point */
+	int		rval;		/* return value */
+
+	mp = dp->i_mount;
+	if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) {
+		return rval;
+	}
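+	/*
+	 * Leaf form means the file ends exactly one directory block past
+	 * the start of the leaf space: a single leaf and no freeindex
+	 * blocks.
+	 */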
+	*vp = last == mp->m_dirleafblk + (1 << mp->m_sb.sb_dirblklog);
+	return 0;
+}
+
+/*
+ * Getdents put routine for 64-bit ABI, direct form.
+ */
+static int					/* error */
+xfs_dir2_put_dirent64_direct(
+	xfs_dir2_put_args_t	*pa)		/* argument bundle */
+{
+	xfs_dirent_t		*idbp;		/* dirent pointer */
+	iovec_t			*iovp;		/* io vector */
+	int			namelen;	/* entry name length */
+	int			reclen;		/* entry total length */
+	uio_t			*uio;		/* I/O control */
+
+	namelen = pa->namelen;
+	reclen = DIRENTSIZE(namelen);
+	uio = pa->uio;
+	/*
+	 * Won't fit in the remaining space.
+	 */
+	if (reclen > uio->uio_resid) {
+		pa->done = 0;
+		return 0;
+	}
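+	/*
+	 * Build the dirent in place in the caller's buffer and advance
+	 * the iovec and resid past it.
+	 */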
+	iovp = uio->uio_iov;
+	idbp = (xfs_dirent_t *)iovp->iov_base;
+	iovp->iov_base = (char *)idbp + reclen;
+	iovp->iov_len -= reclen;
+	uio->uio_resid -= reclen;
+	idbp->d_reclen = reclen;
+	idbp->d_ino = pa->ino;
+	idbp->d_off = pa->cook;
+	idbp->d_name[namelen] = '\0';
+	pa->done = 1;
+	memcpy(idbp->d_name, pa->name, namelen);
+	return 0;
+}
+
+/*
+ * Getdents put routine for 64-bit ABI, uio form.
+ */
+static int					/* error */
+xfs_dir2_put_dirent64_uio(
+	xfs_dir2_put_args_t	*pa)		/* argument bundle */
+{
+	xfs_dirent_t		*idbp;		/* dirent pointer */
+	int			namelen;	/* entry name length */
+	int			reclen;		/* entry total length */
+	int			rval;		/* return value */
+	uio_t			*uio;		/* I/O control */
+
+	namelen = pa->namelen;
+	reclen = DIRENTSIZE(namelen);
+	uio = pa->uio;
+	/*
+	 * Won't fit in the remaining space.
+	 */
+	if (reclen > uio->uio_resid) {
+		pa->done = 0;
+		return 0;
+	}
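+	/*
+	 * Build the dirent in the bounce buffer, then copy it out to the
+	 * (unaligned or multi-iovec) destination via uio_read.
+	 */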
+	idbp = pa->dbp;
+	idbp->d_reclen = reclen;
+	idbp->d_ino = pa->ino;
+	idbp->d_off = pa->cook;
+	idbp->d_name[namelen] = '\0';
+	memcpy(idbp->d_name, pa->name, namelen);
+	rval = uio_read((caddr_t)idbp, reclen, uio);
+	pa->done = (rval == 0);
+	return rval;
+}
+
+/*
+ * Remove the given block from the directory.
+ * This routine is used for data and free blocks; leaf/node blocks are done
+ * by xfs_da_shrink_inode.
+ */
+int
+xfs_dir2_shrink_inode(
+	xfs_da_args_t	*args,		/* operation arguments */
+	xfs_dir2_db_t	db,		/* directory block number */
+	xfs_dabuf_t	*bp)		/* block's buffer */
+{
+	xfs_fileoff_t	bno;		/* directory file offset */
+	xfs_dablk_t	da;		/* directory file offset */
+	int		done;		/* bunmap is finished */
+	xfs_inode_t	*dp;		/* incore directory inode */
+	int		error;		/* error return value */
+	xfs_mount_t	*mp;		/* filesystem mount point */
+	xfs_trans_t	*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_db("shrink_inode", args, db, bp);
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	da = XFS_DIR2_DB_TO_DA(mp, db);
+	/*
+	 * Unmap the fsblock(s).
+	 */
+	if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
+			XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
+			&done))) {
+		/*
+		 * ENOSPC actually can happen if we're in a removename with
+		 * no space reservation, and the resulting block removal
+		 * would cause a bmap btree split or conversion from extents
+		 * to btree.  This can only happen for un-fragmented
+		 * directory blocks, since you need to be punching out
+		 * the middle of an extent.
+		 * In this case we need to leave the block in the file,
+		 * and not binval it.
+		 * So the block has to be in a consistent empty state
+		 * and appropriately logged.
+		 * We don't free up the buffer; the caller can tell it
+		 * hasn't happened since it got an error back.
+		 */
+		return error;
+	}
+	ASSERT(done);
+	/*
+	 * Invalidate the buffer from the transaction.
+	 */
+	xfs_da_binval(tp, bp);
+	/*
+	 * If it's not a data block, we're done.
+	 */
+	if (db >= XFS_DIR2_LEAF_FIRSTDB(mp))
+		return 0;
+	/*
+	 * If the block isn't the last one in the directory, we're done.
+	 */
+	if (dp->i_d.di_size > XFS_DIR2_DB_OFF_TO_BYTE(mp, db + 1, 0))
+		return 0;
+	bno = da;
+	if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
+		/*
+		 * This can't really happen unless there's kernel corruption.
+		 */
+		return error;
+	}
+	if (db == mp->m_dirdatablk)
+		ASSERT(bno == 0);
+	else
+		ASSERT(bno > 0);
+	/*
+	 * Set the size to the new last block.
+	 */
+	dp->i_d.di_size = XFS_FSB_TO_B(mp, bno);
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+	return 0;
+}
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
new file mode 100644
index 0000000..8f4fc7f
--- /dev/null
+++ b/fs/xfs/xfs_dir2.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_H__
+#define	__XFS_DIR2_H__
+
+struct uio;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_dir2_put_args;
+struct xfs_inode;
+struct xfs_trans;
+
+/*
+ * Directory version 2.
+ * There are 4 possible formats:
+ *	shortform
+ *	single block - data with embedded leaf at the end
+ *	multiple data blocks, single leaf+freeindex block
+ *	data blocks, node&leaf blocks (btree), freeindex blocks
+ *
+ *	The shortform format is in xfs_dir2_sf.h.
+ *	The single block format is in xfs_dir2_block.h.
+ *	The data block format is in xfs_dir2_data.h.
+ *	The leaf and freeindex block formats are in xfs_dir2_leaf.h.
+ *	Node blocks are the same as the other version, in xfs_da_btree.h.
+ */
+
+/*
+ * Byte offset in data block and shortform entry.
+ */
+typedef	__uint16_t	xfs_dir2_data_off_t;
+#define	NULLDATAOFF	0xffffU
+typedef uint		xfs_dir2_data_aoff_t;	/* argument form */
+
+/*
+ * Directory block number (logical dirblk in file)
+ */
+typedef	__uint32_t	xfs_dir2_db_t;
+
+/*
+ * Byte offset in a directory.
+ */
+typedef	xfs_off_t		xfs_dir2_off_t;
+
+/*
+ * For getdents, argument struct for put routines.
+ */
+typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa);
+typedef struct xfs_dir2_put_args {
+	xfs_off_t		cook;		/* cookie of (next) entry */
+	xfs_intino_t	ino;		/* inode number */
+	struct xfs_dirent	*dbp;		/* buffer pointer */
+	char		*name;		/* directory entry name */
+	int		namelen;	/* length of name */
+	int		done;		/* output: set if value was stored */
+	xfs_dir2_put_t	put;		/* put function ptr (i/o) */
+	struct uio	*uio;		/* uio control structure */
+} xfs_dir2_put_args_t;
+
+#define	XFS_DIR_IS_V2(mp)	((mp)->m_dirversion == 2)
+extern xfs_dirops_t	xfsv2_dirops;
+
+/*
+ * Other interfaces used by the rest of the dir v2 code.
+ */
+extern int
+	xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
+			    xfs_dir2_db_t *dbp);
+
+extern int
+	xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp);
+
+extern int
+	xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp);
+
+extern int
+	xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
+			      struct xfs_dabuf *bp);
+
+#endif	/* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
new file mode 100644
index 0000000..bc4c40f
--- /dev/null
+++ b/fs/xfs/xfs_dir2_block.c
@@ -0,0 +1,1248 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_block.c
+ * XFS V2 directory implementation, single-block form.
+ * See xfs_dir2_block.h for the format.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_error.h"
+
+/*
+ * Local function prototypes.
+ */
+static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first,
+				    int last);
+static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp);
+static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp,
+				     int *entno);
+static int xfs_dir2_block_sort(const void *a, const void *b);
+
+/*
+ * Add an entry to a block directory.
+ */
+int						/* error */
+xfs_dir2_block_addname(
+	xfs_da_args_t		*args)		/* directory op arguments */
+{
+	xfs_dir2_data_free_t	*bf;		/* bestfree table in block */
+	xfs_dir2_block_t	*block;		/* directory block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dabuf_t		*bp;		/* buffer for block */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	int			compact;	/* need to compact leaf ents */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_inode_t		*dp;		/* directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
+	int			error;		/* error return value */
+	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
+	xfs_dahash_t		hash;		/* hash value of found entry */
+	int			high;		/* high index for binary srch */
+	int			highstale;	/* high stale index */
+	int			lfloghigh=0;	/* last final leaf to log */
+	int			lfloglow=0;	/* first final leaf to log */
+	int			len;		/* length of the new entry */
+	int			low;		/* low index for binary srch */
+	int			lowstale;	/* low stale index */
+	int			mid=0;		/* midpoint for binary srch */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log header */
+	int			needscan;	/* need to rescan freespace */
+	xfs_dir2_data_off_t	*tagp;		/* pointer to tag value */
+	xfs_trans_t		*tp;		/* transaction structure */
+
+	xfs_dir2_trace_args("block_addname", args);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Read the (one and only) directory block into dabuf bp.
+	 */
+	if ((error =
+	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
+		return error;
+	}
+	ASSERT(bp != NULL);
+	block = bp->data;
+	/*
+	 * Check the magic number, corrupted if wrong.
+	 */
+	if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT)
+						!= XFS_DIR2_BLOCK_MAGIC)) {
+		XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
+				     XFS_ERRLEVEL_LOW, mp, block);
+		xfs_da_brelse(tp, bp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	len = XFS_DIR2_DATA_ENTSIZE(args->namelen);
+	/*
+	 * Set up pointers to parts of the block.
+	 */
+	bf = block->hdr.bestfree;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * No stale entries?  Need space for entry and new leaf.
+	 */
+	if (!btp->stale) {
+		/*
+		 * Tag just before the first leaf entry.
+		 */
+		tagp = (xfs_dir2_data_off_t *)blp - 1;
+		/*
+		 * Data object just before the first leaf entry.
+		 */
+		enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT));
+		/*
+		 * If it's not free then can't do this add without cleaning up:
+		 * the space before the first leaf entry needs to be free so it
+		 * can be expanded to hold the pointer to the new entry.
+		 */
+		if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG)
+			dup = enddup = NULL;
+		/*
+		 * Check out the biggest freespace and see if it's the same one.
+		 */
+		else {
+			dup = (xfs_dir2_data_unused_t *)
+			      ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT));
+			if (dup == enddup) {
+				/*
+				 * It is the biggest freespace, is it too small
+				 * to hold the new leaf too?
+				 */
+				if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) {
+					/*
+					 * Yes, we use the second-largest
+					 * entry instead if it works.
+					 */
+					if (INT_GET(bf[1].length, ARCH_CONVERT) >= len)
+						dup = (xfs_dir2_data_unused_t *)
+						      ((char *)block +
+						       INT_GET(bf[1].offset, ARCH_CONVERT));
+					else
+						dup = NULL;
+				}
+			} else {
+				/*
+				 * Not the same free entry,
+				 * just check its length.
+				 */
+				if (INT_GET(dup->length, ARCH_CONVERT) < len) {
+					dup = NULL;
+				}
+			}
+		}
+		compact = 0;
+	}
+	/*
+	 * If there are stale entries we'll use one for the leaf.
+	 * Is the biggest entry enough to avoid compaction?
+	 */
+	else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) {
+		dup = (xfs_dir2_data_unused_t *)
+		      ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT));
+		compact = 0;
+	}
+	/*
+	 * Will need to compact to make this work.
+	 */
+	else {
+		/*
+		 * Tag just before the first leaf entry.
+		 */
+		tagp = (xfs_dir2_data_off_t *)blp - 1;
+		/*
+		 * Data object just before the first leaf entry.
+		 */
+		dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT));
+		/*
+		 * If it's not free, the new data entry will have to go
+		 * where the leaf table starts now, using only the room
+		 * reclaimed from the stale leaf entries, if that's enough.
+		 */
+		if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) {
+			if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) *
+			    (uint)sizeof(*blp) < len)
+				dup = NULL;
+		} else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len)
+			dup = NULL;
+		else
+			dup = (xfs_dir2_data_unused_t *)blp;
+		compact = 1;
+	}
+	/*
+	 * If this isn't a real add, we're done with the buffer.
+	 */
+	if (args->justcheck)
+		xfs_da_brelse(tp, bp);
+	/*
+	 * If we don't have space for the new entry & leaf ...
+	 */
+	if (!dup) {
+		/*
+		 * Not trying to actually do anything, or don't have
+		 * a space reservation: return no-space.
+		 */
+		if (args->justcheck || args->total == 0)
+			return XFS_ERROR(ENOSPC);
+		/*
+		 * Convert to the next larger format.
+		 * Then add the new entry in that format.
+		 */
+		error = xfs_dir2_block_to_leaf(args, bp);
+		xfs_da_buf_done(bp);
+		if (error)
+			return error;
+		return xfs_dir2_leaf_addname(args);
+	}
+	/*
+	 * Just checking, and it would work, so say so.
+	 */
+	if (args->justcheck)
+		return 0;
+	needlog = needscan = 0;
+	/*
+	 * If need to compact the leaf entries, do it now.
+	 * Leave the highest-numbered stale entry stale.
+	 * XXX should be the one closest to mid but mid is not yet computed.
+	 */
+	if (compact) {
+		int	fromidx;		/* source leaf index */
+		int	toidx;			/* target leaf index */
+
+		for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1,
+			highstale = lfloghigh = -1;
+		     fromidx >= 0;
+		     fromidx--) {
+			if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) {
+				if (highstale == -1)
+					highstale = toidx;
+				else {
+					if (lfloghigh == -1)
+						lfloghigh = toidx;
+					continue;
+				}
+			}
+			if (fromidx < toidx)
+				blp[toidx] = blp[fromidx];
+			toidx--;
+		}
+		lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1);
+		lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1;
+		INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1));
+		xfs_dir2_data_make_free(tp, bp,
+			(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
+			(xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)),
+			&needlog, &needscan);
+		blp += INT_GET(btp->stale, ARCH_CONVERT) - 1;
+		INT_SET(btp->stale, ARCH_CONVERT, 1);
+		/*
+		 * If we now need to rebuild the bestfree map, do so.
+		 * This needs to happen before the next call to use_free.
+		 */
+		if (needscan) {
+			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
+				&needlog, NULL);
+			needscan = 0;
+		}
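+		/*
+		 * Exactly one stale leaf entry remains (btp->stale == 1),
+		 * and blp now points at the start of the compacted table.
+		 */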
+	}
+	/*
+	 * There are stale entries but no compaction was needed: set the
+	 * leaf logging boundaries to an impossible range so the MIN/MAX
+	 * updates below capture the real one.  For the no-stale case
+	 * they're set explicitly.
+	 */
+	else if (INT_GET(btp->stale, ARCH_CONVERT)) {
+		lfloglow = INT_GET(btp->count, ARCH_CONVERT);
+		lfloghigh = -1;
+	}
+	/*
+	 * Find the last slot with a hash value lower than ours, -1 if none.
+	 */
+	for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) {
+		mid = (low + high) >> 1;
+		if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval)
+			break;
+		if (hash < args->hashval)
+			low = mid + 1;
+		else
+			high = mid - 1;
+	}
+	while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) {
+		mid--;
+	}
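+	/*
+	 * mid now indexes the last leaf entry whose hash value is less
+	 * than args->hashval (-1 if there is none); the new leaf entry
+	 * goes just after it.
+	 */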
+	/*
+	 * No stale entries, will use enddup space to hold new leaf.
+	 */
+	if (!btp->stale) {
+		/*
+		 * Mark the space needed for the new leaf entry, now in use.
+		 */
+		xfs_dir2_data_use_free(tp, bp, enddup,
+			(xfs_dir2_data_aoff_t)
+			((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) -
+			 sizeof(*blp)),
+			(xfs_dir2_data_aoff_t)sizeof(*blp),
+			&needlog, &needscan);
+		/*
+		 * Update the tail (entry count).
+		 */
+		INT_MOD(btp->count, ARCH_CONVERT, +1);
+		/*
+		 * If we now need to rebuild the bestfree map, do so.
+		 * This needs to happen before the next call to use_free.
+		 */
+		if (needscan) {
+			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
+				&needlog, NULL);
+			needscan = 0;
+		}
+		/*
+		 * Adjust pointer to the first leaf entry, we're about to move
+		 * the table up one to open up space for the new leaf entry.
+		 * Then adjust our index to match.
+		 */
+		blp--;
+		mid++;
+		if (mid)
+			memmove(blp, &blp[1], mid * sizeof(*blp));
+		lfloglow = 0;
+		lfloghigh = mid;
+	}
+	/*
+	 * Use a stale leaf for our new entry.
+	 */
+	else {
+		for (lowstale = mid;
+		     lowstale >= 0 &&
+			INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR;
+		     lowstale--)
+			continue;
+		for (highstale = mid + 1;
+		     highstale < INT_GET(btp->count, ARCH_CONVERT) &&
+			INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR &&
+			(lowstale < 0 || mid - lowstale > highstale - mid);
+		     highstale++)
+			continue;
+		/*
+		 * Move entries toward the low-numbered stale entry.
+		 */
+		if (lowstale >= 0 &&
+		    (highstale == INT_GET(btp->count, ARCH_CONVERT) ||
+		     mid - lowstale <= highstale - mid)) {
+			if (mid - lowstale)
+				memmove(&blp[lowstale], &blp[lowstale + 1],
+					(mid - lowstale) * sizeof(*blp));
+			lfloglow = MIN(lowstale, lfloglow);
+			lfloghigh = MAX(mid, lfloghigh);
+		}
+		/*
+		 * Move entries toward the high-numbered stale entry.
+		 */
+		else {
+			ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT));
+			mid++;
+			if (highstale - mid)
+				memmove(&blp[mid + 1], &blp[mid],
+					(highstale - mid) * sizeof(*blp));
+			lfloglow = MIN(mid, lfloglow);
+			lfloghigh = MAX(highstale, lfloghigh);
+		}
+		INT_MOD(btp->stale, ARCH_CONVERT, -1);
+	}
+	/*
+	 * Point to the new data entry.
+	 */
+	dep = (xfs_dir2_data_entry_t *)dup;
+	/*
+	 * Fill in the leaf entry.
+	 */
+	INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval);
+	INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block));
+	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
+	/*
+	 * Mark space for the data entry used.
+	 */
+	xfs_dir2_data_use_free(tp, bp, dup,
+		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
+		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
+	/*
+	 * Create the new data entry.
+	 */
+	INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+	dep->namelen = args->namelen;
+	memcpy(dep->name, args->name, args->namelen);
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+	/*
+	 * Clean up the bestfree array and log the header, tail, and entry.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog,
+			NULL);
+	if (needlog)
+		xfs_dir2_data_log_header(tp, bp);
+	xfs_dir2_block_log_tail(tp, bp);
+	xfs_dir2_data_log_entry(tp, bp, dep);
+	xfs_dir2_data_check(dp, bp);
+	xfs_da_buf_done(bp);
+	return 0;
+}
+
+/*
+ * Readdir for block directories.
+ */
+int						/* error */
+xfs_dir2_block_getdents(
+	xfs_trans_t		*tp,		/* transaction (NULL) */
+	xfs_inode_t		*dp,		/* incore inode */
+	uio_t			*uio,		/* caller's buffer control */
+	int			*eofp,		/* eof reached? (out) */
+	xfs_dirent_t		*dbp,		/* caller's buffer */
+	xfs_dir2_put_t		put)		/* abi's formatting function */
+{
+	xfs_dir2_block_t	*block;		/* directory block structure */
+	xfs_dabuf_t		*bp;		/* buffer for block */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
+	char			*endptr;	/* end of the data entries */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_put_args_t	p;		/* arg package for put rtn */
+	char			*ptr;		/* current data entry */
+	int			wantoff;	/* starting block offset */
+
+	mp = dp->i_mount;
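+	/*
+	 * The uio offset is a "dataptr" cookie that encodes both the
+	 * directory block number and the byte offset within that block.
+	 */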
+	/*
+	 * If the block number in the offset is out of range, we're done.
+	 */
+	if (XFS_DIR2_DATAPTR_TO_DB(mp, uio->uio_offset) > mp->m_dirdatablk) {
+		*eofp = 1;
+		return 0;
+	}
+	/*
+	 * If we can't read the block, give up; otherwise get the dabuf in bp.
+	 */
+	if ((error =
+	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
+		return error;
+	}
+	ASSERT(bp != NULL);
+	/*
+	 * Extract the byte offset we start at from the seek pointer.
+	 * We'll skip entries before this.
+	 */
+	wantoff = XFS_DIR2_DATAPTR_TO_OFF(mp, uio->uio_offset);
+	block = bp->data;
+	xfs_dir2_data_check(dp, bp);
+	/*
+	 * Set up values for the loop.
+	 */
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	ptr = (char *)block->u;
+	endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
+	p.dbp = dbp;
+	p.put = put;
+	p.uio = uio;
+	/*
+	 * Loop over the data portion of the block.
+	 * Each object is a real entry (dep) or an unused one (dup).
+	 */
+	while (ptr < endptr) {
+		dup = (xfs_dir2_data_unused_t *)ptr;
+		/*
+		 * Unused, skip it.
+		 */
+		if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) {
+			ptr += INT_GET(dup->length, ARCH_CONVERT);
+			continue;
+		}
+
+		dep = (xfs_dir2_data_entry_t *)ptr;
+
+		/*
+		 * Bump pointer for the next iteration.
+		 */
+		ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
+		/*
+		 * The entry is before the desired starting point, skip it.
+		 */
+		if ((char *)dep - (char *)block < wantoff)
+			continue;
+		/*
+		 * Set up argument structure for put routine.
+		 */
+		p.namelen = dep->namelen;
+
+		p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+						    ptr - (char *)block);
+		p.ino = INT_GET(dep->inumber, ARCH_CONVERT);
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = (char *)dep->name;
+
+		/*
+		 * Put the entry in the caller's buffer.
+		 */
+		error = p.put(&p);
+
+		/*
+		 * If it didn't fit, set the final offset to here & return.
+		 */
+		if (!p.done) {
+			uio->uio_offset =
+				XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+					(char *)dep - (char *)block);
+			xfs_da_brelse(tp, bp);
+			return error;
+		}
+	}
+
+	/*
+	 * Reached the end of the block.
+	 * Set the offset to the (nonexistent) block 1, so the next call
+	 * returns EOF immediately, and return.
+	 */
+	*eofp = 1;
+
+	uio->uio_offset =
+		XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0);
+
+	xfs_da_brelse(tp, bp);
+
+	return 0;
+}
+
+/*
+ * Log leaf entries from the block.
+ */
+static void
+xfs_dir2_block_log_leaf(
+	xfs_trans_t		*tp,		/* transaction structure */
+	xfs_dabuf_t		*bp,		/* block buffer */
+	int			first,		/* index of first logged leaf */
+	int			last)		/* index of last logged leaf */
+{
+	xfs_dir2_block_t	*block;		/* directory block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	mp = tp->t_mountp;
+	block = bp->data;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block),
+		(uint)((char *)&blp[last + 1] - (char *)block - 1));
+}
+
+/*
+ * Log the block tail.
+ */
+static void
+xfs_dir2_block_log_tail(
+	xfs_trans_t		*tp,		/* transaction structure */
+	xfs_dabuf_t		*bp)		/* block buffer */
+{
+	xfs_dir2_block_t	*block;		/* directory block structure */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	mp = tp->t_mountp;
+	block = bp->data;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block),
+		(uint)((char *)(btp + 1) - (char *)block - 1));
+}
+
+/*
+ * Look up an entry in the block.  This is the external routine;
+ * xfs_dir2_block_lookup_int does the real work.
+ */
+int						/* error */
+xfs_dir2_block_lookup(
+	xfs_da_args_t		*args)		/* dir lookup arguments */
+{
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_inode_t		*dp;		/* incore inode */
+	int			ent;		/* entry index */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	xfs_dir2_trace_args("block_lookup", args);
+	/*
+	 * Get the buffer and look up the entry.
+	 * If not found (ENOENT) just return; there is no buffer to release.
+	 */
+	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent)))
+		return error;
+	dp = args->dp;
+	mp = dp->i_mount;
+	block = bp->data;
+	xfs_dir2_data_check(dp, bp);
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * Get the offset from the leaf entry, to point to the data.
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT)));
+	/*
+	 * Fill in inode number, release the block.
+	 */
+	args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+	xfs_da_brelse(args->trans, bp);
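+	/*
+	 * A successful lookup is reported to the caller as EEXIST.
+	 */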
+	return XFS_ERROR(EEXIST);
+}
+
+/*
+ * Internal block lookup routine.
+ */
+static int					/* error */
+xfs_dir2_block_lookup_int(
+	xfs_da_args_t		*args,		/* dir lookup arguments */
+	xfs_dabuf_t		**bpp,		/* returned block buffer */
+	int			*entno)		/* returned entry number */
+{
+	xfs_dir2_dataptr_t	addr;		/* data entry address */
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_inode_t		*dp;		/* incore inode */
+	int			error;		/* error return value */
+	xfs_dahash_t		hash;		/* found hash value */
+	int			high;		/* binary search high index */
+	int			low;		/* binary search low index */
+	int			mid;		/* binary search current idx */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Read the buffer, return error if we can't get it.
+	 */
+	if ((error =
+	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
+		return error;
+	}
+	ASSERT(bp != NULL);
+	block = bp->data;
+	xfs_dir2_data_check(dp, bp);
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * Loop doing a binary search for our hash value.
+	 * Find our entry, ENOENT if it's not there.
+	 */
+	for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) {
+		ASSERT(low <= high);
+		mid = (low + high) >> 1;
+		if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval)
+			break;
+		if (hash < args->hashval)
+			low = mid + 1;
+		else
+			high = mid - 1;
+		if (low > high) {
+			ASSERT(args->oknoent);
+			xfs_da_brelse(tp, bp);
+			return XFS_ERROR(ENOENT);
+		}
+	}
+	/*
+	 * Back up to the first one with the right hash value.
+	 */
+	while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) {
+		mid--;
+	}
+	/*
+	 * Now loop forward through all the entries with the
+	 * right hash value looking for our name.
+	 */
+	do {
+		if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		/*
+		 * Get pointer to the entry from the leaf.
+		 */
+		dep = (xfs_dir2_data_entry_t *)
+			((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr));
+		/*
+		 * Compare the name; if it matches, give back the buffer
+		 * and the entry number.
+		 */
+		if (dep->namelen == args->namelen &&
+		    dep->name[0] == args->name[0] &&
+		    memcmp(dep->name, args->name, args->namelen) == 0) {
+			*bpp = bp;
+			*entno = mid;
+			return 0;
+		}
+	} while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash);
+	/*
+	 * No match, release the buffer and return ENOENT.
+	 */
+	ASSERT(args->oknoent);
+	xfs_da_brelse(tp, bp);
+	return XFS_ERROR(ENOENT);
+}
+
+/*
+ * Remove an entry from a block format directory.
+ * If that makes the block small enough to fit in shortform, transform it.
+ */
+int						/* error */
+xfs_dir2_block_removename(
+	xfs_da_args_t		*args)		/* directory operation args */
+{
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf pointer */
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_inode_t		*dp;		/* incore inode */
+	int			ent;		/* block leaf entry index */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log block header */
+	int			needscan;	/* need to fixup bestfree */
+	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
+	int			size;		/* shortform size */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args("block_removename", args);
+	/*
+	 * Look up the entry in the block.  Gets the buffer and entry index.
+	 * It will always be there since the vnodeops level does a lookup first.
+	 */
+	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) {
+		return error;
+	}
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	block = bp->data;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * Point to the data entry using the leaf entry.
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT)));
+	/*
+	 * Mark the data entry's space free.
+	 */
+	needlog = needscan = 0;
+	xfs_dir2_data_make_free(tp, bp,
+		(xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
+		XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan);
+	/*
+	 * Fix up the block tail.
+	 */
+	INT_MOD(btp->stale, ARCH_CONVERT, +1);
+	xfs_dir2_block_log_tail(tp, bp);
+	/*
+	 * Remove the leaf entry by marking it stale.
+	 */
+	INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR);
+	xfs_dir2_block_log_leaf(tp, bp, ent, ent);
+	/*
+	 * Fix up bestfree, log the header if necessary.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog,
+			NULL);
+	if (needlog)
+		xfs_dir2_data_log_header(tp, bp);
+	xfs_dir2_data_check(dp, bp);
+	/*
+	 * See if the remaining entries would fit in shortform; if not,
+	 * we're done.
+	 */
+	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
+	    XFS_IFORK_DSIZE(dp)) {
+		xfs_da_buf_done(bp);
+		return 0;
+	}
+	/*
+	 * If it works, do the conversion.
+	 */
+	return xfs_dir2_block_to_sf(args, bp, size, &sfh);
+}
+
+/*
+ * Replace an entry in a V2 block directory.
+ * Change the inode number to the new value.
+ */
+int						/* error */
+xfs_dir2_block_replace(
+	xfs_da_args_t		*args)		/* directory operation args */
+{
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* block data entry */
+	xfs_inode_t		*dp;		/* incore inode */
+	int			ent;		/* leaf entry index */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	xfs_dir2_trace_args("block_replace", args);
+	/*
+	 * Lookup the entry in the directory.  Get buffer and entry index.
+	 * This will always succeed since the caller has already done a lookup.
+	 */
+	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) {
+		return error;
+	}
+	dp = args->dp;
+	mp = dp->i_mount;
+	block = bp->data;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * Point to the data entry we need to change.
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT)));
+	ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber);
+	/*
+	 * Change the inode number to the new value.
+	 */
+	INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+	xfs_dir2_data_log_entry(args->trans, bp, dep);
+	xfs_dir2_data_check(dp, bp);
+	xfs_da_buf_done(bp);
+	return 0;
+}
+
+/*
+ * Qsort comparison routine for the block leaf entries.
+ */
+static int					/* sort order */
+xfs_dir2_block_sort(
+	const void			*a,	/* first leaf entry */
+	const void			*b)	/* second leaf entry */
+{
+	const xfs_dir2_leaf_entry_t	*la;	/* first leaf entry */
+	const xfs_dir2_leaf_entry_t	*lb;	/* second leaf entry */
+
+	la = a;
+	lb = b;
+	return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 :
+		(INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0);
+}
+
+/*
+ * Convert a V2 leaf directory to a V2 block directory if possible.
+ */
+int						/* error */
+xfs_dir2_leaf_to_block(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*lbp,		/* leaf buffer */
+	xfs_dabuf_t		*dbp)		/* data buffer */
+{
+	xfs_dir2_data_off_t	*bestsp;	/* leaf bests table */
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
+	int			error;		/* error return value */
+	int			from;		/* leaf from index */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* file system mount point */
+	int			needlog;	/* need to log data header */
+	int			needscan;	/* need to scan for bestfree */
+	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
+	int			size;		/* bytes used */
+	xfs_dir2_data_off_t	*tagp;		/* end of entry (tag) */
+	int			to;		/* block/leaf to index */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	leaf = lbp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC);
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	/*
+	 * If there are data blocks other than the first one, take this
+	 * opportunity to remove trailing empty data blocks that may have
+	 * been left behind during no-space-reservation operations.
+	 * These will show up in the leaf bests table.
+	 */
+	while (dp->i_d.di_size > mp->m_dirblksize) {
+		bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
+		if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) ==
+		    mp->m_dirblksize - (uint)sizeof(block->hdr)) {
+			if ((error =
+			    xfs_dir2_leaf_trim_data(args, lbp,
+				    (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1))))
+				goto out;
+		} else {
+			error = 0;
+			goto out;
+		}
+	}
+	/*
+	 * Read the data block if we don't already have it, give up if it fails.
+	 */
+	if (dbp == NULL &&
+	    (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
+		    XFS_DATA_FORK))) {
+		goto out;
+	}
+	block = dbp->data;
+	ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC);
+	/*
+	 * Size of the "leaf" area in the block.
+	 */
+	size = (uint)sizeof(block->tail) +
+	       (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT));
+	/*
+	 * Look at the last data entry.
+	 */
+	tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1;
+	dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT));
+	/*
+	 * If it's not free or is too short we can't do it.
+	 */
+	if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) {
+		error = 0;
+		goto out;
+	}
+	/*
+	 * Start converting it to block form.
+	 */
+	INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC);
+	needlog = 1;
+	needscan = 0;
+	/*
+	 * Use up the space at the end of the block (blp/btp).
+	 */
+	xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
+		&needlog, &needscan);
+	/*
+	 * Initialize the block tail.
+	 */
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT));
+	btp->stale = 0;
+	xfs_dir2_block_log_tail(tp, dbp);
+	/*
+	 * Initialize the block leaf area.  We compact out stale entries.
+	 */
+	lep = XFS_DIR2_BLOCK_LEAF_P(btp);
+	for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) {
+		if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		lep[to++] = leaf->ents[from];
+	}
+	ASSERT(to == INT_GET(btp->count, ARCH_CONVERT));
+	xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1);
+	/*
+	 * Scan the bestfree if we need it and log the data block header.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog,
+			NULL);
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	/*
+	 * Pitch the old leaf block.
+	 */
+	error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
+	lbp = NULL;
+	if (error) {
+		goto out;
+	}
+	/*
+	 * Now see if the resulting block can be shrunk to shortform.
+	 */
+	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
+	    XFS_IFORK_DSIZE(dp)) {
+		error = 0;
+		goto out;
+	}
+	return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
+out:
+	if (lbp)
+		xfs_da_buf_done(lbp);
+	if (dbp)
+		xfs_da_buf_done(dbp);
+	return error;
+}
+
+/*
+ * Convert the shortform directory to block form.
+ */
+int						/* error */
+xfs_dir2_sf_to_block(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
+	char			*buf;		/* sf buffer */
+	int			buf_len;
+	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			dummy;		/* trash */
+	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
+	int			endoffset;	/* end of data objects */
+	int			error;		/* error return value */
+	int			i;		/* index */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log block header */
+	int			needscan;	/* need to scan block freespc */
+	int			newoffset;	/* offset from current entry */
+	int			offset;		/* target block offset */
+	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+	xfs_dir2_data_off_t	*tagp;		/* end of data entry */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args("sf_to_block", args);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Bomb out if the shortform directory is way too short.
+	 */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+	/*
+	 * Copy the directory into a temporary heap buffer.
+	 * Then pitch the incore inode data so we can make extents.
+	 */
+
+	buf_len = dp->i_df.if_bytes;
+	buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
+
+	memcpy(buf, sfp, dp->i_df.if_bytes);
+	xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
+	dp->i_d.di_size = 0;
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+	/*
+	 * Reset pointer - old sfp is gone.
+	 */
+	sfp = (xfs_dir2_sf_t *)buf;
+	/*
+	 * Add block 0 to the inode.
+	 */
+	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
+	if (error) {
+		kmem_free(buf, buf_len);
+		return error;
+	}
+	/*
+	 * Initialize the data block.
+	 */
+	error = xfs_dir2_data_init(args, blkno, &bp);
+	if (error) {
+		kmem_free(buf, buf_len);
+		return error;
+	}
+	block = bp->data;
+	INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC);
+	/*
+	 * Compute size of block "tail" area.
+	 */
+	i = (uint)sizeof(*btp) +
+	    (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
+	/*
+	 * The whole thing is initialized to free by the init routine.
+	 * Say we're using the leaf and tail area.
+	 */
+	dup = (xfs_dir2_data_unused_t *)block->u;
+	needlog = needscan = 0;
+	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
+		&needscan);
+	ASSERT(needscan == 0);
+	/*
+	 * Fill in the tail.
+	 */
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2);	/* ., .. */
+	btp->stale = 0;
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	endoffset = (uint)((char *)blp - (char *)block);
+	/*
+	 * Remove the freespace, we'll manage it.
+	 */
+	xfs_dir2_data_use_free(tp, bp, dup,
+		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
+		INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan);
+	/*
+	 * Create entry for .
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
+	INT_SET(dep->inumber, ARCH_CONVERT, dp->i_ino);
+	dep->namelen = 1;
+	dep->name[0] = '.';
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+	xfs_dir2_data_log_entry(tp, bp, dep);
+	INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot);
+	INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block));
+	/*
+	 * Create entry for ..
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
+	INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
+	dep->namelen = 2;
+	dep->name[0] = dep->name[1] = '.';
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+	xfs_dir2_data_log_entry(tp, bp, dep);
+	INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot);
+	INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block));
+	offset = XFS_DIR2_DATA_FIRST_OFFSET;
+	/*
+	 * Loop over existing entries, stuff them in.
+	 */
+	if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT))
+		sfep = NULL;
+	else
+		sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	/*
+	 * Need to preserve the existing offset values in the sf directory.
+	 * Insert holes (unused entries) where necessary.
+	 */
+	while (offset < endoffset) {
+		/*
+		 * sfep is null when we reach the end of the list.
+		 */
+		if (sfep == NULL)
+			newoffset = endoffset;
+		else
+			newoffset = XFS_DIR2_SF_GET_OFFSET(sfep);
+		/*
+		 * There should be a hole here, make one.
+		 */
+		if (offset < newoffset) {
+			dup = (xfs_dir2_data_unused_t *)
+			      ((char *)block + offset);
+			INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+			INT_SET(dup->length, ARCH_CONVERT, newoffset - offset);
+			INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT,
+				(xfs_dir2_data_off_t)
+				((char *)dup - (char *)block));
+			xfs_dir2_data_log_unused(tp, bp, dup);
+			(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
+				dup, &dummy);
+			offset += INT_GET(dup->length, ARCH_CONVERT);
+			continue;
+		}
+		/*
+		 * Copy a real entry.
+		 */
+		dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
+		INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER(sfp,
+				XFS_DIR2_SF_INUMBERP(sfep)));
+		dep->namelen = sfep->namelen;
+		memcpy(dep->name, sfep->name, dep->namelen);
+		tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+		INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+		xfs_dir2_data_log_entry(tp, bp, dep);
+		INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen));
+		INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp,
+						 (char *)dep - (char *)block));
+		offset = (int)((char *)(tagp + 1) - (char *)block);
+		if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT))
+			sfep = NULL;
+		else
+			sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+	}
+	/* Done with the temporary buffer */
+	kmem_free(buf, buf_len);
+	/*
+	 * Sort the leaf entries by hash value.
+	 */
+	qsort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort);
+	/*
+	 * Log the leaf entry area and tail.
+	 * Already logged the header in data_init, ignore needlog.
+	 */
+	ASSERT(needscan == 0);
+	xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1);
+	xfs_dir2_block_log_tail(tp, bp);
+	xfs_dir2_data_check(dp, bp);
+	xfs_da_buf_done(bp);
+	return 0;
+}
diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h
new file mode 100644
index 0000000..5a578b8
--- /dev/null
+++ b/fs/xfs/xfs_dir2_block.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_BLOCK_H__
+#define	__XFS_DIR2_BLOCK_H__
+
+/*
+ * xfs_dir2_block.h
+ * Directory version 2, single block format structures
+ */
+
+struct uio;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_dir2_data_hdr;
+struct xfs_dir2_leaf_entry;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * The single block format is as follows:
+ * xfs_dir2_data_hdr_t structure
+ * xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures
+ * xfs_dir2_leaf_entry_t structures
+ * xfs_dir2_block_tail_t structure
+ */
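+
+/*
+ * Rough picture of one directory block (byte offsets grow downward;
+ * entry sizes vary, so this is only a sketch):
+ *
+ *	+-------------------------------+  offset 0
+ *	| xfs_dir2_data_hdr_t           |
+ *	+-------------------------------+
+ *	| data entries / unused entries |
+ *	|       (variable length)       |
+ *	+-------------------------------+
+ *	| xfs_dir2_leaf_entry_t array   |  grows toward lower offsets
+ *	+-------------------------------+
+ *	| xfs_dir2_block_tail_t         |
+ *	+-------------------------------+  mp->m_dirblksize
+ */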
+
+#define	XFS_DIR2_BLOCK_MAGIC	0x58443242	/* XD2B: for one block dirs */
+
+typedef struct xfs_dir2_block_tail {
+	__uint32_t	count;			/* count of leaf entries */
+	__uint32_t	stale;			/* count of stale lf entries */
+} xfs_dir2_block_tail_t;
+
+/*
+ * Generic single-block structure, for xfs_db.
+ */
+typedef struct xfs_dir2_block {
+	xfs_dir2_data_hdr_t	hdr;		/* magic XFS_DIR2_BLOCK_MAGIC */
+	xfs_dir2_data_union_t	u[1];
+	xfs_dir2_leaf_entry_t	leaf[1];
+	xfs_dir2_block_tail_t	tail;
+} xfs_dir2_block_t;
+
+/*
+ * Pointer to the block tail embedded in a data block (1-block format)
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_TAIL_P)
+xfs_dir2_block_tail_t *
+xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block);
+#define	XFS_DIR2_BLOCK_TAIL_P(mp,block)	xfs_dir2_block_tail_p(mp,block)
+#else
+#define	XFS_DIR2_BLOCK_TAIL_P(mp,block)	\
+	(((xfs_dir2_block_tail_t *)((char *)(block) + (mp)->m_dirblksize)) - 1)
+#endif
+
+/*
+ * Pointer to the leaf entries embedded in a data block (1-block format)
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_LEAF_P)
+struct xfs_dir2_leaf_entry *xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp);
+#define	XFS_DIR2_BLOCK_LEAF_P(btp) \
+	xfs_dir2_block_leaf_p(btp)
+#else
+#define	XFS_DIR2_BLOCK_LEAF_P(btp)	\
+	(((struct xfs_dir2_leaf_entry *)(btp)) - INT_GET((btp)->count, ARCH_CONVERT))
+#endif
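+
+/*
+ * For example, with a 4096-byte directory block the tail occupies the
+ * last 8 bytes of the block and the "count" leaf entries end
+ * immediately before it, growing toward lower offsets as entries are
+ * added.
+ */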
+
+/*
+ * Function declarations.
+ */
+
+extern int
+	xfs_dir2_block_addname(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_getdents(struct xfs_trans *tp, struct xfs_inode *dp,
+				struct uio *uio, int *eofp, struct xfs_dirent *dbp,
+				xfs_dir2_put_t put);
+
+extern int
+	xfs_dir2_block_lookup(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_removename(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_replace(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_leaf_to_block(struct xfs_da_args *args, struct xfs_dabuf *lbp,
+			       struct xfs_dabuf *dbp);
+
+extern int
+	xfs_dir2_sf_to_block(struct xfs_da_args *args);
+
+#endif	/* __XFS_DIR2_BLOCK_H__ */
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
new file mode 100644
index 0000000..db9887a
--- /dev/null
+++ b/fs/xfs/xfs_dir2_data.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_data.c
+ * Core data block handling routines for XFS V2 directories.
+ * See xfs_dir2_data.h for data structures.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_error.h"
+
+#ifdef DEBUG
+/*
+ * Check the consistency of the data block.
+ * The input can also be a block-format directory.
+ * Pop an assert if we find anything bad.
+ */
+void
+xfs_dir2_data_check(
+	xfs_inode_t		*dp,		/* incore inode pointer */
+	xfs_dabuf_t		*bp)		/* data block's buffer */
+{
+	xfs_dir2_dataptr_t	addr;		/* addr for leaf lookup */
+	xfs_dir2_data_free_t	*bf;		/* bestfree table */
+	xfs_dir2_block_tail_t	*btp=NULL;	/* block tail */
+	int			count;		/* count of entries found */
+	xfs_dir2_data_t		*d;		/* data block pointer */
+	xfs_dir2_data_entry_t	*dep;		/* data entry */
+	xfs_dir2_data_free_t	*dfp;		/* bestfree entry */
+	xfs_dir2_data_unused_t	*dup;		/* unused entry */
+	char			*endp;		/* end of useful data */
+	int			freeseen;	/* mask of bestfrees seen */
+	xfs_dahash_t		hash;		/* hash of current name */
+	int			i;		/* leaf index */
+	int			lastfree;	/* last entry was unused */
+	xfs_dir2_leaf_entry_t	*lep=NULL;	/* block leaf entries */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	char			*p;		/* current data position */
+	int			stale;		/* count of stale leaves */
+
+	mp = dp->i_mount;
+	d = bp->data;
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	bf = d->hdr.bestfree;
+	p = (char *)d->u;
+	if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) {
+		btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
+		lep = XFS_DIR2_BLOCK_LEAF_P(btp);
+		endp = (char *)lep;
+	} else
+		endp = (char *)d + mp->m_dirblksize;
+	count = lastfree = freeseen = 0;
+	/*
+	 * Account for zero bestfree entries.
+	 */
+	if (!bf[0].length) {
+		ASSERT(!bf[0].offset);
+		freeseen |= 1 << 0;
+	}
+	if (!bf[1].length) {
+		ASSERT(!bf[1].offset);
+		freeseen |= 1 << 1;
+	}
+	if (!bf[2].length) {
+		ASSERT(!bf[2].offset);
+		freeseen |= 1 << 2;
+	}
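+	/*
+	 * The bestfree table must be sorted by length, largest first.
+	 */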
+	ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT));
+	ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT));
+	/*
+	 * Loop over the data/unused entries.
+	 */
+	while (p < endp) {
+		dup = (xfs_dir2_data_unused_t *)p;
+		/*
+		 * If it's unused, look for the space in the bestfree table.
+		 * If we find it, account for that, else make sure it
+		 * doesn't need to be there.
+		 */
+		if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) {
+			ASSERT(lastfree == 0);
+			ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT) ==
+			       (char *)dup - (char *)d);
+			dfp = xfs_dir2_data_freefind(d, dup);
+			if (dfp) {
+				i = (int)(dfp - bf);
+				ASSERT((freeseen & (1 << i)) == 0);
+				freeseen |= 1 << i;
+			} else
+				ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT));
+			p += INT_GET(dup->length, ARCH_CONVERT);
+			lastfree = 1;
+			continue;
+		}
+		/*
+		 * It's a real entry.  Validate the fields.
+		 * If this is a block directory then make sure it's
+		 * in the leaf section of the block.
+		 * The linear search is crude but this is DEBUG code.
+		 */
+		dep = (xfs_dir2_data_entry_t *)p;
+		ASSERT(dep->namelen != 0);
+		ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0);
+		ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) ==
+		       (char *)dep - (char *)d);
+		count++;
+		lastfree = 0;
+		if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) {
+			addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+				(xfs_dir2_data_aoff_t)
+				((char *)dep - (char *)d));
+			hash = xfs_da_hashname((char *)dep->name, dep->namelen);
+			for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) {
+				if (INT_GET(lep[i].address, ARCH_CONVERT) == addr &&
+				    INT_GET(lep[i].hashval, ARCH_CONVERT) == hash)
+					break;
+			}
+			ASSERT(i < INT_GET(btp->count, ARCH_CONVERT));
+		}
+		p += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
+	}
+	/*
+	 * Need to have seen all the entries and all the bestfree slots.
+	 */
+	ASSERT(freeseen == 7);
+	if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) {
+		for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) {
+			if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+				stale++;
+			if (i > 0)
+				ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT));
+		}
+		ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT));
+		ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT));
+	}
+}
+#endif
+
+/*
+ * Given a data block and an unused entry from that block,
+ * return the bestfree entry if any that corresponds to it.
+ */
+xfs_dir2_data_free_t *
+xfs_dir2_data_freefind(
+	xfs_dir2_data_t		*d,		/* data block */
+	xfs_dir2_data_unused_t	*dup)		/* data unused entry */
+{
+	xfs_dir2_data_free_t	*dfp;		/* bestfree entry */
+	xfs_dir2_data_aoff_t	off;		/* offset value needed */
+#if defined(DEBUG) && defined(__KERNEL__)
+	int			matched;	/* matched the value */
+	int			seenzero;	/* saw a 0 bestfree entry */
+#endif
+
+	off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d);
+#if defined(DEBUG) && defined(__KERNEL__)
+	/*
+	 * Validate some consistency in the bestfree table.
+	 * Check the ordering, that entries don't overlap, and that if we
+	 * find the one we're looking for it matches exactly.
+	 */
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0;
+	     dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
+	     dfp++) {
+		if (!dfp->offset) {
+			ASSERT(!dfp->length);
+			seenzero = 1;
+			continue;
+		}
+		ASSERT(seenzero == 0);
+		if (INT_GET(dfp->offset, ARCH_CONVERT) == off) {
+			matched = 1;
+			ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT));
+		} else if (off < INT_GET(dfp->offset, ARCH_CONVERT))
+			ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT));
+		else
+			ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off);
+		ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT));
+		if (dfp > &d->hdr.bestfree[0])
+			ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT));
+	}
+#endif
+	/*
+	 * If this is smaller than the smallest bestfree entry,
+	 * it can't be there since they're sorted.
+	 */
+	if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT))
+		return NULL;
+	/*
+	 * Look at the three bestfree entries for our guy.
+	 */
+	for (dfp = &d->hdr.bestfree[0];
+	     dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
+	     dfp++) {
+		if (!dfp->offset)
+			return NULL;
+		if (INT_GET(dfp->offset, ARCH_CONVERT) == off)
+			return dfp;
+	}
+	/*
+	 * Didn't find it.  This only happens if there are duplicate lengths.
+	 */
+	return NULL;
+}
+
+/*
+ * Insert an unused-space entry into the bestfree table.
+ */
+xfs_dir2_data_free_t *				/* entry inserted */
+xfs_dir2_data_freeinsert(
+	xfs_dir2_data_t		*d,		/* data block pointer */
+	xfs_dir2_data_unused_t	*dup,		/* unused space */
+	int			*loghead)	/* log the data header (out) */
+{
+	xfs_dir2_data_free_t	*dfp;		/* bestfree table pointer */
+	xfs_dir2_data_free_t	new;		/* new bestfree entry */
+
+#ifdef __KERNEL__
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+#endif
+	dfp = d->hdr.bestfree;
+	INT_COPY(new.length, dup->length, ARCH_CONVERT);
+	INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d));
+	/*
+	 * Insert at position 0, 1, or 2, keeping the table sorted by
+	 * length (largest first); anything smaller than all three
+	 * entries isn't tracked at all.
+	 */
+	if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) {
+		dfp[2] = dfp[1];
+		dfp[1] = dfp[0];
+		dfp[0] = new;
+		*loghead = 1;
+		return &dfp[0];
+	}
+	if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) {
+		dfp[2] = dfp[1];
+		dfp[1] = new;
+		*loghead = 1;
+		return &dfp[1];
+	}
+	if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) {
+		dfp[2] = new;
+		*loghead = 1;
+		return &dfp[2];
+	}
+	return NULL;
+}
+
+/*
+ * Remove a bestfree entry from the table.
+ */
+void
+xfs_dir2_data_freeremove(
+	xfs_dir2_data_t		*d,		/* data block pointer */
+	xfs_dir2_data_free_t	*dfp,		/* bestfree entry pointer */
+	int			*loghead)	/* out: log data header */
+{
+#ifdef __KERNEL__
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+#endif
+	/*
+	 * It's the first entry, slide the next 2 up.
+	 */
+	if (dfp == &d->hdr.bestfree[0]) {
+		d->hdr.bestfree[0] = d->hdr.bestfree[1];
+		d->hdr.bestfree[1] = d->hdr.bestfree[2];
+	}
+	/*
+	 * It's the second entry, slide the 3rd entry up.
+	 */
+	else if (dfp == &d->hdr.bestfree[1])
+		d->hdr.bestfree[1] = d->hdr.bestfree[2];
+	/*
+	 * Must be the last entry.
+	 */
+	else
+		ASSERT(dfp == &d->hdr.bestfree[2]);
+	/*
+	 * Clear the 3rd entry, must be zero now.
+	 */
+	d->hdr.bestfree[2].length = 0;
+	d->hdr.bestfree[2].offset = 0;
+	*loghead = 1;
+}
+
+/*
+ * Given a data block, reconstruct its bestfree map.
+ */
+void
+xfs_dir2_data_freescan(
+	xfs_mount_t		*mp,		/* filesystem mount point */
+	xfs_dir2_data_t		*d,		/* data block pointer */
+	int			*loghead,	/* out: log data header */
+	char			*aendp)		/* in: caller's endp */
+{
+	xfs_dir2_block_tail_t	*btp;		/* block tail */
+	xfs_dir2_data_entry_t	*dep;		/* active data entry */
+	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
+	char			*endp;		/* end of block's data */
+	char			*p;		/* current entry pointer */
+
+#ifdef __KERNEL__
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+#endif
+	/*
+	 * Start by clearing the table.
+	 */
+	memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree));
+	*loghead = 1;
+	/*
+	 * Set up pointers.
+	 */
+	p = (char *)d->u;
+	if (aendp)
+		endp = aendp;
+	else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) {
+		btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
+		endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
+	} else
+		endp = (char *)d + mp->m_dirblksize;
+	/*
+	 * Loop over the block's entries.
+	 */
+	while (p < endp) {
+		dup = (xfs_dir2_data_unused_t *)p;
+		/*
+		 * If it's a free entry, insert it.
+		 */
+		if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) {
+			ASSERT((char *)dup - (char *)d ==
+			       INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT));
+			xfs_dir2_data_freeinsert(d, dup, loghead);
+			p += INT_GET(dup->length, ARCH_CONVERT);
+		}
+		/*
+		 * For active entries, check their tags and skip them.
+		 */
+		else {
+			dep = (xfs_dir2_data_entry_t *)p;
+			ASSERT((char *)dep - (char *)d ==
+			       INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT));
+			p += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
+		}
+	}
+}
+
+/*
+ * Initialize a data block at the given block number in the directory.
+ * Give back the buffer for the created block.
+ */
+int						/* error */
+xfs_dir2_data_init(
+	xfs_da_args_t		*args,		/* directory operation args */
+	xfs_dir2_db_t		blkno,		/* logical dir block number */
+	xfs_dabuf_t		**bpp)		/* output block buffer */
+{
+	xfs_dabuf_t		*bp;		/* block buffer */
+	xfs_dir2_data_t		*d;		/* pointer to block */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
+	int			error;		/* error return value */
+	int			i;		/* bestfree index */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+	int			t;		/* temp */
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	/*
+	 * Get the buffer set up for the block.
+	 */
+	error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, blkno), -1, &bp,
+		XFS_DATA_FORK);
+	if (error) {
+		return error;
+	}
+	ASSERT(bp != NULL);
+	/*
+	 * Initialize the header.
+	 */
+	d = bp->data;
+	INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC);
+	INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr));
+	for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
+		d->hdr.bestfree[i].length = 0;
+		d->hdr.bestfree[i].offset = 0;
+	}
+	/*
+	 * Set up an unused entry for the block's body.
+	 */
+	dup = &d->u[0].unused;
+	INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+
+	t = mp->m_dirblksize - (uint)sizeof(d->hdr);
+	INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t);
+	INT_SET(dup->length, ARCH_CONVERT, t);
+	INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT,
+		(xfs_dir2_data_off_t)((char *)dup - (char *)d));
+	/*
+	 * Log it and return it.
+	 */
+	xfs_dir2_data_log_header(tp, bp);
+	xfs_dir2_data_log_unused(tp, bp, dup);
+	*bpp = bp;
+	return 0;
+}
+
+/*
+ * Log an active data entry from the block.
+ */
+void
+xfs_dir2_data_log_entry(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* block buffer */
+	xfs_dir2_data_entry_t	*dep)		/* data entry pointer */
+{
+	xfs_dir2_data_t		*d;		/* data block pointer */
+
+	d = bp->data;
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d),
+		(uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) -
+		       (char *)d - 1));
+}
+
+/*
+ * Log a data block header.
+ */
+void
+xfs_dir2_data_log_header(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp)		/* block buffer */
+{
+	xfs_dir2_data_t		*d;		/* data block pointer */
+
+	d = bp->data;
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d),
+		(uint)(sizeof(d->hdr) - 1));
+}
+
+/*
+ * Log a data unused entry.
+ */
+void
+xfs_dir2_data_log_unused(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* block buffer */
+	xfs_dir2_data_unused_t	*dup)		/* data unused pointer */
+{
+	xfs_dir2_data_t		*d;		/* data block pointer */
+
+	d = bp->data;
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	/*
+	 * Log the first part of the unused entry.
+	 */
+	xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d),
+		(uint)((char *)&dup->length + sizeof(dup->length) -
+		       1 - (char *)d));
+	/*
+	 * Log the end (tag) of the unused entry.
+	 */
+	xfs_da_log_buf(tp, bp,
+		(uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d),
+		(uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d +
+		       sizeof(xfs_dir2_data_off_t) - 1));
+}
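+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the two
+ * byte ranges the routine above logs for an unused entry -- the
+ * freetag/length words at its front and the 2-byte tag at its end --
+ * skipping the dead space in between.  Offsets are relative to the
+ * start of the data block; the 2-byte field sizes come from
+ * xfs_dir2_data.h.
+ */
+static void ex_unused_log_ranges(
+	unsigned int	dup_off,	/* offset of the unused entry */
+	unsigned int	dup_len,	/* its total length */
+	unsigned int	range[2][2])	/* out: two [first, last] ranges */
+{
+	range[0][0] = dup_off;			/* freetag... */
+	range[0][1] = dup_off + 2 + 2 - 1;	/* ...through length */
+	range[1][0] = dup_off + dup_len - 2;	/* tag is the last 2 bytes */
+	range[1][1] = dup_off + dup_len - 1;
+}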
+
+/*
+ * Make a byte range in the data block unused.
+ * Its current contents are unimportant.
+ */
+void
+xfs_dir2_data_make_free(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* block buffer */
+	xfs_dir2_data_aoff_t	offset,		/* starting byte offset */
+	xfs_dir2_data_aoff_t	len,		/* length in bytes */
+	int			*needlogp,	/* out: log header */
+	int			*needscanp)	/* out: regen bestfree */
+{
+	xfs_dir2_data_t		*d;		/* data block pointer */
+	xfs_dir2_data_free_t	*dfp;		/* bestfree pointer */
+	char			*endptr;	/* end of data area */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needscan;	/* need to regen bestfree */
+	xfs_dir2_data_unused_t	*newdup;	/* new unused entry */
+	xfs_dir2_data_unused_t	*postdup;	/* unused entry after us */
+	xfs_dir2_data_unused_t	*prevdup;	/* unused entry before us */
+
+	mp = tp->t_mountp;
+	d = bp->data;
+	/*
+	 * Figure out where the end of the data area is.
+	 */
+	if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC)
+		endptr = (char *)d + mp->m_dirblksize;
+	else {
+		xfs_dir2_block_tail_t	*btp;	/* block tail */
+
+		ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+		btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
+		endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
+	}
+	/*
+	 * If this isn't the start of the block, then back up to
+	 * the previous entry and see if it's free.
+	 */
+	if (offset > sizeof(d->hdr)) {
+		xfs_dir2_data_off_t	*tagp;	/* tag just before us */
+
+		tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1;
+		prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT));
+		if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG)
+			prevdup = NULL;
+	} else
+		prevdup = NULL;
+	/*
+	 * If this isn't the end of the block, see if the entry after
+	 * us is free.
+	 */
+	if ((char *)d + offset + len < endptr) {
+		postdup =
+			(xfs_dir2_data_unused_t *)((char *)d + offset + len);
+		if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG)
+			postdup = NULL;
+	} else
+		postdup = NULL;
+	ASSERT(*needscanp == 0);
+	needscan = 0;
+	/*
+	 * Previous and following entries are both free,
+	 * merge everything into a single free entry.
+	 */
+	if (prevdup && postdup) {
+		xfs_dir2_data_free_t	*dfp2;	/* another bestfree pointer */
+
+		/*
+		 * See if prevdup and/or postdup are in bestfree table.
+		 */
+		dfp = xfs_dir2_data_freefind(d, prevdup);
+		dfp2 = xfs_dir2_data_freefind(d, postdup);
+		/*
+		 * We need a rescan unless there are exactly two free entries,
+		 * namely our two.  In that case we know what's happening;
+		 * otherwise, since the third bestfree slot is in use, there
+		 * may be more free entries than the table can track.
+		 */
+		needscan = d->hdr.bestfree[2].length;
+		/*
+		 * Fix up the new big freespace.
+		 */
+		INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT));
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)prevdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, prevdup);
+		if (!needscan) {
+			/*
+			 * Has to be the case that entries 0 and 1 are
+			 * dfp and dfp2 (don't know which is which), and
+			 * entry 2 is empty.
+			 * Remove entry 1 first then entry 0.
+			 */
+			ASSERT(dfp && dfp2);
+			if (dfp == &d->hdr.bestfree[1]) {
+				dfp = &d->hdr.bestfree[0];
+				ASSERT(dfp2 == dfp);
+				dfp2 = &d->hdr.bestfree[1];
+			}
+			xfs_dir2_data_freeremove(d, dfp2, needlogp);
+			xfs_dir2_data_freeremove(d, dfp, needlogp);
+			/*
+			 * Now insert the new entry.
+			 */
+			dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp);
+			ASSERT(dfp == &d->hdr.bestfree[0]);
+			ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT));
+			ASSERT(!dfp[1].length);
+			ASSERT(!dfp[2].length);
+		}
+	}
+	/*
+	 * The entry before us is free, merge with it.
+	 */
+	else if (prevdup) {
+		dfp = xfs_dir2_data_freefind(d, prevdup);
+		INT_MOD(prevdup->length, ARCH_CONVERT, len);
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)prevdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, prevdup);
+		/*
+		 * If the previous entry was in the table, the new entry
+		 * is longer, so it will be in the table too.  Remove
+		 * the old one and add the new one.
+		 */
+		if (dfp) {
+			xfs_dir2_data_freeremove(d, dfp, needlogp);
+			(void)xfs_dir2_data_freeinsert(d, prevdup, needlogp);
+		}
+		/*
+		 * Otherwise we need a scan if the new entry is big enough.
+		 */
+		else
+			needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT);
+	}
+	/*
+	 * The following entry is free, merge with it.
+	 */
+	else if (postdup) {
+		dfp = xfs_dir2_data_freefind(d, postdup);
+		newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
+		INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+		INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT));
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup);
+		/*
+		 * If the following entry was in the table, the new entry
+		 * is longer, so it will be in the table too.  Remove
+		 * the old one and add the new one.
+		 */
+		if (dfp) {
+			xfs_dir2_data_freeremove(d, dfp, needlogp);
+			(void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
+		}
+		/*
+		 * Otherwise we need a scan if the new entry is big enough.
+		 */
+		else
+			needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT);
+	}
+	/*
+	 * Neither neighbor is free.  Make a new entry.
+	 */
+	else {
+		newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
+		INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+		INT_SET(newdup->length, ARCH_CONVERT, len);
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup);
+		(void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
+	}
+	*needscanp = needscan;
+}
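+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the four
+ * coalescing cases the routine above distinguishes, reduced to the
+ * length arithmetic.  prev_len/post_len are the lengths of the free
+ * neighbors (0 when the neighbor is in use, i.e. prevdup/postdup was
+ * NULL above); the return value is the length of the resulting
+ * unused entry covering the freed range.
+ */
+static unsigned int ex_merged_free_len(
+	unsigned int	len,		/* length being freed */
+	unsigned int	prev_len,	/* free space just before, or 0 */
+	unsigned int	post_len)	/* free space just after, or 0 */
+{
+	if (prev_len && post_len)
+		return prev_len + len + post_len; /* one big span */
+	if (prev_len)
+		return prev_len + len;	/* appended to the previous entry */
+	if (post_len)
+		return len + post_len;	/* new entry absorbing the next one */
+	return len;			/* brand new unused entry */
+}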
+
+/*
+ * Take a byte range out of an existing unused space and make it un-free.
+ */
+void
+xfs_dir2_data_use_free(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* data block buffer */
+	xfs_dir2_data_unused_t	*dup,		/* unused entry */
+	xfs_dir2_data_aoff_t	offset,		/* starting offset to use */
+	xfs_dir2_data_aoff_t	len,		/* length to use */
+	int			*needlogp,	/* out: need to log header */
+	int			*needscanp)	/* out: need regen bestfree */
+{
+	xfs_dir2_data_t		*d;		/* data block */
+	xfs_dir2_data_free_t	*dfp;		/* bestfree pointer */
+	int			matchback;	/* matches end of freespace */
+	int			matchfront;	/* matches start of freespace */
+	int			needscan;	/* need to regen bestfree */
+	xfs_dir2_data_unused_t	*newdup;	/* new unused entry */
+	xfs_dir2_data_unused_t	*newdup2;	/* another new unused entry */
+	int			oldlen;		/* old unused entry's length */
+
+	d = bp->data;
+	ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ||
+	       INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC);
+	ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG);
+	ASSERT(offset >= (char *)dup - (char *)d);
+	ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d);
+	ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT));
+	/*
+	 * Look up the entry in the bestfree table.
+	 */
+	dfp = xfs_dir2_data_freefind(d, dup);
+	oldlen = INT_GET(dup->length, ARCH_CONVERT);
+	ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT));
+	/*
+	 * Check for alignment with front and back of the entry.
+	 */
+	matchfront = (char *)dup - (char *)d == offset;
+	matchback = (char *)dup + oldlen - (char *)d == offset + len;
+	ASSERT(*needscanp == 0);
+	needscan = 0;
+	/*
+	 * If we matched it exactly we just need to get rid of it from
+	 * the bestfree table.
+	 */
+	if (matchfront && matchback) {
+		if (dfp) {
+			needscan = d->hdr.bestfree[2].offset;
+			if (!needscan)
+				xfs_dir2_data_freeremove(d, dfp, needlogp);
+		}
+	}
+	/*
+	 * We match the first part of the entry.
+	 * Make a new entry with the remaining freespace.
+	 */
+	else if (matchfront) {
+		newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
+		INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+		INT_SET(newdup->length, ARCH_CONVERT, oldlen - len);
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup);
+		/*
+		 * If it was in the table, remove it and add the new one.
+		 */
+		if (dfp) {
+			xfs_dir2_data_freeremove(d, dfp, needlogp);
+			dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
+			ASSERT(dfp != NULL);
+			ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT));
+			ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d);
+			/*
+			 * If we got inserted at the last slot, we don't
+			 * know whether a better (longer) choice for that
+			 * slot exists elsewhere in the block.  Rescan.
+			 */
+			needscan = dfp == &d->hdr.bestfree[2];
+		}
+	}
+	/*
+	 * We match the last part of the entry.
+	 * Trim the allocated space off the tail of the entry.
+	 */
+	else if (matchback) {
+		newdup = dup;
+		INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t)
+			(((char *)d + offset) - (char *)newdup));
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup);
+		/*
+		 * If it was in the table, remove it and add the new one.
+		 */
+		if (dfp) {
+			xfs_dir2_data_freeremove(d, dfp, needlogp);
+			dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
+			ASSERT(dfp != NULL);
+			ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT));
+			ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d);
+			/*
+			 * If we got inserted at the last slot, we don't
+			 * know whether a better (longer) choice for that
+			 * slot exists elsewhere in the block.  Rescan.
+			 */
+			needscan = dfp == &d->hdr.bestfree[2];
+		}
+	}
+	/*
+	 * Poking out the middle of an entry.
+	 * Make two new entries.
+	 */
+	else {
+		newdup = dup;
+		INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t)
+			(((char *)d + offset) - (char *)newdup));
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup);
+		newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
+		INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG);
+		INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT));
+		INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup2), ARCH_CONVERT,
+			(xfs_dir2_data_off_t)((char *)newdup2 - (char *)d));
+		xfs_dir2_data_log_unused(tp, bp, newdup2);
+		/*
+		 * If the old entry was in the table, we need to rescan
+		 * if the third bestfree slot was in use, since the two
+		 * new entries are smaller than the old one.
+		 * If no rescan is needed there were at most two entries
+		 * in the table, so removing the old one and adding the
+		 * two new ones will work.
+		 */
+		if (dfp) {
+			needscan = d->hdr.bestfree[2].length;
+			if (!needscan) {
+				xfs_dir2_data_freeremove(d, dfp, needlogp);
+				(void)xfs_dir2_data_freeinsert(d, newdup,
+					needlogp);
+				(void)xfs_dir2_data_freeinsert(d, newdup2,
+					needlogp);
+			}
+		}
+	}
+	*needscanp = needscan;
+}
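+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: how the
+ * routine above carves [offset, offset + len) out of a free region
+ * starting at fstart with length flen.  The two leftover lengths map
+ * onto the cases above: a zero front leftover is matchfront, a zero
+ * back leftover is matchback, both zero is the exact match, and both
+ * nonzero is the "poke out the middle" case that creates two new
+ * unused entries.
+ */
+static void ex_carve_free(
+	unsigned int	fstart,		/* start of the free region */
+	unsigned int	flen,		/* length of the free region */
+	unsigned int	offset,		/* start of the piece being used */
+	unsigned int	len,		/* length of the piece being used */
+	unsigned int	*front_left,	/* out: free bytes left in front */
+	unsigned int	*back_left)	/* out: free bytes left behind */
+{
+	*front_left = offset - fstart;
+	*back_left = (fstart + flen) - (offset + len);
+}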
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
new file mode 100644
index 0000000..3f02294
--- /dev/null
+++ b/fs/xfs/xfs_dir2_data.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_DATA_H__
+#define	__XFS_DIR2_DATA_H__
+
+/*
+ * Directory format 2, data block structures.
+ */
+
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_inode;
+struct xfs_trans;
+
+/*
+ * Constants.
+ */
+#define	XFS_DIR2_DATA_MAGIC	0x58443244	/* XD2D: for multiblock dirs */
+#define	XFS_DIR2_DATA_ALIGN_LOG	3		/* i.e., 8 bytes */
+#define	XFS_DIR2_DATA_ALIGN	(1 << XFS_DIR2_DATA_ALIGN_LOG)
+#define	XFS_DIR2_DATA_FREE_TAG	0xffff
+#define	XFS_DIR2_DATA_FD_COUNT	3
+
+/*
+ * The directory address space is divided into sections,
+ * each 32GB in size.
+ */
+#define	XFS_DIR2_SPACE_SIZE	(1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
+#define	XFS_DIR2_DATA_SPACE	0
+#define	XFS_DIR2_DATA_OFFSET	(XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
+#define	XFS_DIR2_DATA_FIRSTDB(mp)	\
+	XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATA_OFFSET)
+
+/*
+ * Offsets of . and .. in data space (always block 0)
+ */
+#define	XFS_DIR2_DATA_DOT_OFFSET	\
+	((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t))
+#define	XFS_DIR2_DATA_DOTDOT_OFFSET	\
+	(XFS_DIR2_DATA_DOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(1))
+#define	XFS_DIR2_DATA_FIRST_OFFSET		\
+	(XFS_DIR2_DATA_DOTDOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(2))
+
+/*
+ * Structures.
+ */
+
+/*
+ * Describe a free area in the data block.
+ * The freespace will be formatted as a xfs_dir2_data_unused_t.
+ */
+typedef struct xfs_dir2_data_free {
+	xfs_dir2_data_off_t	offset;		/* start of freespace */
+	xfs_dir2_data_off_t	length;		/* length of freespace */
+} xfs_dir2_data_free_t;
+
+/*
+ * Header for the data blocks.
+ * Always at the beginning of a directory-sized block.
+ * The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
+ */
+typedef struct xfs_dir2_data_hdr {
+	__uint32_t		magic;		/* XFS_DIR2_DATA_MAGIC */
+						/* or XFS_DIR2_BLOCK_MAGIC */
+	xfs_dir2_data_free_t	bestfree[XFS_DIR2_DATA_FD_COUNT];
+} xfs_dir2_data_hdr_t;
+
+/*
+ * Active entry in a data block.  Aligned to 8 bytes.
+ * Tag appears as the last 2 bytes.
+ */
+typedef struct xfs_dir2_data_entry {
+	xfs_ino_t		inumber;	/* inode number */
+	__uint8_t		namelen;	/* name length */
+	__uint8_t		name[1];	/* name bytes, no null */
+						/* variable offset */
+	xfs_dir2_data_off_t	tag;		/* starting offset of us */
+} xfs_dir2_data_entry_t;
+
+/*
+ * Unused entry in a data block.  Aligned to 8 bytes.
+ * Tag appears as the last 2 bytes.
+ */
+typedef struct xfs_dir2_data_unused {
+	__uint16_t		freetag;	/* XFS_DIR2_DATA_FREE_TAG */
+	xfs_dir2_data_off_t	length;		/* total free length */
+						/* variable offset */
+	xfs_dir2_data_off_t	tag;		/* starting offset of us */
+} xfs_dir2_data_unused_t;
+
+typedef union {
+	xfs_dir2_data_entry_t	entry;
+	xfs_dir2_data_unused_t	unused;
+} xfs_dir2_data_union_t;
+
+/*
+ * Generic data block structure, for xfs_db.
+ */
+typedef struct xfs_dir2_data {
+	xfs_dir2_data_hdr_t	hdr;		/* magic XFS_DIR2_DATA_MAGIC */
+	xfs_dir2_data_union_t	u[1];
+} xfs_dir2_data_t;
+
+/*
+ * Macros.
+ */
+
+/*
+ * Size of a data entry.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTSIZE)
+int xfs_dir2_data_entsize(int n);
+#define XFS_DIR2_DATA_ENTSIZE(n)	xfs_dir2_data_entsize(n)
+#else
+#define	XFS_DIR2_DATA_ENTSIZE(n)	\
+	((int)(roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \
+		 (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN)))
+#endif
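+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the
+ * entry-size arithmetic worked out for "." and "..".  The value 9 is
+ * the assumed offset of name[] in xfs_dir2_data_entry_t (an 8-byte
+ * inumber followed by a 1-byte namelen); adding the name bytes and
+ * the 2-byte tag and rounding up to XFS_DIR2_DATA_ALIGN (8) gives 16
+ * for both, so "." lands at offset 16 (right after the 16-byte
+ * header), ".." at 32, and the first ordinary entry at 48.
+ */
+static inline int ex_data_entsize(int namelen)
+{
+	return (9 + namelen + 2 + 7) & ~7; /* ex_data_entsize(1) == 16 */
+}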
+
+/*
+ * Pointer to an entry's tag word.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTRY_TAG_P)
+xfs_dir2_data_off_t *xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep);
+#define	XFS_DIR2_DATA_ENTRY_TAG_P(dep)	xfs_dir2_data_entry_tag_p(dep)
+#else
+#define	XFS_DIR2_DATA_ENTRY_TAG_P(dep)	\
+	((xfs_dir2_data_off_t *)\
+	 ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \
+	  (uint)sizeof(xfs_dir2_data_off_t)))
+#endif
+
+/*
+ * Pointer to a freespace's tag word.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_UNUSED_TAG_P)
+xfs_dir2_data_off_t *xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup);
+#define	XFS_DIR2_DATA_UNUSED_TAG_P(dup) \
+	xfs_dir2_data_unused_tag_p(dup)
+#else
+#define	XFS_DIR2_DATA_UNUSED_TAG_P(dup)	\
+	((xfs_dir2_data_off_t *)\
+	 ((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \
+			- (uint)sizeof(xfs_dir2_data_off_t)))
+#endif
+
+/*
+ * Function declarations.
+ */
+
+#ifdef DEBUG
+extern void
+	xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
+#else
+#define	xfs_dir2_data_check(dp,bp)
+#endif
+
+extern xfs_dir2_data_free_t *
+	xfs_dir2_data_freefind(xfs_dir2_data_t *d,
+			       xfs_dir2_data_unused_t *dup);
+
+extern xfs_dir2_data_free_t *
+	xfs_dir2_data_freeinsert(xfs_dir2_data_t *d,
+				 xfs_dir2_data_unused_t *dup, int *loghead);
+
+extern void
+	xfs_dir2_data_freeremove(xfs_dir2_data_t *d,
+				 xfs_dir2_data_free_t *dfp, int *loghead);
+
+extern void
+	xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d,
+			       int *loghead, char *aendp);
+
+extern int
+	xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
+			   struct xfs_dabuf **bpp);
+
+extern void
+	xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
+				xfs_dir2_data_entry_t *dep);
+
+extern void
+	xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp);
+
+extern void
+	xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
+				 xfs_dir2_data_unused_t *dup);
+
+extern void
+	xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+				xfs_dir2_data_aoff_t offset,
+				xfs_dir2_data_aoff_t len, int *needlogp,
+				int *needscanp);
+
+extern void
+	xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+			       xfs_dir2_data_unused_t *dup,
+			       xfs_dir2_data_aoff_t offset,
+			       xfs_dir2_data_aoff_t len, int *needlogp,
+			       int *needscanp);
+
+#endif	/* __XFS_DIR2_DATA_H__ */
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
new file mode 100644
index 0000000..262d1e8
--- /dev/null
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -0,0 +1,1896 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_leaf.c
+ * XFS directory version 2 implementation - single leaf form
+ * see xfs_dir2_leaf.h for data structures.
+ * These directories have multiple XFS_DIR2_DATA blocks and one
+ * XFS_DIR2_LEAF1 block containing the hash table and freespace map.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+
+/*
+ * Local function declarations.
+ */
+#ifdef DEBUG
+static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp);
+#else
+#define	xfs_dir2_leaf_check(dp, bp)
+#endif
+static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp,
+				    int *indexp, xfs_dabuf_t **dbpp);
+
+/*
+ * Convert a block form directory to a leaf form directory.
+ */
+int						/* error */
+xfs_dir2_block_to_leaf(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*dbp)		/* input block's buffer */
+{
+	xfs_dir2_data_off_t	*bestsp;	/* leaf's bestsp entries */
+	xfs_dablk_t		blkno;		/* leaf block's bno */
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_leaf_entry_t	*blp;		/* block's leaf entries */
+	xfs_dir2_block_tail_t	*btp;		/* block's tail */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	xfs_dabuf_t		*lbp;		/* leaf block's buffer */
+	xfs_dir2_db_t		ldb;		/* leaf block's bno */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf's tail */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log block header */
+	int			needscan;	/* need to rescan bestfree */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_b("block_to_leaf", args, dbp);
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	/*
+	 * Add the leaf block to the inode.
+	 * This interface will only put blocks in the leaf/node range.
+	 * Since that's empty now, we'll get the root (block 0 in range).
+	 */
+	if ((error = xfs_da_grow_inode(args, &blkno))) {
+		return error;
+	}
+	ldb = XFS_DIR2_DA_TO_DB(mp, blkno);
+	ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp));
+	/*
+	 * Initialize the leaf block, get a buffer for it.
+	 */
+	if ((error = xfs_dir2_leaf_init(args, ldb, &lbp, XFS_DIR2_LEAF1_MAGIC))) {
+		return error;
+	}
+	ASSERT(lbp != NULL);
+	leaf = lbp->data;
+	block = dbp->data;
+	xfs_dir2_data_check(dp, dbp);
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+	/*
+	 * Set the counts in the leaf header.
+	 */
+	INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */
+	INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */
+	/*
+	 * Could compact these but I think we always do the conversion
+	 * after squeezing out stale entries.
+	 */
+	memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t));
+	xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1);
+	needscan = 0;
+	needlog = 1;
+	/*
+	 * Make the space formerly occupied by the leaf entries and block
+	 * tail be free.
+	 */
+	xfs_dir2_data_make_free(tp, dbp,
+		(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
+		(xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize -
+				       (char *)blp),
+		&needlog, &needscan);
+	/*
+	 * Fix up the block header, make it a data block.
+	 */
+	INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC);
+	if (needscan)
+		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog,
+			NULL);
+	/*
+	 * Set up leaf tail and bests table.
+	 */
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	INT_SET(ltp->bestcount, ARCH_CONVERT, 1);
+	bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
+	INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT);
+	/*
+	 * Log the data header and leaf bests table.
+	 */
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	xfs_dir2_leaf_check(dp, lbp);
+	xfs_dir2_data_check(dp, dbp);
+	xfs_dir2_leaf_log_bests(tp, lbp, 0, 0);
+	xfs_da_buf_done(lbp);
+	return 0;
+}
+
+/*
+ * Add an entry to a leaf form directory.
+ */
+int						/* error */
+xfs_dir2_leaf_addname(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_dir2_data_off_t	*bestsp;	/* freespace table in leaf */
+	int			compact;	/* need to compact leaves */
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data block entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* data unused entry */
+	int			error;		/* error return value */
+	int			grown;		/* allocated new data block */
+	int			highstale;	/* index of next stale leaf */
+	int			i;		/* temporary, index */
+	int			index;		/* leaf table position */
+	xfs_dabuf_t		*lbp;		/* leaf's buffer */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	int			length;		/* length of new entry */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry table pointer */
+	int			lfloglow;	/* low leaf logging index */
+	int			lfloghigh;	/* high leaf logging index */
+	int			lowstale;	/* index of prev stale leaf */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail pointer */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needbytes;	/* leaf block bytes needed */
+	int			needlog;	/* need to log data header */
+	int			needscan;	/* need to rescan data free */
+	xfs_dir2_data_off_t	*tagp;		/* end of data entry */
+	xfs_trans_t		*tp;		/* transaction pointer */
+	xfs_dir2_db_t		use_block;	/* data block number */
+
+	xfs_dir2_trace_args("leaf_addname", args);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Read the leaf block.
+	 */
+	error = xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp,
+		XFS_DATA_FORK);
+	if (error) {
+		return error;
+	}
+	ASSERT(lbp != NULL);
+	/*
+	 * Look up the entry by hash value and name.
+	 * We know it's not there, our caller has already done a lookup.
+	 * So the index is of the entry to insert in front of.
+	 * But if there are duplicate hash values, the index is of the first of those.
+	 */
+	index = xfs_dir2_leaf_search_hash(args, lbp);
+	leaf = lbp->data;
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
+	length = XFS_DIR2_DATA_ENTSIZE(args->namelen);
+	/*
+	 * See if there are any entries with the same hash value
+	 * and space in their block for the new entry.
+	 * This is good because it puts multiple same-hash value entries
+	 * in a data block, improving the lookup of those entries.
+	 */
+	for (use_block = -1, lep = &leaf->ents[index];
+	     index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval;
+	     index++, lep++) {
+		if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT));
+		ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT));
+		ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF);
+		if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) {
+			use_block = i;
+			break;
+		}
+	}
+	/*
+	 * Didn't find a block yet, linear search all the data blocks.
+	 */
+	if (use_block == -1) {
+		for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) {
+			/*
+			 * Remember a block we see that's missing.
+			 */
+			if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1)
+				use_block = i;
+			else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) {
+				use_block = i;
+				break;
+			}
+		}
+	}
+	/*
+	 * How many bytes do we need in the leaf block?
+	 */
+	needbytes =
+		(leaf->hdr.stale ? 0 : (uint)sizeof(leaf->ents[0])) +
+		(use_block != -1 ? 0 : (uint)sizeof(leaf->bests[0]));
+	/*
+	 * Now kill use_block if it refers to a missing block, so we
+	 * can use it as an indication of allocation needed.
+	 */
+	if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF)
+		use_block = -1;
+	/*
+	 * If we don't have enough free bytes but we can make enough
+	 * by compacting out stale entries, we'll do that.
+	 */
+	if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes &&
+	    INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) {
+		compact = 1;
+	}
+	/*
+	 * Otherwise if we don't have enough free bytes we need to
+	 * convert to node form.
+	 */
+	else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <
+		 needbytes) {
+		/*
+		 * Just checking or no space reservation, give up.
+		 */
+		if (args->justcheck || args->total == 0) {
+			xfs_da_brelse(tp, lbp);
+			return XFS_ERROR(ENOSPC);
+		}
+		/*
+		 * Convert to node form.
+		 */
+		error = xfs_dir2_leaf_to_node(args, lbp);
+		xfs_da_buf_done(lbp);
+		if (error)
+			return error;
+		/*
+		 * Then add the new entry.
+		 */
+		return xfs_dir2_node_addname(args);
+	}
+	/*
+	 * Otherwise it will fit without compaction.
+	 */
+	else
+		compact = 0;
+	/*
+	 * If just checking, then it will fit unless we needed to allocate
+	 * a new data block.
+	 */
+	if (args->justcheck) {
+		xfs_da_brelse(tp, lbp);
+		return use_block == -1 ? XFS_ERROR(ENOSPC) : 0;
+	}
+	/*
+	 * If no allocations are allowed, return now before we've
+	 * changed anything.
+	 */
+	if (args->total == 0 && use_block == -1) {
+		xfs_da_brelse(tp, lbp);
+		return XFS_ERROR(ENOSPC);
+	}
+	/*
+	 * Need to compact the leaf entries, removing stale ones.
+	 * Leave one stale entry behind - the one closest to our
+	 * insertion index - and we'll shift that one to our insertion
+	 * point later.
+	 */
+	if (compact) {
+		xfs_dir2_leaf_compact_x1(lbp, &index, &lowstale, &highstale,
+			&lfloglow, &lfloghigh);
+	}
+	/*
+	 * There are stale entries, so start the log-low and log-high
+	 * indices at impossibly bad values; they are narrowed below when
+	 * the stale entry is reused.
+	 */
+	else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) {
+		lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		lfloghigh = -1;
+	}
+	/*
+	 * If there was no data block space found, we need to allocate
+	 * a new one.
+	 */
+	if (use_block == -1) {
+		/*
+		 * Add the new data block.
+		 */
+		if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE,
+				&use_block))) {
+			xfs_da_brelse(tp, lbp);
+			return error;
+		}
+		/*
+		 * Initialize the block.
+		 */
+		if ((error = xfs_dir2_data_init(args, use_block, &dbp))) {
+			xfs_da_brelse(tp, lbp);
+			return error;
+		}
+		/*
+		 * If we're adding a new data block on the end we need to
+		 * extend the bests table.  Copy it up one entry.
+		 */
+		if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) {
+			bestsp--;
+			memmove(&bestsp[0], &bestsp[1],
+				INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0]));
+			INT_MOD(ltp->bestcount, ARCH_CONVERT, +1);
+			xfs_dir2_leaf_log_tail(tp, lbp);
+			xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+		}
+		/*
+		 * If we're filling in a previously empty block just log it.
+		 */
+		else
+			xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
+		data = dbp->data;
+		INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT);
+		grown = 1;
+	}
+	/*
+	 * Already had space in some data block.
+	 * Just read that one in.
+	 */
+	else {
+		if ((error =
+		    xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, use_block),
+			    -1, &dbp, XFS_DATA_FORK))) {
+			xfs_da_brelse(tp, lbp);
+			return error;
+		}
+		data = dbp->data;
+		grown = 0;
+	}
+	xfs_dir2_data_check(dp, dbp);
+	/*
+	 * Point to the biggest freespace in our data block.
+	 */
+	dup = (xfs_dir2_data_unused_t *)
+	      ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT));
+	ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length);
+	needscan = needlog = 0;
+	/*
+	 * Mark the initial part of our freespace in use for the new entry.
+	 */
+	xfs_dir2_data_use_free(tp, dbp, dup,
+		(xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
+		&needlog, &needscan);
+	/*
+	 * Initialize our new entry (at last).
+	 */
+	dep = (xfs_dir2_data_entry_t *)dup;
+	INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+	dep->namelen = args->namelen;
+	memcpy(dep->name, args->name, dep->namelen);
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data));
+	/*
+	 * Rescan the block to fix up the bestfree table, if needed.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, data, &needlog, NULL);
+	/*
+	 * Need to log the data block's header.
+	 */
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	xfs_dir2_data_log_entry(tp, dbp, dep);
+	/*
+	 * If the bests table needs to be changed, do it.
+	 * Log the change unless we've already done that.
+	 */
+	if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) {
+		INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT);
+		if (!grown)
+			xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
+	}
+	/*
+	 * Now we need to make room to insert the leaf entry.
+	 * If there are no stale entries, we just insert a hole at index.
+	 */
+	if (!leaf->hdr.stale) {
+		/*
+		 * lep is still good as the index leaf entry.
+		 */
+		if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+			memmove(lep + 1, lep,
+				(INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep));
+		/*
+		 * Record low and high logging indices for the leaf.
+		 */
+		lfloglow = index;
+		lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1);
+	}
+	/*
+	 * There are stale entries.
+	 * We will use one of them for the new entry.
+	 * It's probably not at the right location, so we'll have to
+	 * shift some up or down first.
+	 */
+	else {
+		/*
+		 * If we didn't compact before, we need to find the nearest
+		 * stale entries before and after our insertion point.
+		 */
+		if (compact == 0) {
+			/*
+			 * Find the first stale entry before the insertion
+			 * point, if any.
+			 */
+			for (lowstale = index - 1;
+			     lowstale >= 0 &&
+				INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) !=
+				XFS_DIR2_NULL_DATAPTR;
+			     lowstale--)
+				continue;
+			/*
+			 * Find the next stale entry at or after the insertion
+			 * point, if any.   Stop if we go so far that the
+			 * lowstale entry would be better.
+			 */
+			for (highstale = index;
+			     highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) &&
+				INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) !=
+				XFS_DIR2_NULL_DATAPTR &&
+				(lowstale < 0 ||
+				 index - lowstale - 1 >= highstale - index);
+			     highstale++)
+				continue;
+		}
+		/*
+		 * If the low one is better, use it.
+		 */
+		if (lowstale >= 0 &&
+		    (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) ||
+		     index - lowstale - 1 < highstale - index)) {
+			ASSERT(index - lowstale - 1 >= 0);
+			ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) ==
+			       XFS_DIR2_NULL_DATAPTR);
+			/*
+			 * Copy entries up to cover the stale entry
+			 * and make room for the new entry.
+			 */
+			if (index - lowstale - 1 > 0)
+				memmove(&leaf->ents[lowstale],
+					&leaf->ents[lowstale + 1],
+					(index - lowstale - 1) * sizeof(*lep));
+			lep = &leaf->ents[index - 1];
+			lfloglow = MIN(lowstale, lfloglow);
+			lfloghigh = MAX(index - 1, lfloghigh);
+		}
+		/*
+		 * The high one is better, so use that one.
+		 */
+		else {
+			ASSERT(highstale - index >= 0);
+			ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) ==
+			       XFS_DIR2_NULL_DATAPTR);
+			/*
+			 * Copy entries down to cover the stale entry
+			 * and make room for the new entry.
+			 */
+			if (highstale - index > 0)
+				memmove(&leaf->ents[index + 1],
+					&leaf->ents[index],
+					(highstale - index) * sizeof(*lep));
+			lep = &leaf->ents[index];
+			lfloglow = MIN(index, lfloglow);
+			lfloghigh = MAX(highstale, lfloghigh);
+		}
+		INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1);
+	}
+	/*
+	 * Fill in the new leaf entry.
+	 */
+	INT_SET(lep->hashval, ARCH_CONVERT, args->hashval);
+	INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT)));
+	/*
+	 * Log the leaf fields and give up the buffers.
+	 */
+	xfs_dir2_leaf_log_header(tp, lbp);
+	xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
+	xfs_dir2_leaf_check(dp, lbp);
+	xfs_da_buf_done(lbp);
+	xfs_dir2_data_check(dp, dbp);
+	xfs_da_buf_done(dbp);
+	return 0;
+}
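+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the
+ * "will it fit" test in xfs_dir2_leaf_addname() as a pure function.
+ * The free space in a leaf1 block is the gap between the end of the
+ * ents[] array and the bests table growing down from the tail; a new
+ * name needs one leaf entry unless a stale slot can be reused, plus
+ * one bests slot when a new data block must be added.
+ */
+static int ex_leaf_entry_fits(
+	long	gap_bytes,		/* bytes between ents[] and bests */
+	int	have_stale,		/* nonzero if stale slots exist */
+	int	need_new_data_block,	/* nonzero if no data block fits */
+	int	leaf_ent_size,		/* sizeof(xfs_dir2_leaf_entry_t) */
+	int	bests_ent_size)		/* sizeof(xfs_dir2_data_off_t) */
+{
+	int	needbytes = (have_stale ? 0 : leaf_ent_size) +
+			    (need_new_data_block ? bests_ent_size : 0);
+
+	return gap_bytes >= needbytes;
+}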
+
+#ifdef DEBUG
+/*
+ * Check the internal consistency of a leaf1 block.
+ * Pop an assert if something is wrong.
+ */
+void
+xfs_dir2_leaf_check(
+	xfs_inode_t		*dp,		/* incore directory inode */
+	xfs_dabuf_t		*bp)		/* leaf's buffer */
+{
+	int			i;		/* leaf index */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail pointer */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			stale;		/* count of stale leaves */
+
+	leaf = bp->data;
+	mp = dp->i_mount;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC);
+	/*
+	 * This value is not restrictive enough.
+	 * Should factor in the size of the bests table as well.
+	 * We can deduce a value for that from di_size.
+	 */
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp));
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	/*
+	 * Leaves and bests don't overlap.
+	 */
+	ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <=
+	       (char *)XFS_DIR2_LEAF_BESTS_P(ltp));
+	/*
+	 * Check hash value order, count stale entries.
+	 */
+	for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) {
+		if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+			ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <=
+			       INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT));
+		if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			stale++;
+	}
+	ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale);
+}
+#endif	/* DEBUG */
+
+/*
+ * Compact out any stale entries in the leaf.
+ * Log the header and changed leaf entries, if any.
+ */
+void
+xfs_dir2_leaf_compact(
+	xfs_da_args_t	*args,		/* operation arguments */
+	xfs_dabuf_t	*bp)		/* leaf buffer */
+{
+	int		from;		/* source leaf index */
+	xfs_dir2_leaf_t	*leaf;		/* leaf structure */
+	int		loglow;		/* first leaf entry to log */
+	int		to;		/* target leaf index */
+
+	leaf = bp->data;
+	if (!leaf->hdr.stale) {
+		return;
+	}
+	/*
+	 * Compress out the stale entries in place.
+	 */
+	for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) {
+		if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		/*
+		 * Only actually copy the entries that are different.
+		 */
+		if (from > to) {
+			if (loglow == -1)
+				loglow = to;
+			leaf->ents[to] = leaf->ents[from];
+		}
+		to++;
+	}
+	/*
+	 * Update and log the header, log the leaf entries.
+	 */
+	ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to);
+	INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT)));
+	leaf->hdr.stale = 0;
+	xfs_dir2_leaf_log_header(args->trans, bp);
+	if (loglow != -1)
+		xfs_dir2_leaf_log_ents(args->trans, bp, loglow, to - 1);
+}
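+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the
+ * in-place "squeeze out stale slots" pattern used above, applied to a
+ * plain array where EX_NULL marks a stale slot.  Returns the new
+ * count; entries are copied only when they actually move.
+ */
+#define EX_NULL	0xffffffffU
+
+static int ex_compact(unsigned int *ents, int count)
+{
+	int	from;
+	int	to;
+
+	for (from = to = 0; from < count; from++) {
+		if (ents[from] == EX_NULL)
+			continue;
+		if (from > to)
+			ents[to] = ents[from];
+		to++;
+	}
+	return to;
+}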
+
+/*
+ * Compact the leaf entries, removing stale ones.
+ * Leave one stale entry behind - the one closest to our
+ * insertion index - and the caller will shift that one to our insertion
+ * point later.
+ * Return new insertion index, where the remaining stale entry is,
+ * and leaf logging indices.
+ */
+void
+xfs_dir2_leaf_compact_x1(
+	xfs_dabuf_t	*bp,		/* leaf buffer */
+	int		*indexp,	/* insertion index */
+	int		*lowstalep,	/* out: stale entry before us */
+	int		*highstalep,	/* out: stale entry after us */
+	int		*lowlogp,	/* out: low log index */
+	int		*highlogp)	/* out: high log index */
+{
+	int		from;		/* source copy index */
+	int		highstale;	/* stale entry at/after index */
+	int		index;		/* insertion index */
+	int		keepstale;	/* source index of kept stale */
+	xfs_dir2_leaf_t	*leaf;		/* leaf structure */
+	int		lowstale;	/* stale entry before index */
+	int		newindex = 0;	/* new insertion index */
+	int		to;		/* destination copy index */
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1);
+	index = *indexp;
+	/*
+	 * Find the first stale entry before our index, if any.
+	 */
+	for (lowstale = index - 1;
+	     lowstale >= 0 &&
+		INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR;
+	     lowstale--)
+		continue;
+	/*
+	 * Find the first stale entry at or after our index, if any.
+	 * Stop if the answer would be worse than lowstale.
+	 */
+	for (highstale = index;
+	     highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) &&
+		INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR &&
+		(lowstale < 0 || index - lowstale > highstale - index);
+	     highstale++)
+		continue;
+	/*
+	 * Pick the better of lowstale and highstale.
+	 */
+	if (lowstale >= 0 &&
+	    (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) ||
+	     index - lowstale <= highstale - index))
+		keepstale = lowstale;
+	else
+		keepstale = highstale;
+	/*
+	 * Copy the entries in place, removing all the stale entries
+	 * except keepstale.
+	 */
+	for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) {
+		/*
+		 * Notice the new value of index.
+		 */
+		if (index == from)
+			newindex = to;
+		if (from != keepstale &&
+		    INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) {
+			if (from == to)
+				*lowlogp = to;
+			continue;
+		}
+		/*
+		 * Record the new keepstale value for the insertion.
+		 */
+		if (from == keepstale)
+			lowstale = highstale = to;
+		/*
+		 * Copy only the entries that have moved.
+		 */
+		if (from > to)
+			leaf->ents[to] = leaf->ents[from];
+		to++;
+	}
+	ASSERT(from > to);
+	/*
+	 * If the insertion point was past the last entry,
+	 * set the new insertion point accordingly.
+	 */
+	if (index == from)
+		newindex = to;
+	*indexp = newindex;
+	/*
+	 * Adjust the leaf header values.
+	 */
+	INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to));
+	INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1);
+	/*
+	 * Remember the low/high stale value only in the "right"
+	 * direction.
+	 */
+	if (lowstale >= newindex)
+		lowstale = -1;
+	else
+		highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	*highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1;
+	*lowstalep = lowstale;
+	*highstalep = highstale;
+}
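+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the
+ * lowstale/highstale tie-break used above.  Reusing the stale slot
+ * nearer to the insertion index minimizes the number of live entries
+ * that have to be shifted; -1 and count mean "no stale slot on that
+ * side".
+ */
+static int ex_pick_stale(
+	int	index,		/* insertion index */
+	int	lowstale,	/* nearest stale below index, or -1 */
+	int	highstale,	/* nearest stale at/above index, or count */
+	int	count)		/* number of leaf entries */
+{
+	if (lowstale >= 0 &&
+	    (highstale == count || index - lowstale <= highstale - index))
+		return lowstale;
+	return highstale;
+}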
+
+/*
+ * Getdents (readdir) for leaf and node directories.
+ * This reads the data blocks only, so is the same for both forms.
+ */
+int						/* error */
+xfs_dir2_leaf_getdents(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*dp,		/* incore directory inode */
+	uio_t			*uio,		/* I/O control & vectors */
+	int			*eofp,		/* out: reached end of dir */
+	xfs_dirent_t		*dbp,		/* caller's buffer */
+	xfs_dir2_put_t		put)		/* ABI formatting routine */
+{
+	xfs_dabuf_t		*bp;		/* data block buffer */
+	int			byteoff;	/* offset in current block */
+	xfs_dir2_db_t		curdb;		/* db for current block */
+	xfs_dir2_off_t		curoff;		/* current overall offset */
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dir2_data_entry_t	*dep;		/* data entry */
+	xfs_dir2_data_unused_t	*dup;		/* unused entry */
+	int			eof;		/* reached end of directory */
+	int			error = 0;	/* error return value */
+	int			i;		/* temporary loop index */
+	int			j;		/* temporary loop index */
+	int			length;		/* temporary length value */
+	xfs_bmbt_irec_t		*map;		/* map vector for blocks */
+	xfs_extlen_t		map_blocks;	/* number of fsbs in map */
+	xfs_dablk_t		map_off;	/* last mapped file offset */
+	int			map_size;	/* total entries in *map */
+	int			map_valid;	/* valid entries in *map */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_off_t		newoff;		/* new curoff after new blk */
+	int			nmap;		/* mappings to ask xfs_bmapi */
+	xfs_dir2_put_args_t	p;		/* formatting arg bundle */
+	char			*ptr = NULL;	/* pointer to current data */
+	int			ra_current;	/* number of read-ahead blks */
+	int			ra_index;	/* *map index for read-ahead */
+	int			ra_offset;	/* map entry offset for ra */
+	int			ra_want;	/* readahead count wanted */
+
+	/*
+	 * If the offset is at or past the largest allowed value,
+	 * give up right away, return eof.
+	 */
+	if (uio->uio_offset >= XFS_DIR2_MAX_DATAPTR) {
+		*eofp = 1;
+		return 0;
+	}
+	mp = dp->i_mount;
+	/*
+	 * Setup formatting arguments.
+	 */
+	p.dbp = dbp;
+	p.put = put;
+	p.uio = uio;
+	/*
+	 * Set up to bmap a number of blocks based on the caller's
+	 * buffer size, the directory block size, and the filesystem
+	 * block size.
+	 */
+	map_size =
+		howmany(uio->uio_resid + mp->m_dirblksize,
+			mp->m_sb.sb_blocksize);
+	map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP);
+	map_valid = ra_index = ra_offset = ra_current = map_blocks = 0;
+	bp = NULL;
+	eof = 1;
+	/*
+	 * Inside the loop we keep the main offset value as a byte offset
+	 * in the directory file.
+	 */
+	curoff = XFS_DIR2_DATAPTR_TO_BYTE(mp, uio->uio_offset);
+	/*
+	 * Force this conversion through db so we truncate the offset
+	 * down to get the start of the data block.
+	 */
+	map_off = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, curoff));
+	/*
+	 * Loop over directory entries until we reach the end offset.
+	 * Get more blocks and readahead as necessary.
+	 */
+	while (curoff < XFS_DIR2_LEAF_OFFSET) {
+		/*
+		 * If we have no buffer, or we're off the end of the
+		 * current buffer, need to get another one.
+		 */
+		if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) {
+			/*
+			 * If we have a buffer, we need to release it and
+			 * take it out of the mapping.
+			 */
+			if (bp) {
+				xfs_da_brelse(tp, bp);
+				bp = NULL;
+				map_blocks -= mp->m_dirblkfsbs;
+				/*
+				 * Loop to get rid of the extents for the
+				 * directory block.
+				 */
+				for (i = mp->m_dirblkfsbs; i > 0; ) {
+					j = MIN((int)map->br_blockcount, i);
+					map->br_blockcount -= j;
+					map->br_startblock += j;
+					map->br_startoff += j;
+					/*
+					 * If mapping is done, pitch it from
+					 * the table.
+					 */
+					if (!map->br_blockcount && --map_valid)
+						memmove(&map[0], &map[1],
+							sizeof(map[0]) *
+							map_valid);
+					i -= j;
+				}
+			}
+			/*
+			 * Recalculate the readahead blocks wanted.
+			 */
+			ra_want = howmany(uio->uio_resid + mp->m_dirblksize,
+					  mp->m_sb.sb_blocksize) - 1;
+			/*
+			 * If we don't have as many as we want, and we haven't
+			 * run out of data blocks, get some more mappings.
+			 */
+			if (1 + ra_want > map_blocks &&
+			    map_off <
+			    XFS_DIR2_BYTE_TO_DA(mp, XFS_DIR2_LEAF_OFFSET)) {
+				/*
+				 * Get more bmaps, fill in after the ones
+				 * we already have in the table.
+				 */
+				nmap = map_size - map_valid;
+				error = xfs_bmapi(tp, dp,
+					map_off,
+					XFS_DIR2_BYTE_TO_DA(mp,
+						XFS_DIR2_LEAF_OFFSET) - map_off,
+					XFS_BMAPI_METADATA, NULL, 0,
+					&map[map_valid], &nmap, NULL);
+				/*
+				 * Don't know if we should ignore this or
+				 * try to return an error.
+				 * The trouble with returning errors
+				 * is that readdir will just stop without
+				 * actually passing the error through.
+				 */
+				if (error)
+					break;	/* XXX */
+				/*
+				 * If we got all the mappings we asked for,
+				 * set the final map offset based on the
+				 * last bmap value received.
+				 * Otherwise, we've reached the end.
+				 */
+				if (nmap == map_size - map_valid)
+					map_off =
+					map[map_valid + nmap - 1].br_startoff +
+					map[map_valid + nmap - 1].br_blockcount;
+				else
+					map_off =
+						XFS_DIR2_BYTE_TO_DA(mp,
+							XFS_DIR2_LEAF_OFFSET);
+				/*
+				 * Look for holes in the mapping, and
+				 * eliminate them.  Count up the valid blocks.
+				 */
+				for (i = map_valid; i < map_valid + nmap; ) {
+					if (map[i].br_startblock ==
+					    HOLESTARTBLOCK) {
+						nmap--;
+						length = map_valid + nmap - i;
+						if (length)
+							memmove(&map[i],
+								&map[i + 1],
+								sizeof(map[i]) *
+								length);
+					} else {
+						map_blocks +=
+							map[i].br_blockcount;
+						i++;
+					}
+				}
+				map_valid += nmap;
+			}
+			/*
+			 * No valid mappings, so no more data blocks.
+			 */
+			if (!map_valid) {
+				curoff = XFS_DIR2_DA_TO_BYTE(mp, map_off);
+				break;
+			}
+			/*
+			 * Read the directory block starting at the first
+			 * mapping.
+			 */
+			curdb = XFS_DIR2_DA_TO_DB(mp, map->br_startoff);
+			error = xfs_da_read_buf(tp, dp, map->br_startoff,
+				map->br_blockcount >= mp->m_dirblkfsbs ?
+				    XFS_FSB_TO_DADDR(mp, map->br_startblock) :
+				    -1,
+				&bp, XFS_DATA_FORK);
+			/*
+			 * Should just skip over the data block instead
+			 * of giving up.
+			 */
+			if (error)
+				break;	/* XXX */
+			/*
+			 * Adjust the current amount of read-ahead: we just
+			 * read a block that was previously ra.
+			 */
+			if (ra_current)
+				ra_current -= mp->m_dirblkfsbs;
+			/*
+			 * Do we need more readahead?
+			 */
+			for (ra_index = ra_offset = i = 0;
+			     ra_want > ra_current && i < map_blocks;
+			     i += mp->m_dirblkfsbs) {
+				ASSERT(ra_index < map_valid);
+				/*
+				 * Read-ahead a contiguous directory block.
+				 */
+				if (i > ra_current &&
+				    map[ra_index].br_blockcount >=
+				    mp->m_dirblkfsbs) {
+					xfs_baread(mp->m_ddev_targp,
+						XFS_FSB_TO_DADDR(mp,
+						   map[ra_index].br_startblock +
+						   ra_offset),
+						(int)BTOBB(mp->m_dirblksize));
+					ra_current = i;
+				}
+				/*
+				 * Read-ahead a non-contiguous directory block.
+				 * This doesn't use our mapping, but this
+				 * is a very rare case.
+				 */
+				else if (i > ra_current) {
+					(void)xfs_da_reada_buf(tp, dp,
+						map[ra_index].br_startoff +
+						ra_offset, XFS_DATA_FORK);
+					ra_current = i;
+				}
+				/*
+				 * Advance offset through the mapping table.
+				 */
+				for (j = 0; j < mp->m_dirblkfsbs; j++) {
+					/*
+					 * The rest of this extent but not
+					 * more than a dir block.
+					 */
+					length = MIN(mp->m_dirblkfsbs,
+						(int)(map[ra_index].br_blockcount -
+						ra_offset));
+					j += length;
+					ra_offset += length;
+					/*
+					 * Advance to the next mapping if
+					 * this one is used up.
+					 */
+					if (ra_offset ==
+					    map[ra_index].br_blockcount) {
+						ra_offset = 0;
+						ra_index++;
+					}
+				}
+			}
+			/*
+			 * Having done a read, we need to set a new offset.
+			 */
+			newoff = XFS_DIR2_DB_OFF_TO_BYTE(mp, curdb, 0);
+			/*
+			 * Start of the current block.
+			 */
+			if (curoff < newoff)
+				curoff = newoff;
+			/*
+			 * Make sure we're in the right block.
+			 */
+			else if (curoff > newoff)
+				ASSERT(XFS_DIR2_BYTE_TO_DB(mp, curoff) ==
+				       curdb);
+			data = bp->data;
+			xfs_dir2_data_check(dp, bp);
+			/*
+			 * Find our position in the block.
+			 */
+			ptr = (char *)&data->u;
+			byteoff = XFS_DIR2_BYTE_TO_OFF(mp, curoff);
+			/*
+			 * Skip past the header.
+			 */
+			if (byteoff == 0)
+				curoff += (uint)sizeof(data->hdr);
+			/*
+			 * Skip past entries until we reach our offset.
+			 */
+			else {
+				while ((char *)ptr - (char *)data < byteoff) {
+					dup = (xfs_dir2_data_unused_t *)ptr;
+
+					if (INT_GET(dup->freetag, ARCH_CONVERT)
+						  == XFS_DIR2_DATA_FREE_TAG) {
+
+						length = INT_GET(dup->length,
+								 ARCH_CONVERT);
+						ptr += length;
+						continue;
+					}
+					dep = (xfs_dir2_data_entry_t *)ptr;
+					length =
+					   XFS_DIR2_DATA_ENTSIZE(dep->namelen);
+					ptr += length;
+				}
+				/*
+				 * Now set our real offset.
+				 */
+				curoff =
+					XFS_DIR2_DB_OFF_TO_BYTE(mp,
+					    XFS_DIR2_BYTE_TO_DB(mp, curoff),
+					    (char *)ptr - (char *)data);
+				if (ptr >= (char *)data + mp->m_dirblksize) {
+					continue;
+				}
+			}
+		}
+		/*
+		 * We have a pointer to an entry.
+		 * Is it a live one?
+		 */
+		dup = (xfs_dir2_data_unused_t *)ptr;
+		/*
+		 * No, it's unused, skip over it.
+		 */
+		if (INT_GET(dup->freetag, ARCH_CONVERT)
+						== XFS_DIR2_DATA_FREE_TAG) {
+			length = INT_GET(dup->length, ARCH_CONVERT);
+			ptr += length;
+			curoff += length;
+			continue;
+		}
+
+		/*
+		 * Copy the entry into the putargs, and try formatting it.
+		 */
+		dep = (xfs_dir2_data_entry_t *)ptr;
+
+		p.namelen = dep->namelen;
+
+		length = XFS_DIR2_DATA_ENTSIZE(p.namelen);
+
+		p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length);
+
+		p.ino = INT_GET(dep->inumber, ARCH_CONVERT);
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = (char *)dep->name;
+
+		error = p.put(&p);
+
+		/*
+		 * Won't fit.  Return to caller.
+		 */
+		if (!p.done) {
+			eof = 0;
+			break;
+		}
+		/*
+		 * Advance to next entry in the block.
+		 */
+		ptr += length;
+		curoff += length;
+	}
+
+	/*
+	 * All done.  Set output offset value to current offset.
+	 */
+	*eofp = eof;
+	if (curoff > XFS_DIR2_DATAPTR_TO_BYTE(mp, XFS_DIR2_MAX_DATAPTR))
+		uio->uio_offset = XFS_DIR2_MAX_DATAPTR;
+	else
+		uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff);
+	kmem_free(map, map_size * sizeof(*map));
+	if (bp)
+		xfs_da_brelse(tp, bp);
+	return error;
+}
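+
+/*
+ * Editor's illustrative sketch, not part of the XFS sources: the shape
+ * of the getdents offset cookie.  Judging from XFS_DIR2_SPACE_SIZE
+ * (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) in xfs_dir2_data.h, a
+ * 32-bit dataptr scaled by the 8-byte entry alignment covers the whole
+ * 32GB data space; the helpers below show that assumed byte <->
+ * dataptr relationship, which the loop above relies on through
+ * XFS_DIR2_BYTE_TO_DATAPTR()/XFS_DIR2_DATAPTR_TO_BYTE().
+ */
+static unsigned int ex_byte_to_dataptr(unsigned long long byte_off)
+{
+	return (unsigned int)(byte_off >> 3);	/* XFS_DIR2_DATA_ALIGN_LOG */
+}
+
+static unsigned long long ex_dataptr_to_byte(unsigned int dataptr)
+{
+	return (unsigned long long)dataptr << 3;
+}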
+
+/*
+ * Initialize a new leaf block, leaf1 or leafn magic accepted.
+ */
+int
+xfs_dir2_leaf_init(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dir2_db_t		bno,		/* directory block number */
+	xfs_dabuf_t		**bpp,		/* out: leaf buffer */
+	int			magic)		/* magic number for block */
+{
+	xfs_dabuf_t		*bp;		/* leaf buffer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	ASSERT(dp != NULL);
+	tp = args->trans;
+	mp = dp->i_mount;
+	ASSERT(bno >= XFS_DIR2_LEAF_FIRSTDB(mp) &&
+	       bno < XFS_DIR2_FREE_FIRSTDB(mp));
+	/*
+	 * Get the buffer for the block.
+	 */
+	error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, bno), -1, &bp,
+		XFS_DATA_FORK);
+	if (error) {
+		return error;
+	}
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	/*
+	 * Initialize the header.
+	 */
+	INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic);
+	leaf->hdr.info.forw = 0;
+	leaf->hdr.info.back = 0;
+	leaf->hdr.count = 0;
+	leaf->hdr.stale = 0;
+	xfs_dir2_leaf_log_header(tp, bp);
+	/*
+	 * If it's a leaf-format directory initialize the tail.
+	 * In this case our caller has the real bests table to copy into
+	 * the block.
+	 */
+	if (magic == XFS_DIR2_LEAF1_MAGIC) {
+		ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+		ltp->bestcount = 0;
+		xfs_dir2_leaf_log_tail(tp, bp);
+	}
+	*bpp = bp;
+	return 0;
+}
+
+/*
+ * Log the bests entries indicated from a leaf1 block.
+ */
+void
+xfs_dir2_leaf_log_bests(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* leaf buffer */
+	int			first,		/* first entry to log */
+	int			last)		/* last entry to log */
+{
+	xfs_dir2_data_off_t	*firstb;	/* pointer to first entry */
+	xfs_dir2_data_off_t	*lastb;		/* pointer to last entry */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC);
+	ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf);
+	firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first;
+	lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last;
+	xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf),
+		(uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1));
+}
+
+/*
+ * Log the leaf entries indicated from a leaf1 or leafn block.
+ */
+void
+xfs_dir2_leaf_log_ents(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* leaf buffer */
+	int			first,		/* first entry to log */
+	int			last)		/* last entry to log */
+{
+	xfs_dir2_leaf_entry_t	*firstlep;	/* pointer to first entry */
+	xfs_dir2_leaf_entry_t	*lastlep;	/* pointer to last entry */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC ||
+	       INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	firstlep = &leaf->ents[first];
+	lastlep = &leaf->ents[last];
+	xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
+		(uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1));
+}
+
+/*
+ * Log the header of the leaf1 or leafn block.
+ */
+void
+xfs_dir2_leaf_log_header(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp)		/* leaf buffer */
+{
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC ||
+	       INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
+		(uint)(sizeof(leaf->hdr) - 1));
+}
+
+/*
+ * Log the tail of the leaf1 block.
+ */
+void
+xfs_dir2_leaf_log_tail(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp)		/* leaf buffer */
+{
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	mp = tp->t_mountp;
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC);
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
+		(uint)(mp->m_dirblksize - 1));
+}
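+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * pattern shared by the logging helpers above.  A region of the buffer is
+ * described by the byte offsets of its first and last bytes, computed as
+ * pointer differences against the start of the block; the real code hands
+ * those two offsets to xfs_da_log_buf().
+ */
+#if 0
+static void
+example_log_range(char *block, void *first_field, void *last_field,
+		  unsigned int last_field_size)
+{
+	unsigned int	first_byte;	/* offset of first byte to log */
+	unsigned int	last_byte;	/* offset of last byte to log */
+
+	first_byte = (unsigned int)((char *)first_field - block);
+	last_byte = (unsigned int)((char *)last_field - block +
+				   last_field_size - 1);
+	(void)first_byte;
+	(void)last_byte;
+}
+#endif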
+
+/*
+ * Look up the entry referred to by args in the leaf format directory.
+ * Most of the work is done by the xfs_dir2_leaf_lookup_int routine which
+ * is also used by the node-format code.
+ */
+int
+xfs_dir2_leaf_lookup(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data block entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	int			index;		/* found entry index */
+	xfs_dabuf_t		*lbp;		/* leaf buffer */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args("leaf_lookup", args);
+	/*
+	 * Look up name in the leaf block, returning both buffers and index.
+	 */
+	if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+		return error;
+	}
+	tp = args->trans;
+	dp = args->dp;
+	xfs_dir2_leaf_check(dp, lbp);
+	leaf = lbp->data;
+	/*
+	 * Get to the leaf entry and contained data entry address.
+	 */
+	lep = &leaf->ents[index];
+	/*
+	 * Point to the data entry.
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)dbp->data +
+	       XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT)));
+	/*
+	 * Return the found inode number.
+	 */
+	args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+	xfs_da_brelse(tp, dbp);
+	xfs_da_brelse(tp, lbp);
+	return XFS_ERROR(EEXIST);
+}
+
+/*
+ * Look up name/hash in the leaf block.
+ * Fill in indexp with the found index, and dbpp with the data buffer.
+ * If not found dbpp will be NULL, and ENOENT comes back.
+ * lbpp will always be filled in with the leaf buffer unless there's an error.
+ */
+static int					/* error */
+xfs_dir2_leaf_lookup_int(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		**lbpp,		/* out: leaf buffer */
+	int			*indexp,	/* out: index in leaf block */
+	xfs_dabuf_t		**dbpp)		/* out: data buffer */
+{
+	xfs_dir2_db_t		curdb;		/* current data block number */
+	xfs_dabuf_t		*dbp;		/* data buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	int			index;		/* index in leaf block */
+	xfs_dabuf_t		*lbp;		/* leaf buffer */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_db_t		newdb;		/* new data block number */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Read the leaf block into the buffer.
+	 */
+	if ((error =
+	    xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp,
+		    XFS_DATA_FORK))) {
+		return error;
+	}
+	*lbpp = lbp;
+	leaf = lbp->data;
+	xfs_dir2_leaf_check(dp, lbp);
+	/*
+	 * Look for the first leaf entry with our hash value.
+	 */
+	index = xfs_dir2_leaf_search_hash(args, lbp);
+	/*
+	 * Loop over all the entries with the right hash value
+	 * looking to match the name.
+	 */
+	for (lep = &leaf->ents[index], dbp = NULL, curdb = -1;
+	     index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval;
+	     lep++, index++) {
+		/*
+		 * Skip over stale leaf entries.
+		 */
+		if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		/*
+		 * Get the new data block number.
+		 */
+		newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT));
+		/*
+		 * If it's not the same as the old data block number,
+		 * need to pitch the old one and read the new one.
+		 */
+		if (newdb != curdb) {
+			if (dbp)
+				xfs_da_brelse(tp, dbp);
+			if ((error =
+			    xfs_da_read_buf(tp, dp,
+				    XFS_DIR2_DB_TO_DA(mp, newdb), -1, &dbp,
+				    XFS_DATA_FORK))) {
+				xfs_da_brelse(tp, lbp);
+				return error;
+			}
+			xfs_dir2_data_check(dp, dbp);
+			curdb = newdb;
+		}
+		/*
+		 * Point to the data entry.
+		 */
+		dep = (xfs_dir2_data_entry_t *)
+		      ((char *)dbp->data +
+		       XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)));
+		/*
+		 * If it matches then return it.
+		 */
+		if (dep->namelen == args->namelen &&
+		    dep->name[0] == args->name[0] &&
+		    memcmp(dep->name, args->name, args->namelen) == 0) {
+			*dbpp = dbp;
+			*indexp = index;
+			return 0;
+		}
+	}
+	/*
+	 * No match found, return ENOENT.
+	 */
+	ASSERT(args->oknoent);
+	if (dbp)
+		xfs_da_brelse(tp, dbp);
+	xfs_da_brelse(tp, lbp);
+	return XFS_ERROR(ENOENT);
+}
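+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * block-caching pattern used in the lookup loop above.  Consecutive leaf
+ * entries with the same hash value often point into the same data block,
+ * so the block is re-read only when the referenced block number changes.
+ */
+#if 0
+static void
+example_scan_entries(int *entry_block, int nentries)
+{
+	int	curblock = -1;		/* no data block in hand yet */
+	int	i;
+
+	for (i = 0; i < nentries; i++) {
+		if (entry_block[i] != curblock) {
+			/* release the old block, read the new one */
+			curblock = entry_block[i];
+		}
+		/* compare the name within the cached block */
+	}
+}
+#endif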
+
+/*
+ * Remove an entry from a leaf format directory.
+ */
+int						/* error */
+xfs_dir2_leaf_removename(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_dir2_data_off_t	*bestsp;	/* leaf block best freespace */
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dir2_db_t		db;		/* data block number */
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data entry structure */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	xfs_dir2_db_t		i;		/* temporary data block # */
+	int			index;		/* index into leaf entries */
+	xfs_dabuf_t		*lbp;		/* leaf buffer */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log data header */
+	int			needscan;	/* need to rescan data frees */
+	xfs_dir2_data_off_t	oldbest;	/* old value of best free */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args("leaf_removename", args);
+	/*
+	 * Lookup the leaf entry, get the leaf and data blocks read in.
+	 */
+	if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+		return error;
+	}
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	leaf = lbp->data;
+	data = dbp->data;
+	xfs_dir2_data_check(dp, dbp);
+	/*
+	 * Point to the leaf entry, use that to point to the data entry.
+	 */
+	lep = &leaf->ents[index];
+	db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT));
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)));
+	needscan = needlog = 0;
+	oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT);
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
+	ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest);
+	/*
+	 * Mark the former data entry unused.
+	 */
+	xfs_dir2_data_make_free(tp, dbp,
+		(xfs_dir2_data_aoff_t)((char *)dep - (char *)data),
+		XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan);
+	/*
+	 * We just mark the leaf entry stale by putting a null in it.
+	 */
+	INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1);
+	xfs_dir2_leaf_log_header(tp, lbp);
+	INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR);
+	xfs_dir2_leaf_log_ents(tp, lbp, index, index);
+	/*
+	 * Scan the freespace in the data block again if necessary,
+	 * log the data block header if necessary.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, data, &needlog, NULL);
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	/*
+	 * If the longest freespace in the data block has changed,
+	 * put the new value in the bests table and log that.
+	 */
+	if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) {
+		INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT);
+		xfs_dir2_leaf_log_bests(tp, lbp, db, db);
+	}
+	xfs_dir2_data_check(dp, dbp);
+	/*
+	 * If the data block is now empty then get rid of the data block.
+	 */
+	if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) ==
+	    mp->m_dirblksize - (uint)sizeof(data->hdr)) {
+		ASSERT(db != mp->m_dirdatablk);
+		if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
+			/*
+			 * Can't get rid of it: freeing the block would have
+			 * needed a bmap btree block allocation and no space
+			 * was reserved for one.  Just go on, returning
+			 * success and leaving the empty block in place.
+			 */
+			if (error == ENOSPC && args->total == 0) {
+				xfs_da_buf_done(dbp);
+				error = 0;
+			}
+			xfs_dir2_leaf_check(dp, lbp);
+			xfs_da_buf_done(lbp);
+			return error;
+		}
+		dbp = NULL;
+		/*
+		 * If this is the last data block then compact the
+		 * bests table by getting rid of entries.
+		 */
+		if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) {
+			/*
+			 * Look for the last active entry (i).
+			 */
+			for (i = db - 1; i > 0; i--) {
+				if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF)
+					break;
+			}
+			/*
+			 * Copy the table down so inactive entries at the
+			 * end are removed.
+			 */
+			memmove(&bestsp[db - i], bestsp,
+				(INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp));
+			INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i));
+			xfs_dir2_leaf_log_tail(tp, lbp);
+			xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+		} else
+			INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF);
+	}
+	/*
+	 * If the data block was not the first one, drop it.
+	 */
+	else if (db != mp->m_dirdatablk && dbp != NULL) {
+		xfs_da_buf_done(dbp);
+		dbp = NULL;
+	}
+	xfs_dir2_leaf_check(dp, lbp);
+	/*
+	 * See if we can convert to block form.
+	 */
+	return xfs_dir2_leaf_to_block(args, lbp, dbp);
+}
+
+/*
+ * Replace the inode number in a leaf format directory entry.
+ */
+int						/* error */
+xfs_dir2_leaf_replace(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data block entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	int			index;		/* index of leaf entry */
+	xfs_dabuf_t		*lbp;		/* leaf buffer */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args("leaf_replace", args);
+	/*
+	 * Look up the entry.
+	 */
+	if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+		return error;
+	}
+	dp = args->dp;
+	leaf = lbp->data;
+	/*
+	 * Point to the leaf entry, get data address from it.
+	 */
+	lep = &leaf->ents[index];
+	/*
+	 * Point to the data entry.
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)dbp->data +
+	       XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT)));
+	ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT));
+	/*
+	 * Put the new inode number in, log it.
+	 */
+	INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+	tp = args->trans;
+	xfs_dir2_data_log_entry(tp, dbp, dep);
+	xfs_da_buf_done(dbp);
+	xfs_dir2_leaf_check(dp, lbp);
+	xfs_da_brelse(tp, lbp);
+	return 0;
+}
+
+/*
+ * Return index in the leaf block (lbp) which is either the first
+ * one with this hash value, or if there are none, the insert point
+ * for that hash value.
+ */
+int						/* index value */
+xfs_dir2_leaf_search_hash(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*lbp)		/* leaf buffer */
+{
+	xfs_dahash_t		hash=0;		/* hash from this entry */
+	xfs_dahash_t		hashwant;	/* hash value looking for */
+	int			high;		/* high leaf index */
+	int			low;		/* low leaf index */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	int			mid=0;		/* current leaf index */
+
+	leaf = lbp->data;
+#ifndef __KERNEL__
+	if (!leaf->hdr.count)
+		return 0;
+#endif
+	/*
+	 * Note that the table cannot be empty here, so the loop below always
+	 * runs at least once.
+	 * Binary search the leaf entries looking for our hash value.
+	 */
+	for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1,
+		hashwant = args->hashval;
+	     low <= high; ) {
+		mid = (low + high) >> 1;
+		if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant)
+			break;
+		if (hash < hashwant)
+			low = mid + 1;
+		else
+			high = mid - 1;
+	}
+	/*
+	 * Found one, back up through all the equal hash values.
+	 */
+	if (hash == hashwant) {
+		while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) {
+			mid--;
+		}
+	}
+	/*
+	 * Need to point to an entry higher than ours.
+	 */
+	else if (hash < hashwant)
+		mid++;
+	return mid;
+}
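+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * search above is a plain binary search followed by a walk backwards over
+ * duplicate keys, so the index returned is always the first entry with the
+ * wanted hash (or the insertion point when the hash is absent).  Assumes
+ * n > 0, as the caller guarantees.
+ */
+#if 0
+static int
+example_first_match(__uint32_t *keys, int n, __uint32_t want)
+{
+	int		low = 0, high = n - 1, mid = 0;
+	__uint32_t	key = 0;
+
+	while (low <= high) {
+		mid = (low + high) >> 1;
+		if ((key = keys[mid]) == want)
+			break;
+		if (key < want)
+			low = mid + 1;
+		else
+			high = mid - 1;
+	}
+	if (key == want) {
+		while (mid > 0 && keys[mid - 1] == want)
+			mid--;			/* back up to first duplicate */
+	} else if (key < want)
+		mid++;				/* insertion point is one past */
+	return mid;
+}
+#endif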
+
+/*
+ * Trim off a trailing data block.  We know it's empty since the leaf
+ * freespace table says so.
+ */
+int						/* error */
+xfs_dir2_leaf_trim_data(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*lbp,		/* leaf buffer */
+	xfs_dir2_db_t		db)		/* data block number */
+{
+	xfs_dir2_data_off_t	*bestsp;	/* leaf bests table */
+#ifdef DEBUG
+	xfs_dir2_data_t		*data;		/* data block structure */
+#endif
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return value */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	/*
+	 * Read the offending data block.  We need its buffer.
+	 */
+	if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, db), -1, &dbp,
+			XFS_DATA_FORK))) {
+		return error;
+	}
+#ifdef DEBUG
+	data = dbp->data;
+	ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC);
+#endif
+	/*
+	 * Note: when DEBUG is not defined, "data" is neither declared nor
+	 * assigned above; that is safe because the only uses of it below are
+	 * inside ASSERTs, which compile away without DEBUG.
+	 */
+
+	leaf = lbp->data;
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) ==
+	       mp->m_dirblksize - (uint)sizeof(data->hdr));
+	ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+	/*
+	 * Get rid of the data block.
+	 */
+	if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
+		ASSERT(error != ENOSPC);
+		xfs_da_brelse(tp, dbp);
+		return error;
+	}
+	/*
+	 * Eliminate the last bests entry from the table.
+	 */
+	bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
+	INT_MOD(ltp->bestcount, ARCH_CONVERT, -1);
+	memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp));
+	xfs_dir2_leaf_log_tail(tp, lbp);
+	xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+	return 0;
+}
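+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): why the
+ * memmove above shifts the remaining bests entries up by one.  The bests
+ * table ends at a fixed position (just below the leaf tail) and grows
+ * downward, so dropping the last entry means the surviving entries must
+ * slide one slot toward the tail to stay adjacent to it.
+ */
+#if 0
+static void
+example_shrink_downward_table(__uint16_t *table_end, int *countp)
+{
+	__uint16_t	*table = table_end - *countp;	/* current start */
+
+	(*countp)--;					/* drop one entry */
+	/* survivors slide up so the table still ends at table_end */
+	memmove(table + 1, table, *countp * sizeof(*table));
+}
+#endif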
+
+/*
+ * Convert node form directory to leaf form directory.
+ * The root of the node form dir needs to already be a LEAFN block.
+ * Just return if we can't do anything.
+ */
+int						/* error */
+xfs_dir2_node_to_leaf(
+	xfs_da_state_t		*state)		/* directory operation state */
+{
+	xfs_da_args_t		*args;		/* operation arguments */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	xfs_dabuf_t		*fbp;		/* buffer for freespace block */
+	xfs_fileoff_t		fo;		/* freespace file offset */
+	xfs_dir2_free_t		*free;		/* freespace structure */
+	xfs_dabuf_t		*lbp;		/* buffer for leaf block */
+	xfs_dir2_leaf_tail_t	*ltp;		/* tail of leaf structure */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			rval;		/* successful free trim? */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	/*
+	 * There's more than a leaf level in the btree, so there must
+	 * be multiple leafn blocks.  Give up.
+	 */
+	if (state->path.active > 1)
+		return 0;
+	args = state->args;
+	xfs_dir2_trace_args("node_to_leaf", args);
+	mp = state->mp;
+	dp = args->dp;
+	tp = args->trans;
+	/*
+	 * Get the last offset in the file.
+	 */
+	if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) {
+		return error;
+	}
+	fo -= mp->m_dirblkfsbs;
+	/*
+	 * If there are freespace blocks other than the first one,
+	 * take this opportunity to remove trailing empty freespace blocks
+	 * that may have been left behind during no-space-reservation
+	 * operations.
+	 */
+	while (fo > mp->m_dirfreeblk) {
+		if ((error = xfs_dir2_node_trim_free(args, fo, &rval))) {
+			return error;
+		}
+		if (rval)
+			fo -= mp->m_dirblkfsbs;
+		else
+			return 0;
+	}
+	/*
+	 * Now find the block just before the freespace block.
+	 */
+	if ((error = xfs_bmap_last_before(tp, dp, &fo, XFS_DATA_FORK))) {
+		return error;
+	}
+	/*
+	 * If it's not the single leaf block, give up.
+	 */
+	if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize)
+		return 0;
+	lbp = state->path.blk[0].bp;
+	leaf = lbp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	/*
+	 * Read the freespace block.
+	 */
+	if ((error = xfs_da_read_buf(tp, dp, mp->m_dirfreeblk, -1, &fbp,
+			XFS_DATA_FORK))) {
+		return error;
+	}
+	free = fbp->data;
+	ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+	ASSERT(!free->hdr.firstdb);
+	/*
+	 * Now see if the leafn and free data will fit in a leaf1.
+	 * If not, release the buffer and give up.
+	 */
+	if ((uint)sizeof(leaf->hdr) +
+	    (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) +
+	    INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) +
+	    (uint)sizeof(leaf->tail) >
+	    mp->m_dirblksize) {
+		xfs_da_brelse(tp, fbp);
+		return 0;
+	}
+	/*
+	 * If the leaf has any stale entries in it, compress them out.
+	 * The compact routine will log the header.
+	 */
+	if (INT_GET(leaf->hdr.stale, ARCH_CONVERT))
+		xfs_dir2_leaf_compact(args, lbp);
+	else
+		xfs_dir2_leaf_log_header(tp, lbp);
+	INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC);
+	/*
+	 * Set up the leaf tail from the freespace block.
+	 */
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT);
+	/*
+	 * Set up the leaf bests table.
+	 */
+	memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests,
+		INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0]));
+	xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+	xfs_dir2_leaf_log_tail(tp, lbp);
+	xfs_dir2_leaf_check(dp, lbp);
+	/*
+	 * Get rid of the freespace block.
+	 */
+	error = xfs_dir2_shrink_inode(args, XFS_DIR2_FREE_FIRSTDB(mp), fbp);
+	if (error) {
+		/*
+		 * ENOSPC can't happen here: it only occurs when punching
+		 * out the middle of an extent, and this is an isolated
+		 * block.
+		 */
+		ASSERT(error != ENOSPC);
+		return error;
+	}
+	fbp = NULL;
+	/*
+	 * Now see if we can convert the single-leaf directory
+	 * down to a block form directory.
+	 * This routine always kills the dabuf for the leaf, so
+	 * eliminate it from the path.
+	 */
+	error = xfs_dir2_leaf_to_block(args, lbp, NULL);
+	state->path.blk[0].bp = NULL;
+	return error;
+}
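+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * "will it fit" test above as plain arithmetic.  The sizes used here are
+ * examples only (16-byte header, 8-byte leaf entries, 2-byte bests
+ * entries, 4-byte tail, 4096-byte directory block); the real code takes
+ * sizeof() of the on-disk structures and mp->m_dirblksize.
+ */
+#if 0
+static int
+example_leaf1_fits(int live_entries, int nbests)
+{
+	const int	hdr = 16, ent = 8, best = 2, tail = 4, blksize = 4096;
+
+	return hdr + live_entries * ent + nbests * best + tail <= blksize;
+}
+#endif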
diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h
new file mode 100644
index 0000000..7f20eee
--- /dev/null
+++ b/fs/xfs/xfs_dir2_leaf.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_LEAF_H__
+#define	__XFS_DIR2_LEAF_H__
+
+/*
+ * Directory version 2, leaf block structures.
+ */
+
+struct uio;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Constants.
+ */
+
+/*
+ * Offset of the leaf/node space.  First block in this space
+ * is the btree root.
+ */
+#define	XFS_DIR2_LEAF_SPACE	1
+#define	XFS_DIR2_LEAF_OFFSET	(XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
+#define	XFS_DIR2_LEAF_FIRSTDB(mp)	\
+	XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_LEAF_OFFSET)
+
+/*
+ * Types.
+ */
+
+/*
+ * Offset in data space of a data entry.
+ */
+typedef	__uint32_t	xfs_dir2_dataptr_t;
+#define	XFS_DIR2_MAX_DATAPTR	((xfs_dir2_dataptr_t)0xffffffff)
+#define	XFS_DIR2_NULL_DATAPTR	((xfs_dir2_dataptr_t)0)
+
+/*
+ * Structures.
+ */
+
+/*
+ * Leaf block header.
+ */
+typedef struct xfs_dir2_leaf_hdr {
+	xfs_da_blkinfo_t	info;		/* header for da routines */
+	__uint16_t		count;		/* count of entries */
+	__uint16_t		stale;		/* count of stale entries */
+} xfs_dir2_leaf_hdr_t;
+
+/*
+ * Leaf block entry.
+ */
+typedef struct xfs_dir2_leaf_entry {
+	xfs_dahash_t		hashval;	/* hash value of name */
+	xfs_dir2_dataptr_t	address;	/* address of data entry */
+} xfs_dir2_leaf_entry_t;
+
+/*
+ * Leaf block tail.
+ */
+typedef struct xfs_dir2_leaf_tail {
+	__uint32_t		bestcount;
+} xfs_dir2_leaf_tail_t;
+
+/*
+ * Leaf block.
+ * bests and tail are at the end of the block for single-leaf only
+ * (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC).
+ */
+typedef struct xfs_dir2_leaf {
+	xfs_dir2_leaf_hdr_t	hdr;		/* leaf header */
+	xfs_dir2_leaf_entry_t	ents[1];	/* entries */
+						/* ... */
+	xfs_dir2_data_off_t	bests[1];	/* best free counts */
+	xfs_dir2_leaf_tail_t	tail;		/* leaf tail */
+} xfs_dir2_leaf_t;
+
+/*
+ * Macros.
+ * The DB blocks are logical directory block numbers, not filesystem blocks.
+ */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_MAX_LEAF_ENTS)
+int
+xfs_dir2_max_leaf_ents(struct xfs_mount *mp);
+#define	XFS_DIR2_MAX_LEAF_ENTS(mp)	\
+	xfs_dir2_max_leaf_ents(mp)
+#else
+#define	XFS_DIR2_MAX_LEAF_ENTS(mp)	\
+	((int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / \
+	       (uint)sizeof(xfs_dir2_leaf_entry_t)))
+#endif
+
+/*
+ * Get address of the bestcount field in the single-leaf block.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_TAIL_P)
+xfs_dir2_leaf_tail_t *
+xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp);
+#define	XFS_DIR2_LEAF_TAIL_P(mp,lp)	\
+	xfs_dir2_leaf_tail_p(mp, lp)
+#else
+#define	XFS_DIR2_LEAF_TAIL_P(mp,lp)	\
+	((xfs_dir2_leaf_tail_t *)\
+	 ((char *)(lp) + (mp)->m_dirblksize - \
+	  (uint)sizeof(xfs_dir2_leaf_tail_t)))
+#endif
+
+/*
+ * Get address of the bests array in the single-leaf block.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_BESTS_P)
+xfs_dir2_data_off_t *
+xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp);
+#define	XFS_DIR2_LEAF_BESTS_P(ltp)	xfs_dir2_leaf_bests_p(ltp)
+#else
+#define	XFS_DIR2_LEAF_BESTS_P(ltp)	\
+	((xfs_dir2_data_off_t *)(ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT))
+#endif
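+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS,
+ * endianness conversion elided): both the tail and the bests array are
+ * located by subtracting from the end of the directory block, since the
+ * tail occupies the last bytes and the bests array grows downward from it.
+ */
+#if 0
+static void
+example_locate_tail_and_bests(char *block, unsigned int blksize)
+{
+	xfs_dir2_leaf_tail_t	*tail;
+	xfs_dir2_data_off_t	*bests;
+
+	/* tail: the last sizeof(*tail) bytes of the block */
+	tail = (xfs_dir2_leaf_tail_t *)(block + blksize - sizeof(*tail));
+	/* bests: bestcount entries immediately below the tail */
+	bests = (xfs_dir2_data_off_t *)tail - tail->bestcount;
+	(void)bests;
+}
+#endif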
+
+/*
+ * Convert dataptr to byte in file space
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_BYTE)
+xfs_dir2_off_t
+xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp);
+#define	XFS_DIR2_DATAPTR_TO_BYTE(mp,dp)	xfs_dir2_dataptr_to_byte(mp, dp)
+#else
+#define	XFS_DIR2_DATAPTR_TO_BYTE(mp,dp)	\
+	((xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG)
+#endif
+
+/*
+ * Convert byte in file space to dataptr.  It had better be aligned.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DATAPTR)
+xfs_dir2_dataptr_t
+xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by);
+#define	XFS_DIR2_BYTE_TO_DATAPTR(mp,by)	xfs_dir2_byte_to_dataptr(mp,by)
+#else
+#define	XFS_DIR2_BYTE_TO_DATAPTR(mp,by)	\
+	((xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG))
+#endif
+
+/*
+ * Convert dataptr to a block number
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_DB)
+xfs_dir2_db_t
+xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp);
+#define	XFS_DIR2_DATAPTR_TO_DB(mp,dp)	xfs_dir2_dataptr_to_db(mp, dp)
+#else
+#define	XFS_DIR2_DATAPTR_TO_DB(mp,dp)	\
+	XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp))
+#endif
+
+/*
+ * Convert dataptr to a byte offset in a block
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_OFF)
+xfs_dir2_data_aoff_t
+xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp);
+#define	XFS_DIR2_DATAPTR_TO_OFF(mp,dp)	xfs_dir2_dataptr_to_off(mp, dp)
+#else
+#define	XFS_DIR2_DATAPTR_TO_OFF(mp,dp)	\
+	XFS_DIR2_BYTE_TO_OFF(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp))
+#endif
+
+/*
+ * Convert block and offset to byte in space
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_BYTE)
+xfs_dir2_off_t
+xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
+			xfs_dir2_data_aoff_t o);
+#define	XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o)	\
+	xfs_dir2_db_off_to_byte(mp, db, o)
+#else
+#define	XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o)	\
+	(((xfs_dir2_off_t)(db) << \
+	 ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o))
+#endif
+
+/*
+ * Convert byte in space to (DB) block
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DB)
+xfs_dir2_db_t xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by);
+#define	XFS_DIR2_BYTE_TO_DB(mp,by)	xfs_dir2_byte_to_db(mp, by)
+#else
+#define	XFS_DIR2_BYTE_TO_DB(mp,by)	\
+	((xfs_dir2_db_t)((by) >> \
+			 ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)))
+#endif
+
+/*
+ * Convert byte in space to (DA) block
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DA)
+xfs_dablk_t xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by);
+#define	XFS_DIR2_BYTE_TO_DA(mp,by)	xfs_dir2_byte_to_da(mp, by)
+#else
+#define	XFS_DIR2_BYTE_TO_DA(mp,by)	\
+	XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, by))
+#endif
+
+/*
+ * Convert byte in space to offset in a block
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_OFF)
+xfs_dir2_data_aoff_t
+xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by);
+#define	XFS_DIR2_BYTE_TO_OFF(mp,by)	xfs_dir2_byte_to_off(mp, by)
+#else
+#define	XFS_DIR2_BYTE_TO_OFF(mp,by)	\
+	((xfs_dir2_data_aoff_t)((by) & \
+				((1 << ((mp)->m_sb.sb_blocklog + \
+					(mp)->m_sb.sb_dirblklog)) - 1)))
+#endif
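+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * conversions above are shift/mask arithmetic.  The log values used here
+ * are examples (align_log = 3 for 8-byte entry alignment, blocklog = 12
+ * for 4096-byte fs blocks, dirblklog = 0 for one fs block per directory
+ * block); the real macros read them from the mount/superblock.
+ */
+#if 0
+static void
+example_byte_conversions(__uint64_t byte)
+{
+	const int	align_log = 3, blocklog = 12, dirblklog = 0;
+	__uint32_t	db, off, dataptr;
+
+	db = (__uint32_t)(byte >> (blocklog + dirblklog));	/* BYTE_TO_DB */
+	off = (__uint32_t)(byte &
+			   ((1 << (blocklog + dirblklog)) - 1));/* BYTE_TO_OFF */
+	dataptr = (__uint32_t)(byte >> align_log);	/* BYTE_TO_DATAPTR */
+	/* and back again: DB_OFF_TO_BYTE */
+	byte = ((__uint64_t)db << (blocklog + dirblklog)) + off;
+	(void)dataptr;
+}
+#endif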
+
+/*
+ * Convert block and offset to dataptr
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_DATAPTR)
+xfs_dir2_dataptr_t
+xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
+			   xfs_dir2_data_aoff_t o);
+#define	XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o)	\
+	xfs_dir2_db_off_to_dataptr(mp, db, o)
+#else
+#define	XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o)	\
+	XFS_DIR2_BYTE_TO_DATAPTR(mp, XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o))
+#endif
+
+/*
+ * Convert block (DB) to block (dablk)
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_DA)
+xfs_dablk_t xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db);
+#define	XFS_DIR2_DB_TO_DA(mp,db)	xfs_dir2_db_to_da(mp, db)
+#else
+#define	XFS_DIR2_DB_TO_DA(mp,db)	\
+	((xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog))
+#endif
+
+/*
+ * Convert block (dablk) to block (DB)
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_DB)
+xfs_dir2_db_t xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da);
+#define	XFS_DIR2_DA_TO_DB(mp,da)	xfs_dir2_da_to_db(mp, da)
+#else
+#define	XFS_DIR2_DA_TO_DB(mp,da)	\
+	((xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog))
+#endif
+
+/*
+ * Convert block (dablk) to byte offset in space
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_BYTE)
+xfs_dir2_off_t xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da);
+#define XFS_DIR2_DA_TO_BYTE(mp,da)	xfs_dir2_da_to_byte(mp, da)
+#else
+#define	XFS_DIR2_DA_TO_BYTE(mp,da)	\
+	XFS_DIR2_DB_OFF_TO_BYTE(mp, XFS_DIR2_DA_TO_DB(mp, da), 0)
+#endif
+
+/*
+ * Function declarations.
+ */
+
+extern int
+	xfs_dir2_block_to_leaf(struct xfs_da_args *args, struct xfs_dabuf *dbp);
+
+extern int
+	xfs_dir2_leaf_addname(struct xfs_da_args *args);
+
+extern void
+	xfs_dir2_leaf_compact(struct xfs_da_args *args, struct xfs_dabuf *bp);
+
+extern void
+	xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
+				 int *lowstalep, int *highstalep, int *lowlogp,
+				 int *highlogp);
+
+extern int
+	xfs_dir2_leaf_getdents(struct xfs_trans *tp, struct xfs_inode *dp,
+			       struct uio *uio, int *eofp, struct xfs_dirent *dbp,
+			       xfs_dir2_put_t put);
+
+extern int
+	xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
+			   struct xfs_dabuf **bpp, int magic);
+
+extern void
+	xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
+			       int first, int last);
+
+extern void
+	xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
+				int first, int last);
+
+extern void
+	xfs_dir2_leaf_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp);
+
+extern void
+	xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
+
+extern int
+	xfs_dir2_leaf_lookup(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_leaf_removename(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_leaf_replace(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
+				  struct xfs_dabuf *lbp);
+extern int
+	xfs_dir2_leaf_trim_data(struct xfs_da_args *args, struct xfs_dabuf *lbp,
+				xfs_dir2_db_t db);
+
+extern int
+	xfs_dir2_node_to_leaf(struct xfs_da_state *state);
+
+#endif	/* __XFS_DIR2_LEAF_H__ */
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
new file mode 100644
index 0000000..a7615d86
--- /dev/null
+++ b/fs/xfs/xfs_dir2_node.c
@@ -0,0 +1,2020 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_node.c
+ * XFS directory implementation, version 2, node form files
+ * See data structures in xfs_dir2_node.h and xfs_da_btree.h.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_error.h"
+
+/*
+ * Function declarations.
+ */
+static void xfs_dir2_free_log_header(xfs_trans_t *tp, xfs_dabuf_t *bp);
+static int xfs_dir2_leafn_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index);
+#ifdef DEBUG
+static void xfs_dir2_leafn_check(xfs_inode_t *dp, xfs_dabuf_t *bp);
+#else
+#define	xfs_dir2_leafn_check(dp, bp)
+#endif
+static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, xfs_dabuf_t *bp_s,
+				    int start_s, xfs_dabuf_t *bp_d, int start_d,
+				    int count);
+static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
+				     xfs_da_state_blk_t *blk1,
+				     xfs_da_state_blk_t *blk2);
+static int xfs_dir2_leafn_remove(xfs_da_args_t *args, xfs_dabuf_t *bp,
+				 int index, xfs_da_state_blk_t *dblk,
+				 int *rval);
+static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
+				     xfs_da_state_blk_t *fblk);
+
+/*
+ * Log entries from a freespace block.
+ */
+void
+xfs_dir2_free_log_bests(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp,		/* freespace buffer */
+	int			first,		/* first entry to log */
+	int			last)		/* last entry to log */
+{
+	xfs_dir2_free_t		*free;		/* freespace structure */
+
+	free = bp->data;
+	ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+	xfs_da_log_buf(tp, bp,
+		(uint)((char *)&free->bests[first] - (char *)free),
+		(uint)((char *)&free->bests[last] - (char *)free +
+		       sizeof(free->bests[0]) - 1));
+}
+
+/*
+ * Log header from a freespace block.
+ */
+static void
+xfs_dir2_free_log_header(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_dabuf_t		*bp)		/* freespace buffer */
+{
+	xfs_dir2_free_t		*free;		/* freespace structure */
+
+	free = bp->data;
+	ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+	xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
+		(uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
+}
+
+/*
+ * Convert a leaf-format directory to a node-format directory.
+ * We need to change the magic number of the leaf block, and copy
+ * the freespace table out of the leaf block into its own block.
+ */
+int						/* error */
+xfs_dir2_leaf_to_node(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*lbp)		/* leaf buffer */
+{
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return value */
+	xfs_dabuf_t		*fbp;		/* freespace buffer */
+	xfs_dir2_db_t		fdb;		/* freespace block number */
+	xfs_dir2_free_t		*free;		/* freespace structure */
+	xfs_dir2_data_off_t	*from;		/* pointer to freespace entry */
+	int			i;		/* leaf freespace index */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			n;		/* count of live freespc ents */
+	xfs_dir2_data_off_t	off;		/* freespace entry value */
+	xfs_dir2_data_off_t	*to;		/* pointer to freespace entry */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_b("leaf_to_node", args, lbp);
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	/*
+	 * Add a freespace block to the directory.
+	 */
+	if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, &fdb))) {
+		return error;
+	}
+	ASSERT(fdb == XFS_DIR2_FREE_FIRSTDB(mp));
+	/*
+	 * Get the buffer for the new freespace block.
+	 */
+	if ((error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), -1, &fbp,
+			XFS_DATA_FORK))) {
+		return error;
+	}
+	ASSERT(fbp != NULL);
+	free = fbp->data;
+	leaf = lbp->data;
+	ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+	/*
+	 * Initialize the freespace block header.
+	 */
+	INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC);
+	free->hdr.firstdb = 0;
+	ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize);
+	INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT);
+	/*
+	 * Copy freespace entries from the leaf block to the new block.
+	 * Count active entries.
+	 */
+	for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests;
+	     i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) {
+		if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF)
+			n++;
+		INT_SET(*to, ARCH_CONVERT, off);
+	}
+	INT_SET(free->hdr.nused, ARCH_CONVERT, n);
+	INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC);
+	/*
+	 * Log everything.
+	 */
+	xfs_dir2_leaf_log_header(tp, lbp);
+	xfs_dir2_free_log_header(tp, fbp);
+	xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1);
+	xfs_da_buf_done(fbp);
+	xfs_dir2_leafn_check(dp, lbp);
+	return 0;
+}
+
+/*
+ * Add a leaf entry to a leaf block in a node-form directory.
+ * The other work necessary is done from the caller.
+ */
+static int					/* error */
+xfs_dir2_leafn_add(
+	xfs_dabuf_t		*bp,		/* leaf buffer */
+	xfs_da_args_t		*args,		/* operation arguments */
+	int			index)		/* insertion pt for new entry */
+{
+	int			compact;	/* compacting stale leaves */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			highstale;	/* next stale entry */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	int			lfloghigh;	/* high leaf entry logging */
+	int			lfloglow;	/* low leaf entry logging */
+	int			lowstale;	/* previous stale entry */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_sb("leafn_add", args, index, bp);
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	leaf = bp->data;
+
+	/*
+	 * Quick check just to make sure we are not going to index
+	 * into other people's memory.
+	 */
+	if (index < 0)
+		return XFS_ERROR(EFSCORRUPTED);
+
+	/*
+	 * If the block already holds the maximum number of leaf entries and
+	 * none of them are stale, the new entry won't fit and the caller
+	 * will do a split.  If there are stale entries we'll do a compact
+	 * instead.
+	 */
+
+	if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) {
+		if (!leaf->hdr.stale)
+			return XFS_ERROR(ENOSPC);
+		compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1;
+	} else
+		compact = 0;
+	ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval);
+	ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) ||
+	       INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval);
+
+	if (args->justcheck)
+		return 0;
+
+	/*
+	 * Compact out all but one stale leaf entry.  Leaves behind
+	 * the entry closest to index.
+	 */
+	if (compact) {
+		xfs_dir2_leaf_compact_x1(bp, &index, &lowstale, &highstale,
+			&lfloglow, &lfloghigh);
+	}
+	/*
+	 * Set impossible logging indices for this case.
+	 */
+	else if (leaf->hdr.stale) {
+		lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		lfloghigh = -1;
+	}
+	/*
+	 * No stale entries, just insert a space for the new entry.
+	 */
+	if (!leaf->hdr.stale) {
+		lep = &leaf->ents[index];
+		if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+			memmove(lep + 1, lep,
+				(INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep));
+		lfloglow = index;
+		lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1);
+	}
+	/*
+	 * There are stale entries.  We'll use one for the new entry.
+	 */
+	else {
+		/*
+		 * If we didn't do a compact then we need to figure out
+		 * which stale entry will be used.
+		 */
+		if (compact == 0) {
+			/*
+			 * Find first stale entry before our insertion point.
+			 */
+			for (lowstale = index - 1;
+			     lowstale >= 0 &&
+				INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) !=
+				XFS_DIR2_NULL_DATAPTR;
+			     lowstale--)
+				continue;
+			/*
+			 * Find next stale entry after insertion point.
+			 * Stop looking if the answer would be worse than
+			 * lowstale already found.
+			 */
+			for (highstale = index;
+			     highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) &&
+				INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) !=
+				XFS_DIR2_NULL_DATAPTR &&
+				(lowstale < 0 ||
+				 index - lowstale - 1 >= highstale - index);
+			     highstale++)
+				continue;
+		}
+		/*
+		 * Using the low stale entry.
+		 * Shift entries up toward the stale slot.
+		 */
+		if (lowstale >= 0 &&
+		    (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) ||
+		     index - lowstale - 1 < highstale - index)) {
+			ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) ==
+			       XFS_DIR2_NULL_DATAPTR);
+			ASSERT(index - lowstale - 1 >= 0);
+			if (index - lowstale - 1 > 0)
+				memmove(&leaf->ents[lowstale],
+					&leaf->ents[lowstale + 1],
+					(index - lowstale - 1) * sizeof(*lep));
+			lep = &leaf->ents[index - 1];
+			lfloglow = MIN(lowstale, lfloglow);
+			lfloghigh = MAX(index - 1, lfloghigh);
+		}
+		/*
+		 * Using the high stale entry.
+		 * Shift entries down toward the stale slot.
+		 */
+		else {
+			ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) ==
+			       XFS_DIR2_NULL_DATAPTR);
+			ASSERT(highstale - index >= 0);
+			if (highstale - index > 0)
+				memmove(&leaf->ents[index + 1],
+					&leaf->ents[index],
+					(highstale - index) * sizeof(*lep));
+			lep = &leaf->ents[index];
+			lfloglow = MIN(index, lfloglow);
+			lfloghigh = MAX(highstale, lfloghigh);
+		}
+		INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1);
+	}
+	/*
+	 * Insert the new entry, log everything.
+	 */
+	INT_SET(lep->hashval, ARCH_CONVERT, args->hashval);
+	INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index));
+	xfs_dir2_leaf_log_header(tp, bp);
+	xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh);
+	xfs_dir2_leafn_check(dp, bp);
+	return 0;
+}
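+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): the
+ * stale-slot selection used above.  Given an insertion index, the nearest
+ * stale slot on each side is located, and the shorter run of live entries
+ * between it and the index is the one that gets shifted by one place, so
+ * only that run needs to be logged.
+ */
+#if 0
+static int
+example_pick_stale_slot(int *is_stale, int nents, int index)
+{
+	int	lo, hi;
+
+	for (lo = index - 1; lo >= 0 && !is_stale[lo]; lo--)
+		continue;
+	for (hi = index; hi < nents && !is_stale[hi]; hi++)
+		continue;
+	/* use whichever stale slot is closer to the insertion point */
+	if (lo >= 0 && (hi == nents || index - lo - 1 < hi - index))
+		return lo;	/* shift entries lo+1 .. index-1 down one */
+	return hi;		/* shift entries index .. hi-1 up one */
+}
+#endif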
+
+#ifdef DEBUG
+/*
+ * Check internal consistency of a leafn block.
+ */
+void
+xfs_dir2_leafn_check(
+	xfs_inode_t	*dp,			/* incore directory inode */
+	xfs_dabuf_t	*bp)			/* leaf buffer */
+{
+	int		i;			/* leaf index */
+	xfs_dir2_leaf_t	*leaf;			/* leaf structure */
+	xfs_mount_t	*mp;			/* filesystem mount point */
+	int		stale;			/* count of stale leaves */
+
+	leaf = bp->data;
+	mp = dp->i_mount;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp));
+	for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) {
+		if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
+			ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <=
+			       INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT));
+		}
+		if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			stale++;
+	}
+	ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale);
+}
+#endif	/* DEBUG */
+
+/*
+ * Return the last hash value in the leaf.
+ * Stale entries are ok.
+ */
+xfs_dahash_t					/* hash value */
+xfs_dir2_leafn_lasthash(
+	xfs_dabuf_t	*bp,			/* leaf buffer */
+	int		*count)			/* count of entries in leaf */
+{
+	xfs_dir2_leaf_t	*leaf;			/* leaf structure */
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	if (count)
+		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	if (!leaf->hdr.count)
+		return 0;
+	return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+}
+
+/*
+ * Look up a leaf entry in a node-format leaf block.
+ * If this is an addname then the extrablk in state is a freespace block,
+ * otherwise it's a data block.
+ */
+int
+xfs_dir2_leafn_lookup_int(
+	xfs_dabuf_t		*bp,		/* leaf buffer */
+	xfs_da_args_t		*args,		/* operation arguments */
+	int			*indexp,	/* out: leaf entry index */
+	xfs_da_state_t		*state)		/* state to fill in */
+{
+	xfs_dabuf_t		*curbp;		/* current data/free buffer */
+	xfs_dir2_db_t		curdb;		/* current data block number */
+	xfs_dir2_db_t		curfdb;		/* current free block number */
+	xfs_dir2_data_entry_t	*dep;		/* data block entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return value */
+	int			fi;		/* free entry index */
+	xfs_dir2_free_t		*free=NULL;	/* free block structure */
+	int			index;		/* leaf entry index */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	int			length=0;	/* length of new data entry */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_db_t		newdb;		/* new data block number */
+	xfs_dir2_db_t		newfdb;		/* new free block number */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+#ifdef __KERNEL__
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0);
+#endif
+	xfs_dir2_leafn_check(dp, bp);
+	/*
+	 * Look up the hash value in the leaf entries.
+	 */
+	index = xfs_dir2_leaf_search_hash(args, bp);
+	/*
+	 * Do we have a buffer coming in?
+	 */
+	if (state->extravalid)
+		curbp = state->extrablk.bp;
+	else
+		curbp = NULL;
+	/*
+	 * For addname, it's a free block buffer, get the block number.
+	 */
+	if (args->addname) {
+		curfdb = curbp ? state->extrablk.blkno : -1;
+		curdb = -1;
+		length = XFS_DIR2_DATA_ENTSIZE(args->namelen);
+		if ((free = (curbp ? curbp->data : NULL)))
+			ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+	}
+	/*
+	 * For others, it's a data block buffer, get the block number.
+	 */
+	else {
+		curfdb = -1;
+		curdb = curbp ? state->extrablk.blkno : -1;
+	}
+	/*
+	 * Loop over leaf entries with the right hash value.
+	 */
+	for (lep = &leaf->ents[index];
+	     index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval;
+	     lep++, index++) {
+		/*
+		 * Skip stale leaf entries.
+		 */
+		if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		/*
+		 * Pull the data block number from the entry.
+		 */
+		newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT));
+		/*
+		 * For addname, we're looking for a place to put the new entry.
+		 * We want to use a data block with an entry of equal
+		 * hash value to ours if there is one with room.
+		 */
+		if (args->addname) {
+			/*
+			 * If this block isn't the data block we already have
+			 * in hand, take a look at it.
+			 */
+			if (newdb != curdb) {
+				curdb = newdb;
+				/*
+				 * Convert the data block to the free block
+				 * holding its freespace information.
+				 */
+				newfdb = XFS_DIR2_DB_TO_FDB(mp, newdb);
+				/*
+				 * If it's not the one we have in hand,
+				 * read it in.
+				 */
+				if (newfdb != curfdb) {
+					/*
+					 * If we had one before, drop it.
+					 */
+					if (curbp)
+						xfs_da_brelse(tp, curbp);
+					/*
+					 * Read the free block.
+					 */
+					if ((error = xfs_da_read_buf(tp, dp,
+							XFS_DIR2_DB_TO_DA(mp,
+								newfdb),
+							-1, &curbp,
+							XFS_DATA_FORK))) {
+						return error;
+					}
+					curfdb = newfdb;
+					free = curbp->data;
+					ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) ==
+					       XFS_DIR2_FREE_MAGIC);
+					ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) %
+						XFS_DIR2_MAX_FREE_BESTS(mp)) ==
+					       0);
+					ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb);
+					ASSERT(curdb <
+					       INT_GET(free->hdr.firstdb, ARCH_CONVERT) +
+					       INT_GET(free->hdr.nvalid, ARCH_CONVERT));
+				}
+				/*
+				 * Get the index for our entry.
+				 */
+				fi = XFS_DIR2_DB_TO_FDINDEX(mp, curdb);
+				/*
+				 * If it has room, return it.
+				 */
+				if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) {
+					XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
+							 XFS_ERRLEVEL_LOW, mp);
+					return XFS_ERROR(EFSCORRUPTED);
+				}
+				if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) {
+					*indexp = index;
+					state->extravalid = 1;
+					state->extrablk.bp = curbp;
+					state->extrablk.blkno = curfdb;
+					state->extrablk.index = fi;
+					state->extrablk.magic =
+						XFS_DIR2_FREE_MAGIC;
+					ASSERT(args->oknoent);
+					return XFS_ERROR(ENOENT);
+				}
+			}
+		}
+		/*
+		 * Not adding a new entry, so we really want to find
+		 * the name given to us.
+		 */
+		else {
+			/*
+			 * If it's a different data block, go get it.
+			 */
+			if (newdb != curdb) {
+				/*
+				 * If we had a block before, drop it.
+				 */
+				if (curbp)
+					xfs_da_brelse(tp, curbp);
+				/*
+				 * Read the data block.
+				 */
+				if ((error =
+				    xfs_da_read_buf(tp, dp,
+					    XFS_DIR2_DB_TO_DA(mp, newdb), -1,
+					    &curbp, XFS_DATA_FORK))) {
+					return error;
+				}
+				xfs_dir2_data_check(dp, curbp);
+				curdb = newdb;
+			}
+			/*
+			 * Point to the data entry.
+			 */
+			dep = (xfs_dir2_data_entry_t *)
+			      ((char *)curbp->data +
+			       XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)));
+			/*
+			 * Compare the entry, return it if it matches.
+			 */
+			if (dep->namelen == args->namelen &&
+			    dep->name[0] == args->name[0] &&
+			    memcmp(dep->name, args->name, args->namelen) == 0) {
+				args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+				*indexp = index;
+				state->extravalid = 1;
+				state->extrablk.bp = curbp;
+				state->extrablk.blkno = curdb;
+				state->extrablk.index =
+					(int)((char *)dep -
+					      (char *)curbp->data);
+				state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+				return XFS_ERROR(EEXIST);
+			}
+		}
+	}
+	/*
+	 * Didn't find a match.
+	 * If we are holding a buffer, give it back in case our caller
+	 * finds it useful.
+	 */
+	if ((state->extravalid = (curbp != NULL))) {
+		state->extrablk.bp = curbp;
+		state->extrablk.index = -1;
+		/*
+		 * For addname, giving back a free block.
+		 */
+		if (args->addname) {
+			state->extrablk.blkno = curfdb;
+			state->extrablk.magic = XFS_DIR2_FREE_MAGIC;
+		}
+		/*
+		 * For other callers, giving back a data block.
+		 */
+		else {
+			state->extrablk.blkno = curdb;
+			state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+		}
+	}
+	/*
+	 * Return the final index, that will be the insertion point.
+	 */
+	*indexp = index;
+	ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent);
+	return XFS_ERROR(ENOENT);
+}
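+
+/*
+ * Illustrative sketch only (kept under "#if 0", not part of XFS): a
+ * simplified model of how a data block number maps to a free block and an
+ * index within it in the lookup above.  Each free block is assumed to hold
+ * bests entries for a fixed number of consecutive data blocks, so the
+ * mapping is a divide and a remainder; the real XFS_DIR2_DB_TO_FDB and
+ * XFS_DIR2_DB_TO_FDINDEX macros are defined in xfs_dir2_node.h.
+ */
+#if 0
+static void
+example_db_to_free(int db, int free_firstdb, int bests_per_free,
+		   int *fdb, int *fdindex)
+{
+	*fdb = free_firstdb + db / bests_per_free;	/* which free block */
+	*fdindex = db % bests_per_free;			/* slot within it */
+}
+#endif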
+
+/*
+ * Move count leaf entries from source to destination leaf.
+ * Log entries and headers.  Stale entries are preserved.
+ */
+static void
+xfs_dir2_leafn_moveents(
+	xfs_da_args_t	*args,			/* operation arguments */
+	xfs_dabuf_t	*bp_s,			/* source leaf buffer */
+	int		start_s,		/* source leaf index */
+	xfs_dabuf_t	*bp_d,			/* destination leaf buffer */
+	int		start_d,		/* destination leaf index */
+	int		count)			/* count of leaves to copy */
+{
+	xfs_dir2_leaf_t	*leaf_d;		/* destination leaf structure */
+	xfs_dir2_leaf_t	*leaf_s;		/* source leaf structure */
+	int		stale;			/* count stale leaves copied */
+	xfs_trans_t	*tp;			/* transaction pointer */
+
+	xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d,
+		start_d, count);
+	/*
+	 * Silently return if nothing to do.
+	 */
+	if (count == 0) {
+		return;
+	}
+	tp = args->trans;
+	leaf_s = bp_s->data;
+	leaf_d = bp_d->data;
+	/*
+	 * If the destination index is not the end of the current
+	 * destination leaf entries, open up a hole in the destination
+	 * to hold the new entries.
+	 */
+	if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) {
+		memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d],
+			(INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) *
+			sizeof(xfs_dir2_leaf_entry_t));
+		xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count,
+			count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1);
+	}
+	/*
+	 * If the source has stale leaves, count the ones in the copy range
+	 * so we can update the header correctly.
+	 */
+	if (leaf_s->hdr.stale) {
+		int	i;			/* temp leaf index */
+
+		for (i = start_s, stale = 0; i < start_s + count; i++) {
+			if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+				stale++;
+		}
+	} else
+		stale = 0;
+	/*
+	 * Copy the leaf entries from source to destination.
+	 */
+	memcpy(&leaf_d->ents[start_d], &leaf_s->ents[start_s],
+		count * sizeof(xfs_dir2_leaf_entry_t));
+	xfs_dir2_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1);
+	/*
+	 * If there are source entries after the ones we copied,
+	 * delete the ones we copied by sliding the next ones down.
+	 */
+	if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) {
+		memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count],
+			count * sizeof(xfs_dir2_leaf_entry_t));
+		xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1);
+	}
+	/*
+	 * Update the headers and log them.
+	 */
+	INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count));
+	INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale));
+	INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count);
+	INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale);
+	xfs_dir2_leaf_log_header(tp, bp_s);
+	xfs_dir2_leaf_log_header(tp, bp_d);
+	xfs_dir2_leafn_check(args->dp, bp_s);
+	xfs_dir2_leafn_check(args->dp, bp_d);
+}
+
+/*
+ * Determine the sort order of two leaf blocks.
+ * Returns 1 if both are valid and leaf2 should be before leaf1, else 0.
+ */
+int						/* sort order */
+xfs_dir2_leafn_order(
+	xfs_dabuf_t	*leaf1_bp,		/* leaf1 buffer */
+	xfs_dabuf_t	*leaf2_bp)		/* leaf2 buffer */
+{
+	xfs_dir2_leaf_t	*leaf1;			/* leaf1 structure */
+	xfs_dir2_leaf_t	*leaf2;			/* leaf2 structure */
+
+	leaf1 = leaf1_bp->data;
+	leaf2 = leaf2_bp->data;
+	ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 &&
+	    INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 &&
+	    (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) ||
+	     INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) <
+	     INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)))
+		return 1;
+	return 0;
+}
+
+/*
+ * Rebalance leaf entries between two leaf blocks.
+ * This is actually only called when the second block is new,
+ * though the code deals with the general case.
+ * A new entry will be inserted in one of the blocks, and that
+ * entry is taken into account when balancing.
+ */
+static void
+xfs_dir2_leafn_rebalance(
+	xfs_da_state_t		*state,		/* btree cursor */
+	xfs_da_state_blk_t	*blk1,		/* first btree block */
+	xfs_da_state_blk_t	*blk2)		/* second btree block */
+{
+	xfs_da_args_t		*args;		/* operation arguments */
+	int			count;		/* count (& direction) leaves */
+	int			isleft;		/* new goes in left leaf */
+	xfs_dir2_leaf_t		*leaf1;		/* first leaf structure */
+	xfs_dir2_leaf_t		*leaf2;		/* second leaf structure */
+	int			mid;		/* midpoint leaf index */
+#ifdef DEBUG
+	int			oldstale;	/* old count of stale leaves */
+#endif
+	int			oldsum;		/* old total leaf count */
+	int			swap;		/* swapped leaf blocks */
+
+	args = state->args;
+	/*
+	 * If the block order is wrong, swap the arguments.
+	 */
+	if ((swap = xfs_dir2_leafn_order(blk1->bp, blk2->bp))) {
+		xfs_da_state_blk_t	*tmp;	/* temp for block swap */
+
+		tmp = blk1;
+		blk1 = blk2;
+		blk2 = tmp;
+	}
+	leaf1 = blk1->bp->data;
+	leaf2 = blk2->bp->data;
+	oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT);
+#ifdef DEBUG
+	oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT);
+#endif
+	mid = oldsum >> 1;
+	/*
+	 * If the old leaf count was odd then the new one will be even,
+	 * so we need to divide the new count evenly.
+	 */
+	if (oldsum & 1) {
+		xfs_dahash_t	midhash;	/* middle entry hash value */
+
+		if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT))
+			midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT);
+		else
+			midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT);
+		isleft = args->hashval <= midhash;
+	}
+	/*
+	 * If the old count is even then the new count is odd, so there's
+	 * no preferred side for the new entry.
+	 * Pick the left one.
+	 */
+	else
+		isleft = 1;
+	/*
+	 * Calculate moved entry count.  Positive means left-to-right,
+	 * negative means right-to-left.  Then move the entries.
+	 */
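+	/*
+	 * For example (hypothetical counts): if leaf1 holds 7 live entries
+	 * and leaf2 holds 3, oldsum is 10 and mid is 5; with isleft set,
+	 * count = 7 - 5 = 2, so the last two entries of leaf1 are moved to
+	 * the front of leaf2 and both blocks end up holding 5 entries.
+	 */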
+	count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0);
+	if (count > 0)
+		xfs_dir2_leafn_moveents(args, blk1->bp,
+			INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count);
+	else if (count < 0)
+		xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp,
+			INT_GET(leaf1->hdr.count, ARCH_CONVERT), count);
+	ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum);
+	ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale);
+	/*
+	 * Mark whether we're inserting into the old or new leaf.
+	 */
+	if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT))
+		state->inleaf = swap;
+	else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT))
+		state->inleaf = !swap;
+	else
+		state->inleaf =
+			swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT));
+	/*
+	 * Adjust the expected index for insertion.
+	 */
+	if (!state->inleaf)
+		blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+
+	/*
+	 * Finally, sanity check to make sure we are not returning a
+	 * negative index.
+	 */
+	if (blk2->index < 0) {
+		state->inleaf = 1;
+		blk2->index = 0;
+		cmn_err(CE_ALERT,
+			"xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: "
+			"blk1->index %d\n",
+			blk1->index);
+	}
+}
+
+/*
+ * Remove an entry from a node directory.
+ * This removes the leaf entry and the data entry,
+ * and updates the free block if necessary.
+ */
+static int					/* error */
+xfs_dir2_leafn_remove(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*bp,		/* leaf buffer */
+	int			index,		/* leaf entry index */
+	xfs_da_state_blk_t	*dblk,		/* data block */
+	int			*rval)		/* resulting block needs join */
+{
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dir2_db_t		db;		/* data block number */
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data block entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
+	int			longest;	/* longest data free entry */
+	int			off;		/* data block entry offset */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log data header */
+	int			needscan;	/* need to rescan data frees */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_sb("leafn_remove", args, index, bp);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	/*
+	 * Point to the entry we're removing.
+	 */
+	lep = &leaf->ents[index];
+	/*
+	 * Extract the data block and offset from the entry.
+	 */
+	db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT));
+	ASSERT(dblk->blkno == db);
+	off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT));
+	ASSERT(dblk->index == off);
+	/*
+	 * Kill the leaf entry by marking it stale.
+	 * Log the leaf block changes.
+	 */
+	INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1);
+	xfs_dir2_leaf_log_header(tp, bp);
+	INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR);
+	xfs_dir2_leaf_log_ents(tp, bp, index, index);
+	/*
+	 * Make the data entry free.  Keep track of the longest freespace
+	 * in the data block in case it changes.
+	 */
+	dbp = dblk->bp;
+	data = dbp->data;
+	dep = (xfs_dir2_data_entry_t *)((char *)data + off);
+	longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT);
+	needlog = needscan = 0;
+	xfs_dir2_data_make_free(tp, dbp, off,
+		XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan);
+	/*
+	 * Rescan the data block freespaces for bestfree.
+	 * Log the data block header if needed.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, data, &needlog, NULL);
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	xfs_dir2_data_check(dp, dbp);
+	/*
+	 * If the longest data block freespace changes, need to update
+	 * the corresponding freeblock entry.
+	 */
+	if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) {
+		int		error;		/* error return value */
+		xfs_dabuf_t	*fbp;		/* freeblock buffer */
+		xfs_dir2_db_t	fdb;		/* freeblock block number */
+		int		findex;		/* index in freeblock entries */
+		xfs_dir2_free_t	*free;		/* freeblock structure */
+		int		logfree;	/* need to log free entry */
+
+		/*
+		 * Convert the data block number to a free block,
+		 * read in the free block.
+		 */
+		fdb = XFS_DIR2_DB_TO_FDB(mp, db);
+		if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb),
+				-1, &fbp, XFS_DATA_FORK))) {
+			return error;
+		}
+		free = fbp->data;
+		ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+		ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) ==
+		       XFS_DIR2_MAX_FREE_BESTS(mp) *
+		       (fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
+		/*
+		 * Calculate which entry we need to fix.
+		 */
+		findex = XFS_DIR2_DB_TO_FDINDEX(mp, db);
+		longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT);
+		/*
+		 * If the data block is now empty we can get rid of it
+		 * (usually).
+		 */
+		if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) {
+			/*
+			 * Try to punch out the data block.
+			 */
+			error = xfs_dir2_shrink_inode(args, db, dbp);
+			if (error == 0) {
+				dblk->bp = NULL;
+				data = NULL;
+			}
+			/*
+			 * We can get ENOSPC if there's no space reservation.
+			 * In this case just drop the buffer and someone else
+			 * will eventually get rid of the empty block.
+			 */
+			else if (error == ENOSPC && args->total == 0)
+				xfs_da_buf_done(dbp);
+			else
+				return error;
+		}
+		/*
+		 * If we got rid of the data block, we can eliminate that entry
+		 * in the free block.
+		 */
+		if (data == NULL) {
+			/*
+			 * One less used entry in the free table.
+			 */
+			INT_MOD(free->hdr.nused, ARCH_CONVERT, -1);
+			xfs_dir2_free_log_header(tp, fbp);
+			/*
+			 * If this was the last entry in the table, we can
+			 * trim the table size back.  There might be other
+			 * entries at the end referring to non-existent
+			 * data blocks, get those too.
+			 */
+			if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) {
+				int	i;		/* free entry index */
+
+				for (i = findex - 1;
+				     i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF;
+				     i--)
+					continue;
+				INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1);
+				logfree = 0;
+			}
+			/*
+			 * Not the last entry, just punch it out.
+			 */
+			else {
+				INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF);
+				logfree = 1;
+			}
+			/*
+			 * If there are no useful entries left in the block,
+			 * get rid of the block if we can.
+			 */
+			if (!free->hdr.nused) {
+				error = xfs_dir2_shrink_inode(args, fdb, fbp);
+				if (error == 0) {
+					fbp = NULL;
+					logfree = 0;
+				} else if (error != ENOSPC || args->total != 0)
+					return error;
+				/*
+				 * It's possible to get ENOSPC if there is no
+				 * space reservation.  In this case someone
+				 * else will eventually get rid of this block.
+				 */
+			}
+		}
+		/*
+		 * Data block is not empty, just set the free entry to
+		 * the new value.
+		 */
+		else {
+			INT_SET(free->bests[findex], ARCH_CONVERT, longest);
+			logfree = 1;
+		}
+		/*
+		 * Log the free entry that changed, unless we got rid of it.
+		 */
+		if (logfree)
+			xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+		/*
+		 * Drop the buffer if we still have it.
+		 */
+		if (fbp)
+			xfs_da_buf_done(fbp);
+	}
+	xfs_dir2_leafn_check(dp, bp);
+	/*
+	 * Return indication of whether this leaf block is empty enough
+	 * to justify trying to join it with a neighbor.
+	 */
+	*rval =
+		((uint)sizeof(leaf->hdr) +
+		 (uint)sizeof(leaf->ents[0]) *
+		 (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) <
+		mp->m_dir_magicpct;
+	return 0;
+}
+
+/*
+ * Split the leaf entries in the old block into old and new blocks.
+ */
+int						/* error */
+xfs_dir2_leafn_split(
+	xfs_da_state_t		*state,		/* btree cursor */
+	xfs_da_state_blk_t	*oldblk,	/* original block */
+	xfs_da_state_blk_t	*newblk)	/* newly created block */
+{
+	xfs_da_args_t		*args;		/* operation arguments */
+	xfs_dablk_t		blkno;		/* new leaf block number */
+	int			error;		/* error return value */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+
+	/*
+	 * Allocate space for a new leaf node.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	mp = args->dp->i_mount;
+	ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
+	error = xfs_da_grow_inode(args, &blkno);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Initialize the new leaf block.
+	 */
+	error = xfs_dir2_leaf_init(args, XFS_DIR2_DA_TO_DB(mp, blkno),
+		&newblk->bp, XFS_DIR2_LEAFN_MAGIC);
+	if (error) {
+		return error;
+	}
+	newblk->blkno = blkno;
+	newblk->magic = XFS_DIR2_LEAFN_MAGIC;
+	/*
+	 * Rebalance the entries across the two leaves, link the new
+	 * block into the leaves.
+	 */
+	xfs_dir2_leafn_rebalance(state, oldblk, newblk);
+	error = xfs_da_blk_link(state, oldblk, newblk);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Insert the new entry in the correct block.
+	 */
+	if (state->inleaf)
+		error = xfs_dir2_leafn_add(oldblk->bp, args, oldblk->index);
+	else
+		error = xfs_dir2_leafn_add(newblk->bp, args, newblk->index);
+	/*
+	 * Update last hashval in each block since we added the name.
+	 */
+	oldblk->hashval = xfs_dir2_leafn_lasthash(oldblk->bp, NULL);
+	newblk->hashval = xfs_dir2_leafn_lasthash(newblk->bp, NULL);
+	xfs_dir2_leafn_check(args->dp, oldblk->bp);
+	xfs_dir2_leafn_check(args->dp, newblk->bp);
+	return error;
+}
+
+/*
+ * Check a leaf block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor.  Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
+ */
+int						/* error */
+xfs_dir2_leafn_toosmall(
+	xfs_da_state_t		*state,		/* btree cursor */
+	int			*action)	/* resulting action to take */
+{
+	xfs_da_state_blk_t	*blk;		/* leaf block */
+	xfs_dablk_t		blkno;		/* leaf block number */
+	xfs_dabuf_t		*bp;		/* leaf buffer */
+	int			bytes;		/* bytes in use */
+	int			count;		/* leaf live entry count */
+	int			error;		/* error return value */
+	int			forward;	/* sibling block direction */
+	int			i;		/* sibling counter */
+	xfs_da_blkinfo_t	*info;		/* leaf block header */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	int			rval;		/* result from path_shift */
+
+	/*
+	 * Check for the degenerate case of the block being over 50% full.
+	 * If so, it's not worth even looking to see if we might be able
+	 * to coalesce with a sibling.
+	 */
+	blk = &state->path.blk[state->path.active - 1];
+	info = blk->bp->data;
+	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	leaf = (xfs_dir2_leaf_t *)info;
+	count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT);
+	bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]);
+	if (bytes > (state->blocksize >> 1)) {
+		/*
+		 * Blk over 50%, don't try to join.
+		 */
+		*action = 0;
+		return 0;
+	}
+	/*
+	 * Check for the degenerate case of the block being empty.
+	 * If the block is empty, we'll simply delete it, no need to
+	 * coalesce it with a sibling block.  We choose (arbitrarily)
+	 * to merge with the forward block unless it is NULL.
+	 */
+	if (count == 0) {
+		/*
+		 * Make altpath point to the block we want to keep and
+		 * path point to the block we want to drop (this one).
+		 */
+		forward = info->forw;
+		memcpy(&state->altpath, &state->path, sizeof(state->path));
+		error = xfs_da_path_shift(state, &state->altpath, forward, 0,
+			&rval);
+		if (error)
+			return error;
+		*action = rval ? 2 : 0;
+		return 0;
+	}
+	/*
+	 * Examine each sibling block to see if we can coalesce with
+	 * at least 25% free space to spare.  We need to figure out
+	 * whether to merge with the forward or the backward block.
+	 * We prefer coalescing with the lower numbered sibling so as
+	 * to shrink a directory over time.
+	 */
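+	/*
+	 * For example, with a hypothetical 4k directory block and 8-byte
+	 * leaf entries, bytes in the scan below starts at 4096 - 1024 = 3072,
+	 * so the scan succeeds only if the two blocks hold at most
+	 * 3072 / 8 = 384 live (non-stale) entries between them.
+	 */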
+	forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT);
+	for (i = 0, bp = NULL; i < 2; forward = !forward, i++) {
+		blkno = forward ? INT_GET(info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT);
+		if (blkno == 0)
+			continue;
+		/*
+		 * Read the sibling leaf block.
+		 */
+		if ((error =
+		    xfs_da_read_buf(state->args->trans, state->args->dp, blkno,
+			    -1, &bp, XFS_DATA_FORK))) {
+			return error;
+		}
+		ASSERT(bp != NULL);
+		/*
+		 * Count bytes in the two blocks combined.
+		 */
+		leaf = (xfs_dir2_leaf_t *)info;
+		count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT);
+		bytes = state->blocksize - (state->blocksize >> 2);
+		leaf = bp->data;
+		ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+		count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT);
+		bytes -= count * (uint)sizeof(leaf->ents[0]);
+		/*
+		 * Fits with at least 25% to spare.
+		 */
+		if (bytes >= 0)
+			break;
+		xfs_da_brelse(state->args->trans, bp);
+	}
+	/*
+	 * Didn't like either block, give up.
+	 */
+	if (i >= 2) {
+		*action = 0;
+		return 0;
+	}
+	/*
+	 * Done with the sibling leaf block here, drop the dabuf
+	 * so path_shift can get it.
+	 */
+	xfs_da_buf_done(bp);
+	/*
+	 * Make altpath point to the block we want to keep (the lower
+	 * numbered block) and path point to the block we want to drop.
+	 */
+	memcpy(&state->altpath, &state->path, sizeof(state->path));
+	if (blkno < blk->blkno)
+		error = xfs_da_path_shift(state, &state->altpath, forward, 0,
+			&rval);
+	else
+		error = xfs_da_path_shift(state, &state->path, forward, 0,
+			&rval);
+	if (error) {
+		return error;
+	}
+	*action = rval ? 0 : 1;
+	return 0;
+}
+
+/*
+ * Move all the leaf entries from drop_blk to save_blk.
+ * This is done as part of a join operation.
+ */
+void
+xfs_dir2_leafn_unbalance(
+	xfs_da_state_t		*state,		/* cursor */
+	xfs_da_state_blk_t	*drop_blk,	/* dead block */
+	xfs_da_state_blk_t	*save_blk)	/* surviving block */
+{
+	xfs_da_args_t		*args;		/* operation arguments */
+	xfs_dir2_leaf_t		*drop_leaf;	/* dead leaf structure */
+	xfs_dir2_leaf_t		*save_leaf;	/* surviving leaf structure */
+
+	args = state->args;
+	ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
+	ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
+	drop_leaf = drop_blk->bp->data;
+	save_leaf = save_blk->bp->data;
+	ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+	/*
+	 * If there are any stale leaf entries, take this opportunity
+	 * to purge them.
+	 */
+	if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT))
+		xfs_dir2_leaf_compact(args, drop_blk->bp);
+	if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT))
+		xfs_dir2_leaf_compact(args, save_blk->bp);
+	/*
+	 * Move the entries from drop to the appropriate end of save.
+	 */
+	drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+	if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp))
+		xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0,
+			INT_GET(drop_leaf->hdr.count, ARCH_CONVERT));
+	else
+		xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp,
+			INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT));
+	save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
+	xfs_dir2_leafn_check(args->dp, save_blk->bp);
+}
+
+/*
+ * Top-level node form directory addname routine.
+ */
+int						/* error */
+xfs_dir2_node_addname(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_da_state_blk_t	*blk;		/* leaf block for insert */
+	int			error;		/* error return value */
+	int			rval;		/* sub-return value */
+	xfs_da_state_t		*state;		/* btree cursor */
+
+	xfs_dir2_trace_args("node_addname", args);
+	/*
+	 * Allocate and initialize the state (btree cursor).
+	 */
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_dirblksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+	/*
+	 * Look up the name.  We're not supposed to find it, but
+	 * this gives us the insertion point.
+	 */
+	error = xfs_da_node_lookup_int(state, &rval);
+	if (error)
+		rval = error;
+	if (rval != ENOENT) {
+		goto done;
+	}
+	/*
+	 * Add the data entry to a data block.
+	 * Extravalid is set to a freeblock found by lookup.
+	 */
+	rval = xfs_dir2_node_addname_int(args,
+		state->extravalid ? &state->extrablk : NULL);
+	if (rval) {
+		goto done;
+	}
+	blk = &state->path.blk[state->path.active - 1];
+	ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
+	/*
+	 * Add the new leaf entry.
+	 */
+	rval = xfs_dir2_leafn_add(blk->bp, args, blk->index);
+	if (rval == 0) {
+		/*
+		 * It worked, fix the hash values up the btree.
+		 */
+		if (!args->justcheck)
+			xfs_da_fixhashpath(state, &state->path);
+	} else {
+		/*
+		 * It didn't work, we need to split the leaf block.
+		 */
+		if (args->total == 0) {
+			ASSERT(rval == ENOSPC);
+			goto done;
+		}
+		/*
+		 * Split the leaf block and insert the new entry.
+		 */
+		rval = xfs_da_split(state);
+	}
+done:
+	xfs_da_state_free(state);
+	return rval;
+}
+
+/*
+ * Add the data entry for a node-format directory name addition.
+ * The leaf entry is added in xfs_dir2_leafn_add.
+ * We may enter with a freespace block that the lookup found.
+ */
+static int					/* error */
+xfs_dir2_node_addname_int(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_da_state_blk_t	*fblk)		/* optional freespace block */
+{
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dir2_db_t		dbno;		/* data block number */
+	xfs_dabuf_t		*dbp;		/* data block buffer */
+	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* data unused entry pointer */
+	int			error;		/* error return value */
+	xfs_dir2_db_t		fbno;		/* freespace block number */
+	xfs_dabuf_t		*fbp;		/* freespace buffer */
+	int			findex;		/* freespace entry index */
+	xfs_dir2_free_t		*free = NULL;	/* freespace block structure */
+	xfs_dir2_db_t		ifbno;		/* initial freespace block no */
+	xfs_dir2_db_t		lastfbno = 0;	/* highest freespace block no */
+	int			length;		/* length of the new entry */
+	int			logfree;	/* need to log free entry */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	int			needlog;	/* need to log data header */
+	int			needscan;	/* need to rescan data frees */
+	xfs_dir2_data_off_t	*tagp;		/* data entry tag pointer */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	length = XFS_DIR2_DATA_ENTSIZE(args->namelen);
+	/*
+	 * If we came in with a freespace block, that means lookup found
+	 * an entry with our hash value.  This is the freespace block for
+	 * that data entry.
+	 */
+	if (fblk) {
+		fbp = fblk->bp;
+		/*
+		 * Remember initial freespace block number.
+		 */
+		ifbno = fblk->blkno;
+		free = fbp->data;
+		ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+		findex = fblk->index;
+		/*
+		 * This means the free entry showed that the data block had
+		 * space for our entry, so we remembered it.
+		 * Use that data block.
+		 */
+		if (findex >= 0) {
+			ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT));
+			ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF);
+			ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length);
+			dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex;
+		}
+		/*
+		 * The data block looked at didn't have enough room.
+		 * We'll start at the beginning of the freespace entries.
+		 */
+		else {
+			dbno = -1;
+			findex = 0;
+		}
+	}
+	/*
+	 * Didn't come in with a freespace block, so don't have a data block.
+	 */
+	else {
+		ifbno = dbno = -1;
+		fbp = NULL;
+		findex = 0;
+	}
+	/*
+	 * If we don't have a data block yet, we're going to scan the
+	 * freespace blocks looking for one.  Figure out what the
+	 * highest freespace block number is.
+	 */
+	if (dbno == -1) {
+		xfs_fileoff_t	fo;		/* freespace block number */
+
+		if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK)))
+			return error;
+		lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo);
+		fbno = ifbno;
+	}
+	/*
+	 * While we haven't identified a data block, search the freeblock
+	 * data for a good data block.  If we find a null freeblock entry,
+	 * indicating a hole in the data blocks, remember that.
+	 */
+	while (dbno == -1) {
+		/*
+		 * If we don't have a freeblock in hand, get the next one.
+		 */
+		if (fbp == NULL) {
+			/*
+			 * Happens the first time through unless lookup gave
+			 * us a freespace block to start with.
+			 */
+			if (++fbno == 0)
+				fbno = XFS_DIR2_FREE_FIRSTDB(mp);
+			/*
+			 * If it's ifbno we already looked at it.
+			 */
+			if (fbno == ifbno)
+				fbno++;
+			/*
+			 * If it's off the end we're done.
+			 */
+			if (fbno >= lastfbno)
+				break;
+			/*
+			 * Read the block.  There can be holes in the
+			 * freespace blocks, so this might not succeed.
+			 * This should be really rare, so there's no reason
+			 * to avoid it.
+			 */
+			if ((error = xfs_da_read_buf(tp, dp,
+					XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp,
+					XFS_DATA_FORK))) {
+				return error;
+			}
+			if (unlikely(fbp == NULL)) {
+				continue;
+			}
+			free = fbp->data;
+			ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+			findex = 0;
+		}
+		/*
+		 * Look at the current free entry.  Is it good enough?
+		 */
+		if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF &&
+		    INT_GET(free->bests[findex], ARCH_CONVERT) >= length)
+			dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex;
+		else {
+			/*
+			 * Are we done with the freeblock?
+			 */
+			if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) {
+				/*
+				 * Drop the block.
+				 */
+				xfs_da_brelse(tp, fbp);
+				fbp = NULL;
+				if (fblk && fblk->bp)
+					fblk->bp = NULL;
+			}
+		}
+	}
+	/*
+	 * If we don't have a data block, we need to allocate one and make
+	 * the freespace entries refer to it.
+	 */
+	if (unlikely(dbno == -1)) {
+		/*
+		 * Not allowed to allocate, return failure.
+		 */
+		if (args->justcheck || args->total == 0) {
+			/*
+			 * Drop the freespace buffer unless it came from our
+			 * caller.
+			 */
+			if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
+				xfs_da_buf_done(fbp);
+			return XFS_ERROR(ENOSPC);
+		}
+		/*
+		 * Allocate and initialize the new data block.
+		 */
+		if (unlikely((error = xfs_dir2_grow_inode(args,
+							 XFS_DIR2_DATA_SPACE,
+							 &dbno)) ||
+		    (error = xfs_dir2_data_init(args, dbno, &dbp)))) {
+			/*
+			 * Drop the freespace buffer unless it came from our
+			 * caller.
+			 */
+			if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
+				xfs_da_buf_done(fbp);
+			return error;
+		}
+		/*
+		 * If (somehow) we have a freespace block, get rid of it.
+		 */
+		if (fbp)
+			xfs_da_brelse(tp, fbp);
+		if (fblk && fblk->bp)
+			fblk->bp = NULL;
+
+		/*
+		 * Get the freespace block corresponding to the data block
+		 * that was just allocated.
+		 */
+		fbno = XFS_DIR2_DB_TO_FDB(mp, dbno);
+		if (unlikely(error = xfs_da_read_buf(tp, dp,
+				XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp,
+				XFS_DATA_FORK))) {
+			xfs_da_buf_done(dbp);
+			return error;
+		}
+		/*
+		 * If there wasn't a freespace block, the read will
+		 * return a NULL fbp.  Allocate and initialize a new one.
+		 */
+		if (fbp == NULL) {
+			if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE,
+							&fbno))) {
+				return error;
+			}
+
+			if (unlikely(XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno)) {
+				cmn_err(CE_ALERT,
+					"xfs_dir2_node_addname_int: dir ino "
+					"%llu needed freesp block %lld for\n"
+					"  data block %lld, got %lld\n"
+					"  ifbno %llu lastfbno %d\n",
+					(unsigned long long)dp->i_ino,
+					(long long)XFS_DIR2_DB_TO_FDB(mp, dbno),
+					(long long)dbno, (long long)fbno,
+					(unsigned long long)ifbno, lastfbno);
+				if (fblk) {
+					cmn_err(CE_ALERT,
+						" fblk 0x%p blkno %llu "
+						"index %d magic 0x%x\n",
+						fblk,
+						(unsigned long long)fblk->blkno,
+						fblk->index,
+						fblk->magic);
+				} else {
+					cmn_err(CE_ALERT,
+						" ... fblk is NULL\n");
+				}
+				XFS_ERROR_REPORT("xfs_dir2_node_addname_int",
+						 XFS_ERRLEVEL_LOW, mp);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+
+			/*
+			 * Get a buffer for the new block.
+			 */
+			if ((error = xfs_da_get_buf(tp, dp,
+						   XFS_DIR2_DB_TO_DA(mp, fbno),
+						   -1, &fbp, XFS_DATA_FORK))) {
+				return error;
+			}
+			ASSERT(fbp != NULL);
+
+			/*
+			 * Initialize the new block to be empty, and remember
+			 * its first slot as our empty slot.
+			 */
+			free = fbp->data;
+			INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC);
+			INT_SET(free->hdr.firstdb, ARCH_CONVERT,
+				(fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
+				XFS_DIR2_MAX_FREE_BESTS(mp));
+			free->hdr.nvalid = 0;
+			free->hdr.nused = 0;
+		} else {
+			free = fbp->data;
+			ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+		}
+
+		/*
+		 * Set the freespace block index from the data block number.
+		 */
+		findex = XFS_DIR2_DB_TO_FDINDEX(mp, dbno);
+		/*
+		 * If it's after the end of the current entries in the
+		 * freespace block, extend that table.
+		 */
+		if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) {
+			ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp));
+			INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1);
+			/*
+			 * Tag new entry so nused will go up.
+			 */
+			INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF);
+		}
+		/*
+		 * If this entry was for an empty data block
+		 * (this should always be true) then update the header.
+		 */
+		if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) {
+			INT_MOD(free->hdr.nused, ARCH_CONVERT, +1);
+			xfs_dir2_free_log_header(tp, fbp);
+		}
+		/*
+		 * Update the real value in the table.
+		 * We haven't allocated the data entry yet so this will
+		 * change again.
+		 */
+		data = dbp->data;
+		INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT);
+		logfree = 1;
+	}
+	/*
+	 * We had a data block so we don't have to make a new one.
+	 */
+	else {
+		/*
+		 * If just checking, we succeeded.
+		 */
+		if (args->justcheck) {
+			if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
+				xfs_da_buf_done(fbp);
+			return 0;
+		}
+		/*
+		 * Read the data block in.
+		 */
+		if (unlikely(
+		    error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, dbno),
+				-1, &dbp, XFS_DATA_FORK))) {
+			if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
+				xfs_da_buf_done(fbp);
+			return error;
+		}
+		data = dbp->data;
+		logfree = 0;
+	}
+	ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length);
+	/*
+	 * Point to the existing unused space.
+	 */
+	dup = (xfs_dir2_data_unused_t *)
+	      ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT));
+	needscan = needlog = 0;
+	/*
+	 * Mark the first part of the unused space as in use by us.
+	 */
+	xfs_dir2_data_use_free(tp, dbp, dup,
+		(xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
+		&needlog, &needscan);
+	/*
+	 * Fill in the new entry and log it.
+	 */
+	dep = (xfs_dir2_data_entry_t *)dup;
+	INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+	dep->namelen = args->namelen;
+	memcpy(dep->name, args->name, dep->namelen);
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data));
+	xfs_dir2_data_log_entry(tp, dbp, dep);
+	/*
+	 * Rescan the block for bestfree if needed.
+	 */
+	if (needscan)
+		xfs_dir2_data_freescan(mp, data, &needlog, NULL);
+	/*
+	 * Log the data block header if needed.
+	 */
+	if (needlog)
+		xfs_dir2_data_log_header(tp, dbp);
+	/*
+	 * If the freespace entry is now wrong, update it.
+	 */
+	if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) {
+		INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT);
+		logfree = 1;
+	}
+	/*
+	 * Log the freespace entry if needed.
+	 */
+	if (logfree)
+		xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+	/*
+	 * If the caller didn't hand us the freespace block, drop it.
+	 */
+	if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
+		xfs_da_buf_done(fbp);
+	/*
+	 * Return the data block and offset in args, then drop the data block.
+	 */
+	args->blkno = (xfs_dablk_t)dbno;
+	args->index = INT_GET(*tagp, ARCH_CONVERT);
+	xfs_da_buf_done(dbp);
+	return 0;
+}
+
+/*
+ * Lookup an entry in a node-format directory.
+ * All the real work happens in xfs_da_node_lookup_int.
+ * The only real output is the inode number of the entry.
+ */
+int						/* error */
+xfs_dir2_node_lookup(
+	xfs_da_args_t	*args)			/* operation arguments */
+{
+	int		error;			/* error return value */
+	int		i;			/* btree level */
+	int		rval;			/* operation return value */
+	xfs_da_state_t	*state;			/* btree cursor */
+
+	xfs_dir2_trace_args("node_lookup", args);
+	/*
+	 * Allocate and initialize the btree cursor.
+	 */
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_dirblksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+	/*
+	 * Fill in the path to the entry in the cursor.
+	 */
+	error = xfs_da_node_lookup_int(state, &rval);
+	if (error)
+		rval = error;
+	/*
+	 * Release the btree blocks and leaf block.
+	 */
+	for (i = 0; i < state->path.active; i++) {
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+	/*
+	 * Release the data block if we have it.
+	 */
+	if (state->extravalid && state->extrablk.bp) {
+		xfs_da_brelse(args->trans, state->extrablk.bp);
+		state->extrablk.bp = NULL;
+	}
+	xfs_da_state_free(state);
+	return rval;
+}
+
+/*
+ * Remove an entry from a node-format directory.
+ */
+int						/* error */
+xfs_dir2_node_removename(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_da_state_blk_t	*blk;		/* leaf block */
+	int			error;		/* error return value */
+	int			rval;		/* operation return value */
+	xfs_da_state_t		*state;		/* btree cursor */
+
+	xfs_dir2_trace_args("node_removename", args);
+	/*
+	 * Allocate and initialize the btree cursor.
+	 */
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_dirblksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+	/*
+	 * Look up the entry we're deleting, set up the cursor.
+	 */
+	error = xfs_da_node_lookup_int(state, &rval);
+	if (error) {
+		rval = error;
+	}
+	/*
+	 * Didn't find it, upper layer screwed up.
+	 */
+	if (rval != EEXIST) {
+		xfs_da_state_free(state);
+		return rval;
+	}
+	blk = &state->path.blk[state->path.active - 1];
+	ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
+	ASSERT(state->extravalid);
+	/*
+	 * Remove the leaf and data entries.
+	 * Extrablk refers to the data block.
+	 */
+	error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
+		&state->extrablk, &rval);
+	if (error) {
+		xfs_da_state_free(state);
+		return error;
+	}
+	/*
+	 * Fix the hash values up the btree.
+	 */
+	xfs_da_fixhashpath(state, &state->path);
+	/*
+	 * If we need to join leaf blocks, do it.
+	 */
+	if (rval && state->path.active > 1)
+		error = xfs_da_join(state);
+	/*
+	 * If no errors so far, try conversion to leaf format.
+	 */
+	if (!error)
+		error = xfs_dir2_node_to_leaf(state);
+	xfs_da_state_free(state);
+	return error;
+}
+
+/*
+ * Replace an entry's inode number in a node-format directory.
+ */
+int						/* error */
+xfs_dir2_node_replace(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_da_state_blk_t	*blk;		/* leaf block */
+	xfs_dir2_data_t		*data;		/* data block structure */
+	xfs_dir2_data_entry_t	*dep;		/* data entry changed */
+	int			error;		/* error return value */
+	int			i;		/* btree level */
+	xfs_ino_t		inum;		/* new inode number */
+	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry being changed */
+	int			rval;		/* internal return value */
+	xfs_da_state_t		*state;		/* btree cursor */
+
+	xfs_dir2_trace_args("node_replace", args);
+	/*
+	 * Allocate and initialize the btree cursor.
+	 */
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_dirblksize;
+	state->node_ents = state->mp->m_dir_node_ents;
+	inum = args->inumber;
+	/*
+	 * Lookup the entry to change in the btree.
+	 */
+	error = xfs_da_node_lookup_int(state, &rval);
+	if (error) {
+		rval = error;
+	}
+	/*
+	 * It should be found, since the vnodeops layer has looked it up
+	 * and locked it.  But paranoia is good.
+	 */
+	if (rval == EEXIST) {
+		/*
+		 * Find the leaf entry.
+		 */
+		blk = &state->path.blk[state->path.active - 1];
+		ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
+		leaf = blk->bp->data;
+		lep = &leaf->ents[blk->index];
+		ASSERT(state->extravalid);
+		/*
+		 * Point to the data entry.
+		 */
+		data = state->extrablk.bp->data;
+		ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC);
+		dep = (xfs_dir2_data_entry_t *)
+		      ((char *)data +
+		       XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT)));
+		ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT));
+		/*
+		 * Fill in the new inode number and log the entry.
+		 */
+		INT_SET(dep->inumber, ARCH_CONVERT, inum);
+		xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep);
+		rval = 0;
+	}
+	/*
+	 * Didn't find it, and we're holding a data block.  Drop it.
+	 */
+	else if (state->extravalid) {
+		xfs_da_brelse(args->trans, state->extrablk.bp);
+		state->extrablk.bp = NULL;
+	}
+	/*
+	 * Release all the buffers in the cursor.
+	 */
+	for (i = 0; i < state->path.active; i++) {
+		xfs_da_brelse(args->trans, state->path.blk[i].bp);
+		state->path.blk[i].bp = NULL;
+	}
+	xfs_da_state_free(state);
+	return rval;
+}
+
+/*
+ * Trim off a trailing empty freespace block.
+ * Return (in rvalp) 1 if we did it, 0 if not.
+ */
+int						/* error */
+xfs_dir2_node_trim_free(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_fileoff_t		fo,		/* free block number */
+	int			*rvalp)		/* out: did something */
+{
+	xfs_dabuf_t		*bp;		/* freespace buffer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return code */
+	xfs_dir2_free_t		*free;		/* freespace structure */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_trans_t		*tp;		/* transaction pointer */
+
+	dp = args->dp;
+	mp = dp->i_mount;
+	tp = args->trans;
+	/*
+	 * Read the freespace block.
+	 */
+	if (unlikely(error = xfs_da_read_buf(tp, dp, (xfs_dablk_t)fo, -2, &bp,
+			XFS_DATA_FORK))) {
+		return error;
+	}
+
+	/*
+	 * There can be holes in freespace.  If fo is a hole, there's
+	 * nothing to do.
+	 */
+	if (bp == NULL) {
+		*rvalp = 0;
+		return 0;
+	}
+	free = bp->data;
+	ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+	/*
+	 * If there are used entries, there's nothing to do.
+	 */
+	if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) {
+		xfs_da_brelse(tp, bp);
+		*rvalp = 0;
+		return 0;
+	}
+	/*
+	 * Blow the block away.
+	 */
+	if ((error =
+	    xfs_dir2_shrink_inode(args, XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo),
+		    bp))) {
+		/*
+		 * Can't fail with ENOSPC since that only happens with no
+		 * space reservation, when breaking up an extent into two
+		 * pieces.  This is the last block of an extent.
+		 */
+		ASSERT(error != ENOSPC);
+		xfs_da_brelse(tp, bp);
+		return error;
+	}
+	/*
+	 * Return that we succeeded.
+	 */
+	*rvalp = 1;
+	return 0;
+}
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
new file mode 100644
index 0000000..96db420
--- /dev/null
+++ b/fs/xfs/xfs_dir2_node.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_NODE_H__
+#define	__XFS_DIR2_NODE_H__
+
+/*
+ * Directory version 2, btree node format structures
+ */
+
+struct uio;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_da_state;
+struct xfs_da_state_blk;
+struct xfs_inode;
+struct xfs_trans;
+
+/*
+ * Constants.
+ */
+
+/*
+ * Offset of the freespace index.
+ */
+#define	XFS_DIR2_FREE_SPACE	2
+#define	XFS_DIR2_FREE_OFFSET	(XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
+#define	XFS_DIR2_FREE_FIRSTDB(mp)	\
+	XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_FREE_OFFSET)
+
+#define	XFS_DIR2_FREE_MAGIC	0x58443246	/* XD2F */
+
+/*
+ * Structures.
+ */
+typedef	struct xfs_dir2_free_hdr {
+	__uint32_t		magic;		/* XFS_DIR2_FREE_MAGIC */
+	__int32_t		firstdb;	/* db of first entry */
+	__int32_t		nvalid;		/* count of valid entries */
+	__int32_t		nused;		/* count of used entries */
+} xfs_dir2_free_hdr_t;
+
+typedef struct xfs_dir2_free {
+	xfs_dir2_free_hdr_t	hdr;		/* block header */
+	xfs_dir2_data_off_t	bests[1];	/* best free counts */
+						/* unused entries are -1 */
+} xfs_dir2_free_t;
+#define	XFS_DIR2_MAX_FREE_BESTS(mp)	\
+	(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \
+	 (uint)sizeof(xfs_dir2_data_off_t))
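+/*
+ * For example, with a hypothetical 4k directory block, the 16-byte
+ * header above and 2-byte best-free entries give
+ * XFS_DIR2_MAX_FREE_BESTS(mp) = (4096 - 16) / 2 = 2040 entries per
+ * freespace block.
+ */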
+
+/*
+ * Macros.
+ */
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDB)
+xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db);
+#define	XFS_DIR2_DB_TO_FDB(mp,db)	xfs_dir2_db_to_fdb(mp, db)
+#else
+#define	XFS_DIR2_DB_TO_FDB(mp,db)	\
+	(XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp))
+#endif
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDINDEX)
+int
+xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db);
+#define	XFS_DIR2_DB_TO_FDINDEX(mp,db)	xfs_dir2_db_to_fdindex(mp, db)
+#else
+#define	XFS_DIR2_DB_TO_FDINDEX(mp,db)	((db) % XFS_DIR2_MAX_FREE_BESTS(mp))
+#endif
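+/*
+ * Continuing the hypothetical 2040-bests-per-block example above,
+ * data block 5000 maps to freespace block
+ * XFS_DIR2_FREE_FIRSTDB(mp) + 5000 / 2040 = FIRSTDB + 2, at index
+ * 5000 % 2040 = 920 within that block's bests array.
+ */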
+
+/*
+ * Functions.
+ */
+
+extern void
+	xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
+				int first, int last);
+
+extern int
+	xfs_dir2_leaf_to_node(struct xfs_da_args *args, struct xfs_dabuf *lbp);
+
+extern xfs_dahash_t
+	xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
+
+extern int
+	xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
+				  struct xfs_da_args *args, int *indexp,
+				  struct xfs_da_state *state);
+
+extern int
+	xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
+			     struct xfs_dabuf *leaf2_bp);
+
+extern int
+	xfs_dir2_leafn_split(struct xfs_da_state *state,
+			     struct xfs_da_state_blk *oldblk,
+			     struct xfs_da_state_blk *newblk);
+
+extern int
+	xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
+
+extern void
+	xfs_dir2_leafn_unbalance(struct xfs_da_state *state,
+				 struct xfs_da_state_blk *drop_blk,
+				 struct xfs_da_state_blk *save_blk);
+
+extern int
+	xfs_dir2_node_addname(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_node_lookup(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_node_removename(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_node_replace(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
+				int *rvalp);
+
+#endif	/* __XFS_DIR2_NODE_H__ */
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
new file mode 100644
index 0000000..6bbc616
--- /dev/null
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_sf.c
+ * Shortform directory implementation for v2 directories.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_error.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_trace.h"
+
+/*
+ * Prototypes for internal functions.
+ */
+static void xfs_dir2_sf_addname_easy(xfs_da_args_t *args,
+				     xfs_dir2_sf_entry_t *sfep,
+				     xfs_dir2_data_aoff_t offset,
+				     int new_isize);
+static void xfs_dir2_sf_addname_hard(xfs_da_args_t *args, int objchange,
+				     int new_isize);
+static int xfs_dir2_sf_addname_pick(xfs_da_args_t *args, int objchange,
+				    xfs_dir2_sf_entry_t **sfepp,
+				    xfs_dir2_data_aoff_t *offsetp);
+#ifdef DEBUG
+static void xfs_dir2_sf_check(xfs_da_args_t *args);
+#else
+#define	xfs_dir2_sf_check(args)
+#endif /* DEBUG */
+#if XFS_BIG_INUMS
+static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
+static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
+#endif /* XFS_BIG_INUMS */
+
+/*
+ * Given a block directory (dp/block), calculate its size as a shortform (sf)
+ * directory and a header for the sf directory, if it will fit it the
+ * space currently present in the inode.  If it won't fit, the output
+ * size is too big (but not accurate).
+ */
+int						/* size for sf form */
+xfs_dir2_block_sfsize(
+	xfs_inode_t		*dp,		/* incore inode pointer */
+	xfs_dir2_block_t	*block,		/* block directory data */
+	xfs_dir2_sf_hdr_t	*sfhp)		/* output: header for sf form */
+{
+	xfs_dir2_dataptr_t	addr;		/* data entry address */
+	xfs_dir2_leaf_entry_t	*blp;		/* leaf area of the block */
+	xfs_dir2_block_tail_t	*btp;		/* tail area of the block */
+	int			count;		/* shortform entry count */
+	xfs_dir2_data_entry_t	*dep;		/* data entry in the block */
+	int			i;		/* block entry index */
+	int			i8count;	/* count of big-inode entries */
+	int			isdot;		/* entry is "." */
+	int			isdotdot;	/* entry is ".." */
+	xfs_mount_t		*mp;		/* mount structure pointer */
+	int			namelen;	/* total name bytes */
+	xfs_ino_t		parent;		/* parent inode number */
+	int			size = 0;	/* total computed size */
+
+	mp = dp->i_mount;
+
+	count = i8count = namelen = 0;
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	blp = XFS_DIR2_BLOCK_LEAF_P(btp);
+
+	/*
+	 * Iterate over the block's data entries by using the leaf pointers.
+	 */
+	for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) {
+		if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR)
+			continue;
+		/*
+		 * Calculate the pointer to the entry at hand.
+		 */
+		dep = (xfs_dir2_data_entry_t *)
+		      ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr));
+		/*
+		 * Detect . and .., so we can special-case them.
+		 * . is not included in sf directories.
+		 * .. is included by just the parent inode number.
+		 */
+		isdot = dep->namelen == 1 && dep->name[0] == '.';
+		isdotdot =
+			dep->namelen == 2 &&
+			dep->name[0] == '.' && dep->name[1] == '.';
+#if XFS_BIG_INUMS
+		if (!isdot)
+			i8count += INT_GET(dep->inumber, ARCH_CONVERT) > XFS_DIR2_MAX_SHORT_INUM;
+#endif
+		if (!isdot && !isdotdot) {
+			count++;
+			namelen += dep->namelen;
+		} else if (isdotdot)
+			parent = INT_GET(dep->inumber, ARCH_CONVERT);
+		/*
+		 * Calculate the new size, see if we should give up yet.
+		 */
+		size = XFS_DIR2_SF_HDR_SIZE(i8count) +		/* header */
+		       count +					/* namelen */
+		       count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */
+		       namelen +				/* name */
+		       (i8count ?				/* inumber */
+				(uint)sizeof(xfs_dir2_ino8_t) * count :
+				(uint)sizeof(xfs_dir2_ino4_t) * count);
+		if (size > XFS_IFORK_DSIZE(dp))
+			return size;		/* size value is a failure */
+	}
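+	/*
+	 * As a hypothetical example of the size computation above: three
+	 * entries named "a", "bb" and "ccc" with small inode numbers
+	 * (i8count == 0, assuming a 6-byte header, 2-byte offsets and
+	 * 4-byte packed inode numbers) give
+	 * size = 6 + 3 + 3*2 + 6 + 3*4 = 33 bytes.
+	 */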
+	/*
+	 * Create the output header, if it worked.
+	 */
+	sfhp->count = count;
+	sfhp->i8count = i8count;
+	XFS_DIR2_SF_PUT_INUMBER((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
+	return size;
+}
+
+/*
+ * Convert a block format directory to shortform.
+ * Caller has already checked that it will fit, and built us a header.
+ */
+int						/* error */
+xfs_dir2_block_to_sf(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dabuf_t		*bp,		/* block buffer */
+	int			size,		/* shortform directory size */
+	xfs_dir2_sf_hdr_t	*sfhp)		/* shortform directory hdr */
+{
+	xfs_dir2_block_t	*block;		/* block structure */
+	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
+	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_data_unused_t	*dup;		/* unused data pointer */
+	char			*endptr;	/* end of data entries */
+	int			error;		/* error return value */
+	int			logflags;	/* inode logging flags */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	char			*ptr;		/* current data pointer */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+	xfs_ino_t		temp;		/* temporary inode number */
+
+	xfs_dir2_trace_args_sb("block_to_sf", args, size, bp);
+	dp = args->dp;
+	mp = dp->i_mount;
+
+	/*
+	 * Make a copy of the block data, so we can shrink the inode
+	 * and add local data.
+	 */
+	block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
+	memcpy(block, bp->data, mp->m_dirblksize);
+	logflags = XFS_ILOG_CORE;
+	if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
+		ASSERT(error != ENOSPC);
+		goto out;
+	}
+	/*
+	 * The buffer is now unconditionally gone, whether
+	 * xfs_dir2_shrink_inode worked or not.
+	 *
+	 * Convert the inode to local format.
+	 */
+	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
+	dp->i_df.if_flags |= XFS_IFINLINE;
+	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+	ASSERT(dp->i_df.if_bytes == 0);
+	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+	logflags |= XFS_ILOG_DDATA;
+	/*
+	 * Copy the header into the newly allocated local space.
+	 */
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count));
+	dp->i_d.di_size = size;
+	/*
+	 * Set up to loop over the block's entries.
+	 */
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	ptr = (char *)block->u;
+	endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
+	sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	/*
+	 * Loop over the active and unused entries.
+	 * Stop when we reach the leaf/tail portion of the block.
+	 */
+	while (ptr < endptr) {
+		/*
+		 * If it's unused, just skip over it.
+		 */
+		dup = (xfs_dir2_data_unused_t *)ptr;
+		if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) {
+			ptr += INT_GET(dup->length, ARCH_CONVERT);
+			continue;
+		}
+		dep = (xfs_dir2_data_entry_t *)ptr;
+		/*
+		 * Skip .
+		 */
+		if (dep->namelen == 1 && dep->name[0] == '.')
+			ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == dp->i_ino);
+		/*
+		 * Skip .., but make sure the inode number is right.
+		 */
+		else if (dep->namelen == 2 &&
+			 dep->name[0] == '.' && dep->name[1] == '.')
+			ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) ==
+			       XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
+		/*
+		 * Normal entry, copy it into shortform.
+		 */
+		else {
+			sfep->namelen = dep->namelen;
+			XFS_DIR2_SF_PUT_OFFSET(sfep,
+				(xfs_dir2_data_aoff_t)
+				((char *)dep - (char *)block));
+			memcpy(sfep->name, dep->name, dep->namelen);
+			temp = INT_GET(dep->inumber, ARCH_CONVERT);
+			XFS_DIR2_SF_PUT_INUMBER(sfp, &temp,
+				XFS_DIR2_SF_INUMBERP(sfep));
+			sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+		}
+		ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
+	}
+	ASSERT((char *)sfep - (char *)sfp == size);
+	xfs_dir2_sf_check(args);
+out:
+	xfs_trans_log_inode(args->trans, dp, logflags);
+	kmem_free(block, mp->m_dirblksize);
+	return error;
+}
+
+/*
+ * Add a name to a shortform directory.
+ * There are two algorithms, "easy" and "hard" which we decide on
+ * before changing anything.
+ * Convert to block form if necessary, if the new entry won't fit.
+ */
+int						/* error */
+xfs_dir2_sf_addname(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	int			add_entsize;	/* size of the new entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			error;		/* error return value */
+	int			incr_isize;	/* total change in size */
+	int			new_isize;	/* di_size after adding name */
+	int			objchange;	/* changing to 8-byte inodes */
+	xfs_dir2_data_aoff_t	offset;		/* offset for new entry */
+	int			old_isize;	/* di_size before adding name */
+	int			pick;		/* which algorithm to use */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
+
+	xfs_dir2_trace_args("sf_addname", args);
+	ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
+	dp = args->dp;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Make sure the shortform value has some of its header.
+	 */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+	/*
+	 * Compute entry (and change in) size.
+	 */
+	add_entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen);
+	incr_isize = add_entsize;
+	objchange = 0;
+#if XFS_BIG_INUMS
+	/*
+	 * Do we have to change to 8 byte inodes?
+	 */
+	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
+		/*
+		 * Yes, adjust the entry size and the total size.
+		 */
+		add_entsize +=
+			(uint)sizeof(xfs_dir2_ino8_t) -
+			(uint)sizeof(xfs_dir2_ino4_t);
+		incr_isize +=
+			(sfp->hdr.count + 2) *
+			((uint)sizeof(xfs_dir2_ino8_t) -
+			 (uint)sizeof(xfs_dir2_ino4_t));
+		objchange = 1;
+	}
+#endif
+	old_isize = (int)dp->i_d.di_size;
+	new_isize = old_isize + incr_isize;
+	/*
+	 * Won't fit as shortform any more (due to size),
+	 * or the pick routine says it won't (due to offset values).
+	 */
+	if (new_isize > XFS_IFORK_DSIZE(dp) ||
+	    (pick =
+	     xfs_dir2_sf_addname_pick(args, objchange, &sfep, &offset)) == 0) {
+		/*
+		 * Just checking or no space reservation, it doesn't fit.
+		 */
+		if (args->justcheck || args->total == 0)
+			return XFS_ERROR(ENOSPC);
+		/*
+		 * Convert to block form then add the name.
+		 */
+		error = xfs_dir2_sf_to_block(args);
+		if (error)
+			return error;
+		return xfs_dir2_block_addname(args);
+	}
+	/*
+	 * Just checking, it fits.
+	 */
+	if (args->justcheck)
+		return 0;
+	/*
+	 * Do it the easy way - just add it at the end.
+	 */
+	if (pick == 1)
+		xfs_dir2_sf_addname_easy(args, sfep, offset, new_isize);
+	/*
+	 * Do it the hard way - look for a place to insert the new entry.
+	 * Convert to 8 byte inode numbers first if necessary.
+	 */
+	else {
+		ASSERT(pick == 2);
+#if XFS_BIG_INUMS
+		if (objchange)
+			xfs_dir2_sf_toino8(args);
+#endif
+		xfs_dir2_sf_addname_hard(args, objchange, new_isize);
+	}
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+	return 0;
+}
+
+/*
+ * Add the new entry the "easy" way.
+ * This is copying the old directory and adding the new entry at the end.
+ * Since it's sorted by "offset" we need room after the last offset
+ * that's already there, and then room to convert to a block directory.
+ * This is already checked by the pick routine.
+ */
+static void
+xfs_dir2_sf_addname_easy(
+	xfs_da_args_t		*args,		/* operation arguments */
+	xfs_dir2_sf_entry_t	*sfep,		/* pointer to new entry */
+	xfs_dir2_data_aoff_t	offset,		/* offset to use for new ent */
+	int			new_isize)	/* new directory size */
+{
+	int			byteoff;	/* byte offset in sf dir */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+
+	dp = args->dp;
+
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	byteoff = (int)((char *)sfep - (char *)sfp);
+	/*
+	 * Grow the in-inode space.
+	 */
+	xfs_idata_realloc(dp, XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen),
+		XFS_DATA_FORK);
+	/*
+	 * Need to set up again due to realloc of the inode data.
+	 */
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff);
+	/*
+	 * Fill in the new entry.
+	 */
+	sfep->namelen = args->namelen;
+	XFS_DIR2_SF_PUT_OFFSET(sfep, offset);
+	memcpy(sfep->name, args->name, sfep->namelen);
+	XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber,
+		XFS_DIR2_SF_INUMBERP(sfep));
+	/*
+	 * Update the header and inode.
+	 */
+	sfp->hdr.count++;
+#if XFS_BIG_INUMS
+	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM)
+		sfp->hdr.i8count++;
+#endif
+	dp->i_d.di_size = new_isize;
+	xfs_dir2_sf_check(args);
+}
+
+/*
+ * Add the new entry the "hard" way.
+ * The caller has already converted to 8 byte inode numbers if necessary,
+ * in which case we need to leave the i8count at 1.
+ * Find a hole that the new entry will fit into, and copy
+ * the first part of the entries, the new entry, and the last part of
+ * the entries.
+ */
+/* ARGSUSED */
+static void
+xfs_dir2_sf_addname_hard(
+	xfs_da_args_t		*args,		/* operation arguments */
+	int			objchange,	/* changing inode number size */
+	int			new_isize)	/* new directory size */
+{
+	int			add_datasize;	/* data size need for new ent */
+	char			*buf;		/* buffer for old */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			eof;		/* reached end of old dir */
+	int			nbytes;		/* temp for byte copies */
+	xfs_dir2_data_aoff_t	new_offset;	/* next offset value */
+	xfs_dir2_data_aoff_t	offset;		/* current offset value */
+	int			old_isize;	/* previous di_size */
+	xfs_dir2_sf_entry_t	*oldsfep;	/* entry in original dir */
+	xfs_dir2_sf_t		*oldsfp;	/* original shortform dir */
+	xfs_dir2_sf_entry_t	*sfep;		/* entry in new dir */
+	xfs_dir2_sf_t		*sfp;		/* new shortform dir */
+
+	/*
+	 * Copy the old directory to the stack buffer.
+	 */
+	dp = args->dp;
+
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	old_isize = (int)dp->i_d.di_size;
+	buf = kmem_alloc(old_isize, KM_SLEEP);
+	oldsfp = (xfs_dir2_sf_t *)buf;
+	memcpy(oldsfp, sfp, old_isize);
+	/*
+	 * Loop over the old directory finding the place we're going
+	 * to insert the new entry.
+	 * If it's going to end up at the end then oldsfep will point there.
+	 */
+	for (offset = XFS_DIR2_DATA_FIRST_OFFSET,
+	      oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp),
+	      add_datasize = XFS_DIR2_DATA_ENTSIZE(args->namelen),
+	      eof = (char *)oldsfep == &buf[old_isize];
+	     !eof;
+	     offset = new_offset + XFS_DIR2_DATA_ENTSIZE(oldsfep->namelen),
+	      oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep),
+	      eof = (char *)oldsfep == &buf[old_isize]) {
+		new_offset = XFS_DIR2_SF_GET_OFFSET(oldsfep);
+		if (offset + add_datasize <= new_offset)
+			break;
+	}
+	/*
+	 * Get rid of the old directory, then allocate space for
+	 * the new one.  We do this so xfs_idata_realloc won't copy
+	 * the data.
+	 */
+	xfs_idata_realloc(dp, -old_isize, XFS_DATA_FORK);
+	xfs_idata_realloc(dp, new_isize, XFS_DATA_FORK);
+	/*
+	 * Reset the pointer since the buffer was reallocated.
+	 */
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	/*
+	 * Copy the first part of the directory, including the header.
+	 */
+	nbytes = (int)((char *)oldsfep - (char *)oldsfp);
+	memcpy(sfp, oldsfp, nbytes);
+	sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + nbytes);
+	/*
+	 * Fill in the new entry, and update the header counts.
+	 */
+	sfep->namelen = args->namelen;
+	XFS_DIR2_SF_PUT_OFFSET(sfep, offset);
+	memcpy(sfep->name, args->name, sfep->namelen);
+	XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber,
+		XFS_DIR2_SF_INUMBERP(sfep));
+	sfp->hdr.count++;
+#if XFS_BIG_INUMS
+	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
+		sfp->hdr.i8count++;
+#endif
+	/*
+	 * If there's more left to copy, do that.
+	 */
+	if (!eof) {
+		sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+		memcpy(sfep, oldsfep, old_isize - nbytes);
+	}
+	kmem_free(buf, old_isize);
+	dp->i_d.di_size = new_isize;
+	xfs_dir2_sf_check(args);
+}
+
+/*
+ * Decide if the new entry will fit at all.
+ * If it will fit, pick between adding the new entry to the end (easy)
+ * or somewhere else (hard).
+ * Return 0 (won't fit), 1 (easy), 2 (hard).
+ */
+/*ARGSUSED*/
+static int					/* pick result */
+xfs_dir2_sf_addname_pick(
+	xfs_da_args_t		*args,		/* operation arguments */
+	int			objchange,	/* inode # size changes */
+	xfs_dir2_sf_entry_t	**sfepp,	/* out(1): new entry ptr */
+	xfs_dir2_data_aoff_t	*offsetp)	/* out(1): new offset */
+{
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			holefit;	/* found hole it will fit in */
+	int			i;		/* entry number */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_data_aoff_t	offset;		/* data block offset */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+	int			size;		/* entry's data size */
+	int			used;		/* data bytes used */
+
+	dp = args->dp;
+	mp = dp->i_mount;
+
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	size = XFS_DIR2_DATA_ENTSIZE(args->namelen);
+	offset = XFS_DIR2_DATA_FIRST_OFFSET;
+	sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	holefit = 0;
+	/*
+	 * Loop over sf entries.
+	 * Keep track of data offset and whether we've seen a place
+	 * to insert the new entry.
+	 */
+	for (i = 0; i < sfp->hdr.count; i++) {
+		if (!holefit)
+			holefit = offset + size <= XFS_DIR2_SF_GET_OFFSET(sfep);
+		offset = XFS_DIR2_SF_GET_OFFSET(sfep) +
+			 XFS_DIR2_DATA_ENTSIZE(sfep->namelen);
+		sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+	}
+	/*
+	 * Calculate the data bytes that would be used if this were a
+	 * single data block (block form) directory, excluding the new
+	 * entry's data: the existing entry data, plus a leaf entry for
+	 * every entry (the count existing names, ".", "..", and the new
+	 * name), plus the block tail.
+	 */
+	used = offset +
+	       (sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	       (uint)sizeof(xfs_dir2_block_tail_t);
+	/*
+	 * If it won't fit even in block form then we can't insert it here;
+	 * the caller will go back, convert to block form, then retry the
+	 * insert there (converting on to leaf form if necessary).
+	 */
+	if (used + (holefit ? 0 : size) > mp->m_dirblksize)
+		return 0;
+	/*
+	 * If changing the inode number size, do it the hard way.
+	 */
+#if XFS_BIG_INUMS
+	if (objchange) {
+		return 2;
+	}
+#else
+	ASSERT(objchange == 0);
+#endif
+	/*
+	 * If it won't fit at the end then do it the hard way (use the hole).
+	 */
+	if (used + size > mp->m_dirblksize)
+		return 2;
+	/*
+	 * Do it the easy way.
+	 */
+	*sfepp = sfep;
+	*offsetp = offset;
+	return 1;
+}
+
+#ifdef DEBUG
+/*
+ * Check consistency of shortform directory, assert if bad.
+ */
+static void
+xfs_dir2_sf_check(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			i;		/* entry number */
+	int			i8count;	/* number of big inode#s */
+	xfs_ino_t		ino;		/* entry inode number */
+	int			offset;		/* data offset */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform dir entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+
+	dp = args->dp;
+
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	offset = XFS_DIR2_DATA_FIRST_OFFSET;
+	ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent);
+	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	     i < sfp->hdr.count;
+	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
+		ASSERT(XFS_DIR2_SF_GET_OFFSET(sfep) >= offset);
+		ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep));
+		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+		offset =
+			XFS_DIR2_SF_GET_OFFSET(sfep) +
+			XFS_DIR2_DATA_ENTSIZE(sfep->namelen);
+	}
+	ASSERT(i8count == sfp->hdr.i8count);
+	ASSERT(XFS_BIG_INUMS || i8count == 0);
+	ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
+	ASSERT(offset +
+	       (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	       (uint)sizeof(xfs_dir2_block_tail_t) <=
+	       dp->i_mount->m_dirblksize);
+}
+#endif	/* DEBUG */
+
+/*
+ * Create a new (shortform) directory.
+ */
+int					/* error, always 0 */
+xfs_dir2_sf_create(
+	xfs_da_args_t	*args,		/* operation arguments */
+	xfs_ino_t	pino)		/* parent inode number */
+{
+	xfs_inode_t	*dp;		/* incore directory inode */
+	int		i8count;	/* parent inode is an 8-byte number */
+	xfs_dir2_sf_t	*sfp;		/* shortform structure */
+	int		size;		/* directory size */
+
+	xfs_dir2_trace_args_i("sf_create", args, pino);
+	dp = args->dp;
+
+	ASSERT(dp != NULL);
+	ASSERT(dp->i_d.di_size == 0);
+	/*
+	 * If it's currently a zero-length extent file,
+	 * convert it to local format.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) {
+		dp->i_df.if_flags &= ~XFS_IFEXTENTS;	/* just in case */
+		dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+		xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+		dp->i_df.if_flags |= XFS_IFINLINE;
+	}
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	ASSERT(dp->i_df.if_bytes == 0);
+	i8count = pino > XFS_DIR2_MAX_SHORT_INUM;
+	size = XFS_DIR2_SF_HDR_SIZE(i8count);
+	/*
+	 * Make a buffer for the data.
+	 */
+	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+	/*
+	 * Fill in the header.
+	 */
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	sfp->hdr.i8count = i8count;
+	/*
+	 * Now we can put in the parent inode number, since i8count is set.
+	 */
+	XFS_DIR2_SF_PUT_INUMBER(sfp, &pino, &sfp->hdr.parent);
+	sfp->hdr.count = 0;
+	dp->i_d.di_size = size;
+	xfs_dir2_sf_check(args);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+	return 0;
+}
+
+int						/* error */
+xfs_dir2_sf_getdents(
+	xfs_inode_t		*dp,		/* incore directory inode */
+	uio_t			*uio,		/* caller's buffer control */
+	int			*eofp,		/* eof reached? (out) */
+	xfs_dirent_t		*dbp,		/* caller's buffer */
+	xfs_dir2_put_t		put)		/* abi's formatting function */
+{
+	int			error;		/* error return value */
+	int			i;		/* shortform entry number */
+	xfs_mount_t		*mp;		/* filesystem mount point */
+	xfs_dir2_dataptr_t	off;		/* current entry's offset */
+	xfs_dir2_put_args_t	p;		/* arg package for put rtn */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform directory entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+	xfs_off_t			dir_offset;
+
+	mp = dp->i_mount;
+
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Give up if the directory is way too short.
+	 */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		return XFS_ERROR(EIO);
+	}
+
+	dir_offset = uio->uio_offset;
+
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+
+	/*
+	 * If the block number in the offset is out of range, we're done.
+	 */
+	if (XFS_DIR2_DATAPTR_TO_DB(mp, dir_offset) > mp->m_dirdatablk) {
+		*eofp = 1;
+		return 0;
+	}
+
+	/*
+	 * Set up putargs structure.
+	 */
+	p.dbp = dbp;
+	p.put = put;
+	p.uio = uio;
+	/*
+	 * Put . entry unless we're starting past it.
+	 */
+	if (dir_offset <=
+		    XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+					       XFS_DIR2_DATA_DOT_OFFSET)) {
+		p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 0,
+						XFS_DIR2_DATA_DOTDOT_OFFSET);
+		p.ino = dp->i_ino;
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = ".";
+		p.namelen = 1;
+
+		error = p.put(&p);
+
+		if (!p.done) {
+			uio->uio_offset =
+				XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+						XFS_DIR2_DATA_DOT_OFFSET);
+			return error;
+		}
+	}
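+	/*
+	 * Note (informational): the cookie handed to the put routine is the
+	 * offset of the *next* entry -- the ".." slot for ".", the first
+	 * real entry for "..", and the byte just past the entry's data for
+	 * ordinary entries -- so a resumed getdents continues after the
+	 * last entry it returned.
+	 */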
+
+	/*
+	 * Put .. entry unless we're starting past it.
+	 */
+	if (dir_offset <=
+		    XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+					       XFS_DIR2_DATA_DOTDOT_OFFSET)) {
+		p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+						XFS_DIR2_DATA_FIRST_OFFSET);
+		p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent);
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = "..";
+		p.namelen = 2;
+
+		error = p.put(&p);
+
+		if (!p.done) {
+			uio->uio_offset =
+				XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+					XFS_DIR2_DATA_DOTDOT_OFFSET);
+			return error;
+		}
+	}
+
+	/*
+	 * Loop while there are more entries and put'ing works.
+	 */
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+		     i < sfp->hdr.count;
+			     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
+
+		off = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+				XFS_DIR2_SF_GET_OFFSET(sfep));
+
+		if (dir_offset > off)
+			continue;
+
+		p.namelen = sfep->namelen;
+
+		p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
+			XFS_DIR2_SF_GET_OFFSET(sfep) +
+			XFS_DIR2_DATA_ENTSIZE(p.namelen));
+
+		p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep));
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = (char *)sfep->name;
+
+		error = p.put(&p);
+
+		if (!p.done) {
+			uio->uio_offset = off;
+			return error;
+		}
+	}
+
+	/*
+	 * They all fit.
+	 */
+	*eofp = 1;
+
+	uio->uio_offset =
+		XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0);
+
+	return 0;
+}
+
+/*
+ * Lookup an entry in a shortform directory.
+ * Returns EEXIST if found, ENOENT if not found.
+ */
+int						/* error */
+xfs_dir2_sf_lookup(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			i;		/* entry index */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform directory entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+
+	xfs_dir2_trace_args("sf_lookup", args);
+	xfs_dir2_sf_check(args);
+	dp = args->dp;
+
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Bail out if the directory is way too short.
+	 */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+	/*
+	 * Special case for .
+	 */
+	if (args->namelen == 1 && args->name[0] == '.') {
+		args->inumber = dp->i_ino;
+		return XFS_ERROR(EEXIST);
+	}
+	/*
+	 * Special case for ..
+	 */
+	if (args->namelen == 2 &&
+	    args->name[0] == '.' && args->name[1] == '.') {
+		args->inumber = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent);
+		return XFS_ERROR(EEXIST);
+	}
+	/*
+	 * Loop over all the entries trying to match ours.
+	 */
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	     i < sfp->hdr.count;
+	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
+		if (sfep->namelen == args->namelen &&
+		    sfep->name[0] == args->name[0] &&
+		    memcmp(args->name, sfep->name, args->namelen) == 0) {
+			args->inumber =
+				XFS_DIR2_SF_GET_INUMBER(sfp,
+					XFS_DIR2_SF_INUMBERP(sfep));
+			return XFS_ERROR(EEXIST);
+		}
+	}
+	/*
+	 * Didn't find it.
+	 */
+	ASSERT(args->oknoent);
+	return XFS_ERROR(ENOENT);
+}
+
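+/*
+ * Illustrative caller sketch (hypothetical, not taken from this code):
+ * lookup reports success as EEXIST and leaves the result in args->inumber,
+ * so a wrapper would look roughly like
+ *
+ *	error = xfs_dir2_sf_lookup(args);
+ *	if (error == EEXIST) {
+ *		found_ino = args->inumber;
+ *		error = 0;
+ *	}
+ *
+ * while ENOENT simply means the name is not present.
+ */
+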
+/*
+ * Remove an entry from a shortform directory.
+ */
+int						/* error */
+xfs_dir2_sf_removename(
+	xfs_da_args_t		*args)
+{
+	int			byteoff;	/* offset of removed entry */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			entsize;	/* this entry's size */
+	int			i;		/* shortform entry index */
+	int			newsize;	/* new inode size */
+	int			oldsize;	/* old inode size */
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform directory entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+
+	xfs_dir2_trace_args("sf_removename", args);
+	dp = args->dp;
+
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	oldsize = (int)dp->i_d.di_size;
+	/*
+	 * Bail out if the directory is way too short.
+	 */
+	if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == oldsize);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(oldsize >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+	/*
+	 * Loop over the old directory entries.
+	 * Find the one we're deleting.
+	 */
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+	     i < sfp->hdr.count;
+	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
+		if (sfep->namelen == args->namelen &&
+		    sfep->name[0] == args->name[0] &&
+		    memcmp(sfep->name, args->name, args->namelen) == 0) {
+			ASSERT(XFS_DIR2_SF_GET_INUMBER(sfp,
+					XFS_DIR2_SF_INUMBERP(sfep)) ==
+				args->inumber);
+			break;
+		}
+	}
+	/*
+	 * Didn't find it.
+	 */
+	if (i == sfp->hdr.count) {
+		return XFS_ERROR(ENOENT);
+	}
+	/*
+	 * Calculate sizes.
+	 */
+	byteoff = (int)((char *)sfep - (char *)sfp);
+	entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen);
+	newsize = oldsize - entsize;
+	/*
+	 * Copy the part after the removed entry, if any, sliding it down.
+	 */
+	if (byteoff + entsize < oldsize)
+		memmove((char *)sfp + byteoff, (char *)sfp + byteoff + entsize,
+			oldsize - (byteoff + entsize));
+	/*
+	 * Fix up the header and file size.
+	 */
+	sfp->hdr.count--;
+	dp->i_d.di_size = newsize;
+	/*
+	 * Reallocate, making it smaller.
+	 */
+	xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+#if XFS_BIG_INUMS
+	/*
+	 * Are we changing inode number size?
+	 */
+	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
+		if (sfp->hdr.i8count == 1)
+			xfs_dir2_sf_toino4(args);
+		else
+			sfp->hdr.i8count--;
+	}
+#endif
+	xfs_dir2_sf_check(args);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+	return 0;
+}
+
+/*
+ * Replace the inode number of an entry in a shortform directory.
+ */
+int						/* error */
+xfs_dir2_sf_replace(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			i;		/* entry index */
+#if XFS_BIG_INUMS || defined(DEBUG)
+	xfs_ino_t		ino=0;		/* entry old inode number */
+#endif
+#if XFS_BIG_INUMS
+	int			i8elevated;	/* sf_toino8 set i8count=1 */
+#endif
+	xfs_dir2_sf_entry_t	*sfep;		/* shortform directory entry */
+	xfs_dir2_sf_t		*sfp;		/* shortform structure */
+
+	xfs_dir2_trace_args("sf_replace", args);
+	dp = args->dp;
+
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Bail out if the shortform directory is way too small.
+	 */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+#if XFS_BIG_INUMS
+	/*
+	 * The new inode number is large, and we need to convert to
+	 * 8-byte inodes.
+	 */
+	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
+		int	error;			/* error return value */
+		int	newsize;		/* new inode size */
+
+		newsize =
+			dp->i_df.if_bytes +
+			(sfp->hdr.count + 1) *
+			((uint)sizeof(xfs_dir2_ino8_t) -
+			 (uint)sizeof(xfs_dir2_ino4_t));
+		/*
+		 * It won't fit as shortform; convert to block form, then
+		 * do the replace there.
+		 */
+		if (newsize > XFS_IFORK_DSIZE(dp)) {
+			error = xfs_dir2_sf_to_block(args);
+			if (error) {
+				return error;
+			}
+			return xfs_dir2_block_replace(args);
+		}
+		/*
+		 * Still fits, convert to 8-byte now.
+		 */
+		xfs_dir2_sf_toino8(args);
+		i8elevated = 1;
+		sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	} else
+		i8elevated = 0;
+#endif
+	ASSERT(args->namelen != 1 || args->name[0] != '.');
+	/*
+	 * Replace ..'s entry.
+	 */
+	if (args->namelen == 2 &&
+	    args->name[0] == '.' && args->name[1] == '.') {
+#if XFS_BIG_INUMS || defined(DEBUG)
+		ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent);
+		ASSERT(args->inumber != ino);
+#endif
+		XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, &sfp->hdr.parent);
+	}
+	/*
+	 * Normal entry, look for the name.
+	 */
+	else {
+		for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+		     i < sfp->hdr.count;
+		     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
+			if (sfep->namelen == args->namelen &&
+			    sfep->name[0] == args->name[0] &&
+			    memcmp(args->name, sfep->name, args->namelen) == 0) {
+#if XFS_BIG_INUMS || defined(DEBUG)
+				ino = XFS_DIR2_SF_GET_INUMBER(sfp,
+					XFS_DIR2_SF_INUMBERP(sfep));
+				ASSERT(args->inumber != ino);
+#endif
+				XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber,
+					XFS_DIR2_SF_INUMBERP(sfep));
+				break;
+			}
+		}
+		/*
+		 * Didn't find it.
+		 */
+		if (i == sfp->hdr.count) {
+			ASSERT(args->oknoent);
+#if XFS_BIG_INUMS
+			if (i8elevated)
+				xfs_dir2_sf_toino4(args);
+#endif
+			return XFS_ERROR(ENOENT);
+		}
+	}
+#if XFS_BIG_INUMS
+	/*
+	 * See if the old number was large and the new number is small.
+	 */
+	if (ino > XFS_DIR2_MAX_SHORT_INUM &&
+	    args->inumber <= XFS_DIR2_MAX_SHORT_INUM) {
+		/*
+		 * If the old i8count was one, we need to convert back to
+		 * 4-byte inode numbers.
+		 */
+		if (sfp->hdr.i8count == 1)
+			xfs_dir2_sf_toino4(args);
+		else
+			sfp->hdr.i8count--;
+	}
+	/*
+	 * See if the old number was small and the new number is large.
+	 */
+	if (ino <= XFS_DIR2_MAX_SHORT_INUM &&
+	    args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
+		/*
+		 * Add to the i8count unless we just converted to 8-byte
+		 * inodes (which already implies i8count = 1).
+		 */
+		ASSERT(sfp->hdr.i8count != 0);
+		if (!i8elevated)
+			sfp->hdr.i8count++;
+	}
+#endif
+	xfs_dir2_sf_check(args);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
+	return 0;
+}
+
+#if XFS_BIG_INUMS
+/*
+ * Convert from 8-byte inode numbers to 4-byte inode numbers.
+ * The last 8-byte inode number is gone, but the count is still 1.
+ */
+static void
+xfs_dir2_sf_toino4(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	char			*buf;		/* old dir's buffer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			i;		/* entry index */
+	xfs_ino_t		ino;		/* entry inode number */
+	int			newsize;	/* new inode size */
+	xfs_dir2_sf_entry_t	*oldsfep;	/* old sf entry */
+	xfs_dir2_sf_t		*oldsfp;	/* old sf directory */
+	int			oldsize;	/* old inode size */
+	xfs_dir2_sf_entry_t	*sfep;		/* new sf entry */
+	xfs_dir2_sf_t		*sfp;		/* new sf directory */
+
+	xfs_dir2_trace_args("sf_toino4", args);
+	dp = args->dp;
+
+	/*
+	 * Copy the old directory to the buffer.
+	 * Then nuke it from the inode, and add the new buffer to the inode.
+	 * Don't want xfs_idata_realloc copying the data here.
+	 */
+	oldsize = dp->i_df.if_bytes;
+	buf = kmem_alloc(oldsize, KM_SLEEP);
+	oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(oldsfp->hdr.i8count == 1);
+	memcpy(buf, oldsfp, oldsize);
+	/*
+	 * Compute the new inode size.
+	 */
+	newsize =
+		oldsize -
+		(oldsfp->hdr.count + 1) *
+		((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
+	xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
+	xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
+	/*
+	 * Reset our pointers, the data has moved.
+	 */
+	oldsfp = (xfs_dir2_sf_t *)buf;
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	/*
+	 * Fill in the new header.
+	 */
+	sfp->hdr.count = oldsfp->hdr.count;
+	sfp->hdr.i8count = 0;
+	ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent);
+	XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent);
+	/*
+	 * Copy the entries field by field.
+	 */
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp),
+		    oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp);
+	     i < sfp->hdr.count;
+	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep),
+		  oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) {
+		sfep->namelen = oldsfep->namelen;
+		sfep->offset = oldsfep->offset;
+		memcpy(sfep->name, oldsfep->name, sfep->namelen);
+		ino = XFS_DIR2_SF_GET_INUMBER(oldsfp,
+			XFS_DIR2_SF_INUMBERP(oldsfep));
+		XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep));
+	}
+	/*
+	 * Clean up the inode.
+	 */
+	kmem_free(buf, oldsize);
+	dp->i_d.di_size = newsize;
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+}
+
+/*
+ * Convert from 4-byte inode numbers to 8-byte inode numbers.
+ * The new 8-byte inode number is not there yet; we leave with the
+ * count at 1 but no corresponding entry.
+ */
+static void
+xfs_dir2_sf_toino8(
+	xfs_da_args_t		*args)		/* operation arguments */
+{
+	char			*buf;		/* old dir's buffer */
+	xfs_inode_t		*dp;		/* incore directory inode */
+	int			i;		/* entry index */
+	xfs_ino_t		ino;		/* entry inode number */
+	int			newsize;	/* new inode size */
+	xfs_dir2_sf_entry_t	*oldsfep;	/* old sf entry */
+	xfs_dir2_sf_t		*oldsfp;	/* old sf directory */
+	int			oldsize;	/* old inode size */
+	xfs_dir2_sf_entry_t	*sfep;		/* new sf entry */
+	xfs_dir2_sf_t		*sfp;		/* new sf directory */
+
+	xfs_dir2_trace_args("sf_toino8", args);
+	dp = args->dp;
+
+	/*
+	 * Copy the old directory to the buffer.
+	 * Then nuke it from the inode, and add the new buffer to the inode.
+	 * Don't want xfs_idata_realloc copying the data here.
+	 */
+	oldsize = dp->i_df.if_bytes;
+	buf = kmem_alloc(oldsize, KM_SLEEP);
+	oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(oldsfp->hdr.i8count == 0);
+	memcpy(buf, oldsfp, oldsize);
+	/*
+	 * Compute the new inode size.
+	 */
+	newsize =
+		oldsize +
+		(oldsfp->hdr.count + 1) *
+		((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
+	xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
+	xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
+	/*
+	 * Reset our pointers, the data has moved.
+	 */
+	oldsfp = (xfs_dir2_sf_t *)buf;
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	/*
+	 * Fill in the new header.
+	 */
+	sfp->hdr.count = oldsfp->hdr.count;
+	sfp->hdr.i8count = 1;
+	ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent);
+	XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent);
+	/*
+	 * Copy the entries field by field.
+	 */
+	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp),
+		    oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp);
+	     i < sfp->hdr.count;
+	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep),
+		  oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) {
+		sfep->namelen = oldsfep->namelen;
+		sfep->offset = oldsfep->offset;
+		memcpy(sfep->name, oldsfep->name, sfep->namelen);
+		ino = XFS_DIR2_SF_GET_INUMBER(oldsfp,
+			XFS_DIR2_SF_INUMBERP(oldsfep));
+		XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep));
+	}
+	/*
+	 * Clean up the inode.
+	 */
+	kmem_free(buf, oldsize);
+	dp->i_d.di_size = newsize;
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+}
+#endif	/* XFS_BIG_INUMS */
diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h
new file mode 100644
index 0000000..bac6f5a
--- /dev/null
+++ b/fs/xfs/xfs_dir2_sf.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_SF_H__
+#define	__XFS_DIR2_SF_H__
+
+/*
+ * Directory layout when stored internal to an inode.
+ *
+ * Small directories are packed as tightly as possible so as to
+ * fit into the literal area of the inode.
+ */
+
+struct uio;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_dir2_block;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Maximum size of a shortform directory.
+ */
+#define	XFS_DIR2_SF_MAX_SIZE	\
+	(XFS_DINODE_MAX_SIZE - (uint)sizeof(xfs_dinode_core_t) - \
+	 (uint)sizeof(xfs_agino_t))
+
+/*
+ * Inode number stored as 8 8-bit values.
+ */
+typedef	struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
+
+/*
+ * Inode number stored as 4 8-bit values.
+ * Works a lot of the time, when all the inode numbers in a directory
+ * fit in 32 bits.
+ */
+typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
+
+typedef union {
+	xfs_dir2_ino8_t	i8;
+	xfs_dir2_ino4_t	i4;
+} xfs_dir2_inou_t;
+#define	XFS_DIR2_MAX_SHORT_INUM	((xfs_ino_t)0xffffffffULL)
+
+/*
+ * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
+ * Only 16 bits are needed; this is the byte offset into the single-block
+ * form.
+ */
+typedef struct { __uint8_t i[2]; } xfs_dir2_sf_off_t;
+
+/*
+ * The parent directory has a dedicated field, and the self-pointer must
+ * be calculated on the fly.
+ *
+ * Entries are packed toward the top as tightly as possible.  The header
+ * and the elements must be memcpy'd out into a work area to get correct
+ * alignment for the inode number fields.
+ */
+typedef struct xfs_dir2_sf_hdr {
+	__uint8_t		count;		/* count of entries */
+	__uint8_t		i8count;	/* count of 8-byte inode #s */
+	xfs_dir2_inou_t		parent;		/* parent dir inode number */
+} xfs_dir2_sf_hdr_t;
+
+typedef struct xfs_dir2_sf_entry {
+	__uint8_t		namelen;	/* actual name length */
+	xfs_dir2_sf_off_t	offset;		/* saved offset */
+	__uint8_t		name[1];	/* name, variable size */
+	xfs_dir2_inou_t		inumber;	/* inode number, var. offset */
+} xfs_dir2_sf_entry_t;
+
+typedef struct xfs_dir2_sf {
+	xfs_dir2_sf_hdr_t	hdr;		/* shortform header */
+	xfs_dir2_sf_entry_t	list[1];	/* shortform entries */
+} xfs_dir2_sf_t;
+
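+/*
+ * Worked layout example (illustrative, using the byte-packed on-disk form
+ * described above with 4-byte inode numbers, i.e. i8count == 0):
+ *
+ *	header		count(1) + i8count(1) + parent(4)         =  6 bytes
+ *	entry "foo"	namelen(1) + offset(2) + name(3) + ino(4) = 10 bytes
+ *	entry "quux"	namelen(1) + offset(2) + name(4) + ino(4) = 11 bytes
+ *
+ * so such a two-entry directory takes 6 + 10 + 11 = 27 bytes of inline
+ * data; the XFS_DIR2_SF_HDR_SIZE and XFS_DIR2_SF_ENTSIZE_BYNAME macros
+ * below compute exactly these sizes.
+ */
+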
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_HDR_SIZE)
+int xfs_dir2_sf_hdr_size(int i8count);
+#define	XFS_DIR2_SF_HDR_SIZE(i8count)	xfs_dir2_sf_hdr_size(i8count)
+#else
+#define	XFS_DIR2_SF_HDR_SIZE(i8count)	\
+	((uint)sizeof(xfs_dir2_sf_hdr_t) - \
+	 ((i8count) == 0) * \
+	 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_INUMBERP)
+xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep);
+#define	XFS_DIR2_SF_INUMBERP(sfep)	xfs_dir2_sf_inumberp(sfep)
+#else
+#define	XFS_DIR2_SF_INUMBERP(sfep)	\
+	((xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen])
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_INUMBER)
+xfs_intino_t xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from);
+#define	XFS_DIR2_SF_GET_INUMBER(sfp, from)	\
+	xfs_dir2_sf_get_inumber(sfp, from)
+
+#else
+#define	XFS_DIR2_SF_GET_INUMBER(sfp, from)	\
+	((sfp)->hdr.i8count == 0 ? \
+		(xfs_intino_t)XFS_GET_DIR_INO4((from)->i4) : \
+		(xfs_intino_t)XFS_GET_DIR_INO8((from)->i8))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_INUMBER)
+void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from,
+				     xfs_dir2_inou_t *to);
+#define	XFS_DIR2_SF_PUT_INUMBER(sfp,from,to)	\
+	xfs_dir2_sf_put_inumber(sfp,from,to)
+#else
+#define	XFS_DIR2_SF_PUT_INUMBER(sfp,from,to)	\
+	if ((sfp)->hdr.i8count == 0) { \
+		XFS_PUT_DIR_INO4(*(from), (to)->i4); \
+	} else { \
+		XFS_PUT_DIR_INO8(*(from), (to)->i8); \
+	}
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_OFFSET)
+xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep);
+#define	XFS_DIR2_SF_GET_OFFSET(sfep)	\
+	xfs_dir2_sf_get_offset(sfep)
+#else
+#define	XFS_DIR2_SF_GET_OFFSET(sfep)	\
+	INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_OFFSET)
+void xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep,
+				    xfs_dir2_data_aoff_t off);
+#define	XFS_DIR2_SF_PUT_OFFSET(sfep,off) \
+	xfs_dir2_sf_put_offset(sfep,off)
+#else
+#define	XFS_DIR2_SF_PUT_OFFSET(sfep,off)	\
+	INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i,off)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYNAME)
+int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len);
+#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len)	\
+	xfs_dir2_sf_entsize_byname(sfp,len)
+#else
+#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len)	/* space a name uses */ \
+	((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \
+	 ((sfp)->hdr.i8count == 0) * \
+	 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYENTRY)
+int xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep);
+#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep)	\
+	xfs_dir2_sf_entsize_byentry(sfp,sfep)
+#else
+#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep)	/* space an entry uses */ \
+	((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \
+	 ((sfp)->hdr.i8count == 0) * \
+	 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_FIRSTENTRY)
+xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp);
+#define XFS_DIR2_SF_FIRSTENTRY(sfp)	xfs_dir2_sf_firstentry(sfp)
+#else
+#define XFS_DIR2_SF_FIRSTENTRY(sfp)	/* first entry in struct */ \
+	((xfs_dir2_sf_entry_t *) \
+	 ((char *)(sfp) + XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_NEXTENTRY)
+xfs_dir2_sf_entry_t *xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp,
+					   xfs_dir2_sf_entry_t *sfep);
+#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep)		xfs_dir2_sf_nextentry(sfp,sfep)
+#else
+#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep)		/* next entry in struct */ \
+	((xfs_dir2_sf_entry_t *) \
+		((char *)(sfep) + XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep)))
+#endif
+
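+/*
+ * The FIRSTENTRY/NEXTENTRY pair gives the standard way to walk a
+ * shortform directory, as used throughout xfs_dir2_sf.c:
+ *
+ *	for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
+ *	     i < sfp->hdr.count;
+ *	     i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep))
+ *		... examine sfep ...
+ */
+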
+/*
+ * Functions.
+ */
+
+extern int
+	xfs_dir2_block_sfsize(struct xfs_inode *dp,
+			      struct xfs_dir2_block *block,
+			      xfs_dir2_sf_hdr_t *sfhp);
+
+extern int
+	xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
+			     int size, xfs_dir2_sf_hdr_t *sfhp);
+
+extern int
+	xfs_dir2_sf_addname(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
+
+extern int
+	xfs_dir2_sf_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp,
+			     struct xfs_dirent *dbp, xfs_dir2_put_t put);
+
+extern int
+	xfs_dir2_sf_lookup(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_sf_removename(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_sf_replace(struct xfs_da_args *args);
+
+#endif	/* __XFS_DIR2_SF_H__ */
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
new file mode 100644
index 0000000..9d64173
--- /dev/null
+++ b/fs/xfs/xfs_dir2_trace.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir2_trace.c
+ * Tracing for xfs v2 directories.
+ */
+#include "xfs.h"
+
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_trace.h"
+
+#ifdef XFS_DIR2_TRACE
+ktrace_t	*xfs_dir2_trace_buf;
+
+/*
+ * Enter something in the trace buffers.
+ */
+static void
+xfs_dir2_trace_enter(
+	xfs_inode_t	*dp,
+	int		type,
+	char		*where,
+	char		*name,
+	int		namelen,
+	void		*a0,
+	void		*a1,
+	void		*a2,
+	void		*a3,
+	void		*a4,
+	void		*a5,
+	void		*a6,
+	void		*a7)
+{
+	void		*n[5];
+
+	ASSERT(xfs_dir2_trace_buf);
+	ASSERT(dp->i_dir_trace);
+	if (name)
+		memcpy(n, name, min((int)sizeof(n), namelen));
+	else
+		memset((char *)n, 0, sizeof(n));
+	ktrace_enter(xfs_dir2_trace_buf,
+		(void *)(long)type, (void *)where,
+		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
+		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
+		(void *)(long)namelen,
+		(void *)n[0], (void *)n[1], (void *)n[2],
+		(void *)n[3], (void *)n[4]);
+	ktrace_enter(dp->i_dir_trace,
+		(void *)(long)type, (void *)where,
+		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
+		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
+		(void *)(long)namelen,
+		(void *)n[0], (void *)n[1], (void *)n[2],
+		(void *)n[3], (void *)n[4]);
+}
+
+void
+xfs_dir2_trace_args(
+	char		*where,
+	xfs_da_args_t	*args)
+{
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck, NULL, NULL);
+}
+
+void
+xfs_dir2_trace_args_b(
+	char		*where,
+	xfs_da_args_t	*args,
+	xfs_dabuf_t	*bp)
+{
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck,
+		(void *)(bp ? bp->bps[0] : NULL), NULL);
+}
+
+void
+xfs_dir2_trace_args_bb(
+	char		*where,
+	xfs_da_args_t	*args,
+	xfs_dabuf_t	*lbp,
+	xfs_dabuf_t	*dbp)
+{
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck,
+		(void *)(lbp ? lbp->bps[0] : NULL),
+		(void *)(dbp ? dbp->bps[0] : NULL));
+}
+
+void
+xfs_dir2_trace_args_bibii(
+	char		*where,
+	xfs_da_args_t	*args,
+	xfs_dabuf_t	*bs,
+	int		ss,
+	xfs_dabuf_t	*bd,
+	int		sd,
+	int		c)
+{
+	xfs_buf_t	*bpbs = bs ? bs->bps[0] : NULL;
+	xfs_buf_t	*bpbd = bd ? bd->bps[0] : NULL;
+
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)args->dp, (void *)args->trans,
+		(void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd,
+		(void *)(long)c, NULL);
+}
+
+void
+xfs_dir2_trace_args_db(
+	char		*where,
+	xfs_da_args_t	*args,
+	xfs_dir2_db_t	db,
+	xfs_dabuf_t	*bp)
+{
+	xfs_buf_t	*dbp = bp ? bp->bps[0] : NULL;
+
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck, (void *)(long)db,
+		(void *)dbp);
+}
+
+void
+xfs_dir2_trace_args_i(
+	char		*where,
+	xfs_da_args_t	*args,
+	xfs_ino_t	i)
+{
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck,
+		(void *)((unsigned long)(i >> 32)),
+		(void *)((unsigned long)(i & 0xFFFFFFFF)));
+}
+
+void
+xfs_dir2_trace_args_s(
+	char		*where,
+	xfs_da_args_t	*args,
+	int		s)
+{
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck, (void *)(long)s, NULL);
+}
+
+void
+xfs_dir2_trace_args_sb(
+	char		*where,
+	xfs_da_args_t	*args,
+	int		s,
+	xfs_dabuf_t	*bp)
+{
+	xfs_buf_t	*dbp = bp ? bp->bps[0] : NULL;
+
+	xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where,
+		(char *)args->name, (int)args->namelen,
+		(void *)(unsigned long)args->hashval,
+		(void *)((unsigned long)(args->inumber >> 32)),
+		(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
+		(void *)args->dp, (void *)args->trans,
+		(void *)(unsigned long)args->justcheck, (void *)(long)s,
+		(void *)dbp);
+}
+#endif	/* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h
new file mode 100644
index 0000000..0a178bf
--- /dev/null
+++ b/fs/xfs/xfs_dir2_trace.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR2_TRACE_H__
+#define __XFS_DIR2_TRACE_H__
+
+/*
+ * Tracing for xfs v2 directories.
+ */
+
+#if defined(XFS_DIR2_TRACE)
+
+struct ktrace;
+struct xfs_dabuf;
+struct xfs_da_args;
+
+#define	XFS_DIR2_GTRACE_SIZE		4096	/* global buffer */
+#define	XFS_DIR2_KTRACE_SIZE		32	/* per-inode buffer */
+extern struct ktrace *xfs_dir2_trace_buf;
+
+#define	XFS_DIR2_KTRACE_ARGS		1	/* args only */
+#define	XFS_DIR2_KTRACE_ARGS_B		2	/* args + buffer */
+#define	XFS_DIR2_KTRACE_ARGS_BB		3	/* args + 2 buffers */
+#define	XFS_DIR2_KTRACE_ARGS_DB		4	/* args, db, buffer */
+#define	XFS_DIR2_KTRACE_ARGS_I		5	/* args, inum */
+#define	XFS_DIR2_KTRACE_ARGS_S		6	/* args, int */
+#define	XFS_DIR2_KTRACE_ARGS_SB		7	/* args, int, buffer */
+#define	XFS_DIR2_KTRACE_ARGS_BIBII	8	/* args, buf/int/buf/int/int */
+
+void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
+void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
+			   struct xfs_dabuf *bp);
+void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args,
+			    struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
+void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args,
+			       struct xfs_dabuf *bs, int ss,
+			       struct xfs_dabuf *bd, int sd, int c);
+void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args,
+			    xfs_dir2_db_t db, struct xfs_dabuf *bp);
+void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i);
+void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s);
+void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
+			    struct xfs_dabuf *bp);
+
+#else	/* XFS_DIR2_TRACE */
+
+#define	xfs_dir2_trace_args(where, args)
+#define	xfs_dir2_trace_args_b(where, args, bp)
+#define	xfs_dir2_trace_args_bb(where, args, lbp, dbp)
+#define	xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c)
+#define	xfs_dir2_trace_args_db(where, args, db, bp)
+#define	xfs_dir2_trace_args_i(where, args, i)
+#define	xfs_dir2_trace_args_s(where, args, s)
+#define	xfs_dir2_trace_args_sb(where, args, s, bp)
+
+#endif	/* XFS_DIR2_TRACE */
+
+#endif	/* __XFS_DIR2_TRACE_H__ */
diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c
new file mode 100644
index 0000000..617018d
--- /dev/null
+++ b/fs/xfs/xfs_dir_leaf.c
@@ -0,0 +1,2231 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * xfs_dir_leaf.c
+ *
+ * GROT: figure out how to recover gracefully when bmap returns ENOSPC.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_error.h"
+
+/*
+ * xfs_dir_leaf.c
+ *
+ * Routines to implement leaf blocks of directories as Btrees of hashed names.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Routines used for growing the Btree.
+ */
+STATIC void xfs_dir_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
+					      int insertion_index,
+					      int freemap_index);
+STATIC int xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer,
+					    int musthave, int justcheck);
+STATIC void xfs_dir_leaf_rebalance(xfs_da_state_t *state,
+						  xfs_da_state_blk_t *blk1,
+						  xfs_da_state_blk_t *blk2);
+STATIC int xfs_dir_leaf_figure_balance(xfs_da_state_t *state,
+					  xfs_da_state_blk_t *leaf_blk_1,
+					  xfs_da_state_blk_t *leaf_blk_2,
+					  int *number_entries_in_blk1,
+					  int *number_namebytes_in_blk1);
+
+/*
+ * Utility routines.
+ */
+STATIC void xfs_dir_leaf_moveents(xfs_dir_leafblock_t *src_leaf,
+					      int src_start,
+					      xfs_dir_leafblock_t *dst_leaf,
+					      int dst_start, int move_count,
+					      xfs_mount_t *mp);
+
+
+/*========================================================================
+ * External routines when dirsize < XFS_IFORK_DSIZE(dp).
+ *========================================================================*/
+
+
+/*
+ * Validate a given inode number.
+ */
+int
+xfs_dir_ino_validate(xfs_mount_t *mp, xfs_ino_t ino)
+{
+	xfs_agblock_t	agblkno;
+	xfs_agino_t	agino;
+	xfs_agnumber_t	agno;
+	int		ino_ok;
+	int		ioff;
+
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agblkno = XFS_INO_TO_AGBNO(mp, ino);
+	ioff = XFS_INO_TO_OFFSET(mp, ino);
+	agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff);
+	ino_ok =
+		agno < mp->m_sb.sb_agcount &&
+		agblkno < mp->m_sb.sb_agblocks &&
+		agblkno != 0 &&
+		ioff < (1 << mp->m_sb.sb_inopblog) &&
+		XFS_AGINO_TO_INO(mp, agno, agino) == ino;
+	if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
+			XFS_RANDOM_DIR_INO_VALIDATE))) {
+		xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx",
+				(unsigned long long) ino);
+		XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
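+/*
+ * For orientation (a sketch only -- the exact field widths come from the
+ * superblock geometry): an inode number packs the AG number, the
+ * AG-relative block number and the inode's offset within that block,
+ * roughly
+ *
+ *	agino = (agblkno << sb_inopblog) | ioff;
+ *	ino   = XFS_AGINO_TO_INO(mp, agno, agino);
+ *
+ * so the round-trip comparison above rejects inode numbers with stray
+ * high bits or out-of-range components.
+ */
+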
+/*
+ * Create the initial contents of a shortform directory.
+ */
+int
+xfs_dir_shortform_create(xfs_da_args_t *args, xfs_ino_t parent)
+{
+	xfs_dir_sf_hdr_t *hdr;
+	xfs_inode_t *dp;
+
+	dp = args->dp;
+	ASSERT(dp != NULL);
+	ASSERT(dp->i_d.di_size == 0);
+	if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) {
+		dp->i_df.if_flags &= ~XFS_IFEXTENTS;	/* just in case */
+		dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+		xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+		dp->i_df.if_flags |= XFS_IFINLINE;
+	}
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	ASSERT(dp->i_df.if_bytes == 0);
+	xfs_idata_realloc(dp, sizeof(*hdr), XFS_DATA_FORK);
+	hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data;
+	XFS_DIR_SF_PUT_DIRINO(&parent, &hdr->parent);
+
+	hdr->count = 0;
+	dp->i_d.di_size = sizeof(*hdr);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+	return(0);
+}
+
+/*
+ * Add a name to the shortform directory structure.
+ * Overflow from the inode has already been checked for.
+ */
+int
+xfs_dir_shortform_addname(xfs_da_args_t *args)
+{
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	int i, offset, size;
+	xfs_inode_t *dp;
+
+	dp = args->dp;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Catch the case where the conversion from shortform to leaf
+	 * failed part way through.
+	 */
+	if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
+		if (sfe->namelen == args->namelen &&
+		    args->name[0] == sfe->name[0] &&
+		    memcmp(args->name, sfe->name, args->namelen) == 0)
+			return(XFS_ERROR(EEXIST));
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+
+	offset = (int)((char *)sfe - (char *)sf);
+	size = XFS_DIR_SF_ENTSIZE_BYNAME(args->namelen);
+	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	sfe = (xfs_dir_sf_entry_t *)((char *)sf + offset);
+
+	XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber);
+	sfe->namelen = args->namelen;
+	memcpy(sfe->name, args->name, sfe->namelen);
+	INT_MOD(sf->hdr.count, ARCH_CONVERT, +1);
+
+	dp->i_d.di_size += size;
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+
+	return(0);
+}
+
+/*
+ * Remove a name from the shortform directory structure.
+ */
+int
+xfs_dir_shortform_removename(xfs_da_args_t *args)
+{
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	int base, size = 0, i;
+	xfs_inode_t *dp;
+
+	dp = args->dp;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Catch the case where the conversion from shortform to leaf
+	 * failed part way through.
+	 */
+	if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	base = sizeof(xfs_dir_sf_hdr_t);
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	sfe = &sf->list[0];
+	for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
+		size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe);
+		if (sfe->namelen == args->namelen &&
+		    sfe->name[0] == args->name[0] &&
+		    memcmp(sfe->name, args->name, args->namelen) == 0)
+			break;
+		base += size;
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+	if (i < 0) {
+		ASSERT(args->oknoent);
+		return(XFS_ERROR(ENOENT));
+	}
+
+	if ((base + size) != dp->i_d.di_size) {
+		memmove(&((char *)sf)[base], &((char *)sf)[base+size],
+					      dp->i_d.di_size - (base+size));
+	}
+	INT_MOD(sf->hdr.count, ARCH_CONVERT, -1);
+
+	xfs_idata_realloc(dp, -size, XFS_DATA_FORK);
+	dp->i_d.di_size -= size;
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+
+	return(0);
+}
+
+/*
+ * Look up a name in a shortform directory structure.
+ */
+int
+xfs_dir_shortform_lookup(xfs_da_args_t *args)
+{
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	int i;
+	xfs_inode_t *dp;
+
+	dp = args->dp;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Catch the case where the conversion from shortform to leaf
+	 * failed part way through.
+	 */
+	if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	if (args->namelen == 2 &&
+	    args->name[0] == '.' && args->name[1] == '.') {
+		XFS_DIR_SF_GET_DIRINO(&sf->hdr.parent, &args->inumber);
+		return(XFS_ERROR(EEXIST));
+	}
+	if (args->namelen == 1 && args->name[0] == '.') {
+		args->inumber = dp->i_ino;
+		return(XFS_ERROR(EEXIST));
+	}
+	sfe = &sf->list[0];
+	for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
+		if (sfe->namelen == args->namelen &&
+		    sfe->name[0] == args->name[0] &&
+		    memcmp(args->name, sfe->name, args->namelen) == 0) {
+			XFS_DIR_SF_GET_DIRINO(&sfe->inumber, &args->inumber);
+			return(XFS_ERROR(EEXIST));
+		}
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+	ASSERT(args->oknoent);
+	return(XFS_ERROR(ENOENT));
+}
+
+/*
+ * Convert from using the shortform to the leaf.
+ */
+int
+xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs)
+{
+	xfs_inode_t *dp;
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	xfs_da_args_t args;
+	xfs_ino_t inumber;
+	char *tmpbuffer;
+	int retval, i, size;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+
+	dp = iargs->dp;
+	/*
+	 * Catch the case where the conversion from shortform to leaf
+	 * failed part way through.
+	 */
+	if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	size = dp->i_df.if_bytes;
+	tmpbuffer = kmem_alloc(size, KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+
+	memcpy(tmpbuffer, dp->i_df.if_u1.if_data, size);
+
+	sf = (xfs_dir_shortform_t *)tmpbuffer;
+	XFS_DIR_SF_GET_DIRINO(&sf->hdr.parent, &inumber);
+
+	xfs_idata_realloc(dp, -size, XFS_DATA_FORK);
+	dp->i_d.di_size = 0;
+	xfs_trans_log_inode(iargs->trans, dp, XFS_ILOG_CORE);
+	retval = xfs_da_grow_inode(iargs, &blkno);
+	if (retval)
+		goto out;
+
+	ASSERT(blkno == 0);
+	retval = xfs_dir_leaf_create(iargs, blkno, &bp);
+	if (retval)
+		goto out;
+	xfs_da_buf_done(bp);
+
+	args.name = ".";
+	args.namelen = 1;
+	args.hashval = xfs_dir_hash_dot;
+	args.inumber = dp->i_ino;
+	args.dp = dp;
+	args.firstblock = iargs->firstblock;
+	args.flist = iargs->flist;
+	args.total = iargs->total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = iargs->trans;
+	args.justcheck = 0;
+	args.addname = args.oknoent = 1;
+	retval = xfs_dir_leaf_addname(&args);
+	if (retval)
+		goto out;
+
+	args.name = "..";
+	args.namelen = 2;
+	args.hashval = xfs_dir_hash_dotdot;
+	args.inumber = inumber;
+	retval = xfs_dir_leaf_addname(&args);
+	if (retval)
+		goto out;
+
+	sfe = &sf->list[0];
+	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+		args.name = (char *)(sfe->name);
+		args.namelen = sfe->namelen;
+		args.hashval = xfs_da_hashname((char *)(sfe->name),
+					       sfe->namelen);
+		XFS_DIR_SF_GET_DIRINO(&sfe->inumber, &args.inumber);
+		retval = xfs_dir_leaf_addname(&args);
+		if (retval)
+			goto out;
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+	retval = 0;
+
+out:
+	kmem_free(tmpbuffer, size);
+	return(retval);
+}
+
+STATIC int
+xfs_dir_shortform_compare(const void *a, const void *b)
+{
+	xfs_dir_sf_sort_t *sa, *sb;
+
+	sa = (xfs_dir_sf_sort_t *)a;
+	sb = (xfs_dir_sf_sort_t *)b;
+	if (sa->hash < sb->hash)
+		return -1;
+	else if (sa->hash > sb->hash)
+		return 1;
+	else
+		return sa->entno - sb->entno;
+}
+
+/*
+ * Copy out directory entries for getdents(), for shortform directories.
+ */
+/*ARGSUSED*/
+int
+xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp,
+				       xfs_dirent_t *dbp, xfs_dir_put_t put)
+{
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	int retval, i, sbsize, nsbuf, lastresid=0, want_entno;
+	xfs_mount_t *mp;
+	xfs_dahash_t cookhash, hash;
+	xfs_dir_put_args_t p;
+	xfs_dir_sf_sort_t *sbuf, *sbp;
+
+	mp = dp->i_mount;
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
+	want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset);
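+	/*
+	 * Size the sort buffer for every shortform entry plus "." and "..",
+	 * with one extra slot for the XFS_DA_MAXHASH sentinel stuffed in
+	 * after sorting.
+	 */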
+	nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2;
+	sbsize = (nsbuf + 1) * sizeof(*sbuf);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+
+	xfs_dir_trace_g_du("sf: start", dp, uio);
+
+	/*
+	 * Collect all the entries into the buffer.
+	 * Entry 0 is .
+	 */
+	sbp->entno = 0;
+	sbp->seqno = 0;
+	sbp->hash = xfs_dir_hash_dot;
+	sbp->ino = dp->i_ino;
+	sbp->name = ".";
+	sbp->namelen = 1;
+	sbp++;
+
+	/*
+	 * Entry 1 is ..
+	 */
+	sbp->entno = 1;
+	sbp->seqno = 0;
+	sbp->hash = xfs_dir_hash_dotdot;
+	sbp->ino = XFS_GET_DIR_INO8(sf->hdr.parent);
+	sbp->name = "..";
+	sbp->namelen = 2;
+	sbp++;
+
+	/*
+	 * Scan the directory data for the rest of the entries.
+	 */
+	for (i = 0, sfe = &sf->list[0];
+			i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+
+		if (unlikely(
+		    ((char *)sfe < (char *)sf) ||
+		    ((char *)sfe >= ((char *)sf + dp->i_df.if_bytes)))) {
+			xfs_dir_trace_g_du("sf: corrupted", dp, uio);
+			XFS_CORRUPTION_ERROR("xfs_dir_shortform_getdents",
+					     XFS_ERRLEVEL_LOW, mp, sfe);
+			kmem_free(sbuf, sbsize);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+
+		sbp->entno = i + 2;
+		sbp->seqno = 0;
+		sbp->hash = xfs_da_hashname((char *)sfe->name, sfe->namelen);
+		sbp->ino = XFS_GET_DIR_INO8(sfe->inumber);
+		sbp->name = (char *)sfe->name;
+		sbp->namelen = sfe->namelen;
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+		sbp++;
+	}
+
+	/*
+	 * Sort the entries on hash then entno.
+	 */
+	qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_dir_shortform_compare);
+	/*
+	 * Stuff in last entry.
+	 */
+	sbp->entno = nsbuf;
+	sbp->hash = XFS_DA_MAXHASH;
+	sbp->seqno = 0;
+	/*
+	 * Figure out the sequence numbers in case there's a hash duplicate.
+	 */
+	for (hash = sbuf->hash, sbp = sbuf + 1;
+				sbp < &sbuf[nsbuf + 1]; sbp++) {
+		if (sbp->hash == hash)
+			sbp->seqno = sbp[-1].seqno + 1;
+		else
+			hash = sbp->hash;
+	}
+
+	/*
+	 * Set up put routine.
+	 */
+	p.dbp = dbp;
+	p.put = put;
+	p.uio = uio;
+
+	/*
+	 * Find our place.
+	 */
+	for (sbp = sbuf; sbp < &sbuf[nsbuf + 1]; sbp++) {
+		if (sbp->hash > cookhash ||
+		    (sbp->hash == cookhash && sbp->seqno >= want_entno))
+			break;
+	}
+
+	/*
+	 * Did we fail to find anything?  We stop at the last entry,
+	 * the one we put maxhash into.
+	 */
+	if (sbp == &sbuf[nsbuf]) {
+		kmem_free(sbuf, sbsize);
+		xfs_dir_trace_g_du("sf: hash beyond end", dp, uio);
+		uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH);
+		*eofp = 1;
+		return 0;
+	}
+
+	/*
+	 * Loop putting entries into the user buffer.
+	 */
+	while (sbp < &sbuf[nsbuf]) {
+		/*
+		 * Save the first resid in a run of equal-hashval entries
+		 * so that we can back them out if they don't all fit.
+		 */
+		if (sbp->seqno == 0 || sbp == sbuf)
+			lastresid = uio->uio_resid;
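+		/*
+		 * The cookie handed out with this entry refers to the *next*
+		 * entry (sbp[1]); the maxhash sentinel slot guarantees that
+		 * sbp[1] is valid even for the last real entry.
+		 */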
+		XFS_PUT_COOKIE(p.cook, mp, 0, sbp[1].seqno, sbp[1].hash);
+		p.ino = sbp->ino;
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = sbp->name;
+		p.namelen = sbp->namelen;
+		retval = p.put(&p);
+		if (!p.done) {
+			uio->uio_offset =
+				XFS_DA_MAKE_COOKIE(mp, 0, 0, sbp->hash);
+			kmem_free(sbuf, sbsize);
+			uio->uio_resid = lastresid;
+			xfs_dir_trace_g_du("sf: E-O-B", dp, uio);
+			return retval;
+		}
+		sbp++;
+	}
+	kmem_free(sbuf, sbsize);
+	uio->uio_offset = p.cook.o;
+	*eofp = 1;
+	xfs_dir_trace_g_du("sf: E-O-F", dp, uio);
+	return 0;
+}
+
+/*
+ * Look up a name in a shortform directory structure and replace its inode number.
+ */
+int
+xfs_dir_shortform_replace(xfs_da_args_t *args)
+{
+	xfs_dir_shortform_t *sf;
+	xfs_dir_sf_entry_t *sfe;
+	xfs_inode_t *dp;
+	int i;
+
+	dp = args->dp;
+	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+	/*
+	 * Catch the case where the conversion from shortform to leaf
+	 * failed part way through.
+	 */
+	if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
+	if (args->namelen == 2 &&
+	    args->name[0] == '.' && args->name[1] == '.') {
+		/* XXX - replace assert? */
+		XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sf->hdr.parent);
+		xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
+		return(0);
+	}
+	ASSERT(args->namelen != 1 || args->name[0] != '.');
+	sfe = &sf->list[0];
+	for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
+		if (sfe->namelen == args->namelen &&
+		    sfe->name[0] == args->name[0] &&
+		    memcmp(args->name, sfe->name, args->namelen) == 0) {
+			ASSERT(memcmp((char *)&args->inumber,
+				(char *)&sfe->inumber, sizeof(xfs_ino_t)));
+			XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber);
+			xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
+			return(0);
+		}
+		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
+	}
+	ASSERT(args->oknoent);
+	return(XFS_ERROR(ENOENT));
+}
+
+/*
+ * Convert a leaf directory to shortform structure
+ */
+int
+xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_hdr_t *hdr;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+	xfs_da_args_t args;
+	xfs_inode_t *dp;
+	xfs_ino_t parent;
+	char *tmpbuffer;
+	int retval, i;
+	xfs_dabuf_t *bp;
+
+	dp = iargs->dp;
+	tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+
+	retval = xfs_da_read_buf(iargs->trans, iargs->dp, 0, -1, &bp,
+					       XFS_DATA_FORK);
+	if (retval)
+		goto out;
+	ASSERT(bp != NULL);
+	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
+	leaf = (xfs_dir_leafblock_t *)tmpbuffer;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
+
+	/*
+	 * Find and special case the parent inode number
+	 */
+	hdr = &leaf->hdr;
+	entry = &leaf->entries[0];
+	for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) {
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+		if ((entry->namelen == 2) &&
+		    (namest->name[0] == '.') &&
+		    (namest->name[1] == '.')) {
+			XFS_DIR_SF_GET_DIRINO(&namest->inumber, &parent);
+			entry->nameidx = 0;
+		} else if ((entry->namelen == 1) && (namest->name[0] == '.')) {
+			entry->nameidx = 0;
+		}
+	}
+	retval = xfs_da_shrink_inode(iargs, 0, bp);
+	if (retval)
+		goto out;
+	retval = xfs_dir_shortform_create(iargs, parent);
+	if (retval)
+		goto out;
+
+	/*
+	 * Copy the rest of the filenames
+	 */
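+	/*
+	 * Entries whose nameidx was zeroed above ("." and "..") are skipped;
+	 * shortform keeps only the parent inode plus the remaining names.
+	 */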
+	entry = &leaf->entries[0];
+	args.dp = dp;
+	args.firstblock = iargs->firstblock;
+	args.flist = iargs->flist;
+	args.total = iargs->total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = iargs->trans;
+	args.justcheck = 0;
+	args.addname = args.oknoent = 1;
+	for (i = 0; i < INT_GET(hdr->count, ARCH_CONVERT); entry++, i++) {
+		if (!entry->nameidx)
+			continue;
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+		args.name = (char *)(namest->name);
+		args.namelen = entry->namelen;
+		args.hashval = INT_GET(entry->hashval, ARCH_CONVERT);
+		XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args.inumber);
+		xfs_dir_shortform_addname(&args);
+	}
+
+out:
+	kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount));
+	return(retval);
+}
+
+/*
+ * Convert from using a single leaf to a root node and a leaf.
+ */
+int
+xfs_dir_leaf_to_node(xfs_da_args_t *args)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_da_intnode_t *node;
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp1, *bp2;
+	xfs_dablk_t blkno;
+	int retval;
+
+	dp = args->dp;
+	retval = xfs_da_grow_inode(args, &blkno);
+	ASSERT(blkno == 1);
+	if (retval)
+		return(retval);
+	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
+					      XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp1 != NULL);
+	retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2,
+					     XFS_DATA_FORK);
+	if (retval) {
+		xfs_da_buf_done(bp1);
+		return(retval);
+	}
+	ASSERT(bp2 != NULL);
+	memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
+	xfs_da_buf_done(bp1);
+	xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
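+	/*
+	 * The old leaf contents now live in block 1; block 0 is rebuilt
+	 * below as a single-entry root node whose btree[0] entry points at
+	 * the leaf and carries its highest hashval.
+	 */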
+
+	/*
+	 * Set up the new root node.
+	 */
+	retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK);
+	if (retval) {
+		xfs_da_buf_done(bp2);
+		return(retval);
+	}
+	node = bp1->data;
+	leaf = bp2->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT));
+	xfs_da_buf_done(bp2);
+	INT_SET(node->btree[0].before, ARCH_CONVERT, blkno);
+	INT_SET(node->hdr.count, ARCH_CONVERT, 1);
+	xfs_da_log_buf(args->trans, bp1,
+		XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0])));
+	xfs_da_buf_done(bp1);
+
+	return(retval);
+}
+
+
+/*========================================================================
+ * Routines used for growing the Btree.
+ *========================================================================*/
+
+/*
+ * Create the initial contents of a leaf directory
+ * or a leaf in a node directory.
+ */
+int
+xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_hdr_t *hdr;
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int retval;
+
+	dp = args->dp;
+	ASSERT(dp != NULL);
+	retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK);
+	if (retval)
+		return(retval);
+	ASSERT(bp != NULL);
+	leaf = bp->data;
+	memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
+	hdr = &leaf->hdr;
+	INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC);
+	INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
+	if (!hdr->firstused)
+		INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1);
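+	/*
+	 * Names are allocated downward from the end of the block, so the
+	 * single initial free region runs from the end of the header up
+	 * to firstused.
+	 */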
+	INT_SET(hdr->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t));
+	INT_SET(hdr->freemap[0].size, ARCH_CONVERT, INT_GET(hdr->firstused, ARCH_CONVERT) - INT_GET(hdr->freemap[0].base, ARCH_CONVERT));
+
+	xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
+
+	*bpp = bp;
+	return(0);
+}
+
+/*
+ * Split the leaf node, rebalance, then add the new entry.
+ */
+int
+xfs_dir_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
+				  xfs_da_state_blk_t *newblk)
+{
+	xfs_dablk_t blkno;
+	xfs_da_args_t *args;
+	int error;
+
+	/*
+	 * Allocate space for a new leaf node.
+	 */
+	args = state->args;
+	ASSERT(args != NULL);
+	ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC);
+	error = xfs_da_grow_inode(args, &blkno);
+	if (error)
+		return(error);
+	error = xfs_dir_leaf_create(args, blkno, &newblk->bp);
+	if (error)
+		return(error);
+	newblk->blkno = blkno;
+	newblk->magic = XFS_DIR_LEAF_MAGIC;
+
+	/*
+	 * Rebalance the entries across the two leaves.
+	 */
+	xfs_dir_leaf_rebalance(state, oldblk, newblk);
+	error = xfs_da_blk_link(state, oldblk, newblk);
+	if (error)
+		return(error);
+
+	/*
+	 * Insert the new entry in the correct block.
+	 */
+	if (state->inleaf) {
+		error = xfs_dir_leaf_add(oldblk->bp, args, oldblk->index);
+	} else {
+		error = xfs_dir_leaf_add(newblk->bp, args, newblk->index);
+	}
+
+	/*
+	 * Update last hashval in each block since we added the name.
+	 */
+	oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL);
+	newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL);
+	return(error);
+}
+
+/*
+ * Add a name to the leaf directory structure.
+ *
+ * Must take into account fragmented leaves and leaves where the freemap has
+ * lost some freespace information (i.e., holes).
+ */
+int
+xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_hdr_t *hdr;
+	xfs_dir_leaf_map_t *map;
+	int tablesize, entsize, sum, i, tmp, error;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT)));
+	hdr = &leaf->hdr;
+	entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen);
+
+	/*
+	 * Search through freemap for first-fit on new name length.
+	 * (may need to figure in size of entry struct too)
+	 */
+	tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) * (uint)sizeof(xfs_dir_leaf_entry_t)
+			+ (uint)sizeof(xfs_dir_leaf_hdr_t);
+	map = &hdr->freemap[XFS_DIR_LEAF_MAPSIZE-1];
+	for (sum = 0, i = XFS_DIR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
+		if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) {
+			sum += INT_GET(map->size, ARCH_CONVERT);
+			continue;
+		}
+		if (!map->size)
+			continue;	/* no space in this map */
+		tmp = entsize;
+		if (INT_GET(map->base, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT))
+			tmp += (uint)sizeof(xfs_dir_leaf_entry_t);
+		if (INT_GET(map->size, ARCH_CONVERT) >= tmp) {
+			if (!args->justcheck)
+				xfs_dir_leaf_add_work(bp, args, index, i);
+			return(0);
+		}
+		sum += INT_GET(map->size, ARCH_CONVERT);
+	}
+
+	/*
+	 * If there are no holes in the address space of the block,
+	 * and we don't have enough freespace, then compaction will do us
+	 * no good and we should just give up.
+	 */
+	if (!hdr->holes && (sum < entsize))
+		return(XFS_ERROR(ENOSPC));
+
+	/*
+	 * Compact the entries to coalesce free space.
+	 * Pass the justcheck flag so the checking pass can return
+	 * an error, without changing anything, if it won't fit.
+	 */
+	error = xfs_dir_leaf_compact(args->trans, bp,
+			args->total == 0 ?
+				entsize +
+				(uint)sizeof(xfs_dir_leaf_entry_t) : 0,
+			args->justcheck);
+	if (error)
+		return(error);
+	/*
+	 * After compaction, the block is guaranteed to have only one
+	 * free region, in freemap[0].  If it is not big enough, give up.
+	 */
+	if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) <
+	    (entsize + (uint)sizeof(xfs_dir_leaf_entry_t)))
+		return(XFS_ERROR(ENOSPC));
+
+	if (!args->justcheck)
+		xfs_dir_leaf_add_work(bp, args, index, 0);
+	return(0);
+}
+
+/*
+ * Add a name to a leaf directory structure.
+ */
+STATIC void
+xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index,
+		      int mapindex)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_hdr_t *hdr;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+	xfs_dir_leaf_map_t *map;
+	/* REFERENCED */
+	xfs_mount_t *mp;
+	int tmp, i;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	hdr = &leaf->hdr;
+	ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE));
+	ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT)));
+
+	/*
+	 * Force open some space in the entry array and fill it in.
+	 */
+	entry = &leaf->entries[index];
+	if (index < INT_GET(hdr->count, ARCH_CONVERT)) {
+		tmp  = INT_GET(hdr->count, ARCH_CONVERT) - index;
+		tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
+		memmove(entry + 1, entry, tmp);
+		xfs_da_log_buf(args->trans, bp,
+		    XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
+	}
+	INT_MOD(hdr->count, ARCH_CONVERT, +1);
+
+	/*
+	 * Allocate space for the new string (at the end of the run).
+	 */
+	map = &hdr->freemap[mapindex];
+	mp = args->trans->t_mountp;
+	ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
+	ASSERT(INT_GET(map->size, ARCH_CONVERT) >= XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen));
+	ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
+	INT_MOD(map->size, ARCH_CONVERT, -(XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen)));
+	INT_SET(entry->nameidx, ARCH_CONVERT, INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT));
+	INT_SET(entry->hashval, ARCH_CONVERT, args->hashval);
+	entry->namelen = args->namelen;
+	xfs_da_log_buf(args->trans, bp,
+	    XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
+
+	/*
+	 * Copy the string and inode number into the new space.
+	 */
+	namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+	XFS_DIR_SF_PUT_DIRINO(&args->inumber, &namest->inumber);
+	memcpy(namest->name, args->name, args->namelen);
+	xfs_da_log_buf(args->trans, bp,
+	    XFS_DA_LOGRANGE(leaf, namest, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry)));
+
+	/*
+	 * Update the control info for this leaf node
+	 */
+	if (INT_GET(entry->nameidx, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT))
+		INT_COPY(hdr->firstused, entry->nameidx, ARCH_CONVERT);
+	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr)));
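+	/*
+	 * The entry table grew by one entry above; any freemap region that
+	 * began exactly at the old end of the table must be pushed up past
+	 * the new entry and shrunk to match.
+	 */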
+	tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) * (uint)sizeof(xfs_dir_leaf_entry_t)
+			+ (uint)sizeof(xfs_dir_leaf_hdr_t);
+	map = &hdr->freemap[0];
+	for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) {
+		if (INT_GET(map->base, ARCH_CONVERT) == tmp) {
+			INT_MOD(map->base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t));
+			INT_MOD(map->size, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t)));
+		}
+	}
+	INT_MOD(hdr->namebytes, ARCH_CONVERT, args->namelen);
+	xfs_da_log_buf(args->trans, bp,
+		XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+}
+
+/*
+ * Garbage collect a leaf directory block by copying it to a new buffer.
+ */
+STATIC int
+xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
+		     int justcheck)
+{
+	xfs_dir_leafblock_t *leaf_s, *leaf_d;
+	xfs_dir_leaf_hdr_t *hdr_s, *hdr_d;
+	xfs_mount_t *mp;
+	char *tmpbuffer;
+	char *tmpbuffer2=NULL;
+	int rval;
+	int lbsize;
+
+	mp = trans->t_mountp;
+	lbsize = XFS_LBSIZE(mp);
+	tmpbuffer = kmem_alloc(lbsize, KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+	memcpy(tmpbuffer, bp->data, lbsize);
+
+	/*
+	 * Make a second copy in case xfs_dir_leaf_moveents()
+	 * below destroys the original.
+	 */
+	if (musthave || justcheck) {
+		tmpbuffer2 = kmem_alloc(lbsize, KM_SLEEP);
+		memcpy(tmpbuffer2, bp->data, lbsize);
+	}
+	memset(bp->data, 0, lbsize);
+
+	/*
+	 * Copy basic information
+	 */
+	leaf_s = (xfs_dir_leafblock_t *)tmpbuffer;
+	leaf_d = bp->data;
+	hdr_s = &leaf_s->hdr;
+	hdr_d = &leaf_d->hdr;
+	hdr_d->info = hdr_s->info;	/* struct copy */
+	INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize);
+	if (!hdr_d->firstused)
+		INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize - 1);
+	hdr_d->namebytes = 0;
+	hdr_d->count = 0;
+	hdr_d->holes = 0;
+	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t));
+	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate filenames packed and in sequence.
+	 * This changes the source (leaf_s) as well.
+	 */
+	xfs_dir_leaf_moveents(leaf_s, 0, leaf_d, 0, (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
+
+	if (musthave && INT_GET(hdr_d->freemap[0].size, ARCH_CONVERT) < musthave)
+		rval = XFS_ERROR(ENOSPC);
+	else
+		rval = 0;
+
+	if (justcheck || rval == ENOSPC) {
+		ASSERT(tmpbuffer2);
+		memcpy(bp->data, tmpbuffer2, lbsize);
+	} else {
+		xfs_da_log_buf(trans, bp, 0, lbsize - 1);
+	}
+
+	kmem_free(tmpbuffer, lbsize);
+	if (musthave || justcheck)
+		kmem_free(tmpbuffer2, lbsize);
+	return(rval);
+}
+
+/*
+ * Redistribute the directory entries between two leaf nodes,
+ * taking into account the size of the new entry.
+ *
+ * NOTE: if new block is empty, then it will get the upper half of old block.
+ */
+STATIC void
+xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
+				      xfs_da_state_blk_t *blk2)
+{
+	xfs_da_state_blk_t *tmp_blk;
+	xfs_dir_leafblock_t *leaf1, *leaf2;
+	xfs_dir_leaf_hdr_t *hdr1, *hdr2;
+	int count, totallen, max, space, swap;
+
+	/*
+	 * Set up environment.
+	 */
+	ASSERT(blk1->magic == XFS_DIR_LEAF_MAGIC);
+	ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC);
+	leaf1 = blk1->bp->data;
+	leaf2 = blk2->bp->data;
+	ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+
+	/*
+	 * Check ordering of blocks, reverse if it makes things simpler.
+	 */
+	swap = 0;
+	if (xfs_dir_leaf_order(blk1->bp, blk2->bp)) {
+		tmp_blk = blk1;
+		blk1 = blk2;
+		blk2 = tmp_blk;
+		leaf1 = blk1->bp->data;
+		leaf2 = blk2->bp->data;
+		swap = 1;
+	}
+	hdr1 = &leaf1->hdr;
+	hdr2 = &leaf2->hdr;
+
+	/*
+	 * Examine entries until we reduce the absolute difference in
+	 * byte usage between the two blocks to a minimum.  Then get
+	 * the direction to copy and the number of elements to move.
+	 */
+	state->inleaf = xfs_dir_leaf_figure_balance(state, blk1, blk2,
+							   &count, &totallen);
+	if (swap)
+		state->inleaf = !state->inleaf;
+
+	/*
+	 * Move any entries required from leaf to leaf:
+	 */
+	if (count < INT_GET(hdr1->count, ARCH_CONVERT)) {
+		/*
+		 * Figure the total bytes to be added to the destination leaf.
+		 */
+		count = INT_GET(hdr1->count, ARCH_CONVERT) - count;	/* number entries being moved */
+		space  = INT_GET(hdr1->namebytes, ARCH_CONVERT) - totallen;
+		space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1);
+		space += count * (uint)sizeof(xfs_dir_leaf_entry_t);
+
+		/*
+		 * leaf2 is the destination, compact it if it looks tight.
+		 */
+		max  = INT_GET(hdr2->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t);
+		max -= INT_GET(hdr2->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
+		if (space > max) {
+			xfs_dir_leaf_compact(state->args->trans, blk2->bp,
+								 0, 0);
+		}
+
+		/*
+		 * Move high entries from leaf1 to low end of leaf2.
+		 */
+		xfs_dir_leaf_moveents(leaf1, INT_GET(hdr1->count, ARCH_CONVERT) - count,
+					     leaf2, 0, count, state->mp);
+
+		xfs_da_log_buf(state->args->trans, blk1->bp, 0,
+						   state->blocksize-1);
+		xfs_da_log_buf(state->args->trans, blk2->bp, 0,
+						   state->blocksize-1);
+
+	} else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) {
+		/*
+		 * Figure the total bytes to be added to the destination leaf.
+		 */
+		count -= INT_GET(hdr1->count, ARCH_CONVERT);		/* number entries being moved */
+		space  = totallen - INT_GET(hdr1->namebytes, ARCH_CONVERT);
+		space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1);
+		space += count * (uint)sizeof(xfs_dir_leaf_entry_t);
+
+		/*
+		 * leaf1 is the destination, compact it if it looks tight.
+		 */
+		max  = INT_GET(hdr1->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t);
+		max -= INT_GET(hdr1->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
+		if (space > max) {
+			xfs_dir_leaf_compact(state->args->trans, blk1->bp,
+								 0, 0);
+		}
+
+		/*
+		 * Move low entries from leaf2 to high end of leaf1.
+		 */
+		xfs_dir_leaf_moveents(leaf2, 0, leaf1, (int)INT_GET(hdr1->count, ARCH_CONVERT),
+					     count, state->mp);
+
+		xfs_da_log_buf(state->args->trans, blk1->bp, 0,
+						   state->blocksize-1);
+		xfs_da_log_buf(state->args->trans, blk2->bp, 0,
+						   state->blocksize-1);
+	}
+
+	/*
+	 * Copy out last hashval in each block for B-tree code.
+	 */
+	blk1->hashval = INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+	blk2->hashval = INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+
+	/*
+	 * Adjust the expected index for insertion.
+	 * GROT: this doesn't work unless blk2 was originally empty.
+	 */
+	if (!state->inleaf) {
+		blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+	}
+}
+
+/*
+ * Examine entries until we reduce the absolute difference in
+ * byte usage between the two blocks to a minimum.
+ * GROT: Is this really necessary?  With other than a 512 byte blocksize,
+ * GROT: there will always be enough room in either block for a new entry.
+ * GROT: Do a double-split for this case?
+ */
+STATIC int
+xfs_dir_leaf_figure_balance(xfs_da_state_t *state,
+					   xfs_da_state_blk_t *blk1,
+					   xfs_da_state_blk_t *blk2,
+					   int *countarg, int *namebytesarg)
+{
+	xfs_dir_leafblock_t *leaf1, *leaf2;
+	xfs_dir_leaf_hdr_t *hdr1, *hdr2;
+	xfs_dir_leaf_entry_t *entry;
+	int count, max, totallen, half;
+	int lastdelta, foundit, tmp;
+
+	/*
+	 * Set up environment.
+	 */
+	leaf1 = blk1->bp->data;
+	leaf2 = blk2->bp->data;
+	hdr1 = &leaf1->hdr;
+	hdr2 = &leaf2->hdr;
+	foundit = 0;
+	totallen = 0;
+
+	/*
+	 * Examine entries until we reduce the absolute difference in
+	 * byte usage between the two blocks to a minimum.
+	 */
+	max = INT_GET(hdr1->count, ARCH_CONVERT) + INT_GET(hdr2->count, ARCH_CONVERT);
+	half  = (max+1) * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1);
+	half += INT_GET(hdr1->namebytes, ARCH_CONVERT) + INT_GET(hdr2->namebytes, ARCH_CONVERT) + state->args->namelen;
+	half /= 2;
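+	/*
+	 * "half" is the target byte count: roughly half of what all the
+	 * entries (including the new one) will consume between entry
+	 * structs and name storage.  The loop below accumulates entry
+	 * sizes and stops once it gets as close to this target as it can.
+	 */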
+	lastdelta = state->blocksize;
+	entry = &leaf1->entries[0];
+	for (count = 0; count < max; entry++, count++) {
+
+#define XFS_DIR_ABS(A)	(((A) < 0) ? -(A) : (A))
+		/*
+		 * The new entry is in the first block, account for it.
+		 */
+		if (count == blk1->index) {
+			tmp = totallen + (uint)sizeof(*entry)
+				+ XFS_DIR_LEAF_ENTSIZE_BYNAME(state->args->namelen);
+			if (XFS_DIR_ABS(half - tmp) > lastdelta)
+				break;
+			lastdelta = XFS_DIR_ABS(half - tmp);
+			totallen = tmp;
+			foundit = 1;
+		}
+
+		/*
+		 * Wrap around into the second block if necessary.
+		 */
+		if (count == INT_GET(hdr1->count, ARCH_CONVERT)) {
+			leaf1 = leaf2;
+			entry = &leaf1->entries[0];
+		}
+
+		/*
+		 * Figure out if next leaf entry would be too much.
+		 */
+		tmp = totallen + (uint)sizeof(*entry)
+				+ XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry);
+		if (XFS_DIR_ABS(half - tmp) > lastdelta)
+			break;
+		lastdelta = XFS_DIR_ABS(half - tmp);
+		totallen = tmp;
+#undef XFS_DIR_ABS
+	}
+
+	/*
+	 * Calculate the number of namebytes that will end up in the lower block.
+	 * If the new entry is not in the lower block, fix up the count.
+	 */
+	totallen -=
+		count * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1);
+	if (foundit) {
+		totallen -= (sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1) +
+			    state->args->namelen;
+	}
+
+	*countarg = count;
+	*namebytesarg = totallen;
+	return(foundit);
+}
+
+/*========================================================================
+ * Routines used for shrinking the Btree.
+ *========================================================================*/
+
+/*
+ * Check a leaf block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor.  Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
+ */
+int
+xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *info;
+	int count, bytes, forward, error, retval, i;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+
+	/*
+	 * Check for the degenerate case of the block being over 50% full.
+	 * If so, it's not worth even looking to see if we might be able
+	 * to coalesce with a sibling.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	info = blk->bp->data;
+	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	leaf = (xfs_dir_leafblock_t *)info;
+	count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) +
+		count * (uint)sizeof(xfs_dir_leaf_entry_t) +
+		count * ((uint)sizeof(xfs_dir_leaf_name_t)-1) +
+		INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+	if (bytes > (state->blocksize >> 1)) {
+		*action = 0;	/* blk over 50%, don't try to join */
+		return(0);
+	}
+
+	/*
+	 * Check for the degenerate case of the block being empty.
+	 * If the block is empty, we'll simply delete it, no need to
+	 * coalesce it with a sibling block.  We choose (arbitrarily)
+	 * to merge with the forward block unless it is NULL.
+	 */
+	if (count == 0) {
+		/*
+		 * Make altpath point to the block we want to keep and
+		 * path point to the block we want to drop (this one).
+		 */
+		forward = info->forw;
+		memcpy(&state->altpath, &state->path, sizeof(state->path));
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+		if (error)
+			return(error);
+		if (retval) {
+			*action = 0;
+		} else {
+			*action = 2;
+		}
+		return(0);
+	}
+
+	/*
+	 * Examine each sibling block to see if we can coalesce with
+	 * at least 25% free space to spare.  We need to figure out
+	 * whether to merge with the forward or the backward block.
+	 * We prefer coalescing with the lower numbered sibling so as
+	 * to shrink a directory over time.
+	 */
+	forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT));	/* start with smaller blk num */
+	for (i = 0; i < 2; forward = !forward, i++) {
+		if (forward)
+			blkno = INT_GET(info->forw, ARCH_CONVERT);
+		else
+			blkno = INT_GET(info->back, ARCH_CONVERT);
+		if (blkno == 0)
+			continue;
+		error = xfs_da_read_buf(state->args->trans, state->args->dp,
+							    blkno, -1, &bp,
+							    XFS_DATA_FORK);
+		if (error)
+			return(error);
+		ASSERT(bp != NULL);
+
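+		/*
+		 * Start from 75% of the block size and subtract the space
+		 * both blocks consume; a non-negative result means the
+		 * merged block would still have at least 25% free.
+		 */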
+		leaf = (xfs_dir_leafblock_t *)info;
+		count  = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		bytes  = state->blocksize - (state->blocksize>>2);
+		bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+		leaf = bp->data;
+		ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+		count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+		bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
+		bytes -= count * (uint)sizeof(xfs_dir_leaf_entry_t);
+		bytes -= (uint)sizeof(xfs_dir_leaf_hdr_t);
+		if (bytes >= 0)
+			break;	/* fits with at least 25% to spare */
+
+		xfs_da_brelse(state->args->trans, bp);
+	}
+	if (i >= 2) {
+		*action = 0;
+		return(0);
+	}
+	xfs_da_buf_done(bp);
+
+	/*
+	 * Make altpath point to the block we want to keep (the lower
+	 * numbered block) and path point to the block we want to drop.
+	 */
+	memcpy(&state->altpath, &state->path, sizeof(state->path));
+	if (blkno < blk->blkno) {
+		error = xfs_da_path_shift(state, &state->altpath, forward,
+						 0, &retval);
+	} else {
+		error = xfs_da_path_shift(state, &state->path, forward,
+						 0, &retval);
+	}
+	if (error)
+		return(error);
+	if (retval) {
+		*action = 0;
+	} else {
+		*action = 1;
+	}
+	return(0);
+}
+
+/*
+ * Remove a name from the leaf directory structure.
+ *
+ * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
+ * If two leaves are 37% full, when combined they will leave 25% free.
+ */
+int
+xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_hdr_t *hdr;
+	xfs_dir_leaf_map_t *map;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+	int before, after, smallest, entsize;
+	int tablesize, tmp, i;
+	xfs_mount_t *mp;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	hdr = &leaf->hdr;
+	mp = trans->t_mountp;
+	ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
+	ASSERT((index >= 0) && (index < INT_GET(hdr->count, ARCH_CONVERT)));
+	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr)));
+	entry = &leaf->entries[index];
+	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT));
+	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
+
+	/*
+	 * Scan through free region table:
+	 *    check for adjacency of free'd entry with an existing one,
+	 *    find smallest free region in case we need to replace it,
+	 *    adjust any map that borders the entry table,
+	 */
+	tablesize = INT_GET(hdr->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)
+			+ (uint)sizeof(xfs_dir_leaf_hdr_t);
+	map = &hdr->freemap[0];
+	tmp = INT_GET(map->size, ARCH_CONVERT);
+	before = after = -1;
+	smallest = XFS_DIR_LEAF_MAPSIZE - 1;
+	entsize = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry);
+	for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) {
+		ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
+		ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
+		if (INT_GET(map->base, ARCH_CONVERT) == tablesize) {
+			INT_MOD(map->base, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t)));
+			INT_MOD(map->size, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t));
+		}
+
+		if ((INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)) == INT_GET(entry->nameidx, ARCH_CONVERT)) {
+			before = i;
+		} else if (INT_GET(map->base, ARCH_CONVERT) == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) {
+			after = i;
+		} else if (INT_GET(map->size, ARCH_CONVERT) < tmp) {
+			tmp = INT_GET(map->size, ARCH_CONVERT);
+			smallest = i;
+		}
+	}
+
+	/*
+	 * Coalesce adjacent freemap regions,
+	 * or replace the smallest region.
+	 */
+	if ((before >= 0) || (after >= 0)) {
+		if ((before >= 0) && (after >= 0)) {
+			map = &hdr->freemap[before];
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+			INT_MOD(map->size, ARCH_CONVERT, INT_GET(hdr->freemap[after].size, ARCH_CONVERT));
+			hdr->freemap[after].base = 0;
+			hdr->freemap[after].size = 0;
+		} else if (before >= 0) {
+			map = &hdr->freemap[before];
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+		} else {
+			map = &hdr->freemap[after];
+			INT_COPY(map->base, entry->nameidx, ARCH_CONVERT);
+			INT_MOD(map->size, ARCH_CONVERT, entsize);
+		}
+	} else {
+		/*
+		 * Replace smallest region (if it is smaller than free'd entry)
+		 */
+		map = &hdr->freemap[smallest];
+		if (INT_GET(map->size, ARCH_CONVERT) < entsize) {
+			INT_COPY(map->base, entry->nameidx, ARCH_CONVERT);
+			INT_SET(map->size, ARCH_CONVERT, entsize);
+		}
+	}
+
+	/*
+	 * Did we remove the first entry?
+	 */
+	if (INT_GET(entry->nameidx, ARCH_CONVERT) == INT_GET(hdr->firstused, ARCH_CONVERT))
+		smallest = 1;
+	else
+		smallest = 0;
+
+	/*
+	 * Compress the remaining entries and zero out the removed stuff.
+	 */
+	namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+	memset((char *)namest, 0, entsize);
+	xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, namest, entsize));
+
+	INT_MOD(hdr->namebytes, ARCH_CONVERT, -(entry->namelen));
+	tmp = (INT_GET(hdr->count, ARCH_CONVERT) - index) * (uint)sizeof(xfs_dir_leaf_entry_t);
+	memmove(entry, entry + 1, tmp);
+	INT_MOD(hdr->count, ARCH_CONVERT, -1);
+	xfs_da_log_buf(trans, bp,
+	    XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
+	entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
+	memset((char *)entry, 0, sizeof(xfs_dir_leaf_entry_t));
+
+	/*
+	 * If we removed the first entry, re-find the first used byte
+	 * in the name area.  Note that if the entry was the "firstused",
+	 * then we don't have a "hole" in our block resulting from
+	 * removing the name.
+	 */
+	if (smallest) {
+		tmp = XFS_LBSIZE(mp);
+		entry = &leaf->entries[0];
+		for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) {
+			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT));
+			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
+			if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp)
+				tmp = INT_GET(entry->nameidx, ARCH_CONVERT);
+		}
+		INT_SET(hdr->firstused, ARCH_CONVERT, tmp);
+		if (!hdr->firstused)
+			INT_SET(hdr->firstused, ARCH_CONVERT, tmp - 1);
+	} else {
+		hdr->holes = 1;		/* mark as needing compaction */
+	}
+
+	xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+
+	/*
+	 * Check if the leaf is less than 37% full (mp->m_dir_magicpct); if
+	 * so, the caller may want to "join" the leaf with a sibling.
+	 */
+	tmp  = (uint)sizeof(xfs_dir_leaf_hdr_t);
+	tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
+	tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
+	tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+	if (tmp < mp->m_dir_magicpct)
+		return(1);			/* leaf is < 37% full */
+	return(0);
+}
+
+/*
+ * Move all the directory entries from drop_leaf into save_leaf.
+ */
+void
+xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
+				      xfs_da_state_blk_t *save_blk)
+{
+	xfs_dir_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
+	xfs_dir_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
+	xfs_mount_t *mp;
+	char *tmpbuffer;
+
+	/*
+	 * Set up environment.
+	 */
+	mp = state->mp;
+	ASSERT(drop_blk->magic == XFS_DIR_LEAF_MAGIC);
+	ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC);
+	drop_leaf = drop_blk->bp->data;
+	save_leaf = save_blk->bp->data;
+	ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	drop_hdr = &drop_leaf->hdr;
+	save_hdr = &save_leaf->hdr;
+
+	/*
+	 * Save last hashval from dying block for later Btree fixup.
+	 */
+	drop_blk->hashval = INT_GET(drop_leaf->entries[ drop_leaf->hdr.count-1 ].hashval, ARCH_CONVERT);
+
+	/*
+	 * Check whether we need a temp buffer, or whether we can do it in place.
+	 * Note that we don't check "leaf" for holes because we will
+	 * always be dropping it; toosmall() decided that for us already.
+	 */
+	if (save_hdr->holes == 0) {
+		/*
+		 * dest leaf has no holes, so we add there.  May need
+		 * to make some room in the entry array.
+		 */
+		if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) {
+			xfs_dir_leaf_moveents(drop_leaf, 0, save_leaf, 0,
+						 (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+		} else {
+			xfs_dir_leaf_moveents(drop_leaf, 0,
+					      save_leaf, INT_GET(save_hdr->count, ARCH_CONVERT),
+					      (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+		}
+	} else {
+		/*
+		 * Destination has holes, so we make a temporary copy
+		 * of the leaf and add them both to that.
+		 */
+		tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
+		ASSERT(tmpbuffer != NULL);
+		memset(tmpbuffer, 0, state->blocksize);
+		tmp_leaf = (xfs_dir_leafblock_t *)tmpbuffer;
+		tmp_hdr = &tmp_leaf->hdr;
+		tmp_hdr->info = save_hdr->info;	/* struct copy */
+		tmp_hdr->count = 0;
+		INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize);
+		if (!tmp_hdr->firstused)
+			INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize - 1);
+		tmp_hdr->namebytes = 0;
+		if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) {
+			xfs_dir_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
+						 (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+			xfs_dir_leaf_moveents(save_leaf, 0,
+					      tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
+					      (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp);
+		} else {
+			xfs_dir_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
+						 (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp);
+			xfs_dir_leaf_moveents(drop_leaf, 0,
+					      tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
+					      (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+		}
+		memcpy(save_leaf, tmp_leaf, state->blocksize);
+		kmem_free(tmpbuffer, state->blocksize);
+	}
+
+	xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
+					   state->blocksize - 1);
+
+	/*
+	 * Copy out last hashval in each block for B-tree code.
+	 */
+	save_blk->hashval = INT_GET(save_leaf->entries[ INT_GET(save_leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+}
+
+/*========================================================================
+ * Routines used for finding things in the Btree.
+ *========================================================================*/
+
+/*
+ * Look up a name in a leaf directory structure.
+ * This is the internal routine; it uses the caller's buffer.
+ *
+ * Note that duplicate keys are allowed, but we only check within the
+ * current leaf node.  The Btree code must check in adjacent leaf nodes.
+ *
+ * Return in *index the index into the entry[] array of either the found
+ * entry, or where the entry should have been (insert before that entry).
+ *
+ * Don't change the args->inumber unless we find the filename.
+ */
+int
+xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index)
+{
+	xfs_dir_leafblock_t *leaf;
+	xfs_dir_leaf_entry_t *entry;
+	xfs_dir_leaf_name_t *namest;
+	int probe, span;
+	xfs_dahash_t hashval;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8));
+
+	/*
+	 * Binary search.  (note: small blocks will skip this loop)
+	 */
+	hashval = args->hashval;
+	probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2;
+	for (entry = &leaf->entries[probe]; span > 4;
+		   entry = &leaf->entries[probe]) {
+		span /= 2;
+		if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)
+			probe += span;
+		else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval)
+			probe -= span;
+		else
+			break;
+	}
+	ASSERT((probe >= 0) &&
+	       ((!leaf->hdr.count) || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))));
+	ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) == hashval));
+
+	/*
+	 * Since we may have duplicate hashval's, find the first matching
+	 * hashval in the leaf.
+	 */
+	while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) >= hashval)) {
+		entry--;
+		probe--;
+	}
+	while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) {
+		entry++;
+		probe++;
+	}
+	if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) {
+		*index = probe;
+		ASSERT(args->oknoent);
+		return(XFS_ERROR(ENOENT));
+	}
+
+	/*
+	 * Duplicate keys may be present, so search all of them for a match.
+	 */
+	while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)) {
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
+		if (entry->namelen == args->namelen &&
+		    namest->name[0] == args->name[0] &&
+		    memcmp(args->name, namest->name, args->namelen) == 0) {
+			XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args->inumber);
+			*index = probe;
+			return(XFS_ERROR(EEXIST));
+		}
+		entry++;
+		probe++;
+	}
+	*index = probe;
+	ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent);
+	return(XFS_ERROR(ENOENT));
+}
+
+/*========================================================================
+ * Utility routines.
+ *========================================================================*/
+
+/*
+ * Move the indicated entries from one leaf to another.
+ * NOTE: this routine modifies both source and destination leaves.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
+		      xfs_dir_leafblock_t *leaf_d, int start_d,
+		      int count, xfs_mount_t *mp)
+{
+	xfs_dir_leaf_hdr_t *hdr_s, *hdr_d;
+	xfs_dir_leaf_entry_t *entry_s, *entry_d;
+	int tmp, i;
+
+	/*
+	 * Check for nothing to do.
+	 */
+	if (count == 0)
+		return;
+
+	/*
+	 * Set up environment.
+	 */
+	ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	hdr_s = &leaf_s->hdr;
+	hdr_d = &leaf_d->hdr;
+	ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
+	ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >=
+		((INT_GET(hdr_s->count, ARCH_CONVERT)*sizeof(*entry_s))+sizeof(*hdr_s)));
+	ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8));
+	ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >=
+		((INT_GET(hdr_d->count, ARCH_CONVERT)*sizeof(*entry_d))+sizeof(*hdr_d)));
+
+	ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT));
+	ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT));
+	ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT));
+
+	/*
+	 * Move the entries in the destination leaf up to make a hole?
+	 */
+	if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
+		tmp  = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
+		tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
+		entry_s = &leaf_d->entries[start_d];
+		entry_d = &leaf_d->entries[start_d + count];
+		memmove(entry_d, entry_s, tmp);	/* ranges may overlap */
+	}
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate filenames packed and in sequence.
+	 */
+	entry_s = &leaf_s->entries[start_s];
+	entry_d = &leaf_d->entries[start_d];
+	for (i = 0; i < count; entry_s++, entry_d++, i++) {
+		ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) >= INT_GET(hdr_s->firstused, ARCH_CONVERT));
+		tmp = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry_s);
+		INT_MOD(hdr_d->firstused, ARCH_CONVERT, -(tmp));
+		entry_d->hashval = entry_s->hashval; /* INT_: direct copy */
+		INT_COPY(entry_d->nameidx, hdr_d->firstused, ARCH_CONVERT);
+		entry_d->namelen = entry_s->namelen;
+		ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
+		memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)),
+		       XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), tmp);
+		ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
+		memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
+		      0, tmp);
+		INT_MOD(hdr_s->namebytes, ARCH_CONVERT, -(entry_d->namelen));
+		INT_MOD(hdr_d->namebytes, ARCH_CONVERT, entry_d->namelen);
+		INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+		INT_MOD(hdr_d->count, ARCH_CONVERT, +1);
+		tmp  = INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)
+				+ (uint)sizeof(xfs_dir_leaf_hdr_t);
+		ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
+
+	}
+
+	/*
+	 * Zero out the entries we just copied.
+	 */
+	if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
+		tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
+		entry_s = &leaf_s->entries[start_s];
+		ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
+		memset((char *)entry_s, 0, tmp);
+	} else {
+		/*
+		 * Move the remaining entries down to fill the hole,
+		 * then zero the entries at the top.
+		 */
+		tmp  = INT_GET(hdr_s->count, ARCH_CONVERT) - count;
+		tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
+		entry_s = &leaf_s->entries[start_s + count];
+		entry_d = &leaf_s->entries[start_s];
+		memmove(entry_d, entry_s, tmp);	/* ranges may overlap */
+
+		tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
+		entry_s = &leaf_s->entries[INT_GET(hdr_s->count, ARCH_CONVERT)];
+		ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
+		memset((char *)entry_s, 0, tmp);
+	}
+
+	/*
+	 * Fill in the freemap information
+	 */
+	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_hdr_t));
+	INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t));
+	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+	INT_SET(hdr_d->freemap[1].base, ARCH_CONVERT, (hdr_d->freemap[2].base = 0));
+	INT_SET(hdr_d->freemap[1].size, ARCH_CONVERT, (hdr_d->freemap[2].size = 0));
+	hdr_s->holes = 1;	/* leaf may not be compact */
+}
+
+/*
+ * Compare the "order" of two leaf blocks.
+ */
+int
+xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
+{
+	xfs_dir_leafblock_t *leaf1, *leaf2;
+
+	leaf1 = leaf1_bp->data;
+	leaf2 = leaf2_bp->data;
+	ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) &&
+	       (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC));
+	if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) &&
+	    ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) <
+	      INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) ||
+	     (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
+	      INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
+		return(1);
+	}
+	return(0);
+}
+
+/*
+ * Pick up the last hash value from a leaf block.
+ */
+xfs_dahash_t
+xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count)
+{
+	xfs_dir_leafblock_t *leaf;
+
+	leaf = bp->data;
+	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+	if (count)
+		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	if (!leaf->hdr.count)
+		return(0);
+	return(INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT));
+}
+
+/*
+ * Copy out directory entries for getdents(), for leaf directories.
+ */
+int
+xfs_dir_leaf_getdents_int(
+	xfs_dabuf_t	*bp,
+	xfs_inode_t	*dp,
+	xfs_dablk_t	bno,
+	uio_t		*uio,
+	int		*eobp,
+	xfs_dirent_t	*dbp,
+	xfs_dir_put_t	put,
+	xfs_daddr_t		nextda)
+{
+	xfs_dir_leafblock_t	*leaf;
+	xfs_dir_leaf_entry_t	*entry;
+	xfs_dir_leaf_name_t	*namest;
+	int			entno, want_entno, i, nextentno;
+	xfs_mount_t		*mp;
+	xfs_dahash_t		cookhash;
+	xfs_dahash_t		nexthash = 0;
+#if (BITS_PER_LONG == 32)
+	xfs_dahash_t		lasthash = XFS_DA_MAXHASH;
+#endif
+	xfs_dir_put_args_t	p;
+
+	mp = dp->i_mount;
+	leaf = bp->data;
+	if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) {
+		*eobp = 1;
+		return(XFS_ERROR(ENOENT));	/* XXX wrong code */
+	}
+
+	want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset);
+
+	cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
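+	/*
+	 * The caller's uio_offset is an encoded da cookie: it carries the
+	 * hashval to resume at plus an entry number to pick out one entry
+	 * within a run of equal hashvals.
+	 */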
+
+	xfs_dir_trace_g_dul("leaf: start", dp, uio, leaf);
+
+	/*
+	 * Re-find our place.
+	 */
+	for (i = entno = 0, entry = &leaf->entries[0];
+		     i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
+			     entry++, i++) {
+
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
+				    INT_GET(entry->nameidx, ARCH_CONVERT));
+
+		if (unlikely(
+		    ((char *)namest < (char *)leaf) ||
+		    ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)))) {
+			XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(1)",
+					     XFS_ERRLEVEL_LOW, mp, leaf);
+			xfs_dir_trace_g_du("leaf: corrupted", dp, uio);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		if (INT_GET(entry->hashval, ARCH_CONVERT) >= cookhash) {
+			if (   entno < want_entno
+			    && INT_GET(entry->hashval, ARCH_CONVERT)
+							== cookhash) {
+				/*
+				 * Trying to get to a particular offset in a
+				 * run of equal-hashval entries.
+				 */
+				entno++;
+			} else if (   want_entno > 0
+				   && entno == want_entno
+				   && INT_GET(entry->hashval, ARCH_CONVERT)
+							== cookhash) {
+				break;
+			} else {
+				entno = 0;
+				break;
+			}
+		}
+	}
+
+	if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
+		xfs_dir_trace_g_du("leaf: hash not found", dp, uio);
+		if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))
+			uio->uio_offset =
+				XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH);
+		/*
+		 * Don't set uio_offset if there's another block:
+		 * the node code will be setting uio_offset anyway.
+		 */
+		*eobp = 0;
+		return(0);
+	}
+	xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry);
+
+	p.dbp = dbp;
+	p.put = put;
+	p.uio = uio;
+
+	/*
+	 * We're synchronized; start copying entries out to the user.
+	 */
+	for (; entno >= 0 && i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
+			     entry++, i++, (entno = nextentno)) {
+		int lastresid=0, retval;
+		xfs_dircook_t lastoffset;
+		xfs_dahash_t thishash;
+
+		/*
+		 * Check for a damaged directory leaf block and pick up
+		 * the inode number from this entry.
+		 */
+		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
+				    INT_GET(entry->nameidx, ARCH_CONVERT));
+
+		if (unlikely(
+		    ((char *)namest < (char *)leaf) ||
+		    ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)))) {
+			XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(2)",
+					     XFS_ERRLEVEL_LOW, mp, leaf);
+			xfs_dir_trace_g_du("leaf: corrupted", dp, uio);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+
+		xfs_dir_trace_g_duc("leaf: middle cookie  ",
+						   dp, uio, p.cook.o);
+
+		if (i < (INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1)) {
+			nexthash = INT_GET(entry[1].hashval, ARCH_CONVERT);
+
+			if (nexthash == INT_GET(entry->hashval, ARCH_CONVERT))
+				nextentno = entno + 1;
+			else
+				nextentno = 0;
+			XFS_PUT_COOKIE(p.cook, mp, bno, nextentno, nexthash);
+			xfs_dir_trace_g_duc("leaf: middle cookie  ",
+						   dp, uio, p.cook.o);
+
+		} else if ((thishash = INT_GET(leaf->hdr.info.forw,
+							ARCH_CONVERT))) {
+			xfs_dabuf_t *bp2;
+			xfs_dir_leafblock_t *leaf2;
+
+			ASSERT(nextda != -1);
+
+			retval = xfs_da_read_buf(dp->i_transp, dp, thishash,
+						 nextda, &bp2, XFS_DATA_FORK);
+			if (retval)
+				return(retval);
+
+			ASSERT(bp2 != NULL);
+
+			leaf2 = bp2->data;
+
+			if (unlikely(
+			       (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
+						!= XFS_DIR_LEAF_MAGIC)
+			    || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT)
+						!= bno))) {	/* GROT */
+				XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)",
+						     XFS_ERRLEVEL_LOW, mp,
+						     leaf2);
+				xfs_da_brelse(dp->i_transp, bp2);
+
+				return(XFS_ERROR(EFSCORRUPTED));
+			}
+
+			nexthash = INT_GET(leaf2->entries[0].hashval,
+								ARCH_CONVERT);
+			nextentno = -1;
+			XFS_PUT_COOKIE(p.cook, mp, thishash, 0, nexthash);
+			xfs_da_brelse(dp->i_transp, bp2);
+			xfs_dir_trace_g_duc("leaf: next blk cookie",
+						   dp, uio, p.cook.o);
+		} else {
+			nextentno = -1;
+			XFS_PUT_COOKIE(p.cook, mp, 0, 0, XFS_DA_MAXHASH);
+		}
+
+		/*
+		 * Save off the cookie so we can fall back should the
+		 * 'put' into the outgoing buffer fail.  To handle a run
+		 * of equal-hashvals, the off_t structure on 64bit
+		 * builds has entno built into the cookie to ID the
+		 * entry.  On 32bit builds, we only have space for the
+		 * hashval so we can't ID specific entries within a group
+		 * of same-hashval entries.  For this, lastoffset is set
+		 * to the first in the run of equal hashvals so we don't
+		 * include any entries unless we can include all entries
+		 * that share the same hashval.  Hopefully the buffer
+		 * provided is big enough to handle it (see pv763517).
+		 */
+#if (BITS_PER_LONG == 32)
+		if ((thishash = INT_GET(entry->hashval, ARCH_CONVERT))
+								!= lasthash) {
+			XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
+			lastresid = uio->uio_resid;
+			lasthash = thishash;
+		} else {
+			xfs_dir_trace_g_duc("leaf: DUP COOKIES, skipped",
+						   dp, uio, p.cook.o);
+		}
+#else
+		thishash = INT_GET(entry->hashval, ARCH_CONVERT);
+		XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
+		lastresid = uio->uio_resid;
+#endif /* BITS_PER_LONG == 32 */
+
+		/*
+		 * Put the current entry into the outgoing buffer.  If we fail
+		 * then restore the UIO to the first entry in the current
+		 * run of equal-hashval entries (probably only 1 entry long).
+		 */
+		p.ino = XFS_GET_DIR_INO8(namest->inumber);
+#if XFS_BIG_INUMS
+		p.ino += mp->m_inoadd;
+#endif
+		p.name = (char *)namest->name;
+		p.namelen = entry->namelen;
+
+		retval = p.put(&p);
+
+		if (!p.done) {
+			uio->uio_offset = lastoffset.o;
+			uio->uio_resid = lastresid;
+
+			*eobp = 1;
+
+			xfs_dir_trace_g_du("leaf: E-O-B", dp, uio);
+
+			return(retval);
+		}
+	}
+
+	uio->uio_offset = p.cook.o;
+
+	*eobp = 0;
+
+	xfs_dir_trace_g_du("leaf: E-O-F", dp, uio);
+
+	return(0);
+}
+
+/*
+ * Format a dirent64 structure and copy it out to the user's buffer.
+ */
+int
+xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa)
+{
+	iovec_t *iovp;
+	int reclen, namelen;
+	xfs_dirent_t *idbp;
+	uio_t *uio;
+
+	namelen = pa->namelen;
+	reclen = DIRENTSIZE(namelen);
+	uio = pa->uio;
+	if (reclen > uio->uio_resid) {
+		pa->done = 0;
+		return 0;
+	}
+	iovp = uio->uio_iov;
+	idbp = (xfs_dirent_t *)iovp->iov_base;
+	iovp->iov_base = (char *)idbp + reclen;
+	iovp->iov_len -= reclen;
+	uio->uio_resid -= reclen;
+	idbp->d_reclen = reclen;
+	idbp->d_ino = pa->ino;
+	idbp->d_off = pa->cook.o;
+	idbp->d_name[namelen] = '\0';
+	pa->done = 1;
+	memcpy(idbp->d_name, pa->name, namelen);
+	return 0;
+}
+
+/*
+ * Format a dirent64 structure and copy it out to the user's buffer.
+ */
+int
+xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa)
+{
+	int		retval, reclen, namelen;
+	xfs_dirent_t	*idbp;
+	uio_t		*uio;
+
+	namelen = pa->namelen;
+	reclen = DIRENTSIZE(namelen);
+	uio = pa->uio;
+	if (reclen > uio->uio_resid) {
+		pa->done = 0;
+		return 0;
+	}
+	idbp = pa->dbp;
+	idbp->d_reclen = reclen;
+	idbp->d_ino = pa->ino;
+	idbp->d_off = pa->cook.o;
+	idbp->d_name[namelen] = '\0';
+	memcpy(idbp->d_name, pa->name, namelen);
+	retval = uio_read((caddr_t)idbp, reclen, uio);
+	pa->done = (retval == 0);
+	return retval;
+}
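
The two put routines above pack variable-length dirent records into the
caller's buffer, NUL-terminate the name, and advance the uio bookkeeping,
signalling "didn't fit" through pa->done.  A minimal userspace sketch of the
same record-packing idea; the struct layout and the 8-byte rounding below are
illustrative assumptions, not the kernel's xfs_dirent_t/DIRENTSIZE definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative record header: inode, cookie, record length, then the name. */
struct demo_dirent {
	uint64_t d_ino;
	uint64_t d_off;
	uint16_t d_reclen;
	char     d_name[];		/* NUL-terminated, variable length */
};

/* Round the whole record up to an 8-byte boundary. */
static size_t demo_direntsize(size_t namelen)
{
	size_t raw = sizeof(struct demo_dirent) + namelen + 1;

	return (raw + 7) & ~(size_t)7;
}

/*
 * Append one record at *used.  Returns 1 and advances *used if the record
 * fit, 0 if the caller's buffer is exhausted (the pa->done == 0 case above).
 */
static int demo_put(char *buf, size_t buflen, size_t *used,
		    uint64_t ino, uint64_t cookie, const char *name)
{
	size_t namelen = strlen(name);
	size_t reclen = demo_direntsize(namelen);
	struct demo_dirent *de;

	if (reclen > buflen - *used)
		return 0;
	de = (struct demo_dirent *)(buf + *used);
	de->d_ino = ino;
	de->d_off = cookie;
	de->d_reclen = (uint16_t)reclen;
	memcpy(de->d_name, name, namelen);
	de->d_name[namelen] = '\0';
	*used += reclen;
	return 1;
}

int main(void)
{
	uint64_t storage[32];		/* 8-byte aligned backing buffer */
	char *buf = (char *)storage;
	size_t used = 0;

	demo_put(buf, sizeof(storage), &used, 128, 1, ".");
	demo_put(buf, sizeof(storage), &used, 64, 2, "..");
	demo_put(buf, sizeof(storage), &used, 4242, 3, "lost+found");
	printf("packed %zu bytes of dirent records\n", used);
	return 0;
}
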
diff --git a/fs/xfs/xfs_dir_leaf.h b/fs/xfs/xfs_dir_leaf.h
new file mode 100644
index 0000000..00d68d3
--- /dev/null
+++ b/fs/xfs/xfs_dir_leaf.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR_LEAF_H__
+#define	__XFS_DIR_LEAF_H__
+
+/*
+ * Directory layout, internal structure, access macros, etc.
+ *
+ * Large directories are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Filenames are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of a filename may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ */
+
+struct uio;
+struct xfs_bmap_free;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_da_state;
+struct xfs_da_state_blk;
+struct xfs_dir_put_args;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*========================================================================
+ * Directory Structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This is the structure of the leaf nodes in the Btree.
+ *
+ * Struct leaf_entry's are packed from the top.  Names grow from the bottom
+ * but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such;
+ * smaller runs are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the namelist area and try again.  If we
+ * still don't have enough space, then we have to split the block.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual string.  The root and intermediate node search always takes
+ * the first-in-the-block key match found, so we should only have to work
+ * "forw"ard.  If none matches, continue with the "forw"ard leaf nodes
+ * until the hash key changes or the filename is found.
+ *
+ * The parent directory and the self-pointer are explicitly represented
+ * (ie: there are entries for "." and "..").
+ *
+ * Note that the count being a __uint16_t limits us to something like a
+ * blocksize of 1.3MB in the face of worst case (short) filenames.
+ */
+#define XFS_DIR_LEAF_MAPSIZE	3	/* how many freespace slots */
+
+typedef struct xfs_dir_leafblock {
+	struct xfs_dir_leaf_hdr {	/* constant-structure header block */
+		xfs_da_blkinfo_t info;	/* block type, links, etc. */
+		__uint16_t count;	/* count of active leaf_entry's */
+		__uint16_t namebytes;	/* num bytes of name strings stored */
+		__uint16_t firstused;	/* first used byte in name area */
+		__uint8_t  holes;	/* != 0 if blk needs compaction */
+		__uint8_t  pad1;
+		struct xfs_dir_leaf_map {/* RLE map of free bytes */
+			__uint16_t base; /* base of free region */
+			__uint16_t size; /* run length of free region */
+		} freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */
+	} hdr;
+	struct xfs_dir_leaf_entry {	/* sorted on key, not name */
+		xfs_dahash_t hashval;	/* hash value of name */
+		__uint16_t nameidx;	/* index into buffer of name */
+		__uint8_t namelen;	/* length of name string */
+		__uint8_t pad2;
+	} entries[1];			/* var sized array */
+	struct xfs_dir_leaf_name {
+		xfs_dir_ino_t inumber;	/* inode number for this key */
+		__uint8_t name[1];	/* name string itself */
+	} namelist[1];			/* grows from bottom of buf */
+} xfs_dir_leafblock_t;
+typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t;
+typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t;
+typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t;
+typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t;
+
+/*
+ * Length of name for which a 512-byte block filesystem
+ * can get a double split.
+ */
+#define	XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN	\
+	(512 - (uint)sizeof(xfs_dir_leaf_hdr_t) - \
+	 (uint)sizeof(xfs_dir_leaf_entry_t) * 2 - \
+	 (uint)sizeof(xfs_dir_leaf_name_t) * 2 - (MAXNAMELEN - 2) + 1 + 1)
+
+typedef int (*xfs_dir_put_t)(struct xfs_dir_put_args *pa);
+
+typedef union {
+	xfs_off_t		o;		/* offset (cookie) */
+	/*
+	 * Watch the order here (endian-ness dependent).
+	 */
+	struct {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+		xfs_dahash_t	h;	/* hash value */
+		__uint32_t	be;	/* block and entry */
+#else	/* __BYTE_ORDER == __BIG_ENDIAN */
+		__uint32_t	be;	/* block and entry */
+		xfs_dahash_t	h;	/* hash value */
+#endif	/* __BYTE_ORDER == __BIG_ENDIAN */
+	} s;
+} xfs_dircook_t;
+
+#define	XFS_PUT_COOKIE(c,mp,bno,entry,hash)	\
+	((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
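
A rough userspace sketch of the cookie layout that XFS_PUT_COOKIE fills in: one
32-bit half carries a packed block/entry pair and the other carries the hash
value, so a later seekdir can get back to the right leaf block and hash run.
The 21/11-bit block/entry split below is only an assumption for illustration;
the real packing comes from XFS_DA_MAKE_BNOENTRY() and the mount geometry.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative split of the 32-bit block/entry half: 21 bits of block
 * number, 11 bits of entry index.
 */
#define DEMO_ENTRY_BITS	11
#define DEMO_ENTRY_MASK	((1u << DEMO_ENTRY_BITS) - 1)

static uint64_t demo_make_cookie(uint32_t bno, uint32_t entry, uint32_t hash)
{
	uint32_t be = (bno << DEMO_ENTRY_BITS) | (entry & DEMO_ENTRY_MASK);

	/* block/entry in the high half, hash value in the low half */
	return ((uint64_t)be << 32) | hash;
}

static void demo_split_cookie(uint64_t cookie, uint32_t *bno,
			      uint32_t *entry, uint32_t *hash)
{
	uint32_t be = (uint32_t)(cookie >> 32);

	*bno = be >> DEMO_ENTRY_BITS;
	*entry = be & DEMO_ENTRY_MASK;
	*hash = (uint32_t)cookie;
}

int main(void)
{
	uint32_t bno, entry, hash;
	uint64_t cookie = demo_make_cookie(7, 3, 0xdeadbeef);

	demo_split_cookie(cookie, &bno, &entry, &hash);
	printf("bno=%u entry=%u hash=0x%x\n", bno, entry, hash);
	return 0;
}
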
+
+typedef struct xfs_dir_put_args
+{
+	xfs_dircook_t	cook;		/* cookie of (next) entry */
+	xfs_intino_t	ino;		/* inode number */
+	struct xfs_dirent	*dbp;		/* buffer pointer */
+	char		*name;		/* directory entry name */
+	int		namelen;	/* length of name */
+	int		done;		/* output: set if value was stored */
+	xfs_dir_put_t	put;		/* put function ptr (i/o) */
+	struct uio	*uio;		/* uio control structure */
+} xfs_dir_put_args_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYNAME)
+int xfs_dir_leaf_entsize_byname(int len);
+#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len)	xfs_dir_leaf_entsize_byname(len)
+#else
+#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len)	/* space a name will use */ \
+	((uint)sizeof(xfs_dir_leaf_name_t)-1 + len)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYENTRY)
+int xfs_dir_leaf_entsize_byentry(xfs_dir_leaf_entry_t *entry);
+#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry)	\
+	xfs_dir_leaf_entsize_byentry(entry)
+#else
+#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry)	/* space an entry will use */ \
+	((uint)sizeof(xfs_dir_leaf_name_t)-1 + (entry)->namelen)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_NAMESTRUCT)
+xfs_dir_leaf_name_t *
+xfs_dir_leaf_namestruct(xfs_dir_leafblock_t *leafp, int offset);
+#define XFS_DIR_LEAF_NAMESTRUCT(leafp,offset)	\
+	xfs_dir_leaf_namestruct(leafp,offset)
+#else
+#define XFS_DIR_LEAF_NAMESTRUCT(leafp,offset)	/* point to name struct */ \
+	((xfs_dir_leaf_name_t *)&((char *)(leafp))[offset])
+#endif
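
The entsize/namestruct helpers above boil down to byte arithmetic inside the
leaf block: a name costs the packed on-disk inode number plus its name bytes,
and nameidx is simply a byte offset from the start of the block.  A small
sketch of that arithmetic with a stand-in 8-byte packed inode (an assumption
matching xfs_dir_ino_t, not a definition taken from this header):

#include <stdio.h>

#define DEMO_INO_BYTES	8	/* packed on-disk inode number, a byte array */

/* Space one name entry occupies in the name area (entsize-by-name). */
static unsigned int demo_entsize_byname(unsigned int namelen)
{
	return DEMO_INO_BYTES + namelen;
}

/* Locate a name structure given the block buffer and a stored nameidx. */
static unsigned char *demo_namestruct(unsigned char *block, unsigned int nameidx)
{
	return block + nameidx;
}

int main(void)
{
	unsigned char block[4096];
	unsigned int entsize = demo_entsize_byname(11);
	unsigned int nameidx = sizeof(block) - entsize;	/* names grow from the end */
	unsigned char *namest = demo_namestruct(block, nameidx);

	printf("an 11-byte name consumes %u bytes at offset %u\n",
	       entsize, nameidx);
	printf("its name bytes start %d bytes past the inode field (%p)\n",
	       DEMO_INO_BYTES, (void *)(namest + DEMO_INO_BYTES));
	return 0;
}
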
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Internal routines when dirsize < XFS_LITINO(mp).
+ */
+int xfs_dir_shortform_create(struct xfs_da_args *args, xfs_ino_t parent);
+int xfs_dir_shortform_addname(struct xfs_da_args *args);
+int xfs_dir_shortform_lookup(struct xfs_da_args *args);
+int xfs_dir_shortform_to_leaf(struct xfs_da_args *args);
+int xfs_dir_shortform_removename(struct xfs_da_args *args);
+int xfs_dir_shortform_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp,
+				      struct xfs_dirent *dbp, xfs_dir_put_t put);
+int xfs_dir_shortform_replace(struct xfs_da_args *args);
+
+/*
+ * Internal routines when dirsize == XFS_LBSIZE(mp).
+ */
+int xfs_dir_leaf_to_node(struct xfs_da_args *args);
+int xfs_dir_leaf_to_shortform(struct xfs_da_args *args);
+
+/*
+ * Routines used for growing the Btree.
+ */
+int	xfs_dir_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block,
+				   struct xfs_dabuf **bpp);
+int	xfs_dir_leaf_split(struct xfs_da_state *state,
+				  struct xfs_da_state_blk *oldblk,
+				  struct xfs_da_state_blk *newblk);
+int	xfs_dir_leaf_add(struct xfs_dabuf *leaf_buffer,
+				struct xfs_da_args *args, int insertion_index);
+int	xfs_dir_leaf_addname(struct xfs_da_args *args);
+int	xfs_dir_leaf_lookup_int(struct xfs_dabuf *leaf_buffer,
+				       struct xfs_da_args *args,
+				       int *index_found_at);
+int	xfs_dir_leaf_remove(struct xfs_trans *trans,
+				   struct xfs_dabuf *leaf_buffer,
+				   int index_to_remove);
+int	xfs_dir_leaf_getdents_int(struct xfs_dabuf *bp, struct xfs_inode *dp,
+					 xfs_dablk_t bno, struct uio *uio,
+					 int *eobp, struct xfs_dirent *dbp,
+					 xfs_dir_put_t put, xfs_daddr_t nextda);
+
+/*
+ * Routines used for shrinking the Btree.
+ */
+int	xfs_dir_leaf_toosmall(struct xfs_da_state *state, int *retval);
+void	xfs_dir_leaf_unbalance(struct xfs_da_state *state,
+					     struct xfs_da_state_blk *drop_blk,
+					     struct xfs_da_state_blk *save_blk);
+
+/*
+ * Utility routines.
+ */
+uint	xfs_dir_leaf_lasthash(struct xfs_dabuf *bp, int *count);
+int	xfs_dir_leaf_order(struct xfs_dabuf *leaf1_bp,
+				  struct xfs_dabuf *leaf2_bp);
+int	xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa);
+int	xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa);
+int	xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
+
+
+/*
+ * Global data.
+ */
+extern xfs_dahash_t	xfs_dir_hash_dot, xfs_dir_hash_dotdot;
+
+#endif /* __XFS_DIR_LEAF_H__ */
diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h
new file mode 100644
index 0000000..a61bcfc
--- /dev/null
+++ b/fs/xfs/xfs_dir_sf.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR_SF_H__
+#define	__XFS_DIR_SF_H__
+
+/*
+ * Directory layout when stored internal to an inode.
+ *
+ * Small directories are packed as tightly as possible so as to
+ * fit into the literal area of the inode.
+ */
+
+typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t;
+
+/*
+ * The parent directory has a dedicated field, and the self-pointer must
+ * be calculated on the fly.
+ *
+ * Entries are packed toward the top as tightly as possible.  The header
+ * and the elements must be memcpy'd out into a work area to get correct
+ * alignment for the inode number fields.
+ */
+typedef struct xfs_dir_shortform {
+	struct xfs_dir_sf_hdr {		/* constant-structure header block */
+		xfs_dir_ino_t parent;	/* parent dir inode number */
+		__uint8_t count;	/* count of active entries */
+	} hdr;
+	struct xfs_dir_sf_entry {
+		xfs_dir_ino_t inumber;	/* referenced inode number */
+		__uint8_t namelen;	/* actual length of name (no NULL) */
+		__uint8_t name[1];	/* name */
+	} list[1];			/* variable sized array */
+} xfs_dir_shortform_t;
+typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t;
+typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t;
+
+/*
+ * We generate this and then sort it, so that readdirs are returned in
+ * hash order; otherwise seekdir won't work.
+ */
+typedef struct xfs_dir_sf_sort {
+	__uint8_t	entno;		/* .=0, ..=1, else entry# + 2 */
+	__uint8_t	seqno;		/* sequence # with same hash value */
+	__uint8_t	namelen;	/* length of name value (no null) */
+	xfs_dahash_t	hash;		/* this entry's hash value */
+	xfs_intino_t	ino;		/* this entry's inode number */
+	char		*name;		/* name value, pointer into buffer */
+} xfs_dir_sf_sort_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_GET_DIRINO)
+void xfs_dir_sf_get_dirino(xfs_dir_ino_t *from, xfs_ino_t *to);
+#define	XFS_DIR_SF_GET_DIRINO(from,to)		    xfs_dir_sf_get_dirino(from, to)
+#else
+#define	XFS_DIR_SF_GET_DIRINO(from,to)		    (*(to) = XFS_GET_DIR_INO8(*from))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_PUT_DIRINO)
+void xfs_dir_sf_put_dirino(xfs_ino_t *from, xfs_dir_ino_t *to);
+#define	XFS_DIR_SF_PUT_DIRINO(from,to)    xfs_dir_sf_put_dirino(from, to)
+#else
+#define	XFS_DIR_SF_PUT_DIRINO(from,to)    XFS_PUT_DIR_INO8(*(from), *(to))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYNAME)
+int xfs_dir_sf_entsize_byname(int len);
+#define XFS_DIR_SF_ENTSIZE_BYNAME(len)		xfs_dir_sf_entsize_byname(len)
+#else
+#define XFS_DIR_SF_ENTSIZE_BYNAME(len)		/* space a name uses */ \
+	((uint)sizeof(xfs_dir_sf_entry_t)-1 + (len))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYENTRY)
+int xfs_dir_sf_entsize_byentry(xfs_dir_sf_entry_t *sfep);
+#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep)	xfs_dir_sf_entsize_byentry(sfep)
+#else
+#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep)	/* space an entry uses */ \
+	((uint)sizeof(xfs_dir_sf_entry_t)-1 + (sfep)->namelen)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_NEXTENTRY)
+xfs_dir_sf_entry_t *xfs_dir_sf_nextentry(xfs_dir_sf_entry_t *sfep);
+#define XFS_DIR_SF_NEXTENTRY(sfep)		xfs_dir_sf_nextentry(sfep)
+#else
+#define XFS_DIR_SF_NEXTENTRY(sfep)		/* next entry in struct */ \
+	((xfs_dir_sf_entry_t *) \
+		((char *)(sfep) + XFS_DIR_SF_ENTSIZE_BYENTRY(sfep)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ALLFIT)
+int xfs_dir_sf_allfit(int count, int totallen);
+#define XFS_DIR_SF_ALLFIT(count,totallen)	\
+	xfs_dir_sf_allfit(count,totallen)
+#else
+#define XFS_DIR_SF_ALLFIT(count,totallen)	/* will all entries fit? */ \
+	((uint)sizeof(xfs_dir_sf_hdr_t) + \
+	       ((uint)sizeof(xfs_dir_sf_entry_t)-1)*(count) + (totallen))
+#endif
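
Because shortform entries are packed with no padding, iteration is the pointer
arithmetic of XFS_DIR_SF_NEXTENTRY above, and multi-byte fields such as the
inode number must be memcpy'd out to aligned storage before use.  A userspace
sketch of that walk over a hand-packed buffer; the 8-byte inode image and
1-byte namelen mirror the entry layout, everything else is invented for the
example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_HDR	9	/* 8-byte inode image + 1-byte namelen */

/* Next packed entry: skip the header and the (unterminated) name bytes. */
static const unsigned char *demo_next(const unsigned char *ep)
{
	return ep + DEMO_HDR + ep[8];
}

int main(void)
{
	unsigned char buf[64], *p = buf;
	const unsigned char *ep;
	const char *names[] = { "bin", "incoming" };
	uint64_t ino;
	int i, count = 2;

	/* Pack two entries back to back, shortform style. */
	for (i = 0; i < count; i++) {
		uint64_t packed = 100 + i;

		memcpy(p, &packed, 8);		/* inode image, may be unaligned */
		p[8] = (unsigned char)strlen(names[i]);
		memcpy(p + DEMO_HDR, names[i], p[8]);
		p += DEMO_HDR + p[8];
	}

	/* Walk the packed list; copy each inode out before using it. */
	for (ep = buf, i = 0; i < count; i++, ep = demo_next(ep)) {
		memcpy(&ino, ep, 8);
		printf("entry %d: ino=%llu name=%.*s\n", i,
		       (unsigned long long)ino, ep[8],
		       (const char *)(ep + DEMO_HDR));
	}
	return 0;
}
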
+
+#if defined(XFS_DIR_TRACE)
+
+/*
+ * Kernel tracing support for directories.
+ */
+struct uio;
+struct xfs_inode;
+struct xfs_da_intnode;
+struct xfs_dinode;
+struct xfs_dir_leafblock;
+struct xfs_dir_leaf_entry;
+
+#define	XFS_DIR_TRACE_SIZE	4096	/* size of global trace buffer */
+extern ktrace_t	*xfs_dir_trace_buf;
+
+/*
+ * Trace record types.
+ */
+#define	XFS_DIR_KTRACE_G_DU	1	/* dp, uio */
+#define	XFS_DIR_KTRACE_G_DUB	2	/* dp, uio, bno */
+#define	XFS_DIR_KTRACE_G_DUN	3	/* dp, uio, node */
+#define	XFS_DIR_KTRACE_G_DUL	4	/* dp, uio, leaf */
+#define	XFS_DIR_KTRACE_G_DUE	5	/* dp, uio, leaf entry */
+#define	XFS_DIR_KTRACE_G_DUC	6	/* dp, uio, cookie */
+
+void xfs_dir_trace_g_du(char *where, struct xfs_inode *dp, struct uio *uio);
+void xfs_dir_trace_g_dub(char *where, struct xfs_inode *dp, struct uio *uio,
+			      xfs_dablk_t bno);
+void xfs_dir_trace_g_dun(char *where, struct xfs_inode *dp, struct uio *uio,
+			      struct xfs_da_intnode *node);
+void xfs_dir_trace_g_dul(char *where, struct xfs_inode *dp, struct uio *uio,
+			      struct xfs_dir_leafblock *leaf);
+void xfs_dir_trace_g_due(char *where, struct xfs_inode *dp, struct uio *uio,
+			      struct xfs_dir_leaf_entry *entry);
+void xfs_dir_trace_g_duc(char *where, struct xfs_inode *dp, struct uio *uio,
+			      xfs_off_t cookie);
+void xfs_dir_trace_enter(int type, char *where,
+			     void *a0, void *a1, void *a2, void *a3,
+			     void *a4, void *a5, void *a6, void *a7,
+			     void *a8, void *a9, void *a10, void *a11);
+#else
+#define	xfs_dir_trace_g_du(w,d,u)
+#define	xfs_dir_trace_g_dub(w,d,u,b)
+#define	xfs_dir_trace_g_dun(w,d,u,n)
+#define	xfs_dir_trace_g_dul(w,d,u,l)
+#define	xfs_dir_trace_g_due(w,d,u,e)
+#define	xfs_dir_trace_g_duc(w,d,u,c)
+#endif /* XFS_DIR_TRACE */
+
+#endif	/* __XFS_DIR_SF_H__ */
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
new file mode 100644
index 0000000..55ae3e6
--- /dev/null
+++ b/fs/xfs/xfs_dmapi.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DMAPI_H__
+#define __XFS_DMAPI_H__
+
+/*	Values used to define the on-disk version of dm_attrname_t. All
+ *	on-disk attribute names start with the 8-byte string "SGI_DMI_".
+ *
+ *	In the on-disk inode, DMAPI attribute names consist of the user-provided
+ *	name with the DMATTR_PREFIXSTRING prepended.  This string must NEVER be
+ *	changed.
+ */
+
+#define DMATTR_PREFIXLEN	8
+#define DMATTR_PREFIXSTRING	"SGI_DMI_"
+
+typedef enum {
+	DM_EVENT_INVALID	= -1,
+	DM_EVENT_CANCEL		= 0,		/* not supported */
+	DM_EVENT_MOUNT		= 1,
+	DM_EVENT_PREUNMOUNT	= 2,
+	DM_EVENT_UNMOUNT	= 3,
+	DM_EVENT_DEBUT		= 4,		/* not supported */
+	DM_EVENT_CREATE		= 5,
+	DM_EVENT_CLOSE		= 6,		/* not supported */
+	DM_EVENT_POSTCREATE	= 7,
+	DM_EVENT_REMOVE		= 8,
+	DM_EVENT_POSTREMOVE	= 9,
+	DM_EVENT_RENAME		= 10,
+	DM_EVENT_POSTRENAME	= 11,
+	DM_EVENT_LINK		= 12,
+	DM_EVENT_POSTLINK	= 13,
+	DM_EVENT_SYMLINK	= 14,
+	DM_EVENT_POSTSYMLINK	= 15,
+	DM_EVENT_READ		= 16,
+	DM_EVENT_WRITE		= 17,
+	DM_EVENT_TRUNCATE	= 18,
+	DM_EVENT_ATTRIBUTE	= 19,
+	DM_EVENT_DESTROY	= 20,
+	DM_EVENT_NOSPACE	= 21,
+	DM_EVENT_USER		= 22,
+	DM_EVENT_MAX		= 23
+} dm_eventtype_t;
+#define HAVE_DM_EVENTTYPE_T
+
+typedef enum {
+	DM_RIGHT_NULL,
+	DM_RIGHT_SHARED,
+	DM_RIGHT_EXCL
+} dm_right_t;
+#define HAVE_DM_RIGHT_T
+
+/* Defines for determining if an event message should be sent. */
+#define	DM_EVENT_ENABLED(vfsp, ip, event) ( \
+	unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
+		( ((ip)->i_d.di_dmevmask & (1 << event)) || \
+		  ((ip)->i_mount->m_dmevmask & (1 << event)) ) \
+	)
+
+#define	DM_EVENT_ENABLED_IO(vfsp, io, event) ( \
+	unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
+		( ((io)->io_dmevmask & (1 << event)) || \
+		  ((io)->io_mount->m_dmevmask & (1 << event)) ) \
+	)
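
Both macros reduce to a bitmask test: an event is delivered only when the
filesystem was mounted with DMAPI enabled and bit (1 << event) is set in either
the per-inode or the per-mount event mask.  A standalone sketch of that check;
the struct and field names here are invented stand-ins, not the kernel's
vfs/inode types.

#include <stdio.h>

enum demo_event { DEMO_EVENT_READ = 16, DEMO_EVENT_WRITE = 17 };

struct demo_mount { unsigned int dmapi_enabled; unsigned int evmask; };
struct demo_inode { const struct demo_mount *mp; unsigned int evmask; };

static int demo_event_enabled(const struct demo_inode *ip, int event)
{
	if (!ip->mp->dmapi_enabled)		/* VFS_DMI not set: never send */
		return 0;
	return ((ip->evmask | ip->mp->evmask) & (1u << event)) != 0;
}

int main(void)
{
	struct demo_mount mp = { .dmapi_enabled = 1,
				 .evmask = 1u << DEMO_EVENT_READ };
	struct demo_inode ip = { .mp = &mp, .evmask = 0 };

	printf("read events:  %s\n",
	       demo_event_enabled(&ip, DEMO_EVENT_READ) ? "on" : "off");
	printf("write events: %s\n",
	       demo_event_enabled(&ip, DEMO_EVENT_WRITE) ? "on" : "off");
	return 0;
}
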
+
+#define DM_XFS_VALID_FS_EVENTS		( \
+	(1 << DM_EVENT_PREUNMOUNT)	| \
+	(1 << DM_EVENT_UNMOUNT)		| \
+	(1 << DM_EVENT_NOSPACE)		| \
+	(1 << DM_EVENT_DEBUT)		| \
+	(1 << DM_EVENT_CREATE)		| \
+	(1 << DM_EVENT_POSTCREATE)	| \
+	(1 << DM_EVENT_REMOVE)		| \
+	(1 << DM_EVENT_POSTREMOVE)	| \
+	(1 << DM_EVENT_RENAME)		| \
+	(1 << DM_EVENT_POSTRENAME)	| \
+	(1 << DM_EVENT_LINK)		| \
+	(1 << DM_EVENT_POSTLINK)	| \
+	(1 << DM_EVENT_SYMLINK)		| \
+	(1 << DM_EVENT_POSTSYMLINK)	| \
+	(1 << DM_EVENT_ATTRIBUTE)	| \
+	(1 << DM_EVENT_DESTROY)		)
+
+/* Events valid in dm_set_eventlist() when called with a file handle for
+   a regular file or a symlink.  These events are persistent.
+*/
+
+#define	DM_XFS_VALID_FILE_EVENTS	( \
+	(1 << DM_EVENT_ATTRIBUTE)	| \
+	(1 << DM_EVENT_DESTROY)		)
+
+/* Events valid in dm_set_eventlist() when called with a file handle for
+   a directory.  These events are persistent.
+*/
+
+#define	DM_XFS_VALID_DIRECTORY_EVENTS	( \
+	(1 << DM_EVENT_CREATE)		| \
+	(1 << DM_EVENT_POSTCREATE)	| \
+	(1 << DM_EVENT_REMOVE)		| \
+	(1 << DM_EVENT_POSTREMOVE)	| \
+	(1 << DM_EVENT_RENAME)		| \
+	(1 << DM_EVENT_POSTRENAME)	| \
+	(1 << DM_EVENT_LINK)		| \
+	(1 << DM_EVENT_POSTLINK)	| \
+	(1 << DM_EVENT_SYMLINK)		| \
+	(1 << DM_EVENT_POSTSYMLINK)	| \
+	(1 << DM_EVENT_ATTRIBUTE)	| \
+	(1 << DM_EVENT_DESTROY)		)
+
+/* Events supported by the XFS filesystem. */
+#define	DM_XFS_SUPPORTED_EVENTS		( \
+	(1 << DM_EVENT_MOUNT)		| \
+	(1 << DM_EVENT_PREUNMOUNT)	| \
+	(1 << DM_EVENT_UNMOUNT)		| \
+	(1 << DM_EVENT_NOSPACE)		| \
+	(1 << DM_EVENT_CREATE)		| \
+	(1 << DM_EVENT_POSTCREATE)	| \
+	(1 << DM_EVENT_REMOVE)		| \
+	(1 << DM_EVENT_POSTREMOVE)	| \
+	(1 << DM_EVENT_RENAME)		| \
+	(1 << DM_EVENT_POSTRENAME)	| \
+	(1 << DM_EVENT_LINK)		| \
+	(1 << DM_EVENT_POSTLINK)	| \
+	(1 << DM_EVENT_SYMLINK)		| \
+	(1 << DM_EVENT_POSTSYMLINK)	| \
+	(1 << DM_EVENT_READ)		| \
+	(1 << DM_EVENT_WRITE)		| \
+	(1 << DM_EVENT_TRUNCATE)	| \
+	(1 << DM_EVENT_ATTRIBUTE)	| \
+	(1 << DM_EVENT_DESTROY)		)
+
+
+/*
+ *	Definitions used for the flags field on dm_send_*_event().
+ */
+
+#define DM_FLAGS_NDELAY		0x001	/* return EAGAIN after dm_pending() */
+#define DM_FLAGS_UNWANTED	0x002	/* event not in fsys dm_eventset_t */
+#define DM_FLAGS_ISEM		0x004	/* thread holds i_sem */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,21)
+/* i_alloc_sem was added in 2.4.22-pre1 */
+#define DM_FLAGS_IALLOCSEM_RD	0x010	/* thread holds i_alloc_sem rd */
+#define DM_FLAGS_IALLOCSEM_WR	0x020	/* thread holds i_alloc_sem wr */
+#endif
+#endif
+
+/*
+ *	Based on IO_ISDIRECT, decide which i_ flag is set.
+ */
+#ifdef DM_FLAGS_IALLOCSEM_RD
+#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
+			      DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_ISEM)
+#define DM_SEM_FLAG_WR	(DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
+#else
+#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
+			      0 : DM_FLAGS_ISEM)
+#define DM_SEM_FLAG_WR	(DM_FLAGS_ISEM)
+#endif
+
+/*
+ *	Macros to turn caller specified delay/block flags into
+ *	dm_send_xxxx_event flag DM_FLAGS_NDELAY.
+ */
+
+#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
+			DM_FLAGS_NDELAY : 0)
+#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
+
+
+extern struct bhv_vfsops xfs_dmops;
+
+#ifdef CONFIG_XFS_DMAPI
+void xfs_dm_init(struct file_system_type *);
+void xfs_dm_exit(struct file_system_type *);
+#define XFS_DM_INIT(fstype)	xfs_dm_init(fstype)
+#define XFS_DM_EXIT(fstype)	xfs_dm_exit(fstype)
+#else
+#define XFS_DM_INIT(fstype)
+#define XFS_DM_EXIT(fstype)
+#endif
+
+#endif  /* __XFS_DMAPI_H__ */
diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c
new file mode 100644
index 0000000..cec54ba
--- /dev/null
+++ b/fs/xfs/xfs_dmops.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
+xfs_dmops_t	xfs_dmcore_stub = {
+	.xfs_send_data		= (xfs_send_data_t)fs_nosys,
+	.xfs_send_mmap		= (xfs_send_mmap_t)fs_noerr,
+	.xfs_send_destroy	= (xfs_send_destroy_t)fs_nosys,
+	.xfs_send_namesp	= (xfs_send_namesp_t)fs_nosys,
+	.xfs_send_unmount	= (xfs_send_unmount_t)fs_noval,
+};
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
new file mode 100644
index 0000000..bbe1dea
--- /dev/null
+++ b/fs/xfs/xfs_error.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_sb.h"
+#include "xfs_trans.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_utils.h"
+#include "xfs_error.h"
+
+#ifdef DEBUG
+
+int	xfs_etrap[XFS_ERROR_NTRAP] = {
+	0,
+};
+
+int
+xfs_error_trap(int e)
+{
+	int i;
+
+	if (!e)
+		return 0;
+	for (i = 0; i < XFS_ERROR_NTRAP; i++) {
+		if (xfs_etrap[i] == 0)
+			break;
+		if (e != xfs_etrap[i])
+			continue;
+		cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
+		debug_stop_all_cpus((void *)-1LL);
+		BUG();
+		break;
+	}
+	return e;
+}
+#endif
+
+#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+
+int	xfs_etest[XFS_NUM_INJECT_ERROR];
+int64_t	xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
+char *	xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
+
+void
+xfs_error_test_init(void)
+{
+	memset(xfs_etest, 0, sizeof(xfs_etest));
+	memset(xfs_etest_fsid, 0, sizeof(xfs_etest_fsid));
+	memset(xfs_etest_fsname, 0, sizeof(xfs_etest_fsname));
+}
+
+int
+xfs_error_test(int error_tag, int *fsidp, char *expression,
+	       int line, char *file, unsigned long randfactor)
+{
+	int i;
+	int64_t fsid;
+
+	if (random() % randfactor)
+		return 0;
+
+	memcpy(&fsid, fsidp, sizeof(xfs_fsid_t));
+
+	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
+		if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
+			cmn_err(CE_WARN,
+	"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
+				expression, file, line, xfs_etest_fsname[i]);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+int
+xfs_errortag_add(int error_tag, xfs_mount_t *mp)
+{
+	int i;
+	int len;
+	int64_t fsid;
+
+	memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
+
+	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
+		if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
+			cmn_err(CE_WARN, "XFS error tag #%d on", error_tag);
+			return 0;
+		}
+	}
+
+	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
+		if (xfs_etest[i] == 0) {
+			cmn_err(CE_WARN, "Turned on XFS error tag #%d",
+				error_tag);
+			xfs_etest[i] = error_tag;
+			xfs_etest_fsid[i] = fsid;
+			len = strlen(mp->m_fsname);
+			xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
+			strcpy(xfs_etest_fsname[i], mp->m_fsname);
+			return 0;
+		}
+	}
+
+	cmn_err(CE_WARN, "error tag overflow, too many turned on");
+
+	return 1;
+}
+
+int
+xfs_errortag_clear(int error_tag, xfs_mount_t *mp)
+{
+	int i;
+	int64_t fsid;
+
+	memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
+
+	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
+		if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
+			xfs_etest[i] = 0;
+			xfs_etest_fsid[i] = 0LL;
+			kmem_free(xfs_etest_fsname[i],
+				  strlen(xfs_etest_fsname[i]) + 1);
+			xfs_etest_fsname[i] = NULL;
+			cmn_err(CE_WARN, "Cleared XFS error tag #%d",
+				error_tag);
+			return 0;
+		}
+	}
+
+	cmn_err(CE_WARN, "XFS error tag %d not on", error_tag);
+
+	return 1;
+}
+
+int
+xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud)
+{
+	int i;
+	int cleared = 0;
+
+	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
+		if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) &&
+		     xfs_etest[i] != 0) {
+			cleared = 1;
+			cmn_err(CE_WARN, "Clearing XFS error tag #%d",
+				xfs_etest[i]);
+			xfs_etest[i] = 0;
+			xfs_etest_fsid[i] = 0LL;
+			kmem_free(xfs_etest_fsname[i],
+				  strlen(xfs_etest_fsname[i]) + 1);
+			xfs_etest_fsname[i] = NULL;
+		}
+	}
+
+	if (loud || cleared)
+		cmn_err(CE_WARN,
+			"Cleared all XFS error tags for filesystem \"%s\"",
+			fsname);
+
+	return 0;
+}
+
+int
+xfs_errortag_clearall(xfs_mount_t *mp)
+{
+	int64_t fsid;
+
+	memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
+
+	return xfs_errortag_clearall_umount(fsid, mp->m_fsname, 1);
+}
+#endif /* DEBUG || INDUCE_IO_ERROR */
+
+static void
+xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap)
+{
+	if (mp != NULL) {
+		char	*newfmt;
+		int	len = 16 + mp->m_fsname_len + strlen(fmt);
+
+		newfmt = kmem_alloc(len, KM_SLEEP);
+		sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt);
+		icmn_err(level, newfmt, ap);
+		kmem_free(newfmt, len);
+	} else {
+		icmn_err(level, fmt, ap);
+	}
+}
+
+void
+xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	xfs_fs_vcmn_err(level, mp, fmt, ap);
+	va_end(ap);
+}
+
+void
+xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
+{
+	va_list ap;
+
+#ifdef DEBUG
+	xfs_panic_mask |= XFS_PTAG_SHUTDOWN_CORRUPT;
+#endif
+
+	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
+	    && (level & CE_ALERT)) {
+		level &= ~CE_ALERT;
+		level |= CE_PANIC;
+		cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
+	}
+	va_start(ap, fmt);
+	xfs_fs_vcmn_err(level, mp, fmt, ap);
+	va_end(ap);
+}
+
+void
+xfs_error_report(
+	char		*tag,
+	int		level,
+	xfs_mount_t	*mp,
+	char		*fname,
+	int		linenum,
+	inst_t		*ra)
+{
+	if (level <= xfs_error_level) {
+		xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
+			    CE_ALERT, mp,
+		"XFS internal error %s at line %d of file %s.  Caller 0x%p\n",
+			    tag, linenum, fname, ra);
+
+		xfs_stack_trace();
+	}
+}
+
+void
+xfs_hex_dump(void *p, int length)
+{
+	__uint8_t *uip = (__uint8_t*)p;
+	int	i;
+	char	sbuf[128], *s;
+
+	s = sbuf;
+	*s = '\0';
+	for (i=0; i<length; i++, uip++) {
+		if ((i % 16) == 0) {
+			if (*s != '\0')
+				cmn_err(CE_ALERT, "%s\n", sbuf);
+			s = sbuf;
+			sprintf(s, "0x%x: ", i);
+			while( *s != '\0')
+				s++;
+		}
+		sprintf(s, "%02x ", *uip);
+
+		/*
+		 * The kernel sprintf here is treated as void; the userspace
+		 * sprintf returns the formatted string's length.  Either way,
+		 * find the new end of string by scanning.
+		 */
+		while( *s != '\0')
+			s++;
+	}
+	cmn_err(CE_ALERT, "%s\n", sbuf);
+}
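
xfs_hex_dump() builds each 16-byte line in a scratch buffer and rescans for
the end of string after every sprintf.  In plain userspace the same routine
can simply use the length snprintf returns; a compact sketch of that variant
(not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

static void demo_hex_dump(const void *p, int length)
{
	const uint8_t *uip = p;
	char line[80];
	int i, n = 0;

	for (i = 0; i < length; i++) {
		if ((i % 16) == 0) {		/* start a fresh line */
			if (n)
				puts(line);
			n = snprintf(line, sizeof(line), "0x%x: ", i);
		}
		n += snprintf(line + n, sizeof(line) - n, "%02x ", uip[i]);
	}
	if (n)
		puts(line);
}

int main(void)
{
	unsigned char blob[40];
	int i;

	for (i = 0; i < (int)sizeof(blob); i++)
		blob[i] = (unsigned char)(i * 7);
	demo_hex_dump(blob, sizeof(blob));
	return 0;
}
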
+
+void
+xfs_corruption_error(
+	char		*tag,
+	int		level,
+	xfs_mount_t	*mp,
+	void		*p,
+	char		*fname,
+	int		linenum,
+	inst_t		*ra)
+{
+	if (level <= xfs_error_level)
+		xfs_hex_dump(p, 16);
+	xfs_error_report(tag, level, mp, fname, linenum, ra);
+}
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
new file mode 100644
index 0000000..6bc0535
--- /dev/null
+++ b/fs/xfs/xfs_error.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_ERROR_H__
+#define	__XFS_ERROR_H__
+
+#define prdev(fmt,targ,args...) \
+	printk("XFS: device %s- " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
+
+#define XFS_ERECOVER	1	/* Failure to recover log */
+#define XFS_ELOGSTAT	2	/* Failure to stat log in user space */
+#define XFS_ENOLOGSPACE	3	/* Reservation too large */
+#define XFS_ENOTSUP	4	/* Operation not supported */
+#define	XFS_ENOLSN	5	/* Can't find the lsn you asked for */
+#define XFS_ENOTFOUND	6
+#define XFS_ENOTXFS	7	/* Not XFS filesystem */
+
+#ifdef DEBUG
+#define	XFS_ERROR_NTRAP	10
+extern int	xfs_etrap[XFS_ERROR_NTRAP];
+extern int	xfs_error_trap(int);
+#define	XFS_ERROR(e)	xfs_error_trap(e)
+#else
+#define	XFS_ERROR(e)	(e)
+#endif
+
+struct xfs_mount;
+
+extern void
+xfs_error_report(
+	char		*tag,
+	int		level,
+	struct xfs_mount *mp,
+	char		*fname,
+	int		linenum,
+	inst_t		*ra);
+
+extern void
+xfs_corruption_error(
+	char		*tag,
+	int		level,
+	struct xfs_mount *mp,
+	void		*p,
+	char		*fname,
+	int		linenum,
+	inst_t		*ra);
+
+extern void
+xfs_hex_dump(void *p, int length);
+
+#define	XFS_ERROR_REPORT(e, lvl, mp)	\
+	xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
+#define	XFS_CORRUPTION_ERROR(e, lvl, mp, mem)	\
+	xfs_corruption_error(e, lvl, mp, mem, \
+			     __FILE__, __LINE__, __return_address)
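
These wrappers exist so that __FILE__ and __LINE__ expand at the call site
rather than inside the reporting helper, which is what makes the
"at line N of file F" message point at the caller.  A minimal sketch of the
same pattern; the function and macro names are illustrative only.

#include <stdio.h>

static void demo_error_report(const char *tag, const char *fname, int linenum)
{
	fprintf(stderr, "internal error %s at line %d of file %s\n",
		tag, linenum, fname);
}

/* Expanded at the call site, so file and line identify the caller. */
#define DEMO_ERROR_REPORT(tag) \
	demo_error_report((tag), __FILE__, __LINE__)

int main(void)
{
	DEMO_ERROR_REPORT("demo_check_failed");
	return 0;
}
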
+
+#define XFS_ERRLEVEL_OFF	0
+#define XFS_ERRLEVEL_LOW	1
+#define XFS_ERRLEVEL_HIGH	5
+
+/*
+ * error injection tags - the labels can be anything you want
+ * but each tag should have its own unique number
+ */
+
+#define XFS_ERRTAG_NOERROR				0
+#define XFS_ERRTAG_IFLUSH_1				1
+#define XFS_ERRTAG_IFLUSH_2				2
+#define XFS_ERRTAG_IFLUSH_3				3
+#define XFS_ERRTAG_IFLUSH_4				4
+#define XFS_ERRTAG_IFLUSH_5				5
+#define XFS_ERRTAG_IFLUSH_6				6
+#define	XFS_ERRTAG_DA_READ_BUF				7
+#define	XFS_ERRTAG_BTREE_CHECK_LBLOCK			8
+#define	XFS_ERRTAG_BTREE_CHECK_SBLOCK			9
+#define	XFS_ERRTAG_ALLOC_READ_AGF			10
+#define	XFS_ERRTAG_IALLOC_READ_AGI			11
+#define	XFS_ERRTAG_ITOBP_INOTOBP			12
+#define	XFS_ERRTAG_IUNLINK				13
+#define	XFS_ERRTAG_IUNLINK_REMOVE			14
+#define	XFS_ERRTAG_DIR_INO_VALIDATE			15
+#define XFS_ERRTAG_BULKSTAT_READ_CHUNK			16
+#define XFS_ERRTAG_IODONE_IOERR				17
+#define XFS_ERRTAG_STRATREAD_IOERR			18
+#define XFS_ERRTAG_STRATCMPL_IOERR			19
+#define XFS_ERRTAG_DIOWRITE_IOERR			20
+#define XFS_ERRTAG_BMAPIFORMAT				21
+#define XFS_ERRTAG_MAX					22
+
+/*
+ * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
+ */
+#define XFS_RANDOM_DEFAULT				100
+#define XFS_RANDOM_IFLUSH_1				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_2				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_3				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_4				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_5				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_6				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DA_READ_BUF				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BTREE_CHECK_LBLOCK			(XFS_RANDOM_DEFAULT/4)
+#define XFS_RANDOM_BTREE_CHECK_SBLOCK			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ALLOC_READ_AGF			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IALLOC_READ_AGI			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ITOBP_INOTOBP			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK_REMOVE			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DIR_INO_VALIDATE			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BULKSTAT_READ_CHUNK			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IODONE_IOERR				(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATREAD_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATCMPL_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_DIOWRITE_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define	XFS_RANDOM_BMAPIFORMAT				XFS_RANDOM_DEFAULT
+
+#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+extern int	xfs_error_test(int, int *, char *, int, char *, unsigned long);
+void xfs_error_test_init(void);
+
+#define	XFS_NUM_INJECT_ERROR				10
+
+#ifdef __ANSI_CPP__
+#define XFS_TEST_ERROR(expr, mp, tag, rf)		\
+	((expr) || \
+	 xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \
+			 (rf)))
+#else
+#define XFS_TEST_ERROR(expr, mp, tag, rf)		\
+	((expr) || \
+	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
+			(rf)))
+#endif /* __ANSI_CPP__ */
+
+int		xfs_errortag_add(int error_tag, xfs_mount_t *mp);
+int		xfs_errortag_clear(int error_tag, xfs_mount_t *mp);
+
+int		xfs_errortag_clearall(xfs_mount_t *mp);
+int		xfs_errortag_clearall_umount(int64_t fsid, char *fsname,
+						int loud);
+#else
+#define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
+#define xfs_errortag_add(tag, mp)		(ENOSYS)
+#define xfs_errortag_clearall(mp)		(ENOSYS)
+#endif /* (DEBUG || INDUCE_IO_ERROR) */
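
XFS_TEST_ERROR() folds fault injection into an ordinary error check: the real
condition short-circuits first, and otherwise a registered tag fires roughly
once every rf evaluations.  A userspace approximation of that gate; the table
handling is simplified and the names are invented, so treat it as a sketch of
the idea rather than the kernel mechanism.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_NUM_INJECT	10
static int demo_etest[DEMO_NUM_INJECT];	/* enabled error tags, 0 == free slot */

static int demo_errortag_add(int tag)
{
	int i;

	for (i = 0; i < DEMO_NUM_INJECT; i++) {
		if (demo_etest[i] == 0) {
			demo_etest[i] = tag;
			return 0;
		}
	}
	return 1;				/* table full */
}

static int demo_error_test(int tag, unsigned long randfactor)
{
	int i;

	if (rand() % randfactor)		/* fire ~1 time in randfactor */
		return 0;
	for (i = 0; i < DEMO_NUM_INJECT; i++)
		if (demo_etest[i] == tag)
			return 1;
	return 0;
}

/* Real failure first, injected failure second -- like XFS_TEST_ERROR(). */
#define DEMO_TEST_ERROR(expr, tag, rf)	((expr) || demo_error_test((tag), (rf)))

int main(void)
{
	int hits = 0, i;

	demo_errortag_add(7);			/* e.g. XFS_ERRTAG_DA_READ_BUF */
	for (i = 0; i < 100000; i++)
		if (DEMO_TEST_ERROR(0, 7, 100))
			hits++;
	printf("injected failures: %d (expected around 1000)\n", hits);
	return 0;
}
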
+
+/*
+ * XFS panic tags -- allow a call to xfs_cmn_err() be turned into
+ *			a panic by setting xfs_panic_mask in a
+ *			sysctl.  update xfs_max[XFS_PARAM] if
+ *			more are added.
+ */
+#define		XFS_NO_PTAG			0
+#define		XFS_PTAG_IFLUSH			0x00000001
+#define		XFS_PTAG_LOGRES			0x00000002
+#define		XFS_PTAG_AILDELETE		0x00000004
+#define		XFS_PTAG_ERROR_REPORT		0x00000008
+#define		XFS_PTAG_SHUTDOWN_CORRUPT	0x00000010
+#define		XFS_PTAG_SHUTDOWN_IOERROR	0x00000020
+#define		XFS_PTAG_SHUTDOWN_LOGERROR	0x00000040
+
+struct xfs_mount;
+/* PRINTFLIKE4 */
+void		xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
+			    char *fmt, ...);
+/* PRINTFLIKE3 */
+void		xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
+
+#endif	/* __XFS_ERROR_H__ */
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
new file mode 100644
index 0000000..5eafd5b
--- /dev/null
+++ b/fs/xfs/xfs_extfree_item.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains the implementation of the xfs_efi_log_item
+ * and xfs_efd_log_item items.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_extfree_item.h"
+
+
+kmem_zone_t	*xfs_efi_zone;
+kmem_zone_t	*xfs_efd_zone;
+
+STATIC void	xfs_efi_item_unlock(xfs_efi_log_item_t *);
+STATIC void	xfs_efi_item_abort(xfs_efi_log_item_t *);
+STATIC void	xfs_efd_item_abort(xfs_efd_log_item_t *);
+
+
+
+/*
+ * This returns the number of iovecs needed to log the given efi item.
+ * We only need 1 iovec for an efi item.  It just logs the efi_log_format
+ * structure.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_efi_item_size(xfs_efi_log_item_t *efip)
+{
+	return 1;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given efi log item. We use only 1 iovec, and we point that
+ * at the efi_log_format structure embedded in the efi item.
+ * It is at this point that we assert that all of the extent
+ * slots in the efi item have been filled.
+ */
+STATIC void
+xfs_efi_item_format(xfs_efi_log_item_t	*efip,
+		    xfs_log_iovec_t	*log_vector)
+{
+	uint	size;
+
+	ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
+
+	efip->efi_format.efi_type = XFS_LI_EFI;
+
+	size = sizeof(xfs_efi_log_format_t);
+	size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
+	efip->efi_format.efi_size = 1;
+
+	log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
+	log_vector->i_len = size;
+	ASSERT(size >= sizeof(xfs_efi_log_format_t));
+}
+
+
+/*
+ * Pinning has no meaning for an efi item, so just return.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efi_item_pin(xfs_efi_log_item_t *efip)
+{
+	return;
+}
+
+
+/*
+ * While EFIs cannot really be pinned, the unpin operation is the
+ * last place at which the EFI is manipulated during a transaction.
+ * Here we coordinate with xfs_efi_cancel() to determine who gets to
+ * free the EFI.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
+{
+	int		nexts;
+	int		size;
+	xfs_mount_t	*mp;
+	SPLDECL(s);
+
+	mp = efip->efi_item.li_mountp;
+	AIL_LOCK(mp, s);
+	if (efip->efi_flags & XFS_EFI_CANCELED) {
+		/*
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
+
+		nexts = efip->efi_format.efi_nextents;
+		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+			size = sizeof(xfs_efi_log_item_t);
+			size += (nexts - 1) * sizeof(xfs_extent_t);
+			kmem_free(efip, size);
+		} else {
+			kmem_zone_free(xfs_efi_zone, efip);
+		}
+	} else {
+		efip->efi_flags |= XFS_EFI_COMMITTED;
+		AIL_UNLOCK(mp, s);
+	}
+
+	return;
+}
+
+/*
+ * Like unpin, only we also have to clear the xaction descriptor
+ * pointing to the log item if we free the item.  This routine duplicates
+ * unpin because efi_flags is protected by the AIL lock.  Freeing
+ * the descriptor and then calling unpin would force us to drop the AIL
+ * lock which would open up a race condition.
+ */
+STATIC void
+xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
+{
+	int		nexts;
+	int		size;
+	xfs_mount_t	*mp;
+	xfs_log_item_desc_t	*lidp;
+	SPLDECL(s);
+
+	mp = efip->efi_item.li_mountp;
+	AIL_LOCK(mp, s);
+	if (efip->efi_flags & XFS_EFI_CANCELED) {
+		/*
+		 * free the xaction descriptor pointing to this item
+		 */
+		lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip);
+		xfs_trans_free_item(tp, lidp);
+		/*
+		 * pull the item off the AIL.
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
+		/*
+		 * now free the item itself
+		 */
+		nexts = efip->efi_format.efi_nextents;
+		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+			size = sizeof(xfs_efi_log_item_t);
+			size += (nexts - 1) * sizeof(xfs_extent_t);
+			kmem_free(efip, size);
+		} else {
+			kmem_zone_free(xfs_efi_zone, efip);
+		}
+	} else {
+		efip->efi_flags |= XFS_EFI_COMMITTED;
+		AIL_UNLOCK(mp, s);
+	}
+
+	return;
+}
+
+/*
+ * Efi items have no locking or pushing.  However, since EFIs are
+ * pulled from the AIL when their corresponding EFDs are committed
+ * to disk, their situation is very similar to being pinned.  Return
+ * XFS_ITEM_PINNED so that the caller will eventually flush the log.
+ * This should help in getting the EFI out of the AIL.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
+{
+	return XFS_ITEM_PINNED;
+}
+
+/*
+ * Efi items have no locking, so just return.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
+{
+	if (efip->efi_item.li_flags & XFS_LI_ABORTED)
+		xfs_efi_item_abort(efip);
+	return;
+}
+
+/*
+ * The EFI is logged only once and cannot be moved in the log, so
+ * simply return the lsn at which it's been logged.  The canceled
+ * flag is not paid any attention here.  Checking for that is delayed
+ * until the EFI is unpinned.
+ */
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
+{
+	return lsn;
+}
+
+/*
+ * This is called when the transaction logging the EFI is aborted.
+ * Free up the EFI and return.  No need to clean up the slot for
+ * the item in the transaction.  That was done by the unpin code
+ * which is called prior to this routine in the abort/fs-shutdown path.
+ */
+STATIC void
+xfs_efi_item_abort(xfs_efi_log_item_t *efip)
+{
+	int	nexts;
+	int	size;
+
+	nexts = efip->efi_format.efi_nextents;
+	if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+		size = sizeof(xfs_efi_log_item_t);
+		size += (nexts - 1) * sizeof(xfs_extent_t);
+		kmem_free(efip, size);
+	} else {
+		kmem_zone_free(xfs_efi_zone, efip);
+	}
+	return;
+}
+
+/*
+ * There isn't much you can do to push on an efi item.  It is simply
+ * stuck waiting for all of its corresponding efd items to be
+ * committed to disk.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efi_item_push(xfs_efi_log_item_t *efip)
+{
+	return;
+}
+
+/*
+ * The EFI dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
+{
+	return;
+}
+
+/*
+ * This is the ops vector shared by all efi log items.
+ */
+struct xfs_item_ops xfs_efi_item_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_efi_item_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_efi_item_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
+					xfs_efi_item_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_efi_item_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_efi_item_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_efi_item_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_efi_item_abort,
+	.iop_pushbuf	= NULL,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_efi_item_committing
+};
+
+
+/*
+ * Allocate and initialize an efi item with the given number of extents.
+ */
+xfs_efi_log_item_t *
+xfs_efi_init(xfs_mount_t	*mp,
+	     uint		nextents)
+
+{
+	xfs_efi_log_item_t	*efip;
+	uint			size;
+
+	ASSERT(nextents > 0);
+	if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
+		size = (uint)(sizeof(xfs_efi_log_item_t) +
+			((nextents - 1) * sizeof(xfs_extent_t)));
+		efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP);
+	} else {
+		efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone,
+							     KM_SLEEP);
+	}
+
+	efip->efi_item.li_type = XFS_LI_EFI;
+	efip->efi_item.li_ops = &xfs_efi_item_ops;
+	efip->efi_item.li_mountp = mp;
+	efip->efi_format.efi_nextents = nextents;
+	efip->efi_format.efi_id = (__psint_t)(void*)efip;
+
+	return (efip);
+}
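
The sizing above is the classic one-element-trailing-array trick: items
covering up to XFS_EFI_MAX_FAST_EXTENTS extents come out of a fixed-size zone,
and anything larger is allocated as sizeof(item) + (nextents - 1) *
sizeof(extent).  A userspace sketch of the oversize path, with malloc standing
in for the kernel allocators and simplified field names:

#include <stdio.h>
#include <stdlib.h>

struct demo_extent { unsigned long long start, len; };

struct demo_efi {
	unsigned int		nextents;
	struct demo_extent	extents[1];	/* really nextents entries */
};

static struct demo_efi *demo_efi_init(unsigned int nextents)
{
	size_t size = sizeof(struct demo_efi) +
		      (nextents - 1) * sizeof(struct demo_extent);
	struct demo_efi *efip = calloc(1, size);

	if (efip)
		efip->nextents = nextents;
	return efip;
}

int main(void)
{
	struct demo_efi *efip = demo_efi_init(5);

	if (!efip)
		return 1;
	efip->extents[4].start = 1234;		/* last of the five slots */
	printf("item for %u extents needs %zu bytes\n", efip->nextents,
	       sizeof(struct demo_efi) + 4 * sizeof(struct demo_extent));
	free(efip);
	return 0;
}

Modern code would use a C99 flexible array member instead of the [1] array,
but the size arithmetic is the same.
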
+
+/*
+ * This is called by the efd item code below to release references to
+ * the given efi item.  Each efd calls this with the number of
+ * extents that it has logged, and when the sum of these reaches
+ * the total number of extents logged by this efi item we can free
+ * the efi item.
+ *
+ * Freeing the efi item requires that we remove it from the AIL.
+ * We'll use the AIL lock to protect our counters as well as
+ * the removal from the AIL.
+ */
+void
+xfs_efi_release(xfs_efi_log_item_t	*efip,
+		uint			nextents)
+{
+	xfs_mount_t	*mp;
+	int		extents_left;
+	uint		size;
+	int		nexts;
+	SPLDECL(s);
+
+	mp = efip->efi_item.li_mountp;
+	ASSERT(efip->efi_next_extent > 0);
+	ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
+
+	AIL_LOCK(mp, s);
+	ASSERT(efip->efi_next_extent >= nextents);
+	efip->efi_next_extent -= nextents;
+	extents_left = efip->efi_next_extent;
+	if (extents_left == 0) {
+		/*
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
+	} else {
+		AIL_UNLOCK(mp, s);
+	}
+
+	if (extents_left == 0) {
+		nexts = efip->efi_format.efi_nextents;
+		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+			size = sizeof(xfs_efi_log_item_t);
+			size += (nexts - 1) * sizeof(xfs_extent_t);
+			kmem_free(efip, size);
+		} else {
+			kmem_zone_free(xfs_efi_zone, efip);
+		}
+	}
+}
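
xfs_efi_release() is a counted hand-off: each EFD gives back the number of
extents it covered, and the EFI is torn down only when the outstanding count
reaches zero, with the AIL lock serializing the decrement.  A pthread sketch
of the same countdown-then-free pattern; the mutex and names are stand-ins,
not the AIL machinery.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_efi {
	pthread_mutex_t	lock;		/* stands in for the AIL lock */
	unsigned int	extents_left;	/* extents not yet covered by an EFD */
};

static struct demo_efi *demo_efi_init(unsigned int nextents)
{
	struct demo_efi *efip = malloc(sizeof(*efip));

	if (!efip)
		abort();
	pthread_mutex_init(&efip->lock, NULL);
	efip->extents_left = nextents;
	return efip;
}

/* Called once per EFD with the number of extents that EFD logged. */
static void demo_efi_release(struct demo_efi *efip, unsigned int nextents)
{
	unsigned int left;

	pthread_mutex_lock(&efip->lock);
	efip->extents_left -= nextents;
	left = efip->extents_left;
	pthread_mutex_unlock(&efip->lock);

	if (left == 0) {		/* last reference: tear the item down */
		pthread_mutex_destroy(&efip->lock);
		free(efip);
	}
}

int main(void)
{
	struct demo_efi *efip = demo_efi_init(3);

	demo_efi_release(efip, 1);
	demo_efi_release(efip, 2);	/* count hits zero; efip is freed here */
	printf("all extent references released\n");
	return 0;
}
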
+
+/*
+ * This is called when the transaction that should be committing the
+ * EFD corresponding to the given EFI is aborted.  The committed and
+ * canceled flags are used to coordinate the freeing of the EFI and
+ * the references by the transaction that committed it.
+ */
+STATIC void
+xfs_efi_cancel(
+	xfs_efi_log_item_t	*efip)
+{
+	int		nexts;
+	int		size;
+	xfs_mount_t	*mp;
+	SPLDECL(s);
+
+	mp = efip->efi_item.li_mountp;
+	AIL_LOCK(mp, s);
+	if (efip->efi_flags & XFS_EFI_COMMITTED) {
+		/*
+		 * xfs_trans_delete_ail() drops the AIL lock.
+		 */
+		xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
+
+		nexts = efip->efi_format.efi_nextents;
+		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+			size = sizeof(xfs_efi_log_item_t);
+			size += (nexts - 1) * sizeof(xfs_extent_t);
+			kmem_free(efip, size);
+		} else {
+			kmem_zone_free(xfs_efi_zone, efip);
+		}
+	} else {
+		efip->efi_flags |= XFS_EFI_CANCELED;
+		AIL_UNLOCK(mp, s);
+	}
+
+	return;
+}
+
+
+
+
+
+/*
+ * This returns the number of iovecs needed to log the given efd item.
+ * We only need 1 iovec for an efd item.  It just logs the efd_log_format
+ * structure.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_efd_item_size(xfs_efd_log_item_t *efdp)
+{
+	return 1;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given efd log item. We use only 1 iovec, and we point that
+ * at the efd_log_format structure embedded in the efd item.
+ * It is at this point that we assert that all of the extent
+ * slots in the efd item have been filled.
+ */
+STATIC void
+xfs_efd_item_format(xfs_efd_log_item_t	*efdp,
+		    xfs_log_iovec_t	*log_vector)
+{
+	uint	size;
+
+	ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
+
+	efdp->efd_format.efd_type = XFS_LI_EFD;
+
+	size = sizeof(xfs_efd_log_format_t);
+	size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
+	efdp->efd_format.efd_size = 1;
+
+	log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
+	log_vector->i_len = size;
+	ASSERT(size >= sizeof(xfs_efd_log_format_t));
+}
+
+
+/*
+ * Pinning has no meaning for an efd item, so just return.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
+{
+	return;
+}
+
+
+/*
+ * Since pinning has no meaning for an efd item, unpinning does
+ * not either.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale)
+{
+	return;
+}
+
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp)
+{
+	return;
+}
+
+/*
+ * Efd items have no locking; return XFS_ITEM_LOCKED so the caller skips us.
+ */
+/*ARGSUSED*/
+STATIC uint
+xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
+{
+	return XFS_ITEM_LOCKED;
+}
+
+/*
+ * Efd items have no locking to release; if the transaction was
+ * aborted, clean up the item here.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
+{
+	if (efdp->efd_item.li_flags & XFS_LI_ABORTED)
+		xfs_efd_item_abort(efdp);
+	return;
+}
+
+/*
+ * When the efd item is committed to disk, all we need to do
+ * is delete our reference to our partner efi item and then
+ * free ourselves.  Since we're freeing ourselves we must
+ * return -1 to keep the transaction code from further referencing
+ * this item.
+ */
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
+{
+	uint	size;
+	int	nexts;
+
+	/*
+	 * If we got a log I/O error, it's always the case that the LR with the
+	 * EFI got unpinned and freed before the EFD got aborted.
+	 */
+	if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
+		xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
+
+	nexts = efdp->efd_format.efd_nextents;
+	if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
+		size = sizeof(xfs_efd_log_item_t);
+		size += (nexts - 1) * sizeof(xfs_extent_t);
+		kmem_free(efdp, size);
+	} else {
+		kmem_zone_free(xfs_efd_zone, efdp);
+	}
+
+	return (xfs_lsn_t)-1;
+}
+
+/*
+ * The transaction of which this EFD is a part has been aborted.
+ * Inform its companion EFI of this fact and then clean up after
+ * ourselves.  No need to clean up the slot for the item in the
+ * transaction.  That was done by the unpin code which is called
+ * prior to this routine in the abort/fs-shutdown path.
+ */
+STATIC void
+xfs_efd_item_abort(xfs_efd_log_item_t *efdp)
+{
+	int	nexts;
+	int	size;
+
+	/*
+	 * If we got a log I/O error, it's always the case that the LR with the
+	 * EFI got unpinned and freed before the EFD got aborted. So don't
+	 * reference the EFI at all in that case.
+	 */
+	if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
+		xfs_efi_cancel(efdp->efd_efip);
+
+	nexts = efdp->efd_format.efd_nextents;
+	if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
+		size = sizeof(xfs_efd_log_item_t);
+		size += (nexts - 1) * sizeof(xfs_extent_t);
+		kmem_free(efdp, size);
+	} else {
+		kmem_zone_free(xfs_efd_zone, efdp);
+	}
+	return;
+}
+
+/*
+ * There isn't much you can do to push on an efd item.  It is simply
+ * stuck waiting for the log to be flushed to disk.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_push(xfs_efd_log_item_t *efdp)
+{
+	return;
+}
+
+/*
+ * The EFD dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+/*ARGSUSED*/
+STATIC void
+xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn)
+{
+	return;
+}
+
+/*
+ * This is the ops vector shared by all efd log items.
+ */
+struct xfs_item_ops xfs_efd_item_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_efd_item_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_efd_item_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
+					xfs_efd_item_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_efd_item_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_efd_item_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_efd_item_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_efd_item_abort,
+	.iop_pushbuf	= NULL,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_efd_item_committing
+};
+
+
+/*
+ * Allocate and initialize an efd item with the given number of extents.
+ */
+xfs_efd_log_item_t *
+xfs_efd_init(xfs_mount_t	*mp,
+	     xfs_efi_log_item_t	*efip,
+	     uint		nextents)
+
+{
+	xfs_efd_log_item_t	*efdp;
+	uint			size;
+
+	ASSERT(nextents > 0);
+	if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
+		size = (uint)(sizeof(xfs_efd_log_item_t) +
+			((nextents - 1) * sizeof(xfs_extent_t)));
+		efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP);
+	} else {
+		efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone,
+							     KM_SLEEP);
+	}
+
+	efdp->efd_item.li_type = XFS_LI_EFD;
+	efdp->efd_item.li_ops = &xfs_efd_item_ops;
+	efdp->efd_item.li_mountp = mp;
+	efdp->efd_efip = efip;
+	efdp->efd_format.efd_nextents = nextents;
+	efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
+
+	return (efdp);
+}
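+
+/*
+ * Illustrative sketch, not part of the original source: an EFI/EFD pair
+ * brackets an extent-free operation.  The transaction calls that attach
+ * and log the items are elided and the variable names are hypothetical:
+ *
+ *	xfs_efi_log_item_t	*efip;
+ *	xfs_efd_log_item_t	*efdp;
+ *
+ *	efip = xfs_efi_init(mp, nextents);	 // "intent to free" item
+ *	// ...log the intent and free the extents in a transaction...
+ *	efdp = xfs_efd_init(mp, efip, nextents); // matching "done" item,
+ *						 // tied to the EFI via efd_efi_id
+ */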
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
new file mode 100644
index 0000000..7122d61
--- /dev/null
+++ b/fs/xfs/xfs_extfree_item.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_EXTFREE_ITEM_H__
+#define	__XFS_EXTFREE_ITEM_H__
+
+struct xfs_mount;
+struct kmem_zone;
+
+typedef struct xfs_extent {
+	xfs_dfsbno_t	ext_start;
+	xfs_extlen_t	ext_len;
+} xfs_extent_t;
+
+/*
+ * This is the structure used to lay out an efi log item in the
+ * log.  The efi_extents field is a variable size array whose
+ * size is given by efi_nextents.
+ */
+typedef struct xfs_efi_log_format {
+	unsigned short		efi_type;	/* efi log item type */
+	unsigned short		efi_size;	/* size of this item */
+	uint			efi_nextents;	/* # extents to free */
+	__uint64_t		efi_id;		/* efi identifier */
+	xfs_extent_t		efi_extents[1];	/* array of extents to free */
+} xfs_efi_log_format_t;
+
+/*
+ * This is the structure used to lay out an efd log item in the
+ * log.  The efd_extents array is a variable size array whose
+ * size is given by efd_nextents.
+ */
+typedef struct xfs_efd_log_format {
+	unsigned short		efd_type;	/* efd log item type */
+	unsigned short		efd_size;	/* size of this item */
+	uint			efd_nextents;	/* # of extents freed */
+	__uint64_t		efd_efi_id;	/* id of corresponding efi */
+	xfs_extent_t		efd_extents[1];	/* array of extents freed */
+} xfs_efd_log_format_t;
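+
+/*
+ * Illustrative note (derived from the code that uses these structures):
+ * both formats end in a one-element extent array, so the space needed to
+ * log an item carrying N extents is
+ *
+ *	sizeof(xfs_efi_log_format_t) + (N - 1) * sizeof(xfs_extent_t)
+ *
+ * (and likewise for xfs_efd_log_format_t); an item with a single extent
+ * is just the size of the base structure.
+ */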
+
+
+#ifdef __KERNEL__
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define	XFS_EFI_MAX_FAST_EXTENTS	16
+
+/*
+ * Define EFI flags.
+ */
+#define	XFS_EFI_RECOVERED	0x1
+#define	XFS_EFI_COMMITTED	0x2
+#define	XFS_EFI_CANCELED	0x4
+
+/*
+ * This is the "extent free intention" log item.  It is used
+ * to log the fact that some extents need to be freed.  It is
+ * used in conjunction with the "extent free done" log item
+ * described below.
+ */
+typedef struct xfs_efi_log_item {
+	xfs_log_item_t		efi_item;
+	uint			efi_flags;	/* misc flags */
+	uint			efi_next_extent;
+	xfs_efi_log_format_t	efi_format;
+} xfs_efi_log_item_t;
+
+/*
+ * This is the "extent free done" log item.  It is used to log
+ * the fact that some extents earlier mentioned in an efi item
+ * have been freed.
+ */
+typedef struct xfs_efd_log_item {
+	xfs_log_item_t		efd_item;
+	xfs_efi_log_item_t	*efd_efip;
+	uint			efd_next_extent;
+	xfs_efd_log_format_t	efd_format;
+} xfs_efd_log_item_t;
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define	XFS_EFD_MAX_FAST_EXTENTS	16
+
+extern struct kmem_zone	*xfs_efi_zone;
+extern struct kmem_zone	*xfs_efd_zone;
+
+xfs_efi_log_item_t	*xfs_efi_init(struct xfs_mount *, uint);
+xfs_efd_log_item_t	*xfs_efd_init(struct xfs_mount *, xfs_efi_log_item_t *,
+				      uint);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
new file mode 100644
index 0000000..6ee8443
--- /dev/null
+++ b/fs/xfs/xfs_fs.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 1995-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_FS_H__
+#define __XFS_FS_H__
+
+/*
+ * Primary constants and structures for SGI's XFS filesystem.
+ */
+
+#define XFS_NAME	"xfs"
+
+/*
+ * Direct I/O attribute record used with XFS_IOC_DIOINFO
+ * d_miniosz is the min xfer size, xfer size multiple and file seek offset
+ * alignment.
+ */
+#ifndef HAVE_DIOATTR
+struct dioattr {
+	__u32		d_mem;		/* data buffer memory alignment */
+	__u32		d_miniosz;	/* min xfer size		*/
+	__u32		d_maxiosz;	/* max xfer size		*/
+};
+#endif
+
+/*
+ * Structure for XFS_IOC_FSGETXATTR[A] and XFS_IOC_FSSETXATTR.
+ */
+#ifndef HAVE_FSXATTR
+struct fsxattr {
+	__u32		fsx_xflags;	/* xflags field value (get/set) */
+	__u32		fsx_extsize;	/* extsize field value (get/set)*/
+	__u32		fsx_nextents;	/* nextents field value (get)	*/
+	unsigned char	fsx_pad[16];
+};
+#endif
+
+/*
+ * Flags for the bs_xflags/fsx_xflags field
+ * There should be a one-to-one correspondence between these flags and the
+ * XFS_DIFLAG_s.
+ */
+#define XFS_XFLAG_REALTIME	0x00000001	/* data in realtime volume */
+#define XFS_XFLAG_PREALLOC	0x00000002	/* preallocated file extents */
+#define XFS_XFLAG_IMMUTABLE	0x00000008	/* file cannot be modified */
+#define XFS_XFLAG_APPEND	0x00000010	/* all writes append */
+#define XFS_XFLAG_SYNC		0x00000020	/* all writes synchronous */
+#define XFS_XFLAG_NOATIME	0x00000040	/* do not update access time */
+#define XFS_XFLAG_NODUMP	0x00000080	/* do not include in backups */
+#define XFS_XFLAG_RTINHERIT	0x00000100	/* create with rt bit set */
+#define XFS_XFLAG_PROJINHERIT	0x00000200	/* create with parents projid */
+#define XFS_XFLAG_NOSYMLINKS	0x00000400	/* disallow symlink creation */
+#define XFS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this	*/
+
+/*
+ * Structure for XFS_IOC_GETBMAP.
+ * On input, fill in bmv_offset and bmv_length of the first structure
+ * to indicate the area of interest in the file, and bmv_count with the
+ * number of array elements supplied (including the first, header
+ * structure).  The first structure is updated on return to give the
+ * offset and length for the next call; bmv_entries reports how many
+ * entries were filled in.
+ */
+#ifndef HAVE_GETBMAP
+struct getbmap {
+	__s64		bmv_offset;	/* file offset of segment in blocks */
+	__s64		bmv_block;	/* starting block (64-bit daddr_t)  */
+	__s64		bmv_length;	/* length of segment, blocks	    */
+	__s32		bmv_count;	/* # of entries in array incl. 1st  */
+	__s32		bmv_entries;	/* # of entries filled in (output)  */
+};
+#endif
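+
+/*
+ * Illustrative userspace sketch of the calling convention described
+ * above (hypothetical variables, error handling omitted):
+ *
+ *	struct getbmap	map[17];		// header + room for 16 extents
+ *
+ *	memset(map, 0, sizeof(map));
+ *	map[0].bmv_offset = 0;			// start of region of interest
+ *	map[0].bmv_length = nblocks;		// length of region, in blocks
+ *	map[0].bmv_count = 17;			// array elements, incl. header
+ *	ioctl(fd, XFS_IOC_GETBMAP, map);
+ *	// on success, map[0].bmv_entries extents were filled in at map[1..]
+ */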
+
+/*
+ *	Structure for XFS_IOC_GETBMAPX.	 Fields bmv_offset through bmv_entries
+ *	are used exactly as in the getbmap structure.  The getbmapx structure
+ *	has additional bmv_iflags and bmv_oflags fields. The bmv_iflags field
+ *	is only used for the first structure.  It contains input flags
+ *	specifying XFS_IOC_GETBMAPX actions.  The bmv_oflags field is filled
+ *	in by the XFS_IOC_GETBMAPX command for each returned structure after
+ *	the first.
+ */
+#ifndef HAVE_GETBMAPX
+struct getbmapx {
+	__s64		bmv_offset;	/* file offset of segment in blocks */
+	__s64		bmv_block;	/* starting block (64-bit daddr_t)  */
+	__s64		bmv_length;	/* length of segment, blocks	    */
+	__s32		bmv_count;	/* # of entries in array incl. 1st  */
+	__s32		bmv_entries;	/* # of entries filled in (output). */
+	__s32		bmv_iflags;	/* input flags (1st structure)	    */
+	__s32		bmv_oflags;	/* output flags (after 1st structure)*/
+	__s32		bmv_unused1;	/* future use			    */
+	__s32		bmv_unused2;	/* future use			    */
+};
+#endif
+
+/*	bmv_iflags values - set by XFS_IOC_GETBMAPX caller.	*/
+#define BMV_IF_ATTRFORK		0x1	/* return attr fork rather than data */
+#define BMV_IF_NO_DMAPI_READ	0x2	/* Do not generate DMAPI read event  */
+#define BMV_IF_PREALLOC		0x4	/* rtn status BMV_OF_PREALLOC if req */
+#define BMV_IF_VALID	(BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
+#ifdef __KERNEL__
+#define BMV_IF_EXTENDED 0x40000000	/* getbmapx if set */
+#endif
+
+/*	bmv_oflags values - returned for each non-header segment */
+#define BMV_OF_PREALLOC		0x1	/* segment = unwritten pre-allocation */
+
+/*	Convert getbmap <-> getbmapx - move fields from p1 to p2. */
+#define GETBMAP_CONVERT(p1,p2) {	\
+	p2.bmv_offset = p1.bmv_offset;	\
+	p2.bmv_block = p1.bmv_block;	\
+	p2.bmv_length = p1.bmv_length;	\
+	p2.bmv_count = p1.bmv_count;	\
+	p2.bmv_entries = p1.bmv_entries;  }
+
+
+/*
+ * Structure for XFS_IOC_FSSETDM.
+ * For use by backup and restore programs to set the XFS on-disk inode
+ * fields di_dmevmask and di_dmstate.  These must be set to exactly and
+ * only values previously obtained via xfs_bulkstat!  (Specifically the
+ * xfs_bstat_t fields bs_dmevmask and bs_dmstate.)
+ */
+#ifndef HAVE_FSDMIDATA
+struct fsdmidata {
+	__u32		fsd_dmevmask;	/* corresponds to di_dmevmask */
+	__u16		fsd_padding;
+	__u16		fsd_dmstate;	/* corresponds to di_dmstate  */
+};
+#endif
+
+/*
+ * File segment locking set data type for 64 bit access.
+ * Also used for all the RESV/FREE interfaces.
+ */
+typedef struct xfs_flock64 {
+	__s16		l_type;
+	__s16		l_whence;
+	__s64		l_start;
+	__s64		l_len;		/* len == 0 means until end of file */
+	__s32		l_sysid;
+	__u32		l_pid;
+	__s32		l_pad[4];	/* reserve area			    */
+} xfs_flock64_t;
+
+/*
+ * Output for XFS_IOC_FSGEOMETRY_V1
+ */
+typedef struct xfs_fsop_geom_v1 {
+	__u32		blocksize;	/* filesystem (data) block size */
+	__u32		rtextsize;	/* realtime extent size		*/
+	__u32		agblocks;	/* fsblocks in an AG		*/
+	__u32		agcount;	/* number of allocation groups	*/
+	__u32		logblocks;	/* fsblocks in the log		*/
+	__u32		sectsize;	/* (data) sector size, bytes	*/
+	__u32		inodesize;	/* inode size in bytes		*/
+	__u32		imaxpct;	/* max allowed inode space(%)	*/
+	__u64		datablocks;	/* fsblocks in data subvolume	*/
+	__u64		rtblocks;	/* fsblocks in realtime subvol	*/
+	__u64		rtextents;	/* rt extents in realtime subvol*/
+	__u64		logstart;	/* starting fsblock of the log	*/
+	unsigned char	uuid[16];	/* unique id of the filesystem	*/
+	__u32		sunit;		/* stripe unit, fsblocks	*/
+	__u32		swidth;		/* stripe width, fsblocks	*/
+	__s32		version;	/* structure version		*/
+	__u32		flags;		/* superblock version flags	*/
+	__u32		logsectsize;	/* log sector size, bytes	*/
+	__u32		rtsectsize;	/* realtime sector size, bytes	*/
+	__u32		dirblocksize;	/* directory block size, bytes	*/
+} xfs_fsop_geom_v1_t;
+
+/*
+ * Output for XFS_IOC_FSGEOMETRY
+ */
+typedef struct xfs_fsop_geom {
+	__u32		blocksize;	/* filesystem (data) block size */
+	__u32		rtextsize;	/* realtime extent size		*/
+	__u32		agblocks;	/* fsblocks in an AG		*/
+	__u32		agcount;	/* number of allocation groups	*/
+	__u32		logblocks;	/* fsblocks in the log		*/
+	__u32		sectsize;	/* (data) sector size, bytes	*/
+	__u32		inodesize;	/* inode size in bytes		*/
+	__u32		imaxpct;	/* max allowed inode space(%)	*/
+	__u64		datablocks;	/* fsblocks in data subvolume	*/
+	__u64		rtblocks;	/* fsblocks in realtime subvol	*/
+	__u64		rtextents;	/* rt extents in realtime subvol*/
+	__u64		logstart;	/* starting fsblock of the log	*/
+	unsigned char	uuid[16];	/* unique id of the filesystem	*/
+	__u32		sunit;		/* stripe unit, fsblocks	*/
+	__u32		swidth;		/* stripe width, fsblocks	*/
+	__s32		version;	/* structure version		*/
+	__u32		flags;		/* superblock version flags	*/
+	__u32		logsectsize;	/* log sector size, bytes	*/
+	__u32		rtsectsize;	/* realtime sector size, bytes	*/
+	__u32		dirblocksize;	/* directory block size, bytes	*/
+	__u32		logsunit;	/* log stripe unit, bytes */
+} xfs_fsop_geom_t;
+
+/* Output for XFS_FS_COUNTS */
+typedef struct xfs_fsop_counts {
+	__u64	freedata;	/* free data section blocks */
+	__u64	freertx;	/* free rt extents */
+	__u64	freeino;	/* free inodes */
+	__u64	allocino;	/* total allocated inodes */
+} xfs_fsop_counts_t;
+
+/* Input/Output for XFS_GET_RESBLKS and XFS_SET_RESBLKS */
+typedef struct xfs_fsop_resblks {
+	__u64  resblks;
+	__u64  resblks_avail;
+} xfs_fsop_resblks_t;
+
+#define XFS_FSOP_GEOM_VERSION	0
+
+#define XFS_FSOP_GEOM_FLAGS_ATTR	0x0001	/* attributes in use	*/
+#define XFS_FSOP_GEOM_FLAGS_NLINK	0x0002	/* 32-bit nlink values	*/
+#define XFS_FSOP_GEOM_FLAGS_QUOTA	0x0004	/* quotas enabled	*/
+#define XFS_FSOP_GEOM_FLAGS_IALIGN	0x0008	/* inode alignment	*/
+#define XFS_FSOP_GEOM_FLAGS_DALIGN	0x0010	/* large data alignment */
+#define XFS_FSOP_GEOM_FLAGS_SHARED	0x0020	/* read-only shared	*/
+#define XFS_FSOP_GEOM_FLAGS_EXTFLG	0x0040	/* special extent flag	*/
+#define XFS_FSOP_GEOM_FLAGS_DIRV2	0x0080	/* directory version 2	*/
+#define XFS_FSOP_GEOM_FLAGS_LOGV2	0x0100	/* log format version 2	*/
+#define XFS_FSOP_GEOM_FLAGS_SECTOR	0x0200	/* sector sizes >1BB	*/
+
+
+/*
+ * Minimum and maximum sizes needed for growth checks
+ */
+#define XFS_MIN_AG_BLOCKS	64
+#define XFS_MIN_LOG_BLOCKS	512
+#define XFS_MAX_LOG_BLOCKS	(64 * 1024)
+#define XFS_MIN_LOG_BYTES	(256 * 1024)
+#define XFS_MAX_LOG_BYTES	(128 * 1024 * 1024)
+
+/*
+ * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
+ */
+typedef struct xfs_growfs_data {
+	__u64		newblocks;	/* new data subvol size, fsblocks */
+	__u32		imaxpct;	/* new inode space percentage limit */
+} xfs_growfs_data_t;
+
+typedef struct xfs_growfs_log {
+	__u32		newblocks;	/* new log size, fsblocks */
+	__u32		isint;		/* 1 if new log is internal */
+} xfs_growfs_log_t;
+
+typedef struct xfs_growfs_rt {
+	__u64		newblocks;	/* new realtime size, fsblocks */
+	__u32		extsize;	/* new realtime extent size, fsblocks */
+} xfs_growfs_rt_t;
+
+
+/*
+ * Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE
+ */
+typedef struct xfs_bstime {
+	time_t		tv_sec;		/* seconds		*/
+	__s32		tv_nsec;	/* and nanoseconds	*/
+} xfs_bstime_t;
+
+typedef struct xfs_bstat {
+	__u64		bs_ino;		/* inode number			*/
+	__u16		bs_mode;	/* type and mode		*/
+	__u16		bs_nlink;	/* number of links		*/
+	__u32		bs_uid;		/* user id			*/
+	__u32		bs_gid;		/* group id			*/
+	__u32		bs_rdev;	/* device value			*/
+	__s32		bs_blksize;	/* block size			*/
+	__s64		bs_size;	/* file size			*/
+	xfs_bstime_t	bs_atime;	/* access time			*/
+	xfs_bstime_t	bs_mtime;	/* modify time			*/
+	xfs_bstime_t	bs_ctime;	/* inode change time		*/
+	int64_t		bs_blocks;	/* number of blocks		*/
+	__u32		bs_xflags;	/* extended flags		*/
+	__s32		bs_extsize;	/* extent size			*/
+	__s32		bs_extents;	/* number of extents		*/
+	__u32		bs_gen;		/* generation count		*/
+	__u16		bs_projid;	/* project id			*/
+	unsigned char	bs_pad[14];	/* pad space, unused		*/
+	__u32		bs_dmevmask;	/* DMIG event mask		*/
+	__u16		bs_dmstate;	/* DMIG state info		*/
+	__u16		bs_aextents;	/* attribute number of extents	*/
+} xfs_bstat_t;
+
+/*
+ * The user-level BulkStat Request interface structure.
+ */
+typedef struct xfs_fsop_bulkreq {
+	__u64		__user *lastip;	/* last inode # pointer		*/
+	__s32		icount;		/* count of entries in buffer	*/
+	void		__user *ubuffer;/* user buffer for inode desc.	*/
+	__s32		__user *ocount;	/* output count pointer		*/
+} xfs_fsop_bulkreq_t;
+
+
+/*
+ * Structures returned from xfs_inumbers routine (XFS_IOC_FSINUMBERS).
+ */
+typedef struct xfs_inogrp {
+	__u64		xi_startino;	/* starting inode number	*/
+	__s32		xi_alloccount;	/* # bits set in allocmask	*/
+	__u64		xi_allocmask;	/* mask of allocated inodes	*/
+} xfs_inogrp_t;
+
+
+/*
+ * Error injection.
+ */
+typedef struct xfs_error_injection {
+	__s32		fd;
+	__s32		errtag;
+} xfs_error_injection_t;
+
+
+/*
+ * The user-level Handle Request interface structure.
+ */
+typedef struct xfs_fsop_handlereq {
+	__u32		fd;		/* fd for FD_TO_HANDLE		*/
+	void		__user *path;	/* user pathname		*/
+	__u32		oflags;		/* open flags			*/
+	void		__user *ihandle;/* user supplied handle		*/
+	__u32		ihandlen;	/* user supplied length		*/
+	void		__user *ohandle;/* user buffer for handle	*/
+	__u32		__user *ohandlen;/* user buffer length		*/
+} xfs_fsop_handlereq_t;
+
+/*
+ * Compound structures for passing args through Handle Request interfaces
+ * xfs_fssetdm_by_handle, xfs_attrlist_by_handle, xfs_attrmulti_by_handle
+ * - ioctls: XFS_IOC_FSSETDM_BY_HANDLE, XFS_IOC_ATTRLIST_BY_HANDLE, and
+ *	     XFS_IOC_ATTRMULTI_BY_HANDLE
+ */
+
+typedef struct xfs_fsop_setdm_handlereq {
+	struct xfs_fsop_handlereq	hreq;	/* handle information	*/
+	struct fsdmidata		__user *data;	/* DMAPI data	*/
+} xfs_fsop_setdm_handlereq_t;
+
+typedef struct xfs_attrlist_cursor {
+	__u32		opaque[4];
+} xfs_attrlist_cursor_t;
+
+typedef struct xfs_fsop_attrlist_handlereq {
+	struct xfs_fsop_handlereq	hreq; /* handle interface structure */
+	struct xfs_attrlist_cursor	pos; /* opaque cookie, list offset */
+	__u32				flags;	/* which namespace to use */
+	__u32				buflen;	/* length of buffer supplied */
+	void				__user *buffer;	/* returned names */
+} xfs_fsop_attrlist_handlereq_t;
+
+typedef struct xfs_attr_multiop {
+	__u32		am_opcode;
+	__s32		am_error;
+	void		__user *am_attrname;
+	void		__user *am_attrvalue;
+	__u32		am_length;
+	__u32		am_flags;
+} xfs_attr_multiop_t;
+
+typedef struct xfs_fsop_attrmulti_handlereq {
+	struct xfs_fsop_handlereq	hreq; /* handle interface structure */
+	__u32				opcount;/* count of following multiop */
+	struct xfs_attr_multiop		__user *ops; /* attr_multi data */
+} xfs_fsop_attrmulti_handlereq_t;
+
+/*
+ * per machine unique filesystem identifier types.
+ */
+typedef struct { __u32 val[2]; } xfs_fsid_t; /* file system id type */
+
+
+#ifndef HAVE_FID
+#define MAXFIDSZ	46
+
+typedef struct fid {
+	__u16		fid_len;		/* length of data in bytes */
+	unsigned char	fid_data[MAXFIDSZ];	/* data (fid_len worth)  */
+} fid_t;
+#endif
+
+typedef struct xfs_fid {
+	__u16	xfs_fid_len;		/* length of remainder	*/
+	__u16	xfs_fid_pad;
+	__u32	xfs_fid_gen;		/* generation number	*/
+	__u64	xfs_fid_ino;		/* 64 bits inode number */
+} xfs_fid_t;
+
+typedef struct xfs_fid2 {
+	__u16	fid_len;	/* length of remainder */
+	__u16	fid_pad;	/* padding, must be zero */
+	__u32	fid_gen;	/* generation number */
+	__u64	fid_ino;	/* inode number */
+} xfs_fid2_t;
+
+typedef struct xfs_handle {
+	union {
+		__s64	    align;	/* force alignment of ha_fid	 */
+		xfs_fsid_t  _ha_fsid;	/* unique file system identifier */
+	} ha_u;
+	xfs_fid_t	ha_fid;		/* file system specific file ID	 */
+} xfs_handle_t;
+#define ha_fsid ha_u._ha_fsid
+
+#define XFS_HSIZE(handle)	(((char *) &(handle).ha_fid.xfs_fid_pad	 \
+				 - (char *) &(handle))			  \
+				 + (handle).ha_fid.xfs_fid_len)
+
+#define XFS_HANDLE_CMP(h1, h2)	memcmp(h1, h2, sizeof(xfs_handle_t))
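+
+/*
+ * Illustrative note (not in the original source): XFS_HSIZE() above
+ * returns the number of meaningful bytes in a handle - everything up to
+ * and including the xfs_fid_len field, plus xfs_fid_len further bytes of
+ * fid payload.  For example, a fully populated handle whose xfs_fid_len
+ * covers the pad, gen and ino fields (2 + 4 + 8 = 14 bytes) yields
+ * 8 (ha_u) + 2 (xfs_fid_len) + 14 = 24 bytes, i.e. sizeof(xfs_handle_t),
+ * assuming no padding beyond the 8-byte union alignment.
+ */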
+
+#define FSHSIZE		sizeof(fsid_t)
+
+/*
+ * Flags for the going-down (XFS_IOC_GOINGDOWN) operation
+ */
+#define XFS_FSOP_GOING_FLAGS_DEFAULT		0x0	/* going down */
+#define XFS_FSOP_GOING_FLAGS_LOGFLUSH		0x1	/* flush log but not data */
+#define XFS_FSOP_GOING_FLAGS_NOLOGFLUSH		0x2	/* don't flush log nor data */
+
+/*
+ * ioctl commands that are used by Linux filesystems
+ */
+#define XFS_IOC_GETXFLAGS	_IOR('f', 1, long)
+#define XFS_IOC_SETXFLAGS	_IOW('f', 2, long)
+#define XFS_IOC_GETVERSION	_IOR('v', 1, long)
+
+/*
+ * ioctl commands that replace IRIX fcntl()'s
+ * For 'documentation' purposes more than anything else,
+ * the "cmd #" field reflects the IRIX fcntl number.
+ */
+#define XFS_IOC_ALLOCSP		_IOW ('X', 10, struct xfs_flock64)
+#define XFS_IOC_FREESP		_IOW ('X', 11, struct xfs_flock64)
+#define XFS_IOC_DIOINFO		_IOR ('X', 30, struct dioattr)
+#define XFS_IOC_FSGETXATTR	_IOR ('X', 31, struct fsxattr)
+#define XFS_IOC_FSSETXATTR	_IOW ('X', 32, struct fsxattr)
+#define XFS_IOC_ALLOCSP64	_IOW ('X', 36, struct xfs_flock64)
+#define XFS_IOC_FREESP64	_IOW ('X', 37, struct xfs_flock64)
+#define XFS_IOC_GETBMAP		_IOWR('X', 38, struct getbmap)
+#define XFS_IOC_FSSETDM		_IOW ('X', 39, struct fsdmidata)
+#define XFS_IOC_RESVSP		_IOW ('X', 40, struct xfs_flock64)
+#define XFS_IOC_UNRESVSP	_IOW ('X', 41, struct xfs_flock64)
+#define XFS_IOC_RESVSP64	_IOW ('X', 42, struct xfs_flock64)
+#define XFS_IOC_UNRESVSP64	_IOW ('X', 43, struct xfs_flock64)
+#define XFS_IOC_GETBMAPA	_IOWR('X', 44, struct getbmap)
+#define XFS_IOC_FSGETXATTRA	_IOR ('X', 45, struct fsxattr)
+/*	XFS_IOC_SETBIOSIZE ---- deprecated 46	   */
+/*	XFS_IOC_GETBIOSIZE ---- deprecated 47	   */
+#define XFS_IOC_GETBMAPX	_IOWR('X', 56, struct getbmap)
+
+/*
+ * ioctl commands that replace IRIX syssgi()'s
+ */
+#define XFS_IOC_FSGEOMETRY_V1	     _IOR ('X', 100, struct xfs_fsop_geom_v1)
+#define XFS_IOC_FSBULKSTAT	     _IOWR('X', 101, struct xfs_fsop_bulkreq)
+#define XFS_IOC_FSBULKSTAT_SINGLE    _IOWR('X', 102, struct xfs_fsop_bulkreq)
+#define XFS_IOC_FSINUMBERS	     _IOWR('X', 103, struct xfs_fsop_bulkreq)
+#define XFS_IOC_PATH_TO_FSHANDLE     _IOWR('X', 104, struct xfs_fsop_handlereq)
+#define XFS_IOC_PATH_TO_HANDLE	     _IOWR('X', 105, struct xfs_fsop_handlereq)
+#define XFS_IOC_FD_TO_HANDLE	     _IOWR('X', 106, struct xfs_fsop_handlereq)
+#define XFS_IOC_OPEN_BY_HANDLE	     _IOWR('X', 107, struct xfs_fsop_handlereq)
+#define XFS_IOC_READLINK_BY_HANDLE   _IOWR('X', 108, struct xfs_fsop_handlereq)
+#define XFS_IOC_SWAPEXT		     _IOWR('X', 109, struct xfs_swapext)
+#define XFS_IOC_FSGROWFSDATA	     _IOW ('X', 110, struct xfs_growfs_data)
+#define XFS_IOC_FSGROWFSLOG	     _IOW ('X', 111, struct xfs_growfs_log)
+#define XFS_IOC_FSGROWFSRT	     _IOW ('X', 112, struct xfs_growfs_rt)
+#define XFS_IOC_FSCOUNTS	     _IOR ('X', 113, struct xfs_fsop_counts)
+#define XFS_IOC_SET_RESBLKS	     _IOWR('X', 114, struct xfs_fsop_resblks)
+#define XFS_IOC_GET_RESBLKS	     _IOR ('X', 115, struct xfs_fsop_resblks)
+#define XFS_IOC_ERROR_INJECTION	     _IOW ('X', 116, struct xfs_error_injection)
+#define XFS_IOC_ERROR_CLEARALL	     _IOW ('X', 117, struct xfs_error_injection)
+/*	XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118	 */
+#define XFS_IOC_FREEZE		     _IOWR('X', 119, int)
+#define XFS_IOC_THAW		     _IOWR('X', 120, int)
+#define XFS_IOC_FSSETDM_BY_HANDLE    _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
+#define XFS_IOC_ATTRLIST_BY_HANDLE   _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
+#define XFS_IOC_ATTRMULTI_BY_HANDLE  _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
+#define XFS_IOC_FSGEOMETRY	     _IOR ('X', 124, struct xfs_fsop_geom)
+#define XFS_IOC_GOINGDOWN	     _IOR ('X', 125, __uint32_t)
+/*	XFS_IOC_GETFSUUID ---------- deprecated 140	 */
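+
+/*
+ * Illustrative userspace sketch (hypothetical variables, error handling
+ * omitted) for one of the ioctls above, XFS_IOC_FSGEOMETRY:
+ *
+ *	struct xfs_fsop_geom	geo;
+ *
+ *	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) == 0)
+ *		printf("%u-byte blocks, %u AGs\n", geo.blocksize, geo.agcount);
+ */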
+
+
+#ifndef HAVE_BBMACROS
+/*
+ * Block I/O parameterization.	A basic block (BB) is the lowest size of
+ * filesystem allocation, and must equal 512.  Length units given to bio
+ * routines are in BB's.
+ */
+#define BBSHIFT		9
+#define BBSIZE		(1<<BBSHIFT)
+#define BBMASK		(BBSIZE-1)
+#define BTOBB(bytes)	(((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
+#define BTOBBT(bytes)	((__u64)(bytes) >> BBSHIFT)
+#define BBTOB(bbs)	((bbs) << BBSHIFT)
+#endif
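+
+/*
+ * Worked examples (illustrative only) for the basic block macros above,
+ * with BBSHIFT = 9 and therefore BBSIZE = 512:
+ *
+ *	BTOBB(1023)  == 2	round a byte count up to whole BBs
+ *	BTOBBT(1023) == 1	truncate a byte count to whole BBs
+ *	BBTOB(3)     == 1536	convert BBs back to bytes
+ */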
+
+#endif	/* __XFS_FS_H__ */
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
new file mode 100644
index 0000000..2121305
--- /dev/null
+++ b/fs/xfs/xfs_fsops.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ag.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_error.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_fsops.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_refcache.h"
+#include "xfs_trans_space.h"
+#include "xfs_rtalloc.h"
+#include "xfs_dir2.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
+
+/*
+ * File system operations
+ */
+
+int
+xfs_fs_geometry(
+	xfs_mount_t		*mp,
+	xfs_fsop_geom_t		*geo,
+	int			new_version)
+{
+	geo->blocksize = mp->m_sb.sb_blocksize;
+	geo->rtextsize = mp->m_sb.sb_rextsize;
+	geo->agblocks = mp->m_sb.sb_agblocks;
+	geo->agcount = mp->m_sb.sb_agcount;
+	geo->logblocks = mp->m_sb.sb_logblocks;
+	geo->sectsize = mp->m_sb.sb_sectsize;
+	geo->inodesize = mp->m_sb.sb_inodesize;
+	geo->imaxpct = mp->m_sb.sb_imax_pct;
+	geo->datablocks = mp->m_sb.sb_dblocks;
+	geo->rtblocks = mp->m_sb.sb_rblocks;
+	geo->rtextents = mp->m_sb.sb_rextents;
+	geo->logstart = mp->m_sb.sb_logstart;
+	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
+	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
+	if (new_version >= 2) {
+		geo->sunit = mp->m_sb.sb_unit;
+		geo->swidth = mp->m_sb.sb_width;
+	}
+	if (new_version >= 3) {
+		geo->version = XFS_FSOP_GEOM_VERSION;
+		geo->flags =
+			(XFS_SB_VERSION_HASATTR(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
+			(XFS_SB_VERSION_HASNLINK(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
+			(XFS_SB_VERSION_HASQUOTA(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
+			(XFS_SB_VERSION_HASALIGN(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
+			(XFS_SB_VERSION_HASDALIGN(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
+			(XFS_SB_VERSION_HASSHARED(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
+			(XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
+			(XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
+			(XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_SECTOR : 0);
+		geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
+				mp->m_sb.sb_logsectsize : BBSIZE;
+		geo->rtsectsize = mp->m_sb.sb_blocksize;
+		geo->dirblocksize = mp->m_dirblksize;
+	}
+	if (new_version >= 4) {
+		geo->flags |=
+			(XFS_SB_VERSION_HASLOGV2(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
+		geo->logsunit = mp->m_sb.sb_logsunit;
+	}
+	return 0;
+}
+
+static int
+xfs_growfs_data_private(
+	xfs_mount_t		*mp,		/* mount point for filesystem */
+	xfs_growfs_data_t	*in)		/* growfs data input struct */
+{
+	xfs_agf_t		*agf;
+	xfs_agi_t		*agi;
+	xfs_agnumber_t		agno;
+	xfs_extlen_t		agsize;
+	xfs_extlen_t		tmpsize;
+	xfs_alloc_rec_t		*arec;
+	xfs_btree_sblock_t	*block;
+	xfs_buf_t		*bp;
+	int			bucket;
+	int			dpct;
+	int			error;
+	xfs_agnumber_t		nagcount;
+	xfs_agnumber_t		nagimax = 0;
+	xfs_rfsblock_t		nb, nb_mod;
+	xfs_rfsblock_t		new;
+	xfs_rfsblock_t		nfree;
+	xfs_agnumber_t		oagcount;
+	int			pct;
+	xfs_sb_t		*sbp;
+	xfs_trans_t		*tp;
+
+	nb = in->newblocks;
+	pct = in->imaxpct;
+	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
+		return XFS_ERROR(EINVAL);
+	dpct = pct - mp->m_sb.sb_imax_pct;
+	error = xfs_read_buf(mp, mp->m_ddev_targp,
+			XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
+			XFS_FSS_TO_BB(mp, 1), 0, &bp);
+	if (error)
+		return error;
+	ASSERT(bp);
+	xfs_buf_relse(bp);
+
+	new = nb;	/* use new as a temporary here */
+	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
+	nagcount = new + (nb_mod != 0);
+	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
+		nagcount--;
+		nb = nagcount * mp->m_sb.sb_agblocks;
+		if (nb < mp->m_sb.sb_dblocks)
+			return XFS_ERROR(EINVAL);
+	}
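+	/*
+	 * Illustrative example (not in the original source): growing to
+	 * nb = 1000 blocks with sb_agblocks = 400 gives nb_mod = 200 and
+	 * nagcount = 3, and the 200-block runt AG is kept because it is
+	 * at least XFS_MIN_AG_BLOCKS.  A remainder smaller than that would
+	 * have been dropped above, rounding nb down to whole AGs.
+	 */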
+	new = nb - mp->m_sb.sb_dblocks;
+	oagcount = mp->m_sb.sb_agcount;
+	if (nagcount > oagcount) {
+		down_write(&mp->m_peraglock);
+		mp->m_perag = kmem_realloc(mp->m_perag,
+			sizeof(xfs_perag_t) * nagcount,
+			sizeof(xfs_perag_t) * oagcount,
+			KM_SLEEP);
+		memset(&mp->m_perag[oagcount], 0,
+			(nagcount - oagcount) * sizeof(xfs_perag_t));
+		mp->m_flags |= XFS_MOUNT_32BITINODES;
+		nagimax = xfs_initialize_perag(mp, nagcount);
+		up_write(&mp->m_peraglock);
+	}
+	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
+	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
+			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	nfree = 0;
+	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
+		/*
+		 * AG freelist header block
+		 */
+		bp = xfs_buf_get(mp->m_ddev_targp,
+				  XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
+				  XFS_FSS_TO_BB(mp, 1), 0);
+		agf = XFS_BUF_TO_AGF(bp);
+		memset(agf, 0, mp->m_sb.sb_sectsize);
+		INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC);
+		INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION);
+		INT_SET(agf->agf_seqno, ARCH_CONVERT, agno);
+		if (agno == nagcount - 1)
+			agsize =
+				nb -
+				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
+		else
+			agsize = mp->m_sb.sb_agblocks;
+		INT_SET(agf->agf_length, ARCH_CONVERT, agsize);
+		INT_SET(agf->agf_roots[XFS_BTNUM_BNOi], ARCH_CONVERT,
+			XFS_BNO_BLOCK(mp));
+		INT_SET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT,
+			XFS_CNT_BLOCK(mp));
+		INT_SET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT, 1);
+		INT_SET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT, 1);
+		agf->agf_flfirst = 0;
+		INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1);
+		agf->agf_flcount = 0;
+		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
+		INT_SET(agf->agf_freeblks, ARCH_CONVERT, tmpsize);
+		INT_SET(agf->agf_longest, ARCH_CONVERT, tmpsize);
+		error = xfs_bwrite(mp, bp);
+		if (error) {
+			goto error0;
+		}
+		/*
+		 * AG inode header block
+		 */
+		bp = xfs_buf_get(mp->m_ddev_targp,
+				  XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
+				  XFS_FSS_TO_BB(mp, 1), 0);
+		agi = XFS_BUF_TO_AGI(bp);
+		memset(agi, 0, mp->m_sb.sb_sectsize);
+		INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC);
+		INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION);
+		INT_SET(agi->agi_seqno, ARCH_CONVERT, agno);
+		INT_SET(agi->agi_length, ARCH_CONVERT, agsize);
+		agi->agi_count = 0;
+		INT_SET(agi->agi_root, ARCH_CONVERT, XFS_IBT_BLOCK(mp));
+		INT_SET(agi->agi_level, ARCH_CONVERT, 1);
+		agi->agi_freecount = 0;
+		INT_SET(agi->agi_newino, ARCH_CONVERT, NULLAGINO);
+		INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO);
+		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
+			INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT,
+				NULLAGINO);
+		error = xfs_bwrite(mp, bp);
+		if (error) {
+			goto error0;
+		}
+		/*
+		 * BNO btree root block
+		 */
+		bp = xfs_buf_get(mp->m_ddev_targp,
+			XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
+			BTOBB(mp->m_sb.sb_blocksize), 0);
+		block = XFS_BUF_TO_SBLOCK(bp);
+		memset(block, 0, mp->m_sb.sb_blocksize);
+		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC);
+		block->bb_level = 0;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
+		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
+		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
+			block, 1, mp->m_alloc_mxr[0]);
+		INT_SET(arec->ar_startblock, ARCH_CONVERT,
+			XFS_PREALLOC_BLOCKS(mp));
+		INT_SET(arec->ar_blockcount, ARCH_CONVERT,
+			agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT));
+		error = xfs_bwrite(mp, bp);
+		if (error) {
+			goto error0;
+		}
+		/*
+		 * CNT btree root block
+		 */
+		bp = xfs_buf_get(mp->m_ddev_targp,
+			XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
+			BTOBB(mp->m_sb.sb_blocksize), 0);
+		block = XFS_BUF_TO_SBLOCK(bp);
+		memset(block, 0, mp->m_sb.sb_blocksize);
+		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC);
+		block->bb_level = 0;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
+		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
+		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
+			block, 1, mp->m_alloc_mxr[0]);
+		INT_SET(arec->ar_startblock, ARCH_CONVERT,
+			XFS_PREALLOC_BLOCKS(mp));
+		INT_SET(arec->ar_blockcount, ARCH_CONVERT,
+			agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT));
+		nfree += INT_GET(arec->ar_blockcount, ARCH_CONVERT);
+		error = xfs_bwrite(mp, bp);
+		if (error) {
+			goto error0;
+		}
+		/*
+		 * INO btree root block
+		 */
+		bp = xfs_buf_get(mp->m_ddev_targp,
+			XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
+			BTOBB(mp->m_sb.sb_blocksize), 0);
+		block = XFS_BUF_TO_SBLOCK(bp);
+		memset(block, 0, mp->m_sb.sb_blocksize);
+		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
+		block->bb_level = 0;
+		block->bb_numrecs = 0;
+		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
+		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		error = xfs_bwrite(mp, bp);
+		if (error) {
+			goto error0;
+		}
+	}
+	xfs_trans_agblocks_delta(tp, nfree);
+	/*
+	 * There are new blocks in the old last a.g.
+	 */
+	if (new) {
+		/*
+		 * Change the agi length.
+		 */
+		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
+		if (error) {
+			goto error0;
+		}
+		ASSERT(bp);
+		agi = XFS_BUF_TO_AGI(bp);
+		INT_MOD(agi->agi_length, ARCH_CONVERT, new);
+		ASSERT(nagcount == oagcount ||
+		       INT_GET(agi->agi_length, ARCH_CONVERT) ==
+				mp->m_sb.sb_agblocks);
+		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
+		/*
+		 * Change agf length.
+		 */
+		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
+		if (error) {
+			goto error0;
+		}
+		ASSERT(bp);
+		agf = XFS_BUF_TO_AGF(bp);
+		INT_MOD(agf->agf_length, ARCH_CONVERT, new);
+		ASSERT(INT_GET(agf->agf_length, ARCH_CONVERT) ==
+				INT_GET(agi->agi_length, ARCH_CONVERT));
+		/*
+		 * Free the new space.
+		 */
+		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
+			INT_GET(agf->agf_length, ARCH_CONVERT) - new), new);
+		if (error) {
+			goto error0;
+		}
+	}
+	if (nagcount > oagcount)
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
+	if (nb > mp->m_sb.sb_dblocks)
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
+				 nb - mp->m_sb.sb_dblocks);
+	if (nfree)
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
+	if (dpct)
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
+	error = xfs_trans_commit(tp, 0, NULL);
+	if (error) {
+		return error;
+	}
+	/* New allocation groups fully initialized, so update mount struct */
+	if (nagimax)
+		mp->m_maxagi = nagimax;
+	if (mp->m_sb.sb_imax_pct) {
+		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
+		do_div(icount, 100);
+		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
+	} else
+		mp->m_maxicount = 0;
+	for (agno = 1; agno < nagcount; agno++) {
+		error = xfs_read_buf(mp, mp->m_ddev_targp,
+				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
+				  XFS_FSS_TO_BB(mp, 1), 0, &bp);
+		if (error) {
+			xfs_fs_cmn_err(CE_WARN, mp,
+			"error %d reading secondary superblock for ag %d",
+				error, agno);
+			break;
+		}
+		sbp = XFS_BUF_TO_SBP(bp);
+		xfs_xlatesb(sbp, &mp->m_sb, -1, XFS_SB_ALL_BITS);
+		/*
+		 * If we get an error writing out the alternate superblocks,
+		 * just issue a warning and continue.  The real work is
+		 * already done and committed.
+		 */
+		if (!(error = xfs_bwrite(mp, bp))) {
+			continue;
+		} else {
+			xfs_fs_cmn_err(CE_WARN, mp,
+		"write error %d updating secondary superblock for ag %d",
+				error, agno);
+			break; /* no point in continuing */
+		}
+	}
+	return 0;
+
+ error0:
+	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+	return error;
+}
+
+static int
+xfs_growfs_log_private(
+	xfs_mount_t		*mp,	/* mount point for filesystem */
+	xfs_growfs_log_t	*in)	/* growfs log input struct */
+{
+	xfs_extlen_t		nb;
+
+	nb = in->newblocks;
+	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
+		return XFS_ERROR(EINVAL);
+	if (nb == mp->m_sb.sb_logblocks &&
+	    in->isint == (mp->m_sb.sb_logstart != 0))
+		return XFS_ERROR(EINVAL);
+	/*
+	 * Moving the log is hard: we would need new interfaces to sync
+	 * the log first and to hold off all activity while moving it.
+	 * We could have a shorter or longer log in the same space, or
+	 * transform an internal log into an external one or vice versa.
+	 */
+	return XFS_ERROR(ENOSYS);
+}
+
+/*
+ * The protected versions of the growfs functions acquire and release locks
+ * on the mount point.  They are exported through the ioctls
+ * XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
+ */
+
+
+int
+xfs_growfs_data(
+	xfs_mount_t		*mp,
+	xfs_growfs_data_t	*in)
+{
+	int error;
+	if (!cpsema(&mp->m_growlock))
+		return XFS_ERROR(EWOULDBLOCK);
+	error = xfs_growfs_data_private(mp, in);
+	vsema(&mp->m_growlock);
+	return error;
+}
+
+int
+xfs_growfs_log(
+	xfs_mount_t		*mp,
+	xfs_growfs_log_t	*in)
+{
+	int error;
+	if (!cpsema(&mp->m_growlock))
+		return XFS_ERROR(EWOULDBLOCK);
+	error = xfs_growfs_log_private(mp, in);
+	vsema(&mp->m_growlock);
+	return error;
+}
+
+/*
+ * exported through ioctl XFS_IOC_FSCOUNTS
+ */
+
+int
+xfs_fs_counts(
+	xfs_mount_t		*mp,
+	xfs_fsop_counts_t	*cnt)
+{
+	unsigned long	s;
+
+	s = XFS_SB_LOCK(mp);
+	cnt->freedata = mp->m_sb.sb_fdblocks;
+	cnt->freertx = mp->m_sb.sb_frextents;
+	cnt->freeino = mp->m_sb.sb_ifree;
+	cnt->allocino = mp->m_sb.sb_icount;
+	XFS_SB_UNLOCK(mp, s);
+	return 0;
+}
+
+/*
+ * Exported through the ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS.
+ *
+ * xfs_reserve_blocks is called to set m_resblks in the in-core mount
+ * table.  The number of unused reserved blocks is kept in
+ * m_resblks_avail.
+ *
+ * Reserve the requested number of blocks if available.  Otherwise reserve
+ * as many as possible to satisfy the request.  The actual number reserved
+ * is returned in outval.
+ *
+ * A null inval pointer indicates that only the currently available
+ * reserved blocks should be reported; no settings are changed.
+ */
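+
+/*
+ * Illustrative example (not part of the original comment): with
+ * m_resblks = 100, m_resblks_avail = 100 and a new request of 40, the
+ * 60 unused blocks are returned to sb_fdblocks and m_resblks drops to
+ * 40.  Raising the request instead pulls the difference out of
+ * sb_fdblocks, or as much of it as is actually free.
+ */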
+
+int
+xfs_reserve_blocks(
+	xfs_mount_t             *mp,
+	__uint64_t              *inval,
+	xfs_fsop_resblks_t      *outval)
+{
+	__int64_t		lcounter, delta;
+	__uint64_t		request;
+	unsigned long		s;
+
+	/* If inval is null, report current values and return */
+
+	if (inval == (__uint64_t *)NULL) {
+		outval->resblks = mp->m_resblks;
+		outval->resblks_avail = mp->m_resblks_avail;
+		return(0);
+	}
+
+	request = *inval;
+	s = XFS_SB_LOCK(mp);
+
+	/*
+	 * If our previous reservation was larger than the current value,
+	 * then move any unused blocks back to the free pool.
+	 */
+
+	if (mp->m_resblks > request) {
+		lcounter = mp->m_resblks_avail - request;
+		if (lcounter  > 0) {		/* release unused blocks */
+			mp->m_sb.sb_fdblocks += lcounter;
+			mp->m_resblks_avail -= lcounter;
+		}
+		mp->m_resblks = request;
+	} else {
+		delta = request - mp->m_resblks;
+		lcounter = mp->m_sb.sb_fdblocks - delta;
+		if (lcounter < 0) {
+			/* We can't satisfy the request, just get what we can */
+			mp->m_resblks += mp->m_sb.sb_fdblocks;
+			mp->m_resblks_avail += mp->m_sb.sb_fdblocks;
+			mp->m_sb.sb_fdblocks = 0;
+		} else {
+			mp->m_sb.sb_fdblocks = lcounter;
+			mp->m_resblks = request;
+			mp->m_resblks_avail += delta;
+		}
+	}
+
+	outval->resblks = mp->m_resblks;
+	outval->resblks_avail = mp->m_resblks_avail;
+	XFS_SB_UNLOCK(mp, s);
+	return(0);
+}
+
+void
+xfs_fs_log_dummy(xfs_mount_t *mp)
+{
+	xfs_trans_t *tp;
+	xfs_inode_t *ip;
+
+
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+	atomic_inc(&mp->m_active_trans);
+	if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
+		xfs_trans_cancel(tp, 0);
+		return;
+	}
+
+	ip = mp->m_rootip;
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_trans_set_sync(tp);
+	xfs_trans_commit(tp, 0, NULL);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
+int
+xfs_fs_goingdown(
+	xfs_mount_t	*mp,
+	__uint32_t	inflags)
+{
+	switch (inflags) {
+	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
+		struct vfs *vfsp = XFS_MTOVFS(mp);
+		struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev);
+
+		if (sb) {
+			xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
+			thaw_bdev(sb->s_bdev, sb);
+		}
+
+		break;
+	}
+	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
+		xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
+		break;
+	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
+		xfs_force_shutdown(mp, XFS_FORCE_UMOUNT|XFS_LOG_IO_ERROR);
+		break;
+	default:
+		return XFS_ERROR(EINVAL);
+	}
+
+	return 0;
+}
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
new file mode 100644
index 0000000..b614861
--- /dev/null
+++ b/fs/xfs/xfs_fsops.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_FSOPS_H__
+#define	__XFS_FSOPS_H__
+
+int
+xfs_fs_geometry(
+	xfs_mount_t		*mp,
+	xfs_fsop_geom_t		*geo,
+	int			new_version);
+
+int
+xfs_growfs_data(
+	xfs_mount_t		*mp,
+	xfs_growfs_data_t	*in);
+
+int
+xfs_growfs_log(
+	xfs_mount_t		*mp,
+	xfs_growfs_log_t	*in);
+
+int
+xfs_fs_counts(
+	xfs_mount_t		*mp,
+	xfs_fsop_counts_t	*cnt);
+
+int
+xfs_reserve_blocks(
+	xfs_mount_t		*mp,
+	__uint64_t		*inval,
+	xfs_fsop_resblks_t	*outval);
+
+int
+xfs_fs_goingdown(
+	xfs_mount_t		*mp,
+	__uint32_t		inflags);
+
+#endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
new file mode 100644
index 0000000..ce5fee9
--- /dev/null
+++ b/fs/xfs/xfs_ialloc.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_bmap.h"
+
+/*
+ * Log specified fields for the inode given by bp and off.
+ */
+STATIC void
+xfs_ialloc_log_di(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_buf_t	*bp,		/* inode buffer */
+	int		off,		/* index of inode in buffer */
+	int		fields)		/* bitmask of fields to log */
+{
+	int			first;		/* first byte number */
+	int			ioffset;	/* off in bytes */
+	int			last;		/* last byte number */
+	xfs_mount_t		*mp;		/* mount point structure */
+	static const short	offsets[] = {	/* field offsets */
+						/* keep in sync with bits */
+		offsetof(xfs_dinode_core_t, di_magic),
+		offsetof(xfs_dinode_core_t, di_mode),
+		offsetof(xfs_dinode_core_t, di_version),
+		offsetof(xfs_dinode_core_t, di_format),
+		offsetof(xfs_dinode_core_t, di_onlink),
+		offsetof(xfs_dinode_core_t, di_uid),
+		offsetof(xfs_dinode_core_t, di_gid),
+		offsetof(xfs_dinode_core_t, di_nlink),
+		offsetof(xfs_dinode_core_t, di_projid),
+		offsetof(xfs_dinode_core_t, di_pad),
+		offsetof(xfs_dinode_core_t, di_atime),
+		offsetof(xfs_dinode_core_t, di_mtime),
+		offsetof(xfs_dinode_core_t, di_ctime),
+		offsetof(xfs_dinode_core_t, di_size),
+		offsetof(xfs_dinode_core_t, di_nblocks),
+		offsetof(xfs_dinode_core_t, di_extsize),
+		offsetof(xfs_dinode_core_t, di_nextents),
+		offsetof(xfs_dinode_core_t, di_anextents),
+		offsetof(xfs_dinode_core_t, di_forkoff),
+		offsetof(xfs_dinode_core_t, di_aformat),
+		offsetof(xfs_dinode_core_t, di_dmevmask),
+		offsetof(xfs_dinode_core_t, di_dmstate),
+		offsetof(xfs_dinode_core_t, di_flags),
+		offsetof(xfs_dinode_core_t, di_gen),
+		offsetof(xfs_dinode_t, di_next_unlinked),
+		offsetof(xfs_dinode_t, di_u),
+		offsetof(xfs_dinode_t, di_a),
+		sizeof(xfs_dinode_t)
+	};
+
+
+	ASSERT(offsetof(xfs_dinode_t, di_core) == 0);
+	ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0);
+	mp = tp->t_mountp;
+	/*
+	 * Get the inode-relative first and last bytes for these fields
+	 */
+	xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last);
+	/*
+	 * Convert to buffer offsets and log it.
+	 */
+	ioffset = off << mp->m_sb.sb_inodelog;
+	first += ioffset;
+	last += ioffset;
+	xfs_trans_log_buf(tp, bp, first, last);
+}
+
+/*
+ * Allocation group level functions.
+ */
+
+/*
+ * Allocate new inodes in the allocation group specified by agbp.
+ * Return 0 for success, else error code.
+ */
+STATIC int				/* error code or 0 */
+xfs_ialloc_ag_alloc(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_buf_t	*agbp,		/* alloc group buffer */
+	int		*alloc)
+{
+	xfs_agi_t	*agi;		/* allocation group header */
+	xfs_alloc_arg_t	args;		/* allocation argument structure */
+	int		blks_per_cluster;  /* fs blocks per inode cluster */
+	xfs_btree_cur_t	*cur;		/* inode btree cursor */
+	xfs_daddr_t	d;		/* disk addr of buffer */
+	int		error;
+	xfs_buf_t	*fbuf;		/* new free inodes' buffer */
+	xfs_dinode_t	*free;		/* new free inode structure */
+	int		i;		/* inode counter */
+	int		j;		/* block counter */
+	int		nbufs;		/* num bufs of new inodes */
+	xfs_agino_t	newino;		/* new first inode's number */
+	xfs_agino_t	newlen;		/* new number of inodes */
+	int		ninodes;	/* num inodes per buf */
+	xfs_agino_t	thisino;	/* current inode number, for loop */
+	int		version;	/* inode version number to use */
+	int		isaligned;	/* inode allocation at stripe unit */
+					/* boundary */
+	xfs_dinode_core_t dic;          /* a dinode_core to copy to new */
+					/* inodes */
+
+	args.tp = tp;
+	args.mp = tp->t_mountp;
+
+	/*
+	 * Locking will ensure that we don't have two callers in here
+	 * at one time.
+	 */
+	newlen = XFS_IALLOC_INODES(args.mp);
+	if (args.mp->m_maxicount &&
+	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
+		return XFS_ERROR(ENOSPC);
+	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
+	/*
+	 * Set the alignment for the allocation.
+	 * If stripe alignment is turned on then align at stripe unit
+	 * boundary.
+	 * If the cluster size is smaller than a filesystem block
+	 * then we're doing I/O for inodes in filesystem block size pieces,
+	 * so don't need alignment anyway.
+	 */
+	isaligned = 0;
+	if (args.mp->m_sinoalign) {
+		ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
+		args.alignment = args.mp->m_dalign;
+		isaligned = 1;
+	} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
+	    args.mp->m_sb.sb_inoalignmt >=
+	    XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
+		args.alignment = args.mp->m_sb.sb_inoalignmt;
+	else
+		args.alignment = 1;
+	agi = XFS_BUF_TO_AGI(agbp);
+	/*
+	 * Need to figure out where to allocate the inode blocks.
+	 * Ideally they should be spaced out through the a.g.
+	 * For now, just allocate blocks up front.
+	 */
+	args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+				    args.agbno);
+	/*
+	 * Allocate a fixed-size extent of inodes.
+	 */
+	args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	args.mod = args.total = args.wasdel = args.isfl = args.userdata =
+		args.minalignslop = 0;
+	args.prod = 1;
+	/*
+	 * Allow space for the inode btree to split.
+	 */
+	args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
+	if ((error = xfs_alloc_vextent(&args)))
+		return error;
+
+	/*
+	 * If stripe alignment is turned on, then try again with cluster
+	 * alignment.
+	 */
+	if (isaligned && args.fsbno == NULLFSBLOCK) {
+		args.type = XFS_ALLOCTYPE_NEAR_BNO;
+		args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
+		args.fsbno = XFS_AGB_TO_FSB(args.mp,
+				INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno);
+		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
+			args.mp->m_sb.sb_inoalignmt >=
+			XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
+				args.alignment = args.mp->m_sb.sb_inoalignmt;
+		else
+			args.alignment = 1;
+		if ((error = xfs_alloc_vextent(&args)))
+			return error;
+	}
+
+	if (args.fsbno == NULLFSBLOCK) {
+		*alloc = 0;
+		return 0;
+	}
+	ASSERT(args.len == args.minlen);
+	/*
+	 * Convert the results.
+	 */
+	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
+	/*
+	 * Loop over the new block(s), filling in the inodes.
+	 * For small block sizes, manipulate the inodes in buffers
+	 * which are multiples of the block size.
+	 */
+	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
+		blks_per_cluster = 1;
+		nbufs = (int)args.len;
+		ninodes = args.mp->m_sb.sb_inopblock;
+	} else {
+		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
+				   args.mp->m_sb.sb_blocksize;
+		nbufs = (int)args.len / blks_per_cluster;
+		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
+	}
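+	/*
+	 * Illustrative numbers (not from the original source): with a
+	 * hypothetical 8192-byte inode cluster and 512-byte filesystem
+	 * blocks, blks_per_cluster is 16, nbufs is args.len / 16, and
+	 * each buffer holds 16 * sb_inopblock inodes.  When blocks are at
+	 * least as large as the cluster, each block is its own buffer.
+	 */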
+	/*
+	 * Figure out what version number to use in the inodes we create.
+	 * If the superblock version has caught up to the one that supports
+	 * the new inode format, then use the new inode version.  Otherwise
+	 * use the old version so that old kernels will continue to be
+	 * able to use the file system.
+	 */
+	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))
+		version = XFS_DINODE_VERSION_2;
+	else
+		version = XFS_DINODE_VERSION_1;
+
+	memset(&dic, 0, sizeof(xfs_dinode_core_t));
+	INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
+	INT_SET(dic.di_version, ARCH_CONVERT, version);
+
+	for (j = 0; j < nbufs; j++) {
+		/*
+		 * Get the block.
+		 */
+		d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+				     args.agbno + (j * blks_per_cluster));
+		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
+					 args.mp->m_bsize * blks_per_cluster,
+					 XFS_BUF_LOCK);
+		ASSERT(fbuf);
+		ASSERT(!XFS_BUF_GETERROR(fbuf));
+		/*
+		 * Loop over the inodes in this buffer.
+		 */
+
+		for (i = 0; i < ninodes; i++) {
+			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
+			memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t));
+			INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
+			xfs_ialloc_log_di(tp, fbuf, i,
+				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
+		}
+		xfs_trans_inode_alloc_buf(tp, fbuf);
+	}
+	INT_MOD(agi->agi_count, ARCH_CONVERT, newlen);
+	INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen);
+	down_read(&args.mp->m_peraglock);
+	args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen;
+	up_read(&args.mp->m_peraglock);
+	INT_SET(agi->agi_newino, ARCH_CONVERT, newino);
+	/*
+	 * Insert records describing the new inode chunk into the btree.
+	 */
+	cur = xfs_btree_init_cursor(args.mp, tp, agbp,
+			INT_GET(agi->agi_seqno, ARCH_CONVERT),
+			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+	for (thisino = newino;
+	     thisino < newino + newlen;
+	     thisino += XFS_INODES_PER_CHUNK) {
+		if ((error = xfs_inobt_lookup_eq(cur, thisino,
+				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
+			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+			return error;
+		}
+		ASSERT(i == 0);
+		if ((error = xfs_inobt_insert(cur, &i))) {
+			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+			return error;
+		}
+		ASSERT(i == 1);
+	}
+	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	/*
+	 * Log allocation group header fields
+	 */
+	xfs_ialloc_log_agi(tp, agbp,
+		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
+	/*
+	 * Modify/log superblock values for inode count and inode free count.
+	 */
+	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
+	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
+	*alloc = 1;
+	return 0;
+}
+
+STATIC __inline xfs_agnumber_t
+xfs_ialloc_next_ag(
+	xfs_mount_t	*mp)
+{
+	xfs_agnumber_t	agno;
+
+	spin_lock(&mp->m_agirotor_lock);
+	agno = mp->m_agirotor;
+	if (++mp->m_agirotor == mp->m_maxagi)
+		mp->m_agirotor = 0;
+	spin_unlock(&mp->m_agirotor_lock);
+
+	return agno;
+}
+
+/*
+ * Select an allocation group to look for a free inode in, based on the parent
+ * inode and then the mode.  Return the allocation group buffer.
+ */
+STATIC xfs_buf_t *			/* allocation group buffer */
+xfs_ialloc_ag_select(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_ino_t	parent,		/* parent directory inode number */
+	mode_t		mode,		/* bits set to indicate file type */
+	int		okalloc)	/* ok to allocate more space */
+{
+	xfs_buf_t	*agbp;		/* allocation group header buffer */
+	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
+	xfs_agnumber_t	agno;		/* current ag number */
+	int		flags;		/* alloc buffer locking flags */
+	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
+	xfs_extlen_t	longest = 0;	/* longest extent available */
+	xfs_mount_t	*mp;		/* mount point structure */
+	int		needspace;	/* file mode implies space allocated */
+	xfs_perag_t	*pag;		/* per allocation group data */
+	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
+
+	/*
+	 * Files of these types need at least one block if length > 0
+	 * (and they won't fit in the inode, but that's hard to figure out).
+	 */
+	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
+	mp = tp->t_mountp;
+	agcount = mp->m_maxagi;
+	if (S_ISDIR(mode))
+		pagno = xfs_ialloc_next_ag(mp);
+	else {
+		pagno = XFS_INO_TO_AGNO(mp, parent);
+		if (pagno >= agcount)
+			pagno = 0;
+	}
+	ASSERT(pagno < agcount);
+	/*
+	 * Loop through the allocation groups, looking for one with a little
+	 * free space in it.  Note we don't look for free inodes exactly:
+	 * if a group has no free inodes, it must also have enough free
+	 * blocks to allocate a new inode chunk before we will choose it.
+	 */
+	agno = pagno;
+	flags = XFS_ALLOC_FLAG_TRYLOCK;
+	down_read(&mp->m_peraglock);
+	for (;;) {
+		pag = &mp->m_perag[agno];
+		if (!pag->pagi_init) {
+			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
+				agbp = NULL;
+				goto nextag;
+			}
+		} else
+			agbp = NULL;
+
+		if (!pag->pagi_inodeok) {
+			xfs_ialloc_next_ag(mp);
+			goto unlock_nextag;
+		}
+
+		/*
+		 * Is there enough free space for the file plus a block
+		 * of inodes (if we need to allocate some)?
+		 */
+		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
+		if (ineed && !pag->pagf_init) {
+			if (agbp == NULL &&
+			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
+				agbp = NULL;
+				goto nextag;
+			}
+			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);
+		}
+		if (!ineed || pag->pagf_init) {
+			if (ineed && !(longest = pag->pagf_longest))
+				longest = pag->pagf_flcount > 0;
+			if (!ineed ||
+			    (pag->pagf_freeblks >= needspace + ineed &&
+			     longest >= ineed &&
+			     okalloc)) {
+				if (agbp == NULL &&
+				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
+					agbp = NULL;
+					goto nextag;
+				}
+				up_read(&mp->m_peraglock);
+				return agbp;
+			}
+		}
+unlock_nextag:
+		if (agbp)
+			xfs_trans_brelse(tp, agbp);
+nextag:
+		/*
+		 * No point in iterating over the rest if we're shutting down.
+		 */
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			up_read(&mp->m_peraglock);
+			return (xfs_buf_t *)0;
+		}
+		agno++;
+		if (agno >= agcount)
+			agno = 0;
+		if (agno == pagno) {
+			if (flags == 0) {
+				up_read(&mp->m_peraglock);
+				return (xfs_buf_t *)0;
+			}
+			flags = 0;
+		}
+	}
+}
+
+/*
+ * Visible inode allocation functions.
+ */
+
+/*
+ * Allocate an inode on disk.
+ * Mode is used to tell whether the new inode will need space, and whether
+ * it is a directory.
+ *
+ * The arguments IO_agbp and alloc_done are defined to work within
+ * the constraint of one allocation per transaction.
+ * xfs_dialloc() is designed to be called twice if it has to do an
+ * allocation to make more free inodes.  On the first call,
+ * IO_agbp should be set to NULL. If an inode is available,
+ * i.e., xfs_dialloc() did not need to do an allocation, an inode
+ * number is returned.  In this case, IO_agbp is set back to NULL
+ * and alloc_done set to false.
+ * If an allocation needed to be done, xfs_dialloc would return
+ * the current ag_buf in IO_agbp and set alloc_done to true.
+ * The caller should then commit the current transaction, allocate a new
+ * transaction, and call xfs_dialloc() again, passing in the previous
+ * value of IO_agbp.  IO_agbp should be held across the transactions.
+ * Since the agbp is locked across the two calls, the second call is
+ * guaranteed to have a free inode available.
+ *
+ * Once we successfully pick an inode its number is returned and the
+ * on-disk data structures are updated.  The inode itself is not read
+ * in, since doing so would break ordering constraints with xfs_reclaim.
+ */
+int
+xfs_dialloc(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_ino_t	parent,		/* parent inode (directory) */
+	mode_t		mode,		/* mode bits for new inode */
+	int		okalloc,	/* ok to allocate more space */
+	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
+	boolean_t	*alloc_done,	/* true if we needed to replenish
+					   inode freelist */
+	xfs_ino_t	*inop)		/* inode number allocated */
+{
+	xfs_agnumber_t	agcount;	/* number of allocation groups */
+	xfs_buf_t	*agbp;		/* allocation group header's buffer */
+	xfs_agnumber_t	agno;		/* allocation group number */
+	xfs_agi_t	*agi;		/* allocation group header structure */
+	xfs_btree_cur_t	*cur;		/* inode allocation btree cursor */
+	int		error;		/* error return value */
+	int		i;		/* result code */
+	int		ialloced;	/* inode allocation status */
+	int		noroom = 0;	/* no space for inode blk allocation */
+	xfs_ino_t	ino;		/* fs-relative inode to be returned */
+	/* REFERENCED */
+	int		j;		/* result code */
+	xfs_mount_t	*mp;		/* file system mount structure */
+	int		offset;		/* index of inode in chunk */
+	xfs_agino_t	pagino;		/* parent's a.g. relative inode # */
+	xfs_agnumber_t	pagno;		/* parent's allocation group number */
+	xfs_inobt_rec_t	rec;		/* inode allocation record */
+	xfs_agnumber_t	tagno;		/* testing allocation group number */
+	xfs_btree_cur_t	*tcur;		/* temp cursor */
+	xfs_inobt_rec_t	trec;		/* temp inode allocation record */
+
+
+	if (*IO_agbp == NULL) {
+		/*
+		 * We do not have an agbp, so select an initial allocation
+		 * group for inode allocation.
+		 */
+		agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
+		/*
+		 * Couldn't find an allocation group satisfying the
+		 * criteria, give up.
+		 */
+		if (!agbp) {
+			*inop = NULLFSINO;
+			return 0;
+		}
+		agi = XFS_BUF_TO_AGI(agbp);
+		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+	} else {
+		/*
+		 * Continue where we left off before.  In this case, we
+		 * know that the allocation group has free inodes.
+		 */
+		agbp = *IO_agbp;
+		agi = XFS_BUF_TO_AGI(agbp);
+		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+		ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
+	}
+	mp = tp->t_mountp;
+	agcount = mp->m_sb.sb_agcount;
+	agno = INT_GET(agi->agi_seqno, ARCH_CONVERT);
+	tagno = agno;
+	pagno = XFS_INO_TO_AGNO(mp, parent);
+	pagino = XFS_INO_TO_AGINO(mp, parent);
+
+	/*
+	 * If we have already hit the ceiling of inode blocks then clear
+	 * okalloc so we scan all available agi structures for a free
+	 * inode.
+	 */
+
+	if (mp->m_maxicount &&
+	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+		noroom = 1;
+		okalloc = 0;
+	}
+
+	/*
+	 * Loop until we find an allocation group that either has free inodes
+	 * or in which we can allocate some inodes.  Iterate through the
+	 * allocation groups upward, wrapping at the end.
+	 */
+	*alloc_done = B_FALSE;
+	while (!agi->agi_freecount) {
+		/*
+		 * Don't do anything if we're not supposed to allocate
+		 * any blocks, just go on to the next ag.
+		 */
+		if (okalloc) {
+			/*
+			 * Try to allocate some new inodes in the allocation
+			 * group.
+			 */
+			if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
+				xfs_trans_brelse(tp, agbp);
+				if (error == ENOSPC) {
+					*inop = NULLFSINO;
+					return 0;
+				} else
+					return error;
+			}
+			if (ialloced) {
+				/*
+				 * We successfully allocated some inodes, return
+				 * the current context to the caller so that it
+				 * can commit the current transaction and call
+				 * us again where we left off.
+				 */
+				ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
+				*alloc_done = B_TRUE;
+				*IO_agbp = agbp;
+				*inop = NULLFSINO;
+				return 0;
+			}
+		}
+		/*
+		 * If it failed, give up on this ag.
+		 */
+		xfs_trans_brelse(tp, agbp);
+		/*
+		 * Go on to the next ag: get its ag header.
+		 */
+nextag:
+		if (++tagno == agcount)
+			tagno = 0;
+		if (tagno == agno) {
+			*inop = NULLFSINO;
+			return noroom ? ENOSPC : 0;
+		}
+		down_read(&mp->m_peraglock);
+		if (mp->m_perag[tagno].pagi_inodeok == 0) {
+			up_read(&mp->m_peraglock);
+			goto nextag;
+		}
+		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
+		up_read(&mp->m_peraglock);
+		if (error)
+			goto nextag;
+		agi = XFS_BUF_TO_AGI(agbp);
+		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+	}
+	/*
+	 * Here with an allocation group that has a free inode.
+	 * Reset agno since we may have chosen a new ag in the
+	 * loop above.
+	 */
+	agno = tagno;
+	*IO_agbp = NULL;
+	cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+				    XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+	/*
+	 * If pagino is 0 (this is the root inode allocation) use newino.
+	 * This must work because we've just allocated some.
+	 */
+	if (!pagino)
+		pagino = INT_GET(agi->agi_newino, ARCH_CONVERT);
+#ifdef DEBUG
+	if (cur->bc_nlevels == 1) {
+		int	freecount = 0;
+
+		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		do {
+			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+					&rec.ir_freecount, &rec.ir_free, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			freecount += rec.ir_freecount;
+			if ((error = xfs_inobt_increment(cur, 0, &i)))
+				goto error0;
+		} while (i == 1);
+
+		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		       XFS_FORCED_SHUTDOWN(mp));
+	}
+#endif
+	/*
+	 * If in the same a.g. as the parent, try to get near the parent.
+	 */
+	if (pagno == agno) {
+		if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i)))
+			goto error0;
+		if (i != 0 &&
+		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+			    &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
+		    j == 1 &&
+		    rec.ir_freecount > 0) {
+			/*
+			 * Found a free inode in the same chunk
+			 * as parent, done.
+			 */
+		}
+		/*
+		 * In the same a.g. as parent, but parent's chunk is full.
+		 */
+		else {
+			int	doneleft;	/* done, to the left */
+			int	doneright;	/* done, to the right */
+
+			if (error)
+				goto error0;
+			ASSERT(i == 1);
+			ASSERT(j == 1);
+			/*
+			 * Duplicate the cursor, search left & right
+			 * simultaneously.
+			 */
+			if ((error = xfs_btree_dup_cursor(cur, &tcur)))
+				goto error0;
+			/*
+			 * Search left with tcur, back up 1 record.
+			 */
+			if ((error = xfs_inobt_decrement(tcur, 0, &i)))
+				goto error1;
+			doneleft = !i;
+			if (!doneleft) {
+				if ((error = xfs_inobt_get_rec(tcur,
+						&trec.ir_startino,
+						&trec.ir_freecount,
+						&trec.ir_free, &i)))
+					goto error1;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
+			}
+			/*
+			 * Search right with cur, go forward 1 record.
+			 */
+			if ((error = xfs_inobt_increment(cur, 0, &i)))
+				goto error1;
+			doneright = !i;
+			if (!doneright) {
+				if ((error = xfs_inobt_get_rec(cur,
+						&rec.ir_startino,
+						&rec.ir_freecount,
+						&rec.ir_free, &i)))
+					goto error1;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
+			}
+			/*
+			 * Loop until we find the closest inode chunk
+			 * with a free one.
+			 */
+			while (!doneleft || !doneright) {
+				int	useleft;  /* using left inode
+						     chunk this time */
+
+				/*
+				 * Figure out which block is closer,
+				 * if both are valid.
+				 */
+				if (!doneleft && !doneright)
+					useleft =
+						pagino -
+						(trec.ir_startino +
+						 XFS_INODES_PER_CHUNK - 1) <
+						 rec.ir_startino - pagino;
+				else
+					useleft = !doneleft;
+				/*
+				 * If checking the left, does it have
+				 * free inodes?
+				 */
+				if (useleft && trec.ir_freecount) {
+					/*
+					 * Yes, set it up as the chunk to use.
+					 */
+					rec = trec;
+					xfs_btree_del_cursor(cur,
+						XFS_BTREE_NOERROR);
+					cur = tcur;
+					break;
+				}
+				/*
+				 * If checking the right, does it have
+				 * free inodes?
+				 */
+				if (!useleft && rec.ir_freecount) {
+					/*
+					 * Yes, it's already set up.
+					 */
+					xfs_btree_del_cursor(tcur,
+						XFS_BTREE_NOERROR);
+					break;
+				}
+				/*
+				 * If used the left, get another one
+				 * further left.
+				 */
+				if (useleft) {
+					if ((error = xfs_inobt_decrement(tcur, 0,
+							&i)))
+						goto error1;
+					doneleft = !i;
+					if (!doneleft) {
+						if ((error = xfs_inobt_get_rec(
+							    tcur,
+							    &trec.ir_startino,
+							    &trec.ir_freecount,
+							    &trec.ir_free, &i)))
+							goto error1;
+						XFS_WANT_CORRUPTED_GOTO(i == 1,
+							error1);
+					}
+				}
+				/*
+				 * If used the right, get another one
+				 * further right.
+				 */
+				else {
+					if ((error = xfs_inobt_increment(cur, 0,
+							&i)))
+						goto error1;
+					doneright = !i;
+					if (!doneright) {
+						if ((error = xfs_inobt_get_rec(
+							    cur,
+							    &rec.ir_startino,
+							    &rec.ir_freecount,
+							    &rec.ir_free, &i)))
+							goto error1;
+						XFS_WANT_CORRUPTED_GOTO(i == 1,
+							error1);
+					}
+				}
+			}
+			ASSERT(!doneleft || !doneright);
+		}
+	}
+	/*
+	 * In a different a.g. from the parent.
+	 * See if the most recently allocated block has any free.
+	 */
+	else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) {
+		if ((error = xfs_inobt_lookup_eq(cur,
+				INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i)))
+			goto error0;
+		if (i == 1 &&
+		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+			    &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
+		    j == 1 &&
+		    rec.ir_freecount > 0) {
+			/*
+			 * The last chunk allocated in the group still has
+			 * a free inode.
+			 */
+		}
+		/*
+		 * None left in the last group, search the whole a.g.
+		 */
+		else {
+			if (error)
+				goto error0;
+			if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+				goto error0;
+			ASSERT(i == 1);
+			for (;;) {
+				if ((error = xfs_inobt_get_rec(cur,
+						&rec.ir_startino,
+						&rec.ir_freecount, &rec.ir_free,
+						&i)))
+					goto error0;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+				if (rec.ir_freecount > 0)
+					break;
+				if ((error = xfs_inobt_increment(cur, 0, &i)))
+					goto error0;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			}
+		}
+	}
+	offset = XFS_IALLOC_FIND_FREE(&rec.ir_free);
+	ASSERT(offset >= 0);
+	ASSERT(offset < XFS_INODES_PER_CHUNK);
+	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
+				   XFS_INODES_PER_CHUNK) == 0);
+	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
+	XFS_INOBT_CLR_FREE(&rec, offset);
+	rec.ir_freecount--;
+	if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
+			rec.ir_free)))
+		goto error0;
+	INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1);
+	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
+	down_read(&mp->m_peraglock);
+	mp->m_perag[tagno].pagi_freecount--;
+	up_read(&mp->m_peraglock);
+#ifdef DEBUG
+	if (cur->bc_nlevels == 1) {
+		int	freecount = 0;
+
+		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+			goto error0;
+		do {
+			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+					&rec.ir_freecount, &rec.ir_free, &i)))
+				goto error0;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			freecount += rec.ir_freecount;
+			if ((error = xfs_inobt_increment(cur, 0, &i)))
+				goto error0;
+		} while (i == 1);
+		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		       XFS_FORCED_SHUTDOWN(mp));
+	}
+#endif
+	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
+	*inop = ino;
+	return 0;
+error1:
+	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+error0:
+	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Free disk inode.  Carefully avoids touching the incore inode, all
+ * manipulations incore are the caller's responsibility.
+ * The on-disk inode is not changed by this operation, only the
+ * btree (free inode mask) is changed.
+ */
+int
+xfs_difree(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_ino_t	inode,		/* inode to be freed */
+	xfs_bmap_free_t	*flist,		/* extents to free */
+	int		*delete,	/* set if inode cluster was deleted */
+	xfs_ino_t	*first_ino)	/* first inode in deleted cluster */
+{
+	/* REFERENCED */
+	xfs_agblock_t	agbno;	/* block number containing inode */
+	xfs_buf_t	*agbp;	/* buffer containing allocation group header */
+	xfs_agino_t	agino;	/* inode number relative to allocation group */
+	xfs_agnumber_t	agno;	/* allocation group number */
+	xfs_agi_t	*agi;	/* allocation group header */
+	xfs_btree_cur_t	*cur;	/* inode btree cursor */
+	int		error;	/* error return value */
+	int		i;	/* result code */
+	int		ilen;	/* inodes in an inode cluster */
+	xfs_mount_t	*mp;	/* mount structure for filesystem */
+	int		off;	/* offset of inode in inode chunk */
+	xfs_inobt_rec_t	rec;	/* btree record */
+
+	mp = tp->t_mountp;
+
+	/*
+	 * Break up inode number into its components.
+	 */
+	agno = XFS_INO_TO_AGNO(mp, inode);
+	if (agno >= mp->m_sb.sb_agcount)  {
+		cmn_err(CE_WARN,
+			"xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s.  Returning EINVAL.",
+			agno, mp->m_sb.sb_agcount, mp->m_fsname);
+		ASSERT(0);
+		return XFS_ERROR(EINVAL);
+	}
+	agino = XFS_INO_TO_AGINO(mp, inode);
+	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
+		cmn_err(CE_WARN,
+			"xfs_difree: inode != XFS_AGINO_TO_INO() (%llu != %llu) on %s.  Returning EINVAL.",
+			(unsigned long long)inode,
+			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino),
+			mp->m_fsname);
+		ASSERT(0);
+		return XFS_ERROR(EINVAL);
+	}
+	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+	if (agbno >= mp->m_sb.sb_agblocks)  {
+		cmn_err(CE_WARN,
+			"xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s.  Returning EINVAL.",
+			agbno, mp->m_sb.sb_agblocks, mp->m_fsname);
+		ASSERT(0);
+		return XFS_ERROR(EINVAL);
+	}
+	/*
+	 * Get the allocation group header.
+	 */
+	down_read(&mp->m_peraglock);
+	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+	up_read(&mp->m_peraglock);
+	if (error) {
+		cmn_err(CE_WARN,
+			"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s.  Returning error.",
+			error, mp->m_fsname);
+		return error;
+	}
+	agi = XFS_BUF_TO_AGI(agbp);
+	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+	ASSERT(agbno < INT_GET(agi->agi_length, ARCH_CONVERT));
+	/*
+	 * Initialize the cursor.
+	 */
+	cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
+		(xfs_inode_t *)0, 0);
+#ifdef DEBUG
+	if (cur->bc_nlevels == 1) {
+		int freecount = 0;
+
+		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+			goto error0;
+		do {
+			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+					&rec.ir_freecount, &rec.ir_free, &i)))
+				goto error0;
+			if (i) {
+				freecount += rec.ir_freecount;
+				if ((error = xfs_inobt_increment(cur, 0, &i)))
+					goto error0;
+			}
+		} while (i == 1);
+		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		       XFS_FORCED_SHUTDOWN(mp));
+	}
+#endif
+	/*
+	 * Look for the entry describing this inode.
+	 */
+	if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
+		cmn_err(CE_WARN,
+			"xfs_difree: xfs_inobt_lookup_le() returned an error %d on %s.  Returning error.",
+			error, mp->m_fsname);
+		goto error0;
+	}
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount,
+			&rec.ir_free, &i))) {
+		cmn_err(CE_WARN,
+			"xfs_difree: xfs_inobt_get_rec() returned an error %d on %s.  Returning error.",
+			error, mp->m_fsname);
+		goto error0;
+	}
+	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	/*
+	 * Get the offset in the inode chunk.
+	 */
+	off = agino - rec.ir_startino;
+	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
+	ASSERT(!XFS_INOBT_IS_FREE(&rec, off));
+	/*
+	 * Mark the inode free & increment the count.
+	 */
+	XFS_INOBT_SET_FREE(&rec, off);
+	rec.ir_freecount++;
+
+	/*
+	 * When an inode cluster is free, it becomes eligible for removal.
+	 */
+	if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
+	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
+
+		*delete = 1;
+		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
+
+		/*
+		 * Remove the inode cluster from the AGI B+Tree, adjust the
+		 * AGI and Superblock inode counts, and mark the disk space
+		 * to be freed when the transaction is committed.
+		 */
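+		/*
+		 * Note that the chunk holds ilen inodes but only ilen - 1 of
+		 * them are currently reflected in the AGI/superblock free
+		 * counts; the inode being freed here was still accounted as
+		 * allocated.  E.g. with 64-inode chunks, agi_count drops by
+		 * 64 while agi_freecount drops by 63.
+		 */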
+		ilen = XFS_IALLOC_INODES(mp);
+		INT_MOD(agi->agi_count, ARCH_CONVERT, -ilen);
+		INT_MOD(agi->agi_freecount, ARCH_CONVERT, -(ilen - 1));
+		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
+		down_read(&mp->m_peraglock);
+		mp->m_perag[agno].pagi_freecount -= ilen - 1;
+		up_read(&mp->m_peraglock);
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
+
+		if ((error = xfs_inobt_delete(cur, &i))) {
+			cmn_err(CE_WARN, "xfs_difree: xfs_inobt_delete returned an error %d on %s.\n",
+				error, mp->m_fsname);
+			goto error0;
+		}
+
+		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
+				agno, XFS_INO_TO_AGBNO(mp,rec.ir_startino)),
+				XFS_IALLOC_BLOCKS(mp), flist, mp);
+	} else {
+		*delete = 0;
+
+		if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) {
+			cmn_err(CE_WARN,
+				"xfs_difree: xfs_inobt_update() returned an error %d on %s.  Returning error.",
+				error, mp->m_fsname);
+			goto error0;
+		}
+		/*
+		 * Change the inode free counts and log the ag/sb changes.
+		 */
+		INT_MOD(agi->agi_freecount, ARCH_CONVERT, 1);
+		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
+		down_read(&mp->m_peraglock);
+		mp->m_perag[agno].pagi_freecount++;
+		up_read(&mp->m_peraglock);
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
+	}
+
+#ifdef DEBUG
+	if (cur->bc_nlevels == 1) {
+		int freecount = 0;
+
+		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+			goto error0;
+		do {
+			if ((error = xfs_inobt_get_rec(cur,
+					&rec.ir_startino,
+					&rec.ir_freecount,
+					&rec.ir_free, &i)))
+				goto error0;
+			if (i) {
+				freecount += rec.ir_freecount;
+				if ((error = xfs_inobt_increment(cur, 0, &i)))
+					goto error0;
+			}
+		} while (i == 1);
+		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		       XFS_FORCED_SHUTDOWN(mp));
+	}
+#endif
+	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	return 0;
+
+error0:
+	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Return the location of the inode in bno/off, for mapping it into a buffer.
+ */
+/*ARGSUSED*/
+int
+xfs_dilocate(
+	xfs_mount_t	*mp,	/* file system mount structure */
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_ino_t	ino,	/* inode to locate */
+	xfs_fsblock_t	*bno,	/* output: block containing inode */
+	int		*len,	/* output: num blocks in inode cluster */
+	int		*off,	/* output: index in block of inode */
+	uint		flags)	/* flags concerning inode lookup */
+{
+	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
+	xfs_buf_t	*agbp;	/* agi buffer */
+	xfs_agino_t	agino;	/* inode number within alloc group */
+	xfs_agnumber_t	agno;	/* allocation group number */
+	int		blks_per_cluster; /* num blocks per inode cluster */
+	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
+	xfs_agino_t	chunk_agino;	/* first agino in inode chunk */
+	__int32_t	chunk_cnt;	/* count of free inodes in chunk */
+	xfs_inofree_t	chunk_free;	/* mask of free inodes in chunk */
+	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
+	xfs_btree_cur_t	*cur;	/* inode btree cursor */
+	int		error;	/* error code */
+	int		i;	/* temp state */
+	int		offset;	/* index of inode in its buffer */
+	int		offset_agbno;	/* blks from chunk start to inode */
+
+	ASSERT(ino != NULLFSINO);
+	/*
+	 * Split up the inode number into its parts.
+	 */
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
+	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
+	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+#ifdef DEBUG
+		if (agno >= mp->m_sb.sb_agcount) {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+					"xfs_dilocate: agno (%d) >= "
+					"mp->m_sb.sb_agcount (%d)",
+					agno,  mp->m_sb.sb_agcount);
+		}
+		if (agbno >= mp->m_sb.sb_agblocks) {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+					"xfs_dilocate: agbno (0x%llx) >= "
+					"mp->m_sb.sb_agblocks (0x%lx)",
+					(unsigned long long) agbno,
+					(unsigned long) mp->m_sb.sb_agblocks);
+		}
+		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+					"xfs_dilocate: ino (0x%llx) != "
+					"XFS_AGINO_TO_INO(mp, agno, agino) "
+					"(0x%llx)",
+					ino, XFS_AGINO_TO_INO(mp, agno, agino));
+		}
+#endif /* DEBUG */
+		return XFS_ERROR(EINVAL);
+	}
+	if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) ||
+	    !(flags & XFS_IMAP_LOOKUP)) {
+		offset = XFS_INO_TO_OFFSET(mp, ino);
+		ASSERT(offset < mp->m_sb.sb_inopblock);
+		*bno = XFS_AGB_TO_FSB(mp, agno, agbno);
+		*off = offset;
+		*len = 1;
+		return 0;
+	}
+	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+	if (*bno != NULLFSBLOCK) {
+		offset = XFS_INO_TO_OFFSET(mp, ino);
+		ASSERT(offset < mp->m_sb.sb_inopblock);
+		cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno);
+		*off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
+			offset;
+		*len = blks_per_cluster;
+		return 0;
+	}
+	if (mp->m_inoalign_mask) {
+		offset_agbno = agbno & mp->m_inoalign_mask;
+		chunk_agbno = agbno - offset_agbno;
+	} else {
+		down_read(&mp->m_peraglock);
+		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+		up_read(&mp->m_peraglock);
+		if (error) {
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
+					"xfs_ialloc_read_agi() returned "
+					"error %d, agno %d",
+					error, agno);
+#endif /* DEBUG */
+			return error;
+		}
+		cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
+			(xfs_inode_t *)0, 0);
+		if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
+					"xfs_inobt_lookup_le() failed");
+#endif /* DEBUG */
+			goto error0;
+		}
+		if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt,
+				&chunk_free, &i))) {
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
+					"xfs_inobt_get_rec() failed");
+#endif /* DEBUG */
+			goto error0;
+		}
+		if (i == 0) {
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
+					"xfs_inobt_get_rec() failed");
+#endif /* DEBUG */
+			error = XFS_ERROR(EINVAL);
+		}
+		xfs_trans_brelse(tp, agbp);
+		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+		if (error)
+			return error;
+		chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino);
+		offset_agbno = agbno - chunk_agbno;
+	}
+	ASSERT(agbno >= chunk_agbno);
+	cluster_agbno = chunk_agbno +
+		((offset_agbno / blks_per_cluster) * blks_per_cluster);
+	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
+		XFS_INO_TO_OFFSET(mp, ino);
+	*bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno);
+	*off = offset;
+	*len = blks_per_cluster;
+	return 0;
+error0:
+	xfs_trans_brelse(tp, agbp);
+	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Compute and fill in value of m_in_maxlevels.
+ */
+void
+xfs_ialloc_compute_maxlevels(
+	xfs_mount_t	*mp)		/* file system mount structure */
+{
+	int		level;
+	uint		maxblocks;
+	uint		maxleafents;
+	int		minleafrecs;
+	int		minnoderecs;
+
+	maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
+		XFS_INODES_PER_CHUNK_LOG;
+	minleafrecs = mp->m_alloc_mnr[0];
+	minnoderecs = mp->m_alloc_mnr[1];
+	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
+	for (level = 1; maxblocks > 1; level++)
+		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+	mp->m_in_maxlevels = level;
+}
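+
+/*
+ * Worked example for xfs_ialloc_compute_maxlevels() above, using
+ * hypothetical record counts: with 32 bits of a.g.-relative inode number,
+ * maxleafents = 2^32 >> 6 = 67108864 chunk records.  If minleafrecs were
+ * 250 and minnoderecs 125, maxblocks would shrink (rounding up each time)
+ * as 268436 -> 2148 -> 18 -> 1, leaving m_in_maxlevels = 4.
+ */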
+
+/*
+ * Log specified fields for the ag hdr (inode section)
+ */
+void
+xfs_ialloc_log_agi(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_buf_t	*bp,		/* allocation group header buffer */
+	int		fields)		/* bitmask of fields to log */
+{
+	int			first;		/* first byte number */
+	int			last;		/* last byte number */
+	static const short	offsets[] = {	/* field starting offsets */
+					/* keep in sync with bit definitions */
+		offsetof(xfs_agi_t, agi_magicnum),
+		offsetof(xfs_agi_t, agi_versionnum),
+		offsetof(xfs_agi_t, agi_seqno),
+		offsetof(xfs_agi_t, agi_length),
+		offsetof(xfs_agi_t, agi_count),
+		offsetof(xfs_agi_t, agi_root),
+		offsetof(xfs_agi_t, agi_level),
+		offsetof(xfs_agi_t, agi_freecount),
+		offsetof(xfs_agi_t, agi_newino),
+		offsetof(xfs_agi_t, agi_dirino),
+		offsetof(xfs_agi_t, agi_unlinked),
+		sizeof(xfs_agi_t)
+	};
+#ifdef DEBUG
+	xfs_agi_t		*agi;	/* allocation group header */
+
+	agi = XFS_BUF_TO_AGI(bp);
+	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+#endif
+	/*
+	 * Compute byte offsets for the first and last fields.
+	 */
+	xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last);
+	/*
+	 * Log the allocation group inode header buffer.
+	 */
+	xfs_trans_log_buf(tp, bp, first, last);
+}
+
+/*
+ * Read in the allocation group header (inode allocation section)
+ */
+int
+xfs_ialloc_read_agi(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	xfs_buf_t	**bpp)		/* allocation group hdr buf */
+{
+	xfs_agi_t	*agi;		/* allocation group header */
+	int		agi_ok;		/* agi is consistent */
+	xfs_buf_t	*bp;		/* allocation group hdr buf */
+	xfs_perag_t	*pag;		/* per allocation group data */
+	int		error;
+
+	ASSERT(agno != NULLAGNUMBER);
+	error = xfs_trans_read_buf(
+			mp, tp, mp->m_ddev_targp,
+			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
+			XFS_FSS_TO_BB(mp, 1), 0, &bp);
+	if (error)
+		return error;
+	ASSERT(bp && !XFS_BUF_GETERROR(bp));
+
+	/*
+	 * Validate the magic number of the agi block.
+	 */
+	agi = XFS_BUF_TO_AGI(bp);
+	agi_ok =
+		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(
+			INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
+			XFS_RANDOM_IALLOC_READ_AGI))) {
+		XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW,
+				     mp, agi);
+		xfs_trans_brelse(tp, bp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	pag = &mp->m_perag[agno];
+	if (!pag->pagi_init) {
+		pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT);
+		pag->pagi_init = 1;
+	} else {
+		/*
+		 * It's possible for these to be out of sync if
+		 * we are in the middle of a forced shutdown.
+		 */
+		ASSERT(pag->pagi_freecount ==
+				INT_GET(agi->agi_freecount, ARCH_CONVERT)
+			|| XFS_FORCED_SHUTDOWN(mp));
+	}
+
+#ifdef DEBUG
+	{
+		int	i;
+
+		for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
+			ASSERT(agi->agi_unlinked[i]);
+	}
+#endif
+
+	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF);
+	*bpp = bp;
+	return 0;
+}
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
new file mode 100644
index 0000000..db6d001
--- /dev/null
+++ b/fs/xfs/xfs_ialloc.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_IALLOC_H__
+#define	__XFS_IALLOC_H__
+
+struct xfs_buf;
+struct xfs_dinode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Allocation parameters for inode allocation.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_INODES)
+int xfs_ialloc_inodes(struct xfs_mount *mp);
+#define	XFS_IALLOC_INODES(mp)	xfs_ialloc_inodes(mp)
+#else
+#define	XFS_IALLOC_INODES(mp)	((mp)->m_ialloc_inos)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_BLOCKS)
+xfs_extlen_t xfs_ialloc_blocks(struct xfs_mount *mp);
+#define	XFS_IALLOC_BLOCKS(mp)	xfs_ialloc_blocks(mp)
+#else
+#define	XFS_IALLOC_BLOCKS(mp)	((mp)->m_ialloc_blks)
+#endif
+
+/*
+ * For small block file systems, move inodes in clusters of this size.
+ * When we don't have a lot of memory, however, we go a bit smaller
+ * to reduce the number of AGI and ialloc btree blocks we need to keep
+ * around for xfs_dilocate().  We choose which one to use in
+ * xfs_mount_int().
+ */
+#define	XFS_INODE_BIG_CLUSTER_SIZE	8192
+#define	XFS_INODE_SMALL_CLUSTER_SIZE	4096
+#define	XFS_INODE_CLUSTER_SIZE(mp)	(mp)->m_inode_cluster_size
+
+/*
+ * Make an inode pointer out of the buffer/offset.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MAKE_IPTR)
+struct xfs_dinode *xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o);
+#define	XFS_MAKE_IPTR(mp,b,o)		xfs_make_iptr(mp,b,o)
+#else
+#define	XFS_MAKE_IPTR(mp,b,o) \
+	((xfs_dinode_t *)(xfs_buf_offset(b, (o) << (mp)->m_sb.sb_inodelog)))
+#endif
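+/*
+ * E.g. with 256-byte inodes (sb_inodelog == 8), XFS_MAKE_IPTR(mp, bp, 3)
+ * points 3 << 8 = 768 bytes into the buffer's data.
+ */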
+
+/*
+ * Find a free (set) bit in the inode bitmask.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_FIND_FREE)
+int xfs_ialloc_find_free(xfs_inofree_t *fp);
+#define	XFS_IALLOC_FIND_FREE(fp)	xfs_ialloc_find_free(fp)
+#else
+#define	XFS_IALLOC_FIND_FREE(fp)	xfs_lowbit64(*(fp))
+#endif
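+/*
+ * E.g. if a chunk's free mask is 0x18 (inodes 3 and 4 free), the lowest
+ * set bit is bit 3, so XFS_IALLOC_FIND_FREE() returns offset 3 and inode
+ * ir_startino + 3 is the next one handed out.
+ */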
+
+
+#ifdef __KERNEL__
+
+/*
+ * Prototypes for visible xfs_ialloc.c routines.
+ */
+
+/*
+ * Allocate an inode on disk.
+ * Mode is used to tell whether the new inode will need space, and whether
+ * it is a directory.
+ *
+ * To work within the constraint of one allocation per transaction,
+ * xfs_dialloc() is designed to be called twice if it has to do an
+ * allocation to make more free inodes.  If an inode is
+ * available without an allocation, an inode number is returned, agbp
+ * is set to NULL and alloc_done set to false.
+ * If an allocation needed to be done, agbp would be set to the
+ * inode header of the allocation group and alloc_done set to true.
+ * The caller should then commit the current transaction and allocate a new
+ * transaction.  xfs_dialloc() should then be called again with
+ * the agbp value returned from the previous call.
+ *
+ * Once we successfully pick an inode its number is returned and the
+ * on-disk data structures are updated.  The inode itself is not read
+ * in, since doing so would break ordering constraints with xfs_reclaim.
+ *
+ * *agbp should be set to NULL on the first call, *alloc_done set to FALSE.
+ */
+int					/* error */
+xfs_dialloc(
+	struct xfs_trans *tp,		/* transaction pointer */
+	xfs_ino_t	parent,		/* parent inode (directory) */
+	mode_t		mode,		/* mode bits for new inode */
+	int		okalloc,	/* ok to allocate more space */
+	struct xfs_buf	**agbp,		/* buf for a.g. inode header */
+	boolean_t	*alloc_done,	/* an allocation was done to replenish
+					   the free inodes */
+	xfs_ino_t	*inop);		/* inode number allocated */
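+
+/*
+ * Sketch of the intended calling sequence (schematic only; the transaction
+ * commit and re-allocation in the middle are the caller's usual steps and
+ * are not spelled out here):
+ *
+ *	agbp = NULL;
+ *	error = xfs_dialloc(tp, parent, mode, okalloc, &agbp,
+ *			    &alloc_done, &ino);
+ *	if (!error && alloc_done) {
+ *		... commit tp, start a new transaction tp ...
+ *		error = xfs_dialloc(tp, parent, mode, okalloc, &agbp,
+ *				    &alloc_done, &ino);
+ *	}
+ *	if (!error && ino == NULLFSINO)
+ *		... no inode could be allocated ...
+ */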
+
+/*
+ * Free disk inode.  Carefully avoids touching the incore inode, all
+ * manipulations incore are the caller's responsibility.
+ * The on-disk inode is not changed by this operation, only the
+ * btree (free inode mask) is changed.
+ */
+int					/* error */
+xfs_difree(
+	struct xfs_trans *tp,		/* transaction pointer */
+	xfs_ino_t	inode,		/* inode to be freed */
+	struct xfs_bmap_free *flist,	/* extents to free */
+	int		*delete,	/* set if inode cluster was deleted */
+	xfs_ino_t	*first_ino);	/* first inode in deleted cluster */
+
+/*
+ * Return the location of the inode in bno/len/off,
+ * for mapping it into a buffer.
+ */
+int
+xfs_dilocate(
+	struct xfs_mount *mp,		/* file system mount structure */
+	struct xfs_trans *tp,		/* transaction pointer */
+	xfs_ino_t	ino,		/* inode to locate */
+	xfs_fsblock_t	*bno,		/* output: block containing inode */
+	int		*len,		/* output: num blocks in cluster */
+	int		*off,		/* output: index in block of inode */
+	uint		flags);		/* flags for inode btree lookup */
+
+/*
+ * Compute and fill in value of m_in_maxlevels.
+ */
+void
+xfs_ialloc_compute_maxlevels(
+	struct xfs_mount *mp);		/* file system mount structure */
+
+/*
+ * Log specified fields for the ag hdr (inode section)
+ */
+void
+xfs_ialloc_log_agi(
+	struct xfs_trans *tp,		/* transaction pointer */
+	struct xfs_buf	*bp,		/* allocation group header buffer */
+	int		fields);	/* bitmask of fields to log */
+
+/*
+ * Read in the allocation group header (inode allocation section)
+ */
+int					/* error */
+xfs_ialloc_read_agi(
+	struct xfs_mount *mp,		/* file system mount structure */
+	struct xfs_trans *tp,		/* transaction pointer */
+	xfs_agnumber_t	agno,		/* allocation group number */
+	struct xfs_buf	**bpp);		/* allocation group hdr buf */
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
new file mode 100644
index 0000000..2d4daec
--- /dev/null
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -0,0 +1,2094 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+
+/*
+ * Inode allocation management for XFS.
+ */
+
+/*
+ * Prototypes for internal functions.
+ */
+
+STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int);
+STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
+STATIC int xfs_inobt_lshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *);
+STATIC int xfs_inobt_rshift(xfs_btree_cur_t *, int, int *);
+STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
+		xfs_inobt_key_t *, xfs_btree_cur_t **, int *);
+STATIC int xfs_inobt_updkey(xfs_btree_cur_t *, xfs_inobt_key_t *, int);
+
+/*
+ * Internal functions.
+ */
+
+/*
+ * Single level of the xfs_inobt_delete record deletion routine.
+ * Delete record pointed to by cur/level.
+ * Remove the record from its block then rebalance the tree.
+ * The status is returned in *stat: 0 for failure, 1 for done,
+ * 2 to go on to the next level.
+ */
+STATIC int				/* error */
+xfs_inobt_delrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level removing record from */
+	int			*stat)	/* fail/done/go-on */
+{
+	xfs_buf_t		*agbp;	/* buffer for a.g. inode header */
+	xfs_mount_t		*mp;	/* mount structure */
+	xfs_agi_t		*agi;	/* allocation group inode header */
+	xfs_inobt_block_t	*block;	/* btree block record/key lives in */
+	xfs_agblock_t		bno;	/* btree block number */
+	xfs_buf_t		*bp;	/* buffer for block */
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_inobt_key_t		key;	/* kp points here if block is level 0 */
+	xfs_inobt_key_t		*kp = NULL;	/* pointer to btree keys */
+	xfs_agblock_t		lbno;	/* left block's block number */
+	xfs_buf_t		*lbp;	/* left block's buffer pointer */
+	xfs_inobt_block_t	*left;	/* left btree block */
+	xfs_inobt_key_t		*lkp;	/* left block key pointer */
+	xfs_inobt_ptr_t		*lpp;	/* left block address pointer */
+	int			lrecs = 0;	/* number of records in left block */
+	xfs_inobt_rec_t		*lrp;	/* left block record pointer */
+	xfs_inobt_ptr_t		*pp = NULL;	/* pointer to btree addresses */
+	int			ptr;	/* index in btree block for this rec */
+	xfs_agblock_t		rbno;	/* right block's block number */
+	xfs_buf_t		*rbp;	/* right block's buffer pointer */
+	xfs_inobt_block_t	*right;	/* right btree block */
+	xfs_inobt_key_t		*rkp;	/* right block key pointer */
+	xfs_inobt_rec_t		*rp;	/* pointer to btree records */
+	xfs_inobt_ptr_t		*rpp;	/* right block address pointer */
+	int			rrecs = 0;	/* number of records in right block */
+	int			numrecs;
+	xfs_inobt_rec_t		*rrp;	/* right block record pointer */
+	xfs_btree_cur_t		*tcur;	/* temporary btree cursor */
+
+	mp = cur->bc_mp;
+
+	/*
+	 * Get the index of the entry being deleted, check for nothing there.
+	 */
+	ptr = cur->bc_ptrs[level];
+	if (ptr == 0) {
+		*stat = 0;
+		return 0;
+	}
+
+	/*
+	 * Get the buffer & block containing the record or key/ptr.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+#endif
+	/*
+	 * Fail if we're off the end of the block.
+	 */
+
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	if (ptr > numrecs) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * It's a nonleaf.  Excise the key and ptr being deleted, by
+	 * sliding the entries past them down one.
+	 * Log the changed areas of the block.
+	 */
+	if (level > 0) {
+		kp = XFS_INOBT_KEY_ADDR(block, 1, cur);
+		pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = ptr; i < numrecs; i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		if (ptr < numrecs) {
+			memmove(&kp[ptr - 1], &kp[ptr],
+				(numrecs - ptr) * sizeof(*kp));
+			memmove(&pp[ptr - 1], &pp[ptr],
+				(numrecs - ptr) * sizeof(*pp));
+			xfs_inobt_log_keys(cur, bp, ptr, numrecs - 1);
+			xfs_inobt_log_ptrs(cur, bp, ptr, numrecs - 1);
+		}
+	}
+	/*
+	 * It's a leaf.  Excise the record being deleted, by sliding the
+	 * entries past it down one.  Log the changed areas of the block.
+	 */
+	else {
+		rp = XFS_INOBT_REC_ADDR(block, 1, cur);
+		if (ptr < numrecs) {
+			memmove(&rp[ptr - 1], &rp[ptr],
+				(numrecs - ptr) * sizeof(*rp));
+			xfs_inobt_log_recs(cur, bp, ptr, numrecs - 1);
+		}
+		/*
+		 * If it's the first record in the block, we'll need a key
+		 * structure to pass up to the next level (updkey).
+		 */
+		if (ptr == 1) {
+			key.ir_startino = rp->ir_startino;
+			kp = &key;
+		}
+	}
+	/*
+	 * Decrement and log the number of entries in the block.
+	 */
+	numrecs--;
+	INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+	xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
+	/*
+	 * Is this the root level?  If so, we're almost done.
+	 */
+	if (level == cur->bc_nlevels - 1) {
+		/*
+		 * If this is the root level,
+		 * and there's only one entry left,
+		 * and it's NOT the leaf level,
+		 * then we can get rid of this level.
+		 */
+		if (numrecs == 1 && level > 0) {
+			agbp = cur->bc_private.i.agbp;
+			agi = XFS_BUF_TO_AGI(agbp);
+			/*
+			 * pp is still set to the first pointer in the block.
+			 * Make it the new root of the btree.
+			 */
+			bno = INT_GET(agi->agi_root, ARCH_CONVERT);
+			agi->agi_root = *pp;
+			INT_MOD(agi->agi_level, ARCH_CONVERT, -1);
+			/*
+			 * Free the block.
+			 */
+			if ((error = xfs_free_extent(cur->bc_tp,
+				XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1)))
+				return error;
+			xfs_trans_binval(cur->bc_tp, bp);
+			xfs_ialloc_log_agi(cur->bc_tp, agbp,
+				XFS_AGI_ROOT | XFS_AGI_LEVEL);
+			/*
+			 * Update the cursor so there's one fewer level.
+			 */
+			cur->bc_bufs[level] = NULL;
+			cur->bc_nlevels--;
+		} else if (level > 0 &&
+			   (error = xfs_inobt_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * If we deleted the leftmost entry in the block, update the
+	 * key values above us in the tree.
+	 */
+	if (ptr == 1 && (error = xfs_inobt_updkey(cur, kp, level + 1)))
+		return error;
+	/*
+	 * If the number of records remaining in the block is at least
+	 * the minimum, we're done.
+	 */
+	if (numrecs >= XFS_INOBT_BLOCK_MINRECS(level, cur)) {
+		if (level > 0 &&
+		    (error = xfs_inobt_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * Otherwise, we have to move some records around to keep the
+	 * tree balanced.  Look at the left and right sibling blocks to
+	 * see if we can re-balance by moving only one record.
+	 */
+	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
+	bno = NULLAGBLOCK;
+	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
+	/*
+	 * Duplicate the cursor so our btree manipulations here won't
+	 * disrupt the next level up.
+	 */
+	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
+		return error;
+	/*
+	 * If there's a right sibling, see if it's ok to shift an entry
+	 * out of it.
+	 */
+	if (rbno != NULLAGBLOCK) {
+		/*
+		 * Move the temp cursor to the last entry in the next block.
+		 * Actually any entry but the first would suffice.
+		 */
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		if ((error = xfs_inobt_increment(tcur, level, &i)))
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		i = xfs_btree_lastrec(tcur, level);
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		/*
+		 * Grab a pointer to the block.
+		 */
+		rbp = tcur->bc_bufs[level];
+		right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+			goto error0;
+#endif
+		/*
+		 * Grab the current block number, for future use.
+		 */
+		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		/*
+		 * If right block is full enough so that removing one entry
+		 * won't make it too empty, and left-shifting an entry out
+		 * of right to us works, we're done.
+		 */
+		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		     XFS_INOBT_BLOCK_MINRECS(level, cur)) {
+			if ((error = xfs_inobt_lshift(tcur, level, &i)))
+				goto error0;
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_INOBT_BLOCK_MINRECS(level, cur));
+				xfs_btree_del_cursor(tcur,
+						     XFS_BTREE_NOERROR);
+				if (level > 0 &&
+				    (error = xfs_inobt_decrement(cur, level,
+						&i)))
+					return error;
+				*stat = 1;
+				return 0;
+			}
+		}
+		/*
+		 * Otherwise, grab the number of records in right for
+		 * future reference, and fix up the temp cursor to point
+		 * to our block again (last record).
+		 */
+		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		if (lbno != NULLAGBLOCK) {
+			xfs_btree_firstrec(tcur, level);
+			if ((error = xfs_inobt_decrement(tcur, level, &i)))
+				goto error0;
+		}
+	}
+	/*
+	 * If there's a left sibling, see if it's ok to shift an entry
+	 * out of it.
+	 */
+	if (lbno != NULLAGBLOCK) {
+		/*
+		 * Move the temp cursor to the first entry in the
+		 * previous block.
+		 */
+		xfs_btree_firstrec(tcur, level);
+		if ((error = xfs_inobt_decrement(tcur, level, &i)))
+			goto error0;
+		xfs_btree_firstrec(tcur, level);
+		/*
+		 * Grab a pointer to the block.
+		 */
+		lbp = tcur->bc_bufs[level];
+		left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+			goto error0;
+#endif
+		/*
+		 * Grab the current block number, for future use.
+		 */
+		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		/*
+		 * If left block is full enough so that removing one entry
+		 * won't make it too empty, and right-shifting an entry out
+		 * of left to us works, we're done.
+		 */
+		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		     XFS_INOBT_BLOCK_MINRECS(level, cur)) {
+			if ((error = xfs_inobt_rshift(tcur, level, &i)))
+				goto error0;
+			if (i) {
+				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				       XFS_INOBT_BLOCK_MINRECS(level, cur));
+				xfs_btree_del_cursor(tcur,
+						     XFS_BTREE_NOERROR);
+				if (level == 0)
+					cur->bc_ptrs[0]++;
+				*stat = 1;
+				return 0;
+			}
+		}
+		/*
+		 * Otherwise, grab the number of records in left for
+		 * future reference.
+		 */
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	/*
+	 * Delete the temp cursor, we're done with it.
+	 */
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	/*
+	 * If here, we need to do a join to keep the tree balanced.
+	 */
+	ASSERT(bno != NULLAGBLOCK);
+	/*
+	 * See if we can join with the left neighbor block.
+	 */
+	if (lbno != NULLAGBLOCK &&
+	    lrecs + numrecs <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * Set "right" to be the starting block,
+		 * "left" to be the left neighbor.
+		 */
+		rbno = bno;
+		right = block;
+		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rbp = bp;
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.i.agno, lbno, 0, &lbp,
+				XFS_INO_BTREE_REF)))
+			return error;
+		left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+			return error;
+	}
+	/*
+	 * If that won't work, see if we can join with the right neighbor block.
+	 */
+	else if (rbno != NULLAGBLOCK &&
+		 rrecs + numrecs <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * Set "left" to be the starting block,
+		 * "right" to be the right neighbor.
+		 */
+		lbno = bno;
+		left = block;
+		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lbp = bp;
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.i.agno, rbno, 0, &rbp,
+				XFS_INO_BTREE_REF)))
+			return error;
+		right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+			return error;
+	}
+	/*
+	 * Otherwise, we can't fix the imbalance.
+	 * Just return.  This is probably a logic error, but it's not fatal.
+	 */
+	else {
+		if (level > 0 && (error = xfs_inobt_decrement(cur, level, &i)))
+			return error;
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * We're now going to join "left" and "right" by moving all the stuff
+	 * in "right" to "left" and deleting "right".
+	 */
+	if (level > 0) {
+		/*
+		 * It's a non-leaf.  Move keys and pointers.
+		 */
+		lkp = XFS_INOBT_KEY_ADDR(left, lrecs + 1, cur);
+		lpp = XFS_INOBT_PTR_ADDR(left, lrecs + 1, cur);
+		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
+		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < rrecs; i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memcpy(lkp, rkp, rrecs * sizeof(*lkp));
+		memcpy(lpp, rpp, rrecs * sizeof(*lpp));
+		xfs_inobt_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
+		xfs_inobt_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
+	} else {
+		/*
+		 * It's a leaf.  Move records.
+		 */
+		lrp = XFS_INOBT_REC_ADDR(left, lrecs + 1, cur);
+		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
+		memcpy(lrp, rrp, rrecs * sizeof(*lrp));
+		xfs_inobt_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
+	}
+	/*
+	 * If we joined with the left neighbor, set the buffer in the
+	 * cursor to the left block, and fix up the index.
+	 */
+	if (bp != lbp) {
+		xfs_btree_setbuf(cur, level, lbp);
+		cur->bc_ptrs[level] += lrecs;
+	}
+	/*
+	 * If we joined with the right neighbor and there's a level above
+	 * us, increment the cursor at that level.
+	 */
+	else if (level + 1 < cur->bc_nlevels &&
+		 (error = xfs_alloc_increment(cur, level + 1, &i)))
+		return error;
+	/*
+	 * Fix up the number of records in the surviving block.
+	 */
+	lrecs += rrecs;
+	INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs);
+	/*
+	 * Fix up the right block pointer in the surviving block, and log it.
+	 */
+	left->bb_rightsib = right->bb_rightsib;
+	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+	/*
+	 * If there is a right sibling now, make it point to the
+	 * remaining block.
+	 */
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		xfs_inobt_block_t	*rrblock;
+		xfs_buf_t		*rrbp;
+
+		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+				cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+				&rrbp, XFS_INO_BTREE_REF)))
+			return error;
+		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
+		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
+			return error;
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
+	}
+	/*
+	 * Free the deleting block.
+	 */
+	if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp,
+				     cur->bc_private.i.agno, rbno), 1)))
+		return error;
+	xfs_trans_binval(cur->bc_tp, rbp);
+	/*
+	 * Readjust the ptr at this level if it's not a leaf, since it's
+	 * still pointing at the deletion point, which makes the cursor
+	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
+	 * We can't use decrement because it would change the next level up.
+	 */
+	if (level > 0)
+		cur->bc_ptrs[level]--;
+	/*
+	 * A return value of 2 in *stat means the next level up has something to do.
+	 */
+	*stat = 2;
+	return 0;
+
+error0:
+	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+	return error;
+}
+
+/*
+ * Insert one record/level.  Return information to the caller
+ * allowing the next level up to proceed if necessary.
+ */
+STATIC int				/* error */
+xfs_inobt_insrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to insert record at */
+	xfs_agblock_t		*bnop,	/* i/o: block number inserted */
+	xfs_inobt_rec_t		*recp,	/* i/o: record data inserted */
+	xfs_btree_cur_t		**curp,	/* output: new cursor replacing cur */
+	int			*stat)	/* success/failure */
+{
+	xfs_inobt_block_t	*block;	/* btree block record/key lives in */
+	xfs_buf_t		*bp;	/* buffer for block */
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_inobt_key_t		key;	/* key value being inserted */
+	xfs_inobt_key_t		*kp=NULL;	/* pointer to btree keys */
+	xfs_agblock_t		nbno;	/* block number of allocated block */
+	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
+	xfs_inobt_key_t		nkey;	/* new key value, from split */
+	xfs_inobt_rec_t		nrec;	/* new record value, for caller */
+	int			numrecs;
+	int			optr;	/* old ptr value */
+	xfs_inobt_ptr_t		*pp;	/* pointer to btree addresses */
+	int			ptr;	/* index in btree block for this rec */
+	xfs_inobt_rec_t		*rp=NULL;	/* pointer to btree records */
+
+	/*
+	 * If we made it to the root level, allocate a new root block
+	 * and we're done.
+	 */
+	if (level >= cur->bc_nlevels) {
+		error = xfs_inobt_newroot(cur, &i);
+		*bnop = NULLAGBLOCK;
+		*stat = i;
+		return error;
+	}
+	/*
+	 * Make a key out of the record data to be inserted, and save it.
+	 */
+	key.ir_startino = recp->ir_startino; /* INT_: direct copy */
+	optr = ptr = cur->bc_ptrs[level];
+	/*
+	 * If we're off the left edge, return failure.
+	 */
+	if (ptr == 0) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Get pointers to the btree buffer and block.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+	/*
+	 * Check that the new entry is being inserted in the right place.
+	 */
+	if (ptr <= numrecs) {
+		if (level == 0) {
+			rp = XFS_INOBT_REC_ADDR(block, ptr, cur);
+			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
+		} else {
+			kp = XFS_INOBT_KEY_ADDR(block, ptr, cur);
+			xfs_btree_check_key(cur->bc_btnum, &key, kp);
+		}
+	}
+#endif
+	nbno = NULLAGBLOCK;
+	ncur = (xfs_btree_cur_t *)0;
+	/*
+	 * If the block is full, we can't insert the new entry until we
+	 * make the block un-full.
+	 */
+	if (numrecs == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+		/*
+		 * First, try shifting an entry to the right neighbor.
+		 */
+		if ((error = xfs_inobt_rshift(cur, level, &i)))
+			return error;
+		if (i) {
+			/* nothing */
+		}
+		/*
+		 * Next, try shifting an entry to the left neighbor.
+		 */
+		else {
+			if ((error = xfs_inobt_lshift(cur, level, &i)))
+				return error;
+			if (i) {
+				optr = ptr = cur->bc_ptrs[level];
+			} else {
+				/*
+				 * Next, try splitting the current block
+				 * in half. If this works we have to
+				 * re-set our variables because
+				 * we could be in a different block now.
+				 */
+				if ((error = xfs_inobt_split(cur, level, &nbno,
+						&nkey, &ncur, &i)))
+					return error;
+				if (i) {
+					bp = cur->bc_bufs[level];
+					block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+					if ((error = xfs_btree_check_sblock(cur,
+							block, level, bp)))
+						return error;
+#endif
+					ptr = cur->bc_ptrs[level];
+					nrec.ir_startino = nkey.ir_startino; /* INT_: direct copy */
+				} else {
+					/*
+					 * Otherwise the insert fails.
+					 */
+					*stat = 0;
+					return 0;
+				}
+			}
+		}
+	}
+	/*
+	 * At this point we know there's room for our new entry in the block
+	 * we're pointing at.
+	 */
+	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	if (level > 0) {
+		/*
+		 * It's a non-leaf entry.  Make a hole for the new data
+		 * in the key and ptr regions of the block.
+		 */
+		kp = XFS_INOBT_KEY_ADDR(block, 1, cur);
+		pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
+#ifdef DEBUG
+		for (i = numrecs; i >= ptr; i--) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memmove(&kp[ptr], &kp[ptr - 1],
+			(numrecs - ptr + 1) * sizeof(*kp));
+		memmove(&pp[ptr], &pp[ptr - 1],
+			(numrecs - ptr + 1) * sizeof(*pp));
+		/*
+		 * Now stuff the new data in, bump numrecs and log the new data.
+		 */
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
+			return error;
+#endif
+		kp[ptr - 1] = key; /* INT_: struct copy */
+		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
+		numrecs++;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		xfs_inobt_log_keys(cur, bp, ptr, numrecs);
+		xfs_inobt_log_ptrs(cur, bp, ptr, numrecs);
+	} else {
+		/*
+		 * It's a leaf entry.  Make a hole for the new record.
+		 */
+		rp = XFS_INOBT_REC_ADDR(block, 1, cur);
+		memmove(&rp[ptr], &rp[ptr - 1],
+			(numrecs - ptr + 1) * sizeof(*rp));
+		/*
+		 * Now stuff the new record in, bump numrecs
+		 * and log the new data.
+		 */
+		rp[ptr - 1] = *recp; /* INT_: struct copy */
+		numrecs++;
+		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		xfs_inobt_log_recs(cur, bp, ptr, numrecs);
+	}
+	/*
+	 * Log the new number of records in the btree header.
+	 */
+	xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
+#ifdef DEBUG
+	/*
+	 * Check that the key/record is in the right place, now.
+	 */
+	if (ptr < numrecs) {
+		if (level == 0)
+			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
+				rp + ptr);
+		else
+			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
+				kp + ptr);
+	}
+#endif
+	/*
+	 * If we inserted at the start of a block, update the parents' keys.
+	 */
+	if (optr == 1 && (error = xfs_inobt_updkey(cur, &key, level + 1)))
+		return error;
+	/*
+	 * Return the new block number, if any.
+	 * If there is one, give back a record value and a cursor too.
+	 */
+	*bnop = nbno;
+	if (nbno != NULLAGBLOCK) {
+		*recp = nrec; /* INT_: struct copy */
+		*curp = ncur;
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Log header fields from a btree block.
+ */
+STATIC void
+xfs_inobt_log_block(
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			fields)	/* mask of fields: XFS_BB_... */
+{
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	static const short	offsets[] = {	/* table of offsets */
+		offsetof(xfs_inobt_block_t, bb_magic),
+		offsetof(xfs_inobt_block_t, bb_level),
+		offsetof(xfs_inobt_block_t, bb_numrecs),
+		offsetof(xfs_inobt_block_t, bb_leftsib),
+		offsetof(xfs_inobt_block_t, bb_rightsib),
+		sizeof(xfs_inobt_block_t)
+	};
+
+	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
+	xfs_trans_log_buf(tp, bp, first, last);
+}
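xfs_btree_offsets() (shared btree code, not shown in this file) turns the XFS_BB_* field mask and the offsets[] table above into one inclusive byte range, so only the span covering the touched header fields is logged. A standalone sketch of that conversion, with made-up structure and helper names:

#include <stdio.h>
#include <stddef.h>

struct hdr {				/* stand-in for the block header */
	unsigned int	magic;
	unsigned short	level;
	unsigned short	numrecs;
	unsigned int	leftsib;
	unsigned int	rightsib;
};

static const short hdr_offsets[] = {	/* one entry per field, plus the size */
	offsetof(struct hdr, magic),
	offsetof(struct hdr, level),
	offsetof(struct hdr, numrecs),
	offsetof(struct hdr, leftsib),
	offsetof(struct hdr, rightsib),
	sizeof(struct hdr)
};

/*
 * Map a mask of field bits (bit 0 = first field) to the inclusive byte
 * range running from the lowest selected field to the end of the highest.
 */
static void field_range(int fields, int nbits, int *first, int *last)
{
	int i, lo = -1, hi = -1;

	for (i = 0; i < nbits; i++) {
		if (fields & (1 << i)) {
			if (lo < 0)
				lo = i;
			hi = i;
		}
	}
	*first = hdr_offsets[lo];
	*last = hdr_offsets[hi + 1] - 1;
}

int main(void)
{
	int first, last;

	field_range(0x04 | 0x10, 5, &first, &last);	/* numrecs..rightsib */
	printf("log bytes %d..%d\n", first, last);
	return 0;
}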
+
+/*
+ * Log keys from a btree block (nonleaf).
+ */
+STATIC void
+xfs_inobt_log_keys(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			kfirst,	/* index of first key to log */
+	int			klast)	/* index of last key to log */
+{
+	xfs_inobt_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	xfs_inobt_key_t		*kp;	/* key pointer in btree block */
+	int			last;	/* last byte offset logged */
+
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+	kp = XFS_INOBT_KEY_ADDR(block, 1, cur);
+	first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Log block pointer fields from a btree block (nonleaf).
+ */
+STATIC void
+xfs_inobt_log_ptrs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			pfirst,	/* index of first pointer to log */
+	int			plast)	/* index of last pointer to log */
+{
+	xfs_inobt_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	xfs_inobt_ptr_t		*pp;	/* block-pointer pointer in btree blk */
+
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+	pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
+	first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Log records from a btree block (leaf).
+ */
+STATIC void
+xfs_inobt_log_recs(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_buf_t		*bp,	/* buffer containing btree block */
+	int			rfirst,	/* index of first record to log */
+	int			rlast)	/* index of last record to log */
+{
+	xfs_inobt_block_t	*block;	/* btree block to log from */
+	int			first;	/* first byte offset logged */
+	int			last;	/* last byte offset logged */
+	xfs_inobt_rec_t		*rp;	/* record pointer for btree block */
+
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+	rp = XFS_INOBT_REC_ADDR(block, 1, cur);
+	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
+	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
+	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+}
+
+/*
+ * Lookup the record.  The cursor is made to point to it, based on dir.
+ * *stat is set to 0 if no such record can be found, 1 for success.
+ */
+STATIC int				/* error */
+xfs_inobt_lookup(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_lookup_t		dir,	/* <=, ==, or >= */
+	int			*stat)	/* success/failure */
+{
+	xfs_agblock_t		agbno;	/* a.g. relative btree block number */
+	xfs_agnumber_t		agno;	/* allocation group number */
+	xfs_inobt_block_t	*block=NULL;	/* current btree block */
+	__int64_t		diff;	/* difference for the current key */
+	int			error;	/* error return value */
+	int			keyno=0;	/* current key number */
+	int			level;	/* level in the btree */
+	xfs_mount_t		*mp;	/* file system mount point */
+
+	/*
+	 * Get the allocation group header, and the root block number.
+	 */
+	mp = cur->bc_mp;
+	{
+		xfs_agi_t	*agi;	/* a.g. inode header */
+
+		agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+		agno = INT_GET(agi->agi_seqno, ARCH_CONVERT);
+		agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
+	}
+	/*
+	 * Iterate over each level in the btree, starting at the root.
+	 * For each level above the leaves, find the key we need, based
+	 * on the lookup record, then follow the corresponding block
+	 * pointer down to the next level.
+	 */
+	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+		xfs_buf_t	*bp;	/* buffer pointer for btree block */
+		xfs_daddr_t	d;	/* disk address of btree block */
+
+		/*
+		 * Get the disk address we're looking for.
+		 */
+		d = XFS_AGB_TO_DADDR(mp, agno, agbno);
+		/*
+		 * If the old buffer at this level is for a different block,
+		 * throw it away, otherwise just use it.
+		 */
+		bp = cur->bc_bufs[level];
+		if (bp && XFS_BUF_ADDR(bp) != d)
+			bp = (xfs_buf_t *)0;
+		if (!bp) {
+			/*
+			 * Need to get a new buffer.  Read it, then
+			 * set it in the cursor, releasing the old one.
+			 */
+			if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
+					agno, agbno, 0, &bp, XFS_INO_BTREE_REF)))
+				return error;
+			xfs_btree_setbuf(cur, level, bp);
+			/*
+			 * Point to the btree block, now that we have the buffer
+			 */
+			block = XFS_BUF_TO_INOBT_BLOCK(bp);
+			if ((error = xfs_btree_check_sblock(cur, block, level,
+					bp)))
+				return error;
+		} else
+			block = XFS_BUF_TO_INOBT_BLOCK(bp);
+		/*
+		 * If we already had a key match at a higher level, we know
+		 * we need to use the first entry in this block.
+		 */
+		if (diff == 0)
+			keyno = 1;
+		/*
+		 * Otherwise we need to search this block.  Do a binary search.
+		 */
+		else {
+			int		high;	/* high entry number */
+			xfs_inobt_key_t	*kkbase=NULL;/* base of keys in block */
+			xfs_inobt_rec_t	*krbase=NULL;/* base of records in block */
+			int		low;	/* low entry number */
+
+			/*
+			 * Get a pointer to keys or records.
+			 */
+			if (level > 0)
+				kkbase = XFS_INOBT_KEY_ADDR(block, 1, cur);
+			else
+				krbase = XFS_INOBT_REC_ADDR(block, 1, cur);
+			/*
+			 * Set low and high entry numbers, 1-based.
+			 */
+			low = 1;
+			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+				/*
+				 * If the block is empty, the tree must
+				 * be an empty leaf.
+				 */
+				ASSERT(level == 0 && cur->bc_nlevels == 1);
+				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
+				*stat = 0;
+				return 0;
+			}
+			/*
+			 * Binary search the block.
+			 */
+			while (low <= high) {
+				xfs_agino_t	startino;	/* key value */
+
+				/*
+				 * keyno is average of low and high.
+				 */
+				keyno = (low + high) >> 1;
+				/*
+				 * Get startino.
+				 */
+				if (level > 0) {
+					xfs_inobt_key_t	*kkp;
+
+					kkp = kkbase + keyno - 1;
+					startino = INT_GET(kkp->ir_startino, ARCH_CONVERT);
+				} else {
+					xfs_inobt_rec_t	*krp;
+
+					krp = krbase + keyno - 1;
+					startino = INT_GET(krp->ir_startino, ARCH_CONVERT);
+				}
+				/*
+				 * Compute difference to get next direction.
+				 */
+				diff = (__int64_t)
+					startino - cur->bc_rec.i.ir_startino;
+				/*
+				 * Less than, move right.
+				 */
+				if (diff < 0)
+					low = keyno + 1;
+				/*
+				 * Greater than, move left.
+				 */
+				else if (diff > 0)
+					high = keyno - 1;
+				/*
+				 * Equal, we're done.
+				 */
+				else
+					break;
+			}
+		}
+		/*
+		 * If there are more levels, set up for the next level
+		 * by getting the block number and filling in the cursor.
+		 */
+		if (level > 0) {
+			/*
+			 * If we moved left, need the previous key number,
+			 * unless there isn't one.
+			 */
+			if (diff > 0 && --keyno < 1)
+				keyno = 1;
+			agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT);
+#ifdef DEBUG
+			if ((error = xfs_btree_check_sptr(cur, agbno, level)))
+				return error;
+#endif
+			cur->bc_ptrs[level] = keyno;
+		}
+	}
+	/*
+	 * Done with the search.
+	 * See if we need to adjust the results.
+	 */
+	if (dir != XFS_LOOKUP_LE && diff < 0) {
+		keyno++;
+		/*
+		 * If ge search and we went off the end of the block, but it's
+		 * not the last block, we're in the wrong block.
+		 */
+		if (dir == XFS_LOOKUP_GE &&
+		    keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
+		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+			int	i;
+
+			cur->bc_ptrs[0] = keyno;
+			if ((error = xfs_inobt_increment(cur, 0, &i)))
+				return error;
+			ASSERT(i == 1);
+			*stat = 1;
+			return 0;
+		}
+	}
+	else if (dir == XFS_LOOKUP_LE && diff > 0)
+		keyno--;
+	cur->bc_ptrs[0] = keyno;
+	/*
+	 * Return if we succeeded or not.
+	 */
+	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		*stat = 0;
+	else
+		*stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
+	return 0;
+}
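Each level of the lookup above is a 1-based binary search followed by a small left/right adjustment that depends on the requested direction. A self-contained sketch of that search-and-adjust step on a plain sorted array (illustrative names, not the kernel interface):

#include <stdio.h>

enum dir { LOOKUP_LE, LOOKUP_EQ, LOOKUP_GE };

/*
 * Binary-search a sorted array using 1-based indices (as the btree code
 * does) and return the qualifying entry number for the given direction,
 * or 0 if no entry qualifies.
 */
static int lookup(const int *v, int n, int want, enum dir dir)
{
	int low = 1, high = n, keyno = 0;
	long diff = 1;

	while (low <= high) {
		keyno = (low + high) >> 1;
		diff = (long)v[keyno - 1] - want;
		if (diff < 0)
			low = keyno + 1;	/* key too small, move right */
		else if (diff > 0)
			high = keyno - 1;	/* key too big, move left */
		else
			break;			/* exact match */
	}
	if (dir != LOOKUP_LE && diff < 0)
		keyno++;			/* EQ/GE: step past a smaller key */
	else if (dir == LOOKUP_LE && diff > 0)
		keyno--;			/* LE: step back from a bigger key */
	if (keyno <= 0 || keyno > n)
		return 0;
	if (dir == LOOKUP_EQ && diff != 0)
		return 0;
	return keyno;
}

int main(void)
{
	int v[] = { 10, 20, 30, 40 };

	printf("%d %d %d\n",
	       lookup(v, 4, 25, LOOKUP_LE),	/* 2 (entry 20) */
	       lookup(v, 4, 25, LOOKUP_GE),	/* 3 (entry 30) */
	       lookup(v, 4, 25, LOOKUP_EQ));	/* 0 (no match) */
	return 0;
}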
+
+/*
+ * Move 1 record left from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int				/* error */
+xfs_inobt_lshift(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to shift record on */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+#ifdef DEBUG
+	int			i;	/* loop index */
+#endif
+	xfs_inobt_key_t		key;	/* key value for leaf level upward */
+	xfs_buf_t		*lbp;	/* buffer for left neighbor block */
+	xfs_inobt_block_t	*left;	/* left neighbor btree block */
+	xfs_inobt_key_t		*lkp=NULL;	/* key pointer for left block */
+	xfs_inobt_ptr_t		*lpp;	/* address pointer for left block */
+	xfs_inobt_rec_t		*lrp=NULL;	/* record pointer for left block */
+	int			nrec;	/* new number of left block entries */
+	xfs_buf_t		*rbp;	/* buffer for right (current) block */
+	xfs_inobt_block_t	*right;	/* right (current) btree block */
+	xfs_inobt_key_t		*rkp=NULL;	/* key pointer for right block */
+	xfs_inobt_ptr_t		*rpp=NULL;	/* address pointer for right block */
+	xfs_inobt_rec_t		*rrp=NULL;	/* record pointer for right block */
+
+	/*
+	 * Set up variables for this block as "right".
+	 */
+	rbp = cur->bc_bufs[level];
+	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+		return error;
+#endif
+	/*
+	 * If we've got no left sibling then we can't shift an entry left.
+	 */
+	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * If the cursor entry is the one that would be moved, don't
+	 * do it... it's too complicated.
+	 */
+	if (cur->bc_ptrs[level] <= 1) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Set up the left neighbor as "left".
+	 */
+	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp,
+			XFS_INO_BTREE_REF)))
+		return error;
+	left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+	/*
+	 * If it's full, it can't take another entry.
+	 */
+	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+		*stat = 0;
+		return 0;
+	}
+	nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	/*
+	 * If non-leaf, copy a key and a ptr to the left block.
+	 */
+	if (level > 0) {
+		lkp = XFS_INOBT_KEY_ADDR(left, nrec, cur);
+		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
+		*lkp = *rkp;
+		xfs_inobt_log_keys(cur, lbp, nrec, nrec);
+		lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur);
+		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level)))
+			return error;
+#endif
+		*lpp = *rpp; /* INT_: no-change copy */
+		xfs_inobt_log_ptrs(cur, lbp, nrec, nrec);
+	}
+	/*
+	 * If leaf, copy a record to the left block.
+	 */
+	else {
+		lrp = XFS_INOBT_REC_ADDR(left, nrec, cur);
+		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
+		*lrp = *rrp;
+		xfs_inobt_log_recs(cur, lbp, nrec, nrec);
+	}
+	/*
+	 * Bump and log left's numrecs, decrement and log right's numrecs.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1);
+	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
+#ifdef DEBUG
+	if (level > 0)
+		xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
+	else
+		xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
+#endif
+	INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
+	/*
+	 * Slide the contents of right down one entry.
+	 */
+	if (level > 0) {
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT),
+					level)))
+				return error;
+		}
+#endif
+		memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	} else {
+		memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
+		rkp = &key;
+	}
+	/*
+	 * Update the parent key values of right.
+	 */
+	if ((error = xfs_inobt_updkey(cur, rkp, level + 1)))
+		return error;
+	/*
+	 * Slide the cursor value left one.
+	 */
+	cur->bc_ptrs[level]--;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Allocate a new root block, fill it in.
+ */
+STATIC int				/* error */
+xfs_inobt_newroot(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			*stat)	/* success/failure */
+{
+	xfs_agi_t		*agi;	/* a.g. inode header */
+	xfs_alloc_arg_t		args;	/* allocation argument structure */
+	xfs_inobt_block_t	*block;	/* one half of the old root block */
+	xfs_buf_t		*bp;	/* buffer containing block */
+	int			error;	/* error return value */
+	xfs_inobt_key_t		*kp;	/* btree key pointer */
+	xfs_agblock_t		lbno;	/* left block number */
+	xfs_buf_t		*lbp;	/* left buffer pointer */
+	xfs_inobt_block_t	*left;	/* left btree block */
+	xfs_buf_t		*nbp;	/* new (root) buffer */
+	xfs_inobt_block_t	*new;	/* new (root) btree block */
+	int			nptr;	/* new value for key index, 1 or 2 */
+	xfs_inobt_ptr_t		*pp;	/* btree address pointer */
+	xfs_agblock_t		rbno;	/* right block number */
+	xfs_buf_t		*rbp;	/* right buffer pointer */
+	xfs_inobt_block_t	*right;	/* right btree block */
+	xfs_inobt_rec_t		*rp;	/* btree record pointer */
+
+	ASSERT(cur->bc_nlevels < XFS_IN_MAXLEVELS(cur->bc_mp));
+
+	/*
+	 * Get a block & a buffer.
+	 */
+	agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+	args.tp = cur->bc_tp;
+	args.mp = cur->bc_mp;
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno,
+		INT_GET(agi->agi_root, ARCH_CONVERT));
+	args.mod = args.minleft = args.alignment = args.total = args.wasdel =
+		args.isfl = args.userdata = args.minalignslop = 0;
+	args.minlen = args.maxlen = args.prod = 1;
+	args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	if ((error = xfs_alloc_vextent(&args)))
+		return error;
+	/*
+	 * None available, we fail.
+	 */
+	if (args.fsbno == NULLFSBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	ASSERT(args.len == 1);
+	nbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0);
+	new = XFS_BUF_TO_INOBT_BLOCK(nbp);
+	/*
+	 * Set the root data in the a.g. inode structure.
+	 */
+	INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno);
+	INT_MOD(agi->agi_level, ARCH_CONVERT, 1);
+	xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
+		XFS_AGI_ROOT | XFS_AGI_LEVEL);
+	/*
+	 * At the previous root level there are now two blocks: the old
+	 * root, and the new block generated when it was split.
+	 * We don't know which one the cursor is pointing at, so we
+	 * set up variables "left" and "right" for each case.
+	 */
+	bp = cur->bc_bufs[cur->bc_nlevels - 1];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp)))
+		return error;
+#endif
+	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		/*
+		 * Our block is left, pick up the right block.
+		 */
+		lbp = bp;
+		lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp));
+		left = block;
+		rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
+				rbno, 0, &rbp, XFS_INO_BTREE_REF)))
+			return error;
+		bp = rbp;
+		right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+		if ((error = xfs_btree_check_sblock(cur, right,
+				cur->bc_nlevels - 1, rbp)))
+			return error;
+		nptr = 1;
+	} else {
+		/*
+		 * Our block is right, pick up the left block.
+		 */
+		rbp = bp;
+		rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp));
+		right = block;
+		lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
+				lbno, 0, &lbp, XFS_INO_BTREE_REF)))
+			return error;
+		bp = lbp;
+		left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+		if ((error = xfs_btree_check_sblock(cur, left,
+				cur->bc_nlevels - 1, lbp)))
+			return error;
+		nptr = 2;
+	}
+	/*
+	 * Fill in the new block's btree header and log it.
+	 */
+	INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
+	INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels);
+	INT_SET(new->bb_numrecs, ARCH_CONVERT, 2);
+	INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
+	INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+	xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS);
+	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
+	/*
+	 * Fill in the key data in the new root.
+	 */
+	kp = XFS_INOBT_KEY_ADDR(new, 1, cur);
+	if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) {
+		kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */
+		kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */
+	} else {
+		rp = XFS_INOBT_REC_ADDR(left, 1, cur);
+		INT_COPY(kp[0].ir_startino, rp->ir_startino, ARCH_CONVERT);
+		rp = XFS_INOBT_REC_ADDR(right, 1, cur);
+		INT_COPY(kp[1].ir_startino, rp->ir_startino, ARCH_CONVERT);
+	}
+	xfs_inobt_log_keys(cur, nbp, 1, 2);
+	/*
+	 * Fill in the pointer data in the new root.
+	 */
+	pp = XFS_INOBT_PTR_ADDR(new, 1, cur);
+	INT_SET(pp[0], ARCH_CONVERT, lbno);
+	INT_SET(pp[1], ARCH_CONVERT, rbno);
+	xfs_inobt_log_ptrs(cur, nbp, 1, 2);
+	/*
+	 * Fix up the cursor.
+	 */
+	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
+	cur->bc_ptrs[cur->bc_nlevels] = nptr;
+	cur->bc_nlevels++;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Move 1 record right from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int				/* error */
+xfs_inobt_rshift(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to shift record on */
+	int			*stat)	/* success/failure */
+{
+	int			error;	/* error return value */
+	int			i;	/* loop index */
+	xfs_inobt_key_t		key;	/* key value for leaf level upward */
+	xfs_buf_t		*lbp;	/* buffer for left (current) block */
+	xfs_inobt_block_t	*left;	/* left (current) btree block */
+	xfs_inobt_key_t		*lkp;	/* key pointer for left block */
+	xfs_inobt_ptr_t		*lpp;	/* address pointer for left block */
+	xfs_inobt_rec_t		*lrp;	/* record pointer for left block */
+	xfs_buf_t		*rbp;	/* buffer for right neighbor block */
+	xfs_inobt_block_t	*right;	/* right neighbor btree block */
+	xfs_inobt_key_t		*rkp;	/* key pointer for right block */
+	xfs_inobt_ptr_t		*rpp;	/* address pointer for right block */
+	xfs_inobt_rec_t		*rrp=NULL;	/* record pointer for right block */
+	xfs_btree_cur_t		*tcur;	/* temporary cursor */
+
+	/*
+	 * Set up variables for this block as "left".
+	 */
+	lbp = cur->bc_bufs[level];
+	left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+#endif
+	/*
+	 * If we've got no right sibling then we can't shift an entry right.
+	 */
+	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * If the cursor entry is the one that would be moved, don't
+	 * do it... it's too complicated.
+	 */
+	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Set up the right neighbor as "right".
+	 */
+	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp,
+			XFS_INO_BTREE_REF)))
+		return error;
+	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
+		return error;
+	/*
+	 * If it's full, it can't take another entry.
+	 */
+	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Make a hole at the start of the right neighbor block, then
+	 * copy the last left block entry to the hole.
+	 */
+	if (level > 0) {
+		lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
+		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
+			return error;
+#endif
+		*rkp = *lkp; /* INT_: no change copy */
+		*rpp = *lpp; /* INT_: no change copy */
+		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+	} else {
+		lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
+		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		*rrp = *lrp;
+		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
+		rkp = &key;
+	}
+	/*
+	 * Decrement and log left's numrecs, bump and log right's numrecs.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
+	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+#ifdef DEBUG
+	if (level > 0)
+		xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
+	else
+		xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
+#endif
+	xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
+	/*
+	 * Using a temporary cursor, update the parent key values of the
+	 * block on the right.
+	 */
+	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
+		return error;
+	xfs_btree_lastrec(tcur, level);
+	if ((error = xfs_inobt_increment(tcur, level, &i)) ||
+	    (error = xfs_inobt_updkey(tcur, rkp, level + 1))) {
+		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+		return error;
+	}
+	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Split cur/level block in half.
+ * Return new block number and its first record (to be inserted into parent).
+ */
+STATIC int				/* error */
+xfs_inobt_split(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level to split */
+	xfs_agblock_t		*bnop,	/* output: block number allocated */
+	xfs_inobt_key_t		*keyp,	/* output: first key of new block */
+	xfs_btree_cur_t		**curp,	/* output: new cursor */
+	int			*stat)	/* success/failure */
+{
+	xfs_alloc_arg_t		args;	/* allocation argument structure */
+	int			error;	/* error return value */
+	int			i;	/* loop index/record number */
+	xfs_agblock_t		lbno;	/* left (current) block number */
+	xfs_buf_t		*lbp;	/* buffer for left block */
+	xfs_inobt_block_t	*left;	/* left (current) btree block */
+	xfs_inobt_key_t		*lkp;	/* left btree key pointer */
+	xfs_inobt_ptr_t		*lpp;	/* left btree address pointer */
+	xfs_inobt_rec_t		*lrp;	/* left btree record pointer */
+	xfs_buf_t		*rbp;	/* buffer for right block */
+	xfs_inobt_block_t	*right;	/* right (new) btree block */
+	xfs_inobt_key_t		*rkp;	/* right btree key pointer */
+	xfs_inobt_ptr_t		*rpp;	/* right btree address pointer */
+	xfs_inobt_rec_t		*rrp;	/* right btree record pointer */
+
+	/*
+	 * Set up left block (current one).
+	 */
+	lbp = cur->bc_bufs[level];
+	args.tp = cur->bc_tp;
+	args.mp = cur->bc_mp;
+	lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp));
+	/*
+	 * Allocate the new block.
+	 * If we can't do it, we're toast.  Give up.
+	 */
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno);
+	args.mod = args.minleft = args.alignment = args.total = args.wasdel =
+		args.isfl = args.userdata = args.minalignslop = 0;
+	args.minlen = args.maxlen = args.prod = 1;
+	args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	if ((error = xfs_alloc_vextent(&args)))
+		return error;
+	if (args.fsbno == NULLFSBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	ASSERT(args.len == 1);
+	rbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0);
+	/*
+	 * Set up the new block as "right".
+	 */
+	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
+	/*
+	 * "Left" is the current (according to the cursor) block.
+	 */
+	left = XFS_BUF_TO_INOBT_BLOCK(lbp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
+		return error;
+#endif
+	/*
+	 * Fill in the btree header for the new block.
+	 */
+	INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
+	right->bb_level = left->bb_level; /* INT_: direct copy */
+	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	/*
+	 * Make sure that if there's an odd number of entries now, each
+	 * new block will have the same number of entries.
+	 */
+	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
+	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
+		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	/*
+	 * For non-leaf blocks, copy keys and addresses over to the new block.
+	 */
+	if (level > 0) {
+		lkp = XFS_INOBT_KEY_ADDR(left, i, cur);
+		lpp = XFS_INOBT_PTR_ADDR(left, i, cur);
+		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
+		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
+#ifdef DEBUG
+		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+				return error;
+		}
+#endif
+		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		*keyp = *rkp;
+	}
+	/*
+	 * For leaf blocks, copy records over to the new block.
+	 */
+	else {
+		lrp = XFS_INOBT_REC_ADDR(left, i, cur);
+		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
+		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */
+	}
+	/*
+	 * Find the left block number by looking in the buffer.
+	 * Adjust numrecs, sibling pointers.
+	 */
+	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
+	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
+	INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno);
+	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS);
+	xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+	/*
+	 * If there's a block to the new block's right, make that block
+	 * point back to right instead of to left.
+	 */
+	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		xfs_inobt_block_t	*rrblock;	/* rr btree block */
+		xfs_buf_t		*rrbp;		/* buffer for rrblock */
+
+		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
+				INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
+				XFS_INO_BTREE_REF)))
+			return error;
+		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
+		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
+			return error;
+		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno);
+		xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB);
+	}
+	/*
+	 * If the cursor is really in the right block, move it there.
+	 * If it's just pointing past the last entry in left, then we'll
+	 * insert there, so don't change anything in that case.
+	 */
+	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+		xfs_btree_setbuf(cur, level, rbp);
+		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+	}
+	/*
+	 * If there are more levels, we'll need another cursor which refers
+	 * the right block, no matter where this cursor was.
+	 */
+	if (level + 1 < cur->bc_nlevels) {
+		if ((error = xfs_btree_dup_cursor(cur, curp)))
+			return error;
+		(*curp)->bc_ptrs[level + 1]++;
+	}
+	*bnop = args.agbno;
+	*stat = 1;
+	return 0;
+}
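The split above hands the new right block half of the entries; when the count is odd, the right block gets the extra entry if the cursor index sits in the lower half, which keeps the two sides balanced around the pending insert. A tiny standalone sketch of just that carve-up arithmetic (hypothetical helper name):

#include <stdio.h>

/*
 * How many of 'numrecs' entries move to the new right block when the
 * cursor points at entry 'ptr' (1-based)?  Mirrors the parity tweak in
 * the split code: with an odd count, the right block gets the extra
 * entry when ptr is in the lower half (ptr <= numrecs / 2 + 1).
 */
static int split_right_count(int numrecs, int ptr)
{
	int right = numrecs / 2;

	if ((numrecs & 1) && ptr <= right + 1)
		right++;
	return right;
}

int main(void)
{
	printf("%d\n", split_right_count(9, 2));	/* 5: cursor in the lower half */
	printf("%d\n", split_right_count(9, 8));	/* 4: cursor in the upper half */
	return 0;
}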
+
+/*
+ * Update keys at all levels from here to the root along the cursor's path.
+ */
+STATIC int				/* error */
+xfs_inobt_updkey(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_inobt_key_t		*keyp,	/* new key value to update to */
+	int			level)	/* starting level for update */
+{
+	int			ptr;	/* index of key in block */
+
+	/*
+	 * Go up the tree from this level toward the root.
+	 * At each level, update the key value to the value input.
+	 * Stop when we reach a level where the cursor isn't pointing
+	 * at the first entry in the block.
+	 */
+	for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+		xfs_buf_t		*bp;	/* buffer for block */
+		xfs_inobt_block_t	*block;	/* btree block */
+#ifdef DEBUG
+		int			error;	/* error return value */
+#endif
+		xfs_inobt_key_t		*kp;	/* ptr to btree block keys */
+
+		bp = cur->bc_bufs[level];
+		block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+			return error;
+#endif
+		ptr = cur->bc_ptrs[level];
+		kp = XFS_INOBT_KEY_ADDR(block, ptr, cur);
+		*kp = *keyp;
+		xfs_inobt_log_keys(cur, bp, ptr, ptr);
+	}
+	return 0;
+}
+
+/*
+ * Externally visible routines.
+ */
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_decrement(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat)	/* success/failure */
+{
+	xfs_inobt_block_t	*block;	/* btree block */
+	int			error;
+	int			lev;	/* btree level */
+
+	ASSERT(level < cur->bc_nlevels);
+	/*
+	 * Read-ahead to the left at this level.
+	 */
+	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
+	/*
+	 * Decrement the ptr at this level.  If we're still in the block
+	 * then we're done.
+	 */
+	if (--cur->bc_ptrs[level] > 0) {
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * Get a pointer to the btree block.
+	 */
+	block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[level]);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level,
+			cur->bc_bufs[level])))
+		return error;
+#endif
+	/*
+	 * If we just went off the left edge of the tree, return failure.
+	 */
+	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * March up the tree decrementing pointers.
+	 * Stop when we don't go off the left edge of a block.
+	 */
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		if (--cur->bc_ptrs[lev] > 0)
+			break;
+		/*
+		 * Read-ahead the left block, we're going to read it
+		 * in the next loop.
+		 */
+		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
+	}
+	/*
+	 * If we went off the root then we are seriously confused.
+	 */
+	ASSERT(lev < cur->bc_nlevels);
+	/*
+	 * Now walk back down the tree, fixing up the cursor's buffer
+	 * pointers and key numbers.
+	 */
+	for (block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); lev > level; ) {
+		xfs_agblock_t	agbno;	/* block number of btree block */
+		xfs_buf_t	*bp;	/* buffer containing btree block */
+
+		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+				cur->bc_private.i.agno, agbno, 0, &bp,
+				XFS_INO_BTREE_REF)))
+			return error;
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_INOBT_BLOCK(bp);
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Delete the record pointed to by cur.
+ * On return, the cursor refers to the place where the record was
+ * (i.e. where it could be re-inserted).
+ */
+int					/* error */
+xfs_inobt_delete(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		*stat)		/* success/failure */
+{
+	int		error;
+	int		i;		/* result code */
+	int		level;		/* btree level */
+
+	/*
+	 * Go up the tree, starting at leaf level.
+	 * If 2 is returned then a join was done; go to the next level.
+	 * Otherwise we are done.
+	 */
+	for (level = 0, i = 2; i == 2; level++) {
+		if ((error = xfs_inobt_delrec(cur, level, &i)))
+			return error;
+	}
+	if (i == 0) {
+		for (level = 1; level < cur->bc_nlevels; level++) {
+			if (cur->bc_ptrs[level] == 0) {
+				if ((error = xfs_inobt_decrement(cur, level, &i)))
+					return error;
+				break;
+			}
+		}
+	}
+	*stat = i;
+	return 0;
+}
+
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_inobt_get_rec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_agino_t		*ino,	/* output: starting inode of chunk */
+	__int32_t		*fcnt,	/* output: number of free inodes */
+	xfs_inofree_t		*free,	/* output: free inode mask */
+	int			*stat)	/* output: success/failure */
+{
+	xfs_inobt_block_t	*block;	/* btree block */
+	xfs_buf_t		*bp;	/* buffer containing btree block */
+#ifdef DEBUG
+	int			error;	/* error return value */
+#endif
+	int			ptr;	/* record number */
+	xfs_inobt_rec_t		*rec;	/* record data */
+
+	bp = cur->bc_bufs[0];
+	ptr = cur->bc_ptrs[0];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, 0, bp)))
+		return error;
+#endif
+	/*
+	 * Off the right end or left end, return failure.
+	 */
+	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Point to the record and extract its data.
+	 */
+	rec = XFS_INOBT_REC_ADDR(block, ptr, cur);
+	*ino = INT_GET(rec->ir_startino, ARCH_CONVERT);
+	*fcnt = INT_GET(rec->ir_freecount, ARCH_CONVERT);
+	*free = INT_GET(rec->ir_free, ARCH_CONVERT);
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_increment(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat)	/* success/failure */
+{
+	xfs_inobt_block_t	*block;	/* btree block */
+	xfs_buf_t		*bp;	/* buffer containing btree block */
+	int			error;	/* error return value */
+	int			lev;	/* btree level */
+
+	ASSERT(level < cur->bc_nlevels);
+	/*
+	 * Read-ahead to the right at this level.
+	 */
+	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+	/*
+	 * Get a pointer to the btree block.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+#endif
+	/*
+	 * Increment the ptr at this level.  If we're still in the block
+	 * then we're done.
+	 */
+	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * If we just went off the right edge of the tree, return failure.
+	 */
+	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * March up the tree incrementing pointers.
+	 * Stop when we don't go off the right edge of a block.
+	 */
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		bp = cur->bc_bufs[lev];
+		block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+#endif
+		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			break;
+		/*
+		 * Read-ahead the right block, we're going to read it
+		 * in the next loop.
+		 */
+		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
+	}
+	/*
+	 * If we went off the root then we are seriously confused.
+	 */
+	ASSERT(lev < cur->bc_nlevels);
+	/*
+	 * Now walk back down the tree, fixing up the cursor's buffer
+	 * pointers and key numbers.
+	 */
+	for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_INOBT_BLOCK(bp);
+	     lev > level; ) {
+		xfs_agblock_t	agbno;	/* block number of btree block */
+
+		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
+				cur->bc_private.i.agno, agbno, 0, &bp,
+				XFS_INO_BTREE_REF)))
+			return error;
+		lev--;
+		xfs_btree_setbuf(cur, lev, bp);
+		block = XFS_BUF_TO_INOBT_BLOCK(bp);
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+		cur->bc_ptrs[lev] = 1;
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Insert the current record at the point referenced by cur.
+ * The cursor may be inconsistent on return if splits have been done.
+ */
+int					/* error */
+xfs_inobt_insert(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		*stat)		/* success/failure */
+{
+	int		error;		/* error return value */
+	int		i;		/* result value, 0 for failure */
+	int		level;		/* current level number in btree */
+	xfs_agblock_t	nbno;		/* new block number (split result) */
+	xfs_btree_cur_t	*ncur;		/* new cursor (split result) */
+	xfs_inobt_rec_t	nrec;		/* record being inserted this level */
+	xfs_btree_cur_t	*pcur;		/* previous level's cursor */
+
+	level = 0;
+	nbno = NULLAGBLOCK;
+	INT_SET(nrec.ir_startino, ARCH_CONVERT, cur->bc_rec.i.ir_startino);
+	INT_SET(nrec.ir_freecount, ARCH_CONVERT, cur->bc_rec.i.ir_freecount);
+	INT_SET(nrec.ir_free, ARCH_CONVERT, cur->bc_rec.i.ir_free);
+	ncur = (xfs_btree_cur_t *)0;
+	pcur = cur;
+	/*
+	 * Loop going up the tree, starting at the leaf level.
+	 * Stop when we don't get a split block, that must mean that
+	 * the insert is finished with this level.
+	 */
+	do {
+		/*
+		 * Insert nrec/nbno into this level of the tree.
+		 * Note if we fail, nbno will be null.
+		 */
+		if ((error = xfs_inobt_insrec(pcur, level++, &nbno, &nrec, &ncur,
+				&i))) {
+			if (pcur != cur)
+				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
+			return error;
+		}
+		/*
+		 * See if the cursor we just used should be deleted.
+		 * We can't delete the caller's cursor, but otherwise we
+		 * should, if ncur is a new cursor or we're about to be done.
+		 */
+		if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
+			cur->bc_nlevels = pcur->bc_nlevels;
+			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
+		}
+		/*
+		 * If we got a new cursor, switch to it.
+		 */
+		if (ncur) {
+			pcur = ncur;
+			ncur = (xfs_btree_cur_t *)0;
+		}
+	} while (nbno != NULLAGBLOCK);
+	*stat = i;
+	return 0;
+}
+
+/*
+ * Lookup the record equal to ino in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_eq(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agino_t	ino,		/* starting inode of chunk */
+	__int32_t	fcnt,		/* free inode count */
+	xfs_inofree_t	free,		/* free inode mask */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.i.ir_startino = ino;
+	cur->bc_rec.i.ir_freecount = fcnt;
+	cur->bc_rec.i.ir_free = free;
+	return xfs_inobt_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Lookup the first record greater than or equal to ino
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_ge(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agino_t	ino,		/* starting inode of chunk */
+	__int32_t	fcnt,		/* free inode count */
+	xfs_inofree_t	free,		/* free inode mask */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.i.ir_startino = ino;
+	cur->bc_rec.i.ir_freecount = fcnt;
+	cur->bc_rec.i.ir_free = free;
+	return xfs_inobt_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
+/*
+ * Lookup the first record less than or equal to ino
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_le(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agino_t	ino,		/* starting inode of chunk */
+	__int32_t	fcnt,		/* free inode count */
+	xfs_inofree_t	free,		/* free inode mask */
+	int		*stat)		/* success/failure */
+{
+	cur->bc_rec.i.ir_startino = ino;
+	cur->bc_rec.i.ir_freecount = fcnt;
+	cur->bc_rec.i.ir_free = free;
+	return xfs_inobt_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Update the record referred to by cur, to the value given
+ * by [ino, fcnt, free].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+int					/* error */
+xfs_inobt_update(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free)	/* free inode mask */
+{
+	xfs_inobt_block_t	*block;	/* btree block to update */
+	xfs_buf_t		*bp;	/* buffer containing btree block */
+	int			error;	/* error return value */
+	int			ptr;	/* current record number (updating) */
+	xfs_inobt_rec_t		*rp;	/* pointer to updated record */
+
+	/*
+	 * Pick up the current block.
+	 */
+	bp = cur->bc_bufs[0];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, 0, bp)))
+		return error;
+#endif
+	/*
+	 * Get the address of the rec to be updated.
+	 */
+	ptr = cur->bc_ptrs[0];
+	rp = XFS_INOBT_REC_ADDR(block, ptr, cur);
+	/*
+	 * Fill in the new contents and log them.
+	 */
+	INT_SET(rp->ir_startino, ARCH_CONVERT, ino);
+	INT_SET(rp->ir_freecount, ARCH_CONVERT, fcnt);
+	INT_SET(rp->ir_free, ARCH_CONVERT, free);
+	xfs_inobt_log_recs(cur, bp, ptr, ptr);
+	/*
+	 * Updating first record in leaf. Pass new key value up to our parent.
+	 */
+	if (ptr == 1) {
+		xfs_inobt_key_t	key;	/* key containing [ino] */
+
+		INT_SET(key.ir_startino, ARCH_CONVERT, ino);
+		if ((error = xfs_inobt_updkey(cur, &key, 1)))
+			return error;
+	}
+	return 0;
+}
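A sketch of how a caller might drive the externally visible routines above to mark one more inode free in its chunk record. This is illustrative only: the function name and flow are made up, it assumes an already-initialised inode-btree cursor, and it uses the kernel types and the routines defined in this file, so it only compiles inside the XFS tree.

/*
 * Illustrative only -- not from the kernel tree.  Mark the inode at
 * chunk-relative 'offset' free in the chunk record starting at
 * 'chunk_ino'.  Errors are simply passed back to the caller.
 */
static int
example_mark_inode_free(
	xfs_btree_cur_t	*cur,		/* inode btree cursor */
	xfs_agino_t	chunk_ino,	/* starting inode of the chunk */
	int		offset)		/* inode's offset within the chunk */
{
	__int32_t	fcnt;		/* free count from the record */
	xfs_inofree_t	free;		/* free mask from the record */
	xfs_agino_t	ino;		/* startino from the record */
	int		error;		/* error return value */
	int		i;		/* lookup/get_rec status */

	if ((error = xfs_inobt_lookup_eq(cur, chunk_ino, 0, 0, &i)))
		return error;
	if (!i)
		return 0;		/* no record for this chunk */
	if ((error = xfs_inobt_get_rec(cur, &ino, &fcnt, &free, &i)))
		return error;
	if (!i)
		return 0;
	free |= (xfs_inofree_t)1 << offset;	/* i.e. XFS_INOBT_MASK(offset) */
	return xfs_inobt_update(cur, ino, fcnt + 1, free);
}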
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
new file mode 100644
index 0000000..803c4d1
--- /dev/null
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_IALLOC_BTREE_H__
+#define	__XFS_IALLOC_BTREE_H__
+
+/*
+ * Inode map on-disk structures
+ */
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_btree_sblock;
+struct xfs_mount;
+
+/*
+ * There is one inode-map btree per allocation group.
+ */
+#define	XFS_IBT_MAGIC	0x49414254	/* 'IABT' */
+
+typedef	__uint64_t	xfs_inofree_t;
+#define	XFS_INODES_PER_CHUNK	(NBBY * sizeof(xfs_inofree_t))
+#define	XFS_INODES_PER_CHUNK_LOG	(XFS_NBBYLOG + 3)
+#define	XFS_INOBT_ALL_FREE	((xfs_inofree_t)-1)
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASKN)
+xfs_inofree_t xfs_inobt_maskn(int i, int n);
+#define	XFS_INOBT_MASKN(i,n)		xfs_inobt_maskn(i,n)
+#else
+#define	XFS_INOBT_MASKN(i,n)	\
+	((((n) >= XFS_INODES_PER_CHUNK ? \
+		(xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i))
+#endif
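The open-coded XFS_INOBT_MASKN form above builds a run of n set bits starting at bit i; the n >= XFS_INODES_PER_CHUNK guard avoids an undefined full-width shift and yields an all-ones mask instead. A standalone sketch with the same shape (plain uint64_t in place of xfs_inofree_t):

#include <stdio.h>
#include <stdint.h>

#define INODES_PER_CHUNK	64	/* NBBY * sizeof(uint64_t) */

/* n set bits starting at bit i, same shape as XFS_INOBT_MASKN. */
static uint64_t maskn(int i, int n)
{
	return ((n >= INODES_PER_CHUNK ? (uint64_t)0
				       : ((uint64_t)1 << n)) - 1) << i;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)maskn(2, 3));	/* 0x1c */
	printf("%#llx\n", (unsigned long long)maskn(0, 64));	/* all bits set */
	return 0;
}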
+
+/*
+ * Data record structure
+ */
+typedef struct xfs_inobt_rec
+{
+	xfs_agino_t	ir_startino;	/* starting inode number */
+	__int32_t	ir_freecount;	/* count of free inodes (set bits) */
+	xfs_inofree_t	ir_free;	/* free inode mask */
+} xfs_inobt_rec_t;
+
+/*
+ * Key structure
+ */
+typedef struct xfs_inobt_key
+{
+	xfs_agino_t	ir_startino;	/* starting inode number */
+} xfs_inobt_key_t;
+
+typedef xfs_agblock_t xfs_inobt_ptr_t;	/* btree pointer type */
+					/* btree block header type */
+typedef	struct xfs_btree_sblock xfs_inobt_block_t;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_INOBT_BLOCK)
+xfs_inobt_block_t *xfs_buf_to_inobt_block(struct xfs_buf *bp);
+#define	XFS_BUF_TO_INOBT_BLOCK(bp)	xfs_buf_to_inobt_block(bp)
+#else
+#define	XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)(XFS_BUF_PTR(bp)))
+#endif
+
+/*
+ * Bit manipulations for ir_free.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASK)
+xfs_inofree_t xfs_inobt_mask(int i);
+#define	XFS_INOBT_MASK(i)		xfs_inobt_mask(i)
+#else
+#define	XFS_INOBT_MASK(i)		((xfs_inofree_t)1 << (i))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_FREE)
+int xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i);
+#define	XFS_INOBT_IS_FREE(rp,i)	xfs_inobt_is_free(rp,i)
+#else
+#define	XFS_INOBT_IS_FREE(rp,i)	(((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_SET_FREE)
+void xfs_inobt_set_free(xfs_inobt_rec_t *rp, int i);
+#define	XFS_INOBT_SET_FREE(rp,i)	xfs_inobt_set_free(rp,i)
+#else
+#define	XFS_INOBT_SET_FREE(rp,i)	((rp)->ir_free |= XFS_INOBT_MASK(i))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_CLR_FREE)
+void xfs_inobt_clr_free(xfs_inobt_rec_t *rp, int i);
+#define	XFS_INOBT_CLR_FREE(rp,i)	xfs_inobt_clr_free(rp,i)
+#else
+#define	XFS_INOBT_CLR_FREE(rp,i)	((rp)->ir_free &= ~XFS_INOBT_MASK(i))
+#endif
+
+/*
+ * Real block structures have a size equal to the disk block size.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_SIZE)
+int xfs_inobt_block_size(int lev, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_BLOCK_SIZE(lev,cur)	xfs_inobt_block_size(lev,cur)
+#else
+#define	XFS_INOBT_BLOCK_SIZE(lev,cur)	(1 << (cur)->bc_blocklog)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MAXRECS)
+int xfs_inobt_block_maxrecs(int lev, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_BLOCK_MAXRECS(lev,cur)	xfs_inobt_block_maxrecs(lev,cur)
+#else
+#define	XFS_INOBT_BLOCK_MAXRECS(lev,cur)	\
+	((cur)->bc_mp->m_inobt_mxr[lev != 0])
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MINRECS)
+int xfs_inobt_block_minrecs(int lev, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_BLOCK_MINRECS(lev,cur)	xfs_inobt_block_minrecs(lev,cur)
+#else
+#define	XFS_INOBT_BLOCK_MINRECS(lev,cur)	\
+	((cur)->bc_mp->m_inobt_mnr[lev != 0])
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_LAST_REC)
+int xfs_inobt_is_last_rec(struct xfs_btree_cur *cur);
+#define	XFS_INOBT_IS_LAST_REC(cur)	xfs_inobt_is_last_rec(cur)
+#else
+#define	XFS_INOBT_IS_LAST_REC(cur)	\
+	((cur)->bc_ptrs[0] == \
+		INT_GET(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs, ARCH_CONVERT))
+#endif
+
+/*
+ * Maximum number of inode btree levels.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IN_MAXLEVELS)
+int xfs_in_maxlevels(struct xfs_mount *mp);
+#define	XFS_IN_MAXLEVELS(mp)		xfs_in_maxlevels(mp)
+#else
+#define	XFS_IN_MAXLEVELS(mp)		((mp)->m_in_maxlevels)
+#endif
+
+/*
+ * Block numbers in the AG.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IBT_BLOCK)
+xfs_agblock_t xfs_ibt_block(struct xfs_mount *mp);
+#define	XFS_IBT_BLOCK(mp)		xfs_ibt_block(mp)
+#else
+#define	XFS_IBT_BLOCK(mp)	((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_PREALLOC_BLOCKS)
+xfs_agblock_t xfs_prealloc_blocks(struct xfs_mount *mp);
+#define	XFS_PREALLOC_BLOCKS(mp)		xfs_prealloc_blocks(mp)
+#else
+#define	XFS_PREALLOC_BLOCKS(mp)	((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
+#endif
+
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_REC_ADDR)
+xfs_inobt_rec_t *
+xfs_inobt_rec_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_REC_ADDR(bb,i,cur)	xfs_inobt_rec_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_REC_ADDR(bb,i,cur)	\
+	XFS_BTREE_REC_ADDR(XFS_INOBT_BLOCK_SIZE(0,cur), xfs_inobt, bb, i, \
+		XFS_INOBT_BLOCK_MAXRECS(0, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_KEY_ADDR)
+xfs_inobt_key_t *
+xfs_inobt_key_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_KEY_ADDR(bb,i,cur)	xfs_inobt_key_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_KEY_ADDR(bb,i,cur)	\
+	XFS_BTREE_KEY_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \
+		XFS_INOBT_BLOCK_MAXRECS(1, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_PTR_ADDR)
+xfs_inobt_ptr_t *
+xfs_inobt_ptr_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_PTR_ADDR(bb,i,cur)	xfs_inobt_ptr_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_PTR_ADDR(bb,i,cur)	\
+	XFS_BTREE_PTR_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \
+		XFS_INOBT_BLOCK_MAXRECS(1, cur))
+#endif
+
+/*
+ * Prototypes for externally visible routines.
+ */
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_decrement(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat); /* success/failure */
+
+/*
+ * Delete the record pointed to by cur.
+ * The cursor refers to the place where the record was (or could be inserted)
+ * when the operation returns.
+ */
+int					/* error */
+xfs_inobt_delete(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_inobt_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		*ino,	/* output: starting inode of chunk */
+	__int32_t		*fcnt,	/* output: number of free inodes */
+	xfs_inofree_t		*free,	/* output: free inode mask */
+	int			*stat);	/* output: success/failure */
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_increment(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat);	/* success/failure */
+
+/*
+ * Insert the current record at the point referenced by cur.
+ * The cursor may be inconsistent on return if splits have been done.
+ */
+int					/* error */
+xfs_inobt_insert(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the record equal to ino in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_eq(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free,	/* free inode mask */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the first record greater than or equal to ino
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_ge(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free,	/* free inode mask */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the first record less than or equal to ino
+ * in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_le(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free,	/* free inode mask */
+	int			*stat);	/* success/failure */
+
+/*
+ * Update the record referred to by cur, to the value given
+ * by [ino, fcnt, free].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+int					/* error */
+xfs_inobt_update(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free);	/* free inode mask */
+
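+/*
+ * Illustrative sketch (not a caller from this tree) of how the lookup,
+ * get and increment routines above are typically combined to walk the
+ * records of an inode btree.  The function name xfs_inobt_walk_example
+ * is hypothetical and error handling is abbreviated.
+ */
+#if 0
+static int
+xfs_inobt_walk_example(struct xfs_btree_cur *cur)
+{
+	xfs_agino_t	ino;
+	__int32_t	fcnt;
+	xfs_inofree_t	free;
+	int		i;	/* lookup/get/increment status */
+	int		error;
+
+	if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+		return error;
+	while (i) {
+		if ((error = xfs_inobt_get_rec(cur, &ino, &fcnt, &free, &i)))
+			return error;
+		/* ... examine [ino, fcnt, free] here ... */
+		if ((error = xfs_inobt_increment(cur, 0, &i)))
+			return error;
+	}
+	return 0;
+}
+#endif
+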
+#endif	/* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
new file mode 100644
index 0000000..3a0ba1d
--- /dev/null
+++ b/fs/xfs/xfs_iget.c
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_utils.h"
+#include "xfs_bit.h"
+
+/*
+ * Initialize the inode hash table for the newly mounted file system.
+ * Choose an initial table size based on a user-specified value, else
+ * use a simple algorithm using the maximum number of inodes as an
+ * indicator for table size, and clamp it between one and some large
+ * number of pages.
+ */
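+/*
+ * Worked example (illustrative only): with no user-specified size and
+ * roughly 2^20 inodes, xfs_highbit64(icount) is 20, so the table starts
+ * at 1 << ((20 + 1) / 2) = 1024 buckets and is then clamped so that it
+ * never exceeds 64 pages worth of xfs_ihash_t entries.
+ */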
+void
+xfs_ihash_init(xfs_mount_t *mp)
+{
+	__uint64_t	icount;
+	uint		i, flags = KM_SLEEP | KM_MAYFAIL;
+
+	if (!mp->m_ihsize) {
+		icount = mp->m_maxicount ? mp->m_maxicount :
+			 (mp->m_sb.sb_dblocks << mp->m_sb.sb_inopblog);
+		mp->m_ihsize = 1 << max_t(uint, 8,
+					(xfs_highbit64(icount) + 1) / 2);
+		mp->m_ihsize = min_t(uint, mp->m_ihsize,
+					(64 * NBPP) / sizeof(xfs_ihash_t));
+	}
+
+	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
+						sizeof(xfs_ihash_t), flags))) {
+		if ((mp->m_ihsize >>= 1) <= NBPP)
+			flags = KM_SLEEP;
+	}
+	for (i = 0; i < mp->m_ihsize; i++) {
+		rwlock_init(&(mp->m_ihash[i].ih_lock));
+	}
+}
+
+/*
+ * Free up structures allocated by xfs_ihash_init, at unmount time.
+ */
+void
+xfs_ihash_free(xfs_mount_t *mp)
+{
+	kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
+	mp->m_ihash = NULL;
+}
+
+/*
+ * Initialize the inode cluster hash table for the newly mounted file system.
+ * Its size is derived from the ihash table size.
+ */
+void
+xfs_chash_init(xfs_mount_t *mp)
+{
+	uint	i;
+
+	mp->m_chsize = max_t(uint, 1, mp->m_ihsize /
+			 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog));
+	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
+	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
+						 * sizeof(xfs_chash_t),
+						 KM_SLEEP);
+	for (i = 0; i < mp->m_chsize; i++) {
+		spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
+	}
+}
+
+/*
+ * Free up structures allocated by xfs_chash_init, at unmount time.
+ */
+void
+xfs_chash_free(xfs_mount_t *mp)
+{
+	int	i;
+
+	for (i = 0; i < mp->m_chsize; i++) {
+		spinlock_destroy(&mp->m_chash[i].ch_lock);
+	}
+
+	kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t));
+	mp->m_chash = NULL;
+}
+
+/*
+ * Look up an inode by number in the given file system.
+ * The inode is looked up in the hash table for the file system
+ * represented by the mount point parameter mp.  Each bucket of
+ * the hash table is guarded by an individual semaphore.
+ *
+ * If the inode is found in the hash table, its corresponding vnode
+ * is obtained with a call to vn_get().  This call takes care of
+ * coordination with the reclamation of the inode and vnode.  Note
+ * that the vmap structure is filled in while holding the hash lock.
+ * This gives us the state of the inode/vnode when we found it and
+ * is used for coordination in vn_get().
+ *
+ * If it is not in core, read it in from the file system's device and
+ * add the inode into the hash table.
+ *
+ * The inode is locked according to the value of the lock_flags parameter.
+ * This flag parameter indicates how and if the inode's IO lock and inode lock
+ * should be taken.
+ *
+ * mp -- the mount point structure for the current file system.  It points
+ *       to the inode hash table.
+ * tp -- a pointer to the current transaction if there is one.  This is
+ *       simply passed through to the xfs_iread() call.
+ * ino -- the number of the inode desired.  This is the unique identifier
+ *        within the file system for the inode being requested.
+ * lock_flags -- flags indicating how to lock the inode.  See the comment
+ *		 for xfs_ilock() for a list of valid values.
+ * bno -- the block number starting the buffer containing the inode,
+ *	  if known (as by bulkstat), else 0.
+ */
+STATIC int
+xfs_iget_core(
+	vnode_t		*vp,
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	uint		flags,
+	uint		lock_flags,
+	xfs_inode_t	**ipp,
+	xfs_daddr_t	bno)
+{
+	xfs_ihash_t	*ih;
+	xfs_inode_t	*ip;
+	xfs_inode_t	*iq;
+	vnode_t		*inode_vp;
+	ulong		version;
+	int		error;
+	/* REFERENCED */
+	xfs_chash_t	*ch;
+	xfs_chashlist_t	*chl, *chlnew;
+	SPLDECL(s);
+
+
+	ih = XFS_IHASH(mp, ino);
+
+again:
+	read_lock(&ih->ih_lock);
+
+	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
+		if (ip->i_ino == ino) {
+			/*
+			 * If INEW is set this inode is being set up;
+			 * we need to pause and try again.
+			 */
+			if (ip->i_flags & XFS_INEW) {
+				read_unlock(&ih->ih_lock);
+				delay(1);
+				XFS_STATS_INC(xs_ig_frecycle);
+
+				goto again;
+			}
+
+			inode_vp = XFS_ITOV_NULL(ip);
+			if (inode_vp == NULL) {
+				/*
+				 * If IRECLAIM is set this inode is
+				 * on its way out of the system,
+				 * we need to pause and try again.
+				 */
+				if (ip->i_flags & XFS_IRECLAIM) {
+					read_unlock(&ih->ih_lock);
+					delay(1);
+					XFS_STATS_INC(xs_ig_frecycle);
+
+					goto again;
+				}
+
+				vn_trace_exit(vp, "xfs_iget.alloc",
+					(inst_t *)__return_address);
+
+				XFS_STATS_INC(xs_ig_found);
+
+				ip->i_flags &= ~XFS_IRECLAIMABLE;
+				read_unlock(&ih->ih_lock);
+
+				XFS_MOUNT_ILOCK(mp);
+				list_del_init(&ip->i_reclaim);
+				XFS_MOUNT_IUNLOCK(mp);
+
+				goto finish_inode;
+
+			} else if (vp != inode_vp) {
+				struct inode *inode = LINVFS_GET_IP(inode_vp);
+
+				/* The inode is being torn down, pause and
+				 * try again.
+				 */
+				if (inode->i_state & (I_FREEING | I_CLEAR)) {
+					read_unlock(&ih->ih_lock);
+					delay(1);
+					XFS_STATS_INC(xs_ig_frecycle);
+
+					goto again;
+				}
+/* Chances are the other vnode (the one in the inode) is being torn
+ * down right now, and we landed on top of it. Question is, what do
+ * we do? Unhook the old inode and hook up the new one?
+ */
+				cmn_err(CE_PANIC,
+			"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
+						inode_vp, vp);
+			}
+
+			read_unlock(&ih->ih_lock);
+
+			XFS_STATS_INC(xs_ig_found);
+
+finish_inode:
+			if (ip->i_d.di_mode == 0) {
+				if (!(flags & IGET_CREATE))
+					return ENOENT;
+				xfs_iocore_inode_reinit(ip);
+			}
+
+			if (lock_flags != 0)
+				xfs_ilock(ip, lock_flags);
+
+			ip->i_flags &= ~XFS_ISTALE;
+
+			vn_trace_exit(vp, "xfs_iget.found",
+						(inst_t *)__return_address);
+			goto return_ip;
+		}
+	}
+
+	/*
+	 * Inode cache miss: save the hash chain version stamp and unlock
+	 * the chain, so we don't deadlock in vn_alloc.
+	 */
+	XFS_STATS_INC(xs_ig_missed);
+
+	version = ih->ih_version;
+
+	read_unlock(&ih->ih_lock);
+
+	/*
+	 * Read the disk inode attributes into a new inode structure and get
+	 * a new vnode for it. This should also initialize i_ino and i_mount.
+	 */
+	error = xfs_iread(mp, tp, ino, &ip, bno);
+	if (error) {
+		return error;
+	}
+
+	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);
+
+	xfs_inode_lock_init(ip, vp);
+	xfs_iocore_inode_init(ip);
+
+	if (lock_flags != 0) {
+		xfs_ilock(ip, lock_flags);
+	}
+
+	if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
+		xfs_idestroy(ip);
+		return ENOENT;
+	}
+
+	/*
+	 * Put ip on its hash chain, unless someone else hashed a duplicate
+	 * after we released the hash lock.
+	 */
+	write_lock(&ih->ih_lock);
+
+	if (ih->ih_version != version) {
+		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
+			if (iq->i_ino == ino) {
+				write_unlock(&ih->ih_lock);
+				xfs_idestroy(ip);
+
+				XFS_STATS_INC(xs_ig_dup);
+				goto again;
+			}
+		}
+	}
+
+	/*
+	 * These values _must_ be set before releasing ihlock!
+	 */
+	ip->i_hash = ih;
+	if ((iq = ih->ih_next)) {
+		iq->i_prevp = &ip->i_next;
+	}
+	ip->i_next = iq;
+	ip->i_prevp = &ih->ih_next;
+	ih->ih_next = ip;
+	ip->i_udquot = ip->i_gdquot = NULL;
+	ih->ih_version++;
+	ip->i_flags |= XFS_INEW;
+
+	write_unlock(&ih->ih_lock);
+
+	/*
+	 * put ip on its cluster's hash chain
+	 */
+	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
+	       ip->i_cnext == NULL);
+
+	chlnew = NULL;
+	ch = XFS_CHASH(mp, ip->i_blkno);
+ chlredo:
+	s = mutex_spinlock(&ch->ch_lock);
+	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
+		if (chl->chl_blkno == ip->i_blkno) {
+
+			/* insert this inode into the doubly-linked list
+			 * where chl points */
+			if ((iq = chl->chl_ip)) {
+				ip->i_cprev = iq->i_cprev;
+				iq->i_cprev->i_cnext = ip;
+				iq->i_cprev = ip;
+				ip->i_cnext = iq;
+			} else {
+				ip->i_cnext = ip;
+				ip->i_cprev = ip;
+			}
+			chl->chl_ip = ip;
+			ip->i_chash = chl;
+			break;
+		}
+	}
+
+	/* no hash list found for this block; add a new hash list */
+	if (chl == NULL)  {
+		if (chlnew == NULL) {
+			mutex_spinunlock(&ch->ch_lock, s);
+			ASSERT(xfs_chashlist_zone != NULL);
+			chlnew = (xfs_chashlist_t *)
+					kmem_zone_alloc(xfs_chashlist_zone,
+						KM_SLEEP);
+			ASSERT(chlnew != NULL);
+			goto chlredo;
+		} else {
+			ip->i_cnext = ip;
+			ip->i_cprev = ip;
+			ip->i_chash = chlnew;
+			chlnew->chl_ip = ip;
+			chlnew->chl_blkno = ip->i_blkno;
+			chlnew->chl_next = ch->ch_list;
+			ch->ch_list = chlnew;
+			chlnew = NULL;
+		}
+	} else {
+		if (chlnew != NULL) {
+			kmem_zone_free(xfs_chashlist_zone, chlnew);
+		}
+	}
+
+	mutex_spinunlock(&ch->ch_lock, s);
+
+
+	/*
+	 * Link ip to its mount and thread it on the mount's inode list.
+	 */
+	XFS_MOUNT_ILOCK(mp);
+	if ((iq = mp->m_inodes)) {
+		ASSERT(iq->i_mprev->i_mnext == iq);
+		ip->i_mprev = iq->i_mprev;
+		iq->i_mprev->i_mnext = ip;
+		iq->i_mprev = ip;
+		ip->i_mnext = iq;
+	} else {
+		ip->i_mnext = ip;
+		ip->i_mprev = ip;
+	}
+	mp->m_inodes = ip;
+
+	XFS_MOUNT_IUNLOCK(mp);
+
+ return_ip:
+	ASSERT(ip->i_df.if_ext_max ==
+	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
+
+	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+	*ipp = ip;
+
+	/*
+	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
+	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
+	 */
+	VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
+
+	return 0;
+}
+
+
+/*
+ * The 'normal' internal xfs_iget; if needed it will
+ * 'allocate', or 'get', the vnode.
+ */
+int
+xfs_iget(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	uint		flags,
+	uint		lock_flags,
+	xfs_inode_t	**ipp,
+	xfs_daddr_t	bno)
+{
+	struct inode	*inode;
+	vnode_t		*vp = NULL;
+	int		error;
+
+retry:
+	XFS_STATS_INC(xs_ig_attempts);
+
+	if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
+		bhv_desc_t	*bdp;
+		xfs_inode_t	*ip;
+		int		newnode;
+
+		vp = LINVFS_GET_VP(inode);
+		if (inode->i_state & I_NEW) {
+inode_allocate:
+			vn_initialize(inode);
+			error = xfs_iget_core(vp, mp, tp, ino, flags,
+					lock_flags, ipp, bno);
+			if (error) {
+				vn_mark_bad(vp);
+				if (inode->i_state & I_NEW)
+					unlock_new_inode(inode);
+				iput(inode);
+			}
+		} else {
+			/* These are true if the inode is in inactive or
+			 * reclaim. The linux inode is about to go away,
+			 * wait for that path to finish, and try again.
+			 */
+			if (vp->v_flag & (VINACT | VRECLM)) {
+				vn_wait(vp);
+				iput(inode);
+				goto retry;
+			}
+
+			if (is_bad_inode(inode)) {
+				iput(inode);
+				return EIO;
+			}
+
+			bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
+			if (bdp == NULL) {
+				XFS_STATS_INC(xs_ig_dup);
+				goto inode_allocate;
+			}
+			ip = XFS_BHVTOI(bdp);
+			if (lock_flags != 0)
+				xfs_ilock(ip, lock_flags);
+			newnode = (ip->i_d.di_mode == 0);
+			if (newnode)
+				xfs_iocore_inode_reinit(ip);
+			XFS_STATS_INC(xs_ig_found);
+			*ipp = ip;
+			error = 0;
+		}
+	} else
+		error = ENOMEM;	/* If we got no inode we are out of memory */
+
+	return error;
+}
+
+/*
+ * Do the setup for the various locks within the incore inode.
+ */
+void
+xfs_inode_lock_init(
+	xfs_inode_t	*ip,
+	vnode_t		*vp)
+{
+	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
+		     "xfsino", (long)vp->v_number);
+	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
+	init_waitqueue_head(&ip->i_ipin_wait);
+	atomic_set(&ip->i_pincount, 0);
+	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
+}
+
+/*
+ * Look for the inode corresponding to the given ino in the hash table.
+ * If it is there and its i_transp pointer matches tp, return it.
+ * Otherwise, return NULL.
+ */
+xfs_inode_t *
+xfs_inode_incore(xfs_mount_t	*mp,
+		 xfs_ino_t	ino,
+		 xfs_trans_t	*tp)
+{
+	xfs_ihash_t	*ih;
+	xfs_inode_t	*ip;
+
+	ih = XFS_IHASH(mp, ino);
+	read_lock(&ih->ih_lock);
+	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
+		if (ip->i_ino == ino) {
+			/*
+			 * If we find it and tp matches, return it.
+			 * Otherwise break from the loop and return
+			 * NULL.
+			 */
+			if (ip->i_transp == tp) {
+				read_unlock(&ih->ih_lock);
+				return (ip);
+			}
+			break;
+		}
+	}
+	read_unlock(&ih->ih_lock);
+	return (NULL);
+}
+
+/*
+ * Decrement reference count of an inode structure and unlock it.
+ *
+ * ip -- the inode being released
+ * lock_flags -- this parameter indicates the inode's locks to be
+ *       released.  See the comment on xfs_iunlock() for a list
+ *	 of valid values.
+ */
+void
+xfs_iput(xfs_inode_t	*ip,
+	 uint		lock_flags)
+{
+	vnode_t	*vp = XFS_ITOV(ip);
+
+	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);
+
+	xfs_iunlock(ip, lock_flags);
+
+	VN_RELE(vp);
+}
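+
+/*
+ * Illustrative sketch of the get/put pairing described above; this is
+ * not a real call site, and "mp" and "ino" are assumed to be in scope.
+ * The inode is looked up with the inode lock held shared, inspected,
+ * and then the reference and lock are dropped together.
+ */
+#if 0
+	xfs_inode_t	*ip;
+	int		error;
+
+	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
+	if (error)
+		return error;
+	/* ... read ip->i_d fields while holding the shared inode lock ... */
+	xfs_iput(ip, XFS_ILOCK_SHARED);
+#endif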
+
+/*
+ * Special iput for brand-new inodes that are still locked
+ */
+void
+xfs_iput_new(xfs_inode_t	*ip,
+	     uint		lock_flags)
+{
+	vnode_t		*vp = XFS_ITOV(ip);
+	struct inode	*inode = LINVFS_GET_IP(vp);
+
+	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
+
+	if ((ip->i_d.di_mode == 0)) {
+		ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
+		vn_mark_bad(vp);
+	}
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+	if (lock_flags)
+		xfs_iunlock(ip, lock_flags);
+	VN_RELE(vp);
+}
+
+
+/*
+ * This routine embodies the part of the reclaim code that pulls
+ * the inode from the inode hash table and the mount structure's
+ * inode list.
+ * This should only be called from xfs_reclaim().
+ */
+void
+xfs_ireclaim(xfs_inode_t *ip)
+{
+	vnode_t		*vp;
+
+	/*
+	 * Remove from old hash list and mount list.
+	 */
+	XFS_STATS_INC(xs_ig_reclaims);
+
+	xfs_iextract(ip);
+
+	/*
+	 * Here we do a spurious inode lock in order to coordinate with
+	 * xfs_sync().  This is because xfs_sync() references the inodes
+	 * in the mount list without taking references on the corresponding
+	 * vnodes.  We make that OK here by ensuring that we wait until
+	 * the inode is unlocked in xfs_sync() before we go ahead and
+	 * free it.  We get both the regular lock and the io lock because
+	 * the xfs_sync() code may need to drop the regular one but will
+	 * still hold the io lock.
+	 */
+	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+	/*
+	 * Release dquots (and their references) if any. An inode may escape
+	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
+	 */
+	XFS_QM_DQDETACH(ip->i_mount, ip);
+
+	/*
+	 * Pull our behavior descriptor from the vnode chain.
+	 */
+	vp = XFS_ITOV_NULL(ip);
+	if (vp) {
+		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
+	}
+
+	/*
+	 * Free all memory associated with the inode.
+	 */
+	xfs_idestroy(ip);
+}
+
+/*
+ * This routine removes an about-to-be-destroyed inode from
+ * all of the lists in which it is located with the exception
+ * of the behavior chain.
+ */
+void
+xfs_iextract(
+	xfs_inode_t	*ip)
+{
+	xfs_ihash_t	*ih;
+	xfs_inode_t	*iq;
+	xfs_mount_t	*mp;
+	xfs_chash_t	*ch;
+	xfs_chashlist_t *chl, *chm;
+	SPLDECL(s);
+
+	ih = ip->i_hash;
+	write_lock(&ih->ih_lock);
+	if ((iq = ip->i_next)) {
+		iq->i_prevp = ip->i_prevp;
+	}
+	*ip->i_prevp = iq;
+	write_unlock(&ih->ih_lock);
+
+	/*
+	 * Remove from cluster hash list
+	 *   1) delete the chashlist if this is the last inode on the chashlist
+	 *   2) unchain from list of inodes
+	 *   3) if chashlist->chl_ip points to this inode, point it at the next one
+	 */
+	mp = ip->i_mount;
+	ch = XFS_CHASH(mp, ip->i_blkno);
+	s = mutex_spinlock(&ch->ch_lock);
+
+	if (ip->i_cnext == ip) {
+		/* Last inode on chashlist */
+		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
+		ASSERT(ip->i_chash != NULL);
+		chm=NULL;
+		for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
+			if (chl->chl_blkno == ip->i_blkno) {
+				if (chm == NULL) {
+					/* first item on the list */
+					ch->ch_list = chl->chl_next;
+				} else {
+					chm->chl_next = chl->chl_next;
+				}
+				kmem_zone_free(xfs_chashlist_zone, chl);
+				break;
+			} else {
+				ASSERT(chl->chl_ip != ip);
+				chm = chl;
+			}
+		}
+		ASSERT_ALWAYS(chl != NULL);
+	} else {
+		/* delete one inode from a non-empty list */
+		iq = ip->i_cnext;
+		iq->i_cprev = ip->i_cprev;
+		ip->i_cprev->i_cnext = iq;
+		if (ip->i_chash->chl_ip == ip) {
+			ip->i_chash->chl_ip = iq;
+		}
+		ip->i_chash = __return_address;
+		ip->i_cprev = __return_address;
+		ip->i_cnext = __return_address;
+	}
+	mutex_spinunlock(&ch->ch_lock, s);
+
+	/*
+	 * Remove from mount's inode list.
+	 */
+	XFS_MOUNT_ILOCK(mp);
+	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
+	iq = ip->i_mnext;
+	iq->i_mprev = ip->i_mprev;
+	ip->i_mprev->i_mnext = iq;
+
+	/*
+	 * Fix up the head pointer if it points to the inode being deleted.
+	 */
+	if (mp->m_inodes == ip) {
+		if (ip == iq) {
+			mp->m_inodes = NULL;
+		} else {
+			mp->m_inodes = iq;
+		}
+	}
+
+	/* Deal with the deleted inodes list */
+	list_del_init(&ip->i_reclaim);
+
+	mp->m_ireclaims++;
+	XFS_MOUNT_IUNLOCK(mp);
+}
+
+/*
+ * This is a wrapper routine around the xfs_ilock() routine
+ * used to centralize some grungy code.  It is used in places
+ * that wish to lock the inode solely for reading the extents.
+ * The reason these places can't just call xfs_ilock(SHARED)
+ * is that the inode lock also guards the bringing in of the
+ * extents from disk for a file in b-tree format.  If the inode
+ * is in b-tree format, then we need to lock the inode exclusively
+ * until the extents are read in.  Locking it exclusively all
+ * the time would limit our parallelism unnecessarily, though.
+ * What we do instead is check to see if the extents have been
+ * read in yet, and only lock the inode exclusively if they
+ * have not.
+ *
+ * The function returns a value which should be given to the
+ * corresponding xfs_iunlock_map_shared().  This value is
+ * the mode in which the lock was actually taken.
+ */
+uint
+xfs_ilock_map_shared(
+	xfs_inode_t	*ip)
+{
+	uint	lock_mode;
+
+	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
+	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
+		lock_mode = XFS_ILOCK_EXCL;
+	} else {
+		lock_mode = XFS_ILOCK_SHARED;
+	}
+
+	xfs_ilock(ip, lock_mode);
+
+	return lock_mode;
+}
+
+/*
+ * This is simply the unlock routine to go with xfs_ilock_map_shared().
+ * All it does is call xfs_iunlock() with the given lock_mode.
+ */
+void
+xfs_iunlock_map_shared(
+	xfs_inode_t	*ip,
+	unsigned int	lock_mode)
+{
+	xfs_iunlock(ip, lock_mode);
+}
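+
+/*
+ * Sketch of the intended pairing (illustrative only, "ip" assumed in
+ * scope): the mode returned by xfs_ilock_map_shared() must be handed
+ * back to xfs_iunlock_map_shared() so the correct lock type is dropped.
+ */
+#if 0
+	uint	lock_mode;
+
+	lock_mode = xfs_ilock_map_shared(ip);
+	/* ... walk the data fork extent list here ... */
+	xfs_iunlock_map_shared(ip, lock_mode);
+#endif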
+
+/*
+ * The xfs inode contains 2 locks: a multi-reader lock called the
+ * i_iolock and a multi-reader lock called the i_lock.  This routine
+ * allows either or both of the locks to be obtained.
+ *
+ * The 2 locks should always be ordered so that the IO lock is
+ * obtained first in order to prevent deadlock.
+ *
+ * ip -- the inode being locked
+ * lock_flags -- this parameter indicates the inode's locks
+ *       to be locked.  It can be:
+ *		XFS_IOLOCK_SHARED,
+ *		XFS_IOLOCK_EXCL,
+ *		XFS_ILOCK_SHARED,
+ *		XFS_ILOCK_EXCL,
+ *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
+ *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
+ *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
+ *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
+ */
+void
+xfs_ilock(xfs_inode_t	*ip,
+	  uint		lock_flags)
+{
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);
+
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		mrupdate(&ip->i_iolock);
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		mraccess(&ip->i_iolock);
+	}
+	if (lock_flags & XFS_ILOCK_EXCL) {
+		mrupdate(&ip->i_lock);
+	} else if (lock_flags & XFS_ILOCK_SHARED) {
+		mraccess(&ip->i_lock);
+	}
+	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
+}
+
+/*
+ * This is just like xfs_ilock(), except that the caller
+ * is guaranteed not to sleep.  It returns 1 if it gets
+ * the requested locks and 0 otherwise.  If the IO lock is
+ * obtained but the inode lock cannot be, then the IO lock
+ * is dropped before returning.
+ *
+ * ip -- the inode being locked
+ * lock_flags -- this parameter indicates the inode's locks to be
+ *       locked.  See the comment for xfs_ilock() for a list
+ *	 of valid values.
+ *
+ */
+int
+xfs_ilock_nowait(xfs_inode_t	*ip,
+		 uint		lock_flags)
+{
+	int	iolocked;
+	int	ilocked;
+
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);
+
+	iolocked = 0;
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		iolocked = mrtryupdate(&ip->i_iolock);
+		if (!iolocked) {
+			return 0;
+		}
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		iolocked = mrtryaccess(&ip->i_iolock);
+		if (!iolocked) {
+			return 0;
+		}
+	}
+	if (lock_flags & XFS_ILOCK_EXCL) {
+		ilocked = mrtryupdate(&ip->i_lock);
+		if (!ilocked) {
+			if (iolocked) {
+				mrunlock(&ip->i_iolock);
+			}
+			return 0;
+		}
+	} else if (lock_flags & XFS_ILOCK_SHARED) {
+		ilocked = mrtryaccess(&ip->i_lock);
+		if (!ilocked) {
+			if (iolocked) {
+				mrunlock(&ip->i_iolock);
+			}
+			return 0;
+		}
+	}
+	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
+	return 1;
+}
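+
+/*
+ * Illustrative use of the non-blocking variant ("ip" assumed in scope,
+ * not a real call site): callers that must not sleep simply skip the
+ * inode when the locks cannot be taken immediately.
+ */
+#if 0
+	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED)) {
+		/* ... locks unavailable, skip this inode for now ... */
+	}
+#endif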
+
+/*
+ * xfs_iunlock() is used to drop the inode locks acquired with
+ * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
+ * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
+ * that we know which locks to drop.
+ *
+ * ip -- the inode being unlocked
+ * lock_flags -- this parameter indicates the inode's locks to be
+ *       unlocked.  See the comment for xfs_ilock() for a list
+ *	 of valid values for this parameter.
+ *
+ */
+void
+xfs_iunlock(xfs_inode_t	*ip,
+	    uint	lock_flags)
+{
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
+	ASSERT(lock_flags != 0);
+
+	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
+		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
+		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
+		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
+		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
+		mrunlock(&ip->i_iolock);
+	}
+
+	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
+		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
+		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
+		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
+		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
+		mrunlock(&ip->i_lock);
+
+		/*
+		 * Let the AIL know that this item has been unlocked in case
+		 * it is in the AIL and anyone is waiting on it.  Don't do
+		 * this if the caller has asked us not to.
+		 */
+		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
+		     ip->i_itemp != NULL) {
+			xfs_trans_unlocked_item(ip->i_mount,
+						(xfs_log_item_t*)(ip->i_itemp));
+		}
+	}
+	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
+}
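+
+/*
+ * Illustrative only ("ip" assumed in scope): take both locks in the
+ * documented order (IO lock first) and drop them with the same flags.
+ */
+#if 0
+	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	/* ... modify the inode and its data here ... */
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+#endif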
+
+/*
+ * Give up write locks.  The I/O lock cannot be held nested
+ * if it is being demoted.
+ */
+void
+xfs_ilock_demote(xfs_inode_t	*ip,
+		 uint		lock_flags)
+{
+	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
+
+	if (lock_flags & XFS_ILOCK_EXCL) {
+		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+		mrdemote(&ip->i_lock);
+	}
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+		mrdemote(&ip->i_iolock);
+	}
+}
+
+/*
+ * The following three routines simply manage the i_flock
+ * semaphore embedded in the inode.  This semaphore synchronizes
+ * processes attempting to flush the in-core inode back to disk.
+ */
+void
+xfs_iflock(xfs_inode_t *ip)
+{
+	psema(&(ip->i_flock), PINOD|PLTWAIT);
+}
+
+int
+xfs_iflock_nowait(xfs_inode_t *ip)
+{
+	return (cpsema(&(ip->i_flock)));
+}
+
+void
+xfs_ifunlock(xfs_inode_t *ip)
+{
+	ASSERT(valusema(&(ip->i_flock)) <= 0);
+	vsema(&(ip->i_flock));
+}
diff --git a/fs/xfs/xfs_imap.h b/fs/xfs/xfs_imap.h
new file mode 100644
index 0000000..e385064
--- /dev/null
+++ b/fs/xfs/xfs_imap.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_IMAP_H__
+#define	__XFS_IMAP_H__
+
+/*
+ * This is the structure passed to xfs_imap() to map
+ * an inode number to its on disk location.
+ */
+typedef struct xfs_imap {
+	xfs_daddr_t	im_blkno;	/* starting BB of inode chunk */
+	uint		im_len;		/* length in BBs of inode chunk */
+	xfs_agblock_t	im_agblkno;	/* logical block of inode chunk in ag */
+	ushort		im_ioffset;	/* inode offset in block in "inodes" */
+	ushort		im_boffset;	/* inode offset in block in bytes */
+} xfs_imap_t;
+
+#ifdef __KERNEL__
+struct xfs_mount;
+struct xfs_trans;
+int	xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
+		 xfs_imap_t *, uint);
+#endif
+
+#endif	/* __XFS_IMAP_H__ */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
new file mode 100644
index 0000000..43c632a
--- /dev/null
+++ b/fs/xfs/xfs_inode.c
@@ -0,0 +1,3876 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_imap.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_buf_item.h"
+#include "xfs_rw.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_utils.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_quota.h"
+#include "xfs_mac.h"
+#include "xfs_acl.h"
+
+
+kmem_zone_t *xfs_ifork_zone;
+kmem_zone_t *xfs_inode_zone;
+kmem_zone_t *xfs_chashlist_zone;
+
+/*
+ * Used in xfs_itruncate().  This is the maximum number of extents
+ * freed from a file in a single transaction.
+ */
+#define	XFS_ITRUNC_MAX_EXTENTS	2
+
+STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
+STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
+STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
+STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
+
+
+#ifdef DEBUG
+/*
+ * Make sure that the extents in the given memory buffer
+ * are valid.
+ */
+STATIC void
+xfs_validate_extents(
+	xfs_bmbt_rec_t		*ep,
+	int			nrecs,
+	int			disk,
+	xfs_exntfmt_t		fmt)
+{
+	xfs_bmbt_irec_t		irec;
+	xfs_bmbt_rec_t		rec;
+	int			i;
+
+	for (i = 0; i < nrecs; i++) {
+		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
+		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
+		if (disk)
+			xfs_bmbt_disk_get_all(&rec, &irec);
+		else
+			xfs_bmbt_get_all(&rec, &irec);
+		if (fmt == XFS_EXTFMT_NOSTATE)
+			ASSERT(irec.br_state == XFS_EXT_NORM);
+		ep++;
+	}
+}
+#else /* DEBUG */
+#define xfs_validate_extents(ep, nrecs, disk, fmt)
+#endif /* DEBUG */
+
+/*
+ * Check that none of the inodes in the buffer have a next
+ * unlinked field of 0.
+ */
+#if defined(DEBUG)
+void
+xfs_inobp_check(
+	xfs_mount_t	*mp,
+	xfs_buf_t	*bp)
+{
+	int		i;
+	int		j;
+	xfs_dinode_t	*dip;
+
+	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
+
+	for (i = 0; i < j; i++) {
+		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+					i * mp->m_sb.sb_inodesize);
+		if (!dip->di_next_unlinked)  {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
+				bp);
+			ASSERT(dip->di_next_unlinked);
+		}
+	}
+}
+#endif
+
+/*
+ * called from bwrite on xfs inode buffers
+ */
+void
+xfs_inobp_bwcheck(xfs_buf_t *bp)
+{
+	xfs_mount_t	*mp;
+	int		i;
+	int		j;
+	xfs_dinode_t	*dip;
+
+	ASSERT(XFS_BUF_FSPRIVATE3(bp, void *) != NULL);
+
+	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
+
+
+	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
+
+	for (i = 0; i < j; i++)  {
+		dip = (xfs_dinode_t *) xfs_buf_offset(bp,
+						i * mp->m_sb.sb_inodesize);
+		if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
+			cmn_err(CE_WARN,
+"Bad magic # 0x%x in XFS inode buffer 0x%Lx, starting blockno %Ld, offset 0x%x",
+				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
+				(__uint64_t)(__psunsigned_t) bp,
+				(__int64_t) XFS_BUF_ADDR(bp),
+				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+			xfs_fs_cmn_err(CE_WARN, mp,
+				"corrupt, unmount and run xfs_repair");
+		}
+		if (!dip->di_next_unlinked)  {
+			cmn_err(CE_WARN,
+"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x",
+				(__uint64_t)(__psunsigned_t) bp,
+				(__int64_t) XFS_BUF_ADDR(bp),
+				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+			xfs_fs_cmn_err(CE_WARN, mp,
+				"corrupt, unmount and run xfs_repair");
+		}
+	}
+
+	return;
+}
+
+/*
+ * This routine is called to map an inode number within a file
+ * system to the buffer containing the on-disk version of the
+ * inode.  It returns a pointer to the buffer containing the
+ * on-disk inode in the bpp parameter, and in the dipp parameter
+ * it returns a pointer to the on-disk inode within that buffer.
+ *
+ * If a non-zero error is returned, then the contents of bpp and
+ * dipp are undefined.
+ *
+ * Use xfs_imap() to determine the size and location of the
+ * buffer to read from disk.
+ */
+int
+xfs_inotobp(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	xfs_dinode_t	**dipp,
+	xfs_buf_t	**bpp,
+	int		*offset)
+{
+	int		di_ok;
+	xfs_imap_t	imap;
+	xfs_buf_t	*bp;
+	int		error;
+	xfs_dinode_t	*dip;
+
+	/*
+	 * Call the space management code to find the location of the
+	 * inode on disk.
+	 */
+	imap.im_blkno = 0;
+	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
+	if (error != 0) {
+		cmn_err(CE_WARN,
+	"xfs_inotobp: xfs_imap()  returned an "
+	"error %d on %s.  Returning error.", error, mp->m_fsname);
+		return error;
+	}
+
+	/*
+	 * If the inode number maps to a block outside the bounds of the
+	 * file system then return an error rather than calling read_buf
+	 * and panicking when we get an error from the driver.
+	 */
+	if ((imap.im_blkno + imap.im_len) >
+	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
+		cmn_err(CE_WARN,
+	"xfs_inotobp: inode number (%d + %d) maps to a block outside the bounds "
+	"of the file system %s.  Returning EINVAL.",
+			imap.im_blkno, imap.im_len,mp->m_fsname);
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
+	 * default to just a read_buf() call.
+	 */
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
+				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
+
+	if (error) {
+		cmn_err(CE_WARN,
+	"xfs_inotobp: xfs_trans_read_buf()  returned an "
+	"error %d on %s.  Returning error.", error, mp->m_fsname);
+		return error;
+	}
+	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
+	di_ok =
+		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
+		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
+	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
+			XFS_RANDOM_ITOBP_INOTOBP))) {
+		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
+		xfs_trans_brelse(tp, bp);
+		cmn_err(CE_WARN,
+	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
+	"error on %s.  Returning EFSCORRUPTED.",  mp->m_fsname);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	xfs_inobp_check(mp, bp);
+
+	/*
+	 * Set *dipp to point to the on-disk inode in the buffer.
+	 */
+	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
+	*bpp = bp;
+	*offset = imap.im_boffset;
+	return 0;
+}
+
+
+/*
+ * This routine is called to map an inode to the buffer containing
+ * the on-disk version of the inode.  It returns a pointer to the
+ * buffer containing the on-disk inode in the bpp parameter, and in
+ * the dipp parameter it returns a pointer to the on-disk inode within
+ * that buffer.
+ *
+ * If a non-zero error is returned, then the contents of bpp and
+ * dipp are undefined.
+ *
+ * If the inode is new and has not yet been initialized, use xfs_imap()
+ * to determine the size and location of the buffer to read from disk.
+ * If the inode has already been mapped to its buffer and read in once,
+ * then use the mapping information stored in the inode rather than
+ * calling xfs_imap().  This allows us to avoid the overhead of looking
+ * at the inode btree for small block file systems (see xfs_dilocate()).
+ * We can tell whether the inode has been mapped in before by comparing
+ * its disk block address to 0.  Only uninitialized inodes will have
+ * 0 for the disk block address.
+ */
+int
+xfs_itobp(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_dinode_t	**dipp,
+	xfs_buf_t	**bpp,
+	xfs_daddr_t	bno)
+{
+	xfs_buf_t	*bp;
+	int		error;
+	xfs_imap_t	imap;
+#ifdef __KERNEL__
+	int		i;
+	int		ni;
+#endif
+
+	if (ip->i_blkno == (xfs_daddr_t)0) {
+		/*
+		 * Call the space management code to find the location of the
+		 * inode on disk.
+		 */
+		imap.im_blkno = bno;
+		error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP);
+		if (error != 0) {
+			return error;
+		}
+
+		/*
+		 * If the inode number maps to a block outside the bounds
+		 * of the file system then return an error rather than calling
+		 * read_buf and panicking when we get an error from the
+		 * driver.
+		 */
+		if ((imap.im_blkno + imap.im_len) >
+		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
+					"(imap.im_blkno (0x%llx) "
+					"+ imap.im_len (0x%llx)) > "
+					" XFS_FSB_TO_BB(mp, "
+					"mp->m_sb.sb_dblocks) (0x%llx)",
+					(unsigned long long) imap.im_blkno,
+					(unsigned long long) imap.im_len,
+					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
+#endif /* DEBUG */
+			return XFS_ERROR(EINVAL);
+		}
+
+		/*
+		 * Fill in the fields in the inode that will be used to
+		 * map the inode to its buffer from now on.
+		 */
+		ip->i_blkno = imap.im_blkno;
+		ip->i_len = imap.im_len;
+		ip->i_boffset = imap.im_boffset;
+	} else {
+		/*
+		 * We've already mapped the inode once, so just use the
+		 * mapping that we saved the first time.
+		 */
+		imap.im_blkno = ip->i_blkno;
+		imap.im_len = ip->i_len;
+		imap.im_boffset = ip->i_boffset;
+	}
+	ASSERT(bno == 0 || bno == imap.im_blkno);
+
+	/*
+	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
+	 * default to just a read_buf() call.
+	 */
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
+				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
+
+	if (error) {
+#ifdef DEBUG
+		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
+				"xfs_trans_read_buf() returned error %d, "
+				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
+				error, (unsigned long long) imap.im_blkno,
+				(unsigned long long) imap.im_len);
+#endif /* DEBUG */
+		return error;
+	}
+#ifdef __KERNEL__
+	/*
+	 * Validate the magic number and version of every inode in the buffer
+	 * (on a DEBUG kernel), or only the first inode in the buffer otherwise.
+	 */
+#ifdef DEBUG
+	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
+#else
+	ni = 1;
+#endif
+	for (i = 0; i < ni; i++) {
+		int		di_ok;
+		xfs_dinode_t	*dip;
+
+		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+					(i << mp->m_sb.sb_inodelog));
+		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
+			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
+		if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
+				 XFS_RANDOM_ITOBP_INOTOBP))) {
+#ifdef DEBUG
+			prdev("bad inode magic/vsn daddr %lld #%d (magic=%x)",
+				mp->m_ddev_targp,
+				(unsigned long long)imap.im_blkno, i,
+				INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
+#endif
+			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
+					     mp, dip);
+			xfs_trans_brelse(tp, bp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+	}
+#endif	/* __KERNEL__ */
+
+	xfs_inobp_check(mp, bp);
+
+	/*
+	 * Mark the buffer as an inode buffer now that it looks good
+	 */
+	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
+
+	/*
+	 * Set *dipp to point to the on-disk inode in the buffer.
+	 */
+	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
+	*bpp = bp;
+	return 0;
+}
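+
+/*
+ * Illustrative sketch of how the mapping routine above is typically
+ * used ("mp", "tp" and "ip" assumed in scope, not a real call site):
+ * map the inode to its buffer, examine the on-disk inode, then release
+ * the buffer back to the transaction.
+ */
+#if 0
+	xfs_buf_t	*bp;
+	xfs_dinode_t	*dip;
+	int		error;
+
+	error = xfs_itobp(mp, tp, ip, &dip, &bp, 0);
+	if (error)
+		return error;
+	/* ... inspect dip->di_core here ... */
+	xfs_trans_brelse(tp, bp);
+#endif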
+
+/*
+ * Move inode type and inode format specific information from the
+ * on-disk inode to the in-core inode.  For fifos, devs, and sockets
+ * this means setting if_rdev to the proper value.  For files, directories,
+ * and symlinks this means to bring in the in-line data or extent
+ * pointers.  For a file in B-tree format, only the root is immediately
+ * brought in-core.  The rest will be in-lined in if_extents when it
+ * is first referenced (see xfs_iread_extents()).
+ */
+STATIC int
+xfs_iformat(
+	xfs_inode_t		*ip,
+	xfs_dinode_t		*dip)
+{
+	xfs_attr_shortform_t	*atp;
+	int			size;
+	int			error;
+	xfs_fsize_t             di_size;
+	ip->i_df.if_ext_max =
+		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	error = 0;
+
+	if (unlikely(
+	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
+		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
+	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
+		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
+			"  Unmount and run xfs_repair.",
+			(unsigned long long)ip->i_ino,
+			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
+			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
+			(unsigned long long)
+			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
+		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
+				     ip->i_mount, dip);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
+		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+			"corrupt dinode %Lu, forkoff = 0x%x."
+			"  Unmount and run xfs_repair.",
+			(unsigned long long)ip->i_ino,
+			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
+		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
+				     ip->i_mount, dip);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	switch (ip->i_d.di_mode & S_IFMT) {
+	case S_IFIFO:
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFSOCK:
+		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
+			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
+					      ip->i_mount, dip);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		ip->i_d.di_size = 0;
+		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
+		break;
+
+	case S_IFREG:
+	case S_IFLNK:
+	case S_IFDIR:
+		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
+		case XFS_DINODE_FMT_LOCAL:
+			/*
+			 * no local regular files yet
+			 */
+			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
+				xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+					"corrupt inode (local format for regular file) %Lu.  Unmount and run xfs_repair.",
+					(unsigned long long) ip->i_ino);
+				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
+						     XFS_ERRLEVEL_LOW,
+						     ip->i_mount, dip);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+
+			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
+			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
+				xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+					"corrupt inode %Lu (bad size %Ld for local inode).  Unmount and run xfs_repair.",
+					(unsigned long long) ip->i_ino,
+					(long long) di_size);
+				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
+						     XFS_ERRLEVEL_LOW,
+						     ip->i_mount, dip);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+
+			size = (int)di_size;
+			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
+			break;
+		case XFS_DINODE_FMT_EXTENTS:
+			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
+			break;
+		case XFS_DINODE_FMT_BTREE:
+			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
+			break;
+		default:
+			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
+					 ip->i_mount);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		break;
+
+	default:
+		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (error) {
+		return error;
+	}
+	if (!XFS_DFORK_Q(dip))
+		return 0;
+	ASSERT(ip->i_afp == NULL);
+	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+	ip->i_afp->if_ext_max =
+		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
+	case XFS_DINODE_FMT_LOCAL:
+		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
+		size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT);
+		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
+		break;
+	case XFS_DINODE_FMT_EXTENTS:
+		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
+		break;
+	case XFS_DINODE_FMT_BTREE:
+		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
+		break;
+	default:
+		error = XFS_ERROR(EFSCORRUPTED);
+		break;
+	}
+	if (error) {
+		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+		ip->i_afp = NULL;
+		xfs_idestroy_fork(ip, XFS_DATA_FORK);
+	}
+	return error;
+}
+
+/*
+ * The file is in-lined in the on-disk inode.
+ * If it fits into if_inline_data, then copy
+ * it there, otherwise allocate a buffer for it
+ * and copy the data there.  Either way, set
+ * if_data to point at the data.
+ * If we allocate a buffer for the data, make
+ * sure that its size is a multiple of 4 and
+ * record the real size in if_real_bytes.
+ */
+STATIC int
+xfs_iformat_local(
+	xfs_inode_t	*ip,
+	xfs_dinode_t	*dip,
+	int		whichfork,
+	int		size)
+{
+	xfs_ifork_t	*ifp;
+	int		real_size;
+
+	/*
+	 * If the size is unreasonable, then something
+	 * is wrong and we just bail out rather than crash in
+	 * kmem_alloc() or memcpy() below.
+	 */
+	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
+		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+			"corrupt inode %Lu (bad size %d for local fork, size = %d).  Unmount and run xfs_repair.",
+			(unsigned long long) ip->i_ino, size,
+			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
+		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
+				     ip->i_mount, dip);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	real_size = 0;
+	if (size == 0)
+		ifp->if_u1.if_data = NULL;
+	else if (size <= sizeof(ifp->if_u2.if_inline_data))
+		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+	else {
+		real_size = roundup(size, 4);
+		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+	}
+	ifp->if_bytes = size;
+	ifp->if_real_bytes = real_size;
+	if (size)
+		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
+	ifp->if_flags &= ~XFS_IFEXTENTS;
+	ifp->if_flags |= XFS_IFINLINE;
+	return 0;
+}
+
+/*
+ * The file consists of a set of extents all
+ * of which fit into the on-disk inode.
+ * If there are few enough extents to fit into
+ * the if_inline_ext, then copy them there.
+ * Otherwise allocate a buffer for them and copy
+ * them into it.  Either way, set if_extents
+ * to point at the extents.
+ */
+STATIC int
+xfs_iformat_extents(
+	xfs_inode_t	*ip,
+	xfs_dinode_t	*dip,
+	int		whichfork)
+{
+	xfs_bmbt_rec_t	*ep, *dp;
+	xfs_ifork_t	*ifp;
+	int		nex;
+	int		real_size;
+	int		size;
+	int		i;
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
+	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
+
+	/*
+	 * If the number of extents is unreasonable, then something
+	 * is wrong and we just bail out rather than crash in
+	 * kmem_alloc() or memcpy() below.
+	 */
+	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
+		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+			"corrupt inode %Lu ((a)extents = %d).  Unmount and run xfs_repair.",
+			(unsigned long long) ip->i_ino, nex);
+		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
+				     ip->i_mount, dip);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	real_size = 0;
+	if (nex == 0)
+		ifp->if_u1.if_extents = NULL;
+	else if (nex <= XFS_INLINE_EXTS)
+		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
+	else {
+		ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP);
+		ASSERT(ifp->if_u1.if_extents != NULL);
+		real_size = size;
+	}
+	ifp->if_bytes = size;
+	ifp->if_real_bytes = real_size;
+	if (size) {
+		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
+		xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip));
+		ep = ifp->if_u1.if_extents;
+		for (i = 0; i < nex; i++, ep++, dp++) {
+			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
+								ARCH_CONVERT);
+			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
+								ARCH_CONVERT);
+		}
+		xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
+			whichfork);
+		if (whichfork != XFS_DATA_FORK ||
+			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
+				if (unlikely(xfs_check_nostate_extents(
+				    ifp->if_u1.if_extents, nex))) {
+					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
+							 XFS_ERRLEVEL_LOW,
+							 ip->i_mount);
+					return XFS_ERROR(EFSCORRUPTED);
+				}
+	}
+	ifp->if_flags |= XFS_IFEXTENTS;
+	return 0;
+}
+
+/*
+ * The file has too many extents to fit into
+ * the inode, so they are in B-tree format.
+ * Allocate a buffer for the root of the B-tree
+ * and copy the root into it.  The if_extents
+ * field will remain NULL until all of the
+ * extents are read in (when they are needed).
+ */
+STATIC int
+xfs_iformat_btree(
+	xfs_inode_t		*ip,
+	xfs_dinode_t		*dip,
+	int			whichfork)
+{
+	xfs_bmdr_block_t	*dfp;
+	xfs_ifork_t		*ifp;
+	/* REFERENCED */
+	int			nrecs;
+	int			size;
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
+	size = XFS_BMAP_BROOT_SPACE(dfp);
+	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
+
+	/*
+	 * blow out if -- fork has fewer extents than can fit in
+	 * fork (fork shouldn't be a btree format), root btree
+	 * block has more records than can fit into the fork,
+	 * or the number of extents is greater than the number of
+	 * blocks.
+	 */
+	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
+	    || XFS_BMDR_SPACE_CALC(nrecs) >
+			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
+	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+			"corrupt inode %Lu (btree).  Unmount and run xfs_repair.",
+			(unsigned long long) ip->i_ino);
+		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
+				 ip->i_mount);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	ifp->if_broot_bytes = size;
+	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
+	ASSERT(ifp->if_broot != NULL);
+	/*
+	 * Copy and convert from the on-disk structure
+	 * to the in-memory structure.
+	 */
+	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
+		ifp->if_broot, size);
+	ifp->if_flags &= ~XFS_IFEXTENTS;
+	ifp->if_flags |= XFS_IFBROOT;
+
+	return 0;
+}
+
+/*
+ * xfs_xlate_dinode_core - translate an xfs_dinode_core_t between ondisk
+ * and native format
+ *
+ * buf  = on-disk representation
+ * dip  = native representation
+ * dir  = direction - +ve -> disk to native
+ *                    -ve -> native to disk
+ */
+void
+xfs_xlate_dinode_core(
+	xfs_caddr_t		buf,
+	xfs_dinode_core_t	*dip,
+	int			dir)
+{
+	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
+	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
+	xfs_arch_t		arch = ARCH_CONVERT;
+
+	ASSERT(dir);
+
+	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
+	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
+	INT_XLATE(buf_core->di_version,	mem_core->di_version, dir, arch);
+	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
+	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
+	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
+	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
+	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
+	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
+
+	if (dir > 0) {
+		memcpy(mem_core->di_pad, buf_core->di_pad,
+			sizeof(buf_core->di_pad));
+	} else {
+		memcpy(buf_core->di_pad, mem_core->di_pad,
+			sizeof(buf_core->di_pad));
+	}
+
+	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);
+
+	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
+			dir, arch);
+	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
+			dir, arch);
+	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
+			dir, arch);
+	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
+			dir, arch);
+	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
+			dir, arch);
+	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
+			dir, arch);
+	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
+	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
+	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
+	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
+	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
+	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
+	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
+	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
+	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
+	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
+	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
+}
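+
+/*
+ * A minimal sketch of the dir convention above.  The disk-to-native
+ * call is how xfs_iread() below uses this routine; the native-to-disk
+ * direction is the assumed mirror image used by the flush path.
+ *
+ *	// on-disk -> in-core (dir > 0)
+ *	xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, &ip->i_d, 1);
+ *
+ *	// in-core -> on-disk (dir < 0)
+ *	xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, &ip->i_d, -1);
+ */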
+
+STATIC uint
+_xfs_dic2xflags(
+	xfs_dinode_core_t	*dic,
+	__uint16_t		di_flags)
+{
+	uint			flags = 0;
+
+	if (di_flags & XFS_DIFLAG_ANY) {
+		if (di_flags & XFS_DIFLAG_REALTIME)
+			flags |= XFS_XFLAG_REALTIME;
+		if (di_flags & XFS_DIFLAG_PREALLOC)
+			flags |= XFS_XFLAG_PREALLOC;
+		if (di_flags & XFS_DIFLAG_IMMUTABLE)
+			flags |= XFS_XFLAG_IMMUTABLE;
+		if (di_flags & XFS_DIFLAG_APPEND)
+			flags |= XFS_XFLAG_APPEND;
+		if (di_flags & XFS_DIFLAG_SYNC)
+			flags |= XFS_XFLAG_SYNC;
+		if (di_flags & XFS_DIFLAG_NOATIME)
+			flags |= XFS_XFLAG_NOATIME;
+		if (di_flags & XFS_DIFLAG_NODUMP)
+			flags |= XFS_XFLAG_NODUMP;
+		if (di_flags & XFS_DIFLAG_RTINHERIT)
+			flags |= XFS_XFLAG_RTINHERIT;
+		if (di_flags & XFS_DIFLAG_PROJINHERIT)
+			flags |= XFS_XFLAG_PROJINHERIT;
+		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
+			flags |= XFS_XFLAG_NOSYMLINKS;
+	}
+
+	return flags;
+}
+
+uint
+xfs_ip2xflags(
+	xfs_inode_t		*ip)
+{
+	xfs_dinode_core_t	*dic = &ip->i_d;
+
+	return _xfs_dic2xflags(dic, dic->di_flags) |
+		(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
+}
+
+uint
+xfs_dic2xflags(
+	xfs_dinode_core_t	*dic)
+{
+	return _xfs_dic2xflags(dic, INT_GET(dic->di_flags, ARCH_CONVERT)) |
+		(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
+}
+
+/*
+ * Given a mount structure and an inode number, return a pointer
+ * to a newly allocated in-core inode corresponding to the given
+ * inode number.
+ *
+ * Initialize the inode's attributes and extent pointers if it
+ * already has them (it will not if the inode has no links).
+ */
+int
+xfs_iread(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	xfs_inode_t	**ipp,
+	xfs_daddr_t	bno)
+{
+	xfs_buf_t	*bp;
+	xfs_dinode_t	*dip;
+	xfs_inode_t	*ip;
+	int		error;
+
+	ASSERT(xfs_inode_zone != NULL);
+
+	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
+	ip->i_ino = ino;
+	ip->i_mount = mp;
+
+	/*
+	 * Get pointers to the on-disk inode and the buffer containing it.
+	 * If the inode number refers to a block outside the file system
+	 * then xfs_itobp() will return an error.  In this case we should
+	 * return that error as well.  Set i_blkno to 0 so that xfs_itobp() will
+	 * know that this is a new incore inode.
+	 */
+	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno);
+
+	if (error != 0) {
+		kmem_zone_free(xfs_inode_zone, ip);
+		return error;
+	}
+
+	/*
+	 * Initialize inode's trace buffers.
+	 * Do this before xfs_iformat in case it adds entries.
+	 */
+#ifdef XFS_BMAP_TRACE
+	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_BMBT_TRACE
+	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_RW_TRACE
+	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_ILOCK_TRACE
+	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_DIR2_TRACE
+	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
+#endif
+
+	/*
+	 * If we got something that isn't an inode it means someone
+	 * (nfs or dmi) has a stale handle.
+	 */
+	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
+		kmem_zone_free(xfs_inode_zone, ip);
+		xfs_trans_brelse(tp, bp);
+#ifdef DEBUG
+		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
+				"dip->di_core.di_magic (0x%x) != "
+				"XFS_DINODE_MAGIC (0x%x)",
+				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
+				XFS_DINODE_MAGIC);
+#endif /* DEBUG */
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * If the on-disk inode is already linked to a directory
+	 * entry, copy all of the inode into the in-core inode.
+	 * xfs_iformat() handles copying in the inode format
+	 * specific information.
+	 * Otherwise, just get the truly permanent information.
+	 */
+	if (dip->di_core.di_mode) {
+		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
+		     &(ip->i_d), 1);
+		error = xfs_iformat(ip, dip);
+		if (error)  {
+			kmem_zone_free(xfs_inode_zone, ip);
+			xfs_trans_brelse(tp, bp);
+#ifdef DEBUG
+			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
+					"xfs_iformat() returned error %d",
+					error);
+#endif /* DEBUG */
+			return error;
+		}
+	} else {
+		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
+		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
+		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
+		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
+		/*
+		 * Make sure to pull in the mode here as well in
+		 * case the inode is released without being used.
+		 * This ensures that xfs_inactive() will see that
+		 * the inode is already free and not try to mess
+		 * with the uninitialized part of it.
+		 */
+		ip->i_d.di_mode = 0;
+		/*
+		 * Initialize the per-fork minima and maxima for a new
+		 * inode here.  xfs_iformat will do it for old inodes.
+		 */
+		ip->i_df.if_ext_max =
+			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	}
+
+	INIT_LIST_HEAD(&ip->i_reclaim);
+
+	/*
+	 * The inode format changed when we moved the link count and
+	 * made it 32 bits long.  If this is an old format inode,
+	 * convert it in memory to look like a new one.  If it gets
+	 * flushed to disk we will convert back before flushing or
+	 * logging it.  We zero out the new projid field and the old link
+	 * count field.  We'll handle clearing the pad field (the remains
+	 * of the old uuid field) when we actually convert the inode to
+	 * the new format. We don't change the version number so that we
+	 * can distinguish this from a real new format inode.
+	 */
+	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
+		ip->i_d.di_nlink = ip->i_d.di_onlink;
+		ip->i_d.di_onlink = 0;
+		ip->i_d.di_projid = 0;
+	}
+
+	ip->i_delayed_blks = 0;
+
+	/*
+	 * Mark the buffer containing the inode as something to keep
+	 * around for a while.  This helps to keep recently accessed
+	 * meta-data in-core longer.
+	 */
+	 XFS_BUF_SET_REF(bp, XFS_INO_REF);
+
+	/*
+	 * Use xfs_trans_brelse() to release the buffer containing the
+	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
+	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
+	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
+	 * will only release the buffer if it is not dirty within the
+	 * transaction.  It will be OK to release the buffer in this case,
+	 * because inodes on disk are never destroyed and we will be
+	 * locking the new in-core inode before putting it in the hash
+	 * table where other processes can find it.  Thus we don't have
+	 * to worry about the inode being changed just because we released
+	 * the buffer.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*ipp = ip;
+	return 0;
+}
+
+/*
+ * Read in extents from a btree-format inode.
+ * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
+ */
+int
+xfs_iread_extents(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	int		whichfork)
+{
+	int		error;
+	xfs_ifork_t	*ifp;
+	size_t		size;
+
+	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
+				 ip->i_mount);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	/*
+	 * We know that the size is valid (it's checked in iformat_btree)
+	 */
+	ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP);
+	ASSERT(ifp->if_u1.if_extents != NULL);
+	ifp->if_lastex = NULLEXTNUM;
+	ifp->if_bytes = ifp->if_real_bytes = (int)size;
+	ifp->if_flags |= XFS_IFEXTENTS;
+	error = xfs_bmap_read_extents(tp, ip, whichfork);
+	if (error) {
+		kmem_free(ifp->if_u1.if_extents, size);
+		ifp->if_u1.if_extents = NULL;
+		ifp->if_bytes = ifp->if_real_bytes = 0;
+		ifp->if_flags &= ~XFS_IFEXTENTS;
+		return error;
+	}
+	xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents,
+		XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip));
+	return 0;
+}
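+
+/*
+ * Hedged usage sketch: a caller that finds a btree-format fork whose
+ * extents are not yet in memory is expected to do roughly the
+ * following before walking if_extents (surrounding locking omitted):
+ *
+ *	ifp = XFS_IFORK_PTR(ip, whichfork);
+ *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ *		error = xfs_iread_extents(tp, ip, whichfork);
+ *		if (error)
+ *			return error;
+ *	}
+ */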
+
+/*
+ * Allocate an inode on disk and return a copy of its in-core version.
+ * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
+ * appropriately within the inode.  The uid and gid for the inode are
+ * set according to the contents of the given cred structure.
+ *
+ * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
+ * has a free inode available, call xfs_iget()
+ * to obtain the in-core version of the allocated inode.  Finally,
+ * fill in the inode and log its initial contents.  In this case,
+ * ialloc_context would be set to NULL and call_again set to false.
+ *
+ * If xfs_dialloc() does not have an available inode,
+ * it will replenish its supply by doing an allocation. Since we can
+ * only do one allocation within a transaction without deadlocks, we
+ * must commit the current transaction before returning the inode itself.
+ * In this case, therefore, we will set call_again to true and return.
+ * The caller should then commit the current transaction, start a new
+ * transaction, and call xfs_ialloc() again to actually get the inode.
+ *
+ * To ensure that some other process does not grab the inode that
+ * was allocated during the first call to xfs_ialloc(), this routine
+ * also returns the [locked] bp pointing to the head of the freelist
+ * as ialloc_context.  The caller should hold this buffer across
+ * the commit and pass it back into this routine on the second call.
+ */
+int
+xfs_ialloc(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*pip,
+	mode_t		mode,
+	nlink_t		nlink,
+	xfs_dev_t	rdev,
+	cred_t		*cr,
+	xfs_prid_t	prid,
+	int		okalloc,
+	xfs_buf_t	**ialloc_context,
+	boolean_t	*call_again,
+	xfs_inode_t	**ipp)
+{
+	xfs_ino_t	ino;
+	xfs_inode_t	*ip;
+	vnode_t		*vp;
+	uint		flags;
+	int		error;
+
+	/*
+	 * Call the space management code to pick
+	 * the on-disk inode to be allocated.
+	 */
+	error = xfs_dialloc(tp, pip->i_ino, mode, okalloc,
+			    ialloc_context, call_again, &ino);
+	if (error != 0) {
+		return error;
+	}
+	if (*call_again || ino == NULLFSINO) {
+		*ipp = NULL;
+		return 0;
+	}
+	ASSERT(*ialloc_context == NULL);
+
+	/*
+	 * Get the in-core inode with the lock held exclusively.
+	 * This is because we're setting fields here that we need
+	 * to prevent others from looking at until we're done.
+	 */
+	error = xfs_trans_iget(tp->t_mountp, tp, ino,
+			IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+	if (error != 0) {
+		return error;
+	}
+	ASSERT(ip != NULL);
+
+	vp = XFS_ITOV(ip);
+	vp->v_type = IFTOVT(mode);
+	ip->i_d.di_mode = (__uint16_t)mode;
+	ip->i_d.di_onlink = 0;
+	ip->i_d.di_nlink = nlink;
+	ASSERT(ip->i_d.di_nlink == nlink);
+	ip->i_d.di_uid = current_fsuid(cr);
+	ip->i_d.di_gid = current_fsgid(cr);
+	ip->i_d.di_projid = prid;
+	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+
+	/*
+	 * If the superblock version is up to where we support new format
+	 * inodes and this is currently an old format inode, then change
+	 * the inode version number now.  This way we only do the conversion
+	 * here rather than here and in the flush/logging code.
+	 */
+	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
+	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
+		ip->i_d.di_version = XFS_DINODE_VERSION_2;
+		/*
+		 * We've already zeroed the old link count, the projid field,
+		 * and the pad field.
+		 */
+	}
+
+	/*
+	 * Project ids won't be stored on disk if we are using a version 1 inode.
+	 */
+	if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
+		xfs_bump_ino_vers2(tp, ip);
+
+	if (XFS_INHERIT_GID(pip, vp->v_vfsp)) {
+		ip->i_d.di_gid = pip->i_d.di_gid;
+		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
+			ip->i_d.di_mode |= S_ISGID;
+		}
+	}
+
+	/*
+	 * If the group ID of the new file does not match the effective group
+	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
+	 * (and only if the irix_sgid_inherit compatibility variable is set).
+	 */
+	if ((irix_sgid_inherit) &&
+	    (ip->i_d.di_mode & S_ISGID) &&
+	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
+		ip->i_d.di_mode &= ~S_ISGID;
+	}
+
+	ip->i_d.di_size = 0;
+	ip->i_d.di_nextents = 0;
+	ASSERT(ip->i_d.di_nblocks == 0);
+	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
+	/*
+	 * di_gen will have been taken care of in xfs_iread.
+	 */
+	ip->i_d.di_extsize = 0;
+	ip->i_d.di_dmevmask = 0;
+	ip->i_d.di_dmstate = 0;
+	ip->i_d.di_flags = 0;
+	flags = XFS_ILOG_CORE;
+	switch (mode & S_IFMT) {
+	case S_IFIFO:
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFSOCK:
+		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
+		ip->i_df.if_u2.if_rdev = rdev;
+		ip->i_df.if_flags = 0;
+		flags |= XFS_ILOG_DEV;
+		break;
+	case S_IFREG:
+	case S_IFDIR:
+		if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
+			if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
+				if ((mode & S_IFMT) == S_IFDIR) {
+					ip->i_d.di_flags |= XFS_DIFLAG_RTINHERIT;
+				} else {
+					ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
+					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
+				}
+			}
+			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
+			    xfs_inherit_noatime)
+				ip->i_d.di_flags |= XFS_DIFLAG_NOATIME;
+			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
+			    xfs_inherit_nodump)
+				ip->i_d.di_flags |= XFS_DIFLAG_NODUMP;
+			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
+			    xfs_inherit_sync)
+				ip->i_d.di_flags |= XFS_DIFLAG_SYNC;
+			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
+			    xfs_inherit_nosymlinks)
+				ip->i_d.di_flags |= XFS_DIFLAG_NOSYMLINKS;
+		}
+		/* FALLTHROUGH */
+	case S_IFLNK:
+		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
+		ip->i_df.if_flags = XFS_IFEXTENTS;
+		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
+		ip->i_df.if_u1.if_extents = NULL;
+		break;
+	default:
+		ASSERT(0);
+	}
+	/*
+	 * Attribute fork settings for new inode.
+	 */
+	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+	ip->i_d.di_anextents = 0;
+
+	/*
+	 * Log the new values stuffed into the inode.
+	 */
+	xfs_trans_log_inode(tp, ip, flags);
+
+	/* now that we have a v_type we can set Linux inode ops (& unlock) */
+	VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
+
+	*ipp = ip;
+	return 0;
+}
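+
+/*
+ * Illustrative caller-side sketch of the call_again protocol described
+ * above (the local names dp, credp and ialloc_context are hypothetical;
+ * error handling is elided):
+ *
+ *	error = xfs_ialloc(tp, dp, mode, 1, rdev, credp, prid, okalloc,
+ *			   &ialloc_context, &call_again, &ip);
+ *	if (!error && call_again) {
+ *		// xfs_dialloc() had to replenish the inode freelist:
+ *		// commit the current transaction while holding
+ *		// ialloc_context, start a new transaction, then call
+ *		// xfs_ialloc() again to pick up the inode made
+ *		// available by the first call.
+ *		error = xfs_ialloc(tp, dp, mode, 1, rdev, credp, prid,
+ *				   okalloc, &ialloc_context, &call_again,
+ *				   &ip);
+ *	}
+ */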
+
+/*
+ * Check to make sure that there are no blocks allocated to the
+ * file beyond the size of the file.  We don't check this for
+ * files with fixed size extents or real time extents, but we
+ * at least do it for regular files.
+ */
+#ifdef DEBUG
+void
+xfs_isize_check(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip,
+	xfs_fsize_t	isize)
+{
+	xfs_fileoff_t	map_first;
+	int		nimaps;
+	xfs_bmbt_irec_t	imaps[2];
+
+	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
+		return;
+
+	if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME )
+		return;
+
+	nimaps = 2;
+	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
+	/*
+	 * The filesystem could be shutting down, so bmapi may return
+	 * an error.
+	 */
+	if (xfs_bmapi(NULL, ip, map_first,
+			 (XFS_B_TO_FSB(mp,
+				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
+			  map_first),
+			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
+			 NULL))
+	    return;
+	ASSERT(nimaps == 1);
+	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
+}
+#endif	/* DEBUG */
+
+/*
+ * Calculate the last possible buffered byte in a file.  This must
+ * include data that was buffered beyond the EOF by the write code.
+ * This also needs to deal with overflowing the xfs_fsize_t type
+ * which can happen for sizes near the limit.
+ *
+ * We also need to take into account any blocks beyond the EOF.  It
+ * may be the case that they were buffered by a write which failed.
+ * In that case the pages will still be in memory, but the inode size
+ * will never have been updated.
+ */
+xfs_fsize_t
+xfs_file_last_byte(
+	xfs_inode_t	*ip)
+{
+	xfs_mount_t	*mp;
+	xfs_fsize_t	last_byte;
+	xfs_fileoff_t	last_block;
+	xfs_fileoff_t	size_last_block;
+	int		error;
+
+	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
+
+	mp = ip->i_mount;
+	/*
+	 * Only check for blocks beyond the EOF if the extents have
+	 * been read in.  This eliminates the need for the inode lock,
+	 * and it also saves us from looking when it really isn't
+	 * necessary.
+	 */
+	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
+		error = xfs_bmap_last_offset(NULL, ip, &last_block,
+			XFS_DATA_FORK);
+		if (error) {
+			last_block = 0;
+		}
+	} else {
+		last_block = 0;
+	}
+	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_d.di_size);
+	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
+
+	last_byte = XFS_FSB_TO_B(mp, last_block);
+	if (last_byte < 0) {
+		return XFS_MAXIOFFSET(mp);
+	}
+	last_byte += (1 << mp->m_writeio_log);
+	if (last_byte < 0) {
+		return XFS_MAXIOFFSET(mp);
+	}
+	return last_byte;
+}
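+
+/*
+ * Worked example of the calculation above (block size and write size
+ * are assumed, not taken from this file): with 4096-byte blocks,
+ * m_writeio_log == 16 (64k writes), di_size == 10000 bytes and no
+ * extents mapped beyond the EOF,
+ *
+ *	size_last_block = XFS_B_TO_FSB(mp, 10000) = 3
+ *	last_byte       = XFS_FSB_TO_B(mp, 3) + (1 << 16)
+ *	                = 12288 + 65536 = 77824
+ *
+ * i.e. we always look one write-I/O unit beyond the rounded-up EOF.
+ */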
+
+#if defined(XFS_RW_TRACE)
+STATIC void
+xfs_itrunc_trace(
+	int		tag,
+	xfs_inode_t	*ip,
+	int		flag,
+	xfs_fsize_t	new_size,
+	xfs_off_t	toss_start,
+	xfs_off_t	toss_finish)
+{
+	if (ip->i_rwtrace == NULL) {
+		return;
+	}
+
+	ktrace_enter(ip->i_rwtrace,
+		     (void*)((long)tag),
+		     (void*)ip,
+		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
+		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
+		     (void*)((long)flag),
+		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
+		     (void*)(unsigned long)(new_size & 0xffffffff),
+		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
+		     (void*)(unsigned long)(toss_start & 0xffffffff),
+		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
+		     (void*)(unsigned long)(toss_finish & 0xffffffff),
+		     (void*)(unsigned long)current_cpu(),
+		     (void*)0,
+		     (void*)0,
+		     (void*)0,
+		     (void*)0);
+}
+#else
+#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
+#endif
+
+/*
+ * Start the truncation of the file to new_size.  The new size
+ * must be smaller than the current size.  This routine will
+ * clear the buffer and page caches of file data in the removed
+ * range, and xfs_itruncate_finish() will remove the underlying
+ * disk blocks.
+ *
+ * The inode must have its I/O lock locked EXCLUSIVELY, and it
+ * must NOT have the inode lock held at all.  This is because we're
+ * calling into the buffer/page cache code and we can't hold the
+ * inode lock when we do so.
+ *
+ * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
+ * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
+ * in the case that the caller is locking things out of order and
+ * may not be able to call xfs_itruncate_finish() with the inode lock
+ * held without dropping the I/O lock.  If the caller must drop the
+ * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
+ * must be called again with all the same restrictions as the initial
+ * call.
+ */
+void
+xfs_itruncate_start(
+	xfs_inode_t	*ip,
+	uint		flags,
+	xfs_fsize_t	new_size)
+{
+	xfs_fsize_t	last_byte;
+	xfs_off_t	toss_start;
+	xfs_mount_t	*mp;
+	vnode_t		*vp;
+
+	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
+	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
+	       (flags == XFS_ITRUNC_MAYBE));
+
+	mp = ip->i_mount;
+	vp = XFS_ITOV(ip);
+	/*
+	 * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of
+	 * pages and buffers overlapping the region being removed.  We
+	 * have to use the less efficient VOP_FLUSHINVAL_PAGES() in the
+	 * case that the caller may not be able to finish the truncate
+	 * without dropping the inode's I/O lock.  Make sure to catch any
+	 * pages brought in by buffers overlapping the EOF by searching
+	 * out beyond the isize by our block size.  We round new_size up
+	 * to a block boundary so that we don't toss things on the same
+	 * block as new_size but before it.
+	 *
+	 * Before calling VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES(),
+	 * make sure to call remapf() over the same region if the file
+	 * is mapped.  This frees up mapped file references to the pages
+	 * in the given range, and for the VOP_FLUSHINVAL_PAGES() case it
+	 * ensures that we get the latest mapped changes flushed out.
+	 */
+	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
+	toss_start = XFS_FSB_TO_B(mp, toss_start);
+	if (toss_start < 0) {
+		/*
+		 * The place to start tossing is beyond our maximum
+		 * file size, so there is no way that the data extended
+		 * out there.
+		 */
+		return;
+	}
+	last_byte = xfs_file_last_byte(ip);
+	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
+			 last_byte);
+	if (last_byte > toss_start) {
+		if (flags & XFS_ITRUNC_DEFINITE) {
+			VOP_TOSS_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED);
+		} else {
+			VOP_FLUSHINVAL_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED);
+		}
+	}
+
+#ifdef DEBUG
+	if (new_size == 0) {
+		ASSERT(VN_CACHED(vp) == 0);
+	}
+#endif
+}
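+
+/*
+ * A short sketch of the two flag cases described above:
+ *
+ *	// Caller can hold the I/O lock across the whole truncate, so
+ *	// the cheaper page tossing path is safe.
+ *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
+ *
+ *	// Caller may have to drop the I/O lock before calling
+ *	// xfs_itruncate_finish(), so the pages are flushed instead of
+ *	// tossed, and xfs_itruncate_start() must be repeated if the
+ *	// lock really is dropped.
+ *	xfs_itruncate_start(ip, XFS_ITRUNC_MAYBE, new_size);
+ */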
+
+/*
+ * Shrink the file to the given new_size.  The new
+ * size must be smaller than the current size.
+ * This will free up the underlying blocks
+ * in the removed range after a call to xfs_itruncate_start()
+ * or xfs_atruncate_start().
+ *
+ * The transaction passed to this routine must have made
+ * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
+ * This routine may commit the given transaction and
+ * start new ones, so make sure everything involved in
+ * the transaction is tidy before calling here.
+ * Some transaction will be returned to the caller to be
+ * committed.  The incoming transaction must already include
+ * the inode, and both inode locks must be held exclusively.
+ * The inode must also be "held" within the transaction.  On
+ * return the inode will be "held" within the returned transaction.
+ * This routine does NOT require any disk space to be reserved
+ * for it within the transaction.
+ *
+ * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK,
+ * and it indicates the fork which is to be truncated.  For the
+ * attribute fork we only support truncation to size 0.
+ *
+ * We use the sync parameter to indicate whether or not the first
+ * transaction we perform might have to be synchronous.  For the attr fork,
+ * it needs to be so if the unlink of the inode is not yet known to be
+ * permanent in the log.  This keeps us from freeing and reusing the
+ * blocks of the attribute fork before the unlink of the inode becomes
+ * permanent.
+ *
+ * For the data fork, we normally have to run synchronously if we're
+ * being called out of the inactive path or we're being called
+ * out of the create path where we're truncating an existing file.
+ * Either way, the truncate needs to be sync so blocks don't reappear
+ * in the file with altered data in case of a crash.  wsync filesystems
+ * can run the first case async because anything that shrinks the inode
+ * has to run sync so by the time we're called here from inactive, the
+ * inode size is permanently set to 0.
+ *
+ * Calls from the truncate path always need to be sync unless we're
+ * in a wsync filesystem and the file has already been unlinked.
+ *
+ * The caller is responsible for correctly setting the sync parameter.
+ * It gets too hard for us to guess here which path we're being called
+ * out of just based on inode state.
+ */
+int
+xfs_itruncate_finish(
+	xfs_trans_t	**tp,
+	xfs_inode_t	*ip,
+	xfs_fsize_t	new_size,
+	int		fork,
+	int		sync)
+{
+	xfs_fsblock_t	first_block;
+	xfs_fileoff_t	first_unmap_block;
+	xfs_fileoff_t	last_block;
+	xfs_filblks_t	unmap_len=0;
+	xfs_mount_t	*mp;
+	xfs_trans_t	*ntp;
+	int		done;
+	int		committed;
+	xfs_bmap_free_t	free_list;
+	int		error;
+
+	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
+	ASSERT(*tp != NULL);
+	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+	ASSERT(ip->i_transp == *tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+
+
+	ntp = *tp;
+	mp = (ntp)->t_mountp;
+	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
+
+	/*
+	 * We only support truncating the entire attribute fork.
+	 */
+	if (fork == XFS_ATTR_FORK) {
+		new_size = 0LL;
+	}
+	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
+	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
+	/*
+	 * The first thing we do is set the size to new_size permanently
+	 * on disk.  This way we don't have to worry about anyone ever
+	 * being able to look at the data being freed even in the face
+	 * of a crash.  What we're getting around here is the case where
+	 * we free a block, it is allocated to another file, it is written
+	 * to, and then we crash.  If the new data gets written to the
+	 * file but the log buffers containing the free and reallocation
+	 * don't, then we'd end up with garbage in the blocks being freed.
+	 * As long as we make the new_size permanent before actually
+	 * freeing any blocks it doesn't matter if they get written to.
+	 *
+	 * The callers must signal into us whether or not the size
+	 * setting here must be synchronous.  There are a few cases
+	 * where it doesn't have to be synchronous.  Those cases
+	 * occur if the file is unlinked and we know the unlink is
+	 * permanent or if the blocks being truncated are guaranteed
+	 * to be beyond the inode eof (regardless of the link count)
+	 * and the eof value is permanent.  Both of these cases occur
+	 * only on wsync-mounted filesystems.  In those cases, we're
+	 * guaranteed that no user will ever see the data in the blocks
+	 * that are being truncated so the truncate can run async.
+	 * In the free beyond eof case, the file may wind up with
+	 * more blocks allocated to it than it needs if we crash
+	 * and that won't get fixed until the next time the file
+	 * is re-opened and closed but that's ok as that shouldn't
+	 * be too many blocks.
+	 *
+	 * However, we can't just make all wsync xactions run async
+	 * because there's one call out of the create path that needs
+	 * to run sync: the one truncating an existing file whose size
+	 * is > 0 down to size 0.
+	 *
+	 * It's probably possible to come up with a test in this
+	 * routine that would correctly distinguish all the above
+	 * cases from the values of the function parameters and the
+	 * inode state but for sanity's sake, I've decided to let the
+	 * layers above just tell us.  It's simpler to correctly figure
+	 * out in the layer above exactly under what conditions we
+	 * can run async and I think it's easier for others to read and
+	 * follow the logic in case something has to be changed.
+	 * cscope is your friend -- rcc.
+	 *
+	 * The attribute fork is much simpler.
+	 *
+	 * For the attribute fork we allow the caller to tell us whether
+	 * the unlink of the inode that led to this call is yet permanent
+	 * in the on disk log.  If it is not and we will be freeing extents
+	 * in this inode then we make the first transaction synchronous
+	 * to make sure that the unlink is permanent by the time we free
+	 * the blocks.
+	 */
+	if (fork == XFS_DATA_FORK) {
+		if (ip->i_d.di_nextents > 0) {
+			ip->i_d.di_size = new_size;
+			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
+		}
+	} else if (sync) {
+		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
+		if (ip->i_d.di_anextents > 0)
+			xfs_trans_set_sync(ntp);
+	}
+	ASSERT(fork == XFS_DATA_FORK ||
+		(fork == XFS_ATTR_FORK &&
+			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
+			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
+
+	/*
+	 * Since it is possible for space to become allocated beyond
+	 * the end of the file (in a crash where the space is allocated
+	 * but the inode size is not yet updated), simply remove any
+	 * blocks which show up between the new EOF and the maximum
+	 * possible file size.  If the first block to be removed is
+	 * beyond the maximum file size (ie it is the same as last_block),
+	 * then there is nothing to do.
+	 */
+	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+	ASSERT(first_unmap_block <= last_block);
+	done = 0;
+	if (last_block == first_unmap_block) {
+		done = 1;
+	} else {
+		unmap_len = last_block - first_unmap_block + 1;
+	}
+	while (!done) {
+		/*
+		 * Free up to XFS_ITRUNC_MAX_EXTENTS extents.  xfs_bunmapi()
+		 * will tell us whether it freed the entire range or
+		 * not.  If this is a synchronous mount (wsync),
+		 * then we can tell bunmapi to keep all the
+		 * transactions asynchronous since the unlink
+		 * transaction that made this inode inactive has
+		 * already hit the disk.  There's no danger of
+		 * the freed blocks being reused, there being a
+		 * crash, and the reused blocks suddenly reappearing
+		 * in this file with garbage in them once recovery
+		 * runs.
+		 */
+		XFS_BMAP_INIT(&free_list, &first_block);
+		error = xfs_bunmapi(ntp, ip, first_unmap_block,
+				    unmap_len,
+				    XFS_BMAPI_AFLAG(fork) |
+				      (sync ? 0 : XFS_BMAPI_ASYNC),
+				    XFS_ITRUNC_MAX_EXTENTS,
+				    &first_block, &free_list, &done);
+		if (error) {
+			/*
+			 * If the bunmapi call encounters an error,
+			 * return to the caller where the transaction
+			 * can be properly aborted.  We just need to
+			 * make sure we're not holding any resources
+			 * that we were not when we came in.
+			 */
+			xfs_bmap_cancel(&free_list);
+			return error;
+		}
+
+		/*
+		 * Duplicate the transaction that has the permanent
+		 * reservation and commit the old transaction.
+		 */
+		error = xfs_bmap_finish(tp, &free_list, first_block,
+					&committed);
+		ntp = *tp;
+		if (error) {
+			/*
+			 * If the bmap finish call encounters an error,
+			 * return to the caller where the transaction
+			 * can be properly aborted.  We just need to
+			 * make sure we're not holding any resources
+			 * that we were not when we came in.
+			 *
+			 * Aborting from this point might lose some
+			 * blocks in the file system, but oh well.
+			 */
+			xfs_bmap_cancel(&free_list);
+			if (committed) {
+				/*
+				 * If the passed in transaction committed
+				 * in xfs_bmap_finish(), then we want to
+				 * add the inode to this one before returning.
+				 * This keeps things simple for the higher
+				 * level code, because it always knows that
+				 * the inode is locked and held in the
+				 * transaction that returns to it whether
+				 * errors occur or not.  We don't mark the
+				 * inode dirty so that this transaction can
+				 * be easily aborted if possible.
+				 */
+				xfs_trans_ijoin(ntp, ip,
+					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+				xfs_trans_ihold(ntp, ip);
+			}
+			return error;
+		}
+
+		if (committed) {
+			/*
+			 * The first xact was committed,
+			 * so add the inode to the new one.
+			 * Mark it dirty so it will be logged
+			 * and moved forward in the log as
+			 * part of every commit.
+			 */
+			xfs_trans_ijoin(ntp, ip,
+					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+			xfs_trans_ihold(ntp, ip);
+			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
+		}
+		ntp = xfs_trans_dup(ntp);
+		(void) xfs_trans_commit(*tp, 0, NULL);
+		*tp = ntp;
+		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+					  XFS_TRANS_PERM_LOG_RES,
+					  XFS_ITRUNCATE_LOG_COUNT);
+		/*
+		 * Add the inode being truncated to the next chained
+		 * transaction.
+		 */
+		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+		xfs_trans_ihold(ntp, ip);
+		if (error)
+			return (error);
+	}
+	/*
+	 * Only update the size in the case of the data fork, but
+	 * always re-log the inode so that our permanent transaction
+	 * can keep on rolling it forward in the log.
+	 */
+	if (fork == XFS_DATA_FORK) {
+		xfs_isize_check(mp, ip, new_size);
+		ip->i_d.di_size = new_size;
+	}
+	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
+	ASSERT((new_size != 0) ||
+	       (fork == XFS_ATTR_FORK) ||
+	       (ip->i_delayed_blks == 0));
+	ASSERT((new_size != 0) ||
+	       (fork == XFS_ATTR_FORK) ||
+	       (ip->i_d.di_nextents == 0));
+	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
+	return 0;
+}
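+
+/*
+ * Minimal caller-side sketch of the contract described above.  The
+ * transaction setup shown is an assumption based on the reservation
+ * constants used inside this routine; error handling is elided and the
+ * I/O lock is assumed to be held from before xfs_itruncate_start():
+ *
+ *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
+ *	tp = xfs_trans_alloc(mp, ...);
+ *	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+ *			  XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT);
+ *	xfs_ilock(ip, XFS_ILOCK_EXCL);
+ *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ *	xfs_trans_ihold(tp, ip);
+ *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, sync);
+ *	// on return, tp may point at a different (chained) transaction;
+ *	// the caller commits or cancels whatever came back.
+ */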
+
+
+/*
+ * xfs_igrow_start
+ *
+ * Do the first part of growing a file: zero any data in the last
+ * block that is beyond the old EOF.  We need to do this before
+ * the inode is joined to the transaction to modify the i_size.
+ * That way we can drop the inode lock and call into the buffer
+ * cache to get the buffer mapping the EOF.
+ */
+int
+xfs_igrow_start(
+	xfs_inode_t	*ip,
+	xfs_fsize_t	new_size,
+	cred_t		*credp)
+{
+	xfs_fsize_t	isize;
+	int		error;
+
+	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
+	ASSERT(new_size > ip->i_d.di_size);
+
+	error = 0;
+	isize = ip->i_d.di_size;
+	/*
+	 * Zero any pages that may have been created by
+	 * xfs_write_file() beyond the end of the file
+	 * and any blocks between the old and new file sizes.
+	 */
+	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize,
+				new_size);
+	return error;
+}
+
+/*
+ * xfs_igrow_finish
+ *
+ * This routine is called to extend the size of a file.
+ * The inode must have both the iolock and the ilock locked
+ * for update and it must be a part of the current transaction.
+ * The xfs_igrow_start() function must have been called previously.
+ * If the change_flag is not zero, the inode change timestamp will
+ * be updated.
+ */
+void
+xfs_igrow_finish(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_fsize_t	new_size,
+	int		change_flag)
+{
+	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
+	ASSERT(ip->i_transp == tp);
+	ASSERT(new_size > ip->i_d.di_size);
+
+	/*
+	 * Update the file size.  Update the inode change timestamp
+	 * if change_flag set.
+	 */
+	ip->i_d.di_size = new_size;
+	if (change_flag)
+		xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+}
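+
+/*
+ * Sketch of how the two xfs_igrow_*() halves above are meant to be
+ * paired (drawn from the comments above, not from a caller in this
+ * file):
+ *
+ *	// with the iolock held, before the inode joins a transaction
+ *	error = xfs_igrow_start(ip, new_size, credp);
+ *
+ *	// once the inode is joined to tp with both locks held for update
+ *	xfs_igrow_finish(tp, ip, new_size, 1);	// 1 => update ctime
+ */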
+
+
+/*
+ * This is called when the inode's link count goes to 0.
+ * We place the on-disk inode on a list in the AGI.  It
+ * will be pulled from this list when the inode is freed.
+ */
+int
+xfs_iunlink(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	xfs_mount_t	*mp;
+	xfs_agi_t	*agi;
+	xfs_dinode_t	*dip;
+	xfs_buf_t	*agibp;
+	xfs_buf_t	*ibp;
+	xfs_agnumber_t	agno;
+	xfs_daddr_t	agdaddr;
+	xfs_agino_t	agino;
+	short		bucket_index;
+	int		offset;
+	int		error;
+	int		agi_ok;
+
+	ASSERT(ip->i_d.di_nlink == 0);
+	ASSERT(ip->i_d.di_mode != 0);
+	ASSERT(ip->i_transp == tp);
+
+	mp = tp->t_mountp;
+
+	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
+
+	/*
+	 * Get the agi buffer first.  It ensures lock ordering
+	 * on the list.
+	 */
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
+				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Validate the magic number of the agi block.
+	 */
+	agi = XFS_BUF_TO_AGI(agibp);
+	agi_ok =
+		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
+			XFS_RANDOM_IUNLINK))) {
+		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
+		xfs_trans_brelse(tp, agibp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	/*
+	 * Get the index into the agi hash table for the
+	 * list this inode will go on.
+	 */
+	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+	ASSERT(agino != 0);
+	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+	ASSERT(agi->agi_unlinked[bucket_index]);
+	ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != agino);
+
+	if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO) {
+		/*
+		 * There is already another inode in the bucket we need
+		 * to add ourselves to.  Add us at the front of the list.
+		 * Here we put the head pointer into our next pointer,
+		 * and then we fall through to point the head at us.
+		 */
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+		if (error) {
+			return error;
+		}
+		ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO);
+		ASSERT(dip->di_next_unlinked);
+		/* both on-disk, don't endian flip twice */
+		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
+		offset = ip->i_boffset +
+			offsetof(xfs_dinode_t, di_next_unlinked);
+		xfs_trans_inode_buf(tp, ibp);
+		xfs_trans_log_buf(tp, ibp, offset,
+				  (offset + sizeof(xfs_agino_t) - 1));
+		xfs_inobp_check(mp, ibp);
+	}
+
+	/*
+	 * Point the bucket head pointer at the inode being inserted.
+	 */
+	ASSERT(agino != 0);
+	INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, agino);
+	offset = offsetof(xfs_agi_t, agi_unlinked) +
+		(sizeof(xfs_agino_t) * bucket_index);
+	xfs_trans_log_buf(tp, agibp, offset,
+			  (offset + sizeof(xfs_agino_t) - 1));
+	return 0;
+}
+
+/*
+ * Pull the on-disk inode from the AGI unlinked list.
+ */
+STATIC int
+xfs_iunlink_remove(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	xfs_ino_t	next_ino;
+	xfs_mount_t	*mp;
+	xfs_agi_t	*agi;
+	xfs_dinode_t	*dip;
+	xfs_buf_t	*agibp;
+	xfs_buf_t	*ibp;
+	xfs_agnumber_t	agno;
+	xfs_daddr_t	agdaddr;
+	xfs_agino_t	agino;
+	xfs_agino_t	next_agino;
+	xfs_buf_t	*last_ibp;
+	xfs_dinode_t	*last_dip;
+	short		bucket_index;
+	int		offset, last_offset;
+	int		error;
+	int		agi_ok;
+
+	/*
+	 * First pull the on-disk inode from the AGI unlinked list.
+	 */
+	mp = tp->t_mountp;
+
+	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
+
+	/*
+	 * Get the agi buffer first.  It ensures lock ordering
+	 * on the list.
+	 */
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
+				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
+	if (error) {
+		cmn_err(CE_WARN,
+			"xfs_iunlink_remove: xfs_trans_read_buf()  returned an error %d on %s.  Returning error.",
+			error, mp->m_fsname);
+		return error;
+	}
+	/*
+	 * Validate the magic number of the agi block.
+	 */
+	agi = XFS_BUF_TO_AGI(agibp);
+	agi_ok =
+		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
+			XFS_RANDOM_IUNLINK_REMOVE))) {
+		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
+				     mp, agi);
+		xfs_trans_brelse(tp, agibp);
+		cmn_err(CE_WARN,
+			"xfs_iunlink_remove: XFS_TEST_ERROR()  returned an error on %s.  Returning EFSCORRUPTED.",
+			 mp->m_fsname);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	/*
+	 * Get the index into the agi hash table for the
+	 * list this inode will go on.
+	 */
+	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+	ASSERT(agino != 0);
+	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+	ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO);
+	ASSERT(agi->agi_unlinked[bucket_index]);
+
+	if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) == agino) {
+		/*
+		 * We're at the head of the list.  Get the inode's
+		 * on-disk buffer to see if there is anyone after us
+		 * on the list.  Only modify our next pointer if it
+		 * is not already NULLAGINO.  This saves us the overhead
+		 * of dealing with the buffer when there is no need to
+		 * change it.
+		 */
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+		if (error) {
+			cmn_err(CE_WARN,
+				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
+				error, mp->m_fsname);
+			return error;
+		}
+		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
+		ASSERT(next_agino != 0);
+		if (next_agino != NULLAGINO) {
+			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
+			offset = ip->i_boffset +
+				offsetof(xfs_dinode_t, di_next_unlinked);
+			xfs_trans_inode_buf(tp, ibp);
+			xfs_trans_log_buf(tp, ibp, offset,
+					  (offset + sizeof(xfs_agino_t) - 1));
+			xfs_inobp_check(mp, ibp);
+		} else {
+			xfs_trans_brelse(tp, ibp);
+		}
+		/*
+		 * Point the bucket head pointer at the next inode.
+		 */
+		ASSERT(next_agino != 0);
+		ASSERT(next_agino != agino);
+		INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, next_agino);
+		offset = offsetof(xfs_agi_t, agi_unlinked) +
+			(sizeof(xfs_agino_t) * bucket_index);
+		xfs_trans_log_buf(tp, agibp, offset,
+				  (offset + sizeof(xfs_agino_t) - 1));
+	} else {
+		/*
+		 * We need to search the list for the inode being freed.
+		 */
+		next_agino = INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT);
+		last_ibp = NULL;
+		while (next_agino != agino) {
+			/*
+			 * If the last inode wasn't the one pointing to
+			 * us, then release its buffer since we're not
+			 * going to do anything with it.
+			 */
+			if (last_ibp != NULL) {
+				xfs_trans_brelse(tp, last_ibp);
+			}
+			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
+			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
+					    &last_ibp, &last_offset);
+			if (error) {
+				cmn_err(CE_WARN,
+			"xfs_iunlink_remove: xfs_inotobp()  returned an error %d on %s.  Returning error.",
+					error, mp->m_fsname);
+				return error;
+			}
+			next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT);
+			ASSERT(next_agino != NULLAGINO);
+			ASSERT(next_agino != 0);
+		}
+		/*
+		 * Now last_ibp points to the buffer previous to us on
+		 * the unlinked list.  Pull us from the list.
+		 */
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+		if (error) {
+			cmn_err(CE_WARN,
+				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
+				error, mp->m_fsname);
+			return error;
+		}
+		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
+		ASSERT(next_agino != 0);
+		ASSERT(next_agino != agino);
+		if (next_agino != NULLAGINO) {
+			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
+			offset = ip->i_boffset +
+				offsetof(xfs_dinode_t, di_next_unlinked);
+			xfs_trans_inode_buf(tp, ibp);
+			xfs_trans_log_buf(tp, ibp, offset,
+					  (offset + sizeof(xfs_agino_t) - 1));
+			xfs_inobp_check(mp, ibp);
+		} else {
+			xfs_trans_brelse(tp, ibp);
+		}
+		/*
+		 * Point the previous inode on the list to the next inode.
+		 */
+		INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
+		ASSERT(next_agino != 0);
+		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+		xfs_trans_inode_buf(tp, last_ibp);
+		xfs_trans_log_buf(tp, last_ibp, offset,
+				  (offset + sizeof(xfs_agino_t) - 1));
+		xfs_inobp_check(mp, last_ibp);
+	}
+	return 0;
+}
+
+static __inline__ int xfs_inode_clean(xfs_inode_t *ip)
+{
+	return (((ip->i_itemp == NULL) ||
+		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
+		(ip->i_update_core == 0));
+}
+
+void
+xfs_ifree_cluster(
+	xfs_inode_t	*free_ip,
+	xfs_trans_t	*tp,
+	xfs_ino_t	inum)
+{
+	xfs_mount_t		*mp = free_ip->i_mount;
+	int			blks_per_cluster;
+	int			nbufs;
+	int			ninodes;
+	int			i, j, found, pre_flushed;
+	xfs_daddr_t		blkno;
+	xfs_buf_t		*bp;
+	xfs_ihash_t		*ih;
+	xfs_inode_t		*ip, **ip_found;
+	xfs_inode_log_item_t	*iip;
+	xfs_log_item_t		*lip;
+	SPLDECL(s);
+
+	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
+		blks_per_cluster = 1;
+		ninodes = mp->m_sb.sb_inopblock;
+		nbufs = XFS_IALLOC_BLOCKS(mp);
+	} else {
+		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
+					mp->m_sb.sb_blocksize;
+		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
+		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
+	}
+
+	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
+
+	for (j = 0; j < nbufs; j++, inum += ninodes) {
+		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
+					 XFS_INO_TO_AGBNO(mp, inum));
+
+
+		/*
+		 * Look for each inode in memory and attempt to lock it;
+		 * we can be racing with flush and tail pushing here.
+		 * Any inode we get the locks on is added to an array of
+		 * inode items to process later.
+		 *
+		 * When we get the buffer lock, we could beat a flush
+		 * or tail pushing thread to the lock here, in which
+		 * case they will go looking for the inode buffer
+		 * and fail; we need some other form of interlock
+		 * here.
+		 */
+		found = 0;
+		for (i = 0; i < ninodes; i++) {
+			ih = XFS_IHASH(mp, inum + i);
+			read_lock(&ih->ih_lock);
+			for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
+				if (ip->i_ino == inum + i)
+					break;
+			}
+
+			/* Inode not in memory or we found it already,
+			 * nothing to do
+			 */
+			if (!ip || (ip->i_flags & XFS_ISTALE)) {
+				read_unlock(&ih->ih_lock);
+				continue;
+			}
+
+			if (xfs_inode_clean(ip)) {
+				read_unlock(&ih->ih_lock);
+				continue;
+			}
+
+			/* If we can get the locks then add it to the
+			 * list, otherwise by the time we get the bp lock
+			 * below it will already be attached to the
+			 * inode buffer.
+			 */
+
+			/* This inode will already be locked - by us; let's
+			 * keep it that way.
+			 */
+
+			if (ip == free_ip) {
+				if (xfs_iflock_nowait(ip)) {
+					ip->i_flags |= XFS_ISTALE;
+
+					if (xfs_inode_clean(ip)) {
+						xfs_ifunlock(ip);
+					} else {
+						ip_found[found++] = ip;
+					}
+				}
+				read_unlock(&ih->ih_lock);
+				continue;
+			}
+
+			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
+				if (xfs_iflock_nowait(ip)) {
+					ip->i_flags |= XFS_ISTALE;
+
+					if (xfs_inode_clean(ip)) {
+						xfs_ifunlock(ip);
+						xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					} else {
+						ip_found[found++] = ip;
+					}
+				} else {
+					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				}
+			}
+
+			read_unlock(&ih->ih_lock);
+		}
+
+		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 
+					mp->m_bsize * blks_per_cluster,
+					XFS_BUF_LOCK);
+
+		pre_flushed = 0;
+		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+		while (lip) {
+			if (lip->li_type == XFS_LI_INODE) {
+				iip = (xfs_inode_log_item_t *)lip;
+				ASSERT(iip->ili_logged == 1);
+				lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
+				AIL_LOCK(mp,s);
+				iip->ili_flush_lsn = iip->ili_item.li_lsn;
+				AIL_UNLOCK(mp, s);
+				iip->ili_inode->i_flags |= XFS_ISTALE;
+				pre_flushed++;
+			}
+			lip = lip->li_bio_list;
+		}
+
+		for (i = 0; i < found; i++) {
+			ip = ip_found[i];
+			iip = ip->i_itemp;
+
+			if (!iip) {
+				ip->i_update_core = 0;
+				xfs_ifunlock(ip);
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				continue;
+			}
+
+			iip->ili_last_fields = iip->ili_format.ilf_fields;
+			iip->ili_format.ilf_fields = 0;
+			iip->ili_logged = 1;
+			AIL_LOCK(mp,s);
+			iip->ili_flush_lsn = iip->ili_item.li_lsn;
+			AIL_UNLOCK(mp, s);
+
+			xfs_buf_attach_iodone(bp,
+				(void(*)(xfs_buf_t*,xfs_log_item_t*))
+				xfs_istale_done, (xfs_log_item_t *)iip);
+			if (ip != free_ip) {
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			}
+		}
+
+		if (found || pre_flushed)
+			xfs_trans_stale_inode_buf(tp, bp);
+		xfs_trans_binval(tp, bp);
+	}
+
+	kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
+}
+
+/*
+ * This is called to return an inode to the inode free list.
+ * The inode should already be truncated to 0 length and have
+ * no pages associated with it.  This routine also assumes that
+ * the inode is already a part of the transaction.
+ *
+ * The on-disk copy of the inode will have been added to the list
+ * of unlinked inodes in the AGI. We need to remove the inode from
+ * that list atomically with respect to freeing it here.
+ */
+int
+xfs_ifree(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	xfs_bmap_free_t	*flist)
+{
+	int			error;
+	int			delete;
+	xfs_ino_t		first_ino;
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_d.di_nlink == 0);
+	ASSERT(ip->i_d.di_nextents == 0);
+	ASSERT(ip->i_d.di_anextents == 0);
+	ASSERT((ip->i_d.di_size == 0) ||
+	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
+	ASSERT(ip->i_d.di_nblocks == 0);
+
+	/*
+	 * Pull the on-disk inode from the AGI unlinked list.
+	 */
+	error = xfs_iunlink_remove(tp, ip);
+	if (error != 0) {
+		return error;
+	}
+
+	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
+	if (error != 0) {
+		return error;
+	}
+	ip->i_d.di_mode = 0;		/* mark incore inode as free */
+	ip->i_d.di_flags = 0;
+	ip->i_d.di_dmevmask = 0;
+	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
+	ip->i_df.if_ext_max =
+		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
+	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+	/*
+	 * Bump the generation count so no one will be confused
+	 * by reincarnations of this inode.
+	 */
+	ip->i_d.di_gen++;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+	if (delete) {
+		xfs_ifree_cluster(ip, tp, first_ino);
+	}
+
+	return 0;
+}
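+
+/*
+ * Caller-side sketch of the preconditions listed above (an assumed
+ * inactive-path shape, not lifted from a caller in this file):
+ *
+ *	// inode already truncated to 0 length and joined to tp
+ *	XFS_BMAP_INIT(&free_list, &first_block);
+ *	error = xfs_ifree(tp, ip, &free_list);
+ *	// then xfs_bmap_finish() and the transaction commit as usual
+ */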
+
+/*
+ * Reallocate the space for if_broot based on the number of records
+ * being added or deleted as indicated in rec_diff.  Move the records
+ * and pointers in if_broot to fit the new size.  When shrinking this
+ * will eliminate holes between the records and pointers created by
+ * the caller.  When growing this will create holes to be filled in
+ * by the caller.
+ *
+ * The caller must not request to add more records than would fit in
+ * the on-disk inode root.  If the if_broot is currently NULL, then
+ * if we are adding records one will be allocated.  The caller must also
+ * not request that the number of records go below zero, although
+ * it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * rec_diff -- the change in the number of records, positive or negative,
+ *	 requested for the if_broot array.
+ */
+void
+xfs_iroot_realloc(
+	xfs_inode_t		*ip,
+	int			rec_diff,
+	int			whichfork)
+{
+	int			cur_max;
+	xfs_ifork_t		*ifp;
+	xfs_bmbt_block_t	*new_broot;
+	int			new_max;
+	size_t			new_size;
+	char			*np;
+	char			*op;
+
+	/*
+	 * Handle the degenerate case quietly.
+	 */
+	if (rec_diff == 0) {
+		return;
+	}
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (rec_diff > 0) {
+		/*
+		 * If there wasn't any memory allocated before, just
+		 * allocate it now and get out.
+		 */
+		if (ifp->if_broot_bytes == 0) {
+			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
+			ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
+								     KM_SLEEP);
+			ifp->if_broot_bytes = (int)new_size;
+			return;
+		}
+
+		/*
+		 * If there is already an existing if_broot, then we need
+		 * to realloc() it and shift the pointers to their new
+		 * location.  The records don't change location because
+		 * they are kept butted up against the btree block header.
+		 */
+		cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
+		new_max = cur_max + rec_diff;
+		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
+		ifp->if_broot = (xfs_bmbt_block_t *)
+		  kmem_realloc(ifp->if_broot,
+				new_size,
+				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
+				KM_SLEEP);
+		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
+						      ifp->if_broot_bytes);
+		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
+						      (int)new_size);
+		ifp->if_broot_bytes = (int)new_size;
+		ASSERT(ifp->if_broot_bytes <=
+			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
+		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
+		return;
+	}
+
+	/*
+	 * rec_diff is less than 0.  In this case, we are shrinking the
+	 * if_broot buffer.  It must already exist.  If we go to zero
+	 * records, just get rid of the root and clear the status bit.
+	 */
+	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
+	cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
+	new_max = cur_max + rec_diff;
+	ASSERT(new_max >= 0);
+	if (new_max > 0)
+		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
+	else
+		new_size = 0;
+	if (new_size > 0) {
+		new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
+		/*
+		 * First copy over the btree block header.
+		 */
+		memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
+	} else {
+		new_broot = NULL;
+		ifp->if_flags &= ~XFS_IFBROOT;
+	}
+
+	/*
+	 * Only copy the records and pointers if there are any.
+	 */
+	if (new_max > 0) {
+		/*
+		 * First copy the records.
+		 */
+		op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
+						     ifp->if_broot_bytes);
+		np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
+						     (int)new_size);
+		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
+
+		/*
+		 * Then copy the pointers.
+		 */
+		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
+						     ifp->if_broot_bytes);
+		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
+						     (int)new_size);
+		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
+	}
+	kmem_free(ifp->if_broot, ifp->if_broot_bytes);
+	ifp->if_broot = new_broot;
+	ifp->if_broot_bytes = (int)new_size;
+	ASSERT(ifp->if_broot_bytes <=
+		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
+	return;
+}
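+
+/*
+ * Example of the growth path above: a caller about to insert one
+ * record into the incore root does
+ *
+ *	xfs_iroot_realloc(ip, 1, whichfork);
+ *
+ * which reallocates if_broot to XFS_BMAP_BROOT_SPACE_CALC(cur_max + 1)
+ * bytes and slides the pointer array up, leaving a one-record hole
+ * for the caller to fill in.
+ */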
+
+
+/*
+ * This is called when the amount of space needed for if_extents
+ * is increased or decreased.  The change in size is indicated by
+ * the number of extents that need to be added or deleted in the
+ * ext_diff parameter.
+ *
+ * If the amount of space needed has decreased below the size of the
+ * inline buffer, then switch to using the inline buffer.  Otherwise,
+ * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
+ * to what is needed.
+ *
+ * ip -- the inode whose if_extents area is changing
+ * ext_diff -- the change in the number of extents, positive or negative,
+ *	 requested for the if_extents array.
+ */
+void
+xfs_iext_realloc(
+	xfs_inode_t	*ip,
+	int		ext_diff,
+	int		whichfork)
+{
+	int		byte_diff;
+	xfs_ifork_t	*ifp;
+	int		new_size;
+	uint		rnew_size;
+
+	if (ext_diff == 0) {
+		return;
+	}
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t);
+	new_size = (int)ifp->if_bytes + byte_diff;
+	ASSERT(new_size >= 0);
+
+	if (new_size == 0) {
+		if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) {
+			ASSERT(ifp->if_real_bytes != 0);
+			kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+		}
+		ifp->if_u1.if_extents = NULL;
+		rnew_size = 0;
+	} else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) {
+		/*
+		 * If the valid extents can fit in if_inline_ext,
+		 * copy them from the malloc'd vector and free it.
+		 */
+		if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) {
+			/*
+			 * For now, empty files are format EXTENTS,
+			 * so the if_extents pointer is null.
+			 */
+			if (ifp->if_u1.if_extents) {
+				memcpy(ifp->if_u2.if_inline_ext,
+					ifp->if_u1.if_extents, new_size);
+				kmem_free(ifp->if_u1.if_extents,
+					  ifp->if_real_bytes);
+			}
+			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
+		}
+		rnew_size = 0;
+	} else {
+		rnew_size = new_size;
+		if ((rnew_size & (rnew_size - 1)) != 0)
+			rnew_size = xfs_iroundup(rnew_size);
+		/*
+		 * Stuck with malloc/realloc.
+		 */
+		if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) {
+			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
+				kmem_alloc(rnew_size, KM_SLEEP);
+			memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
+			      sizeof(ifp->if_u2.if_inline_ext));
+		} else if (rnew_size != ifp->if_real_bytes) {
+			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
+			  kmem_realloc(ifp->if_u1.if_extents,
+					rnew_size,
+					ifp->if_real_bytes,
+					KM_NOFS);
+		}
+	}
+	ifp->if_real_bytes = rnew_size;
+	ifp->if_bytes = new_size;
+}
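+
+/*
+ * Illustrative numbers for the sizing policy above, assuming 16-byte
+ * extent records and a two-record if_inline_ext: growing a 2-extent
+ * inline fork by ext_diff == 3 gives new_size == 80 bytes, which no
+ * longer fits inline, so a heap buffer of xfs_iroundup(80) == 128
+ * bytes is allocated and the inline records are copied into it;
+ * if_real_bytes becomes 128 and if_bytes becomes 80.
+ */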
+
+
+/*
+ * This is called when the amount of space needed for if_data
+ * is increased or decreased.  The change in size is indicated by
+ * the number of bytes that need to be added or deleted in the
+ * byte_diff parameter.
+ *
+ * If the amount of space needed has decreased below the size of the
+ * inline buffer, then switch to using the inline buffer.  Otherwise,
+ * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
+ * to what is needed.
+ *
+ * ip -- the inode whose if_data area is changing
+ * byte_diff -- the change in the number of bytes, positive or negative,
+ *	 requested for the if_data array.
+ */
+void
+xfs_idata_realloc(
+	xfs_inode_t	*ip,
+	int		byte_diff,
+	int		whichfork)
+{
+	xfs_ifork_t	*ifp;
+	int		new_size;
+	int		real_size;
+
+	if (byte_diff == 0) {
+		return;
+	}
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	new_size = (int)ifp->if_bytes + byte_diff;
+	ASSERT(new_size >= 0);
+
+	if (new_size == 0) {
+		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+		}
+		ifp->if_u1.if_data = NULL;
+		real_size = 0;
+	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
+		/*
+		 * If the valid data can fit in if_inline_data,
+		 * copy it in from the malloc'd buffer and free that buffer.
+		 */
+		if (ifp->if_u1.if_data == NULL) {
+			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+			ASSERT(ifp->if_real_bytes != 0);
+			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
+			      new_size);
+			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+		}
+		real_size = 0;
+	} else {
+		/*
+		 * Stuck with malloc/realloc.
+		 * For inline data, the underlying buffer must be
+		 * a multiple of 4 bytes in size so that it can be
+		 * logged and stay on word boundaries.  We enforce
+		 * that here.
+		 */
+		real_size = roundup(new_size, 4);
+		if (ifp->if_u1.if_data == NULL) {
+			ASSERT(ifp->if_real_bytes == 0);
+			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+			/*
+			 * Only do the realloc if the underlying size
+			 * is really changing.
+			 */
+			if (ifp->if_real_bytes != real_size) {
+				ifp->if_u1.if_data =
+					kmem_realloc(ifp->if_u1.if_data,
+							real_size,
+							ifp->if_real_bytes,
+							KM_SLEEP);
+			}
+		} else {
+			ASSERT(ifp->if_real_bytes == 0);
+			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
+				ifp->if_bytes);
+		}
+	}
+	ifp->if_real_bytes = real_size;
+	ifp->if_bytes = new_size;
+	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
+}
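+
+/*
+ * Illustrative sketch, not part of the original source: a worked
+ * example of the inline/heap switch and the 4-byte rounding above,
+ * with XFS_INLINE_DATA == 32:
+ *
+ *	xfs_idata_realloc(ip, 10, XFS_DATA_FORK);
+ *		new_size = 10 -> fits in if_inline_data, real_size = 0
+ *	xfs_idata_realloc(ip, 25, XFS_DATA_FORK);
+ *		new_size = 35 -> heap, real_size = roundup(35, 4) = 36
+ *	xfs_idata_realloc(ip, -30, XFS_DATA_FORK);
+ *		new_size = 5 -> copied back into if_inline_data and the
+ *		heap buffer is freed, real_size = 0
+ */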
+
+
+
+
+/*
+ * Map inode to disk block and offset.
+ *
+ * mp -- the mount point structure for the current file system
+ * tp -- the current transaction
+ * ino -- the inode number of the inode to be located
+ * imap -- this structure is filled in with the information necessary
+ *	 to retrieve the given inode from disk
+ * flags -- flags to pass to xfs_dilocate indicating whether or not
+ *	 lookups in the inode btree are OK
+ */
+int
+xfs_imap(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	xfs_imap_t	*imap,
+	uint		flags)
+{
+	xfs_fsblock_t	fsbno;
+	int		len;
+	int		off;
+	int		error;
+
+	fsbno = imap->im_blkno ?
+		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
+	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
+	if (error != 0) {
+		return error;
+	}
+	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
+	imap->im_len = XFS_FSB_TO_BB(mp, len);
+	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
+	imap->im_ioffset = (ushort)off;
+	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
+	return 0;
+}
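+
+/*
+ * Illustrative sketch, not part of the original source: a caller that
+ * wants the buffer holding an inode typically does something like
+ *
+ *	imap.im_blkno = 0;
+ *	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
+ *	if (!error)
+ *		read imap.im_len BBs starting at daddr imap.im_blkno
+ *
+ * and then finds the inode itself at byte offset imap.im_boffset
+ * within that buffer, which is just im_ioffset (the index of the
+ * inode within the chunk) shifted up by the inode size (sb_inodelog).
+ */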
+
+void
+xfs_idestroy_fork(
+	xfs_inode_t	*ip,
+	int		whichfork)
+{
+	xfs_ifork_t	*ifp;
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (ifp->if_broot != NULL) {
+		kmem_free(ifp->if_broot, ifp->if_broot_bytes);
+		ifp->if_broot = NULL;
+	}
+
+	/*
+	 * If the format is local, then we can't have an extents
+	 * array so just look for an inline data array.  If we're
+	 * not local then we may or may not have an extents list,
+	 * so check and free it up if we do.
+	 */
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
+		    (ifp->if_u1.if_data != NULL)) {
+			ASSERT(ifp->if_real_bytes != 0);
+			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+			ifp->if_u1.if_data = NULL;
+			ifp->if_real_bytes = 0;
+		}
+	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
+		   (ifp->if_u1.if_extents != NULL) &&
+		   (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) {
+		ASSERT(ifp->if_real_bytes != 0);
+		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+		ifp->if_u1.if_extents = NULL;
+		ifp->if_real_bytes = 0;
+	}
+	ASSERT(ifp->if_u1.if_extents == NULL ||
+	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
+	ASSERT(ifp->if_real_bytes == 0);
+	if (whichfork == XFS_ATTR_FORK) {
+		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+		ip->i_afp = NULL;
+	}
+}
+
+/*
+ * This is called to free all the memory associated with an inode.
+ * It must free the inode itself and any buffers allocated for
+ * if_extents/if_data and if_broot.  It must also free the lock
+ * associated with the inode.
+ */
+void
+xfs_idestroy(
+	xfs_inode_t	*ip)
+{
+
+	switch (ip->i_d.di_mode & S_IFMT) {
+	case S_IFREG:
+	case S_IFDIR:
+	case S_IFLNK:
+		xfs_idestroy_fork(ip, XFS_DATA_FORK);
+		break;
+	}
+	if (ip->i_afp)
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+	mrfree(&ip->i_lock);
+	mrfree(&ip->i_iolock);
+	freesema(&ip->i_flock);
+#ifdef XFS_BMAP_TRACE
+	ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BMBT_TRACE
+	ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+	ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+	ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+	ktrace_free(ip->i_dir_trace);
+#endif
+	if (ip->i_itemp) {
+		/* XXXdpd should be able to assert this but shutdown
+		 * is leaving the AIL behind. */
+		ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
+		       XFS_FORCED_SHUTDOWN(ip->i_mount));
+		xfs_inode_item_destroy(ip);
+	}
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
+
+/*
+ * Increment the pin count of the given inode.
+ * This value is protected by ipinlock spinlock in the mount structure.
+ */
+void
+xfs_ipin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	atomic_inc(&ip->i_pincount);
+}
+
+/*
+ * Decrement the pin count of the given inode, and wake up
+ * anyone in xfs_iunpin_wait() if the count goes to 0.  The
+ * inode must have been previously pinned with a call to xfs_ipin().
+ */
+void
+xfs_iunpin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(atomic_read(&ip->i_pincount) > 0);
+
+	if (atomic_dec_and_test(&ip->i_pincount)) {
+		vnode_t	*vp = XFS_ITOV_NULL(ip);
+
+		/* make sync come back and flush this inode */
+		if (vp) {
+			struct inode	*inode = LINVFS_GET_IP(vp);
+
+			if (!(inode->i_state & I_NEW))
+				mark_inode_dirty_sync(inode);
+		}
+
+		wake_up(&ip->i_ipin_wait);
+	}
+}
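+
+/*
+ * Illustrative sketch, not part of the original source: the pin count
+ * is a reference count meaning "this inode has changes that so far
+ * exist only in the in-core log".  A typical life cycle is
+ *
+ *	xfs_ipin(ip);		pincount 0 -> 1 when the inode's
+ *				transaction is committed to the in-core log
+ *	...
+ *	xfs_iunpin(ip);		pincount 1 -> 0 once the log buffer
+ *				covering the change reaches disk; waiters
+ *				on i_ipin_wait are woken here
+ *
+ * xfs_iflush() must not write the inode back while the count is
+ * non-zero, which is what xfs_iunpin_wait() below enforces.
+ */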
+
+/*
+ * This is called to wait for the given inode to be unpinned.
+ * It will sleep until this happens.  The caller must have the
+ * inode locked in at least shared mode so that the inode cannot
+ * be subsequently pinned once someone is waiting for it to be
+ * unpinned.
+ */
+void
+xfs_iunpin_wait(
+	xfs_inode_t	*ip)
+{
+	xfs_inode_log_item_t	*iip;
+	xfs_lsn_t	lsn;
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
+
+	if (atomic_read(&ip->i_pincount) == 0) {
+		return;
+	}
+
+	iip = ip->i_itemp;
+	if (iip && iip->ili_last_lsn) {
+		lsn = iip->ili_last_lsn;
+	} else {
+		lsn = (xfs_lsn_t)0;
+	}
+
+	/*
+	 * Give the log a push so we don't wait here too long.
+	 */
+	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
+
+	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
+}
+
+
+/*
+ * xfs_iextents_copy()
+ *
+ * This is called to copy the REAL extents (as opposed to the delayed
+ * allocation extents) from the inode into the given buffer.  It
+ * returns the number of bytes copied into the buffer.
+ *
+ * If there are no delayed allocation extents, then we can just
+ * memcpy() the extents into the buffer.  Otherwise, we need to
+ * examine each extent in turn and skip those which are delayed.
+ */
+int
+xfs_iextents_copy(
+	xfs_inode_t		*ip,
+	xfs_bmbt_rec_t		*buffer,
+	int			whichfork)
+{
+	int			copied;
+	xfs_bmbt_rec_t		*dest_ep;
+	xfs_bmbt_rec_t		*ep;
+#ifdef XFS_BMAP_TRACE
+	static char		fname[] = "xfs_iextents_copy";
+#endif
+	int			i;
+	xfs_ifork_t		*ifp;
+	int			nrecs;
+	xfs_fsblock_t		start_block;
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+	ASSERT(ifp->if_bytes > 0);
+
+	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
+	ASSERT(nrecs > 0);
+
+	/*
+	 * Copy the extents from the inode one at a time,
+	 * skipping any delayed allocation extents.  There
+	 * must be at least one non-delayed extent.
+	 */
+	ep = ifp->if_u1.if_extents;
+	dest_ep = buffer;
+	copied = 0;
+	for (i = 0; i < nrecs; i++) {
+		start_block = xfs_bmbt_get_startblock(ep);
+		if (ISNULLSTARTBLOCK(start_block)) {
+			/*
+			 * It's a delayed allocation extent, so skip it.
+			 */
+			ep++;
+			continue;
+		}
+
+		/* Translate to on disk format */
+		put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
+			      (__uint64_t*)&dest_ep->l0);
+		put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
+			      (__uint64_t*)&dest_ep->l1);
+		dest_ep++;
+		ep++;
+		copied++;
+	}
+	ASSERT(copied != 0);
+	xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip));
+
+	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
+}
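+
+/*
+ * Illustrative note, not part of the original source: the byte count
+ * returned above matters because the copied area may be shorter than
+ * if_bytes once delayed extents are skipped.  A caller building a log
+ * vector would do roughly
+ *
+ *	vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ *	vecp->i_len = xfs_iextents_copy(ip, ext_buffer, XFS_DATA_FORK);
+ *
+ * so that only the real, on-disk extents are logged.
+ */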
+
+/*
+ * Each of the following cases stores data into the same region
+ * of the on-disk inode, so only one of them can be valid at
+ * any given time. While it is possible to have conflicting formats
+ * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
+ * in EXTENTS format, this can only happen when the fork has
+ * changed formats after being modified but before being flushed.
+ * In these cases, the format always takes precedence, because the
+ * format indicates the current state of the fork.
+ */
+/*ARGSUSED*/
+STATIC int
+xfs_iflush_fork(
+	xfs_inode_t		*ip,
+	xfs_dinode_t		*dip,
+	xfs_inode_log_item_t	*iip,
+	int			whichfork,
+	xfs_buf_t		*bp)
+{
+	char			*cp;
+	xfs_ifork_t		*ifp;
+	xfs_mount_t		*mp;
+#ifdef XFS_TRANS_DEBUG
+	int			first;
+#endif
+	static const short	brootflag[2] =
+		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
+	static const short	dataflag[2] =
+		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
+	static const short	extflag[2] =
+		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
+
+	if (iip == NULL)
+		return 0;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	/*
+	 * This can happen if we gave up in iformat in an error path,
+	 * for the attribute fork.
+	 */
+	if (ifp == NULL) {
+		ASSERT(whichfork == XFS_ATTR_FORK);
+		return 0;
+	}
+	cp = XFS_DFORK_PTR(dip, whichfork);
+	mp = ip->i_mount;
+	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+	case XFS_DINODE_FMT_LOCAL:
+		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
+		    (ifp->if_bytes > 0)) {
+			ASSERT(ifp->if_u1.if_data != NULL);
+			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
+			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
+		}
+		if (whichfork == XFS_DATA_FORK) {
+			if (unlikely(XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp, dip))) {
+				XFS_ERROR_REPORT("xfs_iflush_fork",
+						 XFS_ERRLEVEL_LOW, mp);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+		}
+		break;
+
+	case XFS_DINODE_FMT_EXTENTS:
+		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
+		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
+		ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0));
+		ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0));
+		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
+		    (ifp->if_bytes > 0)) {
+			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
+			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
+				whichfork);
+		}
+		break;
+
+	case XFS_DINODE_FMT_BTREE:
+		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
+		    (ifp->if_broot_bytes > 0)) {
+			ASSERT(ifp->if_broot != NULL);
+			ASSERT(ifp->if_broot_bytes <=
+			       (XFS_IFORK_SIZE(ip, whichfork) +
+				XFS_BROOT_SIZE_ADJ));
+			xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
+				(xfs_bmdr_block_t *)cp,
+				XFS_DFORK_SIZE(dip, mp, whichfork));
+		}
+		break;
+
+	case XFS_DINODE_FMT_DEV:
+		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
+			ASSERT(whichfork == XFS_DATA_FORK);
+			INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev);
+		}
+		break;
+
+	case XFS_DINODE_FMT_UUID:
+		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
+			ASSERT(whichfork == XFS_DATA_FORK);
+			memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
+				sizeof(uuid_t));
+		}
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * xfs_iflush() will write a modified inode's changes out to the
+ * inode's on disk home.  The caller must have the inode lock held
+ * in at least shared mode and the inode flush semaphore must be
+ * held as well.  The inode lock will still be held upon return from
+ * the call and the caller is free to unlock it.
+ * The inode flush lock will be unlocked when the inode reaches the disk.
+ * The flags indicate how the inode's buffer should be written out.
+ */
+int
+xfs_iflush(
+	xfs_inode_t		*ip,
+	uint			flags)
+{
+	xfs_inode_log_item_t	*iip;
+	xfs_buf_t		*bp;
+	xfs_dinode_t		*dip;
+	xfs_mount_t		*mp;
+	int			error;
+	/* REFERENCED */
+	xfs_chash_t		*ch;
+	xfs_inode_t		*iq;
+	int			clcount;	/* count of inodes clustered */
+	int			bufwasdelwri;
+	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
+	SPLDECL(s);
+
+	XFS_STATS_INC(xs_iflush_count);
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+	ASSERT(valusema(&ip->i_flock) <= 0);
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
+
+	iip = ip->i_itemp;
+	mp = ip->i_mount;
+
+	/*
+	 * If the inode isn't dirty, then just release the inode
+	 * flush lock and do nothing.
+	 */
+	if ((ip->i_update_core == 0) &&
+	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
+		ASSERT((iip != NULL) ?
+			 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
+		xfs_ifunlock(ip);
+		return 0;
+	}
+
+	/*
+	 * We can't flush the inode until it is unpinned, so
+	 * wait for it.  We know no one new can pin it, because
+	 * we are holding the inode lock shared and you need
+	 * to hold it exclusively to pin the inode.
+	 */
+	xfs_iunpin_wait(ip);
+
+	/*
+	 * This may have been unpinned because the filesystem is shutting
+	 * down forcibly. If that's the case we must not write this inode
+	 * to disk, because the log record didn't make it to disk!
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		ip->i_update_core = 0;
+		if (iip)
+			iip->ili_format.ilf_fields = 0;
+		xfs_ifunlock(ip);
+		return XFS_ERROR(EIO);
+	}
+
+	/*
+	 * Get the buffer containing the on-disk inode.
+	 */
+	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0);
+	if (error != 0) {
+		xfs_ifunlock(ip);
+		return error;
+	}
+
+	/*
+	 * Decide how buffer will be flushed out.  This is done before
+	 * the call to xfs_iflush_int because this field is zeroed by it.
+	 */
+	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
+		/*
+		 * Flush out the inode buffer according to the directions
+		 * of the caller.  In the cases where the caller has given
+		 * of the caller.  In the cases where the caller has given
+		 * us a choice, choose the non-delwri case.  This is because
+		 */
+		switch (flags) {
+		case XFS_IFLUSH_SYNC:
+		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
+			flags = 0;
+			break;
+		case XFS_IFLUSH_ASYNC:
+		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
+			flags = INT_ASYNC;
+			break;
+		case XFS_IFLUSH_DELWRI:
+			flags = INT_DELWRI;
+			break;
+		default:
+			ASSERT(0);
+			flags = 0;
+			break;
+		}
+	} else {
+		switch (flags) {
+		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
+		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
+		case XFS_IFLUSH_DELWRI:
+			flags = INT_DELWRI;
+			break;
+		case XFS_IFLUSH_ASYNC:
+			flags = INT_ASYNC;
+			break;
+		case XFS_IFLUSH_SYNC:
+			flags = 0;
+			break;
+		default:
+			ASSERT(0);
+			flags = 0;
+			break;
+		}
+	}
+
+	/*
+	 * First flush out the inode that xfs_iflush was called with.
+	 */
+	error = xfs_iflush_int(ip, bp);
+	if (error) {
+		goto corrupt_out;
+	}
+
+	/*
+	 * inode clustering:
+	 * see if other inodes can be gathered into this write
+	 */
+
+	ip->i_chash->chl_buf = bp;
+
+	ch = XFS_CHASH(mp, ip->i_blkno);
+	s = mutex_spinlock(&ch->ch_lock);
+
+	clcount = 0;
+	for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
+		/*
+		 * Do an un-protected check to see if the inode is dirty and
+		 * is a candidate for flushing.  These checks will be repeated
+		 * later after the appropriate locks are acquired.
+		 */
+		iip = iq->i_itemp;
+		if ((iq->i_update_core == 0) &&
+		    ((iip == NULL) ||
+		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
+		      xfs_ipincount(iq) == 0) {
+			continue;
+		}
+
+		/*
+		 * Try to get locks.  If any are unavailable,
+		 * then this inode cannot be flushed and is skipped.
+		 */
+
+		/* get inode locks (just i_lock) */
+		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
+			/* get inode flush lock */
+			if (xfs_iflock_nowait(iq)) {
+				/* check if pinned */
+				if (xfs_ipincount(iq) == 0) {
+					/* arriving here means that
+					 * this inode can be flushed.
+					 * first re-check that it's
+					 * dirty
+					 */
+					iip = iq->i_itemp;
+					if ((iq->i_update_core != 0)||
+					    ((iip != NULL) &&
+					     (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
+						clcount++;
+						error = xfs_iflush_int(iq, bp);
+						if (error) {
+							xfs_iunlock(iq,
+								    XFS_ILOCK_SHARED);
+							goto cluster_corrupt_out;
+						}
+					} else {
+						xfs_ifunlock(iq);
+					}
+				} else {
+					xfs_ifunlock(iq);
+				}
+			}
+			xfs_iunlock(iq, XFS_ILOCK_SHARED);
+		}
+	}
+	mutex_spinunlock(&ch->ch_lock, s);
+
+	if (clcount) {
+		XFS_STATS_INC(xs_icluster_flushcnt);
+		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
+	}
+
+	/*
+	 * If the buffer is pinned then push on the log so we won't
+	 * get stuck waiting in the write for too long.
+	 */
+	if (XFS_BUF_ISPINNED(bp)) {
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+	}
+
+	if (flags & INT_DELWRI) {
+		xfs_bdwrite(mp, bp);
+	} else if (flags & INT_ASYNC) {
+		xfs_bawrite(mp, bp);
+	} else {
+		error = xfs_bwrite(mp, bp);
+	}
+	return error;
+
+corrupt_out:
+	xfs_buf_relse(bp);
+	xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+	xfs_iflush_abort(ip);
+	/*
+	 * Unlocks the flush lock
+	 */
+	return XFS_ERROR(EFSCORRUPTED);
+
+cluster_corrupt_out:
+	/* Corruption detected in the clustering loop.  Invalidate the
+	 * inode buffer and shut down the filesystem.
+	 */
+	mutex_spinunlock(&ch->ch_lock, s);
+
+	/*
+	 * Clean up the buffer.  If it was B_DELWRI, just release it --
+	 * brelse can handle it with no problems.  If not, shut down the
+	 * filesystem before releasing the buffer.
+	 */
+	if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
+		xfs_buf_relse(bp);
+	}
+
+	xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+
+	if (!bufwasdelwri) {
+		/*
+		 * Just like incore_relse: if we have b_iodone functions,
+		 * mark the buffer as an error and call them.  Otherwise
+		 * mark it as stale and brelse.
+		 */
+		if (XFS_BUF_IODONE_FUNC(bp)) {
+			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+			XFS_BUF_UNDONE(bp);
+			XFS_BUF_STALE(bp);
+			XFS_BUF_SHUT(bp);
+			XFS_BUF_ERROR(bp,EIO);
+			xfs_biodone(bp);
+		} else {
+			XFS_BUF_STALE(bp);
+			xfs_buf_relse(bp);
+		}
+	}
+
+	xfs_iflush_abort(iq);
+	/*
+	 * Unlocks the flush lock
+	 */
+	return XFS_ERROR(EFSCORRUPTED);
+}
+
+
+STATIC int
+xfs_iflush_int(
+	xfs_inode_t		*ip,
+	xfs_buf_t		*bp)
+{
+	xfs_inode_log_item_t	*iip;
+	xfs_dinode_t		*dip;
+	xfs_mount_t		*mp;
+#ifdef XFS_TRANS_DEBUG
+	int			first;
+#endif
+	SPLDECL(s);
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+	ASSERT(valusema(&ip->i_flock) <= 0);
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
+
+	iip = ip->i_itemp;
+	mp = ip->i_mount;
+
+
+	/*
+	 * If the inode isn't dirty, then just release the inode
+	 * flush lock and do nothing.
+	 */
+	if ((ip->i_update_core == 0) &&
+	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
+		xfs_ifunlock(ip);
+		return 0;
+	}
+
+	/* set *dip = inode's place in the buffer */
+	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
+
+	/*
+	 * Clear i_update_core before copying out the data.
+	 * This is for coordination with our timestamp updates
+	 * that don't hold the inode lock. They will always
+	 * update the timestamps BEFORE setting i_update_core,
+	 * so if we clear i_update_core after they set it we
+	 * are guaranteed to see their updates to the timestamps.
+	 * I believe that this depends on strongly ordered memory
+	 * semantics, but we have that.  We use the SYNCHRONIZE
+	 * macro to make sure that the compiler does not reorder
+	 * the i_update_core access below the data copy below.
+	 */
+	ip->i_update_core = 0;
+	SYNCHRONIZE();
+
+	if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
+			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
+		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+		    "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
+			ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip);
+		goto corrupt_out;
+	}
+	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
+				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
+		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+			"xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
+			ip->i_ino, ip, ip->i_d.di_magic);
+		goto corrupt_out;
+	}
+	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
+		if (XFS_TEST_ERROR(
+		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
+		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
+		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
+			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+				"xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
+				ip->i_ino, ip);
+			goto corrupt_out;
+		}
+	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+		if (XFS_TEST_ERROR(
+		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
+		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
+		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
+		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
+			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+				"xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
+				ip->i_ino, ip);
+			goto corrupt_out;
+		}
+	}
+	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
+				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
+				XFS_RANDOM_IFLUSH_5)) {
+		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+			"xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
+			ip->i_ino,
+			ip->i_d.di_nextents + ip->i_d.di_anextents,
+			ip->i_d.di_nblocks,
+			ip);
+		goto corrupt_out;
+	}
+	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
+				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
+		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
+			"xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
+			ip->i_ino, ip->i_d.di_forkoff, ip);
+		goto corrupt_out;
+	}
+	/*
+	 * bump the flush iteration count, used to detect flushes which
+	 * postdate a log record during recovery.
+	 */
+
+	ip->i_d.di_flushiter++;
+
+	/*
+	 * Copy the dirty parts of the inode into the on-disk
+	 * inode.  We always copy out the core of the inode,
+	 * because if the inode is dirty at all the core must
+	 * be.
+	 */
+	xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1);
+
+	/* Wrap, we never let the log put out DI_MAX_FLUSH */
+	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
+		ip->i_d.di_flushiter = 0;
+
+	/*
+	 * If this is really an old format inode and the superblock version
+	 * has not been updated to support only new format inodes, then
+	 * convert back to the old inode format.  If the superblock version
+	 * has been updated, then make the conversion permanent.
+	 */
+	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
+	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
+	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
+		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
+			/*
+			 * Convert it back.
+			 */
+			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
+			INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink);
+		} else {
+			/*
+			 * The superblock version has already been bumped,
+			 * so just make the conversion to the new inode
+			 * format permanent.
+			 */
+			ip->i_d.di_version = XFS_DINODE_VERSION_2;
+			INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
+			ip->i_d.di_onlink = 0;
+			dip->di_core.di_onlink = 0;
+			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+			memset(&(dip->di_core.di_pad[0]), 0,
+			      sizeof(dip->di_core.di_pad));
+			ASSERT(ip->i_d.di_projid == 0);
+		}
+	}
+
+	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
+		goto corrupt_out;
+	}
+
+	if (XFS_IFORK_Q(ip)) {
+		/*
+		 * The only error from xfs_iflush_fork is on the data fork.
+		 */
+		(void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
+	}
+	xfs_inobp_check(mp, bp);
+
+	/*
+	 * We've recorded everything logged in the inode, so we'd
+	 * like to clear the ilf_fields bits so we don't log and
+	 * flush things unnecessarily.  However, we can't stop
+	 * logging all this information until the data we've copied
+	 * into the disk buffer is written to disk.  If we did we might
+	 * overwrite the copy of the inode in the log with all the
+	 * data after re-logging only part of it, and in the face of
+	 * a crash we wouldn't have all the data we need to recover.
+	 *
+	 * What we do is move the bits to the ili_last_fields field.
+	 * When logging the inode, these bits are moved back to the
+	 * ilf_fields field.  In the xfs_iflush_done() routine we
+	 * clear ili_last_fields, since we know that the information
+	 * those bits represent is permanently on disk.  As long as
+	 * the flush completes before the inode is logged again, then
+	 * both ilf_fields and ili_last_fields will be cleared.
+	 *
+	 * We can play with the ilf_fields bits here, because the inode
+	 * lock must be held exclusively in order to set bits there
+	 * and the flush lock protects the ili_last_fields bits.
+	 * Set ili_logged so the flush done
+	 * routine can tell whether or not to look in the AIL.
+	 * Also, store the current LSN of the inode so that we can tell
+	 * whether the item has moved in the AIL from xfs_iflush_done().
+	 * In order to read the lsn we need the AIL lock, because
+	 * it is a 64 bit value that cannot be read atomically.
+	 */
+	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
+		iip->ili_last_fields = iip->ili_format.ilf_fields;
+		iip->ili_format.ilf_fields = 0;
+		iip->ili_logged = 1;
+
+		ASSERT(sizeof(xfs_lsn_t) == 8);	/* don't lock if it shrinks */
+		AIL_LOCK(mp,s);
+		iip->ili_flush_lsn = iip->ili_item.li_lsn;
+		AIL_UNLOCK(mp, s);
+
+		/*
+		 * Attach the function xfs_iflush_done to the inode's
+		 * buffer.  This will remove the inode from the AIL
+		 * and unlock the inode's flush lock when the inode is
+		 * completely written to disk.
+		 */
+		xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
+				      xfs_iflush_done, (xfs_log_item_t *)iip);
+
+		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+		ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
+	} else {
+		/*
+		 * We're flushing an inode which is not in the AIL and has
+		 * not been logged but has i_update_core set.  For this
+		 * case we can use a B_DELWRI flush and immediately drop
+		 * the inode flush lock because we can avoid the whole
+		 * AIL state thing.  It's OK to drop the flush lock now,
+		 * because we've already locked the buffer and to do anything
+		 * you really need both.
+		 */
+		if (iip != NULL) {
+			ASSERT(iip->ili_logged == 0);
+			ASSERT(iip->ili_last_fields == 0);
+			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
+		}
+		xfs_ifunlock(ip);
+	}
+
+	return 0;
+
+corrupt_out:
+	return XFS_ERROR(EFSCORRUPTED);
+}
+
+
+/*
+ * Flush all inactive inodes in mp.  Return true if no user references
+ * were found, false otherwise.
+ */
+int
+xfs_iflush_all(
+	xfs_mount_t	*mp,
+	int		flag)
+{
+	int		busy;
+	int		done;
+	int		purged;
+	xfs_inode_t	*ip;
+	vmap_t		vmap;
+	vnode_t		*vp;
+
+	busy = done = 0;
+	while (!done) {
+		purged = 0;
+		XFS_MOUNT_ILOCK(mp);
+		ip = mp->m_inodes;
+		if (ip == NULL) {
+			break;
+		}
+		do {
+			/* Make sure we skip markers inserted by sync */
+			if (ip->i_mount == NULL) {
+				ip = ip->i_mnext;
+				continue;
+			}
+
+			/*
+			 * It's up to our caller to purge the root
+			 * and quota vnodes later.
+			 */
+			vp = XFS_ITOV_NULL(ip);
+
+			if (!vp) {
+				XFS_MOUNT_IUNLOCK(mp);
+				xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
+				purged = 1;
+				break;
+			}
+
+			if (vn_count(vp) != 0) {
+				if (vn_count(vp) == 1 &&
+				    (ip == mp->m_rootip ||
+				     (mp->m_quotainfo &&
+				      (ip->i_ino == mp->m_sb.sb_uquotino ||
+				       ip->i_ino == mp->m_sb.sb_gquotino)))) {
+
+					ip = ip->i_mnext;
+					continue;
+				}
+				if (!(flag & XFS_FLUSH_ALL)) {
+					busy = 1;
+					done = 1;
+					break;
+				}
+				/*
+				 * Ignore busy inodes but continue flushing
+				 * others.
+				 */
+				ip = ip->i_mnext;
+				continue;
+			}
+			/*
+			 * Sample the vp mapping while holding the mount
+			 * lock on MP systems, so we don't purge a
+			 * reclaimed or nonexistent vnode.  We break from
+			 * the loop because we know we modify the inode
+			 * list by pulling ourselves from it in
+			 * xfs_reclaim(), called via vn_purge() below.
+			 * The purged flag set here tells the code below
+			 * whether we left the loop early or reached the
+			 * end of the list.
+			 */
+			VMAP(vp, vmap);
+			XFS_MOUNT_IUNLOCK(mp);
+
+			vn_purge(vp, &vmap);
+
+			purged = 1;
+			break;
+		} while (ip != mp->m_inodes);
+		/*
+		 * We need to distinguish between when we exit the loop
+		 * after a purge and when we simply hit the end of the
+		 * list.  We can't use the (ip == mp->m_inodes) test,
+		 * because when we purge an inode at the start of the list
+		 * the next inode on the list becomes mp->m_inodes.  That
+		 * would cause such a test to bail out early.  The purged
+		 * variable tells us how we got out of the loop.
+		 */
+		if (!purged) {
+			done = 1;
+		}
+	}
+	XFS_MOUNT_IUNLOCK(mp);
+	return !busy;
+}
+
+
+/*
+ * xfs_iaccess: check accessibility of inode for mode.
+ */
+int
+xfs_iaccess(
+	xfs_inode_t	*ip,
+	mode_t		mode,
+	cred_t		*cr)
+{
+	int		error;
+	mode_t		orgmode = mode;
+	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
+
+	if (mode & S_IWUSR) {
+		umode_t		imode = inode->i_mode;
+
+		if (IS_RDONLY(inode) &&
+		    (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
+			return XFS_ERROR(EROFS);
+
+		if (IS_IMMUTABLE(inode))
+			return XFS_ERROR(EACCES);
+	}
+
+	/*
+	 * If there's an Access Control List it's used instead of
+	 * the mode bits.
+	 */
+	if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
+		return error ? XFS_ERROR(error) : 0;
+
+	if (current_fsuid(cr) != ip->i_d.di_uid) {
+		mode >>= 3;
+		if (!in_group_p((gid_t)ip->i_d.di_gid))
+			mode >>= 3;
+	}
+
+	/*
+	 * If the DACs are ok we don't need any capability check.
+	 */
+	if ((ip->i_d.di_mode & mode) == mode)
+		return 0;
+	/*
+	 * Read/write DACs are always overridable.
+	 * Executable DACs are overridable if at least one exec bit is set.
+	 */
+	if (!(orgmode & S_IXUSR) ||
+	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
+		if (capable_cred(cr, CAP_DAC_OVERRIDE))
+			return 0;
+
+	if ((orgmode == S_IRUSR) ||
+	    (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
+		if (capable_cred(cr, CAP_DAC_READ_SEARCH))
+			return 0;
+#ifdef	NOISE
+		cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
+#endif	/* NOISE */
+		return XFS_ERROR(EACCES);
+	}
+	return XFS_ERROR(EACCES);
+}
+
+/*
+ * xfs_iroundup: round up argument to next power of two
+ */
+uint
+xfs_iroundup(
+	uint	v)
+{
+	int i;
+	uint m;
+
+	if ((v & (v - 1)) == 0)
+		return v;
+	ASSERT((v & 0x80000000) == 0);
+	if ((v & (v + 1)) == 0)
+		return v + 1;
+	for (i = 0, m = 1; i < 31; i++, m <<= 1) {
+		if (v & m)
+			continue;
+		v |= m;
+		if ((v & (v + 1)) == 0)
+			return v + 1;
+	}
+	ASSERT(0);
+	return 0;
+}
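+
+/*
+ * Illustrative examples, not part of the original source:
+ *
+ *	xfs_iroundup(1)    == 1		already a power of two
+ *	xfs_iroundup(3)    == 4		v + 1 is a power of two
+ *	xfs_iroundup(80)   == 128
+ *	xfs_iroundup(4096) == 4096
+ *
+ * The loop only has to set low-order zero bits until v + 1 becomes a
+ * power of two, so with the high bit clear (asserted above) it always
+ * returns before reaching the final ASSERT(0).
+ */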
+
+/*
+ * Change the requested timestamp in the given inode.
+ * We don't lock across timestamp updates, and we don't log them but
+ * we do record the fact that there is dirty information in core.
+ *
+ * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
+ *		with XFS_ICHGTIME_ACC to be sure that access time
+ *		update will take.  Calling first with XFS_ICHGTIME_ACC
+ *		and then XFS_ICHGTIME_MOD may fail to modify the access
+ *		timestamp if the filesystem is mounted noatime.
+ */
+void
+xfs_ichgtime(xfs_inode_t *ip,
+	     int flags)
+{
+	timespec_t	tv;
+	vnode_t		*vp = XFS_ITOV(ip);
+	struct inode	*inode = LINVFS_GET_IP(vp);
+
+	/*
+	 * We're not supposed to change timestamps in readonly-mounted
+	 * filesystems.  Throw it away if anyone asks us.
+	 */
+	if (unlikely(vp->v_vfsp->vfs_flag & VFS_RDONLY))
+		return;
+
+	/*
+	 * Don't update access timestamps on reads if mounted "noatime"
+	 * Throw it away if anyone asks us.
+	 */
+	if ((ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
+	    ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG))
+			== XFS_ICHGTIME_ACC))
+		return;
+
+	nanotime(&tv);
+	if (flags & XFS_ICHGTIME_MOD) {
+		VN_MTIMESET(vp, &tv);
+		ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
+		ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+	}
+	if (flags & XFS_ICHGTIME_ACC) {
+		VN_ATIMESET(vp, &tv);
+		ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
+		ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
+	}
+	if (flags & XFS_ICHGTIME_CHG) {
+		VN_CTIMESET(vp, &tv);
+		ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
+		ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
+	}
+
+	/*
+	 * We update the i_update_core field _after_ changing
+	 * the timestamps in order to coordinate properly with
+	 * xfs_iflush() so that we don't lose timestamp updates.
+	 * This keeps us from having to hold the inode lock
+	 * while doing this.  We use the SYNCHRONIZE macro to
+	 * ensure that the compiler does not reorder the update
+	 * of i_update_core above the timestamp updates above.
+	 */
+	SYNCHRONIZE();
+	ip->i_update_core = 1;
+	if (!(inode->i_state & I_LOCK))
+		mark_inode_dirty_sync(inode);
+}
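+
+/*
+ * Illustrative usage, not part of the original source, following the
+ * NOTE above about combining the flags in one call:
+ *
+ *	xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ *		update mtime and ctime together after a data write
+ *
+ *	xfs_ichgtime(ip, XFS_ICHGTIME_ACC | XFS_ICHGTIME_MOD);
+ *		touch atime as part of an operation that also modifies
+ *		the file; a bare XFS_ICHGTIME_ACC call is discarded on
+ *		noatime mounts by the check at the top of the function
+ */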
+
+#ifdef XFS_ILOCK_TRACE
+ktrace_t	*xfs_ilock_trace_buf;
+
+void
+xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
+{
+	ktrace_enter(ip->i_lock_trace,
+		     (void *)ip,
+		     (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
+		     (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
+		     (void *)ra,		/* caller of ilock */
+		     (void *)(unsigned long)current_cpu(),
+		     (void *)(unsigned long)current_pid(),
+		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
+}
+#endif
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
new file mode 100644
index 0000000..a53b1cc
--- /dev/null
+++ b/fs/xfs/xfs_inode.h
@@ -0,0 +1,554 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_INODE_H__
+#define	__XFS_INODE_H__
+
+/*
+ * File incore extent information, present for each of data & attr forks.
+ */
+#define	XFS_INLINE_EXTS	2
+#define	XFS_INLINE_DATA	32
+typedef struct xfs_ifork {
+	int			if_bytes;	/* bytes in if_u1 */
+	int			if_real_bytes;	/* bytes allocated in if_u1 */
+	xfs_bmbt_block_t	*if_broot;	/* file's incore btree root */
+	short			if_broot_bytes;	/* bytes allocated for root */
+	unsigned char		if_flags;	/* per-fork flags */
+	unsigned char		if_ext_max;	/* max # of extent records */
+	xfs_extnum_t		if_lastex;	/* last if_extents used */
+	union {
+		xfs_bmbt_rec_t	*if_extents;	/* linear map file exts */
+		char		*if_data;	/* inline file data */
+	} if_u1;
+	union {
+		xfs_bmbt_rec_t	if_inline_ext[XFS_INLINE_EXTS];
+						/* very small file extents */
+		char		if_inline_data[XFS_INLINE_DATA];
+						/* very small file data */
+		xfs_dev_t	if_rdev;	/* dev number if special */
+		uuid_t		if_uuid;	/* mount point value */
+	} if_u2;
+} xfs_ifork_t;
+
+/*
+ * Flags for xfs_ichgtime().
+ */
+#define	XFS_ICHGTIME_MOD	0x1	/* data fork modification timestamp */
+#define	XFS_ICHGTIME_ACC	0x2	/* data fork access timestamp */
+#define	XFS_ICHGTIME_CHG	0x4	/* inode field change timestamp */
+
+/*
+ * Per-fork incore inode flags.
+ */
+#define	XFS_IFINLINE	0x0001	/* Inline data is read in */
+#define	XFS_IFEXTENTS	0x0002	/* All extent pointers are read in */
+#define	XFS_IFBROOT	0x0004	/* i_broot points to the bmap b-tree root */
+
+/*
+ * Flags for xfs_imap() and xfs_dilocate().
+ */
+#define	XFS_IMAP_LOOKUP		0x1
+
+/*
+ * Maximum number of extent pointers in if_u1.if_extents.
+ */
+#define	XFS_MAX_INCORE_EXTENTS	32768
+
+
+#ifdef __KERNEL__
+struct bhv_desc;
+struct cred;
+struct ktrace;
+struct vnode;
+struct xfs_buf;
+struct xfs_bmap_free;
+struct xfs_bmbt_irec;
+struct xfs_bmbt_block;
+struct xfs_inode;
+struct xfs_inode_log_item;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_dquot;
+
+#if defined(XFS_ILOCK_TRACE)
+#define XFS_ILOCK_KTRACE_SIZE	32
+extern ktrace_t *xfs_ilock_trace_buf;
+extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *);
+#else
+#define	xfs_ilock_trace(i,n,f,ra)
+#endif
+
+/*
+ * This structure is used to communicate which extents of a file
+ * were holes when a write started from xfs_write_file() to
+ * xfs_strat_read().  This is necessary so that we can know which
+ * blocks need to be zeroed when they are read in in xfs_strat_read()
+ * if they weren't allocated when the buffer given to xfs_strat_read()
+ * was mapped.
+ *
+ * We keep a list of these attached to the inode.  The list is
+ * protected by the inode lock and the fact that the io lock is
+ * held exclusively by writers.
+ */
+typedef struct xfs_gap {
+	struct xfs_gap	*xg_next;
+	xfs_fileoff_t	xg_offset_fsb;
+	xfs_extlen_t	xg_count_fsb;
+} xfs_gap_t;
+
+typedef struct dm_attrs_s {
+	__uint32_t	da_dmevmask;	/* DMIG event mask */
+	__uint16_t	da_dmstate;	/* DMIG state info */
+	__uint16_t	da_pad;		/* DMIG extra padding */
+} dm_attrs_t;
+
+typedef struct xfs_iocore {
+	void			*io_obj;	/* pointer to container
+						 * inode or dcxvn structure */
+	struct xfs_mount	*io_mount;	/* fs mount struct ptr */
+#ifdef DEBUG
+	mrlock_t		*io_lock;	/* inode lock */
+	mrlock_t		*io_iolock;	/* inode IO lock */
+#endif
+
+	/* I/O state */
+	xfs_fsize_t		io_new_size;	/* sz when write completes */
+
+	/* Miscellaneous state. */
+	unsigned int		io_flags;	/* IO related flags */
+
+	/* DMAPI state */
+	dm_attrs_t		io_dmattrs;
+
+} xfs_iocore_t;
+
+#define        io_dmevmask     io_dmattrs.da_dmevmask
+#define        io_dmstate      io_dmattrs.da_dmstate
+
+#define XFS_IO_INODE(io)	((xfs_inode_t *) ((io)->io_obj))
+#define XFS_IO_DCXVN(io)	((dcxvn_t *) ((io)->io_obj))
+
+/*
+ * Flags in the flags field
+ */
+
+#define XFS_IOCORE_RT		0x1
+
+/*
+ * xfs_iocore prototypes
+ */
+
+extern void xfs_iocore_inode_init(struct xfs_inode *);
+extern void xfs_iocore_inode_reinit(struct xfs_inode *);
+
+
+/*
+ * This is the type used in the xfs inode hash table.
+ * An array of these is allocated for each mounted
+ * file system to hash the inodes for that file system.
+ */
+typedef struct xfs_ihash {
+	struct xfs_inode	*ih_next;
+	rwlock_t		ih_lock;
+	uint			ih_version;
+} xfs_ihash_t;
+
+#define XFS_IHASH(mp,ino) ((mp)->m_ihash + (((uint)(ino)) % (mp)->m_ihsize))
+
+/*
+ * This is the xfs inode cluster hash.  This hash is used by xfs_iflush to
+ * find inodes that share a cluster and can be flushed to disk at the same
+ * time.
+ */
+typedef struct xfs_chashlist {
+	struct xfs_chashlist	*chl_next;
+	struct xfs_inode	*chl_ip;
+	xfs_daddr_t		chl_blkno;	/* starting block number of
+						 * the cluster */
+	struct xfs_buf		*chl_buf;	/* the inode buffer */
+} xfs_chashlist_t;
+
+typedef struct xfs_chash {
+	xfs_chashlist_t		*ch_list;
+	lock_t			ch_lock;
+} xfs_chash_t;
+
+#define XFS_CHASH(mp,blk) ((mp)->m_chash + (((uint)blk) % (mp)->m_chsize))
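+
+/*
+ * Illustrative note, not part of the original source: both hashes are
+ * simple modulo tables sized at mount time, e.g.
+ *
+ *	ih = XFS_IHASH(mp, ino);	bucket = ino   % m_ihsize
+ *	ch = XFS_CHASH(mp, blkno);	bucket = blkno % m_chsize
+ *
+ * The inode hash maps an inode number to its in-core xfs_inode, while
+ * the cluster hash groups in-core inodes that share an inode cluster
+ * buffer so xfs_iflush() can write them out in a single buffer I/O.
+ */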
+
+
+/*
+ * This is the xfs in-core inode structure.
+ * Most of the on-disk inode is embedded in the i_d field.
+ *
+ * The extent pointers/inline file space, however, are managed
+ * separately.  The memory for this information is pointed to by
+ * the if_u1 unions depending on the type of the data.
+ * This linearizes the array of extents for fast in-core access,
+ * and is used until the file's number of extents surpasses
+ * XFS_MAX_INCORE_EXTENTS, at which point all extent pointers
+ * are accessed through the buffer cache.
+ *
+ * Other state kept in the in-core inode is used for identification,
+ * locking, transactional updating, etc of the inode.
+ *
+ * Generally, we do not want to acquire the i_iolock while already
+ * holding the i_lock.  The locking hierarchy is i_iolock followed
+ * by i_lock.
+ *
+ * xfs_iptr_t contains all the inode fields upto and including the
+ * i_mnext and i_mprev fields, it is used as a marker in the inode
+ * chain off the mount structure by xfs_sync calls.
+ */
+
+typedef struct {
+	struct xfs_ihash	*ip_hash;	/* pointer to hash header */
+	struct xfs_inode	*ip_next;	/* inode hash link forw */
+	struct xfs_inode	*ip_mnext;	/* next inode in mount list */
+	struct xfs_inode	*ip_mprev;	/* ptr to prev inode */
+	struct xfs_inode	**ip_prevp;	/* ptr to prev i_next */
+	struct xfs_mount	*ip_mount;	/* fs mount struct ptr */
+} xfs_iptr_t;
+
+typedef struct xfs_inode {
+	/* Inode linking and identification information. */
+	struct xfs_ihash	*i_hash;	/* pointer to hash header */
+	struct xfs_inode	*i_next;	/* inode hash link forw */
+	struct xfs_inode	*i_mnext;	/* next inode in mount list */
+	struct xfs_inode	*i_mprev;	/* ptr to prev inode */
+	struct xfs_inode	**i_prevp;	/* ptr to prev i_next */
+	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
+	struct list_head	i_reclaim;	/* reclaim list */
+	struct bhv_desc		i_bhv_desc;	/* inode behavior descriptor*/
+	struct xfs_dquot	*i_udquot;	/* user dquot */
+	struct xfs_dquot	*i_gdquot;	/* group dquot */
+
+	/* Inode location stuff */
+	xfs_ino_t		i_ino;		/* inode number (agno/agino)*/
+	xfs_daddr_t		i_blkno;	/* blkno of inode buffer */
+	ushort			i_len;		/* len of inode buffer */
+	ushort			i_boffset;	/* off of inode in buffer */
+
+	/* Extent information. */
+	xfs_ifork_t		*i_afp;		/* attribute fork pointer */
+	xfs_ifork_t		i_df;		/* data fork */
+
+	/* Transaction and locking information. */
+	struct xfs_trans	*i_transp;	/* ptr to owning transaction*/
+	struct xfs_inode_log_item *i_itemp;	/* logging information */
+	mrlock_t		i_lock;		/* inode lock */
+	mrlock_t		i_iolock;	/* inode IO lock */
+	sema_t			i_flock;	/* inode flush lock */
+	atomic_t		i_pincount;	/* inode pin count */
+	wait_queue_head_t	i_ipin_wait;	/* inode pinning wait queue */
+#ifdef HAVE_REFCACHE
+	struct xfs_inode	**i_refcache;	/* ptr to entry in ref cache */
+	struct xfs_inode	*i_release;	/* inode to unref */
+#endif
+	/* I/O state */
+	xfs_iocore_t		i_iocore;	/* I/O core */
+
+	/* Miscellaneous state. */
+	unsigned short		i_flags;	/* see defined flags below */
+	unsigned char		i_update_core;	/* timestamps/size is dirty */
+	unsigned char		i_update_size;	/* di_size field is dirty */
+	unsigned int		i_gen;		/* generation count */
+	unsigned int		i_delayed_blks;	/* count of delay alloc blks */
+
+	xfs_dinode_core_t	i_d;		/* most of ondisk inode */
+	xfs_chashlist_t		*i_chash;	/* cluster hash list header */
+	struct xfs_inode	*i_cnext;	/* cluster hash link forward */
+	struct xfs_inode	*i_cprev;	/* cluster hash link backward */
+
+	/* Trace buffers per inode. */
+#ifdef XFS_BMAP_TRACE
+	struct ktrace		*i_xtrace;	/* inode extent list trace */
+#endif
+#ifdef XFS_BMBT_TRACE
+	struct ktrace		*i_btrace;	/* inode bmap btree trace */
+#endif
+#ifdef XFS_RW_TRACE
+	struct ktrace		*i_rwtrace;	/* inode read/write trace */
+#endif
+#ifdef XFS_ILOCK_TRACE
+	struct ktrace		*i_lock_trace;	/* inode lock/unlock trace */
+#endif
+#ifdef XFS_DIR2_TRACE
+	struct ktrace		*i_dir_trace;	/* inode directory trace */
+#endif
+} xfs_inode_t;
+
+#endif	/* __KERNEL__ */
+
+
+/*
+ * Fork handling.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_PTR)
+xfs_ifork_t *xfs_ifork_ptr(xfs_inode_t *ip, int w);
+#define	XFS_IFORK_PTR(ip,w)		xfs_ifork_ptr(ip,w)
+#else
+#define	XFS_IFORK_PTR(ip,w)   ((w) == XFS_DATA_FORK ? &(ip)->i_df : (ip)->i_afp)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_Q)
+int xfs_ifork_q(xfs_inode_t *ip);
+#define	XFS_IFORK_Q(ip)			xfs_ifork_q(ip)
+#else
+#define	XFS_IFORK_Q(ip)			XFS_CFORK_Q(&(ip)->i_d)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_DSIZE)
+int xfs_ifork_dsize(xfs_inode_t *ip);
+#define	XFS_IFORK_DSIZE(ip)		xfs_ifork_dsize(ip)
+#else
+#define	XFS_IFORK_DSIZE(ip)		XFS_CFORK_DSIZE(&ip->i_d, ip->i_mount)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_ASIZE)
+int xfs_ifork_asize(xfs_inode_t *ip);
+#define	XFS_IFORK_ASIZE(ip)		xfs_ifork_asize(ip)
+#else
+#define	XFS_IFORK_ASIZE(ip)		XFS_CFORK_ASIZE(&ip->i_d, ip->i_mount)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_SIZE)
+int xfs_ifork_size(xfs_inode_t *ip, int w);
+#define	XFS_IFORK_SIZE(ip,w)		xfs_ifork_size(ip,w)
+#else
+#define	XFS_IFORK_SIZE(ip,w)		XFS_CFORK_SIZE(&ip->i_d, ip->i_mount, w)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FORMAT)
+int xfs_ifork_format(xfs_inode_t *ip, int w);
+#define	XFS_IFORK_FORMAT(ip,w)		xfs_ifork_format(ip,w)
+#else
+#define	XFS_IFORK_FORMAT(ip,w)		XFS_CFORK_FORMAT(&ip->i_d, w)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FMT_SET)
+void xfs_ifork_fmt_set(xfs_inode_t *ip, int w, int n);
+#define	XFS_IFORK_FMT_SET(ip,w,n)	xfs_ifork_fmt_set(ip,w,n)
+#else
+#define	XFS_IFORK_FMT_SET(ip,w,n)	XFS_CFORK_FMT_SET(&ip->i_d, w, n)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXTENTS)
+int xfs_ifork_nextents(xfs_inode_t *ip, int w);
+#define	XFS_IFORK_NEXTENTS(ip,w)	xfs_ifork_nextents(ip,w)
+#else
+#define	XFS_IFORK_NEXTENTS(ip,w)	XFS_CFORK_NEXTENTS(&ip->i_d, w)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXT_SET)
+void xfs_ifork_next_set(xfs_inode_t *ip, int w, int n);
+#define	XFS_IFORK_NEXT_SET(ip,w,n)	xfs_ifork_next_set(ip,w,n)
+#else
+#define	XFS_IFORK_NEXT_SET(ip,w,n)	XFS_CFORK_NEXT_SET(&ip->i_d, w, n)
+#endif
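+
+/*
+ * Illustrative usage, not part of the original source: fork-generic
+ * code is written against these macros instead of touching i_df or
+ * i_afp directly, e.g.
+ *
+ *	xfs_ifork_t	*ifp = XFS_IFORK_PTR(ip, whichfork);
+ *
+ *	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+ *	    XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max)
+ *		convert the fork to btree format
+ *
+ * so the same routine works for both the data and the attribute fork.
+ */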
+
+
+#ifdef __KERNEL__
+
+/*
+ * In-core inode flags.
+ */
+#define XFS_IGRIO	0x0001  /* inode used for guaranteed rate i/o */
+#define XFS_IUIOSZ	0x0002  /* inode i/o sizes have been explicitly set */
+#define XFS_IQUIESCE    0x0004  /* we have started quiescing for this inode */
+#define XFS_IRECLAIM    0x0008  /* we have started reclaiming this inode    */
+#define XFS_ISTALE	0x0010	/* inode has been staled */
+#define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */
+#define XFS_INEW	0x0040
+
+/*
+ * Flags for inode locking.
+ */
+#define	XFS_IOLOCK_EXCL		0x001
+#define	XFS_IOLOCK_SHARED	0x002
+#define	XFS_ILOCK_EXCL		0x004
+#define	XFS_ILOCK_SHARED	0x008
+#define	XFS_IUNLOCK_NONOTIFY	0x010
+#define XFS_EXTENT_TOKEN_RD	0x040
+#define XFS_SIZE_TOKEN_RD	0x080
+#define XFS_EXTSIZE_RD		(XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD)
+#define XFS_WILLLEND		0x100	/* Always acquire tokens for lending */
+#define XFS_EXTENT_TOKEN_WR	(XFS_EXTENT_TOKEN_RD | XFS_WILLLEND)
+#define XFS_SIZE_TOKEN_WR       (XFS_SIZE_TOKEN_RD | XFS_WILLLEND)
+#define XFS_EXTSIZE_WR		(XFS_EXTSIZE_RD | XFS_WILLLEND)
+
+
+#define XFS_LOCK_MASK	\
+	(XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL | \
+	 XFS_ILOCK_SHARED | XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD | \
+	 XFS_WILLLEND)
+
+/*
+ * Flags for xfs_iflush()
+ */
+#define	XFS_IFLUSH_DELWRI_ELSE_SYNC	1
+#define	XFS_IFLUSH_DELWRI_ELSE_ASYNC	2
+#define	XFS_IFLUSH_SYNC			3
+#define	XFS_IFLUSH_ASYNC		4
+#define	XFS_IFLUSH_DELWRI		5
+
+/*
+ * Flags for xfs_iflush_all.
+ */
+#define	XFS_FLUSH_ALL		0x1
+
+/*
+ * Flags for xfs_itruncate_start().
+ */
+#define	XFS_ITRUNC_DEFINITE	0x1
+#define	XFS_ITRUNC_MAYBE	0x2
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOV)
+struct vnode *xfs_itov(xfs_inode_t *ip);
+#define	XFS_ITOV(ip)		xfs_itov(ip)
+#else
+#define	XFS_ITOV(ip)		BHV_TO_VNODE(XFS_ITOBHV(ip))
+#endif
+#define	XFS_ITOV_NULL(ip)	BHV_TO_VNODE_NULL(XFS_ITOBHV(ip))
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOBHV)
+struct bhv_desc *xfs_itobhv(xfs_inode_t *ip);
+#define	XFS_ITOBHV(ip)		xfs_itobhv(ip)
+#else
+#define	XFS_ITOBHV(ip)		((struct bhv_desc *)(&((ip)->i_bhv_desc)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOI)
+xfs_inode_t *xfs_bhvtoi(struct bhv_desc *bhvp);
+#define	XFS_BHVTOI(bhvp)	xfs_bhvtoi(bhvp)
+#else
+#define	XFS_BHVTOI(bhvp)	\
+	((xfs_inode_t *)((char *)(bhvp) - \
+			 (char *)&(((xfs_inode_t *)0)->i_bhv_desc)))
+#endif
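+
+/*
+ * Illustrative note, not part of the original source: XFS_BHVTOI() is
+ * the usual "container of" trick -- given a pointer to the i_bhv_desc
+ * member embedded in an xfs_inode, it subtracts that member's offset
+ * to recover the enclosing inode.  Vnode operations typically start
+ * with
+ *
+ *	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+ *
+ * while XFS_ITOBHV() goes the other way, from the inode to its
+ * behavior descriptor.
+ */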
+
+#define BHV_IS_XFS(bdp)		(BHV_OPS(bdp) == &xfs_vnodeops)
+
+/*
+ * For multiple groups support: if the S_ISGID bit is set in the parent
+ * directory, the group of a new file is set to that of the parent, and
+ * a new subdirectory inherits the S_ISGID bit from the parent.
+ */
+#define XFS_INHERIT_GID(pip, vfsp)	\
+	(((vfsp)->vfs_flag & VFS_GRPID) || ((pip)->i_d.di_mode & S_ISGID))
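+
+/*
+ * Illustrative sketch, not part of the original source: inode creation
+ * uses the check roughly as
+ *
+ *	if (XFS_INHERIT_GID(pip, vfsp))
+ *		ip->i_d.di_gid = pip->i_d.di_gid;
+ *	else
+ *		ip->i_d.di_gid = current_fsgid(cr);
+ *
+ * i.e. BSD-style group inheritance when the filesystem is mounted
+ * with grpid or when the parent directory has S_ISGID set.
+ */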
+
+/*
+ * xfs_iget.c prototypes.
+ */
+
+#define IGET_CREATE	1
+
+void		xfs_ihash_init(struct xfs_mount *);
+void		xfs_ihash_free(struct xfs_mount *);
+void		xfs_chash_init(struct xfs_mount *);
+void		xfs_chash_free(struct xfs_mount *);
+xfs_inode_t	*xfs_inode_incore(struct xfs_mount *, xfs_ino_t,
+				  struct xfs_trans *);
+void            xfs_inode_lock_init(xfs_inode_t *, struct vnode *);
+int		xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
+			 uint, uint, xfs_inode_t **, xfs_daddr_t);
+void		xfs_iput(xfs_inode_t *, uint);
+void		xfs_iput_new(xfs_inode_t *, uint);
+void		xfs_ilock(xfs_inode_t *, uint);
+int		xfs_ilock_nowait(xfs_inode_t *, uint);
+void		xfs_iunlock(xfs_inode_t *, uint);
+void		xfs_ilock_demote(xfs_inode_t *, uint);
+void		xfs_iflock(xfs_inode_t *);
+int		xfs_iflock_nowait(xfs_inode_t *);
+uint		xfs_ilock_map_shared(xfs_inode_t *);
+void		xfs_iunlock_map_shared(xfs_inode_t *, uint);
+void		xfs_ifunlock(xfs_inode_t *);
+void		xfs_ireclaim(xfs_inode_t *);
+int		xfs_finish_reclaim(xfs_inode_t *, int, int);
+int		xfs_finish_reclaim_all(struct xfs_mount *, int);
+
+/*
+ * xfs_inode.c prototypes.
+ */
+int		xfs_inotobp(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
+			    xfs_dinode_t **, struct xfs_buf **, int *);
+int		xfs_itobp(struct xfs_mount *, struct xfs_trans *,
+			  xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **,
+			  xfs_daddr_t);
+int		xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
+			  xfs_inode_t **, xfs_daddr_t);
+int		xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
+int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, nlink_t,
+			   xfs_dev_t, struct cred *, xfs_prid_t, int,
+			   struct xfs_buf **, boolean_t *, xfs_inode_t **);
+void		xfs_xlate_dinode_core(xfs_caddr_t, struct xfs_dinode_core *,
+					int);
+uint		xfs_ip2xflags(struct xfs_inode *);
+uint		xfs_dic2xflags(struct xfs_dinode_core *);
+int		xfs_ifree(struct xfs_trans *, xfs_inode_t *,
+			   struct xfs_bmap_free *);
+void		xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t);
+int		xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *,
+				     xfs_fsize_t, int, int);
+int		xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
+int		xfs_igrow_start(xfs_inode_t *, xfs_fsize_t, struct cred *);
+void		xfs_igrow_finish(struct xfs_trans *, xfs_inode_t *,
+				 xfs_fsize_t, int);
+
+void		xfs_idestroy_fork(xfs_inode_t *, int);
+void		xfs_idestroy(xfs_inode_t *);
+void		xfs_idata_realloc(xfs_inode_t *, int, int);
+void		xfs_iextract(xfs_inode_t *);
+void		xfs_iext_realloc(xfs_inode_t *, int, int);
+void		xfs_iroot_realloc(xfs_inode_t *, int, int);
+void		xfs_ipin(xfs_inode_t *);
+void		xfs_iunpin(xfs_inode_t *);
+int		xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int);
+int		xfs_iflush(xfs_inode_t *, uint);
+int		xfs_iflush_all(struct xfs_mount *, int);
+int		xfs_iaccess(xfs_inode_t *, mode_t, cred_t *);
+uint		xfs_iroundup(uint);
+void		xfs_ichgtime(xfs_inode_t *, int);
+xfs_fsize_t	xfs_file_last_byte(xfs_inode_t *);
+void		xfs_lock_inodes(xfs_inode_t **, int, int, uint);
+
+#define xfs_ipincount(ip)	((unsigned int) atomic_read(&ip->i_pincount))
+
+#ifdef DEBUG
+void		xfs_isize_check(struct xfs_mount *, xfs_inode_t *, xfs_fsize_t);
+#else	/* DEBUG */
+#define xfs_isize_check(mp, ip, isize)
+#endif	/* DEBUG */
+
+#if defined(DEBUG)
+void		xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
+#else
+#define	xfs_inobp_check(mp, bp)
+#endif /* DEBUG */
+
+extern struct kmem_zone	*xfs_chashlist_zone;
+extern struct kmem_zone	*xfs_ifork_zone;
+extern struct kmem_zone	*xfs_inode_zone;
+extern struct kmem_zone	*xfs_ili_zone;
+extern struct vnodeops	xfs_vnodeops;
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
new file mode 100644
index 0000000..768cb18
--- /dev/null
+++ b/fs/xfs/xfs_inode_item.c
@@ -0,0 +1,1092 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains the implementation of the xfs_inode_log_item.
+ * It contains the item operations used to manipulate the inode log
+ * items as well as utility routines used by the inode specific
+ * transaction routines.
+ */
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_ag.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_rw.h"
+
+
+kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */
+
+/*
+ * This returns the number of iovecs needed to log the given inode item.
+ *
+ * We need one iovec for the inode log format structure, one for the
+ * inode core, and possibly one for the inode data/extents/b-tree root
+ * and one for the inode attribute data/extents/b-tree root.
+ */
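+/*
+ * For example (illustrative): an inode with an extents-format data fork
+ * whose extents are being logged and no attribute fork needs three
+ * iovecs (format, core, data extents); logging inline attribute data as
+ * well would bring that to four.
+ */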
+STATIC uint
+xfs_inode_item_size(
+	xfs_inode_log_item_t	*iip)
+{
+	uint		nvecs;
+	xfs_inode_t	*ip;
+
+	ip = iip->ili_inode;
+	nvecs = 2;
+
+	/*
+	 * Only log the data/extents/b-tree root if there is something
+	 * left to log.
+	 */
+	iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
+
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_EXTENTS:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) &&
+		    (ip->i_d.di_nextents > 0) &&
+		    (ip->i_df.if_bytes > 0)) {
+			ASSERT(ip->i_df.if_u1.if_extents != NULL);
+			nvecs++;
+		} else {
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT;
+		}
+		break;
+
+	case XFS_DINODE_FMT_BTREE:
+		ASSERT(ip->i_df.if_ext_max ==
+		       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) &&
+		    (ip->i_df.if_broot_bytes > 0)) {
+			ASSERT(ip->i_df.if_broot != NULL);
+			nvecs++;
+		} else {
+			ASSERT(!(iip->ili_format.ilf_fields &
+				 XFS_ILOG_DBROOT));
+#ifdef XFS_TRANS_DEBUG
+			if (iip->ili_root_size > 0) {
+				ASSERT(iip->ili_root_size ==
+				       ip->i_df.if_broot_bytes);
+				ASSERT(memcmp(iip->ili_orig_root,
+					    ip->i_df.if_broot,
+					    iip->ili_root_size) == 0);
+			} else {
+				ASSERT(ip->i_df.if_broot_bytes == 0);
+			}
+#endif
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT;
+		}
+		break;
+
+	case XFS_DINODE_FMT_LOCAL:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) &&
+		    (ip->i_df.if_bytes > 0)) {
+			ASSERT(ip->i_df.if_u1.if_data != NULL);
+			ASSERT(ip->i_d.di_size > 0);
+			nvecs++;
+		} else {
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA;
+		}
+		break;
+
+	case XFS_DINODE_FMT_DEV:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
+			  XFS_ILOG_DEXT | XFS_ILOG_UUID);
+		break;
+
+	case XFS_DINODE_FMT_UUID:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
+			  XFS_ILOG_DEXT | XFS_ILOG_DEV);
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	/*
+	 * If there are no attributes associated with this file,
+	 * then there cannot be anything more to log.
+	 * Clear all attribute-related log flags.
+	 */
+	if (!XFS_IFORK_Q(ip)) {
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
+		return nvecs;
+	}
+
+	/*
+	 * Log any necessary attribute data.
+	 */
+	switch (ip->i_d.di_aformat) {
+	case XFS_DINODE_FMT_EXTENTS:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) &&
+		    (ip->i_d.di_anextents > 0) &&
+		    (ip->i_afp->if_bytes > 0)) {
+			ASSERT(ip->i_afp->if_u1.if_extents != NULL);
+			nvecs++;
+		} else {
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT;
+		}
+		break;
+
+	case XFS_DINODE_FMT_BTREE:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) &&
+		    (ip->i_afp->if_broot_bytes > 0)) {
+			ASSERT(ip->i_afp->if_broot != NULL);
+			nvecs++;
+		} else {
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT;
+		}
+		break;
+
+	case XFS_DINODE_FMT_LOCAL:
+		iip->ili_format.ilf_fields &=
+			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);
+		if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) &&
+		    (ip->i_afp->if_bytes > 0)) {
+			ASSERT(ip->i_afp->if_u1.if_data != NULL);
+			nvecs++;
+		} else {
+			iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA;
+		}
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	return nvecs;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given inode log item.  It fills the first item with an inode
+ * log format structure, the second with the on-disk inode structure,
+ * and a possible third and/or fourth with the inode data/extents/b-tree
+ * root and inode attributes data/extents/b-tree root.
+ */
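+/*
+ * For example (illustrative), an inode with a local (shortform) data
+ * fork and no attribute fork is logged as three regions: the
+ * xfs_inode_log_format_t header, the xfs_dinode_core_t, and the inline
+ * data rounded up to a 4-byte boundary.
+ */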
+STATIC void
+xfs_inode_item_format(
+	xfs_inode_log_item_t	*iip,
+	xfs_log_iovec_t		*log_vector)
+{
+	uint			nvecs;
+	xfs_log_iovec_t		*vecp;
+	xfs_inode_t		*ip;
+	size_t			data_bytes;
+	xfs_bmbt_rec_t		*ext_buffer;
+	int			nrecs;
+	xfs_mount_t		*mp;
+
+	ip = iip->ili_inode;
+	vecp = log_vector;
+
+	vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
+	vecp->i_len  = sizeof(xfs_inode_log_format_t);
+	vecp++;
+	nvecs	     = 1;
+
+	/*
+	 * Clear i_update_core if the timestamps (or any other
+	 * non-transactional modification) need flushing/logging
+	 * and we're about to log them with the rest of the core.
+	 *
+	 * This is the same logic as xfs_iflush() but this code can't
+	 * run at the same time as xfs_iflush because we're in commit
+	 * processing here and so we have the inode lock held in
+	 * exclusive mode.  It doesn't really matter if both
+	 * routines were to copy out the timestamps; either way
+	 * would be fine.
+	 *
+	 * We clear i_update_core before copying out the data.
+	 * This is for coordination with our timestamp updates
+	 * that don't hold the inode lock. They will always
+	 * update the timestamps BEFORE setting i_update_core,
+	 * so if we clear i_update_core after they set it we
+	 * are guaranteed to see their updates to the timestamps
+	 * here.  Likewise, if they set it after we clear it
+	 * here, we'll see it either on the next commit of this
+	 * inode or the next time the inode gets flushed via
+	 * xfs_iflush().  This depends on strongly ordered memory
+	 * semantics, but we have that.  We use the SYNCHRONIZE
+	 * macro to make sure that the compiler does not reorder
+	 * the i_update_core access below the data copy below.
+	 */
+	if (ip->i_update_core)  {
+		ip->i_update_core = 0;
+		SYNCHRONIZE();
+	}
+
+	/*
+	 * We don't have to worry about re-ordering here because
+	 * the update_size field is protected by the inode lock
+	 * and we have that held in exclusive mode.
+	 */
+	if (ip->i_update_size)
+		ip->i_update_size = 0;
+
+	vecp->i_addr = (xfs_caddr_t)&ip->i_d;
+	vecp->i_len  = sizeof(xfs_dinode_core_t);
+	vecp++;
+	nvecs++;
+	iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
+
+	/*
+	 * If this is really an old format inode, then we need to
+	 * log it as such.  This means that we have to copy the link
+	 * count from the new field to the old.  We don't have to worry
+	 * about the new fields, because nothing trusts them as long as
+	 * the old inode version number is there.  If the superblock already
+	 * has a new version number, then we don't bother converting back.
+	 */
+	mp = ip->i_mount;
+	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
+	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
+	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
+		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
+			/*
+			 * Convert it back.
+			 */
+			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
+			ip->i_d.di_onlink = ip->i_d.di_nlink;
+		} else {
+			/*
+			 * The superblock version has already been bumped,
+			 * so just make the conversion to the new inode
+			 * format permanent.
+			 */
+			ip->i_d.di_version = XFS_DINODE_VERSION_2;
+			ip->i_d.di_onlink = 0;
+			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+		}
+	}
+
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_EXTENTS:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) {
+			ASSERT(ip->i_df.if_bytes > 0);
+			ASSERT(ip->i_df.if_u1.if_extents != NULL);
+			ASSERT(ip->i_d.di_nextents > 0);
+			ASSERT(iip->ili_extents_buf == NULL);
+			nrecs = ip->i_df.if_bytes /
+				(uint)sizeof(xfs_bmbt_rec_t);
+			ASSERT(nrecs > 0);
+#if __BYTE_ORDER == __BIG_ENDIAN
+			if (nrecs == ip->i_d.di_nextents) {
+				/*
+				 * There are no delayed allocation
+				 * extents, so just point to the
+				 * real extents array.
+				 */
+				vecp->i_addr =
+					(char *)(ip->i_df.if_u1.if_extents);
+				vecp->i_len = ip->i_df.if_bytes;
+			} else
+#endif
+			{
+				/*
+				 * There are delayed allocation extents
+				 * in the inode, or we need to convert
+				 * the extents to on disk format.
+				 * Use xfs_iextents_copy()
+				 * to copy only the real extents into
+				 * a separate buffer.  We'll free the
+				 * buffer in the unlock routine.
+				 */
+				ext_buffer = kmem_alloc(ip->i_df.if_bytes,
+					KM_SLEEP);
+				iip->ili_extents_buf = ext_buffer;
+				vecp->i_addr = (xfs_caddr_t)ext_buffer;
+				vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
+						XFS_DATA_FORK);
+			}
+			ASSERT(vecp->i_len <= ip->i_df.if_bytes);
+			iip->ili_format.ilf_dsize = vecp->i_len;
+			vecp++;
+			nvecs++;
+		}
+		break;
+
+	case XFS_DINODE_FMT_BTREE:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_DDATA | XFS_ILOG_DEXT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
+			ASSERT(ip->i_df.if_broot_bytes > 0);
+			ASSERT(ip->i_df.if_broot != NULL);
+			vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
+			vecp->i_len = ip->i_df.if_broot_bytes;
+			vecp++;
+			nvecs++;
+			iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
+		}
+		break;
+
+	case XFS_DINODE_FMT_LOCAL:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+			  XFS_ILOG_DEV | XFS_ILOG_UUID)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) {
+			ASSERT(ip->i_df.if_bytes > 0);
+			ASSERT(ip->i_df.if_u1.if_data != NULL);
+			ASSERT(ip->i_d.di_size > 0);
+
+			vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data;
+			/*
+			 * Round if_bytes up to a word boundary.
+			 * The underlying memory is guaranteed
+			 * to be there by xfs_idata_realloc().
+			 */
+			data_bytes = roundup(ip->i_df.if_bytes, 4);
+			ASSERT((ip->i_df.if_real_bytes == 0) ||
+			       (ip->i_df.if_real_bytes == data_bytes));
+			vecp->i_len = (int)data_bytes;
+			vecp++;
+			nvecs++;
+			iip->ili_format.ilf_dsize = (unsigned)data_bytes;
+		}
+		break;
+
+	case XFS_DINODE_FMT_DEV:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+			  XFS_ILOG_DDATA | XFS_ILOG_UUID)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
+			iip->ili_format.ilf_u.ilfu_rdev =
+				ip->i_df.if_u2.if_rdev;
+		}
+		break;
+
+	case XFS_DINODE_FMT_UUID:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+			  XFS_ILOG_DDATA | XFS_ILOG_DEV)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
+			iip->ili_format.ilf_u.ilfu_uuid =
+				ip->i_df.if_u2.if_uuid;
+		}
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	/*
+	 * If there are no attributes associated with the file,
+	 * then we're done.
+	 * Assert that no attribute-related log flags are set.
+	 */
+	if (!XFS_IFORK_Q(ip)) {
+		ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+		iip->ili_format.ilf_size = nvecs;
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
+		return;
+	}
+
+	switch (ip->i_d.di_aformat) {
+	case XFS_DINODE_FMT_EXTENTS:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
+			ASSERT(ip->i_afp->if_bytes > 0);
+			ASSERT(ip->i_afp->if_u1.if_extents != NULL);
+			ASSERT(ip->i_d.di_anextents > 0);
+#ifdef DEBUG
+			nrecs = ip->i_afp->if_bytes /
+				(uint)sizeof(xfs_bmbt_rec_t);
+#endif
+			ASSERT(nrecs > 0);
+			ASSERT(nrecs == ip->i_d.di_anextents);
+#if __BYTE_ORDER == __BIG_ENDIAN
+			/*
+			 * There are no delayed allocation extents
+			 * for attributes, so just point at the array.
+			 */
+			vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
+			vecp->i_len = ip->i_afp->if_bytes;
+#else
+			ASSERT(iip->ili_aextents_buf == NULL);
+			/*
+			 * Need to endian flip before logging
+			 */
+			ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
+				KM_SLEEP);
+			iip->ili_aextents_buf = ext_buffer;
+			vecp->i_addr = (xfs_caddr_t)ext_buffer;
+			vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
+					XFS_ATTR_FORK);
+#endif
+			iip->ili_format.ilf_asize = vecp->i_len;
+			vecp++;
+			nvecs++;
+		}
+		break;
+
+	case XFS_DINODE_FMT_BTREE:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_ADATA | XFS_ILOG_AEXT)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
+			ASSERT(ip->i_afp->if_broot_bytes > 0);
+			ASSERT(ip->i_afp->if_broot != NULL);
+			vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
+			vecp->i_len = ip->i_afp->if_broot_bytes;
+			vecp++;
+			nvecs++;
+			iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
+		}
+		break;
+
+	case XFS_DINODE_FMT_LOCAL:
+		ASSERT(!(iip->ili_format.ilf_fields &
+			 (XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
+		if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) {
+			ASSERT(ip->i_afp->if_bytes > 0);
+			ASSERT(ip->i_afp->if_u1.if_data != NULL);
+
+			vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data;
+			/*
+			 * Round if_bytes up to a word boundary.
+			 * The underlying memory is guaranteed
+			 * to be there by xfs_idata_realloc().
+			 */
+			data_bytes = roundup(ip->i_afp->if_bytes, 4);
+			ASSERT((ip->i_afp->if_real_bytes == 0) ||
+			       (ip->i_afp->if_real_bytes == data_bytes));
+			vecp->i_len = (int)data_bytes;
+			vecp++;
+			nvecs++;
+			iip->ili_format.ilf_asize = (unsigned)data_bytes;
+		}
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+	iip->ili_format.ilf_size = nvecs;
+}
+
+
+/*
+ * This is called to pin the inode associated with the inode log
+ * item in memory so it cannot be written out.  Do this by calling
+ * xfs_ipin() to bump the pin count in the inode while holding the
+ * inode pin lock.
+ */
+STATIC void
+xfs_inode_item_pin(
+	xfs_inode_log_item_t	*iip)
+{
+	ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE));
+	xfs_ipin(iip->ili_inode);
+}
+
+
+/*
+ * This is called to unpin the inode associated with the inode log
+ * item which was previously pinned with a call to xfs_inode_item_pin().
+ * Just call xfs_iunpin() on the inode to do this.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_inode_item_unpin(
+	xfs_inode_log_item_t	*iip,
+	int			stale)
+{
+	xfs_iunpin(iip->ili_inode);
+}
+
+/* ARGSUSED */
+STATIC void
+xfs_inode_item_unpin_remove(
+	xfs_inode_log_item_t	*iip,
+	xfs_trans_t		*tp)
+{
+	xfs_iunpin(iip->ili_inode);
+}
+
+/*
+ * This is called to attempt to lock the inode associated with this
+ * inode log item, in preparation for the push routine which does the actual
+ * iflush.  Don't sleep on the inode lock or the flush lock.
+ *
+ * If the flush lock is already held, indicating that the inode has
+ * been or is in the process of being flushed, then (ideally) we'd like to
+ * see if the inode's buffer is still incore, and if so give it a nudge.
+ * We delay doing so until the pushbuf routine, though, to avoid holding
+ * the AIL lock across a call to the blackhole which is the buffercache.
+ * Also we don't want to sleep in any device strategy routines, which can happen
+ * if we do the subsequent bawrite in here.
+ */
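+/*
+ * To summarize the possible outcomes: XFS_ITEM_PINNED if the inode is
+ * pinned (or stale, to force out the log), XFS_ITEM_LOCKED if the inode
+ * lock cannot be taken without sleeping, XFS_ITEM_PUSHBUF if the flush
+ * lock is held and we won the right to push the backing buffer (the
+ * inode is left locked shared), XFS_ITEM_FLUSHING if someone else is
+ * already pushing it, and XFS_ITEM_SUCCESS otherwise.
+ */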
+STATIC uint
+xfs_inode_item_trylock(
+	xfs_inode_log_item_t	*iip)
+{
+	register xfs_inode_t	*ip;
+
+	ip = iip->ili_inode;
+
+	if (xfs_ipincount(ip) > 0) {
+		return XFS_ITEM_PINNED;
+	}
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		return XFS_ITEM_LOCKED;
+	}
+
+	if (!xfs_iflock_nowait(ip)) {
+		/*
+		 * If someone else isn't already trying to push the inode
+		 * buffer, we get to do it.
+		 */
+		if (iip->ili_pushbuf_flag == 0) {
+			iip->ili_pushbuf_flag = 1;
+#ifdef DEBUG
+			iip->ili_push_owner = get_thread_id();
+#endif
+			/*
+			 * Inode is left locked in shared mode.
+			 * Pushbuf routine gets to unlock it.
+			 */
+			return XFS_ITEM_PUSHBUF;
+		} else {
+			/*
+			 * We hold the AIL_LOCK, so we must specify the
+			 * NONOTIFY flag so that we won't double trip.
+			 */
+			xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
+			return XFS_ITEM_FLUSHING;
+		}
+		/* NOTREACHED */
+	}
+
+	/* Stale items should force out the iclog */
+	if (ip->i_flags & XFS_ISTALE) {
+		xfs_ifunlock(ip);
+		xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
+		return XFS_ITEM_PINNED;
+	}
+
+#ifdef DEBUG
+	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+		ASSERT(iip->ili_format.ilf_fields != 0);
+		ASSERT(iip->ili_logged == 0);
+		ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL);
+	}
+#endif
+	return XFS_ITEM_SUCCESS;
+}
+
+/*
+ * Unlock the inode associated with the inode log item.
+ * Clear the fields of the inode and inode log item that
+ * are specific to the current transaction.  If the
+ * hold flag is set, do not unlock the inode.
+ */
+STATIC void
+xfs_inode_item_unlock(
+	xfs_inode_log_item_t	*iip)
+{
+	uint		hold;
+	uint		iolocked;
+	uint		lock_flags;
+	xfs_inode_t	*ip;
+
+	ASSERT(iip != NULL);
+	ASSERT(iip->ili_inode->i_itemp != NULL);
+	ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE));
+	ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
+		  XFS_ILI_IOLOCKED_EXCL)) ||
+	       ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE));
+	ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
+		  XFS_ILI_IOLOCKED_SHARED)) ||
+	       ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS));
+	/*
+	 * Clear the transaction pointer in the inode.
+	 */
+	ip = iip->ili_inode;
+	ip->i_transp = NULL;
+
+	/*
+	 * If the inode needed a separate buffer with which to log
+	 * its extents, then free it now.
+	 */
+	if (iip->ili_extents_buf != NULL) {
+		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
+		ASSERT(ip->i_d.di_nextents > 0);
+		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
+		ASSERT(ip->i_df.if_bytes > 0);
+		kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes);
+		iip->ili_extents_buf = NULL;
+	}
+	if (iip->ili_aextents_buf != NULL) {
+		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
+		ASSERT(ip->i_d.di_anextents > 0);
+		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
+		ASSERT(ip->i_afp->if_bytes > 0);
+		kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes);
+		iip->ili_aextents_buf = NULL;
+	}
+
+	/*
+	 * Figure out if we should unlock the inode or not.
+	 */
+	hold = iip->ili_flags & XFS_ILI_HOLD;
+
+	/*
+	 * Before clearing out the flags, remember whether we
+	 * are holding the inode's IO lock.
+	 */
+	iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY;
+
+	/*
+	 * Clear out the fields of the inode log item particular
+	 * to the current transaction.
+	 */
+	iip->ili_ilock_recur = 0;
+	iip->ili_iolock_recur = 0;
+	iip->ili_flags = 0;
+
+	/*
+	 * Unlock the inode if XFS_ILI_HOLD was not set.
+	 */
+	if (!hold) {
+		lock_flags = XFS_ILOCK_EXCL;
+		if (iolocked & XFS_ILI_IOLOCKED_EXCL) {
+			lock_flags |= XFS_IOLOCK_EXCL;
+		} else if (iolocked & XFS_ILI_IOLOCKED_SHARED) {
+			lock_flags |= XFS_IOLOCK_SHARED;
+		}
+		xfs_iput(iip->ili_inode, lock_flags);
+	}
+}
+
+/*
+ * This is called to find out where the oldest active copy of the
+ * inode log item in the on disk log resides now that the last log
+ * write of it completed at the given lsn.  Since we always re-log
+ * all dirty data in an inode, the latest copy in the on disk log
+ * is the only one that matters.  Therefore, simply return the
+ * given lsn.
+ */
+/*ARGSUSED*/
+STATIC xfs_lsn_t
+xfs_inode_item_committed(
+	xfs_inode_log_item_t	*iip,
+	xfs_lsn_t		lsn)
+{
+	return (lsn);
+}
+
+/*
+ * The transaction with the inode locked has aborted.  The inode
+ * must not be dirty within the transaction (unless we're forcibly
+ * shutting down).  We simply unlock just as if the transaction
+ * had been cancelled.
+ */
+STATIC void
+xfs_inode_item_abort(
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_inode_item_unlock(iip);
+	return;
+}
+
+
+/*
+ * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
+ * failed to get the inode flush lock but did get the inode locked SHARED.
+ * Here we're trying to see if the inode buffer is incore, and if so whether it's
+ * marked delayed write. If that's the case, we'll initiate a bawrite on that
+ * buffer to expedite the process.
+ *
+ * We aren't holding the AIL_LOCK (or the flush lock) when this gets called,
+ * so it is inherently race-y.
+ */
+STATIC void
+xfs_inode_item_pushbuf(
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_inode_t	*ip;
+	xfs_mount_t	*mp;
+	xfs_buf_t	*bp;
+	uint		dopush;
+
+	ip = iip->ili_inode;
+
+	ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
+
+	/*
+	 * The ili_pushbuf_flag keeps others from
+	 * trying to duplicate our effort.
+	 */
+	ASSERT(iip->ili_pushbuf_flag != 0);
+	ASSERT(iip->ili_push_owner == get_thread_id());
+
+	/*
+	 * If flushlock isn't locked anymore, chances are that the
+	 * inode flush completed and the inode was taken off the AIL.
+	 * So, just get out.
+	 */
+	if ((valusema(&(ip->i_flock)) > 0)  ||
+	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+		iip->ili_pushbuf_flag = 0;
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		return;
+	}
+
+	mp = ip->i_mount;
+	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
+		    iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+
+	if (bp != NULL) {
+		if (XFS_BUF_ISDELAYWRITE(bp)) {
+			/*
+			 * We were racing with iflush because we don't hold
+			 * the AIL_LOCK or the flush lock. However, at this point,
+			 * we have the buffer, and we know that it's dirty.
+			 * So, it's possible that iflush raced with us, and
+			 * this item is already taken off the AIL.
+			 * If not, we can flush it async.
+			 */
+			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
+				  (valusema(&(ip->i_flock)) <= 0));
+			iip->ili_pushbuf_flag = 0;
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+			xfs_buftrace("INODE ITEM PUSH", bp);
+			if (XFS_BUF_ISPINNED(bp)) {
+				xfs_log_force(mp, (xfs_lsn_t)0,
+					      XFS_LOG_FORCE);
+			}
+			if (dopush) {
+				xfs_bawrite(mp, bp);
+			} else {
+				xfs_buf_relse(bp);
+			}
+		} else {
+			iip->ili_pushbuf_flag = 0;
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+			xfs_buf_relse(bp);
+		}
+		return;
+	}
+	/*
+	 * We have to be careful about resetting the pushbuf flag too early (above).
+	 * Even though in theory we can do it as soon as we have the buflock,
+	 * we don't want others to be doing work needlessly. They'll come to
+	 * this function thinking that pushing the buffer is their
+	 * responsibility only to find that the buffer is still locked by
+	 * another thread doing the same thing.
+	 */
+	iip->ili_pushbuf_flag = 0;
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return;
+}
+
+
+/*
+ * This is called to asynchronously write the inode associated with this
+ * inode log item out to disk. The inode will already have been locked by
+ * a successful call to xfs_inode_item_trylock().
+ */
+STATIC void
+xfs_inode_item_push(
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_inode_t	*ip;
+
+	ip = iip->ili_inode;
+
+	ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
+	ASSERT(valusema(&(ip->i_flock)) <= 0);
+	/*
+	 * Since we were able to lock the inode's flush lock and
+	 * we found it on the AIL, the inode must be dirty.  This
+	 * is because the inode is removed from the AIL while still
+	 * holding the flush lock in xfs_iflush_done().  Thus, if
+	 * we found it in the AIL and were able to obtain the flush
+	 * lock without sleeping, then there must not have been
+	 * anyone in the process of flushing the inode.
+	 */
+	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
+	       iip->ili_format.ilf_fields != 0);
+
+	/*
+	 * Write out the inode.  The completion routine ('iflush_done') will
+	 * pull it from the AIL, mark it clean, and unlock the flush lock.
+	 */
+	(void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	return;
+}
+
+/*
+ * XXX rcc - this one really has to do something.  Probably needs
+ * to stamp in a new field in the incore inode.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_inode_item_committing(
+	xfs_inode_log_item_t	*iip,
+	xfs_lsn_t		lsn)
+{
+	iip->ili_last_lsn = lsn;
+	return;
+}
+
+/*
+ * This is the ops vector shared by all inode log items.
+ */
+struct xfs_item_ops xfs_inode_item_ops = {
+	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_inode_item_size,
+	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
+					xfs_inode_item_format,
+	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin,
+	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
+					xfs_inode_item_unpin_remove,
+	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
+	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_inode_item_unlock,
+	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_inode_item_committed,
+	.iop_push	= (void(*)(xfs_log_item_t*))xfs_inode_item_push,
+	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_inode_item_abort,
+	.iop_pushbuf	= (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf,
+	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
+					xfs_inode_item_committing
+};
+
+
+/*
+ * Initialize the inode log item for a newly allocated (in-core) inode.
+ */
+void
+xfs_inode_item_init(
+	xfs_inode_t	*ip,
+	xfs_mount_t	*mp)
+{
+	xfs_inode_log_item_t	*iip;
+
+	ASSERT(ip->i_itemp == NULL);
+	iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
+
+	iip->ili_item.li_type = XFS_LI_INODE;
+	iip->ili_item.li_ops = &xfs_inode_item_ops;
+	iip->ili_item.li_mountp = mp;
+	iip->ili_inode = ip;
+
+	/*
+	 * We have zeroed memory. No need ...
+	 * iip->ili_extents_buf = NULL;
+	 * iip->ili_pushbuf_flag = 0;
+	 */
+
+	iip->ili_format.ilf_type = XFS_LI_INODE;
+	iip->ili_format.ilf_ino = ip->i_ino;
+	iip->ili_format.ilf_blkno = ip->i_blkno;
+	iip->ili_format.ilf_len = ip->i_len;
+	iip->ili_format.ilf_boffset = ip->i_boffset;
+}
+
+/*
+ * Free the inode log item and any memory hanging off of it.
+ */
+void
+xfs_inode_item_destroy(
+	xfs_inode_t	*ip)
+{
+#ifdef XFS_TRANS_DEBUG
+	if (ip->i_itemp->ili_root_size != 0) {
+		kmem_free(ip->i_itemp->ili_orig_root,
+			  ip->i_itemp->ili_root_size);
+	}
+#endif
+	kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+}
+
+
+/*
+ * This is the inode flushing I/O completion routine.  It is called
+ * from interrupt level when the buffer containing the inode is
+ * flushed to disk.  It is responsible for removing the inode item
+ * from the AIL if it has not been re-logged, and unlocking the inode's
+ * flush lock.
+ */
+/*ARGSUSED*/
+void
+xfs_iflush_done(
+	xfs_buf_t		*bp,
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_inode_t	*ip;
+	SPLDECL(s);
+
+	ip = iip->ili_inode;
+
+	/*
+	 * We only want to pull the item from the AIL if it is
+	 * actually there and its location in the log has not
+	 * changed since we started the flush.  Thus, we only bother
+	 * if the ili_logged flag is set and the inode's lsn has not
+	 * changed.  First we check the lsn outside
+	 * the lock since it's cheaper, and then we recheck while
+	 * holding the lock before removing the inode from the AIL.
+	 */
+	if (iip->ili_logged &&
+	    (iip->ili_item.li_lsn == iip->ili_flush_lsn)) {
+		AIL_LOCK(ip->i_mount, s);
+		if (iip->ili_item.li_lsn == iip->ili_flush_lsn) {
+			/*
+			 * xfs_trans_delete_ail() drops the AIL lock.
+			 */
+			xfs_trans_delete_ail(ip->i_mount,
+					     (xfs_log_item_t*)iip, s);
+		} else {
+			AIL_UNLOCK(ip->i_mount, s);
+		}
+	}
+
+	iip->ili_logged = 0;
+
+	/*
+	 * Clear the ili_last_fields bits now that we know that the
+	 * data corresponding to them is safely on disk.
+	 */
+	iip->ili_last_fields = 0;
+
+	/*
+	 * Release the inode's flush lock since we're done with it.
+	 */
+	xfs_ifunlock(ip);
+
+	return;
+}
+
+/*
+ * This is the inode flushing abort routine.  It is called
+ * from xfs_iflush when the filesystem is shutting down to clean
+ * up the inode state.
+ * It is responsible for removing the inode item
+ * from the AIL if it has not been re-logged, and unlocking the inode's
+ * flush lock.
+ */
+void
+xfs_iflush_abort(
+	xfs_inode_t		*ip)
+{
+	xfs_inode_log_item_t	*iip;
+	xfs_mount_t		*mp;
+	SPLDECL(s);
+
+	iip = ip->i_itemp;
+	mp = ip->i_mount;
+	if (iip) {
+		if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
+			AIL_LOCK(mp, s);
+			if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
+				/*
+				 * xfs_trans_delete_ail() drops the AIL lock.
+				 */
+				xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip,
+					s);
+			} else
+				AIL_UNLOCK(mp, s);
+		}
+		iip->ili_logged = 0;
+		/*
+		 * Clear the ili_last_fields bits now that we know that the
+		 * data corresponding to them is safely on disk.
+		 */
+		iip->ili_last_fields = 0;
+		/*
+		 * Clear the inode logging fields so no more flushes are
+		 * attempted.
+		 */
+		iip->ili_format.ilf_fields = 0;
+	}
+	/*
+	 * Release the inode's flush lock since we're done with it.
+	 */
+	xfs_ifunlock(ip);
+}
+
+void
+xfs_istale_done(
+	xfs_buf_t		*bp,
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_iflush_abort(iip->ili_inode);
+}
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
new file mode 100644
index 0000000..d8775e0
--- /dev/null
+++ b/fs/xfs/xfs_inode_item.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_INODE_ITEM_H__
+#define	__XFS_INODE_ITEM_H__
+
+/*
+ * This is the structure used to lay out an inode log item in the
+ * log.  The size of the inline data/extents/b-tree root to be logged
+ * (if any) is indicated in the ilf_dsize field.  Changes to this structure
+ * must be added on to the end.
+ *
+ * Convention for naming inode log item versions:  The current version
+ * is always named XFS_LI_INODE.  When an inode log item gets superseded,
+ * add the latest version of IRIX that will generate logs with that item
+ * to the version name.
+ *
+ * -Version 1 of this structure (XFS_LI_5_3_INODE) included up to the first
+ *	union (ilf_u) field.  This was released with IRIX 5.3-XFS.
+ * -Version 2 of this structure (XFS_LI_6_1_INODE) is currently the entire
+ *	structure.  This was released with IRIX 6.0.1-XFS and IRIX 6.1.
+ * -Version 3 of this structure (XFS_LI_INODE) is the same as version 2
+ *	so a new structure definition wasn't necessary.  However, we had
+ *	to add a new type because the inode cluster size changed from 4K
+ *	to 8K and the version number had to be rev'ved to keep older kernels
+ *	from trying to recover logs with the 8K buffers in them.  The logging
+ *	code can handle recovery on different-sized clusters now so hopefully
+ *	this'll be the last time we need to change the inode log item just
+ *	for a change in the inode cluster size.  This new version was
+ *	released with IRIX 6.2.
+ */
+typedef struct xfs_inode_log_format {
+	unsigned short		ilf_type;	/* inode log item type */
+	unsigned short		ilf_size;	/* size of this item */
+	uint			ilf_fields;	/* flags for fields logged */
+	ushort			ilf_asize;	/* size of attr d/ext/root */
+	ushort			ilf_dsize;	/* size of data/ext/root */
+	xfs_ino_t		ilf_ino;	/* inode number */
+	union {
+		xfs_dev_t	ilfu_rdev;	/* rdev value for dev inode*/
+		uuid_t		ilfu_uuid;	/* mount point value */
+	} ilf_u;
+	__int64_t		ilf_blkno;	/* blkno of inode buffer */
+	int			ilf_len;	/* len of inode buffer */
+	int			ilf_boffset;	/* off of inode in buffer */
+} xfs_inode_log_format_t;
+
+/* Initial version shipped with IRIX 5.3-XFS */
+typedef struct xfs_inode_log_format_v1 {
+	unsigned short		ilf_type;	/* inode log item type */
+	unsigned short		ilf_size;	/* size of this item */
+	uint			ilf_fields;	/* flags for fields logged */
+	uint			ilf_dsize;	/* size of data/ext/root */
+	xfs_ino_t		ilf_ino;	/* inode number */
+	union {
+		xfs_dev_t	ilfu_rdev;	/* rdev value for dev inode*/
+		uuid_t		ilfu_uuid;	/* mount point value */
+	} ilf_u;
+} xfs_inode_log_format_t_v1;
+
+/*
+ * Flags for xfs_trans_log_inode flags field.
+ */
+#define	XFS_ILOG_CORE	0x001	/* log standard inode fields */
+#define	XFS_ILOG_DDATA	0x002	/* log i_df.if_data */
+#define	XFS_ILOG_DEXT	0x004	/* log i_df.if_extents */
+#define	XFS_ILOG_DBROOT	0x008	/* log i_df.i_broot */
+#define	XFS_ILOG_DEV	0x010	/* log the dev field */
+#define	XFS_ILOG_UUID	0x020	/* log the uuid field */
+#define	XFS_ILOG_ADATA	0x040	/* log i_af.if_data */
+#define	XFS_ILOG_AEXT	0x080	/* log i_af.if_extents */
+#define	XFS_ILOG_ABROOT	0x100	/* log i_af.i_broot */
+
+#define	XFS_ILOG_NONCORE	(XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
+				 XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
+				 XFS_ILOG_UUID | XFS_ILOG_ADATA | \
+				 XFS_ILOG_AEXT | XFS_ILOG_ABROOT)
+
+#define	XFS_ILOG_DFORK		(XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
+				 XFS_ILOG_DBROOT)
+
+#define	XFS_ILOG_AFORK		(XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+				 XFS_ILOG_ABROOT)
+
+#define	XFS_ILOG_ALL		(XFS_ILOG_CORE | XFS_ILOG_DDATA | \
+				 XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \
+				 XFS_ILOG_DEV | XFS_ILOG_UUID | \
+				 XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+				 XFS_ILOG_ABROOT)
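+/*
+ * Callers of xfs_trans_log_inode() OR these flags together; for example
+ * (illustrative), a change to an extents-format data fork would
+ * typically be logged with XFS_ILOG_CORE | XFS_ILOG_DEXT.  The
+ * XFS_ILOG_DFORK and XFS_ILOG_AFORK masks below group the flags for a
+ * single fork.
+ */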
+
+#define	XFS_ILI_HOLD		0x1
+#define	XFS_ILI_IOLOCKED_EXCL	0x2
+#define	XFS_ILI_IOLOCKED_SHARED	0x4
+
+#define	XFS_ILI_IOLOCKED_ANY   (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)
+
+
+#ifdef __KERNEL__
+
+struct xfs_buf;
+struct xfs_bmbt_rec_64;
+struct xfs_inode;
+struct xfs_mount;
+
+
+typedef struct xfs_inode_log_item {
+	xfs_log_item_t		ili_item;	   /* common portion */
+	struct xfs_inode	*ili_inode;	   /* inode ptr */
+	xfs_lsn_t		ili_flush_lsn;	   /* lsn at last flush */
+	xfs_lsn_t		ili_last_lsn;	   /* lsn at last transaction */
+	unsigned short		ili_ilock_recur;   /* lock recursion count */
+	unsigned short		ili_iolock_recur;  /* lock recursion count */
+	unsigned short		ili_flags;	   /* misc flags */
+	unsigned short		ili_logged;	   /* flushed logged data */
+	unsigned int		ili_last_fields;   /* fields when flushed */
+	struct xfs_bmbt_rec_64	*ili_extents_buf;  /* array of logged
+						      data exts */
+	struct xfs_bmbt_rec_64	*ili_aextents_buf; /* array of logged
+						      attr exts */
+	unsigned int            ili_pushbuf_flag;  /* one bit used in push_ail */
+
+#ifdef DEBUG
+	uint64_t                ili_push_owner;    /* one who sets pushbuf_flag
+						      above gets to push the buf */
+#endif
+#ifdef XFS_TRANS_DEBUG
+	int			ili_root_size;
+	char			*ili_orig_root;
+#endif
+	xfs_inode_log_format_t	ili_format;	   /* logged structure */
+} xfs_inode_log_item_t;
+
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FDATA)
+int xfs_ilog_fdata(int w);
+#define	XFS_ILOG_FDATA(w)	xfs_ilog_fdata(w)
+#else
+#define	XFS_ILOG_FDATA(w)	\
+	((w) == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA)
+#endif
+
+#endif	/* __KERNEL__ */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FBROOT)
+int xfs_ilog_fbroot(int w);
+#define	XFS_ILOG_FBROOT(w)	xfs_ilog_fbroot(w)
+#else
+#define	XFS_ILOG_FBROOT(w)	\
+	((w) == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FEXT)
+int xfs_ilog_fext(int w);
+#define	XFS_ILOG_FEXT(w)	xfs_ilog_fext(w)
+#else
+#define	XFS_ILOG_FEXT(w)	\
+	((w) == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT)
+#endif
+
+#ifdef __KERNEL__
+
+void	xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
+void	xfs_inode_item_destroy(struct xfs_inode *);
+void	xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *);
+void	xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *);
+void	xfs_iflush_abort(struct xfs_inode *);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_INODE_ITEM_H__ */
diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/xfs_inum.h
new file mode 100644
index 0000000..a3af2d5
--- /dev/null
+++ b/fs/xfs/xfs_inum.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_INUM_H__
+#define	__XFS_INUM_H__
+
+/*
+ * Inode number format:
+ * low inopblog bits - offset in block
+ * next agblklog bits - block number in ag
+ * next agno_log bits - ag number
+ * high agno_log-agblklog-inopblog bits - 0
+ */
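+/*
+ * Worked example (the field widths are illustrative; they depend on the
+ * filesystem geometry): with inopblog = 4 (16 inodes per block) and
+ * agblklog = 16, an inode number decomposes as
+ *
+ *	offset = ino & ((1 << 4) - 1)
+ *	agbno  = (ino >> 4) & ((1 << 16) - 1)
+ *	agno   = ino >> (4 + 16)
+ *
+ * which is what the XFS_INO_TO_OFFSET/AGBNO/AGNO macros below compute,
+ * and XFS_AGINO_TO_INO() puts the pieces back together.
+ */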
+
+typedef	__uint32_t	xfs_agino_t;	/* within allocation grp inode number */
+
+/*
+ * Useful inode bits for this kernel.
+ * Used in some places where having 64-bits in the 32-bit kernels
+ * costs too much.
+ */
+#if XFS_BIG_INUMS
+typedef	xfs_ino_t	xfs_intino_t;
+#else
+typedef	__uint32_t	xfs_intino_t;
+#endif
+
+#define	NULLFSINO	((xfs_ino_t)-1)
+#define	NULLAGINO	((xfs_agino_t)-1)
+
+struct xfs_mount;
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_MASK)
+__uint32_t xfs_ino_mask(int k);
+#define	XFS_INO_MASK(k)			xfs_ino_mask(k)
+#else
+#define	XFS_INO_MASK(k)	((__uint32_t)((1ULL << (k)) - 1))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_OFFSET_BITS)
+int xfs_ino_offset_bits(struct xfs_mount *mp);
+#define	XFS_INO_OFFSET_BITS(mp)		xfs_ino_offset_bits(mp)
+#else
+#define	XFS_INO_OFFSET_BITS(mp)	((mp)->m_sb.sb_inopblog)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGBNO_BITS)
+int xfs_ino_agbno_bits(struct xfs_mount *mp);
+#define	XFS_INO_AGBNO_BITS(mp)		xfs_ino_agbno_bits(mp)
+#else
+#define	XFS_INO_AGBNO_BITS(mp)	((mp)->m_sb.sb_agblklog)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGINO_BITS)
+int xfs_ino_agino_bits(struct xfs_mount *mp);
+#define	XFS_INO_AGINO_BITS(mp)		xfs_ino_agino_bits(mp)
+#else
+#define	XFS_INO_AGINO_BITS(mp)		((mp)->m_agino_log)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGNO_BITS)
+int xfs_ino_agno_bits(struct xfs_mount *mp);
+#define	XFS_INO_AGNO_BITS(mp)		xfs_ino_agno_bits(mp)
+#else
+#define	XFS_INO_AGNO_BITS(mp)	((mp)->m_agno_log)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_BITS)
+int xfs_ino_bits(struct xfs_mount *mp);
+#define	XFS_INO_BITS(mp)		xfs_ino_bits(mp)
+#else
+#define	XFS_INO_BITS(mp)	(XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGNO)
+xfs_agnumber_t xfs_ino_to_agno(struct xfs_mount *mp, xfs_ino_t i);
+#define	XFS_INO_TO_AGNO(mp,i)		xfs_ino_to_agno(mp,i)
+#else
+#define	XFS_INO_TO_AGNO(mp,i)	\
+	((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGINO)
+xfs_agino_t xfs_ino_to_agino(struct xfs_mount *mp, xfs_ino_t i);
+#define	XFS_INO_TO_AGINO(mp,i)		xfs_ino_to_agino(mp,i)
+#else
+#define	XFS_INO_TO_AGINO(mp,i)	\
+	((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGBNO)
+xfs_agblock_t xfs_ino_to_agbno(struct xfs_mount *mp, xfs_ino_t i);
+#define	XFS_INO_TO_AGBNO(mp,i)		xfs_ino_to_agbno(mp,i)
+#else
+#define	XFS_INO_TO_AGBNO(mp,i)	\
+	(((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \
+	 XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_OFFSET)
+int xfs_ino_to_offset(struct xfs_mount *mp, xfs_ino_t i);
+#define	XFS_INO_TO_OFFSET(mp,i)		xfs_ino_to_offset(mp,i)
+#else
+#define	XFS_INO_TO_OFFSET(mp,i)	\
+	((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_FSB)
+xfs_fsblock_t xfs_ino_to_fsb(struct xfs_mount *mp, xfs_ino_t i);
+#define	XFS_INO_TO_FSB(mp,i)		xfs_ino_to_fsb(mp,i)
+#else
+#define	XFS_INO_TO_FSB(mp,i)	\
+	XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_INO)
+xfs_ino_t
+xfs_agino_to_ino(struct xfs_mount *mp, xfs_agnumber_t a, xfs_agino_t i);
+#define	XFS_AGINO_TO_INO(mp,a,i)	xfs_agino_to_ino(mp,a,i)
+#else
+#define	XFS_AGINO_TO_INO(mp,a,i)	\
+	(((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_AGBNO)
+xfs_agblock_t xfs_agino_to_agbno(struct xfs_mount *mp, xfs_agino_t i);
+#define	XFS_AGINO_TO_AGBNO(mp,i)	xfs_agino_to_agbno(mp,i)
+#else
+#define	XFS_AGINO_TO_AGBNO(mp,i)	((i) >> XFS_INO_OFFSET_BITS(mp))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_OFFSET)
+int xfs_agino_to_offset(struct xfs_mount *mp, xfs_agino_t i);
+#define	XFS_AGINO_TO_OFFSET(mp,i)	xfs_agino_to_offset(mp,i)
+#else
+#define	XFS_AGINO_TO_OFFSET(mp,i)	\
+	((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_OFFBNO_TO_AGINO)
+xfs_agino_t xfs_offbno_to_agino(struct xfs_mount *mp, xfs_agblock_t b, int o);
+#define	XFS_OFFBNO_TO_AGINO(mp,b,o)	xfs_offbno_to_agino(mp,b,o)
+#else
+#define	XFS_OFFBNO_TO_AGINO(mp,b,o)	\
+	((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
+#endif
+
+#if XFS_BIG_INUMS
+#define	XFS_MAXINUMBER		((xfs_ino_t)((1ULL << 56) - 1ULL))
+#define	XFS_INO64_OFFSET	((xfs_ino_t)(1ULL << 32))
+#else
+#define	XFS_MAXINUMBER		((xfs_ino_t)((1ULL << 32) - 1ULL))
+#endif
+#define	XFS_MAXINUMBER_32	((xfs_ino_t)((1ULL << 32) - 1ULL))
+
+#endif	/* __XFS_INUM_H__ */
diff --git a/fs/xfs/xfs_iocore.c b/fs/xfs/xfs_iocore.c
new file mode 100644
index 0000000..414ec49
--- /dev/null
+++ b/fs/xfs/xfs_iocore.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_itable.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_rw.h"
+#include "xfs_quota.h"
+#include "xfs_trans_space.h"
+#include "xfs_iomap.h"
+
+
+STATIC xfs_fsize_t
+xfs_size_fn(
+	xfs_inode_t		*ip)
+{
+	return (ip->i_d.di_size);
+}
+
+STATIC int
+xfs_ioinit(
+	struct vfs		*vfsp,
+	struct xfs_mount_args	*mntargs,
+	int			flags)
+{
+	return xfs_mountfs(vfsp, XFS_VFSTOM(vfsp), flags);
+}
+
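+/*
+ * Default I/O operations vector for native XFS inodes.  It routes the
+ * generic iocore entry points (block mapping, delayed/direct/unwritten
+ * allocation, locking and size queries) to the regular xfs_*
+ * implementations.
+ */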
+xfs_ioops_t	xfs_iocore_xfs = {
+	.xfs_ioinit		= (xfs_ioinit_t) xfs_ioinit,
+	.xfs_bmapi_func		= (xfs_bmapi_t) xfs_bmapi,
+	.xfs_bmap_eof_func	= (xfs_bmap_eof_t) xfs_bmap_eof,
+	.xfs_iomap_write_direct =
+			(xfs_iomap_write_direct_t) xfs_iomap_write_direct,
+	.xfs_iomap_write_delay =
+			(xfs_iomap_write_delay_t) xfs_iomap_write_delay,
+	.xfs_iomap_write_allocate =
+			(xfs_iomap_write_allocate_t) xfs_iomap_write_allocate,
+	.xfs_iomap_write_unwritten =
+			(xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten,
+	.xfs_ilock		= (xfs_lock_t) xfs_ilock,
+	.xfs_lck_map_shared	= (xfs_lck_map_shared_t) xfs_ilock_map_shared,
+	.xfs_ilock_demote	= (xfs_lock_demote_t) xfs_ilock_demote,
+	.xfs_ilock_nowait	= (xfs_lock_nowait_t) xfs_ilock_nowait,
+	.xfs_unlock		= (xfs_unlk_t) xfs_iunlock,
+	.xfs_size_func		= (xfs_size_t) xfs_size_fn,
+	.xfs_iodone		= (xfs_iodone_t) fs_noerr,
+};
+
+void
+xfs_iocore_inode_reinit(
+	xfs_inode_t	*ip)
+{
+	xfs_iocore_t	*io = &ip->i_iocore;
+
+	io->io_flags = 0;
+	if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
+		io->io_flags |= XFS_IOCORE_RT;
+	io->io_dmevmask = ip->i_d.di_dmevmask;
+	io->io_dmstate = ip->i_d.di_dmstate;
+}
+
+void
+xfs_iocore_inode_init(
+	xfs_inode_t	*ip)
+{
+	xfs_iocore_t	*io = &ip->i_iocore;
+	xfs_mount_t	*mp = ip->i_mount;
+
+	io->io_mount = mp;
+#ifdef DEBUG
+	io->io_lock = &ip->i_lock;
+	io->io_iolock = &ip->i_iolock;
+#endif
+
+	io->io_obj = (void *)ip;
+
+	xfs_iocore_inode_reinit(ip);
+}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
new file mode 100644
index 0000000..3826e8f
--- /dev/null
+++ b/fs/xfs/xfs_iomap.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_space.h"
+#include "xfs_utils.h"
+#include "xfs_iomap.h"
+
+#if defined(XFS_RW_TRACE)
+void
+xfs_iomap_enter_trace(
+	int		tag,
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	ssize_t		count)
+{
+	xfs_inode_t	*ip = XFS_IO_INODE(io);
+
+	if (!ip->i_rwtrace)
+		return;
+
+	ktrace_enter(ip->i_rwtrace,
+		(void *)((unsigned long)tag),
+		(void *)ip,
+		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(offset & 0xffffffff)),
+		(void *)((unsigned long)count),
+		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL,
+		(void *)NULL);
+}
+
+void
+xfs_iomap_map_trace(
+	int		tag,
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	ssize_t		count,
+	xfs_iomap_t	*iomapp,
+	xfs_bmbt_irec_t	*imapp,
+	int		flags)
+{
+	xfs_inode_t	*ip = XFS_IO_INODE(io);
+
+	if (!ip->i_rwtrace)
+		return;
+
+	ktrace_enter(ip->i_rwtrace,
+		(void *)((unsigned long)tag),
+		(void *)ip,
+		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(offset & 0xffffffff)),
+		(void *)((unsigned long)count),
+		(void *)((unsigned long)flags),
+		(void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
+		(void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
+		(void *)((unsigned long)(iomapp->iomap_delta)),
+		(void *)((unsigned long)(iomapp->iomap_bsize)),
+		(void *)((unsigned long)(iomapp->iomap_bn)),
+		(void *)(__psint_t)(imapp->br_startoff),
+		(void *)((unsigned long)(imapp->br_blockcount)),
+		(void *)(__psint_t)(imapp->br_startblock));
+}
+#else
+#define xfs_iomap_enter_trace(tag, io, offset, count)
+#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
+#endif
+
+#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
+						<< mp->m_writeio_log)
+#define XFS_STRAT_WRITE_IMAPS	2
+#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
+
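+/*
+ * Convert up to "iomaps" bmap extent records into xfs_iomap_t entries,
+ * translating filesystem block units into bytes and flagging holes,
+ * delayed allocations, unwritten extents and EOF as appropriate.
+ * Returns the number of iomap entries filled in.
+ */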
+STATIC int
+xfs_imap_to_bmap(
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	xfs_bmbt_irec_t *imap,
+	xfs_iomap_t	*iomapp,
+	int		imaps,			/* Number of imap entries */
+	int		iomaps,			/* Number of iomap entries */
+	int		flags)
+{
+	xfs_mount_t	*mp;
+	xfs_fsize_t	nisize;
+	int		pbm;
+	xfs_fsblock_t	start_block;
+
+	mp = io->io_mount;
+	nisize = XFS_SIZE(mp, io);
+	if (io->io_new_size > nisize)
+		nisize = io->io_new_size;
+
+	for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
+		iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+		iomapp->iomap_delta = offset - iomapp->iomap_offset;
+		iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
+		iomapp->iomap_flags = flags;
+
+		if (io->io_flags & XFS_IOCORE_RT) {
+			iomapp->iomap_flags |= IOMAP_REALTIME;
+			iomapp->iomap_target = mp->m_rtdev_targp;
+		} else {
+			iomapp->iomap_target = mp->m_ddev_targp;
+		}
+		start_block = imap->br_startblock;
+		if (start_block == HOLESTARTBLOCK) {
+			iomapp->iomap_bn = IOMAP_DADDR_NULL;
+			iomapp->iomap_flags |= IOMAP_HOLE;
+		} else if (start_block == DELAYSTARTBLOCK) {
+			iomapp->iomap_bn = IOMAP_DADDR_NULL;
+			iomapp->iomap_flags |= IOMAP_DELAY;
+		} else {
+			iomapp->iomap_bn = XFS_FSB_TO_DB_IO(io, start_block);
+			if (ISUNWRITTEN(imap))
+				iomapp->iomap_flags |= IOMAP_UNWRITTEN;
+		}
+
+		if ((iomapp->iomap_offset + iomapp->iomap_bsize) >= nisize) {
+			iomapp->iomap_flags |= IOMAP_EOF;
+		}
+
+		offset += iomapp->iomap_bsize - iomapp->iomap_delta;
+	}
+	return pbm;	/* Return the number filled */
+}
+
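+/*
+ * Map a byte range of the file to one or more device extents for the
+ * requested operation (read, write, allocate, unwritten conversion or
+ * device lookup), taking the appropriate inode locks and going through
+ * the delayed or direct allocation paths for writes when no suitable
+ * extent already exists.
+ */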
+int
+xfs_iomap(
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	xfs_iomap_t	*iomapp,
+	int		*niomaps)
+{
+	xfs_mount_t	*mp = io->io_mount;
+	xfs_fileoff_t	offset_fsb, end_fsb;
+	int		error = 0;
+	int		lockmode = 0;
+	xfs_bmbt_irec_t	imap;
+	int		nimaps = 1;
+	int		bmapi_flags = 0;
+	int		iomap_flags = 0;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	switch (flags &
+		(BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE |
+		 BMAPI_UNWRITTEN | BMAPI_DEVICE)) {
+	case BMAPI_READ:
+		xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count);
+		lockmode = XFS_LCK_MAP_SHARED(mp, io);
+		bmapi_flags = XFS_BMAPI_ENTIRE;
+		if (flags & BMAPI_IGNSTATE)
+			bmapi_flags |= XFS_BMAPI_IGSTATE;
+		break;
+	case BMAPI_WRITE:
+		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count);
+		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+		bmapi_flags = 0;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	case BMAPI_ALLOCATE:
+		xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, io, offset, count);
+		lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
+		bmapi_flags = XFS_BMAPI_ENTIRE;
+		/* Attempt non-blocking lock */
+		if (flags & BMAPI_TRYLOCK) {
+			if (!XFS_ILOCK_NOWAIT(mp, io, lockmode))
+				return XFS_ERROR(EAGAIN);
+		} else {
+			XFS_ILOCK(mp, io, lockmode);
+		}
+		break;
+	case BMAPI_UNWRITTEN:
+		goto phase2;
+	case BMAPI_DEVICE:
+		lockmode = XFS_LCK_MAP_SHARED(mp, io);
+		iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ?
+			mp->m_rtdev_targp : mp->m_ddev_targp;
+		error = 0;
+		*niomaps = 1;
+		goto out;
+	default:
+		BUG();
+	}
+
+	ASSERT(offset <= mp->m_maxioffset);
+	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
+			(xfs_filblks_t)(end_fsb - offset_fsb),
+			bmapi_flags,  NULL, 0, &imap,
+			&nimaps, NULL);
+
+	if (error)
+		goto out;
+
+phase2:
+	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE|BMAPI_UNWRITTEN)) {
+	case BMAPI_WRITE:
+		/* If we found an extent, return it */
+		if (nimaps && (imap.br_startblock != HOLESTARTBLOCK)) {
+			xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
+					offset, count, iomapp, &imap, flags);
+			break;
+		}
+
+		if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
+			error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
+					count, flags, &imap, &nimaps, nimaps);
+		} else {
+			error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
+					flags, &imap, &nimaps);
+		}
+		if (!error) {
+			xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, io,
+					offset, count, iomapp, &imap, flags);
+		}
+		iomap_flags = IOMAP_NEW;
+		break;
+	case BMAPI_ALLOCATE:
+		/* If we found an extent, return it */
+		XFS_IUNLOCK(mp, io, lockmode);
+		lockmode = 0;
+
+		if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) {
+			xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
+					offset, count, iomapp, &imap, flags);
+			break;
+		}
+
+		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+		break;
+	case BMAPI_UNWRITTEN:
+		lockmode = 0;
+		error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count);
+		nimaps = 0;
+		break;
+	}
+
+	if (nimaps) {
+		*niomaps = xfs_imap_to_bmap(io, offset, &imap,
+					    iomapp, nimaps, *niomaps, iomap_flags);
+	} else if (niomaps) {
+		*niomaps = 0;
+	}
+
+out:
+	if (lockmode)
+		XFS_IUNLOCK(mp, io, lockmode);
+	return XFS_ERROR(error);
+}
+
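+/*
+ * Try to free up delayed allocation space when a delalloc mapping attempt
+ * finds no space.  Each call moves to the next stage: flush this inode's
+ * delayed allocations, retry the write with BMAPI_SYNC set, then flush
+ * the whole device.  Returns 0 if the caller should retry the allocation,
+ * non-zero once all stages have been exhausted.
+ */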
+STATIC int
+xfs_flush_space(
+	xfs_inode_t	*ip,
+	int		*fsynced,
+	int		*ioflags)
+{
+	switch (*fsynced) {
+	case 0:
+		if (ip->i_delayed_blks) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			xfs_flush_inode(ip);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			*fsynced = 1;
+		} else {
+			*ioflags |= BMAPI_SYNC;
+			*fsynced = 2;
+		}
+		return 0;
+	case 1:
+		*fsynced = 2;
+		*ioflags |= BMAPI_SYNC;
+		return 0;
+	case 2:
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_flush_device(ip);
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		*fsynced = 3;
+		return 0;
+	}
+	return 1;
+}
+
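+/*
+ * Allocate real blocks for a direct I/O or mmap write.  The space is
+ * reserved and allocated within a single transaction; "found" indicates
+ * that the caller already looked up a mapping for this range, which may
+ * limit how much we allocate here.
+ */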
+int
+xfs_iomap_write_direct(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count,
+	int		flags,
+	xfs_bmbt_irec_t *ret_imap,
+	int		*nmaps,
+	int		found)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
+	xfs_fileoff_t	offset_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_fsize_t	isize;
+	xfs_fsblock_t	firstfsb;
+	int		nimaps, maps;
+	int		error;
+	int		bmapi_flag;
+	int		rt;
+	xfs_trans_t	*tp;
+	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
+	xfs_bmap_free_t free_list;
+	int		aeof;
+	xfs_filblks_t	datablocks;
+	int		committed;
+	int		numrtextents;
+	uint		resblks;
+
+	/*
+	 * Make sure that the dquots are there. This doesn't hold
+	 * the ilock across a disk read.
+	 */
+	error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
+	if (error)
+		return XFS_ERROR(error);
+
+	maps = min(XFS_WRITE_IMAPS, *nmaps);
+	nimaps = maps;
+
+	isize = ip->i_d.di_size;
+	aeof = (offset + count) > isize;
+
+	if (io->io_new_size > isize)
+		isize = io->io_new_size;
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+	count_fsb = last_fsb - offset_fsb;
+	if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
+		xfs_fileoff_t	map_last_fsb;
+
+		map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
+
+		if (map_last_fsb < last_fsb) {
+			last_fsb = map_last_fsb;
+			count_fsb = last_fsb - offset_fsb;
+		}
+		ASSERT(count_fsb > 0);
+	}
+
+	/*
+	 * determine if reserving space on
+	 * Determine whether we are reserving space on
+	 */
+	if ((rt = XFS_IS_REALTIME_INODE(ip))) {
+		int	sbrtextsize, iprtextsize;
+
+		sbrtextsize = mp->m_sb.sb_rextsize;
+		iprtextsize =
+			ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
+		numrtextents = (count_fsb + iprtextsize - 1);
+		do_div(numrtextents, sbrtextsize);
+		datablocks = 0;
+	} else {
+		datablocks = count_fsb;
+		numrtextents = 0;
+	}
+
+	/*
+	 * Allocate and set up the transaction.
+	 */
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+
+	resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+
+	error = xfs_trans_reserve(tp, resblks,
+			XFS_WRITE_LOG_RES(mp), numrtextents,
+			XFS_TRANS_PERM_LOG_RES,
+			XFS_WRITE_LOG_COUNT);
+
+	/*
+	 * check for running out of space
+	 */
+	if (error)
+		/*
+		 * Free the transaction structure.
+		 */
+		xfs_trans_cancel(tp, 0);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	if (error)
+		goto error_out;	/* Don't return right after the trans cancel
+					above; we need the ilock before returning */
+
+	if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) {
+		error = (EDQUOT);
+		goto error1;
+	}
+	nimaps = 1;
+
+	bmapi_flag = XFS_BMAPI_WRITE;
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+
+	if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
+		bmapi_flag |= XFS_BMAPI_PREALLOC;
+
+	/*
+	 * issue the bmapi() call to allocate the blocks
+	 */
+	XFS_BMAP_INIT(&free_list, &firstfsb);
+	imapp = &imap[0];
+	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+		bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
+	if (error) {
+		goto error0;
+	}
+
+	/*
+	 * complete the transaction
+	 */
+
+	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+	if (error) {
+		goto error0;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		goto error_out;
+	}
+
+	/* copy any maps to caller's array and return any error. */
+	if (nimaps == 0) {
+		error = (ENOSPC);
+		goto error_out;
+	}
+
+	*ret_imap = imap[0];
+	*nmaps = 1;
+	if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
+		cmn_err(CE_PANIC, "Access to block zero:  fs <%s> inode: %lld "
+			"start_block : %llx start_off : %llx blkcnt : %llx "
+			"extent-state : %x \n",
+			(ip->i_mount)->m_fsname,
+			(long long)ip->i_ino,
+			ret_imap->br_startblock, ret_imap->br_startoff,
+			ret_imap->br_blockcount, ret_imap->br_state);
+	}
+	return 0;
+
+ error0:	/* Cancel bmap, unlock inode, and cancel trans */
+	xfs_bmap_cancel(&free_list);
+
+ error1:	/* Just cancel transaction */
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	*nmaps = 0;	/* nothing set-up here */
+
+error_out:
+	return XFS_ERROR(error);
+}
+
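+/*
+ * Reserve delayed allocation space for a buffered write.  Writes that
+ * extend the file are rounded out to the mount's write iosize (and to
+ * stripe or realtime extent boundaries where appropriate), and the
+ * reservation is retried via xfs_flush_space() when no space is found.
+ */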
+int
+xfs_iomap_write_delay(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count,
+	int		ioflag,
+	xfs_bmbt_irec_t *ret_imap,
+	int		*nmaps)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
+	xfs_fileoff_t	offset_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_fsize_t	isize;
+	xfs_fsblock_t	firstblock;
+	int		nimaps;
+	int		error;
+	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
+	int		aeof;
+	int		fsynced = 0;
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+
+	/*
+	 * Make sure that the dquots are there. This doesn't hold
+	 * the ilock across a disk read.
+	 */
+
+	error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
+	if (error)
+		return XFS_ERROR(error);
+
+retry:
+	isize = ip->i_d.di_size;
+	if (io->io_new_size > isize) {
+		isize = io->io_new_size;
+	}
+
+	aeof = 0;
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+	/*
+	 * If the caller is doing a write at the end of the file,
+	 * then extend the allocation (and the buffer used for the write)
+	 * out to the file system's write iosize.  We clean up any extra
+	 * space left over when the file is closed in xfs_inactive().
+	 *
+	 * For sync writes, we are flushing delayed allocate space to
+	 * try to make additional space available for allocation near
+	 * the filesystem full boundary - preallocation hurts in that
+	 * situation, of course.
+	 */
+	if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
+		xfs_off_t	aligned_offset;
+		xfs_filblks_t   count_fsb;
+		unsigned int	iosize;
+		xfs_fileoff_t	ioalign;
+		int		n;
+		xfs_fileoff_t   start_fsb;
+
+		/*
+		 * If there are any real blocks past eof, then don't
+		 * do any speculative allocation.
+		 */
+		start_fsb = XFS_B_TO_FSBT(mp,
+					((xfs_ufsize_t)(offset + count - 1)));
+		count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+		while (count_fsb > 0) {
+			nimaps = XFS_WRITE_IMAPS;
+			error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
+					0, &firstblock, 0, imap, &nimaps, NULL);
+			if (error) {
+				return error;
+			}
+			for (n = 0; n < nimaps; n++) {
+			if (!(io->io_flags & XFS_IOCORE_RT) &&
+			    !imap[n].br_startblock) {
+				cmn_err(CE_PANIC, "Access to block "
+					"zero:  fs <%s> inode: %lld "
+					"start_block : %llx start_off "
+					": %llx blkcnt : %llx "
+					"extent-state : %x \n",
+					(ip->i_mount)->m_fsname,
+					(long long)ip->i_ino,
+					imap[n].br_startblock,
+					imap[n].br_startoff,
+					imap[n].br_blockcount,
+					imap[n].br_state);
+			}
+				if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
+				    (imap[n].br_startblock != DELAYSTARTBLOCK)) {
+					goto write_map;
+				}
+				start_fsb += imap[n].br_blockcount;
+				count_fsb -= imap[n].br_blockcount;
+			}
+		}
+		iosize = mp->m_writeio_blocks;
+		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
+		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
+		last_fsb = ioalign + iosize;
+		aeof = 1;
+	}
+write_map:
+	nimaps = XFS_WRITE_IMAPS;
+	firstblock = NULLFSBLOCK;
+
+	/*
+	 * If mounted with the "-o swalloc" option, round up the allocation
+	 * request to a stripe width boundary if the file size is >=
+	 * stripe width and we are allocating past the allocation eof.
+	 */
+	if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth 
+	    && (mp->m_flags & XFS_MOUNT_SWALLOC)
+	    && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
+		int eof;
+		xfs_fileoff_t new_last_fsb;
+
+		new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
+		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
+		if (error) {
+			return error;
+		}
+		if (eof) {
+			last_fsb = new_last_fsb;
+		}
+	/*
+	 * Round up the allocation request to a stripe unit (m_dalign) boundary
+	 * if the file size is >= stripe unit size, and we are allocating past
+	 * the allocation eof.
+	 */
+	} else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
+		   (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
+		int eof;
+		xfs_fileoff_t new_last_fsb;
+		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
+		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
+		if (error) {
+			return error;
+		}
+		if (eof) {
+			last_fsb = new_last_fsb;
+		}
+	/*
+	 * Round up the allocation request to a real-time extent boundary
+	 * if the file is on the real-time subvolume.
+	 */
+	} else if (io->io_flags & XFS_IOCORE_RT && aeof) {
+		int eof;
+		xfs_fileoff_t new_last_fsb;
+
+		new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
+		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+		if (error) {
+			return error;
+		}
+		if (eof)
+			last_fsb = new_last_fsb;
+	}
+	error = xfs_bmapi(NULL, ip, offset_fsb,
+			  (xfs_filblks_t)(last_fsb - offset_fsb),
+			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
+			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
+			  &nimaps, NULL);
+	/*
+	 * This can be EDQUOT, if nimaps == 0
+	 */
+	if (error && (error != ENOSPC)) {
+		return XFS_ERROR(error);
+	}
+	/*
+	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
+	 * then we must have run out of space.
+	 */
+	if (nimaps == 0) {
+		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
+					io, offset, count);
+		if (xfs_flush_space(ip, &fsynced, &ioflag))
+			return XFS_ERROR(ENOSPC);
+
+		error = 0;
+		goto retry;
+	}
+
+	*ret_imap = imap[0];
+	*nmaps = 1;
+	if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
+		cmn_err(CE_PANIC, "Access to block zero:  fs <%s> inode: %lld "
+			"start_block : %llx start_off : %llx blkcnt : %llx "
+			"extent-state : %x \n",
+			(ip->i_mount)->m_fsname,
+			(long long)ip->i_ino,
+			ret_imap->br_startblock, ret_imap->br_startoff,
+			ret_imap->br_blockcount, ret_imap->br_state);
+	}
+	return 0;
+}
+
+/*
+ * Pass in a delayed allocate extent, convert it to real extents;
+ * return to the caller the extent we create which maps on top of
+ * the originating caller's request.
+ *
+ * Called without a lock on the inode.
+ */
+int
+xfs_iomap_write_allocate(
+	xfs_inode_t	*ip,
+	xfs_bmbt_irec_t *map,
+	int		*retmap)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t    *io = &ip->i_iocore;
+	xfs_fileoff_t	offset_fsb, last_block;
+	xfs_fileoff_t	end_fsb, map_start_fsb;
+	xfs_fsblock_t	first_block;
+	xfs_bmap_free_t	free_list;
+	xfs_filblks_t	count_fsb;
+	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
+	xfs_trans_t	*tp;
+	int		i, nimaps, committed;
+	int		error = 0;
+	int		nres;
+
+	*retmap = 0;
+
+	/*
+	 * Make sure that the dquots are there.
+	 */
+	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+		return XFS_ERROR(error);
+
+	offset_fsb = map->br_startoff;
+	count_fsb = map->br_blockcount;
+	map_start_fsb = offset_fsb;
+
+	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
+
+	while (count_fsb != 0) {
+		/*
+		 * Set up a transaction with which to allocate the
+		 * backing store for the file.  Do allocations in a
+		 * loop until we get some space in the range we are
+		 * interested in.  The other space that might be allocated
+		 * is in the delayed allocation extent on which we sit
+		 * but before our buffer starts.
+		 */
+
+		nimaps = 0;
+		while (nimaps == 0) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+			error = xfs_trans_reserve(tp, nres,
+					XFS_WRITE_LOG_RES(mp),
+					0, XFS_TRANS_PERM_LOG_RES,
+					XFS_WRITE_LOG_COUNT);
+			if (error == ENOSPC) {
+				error = xfs_trans_reserve(tp, 0,
+						XFS_WRITE_LOG_RES(mp),
+						0,
+						XFS_TRANS_PERM_LOG_RES,
+						XFS_WRITE_LOG_COUNT);
+			}
+			if (error) {
+				xfs_trans_cancel(tp, 0);
+				return XFS_ERROR(error);
+			}
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(tp, ip);
+
+			XFS_BMAP_INIT(&free_list, &first_block);
+
+			nimaps = XFS_STRAT_WRITE_IMAPS;
+			/*
+			 * Ensure we don't go beyond eof - it is possible
+			 * the extents changed since we did the read call,
+			 * as we dropped the ilock in the interim.
+			 */
+
+			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
+			xfs_bmap_last_offset(NULL, ip, &last_block,
+				XFS_DATA_FORK);
+			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
+			if ((map_start_fsb + count_fsb) > last_block) {
+				count_fsb = last_block - map_start_fsb;
+				if (count_fsb == 0) {
+					error = EAGAIN;
+					goto trans_cancel;
+				}
+			}
+
+			/* Go get the actual blocks */
+			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
+					XFS_BMAPI_WRITE, &first_block, 1,
+					imap, &nimaps, &free_list);
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_bmap_finish(&tp, &free_list,
+					first_block, &committed);
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_trans_commit(tp,
+					XFS_TRANS_RELEASE_LOG_RES, NULL);
+			if (error)
+				goto error0;
+
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+
+		/*
+		 * See if we were able to allocate an extent that
+		 * covers at least part of the callers request
+		 */
+
+		for (i = 0; i < nimaps; i++) {
+			if (!(io->io_flags & XFS_IOCORE_RT) &&
+			    !imap[i].br_startblock) {
+				cmn_err(CE_PANIC, "Access to block zero:  "
+					"fs <%s> inode: %lld "
+					"start_block : %llx start_off : %llx "
+					"blkcnt : %llx extent-state : %x \n",
+					(ip->i_mount)->m_fsname,
+					(long long)ip->i_ino,
+					imap[i].br_startblock,
+					imap[i].br_startoff,
+					imap[i].br_blockcount, imap[i].br_state);
+			}
+			if ((map->br_startoff >= imap[i].br_startoff) &&
+			    (map->br_startoff < (imap[i].br_startoff +
+						 imap[i].br_blockcount))) {
+				*map = imap[i];
+				*retmap = 1;
+				XFS_STATS_INC(xs_xstrat_quick);
+				return 0;
+			}
+			count_fsb -= imap[i].br_blockcount;
+		}
+
+		/* So far we have not mapped the requested part of the
+		 * file, just surrounding data, try again.
+		 */
+		nimaps--;
+		offset_fsb = imap[nimaps].br_startoff +
+			     imap[nimaps].br_blockcount;
+		map_start_fsb = offset_fsb;
+	}
+
+trans_cancel:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error0:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return XFS_ERROR(error);
+}
+
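+/*
+ * Convert a range of unwritten (preallocated) extents to regular written
+ * extents after the data has been written to them, using one transaction
+ * per conversion until the whole range has been covered.
+ */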
+int
+xfs_iomap_write_unwritten(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t    *io = &ip->i_iocore;
+	xfs_trans_t	*tp;
+	xfs_fileoff_t	offset_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_filblks_t	numblks_fsb;
+	xfs_bmbt_irec_t	imap;
+	int		committed;
+	int		error;
+	int		nres;
+	int		nimaps;
+	xfs_fsblock_t	firstfsb;
+	xfs_bmap_free_t	free_list;
+
+	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
+				&ip->i_iocore, offset, count);
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
+
+	do {
+		nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+
+		/*
+		 * set up a transaction to convert the range of extents
+		 * from unwritten to real. Do allocations in a loop until
+		 * we have covered the range passed in.
+		 */
+
+		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+		error = xfs_trans_reserve(tp, nres,
+				XFS_WRITE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES,
+				XFS_WRITE_LOG_COUNT);
+		if (error) {
+			xfs_trans_cancel(tp, 0);
+			goto error0;
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * Modify the unwritten extent state of the buffer.
+		 */
+		XFS_BMAP_INIT(&free_list, &firstfsb);
+		nimaps = 1;
+		error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+				  XFS_BMAPI_WRITE, &firstfsb,
+				  1, &imap, &nimaps, &free_list);
+		if (error)
+			goto error_on_bmapi_transaction;
+
+		error = xfs_bmap_finish(&(tp), &(free_list),
+				firstfsb, &committed);
+		if (error)
+			goto error_on_bmapi_transaction;
+
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		if (error)
+			goto error0;
+
+		if (!(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) {
+			cmn_err(CE_PANIC, "Access to block zero:  fs <%s> "
+				"inode: %lld start_block : %llx start_off : "
+				"%llx blkcnt : %llx extent-state : %x \n",
+				(ip->i_mount)->m_fsname,
+				(long long)ip->i_ino,
+				imap.br_startblock, imap.br_startoff,
+				imap.br_blockcount, imap.br_state);
+		}
+
+		if ((numblks_fsb = imap.br_blockcount) == 0) {
+			/*
+			 * The numblks_fsb value should always get
+			 * smaller, otherwise the loop is stuck.
+			 */
+			ASSERT(imap.br_blockcount);
+			break;
+		}
+		offset_fsb += numblks_fsb;
+		count_fsb -= numblks_fsb;
+	} while (count_fsb > 0);
+
+	return 0;
+
+error_on_bmapi_transaction:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+error0:
+	return XFS_ERROR(error);
+}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
new file mode 100644
index 0000000..31c9108
--- /dev/null
+++ b/fs/xfs/xfs_iomap.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2003,2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+
+
+#ifndef __XFS_IOMAP_H__
+#define __XFS_IOMAP_H__
+
+#define IOMAP_DADDR_NULL ((xfs_daddr_t) (-1LL))
+
+
+typedef enum {				/* iomap_flags values */
+	IOMAP_EOF =		0x01,	/* mapping contains EOF   */
+	IOMAP_HOLE =		0x02,	/* mapping covers a hole  */
+	IOMAP_DELAY =		0x04,	/* mapping covers delalloc region  */
+	IOMAP_REALTIME =	0x10,	/* mapping on the realtime device  */
+	IOMAP_UNWRITTEN =	0x20,	/* mapping covers allocated */
+					/* but uninitialized file data  */
+	IOMAP_NEW =		0x40	/* just allocate */
+} iomap_flags_t;
+
+typedef enum {
+	/* base extent manipulation calls */
+	BMAPI_READ = (1 << 0),		/* read extents */
+	BMAPI_WRITE = (1 << 1),		/* create extents */
+	BMAPI_ALLOCATE = (1 << 2),	/* delayed allocate to real extents */
+	BMAPI_UNWRITTEN  = (1 << 3),	/* unwritten extents to real extents */
+	/* modifiers */
+	BMAPI_IGNSTATE = (1 << 4),	/* ignore unwritten state on read */
+	BMAPI_DIRECT = (1 << 5),		/* direct instead of buffered write */
+	BMAPI_MMAP = (1 << 6),		/* allocate for mmap write */
+	BMAPI_SYNC = (1 << 7),		/* sync write to flush delalloc space */
+	BMAPI_TRYLOCK = (1 << 8),	/* non-blocking request */
+	BMAPI_DEVICE = (1 << 9),	/* we only want to know the device */
+} bmapi_flags_t;
+
+
+/*
+ * xfs_iomap_t:  File system I/O map
+ *
+ * The iomap_bn field is expressed in 512-byte blocks, and is where the 
+ * mapping starts on disk.
+ *
+ * The iomap_offset, iomap_bsize and iomap_delta fields are in bytes.
+ * iomap_offset is the offset of the mapping in the file itself.
+ * iomap_bsize is the size of the mapping,  iomap_delta is the 
+ * desired data's offset into the mapping, given the offset supplied 
+ * to the file I/O map routine.
+ *
+ * When a request is made to read beyond the logical end of the object,
+ * the amount of available data may be 0, but iomap_offset and iomap_bsize
+ * should still describe the actual amount of underlying storage that has
+ * been allocated, if any.
+ */
+
+typedef struct xfs_iomap {
+	xfs_daddr_t		iomap_bn;	/* first 512b blk of mapping */
+	xfs_buftarg_t		*iomap_target;
+	loff_t			iomap_offset;	/* offset of mapping, bytes */
+	loff_t			iomap_bsize;	/* size of mapping, bytes */
+	size_t			iomap_delta;	/* offset into mapping, bytes */
+	iomap_flags_t		iomap_flags;
+} xfs_iomap_t;
+
+struct xfs_iocore;
+struct xfs_inode;
+struct xfs_bmbt_irec;
+
+extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int,
+		     struct xfs_iomap *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t,
+				  int, struct xfs_bmbt_irec *, int *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, int,
+				 struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *,
+				struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t);
+
+#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
new file mode 100644
index 0000000..8fbc8d3
--- /dev/null
+++ b/fs/xfs/xfs_itable.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ag.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_ialloc.h"
+#include "xfs_itable.h"
+#include "xfs_error.h"
+
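+/*
+ * Fall back to no-op stubs when the platform does not provide
+ * useracc()/unuseracc() for locking down the user's buffer.
+ */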
+#ifndef HAVE_USERACC
+#define useracc(ubuffer, size, flags, foo) (0)
+#define unuseracc(ubuffer, size, flags)
+#endif
+
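+/*
+ * Fill in a bulkstat buffer for a single inode by reading the inode in
+ * through xfs_iget() (the BULKSTAT_FG_IGET path).
+ */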
+STATIC int
+xfs_bulkstat_one_iget(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	xfs_daddr_t	bno,		/* starting bno of inode cluster */
+	xfs_bstat_t	*buf,		/* return buffer */
+	int		*stat)		/* BULKSTAT_RV_... */
+{
+	xfs_dinode_core_t *dic;		/* dinode core info pointer */
+	xfs_inode_t	*ip;		/* incore inode pointer */
+	int		error;
+
+	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
+	if (error) {
+		*stat = BULKSTAT_RV_NOTHING;
+		return error;
+	}
+
+	ASSERT(ip != NULL);
+	ASSERT(ip->i_blkno != (xfs_daddr_t)0);
+	if (ip->i_d.di_mode == 0) {
+		*stat = BULKSTAT_RV_NOTHING;
+		error = XFS_ERROR(ENOENT);
+		goto out_iput;
+	}
+
+	dic = &ip->i_d;
+
+	/* xfs_iget returns the following without needing
+	 * further change.
+	 */
+	buf->bs_nlink = dic->di_nlink;
+	buf->bs_projid = dic->di_projid;
+	buf->bs_ino = ino;
+	buf->bs_mode = dic->di_mode;
+	buf->bs_uid = dic->di_uid;
+	buf->bs_gid = dic->di_gid;
+	buf->bs_size = dic->di_size;
+	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
+	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
+	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
+	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
+	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
+	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
+	buf->bs_xflags = xfs_ip2xflags(ip);
+	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
+	buf->bs_extents = dic->di_nextents;
+	buf->bs_gen = dic->di_gen;
+	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
+	buf->bs_dmevmask = dic->di_dmevmask;
+	buf->bs_dmstate = dic->di_dmstate;
+	buf->bs_aextents = dic->di_anextents;
+
+	switch (dic->di_format) {
+	case XFS_DINODE_FMT_DEV:
+		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
+		buf->bs_blksize = BLKDEV_IOSIZE;
+		buf->bs_blocks = 0;
+		break;
+	case XFS_DINODE_FMT_LOCAL:
+	case XFS_DINODE_FMT_UUID:
+		buf->bs_rdev = 0;
+		buf->bs_blksize = mp->m_sb.sb_blocksize;
+		buf->bs_blocks = 0;
+		break;
+	case XFS_DINODE_FMT_EXTENTS:
+	case XFS_DINODE_FMT_BTREE:
+		buf->bs_rdev = 0;
+		buf->bs_blksize = mp->m_sb.sb_blocksize;
+		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
+		break;
+	}
+
+ out_iput:
+	xfs_iput(ip, XFS_ILOCK_SHARED);
+	return error;
+}
+
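+/*
+ * Fill in a bulkstat buffer straight from an on-disk inode, converting
+ * each field from its on-disk byte order (the BULKSTAT_FG_QUICK path,
+ * which avoids the iget).
+ */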
+STATIC int
+xfs_bulkstat_one_dinode(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	xfs_dinode_t	*dip,		/* dinode inode pointer */
+	xfs_bstat_t	*buf)		/* return buffer */
+{
+	xfs_dinode_core_t *dic;		/* dinode core info pointer */
+
+	dic = &dip->di_core;
+
+	/*
+	 * The inode format changed when we moved the link count and
+	 * made it 32 bits long.  If this is an old format inode,
+	 * convert it in memory to look like a new one.  If it gets
+	 * flushed to disk we will convert back before flushing or
+	 * logging it.  We zero out the new projid field and the old link
+	 * count field.  We'll handle clearing the pad field (the remains
+	 * of the old uuid field) when we actually convert the inode to
+	 * the new format. We don't change the version number so that we
+	 * can distinguish this from a real new format inode.
+	 */
+	if (INT_GET(dic->di_version, ARCH_CONVERT) == XFS_DINODE_VERSION_1) {
+		buf->bs_nlink = INT_GET(dic->di_onlink, ARCH_CONVERT);
+		buf->bs_projid = 0;
+	} else {
+		buf->bs_nlink = INT_GET(dic->di_nlink, ARCH_CONVERT);
+		buf->bs_projid = INT_GET(dic->di_projid, ARCH_CONVERT);
+	}
+
+	buf->bs_ino = ino;
+	buf->bs_mode = INT_GET(dic->di_mode, ARCH_CONVERT);
+	buf->bs_uid = INT_GET(dic->di_uid, ARCH_CONVERT);
+	buf->bs_gid = INT_GET(dic->di_gid, ARCH_CONVERT);
+	buf->bs_size = INT_GET(dic->di_size, ARCH_CONVERT);
+	buf->bs_atime.tv_sec = INT_GET(dic->di_atime.t_sec, ARCH_CONVERT);
+	buf->bs_atime.tv_nsec = INT_GET(dic->di_atime.t_nsec, ARCH_CONVERT);
+	buf->bs_mtime.tv_sec = INT_GET(dic->di_mtime.t_sec, ARCH_CONVERT);
+	buf->bs_mtime.tv_nsec = INT_GET(dic->di_mtime.t_nsec, ARCH_CONVERT);
+	buf->bs_ctime.tv_sec = INT_GET(dic->di_ctime.t_sec, ARCH_CONVERT);
+	buf->bs_ctime.tv_nsec = INT_GET(dic->di_ctime.t_nsec, ARCH_CONVERT);
+	buf->bs_xflags = xfs_dic2xflags(dic);
+	buf->bs_extsize = INT_GET(dic->di_extsize, ARCH_CONVERT) << mp->m_sb.sb_blocklog;
+	buf->bs_extents = INT_GET(dic->di_nextents, ARCH_CONVERT);
+	buf->bs_gen = INT_GET(dic->di_gen, ARCH_CONVERT);
+	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
+	buf->bs_dmevmask = INT_GET(dic->di_dmevmask, ARCH_CONVERT);
+	buf->bs_dmstate = INT_GET(dic->di_dmstate, ARCH_CONVERT);
+	buf->bs_aextents = INT_GET(dic->di_anextents, ARCH_CONVERT);
+
+	switch (INT_GET(dic->di_format, ARCH_CONVERT)) {
+	case XFS_DINODE_FMT_DEV:
+		buf->bs_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
+		buf->bs_blksize = BLKDEV_IOSIZE;
+		buf->bs_blocks = 0;
+		break;
+	case XFS_DINODE_FMT_LOCAL:
+	case XFS_DINODE_FMT_UUID:
+		buf->bs_rdev = 0;
+		buf->bs_blksize = mp->m_sb.sb_blocksize;
+		buf->bs_blocks = 0;
+		break;
+	case XFS_DINODE_FMT_EXTENTS:
+	case XFS_DINODE_FMT_BTREE:
+		buf->bs_rdev = 0;
+		buf->bs_blksize = mp->m_sb.sb_blocksize;
+		buf->bs_blocks = INT_GET(dic->di_nblocks, ARCH_CONVERT);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Return stat information for one inode.
+ * Return 0 if ok, else errno.
+ */
+int		       		/* error status */
+xfs_bulkstat_one(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	void		__user *buffer,	/* buffer to place output in */
+	int		ubsize,		/* size of buffer */
+	void		*private_data,	/* my private data */
+	xfs_daddr_t	bno,		/* starting bno of inode cluster */
+	int		*ubused,	/* bytes used by me */
+	void		*dibuff,	/* on-disk inode buffer */
+	int		*stat)		/* BULKSTAT_RV_... */
+{
+	xfs_bstat_t	*buf;		/* return buffer */
+	int		error = 0;	/* error value */
+	xfs_dinode_t	*dip;		/* dinode inode pointer */
+
+	dip = (xfs_dinode_t *)dibuff;
+
+	if (!buffer || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
+	    (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
+	     (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) {
+		*stat = BULKSTAT_RV_NOTHING;
+		return XFS_ERROR(EINVAL);
+	}
+	if (ubsize < sizeof(*buf)) {
+		*stat = BULKSTAT_RV_NOTHING;
+		return XFS_ERROR(ENOMEM);
+	}
+
+	buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
+
+	if (dip == NULL) {
+		/* We're not being passed a pointer to a dinode.  This happens
+		 * if BULKSTAT_FG_IGET is selected.  Do the iget.
+		 */
+		error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
+		if (error)
+			goto out_free;
+	} else {
+		xfs_bulkstat_one_dinode(mp, ino, dip, buf);
+	}
+
+	if (copy_to_user(buffer, buf, sizeof(*buf)))  {
+		*stat = BULKSTAT_RV_NOTHING;
+		error =  EFAULT;
+		goto out_free;
+	}
+
+	*stat = BULKSTAT_RV_DIDONE;
+	if (ubused)
+		*ubused = sizeof(*buf);
+
+ out_free:
+	kmem_free(buf, sizeof(*buf));
+	return error;
+}
+
+/*
+ * Return stat information in bulk (by-inode) for the filesystem.
+ */
+int					/* error status */
+xfs_bulkstat(
+	xfs_mount_t		*mp,	/* mount point for filesystem */
+	xfs_ino_t		*lastinop, /* last inode returned */
+	int			*ubcountp, /* size of buffer/count returned */
+	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
+	void			*private_data,/* private data for formatter */
+	size_t			statstruct_size, /* sizeof struct filling */
+	char			__user *ubuffer, /* buffer with inode stats */
+	int			flags,	/* defined in xfs_itable.h */
+	int			*done)	/* 1 if there're more stats to get */
+{
+	xfs_agblock_t		agbno=0;/* allocation group block number */
+	xfs_buf_t		*agbp;	/* agi header buffer */
+	xfs_agi_t		*agi;	/* agi header data */
+	xfs_agino_t		agino;	/* inode # in allocation group */
+	xfs_agnumber_t		agno;	/* allocation group number */
+	xfs_daddr_t		bno;	/* inode cluster start daddr */
+	int			chunkidx; /* current index into inode chunk */
+	int			clustidx; /* current index into inode cluster */
+	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
+	int			end_of_ag; /* set if we've seen the ag end */
+	int			error;	/* error code */
+	int                     fmterror;/* bulkstat formatter result */
+	__int32_t		gcnt;	/* current btree rec's count */
+	xfs_inofree_t		gfree;	/* current btree rec's free mask */
+	xfs_agino_t		gino;	/* current btree rec's start inode */
+	int			i;	/* loop index */
+	int			icount;	/* count of inodes good in irbuf */
+	xfs_ino_t		ino;	/* inode number (filesystem) */
+	xfs_inobt_rec_t		*irbp;	/* current irec buffer pointer */
+	xfs_inobt_rec_t		*irbuf;	/* start of irec buffer */
+	xfs_inobt_rec_t		*irbufend; /* end of good irec buffer entries */
+	xfs_ino_t		lastino=0; /* last inode number returned */
+	int			nbcluster; /* # of blocks in a cluster */
+	int			nicluster; /* # of inodes in a cluster */
+	int			nimask;	/* mask for inode clusters */
+	int			nirbuf;	/* size of irbuf */
+	int			rval;	/* return value error code */
+	int			tmp;	/* result value from btree calls */
+	int			ubcount; /* size of user's buffer */
+	int			ubleft;	/* bytes left in user's buffer */
+	char			__user *ubufp;	/* pointer into user's buffer */
+	int			ubelem;	/* spaces used in user's buffer */
+	int			ubused;	/* bytes used by formatter */
+	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */
+	xfs_dinode_t		*dip;	/* ptr into bp for specific inode */
+	xfs_inode_t		*ip;	/* ptr to in-core inode struct */
+
+	/*
+	 * Get the last inode value, see if there's nothing to do.
+	 */
+	ino = (xfs_ino_t)*lastinop;
+	dip = NULL;
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
+	if (agno >= mp->m_sb.sb_agcount ||
+	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+		*done = 1;
+		*ubcountp = 0;
+		return 0;
+	}
+	ubcount = *ubcountp; /* statstruct's */
+	ubleft = ubcount * statstruct_size; /* bytes */
+	*ubcountp = ubelem = 0;
+	*done = 0;
+	fmterror = 0;
+	ubufp = ubuffer;
+	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
+		mp->m_sb.sb_inopblock :
+		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
+	nimask = ~(nicluster - 1);
+	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
+	/*
+	 * Lock down the user's buffer. If no buffer was passed in, as is
+	 * the case when the disk quota code calls here, we skip this.
+	 */
+	if (ubuffer &&
+	    (error = useracc(ubuffer, ubcount * statstruct_size,
+			(B_READ|B_PHYS), NULL))) {
+		return error;
+	}
+	/*
+	 * Allocate a page-sized buffer for inode btree records.
+	 * We could try allocating something smaller, but for normal
+	 * calls we'll always (potentially) need the whole page.
+	 */
+	irbuf = kmem_alloc(NBPC, KM_SLEEP);
+	nirbuf = NBPC / sizeof(*irbuf);
+	/*
+	 * Loop over the allocation groups, starting from the last
+	 * inode returned; 0 means start of the allocation group.
+	 */
+	rval = 0;
+	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
+		bp = NULL;
+		down_read(&mp->m_peraglock);
+		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+		up_read(&mp->m_peraglock);
+		if (error) {
+			/*
+			 * Skip this allocation group and go to the next one.
+			 */
+			agno++;
+			agino = 0;
+			continue;
+		}
+		agi = XFS_BUF_TO_AGI(agbp);
+		/*
+		 * Allocate and initialize a btree cursor for ialloc btree.
+		 */
+		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
+			(xfs_inode_t *)0, 0);
+		irbp = irbuf;
+		irbufend = irbuf + nirbuf;
+		end_of_ag = 0;
+		/*
+		 * If we're returning in the middle of an allocation group,
+		 * we need to get the remainder of the chunk we're in.
+		 */
+		if (agino > 0) {
+			/*
+			 * Lookup the inode chunk that this inode lives in.
+			 */
+			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
+			if (!error &&	/* no I/O error */
+			    tmp &&	/* lookup succeeded */
+					/* got the record, should always work */
+			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
+				    &gfree, &i)) &&
+			    i == 1 &&
+					/* this is the right chunk */
+			    agino < gino + XFS_INODES_PER_CHUNK &&
+					/* lastino was not last in chunk */
+			    (chunkidx = agino - gino + 1) <
+				    XFS_INODES_PER_CHUNK &&
+					/* there are some left allocated */
+			    XFS_INOBT_MASKN(chunkidx,
+				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
+				/*
+				 * Grab the chunk record.  Mark all the
+				 * uninteresting inodes (because they're
+				 * before our start point) free.
+				 */
+				for (i = 0; i < chunkidx; i++) {
+					if (XFS_INOBT_MASK(i) & ~gfree)
+						gcnt++;
+				}
+				gfree |= XFS_INOBT_MASKN(0, chunkidx);
+				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
+				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
+				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+				irbp++;
+				agino = gino + XFS_INODES_PER_CHUNK;
+				icount = XFS_INODES_PER_CHUNK - gcnt;
+			} else {
+				/*
+				 * If any of those tests failed, bump the
+				 * inode number (just in case).
+				 */
+				agino++;
+				icount = 0;
+			}
+			/*
+			 * In any case, increment to the next record.
+			 */
+			if (!error)
+				error = xfs_inobt_increment(cur, 0, &tmp);
+		} else {
+			/*
+			 * Start of ag.  Lookup the first inode chunk.
+			 */
+			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
+			icount = 0;
+		}
+		/*
+		 * Loop through inode btree records in this ag,
+		 * until we run out of inodes or space in the buffer.
+		 */
+		while (irbp < irbufend && icount < ubcount) {
+			/*
+			 * Loop as long as we're unable to read the
+			 * inode btree.
+			 */
+			while (error) {
+				agino += XFS_INODES_PER_CHUNK;
+				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
+						INT_GET(agi->agi_length, ARCH_CONVERT))
+					break;
+				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
+							    &tmp);
+			}
+			/*
+			 * If we ran off the end of the ag, either with an
+			 * error or the normal way, mark the end and stop
+			 * collecting.
+			 */
+			if (error ||
+			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
+				    &gfree, &i)) ||
+			    i == 0) {
+				end_of_ag = 1;
+				break;
+			}
+			/*
+			 * If this chunk has any allocated inodes, save it.
+			 */
+			if (gcnt < XFS_INODES_PER_CHUNK) {
+				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
+				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
+				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+				irbp++;
+				icount += XFS_INODES_PER_CHUNK - gcnt;
+			}
+			/*
+			 * Set agino to after this chunk and bump the cursor.
+			 */
+			agino = gino + XFS_INODES_PER_CHUNK;
+			error = xfs_inobt_increment(cur, 0, &tmp);
+		}
+		/*
+		 * Drop the btree buffers and the agi buffer.
+		 * We can't hold any of the locks these represent
+		 * when calling iget.
+		 */
+		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+		xfs_buf_relse(agbp);
+		/*
+		 * Now format all the good inodes into the user's buffer.
+		 */
+		irbufend = irbp;
+		for (irbp = irbuf;
+		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
+			/*
+			 * Read-ahead the next chunk's worth of inodes.
+			 */
+			if (&irbp[1] < irbufend) {
+				/*
+				 * Loop over all clusters in the next chunk.
+				 * Do a readahead if there are any allocated
+				 * inodes in that cluster.
+				 */
+				for (agbno = XFS_AGINO_TO_AGBNO(mp,
+							INT_GET(irbp[1].ir_startino, ARCH_CONVERT)),
+				     chunkidx = 0;
+				     chunkidx < XFS_INODES_PER_CHUNK;
+				     chunkidx += nicluster,
+				     agbno += nbcluster) {
+					if (XFS_INOBT_MASKN(chunkidx,
+							    nicluster) &
+					    ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT)))
+						xfs_btree_reada_bufs(mp, agno,
+							agbno, nbcluster);
+				}
+			}
+			/*
+			 * Now process this chunk of inodes.
+			 */
+			for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0;
+			     ubleft > 0 &&
+				INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK;
+			     chunkidx++, clustidx++, agino++) {
+				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+				/*
+				 * Recompute agbno if this is the
+				 * first inode of the cluster.
+				 *
+				 * Careful with clustidx.  There can be
+				 * multiple clusters per chunk, a single
+				 * cluster per chunk or a cluster that has
+				 * inodes represented from several different
+				 * chunks (if blocksize is large).
+				 *
+				 * Because of this, the starting clustidx is
+				 * initialized to zero in this loop but must
+				 * later be reset after reading in the cluster
+				 * buffer.
+				 */
+				if ((chunkidx & (nicluster - 1)) == 0) {
+					agbno = XFS_AGINO_TO_AGBNO(mp,
+							INT_GET(irbp->ir_startino, ARCH_CONVERT)) +
+						((chunkidx & nimask) >>
+						 mp->m_sb.sb_inopblog);
+
+					if (flags & BULKSTAT_FG_QUICK) {
+						ino = XFS_AGINO_TO_INO(mp, agno,
+								       agino);
+						bno = XFS_AGB_TO_DADDR(mp, agno,
+								       agbno);
+
+						/*
+						 * Get the inode cluster buffer
+						 */
+						ASSERT(xfs_inode_zone != NULL);
+						ip = kmem_zone_zalloc(xfs_inode_zone,
+								      KM_SLEEP);
+						ip->i_ino = ino;
+						ip->i_mount = mp;
+						if (bp)
+							xfs_buf_relse(bp);
+						error = xfs_itobp(mp, NULL, ip,
+								  &dip, &bp, bno);
+						if (!error)
+							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
+						kmem_zone_free(xfs_inode_zone, ip);
+						if (XFS_TEST_ERROR(error != 0,
+								   mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
+								   XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
+							bp = NULL;
+							break;
+						}
+					}
+				}
+				/*
+				 * Skip if this inode is free.
+				 */
+				if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT))
+					continue;
+				/*
+				 * Count used inodes as free so we can tell
+				 * when the chunk is used up.
+				 */
+				INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1);
+				ino = XFS_AGINO_TO_INO(mp, agno, agino);
+				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
+				if (flags & BULKSTAT_FG_QUICK) {
+					dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+					      (clustidx << mp->m_sb.sb_inodelog));
+
+					if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT)
+						    != XFS_DINODE_MAGIC
+					    || !XFS_DINODE_GOOD_VERSION(
+						    INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
+						continue;
+				}
+
+				/*
+				 * Get the inode and fill in a single buffer.
+				 * BULKSTAT_FG_QUICK uses dip to fill it in.
+				 * BULKSTAT_FG_IGET uses igets.
+				 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
+				 * This is also used to count inodes/blks, etc
+				 * in xfs_qm_quotacheck.
+				 */
+				ubused = statstruct_size;
+				error = formatter(mp, ino, ubufp,
+						ubleft, private_data,
+						bno, &ubused, dip, &fmterror);
+				if (fmterror == BULKSTAT_RV_NOTHING) {
+					if (error == ENOMEM)
+						ubleft = 0;
+					continue;
+				}
+				if (fmterror == BULKSTAT_RV_GIVEUP) {
+					ubleft = 0;
+					ASSERT(error);
+					rval = error;
+					break;
+				}
+				if (ubufp)
+					ubufp += ubused;
+				ubleft -= ubused;
+				ubelem++;
+				lastino = ino;
+			}
+		}
+
+		if (bp)
+			xfs_buf_relse(bp);
+
+		/*
+		 * Set up for the next loop iteration.
+		 */
+		if (ubleft > 0) {
+			if (end_of_ag) {
+				agno++;
+				agino = 0;
+			} else
+				agino = XFS_INO_TO_AGINO(mp, lastino);
+		} else
+			break;
+	}
+	/*
+	 * Done: we've run out of filesystem or out of space to put the data.
+	 */
+	kmem_free(irbuf, NBPC);
+	if (ubuffer)
+		unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS));
+	*ubcountp = ubelem;
+	if (agno >= mp->m_sb.sb_agcount) {
+		/*
+		 * If we ran out of filesystem, mark lastino as off
+		 * the end of the filesystem, so the next call
+		 * will return immediately.
+		 */
+		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
+		*done = 1;
+	} else
+		*lastinop = (xfs_ino_t)lastino;
+
+	return rval;
+}
+
+/*
+ * Return stat information in bulk (by-inode) for the filesystem.
+ * Special case for non-sequential one inode bulkstat.
+ */
+int					/* error status */
+xfs_bulkstat_single(
+	xfs_mount_t		*mp,	/* mount point for filesystem */
+	xfs_ino_t		*lastinop, /* inode to return */
+	char			__user *buffer, /* buffer with inode stats */
+	int			*done)	/* 1 if there're more stats to get */
+{
+	int			count;	/* count value for bulkstat call */
+	int			error;	/* return value */
+	xfs_ino_t		ino;	/* filesystem inode number */
+	int			res;	/* result from bs1 */
+
+	/*
+	 * Note that requesting well-formed inode numbers that are not
+	 * actually allocated to inodes will most likely cause xfs_itobp
+	 * to generate warning messages about bad magic numbers.  This is
+	 * OK; the fact that the number doesn't refer to a real inode is
+	 * handled by the error check below.  It is done this way to make
+	 * the usual case faster at the expense of the error case.
+	 */
+
+	ino = (xfs_ino_t)*lastinop;
+	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
+				 NULL, 0, NULL, NULL, &res);
+	if (error) {
+		/*
+		 * The special-case path failed; do it the "long" way
+		 * to see if that works.
+		 */
+		(*lastinop)--;
+		count = 1;
+		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
+				NULL, sizeof(xfs_bstat_t), buffer,
+				BULKSTAT_FG_IGET, done))
+			return error;
+		if (count == 0 || (xfs_ino_t)*lastinop != ino)
+			return error == EFSCORRUPTED ?
+				XFS_ERROR(EINVAL) : error;
+		else
+			return 0;
+	}
+	*done = 0;
+	return 0;
+}
+
+/*
+ * Return inode number table for the filesystem.
+ */
+int					/* error status */
+xfs_inumbers(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	*lastino,	/* last inode returned */
+	int		*count,		/* size of buffer/count returned */
+	xfs_inogrp_t	__user *ubuffer)/* buffer with inode descriptions */
+{
+	xfs_buf_t	*agbp;
+	xfs_agino_t	agino;
+	xfs_agnumber_t	agno;
+	int		bcount;
+	xfs_inogrp_t	*buffer;
+	int		bufidx;
+	xfs_btree_cur_t	*cur;
+	int		error;
+	__int32_t	gcnt;
+	xfs_inofree_t	gfree;
+	xfs_agino_t	gino;
+	int		i;
+	xfs_ino_t	ino;
+	int		left;
+	int		tmp;
+
+	ino = (xfs_ino_t)*lastino;
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
+	left = *count;
+	*count = 0;
+	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
+	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	error = bufidx = 0;
+	cur = NULL;
+	agbp = NULL;
+	while (left > 0 && agno < mp->m_sb.sb_agcount) {
+		if (agbp == NULL) {
+			down_read(&mp->m_peraglock);
+			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+			up_read(&mp->m_peraglock);
+			if (error) {
+				/*
+				 * If we can't read the AGI of this ag,
+				 * then just skip to the next one.
+				 */
+				ASSERT(cur == NULL);
+				agbp = NULL;
+				agno++;
+				agino = 0;
+				continue;
+			}
+			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
+				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
+			if (error) {
+				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+				cur = NULL;
+				xfs_buf_relse(agbp);
+				agbp = NULL;
+				/*
+				 * Move up to the last inode in the current
+				 * chunk.  The lookup_ge will always get
+				 * us the first inode in the next chunk.
+				 */
+				agino += XFS_INODES_PER_CHUNK - 1;
+				continue;
+			}
+		}
+		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
+			&i)) ||
+		    i == 0) {
+			xfs_buf_relse(agbp);
+			agbp = NULL;
+			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+			cur = NULL;
+			agno++;
+			agino = 0;
+			continue;
+		}
+		agino = gino + XFS_INODES_PER_CHUNK - 1;
+		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
+		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
+		buffer[bufidx].xi_allocmask = ~gfree;
+		bufidx++;
+		left--;
+		if (bufidx == bcount) {
+			if (copy_to_user(ubuffer, buffer,
+					bufidx * sizeof(*buffer))) {
+				error = XFS_ERROR(EFAULT);
+				break;
+			}
+			ubuffer += bufidx;
+			*count += bufidx;
+			bufidx = 0;
+		}
+		if (left) {
+			error = xfs_inobt_increment(cur, 0, &tmp);
+			if (error) {
+				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+				cur = NULL;
+				xfs_buf_relse(agbp);
+				agbp = NULL;
+				/*
+				 * The agino value has already been bumped.
+				 * Just try to skip up to it.
+				 */
+				agino += XFS_INODES_PER_CHUNK;
+				continue;
+			}
+		}
+	}
+	if (!error) {
+		if (bufidx) {
+			if (copy_to_user(ubuffer, buffer,
+					bufidx * sizeof(*buffer)))
+				error = XFS_ERROR(EFAULT);
+			else
+				*count += bufidx;
+		}
+		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
+	}
+	kmem_free(buffer, bcount * sizeof(*buffer));
+	if (cur)
+		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
+					   XFS_BTREE_NOERROR));
+	if (agbp)
+		xfs_buf_relse(agbp);
+	return error;
+}
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
new file mode 100644
index 0000000..2be9d18
--- /dev/null
+++ b/fs/xfs/xfs_itable.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ITABLE_H__
+#define	__XFS_ITABLE_H__
+
+/*
+ * xfs_bulkstat() is used to fill in xfs_bstat structures as well as dm_stat
+ * structures (by the dmi library). This is a pointer to a formatter function
+ * that will iget the inode and fill in the appropriate structure.
+ * See xfs_bulkstat_one() and xfs_dm_bulkstat_one() in dmapi_xfs.c.
+ */
+typedef int (*bulkstat_one_pf)(struct xfs_mount	*mp,
+			       xfs_ino_t	ino,
+			       void		__user *buffer,
+			       int		ubsize,
+			       void		*private_data,
+			       xfs_daddr_t	bno,
+			       int		*ubused,
+			       void		*dip,
+			       int		*stat);
+
+/*
+ * Values for stat return value.
+ */
+#define	BULKSTAT_RV_NOTHING	0
+#define	BULKSTAT_RV_DIDONE	1
+#define	BULKSTAT_RV_GIVEUP	2
+
+/*
+ * Values for bulkstat flag argument.
+ */
+#define	BULKSTAT_FG_IGET	0x1	/* Go through the buffer cache */
+#define	BULKSTAT_FG_QUICK	0x2	/* No iget, walk the dinode cluster */
+#define BULKSTAT_FG_VFSLOCKED	0x4	/* Already have vfs lock */
+
+/*
+ * Return stat information in bulk (by-inode) for the filesystem.
+ */
+int					/* error status */
+xfs_bulkstat(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	*lastino,	/* last inode returned */
+	int		*count,		/* size of buffer/count returned */
+	bulkstat_one_pf formatter,	/* func that'd fill a single buf */
+	void		*private_data,	/* private data for formatter */
+	size_t		statstruct_size,/* sizeof struct that we're filling */
+	char		__user *ubuffer,/* buffer with inode stats */
+	int		flags,		/* flag to control access method */
+	int		*done);		/* 1 if there're more stats to get */
+
+int
+xfs_bulkstat_single(
+	xfs_mount_t		*mp,
+	xfs_ino_t		*lastinop,
+	char			__user *buffer,
+	int			*done);
+
+int
+xfs_bulkstat_one(
+	xfs_mount_t		*mp,
+	xfs_ino_t		ino,
+	void			__user *buffer,
+	int			ubsize,
+	void			*private_data,
+	xfs_daddr_t		bno,
+	int			*ubused,
+	void			*dibuff,
+	int			*stat);
+
+int					/* error status */
+xfs_inumbers(
+	xfs_mount_t		*mp,	/* mount point for filesystem */
+	xfs_ino_t		*last,	/* last inode returned */
+	int			*count,	/* size of buffer/count returned */
+	xfs_inogrp_t		__user *buffer);/* buffer with inode info */
+
+#endif	/* __XFS_ITABLE_H__ */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
new file mode 100644
index 0000000..092d5fb
--- /dev/null
+++ b/fs/xfs/xfs_log.c
@@ -0,0 +1,3560 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * High level interface routines for log manager
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_ag.h"
+#include "xfs_sb.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_log_recover.h"
+#include "xfs_bit.h"
+#include "xfs_rw.h"
+#include "xfs_trans_priv.h"
+
+
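+/*
+ * Advance the write pointer and the offset by "bytes" and reduce the
+ * remaining length to match while copying log regions.
+ */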
+#define xlog_write_adv_cnt(ptr, len, off, bytes) \
+	{ (ptr) += (bytes); \
+	  (len) -= (bytes); \
+	  (off) += (bytes);}
+
+/* Local miscellaneous function prototypes */
+STATIC int	 xlog_bdstrat_cb(struct xfs_buf *);
+STATIC int	 xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
+				    xlog_in_core_t **, xfs_lsn_t *);
+STATIC xlog_t *  xlog_alloc_log(xfs_mount_t	*mp,
+				xfs_buftarg_t	*log_target,
+				xfs_daddr_t	blk_offset,
+				int		num_bblks);
+STATIC int	 xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
+STATIC void	 xlog_unalloc_log(xlog_t *log);
+STATIC int	 xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
+			    int nentries, xfs_log_ticket_t tic,
+			    xfs_lsn_t *start_lsn,
+			    xlog_in_core_t **commit_iclog,
+			    uint flags);
+
+/* local state machine functions */
+STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
+STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
+STATIC int  xlog_state_get_iclog_space(xlog_t		*log,
+				       int		len,
+				       xlog_in_core_t	**iclog,
+				       xlog_ticket_t	*ticket,
+				       int		*continued_write,
+				       int		*logoffsetp);
+STATIC void xlog_state_put_ticket(xlog_t	*log,
+				  xlog_ticket_t *tic);
+STATIC int  xlog_state_release_iclog(xlog_t		*log,
+				     xlog_in_core_t	*iclog);
+STATIC void xlog_state_switch_iclogs(xlog_t		*log,
+				     xlog_in_core_t *iclog,
+				     int		eventual_size);
+STATIC int  xlog_state_sync(xlog_t *log, xfs_lsn_t lsn, uint flags);
+STATIC int  xlog_state_sync_all(xlog_t *log, uint flags);
+STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
+
+/* local functions to manipulate grant head */
+STATIC int  xlog_grant_log_space(xlog_t		*log,
+				 xlog_ticket_t	*xtic);
+STATIC void xlog_grant_push_ail(xfs_mount_t	*mp,
+				int		need_bytes);
+STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
+					   xlog_ticket_t *ticket);
+STATIC int xlog_regrant_write_log_space(xlog_t		*log,
+					 xlog_ticket_t  *ticket);
+STATIC void xlog_ungrant_log_space(xlog_t	 *log,
+				   xlog_ticket_t *ticket);
+
+
+/* local ticket functions */
+STATIC void		xlog_state_ticket_alloc(xlog_t *log);
+STATIC xlog_ticket_t	*xlog_ticket_get(xlog_t *log,
+					 int	unit_bytes,
+					 int	count,
+					 char	clientid,
+					 uint	flags);
+STATIC void		xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket);
+
+/* local debug functions */
+#if defined(DEBUG) && !defined(XLOG_NOLOG)
+STATIC void	xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
+STATIC void	xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
+				  int count, boolean_t syncing);
+STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
+				     xfs_lsn_t tail_lsn);
+#else
+#define xlog_verify_dest_ptr(a,b)
+#define xlog_verify_grant_head(a,b)
+#define xlog_verify_iclog(a,b,c,d)
+#define xlog_verify_tail_lsn(a,b,c)
+#endif
+
+int		xlog_iclogs_empty(xlog_t *log);
+
+#ifdef DEBUG
+int xlog_do_error = 0;
+int xlog_req_num  = 0;
+int xlog_error_mod = 33;
+#endif
+
+#define XLOG_FORCED_SHUTDOWN(log)	(log->l_flags & XLOG_IO_ERROR)
+
+/*
+ * 0 => disable log manager
+ * 1 => enable log manager
+ * 2 => enable log manager and log debugging
+ */
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+int   xlog_debug = 1;
+xfs_buftarg_t *xlog_target;
+#endif
+
+#if defined(XFS_LOG_TRACE)
+
+void
+xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
+{
+	if (! log->l_grant_trace) {
+		log->l_grant_trace = ktrace_alloc(1024, KM_NOSLEEP);
+		if (! log->l_grant_trace)
+			return;
+	}
+
+	ktrace_enter(log->l_grant_trace,
+		     (void *)tic,
+		     (void *)log->l_reserve_headq,
+		     (void *)log->l_write_headq,
+		     (void *)((unsigned long)log->l_grant_reserve_cycle),
+		     (void *)((unsigned long)log->l_grant_reserve_bytes),
+		     (void *)((unsigned long)log->l_grant_write_cycle),
+		     (void *)((unsigned long)log->l_grant_write_bytes),
+		     (void *)((unsigned long)log->l_curr_cycle),
+		     (void *)((unsigned long)log->l_curr_block),
+		     (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
+		     (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
+		     (void *)string,
+		     (void *)((unsigned long)13),
+		     (void *)((unsigned long)14),
+		     (void *)((unsigned long)15),
+		     (void *)((unsigned long)16));
+}
+
+void
+xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
+{
+	pid_t pid;
+
+	pid = current_pid();
+
+	if (!iclog->ic_trace)
+		iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);
+	ktrace_enter(iclog->ic_trace,
+		     (void *)((unsigned long)state),
+		     (void *)((unsigned long)pid),
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0,
+		     (void *)0);
+}
+
+#else
+#define	xlog_trace_loggrant(log,tic,string)
+#define	xlog_trace_iclog(iclog,state)
+#endif /* XFS_LOG_TRACE */
+
+/*
+ * NOTES:
+ *
+ *	1. currblock field gets updated at startup and after in-core logs
+ *		are marked with WANT_SYNC.
+ */
+
+/*
+ * This routine is called when a user of a log manager ticket is done with
+ * the reservation.  If the ticket was ever used, then a commit record for
+ * the associated transaction is written out as a log operation header with
+ * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
+ * a given ticket.  If the ticket was one with a permanent reservation, then
+ * a few operations are done differently.  Permanent reservation tickets by
+ * default don't release the reservation.  They just commit the current
+ * transaction with the belief that the reservation is still needed.  A flag
+ * must be passed in before permanent reservations are actually released.
+ * When these types of tickets are not released, they need to be set into
+ * the inited state again.  By doing this, a start record will be written
+ * out when the next write occurs.
+ */
+xfs_lsn_t
+xfs_log_done(xfs_mount_t	*mp,
+	     xfs_log_ticket_t	xtic,
+	     void		**iclog,
+	     uint		flags)
+{
+	xlog_t		*log    = mp->m_log;
+	xlog_ticket_t	*ticket = (xfs_log_ticket_t) xtic;
+	xfs_lsn_t	lsn	= 0;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return 0;
+#endif
+
+	if (XLOG_FORCED_SHUTDOWN(log) ||
+	    /*
+	     * If nothing was ever written, don't write out commit record.
+	     * If we get an error, just continue and give back the log ticket.
+	     */
+	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
+	     (xlog_commit_record(mp, ticket,
+				 (xlog_in_core_t **)iclog, &lsn)))) {
+		lsn = (xfs_lsn_t) -1;
+		if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
+			flags |= XFS_LOG_REL_PERM_RESERV;
+		}
+	}
+
+
+	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
+	    (flags & XFS_LOG_REL_PERM_RESERV)) {
+		/*
+		 * Release ticket if not permanent reservation or a specific
+		 * request has been made to release a permanent reservation.
+		 */
+		xlog_ungrant_log_space(log, ticket);
+		xlog_state_put_ticket(log, ticket);
+	} else {
+		xlog_regrant_reserve_log_space(log, ticket);
+	}
+
+	/* If this ticket was a permanent reservation and we aren't
+	 * trying to release it, reset the inited flag so that next time
+	 * we write, a start record will be written out.
+	 */
+	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
+	    (flags & XFS_LOG_REL_PERM_RESERV) == 0)
+		ticket->t_flags |= XLOG_TIC_INITED;
+
+	return lsn;
+}	/* xfs_log_done */
+
+
+/*
+ * Force the in-core log to disk.  If flags == XFS_LOG_SYNC,
+ *	the force is done synchronously.
+ *
+ * Asynchronous forces are implemented by setting the WANT_SYNC
+ * bit in the appropriate in-core log and then returning.
+ *
+ * Synchronous forces are implemented with a semaphore.  All callers
+ * to force a given lsn to disk will wait on a semaphore attached to the
+ * specific in-core log.  When the given in-core log finally completes its
+ * write to disk, that thread will wake up all threads waiting on the
+ * semaphore.
+ */
+int
+xfs_log_force(xfs_mount_t *mp,
+	      xfs_lsn_t	  lsn,
+	      uint	  flags)
+{
+	int	rval;
+	xlog_t *log = mp->m_log;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return 0;
+#endif
+
+	ASSERT(flags & XFS_LOG_FORCE);
+
+	XFS_STATS_INC(xs_log_force);
+
+	if ((log->l_flags & XLOG_IO_ERROR) == 0) {
+		if (lsn == 0)
+			rval = xlog_state_sync_all(log, flags);
+		else
+			rval = xlog_state_sync(log, lsn, flags);
+	} else {
+		rval = XFS_ERROR(EIO);
+	}
+
+	return rval;
+
+}	/* xfs_log_force */
+
+/*
+ * Attaches a new iclog I/O completion callback routine during
+ * transaction commit.  If the log is in error state, a non-zero
+ * return code is handed back and the caller is responsible for
+ * executing the callback at an appropriate time.
+ */
+int
+xfs_log_notify(xfs_mount_t	  *mp,		/* mount of partition */
+	       void		  *iclog_hndl,	/* iclog to hang callback off */
+	       xfs_log_callback_t *cb)
+{
+	xlog_t *log = mp->m_log;
+	xlog_in_core_t	  *iclog = (xlog_in_core_t *)iclog_hndl;
+	int	abortflg, spl;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return 0;
+#endif
+	cb->cb_next = NULL;
+	spl = LOG_LOCK(log);
+	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
+	if (!abortflg) {
+		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
+			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
+		cb->cb_next = NULL;
+		*(iclog->ic_callback_tail) = cb;
+		iclog->ic_callback_tail = &(cb->cb_next);
+	}
+	LOG_UNLOCK(log, spl);
+	return abortflg;
+}	/* xfs_log_notify */
+
+int
+xfs_log_release_iclog(xfs_mount_t *mp,
+		      void	  *iclog_hndl)
+{
+	xlog_t *log = mp->m_log;
+	xlog_in_core_t	  *iclog = (xlog_in_core_t *)iclog_hndl;
+
+	if (xlog_state_release_iclog(log, iclog)) {
+		xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+		return(EIO);
+	}
+
+	return 0;
+}
+
+/*
+ *  1. Reserve an amount of on-disk log space and return a ticket corresponding
+ *	to the reservation.
+ *  2. Potentially, push buffers at tail of log to disk.
+ *
+ * Each reservation is going to reserve extra space for a log record header.
+ * When writes happen to the on-disk log, we don't subtract the length of the
+ * log record header from any reservation.  By wasting space in each
+ * reservation, we prevent over-allocation problems.
+ */
+int
+xfs_log_reserve(xfs_mount_t	 *mp,
+		int		 unit_bytes,
+		int		 cnt,
+		xfs_log_ticket_t *ticket,
+		__uint8_t	 client,
+		uint		 flags)
+{
+	xlog_t		*log = mp->m_log;
+	xlog_ticket_t	*internal_ticket;
+	int		retval;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return 0;
+#endif
+	retval = 0;
+	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
+	ASSERT((flags & XFS_LOG_NOSLEEP) == 0);
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return XFS_ERROR(EIO);
+
+	XFS_STATS_INC(xs_try_logspace);
+
+	if (*ticket != NULL) {
+		ASSERT(flags & XFS_LOG_PERM_RESERV);
+		internal_ticket = (xlog_ticket_t *)*ticket;
+		xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+		retval = xlog_regrant_write_log_space(log, internal_ticket);
+	} else {
+		/* may sleep if need to allocate more tickets */
+		internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
+						  client, flags);
+		*ticket = internal_ticket;
+		xlog_grant_push_ail(mp,
+				    (internal_ticket->t_unit_res *
+				     internal_ticket->t_cnt));
+		retval = xlog_grant_log_space(log, internal_ticket);
+	}
+
+	return retval;
+}	/* xfs_log_reserve */
+
+
+/*
+ * Mount the log for a filesystem
+ *
+ * mp		- ubiquitous xfs mount point structure
+ * log_target	- buftarg of on-disk log device
+ * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
+ * num_bblocks	- Number of BBSIZE blocks in on-disk log
+ *
+ * Return error or zero.
+ */
+int
+xfs_log_mount(xfs_mount_t	*mp,
+	      xfs_buftarg_t	*log_target,
+	      xfs_daddr_t	blk_offset,
+	      int		num_bblks)
+{
+	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
+		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
+	else {
+		cmn_err(CE_NOTE,
+			"!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
+			mp->m_fsname);
+		ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
+	}
+
+	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug) {
+		cmn_err(CE_NOTE, "log dev: %s", XFS_BUFTARG_NAME(log_target));
+		return 0;
+	}
+#endif
+	/*
+	 * skip log recovery on a norecovery mount.  pretend it all
+	 * just worked.
+	 */
+	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
+		int	error;
+		vfs_t	*vfsp = XFS_MTOVFS(mp);
+		int	readonly = (vfsp->vfs_flag & VFS_RDONLY);
+
+		if (readonly)
+			vfsp->vfs_flag &= ~VFS_RDONLY;
+
+		error = xlog_recover(mp->m_log, readonly);
+
+		if (readonly)
+			vfsp->vfs_flag |= VFS_RDONLY;
+		if (error) {
+			cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
+			xlog_unalloc_log(mp->m_log);
+			return error;
+		}
+	}
+
+	/* Normal transactions can now occur */
+	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
+
+	/* End mounting message in xfs_log_mount_finish */
+	return 0;
+}	/* xfs_log_mount */
+
+/*
+ * Finish the recovery of the file system.  This is separate from
+ * the xfs_log_mount() call, because it depends on the code in
+ * xfs_mountfs() to read in the root and real-time bitmap inodes
+ * between calling xfs_log_mount() and here.
+ *
+ * mp		- ubiquitous xfs mount point structure
+ */
+int
+xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
+{
+	int	error;
+
+	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
+		error = xlog_recover_finish(mp->m_log, mfsi_flags);
+	else {
+		error = 0;
+		ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
+	}
+
+	return error;
+}
+
+/*
+ * Unmount processing for the log.
+ */
+int
+xfs_log_unmount(xfs_mount_t *mp)
+{
+	int		error;
+
+	error = xfs_log_unmount_write(mp);
+	xfs_log_unmount_dealloc(mp);
+	return (error);
+}
+
+/*
+ * Final log writes as part of unmount.
+ *
+ * Mark the filesystem clean as unmount happens.  Note that during relocation
+ * this routine needs to be executed as part of source-bag while the
+ * deallocation must not be done until source-end.
+ */
+
+/*
+ * Unmount record used to have a string "Unmount filesystem--" in the
+ * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
+ * We just write the magic number now since that particular field isn't
+ * currently architecture converted and "nUmount" is a bit awkward.
+ * As far as I know, there weren't any dependencies on the old behaviour.
+ */
+
+int
+xfs_log_unmount_write(xfs_mount_t *mp)
+{
+	xlog_t		 *log = mp->m_log;
+	xlog_in_core_t	 *iclog;
+#ifdef DEBUG
+	xlog_in_core_t	 *first_iclog;
+#endif
+	xfs_log_iovec_t  reg[1];
+	xfs_log_ticket_t tic = NULL;
+	xfs_lsn_t	 lsn;
+	int		 error;
+	SPLDECL(s);
+
+	/* the data section must be 32 bit size aligned */
+	struct {
+	    __uint16_t magic;
+	    __uint16_t pad1;
+	    __uint32_t pad2; /* may as well make it 64 bits */
+	} magic = { XLOG_UNMOUNT_TYPE, 0, 0 };
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return 0;
+#endif
+
+	/*
+	 * Don't write out the unmount record on read-only mounts, or if we
+	 * are doing a forced umount (typically because of IO errors).
+	 */
+	if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
+		return 0;
+
+	xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+
+#ifdef DEBUG
+	first_iclog = iclog = log->l_iclog;
+	do {
+		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
+			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
+			ASSERT(iclog->ic_offset == 0);
+		}
+		iclog = iclog->ic_next;
+	} while (iclog != first_iclog);
+#endif
+	if (! (XLOG_FORCED_SHUTDOWN(log))) {
+		reg[0].i_addr = (void*)&magic;
+		reg[0].i_len  = sizeof(magic);
+
+		error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
+		if (!error) {
+			/* remove inited flag */
+			((xlog_ticket_t *)tic)->t_flags = 0;
+			error = xlog_write(mp, reg, 1, tic, &lsn,
+					   NULL, XLOG_UNMOUNT_TRANS);
+			/*
+			 * At this point, we're umounting anyway,
+			 * so there's no point in transitioning log state
+			 * to IOERROR. Just continue...
+			 */
+		}
+
+		if (error) {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"xfs_log_unmount: unmount record failed");
+		}
+
+
+		s = LOG_LOCK(log);
+		iclog = log->l_iclog;
+		iclog->ic_refcnt++;
+		LOG_UNLOCK(log, s);
+		xlog_state_want_sync(log, iclog);
+		(void) xlog_state_release_iclog(log, iclog);
+
+		s = LOG_LOCK(log);
+		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
+		      iclog->ic_state == XLOG_STATE_DIRTY)) {
+			if (!XLOG_FORCED_SHUTDOWN(log)) {
+				sv_wait(&iclog->ic_forcesema, PMEM,
+					&log->l_icloglock, s);
+			} else {
+				LOG_UNLOCK(log, s);
+			}
+		} else {
+			LOG_UNLOCK(log, s);
+		}
+		if (tic)
+			xlog_state_put_ticket(log, tic);
+	} else {
+		/*
+		 * We're already in forced_shutdown mode, couldn't
+		 * even attempt to write out the unmount transaction.
+		 *
+		 * Go through the motions of sync'ing and releasing
+		 * the iclog, even though no I/O will actually happen,
+		 * because we need to wait for other log I/Os that may
+		 * already be in progress.  Do this as a separate section
+		 * of code so we'll know if we ever get stuck here that
+		 * we're in this odd situation of trying to unmount
+		 * a file system that went into forced_shutdown as
+		 * the result of an unmount.
+		 */
+		s = LOG_LOCK(log);
+		iclog = log->l_iclog;
+		iclog->ic_refcnt++;
+		LOG_UNLOCK(log, s);
+
+		xlog_state_want_sync(log, iclog);
+		(void) xlog_state_release_iclog(log, iclog);
+
+		s = LOG_LOCK(log);
+
+		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
+		      iclog->ic_state == XLOG_STATE_DIRTY ||
+		      iclog->ic_state == XLOG_STATE_IOERROR)) {
+			sv_wait(&iclog->ic_forcesema, PMEM,
+				&log->l_icloglock, s);
+		} else {
+			LOG_UNLOCK(log, s);
+		}
+	}
+
+	return 0;
+}	/* xfs_log_unmount_write */
+
+/*
+ * Deallocate log structures for unmount/relocation.
+ */
+void
+xfs_log_unmount_dealloc(xfs_mount_t *mp)
+{
+	xlog_unalloc_log(mp->m_log);
+}
+
+/*
+ * Write region vectors to log.  The write happens using the space reservation
+ * of the ticket (tic).  It is not a requirement that all writes for a given
+ * transaction occur with one call to xfs_log_write().
+ */
+int
+xfs_log_write(xfs_mount_t *	mp,
+	      xfs_log_iovec_t	reg[],
+	      int		nentries,
+	      xfs_log_ticket_t	tic,
+	      xfs_lsn_t		*start_lsn)
+{
+	int	error;
+	xlog_t *log = mp->m_log;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ) {
+		*start_lsn = 0;
+		return 0;
+	}
+#endif
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return XFS_ERROR(EIO);
+
+	if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) {
+		xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+	}
+	return (error);
+}	/* xfs_log_write */
+
+
+void
+xfs_log_move_tail(xfs_mount_t	*mp,
+		  xfs_lsn_t	tail_lsn)
+{
+	xlog_ticket_t	*tic;
+	xlog_t		*log = mp->m_log;
+	int		need_bytes, free_bytes, cycle, bytes;
+	SPLDECL(s);
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	if (!xlog_debug && xlog_target == log->l_targ)
+		return;
+#endif
+	/* XXXsup tmp */
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return;
+	ASSERT(!XFS_FORCED_SHUTDOWN(mp));
+
+	if (tail_lsn == 0) {
+		/* needed since sync_lsn is 64 bits */
+		s = LOG_LOCK(log);
+		tail_lsn = log->l_last_sync_lsn;
+		LOG_UNLOCK(log, s);
+	}
+
+	s = GRANT_LOCK(log);
+
+	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
+	 * tail_lsn.
+	 */
+	if (tail_lsn != 1) {
+		log->l_tail_lsn = tail_lsn;
+	}
+
+	if ((tic = log->l_write_headq)) {
+#ifdef DEBUG
+		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
+			panic("Recovery problem");
+#endif
+		cycle = log->l_grant_write_cycle;
+		bytes = log->l_grant_write_bytes;
+		free_bytes = xlog_space_left(log, cycle, bytes);
+		do {
+			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
+				break;
+			tail_lsn = 0;
+			free_bytes -= tic->t_unit_res;
+			sv_signal(&tic->t_sema);
+			tic = tic->t_next;
+		} while (tic != log->l_write_headq);
+	}
+	if ((tic = log->l_reserve_headq)) {
+#ifdef DEBUG
+		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
+			panic("Recovery problem");
+#endif
+		cycle = log->l_grant_reserve_cycle;
+		bytes = log->l_grant_reserve_bytes;
+		free_bytes = xlog_space_left(log, cycle, bytes);
+		do {
+			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+				need_bytes = tic->t_unit_res*tic->t_cnt;
+			else
+				need_bytes = tic->t_unit_res;
+			if (free_bytes < need_bytes && tail_lsn != 1)
+				break;
+			tail_lsn = 0;
+			free_bytes -= need_bytes;
+			sv_signal(&tic->t_sema);
+			tic = tic->t_next;
+		} while (tic != log->l_reserve_headq);
+	}
+	GRANT_UNLOCK(log, s);
+}	/* xfs_log_move_tail */
+
+/*
+ * Determine if we have a transaction that has gone to disk
+ * that needs to be covered. Log activity needs to be idle (no AIL and
+ * nothing in the iclogs). And, we need to be in the right state indicating
+ * something has gone out.
+ */
+int
+xfs_log_need_covered(xfs_mount_t *mp)
+{
+	SPLDECL(s);
+	int		needed = 0, gen;
+	xlog_t		*log = mp->m_log;
+	vfs_t		*vfsp = XFS_MTOVFS(mp);
+
+	if (fs_frozen(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
+	    (vfsp->vfs_flag & VFS_RDONLY))
+		return 0;
+
+	s = LOG_LOCK(log);
+	if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
+		(log->l_covered_state == XLOG_STATE_COVER_NEED2))
+			&& !xfs_trans_first_ail(mp, &gen)
+			&& xlog_iclogs_empty(log)) {
+		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+			log->l_covered_state = XLOG_STATE_COVER_DONE;
+		else {
+			ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2);
+			log->l_covered_state = XLOG_STATE_COVER_DONE2;
+		}
+		needed = 1;
+	}
+	LOG_UNLOCK(log, s);
+	return(needed);
+}
+
+/******************************************************************************
+ *
+ *	local routines
+ *
+ ******************************************************************************
+ */
+
+/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
+ * The log manager must keep track of the last LR which was committed
+ * to disk.  The lsn of this LR will become the new tail_lsn whenever
+ * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
+ * the situation where stuff could be written into the log but nothing
+ * was ever in the AIL when asked.  Eventually, we panic since the
+ * tail hits the head.
+ *
+ * We may be holding the log iclog lock upon entering this routine.
+ */
+xfs_lsn_t
+xlog_assign_tail_lsn(xfs_mount_t *mp)
+{
+	xfs_lsn_t tail_lsn;
+	SPLDECL(s);
+	xlog_t	  *log = mp->m_log;
+
+	tail_lsn = xfs_trans_tail_ail(mp);
+	s = GRANT_LOCK(log);
+	if (tail_lsn != 0) {
+		log->l_tail_lsn = tail_lsn;
+	} else {
+		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
+	}
+	GRANT_UNLOCK(log, s);
+
+	return tail_lsn;
+}	/* xlog_assign_tail_lsn */
+
+
+/*
+ * Return the space in the log between the tail and the head.  The head
+ * is passed in the cycle/bytes formal parms.  In the special case where
+ * the reserve head has wrapped past the tail, this calculation is no
+ * longer valid.  In this case, just return 0 which means there is no space
+ * in the log.  This works for all places where this function is called
+ * with the reserve head.  Of course, if the write head were to ever
+ * wrap the tail, we should blow up.  Rather than catch this case here,
+ * we depend on other ASSERTions in other parts of the code.   XXXmiken
+ *
+ * This code also handles the case where the reservation head is behind
+ * the tail.  The details of this case are described below, but the end
+ * result is that we return the size of the log as the amount of space left.
+ */
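+/*
+ * A worked example (hypothetical numbers, assuming BBSIZE of 512 bytes and
+ * a 512000 byte log): if head and tail are in the same cycle with the head
+ * at byte 153600 and the tail at byte 51200, the space left is
+ * 512000 - (153600 - 51200) = 409600 bytes.  If the head has wrapped into
+ * the next cycle and sits at byte 51200 while the tail is still at byte
+ * 153600, the space left is 153600 - 51200 = 102400 bytes.
+ */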
+int
+xlog_space_left(xlog_t *log, int cycle, int bytes)
+{
+	int free_bytes;
+	int tail_bytes;
+	int tail_cycle;
+
+	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
+	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
+	if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
+		free_bytes = log->l_logsize - (bytes - tail_bytes);
+	} else if ((tail_cycle + 1) < cycle) {
+		return 0;
+	} else if (tail_cycle < cycle) {
+		ASSERT(tail_cycle == (cycle - 1));
+		free_bytes = tail_bytes - bytes;
+	} else {
+		/*
+		 * The reservation head is behind the tail.
+		 * In this case we just want to return the size of the
+		 * log as the amount of space left.
+		 */
+		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
+			"xlog_space_left: head behind tail\n"
+			"  tail_cycle = %d, tail_bytes = %d\n"
+			"  GH   cycle = %d, GH   bytes = %d",
+			tail_cycle, tail_bytes, cycle, bytes);
+		ASSERT(0);
+		free_bytes = log->l_logsize;
+	}
+	return free_bytes;
+}	/* xlog_space_left */
+
+
+/*
+ * Log function which is called when an io completes.
+ *
+ * The log manager needs its own routine, in order to control what
+ * happens with the buffer after the write completes.
+ */
+void
+xlog_iodone(xfs_buf_t *bp)
+{
+	xlog_in_core_t	*iclog;
+	xlog_t		*l;
+	int		aborted;
+
+	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2);
+	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
+	aborted = 0;
+
+	/*
+	 * Some versions of cpp barf on the recursive definition of
+	 * ic_log -> hic_fields.ic_log and expand ic_log twice when
+	 * it is passed through two macros.  Workaround broken cpp.
+	 */
+	l = iclog->ic_log;
+
+	/*
+	 * Race to shutdown the filesystem if we see an error.
+	 */
+	if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp,
+			XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
+		xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
+		XFS_BUF_STALE(bp);
+		xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR);
+		/*
+		 * This flag will be propagated to the trans-committed
+		 * callback routines to let them know that the log-commit
+		 * didn't succeed.
+		 */
+		aborted = XFS_LI_ABORTED;
+	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		aborted = XFS_LI_ABORTED;
+	}
+	xlog_state_done_syncing(iclog, aborted);
+	if (!(XFS_BUF_ISASYNC(bp))) {
+		/*
+		 * Corresponding psema() will be done in bwrite().  If we don't
+		 * vsema() here, panic.
+		 */
+		XFS_BUF_V_IODONESEMA(bp);
+	}
+}	/* xlog_iodone */
+
+/*
+ * The bdstrat callback function for log bufs. This gives us a central
+ * place to trap bufs in case we get hit by a log I/O error and need to
+ * shut down.  In practice, even when we didn't get a log error,
+ * we transition the iclogs to the IOERROR state *after* flushing all existing
+ * iclogs to disk.  This is because we don't want any new transactions to be
+ * started or completed afterwards.
+ */
+STATIC int
+xlog_bdstrat_cb(struct xfs_buf *bp)
+{
+	xlog_in_core_t *iclog;
+
+	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+
+	if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
+		/* note for irix bstrat will need struct bdevsw passed
+		 * Fix the following macro if the code ever is merged
+		 */
+		XFS_bdstrat(bp);
+		return 0;
+	}
+
+	xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
+	XFS_BUF_ERROR(bp, EIO);
+	XFS_BUF_STALE(bp);
+	xfs_biodone(bp);
+	return (XFS_ERROR(EIO));
+}
+
+/*
+ * Return size of each in-core log record buffer.
+ *
+ * Low memory machines only get 2 16KB buffers.  We don't want to waste
+ * memory here.  However, all other machines get at least 2 32KB buffers.
+ * The number is hard coded because we don't care about the minimum
+ * memory size, just 32MB systems.
+ *
+ * If the filesystem blocksize is too large, we may need to choose a
+ * larger size since the directory code currently logs entire blocks.
+ */
+
+STATIC void
+xlog_get_iclog_buffer_size(xfs_mount_t	*mp,
+			   xlog_t	*log)
+{
+	int size;
+	int xhdrs;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	/*
+	 * When logbufs == 0, someone has disabled the log from the FSTAB
+	 * file.  This is not a documented feature.  We need to set xlog_debug
+	 * to zero (this deactivates the log) and set xlog_target to the
+	 * appropriate device.  Only one filesystem may be affected as such
+	 * since this is just a performance hack to test what we might be able
+	 * to get if the log were not present.
+	 */
+	if (mp->m_logbufs == 0) {
+		xlog_debug = 0;
+		xlog_target = log->l_targ;
+		log->l_iclog_bufs = XLOG_MIN_ICLOGS;
+	} else
+#endif
+	{
+		/*
+		 * This is the normal path.  If m_logbufs == -1, then the
+		 * admin has chosen to use the system defaults for logbuffers.
+		 */
+		if (mp->m_logbufs == -1) { 
+			if (xfs_physmem <= btoc(128*1024*1024)) { 
+				log->l_iclog_bufs = XLOG_MIN_ICLOGS; 
+			} else if (xfs_physmem <= btoc(400*1024*1024)) { 
+				log->l_iclog_bufs = XLOG_MED_ICLOGS; 
+			} else {
+				/* 256K with 32K bufs */
+				log->l_iclog_bufs = XLOG_MAX_ICLOGS;
+			}
+		} else
+			log->l_iclog_bufs = mp->m_logbufs;
+
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+		/* We are reactivating a filesystem after it was inactive */
+		if (log->l_targ == xlog_target) {
+			xlog_target = NULL;
+			xlog_debug = 1;
+		}
+#endif
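+/*
+ * For instance, after memcpy'ing an op header into the iclog, the caller
+ * does xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t))
+ * to step the copy pointer and offset past the header and shrink the
+ * remaining length by the same amount (see xlog_write() below).
+ */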
+	}
+
+	/*
+	 * Buffer size passed in from mount system call.
+	 */
+	if (mp->m_logbsize != -1) {
+		size = log->l_iclog_size = mp->m_logbsize;
+		log->l_iclog_size_log = 0;
+		while (size != 1) {
+			log->l_iclog_size_log++;
+			size >>= 1;
+		}
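+		/*
+		 * e.g. a 32768 byte (32KB) logbsize leaves
+		 * l_iclog_size_log at 15, since 32768 == 1 << 15.
+		 */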
+
+		if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
+			/* # headers = size / 32K
+			 * one header holds cycles from 32K of data
+			 */
+
+			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
+			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
+				xhdrs++;
+			log->l_iclog_hsize = xhdrs << BBSHIFT;
+			log->l_iclog_heads = xhdrs;
+		} else {
+			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
+			log->l_iclog_hsize = BBSIZE;
+			log->l_iclog_heads = 1;
+		}
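+		/*
+		 * For example, a hypothetical 64KB log buffer on a v2 log
+		 * needs 65536 / 32768 = 2 header sectors, so l_iclog_hsize
+		 * becomes 2 << BBSHIFT = 1024 bytes and l_iclog_heads is 2.
+		 */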
+		return;
+	}
+
+	/*
+	 * Special case machines that have less than 32MB of memory.
+	 * All machines with more memory use 32KB buffers.
+	 */
+	if (xfs_physmem <= btoc(32*1024*1024)) {
+		/* Don't change; min configuration */
+		log->l_iclog_size = XLOG_RECORD_BSIZE;		/* 16k */
+		log->l_iclog_size_log = XLOG_RECORD_BSHIFT;
+	} else {
+		log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;	/* 32k */
+		log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
+	}
+
+	/* the default log buffer size is 16k or 32k, which means one header sector */
+	log->l_iclog_hsize = BBSIZE;
+	log->l_iclog_heads = 1;
+
+	/*
+	 * For 16KB block sizes, we use 3 32KB buffers.  For 32KB block sizes, we use
+	 * 4 32KB buffers.  For 64KB block sizes, we use 8 32KB buffers.
+	 */
+	if (mp->m_sb.sb_blocksize >= 16*1024) {
+		log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
+		log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
+		if (mp->m_logbufs == -1) {
+			switch (mp->m_sb.sb_blocksize) {
+			    case 16*1024:			/* 16 KB */
+				log->l_iclog_bufs = 3;
+				break;
+			    case 32*1024:			/* 32 KB */
+				log->l_iclog_bufs = 4;
+				break;
+			    case 64*1024:			/* 64 KB */
+				log->l_iclog_bufs = 8;
+				break;
+			    default:
+				xlog_panic("XFS: Invalid blocksize");
+				break;
+			}
+		}
+	}
+}	/* xlog_get_iclog_buffer_size */
+
+
+/*
+ * This routine initializes some of the log structure for a given mount point.
+ * Its primary purpose is to fill in enough so that recovery can occur.  However,
+ * some other stuff may be filled in too.
+ */
+STATIC xlog_t *
+xlog_alloc_log(xfs_mount_t	*mp,
+	       xfs_buftarg_t	*log_target,
+	       xfs_daddr_t	blk_offset,
+	       int		num_bblks)
+{
+	xlog_t			*log;
+	xlog_rec_header_t	*head;
+	xlog_in_core_t		**iclogp;
+	xlog_in_core_t		*iclog, *prev_iclog=NULL;
+	xfs_buf_t		*bp;
+	int			i;
+	int			iclogsize;
+
+	log = (xlog_t *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP);
+
+	log->l_mp	   = mp;
+	log->l_targ	   = log_target;
+	log->l_logsize     = BBTOB(num_bblks);
+	log->l_logBBstart  = blk_offset;
+	log->l_logBBsize   = num_bblks;
+	log->l_covered_state = XLOG_STATE_COVER_IDLE;
+	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
+
+	log->l_prev_block  = -1;
+	ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, 1, 0);
+	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
+	log->l_last_sync_lsn = log->l_tail_lsn;
+	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
+	log->l_grant_reserve_cycle = 1;
+	log->l_grant_write_cycle = 1;
+
+	if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
+		log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
+		ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
+		/* for larger sector sizes, must have v2 or external log */
+		ASSERT(log->l_sectbb_log == 0 ||
+			log->l_logBBstart == 0 ||
+			XFS_SB_VERSION_HASLOGV2(&mp->m_sb));
+		ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
+	}
+	log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
+
+	xlog_get_iclog_buffer_size(mp, log);
+
+	bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
+	XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
+	XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
+	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+	log->l_xbuf = bp;
+
+	spinlock_init(&log->l_icloglock, "iclog");
+	spinlock_init(&log->l_grant_lock, "grhead_iclog");
+	initnsema(&log->l_flushsema, 0, "ic-flush");
+	xlog_state_ticket_alloc(log);  /* wait until after icloglock inited */
+
+	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
+	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
+
+	iclogp = &log->l_iclog;
+	/*
+	 * The amount of memory to allocate for the iclog structure is
+	 * rather funky due to the way the structure is defined.  It is
+	 * done this way so that we can use different sizes for machines
+	 * with different amounts of memory.  See the definition of
+	 * xlog_in_core_t in xfs_log_priv.h for details.
+	 */
+	iclogsize = log->l_iclog_size;
+	ASSERT(log->l_iclog_size >= 4096);
+	for (i=0; i < log->l_iclog_bufs; i++) {
+		*iclogp = (xlog_in_core_t *)
+			  kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
+		iclog = *iclogp;
+		iclog->hic_data = (xlog_in_core_2_t *)
+			  kmem_zalloc(iclogsize, KM_SLEEP);
+
+		iclog->ic_prev = prev_iclog;
+		prev_iclog = iclog;
+		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
+
+		head = &iclog->ic_header;
+		memset(head, 0, sizeof(xlog_rec_header_t));
+		INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
+		INT_SET(head->h_version, ARCH_CONVERT,
+			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
+		INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
+		/* new fields */
+		INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
+		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
+
+		bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
+		XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
+		XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
+		XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
+		iclog->ic_bp = bp;
+
+		iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
+		iclog->ic_state = XLOG_STATE_ACTIVE;
+		iclog->ic_log = log;
+		iclog->ic_callback_tail = &(iclog->ic_callback);
+		iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize;
+
+		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
+		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
+		sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
+		sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");
+
+		iclogp = &iclog->ic_next;
+	}
+	*iclogp = log->l_iclog;			/* complete ring */
+	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
+
+	return log;
+}	/* xlog_alloc_log */
+
+
+/*
+ * Write out the commit record of a transaction associated with the given
+ * ticket.  Return the lsn of the commit record.
+ */
+STATIC int
+xlog_commit_record(xfs_mount_t  *mp,
+		   xlog_ticket_t *ticket,
+		   xlog_in_core_t **iclog,
+		   xfs_lsn_t	*commitlsnp)
+{
+	int		error;
+	xfs_log_iovec_t	reg[1];
+
+	reg[0].i_addr = NULL;
+	reg[0].i_len = 0;
+
+	ASSERT_ALWAYS(iclog);
+	if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
+			       iclog, XLOG_COMMIT_TRANS))) {
+		xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+	}
+	return (error);
+}	/* xlog_commit_record */
+
+
+/*
+ * Push on the buffer cache code if we ever use more than 75% of the on-disk
+ * log space.  This code pushes on the lsn which would supposedly free up
+ * the 25% which we want to leave free.  We may need to adopt a policy which
+ * pushes on an lsn which is further along in the log once we reach the high
+ * water mark.  In this manner, we would be creating a low water mark.
+ */
+void
+xlog_grant_push_ail(xfs_mount_t	*mp,
+		    int		need_bytes)
+{
+    xlog_t	*log = mp->m_log;	/* pointer to the log */
+    xfs_lsn_t	tail_lsn;		/* lsn of the log tail */
+    xfs_lsn_t	threshold_lsn = 0;	/* lsn we'd like to be at */
+    int		free_blocks;		/* free blocks left to write to */
+    int		free_bytes;		/* free bytes left to write to */
+    int		threshold_block;	/* block in lsn we'd like to be at */
+    int		threshold_cycle;	/* lsn cycle we'd like to be at */
+    int		free_threshold;
+    SPLDECL(s);
+
+    ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+
+    s = GRANT_LOCK(log);
+    free_bytes = xlog_space_left(log,
+				 log->l_grant_reserve_cycle,
+				 log->l_grant_reserve_bytes);
+    tail_lsn = log->l_tail_lsn;
+    free_blocks = BTOBBT(free_bytes);
+
+    /*
+     * Set the threshold for the minimum number of free blocks in the
+     * log to the maximum of what the caller needs, one quarter of the
+     * log, and 256 blocks.
+     */
+    free_threshold = BTOBB(need_bytes);
+    free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+    free_threshold = MAX(free_threshold, 256);
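+    /*
+     * For example (hypothetical numbers): a caller needing 64KB of log
+     * space on a 2000 block log gets max(128, 500, 256) = 500 blocks as
+     * the free space threshold.
+     */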
+    if (free_blocks < free_threshold) {
+	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
+	threshold_cycle = CYCLE_LSN(tail_lsn);
+	if (threshold_block >= log->l_logBBsize) {
+	    threshold_block -= log->l_logBBsize;
+	    threshold_cycle += 1;
+	}
+	ASSIGN_ANY_LSN_HOST(threshold_lsn, threshold_cycle,
+		       threshold_block);
+
+	/* Don't pass in an lsn greater than the lsn of the last
+	 * log record known to be on disk.
+	 */
+	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
+	    threshold_lsn = log->l_last_sync_lsn;
+    }
+    GRANT_UNLOCK(log, s);
+
+    /*
+     * Get the transaction layer to kick the dirty buffers out to
+     * disk asynchronously. No point in trying to do this if
+     * the filesystem is shutting down.
+     */
+    if (threshold_lsn &&
+	!XLOG_FORCED_SHUTDOWN(log))
+	    xfs_trans_push_ail(mp, threshold_lsn);
+}	/* xlog_grant_push_ail */
+
+
+/*
+ * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
+ * fashion.  Previously, we should have moved the current iclog
+ * ptr in the log to point to the next available iclog.  This allows further
+ * writes to continue while this code syncs out an iclog ready to go.
+ * Before an in-core log can be written out, the data section must be scanned
+ * to save away the 1st word of each BBSIZE block into the header.  We replace
+ * it with the current cycle count.  Each BBSIZE block is tagged with the
+ * cycle count because there is an implicit assumption that drives will
+ * guarantee that entire 512 byte blocks get written at once.  In other words,
+ * we can't have part of a 512 byte block written and part not written.  By
+ * tagging each block, we will know which blocks are valid when recovering
+ * after an unclean shutdown.
+ *
+ * This routine is single threaded on the iclog.  No other thread can be in
+ * this routine with the same iclog.  Changing contents of iclog can
+ * therefore be done without grabbing the state machine lock.  Updating the global
+ * log will require grabbing the lock though.
+ *
+ * The entire log manager uses a logical block numbering scheme.  Only
+ * log_sync (and then only bwrite()) knows about the fact that the log may
+ * not start with block zero on a given device.  The log block start offset
+ * is added immediately before calling bwrite().
+ */
+
+int
+xlog_sync(xlog_t		*log,
+	  xlog_in_core_t	*iclog)
+{
+	xfs_caddr_t	dptr;		/* pointer to byte sized element */
+	xfs_buf_t	*bp;
+	int		i, ops;
+	uint		count;		/* byte count of bwrite */
+	uint		count_init;	/* initial count before roundup */
+	int		roundoff;       /* roundoff to BB or stripe */
+	int		split = 0;	/* split write into two regions */
+	int		error;
+	SPLDECL(s);
+	int		v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb);
+
+	XFS_STATS_INC(xs_log_writes);
+	ASSERT(iclog->ic_refcnt == 0);
+
+	/* Add for LR header */
+	count_init = log->l_iclog_hsize + iclog->ic_offset;
+
+	/* Round out the log write size */
+	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
+		/* we have a v2 stripe unit to use */
+		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
+	} else {
+		count = BBTOB(BTOBB(count_init));
+	}
+	roundoff = count - count_init;
+	ASSERT(roundoff >= 0);
+	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
+                roundoff < log->l_mp->m_sb.sb_logsunit)
+		|| 
+		(log->l_mp->m_sb.sb_logsunit <= 1 && 
+		 roundoff < BBTOB(1)));
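+	/*
+	 * For example (hypothetical sizes): with 5000 bytes of header plus
+	 * data and no stripe unit, count rounds up to 10 basic blocks
+	 * (5120 bytes) and roundoff is 120; with a hypothetical 4096 byte
+	 * v2 stripe unit, count rounds up to 8192 and roundoff is 3192.
+	 */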
+
+	/* move grant heads by roundoff in sync */
+	s = GRANT_LOCK(log);
+	XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
+	XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
+	GRANT_UNLOCK(log, s);
+
+	/* put cycle number in every block */
+	xlog_pack_data(log, iclog, roundoff); 
+
+	/* real byte length */
+	if (v2) {
+		INT_SET(iclog->ic_header.h_len, 
+			ARCH_CONVERT,
+			iclog->ic_offset + roundoff);
+	} else {
+		INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+	}
+
+	/* put ops count in correct order */
+	ops = iclog->ic_header.h_num_logops;
+	INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
+
+	bp	    = iclog->ic_bp;
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
+	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
+	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
+
+	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
+
+	/* Do we need to split this write into 2 parts? */
+	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
+		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
+		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
+		iclog->ic_bwritecnt = 2;	/* split into 2 writes */
+	} else {
+		iclog->ic_bwritecnt = 1;
+	}
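+	/*
+	 * Hypothetical wrap case: if this iclog starts at block 990 of a
+	 * 1000 block log and count is 20 basic blocks, the first write
+	 * covers blocks 990-999 and the remaining 10 blocks (split bytes)
+	 * are written separately at the physical start of the log below.
+	 */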
+	XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
+	XFS_BUF_SET_FSPRIVATE(bp, iclog);	/* save for later */
+	XFS_BUF_BUSY(bp);
+	XFS_BUF_ASYNC(bp);
+	/*
+	 * Do a disk write cache flush for the log block.
+	 * This is a bit of a sledgehammer; it would be better
+	 * to use a tag barrier here that just prevents reordering.
+	 * It may not be needed to flush the first split block in the log wrap
+	 * case, but do it anyway to be safe -AK
+	 */
+	if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
+		XFS_BUF_FLUSH(bp);
+
+	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
+	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
+
+	xlog_verify_iclog(log, iclog, count, B_TRUE);
+
+	/* account for log which doesn't start at block #0 */
+	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
+	/*
+	 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
+	 * is shutting down.
+	 */
+	XFS_BUF_WRITE(bp);
+
+	if ((error = XFS_bwrite(bp))) {
+		xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
+				  XFS_BUF_ADDR(bp));
+		return (error);
+	}
+	if (split) {
+		bp		= iclog->ic_log->l_xbuf;
+		ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
+							(unsigned long)1);
+		XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
+		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
+		XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
+					    (__psint_t)count), split);
+		XFS_BUF_SET_FSPRIVATE(bp, iclog);
+		XFS_BUF_BUSY(bp);
+		XFS_BUF_ASYNC(bp);
+		if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
+			XFS_BUF_FLUSH(bp);
+		dptr = XFS_BUF_PTR(bp);
+		/*
+		 * Bump the cycle numbers at the start of each block
+		 * since this part of the buffer is at the start of
+		 * a new cycle.  Watch out for the header magic number
+		 * case, though.
+		 */
+		for (i=0; i<split; i += BBSIZE) {
+			INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+			if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+				INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+			dptr += BBSIZE;
+		}
+
+		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
+		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
+
+		/* account for internal log which doesn't start at block #0 */
+		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
+		XFS_BUF_WRITE(bp);
+		if ((error = XFS_bwrite(bp))) {
+			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
+					  bp, XFS_BUF_ADDR(bp));
+			return (error);
+		}
+	}
+	return (0);
+}	/* xlog_sync */
+
+
+/*
+ * Unallocate a log structure
+ */
+void
+xlog_unalloc_log(xlog_t *log)
+{
+	xlog_in_core_t	*iclog, *next_iclog;
+	xlog_ticket_t	*tic, *next_tic;
+	int		i;
+
+
+	iclog = log->l_iclog;
+	for (i=0; i<log->l_iclog_bufs; i++) {
+		sv_destroy(&iclog->ic_forcesema);
+		sv_destroy(&iclog->ic_writesema);
+		xfs_buf_free(iclog->ic_bp);
+#ifdef XFS_LOG_TRACE
+		if (iclog->ic_trace != NULL) {
+			ktrace_free(iclog->ic_trace);
+		}
+#endif
+		next_iclog = iclog->ic_next;
+		kmem_free(iclog->hic_data, log->l_iclog_size);
+		kmem_free(iclog, sizeof(xlog_in_core_t));
+		iclog = next_iclog;
+	}
+	freesema(&log->l_flushsema);
+	spinlock_destroy(&log->l_icloglock);
+	spinlock_destroy(&log->l_grant_lock);
+
+	/* XXXsup take a look at this again. */
+	if ((log->l_ticket_cnt != log->l_ticket_tcnt)  &&
+	    !XLOG_FORCED_SHUTDOWN(log)) {
+		xfs_fs_cmn_err(CE_WARN, log->l_mp,
+			"xlog_unalloc_log: (cnt: %d, total: %d)",
+			log->l_ticket_cnt, log->l_ticket_tcnt);
+		/* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
+
+	} else {
+		tic = log->l_unmount_free;
+		while (tic) {
+			next_tic = tic->t_next;
+			kmem_free(tic, NBPP);
+			tic = next_tic;
+		}
+	}
+	xfs_buf_free(log->l_xbuf);
+#ifdef XFS_LOG_TRACE
+	if (log->l_trace != NULL) {
+		ktrace_free(log->l_trace);
+	}
+	if (log->l_grant_trace != NULL) {
+		ktrace_free(log->l_grant_trace);
+	}
+#endif
+	log->l_mp->m_log = NULL;
+	kmem_free(log, sizeof(xlog_t));
+}	/* xlog_unalloc_log */
+
+/*
+ * Update counters atomically now that memcpy is done.
+ */
+/* ARGSUSED */
+static inline void
+xlog_state_finish_copy(xlog_t		*log,
+		       xlog_in_core_t	*iclog,
+		       int		record_cnt,
+		       int		copy_bytes)
+{
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+
+	iclog->ic_header.h_num_logops += record_cnt;
+	iclog->ic_offset += copy_bytes;
+
+	LOG_UNLOCK(log, s);
+}	/* xlog_state_finish_copy */
+
+
+
+
+/*
+ * Write some region out to in-core log
+ *
+ * This will be called when writing externally provided regions or when
+ * writing out a commit record for a given transaction.
+ *
+ * General algorithm:
+ *	1. Find total length of this write.  This may include adding to the
+ *		lengths passed in.
+ *	2. Check whether we violate the ticket's reservation.
+ *	3. While writing to this iclog
+ *	    A. Reserve as much space in this iclog as we can get
+ *	    B. If this is first write, save away start lsn
+ *	    C. While writing this region:
+ *		1. If first write of transaction, write start record
+ *		2. Write log operation header (header per region)
+ *		3. Find out if we can fit entire region into this iclog
+ *		4. Potentially, verify destination memcpy ptr
+ *		5. Memcpy (partial) region
+ *		6. If partial copy, release iclog; otherwise, continue
+ *			copying more regions into current iclog
+ *	4. Mark want sync bit (in simulation mode)
+ *	5. Release iclog for potential flush to on-disk log.
+ *
+ * ERRORS:
+ * 1.	Panic if reservation is overrun.  This should never happen since
+ *	reservation amounts are generated internal to the filesystem.
+ * NOTES:
+ * 1. Tickets are single threaded data structures.
+ * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
+ *	syncing routine.  When a single log_write region needs to span
+ *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
+ *	on all log operation writes which don't contain the end of the
+ *	region.  The XLOG_END_TRANS bit is used for the in-core log
+ *	operation which contains the end of the continued log_write region.
+ * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
+ *	we don't really know exactly how much space will be used.  As a result,
+ *	we don't update ic_offset until the end when we know exactly how many
+ *	bytes have been written out.
+ */
+int
+xlog_write(xfs_mount_t *	mp,
+	   xfs_log_iovec_t	reg[],
+	   int			nentries,
+	   xfs_log_ticket_t	tic,
+	   xfs_lsn_t		*start_lsn,
+	   xlog_in_core_t	**commit_iclog,
+	   uint			flags)
+{
+    xlog_t	     *log    = mp->m_log;
+    xlog_ticket_t    *ticket = (xlog_ticket_t *)tic;
+    xlog_op_header_t *logop_head;    /* ptr to log operation header */
+    xlog_in_core_t   *iclog;	     /* ptr to current in-core log */
+    __psint_t	     ptr;	     /* copy address into data region */
+    int		     len;	     /* # xlog_write() bytes still to copy */
+    int		     index;	     /* region index currently copying */
+    int		     log_offset;     /* offset (from 0) into data region */
+    int		     start_rec_copy; /* # bytes to copy for start record */
+    int		     partial_copy;   /* did we split a region? */
+    int		     partial_copy_len;/* # bytes copied if split region */
+    int		     need_copy;	     /* # bytes need to memcpy this region */
+    int		     copy_len;	     /* # bytes actually memcpy'ing */
+    int		     copy_off;	     /* # bytes from entry start */
+    int		     contwr;	     /* continued write of in-core log? */
+    int		     error;
+    int		     record_cnt = 0, data_cnt = 0;
+
+    partial_copy_len = partial_copy = 0;
+
+    /* Calculate potential maximum space.  Each region gets its own
+     * xlog_op_header_t and may need to be double word aligned.
+     */
+    len = 0;
+    if (ticket->t_flags & XLOG_TIC_INITED)     /* acct for start rec of xact */
+	len += sizeof(xlog_op_header_t);
+
+    for (index = 0; index < nentries; index++) {
+	len += sizeof(xlog_op_header_t);	    /* each region gets >= 1 */
+	len += reg[index].i_len;
+    }
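+    /*
+     * For instance, the first write of a transaction carrying two regions
+     * of 128 and 64 bytes reserves room for three op headers (the start
+     * record plus one per region):
+     *     len = 3 * sizeof(xlog_op_header_t) + 128 + 64
+     */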
+    contwr = *start_lsn = 0;
+
+    if (ticket->t_curr_res < len) {
+#ifdef DEBUG
+	xlog_panic(
+		"xfs_log_write: reservation ran out. Need to up reservation");
+#else
+	/* Customer configurable panic */
+	xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
+		"xfs_log_write: reservation ran out. Need to up reservation");
+	/* If we did not panic, shut down the filesystem */
+	xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+#endif
+    } else
+	ticket->t_curr_res -= len;
+
+    for (index = 0; index < nentries; ) {
+	if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
+					       &contwr, &log_offset)))
+		return (error);
+
+	ASSERT(log_offset <= iclog->ic_size - 1);
+	ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset);
+
+	/* start_lsn is the first lsn written to. That's all we need. */
+	if (! *start_lsn)
+	    *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+
+	/* This loop writes out as many regions as can fit in the amount
+	 * of space which was allocated by xlog_state_get_iclog_space().
+	 */
+	while (index < nentries) {
+	    ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
+	    ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
+	    start_rec_copy = 0;
+
+	    /* If first write for transaction, insert start record.
+	     * We can't be trying to commit if we are inited.  We can't
+	     * have any "partial_copy" if we are inited.
+	     */
+	    if (ticket->t_flags & XLOG_TIC_INITED) {
+		logop_head		= (xlog_op_header_t *)ptr;
+		INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
+		logop_head->oh_clientid = ticket->t_clientid;
+		logop_head->oh_len	= 0;
+		logop_head->oh_flags    = XLOG_START_TRANS;
+		logop_head->oh_res2	= 0;
+		ticket->t_flags		&= ~XLOG_TIC_INITED;	/* clear bit */
+		record_cnt++;
+
+		start_rec_copy = sizeof(xlog_op_header_t);
+		xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
+	    }
+
+	    /* Copy log operation header directly into data section */
+	    logop_head			= (xlog_op_header_t *)ptr;
+	    INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
+	    logop_head->oh_clientid	= ticket->t_clientid;
+	    logop_head->oh_res2		= 0;
+
+	    /* header copied directly */
+	    xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));
+
+	    /* are we copying a commit or unmount record? */
+	    logop_head->oh_flags = flags;
+
+	    /*
+	     * We've seen logs corrupted with bad transaction client
+	     * ids.  This makes sure that XFS doesn't generate them.
+	     * Turn this into an EIO and shut down the filesystem.
+	     */
+	    switch (logop_head->oh_clientid)  {
+	    case XFS_TRANSACTION:
+	    case XFS_VOLUME:
+	    case XFS_LOG:
+		break;
+	    default:
+		xfs_fs_cmn_err(CE_WARN, mp,
+		    "Bad XFS transaction clientid 0x%x in ticket 0x%p",
+		    logop_head->oh_clientid, tic);
+		return XFS_ERROR(EIO);
+	    }
+
+	    /* Partial write last time? => (partial_copy != 0)
+	     * need_copy is the amount we'd like to copy if everything could
+	     * fit in the current memcpy.
+	     */
+	    need_copy =	reg[index].i_len - partial_copy_len;
+
+	    copy_off = partial_copy_len;
+	    if (need_copy <= iclog->ic_size - log_offset) { /*complete write */
+		INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy);
+		if (partial_copy)
+		    logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
+		partial_copy_len = partial_copy = 0;
+	    } else {					    /* partial write */
+		copy_len = iclog->ic_size - log_offset;
+		INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len);
+		logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
+		if (partial_copy)
+			logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
+		partial_copy_len += copy_len;
+		partial_copy++;
+		len += sizeof(xlog_op_header_t); /* from splitting of region */
+		/* account for new log op header */
+		ticket->t_curr_res -= sizeof(xlog_op_header_t);
+	    }
+	    xlog_verify_dest_ptr(log, ptr);
+
+	    /* copy region */
+	    ASSERT(copy_len >= 0);
+	    memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
+	    xlog_write_adv_cnt(ptr, len, log_offset, copy_len);
+
+	    /* make copy_len total bytes copied, including headers */
+	    copy_len += start_rec_copy + sizeof(xlog_op_header_t);
+	    record_cnt++;
+	    data_cnt += contwr ? copy_len : 0;
+	    if (partial_copy) {			/* copied partial region */
+		    /* already marked WANT_SYNC by xlog_state_get_iclog_space */
+		    xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+		    record_cnt = data_cnt = 0;
+		    if ((error = xlog_state_release_iclog(log, iclog)))
+			    return (error);
+		    break;			/* don't increment index */
+	    } else {				/* copied entire region */
+		index++;
+		partial_copy_len = partial_copy = 0;
+
+		if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
+		    xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+		    record_cnt = data_cnt = 0;
+		    xlog_state_want_sync(log, iclog);
+		    if (commit_iclog) {
+			ASSERT(flags & XLOG_COMMIT_TRANS);
+			*commit_iclog = iclog;
+		    } else if ((error = xlog_state_release_iclog(log, iclog)))
+			   return (error);
+		    if (index == nentries)
+			    return 0;		/* we are done */
+		    else
+			    break;
+		}
+	    } /* if (partial_copy) */
+	} /* while (index < nentries) */
+    } /* for (index = 0; index < nentries; ) */
+    ASSERT(len == 0);
+
+    xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+    if (commit_iclog) {
+	ASSERT(flags & XLOG_COMMIT_TRANS);
+	*commit_iclog = iclog;
+	return 0;
+    }
+    return (xlog_state_release_iclog(log, iclog));
+}	/* xlog_write */
+
+
+/*****************************************************************************
+ *
+ *		State Machine functions
+ *
+ *****************************************************************************
+ */
+
+/* Clean iclogs starting from the head.  This ordering must be
+ * maintained, so an iclog doesn't become ACTIVE beyond one that
+ * is SYNCING.  This is also required to maintain the notion that we use
+ * a counting semaphore to hold off would-be writers to the log when every
+ * iclog is trying to sync to disk.
+ *
+ * State Change: DIRTY -> ACTIVE
+ */
+void
+xlog_state_clean_log(xlog_t *log)
+{
+	xlog_in_core_t	*iclog;
+	int changed = 0;
+
+	iclog = log->l_iclog;
+	do {
+		if (iclog->ic_state == XLOG_STATE_DIRTY) {
+			iclog->ic_state	= XLOG_STATE_ACTIVE;
+			iclog->ic_offset       = 0;
+			iclog->ic_callback	= NULL;   /* don't need to free */
+			/*
+			 * If the number of ops in this iclog indicates it just
+			 * contains the dummy transaction, we can
+			 * change state into IDLE (the second time around).
+			 * Otherwise we should change the state into
+			 * NEED a dummy.
+			 * We don't need to cover the dummy.
+			 */
+			if (!changed &&
+			   (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
+				changed = 1;
+			} else {
+				/*
+				 * We have two dirty iclogs so start over.
+				 * This could also be the case if the number
+				 * of ops indicates this is not the dummy
+				 * going out.
+				 */
+				changed = 2;
+			}
+			iclog->ic_header.h_num_logops = 0;
+			memset(iclog->ic_header.h_cycle_data, 0,
+			      sizeof(iclog->ic_header.h_cycle_data));
+			iclog->ic_header.h_lsn = 0;
+		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
+			/* do nothing */;
+		else
+			break;	/* stop cleaning */
+		iclog = iclog->ic_next;
+	} while (iclog != log->l_iclog);
+
+	/* log is locked when we are called */
+	/*
+	 * Change state for the dummy log recording.
+	 * We usually go to NEED.  But we go to NEED2 if the changed flag
+	 * indicates we are done writing the dummy record.
+	 * If we are done with the second dummy record (DONE2), then
+	 * we go to IDLE.
+	 */
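+	/*
+	 * Roughly: NEED -> DONE -> NEED2 -> DONE2 -> IDLE when two dummy
+	 * records in a row go out cleanly (xfs_log_need_covered() moves
+	 * NEED to DONE and NEED2 to DONE2); any other activity drops the
+	 * state back to NEED.
+	 */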
+	if (changed) {
+		switch (log->l_covered_state) {
+		case XLOG_STATE_COVER_IDLE:
+		case XLOG_STATE_COVER_NEED:
+		case XLOG_STATE_COVER_NEED2:
+			log->l_covered_state = XLOG_STATE_COVER_NEED;
+			break;
+
+		case XLOG_STATE_COVER_DONE:
+			if (changed == 1)
+				log->l_covered_state = XLOG_STATE_COVER_NEED2;
+			else
+				log->l_covered_state = XLOG_STATE_COVER_NEED;
+			break;
+
+		case XLOG_STATE_COVER_DONE2:
+			if (changed == 1)
+				log->l_covered_state = XLOG_STATE_COVER_IDLE;
+			else
+				log->l_covered_state = XLOG_STATE_COVER_NEED;
+			break;
+
+		default:
+			ASSERT(0);
+		}
+	}
+}	/* xlog_state_clean_log */
+
+STATIC xfs_lsn_t
+xlog_get_lowest_lsn(
+	xlog_t		*log)
+{
+	xlog_in_core_t  *lsn_log;
+	xfs_lsn_t	lowest_lsn, lsn;
+
+	lsn_log = log->l_iclog;
+	lowest_lsn = 0;
+	do {
+	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
+		lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
+		if ((lsn && !lowest_lsn) ||
+		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
+			lowest_lsn = lsn;
+		}
+	    }
+	    lsn_log = lsn_log->ic_next;
+	} while (lsn_log != log->l_iclog);
+	return(lowest_lsn);
+}
+
+
+STATIC void
+xlog_state_do_callback(
+	xlog_t		*log,
+	int		aborted,
+	xlog_in_core_t	*ciclog)
+{
+	xlog_in_core_t	   *iclog;
+	xlog_in_core_t	   *first_iclog;	/* used to know when we've
+						 * processed all iclogs once */
+	xfs_log_callback_t *cb, *cb_next;
+	int		   flushcnt = 0;
+	xfs_lsn_t	   lowest_lsn;
+	int		   ioerrors;	/* counter: iclogs with errors */
+	int		   loopdidcallbacks; /* flag: inner loop did callbacks*/
+	int		   funcdidcallbacks; /* flag: function did callbacks */
+	int		   repeats;	/* for issuing console warnings if
+					 * looping too many times */
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+	first_iclog = iclog = log->l_iclog;
+	ioerrors = 0;
+	funcdidcallbacks = 0;
+	repeats = 0;
+
+	do {
+		/*
+		 * Scan all iclogs starting with the one pointed to by the
+		 * log.  Reset this starting point each time the log is
+		 * unlocked (during callbacks).
+		 *
+		 * Keep looping through iclogs until one full pass is made
+		 * without running any callbacks.
+		 */
+		first_iclog = log->l_iclog;
+		iclog = log->l_iclog;
+		loopdidcallbacks = 0;
+		repeats++;
+
+		do {
+
+			/* skip all iclogs in the ACTIVE & DIRTY states */
+			if (iclog->ic_state &
+			    (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
+				iclog = iclog->ic_next;
+				continue;
+			}
+
+			/*
+			 * Between marking a filesystem SHUTDOWN and stopping
+			 * the log, we do flush all iclogs to disk (if there
+			 * wasn't a log I/O error). So, we do want things to
+			 * go smoothly in case of just a SHUTDOWN  w/o a
+			 * go smoothly in case of just a SHUTDOWN without a
+			 */
+			if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
+				/*
+				 * Can only perform callbacks in order.  Since
+				 * this iclog is not in the DONE_SYNC/
+				 * DO_CALLBACK state, we skip the rest and
+				 * just try to clean up.  If we set our iclog
+				 * to DO_CALLBACK, we will not process it when
+				 * we retry since a previous iclog is in the
+				 * CALLBACK and the state cannot change since
+				 * we are holding the LOG_LOCK.
+				 */
+				if (!(iclog->ic_state &
+					(XLOG_STATE_DONE_SYNC |
+						 XLOG_STATE_DO_CALLBACK))) {
+					if (ciclog && (ciclog->ic_state ==
+							XLOG_STATE_DONE_SYNC)) {
+						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
+					}
+					break;
+				}
+				/*
+				 * We now have an iclog that is in either the
+				 * DO_CALLBACK or DONE_SYNC state. The other
+				 * states (WANT_SYNC, SYNCING, or CALLBACK)
+				 * were caught by the above if and are skipped
+				 * here (i.e. we aren't doing their callbacks).
+				 */
+
+				/*
+				 * We will do one more check here to see if we
+				 * have chased our tail around.
+				 */
+
+				lowest_lsn = xlog_get_lowest_lsn(log);
+				if (lowest_lsn && (
+					XFS_LSN_CMP(
+						lowest_lsn,
+						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
+					)<0)) {
+					iclog = iclog->ic_next;
+					continue; /* Leave this iclog for
+						   * another thread */
+				}
+
+				iclog->ic_state = XLOG_STATE_CALLBACK;
+
+				LOG_UNLOCK(log, s);
+
+				/* l_last_sync_lsn field protected by
+				 * GRANT_LOCK. Don't worry about iclog's lsn.
+				 * No one else can be here except us.
+				 */
+				s = GRANT_LOCK(log);
+				ASSERT(XFS_LSN_CMP(
+						log->l_last_sync_lsn,
+						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
+					)<=0);
+				log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+				GRANT_UNLOCK(log, s);
+
+				/*
+				 * Keep processing entries in the callback list
+				 * until we come around and it is empty.  We
+				 * need to atomically see that the list is
+				 * empty and change the state to DIRTY so that
+				 * we don't miss any more callbacks being added.
+				 */
+				s = LOG_LOCK(log);
+			} else {
+				ioerrors++;
+			}
+			cb = iclog->ic_callback;
+
+			while (cb != 0) {
+				iclog->ic_callback_tail = &(iclog->ic_callback);
+				iclog->ic_callback = NULL;
+				LOG_UNLOCK(log, s);
+
+				/* perform callbacks in the order given */
+				for (; cb != 0; cb = cb_next) {
+					cb_next = cb->cb_next;
+					cb->cb_func(cb->cb_arg, aborted);
+				}
+				s = LOG_LOCK(log);
+				cb = iclog->ic_callback;
+			}
+
+			loopdidcallbacks++;
+			funcdidcallbacks++;
+
+			ASSERT(iclog->ic_callback == 0);
+			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
+				iclog->ic_state = XLOG_STATE_DIRTY;
+
+			/*
+			 * Transition from DIRTY to ACTIVE if applicable.
+			 * NOP if STATE_IOERROR.
+			 */
+			xlog_state_clean_log(log);
+
+			/* wake up threads waiting in xfs_log_force() */
+			sv_broadcast(&iclog->ic_forcesema);
+
+			iclog = iclog->ic_next;
+		} while (first_iclog != iclog);
+		if (repeats && (repeats % 10) == 0) {
+			xfs_fs_cmn_err(CE_WARN, log->l_mp,
+				"xlog_state_do_callback: looping %d", repeats);
+		}
+	} while (!ioerrors && loopdidcallbacks);
+
+	/*
+	 * make one last gasp attempt to see if iclogs are being left in
+	 * limbo..
+	 */
+#ifdef DEBUG
+	if (funcdidcallbacks) {
+		first_iclog = iclog = log->l_iclog;
+		do {
+			ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
+			/*
+			 * Terminate the loop if iclogs are found in states
+			 * which will cause other threads to clean up iclogs.
+			 *
+			 * SYNCING - i/o completion will go through logs
+			 * DONE_SYNC - interrupt thread should be waiting for
+			 *              LOG_LOCK
+			 * IOERROR - give up hope all ye who enter here
+			 */
+			if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+			    iclog->ic_state == XLOG_STATE_SYNCING ||
+			    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
+			    iclog->ic_state == XLOG_STATE_IOERROR )
+				break;
+			iclog = iclog->ic_next;
+		} while (first_iclog != iclog);
+	}
+#endif
+
+	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
+		flushcnt = log->l_flushcnt;
+		log->l_flushcnt = 0;
+	}
+	LOG_UNLOCK(log, s);
+	while (flushcnt--)
+		vsema(&log->l_flushsema);
+}	/* xlog_state_do_callback */
+
+
+/*
+ * Finish transitioning this iclog to the dirty state.
+ *
+ * Make sure that we completely execute this routine only when this is
+ * the last call to the iclog.  There is a good chance that iclog flushes,
+ * when we reach the end of the physical log, get turned into 2 separate
+ * calls to bwrite.  Hence, one iclog flush could generate two calls to this
+ * routine.  By using the reference count bwritecnt, we guarantee that only
+ * the second completion goes through.
+ *
+ * Callbacks could take time, so they are done outside the scope of the
+ * global state machine log lock.  Assume that the calls to cvsema won't
+ * take a long time.  At least we know it won't sleep.
+ */
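+/*
+ * Illustrative case: an iclog flush that wraps the physical end of the log
+ * is issued as two bwrites.  The first completion only drops ic_bwritecnt
+ * from 2 to 1 and returns; the second completion marks the iclog DONE_SYNC
+ * and runs the callback processing.
+ */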
+void
+xlog_state_done_syncing(
+	xlog_in_core_t	*iclog,
+	int		aborted)
+{
+	xlog_t		   *log = iclog->ic_log;
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+
+	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
+	       iclog->ic_state == XLOG_STATE_IOERROR);
+	ASSERT(iclog->ic_refcnt == 0);
+	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
+
+
+	/*
+	 * If we got an error, either on the first buffer, or in the case of
+	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
+	 * and none should ever be attempted to be written to disk
+	 * again.
+	 */
+	if (iclog->ic_state != XLOG_STATE_IOERROR) {
+		if (--iclog->ic_bwritecnt == 1) {
+			LOG_UNLOCK(log, s);
+			return;
+		}
+		iclog->ic_state = XLOG_STATE_DONE_SYNC;
+	}
+
+	/*
+	 * Someone could be sleeping prior to writing out the next
+	 * iclog buffer; we wake them all.  One will get to do the
+	 * I/O, the others get to wait for the result.
+	 */
+	sv_broadcast(&iclog->ic_writesema);
+	LOG_UNLOCK(log, s);
+	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
+}	/* xlog_state_done_syncing */
+
+
+/*
+ * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
+ * sleep.  The flush semaphore is set to the number of in-core buffers and
+ * decremented around disk syncing.  Therefore, if all buffers are syncing,
+ * this semaphore will cause new writes to sleep until a sync completes.
+ * Otherwise, this code just does p() followed by v().  This approximates
+ * a sleep/wakeup except we can't race.
+ *
+ * The in-core logs are used in a circular fashion. They are not used
+ * out-of-order even when an iclog past the head is free.
+ *
+ * return:
+ *	* log_offset where xlog_write() can start writing into the in-core
+ *		log's data space.
+ *	* in-core log pointer to which xlog_write() should write.
+ *	* boolean indicating this is a continued write to an in-core log.
+ *		If this is the last write, then the in-core log's offset field
+ *		needs to be incremented, depending on the amount of data which
+ *		is copied.
+ */
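+/*
+ * Rough flow when no iclog is ACTIVE (sketch): the caller bumps l_flushcnt,
+ * drops the log lock and sleeps on l_flushsema; once the head iclog is
+ * ACTIVE (or in IOERROR) again, xlog_state_do_callback() issues one vsema()
+ * per queued waiter and the caller retries from the "restart" label.
+ */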
+int
+xlog_state_get_iclog_space(xlog_t	  *log,
+			   int		  len,
+			   xlog_in_core_t **iclogp,
+			   xlog_ticket_t  *ticket,
+			   int		  *continued_write,
+			   int		  *logoffsetp)
+{
+	SPLDECL(s);
+	int		  log_offset;
+	xlog_rec_header_t *head;
+	xlog_in_core_t	  *iclog;
+	int		  error;
+
+restart:
+	s = LOG_LOCK(log);
+	if (XLOG_FORCED_SHUTDOWN(log)) {
+		LOG_UNLOCK(log, s);
+		return XFS_ERROR(EIO);
+	}
+
+	iclog = log->l_iclog;
+	if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
+		log->l_flushcnt++;
+		LOG_UNLOCK(log, s);
+		xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
+		XFS_STATS_INC(xs_log_noiclogs);
+		/* Ensure that log writes happen */
+		psema(&log->l_flushsema, PINOD);
+		goto restart;
+	}
+	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+	head = &iclog->ic_header;
+
+	iclog->ic_refcnt++;			/* prevents sync */
+	log_offset = iclog->ic_offset;
+
+	/* On the 1st write to an iclog, figure out lsn.  This works
+	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
+	 * committing to.  If the offset is set, that's how many blocks
+	 * must be written.
+	 */
+	if (log_offset == 0) {
+		ticket->t_curr_res -= log->l_iclog_hsize;
+		INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
+		ASSIGN_LSN(head->h_lsn, log);
+		ASSERT(log->l_curr_block >= 0);
+	}
+
+	/* If there is enough room to write everything, then do it.  Otherwise,
+	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
+	 * bit is on, so this will get flushed out.  Don't update ic_offset
+	 * until you know exactly how many bytes get copied.  Therefore, wait
+	 * until later to update ic_offset.
+	 *
+	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
+	 * can fit into remaining data section.
+	 */
+	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
+		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
+
+		/* If I'm the only one writing to this iclog, sync it to disk */
+		if (iclog->ic_refcnt == 1) {
+			LOG_UNLOCK(log, s);
+			if ((error = xlog_state_release_iclog(log, iclog)))
+				return (error);
+		} else {
+			iclog->ic_refcnt--;
+			LOG_UNLOCK(log, s);
+		}
+		goto restart;
+	}
+
+	/* Do we have enough room to write the full amount in the remainder
+	 * of this iclog?  Or must we continue a write on the next iclog and
+	 * mark this iclog as completely taken?  In the case where we switch
+	 * iclogs (to mark it taken), this particular iclog will release/sync
+	 * to disk in xlog_write().
+	 */
+	if (len <= iclog->ic_size - iclog->ic_offset) {
+		*continued_write = 0;
+		iclog->ic_offset += len;
+	} else {
+		*continued_write = 1;
+		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
+	}
+	*iclogp = iclog;
+
+	ASSERT(iclog->ic_offset <= iclog->ic_size);
+	LOG_UNLOCK(log, s);
+
+	*logoffsetp = log_offset;
+	return 0;
+}	/* xlog_state_get_iclog_space */
+
+/*
+ * Atomically get the log space required for a log ticket.
+ *
+ * Once a ticket gets put onto the reserveq, it will only return after
+ * the needed reservation is satisfied.
+ */
+STATIC int
+xlog_grant_log_space(xlog_t	   *log,
+		     xlog_ticket_t *tic)
+{
+	int		 free_bytes;
+	int		 need_bytes;
+	SPLDECL(s);
+#ifdef DEBUG
+	xfs_lsn_t	 tail_lsn;
+#endif
+
+
+#ifdef DEBUG
+	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
+		panic("grant Recovery problem");
+#endif
+
+	/* Is there space or do we need to sleep? */
+	s = GRANT_LOCK(log);
+	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter");
+
+	/* something is already sleeping; insert new transaction at end */
+	if (log->l_reserve_headq) {
+		XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+		xlog_trace_loggrant(log, tic,
+				    "xlog_grant_log_space: sleep 1");
+		/*
+		 * Gotta check this before going to sleep, while we're
+		 * holding the grant lock.
+		 */
+		if (XLOG_FORCED_SHUTDOWN(log))
+			goto error_return;
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		/*
+		 * If we got an error, and the filesystem is shutting down,
+		 * we'll catch it down below. So just continue...
+		 */
+		xlog_trace_loggrant(log, tic,
+				    "xlog_grant_log_space: wake 1");
+		s = GRANT_LOCK(log);
+	}
+	if (tic->t_flags & XFS_LOG_PERM_RESERV)
+		need_bytes = tic->t_unit_res*tic->t_ocnt;
+	else
+		need_bytes = tic->t_unit_res;
+
+redo:
+	if (XLOG_FORCED_SHUTDOWN(log))
+		goto error_return;
+
+	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
+				     log->l_grant_reserve_bytes);
+	if (free_bytes < need_bytes) {
+		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
+			XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+		xlog_trace_loggrant(log, tic,
+				    "xlog_grant_log_space: sleep 2");
+		XFS_STATS_INC(xs_sleep_logspace);
+		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+
+		if (XLOG_FORCED_SHUTDOWN(log)) {
+			s = GRANT_LOCK(log);
+			goto error_return;
+		}
+
+		xlog_trace_loggrant(log, tic,
+				    "xlog_grant_log_space: wake 2");
+		xlog_grant_push_ail(log->l_mp, need_bytes);
+		s = GRANT_LOCK(log);
+		goto redo;
+	} else if (tic->t_flags & XLOG_TIC_IN_Q)
+		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+
+	/* we've got enough space */
+	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
+	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
+#ifdef DEBUG
+	tail_lsn = log->l_tail_lsn;
+	/*
+	 * Check to make sure the grant write head didn't just overlap the
+	 * tail.  If the cycles are the same, we can't be overlapping.
+	 * Otherwise, make sure that the cycles differ by exactly one and
+	 * check the byte count.
+	 */
+	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
+		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
+		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+	}
+#endif
+	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
+	xlog_verify_grant_head(log, 1);
+	GRANT_UNLOCK(log, s);
+	return 0;
+
+ error_return:
+	if (tic->t_flags & XLOG_TIC_IN_Q)
+		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
+	/*
+	 * If we are failing, make sure the ticket doesn't have any
+	 * current reservations. We don't want to add this back when
+	 * the ticket/transaction gets cancelled.
+	 */
+	tic->t_curr_res = 0;
+	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
+	GRANT_UNLOCK(log, s);
+	return XFS_ERROR(EIO);
+}	/* xlog_grant_log_space */
+
+
+/*
+ * Replenish the byte reservation required by moving the grant write head.
+ */
+STATIC int
+xlog_regrant_write_log_space(xlog_t	   *log,
+			     xlog_ticket_t *tic)
+{
+	SPLDECL(s);
+	int		free_bytes, need_bytes;
+	xlog_ticket_t	*ntic;
+#ifdef DEBUG
+	xfs_lsn_t	tail_lsn;
+#endif
+
+	tic->t_curr_res = tic->t_unit_res;
+
+	if (tic->t_cnt > 0)
+		return (0);
+
+#ifdef DEBUG
+	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
+		panic("regrant Recovery problem");
+#endif
+
+	s = GRANT_LOCK(log);
+	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		goto error_return;
+
+	/* If there are other waiters on the queue then give them a
+	 * chance at logspace before us. Wake up the first waiters;
+	 * if we do not wake up all the waiters, then go to sleep waiting
+	 * for more free space. Otherwise, try to get some space for
+	 * this transaction.
+	 */
+
+	if ((ntic = log->l_write_headq)) {
+		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
+					     log->l_grant_write_bytes);
+		do {
+			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
+
+			if (free_bytes < ntic->t_unit_res)
+				break;
+			free_bytes -= ntic->t_unit_res;
+			sv_signal(&ntic->t_sema);
+			ntic = ntic->t_next;
+		} while (ntic != log->l_write_headq);
+
+		if (ntic != log->l_write_headq) {
+			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
+				XLOG_INS_TICKETQ(log->l_write_headq, tic);
+
+			xlog_trace_loggrant(log, tic,
+				    "xlog_regrant_write_log_space: sleep 1");
+			XFS_STATS_INC(xs_sleep_logspace);
+			sv_wait(&tic->t_sema, PINOD|PLTWAIT,
+				&log->l_grant_lock, s);
+
+			/* If we're shutting down, this tic is already
+			 * off the queue */
+			if (XLOG_FORCED_SHUTDOWN(log)) {
+				s = GRANT_LOCK(log);
+				goto error_return;
+			}
+
+			xlog_trace_loggrant(log, tic,
+				    "xlog_regrant_write_log_space: wake 1");
+			xlog_grant_push_ail(log->l_mp, tic->t_unit_res);
+			s = GRANT_LOCK(log);
+		}
+	}
+
+	need_bytes = tic->t_unit_res;
+
+redo:
+	if (XLOG_FORCED_SHUTDOWN(log))
+		goto error_return;
+
+	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
+				     log->l_grant_write_bytes);
+	if (free_bytes < need_bytes) {
+		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
+			XLOG_INS_TICKETQ(log->l_write_headq, tic);
+		XFS_STATS_INC(xs_sleep_logspace);
+		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+
+		/* If we're shutting down, this tic is already off the queue */
+		if (XLOG_FORCED_SHUTDOWN(log)) {
+			s = GRANT_LOCK(log);
+			goto error_return;
+		}
+
+		xlog_trace_loggrant(log, tic,
+				    "xlog_regrant_write_log_space: wake 2");
+		xlog_grant_push_ail(log->l_mp, need_bytes);
+		s = GRANT_LOCK(log);
+		goto redo;
+	} else if (tic->t_flags & XLOG_TIC_IN_Q)
+		XLOG_DEL_TICKETQ(log->l_write_headq, tic);
+
+	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
+#ifdef DEBUG
+	tail_lsn = log->l_tail_lsn;
+	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
+		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
+		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+	}
+#endif
+
+	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit");
+	xlog_verify_grant_head(log, 1);
+	GRANT_UNLOCK(log, s);
+	return (0);
+
+
+ error_return:
+	if (tic->t_flags & XLOG_TIC_IN_Q)
+		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
+	/*
+	 * If we are failing, make sure the ticket doesn't have any
+	 * current reservations. We don't want to add this back when
+	 * the ticket/transaction gets cancelled.
+	 */
+	tic->t_curr_res = 0;
+	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
+	GRANT_UNLOCK(log, s);
+	return XFS_ERROR(EIO);
+}	/* xlog_regrant_write_log_space */
+
+
+/* The first cnt-1 times through here we don't need to
+ * move the grant write head because the permanent
+ * reservation has reserved cnt times the unit amount.
+ * Release part of current permanent unit reservation and
+ * reset current reservation to be one unit's worth.  Also
+ * move grant reservation head forward.
+ */
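+/*
+ * Illustrative case: a permanent ticket taken with cnt == 2 comes through
+ * here after each commit.  On the first pass it still has a pre-reserved
+ * unit left (t_cnt > 0 after the decrement) and returns early; on later
+ * passes (t_cnt == 0) the reserve grant head must be moved forward by one
+ * fresh unit.
+ */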
+STATIC void
+xlog_regrant_reserve_log_space(xlog_t	     *log,
+			       xlog_ticket_t *ticket)
+{
+	SPLDECL(s);
+
+	xlog_trace_loggrant(log, ticket,
+			    "xlog_regrant_reserve_log_space: enter");
+	if (ticket->t_cnt > 0)
+		ticket->t_cnt--;
+
+	s = GRANT_LOCK(log);
+	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
+	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+	ticket->t_curr_res = ticket->t_unit_res;
+	xlog_trace_loggrant(log, ticket,
+			    "xlog_regrant_reserve_log_space: sub current res");
+	xlog_verify_grant_head(log, 1);
+
+	/* just return if we still have some of the pre-reserved space */
+	if (ticket->t_cnt > 0) {
+		GRANT_UNLOCK(log, s);
+		return;
+	}
+
+	XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
+	xlog_trace_loggrant(log, ticket,
+			    "xlog_regrant_reserve_log_space: exit");
+	xlog_verify_grant_head(log, 0);
+	GRANT_UNLOCK(log, s);
+	ticket->t_curr_res = ticket->t_unit_res;
+}	/* xlog_regrant_reserve_log_space */
+
+
+/*
+ * Give back the space left from a reservation.
+ *
+ * All the information we need to make a correct determination of space left
+ * is present.  For non-permanent reservations, things are quite easy.  The
+ * count should have been decremented to zero.  We only need to deal with the
+ * space remaining in the current reservation part of the ticket.  If the
+ * ticket contains a permanent reservation, there may be left over space which
+ * needs to be released.  A count of N means that N-1 refills of the current
+ * reservation can be done before we need to ask for more space.  The first
+ * one goes to fill up the first current reservation.  Once we run out of
+ * space, the count will stay at zero and the only space remaining will be
+ * in the current reservation field.
+ */
+STATIC void
+xlog_ungrant_log_space(xlog_t	     *log,
+		       xlog_ticket_t *ticket)
+{
+	SPLDECL(s);
+
+	if (ticket->t_cnt > 0)
+		ticket->t_cnt--;
+
+	s = GRANT_LOCK(log);
+	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
+
+	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
+	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+
+	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
+
+	/* If this is a permanent reservation ticket, we may be able to free
+	 * up more space based on the remaining count.
+	 */
+	if (ticket->t_cnt > 0) {
+		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
+		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w');
+		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r');
+	}
+
+	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
+	xlog_verify_grant_head(log, 1);
+	GRANT_UNLOCK(log, s);
+	xfs_log_move_tail(log->l_mp, 1);
+}	/* xlog_ungrant_log_space */
+
+
+/*
+ * Atomically put back used ticket.
+ */
+void
+xlog_state_put_ticket(xlog_t	    *log,
+		      xlog_ticket_t *tic)
+{
+	unsigned long s;
+
+	s = LOG_LOCK(log);
+	xlog_ticket_put(log, tic);
+	LOG_UNLOCK(log, s);
+}	/* xlog_state_put_ticket */
+
+/*
+ * Flush iclog to disk if this is the last reference to the given iclog and
+ * the WANT_SYNC bit is set.
+ *
+ * When this function is entered, the iclog is not necessarily in the
+ * WANT_SYNC state.  It may be sitting around waiting to get filled.
+ */
+int
+xlog_state_release_iclog(xlog_t		*log,
+			 xlog_in_core_t	*iclog)
+{
+	SPLDECL(s);
+	int		sync = 0;	/* do we sync? */
+
+	xlog_assign_tail_lsn(log->l_mp);
+
+	s = LOG_LOCK(log);
+
+	if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		LOG_UNLOCK(log, s);
+		return XFS_ERROR(EIO);
+	}
+
+	ASSERT(iclog->ic_refcnt > 0);
+	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
+	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
+
+	if (--iclog->ic_refcnt == 0 &&
+	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+		sync++;
+		iclog->ic_state = XLOG_STATE_SYNCING;
+		INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		/* cycle incremented when incrementing curr_block */
+	}
+
+	LOG_UNLOCK(log, s);
+
+	/*
+	 * We let the log lock go, so it's possible that we hit a log I/O
+	 * error or some other SHUTDOWN condition that marks the iclog
+	 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
+	 * this iclog has consistent data, so we ignore IOERROR
+	 * flags after this point.
+	 */
+	if (sync) {
+		return xlog_sync(log, iclog);
+	}
+	return (0);
+
+}	/* xlog_state_release_iclog */
+
+
+/*
+ * This routine will mark the current iclog in the ring as WANT_SYNC
+ * and move the current iclog pointer to the next iclog in the ring.
+ * When this routine is called from xlog_state_get_iclog_space(), the
+ * exact size of the iclog has not yet been determined.  All we know is
+ * that we have run out of space in this log record.
+ */
+STATIC void
+xlog_state_switch_iclogs(xlog_t		*log,
+			 xlog_in_core_t *iclog,
+			 int		eventual_size)
+{
+	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+	if (!eventual_size)
+		eventual_size = iclog->ic_offset;
+	iclog->ic_state = XLOG_STATE_WANT_SYNC;
+	INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
+	log->l_prev_block = log->l_curr_block;
+	log->l_prev_cycle = log->l_curr_cycle;
+
+	/* roll log?: ic_offset changed later */
+	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
+
+	/* Round up to next log-sunit */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+	    log->l_mp->m_sb.sb_logsunit > 1) {
+		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
+		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
+	}
+
+	if (log->l_curr_block >= log->l_logBBsize) {
+		log->l_curr_cycle++;
+		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
+			log->l_curr_cycle++;
+		log->l_curr_block -= log->l_logBBsize;
+		ASSERT(log->l_curr_block >= 0);
+	}
+	ASSERT(iclog == log->l_iclog);
+	log->l_iclog = iclog->ic_next;
+}	/* xlog_state_switch_iclogs */
+
+
+/*
+ * Write out all data in the in-core log as of this exact moment in time.
+ *
+ * Data may be written to the in-core log during this call.  However,
+ * we don't guarantee this data will be written out.  A change from past
+ * implementation means this routine will *not* write out zero length LRs.
+ *
+ * Basically, we try and perform an intelligent scan of the in-core logs.
+ * If we determine there is no flushable data, we just return.  There is no
+ * flushable data if:
+ *
+ *	1. the current iclog is active and has no data; the previous iclog
+ *		is in the active or dirty state.
+ *	2. the current iclog is dirty, and the previous iclog is in the
+ *		active or dirty state.
+ *
+ * We may sleep (call psema) if:
+ *
+ *	1. the current iclog is not in the active nor dirty state.
+ *	2. the current iclog is dirty, and the previous iclog is not in the
+ *		active nor dirty state.
+ *	3. the current iclog is active, and there is another thread writing
+ *		to this particular iclog.
+ *	4. a) the current iclog is active and has no other writers
+ *	   b) when we return from flushing out this iclog, it is still
+ *		not in the active nor dirty state.
+ */
+STATIC int
+xlog_state_sync_all(xlog_t *log, uint flags)
+{
+	xlog_in_core_t	*iclog;
+	xfs_lsn_t	lsn;
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+
+	iclog = log->l_iclog;
+	if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		LOG_UNLOCK(log, s);
+		return XFS_ERROR(EIO);
+	}
+
+	/* If the head iclog is not active nor dirty, we just attach
+	 * ourselves to the head and go to sleep.
+	 */
+	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+	    iclog->ic_state == XLOG_STATE_DIRTY) {
+		/*
+		 * If the head is dirty or (active and empty), then
+		 * we need to look at the previous iclog.  If the previous
+		 * iclog is active or dirty we are done.  There is nothing
+		 * to sync out.  Otherwise, we attach ourselves to the
+		 * previous iclog and go to sleep.
+		 */
+		if (iclog->ic_state == XLOG_STATE_DIRTY ||
+		    (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
+			iclog = iclog->ic_prev;
+			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+			    iclog->ic_state == XLOG_STATE_DIRTY)
+				goto no_sleep;
+			else
+				goto maybe_sleep;
+		} else {
+			if (iclog->ic_refcnt == 0) {
+				/* We are the only one with access to this
+				 * iclog.  Flush it out now.  There should
+				 * be a roundoff of zero to show that someone
+				 * has already taken care of the roundoff from
+				 * the previous sync.
+				 */
+				iclog->ic_refcnt++;
+				lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+				xlog_state_switch_iclogs(log, iclog, 0);
+				LOG_UNLOCK(log, s);
+
+				if (xlog_state_release_iclog(log, iclog))
+					return XFS_ERROR(EIO);
+				s = LOG_LOCK(log);
+				if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
+				    iclog->ic_state != XLOG_STATE_DIRTY)
+					goto maybe_sleep;
+				else
+					goto no_sleep;
+			} else {
+				/* Someone else is writing to this iclog.
+				 * Use its call to flush out the data.  However,
+				 * the other thread may not force out this LR,
+				 * so we mark it WANT_SYNC.
+				 */
+				xlog_state_switch_iclogs(log, iclog, 0);
+				goto maybe_sleep;
+			}
+		}
+	}
+
+	/* By the time we come around again, the iclog could've been filled
+	 * which would give it another lsn.  If we have a new lsn, just
+	 * return because the relevant data has been flushed.
+	 */
+maybe_sleep:
+	if (flags & XFS_LOG_SYNC) {
+		/*
+		 * We must check if we're shutting down here, before
+		 * we wait, while we're holding the LOG_LOCK.
+		 * Then we check again after waking up, in case our
+		 * sleep was disturbed by bad news.
+		 */
+		if (iclog->ic_state & XLOG_STATE_IOERROR) {
+			LOG_UNLOCK(log, s);
+			return XFS_ERROR(EIO);
+		}
+		XFS_STATS_INC(xs_log_force_sleep);
+		sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
+		/*
+		 * No need to grab the log lock here since we're
+		 * only deciding whether or not to return EIO
+		 * and the memory read should be atomic.
+		 */
+		if (iclog->ic_state & XLOG_STATE_IOERROR)
+			return XFS_ERROR(EIO);
+
+	} else {
+
+no_sleep:
+		LOG_UNLOCK(log, s);
+	}
+	return 0;
+}	/* xlog_state_sync_all */
+
+
+/*
+ * Used by code which implements synchronous log forces.
+ *
+ * Find in-core log with lsn.
+ *	If it is in the DIRTY state, just return.
+ *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
+ *		state and go to sleep or return.
+ *	If it is in any other state, go to sleep or return.
+ *
+ * If filesystem activity goes to zero, the iclog will get flushed only by
+ * bdflush().
+ */
+int
+xlog_state_sync(xlog_t	  *log,
+		xfs_lsn_t lsn,
+		uint	  flags)
+{
+    xlog_in_core_t	*iclog;
+    int			already_slept = 0;
+    SPLDECL(s);
+
+
+try_again:
+    s = LOG_LOCK(log);
+    iclog = log->l_iclog;
+
+    if (iclog->ic_state & XLOG_STATE_IOERROR) {
+	    LOG_UNLOCK(log, s);
+	    return XFS_ERROR(EIO);
+    }
+
+    do {
+	if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
+	    iclog = iclog->ic_next;
+	    continue;
+	}
+
+	if (iclog->ic_state == XLOG_STATE_DIRTY) {
+		LOG_UNLOCK(log, s);
+		return 0;
+	}
+
+	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+		/*
+		 * We sleep here if we haven't already slept (e.g.
+		 * this is the first time we've looked at the correct
+		 * iclog buf) and the buffer before us is going to
+		 * be sync'ed. The reason for this is that if we
+		 * are doing sync transactions here, by waiting for
+		 * the previous I/O to complete, we can allow a few
+		 * more transactions into this iclog before we close
+		 * it down.
+		 *
+		 * Otherwise, we mark the buffer WANT_SYNC, and bump
+		 * up the refcnt so we can release the log (which drops
+		 * the ref count).  The state switch keeps new transaction
+		 * commits from using this buffer.  When the current commits
+		 * finish writing into the buffer, the refcount will drop to
+		 * zero and the buffer will go out then.
+		 */
+		if (!already_slept &&
+		    (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
+						 XLOG_STATE_SYNCING))) {
+			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+			XFS_STATS_INC(xs_log_force_sleep);
+			sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
+				&log->l_icloglock, s);
+			already_slept = 1;
+			goto try_again;
+		} else {
+			iclog->ic_refcnt++;
+			xlog_state_switch_iclogs(log, iclog, 0);
+			LOG_UNLOCK(log, s);
+			if (xlog_state_release_iclog(log, iclog))
+				return XFS_ERROR(EIO);
+			s = LOG_LOCK(log);
+		}
+	}
+
+	if ((flags & XFS_LOG_SYNC) && /* sleep */
+	    !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+
+		/*
+		 * Don't wait on the forcesema if we know that we've
+		 * gotten a log write error.
+		 */
+		if (iclog->ic_state & XLOG_STATE_IOERROR) {
+			LOG_UNLOCK(log, s);
+			return XFS_ERROR(EIO);
+		}
+		XFS_STATS_INC(xs_log_force_sleep);
+		sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
+		/*
+		 * No need to grab the log lock here since we're
+		 * only deciding whether or not to return EIO
+		 * and the memory read should be atomic.
+		 */
+		if (iclog->ic_state & XLOG_STATE_IOERROR)
+			return XFS_ERROR(EIO);
+	} else {		/* just return */
+		LOG_UNLOCK(log, s);
+	}
+	return 0;
+
+    } while (iclog != log->l_iclog);
+
+    LOG_UNLOCK(log, s);
+    return (0);
+}	/* xlog_state_sync */
+
+
+/*
+ * Called when we want to mark the current iclog as being ready to sync to
+ * disk.
+ */
+void
+xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
+{
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+
+	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+		xlog_state_switch_iclogs(log, iclog, 0);
+	} else {
+		ASSERT(iclog->ic_state &
+			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
+	}
+
+	LOG_UNLOCK(log, s);
+}	/* xlog_state_want_sync */
+
+
+
+/*****************************************************************************
+ *
+ *		TICKET functions
+ *
+ *****************************************************************************
+ */
+
+/*
+ *	Algorithm doesn't take into account page size. ;-(
+ */
+STATIC void
+xlog_state_ticket_alloc(xlog_t *log)
+{
+	xlog_ticket_t	*t_list;
+	xlog_ticket_t	*next;
+	xfs_caddr_t	buf;
+	uint		i = (NBPP / sizeof(xlog_ticket_t)) - 2;
+	SPLDECL(s);
+
+	/*
+	 * The kmem_zalloc may sleep, so we shouldn't be holding the
+	 * global lock.  XXXmiken: may want to use zone allocator.
+	 */
+	buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP);
+
+	s = LOG_LOCK(log);
+
+	/* Attach 1st ticket to Q, so we can keep track of allocated memory */
+	t_list = (xlog_ticket_t *)buf;
+	t_list->t_next = log->l_unmount_free;
+	log->l_unmount_free = t_list++;
+	log->l_ticket_cnt++;
+	log->l_ticket_tcnt++;
+
+	/* Next ticket becomes first ticket attached to ticket free list */
+	if (log->l_freelist != NULL) {
+		ASSERT(log->l_tail != NULL);
+		log->l_tail->t_next = t_list;
+	} else {
+		log->l_freelist = t_list;
+	}
+	log->l_ticket_cnt++;
+	log->l_ticket_tcnt++;
+
+	/* Cycle through rest of alloc'ed memory, building up free Q */
+	for ( ; i > 0; i--) {
+		next = t_list + 1;
+		t_list->t_next = next;
+		t_list = next;
+		log->l_ticket_cnt++;
+		log->l_ticket_tcnt++;
+	}
+	t_list->t_next = NULL;
+	log->l_tail = t_list;
+	LOG_UNLOCK(log, s);
+}	/* xlog_state_ticket_alloc */
+
+
+/*
+ * Put ticket into free list
+ *
+ * Assumption: log lock is held around this call.
+ */
+STATIC void
+xlog_ticket_put(xlog_t		*log,
+		xlog_ticket_t	*ticket)
+{
+	sv_destroy(&ticket->t_sema);
+
+	/*
+	 * Don't think caching will make that much difference.  It's
+	 * more important to make debugging easier.
+	 */
+#if 0
+	/* real code will want to use LIFO for caching */
+	ticket->t_next = log->l_freelist;
+	log->l_freelist = ticket;
+	/* no need to clear fields */
+#else
+	/* When we debug, it is easier if tickets are cycled */
+	ticket->t_next     = NULL;
+	if (log->l_tail != 0) {
+		log->l_tail->t_next = ticket;
+	} else {
+		ASSERT(log->l_freelist == 0);
+		log->l_freelist = ticket;
+	}
+	log->l_tail	    = ticket;
+#endif /* DEBUG */
+	log->l_ticket_cnt++;
+}	/* xlog_ticket_put */
+
+
+/*
+ * Grab a ticket off the freelist or allocate some more
+ */
+xlog_ticket_t *
+xlog_ticket_get(xlog_t		*log,
+		int		unit_bytes,
+		int		cnt,
+		char		client,
+		uint		xflags)
+{
+	xlog_ticket_t	*tic;
+	uint		num_headers;
+	SPLDECL(s);
+
+ alloc:
+	if (log->l_freelist == NULL)
+		xlog_state_ticket_alloc(log);		/* potentially sleep */
+
+	s = LOG_LOCK(log);
+	if (log->l_freelist == NULL) {
+		LOG_UNLOCK(log, s);
+		goto alloc;
+	}
+	tic		= log->l_freelist;
+	log->l_freelist	= tic->t_next;
+	if (log->l_freelist == NULL)
+		log->l_tail = NULL;
+	log->l_ticket_cnt--;
+	LOG_UNLOCK(log, s);
+
+	/*
+	 * Permanent reservations have up to 'cnt'-1 active log operations
+	 * in the log.  A unit in this case is the amount of space for one
+	 * of these log operations.  Normal reservations have a cnt of 1
+	 * and their unit amount is the total amount of space required.
+	 *
+	 * The following lines of code account for non-transaction data
+	 * which occupy space in the on-disk log. 
+	 */
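+	/*
+	 * Rough example (figures are illustrative only): a 4096 byte unit
+	 * reservation on a v1 log picks up one op header for the start
+	 * record, BBSIZE bytes of roundoff, one op header for the commit
+	 * record, and l_iclog_hsize bytes of LR header for every iclog the
+	 * reservation may span.
+	 */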
+
+	/* for start-rec */
+	unit_bytes += sizeof(xlog_op_header_t); 
+
+	/* for padding */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+		log->l_mp->m_sb.sb_logsunit > 1) {
+		/* log su roundoff */
+		unit_bytes += log->l_mp->m_sb.sb_logsunit;  
+	} else {
+		/* BB roundoff */
+		unit_bytes += BBSIZE;
+        }
+
+	/* for commit-rec */
+	unit_bytes += sizeof(xlog_op_header_t);
+ 
+	/* for LR headers */
+	num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
+	unit_bytes += log->l_iclog_hsize * num_headers;
+
+	tic->t_unit_res		= unit_bytes;
+	tic->t_curr_res		= unit_bytes;
+	tic->t_cnt		= cnt;
+	tic->t_ocnt		= cnt;
+	tic->t_tid		= (xlog_tid_t)((__psint_t)tic & 0xffffffff);
+	tic->t_clientid		= client;
+	tic->t_flags		= XLOG_TIC_INITED;
+	if (xflags & XFS_LOG_PERM_RESERV)
+		tic->t_flags |= XLOG_TIC_PERM_RESERV;
+	sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
+
+	return tic;
+}	/* xlog_ticket_get */
+
+
+/******************************************************************************
+ *
+ *		Log debug routines
+ *
+ ******************************************************************************
+ */
+#if defined(DEBUG) && !defined(XLOG_NOLOG)
+/*
+ * Make sure that the destination ptr is within the valid data region of
+ * one of the iclogs.  This uses backup pointers stored in a different
+ * part of the log in case we trash the log structure.
+ */
+void
+xlog_verify_dest_ptr(xlog_t     *log,
+		     __psint_t  ptr)
+{
+	int i;
+	int good_ptr = 0;
+
+	for (i=0; i < log->l_iclog_bufs; i++) {
+		if (ptr >= (__psint_t)log->l_iclog_bak[i] &&
+		    ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size)
+			good_ptr++;
+	}
+	if (! good_ptr)
+		xlog_panic("xlog_verify_dest_ptr: invalid ptr");
+}	/* xlog_verify_dest_ptr */
+
+STATIC void
+xlog_verify_grant_head(xlog_t *log, int equals)
+{
+    if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
+	if (equals)
+	    ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
+	else
+	    ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
+    } else {
+	ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
+	ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
+    }
+}	/* xlog_verify_grant_head */
+
+/* check if it will fit */
+STATIC void
+xlog_verify_tail_lsn(xlog_t	    *log,
+		     xlog_in_core_t *iclog,
+		     xfs_lsn_t	    tail_lsn)
+{
+    int blocks;
+
+    if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
+	blocks =
+	    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
+	if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
+	    xlog_panic("xlog_verify_tail_lsn: ran out of log space");
+    } else {
+	ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
+
+	if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
+	    xlog_panic("xlog_verify_tail_lsn: tail wrapped");
+
+	blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
+	if (blocks < BTOBB(iclog->ic_offset) + 1)
+	    xlog_panic("xlog_verify_tail_lsn: ran out of log space");
+    }
+}	/* xlog_verify_tail_lsn */
+
+/*
+ * Perform a number of checks on the iclog before writing to disk.
+ *
+ * 1. Make sure the iclogs are still circular
+ * 2. Make sure we have a good magic number
+ * 3. Make sure we don't have magic numbers in the data
+ * 4. Check fields of each log operation header for:
+ *	A. Valid client identifier
+ *	B. tid ptr value falls in valid ptr space (user space code)
+ *	C. Length in log record header is correct according to the
+ *		individual operation headers within record.
+ * 5. When a bwrite will occur within 5 blocks of the front of the physical
+ *	log, check the preceding blocks of the physical log to make sure all
+ *	the cycle numbers agree with the current cycle number.
+ */
+STATIC void
+xlog_verify_iclog(xlog_t	 *log,
+		  xlog_in_core_t *iclog,
+		  int		 count,
+		  boolean_t	 syncing)
+{
+	xlog_op_header_t	*ophead;
+	xlog_in_core_t		*icptr;
+	xlog_in_core_2_t	*xhdr;
+	xfs_caddr_t		ptr;
+	xfs_caddr_t		base_ptr;
+	__psint_t		field_offset;
+	__uint8_t		clientid;
+	int			len, i, j, k, op_len;
+	int			idx;
+	SPLDECL(s);
+
+	/* check validity of iclog pointers */
+	s = LOG_LOCK(log);
+	icptr = log->l_iclog;
+	for (i=0; i < log->l_iclog_bufs; i++) {
+		if (icptr == 0)
+			xlog_panic("xlog_verify_iclog: invalid ptr");
+		icptr = icptr->ic_next;
+	}
+	if (icptr != log->l_iclog)
+		xlog_panic("xlog_verify_iclog: corrupt iclog ring");
+	LOG_UNLOCK(log, s);
+
+	/* check log magic numbers */
+	ptr = (xfs_caddr_t) &(iclog->ic_header);
+	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
+		xlog_panic("xlog_verify_iclog: invalid magic num");
+
+	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
+	     ptr += BBSIZE) {
+		if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+			xlog_panic("xlog_verify_iclog: unexpected magic num");
+	}
+
+	/* check fields */
+	len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
+	ptr = iclog->ic_datap;
+	base_ptr = ptr;
+	ophead = (xlog_op_header_t *)ptr;
+	xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
+	for (i = 0; i < len; i++) {
+		ophead = (xlog_op_header_t *)ptr;
+
+		/* clientid is only 1 byte */
+		field_offset = (__psint_t)
+			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
+		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
+			clientid = ophead->oh_clientid;
+		} else {
+			idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
+			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
+				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+				clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
+			} else {
+				clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
+			}
+		}
+		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
+			cmn_err(CE_WARN, "xlog_verify_iclog: invalid clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset);
+
+		/* check length */
+		field_offset = (__psint_t)
+			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
+		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
+			op_len = INT_GET(ophead->oh_len, ARCH_CONVERT);
+		} else {
+			idx = BTOBBT((__psint_t)&ophead->oh_len -
+				    (__psint_t)iclog->ic_datap);
+			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
+				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+				op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
+			} else {
+				op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
+			}
+		}
+		ptr += sizeof(xlog_op_header_t) + op_len;
+	}
+}	/* xlog_verify_iclog */
+#endif /* DEBUG && !XLOG_NOLOG */
+
+/*
+ * Mark all iclogs IOERROR. LOG_LOCK is held by the caller.
+ */
+STATIC int
+xlog_state_ioerror(
+	xlog_t	*log)
+{
+	xlog_in_core_t	*iclog, *ic;
+
+	iclog = log->l_iclog;
+	if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
+		/*
+		 * Mark all the incore logs IOERROR.
+		 * From now on, no log flushes will result.
+		 */
+		ic = iclog;
+		do {
+			ic->ic_state = XLOG_STATE_IOERROR;
+			ic = ic->ic_next;
+		} while (ic != iclog);
+		return (0);
+	}
+	/*
+	 * Return non-zero, if state transition has already happened.
+	 */
+	return (1);
+}
+
+/*
+ * This is called from xfs_force_shutdown, when we're forcibly
+ * shutting down the filesystem, typically because of an IO error.
+ * Our main objectives here are to make sure that:
+ *	a. the filesystem gets marked 'SHUTDOWN' for all interested
+ *	   parties to find out, 'atomically'.
+ *	b. those who're sleeping on log reservations, pinned objects and
+ *	    other resources get woken up, and are told the bad news.
+ *	c. nothing new gets queued up after (a) and (b) are done.
+ *	d. if !logerror, flush the iclogs to disk, then seal them off
+ *	   for business.
+ */
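+/*
+ * Locking sketch: the grant lock is taken first and the log lock is nested
+ * inside it before XFS_MOUNT_FS_SHUTDOWN is set, so neither new reservations
+ * nor iclog state changes can slip in while the bad news is broadcast.
+ */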
+int
+xfs_log_force_umount(
+	struct xfs_mount	*mp,
+	int			logerror)
+{
+	xlog_ticket_t	*tic;
+	xlog_t		*log;
+	int		retval;
+	SPLDECL(s);
+	SPLDECL(s2);
+
+	log = mp->m_log;
+
+	/*
+	 * If this happens during log recovery, don't worry about
+	 * locking; the log isn't open for business yet.
+	 */
+	if (!log ||
+	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
+		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
+		XFS_BUF_DONE(mp->m_sb_bp);
+		return (0);
+	}
+
+	/*
+	 * Somebody could've already done the hard work for us.
+	 * No need to get locks for this.
+	 */
+	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
+		ASSERT(XLOG_FORCED_SHUTDOWN(log));
+		return (1);
+	}
+	retval = 0;
+	/*
+	 * We must hold both the GRANT lock and the LOG lock,
+	 * before we mark the filesystem SHUTDOWN and wake
+	 * everybody up to tell the bad news.
+	 */
+	s = GRANT_LOCK(log);
+	s2 = LOG_LOCK(log);
+	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
+	XFS_BUF_DONE(mp->m_sb_bp);
+	/*
+	 * This flag is sort of redundant because of the mount flag, but
+	 * it's good to maintain the separation between the log and the rest
+	 * of XFS.
+	 */
+	log->l_flags |= XLOG_IO_ERROR;
+
+	/*
+	 * If we hit a log error, we want to mark all the iclogs IOERROR
+	 * while we're still holding the loglock.
+	 */
+	if (logerror)
+		retval = xlog_state_ioerror(log);
+	LOG_UNLOCK(log, s2);
+
+	/*
+	 * We don't want anybody waiting for log reservations
+	 * after this. That means we have to wake up everybody
+	 * queued up on reserve_headq as well as write_headq.
+	 * In addition, we make sure in xlog_{re}grant_log_space
+	 * that we don't enqueue anything once the SHUTDOWN flag
+	 * is set, and this action is protected by the GRANTLOCK.
+	 */
+	if ((tic = log->l_reserve_headq)) {
+		do {
+			sv_signal(&tic->t_sema);
+			tic = tic->t_next;
+		} while (tic != log->l_reserve_headq);
+	}
+
+	if ((tic = log->l_write_headq)) {
+		do {
+			sv_signal(&tic->t_sema);
+			tic = tic->t_next;
+		} while (tic != log->l_write_headq);
+	}
+	GRANT_UNLOCK(log, s);
+
+	if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
+		ASSERT(!logerror);
+		/*
+		 * Force the incore logs to disk before shutting the
+		 * log down completely.
+		 */
+		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC);
+		s2 = LOG_LOCK(log);
+		retval = xlog_state_ioerror(log);
+		LOG_UNLOCK(log, s2);
+	}
+	/*
+	 * Wake up everybody waiting on xfs_log_force.
+	 * Callback all log item committed functions as if the
+	 * log writes were completed.
+	 */
+	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
+
+#ifdef XFSERRORDEBUG
+	{
+		xlog_in_core_t	*iclog;
+
+		s = LOG_LOCK(log);
+		iclog = log->l_iclog;
+		do {
+			ASSERT(iclog->ic_callback == 0);
+			iclog = iclog->ic_next;
+		} while (iclog != log->l_iclog);
+		LOG_UNLOCK(log, s);
+	}
+#endif
+	/* return non-zero if log IOERROR transition had already happened */
+	return (retval);
+}
+
+int
+xlog_iclogs_empty(xlog_t *log)
+{
+	xlog_in_core_t	*iclog;
+
+	iclog = log->l_iclog;
+	do {
+		/* endianness does not matter here, zero is zero in
+		 * any language.
+		 */
+		if (iclog->ic_header.h_num_logops)
+			return(0);
+		iclog = iclog->ic_next;
+	} while (iclog != log->l_iclog);
+	return(1);
+}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
new file mode 100644
index 0000000..0db122d
--- /dev/null
+++ b/fs/xfs/xfs_log.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_LOG_H__
+#define __XFS_LOG_H__
+
+/* get lsn fields */
+
+#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
+#define BLOCK_LSN(lsn) ((uint)(lsn))
+/* this is used in a spot where we might otherwise double-endian-flip */
+#define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0])
+
+#ifdef __KERNEL__
+/*
+ * By comparing each component, we don't have to worry about extra
+ * endian issues in treating two 32 bit numbers as one 64 bit number.
+ */
+static
+#if defined(__GNUC__) && (__GNUC__ == 2) && ( (__GNUC_MINOR__ == 95) || (__GNUC_MINOR__ == 96))
+__attribute__((unused))	/* gcc 2.95, 2.96 miscompile this when inlined */
+#else
+__inline__
+#endif
+xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
+{
+	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
+		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;
+
+	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
+		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;
+
+	return 0;
+}
+
+#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
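+/*
+ * XFS_LSN_CMP() is a three-way comparison: it returns a negative value when
+ * lsn1 is older than lsn2, zero when they are equal and a positive value
+ * when lsn1 is newer; the cycle number is the major key and the block
+ * number the minor key.
+ */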
+
+/*
+ * Macros, structures, prototypes for interface to the log manager.
+ */
+
+/*
+ * Flags to xfs_log_mount
+ */
+#define XFS_LOG_RECOVER		0x1
+
+/*
+ * Flags to xfs_log_done()
+ */
+#define XFS_LOG_REL_PERM_RESERV	0x1
+
+
+/*
+ * Flags to xfs_log_reserve()
+ *
+ *	XFS_LOG_SLEEP:	 If space is not available, sleep (default)
+ *	XFS_LOG_NOSLEEP: If space is not available, return error
+ *	XFS_LOG_PERM_RESERV: Permanent reservation.  When writes are
+ *		performed against this type of reservation, the reservation
+ *		is not decreased.  Long running transactions should use this.
+ */
+#define XFS_LOG_SLEEP		0x0
+#define XFS_LOG_NOSLEEP		0x1
+#define XFS_LOG_PERM_RESERV	0x2
+#define XFS_LOG_RESV_ALL	(XFS_LOG_NOSLEEP|XFS_LOG_PERM_RESERV)
+
+
+/*
+ * Flags to xfs_log_force()
+ *
+ *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
+ *	XFS_LOG_FORCE:	Start in-core log write now.
+ *	XFS_LOG_URGE:	Start write within some window of time.
+ *
+ * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set.
+ */
+#define XFS_LOG_SYNC		0x1
+#define XFS_LOG_FORCE		0x2
+#define XFS_LOG_URGE		0x4
+
+#endif	/* __KERNEL__ */
+
+
+/* Log Clients */
+#define XFS_TRANSACTION		0x69
+#define XFS_VOLUME		0x2
+#define XFS_LOG			0xaa
+
+typedef struct xfs_log_iovec {
+	xfs_caddr_t		i_addr;		/* beginning address of region */
+	int		i_len;		/* length in bytes of region */
+} xfs_log_iovec_t;
+
+typedef void* xfs_log_ticket_t;
+
+/*
+ * Structure used to pass callback function and the function's argument
+ * to the log manager.
+ */
+typedef struct xfs_log_callback {
+	struct xfs_log_callback	*cb_next;
+	void			(*cb_func)(void *, int);
+	void			*cb_arg;
+} xfs_log_callback_t;
+
+
+#ifdef __KERNEL__
+/* Log manager interfaces */
+struct xfs_mount;
+xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
+		       xfs_log_ticket_t ticket,
+		       void		**iclog,
+		       uint		flags);
+int	  xfs_log_force(struct xfs_mount *mp,
+			xfs_lsn_t	 lsn,
+			uint		 flags);
+int	  xfs_log_mount(struct xfs_mount	*mp,
+			struct xfs_buftarg	*log_target,
+			xfs_daddr_t		start_block,
+			int		 	num_bblocks);
+int	  xfs_log_mount_finish(struct xfs_mount *mp, int);
+void	  xfs_log_move_tail(struct xfs_mount	*mp,
+			    xfs_lsn_t		tail_lsn);
+int	  xfs_log_notify(struct xfs_mount	*mp,
+			 void			*iclog,
+			 xfs_log_callback_t	*callback_entry);
+int	  xfs_log_release_iclog(struct xfs_mount *mp,
+			 void			 *iclog_hndl);
+int	  xfs_log_reserve(struct xfs_mount *mp,
+			  int		   length,
+			  int		   count,
+			  xfs_log_ticket_t *ticket,
+			  __uint8_t	   clientid,
+			  uint		   flags);
+int	  xfs_log_write(struct xfs_mount *mp,
+			xfs_log_iovec_t  region[],
+			int		 nentries,
+			xfs_log_ticket_t ticket,
+			xfs_lsn_t	 *start_lsn);
+int	  xfs_log_unmount(struct xfs_mount *mp);
+int	  xfs_log_unmount_write(struct xfs_mount *mp);
+void      xfs_log_unmount_dealloc(struct xfs_mount *mp);
+int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);
+int	  xfs_log_need_covered(struct xfs_mount *mp);
+
+void	  xlog_iodone(struct xfs_buf *);
+
+#endif
+
+
+extern int xlog_debug;		/* set to 1 to enable real log */
+
+
+#endif	/* __XFS_LOG_H__ */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
new file mode 100644
index 0000000..c31e3ce
--- /dev/null
+++ b/fs/xfs/xfs_log_priv.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_LOG_PRIV_H__
+#define __XFS_LOG_PRIV_H__
+
+struct xfs_buf;
+struct ktrace;
+struct log;
+struct xfs_buf_cancel;
+struct xfs_mount;
+
+/*
+ * Macros, structures, prototypes for internal log manager use.
+ */
+
+#define XLOG_MIN_ICLOGS		2
+#define XLOG_MED_ICLOGS		4
+#define XLOG_MAX_ICLOGS		8
+#define XLOG_CALLBACK_SIZE	10
+#define XLOG_HEADER_MAGIC_NUM	0xFEEDbabe	/* Invalid cycle number */
+#define XLOG_VERSION_1		1
+#define XLOG_VERSION_2		2		/* Large IClogs, Log sunit */
+#define XLOG_VERSION_OKBITS	(XLOG_VERSION_1 | XLOG_VERSION_2)
+#define XLOG_RECORD_BSIZE	(16*1024)	/* eventually 32k */
+#define XLOG_BIG_RECORD_BSIZE	(32*1024)	/* 32k buffers */
+#define XLOG_MAX_RECORD_BSIZE	(256*1024)
+#define XLOG_HEADER_CYCLE_SIZE	(32*1024)	/* cycle data in header */
+#define XLOG_RECORD_BSHIFT	14		/* 16384 == 1 << 14 */
+#define XLOG_BIG_RECORD_BSHIFT	15		/* 32k == 1 << 15 */
+#define XLOG_MAX_RECORD_BSHIFT	18		/* 256k == 1 << 18 */
+#define XLOG_BTOLSUNIT(log, b)  (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
+                                 (log)->l_mp->m_sb.sb_logsunit)
+#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)
+
+#define XLOG_HEADER_SIZE	512
+
+#define XLOG_REC_SHIFT(log) \
+	BTOBB(1 << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
+	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+#define XLOG_TOTAL_REC_SHIFT(log) \
+	BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
+	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+
+/*
+ *  set lsns
+ */
+
+#define ASSIGN_ANY_LSN_HOST(lsn,cycle,block)  \
+    { \
+	(lsn) = ((xfs_lsn_t)(cycle)<<32)|(block); \
+    }
+#define ASSIGN_ANY_LSN_DISK(lsn,cycle,block)  \
+    { \
+	INT_SET(((uint *)&(lsn))[0], ARCH_CONVERT, (cycle)); \
+	INT_SET(((uint *)&(lsn))[1], ARCH_CONVERT, (block)); \
+    }
+#define ASSIGN_LSN(lsn,log) \
+    ASSIGN_ANY_LSN_DISK(lsn,(log)->l_curr_cycle,(log)->l_curr_block);
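+/*
+ * ASSIGN_ANY_LSN_HOST builds the 64 bit lsn in host byte order, while the
+ * _DISK variant (and hence ASSIGN_LSN, which packs the log's current cycle
+ * and block) stores the two 32 bit halves in disk byte order.
+ */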
+
+#define XLOG_SET(f,b)		(((f) & (b)) == (b))
+
+#define GET_CYCLE(ptr, arch) \
+    (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \
+	 INT_GET(*((uint *)(ptr)+1), arch) : \
+	 INT_GET(*(uint *)(ptr), arch) \
+    )
+
+#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
+
+
+#ifdef __KERNEL__
+
+/*
+ * get client id from packed copy.
+ *
+ * this hack is here because the xlog_pack code copies four bytes
+ * of xlog_op_header containing the fields oh_clientid, oh_flags
+ * and oh_res2 into the packed copy.
+ *
+ * later on this four byte chunk is treated as an int and the
+ * client id is pulled out.
+ *
+ * this has endian issues, of course.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define GET_CLIENT_ID(i,arch) \
+    ((i) & 0xff)
+#else
+#define GET_CLIENT_ID(i,arch) \
+    ((i) >> 24)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_SUB_SPACE)
+void xlog_grant_sub_space(struct log *log, int bytes, int type);
+#define XLOG_GRANT_SUB_SPACE(log,bytes,type)	\
+	xlog_grant_sub_space(log,bytes,type)
+#else
+#define XLOG_GRANT_SUB_SPACE(log,bytes,type)				\
+    {									\
+	if (type == 'w') {						\
+		(log)->l_grant_write_bytes -= (bytes);			\
+		if ((log)->l_grant_write_bytes < 0) {			\
+			(log)->l_grant_write_bytes += (log)->l_logsize;	\
+			(log)->l_grant_write_cycle--;			\
+		}							\
+	} else {							\
+		(log)->l_grant_reserve_bytes -= (bytes);		\
+		if ((log)->l_grant_reserve_bytes < 0) {			\
+			(log)->l_grant_reserve_bytes += (log)->l_logsize;\
+			(log)->l_grant_reserve_cycle--;			\
+		}							\
+	 }								\
+    }
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_ADD_SPACE)
+void xlog_grant_add_space(struct log *log, int bytes, int type);
+#define XLOG_GRANT_ADD_SPACE(log,bytes,type)	\
+	xlog_grant_add_space(log,bytes,type)
+#else
+#define XLOG_GRANT_ADD_SPACE(log,bytes,type)				\
+    {									\
+	if (type == 'w') {						\
+		(log)->l_grant_write_bytes += (bytes);			\
+		if ((log)->l_grant_write_bytes > (log)->l_logsize) {	\
+			(log)->l_grant_write_bytes -= (log)->l_logsize;	\
+			(log)->l_grant_write_cycle++;			\
+		}							\
+	} else {							\
+		(log)->l_grant_reserve_bytes += (bytes);		\
+		if ((log)->l_grant_reserve_bytes > (log)->l_logsize) {	\
+			(log)->l_grant_reserve_bytes -= (log)->l_logsize;\
+			(log)->l_grant_reserve_cycle++;			\
+		}							\
+	 }								\
+    }
+#endif
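+
+/*
+ * Illustrative sketch (not from the original XFS sources): each grant
+ * head is kept as a (cycle, bytes) pair, and the two macros above wrap
+ * the byte count modulo the log size.  Assuming, for example,
+ * l_logsize == 1000, l_grant_write_cycle == 3 and
+ * l_grant_write_bytes == 900, then
+ *
+ *	XLOG_GRANT_ADD_SPACE(log, 300, 'w');
+ *
+ * leaves l_grant_write_bytes == 200 and l_grant_write_cycle == 4, i.e.
+ * the write head has passed the physical end of the log once more.
+ */
+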
+#define XLOG_INS_TICKETQ(q,tic)				\
+    {							\
+	if (q) {					\
+		(tic)->t_next	    = (q);		\
+		(tic)->t_prev	    = (q)->t_prev;	\
+		(q)->t_prev->t_next = (tic);		\
+		(q)->t_prev	    = (tic);		\
+	} else {					\
+		(tic)->t_prev = (tic)->t_next = (tic);	\
+		(q) = (tic);				\
+	}						\
+	(tic)->t_flags |= XLOG_TIC_IN_Q;		\
+    }
+#define XLOG_DEL_TICKETQ(q,tic)				\
+    {							\
+	if ((tic) == (tic)->t_next) {			\
+		(q) = NULL;				\
+	} else {					\
+		(q) = (tic)->t_next;			\
+		(tic)->t_next->t_prev = (tic)->t_prev;	\
+		(tic)->t_prev->t_next = (tic)->t_next;	\
+	}						\
+	(tic)->t_next = (tic)->t_prev = NULL;		\
+	(tic)->t_flags &= ~XLOG_TIC_IN_Q;		\
+    }
+
+
+#define GRANT_LOCK(log)		mutex_spinlock(&(log)->l_grant_lock)
+#define GRANT_UNLOCK(log, s)	mutex_spinunlock(&(log)->l_grant_lock, s)
+#define LOG_LOCK(log)		mutex_spinlock(&(log)->l_icloglock)
+#define LOG_UNLOCK(log, s)	mutex_spinunlock(&(log)->l_icloglock, s)
+
+#define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
+#define xlog_exit(args...)	cmn_err(CE_PANIC, ## args)
+#define xlog_warn(args...)	cmn_err(CE_WARN, ## args)
+
+/*
+ * In core log state
+ */
+#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
+#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
+#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
+#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
+#define XLOG_STATE_DO_CALLBACK \
+			     0x0010 /* Process callback functions */
+#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
+#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status*/
+#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
+#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
+#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
+#endif	/* __KERNEL__ */
+
+/*
+ * Flags to log operation header
+ *
+ * The first write of a new transaction will be preceded with a start
+ * record, XLOG_START_TRANS.  Once a transaction is committed, a commit
+ * record is written, XLOG_COMMIT_TRANS.  If a single region can not fit into
+ * the remainder of the current active in-core log, it is split up into
+ * multiple regions.  Each partial region will be marked with a
+ * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS.
+ *
+ */
+#define XLOG_START_TRANS	0x01	/* Start a new transaction */
+#define XLOG_COMMIT_TRANS	0x02	/* Commit this transaction */
+#define XLOG_CONTINUE_TRANS	0x04	/* Cont this trans into new region */
+#define XLOG_WAS_CONT_TRANS	0x08	/* Cont this trans into new region */
+#define XLOG_END_TRANS		0x10	/* End a continued transaction */
+#define XLOG_UNMOUNT_TRANS	0x20	/* Unmount a filesystem transaction */
+#define XLOG_SKIP_TRANS		(XLOG_COMMIT_TRANS | XLOG_CONTINUE_TRANS | \
+				 XLOG_WAS_CONT_TRANS | XLOG_END_TRANS | \
+				 XLOG_UNMOUNT_TRANS)
+
+#ifdef __KERNEL__
+/*
+ * Flags to log ticket
+ */
+#define XLOG_TIC_INITED		0x1	/* has been initialized */
+#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
+#define XLOG_TIC_IN_Q		0x4
+#endif	/* __KERNEL__ */
+
+#define XLOG_UNMOUNT_TYPE	0x556e	/* Un for Unmount */
+
+/*
+ * Flags for log structure
+ */
+#define XLOG_CHKSUM_MISMATCH	0x1	/* used only during recovery */
+#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
+#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
+#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
+					   shutdown */
+typedef __uint32_t xlog_tid_t;
+
+
+#ifdef __KERNEL__
+/*
+ * Below are states for covering allocation transactions.
+ * By covering, we mean changing the h_tail_lsn in the last on-disk
+ * log write such that no allocation transactions will be re-done during
+ * recovery after a system crash. Recovery starts at the last on-disk
+ * log write.
+ *
+ * These states are used to insert dummy log entries to cover
+ * space allocation transactions which can undo non-transactional changes
+ * after a crash. Writes to a file with space
+ * already allocated do not result in any transactions. Allocations
+ * might include space beyond the EOF. So if we just push the EOF a
+ * little, the last transaction for the file could contain the wrong
+ * size. If there is no file system activity after an allocation
+ * transaction and the system crashes, the allocation transaction
+ * will get replayed and the file will be truncated. This could
+ * be hours/days/... after the allocation occurred.
+ *
+ * The fix for this is to do two dummy transactions when the
+ * system is idle. We need two dummy transactions because the h_tail_lsn
+ * in the log record header needs to point beyond the last possible
+ * non-dummy transaction. The first dummy changes the h_tail_lsn to
+ * the first transaction before the dummy. The second dummy causes
+ * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
+ *
+ * These dummy transactions get committed when everything
+ * is idle (after there has been some activity).
+ *
+ * There are 5 states used to control this.
+ *
+ *  IDLE -- no logging has been done on the file system or
+ *		we are done covering previous transactions.
+ *  NEED -- logging has occurred and we need a dummy transaction
+ *		when the log becomes idle.
+ *  DONE -- we were in the NEED state and have committed a dummy
+ *		transaction.
+ *  NEED2 -- we detected that a dummy transaction has gone to the
+ *		on disk log with no other transactions.
+ *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
+ *
+ * There are two places where we switch states:
+ *
+ * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
+ *	We commit the dummy transaction and switch to DONE or DONE2,
+ *	respectively. In all other states, we don't do anything.
+ *
+ * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
+ *
+ *	No matter what state we are in, if this isn't the dummy
+ *	transaction going out, the next state is NEED.
+ *	So, if we aren't in the DONE or DONE2 states, the next state
+ *	is NEED. We can't be finishing a write of the dummy record
+ *	unless it was committed and the state switched to DONE or DONE2.
+ *
+ *	If we are in the DONE state and this was a write of the
+ *		dummy transaction, we move to NEED2.
+ *
+ *	If we are in the DONE2 state and this was a write of the
+ *		dummy transaction, we move to IDLE.
+ *
+ *
+ * If we write only one dummy transaction, it can end up appended to the
+ * same log write as a file space allocation. When this happens, the log
+ * recovery code replays the space allocation and a file could be
+ * truncated. This is why we have the NEED2 and DONE2 states before
+ * going idle.
+ */
+
+#define XLOG_STATE_COVER_IDLE	0
+#define XLOG_STATE_COVER_NEED	1
+#define XLOG_STATE_COVER_DONE	2
+#define XLOG_STATE_COVER_NEED2	3
+#define XLOG_STATE_COVER_DONE2	4
+
+#define XLOG_COVER_OPS		5
+
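+/*
+ * Illustrative sketch (not from the original XFS sources): the covering
+ * states described above advance roughly as follows:
+ *
+ *	IDLE  --(a real transaction is logged)--------------------> NEED
+ *	NEED  --(xfs_sync sees an idle log, commits a dummy)------> DONE
+ *	DONE  --(that dummy reaches disk with nothing else)-------> NEED2
+ *	NEED2 --(xfs_sync commits a second dummy)-----------------> DONE2
+ *	DONE2 --(the second dummy reaches disk alone)-------------> IDLE
+ *
+ * and any non-dummy record reaching disk drops the state back to NEED.
+ */
+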
+typedef struct xlog_ticket {
+	sv_t		   t_sema;	 /* sleep on this semaphore	 :20 */
+	struct xlog_ticket *t_next;	 /*			         : 4 */
+	struct xlog_ticket *t_prev;	 /*				 : 4 */
+	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4 */
+	int		   t_curr_res;	 /* current reservation in bytes : 4 */
+	int		   t_unit_res;	 /* unit reservation in bytes    : 4 */
+	__uint8_t	   t_ocnt;	 /* original count		 : 1 */
+	__uint8_t	   t_cnt;	 /* current count		 : 1 */
+	__uint8_t	   t_clientid;	 /* who does this belong to;	 : 1 */
+	__uint8_t	   t_flags;	 /* properties of reservation	 : 1 */
+} xlog_ticket_t;
+#endif
+
+
+typedef struct xlog_op_header {
+	xlog_tid_t oh_tid;	/* transaction id of operation	:  4 b */
+	int	   oh_len;	/* bytes in data region		:  4 b */
+	__uint8_t  oh_clientid;	/* who sent me this		:  1 b */
+	__uint8_t  oh_flags;	/*				:  1 b */
+	ushort	   oh_res2;	/* 32 bit align			:  2 b */
+} xlog_op_header_t;
+
+
+/* valid values for h_fmt */
+#define XLOG_FMT_UNKNOWN  0
+#define XLOG_FMT_LINUX_LE 1
+#define XLOG_FMT_LINUX_BE 2
+#define XLOG_FMT_IRIX_BE  3
+
+/* our fmt */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define XLOG_FMT XLOG_FMT_LINUX_LE
+#else
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define XLOG_FMT XLOG_FMT_LINUX_BE
+#else
+#error unknown byte order
+#endif
+#endif
+
+typedef struct xlog_rec_header {
+	uint	  h_magicno;	/* log record (LR) identifier		:  4 */
+	uint	  h_cycle;	/* write cycle of log			:  4 */
+	int	  h_version;	/* LR version				:  4 */
+	int	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
+	xfs_lsn_t h_lsn;	/* lsn of this LR			:  8 */
+	xfs_lsn_t h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
+	uint	  h_chksum;	/* may not be used; non-zero if used	:  4 */
+	int	  h_prev_block; /* block number to previous LR		:  4 */
+	int	  h_num_logops;	/* number of log operations in this LR	:  4 */
+	uint	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+	/* new fields */
+	int       h_fmt;        /* format of log record                 :  4 */
+	uuid_t    h_fs_uuid;    /* uuid of FS                           : 16 */
+	int       h_size;	/* iclog size				:  4 */
+} xlog_rec_header_t;
+
+typedef struct xlog_rec_ext_header {
+	uint	  xh_cycle;	/* write cycle of log			: 4 */
+	uint	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
+} xlog_rec_ext_header_t;
+
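+/*
+ * Illustrative sketch (not from the original XFS sources): for a
+ * version 2 log with, say, 128k iclogs, each record needs
+ * 128k / XLOG_HEADER_CYCLE_SIZE == 4 header sectors: the xlog_rec_header
+ * plus three xlog_rec_ext_header blocks, each carrying the saved cycle
+ * data for its own 32k worth of basic blocks.
+ */
+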
+#ifdef __KERNEL__
+/*
+ * - A log record header is 512 bytes.  There is plenty of room to grow the
+ *	xlog_rec_header_t into the reserved space.
+ * - ic_data follows, so a write to disk can start at the beginning of
+ *	the iclog.
+ * - ic_forcesema is used to implement synchronous forcing of the iclog to disk.
+ * - ic_next is the pointer to the next iclog in the ring.
+ * - ic_bp is a pointer to the buffer used to write this incore log to disk.
+ * - ic_log is a pointer back to the global log structure.
+ * - ic_callback is a linked list of callback function/argument pairs to be
+ *	called after an iclog finishes writing.
+ * - ic_size is the full size of the header plus data.
+ * - ic_offset is the current number of bytes written to in this iclog.
+ * - ic_refcnt is bumped when someone is writing to the log.
+ * - ic_state is the state of the iclog.
+ */
+typedef struct xlog_iclog_fields {
+	sv_t			ic_forcesema;
+	sv_t			ic_writesema;
+	struct xlog_in_core	*ic_next;
+	struct xlog_in_core	*ic_prev;
+	struct xfs_buf		*ic_bp;
+	struct log		*ic_log;
+	xfs_log_callback_t	*ic_callback;
+	xfs_log_callback_t	**ic_callback_tail;
+#ifdef XFS_LOG_TRACE
+	struct ktrace		*ic_trace;
+#endif
+	int			ic_size;
+	int			ic_offset;
+	int			ic_refcnt;
+	int			ic_bwritecnt;
+	ushort_t		ic_state;
+	char			*ic_datap;	/* pointer to iclog data */
+} xlog_iclog_fields_t;
+
+typedef union xlog_in_core2 {
+	xlog_rec_header_t	hic_header;
+	xlog_rec_ext_header_t	hic_xheader;
+	char			hic_sector[XLOG_HEADER_SIZE];
+} xlog_in_core_2_t;
+
+typedef struct xlog_in_core {
+	xlog_iclog_fields_t	hic_fields;
+	xlog_in_core_2_t	*hic_data;
+} xlog_in_core_t;
+
+/*
+ * Defines to save our code from this glop.
+ */
+#define	ic_forcesema	hic_fields.ic_forcesema
+#define ic_writesema	hic_fields.ic_writesema
+#define	ic_next		hic_fields.ic_next
+#define	ic_prev		hic_fields.ic_prev
+#define	ic_bp		hic_fields.ic_bp
+#define	ic_log		hic_fields.ic_log
+#define	ic_callback	hic_fields.ic_callback
+#define	ic_callback_tail hic_fields.ic_callback_tail
+#define	ic_trace	hic_fields.ic_trace
+#define	ic_size		hic_fields.ic_size
+#define	ic_offset	hic_fields.ic_offset
+#define	ic_refcnt	hic_fields.ic_refcnt
+#define	ic_bwritecnt	hic_fields.ic_bwritecnt
+#define	ic_state	hic_fields.ic_state
+#define ic_datap	hic_fields.ic_datap
+#define ic_header	hic_data->hic_header
+
+/*
+ * The reservation head lsn is not made up of a cycle number and block number.
+ * Instead, it uses a cycle number and byte number.  Logs don't expect to
+ * overflow 31 bits worth of byte offset, so using a byte number will mean
+ * that round off problems won't occur when releasing partial reservations.
+ */
+typedef struct log {
+	/* The following block of fields are changed while holding icloglock */
+	sema_t			l_flushsema;    /* iclog flushing semaphore */
+	int			l_flushcnt;	/* # of procs waiting on this
+						 * sema */
+	int			l_ticket_cnt;	/* free ticket count */
+	int			l_ticket_tcnt;	/* total ticket count */
+	int			l_covered_state;/* state of "covering disk
+						 * log entries" */
+	xlog_ticket_t		*l_freelist;    /* free list of tickets */
+	xlog_ticket_t		*l_unmount_free;/* kmem_free these addresses */
+	xlog_ticket_t		*l_tail;        /* free list of tickets */
+	xlog_in_core_t		*l_iclog;       /* head log queue	*/
+	lock_t			l_icloglock;    /* grab to change iclog state */
+	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
+						 * buffers */
+	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
+	struct xfs_mount	*l_mp;	        /* mount point */
+	struct xfs_buf		*l_xbuf;        /* extra buffer for log
+						 * wrapping */
+	struct xfs_buftarg	*l_targ;        /* buftarg of log */
+	xfs_daddr_t		l_logBBstart;   /* start block of log */
+	int			l_logsize;      /* size of log in bytes */
+	int			l_logBBsize;    /* size of log in BB chunks */
+	int			l_curr_cycle;   /* Cycle number of log writes */
+	int			l_prev_cycle;   /* Cycle number before last
+						 * block increment */
+	int			l_curr_block;   /* current logical log block */
+	int			l_prev_block;   /* previous logical log block */
+	int			l_iclog_size;	/* size of each iclog in bytes */
+	int			l_iclog_size_log; /* log2 of iclog size */
+	int			l_iclog_bufs;	/* number of iclog buffers */
+
+	/* The following field is used for debugging; need to hold icloglock */
+	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
+
+	/* The following block of fields are changed while holding grant_lock */
+	lock_t			l_grant_lock;
+	xlog_ticket_t		*l_reserve_headq;
+	xlog_ticket_t		*l_write_headq;
+	int			l_grant_reserve_cycle;
+	int			l_grant_reserve_bytes;
+	int			l_grant_write_cycle;
+	int			l_grant_write_bytes;
+
+	/* The following fields don't need locking */
+#ifdef XFS_LOG_TRACE
+	struct ktrace		*l_trace;
+	struct ktrace		*l_grant_trace;
+#endif
+	uint			l_flags;
+	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
+	struct xfs_buf_cancel	**l_buf_cancel_table;
+	int			l_iclog_hsize;  /* size of iclog header */
+	int			l_iclog_heads;  /* # of iclog header sectors */
+	uint			l_sectbb_log;   /* log2 of sector size in BBs */
+	uint			l_sectbb_mask;  /* sector size (in BBs)
+						 * alignment mask */
+} xlog_t;
+
+
+/* common routines */
+extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+extern int	 xlog_find_head(xlog_t *log, xfs_daddr_t *head_blk);
+extern int	 xlog_find_tail(xlog_t	*log,
+				xfs_daddr_t *head_blk,
+				xfs_daddr_t *tail_blk,
+				int readonly);
+extern int	 xlog_recover(xlog_t *log, int readonly);
+extern int	 xlog_recover_finish(xlog_t *log, int mfsi_flags);
+extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
+extern void	 xlog_recover_process_iunlinks(xlog_t *log);
+
+extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
+extern void	 xlog_put_bp(struct xfs_buf *);
+extern int	 xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
+extern xfs_caddr_t xlog_align(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
+
+/* iclog tracing */
+#define XLOG_TRACE_GRAB_FLUSH  1
+#define XLOG_TRACE_REL_FLUSH   2
+#define XLOG_TRACE_SLEEP_FLUSH 3
+#define XLOG_TRACE_WAKE_FLUSH  4
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_LOG_PRIV_H__ */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
new file mode 100644
index 0000000..9824b5b
--- /dev/null
+++ b/fs/xfs/xfs_log_recover.c
@@ -0,0 +1,4098 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_sb.h"
+#include "xfs_trans.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_error.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_imap.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_log_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_log_recover.h"
+#include "xfs_extfree_item.h"
+#include "xfs_trans_priv.h"
+#include "xfs_bit.h"
+#include "xfs_quota.h"
+#include "xfs_rw.h"
+
+STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
+STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
+STATIC void	xlog_recover_insert_item_backq(xlog_recover_item_t **q,
+					       xlog_recover_item_t *item);
+#if defined(DEBUG)
+STATIC void	xlog_recover_check_summary(xlog_t *);
+STATIC void	xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
+#else
+#define	xlog_recover_check_summary(log)
+#define	xlog_recover_check_ail(mp, lip, gen)
+#endif
+
+
+/*
+ * Sector aligned buffer routines for buffer create/read/write/access
+ */
+
+#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)	\
+	( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
+	((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
+#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)	((bno) & ~(log)->l_sectbb_mask)
+
+xfs_buf_t *
+xlog_get_bp(
+	xlog_t		*log,
+	int		num_bblks)
+{
+	ASSERT(num_bblks > 0);
+
+	if (log->l_sectbb_log) {
+		if (num_bblks > 1)
+			num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+		num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
+	}
+	return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
+}
+
+void
+xlog_put_bp(
+	xfs_buf_t	*bp)
+{
+	xfs_buf_free(bp);
+}
+
+
+/*
+ * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
+ */
+int
+xlog_bread(
+	xlog_t		*log,
+	xfs_daddr_t	blk_no,
+	int		nbblks,
+	xfs_buf_t	*bp)
+{
+	int		error;
+
+	if (log->l_sectbb_log) {
+		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
+		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
+	}
+
+	ASSERT(nbblks > 0);
+	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
+	ASSERT(bp);
+
+	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
+	XFS_BUF_READ(bp);
+	XFS_BUF_BUSY(bp);
+	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
+	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
+
+	xfsbdstrat(log->l_mp, bp);
+	if ((error = xfs_iowait(bp)))
+		xfs_ioerror_alert("xlog_bread", log->l_mp,
+				  bp, XFS_BUF_ADDR(bp));
+	return error;
+}
+
+/*
+ * Write out the buffer at the given block for the given number of blocks.
+ * The buffer is kept locked across the write and is returned locked.
+ * This can only be used for synchronous log writes.
+ */
+int
+xlog_bwrite(
+	xlog_t		*log,
+	xfs_daddr_t	blk_no,
+	int		nbblks,
+	xfs_buf_t	*bp)
+{
+	int		error;
+
+	if (log->l_sectbb_log) {
+		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
+		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
+	}
+
+	ASSERT(nbblks > 0);
+	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
+
+	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
+	XFS_BUF_ZEROFLAGS(bp);
+	XFS_BUF_BUSY(bp);
+	XFS_BUF_HOLD(bp);
+	XFS_BUF_PSEMA(bp, PRIBIO);
+	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
+	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
+
+	if ((error = xfs_bwrite(log->l_mp, bp)))
+		xfs_ioerror_alert("xlog_bwrite", log->l_mp,
+				  bp, XFS_BUF_ADDR(bp));
+	return error;
+}
+
+xfs_caddr_t
+xlog_align(
+	xlog_t		*log,
+	xfs_daddr_t	blk_no,
+	int		nbblks,
+	xfs_buf_t	*bp)
+{
+	xfs_caddr_t	ptr;
+
+	if (!log->l_sectbb_log)
+		return XFS_BUF_PTR(bp);
+
+	ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
+	ASSERT(XFS_BUF_SIZE(bp) >=
+		BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
+	return ptr;
+}
+
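+/*
+ * Illustrative sketch (not from the original XFS sources): assume a
+ * device with 2048 byte sectors, i.e. 4 basic blocks per sector and
+ * l_sectbb_mask == 3.  A read of block 10 is rounded down to block 8 by
+ * XLOG_SECTOR_ROUNDDOWN_BLKNO and its length rounded up by
+ * XLOG_SECTOR_ROUNDUP_BBCOUNT; xlog_align() then returns a pointer
+ * BBTOB(10 & 3) == BBTOB(2) bytes into the buffer, which is where the
+ * data for block 10 actually lives.
+ */
+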
+#ifdef DEBUG
+/*
+ * dump debug superblock and log record information
+ */
+STATIC void
+xlog_header_check_dump(
+	xfs_mount_t		*mp,
+	xlog_rec_header_t	*head)
+{
+	int			b;
+
+	printk("%s:  SB : uuid = ", __FUNCTION__);
+	for (b = 0; b < 16; b++)
+		printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
+	printk(", fmt = %d\n", XLOG_FMT);
+	printk("    log : uuid = ");
+	for (b = 0; b < 16; b++)
+		printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
+	printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
+}
+#else
+#define xlog_header_check_dump(mp, head)
+#endif
+
+/*
+ * check log record header for recovery
+ */
+STATIC int
+xlog_header_check_recover(
+	xfs_mount_t		*mp,
+	xlog_rec_header_t	*head)
+{
+	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
+
+	/*
+	 * IRIX doesn't write the h_fmt field and leaves it zeroed
+	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
+	 * a dirty log created in IRIX.
+	 */
+	if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
+		xlog_warn(
+	"XFS: dirty log written in incompatible format - can't recover");
+		xlog_header_check_dump(mp, head);
+		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
+				 XFS_ERRLEVEL_HIGH, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+		xlog_warn(
+	"XFS: dirty log entry has mismatched uuid - can't recover");
+		xlog_header_check_dump(mp, head);
+		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
+				 XFS_ERRLEVEL_HIGH, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
+/*
+ * read the head block of the log and check the header
+ */
+STATIC int
+xlog_header_check_mount(
+	xfs_mount_t		*mp,
+	xlog_rec_header_t	*head)
+{
+	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
+
+	if (uuid_is_nil(&head->h_fs_uuid)) {
+		/*
+		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
+		 * h_fs_uuid is nil, we assume this log was last mounted
+		 * by IRIX and continue.
+		 */
+		xlog_warn("XFS: nil uuid in log - IRIX style log");
+	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+		xlog_warn("XFS: log has mismatched uuid - can't recover");
+		xlog_header_check_dump(mp, head);
+		XFS_ERROR_REPORT("xlog_header_check_mount",
+				 XFS_ERRLEVEL_HIGH, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
+STATIC void
+xlog_recover_iodone(
+	struct xfs_buf	*bp)
+{
+	xfs_mount_t	*mp;
+
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *));
+
+	if (XFS_BUF_GETERROR(bp)) {
+		/*
+		 * We're not going to bother about retrying
+		 * this during recovery. One strike!
+		 */
+		mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
+		xfs_ioerror_alert("xlog_recover_iodone",
+				  mp, bp, XFS_BUF_ADDR(bp));
+		xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+	}
+	XFS_BUF_SET_FSPRIVATE(bp, NULL);
+	XFS_BUF_CLR_IODONE_FUNC(bp);
+	xfs_biodone(bp);
+}
+
+/*
+ * This routine finds (to an approximation) the first block in the physical
+ * log which contains the given cycle.  It uses a binary search algorithm.
+ * Note that the algorithm can not be perfect because the disk will not
+ * necessarily be perfect.
+ */
+int
+xlog_find_cycle_start(
+	xlog_t		*log,
+	xfs_buf_t	*bp,
+	xfs_daddr_t	first_blk,
+	xfs_daddr_t	*last_blk,
+	uint		cycle)
+{
+	xfs_caddr_t	offset;
+	xfs_daddr_t	mid_blk;
+	uint		mid_cycle;
+	int		error;
+
+	mid_blk = BLK_AVG(first_blk, *last_blk);
+	while (mid_blk != first_blk && mid_blk != *last_blk) {
+		if ((error = xlog_bread(log, mid_blk, 1, bp)))
+			return error;
+		offset = xlog_align(log, mid_blk, 1, bp);
+		mid_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+		if (mid_cycle == cycle) {
+			*last_blk = mid_blk;
+			/* last_half_cycle == mid_cycle */
+		} else {
+			first_blk = mid_blk;
+			/* first_half_cycle == mid_cycle */
+		}
+		mid_blk = BLK_AVG(first_blk, *last_blk);
+	}
+	ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
+	       (mid_blk == *last_blk && mid_blk-1 == first_blk));
+
+	return 0;
+}
+
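+/*
+ * Illustrative sketch (not from the original XFS sources): given a log
+ * whose per-block cycle numbers look like
+ *
+ *	block:  0  1  2  3  4  5  6  7
+ *	cycle:  2  2  2  1  1  1  1  1
+ *
+ * a call with first_blk == 0, *last_blk == 7 and cycle == 1 narrows the
+ * interval by binary search until *last_blk == 3, the first block that
+ * carries the requested cycle number.
+ */
+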
+/*
+ * Check that the range of blocks does not contain the cycle number
+ * given.  The scan needs to occur from front to back and the ptr into the
+ * region must be updated since a later routine will need to perform another
+ * test.  If the region is completely good, we end up returning the same
+ * last block number.
+ *
+ * Set blkno to -1 if we encounter no errors.  This is an invalid block number
+ * since we don't ever expect logs to get this large.
+ */
+STATIC int
+xlog_find_verify_cycle(
+	xlog_t		*log,
+	xfs_daddr_t	start_blk,
+	int		nbblks,
+	uint		stop_on_cycle_no,
+	xfs_daddr_t	*new_blk)
+{
+	xfs_daddr_t	i, j;
+	uint		cycle;
+	xfs_buf_t	*bp;
+	xfs_daddr_t	bufblks;
+	xfs_caddr_t	buf = NULL;
+	int		error = 0;
+
+	bufblks = 1 << ffs(nbblks);
+
+	while (!(bp = xlog_get_bp(log, bufblks))) {
+		/* can't get enough memory to do everything in one big buffer */
+		bufblks >>= 1;
+		if (bufblks <= log->l_sectbb_log)
+			return ENOMEM;
+	}
+
+	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
+		int	bcount;
+
+		bcount = min(bufblks, (start_blk + nbblks - i));
+
+		if ((error = xlog_bread(log, i, bcount, bp)))
+			goto out;
+
+		buf = xlog_align(log, i, bcount, bp);
+		for (j = 0; j < bcount; j++) {
+			cycle = GET_CYCLE(buf, ARCH_CONVERT);
+			if (cycle == stop_on_cycle_no) {
+				*new_blk = i+j;
+				goto out;
+			}
+
+			buf += BBSIZE;
+		}
+	}
+
+	*new_blk = -1;
+
+out:
+	xlog_put_bp(bp);
+	return error;
+}
+
+/*
+ * Potentially backup over partial log record write.
+ *
+ * In the typical case, last_blk is the number of the block directly after
+ * a good log record.  Therefore, we subtract one to get the block number
+ * of the last block in the given buffer.  extra_bblks contains the number
+ * of blocks we would have read on a previous read.  This happens when the
+ * last log record is split over the end of the physical log.
+ *
+ * extra_bblks is the number of blocks potentially verified on a previous
+ * call to this routine.
+ */
+STATIC int
+xlog_find_verify_log_record(
+	xlog_t			*log,
+	xfs_daddr_t		start_blk,
+	xfs_daddr_t		*last_blk,
+	int			extra_bblks)
+{
+	xfs_daddr_t		i;
+	xfs_buf_t		*bp;
+	xfs_caddr_t		offset = NULL;
+	xlog_rec_header_t	*head = NULL;
+	int			error = 0;
+	int			smallmem = 0;
+	int			num_blks = *last_blk - start_blk;
+	int			xhdrs;
+
+	ASSERT(start_blk != 0 || *last_blk != start_blk);
+
+	if (!(bp = xlog_get_bp(log, num_blks))) {
+		if (!(bp = xlog_get_bp(log, 1)))
+			return ENOMEM;
+		smallmem = 1;
+	} else {
+		if ((error = xlog_bread(log, start_blk, num_blks, bp)))
+			goto out;
+		offset = xlog_align(log, start_blk, num_blks, bp);
+		offset += ((num_blks - 1) << BBSHIFT);
+	}
+
+	for (i = (*last_blk) - 1; i >= 0; i--) {
+		if (i < start_blk) {
+			/* valid log record not found */
+			xlog_warn(
+		"XFS: Log inconsistent (didn't find previous header)");
+			ASSERT(0);
+			error = XFS_ERROR(EIO);
+			goto out;
+		}
+
+		if (smallmem) {
+			if ((error = xlog_bread(log, i, 1, bp)))
+				goto out;
+			offset = xlog_align(log, i, 1, bp);
+		}
+
+		head = (xlog_rec_header_t *)offset;
+
+		if (XLOG_HEADER_MAGIC_NUM ==
+		    INT_GET(head->h_magicno, ARCH_CONVERT))
+			break;
+
+		if (!smallmem)
+			offset -= BBSIZE;
+	}
+
+	/*
+	 * We hit the beginning of the physical log & still no header.  Return
+	 * to caller.  If caller can handle a return of -1, then this routine
+	 * will be called again for the end of the physical log.
+	 */
+	if (i == -1) {
+		error = -1;
+		goto out;
+	}
+
+	/*
+	 * We have the final block of the good log (the first block
+	 * of the log record _before_ the head). So we check the uuid.
+	 */
+	if ((error = xlog_header_check_mount(log->l_mp, head)))
+		goto out;
+
+	/*
+	 * We may have found a log record header before we expected one.
+	 * last_blk will be the 1st block # with a given cycle #.  We may end
+	 * up reading an entire log record.  In this case, we don't want to
+	 * reset last_blk.  Only when last_blk points in the middle of a log
+	 * record do we update last_blk.
+	 */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+		uint	h_size = INT_GET(head->h_size, ARCH_CONVERT);
+
+		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
+		if (h_size % XLOG_HEADER_CYCLE_SIZE)
+			xhdrs++;
+	} else {
+		xhdrs = 1;
+	}
+
+	if (*last_blk - i + extra_bblks
+			!= BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
+		*last_blk = i;
+
+out:
+	xlog_put_bp(bp);
+	return error;
+}
+
+/*
+ * Head is defined to be the point of the log where the next log write
+ * could go.  This means that incomplete LR writes at the end are
+ * eliminated when calculating the head.  We aren't guaranteed that previous
+ * LRs have complete transactions.  We only know that no block with a cycle
+ * number of (current cycle number - 1) will be present in the log if we
+ * start writing from our current block number.
+ *
+ * last_blk contains the block number of the first block with a given
+ * cycle number.
+ *
+ * Return: zero if normal, non-zero if error.
+ */
+int
+xlog_find_head(
+	xlog_t 		*log,
+	xfs_daddr_t	*return_head_blk)
+{
+	xfs_buf_t	*bp;
+	xfs_caddr_t	offset;
+	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
+	int		num_scan_bblks;
+	uint		first_half_cycle, last_half_cycle;
+	uint		stop_on_cycle;
+	int		error, log_bbnum = log->l_logBBsize;
+
+	/* Is the end of the log device zeroed? */
+	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
+		*return_head_blk = first_blk;
+
+		/* Is the whole lot zeroed? */
+		if (!first_blk) {
+			/* Linux XFS shouldn't generate totally zeroed logs -
+			 * mkfs etc write a dummy unmount record to a fresh
+			 * log so we can store the uuid in there
+			 */
+			xlog_warn("XFS: totally zeroed log");
+		}
+
+		return 0;
+	} else if (error) {
+		xlog_warn("XFS: empty log check failed");
+		return error;
+	}
+
+	first_blk = 0;			/* get cycle # of 1st block */
+	bp = xlog_get_bp(log, 1);
+	if (!bp)
+		return ENOMEM;
+	if ((error = xlog_bread(log, 0, 1, bp)))
+		goto bp_err;
+	offset = xlog_align(log, 0, 1, bp);
+	first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+
+	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
+	if ((error = xlog_bread(log, last_blk, 1, bp)))
+		goto bp_err;
+	offset = xlog_align(log, last_blk, 1, bp);
+	last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+	ASSERT(last_half_cycle != 0);
+
+	/*
+	 * If the 1st half cycle number is equal to the last half cycle number,
+	 * then the entire log is stamped with the same cycle number.  In this
+	 * case, head_blk can't be set to zero (which makes sense).  The below
+	 * math doesn't work out properly with head_blk equal to zero.  Instead,
+	 * we set it to log_bbnum which is an invalid block number, but this
+	 * value makes the math correct.  If head_blk doesn't change through
+	 * all the tests below, *head_blk is set to zero at the very end rather
+	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
+	 * in a circular file.
+	 */
+	if (first_half_cycle == last_half_cycle) {
+		/*
+		 * In this case we believe that the entire log should have
+		 * cycle number last_half_cycle.  We need to scan backwards
+		 * from the end verifying that there are no holes still
+		 * containing last_half_cycle - 1.  If we find such a hole,
+		 * then the start of that hole will be the new head.  The
+		 * simple case looks like
+		 *        x | x ... | x - 1 | x
+		 * Another case that fits this picture would be
+		 *        x | x + 1 | x ... | x
+		 * In this case the head really is somewhere at the end of the
+		 * log, as one of the latest writes at the beginning was
+		 * incomplete.
+		 * One more case is
+		 *        x | x + 1 | x ... | x - 1 | x
+		 * This is really the combination of the above two cases, and
+		 * the head has to end up at the start of the x-1 hole at the
+		 * end of the log.
+		 *
+		 * In the 256k log case, we will read from the beginning to the
+		 * end of the log and search for cycle numbers equal to x-1.
+		 * We don't worry about the x+1 blocks that we encounter,
+		 * because we know that they cannot be the head since the log
+		 * started with x.
+		 */
+		head_blk = log_bbnum;
+		stop_on_cycle = last_half_cycle - 1;
+	} else {
+		/*
+		 * In this case we want to find the first block with cycle
+		 * number matching last_half_cycle.  We expect the log to be
+		 * some variation on
+		 *        x + 1 ... | x ...
+		 * The first block with cycle number x (last_half_cycle) will
+		 * be where the new head belongs.  First we do a binary search
+		 * for the first occurrence of last_half_cycle.  The binary
+		 * search may not be totally accurate, so then we scan back
+		 * from there looking for occurrences of last_half_cycle before
+		 * us.  If that backwards scan wraps around the beginning of
+		 * the log, then we look for occurrences of last_half_cycle - 1
+		 * at the end of the log.  The cases we're looking for look
+		 * like
+		 *        x + 1 ... | x | x + 1 | x ...
+		 *                               ^ binary search stopped here
+		 * or
+		 *        x + 1 ... | x ... | x - 1 | x
+		 *        <---------> less than scan distance
+		 */
+		stop_on_cycle = last_half_cycle;
+		if ((error = xlog_find_cycle_start(log, bp, first_blk,
+						&head_blk, last_half_cycle)))
+			goto bp_err;
+	}
+
+	/*
+	 * Now validate the answer.  Scan back some number of maximum possible
+	 * blocks and make sure each one has the expected cycle number.  The
+	 * maximum is determined by the total possible amount of buffering
+	 * in the in-core log.  The following number can be made tighter if
+	 * we actually look at the block size of the filesystem.
+	 */
+	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
+	if (head_blk >= num_scan_bblks) {
+		/*
+		 * We are guaranteed that the entire check can be performed
+		 * in one buffer.
+		 */
+		start_blk = head_blk - num_scan_bblks;
+		if ((error = xlog_find_verify_cycle(log,
+						start_blk, num_scan_bblks,
+						stop_on_cycle, &new_blk)))
+			goto bp_err;
+		if (new_blk != -1)
+			head_blk = new_blk;
+	} else {		/* need to read 2 parts of log */
+		/*
+		 * We are going to scan backwards in the log in two parts.
+		 * First we scan the physical end of the log.  In this part
+		 * of the log, we are looking for blocks with cycle number
+		 * last_half_cycle - 1.
+		 * If we find one, then we know that the log starts there, as
+		 * we've found a hole that didn't get written in going around
+		 * the end of the physical log.  The simple case for this is
+		 *        x + 1 ... | x ... | x - 1 | x
+		 *        <---------> less than scan distance
+		 * If all of the blocks at the end of the log have cycle number
+		 * last_half_cycle, then we check the blocks at the start of
+		 * the log looking for occurrences of last_half_cycle.  If we
+		 * find one, then our current estimate for the location of the
+		 * first occurrence of last_half_cycle is wrong and we move
+		 * back to the hole we've found.  This case looks like
+		 *        x + 1 ... | x | x + 1 | x ...
+		 *                               ^ binary search stopped here
+		 * Another case we need to handle that only occurs in 256k
+		 * logs is
+		 *        x + 1 ... | x ... | x+1 | x ...
+		 *                   ^ binary search stops here
+		 * In a 256k log, the scan at the end of the log will see the
+		 * x + 1 blocks.  We need to skip past those since that is
+		 * certainly not the head of the log.  By searching for
+		 * last_half_cycle-1 we accomplish that.
+		 */
+		start_blk = log_bbnum - num_scan_bblks + head_blk;
+		ASSERT(head_blk <= INT_MAX &&
+			(xfs_daddr_t) num_scan_bblks - head_blk >= 0);
+		if ((error = xlog_find_verify_cycle(log, start_blk,
+					num_scan_bblks - (int)head_blk,
+					(stop_on_cycle - 1), &new_blk)))
+			goto bp_err;
+		if (new_blk != -1) {
+			head_blk = new_blk;
+			goto bad_blk;
+		}
+
+		/*
+		 * Scan beginning of log now.  The last part of the physical
+		 * log is good.  This scan needs to verify that it doesn't find
+		 * the last_half_cycle.
+		 */
+		start_blk = 0;
+		ASSERT(head_blk <= INT_MAX);
+		if ((error = xlog_find_verify_cycle(log,
+					start_blk, (int)head_blk,
+					stop_on_cycle, &new_blk)))
+			goto bp_err;
+		if (new_blk != -1)
+			head_blk = new_blk;
+	}
+
+ bad_blk:
+	/*
+	 * Now we need to make sure head_blk is not pointing to a block in
+	 * the middle of a log record.
+	 */
+	num_scan_bblks = XLOG_REC_SHIFT(log);
+	if (head_blk >= num_scan_bblks) {
+		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
+
+		/* start ptr at last block ptr before head_blk */
+		if ((error = xlog_find_verify_log_record(log, start_blk,
+							&head_blk, 0)) == -1) {
+			error = XFS_ERROR(EIO);
+			goto bp_err;
+		} else if (error)
+			goto bp_err;
+	} else {
+		start_blk = 0;
+		ASSERT(head_blk <= INT_MAX);
+		if ((error = xlog_find_verify_log_record(log, start_blk,
+							&head_blk, 0)) == -1) {
+			/* We hit the beginning of the log during our search */
+			start_blk = log_bbnum - num_scan_bblks + head_blk;
+			new_blk = log_bbnum;
+			ASSERT(start_blk <= INT_MAX &&
+				(xfs_daddr_t) log_bbnum-start_blk >= 0);
+			ASSERT(head_blk <= INT_MAX);
+			if ((error = xlog_find_verify_log_record(log,
+							start_blk, &new_blk,
+							(int)head_blk)) == -1) {
+				error = XFS_ERROR(EIO);
+				goto bp_err;
+			} else if (error)
+				goto bp_err;
+			if (new_blk != log_bbnum)
+				head_blk = new_blk;
+		} else if (error)
+			goto bp_err;
+	}
+
+	xlog_put_bp(bp);
+	if (head_blk == log_bbnum)
+		*return_head_blk = 0;
+	else
+		*return_head_blk = head_blk;
+	/*
+	 * When returning here, we have a good block number.  Bad block
+	 * means that during a previous crash, we didn't have a clean break
+	 * from cycle number N to cycle number N-1.  In this case, we need
+	 * to find the first block with cycle number N-1.
+	 */
+	return 0;
+
+ bp_err:
+	xlog_put_bp(bp);
+
+	if (error)
+	    xlog_warn("XFS: failed to find log head");
+	return error;
+}
+
+/*
+ * Find the sync block number or the tail of the log.
+ *
+ * This will be the block number of the last record to have its
+ * associated buffers synced to disk.  Every log record header has
+ * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
+ * to get a sync block number.  The only concern is to figure out which
+ * log record header to believe.
+ *
+ * The following algorithm uses the log record header with the largest
+ * lsn.  The entire log record does not need to be valid.  We only care
+ * that the header is valid.
+ *
+ * We could speed up search by using current head_blk buffer, but it is not
+ * available.
+ */
+int
+xlog_find_tail(
+	xlog_t			*log,
+	xfs_daddr_t		*head_blk,
+	xfs_daddr_t		*tail_blk,
+	int			readonly)
+{
+	xlog_rec_header_t	*rhead;
+	xlog_op_header_t	*op_head;
+	xfs_caddr_t		offset = NULL;
+	xfs_buf_t		*bp;
+	int			error, i, found;
+	xfs_daddr_t		umount_data_blk;
+	xfs_daddr_t		after_umount_blk;
+	xfs_lsn_t		tail_lsn;
+	int			hblks;
+
+	found = 0;
+
+	/*
+	 * Find previous log record
+	 */
+	if ((error = xlog_find_head(log, head_blk)))
+		return error;
+
+	bp = xlog_get_bp(log, 1);
+	if (!bp)
+		return ENOMEM;
+	if (*head_blk == 0) {				/* special case */
+		if ((error = xlog_bread(log, 0, 1, bp)))
+			goto bread_err;
+		offset = xlog_align(log, 0, 1, bp);
+		if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
+			*tail_blk = 0;
+			/* leave all other log inited values alone */
+			goto exit;
+		}
+	}
+
+	/*
+	 * Search backwards looking for log record header block
+	 */
+	ASSERT(*head_blk < INT_MAX);
+	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
+		if ((error = xlog_bread(log, i, 1, bp)))
+			goto bread_err;
+		offset = xlog_align(log, i, 1, bp);
+		if (XLOG_HEADER_MAGIC_NUM ==
+		    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
+			found = 1;
+			break;
+		}
+	}
+	/*
+	 * If we haven't found the log record header block, start looking
+	 * again from the end of the physical log.  XXXmiken: There should be
+	 * a check here to make sure we didn't search more than N blocks in
+	 * the previous code.
+	 */
+	if (!found) {
+		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
+			if ((error = xlog_bread(log, i, 1, bp)))
+				goto bread_err;
+			offset = xlog_align(log, i, 1, bp);
+			if (XLOG_HEADER_MAGIC_NUM ==
+			    INT_GET(*(uint*)offset, ARCH_CONVERT)) {
+				found = 2;
+				break;
+			}
+		}
+	}
+	if (!found) {
+		xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+
+	/* find blk_no of tail of log */
+	rhead = (xlog_rec_header_t *)offset;
+	*tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));
+
+	/*
+	 * Reset log values according to the state of the log when we
+	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
+	 * one because the next write starts a new cycle rather than
+	 * continuing the cycle of the last good log record.  At this
+	 * point we have guaranteed that all partial log records have been
+	 * accounted for.  Therefore, we know that the last good log record
+	 * written was complete and ended exactly on the end boundary
+	 * of the physical log.
+	 */
+	log->l_prev_block = i;
+	log->l_curr_block = (int)*head_blk;
+	log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
+	if (found == 2)
+		log->l_curr_cycle++;
+	log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
+	log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
+	log->l_grant_reserve_cycle = log->l_curr_cycle;
+	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
+	log->l_grant_write_cycle = log->l_curr_cycle;
+	log->l_grant_write_bytes = BBTOB(log->l_curr_block);
+
+	/*
+	 * Look for unmount record.  If we find it, then we know there
+	 * was a clean unmount.  Since 'i' could be the last block in
+	 * the physical log, we convert to a log block before comparing
+	 * to the head_blk.
+	 *
+	 * Save the current tail lsn to use to pass to
+	 * xlog_clear_stale_blocks() below.  We won't want to clear the
+	 * unmount record if there is one, so we pass the lsn of the
+	 * unmount record rather than the block after it.
+	 */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+		int	h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
+		int	h_version = INT_GET(rhead->h_version, ARCH_CONVERT);
+
+		if ((h_version & XLOG_VERSION_2) &&
+		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+			if (h_size % XLOG_HEADER_CYCLE_SIZE)
+				hblks++;
+		} else {
+			hblks = 1;
+		}
+	} else {
+		hblks = 1;
+	}
+	after_umount_blk = (i + hblks + (int)
+		BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
+	tail_lsn = log->l_tail_lsn;
+	if (*head_blk == after_umount_blk &&
+	    INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
+		umount_data_blk = (i + hblks) % log->l_logBBsize;
+		if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
+			goto bread_err;
+		}
+		offset = xlog_align(log, umount_data_blk, 1, bp);
+		op_head = (xlog_op_header_t *)offset;
+		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
+			/*
+			 * Set tail and last sync so that newly written
+			 * log records will point recovery to after the
+			 * current unmount record.
+			 */
+			ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle,
+					after_umount_blk);
+			ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
+					after_umount_blk);
+			*tail_blk = after_umount_blk;
+		}
+	}
+
+	/*
+	 * Make sure that there are no blocks in front of the head
+	 * with the same cycle number as the head.  This can happen
+	 * because we allow multiple outstanding log writes concurrently,
+	 * and the later writes might make it out before earlier ones.
+	 *
+	 * We use the lsn from before modifying it so that we'll never
+	 * overwrite the unmount record after a clean unmount.
+	 *
+	 * Do this only if we are going to recover the filesystem
+	 *
+	 * NOTE: This used to say "if (!readonly)"
+	 * However on Linux, we can & do recover a read-only filesystem.
+	 * We only skip recovery if NORECOVERY is specified on mount,
+	 * in which case we would not be here.
+	 *
+	 * But... if the -device- itself is readonly, just skip this.
+	 * We can't recover this device anyway, so it won't matter.
+	 */
+	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
+		error = xlog_clear_stale_blocks(log, tail_lsn);
+	}
+
+bread_err:
+exit:
+	xlog_put_bp(bp);
+
+	if (error)
+		xlog_warn("XFS: failed to locate log tail");
+	return error;
+}
+
+/*
+ * Is the log zeroed at all?
+ *
+ * The last binary search should be changed to perform an X block read
+ * once X becomes small enough.  You can then search linearly through
+ * the X blocks.  This will cut down on the number of reads we need to do.
+ *
+ * If the log is partially zeroed, this routine will pass back the blkno
+ * of the first block with cycle number 0.  It won't have a complete LR
+ * preceding it.
+ *
+ * Return:
+ *	0  => the log is completely written to
+ *	-1 => use *blk_no as the first block of the log
+ *	>0 => error has occurred
+ */
+int
+xlog_find_zeroed(
+	xlog_t		*log,
+	xfs_daddr_t	*blk_no)
+{
+	xfs_buf_t	*bp;
+	xfs_caddr_t	offset;
+	uint	        first_cycle, last_cycle;
+	xfs_daddr_t	new_blk, last_blk, start_blk;
+	xfs_daddr_t     num_scan_bblks;
+	int	        error, log_bbnum = log->l_logBBsize;
+
+	/* check totally zeroed log */
+	bp = xlog_get_bp(log, 1);
+	if (!bp)
+		return ENOMEM;
+	if ((error = xlog_bread(log, 0, 1, bp)))
+		goto bp_err;
+	offset = xlog_align(log, 0, 1, bp);
+	first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+	if (first_cycle == 0) {		/* completely zeroed log */
+		*blk_no = 0;
+		xlog_put_bp(bp);
+		return -1;
+	}
+
+	/* check partially zeroed log */
+	if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
+		goto bp_err;
+	offset = xlog_align(log, log_bbnum-1, 1, bp);
+	last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+	if (last_cycle != 0) {		/* log completely written to */
+		xlog_put_bp(bp);
+		return 0;
+	} else if (first_cycle != 1) {
+		/*
+		 * If the cycle of the last block is zero, the cycle of
+		 * the first block must be 1. If it's not, maybe we're
+		 * not looking at a log... Bail out.
+		 */
+		xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
+		return XFS_ERROR(EINVAL);
+	}
+
+	/* we have a partially zeroed log */
+	last_blk = log_bbnum-1;
+	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
+		goto bp_err;
+
+	/*
+	 * Validate the answer.  Because there is no way to guarantee that
+	 * the entire log is made up of log records which are the same size,
+	 * we scan over the defined maximum blocks.  At this point, the maximum
+	 * is not chosen to mean anything special.   XXXmiken
+	 */
+	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
+	ASSERT(num_scan_bblks <= INT_MAX);
+
+	if (last_blk < num_scan_bblks)
+		num_scan_bblks = last_blk;
+	start_blk = last_blk - num_scan_bblks;
+
+	/*
+	 * We search for any instances of cycle number 0 that occur before
+	 * our current estimate of the head.  What we're trying to detect is
+	 *        1 ... | 0 | 1 | 0...
+	 *                       ^ binary search ends here
+	 */
+	if ((error = xlog_find_verify_cycle(log, start_blk,
+					 (int)num_scan_bblks, 0, &new_blk)))
+		goto bp_err;
+	if (new_blk != -1)
+		last_blk = new_blk;
+
+	/*
+	 * Potentially backup over partial log record write.  We don't need
+	 * to search the end of the log because we know it is zero.
+	 */
+	if ((error = xlog_find_verify_log_record(log, start_blk,
+				&last_blk, 0)) == -1) {
+	    error = XFS_ERROR(EIO);
+	    goto bp_err;
+	} else if (error)
+	    goto bp_err;
+
+	*blk_no = last_blk;
+bp_err:
+	xlog_put_bp(bp);
+	if (error)
+		return error;
+	return -1;
+}
+
+/*
+ * These are simple subroutines used by xlog_clear_stale_blocks() below
+ * to initialize a buffer full of empty log record headers and write
+ * them into the log.
+ */
+STATIC void
+xlog_add_record(
+	xlog_t			*log,
+	xfs_caddr_t		buf,
+	int			cycle,
+	int			block,
+	int			tail_cycle,
+	int			tail_block)
+{
+	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
+
+	memset(buf, 0, BBSIZE);
+	INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
+	INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
+	INT_SET(recp->h_version, ARCH_CONVERT,
+			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
+	ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block);
+	ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block);
+	INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
+	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
+}
+
+STATIC int
+xlog_write_log_records(
+	xlog_t		*log,
+	int		cycle,
+	int		start_block,
+	int		blocks,
+	int		tail_cycle,
+	int		tail_block)
+{
+	xfs_caddr_t	offset;
+	xfs_buf_t	*bp;
+	int		balign, ealign;
+	int		sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+	int		end_block = start_block + blocks;
+	int		bufblks;
+	int		error = 0;
+	int		i, j = 0;
+
+	bufblks = 1 << ffs(blocks);
+	while (!(bp = xlog_get_bp(log, bufblks))) {
+		bufblks >>= 1;
+		if (bufblks <= log->l_sectbb_log)
+			return ENOMEM;
+	}
+
+	/* We may need to do a read at the start to fill in part of
+	 * the buffer in the starting sector not covered by the first
+	 * write below.
+	 */
+	balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
+	if (balign != start_block) {
+		if ((error = xlog_bread(log, start_block, 1, bp))) {
+			xlog_put_bp(bp);
+			return error;
+		}
+		j = start_block - balign;
+	}
+
+	for (i = start_block; i < end_block; i += bufblks) {
+		int		bcount, endcount;
+
+		bcount = min(bufblks, end_block - start_block);
+		endcount = bcount - j;
+
+		/* We may need to do a read at the end to fill in part of
+		 * the buffer in the final sector not covered by the write.
+		 * If this is the same sector as the above read, skip it.
+		 */
+		ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
+		if (j == 0 && (start_block + endcount > ealign)) {
+			offset = XFS_BUF_PTR(bp);
+			balign = BBTOB(ealign - start_block);
+			XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
+			if ((error = xlog_bread(log, ealign, sectbb, bp)))
+				break;
+			XFS_BUF_SET_PTR(bp, offset, bufblks);
+		}
+
+		offset = xlog_align(log, start_block, endcount, bp);
+		for (; j < endcount; j++) {
+			xlog_add_record(log, offset, cycle, i+j,
+					tail_cycle, tail_block);
+			offset += BBSIZE;
+		}
+		error = xlog_bwrite(log, start_block, endcount, bp);
+		if (error)
+			break;
+		start_block += endcount;
+		j = 0;
+	}
+	xlog_put_bp(bp);
+	return error;
+}
+
+/*
+ * This routine is called to blow away any incomplete log writes out
+ * in front of the log head.  We do this so that we won't become confused
+ * if we come up, write only a little bit more, and then crash again.
+ * If we leave the partial log records out there, this situation could
+ * cause us to think those partial writes are valid blocks since they
+ * have the current cycle number.  We get rid of them by overwriting them
+ * with empty log records with the old cycle number rather than the
+ * current one.
+ *
+ * The tail lsn is passed in rather than taken from
+ * the log so that we will not write over the unmount record after a
+ * clean unmount in a 512 block log.  Doing so would leave the log without
+ * any valid log records in it until a new one was written.  If we crashed
+ * during that time we would not be able to recover.
+ */
+STATIC int
+xlog_clear_stale_blocks(
+	xlog_t		*log,
+	xfs_lsn_t	tail_lsn)
+{
+	int		tail_cycle, head_cycle;
+	int		tail_block, head_block;
+	int		tail_distance, max_distance;
+	int		distance;
+	int		error;
+
+	tail_cycle = CYCLE_LSN(tail_lsn);
+	tail_block = BLOCK_LSN(tail_lsn);
+	head_cycle = log->l_curr_cycle;
+	head_block = log->l_curr_block;
+
+	/*
+	 * Figure out the distance between the new head of the log
+	 * and the tail.  We want to write over any blocks beyond the
+	 * head that we may have written just before the crash, but
+	 * we don't want to overwrite the tail of the log.
+	 */
+	if (head_cycle == tail_cycle) {
+		/*
+		 * The tail is behind the head in the physical log,
+		 * so the distance from the head to the tail is the
+		 * distance from the head to the end of the log plus
+		 * the distance from the beginning of the log to the
+		 * tail.
+		 */
+		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
+			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
+					 XFS_ERRLEVEL_LOW, log->l_mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		tail_distance = tail_block + (log->l_logBBsize - head_block);
+	} else {
+		/*
+		 * The head is behind the tail in the physical log,
+		 * so the distance from the head to the tail is just
+		 * the tail block minus the head block.
+		 */
+		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
+			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
+					 XFS_ERRLEVEL_LOW, log->l_mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		tail_distance = tail_block - head_block;
+	}
+
+	/*
+	 * If the head is right up against the tail, we can't clear
+	 * anything.
+	 */
+	if (tail_distance <= 0) {
+		ASSERT(tail_distance == 0);
+		return 0;
+	}
+
+	max_distance = XLOG_TOTAL_REC_SHIFT(log);
+	/*
+	 * Take the smaller of the maximum amount of outstanding I/O
+	 * we could have and the distance to the tail to clear out.
+	 * We take the smaller so that we don't overwrite the tail and
+	 * we don't waste all day writing from the head to the tail
+	 * for no reason.
+	 */
+	max_distance = MIN(max_distance, tail_distance);
+
+	if ((head_block + max_distance) <= log->l_logBBsize) {
+		/*
+		 * We can stomp all the blocks we need to without
+		 * wrapping around the end of the log.  Just do it
+		 * in a single write.  Use the cycle number of the
+		 * current cycle minus one so that the log will look like:
+		 *     n ... | n - 1 ...
+		 */
+		error = xlog_write_log_records(log, (head_cycle - 1),
+				head_block, max_distance, tail_cycle,
+				tail_block);
+		if (error)
+			return error;
+	} else {
+		/*
+		 * We need to wrap around the end of the physical log in
+		 * order to clear all the blocks.  Do it in two separate
+		 * I/Os.  The first write should be from the head to the
+		 * end of the physical log, and it should use the current
+		 * cycle number minus one just like above.
+		 */
+		distance = log->l_logBBsize - head_block;
+		error = xlog_write_log_records(log, (head_cycle - 1),
+				head_block, distance, tail_cycle,
+				tail_block);
+
+		if (error)
+			return error;
+
+		/*
+		 * Now write the blocks at the start of the physical log.
+		 * This writes the remainder of the blocks we want to clear.
+		 * It uses the current cycle number since we're now on the
+		 * same cycle as the head so that we get:
+		 *    n ... n ... | n - 1 ...
+		 *    ^^^^^ blocks we're writing
+		 */
+		distance = max_distance - (log->l_logBBsize - head_block);
+		error = xlog_write_log_records(log, head_cycle, 0, distance,
+				tail_cycle, tail_block);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
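+
+/*
+ * Illustrative worked example of the distance arithmetic above, using
+ * made-up numbers (the real values depend on the log geometry):
+ *
+ *	l_logBBsize = 1000, head_cycle = n, head_block = 900,
+ *	tail_cycle = n, tail_block = 100
+ *
+ * The cycles match, so the tail is physically behind the head and
+ *	tail_distance = 100 + (1000 - 900) = 200 blocks.
+ * If XLOG_TOTAL_REC_SHIFT(log) were, say, 300, then
+ *	max_distance = MIN(300, 200) = 200
+ * and head_block + max_distance = 1100 > l_logBBsize, so we wrap:
+ * blocks 900..999 are stamped with cycle n - 1 and blocks 0..99 with
+ * cycle n, leaving the log looking like "n ... n ... | n - 1 ..." as
+ * described in the comments above.
+ */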
+
+/******************************************************************************
+ *
+ *		Log recover routines
+ *
+ ******************************************************************************
+ */
+
+STATIC xlog_recover_t *
+xlog_recover_find_tid(
+	xlog_recover_t		*q,
+	xlog_tid_t		tid)
+{
+	xlog_recover_t		*p = q;
+
+	while (p != NULL) {
+		if (p->r_log_tid == tid)
+		    break;
+		p = p->r_next;
+	}
+	return p;
+}
+
+STATIC void
+xlog_recover_put_hashq(
+	xlog_recover_t		**q,
+	xlog_recover_t		*trans)
+{
+	trans->r_next = *q;
+	*q = trans;
+}
+
+STATIC void
+xlog_recover_add_item(
+	xlog_recover_item_t	**itemq)
+{
+	xlog_recover_item_t	*item;
+
+	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+	xlog_recover_insert_item_backq(itemq, item);
+}
+
+STATIC int
+xlog_recover_add_to_cont_trans(
+	xlog_recover_t		*trans,
+	xfs_caddr_t		dp,
+	int			len)
+{
+	xlog_recover_item_t	*item;
+	xfs_caddr_t		ptr, old_ptr;
+	int			old_len;
+
+	item = trans->r_itemq;
+	if (item == 0) {
+		/* finish copying rest of trans header */
+		xlog_recover_add_item(&trans->r_itemq);
+		ptr = (xfs_caddr_t) &trans->r_theader +
+				sizeof(xfs_trans_header_t) - len;
+		memcpy(ptr, dp, len); /* d, s, l */
+		return 0;
+	}
+	item = item->ri_prev;
+
+	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
+	old_len = item->ri_buf[item->ri_cnt-1].i_len;
+
+	ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0);
+	memcpy(&ptr[old_len], dp, len); /* d, s, l */
+	item->ri_buf[item->ri_cnt-1].i_len += len;
+	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
+	return 0;
+}
+
+/*
+ * The next region to add is the start of a new region.  It could be
+ * a whole region or it could be the first part of a new region.  Because
+ * of this, the assumption here is that the type and size fields of all
+ * format structures fit into the first 32 bits of the structure.
+ *
+ * This works because all regions must be 32 bit aligned.  Therefore, we
+ * either have both fields or we have neither field.  In the case we have
+ * neither field, the data part of the region is zero length.  We only have
+ * a log_op_header and can throw away the header since a new one will appear
+ * later.  If we have at least 4 bytes, then we can determine how many regions
+ * will appear in the current log item.
+ */
+STATIC int
+xlog_recover_add_to_trans(
+	xlog_recover_t		*trans,
+	xfs_caddr_t		dp,
+	int			len)
+{
+	xfs_inode_log_format_t	*in_f;			/* any will do */
+	xlog_recover_item_t	*item;
+	xfs_caddr_t		ptr;
+
+	if (!len)
+		return 0;
+	item = trans->r_itemq;
+	if (item == 0) {
+		ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
+		if (len == sizeof(xfs_trans_header_t))
+			xlog_recover_add_item(&trans->r_itemq);
+		memcpy(&trans->r_theader, dp, len); /* d, s, l */
+		return 0;
+	}
+
+	ptr = kmem_alloc(len, KM_SLEEP);
+	memcpy(ptr, dp, len);
+	in_f = (xfs_inode_log_format_t *)ptr;
+
+	if (item->ri_prev->ri_total != 0 &&
+	     item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
+		xlog_recover_add_item(&trans->r_itemq);
+	}
+	item = trans->r_itemq;
+	item = item->ri_prev;
+
+	if (item->ri_total == 0) {		/* first region to be added */
+		item->ri_total	= in_f->ilf_size;
+		ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
+		item->ri_buf = kmem_zalloc((item->ri_total *
+					    sizeof(xfs_log_iovec_t)), KM_SLEEP);
+	}
+	ASSERT(item->ri_total > item->ri_cnt);
+	/* Description region is ri_buf[0] */
+	item->ri_buf[item->ri_cnt].i_addr = ptr;
+	item->ri_buf[item->ri_cnt].i_len  = len;
+	item->ri_cnt++;
+	return 0;
+}
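+
+/*
+ * Illustrative trace of the region accounting above (the structure
+ * names are the real ones, the sizes are only an example): an inode
+ * log item logged with ilf_size == 3 arrives as
+ *	region 0: xfs_inode_log_format_t  (sets ri_total to 3)
+ *	region 1: the inode core
+ *	region 2: one inode fork
+ * so ri_buf[] is allocated with 3 iovecs when region 0 shows up, and
+ * once ri_cnt reaches ri_total the next region starts a fresh item
+ * via xlog_recover_add_item().
+ */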
+
+STATIC void
+xlog_recover_new_tid(
+	xlog_recover_t		**q,
+	xlog_tid_t		tid,
+	xfs_lsn_t		lsn)
+{
+	xlog_recover_t		*trans;
+
+	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
+	trans->r_log_tid   = tid;
+	trans->r_lsn	   = lsn;
+	xlog_recover_put_hashq(q, trans);
+}
+
+STATIC int
+xlog_recover_unlink_tid(
+	xlog_recover_t		**q,
+	xlog_recover_t		*trans)
+{
+	xlog_recover_t		*tp;
+	int			found = 0;
+
+	ASSERT(trans != 0);
+	if (trans == *q) {
+		*q = (*q)->r_next;
+	} else {
+		tp = *q;
+		while (tp != 0) {
+			if (tp->r_next == trans) {
+				found = 1;
+				break;
+			}
+			tp = tp->r_next;
+		}
+		if (!found) {
+			xlog_warn(
+			     "XFS: xlog_recover_unlink_tid: trans not found");
+			ASSERT(0);
+			return XFS_ERROR(EIO);
+		}
+		tp->r_next = tp->r_next->r_next;
+	}
+	return 0;
+}
+
+STATIC void
+xlog_recover_insert_item_backq(
+	xlog_recover_item_t	**q,
+	xlog_recover_item_t	*item)
+{
+	if (*q == 0) {
+		item->ri_prev = item->ri_next = item;
+		*q = item;
+	} else {
+		item->ri_next		= *q;
+		item->ri_prev		= (*q)->ri_prev;
+		(*q)->ri_prev		= item;
+		item->ri_prev->ri_next	= item;
+	}
+}
+
+STATIC void
+xlog_recover_insert_item_frontq(
+	xlog_recover_item_t	**q,
+	xlog_recover_item_t	*item)
+{
+	xlog_recover_insert_item_backq(q, item);
+	*q = item;
+}
+
+STATIC int
+xlog_recover_reorder_trans(
+	xlog_t			*log,
+	xlog_recover_t		*trans)
+{
+	xlog_recover_item_t	*first_item, *itemq, *itemq_next;
+	xfs_buf_log_format_t	*buf_f;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	ushort			flags = 0;
+
+	first_item = itemq = trans->r_itemq;
+	trans->r_itemq = NULL;
+	do {
+		itemq_next = itemq->ri_next;
+		buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
+		switch (ITEM_TYPE(itemq)) {
+		case XFS_LI_BUF:
+			flags = buf_f->blf_flags;
+			break;
+		case XFS_LI_6_1_BUF:
+		case XFS_LI_5_3_BUF:
+			obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+			flags = obuf_f->blf_flags;
+			break;
+		}
+
+		switch (ITEM_TYPE(itemq)) {
+		case XFS_LI_BUF:
+		case XFS_LI_6_1_BUF:
+		case XFS_LI_5_3_BUF:
+			if (!(flags & XFS_BLI_CANCEL)) {
+				xlog_recover_insert_item_frontq(&trans->r_itemq,
+								itemq);
+				break;
+			}
+		case XFS_LI_INODE:
+		case XFS_LI_6_1_INODE:
+		case XFS_LI_5_3_INODE:
+		case XFS_LI_DQUOT:
+		case XFS_LI_QUOTAOFF:
+		case XFS_LI_EFD:
+		case XFS_LI_EFI:
+			xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
+			break;
+		default:
+			xlog_warn(
+	"XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
+			ASSERT(0);
+			return XFS_ERROR(EIO);
+		}
+		itemq = itemq_next;
+	} while (first_item != itemq);
+	return 0;
+}
+
+/*
+ * Build up the table of buf cancel records so that we don't replay
+ * cancelled data in the second pass.  For buffer records that are
+ * not cancel records, there is nothing to do here so we just return.
+ *
+ * If we get a cancel record which is already in the table, this indicates
+ * that the buffer was cancelled multiple times.  In order to ensure
+ * that during pass 2 we keep the record in the table until we reach its
+ * last occurrence in the log, we keep a reference count in the cancel
+ * record in the table to tell us how many times we expect to see this
+ * record during the second pass.
+ */
+STATIC void
+xlog_recover_do_buffer_pass1(
+	xlog_t			*log,
+	xfs_buf_log_format_t	*buf_f)
+{
+	xfs_buf_cancel_t	*bcp;
+	xfs_buf_cancel_t	*nextp;
+	xfs_buf_cancel_t	*prevp;
+	xfs_buf_cancel_t	**bucket;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	xfs_daddr_t		blkno = 0;
+	uint			len = 0;
+	ushort			flags = 0;
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		blkno = buf_f->blf_blkno;
+		len = buf_f->blf_len;
+		flags = buf_f->blf_flags;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		blkno = (xfs_daddr_t) obuf_f->blf_blkno;
+		len = obuf_f->blf_len;
+		flags = obuf_f->blf_flags;
+		break;
+	}
+
+	/*
+	 * If this isn't a cancel buffer item, then just return.
+	 */
+	if (!(flags & XFS_BLI_CANCEL))
+		return;
+
+	/*
+	 * Insert an xfs_buf_cancel record into the hash table of
+	 * them.  If there is already an identical record, bump
+	 * its reference count.
+	 */
+	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
+					  XLOG_BC_TABLE_SIZE];
+	/*
+	 * If the hash bucket is empty then just insert a new record into
+	 * the bucket.
+	 */
+	if (*bucket == NULL) {
+		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
+						     KM_SLEEP);
+		bcp->bc_blkno = blkno;
+		bcp->bc_len = len;
+		bcp->bc_refcount = 1;
+		bcp->bc_next = NULL;
+		*bucket = bcp;
+		return;
+	}
+
+	/*
+	 * The hash bucket is not empty, so search for duplicates of our
+	 * record.  If we find one then just bump its refcount.  If not
+	 * then add us at the end of the list.
+	 */
+	prevp = NULL;
+	nextp = *bucket;
+	while (nextp != NULL) {
+		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
+			nextp->bc_refcount++;
+			return;
+		}
+		prevp = nextp;
+		nextp = nextp->bc_next;
+	}
+	ASSERT(prevp != NULL);
+	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
+					     KM_SLEEP);
+	bcp->bc_blkno = blkno;
+	bcp->bc_len = len;
+	bcp->bc_refcount = 1;
+	bcp->bc_next = NULL;
+	prevp->bc_next = bcp;
+}
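+
+/*
+ * Illustrative example of the cancel-table bookkeeping above, assuming
+ * a hypothetical table of 64 buckets: a cancelled buffer at blkno 130
+ * hashes to bucket 130 % 64 = 2.  If pass 1 sees the same (blkno, len)
+ * cancelled three times, one xfs_buf_cancel_t with bc_refcount == 3
+ * sits in bucket 2, and pass 2 decrements it once per matching cancel
+ * item so the entry disappears exactly at its last occurrence in the
+ * log.
+ */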
+
+/*
+ * Check to see whether the buffer being recovered has a corresponding
+ * entry in the buffer cancel record table.  If it does then return 1
+ * so that it will be cancelled, otherwise return 0.  If the buffer is
+ * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
+ * the refcount on the entry in the table and remove it from the table
+ * if this is the last reference.
+ *
+ * We remove the cancel record from the table when we encounter its
+ * last occurrence in the log so that if the same buffer is re-used
+ * again after its last cancellation we actually replay the changes
+ * made at that point.
+ */
+STATIC int
+xlog_check_buffer_cancelled(
+	xlog_t			*log,
+	xfs_daddr_t		blkno,
+	uint			len,
+	ushort			flags)
+{
+	xfs_buf_cancel_t	*bcp;
+	xfs_buf_cancel_t	*prevp;
+	xfs_buf_cancel_t	**bucket;
+
+	if (log->l_buf_cancel_table == NULL) {
+		/*
+		 * There is nothing in the table built in pass one,
+		 * so this buffer must not be cancelled.
+		 */
+		ASSERT(!(flags & XFS_BLI_CANCEL));
+		return 0;
+	}
+
+	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
+					  XLOG_BC_TABLE_SIZE];
+	bcp = *bucket;
+	if (bcp == NULL) {
+		/*
+		 * There is no corresponding entry in the table built
+		 * in pass one, so this buffer has not been cancelled.
+		 */
+		ASSERT(!(flags & XFS_BLI_CANCEL));
+		return 0;
+	}
+
+	/*
+	 * Search for an entry in the buffer cancel table that
+	 * matches our buffer.
+	 */
+	prevp = NULL;
+	while (bcp != NULL) {
+		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
+			/*
+			 * We've got a match, so return 1 so that the
+			 * recovery of this buffer is cancelled.
+			 * If this buffer is actually a buffer cancel
+			 * log item, then decrement the refcount on the
+			 * one in the table and remove it if this is the
+			 * last reference.
+			 */
+			if (flags & XFS_BLI_CANCEL) {
+				bcp->bc_refcount--;
+				if (bcp->bc_refcount == 0) {
+					if (prevp == NULL) {
+						*bucket = bcp->bc_next;
+					} else {
+						prevp->bc_next = bcp->bc_next;
+					}
+					kmem_free(bcp,
+						  sizeof(xfs_buf_cancel_t));
+				}
+			}
+			return 1;
+		}
+		prevp = bcp;
+		bcp = bcp->bc_next;
+	}
+	/*
+	 * We didn't find a corresponding entry in the table, so
+	 * return 0 so that the buffer is NOT cancelled.
+	 */
+	ASSERT(!(flags & XFS_BLI_CANCEL));
+	return 0;
+}
+
+STATIC int
+xlog_recover_do_buffer_pass2(
+	xlog_t			*log,
+	xfs_buf_log_format_t	*buf_f)
+{
+	xfs_buf_log_format_v1_t	*obuf_f;
+	xfs_daddr_t		blkno = 0;
+	ushort			flags = 0;
+	uint			len = 0;
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		blkno = buf_f->blf_blkno;
+		flags = buf_f->blf_flags;
+		len = buf_f->blf_len;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		blkno = (xfs_daddr_t) obuf_f->blf_blkno;
+		flags = obuf_f->blf_flags;
+		len = obuf_f->blf_len;
+		break;
+	}
+
+	return xlog_check_buffer_cancelled(log, blkno, len, flags);
+}
+
+/*
+ * Perform recovery for a buffer full of inodes.  In these buffers,
+ * the only data which should be recovered is that which corresponds
+ * to the di_next_unlinked pointers in the on disk inode structures.
+ * The rest of the data for the inodes is always logged through the
+ * inodes themselves rather than the inode buffer and is recovered
+ * in xlog_recover_do_inode_trans().
+ *
+ * The only time when buffers full of inodes are fully recovered is
+ * when the buffer is full of newly allocated inodes.  In this case
+ * the buffer will not be marked as an inode buffer and so will be
+ * sent to xlog_recover_do_reg_buffer() below during recovery.
+ */
+STATIC int
+xlog_recover_do_inode_buffer(
+	xfs_mount_t		*mp,
+	xlog_recover_item_t	*item,
+	xfs_buf_t		*bp,
+	xfs_buf_log_format_t	*buf_f)
+{
+	int			i;
+	int			item_index;
+	int			bit;
+	int			nbits;
+	int			reg_buf_offset;
+	int			reg_buf_bytes;
+	int			next_unlinked_offset;
+	int			inodes_per_buf;
+	xfs_agino_t		*logged_nextp;
+	xfs_agino_t		*buffer_nextp;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	unsigned int		*data_map = NULL;
+	unsigned int		map_size = 0;
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		data_map = buf_f->blf_data_map;
+		map_size = buf_f->blf_map_size;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		data_map = obuf_f->blf_data_map;
+		map_size = obuf_f->blf_map_size;
+		break;
+	}
+	/*
+	 * Set the variables corresponding to the current region to
+	 * 0 so that we'll initialize them on the first pass through
+	 * the loop.
+	 */
+	reg_buf_offset = 0;
+	reg_buf_bytes = 0;
+	bit = 0;
+	nbits = 0;
+	item_index = 0;
+	inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
+	for (i = 0; i < inodes_per_buf; i++) {
+		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
+			offsetof(xfs_dinode_t, di_next_unlinked);
+
+		while (next_unlinked_offset >=
+		       (reg_buf_offset + reg_buf_bytes)) {
+			/*
+			 * The next di_next_unlinked field is beyond
+			 * the current logged region.  Find the next
+			 * logged region that contains or is beyond
+			 * the current di_next_unlinked field.
+			 */
+			bit += nbits;
+			bit = xfs_next_bit(data_map, map_size, bit);
+
+			/*
+			 * If there are no more logged regions in the
+			 * buffer, then we're done.
+			 */
+			if (bit == -1) {
+				return 0;
+			}
+
+			nbits = xfs_contig_bits(data_map, map_size,
+							 bit);
+			ASSERT(nbits > 0);
+			reg_buf_offset = bit << XFS_BLI_SHIFT;
+			reg_buf_bytes = nbits << XFS_BLI_SHIFT;
+			item_index++;
+		}
+
+		/*
+		 * If the current logged region starts after the current
+		 * di_next_unlinked field, then move on to the next
+		 * di_next_unlinked field.
+		 */
+		if (next_unlinked_offset < reg_buf_offset) {
+			continue;
+		}
+
+		ASSERT(item->ri_buf[item_index].i_addr != NULL);
+		ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
+		ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
+
+		/*
+		 * The current logged region contains a copy of the
+		 * current di_next_unlinked field.  Extract its value
+		 * and copy it to the buffer copy.
+		 */
+		logged_nextp = (xfs_agino_t *)
+			       ((char *)(item->ri_buf[item_index].i_addr) +
+				(next_unlinked_offset - reg_buf_offset));
+		if (unlikely(*logged_nextp == 0)) {
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
+				item, bp);
+			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
+					 XFS_ERRLEVEL_LOW, mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+
+		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
+					      next_unlinked_offset);
+		INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp);
+	}
+
+	return 0;
+}
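+
+/*
+ * Illustrative walk through the offset arithmetic above, with made-up
+ * geometry (256-byte inodes, di_next_unlinked at byte 96 of the inode,
+ * 128-byte logging chunks, i.e. XFS_BLI_SHIFT == 7):
+ *	inode i = 2  =>  next_unlinked_offset = 2 * 256 + 96 = 608
+ * A logged region starting at bitmap bit 4 with nbits == 1 covers
+ * bytes 512..639, which contains offset 608, so the logged copy of
+ * di_next_unlinked is read from byte 608 - 512 = 96 of that region's
+ * ri_buf entry and copied over the on-disk value in the buffer.
+ */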
+
+/*
+ * Perform a 'normal' buffer recovery.  Each logged region of the
+ * buffer should be copied over the corresponding region in the
+ * given buffer.  The bitmap in the buf log format structure indicates
+ * where to place the logged data.
+ */
+/*ARGSUSED*/
+STATIC void
+xlog_recover_do_reg_buffer(
+	xfs_mount_t		*mp,
+	xlog_recover_item_t	*item,
+	xfs_buf_t		*bp,
+	xfs_buf_log_format_t	*buf_f)
+{
+	int			i;
+	int			bit;
+	int			nbits;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	unsigned int		*data_map = NULL;
+	unsigned int		map_size = 0;
+	int                     error;
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		data_map = buf_f->blf_data_map;
+		map_size = buf_f->blf_map_size;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		data_map = obuf_f->blf_data_map;
+		map_size = obuf_f->blf_map_size;
+		break;
+	}
+	bit = 0;
+	i = 1;  /* 0 is the buf format structure */
+	while (1) {
+		bit = xfs_next_bit(data_map, map_size, bit);
+		if (bit == -1)
+			break;
+		nbits = xfs_contig_bits(data_map, map_size, bit);
+		ASSERT(nbits > 0);
+		ASSERT(item->ri_buf[i].i_addr != 0);
+		ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
+		ASSERT(XFS_BUF_COUNT(bp) >=
+		       ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
+
+		/*
+		 * Do a sanity check if this is a dquot buffer. Just checking
+		 * the first dquot in the buffer should do. XXX: this is
+		 * probably a good thing to do for other buf types also.
+		 */
+		error = 0;
+		if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
+			error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
+					       item->ri_buf[i].i_addr,
+					       -1, 0, XFS_QMOPT_DOWARN,
+					       "dquot_buf_recover");
+		}
+		if (!error)
+			memcpy(xfs_buf_offset(bp,
+				(uint)bit << XFS_BLI_SHIFT),	/* dest */
+				item->ri_buf[i].i_addr,		/* source */
+				nbits<<XFS_BLI_SHIFT);		/* length */
+		i++;
+		bit += nbits;
+	}
+
+	/* Shouldn't be any more regions */
+	ASSERT(i == item->ri_total);
+}
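+
+/*
+ * Illustrative bitmap-to-byte mapping for the loop above, again
+ * assuming 128-byte chunks (XFS_BLI_SHIFT == 7):
+ *	data_map bits set: {1, 2, 5}
+ *	run 1: bit 1, nbits 2 -> bytes 128..383 copied from ri_buf[1]
+ *	run 2: bit 5, nbits 1 -> bytes 640..767 copied from ri_buf[2]
+ * ri_buf[0] always holds the xfs_buf_log_format_t itself, which is
+ * why the data copies start at i == 1.
+ */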
+
+/*
+ * Do some primitive error checking on ondisk dquot data structures.
+ */
+int
+xfs_qm_dqcheck(
+	xfs_disk_dquot_t *ddq,
+	xfs_dqid_t	 id,
+	uint		 type,	  /* used only when XFS_QMOPT_DQREPAIR is set */
+	uint		 flags,
+	char		 *str)
+{
+	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
+	int		errs = 0;
+
+	/*
+	 * We can encounter an uninitialized dquot buffer for 2 reasons:
+	 * 1. If we crash while deleting the quotainode(s), and those blks got
+	 *    used for user data. This is because we take the path of regular
+	 *    file deletion; however, the size field of quotainodes is never
+	 *    updated, so all the tricks that we play in itruncate_finish
+	 *    don't quite matter.
+	 *
+	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
+	 *    But the allocation will be replayed so we'll end up with an
+	 *    uninitialized quota block.
+	 *
+	 * This is all fine; things are still consistent, and we haven't lost
+	 * any quota information. Just don't complain about bad dquot blks.
+	 */
+	if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
+		if (flags & XFS_QMOPT_DOWARN)
+			cmn_err(CE_ALERT,
+			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
+			str, id,
+			INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
+		errs++;
+	}
+	if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
+		if (flags & XFS_QMOPT_DOWARN)
+			cmn_err(CE_ALERT,
+			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
+			str, id,
+			INT_GET(ddq->d_version, ARCH_CONVERT), XFS_DQUOT_VERSION);
+		errs++;
+	}
+
+	if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
+	    INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
+		if (flags & XFS_QMOPT_DOWARN)
+			cmn_err(CE_ALERT,
+			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
+			str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
+		errs++;
+	}
+
+	if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
+		if (flags & XFS_QMOPT_DOWARN)
+			cmn_err(CE_ALERT,
+			"%s : ondisk-dquot 0x%p, ID mismatch: "
+			"0x%x expected, found id 0x%x",
+			str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
+		errs++;
+	}
+
+	if (!errs && ddq->d_id) {
+		if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
+		    INT_GET(ddq->d_bcount, ARCH_CONVERT) >=
+				INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
+			if (!ddq->d_btimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					cmn_err(CE_ALERT,
+					"%s : Dquot ID 0x%x (0x%p) "
+					"BLK TIMER NOT STARTED",
+					str, (int)
+					INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
+				errs++;
+			}
+		}
+		if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
+		    INT_GET(ddq->d_icount, ARCH_CONVERT) >=
+				INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
+			if (!ddq->d_itimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					cmn_err(CE_ALERT,
+					"%s : Dquot ID 0x%x (0x%p) "
+					"INODE TIMER NOT STARTED",
+					str, (int)
+					INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
+				errs++;
+			}
+		}
+		if (INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT) &&
+		    INT_GET(ddq->d_rtbcount, ARCH_CONVERT) >=
+				INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT)) {
+			if (!ddq->d_rtbtimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					cmn_err(CE_ALERT,
+					"%s : Dquot ID 0x%x (0x%p) "
+					"RTBLK TIMER NOT STARTED",
+					str, (int)
+					INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
+				errs++;
+			}
+		}
+	}
+
+	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
+		return errs;
+
+	if (flags & XFS_QMOPT_DOWARN)
+		cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
+
+	/*
+	 * Typically, a repair is only requested by quotacheck.
+	 */
+	ASSERT(id != -1);
+	ASSERT(flags & XFS_QMOPT_DQREPAIR);
+	memset(d, 0, sizeof(xfs_dqblk_t));
+	INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
+	INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
+	INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
+	INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);
+
+	return errs;
+}
+
+/*
+ * Perform a dquot buffer recovery.
+ * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
+ * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
+ * Else, treat it as a regular buffer and do recovery.
+ */
+STATIC void
+xlog_recover_do_dquot_buffer(
+	xfs_mount_t		*mp,
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	xfs_buf_t		*bp,
+	xfs_buf_log_format_t	*buf_f)
+{
+	uint			type;
+
+	/*
+	 * Filesystems are required to send in quota flags at mount time.
+	 */
+	if (mp->m_qflags == 0) {
+		return;
+	}
+
+	type = 0;
+	if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
+		type |= XFS_DQ_USER;
+	if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
+		type |= XFS_DQ_GROUP;
+	/*
+	 * This type of quotas was turned off, so ignore this buffer
+	 */
+	if (log->l_quotaoffs_flag & type)
+		return;
+
+	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+}
+
+/*
+ * This routine replays a modification made to a buffer at runtime.
+ * There are actually two types of buffer, regular and inode, which
+ * are handled differently.  For inode buffers we only recover a
+ * specific set of data, namely
+ * the inode di_next_unlinked fields.  This is because all other inode
+ * data is actually logged via inode records and any data we replay
+ * here which overlaps that may be stale.
+ *
+ * When meta-data buffers are freed at run time we log a buffer item
+ * with the XFS_BLI_CANCEL bit set to indicate that previous copies
+ * of the buffer in the log should not be replayed at recovery time.
+ * This is so that if the blocks covered by the buffer are reused for
+ * file data before we crash we don't end up replaying old, freed
+ * meta-data into a user's file.
+ *
+ * To handle the cancellation of buffer log items, we make two passes
+ * over the log during recovery.  During the first we build a table of
+ * those buffers which have been cancelled, and during the second we
+ * only replay those buffers which do not have corresponding cancel
+ * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
+ * for more details on the implementation of the table of cancel records.
+ */
+STATIC int
+xlog_recover_do_buffer_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	int			pass)
+{
+	xfs_buf_log_format_t	*buf_f;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	xfs_mount_t		*mp;
+	xfs_buf_t		*bp;
+	int			error;
+	int			cancel;
+	xfs_daddr_t		blkno;
+	int			len;
+	ushort			flags;
+
+	buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
+
+	if (pass == XLOG_RECOVER_PASS1) {
+		/*
+		 * In this pass we're only looking for buf items
+		 * with the XFS_BLI_CANCEL bit set.
+		 */
+		xlog_recover_do_buffer_pass1(log, buf_f);
+		return 0;
+	} else {
+		/*
+		 * In this pass we want to recover all the buffers
+		 * which have not been cancelled and are not
+		 * cancellation buffers themselves.  The routine
+		 * we call here will tell us whether or not to
+		 * continue with the replay of this buffer.
+		 */
+		cancel = xlog_recover_do_buffer_pass2(log, buf_f);
+		if (cancel) {
+			return 0;
+		}
+	}
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		blkno = buf_f->blf_blkno;
+		len = buf_f->blf_len;
+		flags = buf_f->blf_flags;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		blkno = obuf_f->blf_blkno;
+		len = obuf_f->blf_len;
+		flags = obuf_f->blf_flags;
+		break;
+	default:
+		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
+			"xfs_log_recover: unknown buffer type 0x%x, dev %s",
+			buf_f->blf_type, XFS_BUFTARG_NAME(log->l_targ));
+		XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
+				 XFS_ERRLEVEL_LOW, log->l_mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	mp = log->l_mp;
+	if (flags & XFS_BLI_INODE_BUF) {
+		bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
+								XFS_BUF_LOCK);
+	} else {
+		bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
+	}
+	if (XFS_BUF_ISERROR(bp)) {
+		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
+				  bp, blkno);
+		error = XFS_BUF_GETERROR(bp);
+		xfs_buf_relse(bp);
+		return error;
+	}
+
+	error = 0;
+	if (flags & XFS_BLI_INODE_BUF) {
+		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
+	} else if (flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) {
+		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
+	} else {
+		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+	}
+	if (error)
+		return XFS_ERROR(error);
+
+	/*
+	 * Perform delayed write on the buffer.  Asynchronous writes will be
+	 * slower when taking into account all the buffers to be flushed.
+	 *
+	 * Also make sure that only inode buffers with good sizes stay in
+	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
+	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
+	 * buffers in the log can be a different size if the log was generated
+	 * by an older kernel using unclustered inode buffers or a newer kernel
+	 * running with a different inode cluster size.  Regardless, if
+	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
+	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
+	 * the buffer out of the buffer cache so that the buffer won't
+	 * overlap with future reads of those inodes.
+	 */
+	if (XFS_DINODE_MAGIC ==
+	    INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
+	    (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
+			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
+		XFS_BUF_STALE(bp);
+		error = xfs_bwrite(mp, bp);
+	} else {
+		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
+		       XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
+		XFS_BUF_SET_FSPRIVATE(bp, mp);
+		XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+		xfs_bdwrite(mp, bp);
+	}
+
+	return (error);
+}
+
+STATIC int
+xlog_recover_do_inode_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	int			pass)
+{
+	xfs_inode_log_format_t	*in_f;
+	xfs_mount_t		*mp;
+	xfs_buf_t		*bp;
+	xfs_imap_t		imap;
+	xfs_dinode_t		*dip;
+	xfs_ino_t		ino;
+	int			len;
+	xfs_caddr_t		src;
+	xfs_caddr_t		dest;
+	int			error;
+	int			attr_index;
+	uint			fields;
+	xfs_dinode_core_t	*dicp;
+
+	if (pass == XLOG_RECOVER_PASS1) {
+		return 0;
+	}
+
+	in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
+	ino = in_f->ilf_ino;
+	mp = log->l_mp;
+	if (ITEM_TYPE(item) == XFS_LI_INODE) {
+		imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
+		imap.im_len = in_f->ilf_len;
+		imap.im_boffset = in_f->ilf_boffset;
+	} else {
+		/*
+		 * It's an old inode format record.  We don't know where
+		 * its cluster is located on disk, and we can't allow
+		 * xfs_imap() to figure it out because the inode btrees
+		 * are not ready to be used.  Therefore do not pass the
+		 * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
+		 * us only the single block in which the inode lives
+		 * rather than its cluster, so we must make sure to
+		 * invalidate the buffer when we write it out below.
+		 */
+		imap.im_blkno = 0;
+		xfs_imap(log->l_mp, NULL, ino, &imap, 0);
+	}
+
+	/*
+	 * Inode buffers can be freed, look out for it,
+	 * and do not replay the inode.
+	 */
+	if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0))
+		return 0;
+
+	bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
+								XFS_BUF_LOCK);
+	if (XFS_BUF_ISERROR(bp)) {
+		xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
+				  bp, imap.im_blkno);
+		error = XFS_BUF_GETERROR(bp);
+		xfs_buf_relse(bp);
+		return error;
+	}
+	error = 0;
+	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
+	dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
+
+	/*
+	 * Make sure the place we're flushing out to really looks
+	 * like an inode!
+	 */
+	if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) {
+		xfs_buf_relse(bp);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
+			dip, bp, ino);
+		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
+				 XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr);
+	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
+		xfs_buf_relse(bp);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
+			item, ino);
+		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
+				 XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	/* Skip replay when the on disk inode is newer than the log one */
+	if (dicp->di_flushiter <
+	    INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)) {
+		/*
+		 * Deal with the wrap case, DI_MAX_FLUSH is less
+		 * than smaller numbers
+		 */
+		if ((INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)
+							== DI_MAX_FLUSH) &&
+		    (dicp->di_flushiter < (DI_MAX_FLUSH>>1))) {
+			/* do nothing */
+		} else {
+			xfs_buf_relse(bp);
+			return 0;
+		}
+	}
+	/* Take the opportunity to reset the flush iteration count */
+	dicp->di_flushiter = 0;
+
+	if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
+		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
+		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
+			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
+					 XFS_ERRLEVEL_LOW, mp, dicp);
+			xfs_buf_relse(bp);
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+				item, dip, bp, ino);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+	} else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
+		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
+		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
+		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
+			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
+					     XFS_ERRLEVEL_LOW, mp, dicp);
+			xfs_buf_relse(bp);
+			xfs_fs_cmn_err(CE_ALERT, mp,
+				"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+				item, dip, bp, ino);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+	}
+	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
+		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
+				     XFS_ERRLEVEL_LOW, mp, dicp);
+		xfs_buf_relse(bp);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
+			item, dip, bp, ino,
+			dicp->di_nextents + dicp->di_anextents,
+			dicp->di_nblocks);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
+		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
+				     XFS_ERRLEVEL_LOW, mp, dicp);
+		xfs_buf_relse(bp);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
+			item, dip, bp, ino, dicp->di_forkoff);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
+		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
+				     XFS_ERRLEVEL_LOW, mp, dicp);
+		xfs_buf_relse(bp);
+		xfs_fs_cmn_err(CE_ALERT, mp,
+			"xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
+			item->ri_buf[1].i_len, item);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	/* The core is in in-core format */
+	xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
+			      (xfs_dinode_core_t*)item->ri_buf[1].i_addr, -1);
+
+	/* the rest is in on-disk format */
+	if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
+		memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
+			item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
+			item->ri_buf[1].i_len  - sizeof(xfs_dinode_core_t));
+	}
+
+	fields = in_f->ilf_fields;
+	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
+	case XFS_ILOG_DEV:
+		INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev);
+
+		break;
+	case XFS_ILOG_UUID:
+		dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
+		break;
+	}
+
+	if (in_f->ilf_size == 2)
+		goto write_inode_buffer;
+	len = item->ri_buf[2].i_len;
+	src = item->ri_buf[2].i_addr;
+	ASSERT(in_f->ilf_size <= 4);
+	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
+	ASSERT(!(fields & XFS_ILOG_DFORK) ||
+	       (len == in_f->ilf_dsize));
+
+	switch (fields & XFS_ILOG_DFORK) {
+	case XFS_ILOG_DDATA:
+	case XFS_ILOG_DEXT:
+		memcpy(&dip->di_u, src, len);
+		break;
+
+	case XFS_ILOG_DBROOT:
+		xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
+				 &(dip->di_u.di_bmbt),
+				 XFS_DFORK_DSIZE(dip, mp));
+		break;
+
+	default:
+		/*
+		 * There are no data fork flags set.
+		 */
+		ASSERT((fields & XFS_ILOG_DFORK) == 0);
+		break;
+	}
+
+	/*
+	 * If we logged any attribute data, recover it.  There may or
+	 * may not have been any other non-core data logged in this
+	 * transaction.
+	 */
+	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
+		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
+			attr_index = 3;
+		} else {
+			attr_index = 2;
+		}
+		len = item->ri_buf[attr_index].i_len;
+		src = item->ri_buf[attr_index].i_addr;
+		ASSERT(len == in_f->ilf_asize);
+
+		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
+		case XFS_ILOG_ADATA:
+		case XFS_ILOG_AEXT:
+			dest = XFS_DFORK_APTR(dip);
+			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
+			memcpy(dest, src, len);
+			break;
+
+		case XFS_ILOG_ABROOT:
+			dest = XFS_DFORK_APTR(dip);
+			xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
+					 (xfs_bmdr_block_t*)dest,
+					 XFS_DFORK_ASIZE(dip, mp));
+			break;
+
+		default:
+			xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
+			ASSERT(0);
+			xfs_buf_relse(bp);
+			return XFS_ERROR(EIO);
+		}
+	}
+
+write_inode_buffer:
+	if (ITEM_TYPE(item) == XFS_LI_INODE) {
+		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
+		       XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
+		XFS_BUF_SET_FSPRIVATE(bp, mp);
+		XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+		xfs_bdwrite(mp, bp);
+	} else {
+		XFS_BUF_STALE(bp);
+		error = xfs_bwrite(mp, bp);
+	}
+
+	return (error);
+}
+
+/*
+ * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
+ * structure, so that we know not to do any dquot item or dquot buffer
+ * recovery of that type.
+ */
+STATIC int
+xlog_recover_do_quotaoff_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	int			pass)
+{
+	xfs_qoff_logformat_t	*qoff_f;
+
+	if (pass == XLOG_RECOVER_PASS2) {
+		return (0);
+	}
+
+	qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
+	ASSERT(qoff_f);
+
+	/*
+	 * The logitem format's flag tells us if this was user quotaoff,
+	 * group quotaoff or both.
+	 */
+	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
+		log->l_quotaoffs_flag |= XFS_DQ_USER;
+	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
+		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
+
+	return (0);
+}
+
+/*
+ * Recover a dquot record
+ */
+STATIC int
+xlog_recover_do_dquot_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	int			pass)
+{
+	xfs_mount_t		*mp;
+	xfs_buf_t		*bp;
+	struct xfs_disk_dquot	*ddq, *recddq;
+	int			error;
+	xfs_dq_logformat_t	*dq_f;
+	uint			type;
+
+	if (pass == XLOG_RECOVER_PASS1) {
+		return 0;
+	}
+	mp = log->l_mp;
+
+	/*
+	 * Filesystems are required to send in quota flags at mount time.
+	 */
+	if (mp->m_qflags == 0)
+		return (0);
+
+	recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
+	ASSERT(recddq);
+	/*
+	 * This type of quotas was turned off, so ignore this record.
+	 */
+	type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
+			(XFS_DQ_USER | XFS_DQ_GROUP);
+	ASSERT(type);
+	if (log->l_quotaoffs_flag & type)
+		return (0);
+
+	/*
+	 * At this point we know that quota was _not_ turned off.
+	 * Since the mount flags are not indicating to us otherwise, this
+	 * must mean that quota is on, and the dquot needs to be replayed.
+	 * Remember that we may not have fully recovered the superblock yet,
+	 * so we can't do the usual trick of looking at the SB quota bits.
+	 *
+	 * The other possibility, of course, is that the quota subsystem was
+	 * removed since the last mount - ENOSYS.
+	 */
+	dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
+	ASSERT(dq_f);
+	if ((error = xfs_qm_dqcheck(recddq,
+			   dq_f->qlf_id,
+			   0, XFS_QMOPT_DOWARN,
+			   "xlog_recover_do_dquot_trans (log copy)"))) {
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dq_f->qlf_len == 1);
+
+	error = xfs_read_buf(mp, mp->m_ddev_targp,
+			     dq_f->qlf_blkno,
+			     XFS_FSB_TO_BB(mp, dq_f->qlf_len),
+			     0, &bp);
+	if (error) {
+		xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
+				  bp, dq_f->qlf_blkno);
+		return error;
+	}
+	ASSERT(bp);
+	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
+
+	/*
+	 * At least the magic num portion should be on disk because this
+	 * was among a chunk of dquots created earlier, and we did some
+	 * minimal initialization then.
+	 */
+	if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+			   "xlog_recover_do_dquot_trans")) {
+		xfs_buf_relse(bp);
+		return XFS_ERROR(EIO);
+	}
+
+	memcpy(ddq, recddq, item->ri_buf[1].i_len);
+
+	ASSERT(dq_f->qlf_size == 2);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
+	       XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
+	XFS_BUF_SET_FSPRIVATE(bp, mp);
+	XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+	xfs_bdwrite(mp, bp);
+
+	return (0);
+}
+
+/*
+ * This routine is called to create an in-core extent free intent
+ * item from the efi format structure which was logged on disk.
+ * It allocates an in-core efi, copies the extents from the format
+ * structure into it, and adds the efi to the AIL with the given
+ * LSN.
+ */
+STATIC void
+xlog_recover_do_efi_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	xfs_lsn_t		lsn,
+	int			pass)
+{
+	xfs_mount_t		*mp;
+	xfs_efi_log_item_t	*efip;
+	xfs_efi_log_format_t	*efi_formatp;
+	SPLDECL(s);
+
+	if (pass == XLOG_RECOVER_PASS1) {
+		return;
+	}
+
+	efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
+	ASSERT(item->ri_buf[0].i_len ==
+	       (sizeof(xfs_efi_log_format_t) +
+		((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))));
+
+	mp = log->l_mp;
+	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
+	memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
+	      sizeof(xfs_efi_log_format_t) +
+	      ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
+	efip->efi_next_extent = efi_formatp->efi_nextents;
+	efip->efi_flags |= XFS_EFI_COMMITTED;
+
+	AIL_LOCK(mp,s);
+	/*
+	 * xfs_trans_update_ail() drops the AIL lock.
+	 */
+	xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
+}
+
+
+/*
+ * This routine is called when an efd format structure is found in
+ * a committed transaction in the log.  Its purpose is to cancel
+ * the corresponding efi if it was still in the log.  To do this
+ * it searches the AIL for the efi with an id equal to that in the
+ * efd format structure.  If we find it, we remove the efi from the
+ * AIL and free it.
+ */
+STATIC void
+xlog_recover_do_efd_trans(
+	xlog_t			*log,
+	xlog_recover_item_t	*item,
+	int			pass)
+{
+	xfs_mount_t		*mp;
+	xfs_efd_log_format_t	*efd_formatp;
+	xfs_efi_log_item_t	*efip = NULL;
+	xfs_log_item_t		*lip;
+	int			gen;
+	int			nexts;
+	__uint64_t		efi_id;
+	SPLDECL(s);
+
+	if (pass == XLOG_RECOVER_PASS1) {
+		return;
+	}
+
+	efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
+	ASSERT(item->ri_buf[0].i_len ==
+	       (sizeof(xfs_efd_log_format_t) +
+		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t))));
+	efi_id = efd_formatp->efd_efi_id;
+
+	/*
+	 * Search for the efi with the id in the efd format structure
+	 * in the AIL.
+	 */
+	mp = log->l_mp;
+	AIL_LOCK(mp,s);
+	lip = xfs_trans_first_ail(mp, &gen);
+	while (lip != NULL) {
+		if (lip->li_type == XFS_LI_EFI) {
+			efip = (xfs_efi_log_item_t *)lip;
+			if (efip->efi_format.efi_id == efi_id) {
+				/*
+				 * xfs_trans_delete_ail() drops the
+				 * AIL lock.
+				 */
+				xfs_trans_delete_ail(mp, lip, s);
+				break;
+			}
+		}
+		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
+	}
+	if (lip == NULL) {
+		AIL_UNLOCK(mp, s);
+	}
+
+	/*
+	 * If we found it, then free it up.  If it wasn't there, it
+	 * must have been overwritten in the log.  Oh well.
+	 */
+	if (lip != NULL) {
+		nexts = efip->efi_format.efi_nextents;
+		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+			kmem_free(lip, sizeof(xfs_efi_log_item_t) +
+				  ((nexts - 1) * sizeof(xfs_extent_t)));
+		} else {
+			kmem_zone_free(xfs_efi_zone, efip);
+		}
+	}
+}
+
+/*
+ * Perform the transaction
+ *
+ * If the transaction modifies a buffer or inode, do it now.  Otherwise,
+ * EFIs and EFDs get queued up by adding entries into the AIL for them.
+ */
+STATIC int
+xlog_recover_do_trans(
+	xlog_t			*log,
+	xlog_recover_t		*trans,
+	int			pass)
+{
+	int			error = 0;
+	xlog_recover_item_t	*item, *first_item;
+
+	if ((error = xlog_recover_reorder_trans(log, trans)))
+		return error;
+	first_item = item = trans->r_itemq;
+	do {
+		/*
+		 * we don't need to worry about the block number being
+		 * truncated in > 1 TB buffers because in user-land,
+		 * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so
+		 * the blkno's will get through the user-mode buffer
+		 * cache properly.  The only bad case is o32 kernels
+		 * where xfs_daddr_t is 32-bits but mount will warn us
+		 * off a > 1 TB filesystem before we get here.
+		 */
+		if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
+		    (ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
+		    (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
+			if  ((error = xlog_recover_do_buffer_trans(log, item,
+								 pass)))
+				break;
+		} else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
+			   (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
+			   (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
+			if ((error = xlog_recover_do_inode_trans(log, item,
+								pass)))
+				break;
+		} else if (ITEM_TYPE(item) == XFS_LI_EFI) {
+			xlog_recover_do_efi_trans(log, item, trans->r_lsn,
+						  pass);
+		} else if (ITEM_TYPE(item) == XFS_LI_EFD) {
+			xlog_recover_do_efd_trans(log, item, pass);
+		} else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
+			if ((error = xlog_recover_do_dquot_trans(log, item,
+								   pass)))
+					break;
+		} else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
+			if ((error = xlog_recover_do_quotaoff_trans(log, item,
+								   pass)))
+					break;
+		} else {
+			xlog_warn("XFS: xlog_recover_do_trans");
+			ASSERT(0);
+			error = XFS_ERROR(EIO);
+			break;
+		}
+		item = item->ri_next;
+	} while (first_item != item);
+
+	return error;
+}
+
+/*
+ * Free up any resources allocated by the transaction
+ *
+ * Remember that EFIs, EFDs, and IUNLINKs are handled later.
+ */
+STATIC void
+xlog_recover_free_trans(
+	xlog_recover_t		*trans)
+{
+	xlog_recover_item_t	*first_item, *item, *free_item;
+	int			i;
+
+	item = first_item = trans->r_itemq;
+	do {
+		free_item = item;
+		item = item->ri_next;
+		 /* Free the regions in the item. */
+		for (i = 0; i < free_item->ri_cnt; i++) {
+			kmem_free(free_item->ri_buf[i].i_addr,
+				  free_item->ri_buf[i].i_len);
+		}
+		/* Free the item itself */
+		kmem_free(free_item->ri_buf,
+			  (free_item->ri_total * sizeof(xfs_log_iovec_t)));
+		kmem_free(free_item, sizeof(xlog_recover_item_t));
+	} while (first_item != item);
+	/* Free the transaction recover structure */
+	kmem_free(trans, sizeof(xlog_recover_t));
+}
+
+STATIC int
+xlog_recover_commit_trans(
+	xlog_t			*log,
+	xlog_recover_t		**q,
+	xlog_recover_t		*trans,
+	int			pass)
+{
+	int			error;
+
+	if ((error = xlog_recover_unlink_tid(q, trans)))
+		return error;
+	if ((error = xlog_recover_do_trans(log, trans, pass)))
+		return error;
+	xlog_recover_free_trans(trans);			/* no error */
+	return 0;
+}
+
+STATIC int
+xlog_recover_unmount_trans(
+	xlog_recover_t		*trans)
+{
+	/* Do nothing now */
+	xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
+	return 0;
+}
+
+/*
+ * There are two valid states of the r_state field.  0 indicates that the
+ * transaction structure is in a normal state.  We have either seen the
+ * start of the transaction or the last operation we added was not a partial
+ * operation.  If the last operation we added to the transaction was a
+ * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
+ *
+ * NOTE: skip LRs with 0 data length.
+ */
+STATIC int
+xlog_recover_process_data(
+	xlog_t			*log,
+	xlog_recover_t		*rhash[],
+	xlog_rec_header_t	*rhead,
+	xfs_caddr_t		dp,
+	int			pass)
+{
+	xfs_caddr_t		lp;
+	int			num_logops;
+	xlog_op_header_t	*ohead;
+	xlog_recover_t		*trans;
+	xlog_tid_t		tid;
+	int			error;
+	unsigned long		hash;
+	uint			flags;
+
+	lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
+	num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
+
+	/* check the log format matches our own - else we can't recover */
+	if (xlog_header_check_recover(log->l_mp, rhead))
+		return (XFS_ERROR(EIO));
+
+	while ((dp < lp) && num_logops) {
+		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
+		ohead = (xlog_op_header_t *)dp;
+		dp += sizeof(xlog_op_header_t);
+		if (ohead->oh_clientid != XFS_TRANSACTION &&
+		    ohead->oh_clientid != XFS_LOG) {
+			xlog_warn(
+		"XFS: xlog_recover_process_data: bad clientid");
+			ASSERT(0);
+			return (XFS_ERROR(EIO));
+		}
+		tid = INT_GET(ohead->oh_tid, ARCH_CONVERT);
+		hash = XLOG_RHASH(tid);
+		trans = xlog_recover_find_tid(rhash[hash], tid);
+		if (trans == NULL) {		   /* not found; add new tid */
+			if (ohead->oh_flags & XLOG_START_TRANS)
+				xlog_recover_new_tid(&rhash[hash], tid,
+					INT_GET(rhead->h_lsn, ARCH_CONVERT));
+		} else {
+			ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp);
+			flags = ohead->oh_flags & ~XLOG_END_TRANS;
+			if (flags & XLOG_WAS_CONT_TRANS)
+				flags &= ~XLOG_CONTINUE_TRANS;
+			switch (flags) {
+			case XLOG_COMMIT_TRANS:
+				error = xlog_recover_commit_trans(log,
+						&rhash[hash], trans, pass);
+				break;
+			case XLOG_UNMOUNT_TRANS:
+				error = xlog_recover_unmount_trans(trans);
+				break;
+			case XLOG_WAS_CONT_TRANS:
+				error = xlog_recover_add_to_cont_trans(trans,
+						dp, INT_GET(ohead->oh_len,
+							ARCH_CONVERT));
+				break;
+			case XLOG_START_TRANS:
+				xlog_warn(
+			"XFS: xlog_recover_process_data: bad transaction");
+				ASSERT(0);
+				error = XFS_ERROR(EIO);
+				break;
+			case 0:
+			case XLOG_CONTINUE_TRANS:
+				error = xlog_recover_add_to_trans(trans,
+						dp, INT_GET(ohead->oh_len,
+							ARCH_CONVERT));
+				break;
+			default:
+				xlog_warn(
+			"XFS: xlog_recover_process_data: bad flag");
+				ASSERT(0);
+				error = XFS_ERROR(EIO);
+				break;
+			}
+			if (error)
+				return error;
+		}
+		dp += INT_GET(ohead->oh_len, ARCH_CONVERT);
+		num_logops--;
+	}
+	return 0;
+}
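+
+/*
+ * Illustrative decode of the flag handling above, for a hypothetical
+ * op header with oh_flags = XLOG_WAS_CONT_TRANS | XLOG_CONTINUE_TRANS
+ * | XLOG_END_TRANS: masking off XLOG_END_TRANS and then, because
+ * XLOG_WAS_CONT_TRANS is set, clearing XLOG_CONTINUE_TRANS leaves just
+ * XLOG_WAS_CONT_TRANS, so the payload is appended to the last region
+ * of the existing transaction via xlog_recover_add_to_cont_trans()
+ * rather than starting a new region.
+ */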
+
+/*
+ * Process an extent free intent item that was recovered from
+ * the log.  We need to free the extents that it describes.
+ */
+STATIC void
+xlog_recover_process_efi(
+	xfs_mount_t		*mp,
+	xfs_efi_log_item_t	*efip)
+{
+	xfs_efd_log_item_t	*efdp;
+	xfs_trans_t		*tp;
+	int			i;
+	xfs_extent_t		*extp;
+	xfs_fsblock_t		startblock_fsb;
+
+	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
+
+	/*
+	 * First check the validity of the extents described by the
+	 * EFI.  If any are bad, then assume that all are bad and
+	 * just toss the EFI.
+	 */
+	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+		extp = &(efip->efi_format.efi_extents[i]);
+		startblock_fsb = XFS_BB_TO_FSB(mp,
+				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
+		if ((startblock_fsb == 0) ||
+		    (extp->ext_len == 0) ||
+		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
+		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
+			/*
+			 * This will pull the EFI from the AIL and
+			 * free the memory associated with it.
+			 */
+			xfs_efi_release(efip, efip->efi_format.efi_nextents);
+			return;
+		}
+	}
+
+	tp = xfs_trans_alloc(mp, 0);
+	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
+	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
+
+	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+		extp = &(efip->efi_format.efi_extents[i]);
+		xfs_free_extent(tp, extp->ext_start, extp->ext_len);
+		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
+					 extp->ext_len);
+	}
+
+	efip->efi_flags |= XFS_EFI_RECOVERED;
+	xfs_trans_commit(tp, 0, NULL);
+}
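+
+/*
+ * Illustrative numbers for the sanity check above (made-up geometry,
+ * sb_dblocks = 1000000 and sb_agblocks = 250000): an extent whose
+ * start converts to fsb 1200000, or whose length is 0 or >= 250000
+ * blocks, marks the whole EFI as untrustworthy and it is released
+ * instead of being replayed.
+ */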
+
+/*
+ * Verify that once we've encountered something other than an EFI
+ * in the AIL that there are no more EFIs in the AIL.
+ */
+#if defined(DEBUG)
+STATIC void
+xlog_recover_check_ail(
+	xfs_mount_t		*mp,
+	xfs_log_item_t		*lip,
+	int			gen)
+{
+	int			orig_gen = gen;
+
+	do {
+		ASSERT(lip->li_type != XFS_LI_EFI);
+		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
+		/*
+		 * The check will be bogus if we restart from the
+		 * beginning of the AIL, so ASSERT that we don't.
+		 * We never should since we're holding the AIL lock
+		 * the entire time.
+		 */
+		ASSERT(gen == orig_gen);
+	} while (lip != NULL);
+}
+#endif	/* DEBUG */
+
+/*
+ * When this is called, all of the EFIs which did not have
+ * corresponding EFDs should be in the AIL.  What we do now
+ * is free the extents associated with each one.
+ *
+ * Since we process the EFIs in normal transactions, they
+ * will be removed at some point after the commit.  This prevents
+ * us from just walking down the list processing each one.
+ * We'll use a flag in the EFI to skip those that we've already
+ * processed and use the AIL iteration mechanism's generation
+ * count to try to speed this up at least a bit.
+ *
+ * When we start, we know that the EFIs are the only things in
+ * the AIL.  As we process them, however, other items are added
+ * to the AIL.  Since everything added to the AIL must come after
+ * everything already in the AIL, we stop processing as soon as
+ * we see something other than an EFI in the AIL.
+ */
+STATIC void
+xlog_recover_process_efis(
+	xlog_t			*log)
+{
+	xfs_log_item_t		*lip;
+	xfs_efi_log_item_t	*efip;
+	int			gen;
+	xfs_mount_t		*mp;
+	SPLDECL(s);
+
+	mp = log->l_mp;
+	AIL_LOCK(mp,s);
+
+	lip = xfs_trans_first_ail(mp, &gen);
+	while (lip != NULL) {
+		/*
+		 * We're done when we see something other than an EFI.
+		 */
+		if (lip->li_type != XFS_LI_EFI) {
+			xlog_recover_check_ail(mp, lip, gen);
+			break;
+		}
+
+		/*
+		 * Skip EFIs that we've already processed.
+		 */
+		efip = (xfs_efi_log_item_t *)lip;
+		if (efip->efi_flags & XFS_EFI_RECOVERED) {
+			lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
+			continue;
+		}
+
+		AIL_UNLOCK(mp, s);
+		xlog_recover_process_efi(mp, efip);
+		AIL_LOCK(mp,s);
+		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
+	}
+	AIL_UNLOCK(mp, s);
+}
+
+/*
+ * This routine performs a transaction to null out a bad inode pointer
+ * in an agi unlinked inode hash bucket.
+ */
+STATIC void
+xlog_recover_clear_agi_bucket(
+	xfs_mount_t	*mp,
+	xfs_agnumber_t	agno,
+	int		bucket)
+{
+	xfs_trans_t	*tp;
+	xfs_agi_t	*agi;
+	xfs_buf_t	*agibp;
+	int		offset;
+	int		error;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
+	xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
+
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+				   XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
+				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
+	if (error) {
+		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+		return;
+	}
+
+	agi = XFS_BUF_TO_AGI(agibp);
+	if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) {
+		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+		return;
+	}
+	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+
+	INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO);
+	offset = offsetof(xfs_agi_t, agi_unlinked) +
+		 (sizeof(xfs_agino_t) * bucket);
+	xfs_trans_log_buf(tp, agibp, offset,
+			  (offset + sizeof(xfs_agino_t) - 1));
+
+	(void) xfs_trans_commit(tp, 0, NULL);
+}
+
+/*
+ * xlog_iunlink_recover
+ *
+ * This is called during recovery to process any inodes which
+ * we unlinked but not freed when the system crashed.  These
+ * inodes will be on the lists in the AGI blocks.  What we do
+ * here is scan all the AGIs and fully truncate and free any
+ * inodes found on the lists.  Each inode is removed from the
+ * lists when it has been fully truncated and is freed.  The
+ * freeing of the inode and its removal from the list must be
+ * atomic.
+ */
+void
+xlog_recover_process_iunlinks(
+	xlog_t		*log)
+{
+	xfs_mount_t	*mp;
+	xfs_agnumber_t	agno;
+	xfs_agi_t	*agi;
+	xfs_buf_t	*agibp;
+	xfs_buf_t	*ibp;
+	xfs_dinode_t	*dip;
+	xfs_inode_t	*ip;
+	xfs_agino_t	agino;
+	xfs_ino_t	ino;
+	int		bucket;
+	int		error;
+	uint		mp_dmevmask;
+
+	mp = log->l_mp;
+
+	/*
+	 * Prevent any DMAPI event from being sent while in this function.
+	 */
+	mp_dmevmask = mp->m_dmevmask;
+	mp->m_dmevmask = 0;
+
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		/*
+		 * Find the agi for this ag.
+		 */
+		agibp = xfs_buf_read(mp->m_ddev_targp,
+				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
+				XFS_FSS_TO_BB(mp, 1), 0);
+		if (XFS_BUF_ISERROR(agibp)) {
+			xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
+				log->l_mp, agibp,
+				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
+		}
+		agi = XFS_BUF_TO_AGI(agibp);
+		ASSERT(XFS_AGI_MAGIC ==
+			INT_GET(agi->agi_magicnum, ARCH_CONVERT));
+
+		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
+
+			agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT);
+			while (agino != NULLAGINO) {
+
+				/*
+				 * Release the agi buffer so that it can
+				 * be acquired in the normal course of the
+				 * transaction to truncate and free the inode.
+				 */
+				xfs_buf_relse(agibp);
+
+				ino = XFS_AGINO_TO_INO(mp, agno, agino);
+				error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
+				ASSERT(error || (ip != NULL));
+
+				if (!error) {
+					/*
+					 * Get the on disk inode to find the
+					 * next inode in the bucket.
+					 */
+					error = xfs_itobp(mp, NULL, ip, &dip,
+							&ibp, 0);
+					ASSERT(error || (dip != NULL));
+				}
+
+				if (!error) {
+					ASSERT(ip->i_d.di_nlink == 0);
+
+					/* setup for the next pass */
+					agino = INT_GET(dip->di_next_unlinked,
+							ARCH_CONVERT);
+					xfs_buf_relse(ibp);
+					/*
+					 * Prevent any DMAPI event from
+					 * being sent when the
+					 * reference on the inode is
+					 * dropped.
+					 */
+					ip->i_d.di_dmevmask = 0;
+
+					/*
+					 * If this is a new inode, handle
+					 * it specially.  Otherwise,
+					 * just drop our reference to the
+					 * inode.  If there are no
+					 * other references, this will
+					 * send the inode to
+					 * xfs_inactive() which will
+					 * truncate the file and free
+					 * the inode.
+					 */
+					if (ip->i_d.di_mode == 0)
+						xfs_iput_new(ip, 0);
+					else
+						VN_RELE(XFS_ITOV(ip));
+				} else {
+					/*
+					 * We can't read in the inode
+					 * this bucket points to, or
+					 * this inode is messed up.  Just
+					 * ditch this bucket of inodes.  We
+					 * will lose some inodes and space,
+					 * but at least we won't hang.  Call
+					 * xlog_recover_clear_agi_bucket()
+					 * to perform a transaction to clear
+					 * the inode pointer in the bucket.
+					 */
+					xlog_recover_clear_agi_bucket(mp, agno,
+							bucket);
+
+					agino = NULLAGINO;
+				}
+
+				/*
+				 * Reacquire the agibuffer and continue around
+				 * the loop.
+				 */
+				agibp = xfs_buf_read(mp->m_ddev_targp,
+						XFS_AG_DADDR(mp, agno,
+							XFS_AGI_DADDR(mp)),
+						XFS_FSS_TO_BB(mp, 1), 0);
+				if (XFS_BUF_ISERROR(agibp)) {
+					xfs_ioerror_alert(
+				"xlog_recover_process_iunlinks(#2)",
+						log->l_mp, agibp,
+						XFS_AG_DADDR(mp, agno,
+							XFS_AGI_DADDR(mp)));
+				}
+				agi = XFS_BUF_TO_AGI(agibp);
+				ASSERT(XFS_AGI_MAGIC == INT_GET(
+					agi->agi_magicnum, ARCH_CONVERT));
+			}
+		}
+
+		/*
+		 * Release the buffer for the current agi so we can
+		 * go on to the next one.
+		 */
+		xfs_buf_relse(agibp);
+	}
+
+	mp->m_dmevmask = mp_dmevmask;
+}
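+
+/*
+ * A minimal standalone sketch (kept under #if 0, never built) of the
+ * unlinked-bucket walk performed above: each AGI bucket heads a singly
+ * linked list of unlinked-but-not-freed inodes, chained through the
+ * on-disk di_next_unlinked field.  The ex_* names and callbacks are
+ * stand-ins invented for this illustration, not kernel interfaces; the
+ * real loop also drops and re-reads the AGI buffer and reaps each inode
+ * through xfs_iget()/VN_RELE() so the normal inactive path frees it.
+ */
+#if 0
+#define EX_NULLAGINO	((unsigned int)-1)
+#define EX_NBUCKETS	64
+
+struct ex_dinode { unsigned int di_next_unlinked; };
+struct ex_agi	 { unsigned int agi_unlinked[EX_NBUCKETS]; };
+
+static void
+example_walk_bucket(
+	struct ex_agi		*agi,
+	int			bucket,
+	struct ex_dinode	*(*read_inode)(unsigned int agino),
+	void			(*reap_inode)(unsigned int agino))
+{
+	unsigned int		agino = agi->agi_unlinked[bucket];
+
+	while (agino != EX_NULLAGINO) {
+		struct ex_dinode *dip = read_inode(agino);
+		unsigned int	 next = dip->di_next_unlinked;
+
+		reap_inode(agino);	/* truncate and free the inode */
+		agino = next;		/* step to the next list entry */
+	}
+}
+#endif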
+
+
+#ifdef DEBUG
+STATIC void
+xlog_pack_data_checksum(
+	xlog_t		*log,
+	xlog_in_core_t	*iclog,
+	int		size)
+{
+	int		i;
+	uint		*up;
+	uint		chksum = 0;
+
+	up = (uint *)iclog->ic_datap;
+	/* divide length by 4 to get # words */
+	for (i = 0; i < (size >> 2); i++) {
+		chksum ^= INT_GET(*up, ARCH_CONVERT);
+		up++;
+	}
+	INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
+}
+#else
+#define xlog_pack_data_checksum(log, iclog, size)
+#endif
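+
+/*
+ * The DEBUG-only checksum above simply XOR-folds the record body one
+ * 32-bit word at a time.  A minimal plain-C sketch of that fold (kept
+ * under #if 0, never built); the example_* name and the use of native
+ * byte order are illustration-only simplifications -- the kernel code
+ * goes through INT_GET()/ARCH_CONVERT for the on-disk representation.
+ */
+#if 0
+static unsigned int
+example_xor_checksum(
+	const unsigned int	*words,
+	int			nbytes)
+{
+	unsigned int		chksum = 0;
+	int			i;
+
+	for (i = 0; i < (nbytes >> 2); i++)	/* nbytes / 4 = word count */
+		chksum ^= words[i];
+	return chksum;
+}
+#endif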
+
+/*
+ * Stamp cycle number in every block
+ */
+void
+xlog_pack_data(
+	xlog_t			*log,
+	xlog_in_core_t		*iclog,
+	int			roundoff)
+{
+	int			i, j, k;
+	int			size = iclog->ic_offset + roundoff;
+	uint			cycle_lsn;
+	xfs_caddr_t		dp;
+	xlog_in_core_2_t	*xhdr;
+
+	xlog_pack_data_checksum(log, iclog, size);
+
+	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
+
+	dp = iclog->ic_datap;
+	for (i = 0; i < BTOBB(size) &&
+		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
+		iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
+		*(uint *)dp = cycle_lsn;
+		dp += BBSIZE;
+	}
+
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+		xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
+		for ( ; i < BTOBB(size); i++) {
+			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+			xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
+			*(uint *)dp = cycle_lsn;
+			dp += BBSIZE;
+		}
+
+		for (i = 1; i < log->l_iclog_heads; i++) {
+			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
+		}
+	}
+}
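+
+/*
+ * xlog_pack_data() above saves the first 32-bit word of every 512-byte
+ * basic block into the record header's cycle data and overwrites it
+ * with the current cycle number, so recovery can spot blocks that never
+ * made it to disk; xlog_unpack_data() below restores the saved words.
+ * A minimal round-trip sketch (kept under #if 0, never built); EX_BLK,
+ * EX_NBLKS and the example_* names are stand-ins for this illustration
+ * and native byte order is assumed.
+ */
+#if 0
+#define EX_BLK		512	/* plays the role of BBSIZE */
+#define EX_NBLKS	4	/* blocks covered by one record header */
+
+static void
+example_stamp_cycles(
+	unsigned char	*buf,
+	unsigned int	saved[EX_NBLKS],	/* stands in for h_cycle_data */
+	unsigned int	cycle)
+{
+	int		i;
+
+	for (i = 0; i < EX_NBLKS; i++) {
+		/* save the first word of each block, then stamp the cycle */
+		saved[i] = *(unsigned int *)(buf + i * EX_BLK);
+		*(unsigned int *)(buf + i * EX_BLK) = cycle;
+	}
+}
+
+static void
+example_restore_cycles(
+	unsigned char		*buf,
+	const unsigned int	saved[EX_NBLKS])
+{
+	int			i;
+
+	for (i = 0; i < EX_NBLKS; i++)
+		*(unsigned int *)(buf + i * EX_BLK) = saved[i];
+}
+#endif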
+
+#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
+STATIC void
+xlog_unpack_data_checksum(
+	xlog_rec_header_t	*rhead,
+	xfs_caddr_t		dp,
+	xlog_t			*log)
+{
+	uint			*up = (uint *)dp;
+	uint			chksum = 0;
+	int			i;
+
+	/* divide length by 4 to get # words */
+	for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
+		chksum ^= INT_GET(*up, ARCH_CONVERT);
+		up++;
+	}
+	if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
+	    if (rhead->h_chksum ||
+		((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
+		    cmn_err(CE_DEBUG,
+			"XFS: LogR chksum mismatch: was (0x%x) is (0x%x)",
+			    INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
+		    cmn_err(CE_DEBUG,
+"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
+		    if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+			    cmn_err(CE_DEBUG,
+				"XFS: LogR this is a LogV2 filesystem");
+		    }
+		    log->l_flags |= XLOG_CHKSUM_MISMATCH;
+	    }
+	}
+}
+#else
+#define xlog_unpack_data_checksum(rhead, dp, log)
+#endif
+
+STATIC void
+xlog_unpack_data(
+	xlog_rec_header_t	*rhead,
+	xfs_caddr_t		dp,
+	xlog_t			*log)
+{
+	int			i, j, k;
+	xlog_in_core_2_t	*xhdr;
+
+	for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
+		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
+		*(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
+		dp += BBSIZE;
+	}
+
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+		xhdr = (xlog_in_core_2_t *)rhead;
+		for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
+			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
+			*(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
+			dp += BBSIZE;
+		}
+	}
+
+	xlog_unpack_data_checksum(rhead, dp, log);
+}
+
+STATIC int
+xlog_valid_rec_header(
+	xlog_t			*log,
+	xlog_rec_header_t	*rhead,
+	xfs_daddr_t		blkno)
+{
+	int			hlen;
+
+	if (unlikely(
+	    (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
+			XLOG_HEADER_MAGIC_NUM))) {
+		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
+				XFS_ERRLEVEL_LOW, log->l_mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (unlikely(
+	    (!rhead->h_version ||
+	    (INT_GET(rhead->h_version, ARCH_CONVERT) &
+			(~XLOG_VERSION_OKBITS)) != 0))) {
+		xlog_warn("XFS: %s: unrecognised log version (%d).",
+			__FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
+		return XFS_ERROR(EIO);
+	}
+
+	/* LR body must have data or it wouldn't have been written */
+	hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
+	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
+		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
+				XFS_ERRLEVEL_LOW, log->l_mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
+		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
+				XFS_ERRLEVEL_LOW, log->l_mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+	return 0;
+}
+
+/*
+ * Read the log from tail to head and process the log records found.
+ * Handle the two cases where the tail and head are in the same cycle
+ * and where the active portion of the log wraps around the end of
+ * the physical log separately.  The pass parameter is passed through
+ * to the routines called to process the data and is not looked at
+ * here.
+ */
+STATIC int
+xlog_do_recovery_pass(
+	xlog_t			*log,
+	xfs_daddr_t		head_blk,
+	xfs_daddr_t		tail_blk,
+	int			pass)
+{
+	xlog_rec_header_t	*rhead;
+	xfs_daddr_t		blk_no;
+	xfs_caddr_t		bufaddr, offset;
+	xfs_buf_t		*hbp, *dbp;
+	int			error = 0, h_size;
+	int			bblks, split_bblks;
+	int			hblks, split_hblks, wrapped_hblks;
+	xlog_recover_t		*rhash[XLOG_RHASH_SIZE];
+
+	ASSERT(head_blk != tail_blk);
+
+	/*
+	 * Read the header of the tail block and get the iclog buffer size from
+	 * h_size.  Use this to tell how many sectors make up the log header.
+	 */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+		/*
+		 * When using variable length iclogs, read first sector of
+		 * iclog header and extract the header size from it.  Get a
+		 * new hbp that is the correct size.
+		 */
+		hbp = xlog_get_bp(log, 1);
+		if (!hbp)
+			return ENOMEM;
+		if ((error = xlog_bread(log, tail_blk, 1, hbp)))
+			goto bread_err1;
+		offset = xlog_align(log, tail_blk, 1, hbp);
+		rhead = (xlog_rec_header_t *)offset;
+		error = xlog_valid_rec_header(log, rhead, tail_blk);
+		if (error)
+			goto bread_err1;
+		h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
+		if ((INT_GET(rhead->h_version, ARCH_CONVERT)
+				& XLOG_VERSION_2) &&
+		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+			if (h_size % XLOG_HEADER_CYCLE_SIZE)
+				hblks++;
+			xlog_put_bp(hbp);
+			hbp = xlog_get_bp(log, hblks);
+		} else {
+			hblks = 1;
+		}
+	} else {
+		ASSERT(log->l_sectbb_log == 0);
+		hblks = 1;
+		hbp = xlog_get_bp(log, 1);
+		h_size = XLOG_BIG_RECORD_BSIZE;
+	}
+
+	if (!hbp)
+		return ENOMEM;
+	dbp = xlog_get_bp(log, BTOBB(h_size));
+	if (!dbp) {
+		xlog_put_bp(hbp);
+		return ENOMEM;
+	}
+
+	memset(rhash, 0, sizeof(rhash));
+	if (tail_blk <= head_blk) {
+		for (blk_no = tail_blk; blk_no < head_blk; ) {
+			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
+				goto bread_err2;
+			offset = xlog_align(log, blk_no, hblks, hbp);
+			rhead = (xlog_rec_header_t *)offset;
+			error = xlog_valid_rec_header(log, rhead, blk_no);
+			if (error)
+				goto bread_err2;
+
+			/* blocks in data section */
+			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
+			error = xlog_bread(log, blk_no + hblks, bblks, dbp);
+			if (error)
+				goto bread_err2;
+			offset = xlog_align(log, blk_no + hblks, bblks, dbp);
+			xlog_unpack_data(rhead, offset, log);
+			if ((error = xlog_recover_process_data(log,
+						rhash, rhead, offset, pass)))
+				goto bread_err2;
+			blk_no += bblks + hblks;
+		}
+	} else {
+		/*
+		 * Perform recovery around the end of the physical log.
+		 * When the head is not on the same cycle number as the tail,
+		 * we can't do a sequential recovery as above.
+		 */
+		blk_no = tail_blk;
+		while (blk_no < log->l_logBBsize) {
+			/*
+			 * Check for header wrapping around physical end-of-log
+			 */
+			offset = NULL;
+			split_hblks = 0;
+			wrapped_hblks = 0;
+			if (blk_no + hblks <= log->l_logBBsize) {
+				/* Read header in one read */
+				error = xlog_bread(log, blk_no, hblks, hbp);
+				if (error)
+					goto bread_err2;
+				offset = xlog_align(log, blk_no, hblks, hbp);
+			} else {
+				/* This LR is split across physical log end */
+				if (blk_no != log->l_logBBsize) {
+					/* some data before physical log end */
+					ASSERT(blk_no <= INT_MAX);
+					split_hblks = log->l_logBBsize - (int)blk_no;
+					ASSERT(split_hblks > 0);
+					if ((error = xlog_bread(log, blk_no,
+							split_hblks, hbp)))
+						goto bread_err2;
+					offset = xlog_align(log, blk_no,
+							split_hblks, hbp);
+				}
+				/*
+				 * Note: this black magic still works with
+				 * large sector sizes (non-512) only because:
+				 * - we increased the buffer size originally
+				 *   by 1 sector giving us enough extra space
+				 *   for the second read;
+				 * - the log start is guaranteed to be sector
+				 *   aligned;
+				 * - we read the log end (LR header start)
+				 *   _first_, then the log start (LR header end)
+				 *   - order is important.
+				 */
+				bufaddr = XFS_BUF_PTR(hbp);
+				XFS_BUF_SET_PTR(hbp,
+						bufaddr + BBTOB(split_hblks),
+						BBTOB(hblks - split_hblks));
+				wrapped_hblks = hblks - split_hblks;
+				error = xlog_bread(log, 0, wrapped_hblks, hbp);
+				if (error)
+					goto bread_err2;
+				XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
+				if (!offset)
+					offset = xlog_align(log, 0,
+							wrapped_hblks, hbp);
+			}
+			rhead = (xlog_rec_header_t *)offset;
+			error = xlog_valid_rec_header(log, rhead,
+						split_hblks ? blk_no : 0);
+			if (error)
+				goto bread_err2;
+
+			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
+			blk_no += hblks;
+
+			/* Read in data for log record */
+			if (blk_no + bblks <= log->l_logBBsize) {
+				error = xlog_bread(log, blk_no, bblks, dbp);
+				if (error)
+					goto bread_err2;
+				offset = xlog_align(log, blk_no, bblks, dbp);
+			} else {
+				/* This log record is split across the
+				 * physical end of log */
+				offset = NULL;
+				split_bblks = 0;
+				if (blk_no != log->l_logBBsize) {
+					/* some data is before the physical
+					 * end of log */
+					ASSERT(!wrapped_hblks);
+					ASSERT(blk_no <= INT_MAX);
+					split_bblks =
+						log->l_logBBsize - (int)blk_no;
+					ASSERT(split_bblks > 0);
+					if ((error = xlog_bread(log, blk_no,
+							split_bblks, dbp)))
+						goto bread_err2;
+					offset = xlog_align(log, blk_no,
+							split_bblks, dbp);
+				}
+				/*
+				 * Note: this black magic still works with
+				 * large sector sizes (non-512) only because:
+				 * - we increased the buffer size originally
+				 *   by 1 sector giving us enough extra space
+				 *   for the second read;
+				 * - the log start is guaranteed to be sector
+				 *   aligned;
+				 * - we read the log end (LR header start)
+				 *   _first_, then the log start (LR header end)
+				 *   - order is important.
+				 */
+				bufaddr = XFS_BUF_PTR(dbp);
+				XFS_BUF_SET_PTR(dbp,
+						bufaddr + BBTOB(split_bblks),
+						BBTOB(bblks - split_bblks));
+				if ((error = xlog_bread(log, wrapped_hblks,
+						bblks - split_bblks, dbp)))
+					goto bread_err2;
+				XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
+				if (!offset)
+					offset = xlog_align(log, wrapped_hblks,
+						bblks - split_bblks, dbp);
+			}
+			xlog_unpack_data(rhead, offset, log);
+			if ((error = xlog_recover_process_data(log, rhash,
+							rhead, offset, pass)))
+				goto bread_err2;
+			blk_no += bblks;
+		}
+
+		ASSERT(blk_no >= log->l_logBBsize);
+		blk_no -= log->l_logBBsize;
+
+		/* read first part of physical log */
+		while (blk_no < head_blk) {
+			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
+				goto bread_err2;
+			offset = xlog_align(log, blk_no, hblks, hbp);
+			rhead = (xlog_rec_header_t *)offset;
+			error = xlog_valid_rec_header(log, rhead, blk_no);
+			if (error)
+				goto bread_err2;
+			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
+			if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
+				goto bread_err2;
+			offset = xlog_align(log, blk_no+hblks, bblks, dbp);
+			xlog_unpack_data(rhead, offset, log);
+			if ((error = xlog_recover_process_data(log, rhash,
+							rhead, offset, pass)))
+				goto bread_err2;
+			blk_no += bblks + hblks;
+		}
+	}
+
+ bread_err2:
+	xlog_put_bp(dbp);
+ bread_err1:
+	xlog_put_bp(hbp);
+	return error;
+}
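+
+/*
+ * A worked example (kept under #if 0, never built) of the split-read
+ * arithmetic used above when a record straddles the physical end of
+ * the log.  The numbers are made up for illustration: a 1000-block log
+ * with a 2-block header starting at block 999 reads one block from the
+ * end of the log and one from block 0, the second read landing just
+ * past the first in the same buffer -- the XFS_BUF_SET_PTR() dance.
+ */
+#if 0
+static void
+example_split_header_read(void)
+{
+	int	log_size = 1000;	/* l_logBBsize, in basic blocks */
+	int	blk_no	 = 999;		/* header start */
+	int	hblks	 = 2;		/* header length in basic blocks */
+	int	split_hblks, wrapped_hblks;
+
+	if (blk_no + hblks <= log_size)
+		return;				/* no wrap: one read */
+
+	split_hblks   = log_size - blk_no;	/* 1 block before the end */
+	wrapped_hblks = hblks - split_hblks;	/* 1 block from block 0 */
+
+	/*
+	 * Read split_hblks at blk_no, then wrapped_hblks at block 0 into
+	 * the same buffer offset by BBTOB(split_hblks).
+	 */
+	(void)split_hblks;
+	(void)wrapped_hblks;
+}
+#endif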
+
+/*
+ * Do the recovery of the log.  We actually do this in two phases.
+ * The two passes are necessary in order to implement the function
+ * of cancelling a record written into the log.  The first pass
+ * determines those things which have been cancelled, and the
+ * second pass replays log items normally except for those which
+ * have been cancelled.  The handling of the replay and cancellations
+ * takes place in the log item type specific routines.
+ *
+ * The table of items which have cancel records in the log is allocated
+ * and freed at this level, since only here do we know when all of
+ * the log recovery has been completed.
+ */
+STATIC int
+xlog_do_log_recovery(
+	xlog_t		*log,
+	xfs_daddr_t	head_blk,
+	xfs_daddr_t	tail_blk)
+{
+	int		error;
+
+	ASSERT(head_blk != tail_blk);
+
+	/*
+	 * First do a pass to find all of the cancelled buf log items.
+	 * Store them in the buf_cancel_table for use in the second pass.
+	 */
+	log->l_buf_cancel_table =
+		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
+						 sizeof(xfs_buf_cancel_t*),
+						 KM_SLEEP);
+	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
+				      XLOG_RECOVER_PASS1);
+	if (error != 0) {
+		kmem_free(log->l_buf_cancel_table,
+			  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
+		log->l_buf_cancel_table = NULL;
+		return error;
+	}
+	/*
+	 * Then do a second pass to actually recover the items in the log.
+	 * When it is complete free the table of buf cancel items.
+	 */
+	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
+				      XLOG_RECOVER_PASS2);
+#ifdef DEBUG
+	{
+		int	i;
+
+		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+			ASSERT(log->l_buf_cancel_table[i] == NULL);
+	}
+#endif	/* DEBUG */
+
+	kmem_free(log->l_buf_cancel_table,
+		  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
+	log->l_buf_cancel_table = NULL;
+
+	return error;
+}
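+
+/*
+ * The two-pass structure above in miniature: pass 1 only records which
+ * items carry cancellations, pass 2 replays everything that was not
+ * cancelled.  A sketch kept under #if 0 and never built; the flat
+ * "cancelled" array stands in for the hashed l_buf_cancel_table and
+ * the ex_* names and callback are invented for this illustration.
+ */
+#if 0
+struct ex_item {
+	int	id;		/* assumed to index into "cancelled" */
+	int	is_cancel;	/* this item cancels id's replay */
+};
+
+static void
+example_two_pass(
+	struct ex_item	*items,
+	int		nitems,
+	unsigned char	*cancelled,		/* zeroed by the caller */
+	void		(*replay)(struct ex_item *))
+{
+	int		i;
+
+	/* pass 1: note every id that has a cancellation record */
+	for (i = 0; i < nitems; i++)
+		if (items[i].is_cancel)
+			cancelled[items[i].id] = 1;
+
+	/* pass 2: replay only the items whose id was not cancelled */
+	for (i = 0; i < nitems; i++)
+		if (!items[i].is_cancel && !cancelled[items[i].id])
+			replay(&items[i]);
+}
+#endif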
+
+/*
+ * Do the actual recovery
+ */
+STATIC int
+xlog_do_recover(
+	xlog_t		*log,
+	xfs_daddr_t	head_blk,
+	xfs_daddr_t	tail_blk)
+{
+	int		error;
+	xfs_buf_t	*bp;
+	xfs_sb_t	*sbp;
+
+	/*
+	 * First replay the images in the log.
+	 */
+	error = xlog_do_log_recovery(log, head_blk, tail_blk);
+	if (error) {
+		return error;
+	}
+
+	XFS_bflush(log->l_mp->m_ddev_targp);
+
+	/*
+	 * If IO errors happened during recovery, bail out.
+	 */
+	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+		return (EIO);
+	}
+
+	/*
+	 * We now update the tail_lsn since much of the recovery has completed
+	 * and there may be space available to use.  If there were no extent
+	 * frees or iunlinks, we can free up the entire log and set the
+	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
+	 * to be the lsn of the last known good LR on disk.  If there are
+	 * extent frees or iunlinks they will have some entries in the AIL;
+	 * so we look at the AIL to determine how to set the tail_lsn.
+	 */
+	xlog_assign_tail_lsn(log->l_mp);
+
+	/*
+	 * Now that we've finished replaying all buffer and inode
+	 * updates, re-read in the superblock.
+	 */
+	bp = xfs_getsb(log->l_mp, 0);
+	XFS_BUF_UNDONE(bp);
+	XFS_BUF_READ(bp);
+	xfsbdstrat(log->l_mp, bp);
+	if ((error = xfs_iowait(bp))) {
+		xfs_ioerror_alert("xlog_do_recover",
+				  log->l_mp, bp, XFS_BUF_ADDR(bp));
+		ASSERT(0);
+		xfs_buf_relse(bp);
+		return error;
+	}
+
+	/* Convert superblock from on-disk format */
+	sbp = &log->l_mp->m_sb;
+	xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, XFS_SB_ALL_BITS);
+	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
+	ASSERT(XFS_SB_GOOD_VERSION(sbp));
+	xfs_buf_relse(bp);
+
+	xlog_recover_check_summary(log);
+
+	/* Normal transactions can now occur */
+	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
+	return 0;
+}
+
+/*
+ * Perform recovery and re-initialize some log variables in xlog_find_tail.
+ *
+ * Return error or zero.
+ */
+int
+xlog_recover(
+	xlog_t		*log,
+	int		readonly)
+{
+	xfs_daddr_t	head_blk, tail_blk;
+	int		error;
+
+	/* find the tail of the log */
+	if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
+		return error;
+
+	if (tail_blk != head_blk) {
+		/* There used to be a comment here:
+		 *
+		 * disallow recovery on read-only mounts.  note -- mount
+		 * checks for ENOSPC and turns it into an intelligent
+		 * error message.
+		 * ...but this is no longer true.  Now, unless you specify
+		 * NORECOVERY (in which case this function would never be
+		 * called), we just go ahead and recover.  We do this all
+		 * under the vfs layer, so we can get away with it unless
+		 * the device itself is read-only, in which case we fail.
+		 */
+		if ((error = xfs_dev_is_read_only(log->l_mp,
+						"recovery required"))) {
+			return error;
+		}
+
+		cmn_err(CE_NOTE,
+			"Starting XFS recovery on filesystem: %s (dev: %s)",
+			log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
+
+		error = xlog_do_recover(log, head_blk, tail_blk);
+		log->l_flags |= XLOG_RECOVERY_NEEDED;
+	}
+	return error;
+}
+
+/*
+ * In the first part of recovery we replay inodes and buffers and build
+ * up the list of extent free items which need to be processed.  Here
+ * we process the extent free items and clean up the on disk unlinked
+ * inode lists.  This is separated from the first part of recovery so
+ * that the root and real-time bitmap inodes can be read in from disk in
+ * between the two stages.  This is necessary so that we can free space
+ * in the real-time portion of the file system.
+ */
+int
+xlog_recover_finish(
+	xlog_t		*log,
+	int		mfsi_flags)
+{
+	/*
+	 * Now we're ready to do the transactions needed for the
+	 * rest of recovery.  Start with completing all the extent
+	 * free intent records and then process the unlinked inode
+	 * lists.  At this point, we essentially run in normal mode
+	 * except that we're still performing recovery actions
+	 * rather than accepting new requests.
+	 */
+	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
+		xlog_recover_process_efis(log);
+		/*
+		 * Sync the log to get all the EFIs out of the AIL.
+		 * This isn't absolutely necessary, but it helps in
+		 * case the unlink transactions would have problems
+		 * pushing the EFIs out of the way.
+		 */
+		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
+			      (XFS_LOG_FORCE | XFS_LOG_SYNC));
+
+		if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) {
+			xlog_recover_process_iunlinks(log);
+		}
+
+		xlog_recover_check_summary(log);
+
+		cmn_err(CE_NOTE,
+			"Ending XFS recovery on filesystem: %s (dev: %s)",
+			log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
+		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
+	} else {
+		cmn_err(CE_DEBUG,
+			"!Ending clean XFS mount for filesystem: %s",
+			log->l_mp->m_fsname);
+	}
+	return 0;
+}
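+
+/*
+ * The generic shape of the intent processing described above, kept
+ * under #if 0 and never built: any extent free intent that never got a
+ * matching "done" record during replay still has real work outstanding
+ * and must be completed now.  ex_intent and the callback are stand-ins
+ * for this illustration, not the kernel's EFI/EFD machinery.
+ */
+#if 0
+struct ex_intent {
+	unsigned long long	id;	/* identifies the logged intent */
+	int			done;	/* a matching done record was seen */
+};
+
+static void
+example_finish_intents(
+	struct ex_intent	*intents,
+	int			n,
+	void			(*finish)(struct ex_intent *))
+{
+	int			i;
+
+	for (i = 0; i < n; i++)
+		if (!intents[i].done)
+			finish(&intents[i]);	/* e.g. free the extent */
+}
+#endif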
+
+
+#if defined(DEBUG)
+/*
+ * Read all of the agf and agi counters and check that they
+ * are consistent with the superblock counters.
+ */
+void
+xlog_recover_check_summary(
+	xlog_t		*log)
+{
+	xfs_mount_t	*mp;
+	xfs_agf_t	*agfp;
+	xfs_agi_t	*agip;
+	xfs_buf_t	*agfbp;
+	xfs_buf_t	*agibp;
+	xfs_daddr_t	agfdaddr;
+	xfs_daddr_t	agidaddr;
+	xfs_buf_t	*sbbp;
+#ifdef XFS_LOUD_RECOVERY
+	xfs_sb_t	*sbp;
+#endif
+	xfs_agnumber_t	agno;
+	__uint64_t	freeblks;
+	__uint64_t	itotal;
+	__uint64_t	ifree;
+
+	mp = log->l_mp;
+
+	freeblks = 0LL;
+	itotal = 0LL;
+	ifree = 0LL;
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
+		agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
+				XFS_FSS_TO_BB(mp, 1), 0);
+		if (XFS_BUF_ISERROR(agfbp)) {
+			xfs_ioerror_alert("xlog_recover_check_summary(agf)",
+						mp, agfbp, agfdaddr);
+		}
+		agfp = XFS_BUF_TO_AGF(agfbp);
+		ASSERT(XFS_AGF_MAGIC ==
+			INT_GET(agfp->agf_magicnum, ARCH_CONVERT));
+		ASSERT(XFS_AGF_GOOD_VERSION(
+			INT_GET(agfp->agf_versionnum, ARCH_CONVERT)));
+		ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno);
+
+		freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) +
+			    INT_GET(agfp->agf_flcount, ARCH_CONVERT);
+		xfs_buf_relse(agfbp);
+
+		agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
+		agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
+				XFS_FSS_TO_BB(mp, 1), 0);
+		if (XFS_BUF_ISERROR(agibp)) {
+			xfs_ioerror_alert("xlog_recover_check_summary(agi)",
+					  mp, agibp, agidaddr);
+		}
+		agip = XFS_BUF_TO_AGI(agibp);
+		ASSERT(XFS_AGI_MAGIC ==
+			INT_GET(agip->agi_magicnum, ARCH_CONVERT));
+		ASSERT(XFS_AGI_GOOD_VERSION(
+			INT_GET(agip->agi_versionnum, ARCH_CONVERT)));
+		ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno);
+
+		itotal += INT_GET(agip->agi_count, ARCH_CONVERT);
+		ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT);
+		xfs_buf_relse(agibp);
+	}
+
+	sbbp = xfs_getsb(mp, 0);
+#ifdef XFS_LOUD_RECOVERY
+	sbp = &mp->m_sb;
+	xfs_xlatesb(XFS_BUF_TO_SBP(sbbp), sbp, 1, XFS_SB_ALL_BITS);
+	cmn_err(CE_NOTE,
+		"xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
+		sbp->sb_icount, itotal);
+	cmn_err(CE_NOTE,
+		"xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
+		sbp->sb_ifree, ifree);
+	cmn_err(CE_NOTE,
+		"xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
+		sbp->sb_fdblocks, freeblks);
+#if 0
+	/*
+	 * This is turned off until I account for the allocation
+	 * btree blocks which live in free space.
+	 */
+	ASSERT(sbp->sb_icount == itotal);
+	ASSERT(sbp->sb_ifree == ifree);
+	ASSERT(sbp->sb_fdblocks == freeblks);
+#endif
+#endif
+	xfs_buf_relse(sbbp);
+}
+#endif /* DEBUG */
diff --git a/fs/xfs/xfs_log_recover.h b/fs/xfs/xfs_log_recover.h
new file mode 100644
index 0000000..42158b4
--- /dev/null
+++ b/fs/xfs/xfs_log_recover.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_LOG_RECOVER_H__
+#define __XFS_LOG_RECOVER_H__
+
+/*
+ * Macros, structures, prototypes for internal log manager use.
+ */
+
+#define XLOG_RHASH_BITS  4
+#define XLOG_RHASH_SIZE	16
+#define XLOG_RHASH_SHIFT 2
+#define XLOG_RHASH(tid)	\
+	((((__uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1))
+
+#define XLOG_MAX_REGIONS_IN_ITEM   (XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK / 2 + 1)
+
+
+/*
+ * item headers are in ri_buf[0].  Additional buffers follow.
+ */
+typedef struct xlog_recover_item {
+	struct xlog_recover_item *ri_next;
+	struct xlog_recover_item *ri_prev;
+	int			 ri_type;
+	int			 ri_cnt;	/* count of regions found */
+	int			 ri_total;	/* total regions */
+	xfs_log_iovec_t		 *ri_buf;	/* ptr to regions buffer */
+} xlog_recover_item_t;
+
+struct xlog_tid;
+typedef struct xlog_recover {
+	struct xlog_recover *r_next;
+	xlog_tid_t	    r_log_tid;		/* log's transaction id */
+	xfs_trans_header_t  r_theader;		/* trans header for partial */
+	int		    r_state;		/* not needed */
+	xfs_lsn_t	    r_lsn;		/* xact lsn */
+	xlog_recover_item_t *r_itemq;		/* q for items */
+} xlog_recover_t;
+
+#define ITEM_TYPE(i)	(*(ushort *)(i)->ri_buf[0].i_addr)
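+
+/*
+ * A sketch (kept under #if 0, never built) of how recovery presumably
+ * finds an in-progress transaction: bucket by XLOG_RHASH(tid), then
+ * walk the r_next chain for a matching r_log_tid.  The example_* name
+ * is invented for this illustration; the real lookup and insert
+ * helpers live in xfs_log_recover.c.
+ */
+#if 0
+static xlog_recover_t *
+example_rhash_lookup(
+	xlog_recover_t	**rhash,	/* rhash[XLOG_RHASH_SIZE] */
+	xlog_tid_t	tid)
+{
+	xlog_recover_t	*trans;
+
+	for (trans = rhash[XLOG_RHASH(tid)]; trans; trans = trans->r_next)
+		if (trans->r_log_tid == tid)
+			return trans;
+	return NULL;
+}
+#endif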
+
+/*
+ * This is the number of entries in the l_buf_cancel_table used during
+ * recovery.
+ */
+#define	XLOG_BC_TABLE_SIZE	64
+
+#define	XLOG_RECOVER_PASS1	1
+#define	XLOG_RECOVER_PASS2	2
+
+#endif	/* __XFS_LOG_RECOVER_H__ */
diff --git a/fs/xfs/xfs_mac.h b/fs/xfs/xfs_mac.h
new file mode 100644
index 0000000..8d59aaf
--- /dev/null
+++ b/fs/xfs/xfs_mac.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2001-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_MAC_H__
+#define __XFS_MAC_H__
+
+/*
+ * Mandatory Access Control
+ *
+ * Layout of a composite MAC label:
+ * ml_list contains the list of categories (MSEN) followed by the list of
+ * divisions (MINT).  The structure declared below is really a header; an
+ * actual label has an ml_list with more than one element.
+ *
+ *      -------------------------------
+ *      | ml_msen_type | ml_mint_type |
+ *      -------------------------------
+ *      | ml_level     | ml_grade     |
+ *      -------------------------------
+ *      | ml_catcount                 |
+ *      -------------------------------
+ *      | ml_divcount                 |
+ *      -------------------------------
+ *      | category 1                  |
+ *      | . . .                       |
+ *      | category N                  | (where N = ml_catcount)
+ *      -------------------------------
+ *      | division 1                  |
+ *      | . . .                       |
+ *      | division M                  | (where M = ml_divcount)
+ *      -------------------------------
+ */
+#define XFS_MAC_MAX_SETS	250
+typedef struct xfs_mac_label {
+	__uint8_t	ml_msen_type;	/* MSEN label type */
+	__uint8_t	ml_mint_type;	/* MINT label type */
+	__uint8_t	ml_level;	/* Hierarchical level */
+	__uint8_t	ml_grade;	/* Hierarchical grade */
+	__uint16_t	ml_catcount;	/* Category count */
+	__uint16_t	ml_divcount;	/* Division count */
+					/* Category set, then Division set */
+	__uint16_t	ml_list[XFS_MAC_MAX_SETS];
+} xfs_mac_label_t;
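+
+/*
+ * A hypothetical helper (kept under #if 0, never built) showing how
+ * much of ml_list a given label actually uses, following the layout
+ * diagram above: the fixed header fields plus one 16-bit word per
+ * category and per division.  The function name is an assumption made
+ * for this illustration, not part of the XFS MAC interface.
+ */
+#if 0
+static int
+example_mac_label_used_size(const xfs_mac_label_t *ml)
+{
+	int	entries = ml->ml_catcount + ml->ml_divcount;
+
+	if (entries > XFS_MAC_MAX_SETS)
+		entries = XFS_MAC_MAX_SETS;
+	return (int)(sizeof(*ml) - sizeof(ml->ml_list)) +
+	       entries * (int)sizeof(ml->ml_list[0]);
+}
+#endif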
+
+/* MSEN label type names. Choose an upper case ASCII character.  */
+#define XFS_MSEN_ADMIN_LABEL	'A'	/* Admin: low<admin != tcsec<high */
+#define XFS_MSEN_EQUAL_LABEL	'E'	/* Wildcard - always equal */
+#define XFS_MSEN_HIGH_LABEL	'H'	/* System High - always dominates */
+#define XFS_MSEN_MLD_HIGH_LABEL	'I'	/* System High, multi-level dir */
+#define XFS_MSEN_LOW_LABEL	'L'	/* System Low - always dominated */
+#define XFS_MSEN_MLD_LABEL	'M'	/* TCSEC label on a multi-level dir */
+#define XFS_MSEN_MLD_LOW_LABEL	'N'	/* System Low, multi-level dir */
+#define XFS_MSEN_TCSEC_LABEL	'T'	/* TCSEC label */
+#define XFS_MSEN_UNKNOWN_LABEL	'U'	/* unknown label */
+
+/* MINT label type names. Choose a lower case ASCII character.  */
+#define XFS_MINT_BIBA_LABEL	'b'	/* Dual of a TCSEC label */
+#define XFS_MINT_EQUAL_LABEL	'e'	/* Wildcard - always equal */
+#define XFS_MINT_HIGH_LABEL	'h'	/* High Grade - always dominates */
+#define XFS_MINT_LOW_LABEL	'l'	/* Low Grade - always dominated */
+
+/* On-disk XFS extended attribute names */
+#define SGI_MAC_FILE	"SGI_MAC_FILE"
+#define SGI_MAC_FILE_SIZE	(sizeof(SGI_MAC_FILE)-1)
+
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_FS_POSIX_MAC
+
+/* NOT YET IMPLEMENTED */
+
+#define MACEXEC		00100
+#define MACWRITE	00200
+#define MACREAD		00400
+
+struct xfs_inode;
+extern int  xfs_mac_iaccess(struct xfs_inode *, mode_t, cred_t *);
+
+#define _MAC_XFS_IACCESS(i,m,c) (xfs_mac_iaccess(i,m,c))
+#define _MAC_VACCESS(v,c,m)	(xfs_mac_vaccess(v,c,m))
+#define _MAC_EXISTS		xfs_mac_vhaslabel
+
+#else
+#define _MAC_XFS_IACCESS(i,m,c)	(0)
+#define _MAC_VACCESS(v,c,m)	(0)
+#define _MAC_EXISTS		(NULL)
+#endif
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_MAC_H__ */
diff --git a/fs/xfs/xfs_macros.c b/fs/xfs/xfs_macros.c
new file mode 100644
index 0000000..ce4f46c
--- /dev/null
+++ b/fs/xfs/xfs_macros.c
@@ -0,0 +1,2136 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#define	XFS_MACRO_C
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_ialloc.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_rw.h"
+#include "xfs_log_priv.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_bit.h"
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_ISNULLDSTARTBLOCK)
+int
+isnulldstartblock(xfs_dfsbno_t x)
+{
+	return ISNULLDSTARTBLOCK(x);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_ISNULLSTARTBLOCK)
+int
+isnullstartblock(xfs_fsblock_t x)
+{
+	return ISNULLSTARTBLOCK(x);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_NULLSTARTBLOCK)
+xfs_fsblock_t
+nullstartblock(int k)
+{
+	return NULLSTARTBLOCK(k);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_STARTBLOCKVAL)
+xfs_filblks_t
+startblockval(xfs_fsblock_t x)
+{
+	return STARTBLOCKVAL(x);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AG_CHECK_DADDR)
+void
+xfs_ag_check_daddr(xfs_mount_t *mp, xfs_daddr_t d, xfs_extlen_t len)
+{
+	XFS_AG_CHECK_DADDR(mp, d, len);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AG_DADDR)
+xfs_daddr_t
+xfs_ag_daddr(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_daddr_t d)
+{
+	return XFS_AG_DADDR(mp, agno, d);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AG_MAXLEVELS)
+int
+xfs_ag_maxlevels(xfs_mount_t *mp)
+{
+	return XFS_AG_MAXLEVELS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGB_TO_DADDR)
+xfs_daddr_t
+xfs_agb_to_daddr(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_agblock_t agbno)
+{
+	return XFS_AGB_TO_DADDR(mp, agno, agbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGB_TO_FSB)
+xfs_fsblock_t
+xfs_agb_to_fsb(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_agblock_t agbno)
+{
+	return XFS_AGB_TO_FSB(mp, agno, agbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGBLOCK_MAX)
+xfs_agblock_t
+xfs_agblock_max(xfs_agblock_t a, xfs_agblock_t b)
+{
+	return XFS_AGBLOCK_MAX(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGBLOCK_MIN)
+xfs_agblock_t
+xfs_agblock_min(xfs_agblock_t a, xfs_agblock_t b)
+{
+	return XFS_AGBLOCK_MIN(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGF_BLOCK)
+xfs_agblock_t
+xfs_agf_block(xfs_mount_t *mp)
+{
+	return XFS_AGF_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGF_GOOD_VERSION)
+int
+xfs_agf_good_version(unsigned v)
+{
+	return XFS_AGF_GOOD_VERSION(v);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGFL_BLOCK)
+xfs_agblock_t
+xfs_agfl_block(xfs_mount_t *mp)
+{
+	return XFS_AGFL_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGI_BLOCK)
+xfs_agblock_t
+xfs_agi_block(xfs_mount_t *mp)
+{
+	return XFS_AGI_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGI_GOOD_VERSION)
+int
+xfs_agi_good_version(unsigned v)
+{
+	return XFS_AGI_GOOD_VERSION(v);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGINO_TO_AGBNO)
+xfs_agblock_t
+xfs_agino_to_agbno(xfs_mount_t *mp, xfs_agino_t i)
+{
+	return XFS_AGINO_TO_AGBNO(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGINO_TO_INO)
+xfs_ino_t
+xfs_agino_to_ino(xfs_mount_t *mp, xfs_agnumber_t a, xfs_agino_t i)
+{
+	return XFS_AGINO_TO_INO(mp, a, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_AGINO_TO_OFFSET)
+int
+xfs_agino_to_offset(xfs_mount_t *mp, xfs_agino_t i)
+{
+	return XFS_AGINO_TO_OFFSET(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_BLOCK_MAXRECS)
+int
+xfs_alloc_block_maxrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_BLOCK_MAXRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_BLOCK_MINRECS)
+int
+xfs_alloc_block_minrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_BLOCK_MINRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_BLOCK_SIZE)
+/*ARGSUSED1*/
+int
+xfs_alloc_block_size(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_BLOCK_SIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_KEY_ADDR)
+/*ARGSUSED3*/
+xfs_alloc_key_t *
+xfs_alloc_key_addr(xfs_alloc_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_KEY_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_PTR_ADDR)
+xfs_alloc_ptr_t *
+xfs_alloc_ptr_addr(xfs_alloc_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_PTR_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ALLOC_REC_ADDR)
+/*ARGSUSED3*/
+xfs_alloc_rec_t *
+xfs_alloc_rec_addr(xfs_alloc_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_ALLOC_REC_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL)
+int
+xfs_attr_leaf_entsize_local(int nlen, int vlen)
+{
+	return XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen, vlen);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX)
+int
+xfs_attr_leaf_entsize_local_max(int bsize)
+{
+	return XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_ENTSIZE_REMOTE)
+int
+xfs_attr_leaf_entsize_remote(int nlen)
+{
+	return XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_NAME)
+char *
+xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
+{
+	return XFS_ATTR_LEAF_NAME(leafp, idx);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_NAME_LOCAL)
+xfs_attr_leaf_name_local_t *
+xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
+{
+	return XFS_ATTR_LEAF_NAME_LOCAL(leafp, idx);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_LEAF_NAME_REMOTE)
+xfs_attr_leaf_name_remote_t *
+xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
+{
+	return XFS_ATTR_LEAF_NAME_REMOTE(leafp, idx);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_SF_ENTSIZE)
+int
+xfs_attr_sf_entsize(xfs_attr_sf_entry_t *sfep)
+{
+	return XFS_ATTR_SF_ENTSIZE(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_SF_ENTSIZE_BYNAME)
+int
+xfs_attr_sf_entsize_byname(int nlen, int vlen)
+{
+	return XFS_ATTR_SF_ENTSIZE_BYNAME(nlen, vlen);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_SF_NEXTENTRY)
+xfs_attr_sf_entry_t *
+xfs_attr_sf_nextentry(xfs_attr_sf_entry_t *sfep)
+{
+	return XFS_ATTR_SF_NEXTENTRY(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ATTR_SF_TOTSIZE)
+int
+xfs_attr_sf_totsize(xfs_inode_t *dp)
+{
+	return XFS_ATTR_SF_TOTSIZE(dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BHVTOI)
+xfs_inode_t *
+xfs_bhvtoi(bhv_desc_t *bhvp)
+{
+	return XFS_BHVTOI(bhvp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BHVTOM)
+xfs_mount_t *
+xfs_bhvtom(bhv_desc_t *bdp)
+{
+	return XFS_BHVTOM(bdp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_VFSTOM)
+xfs_mount_t *
+xfs_vfstom(vfs_t *vfs)
+{
+	return XFS_VFSTOM(vfs);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BM_MAXLEVELS)
+int
+xfs_bm_maxlevels(xfs_mount_t *mp, int w)
+{
+	return XFS_BM_MAXLEVELS(mp, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_DMAXRECS)
+int
+xfs_bmap_block_dmaxrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_DMAXRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_DMINRECS)
+int
+xfs_bmap_block_dminrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_DMINRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_DSIZE)
+int
+xfs_bmap_block_dsize(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_DSIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_IMAXRECS)
+int
+xfs_bmap_block_imaxrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_IMAXRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_IMINRECS)
+int
+xfs_bmap_block_iminrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_IMINRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BLOCK_ISIZE)
+int
+xfs_bmap_block_isize(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_BLOCK_ISIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_KEY_ADDR)
+/*ARGSUSED3*/
+xfs_bmbt_key_t *
+xfs_bmap_broot_key_addr(xfs_bmbt_block_t *bb, int i, int sz)
+{
+	return XFS_BMAP_BROOT_KEY_ADDR(bb, i, sz);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_MAXRECS)
+int
+xfs_bmap_broot_maxrecs(int sz)
+{
+	return XFS_BMAP_BROOT_MAXRECS(sz);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_NUMRECS)
+int
+xfs_bmap_broot_numrecs(xfs_bmdr_block_t *bb)
+{
+	return XFS_BMAP_BROOT_NUMRECS(bb);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_PTR_ADDR)
+xfs_bmbt_ptr_t *
+xfs_bmap_broot_ptr_addr(xfs_bmbt_block_t *bb, int i, int sz)
+{
+	return XFS_BMAP_BROOT_PTR_ADDR(bb, i, sz);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_REC_ADDR)
+/*ARGSUSED3*/
+xfs_bmbt_rec_t *
+xfs_bmap_broot_rec_addr(xfs_bmbt_block_t *bb, int i, int sz)
+{
+	return XFS_BMAP_BROOT_REC_ADDR(bb, i, sz);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_SPACE)
+int
+xfs_bmap_broot_space(xfs_bmdr_block_t *bb)
+{
+	return XFS_BMAP_BROOT_SPACE(bb);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_BROOT_SPACE_CALC)
+int
+xfs_bmap_broot_space_calc(int nrecs)
+{
+	return XFS_BMAP_BROOT_SPACE_CALC(nrecs);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_IBLOCK_SIZE)
+/*ARGSUSED1*/
+int
+xfs_bmap_iblock_size(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_IBLOCK_SIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_INIT)
+void
+xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
+{
+	XFS_BMAP_INIT(flp, fbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_KEY_DADDR)
+/*ARGSUSED3*/
+xfs_bmbt_key_t *
+xfs_bmap_key_daddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_KEY_DADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_KEY_IADDR)
+/*ARGSUSED3*/
+xfs_bmbt_key_t *
+xfs_bmap_key_iaddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_KEY_IADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_PTR_DADDR)
+xfs_bmbt_ptr_t *
+xfs_bmap_ptr_daddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_PTR_DADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_PTR_IADDR)
+xfs_bmbt_ptr_t *
+xfs_bmap_ptr_iaddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_PTR_IADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_RBLOCK_DSIZE)
+/*ARGSUSED1*/
+int
+xfs_bmap_rblock_dsize(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_RBLOCK_DSIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_RBLOCK_ISIZE)
+/*ARGSUSED1*/
+int
+xfs_bmap_rblock_isize(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_RBLOCK_ISIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_REC_DADDR)
+/*ARGSUSED3*/
+xfs_bmbt_rec_t *
+xfs_bmap_rec_daddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_REC_DADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_REC_IADDR)
+/*ARGSUSED3*/
+xfs_bmbt_rec_t *
+xfs_bmap_rec_iaddr(xfs_bmbt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_BMAP_REC_IADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAP_SANITY_CHECK)
+int
+xfs_bmap_sanity_check(xfs_mount_t *mp, xfs_bmbt_block_t *bb, int level)
+{
+	return XFS_BMAP_SANITY_CHECK(mp, bb, level);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMAPI_AFLAG)
+int
+xfs_bmapi_aflag(int w)
+{
+	return XFS_BMAPI_AFLAG(w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BMDR_SPACE_CALC)
+int
+xfs_bmdr_space_calc(int nrecs)
+{
+	return XFS_BMDR_SPACE_CALC(nrecs);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BNO_BLOCK)
+xfs_agblock_t
+xfs_bno_block(xfs_mount_t *mp)
+{
+	return XFS_BNO_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BTREE_LONG_PTRS)
+int
+xfs_btree_long_ptrs(xfs_btnum_t btnum)
+{
+	return XFS_BTREE_LONG_PTRS(btnum);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_AGF)
+xfs_agf_t *
+xfs_buf_to_agf(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_AGF(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_AGFL)
+xfs_agfl_t *
+xfs_buf_to_agfl(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_AGFL(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_AGI)
+xfs_agi_t *
+xfs_buf_to_agi(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_AGI(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_ALLOC_BLOCK)
+xfs_alloc_block_t *
+xfs_buf_to_alloc_block(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_ALLOC_BLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_BLOCK)
+xfs_btree_block_t *
+xfs_buf_to_block(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_BLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_BMBT_BLOCK)
+xfs_bmbt_block_t *
+xfs_buf_to_bmbt_block(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_BMBT_BLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_DINODE)
+xfs_dinode_t *
+xfs_buf_to_dinode(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_DINODE(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_INOBT_BLOCK)
+xfs_inobt_block_t *
+xfs_buf_to_inobt_block(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_INOBT_BLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_LBLOCK)
+xfs_btree_lblock_t *
+xfs_buf_to_lblock(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_LBLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_SBLOCK)
+xfs_btree_sblock_t *
+xfs_buf_to_sblock(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_SBLOCK(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_BUF_TO_SBP)
+xfs_sb_t *
+xfs_buf_to_sbp(xfs_buf_t *bp)
+{
+	return XFS_BUF_TO_SBP(bp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_ASIZE)
+int
+xfs_cfork_asize_disk(xfs_dinode_core_t *dcp, xfs_mount_t *mp)
+{
+	return XFS_CFORK_ASIZE_DISK(dcp, mp);
+}
+int
+xfs_cfork_asize(xfs_dinode_core_t *dcp, xfs_mount_t *mp)
+{
+	return XFS_CFORK_ASIZE(dcp, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_BOFF)
+int
+xfs_cfork_boff_disk(xfs_dinode_core_t *dcp)
+{
+	return XFS_CFORK_BOFF_DISK(dcp);
+}
+int
+xfs_cfork_boff(xfs_dinode_core_t *dcp)
+{
+	return XFS_CFORK_BOFF(dcp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_DSIZE)
+int
+xfs_cfork_dsize_disk(xfs_dinode_core_t *dcp, xfs_mount_t *mp)
+{
+	return XFS_CFORK_DSIZE_DISK(dcp, mp);
+}
+int
+xfs_cfork_dsize(xfs_dinode_core_t *dcp, xfs_mount_t *mp)
+{
+	return XFS_CFORK_DSIZE(dcp, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_FMT_SET)
+void
+xfs_cfork_fmt_set(xfs_dinode_core_t *dcp, int w, int n)
+{
+	XFS_CFORK_FMT_SET(dcp, w, n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_FORMAT)
+int
+xfs_cfork_format(xfs_dinode_core_t *dcp, int w)
+{
+	return XFS_CFORK_FORMAT(dcp, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_NEXT_SET)
+void
+xfs_cfork_next_set(xfs_dinode_core_t *dcp, int w, int n)
+{
+	XFS_CFORK_NEXT_SET(dcp, w, n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_NEXTENTS)
+int
+xfs_cfork_nextents_disk(xfs_dinode_core_t *dcp, int w)
+{
+	return XFS_CFORK_NEXTENTS_DISK(dcp, w);
+}
+int
+xfs_cfork_nextents(xfs_dinode_core_t *dcp, int w)
+{
+	return XFS_CFORK_NEXTENTS(dcp, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_Q)
+int
+xfs_cfork_q_disk(xfs_dinode_core_t *dcp)
+{
+	return XFS_CFORK_Q_DISK(dcp);
+}
+int
+xfs_cfork_q(xfs_dinode_core_t *dcp)
+{
+	return XFS_CFORK_Q(dcp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CFORK_SIZE)
+int
+xfs_cfork_size_disk(xfs_dinode_core_t *dcp, xfs_mount_t *mp, int w)
+{
+	return XFS_CFORK_SIZE_DISK(dcp, mp, w);
+}
+int
+xfs_cfork_size(xfs_dinode_core_t *dcp, xfs_mount_t *mp, int w)
+{
+	return XFS_CFORK_SIZE(dcp, mp, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_CNT_BLOCK)
+xfs_agblock_t
+xfs_cnt_block(xfs_mount_t *mp)
+{
+	return XFS_CNT_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DA_COOKIE_BNO)
+xfs_dablk_t
+xfs_da_cookie_bno(xfs_mount_t *mp, xfs_off_t cookie)
+{
+	return XFS_DA_COOKIE_BNO(mp, cookie);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DA_COOKIE_ENTRY)
+int
+xfs_da_cookie_entry(xfs_mount_t *mp, xfs_off_t cookie)
+{
+	return XFS_DA_COOKIE_ENTRY(mp, cookie);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DA_COOKIE_HASH)
+/*ARGSUSED1*/
+xfs_dahash_t
+xfs_da_cookie_hash(xfs_mount_t *mp, xfs_off_t cookie)
+{
+	return XFS_DA_COOKIE_HASH(mp, cookie);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DA_MAKE_BNOENTRY)
+__uint32_t
+xfs_da_make_bnoentry(xfs_mount_t *mp, xfs_dablk_t bno, int entry)
+{
+	return XFS_DA_MAKE_BNOENTRY(mp, bno, entry);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DA_MAKE_COOKIE)
+xfs_off_t
+xfs_da_make_cookie(xfs_mount_t *mp, xfs_dablk_t bno, int entry,
+		   xfs_dahash_t hash)
+{
+	return XFS_DA_MAKE_COOKIE(mp, bno, entry, hash);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DADDR_TO_AGBNO)
+xfs_agblock_t
+xfs_daddr_to_agbno(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	return XFS_DADDR_TO_AGBNO(mp, d);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DADDR_TO_AGNO)
+xfs_agnumber_t
+xfs_daddr_to_agno(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	return XFS_DADDR_TO_AGNO(mp, d);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DADDR_TO_FSB)
+xfs_fsblock_t
+xfs_daddr_to_fsb(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	return XFS_DADDR_TO_FSB(mp, d);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_APTR)
+char *
+xfs_dfork_aptr(xfs_dinode_t *dip)
+{
+	return XFS_DFORK_APTR(dip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_ASIZE)
+int
+xfs_dfork_asize(xfs_dinode_t *dip, xfs_mount_t *mp)
+{
+	return XFS_DFORK_ASIZE(dip, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_BOFF)
+int
+xfs_dfork_boff(xfs_dinode_t *dip)
+{
+	return XFS_DFORK_BOFF(dip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_DPTR)
+char *
+xfs_dfork_dptr(xfs_dinode_t *dip)
+{
+	return XFS_DFORK_DPTR(dip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_DSIZE)
+int
+xfs_dfork_dsize(xfs_dinode_t *dip, xfs_mount_t *mp)
+{
+	return XFS_DFORK_DSIZE(dip, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_NEXTENTS)
+int
+xfs_dfork_nextents(xfs_dinode_t *dip, int w)
+{
+	return XFS_DFORK_NEXTENTS(dip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_PTR)
+char *
+xfs_dfork_ptr(xfs_dinode_t *dip, int w)
+{
+	return XFS_DFORK_PTR(dip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_Q)
+int
+xfs_dfork_q(xfs_dinode_t *dip)
+{
+	return XFS_DFORK_Q(dip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DFORK_SIZE)
+int
+xfs_dfork_size(xfs_dinode_t *dip, xfs_mount_t *mp, int w)
+{
+	return XFS_DFORK_SIZE(dip, mp, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DINODE_GOOD_VERSION)
+int
+xfs_dinode_good_version(int v)
+{
+	return XFS_DINODE_GOOD_VERSION(v);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYENTRY)
+int
+xfs_dir_leaf_entsize_byentry(xfs_dir_leaf_entry_t *entry)
+{
+	return XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYNAME)
+int
+xfs_dir_leaf_entsize_byname(int len)
+{
+	return XFS_DIR_LEAF_ENTSIZE_BYNAME(len);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_LEAF_NAMESTRUCT)
+xfs_dir_leaf_name_t *
+xfs_dir_leaf_namestruct(xfs_dir_leafblock_t *leafp, int offset)
+{
+	return XFS_DIR_LEAF_NAMESTRUCT(leafp, offset);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_ALLFIT)
+int
+xfs_dir_sf_allfit(int count, int totallen)
+{
+	return XFS_DIR_SF_ALLFIT(count, totallen);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_ENTSIZE_BYENTRY)
+int
+xfs_dir_sf_entsize_byentry(xfs_dir_sf_entry_t *sfep)
+{
+	return XFS_DIR_SF_ENTSIZE_BYENTRY(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_ENTSIZE_BYNAME)
+int
+xfs_dir_sf_entsize_byname(int len)
+{
+	return XFS_DIR_SF_ENTSIZE_BYNAME(len);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_GET_DIRINO)
+void
+xfs_dir_sf_get_dirino(xfs_dir_ino_t *from, xfs_ino_t *to)
+{
+	XFS_DIR_SF_GET_DIRINO(from, to);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_NEXTENTRY)
+xfs_dir_sf_entry_t *
+xfs_dir_sf_nextentry(xfs_dir_sf_entry_t *sfep)
+{
+	return XFS_DIR_SF_NEXTENTRY(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR_SF_PUT_DIRINO)
+void
+xfs_dir_sf_put_dirino(xfs_ino_t *from, xfs_dir_ino_t *to)
+{
+	XFS_DIR_SF_PUT_DIRINO(from, to);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BLOCK_LEAF_P)
+xfs_dir2_leaf_entry_t *
+xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
+{
+	return XFS_DIR2_BLOCK_LEAF_P(btp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BLOCK_TAIL_P)
+xfs_dir2_block_tail_t *
+xfs_dir2_block_tail_p(xfs_mount_t *mp, xfs_dir2_block_t *block)
+{
+	return XFS_DIR2_BLOCK_TAIL_P(mp, block);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BYTE_TO_DA)
+xfs_dablk_t
+xfs_dir2_byte_to_da(xfs_mount_t *mp, xfs_dir2_off_t by)
+{
+	return XFS_DIR2_BYTE_TO_DA(mp, by);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BYTE_TO_DATAPTR)
+/* ARGSUSED */
+xfs_dir2_dataptr_t
+xfs_dir2_byte_to_dataptr(xfs_mount_t *mp, xfs_dir2_off_t by)
+{
+	return XFS_DIR2_BYTE_TO_DATAPTR(mp, by);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BYTE_TO_DB)
+xfs_dir2_db_t
+xfs_dir2_byte_to_db(xfs_mount_t *mp, xfs_dir2_off_t by)
+{
+	return XFS_DIR2_BYTE_TO_DB(mp, by);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_BYTE_TO_OFF)
+xfs_dir2_data_aoff_t
+xfs_dir2_byte_to_off(xfs_mount_t *mp, xfs_dir2_off_t by)
+{
+	return XFS_DIR2_BYTE_TO_OFF(mp, by);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DA_TO_BYTE)
+xfs_dir2_off_t
+xfs_dir2_da_to_byte(xfs_mount_t *mp, xfs_dablk_t da)
+{
+	return XFS_DIR2_DA_TO_BYTE(mp, da);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DA_TO_DB)
+xfs_dir2_db_t
+xfs_dir2_da_to_db(xfs_mount_t *mp, xfs_dablk_t da)
+{
+	return XFS_DIR2_DA_TO_DB(mp, da);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATA_ENTRY_TAG_P)
+xfs_dir2_data_off_t *
+xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
+{
+	return XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATA_ENTSIZE)
+int
+xfs_dir2_data_entsize(int n)
+{
+	return XFS_DIR2_DATA_ENTSIZE(n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATA_UNUSED_TAG_P)
+xfs_dir2_data_off_t *
+xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
+{
+	return XFS_DIR2_DATA_UNUSED_TAG_P(dup);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATAPTR_TO_BYTE)
+/* ARGSUSED */
+xfs_dir2_off_t
+xfs_dir2_dataptr_to_byte(xfs_mount_t *mp, xfs_dir2_dataptr_t dp)
+{
+	return XFS_DIR2_DATAPTR_TO_BYTE(mp, dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATAPTR_TO_DB)
+xfs_dir2_db_t
+xfs_dir2_dataptr_to_db(xfs_mount_t *mp, xfs_dir2_dataptr_t dp)
+{
+	return XFS_DIR2_DATAPTR_TO_DB(mp, dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DATAPTR_TO_OFF)
+xfs_dir2_data_aoff_t
+xfs_dir2_dataptr_to_off(xfs_mount_t *mp, xfs_dir2_dataptr_t dp)
+{
+	return XFS_DIR2_DATAPTR_TO_OFF(mp, dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DB_OFF_TO_BYTE)
+xfs_dir2_off_t
+xfs_dir2_db_off_to_byte(xfs_mount_t *mp, xfs_dir2_db_t db,
+			xfs_dir2_data_aoff_t o)
+{
+	return XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DB_OFF_TO_DATAPTR)
+xfs_dir2_dataptr_t
+xfs_dir2_db_off_to_dataptr(xfs_mount_t *mp, xfs_dir2_db_t db,
+			   xfs_dir2_data_aoff_t o)
+{
+	return XFS_DIR2_DB_OFF_TO_DATAPTR(mp, db, o);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DB_TO_DA)
+xfs_dablk_t
+xfs_dir2_db_to_da(xfs_mount_t *mp, xfs_dir2_db_t db)
+{
+	return XFS_DIR2_DB_TO_DA(mp, db);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DB_TO_FDB)
+xfs_dir2_db_t
+xfs_dir2_db_to_fdb(xfs_mount_t *mp, xfs_dir2_db_t db)
+{
+	return XFS_DIR2_DB_TO_FDB(mp, db);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_DB_TO_FDINDEX)
+int
+xfs_dir2_db_to_fdindex(xfs_mount_t *mp, xfs_dir2_db_t db)
+{
+	return XFS_DIR2_DB_TO_FDINDEX(mp, db);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_LEAF_BESTS_P)
+xfs_dir2_data_off_t *
+xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
+{
+	return XFS_DIR2_LEAF_BESTS_P(ltp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_LEAF_TAIL_P)
+xfs_dir2_leaf_tail_t *
+xfs_dir2_leaf_tail_p(xfs_mount_t *mp, xfs_dir2_leaf_t *lp)
+{
+	return XFS_DIR2_LEAF_TAIL_P(mp, lp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_MAX_LEAF_ENTS)
+int
+xfs_dir2_max_leaf_ents(xfs_mount_t *mp)
+{
+	return XFS_DIR2_MAX_LEAF_ENTS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_ENTSIZE_BYENTRY)
+int
+xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
+{
+	return XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp, sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_FIRSTENTRY)
+xfs_dir2_sf_entry_t *
+xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp)
+{
+	return XFS_DIR2_SF_FIRSTENTRY(sfp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_ENTSIZE_BYNAME)
+int
+xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len)
+{
+	return XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, len);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_GET_INUMBER)
+xfs_intino_t
+xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from)
+{
+	return XFS_DIR2_SF_GET_INUMBER(sfp, from);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_GET_OFFSET)
+xfs_dir2_data_aoff_t
+xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
+{
+	return XFS_DIR2_SF_GET_OFFSET(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_HDR_SIZE)
+int
+xfs_dir2_sf_hdr_size(int i8count)
+{
+	return XFS_DIR2_SF_HDR_SIZE(i8count);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_INUMBERP)
+xfs_dir2_inou_t *
+xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep)
+{
+	return XFS_DIR2_SF_INUMBERP(sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_NEXTENTRY)
+xfs_dir2_sf_entry_t *
+xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
+{
+	return XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_PUT_INUMBER)
+void
+xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from, xfs_dir2_inou_t *to)
+{
+	XFS_DIR2_SF_PUT_INUMBER(sfp, from, to);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_DIR2_SF_PUT_OFFSET)
+void
+xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
+{
+	XFS_DIR2_SF_PUT_OFFSET(sfep, off);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_EXTFMT_INODE )
+xfs_exntfmt_t
+xfs_extfmt_inode(struct xfs_inode *ip)
+{
+	return XFS_EXTFMT_INODE(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_EXTLEN_MAX)
+xfs_extlen_t
+xfs_extlen_max(xfs_extlen_t a, xfs_extlen_t b)
+{
+	return XFS_EXTLEN_MAX(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_EXTLEN_MIN)
+xfs_extlen_t
+xfs_extlen_min(xfs_extlen_t a, xfs_extlen_t b)
+{
+	return XFS_EXTLEN_MIN(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FILBLKS_MAX)
+xfs_filblks_t
+xfs_filblks_max(xfs_filblks_t a, xfs_filblks_t b)
+{
+	return XFS_FILBLKS_MAX(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FILBLKS_MIN)
+xfs_filblks_t
+xfs_filblks_min(xfs_filblks_t a, xfs_filblks_t b)
+{
+	return XFS_FILBLKS_MIN(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FILEOFF_MAX)
+xfs_fileoff_t
+xfs_fileoff_max(xfs_fileoff_t a, xfs_fileoff_t b)
+{
+	return XFS_FILEOFF_MAX(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FILEOFF_MIN)
+xfs_fileoff_t
+xfs_fileoff_min(xfs_fileoff_t a, xfs_fileoff_t b)
+{
+	return XFS_FILEOFF_MIN(a, b);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FSB_SANITY_CHECK)
+int
+xfs_fsb_sanity_check(xfs_mount_t *mp, xfs_fsblock_t fsbno)
+{
+	return XFS_FSB_SANITY_CHECK(mp, fsbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FSB_TO_AGBNO)
+xfs_agblock_t
+xfs_fsb_to_agbno(xfs_mount_t *mp, xfs_fsblock_t fsbno)
+{
+	return XFS_FSB_TO_AGBNO(mp, fsbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FSB_TO_AGNO)
+xfs_agnumber_t
+xfs_fsb_to_agno(xfs_mount_t *mp, xfs_fsblock_t fsbno)
+{
+	return XFS_FSB_TO_AGNO(mp, fsbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FSB_TO_DADDR)
+xfs_daddr_t
+xfs_fsb_to_daddr(xfs_mount_t *mp, xfs_fsblock_t fsbno)
+{
+	return XFS_FSB_TO_DADDR(mp, fsbno);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_FSB_TO_DB)
+xfs_daddr_t
+xfs_fsb_to_db(xfs_inode_t *ip, xfs_fsblock_t fsb)
+{
+	return XFS_FSB_TO_DB(ip, fsb);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_HDR_BLOCK)
+xfs_agblock_t
+xfs_hdr_block(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	return XFS_HDR_BLOCK(mp, d);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IALLOC_BLOCKS)
+xfs_extlen_t
+xfs_ialloc_blocks(xfs_mount_t *mp)
+{
+	return XFS_IALLOC_BLOCKS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IALLOC_FIND_FREE)
+int
+xfs_ialloc_find_free(xfs_inofree_t *fp)
+{
+	return XFS_IALLOC_FIND_FREE(fp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IALLOC_INODES)
+int
+xfs_ialloc_inodes(xfs_mount_t *mp)
+{
+	return XFS_IALLOC_INODES(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IBT_BLOCK)
+xfs_agblock_t
+xfs_ibt_block(xfs_mount_t *mp)
+{
+	return XFS_IBT_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_ASIZE)
+int
+xfs_ifork_asize(xfs_inode_t *ip)
+{
+	return XFS_IFORK_ASIZE(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_DSIZE)
+int
+xfs_ifork_dsize(xfs_inode_t *ip)
+{
+	return XFS_IFORK_DSIZE(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_FMT_SET)
+void
+xfs_ifork_fmt_set(xfs_inode_t *ip, int w, int n)
+{
+	XFS_IFORK_FMT_SET(ip, w, n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_FORMAT)
+int
+xfs_ifork_format(xfs_inode_t *ip, int w)
+{
+	return XFS_IFORK_FORMAT(ip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_NEXT_SET)
+void
+xfs_ifork_next_set(xfs_inode_t *ip, int w, int n)
+{
+	XFS_IFORK_NEXT_SET(ip, w, n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_NEXTENTS)
+int
+xfs_ifork_nextents(xfs_inode_t *ip, int w)
+{
+	return XFS_IFORK_NEXTENTS(ip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_PTR)
+xfs_ifork_t *
+xfs_ifork_ptr(xfs_inode_t *ip, int w)
+{
+	return XFS_IFORK_PTR(ip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_Q)
+int
+xfs_ifork_q(xfs_inode_t *ip)
+{
+	return XFS_IFORK_Q(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IFORK_SIZE)
+int
+xfs_ifork_size(xfs_inode_t *ip, int w)
+{
+	return XFS_IFORK_SIZE(ip, w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ILOG_FBROOT)
+int
+xfs_ilog_fbroot(int w)
+{
+	return XFS_ILOG_FBROOT(w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ILOG_FDATA)
+int
+xfs_ilog_fdata(int w)
+{
+	return XFS_ILOG_FDATA(w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ILOG_FEXT)
+int
+xfs_ilog_fext(int w)
+{
+	return XFS_ILOG_FEXT(w);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_IN_MAXLEVELS)
+int
+xfs_in_maxlevels(xfs_mount_t *mp)
+{
+	return XFS_IN_MAXLEVELS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_AGBNO_BITS)
+int
+xfs_ino_agbno_bits(xfs_mount_t *mp)
+{
+	return XFS_INO_AGBNO_BITS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_AGINO_BITS)
+int
+xfs_ino_agino_bits(xfs_mount_t *mp)
+{
+	return XFS_INO_AGINO_BITS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_AGNO_BITS)
+int
+xfs_ino_agno_bits(xfs_mount_t *mp)
+{
+	return XFS_INO_AGNO_BITS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_BITS)
+int
+xfs_ino_bits(xfs_mount_t *mp)
+{
+	return XFS_INO_BITS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_MASK)
+__uint32_t
+xfs_ino_mask(int k)
+{
+	return XFS_INO_MASK(k);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_OFFSET_BITS)
+int
+xfs_ino_offset_bits(xfs_mount_t *mp)
+{
+	return XFS_INO_OFFSET_BITS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_TO_AGBNO)
+xfs_agblock_t
+xfs_ino_to_agbno(xfs_mount_t *mp, xfs_ino_t i)
+{
+	return XFS_INO_TO_AGBNO(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_TO_AGINO)
+xfs_agino_t
+xfs_ino_to_agino(xfs_mount_t *mp, xfs_ino_t i)
+{
+	return XFS_INO_TO_AGINO(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_TO_AGNO)
+xfs_agnumber_t
+xfs_ino_to_agno(xfs_mount_t *mp, xfs_ino_t i)
+{
+	return XFS_INO_TO_AGNO(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_TO_FSB)
+xfs_fsblock_t
+xfs_ino_to_fsb(xfs_mount_t *mp, xfs_ino_t i)
+{
+	return XFS_INO_TO_FSB(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INO_TO_OFFSET)
+int
+xfs_ino_to_offset(xfs_mount_t *mp, xfs_ino_t i)
+{
+	return XFS_INO_TO_OFFSET(mp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_BLOCK_MAXRECS)
+int
+xfs_inobt_block_maxrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_BLOCK_MAXRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_BLOCK_MINRECS)
+int
+xfs_inobt_block_minrecs(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_BLOCK_MINRECS(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_BLOCK_SIZE)
+/*ARGSUSED1*/
+int
+xfs_inobt_block_size(int lev, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_BLOCK_SIZE(lev, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_CLR_FREE)
+void
+xfs_inobt_clr_free(xfs_inobt_rec_t *rp, int i)
+{
+	XFS_INOBT_CLR_FREE(rp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_IS_FREE)
+int
+xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i)
+{
+	return XFS_INOBT_IS_FREE(rp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_IS_LAST_REC)
+int
+xfs_inobt_is_last_rec(xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_IS_LAST_REC(cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_KEY_ADDR)
+/*ARGSUSED3*/
+xfs_inobt_key_t *
+xfs_inobt_key_addr(xfs_inobt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_KEY_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_MASK)
+xfs_inofree_t
+xfs_inobt_mask(int i)
+{
+	return XFS_INOBT_MASK(i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_MASKN)
+xfs_inofree_t
+xfs_inobt_maskn(int i, int n)
+{
+	return XFS_INOBT_MASKN(i, n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_PTR_ADDR)
+xfs_inobt_ptr_t *
+xfs_inobt_ptr_addr(xfs_inobt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_PTR_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_REC_ADDR)
+/*ARGSUSED3*/
+xfs_inobt_rec_t *
+xfs_inobt_rec_addr(xfs_inobt_block_t *bb, int i, xfs_btree_cur_t *cur)
+{
+	return XFS_INOBT_REC_ADDR(bb, i, cur);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_SET_FREE)
+void
+xfs_inobt_set_free(xfs_inobt_rec_t *rp, int i)
+{
+	XFS_INOBT_SET_FREE(rp, i);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ITOBHV)
+bhv_desc_t *
+xfs_itobhv(xfs_inode_t *ip)
+{
+	return XFS_ITOBHV(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_ITOV)
+vnode_t *
+xfs_itov(xfs_inode_t *ip)
+{
+	return XFS_ITOV(ip);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LBLOG)
+int
+xfs_lblog(xfs_mount_t *mp)
+{
+	return XFS_LBLOG(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LBSIZE)
+int
+xfs_lbsize(xfs_mount_t *mp)
+{
+	return XFS_LBSIZE(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_ALL_FREE)
+void
+xfs_lic_all_free(xfs_log_item_chunk_t *cp)
+{
+	XFS_LIC_ALL_FREE(cp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_ARE_ALL_FREE)
+int
+xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
+{
+	return XFS_LIC_ARE_ALL_FREE(cp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_CLAIM)
+void
+xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
+{
+	XFS_LIC_CLAIM(cp, slot);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_DESC_TO_CHUNK)
+xfs_log_item_chunk_t *
+xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
+{
+	return XFS_LIC_DESC_TO_CHUNK(dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_DESC_TO_SLOT)
+int
+xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
+{
+	return XFS_LIC_DESC_TO_SLOT(dp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_INIT)
+void
+xfs_lic_init(xfs_log_item_chunk_t *cp)
+{
+	XFS_LIC_INIT(cp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_INIT_SLOT)
+void
+xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
+{
+	XFS_LIC_INIT_SLOT(cp, slot);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_ISFREE)
+int
+xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
+{
+	return XFS_LIC_ISFREE(cp, slot);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_RELSE)
+void
+xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
+{
+	XFS_LIC_RELSE(cp, slot);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_SLOT)
+xfs_log_item_desc_t *
+xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
+{
+	return XFS_LIC_SLOT(cp, slot);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LIC_VACANCY)
+int
+xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
+{
+	return XFS_LIC_VACANCY(cp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_LITINO)
+int
+xfs_litino(xfs_mount_t *mp)
+{
+	return XFS_LITINO(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MAKE_IPTR)
+xfs_dinode_t *
+xfs_make_iptr(xfs_mount_t *mp, xfs_buf_t *b, int o)
+{
+	return XFS_MAKE_IPTR(mp, b, o);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MASK32HI)
+__uint32_t
+xfs_mask32hi(int n)
+{
+	return XFS_MASK32HI(n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MASK32LO)
+__uint32_t
+xfs_mask32lo(int n)
+{
+	return XFS_MASK32LO(n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MASK64HI)
+__uint64_t
+xfs_mask64hi(int n)
+{
+	return XFS_MASK64HI(n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MASK64LO)
+__uint64_t
+xfs_mask64lo(int n)
+{
+	return XFS_MASK64LO(n);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MIN_FREELIST)
+int
+xfs_min_freelist(xfs_agf_t *a, xfs_mount_t *mp)
+{
+	return XFS_MIN_FREELIST(a, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MIN_FREELIST_PAG)
+int
+xfs_min_freelist_pag(xfs_perag_t *pag, xfs_mount_t *mp)
+{
+	return XFS_MIN_FREELIST_PAG(pag, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MIN_FREELIST_RAW)
+int
+xfs_min_freelist_raw(uint bl, uint cl, xfs_mount_t *mp)
+{
+	return XFS_MIN_FREELIST_RAW(bl, cl, mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_MTOVFS)
+vfs_t *
+xfs_mtovfs(xfs_mount_t *mp)
+{
+	return XFS_MTOVFS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_OFFBNO_TO_AGINO)
+xfs_agino_t
+xfs_offbno_to_agino(xfs_mount_t *mp, xfs_agblock_t b, int o)
+{
+	return XFS_OFFBNO_TO_AGINO(mp, b, o);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_PREALLOC_BLOCKS)
+xfs_agblock_t
+xfs_prealloc_blocks(xfs_mount_t *mp)
+{
+	return XFS_PREALLOC_BLOCKS(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_BLOCK)
+xfs_agblock_t
+xfs_sb_block(xfs_mount_t *mp)
+{
+	return XFS_SB_BLOCK(mp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_GOOD_VERSION)
+int
+xfs_sb_good_version(xfs_sb_t *sbp)
+{
+	return XFS_SB_GOOD_VERSION(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_ADDATTR)
+void
+xfs_sb_version_addattr(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_ADDATTR(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_ADDDALIGN)
+void
+xfs_sb_version_adddalign(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_ADDDALIGN(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_ADDNLINK)
+void
+xfs_sb_version_addnlink(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_ADDNLINK(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_ADDQUOTA)
+void
+xfs_sb_version_addquota(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_ADDQUOTA(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_ADDSHARED)
+void
+xfs_sb_version_addshared(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_ADDSHARED(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASALIGN)
+int
+xfs_sb_version_hasalign(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASALIGN(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASATTR)
+int
+xfs_sb_version_hasattr(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASATTR(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASDALIGN)
+int
+xfs_sb_version_hasdalign(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASDALIGN(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASDIRV2)
+int
+xfs_sb_version_hasdirv2(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASDIRV2(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASEXTFLGBIT)
+int
+xfs_sb_version_hasextflgbit(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASEXTFLGBIT(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASNLINK)
+int
+xfs_sb_version_hasnlink(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASNLINK(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASQUOTA)
+int
+xfs_sb_version_hasquota(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASQUOTA(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASSHARED)
+int
+xfs_sb_version_hasshared(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASSHARED(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_NUM)
+int
+xfs_sb_version_num(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_NUM(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_SUBALIGN)
+void
+xfs_sb_version_subalign(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_SUBALIGN(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_SUBSHARED)
+void
+xfs_sb_version_subshared(xfs_sb_t *sbp)
+{
+	XFS_SB_VERSION_SUBSHARED(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASLOGV2)
+int
+xfs_sb_version_haslogv2(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASLOGV2(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASSECTOR)
+int
+xfs_sb_version_hassector(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASSECTOR(sbp);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_TONEW)
+unsigned
+xfs_sb_version_tonew(unsigned v)
+{
+	return XFS_SB_VERSION_TONEW(v);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_TOOLD)
+unsigned
+xfs_sb_version_toold(unsigned v)
+{
+	return XFS_SB_VERSION_TOOLD(v);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XLOG_GRANT_ADD_SPACE)
+void
+xlog_grant_add_space(xlog_t *log, int bytes, int type)
+{
+	XLOG_GRANT_ADD_SPACE(log, bytes, type);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XLOG_GRANT_SUB_SPACE)
+void
+xlog_grant_sub_space(xlog_t *log, int bytes, int type)
+{
+	XLOG_GRANT_SUB_SPACE(log, bytes, type);
+}
+#endif
+
+#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_SB_VERSION_HASMOREBITS)
+int
+xfs_sb_version_hasmorebits(xfs_sb_t *sbp)
+{
+	return XFS_SB_VERSION_HASMOREBITS(sbp);
+}
+#endif
+
diff --git a/fs/xfs/xfs_macros.h b/fs/xfs/xfs_macros.h
new file mode 100644
index 0000000..0a93075
--- /dev/null
+++ b/fs/xfs/xfs_macros.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_MACROS_H__
+#define	__XFS_MACROS_H__
+
+/*
+ * Set for debug kernels and simulation
+ * These replacements save space.
+ * Used in xfs_macros.c.
+ */
+#define	XFS_WANT_SPACE_C	\
+	(!defined(_STANDALONE) && defined(DEBUG))
+
+/*
+ * Set for debug simulation and kernel builds, but not for standalone.
+ * These replacements do not save space.
+ * Used in xfs_macros.c.
+ */
+#define	XFS_WANT_FUNCS_C	\
+	(!defined(_STANDALONE) && defined(DEBUG))
+
+/*
+ * Corresponding names used in .h files.
+ */
+#define	XFS_WANT_SPACE	(XFS_WANT_SPACE_C && !defined(XFS_MACRO_C))
+#define	XFS_WANT_FUNCS	(XFS_WANT_FUNCS_C && !defined(XFS_MACRO_C))
+
+/*
+ * These are the macros that get turned into functions to save space.
+ */
+#define	XFSSO_NULLSTARTBLOCK 1
+#define	XFSSO_XFS_AGB_TO_DADDR 1
+#define XFSSO_XFS_AGB_TO_FSB 1
+#define	XFSSO_XFS_AGINO_TO_INO 1
+#define	XFSSO_XFS_ALLOC_BLOCK_MINRECS 1
+#define	XFSSO_XFS_ATTR_SF_NEXTENTRY 1
+#define	XFSSO_XFS_BMAP_BLOCK_DMAXRECS 1
+#define	XFSSO_XFS_BMAP_BLOCK_IMAXRECS 1
+#define	XFSSO_XFS_BMAP_BLOCK_IMINRECS 1
+#define	XFSSO_XFS_BMAP_INIT 1
+#define	XFSSO_XFS_BMAP_PTR_IADDR 1
+#define	XFSSO_XFS_BMAP_SANITY_CHECK 1
+#define	XFSSO_XFS_BMAPI_AFLAG 1
+#define	XFSSO_XFS_CFORK_SIZE 1
+#define	XFSSO_XFS_DA_COOKIE_BNO 1
+#define	XFSSO_XFS_DA_COOKIE_ENTRY 1
+#define	XFSSO_XFS_DADDR_TO_AGBNO 1
+#define	XFSSO_XFS_DADDR_TO_FSB 1
+#define	XFSSO_XFS_DFORK_PTR 1
+#define	XFSSO_XFS_DIR_SF_GET_DIRINO 1
+#define	XFSSO_XFS_DIR_SF_NEXTENTRY 1
+#define	XFSSO_XFS_DIR_SF_PUT_DIRINO 1
+#define	XFSSO_XFS_FILBLKS_MIN 1
+#define	XFSSO_XFS_FSB_SANITY_CHECK 1
+#define	XFSSO_XFS_FSB_TO_DADDR 1
+#define	XFSSO_XFS_FSB_TO_DB 1
+#define	XFSSO_XFS_IALLOC_INODES 1
+#define	XFSSO_XFS_IFORK_ASIZE 1
+#define	XFSSO_XFS_IFORK_DSIZE 1
+#define	XFSSO_XFS_IFORK_FORMAT 1
+#define	XFSSO_XFS_IFORK_NEXT_SET 1
+#define	XFSSO_XFS_IFORK_NEXTENTS 1
+#define	XFSSO_XFS_IFORK_PTR 1
+#define	XFSSO_XFS_ILOG_FBROOT 1
+#define	XFSSO_XFS_ILOG_FEXT 1
+#define	XFSSO_XFS_INO_MASK 1
+#define	XFSSO_XFS_INO_TO_FSB 1
+#define	XFSSO_XFS_INODE_CLEAR_READ_AHEAD 1
+#define	XFSSO_XFS_MIN_FREELIST 1
+#define XFSSO_XFS_SB_GOOD_VERSION 1
+#define XFSSO_XFS_SB_VERSION_HASNLINK 1
+#define	XFSSO_XLOG_GRANT_ADD_SPACE 1
+#define	XFSSO_XLOG_GRANT_SUB_SPACE 1
+
+#endif	/* __XFS_MACROS_H__ */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
new file mode 100644
index 0000000..b57423c
--- /dev/null
+++ b/fs/xfs/xfs_mount.c
@@ -0,0 +1,1586 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_rtalloc.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_rw.h"
+#include "xfs_quota.h"
+#include "xfs_fsops.h"
+
+STATIC void	xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
+STATIC int	xfs_uuid_mount(xfs_mount_t *);
+STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
+
+static struct {
+    short offset;
+    short type;     /* 0 = integer
+		* 1 = binary / string (no translation)
+		*/
+} xfs_sb_info[] = {
+    { offsetof(xfs_sb_t, sb_magicnum),   0 },
+    { offsetof(xfs_sb_t, sb_blocksize),  0 },
+    { offsetof(xfs_sb_t, sb_dblocks),    0 },
+    { offsetof(xfs_sb_t, sb_rblocks),    0 },
+    { offsetof(xfs_sb_t, sb_rextents),   0 },
+    { offsetof(xfs_sb_t, sb_uuid),       1 },
+    { offsetof(xfs_sb_t, sb_logstart),   0 },
+    { offsetof(xfs_sb_t, sb_rootino),    0 },
+    { offsetof(xfs_sb_t, sb_rbmino),     0 },
+    { offsetof(xfs_sb_t, sb_rsumino),    0 },
+    { offsetof(xfs_sb_t, sb_rextsize),   0 },
+    { offsetof(xfs_sb_t, sb_agblocks),   0 },
+    { offsetof(xfs_sb_t, sb_agcount),    0 },
+    { offsetof(xfs_sb_t, sb_rbmblocks),  0 },
+    { offsetof(xfs_sb_t, sb_logblocks),  0 },
+    { offsetof(xfs_sb_t, sb_versionnum), 0 },
+    { offsetof(xfs_sb_t, sb_sectsize),   0 },
+    { offsetof(xfs_sb_t, sb_inodesize),  0 },
+    { offsetof(xfs_sb_t, sb_inopblock),  0 },
+    { offsetof(xfs_sb_t, sb_fname[0]),   1 },
+    { offsetof(xfs_sb_t, sb_blocklog),   0 },
+    { offsetof(xfs_sb_t, sb_sectlog),    0 },
+    { offsetof(xfs_sb_t, sb_inodelog),   0 },
+    { offsetof(xfs_sb_t, sb_inopblog),   0 },
+    { offsetof(xfs_sb_t, sb_agblklog),   0 },
+    { offsetof(xfs_sb_t, sb_rextslog),   0 },
+    { offsetof(xfs_sb_t, sb_inprogress), 0 },
+    { offsetof(xfs_sb_t, sb_imax_pct),   0 },
+    { offsetof(xfs_sb_t, sb_icount),     0 },
+    { offsetof(xfs_sb_t, sb_ifree),      0 },
+    { offsetof(xfs_sb_t, sb_fdblocks),   0 },
+    { offsetof(xfs_sb_t, sb_frextents),  0 },
+    { offsetof(xfs_sb_t, sb_uquotino),   0 },
+    { offsetof(xfs_sb_t, sb_gquotino),   0 },
+    { offsetof(xfs_sb_t, sb_qflags),     0 },
+    { offsetof(xfs_sb_t, sb_flags),      0 },
+    { offsetof(xfs_sb_t, sb_shared_vn),  0 },
+    { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
+    { offsetof(xfs_sb_t, sb_unit),	 0 },
+    { offsetof(xfs_sb_t, sb_width),	 0 },
+    { offsetof(xfs_sb_t, sb_dirblklog),	 0 },
+    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
+    { offsetof(xfs_sb_t, sb_logsectsize), 0 },
+    { offsetof(xfs_sb_t, sb_logsunit),	 0 },
+    { offsetof(xfs_sb_t, sb_features2),	 0 },
+    { sizeof(xfs_sb_t),			 0 }
+};
+
+/*
+ * Return a pointer to an initialized xfs_mount structure.
+ */
+xfs_mount_t *
+xfs_mount_init(void)
+{
+	xfs_mount_t *mp;
+
+	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
+
+	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
+	spinlock_init(&mp->m_sb_lock, "xfs_sb");
+	mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");
+	initnsema(&mp->m_growlock, 1, "xfs_grow");
+	/*
+	 * Initialize the AIL.
+	 */
+	xfs_trans_ail_init(mp);
+
+	atomic_set(&mp->m_active_trans, 0);
+
+	return mp;
+}
+
+/*
+ * Free up the resources associated with a mount structure.  Assume that
+ * the structure was initially zeroed, so we can tell which fields got
+ * initialized.
+ */
+void
+xfs_mount_free(
+	xfs_mount_t *mp,
+	int	    remove_bhv)
+{
+	if (mp->m_ihash)
+		xfs_ihash_free(mp);
+	if (mp->m_chash)
+		xfs_chash_free(mp);
+
+	if (mp->m_perag) {
+		int	agno;
+
+		for (agno = 0; agno < mp->m_maxagi; agno++)
+			if (mp->m_perag[agno].pagb_list)
+				kmem_free(mp->m_perag[agno].pagb_list,
+						sizeof(xfs_perag_busy_t) *
+							XFS_PAGB_NUM_SLOTS);
+		kmem_free(mp->m_perag,
+			  sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
+	}
+
+	AIL_LOCK_DESTROY(&mp->m_ail_lock);
+	spinlock_destroy(&mp->m_sb_lock);
+	mutex_destroy(&mp->m_ilock);
+	freesema(&mp->m_growlock);
+	if (mp->m_quotainfo)
+		XFS_QM_DONE(mp);
+
+	if (mp->m_fsname != NULL)
+		kmem_free(mp->m_fsname, mp->m_fsname_len);
+
+	if (remove_bhv) {
+		struct vfs	*vfsp = XFS_MTOVFS(mp);
+
+		bhv_remove_all_vfsops(vfsp, 0);
+		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
+	}
+
+	kmem_free(mp, sizeof(xfs_mount_t));
+}
+
+
+/*
+ * Check the validity of the SB found.
+ */
+STATIC int
+xfs_mount_validate_sb(
+	xfs_mount_t	*mp,
+	xfs_sb_t	*sbp)
+{
+	/*
+	 * If the log device and data device have the
+	 * same device number, the log is internal.
+	 * Consequently, the sb_logstart should be non-zero.  If
+	 * we have a zero sb_logstart in this case, we may be trying to mount
+	 * a volume filesystem in a non-volume manner.
+	 */
+	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+		cmn_err(CE_WARN, "XFS: bad magic number");
+		return XFS_ERROR(EWRONGFS);
+	}
+
+	if (!XFS_SB_GOOD_VERSION(sbp)) {
+		cmn_err(CE_WARN, "XFS: bad version");
+		return XFS_ERROR(EWRONGFS);
+	}
+
+	if (unlikely(
+	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
+		cmn_err(CE_WARN,
+	"XFS: filesystem is marked as having an external log; "
+	"specify logdev on the\nmount command line.");
+		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)",
+				     XFS_ERRLEVEL_HIGH, mp, sbp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	if (unlikely(
+	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
+		cmn_err(CE_WARN,
+	"XFS: filesystem is marked as having an internal log; "
+	"don't specify logdev on\nthe mount command line.");
+		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)",
+				     XFS_ERRLEVEL_HIGH, mp, sbp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	/*
+	 * More sanity checking. These were stolen directly from
+	 * xfs_repair.
+	 */
+	if (unlikely(
+	    sbp->sb_agcount <= 0					||
+	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
+	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
+	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
+	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
+	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
+	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
+	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
+	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
+	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
+	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
+	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
+	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
+	    sbp->sb_imax_pct > 100)) {
+		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
+		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
+				     XFS_ERRLEVEL_LOW, mp, sbp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	/*
+	 * Sanity check AG count, size fields against data size field
+	 */
+	if (unlikely(
+	    sbp->sb_dblocks == 0 ||
+	    sbp->sb_dblocks >
+	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
+	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
+			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
+		cmn_err(CE_WARN, "XFS: SB sanity check 2 failed");
+		XFS_ERROR_REPORT("xfs_mount_validate_sb(4)",
+				 XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
+	ASSERT(sbp->sb_blocklog >= BBSHIFT);
+
+#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
+	if (unlikely(
+	    (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
+	    (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
+#else                  /* Limited by UINT_MAX of sectors */
+	if (unlikely(
+	    (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
+	    (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
+#endif
+		cmn_err(CE_WARN,
+	"XFS: File system is too large to be mounted on this system.");
+		return XFS_ERROR(E2BIG);
+	}
+
+	if (unlikely(sbp->sb_inprogress)) {
+		cmn_err(CE_WARN, "XFS: file system busy");
+		XFS_ERROR_REPORT("xfs_mount_validate_sb(5)",
+				 XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	/*
+	 * Until this is fixed, only page-sized or smaller data blocks work.
+	 */
+	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
+		cmn_err(CE_WARN,
+		"XFS: Attempted to mount file system with blocksize %d bytes",
+			sbp->sb_blocksize);
+		cmn_err(CE_WARN,
+		"XFS: Only page-sized (%d) or less blocksizes currently work.",
+			PAGE_SIZE);
+		return XFS_ERROR(ENOSYS);
+	}
+
+	return 0;
+}
+
+xfs_agnumber_t
+xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount)
+{
+	xfs_agnumber_t	index, max_metadata;
+	xfs_perag_t	*pag;
+	xfs_agino_t	agino;
+	xfs_ino_t	ino;
+	xfs_sb_t	*sbp = &mp->m_sb;
+	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;
+
+	/* Check to see if the filesystem can overflow 32 bit inodes */
+	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
+	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
+
+	/* Clear the mount flag if no inode can overflow 32 bits
+	 * on this filesystem, or if specifically requested.
+	 */
+	if ((mp->m_flags & XFS_MOUNT_32BITINOOPT) && ino > max_inum) {
+		mp->m_flags |= XFS_MOUNT_32BITINODES;
+	} else {
+		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
+	}
+
+	/* If we can overflow then set up the ag headers accordingly */
+	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
+		/* Calculate how much should be reserved for inodes to
+		 * meet the max inode percentage.
+		 */
+		if (mp->m_maxicount) {
+			__uint64_t	icount;
+
+			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
+			do_div(icount, 100);
+			icount += sbp->sb_agblocks - 1;
+			do_div(icount, mp->m_ialloc_blks);
+			max_metadata = icount;
+		} else {
+			max_metadata = agcount;
+		}
+		for (index = 0; index < agcount; index++) {
+			ino = XFS_AGINO_TO_INO(mp, index, agino);
+			if (ino > max_inum) {
+				index++;
+				break;
+			}
+
+			/* This ag is preferred for inodes */
+			pag = &mp->m_perag[index];
+			pag->pagi_inodeok = 1;
+			if (index < max_metadata)
+				pag->pagf_metadata = 1;
+		}
+	} else {
+		/* Set up default behavior for smaller filesystems */
+		for (index = 0; index < agcount; index++) {
+			pag = &mp->m_perag[index];
+			pag->pagi_inodeok = 1;
+		}
+	}
+	return index;
+}
+
+/*
+ * xfs_xlatesb
+ *
+ *     data       - on disk version of sb
+ *     sb         - a superblock
+ *     dir        - conversion direction: <0 - convert sb to buf
+ *                                        >0 - convert buf to sb
+ *     fields     - which fields to copy (bitmask)
+ */
+void
+xfs_xlatesb(
+	void		*data,
+	xfs_sb_t	*sb,
+	int		dir,
+	__int64_t	fields)
+{
+	xfs_caddr_t	buf_ptr;
+	xfs_caddr_t	mem_ptr;
+	xfs_sb_field_t	f;
+	int		first;
+	int		size;
+
+	ASSERT(dir);
+	ASSERT(fields);
+
+	if (!fields)
+		return;
+
+	buf_ptr = (xfs_caddr_t)data;
+	mem_ptr = (xfs_caddr_t)sb;
+
+	while (fields) {
+		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+		first = xfs_sb_info[f].offset;
+		size = xfs_sb_info[f + 1].offset - first;
+
+		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
+
+		if (size == 1 || xfs_sb_info[f].type == 1) {
+			if (dir > 0) {
+				memcpy(mem_ptr + first, buf_ptr + first, size);
+			} else {
+				memcpy(buf_ptr + first, mem_ptr + first, size);
+			}
+		} else {
+			switch (size) {
+			case 2:
+				INT_XLATE(*(__uint16_t*)(buf_ptr+first),
+					  *(__uint16_t*)(mem_ptr+first),
+					  dir, ARCH_CONVERT);
+				break;
+			case 4:
+				INT_XLATE(*(__uint32_t*)(buf_ptr+first),
+					  *(__uint32_t*)(mem_ptr+first),
+					  dir, ARCH_CONVERT);
+				break;
+			case 8:
+				INT_XLATE(*(__uint64_t*)(buf_ptr+first),
+					  *(__uint64_t*)(mem_ptr+first), dir, ARCH_CONVERT);
+				break;
+			default:
+				ASSERT(0);
+			}
+		}
+
+		fields &= ~(1LL << f);
+	}
+}
+
+/*
+ * xfs_readsb
+ *
+ * Does the initial read of the superblock.
+ */
+int
+xfs_readsb(xfs_mount_t *mp)
+{
+	unsigned int	sector_size;
+	unsigned int	extra_flags;
+	xfs_buf_t	*bp;
+	xfs_sb_t	*sbp;
+	int		error;
+
+	ASSERT(mp->m_sb_bp == NULL);
+	ASSERT(mp->m_ddev_targp != NULL);
+
+	/*
+	 * Allocate a (locked) buffer to hold the superblock.
+	 * This will be kept around at all times to optimize
+	 * access to the superblock.
+	 */
+	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+
+	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+				BTOBB(sector_size), extra_flags);
+	if (!bp || XFS_BUF_ISERROR(bp)) {
+		cmn_err(CE_WARN, "XFS: SB read failed");
+		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
+		goto fail;
+	}
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+
+	/*
+	 * Initialize the mount structure from the superblock.
+	 * But first do some basic consistency checking.
+	 */
+	sbp = XFS_BUF_TO_SBP(bp);
+	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);
+
+	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
+	if (error) {
+		cmn_err(CE_WARN, "XFS: SB validate failed");
+		goto fail;
+	}
+
+	/*
+	 * We must be able to do sector-sized and sector-aligned IO.
+	 */
+	if (sector_size > mp->m_sb.sb_sectsize) {
+		cmn_err(CE_WARN,
+			"XFS: device supports only %u byte sectors (not %u)",
+			sector_size, mp->m_sb.sb_sectsize);
+		error = ENOSYS;
+		goto fail;
+	}
+
+	/*
+	 * If device sector size is smaller than the superblock size,
+	 * re-read the superblock so the buffer is correctly sized.
+	 */
+	if (sector_size < mp->m_sb.sb_sectsize) {
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+		sector_size = mp->m_sb.sb_sectsize;
+		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+					BTOBB(sector_size), extra_flags);
+		if (!bp || XFS_BUF_ISERROR(bp)) {
+			cmn_err(CE_WARN, "XFS: SB re-read failed");
+			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
+			goto fail;
+		}
+		ASSERT(XFS_BUF_ISBUSY(bp));
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+	}
+
+	mp->m_sb_bp = bp;
+	xfs_buf_relse(bp);
+	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
+	return 0;
+
+ fail:
+	if (bp) {
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+	}
+	return error;
+}
+
+
+/*
+ * xfs_mount_common
+ *
+ * Mount initialization code establishing various mount
+ * fields from the superblock associated with the given
+ * mount structure
+ */
+void
+xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
+{
+	int	i;
+
+	mp->m_agfrotor = mp->m_agirotor = 0;
+	spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
+	mp->m_maxagi = mp->m_sb.sb_agcount;
+	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
+	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
+	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
+	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
+	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
+	mp->m_litino = sbp->sb_inodesize -
+		((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
+	mp->m_blockmask = sbp->sb_blocksize - 1;
+	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
+	mp->m_blockwmask = mp->m_blockwsize - 1;
+	INIT_LIST_HEAD(&mp->m_del_inodes);
+
+	/*
+	 * Set up for attributes, in case they get created.
+	 * This value is for inodes getting attributes for the first time;
+	 * the per-inode value is for old attribute values.
+	 */
+	ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
+	switch (sbp->sb_inodesize) {
+	case 256:
+		mp->m_attroffset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2);
+		break;
+	case 512:
+	case 1024:
+	case 2048:
+		mp->m_attroffset = XFS_BMDR_SPACE_CALC(12);
+		break;
+	default:
+		ASSERT(0);
+	}
+	ASSERT(mp->m_attroffset < XFS_LITINO(mp));
+
+	for (i = 0; i < 2; i++) {
+		mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
+			xfs_alloc, i == 0);
+		mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
+			xfs_alloc, i == 0);
+	}
+	for (i = 0; i < 2; i++) {
+		mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
+			xfs_bmbt, i == 0);
+		mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
+			xfs_bmbt, i == 0);
+	}
+	for (i = 0; i < 2; i++) {
+		mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
+			xfs_inobt, i == 0);
+		mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
+			xfs_inobt, i == 0);
+	}
+
+	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
+	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
+					sbp->sb_inopblock);
+	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
+}
+/*
+ * xfs_mountfs
+ *
+ * This function does the following on an initial mount of a file system:
+ *	- reads the superblock from disk and init the mount struct
+ *	- if we're a 32-bit kernel, do a size check on the superblock
+ *		so we don't mount terabyte filesystems
+ *	- init mount struct realtime fields
+ *	- allocate inode hash table for fs
+ *	- init directory manager
+ *	- perform recovery and init the log manager
+ */
+int
+xfs_mountfs(
+	vfs_t		*vfsp,
+	xfs_mount_t	*mp,
+	int		mfsi_flags)
+{
+	xfs_buf_t	*bp;
+	xfs_sb_t	*sbp = &(mp->m_sb);
+	xfs_inode_t	*rip;
+	vnode_t		*rvp = NULL;
+	int		readio_log, writeio_log;
+	xfs_daddr_t	d;
+	__uint64_t	ret64;
+	__int64_t	update_flags;
+	uint		quotamount, quotaflags;
+	int		agno;
+	int		uuid_mounted = 0;
+	int		error = 0;
+
+	if (mp->m_sb_bp == NULL) {
+		if ((error = xfs_readsb(mp))) {
+			return (error);
+		}
+	}
+	xfs_mount_common(mp, sbp);
+
+	/*
+	 * Check if sb_agblocks is aligned at stripe boundary.
+	 * If sb_agblocks is NOT aligned, turn off m_dalign, since
+	 * allocator alignment is within an ag and the ag therefore
+	 * has to be aligned at a stripe boundary.
+	 */
+	update_flags = 0LL;
+	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
+		/*
+		 * If stripe unit and stripe width are not multiples
+		 * of the fs blocksize turn off alignment.
+		 */
+		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
+		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
+			if (mp->m_flags & XFS_MOUNT_RETERR) {
+				cmn_err(CE_WARN,
+					"XFS: alignment check 1 failed");
+				error = XFS_ERROR(EINVAL);
+				goto error1;
+			}
+			mp->m_dalign = mp->m_swidth = 0;
+		} else {
+			/*
+			 * Convert the stripe unit and width to FSBs.
+			 */
+			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
+			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
+				if (mp->m_flags & XFS_MOUNT_RETERR) {
+					error = XFS_ERROR(EINVAL);
+					goto error1;
+				}
+				xfs_fs_cmn_err(CE_WARN, mp,
+"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
+					mp->m_dalign, mp->m_swidth,
+					sbp->sb_agblocks);
+
+				mp->m_dalign = 0;
+				mp->m_swidth = 0;
+			} else if (mp->m_dalign) {
+				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
+			} else {
+				if (mp->m_flags & XFS_MOUNT_RETERR) {
+					xfs_fs_cmn_err(CE_WARN, mp,
+"stripe alignment turned off: sunit(%d) less than bsize(%d)",
+						mp->m_dalign,
+						mp->m_blockmask + 1);
+					error = XFS_ERROR(EINVAL);
+					goto error1;
+				}
+				mp->m_swidth = 0;
+			}
+		}
+
+		/*
+		 * Update superblock with new values
+		 * and log changes
+		 */
+		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
+			if (sbp->sb_unit != mp->m_dalign) {
+				sbp->sb_unit = mp->m_dalign;
+				update_flags |= XFS_SB_UNIT;
+			}
+			if (sbp->sb_width != mp->m_swidth) {
+				sbp->sb_width = mp->m_swidth;
+				update_flags |= XFS_SB_WIDTH;
+			}
+		}
+	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
+		    XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
+			mp->m_dalign = sbp->sb_unit;
+			mp->m_swidth = sbp->sb_width;
+	}
+
+	xfs_alloc_compute_maxlevels(mp);
+	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
+	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
+	xfs_ialloc_compute_maxlevels(mp);
+
+	if (sbp->sb_imax_pct) {
+		__uint64_t	icount;
+
+		/* Make sure the maximum inode count is a multiple of the
+		 * units we allocate inodes in.
+		 */
+
+		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
+		do_div(icount, 100);
+		do_div(icount, mp->m_ialloc_blks);
+		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
+				   sbp->sb_inopblog;
+	} else
+		mp->m_maxicount = 0;
+
+	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
+
+	/*
+	 * XFS uses the uuid from the superblock as the unique
+	 * identifier for fsid.  We can not use the uuid from the volume
+	 * since a single partition filesystem is identical to a single
+	 * partition volume/filesystem.
+	 */
+	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
+	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+		if (xfs_uuid_mount(mp)) {
+			error = XFS_ERROR(EINVAL);
+			goto error1;
+		}
+		uuid_mounted = 1;
+		ret64 = uuid_hash64(&sbp->sb_uuid);
+		memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
+	}
+
+	/*
+	 * Set the default minimum read and write sizes unless
+	 * already specified in a mount option.
+	 * We use smaller I/O sizes when the file system
+	 * is being used for NFS service (wsync mount option).
+	 */
+	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+		if (mp->m_flags & XFS_MOUNT_WSYNC) {
+			readio_log = XFS_WSYNC_READIO_LOG;
+			writeio_log = XFS_WSYNC_WRITEIO_LOG;
+		} else {
+			readio_log = XFS_READIO_LOG_LARGE;
+			writeio_log = XFS_WRITEIO_LOG_LARGE;
+		}
+	} else {
+		readio_log = mp->m_readio_log;
+		writeio_log = mp->m_writeio_log;
+	}
+
+	/*
+	 * Set the number of readahead buffers to use based on
+	 * physical memory size.
+	 */
+	if (xfs_physmem <= 4096)		/* <= 16MB */
+		mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
+	else if (xfs_physmem <= 8192)	/* <= 32MB */
+		mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
+	else
+		mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
+	if (sbp->sb_blocklog > readio_log) {
+		mp->m_readio_log = sbp->sb_blocklog;
+	} else {
+		mp->m_readio_log = readio_log;
+	}
+	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
+	if (sbp->sb_blocklog > writeio_log) {
+		mp->m_writeio_log = sbp->sb_blocklog;
+	} else {
+		mp->m_writeio_log = writeio_log;
+	}
+	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
+
+	/*
+	 * Set the inode cluster size based on the physical memory
+	 * size.  This may still be overridden by the file system
+	 * block size if it is larger than the chosen cluster size.
+	 */
+	if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */
+		mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
+	} else {
+		mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
+	}
+	/*
+	 * Set whether we're using inode alignment.
+	 */
+	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
+	    mp->m_sb.sb_inoalignmt >=
+	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
+	else
+		mp->m_inoalign_mask = 0;
+	/*
+	 * If we are using stripe alignment, check whether
+	 * the stripe unit is a multiple of the inode alignment
+	 */
+	if (mp->m_dalign && mp->m_inoalign_mask &&
+	    !(mp->m_dalign & mp->m_inoalign_mask))
+		mp->m_sinoalign = mp->m_dalign;
+	else
+		mp->m_sinoalign = 0;
+	/*
+	 * Check that the data (and log if separate) are an ok size.
+	 */
+	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
+	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
+		cmn_err(CE_WARN, "XFS: size check 1 failed");
+		error = XFS_ERROR(E2BIG);
+		goto error1;
+	}
+	error = xfs_read_buf(mp, mp->m_ddev_targp,
+			     d - XFS_FSS_TO_BB(mp, 1),
+			     XFS_FSS_TO_BB(mp, 1), 0, &bp);
+	if (!error) {
+		xfs_buf_relse(bp);
+	} else {
+		cmn_err(CE_WARN, "XFS: size check 2 failed");
+		if (error == ENOSPC) {
+			error = XFS_ERROR(E2BIG);
+		}
+		goto error1;
+	}
+
+	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
+	    mp->m_logdev_targp != mp->m_ddev_targp) {
+		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
+		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
+			cmn_err(CE_WARN, "XFS: size check 3 failed");
+			error = XFS_ERROR(E2BIG);
+			goto error1;
+		}
+		error = xfs_read_buf(mp, mp->m_logdev_targp,
+				     d - XFS_FSB_TO_BB(mp, 1),
+				     XFS_FSB_TO_BB(mp, 1), 0, &bp);
+		if (!error) {
+			xfs_buf_relse(bp);
+		} else {
+			cmn_err(CE_WARN, "XFS: size check 3 failed");
+			if (error == ENOSPC) {
+				error = XFS_ERROR(E2BIG);
+			}
+			goto error1;
+		}
+	}
+
+	/*
+	 * Initialize realtime fields in the mount structure
+	 */
+	if ((error = xfs_rtmount_init(mp))) {
+		cmn_err(CE_WARN, "XFS: RT mount failed");
+		goto error1;
+	}
+
+	/*
+	 * For client case we are done now
+	 */
+	if (mfsi_flags & XFS_MFSI_CLIENT) {
+		return(0);
+	}
+
+	/*
+	 *  Copies the low order bits of the timestamp and the randomly
+	 *  set "sequence" number out of a UUID.
+	 */
+	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
+
+	/*
+	 *  The vfs structure needs to have a file system independent
+	 *  way of checking for the invariant file system ID.  Since it
+	 *  can't look at mount structures it has a pointer to the data
+	 *  in the mount structure.
+	 *
+	 *  File systems that don't support user level file handles (i.e.
+	 *  all of them except for XFS) will leave vfs_altfsid as NULL.
+	 */
+	vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
+	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
+
+	/*
+	 * Select the right directory manager.
+	 */
+	mp->m_dirops =
+		XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
+			xfsv2_dirops :
+			xfsv1_dirops;
+
+	/*
+	 * Initialize directory manager's entries.
+	 */
+	XFS_DIR_MOUNT(mp);
+
+	/*
+	 * Initialize the attribute manager's entries.
+	 */
+	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
+
+	/*
+	 * Initialize the precomputed transaction reservations values.
+	 */
+	xfs_trans_init(mp);
+
+	/*
+	 * Allocate and initialize the inode hash table for this
+	 * file system.
+	 */
+	xfs_ihash_init(mp);
+	xfs_chash_init(mp);
+
+	/*
+	 * Allocate and initialize the per-ag data.
+	 */
+	init_rwsem(&mp->m_peraglock);
+	mp->m_perag =
+		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
+
+	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
+
+	/*
+	 * log's mount-time initialization. Perform the first part of recovery if needed.
+	 */
+	if (likely(sbp->sb_logblocks > 0)) {	/* check for volume case */
+		error = xfs_log_mount(mp, mp->m_logdev_targp,
+				      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
+				      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
+		if (error) {
+			cmn_err(CE_WARN, "XFS: log mount failed");
+			goto error2;
+		}
+	} else {	/* No log has been defined */
+		cmn_err(CE_WARN, "XFS: no log defined");
+		XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
+		error = XFS_ERROR(EFSCORRUPTED);
+		goto error2;
+	}
+
+	/*
+	 * Get and sanity-check the root inode.
+	 * Save the pointer to it in the mount structure.
+	 */
+	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
+	if (error) {
+		cmn_err(CE_WARN, "XFS: failed to read root inode");
+		goto error3;
+	}
+
+	ASSERT(rip != NULL);
+	rvp = XFS_ITOV(rip);
+
+	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
+		cmn_err(CE_WARN, "XFS: corrupted root inode");
+		prdev("Root inode %llu is not a directory",
+		      mp->m_ddev_targp, (unsigned long long)rip->i_ino);
+		xfs_iunlock(rip, XFS_ILOCK_EXCL);
+		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
+				 mp);
+		error = XFS_ERROR(EFSCORRUPTED);
+		goto error4;
+	}
+	mp->m_rootip = rip;	/* save it */
+
+	xfs_iunlock(rip, XFS_ILOCK_EXCL);
+
+	/*
+	 * Initialize realtime inode pointers in the mount structure
+	 */
+	if ((error = xfs_rtmount_inodes(mp))) {
+		/*
+		 * Free up the root inode.
+		 */
+		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
+		goto error4;
+	}
+
+	/*
+	 * If fs is not mounted readonly, then update the superblock
+	 * unit and width changes.
+	 */
+	if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
+		xfs_mount_log_sbunit(mp, update_flags);
+
+	/*
+	 * Initialise the XFS quota management subsystem for this mount
+	 */
+	if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
+		goto error4;
+
+	/*
+	 * Finish recovering the file system.  This part needed to be
+	 * delayed until after the root and real-time bitmap inodes
+	 * were consistently read in.
+	 */
+	error = xfs_log_mount_finish(mp, mfsi_flags);
+	if (error) {
+		cmn_err(CE_WARN, "XFS: log mount finish failed");
+		goto error4;
+	}
+
+	/*
+	 * Complete the quota initialisation, post-log-replay component.
+	 */
+	if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
+		goto error4;
+
+	return 0;
+
+ error4:
+	/*
+	 * Free up the root inode.
+	 */
+	VN_RELE(rvp);
+ error3:
+	xfs_log_unmount_dealloc(mp);
+ error2:
+	xfs_ihash_free(mp);
+	xfs_chash_free(mp);
+	for (agno = 0; agno < sbp->sb_agcount; agno++)
+		if (mp->m_perag[agno].pagb_list)
+			kmem_free(mp->m_perag[agno].pagb_list,
+			  sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
+	kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
+	mp->m_perag = NULL;
+	/* FALLTHROUGH */
+ error1:
+	if (uuid_mounted)
+		xfs_uuid_unmount(mp);
+	xfs_freesb(mp);
+	return error;
+}
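The error1..error4 labels above form the usual staged-unwind goto ladder: each label releases only what had been set up by the time of the failure, and control falls through into the earlier labels. A stand-alone user-space sketch of the same pattern (hypothetical resources, not a line-for-line mapping of the XFS teardown):

#include <stdlib.h>

struct mount_ctx { void *sb, *perag, *log, *root; };

static int setup_demo(struct mount_ctx *m)
{
	if (!(m->sb = malloc(64)))		/* cf. reading the superblock */
		goto out;
	if (!(m->perag = malloc(64)))		/* cf. the per-ag allocation */
		goto out_free_sb;
	if (!(m->log = malloc(64)))		/* cf. mounting the log */
		goto out_free_perag;
	if (!(m->root = malloc(64)))		/* cf. grabbing the root inode */
		goto out_free_log;
	return 0;				/* fully set up */

	/* Unwind in reverse order of setup; each label falls into the next. */
 out_free_log:
	free(m->log);
 out_free_perag:
	free(m->perag);
 out_free_sb:
	free(m->sb);
 out:
	return -1;
}

int main(void)
{
	struct mount_ctx m = { 0 };

	if (setup_demo(&m) == 0) {
		free(m.root);
		free(m.log);
		free(m.perag);
		free(m.sb);
	}
	return 0;
}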
+
+/*
+ * xfs_unmountfs
+ *
+ * This flushes out the inodes, dquots and the superblock, unmounts the
+ * log and makes sure that incore structures are freed.
+ */
+int
+xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
+{
+	struct vfs	*vfsp = XFS_MTOVFS(mp);
+#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+	int64_t		fsid;
+#endif
+
+	xfs_iflush_all(mp, XFS_FLUSH_ALL);
+
+	XFS_QM_DQPURGEALL(mp,
+		XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
+
+	/*
+	 * Flush out the log synchronously so that we know for sure
+	 * that nothing is pinned.  This is important because bflush()
+	 * will skip pinned buffers.
+	 */
+	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+
+	xfs_binval(mp->m_ddev_targp);
+	if (mp->m_rtdev_targp) {
+		xfs_binval(mp->m_rtdev_targp);
+	}
+
+	xfs_unmountfs_writesb(mp);
+
+	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
+
+	xfs_log_unmount(mp);			/* Done! No more fs ops. */
+
+	xfs_freesb(mp);
+
+	/*
+	 * All inodes from this mount point should be freed.
+	 */
+	ASSERT(mp->m_inodes == NULL);
+
+	/*
+	 * We may still have buffers that are in the process of being written.
+	 * We must wait for the I/O completion of those. The sync flag here
+	 * does a two-pass iteration through the buffer cache.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		xfs_incore_relse(mp->m_ddev_targp, 0, 1); /* synchronous */
+	}
+
+	xfs_unmountfs_close(mp, cr);
+	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
+		xfs_uuid_unmount(mp);
+
+#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+	/*
+	 * clear all error tags on this filesystem
+	 */
+	memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
+	xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
+#endif
+	XFS_IODONE(vfsp);
+	xfs_mount_free(mp, 1);
+	return 0;
+}
+
+void
+xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
+{
+	if (mp->m_logdev_targp != mp->m_ddev_targp)
+		xfs_free_buftarg(mp->m_logdev_targp, 1);
+	if (mp->m_rtdev_targp)
+		xfs_free_buftarg(mp->m_rtdev_targp, 1);
+	xfs_free_buftarg(mp->m_ddev_targp, 0);
+}
+
+void
+xfs_unmountfs_wait(xfs_mount_t *mp)
+{
+	if (mp->m_logdev_targp != mp->m_ddev_targp)
+		xfs_wait_buftarg(mp->m_logdev_targp);
+	if (mp->m_rtdev_targp)
+		xfs_wait_buftarg(mp->m_rtdev_targp);
+	xfs_wait_buftarg(mp->m_ddev_targp);
+}
+
+int
+xfs_unmountfs_writesb(xfs_mount_t *mp)
+{
+	xfs_buf_t	*sbp;
+	xfs_sb_t	*sb;
+	int		error = 0;
+
+	/*
+	 * skip superblock write if fs is read-only, or
+	 * if we are doing a forced umount.
+	 */
+	sbp = xfs_getsb(mp, 0);
+	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
+		XFS_FORCED_SHUTDOWN(mp))) {
+		/*
+		 * mark shared-readonly if desired
+		 */
+		sb = XFS_BUF_TO_SBP(sbp);
+		if (mp->m_mk_sharedro) {
+			if (!(sb->sb_flags & XFS_SBF_READONLY))
+				sb->sb_flags |= XFS_SBF_READONLY;
+			if (!XFS_SB_VERSION_HASSHARED(sb))
+				XFS_SB_VERSION_ADDSHARED(sb);
+			xfs_fs_cmn_err(CE_NOTE, mp,
+				"Unmounting, marking shared read-only");
+		}
+		XFS_BUF_UNDONE(sbp);
+		XFS_BUF_UNREAD(sbp);
+		XFS_BUF_UNDELAYWRITE(sbp);
+		XFS_BUF_WRITE(sbp);
+		XFS_BUF_UNASYNC(sbp);
+		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
+		xfsbdstrat(mp, sbp);
+		/*
+		 * Never mind errors we might get here; they are reported
+		 * below but do not stop the unmount.
+		 */
+		error = xfs_iowait(sbp);
+		if (error)
+			xfs_ioerror_alert("xfs_unmountfs_writesb",
+					  mp, sbp, XFS_BUF_ADDR(sbp));
+		if (error && mp->m_mk_sharedro)
+			xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting.  Filesystem may not be marked shared readonly");
+	}
+	xfs_buf_relse(sbp);
+	return (error);
+}
+
+/*
+ * xfs_mod_sb() can be used to copy arbitrary changes to the
+ * in-core superblock into the superblock buffer to be logged.
+ * It does not provide the higher level of locking that is
+ * needed to protect the in-core superblock from concurrent
+ * access.
+ */
+void
+xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+{
+	xfs_buf_t	*bp;
+	int		first;
+	int		last;
+	xfs_mount_t	*mp;
+	xfs_sb_t	*sbp;
+	xfs_sb_field_t	f;
+
+	ASSERT(fields);
+	if (!fields)
+		return;
+	mp = tp->t_mountp;
+	bp = xfs_trans_getsb(tp, mp, 0);
+	sbp = XFS_BUF_TO_SBP(bp);
+	first = sizeof(xfs_sb_t);
+	last = 0;
+
+	/* translate/copy */
+
+	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, fields);
+
+	/* find modified range */
+
+	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+	first = xfs_sb_info[f].offset;
+
+	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+	last = xfs_sb_info[f + 1].offset - 1;
+
+	xfs_trans_log_buf(tp, bp, first, last);
+}
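xfs_mod_sb() logs only the byte range spanned by the lowest and highest modified fields, found via the low/high set bits of the field mask and a per-field offset table with a trailing sentinel entry (cf. xfs_sb_info[f + 1] above). A stand-alone sketch of that arithmetic with a made-up three-field layout:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical on-disk layout: three fields at known byte offsets. */
enum { F_ICOUNT, F_IFREE, F_FDBLOCKS, F_NFIELDS };

static const int field_offset[F_NFIELDS + 1] = {
	0,	/* F_ICOUNT   at byte 0  */
	8,	/* F_IFREE    at byte 8  */
	16,	/* F_FDBLOCKS at byte 16 */
	24,	/* end-of-struct sentinel, like the extra xfs_sb_info entry */
};

static int lowbit(uint64_t v)  { int b = 0; while (!(v & 1)) { v >>= 1; b++; } return b; }
static int highbit(uint64_t v) { int b = 0; while (v >>= 1) b++; return b; }

int main(void)
{
	uint64_t fields = (1ULL << F_IFREE) | (1ULL << F_FDBLOCKS);
	int first = field_offset[lowbit(fields)];
	int last  = field_offset[highbit(fields) + 1] - 1;

	/* Only bytes 8..23 need to be logged, not the whole structure. */
	printf("log bytes %d..%d\n", first, last);
	return 0;
}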
+
+/*
+ * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
+ * a delta to a specified field in the in-core superblock.  Simply
+ * switch on the field indicated and apply the delta to that field.
+ * Fields are not allowed to dip below zero, so if the delta would
+ * take a field below zero, the delta is not applied and EINVAL is returned.
+ *
+ * The SB_LOCK must be held when this routine is called.
+ */
+STATIC int
+xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
+			int delta, int rsvd)
+{
+	int		scounter;	/* short counter for 32 bit fields */
+	long long	lcounter;	/* long counter for 64 bit fields */
+	long long	res_used, rem;
+
+	/*
+	 * With the in-core superblock spin lock held, switch
+	 * on the indicated field.  Apply the delta to the
+	 * proper field.  If the field's value would dip below
+	 * 0, then do not apply the delta and return EINVAL.
+	 */
+	switch (field) {
+	case XFS_SBS_ICOUNT:
+		lcounter = (long long)mp->m_sb.sb_icount;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_icount = lcounter;
+		return (0);
+	case XFS_SBS_IFREE:
+		lcounter = (long long)mp->m_sb.sb_ifree;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_ifree = lcounter;
+		return (0);
+	case XFS_SBS_FDBLOCKS:
+
+		lcounter = (long long)mp->m_sb.sb_fdblocks;
+		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
+
+		if (delta > 0) {		/* Putting blocks back */
+			if (res_used > delta) {
+				mp->m_resblks_avail += delta;
+			} else {
+				rem = delta - res_used;
+				mp->m_resblks_avail = mp->m_resblks;
+				lcounter += rem;
+			}
+		} else {				/* Taking blocks away */
+
+			lcounter += delta;
+
+		/*
+		 * If we're out of blocks, use any available reserved
+		 * blocks if we're allowed to.
+		 */
+
+			if (lcounter < 0) {
+				if (rsvd) {
+					lcounter = (long long)mp->m_resblks_avail + delta;
+					if (lcounter < 0) {
+						return (XFS_ERROR(ENOSPC));
+					}
+					mp->m_resblks_avail = lcounter;
+					return (0);
+				} else {	/* not reserved */
+					return (XFS_ERROR(ENOSPC));
+				}
+			}
+		}
+
+		mp->m_sb.sb_fdblocks = lcounter;
+		return (0);
+	case XFS_SBS_FREXTENTS:
+		lcounter = (long long)mp->m_sb.sb_frextents;
+		lcounter += delta;
+		if (lcounter < 0) {
+			return (XFS_ERROR(ENOSPC));
+		}
+		mp->m_sb.sb_frextents = lcounter;
+		return (0);
+	case XFS_SBS_DBLOCKS:
+		lcounter = (long long)mp->m_sb.sb_dblocks;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_dblocks = lcounter;
+		return (0);
+	case XFS_SBS_AGCOUNT:
+		scounter = mp->m_sb.sb_agcount;
+		scounter += delta;
+		if (scounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_agcount = scounter;
+		return (0);
+	case XFS_SBS_IMAX_PCT:
+		scounter = mp->m_sb.sb_imax_pct;
+		scounter += delta;
+		if (scounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_imax_pct = scounter;
+		return (0);
+	case XFS_SBS_REXTSIZE:
+		scounter = mp->m_sb.sb_rextsize;
+		scounter += delta;
+		if (scounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_rextsize = scounter;
+		return (0);
+	case XFS_SBS_RBMBLOCKS:
+		scounter = mp->m_sb.sb_rbmblocks;
+		scounter += delta;
+		if (scounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_rbmblocks = scounter;
+		return (0);
+	case XFS_SBS_RBLOCKS:
+		lcounter = (long long)mp->m_sb.sb_rblocks;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_rblocks = lcounter;
+		return (0);
+	case XFS_SBS_REXTENTS:
+		lcounter = (long long)mp->m_sb.sb_rextents;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_rextents = lcounter;
+		return (0);
+	case XFS_SBS_REXTSLOG:
+		scounter = mp->m_sb.sb_rextslog;
+		scounter += delta;
+		if (scounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_rextslog = scounter;
+		return (0);
+	default:
+		ASSERT(0);
+		return (XFS_ERROR(EINVAL));
+	}
+}
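The XFS_SBS_FDBLOCKS case above is the interesting one: freed blocks refill the reserved pool (m_resblks_avail) before raising the free count, while an allocation that would drive the free count negative may, if rsvd is set, be charged entirely to the reserve instead. A self-contained sketch of that accounting under hypothetical names:

#include <assert.h>
#include <errno.h>

struct counters {
	long long free;		/* cf. sb_fdblocks: free data blocks */
	long long resblks;	/* cf. m_resblks: size of the reserved pool */
	long long res_avail;	/* cf. m_resblks_avail: reserve still unused */
};

/* Apply a block-count delta: negative allocates, positive frees. */
static int mod_free_blocks(struct counters *c, long long delta, int rsvd)
{
	long long free = c->free;
	long long res_used = c->resblks - c->res_avail;

	if (delta > 0) {			/* putting blocks back */
		if (res_used > delta) {
			c->res_avail += delta;	/* refill the reserve first */
			return 0;
		}
		c->res_avail = c->resblks;	/* reserve is full again */
		free += delta - res_used;	/* remainder raises the free count */
	} else {				/* taking blocks away */
		free += delta;
		if (free < 0) {
			if (!rsvd)
				return ENOSPC;
			/* The whole request is charged to the reserve. */
			if (c->res_avail + delta < 0)
				return ENOSPC;
			c->res_avail += delta;
			return 0;
		}
	}
	c->free = free;
	return 0;
}

int main(void)
{
	struct counters c = { .free = 4, .resblks = 8, .res_avail = 8 };

	assert(mod_free_blocks(&c, -8, 0) == ENOSPC);	/* reserve not allowed */
	assert(mod_free_blocks(&c, -8, 1) == 0);	/* comes out of the reserve */
	assert(c.free == 4 && c.res_avail == 0);
	assert(mod_free_blocks(&c, 10, 1) == 0);	/* 8 refill the reserve, 2 go free */
	assert(c.res_avail == 8 && c.free == 6);
	return 0;
}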
+
+/*
+ * xfs_mod_incore_sb() is used to change a field in the in-core
+ * superblock structure by the specified delta.  This modification
+ * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
+ * routine to do the work.
+ */
+int
+xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
+{
+	unsigned long	s;
+	int	status;
+
+	s = XFS_SB_LOCK(mp);
+	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+	XFS_SB_UNLOCK(mp, s);
+	return (status);
+}
+
+/*
+ * xfs_mod_incore_sb_batch() is used to change more than one field
+ * in the in-core superblock structure at a time.  This modification
+ * is protected by a lock internal to this module.  The fields and
+ * changes to those fields are specified in the array of xfs_mod_sb
+ * structures passed in.
+ *
+ * Either all of the specified deltas will be applied or none of
+ * them will.  If any modified field dips below 0, then all modifications
+ * will be backed out and EINVAL will be returned.
+ */
+int
+xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
+{
+	unsigned long	s;
+	int		status=0;
+	xfs_mod_sb_t	*msbp;
+
+	/*
+	 * Loop through the array of mod structures and apply each
+	 * individually.  If any fail, then back out all those
+	 * which have already been applied.  Do all of this within
+	 * the scope of the SB_LOCK so that all of the changes will
+	 * be atomic.
+	 */
+	s = XFS_SB_LOCK(mp);
+	for (msbp = &msb[0]; msbp < (msb + nmsb); msbp++) {
+		/*
+		 * Apply the delta at index n.  If it fails, break
+		 * from the loop so we'll fall into the undo loop
+		 * below.
+		 */
+		status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
+						    msbp->msb_delta, rsvd);
+		if (status != 0) {
+			break;
+		}
+	}
+
+	/*
+	 * If we didn't complete the loop above, then back out
+	 * any changes made to the superblock.  If you add code
+	 * between the loop above and here, make sure that you
+	 * preserve the value of status. Loop back until
+	 * we step below the beginning of the array.  Make sure
+	 * we don't touch anything back there.
+	 */
+	if (status != 0) {
+		msbp--;
+		while (msbp >= msb) {
+			status = xfs_mod_incore_sb_unlocked(mp,
+				    msbp->msb_field, -(msbp->msb_delta), rsvd);
+			ASSERT(status == 0);
+			msbp--;
+		}
+	}
+	XFS_SB_UNLOCK(mp, s);
+	return (status);
+}
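xfs_mod_incore_sb_batch() gets its all-or-nothing behaviour by applying the deltas in order and, on the first failure, walking back over the already-applied entries with negated deltas. A compact sketch of the same loop over a plain counter array:

#include <assert.h>
#include <stddef.h>

struct mod { int field; long delta; };

/* Apply one delta; refuse to let a counter go negative (cf. EINVAL/ENOSPC). */
static int apply_one(long *counters, const struct mod *m)
{
	if (counters[m->field] + m->delta < 0)
		return -1;
	counters[m->field] += m->delta;
	return 0;
}

/* Apply all deltas or none: undo the prefix that succeeded on failure. */
static int apply_batch(long *counters, const struct mod *msb, size_t nmsb)
{
	size_t i;
	int status = 0;

	for (i = 0; i < nmsb; i++) {
		status = apply_one(counters, &msb[i]);
		if (status != 0)
			break;
	}
	if (status != 0) {
		/* Walk back over the entries already applied, negating them. */
		while (i-- > 0) {
			struct mod undo = { msb[i].field, -msb[i].delta };
			int rc = apply_one(counters, &undo);
			assert(rc == 0);
			(void)rc;
		}
	}
	return status;
}

int main(void)
{
	long counters[2] = { 5, 1 };
	struct mod batch[] = { { 0, -3 }, { 1, -2 } };	/* second entry underflows */

	assert(apply_batch(counters, batch, 2) != 0);
	assert(counters[0] == 5 && counters[1] == 1);	/* fully rolled back */
	return 0;
}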
+
+/*
+ * xfs_getsb() is called to obtain the buffer for the superblock.
+ * The buffer is returned locked and read in from disk.
+ * The buffer should be released with a call to xfs_buf_relse().
+ *
+ * If the flags parameter is XFS_BUF_TRYLOCK, then we'll only return
+ * the superblock buffer if it can be locked without sleeping.
+ * If it can't then we'll return NULL.
+ */
+xfs_buf_t *
+xfs_getsb(
+	xfs_mount_t	*mp,
+	int		flags)
+{
+	xfs_buf_t	*bp;
+
+	ASSERT(mp->m_sb_bp != NULL);
+	bp = mp->m_sb_bp;
+	if (flags & XFS_BUF_TRYLOCK) {
+		if (!XFS_BUF_CPSEMA(bp)) {
+			return NULL;
+		}
+	} else {
+		XFS_BUF_PSEMA(bp, PRIBIO);
+	}
+	XFS_BUF_HOLD(bp);
+	ASSERT(XFS_BUF_ISDONE(bp));
+	return (bp);
+}
+
+/*
+ * Used to free the superblock along various error paths.
+ */
+void
+xfs_freesb(
+	xfs_mount_t	*mp)
+{
+	xfs_buf_t	*bp;
+
+	/*
+	 * Use xfs_getsb() so that the buffer will be locked
+	 * when we call xfs_buf_relse().
+	 */
+	bp = xfs_getsb(mp, 0);
+	XFS_BUF_UNMANAGE(bp);
+	xfs_buf_relse(bp);
+	mp->m_sb_bp = NULL;
+}
+
+/*
+ * See if the UUID is unique among mounted XFS filesystems.
+ * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
+ */
+STATIC int
+xfs_uuid_mount(
+	xfs_mount_t	*mp)
+{
+	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
+		cmn_err(CE_WARN,
+			"XFS: Filesystem %s has nil UUID - can't mount",
+			mp->m_fsname);
+		return -1;
+	}
+	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
+		cmn_err(CE_WARN,
+			"XFS: Filesystem %s has duplicate UUID - can't mount",
+			mp->m_fsname);
+		return -1;
+	}
+	return 0;
+}
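xfs_uuid_mount() applies two rules: a nil UUID is refused outright, and a UUID already present in the table of mounted filesystems is refused as a duplicate. A toy stand-in for that check against a fixed-size table (the demo_* helpers below are hypothetical, not the XFS/IRIX uuid routines):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

typedef struct { unsigned char b[16]; } demo_uuid_t;

#define TABLE_MAX 8
static demo_uuid_t mounted[TABLE_MAX];
static int nmounted;

static bool demo_uuid_is_nil(const demo_uuid_t *u)
{
	static const demo_uuid_t nil;	/* all zeroes */
	return memcmp(u, &nil, sizeof(nil)) == 0;
}

/* Insert the UUID unless it is already present; 0 on success, -1 otherwise. */
static int demo_uuid_table_insert(const demo_uuid_t *u)
{
	int i;

	for (i = 0; i < nmounted; i++)
		if (memcmp(&mounted[i], u, sizeof(*u)) == 0)
			return -1;		/* duplicate */
	if (nmounted == TABLE_MAX)
		return -1;
	mounted[nmounted++] = *u;
	return 0;
}

static int demo_uuid_mount(const demo_uuid_t *u)
{
	if (demo_uuid_is_nil(u))
		return -1;			/* "has nil UUID - can't mount" */
	if (demo_uuid_table_insert(u))
		return -1;			/* "has duplicate UUID - can't mount" */
	return 0;
}

int main(void)
{
	demo_uuid_t nil = {{0}}, a = {{1, 2, 3}};

	assert(demo_uuid_mount(&nil) == -1);
	assert(demo_uuid_mount(&a) == 0);
	assert(demo_uuid_mount(&a) == -1);	/* second mount of the same UUID */
	return 0;
}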
+
+/*
+ * Remove filesystem from the UUID table.
+ */
+STATIC void
+xfs_uuid_unmount(
+	xfs_mount_t	*mp)
+{
+	uuid_table_remove(&mp->m_sb.sb_uuid);
+}
+
+/*
+ * Used to log changes to the superblock unit and width fields which could
+ * be altered by the mount options. Only the first superblock is updated.
+ */
+STATIC void
+xfs_mount_log_sbunit(
+	xfs_mount_t	*mp,
+	__int64_t	fields)
+{
+	xfs_trans_t	*tp;
+
+	ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
+	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+				XFS_DEFAULT_LOG_COUNT)) {
+		xfs_trans_cancel(tp, 0);
+		return;
+	}
+	xfs_mod_sb(tp, fields);
+	xfs_trans_commit(tp, 0, NULL);
+}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
new file mode 100644
index 0000000..5fc6201
--- /dev/null
+++ b/fs/xfs/xfs_mount.h
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_MOUNT_H__
+#define	__XFS_MOUNT_H__
+
+
+typedef struct xfs_trans_reservations {
+	uint	tr_write;	/* extent alloc trans */
+	uint	tr_itruncate;	/* truncate trans */
+	uint	tr_rename;	/* rename trans */
+	uint	tr_link;	/* link trans */
+	uint	tr_remove;	/* unlink trans */
+	uint	tr_symlink;	/* symlink trans */
+	uint	tr_create;	/* create trans */
+	uint	tr_mkdir;	/* mkdir trans */
+	uint	tr_ifree;	/* inode free trans */
+	uint	tr_ichange;	/* inode update trans */
+	uint	tr_growdata;	/* fs data section grow trans */
+	uint	tr_swrite;	/* sync write inode trans */
+	uint	tr_addafork;	/* cvt inode to attributed trans */
+	uint	tr_writeid;	/* write setuid/setgid file */
+	uint	tr_attrinval;	/* attr fork buffer invalidation */
+	uint	tr_attrset;	/* set/create an attribute */
+	uint	tr_attrrm;	/* remove an attribute */
+	uint	tr_clearagi;	/* clear bad agi unlinked ino bucket */
+	uint	tr_growrtalloc;	/* grow realtime allocations */
+	uint	tr_growrtzero;	/* grow realtime zeroing */
+	uint	tr_growrtfree;	/* grow realtime freeing */
+} xfs_trans_reservations_t;
+
+
+#ifndef __KERNEL__
+/*
+ * Moved here from xfs_ag.h to avoid reordering header files
+ */
+#define XFS_DADDR_TO_AGNO(mp,d) \
+	((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks))
+#define XFS_DADDR_TO_AGBNO(mp,d) \
+	((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks))
+#else
+struct cred;
+struct log;
+struct vfs;
+struct vnode;
+struct xfs_mount_args;
+struct xfs_ihash;
+struct xfs_chash;
+struct xfs_inode;
+struct xfs_perag;
+struct xfs_iocore;
+struct xfs_bmbt_irec;
+struct xfs_bmap_free;
+
+#define	AIL_LOCK_T		lock_t
+#define	AIL_LOCKINIT(x,y)	spinlock_init(x,y)
+#define	AIL_LOCK_DESTROY(x)	spinlock_destroy(x)
+#define	AIL_LOCK(mp,s)		s=mutex_spinlock(&(mp)->m_ail_lock)
+#define	AIL_UNLOCK(mp,s)	mutex_spinunlock(&(mp)->m_ail_lock, s)
+
+
+/*
+ * Prototypes and functions for the Data Migration subsystem.
+ */
+
+typedef int	(*xfs_send_data_t)(int, struct vnode *,
+			xfs_off_t, size_t, int, vrwlock_t *);
+typedef int	(*xfs_send_mmap_t)(struct vm_area_struct *, uint);
+typedef int	(*xfs_send_destroy_t)(struct vnode *, dm_right_t);
+typedef int	(*xfs_send_namesp_t)(dm_eventtype_t, struct vfs *,
+			struct vnode *,
+			dm_right_t, struct vnode *, dm_right_t,
+			char *, char *, mode_t, int, int);
+typedef void	(*xfs_send_unmount_t)(struct vfs *, struct vnode *,
+			dm_right_t, mode_t, int, int);
+
+typedef struct xfs_dmops {
+	xfs_send_data_t		xfs_send_data;
+	xfs_send_mmap_t		xfs_send_mmap;
+	xfs_send_destroy_t	xfs_send_destroy;
+	xfs_send_namesp_t	xfs_send_namesp;
+	xfs_send_unmount_t	xfs_send_unmount;
+} xfs_dmops_t;
+
+#define XFS_SEND_DATA(mp, ev,vp,off,len,fl,lock) \
+	(*(mp)->m_dm_ops.xfs_send_data)(ev,vp,off,len,fl,lock)
+#define XFS_SEND_MMAP(mp, vma,fl) \
+	(*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl)
+#define XFS_SEND_DESTROY(mp, vp,right) \
+	(*(mp)->m_dm_ops.xfs_send_destroy)(vp,right)
+#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
+	(*(mp)->m_dm_ops.xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
+#define XFS_SEND_PREUNMOUNT(mp, vfs,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
+	(*(mp)->m_dm_ops.xfs_send_namesp)(DM_EVENT_PREUNMOUNT,vfs,b1,r1,b2,r2,n1,n2,mode,rval,fl)
+#define XFS_SEND_UNMOUNT(mp, vfsp,vp,right,mode,rval,fl) \
+	(*(mp)->m_dm_ops.xfs_send_unmount)(vfsp,vp,right,mode,rval,fl)
+
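The XFS_SEND_* macros above dispatch unconditionally through the per-mount m_dm_ops vector, so a mount without a DMAPI event daemon simply carries no-op handlers (cf. xfs_dmcore_stub declared further down). A stripped-down illustration of that ops-vector pattern with hypothetical hooks:

#include <stdio.h>

struct demo_mount;

/* Per-mount vector of event callbacks, loosely modelled on struct xfs_dmops. */
struct demo_dmops {
	int (*send_data)(struct demo_mount *, long off, long len);
};

struct demo_mount {
	const char	 *name;
	struct demo_dmops dm_ops;	/* copied into the mount at mount time */
};

/* Unconditional dispatch through the vector, like XFS_SEND_DATA(). */
#define DEMO_SEND_DATA(mp, off, len) \
	(*(mp)->dm_ops.send_data)(mp, off, len)

static int log_send_data(struct demo_mount *mp, long off, long len)
{
	printf("%s: data event for bytes %ld..%ld\n", mp->name, off, off + len);
	return 0;
}

/* Stub installed when no event daemon is present, cf. xfs_dmcore_stub. */
static int noop_send_data(struct demo_mount *mp, long off, long len)
{
	(void)mp; (void)off; (void)len;
	return 0;
}

int main(void)
{
	struct demo_mount managed = { "managed", { log_send_data } };
	struct demo_mount plain   = { "plain",   { noop_send_data } };

	DEMO_SEND_DATA(&managed, 0, 4096);	/* reaches log_send_data() */
	DEMO_SEND_DATA(&plain, 0, 4096);	/* harmless no-op */
	return 0;
}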
+
+/*
+ * Prototypes and functions for the Quota Management subsystem.
+ */
+
+struct xfs_dquot;
+struct xfs_dqtrxops;
+struct xfs_quotainfo;
+
+typedef int	(*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
+typedef int	(*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int);
+typedef int	(*xfs_qmunmount_t)(struct xfs_mount *);
+typedef void	(*xfs_qmdone_t)(struct xfs_mount *);
+typedef void	(*xfs_dqrele_t)(struct xfs_dquot *);
+typedef int	(*xfs_dqattach_t)(struct xfs_inode *, uint);
+typedef void	(*xfs_dqdetach_t)(struct xfs_inode *);
+typedef int	(*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
+typedef int	(*xfs_dqvopalloc_t)(struct xfs_mount *,
+			struct xfs_inode *, uid_t, gid_t, uint,
+			struct xfs_dquot **, struct xfs_dquot **);
+typedef void	(*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
+			struct xfs_dquot *, struct xfs_dquot *);
+typedef int	(*xfs_dqvoprename_t)(struct xfs_inode **);
+typedef struct xfs_dquot * (*xfs_dqvopchown_t)(
+			struct xfs_trans *, struct xfs_inode *,
+			struct xfs_dquot **, struct xfs_dquot *);
+typedef int	(*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
+			struct xfs_dquot *, struct xfs_dquot *, uint);
+
+typedef struct xfs_qmops {
+	xfs_qminit_t		xfs_qminit;
+	xfs_qmdone_t		xfs_qmdone;
+	xfs_qmmount_t		xfs_qmmount;
+	xfs_qmunmount_t		xfs_qmunmount;
+	xfs_dqrele_t		xfs_dqrele;
+	xfs_dqattach_t		xfs_dqattach;
+	xfs_dqdetach_t		xfs_dqdetach;
+	xfs_dqpurgeall_t	xfs_dqpurgeall;
+	xfs_dqvopalloc_t	xfs_dqvopalloc;
+	xfs_dqvopcreate_t	xfs_dqvopcreate;
+	xfs_dqvoprename_t	xfs_dqvoprename;
+	xfs_dqvopchown_t	xfs_dqvopchown;
+	xfs_dqvopchownresv_t	xfs_dqvopchownresv;
+	struct xfs_dqtrxops	*xfs_dqtrxops;
+} xfs_qmops_t;
+
+#define XFS_QM_INIT(mp, mnt, fl) \
+	(*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl)
+#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \
+	(*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl, mfsi_flags)
+#define XFS_QM_UNMOUNT(mp) \
+	(*(mp)->m_qm_ops.xfs_qmunmount)(mp)
+#define XFS_QM_DONE(mp) \
+	(*(mp)->m_qm_ops.xfs_qmdone)(mp)
+#define XFS_QM_DQRELE(mp, dq) \
+	(*(mp)->m_qm_ops.xfs_dqrele)(dq)
+#define XFS_QM_DQATTACH(mp, ip, fl) \
+	(*(mp)->m_qm_ops.xfs_dqattach)(ip, fl)
+#define XFS_QM_DQDETACH(mp, ip) \
+	(*(mp)->m_qm_ops.xfs_dqdetach)(ip)
+#define XFS_QM_DQPURGEALL(mp, fl) \
+	(*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl)
+#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \
+	(*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2)
+#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
+	(*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2)
+#define XFS_QM_DQVOPRENAME(mp, ip) \
+	(*(mp)->m_qm_ops.xfs_dqvoprename)(ip)
+#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \
+	(*(mp)->m_qm_ops.xfs_dqvopchown)(tp, ip, dqp, dq)
+#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \
+	(*(mp)->m_qm_ops.xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl)
+
+
+/*
+ * Prototypes and functions for I/O core modularization.
+ */
+
+typedef int		(*xfs_ioinit_t)(struct vfs *,
+				struct xfs_mount_args *, int);
+typedef int		(*xfs_bmapi_t)(struct xfs_trans *, void *,
+				xfs_fileoff_t, xfs_filblks_t, int,
+				xfs_fsblock_t *, xfs_extlen_t,
+				struct xfs_bmbt_irec *, int *,
+				struct xfs_bmap_free *);
+typedef int		(*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
+typedef int		(*xfs_iomap_write_direct_t)(
+				void *, loff_t, size_t, int,
+				struct xfs_bmbt_irec *, int *, int);
+typedef int		(*xfs_iomap_write_delay_t)(
+				void *, loff_t, size_t, int,
+				struct xfs_bmbt_irec *, int *);
+typedef int		(*xfs_iomap_write_allocate_t)(
+				void *, struct xfs_bmbt_irec *, int *);
+typedef int		(*xfs_iomap_write_unwritten_t)(
+				void *, loff_t, size_t);
+typedef uint		(*xfs_lck_map_shared_t)(void *);
+typedef void		(*xfs_lock_t)(void *, uint);
+typedef void		(*xfs_lock_demote_t)(void *, uint);
+typedef int		(*xfs_lock_nowait_t)(void *, uint);
+typedef void		(*xfs_unlk_t)(void *, unsigned int);
+typedef xfs_fsize_t	(*xfs_size_t)(void *);
+typedef xfs_fsize_t	(*xfs_iodone_t)(struct vfs *);
+
+typedef struct xfs_ioops {
+	xfs_ioinit_t			xfs_ioinit;
+	xfs_bmapi_t			xfs_bmapi_func;
+	xfs_bmap_eof_t			xfs_bmap_eof_func;
+	xfs_iomap_write_direct_t	xfs_iomap_write_direct;
+	xfs_iomap_write_delay_t		xfs_iomap_write_delay;
+	xfs_iomap_write_allocate_t	xfs_iomap_write_allocate;
+	xfs_iomap_write_unwritten_t	xfs_iomap_write_unwritten;
+	xfs_lock_t			xfs_ilock;
+	xfs_lck_map_shared_t		xfs_lck_map_shared;
+	xfs_lock_demote_t		xfs_ilock_demote;
+	xfs_lock_nowait_t		xfs_ilock_nowait;
+	xfs_unlk_t			xfs_unlock;
+	xfs_size_t			xfs_size_func;
+	xfs_iodone_t			xfs_iodone;
+} xfs_ioops_t;
+
+#define XFS_IOINIT(vfsp, args, flags) \
+	(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
+#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist)	\
+	(*(mp)->m_io_ops.xfs_bmapi_func) \
+		(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
+#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \
+	(*(mp)->m_io_ops.xfs_bmap_eof_func) \
+		((io)->io_obj, endoff, whichfork, eof)
+#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
+	(*(mp)->m_io_ops.xfs_iomap_write_direct) \
+		((io)->io_obj, offset, count, flags, mval, nmap, found)
+#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
+	(*(mp)->m_io_ops.xfs_iomap_write_delay) \
+		((io)->io_obj, offset, count, flags, mval, nmap)
+#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
+	(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
+		((io)->io_obj, mval, nmap)
+#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
+	(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
+		((io)->io_obj, offset, count)
+#define XFS_LCK_MAP_SHARED(mp, io) \
+	(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
+#define XFS_ILOCK(mp, io, mode) \
+	(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)
+#define XFS_ILOCK_NOWAIT(mp, io, mode) \
+	(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
+#define XFS_IUNLOCK(mp, io, mode) \
+	(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
+#define XFS_ILOCK_DEMOTE(mp, io, mode) \
+	(*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode)
+#define XFS_SIZE(mp, io) \
+	(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
+#define XFS_IODONE(vfsp) \
+	(*(mp)->m_io_ops.xfs_iodone)(vfsp)
+
+
+typedef struct xfs_mount {
+	bhv_desc_t		m_bhv;		/* vfs xfs behavior */
+	xfs_tid_t		m_tid;		/* next unused tid for fs */
+	AIL_LOCK_T		m_ail_lock;	/* fs AIL mutex */
+	xfs_ail_entry_t		m_ail;		/* fs active log item list */
+	uint			m_ail_gen;	/* fs AIL generation count */
+	xfs_sb_t		m_sb;		/* copy of fs superblock */
+	lock_t			m_sb_lock;	/* sb counter mutex */
+	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
+	char			*m_fsname;	/* filesystem name */
+	int			m_fsname_len;	/* strlen of fs name */
+	int			m_bsize;	/* fs logical block size */
+	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
+	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
+	lock_t			m_agirotor_lock;/* .. and lock protecting it */
+	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
+	uint			m_ihsize;	/* size of next field */
+	struct xfs_ihash	*m_ihash;	/* fs private inode hash table*/
+	struct xfs_inode	*m_inodes;	/* active inode list */
+	struct list_head	m_del_inodes;	/* inodes to reclaim */
+	mutex_t			m_ilock;	/* inode list mutex */
+	uint			m_ireclaims;	/* count of calls to reclaim*/
+	uint			m_readio_log;	/* min read size log bytes */
+	uint			m_readio_blocks; /* min read size blocks */
+	uint			m_writeio_log;	/* min write size log bytes */
+	uint			m_writeio_blocks; /* min write size blocks */
+	struct log		*m_log;		/* log specific stuff */
+	int			m_logbufs;	/* number of log buffers */
+	int			m_logbsize;	/* size of each log buffer */
+	uint			m_rsumlevels;	/* rt summary levels */
+	uint			m_rsumsize;	/* size of rt summary, bytes */
+	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
+	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
+	struct xfs_inode	*m_rootip;	/* pointer to root directory */
+	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
+	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
+	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
+	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
+#define m_dev		m_ddev_targp->pbr_dev
+	__uint8_t		m_dircook_elog;	/* log d-cookie entry bits */
+	__uint8_t		m_blkbit_log;	/* blocklog + NBBY */
+	__uint8_t		m_blkbb_log;	/* blocklog - BBSHIFT */
+	__uint8_t		m_agno_log;	/* log #ag's */
+	__uint8_t		m_agino_log;	/* #bits for agino in inum */
+	__uint8_t		m_nreadaheads;	/* #readahead buffers */
+	__uint16_t		m_inode_cluster_size;/* min inode buf size */
+	uint			m_blockmask;	/* sb_blocksize-1 */
+	uint			m_blockwsize;	/* sb_blocksize in words */
+	uint			m_blockwmask;	/* blockwsize-1 */
+	uint			m_alloc_mxr[2];	/* XFS_ALLOC_BLOCK_MAXRECS */
+	uint			m_alloc_mnr[2];	/* XFS_ALLOC_BLOCK_MINRECS */
+	uint			m_bmap_dmxr[2];	/* XFS_BMAP_BLOCK_DMAXRECS */
+	uint			m_bmap_dmnr[2];	/* XFS_BMAP_BLOCK_DMINRECS */
+	uint			m_inobt_mxr[2];	/* XFS_INOBT_BLOCK_MAXRECS */
+	uint			m_inobt_mnr[2];	/* XFS_INOBT_BLOCK_MINRECS */
+	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
+	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
+	uint			m_in_maxlevels;	/* XFS_IN_MAXLEVELS */
+	struct xfs_perag	*m_perag;	/* per-ag accounting info */
+	struct rw_semaphore	m_peraglock;	/* lock for m_perag (pointer) */
+	sema_t			m_growlock;	/* growfs mutex */
+	int			m_fixedfsid[2];	/* unchanged for life of FS */
+	uint			m_dmevmask;	/* DMI events for this FS */
+	uint			m_flags;	/* global mount flags */
+	uint			m_attroffset;	/* inode attribute offset */
+	uint			m_dir_node_ents; /* #entries in a dir danode */
+	uint			m_attr_node_ents; /* #entries in attr danode */
+	int			m_ialloc_inos;	/* inodes in inode allocation */
+	int			m_ialloc_blks;	/* blocks in inode allocation */
+	int			m_litino;	/* size of inode union area */
+	int			m_inoalign_mask;/* mask sb_inoalignmt if used */
+	uint			m_qflags;	/* quota status flags */
+	xfs_trans_reservations_t m_reservations;/* precomputed res values */
+	__uint64_t		m_maxicount;	/* maximum inode count */
+	__uint64_t		m_maxioffset;	/* maximum inode offset */
+	__uint64_t		m_resblks;	/* total reserved blocks */
+	__uint64_t		m_resblks_avail;/* available reserved blocks */
+#if XFS_BIG_INUMS
+	xfs_ino_t		m_inoadd;	/* add value for ino64_offset */
+#endif
+	int			m_dalign;	/* stripe unit */
+	int			m_swidth;	/* stripe width */
+	int			m_sinoalign;	/* stripe unit inode alignmnt */
+	int			m_attr_magicpct;/* 37% of the blocksize */
+	int			m_dir_magicpct;	/* 37% of the dir blocksize */
+	__uint8_t		m_mk_sharedro;	/* mark shared ro on unmount */
+	__uint8_t		m_inode_quiesce;/* call quiesce on new inodes.
+						   field governed by m_ilock */
+	__uint8_t		m_sectbb_log;	/* sectlog - BBSHIFT */
+	__uint8_t		m_dirversion;	/* 1 or 2 */
+	xfs_dirops_t		m_dirops;	/* table of dir funcs */
+	int			m_dirblksize;	/* directory block sz--bytes */
+	int			m_dirblkfsbs;	/* directory block sz--fsbs */
+	xfs_dablk_t		m_dirdatablk;	/* blockno of dir data v2 */
+	xfs_dablk_t		m_dirleafblk;	/* blockno of dir non-data v2 */
+	xfs_dablk_t		m_dirfreeblk;	/* blockno of dirfreeindex v2 */
+	uint			m_chsize;	/* size of next field */
+	struct xfs_chash	*m_chash;	/* fs private inode per-cluster
+						 * hash table */
+	struct xfs_dmops	m_dm_ops;	/* vector of DMI ops */
+	struct xfs_qmops	m_qm_ops;	/* vector of XQM ops */
+	struct xfs_ioops	m_io_ops;	/* vector of I/O ops */
+	atomic_t		m_active_trans;	/* number trans frozen */
+} xfs_mount_t;
+
+/*
+ * Flags for m_flags.
+ */
+#define	XFS_MOUNT_WSYNC		0x00000001	/* for nfs - all metadata ops
+						   must be synchronous except
+						   for space allocations */
+#define	XFS_MOUNT_INO64		0x00000002
+			     /* 0x00000004	-- currently unused */
+			     /* 0x00000008	-- currently unused */
+#define XFS_MOUNT_FS_SHUTDOWN	0x00000010	/* atomic stop of all filesystem
+						   operations, typically for
+						   disk errors in metadata */
+#define XFS_MOUNT_NOATIME	0x00000020	/* don't modify inode access
+						   times on reads */
+#define XFS_MOUNT_RETERR	0x00000040      /* return alignment errors to
+						   user */
+#define XFS_MOUNT_NOALIGN	0x00000080	/* turn off stripe alignment
+						   allocations */
+			     /* 0x00000100	-- currently unused */
+			     /*	0x00000200	-- currently unused */
+#define XFS_MOUNT_NORECOVERY	0x00000400	/* no recovery - dirty fs */
+#define XFS_MOUNT_SHARED	0x00000800	/* shared mount */
+#define XFS_MOUNT_DFLT_IOSIZE	0x00001000	/* set default i/o size */
+#define XFS_MOUNT_OSYNCISOSYNC	0x00002000	/* o_sync is REALLY o_sync */
+						/* osyncisdsync is now default*/
+#define XFS_MOUNT_32BITINODES	0x00004000	/* do not create inodes above
+						 * 32 bits in size */
+#define XFS_MOUNT_32BITINOOPT	0x00008000	/* saved mount option state */
+#define XFS_MOUNT_NOUUID	0x00010000	/* ignore uuid during mount */
+#define XFS_MOUNT_NOLOGFLUSH	0x00020000
+#define XFS_MOUNT_IDELETE	0x00040000	/* delete empty inode clusters*/
+#define XFS_MOUNT_SWALLOC	0x00080000	/* turn on stripe width
+						 * allocation */
+#define XFS_MOUNT_IHASHSIZE	0x00100000	/* inode hash table size */
+#define XFS_MOUNT_DIRSYNC	0x00200000	/* synchronous directory ops */
+
+/*
+ * Default minimum read and write sizes.
+ */
+#define XFS_READIO_LOG_LARGE	16
+#define XFS_WRITEIO_LOG_LARGE	16
+
+/*
+ * Max and min values for UIO and mount-option defined I/O sizes;
+ * min value can't be less than a page.  Currently unused.
+ */
+#define XFS_MAX_IO_LOG		16	/* 64K */
+#define XFS_MIN_IO_LOG		PAGE_SHIFT
+
+/*
+ * Synchronous read and write sizes.  This should be
+ * better for NFSv2 wsync filesystems.
+ */
+#define	XFS_WSYNC_READIO_LOG	15	/* 32K */
+#define	XFS_WSYNC_WRITEIO_LOG	14	/* 16K */
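The *_LOG constants above are log2 of a byte count; the mount structure keeps both the log value (m_readio_log, m_writeio_log) and a derived block count. A quick sketch of those conversions, assuming a 4K filesystem block size (the block-count formula is the obvious shift, shown here as an assumption rather than quoted from the mount code):

#include <assert.h>

#define DEMO_READIO_LOG_LARGE	16	/* 64K */
#define DEMO_WSYNC_READIO_LOG	15	/* 32K */
#define DEMO_WSYNC_WRITEIO_LOG	14	/* 16K */

int main(void)
{
	unsigned int blocklog = 12;		/* 4096-byte filesystem blocks */

	/* log2 size -> bytes */
	assert((1u << DEMO_READIO_LOG_LARGE) == 65536);
	assert((1u << DEMO_WSYNC_READIO_LOG) == 32768);
	assert((1u << DEMO_WSYNC_WRITEIO_LOG) == 16384);

	/* bytes -> filesystem blocks, presumably 1 << (io_log - blocklog) */
	assert((1u << (DEMO_READIO_LOG_LARGE - blocklog)) == 16);
	return 0;
}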
+
+#define XFS_MAXIOFFSET(mp)	((mp)->m_maxioffset)
+
+#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
+#define xfs_force_shutdown(m,f)	\
+	VFS_FORCE_SHUTDOWN((XFS_MTOVFS(m)), f, __FILE__, __LINE__)
+
+/*
+ * Flags sent to xfs_force_shutdown.
+ */
+#define XFS_METADATA_IO_ERROR	0x1
+#define XFS_LOG_IO_ERROR	0x2
+#define XFS_FORCE_UMOUNT	0x4
+#define XFS_CORRUPT_INCORE	0x8	/* Corrupt in-memory data structures */
+#define XFS_SHUTDOWN_REMOTE_REQ 0x10	/* Shutdown came from remote cell */
+
+/*
+ * xflags for xfs_syncsub
+ */
+#define XFS_XSYNC_RELOC		0x01
+
+/*
+ * Flags for xfs_mountfs
+ */
+#define XFS_MFSI_SECOND		0x01	/* Secondary mount -- skip stuff */
+#define XFS_MFSI_CLIENT		0x02	/* Is a client -- skip lots of stuff */
+#define XFS_MFSI_NOUNLINK	0x08	/* Skip unlinked inode processing in */
+					/* log recovery */
+#define XFS_MFSI_NO_QUOTACHECK	0x10	/* Skip quotacheck processing */
+
+/*
+ * Macros for getting from mount to vfs and back.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MTOVFS)
+struct vfs *xfs_mtovfs(xfs_mount_t *mp);
+#define	XFS_MTOVFS(mp)		xfs_mtovfs(mp)
+#else
+#define	XFS_MTOVFS(mp)		(bhvtovfs(&(mp)->m_bhv))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOM)
+xfs_mount_t *xfs_bhvtom(bhv_desc_t *bdp);
+#define	XFS_BHVTOM(bdp)	xfs_bhvtom(bdp)
+#else
+#define XFS_BHVTOM(bdp)		((xfs_mount_t *)BHV_PDATA(bdp))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_VFSTOM)
+xfs_mount_t *xfs_vfstom(vfs_t *vfs);
+#define XFS_VFSTOM(vfs) xfs_vfstom(vfs)
+#else
+#define XFS_VFSTOM(vfs)		\
+	(XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfs), &xfs_vfsops)))
+#endif
+
+
+/*
+ * Moved here from xfs_ag.h to avoid reordering header files
+ */
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGNO)
+xfs_agnumber_t xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d);
+#define XFS_DADDR_TO_AGNO(mp,d)         xfs_daddr_to_agno(mp,d)
+#else
+
+static inline xfs_agnumber_t XFS_DADDR_TO_AGNO(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	d = XFS_BB_TO_FSBT(mp, d);
+	do_div(d, mp->m_sb.sb_agblocks);
+	return (xfs_agnumber_t) d;
+}
+
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGBNO)
+xfs_agblock_t xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d);
+#define XFS_DADDR_TO_AGBNO(mp,d)        xfs_daddr_to_agbno(mp,d)
+#else
+
+static inline xfs_agblock_t XFS_DADDR_TO_AGBNO(xfs_mount_t *mp, xfs_daddr_t d)
+{
+	d = XFS_BB_TO_FSBT(mp, d);
+	return (xfs_agblock_t) do_div(d, mp->m_sb.sb_agblocks);
+}
+
+#endif
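The two inline helpers above split a 512-byte disk address into an allocation-group number (the quotient) and a block offset within that group (the remainder), after converting basic blocks to filesystem blocks with XFS_BB_TO_FSBT(). A worked example with made-up geometry:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical geometry: 4K blocks, 1000 fs blocks per AG. */
	unsigned int blkbb_log = 3;		/* 4096 / 512 = 2^3 basic blocks */
	uint64_t     agblocks  = 1000;

	uint64_t daddr = 20008;			/* a 512-byte disk address */
	uint64_t fsbno = daddr >> blkbb_log;	/* cf. XFS_BB_TO_FSBT(): 2501 */

	uint64_t agno  = fsbno / agblocks;	/* AG 2 */
	uint64_t agbno = fsbno % agblocks;	/* block 501 inside AG 2 */

	assert(fsbno == 2501 && agno == 2 && agbno == 501);

	/* Inverting: AG 2, block 501 maps back to fs block 2501. */
	assert(agno * agblocks + agbno == fsbno);
	return 0;
}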
+
+/*
+ * This structure is for use by the xfs_mod_incore_sb_batch() routine.
+ */
+typedef struct xfs_mod_sb {
+	xfs_sb_field_t	msb_field;	/* Field to modify, see below */
+	int		msb_delta;	/* Change to make to specified field */
+} xfs_mod_sb_t;
+
+#define	XFS_MOUNT_ILOCK(mp)	mutex_lock(&((mp)->m_ilock), PINOD)
+#define	XFS_MOUNT_IUNLOCK(mp)	mutex_unlock(&((mp)->m_ilock))
+#define	XFS_SB_LOCK(mp)		mutex_spinlock(&(mp)->m_sb_lock)
+#define	XFS_SB_UNLOCK(mp,s)	mutex_spinunlock(&(mp)->m_sb_lock,(s))
+
+extern xfs_mount_t *xfs_mount_init(void);
+extern void	xfs_mod_sb(xfs_trans_t *, __int64_t);
+extern void	xfs_mount_free(xfs_mount_t *mp, int remove_bhv);
+extern int	xfs_mountfs(struct vfs *, xfs_mount_t *mp, int);
+
+extern int	xfs_unmountfs(xfs_mount_t *, struct cred *);
+extern void	xfs_unmountfs_wait(xfs_mount_t *);
+extern void	xfs_unmountfs_close(xfs_mount_t *, struct cred *);
+extern int	xfs_unmountfs_writesb(xfs_mount_t *);
+extern int	xfs_unmount_flush(xfs_mount_t *, int);
+extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int);
+extern int	xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
+			uint, int);
+extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
+extern int	xfs_readsb(xfs_mount_t *mp);
+extern void	xfs_freesb(xfs_mount_t *);
+extern void	xfs_do_force_shutdown(bhv_desc_t *, int, char *, int);
+extern int	xfs_syncsub(xfs_mount_t *, int, int, int *);
+extern xfs_agnumber_t	xfs_initialize_perag(xfs_mount_t *, xfs_agnumber_t);
+extern void	xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t);
+
+extern struct vfsops xfs_vfsops;
+extern struct vnodeops xfs_vnodeops;
+
+extern struct xfs_dmops xfs_dmcore_stub;
+extern struct xfs_qmops xfs_qmcore_stub;
+extern struct xfs_ioops xfs_iocore_xfs;
+
+extern int	xfs_init(void);
+extern void	xfs_cleanup(void);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
new file mode 100644
index 0000000..4f40c92
--- /dev/null
+++ b/fs/xfs/xfs_qmops.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.	 Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
+
+STATIC struct xfs_dquot *
+xfs_dqvopchown_default(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*ip,
+	struct xfs_dquot	**dqp,
+	struct xfs_dquot	*dq)
+{
+	return NULL;
+}
+
+xfs_qmops_t	xfs_qmcore_stub = {
+	.xfs_qminit		= (xfs_qminit_t) fs_noerr,
+	.xfs_qmdone		= (xfs_qmdone_t) fs_noerr,
+	.xfs_qmmount		= (xfs_qmmount_t) fs_noerr,
+	.xfs_qmunmount		= (xfs_qmunmount_t) fs_noerr,
+	.xfs_dqrele		= (xfs_dqrele_t) fs_noerr,
+	.xfs_dqattach		= (xfs_dqattach_t) fs_noerr,
+	.xfs_dqdetach		= (xfs_dqdetach_t) fs_noerr,
+	.xfs_dqpurgeall		= (xfs_dqpurgeall_t) fs_noerr,
+	.xfs_dqvopalloc		= (xfs_dqvopalloc_t) fs_noerr,
+	.xfs_dqvopcreate	= (xfs_dqvopcreate_t) fs_noerr,
+	.xfs_dqvoprename	= (xfs_dqvoprename_t) fs_noerr,
+	.xfs_dqvopchown		= xfs_dqvopchown_default,
+	.xfs_dqvopchownresv	= (xfs_dqvopchownresv_t) fs_noerr,
+};
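xfs_qmcore_stub fills every quota-ops slot with fs_noerr (presumably a trivial "report success" helper) cast to the slot's type, so quota-disabled mounts still dispatch through the vector and always get success back. A small sketch of the stub-table idea; since calling through a mismatched function-pointer type is formally undefined in ISO C, the sketch uses correctly-typed no-ops instead of casts:

#include <assert.h>

struct demo_mount;

typedef int (*qm_mount_fn)(struct demo_mount *);
typedef int (*dq_attach_fn)(struct demo_mount *, int inode_id);

struct demo_qmops {
	qm_mount_fn	qm_mount;
	dq_attach_fn	dq_attach;
};

/* Correctly-typed no-ops standing in for the fs_noerr casts. */
static int noop_qm_mount(struct demo_mount *mp) { (void)mp; return 0; }
static int noop_dq_attach(struct demo_mount *mp, int ino) { (void)mp; (void)ino; return 0; }

static const struct demo_qmops demo_qm_stub = {
	.qm_mount  = noop_qm_mount,
	.dq_attach = noop_dq_attach,
};

struct demo_mount {
	struct demo_qmops qm_ops;	/* stub or real table, chosen at mount time */
};

int main(void)
{
	struct demo_mount mp = { .qm_ops = demo_qm_stub };

	/* Callers dispatch unconditionally; with the stub they get 0 back. */
	assert((*mp.qm_ops.qm_mount)(&mp) == 0);
	assert((*mp.qm_ops.dq_attach)(&mp, 42) == 0);
	return 0;
}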
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
new file mode 100644
index 0000000..703ec4e
--- /dev/null
+++ b/fs/xfs/xfs_quota.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_QUOTA_H__
+#define __XFS_QUOTA_H__
+
+/*
+ * The ondisk form of a dquot structure.
+ */
+#define XFS_DQUOT_MAGIC		0x4451		/* 'DQ' */
+#define XFS_DQUOT_VERSION	(u_int8_t)0x01	/* latest version number */
+
+/*
+ * uid_t and gid_t are hard-coded to 32 bits in the inode.
+ * Hence, an 'id' in a dquot is 32 bits..
+ */
+typedef __int32_t	xfs_dqid_t;
+
+/*
+ * Even though users may not have quota limits occupying all 64 bits,
+ * they may need 64-bit accounting. Hence, 64-bit quota counters
+ * and quota limits. This is a waste in the common case, but hey ...
+ */
+typedef __uint64_t	xfs_qcnt_t;
+typedef __uint16_t	xfs_qwarncnt_t;
+
+/*
+ * This is the main portion of the on-disk representation of quota
+ * information for a user. This is the q_core of the xfs_dquot_t that
+ * is kept in kernel memory. We pad this with some more expansion room
+ * to construct the on-disk structure.
+ */
+typedef struct	xfs_disk_dquot {
+/*16*/	u_int16_t	d_magic;	/* dquot magic = XFS_DQUOT_MAGIC */
+/*8 */	u_int8_t	d_version;	/* dquot version */
+/*8 */	u_int8_t	d_flags;	/* XFS_DQ_USER/PROJ/GROUP */
+/*32*/	xfs_dqid_t	d_id;		/* user,project,group id */
+/*64*/	xfs_qcnt_t	d_blk_hardlimit;/* absolute limit on disk blks */
+/*64*/	xfs_qcnt_t	d_blk_softlimit;/* preferred limit on disk blks */
+/*64*/	xfs_qcnt_t	d_ino_hardlimit;/* maximum # allocated inodes */
+/*64*/	xfs_qcnt_t	d_ino_softlimit;/* preferred inode limit */
+/*64*/	xfs_qcnt_t	d_bcount;	/* disk blocks owned by the user */
+/*64*/	xfs_qcnt_t	d_icount;	/* inodes owned by the user */
+/*32*/	__int32_t	d_itimer;	/* zero if within inode limits; if not,
+					   this is when we refuse service */
+/*32*/	__int32_t	d_btimer;	/* similar to above; for disk blocks */
+/*16*/	xfs_qwarncnt_t	d_iwarns;	/* warnings issued wrt num inodes */
+/*16*/	xfs_qwarncnt_t	d_bwarns;	/* warnings issued wrt disk blocks */
+/*32*/	__int32_t	d_pad0;		/* 64 bit align */
+/*64*/	xfs_qcnt_t	d_rtb_hardlimit;/* absolute limit on realtime blks */
+/*64*/	xfs_qcnt_t	d_rtb_softlimit;/* preferred limit on RT disk blks */
+/*64*/	xfs_qcnt_t	d_rtbcount;	/* realtime blocks owned */
+/*32*/	__int32_t	d_rtbtimer;	/* similar to above; for RT disk blocks */
+/*16*/	xfs_qwarncnt_t	d_rtbwarns;	/* warnings issued wrt RT disk blocks */
+/*16*/	__uint16_t	d_pad;
+} xfs_disk_dquot_t;
+
+/*
+ * This is what goes on disk. It is separated from the xfs_disk_dquot because
+ * carrying the unnecessary padding in memory would be a waste.
+ */
+typedef struct xfs_dqblk {
+	xfs_disk_dquot_t  dd_diskdq;	/* portion that lives incore as well */
+	char		  dd_fill[32];	/* filling for posterity */
+} xfs_dqblk_t;
+
+/*
+ * flags for q_flags field in the dquot.
+ */
+#define XFS_DQ_USER		0x0001		/* a user quota */
+/* #define XFS_DQ_PROJ		0x0002		-- project quota (IRIX) */
+#define XFS_DQ_GROUP		0x0004		/* a group quota */
+#define XFS_DQ_FLOCKED		0x0008		/* flush lock taken */
+#define XFS_DQ_DIRTY		0x0010		/* dquot is dirty */
+#define XFS_DQ_WANT		0x0020		/* for lookup/reclaim race */
+#define XFS_DQ_INACTIVE		0x0040		/* dq off mplist & hashlist */
+#define XFS_DQ_MARKER		0x0080		/* sentinel */
+
+/*
+ * In the worst case, when both user and group quotas are on,
+ * we can have a max of three dquots changing in a single transaction.
+ */
+#define XFS_DQUOT_LOGRES(mp)	(sizeof(xfs_disk_dquot_t) * 3)
+
+
+/*
+ * These are the structures used to lay out dquots and quotaoff
+ * records on the log. Quite similar to those of inodes.
+ */
+
+/*
+ * log format struct for dquots.
+ * The first two fields must be the type and size fitting into
+ * 32 bits: the log recovery code assumes that.
+ */
+typedef struct xfs_dq_logformat {
+	__uint16_t		qlf_type;      /* dquot log item type */
+	__uint16_t		qlf_size;      /* size of this item */
+	xfs_dqid_t		qlf_id;	       /* usr/grp id number : 32 bits */
+	__int64_t		qlf_blkno;     /* blkno of dquot buffer */
+	__int32_t		qlf_len;       /* len of dquot buffer */
+	__uint32_t		qlf_boffset;   /* off of dquot in buffer */
+} xfs_dq_logformat_t;
+
+/*
+ * log format struct for QUOTAOFF records.
+ * The first two fields must be the type and size fitting into
+ * 32 bits: the log recovery code assumes that.
+ * We write two LI_QUOTAOFF logitems per quotaoff; the last one keeps a pointer
+ * to the first and ensures that the first logitem is taken out of the AIL
+ * only when the last one is securely committed.
+ */
+typedef struct xfs_qoff_logformat {
+	unsigned short		qf_type;	/* quotaoff log item type */
+	unsigned short		qf_size;	/* size of this item */
+	unsigned int		qf_flags;	/* USR and/or GRP */
+	char			qf_pad[12];	/* padding for future */
+} xfs_qoff_logformat_t;
+
+
+/*
+ * Disk quota status flags, kept in m_qflags and also in sb_qflags. 16 bits.
+ */
+#define XFS_UQUOTA_ACCT	0x0001  /* user quota accounting ON */
+#define XFS_UQUOTA_ENFD	0x0002  /* user quota limits enforced */
+#define XFS_UQUOTA_CHKD	0x0004  /* quotacheck run on usr quotas */
+#define XFS_PQUOTA_ACCT	0x0008  /* (IRIX) project quota accounting ON */
+#define XFS_GQUOTA_ENFD	0x0010  /* group quota limits enforced */
+#define XFS_GQUOTA_CHKD	0x0020  /* quotacheck run on grp quotas */
+#define XFS_GQUOTA_ACCT	0x0040  /* group quota accounting ON */
+
+/*
+ * Incore only flags for quotaoff - these bits get cleared when quota(s)
+ * are in the process of getting turned off. These flags are in m_qflags but
+ * never in sb_qflags.
+ */
+#define XFS_UQUOTA_ACTIVE	0x0080  /* uquotas are being turned off */
+#define XFS_GQUOTA_ACTIVE	0x0100  /* gquotas are being turned off */
+
+/*
+ * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees
+ * quota will not be switched off as long as that inode lock is held.
+ */
+#define XFS_IS_QUOTA_ON(mp)	((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \
+						   XFS_GQUOTA_ACTIVE))
+#define XFS_IS_UQUOTA_ON(mp)	((mp)->m_qflags & XFS_UQUOTA_ACTIVE)
+#define XFS_IS_GQUOTA_ON(mp)	((mp)->m_qflags & XFS_GQUOTA_ACTIVE)
+
+/*
+ * Flags to tell various functions what to do. Not all of these are meaningful
+ * to every function. None of these XFS_QMOPT_* flags are meant to have
+ * persistent values (i.e. their values can and will change between versions).
+ */
+#define XFS_QMOPT_DQLOCK	0x0000001 /* dqlock */
+#define XFS_QMOPT_DQALLOC	0x0000002 /* alloc dquot ondisk if needed */
+#define XFS_QMOPT_UQUOTA	0x0000004 /* user dquot requested */
+#define XFS_QMOPT_GQUOTA	0x0000008 /* group dquot requested */
+#define XFS_QMOPT_FORCE_RES	0x0000010 /* ignore quota limits */
+#define XFS_QMOPT_DQSUSER	0x0000020 /* don't cache super users dquot */
+#define XFS_QMOPT_SBVERSION	0x0000040 /* change superblock version num */
+#define XFS_QMOPT_QUOTAOFF	0x0000080 /* quotas are being turned off */
+#define XFS_QMOPT_UMOUNTING	0x0000100 /* filesys is being unmounted */
+#define XFS_QMOPT_DOLOG		0x0000200 /* log buf changes (in quotacheck) */
+#define XFS_QMOPT_DOWARN        0x0000400 /* increase warning cnt if necessary */
+#define XFS_QMOPT_ILOCKED	0x0000800 /* inode is already locked (excl) */
+#define XFS_QMOPT_DQREPAIR	0x0001000 /* repair dquot, if damaged. */
+
+/*
+ * flags to xfs_trans_mod_dquot to indicate which field needs to be
+ * modified.
+ */
+#define XFS_QMOPT_RES_REGBLKS	0x0010000
+#define XFS_QMOPT_RES_RTBLKS	0x0020000
+#define XFS_QMOPT_BCOUNT	0x0040000
+#define XFS_QMOPT_ICOUNT	0x0080000
+#define XFS_QMOPT_RTBCOUNT	0x0100000
+#define XFS_QMOPT_DELBCOUNT	0x0200000
+#define XFS_QMOPT_DELRTBCOUNT	0x0400000
+#define XFS_QMOPT_RES_INOS	0x0800000
+
+/*
+ * flags for dqflush and dqflush_all.
+ */
+#define XFS_QMOPT_SYNC		0x1000000
+#define XFS_QMOPT_ASYNC		0x2000000
+#define XFS_QMOPT_DELWRI	0x4000000
+
+/*
+ * flags for dqalloc.
+ */
+#define XFS_QMOPT_INHERIT	0x8000000
+
+/*
+ * flags to xfs_trans_mod_dquot.
+ */
+#define XFS_TRANS_DQ_RES_BLKS	XFS_QMOPT_RES_REGBLKS
+#define XFS_TRANS_DQ_RES_RTBLKS	XFS_QMOPT_RES_RTBLKS
+#define XFS_TRANS_DQ_RES_INOS	XFS_QMOPT_RES_INOS
+#define XFS_TRANS_DQ_BCOUNT	XFS_QMOPT_BCOUNT
+#define XFS_TRANS_DQ_DELBCOUNT	XFS_QMOPT_DELBCOUNT
+#define XFS_TRANS_DQ_ICOUNT	XFS_QMOPT_ICOUNT
+#define XFS_TRANS_DQ_RTBCOUNT	XFS_QMOPT_RTBCOUNT
+#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT
+
+
+#define XFS_QMOPT_QUOTALL	(XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA)
+#define XFS_QMOPT_RESBLK_MASK	(XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+
+#ifdef __KERNEL__
+/*
+ * This check is done typically without holding the inode lock;
+ * that may seem racy, but it is harmless in the context in which it is used.
+ * The inode cannot go inactive as long as a reference is kept, and
+ * therefore if dquot(s) were attached, they'll stay consistent.
+ * If, for example, the ownership of the inode changes while
+ * we didn't have the inode locked, the appropriate dquot(s) will be
+ * attached atomically.
+ */
+#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
+				     (ip)->i_udquot == NULL) || \
+				    (XFS_IS_GQUOTA_ON(mp) && \
+				     (ip)->i_gdquot == NULL))
+
+#define XFS_QM_NEED_QUOTACHECK(mp) ((XFS_IS_UQUOTA_ON(mp) && \
+				     (mp->m_sb.sb_qflags & \
+				      XFS_UQUOTA_CHKD) == 0) || \
+				    (XFS_IS_GQUOTA_ON(mp) && \
+				     (mp->m_sb.sb_qflags & \
+				      XFS_GQUOTA_CHKD) == 0))
+
+#define XFS_MOUNT_QUOTA_ALL	(XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
+				 XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
+				 XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD)
+#define XFS_MOUNT_QUOTA_MASK	(XFS_MOUNT_QUOTA_ALL | XFS_UQUOTA_ACTIVE | \
+				 XFS_GQUOTA_ACTIVE)
+
+
+/*
+ * The structure kept inside the xfs_trans_t keeps track of dquot changes
+ * within a transaction so they can be applied later.
+ */
+typedef struct xfs_dqtrx {
+	struct xfs_dquot *qt_dquot;	  /* the dquot this refers to */
+	ulong		qt_blk_res;	  /* blks reserved on a dquot */
+	ulong		qt_blk_res_used;  /* blks used from the reservation */
+	ulong		qt_ino_res;	  /* inode reserved on a dquot */
+	ulong		qt_ino_res_used;  /* inodes used from the reservation */
+	long		qt_bcount_delta;  /* dquot blk count changes */
+	long		qt_delbcnt_delta; /* delayed dquot blk count changes */
+	long		qt_icount_delta;  /* dquot inode count changes */
+	ulong		qt_rtblk_res;	  /* # blks reserved on a dquot */
+	ulong		qt_rtblk_res_used;/* # blks used from reservation */
+	long		qt_rtbcount_delta;/* dquot realtime blk changes */
+	long		qt_delrtb_delta;  /* delayed RT blk count changes */
+} xfs_dqtrx_t;
+
+/*
+ * Dquot transaction functions, used if quota is enabled.
+ */
+typedef void	(*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *);
+typedef void	(*qo_mod_dquot_byino_t)(struct xfs_trans *,
+				struct xfs_inode *, uint, long);
+typedef void	(*qo_free_dqinfo_t)(struct xfs_trans *);
+typedef void	(*qo_apply_dquot_deltas_t)(struct xfs_trans *);
+typedef void	(*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *);
+typedef int	(*qo_reserve_quota_nblks_t)(
+				struct xfs_trans *, struct xfs_mount *,
+				struct xfs_inode *, long, long, uint);
+typedef int	(*qo_reserve_quota_bydquots_t)(
+				struct xfs_trans *, struct xfs_mount *,
+				struct xfs_dquot *, struct xfs_dquot *,
+				long, long, uint);
+typedef struct xfs_dqtrxops {
+	qo_dup_dqinfo_t			qo_dup_dqinfo;
+	qo_free_dqinfo_t		qo_free_dqinfo;
+	qo_mod_dquot_byino_t		qo_mod_dquot_byino;
+	qo_apply_dquot_deltas_t		qo_apply_dquot_deltas;
+	qo_reserve_quota_nblks_t	qo_reserve_quota_nblks;
+	qo_reserve_quota_bydquots_t	qo_reserve_quota_bydquots;
+	qo_unreserve_and_mod_dquots_t	qo_unreserve_and_mod_dquots;
+} xfs_dqtrxops_t;
+
+#define XFS_DQTRXOP(mp, tp, op, args...) \
+		((mp)->m_qm_ops.xfs_dqtrxops ? \
+		((mp)->m_qm_ops.xfs_dqtrxops->op)(tp, ## args) : 0)
+
+#define XFS_DQTRXOP_VOID(mp, tp, op, args...) \
+		((mp)->m_qm_ops.xfs_dqtrxops ? \
+		((mp)->m_qm_ops.xfs_dqtrxops->op)(tp, ## args) : (void)0)
+
+#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \
+	XFS_DQTRXOP_VOID(mp, otp, qo_dup_dqinfo, ntp)
+#define XFS_TRANS_FREE_DQINFO(mp, tp) \
+	XFS_DQTRXOP_VOID(mp, tp, qo_free_dqinfo)
+#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \
+	XFS_DQTRXOP_VOID(mp, tp, qo_mod_dquot_byino, ip, field, delta)
+#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \
+	XFS_DQTRXOP_VOID(mp, tp, qo_apply_dquot_deltas)
+#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \
+	XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl)
+#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \
+	XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl)
+#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \
+	XFS_DQTRXOP_VOID(mp, tp, qo_unreserve_and_mod_dquots)
+
+#define XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, nblks) \
+	XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
+				XFS_QMOPT_RES_REGBLKS)
+#define XFS_TRANS_RESERVE_BLKQUOTA_FORCE(mp, tp, ip, nblks) \
+	XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
+				XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES)
+#define XFS_TRANS_UNRESERVE_BLKQUOTA(mp, tp, ip, nblks) \
+	XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), 0, \
+				XFS_QMOPT_RES_REGBLKS)
+#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
+	XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \
+				f | XFS_QMOPT_RES_REGBLKS)
+#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
+	XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \
+				f | XFS_QMOPT_RES_REGBLKS)
+
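Unlike the XFS_QM_* macros in xfs_mount.h, XFS_DQTRXOP tests the xfs_dqtrxops pointer before dispatching, so a mount with no dquot-transaction ops installed collapses every wrapper above to 0 or a no-op. A minimal sketch of that guarded dispatch (using C99 variadic macros with the common ## extension rather than the kernel's GNU named-argument form):

#include <assert.h>
#include <stddef.h>

struct demo_trans { long blk_res; };

struct demo_dqtrxops {
	int  (*reserve_nblks)(struct demo_trans *, long nblks);
	void (*free_info)(struct demo_trans *);
};

struct demo_mount { const struct demo_dqtrxops *dqtrxops; };

/* Dispatch only if an ops table is installed; otherwise succeed trivially. */
#define DEMO_DQTRXOP(mp, tp, op, ...) \
	((mp)->dqtrxops ? ((mp)->dqtrxops->op)(tp, ##__VA_ARGS__) : 0)
#define DEMO_DQTRXOP_VOID(mp, tp, op, ...) \
	((mp)->dqtrxops ? ((mp)->dqtrxops->op)(tp, ##__VA_ARGS__) : (void)0)

static int real_reserve(struct demo_trans *tp, long nblks)
{
	tp->blk_res += nblks;
	return 0;
}
static void real_free(struct demo_trans *tp) { tp->blk_res = 0; }

static const struct demo_dqtrxops real_ops = { real_reserve, real_free };

int main(void)
{
	struct demo_mount quota_on = { &real_ops }, quota_off = { NULL };
	struct demo_trans tp = { 0 };

	assert(DEMO_DQTRXOP(&quota_on, &tp, reserve_nblks, 8) == 0);
	assert(tp.blk_res == 8);
	DEMO_DQTRXOP_VOID(&quota_on, &tp, free_info);

	/* With no ops table the macro collapses to a successful no-op. */
	assert(DEMO_DQTRXOP(&quota_off, &tp, reserve_nblks, 8) == 0);
	assert(tp.blk_res == 0);
	return 0;
}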
+extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
+
+extern struct bhv_vfsops xfs_qmops;
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/xfs_refcache.h b/fs/xfs/xfs_refcache.h
new file mode 100644
index 0000000..cd8ddfd
--- /dev/null
+++ b/fs/xfs/xfs_refcache.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_REFCACHE_H__
+#define __XFS_REFCACHE_H__
+
+#ifdef HAVE_REFCACHE
+/*
+ * Maximum size (in inodes) for the NFS reference cache
+ */
+#define XFS_REFCACHE_SIZE_MAX	512
+
+struct xfs_inode;
+struct xfs_mount;
+
+extern void xfs_refcache_insert(struct xfs_inode *);
+extern void xfs_refcache_purge_ip(struct xfs_inode *);
+extern void xfs_refcache_purge_mp(struct xfs_mount *);
+extern void xfs_refcache_purge_some(struct xfs_mount *);
+extern void xfs_refcache_resize(int);
+extern void xfs_refcache_destroy(void);
+
+extern void xfs_refcache_iunlock(struct xfs_inode *, uint);
+
+#else
+
+#define xfs_refcache_insert(ip)		do { } while (0)
+#define xfs_refcache_purge_ip(ip)	do { } while (0)
+#define xfs_refcache_purge_mp(mp)	do { } while (0)
+#define xfs_refcache_purge_some(mp)	do { } while (0)
+#define xfs_refcache_resize(size)	do { } while (0)
+#define xfs_refcache_destroy()		do { } while (0)
+
+#define xfs_refcache_iunlock(ip, flags)	xfs_iunlock(ip, flags)
+
+#endif
+
+#endif	/* __XFS_REFCACHE_H__ */
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
new file mode 100644
index 0000000..cb13f9a
--- /dev/null
+++ b/fs/xfs/xfs_rename.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_refcache.h"
+#include "xfs_utils.h"
+#include "xfs_trans_space.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+
+
+/*
+ * Given an array of up to 4 inode pointers, unlock the pointed to inodes.
+ * If there are fewer than 4 entries in the array, the empty entries will
+ * be at the end and will have NULL pointers in them.
+ */
+STATIC void
+xfs_rename_unlock4(
+	xfs_inode_t	**i_tab,
+	uint		lock_mode)
+{
+	int	i;
+
+	xfs_iunlock(i_tab[0], lock_mode);
+	for (i = 1; i < 4; i++) {
+		if (i_tab[i] == NULL) {
+			break;
+		}
+		/*
+		 * Watch out for duplicate entries in the table.
+		 */
+		if (i_tab[i] != i_tab[i-1]) {
+			xfs_iunlock(i_tab[i], lock_mode);
+		}
+	}
+}
+
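+/*
+ * Debug statistics for xfs_lock_for_rename(): xfs_rename_skip counts
+ * renames where the shared lock already held on the target directory
+ * could be kept (it sorted first), xfs_rename_nskip counts those where
+ * it had to be dropped and re-acquired in sorted order.
+ */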
+#ifdef DEBUG
+int xfs_rename_skip, xfs_rename_nskip;
+#endif
+
+/*
+ * The following routine acquires the locks required for a rename
+ * operation.  The code understands the semantics of renames and
+ * validates that name1 exists under dp1; name2 may or may not
+ * exist under dp2.
+ *
+ * We are renaming dp1/name1 to dp2/name2.
+ *
+ * Returns ENOENT if name1 does not exist under dp1, other lookup errors,
+ * or 0 for success.
+ */
+STATIC int
+xfs_lock_for_rename(
+	xfs_inode_t	*dp1,	/* old (source) directory inode */
+	xfs_inode_t	*dp2,	/* new (target) directory inode */
+	vname_t		*vname1,/* old entry name */
+	vname_t		*vname2,/* new entry name */
+	xfs_inode_t	**ipp1,	/* inode of old entry */
+	xfs_inode_t	**ipp2,	/* inode of new entry, if it
+				   already exists, NULL otherwise. */
+	xfs_inode_t	**i_tab,/* array of inodes returned, sorted */
+	int		*num_inodes)  /* number of inodes in array */
+{
+	xfs_inode_t		*ip1, *ip2, *temp;
+	xfs_ino_t		inum1, inum2;
+	int			error;
+	int			i, j;
+	uint			lock_mode;
+	int			diff_dirs = (dp1 != dp2);
+
+	ip2 = NULL;
+
+	/*
+	 * First, find out the current inums of the entries so that we
+	 * can determine the initial locking order.  We'll have to
+	 * sanity check stuff after all the locks have been acquired
+	 * to see if we still have the right inodes, directories, etc.
+	 */
+	lock_mode = xfs_ilock_map_shared(dp1);
+	error = xfs_get_dir_entry(vname1, &ip1);
+	if (error) {
+		xfs_iunlock_map_shared(dp1, lock_mode);
+		return error;
+	}
+
+	ASSERT(ip1);
+	ITRACE(ip1);
+
+	inum1 = ip1->i_ino;
+
+	/*
+	 * Unlock dp1 and lock dp2 if they are different.
+	 */
+
+	if (diff_dirs) {
+		xfs_iunlock_map_shared(dp1, lock_mode);
+		lock_mode = xfs_ilock_map_shared(dp2);
+	}
+
+	error = xfs_dir_lookup_int(XFS_ITOBHV(dp2), lock_mode,
+				   vname2, &inum2, &ip2);
+	if (error == ENOENT) {		/* target does not need to exist. */
+		inum2 = 0;
+	} else if (error) {
+		/*
+		 * If dp2 and dp1 are the same, the next line unlocks dp1.
+		 * Got it?
+		 */
+		xfs_iunlock_map_shared(dp2, lock_mode);
+		IRELE(ip1);
+		return error;
+	} else {
+		ITRACE(ip2);
+	}
+
+	/*
+	 * i_tab contains a list of pointers to inodes.  We initialize
+	 * the table here and sort it.  We will then use it to order
+	 * the acquisition of the inode locks.
+	 *
+	 * Note that the table may contain duplicates, e.g. dp1 == dp2.
+	 */
+	i_tab[0] = dp1;
+	i_tab[1] = dp2;
+	i_tab[2] = ip1;
+	if (inum2 == 0) {
+		*num_inodes = 3;
+		i_tab[3] = NULL;
+	} else {
+		*num_inodes = 4;
+		i_tab[3] = ip2;
+	}
+
+	/*
+	 * Sort the elements via bubble sort.  (Remember, there are at
+	 * most 4 elements to sort, so this is adequate.)
+	 */
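+	/*
+	 * Locking in ascending inode number order gives every rename the
+	 * same global lock order, so concurrent operations on the same
+	 * inodes cannot deadlock against each other.
+	 */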
+	for (i = 0; i < *num_inodes; i++) {
+		for (j = 1; j < *num_inodes; j++) {
+			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
+				temp = i_tab[j];
+				i_tab[j] = i_tab[j-1];
+				i_tab[j-1] = temp;
+			}
+		}
+	}
+
+	/*
+	 * We have dp2 locked.  If it isn't first, unlock it.
+	 * If it is first, tell xfs_lock_inodes so it can skip it
+	 * when locking.  If dp1 == dp2, xfs_lock_inodes will skip both
+	 * since they are equal.  xfs_lock_inodes needs all these inodes
+	 * so that it can unlock and retry if there is potential for a
+	 * deadlock with the log.
+	 */
+
+	if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) {
+#ifdef DEBUG
+		xfs_rename_skip++;
+#endif
+		xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED);
+	} else {
+#ifdef DEBUG
+		xfs_rename_nskip++;
+#endif
+		xfs_iunlock_map_shared(dp2, lock_mode);
+		xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED);
+	}
+
+	/*
+	 * Set the return value. Null out any unused entries in i_tab.
+	 */
+	*ipp1 = *ipp2 = NULL;
+	for (i = 0; i < *num_inodes; i++) {
+		if (i_tab[i]->i_ino == inum1) {
+			*ipp1 = i_tab[i];
+		}
+		if (i_tab[i]->i_ino == inum2) {
+			*ipp2 = i_tab[i];
+		}
+	}
+	for (; i < 4; i++) {
+		i_tab[i] = NULL;
+	}
+	return 0;
+}
+
+
+int rename_which_error_return = 0;
+
+/*
+ * xfs_rename
+ */
+int
+xfs_rename(
+	bhv_desc_t	*src_dir_bdp,
+	vname_t		*src_vname,
+	vnode_t		*target_dir_vp,
+	vname_t		*target_vname,
+	cred_t		*credp)
+{
+	xfs_trans_t	*tp;
+	xfs_inode_t	*src_dp, *target_dp, *src_ip, *target_ip;
+	xfs_mount_t	*mp;
+	int		new_parent;		/* moving to a new dir */
+	int		src_is_directory;	/* src_name is a directory */
+	int		error;
+	xfs_bmap_free_t free_list;
+	xfs_fsblock_t   first_block;
+	int		cancel_flags;
+	int		committed;
+	xfs_inode_t	*inodes[4];
+	int		target_ip_dropped = 0;	/* dropped target_ip link? */
+	vnode_t		*src_dir_vp;
+	bhv_desc_t	*target_dir_bdp;
+	int		spaceres;
+	int		target_link_zero = 0;
+	int		num_inodes;
+	char		*src_name = VNAME(src_vname);
+	char		*target_name = VNAME(target_vname);
+	int		src_namelen = VNAMELEN(src_vname);
+	int		target_namelen = VNAMELEN(target_vname);
+
+	src_dir_vp = BHV_TO_VNODE(src_dir_bdp);
+	vn_trace_entry(src_dir_vp, "xfs_rename", (inst_t *)__return_address);
+	vn_trace_entry(target_dir_vp, "xfs_rename", (inst_t *)__return_address);
+
+	/*
+	 * Find the XFS behavior descriptor for the target directory
+	 * vnode since it was not handed to us.
+	 */
+	target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp),
+						&xfs_vnodeops);
+	if (target_dir_bdp == NULL) {
+		return XFS_ERROR(EXDEV);
+	}
+
+	src_dp = XFS_BHVTOI(src_dir_bdp);
+	target_dp = XFS_BHVTOI(target_dir_bdp);
+	mp = src_dp->i_mount;
+
+	if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
+	    DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
+				target_dp, DM_EVENT_RENAME)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
+					src_dir_vp, DM_RIGHT_NULL,
+					target_dir_vp, DM_RIGHT_NULL,
+					src_name, target_name,
+					0, 0, 0);
+		if (error) {
+			return error;
+		}
+	}
+	/* Return through std_return after this point. */
+
+	/*
+	 * Lock all the participating inodes. Depending upon whether
+	 * the target_name exists in the target directory, and
+	 * whether the target directory is the same as the source
+	 * directory, we can lock from 2 to 4 inodes.
+	 * xfs_lock_for_rename() will return ENOENT if src_name
+	 * does not exist in the source directory.
+	 */
+	tp = NULL;
+	error = xfs_lock_for_rename(src_dp, target_dp, src_vname,
+			target_vname, &src_ip, &target_ip, inodes,
+			&num_inodes);
+
+	if (error) {
+		rename_which_error_return = __LINE__;
+		/*
+		 * We have nothing locked, no inode references, and
+		 * no transaction, so just get out.
+		 */
+		goto std_return;
+	}
+
+	ASSERT(src_ip != NULL);
+
+	if ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+		/*
+		 * Check for link count overflow on target_dp
+		 */
+		if (target_ip == NULL && (src_dp != target_dp) &&
+		    target_dp->i_d.di_nlink >= XFS_MAXLINK) {
+			rename_which_error_return = __LINE__;
+			error = XFS_ERROR(EMLINK);
+			xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+			goto rele_return;
+		}
+	}
+
+	new_parent = (src_dp != target_dp);
+	src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
+
+	/*
+	 * Drop the locks on our inodes so that we can start the transaction.
+	 */
+	xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+
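+	/*
+	 * free_list collects any extents freed by the directory updates
+	 * below; xfs_bmap_finish() processes them before the final commit
+	 * and xfs_bmap_cancel() discards them on the error paths.
+	 */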
+	XFS_BMAP_INIT(&free_list, &first_block);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
+	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
+	if (error == ENOSPC) {
+		spaceres = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
+	}
+	if (error) {
+		rename_which_error_return = __LINE__;
+		xfs_trans_cancel(tp, 0);
+		goto rele_return;
+	}
+
+	/*
+	 * Attach the dquots to the inodes
+	 */
+	if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
+		xfs_trans_cancel(tp, cancel_flags);
+		rename_which_error_return = __LINE__;
+		goto rele_return;
+	}
+
+	/*
+	 * Reacquire the inode locks we dropped above.
+	 */
+	xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL);
+
+	/*
+	 * Join all the inodes to the transaction. From this point on,
+	 * we can rely on either trans_commit or trans_cancel to unlock
+	 * them.  Note that we need to add a vnode reference to the
+	 * directories since trans_commit & trans_cancel will decrement
+	 * them when they unlock the inodes.  Also, we need to be careful
+	 * not to add an inode to the transaction more than once.
+	 */
+	VN_HOLD(src_dir_vp);
+	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
+	if (new_parent) {
+		VN_HOLD(target_dir_vp);
+		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
+	}
+	if ((src_ip != src_dp) && (src_ip != target_dp)) {
+		xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
+	}
+	if ((target_ip != NULL) &&
+	    (target_ip != src_ip) &&
+	    (target_ip != src_dp) &&
+	    (target_ip != target_dp)) {
+		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
+	}
+
+	/*
+	 * Set up the target.
+	 */
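+	/*
+	 * Two cases: the target name does not exist yet, so a new directory
+	 * entry is created; or it does exist, so the entry is replaced and
+	 * the old target inode's link(s) are dropped.
+	 */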
+	if (target_ip == NULL) {
+		/*
+		 * If there's no space reservation, check that the entry
+		 * will fit before actually inserting it.
+		 */
+		if (spaceres == 0 &&
+		    (error = XFS_DIR_CANENTER(mp, tp, target_dp, target_name,
+				target_namelen))) {
+			rename_which_error_return = __LINE__;
+			goto error_return;
+		}
+		/*
+		 * If target does not exist and the rename crosses
+		 * directories, adjust the target directory link count
+		 * to account for the ".." reference from the new entry.
+		 */
+		error = XFS_DIR_CREATENAME(mp, tp, target_dp, target_name,
+					   target_namelen, src_ip->i_ino,
+					   &first_block, &free_list, spaceres);
+		if (error == ENOSPC) {
+			rename_which_error_return = __LINE__;
+			goto error_return;
+		}
+		if (error) {
+			rename_which_error_return = __LINE__;
+			goto abort_return;
+		}
+		xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+		if (new_parent && src_is_directory) {
+			error = xfs_bumplink(tp, target_dp);
+			if (error) {
+				rename_which_error_return = __LINE__;
+				goto abort_return;
+			}
+		}
+	} else { /* target_ip != NULL */
+
+		/*
+		 * If target exists and it's a directory, check that both
+		 * target and source are directories and that target can be
+		 * destroyed, or that neither is a directory.
+		 */
+		if ((target_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+			/*
+			 * Make sure target dir is empty.
+			 */
+			if (!(XFS_DIR_ISEMPTY(target_ip->i_mount, target_ip)) ||
+			    (target_ip->i_d.di_nlink > 2)) {
+				error = XFS_ERROR(EEXIST);
+				rename_which_error_return = __LINE__;
+				goto error_return;
+			}
+		}
+
+		/*
+		 * Link the source inode under the target name.
+		 * If the source inode is a directory and we are moving
+		 * it across directories, its ".." entry will be
+		 * inconsistent until we replace that down below.
+		 *
+		 * In case there is already an entry with the same
+		 * name at the destination directory, remove it first.
+		 */
+		error = XFS_DIR_REPLACE(mp, tp, target_dp, target_name,
+			target_namelen, src_ip->i_ino, &first_block,
+			&free_list, spaceres);
+		if (error) {
+			rename_which_error_return = __LINE__;
+			goto abort_return;
+		}
+		xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+		/*
+		 * Decrement the link count on the target since the target
+		 * dir no longer points to it.
+		 */
+		error = xfs_droplink(tp, target_ip);
+		if (error) {
+			rename_which_error_return = __LINE__;
+			goto abort_return;
+		}
+		target_ip_dropped = 1;
+
+		if (src_is_directory) {
+			/*
+			 * Drop the link from the old "." entry.
+			 */
+			error = xfs_droplink(tp, target_ip);
+			if (error) {
+				rename_which_error_return = __LINE__;
+				goto abort_return;
+			}
+		}
+
+		/* Do this test while we still hold the locks */
+		target_link_zero = (target_ip->i_d.di_nlink == 0);
+
+	} /* target_ip != NULL */
+
+	/*
+	 * Remove the source.
+	 */
+	if (new_parent && src_is_directory) {
+
+		/*
+		 * Rewrite the ".." entry to point to the new
+		 * directory.
+		 */
+		error = XFS_DIR_REPLACE(mp, tp, src_ip, "..", 2,
+					target_dp->i_ino, &first_block,
+					&free_list, spaceres);
+		ASSERT(error != EEXIST);
+		if (error) {
+			rename_which_error_return = __LINE__;
+			goto abort_return;
+		}
+		xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+	} else {
+		/*
+		 * We always want to hit the ctime on the source inode.
+		 * We do it in the if clause above for the 'new_parent &&
+		 * src_is_directory' case, and here we get all the other
+		 * cases.  This isn't strictly required by the standards
+		 * since the source inode isn't really being changed,
+		 * but old unix file systems did it and some incremental
+		 * backup programs won't work without it.
+		 */
+		xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG);
+	}
+
+	/*
+	 * Adjust the link count on src_dp.  This is necessary when
+	 * renaming a directory, either within one parent when
+	 * the target existed, or across two parent directories.
+	 */
+	if (src_is_directory && (new_parent || target_ip != NULL)) {
+
+		/*
+		 * Decrement link count on src_directory since the
+		 * entry that's moved no longer points to it.
+		 */
+		error = xfs_droplink(tp, src_dp);
+		if (error) {
+			rename_which_error_return = __LINE__;
+			goto abort_return;
+		}
+	}
+
+	error = XFS_DIR_REMOVENAME(mp, tp, src_dp, src_name, src_namelen,
+			src_ip->i_ino, &first_block, &free_list, spaceres);
+	if (error) {
+		rename_which_error_return = __LINE__;
+		goto abort_return;
+	}
+	xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+	/*
+	 * Update the generation counts on all the directory inodes
+	 * that we're modifying.
+	 */
+	src_dp->i_gen++;
+	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
+
+	if (new_parent) {
+		target_dp->i_gen++;
+		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
+	}
+
+	/*
+	 * If there was a target inode, take an extra reference on
+	 * it here so that it doesn't go to xfs_inactive() from
+	 * within the commit.
+	 */
+	if (target_ip != NULL) {
+		IHOLD(target_ip);
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * rename transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	/*
+	 * Take references for the VOP_LINK_REMOVED calls below.  No need
+	 * to worry about directory references because the caller holds them.
+	 *
+	 * Take the holds before xfs_bmap_finish() since it might release
+	 * them down to zero.
+	 */
+
+	if (target_ip_dropped)
+		IHOLD(target_ip);
+	IHOLD(src_ip);
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		xfs_bmap_cancel(&free_list);
+		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
+				 XFS_TRANS_ABORT));
+		if (target_ip != NULL) {
+			IRELE(target_ip);
+		}
+		if (target_ip_dropped) {
+			IRELE(target_ip);
+		}
+		IRELE(src_ip);
+		goto std_return;
+	}
+
+	/*
+	 * trans_commit will unlock src_ip, target_ip & decrement
+	 * the vnode references.
+	 */
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (target_ip != NULL) {
+		xfs_refcache_purge_ip(target_ip);
+		IRELE(target_ip);
+	}
+	/*
+	 * Let interposed file systems know about removed links.
+	 */
+	if (target_ip_dropped) {
+		VOP_LINK_REMOVED(XFS_ITOV(target_ip), target_dir_vp,
+					target_link_zero);
+		IRELE(target_ip);
+	}
+
+	FSC_NOTIFY_NAME_CHANGED(XFS_ITOV(src_ip));
+
+	IRELE(src_ip);
+
+	/*
+	 * Fall through to std_return with error = 0 or the errno from
+	 * xfs_trans_commit.
+	 */
+std_return:
+	if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) ||
+	    DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
+				target_dp, DM_EVENT_POSTRENAME)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTRENAME,
+					src_dir_vp, DM_RIGHT_NULL,
+					target_dir_vp, DM_RIGHT_NULL,
+					src_name, target_name,
+					0, error, 0);
+	}
+	return error;
+
+ abort_return:
+	cancel_flags |= XFS_TRANS_ABORT;
+	/* FALLTHROUGH */
+ error_return:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, cancel_flags);
+	goto std_return;
+
+ rele_return:
+	IRELE(src_ip);
+	if (target_ip != NULL) {
+		IRELE(target_ip);
+	}
+	goto std_return;
+}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
new file mode 100644
index 0000000..2c37822
--- /dev/null
+++ b/fs/xfs/xfs_rtalloc.c
@@ -0,0 +1,2469 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Free realtime space allocation for XFS.
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_fsops.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+#include "xfs_inode_item.h"
+#include "xfs_trans_space.h"
+
+
+/*
+ * Prototypes for internal functions.
+ */
+
+
+STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
+		xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *);
+STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int,
+		xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *);
+STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
+		xfs_extlen_t, int, xfs_rtblock_t *, int *);
+STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
+		xfs_rtblock_t, xfs_rtblock_t *);
+STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
+		xfs_rtblock_t, xfs_rtblock_t *);
+STATIC int xfs_rtget_summary(xfs_mount_t *, xfs_trans_t *, int,
+		xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *);
+STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
+		xfs_extlen_t, int);
+STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
+		xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *);
+
+/*
+ * Internal functions.
+ */
+
+/*
+ * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set.
+ */
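+/* For example, xfs_lowbit32(0x8) returns 3 and xfs_lowbit32(0) returns -1. */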
+STATIC int
+xfs_lowbit32(
+	__uint32_t	v)
+{
+	if (v)
+		return ffs(v) - 1;
+	return -1;
+}
+
+/*
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ */
+STATIC int				/* error */
+xfs_growfs_rt_alloc(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_extlen_t	oblocks,	/* old count of blocks */
+	xfs_extlen_t	nblocks,	/* new count of blocks */
+	xfs_ino_t	ino)		/* inode number (bitmap/summary) */
+{
+	xfs_fileoff_t	bno;		/* block number in file */
+	xfs_buf_t	*bp;		/* temporary buffer for zeroing */
+	int		cancelflags;	/* flags for xfs_trans_cancel */
+	int		committed;	/* transaction committed flag */
+	xfs_daddr_t	d;		/* disk block address */
+	int		error;		/* error return value */
+	xfs_fsblock_t	firstblock;	/* first block allocated in xaction */
+	xfs_bmap_free_t	flist;		/* list of freed blocks */
+	xfs_fsblock_t	fsbno;		/* filesystem block for bno */
+	xfs_inode_t	*ip;		/* pointer to incore inode */
+	xfs_bmbt_irec_t	map;		/* block map output */
+	int		nmap;		/* number of block maps */
+	int		resblks;	/* space reservation */
+	xfs_trans_t	*tp;		/* transaction pointer */
+
+	/*
+	 * Allocate space to the file, as necessary.
+	 */
+	while (oblocks < nblocks) {
+		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
+		resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
+		cancelflags = 0;
+		/*
+		 * Reserve space & log for one extent added to the file.
+		 */
+		if ((error = xfs_trans_reserve(tp, resblks,
+				XFS_GROWRTALLOC_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES,
+				XFS_DEFAULT_PERM_LOG_COUNT)))
+			goto error_exit;
+		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+		/*
+		 * Lock the inode.
+		 */
+		if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip)))
+			goto error_exit;
+		XFS_BMAP_INIT(&flist, &firstblock);
+		/*
+		 * Allocate blocks to the bitmap or summary file.
+		 */
+		nmap = 1;
+		cancelflags |= XFS_TRANS_ABORT;
+		error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks,
+			XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock,
+			resblks, &map, &nmap, &flist);
+		if (!error && nmap < 1)
+			error = XFS_ERROR(ENOSPC);
+		if (error)
+			goto error_exit;
+		/*
+		 * Free any blocks freed up in the transaction, then commit.
+		 */
+		error = xfs_bmap_finish(&tp, &flist, firstblock, &committed);
+		if (error)
+			goto error_exit;
+		xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+		/*
+		 * Now we need to clear the allocated blocks.
+		 * Do this one block per transaction, to keep it simple.
+		 */
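+		/*
+		 * Each zeroing transaction reserves only
+		 * XFS_GROWRTZERO_LOG_RES(mp) of log space and logs a single
+		 * buffer, keeping the reservation small at the cost of more
+		 * transactions.
+		 */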
+		cancelflags = 0;
+		for (bno = map.br_startoff, fsbno = map.br_startblock;
+		     bno < map.br_startoff + map.br_blockcount;
+		     bno++, fsbno++) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
+			/*
+			 * Reserve log for one block zeroing.
+			 */
+			if ((error = xfs_trans_reserve(tp, 0,
+					XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0)))
+				goto error_exit;
+			/*
+			 * Lock the bitmap or summary inode.
+			 */
+			if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL,
+					&ip)))
+				goto error_exit;
+			/*
+			 * Get a buffer for the block.
+			 */
+			d = XFS_FSB_TO_DADDR(mp, fsbno);
+			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+				mp->m_bsize, 0);
+			if (bp == NULL) {
+				error = XFS_ERROR(EIO);
+				goto error_exit;
+			}
+			memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize);
+			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+			/*
+			 * Commit the transaction.
+			 */
+			xfs_trans_commit(tp, 0, NULL);
+		}
+		/*
+		 * Go on to the next extent, if any.
+		 */
+		oblocks = map.br_startoff + map.br_blockcount;
+	}
+	return 0;
+error_exit:
+	xfs_trans_cancel(tp, cancelflags);
+	return error;
+}
+
+/*
+ * Attempt to allocate an extent minlen<=len<=maxlen starting from
+ * bitmap block bbno.  If we don't get maxlen then use prod to trim
+ * the length, if given.  Returns error; returns starting block in *rtblock.
+ * The lengths are all in rtextents.
+ */
+STATIC int				/* error */
+xfs_rtallocate_extent_block(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	xfs_extlen_t	minlen,		/* minimum length to allocate */
+	xfs_extlen_t	maxlen,		/* maximum length to allocate */
+	xfs_extlen_t	*len,		/* out: actual length allocated */
+	xfs_rtblock_t	*nextp,		/* out: next block to try */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_extlen_t	prod,		/* extent product factor */
+	xfs_rtblock_t	*rtblock)	/* out: start block allocated */
+{
+	xfs_rtblock_t	besti;		/* best rtblock found so far */
+	xfs_rtblock_t	bestlen;	/* best length found so far */
+	xfs_rtblock_t	end;		/* last rtblock in chunk */
+	int		error;		/* error value */
+	xfs_rtblock_t	i;		/* current rtblock trying */
+	xfs_rtblock_t	next;		/* next rtblock to try */
+	int		stat;		/* status from internal calls */
+
+	/*
+	 * Loop over all the extents starting in this bitmap block,
+	 * looking for one that's long enough.
+	 */
+	for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
+		end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+	     i <= end;
+	     i++) {
+		/*
+		 * See if there's a free extent of maxlen starting at i.
+		 * If there isn't, next will contain the first non-free block.
+		 */
+		error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+		if (error) {
+			return error;
+		}
+		if (stat) {
+			/*
+			 * The maxlen range starting at i is all free;
+			 * allocate it and return.
+			 */
+			error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
+				rsb);
+			if (error) {
+				return error;
+			}
+			*len = maxlen;
+			*rtblock = i;
+			return 0;
+		}
+		/*
+		 * In the case where we have a variable-sized allocation
+		 * request, figure out how big this free piece is,
+		 * and if it's big enough for the minimum, and the best
+		 * so far, remember it.
+		 */
+		if (minlen < maxlen) {
+			xfs_rtblock_t	thislen;	/* this extent size */
+
+			thislen = next - i;
+			if (thislen >= minlen && thislen > bestlen) {
+				besti = i;
+				bestlen = thislen;
+			}
+		}
+		/*
+		 * If not done yet, find the start of the next free space.
+		 */
+		if (next < end) {
+			error = xfs_rtfind_forw(mp, tp, next, end, &i);
+			if (error) {
+				return error;
+			}
+		} else
+			break;
+	}
+	/*
+	 * Searched the whole bitmap block and didn't find a maxlen free extent.
+	 */
+	if (minlen < maxlen && besti != -1) {
+		xfs_extlen_t	p;	/* amount to trim length by */
+
+		/*
+		 * If size should be a multiple of prod, make that so.
+		 */
+		if (prod > 1 && (p = do_mod(bestlen, prod)))
+			bestlen -= p;
+		/*
+		 * Allocate besti for bestlen & return that.
+		 */
+		error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+		*len = bestlen;
+		*rtblock = besti;
+		return 0;
+	}
+	/*
+	 * Allocation failed.  Set *nextp to the next block to try.
+	 */
+	*nextp = next;
+	*rtblock = NULLRTBLOCK;
+	return 0;
+}
+
+/*
+ * Allocate an extent of length minlen<=len<=maxlen, starting at block
+ * bno.  If we don't get maxlen then use prod to trim the length, if given.
+ * Returns error; returns starting block in *rtblock.
+ * The lengths are all in rtextents.
+ */
+STATIC int				/* error */
+xfs_rtallocate_extent_exact(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number to allocate */
+	xfs_extlen_t	minlen,		/* minimum length to allocate */
+	xfs_extlen_t	maxlen,		/* maximum length to allocate */
+	xfs_extlen_t	*len,		/* out: actual length allocated */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_extlen_t	prod,		/* extent product factor */
+	xfs_rtblock_t	*rtblock)	/* out: start block allocated */
+{
+	int		error;		/* error value */
+	xfs_extlen_t	i;		/* extent length trimmed due to prod */
+	int		isfree;		/* extent is free */
+	xfs_rtblock_t	next;		/* next block to try (dummy) */
+
+	ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+	/*
+	 * Check if the range in question (for maxlen) is free.
+	 */
+	error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree);
+	if (error) {
+		return error;
+	}
+	if (isfree) {
+		/*
+		 * If it is, allocate it and return success.
+		 */
+		error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+		*len = maxlen;
+		*rtblock = bno;
+		return 0;
+	}
+	/*
+	 * If not, allocate what there is, if it's at least minlen.
+	 */
+	maxlen = next - bno;
+	if (maxlen < minlen) {
+		/*
+		 * Failed, return failure status.
+		 */
+		*rtblock = NULLRTBLOCK;
+		return 0;
+	}
+	/*
+	 * Trim off tail of extent, if prod is specified.
+	 */
+	if (prod > 1 && (i = maxlen % prod)) {
+		maxlen -= i;
+		if (maxlen < minlen) {
+			/*
+			 * Now we can't do it, return failure status.
+			 */
+			*rtblock = NULLRTBLOCK;
+			return 0;
+		}
+	}
+	/*
+	 * Allocate what we can and return it.
+	 */
+	error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+	if (error) {
+		return error;
+	}
+	*len = maxlen;
+	*rtblock = bno;
+	return 0;
+}
+
+/*
+ * Allocate an extent of length minlen<=len<=maxlen, starting as near
+ * to bno as possible.  If we don't get maxlen then use prod to trim
+ * the length, if given.  The lengths are all in rtextents.
+ */
+STATIC int				/* error */
+xfs_rtallocate_extent_near(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number to allocate */
+	xfs_extlen_t	minlen,		/* minimum length to allocate */
+	xfs_extlen_t	maxlen,		/* maximum length to allocate */
+	xfs_extlen_t	*len,		/* out: actual length allocated */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_extlen_t	prod,		/* extent product factor */
+	xfs_rtblock_t	*rtblock)	/* out: start block allocated */
+{
+	int		any;		/* any useful extents from summary */
+	xfs_rtblock_t	bbno;		/* bitmap block number */
+	int		error;		/* error value */
+	int		i;		/* bitmap block offset (loop control) */
+	int		j;		/* secondary loop control */
+	int		log2len;	/* log2 of minlen */
+	xfs_rtblock_t	n;		/* next block to try */
+	xfs_rtblock_t	r;		/* result block */
+
+	ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+	/*
+	 * If the block number given is off the end, silently set it to
+	 * the last block.
+	 */
+	if (bno >= mp->m_sb.sb_rextents)
+		bno = mp->m_sb.sb_rextents - 1;
+	/*
+	 * Try the exact allocation first.
+	 */
+	error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len,
+		rbpp, rsb, prod, &r);
+	if (error) {
+		return error;
+	}
+	/*
+	 * If the exact allocation worked, return that.
+	 */
+	if (r != NULLRTBLOCK) {
+		*rtblock = r;
+		return 0;
+	}
+	bbno = XFS_BITTOBLOCK(mp, bno);
+	i = 0;
+	log2len = xfs_highbit32(minlen);
+	/*
+	 * Loop over all bitmap blocks (bbno + i is current block).
+	 */
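+	/*
+	 * Away from the ends of the bitmap, the loop control below visits
+	 * i = 0, 1, -1, 2, -2, ... fanning out from bbno; near either end
+	 * the out-of-range side is skipped.
+	 */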
+	for (;;) {
+		/*
+		 * Get summary information of extents of all useful levels
+		 * starting in this bitmap block.
+		 */
+		error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1,
+			bbno + i, rbpp, rsb, &any);
+		if (error) {
+			return error;
+		}
+		/*
+		 * If there are any useful extents starting here, try
+		 * allocating one.
+		 */
+		if (any) {
+			/*
+			 * On the positive side of the starting location.
+			 */
+			if (i >= 0) {
+				/*
+				 * Try to allocate an extent starting in
+				 * this block.
+				 */
+				error = xfs_rtallocate_extent_block(mp, tp,
+					bbno + i, minlen, maxlen, len, &n, rbpp,
+					rsb, prod, &r);
+				if (error) {
+					return error;
+				}
+				/*
+				 * If it worked, return it.
+				 */
+				if (r != NULLRTBLOCK) {
+					*rtblock = r;
+					return 0;
+				}
+			}
+			/*
+			 * On the negative side of the starting location.
+			 */
+			else {		/* i < 0 */
+				/*
+				 * Loop backwards through the bitmap blocks from
+				 * Loop backwards through the bitmap blocks,
+				 * from the starting point - 1 down to where
+				 * we are now.
+				 * bitmap block and is long enough.
+				 */
+				for (j = -1; j > i; j--) {
+					/*
+					 * Grab the summary information for
+					 * this bitmap block.
+					 */
+					error = xfs_rtany_summary(mp, tp,
+						log2len, mp->m_rsumlevels - 1,
+						bbno + j, rbpp, rsb, &any);
+					if (error) {
+						return error;
+					}
+					/*
+					 * If there's no extent given in the
+					 * summary that means the extent we
+					 * found must carry over from an
+					 * earlier block.  If there is an
+					 * extent given, we've already tried
+					 * that allocation, don't do it again.
+					 */
+					if (any)
+						continue;
+					error = xfs_rtallocate_extent_block(mp,
+						tp, bbno + j, minlen, maxlen,
+						len, &n, rbpp, rsb, prod, &r);
+					if (error) {
+						return error;
+					}
+					/*
+					 * If it works, return the extent.
+					 */
+					if (r != NULLRTBLOCK) {
+						*rtblock = r;
+						return 0;
+					}
+				}
+				/*
+				 * There weren't intervening bitmap blocks
+				 * with a long enough extent, or the
+				 * allocation didn't work for some reason
+				 * (i.e. it's a little too short).
+				 * Try to allocate from the summary block
+				 * that we found.
+				 */
+				error = xfs_rtallocate_extent_block(mp, tp,
+					bbno + i, minlen, maxlen, len, &n, rbpp,
+					rsb, prod, &r);
+				if (error) {
+					return error;
+				}
+				/*
+				 * If it works, return the extent.
+				 */
+				if (r != NULLRTBLOCK) {
+					*rtblock = r;
+					return 0;
+				}
+			}
+		}
+		/*
+		 * Loop control.  If we were on the positive side, and there
+		 * are still more blocks on the negative side, go there.
+		 */
+		if (i > 0 && (int)bbno - i >= 0)
+			i = -i;
+		/*
+		 * If positive, and no more negative, but there are more
+		 * positive, go there.
+		 */
+		else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1)
+			i++;
+		/*
+		 * If negative or 0 (just started), and there are positive
+		 * blocks to go, go there.  The 0 case moves to block 1.
+		 */
+		else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1)
+			i = 1 - i;
+		/*
+		 * If negative or 0 and there are more negative blocks,
+		 * go there.
+		 */
+		else if (i <= 0 && (int)bbno + i > 0)
+			i--;
+		/*
+		 * Must be done.  Return failure.
+		 */
+		else
+			break;
+	}
+	*rtblock = NULLRTBLOCK;
+	return 0;
+}
+
+/*
+ * Allocate an extent of length minlen<=len<=maxlen, with no position
+ * specified.  If we don't get maxlen then use prod to trim
+ * the length, if given.  The lengths are all in rtextents.
+ */
+STATIC int				/* error */
+xfs_rtallocate_extent_size(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_extlen_t	minlen,		/* minimum length to allocate */
+	xfs_extlen_t	maxlen,		/* maximum length to allocate */
+	xfs_extlen_t	*len,		/* out: actual length allocated */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_extlen_t	prod,		/* extent product factor */
+	xfs_rtblock_t	*rtblock)	/* out: start block allocated */
+{
+	int		error;		/* error value */
+	int		i;		/* bitmap block number */
+	int		l;		/* level number (loop control) */
+	xfs_rtblock_t	n;		/* next block to be tried */
+	xfs_rtblock_t	r;		/* result block number */
+	xfs_suminfo_t	sum;		/* summary information for extents */
+
+	ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+	/*
+	 * Loop over all the levels starting with maxlen.
+	 * At each level, look at all the bitmap blocks, to see if there
+	 * are extents starting there that are long enough (>= maxlen).
+	 * Note, only on the initial level can the allocation fail if
+	 * the summary says there's an extent.
+	 */
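+	/*
+	 * Summary level l counts free extents whose length is in the range
+	 * [2^l, 2^(l+1) - 1] rtextents (see the min/max clamping in the
+	 * second pass below), so starting at xfs_highbit32(maxlen) only
+	 * visits levels that could satisfy a maxlen request.
+	 */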
+	for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) {
+		/*
+		 * Loop over all the bitmap blocks.
+		 */
+		for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
+			/*
+			 * Get the summary for this level/block.
+			 */
+			error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
+				&sum);
+			if (error) {
+				return error;
+			}
+			/*
+			 * Nothing there, on to the next block.
+			 */
+			if (!sum)
+				continue;
+			/*
+			 * Try allocating the extent.
+			 */
+			error = xfs_rtallocate_extent_block(mp, tp, i, maxlen,
+				maxlen, len, &n, rbpp, rsb, prod, &r);
+			if (error) {
+				return error;
+			}
+			/*
+			 * If it worked, return that.
+			 */
+			if (r != NULLRTBLOCK) {
+				*rtblock = r;
+				return 0;
+			}
+			/*
+			 * If the "next block to try" returned from the
+			 * allocator is beyond the next bitmap block,
+			 * skip to that bitmap block.
+			 */
+			if (XFS_BITTOBLOCK(mp, n) > i + 1)
+				i = XFS_BITTOBLOCK(mp, n) - 1;
+		}
+	}
+	/*
+	 * Didn't find any maxlen blocks.  Try smaller ones, unless
+	 * we're asking for a fixed size extent.
+	 */
+	if (minlen > --maxlen) {
+		*rtblock = NULLRTBLOCK;
+		return 0;
+	}
+	/*
+	 * Loop over sizes, from maxlen down to minlen.
+	 * This time, when we do the allocations, allow smaller ones
+	 * to succeed.
+	 */
+	for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
+		/*
+		 * Loop over all the bitmap blocks, try an allocation
+		 * starting in that block.
+		 */
+		for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
+			/*
+			 * Get the summary information for this level/block.
+			 */
+			error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
+						  &sum);
+			if (error) {
+				return error;
+			}
+			/*
+			 * If nothing there, go on to next.
+			 */
+			if (!sum)
+				continue;
+			/*
+			 * Try the allocation.  Make sure the specified
+			 * minlen/maxlen are in the possible range for
+			 * this summary level.
+			 */
+			error = xfs_rtallocate_extent_block(mp, tp, i,
+					XFS_RTMAX(minlen, 1 << l),
+					XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
+					len, &n, rbpp, rsb, prod, &r);
+			if (error) {
+				return error;
+			}
+			/*
+			 * If it worked, return that extent.
+			 */
+			if (r != NULLRTBLOCK) {
+				*rtblock = r;
+				return 0;
+			}
+			/*
+			 * If the "next block to try" returned from the
+			 * allocator is beyond the next bitmap block,
+			 * skip to that bitmap block.
+			 */
+			if (XFS_BITTOBLOCK(mp, n) > i + 1)
+				i = XFS_BITTOBLOCK(mp, n) - 1;
+		}
+	}
+	/*
+	 * Got nothing, return failure.
+	 */
+	*rtblock = NULLRTBLOCK;
+	return 0;
+}
+
+/*
+ * Mark an extent specified by start and len allocated.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int				/* error */
+xfs_rtallocate_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* start block to allocate */
+	xfs_extlen_t	len,		/* length to allocate */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_rtblock_t	end;		/* end of the allocated extent */
+	int		error;		/* error value */
+	xfs_rtblock_t	postblock;	/* last block of the free extent */
+	xfs_rtblock_t	preblock;	/* first block of the free extent */
+
+	end = start + len - 1;
+	/*
+	 * Assume we're allocating out of the middle of a free extent.
+	 * We need to find the beginning and end of the extent so we can
+	 * properly update the summary.
+	 */
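+	/*
+	 * Example: allocating [start, end] out of a free run
+	 * [preblock, postblock] drops the old run's summary entry and
+	 * re-adds entries for the leftover pieces [preblock, start - 1]
+	 * and [end + 1, postblock], when those are non-empty.
+	 */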
+	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Find the next allocated block (end of free extent).
+	 */
+	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+		&postblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Decrement the summary information corresponding to the entire
+	 * (old) free extent.
+	 */
+	error = xfs_rtmodify_summary(mp, tp,
+		XFS_RTBLOCKLOG(postblock + 1 - preblock),
+		XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+	if (error) {
+		return error;
+	}
+	/*
+	 * If there are blocks not being allocated at the front of the
+	 * old extent, add summary data for them to be free.
+	 */
+	if (preblock < start) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(start - preblock),
+			XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * If there are blocks not being allocated at the end of the
+	 * old extent, add summary data for them to be free.
+	 */
+	if (postblock > end) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(postblock - end),
+			XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * Modify the bitmap to mark this extent allocated.
+	 */
+	error = xfs_rtmodify_range(mp, tp, start, len, 0);
+	return error;
+}
+
+/*
+ * Return whether there are any free extents in the size range given
+ * by low and high, for the bitmap block bbno.
+ */
+STATIC int				/* error */
+xfs_rtany_summary(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		low,		/* low log2 extent size */
+	int		high,		/* high log2 extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	int		*stat)		/* out: any good extents here? */
+{
+	int		error;		/* error value */
+	int		log;		/* loop counter, log2 of ext. size */
+	xfs_suminfo_t	sum;		/* summary data */
+
+	/*
+	 * Loop over logs of extent sizes.  Order is irrelevant.
+	 */
+	for (log = low; log <= high; log++) {
+		/*
+		 * Get one summary datum.
+		 */
+		error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
+		if (error) {
+			return error;
+		}
+		/*
+		 * If there are any, return success.
+		 */
+		if (sum) {
+			*stat = 1;
+			return 0;
+		}
+	}
+	/*
+	 * Found nothing, return failure.
+	 */
+	*stat = 0;
+	return 0;
+}
+
+/*
+ * Get a buffer for the bitmap or summary file block specified.
+ * The buffer is returned read and locked.
+ */
+STATIC int				/* error */
+xfs_rtbuf_get(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	block,		/* block number in bitmap or summary */
+	int		issum,		/* is summary not bitmap */
+	xfs_buf_t	**bpp)		/* output: buffer for the block */
+{
+	xfs_buf_t	*bp;		/* block buffer, result */
+	xfs_daddr_t	d;		/* disk addr of block */
+	int		error;		/* error value */
+	xfs_fsblock_t	fsb;		/* fs block number for block */
+	xfs_inode_t	*ip;		/* bitmap or summary inode */
+
+	ip = issum ? mp->m_rsumip : mp->m_rbmip;
+	/*
+	 * Map from the file offset (block) within the bitmap or summary
+	 * inode to the file system block.
+	 */
+	error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, block);
+	if (error) {
+		return error;
+	}
+	ASSERT(fsb != NULLFSBLOCK);
+	/*
+	 * Convert to disk address for buffer cache.
+	 */
+	d = XFS_FSB_TO_DADDR(mp, fsb);
+	/*
+	 * Read the buffer.
+	 */
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
+				   mp->m_bsize, 0, &bp);
+	if (error) {
+		return error;
+	}
+	ASSERT(bp && !XFS_BUF_GETERROR(bp));
+	*bpp = bp;
+	return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check that the given extent (block range) is allocated already.
+ */
+STATIC int				/* error */
+xfs_rtcheck_alloc_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		*stat)		/* out: 1 for allocated, 0 for not */
+{
+	xfs_rtblock_t	new;		/* dummy for xfs_rtcheck_range */
+
+	return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat);
+}
+#endif
+
+#ifdef DEBUG
+/*
+ * Check whether the given block in the bitmap has the given value.
+ */
+STATIC int				/* 1 for matches, 0 for not */
+xfs_rtcheck_bit(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* bit (block) to check */
+	int		val)		/* 1 for free, 0 for allocated */
+{
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* pointer into the buffer */
+	/* REFERENCED */
+	int		error;		/* error value */
+	xfs_rtword_t	wdiff;		/* difference between bit & expected */
+	int		word;		/* word number in the buffer */
+	xfs_rtword_t	wval;		/* word value from buffer */
+
+	block = XFS_BITTOBLOCK(mp, start);
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+	word = XFS_BITTOWORD(mp, start);
+	bit = (int)(start & (XFS_NBWORD - 1));
+	wval = bufp[word];
+	xfs_trans_brelse(tp, bp);
+	wdiff = (wval ^ -val) & ((xfs_rtword_t)1 << bit);
+	return !wdiff;
+}
+#endif	/* DEBUG */
+
+#if 0
+/*
+ * Check that the given extent (block range) is free already.
+ */
+STATIC int				/* error */
+xfs_rtcheck_free_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		*stat)		/* out: 1 for free, 0 for not */
+{
+	xfs_rtblock_t	new;		/* dummy for xfs_rtcheck_range */
+
+	return xfs_rtcheck_range(mp, tp, bno, len, 1, &new, stat);
+}
+#endif
+
+/*
+ * Check that the given range is either all allocated (val = 0) or
+ * all free (val = 1).
+ */
+STATIC int				/* error */
+xfs_rtcheck_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block number of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		val,		/* 1 for free, 0 for allocated */
+	xfs_rtblock_t	*new,		/* out: first block not matching */
+	int		*stat)		/* out: 1 for matches, 0 for not */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	lastbit;	/* last useful bit in word */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute starting bitmap block number
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	/*
+	 * Read the bitmap block.
+	 */
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+	/*
+	 * Compute the starting word's address, and starting bit.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	/*
+	 * 0 (allocated) => all zeroes; 1 (free) => all ones.
+	 */
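+	/*
+	 * Negation maps val == 1 to an all-ones xfs_rtword_t, so whole
+	 * bitmap words can be compared against the expected value with a
+	 * single XOR below.
+	 */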
+	val = -val;
+	/*
+	 * If not starting on a word boundary, deal with the first
+	 * (partial) word.
+	 */
+	if (bit) {
+		/*
+		 * Compute first bit not examined.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		/*
+		 * Mask of relevant bits.
+		 */
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ val) & mask)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = XFS_RTLOBIT(wdiff) - bit;
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		}
+		i = lastbit - bit;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ val)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Mask of relevant bits.
+		 */
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ val) & mask)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * Successful, return.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*new = start + i;
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Copy and transform the summary file, given the old and new
+ * parameters in the mount structures.
+ */
+STATIC int				/* error */
+xfs_rtcopy_summary(
+	xfs_mount_t	*omp,		/* old file system mount point */
+	xfs_mount_t	*nmp,		/* new file system mount point */
+	xfs_trans_t	*tp)		/* transaction pointer */
+{
+	xfs_rtblock_t	bbno;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* summary buffer */
+	int		error;		/* error return value */
+	int		log;		/* summary level number (log length) */
+	xfs_suminfo_t	sum;		/* summary data */
+	xfs_fsblock_t	sumbno;		/* summary block number */
+
+	bp = NULL;
+	for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
+		for (bbno = omp->m_sb.sb_rbmblocks - 1;
+		     (xfs_srtblock_t)bbno >= 0;
+		     bbno--) {
+			error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
+				&sumbno, &sum);
+			if (error)
+				return error;
+			if (sum == 0)
+				continue;
+			error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
+				&bp, &sumbno);
+			if (error)
+				return error;
+			error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
+				&bp, &sumbno);
+			if (error)
+				return error;
+			ASSERT(sum > 0);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Searching backward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+STATIC int				/* error */
+xfs_rtfind_back(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to look at */
+	xfs_rtblock_t	limit,		/* last block to look at */
+	xfs_rtblock_t	*rtblock)	/* out: start block found */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	firstbit;	/* first useful bit in the word */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	len;		/* length of inspected area */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	want;		/* mask for "good" values */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute and read in starting bitmap block for starting block.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+	/*
+	 * Get the first word's index & point to it.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	len = start - limit + 1;
+	/*
+	 * Compute match value, based on the bit at start: if 1 (free)
+	 * then all-ones, else all-zeroes.
+	 */
+	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+	/*
+	 * If the starting position is not word-aligned, deal with the
+	 * If the starting position is not at the top bit of its word,
+	 * deal with the (partial) first word.
+	if (bit < XFS_NBWORD - 1) {
+		/*
+		 * Calculate first (leftmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
+		mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
+			firstbit;
+		/*
+		 * Calculate the difference between the value there
+		 * and what we're looking for.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different.  Mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = bit - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		}
+		i = bit - firstbit + 1;
+		/*
+		 * Go on to previous block if that's where the previous word is
+		 * and we need the previous word.
+		 */
+		if (--word == -1 && i < len) {
+			/*
+			 * If done with this block, get the previous one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = XFS_BLOCKWMASK(mp);
+			b = &bufp[word];
+		} else {
+			/*
+			 * Go on to the previous word in the buffer.
+			 */
+			b--;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the previous one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ want)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to previous block if that's where the previous word is
+		 * and we need the previous word.
+		 */
+		if (--word == -1 && i < len) {
+			/*
+			 * If done with this block, get the previous one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = XFS_BLOCKWMASK(mp);
+			b = &bufp[word];
+		} else {
+			/*
+			 * Go on to the previous word in the buffer.
+			 */
+			b--;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if (len - i) {
+		/*
+		 * Calculate first (leftmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		firstbit = XFS_NBWORD - (len - i);
+		mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * No match, return that we scanned the whole area.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*rtblock = start - i + 1;
+	return 0;
+}
+
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+STATIC int				/* error */
+xfs_rtfind_forw(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to look at */
+	xfs_rtblock_t	limit,		/* last block to look at */
+	xfs_rtblock_t	*rtblock)	/* out: start block found */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	lastbit;	/* last useful bit in the word */
+	xfs_rtblock_t	len;		/* length of inspected area */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	want;		/* mask for "good" values */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute and read in starting bitmap block for starting block.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+	/*
+	 * Get the first word's index & point to it.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	len = limit - start + 1;
+	/*
+	 * Compute match value, based on the bit at start: if 1 (free)
+	 * then all-ones, else all-zeroes.
+	 */
+	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+	/*
+	 * If the starting position is not word-aligned, deal with the
+	 * partial word.
+	 */
+	if (bit) {
+		/*
+		 * Calculate last (rightmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Calculate the difference between the value there
+		 * and what we're looking for.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different.  Mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = XFS_RTLOBIT(wdiff) - bit;
+			*rtblock = start + i - 1;
+			return 0;
+		}
+		i = lastbit - bit;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ want)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*rtblock = start + i - 1;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Calculate mask for all the relevant bits in this word.
+		 */
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*rtblock = start + i - 1;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * No match, return that we scanned the whole area.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*rtblock = start + i - 1;
+	return 0;
+}
+
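+/*
+ * Illustrative, self-contained sketch of the word-at-a-time scan used by
+ * xfs_rtfind_back and xfs_rtfind_forw above: XOR each bitmap word against
+ * "want" (all-ones if the starting bit was free, all-zeroes if it was
+ * allocated) so that any set bit in the result marks the first place the
+ * allocation state changes.  The example_* helpers below are made up for
+ * illustration only (user-space, flat 32-bit word array, no buffers or
+ * transactions); nbits is assumed to be a multiple of 32, and the sketch
+ * returns the first differing bit, whereas xfs_rtfind_forw reports the
+ * last matching block (start + i - 1).  xfs_rtfind_back is the mirror
+ * image, using the highest rather than the lowest set bit of the XOR.
+ */
+static int
+example_lowbit32(
+	unsigned int	w)		/* nonzero word to scan */
+{
+	int		i;		/* candidate bit number */
+
+	for (i = 0; i < 32; i++)
+		if (w & (1U << i))
+			return i;
+	return -1;
+}
+
+static unsigned int
+example_find_forw(
+	unsigned int	*bitmap,	/* flat bitmap, 32 bits per word */
+	unsigned int	start,		/* starting bit number */
+	unsigned int	nbits)		/* total bits, multiple of 32 */
+{
+	unsigned int	word = start >> 5;	/* word containing start */
+	unsigned int	bit = start & 31;	/* bit number within word */
+	unsigned int	want;			/* all-ones or all-zeroes */
+	unsigned int	wdiff;			/* difference from want */
+	unsigned int	i;			/* bit number being examined */
+
+	/*
+	 * Match value: all-ones if the starting bit is set (free),
+	 * all-zeroes if it is clear (allocated).
+	 */
+	want = (bitmap[word] & (1U << bit)) ? ~0U : 0U;
+	/*
+	 * Partial first word: ignore the bits below "start".
+	 */
+	wdiff = (bitmap[word] ^ want) & ~((1U << bit) - 1);
+	if (wdiff)
+		return (word << 5) + example_lowbit32(wdiff);
+	/*
+	 * Whole words: any nonzero XOR pinpoints the changed bit.
+	 */
+	for (i = (word + 1) << 5; i < nbits; i += 32) {
+		wdiff = bitmap[i >> 5] ^ want;
+		if (wdiff)
+			return i + example_lowbit32(wdiff);
+	}
+	return nbits;		/* whole range matches start's state */
+}
+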
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int				/* error */
+xfs_rtfree_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to free */
+	xfs_extlen_t	len,		/* length to free */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_rtblock_t	end;		/* end of the freed extent */
+	int		error;		/* error value */
+	xfs_rtblock_t	postblock;	/* first block freed > end */
+	xfs_rtblock_t	preblock;	/* first block freed < start */
+
+	end = start + len - 1;
+	/*
+	 * Modify the bitmap to mark this extent freed.
+	 */
+	error = xfs_rtmodify_range(mp, tp, start, len, 1);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Assume we're freeing out of the middle of an allocated extent.
+	 * We need to find the beginning and end of the extent so we can
+	 * properly update the summary.
+	 */
+	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Find the next allocated block (end of allocated extent).
+	 */
+	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+		&postblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * If there are blocks not being freed at the front of the
+	 * old extent, add summary data for them to be allocated.
+	 */
+	if (preblock < start) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(start - preblock),
+			XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * If there are blocks not being freed at the end of the
+	 * old extent, add summary data for them to be allocated.
+	 */
+	if (postblock > end) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(postblock - end),
+			XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * Increment the summary information corresponding to the entire
+	 * (new) free extent.
+	 */
+	error = xfs_rtmodify_summary(mp, tp,
+		XFS_RTBLOCKLOG(postblock + 1 - preblock),
+		XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+	return error;
+}
+
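+/*
+ * Worked example of the summary updates above (values are assumed for
+ * illustration).  Suppose blocks 8-11 and 16-19 are already free, their
+ * neighbours 7 and 20 are allocated, and we free the extent [12, 15]
+ * (start = 12, len = 4, end = 15):
+ *
+ *	xfs_rtfind_back  gives preblock  = 8  (start of the merged extent)
+ *	xfs_rtfind_forw  gives postblock = 19 (end of the merged extent)
+ *
+ * The old front fragment [8, 11] has length start - preblock = 4, so the
+ * count for level XFS_RTBLOCKLOG(4) = 2 is decremented; the old back
+ * fragment [16, 19] has length postblock - end = 4, decrementing level 2
+ * again.  The merged extent [8, 19] has length postblock + 1 - preblock
+ * = 12, so the count for level XFS_RTBLOCKLOG(12) = 3 is incremented.
+ * The net effect is that the summary always counts free extents by the
+ * log2 of their length, per bitmap block in which they start.
+ */
+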
+/*
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+STATIC int				/* error */
+xfs_rtget_summary(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		log,		/* log2 of extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_suminfo_t	*sum)		/* out: summary info for this block */
+{
+	xfs_buf_t	*bp;		/* buffer for summary block */
+	int		error;		/* error value */
+	xfs_fsblock_t	sb;		/* summary fsblock */
+	int		so;		/* index into the summary file */
+	xfs_suminfo_t	*sp;		/* pointer to returned data */
+
+	/*
+	 * Compute entry number in the summary file.
+	 */
+	so = XFS_SUMOFFS(mp, log, bbno);
+	/*
+	 * Compute the block number in the summary file.
+	 */
+	sb = XFS_SUMOFFSTOBLOCK(mp, so);
+	/*
+	 * If we have an old buffer, and the block number matches, use that.
+	 */
+	if (rbpp && *rbpp && *rsb == sb)
+		bp = *rbpp;
+	/*
+	 * Otherwise we have to get the buffer.
+	 */
+	else {
+		/*
+		 * If there was an old one, get rid of it first.
+		 */
+		if (rbpp && *rbpp)
+			xfs_trans_brelse(tp, *rbpp);
+		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+		if (error) {
+			return error;
+		}
+		/*
+		 * Remember this buffer and block for the next call.
+		 */
+		if (rbpp) {
+			*rbpp = bp;
+			*rsb = sb;
+		}
+	}
+	/*
+	 * Point to the summary information & copy it out.
+	 */
+	sp = XFS_SUMPTR(mp, bp, so);
+	*sum = *sp;
+	/*
+	 * Drop the buffer if we're not asked to remember it.
+	 */
+	if (!rbpp)
+		xfs_trans_brelse(tp, bp);
+	return 0;
+}
+
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+STATIC int				/* error */
+xfs_rtmodify_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to modify */
+	xfs_extlen_t	len,		/* length of extent to modify */
+	int		val)		/* 1 for free, 0 for allocated */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtword_t	*first;		/* first used word in the buffer */
+	int		i;		/* current bit number rel. to start */
+	int		lastbit;	/* last useful bit in word */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute starting bitmap block number.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	/*
+	 * Read the bitmap block, and point to its data.
+	 */
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+	/*
+	 * Compute the starting word's address, and starting bit.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	first = b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	/*
+	 * 0 (allocated) => all zeroes; 1 (free) => all ones.
+	 */
+	val = -val;
+	/*
+	 * If not starting on a word boundary, deal with the first
+	 * (partial) word.
+	 */
+	if (bit) {
+		/*
+		 * Compute first bit not changed and mask of relevant bits.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Set/clear the active bits.
+		 */
+		if (val)
+			*b |= mask;
+		else
+			*b &= ~mask;
+		i = lastbit - bit;
+		/*
+		 * Go on to the next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * Log the changed part of this block.
+			 * Get the next one.
+			 */
+			xfs_trans_log_buf(tp, bp,
+				(uint)((char *)first - (char *)bufp),
+				(uint)((char *)b - (char *)bufp));
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Set the word value correctly.
+		 */
+		*b = val;
+		i += XFS_NBWORD;
+		/*
+		 * Go on to the next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * Log the changed part of this block.
+			 * Get the next one.
+			 */
+			xfs_trans_log_buf(tp, bp,
+				(uint)((char *)first - (char *)bufp),
+				(uint)((char *)b - (char *)bufp));
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Compute a mask of relevant bits.
+		 */
+		bit = 0;
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Set/clear the active bits.
+		 */
+		if (val)
+			*b |= mask;
+		else
+			*b &= ~mask;
+		b++;
+	}
+	/*
+	 * Log any remaining changed bytes.
+	 */
+	if (b > first)
+		xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
+			(uint)((char *)b - (char *)bufp - 1));
+	return 0;
+}
+
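+/*
+ * Illustrative user-space sketch of the decomposition xfs_rtmodify_range
+ * uses above: a partial first word, a run of whole words, then a partial
+ * last word.  This example_* helper is made up for illustration only; it
+ * works on a flat array of 32-bit words with no buffers or logging, and
+ * assumes len > 0.
+ */
+static void
+example_modify_range(
+	unsigned int	*bitmap,	/* flat bitmap, 32 bits per word */
+	unsigned int	start,		/* first bit to modify */
+	unsigned int	len,		/* number of bits to modify */
+	int		val)		/* 1 to set (free), 0 to clear */
+{
+	unsigned int	word = start >> 5;	/* current word index */
+	unsigned int	bit = start & 31;	/* bit number within word */
+	unsigned int	i = 0;			/* bits handled so far */
+	unsigned int	lastbit;		/* last bit touched in word */
+	unsigned int	mask;			/* mask of relevant bits */
+
+	if (bit) {
+		/*
+		 * Partial first word: only bits [bit, lastbit) change.
+		 */
+		lastbit = bit + len < 32 ? bit + len : 32;
+		mask = ((1U << (lastbit - bit)) - 1) << bit;
+		if (val)
+			bitmap[word] |= mask;
+		else
+			bitmap[word] &= ~mask;
+		i = lastbit - bit;
+		word++;
+	}
+	/*
+	 * Whole words are simply overwritten.
+	 */
+	while (len - i >= 32) {
+		bitmap[word++] = val ? ~0U : 0U;
+		i += 32;
+	}
+	/*
+	 * Partial last word, if the range does not end word-aligned.
+	 */
+	if (len - i) {
+		mask = (1U << (len - i)) - 1;
+		if (val)
+			bitmap[word] |= mask;
+		else
+			bitmap[word] &= ~mask;
+	}
+}
+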
+/*
+ * Read and modify the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+STATIC int				/* error */
+xfs_rtmodify_summary(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		log,		/* log2 of extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	int		delta,		/* change to make to summary info */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_buf_t	*bp;		/* buffer for the summary block */
+	int		error;		/* error value */
+	xfs_fsblock_t	sb;		/* summary fsblock */
+	int		so;		/* index into the summary file */
+	xfs_suminfo_t	*sp;		/* pointer to returned data */
+
+	/*
+	 * Compute entry number in the summary file.
+	 */
+	so = XFS_SUMOFFS(mp, log, bbno);
+	/*
+	 * Compute the block number in the summary file.
+	 */
+	sb = XFS_SUMOFFSTOBLOCK(mp, so);
+	/*
+	 * If we have an old buffer, and the block number matches, use that.
+	 */
+	if (rbpp && *rbpp && *rsb == sb)
+		bp = *rbpp;
+	/*
+	 * Otherwise we have to get the buffer.
+	 */
+	else {
+		/*
+		 * If there was an old one, get rid of it first.
+		 */
+		if (rbpp && *rbpp)
+			xfs_trans_brelse(tp, *rbpp);
+		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+		if (error) {
+			return error;
+		}
+		/*
+		 * Remember this buffer and block for the next call.
+		 */
+		if (rbpp) {
+			*rbpp = bp;
+			*rsb = sb;
+		}
+	}
+	/*
+	 * Point to the summary information, modify and log it.
+	 */
+	sp = XFS_SUMPTR(mp, bp, so);
+	*sp += delta;
+	xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)),
+		(uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1));
+	return 0;
+}
+
+/*
+ * Visible (exported) functions.
+ */
+
+/*
+ * Grow the realtime area of the filesystem.
+ */
+int
+xfs_growfs_rt(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_growfs_rt_t	*in)		/* growfs rt input struct */
+{
+	xfs_rtblock_t	bmbno;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* temporary buffer */
+	int		cancelflags;	/* flags for xfs_trans_cancel */
+	int		error;		/* error return value */
+	xfs_inode_t	*ip;		/* bitmap inode, used as lock */
+	xfs_mount_t	*nmp;		/* new (fake) mount structure */
+	xfs_drfsbno_t	nrblocks;	/* new number of realtime blocks */
+	xfs_extlen_t	nrbmblocks;	/* new number of rt bitmap blocks */
+	xfs_drtbno_t	nrextents;	/* new number of realtime extents */
+	uint8_t		nrextslog;	/* new log2 of sb_rextents */
+	xfs_extlen_t	nrsumblocks;	/* new number of summary blocks */
+	uint		nrsumlevels;	/* new rt summary levels */
+	uint		nrsumsize;	/* new size of rt summary, bytes */
+	xfs_sb_t	*nsbp;		/* new superblock */
+	xfs_extlen_t	rbmblocks;	/* current number of rt bitmap blocks */
+	xfs_extlen_t	rsumblocks;	/* current number of rt summary blks */
+	xfs_sb_t	*sbp;		/* old superblock */
+	xfs_fsblock_t	sumbno;		/* summary block number */
+	xfs_trans_t	*tp;		/* transaction pointer */
+
+	sbp = &mp->m_sb;
+	/*
+	 * Initial error checking.
+	 */
+	if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
+	    (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
+	    (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+		return XFS_ERROR(EINVAL);
+	/*
+	 * Read in the last block of the device, make sure it exists.
+	 */
+	error = xfs_read_buf(mp, mp->m_rtdev_targp,
+			XFS_FSB_TO_BB(mp, in->newblocks - 1),
+			XFS_FSB_TO_BB(mp, 1), 0, &bp);
+	if (error)
+		return error;
+	ASSERT(bp);
+	xfs_buf_relse(bp);
+	/*
+	 * Calculate new parameters.  These are the final values to be reached.
+	 */
+	nrextents = nrblocks;
+	do_div(nrextents, in->extsize);
+	nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
+	nrextslog = xfs_highbit32(nrextents);
+	nrsumlevels = nrextslog + 1;
+	nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
+	nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+	nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
+	/*
+	 * New summary size can't be more than half the size of
+	 * the log.  This prevents us from getting a log overflow,
+	 * since we'll log basically the whole summary file at once.
+	 */
+	if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1))
+		return XFS_ERROR(EINVAL);
+	/*
+	 * Get the old block counts for bitmap and summary inodes.
+	 * These can't change since other growfs callers are locked out.
+	 */
+	rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_d.di_size);
+	rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_d.di_size);
+	/*
+	 * Allocate space to the bitmap and summary files, as necessary.
+	 */
+	if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks,
+			mp->m_sb.sb_rbmino)))
+		return error;
+	if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
+			mp->m_sb.sb_rsumino)))
+		return error;
+	nmp = NULL;
+	/*
+	 * Loop over the bitmap blocks.
+	 * We will do everything one bitmap block at a time.
+	 * Skip the current block if it is exactly full.
+	 * This also deals with the case where there were no rtextents before.
+	 */
+	for (bmbno = sbp->sb_rbmblocks -
+		     ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
+	     bmbno < nrbmblocks;
+	     bmbno++) {
+		/*
+		 * Allocate a new (fake) mount/sb.
+		 */
+		nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
+		*nmp = *mp;
+		nsbp = &nmp->m_sb;
+		/*
+		 * Calculate new sb and mount fields for this round.
+		 */
+		nsbp->sb_rextsize = in->extsize;
+		nsbp->sb_rbmblocks = bmbno + 1;
+		nsbp->sb_rblocks =
+			XFS_RTMIN(nrblocks,
+				  nsbp->sb_rbmblocks * NBBY *
+				  nsbp->sb_blocksize * nsbp->sb_rextsize);
+		nsbp->sb_rextents = nsbp->sb_rblocks;
+		do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+		nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
+		nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
+		nrsumsize =
+			(uint)sizeof(xfs_suminfo_t) * nrsumlevels *
+			nsbp->sb_rbmblocks;
+		nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+		nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
+		/*
+		 * Start a transaction, get the log reservation.
+		 */
+		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE);
+		cancelflags = 0;
+		if ((error = xfs_trans_reserve(tp, 0,
+				XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0)))
+			goto error_exit;
+		/*
+		 * Lock out other callers by grabbing the bitmap inode lock.
+		 */
+		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
+				XFS_ILOCK_EXCL, &ip)))
+			goto error_exit;
+		ASSERT(ip == mp->m_rbmip);
+		/*
+		 * Update the bitmap inode's size.
+		 */
+		mp->m_rbmip->i_d.di_size =
+			nsbp->sb_rbmblocks * nsbp->sb_blocksize;
+		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+		cancelflags |= XFS_TRANS_ABORT;
+		/*
+		 * Get the summary inode into the transaction.
+		 */
+		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino,
+				0, XFS_ILOCK_EXCL, &ip)))
+			goto error_exit;
+		ASSERT(ip == mp->m_rsumip);
+		/*
+		 * Update the summary inode's size.
+		 */
+		mp->m_rsumip->i_d.di_size = nmp->m_rsumsize;
+		xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
+		/*
+		 * Copy summary data from old to new sizes.
+		 * Do this when the real size (not block-aligned) changes.
+		 */
+		if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks ||
+		    mp->m_rsumlevels != nmp->m_rsumlevels) {
+			error = xfs_rtcopy_summary(mp, nmp, tp);
+			if (error)
+				goto error_exit;
+		}
+		/*
+		 * Update superblock fields.
+		 */
+		if (nsbp->sb_rextsize != sbp->sb_rextsize)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE,
+				nsbp->sb_rextsize - sbp->sb_rextsize);
+		if (nsbp->sb_rbmblocks != sbp->sb_rbmblocks)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS,
+				nsbp->sb_rbmblocks - sbp->sb_rbmblocks);
+		if (nsbp->sb_rblocks != sbp->sb_rblocks)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS,
+				nsbp->sb_rblocks - sbp->sb_rblocks);
+		if (nsbp->sb_rextents != sbp->sb_rextents)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS,
+				nsbp->sb_rextents - sbp->sb_rextents);
+		if (nsbp->sb_rextslog != sbp->sb_rextslog)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG,
+				nsbp->sb_rextslog - sbp->sb_rextslog);
+		/*
+		 * Free new extent.
+		 */
+		bp = NULL;
+		error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
+			nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
+		if (error)
+			goto error_exit;
+		/*
+		 * Mark more blocks free in the superblock.
+		 */
+		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS,
+			nsbp->sb_rextents - sbp->sb_rextents);
+		/*
+		 * Free the fake mp structure.
+		 */
+		kmem_free(nmp, sizeof(*nmp));
+		nmp = NULL;
+		/*
+		 * Update mp values into the real mp structure.
+		 */
+		mp->m_rsumlevels = nrsumlevels;
+		mp->m_rsumsize = nrsumsize;
+		/*
+		 * Commit the transaction.
+		 */
+		xfs_trans_commit(tp, 0, NULL);
+	}
+	return 0;
+
+	/*
+	 * Error paths come here.
+	 */
+error_exit:
+	if (nmp)
+		kmem_free(nmp, sizeof(*nmp));
+	xfs_trans_cancel(tp, cancelflags);
+	return error;
+}
+
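+/*
+ * Worked example of the size calculations above, with assumed values:
+ * a 4096-byte block size (so one bitmap block holds NBBY * 4096 = 32768
+ * bits), in->newblocks = 1000000 and in->extsize = 16, with 4-byte
+ * summary entries:
+ *
+ *	nrextents   = 1000000 / 16           = 62500
+ *	nrbmblocks  = howmany(62500, 32768)  = 2
+ *	nrextslog   = highbit32(62500)       = 15
+ *	nrsumlevels = 15 + 1                 = 16
+ *	nrsumsize   = 4 * 16 * 2 = 128 bytes -> 1 summary block,
+ *	                                        rounded back up to 4096
+ *
+ * The loop then grows the bitmap one block at a time, recomputing these
+ * values in the fake mount/superblock for each intermediate size so that
+ * every transaction sees a consistent geometry.
+ */
+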
+/*
+ * Allocate an extent in the realtime subvolume, with the usual allocation
+ * parameters.  The length units are all in realtime extents, as is the
+ * result block number.
+ */
+int					/* error */
+xfs_rtallocate_extent(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number to allocate */
+	xfs_extlen_t	minlen,		/* minimum length to allocate */
+	xfs_extlen_t	maxlen,		/* maximum length to allocate */
+	xfs_extlen_t	*len,		/* out: actual length allocated */
+	xfs_alloctype_t	type,		/* allocation type XFS_ALLOCTYPE... */
+	int		wasdel,		/* was a delayed allocation extent */
+	xfs_extlen_t	prod,		/* extent product factor */
+	xfs_rtblock_t	*rtblock)	/* out: start block allocated */
+{
+	int		error;		/* error value */
+	xfs_inode_t	*ip;		/* inode for bitmap file */
+	xfs_mount_t	*mp;		/* file system mount structure */
+	xfs_rtblock_t	r;		/* result allocated block */
+	xfs_fsblock_t	sb;		/* summary file block number */
+	xfs_buf_t	*sumbp;		/* summary file block buffer */
+
+	ASSERT(minlen > 0 && minlen <= maxlen);
+	mp = tp->t_mountp;
+	/*
+	 * If prod is set then figure out what to do to minlen and maxlen.
+	 */
+	if (prod > 1) {
+		xfs_extlen_t	i;
+
+		if ((i = maxlen % prod))
+			maxlen -= i;
+		if ((i = minlen % prod))
+			minlen += prod - i;
+		if (maxlen < minlen) {
+			*rtblock = NULLRTBLOCK;
+			return 0;
+		}
+	}
+	/*
+	 * Lock out other callers by grabbing the bitmap inode lock.
+	 */
+	error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
+	if (error) {
+		return error;
+	}
+	sumbp = NULL;
+	/*
+	 * Allocate by size, or near another block, or exactly at some block.
+	 */
+	switch (type) {
+	case XFS_ALLOCTYPE_ANY_AG:
+		error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len,
+				&sumbp,	&sb, prod, &r);
+		break;
+	case XFS_ALLOCTYPE_NEAR_BNO:
+		error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen,
+				len, &sumbp, &sb, prod, &r);
+		break;
+	case XFS_ALLOCTYPE_THIS_BNO:
+		error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen,
+				len, &sumbp, &sb, prod, &r);
+		break;
+	default:
+		ASSERT(0);
+	}
+	if (error) {
+		return error;
+	}
+	/*
+	 * If it worked, update the superblock.
+	 */
+	if (r != NULLRTBLOCK) {
+		long	slen = (long)*len;
+
+		ASSERT(*len >= minlen && *len <= maxlen);
+		if (wasdel)
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FREXTENTS, -slen);
+		else
+			xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, -slen);
+	}
+	*rtblock = r;
+	return 0;
+}
+
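+/*
+ * Worked example of the "prod" rounding above (assumed values).  With
+ * prod = 4, minlen = 5 and maxlen = 14, the request becomes minlen = 8,
+ * maxlen = 12, so any allocation is a multiple of the product factor.
+ * With prod = 8, minlen = 5 and maxlen = 7, rounding gives maxlen = 0,
+ * which is less than the rounded minlen = 8, so the function returns
+ * NULLRTBLOCK without allocating anything.
+ */
+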
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int					/* error */
+xfs_rtfree_extent(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number to free */
+	xfs_extlen_t	len)		/* length of extent freed */
+{
+	int		error;		/* error value */
+	xfs_inode_t	*ip;		/* bitmap file inode */
+	xfs_mount_t	*mp;		/* file system mount structure */
+	xfs_fsblock_t	sb;		/* summary file block number */
+	xfs_buf_t	*sumbp;		/* summary file block buffer */
+
+	mp = tp->t_mountp;
+	/*
+	 * Synchronize by locking the bitmap inode.
+	 */
+	error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
+	if (error) {
+		return error;
+	}
+#if defined(__KERNEL__) && defined(DEBUG)
+	/*
+	 * Check to see that this whole range is currently allocated.
+	 */
+	{
+		int	stat;		/* result from checking range */
+
+		error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat);
+		if (error) {
+			return error;
+		}
+		ASSERT(stat);
+	}
+#endif
+	sumbp = NULL;
+	/*
+	 * Free the range of realtime blocks.
+	 */
+	error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Mark more blocks free in the superblock.
+	 */
+	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
+	/*
+	 * If we've now freed all the blocks, reset the file sequence
+	 * number to 0.
+	 */
+	if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
+	    mp->m_sb.sb_rextents) {
+		if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+			ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+		*(__uint64_t *)&ip->i_d.di_atime = 0;
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	}
+	return 0;
+}
+
+/*
+ * Initialize realtime fields in the mount structure.
+ */
+int				/* error */
+xfs_rtmount_init(
+	xfs_mount_t	*mp)	/* file system mount structure */
+{
+	xfs_buf_t	*bp;	/* buffer for last block of subvolume */
+	xfs_daddr_t	d;	/* address of last block of subvolume */
+	int		error;	/* error return value */
+	xfs_sb_t	*sbp;	/* filesystem superblock copy in mount */
+
+	sbp = &mp->m_sb;
+	if (sbp->sb_rblocks == 0)
+		return 0;
+	if (mp->m_rtdev_targp == NULL) {
+		cmn_err(CE_WARN,
+	"XFS: This filesystem has a realtime volume, use rtdev=device option");
+		return XFS_ERROR(ENODEV);
+	}
+	mp->m_rsumlevels = sbp->sb_rextslog + 1;
+	mp->m_rsumsize =
+		(uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels *
+		sbp->sb_rbmblocks;
+	mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize);
+	mp->m_rbmip = mp->m_rsumip = NULL;
+	/*
+	 * Check that the realtime section is an ok size.
+	 */
+	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
+	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) {
+		cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu",
+			(unsigned long long) XFS_BB_TO_FSB(mp, d),
+			(unsigned long long) mp->m_sb.sb_rblocks);
+		return XFS_ERROR(E2BIG);
+	}
+	error = xfs_read_buf(mp, mp->m_rtdev_targp,
+				d - XFS_FSB_TO_BB(mp, 1),
+				XFS_FSB_TO_BB(mp, 1), 0, &bp);
+	if (error) {
+		cmn_err(CE_WARN,
+	"XFS: realtime mount -- xfs_read_buf failed, returned %d", error);
+		if (error == ENOSPC)
+			return XFS_ERROR(E2BIG);
+		return error;
+	}
+	xfs_buf_relse(bp);
+	return 0;
+}
+
+/*
+ * Get the bitmap and summary inodes into the mount structure
+ * at mount time.
+ */
+int					/* error */
+xfs_rtmount_inodes(
+	xfs_mount_t	*mp)		/* file system mount structure */
+{
+	int		error;		/* error return value */
+	xfs_sb_t	*sbp;
+
+	sbp = &mp->m_sb;
+	if (sbp->sb_rbmino == NULLFSINO)
+		return 0;
+	error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0);
+	if (error)
+		return error;
+	ASSERT(mp->m_rbmip != NULL);
+	ASSERT(sbp->sb_rsumino != NULLFSINO);
+	error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
+	if (error) {
+		VN_RELE(XFS_ITOV(mp->m_rbmip));
+		return error;
+	}
+	ASSERT(mp->m_rsumip != NULL);
+	return 0;
+}
+
+/*
+ * Pick an extent for allocation at the start of a new realtime file.
+ * Use the sequence number stored in the atime field of the bitmap inode.
+ * Translate this to a fraction of the rtextents, and return the product
+ * of rtextents and the fraction.
+ * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
+ */
+int					/* error */
+xfs_rtpick_extent(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_extlen_t	len,		/* allocation length (rtextents) */
+	xfs_rtblock_t	*pick)		/* result rt extent */
+{
+	xfs_rtblock_t	b;		/* result block */
+	int		error;		/* error return value */
+	xfs_inode_t	*ip;		/* bitmap incore inode */
+	int		log2;		/* log of sequence number */
+	__uint64_t	resid;		/* residual after log removed */
+	__uint64_t	seq;		/* sequence number of file creation */
+	__uint64_t	*seqp;		/* pointer to seqno in inode */
+
+	error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
+	if (error)
+		return error;
+	ASSERT(ip == mp->m_rbmip);
+	seqp = (__uint64_t *)&ip->i_d.di_atime;
+	if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
+		ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+		*seqp = 0;
+	}
+	seq = *seqp;
+	if ((log2 = xfs_highbit64(seq)) == -1)
+		b = 0;
+	else {
+		resid = seq - (1ULL << log2);
+		b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
+		    (log2 + 1);
+		if (b >= mp->m_sb.sb_rextents)
+			b = do_mod(b, mp->m_sb.sb_rextents);
+		if (b + len > mp->m_sb.sb_rextents)
+			b = mp->m_sb.sb_rextents - len;
+	}
+	*seqp = seq + 1;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	*pick = b;
+	return 0;
+}
+
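+/*
+ * Worked example of the sequence-to-fraction mapping above (assumed
+ * sb_rextents = 1000, small len).  With log2 = highbit(seq) and
+ * resid = seq - 2^log2, the pick is rextents * (2*resid + 1) / 2^(log2+1):
+ *
+ *	seq:   0    1    2    3    4    5    6    7
+ *	frac:  0   1/2  1/4  3/4  1/8  3/8  5/8  7/8
+ *	pick:  0   500  250  750  125  375  625  875
+ *
+ * so successive new realtime files are spread across the subvolume
+ * rather than packed at its start.
+ */
+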
+#ifdef DEBUG
+/*
+ * Debug code: print out the value of a range in the bitmap.
+ */
+void
+xfs_rtprint_range(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to print */
+	xfs_extlen_t	len)		/* length to print */
+{
+	xfs_extlen_t	i;		/* block number in the extent */
+
+	printk("%Ld: ", (long long)start);
+	for (i = 0; i < len; i++)
+		printk("%d", xfs_rtcheck_bit(mp, tp, start + i, 1));
+	printk("\n");
+}
+
+/*
+ * Debug code: print the summary file.
+ */
+void
+xfs_rtprint_summary(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp)		/* transaction pointer */
+{
+	xfs_suminfo_t	c;		/* summary data */
+	xfs_rtblock_t	i;		/* bitmap block number */
+	int		l;		/* summary information level */
+	int		p;		/* flag for printed anything */
+	xfs_fsblock_t	sb;		/* summary block number */
+	xfs_buf_t	*sumbp;		/* summary block buffer */
+
+	sumbp = NULL;
+	for (l = 0; l < mp->m_rsumlevels; l++) {
+		for (p = 0, i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
+			(void)xfs_rtget_summary(mp, tp, l, i, &sumbp, &sb, &c);
+			if (c) {
+				if (!p) {
+					printk("%Ld-%Ld:", 1LL << l,
+						XFS_RTMIN((1LL << l) +
+							  ((1LL << l) - 1LL),
+							 mp->m_sb.sb_rextents));
+					p = 1;
+				}
+				printk(" %Ld:%d", (long long)i, c);
+			}
+		}
+		if (p)
+			printk("\n");
+	}
+	if (sumbp)
+		xfs_trans_brelse(tp, sumbp);
+}
+#endif	/* DEBUG */
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
new file mode 100644
index 0000000..e271026
--- /dev/null
+++ b/fs/xfs/xfs_rtalloc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_RTALLOC_H__
+#define	__XFS_RTALLOC_H__
+
+struct xfs_mount;
+struct xfs_trans;
+
+#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/* Min and max rt extent sizes, specified in bytes */
+#define	XFS_MAX_RTEXTSIZE	(1024 * 1024 * 1024)	/* 1GB */
+#define	XFS_DFL_RTEXTSIZE	(64 * 1024)	        /* 64KB */
+#define	XFS_MIN_RTEXTSIZE	(4 * 1024)		/* 4KB */
+
+/*
+ * Constants for bit manipulations.
+ */
+#define	XFS_NBBYLOG	3		/* log2(NBBY) */
+#define	XFS_WORDLOG	2		/* log2(sizeof(xfs_rtword_t)) */
+#define	XFS_NBWORDLOG	(XFS_NBBYLOG + XFS_WORDLOG)
+#define	XFS_NBWORD	(1 << XFS_NBWORDLOG)
+#define	XFS_WORDMASK	((1 << XFS_WORDLOG) - 1)
+
+#define	XFS_BLOCKSIZE(mp)	((mp)->m_sb.sb_blocksize)
+#define	XFS_BLOCKMASK(mp)	((mp)->m_blockmask)
+#define	XFS_BLOCKWSIZE(mp)	((mp)->m_blockwsize)
+#define	XFS_BLOCKWMASK(mp)	((mp)->m_blockwmask)
+
+/*
+ * Summary and bit manipulation macros.
+ */
+#define	XFS_SUMOFFS(mp,ls,bb)	((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb)))
+#define	XFS_SUMOFFSTOBLOCK(mp,s)	\
+	(((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
+#define	XFS_SUMPTR(mp,bp,so)	\
+	((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \
+		(((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
+
+#define	XFS_BITTOBLOCK(mp,bi)	((bi) >> (mp)->m_blkbit_log)
+#define	XFS_BLOCKTOBIT(mp,bb)	((bb) << (mp)->m_blkbit_log)
+#define	XFS_BITTOWORD(mp,bi)	\
+	((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp)))
+
+#define	XFS_RTMIN(a,b)	((a) < (b) ? (a) : (b))
+#define	XFS_RTMAX(a,b)	((a) > (b) ? (a) : (b))
+
+#define	XFS_RTLOBIT(w)	xfs_lowbit32(w)
+#define	XFS_RTHIBIT(w)	xfs_highbit32(w)
+
+#if XFS_BIG_BLKNOS
+#define	XFS_RTBLOCKLOG(b)	xfs_highbit64(b)
+#else
+#define	XFS_RTBLOCKLOG(b)	xfs_highbit32(b)
+#endif
+
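+/*
+ * Worked example (assumed geometry: 4096-byte blocks, 4-byte
+ * xfs_suminfo_t, sb_rbmblocks = 10, so m_blkbit_log = 15 and
+ * m_blockwsize = 1024):
+ *
+ *	XFS_SUMOFFS(mp, 3, 7)      = 3 * 10 + 7           = 37
+ *	XFS_SUMOFFSTOBLOCK(mp, 37) = (37 * 4) >> 12       = 0
+ *	XFS_SUMPTR(mp, bp, 37)     -> byte offset 148 into that block
+ *	XFS_BITTOBLOCK(mp, 100000) = 100000 >> 15         = 3
+ *	XFS_BITTOWORD(mp, 100000)  = (100000 >> 5) & 1023 = 53
+ */
+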
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_XFS_RT
+/*
+ * Function prototypes for exported functions.
+ */
+
+/*
+ * Allocate an extent in the realtime subvolume, with the usual allocation
+ * parameters.  The length units are all in realtime extents, as is the
+ * result block number.
+ */
+int					/* error */
+xfs_rtallocate_extent(
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_rtblock_t		bno,	/* starting block number to allocate */
+	xfs_extlen_t		minlen,	/* minimum length to allocate */
+	xfs_extlen_t		maxlen,	/* maximum length to allocate */
+	xfs_extlen_t		*len,	/* out: actual length allocated */
+	xfs_alloctype_t		type,	/* allocation type XFS_ALLOCTYPE... */
+	int			wasdel,	/* was a delayed allocation extent */
+	xfs_extlen_t		prod,	/* extent product factor */
+	xfs_rtblock_t		*rtblock); /* out: start block allocated */
+
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int					/* error */
+xfs_rtfree_extent(
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_rtblock_t		bno,	/* starting block number to free */
+	xfs_extlen_t		len);	/* length of extent freed */
+
+/*
+ * Initialize realtime fields in the mount structure.
+ */
+int					/* error */
+xfs_rtmount_init(
+	struct xfs_mount	*mp);	/* file system mount structure */
+
+/*
+ * Get the bitmap and summary inodes into the mount structure
+ * at mount time.
+ */
+int					/* error */
+xfs_rtmount_inodes(
+	struct xfs_mount	*mp);	/* file system mount structure */
+
+/*
+ * Pick an extent for allocation at the start of a new realtime file.
+ * Use the sequence number stored in the atime field of the bitmap inode.
+ * Translate this to a fraction of the rtextents, and return the product
+ * of rtextents and the fraction.
+ * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
+ */
+int					/* error */
+xfs_rtpick_extent(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_extlen_t		len,	/* allocation length (rtextents) */
+	xfs_rtblock_t		*pick);	/* result rt extent */
+
+/*
+ * Debug code: print out the value of a range in the bitmap.
+ */
+void
+xfs_rtprint_range(
+	struct xfs_mount	*mp,	/* file system mount structure */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	xfs_rtblock_t		start,	/* starting block to print */
+	xfs_extlen_t		len);	/* length to print */
+
+/*
+ * Debug code: print the summary file.
+ */
+void
+xfs_rtprint_summary(
+	struct xfs_mount	*mp,	/* file system mount structure */
+	struct xfs_trans	*tp);	/* transaction pointer */
+
+/*
+ * Grow the realtime area of the filesystem.
+ */
+int
+xfs_growfs_rt(
+	struct xfs_mount	*mp,	/* file system mount structure */
+	xfs_growfs_rt_t		*in);	/* user supplied growfs struct */
+
+#else
+# define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb)  (ENOSYS)
+# define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
+# define xfs_rtpick_extent(m,t,l,rb)                    (ENOSYS)
+# define xfs_growfs_rt(mp,in)                           (ENOSYS)
+# define xfs_rtmount_init(m)    (((m)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+# define xfs_rtmount_inodes(m)  (((m)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+#endif	/* CONFIG_XFS_RT */
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_RTALLOC_H__ */
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
new file mode 100644
index 0000000..d3ff7ae
--- /dev/null
+++ b/fs/xfs/xfs_rw.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_itable.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_acl.h"
+#include "xfs_mac.h"
+#include "xfs_error.h"
+#include "xfs_buf_item.h"
+#include "xfs_rw.h"
+
+/*
+ * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
+ * which clears the setuid and setgid bits when a file is written.
+ */
+int
+xfs_write_clear_setuid(
+	xfs_inode_t	*ip)
+{
+	xfs_mount_t	*mp;
+	xfs_trans_t	*tp;
+	int		error;
+
+	mp = ip->i_mount;
+	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+	if ((error = xfs_trans_reserve(tp, 0,
+				      XFS_WRITEID_LOG_RES(mp),
+				      0, 0, 0))) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	ip->i_d.di_mode &= ~S_ISUID;
+
+	/*
+	 * Note that we don't have to worry about mandatory
+	 * file locking being disabled here because we only
+	 * clear the S_ISGID bit if the Group execute bit is
+	 * on, but if it was on then mandatory locking wouldn't
+	 * have been enabled.
+	 */
+	if (ip->i_d.di_mode & S_IXGRP) {
+		ip->i_d.di_mode &= ~S_ISGID;
+	}
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+}
+
+/*
+ * Force a shutdown of the filesystem instantly while keeping
+ * the filesystem consistent. We don't do an unmount here; we just shut
+ * down the shop and make sure that absolutely nothing persistent happens to
+ * this filesystem after this point.
+ */
+
+void
+xfs_do_force_shutdown(
+	bhv_desc_t	*bdp,
+	int		flags,
+	char		*fname,
+	int		lnnum)
+{
+	int		logerror;
+	xfs_mount_t	*mp;
+
+	mp = XFS_BHVTOM(bdp);
+	logerror = flags & XFS_LOG_IO_ERROR;
+
+	if (!(flags & XFS_FORCE_UMOUNT)) {
+		cmn_err(CE_NOTE,
+		"xfs_force_shutdown(%s,0x%x) called from line %d of file %s.  Return address = 0x%p",
+			mp->m_fsname, flags, lnnum, fname, __return_address);
+	}
+	/*
+	 * No need to duplicate efforts.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
+		return;
+
+	/*
+	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
+	 * queue up anybody new on the log reservations, and wakes up
+	 * everybody who's sleeping on log reservations and tells
+	 * them the bad news.
+	 */
+	if (xfs_log_force_umount(mp, logerror))
+		return;
+
+	if (flags & XFS_CORRUPT_INCORE) {
+		xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp,
+    "Corruption of in-memory data detected.  Shutting down filesystem: %s",
+			mp->m_fsname);
+		if (XFS_ERRLEVEL_HIGH <= xfs_error_level) {
+			xfs_stack_trace();
+		}
+	} else if (!(flags & XFS_FORCE_UMOUNT)) {
+		if (logerror) {
+			xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp,
+			"Log I/O Error Detected.  Shutting down filesystem: %s",
+				mp->m_fsname);
+		} else if (!(flags & XFS_SHUTDOWN_REMOTE_REQ)) {
+			xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp,
+				"I/O Error Detected.  Shutting down filesystem: %s",
+				mp->m_fsname);
+		}
+	}
+	if (!(flags & XFS_FORCE_UMOUNT)) {
+		cmn_err(CE_ALERT,
+		"Please umount the filesystem, and rectify the problem(s)");
+	}
+}
+
+
+/*
+ * Called when we want to stop a buffer from getting written or read.
+ * We attach the EIO error, muck with its flags, and call biodone
+ * so that the proper iodone callbacks get called.
+ */
+int
+xfs_bioerror(
+	xfs_buf_t *bp)
+{
+
+#ifdef XFSERRORDEBUG
+	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
+#endif
+
+	/*
+	 * No need to wait until the buffer is unpinned.
+	 * We aren't flushing it.
+	 */
+	xfs_buftrace("XFS IOERROR", bp);
+	XFS_BUF_ERROR(bp, EIO);
+	/*
+	 * We're calling biodone, so delete the B_DONE flag. Either way
+	 * we have to call the iodone callback, and calling biodone
+	 * probably is the best way since it takes care of
+	 * GRIO as well.
+	 */
+	XFS_BUF_UNREAD(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+	XFS_BUF_UNDONE(bp);
+	XFS_BUF_STALE(bp);
+
+	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+	xfs_biodone(bp);
+
+	return (EIO);
+}
+
+/*
+ * Same as xfs_bioerror, except that we are releasing the buffer
+ * here ourselves, and avoiding the biodone call.
+ * This is meant for userdata errors; metadata bufs come with
+ * iodone functions attached, so that we can track down errors.
+ */
+int
+xfs_bioerror_relse(
+	xfs_buf_t *bp)
+{
+	int64_t fl;
+
+	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
+	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
+
+	xfs_buftrace("XFS IOERRELSE", bp);
+	fl = XFS_BUF_BFLAGS(bp);
+	/*
+	 * No need to wait until the buffer is unpinned.
+	 * We aren't flushing it.
+	 *
+	 * chunkhold expects B_DONE to be set, whether
+	 * we actually finish the I/O or not. We don't want to
+	 * change that interface.
+	 */
+	XFS_BUF_UNREAD(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+	XFS_BUF_DONE(bp);
+	XFS_BUF_STALE(bp);
+	XFS_BUF_CLR_IODONE_FUNC(bp);
+	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+	if (!(fl & XFS_B_ASYNC)) {
+		/*
+		 * Mark b_error and B_ERROR _both_.
+		 * Lots of chunkcache code assumes that.
+		 * There's no reason to mark error for
+		 * ASYNC buffers.
+		 */
+		XFS_BUF_ERROR(bp, EIO);
+		XFS_BUF_V_IODONESEMA(bp);
+	} else {
+		xfs_buf_relse(bp);
+	}
+	return (EIO);
+}
+
+/*
+ * Prints out an ALERT message about I/O error.
+ */
+void
+xfs_ioerror_alert(
+	char			*func,
+	struct xfs_mount	*mp,
+	xfs_buf_t		*bp,
+	xfs_daddr_t		blkno)
+{
+	cmn_err(CE_ALERT,
+ "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
+ "       (\"%s\") error %d buf count %u",
+		(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
+		XFS_BUFTARG_NAME(bp->pb_target),
+		(__uint64_t)blkno,
+		func,
+		XFS_BUF_GETERROR(bp),
+		XFS_BUF_COUNT(bp));
+}
+
+/*
+ * This isn't an absolute requirement, but it is
+ * just a good idea to call xfs_read_buf instead of
+ * directly doing a read_buf call. For one, we shouldn't
+ * be doing this disk read if we are in SHUTDOWN state anyway,
+ * so this stops that from happening. Secondly, this does all
+ * the error checking stuff and the brelse if appropriate for
+ * the caller, so the code can be a little leaner.
+ */
+
+int
+xfs_read_buf(
+	struct xfs_mount *mp,
+	xfs_buftarg_t	 *target,
+	xfs_daddr_t	 blkno,
+	int              len,
+	uint             flags,
+	xfs_buf_t	 **bpp)
+{
+	xfs_buf_t	 *bp;
+	int		 error;
+
+	if (flags)
+		bp = xfs_buf_read_flags(target, blkno, len, flags);
+	else
+		bp = xfs_buf_read(target, blkno, len, flags);
+	if (!bp)
+		return XFS_ERROR(EIO);
+	error = XFS_BUF_GETERROR(bp);
+	if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) {
+		*bpp = bp;
+	} else {
+		*bpp = NULL;
+		if (error) {
+			xfs_ioerror_alert("xfs_read_buf", mp, bp, XFS_BUF_ADDR(bp));
+		} else {
+			error = XFS_ERROR(EIO);
+		}
+		if (bp) {
+			XFS_BUF_UNDONE(bp);
+			XFS_BUF_UNDELAYWRITE(bp);
+			XFS_BUF_STALE(bp);
+			/*
+			 * brelse clears B_ERROR and b_error
+			 */
+			xfs_buf_relse(bp);
+		}
+	}
+	return (error);
+}
+
+/*
+ * Wrapper around bwrite() so that we can trap
+ * write errors, and act accordingly.
+ */
+int
+xfs_bwrite(
+	struct xfs_mount *mp,
+	struct xfs_buf	 *bp)
+{
+	int	error;
+
+	/*
+	 * XXXsup how does this work for quotas.
+	 */
+	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
+	XFS_BUF_SET_FSPRIVATE3(bp, mp);
+	XFS_BUF_WRITE(bp);
+
+	if ((error = XFS_bwrite(bp))) {
+		ASSERT(mp);
+		/*
+		 * Cannot put a buftrace here since if the buffer is not
+		 * B_HOLD then we will brelse() the buffer before returning
+		 * from bwrite and we could be tracing a buffer that has
+		 * been reused.
+		 */
+		xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+	}
+	return (error);
+}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
new file mode 100644
index 0000000..c8b10bf
--- /dev/null
+++ b/fs/xfs/xfs_rw.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_RW_H__
+#define	__XFS_RW_H__
+
+struct xfs_buf;
+struct xfs_inode;
+struct xfs_mount;
+
+/*
+ * Maximum count of bmaps used by read and write paths.
+ */
+#define	XFS_MAX_RW_NBMAPS	4
+
+/*
+ * Counts of readahead buffers to use based on physical memory size.
+ * None of these should be more than XFS_MAX_RW_NBMAPS.
+ */
+#define	XFS_RW_NREADAHEAD_16MB	2
+#define	XFS_RW_NREADAHEAD_32MB	3
+#define	XFS_RW_NREADAHEAD_K32	4
+#define	XFS_RW_NREADAHEAD_K64	4
+
+/*
+ * Maximum size of a buffer that we'll map.  Making this
+ * too big will degrade performance due to the number of
+ * pages which need to be gathered.  Making it too small
+ * will prevent us from doing large I/O's to hardware that
+ * needs it.
+ *
+ * This is currently set to 512 KB.
+ */
+#define	XFS_MAX_BMAP_LEN_BB	1024
+#define	XFS_MAX_BMAP_LEN_BYTES	524288
+
+/*
+ * Convert the given file system block to a disk block.
+ * We have to treat it differently based on whether the
+ * file is a real time file or not, because the bmap code
+ * does.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DB)
+xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
+#define	XFS_FSB_TO_DB(ip,fsb)	xfs_fsb_to_db(ip,fsb)
+#else
+#define	XFS_FSB_TO_DB(ip,fsb) \
+		(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) ? \
+		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
+		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)))
+#endif
+
+#define XFS_FSB_TO_DB_IO(io,fsb) \
+		(((io)->io_flags & XFS_IOCORE_RT) ? \
+		 XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \
+		 XFS_FSB_TO_DADDR((io)->io_mount, (fsb)))
+
+/*
+ * Prototypes for functions in xfs_rw.c.
+ */
+
+int
+xfs_write_clear_setuid(
+	struct xfs_inode	*ip);
+
+int
+xfs_bwrite(
+	struct xfs_mount	*mp,
+	struct xfs_buf		*bp);
+
+int
+xfs_bioerror(
+	struct xfs_buf		*b);
+
+int
+xfs_bioerror_relse(
+	struct xfs_buf		*b);
+
+int
+xfs_read_buf(
+	struct xfs_mount	*mp,
+	xfs_buftarg_t		*target,
+	xfs_daddr_t		blkno,
+	int			len,
+	uint			flags,
+	struct xfs_buf		**bpp);
+
+void
+xfs_ioerror_alert(
+	char			*func,
+	struct xfs_mount	*mp,
+	xfs_buf_t		*bp,
+	xfs_daddr_t		blkno);
+
+
+/*
+ * Prototypes for functions in xfs_vnodeops.c.
+ */
+
+int
+xfs_rwlock(
+	bhv_desc_t		*bdp,
+	vrwlock_t		write_lock);
+
+void
+xfs_rwunlock(
+	bhv_desc_t		*bdp,
+	vrwlock_t		write_lock);
+
+int
+xfs_change_file_space(
+	bhv_desc_t		*bdp,
+	int			cmd,
+	xfs_flock64_t		*bf,
+	xfs_off_t		offset,
+	cred_t			*credp,
+	int			flags);
+
+int
+xfs_set_dmattrs(
+	bhv_desc_t		*bdp,
+	u_int			evmask,
+	u_int16_t		state,
+	cred_t			*credp);
+
+#endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
new file mode 100644
index 0000000..ad090a8
--- /dev/null
+++ b/fs/xfs/xfs_sb.h
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SB_H__
+#define	__XFS_SB_H__
+
+/*
+ * Super block
+ * Fits into a sector-sized buffer at address 0 of each allocation group.
+ * Only the first of these is ever updated except during growfs.
+ */
+
+struct xfs_buf;
+struct xfs_mount;
+
+#define	XFS_SB_MAGIC		0x58465342	/* 'XFSB' */
+#define	XFS_SB_VERSION_1	1		/* 5.3, 6.0.1, 6.1 */
+#define	XFS_SB_VERSION_2	2		/* 6.2 - attributes */
+#define	XFS_SB_VERSION_3	3		/* 6.2 - new inode version */
+#define	XFS_SB_VERSION_4	4		/* 6.2+ - bitmask version */
+#define	XFS_SB_VERSION_NUMBITS		0x000f
+#define	XFS_SB_VERSION_ALLFBITS		0xfff0
+#define	XFS_SB_VERSION_SASHFBITS	0xf000
+#define	XFS_SB_VERSION_REALFBITS	0x0ff0
+#define	XFS_SB_VERSION_ATTRBIT		0x0010
+#define	XFS_SB_VERSION_NLINKBIT		0x0020
+#define	XFS_SB_VERSION_QUOTABIT		0x0040
+#define	XFS_SB_VERSION_ALIGNBIT		0x0080
+#define	XFS_SB_VERSION_DALIGNBIT	0x0100
+#define	XFS_SB_VERSION_SHAREDBIT	0x0200
+#define XFS_SB_VERSION_LOGV2BIT		0x0400
+#define XFS_SB_VERSION_SECTORBIT	0x0800
+#define	XFS_SB_VERSION_EXTFLGBIT	0x1000
+#define	XFS_SB_VERSION_DIRV2BIT		0x2000
+#define	XFS_SB_VERSION_MOREBITSBIT	0x8000
+#define	XFS_SB_VERSION_OKSASHFBITS	\
+	(XFS_SB_VERSION_EXTFLGBIT | \
+	 XFS_SB_VERSION_DIRV2BIT)
+#define	XFS_SB_VERSION_OKREALFBITS	\
+	(XFS_SB_VERSION_ATTRBIT | \
+	 XFS_SB_VERSION_NLINKBIT | \
+	 XFS_SB_VERSION_QUOTABIT | \
+	 XFS_SB_VERSION_ALIGNBIT | \
+	 XFS_SB_VERSION_DALIGNBIT | \
+	 XFS_SB_VERSION_SHAREDBIT | \
+	 XFS_SB_VERSION_LOGV2BIT | \
+	 XFS_SB_VERSION_SECTORBIT)
+#define	XFS_SB_VERSION_OKSASHBITS	\
+	(XFS_SB_VERSION_NUMBITS | \
+	 XFS_SB_VERSION_REALFBITS | \
+	 XFS_SB_VERSION_OKSASHFBITS)
+#define	XFS_SB_VERSION_OKREALBITS	\
+	(XFS_SB_VERSION_NUMBITS | \
+	 XFS_SB_VERSION_OKREALFBITS | \
+	 XFS_SB_VERSION_OKSASHFBITS)
+#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits)	\
+	(((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \
+	  (morebits)) ? \
+		(XFS_SB_VERSION_4 | \
+		 ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
+		 ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
+		 ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
+		 ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
+		 ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
+		 ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
+		 ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \
+		XFS_SB_VERSION_1)
+
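+/*
+ * Worked example: a filesystem made with dia, extflag and dirv2 set
+ * (and nothing else) gets
+ *
+ *	XFS_SB_VERSION_MKFS(0, 1, 1, 1, 0, 0, 0)
+ *		= XFS_SB_VERSION_4 | XFS_SB_VERSION_DALIGNBIT |
+ *		  XFS_SB_VERSION_EXTFLGBIT | XFS_SB_VERSION_DIRV2BIT
+ *		= 0x3104
+ *
+ * while a filesystem using none of the optional features gets plain
+ * XFS_SB_VERSION_1.
+ */
+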
+/*
+ * There are two words to hold XFS "feature" bits: the original
+ * word, sb_versionnum, and sb_features2.  Whenever a bit is set in
+ * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
+ *
+ * These defines represent bits in sb_features2.
+ */
+#define XFS_SB_VERSION2_REALFBITS	0x00ffffff	/* Mask: features */
+#define XFS_SB_VERSION2_RESERVED1BIT	0x00000001
+#define XFS_SB_VERSION2_SASHFBITS	0xff000000	/* Mask: features that
+							   require changing
+							   PROM and SASH */
+
+#define	XFS_SB_VERSION2_OKREALFBITS	\
+	(0)
+#define	XFS_SB_VERSION2_OKSASHFBITS	\
+	(0)
+#define XFS_SB_VERSION2_OKREALBITS	\
+	(XFS_SB_VERSION2_OKREALFBITS |	\
+	 XFS_SB_VERSION2_OKSASHFBITS )
+
+/*
+ * mkfs macro to set up sb_features2 word
+ */
+#define	XFS_SB_VERSION2_MKFS(xyz)	\
+	((xyz) ? 0 : 0)
+
+typedef struct xfs_sb
+{
+	__uint32_t	sb_magicnum;	/* magic number == XFS_SB_MAGIC */
+	__uint32_t	sb_blocksize;	/* logical block size, bytes */
+	xfs_drfsbno_t	sb_dblocks;	/* number of data blocks */
+	xfs_drfsbno_t	sb_rblocks;	/* number of realtime blocks */
+	xfs_drtbno_t	sb_rextents;	/* number of realtime extents */
+	uuid_t		sb_uuid;	/* file system unique id */
+	xfs_dfsbno_t	sb_logstart;	/* starting block of log if internal */
+	xfs_ino_t	sb_rootino;	/* root inode number */
+	xfs_ino_t	sb_rbmino;	/* bitmap inode for realtime extents */
+	xfs_ino_t	sb_rsumino;	/* summary inode for rt bitmap */
+	xfs_agblock_t	sb_rextsize;	/* realtime extent size, blocks */
+	xfs_agblock_t	sb_agblocks;	/* size of an allocation group */
+	xfs_agnumber_t	sb_agcount;	/* number of allocation groups */
+	xfs_extlen_t	sb_rbmblocks;	/* number of rt bitmap blocks */
+	xfs_extlen_t	sb_logblocks;	/* number of log blocks */
+	__uint16_t	sb_versionnum;	/* header version == XFS_SB_VERSION */
+	__uint16_t	sb_sectsize;	/* volume sector size, bytes */
+	__uint16_t	sb_inodesize;	/* inode size, bytes */
+	__uint16_t	sb_inopblock;	/* inodes per block */
+	char		sb_fname[12];	/* file system name */
+	__uint8_t	sb_blocklog;	/* log2 of sb_blocksize */
+	__uint8_t	sb_sectlog;	/* log2 of sb_sectsize */
+	__uint8_t	sb_inodelog;	/* log2 of sb_inodesize */
+	__uint8_t	sb_inopblog;	/* log2 of sb_inopblock */
+	__uint8_t	sb_agblklog;	/* log2 of sb_agblocks (rounded up) */
+	__uint8_t	sb_rextslog;	/* log2 of sb_rextents */
+	__uint8_t	sb_inprogress;	/* mkfs is in progress, don't mount */
+	__uint8_t	sb_imax_pct;	/* max % of fs for inode space */
+					/* statistics */
+	/*
+	 * These fields must remain contiguous.  If you really
+	 * want to change their layout, make sure you fix the
+	 * code in xfs_trans_apply_sb_deltas().
+	 */
+	__uint64_t	sb_icount;	/* allocated inodes */
+	__uint64_t	sb_ifree;	/* free inodes */
+	__uint64_t	sb_fdblocks;	/* free data blocks */
+	__uint64_t	sb_frextents;	/* free realtime extents */
+	/*
+	 * End contiguous fields.
+	 */
+	xfs_ino_t	sb_uquotino;	/* user quota inode */
+	xfs_ino_t	sb_gquotino;	/* group quota inode */
+	__uint16_t	sb_qflags;	/* quota flags */
+	__uint8_t	sb_flags;	/* misc. flags */
+	__uint8_t	sb_shared_vn;	/* shared version number */
+	xfs_extlen_t	sb_inoalignmt;	/* inode chunk alignment, fsblocks */
+	__uint32_t	sb_unit;	/* stripe or raid unit */
+	__uint32_t	sb_width;	/* stripe or raid width */
+	__uint8_t	sb_dirblklog;	/* log2 of dir block size (fsbs) */
+	__uint8_t	sb_logsectlog;	/* log2 of the log sector size */
+	__uint16_t	sb_logsectsize;	/* sector size for the log, bytes */
+	__uint32_t	sb_logsunit;	/* stripe unit size for the log */
+	__uint32_t	sb_features2;	/* additional feature bits */
+} xfs_sb_t;
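
As a hedged aside (not part of the patch): a minimal user-space sketch of how the magic number and version mask defined above are meant to be applied to an in-core superblock. The names mini_sb and mini_sb_ok are invented for illustration, and the struct is pared down to the two fields involved; real code uses the full xfs_sb_t and the XFS_SB_* macros.

#include <stdio.h>
#include <stdint.h>

#define SB_MAGIC           0x58465342  /* 'XFSB', as XFS_SB_MAGIC above */
#define SB_VERSION_NUMBITS 0x000f      /* as XFS_SB_VERSION_NUMBITS above */

struct mini_sb {
        uint32_t sb_magicnum;
        uint16_t sb_versionnum;
};

static int mini_sb_ok(const struct mini_sb *sb)
{
        if (sb->sb_magicnum != SB_MAGIC)
                return 0;                       /* not an XFS superblock */
        /* the low 4 bits carry the base version (1..4) */
        return (sb->sb_versionnum & SB_VERSION_NUMBITS) >= 1 &&
               (sb->sb_versionnum & SB_VERSION_NUMBITS) <= 4;
}

int main(void)
{
        /* version 4 plus ALIGN, DALIGN, EXTFLG and DIRV2 feature bits */
        struct mini_sb sb = { .sb_magicnum = 0x58465342,
                              .sb_versionnum = 0x3184 };

        printf("version %u, %s\n",
               (unsigned)(sb.sb_versionnum & SB_VERSION_NUMBITS),
               mini_sb_ok(&sb) ? "plausible" : "rejected");
        return 0;
}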
+
+/*
+ * Sequence number values for the fields.
+ */
+typedef enum {
+	XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
+	XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
+	XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
+	XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
+	XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
+	XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
+	XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
+	XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
+	XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
+	XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
+	XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
+	XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
+	XFS_SBS_FEATURES2,
+	XFS_SBS_FIELDCOUNT
+} xfs_sb_field_t;
+
+/*
+ * Mask values, defined based on the xfs_sb_field_t values.
+ * Only define the ones we're using.
+ */
+#define	XFS_SB_MVAL(x)		(1LL << XFS_SBS_ ## x)
+#define	XFS_SB_UUID		XFS_SB_MVAL(UUID)
+#define	XFS_SB_FNAME		XFS_SB_MVAL(FNAME)
+#define	XFS_SB_ROOTINO		XFS_SB_MVAL(ROOTINO)
+#define	XFS_SB_RBMINO		XFS_SB_MVAL(RBMINO)
+#define	XFS_SB_RSUMINO		XFS_SB_MVAL(RSUMINO)
+#define	XFS_SB_VERSIONNUM	XFS_SB_MVAL(VERSIONNUM)
+#define XFS_SB_UQUOTINO		XFS_SB_MVAL(UQUOTINO)
+#define XFS_SB_GQUOTINO		XFS_SB_MVAL(GQUOTINO)
+#define XFS_SB_QFLAGS		XFS_SB_MVAL(QFLAGS)
+#define XFS_SB_SHARED_VN	XFS_SB_MVAL(SHARED_VN)
+#define XFS_SB_UNIT		XFS_SB_MVAL(UNIT)
+#define XFS_SB_WIDTH		XFS_SB_MVAL(WIDTH)
+#define	XFS_SB_NUM_BITS		((int)XFS_SBS_FIELDCOUNT)
+#define	XFS_SB_ALL_BITS		((1LL << XFS_SB_NUM_BITS) - 1)
+#define	XFS_SB_MOD_BITS		\
+	(XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
+	 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
+	 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH)
+
+/*
+ * Misc. Flags - warning - these will be cleared by xfs_repair unless
+ * a feature bit is set when the flag is used.
+ */
+#define XFS_SBF_NOFLAGS		0x00	/* no flags set */
+#define XFS_SBF_READONLY	0x01	/* only read-only mounts allowed */
+
+/*
+ * define max. shared version we can interoperate with
+ */
+#define XFS_SB_MAX_SHARED_VN	0
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_NUM)
+int xfs_sb_version_num(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_NUM(sbp)	xfs_sb_version_num(sbp)
+#else
+#define	XFS_SB_VERSION_NUM(sbp)	((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_GOOD_VERSION)
+int xfs_sb_good_version(xfs_sb_t *sbp);
+#define	XFS_SB_GOOD_VERSION(sbp)	xfs_sb_good_version(sbp)
+#else
+#define	XFS_SB_GOOD_VERSION_INT(sbp)	\
+	((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \
+	  ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \
+	   ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	    !(((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) || \
+	      (((sbp)->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && \
+	       ((sbp)->sb_features2 & ~XFS_SB_VERSION2_OKREALBITS)))
+
+#ifdef __KERNEL__
+#define	XFS_SB_GOOD_VERSION(sbp)	\
+	(XFS_SB_GOOD_VERSION_INT(sbp) && \
+	  (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN) ))
+#else
+/*
+ * The extra 2 parens here (( are there to unconfuse paren-matching
+ * editors such as vi: XFS_SB_GOOD_VERSION_INT is a partial expression,
+ * and each of the two XFS_SB_GOOD_VERSION definitions adds 2 more
+ * closing parens to complete the expression.
+ */
+#define XFS_SB_GOOD_VERSION(sbp)	\
+	(XFS_SB_GOOD_VERSION_INT(sbp) && \
+	  (!((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) || \
+	   (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN)) ))
+#endif /* __KERNEL__ */
+#endif
+
+#define	XFS_SB_GOOD_SASH_VERSION(sbp)	\
+	((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \
+	  ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \
+	 ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	  !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKSASHBITS)))
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TONEW)
+unsigned xfs_sb_version_tonew(unsigned v);
+#define	XFS_SB_VERSION_TONEW(v)	xfs_sb_version_tonew(v)
+#else
+#define	XFS_SB_VERSION_TONEW(v)	\
+	((((v) == XFS_SB_VERSION_1) ? \
+		0 : \
+		(((v) == XFS_SB_VERSION_2) ? \
+			XFS_SB_VERSION_ATTRBIT : \
+			(XFS_SB_VERSION_ATTRBIT | XFS_SB_VERSION_NLINKBIT))) | \
+	 XFS_SB_VERSION_4)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TOOLD)
+unsigned xfs_sb_version_toold(unsigned v);
+#define	XFS_SB_VERSION_TOOLD(v)	xfs_sb_version_toold(v)
+#else
+#define	XFS_SB_VERSION_TOOLD(v)	\
+	(((v) & (XFS_SB_VERSION_QUOTABIT | XFS_SB_VERSION_ALIGNBIT)) ? \
+		0 : \
+		(((v) & XFS_SB_VERSION_NLINKBIT) ? \
+			XFS_SB_VERSION_3 : \
+			(((v) & XFS_SB_VERSION_ATTRBIT) ?  \
+				XFS_SB_VERSION_2 : \
+				XFS_SB_VERSION_1)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASATTR)
+int xfs_sb_version_hasattr(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_HASATTR(sbp)	xfs_sb_version_hasattr(sbp)
+#else
+#define	XFS_SB_VERSION_HASATTR(sbp)	\
+	(((sbp)->sb_versionnum == XFS_SB_VERSION_2) || \
+	 ((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \
+	 ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	  ((sbp)->sb_versionnum & XFS_SB_VERSION_ATTRBIT)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDATTR)
+void xfs_sb_version_addattr(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_ADDATTR(sbp)	xfs_sb_version_addattr(sbp)
+#else
+#define	XFS_SB_VERSION_ADDATTR(sbp)	\
+	((sbp)->sb_versionnum = \
+	 (((sbp)->sb_versionnum == XFS_SB_VERSION_1) ? \
+		XFS_SB_VERSION_2 : \
+		((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) ? \
+			((sbp)->sb_versionnum | XFS_SB_VERSION_ATTRBIT) : \
+			(XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT))))
+#endif
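
A sketch, again not part of the patch, of the pattern the HASATTR/ADDATTR macros above implement, restated as plain user-space functions over a 16-bit version word: versions 2 and 3 imply attribute support, while version 4 carries it as an explicit feature bit. has_attr and add_attr are invented names, and the V_* constants simply mirror the XFS_SB_VERSION_* values above.

#include <assert.h>
#include <stdint.h>

#define V_NUMBITS  0x000f
#define V_ATTRBIT  0x0010
#define V_2        2
#define V_3        3
#define V_4        4

static int has_attr(uint16_t vn)
{
        /* versions 2 and 3 imply attributes; version 4 uses the bit */
        return vn == V_2 || vn == V_3 ||
               ((vn & V_NUMBITS) == V_4 && (vn & V_ATTRBIT));
}

static uint16_t add_attr(uint16_t vn)
{
        if (vn == 1)
                return V_2;                     /* v1 -> v2 */
        if ((vn & V_NUMBITS) == V_4)
                return vn | V_ATTRBIT;          /* v4: just set the bit */
        return V_4 | V_ATTRBIT;                 /* v2/v3 -> v4 plus the bit */
}

int main(void)
{
        assert(!has_attr(1));
        assert(has_attr(add_attr(1)));          /* 1 -> 2 */
        assert(has_attr(add_attr(0x0024)));     /* v4+NLINK -> v4+NLINK+ATTR */
        return 0;
}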
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASNLINK)
+int xfs_sb_version_hasnlink(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_HASNLINK(sbp)	xfs_sb_version_hasnlink(sbp)
+#else
+#define	XFS_SB_VERSION_HASNLINK(sbp)	\
+	(((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \
+	 ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	  ((sbp)->sb_versionnum & XFS_SB_VERSION_NLINKBIT)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDNLINK)
+void xfs_sb_version_addnlink(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_ADDNLINK(sbp)	xfs_sb_version_addnlink(sbp)
+#else
+#define	XFS_SB_VERSION_ADDNLINK(sbp)	\
+	((sbp)->sb_versionnum = \
+	 ((sbp)->sb_versionnum <= XFS_SB_VERSION_2 ? \
+		XFS_SB_VERSION_3 : \
+		((sbp)->sb_versionnum | XFS_SB_VERSION_NLINKBIT)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASQUOTA)
+int xfs_sb_version_hasquota(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_HASQUOTA(sbp)	xfs_sb_version_hasquota(sbp)
+#else
+#define	XFS_SB_VERSION_HASQUOTA(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_QUOTABIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDQUOTA)
+void xfs_sb_version_addquota(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_ADDQUOTA(sbp)	xfs_sb_version_addquota(sbp)
+#else
+#define	XFS_SB_VERSION_ADDQUOTA(sbp)	\
+	((sbp)->sb_versionnum = \
+	 (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 ? \
+		((sbp)->sb_versionnum | XFS_SB_VERSION_QUOTABIT) : \
+		(XFS_SB_VERSION_TONEW((sbp)->sb_versionnum) | \
+		 XFS_SB_VERSION_QUOTABIT)))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASALIGN)
+int xfs_sb_version_hasalign(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_HASALIGN(sbp)	xfs_sb_version_hasalign(sbp)
+#else
+#define	XFS_SB_VERSION_HASALIGN(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_ALIGNBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBALIGN)
+void xfs_sb_version_subalign(xfs_sb_t *sbp);
+#define	XFS_SB_VERSION_SUBALIGN(sbp)	xfs_sb_version_subalign(sbp)
+#else
+#define	XFS_SB_VERSION_SUBALIGN(sbp)	\
+	((sbp)->sb_versionnum = \
+	 XFS_SB_VERSION_TOOLD((sbp)->sb_versionnum & ~XFS_SB_VERSION_ALIGNBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDALIGN)
+int xfs_sb_version_hasdalign(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASDALIGN(sbp)	xfs_sb_version_hasdalign(sbp)
+#else
+#define XFS_SB_VERSION_HASDALIGN(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_DALIGNBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDDALIGN)
+int xfs_sb_version_adddalign(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_ADDDALIGN(sbp)	xfs_sb_version_adddalign(sbp)
+#else
+#define XFS_SB_VERSION_ADDDALIGN(sbp)	\
+	((sbp)->sb_versionnum = \
+		((sbp)->sb_versionnum | XFS_SB_VERSION_DALIGNBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSHARED)
+int xfs_sb_version_hasshared(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASSHARED(sbp)	xfs_sb_version_hasshared(sbp)
+#else
+#define XFS_SB_VERSION_HASSHARED(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDSHARED)
+int xfs_sb_version_addshared(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_ADDSHARED(sbp)	xfs_sb_version_addshared(sbp)
+#else
+#define XFS_SB_VERSION_ADDSHARED(sbp)	\
+	((sbp)->sb_versionnum = \
+		((sbp)->sb_versionnum | XFS_SB_VERSION_SHAREDBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBSHARED)
+int xfs_sb_version_subshared(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_SUBSHARED(sbp)	xfs_sb_version_subshared(sbp)
+#else
+#define XFS_SB_VERSION_SUBSHARED(sbp)	\
+	((sbp)->sb_versionnum = \
+		((sbp)->sb_versionnum & ~XFS_SB_VERSION_SHAREDBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDIRV2)
+int xfs_sb_version_hasdirv2(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASDIRV2(sbp)	xfs_sb_version_hasdirv2(sbp)
+#else
+#define XFS_SB_VERSION_HASDIRV2(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASLOGV2)
+int xfs_sb_version_haslogv2(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASLOGV2(sbp)   xfs_sb_version_haslogv2(sbp)
+#else
+#define XFS_SB_VERSION_HASLOGV2(sbp)   \
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	((sbp)->sb_versionnum & XFS_SB_VERSION_LOGV2BIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASEXTFLGBIT)
+int xfs_sb_version_hasextflgbit(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASEXTFLGBIT(sbp)	xfs_sb_version_hasextflgbit(sbp)
+#else
+#define XFS_SB_VERSION_HASEXTFLGBIT(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDEXTFLGBIT)
+int xfs_sb_version_addextflgbit(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp)	xfs_sb_version_addextflgbit(sbp)
+#else
+#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp)	\
+	((sbp)->sb_versionnum = \
+		((sbp)->sb_versionnum | XFS_SB_VERSION_EXTFLGBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBEXTFLGBIT)
+int xfs_sb_version_subextflgbit(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp)	xfs_sb_version_subextflgbit(sbp)
+#else
+#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp)	\
+	((sbp)->sb_versionnum = \
+		((sbp)->sb_versionnum & ~XFS_SB_VERSION_EXTFLGBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSECTOR)
+int xfs_sb_version_hassector(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASSECTOR(sbp)   xfs_sb_version_hassector(sbp)
+#else
+#define XFS_SB_VERSION_HASSECTOR(sbp)   \
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASMOREBITSBIT)
+int xfs_sb_version_hasmorebits(xfs_sb_t *sbp);
+#define XFS_SB_VERSION_HASMOREBITS(sbp)	xfs_sb_version_hasmorebits(sbp)
+#else
+#define XFS_SB_VERSION_HASMOREBITS(sbp)	\
+	((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+	 ((sbp)->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT))
+#endif
+
+/*
+ * sb_features2 bit version macros.
+ *
+ * For example, a bit defined as XFS_SB_VERSION2_YBIT would have a macro:
+ *
+ * SB_VERSION_HASYBIT(xfs_sb_t *sbp)
+ *	(XFS_SB_VERSION_HASMOREBITS(sbp) &&
+ *	 ((sbp)->sb_features2 & XFS_SB_VERSION2_YBIT))
+ */
+
+/*
+ * end of superblock version macros
+ */
+
+#define XFS_SB_DADDR	((xfs_daddr_t)0)	/* daddr in filesystem/ag */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_BLOCK)
+xfs_agblock_t xfs_sb_block(struct xfs_mount *mp);
+#define	XFS_SB_BLOCK(mp)	xfs_sb_block(mp)
+#else
+#define	XFS_SB_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_HDR_BLOCK)
+xfs_agblock_t xfs_hdr_block(struct xfs_mount *mp, xfs_daddr_t d);
+#define	XFS_HDR_BLOCK(mp,d)	xfs_hdr_block(mp,d)
+#else
+#define	XFS_HDR_BLOCK(mp,d)	((xfs_agblock_t)(XFS_BB_TO_FSBT(mp,d)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_FSB)
+xfs_fsblock_t xfs_daddr_to_fsb(struct xfs_mount *mp, xfs_daddr_t d);
+#define	XFS_DADDR_TO_FSB(mp,d)		xfs_daddr_to_fsb(mp,d)
+#else
+#define	XFS_DADDR_TO_FSB(mp,d) \
+	XFS_AGB_TO_FSB(mp, XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DADDR)
+xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno);
+#define	XFS_FSB_TO_DADDR(mp,fsbno)	xfs_fsb_to_daddr(mp,fsbno)
+#else
+#define	XFS_FSB_TO_DADDR(mp,fsbno) \
+	XFS_AGB_TO_DADDR(mp, XFS_FSB_TO_AGNO(mp,fsbno), \
+			 XFS_FSB_TO_AGBNO(mp,fsbno))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP)
+xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp);
+#define XFS_BUF_TO_SBP(bp)	xfs_buf_to_sbp(bp)
+#else
+#define XFS_BUF_TO_SBP(bp)	((xfs_sb_t *)XFS_BUF_PTR(bp))
+#endif
+
+/*
+ * File system sector to basic block conversions.
+ */
+#define XFS_FSS_TO_BB(mp,sec)	((sec) << (mp)->m_sectbb_log)
+#define XFS_BB_TO_FSS(mp,bb)	\
+	(((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log)
+#define XFS_BB_TO_FSST(mp,bb)	((bb) >> (mp)->m_sectbb_log)
+
+/*
+ * File system sector to byte conversions.
+ */
+#define XFS_FSS_TO_B(mp,sectno)	((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog)
+#define XFS_B_TO_FSST(mp,b)	(((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog)
+
+/*
+ * File system block to basic block conversions.
+ */
+#define	XFS_FSB_TO_BB(mp,fsbno)	((fsbno) << (mp)->m_blkbb_log)
+#define	XFS_BB_TO_FSB(mp,bb)	\
+	(((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log)
+#define	XFS_BB_TO_FSBT(mp,bb)	((bb) >> (mp)->m_blkbb_log)
+#define	XFS_BB_FSB_OFFSET(mp,bb) ((bb) & ((mp)->m_bsize - 1))
+
+/*
+ * File system block to byte conversions.
+ */
+#define XFS_FSB_TO_B(mp,fsbno)	((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
+#define XFS_B_TO_FSB(mp,b)	\
+	((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
+#define XFS_B_TO_FSBT(mp,b)	(((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
+#define XFS_B_FSB_OFFSET(mp,b)	((b) & (mp)->m_blockmask)
+
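
Illustrative only (not part of the patch): the shift arithmetic behind the conversion macros above, assuming 512-byte basic blocks (the usual daddr unit) and 4096-byte filesystem blocks, so the block-to-BB shift is 3. blocklog, blkbb_log and blockmask are user-space stand-ins for the corresponding sb/mount fields.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned blocklog  = 12;                 /* sb_blocklog: 4K blocks */
        unsigned blkbb_log = blocklog - 9;       /* like mp->m_blkbb_log   */
        uint64_t blockmask = (1u << blocklog) - 1;

        uint64_t fsbno = 10;
        uint64_t bb    = fsbno << blkbb_log;     /* XFS_FSB_TO_BB: 80 BBs  */
        uint64_t bytes = fsbno << blocklog;      /* XFS_FSB_TO_B: 40960 B  */

        /* rounding-up conversion back, as XFS_BB_TO_FSB does */
        uint64_t fsb_roundup = (bb + (1u << blkbb_log) - 1) >> blkbb_log;

        printf("%llu FSB = %llu BB = %llu bytes, back to %llu FSB\n",
               (unsigned long long)fsbno, (unsigned long long)bb,
               (unsigned long long)bytes, (unsigned long long)fsb_roundup);
        printf("byte 5000 offset within its block: %llu\n",
               (unsigned long long)(5000 & blockmask));
        return 0;
}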
+#endif	/* __XFS_SB_H__ */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
new file mode 100644
index 0000000..3db0e22
--- /dev/null
+++ b/fs/xfs/xfs_trans.c
@@ -0,0 +1,1315 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_error.h"
+#include "xfs_trans_priv.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_quota.h"
+#include "xfs_trans_space.h"
+
+
+STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
+STATIC uint	xfs_trans_count_vecs(xfs_trans_t *);
+STATIC void	xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
+STATIC void	xfs_trans_uncommit(xfs_trans_t *, uint);
+STATIC void	xfs_trans_committed(xfs_trans_t *, int);
+STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
+STATIC void	xfs_trans_free(xfs_trans_t *);
+
+kmem_zone_t		*xfs_trans_zone;
+
+
+/*
+ * Initialize the precomputed transaction reservation values
+ * in the mount structure.
+ */
+void
+xfs_trans_init(
+	xfs_mount_t	*mp)
+{
+	xfs_trans_reservations_t	*resp;
+
+	resp = &(mp->m_reservations);
+	resp->tr_write =
+		(uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_itruncate =
+		(uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_rename =
+		(uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp);
+	resp->tr_remove =
+		(uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_symlink =
+		(uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_create =
+		(uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_mkdir =
+		(uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_ifree =
+		(uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_ichange =
+		(uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp);
+	resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp);
+	resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp);
+	resp->tr_addafork =
+		(uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp);
+	resp->tr_attrset =
+		(uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_attrrm =
+		(uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+	resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
+	resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp);
+	resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp);
+	resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp);
+}
+
+/*
+ * This routine is called to allocate a transaction structure.
+ * The type parameter indicates the type of the transaction.  These
+ * are enumerated in xfs_trans.h.
+ *
+ * Dynamically allocate the transaction structure from the transaction
+ * zone, initialize it, and return it to the caller.
+ */
+xfs_trans_t *
+xfs_trans_alloc(
+	xfs_mount_t	*mp,
+	uint		type)
+{
+	fs_check_frozen(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
+	atomic_inc(&mp->m_active_trans);
+
+	return (_xfs_trans_alloc(mp, type));
+
+}
+
+xfs_trans_t *
+_xfs_trans_alloc(
+	xfs_mount_t	*mp,
+	uint		type)
+{
+	xfs_trans_t	*tp;
+
+	ASSERT(xfs_trans_zone != NULL);
+	tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+
+	/*
+	 * Initialize the transaction structure.
+	 */
+	tp->t_magic = XFS_TRANS_MAGIC;
+	tp->t_type = type;
+	tp->t_mountp = mp;
+	tp->t_items_free = XFS_LIC_NUM_SLOTS;
+	tp->t_busy_free = XFS_LBC_NUM_SLOTS;
+	XFS_LIC_INIT(&(tp->t_items));
+	XFS_LBC_INIT(&(tp->t_busy));
+
+	return (tp);
+}
+
+/*
+ * This is called to create a new transaction which will share the
+ * permanent log reservation of the given transaction.  The remaining
+ * unused block and rt extent reservations are also inherited.  This
+ * implies that the original transaction is no longer allowed to allocate
+ * blocks.  Locks and log items, however, are not inherited.  They must
+ * be added to the new transaction explicitly.
+ */
+xfs_trans_t *
+xfs_trans_dup(
+	xfs_trans_t	*tp)
+{
+	xfs_trans_t	*ntp;
+
+	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+
+	/*
+	 * Initialize the new transaction structure.
+	 */
+	ntp->t_magic = XFS_TRANS_MAGIC;
+	ntp->t_type = tp->t_type;
+	ntp->t_mountp = tp->t_mountp;
+	ntp->t_items_free = XFS_LIC_NUM_SLOTS;
+	ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
+	XFS_LIC_INIT(&(ntp->t_items));
+	XFS_LBC_INIT(&(ntp->t_busy));
+
+	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+	ASSERT(!xlog_debug || tp->t_ticket != NULL);
+#else
+	ASSERT(tp->t_ticket != NULL);
+#endif
+	ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
+	ntp->t_ticket = tp->t_ticket;
+	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
+	tp->t_blk_res = tp->t_blk_res_used;
+	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
+	tp->t_rtx_res = tp->t_rtx_res_used;
+	PFLAGS_DUP(&tp->t_pflags, &ntp->t_pflags);
+
+	XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
+
+	atomic_inc(&tp->t_mountp->m_active_trans);
+	return ntp;
+}
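
A sketch, not part of the patch: the block-reservation split that xfs_trans_dup() performs above, isolated into a tiny user-space example. The rsv struct and dup_rsv helper are invented for illustration; they mirror how t_blk_res and t_blk_res_used are divided between the old and new transaction, so the parent keeps only what it has already used and the child inherits the unused remainder.

#include <assert.h>

struct rsv { unsigned res, used; };

static struct rsv dup_rsv(struct rsv *parent)
{
        struct rsv child = { parent->res - parent->used, 0 };
        parent->res = parent->used;      /* parent may allocate no more */
        return child;
}

int main(void)
{
        struct rsv tp  = { .res = 100, .used = 30 };
        struct rsv ntp = dup_rsv(&tp);

        assert(tp.res == 30 && ntp.res == 70);
        /* the total reserved block count is conserved across the dup */
        assert(tp.res + ntp.res == 100);
        return 0;
}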
+
+/*
+ * This is called to reserve free disk blocks and log space for the
+ * given transaction.  This must be done before allocating any resources
+ * within the transaction.
+ *
+ * This will return ENOSPC if there are not enough blocks available.
+ * It will sleep waiting for available log space.
+ * The only valid value for the flags parameter is XFS_TRANS_PERM_LOG_RES,
+ * which is used by long-running transactions.  If any one of the reservations
+ * fails then they will all be backed out.
+ *
+ * This does not do quota reservations. That typically is done by the
+ * caller afterwards.
+ */
+int
+xfs_trans_reserve(
+	xfs_trans_t	*tp,
+	uint		blocks,
+	uint		logspace,
+	uint		rtextents,
+	uint		flags,
+	uint		logcount)
+{
+	int		log_flags;
+	int		error;
+	int	rsvd;
+
+	error = 0;
+	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+
+	/* Mark this thread as being in a transaction */
+        PFLAGS_SET_FSTRANS(&tp->t_pflags);
+
+	/*
+	 * Attempt to reserve the needed disk blocks by decrementing
+	 * the number needed from the number available.  This will
+	 * fail if the count would go below zero.
+	 */
+	if (blocks > 0) {
+		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
+					  -blocks, rsvd);
+		if (error != 0) {
+                        PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+			return (XFS_ERROR(ENOSPC));
+		}
+		tp->t_blk_res += blocks;
+	}
+
+	/*
+	 * Reserve the log space needed for this transaction.
+	 */
+	if (logspace > 0) {
+		ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace));
+		ASSERT((tp->t_log_count == 0) ||
+			(tp->t_log_count == logcount));
+		if (flags & XFS_TRANS_PERM_LOG_RES) {
+			log_flags = XFS_LOG_PERM_RESERV;
+			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
+		} else {
+			ASSERT(tp->t_ticket == NULL);
+			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+			log_flags = 0;
+		}
+
+		error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
+					&tp->t_ticket,
+					XFS_TRANSACTION, log_flags);
+		if (error) {
+			goto undo_blocks;
+		}
+		tp->t_log_res = logspace;
+		tp->t_log_count = logcount;
+	}
+
+	/*
+	 * Attempt to reserve the needed realtime extents by decrementing
+	 * the number needed from the number available.  This will
+	 * fail if the count would go below zero.
+	 */
+	if (rtextents > 0) {
+		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
+					  -rtextents, rsvd);
+		if (error) {
+			error = XFS_ERROR(ENOSPC);
+			goto undo_log;
+		}
+		tp->t_rtx_res += rtextents;
+	}
+
+	return 0;
+
+	/*
+	 * Error cases jump to one of these labels to undo any
+	 * reservations which have already been performed.
+	 */
+undo_log:
+	if (logspace > 0) {
+		if (flags & XFS_TRANS_PERM_LOG_RES) {
+			log_flags = XFS_LOG_REL_PERM_RESERV;
+		} else {
+			log_flags = 0;
+		}
+		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+		tp->t_ticket = NULL;
+		tp->t_log_res = 0;
+		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
+	}
+
+undo_blocks:
+	if (blocks > 0) {
+		(void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
+					 blocks, rsvd);
+		tp->t_blk_res = 0;
+	}
+
+        PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+
+	return (error);
+}
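
A hedged sketch (not part of the patch) of the goto-based unwind pattern xfs_trans_reserve() uses above: take resources in order, and on failure release only the ones already taken, in reverse order. The take and reserve_all helpers are made-up stand-ins for the real block, log-space and realtime-extent reservations.

#include <stdio.h>

static int take(const char *what, int ok)
{
        printf("%s %s\n", ok ? "took" : "FAILED", what);
        return ok ? 0 : -1;
}

static int reserve_all(int blocks_ok, int log_ok, int rt_ok)
{
        int error;

        if ((error = take("blocks", blocks_ok)))
                return error;
        if ((error = take("log space", log_ok)))
                goto undo_blocks;
        if ((error = take("rt extents", rt_ok)))
                goto undo_log;
        return 0;

undo_log:
        printf("released log space\n");
undo_blocks:
        printf("released blocks\n");
        return error;
}

int main(void)
{
        reserve_all(1, 1, 0);   /* rt reservation fails: log and blocks undone */
        return 0;
}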
+
+
+/*
+ * This is called to set a callback to be called when the given
+ * transaction is committed to disk.  The transaction pointer and the
+ * argument pointer will be passed to the callback routine.
+ *
+ * Only one callback can be associated with any single transaction.
+ */
+void
+xfs_trans_callback(
+	xfs_trans_t		*tp,
+	xfs_trans_callback_t	callback,
+	void			*arg)
+{
+	ASSERT(tp->t_callback == NULL);
+	tp->t_callback = callback;
+	tp->t_callarg = arg;
+}
+
+
+/*
+ * Record the indicated change to the given field for application
+ * to the file system's superblock when the transaction commits.
+ * For now, just store the change in the transaction structure.
+ *
+ * Mark the transaction structure to indicate that the superblock
+ * needs to be updated before committing.
+ */
+void
+xfs_trans_mod_sb(
+	xfs_trans_t	*tp,
+	uint		field,
+	long		delta)
+{
+
+	switch (field) {
+	case XFS_TRANS_SB_ICOUNT:
+		tp->t_icount_delta += delta;
+		break;
+	case XFS_TRANS_SB_IFREE:
+		tp->t_ifree_delta += delta;
+		break;
+	case XFS_TRANS_SB_FDBLOCKS:
+		/*
+		 * Track the number of blocks allocated in the
+		 * transaction.  Make sure it does not exceed the
+		 * number reserved.
+		 */
+		if (delta < 0) {
+			tp->t_blk_res_used += (uint)-delta;
+			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
+		}
+		tp->t_fdblocks_delta += delta;
+		break;
+	case XFS_TRANS_SB_RES_FDBLOCKS:
+		/*
+		 * The allocation has already been applied to the
+		 * in-core superblock's counter.  This should only
+		 * be applied to the on-disk superblock.
+		 */
+		ASSERT(delta < 0);
+		tp->t_res_fdblocks_delta += delta;
+		break;
+	case XFS_TRANS_SB_FREXTENTS:
+		/*
+		 * Track the number of realtime extents allocated in the
+		 * transaction.  Make sure it does not exceed the
+		 * number reserved.
+		 */
+		if (delta < 0) {
+			tp->t_rtx_res_used += (uint)-delta;
+			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
+		}
+		tp->t_frextents_delta += delta;
+		break;
+	case XFS_TRANS_SB_RES_FREXTENTS:
+		/*
+		 * The allocation has already been applied to the
+		 * in-core superblock's counter.  This should only
+		 * be applied to the on-disk superblock.
+		 */
+		ASSERT(delta < 0);
+		tp->t_res_frextents_delta += delta;
+		break;
+	case XFS_TRANS_SB_DBLOCKS:
+		ASSERT(delta > 0);
+		tp->t_dblocks_delta += delta;
+		break;
+	case XFS_TRANS_SB_AGCOUNT:
+		ASSERT(delta > 0);
+		tp->t_agcount_delta += delta;
+		break;
+	case XFS_TRANS_SB_IMAXPCT:
+		tp->t_imaxpct_delta += delta;
+		break;
+	case XFS_TRANS_SB_REXTSIZE:
+		tp->t_rextsize_delta += delta;
+		break;
+	case XFS_TRANS_SB_RBMBLOCKS:
+		tp->t_rbmblocks_delta += delta;
+		break;
+	case XFS_TRANS_SB_RBLOCKS:
+		tp->t_rblocks_delta += delta;
+		break;
+	case XFS_TRANS_SB_REXTENTS:
+		tp->t_rextents_delta += delta;
+		break;
+	case XFS_TRANS_SB_REXTSLOG:
+		tp->t_rextslog_delta += delta;
+		break;
+	default:
+		ASSERT(0);
+		return;
+	}
+
+	tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
+}
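
Sketch only, not in the patch: the free-block bookkeeping that the XFS_TRANS_SB_FDBLOCKS case above performs, reduced to a standalone example. The txn struct and mod_fdblocks function are hypothetical; the fields mirror t_fdblocks_delta, t_blk_res and t_blk_res_used, and the assertion is the same "never exceed the reservation" check.

#include <assert.h>

struct txn { long fdblocks_delta; unsigned blk_res, blk_res_used; };

static void mod_fdblocks(struct txn *tp, long delta)
{
        if (delta < 0) {
                /* allocations are charged against the block reservation */
                tp->blk_res_used += (unsigned)-delta;
                assert(tp->blk_res_used <= tp->blk_res);
        }
        tp->fdblocks_delta += delta;
}

int main(void)
{
        struct txn tp = { .blk_res = 8 };

        mod_fdblocks(&tp, -3);          /* allocate 3 blocks */
        mod_fdblocks(&tp, -2);          /* allocate 2 more   */
        mod_fdblocks(&tp, +1);          /* free 1 block      */
        assert(tp.blk_res_used == 5 && tp.fdblocks_delta == -4);
        return 0;
}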
+
+/*
+ * xfs_trans_apply_sb_deltas() is called from the commit code
+ * to bring the superblock buffer into the current transaction
+ * and modify it as requested by earlier calls to xfs_trans_mod_sb().
+ *
+ * For now we just look at each field allowed to change and change
+ * it if necessary.
+ */
+STATIC void
+xfs_trans_apply_sb_deltas(
+	xfs_trans_t	*tp)
+{
+	xfs_sb_t	*sbp;
+	xfs_buf_t	*bp;
+	int		whole = 0;
+
+	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
+	sbp = XFS_BUF_TO_SBP(bp);
+
+	/*
+	 * Check that superblock mods match the mods made to AGF counters.
+	 */
+	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
+	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
+		tp->t_ag_btree_delta));
+
+	if (tp->t_icount_delta != 0) {
+		INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
+	}
+	if (tp->t_ifree_delta != 0) {
+		INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
+	}
+
+	if (tp->t_fdblocks_delta != 0) {
+		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
+	}
+	if (tp->t_res_fdblocks_delta != 0) {
+		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
+	}
+
+	if (tp->t_frextents_delta != 0) {
+		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta);
+	}
+	if (tp->t_res_frextents_delta != 0) {
+		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_res_frextents_delta);
+	}
+	if (tp->t_dblocks_delta != 0) {
+		INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta);
+		whole = 1;
+	}
+	if (tp->t_agcount_delta != 0) {
+		INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta);
+		whole = 1;
+	}
+	if (tp->t_imaxpct_delta != 0) {
+		INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta);
+		whole = 1;
+	}
+	if (tp->t_rextsize_delta != 0) {
+		INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta);
+		whole = 1;
+	}
+	if (tp->t_rbmblocks_delta != 0) {
+		INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta);
+		whole = 1;
+	}
+	if (tp->t_rblocks_delta != 0) {
+		INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta);
+		whole = 1;
+	}
+	if (tp->t_rextents_delta != 0) {
+		INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta);
+		whole = 1;
+	}
+	if (tp->t_rextslog_delta != 0) {
+		INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta);
+		whole = 1;
+	}
+
+	if (whole)
+		/*
+		 * Log the whole thing; the fields are discontiguous.
+		 */
+		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
+	else
+		/*
+		 * Since all the modifiable fields are contiguous, we
+		 * can get away with this.
+		 */
+		xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount),
+				  offsetof(xfs_sb_t, sb_frextents) +
+				  sizeof(sbp->sb_frextents) - 1);
+
+	XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1;
+}
+
+/*
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused
+ * reservations and apply superblock counter changes to the in-core
+ * superblock.
+ *
+ * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
+ */
+void
+xfs_trans_unreserve_and_mod_sb(
+	xfs_trans_t	*tp)
+{
+	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
+	xfs_mod_sb_t	*msbp;
+	/* REFERENCED */
+	int		error;
+	int		rsvd;
+
+	msbp = msb;
+	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+
+	/*
+	 * Release any reserved blocks.  Any that were allocated
+	 * will be taken back again by fdblocks_delta below.
+	 */
+	if (tp->t_blk_res > 0) {
+		msbp->msb_field = XFS_SBS_FDBLOCKS;
+		msbp->msb_delta = tp->t_blk_res;
+		msbp++;
+	}
+
+	/*
+	 * Release any reserved realtime extents.  Any that were
+	 * allocated will be taken back again by frextents_delta below.
+	 */
+	if (tp->t_rtx_res > 0) {
+		msbp->msb_field = XFS_SBS_FREXTENTS;
+		msbp->msb_delta = tp->t_rtx_res;
+		msbp++;
+	}
+
+	/*
+	 * Apply any superblock modifications to the in-core version.
+	 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
+	 * explicitly NOT applied to the in-core superblock.
+	 * The idea is that this has already been done.
+	 */
+	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
+		if (tp->t_icount_delta != 0) {
+			msbp->msb_field = XFS_SBS_ICOUNT;
+			msbp->msb_delta = (int)tp->t_icount_delta;
+			msbp++;
+		}
+		if (tp->t_ifree_delta != 0) {
+			msbp->msb_field = XFS_SBS_IFREE;
+			msbp->msb_delta = (int)tp->t_ifree_delta;
+			msbp++;
+		}
+		if (tp->t_fdblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_FDBLOCKS;
+			msbp->msb_delta = (int)tp->t_fdblocks_delta;
+			msbp++;
+		}
+		if (tp->t_frextents_delta != 0) {
+			msbp->msb_field = XFS_SBS_FREXTENTS;
+			msbp->msb_delta = (int)tp->t_frextents_delta;
+			msbp++;
+		}
+		if (tp->t_dblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_DBLOCKS;
+			msbp->msb_delta = (int)tp->t_dblocks_delta;
+			msbp++;
+		}
+		if (tp->t_agcount_delta != 0) {
+			msbp->msb_field = XFS_SBS_AGCOUNT;
+			msbp->msb_delta = (int)tp->t_agcount_delta;
+			msbp++;
+		}
+		if (tp->t_imaxpct_delta != 0) {
+			msbp->msb_field = XFS_SBS_IMAX_PCT;
+			msbp->msb_delta = (int)tp->t_imaxpct_delta;
+			msbp++;
+		}
+		if (tp->t_rextsize_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTSIZE;
+			msbp->msb_delta = (int)tp->t_rextsize_delta;
+			msbp++;
+		}
+		if (tp->t_rbmblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_RBMBLOCKS;
+			msbp->msb_delta = (int)tp->t_rbmblocks_delta;
+			msbp++;
+		}
+		if (tp->t_rblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_RBLOCKS;
+			msbp->msb_delta = (int)tp->t_rblocks_delta;
+			msbp++;
+		}
+		if (tp->t_rextents_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTENTS;
+			msbp->msb_delta = (int)tp->t_rextents_delta;
+			msbp++;
+		}
+		if (tp->t_rextslog_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTSLOG;
+			msbp->msb_delta = (int)tp->t_rextslog_delta;
+			msbp++;
+		}
+	}
+
+	/*
+	 * If we need to change anything, do it.
+	 */
+	if (msbp > msb) {
+		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
+			(uint)(msbp - msb), rsvd);
+		ASSERT(error == 0);
+	}
+}
+
+
+/*
+ * xfs_trans_commit
+ *
+ * Commit the given transaction to the log, either asynchronously or
+ * synchronously.
+ *
+ * The XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically, after the filesystem
+ * gets marked 'SHUTDOWN', we can't let any new transactions
+ * be durable - i.e. committed to disk - because some metadata might
+ * be inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * Do not reference the transaction structure after this call.
+ */
+ /*ARGSUSED*/
+int
+xfs_trans_commit(
+	xfs_trans_t	*tp,
+	uint		flags,
+	xfs_lsn_t	*commit_lsn_p)
+{
+	xfs_log_iovec_t		*log_vector;
+	int			nvec;
+	xfs_mount_t		*mp;
+	xfs_lsn_t		commit_lsn;
+	/* REFERENCED */
+	int			error;
+	int			log_flags;
+	int			sync;
+#define	XFS_TRANS_LOGVEC_COUNT	16
+	xfs_log_iovec_t		log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+	static xfs_lsn_t	trans_lsn = 1;
+#endif
+	void			*commit_iclog;
+	int			shutdown;
+
+	commit_lsn = -1;
+
+	/*
+	 * Determine whether this commit is releasing a permanent
+	 * log reservation or not.
+	 */
+	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+		log_flags = XFS_LOG_REL_PERM_RESERV;
+	} else {
+		log_flags = 0;
+	}
+	mp = tp->t_mountp;
+
+	/*
+	 * If there is nothing to be logged by the transaction,
+	 * then unlock all of the items associated with the
+	 * transaction and free the transaction structure.
+	 * Also make sure to return any reserved blocks to
+	 * the free pool.
+	 */
+shut_us_down:
+	shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
+	if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
+		xfs_trans_unreserve_and_mod_sb(tp);
+		/*
+		 * It is indeed possible for the transaction to be
+		 * not dirty but the dqinfo portion to be. All that
+		 * means is that we have some (non-persistent) quota
+		 * reservations that need to be unreserved.
+		 */
+		XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
+		if (tp->t_ticket) {
+			commit_lsn = xfs_log_done(mp, tp->t_ticket,
+							NULL, log_flags);
+			if (commit_lsn == -1 && !shutdown)
+				shutdown = XFS_ERROR(EIO);
+		}
+                PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+		xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0);
+		xfs_trans_free_busy(tp);
+		xfs_trans_free(tp);
+		XFS_STATS_INC(xs_trans_empty);
+		if (commit_lsn_p)
+			*commit_lsn_p = commit_lsn;
+		return (shutdown);
+	}
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+	ASSERT(!xlog_debug || tp->t_ticket != NULL);
+#else
+	ASSERT(tp->t_ticket != NULL);
+#endif
+
+	/*
+	 * If we need to update the superblock, then do it now.
+	 */
+	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
+		xfs_trans_apply_sb_deltas(tp);
+	}
+	XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
+
+	/*
+	 * Ask each log item how many log_vector entries it will
+	 * need so we can figure out how many to allocate.
+	 * Try to avoid the kmem_alloc() call in the common case
+	 * by using a vector from the stack when it fits.
+	 */
+	nvec = xfs_trans_count_vecs(tp);
+
+	if (nvec == 0) {
+		xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+		goto shut_us_down;
+	}
+
+
+	if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
+		log_vector = log_vector_fast;
+	} else {
+		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
+						   sizeof(xfs_log_iovec_t),
+						   KM_SLEEP);
+	}
+
+	/*
+	 * Fill in the log_vector and pin the logged items, and
+	 * then write the transaction to the log.
+	 */
+	xfs_trans_fill_vecs(tp, log_vector);
+
+	/*
+	 * Ignore errors here. xfs_log_done would do the right thing.
+	 * We need to put the ticket, etc. away.
+	 */
+	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket,
+			     &(tp->t_lsn));
+
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+	if (xlog_debug) {
+		commit_lsn = xfs_log_done(mp, tp->t_ticket,
+					  &commit_iclog, log_flags);
+	} else {
+		commit_lsn = 0;
+		tp->t_lsn = trans_lsn++;
+	}
+#else
+	/*
+	 * This is the regular case.  At this point (after the call finishes),
+	 * the transaction is committed incore and could go out to disk at
+	 * any time.  However, all the items associated with the transaction
+	 * are still locked and pinned in memory.
+	 */
+	commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
+#endif
+
+	tp->t_commit_lsn = commit_lsn;
+	if (nvec > XFS_TRANS_LOGVEC_COUNT) {
+		kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
+	}
+
+	if (commit_lsn_p)
+		*commit_lsn_p = commit_lsn;
+
+	/*
+	 * If we got a log write error, unpin the log items that we
+	 * had pinned, clean up, free the trans structure, and return the error.
+	 */
+	if (error || commit_lsn == -1) {
+                PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
+		return XFS_ERROR(EIO);
+	}
+
+	/*
+	 * Once the transaction has committed, unused
+	 * reservations need to be released and changes to
+	 * the superblock need to be reflected in the in-core
+	 * version.  Do that now.
+	 */
+	xfs_trans_unreserve_and_mod_sb(tp);
+
+	sync = tp->t_flags & XFS_TRANS_SYNC;
+
+	/*
+	 * Tell the LM to call the transaction completion routine
+	 * when the log write with LSN commit_lsn completes (e.g.
+	 * when the transaction commit really hits the on-disk log).
+	 * After this call we cannot reference tp, because the call
+	 * can happen at any time and the call will free the transaction
+	 * structure pointed to by tp.  The only case where we call
+	 * the completion routine (xfs_trans_committed) directly is
+	 * if the log is turned off on a debug kernel or we're
+	 * running in simulation mode (the log is explicitly turned
+	 * off).
+	 */
+	tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
+	tp->t_logcb.cb_arg = tp;
+
+	/*
+	 * We need to pass the iclog buffer which was used for the
+	 * transaction commit record into this function, and attach
+	 * the callback to it. The callback must be attached before
+	 * the items are unlocked to avoid racing with other threads
+	 * waiting for an item to unlock.
+	 */
+	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
+
+	/*
+	 * Mark this thread as no longer being in a transaction
+	 */
+	PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+
+	/*
+	 * Once all the items of the transaction have been copied
+	 * to the in core log and the callback is attached, the
+	 * items can be unlocked.
+	 *
+	 * This will free descriptors pointing to items which were
+	 * not logged since there is nothing more to do with them.
+	 * For items which were logged, we will keep pointers to them
+	 * so they can be unpinned after the transaction commits to disk.
+	 * This will also stamp each modified meta-data item with
+	 * the commit lsn of this transaction for dependency tracking
+	 * purposes.
+	 */
+	xfs_trans_unlock_items(tp, commit_lsn);
+
+	/*
+	 * If we detected a log error earlier, finish committing
+	 * the transaction now (unpin log items, etc).
+	 *
+	 * Order is critical here, to avoid using the transaction
+	 * pointer after it's been freed (by xfs_trans_committed
+	 * either here now, or as a callback).  We cannot do this
+	 * step inside xfs_log_notify as was done earlier because
+	 * of this issue.
+	 */
+	if (shutdown)
+		xfs_trans_committed(tp, XFS_LI_ABORTED);
+
+	/*
+	 * Now that the xfs_trans_committed callback has been attached,
+	 * and the items are released we can finally allow the iclog to
+	 * go to disk.
+	 */
+	error = xfs_log_release_iclog(mp, commit_iclog);
+
+	/*
+	 * If the transaction needs to be synchronous, then force the
+	 * log out now and wait for it.
+	 */
+	if (sync) {
+		if (!error)
+			error = xfs_log_force(mp, commit_lsn,
+				      XFS_LOG_FORCE | XFS_LOG_SYNC);
+		XFS_STATS_INC(xs_trans_sync);
+	} else {
+		XFS_STATS_INC(xs_trans_async);
+	}
+
+	return (error);
+}
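
A small sketch (not part of the patch) of the stack-versus-heap vector choice made near the top of xfs_trans_commit() above: small transactions use the on-stack log_vector_fast array and skip the allocation entirely. FAST_COUNT and iovec_stub are stand-ins for XFS_TRANS_LOGVEC_COUNT and xfs_log_iovec_t.

#include <stdio.h>
#include <stdlib.h>

#define FAST_COUNT 16

struct iovec_stub { void *addr; int len; };

static void commit_vectors(int nvec)
{
        struct iovec_stub fast[FAST_COUNT];
        struct iovec_stub *vec;

        if (nvec <= FAST_COUNT) {
                vec = fast;                             /* common case: no alloc */
        } else {
                vec = malloc(nvec * sizeof(*vec));      /* large transaction */
                if (!vec)
                        return;
        }

        printf("%d vectors via %s\n", nvec, vec == fast ? "stack" : "heap");

        if (vec != fast)
                free(vec);
}

int main(void)
{
        commit_vectors(3);
        commit_vectors(40);
        return 0;
}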
+
+
+/*
+ * Total up the number of log iovecs needed to commit this
+ * transaction.  The transaction itself needs one for the
+ * transaction header.  Ask each dirty item in turn how many
+ * it needs to get the total.
+ */
+STATIC uint
+xfs_trans_count_vecs(
+	xfs_trans_t	*tp)
+{
+	int			nvecs;
+	xfs_log_item_desc_t	*lidp;
+
+	nvecs = 1;
+	lidp = xfs_trans_first_item(tp);
+	ASSERT(lidp != NULL);
+
+	/* In the non-debug case, bail out if we didn't find a log_item
+	 * here: return zero and let trans_commit deal with it.
+	 */
+	if (lidp == NULL)
+		return 0;
+
+	while (lidp != NULL) {
+		/*
+		 * Skip items which aren't dirty in this transaction.
+		 */
+		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+			lidp = xfs_trans_next_item(tp, lidp);
+			continue;
+		}
+		lidp->lid_size = IOP_SIZE(lidp->lid_item);
+		nvecs += lidp->lid_size;
+		lidp = xfs_trans_next_item(tp, lidp);
+	}
+
+	return nvecs;
+}
+
+/*
+ * Called from the trans_commit code when we notice that
+ * the filesystem is in the middle of a forced shutdown.
+ */
+STATIC void
+xfs_trans_uncommit(
+	xfs_trans_t	*tp,
+	uint		flags)
+{
+	xfs_log_item_desc_t	*lidp;
+
+	for (lidp = xfs_trans_first_item(tp);
+	     lidp != NULL;
+	     lidp = xfs_trans_next_item(tp, lidp)) {
+		/*
+		 * Unpin only the items that are dirty in this transaction.
+		 */
+		if (lidp->lid_flags & XFS_LID_DIRTY)
+			IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+	}
+
+	xfs_trans_unreserve_and_mod_sb(tp);
+	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+
+	xfs_trans_free_items(tp, flags);
+	xfs_trans_free_busy(tp);
+	xfs_trans_free(tp);
+}
+
+/*
+ * Fill in the vector with pointers to data to be logged
+ * by this transaction.  The transaction header takes
+ * the first vector, and then each dirty item takes the
+ * number of vectors it indicated it needed in xfs_trans_count_vecs().
+ *
+ * As each item fills in the entries it needs, also pin the item
+ * so that it cannot be flushed out until the log write completes.
+ */
+STATIC void
+xfs_trans_fill_vecs(
+	xfs_trans_t		*tp,
+	xfs_log_iovec_t		*log_vector)
+{
+	xfs_log_item_desc_t	*lidp;
+	xfs_log_iovec_t		*vecp;
+	uint			nitems;
+
+	/*
+	 * Skip over the entry for the transaction header, we'll
+	 * fill that in at the end.
+	 */
+	vecp = log_vector + 1;		/* pointer arithmetic */
+
+	nitems = 0;
+	lidp = xfs_trans_first_item(tp);
+	ASSERT(lidp != NULL);
+	while (lidp != NULL) {
+		/*
+		 * Skip items which aren't dirty in this transaction.
+		 */
+		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+			lidp = xfs_trans_next_item(tp, lidp);
+			continue;
+		}
+		/*
+		 * The item may be marked dirty but not log anything;
+		 * an item can use this to get a callback when the
+		 * transaction is committed.
+		 */
+		if (lidp->lid_size) {
+			nitems++;
+		}
+		IOP_FORMAT(lidp->lid_item, vecp);
+		vecp += lidp->lid_size;		/* pointer arithmetic */
+		IOP_PIN(lidp->lid_item);
+		lidp = xfs_trans_next_item(tp, lidp);
+	}
+
+	/*
+	 * Now that we've counted the number of items in this
+	 * transaction, fill in the transaction header.
+	 */
+	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
+	tp->t_header.th_type = tp->t_type;
+	tp->t_header.th_num_items = nitems;
+	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
+	log_vector->i_len = sizeof(xfs_trans_header_t);
+}
+
+
+/*
+ * Unlock all of the transaction's items and free the transaction.
+ * The transaction must not have modified any of its items, because
+ * there is no way to restore them to their previous state.
+ *
+ * If the transaction has made a log reservation, make sure to release
+ * it as well.
+ */
+void
+xfs_trans_cancel(
+	xfs_trans_t		*tp,
+	int			flags)
+{
+	int			log_flags;
+#ifdef DEBUG
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_desc_t	*lidp;
+	xfs_log_item_t		*lip;
+	int			i;
+#endif
+
+	/*
+	 * See if the caller is being too lazy to figure out if
+	 * the transaction really needs an abort.
+	 */
+	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
+		flags &= ~XFS_TRANS_ABORT;
+	/*
+	 * See if the caller is relying on us to shut down the
+	 * filesystem.  This happens in paths where we detect
+	 * corruption and decide to give up.
+	 */
+	if ((tp->t_flags & XFS_TRANS_DIRTY) &&
+	    !XFS_FORCED_SHUTDOWN(tp->t_mountp))
+		xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE);
+#ifdef DEBUG
+	if (!(flags & XFS_TRANS_ABORT)) {
+		licp = &(tp->t_items);
+		while (licp != NULL) {
+			lidp = licp->lic_descs;
+			for (i = 0; i < licp->lic_unused; i++, lidp++) {
+				if (XFS_LIC_ISFREE(licp, i)) {
+					continue;
+				}
+
+				lip = lidp->lid_item;
+				if (!XFS_FORCED_SHUTDOWN(tp->t_mountp))
+					ASSERT(!(lip->li_type == XFS_LI_EFD));
+			}
+			licp = licp->lic_next;
+		}
+	}
+#endif
+	xfs_trans_unreserve_and_mod_sb(tp);
+	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+
+	if (tp->t_ticket) {
+		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+			log_flags = XFS_LOG_REL_PERM_RESERV;
+		} else {
+			log_flags = 0;
+		}
+		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+	}
+
+	/* mark this thread as no longer being in a transaction */
+        PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+
+	xfs_trans_free_items(tp, flags);
+	xfs_trans_free_busy(tp);
+	xfs_trans_free(tp);
+}
+
+
+/*
+ * Free the transaction structure.  If there is more clean up
+ * to do when the structure is freed, add it here.
+ */
+STATIC void
+xfs_trans_free(
+	xfs_trans_t	*tp)
+{
+	atomic_dec(&tp->t_mountp->m_active_trans);
+	XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
+	kmem_zone_free(xfs_trans_zone, tp);
+}
+
+
+/*
+ * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
+ *
+ * This is typically called by the LM when a transaction has been fully
+ * committed to disk.  It needs to unpin the items which have
+ * been logged by the transaction and update their positions
+ * in the AIL if necessary.
+ * This also gets called when the transaction didn't get written out
+ * because of an I/O error, in which case abortflag has XFS_LI_ABORTED set.
+ *
+ * Call xfs_trans_chunk_committed() to process the items in
+ * each chunk.
+ */
+STATIC void
+xfs_trans_committed(
+	xfs_trans_t	*tp,
+	int		abortflag)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
+	xfs_log_busy_chunk_t	*lbcp;
+	xfs_log_busy_slot_t	*lbsp;
+	int			i;
+
+	/*
+	 * Call the transaction's completion callback if there
+	 * is one.
+	 */
+	if (tp->t_callback != NULL) {
+		tp->t_callback(tp, tp->t_callarg);
+	}
+
+	/*
+	 * Special case the chunk embedded in the transaction.
+	 */
+	licp = &(tp->t_items);
+	if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
+		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
+	}
+
+	/*
+	 * Process the items in each chunk in turn.
+	 */
+	licp = licp->lic_next;
+	while (licp != NULL) {
+		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
+		next_licp = licp->lic_next;
+		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+		licp = next_licp;
+	}
+
+	/*
+	 * Clear all the per-AG busy list items listed in this transaction
+	 */
+	lbcp = &tp->t_busy;
+	while (lbcp != NULL) {
+		for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
+			if (!XFS_LBC_ISFREE(lbcp, i)) {
+				xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
+						     lbsp->lbc_idx);
+			}
+		}
+		lbcp = lbcp->lbc_next;
+	}
+	xfs_trans_free_busy(tp);
+
+	/*
+	 * That's it for the transaction structure.  Free it.
+	 */
+	xfs_trans_free(tp);
+}
+
+/*
+ * This is called to perform the commit processing for each
+ * item described by the given chunk.
+ *
+ * The commit processing consists of unlocking items which were
+ * held locked with the SYNC_UNLOCK attribute, calling the committed
+ * routine of each logged item, updating the item's position in the AIL
+ * if necessary, and unpinning each item.  If the committed routine
+ * returns -1, then do nothing further with the item because it
+ * may have been freed.
+ *
+ * Since items are unlocked when they are copied to the incore
+ * log, it is possible for two transactions to be completing
+ * and manipulating the same item simultaneously.  The AIL lock
+ * will protect the lsn field of each item.  The value of this
+ * field can never go backwards.
+ *
+ * We unpin the items after repositioning them in the AIL, because
+ * otherwise they could be immediately flushed and we'd have to race
+ * with the flusher trying to pull the item from the AIL as we add it.
+ */
+STATIC void
+xfs_trans_chunk_committed(
+	xfs_log_item_chunk_t	*licp,
+	xfs_lsn_t		lsn,
+	int			aborted)
+{
+	xfs_log_item_desc_t	*lidp;
+	xfs_log_item_t		*lip;
+	xfs_lsn_t		item_lsn;
+	struct xfs_mount	*mp;
+	int			i;
+	SPLDECL(s);
+
+	lidp = licp->lic_descs;
+	for (i = 0; i < licp->lic_unused; i++, lidp++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		lip = lidp->lid_item;
+		if (aborted)
+			lip->li_flags |= XFS_LI_ABORTED;
+
+		/*
+		 * Send in the ABORTED flag to the COMMITTED routine
+		 * so that it knows whether the transaction was aborted
+		 * or not.
+		 */
+		item_lsn = IOP_COMMITTED(lip, lsn);
+
+		/*
+		 * If the committed routine returns -1, make
+		 * no more references to the item.
+		 */
+		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
+			continue;
+		}
+
+		/*
+		 * If the returned lsn is greater than what it
+		 * contained before, update the location of the
+		 * item in the AIL.  If it is not, then do nothing.
+		 * Items can never move backwards in the AIL.
+		 *
+		 * While the new lsn should usually be greater, it
+		 * is possible that a later transaction completing
+		 * simultaneously with an earlier one using the
+		 * same item could complete first with a higher lsn.
+		 * This would cause the earlier transaction to fail
+		 * the test below.
+		 */
+		mp = lip->li_mountp;
+		AIL_LOCK(mp,s);
+		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
+			/*
+			 * This will set the item's lsn to item_lsn
+			 * and update the position of the item in
+			 * the AIL.
+			 *
+			 * xfs_trans_update_ail() drops the AIL lock.
+			 */
+			xfs_trans_update_ail(mp, lip, item_lsn, s);
+		} else {
+			AIL_UNLOCK(mp, s);
+		}
+
+		/*
+		 * Now that we've repositioned the item in the AIL,
+		 * unpin it so it can be flushed. Pass information
+		 * about buffer stale state down from the log item
+		 * flags, if anyone else stales the buffer we do not
+		 * want to pay any attention to it.
+		 */
+		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
+	}
+}
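
One more hedged sketch, not part of the patch: the "LSNs never move backwards" rule that xfs_trans_chunk_committed() enforces above when repositioning an item in the AIL. maybe_advance is an invented helper; the real code does the comparison with XFS_LSN_CMP and the update via xfs_trans_update_ail() under the AIL lock.

#include <assert.h>
#include <stdint.h>

static void maybe_advance(uint64_t *item_lsn, uint64_t commit_lsn)
{
        if (commit_lsn > *item_lsn)     /* like XFS_LSN_CMP(...) > 0 */
                *item_lsn = commit_lsn;
        /* else: a later transaction already stamped a higher LSN */
}

int main(void)
{
        uint64_t lsn = 0;

        maybe_advance(&lsn, 100);       /* first completion */
        maybe_advance(&lsn, 90);        /* older commit completing late */
        assert(lsn == 100);             /* the item's LSN never regresses */
        return 0;
}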
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
new file mode 100644
index 0000000..bd37ccb
--- /dev/null
+++ b/fs/xfs/xfs_trans.h
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef	__XFS_TRANS_H__
+#define	__XFS_TRANS_H__
+
+/*
+ * This is the structure written in the log at the head of
+ * every transaction. It identifies the type and id of the
+ * transaction, and contains the number of items logged by
+ * the transaction so we know how many to expect during recovery.
+ *
+ * Do not change the below structure without redoing the code in
+ * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans().
+ */
+typedef struct xfs_trans_header {
+	uint		th_magic;		/* magic number */
+	uint		th_type;		/* transaction type */
+	__int32_t	th_tid;			/* transaction id (unused) */
+	uint		th_num_items;		/* num items logged by trans */
+} xfs_trans_header_t;
+
+#define	XFS_TRANS_HEADER_MAGIC	0x5452414e	/* TRAN */
+
+/*
+ * Log item types.
+ */
+#define	XFS_LI_5_3_BUF		0x1234	/* v1 bufs, 1-block inode buffers */
+#define	XFS_LI_5_3_INODE	0x1235	/* 1-block inode buffers */
+#define	XFS_LI_EFI		0x1236
+#define	XFS_LI_EFD		0x1237
+#define	XFS_LI_IUNLINK		0x1238
+#define	XFS_LI_6_1_INODE	0x1239	/* 4K non-aligned inode bufs */
+#define	XFS_LI_6_1_BUF		0x123a	/* v1, 4K inode buffers */
+#define	XFS_LI_INODE		0x123b	/* aligned ino chunks, var-size ibufs */
+#define	XFS_LI_BUF		0x123c	/* v2 bufs, variable sized inode bufs */
+#define	XFS_LI_DQUOT		0x123d
+#define	XFS_LI_QUOTAOFF		0x123e
+
+/*
+ * Transaction types.  Used to distinguish types of buffers.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE	1
+#define XFS_TRANS_SETATTR_SIZE		2
+#define XFS_TRANS_INACTIVE		3
+#define XFS_TRANS_CREATE		4
+#define XFS_TRANS_CREATE_TRUNC		5
+#define XFS_TRANS_TRUNCATE_FILE		6
+#define XFS_TRANS_REMOVE		7
+#define XFS_TRANS_LINK			8
+#define XFS_TRANS_RENAME		9
+#define XFS_TRANS_MKDIR			10
+#define XFS_TRANS_RMDIR			11
+#define XFS_TRANS_SYMLINK		12
+#define XFS_TRANS_SET_DMATTRS		13
+#define XFS_TRANS_GROWFS		14
+#define XFS_TRANS_STRAT_WRITE		15
+#define XFS_TRANS_DIOSTRAT		16
+#define	XFS_TRANS_WRITE_SYNC		17
+#define	XFS_TRANS_WRITEID		18
+#define	XFS_TRANS_ADDAFORK		19
+#define	XFS_TRANS_ATTRINVAL		20
+#define	XFS_TRANS_ATRUNCATE		21
+#define	XFS_TRANS_ATTR_SET		22
+#define	XFS_TRANS_ATTR_RM		23
+#define	XFS_TRANS_ATTR_FLAG		24
+#define	XFS_TRANS_CLEAR_AGI_BUCKET	25
+#define XFS_TRANS_QM_SBCHANGE		26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1		27
+#define XFS_TRANS_DUMMY2		28
+#define XFS_TRANS_QM_QUOTAOFF		29
+#define XFS_TRANS_QM_DQALLOC		30
+#define XFS_TRANS_QM_SETQLIM		31
+#define XFS_TRANS_QM_DQCLUSTER		32
+#define XFS_TRANS_QM_QINOCREATE		33
+#define XFS_TRANS_QM_QUOTAOFF_END	34
+#define XFS_TRANS_SB_UNIT		35
+#define XFS_TRANS_FSYNC_TS		36
+#define	XFS_TRANS_GROWFSRT_ALLOC	37
+#define	XFS_TRANS_GROWFSRT_ZERO		38
+#define	XFS_TRANS_GROWFSRT_FREE		39
+#define	XFS_TRANS_SWAPEXT		40
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+
+#ifdef __KERNEL__
+struct xfs_buf;
+struct xfs_buftarg;
+struct xfs_efd_log_item;
+struct xfs_efi_log_item;
+struct xfs_inode;
+struct xfs_item_ops;
+struct xfs_log_iovec;
+struct xfs_log_item;
+struct xfs_log_item_desc;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_dquot_acct;
+
+typedef struct xfs_ail_entry {
+	struct xfs_log_item	*ail_forw;	/* AIL forw pointer */
+	struct xfs_log_item	*ail_back;	/* AIL back pointer */
+} xfs_ail_entry_t;
+
+/*
+ * This structure is passed as a parameter to xfs_trans_push_ail()
+ * and is used to track the LSN that each waiting process is
+ * waiting to see become unused.
+ */
+typedef struct xfs_ail_ticket {
+	xfs_lsn_t		at_lsn;		/* lsn waiting for */
+	struct xfs_ail_ticket	*at_forw;	/* wait list ptr */
+	struct xfs_ail_ticket	*at_back;	/* wait list ptr */
+	sv_t			at_sema;	/* wait sema */
+} xfs_ail_ticket_t;
+
+
+typedef struct xfs_log_item {
+	xfs_ail_entry_t			li_ail;		/* AIL pointers */
+	xfs_lsn_t			li_lsn;		/* last on-disk lsn */
+	struct xfs_log_item_desc	*li_desc;	/* ptr to current desc*/
+	struct xfs_mount		*li_mountp;	/* ptr to fs mount */
+	uint				li_type;	/* item type */
+	uint				li_flags;	/* misc flags */
+	struct xfs_log_item		*li_bio_list;	/* buffer item list */
+	void				(*li_cb)(struct xfs_buf *,
+						 struct xfs_log_item *);
+							/* buffer item iodone */
+							/* callback func */
+	struct xfs_item_ops		*li_ops;	/* function list */
+} xfs_log_item_t;
+
+#define	XFS_LI_IN_AIL	0x1
+#define XFS_LI_ABORTED	0x2
+
+typedef struct xfs_item_ops {
+	uint (*iop_size)(xfs_log_item_t *);
+	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
+	void (*iop_pin)(xfs_log_item_t *);
+	void (*iop_unpin)(xfs_log_item_t *, int);
+	void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
+	uint (*iop_trylock)(xfs_log_item_t *);
+	void (*iop_unlock)(xfs_log_item_t *);
+	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
+	void (*iop_push)(xfs_log_item_t *);
+	void (*iop_abort)(xfs_log_item_t *);
+	void (*iop_pushbuf)(xfs_log_item_t *);
+	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+} xfs_item_ops_t;
+
+#define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)
+#define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp)
+#define IOP_PIN(ip)		(*(ip)->li_ops->iop_pin)(ip)
+#define IOP_UNPIN(ip, flags)	(*(ip)->li_ops->iop_unpin)(ip, flags)
+#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
+#define IOP_TRYLOCK(ip)		(*(ip)->li_ops->iop_trylock)(ip)
+#define IOP_UNLOCK(ip)		(*(ip)->li_ops->iop_unlock)(ip)
+#define IOP_COMMITTED(ip, lsn)	(*(ip)->li_ops->iop_committed)(ip, lsn)
+#define IOP_PUSH(ip)		(*(ip)->li_ops->iop_push)(ip)
+#define IOP_ABORT(ip)		(*(ip)->li_ops->iop_abort)(ip)
+#define IOP_PUSHBUF(ip)		(*(ip)->li_ops->iop_pushbuf)(ip)
+#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn)
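+
+/*
+ * Illustrative sketch only -- not part of the original source.  A log
+ * item type supplies its behaviour by pointing li_ops at a filled-in
+ * xfs_item_ops vector; the IOP_*() macros above then dispatch through
+ * it.  The xfs_foo_item_* names here are hypothetical placeholders:
+ *
+ *	STATIC struct xfs_item_ops xfs_foo_item_ops = {
+ *		.iop_size	= xfs_foo_item_size,
+ *		.iop_format	= xfs_foo_item_format,
+ *		.iop_pin	= xfs_foo_item_pin,
+ *		.iop_unpin	= xfs_foo_item_unpin,
+ *		.iop_committed	= xfs_foo_item_committed,
+ *		(and so on for the remaining iop_* members)
+ *	};
+ *
+ *	lip->li_ops = &xfs_foo_item_ops;
+ *	item_lsn = IOP_COMMITTED(lip, commit_lsn);
+ */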
+
+/*
+ * Return values for the IOP_TRYLOCK() routines.
+ */
+#define	XFS_ITEM_SUCCESS	0
+#define	XFS_ITEM_PINNED		1
+#define	XFS_ITEM_LOCKED		2
+#define	XFS_ITEM_FLUSHING	3
+#define XFS_ITEM_PUSHBUF	4
+
+#endif	/* __KERNEL__ */
+
+/*
+ * This structure is used to track log items associated with
+ * a transaction.  It points to the log item and keeps some
+ * flags to track the state of the log item.  It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+typedef struct xfs_log_item_desc {
+	xfs_log_item_t	*lid_item;
+	ushort		lid_size;
+	unsigned char	lid_flags;
+	unsigned char	lid_index;
+} xfs_log_item_desc_t;
+
+#define XFS_LID_DIRTY		0x1
+#define XFS_LID_PINNED		0x2
+#define XFS_LID_BUF_STALE	0x8
+
+/*
+ * This structure is used to maintain a chunk list of log_item_desc
+ * structures. The free field is a bitmask indicating which descriptors
+ * in this chunk's array are free.  The unused field is the index of the
+ * first slot that has never been used since this chunk was allocated.
+ */
+#define	XFS_LIC_NUM_SLOTS	15
+typedef struct xfs_log_item_chunk {
+	struct xfs_log_item_chunk	*lic_next;
+	ushort				lic_free;
+	ushort				lic_unused;
+	xfs_log_item_desc_t		lic_descs[XFS_LIC_NUM_SLOTS];
+} xfs_log_item_chunk_t;
+
+#define	XFS_LIC_MAX_SLOT	(XFS_LIC_NUM_SLOTS - 1)
+#define	XFS_LIC_FREEMASK	((1 << XFS_LIC_NUM_SLOTS) - 1)
+
+
+/*
+ * Initialize the given chunk.  Set the chunk's free descriptor mask
+ * to indicate that all descriptors are free.  The caller gets to set
+ * lic_unused to the right value (0 matches all free).  The
+ * lic_descs.lid_index values are set up as each desc is allocated.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT)
+void xfs_lic_init(xfs_log_item_chunk_t *cp);
+#define	XFS_LIC_INIT(cp)	xfs_lic_init(cp)
+#else
+#define	XFS_LIC_INIT(cp)	((cp)->lic_free = XFS_LIC_FREEMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT_SLOT)
+void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot);
+#define	XFS_LIC_INIT_SLOT(cp,slot)	xfs_lic_init_slot(cp, slot)
+#else
+#define	XFS_LIC_INIT_SLOT(cp,slot)	\
+	((cp)->lic_descs[slot].lid_index = (unsigned char)(slot))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_VACANCY)
+int xfs_lic_vacancy(xfs_log_item_chunk_t *cp);
+#define	XFS_LIC_VACANCY(cp)		xfs_lic_vacancy(cp)
+#else
+#define	XFS_LIC_VACANCY(cp)		(((cp)->lic_free) & XFS_LIC_FREEMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ALL_FREE)
+void xfs_lic_all_free(xfs_log_item_chunk_t *cp);
+#define	XFS_LIC_ALL_FREE(cp)		xfs_lic_all_free(cp)
+#else
+#define	XFS_LIC_ALL_FREE(cp)		((cp)->lic_free = XFS_LIC_FREEMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ARE_ALL_FREE)
+int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp);
+#define	XFS_LIC_ARE_ALL_FREE(cp)	xfs_lic_are_all_free(cp)
+#else
+#define	XFS_LIC_ARE_ALL_FREE(cp)	(((cp)->lic_free & XFS_LIC_FREEMASK) ==\
+					XFS_LIC_FREEMASK)
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ISFREE)
+int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot);
+#define	XFS_LIC_ISFREE(cp,slot)	xfs_lic_isfree(cp,slot)
+#else
+#define	XFS_LIC_ISFREE(cp,slot)	((cp)->lic_free & (1 << (slot)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_CLAIM)
+void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot);
+#define	XFS_LIC_CLAIM(cp,slot)		xfs_lic_claim(cp,slot)
+#else
+#define	XFS_LIC_CLAIM(cp,slot)		((cp)->lic_free &= ~(1 << (slot)))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_RELSE)
+void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot);
+#define	XFS_LIC_RELSE(cp,slot)		xfs_lic_relse(cp,slot)
+#else
+#define	XFS_LIC_RELSE(cp,slot)		((cp)->lic_free |= 1 << (slot))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_SLOT)
+xfs_log_item_desc_t *xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot);
+#define	XFS_LIC_SLOT(cp,slot)		xfs_lic_slot(cp,slot)
+#else
+#define	XFS_LIC_SLOT(cp,slot)		(&((cp)->lic_descs[slot]))
+#endif
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_SLOT)
+int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp);
+#define	XFS_LIC_DESC_TO_SLOT(dp)	xfs_lic_desc_to_slot(dp)
+#else
+#define	XFS_LIC_DESC_TO_SLOT(dp)	((uint)((dp)->lid_index))
+#endif
+/*
+ * Calculate the address of a chunk given a descriptor pointer:
+ * dp - dp->lid_index give the address of the start of the lic_descs array.
+ * From this we subtract the offset of the lic_descs field in a chunk.
+ * All of this yields the address of the chunk, which is
+ * cast to a chunk pointer.
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_CHUNK)
+xfs_log_item_chunk_t *xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp);
+#define	XFS_LIC_DESC_TO_CHUNK(dp)	xfs_lic_desc_to_chunk(dp)
+#else
+#define	XFS_LIC_DESC_TO_CHUNK(dp)	((xfs_log_item_chunk_t*) \
+					(((xfs_caddr_t)((dp) - (dp)->lid_index)) -\
+					(xfs_caddr_t)(((xfs_log_item_chunk_t*) \
+					0)->lic_descs)))
+#endif
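+
+/*
+ * Illustrative sketch only -- not part of the original source.  It shows
+ * how the XFS_LIC_*() macros above are meant to be used together: scan a
+ * chunk for a free slot, claim it, and record its index so that
+ * XFS_LIC_DESC_TO_CHUNK() (a "container of" calculation driven by
+ * lid_index) can recover the owning chunk from the descriptor later.
+ */
+static inline xfs_log_item_desc_t *
+xfs_lic_example_claim(xfs_log_item_chunk_t *licp)
+{
+	xfs_log_item_desc_t	*lidp;
+	int			i;
+
+	if (XFS_LIC_VACANCY(licp) == 0)
+		return NULL;				/* no free slots */
+
+	for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
+		if (!XFS_LIC_ISFREE(licp, i))
+			continue;
+		XFS_LIC_CLAIM(licp, i);			/* clear the free bit */
+		XFS_LIC_INIT_SLOT(licp, i);		/* record lid_index = i */
+		if (i >= licp->lic_unused)
+			licp->lic_unused = i + 1;	/* first never-used slot */
+		lidp = XFS_LIC_SLOT(licp, i);
+		/* later: XFS_LIC_DESC_TO_CHUNK(lidp) == licp */
+		return lidp;
+	}
+	return NULL;
+}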
+
+#ifdef __KERNEL__
+/*
+ * This structure is used to maintain a list of block ranges that have been
+ * freed in the transaction.  The ranges are listed in the perag[] busy list
+ * from the time they are freed until the transaction is committed to disk.
+ */
+
+typedef struct xfs_log_busy_slot {
+	xfs_agnumber_t		lbc_ag;
+	ushort			lbc_idx;	/* index in perag.busy[] */
+} xfs_log_busy_slot_t;
+
+#define XFS_LBC_NUM_SLOTS	31
+typedef struct xfs_log_busy_chunk {
+	struct xfs_log_busy_chunk	*lbc_next;
+	uint				lbc_free;	/* bitmask of free slots */
+	ushort				lbc_unused;	/* first unused */
+	xfs_log_busy_slot_t		lbc_busy[XFS_LBC_NUM_SLOTS];
+} xfs_log_busy_chunk_t;
+
+#define	XFS_LBC_MAX_SLOT	(XFS_LBC_NUM_SLOTS - 1)
+#define	XFS_LBC_FREEMASK	((1U << XFS_LBC_NUM_SLOTS) - 1)
+
+#define	XFS_LBC_INIT(cp)	((cp)->lbc_free = XFS_LBC_FREEMASK)
+#define	XFS_LBC_CLAIM(cp, slot)	((cp)->lbc_free &= ~(1 << (slot)))
+#define	XFS_LBC_SLOT(cp, slot)	(&((cp)->lbc_busy[(slot)]))
+#define	XFS_LBC_VACANCY(cp)	(((cp)->lbc_free) & XFS_LBC_FREEMASK)
+#define	XFS_LBC_ISFREE(cp, slot) ((cp)->lbc_free & (1 << (slot)))
+
+/*
+ * This is the type of function which can be given to xfs_trans_callback()
+ * to be called upon the transaction's commit to disk.
+ */
+typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
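+
+/*
+ * Illustrative sketch only -- not part of the original source.  A caller
+ * arranges for a function of this type to run once the transaction is
+ * committed to disk by registering it with xfs_trans_callback() (declared
+ * further down in this header).  The names below are hypothetical
+ * placeholders:
+ *
+ *	STATIC void
+ *	xfs_foo_commit_done(struct xfs_trans *tp, void *arg)
+ *	{
+ *		(wake up or tear down whatever 'arg' describes)
+ *	}
+ *
+ *	xfs_trans_callback(tp, xfs_foo_commit_done, arg);
+ */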
+
+/*
+ * This is the structure maintained for every active transaction.
+ */
+typedef struct xfs_trans {
+	unsigned int		t_magic;	/* magic number */
+	xfs_log_callback_t	t_logcb;	/* log callback struct */
+	struct xfs_trans	*t_forw;	/* async list pointers */
+	struct xfs_trans	*t_back;	/* async list pointers */
+	unsigned int		t_type;		/* transaction type */
+	unsigned int		t_log_res;	/* amt of log space resvd */
+	unsigned int		t_log_count;	/* count for perm log res */
+	unsigned int		t_blk_res;	/* # of blocks resvd */
+	unsigned int		t_blk_res_used;	/* # of resvd blocks used */
+	unsigned int		t_rtx_res;	/* # of rt extents resvd */
+	unsigned int		t_rtx_res_used;	/* # of resvd rt extents used */
+	xfs_log_ticket_t	t_ticket;	/* log mgr ticket */
+	sema_t			t_sema;		/* sema for commit completion */
+	xfs_lsn_t		t_lsn;		/* log seq num of start of
+						 * transaction. */
+	xfs_lsn_t		t_commit_lsn;	/* log seq num of end of
+						 * transaction. */
+	struct xfs_mount	*t_mountp;	/* ptr to fs mount struct */
+	struct xfs_dquot_acct   *t_dqinfo;	/* accting info for dquots */
+	xfs_trans_callback_t	t_callback;	/* transaction callback */
+	void			*t_callarg;	/* callback arg */
+	unsigned int		t_flags;	/* misc flags */
+	long			t_icount_delta;	/* superblock icount change */
+	long			t_ifree_delta;	/* superblock ifree change */
+	long			t_fdblocks_delta; /* superblock fdblocks chg */
+	long			t_res_fdblocks_delta; /* on-disk only chg */
+	long			t_frextents_delta;/* superblock freextents chg*/
+	long			t_res_frextents_delta; /* on-disk only chg */
+	long			t_ag_freeblks_delta; /* debugging counter */
+	long			t_ag_flist_delta; /* debugging counter */
+	long			t_ag_btree_delta; /* debugging counter */
+	long			t_dblocks_delta;/* superblock dblocks change */
+	long			t_agcount_delta;/* superblock agcount change */
+	long			t_imaxpct_delta;/* superblock imaxpct change */
+	long			t_rextsize_delta;/* superblock rextsize chg */
+	long			t_rbmblocks_delta;/* superblock rbmblocks chg */
+	long			t_rblocks_delta;/* superblock rblocks change */
+	long			t_rextents_delta;/* superblocks rextents chg */
+	long			t_rextslog_delta;/* superblocks rextslog chg */
+	unsigned int		t_items_free;	/* log item descs free */
+	xfs_log_item_chunk_t	t_items;	/* first log item desc chunk */
+	xfs_trans_header_t	t_header;	/* header for in-log trans */
+	unsigned int		t_busy_free;	/* busy descs free */
+	xfs_log_busy_chunk_t	t_busy;		/* busy/async free blocks */
+        xfs_pflags_t            t_pflags;       /* saved pflags state */
+} xfs_trans_t;
+
+#endif	/* __KERNEL__ */
+
+
+#define	XFS_TRANS_MAGIC		0x5452414E	/* 'TRAN' */
+/*
+ * Values for t_flags.
+ */
+#define	XFS_TRANS_DIRTY		0x01	/* something needs to be logged */
+#define	XFS_TRANS_SB_DIRTY	0x02	/* superblock is modified */
+#define	XFS_TRANS_PERM_LOG_RES	0x04	/* xact took a permanent log res */
+#define	XFS_TRANS_SYNC		0x08	/* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY	0x10	/* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE	0x20    /* OK to use reserved data blocks */
+
+/*
+ * Values for call flags parameter.
+ */
+#define	XFS_TRANS_NOSLEEP		0x1
+#define	XFS_TRANS_WAIT			0x2
+#define	XFS_TRANS_RELEASE_LOG_RES	0x4
+#define	XFS_TRANS_ABORT			0x8
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define	XFS_TRANS_SB_ICOUNT		0x00000001
+#define	XFS_TRANS_SB_IFREE		0x00000002
+#define	XFS_TRANS_SB_FDBLOCKS		0x00000004
+#define	XFS_TRANS_SB_RES_FDBLOCKS	0x00000008
+#define	XFS_TRANS_SB_FREXTENTS		0x00000010
+#define	XFS_TRANS_SB_RES_FREXTENTS	0x00000020
+#define	XFS_TRANS_SB_DBLOCKS		0x00000040
+#define	XFS_TRANS_SB_AGCOUNT		0x00000080
+#define	XFS_TRANS_SB_IMAXPCT		0x00000100
+#define	XFS_TRANS_SB_REXTSIZE		0x00000200
+#define	XFS_TRANS_SB_RBMBLOCKS		0x00000400
+#define	XFS_TRANS_SB_RBLOCKS		0x00000800
+#define	XFS_TRANS_SB_REXTENTS		0x00001000
+#define	XFS_TRANS_SB_REXTSLOG		0x00002000
+
+
+/*
+ * Various log reservation values.
+ * These are based on the size of the file system block
+ * because that is what most transactions manipulate.
+ * Each adds in an additional 128 bytes per item logged to
+ * try to account for the overhead of the transaction mechanism.
+ *
+ * Note:
+ * Most of the reservations underestimate the number of allocation
+ * groups into which they could free extents in the xfs_bmap_finish()
+ * call.  This is because the number in the worst case is quite high
+ * and quite unusual.  In order to fix this we need to change
+ * xfs_bmap_finish() to free extents in only a single AG at a time.
+ * This will require changes to the EFI code as well, however, so that
+ * the EFI for the extents not freed is logged again in each transaction.
+ * See bug 261917.
+ */
+
+/*
+ * Per-extent log reservation for the allocation btree changes
+ * involved in freeing or allocating an extent.
+ * 2 trees * (2 blocks/level * max depth - 1) * block size
+ */
+#define	XFS_ALLOCFREE_LOG_RES(mp,nx) \
+	((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1)))
+#define	XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
+	((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1)))
+
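+/*
+ * Illustrative worked example (assumed geometry, not from the original
+ * source): with 4096-byte filesystem blocks and XFS_AG_MAXLEVELS(mp) == 5,
+ * freeing a single extent reserves
+ *	XFS_ALLOCFREE_LOG_RES(mp, 1)   = 2 * (2*5 - 1) * 4096 = 73728 bytes
+ * and contributes
+ *	XFS_ALLOCFREE_LOG_COUNT(mp, 1) = 2 * (2*5 - 1)        = 18 items
+ * to the 128-bytes-per-item overhead added by each reservation below.
+ */
+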
+/*
+ * Per-directory log reservation for any directory change.
+ * dir blocks: (1 btree block per level + data block + free block) * dblock size
+ * bmap btree: (levels + 2) * max depth * block size
+ * v2 directory blocks can be fragmented below the dirblksize down to the fsb
+ * size, so account for that in the DAENTER macros.
+ */
+#define	XFS_DIROP_LOG_RES(mp)	\
+	(XFS_FSB_TO_B(mp, XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK)) + \
+	 (XFS_FSB_TO_B(mp, XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)))
+#define	XFS_DIROP_LOG_COUNT(mp)	\
+	(XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \
+	 XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)
+
+/*
+ * In a write transaction we can allocate a maximum of 2
+ * extents.  This gives:
+ *    the inode getting the new extents: inode size
+ *    the inode's bmap btree: max depth * block size
+ *    the agfs of the ags from which the extents are allocated: 2 * sector
+ *    the superblock free block counter: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join:
+ *    the agfs of the ags containing the blocks: 2 * sector size
+ *    the agfls of the ags containing the blocks: 2 * sector size
+ *    the super block free block counter: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define XFS_CALC_WRITE_LOG_RES(mp) \
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \
+	  (2 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 2) + \
+	  (128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))),\
+	 ((2 * (mp)->m_sb.sb_sectsize) + \
+	  (2 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 2) + \
+	  (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
+
+#define	XFS_WRITE_LOG_RES(mp)	((mp)->m_reservations.tr_write)
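+
+/*
+ * Illustrative worked example (purely assumed geometry, not from the
+ * original source): with 4096-byte blocks, 512-byte sectors, a 256-byte
+ * inode and both btree depths taken as 5, the first MAX() branch above is
+ * dominated by the allocation btree term
+ *	XFS_ALLOCFREE_LOG_RES(mp, 2) = 2 * 2 * (2*5 - 1) * 4096 = 147456
+ * so the write reservation comes to roughly 150-200 kilobytes of log
+ * space even when the data actually being written is tiny.
+ */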
+
+/*
+ * In truncating a file we free up to two extents at once.  We can modify:
+ *    the inode being truncated: inode size
+ *    the inode's bmap btree: (max depth + 1) * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ *    the agf for each of the ags: 4 * sector size
+ *    the agfl for each of the ags: 4 * sector size
+ *    the super block to reflect the freed blocks: sector size
+ *    worst case split in allocation btrees per extent assuming 4 extents:
+ *		4 exts * 2 trees * (2 * max depth - 1) * block size
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+#define	XFS_CALC_ITRUNCATE_LOG_RES(mp) \
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + \
+	  (128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
+	 ((4 * (mp)->m_sb.sb_sectsize) + \
+	  (4 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 4) + \
+	  (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \
+	  (128 * 5) + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	   (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
+	    XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
+
+#define	XFS_ITRUNCATE_LOG_RES(mp)   ((mp)->m_reservations.tr_itruncate)
+
+/*
+ * In renaming a file we can modify:
+ *    the four inodes involved: 4 * inode size
+ *    the two directory btrees: 2 * (max depth + v2) * dir block size
+ *    the two directory bmap btrees: 2 * max depth * block size
+ * And the bmap_finish transaction can free dir and bmap blocks (two sets
+ *	of bmap blocks) giving:
+ *    the agf for the ags in which the blocks live: 3 * sector size
+ *    the agfl for the ags in which the blocks live: 3 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_RENAME_LOG_RES(mp) \
+	(MAX( \
+	 ((4 * (mp)->m_sb.sb_inodesize) + \
+	  (2 * XFS_DIROP_LOG_RES(mp)) + \
+	  (128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp)))), \
+	 ((3 * (mp)->m_sb.sb_sectsize) + \
+	  (3 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 3) + \
+	  (128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))))))
+
+#define	XFS_RENAME_LOG_RES(mp)	((mp)->m_reservations.tr_rename)
+
+/*
+ * For creating a link to an inode:
+ *    the parent directory inode: inode size
+ *    the linked inode: inode size
+ *    the directory btree could split: (max depth + v2) * dir block size
+ *    the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free some bmap blocks giving:
+ *    the agf for the ag in which the blocks live: sector size
+ *    the agfl for the ag in which the blocks live: sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_LINK_LOG_RES(mp) \
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_inodesize + \
+	  XFS_DIROP_LOG_RES(mp) + \
+	  (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \
+	 ((mp)->m_sb.sb_sectsize + \
+	  (mp)->m_sb.sb_sectsize + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	  (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
+
+#define	XFS_LINK_LOG_RES(mp)	((mp)->m_reservations.tr_link)
+
+/*
+ * For removing a directory entry we can modify:
+ *    the parent directory inode: inode size
+ *    the removed inode: inode size
+ *    the directory btree could join: (max depth + v2) * dir block size
+ *    the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free the dir and bmap blocks giving:
+ *    the agf for the ag in which the blocks live: 2 * sector size
+ *    the agfl for the ag in which the blocks live: 2 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_REMOVE_LOG_RES(mp)	\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_inodesize + \
+	  XFS_DIROP_LOG_RES(mp) + \
+	  (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \
+	 ((2 * (mp)->m_sb.sb_sectsize) + \
+	  (2 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 2) + \
+	  (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
+
+#define	XFS_REMOVE_LOG_RES(mp)	((mp)->m_reservations.tr_remove)
+
+/*
+ * For symlink we can modify:
+ *    the parent directory inode: inode size
+ *    the new inode: inode size
+ *    the inode btree entry: 1 block
+ *    the directory btree: (max depth + v2) * dir block size
+ *    the directory inode's bmap btree: (max depth + v2) * block size
+ *    the blocks for the symlink: 1 KB
+ * Or in the first xact we allocate some inodes giving:
+ *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_SYMLINK_LOG_RES(mp)		\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B(mp, 1) + \
+	  XFS_DIROP_LOG_RES(mp) + \
+	  1024 + \
+	  (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \
+	 (2 * (mp)->m_sb.sb_sectsize + \
+	  XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
+	  XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	  (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
+	   XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
+
+#define	XFS_SYMLINK_LOG_RES(mp)	((mp)->m_reservations.tr_symlink)
+
+/*
+ * For create we can modify:
+ *    the parent directory inode: inode size
+ *    the new inode: inode size
+ *    the inode btree entry: block size
+ *    the superblock for the nlink flag: sector size
+ *    the directory btree: (max depth + v2) * dir block size
+ *    the directory inode's bmap btree: (max depth + v2) * block size
+ * Or in the first xact we allocate some inodes giving:
+ *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ *    the superblock for the nlink flag: sector size
+ *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+#define	XFS_CALC_CREATE_LOG_RES(mp)		\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_FSB_TO_B(mp, 1) + \
+	  XFS_DIROP_LOG_RES(mp) + \
+	  (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \
+	 (3 * (mp)->m_sb.sb_sectsize + \
+	  XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
+	  XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	  (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
+	   XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
+
+#define	XFS_CREATE_LOG_RES(mp)	((mp)->m_reservations.tr_create)
+
+/*
+ * Making a new directory is the same as creating a new file.
+ */
+#define	XFS_CALC_MKDIR_LOG_RES(mp)	XFS_CALC_CREATE_LOG_RES(mp)
+
+#define	XFS_MKDIR_LOG_RES(mp)	((mp)->m_reservations.tr_mkdir)
+
+/*
+ * In freeing an inode we can modify:
+ *    the inode being freed: inode size
+ *    the super block free inode counter: sector size
+ *    the agi hash list and counters: sector size
+ *    the inode btree entry: block size
+ *    the on disk inode before ours in the agi hash list: inode cluster size
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+#define	XFS_CALC_IFREE_LOG_RES(mp) \
+	((mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_sectsize + \
+	 (mp)->m_sb.sb_sectsize + \
+	 XFS_FSB_TO_B((mp), 1) + \
+	 MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \
+	 (128 * 5) + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	  (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
+	   XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
+
+
+#define	XFS_IFREE_LOG_RES(mp)	((mp)->m_reservations.tr_ifree)
+
+/*
+ * When only changing the inode we log the inode and possibly the superblock.
+ * We also add a bit of slop for the transaction stuff.
+ */
+#define	XFS_CALC_ICHANGE_LOG_RES(mp)	((mp)->m_sb.sb_inodesize + \
+					 (mp)->m_sb.sb_sectsize + 512)
+
+#define	XFS_ICHANGE_LOG_RES(mp)	((mp)->m_reservations.tr_ichange)
+
+/*
+ * Growing the data section of the filesystem.
+ *	superblock
+ *	agi and agf
+ *	allocation btrees
+ */
+#define	XFS_CALC_GROWDATA_LOG_RES(mp) \
+	((mp)->m_sb.sb_sectsize * 3 + \
+	 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	 (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
+
+#define	XFS_GROWDATA_LOG_RES(mp)    ((mp)->m_reservations.tr_growdata)
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the first set of transactions (ALLOC) we allocate space to the
+ * bitmap or summary files.
+ *	superblock: sector size
+ *	agf of the ag from which the extent is allocated: sector size
+ *	bmap btree for bitmap/summary inode: max depth * blocksize
+ *	bitmap/summary inode: inode size
+ *	allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
+ */
+#define	XFS_CALC_GROWRTALLOC_LOG_RES(mp) \
+	(2 * (mp)->m_sb.sb_sectsize + \
+	 XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \
+	 (mp)->m_sb.sb_inodesize + \
+	 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	 (128 * \
+	  (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + \
+	   XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
+
+#define	XFS_GROWRTALLOC_LOG_RES(mp)	((mp)->m_reservations.tr_growrtalloc)
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the second set of transactions (ZERO) we zero the new metadata blocks.
+ *	one bitmap/summary block: blocksize
+ */
+#define	XFS_CALC_GROWRTZERO_LOG_RES(mp) \
+	((mp)->m_sb.sb_blocksize + 128)
+
+#define	XFS_GROWRTZERO_LOG_RES(mp)	((mp)->m_reservations.tr_growrtzero)
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the third set of transactions (FREE) we update metadata without
+ * allocating any new blocks.
+ *	superblock: sector size
+ *	bitmap inode: inode size
+ *	summary inode: inode size
+ *	one bitmap block: blocksize
+ *	summary blocks: new summary size
+ */
+#define	XFS_CALC_GROWRTFREE_LOG_RES(mp) \
+	((mp)->m_sb.sb_sectsize + \
+	 2 * (mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_blocksize + \
+	 (mp)->m_rsumsize + \
+	 (128 * 5))
+
+#define	XFS_GROWRTFREE_LOG_RES(mp)	((mp)->m_reservations.tr_growrtfree)
+
+/*
+ * Logging the inode modification timestamp on a synchronous write.
+ *	inode
+ */
+#define	XFS_CALC_SWRITE_LOG_RES(mp) \
+	((mp)->m_sb.sb_inodesize + 128)
+
+#define	XFS_SWRITE_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
+
+/*
+ * Logging the inode timestamps on an fsync -- same as SWRITE
+ * as long as SWRITE logs the entire inode core
+ */
+#define XFS_FSYNC_TS_LOG_RES(mp)        ((mp)->m_reservations.tr_swrite)
+
+/*
+ * Logging the inode mode bits when writing a setuid/setgid file
+ *	inode
+ */
+#define	XFS_CALC_WRITEID_LOG_RES(mp) \
+	((mp)->m_sb.sb_inodesize + 128)
+
+#define	XFS_WRITEID_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
+
+/*
+ * Converting the inode from non-attributed to attributed.
+ *	the inode being converted: inode size
+ *	agf block and superblock (for block allocation)
+ *	the new block (directory sized)
+ *	bmap blocks for the new directory block
+ *	allocation btrees
+ */
+#define	XFS_CALC_ADDAFORK_LOG_RES(mp)	\
+	((mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_sectsize * 2 + \
+	 (mp)->m_dirblksize + \
+	 (XFS_DIR_IS_V1(mp) ? 0 : \
+	    XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1))) + \
+	 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	 (128 * (4 + \
+		 (XFS_DIR_IS_V1(mp) ? 0 : \
+			 XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \
+		 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
+
+#define	XFS_ADDAFORK_LOG_RES(mp)	((mp)->m_reservations.tr_addafork)
+
+/*
+ * Removing the attribute fork of a file
+ *    the inode being truncated: inode size
+ *    the inode's bmap btree: max depth * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ *    the agf for each of the ags: 4 * sector size
+ *    the agfl for each of the ags: 4 * sector size
+ *    the super block to reflect the freed blocks: sector size
+ *    worst case split in allocation btrees per extent assuming 4 extents:
+ *		4 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_ATTRINVAL_LOG_RES(mp)	\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
+	  (128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))), \
+	 ((4 * (mp)->m_sb.sb_sectsize) + \
+	  (4 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 4) + \
+	  (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))))))
+
+#define	XFS_ATTRINVAL_LOG_RES(mp)	((mp)->m_reservations.tr_attrinval)
+
+/*
+ * Setting an attribute.
+ *	the inode getting the attribute
+ *	the superblock for allocations
+ *	the agfs extents are allocated from
+ *	the attribute btree * max depth
+ *	the inode allocation btree
+ * Since attribute transaction space is dependent on the size of the attribute,
+ * the calculation is done partially at mount time and partially at runtime.
+ */
+#define	XFS_CALC_ATTRSET_LOG_RES(mp)	\
+	((mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_sectsize + \
+	  XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
+	  (128 * (2 + XFS_DA_NODE_MAXDEPTH)))
+
+#define	XFS_ATTRSET_LOG_RES(mp, ext)	\
+	((mp)->m_reservations.tr_attrset + \
+	 (ext * (mp)->m_sb.sb_sectsize) + \
+	 (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \
+	 (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))))
+
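+/*
+ * Illustrative worked example (assumed geometry, not from the original
+ * source): with 512-byte sectors, 4096-byte blocks and
+ * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK) == 5, the runtime portion of
+ * XFS_ATTRSET_LOG_RES(mp, 1) adds
+ *	512 + 5*4096 + 128*(1 + 5) = 21760 bytes
+ * on top of the mount-time tr_attrset value.
+ */
+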
+/*
+ * Removing an attribute.
+ *    the inode: inode size
+ *    the attribute btree could join: max depth * block size
+ *    the inode bmap btree could join or split: max depth * block size
+ * And the bmap_finish transaction can free the attr blocks freed giving:
+ *    the agf for the ag in which the blocks live: 2 * sector size
+ *    the agfl for the ag in which the blocks live: 2 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_ATTRRM_LOG_RES(mp)	\
+	(MAX( \
+	  ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
+	  (128 * (1 + XFS_DA_NODE_MAXDEPTH + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
+	 ((2 * (mp)->m_sb.sb_sectsize) + \
+	  (2 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 2) + \
+	  (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
+
+#define	XFS_ATTRRM_LOG_RES(mp)	((mp)->m_reservations.tr_attrrm)
+
+/*
+ * Clearing a bad agino number in an agi hash bucket.
+ */
+#define	XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp) \
+	((mp)->m_sb.sb_sectsize + 128)
+
+#define	XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)  ((mp)->m_reservations.tr_clearagi)
+
+
+/*
+ * Various log count values.
+ */
+#define	XFS_DEFAULT_LOG_COUNT		1
+#define	XFS_DEFAULT_PERM_LOG_COUNT	2
+#define	XFS_ITRUNCATE_LOG_COUNT		2
+#define XFS_INACTIVE_LOG_COUNT		2
+#define	XFS_CREATE_LOG_COUNT		2
+#define	XFS_MKDIR_LOG_COUNT		3
+#define	XFS_SYMLINK_LOG_COUNT		3
+#define	XFS_REMOVE_LOG_COUNT		2
+#define	XFS_LINK_LOG_COUNT		2
+#define	XFS_RENAME_LOG_COUNT		2
+#define	XFS_WRITE_LOG_COUNT		2
+#define	XFS_ADDAFORK_LOG_COUNT		2
+#define	XFS_ATTRINVAL_LOG_COUNT		1
+#define	XFS_ATTRSET_LOG_COUNT		3
+#define	XFS_ATTRRM_LOG_COUNT		3
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer
+ * reference count values.  This determines how hard the buffer
+ * cache tries to hold onto the buffer.
+ */
+#define	XFS_AGF_REF		4
+#define	XFS_AGI_REF		4
+#define	XFS_AGFL_REF		3
+#define	XFS_INO_BTREE_REF	3
+#define	XFS_ALLOC_BTREE_REF	2
+#define	XFS_BMAP_BTREE_REF	2
+#define	XFS_DIR_BTREE_REF	2
+#define	XFS_ATTR_BTREE_REF	1
+#define	XFS_INO_REF		1
+#define	XFS_DQUOT_REF		1
+
+#ifdef __KERNEL__
+/*
+ * XFS transaction mechanism exported interfaces that are
+ * actually macros.
+ */
+#define	xfs_trans_get_log_res(tp)	((tp)->t_log_res)
+#define	xfs_trans_get_log_count(tp)	((tp)->t_log_count)
+#define	xfs_trans_get_block_res(tp)	((tp)->t_blk_res)
+#define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)
+
+#ifdef DEBUG
+#define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (long)d)
+#define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (long)d)
+#define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (long)d)
+#else
+#define	xfs_trans_agblocks_delta(tp, d)
+#define	xfs_trans_agflist_delta(tp, d)
+#define	xfs_trans_agbtree_delta(tp, d)
+#endif
+
+/*
+ * XFS transaction mechanism exported interfaces.
+ */
+void		xfs_trans_init(struct xfs_mount *);
+xfs_trans_t	*xfs_trans_alloc(struct xfs_mount *, uint);
+xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint);
+xfs_trans_t	*xfs_trans_dup(xfs_trans_t *);
+int		xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
+				  uint, uint);
+void		xfs_trans_callback(xfs_trans_t *,
+				   void (*)(xfs_trans_t *, void *), void *);
+void		xfs_trans_mod_sb(xfs_trans_t *, uint, long);
+struct xfs_buf	*xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t,
+				   int, uint);
+int		xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *,
+				   struct xfs_buftarg *, xfs_daddr_t, int, uint,
+				   struct xfs_buf **);
+struct xfs_buf	*xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
+
+void		xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
+void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
+void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
+int		xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
+			       xfs_ino_t , uint, uint, struct xfs_inode **);
+void		xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint);
+void		xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *);
+void		xfs_trans_ihold_release(xfs_trans_t *, struct xfs_inode *);
+void		xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
+void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
+struct xfs_efi_log_item	*xfs_trans_get_efi(xfs_trans_t *, uint);
+void		xfs_efi_release(struct xfs_efi_log_item *, uint);
+void		xfs_trans_log_efi_extent(xfs_trans_t *,
+					 struct xfs_efi_log_item *,
+					 xfs_fsblock_t,
+					 xfs_extlen_t);
+struct xfs_efd_log_item	*xfs_trans_get_efd(xfs_trans_t *,
+				  struct xfs_efi_log_item *,
+				  uint);
+void		xfs_trans_log_efd_extent(xfs_trans_t *,
+					 struct xfs_efd_log_item *,
+					 xfs_fsblock_t,
+					 xfs_extlen_t);
+int		xfs_trans_commit(xfs_trans_t *, uint flags, xfs_lsn_t *);
+void		xfs_trans_cancel(xfs_trans_t *, int);
+void		xfs_trans_ail_init(struct xfs_mount *);
+xfs_lsn_t	xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
+xfs_lsn_t	xfs_trans_tail_ail(struct xfs_mount *);
+void		xfs_trans_unlocked_item(struct xfs_mount *,
+					xfs_log_item_t *);
+xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp,
+					xfs_agnumber_t ag,
+					xfs_extlen_t idx);
+
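+/*
+ * Illustrative sketch only -- not part of the original source.  It strings
+ * the interfaces declared above into the typical life cycle of a small
+ * buffer-modifying transaction.  The transaction type, reservation, block
+ * number, buffer length and logged byte range are hypothetical placeholders.
+ */
+static inline int
+xfs_trans_example_sketch(
+	struct xfs_mount	*mp,
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		blkno)
+{
+	xfs_trans_t	*tp;
+	struct xfs_buf	*bp;
+	int		error;
+
+	/* Allocate the transaction and take a non-permanent log reservation. */
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	/* Lock a buffer into the transaction and log the bytes we dirty. */
+	bp = xfs_trans_get_buf(tp, target, blkno, 1, 0);
+	if (bp == NULL) {
+		xfs_trans_cancel(tp, 0);
+		return ENOMEM;		/* positive errnos, per XFS convention */
+	}
+	xfs_trans_log_buf(tp, bp, 0, 127);
+
+	/* Commit; the buffer is released when the commit completes. */
+	return xfs_trans_commit(tp, 0, NULL);
+}
+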
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
new file mode 100644
index 0000000..7bc5eab
--- /dev/null
+++ b/fs/xfs/xfs_trans_ail.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_error.h"
+
+STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *);
+STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *);
+
+#ifdef DEBUG
+STATIC void xfs_ail_check(xfs_ail_entry_t *);
+#else
+#define	xfs_ail_check(a)
+#endif /* DEBUG */
+
+
+/*
+ * This is called by the log manager code to determine the LSN
+ * of the tail of the log.  This is exactly the LSN of the first
+ * item in the AIL.  If the AIL is empty, then this function
+ * returns 0.
+ *
+ * We need the AIL lock in order to get a coherent read of the
+ * lsn of the last item in the AIL.
+ */
+xfs_lsn_t
+xfs_trans_tail_ail(
+	xfs_mount_t	*mp)
+{
+	xfs_lsn_t	lsn;
+	xfs_log_item_t	*lip;
+	SPLDECL(s);
+
+	AIL_LOCK(mp,s);
+	lip = xfs_ail_min(&(mp->m_ail));
+	if (lip == NULL) {
+		lsn = (xfs_lsn_t)0;
+	} else {
+		lsn = lip->li_lsn;
+	}
+	AIL_UNLOCK(mp, s);
+
+	return lsn;
+}
+
+/*
+ * xfs_trans_push_ail
+ *
+ * This routine is called to move the tail of the AIL
+ * forward.  It does this by trying to flush items in the AIL
+ * whose lsns are below the given threshold_lsn.
+ *
+ * The routine returns the lsn of the tail of the log.
+ */
+xfs_lsn_t
+xfs_trans_push_ail(
+	xfs_mount_t		*mp,
+	xfs_lsn_t		threshold_lsn)
+{
+	xfs_lsn_t		lsn;
+	xfs_log_item_t		*lip;
+	int			gen;
+	int			restarts;
+	int			lock_result;
+	int			flush_log;
+	SPLDECL(s);
+
+#define	XFS_TRANS_PUSH_AIL_RESTARTS	10
+
+	AIL_LOCK(mp,s);
+	lip = xfs_trans_first_ail(mp, &gen);
+	if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
+		/*
+		 * Just return if the AIL is empty.
+		 */
+		AIL_UNLOCK(mp, s);
+		return (xfs_lsn_t)0;
+	}
+
+	XFS_STATS_INC(xs_push_ail);
+
+	/*
+	 * While the item we are looking at is below the given threshold
+	 * try to flush it out.  Make sure to limit the number of times
+	 * we allow xfs_trans_next_ail() to restart scanning from the
+	 * beginning of the list.  We'd like not to stop until we've at least
+	 * tried to push on everything in the AIL with an LSN less than
+	 * the given threshold. However, we may give up before that if
+	 * we realize that we've been holding the AIL_LOCK for 'too long',
+	 * blocking interrupts.  Currently, 'too long' means roughly 500us.
+	 */
+	flush_log = 0;
+	restarts = 0;
+	while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) &&
+		(XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) {
+		/*
+		 * If we can lock the item without sleeping, unlock
+		 * the AIL lock and flush the item.  Then re-grab the
+		 * AIL lock so we can look for the next item on the
+		 * AIL.  Since we unlock the AIL while we flush the
+		 * item, the next routine may start over again at the
+		 * beginning of the list if anything has changed.
+		 * That is what the generation count is for.
+		 *
+		 * If we can't lock the item, either its holder will flush
+		 * it or it is already being flushed or it is being relogged.
+		 * In any of these cases it is being taken care of and we
+		 * can just skip to the next item in the list.
+		 */
+		lock_result = IOP_TRYLOCK(lip);
+		switch (lock_result) {
+		      case XFS_ITEM_SUCCESS:
+			AIL_UNLOCK(mp, s);
+			XFS_STATS_INC(xs_push_ail_success);
+			IOP_PUSH(lip);
+			AIL_LOCK(mp,s);
+			break;
+
+		      case XFS_ITEM_PUSHBUF:
+			AIL_UNLOCK(mp, s);
+			XFS_STATS_INC(xs_push_ail_pushbuf);
+#ifdef XFSRACEDEBUG
+			delay_for_intr();
+			delay(300);
+#endif
+			ASSERT(lip->li_ops->iop_pushbuf);
+			ASSERT(lip);
+			IOP_PUSHBUF(lip);
+			AIL_LOCK(mp,s);
+			break;
+
+		      case XFS_ITEM_PINNED:
+			XFS_STATS_INC(xs_push_ail_pinned);
+			flush_log = 1;
+			break;
+
+		      case XFS_ITEM_LOCKED:
+			XFS_STATS_INC(xs_push_ail_locked);
+			break;
+
+		      case XFS_ITEM_FLUSHING:
+			XFS_STATS_INC(xs_push_ail_flushing);
+			break;
+
+		      default:
+			ASSERT(0);
+			break;
+		}
+
+		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
+		if (lip == NULL) {
+			break;
+		}
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			/*
+			 * Just return if we shut down during the last try.
+			 */
+			AIL_UNLOCK(mp, s);
+			return (xfs_lsn_t)0;
+		}
+
+	}
+
+	if (flush_log) {
+		/*
+		 * If something we need to push out was pinned, then
+		 * push out the log so it will become unpinned and
+		 * move forward in the AIL.
+		 */
+		AIL_UNLOCK(mp, s);
+		XFS_STATS_INC(xs_push_ail_flush);
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		AIL_LOCK(mp, s);
+	}
+
+	lip = xfs_ail_min(&(mp->m_ail));
+	if (lip == NULL) {
+		lsn = (xfs_lsn_t)0;
+	} else {
+		lsn = lip->li_lsn;
+	}
+
+	AIL_UNLOCK(mp, s);
+	return lsn;
+}	/* xfs_trans_push_ail */
+
+
+/*
+ * This is to be called when an item is unlocked that may have
+ * been in the AIL.  It will wake up the first member of the AIL
+ * wait list if this item's unlocking might allow it to progress.
+ * If the item is in the AIL, then we need to get the AIL lock
+ * while doing our checking so we don't race with someone going
+ * to sleep waiting for this event in xfs_trans_push_ail().
+ */
+void
+xfs_trans_unlocked_item(
+	xfs_mount_t	*mp,
+	xfs_log_item_t	*lip)
+{
+	xfs_log_item_t	*min_lip;
+
+	/*
+	 * If we're forcibly shutting down, we may have
+	 * unlocked log items arbitrarily. The last thing
+	 * we want to do is to move the tail of the log
+	 * over some potentially valid data.
+	 */
+	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
+	    XFS_FORCED_SHUTDOWN(mp)) {
+		return;
+	}
+
+	/*
+	 * This is the one case where we can call into xfs_ail_min()
+	 * without holding the AIL lock because we only care about the
+	 * case where we are at the tail of the AIL.  If the object isn't
+	 * at the tail, it doesn't matter what result we get back.  This
+	 * is slightly racy: since we were just unlocked, we could
+	 * go to sleep between the call to xfs_ail_min and the call to
+	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
+	 * move us out of the tail of the AIL, and then we wake up.  However,
+	 * the call to xfs_log_move_tail() doesn't do anything if there's
+	 * not enough free space to wake people up so we're safe calling it.
+	 */
+	min_lip = xfs_ail_min(&mp->m_ail);
+
+	if (min_lip == lip)
+		xfs_log_move_tail(mp, 1);
+}	/* xfs_trans_unlocked_item */
+
+
+/*
+ * Update the position of the item in the AIL with the new
+ * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
+ * it to its new position by removing it and re-adding it.
+ *
+ * Wakeup anyone with an lsn less than the item's lsn.  If the item
+ * we move in the AIL is the minimum one, update the tail lsn in the
+ * log manager.
+ *
+ * Increment the AIL's generation count to indicate that the tree
+ * has changed.
+ *
+ * This function must be called with the AIL lock held.  The lock
+ * is dropped before returning, so the caller must pass in the
+ * cookie returned by AIL_LOCK.
+ */
+void
+xfs_trans_update_ail(
+	xfs_mount_t	*mp,
+	xfs_log_item_t	*lip,
+	xfs_lsn_t	lsn,
+	unsigned long	s)
+{
+	xfs_ail_entry_t		*ailp;
+	xfs_log_item_t		*dlip=NULL;
+	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
+
+	ailp = &(mp->m_ail);
+	mlip = xfs_ail_min(ailp);
+
+	if (lip->li_flags & XFS_LI_IN_AIL) {
+		dlip = xfs_ail_delete(ailp, lip);
+		ASSERT(dlip == lip);
+	} else {
+		lip->li_flags |= XFS_LI_IN_AIL;
+	}
+
+	lip->li_lsn = lsn;
+
+	xfs_ail_insert(ailp, lip);
+	mp->m_ail_gen++;
+
+	if (mlip == dlip) {
+		mlip = xfs_ail_min(&(mp->m_ail));
+		AIL_UNLOCK(mp, s);
+		xfs_log_move_tail(mp, mlip->li_lsn);
+	} else {
+		AIL_UNLOCK(mp, s);
+	}
+
+
+}	/* xfs_trans_update_ail */
+
+/*
+ * Delete the given item from the AIL.  It must already be in
+ * the AIL.
+ *
+ * Wakeup anyone with an lsn less than item's lsn.    If the item
+ * we delete in the AIL is the minimum one, update the tail lsn in the
+ * log manager.
+ *
+ * Clear the IN_AIL flag from the item, reset its lsn to 0, and
+ * bump the AIL's generation count to indicate that the tree
+ * has changed.
+ *
+ * This function must be called with the AIL lock held.  The lock
+ * is dropped before returning, so the caller must pass in the
+ * cookie returned by AIL_LOCK.
+ */
+void
+xfs_trans_delete_ail(
+	xfs_mount_t	*mp,
+	xfs_log_item_t	*lip,
+	unsigned long	s)
+{
+	xfs_ail_entry_t		*ailp;
+	xfs_log_item_t		*dlip;
+	xfs_log_item_t		*mlip;
+
+	if (lip->li_flags & XFS_LI_IN_AIL) {
+		ailp = &(mp->m_ail);
+		mlip = xfs_ail_min(ailp);
+		dlip = xfs_ail_delete(ailp, lip);
+		ASSERT(dlip == lip);
+
+
+		lip->li_flags &= ~XFS_LI_IN_AIL;
+		lip->li_lsn = 0;
+		mp->m_ail_gen++;
+
+		if (mlip == dlip) {
+			mlip = xfs_ail_min(&(mp->m_ail));
+			AIL_UNLOCK(mp, s);
+			xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
+		} else {
+			AIL_UNLOCK(mp, s);
+		}
+	}
+	else {
+		/*
+		 * If the file system is not being shutdown, we are in
+		 * serious trouble if we get to this stage.
+		 */
+		if (XFS_FORCED_SHUTDOWN(mp))
+			AIL_UNLOCK(mp, s);
+		else {
+			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+				"xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL");
+			xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+			AIL_UNLOCK(mp, s);
+		}
+	}
+}
+
+
+
+/*
+ * Return the item in the AIL with the smallest lsn.
+ * Return the current tree generation number for use
+ * in calls to xfs_trans_next_ail().
+ */
+xfs_log_item_t *
+xfs_trans_first_ail(
+	xfs_mount_t	*mp,
+	int		*gen)
+{
+	xfs_log_item_t	*lip;
+
+	lip = xfs_ail_min(&(mp->m_ail));
+	*gen = (int)mp->m_ail_gen;
+
+	return (lip);
+}
+
+/*
+ * If the generation count of the tree has not changed since the
+ * caller last took something from the AIL, then return the element
+ * in the tree which follows the one given.  If the count has changed,
+ * then return the minimum element of the AIL and bump the restarts counter
+ * if one is given.
+ */
+xfs_log_item_t *
+xfs_trans_next_ail(
+	xfs_mount_t	*mp,
+	xfs_log_item_t	*lip,
+	int		*gen,
+	int		*restarts)
+{
+	xfs_log_item_t	*nlip;
+
+	ASSERT(mp && lip && gen);
+	if (mp->m_ail_gen == *gen) {
+		nlip = xfs_ail_next(&(mp->m_ail), lip);
+	} else {
+		nlip = xfs_ail_min(&(mp->m_ail));
+		*gen = (int)mp->m_ail_gen;
+		if (restarts != NULL) {
+			XFS_STATS_INC(xs_push_ail_restarts);
+			(*restarts)++;
+		}
+	}
+
+	return (nlip);
+}
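+
+/*
+ * Illustrative sketch only -- not part of the original file.  A typical
+ * AIL scan uses the generation count protocol above: take the AIL lock,
+ * start from xfs_trans_first_ail(), and let xfs_trans_next_ail() restart
+ * the walk from the minimum item whenever the list changes underneath us.
+ */
+static inline void
+xfs_trans_example_ail_scan(xfs_mount_t *mp)
+{
+	xfs_log_item_t	*lip;
+	int		gen;
+	int		restarts = 0;
+	SPLDECL(s);
+
+	AIL_LOCK(mp,s);
+	for (lip = xfs_trans_first_ail(mp, &gen);
+	     lip != NULL;
+	     lip = xfs_trans_next_ail(mp, lip, &gen, &restarts)) {
+		/* examine lip->li_lsn, try to push the item, etc. */
+	}
+	AIL_UNLOCK(mp, s);
+}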
+
+
+/*
+ * The active item list (AIL) is a doubly linked list of log
+ * items sorted by ascending lsn.  The base of the list is
+ * a forw/back pointer pair embedded in the xfs mount structure.
+ * The base is initialized with both pointers pointing to the
+ * base.  This case always needs to be distinguished, because
+ * the base has no lsn to look at.  We almost always insert
+ * at the end of the list, so on inserts we search from the
+ * end of the list to find where the new item belongs.
+ */
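+
+/*
+ * Illustrative sketch only -- not part of the original file: with a
+ * sentinel-based list like this, "empty" means the base points back at
+ * itself, which is exactly the test xfs_ail_min() below performs.
+ */
+static inline int
+xfs_ail_example_is_empty(xfs_ail_entry_t *base)
+{
+	return base->ail_forw == (xfs_log_item_t *)base;
+}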
+
+/*
+ * Initialize the doubly linked list to point only to itself.
+ */
+void
+xfs_trans_ail_init(
+	xfs_mount_t	*mp)
+{
+	mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail);
+	mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail);
+}
+
+/*
+ * Insert the given log item into the AIL.
+ * We almost always insert at the end of the list, so on inserts
+ * we search from the end of the list to find where the
+ * new item belongs.
+ */
+STATIC void
+xfs_ail_insert(
+	xfs_ail_entry_t	*base,
+	xfs_log_item_t	*lip)
+/* ARGSUSED */
+{
+	xfs_log_item_t	*next_lip;
+
+	/*
+	 * If the list is empty, just insert the item.
+	 */
+	if (base->ail_back == (xfs_log_item_t*)base) {
+		base->ail_forw = lip;
+		base->ail_back = lip;
+		lip->li_ail.ail_forw = (xfs_log_item_t*)base;
+		lip->li_ail.ail_back = (xfs_log_item_t*)base;
+		return;
+	}
+
+	next_lip = base->ail_back;
+	while ((next_lip != (xfs_log_item_t*)base) &&
+	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) {
+		next_lip = next_lip->li_ail.ail_back;
+	}
+	ASSERT((next_lip == (xfs_log_item_t*)base) ||
+	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	lip->li_ail.ail_forw = next_lip->li_ail.ail_forw;
+	lip->li_ail.ail_back = next_lip;
+	next_lip->li_ail.ail_forw = lip;
+	lip->li_ail.ail_forw->li_ail.ail_back = lip;
+
+	xfs_ail_check(base);
+	return;
+}
+
+/*
+ * Delete the given item from the AIL.  Return a pointer to the item.
+ */
+/*ARGSUSED*/
+STATIC xfs_log_item_t *
+xfs_ail_delete(
+	xfs_ail_entry_t	*base,
+	xfs_log_item_t	*lip)
+/* ARGSUSED */
+{
+	lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back;
+	lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw;
+	lip->li_ail.ail_forw = NULL;
+	lip->li_ail.ail_back = NULL;
+
+	xfs_ail_check(base);
+	return lip;
+}
+
+/*
+ * Return a pointer to the first item in the AIL.
+ * If the AIL is empty, then return NULL.
+ */
+STATIC xfs_log_item_t *
+xfs_ail_min(
+	xfs_ail_entry_t	*base)
+/* ARGSUSED */
+{
+	register xfs_log_item_t *forw = base->ail_forw;
+	if (forw == (xfs_log_item_t*)base) {
+		return NULL;
+	}
+	return forw;
+}
+
+/*
+ * Return a pointer to the item which follows
+ * the given item in the AIL.  If the given item
+ * is the last item in the list, then return NULL.
+ */
+STATIC xfs_log_item_t *
+xfs_ail_next(
+	xfs_ail_entry_t	*base,
+	xfs_log_item_t	*lip)
+/* ARGSUSED */
+{
+	if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) {
+		return NULL;
+	}
+	return lip->li_ail.ail_forw;
+
+}
+
+#ifdef DEBUG
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+	xfs_ail_entry_t *base)
+{
+	xfs_log_item_t	*lip;
+	xfs_log_item_t	*prev_lip;
+
+	lip = base->ail_forw;
+	if (lip == (xfs_log_item_t*)base) {
+		/*
+		 * Make sure the pointers are correct when the list
+		 * is empty.
+		 */
+		ASSERT(base->ail_back == (xfs_log_item_t*)base);
+		return;
+	}
+
+	/*
+	 * Walk the list checking forward and backward pointers,
+	 * lsn ordering, and that every entry has the XFS_LI_IN_AIL
+	 * flag set.
+	 */
+	prev_lip = (xfs_log_item_t*)base;
+	while (lip != (xfs_log_item_t*)base) {
+		if (prev_lip != (xfs_log_item_t*)base) {
+			ASSERT(prev_lip->li_ail.ail_forw == lip);
+			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+		}
+		ASSERT(lip->li_ail.ail_back == prev_lip);
+		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+		prev_lip = lip;
+		lip = lip->li_ail.ail_forw;
+	}
+	ASSERT(lip == (xfs_log_item_t*)base);
+	ASSERT(base->ail_back == prev_lip);
+}
+#endif /* DEBUG */
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
new file mode 100644
index 0000000..a9682b9
--- /dev/null
+++ b/fs/xfs/xfs_trans_buf.c
@@ -0,0 +1,1093 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+
+
+STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
+		xfs_daddr_t, int);
+STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
+		xfs_daddr_t, int);
+
+
+/*
+ * Get and lock the buffer for the caller if it is not already
+ * locked within the given transaction.  If it is already locked
+ * within the transaction, just increment its lock recursion count
+ * and return a pointer to it.
+ *
+ * Use the fast path function xfs_trans_buf_item_match() or the buffer
+ * cache routine incore_match() to find the buffer
+ * if it is already owned by this transaction.
+ *
+ * If we don't already own the buffer, use get_buf() to get it.
+ * If it doesn't yet have an associated xfs_buf_log_item structure,
+ * then allocate one and add the item to this transaction.
+ *
+ * If the transaction pointer is NULL, make this just a normal
+ * get_buf() call.
+ */
+xfs_buf_t *
+xfs_trans_get_buf(xfs_trans_t	*tp,
+		  xfs_buftarg_t	*target_dev,
+		  xfs_daddr_t	blkno,
+		  int		len,
+		  uint		flags)
+{
+	xfs_buf_t		*bp;
+	xfs_buf_log_item_t	*bip;
+
+	if (flags == 0)
+		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+
+	/*
+	 * Default to a normal get_buf() call if the tp is NULL.
+	 */
+	if (tp == NULL) {
+		bp = xfs_buf_get_flags(target_dev, blkno, len,
+							flags | BUF_BUSY);
+		return(bp);
+	}
+
+	/*
+	 * If we find the buffer in the cache with this transaction
+	 * pointer in its b_fsprivate2 field, then we know we already
+	 * have it locked.  In this case we just increment the lock
+	 * recursion count and return the buffer to the caller.
+	 */
+	if (tp->t_items.lic_next == NULL) {
+		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
+	} else {
+		bp  = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
+	}
+	if (bp != NULL) {
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
+			xfs_buftrace("TRANS GET RECUR SHUT", bp);
+			XFS_BUF_SUPER_STALE(bp);
+		}
+		/*
+		 * If the buffer is stale then it was binval'ed
+		 * since last read.  This doesn't matter since the
+		 * caller isn't allowed to use the data anyway.
+		 */
+		else if (XFS_BUF_ISSTALE(bp)) {
+			xfs_buftrace("TRANS GET RECUR STALE", bp);
+			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
+		}
+		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+		ASSERT(bip != NULL);
+		ASSERT(atomic_read(&bip->bli_refcount) > 0);
+		bip->bli_recur++;
+		xfs_buftrace("TRANS GET RECUR", bp);
+		xfs_buf_item_trace("GET RECUR", bip);
+		return (bp);
+	}
+
+	/*
+	 * We always specify the BUF_BUSY flag within a transaction so
+	 * that get_buf does not try to push out a delayed write buffer
+	 * which might cause another transaction to take place (if the
+	 * buffer was delayed alloc).  Such recursive transactions can
+	 * easily deadlock with our current transaction as well as cause
+	 * us to run out of stack space.
+	 */
+	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
+	if (bp == NULL) {
+		return NULL;
+	}
+
+	ASSERT(!XFS_BUF_GETERROR(bp));
+
+	/*
+	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
+	 * it doesn't have one yet, then allocate one and initialize it.
+	 * The checks to see if one is there are in xfs_buf_item_init().
+	 */
+	xfs_buf_item_init(bp, tp->t_mountp);
+
+	/*
+	 * Set the recursion count for the buffer within this transaction
+	 * to 0.
+	 */
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+	bip->bli_recur = 0;
+
+	/*
+	 * Take a reference for this transaction on the buf item.
+	 */
+	atomic_inc(&bip->bli_refcount);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
+
+	/*
+	 * Initialize b_fsprivate2 so we can find it with incore_match()
+	 * above.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, tp);
+
+	xfs_buftrace("TRANS GET", bp);
+	xfs_buf_item_trace("GET", bip);
+	return (bp);
+}
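+/*
+ * A rough usage sketch for the routine above (illustrative only, and
+ * assuming mp->m_ddev_targp is the data device target the caller wants;
+ * real callers add their own error handling):
+ *
+ *	xfs_buf_t	*bp;
+ *
+ *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, len, 0);
+ *	if (bp == NULL)
+ *		return XFS_ERROR(ENOMEM);
+ *	... fill in the new block through XFS_BUF_PTR(bp) ...
+ *	xfs_trans_log_buf(tp, bp, 0, XFS_BUF_COUNT(bp) - 1);
+ *
+ * Calling this again for the same block within the same transaction
+ * just bumps bli_recur and hands back the already locked buffer.
+ */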
+
+/*
+ * Get and lock the superblock buffer of this file system for the
+ * given transaction.
+ *
+ * We don't need to use incore_match() here, because the superblock
+ * buffer is a private buffer which we keep a pointer to in the
+ * mount structure.
+ */
+xfs_buf_t *
+xfs_trans_getsb(xfs_trans_t	*tp,
+		struct xfs_mount *mp,
+		int		flags)
+{
+	xfs_buf_t		*bp;
+	xfs_buf_log_item_t	*bip;
+
+	/*
+	 * Default to just trying to lock the superblock buffer
+	 * if tp is NULL.
+	 */
+	if (tp == NULL) {
+		return (xfs_getsb(mp, flags));
+	}
+
+	/*
+	 * If the superblock buffer already has this transaction
+	 * pointer in its b_fsprivate2 field, then we know we already
+	 * have it locked.  In this case we just increment the lock
+	 * recursion count and return the buffer to the caller.
+	 */
+	bp = mp->m_sb_bp;
+	if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
+		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+		ASSERT(bip != NULL);
+		ASSERT(atomic_read(&bip->bli_refcount) > 0);
+		bip->bli_recur++;
+		xfs_buf_item_trace("GETSB RECUR", bip);
+		return (bp);
+	}
+
+	bp = xfs_getsb(mp, flags);
+	if (bp == NULL) {
+		return NULL;
+	}
+
+	/*
+	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
+	 * it doesn't have one yet, then allocate one and initialize it.
+	 * The checks to see if one is there are in xfs_buf_item_init().
+	 */
+	xfs_buf_item_init(bp, mp);
+
+	/*
+	 * Set the recursion count for the buffer within this transaction
+	 * to 0.
+	 */
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+	bip->bli_recur = 0;
+
+	/*
+	 * Take a reference for this transaction on the buf item.
+	 */
+	atomic_inc(&bip->bli_refcount);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
+
+	/*
+	 * Initialize b_fsprivate2 so we can find it with incore_match()
+	 * above.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, tp);
+
+	xfs_buf_item_trace("GETSB", bip);
+	return (bp);
+}
+
+#ifdef DEBUG
+xfs_buftarg_t *xfs_error_target;
+int	xfs_do_error;
+int	xfs_req_num;
+int	xfs_error_mod = 33;
+#endif
+
+/*
+ * Get and lock the buffer for the caller if it is not already
+ * locked within the given transaction.  If it has not yet been
+ * read in, read it from disk. If it is already locked
+ * within the transaction and already read in, just increment its
+ * lock recursion count and return a pointer to it.
+ *
+ * Use the fast path function xfs_trans_buf_item_match() or the buffer
+ * cache routine incore_match() to find the buffer
+ * if it is already owned by this transaction.
+ *
+ * If we don't already own the buffer, use read_buf() to get it.
+ * If it doesn't yet have an associated xfs_buf_log_item structure,
+ * then allocate one and add the item to this transaction.
+ *
+ * If the transaction pointer is NULL, make this just a normal
+ * read_buf() call.
+ */
+int
+xfs_trans_read_buf(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_buftarg_t	*target,
+	xfs_daddr_t	blkno,
+	int		len,
+	uint		flags,
+	xfs_buf_t	**bpp)
+{
+	xfs_buf_t		*bp;
+	xfs_buf_log_item_t	*bip;
+	int			error;
+
+	if (flags == 0)
+		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+
+	/*
+	 * Default to a normal get_buf() call if the tp is NULL.
+	 */
+	if (tp == NULL) {
+		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+		if (!bp)
+			return XFS_ERROR(ENOMEM);
+
+		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
+			xfs_ioerror_alert("xfs_trans_read_buf", mp,
+					  bp, blkno);
+			error = XFS_BUF_GETERROR(bp);
+			xfs_buf_relse(bp);
+			return error;
+		}
+#ifdef DEBUG
+		if (xfs_do_error && (bp != NULL)) {
+			if (xfs_error_target == target) {
+				if (((xfs_req_num++) % xfs_error_mod) == 0) {
+					xfs_buf_relse(bp);
+					printk("Returning error!\n");
+					return XFS_ERROR(EIO);
+				}
+			}
+		}
+#endif
+		if (XFS_FORCED_SHUTDOWN(mp))
+			goto shutdown_abort;
+		*bpp = bp;
+		return 0;
+	}
+
+	/*
+	 * If we find the buffer in the cache with this transaction
+	 * pointer in its b_fsprivate2 field, then we know we already
+	 * have it locked.  If it is already read in we just increment
+	 * the lock recursion count and return the buffer to the caller.
+	 * If the buffer is not yet read in, then we read it in, increment
+	 * the lock recursion count, and return it to the caller.
+	 */
+	if (tp->t_items.lic_next == NULL) {
+		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
+	} else {
+		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
+	}
+	if (bp != NULL) {
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
+		if (!(XFS_BUF_ISDONE(bp))) {
+			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
+			ASSERT(!XFS_BUF_ISASYNC(bp));
+			XFS_BUF_READ(bp);
+			xfsbdstrat(tp->t_mountp, bp);
+			xfs_iowait(bp);
+			if (XFS_BUF_GETERROR(bp) != 0) {
+				xfs_ioerror_alert("xfs_trans_read_buf", mp,
+						  bp, blkno);
+				error = XFS_BUF_GETERROR(bp);
+				xfs_buf_relse(bp);
+				/*
+				 * We can gracefully recover from most
+				 * read errors. Ones we can't are those
+				 * that happen after the transaction's
+				 * already dirty.
+				 */
+				if (tp->t_flags & XFS_TRANS_DIRTY)
+					xfs_force_shutdown(tp->t_mountp,
+							   XFS_METADATA_IO_ERROR);
+				return error;
+			}
+		}
+		/*
+		 * We never locked this buf ourselves, so we shouldn't
+		 * brelse it either. Just get out.
+		 */
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
+			*bpp = NULL;
+			return XFS_ERROR(EIO);
+		}
+
+		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+		bip->bli_recur++;
+
+		ASSERT(atomic_read(&bip->bli_refcount) > 0);
+		xfs_buf_item_trace("READ RECUR", bip);
+		*bpp = bp;
+		return 0;
+	}
+
+	/*
+	 * We always specify the BUF_BUSY flag within a transaction so
+	 * that get_buf does not try to push out a delayed write buffer
+	 * which might cause another transaction to take place (if the
+	 * buffer was delayed alloc).  Such recursive transactions can
+	 * easily deadlock with our current transaction as well as cause
+	 * us to run out of stack space.
+	 */
+	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+	if (bp == NULL) {
+		*bpp = NULL;
+		return 0;
+	}
+	if (XFS_BUF_GETERROR(bp) != 0) {
+		XFS_BUF_SUPER_STALE(bp);
+		xfs_buftrace("READ ERROR", bp);
+		error = XFS_BUF_GETERROR(bp);
+
+		xfs_ioerror_alert("xfs_trans_read_buf", mp,
+				  bp, blkno);
+		if (tp->t_flags & XFS_TRANS_DIRTY)
+			xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR);
+		xfs_buf_relse(bp);
+		return error;
+	}
+#ifdef DEBUG
+	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
+		if (xfs_error_target == target) {
+			if (((xfs_req_num++) % xfs_error_mod) == 0) {
+				xfs_force_shutdown(tp->t_mountp,
+						   XFS_METADATA_IO_ERROR);
+				xfs_buf_relse(bp);
+				printk("Returning error in trans!\n");
+				return XFS_ERROR(EIO);
+			}
+		}
+	}
+#endif
+	if (XFS_FORCED_SHUTDOWN(mp))
+		goto shutdown_abort;
+
+	/*
+	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
+	 * it doesn't have one yet, then allocate one and initialize it.
+	 * The checks to see if one is there are in xfs_buf_item_init().
+	 */
+	xfs_buf_item_init(bp, tp->t_mountp);
+
+	/*
+	 * Set the recursion count for the buffer within this transaction
+	 * to 0.
+	 */
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+	bip->bli_recur = 0;
+
+	/*
+	 * Take a reference for this transaction on the buf item.
+	 */
+	atomic_inc(&bip->bli_refcount);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
+
+	/*
+	 * Initialize b_fsprivate2 so we can find it with incore_match()
+	 * above.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, tp);
+
+	xfs_buftrace("TRANS READ", bp);
+	xfs_buf_item_trace("READ", bip);
+	*bpp = bp;
+	return 0;
+
+shutdown_abort:
+	/*
+	 * The theory here is that the buffer is good, but we're
+	 * bailing out because the filesystem is being forcibly
+	 * shut down.  So we should leave the b_flags alone, since
+	 * the buffer has not been staled, and just get out.
+	 */
+#if defined(DEBUG)
+	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
+		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
+#endif
+	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
+						(XFS_B_STALE|XFS_B_DELWRI));
+
+	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
+	xfs_buf_relse(bp);
+	*bpp = NULL;
+	return XFS_ERROR(EIO);
+}
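+/*
+ * A minimal sketch of how a caller might drive the routine above
+ * (illustrative only; the "out" label and its cleanup are hypothetical):
+ *
+ *	xfs_buf_t	*bp;
+ *	int		error;
+ *
+ *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ *				   blkno, len, 0, &bp);
+ *	if (error)
+ *		goto out;
+ *	... examine, and possibly modify and xfs_trans_log_buf(), bp ...
+ *
+ * Note that a zero return with *bpp set to NULL is possible, so callers
+ * which require the buffer must check for that case as well.
+ */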
+
+
+/*
+ * Release the buffer bp which was previously acquired with one of the
+ * xfs_trans_... buffer allocation routines if the buffer has not
+ * been modified within this transaction.  If the buffer has been modified
+ * within this transaction, decrement the recursion count but do not
+ * release the buffer even if the count goes to 0.  If the buffer has not
+ * been modified within the transaction, decrement the recursion count and
+ * release the buffer if the count goes to 0.
+ *
+ * If the buffer is to be released and it was not modified before
+ * this transaction began, then free the buf_log_item associated with it.
+ *
+ * If the transaction pointer is NULL, make this just a normal
+ * brelse() call.
+ */
+void
+xfs_trans_brelse(xfs_trans_t	*tp,
+		 xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+	xfs_log_item_t		*lip;
+	xfs_log_item_desc_t	*lidp;
+
+	/*
+	 * Default to a normal brelse() call if the tp is NULL.
+	 */
+	if (tp == NULL) {
+		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
+		/*
+		 * If there's a buf log item attached to the buffer,
+		 * then let the AIL know that the buffer is being
+		 * unlocked.
+		 */
+		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
+			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+			if (lip->li_type == XFS_LI_BUF) {
+				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
+				xfs_trans_unlocked_item(
+						bip->bli_item.li_mountp,
+						lip);
+			}
+		}
+		xfs_buf_relse(bp);
+		return;
+	}
+
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	/*
+	 * Find the item descriptor pointing to this buffer's
+	 * log item.  It must be there.
+	 */
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
+	ASSERT(lidp != NULL);
+
+	/*
+	 * If the release is just for a recursive lock,
+	 * then decrement the count and return.
+	 */
+	if (bip->bli_recur > 0) {
+		bip->bli_recur--;
+		xfs_buf_item_trace("RELSE RECUR", bip);
+		return;
+	}
+
+	/*
+	 * If the buffer is dirty within this transaction, we can't
+	 * release it until we commit.
+	 */
+	if (lidp->lid_flags & XFS_LID_DIRTY) {
+		xfs_buf_item_trace("RELSE DIRTY", bip);
+		return;
+	}
+
+	/*
+	 * If the buffer has been invalidated, then we can't release
+	 * it until the transaction commits to disk unless it is re-dirtied
+	 * as part of this transaction.  This prevents us from pulling
+	 * the item from the AIL before we should.
+	 */
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		xfs_buf_item_trace("RELSE STALE", bip);
+		return;
+	}
+
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+	xfs_buf_item_trace("RELSE", bip);
+
+	/*
+	 * Free up the log item descriptor tracking the released item.
+	 */
+	xfs_trans_free_item(tp, lidp);
+
+	/*
+	 * Clear the hold flag in the buf log item if it is set.
+	 * We wouldn't want the next user of the buffer to
+	 * get confused.
+	 */
+	if (bip->bli_flags & XFS_BLI_HOLD) {
+		bip->bli_flags &= ~XFS_BLI_HOLD;
+	}
+
+	/*
+	 * Drop our reference to the buf log item.
+	 */
+	atomic_dec(&bip->bli_refcount);
+
+	/*
+	 * If the buf item is not tracking data in the log, then
+	 * we must free it before releasing the buffer back to the
+	 * free pool.  Before releasing the buffer to the free pool,
+	 * clear the transaction pointer in b_fsprivate2 to dissolve
+	 * its relation to this transaction.
+	 */
+	if (!xfs_buf_item_dirty(bip)) {
+/***
+		ASSERT(bp->b_pincount == 0);
+***/
+		ASSERT(atomic_read(&bip->bli_refcount) == 0);
+		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
+		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
+		xfs_buf_item_relse(bp);
+		bip = NULL;
+	}
+	XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+
+	/*
+	 * If we've still got a buf log item on the buffer, then
+	 * tell the AIL that the buffer is being unlocked.
+	 */
+	if (bip != NULL) {
+		xfs_trans_unlocked_item(bip->bli_item.li_mountp,
+					(xfs_log_item_t*)bip);
+	}
+
+	xfs_buf_relse(bp);
+	return;
+}
+
+/*
+ * Add the locked buffer to the transaction.
+ * The buffer must be locked, and it cannot be associated with any
+ * transaction.
+ *
+ * If the buffer does not yet have a buf log item associated with it,
+ * then allocate one for it.  Then add the buf item to the transaction.
+ */
+void
+xfs_trans_bjoin(xfs_trans_t	*tp,
+		xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
+
+	/*
+	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
+	 * it doesn't have one yet, then allocate one and initialize it.
+	 * The checks to see if one is there are in xfs_buf_item_init().
+	 */
+	xfs_buf_item_init(bp, tp->t_mountp);
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+
+	/*
+	 * Take a reference for this transaction on the buf item.
+	 */
+	atomic_inc(&bip->bli_refcount);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
+
+	/*
+	 * Initialize b_fsprivate2 so we can find it with incore_match()
+	 * in xfs_trans_get_buf() and friends above.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, tp);
+
+	xfs_buf_item_trace("BJOIN", bip);
+}
+
+/*
+ * Mark the buffer as not needing to be unlocked when the buf item's
+ * IOP_UNLOCK() routine is called.  The buffer must already be locked
+ * and associated with the given transaction.
+ */
+/* ARGSUSED */
+void
+xfs_trans_bhold(xfs_trans_t	*tp,
+		xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	bip->bli_flags |= XFS_BLI_HOLD;
+	xfs_buf_item_trace("BHOLD", bip);
+}
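+/*
+ * Sketch of the hold pattern this enables (illustrative only):
+ *
+ *	bp = xfs_trans_get_buf(tp, target, blkno, len, 0);
+ *	... modify and xfs_trans_log_buf() the buffer ...
+ *	xfs_trans_bhold(tp, bp);
+ *	... commit the transaction ...
+ *
+ * The buffer stays locked and owned by the caller across the commit,
+ * to be joined to a follow-up transaction with xfs_trans_bjoin() or
+ * released directly.
+ */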
+
+/*
+ * This is called to mark bytes first through last inclusive of the given
+ * buffer as needing to be logged when the transaction is committed.
+ * The buffer must already be associated with the given transaction.
+ *
+ * First and last are numbers relative to the beginning of this buffer,
+ * so the first byte in the buffer is numbered 0 regardless of the
+ * value of b_blkno.
+ */
+void
+xfs_trans_log_buf(xfs_trans_t	*tp,
+		  xfs_buf_t	*bp,
+		  uint		first,
+		  uint		last)
+{
+	xfs_buf_log_item_t	*bip;
+	xfs_log_item_desc_t	*lidp;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+	ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
+	ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
+	       (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));
+
+	/*
+	 * Mark the buffer as needing to be written out eventually,
+	 * and set its iodone function to remove the buffer's buf log
+	 * item from the AIL and free it when the buffer is flushed
+	 * to disk.  See xfs_buf_attach_iodone() for more details
+	 * on li_cb and xfs_buf_iodone_callbacks().
+	 * If we end up aborting this transaction, we trap this buffer
+	 * inside the b_bdstrat callback so that this won't get written to
+	 * disk.
+	 */
+	XFS_BUF_DELAYWRITE(bp);
+	XFS_BUF_DONE(bp);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
+	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
+
+	/*
+	 * If we invalidated the buffer within this transaction, then
+	 * cancel the invalidation now that we're dirtying the buffer
+	 * again.  There are no races with the code in xfs_buf_item_unpin(),
+	 * because we have a reference to the buffer this entire time.
+	 */
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		xfs_buf_item_trace("BLOG UNSTALE", bip);
+		bip->bli_flags &= ~XFS_BLI_STALE;
+		ASSERT(XFS_BUF_ISSTALE(bp));
+		XFS_BUF_UNSTALE(bp);
+		bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
+	}
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
+	bip->bli_flags |= XFS_BLI_LOGGED;
+	xfs_buf_item_log(bip, first, last);
+	xfs_buf_item_trace("BLOG", bip);
+}
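+/*
+ * For example (a hedged sketch; the header structure and field here are
+ * made up purely for illustration), to log a single field of an on-disk
+ * structure that sits at the start of the buffer:
+ *
+ *	some_hdr_t	*hdr;
+ *	uint		off;
+ *
+ *	hdr = (some_hdr_t *)XFS_BUF_PTR(bp);
+ *	hdr->h_count++;
+ *	off = (uint)((char *)&hdr->h_count - (char *)hdr);
+ *	xfs_trans_log_buf(tp, bp, off, off + sizeof(hdr->h_count) - 1);
+ *
+ * i.e. first and last are inclusive byte offsets from the start of the
+ * buffer, independent of where the buffer lives on disk.
+ */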
+
+
+/*
+ * This called to invalidate a buffer that is being used within
+ * a transaction.  Typically this is because the blocks in the
+ * buffer are being freed, so we need to prevent it from being
+ * written out when we're done.  Allowing it to be written again
+ * might overwrite data in the free blocks if they are reallocated
+ * to a file.
+ *
+ * We prevent the buffer from being written out by clearing the
+ * B_DELWRI flag.  We can't always
+ * get rid of the buf log item at this point, though, because
+ * the buffer may still be pinned by another transaction.  If that
+ * is the case, then we'll wait until the buffer is committed to
+ * disk for the last time (we can tell by the ref count) and
+ * free it in xfs_buf_item_unpin().  Until it is cleaned up we
+ * will keep the buffer locked so that the buffer and buf log item
+ * are not reused.
+ */
+void
+xfs_trans_binval(
+	xfs_trans_t	*tp,
+	xfs_buf_t	*bp)
+{
+	xfs_log_item_desc_t	*lidp;
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
+	ASSERT(lidp != NULL);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	if (bip->bli_flags & XFS_BLI_STALE) {
+		/*
+		 * If the buffer is already invalidated, then
+		 * just return.
+		 */
+		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
+		ASSERT(XFS_BUF_ISSTALE(bp));
+		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
+		ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
+		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
+		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
+		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
+		xfs_buftrace("XFS_BINVAL RECUR", bp);
+		xfs_buf_item_trace("BINVAL RECUR", bip);
+		return;
+	}
+
+	/*
+	 * Clear the dirty bit in the buffer and set the STALE flag
+	 * in the buf log item.  The STALE flag will be used in
+	 * xfs_buf_item_unpin() to determine if it should clean up
+	 * when the last reference to the buf item is given up.
+	 * We set the XFS_BLI_CANCEL flag in the buf log format structure
+	 * and log the buf item.  This will be used at recovery time
+	 * to determine that copies of the buffer in the log before
+	 * this should not be replayed.
+	 * We mark the item descriptor and the transaction dirty so
+	 * that we'll hold the buffer until after the commit.
+	 *
+	 * Since we're invalidating the buffer, we also clear the state
+	 * about which parts of the buffer have been logged.  We also
+	 * clear the flag indicating that this is an inode buffer since
+	 * the data in the buffer will no longer be valid.
+	 *
+	 * We set the stale bit in the buffer as well since we're getting
+	 * rid of it.
+	 */
+	XFS_BUF_UNDELAYWRITE(bp);
+	XFS_BUF_STALE(bp);
+	bip->bli_flags |= XFS_BLI_STALE;
+	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
+	bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
+	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
+	memset((char *)(bip->bli_format.blf_data_map), 0,
+	      (bip->bli_format.blf_map_size * sizeof(uint)));
+	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	xfs_buftrace("XFS_BINVAL", bp);
+	xfs_buf_item_trace("BINVAL", bip);
+}
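+/*
+ * Typical use (sketch): a transaction that frees the blocks backing a
+ * piece of metadata gets the buffer in the transaction and invalidates
+ * it instead of logging it:
+ *
+ *	bp = xfs_trans_get_buf(tp, target, blkno, len, 0);
+ *	xfs_trans_binval(tp, bp);
+ *
+ * so that the stale contents can never be written over the freed (and
+ * possibly reallocated) blocks.
+ */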
+
+/*
+ * This call is used to indicate that the buffer contains on-disk
+ * inodes which must be handled specially during recovery.  They
+ * require special handling because only the di_next_unlinked from
+ * the inodes in the buffer should be recovered.  The rest of the
+ * data in the buffer is logged via the inodes themselves.
+ *
+ * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
+ * format structure so that we'll know what to do at recovery time.
+ */
+/* ARGSUSED */
+void
+xfs_trans_inode_buf(
+	xfs_trans_t	*tp,
+	xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
+}
+
+/*
+ * This call is used to indicate that the buffer is going to
+ * be staled and was an inode buffer.  This means it gets
+ * special processing during unpin: any inodes associated with
+ * the buffer should be removed from the AIL.  There is also
+ * special processing during recovery: any replay of the inodes
+ * in the buffer needs to be prevented, as the buffer may have
+ * been reused.
+ */
+void
+xfs_trans_stale_inode_buf(
+	xfs_trans_t	*tp,
+	xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	bip->bli_flags |= XFS_BLI_STALE_INODE;
+	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
+		xfs_buf_iodone;
+}
+
+
+
+/*
+ * Mark the buffer as being one which contains newly allocated
+ * inodes.  We need to make sure that even if this buffer is
+ * relogged as an 'inode buf' we still recover all of the inode
+ * images in the face of a crash.  This works in coordination with
+ * xfs_buf_item_committed() to ensure that the buffer remains in the
+ * AIL at its original location even after it has been relogged.
+ */
+/* ARGSUSED */
+void
+xfs_trans_inode_alloc_buf(
+	xfs_trans_t	*tp,
+	xfs_buf_t	*bp)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
+}
+
+
+/*
+ * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
+ * dquots. However, unlike in inode buffer recovery, dquot buffers get
+ * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
+ * The only thing that makes dquot buffers different from regular
+ * buffers is that we must not replay dquot bufs when recovering
+ * if a _corresponding_ quotaoff has happened. We also have to distinguish
+ * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
+ * can be turned off independently.
+ */
+/* ARGSUSED */
+void
+xfs_trans_dquot_buf(
+	xfs_trans_t	*tp,
+	xfs_buf_t	*bp,
+	uint		type)
+{
+	xfs_buf_log_item_t	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
+	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+	ASSERT(type == XFS_BLI_UDQUOT_BUF ||
+	       type == XFS_BLI_GDQUOT_BUF);
+
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
+	bip->bli_format.blf_flags |= type;
+}
+
+/*
+ * Check to see if a buffer matching the given parameters is already
+ * a part of the given transaction.  Only check the first, embedded
+ * chunk, since we don't want to spend all day scanning large transactions.
+ */
+STATIC xfs_buf_t *
+xfs_trans_buf_item_match(
+	xfs_trans_t	*tp,
+	xfs_buftarg_t	*target,
+	xfs_daddr_t	blkno,
+	int		len)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_desc_t	*lidp;
+	xfs_buf_log_item_t	*blip;
+	xfs_buf_t		*bp;
+	int			i;
+
+	bp = NULL;
+	len = BBTOB(len);
+	licp = &tp->t_items;
+	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+		for (i = 0; i < licp->lic_unused; i++) {
+			/*
+			 * Skip unoccupied slots.
+			 */
+			if (XFS_LIC_ISFREE(licp, i)) {
+				continue;
+			}
+
+			lidp = XFS_LIC_SLOT(licp, i);
+			blip = (xfs_buf_log_item_t *)lidp->lid_item;
+			if (blip->bli_item.li_type != XFS_LI_BUF) {
+				continue;
+			}
+
+			bp = blip->bli_buf;
+			if ((XFS_BUF_TARGET(bp) == target) &&
+			    (XFS_BUF_ADDR(bp) == blkno) &&
+			    (XFS_BUF_COUNT(bp) == len)) {
+				/*
+				 * We found it.  Break out and
+				 * return the pointer to the buffer.
+				 */
+				break;
+			} else {
+				bp = NULL;
+			}
+		}
+	}
+	return bp;
+}
+
+/*
+ * Check to see if a buffer matching the given parameters is already
+ * a part of the given transaction.  Check all the chunks, we
+ * want to be thorough.
+ */
+STATIC xfs_buf_t *
+xfs_trans_buf_item_match_all(
+	xfs_trans_t	*tp,
+	xfs_buftarg_t	*target,
+	xfs_daddr_t	blkno,
+	int		len)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_desc_t	*lidp;
+	xfs_buf_log_item_t	*blip;
+	xfs_buf_t		*bp;
+	int			i;
+
+	bp = NULL;
+	len = BBTOB(len);
+	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
+		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+			ASSERT(licp == &tp->t_items);
+			ASSERT(licp->lic_next == NULL);
+			return NULL;
+		}
+		for (i = 0; i < licp->lic_unused; i++) {
+			/*
+			 * Skip unoccupied slots.
+			 */
+			if (XFS_LIC_ISFREE(licp, i)) {
+				continue;
+			}
+
+			lidp = XFS_LIC_SLOT(licp, i);
+			blip = (xfs_buf_log_item_t *)lidp->lid_item;
+			if (blip->bli_item.li_type != XFS_LI_BUF) {
+				continue;
+			}
+
+			bp = blip->bli_buf;
+			if ((XFS_BUF_TARGET(bp) == target) &&
+			    (XFS_BUF_ADDR(bp) == blkno) &&
+			    (XFS_BUF_COUNT(bp) == len)) {
+				/*
+				 * We found it.  Break out and
+				 * return the pointer to the buffer.
+				 */
+				return bp;
+			}
+		}
+	}
+	return NULL;
+}
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
new file mode 100644
index 0000000..93259a1
--- /dev/null
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_extfree_item.h"
+
+/*
+ * This routine is called to allocate an "extent free intention"
+ * log item that will hold nextents worth of extents.  The
+ * caller must use all nextents extents, because we are not
+ * flexible about this at all.
+ */
+xfs_efi_log_item_t *
+xfs_trans_get_efi(xfs_trans_t	*tp,
+		  uint		nextents)
+{
+	xfs_efi_log_item_t	*efip;
+
+	ASSERT(tp != NULL);
+	ASSERT(nextents > 0);
+
+	efip = xfs_efi_init(tp->t_mountp, nextents);
+	ASSERT(efip != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip);
+
+	return (efip);
+}
+
+/*
+ * This routine is called to indicate that the described
+ * extent is to be logged as needing to be freed.  It should
+ * be called once for each extent to be freed.
+ */
+void
+xfs_trans_log_efi_extent(xfs_trans_t		*tp,
+			 xfs_efi_log_item_t	*efip,
+			 xfs_fsblock_t		start_block,
+			 xfs_extlen_t		ext_len)
+{
+	xfs_log_item_desc_t	*lidp;
+	uint			next_extent;
+	xfs_extent_t		*extp;
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip);
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+
+	next_extent = efip->efi_next_extent;
+	ASSERT(next_extent < efip->efi_format.efi_nextents);
+	extp = &(efip->efi_format.efi_extents[next_extent]);
+	extp->ext_start = start_block;
+	extp->ext_len = ext_len;
+	efip->efi_next_extent++;
+}
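+/*
+ * Usage sketch for the two routines above (illustrative; the free_list
+ * array is hypothetical):
+ *
+ *	efip = xfs_trans_get_efi(tp, nextents);
+ *	for (i = 0; i < nextents; i++)
+ *		xfs_trans_log_efi_extent(tp, efip,
+ *				free_list[i].start, free_list[i].len);
+ *
+ * As noted above, exactly nextents extents must be logged against the
+ * EFI; the matching EFD is obtained later with xfs_trans_get_efd() and
+ * filled in the same way once the extents are actually freed.
+ */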
+
+
+/*
+ * This routine is called to allocate an "extent free done"
+ * log item that will hold nextents worth of extents.  The
+ * caller must use all nextents extents, because we are not
+ * flexible about this at all.
+ */
+xfs_efd_log_item_t *
+xfs_trans_get_efd(xfs_trans_t		*tp,
+		  xfs_efi_log_item_t	*efip,
+		  uint			nextents)
+{
+	xfs_efd_log_item_t	*efdp;
+
+	ASSERT(tp != NULL);
+	ASSERT(nextents > 0);
+
+	efdp = xfs_efd_init(tp->t_mountp, efip, nextents);
+	ASSERT(efdp != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp);
+
+	return (efdp);
+}
+
+/*
+ * This routine is called to indicate that the described
+ * extent is to be logged as having been freed.  It should
+ * be called once for each extent freed.
+ */
+void
+xfs_trans_log_efd_extent(xfs_trans_t		*tp,
+			 xfs_efd_log_item_t	*efdp,
+			 xfs_fsblock_t		start_block,
+			 xfs_extlen_t		ext_len)
+{
+	xfs_log_item_desc_t	*lidp;
+	uint			next_extent;
+	xfs_extent_t		*extp;
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp);
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+
+	next_extent = efdp->efd_next_extent;
+	ASSERT(next_extent < efdp->efd_format.efd_nextents);
+	extp = &(efdp->efd_format.efd_extents[next_extent]);
+	extp->ext_start = start_block;
+	extp->ext_len = ext_len;
+	efdp->efd_next_extent++;
+}
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
new file mode 100644
index 0000000..e2c3706f
--- /dev/null
+++ b/fs/xfs/xfs_trans_inode.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+
+#ifdef XFS_TRANS_DEBUG
+STATIC void
+xfs_trans_inode_broot_debug(
+	xfs_inode_t	*ip);
+#else
+#define	xfs_trans_inode_broot_debug(ip)
+#endif
+
+
+/*
+ * Get and lock the inode for the caller if it is not already
+ * locked within the given transaction.  If it is already locked
+ * within the transaction, just increment its lock recursion count
+ * and return a pointer to it.
+ *
+ * For an inode to be locked in a transaction, the inode lock, as
+ * opposed to the io lock, must be taken exclusively.  This ensures
+ * that the inode can be involved in only 1 transaction at a time.
+ * Lock recursion is handled on the io lock, but only for lock modes
+ * of equal or lesser strength.  That is, you can recur on the io lock
+ * held EXCL with a SHARED request but not vice versa.  Also, if
+ * the inode is already a part of the transaction then you cannot
+ * go from not holding the io lock to having it EXCL or SHARED.
+ *
+ * Use the inode cache routine xfs_inode_incore() to find the inode
+ * if it is already owned by this transaction.
+ *
+ * If we don't already own the inode, use xfs_iget() to get it.
+ * Since the inode log item structure is embedded in the incore
+ * inode structure and is initialized when the inode is brought
+ * into memory, there is nothing to do with it here.
+ *
+ * If the given transaction pointer is NULL, just call xfs_iget().
+ * This simplifies code which must handle both cases.
+ */
+int
+xfs_trans_iget(
+	xfs_mount_t	*mp,
+	xfs_trans_t	*tp,
+	xfs_ino_t	ino,
+	uint		flags,
+	uint		lock_flags,
+	xfs_inode_t	**ipp)
+{
+	int			error;
+	xfs_inode_t		*ip;
+	xfs_inode_log_item_t	*iip;
+
+	/*
+	 * If the transaction pointer is NULL, just call the normal
+	 * xfs_iget().
+	 */
+	if (tp == NULL)
+		return xfs_iget(mp, NULL, ino, flags, lock_flags, ipp, 0);
+
+	/*
+	 * If we find the inode in core with this transaction
+	 * pointer in its i_transp field, then we know we already
+	 * have it locked.  In this case we just increment the lock
+	 * recursion count and return the inode to the caller.
+	 * Assert that the inode is already locked in the mode requested
+	 * by the caller.  We cannot do lock promotions yet, so
+	 * die if someone gets this wrong.
+	 */
+	if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
+		/*
+		 * Make sure that the inode lock is held EXCL and
+		 * that the io lock is never upgraded when the inode
+		 * is already a part of the transaction.
+		 */
+		ASSERT(ip->i_itemp != NULL);
+		ASSERT(lock_flags & XFS_ILOCK_EXCL);
+		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
+		       ismrlocked(&ip->i_iolock, MR_UPDATE));
+		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
+		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
+		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
+		       ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS)));
+		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
+		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));
+
+		if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
+			ip->i_itemp->ili_iolock_recur++;
+		}
+		if (lock_flags & XFS_ILOCK_EXCL) {
+			ip->i_itemp->ili_ilock_recur++;
+		}
+		*ipp = ip;
+		return 0;
+	}
+
+	ASSERT(lock_flags & XFS_ILOCK_EXCL);
+	error = xfs_iget(tp->t_mountp, tp, ino, flags, lock_flags, &ip, 0);
+	if (error) {
+		return error;
+	}
+	ASSERT(ip != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	if (ip->i_itemp == NULL)
+		xfs_inode_item_init(ip, mp);
+	iip = ip->i_itemp;
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip));
+
+	xfs_trans_inode_broot_debug(ip);
+
+	/*
+	 * If the IO lock has been acquired, mark that in
+	 * the inode log item so we'll know to unlock it
+	 * when the transaction commits.
+	 */
+	ASSERT(iip->ili_flags == 0);
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
+	}
+
+	/*
+	 * Initialize i_transp so we can find it with xfs_inode_incore()
+	 * above.
+	 */
+	ip->i_transp = tp;
+
+	*ipp = ip;
+	return 0;
+}
+
+/*
+ * Add the locked inode to the transaction.
+ * The inode must be locked, and it cannot be associated with any
+ * transaction.  The caller must specify the locks already held
+ * on the inode.
+ */
+void
+xfs_trans_ijoin(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	xfs_inode_log_item_t	*iip;
+
+	ASSERT(ip->i_transp == NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+	ASSERT(lock_flags & XFS_ILOCK_EXCL);
+	if (ip->i_itemp == NULL)
+		xfs_inode_item_init(ip, ip->i_mount);
+	iip = ip->i_itemp;
+	ASSERT(iip->ili_flags == 0);
+	ASSERT(iip->ili_ilock_recur == 0);
+	ASSERT(iip->ili_iolock_recur == 0);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip));
+
+	xfs_trans_inode_broot_debug(ip);
+
+	/*
+	 * If the IO lock is already held, mark that in the inode log item.
+	 */
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
+	}
+
+	/*
+	 * Initialize i_transp so we can find it with xfs_inode_incore()
+	 * in xfs_trans_iget() above.
+	 */
+	ip->i_transp = tp;
+}
+
+
+
+/*
+ * Mark the inode as not needing to be unlocked when the inode item's
+ * IOP_UNLOCK() routine is called.  The inode must already be locked
+ * and associated with the given transaction.
+ */
+/*ARGSUSED*/
+void
+xfs_trans_ihold(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
+}
+
+/*
+ * Cancel the previous inode hold request made on this inode
+ * for this transaction.
+ */
+/*ARGSUSED*/
+void
+xfs_trans_ihold_release(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+
+	ip->i_itemp->ili_flags &= ~XFS_ILI_HOLD;
+}
+
+
+/*
+ * This is called to mark the fields indicated in fieldmask as needing
+ * to be logged when the transaction is committed.  The inode must
+ * already be associated with the given transaction.
+ *
+ * The values for fieldmask are defined in xfs_inode_item.h.  We always
+ * log all of the core inode if any of it has changed, and we always log
+ * all of the inline data/extents/b-tree root if any of them has changed.
+ */
+void
+xfs_trans_log_inode(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	uint		flags)
+{
+	xfs_log_item_desc_t	*lidp;
+
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+
+	/*
+	 * Always OR in the bits from the ili_last_fields field.
+	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
+	 * routines in the eventual clearing of the ilf_fields bits.
+	 * See the big comment in xfs_iflush() for an explanation of
+	 * this coordination mechanism.
+	 */
+	flags |= ip->i_itemp->ili_last_fields;
+	ip->i_itemp->ili_format.ilf_fields |= flags;
+}
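+/*
+ * A minimal sketch of the usual calling pattern (illustrative only;
+ * the core field being changed is arbitrary):
+ *
+ *	error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
+ *	if (error)
+ *		return error;
+ *	ip->i_d.di_nlink++;
+ *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ *
+ * Callers which already hold the inode locked use xfs_trans_ijoin()
+ * instead of xfs_trans_iget() and then log with the appropriate
+ * XFS_ILOG_* flags in the same way.
+ */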
+
+#ifdef XFS_TRANS_DEBUG
+/*
+ * Keep track of the state of the inode btree root to make sure we
+ * log it properly.
+ */
+STATIC void
+xfs_trans_inode_broot_debug(
+	xfs_inode_t	*ip)
+{
+	xfs_inode_log_item_t	*iip;
+
+	ASSERT(ip->i_itemp != NULL);
+	iip = ip->i_itemp;
+	if (iip->ili_root_size != 0) {
+		ASSERT(iip->ili_orig_root != NULL);
+		kmem_free(iip->ili_orig_root, iip->ili_root_size);
+		iip->ili_root_size = 0;
+		iip->ili_orig_root = NULL;
+	}
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+		ASSERT((ip->i_df.if_broot != NULL) &&
+		       (ip->i_df.if_broot_bytes > 0));
+		iip->ili_root_size = ip->i_df.if_broot_bytes;
+		iip->ili_orig_root =
+			(char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
+		memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
+		      iip->ili_root_size);
+	}
+}
+#endif
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
new file mode 100644
index 0000000..1b8a756
--- /dev/null
+++ b/fs/xfs/xfs_trans_item.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+
+STATIC int	xfs_trans_unlock_chunk(xfs_log_item_chunk_t *,
+					int, int, xfs_lsn_t);
+
+/*
+ * This is called to add the given log item to the transaction's
+ * list of log items.  It must find a free log item descriptor
+ * or allocate a new one and add the item to that descriptor.
+ * The function returns a pointer to item descriptor used to point
+ * to the new item.  The log item will now point to its new descriptor
+ * with its li_desc field.
+ */
+xfs_log_item_desc_t *
+xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
+{
+	xfs_log_item_desc_t	*lidp;
+	xfs_log_item_chunk_t	*licp;
+	int			i=0;
+
+	/*
+	 * If there are no free descriptors, allocate a new chunk
+	 * of them and put it at the front of the chunk list.
+	 */
+	if (tp->t_items_free == 0) {
+		licp = (xfs_log_item_chunk_t*)
+		       kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP);
+		ASSERT(licp != NULL);
+		/*
+		 * Initialize the chunk, and then
+		 * claim the first slot in the newly allocated chunk.
+		 */
+		XFS_LIC_INIT(licp);
+		XFS_LIC_CLAIM(licp, 0);
+		licp->lic_unused = 1;
+		XFS_LIC_INIT_SLOT(licp, 0);
+		lidp = XFS_LIC_SLOT(licp, 0);
+
+		/*
+		 * Link in the new chunk and update the free count.
+		 */
+		licp->lic_next = tp->t_items.lic_next;
+		tp->t_items.lic_next = licp;
+		tp->t_items_free = XFS_LIC_NUM_SLOTS - 1;
+
+		/*
+		 * Initialize the descriptor and the generic portion
+		 * of the log item.
+		 *
+		 * Point the new slot at this item and return it.
+		 * Also point the log item at its currently active
+		 * descriptor and set the item's mount pointer.
+		 */
+		lidp->lid_item = lip;
+		lidp->lid_flags = 0;
+		lidp->lid_size = 0;
+		lip->li_desc = lidp;
+		lip->li_mountp = tp->t_mountp;
+		return (lidp);
+	}
+
+	/*
+	 * Find the free descriptor. It is somewhere in the chunklist
+	 * of descriptors.
+	 */
+	licp = &tp->t_items;
+	while (licp != NULL) {
+		if (XFS_LIC_VACANCY(licp)) {
+			if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
+				i = licp->lic_unused;
+				ASSERT(XFS_LIC_ISFREE(licp, i));
+				break;
+			}
+			for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
+				if (XFS_LIC_ISFREE(licp, i))
+					break;
+			}
+			ASSERT(i <= XFS_LIC_MAX_SLOT);
+			break;
+		}
+		licp = licp->lic_next;
+	}
+	ASSERT(licp != NULL);
+	/*
+	 * If we find a free descriptor, claim it,
+	 * initialize it, and return it.
+	 */
+	XFS_LIC_CLAIM(licp, i);
+	if (licp->lic_unused <= i) {
+		licp->lic_unused = i + 1;
+		XFS_LIC_INIT_SLOT(licp, i);
+	}
+	lidp = XFS_LIC_SLOT(licp, i);
+	tp->t_items_free--;
+	lidp->lid_item = lip;
+	lidp->lid_flags = 0;
+	lidp->lid_size = 0;
+	lip->li_desc = lidp;
+	lip->li_mountp = tp->t_mountp;
+	return (lidp);
+}
+
+/*
+ * Free the given descriptor.
+ *
+ * This requires setting the bit in the chunk's free mask corresponding
+ * to the given slot.
+ */
+void
+xfs_trans_free_item(xfs_trans_t	*tp, xfs_log_item_desc_t *lidp)
+{
+	uint			slot;
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	**licpp;
+
+	slot = XFS_LIC_DESC_TO_SLOT(lidp);
+	licp = XFS_LIC_DESC_TO_CHUNK(lidp);
+	XFS_LIC_RELSE(licp, slot);
+	lidp->lid_item->li_desc = NULL;
+	tp->t_items_free++;
+
+	/*
+	 * If there are no more used items in the chunk and this is not
+	 * the chunk embedded in the transaction structure, then free
+	 * the chunk. First pull it from the chunk list and then
+	 * free it back to the heap.  We didn't bother with a doubly
+	 * linked list here because the lists should be very short
+	 * and this is not a performance path.  It's better to save
+	 * the memory of the extra pointer.
+	 *
+	 * Also decrement the transaction structure's count of free items
+	 * by the number in a chunk since we are freeing an empty chunk.
+	 */
+	if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) {
+		licpp = &(tp->t_items.lic_next);
+		while (*licpp != licp) {
+			ASSERT(*licpp != NULL);
+			licpp = &((*licpp)->lic_next);
+		}
+		*licpp = licp->lic_next;
+		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+		tp->t_items_free -= XFS_LIC_NUM_SLOTS;
+	}
+}
+
+/*
+ * This is called to find the descriptor corresponding to the given
+ * log item.  It returns a pointer to the descriptor.
+ * The log item MUST have a corresponding descriptor in the given
+ * transaction.  This routine does not return NULL; it panics.
+ *
+ * The descriptor pointer is kept in the log item's li_desc field.
+ * Just return it.
+ */
+/*ARGSUSED*/
+xfs_log_item_desc_t *
+xfs_trans_find_item(xfs_trans_t	*tp, xfs_log_item_t *lip)
+{
+	ASSERT(lip->li_desc != NULL);
+
+	return (lip->li_desc);
+}
+
+
+/*
+ * Return a pointer to the first descriptor in the chunk list.
+ * There must be at least one used descriptor here; if there is not,
+ * complain and return NULL.
+ *
+ * The first descriptor must be in either the first or second chunk.
+ * This is because the only chunk allowed to be empty is the first.
+ * All others are freed when they become empty.
+ *
+ * At some point this and xfs_trans_next_item() should be optimized
+ * to quickly look at the mask to determine if there is anything to
+ * look at.
+ */
+xfs_log_item_desc_t *
+xfs_trans_first_item(xfs_trans_t *tp)
+{
+	xfs_log_item_chunk_t	*licp;
+	int			i;
+
+	licp = &tp->t_items;
+	/*
+	 * If it's not in the first chunk, skip to the second.
+	 */
+	if (XFS_LIC_ARE_ALL_FREE(licp)) {
+		licp = licp->lic_next;
+	}
+
+	/*
+	 * Return the first non-free descriptor in the chunk.
+	 */
+	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+	for (i = 0; i < licp->lic_unused; i++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		return (XFS_LIC_SLOT(licp, i));
+	}
+	cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
+	return(NULL);
+}
+
+
+/*
+ * Given a descriptor, return the next descriptor in the chunk list.
+ * This returns NULL if there are no more used descriptors in the list.
+ *
+ * We do this by first locating the chunk in which the descriptor resides,
+ * and then scanning forward in the chunk and the list for the next
+ * used descriptor.
+ */
+/*ARGSUSED*/
+xfs_log_item_desc_t *
+xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
+{
+	xfs_log_item_chunk_t	*licp;
+	int			i;
+
+	licp = XFS_LIC_DESC_TO_CHUNK(lidp);
+
+	/*
+	 * First search the rest of the chunk. The for loop keeps us
+	 * from referencing things beyond the end of the chunk.
+	 */
+	for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		return (XFS_LIC_SLOT(licp, i));
+	}
+
+	/*
+	 * Now search the next chunk.  It must be there, because the
+	 * next chunk would have been freed if it were empty.
+	 * If there is no next chunk, return NULL.
+	 */
+	if (licp->lic_next == NULL) {
+		return (NULL);
+	}
+
+	licp = licp->lic_next;
+	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+	for (i = 0; i < licp->lic_unused; i++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		return (XFS_LIC_SLOT(licp, i));
+	}
+	ASSERT(0);
+	/* NOTREACHED */
+	return NULL; /* keep gcc quiet */
+}
+
+/*
+ * This is called to unlock all of the items of a transaction and to free
+ * all the descriptors of that transaction.
+ *
+ * It walks the list of descriptors and unlocks each item.  It frees
+ * each chunk except that embedded in the transaction as it goes along.
+ */
+void
+xfs_trans_free_items(
+	xfs_trans_t	*tp,
+	int		flags)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
+	int			abort;
+
+	abort = flags & XFS_TRANS_ABORT;
+	licp = &tp->t_items;
+	/*
+	 * Special case the embedded chunk so we don't free it below.
+	 */
+	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
+		XFS_LIC_ALL_FREE(licp);
+		licp->lic_unused = 0;
+	}
+	licp = licp->lic_next;
+
+	/*
+	 * Unlock each item in each chunk and free the chunks.
+	 */
+	while (licp != NULL) {
+		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
+		next_licp = licp->lic_next;
+		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+		licp = next_licp;
+	}
+
+	/*
+	 * Reset the transaction structure's free item count.
+	 */
+	tp->t_items_free = XFS_LIC_NUM_SLOTS;
+	tp->t_items.lic_next = NULL;
+}
+
+
+
+/*
+ * This is called to unlock the items associated with a transaction.
+ * Items which were not logged should be freed.
+ * Those which were logged must still be tracked so they can be unpinned
+ * when the transaction commits.
+ */
+void
+xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
+	xfs_log_item_chunk_t	**licpp;
+	int			freed;
+
+	freed = 0;
+	licp = &tp->t_items;
+
+	/*
+	 * Special case the embedded chunk so we don't free it below.
+	 */
+	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+		freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
+	}
+	licpp = &(tp->t_items.lic_next);
+	licp = licp->lic_next;
+
+	/*
+	 * Unlock each item in each chunk, free non-dirty descriptors,
+	 * and free empty chunks.
+	 */
+	while (licp != NULL) {
+		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
+		next_licp = licp->lic_next;
+		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+			*licpp = next_licp;
+			kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+			freed -= XFS_LIC_NUM_SLOTS;
+		} else {
+			licpp = &(licp->lic_next);
+		}
+		ASSERT(*licpp == next_licp);
+		licp = next_licp;
+	}
+
+	/*
+	 * Fix the free descriptor count in the transaction.
+	 */
+	tp->t_items_free += freed;
+}
+
+/*
+ * Unlock each item pointed to by a descriptor in the given chunk.
+ * Stamp the commit lsn into each item if necessary.
+ * Free descriptors pointing to items which are not dirty if freeing_chunk
+ * is zero. If freeing_chunk is non-zero, then we need to unlock all
+ * items in the chunk.
+ * 
+ * Return the number of descriptors freed.
+ */
+STATIC int
+xfs_trans_unlock_chunk(
+	xfs_log_item_chunk_t	*licp,
+	int			freeing_chunk,
+	int			abort,
+	xfs_lsn_t		commit_lsn)
+{
+	xfs_log_item_desc_t	*lidp;
+	xfs_log_item_t		*lip;
+	int			i;
+	int			freed;
+
+	freed = 0;
+	lidp = licp->lic_descs;
+	for (i = 0; i < licp->lic_unused; i++, lidp++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+		lip = lidp->lid_item;
+		lip->li_desc = NULL;
+
+		if (commit_lsn != NULLCOMMITLSN)
+			IOP_COMMITTING(lip, commit_lsn);
+		if (abort)
+			lip->li_flags |= XFS_LI_ABORTED;
+		IOP_UNLOCK(lip);
+
+		/*
+		 * Free the descriptor if the item is not dirty
+		 * within this transaction and the caller is not
+		 * going to just free the entire thing regardless.
+		 */
+		if (!(freeing_chunk) &&
+		    (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
+			XFS_LIC_RELSE(licp, i);
+			freed++;
+		}
+	}
+
+	return (freed);
+}
+
+
+/*
+ * This is called to add the given busy item to the transaction's
+ * list of busy items.  It must find a free busy item descriptor
+ * or allocate a new one and add the item to that descriptor.
+ * The function returns a pointer to the busy descriptor used to point
+ * to the new busy entry.  The log busy entry will now point to its new
+ * descriptor with its ???? field.
+ */
+xfs_log_busy_slot_t *
+xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx)
+{
+	xfs_log_busy_chunk_t	*lbcp;
+	xfs_log_busy_slot_t	*lbsp;
+	int			i=0;
+
+	/*
+	 * If there are no free descriptors, allocate a new chunk
+	 * of them and put it at the front of the chunk list.
+	 */
+	if (tp->t_busy_free == 0) {
+		lbcp = (xfs_log_busy_chunk_t*)
+		       kmem_alloc(sizeof(xfs_log_busy_chunk_t), KM_SLEEP);
+		ASSERT(lbcp != NULL);
+		/*
+		 * Initialize the chunk, and then
+		 * claim the first slot in the newly allocated chunk.
+		 */
+		XFS_LBC_INIT(lbcp);
+		XFS_LBC_CLAIM(lbcp, 0);
+		lbcp->lbc_unused = 1;
+		lbsp = XFS_LBC_SLOT(lbcp, 0);
+
+		/*
+		 * Link in the new chunk and update the free count.
+		 */
+		lbcp->lbc_next = tp->t_busy.lbc_next;
+		tp->t_busy.lbc_next = lbcp;
+		tp->t_busy_free = XFS_LIC_NUM_SLOTS - 1;
+
+		/*
+		 * Initialize the descriptor and the generic portion
+		 * of the log item.
+		 *
+		 * Point the new slot at this item and return it.
+		 * Also point the log item at its currently active
+		 * descriptor and set the item's mount pointer.
+		 */
+		lbsp->lbc_ag = ag;
+		lbsp->lbc_idx = idx;
+		return (lbsp);
+	}
+
+	/*
+	 * Find the free descriptor. It is somewhere in the chunklist
+	 * of descriptors.
+	 */
+	lbcp = &tp->t_busy;
+	while (lbcp != NULL) {
+		if (XFS_LBC_VACANCY(lbcp)) {
+			if (lbcp->lbc_unused <= XFS_LBC_MAX_SLOT) {
+				i = lbcp->lbc_unused;
+				break;
+			} else {
+				/* out-of-order vacancy */
+				printk("OOO vacancy lbcp 0x%p\n", lbcp);
+				ASSERT(0);
+			}
+		}
+		lbcp = lbcp->lbc_next;
+	}
+	ASSERT(lbcp != NULL);
+	/*
+	 * If we find a free descriptor, claim it,
+	 * initialize it, and return it.
+	 */
+	XFS_LBC_CLAIM(lbcp, i);
+	if (lbcp->lbc_unused <= i) {
+		lbcp->lbc_unused = i + 1;
+	}
+	lbsp = XFS_LBC_SLOT(lbcp, i);
+	tp->t_busy_free--;
+	lbsp->lbc_ag = ag;
+	lbsp->lbc_idx = idx;
+	return (lbsp);
+}
+
+
+/*
+ * xfs_trans_free_busy
+ * Free all of the busy lists from a transaction
+ */
+void
+xfs_trans_free_busy(xfs_trans_t *tp)
+{
+	xfs_log_busy_chunk_t	*lbcp;
+	xfs_log_busy_chunk_t	*lbcq;
+
+	lbcp = tp->t_busy.lbc_next;
+	while (lbcp != NULL) {
+		lbcq = lbcp->lbc_next;
+		kmem_free(lbcp, sizeof(xfs_log_busy_chunk_t));
+		lbcp = lbcq;
+	}
+
+	XFS_LBC_INIT(&tp->t_busy);
+	tp->t_busy.lbc_unused = 0;
+}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
new file mode 100644
index 0000000..d4dae7d
--- /dev/null
+++ b/fs/xfs/xfs_trans_priv.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_TRANS_PRIV_H__
+#define	__XFS_TRANS_PRIV_H__
+
+struct xfs_log_item;
+struct xfs_log_item_desc;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * From xfs_trans_item.c
+ */
+struct xfs_log_item_desc	*xfs_trans_add_item(struct xfs_trans *,
+					    struct xfs_log_item *);
+void				xfs_trans_free_item(struct xfs_trans *,
+					    struct xfs_log_item_desc *);
+struct xfs_log_item_desc	*xfs_trans_find_item(struct xfs_trans *,
+					     struct xfs_log_item *);
+struct xfs_log_item_desc	*xfs_trans_first_item(struct xfs_trans *);
+struct xfs_log_item_desc	*xfs_trans_next_item(struct xfs_trans *,
+					     struct xfs_log_item_desc *);
+void				xfs_trans_free_items(struct xfs_trans *, int);
+void				xfs_trans_unlock_items(struct xfs_trans *,
+							xfs_lsn_t);
+void				xfs_trans_free_busy(xfs_trans_t *tp);
+xfs_log_busy_slot_t		*xfs_trans_add_busy(xfs_trans_t *tp,
+						    xfs_agnumber_t ag,
+						    xfs_extlen_t idx);
+
+/*
+ * From xfs_trans_ail.c
+ */
+void			xfs_trans_update_ail(struct xfs_mount *,
+				     struct xfs_log_item *, xfs_lsn_t,
+				     unsigned long);
+void			xfs_trans_delete_ail(struct xfs_mount *,
+				     struct xfs_log_item *, unsigned long);
+struct xfs_log_item	*xfs_trans_first_ail(struct xfs_mount *, int *);
+struct xfs_log_item	*xfs_trans_next_ail(struct xfs_mount *,
+				     struct xfs_log_item *, int *, int *);
+
+
+#endif	/* __XFS_TRANS_PRIV_H__ */
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/xfs_trans_space.h
new file mode 100644
index 0000000..e91d173
--- /dev/null
+++ b/fs/xfs/xfs_trans_space.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_TRANS_SPACE_H__
+#define __XFS_TRANS_SPACE_H__
+
+/*
+ * Components of space reservations.
+ */
+#define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)    \
+		(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
+#define	XFS_EXTENTADD_SPACE_RES(mp,w)	(XFS_BM_MAXLEVELS(mp,w) - 1)
+#define XFS_NEXTENTADD_SPACE_RES(mp,b,w)\
+	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
+	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
+	  XFS_EXTENTADD_SPACE_RES(mp,w))
+#define	XFS_DAENTER_1B(mp,w)	((w) == XFS_DATA_FORK ? (mp)->m_dirblkfsbs : 1)
+#define	XFS_DAENTER_DBS(mp,w)	\
+	(XFS_DA_NODE_MAXDEPTH + \
+	 ((XFS_DIR_IS_V2(mp) && (w) == XFS_DATA_FORK) ? 2 : 0))
+#define	XFS_DAENTER_BLOCKS(mp,w)	\
+	(XFS_DAENTER_1B(mp,w) * XFS_DAENTER_DBS(mp,w))
+#define	XFS_DAENTER_BMAP1B(mp,w)	\
+	XFS_NEXTENTADD_SPACE_RES(mp, XFS_DAENTER_1B(mp, w), w)
+#define	XFS_DAENTER_BMAPS(mp,w)		\
+	(XFS_DAENTER_DBS(mp,w) * XFS_DAENTER_BMAP1B(mp,w))
+#define	XFS_DAENTER_SPACE_RES(mp,w)	\
+	(XFS_DAENTER_BLOCKS(mp,w) + XFS_DAENTER_BMAPS(mp,w))
+#define	XFS_DAREMOVE_SPACE_RES(mp,w)	XFS_DAENTER_BMAPS(mp,w)
+#define	XFS_DIRENTER_MAX_SPLIT(mp,nl)	\
+	(((mp)->m_sb.sb_blocksize == 512 && \
+	  XFS_DIR_IS_V1(mp) && \
+	  (nl) >= XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN) ? 2 : 1)
+#define	XFS_DIRENTER_SPACE_RES(mp,nl)	\
+	(XFS_DAENTER_SPACE_RES(mp, XFS_DATA_FORK) * \
+	 XFS_DIRENTER_MAX_SPLIT(mp,nl))
+#define	XFS_DIRREMOVE_SPACE_RES(mp)	\
+	XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
+#define	XFS_IALLOC_SPACE_RES(mp)	\
+	(XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp)-1)
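+
+/*
+ * Illustrative note (not from the original source): the composite
+ * reservation macros above are built from ceiling divisions, e.g.
+ *
+ *	XFS_NEXTENTADD_SPACE_RES(mp,b,w) ==
+ *		ceil(b / XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) *
+ *			XFS_EXTENTADD_SPACE_RES(mp,w)
+ *
+ * which is, roughly, the worst-case number of new bmap btree blocks
+ * needed to record b additional extents.
+ */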
+
+/*
+ * Space reservation values for various transactions.
+ */
+#define	XFS_ADDAFORK_SPACE_RES(mp)	\
+	((mp)->m_dirblkfsbs + \
+	 (XFS_DIR_IS_V1(mp) ? 0 : XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK)))
+#define	XFS_ATTRRM_SPACE_RES(mp)	\
+	XFS_DAREMOVE_SPACE_RES(mp, XFS_ATTR_FORK)
+/* This macro is not used - see inline code in xfs_attr_set */
+#define	XFS_ATTRSET_SPACE_RES(mp, v)	\
+	(XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) + XFS_B_TO_FSB(mp, v))
+#define	XFS_CREATE_SPACE_RES(mp,nl)	\
+	(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
+#define	XFS_DIOSTRAT_SPACE_RES(mp, v)	\
+	(XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + (v))
+#define	XFS_GROWFS_SPACE_RES(mp)	\
+	(2 * XFS_AG_MAXLEVELS(mp))
+#define	XFS_GROWFSRT_SPACE_RES(mp,b)	\
+	((b) + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK))
+#define	XFS_LINK_SPACE_RES(mp,nl)	\
+	XFS_DIRENTER_SPACE_RES(mp,nl)
+#define	XFS_MKDIR_SPACE_RES(mp,nl)	\
+	(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
+#define	XFS_QM_DQALLOC_SPACE_RES(mp)	\
+	(XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + \
+	 XFS_DQUOT_CLUSTER_SIZE_FSB)
+#define	XFS_QM_QINOCREATE_SPACE_RES(mp)	\
+	XFS_IALLOC_SPACE_RES(mp)
+#define	XFS_REMOVE_SPACE_RES(mp)	\
+	XFS_DIRREMOVE_SPACE_RES(mp)
+#define	XFS_RENAME_SPACE_RES(mp,nl)	\
+	(XFS_DIRREMOVE_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
+#define	XFS_SYMLINK_SPACE_RES(mp,nl,b)	\
+	(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b))
+
+#endif	/* __XFS_TRANS_SPACE_H__ */
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
new file mode 100644
index 0000000..04609d2
--- /dev/null
+++ b/fs/xfs/xfs_types.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_TYPES_H__
+#define	__XFS_TYPES_H__
+
+#ifdef __KERNEL__
+
+/*
+ * POSIX Extensions
+ */
+typedef unsigned char		uchar_t;
+typedef unsigned short		ushort_t;
+typedef unsigned int		uint_t;
+typedef unsigned long		ulong_t;
+
+/*
+ * Additional type declarations for XFS
+ */
+typedef signed char		__int8_t;
+typedef unsigned char		__uint8_t;
+typedef signed short int	__int16_t;
+typedef unsigned short int	__uint16_t;
+typedef signed int		__int32_t;
+typedef unsigned int		__uint32_t;
+typedef signed long long int	__int64_t;
+typedef unsigned long long int	__uint64_t;
+
+typedef enum { B_FALSE,B_TRUE }	boolean_t;
+typedef __int64_t		prid_t;		/* project ID */
+typedef __uint32_t		inst_t;		/* an instruction */
+
+typedef __s64			xfs_off_t;	/* <file offset> type */
+typedef __u64			xfs_ino_t;	/* <inode> type */
+typedef __s64			xfs_daddr_t;	/* <disk address> type */
+typedef char *			xfs_caddr_t;	/* <core address> type */
+typedef __u32			xfs_dev_t;
+
+/* __psint_t is the same size as a pointer */
+#if (BITS_PER_LONG == 32)
+typedef __int32_t __psint_t;
+typedef __uint32_t __psunsigned_t;
+#elif (BITS_PER_LONG == 64)
+typedef __int64_t __psint_t;
+typedef __uint64_t __psunsigned_t;
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+
+#endif	/* __KERNEL__ */
+
+typedef __uint32_t	xfs_agblock_t;	/* blockno in alloc. group */
+typedef	__uint32_t	xfs_extlen_t;	/* extent length in blocks */
+typedef	__uint32_t	xfs_agnumber_t;	/* allocation group number */
+typedef __int32_t	xfs_extnum_t;	/* # of extents in a file */
+typedef __int16_t	xfs_aextnum_t;	/* # extents in an attribute fork */
+typedef	__int64_t	xfs_fsize_t;	/* bytes in a file */
+typedef __uint64_t	xfs_ufsize_t;	/* unsigned bytes in a file */
+
+typedef	__int32_t	xfs_suminfo_t;	/* type of bitmap summary info */
+typedef	__int32_t	xfs_rtword_t;	/* word type for bitmap manipulations */
+
+typedef	__int64_t	xfs_lsn_t;	/* log sequence number */
+typedef	__int32_t	xfs_tid_t;	/* transaction identifier */
+
+typedef	__uint32_t	xfs_dablk_t;	/* dir/attr block number (in file) */
+typedef	__uint32_t	xfs_dahash_t;	/* dir/attr hash value */
+
+typedef __uint16_t	xfs_prid_t;	/* prid_t truncated to 16bits in XFS */
+
+/*
+ * These types are 64 bits on disk but are either 32 or 64 bits in memory.
+ * Disk based types:
+ */
+typedef __uint64_t	xfs_dfsbno_t;	/* blockno in filesystem (agno|agbno) */
+typedef __uint64_t	xfs_drfsbno_t;	/* blockno in filesystem (raw) */
+typedef	__uint64_t	xfs_drtbno_t;	/* extent (block) in realtime area */
+typedef	__uint64_t	xfs_dfiloff_t;	/* block number in a file */
+typedef	__uint64_t	xfs_dfilblks_t;	/* number of blocks in a file */
+
+/*
+ * Memory based types are conditional.
+ */
+#if XFS_BIG_BLKNOS
+typedef	__uint64_t	xfs_fsblock_t;	/* blockno in filesystem (agno|agbno) */
+typedef __uint64_t	xfs_rfsblock_t;	/* blockno in filesystem (raw) */
+typedef __uint64_t	xfs_rtblock_t;	/* extent (block) in realtime area */
+typedef	__int64_t	xfs_srtblock_t;	/* signed version of xfs_rtblock_t */
+#else
+typedef	__uint32_t	xfs_fsblock_t;	/* blockno in filesystem (agno|agbno) */
+typedef __uint32_t	xfs_rfsblock_t;	/* blockno in filesystem (raw) */
+typedef __uint32_t	xfs_rtblock_t;	/* extent (block) in realtime area */
+typedef	__int32_t	xfs_srtblock_t;	/* signed version of xfs_rtblock_t */
+#endif
+typedef __uint64_t	xfs_fileoff_t;	/* block number in a file */
+typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
+typedef __uint64_t	xfs_filblks_t;	/* number of blocks in a file */
+
+typedef __uint8_t	xfs_arch_t;	/* architecture of an xfs fs */
+
+/*
+ * Null values for the types.
+ */
+#define	NULLDFSBNO	((xfs_dfsbno_t)-1)
+#define	NULLDRFSBNO	((xfs_drfsbno_t)-1)
+#define	NULLDRTBNO	((xfs_drtbno_t)-1)
+#define	NULLDFILOFF	((xfs_dfiloff_t)-1)
+
+#define	NULLFSBLOCK	((xfs_fsblock_t)-1)
+#define	NULLRFSBLOCK	((xfs_rfsblock_t)-1)
+#define	NULLRTBLOCK	((xfs_rtblock_t)-1)
+#define	NULLFILEOFF	((xfs_fileoff_t)-1)
+
+#define	NULLAGBLOCK	((xfs_agblock_t)-1)
+#define	NULLAGNUMBER	((xfs_agnumber_t)-1)
+#define	NULLEXTNUM	((xfs_extnum_t)-1)
+
+#define NULLCOMMITLSN	((xfs_lsn_t)-1)
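+
+/*
+ * Illustrative note (not from the original source): each of the null
+ * values above is just ((type)-1), i.e. the all-ones bit pattern of the
+ * corresponding type, so NULLAGBLOCK is 0xffffffff and NULLFSBLOCK is
+ * 0xffffffffffffffff when XFS_BIG_BLKNOS is set (0xffffffff otherwise).
+ */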
+
+/*
+ * Max values for extlen, extnum, aextnum.
+ */
+#define	MAXEXTLEN	((xfs_extlen_t)0x001fffff)	/* 21 bits */
+#define	MAXEXTNUM	((xfs_extnum_t)0x7fffffff)	/* signed int */
+#define	MAXAEXTNUM	((xfs_aextnum_t)0x7fff)		/* signed short */
+
+/*
+ * MAXNAMELEN is the length (including the terminating null) of
+ * the longest permissible file (component) name.
+ */
+#define MAXNAMELEN	256
+
+typedef struct xfs_dirent {		/* data from readdir() */
+	xfs_ino_t	d_ino;		/* inode number of entry */
+	xfs_off_t	d_off;		/* offset of disk directory entry */
+	unsigned short	d_reclen;	/* length of this record */
+	char		d_name[1];	/* name of file */
+} xfs_dirent_t;
+
+#define DIRENTBASESIZE		(((xfs_dirent_t *)0)->d_name - (char *)0)
+#define DIRENTSIZE(namelen)	\
+	((DIRENTBASESIZE + (namelen) + \
+		sizeof(xfs_off_t)) & ~(sizeof(xfs_off_t) - 1))
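+
+/*
+ * Illustrative note (not from the original source): DIRENTSIZE() adds
+ * sizeof(xfs_off_t) before masking, so the result is always rounded up
+ * to an xfs_off_t-aligned size with at least one spare byte left over
+ * for the name's NUL terminator.
+ */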
+
+typedef enum {
+	XFS_LOOKUP_EQi, XFS_LOOKUP_LEi, XFS_LOOKUP_GEi
+} xfs_lookup_t;
+
+typedef enum {
+	XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_BMAPi, XFS_BTNUM_INOi,
+	XFS_BTNUM_MAX
+} xfs_btnum_t;
+
+#endif	/* __XFS_TYPES_H__ */
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
new file mode 100644
index 0000000..816b945
--- /dev/null
+++ b/fs/xfs/xfs_utils.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_rw.h"
+#include "xfs_itable.h"
+#include "xfs_utils.h"
+
+/*
+ * xfs_get_dir_entry is used to get a reference to the inode behind
+ * a directory entry (vname).  It takes a hold on the entry's vnode
+ * and returns the corresponding incore inode without locking it.
+ */
+int
+xfs_get_dir_entry(
+	vname_t		*dentry,
+	xfs_inode_t	**ipp)
+{
+	vnode_t		*vp;
+	bhv_desc_t	*bdp;
+
+	vp = VNAME_TO_VNODE(dentry);
+	bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
+	if (!bdp) {
+		*ipp = NULL;
+		return XFS_ERROR(ENOENT);
+	}
+	VN_HOLD(vp);
+	*ipp = XFS_BHVTOI(bdp);
+	return 0;
+}
+
+int
+xfs_dir_lookup_int(
+	bhv_desc_t	*dir_bdp,
+	uint		lock_mode,
+	vname_t		*dentry,
+	xfs_ino_t	*inum,
+	xfs_inode_t	**ipp)
+{
+	vnode_t		*dir_vp;
+	xfs_inode_t	*dp;
+	int		error;
+
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	dp = XFS_BHVTOI(dir_bdp);
+
+	error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp,
+				VNAME(dentry), VNAMELEN(dentry), inum);
+	if (!error) {
+		/*
+		 * Unlock the directory. We do this because we can't
+		 * hold the directory lock while doing the vn_get()
+		 * in xfs_iget().  Doing so could cause us to hold
+		 * a lock while waiting for the inode to finish
+		 * being inactive while it's waiting for a log
+		 * reservation in the inactive routine.
+		 */
+		xfs_iunlock(dp, lock_mode);
+		error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
+		xfs_ilock(dp, lock_mode);
+
+		if (error) {
+			*ipp = NULL;
+		} else if ((*ipp)->i_d.di_mode == 0) {
+			/*
+			 * The inode has been freed.  Something is
+			 * wrong so just get out of here.
+			 */
+			xfs_iunlock(dp, lock_mode);
+			xfs_iput_new(*ipp, 0);
+			*ipp = NULL;
+			xfs_ilock(dp, lock_mode);
+			error = XFS_ERROR(ENOENT);
+		}
+	}
+	return error;
+}
+
+/*
+ * Allocates a new inode from disk and returns a pointer to the
+ * incore copy. This routine will internally commit the current
+ * transaction and allocate a new one if the Space Manager needed
+ * to do an allocation to replenish the inode free-list.
+ *
+ * This routine is designed to be called from xfs_create and
+ * xfs_create_dir.
+ *
+ */
+int
+xfs_dir_ialloc(
+	xfs_trans_t	**tpp,		/* input: current transaction;
+					   output: may be a new transaction. */
+	xfs_inode_t	*dp,		/* directory within which to allocate
+					   the inode. */
+	mode_t		mode,
+	nlink_t		nlink,
+	xfs_dev_t	rdev,
+	cred_t		*credp,
+	prid_t		prid,		/* project id */
+	int		okalloc,	/* ok to allocate new space */
+	xfs_inode_t	**ipp,		/* pointer to inode; it will be
+					   locked. */
+	int		*committed)
+
+{
+	xfs_trans_t	*tp;
+	xfs_trans_t	*ntp;
+	xfs_inode_t	*ip;
+	xfs_buf_t	*ialloc_context = NULL;
+	boolean_t	call_again = B_FALSE;
+	int		code;
+	uint		log_res;
+	uint		log_count;
+	void		*dqinfo;
+	uint		tflags;
+
+	tp = *tpp;
+	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+	/*
+	 * xfs_ialloc will return a pointer to an incore inode if
+	 * the Space Manager has an available inode on the free
+	 * list. Otherwise, it will do an allocation and replenish
+	 * the freelist.  Since we can only do one allocation per
+	 * transaction without deadlocks, we will need to commit the
+	 * current transaction and start a new one.  We will then
+	 * need to call xfs_ialloc again to get the inode.
+	 *
+	 * If xfs_ialloc did an allocation to replenish the freelist,
+	 * it returns the bp containing the head of the freelist as
+	 * ialloc_context. We will hold a lock on it across the
+	 * transaction commit so that no other process can steal
+	 * the inode(s) that we've just allocated.
+	 */
+	code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc,
+			  &ialloc_context, &call_again, &ip);
+
+	/*
+	 * Return an error if we were unable to allocate a new inode.
+	 * This should only happen if we run out of space on disk or
+	 * encounter a disk error.
+	 */
+	if (code) {
+		*ipp = NULL;
+		return code;
+	}
+	if (!call_again && (ip == NULL)) {
+		*ipp = NULL;
+		return XFS_ERROR(ENOSPC);
+	}
+
+	/*
+	 * If call_again is set, then we were unable to get an
+	 * inode in one operation.  We need to commit the current
+	 * transaction and call xfs_ialloc() again.  It is guaranteed
+	 * to succeed the second time.
+	 */
+	if (call_again) {
+
+		/*
+		 * Normally, xfs_trans_commit releases all the locks.
+		 * We call bhold to hang on to the ialloc_context across
+		 * the commit.  Holding this buffer prevents any other
+		 * processes from doing any allocations in this
+		 * allocation group.
+		 */
+		xfs_trans_bhold(tp, ialloc_context);
+		/*
+		 * Save the log reservation and count so we can use
+		 * them in the next transaction.
+		 */
+		log_res = xfs_trans_get_log_res(tp);
+		log_count = xfs_trans_get_log_count(tp);
+
+		/*
+		 * We want the quota changes to be associated with the next
+		 * transaction, NOT this one. So, detach the dqinfo from this
+		 * and attach it to the next transaction.
+		 */
+		dqinfo = NULL;
+		tflags = 0;
+		if (tp->t_dqinfo) {
+			dqinfo = (void *)tp->t_dqinfo;
+			tp->t_dqinfo = NULL;
+			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
+			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
+		}
+
+		ntp = xfs_trans_dup(tp);
+		code = xfs_trans_commit(tp, 0, NULL);
+		tp = ntp;
+		if (committed != NULL) {
+			*committed = 1;
+		}
+		/*
+		 * If we get an error during the commit processing,
+		 * release the buffer that is still held and return
+		 * to the caller.
+		 */
+		if (code) {
+			xfs_buf_relse(ialloc_context);
+			if (dqinfo) {
+				tp->t_dqinfo = dqinfo;
+				XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
+			}
+			*tpp = ntp;
+			*ipp = NULL;
+			return code;
+		}
+		code = xfs_trans_reserve(tp, 0, log_res, 0,
+					 XFS_TRANS_PERM_LOG_RES, log_count);
+		/*
+		 * Re-attach the quota info that we detached from prev trx.
+		 */
+		if (dqinfo) {
+			tp->t_dqinfo = dqinfo;
+			tp->t_flags |= tflags;
+		}
+
+		if (code) {
+			xfs_buf_relse(ialloc_context);
+			*tpp = ntp;
+			*ipp = NULL;
+			return code;
+		}
+		xfs_trans_bjoin(tp, ialloc_context);
+
+		/*
+		 * Call ialloc again. Since we've locked out all
+		 * other allocations in this allocation group,
+		 * this call should always succeed.
+		 */
+		code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid,
+				  okalloc, &ialloc_context, &call_again, &ip);
+
+		/*
+		 * If we get an error at this point, return to the caller
+		 * so that the current transaction can be aborted.
+		 */
+		if (code) {
+			*tpp = tp;
+			*ipp = NULL;
+			return code;
+		}
+		ASSERT ((!call_again) && (ip != NULL));
+
+	} else {
+		if (committed != NULL) {
+			*committed = 0;
+		}
+	}
+
+	*ipp = ip;
+	*tpp = tp;
+
+	return 0;
+}
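+
+/*
+ * Illustrative note (not part of the original source): a caller must be
+ * prepared for *tpp to point at a different (duplicated) transaction on
+ * return and for *committed to be set if the original transaction was
+ * committed, roughly:
+ *
+ *	committed = 0;
+ *	error = xfs_dir_ialloc(&tp, dp, mode, 1, rdev, credp, prid,
+ *			       okalloc, &ip, &committed);
+ *	if (error)
+ *		goto error_return;	(tp must still be cancelled)
+ */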
+
+/*
+ * Decrement the link count on an inode & log the change.
+ * If this causes the link count to go to zero, initiate the
+ * logging activity required to truncate a file.
+ */
+int				/* error */
+xfs_droplink(
+	xfs_trans_t *tp,
+	xfs_inode_t *ip)
+{
+	int	error;
+
+	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+
+	ASSERT (ip->i_d.di_nlink > 0);
+	ip->i_d.di_nlink--;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+	error = 0;
+	if (ip->i_d.di_nlink == 0) {
+		/*
+		 * We're dropping the last link to this file.
+		 * Move the on-disk inode to the AGI unlinked list.
+		 * From xfs_inactive() we will pull the inode from
+		 * the list and free it.
+		 */
+		error = xfs_iunlink(tp, ip);
+	}
+	return error;
+}
+
+/*
+ * This gets called when the inode's version needs to be changed from 1 to 2.
+ * Currently this happens when the nlink field overflows the old 16-bit value
+ * or when chproj is called to change the project for the first time.
+ * As a side effect the superblock version will also get rev'd
+ * to contain the NLINK bit.
+ */
+void
+xfs_bump_ino_vers2(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	xfs_mount_t	*mp;
+	unsigned long		s;
+
+	ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1);
+
+	ip->i_d.di_version = XFS_DINODE_VERSION_2;
+	ip->i_d.di_onlink = 0;
+	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+	mp = tp->t_mountp;
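+	/*
+	 * Editorial note: the NLINK feature bit is checked once without the
+	 * superblock lock and again with it held, so only the first caller
+	 * to race through here upgrades and logs the superblock version.
+	 */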
+	if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
+		s = XFS_SB_LOCK(mp);
+		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
+			XFS_SB_VERSION_ADDNLINK(&mp->m_sb);
+			XFS_SB_UNLOCK(mp, s);
+			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
+		} else {
+			XFS_SB_UNLOCK(mp, s);
+		}
+	}
+	/* Caller must log the inode */
+}
+
+/*
+ * Increment the link count on an inode & log the change.
+ */
+int
+xfs_bumplink(
+	xfs_trans_t *tp,
+	xfs_inode_t *ip)
+{
+	if (ip->i_d.di_nlink >= XFS_MAXLINK)
+		return XFS_ERROR(EMLINK);
+	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+
+	ASSERT(ip->i_d.di_nlink > 0);
+	ip->i_d.di_nlink++;
+	if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) &&
+	    (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
+		/*
+		 * The inode has increased its number of links beyond
+		 * what can fit in an old format inode.  It now needs
+		 * to be converted to a version 2 inode with a 32 bit
+		 * link count.  If this is the first inode in the file
+		 * system to do this, then we need to bump the superblock
+		 * version number as well.
+		 */
+		xfs_bump_ino_vers2(tp, ip);
+	}
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	return 0;
+}
+
+/*
+ * Try to truncate the given file to 0 length.  Currently called
+ * only out of xfs_remove when it has to truncate a file to free
+ * up space for the remove to proceed.
+ */
+int
+xfs_truncate_file(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip)
+{
+	xfs_trans_t	*tp;
+	int		error;
+
+#ifdef QUOTADEBUG
+	/*
+	 * This is called to truncate the quotainodes too.
+	 */
+	if (XFS_IS_UQUOTA_ON(mp)) {
+		if (ip->i_ino != mp->m_sb.sb_uquotino)
+			ASSERT(ip->i_udquot);
+	}
+	if (XFS_IS_GQUOTA_ON(mp)) {
+		if (ip->i_ino != mp->m_sb.sb_gquotino)
+			ASSERT(ip->i_gdquot);
+	}
+#endif
+	/*
+	 * Make the call to xfs_itruncate_start before starting the
+	 * transaction, because we cannot make the call while we're
+	 * in a transaction.
+	 */
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0);
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
+	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+				      XFS_TRANS_PERM_LOG_RES,
+				      XFS_ITRUNCATE_LOG_COUNT))) {
+		xfs_trans_cancel(tp, 0);
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		return error;
+	}
+
+	/*
+	 * Follow the normal truncate locking protocol.  Since we
+	 * hold the inode in the transaction, we know that its number
+	 * of references will stay constant.
+	 */
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	/*
+	 * Signal a sync transaction.  The only case where one isn't
+	 * needed is when we're truncating an already unlinked file
+	 * on a wsync fs.  In that case, we know the blocks can't
+	 * reappear in the file because the links to the file are
+	 * permanently toast.  Currently, we're always going to
+	 * want a sync transaction because this code is being
+	 * called from places where nlink is guaranteed to be 1,
+	 * but I'm leaving the tests in to protect against future
+	 * changes -- rcc.
+	 */
+	error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,
+				     XFS_DATA_FORK,
+				     ((ip->i_d.di_nlink != 0 ||
+				       !(mp->m_flags & XFS_MOUNT_WSYNC))
+				      ? 1 : 0));
+	if (error) {
+		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+				 XFS_TRANS_ABORT);
+	} else {
+		xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
+					 NULL);
+	}
+	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+	return error;
+}
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
new file mode 100644
index 0000000..e1ed6a5
--- /dev/null
+++ b/fs/xfs/xfs_utils.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_UTILS_H__
+#define __XFS_UTILS_H__
+
+#define IRELE(ip)	VN_RELE(XFS_ITOV(ip))
+#define IHOLD(ip)	VN_HOLD(XFS_ITOV(ip))
+#define	ITRACE(ip)	vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \
+				(inst_t *)__return_address)
+
+extern int xfs_rename (bhv_desc_t *, vname_t *, vnode_t *, vname_t *, cred_t *);
+extern int xfs_get_dir_entry (vname_t *, xfs_inode_t **);
+extern int xfs_dir_lookup_int (bhv_desc_t *, uint, vname_t *, xfs_ino_t *,
+				xfs_inode_t **);
+extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *);
+extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, nlink_t,
+				xfs_dev_t, cred_t *, prid_t, int,
+				xfs_inode_t **, int *);
+extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *);
+extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *);
+extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *);
+
+#endif	/* __XFS_UTILS_H__ */
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
new file mode 100644
index 0000000..00aae9c
--- /dev/null
+++ b/fs/xfs/xfs_vfsops.c
@@ -0,0 +1,1941 @@
+/*
+ * XFS filesystem operations.
+ *
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_ag.h"
+#include "xfs_error.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_rw.h"
+#include "xfs_refcache.h"
+#include "xfs_buf_item.h"
+#include "xfs_extfree_item.h"
+#include "xfs_quota.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
+#include "xfs_clnt.h"
+#include "xfs_log_priv.h"
+
+STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
+
+int
+xfs_init(void)
+{
+	extern kmem_zone_t	*xfs_bmap_free_item_zone;
+	extern kmem_zone_t	*xfs_btree_cur_zone;
+	extern kmem_zone_t	*xfs_trans_zone;
+	extern kmem_zone_t	*xfs_buf_item_zone;
+	extern kmem_zone_t	*xfs_dabuf_zone;
+#ifdef XFS_DABUF_DEBUG
+	extern lock_t	        xfs_dabuf_global_lock;
+	spinlock_init(&xfs_dabuf_global_lock, "xfsda");
+#endif
+
+	/*
+	 * Initialize all of the zone allocators we use.
+	 */
+	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
+						 "xfs_bmap_free_item");
+	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
+					    "xfs_btree_cur");
+	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
+	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
+	xfs_da_state_zone =
+		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
+	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+
+	/*
+	 * The size of the zone allocated buf log item is the maximum
+	 * size possible under XFS.  This wastes a little bit of memory,
+	 * but it is much faster.
+	 */
+	xfs_buf_item_zone =
+		kmem_zone_init((sizeof(xfs_buf_log_item_t) +
+				(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
+				  NBWORD) * sizeof(int))),
+			       "xfs_buf_item");
+	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+				       ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+				      "xfs_efd_item");
+	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+				       ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+				      "xfs_efi_item");
+	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
+	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
+					    "xfs_chashlist");
+	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
+
+	/*
+	 * Allocate global trace buffers.
+	 */
+#ifdef XFS_ALLOC_TRACE
+	xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_BMAP_TRACE
+	xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_BMBT_TRACE
+	xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_DIR_TRACE
+	xfs_dir_trace_buf = ktrace_alloc(XFS_DIR_TRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_ATTR_TRACE
+	xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP);
+#endif
+#ifdef XFS_DIR2_TRACE
+	xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_SLEEP);
+#endif
+
+	xfs_dir_startup();
+
+#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+	xfs_error_test_init();
+#endif /* DEBUG || INDUCE_IO_ERROR */
+
+	xfs_init_procfs();
+	xfs_sysctl_register();
+	return 0;
+}
+
+void
+xfs_cleanup(void)
+{
+	extern kmem_zone_t	*xfs_bmap_free_item_zone;
+	extern kmem_zone_t	*xfs_btree_cur_zone;
+	extern kmem_zone_t	*xfs_inode_zone;
+	extern kmem_zone_t	*xfs_trans_zone;
+	extern kmem_zone_t	*xfs_da_state_zone;
+	extern kmem_zone_t	*xfs_dabuf_zone;
+	extern kmem_zone_t	*xfs_efd_zone;
+	extern kmem_zone_t	*xfs_efi_zone;
+	extern kmem_zone_t	*xfs_buf_item_zone;
+	extern kmem_zone_t	*xfs_chashlist_zone;
+
+	xfs_cleanup_procfs();
+	xfs_sysctl_unregister();
+	xfs_refcache_destroy();
+	xfs_acl_zone_destroy(xfs_acl_zone);
+
+#ifdef XFS_DIR2_TRACE
+	ktrace_free(xfs_dir2_trace_buf);
+#endif
+#ifdef XFS_ATTR_TRACE
+	ktrace_free(xfs_attr_trace_buf);
+#endif
+#ifdef XFS_DIR_TRACE
+	ktrace_free(xfs_dir_trace_buf);
+#endif
+#ifdef XFS_BMBT_TRACE
+	ktrace_free(xfs_bmbt_trace_buf);
+#endif
+#ifdef XFS_BMAP_TRACE
+	ktrace_free(xfs_bmap_trace_buf);
+#endif
+#ifdef XFS_ALLOC_TRACE
+	ktrace_free(xfs_alloc_trace_buf);
+#endif
+
+	kmem_cache_destroy(xfs_bmap_free_item_zone);
+	kmem_cache_destroy(xfs_btree_cur_zone);
+	kmem_cache_destroy(xfs_inode_zone);
+	kmem_cache_destroy(xfs_trans_zone);
+	kmem_cache_destroy(xfs_da_state_zone);
+	kmem_cache_destroy(xfs_dabuf_zone);
+	kmem_cache_destroy(xfs_buf_item_zone);
+	kmem_cache_destroy(xfs_efd_zone);
+	kmem_cache_destroy(xfs_efi_zone);
+	kmem_cache_destroy(xfs_ifork_zone);
+	kmem_cache_destroy(xfs_ili_zone);
+	kmem_cache_destroy(xfs_chashlist_zone);
+}
+
+/*
+ * xfs_start_flags
+ *
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock has _not_ yet been read in.
+ */
+STATIC int
+xfs_start_flags(
+	struct vfs		*vfs,
+	struct xfs_mount_args	*ap,
+	struct xfs_mount	*mp)
+{
+	/* Values are in BBs */
+	if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
+		/*
+		 * At this point the superblock has not been read
+		 * in, therefore we do not know the block size.
+		 * Before the mount call ends we will convert
+		 * these to FSBs.
+		 */
+		mp->m_dalign = ap->sunit;
+		mp->m_swidth = ap->swidth;
+	}
+
+	if (ap->logbufs != -1 &&
+#if defined(DEBUG) || defined(XLOG_NOLOG)
+	    ap->logbufs != 0 &&
+#endif
+	    (ap->logbufs < XLOG_MIN_ICLOGS ||
+	     ap->logbufs > XLOG_MAX_ICLOGS)) {
+		cmn_err(CE_WARN,
+			"XFS: invalid logbufs value: %d [not %d-%d]",
+			ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
+		return XFS_ERROR(EINVAL);
+	}
+	mp->m_logbufs = ap->logbufs;
+	if (ap->logbufsize != -1 &&
+	    ap->logbufsize != 16 * 1024 &&
+	    ap->logbufsize != 32 * 1024 &&
+	    ap->logbufsize != 64 * 1024 &&
+	    ap->logbufsize != 128 * 1024 &&
+	    ap->logbufsize != 256 * 1024) {
+		cmn_err(CE_WARN,
+	"XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+			ap->logbufsize);
+		return XFS_ERROR(EINVAL);
+	}
+	mp->m_ihsize = ap->ihashsize;
+	mp->m_logbsize = ap->logbufsize;
+	mp->m_fsname_len = strlen(ap->fsname) + 1;
+	mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP);
+	strcpy(mp->m_fsname, ap->fsname);
+
+	if (ap->flags & XFSMNT_WSYNC)
+		mp->m_flags |= XFS_MOUNT_WSYNC;
+#if XFS_BIG_INUMS
+	if (ap->flags & XFSMNT_INO64) {
+		mp->m_flags |= XFS_MOUNT_INO64;
+		mp->m_inoadd = XFS_INO64_OFFSET;
+	}
+#endif
+	if (ap->flags & XFSMNT_NOATIME)
+		mp->m_flags |= XFS_MOUNT_NOATIME;
+
+	if (ap->flags & XFSMNT_RETERR)
+		mp->m_flags |= XFS_MOUNT_RETERR;
+
+	if (ap->flags & XFSMNT_NOALIGN)
+		mp->m_flags |= XFS_MOUNT_NOALIGN;
+
+	if (ap->flags & XFSMNT_SWALLOC)
+		mp->m_flags |= XFS_MOUNT_SWALLOC;
+
+	if (ap->flags & XFSMNT_OSYNCISOSYNC)
+		mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
+
+	if (ap->flags & XFSMNT_32BITINODES)
+		mp->m_flags |= (XFS_MOUNT_32BITINODES | XFS_MOUNT_32BITINOOPT);
+
+	if (ap->flags & XFSMNT_IOSIZE) {
+		if (ap->iosizelog > XFS_MAX_IO_LOG ||
+		    ap->iosizelog < XFS_MIN_IO_LOG) {
+			cmn_err(CE_WARN,
+		"XFS: invalid log iosize: %d [not %d-%d]",
+				ap->iosizelog, XFS_MIN_IO_LOG,
+				XFS_MAX_IO_LOG);
+			return XFS_ERROR(EINVAL);
+		}
+
+		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
+		mp->m_readio_log = mp->m_writeio_log = ap->iosizelog;
+	}
+
+	if (ap->flags & XFSMNT_IHASHSIZE)
+		mp->m_flags |= XFS_MOUNT_IHASHSIZE;
+
+	if (ap->flags & XFSMNT_IDELETE)
+		mp->m_flags |= XFS_MOUNT_IDELETE;
+
+	if (ap->flags & XFSMNT_DIRSYNC)
+		mp->m_flags |= XFS_MOUNT_DIRSYNC;
+
+	/*
+	 * no recovery flag requires a read-only mount
+	 */
+	if (ap->flags & XFSMNT_NORECOVERY) {
+		if (!(vfs->vfs_flag & VFS_RDONLY)) {
+			cmn_err(CE_WARN,
+	"XFS: tried to mount a FS read-write without recovery!");
+			return XFS_ERROR(EINVAL);
+		}
+		mp->m_flags |= XFS_MOUNT_NORECOVERY;
+	}
+
+	if (ap->flags & XFSMNT_NOUUID)
+		mp->m_flags |= XFS_MOUNT_NOUUID;
+	if (ap->flags & XFSMNT_NOLOGFLUSH)
+		mp->m_flags |= XFS_MOUNT_NOLOGFLUSH;
+
+	return 0;
+}
+
+/*
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock _has_ now been read in.
+ */
+STATIC int
+xfs_finish_flags(
+	struct vfs		*vfs,
+	struct xfs_mount_args	*ap,
+	struct xfs_mount	*mp)
+{
+	int			ronly = (vfs->vfs_flag & VFS_RDONLY);
+
+	/* Fail a mount where the logbuf is smaller then the log stripe */
+	if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
+		if ((ap->logbufsize == -1) &&
+		    (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
+			mp->m_logbsize = mp->m_sb.sb_logsunit;
+		} else if (ap->logbufsize < mp->m_sb.sb_logsunit) {
+			cmn_err(CE_WARN,
+	"XFS: logbuf size must be greater than or equal to log stripe size");
+			return XFS_ERROR(EINVAL);
+		}
+	} else {
+		/* Fail a mount if the logbuf is larger than 32K */
+		if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) {
+			cmn_err(CE_WARN,
+	"XFS: logbuf size for version 1 logs must be 16K or 32K");
+			return XFS_ERROR(EINVAL);
+		}
+	}
+
+	/*
+	 * prohibit r/w mounts of read-only filesystems
+	 */
+	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
+		cmn_err(CE_WARN,
+	"XFS: cannot mount a read-only filesystem as read-write");
+		return XFS_ERROR(EROFS);
+	}
+
+	/*
+	 * disallow mount attempts with (IRIX) project quota enabled
+	 */
+	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
+	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT)) {
+		cmn_err(CE_WARN,
+	"XFS: cannot mount a filesystem with IRIX project quota enabled");
+		return XFS_ERROR(ENOSYS);
+	}
+
+	/*
+	 * check for shared mount.
+	 */
+	if (ap->flags & XFSMNT_SHARED) {
+		if (!XFS_SB_VERSION_HASSHARED(&mp->m_sb))
+			return XFS_ERROR(EINVAL);
+
+		/*
+		 * For IRIX 6.5, shared mounts must have the shared
+		 * version bit set, have the persistent readonly
+		 * field set, must be version 0 and can only be mounted
+		 * read-only.
+		 */
+		if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) ||
+		     (mp->m_sb.sb_shared_vn != 0))
+			return XFS_ERROR(EINVAL);
+
+		mp->m_flags |= XFS_MOUNT_SHARED;
+
+		/*
+		 * Shared XFS V0 can't deal with DMI.  Return EINVAL.
+		 */
+		if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI))
+			return XFS_ERROR(EINVAL);
+	}
+
+	return 0;
+}
+
+/*
+ * xfs_mount
+ *
+ * The file system configurations are:
+ *	(1) device (partition) with data and internal log
+ *	(2) logical volume with data and log subvolumes.
+ *	(3) logical volume with data, log, and realtime subvolumes.
+ *
+ * We only have to handle opening the log and realtime volumes here if
+ * they are present.  The data subvolume has already been opened by
+ * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev.
+ */
+STATIC int
+xfs_mount(
+	struct bhv_desc		*bhvp,
+	struct xfs_mount_args	*args,
+	cred_t			*credp)
+{
+	struct vfs		*vfsp = bhvtovfs(bhvp);
+	struct bhv_desc		*p;
+	struct xfs_mount	*mp = XFS_BHVTOM(bhvp);
+	struct block_device	*ddev, *logdev, *rtdev;
+	int			flags = 0, error;
+
+	ddev = vfsp->vfs_super->s_bdev;
+	logdev = rtdev = NULL;
+
+	/*
+	 * Setup xfs_mount function vectors from available behaviors
+	 */
+	p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM);
+	mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_stub;
+	p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM);
+	mp->m_qm_ops = p ? *(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_stub;
+	p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO);
+	mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs;
+
+	/*
+	 * Open real time and log devices - order is important.
+	 */
+	if (args->logname[0]) {
+		error = xfs_blkdev_get(mp, args->logname, &logdev);
+		if (error)
+			return error;
+	}
+	if (args->rtname[0]) {
+		error = xfs_blkdev_get(mp, args->rtname, &rtdev);
+		if (error) {
+			xfs_blkdev_put(logdev);
+			return error;
+		}
+
+		if (rtdev == ddev || rtdev == logdev) {
+			cmn_err(CE_WARN,
+	"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
+			xfs_blkdev_put(logdev);
+			xfs_blkdev_put(rtdev);
+			return EINVAL;
+		}
+	}
+
+	/*
+	 * Setup xfs_mount buffer target pointers
+	 */
+	error = ENOMEM;
+	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+	if (!mp->m_ddev_targp) {
+		xfs_blkdev_put(logdev);
+		xfs_blkdev_put(rtdev);
+		return error;
+	}
+	if (rtdev) {
+		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+		if (!mp->m_rtdev_targp)
+			goto error0;
+	}
+	mp->m_logdev_targp = (logdev && logdev != ddev) ?
+				xfs_alloc_buftarg(logdev, 1) : mp->m_ddev_targp;
+	if (!mp->m_logdev_targp)
+		goto error0;
+
+	/*
+	 * Setup flags based on mount(2) options and then the superblock
+	 */
+	error = xfs_start_flags(vfsp, args, mp);
+	if (error)
+		goto error1;
+	error = xfs_readsb(mp);
+	if (error)
+		goto error1;
+	error = xfs_finish_flags(vfsp, args, mp);
+	if (error)
+		goto error2;
+
+	/*
+	 * Setup xfs_mount buffer target pointers based on superblock
+	 */
+	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
+				    mp->m_sb.sb_sectsize);
+	if (!error && logdev && logdev != ddev) {
+		unsigned int	log_sector_size = BBSIZE;
+
+		if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb))
+			log_sector_size = mp->m_sb.sb_logsectsize;
+		error = xfs_setsize_buftarg(mp->m_logdev_targp,
+					    mp->m_sb.sb_blocksize,
+					    log_sector_size);
+	}
+	if (!error && rtdev)
+		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
+					    mp->m_sb.sb_blocksize,
+					    mp->m_sb.sb_sectsize);
+	if (error)
+		goto error2;
+
+	error = XFS_IOINIT(vfsp, args, flags);
+	if (!error)
+		return 0;
+error2:
+	if (mp->m_sb_bp)
+		xfs_freesb(mp);
+error1:
+	xfs_binval(mp->m_ddev_targp);
+	if (logdev && logdev != ddev)
+		xfs_binval(mp->m_logdev_targp);
+	if (rtdev)
+		xfs_binval(mp->m_rtdev_targp);
+error0:
+	xfs_unmountfs_close(mp, credp);
+	return error;
+}
+
+STATIC int
+xfs_unmount(
+	bhv_desc_t	*bdp,
+	int		flags,
+	cred_t		*credp)
+{
+	struct vfs	*vfsp = bhvtovfs(bdp);
+	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
+	xfs_inode_t	*rip;
+	vnode_t		*rvp;
+	int		unmount_event_wanted = 0;
+	int		unmount_event_flags = 0;
+	int		xfs_unmountfs_needed = 0;
+	int		error;
+
+	rip = mp->m_rootip;
+	rvp = XFS_ITOV(rip);
+
+	if (vfsp->vfs_flag & VFS_DMI) {
+		error = XFS_SEND_PREUNMOUNT(mp, vfsp,
+				rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL,
+				NULL, NULL, 0, 0,
+				(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
+					0:DM_FLAGS_UNWANTED);
+		if (error)
+			return XFS_ERROR(error);
+		unmount_event_wanted = 1;
+		unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))?
+					0 : DM_FLAGS_UNWANTED;
+	}
+
+	/*
+	 * First blow any referenced inode from this file system
+	 * out of the reference cache, and delete the timer.
+	 */
+	xfs_refcache_purge_mp(mp);
+
+	XFS_bflush(mp->m_ddev_targp);
+	error = xfs_unmount_flush(mp, 0);
+	if (error)
+		goto out;
+
+	ASSERT(vn_count(rvp) == 1);
+
+	/*
+	 * Drop the reference count
+	 */
+	VN_RELE(rvp);
+
+	/*
+	 * If we're forcing a shutdown, typically because of a media error,
+	 * we want to make sure we invalidate dirty pages that belong to
+	 * referenced vnodes as well.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		error = xfs_sync(&mp->m_bhv,
+			 (SYNC_WAIT | SYNC_CLOSE), credp);
+		ASSERT(error != EFSCORRUPTED);
+	}
+	xfs_unmountfs_needed = 1;
+
+out:
+	/*	Send DMAPI event, if required.
+	 *	Then do xfs_unmountfs() if needed.
+	 *	Then return error (or zero).
+	 */
+	if (unmount_event_wanted) {
+		/* Note: mp structure must still exist for
+		 * XFS_SEND_UNMOUNT() call.
+		 */
+		XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
+			DM_RIGHT_NULL, 0, error, unmount_event_flags);
+	}
+	if (xfs_unmountfs_needed) {
+		/*
+		 * Call common unmount function to flush to disk
+		 * and free the super block buffer & mount structures.
+		 */
+		xfs_unmountfs(mp, credp);
+	}
+
+	return XFS_ERROR(error);
+}
+
+#define REMOUNT_READONLY_FLAGS	(SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
+
+STATIC int
+xfs_mntupdate(
+	bhv_desc_t			*bdp,
+	int				*flags,
+	struct xfs_mount_args		*args)
+{
+	struct vfs	*vfsp = bhvtovfs(bdp);
+	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
+	int		pincount, error;
+	int		count = 0;
+
+	if (args->flags & XFSMNT_NOATIME)
+		mp->m_flags |= XFS_MOUNT_NOATIME;
+	else
+		mp->m_flags &= ~XFS_MOUNT_NOATIME;
+
+	if (!(vfsp->vfs_flag & VFS_RDONLY)) {
+		VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
+	}
+
+	if (*flags & MS_RDONLY) {
+		xfs_refcache_purge_mp(mp);
+		xfs_flush_buftarg(mp->m_ddev_targp, 0);
+		xfs_finish_reclaim_all(mp, 0);
+
+		/* This loop must run at least twice.
+		 * The first instance of the loop will flush
+		 * most meta data but that will generate more
+		 * meta data (typically directory updates).
+		 * Which then must be flushed and logged before
+		 * we can write the unmount record.
+		 */ 
+		do {
+			VFS_SYNC(vfsp, REMOUNT_READONLY_FLAGS, NULL, error);
+			pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
+			if (!pincount) {
+				delay(50);
+				count++;
+			}
+		} while (count < 2);
+
+		/* Ok now write out an unmount record */
+		xfs_log_unmount_write(mp);
+		xfs_unmountfs_writesb(mp);
+		vfsp->vfs_flag |= VFS_RDONLY;
+	} else {
+		vfsp->vfs_flag &= ~VFS_RDONLY;
+	}
+
+	return 0;
+}
+
+/*
+ * xfs_unmount_flush implements a set of flush operations on special
+ * inodes, which are needed as a separate set of operations so that
+ * they can be called as part of relocation process.
+ */
+int
+xfs_unmount_flush(
+	xfs_mount_t	*mp,		/* Mount structure we are getting
+					   rid of. */
+	int             relocation)	/* Called from vfs relocation. */
+{
+	xfs_inode_t	*rip = mp->m_rootip;
+	xfs_inode_t	*rbmip;
+	xfs_inode_t	*rsumip = NULL;
+	vnode_t		*rvp = XFS_ITOV(rip);
+	int		error;
+
+	xfs_ilock(rip, XFS_ILOCK_EXCL);
+	xfs_iflock(rip);
+
+	/*
+	 * Flush out the real time inodes.
+	 */
+	if ((rbmip = mp->m_rbmip) != NULL) {
+		xfs_ilock(rbmip, XFS_ILOCK_EXCL);
+		xfs_iflock(rbmip);
+		error = xfs_iflush(rbmip, XFS_IFLUSH_SYNC);
+		xfs_iunlock(rbmip, XFS_ILOCK_EXCL);
+
+		if (error == EFSCORRUPTED)
+			goto fscorrupt_out;
+
+		ASSERT(vn_count(XFS_ITOV(rbmip)) == 1);
+
+		rsumip = mp->m_rsumip;
+		xfs_ilock(rsumip, XFS_ILOCK_EXCL);
+		xfs_iflock(rsumip);
+		error = xfs_iflush(rsumip, XFS_IFLUSH_SYNC);
+		xfs_iunlock(rsumip, XFS_ILOCK_EXCL);
+
+		if (error == EFSCORRUPTED)
+			goto fscorrupt_out;
+
+		ASSERT(vn_count(XFS_ITOV(rsumip)) == 1);
+	}
+
+	/*
+	 * Synchronously flush root inode to disk
+	 */
+	error = xfs_iflush(rip, XFS_IFLUSH_SYNC);
+	if (error == EFSCORRUPTED)
+		goto fscorrupt_out2;
+
+	if (vn_count(rvp) != 1 && !relocation) {
+		xfs_iunlock(rip, XFS_ILOCK_EXCL);
+		return XFS_ERROR(EBUSY);
+	}
+
+	/*
+	 * Release dquot that rootinode, rbmino and rsumino might be holding,
+	 * flush and purge the quota inodes.
+	 */
+	error = XFS_QM_UNMOUNT(mp);
+	if (error == EFSCORRUPTED)
+		goto fscorrupt_out2;
+
+	if (rbmip) {
+		VN_RELE(XFS_ITOV(rbmip));
+		VN_RELE(XFS_ITOV(rsumip));
+	}
+
+	xfs_iunlock(rip, XFS_ILOCK_EXCL);
+	return 0;
+
+fscorrupt_out:
+	xfs_ifunlock(rip);
+
+fscorrupt_out2:
+	xfs_iunlock(rip, XFS_ILOCK_EXCL);
+
+	return XFS_ERROR(EFSCORRUPTED);
+}
+
+/*
+ * xfs_root extracts the root vnode from a vfs.
+ *
+ * vfsp -- the vfs struct for the desired file system
+ * vpp  -- address of the caller's vnode pointer which should be
+ *         set to the desired fs root vnode
+ */
+STATIC int
+xfs_root(
+	bhv_desc_t	*bdp,
+	vnode_t		**vpp)
+{
+	vnode_t		*vp;
+
+	vp = XFS_ITOV((XFS_BHVTOM(bdp))->m_rootip);
+	VN_HOLD(vp);
+	*vpp = vp;
+	return 0;
+}
+
+/*
+ * xfs_statvfs
+ *
+ * Fill in the statvfs structure for the given file system.  We use
+ * the superblock lock in the mount structure to ensure a consistent
+ * snapshot of the counters returned.
+ */
+STATIC int
+xfs_statvfs(
+	bhv_desc_t	*bdp,
+	xfs_statfs_t	*statp,
+	vnode_t		*vp)
+{
+	__uint64_t	fakeinos;
+	xfs_extlen_t	lsize;
+	xfs_mount_t	*mp;
+	xfs_sb_t	*sbp;
+	unsigned long	s;
+	u64 id;
+
+	mp = XFS_BHVTOM(bdp);
+	sbp = &(mp->m_sb);
+
+	statp->f_type = XFS_SB_MAGIC;
+
+	s = XFS_SB_LOCK(mp);
+	statp->f_bsize = sbp->sb_blocksize;
+	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
+	statp->f_blocks = sbp->sb_dblocks - lsize;
+	statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks;
+	fakeinos = statp->f_bfree << sbp->sb_inopblog;
+#if XFS_BIG_INUMS
+	fakeinos += mp->m_inoadd;
+#endif
+	statp->f_files =
+	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
+	if (mp->m_maxicount)
+#if XFS_BIG_INUMS
+		if (!mp->m_inoadd)
+#endif
+			statp->f_files = min_t(typeof(statp->f_files),
+						statp->f_files,
+						mp->m_maxicount);
+	statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+	XFS_SB_UNLOCK(mp, s);
+
+	id = huge_encode_dev(mp->m_dev);
+	statp->f_fsid.val[0] = (u32)id;
+	statp->f_fsid.val[1] = (u32)(id >> 32);
+	statp->f_namelen = MAXNAMELEN - 1;
+
+	return 0;
+}
+
+
+/*
+ * xfs_sync flushes any pending I/O to file system vfsp.
+ *
+ * This routine is called by vfs_sync() to make sure that things make it
+ * out to disk eventually, on sync() system calls to flush out everything,
+ * and when the file system is unmounted.  For the vfs_sync() case, all
+ * we really need to do is sync out the log to make all of our meta-data
+ * updates permanent (except for timestamps).  For calls from pflushd(),
+ * dirty pages are kept moving by calling pdflush() on the inodes
+ * containing them.  We also flush the inodes that we can lock without
+ * sleeping and the superblock if we can lock it without sleeping from
+ * vfs_sync() so that items at the tail of the log are always moving out.
+ *
+ * Flags:
+ *      SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
+ *		       to sleep if we can help it.  All we really need
+ *		       to do is ensure that the log is synced at least
+ *		       periodically.  We also push the inodes and
+ *		       superblock if we can lock them without sleeping
+ *			and they are not pinned.
+ *      SYNC_ATTR    - We need to flush the inodes.  If SYNC_BDFLUSH is not
+ *		       set, then we really want to lock each inode and flush
+ *		       it.
+ *      SYNC_WAIT    - All the flushes that take place in this call should
+ *		       be synchronous.
+ *      SYNC_DELWRI  - This tells us to push dirty pages associated with
+ *		       inodes.  SYNC_WAIT and SYNC_BDFLUSH are used to
+ *		       determine if they should be flushed sync, async, or
+ *		       delwri.
+ *      SYNC_CLOSE   - This flag is passed when the system is being
+ *		       unmounted.  We should sync and invalidate everything.
+ *      SYNC_FSDATA  - This indicates that the caller would like to make
+ *		       sure the superblock is safe on disk.  We can ensure
+ *		       this by simply making sure the log gets flushed
+ *		       if SYNC_BDFLUSH is set, and by actually writing it
+ *		       out otherwise.
+ *
+ */
+/*ARGSUSED*/
+STATIC int
+xfs_sync(
+	bhv_desc_t	*bdp,
+	int		flags,
+	cred_t		*credp)
+{
+	xfs_mount_t	*mp;
+
+	mp = XFS_BHVTOM(bdp);
+	return (xfs_syncsub(mp, flags, 0, NULL));
+}
+
+/*
+ * xfs sync routine for internal use
+ *
+ * This routine supports all of the flags defined for the generic VFS_SYNC
+ * interface as explained above under xfs_sync.  In the interests of not
+ * changing interfaces within the 6.5 family, additional internally-
+ * required functions are specified within a separate xflags parameter,
+ * only available by calling this routine.
+ *
+ */
+STATIC int
+xfs_sync_inodes(
+	xfs_mount_t	*mp,
+	int		flags,
+	int             xflags,
+	int             *bypassed)
+{
+	xfs_inode_t	*ip = NULL;
+	xfs_inode_t	*ip_next;
+	xfs_buf_t	*bp;
+	vnode_t		*vp = NULL;
+	vmap_t		vmap;
+	int		error;
+	int		last_error;
+	uint64_t	fflag;
+	uint		lock_flags;
+	uint		base_lock_flags;
+	boolean_t	mount_locked;
+	boolean_t	vnode_refed;
+	int		preempt;
+	xfs_dinode_t	*dip;
+	xfs_iptr_t	*ipointer;
+#ifdef DEBUG
+	boolean_t	ipointer_in = B_FALSE;
+
+#define IPOINTER_SET	ipointer_in = B_TRUE
+#define IPOINTER_CLR	ipointer_in = B_FALSE
+#else
+#define IPOINTER_SET
+#define IPOINTER_CLR
+#endif
+
+
+/* Insert a marker record into the inode list after inode ip. The list
+ * must be locked when this is called. After the call the list will no
+ * longer be locked.
+ */
+#define IPOINTER_INSERT(ip, mp)	{ \
+		ASSERT(ipointer_in == B_FALSE); \
+		ipointer->ip_mnext = ip->i_mnext; \
+		ipointer->ip_mprev = ip; \
+		ip->i_mnext = (xfs_inode_t *)ipointer; \
+		ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \
+		preempt = 0; \
+		XFS_MOUNT_IUNLOCK(mp); \
+		mount_locked = B_FALSE; \
+		IPOINTER_SET; \
+	}
+
+/* Remove the marker from the inode list. If the marker was the only item
+ * in the list then there are no remaining inodes and we should zero out
+ * the whole list. If we are the current head of the list then move the head
+ * past us.
+ */
+#define IPOINTER_REMOVE(ip, mp)	{ \
+		ASSERT(ipointer_in == B_TRUE); \
+		if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \
+			ip = ipointer->ip_mnext; \
+			ip->i_mprev = ipointer->ip_mprev; \
+			ipointer->ip_mprev->i_mnext = ip; \
+			if (mp->m_inodes == (xfs_inode_t *)ipointer) { \
+				mp->m_inodes = ip; \
+			} \
+		} else { \
+			ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \
+			mp->m_inodes = NULL; \
+			ip = NULL; \
+		} \
+		IPOINTER_CLR; \
+	}
+
+#define XFS_PREEMPT_MASK	0x7f
+
+	if (bypassed)
+		*bypassed = 0;
+	if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
+		return 0;
+	error = 0;
+	last_error = 0;
+	preempt = 0;
+
+	/* Allocate a reference marker */
+	ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP);
+
+	fflag = XFS_B_ASYNC;		/* default is don't wait */
+	if (flags & SYNC_BDFLUSH)
+		fflag = XFS_B_DELWRI;
+	if (flags & SYNC_WAIT)
+		fflag = 0;		/* synchronous overrides all */
+
+	base_lock_flags = XFS_ILOCK_SHARED;
+	if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
+		/*
+		 * We need the I/O lock if we're going to call any of
+		 * the flush/inval routines.
+		 */
+		base_lock_flags |= XFS_IOLOCK_SHARED;
+	}
+
+	XFS_MOUNT_ILOCK(mp);
+
+	ip = mp->m_inodes;
+
+	mount_locked = B_TRUE;
+	vnode_refed  = B_FALSE;
+
+	IPOINTER_CLR;
+
+	do {
+		ASSERT(ipointer_in == B_FALSE);
+		ASSERT(vnode_refed == B_FALSE);
+
+		lock_flags = base_lock_flags;
+
+		/*
+		 * There were no inodes in the list, just break out
+		 * of the loop.
+		 */
+		if (ip == NULL) {
+			break;
+		}
+
+		/*
+		 * We found another sync thread marker - skip it
+		 */
+		if (ip->i_mount == NULL) {
+			ip = ip->i_mnext;
+			continue;
+		}
+
+		vp = XFS_ITOV_NULL(ip);
+
+		/*
+		 * If the vnode is gone then this inode is being torn down.
+		 * Call reclaim if it is flushed, else let the regular flush
+		 * code deal with it later in the loop.
+		 */
+
+		if (vp == NULL) {
+			/* Skip ones already in reclaim */
+			if (ip->i_flags & XFS_IRECLAIM) {
+				ip = ip->i_mnext;
+				continue;
+			}
+			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
+				ip = ip->i_mnext;
+			} else if ((xfs_ipincount(ip) == 0) &&
+				    xfs_iflock_nowait(ip)) {
+				IPOINTER_INSERT(ip, mp);
+
+				xfs_finish_reclaim(ip, 1,
+						XFS_IFLUSH_DELWRI_ELSE_ASYNC);
+
+				XFS_MOUNT_ILOCK(mp);
+				mount_locked = B_TRUE;
+				IPOINTER_REMOVE(ip, mp);
+			} else {
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				ip = ip->i_mnext;
+			}
+			continue;
+		}
+
+		if (VN_BAD(vp)) {
+			ip = ip->i_mnext;
+			continue;
+		}
+
+		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
+			XFS_MOUNT_IUNLOCK(mp);
+			kmem_free(ipointer, sizeof(xfs_iptr_t));
+			return 0;
+		}
+
+		/*
+		 * If this is just vfs_sync() or pflushd() calling
+		 * then we can skip inodes for which it looks like
+		 * there is nothing to do.  Since we don't have the
+		 * inode locked this is racy, but these are periodic
+		 * calls so it doesn't matter.  For the others we want
+		 * to know for sure, so we at least try to lock them.
+		 */
+		if (flags & SYNC_BDFLUSH) {
+			if (((ip->i_itemp == NULL) ||
+			     !(ip->i_itemp->ili_format.ilf_fields &
+			       XFS_ILOG_ALL)) &&
+			    (ip->i_update_core == 0)) {
+				ip = ip->i_mnext;
+				continue;
+			}
+		}
+
+		/*
+		 * Try to lock without sleeping.  We're out of order with
+		 * the inode list lock here, so if we fail we need to drop
+		 * the mount lock and try again.  If we're called from
+		 * bdflush() here, then don't bother.
+		 *
+		 * The inode lock here actually coordinates with the
+		 * almost spurious inode lock in xfs_ireclaim() to prevent
+		 * the vnode we handle here without a reference from
+		 * being freed while we reference it.  If we lock the inode
+		 * while it's on the mount list here, then the spurious inode
+		 * lock in xfs_ireclaim() after the inode is pulled from
+		 * the mount list will sleep until we release it here.
+		 * This keeps the vnode from being freed while we reference
+		 * it.  It is also cheaper and simpler than actually doing
+		 * a vn_get() for every inode we touch here.
+		 */
+		if (xfs_ilock_nowait(ip, lock_flags) == 0) {
+
+			if ((flags & SYNC_BDFLUSH) || (vp == NULL)) {
+				ip = ip->i_mnext;
+				continue;
+			}
+
+			/*
+			 * We need to unlock the inode list lock in order
+			 * to lock the inode. Insert a marker record into
+			 * the inode list to remember our position; dropping
+			 * the lock is now done inside the IPOINTER_INSERT
+			 * macro.
+			 *
+			 * We also use the inode list lock to protect us
+			 * in taking a snapshot of the vnode version number
+			 * for use in calling vn_get().
+			 */
+			VMAP(vp, vmap);
+			IPOINTER_INSERT(ip, mp);
+
+			vp = vn_get(vp, &vmap);
+			if (vp == NULL) {
+				/*
+				 * The vnode was reclaimed once we let go
+				 * of the inode list lock.  Skip to the
+				 * next list entry. Remove the marker.
+				 */
+
+				XFS_MOUNT_ILOCK(mp);
+
+				mount_locked = B_TRUE;
+				vnode_refed  = B_FALSE;
+
+				IPOINTER_REMOVE(ip, mp);
+
+				continue;
+			}
+
+			xfs_ilock(ip, lock_flags);
+
+			ASSERT(vp == XFS_ITOV(ip));
+			ASSERT(ip->i_mount == mp);
+
+			vnode_refed = B_TRUE;
+		}
+
+		/* From here on in the loop we may have a marker record
+		 * in the inode list.
+		 */
+
+		if ((flags & SYNC_CLOSE)  && (vp != NULL)) {
+			/*
+			 * This is the shutdown case.  We just need to
+			 * flush and invalidate all the pages associated
+			 * with the inode.  Drop the inode lock since
+			 * we can't hold it across calls to the buffer
+			 * cache.
+			 *
+			 * We don't set the VREMAPPING bit in the vnode
+			 * here, because we don't hold the vnode lock
+			 * exclusively.  It doesn't really matter, though,
+			 * because we only come here when we're shutting
+			 * down anyway.
+			 */
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+			if (XFS_FORCED_SHUTDOWN(mp)) {
+				VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
+			} else {
+				VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF);
+			}
+
+			xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+		} else if ((flags & SYNC_DELWRI) && (vp != NULL)) {
+			if (VN_DIRTY(vp)) {
+				/* We need to have dropped the lock here,
+				 * so insert a marker if we have not already
+				 * done so.
+				 */
+				if (mount_locked) {
+					IPOINTER_INSERT(ip, mp);
+				}
+
+				/*
+				 * Drop the inode lock since we can't hold it
+				 * across calls to the buffer cache.
+				 */
+				xfs_iunlock(ip, XFS_ILOCK_SHARED);
+				VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1,
+							fflag, FI_NONE, error);
+				xfs_ilock(ip, XFS_ILOCK_SHARED);
+			}
+
+		}
+
+		if (flags & SYNC_BDFLUSH) {
+			if ((flags & SYNC_ATTR) &&
+			    ((ip->i_update_core) ||
+			     ((ip->i_itemp != NULL) &&
+			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {
+
+				/* Insert marker and drop lock if not already
+				 * done.
+				 */
+				if (mount_locked) {
+					IPOINTER_INSERT(ip, mp);
+				}
+
+				/*
+				 * We don't want the periodic flushing of the
+				 * inodes by vfs_sync() to interfere with
+				 * I/O to the file, especially read I/O
+				 * where it is only the access time stamp
+				 * that is being flushed out.  To prevent
+				 * long periods where we have both inode
+				 * locks held shared here while reading the
+				 * inode's buffer in from disk, we drop the
+				 * inode lock while reading in the inode
+				 * buffer.  We have to release the buffer
+				 * and reacquire the inode lock so that they
+				 * are acquired in the proper order (inode
+				 * locks first).  The buffer will go at the
+				 * end of the lru chain, though, so we can
+				 * expect it to still be there when we go
+				 * for it again in xfs_iflush().
+				 */
+				if ((xfs_ipincount(ip) == 0) &&
+				    xfs_iflock_nowait(ip)) {
+
+					xfs_ifunlock(ip);
+					xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+					error = xfs_itobp(mp, NULL, ip,
+							  &dip, &bp, 0);
+					if (!error) {
+						xfs_buf_relse(bp);
+					} else {
+						/* Bailing out, remove the
+						 * marker and free it.
+						 */
+						XFS_MOUNT_ILOCK(mp);
+
+						IPOINTER_REMOVE(ip, mp);
+
+						XFS_MOUNT_IUNLOCK(mp);
+
+						ASSERT(!(lock_flags &
+							XFS_IOLOCK_SHARED));
+
+						kmem_free(ipointer,
+							sizeof(xfs_iptr_t));
+						return (0);
+					}
+
+					/*
+					 * Since we dropped the inode lock,
+					 * the inode may have been reclaimed.
+					 * Therefore, we reacquire the mount
+					 * lock and check whether our inode was
+					 * the one reclaimed. If this happened
+					 * then the ipointer marker will no
+					 * longer point back at us. In this
+					 * case, move ip along to the inode
+					 * after the marker, remove the marker
+					 * and continue.
+					 */
+					XFS_MOUNT_ILOCK(mp);
+					mount_locked = B_TRUE;
+
+					if (ip != ipointer->ip_mprev) {
+						IPOINTER_REMOVE(ip, mp);
+
+						ASSERT(!vnode_refed);
+						ASSERT(!(lock_flags &
+							XFS_IOLOCK_SHARED));
+						continue;
+					}
+
+					ASSERT(ip->i_mount == mp);
+
+					if (xfs_ilock_nowait(ip,
+						    XFS_ILOCK_SHARED) == 0) {
+						ASSERT(ip->i_mount == mp);
+						/*
+						 * We failed to reacquire
+						 * the inode lock without
+						 * sleeping, so just skip
+						 * the inode for now.  We
+						 * clear the ILOCK bit from
+						 * the lock_flags so that we
+						 * won't try to drop a lock
+						 * we don't hold below.
+						 */
+						lock_flags &= ~XFS_ILOCK_SHARED;
+						IPOINTER_REMOVE(ip_next, mp);
+					} else if ((xfs_ipincount(ip) == 0) &&
+						   xfs_iflock_nowait(ip)) {
+						ASSERT(ip->i_mount == mp);
+						/*
+						 * Since this is vfs_sync()
+						 * calling we only flush the
+						 * inode out if we can lock
+						 * it without sleeping and
+						 * it is not pinned.  Drop
+						 * the mount lock here so
+						 * that we don't hold it for
+						 * too long. We already have
+						 * a marker in the list here.
+						 */
+						XFS_MOUNT_IUNLOCK(mp);
+						mount_locked = B_FALSE;
+						error = xfs_iflush(ip,
+							   XFS_IFLUSH_DELWRI);
+					} else {
+						ASSERT(ip->i_mount == mp);
+						IPOINTER_REMOVE(ip_next, mp);
+					}
+				}
+
+			}
+
+		} else {
+			if ((flags & SYNC_ATTR) &&
+			    ((ip->i_update_core) ||
+			     ((ip->i_itemp != NULL) &&
+			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {
+				if (mount_locked) {
+					IPOINTER_INSERT(ip, mp);
+				}
+
+				if (flags & SYNC_WAIT) {
+					xfs_iflock(ip);
+					error = xfs_iflush(ip,
+							   XFS_IFLUSH_SYNC);
+				} else {
+					/*
+					 * If we can't acquire the flush
+					 * lock, then the inode is already
+					 * being flushed so don't bother
+					 * waiting.  If we can lock it then
+					 * do a delwri flush so we can
+					 * combine multiple inode flushes
+					 * in each disk write.
+					 */
+					if (xfs_iflock_nowait(ip)) {
+						error = xfs_iflush(ip,
+							   XFS_IFLUSH_DELWRI);
+					} else if (bypassed)
+						(*bypassed)++;
+				}
+			}
+		}
+
+		if (lock_flags != 0) {
+			xfs_iunlock(ip, lock_flags);
+		}
+
+		if (vnode_refed) {
+			/*
+			 * If we had to take a reference on the vnode
+			 * above, then wait until after we've unlocked
+			 * the inode to release the reference.  This is
+			 * because we can be already holding the inode
+			 * lock when VN_RELE() calls xfs_inactive().
+			 *
+			 * Make sure to drop the mount lock before calling
+			 * VN_RELE() so that we don't trip over ourselves if
+			 * we have to go for the mount lock again in the
+			 * inactive code.
+			 */
+			if (mount_locked) {
+				IPOINTER_INSERT(ip, mp);
+			}
+
+			VN_RELE(vp);
+
+			vnode_refed = B_FALSE;
+		}
+
+		if (error) {
+			last_error = error;
+		}
+
+		/*
+		 * bail out if the filesystem is corrupted.
+		 */
+		if (error == EFSCORRUPTED)  {
+			if (!mount_locked) {
+				XFS_MOUNT_ILOCK(mp);
+				IPOINTER_REMOVE(ip, mp);
+			}
+			XFS_MOUNT_IUNLOCK(mp);
+			ASSERT(ipointer_in == B_FALSE);
+			kmem_free(ipointer, sizeof(xfs_iptr_t));
+			return XFS_ERROR(error);
+		}
+
+		/* Let other threads have a chance at the mount lock
+		 * if we have looped many times without dropping the
+		 * lock.
+		 */
+		if ((++preempt & XFS_PREEMPT_MASK) == 0) {
+			if (mount_locked) {
+				IPOINTER_INSERT(ip, mp);
+			}
+		}
+
+		if (mount_locked == B_FALSE) {
+			XFS_MOUNT_ILOCK(mp);
+			mount_locked = B_TRUE;
+			IPOINTER_REMOVE(ip, mp);
+			continue;
+		}
+
+		ASSERT(ipointer_in == B_FALSE);
+		ip = ip->i_mnext;
+
+	} while (ip != mp->m_inodes);
+
+	XFS_MOUNT_IUNLOCK(mp);
+
+	ASSERT(ipointer_in == B_FALSE);
+
+	kmem_free(ipointer, sizeof(xfs_iptr_t));
+	return XFS_ERROR(last_error);
+}
+
+/*
+ * xfs sync routine for internal use
+ *
+ * This routine supports all of the flags defined for the generic VFS_SYNC
+ * interface as explained above under xfs_sync.  In the interests of not
+ * changing interfaces within the 6.5 family, additional internally-
+ * required functions are specified within a separate xflags parameter,
+ * only available by calling this routine.
+ *
+ */
+int
+xfs_syncsub(
+	xfs_mount_t	*mp,
+	int		flags,
+	int             xflags,
+	int             *bypassed)
+{
+	int		error = 0;
+	int		last_error = 0;
+	uint		log_flags = XFS_LOG_FORCE;
+	xfs_buf_t	*bp;
+	xfs_buf_log_item_t	*bip;
+
+	/*
+	 * Sync out the log.  This ensures that the log is periodically
+	 * flushed even if there is not enough activity to fill it up.
+	 */
+	if (flags & SYNC_WAIT)
+		log_flags |= XFS_LOG_SYNC;
+
+	xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
+
+	if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
+		if (flags & SYNC_BDFLUSH)
+			xfs_finish_reclaim_all(mp, 1);
+		else
+			error = xfs_sync_inodes(mp, flags, xflags, bypassed);
+	}
+
+	/*
+	 * Flushing out dirty data above probably generated more
+	 * log activity, so if this isn't vfs_sync() then flush
+	 * the log again.
+	 */
+	if (flags & SYNC_DELWRI) {
+		xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
+	}
+
+	if (flags & SYNC_FSDATA) {
+		/*
+		 * If this is vfs_sync() then only sync the superblock
+		 * if we can lock it without sleeping and it is not pinned.
+		 */
+		if (flags & SYNC_BDFLUSH) {
+			bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
+			if (bp != NULL) {
+				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
+				if ((bip != NULL) &&
+				    xfs_buf_item_dirty(bip)) {
+					if (!(XFS_BUF_ISPINNED(bp))) {
+						XFS_BUF_ASYNC(bp);
+						error = xfs_bwrite(mp, bp);
+					} else {
+						xfs_buf_relse(bp);
+					}
+				} else {
+					xfs_buf_relse(bp);
+				}
+			}
+		} else {
+			bp = xfs_getsb(mp, 0);
+			/*
+			 * If the buffer is pinned then push on the log so
+			 * we won't get stuck waiting in the write for
+			 * someone, maybe ourselves, to flush the log.
+			 * Even though we just pushed the log above, we
+			 * did not have the superblock buffer locked at
+			 * that point so it can become pinned in between
+			 * there and here.
+			 */
+			if (XFS_BUF_ISPINNED(bp))
+				xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+			if (flags & SYNC_WAIT)
+				XFS_BUF_UNASYNC(bp);
+			else
+				XFS_BUF_ASYNC(bp);
+			error = xfs_bwrite(mp, bp);
+		}
+		if (error) {
+			last_error = error;
+		}
+	}
+
+	/*
+	 * If this is the periodic sync, then kick some entries out of
+	 * the reference cache.  This ensures that idle entries are
+	 * eventually kicked out of the cache.
+	 */
+	if (flags & SYNC_REFCACHE) {
+		xfs_refcache_purge_some(mp);
+	}
+
+	/*
+	 * Now check to see if the log needs a "dummy" transaction.
+	 */
+
+	if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
+		xfs_trans_t *tp;
+		xfs_inode_t *ip;
+
+		/*
+		 * Put a dummy transaction in the log to tell
+		 * recovery that all others are OK.
+		 */
+		tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+		if ((error = xfs_trans_reserve(tp, 0,
+				XFS_ICHANGE_LOG_RES(mp),
+				0, 0, 0)))  {
+			xfs_trans_cancel(tp, 0);
+			return error;
+		}
+
+		ip = mp->m_rootip;
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		error = xfs_trans_commit(tp, 0, NULL);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
+	}
+
+	/*
+	 * When shutting down, we need to ensure that the AIL is pushed
+	 * to disk or the filesystem can appear corrupt from the PROM.
+	 */
+	if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
+		XFS_bflush(mp->m_ddev_targp);
+		if (mp->m_rtdev_targp) {
+			XFS_bflush(mp->m_rtdev_targp);
+		}
+	}
+
+	return XFS_ERROR(last_error);
+}
+
+/*
+ * xfs_vget - called by DMAPI and NFSD to get vnode from file handle
+ */
+STATIC int
+xfs_vget(
+	bhv_desc_t	*bdp,
+	vnode_t		**vpp,
+	fid_t		*fidp)
+{
+	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
+	xfs_fid_t	*xfid = (struct xfs_fid *)fidp;
+	xfs_inode_t	*ip;
+	int		error;
+	xfs_ino_t	ino;
+	unsigned int	igen;
+
+	/*
+	 * Invalid.  Since handles can be created in user space and passed in
+	 * via gethandle(), this is not cause for a panic.
+	 */
+	if (xfid->xfs_fid_len != sizeof(*xfid) - sizeof(xfid->xfs_fid_len))
+		return XFS_ERROR(EINVAL);
+
+	ino  = xfid->xfs_fid_ino;
+	igen = xfid->xfs_fid_gen;
+
+	/*
+	 * NFS can sometimes send requests for ino 0.  Fail them gracefully.
+	 */
+	if (ino == 0)
+		return XFS_ERROR(ESTALE);
+
+	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
+	if (error) {
+		*vpp = NULL;
+		return error;
+	}
+
+	if (ip == NULL) {
+		*vpp = NULL;
+		return XFS_ERROR(EIO);
+	}
+
+	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
+		xfs_iput_new(ip, XFS_ILOCK_SHARED);
+		*vpp = NULL;
+		return XFS_ERROR(ENOENT);
+	}
+
+	*vpp = XFS_ITOV(ip);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return 0;
+}
+
+
+#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
+#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
+#define MNTOPT_LOGDEV	"logdev"	/* log device */
+#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
+#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
+#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
+#define MNTOPT_INO64	"ino64"		/* force inodes into 64-bit range */
+#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
+#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
+#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
+#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
+#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
+#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
+#define MNTOPT_IHASHSIZE    "ihashsize"    /* size of inode hash table */
+#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
+#define MNTOPT_NOLOGFLUSH   "nologflush"   /* don't hard flush on log writes */
+#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
+#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
+#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
+#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
+
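+/*
+ * A purely illustrative example of how the options above are consumed by
+ * xfs_parseargs() below (device and mount point names are hypothetical):
+ *
+ *	mount -t xfs -o logbufs=8,logbsize=32k,sunit=64,swidth=256,noikeep \
+ *		/dev/sdb1 /mnt/scratch
+ *
+ * logbufs and logbsize set args->logbufs and args->logbufsize (the trailing
+ * "k" scales the value to kilobytes), sunit/swidth feed the stripe geometry
+ * checks further down, and noikeep sets the XFSMNT_IDELETE flag.
+ */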
+
+int
+xfs_parseargs(
+	struct bhv_desc		*bhv,
+	char			*options,
+	struct xfs_mount_args	*args,
+	int			update)
+{
+	struct vfs		*vfsp = bhvtovfs(bhv);
+	char			*this_char, *value, *eov;
+	int			dsunit, dswidth, vol_dsunit, vol_dswidth;
+	int			iosize;
+
+#if 0	/* XXX: off by default, until some remaining issues ironed out */
+	args->flags |= XFSMNT_IDELETE; /* default to on */
+#endif
+
+	if (!options)
+		return 0;
+
+	iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0;
+
+	while ((this_char = strsep(&options, ",")) != NULL) {
+		if (!*this_char)
+			continue;
+		if ((value = strchr(this_char, '=')) != NULL)
+			*value++ = 0;
+
+		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_LOGBUFS);
+				return EINVAL;
+			}
+			args->logbufs = simple_strtoul(value, &eov, 10);
+		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
+			int	last, in_kilobytes = 0;
+
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_LOGBSIZE);
+				return EINVAL;
+			}
+			last = strlen(value) - 1;
+			if (value[last] == 'K' || value[last] == 'k') {
+				in_kilobytes = 1;
+				value[last] = '\0';
+			}
+			args->logbufsize = simple_strtoul(value, &eov, 10);
+			if (in_kilobytes)
+				args->logbufsize <<= 10;
+		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_LOGDEV);
+				return EINVAL;
+			}
+			strncpy(args->logname, value, MAXNAMELEN);
+		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_MTPT);
+				return EINVAL;
+			}
+			strncpy(args->mtpt, value, MAXNAMELEN);
+		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_RTDEV);
+				return EINVAL;
+			}
+			strncpy(args->rtname, value, MAXNAMELEN);
+		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_BIOSIZE);
+				return EINVAL;
+			}
+			iosize = simple_strtoul(value, &eov, 10);
+			args->flags |= XFSMNT_IOSIZE;
+			args->iosizelog = (uint8_t) iosize;
+		} else if (!strcmp(this_char, MNTOPT_IHASHSIZE)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					this_char);
+				return EINVAL;
+			}
+			args->flags |= XFSMNT_IHASHSIZE;
+			args->ihashsize = simple_strtoul(value, &eov, 10);
+		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
+			args->flags |= XFSMNT_WSYNC;
+		} else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
+			args->flags |= XFSMNT_OSYNCISOSYNC;
+		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
+			args->flags |= XFSMNT_NORECOVERY;
+		} else if (!strcmp(this_char, MNTOPT_INO64)) {
+			args->flags |= XFSMNT_INO64;
+#if !XFS_BIG_INUMS
+			printk("XFS: %s option not allowed on this system\n",
+				MNTOPT_INO64);
+			return EINVAL;
+#endif
+		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
+			args->flags |= XFSMNT_NOALIGN;
+		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
+			args->flags |= XFSMNT_SWALLOC;
+		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_SUNIT);
+				return EINVAL;
+			}
+			dsunit = simple_strtoul(value, &eov, 10);
+		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
+			if (!value || !*value) {
+				printk("XFS: %s option requires an argument\n",
+					MNTOPT_SWIDTH);
+				return EINVAL;
+			}
+			dswidth = simple_strtoul(value, &eov, 10);
+		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
+			args->flags &= ~XFSMNT_32BITINODES;
+#if !XFS_BIG_INUMS
+			printk("XFS: %s option not allowed on this system\n",
+				MNTOPT_64BITINODE);
+			return EINVAL;
+#endif
+		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
+			args->flags |= XFSMNT_NOUUID;
+		} else if (!strcmp(this_char, MNTOPT_NOLOGFLUSH)) {
+			args->flags |= XFSMNT_NOLOGFLUSH;
+		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
+			args->flags &= ~XFSMNT_IDELETE;
+		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
+			args->flags |= XFSMNT_IDELETE;
+		} else if (!strcmp(this_char, "osyncisdsync")) {
+			/* no-op, this is now the default */
+printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
+		} else if (!strcmp(this_char, "irixsgid")) {
+printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n");
+		} else {
+			printk("XFS: unknown mount option [%s].\n", this_char);
+			return EINVAL;
+		}
+	}
+
+	if (args->flags & XFSMNT_NORECOVERY) {
+		if ((vfsp->vfs_flag & VFS_RDONLY) == 0) {
+			printk("XFS: no-recovery mounts must be read-only.\n");
+			return EINVAL;
+		}
+	}
+
+	if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) {
+		printk(
+	"XFS: sunit and swidth options incompatible with the noalign option\n");
+		return EINVAL;
+	}
+
+	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
+		printk("XFS: sunit and swidth must be specified together\n");
+		return EINVAL;
+	}
+
+	if (dsunit && (dswidth % dsunit != 0)) {
+		printk(
+	"XFS: stripe width (%d) must be a multiple of the stripe unit (%d)\n",
+			dswidth, dsunit);
+		return EINVAL;
+	}
+
+	if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
+		if (dsunit) {
+			args->sunit = dsunit;
+			args->flags |= XFSMNT_RETERR;
+		} else {
+			args->sunit = vol_dsunit;
+		}
+		if (dswidth)
+			args->swidth = dswidth;
+		else
+			args->swidth = vol_dswidth;
+	} else {
+		args->sunit = args->swidth = 0;
+	}
+
+	return 0;
+}
+
+int
+xfs_showargs(
+	struct bhv_desc		*bhv,
+	struct seq_file		*m)
+{
+	static struct proc_xfs_info {
+		int	flag;
+		char	*str;
+	} xfs_info[] = {
+		/* the few simple ones we can get from the mount struct */
+		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
+		{ XFS_MOUNT_INO64,		"," MNTOPT_INO64 },
+		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
+		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
+		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
+		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
+		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
+		{ XFS_MOUNT_NOLOGFLUSH,		"," MNTOPT_NOLOGFLUSH },
+		{ XFS_MOUNT_IDELETE,		"," MNTOPT_NOIKEEP },
+		{ 0, NULL }
+	};
+	struct proc_xfs_info	*xfs_infop;
+	struct xfs_mount	*mp = XFS_BHVTOM(bhv);
+
+	for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
+		if (mp->m_flags & xfs_infop->flag)
+			seq_puts(m, xfs_infop->str);
+	}
+
+	if (mp->m_flags & XFS_MOUNT_IHASHSIZE)
+		seq_printf(m, "," MNTOPT_IHASHSIZE "=%d", mp->m_ihsize);
+
+	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
+		seq_printf(m, "," MNTOPT_BIOSIZE "=%d", mp->m_writeio_log);
+
+	if (mp->m_logbufs > 0)
+		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
+
+	if (mp->m_logbsize > 0)
+		seq_printf(m, "," MNTOPT_LOGBSIZE "=%d", mp->m_logbsize);
+
+	if (mp->m_ddev_targp != mp->m_logdev_targp)
+		seq_printf(m, "," MNTOPT_LOGDEV "=%s",
+				XFS_BUFTARG_NAME(mp->m_logdev_targp));
+
+	if (mp->m_rtdev_targp && mp->m_ddev_targp != mp->m_rtdev_targp)
+		seq_printf(m, "," MNTOPT_RTDEV "=%s",
+				XFS_BUFTARG_NAME(mp->m_rtdev_targp));
+
+	if (mp->m_dalign > 0)
+		seq_printf(m, "," MNTOPT_SUNIT "=%d",
+				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
+
+	if (mp->m_swidth > 0)
+		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
+				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
+
+	if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT))
+		seq_printf(m, "," MNTOPT_64BITINODE);
+
+	return 0;
+}
+
+STATIC void
+xfs_freeze(
+	bhv_desc_t	*bdp)
+{
+	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
+
+	while (atomic_read(&mp->m_active_trans) > 0)
+		delay(100);
+
+	/* Push the superblock and write an unmount record */
+	xfs_log_unmount_write(mp);
+	xfs_unmountfs_writesb(mp);
+}
+
+
+vfsops_t xfs_vfsops = {
+	BHV_IDENTITY_INIT(VFS_BHV_XFS,VFS_POSITION_XFS),
+	.vfs_parseargs		= xfs_parseargs,
+	.vfs_showargs		= xfs_showargs,
+	.vfs_mount		= xfs_mount,
+	.vfs_unmount		= xfs_unmount,
+	.vfs_mntupdate		= xfs_mntupdate,
+	.vfs_root		= xfs_root,
+	.vfs_statvfs		= xfs_statvfs,
+	.vfs_sync		= xfs_sync,
+	.vfs_vget		= xfs_vget,
+	.vfs_dmapiops		= (vfs_dmapiops_t)fs_nosys,
+	.vfs_quotactl		= (vfs_quotactl_t)fs_nosys,
+	.vfs_init_vnode		= xfs_initialize_vnode,
+	.vfs_force_shutdown	= xfs_do_force_shutdown,
+	.vfs_freeze		= xfs_freeze,
+};
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
new file mode 100644
index 0000000..7009296
--- /dev/null
+++ b/fs/xfs/xfs_vnodeops.c
@@ -0,0 +1,4712 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_itable.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr.h"
+#include "xfs_rw.h"
+#include "xfs_refcache.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
+#include "xfs_utils.h"
+#include "xfs_trans_space.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_mac.h"
+#include "xfs_log_priv.h"
+
+
+/*
+ * The maximum pathlen is 1024 bytes. Since the minimum file system
+ * blocksize is 512 bytes, we can get a max of 2 extents back from
+ * bmapi.
+ */
+#define SYMLINK_MAPS 2
+
+/*
+ * For xfs, we check that the file isn't too big to be opened by this kernel.
+ * No other open action is required for regular files.  Devices are handled
+ * through the specfs file system, pipes through fifofs.  Device and
+ * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
+ * when a new vnode is first looked up or created.
+ */
+STATIC int
+xfs_open(
+	bhv_desc_t	*bdp,
+	cred_t		*credp)
+{
+	int		mode;
+	vnode_t		*vp;
+	xfs_inode_t	*ip;
+
+	vp = BHV_TO_VNODE(bdp);
+	ip = XFS_BHVTOI(bdp);
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return XFS_ERROR(EIO);
+
+	/*
+	 * If it's a directory with any blocks, read-ahead block 0
+	 * as we're almost certain to have the next operation be a read there.
+	 */
+	if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) {
+		mode = xfs_ilock_map_shared(ip);
+		if (ip->i_d.di_nextents > 0)
+			(void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
+		xfs_iunlock(ip, mode);
+	}
+	return 0;
+}
+
+
+/*
+ * xfs_getattr
+ */
+STATIC int
+xfs_getattr(
+	bhv_desc_t	*bdp,
+	vattr_t		*vap,
+	int		flags,
+	cred_t		*credp)
+{
+	xfs_inode_t	*ip;
+	xfs_mount_t	*mp;
+	vnode_t		*vp;
+
+	vp  = BHV_TO_VNODE(bdp);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	if (!(flags & ATTR_LAZY))
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+	vap->va_size = ip->i_d.di_size;
+	if (vap->va_mask == XFS_AT_SIZE)
+		goto all_done;
+
+	vap->va_nblocks =
+		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+	vap->va_nodeid = ip->i_ino;
+#if XFS_BIG_INUMS
+	vap->va_nodeid += mp->m_inoadd;
+#endif
+	vap->va_nlink = ip->i_d.di_nlink;
+
+	/*
+	 * Quick exit for non-stat callers
+	 */
+	if ((vap->va_mask &
+	    ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
+	      XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
+		goto all_done;
+
+	/*
+	 * Copy from in-core inode.
+	 */
+	vap->va_type = vp->v_type;
+	vap->va_mode = ip->i_d.di_mode & MODEMASK;
+	vap->va_uid = ip->i_d.di_uid;
+	vap->va_gid = ip->i_d.di_gid;
+	vap->va_projid = ip->i_d.di_projid;
+
+	/*
+	 * Check vnode type block/char vs. everything else.
+	 * Do it with a bitmask because that's faster than looking
+	 * for multiple values individually.
+	 */
+	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+		vap->va_rdev = 0;
+
+		if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+
+#if 0
+			/* Large block sizes confuse various
+			 * user space programs, so letting the
+			 * stripe size through is not a good
+			 * idea for now.
+			 */
+			vap->va_blocksize = mp->m_swidth ?
+				/*
+				 * If the underlying volume is a stripe, then
+				 * return the stripe width in bytes as the
+				 * recommended I/O size.
+				 */
+				(mp->m_swidth << mp->m_sb.sb_blocklog) :
+				/*
+				 * Return the largest of the preferred buffer
+				 * sizes since doing small I/Os into larger
+				 * buffers causes buffers to be decommissioned.
+				 * The value returned is in bytes.
+				 */
+				(1 << (int)MAX(mp->m_readio_log,
+					       mp->m_writeio_log));
+
+#else
+			vap->va_blocksize =
+				/*
+				 * Return the largest of the preferred buffer
+				 * sizes since doing small I/Os into larger
+				 * buffers causes buffers to be decommissioned.
+				 * The value returned is in bytes.
+				 */
+				1 << (int)MAX(mp->m_readio_log,
+					       mp->m_writeio_log);
+#endif
+		} else {
+
+			/*
+			 * If the file blocks are being allocated from a
+			 * realtime partition, then return the inode's
+			 * realtime extent size or the realtime volume's
+			 * extent size.
+			 */
+			vap->va_blocksize = ip->i_d.di_extsize ?
+				(ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
+				(mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
+		}
+	} else {
+		vap->va_rdev = ip->i_df.if_u2.if_rdev;
+		vap->va_blocksize = BLKDEV_IOSIZE;
+	}
+
+	vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
+	vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
+	vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
+	vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
+	vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
+	vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
+
+	/*
+	 * Exit for stat callers.  See if any of the rest of the fields
+	 * to be filled in are needed.
+	 */
+	if ((vap->va_mask &
+	     (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
+	      XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+		goto all_done;
+
+	/*
+	 * Convert di_flags to xflags.
+	 */
+	vap->va_xflags = xfs_ip2xflags(ip);
+
+	/*
+	 * Exit for inode revalidate.  See if any of the rest of
+	 * the fields to be filled in are needed.
+	 */
+	if ((vap->va_mask &
+	     (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
+	      XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+		goto all_done;
+
+	vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
+	vap->va_nextents =
+		(ip->i_df.if_flags & XFS_IFEXTENTS) ?
+			ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
+			ip->i_d.di_nextents;
+	if (ip->i_afp)
+		vap->va_anextents =
+			(ip->i_afp->if_flags & XFS_IFEXTENTS) ?
+				ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :
+				 ip->i_d.di_anextents;
+	else
+		vap->va_anextents = 0;
+	vap->va_gen = ip->i_d.di_gen;
+
+ all_done:
+	if (!(flags & ATTR_LAZY))
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return 0;
+}
+
+
+/*
+ * xfs_setattr
+ */
+int
+xfs_setattr(
+	bhv_desc_t		*bdp,
+	vattr_t			*vap,
+	int			flags,
+	cred_t			*credp)
+{
+	xfs_inode_t		*ip;
+	xfs_trans_t		*tp;
+	xfs_mount_t		*mp;
+	int			mask;
+	int			code;
+	uint			lock_flags;
+	uint			commit_flags=0;
+	uid_t			uid=0, iuid=0;
+	gid_t			gid=0, igid=0;
+	int			timeflags = 0;
+	vnode_t			*vp;
+	xfs_prid_t		projid=0, iprojid=0;
+	int			mandlock_before, mandlock_after;
+	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
+	int			file_owner;
+	int			need_iolock = (flags & ATTR_DMI) == 0;
+
+	vp = BHV_TO_VNODE(bdp);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+		return XFS_ERROR(EROFS);
+
+	/*
+	 * Cannot set certain attributes.
+	 */
+	mask = vap->va_mask;
+	if (mask & XFS_AT_NOSET) {
+		return XFS_ERROR(EINVAL);
+	}
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	/*
+	 * Timestamps do not need to be logged and hence do not
+	 * need to be done within a transaction.
+	 */
+	if (mask & XFS_AT_UPDTIMES) {
+		ASSERT((mask & ~XFS_AT_UPDTIMES) == 0);
+		timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) |
+			    ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) |
+			    ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0);
+		xfs_ichgtime(ip, timeflags);
+		return 0;
+	}
+
+	olddquot1 = olddquot2 = NULL;
+	udqp = gdqp = NULL;
+
+	/*
+	 * If disk quotas are on, we make sure that the dquots do exist on disk
+	 * before we start any other transactions. Trying to do this later
+	 * is messy. We don't care to take a readlock to look at the ids
+	 * in the inode here, because we can't hold it across the trans_reserve.
+	 * If the IDs do change before we take the ilock, we're covered
+	 * because the i_*dquot fields will get updated anyway.
+	 */
+	if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) {
+		uint	qflags = 0;
+
+		if (mask & XFS_AT_UID) {
+			uid = vap->va_uid;
+			qflags |= XFS_QMOPT_UQUOTA;
+		} else {
+			uid = ip->i_d.di_uid;
+		}
+		if (mask & XFS_AT_GID) {
+			gid = vap->va_gid;
+			qflags |= XFS_QMOPT_GQUOTA;
+		}  else {
+			gid = ip->i_d.di_gid;
+		}
+		/*
+		 * We take a reference when we initialize udqp and gdqp,
+		 * so it is important that we never blindly double trip on
+		 * the same variable. See xfs_create() for an example.
+		 */
+		ASSERT(udqp == NULL);
+		ASSERT(gdqp == NULL);
+		code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp);
+		if (code)
+			return (code);
+	}
+
+	/*
+	 * For the other attributes, we acquire the inode lock and
+	 * first do an error checking pass.
+	 */
+	tp = NULL;
+	lock_flags = XFS_ILOCK_EXCL;
+	if (!(mask & XFS_AT_SIZE)) {
+		if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) ||
+		    (mp->m_flags & XFS_MOUNT_WSYNC)) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+			commit_flags = 0;
+			if ((code = xfs_trans_reserve(tp, 0,
+						     XFS_ICHANGE_LOG_RES(mp), 0,
+						     0, 0))) {
+				lock_flags = 0;
+				goto error_return;
+			}
+		}
+	} else {
+		if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
+		    !(flags & ATTR_DMI)) {
+			int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
+			code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
+				vap->va_size, 0, dmflags, NULL);
+			if (code) {
+				lock_flags = 0;
+				goto error_return;
+			}
+		}
+		if (need_iolock)
+			lock_flags |= XFS_IOLOCK_EXCL;
+	}
+
+	xfs_ilock(ip, lock_flags);
+
+	/* boolean: are we the file owner? */
+	file_owner = (current_fsuid(credp) == ip->i_d.di_uid);
+
+	/*
+	 * Change various properties of a file.
+	 * Only the owner or users with CAP_FOWNER
+	 * capability may do these things.
+	 */
+	if (mask &
+	    (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID|
+	     XFS_AT_GID|XFS_AT_PROJID)) {
+		/*
+		 * CAP_FOWNER overrides the following restrictions:
+		 *
+		 * The user ID of the calling process must be equal
+		 * to the file owner ID, except in cases where the
+		 * CAP_FSETID capability is applicable.
+		 */
+		if (!file_owner && !capable(CAP_FOWNER)) {
+			code = XFS_ERROR(EPERM);
+			goto error_return;
+		}
+
+		/*
+		 * CAP_FSETID overrides the following restrictions:
+		 *
+		 * The effective user ID of the calling process shall match
+		 * the file owner when setting the set-user-ID and
+		 * set-group-ID bits on that file.
+		 *
+		 * The effective group ID or one of the supplementary group
+		 * IDs of the calling process shall match the group owner of
+		 * the file when setting the set-group-ID bit on that file
+		 */
+		if (mask & XFS_AT_MODE) {
+			mode_t m = 0;
+
+			if ((vap->va_mode & S_ISUID) && !file_owner)
+				m |= S_ISUID;
+			if ((vap->va_mode & S_ISGID) &&
+			    !in_group_p((gid_t)ip->i_d.di_gid))
+				m |= S_ISGID;
+#if 0
+			/* Linux allows this, Irix doesn't. */
+			if ((vap->va_mode & S_ISVTX) && vp->v_type != VDIR)
+				m |= S_ISVTX;
+#endif
+			if (m && !capable(CAP_FSETID))
+				vap->va_mode &= ~m;
+		}
+	}
+
+	/*
+	 * Change file ownership.  Must be the owner or privileged.
+	 * If the system was configured with the "restricted_chown"
+	 * option, the owner is not permitted to give away the file,
+	 * and can change the group id only to a group of which he
+	 * or she is a member.
+	 */
+	if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
+		/*
+		 * These IDs could have changed since we last looked at them.
+		 * But, we're assured that if the ownership did change
+		 * while we didn't have the inode locked, the inode's dquot(s)
+		 * would have changed also.
+		 */
+		iuid = ip->i_d.di_uid;
+		iprojid = ip->i_d.di_projid;
+		igid = ip->i_d.di_gid;
+		gid = (mask & XFS_AT_GID) ? vap->va_gid : igid;
+		uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid;
+		projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid :
+			 iprojid;
+
+		/*
+		 * CAP_CHOWN overrides the following restrictions:
+		 *
+		 * If _POSIX_CHOWN_RESTRICTED is defined, this capability
+		 * shall override the restriction that a process cannot
+		 * change the user ID of a file it owns and the restriction
+		 * that the group ID supplied to the chown() function
+		 * shall be equal to either the group ID or one of the
+		 * supplementary group IDs of the calling process.
+		 *
+		 * XXX: How does restricted_chown affect projid?
+		 */
+		if (restricted_chown &&
+		    (iuid != uid || (igid != gid &&
+				     !in_group_p((gid_t)gid))) &&
+		    !capable(CAP_CHOWN)) {
+			code = XFS_ERROR(EPERM);
+			goto error_return;
+		}
+		/*
+		 * Do a quota reservation only if uid or gid is actually
+		 * going to change.
+		 */
+		if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
+		    (XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
+			ASSERT(tp);
+			code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
+						capable(CAP_FOWNER) ?
+						XFS_QMOPT_FORCE_RES : 0);
+			if (code)	/* out of quota */
+				goto error_return;
+		}
+	}
+
+	/*
+	 * Truncate file.  Must have write permission and not be a directory.
+	 */
+	if (mask & XFS_AT_SIZE) {
+		/* Short circuit the truncate case for zero length files */
+		if ((vap->va_size == 0) &&
+		   (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			lock_flags &= ~XFS_ILOCK_EXCL;
+			if (mask & XFS_AT_CTIME)
+				xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+			code = 0;
+			goto error_return;
+		}
+
+		if (vp->v_type == VDIR) {
+			code = XFS_ERROR(EISDIR);
+			goto error_return;
+		} else if (vp->v_type != VREG) {
+			code = XFS_ERROR(EINVAL);
+			goto error_return;
+		}
+		/*
+		 * Make sure that the dquots are attached to the inode.
+		 */
+		if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED)))
+			goto error_return;
+	}
+
+	/*
+	 * Change file access or modified times.
+	 */
+	if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
+		if (!file_owner) {
+			if ((flags & ATTR_UTIME) &&
+			    !capable(CAP_FOWNER)) {
+				code = XFS_ERROR(EPERM);
+				goto error_return;
+			}
+		}
+	}
+
+	/*
+	 * Change extent size or realtime flag.
+	 */
+	if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
+		/*
+		 * Can't change extent size if any extents are allocated.
+		 */
+		if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+		    (mask & XFS_AT_EXTSIZE) &&
+		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
+		     vap->va_extsize) ) {
+			code = XFS_ERROR(EINVAL);	/* EFBIG? */
+			goto error_return;
+		}
+
+		/*
+		 * Can't set extent size unless the file is marked, or
+		 * about to be marked as a realtime file.
+		 *
+		 * This check will be removed when support for fixed-size
+		 * extents with buffered data writes is implemented.
+		 *
+		 */
+		if ((mask & XFS_AT_EXTSIZE)			&&
+		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
+		     vap->va_extsize) &&
+		    (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
+		       ((mask & XFS_AT_XFLAGS) &&
+			(vap->va_xflags & XFS_XFLAG_REALTIME))))) {
+			code = XFS_ERROR(EINVAL);
+			goto error_return;
+		}
+
+		/*
+		 * Can't change realtime flag if any extents are allocated.
+		 */
+		if (ip->i_d.di_nextents && (mask & XFS_AT_XFLAGS) &&
+		    (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
+		    (vap->va_xflags & XFS_XFLAG_REALTIME)) {
+			code = XFS_ERROR(EINVAL);	/* EFBIG? */
+			goto error_return;
+		}
+		/*
+		 * Extent size must be a multiple of the appropriate block
+		 * size, if set at all.
+		 */
+		if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) {
+			xfs_extlen_t	size;
+
+			if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
+			    ((mask & XFS_AT_XFLAGS) &&
+			    (vap->va_xflags & XFS_XFLAG_REALTIME))) {
+				size = mp->m_sb.sb_rextsize <<
+				       mp->m_sb.sb_blocklog;
+			} else {
+				size = mp->m_sb.sb_blocksize;
+			}
+			if (vap->va_extsize % size) {
+				code = XFS_ERROR(EINVAL);
+				goto error_return;
+			}
+		}
+		/*
+		 * If realtime flag is set then must have realtime data.
+		 */
+		if ((mask & XFS_AT_XFLAGS) &&
+		    (vap->va_xflags & XFS_XFLAG_REALTIME)) {
+			if ((mp->m_sb.sb_rblocks == 0) ||
+			    (mp->m_sb.sb_rextsize == 0) ||
+			    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
+				code = XFS_ERROR(EINVAL);
+				goto error_return;
+			}
+		}
+
+		/*
+		 * Can't modify an immutable/append-only file unless
+		 * we have appropriate permission.
+		 */
+		if ((mask & XFS_AT_XFLAGS) &&
+		    (ip->i_d.di_flags &
+				(XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
+		     (vap->va_xflags &
+				(XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
+		    !capable(CAP_LINUX_IMMUTABLE)) {
+			code = XFS_ERROR(EPERM);
+			goto error_return;
+		}
+	}
+
+	/*
+	 * Now we can make the changes.  Before we join the inode
+	 * to the transaction, if XFS_AT_SIZE is set then take care of
+	 * the part of the truncation that must be done without the
+	 * inode lock.  This needs to be done before joining the inode
+	 * to the transaction, because the inode cannot be unlocked
+	 * once it is a part of the transaction.
+	 */
+	if (mask & XFS_AT_SIZE) {
+		code = 0;
+		if (vap->va_size > ip->i_d.di_size)
+			code = xfs_igrow_start(ip, vap->va_size, credp);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		if (!code)
+			code = xfs_itruncate_data(ip, vap->va_size);
+		if (code) {
+			ASSERT(tp == NULL);
+			lock_flags &= ~XFS_ILOCK_EXCL;
+			ASSERT(lock_flags == XFS_IOLOCK_EXCL);
+			goto error_return;
+		}
+		tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
+		if ((code = xfs_trans_reserve(tp, 0,
+					     XFS_ITRUNCATE_LOG_RES(mp), 0,
+					     XFS_TRANS_PERM_LOG_RES,
+					     XFS_ITRUNCATE_LOG_COUNT))) {
+			xfs_trans_cancel(tp, 0);
+			if (need_iolock)
+				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			return code;
+		}
+		commit_flags = XFS_TRANS_RELEASE_LOG_RES;
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+	}
+
+	if (tp) {
+		xfs_trans_ijoin(tp, ip, lock_flags);
+		xfs_trans_ihold(tp, ip);
+	}
+
+	/* determine whether mandatory locking mode changes */
+	mandlock_before = MANDLOCK(vp, ip->i_d.di_mode);
+
+	/*
+	 * Truncate file.  Must have write permission and not be a directory.
+	 */
+	if (mask & XFS_AT_SIZE) {
+		if (vap->va_size > ip->i_d.di_size) {
+			xfs_igrow_finish(tp, ip, vap->va_size,
+			    !(flags & ATTR_DMI));
+		} else if ((vap->va_size <= ip->i_d.di_size) ||
+			   ((vap->va_size == 0) && ip->i_d.di_nextents)) {
+			/*
+			 * signal a sync transaction unless
+			 * we're truncating an already unlinked
+			 * file on a wsync filesystem
+			 */
+			code = xfs_itruncate_finish(&tp, ip,
+					    (xfs_fsize_t)vap->va_size,
+					    XFS_DATA_FORK,
+					    ((ip->i_d.di_nlink != 0 ||
+					      !(mp->m_flags & XFS_MOUNT_WSYNC))
+					     ? 1 : 0));
+			if (code) {
+				goto abort_return;
+			}
+		}
+		/*
+		 * Have to do this even if the file's size doesn't change.
+		 */
+		timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+	}
+
+	/*
+	 * Change file access modes.
+	 */
+	if (mask & XFS_AT_MODE) {
+		ip->i_d.di_mode &= S_IFMT;
+		ip->i_d.di_mode |= vap->va_mode & ~S_IFMT;
+
+		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+		timeflags |= XFS_ICHGTIME_CHG;
+	}
+
+	/*
+	 * Change file ownership.  Must be the owner or privileged.
+	 * If the system was configured with the "restricted_chown"
+	 * option, the owner is not permitted to give away the file,
+	 * and can change the group id only to a group of which he
+	 * or she is a member.
+	 */
+	if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
+		/*
+		 * CAP_FSETID overrides the following restrictions:
+		 *
+		 * The set-user-ID and set-group-ID bits of a file will be
+		 * cleared upon successful return from chown()
+		 */
+		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+		    !capable(CAP_FSETID)) {
+			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+		}
+
+		/*
+		 * Change the ownerships and register quota modifications
+		 * in the transaction.
+		 */
+		if (iuid != uid) {
+			if (XFS_IS_UQUOTA_ON(mp)) {
+				ASSERT(mask & XFS_AT_UID);
+				ASSERT(udqp);
+				olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+							&ip->i_udquot, udqp);
+			}
+			ip->i_d.di_uid = uid;
+		}
+		if (igid != gid) {
+			if (XFS_IS_GQUOTA_ON(mp)) {
+				ASSERT(mask & XFS_AT_GID);
+				ASSERT(gdqp);
+				olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+							&ip->i_gdquot, gdqp);
+			}
+			ip->i_d.di_gid = gid;
+		}
+		if (iprojid != projid) {
+			ip->i_d.di_projid = projid;
+			/*
+			 * We may have to rev the inode as well as
+			 * the superblock version number since projids didn't
+			 * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
+			 */
+			if (ip->i_d.di_version == XFS_DINODE_VERSION_1)
+				xfs_bump_ino_vers2(tp, ip);
+		}
+
+		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+		timeflags |= XFS_ICHGTIME_CHG;
+	}
+
+
+	/*
+	 * Change file access or modified times.
+	 */
+	if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
+		if (mask & XFS_AT_ATIME) {
+			ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec;
+			ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec;
+			ip->i_update_core = 1;
+			timeflags &= ~XFS_ICHGTIME_ACC;
+		}
+		if (mask & XFS_AT_MTIME) {
+			ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec;
+			ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec;
+			timeflags &= ~XFS_ICHGTIME_MOD;
+			timeflags |= XFS_ICHGTIME_CHG;
+		}
+		if (tp && (flags & ATTR_UTIME))
+			xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+	}
+
+	/*
+	 * Change XFS-added attributes.
+	 */
+	if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
+		if (mask & XFS_AT_EXTSIZE) {
+			/*
+			 * Converting bytes to fs blocks.
+			 */
+			ip->i_d.di_extsize = vap->va_extsize >>
+				mp->m_sb.sb_blocklog;
+		}
+		if (mask & XFS_AT_XFLAGS) {
+			uint	di_flags;
+
+			/* can't set PREALLOC this way, just preserve it */
+			di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
+			if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
+				di_flags |= XFS_DIFLAG_IMMUTABLE;
+			if (vap->va_xflags & XFS_XFLAG_APPEND)
+				di_flags |= XFS_DIFLAG_APPEND;
+			if (vap->va_xflags & XFS_XFLAG_SYNC)
+				di_flags |= XFS_DIFLAG_SYNC;
+			if (vap->va_xflags & XFS_XFLAG_NOATIME)
+				di_flags |= XFS_DIFLAG_NOATIME;
+			if (vap->va_xflags & XFS_XFLAG_NODUMP)
+				di_flags |= XFS_DIFLAG_NODUMP;
+			if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+				if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
+					di_flags |= XFS_DIFLAG_RTINHERIT;
+				if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
+					di_flags |= XFS_DIFLAG_NOSYMLINKS;
+			} else {
+				if (vap->va_xflags & XFS_XFLAG_REALTIME) {
+					di_flags |= XFS_DIFLAG_REALTIME;
+					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
+				} else {
+					ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
+				}
+			}
+			ip->i_d.di_flags = di_flags;
+		}
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		timeflags |= XFS_ICHGTIME_CHG;
+	}
+
+	/*
+	 * Change file inode change time only if XFS_AT_CTIME set
+	 * AND we have been called by a DMI function.
+	 */
+
+	if ((flags & ATTR_DMI) && (mask & XFS_AT_CTIME)) {
+		ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec;
+		ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec;
+		ip->i_update_core = 1;
+		timeflags &= ~XFS_ICHGTIME_CHG;
+	}
+
+	/*
+	 * Send out timestamp changes that need to be set to the
+	 * current time.  Not done when called by a DMI function.
+	 */
+	if (timeflags && !(flags & ATTR_DMI))
+		xfs_ichgtime(ip, timeflags);
+
+	XFS_STATS_INC(xs_ig_attrchg);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 * This is slightly sub-optimal in that truncates require
+	 * two sync transactions instead of one for wsync filesystems.
+	 * One for the truncate and one for the timestamps since we
+	 * don't want to change the timestamps unless we're sure the
+	 * truncate worked.  Truncates are less than 1% of the laddis
+	 * mix so this probably isn't worth the trouble to optimize.
+	 */
+	code = 0;
+	if (tp) {
+		if (mp->m_flags & XFS_MOUNT_WSYNC)
+			xfs_trans_set_sync(tp);
+
+		code = xfs_trans_commit(tp, commit_flags, NULL);
+	}
+
+	/*
+	 * If the (regular) file's mandatory locking mode changed, then
+	 * notify the vnode.  We do this under the inode lock to prevent
+	 * racing calls to vop_vnode_change.
+	 */
+	mandlock_after = MANDLOCK(vp, ip->i_d.di_mode);
+	if (mandlock_before != mandlock_after) {
+		VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING,
+				 mandlock_after);
+	}
+
+	xfs_iunlock(ip, lock_flags);
+
+	/*
+	 * Release any dquot(s) the inode had kept before chown.
+	 */
+	XFS_QM_DQRELE(mp, olddquot1);
+	XFS_QM_DQRELE(mp, olddquot2);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	if (code) {
+		return code;
+	}
+
+	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) &&
+	    !(flags & ATTR_DMI)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, vp, DM_RIGHT_NULL,
+					NULL, DM_RIGHT_NULL, NULL, NULL,
+					0, 0, AT_DELAY_FLAG(flags));
+	}
+	return 0;
+
+ abort_return:
+	commit_flags |= XFS_TRANS_ABORT;
+	/* FALLTHROUGH */
+ error_return:
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+	if (tp) {
+		xfs_trans_cancel(tp, commit_flags);
+	}
+	if (lock_flags != 0) {
+		xfs_iunlock(ip, lock_flags);
+	}
+	return code;
+}
+
+
+/*
+ * xfs_access
+ * Null conversion from vnode mode bits to inode mode bits, as in efs.
+ */
+STATIC int
+xfs_access(
+	bhv_desc_t	*bdp,
+	int		mode,
+	cred_t		*credp)
+{
+	xfs_inode_t	*ip;
+	int		error;
+
+	vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
+					       (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_iaccess(ip, mode, credp);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return error;
+}
+
+
+/*
+ * xfs_readlink
+ *
+ * Read the target of a symbolic link into the caller's uio, whether the
+ * link is stored inline in the inode or out in remote blocks.
+ */
+STATIC int
+xfs_readlink(
+	bhv_desc_t	*bdp,
+	uio_t		*uiop,
+	int		ioflags,
+	cred_t		*credp)
+{
+	xfs_inode_t     *ip;
+	int		count;
+	xfs_off_t	offset;
+	int		pathlen;
+	vnode_t		*vp;
+	int		error = 0;
+	xfs_mount_t	*mp;
+	int             nmaps;
+	xfs_bmbt_irec_t mval[SYMLINK_MAPS];
+	xfs_daddr_t	d;
+	int		byte_cnt;
+	int		n;
+	xfs_buf_t	*bp;
+
+	vp = BHV_TO_VNODE(bdp);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
+
+	offset = uiop->uio_offset;
+	count = uiop->uio_resid;
+
+	if (offset < 0) {
+		error = XFS_ERROR(EINVAL);
+		goto error_return;
+	}
+	if (count <= 0) {
+		error = 0;
+		goto error_return;
+	}
+
+	if (!(ioflags & IO_INVIS)) {
+		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+	}
+
+	/*
+	 * See if the symlink is stored inline.
+	 */
+	pathlen = (int)ip->i_d.di_size;
+
+	if (ip->i_df.if_flags & XFS_IFINLINE) {
+		error = uio_read(ip->i_df.if_u1.if_data, pathlen, uiop);
+	}
+	else {
+		/*
+		 * Symlink not inline.  Call bmap to get it in.
+		 */
+		nmaps = SYMLINK_MAPS;
+
+		error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen),
+				  0, NULL, 0, mval, &nmaps, NULL);
+
+		if (error) {
+			goto error_return;
+		}
+
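+		/*
+		 * Copy each mapped extent of the link target out to the
+		 * caller's uio, clamping the last chunk to the remaining
+		 * pathlen.
+		 */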
+		for (n = 0; n < nmaps; n++) {
+			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
+			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
+			bp = xfs_buf_read(mp->m_ddev_targp, d,
+				      BTOBB(byte_cnt), 0);
+			error = XFS_BUF_GETERROR(bp);
+			if (error) {
+				xfs_ioerror_alert("xfs_readlink",
+					  ip->i_mount, bp, XFS_BUF_ADDR(bp));
+				xfs_buf_relse(bp);
+				goto error_return;
+			}
+			if (pathlen < byte_cnt)
+				byte_cnt = pathlen;
+			pathlen -= byte_cnt;
+
+			error = uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop);
+			xfs_buf_relse (bp);
+		}
+
+	}
+
+
+error_return:
+
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	return error;
+}
+
+
+/*
+ * xfs_fsync
+ *
+ * This is called to sync the inode and its data out to disk.
+ * We need to hold the I/O lock while flushing the data, and
+ * the inode lock while flushing the inode.  The inode lock CANNOT
+ * be held while flushing the data, so acquire after we're done
+ * with that.
+ */
+STATIC int
+xfs_fsync(
+	bhv_desc_t	*bdp,
+	int		flag,
+	cred_t		*credp,
+	xfs_off_t	start,
+	xfs_off_t	stop)
+{
+	xfs_inode_t	*ip;
+	xfs_trans_t	*tp;
+	int		error;
+
+	vn_trace_entry(BHV_TO_VNODE(bdp),
+			__FUNCTION__, (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+
+	ASSERT(start >= 0 && stop >= -1);
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return XFS_ERROR(EIO);
+
+	/*
+	 * We always need to make sure that the required inode state
+	 * is safe on disk.  The vnode might be clean but we still
+	 * might need to force the log because of committed
+	 * transactions that haven't hit the disk yet.
+	 * Likewise, there could be unflushed non-transactional
+	 * changes to the inode core that have to go to disk.
+	 *
+	 * The following code depends on one assumption:  that
+	 * any transaction that changes an inode logs the core
+	 * because it has to change some field in the inode core
+	 * (typically nextents or nblocks).  That assumption
+	 * implies that any transactions against an inode will
+	 * catch any non-transactional updates.  If inode-altering
+	 * transactions exist that violate this assumption, the
+	 * code breaks.  Right now, it figures that if the involved
+	 * update_* field is clear and the inode is unpinned, the
+	 * inode is clean.  Either it's been flushed or it's been
+	 * committed and the commit has hit the disk unpinning the inode.
+	 * (Note that xfs_inode_item_format() called at commit clears
+	 * the update_* fields.)
+	 */
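+	/*
+	 * In short: if the relevant update_* flag is clear and the inode
+	 * is not pinned, there is nothing to do; if it is pinned, forcing
+	 * the log is enough; otherwise log the inode core in a new
+	 * (possibly synchronous) transaction.
+	 */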
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+	/* If we are flushing data then we care about update_size
+	 * being set, otherwise we care about update_core
+	 */
+	if ((flag & FSYNC_DATA) ?
+			(ip->i_update_size == 0) :
+			(ip->i_update_core == 0)) {
+		/*
+		 * Timestamps/size haven't changed since last inode
+		 * flush or inode transaction commit.  That means
+		 * either nothing got written or a transaction
+		 * committed which caught the updates.	If the
+		 * latter happened and the transaction hasn't
+		 * hit the disk yet, the inode will still
+		 * be pinned.  If it is, force the log.
+		 */
+
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+		if (xfs_ipincount(ip)) {
+			xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+				      XFS_LOG_FORCE |
+				      ((flag & FSYNC_WAIT)
+				       ? XFS_LOG_SYNC : 0));
+		}
+		error = 0;
+	} else	{
+		/*
+		 * Kick off a transaction to log the inode
+		 * core to get the updates.  Make it
+		 * sync if FSYNC_WAIT is passed in (which
+		 * is done by everybody but specfs).  The
+		 * sync transaction will also force the log.
+		 */
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
+		if ((error = xfs_trans_reserve(tp, 0,
+				XFS_FSYNC_TS_LOG_RES(ip->i_mount),
+				0, 0, 0)))  {
+			xfs_trans_cancel(tp, 0);
+			return error;
+		}
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+		/*
+		 * Note - it's possible that we might have pushed
+		 * ourselves out of the way during trans_reserve
+		 * which would flush the inode.	 But there's no
+		 * guarantee that the inode buffer has actually
+		 * gone out yet (it's delwri).	Plus the buffer
+		 * could be pinned anyway if it's part of an
+		 * inode in another recent transaction.	 So we
+		 * play it safe and fire off the transaction anyway.
+		 */
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		if (flag & FSYNC_WAIT)
+			xfs_trans_set_sync(tp);
+		error = xfs_trans_commit(tp, 0, NULL);
+
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+	return error;
+}
+
+/*
+ * This is called by xfs_inactive to free any blocks beyond eof,
+ * when the link count isn't zero.
+ */
+STATIC int
+xfs_inactive_free_eofblocks(
+	xfs_mount_t	*mp,
+	xfs_inode_t	*ip)
+{
+	xfs_trans_t	*tp;
+	int		error;
+	xfs_fileoff_t	end_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_filblks_t	map_len;
+	int		nimaps;
+	xfs_bmbt_irec_t	imap;
+
+	/*
+	 * Figure out if there are any blocks beyond the end
+	 * of the file.  If not, then there is nothing to do.
+	 */
+	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_d.di_size));
+	last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+	map_len = last_fsb - end_fsb;
+	if (map_len <= 0)
+		return (0);
+
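+	/*
+	 * Probe with a read-only, transactionless bmapi call first, so we
+	 * only pay for a truncate transaction if something is actually
+	 * mapped beyond EOF.
+	 */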
+	nimaps = 1;
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
+			  NULL, 0, &imap, &nimaps, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (!error && (nimaps != 0) &&
+	    (imap.br_startblock != HOLESTARTBLOCK)) {
+		/*
+		 * Attach the dquots to the inode up front.
+		 */
+		if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+			return (error);
+
+		/*
+		 * There are blocks after the end of file.
+		 * Free them up now by truncating the file to
+		 * its current size.
+		 */
+		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+
+		/*
+		 * Do the xfs_itruncate_start() call before reserving any
+		 * log space because itruncate_start will call into the
+		 * buffer cache and we can't do that within a transaction.
+		 */
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
+				    ip->i_d.di_size);
+
+		error = xfs_trans_reserve(tp, 0,
+					  XFS_ITRUNCATE_LOG_RES(mp),
+					  0, XFS_TRANS_PERM_LOG_RES,
+					  XFS_ITRUNCATE_LOG_COUNT);
+		if (error) {
+			ASSERT(XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			return (error);
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip,
+				XFS_IOLOCK_EXCL |
+				XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		error = xfs_itruncate_finish(&tp, ip,
+					     ip->i_d.di_size,
+					     XFS_DATA_FORK,
+					     0);
+		/*
+		 * If we get an error at this point we
+		 * simply don't bother truncating the file.
+		 */
+		if (error) {
+			xfs_trans_cancel(tp,
+					 (XFS_TRANS_RELEASE_LOG_RES |
+					  XFS_TRANS_ABORT));
+		} else {
+			error = xfs_trans_commit(tp,
+						XFS_TRANS_RELEASE_LOG_RES,
+						NULL);
+		}
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	}
+	return (error);
+}
+
+/*
+ * Free a symlink that has blocks associated with it.
+ */
+STATIC int
+xfs_inactive_symlink_rmt(
+	xfs_inode_t	*ip,
+	xfs_trans_t	**tpp)
+{
+	xfs_buf_t	*bp;
+	int		committed;
+	int		done;
+	int		error;
+	xfs_fsblock_t	first_block;
+	xfs_bmap_free_t	free_list;
+	int		i;
+	xfs_mount_t	*mp;
+	xfs_bmbt_irec_t	mval[SYMLINK_MAPS];
+	int		nmaps;
+	xfs_trans_t	*ntp;
+	int		size;
+	xfs_trans_t	*tp;
+
+	tp = *tpp;
+	mp = ip->i_mount;
+	ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip));
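+	/*
+	 * Outline of the steps below: unmap the remote symlink blocks,
+	 * let xfs_bmap_finish() commit the transaction that logs the EFI
+	 * and hand back one containing the extent freeing and EFDs,
+	 * commit that as well, and finally return a fresh transaction
+	 * with an itruncate reservation and the inode still locked.
+	 */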
+	/*
+	 * We're freeing a symlink that has some
+	 * blocks allocated to it.  Free the
+	 * blocks here.  We know that we've got
+	 * either 1 or 2 extents and that we can
+	 * free them all in one bunmapi call.
+	 */
+	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
+	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		xfs_trans_cancel(tp, 0);
+		*tpp = NULL;
+		return error;
+	}
+	/*
+	 * Lock the inode, fix the size, and join it to the transaction.
+	 * Hold it so that, in the normal path, we still have it locked for
+	 * the second transaction.  In the error paths we need it
+	 * held so the cancel won't rele it, see below.
+	 */
+	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	size = (int)ip->i_d.di_size;
+	ip->i_d.di_size = 0;
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	/*
+	 * Find the block(s) so we can inval and unmap them.
+	 */
+	done = 0;
+	XFS_BMAP_INIT(&free_list, &first_block);
+	nmaps = sizeof(mval) / sizeof(mval[0]);
+	if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
+			XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
+			&free_list)))
+		goto error0;
+	/*
+	 * Invalidate the block(s).
+	 */
+	for (i = 0; i < nmaps; i++) {
+		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
+			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
+		xfs_trans_binval(tp, bp);
+	}
+	/*
+	 * Unmap the dead block(s) to the free_list.
+	 */
+	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+			&first_block, &free_list, &done)))
+		goto error1;
+	ASSERT(done);
+	/*
+	 * Commit the first transaction.  This logs the EFI and the inode.
+	 */
+	if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed)))
+		goto error1;
+	/*
+	 * The transaction must have been committed, since there were
+	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
+	 * The new tp has the extent freeing and EFDs.
+	 */
+	ASSERT(committed);
+	/*
+	 * The first xact was committed, so add the inode to the new one.
+	 * Mark it dirty so it will be logged and moved forward in the log as
+	 * part of every commit.
+	 */
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	/*
+	 * Get a new, empty transaction to return to our caller.
+	 */
+	ntp = xfs_trans_dup(tp);
+	/*
+	 * Commit the transaction containing extent freeing and EFD's.
+	 * If we get an error on the commit here or on the reserve below,
+	 * we need to unlock the inode since the new transaction doesn't
+	 * have the inode attached.
+	 */
+	error = xfs_trans_commit(tp, 0, NULL);
+	tp = ntp;
+	if (error) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		goto error0;
+	}
+	/*
+	 * Remove the memory for extent descriptions (just bookkeeping).
+	 */
+	if (ip->i_df.if_bytes)
+		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
+	ASSERT(ip->i_df.if_bytes == 0);
+	/*
+	 * Put an itruncate log reservation in the new transaction
+	 * for our caller.
+	 */
+	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		goto error0;
+	}
+	/*
+	 * Return with the inode locked but not joined to the transaction.
+	 */
+	*tpp = tp;
+	return 0;
+
+ error1:
+	xfs_bmap_cancel(&free_list);
+ error0:
+	/*
+	 * Have to come here with the inode locked and either
+	 * (held and in the transaction) or (not in the transaction).
+	 * If the inode isn't held then cancel would iput it, but
+	 * that's wrong since this is inactive and the vnode ref
+	 * count is 0 already.
+	 * Cancel won't do anything to the inode if held, but it still
+	 * needs to be locked until the cancel is done, if it was
+	 * joined to the transaction.
+	 */
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	*tpp = NULL;
+	return error;
+
+}
+
+STATIC int
+xfs_inactive_symlink_local(
+	xfs_inode_t	*ip,
+	xfs_trans_t	**tpp)
+{
+	int		error;
+
+	ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
+	/*
+	 * We're freeing a symlink which fit into
+	 * the inode.  Just free the memory used
+	 * to hold the old symlink.
+	 */
+	error = xfs_trans_reserve(*tpp, 0,
+				  XFS_ITRUNCATE_LOG_RES(ip->i_mount),
+				  0, XFS_TRANS_PERM_LOG_RES,
+				  XFS_ITRUNCATE_LOG_COUNT);
+
+	if (error) {
+		xfs_trans_cancel(*tpp, 0);
+		*tpp = NULL;
+		return (error);
+	}
+	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+	/*
+	 * Zero length symlinks _can_ exist.
+	 */
+	if (ip->i_df.if_bytes > 0) {
+		xfs_idata_realloc(ip,
+				  -(ip->i_df.if_bytes),
+				  XFS_DATA_FORK);
+		ASSERT(ip->i_df.if_bytes == 0);
+	}
+	return (0);
+}
+
+/*
+ * Blow away the attribute fork of an inode that is being inactivated:
+ * commit the caller's transaction, run xfs_attr_inactive() with only the
+ * iolock held, then hand back a fresh transaction reserved for the inode
+ * free, with the inode relocked and rejoined.
+ */
+STATIC int
+xfs_inactive_attrs(
+	xfs_inode_t	*ip,
+	xfs_trans_t	**tpp)
+{
+	xfs_trans_t	*tp;
+	int		error;
+	xfs_mount_t	*mp;
+
+	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+	tp = *tpp;
+	mp = ip->i_mount;
+	ASSERT(ip->i_d.di_forkoff != 0);
+	xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	error = xfs_attr_inactive(ip);
+	if (error) {
+		*tpp = NULL;
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		return (error); /* goto out*/
+	}
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+	error = xfs_trans_reserve(tp, 0,
+				  XFS_IFREE_LOG_RES(mp),
+				  0, XFS_TRANS_PERM_LOG_RES,
+				  XFS_INACTIVE_LOG_COUNT);
+	if (error) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		xfs_trans_cancel(tp, 0);
+		*tpp = NULL;
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		return (error);
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+	ASSERT(ip->i_d.di_anextents == 0);
+
+	*tpp = tp;
+	return (0);
+}
+
+STATIC int
+xfs_release(
+	bhv_desc_t	*bdp)
+{
+	xfs_inode_t	*ip;
+	vnode_t		*vp;
+	xfs_mount_t	*mp;
+	int		error;
+
+	vp = BHV_TO_VNODE(bdp);
+	ip = XFS_BHVTOI(bdp);
+
+	if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) {
+		return 0;
+	}
+
+	/* If this is a read-only mount, don't do this (would generate I/O) */
+	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+		return 0;
+
+#ifdef HAVE_REFCACHE
+	/* If we are in the NFS reference cache then don't do this now */
+	if (ip->i_refcache)
+		return 0;
+#endif
+
+	mp = ip->i_mount;
+
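+	/*
+	 * When a still-linked regular file is released, free any blocks
+	 * allocated beyond EOF, unless the inode is marked PREALLOC or
+	 * APPEND.
+	 */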
+	if (ip->i_d.di_nlink != 0) {
+		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
+		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
+		    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) {
+			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
+				return (error);
+			/* Update linux inode block count after free above */
+			LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+				ip->i_d.di_nblocks + ip->i_delayed_blks);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * xfs_inactive
+ *
+ * This is called when the vnode reference count for the vnode
+ * goes to zero.  If the file has been unlinked, then it must
+ * now be truncated.  Also, we clear all of the read-ahead state
+ * kept for the inode here since the file is now closed.
+ */
+STATIC int
+xfs_inactive(
+	bhv_desc_t	*bdp,
+	cred_t		*credp)
+{
+	xfs_inode_t	*ip;
+	vnode_t		*vp;
+	xfs_bmap_free_t	free_list; 
+	xfs_fsblock_t	first_block;
+	int		committed;
+	xfs_trans_t	*tp;
+	xfs_mount_t	*mp;
+	int		error;
+	int		truncate;
+
+	vp = BHV_TO_VNODE(bdp);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+
+	/*
+	 * If the inode is already free, then there can be nothing
+	 * to clean up here.
+	 */
+	if (ip->i_d.di_mode == 0 || VN_BAD(vp)) {
+		ASSERT(ip->i_df.if_real_bytes == 0);
+		ASSERT(ip->i_df.if_broot_bytes == 0);
+		return VN_INACTIVE_CACHE;
+	}
+
+	/*
+	 * Only do a truncate if it's a regular file with
+	 * some actual space in it.  It's OK to look at the
+	 * inode's fields without the lock because we're the
+	 * only one with a reference to the inode.
+	 */
+	truncate = ((ip->i_d.di_nlink == 0) &&
+	    ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) &&
+	    ((ip->i_d.di_mode & S_IFMT) == S_IFREG));
+
+	mp = ip->i_mount;
+
+	if (ip->i_d.di_nlink == 0 &&
+	    DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) {
+		(void) XFS_SEND_DESTROY(mp, vp, DM_RIGHT_NULL);
+	}
+
+	error = 0;
+
+	/* If this is a read-only mount, don't do this (would generate I/O) */
+	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+		goto out;
+
+	if (ip->i_d.di_nlink != 0) {
+		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
+		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
+		    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) ||
+		     (ip->i_delayed_blks != 0))) {
+			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
+				return (VN_INACTIVE_CACHE);
+			/* Update linux inode block count after free above */
+			LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+				ip->i_d.di_nblocks + ip->i_delayed_blks);
+		}
+		goto out;
+	}
+
+	ASSERT(ip->i_d.di_nlink == 0);
+
+	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+		return (VN_INACTIVE_CACHE);
+
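+	/*
+	 * Pick the reservation based on what we are about to do: a
+	 * truncate needs an itruncate reservation (with the
+	 * xfs_itruncate_start() call made before reserving), symlinks
+	 * are handled by their own helpers, and everything else just
+	 * needs an ifree reservation.
+	 */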
+	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+	if (truncate) {
+		/*
+		 * Do the xfs_itruncate_start() call before
+		 * reserving any log space because itruncate_start
+		 * will call into the buffer cache and we can't
+		 * do that within a transaction.
+		 */
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+		xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
+
+		error = xfs_trans_reserve(tp, 0,
+					  XFS_ITRUNCATE_LOG_RES(mp),
+					  0, XFS_TRANS_PERM_LOG_RES,
+					  XFS_ITRUNCATE_LOG_COUNT);
+		if (error) {
+			/* Don't call itruncate_cleanup */
+			ASSERT(XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			return (VN_INACTIVE_CACHE);
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * normally, we have to run xfs_itruncate_finish sync.
+		 * But if the filesystem is wsync and we're in the inactive
+		 * path, then we know that nlink == 0, and that the
+		 * transaction that made nlink == 0 is permanently committed
+		 * since xfs_remove runs as a synchronous transaction.
+		 */
+		error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK,
+				(!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0));
+
+		if (error) {
+			xfs_trans_cancel(tp,
+				XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+			return (VN_INACTIVE_CACHE);
+		}
+	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) {
+
+		/*
+		 * If we get an error while cleaning up a
+		 * symlink we bail out.
+		 */
+		error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ?
+			xfs_inactive_symlink_rmt(ip, &tp) :
+			xfs_inactive_symlink_local(ip, &tp);
+
+		if (error) {
+			ASSERT(tp == NULL);
+			return (VN_INACTIVE_CACHE);
+		}
+
+		xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+	} else {
+		error = xfs_trans_reserve(tp, 0,
+					  XFS_IFREE_LOG_RES(mp),
+					  0, XFS_TRANS_PERM_LOG_RES,
+					  XFS_INACTIVE_LOG_COUNT);
+		if (error) {
+			ASSERT(XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			return (VN_INACTIVE_CACHE);
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+	}
+
+	/*
+	 * If there are attributes associated with the file
+	 * then blow them away now.  The code calls a routine
+	 * that recursively deconstructs the attribute fork.
+	 * We need to just commit the current transaction
+	 * because we can't use it for xfs_attr_inactive().
+	 */
+	if (ip->i_d.di_anextents > 0) {
+		error = xfs_inactive_attrs(ip, &tp);
+		/*
+		 * If we got an error, the transaction is already
+		 * cancelled, and the inode is unlocked. Just get out.
+		 */
+		 if (error)
+			 return (VN_INACTIVE_CACHE);
+	} else if (ip->i_afp) {
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+	}
+
+	/*
+	 * Free the inode.
+	 */
+	XFS_BMAP_INIT(&free_list, &first_block);
+	error = xfs_ifree(tp, ip, &free_list);
+	if (error) {
+		/*
+		 * If we fail to free the inode, shut down.  The cancel
+		 * might do that for us, but we need to make sure.  Otherwise the
+		 * inode might be lost for a long time or forever.
+		 */
+		if (!XFS_FORCED_SHUTDOWN(mp)) {
+			cmn_err(CE_NOTE,
+		"xfs_inactive:	xfs_ifree() returned an error = %d on %s",
+				error, mp->m_fsname);
+			xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+		}
+		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	} else {
+		/*
+		 * Credit the quota account(s). The inode is gone.
+		 */
+		XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+		/*
+		 * Just ignore errors at this point.  There is
+		 * nothing we can do except to try to keep going.
+		 */
+		(void) xfs_bmap_finish(&tp,  &free_list, first_block,
+				       &committed);
+		(void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	}
+	/*
+	 * Release the dquots held by inode, if any.
+	 */
+	XFS_QM_DQDETACH(mp, ip);
+
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+
+ out:
+	return VN_INACTIVE_CACHE;
+}
+
+
+/*
+ * xfs_lookup
+ */
+STATIC int
+xfs_lookup(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	vnode_t			**vpp,
+	int			flags,
+	vnode_t			*rdir,
+	cred_t			*credp)
+{
+	xfs_inode_t		*dp, *ip;
+	xfs_ino_t		e_inum;
+	int			error;
+	uint			lock_mode;
+	vnode_t			*dir_vp;
+
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	dp = XFS_BHVTOI(dir_bdp);
+
+	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+		return XFS_ERROR(EIO);
+
+	lock_mode = xfs_ilock_map_shared(dp);
+	error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip);
+	if (!error) {
+		*vpp = XFS_ITOV(ip);
+		ITRACE(ip);
+	}
+	xfs_iunlock_map_shared(dp, lock_mode);
+	return error;
+}
+
+
+/*
+ * xfs_create (create a new file).
+ */
+STATIC int
+xfs_create(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	vattr_t			*vap,
+	vnode_t			**vpp,
+	cred_t			*credp)
+{
+	char			*name = VNAME(dentry);
+	vnode_t			*dir_vp;
+	xfs_inode_t		*dp, *ip;
+	vnode_t		        *vp=NULL;
+	xfs_trans_t		*tp;
+	xfs_mount_t	        *mp;
+	xfs_dev_t		rdev;
+	int                     error;
+	xfs_bmap_free_t		free_list;
+	xfs_fsblock_t		first_block;
+	boolean_t		dp_joined_to_trans;
+	int			dm_event_sent = 0;
+	uint			cancel_flags;
+	int			committed;
+	xfs_prid_t		prid;
+	struct xfs_dquot	*udqp, *gdqp;
+	uint			resblks;
+	int			dm_di_mode;
+	int			namelen;
+
+	ASSERT(!*vpp);
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	dp = XFS_BHVTOI(dir_bdp);
+	mp = dp->i_mount;
+
+	dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
+	namelen = VNAMELEN(dentry);
+
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
+				dir_vp, DM_RIGHT_NULL, NULL,
+				DM_RIGHT_NULL, name, NULL,
+				dm_di_mode, 0, 0);
+
+		if (error)
+			return error;
+		dm_event_sent = 1;
+	}
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	/* Return through std_return after this point. */
+
+	udqp = gdqp = NULL;
+	if (vap->va_mask & XFS_AT_PROJID)
+		prid = (xfs_prid_t)vap->va_projid;
+	else
+		prid = (xfs_prid_t)dfltprid;
+
+	/*
+	 * Make sure that we have allocated dquot(s) on disk.
+	 */
+	error = XFS_QM_DQVOPALLOC(mp, dp,
+			current_fsuid(credp), current_fsgid(credp),
+			XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
+	if (error)
+		goto std_return;
+
+	ip = NULL;
+	dp_joined_to_trans = B_FALSE;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	resblks = XFS_CREATE_SPACE_RES(mp, namelen);
+	/*
+	 * Initially assume that the file does not exist and
+	 * reserve the resources for that case.  If that is not
+	 * the case we'll drop the one we have and get a more
+	 * appropriate transaction later.
+	 */
+	error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+	if (error == ENOSPC) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+	}
+	if (error) {
+		cancel_flags = 0;
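+		/*
+		 * dp is not locked or joined yet; clear it so the
+		 * error_return path does not try to unlock it.
+		 */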
+		dp = NULL;
+		goto error_return;
+	}
+
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	XFS_BMAP_INIT(&free_list, &first_block);
+
+	ASSERT(ip == NULL);
+
+	/*
+	 * Reserve disk quota and the inode.
+	 */
+	error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
+	if (error)
+		goto error_return;
+
+	if (resblks == 0 &&
+	    (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
+		goto error_return;
+	rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
+	error = xfs_dir_ialloc(&tp, dp,
+			MAKEIMODE(vap->va_type,vap->va_mode), 1,
+			rdev, credp, prid, resblks > 0,
+			&ip, &committed);
+	if (error) {
+		if (error == ENOSPC)
+			goto error_return;
+		goto abort_return;
+	}
+	ITRACE(ip);
+
+	/*
+	 * At this point, we've gotten a newly allocated inode.
+	 * It is locked (and joined to the transaction).
+	 */
+
+	ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+
+	/*
+	 * Now we join the directory inode to the transaction.
+	 * We do not do it earlier because xfs_dir_ialloc
+	 * might commit the previous transaction (and release
+	 * all the locks).
+	 */
+
+	VN_HOLD(dir_vp);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	dp_joined_to_trans = B_TRUE;
+
+	error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino,
+		&first_block, &free_list,
+		resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+	if (error) {
+		ASSERT(error != ENOSPC);
+		goto abort_return;
+	}
+	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * create transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	dp->i_gen++;
+
+	/*
+	 * Attach the dquot(s) to the inodes and modify them incore.
+	 * These ids of the inode couldn't have changed since the new
+	 * inode has been locked ever since it was created.
+	 */
+	XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
+
+	/*
+	 * xfs_trans_commit normally decrements the vnode ref count
+	 * when it unlocks the inode. Since we want to return the
+	 * vnode to the caller, we bump the vnode ref count now.
+	 */
+	IHOLD(ip);
+	vp = XFS_ITOV(ip);
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		xfs_bmap_cancel(&free_list);
+		goto abort_rele;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		IRELE(ip);
+		tp = NULL;
+		goto error_return;
+	}
+
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	/*
+	 * Propagate the fact that the vnode changed after the
+	 * xfs_inode locks have been released.
+	 */
+	VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
+
+	*vpp = vp;
+
+	/* Fallthrough to std_return with error = 0  */
+
+std_return:
+	if ( (*vpp || (error != 0 && dm_event_sent != 0)) &&
+			DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
+							DM_EVENT_POSTCREATE)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
+			dir_vp, DM_RIGHT_NULL,
+			*vpp ? vp:NULL,
+			DM_RIGHT_NULL, name, NULL,
+			dm_di_mode, error, 0);
+	}
+	return error;
+
+ abort_return:
+	cancel_flags |= XFS_TRANS_ABORT;
+	/* FALLTHROUGH */
+ error_return:
+
+	if (tp != NULL)
+		xfs_trans_cancel(tp, cancel_flags);
+
+	if (!dp_joined_to_trans && (dp != NULL))
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	goto std_return;
+
+ abort_rele:
+	/*
+	 * Wait until after the current transaction is aborted to
+	 * release the inode.  This prevents recursive transactions
+	 * and deadlocks from xfs_inactive.
+	 */
+	cancel_flags |= XFS_TRANS_ABORT;
+	xfs_trans_cancel(tp, cancel_flags);
+	IRELE(ip);
+
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	goto std_return;
+}
+
+#ifdef DEBUG
+/*
+ * Some counters to see if (and how often) we are hitting some deadlock
+ * prevention code paths.
+ */
+
+int xfs_rm_locks;
+int xfs_rm_lock_delays;
+int xfs_rm_attempts;
+#endif
+
+/*
+ * The following routine will lock the inodes associated with the
+ * directory and the named entry in the directory. The locks are
+ * acquired in increasing inode number.
+ *
+ * If the entry is "..", then only the directory is locked. The
+ * vnode ref count will still include that from the .. entry in
+ * this case.
+ *
+ * There is a deadlock we need to worry about. If the locked directory is
+ * in the AIL, it might be blocking up the log. The next inode we lock
+ * could already be locked by another thread waiting for log space (e.g.
+ * a permanent log reservation with a long running transaction (see
+ * xfs_itruncate_finish)). To solve this, we must check if the directory
+ * is in the AIL and use lock_nowait. If we can't lock, we need to
+ * drop the inode lock on the directory and try again. xfs_iunlock will
+ * potentially push the tail if we were holding up the log.
+ */
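+/*
+ * Concretely, the retry loop below trylocks the entry inode whenever the
+ * directory's log item is in the AIL; on failure it unlocks the directory,
+ * delays briefly every few attempts, and starts over.
+ */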
+STATIC int
+xfs_lock_dir_and_entry(
+	xfs_inode_t	*dp,
+	vname_t		*dentry,
+	xfs_inode_t	*ip)	/* inode of entry 'name' */
+{
+	int		attempts;
+	xfs_ino_t	e_inum;
+	xfs_inode_t	*ips[2];
+	xfs_log_item_t	*lp;
+
+#ifdef DEBUG
+	xfs_rm_locks++;
+#endif
+	attempts = 0;
+
+again:
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	e_inum = ip->i_ino;
+
+	ITRACE(ip);
+
+	/*
+	 * We want to lock in increasing inum. Since we've already
+	 * acquired the lock on the directory, we may need to release
+	 * it if the inum of the entry turns out to be less.
+	 */
+	if (e_inum > dp->i_ino) {
+		/*
+		 * We are already in the right order, so just
+		 * lock on the inode of the entry.
+		 * We need to use nowait if dp is in the AIL.
+		 */
+
+		lp = (xfs_log_item_t *)dp->i_itemp;
+		if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
+				attempts++;
+#ifdef DEBUG
+				xfs_rm_attempts++;
+#endif
+
+				/*
+				 * Unlock dp and try again.
+				 * xfs_iunlock will try to push the tail
+				 * if the inode is in the AIL.
+				 */
+
+				xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+				if ((attempts % 5) == 0) {
+					delay(1); /* Don't just spin the CPU */
+#ifdef DEBUG
+					xfs_rm_lock_delays++;
+#endif
+				}
+				goto again;
+			}
+		} else {
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		}
+	} else if (e_inum < dp->i_ino) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+		ips[0] = ip;
+		ips[1] = dp;
+		xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+	}
+	/*
+	 * else e_inum == dp->i_ino: this can happen if we're asked to
+	 * lock /x/.., where the entry is "..", which is also the parent
+	 * directory.
+	 */
+
+	return 0;
+}
+
+#ifdef DEBUG
+int xfs_locked_n;
+int xfs_small_retries;
+int xfs_middle_retries;
+int xfs_lots_retries;
+int xfs_lock_delays;
+#endif
+
+/*
+ * The following routine will lock n inodes in exclusive mode.
+ * We assume the caller calls us with the inodes in i_ino order.
+ *
+ * We need to detect deadlock where an inode that we lock
+ * is in the AIL and we start waiting for another inode that is locked
+ * by a thread in a long running transaction (such as truncate). This can
+ * result in deadlock since the long running trans might need to wait
+ * for the inode we just locked in order to push the tail and free space
+ * in the log.
+ */
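+/*
+ * The strategy below: lock inodes normally until one of the already
+ * locked ones turns out to be in the AIL; from then on use trylocks, and
+ * if a trylock fails, drop all the locks taken so far, delay briefly
+ * every few attempts, and start again from the first inode.
+ */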
+void
+xfs_lock_inodes(
+	xfs_inode_t	**ips,
+	int		inodes,
+	int		first_locked,
+	uint		lock_mode)
+{
+	int		attempts = 0, i, j, try_lock;
+	xfs_log_item_t	*lp;
+
+	ASSERT(ips && (inodes >= 2)); /* we need at least two */
+
+	if (first_locked) {
+		try_lock = 1;
+		i = 1;
+	} else {
+		try_lock = 0;
+		i = 0;
+	}
+
+again:
+	for (; i < inodes; i++) {
+		ASSERT(ips[i]);
+
+		if (i && (ips[i] == ips[i-1]))	/* Already locked */
+			continue;
+
+		/*
+		 * If try_lock is not set yet, make sure all locked inodes
+		 * are not in the AIL.
+		 * If any are, set try_lock to be used later.
+		 */
+
+		if (!try_lock) {
+			for (j = (i - 1); j >= 0 && !try_lock; j--) {
+				lp = (xfs_log_item_t *)ips[j]->i_itemp;
+				if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+					try_lock++;
+				}
+			}
+		}
+
+		/*
+		 * If any of the previous locks we have locked is in the AIL,
+		 * we must TRY to get the second and subsequent locks. If
+		 * we can't get any, we must release all we have
+		 * and try again.
+		 */
+
+		if (try_lock) {
+			/*
+			 * try_lock means we have an inode locked that is
+			 * in the AIL; it must be 0 if i is 0.
+			 */
+			ASSERT(i != 0);
+			if (!xfs_ilock_nowait(ips[i], lock_mode)) {
+				attempts++;
+
+				/*
+				 * Unlock all previous guys and try again.
+				 * xfs_iunlock will try to push the tail
+				 * if the inode is in the AIL.
+				 */
+
+				for(j = i - 1; j >= 0; j--) {
+
+					/*
+					 * Check to see if we've already
+					 * unlocked this one.
+					 * Not the first one going back,
+					 * and the inode ptr is the same.
+					 */
+					if ((j != (i - 1)) && ips[j] ==
+								ips[j+1])
+						continue;
+
+					xfs_iunlock(ips[j], lock_mode);
+				}
+
+				if ((attempts % 5) == 0) {
+					delay(1); /* Don't just spin the CPU */
+#ifdef DEBUG
+					xfs_lock_delays++;
+#endif
+				}
+				i = 0;
+				try_lock = 0;
+				goto again;
+			}
+		} else {
+			xfs_ilock(ips[i], lock_mode);
+		}
+	}
+
+#ifdef DEBUG
+	if (attempts) {
+		if (attempts < 5) xfs_small_retries++;
+		else if (attempts < 100) xfs_middle_retries++;
+		else xfs_lots_retries++;
+	} else {
+		xfs_locked_n++;
+	}
+#endif
+}
+
+#ifdef	DEBUG
+#define	REMOVE_DEBUG_TRACE(x)	{remove_which_error_return = (x);}
+int remove_which_error_return = 0;
+#else /* ! DEBUG */
+#define	REMOVE_DEBUG_TRACE(x)
+#endif	/* ! DEBUG */
+
+
+/*
+ * xfs_remove
+ *
+ * Remove a directory entry and drop a link on the inode it refers to.
+ */
+STATIC int
+xfs_remove(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	cred_t			*credp)
+{
+	vnode_t			*dir_vp;
+	char			*name = VNAME(dentry);
+	xfs_inode_t             *dp, *ip;
+	xfs_trans_t             *tp = NULL;
+	xfs_mount_t		*mp;
+	int                     error = 0;
+	xfs_bmap_free_t         free_list;
+	xfs_fsblock_t           first_block;
+	int			cancel_flags;
+	int			committed;
+	int			dm_di_mode = 0;
+	int			link_zero;
+	uint			resblks;
+	int			namelen;
+
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	dp = XFS_BHVTOI(dir_bdp);
+	mp = dp->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	namelen = VNAMELEN(dentry);
+
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp,
+					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
+					name, NULL, 0, 0, 0);
+		if (error)
+			return error;
+	}
+
+	/* From this point on, return through std_return */
+	ip = NULL;
+
+	/*
+	 * We need to get a reference to ip before we get our log
+	 * reservation. The reason for this is that we cannot call
+	 * xfs_iget for an inode for which we do not have a reference
+	 * once we've acquired a log reservation. This is because the
+	 * inode we are trying to get might be in xfs_inactive going
+	 * for a log reservation. Since we'll have to wait for the
+	 * inactive code to complete before returning from xfs_iget,
+	 * we need to make sure that we don't have log space reserved
+	 * when we call xfs_iget.  Instead we get an unlocked reference
+	 * to the inode before getting our log reservation.
+	 */
+	error = xfs_get_dir_entry(dentry, &ip);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto std_return;
+	}
+
+	dm_di_mode = ip->i_d.di_mode;
+
+	vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+
+	ITRACE(ip);
+
+	error = XFS_QM_DQATTACH(mp, dp, 0);
+	if (!error && dp != ip)
+		error = XFS_QM_DQATTACH(mp, ip, 0);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		IRELE(ip);
+		goto std_return;
+	}
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	/*
+	 * We try to get the real space reservation first, allowing for
+	 * directory btree deletion(s) implying possible bmap insert(s).
+	 * If we can't get the space reservation then we use 0 instead;
+	 * in that case the directory code avoids bmap btree insert(s) by
+	 * trimming the LAST block from the directory if an insert would
+	 * otherwise be needed.
+	 */
+	resblks = XFS_REMOVE_SPACE_RES(mp);
+	error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);
+	if (error == ENOSPC) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);
+	}
+	if (error) {
+		ASSERT(error != ENOSPC);
+		REMOVE_DEBUG_TRACE(__LINE__);
+		xfs_trans_cancel(tp, 0);
+		IRELE(ip);
+		return error;
+	}
+
+	error = xfs_lock_dir_and_entry(dp, dentry, ip);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		xfs_trans_cancel(tp, cancel_flags);
+		IRELE(ip);
+		goto std_return;
+	}
+
+	/*
+	 * At this point, we've gotten both the directory and the entry
+	 * inodes locked.
+	 */
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	if (dp != ip) {
+		/*
+		 * Increment vnode ref count only in this case since
+		 * there's an extra vnode reference in the case where
+		 * dp == ip.
+		 */
+		IHOLD(dp);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	}
+
+	/*
+	 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
+	 */
+	XFS_BMAP_INIT(&free_list, &first_block);
+	error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino,
+		&first_block, &free_list, 0);
+	if (error) {
+		ASSERT(error != ENOENT);
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto error1;
+	}
+	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+	dp->i_gen++;
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+	error = xfs_droplink(tp, ip);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto error1;
+	}
+
+	/* Determine if this is the last link while
+	 * we are in the transaction.
+	 */
+	link_zero = (ip)->i_d.di_nlink==0;
+
+	/*
+	 * Take an extra ref on the inode so that it doesn't
+	 * go to xfs_inactive() from within the commit.
+	 */
+	IHOLD(ip);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * remove transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto error_rele;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		IRELE(ip);
+		goto std_return;
+	}
+
+	/*
+	 * Before we drop our extra reference to the inode, purge it
+	 * from the refcache if it is there.  By waiting until afterwards
+	 * to do the IRELE, we ensure that we won't go inactive in the
+	 * xfs_refcache_purge_ip routine (although that would be OK).
+	 */
+	xfs_refcache_purge_ip(ip);
+
+	vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+
+	/*
+	 * Let interposed file systems know about removed links.
+	 */
+	VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero);
+
+	IRELE(ip);
+
+/*	Fall through to std_return with error = 0 */
+ std_return:
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp,
+						DM_EVENT_POSTREMOVE)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
+				dir_vp, DM_RIGHT_NULL,
+				NULL, DM_RIGHT_NULL,
+				name, NULL, dm_di_mode, error, 0);
+	}
+	return error;
+
+ error1:
+	xfs_bmap_cancel(&free_list);
+	cancel_flags |= XFS_TRANS_ABORT;
+	xfs_trans_cancel(tp, cancel_flags);
+	goto std_return;
+
+ error_rele:
+	/*
+	 * In this case make sure to not release the inode until after
+	 * the current transaction is aborted.  Releasing it beforehand
+	 * can cause us to go to xfs_inactive and start a recursive
+	 * transaction which can easily deadlock with the current one.
+	 */
+	xfs_bmap_cancel(&free_list);
+	cancel_flags |= XFS_TRANS_ABORT;
+	xfs_trans_cancel(tp, cancel_flags);
+
+	/*
+	 * Before we drop our extra reference to the inode, purge it
+	 * from the refcache if it is there.  By waiting until afterwards
+	 * to do the IRELE, we ensure that we won't go inactive in the
+	 * xfs_refcache_purge_ip routine (although that would be OK).
+	 */
+	xfs_refcache_purge_ip(ip);
+
+	IRELE(ip);
+
+	goto std_return;
+}
+
+
+/*
+ * xfs_link
+ *
+ * Create a new directory entry (hard link) referring to an existing inode.
+ */
+STATIC int
+xfs_link(
+	bhv_desc_t		*target_dir_bdp,
+	vnode_t			*src_vp,
+	vname_t			*dentry,
+	cred_t			*credp)
+{
+	xfs_inode_t		*tdp, *sip;
+	xfs_trans_t		*tp;
+	xfs_mount_t		*mp;
+	xfs_inode_t		*ips[2];
+	int			error;
+	xfs_bmap_free_t         free_list;
+	xfs_fsblock_t           first_block;
+	int			cancel_flags;
+	int			committed;
+	vnode_t			*target_dir_vp;
+	bhv_desc_t		*src_bdp;
+	int			resblks;
+	char			*target_name = VNAME(dentry);
+	int			target_namelen;
+
+	target_dir_vp = BHV_TO_VNODE(target_dir_bdp);
+	vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address);
+	vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	target_namelen = VNAMELEN(dentry);
+	if (src_vp->v_type == VDIR)
+		return XFS_ERROR(EPERM);
+
+	/*
+	 * For now, manually find the XFS behavior descriptor for
+	 * the source vnode.  If it doesn't exist then something
+	 * is wrong and we should just return an error.
+	 * Eventually we need to figure out how link is going to
+	 * work in the face of stacked vnodes.
+	 */
+	src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
+	if (src_bdp == NULL) {
+		return XFS_ERROR(EXDEV);
+	}
+	sip = XFS_BHVTOI(src_bdp);
+	tdp = XFS_BHVTOI(target_dir_bdp);
+	mp = tdp->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
+					target_dir_vp, DM_RIGHT_NULL,
+					src_vp, DM_RIGHT_NULL,
+					target_name, NULL, 0, 0, 0);
+		if (error)
+			return error;
+	}
+
+	/* Return through std_return after this point. */
+
+	error = XFS_QM_DQATTACH(mp, sip, 0);
+	if (!error && sip != tdp)
+		error = XFS_QM_DQATTACH(mp, tdp, 0);
+	if (error)
+		goto std_return;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	resblks = XFS_LINK_SPACE_RES(mp, target_namelen);
+	error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
+	if (error == ENOSPC) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
+	}
+	if (error) {
+		cancel_flags = 0;
+		goto error_return;
+	}
+
+	if (sip->i_ino < tdp->i_ino) {
+		ips[0] = sip;
+		ips[1] = tdp;
+	} else {
+		ips[0] = tdp;
+		ips[1] = sip;
+	}
+
+	xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+
+	/*
+	 * Increment vnode ref counts since xfs_trans_commit &
+	 * xfs_trans_cancel will both unlock the inodes and
+	 * decrement the associated ref counts.
+	 */
+	VN_HOLD(src_vp);
+	VN_HOLD(target_dir_vp);
+	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+
+	/*
+	 * If the source has too many links, we can't make any more to it.
+	 */
+	if (sip->i_d.di_nlink >= XFS_MAXLINK) {
+		error = XFS_ERROR(EMLINK);
+		goto error_return;
+	}
+
+	if (resblks == 0 &&
+	    (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name,
+			target_namelen)))
+		goto error_return;
+
+	XFS_BMAP_INIT(&free_list, &first_block);
+
+	error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen,
+				   sip->i_ino, &first_block, &free_list,
+				   resblks);
+	if (error)
+		goto abort_return;
+	xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	tdp->i_gen++;
+	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
+
+	error = xfs_bumplink(tp, sip);
+	if (error) {
+		goto abort_return;
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * link transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	error = xfs_bmap_finish (&tp, &free_list, first_block, &committed);
+	if (error) {
+		xfs_bmap_cancel(&free_list);
+		goto abort_return;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		goto std_return;
+	}
+
+	/* Fall through to std_return with error = 0. */
+std_return:
+	if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip,
+						DM_EVENT_POSTLINK)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
+				target_dir_vp, DM_RIGHT_NULL,
+				src_vp, DM_RIGHT_NULL,
+				target_name, NULL, 0, error, 0);
+	}
+	return error;
+
+ abort_return:
+	cancel_flags |= XFS_TRANS_ABORT;
+	/* FALLTHROUGH */
+ error_return:
+	xfs_trans_cancel(tp, cancel_flags);
+
+	goto std_return;
+}
+
+/*
+ * xfs_mkdir
+ *
+ * Create a new directory and initialize its "." and ".." entries.
+ */
+STATIC int
+xfs_mkdir(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	vattr_t			*vap,
+	vnode_t			**vpp,
+	cred_t			*credp)
+{
+	char			*dir_name = VNAME(dentry);
+	xfs_inode_t             *dp;
+	xfs_inode_t		*cdp;	/* inode of created dir */
+	vnode_t			*cvp;	/* vnode of created dir */
+	xfs_trans_t		*tp;
+	xfs_mount_t		*mp;
+	int			cancel_flags;
+	int			error;
+	int			committed;
+	xfs_bmap_free_t         free_list;
+	xfs_fsblock_t           first_block;
+	vnode_t			*dir_vp;
+	boolean_t		dp_joined_to_trans;
+	boolean_t		created = B_FALSE;
+	int			dm_event_sent = 0;
+	xfs_prid_t		prid;
+	struct xfs_dquot	*udqp, *gdqp;
+	uint			resblks;
+	int			dm_di_mode;
+	int			dir_namelen;
+
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	dp = XFS_BHVTOI(dir_bdp);
+	mp = dp->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	dir_namelen = VNAMELEN(dentry);
+
+	tp = NULL;
+	dp_joined_to_trans = B_FALSE;
+	dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
+
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
+					dir_vp, DM_RIGHT_NULL, NULL,
+					DM_RIGHT_NULL, dir_name, NULL,
+					dm_di_mode, 0, 0);
+		if (error)
+			return error;
+		dm_event_sent = 1;
+	}
+
+	/* Return through std_return after this point. */
+
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	mp = dp->i_mount;
+	udqp = gdqp = NULL;
+	if (vap->va_mask & XFS_AT_PROJID)
+		prid = (xfs_prid_t)vap->va_projid;
+	else
+		prid = (xfs_prid_t)dfltprid;
+
+	/*
+	 * Make sure that we have allocated dquot(s) on disk.
+	 */
+	error = XFS_QM_DQVOPALLOC(mp, dp,
+			current_fsuid(credp), current_fsgid(credp),
+			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
+	if (error)
+		goto std_return;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen);
+	error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0,
+				  XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT);
+	if (error == ENOSPC) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0,
+					  XFS_TRANS_PERM_LOG_RES,
+					  XFS_MKDIR_LOG_COUNT);
+	}
+	if (error) {
+		cancel_flags = 0;
+		dp = NULL;
+		goto error_return;
+	}
+
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Check for directory link count overflow.
+	 */
+	if (dp->i_d.di_nlink >= XFS_MAXLINK) {
+		error = XFS_ERROR(EMLINK);
+		goto error_return;
+	}
+
+	/*
+	 * Reserve disk quota and the inode.
+	 */
+	error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
+	if (error)
+		goto error_return;
+
+	if (resblks == 0 &&
+	    (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen)))
+		goto error_return;
+	/*
+	 * create the directory inode.
+	 */
+	error = xfs_dir_ialloc(&tp, dp,
+			MAKEIMODE(vap->va_type,vap->va_mode), 2,
+			0, credp, prid, resblks > 0,
+		&cdp, NULL);
+	if (error) {
+		if (error == ENOSPC)
+			goto error_return;
+		goto abort_return;
+	}
+	ITRACE(cdp);
+
+	/*
+	 * Now we add the directory inode to the transaction.
+	 * We waited until now since xfs_dir_ialloc might start
+	 * a new transaction.  Had we joined the transaction
+	 * earlier, the locks might have gotten released.
+	 */
+	VN_HOLD(dir_vp);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	dp_joined_to_trans = B_TRUE;
+
+	XFS_BMAP_INIT(&free_list, &first_block);
+
+	error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen,
+			cdp->i_ino, &first_block, &free_list,
+			resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+	if (error) {
+		ASSERT(error != ENOSPC);
+		goto error1;
+	}
+	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+	/*
+	 * Bump the in memory version number of the parent directory
+	 * so that other processes accessing it will recognize that
+	 * the directory has changed.
+	 */
+	dp->i_gen++;
+
+	error = XFS_DIR_INIT(mp, tp, cdp, dp);
+	if (error) {
+		goto error2;
+	}
+
+	cdp->i_gen = 1;
+	error = xfs_bumplink(tp, dp);
+	if (error) {
+		goto error2;
+	}
+
+	cvp = XFS_ITOV(cdp);
+
+	created = B_TRUE;
+
+	*vpp = cvp;
+	IHOLD(cdp);
+
+	/*
+	 * Attach the dquots to the new inode and modify the icount incore.
+	 */
+	XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * mkdir transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		IRELE(cdp);
+		goto error2;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+	if (error) {
+		IRELE(cdp);
+	}
+
+	/* Fall through to std_return with error = 0 or errno from
+	 * xfs_trans_commit. */
+
+std_return:
+	if ( (created || (error != 0 && dm_event_sent != 0)) &&
+			DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
+						DM_EVENT_POSTCREATE)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
+					dir_vp, DM_RIGHT_NULL,
+					created ? XFS_ITOV(cdp):NULL,
+					DM_RIGHT_NULL,
+					dir_name, NULL,
+					dm_di_mode, error, 0);
+	}
+	return error;
+
+ error2:
+ error1:
+	xfs_bmap_cancel(&free_list);
+ abort_return:
+	cancel_flags |= XFS_TRANS_ABORT;
+ error_return:
+	xfs_trans_cancel(tp, cancel_flags);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	if (!dp_joined_to_trans && (dp != NULL)) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	}
+
+	goto std_return;
+}
+
+
+/*
+ * xfs_rmdir
+ *
+ * Remove an empty directory: drop its ".." link on the parent and the
+ * two remaining links on the directory itself.
+ */
+STATIC int
+xfs_rmdir(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	cred_t			*credp)
+{
+	char			*name = VNAME(dentry);
+	xfs_inode_t             *dp;
+	xfs_inode_t             *cdp;   /* child directory */
+	xfs_trans_t             *tp;
+	xfs_mount_t		*mp;
+	int                     error;
+	xfs_bmap_free_t         free_list;
+	xfs_fsblock_t           first_block;
+	int			cancel_flags;
+	int			committed;
+	vnode_t			*dir_vp;
+	int			dm_di_mode = 0;
+	int			last_cdp_link;
+	int			namelen;
+	uint			resblks;
+
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	dp = XFS_BHVTOI(dir_bdp);
+	mp = dp->i_mount;
+
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount))
+		return XFS_ERROR(EIO);
+	namelen = VNAMELEN(dentry);
+
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
+					dir_vp, DM_RIGHT_NULL,
+					NULL, DM_RIGHT_NULL,
+					name, NULL, 0, 0, 0);
+		if (error)
+			return XFS_ERROR(error);
+	}
+
+	/* Return through std_return after this point. */
+
+	cdp = NULL;
+
+	/*
+	 * We need to get a reference to cdp before we get our log
+	 * reservation.  The reason for this is that we cannot call
+	 * xfs_iget for an inode for which we do not have a reference
+	 * once we've acquired a log reservation.  This is because the
+	 * inode we are trying to get might be in xfs_inactive going
+	 * for a log reservation.  Since we'll have to wait for the
+	 * inactive code to complete before returning from xfs_iget,
+	 * we need to make sure that we don't have log space reserved
+	 * when we call xfs_iget.  Instead we get an unlocked reference
+	 * to the inode before getting our log reservation.
+	 */
+	error = xfs_get_dir_entry(dentry, &cdp);
+	if (error) {
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto std_return;
+	}
+	mp = dp->i_mount;
+	dm_di_mode = cdp->i_d.di_mode;
+
+	/*
+	 * Get the dquots for the inodes.
+	 */
+	error = XFS_QM_DQATTACH(mp, dp, 0);
+	if (!error && dp != cdp)
+		error = XFS_QM_DQATTACH(mp, cdp, 0);
+	if (error) {
+		IRELE(cdp);
+		REMOVE_DEBUG_TRACE(__LINE__);
+		goto std_return;
+	}
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	/*
+	 * We try to get the real space reservation first, allowing for
+	 * directory btree deletion(s) implying possible bmap insert(s).
+	 * If we can't get the space reservation then we use 0 instead;
+	 * in that case the directory code avoids bmap btree insert(s) by
+	 * trimming the LAST block from the directory if an insert would
+	 * otherwise be needed.
+	 */
+	resblks = XFS_REMOVE_SPACE_RES(mp);
+	error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT);
+	if (error == ENOSPC) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT);
+	}
+	if (error) {
+		ASSERT(error != ENOSPC);
+		cancel_flags = 0;
+		IRELE(cdp);
+		goto error_return;
+	}
+	XFS_BMAP_INIT(&free_list, &first_block);
+
+	/*
+	 * Now lock the child directory inode and the parent directory
+	 * inode in the proper order.  This will take care of validating
+	 * that the directory entry for the child directory inode has
+	 * not changed while we were obtaining a log reservation.
+	 */
+	error = xfs_lock_dir_and_entry(dp, dentry, cdp);
+	if (error) {
+		xfs_trans_cancel(tp, cancel_flags);
+		IRELE(cdp);
+		goto std_return;
+	}
+
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	if (dp != cdp) {
+		/*
+		 * Only increment the parent directory vnode count if
+		 * we didn't bump it in looking up cdp.  The only time
+		 * we don't bump it is when we're looking up ".".
+		 */
+		VN_HOLD(dir_vp);
+	}
+
+	ITRACE(cdp);
+	xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL);
+
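+	/*
+	 * An empty directory has a link count of exactly 2: its own "."
+	 * entry and the entry in its parent.  Anything higher means
+	 * subdirectories still point back at it via "..".
+	 */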
+	ASSERT(cdp->i_d.di_nlink >= 2);
+	if (cdp->i_d.di_nlink != 2) {
+		error = XFS_ERROR(ENOTEMPTY);
+		goto error_return;
+	}
+	if (!XFS_DIR_ISEMPTY(mp, cdp)) {
+		error = XFS_ERROR(ENOTEMPTY);
+		goto error_return;
+	}
+
+	error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino,
+		&first_block, &free_list, resblks);
+	if (error) {
+		goto error1;
+	}
+
+	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+	/*
+	 * Bump the in-memory generation count on the parent
+	 * directory so that others can know that it has changed.
+	 */
+	dp->i_gen++;
+
+	/*
+	 * Drop the link from cdp's "..".
+	 */
+	error = xfs_droplink(tp, dp);
+	if (error) {
+		goto error1;
+	}
+
+	/*
+	 * Drop the link from dp to cdp.
+	 */
+	error = xfs_droplink(tp, cdp);
+	if (error) {
+		goto error1;
+	}
+
+	/*
+	 * Drop the "." link from cdp to self.
+	 */
+	error = xfs_droplink(tp, cdp);
+	if (error) {
+		goto error1;
+	}
+
+	/* Determine this before committing the transaction. */
+	last_cdp_link = (cdp->i_d.di_nlink == 0);
+
+	/*
+	 * Take an extra ref on the child vnode so that it
+	 * does not go to xfs_inactive() from within the commit.
+	 */
+	IHOLD(cdp);
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * rmdir transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		xfs_bmap_cancel(&free_list);
+		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
+				 XFS_TRANS_ABORT));
+		IRELE(cdp);
+		goto std_return;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		IRELE(cdp);
+		goto std_return;
+	}
+
+	/*
+	 * Let interposed file systems know about removed links.
+	 */
+	VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link);
+
+	IRELE(cdp);
+
+	/* Fall through to std_return with error = 0 or the errno
+	 * from xfs_trans_commit. */
+std_return:
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
+					dir_vp, DM_RIGHT_NULL,
+					NULL, DM_RIGHT_NULL,
+					name, NULL, dm_di_mode,
+					error, 0);
+	}
+	return error;
+
+ error1:
+	xfs_bmap_cancel(&free_list);
+	cancel_flags |= XFS_TRANS_ABORT;
+ error_return:
+	xfs_trans_cancel(tp, cancel_flags);
+	goto std_return;
+}
+
+
+/*
+ * xfs_readdir
+ *
+ * Read dp's entries starting at uiop->uio_offset and translate them into
+ * bufsize bytes worth of struct dirents starting at bufbase.
+ */
+STATIC int
+xfs_readdir(
+	bhv_desc_t	*dir_bdp,
+	uio_t		*uiop,
+	cred_t		*credp,
+	int		*eofp)
+{
+	xfs_inode_t	*dp;
+	xfs_trans_t	*tp = NULL;
+	int		error = 0;
+	uint		lock_mode;
+	xfs_off_t	start_offset;
+
+	vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
+					       (inst_t *)__return_address);
+	dp = XFS_BHVTOI(dir_bdp);
+
+	if (XFS_FORCED_SHUTDOWN(dp->i_mount)) {
+		return XFS_ERROR(EIO);
+	}
+
+	lock_mode = xfs_ilock_map_shared(dp);
+	start_offset = uiop->uio_offset;
+	error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
+	if (start_offset != uiop->uio_offset) {
+		xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
+	}
+	xfs_iunlock_map_shared(dp, lock_mode);
+	return error;
+}
+
+
+/*
+ * xfs_symlink
+ *
+ * Create a symbolic link named by dentry in the directory given by
+ * dir_bdp, pointing at target_path.
+ */
+STATIC int
+xfs_symlink(
+	bhv_desc_t		*dir_bdp,
+	vname_t			*dentry,
+	vattr_t			*vap,
+	char			*target_path,
+	vnode_t			**vpp,
+	cred_t			*credp)
+{
+	xfs_trans_t		*tp;
+	xfs_mount_t		*mp;
+	xfs_inode_t		*dp;
+	xfs_inode_t		*ip;
+	int			error;
+	int			pathlen;
+	xfs_bmap_free_t		free_list;
+	xfs_fsblock_t		first_block;
+	boolean_t		dp_joined_to_trans;
+	vnode_t			*dir_vp;
+	uint			cancel_flags;
+	int			committed;
+	xfs_fileoff_t		first_fsb;
+	xfs_filblks_t		fs_blocks;
+	int			nmaps;
+	xfs_bmbt_irec_t		mval[SYMLINK_MAPS];
+	xfs_daddr_t		d;
+	char			*cur_chunk;
+	int			byte_cnt;
+	int			n;
+	xfs_buf_t		*bp;
+	xfs_prid_t		prid;
+	struct xfs_dquot	*udqp, *gdqp;
+	uint			resblks;
+	char			*link_name = VNAME(dentry);
+	int			link_namelen;
+
+	*vpp = NULL;
+	dir_vp = BHV_TO_VNODE(dir_bdp);
+	dp = XFS_BHVTOI(dir_bdp);
+	dp_joined_to_trans = B_FALSE;
+	error = 0;
+	ip = NULL;
+	tp = NULL;
+
+	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+	mp = dp->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	link_namelen = VNAMELEN(dentry);
+
+	/*
+	 * Check component lengths of the target path name.
+	 */
+	pathlen = strlen(target_path);
+	if (pathlen >= MAXPATHLEN)      /* total string too long */
+		return XFS_ERROR(ENAMETOOLONG);
+	if (pathlen >= MAXNAMELEN) {    /* is any component too long? */
+		int len, total;
+		char *path;
+
+		for (total = 0, path = target_path; total < pathlen;) {
+			/*
+			 * Skip any slashes.
+			 */
+			while (*path == '/') {
+				total++;
+				path++;
+			}
+
+			/*
+			 * Count up to the next slash or end of path.
+			 * Error out if the component is bigger than MAXNAMELEN.
+			 */
+			for (len = 0; *path != '/' && total < pathlen; total++, path++) {
+				if (++len >= MAXNAMELEN) {
+					error = ENAMETOOLONG;
+					return error;
+				}
+			}
+		}
+	}
+
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) {
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_vp,
+					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
+					link_name, target_path, 0, 0, 0);
+		if (error)
+			return error;
+	}
+
+	/* Return through std_return after this point. */
+
+	udqp = gdqp = NULL;
+	if (vap->va_mask & XFS_AT_PROJID)
+		prid = (xfs_prid_t)vap->va_projid;
+	else
+		prid = (xfs_prid_t)dfltprid;
+
+	/*
+	 * Make sure that we have allocated dquot(s) on disk.
+	 */
+	error = XFS_QM_DQVOPALLOC(mp, dp,
+			current_fsuid(credp), current_fsgid(credp),
+			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
+	if (error)
+		goto std_return;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
+	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+	/*
+	 * Will the symlink fit into the inode data fork?  There can't be
+	 * any attributes yet, so we get the whole variable part.
+	 */
+	if (pathlen <= XFS_LITINO(mp))
+		fs_blocks = 0;
+	else
+		fs_blocks = XFS_B_TO_FSB(mp, pathlen);
+	resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks);
+	error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
+	if (error == ENOSPC && fs_blocks == 0) {
+		resblks = 0;
+		error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
+	}
+	if (error) {
+		cancel_flags = 0;
+		dp = NULL;
+		goto error_return;
+	}
+
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Check whether the directory allows new symlinks or not.
+	 */
+	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
+		error = XFS_ERROR(EPERM);
+		goto error_return;
+	}
+
+	/*
+	 * Reserve disk quota : blocks and inode.
+	 */
+	error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
+	if (error)
+		goto error_return;
+
+	/*
+	 * Check for ability to enter directory entry, if no space reserved.
+	 */
+	if (resblks == 0 &&
+	    (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen)))
+		goto error_return;
+	/*
+	 * Initialize the bmap freelist prior to calling either
+	 * bmapi or the directory create code.
+	 */
+	XFS_BMAP_INIT(&free_list, &first_block);
+
+	/*
+	 * Allocate an inode for the symlink.
+	 */
+	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (vap->va_mode&~S_IFMT),
+			       1, 0, credp, prid, resblks > 0, &ip, NULL);
+	if (error) {
+		if (error == ENOSPC)
+			goto error_return;
+		goto error1;
+	}
+	ITRACE(ip);
+
+	VN_HOLD(dir_vp);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	dp_joined_to_trans = B_TRUE;
+
+	/*
+	 * Also attach the dquot(s) to it, if applicable.
+	 */
+	XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
+
+	if (resblks)
+		resblks -= XFS_IALLOC_SPACE_RES(mp);
+	/*
+	 * If the symlink will fit into the inode, write it inline.
+	 */
+	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
+		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
+		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
+		ip->i_d.di_size = pathlen;
+
+		/*
+		 * The inode was initially created in extent format.
+		 */
+		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
+		ip->i_df.if_flags |= XFS_IFINLINE;
+
+		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
+
+	} else {
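+		/*
+		 * The target won't fit inline, so allocate blocks with
+		 * xfs_bmapi() and copy the path into the mapped buffers,
+		 * logging each chunk as it is written.
+		 */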
+		first_fsb = 0;
+		nmaps = SYMLINK_MAPS;
+
+		error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
+				  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
+				  &first_block, resblks, mval, &nmaps,
+				  &free_list);
+		if (error) {
+			goto error1;
+		}
+
+		if (resblks)
+			resblks -= fs_blocks;
+		ip->i_d.di_size = pathlen;
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+		cur_chunk = target_path;
+		for (n = 0; n < nmaps; n++) {
+			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
+			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
+			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+					       BTOBB(byte_cnt), 0);
+			ASSERT(bp && !XFS_BUF_GETERROR(bp));
+			if (pathlen < byte_cnt) {
+				byte_cnt = pathlen;
+			}
+			pathlen -= byte_cnt;
+
+			memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt);
+			cur_chunk += byte_cnt;
+
+			xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
+		}
+	}
+
+	/*
+	 * Create the directory entry for the symlink.
+	 */
+	error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen,
+			ip->i_ino, &first_block, &free_list, resblks);
+	if (error) {
+		goto error1;
+	}
+	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+	/*
+	 * Bump the in-memory version number of the parent directory
+	 * so that other processes accessing it will recognize that
+	 * the directory has changed.
+	 */
+	dp->i_gen++;
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * symlink transaction goes to disk before returning to
+	 * the user.
+	 */
+	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+		xfs_trans_set_sync(tp);
+	}
+
+	/*
+	 * xfs_trans_commit normally decrements the vnode ref count
+	 * when it unlocks the inode. Since we want to return the
+	 * vnode to the caller, we bump the vnode ref count now.
+	 */
+	IHOLD(ip);
+
+	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+	if (error) {
+		goto error2;
+	}
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	/* Fall through to std_return with error = 0 or errno from
+	 * xfs_trans_commit	*/
+std_return:
+	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
+			     DM_EVENT_POSTSYMLINK)) {
+		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
+					dir_vp, DM_RIGHT_NULL,
+					error ? NULL : XFS_ITOV(ip),
+					DM_RIGHT_NULL, link_name, target_path,
+					0, error, 0);
+	}
+
+	if (!error) {
+		vnode_t *vp;
+
+		ASSERT(ip);
+		vp = XFS_ITOV(ip);
+		*vpp = vp;
+	}
+	return error;
+
+ error2:
+	IRELE(ip);
+ error1:
+	xfs_bmap_cancel(&free_list);
+	cancel_flags |= XFS_TRANS_ABORT;
+ error_return:
+	xfs_trans_cancel(tp, cancel_flags);
+	XFS_QM_DQRELE(mp, udqp);
+	XFS_QM_DQRELE(mp, gdqp);
+
+	if (!dp_joined_to_trans && (dp != NULL)) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	}
+
+	goto std_return;
+}
+
+
+/*
+ * xfs_fid2
+ *
+ * A fid routine that takes a pointer to a previously allocated
+ * fid structure (like xfs_fast_fid) but uses a 64 bit inode number.
+ */
+STATIC int
+xfs_fid2(
+	bhv_desc_t	*bdp,
+	fid_t		*fidp)
+{
+	xfs_inode_t	*ip;
+	xfs_fid2_t	*xfid;
+
+	vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
+				       (inst_t *)__return_address);
+	ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t));
+
+	xfid = (xfs_fid2_t *)fidp;
+	ip = XFS_BHVTOI(bdp);
+	xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len);
+	xfid->fid_pad = 0;
+	/*
+	 * use memcpy because the inode number is a long long and there's no
+	 * assurance that xfid->fid_ino is properly aligned.
+	 */
+	memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino));
+	xfid->fid_gen = ip->i_d.di_gen;
+
+	return 0;
+}
+
+
+/*
+ * xfs_rwlock
+ *
+ * Take the inode's iolock for the requested access and return 1.
+ * Directories are never locked here.  The TRY variants do not sleep;
+ * they return 0 if the lock cannot be taken immediately.
+ */
+int
+xfs_rwlock(
+	bhv_desc_t	*bdp,
+	vrwlock_t	locktype)
+{
+	xfs_inode_t	*ip;
+	vnode_t		*vp;
+
+	vp = BHV_TO_VNODE(bdp);
+	if (vp->v_type == VDIR)
+		return 1;
+	ip = XFS_BHVTOI(bdp);
+	if (locktype == VRWLOCK_WRITE) {
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	} else if (locktype == VRWLOCK_TRY_READ) {
+		return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED));
+	} else if (locktype == VRWLOCK_TRY_WRITE) {
+		return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL));
+	} else {
+		ASSERT((locktype == VRWLOCK_READ) ||
+		       (locktype == VRWLOCK_WRITE_DIRECT));
+		xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	}
+
+	return 1;
+}
+
+
+/*
+ * xfs_rwunlock
+ *
+ * Release the iolock taken by xfs_rwlock(); directories were never
+ * locked, so there is nothing to do for them.
+ */
+void
+xfs_rwunlock(
+	bhv_desc_t	*bdp,
+	vrwlock_t	locktype)
+{
+	xfs_inode_t     *ip;
+	vnode_t		*vp;
+
+	vp = BHV_TO_VNODE(bdp);
+	if (vp->v_type == VDIR)
+		return;
+	ip = XFS_BHVTOI(bdp);
+	if (locktype == VRWLOCK_WRITE) {
+		/*
+		 * In the write case, we may have added a new entry to
+		 * the reference cache.  That can leave, in this inode, a
+		 * pointer to another inode that needs to be released.  If
+		 * it is there, clear the pointer and release that inode
+		 * after unlocking this one.
+		 */
+		xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);
+	} else {
+		ASSERT((locktype == VRWLOCK_READ) ||
+		       (locktype == VRWLOCK_WRITE_DIRECT));
+		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	}
+	return;
+}
+
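+/*
+ * xfs_inode_flush
+ *
+ * Push dirty inode state toward disk.  With FLUSH_LOG we force the log up
+ * to the inode's last logged lsn; with FLUSH_INODE we write the inode back
+ * via xfs_iflush().  Returns EAGAIN when the inode is pinned, or when a
+ * non-blocking flush finds the inode locks contended.
+ */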
+STATIC int
+xfs_inode_flush(
+	bhv_desc_t	*bdp,
+	int		flags)
+{
+	xfs_inode_t	*ip;
+	xfs_mount_t	*mp;
+	xfs_inode_log_item_t *iip;
+	int		error = 0;
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+	iip = ip->i_itemp;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	/*
+	 * Bypass inodes which have already been cleaned by
+	 * the inode flush clustering code inside xfs_iflush
+	 */
+	if ((ip->i_update_core == 0) &&
+	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)))
+		return 0;
+
+	if (flags & FLUSH_LOG) {
+		if (iip && iip->ili_last_lsn) {
+			xlog_t		*log = mp->m_log;
+			xfs_lsn_t	sync_lsn;
+			int		s, log_flags = XFS_LOG_FORCE;
+
+			s = GRANT_LOCK(log);
+			sync_lsn = log->l_last_sync_lsn;
+			GRANT_UNLOCK(log, s);
+
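+			/*
+			 * If the inode's last logged lsn has already
+			 * been written to the on-disk log, there is
+			 * nothing left to force.
+			 */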
+			if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
+				return 0;
+
+			if (flags & FLUSH_SYNC)
+				log_flags |= XFS_LOG_SYNC;
+			return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+		}
+	}
+
+	/*
+	 * We make this non-blocking if the inode is contended and
+	 * return EAGAIN to indicate to the caller that it did not
+	 * succeed.  This prevents the flush path from blocking on
+	 * inodes that are inside another operation right now; they
+	 * get caught later by xfs_sync.
+	 */
+	if (flags & FLUSH_INODE) {
+		int	flush_flags;
+
+		if (xfs_ipincount(ip))
+			return EAGAIN;
+
+		if (flags & FLUSH_SYNC) {
+			xfs_ilock(ip, XFS_ILOCK_SHARED);
+			xfs_iflock(ip);
+		} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+			if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
+				xfs_iunlock(ip, XFS_ILOCK_SHARED);
+				return EAGAIN;
+			}
+		} else {
+			return EAGAIN;
+		}
+
+		if (flags & FLUSH_SYNC)
+			flush_flags = XFS_IFLUSH_SYNC;
+		else
+			flush_flags = XFS_IFLUSH_ASYNC;
+
+		error = xfs_iflush(ip, flush_flags);
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	}
+
+	return error;
+}
+
+
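+/*
+ * xfs_set_dmattrs
+ *
+ * Set the DMAPI event mask and state for this inode.  Requires
+ * CAP_SYS_ADMIN, and the change is logged in its own transaction.
+ */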
+int
+xfs_set_dmattrs (
+	bhv_desc_t	*bdp,
+	u_int		evmask,
+	u_int16_t	state,
+	cred_t		*credp)
+{
+	xfs_inode_t     *ip;
+	xfs_trans_t	*tp;
+	xfs_mount_t	*mp;
+	int		error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return XFS_ERROR(EPERM);
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
+	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+	ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask;
+	ip->i_iocore.io_dmstate  = ip->i_d.di_dmstate  = state;
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	IHOLD(ip);
+	error = xfs_trans_commit(tp, 0, NULL);
+
+	return error;
+}
+
+
+/*
+ * xfs_reclaim
+ *
+ * Called when the vnode is being torn down: flush and invalidate any
+ * remaining file data, then either finish the reclaim immediately or
+ * queue the inode on m_del_inodes for later cleanup.
+ */
+STATIC int
+xfs_reclaim(
+	bhv_desc_t	*bdp)
+{
+	xfs_inode_t	*ip;
+	vnode_t		*vp;
+
+	vp = BHV_TO_VNODE(bdp);
+	ip = XFS_BHVTOI(bdp);
+
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	ASSERT(!VN_MAPPED(vp));
+
+	/* bad inode, get out here ASAP */
+	if (VN_BAD(vp)) {
+		xfs_ireclaim(ip);
+		return 0;
+	}
+
+	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
+		if (ip->i_d.di_size > 0) {
+			/*
+			 * Flush and invalidate any data left around that is
+			 * a part of this file.
+			 *
+			 * Get the inode's i/o lock so that buffers are pushed
+			 * out while holding the proper lock.  We can't hold
+			 * the inode lock here since flushing out buffers may
+			 * cause us to try to get the lock in xfs_strategy().
+			 *
+			 * We don't have to call remapf() here, because there
+			 * cannot be any mapped file references to this vnode
+			 * since it is being reclaimed.
+			 */
+			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+			/*
+			 * If we hit an I/O error, we need to make sure
+			 * that the file's buffer and page caches are
+			 * tossed away.  We don't want to use
+			 * VOP_FLUSHINVAL_PAGES here because we don't
+			 * want dirty pages to stay attached to the
+			 * vnode, only to be marked P_BAD;
+			 * pdflush/vnode_pagebad hates that.
+			 */
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE);
+			} else {
+				VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
+			}
+
+			ASSERT(VN_CACHED(vp) == 0);
+			ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
+			       ip->i_delayed_blks == 0);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		} else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+			/*
+			 * di_size field may not be quite accurate if we're
+			 * shutting down.
+			 */
+			VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
+			ASSERT(VN_CACHED(vp) == 0);
+		}
+	}
+
+	/* If we have nothing to flush with this inode then complete the
+	 * teardown now, otherwise break the link between the xfs inode
+	 * and the linux inode and clean up the xfs inode later. This
+	 * avoids flushing the inode to disk during the delete operation
+	 * itself.
+	 */
+	if (!ip->i_update_core && (ip->i_itemp == NULL)) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_iflock(ip);
+		return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
+	} else {
+		xfs_mount_t	*mp = ip->i_mount;
+
+		/* Protect sync from us */
+		XFS_MOUNT_ILOCK(mp);
+		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
+		list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
+		ip->i_flags |= XFS_IRECLAIMABLE;
+		XFS_MOUNT_IUNLOCK(mp);
+	}
+	return 0;
+}
+
+int
+xfs_finish_reclaim(
+	xfs_inode_t	*ip,
+	int		locked,
+	int		sync_mode)
+{
+	xfs_ihash_t	*ih = ip->i_hash;
+	vnode_t		*vp = XFS_ITOV_NULL(ip);
+	int		error;
+
+	if (vp && VN_BAD(vp))
+		goto reclaim;
+
+	/* The hash lock here protects a thread in xfs_iget_core from
+	 * racing with us on linking the inode back with a vnode.
+	 * Once we have the XFS_IRECLAIM flag set it will not touch
+	 * us.
+	 */
+	write_lock(&ih->ih_lock);
+	if ((ip->i_flags & XFS_IRECLAIM) ||
+	    (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) {
+		write_unlock(&ih->ih_lock);
+		if (locked) {
+			xfs_ifunlock(ip);
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+		return(1);
+	}
+	ip->i_flags |= XFS_IRECLAIM;
+	write_unlock(&ih->ih_lock);
+
+	/*
+	 * If the inode is still dirty, then flush it out.  If the inode
+	 * is not in the AIL, then it will be OK to flush it delwri as
+	 * long as xfs_iflush() does not keep any references to the inode.
+	 * We leave that decision up to xfs_iflush() since it has the
+	 * knowledge of whether it's OK to simply do a delwri flush of
+	 * the inode or whether we need to wait until the inode is
+	 * pulled from the AIL.
+	 * We get the flush lock regardless, though, just to make sure
+	 * we don't free it while it is being flushed.
+	 */
+	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+		if (!locked) {
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			xfs_iflock(ip);
+		}
+
+		if (ip->i_update_core ||
+		    ((ip->i_itemp != NULL) &&
+		     (ip->i_itemp->ili_format.ilf_fields != 0))) {
+			error = xfs_iflush(ip, sync_mode);
+			/*
+			 * If we hit an error, typically because of filesystem
+			 * shutdown, we don't need to let vn_reclaim know
+			 * because we're going to reclaim the inode anyway.
+			 */
+			if (error) {
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				goto reclaim;
+			}
+			xfs_iflock(ip); /* synchronize with xfs_iflush_done */
+		}
+
+		ASSERT(ip->i_update_core == 0);
+		ASSERT(ip->i_itemp == NULL ||
+		       ip->i_itemp->ili_format.ilf_fields == 0);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	} else if (locked) {
+		/*
+		 * We are not interested in doing an iflush if we're
+		 * in the process of shutting down the filesystem forcibly.
+		 * So, just reclaim the inode.
+		 */
+		xfs_ifunlock(ip);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+
+ reclaim:
+	xfs_ireclaim(ip);
+	return 0;
+}
+
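+/*
+ * Reclaim every inode queued on mp->m_del_inodes.  When noblock is set,
+ * any inode whose locks cannot be taken immediately, or which is still
+ * pinned, is skipped rather than waited for.
+ */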
+int
+xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock)
+{
+	int		purged;
+	xfs_inode_t	*ip, *n;
+	int		done = 0;
+
+	while (!done) {
+		purged = 0;
+		XFS_MOUNT_ILOCK(mp);
+		list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
+			if (noblock) {
+				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
+					continue;
+				if (xfs_ipincount(ip) ||
+				    !xfs_iflock_nowait(ip)) {
+					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					continue;
+				}
+			}
+			XFS_MOUNT_IUNLOCK(mp);
+			xfs_finish_reclaim(ip, noblock,
+				XFS_IFLUSH_DELWRI_ELSE_ASYNC);
+			purged = 1;
+			break;
+		}
+
+		done = !purged;
+	}
+
+	XFS_MOUNT_IUNLOCK(mp);
+	return 0;
+}
+
+/*
+ * xfs_alloc_file_space()
+ *      This routine allocates disk space for the given file.
+ *
+ *	If alloc_type == 0, this request is for an ALLOCSP type
+ *	request which will change the file size.  In this case, no
+ *	DMAPI event will be generated by the call.  A TRUNCATE event
+ *	will be generated later by xfs_setattr.
+ *
+ *	If alloc_type != 0, this request is for a RESVSP type
+ *	request, and a DMAPI DM_EVENT_WRITE will be generated if the
+ *	lower block boundary byte address is less than the file's
+ *	length.
+ *
+ * RETURNS:
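+ *	In this file, xfs_change_file_space() passes alloc_type = 1 for the
+ *	XFS_IOC_RESVSP* requests and alloc_type = 0 for XFS_IOC_ALLOCSP*.
+ *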
+ *       0 on success
+ *      errno on error
+ *
+ */
+int
+xfs_alloc_file_space(
+	xfs_inode_t		*ip,
+	xfs_off_t		offset,
+	xfs_off_t		len,
+	int			alloc_type,
+	int			attr_flags)
+{
+	xfs_filblks_t		allocated_fsb;
+	xfs_filblks_t		allocatesize_fsb;
+	int			committed;
+	xfs_off_t		count;
+	xfs_filblks_t		datablocks;
+	int			error;
+	xfs_fsblock_t		firstfsb;
+	xfs_bmap_free_t		free_list;
+	xfs_bmbt_irec_t		*imapp;
+	xfs_bmbt_irec_t		imaps[1];
+	xfs_mount_t		*mp;
+	int			numrtextents;
+	int			reccount;
+	uint			resblks;
+	int			rt;
+	int			rtextsize;
+	xfs_fileoff_t		startoffset_fsb;
+	xfs_trans_t		*tp;
+	int			xfs_bmapi_flags;
+
+	vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+	mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	/*
+	 * determine if this is a realtime file
+	 */
+	if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) {
+		if (ip->i_d.di_extsize)
+			rtextsize = ip->i_d.di_extsize;
+		else
+			rtextsize = mp->m_sb.sb_rextsize;
+	} else
+		rtextsize = 0;
+
+	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+		return error;
+
+	if (len <= 0)
+		return XFS_ERROR(EINVAL);
+
+	count = len;
+	error = 0;
+	imapp = &imaps[0];
+	reccount = 1;
+	xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
+	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+
+	/* Generate a DMAPI event if needed. */
+	if (alloc_type != 0 && offset < ip->i_d.di_size &&
+			(attr_flags&ATTR_DMI) == 0  &&
+			DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
+		xfs_off_t           end_dmi_offset;
+
+		end_dmi_offset = offset+len;
+		if (end_dmi_offset > ip->i_d.di_size)
+			end_dmi_offset = ip->i_d.di_size;
+		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
+			offset, end_dmi_offset - offset,
+			0, NULL);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * allocate file space until done or until there is an error
+	 */
+retry:
+	while (allocatesize_fsb && !error) {
+		/*
+		 * determine whether we are reserving space on
+		 * the data or realtime partition.
+		 */
+		if (rt) {
+			xfs_fileoff_t s, e;
+
+			s = startoffset_fsb;
+			do_div(s, rtextsize);
+			s *= rtextsize;
+			e = roundup_64(startoffset_fsb + allocatesize_fsb,
+				rtextsize);
+			numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize;
+			datablocks = 0;
+		} else {
+			datablocks = allocatesize_fsb;
+			numrtextents = 0;
+		}
+
+		/*
+		 * allocate and setup the transaction
+		 */
+		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+		resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+		error = xfs_trans_reserve(tp,
+					  resblks,
+					  XFS_WRITE_LOG_RES(mp),
+					  numrtextents,
+					  XFS_TRANS_PERM_LOG_RES,
+					  XFS_WRITE_LOG_COUNT);
+
+		/*
+		 * check for running out of space
+		 */
+		if (error) {
+			/*
+			 * Free the transaction structure.
+			 */
+			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			break;
+		}
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		error = XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp,
+				ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
+				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+		if (error)
+			goto error1;
+
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * issue the bmapi() call to allocate the blocks
+		 */
+		XFS_BMAP_INIT(&free_list, &firstfsb);
+		error = xfs_bmapi(tp, ip, startoffset_fsb,
+				  allocatesize_fsb, xfs_bmapi_flags,
+				  &firstfsb, 0, imapp, &reccount,
+				  &free_list);
+		if (error) {
+			goto error0;
+		}
+
+		/*
+		 * complete the transaction
+		 */
+		error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+		if (error) {
+			goto error0;
+		}
+
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		if (error) {
+			break;
+		}
+
+		allocated_fsb = imapp->br_blockcount;
+
+		if (reccount == 0) {
+			error = XFS_ERROR(ENOSPC);
+			break;
+		}
+
+		startoffset_fsb += allocated_fsb;
+		allocatesize_fsb -= allocated_fsb;
+	}
+dmapi_enospc_check:
+	if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 &&
+	    DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) {
+
+		error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
+				XFS_ITOV(ip), DM_RIGHT_NULL,
+				XFS_ITOV(ip), DM_RIGHT_NULL,
+				NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
+		if (error == 0)
+			goto retry;	/* Maybe DMAPI app. has made space */
+		/* else fall through with error from XFS_SEND_NAMESP */
+	}
+
+	return error;
+
+ error0:
+	xfs_bmap_cancel(&free_list);
+ error1:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	goto dmapi_enospc_check;
+}
+
+/*
+ * Zero file bytes between startoff and endoff inclusive.
+ * The iolock is held exclusive and no blocks are buffered.
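+ *
+ * Used by xfs_free_file_space() to zero the portions of the affected
+ * blocks that are not actually being freed.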
+ */
+STATIC int
+xfs_zero_remaining_bytes(
+	xfs_inode_t		*ip,
+	xfs_off_t		startoff,
+	xfs_off_t		endoff)
+{
+	xfs_bmbt_irec_t		imap;
+	xfs_fileoff_t		offset_fsb;
+	xfs_off_t		lastoffset;
+	xfs_off_t		offset;
+	xfs_buf_t		*bp;
+	xfs_mount_t		*mp = ip->i_mount;
+	int			nimap;
+	int			error = 0;
+
+	bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize,
+				ip->i_d.di_flags & XFS_DIFLAG_REALTIME ?
+				mp->m_rtdev_targp : mp->m_ddev_targp);
+
+	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
+		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+		nimap = 1;
+		error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap,
+			&nimap, NULL);
+		if (error || nimap < 1)
+			break;
+		ASSERT(imap.br_blockcount >= 1);
+		ASSERT(imap.br_startoff == offset_fsb);
+		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
+		if (lastoffset > endoff)
+			lastoffset = endoff;
+		if (imap.br_startblock == HOLESTARTBLOCK)
+			continue;
+		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+		if (imap.br_state == XFS_EXT_UNWRITTEN)
+			continue;
+		XFS_BUF_UNDONE(bp);
+		XFS_BUF_UNWRITE(bp);
+		XFS_BUF_READ(bp);
+		XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock));
+		xfsbdstrat(mp, bp);
+		if ((error = xfs_iowait(bp))) {
+			xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
+					  mp, bp, XFS_BUF_ADDR(bp));
+			break;
+		}
+		memset(XFS_BUF_PTR(bp) +
+			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
+		      0, lastoffset - offset + 1);
+		XFS_BUF_UNDONE(bp);
+		XFS_BUF_UNREAD(bp);
+		XFS_BUF_WRITE(bp);
+		xfsbdstrat(mp, bp);
+		if ((error = xfs_iowait(bp))) {
+			xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
+					  mp, bp, XFS_BUF_ADDR(bp));
+			break;
+		}
+	}
+	xfs_buf_free(bp);
+	return error;
+}
+
+/*
+ * xfs_free_file_space()
+ *      This routine frees disk space for the given file.
+ *
+ *	This routine is only called by xfs_change_file_space
+ *	for an UNRESVSP type call.
+ *
+ * RETURNS:
+ *       0 on success
+ *      errno on error
+ *
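+ * The flow is: send a DMAPI write event if needed, invalidate any cached
+ * pages over the range, zero the partial-block edges that are not being
+ * freed, and then loop calling xfs_bunmapi() in its own transaction until
+ * the whole range has been unmapped.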
+ */
+STATIC int
+xfs_free_file_space(
+	xfs_inode_t		*ip,
+	xfs_off_t		offset,
+	xfs_off_t		len,
+	int			attr_flags)
+{
+	int			committed;
+	int			done;
+	xfs_off_t		end_dmi_offset;
+	xfs_fileoff_t		endoffset_fsb;
+	int			error;
+	xfs_fsblock_t		firstfsb;
+	xfs_bmap_free_t		free_list;
+	xfs_off_t		ilen;
+	xfs_bmbt_irec_t		imap;
+	xfs_off_t		ioffset;
+	xfs_extlen_t		mod=0;
+	xfs_mount_t		*mp;
+	int			nimap;
+	uint			resblks;
+	int			rounding;
+	int			rt;
+	xfs_fileoff_t		startoffset_fsb;
+	xfs_trans_t		*tp;
+	int			need_iolock = (attr_flags & ATTR_DMI) == 0;
+
+	vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+	mp = ip->i_mount;
+
+	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+		return error;
+
+	error = 0;
+	if (len <= 0)	/* if nothing being freed */
+		return error;
+	rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
+	end_dmi_offset = offset + len;
+	endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
+
+	if (offset < ip->i_d.di_size &&
+	    (attr_flags & ATTR_DMI) == 0 &&
+	    DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
+		if (end_dmi_offset > ip->i_d.di_size)
+			end_dmi_offset = ip->i_d.di_size;
+		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
+				offset, end_dmi_offset - offset,
+				AT_DELAY_FLAG(attr_flags), NULL);
+		if (error)
+			return(error);
+	}
+
+	if (need_iolock)
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
+			(__uint8_t)NBPP);
+	ilen = len + (offset & (rounding - 1));
+	ioffset = offset & ~(rounding - 1);
+	if (ilen & (rounding - 1))
+		ilen = (ilen + rounding) & ~(rounding - 1);
+	xfs_inval_cached_pages(XFS_ITOV(ip), &(ip->i_iocore), ioffset, 0, 0);
+	/*
+	 * Need to zero, on disk, the parts we're not freeing.
+	 * If it's a realtime file and we can't use unwritten extents then we
+	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
+	 * will take care of it for us.
+	 */
+	if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
+		nimap = 1;
+		error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0,
+			&imap, &nimap, NULL);
+		if (error)
+			goto out_unlock_iolock;
+		ASSERT(nimap == 0 || nimap == 1);
+		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+			xfs_daddr_t	block;
+
+			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+			block = imap.br_startblock;
+			mod = do_div(block, mp->m_sb.sb_rextsize);
+			if (mod)
+				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
+		}
+		nimap = 1;
+		error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0,
+			&imap, &nimap, NULL);
+		if (error)
+			goto out_unlock_iolock;
+		ASSERT(nimap == 0 || nimap == 1);
+		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+			mod++;
+			if (mod && (mod != mp->m_sb.sb_rextsize))
+				endoffset_fsb -= mod;
+		}
+	}
+	if ((done = (endoffset_fsb <= startoffset_fsb)))
+		/*
+		 * One contiguous piece to clear
+		 */
+		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
+	else {
+		/*
+		 * Some full blocks, possibly two pieces to clear
+		 */
+		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
+			error = xfs_zero_remaining_bytes(ip, offset,
+				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
+		if (!error &&
+		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
+			error = xfs_zero_remaining_bytes(ip,
+				XFS_FSB_TO_B(mp, endoffset_fsb),
+				offset + len - 1);
+	}
+
+	/*
+	 * free file space until done or until there is an error
+	 */
+	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+	while (!error && !done) {
+
+		/*
+		 * allocate and setup the transaction
+		 */
+		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+		error = xfs_trans_reserve(tp,
+					  resblks,
+					  XFS_WRITE_LOG_RES(mp),
+					  0,
+					  XFS_TRANS_PERM_LOG_RES,
+					  XFS_WRITE_LOG_COUNT);
+
+		/*
+		 * check for running out of space
+		 */
+		if (error) {
+			/*
+			 * Free the transaction structure.
+			 */
+			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			break;
+		}
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
+				ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
+				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+		if (error)
+			goto error1;
+
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * issue the bunmapi() call to free the blocks
+		 */
+		XFS_BMAP_INIT(&free_list, &firstfsb);
+		error = xfs_bunmapi(tp, ip, startoffset_fsb,
+				  endoffset_fsb - startoffset_fsb,
+				  0, 2, &firstfsb, &free_list, &done);
+		if (error) {
+			goto error0;
+		}
+
+		/*
+		 * complete the transaction
+		 */
+		error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+		if (error) {
+			goto error0;
+		}
+
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+
+ out_unlock_iolock:
+	if (need_iolock)
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+
+ error0:
+	xfs_bmap_cancel(&free_list);
+ error1:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
+		    XFS_ILOCK_EXCL);
+	return error;
+}
+
+/*
+ * xfs_change_file_space()
+ *      This routine allocates or frees disk space for the given file.
+ *      The user specified parameters are checked for alignment and size
+ *      limitations.
+ *
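+ *	Illustrative only (not part of this file): userspace typically
+ *	reaches these paths through the XFS space preallocation ioctls
+ *	with an xfs_flock64_t argument, e.g.
+ *
+ *		xfs_flock64_t bf = { 0 };
+ *		bf.l_whence = 0;			(SEEK_SET)
+ *		bf.l_start  = 0;
+ *		bf.l_len    = 16 << 20;			(reserve 16MB)
+ *		ioctl(fd, XFS_IOC_RESVSP64, &bf);
+ *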
+ * RETURNS:
+ *       0 on success
+ *      errno on error
+ *
+ */
+int
+xfs_change_file_space(
+	bhv_desc_t	*bdp,
+	int		cmd,
+	xfs_flock64_t	*bf,
+	xfs_off_t	offset,
+	cred_t		*credp,
+	int		attr_flags)
+{
+	int		clrprealloc;
+	int		error;
+	xfs_fsize_t	fsize;
+	xfs_inode_t	*ip;
+	xfs_mount_t	*mp;
+	int		setprealloc;
+	xfs_off_t	startoffset;
+	xfs_off_t	llen;
+	xfs_trans_t	*tp;
+	vattr_t		va;
+	vnode_t		*vp;
+
+	vp = BHV_TO_VNODE(bdp);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
+
+	/*
+	 * must be a regular file and have write permission
+	 */
+	if (vp->v_type != VREG)
+		return XFS_ERROR(EINVAL);
+
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+	if ((error = xfs_iaccess(ip, S_IWUSR, credp))) {
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		return error;
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	switch (bf->l_whence) {
+	case 0: /*SEEK_SET*/
+		break;
+	case 1: /*SEEK_CUR*/
+		bf->l_start += offset;
+		break;
+	case 2: /*SEEK_END*/
+		bf->l_start += ip->i_d.di_size;
+		break;
+	default:
+		return XFS_ERROR(EINVAL);
+	}
+
+	llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len;
+
+	if (   (bf->l_start < 0)
+	    || (bf->l_start > XFS_MAXIOFFSET(mp))
+	    || (bf->l_start + llen < 0)
+	    || (bf->l_start + llen > XFS_MAXIOFFSET(mp)))
+		return XFS_ERROR(EINVAL);
+
+	bf->l_whence = 0;
+
+	startoffset = bf->l_start;
+	fsize = ip->i_d.di_size;
+
+	/*
+	 * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
+	 * file space.
+	 * These calls do NOT zero the data space allocated to the file,
+	 * nor do they change the file size.
+	 *
+	 * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file
+	 * space.
+	 * These calls cause the new file data to be zeroed and the file
+	 * size to be changed.
+	 */
+	setprealloc = clrprealloc = 0;
+
+	switch (cmd) {
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_RESVSP64:
+		error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
+								1, attr_flags);
+		if (error)
+			return error;
+		setprealloc = 1;
+		break;
+
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_UNRESVSP64:
+		if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
+								attr_flags)))
+			return error;
+		break;
+
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_FREESP64:
+		if (startoffset > fsize) {
+			error = xfs_alloc_file_space(ip, fsize,
+					startoffset - fsize, 0, attr_flags);
+			if (error)
+				break;
+		}
+
+		va.va_mask = XFS_AT_SIZE;
+		va.va_size = startoffset;
+
+		error = xfs_setattr(bdp, &va, attr_flags, credp);
+
+		if (error)
+			return error;
+
+		clrprealloc = 1;
+		break;
+
+	default:
+		ASSERT(0);
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
+	 * update the inode timestamp, mode, and prealloc flag bits
+	 */
+	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+
+	if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
+				      0, 0, 0))) {
+		/* ASSERT(0); */
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+
+	if ((attr_flags & ATTR_DMI) == 0) {
+		ip->i_d.di_mode &= ~S_ISUID;
+
+		/*
+		 * Note that we don't have to worry about mandatory
+		 * file locking being disabled here because we only
+		 * clear the S_ISGID bit if the Group execute bit is
+		 * on, but if it was on then mandatory locking wouldn't
+		 * have been enabled.
+		 */
+		if (ip->i_d.di_mode & S_IXGRP)
+			ip->i_d.di_mode &= ~S_ISGID;
+
+		xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	}
+	if (setprealloc)
+		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+	else if (clrprealloc)
+		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_trans_set_sync(tp);
+
+	error = xfs_trans_commit(tp, 0, NULL);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	return error;
+}
+
+vnodeops_t xfs_vnodeops = {
+	BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
+	.vop_open		= xfs_open,
+	.vop_read		= xfs_read,
+#ifdef HAVE_SENDFILE
+	.vop_sendfile		= xfs_sendfile,
+#endif
+	.vop_write		= xfs_write,
+	.vop_ioctl		= xfs_ioctl,
+	.vop_getattr		= xfs_getattr,
+	.vop_setattr		= xfs_setattr,
+	.vop_access		= xfs_access,
+	.vop_lookup		= xfs_lookup,
+	.vop_create		= xfs_create,
+	.vop_remove		= xfs_remove,
+	.vop_link		= xfs_link,
+	.vop_rename		= xfs_rename,
+	.vop_mkdir		= xfs_mkdir,
+	.vop_rmdir		= xfs_rmdir,
+	.vop_readdir		= xfs_readdir,
+	.vop_symlink		= xfs_symlink,
+	.vop_readlink		= xfs_readlink,
+	.vop_fsync		= xfs_fsync,
+	.vop_inactive		= xfs_inactive,
+	.vop_fid2		= xfs_fid2,
+	.vop_rwlock		= xfs_rwlock,
+	.vop_rwunlock		= xfs_rwunlock,
+	.vop_bmap		= xfs_bmap,
+	.vop_reclaim		= xfs_reclaim,
+	.vop_attr_get		= xfs_attr_get,
+	.vop_attr_set		= xfs_attr_set,
+	.vop_attr_remove	= xfs_attr_remove,
+	.vop_attr_list		= xfs_attr_list,
+	.vop_link_removed	= (vop_link_removed_t)fs_noval,
+	.vop_vnode_change	= (vop_vnode_change_t)fs_noval,
+	.vop_tosspages		= fs_tosspages,
+	.vop_flushinval_pages	= fs_flushinval_pages,
+	.vop_flush_pages	= fs_flush_pages,
+	.vop_release		= xfs_release,
+	.vop_iflush		= xfs_inode_flush,
+};